path: root/tools/perf/scripts/python/stackcollapse.py
author    Linus Torvalds <torvalds@linux-foundation.org>  2021-05-23 06:12:25 -1000
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-05-23 06:12:25 -1000
commit    7de7ac8d60697d844489b6a68649fa9873174eec (patch)
tree      5fe925a59eaadfd0e22d15a458c2058f6fe62d7b /tools/perf/scripts/python/stackcollapse.py
parent    28ceac6959e1db015729c52ec74e0a4ff496c2b8 (diff)
parent    4954f5b8ef0baf70fe978d1a99a5f70e4dd5c877 (diff)
Merge tag 'x86_urgent_for_v5.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Fix how SEV handles MMIO accesses by forwarding potential page faults
   instead of killing the machine and by using the accessors with the
   exact functionality needed when accessing memory.

 - Fix a confusion with Clang LTO compiler switches passed to it

 - Handle the case gracefully when VMGEXIT has been executed in userspace

* tag 'x86_urgent_for_v5.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev-es: Use __put_user()/__get_user() for data accesses
  x86/sev-es: Forward page-faults which happen during emulation
  x86/sev-es: Don't return NULL from sev_es_get_ghcb()
  x86/build: Fix location of '-plugin-opt=' flags
  x86/sev-es: Invalidate the GHCB after completing VMGEXIT
  x86/sev-es: Move sev_es_put_ghcb() in prep for follow on patch
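For context, the __put_user()/__get_user() accessors referenced above copy a single value to or from user space without the extra pointer checking done by put_user()/get_user(); the caller is expected to have validated the pointer already. A minimal, hypothetical sketch of that pattern (illustrative only, not code from these patches):

    /* Illustrative sketch only -- not taken from the x86/sev-es patches.
     * __get_user()/__put_user() copy one value and return 0 on success or
     * -EFAULT on a faulting access; the caller must have validated the
     * user pointer beforehand (e.g. with access_ok()).
     */
    #include <linux/uaccess.h>

    static int copy_one_value(unsigned long __user *src,
                              unsigned long __user *dst)
    {
            unsigned long val;

            if (__get_user(val, src))
                    return -EFAULT;

            return __put_user(val, dst);
    }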
Diffstat (limited to 'tools/perf/scripts/python/stackcollapse.py')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h35
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h14
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h33
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c11
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h12
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h22
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c235
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h109
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c1297
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c419
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h109
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h36
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c61
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h16
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h152
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h12
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h67
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h158
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c66
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h12
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h12
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c275
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h51
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h106
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h29
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h91
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h14
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h75
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_defs.h403
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h44
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h16
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h34
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h170
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h39
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h125
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h91
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h51
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h71
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h102
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h51
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h50
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h25
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h24
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h107
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h164
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h164
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h28
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h30
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h93
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h86
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h69
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h199
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h32
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h51
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h10
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h12
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h17
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h23
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h31
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h25
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h33
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h36
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h33
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h28
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h12
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h10
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c83
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h14
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h10
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h10
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h27
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h48
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_types.h80
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c499
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c1075
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css.h48
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_3a.h186
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h460
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_buffer.h77
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_control.h112
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.c87
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.h52
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_dvs.h293
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_env.h87
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_err.h34
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_event_public.h174
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_firmware.h57
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frac.h29
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_format.h93
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_public.h253
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_host_data.h37
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_input_port.h52
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_irq.h225
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.c312
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.h110
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.c3335
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.h383
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_states.c215
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_states.h65
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_metadata.h68
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mipi.h39
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu.h24
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu_private.h21
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_morph.h31
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe.h173
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe_public.h464
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_prbs.h45
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_properties.h33
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_shading.h32
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream.h99
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_format.h21
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_public.h553
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_timer.h61
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_types.h596
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version.h32
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version_data.h19
-rw-r--r--drivers/staging/media/atomisp/pci/if_defs.h14
-rw-r--r--drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h45
-rw-r--r--drivers/staging/media/atomisp/pci/input_selector_defs.h80
-rw-r--r--drivers/staging/media/atomisp/pci/input_switch_2400_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h235
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_defs.h118
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_global.h30
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_local.h142
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_private.h8
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_public.h7
-rw-r--r--drivers/staging/media/atomisp/pci/irq_controller_defs.h20
-rw-r--r--drivers/staging/media/atomisp/pci/irq_types_hrt.h60
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h19
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h17
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h19
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c188
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h56
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h98
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c123
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h63
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c57
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h17
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h21
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c58
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c119
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h70
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c113
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c149
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c50
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c62
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h102
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c71
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h19
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c123
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c57
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h51
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c289
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h50
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h21
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c326
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h37
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h147
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h79
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h17
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c85
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c109
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h57
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c62
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h89
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c101
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h71
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c71
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h50
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h61
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c88
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h12
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h12
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h13
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c91
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h19
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h13
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h13
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c68
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c66
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h17
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h15
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c7
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h12
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h10
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c68
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c146
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h60
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c150
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c114
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h19
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c77
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h17
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c373
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h69
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h213
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c82
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h112
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h211
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c428
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c340
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h87
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h67
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c66
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h56
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c113
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h49
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c134
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h37
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c78
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h21
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c57
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h62
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c240
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h75
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h89
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c209
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h52
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h72
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c110
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h48
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h37
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h85
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h76
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_global.h139
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h234
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_private.h114
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h308
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_support.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_global.h166
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_local.h61
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_private.h225
-rw-r--r--drivers/staging/media/atomisp/pci/isp_acquisition_defs.h221
-rw-r--r--drivers/staging/media/atomisp/pci/isp_capture_defs.h270
-rw-r--r--drivers/staging/media/atomisp/pci/mamoiada_params.h202
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/isp_mmu.c556
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c66
-rw-r--r--drivers/staging/media/atomisp/pci/mmu_defs.h15
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h216
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c1286
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h169
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h32
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c523
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h406
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h7
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h59
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c1885
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h22
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/src/event.c101
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h45
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c67
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h134
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h107
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c740
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h25
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c530
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h45
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c520
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h94
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h74
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c210
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h167
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h43
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c157
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h18
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c113
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h30
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c77
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h16
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c112
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c79
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h16
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c654
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c832
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h16
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h272
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h19
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c770
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h167
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h45
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c392
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c169
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h78
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h64
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h92
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c31
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c320
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h60
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h41
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c176
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h31
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c20
-rw-r--r--drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h12
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c9063
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h353
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c382
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.h45
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_frac.h48
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_host_data.c34
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.c73
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.h26
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h956
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_legacy.h61
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.c145
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.h47
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c545
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.h26
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mmu.c51
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.c274
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h76
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.c383
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.h26
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c4548
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.h172
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params_internal.h13
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_properties.c25
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c1741
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h220
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.c68
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.h15
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_struct.h76
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_uds.h29
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_version.c31
-rw-r--r--drivers/staging/media/atomisp/pci/str2mem_defs.h31
-rw-r--r--drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h20
-rw-r--r--drivers/staging/media/atomisp/pci/system_global.h368
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.c170
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.h94
-rw-r--r--drivers/staging/media/atomisp/pci/timed_controller_defs.h14
-rw-r--r--drivers/staging/media/atomisp/pci/version.h12
-rw-r--r--drivers/staging/media/av7110/Kconfig72
-rw-r--r--drivers/staging/media/av7110/Makefile20
-rw-r--r--drivers/staging/media/av7110/av7110.c2895
-rw-r--r--drivers/staging/media/av7110/av7110.h311
-rw-r--r--drivers/staging/media/av7110/av7110_av.c1689
-rw-r--r--drivers/staging/media/av7110/av7110_av.h31
-rw-r--r--drivers/staging/media/av7110/av7110_ca.c386
-rw-r--r--drivers/staging/media/av7110/av7110_ca.h15
-rw-r--r--drivers/staging/media/av7110/av7110_hw.c1191
-rw-r--r--drivers/staging/media/av7110/av7110_hw.h483
-rw-r--r--drivers/staging/media/av7110/av7110_ipack.c394
-rw-r--r--drivers/staging/media/av7110/av7110_ipack.h13
-rw-r--r--drivers/staging/media/av7110/av7110_ir.c157
-rw-r--r--drivers/staging/media/av7110/av7110_v4l.c958
-rw-r--r--drivers/staging/media/av7110/dvb_filter.c115
-rw-r--r--drivers/staging/media/av7110/dvb_filter.h238
-rw-r--r--drivers/staging/media/av7110/sp8870.c625
-rw-r--r--drivers/staging/media/av7110/sp8870.h33
-rw-r--r--drivers/staging/media/bcm2048/Kconfig13
-rw-r--r--drivers/staging/media/bcm2048/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/TODO24
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2711
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.h30
-rw-r--r--drivers/staging/media/cxd2099/Kconfig12
-rw-r--r--drivers/staging/media/cxd2099/Makefile5
-rw-r--r--drivers/staging/media/cxd2099/TODO12
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c721
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.h51
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig10
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile3
-rw-r--r--drivers/staging/media/davinci_vpfe/TODO37
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt154
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h1290
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c1862
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h179
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c1048
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h558
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c1067
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h233
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h93
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c2106
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.h203
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif_regs.h294
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c1996
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h244
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe.h86
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c716
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h93
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c1628
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h153
-rw-r--r--drivers/staging/media/deprecated/atmel/Kconfig47
-rw-r--r--drivers/staging/media/deprecated/atmel/Makefile8
-rw-r--r--drivers/staging/media/deprecated/atmel/TODO34
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-base.c2008
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-clk.c311
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-regs.h413
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc.h362
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c644
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c607
-rw-r--r--drivers/staging/media/imx/Kconfig15
-rw-r--r--drivers/staging/media/imx/Makefile14
-rw-r--r--drivers/staging/media/imx/TODO13
-rw-r--r--drivers/staging/media/imx/imx-ic-common.c87
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c528
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c1386
-rw-r--r--drivers/staging/media/imx/imx-ic.h32
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c1055
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c925
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c2083
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c401
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c143
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c431
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c306
-rw-r--r--drivers/staging/media/imx/imx-media-of.c71
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c785
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c933
-rw-r--r--drivers/staging/media/imx/imx-media.h299
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c846
-rw-r--r--drivers/staging/media/ipu3/Kconfig16
-rw-r--r--drivers/staging/media/ipu3/Makefile12
-rw-r--r--drivers/staging/media/ipu3/TODO12
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h2820
-rw-r--r--drivers/staging/media/ipu3/ipu3-abi.h2011
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c266
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.h193
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c2959
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.h28
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.c100
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.h54
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c2355
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.h213
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.c250
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.h22
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.c537
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.h36
-rw-r--r--drivers/staging/media/ipu3/ipu3-tables.c9609
-rw-r--r--drivers/staging/media/ipu3/ipu3-tables.h68
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c1419
-rw-r--r--drivers/staging/media/ipu3/ipu3.c865
-rw-r--r--drivers/staging/media/ipu3/ipu3.h178
-rw-r--r--drivers/staging/media/ipu7/Kconfig19
-rw-r--r--drivers/staging/media/ipu7/Makefile23
-rw-r--r--drivers/staging/media/ipu7/TODO28
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h163
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h175
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h412
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h465
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h24
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h49
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.c430
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.h25
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.c158
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.h69
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress-regs.h461
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.c1192
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.h77
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.c276
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.c477
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.h46
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.c301
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.h39
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.c1034
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h1197
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.c543
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.h64
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.c828
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.h72
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.c333
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.h52
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.c1078
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.h117
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.c1166
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.h140
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.c853
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.h414
-rw-r--r--drivers/staging/media/ipu7/ipu7-platform-regs.h82
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.c78
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.h35
-rw-r--r--drivers/staging/media/ipu7/ipu7.c2778
-rw-r--r--drivers/staging/media/ipu7/ipu7.h242
-rw-r--r--drivers/staging/media/lirc/Kconfig66
-rw-r--r--drivers/staging/media/lirc/Makefile12
-rw-r--r--drivers/staging/media/lirc/TODO13
-rw-r--r--drivers/staging/media/lirc/TODO.lirc_zilog36
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c404
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c983
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c744
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.h26
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c921
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c1158
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c999
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c1697
-rw-r--r--drivers/staging/media/max96712/Kconfig14
-rw-r--r--drivers/staging/media/max96712/Makefile2
-rw-r--r--drivers/staging/media/max96712/max96712.c487
-rw-r--r--drivers/staging/media/meson/vdec/Kconfig11
-rw-r--r--drivers/staging/media/meson/vdec/Makefile8
-rw-r--r--drivers/staging/media/meson/vdec/TODO8
-rw-r--r--drivers/staging/media/meson/vdec/codec_h264.c485
-rw-r--r--drivers/staging/media/meson/vdec/codec_h264.h14
-rw-r--r--drivers/staging/media/meson/vdec/codec_hevc_common.c297
-rw-r--r--drivers/staging/media/meson/vdec/codec_hevc_common.h69
-rw-r--r--drivers/staging/media/meson/vdec/codec_mpeg12.c210
-rw-r--r--drivers/staging/media/meson/vdec/codec_mpeg12.h14
-rw-r--r--drivers/staging/media/meson/vdec/codec_vp9.c2170
-rw-r--r--drivers/staging/media/meson/vdec/codec_vp9.h13
-rw-r--r--drivers/staging/media/meson/vdec/dos_regs.h98
-rw-r--r--drivers/staging/media/meson/vdec/esparser.c457
-rw-r--r--drivers/staging/media/meson/vdec/esparser.h36
-rw-r--r--drivers/staging/vt6656/key.h55
-rw-r--r--drivers/staging/vt6656/mac.c248
-rw-r--r--drivers/staging/vt6656/mac.h387
-rw-r--r--drivers/staging/vt6656/main_usb.c1066
-rw-r--r--drivers/staging/vt6656/power.c144
-rw-r--r--drivers/staging/vt6656/power.h38
-rw-r--r--drivers/staging/vt6656/rf.c951
-rw-r--r--drivers/staging/vt6656/rf.h63
-rw-r--r--drivers/staging/vt6656/rxtx.c1118
-rw-r--r--drivers/staging/vt6656/rxtx.h260
-rw-r--r--drivers/staging/vt6656/usbpipe.c302
-rw-r--r--drivers/staging/vt6656/usbpipe.h45
-rw-r--r--drivers/staging/vt6656/wcmd.c194
-rw-r--r--drivers/staging/vt6656/wcmd.h63
-rw-r--r--drivers/staging/wilc1000/Kconfig70
-rw-r--r--drivers/staging/wilc1000/Makefile29
-rw-r--r--drivers/staging/wilc1000/TODO14
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c724
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h166
-rw-r--r--drivers/staging/wilc1000/host_interface.c7622
-rw-r--r--drivers/staging/wilc1000/host_interface.h1250
-rw-r--r--drivers/staging/wilc1000/linux_mon.c418
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c1998
-rw-r--r--drivers/staging/wilc1000/linux_wlan_common.h170
-rw-r--r--drivers/staging/wilc1000/linux_wlan_sdio.c244
-rw-r--r--drivers/staging/wilc1000/linux_wlan_sdio.h14
-rw-r--r--drivers/staging/wilc1000/linux_wlan_spi.c408
-rw-r--r--drivers/staging/wilc1000/linux_wlan_spi.h14
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c181
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.c187
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.h94
-rw-r--r--drivers/staging/wilc1000/wilc_oswrapper.h18
-rw-r--r--drivers/staging/wilc1000/wilc_platform.h28
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c1035
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c1400
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c3686
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h109
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h225
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c2091
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h309
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c573
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.h33
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h967
-rw-r--r--drivers/staging/wlan-ng/Kconfig12
-rw-r--r--drivers/staging/wlan-ng/Makefile7
-rw-r--r--drivers/staging/wlan-ng/README8
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c796
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h1431
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c4127
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c663
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h160
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h213
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h89
-rw-r--r--drivers/staging/wlan-ng/p80211meta.h90
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h261
-rw-r--r--drivers/staging/wlan-ng/p80211metastruct.h271
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h520
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h59
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c1085
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h242
-rw-r--r--drivers/staging/wlan-ng/p80211req.c250
-rw-r--r--drivers/staging/wlan-ng/p80211req.h53
-rw-r--r--drivers/staging/wlan-ng/p80211types.h375
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c287
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c1218
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c1322
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h117
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c825
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c2023
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c299
-rw-r--r--drivers/staging/xgifb/Kconfig11
-rw-r--r--drivers/staging/xgifb/Makefile4
-rw-r--r--drivers/staging/xgifb/TODO13
-rw-r--r--drivers/staging/xgifb/XGI_main.h377
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c2112
-rw-r--r--drivers/staging/xgifb/XGIfb.h108
-rw-r--r--drivers/staging/xgifb/vb_def.h257
-rw-r--r--drivers/staging/xgifb/vb_init.c1367
-rw-r--r--drivers/staging/xgifb/vb_init.h6
-rw-r--r--drivers/staging/xgifb/vb_setmode.c5547
-rw-r--r--drivers/staging/xgifb/vb_setmode.h23
-rw-r--r--drivers/staging/xgifb/vb_struct.h166
-rw-r--r--drivers/staging/xgifb/vb_table.h2491
-rw-r--r--drivers/staging/xgifb/vb_util.h43
-rw-r--r--drivers/staging/xgifb/vgatypes.h49
2950 files changed, 300606 insertions, 941803 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 39d950584c9f..2f92cd698bef 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -1,7 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig STAGING
bool "Staging drivers"
- default n
- ---help---
+ help
This option allows you to select a number of drivers that are
not of the "normal" Linux kernel quality level. These drivers
are placed here in order to get a wider audience to make use of
@@ -16,100 +16,36 @@ menuconfig STAGING
If you wish to work on these drivers, to help improve them, or
to report problems you have with them, please see the
- driver_name.README file in the drivers/staging/ directory to
- see what needs to be worked on, and who to contact.
+ drivers/staging/<driver_name>/TODO file to see what needs to be
+ worked on, and who to contact.
If in doubt, say N here.
if STAGING
-source "drivers/staging/slicoss/Kconfig"
-
-source "drivers/staging/wlan-ng/Kconfig"
-
-source "drivers/staging/comedi/Kconfig"
-
-source "drivers/staging/olpc_dcon/Kconfig"
-
-source "drivers/staging/panel/Kconfig"
-
-source "drivers/staging/rtl8192u/Kconfig"
-
-source "drivers/staging/rtl8192e/Kconfig"
-
-source "drivers/staging/rtl8712/Kconfig"
-
-source "drivers/staging/rtl8188eu/Kconfig"
-
-source "drivers/staging/rtl8723au/Kconfig"
-
-source "drivers/staging/rts5208/Kconfig"
+source "drivers/staging/rtl8723bs/Kconfig"
source "drivers/staging/octeon/Kconfig"
-source "drivers/staging/octeon-usb/Kconfig"
-
-source "drivers/staging/vt6655/Kconfig"
-
-source "drivers/staging/vt6656/Kconfig"
-
source "drivers/staging/iio/Kconfig"
source "drivers/staging/sm750fb/Kconfig"
-source "drivers/staging/xgifb/Kconfig"
-
-source "drivers/staging/emxx_udc/Kconfig"
-
-source "drivers/staging/ft1000/Kconfig"
-
-source "drivers/staging/speakup/Kconfig"
-
-source "drivers/staging/ste_rmi4/Kconfig"
-
source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
-source "drivers/staging/rdma/Kconfig"
-
-source "drivers/staging/android/Kconfig"
-
-source "drivers/staging/board/Kconfig"
-
-source "drivers/staging/gdm72xx/Kconfig"
-
-source "drivers/staging/gdm724x/Kconfig"
-
-source "drivers/staging/fwserial/Kconfig"
-
-source "drivers/staging/goldfish/Kconfig"
-
-source "drivers/staging/netlogic/Kconfig"
-
-source "drivers/staging/mt29f_spinand/Kconfig"
-
-source "drivers/staging/lustre/Kconfig"
-
-source "drivers/staging/dgnc/Kconfig"
-
-source "drivers/staging/dgap/Kconfig"
-
-source "drivers/staging/gs_fpgaboot/Kconfig"
-
-source "drivers/staging/skein/Kconfig"
-
-source "drivers/staging/unisys/Kconfig"
+source "drivers/staging/fbtft/Kconfig"
-source "drivers/staging/clocking-wizard/Kconfig"
+source "drivers/staging/most/Kconfig"
-source "drivers/staging/fbtft/Kconfig"
+source "drivers/staging/greybus/Kconfig"
-source "drivers/staging/fsl-mc/Kconfig"
+source "drivers/staging/vc04_services/Kconfig"
-source "drivers/staging/wilc1000/Kconfig"
+source "drivers/staging/axis-fifo/Kconfig"
-source "drivers/staging/most/Kconfig"
+source "drivers/staging/vme_user/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index e4f33d91872b..f5b8876aa536 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,50 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for staging directory
-# fix for build system bug...
-obj-$(CONFIG_STAGING) += staging.o
-
obj-y += media/
-obj-$(CONFIG_SLICOSS) += slicoss/
-obj-$(CONFIG_PRISM2_USB) += wlan-ng/
-obj-$(CONFIG_COMEDI) += comedi/
-obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
-obj-$(CONFIG_PANEL) += panel/
-obj-$(CONFIG_RTL8192U) += rtl8192u/
-obj-$(CONFIG_RTL8192E) += rtl8192e/
-obj-$(CONFIG_R8712U) += rtl8712/
-obj-$(CONFIG_R8188EU) += rtl8188eu/
-obj-$(CONFIG_R8723AU) += rtl8723au/
-obj-$(CONFIG_RTS5208) += rts5208/
-obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
+obj-$(CONFIG_RTL8723BS) += rtl8723bs/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
-obj-$(CONFIG_OCTEON_USB) += octeon-usb/
-obj-$(CONFIG_VT6655) += vt6655/
-obj-$(CONFIG_VT6656) += vt6656/
-obj-$(CONFIG_VME_BUS) += vme/
+obj-$(CONFIG_VME_BUS) += vme_user/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_SM750) += sm750fb/
-obj-$(CONFIG_FB_XGI) += xgifb/
-obj-$(CONFIG_USB_EMXX) += emxx_udc/
-obj-$(CONFIG_FT1000) += ft1000/
-obj-$(CONFIG_SPEAKUP) += speakup/
-obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_STAGING_RDMA) += rdma/
-obj-$(CONFIG_ANDROID) += android/
-obj-$(CONFIG_STAGING_BOARD) += board/
-obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
-obj-$(CONFIG_LTE_GDM724X) += gdm724x/
-obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
-obj-$(CONFIG_GOLDFISH) += goldfish/
-obj-$(CONFIG_LUSTRE_FS) += lustre/
-obj-$(CONFIG_DGNC) += dgnc/
-obj-$(CONFIG_DGAP) += dgap/
-obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
-obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
-obj-$(CONFIG_CRYPTO_SKEIN) += skein/
-obj-$(CONFIG_UNISYSSPAR) += unisys/
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
-obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
-obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_GREYBUS) += greybus/
+obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
+obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
deleted file mode 100644
index 42b15126aa06..000000000000
--- a/drivers/staging/android/Kconfig
+++ /dev/null
@@ -1,73 +0,0 @@
-menu "Android"
-
-if ANDROID
-
-config ASHMEM
- bool "Enable the Anonymous Shared Memory Subsystem"
- default n
- depends on SHMEM
- ---help---
- The ashmem subsystem is a new shared memory allocator, similar to
- POSIX SHM but with different behavior and sporting a simpler
- file-based API.
-
- It is, in theory, a good memory allocator for low-memory devices,
- because it can discard shared memory units when under memory pressure.
-
-config ANDROID_TIMED_OUTPUT
- bool "Timed output class driver"
- default y
-
-config ANDROID_TIMED_GPIO
- tristate "Android timed gpio driver"
- depends on GPIOLIB || COMPILE_TEST
- depends on ANDROID_TIMED_OUTPUT
- default n
- ---help---
- Unlike generic gpio, which lets programs access and manipulate gpio
- registers from user space, timed output/gpio is a system for changing
- a gpio pin and restoring it automatically after a specified timeout.
-
-config ANDROID_LOW_MEMORY_KILLER
- bool "Android Low Memory Killer"
- ---help---
- Registers processes to be killed under low memory conditions. This is
- useful because there is no dedicated swap space on Android.
-
- The registered processes are killed according to the priorities set in the
- Android init scripts (/init.rc), which define a priority value and a minimum
- free memory size for each priority level.
-
-config SYNC
- bool "Synchronization framework"
- default n
- select ANON_INODES
- select DMA_SHARED_BUFFER
- ---help---
- This option enables the framework for synchronization between multiple
- drivers. Sync implementations can take advantage of hardware
- synchronization built into devices like GPUs.
-
-config SW_SYNC
- bool "Software synchronization objects"
- default n
- depends on SYNC
- ---help---
- A sync object driver that uses a 32bit counter to coordinate
- synchronization. Useful when there is no hardware primitive backing
- the synchronization.
-
-config SW_SYNC_USER
- bool "Userspace API for SW_SYNC"
- default n
- depends on SW_SYNC
- ---help---
- Provides a user space API to the sw sync object.
- *WARNING* improper use of this can result in deadlocking kernel
- drivers from userspace.
-
-source "drivers/staging/android/ion/Kconfig"
-
-endif # if ANDROID
-
-endmenu
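As context for the SW_SYNC_USER entry being deleted above: the user-space API it refers to was the /dev/sw_sync debug device, driven through the ioctls declared in the (likewise deleted) uapi/sw_sync.h. The sketch below shows the typical create-fence/increment sequence; it is illustrative only, the device path and error handling are assumptions, and it presumes the deleted uapi headers are available to the build.

/* sketch: open a sw_sync timeline, create a fence on it, then signal it */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include "sw_sync.h"             /* SW_SYNC_IOC_CREATE_FENCE, SW_SYNC_IOC_INC */

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 step = 1;
	int timeline = open("/dev/sw_sync", O_RDWR);

	if (timeline < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.value = 1;                          /* fence signals at timeline value 1 */
	strcpy(data.name, "example-fence");
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	ioctl(timeline, SW_SYNC_IOC_INC, &step); /* advance the timeline; the fence signals */

	close(data.fence);                       /* the new fence fd came back in data.fence */
	close(timeline);
	return 0;
}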
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
deleted file mode 100644
index c7b6c99cc5ce..000000000000
--- a/drivers/staging/android/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-ccflags-y += -I$(src) # needed for trace events
-
-obj-y += ion/
-
-obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
-obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
-obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SYNC) += sync.o sync_debug.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
deleted file mode 100644
index 8f3ac37bfe12..000000000000
--- a/drivers/staging/android/TODO
+++ /dev/null
@@ -1,29 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - sparse fixes
- - rename files to be not so "generic"
- - add proper arch dependencies as needed
- - audit userspace interfaces to make sure they are sane
-
-
-ion/
- - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
- interface on top of dma-buf. flush_for_device needs to be added to dma-buf
- first.
- - Remove ION_IOC_CUSTOM: Atm used for cache flushing for cpu access in some
- vendor trees. Should be replaced with an ioctl on the dma-buf to expose the
- begin/end_cpu_access hooks to userspace.
- - Clarify the tricks ion plays with explicitly managing coherency behind the
- dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
- explicit coherency management mode to flush_for_device to be used by drivers
- which want to manage caches themselves and which indicates whether cpu caches
- need flushing.
- - With those removed there's probably no use for ION_IOC_IMPORT anymore either
- since ion would just be the central allocator for shared buffers.
- - Add dt-binding to expose cma regions as ion heaps, with the rule that any
- such cma regions must already be used by some device for dma. I.e. ion only
- exposes existing cma regions and doesn't unnecessarily reserve memory when
- booting a system which doesn't use ion.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
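The ION_IOC_SYNC item above asks for the cache-maintenance ioctl to move onto the dma-buf fd itself, which is essentially what later happened with DMA_BUF_IOCTL_SYNC. For orientation only, bracketing CPU access to a mapped dma-buf with that ioctl looks roughly like the sketch below; it assumes a kernel whose <linux/dma-buf.h> uapi provides DMA_BUF_IOCTL_SYNC and is not part of this tree.

#include <linux/dma-buf.h>      /* struct dma_buf_sync, DMA_BUF_IOCTL_SYNC */
#include <string.h>
#include <sys/ioctl.h>

/* bracket a CPU write to an mmap'd dma-buf so the kernel handles coherency */
static int cpu_fill(int dmabuf_fd, void *map, size_t len)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
		return -1;

	memset(map, 0, len);        /* CPU access happens between start and end */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}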
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
deleted file mode 100644
index f9318003bd0b..000000000000
--- a/drivers/staging/android/ashmem.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/* mm/ashmem.c
- *
- * Anonymous Shared Memory Subsystem, ashmem
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "ashmem: " fmt
-
-#include <linux/module.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/falloc.h>
-#include <linux/miscdevice.h>
-#include <linux/security.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/uaccess.h>
-#include <linux/personality.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-#include <linux/shmem_fs.h>
-#include "ashmem.h"
-
-#define ASHMEM_NAME_PREFIX "dev/ashmem/"
-#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
-#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
-
-/**
- * struct ashmem_area - The anonymous shared memory area
- * @name: The optional name in /proc/pid/maps
- * @unpinned_list: The list of this area's unpinned ranges
- * @file: The shmem-based backing file
- * @size: The size of the mapping, in bytes
- * @prot_mask: The allowed protection bits, as vm_flags
- *
- * The lifecycle of this structure is from our parent file's open() until
- * its release(). It is also protected by 'ashmem_mutex'
- *
- * Warning: Mappings do NOT pin this structure; It dies on close()
- */
-struct ashmem_area {
- char name[ASHMEM_FULL_NAME_LEN];
- struct list_head unpinned_list;
- struct file *file;
- size_t size;
- unsigned long prot_mask;
-};
-
-/**
- * struct ashmem_range - A range of unpinned/evictable pages
- * @lru: The entry in the LRU list
- * @unpinned: The entry in its area's unpinned list
- * @asma: The associated anonymous shared memory area.
- * @pgstart: The starting page (inclusive)
- * @pgend: The ending page (inclusive)
- * @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
- *
- * The lifecycle of this structure is from unpin to pin.
- * It is protected by 'ashmem_mutex'
- */
-struct ashmem_range {
- struct list_head lru;
- struct list_head unpinned;
- struct ashmem_area *asma;
- size_t pgstart;
- size_t pgend;
- unsigned int purged;
-};
-
-/* LRU list of unpinned pages, protected by ashmem_mutex */
-static LIST_HEAD(ashmem_lru_list);
-
-/**
- * long lru_count - The count of pages on our LRU list.
- *
- * This is protected by ashmem_mutex.
- */
-static unsigned long lru_count;
-
-/**
- * ashmem_mutex - protects the list of and each individual ashmem_area
- *
- * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
- */
-static DEFINE_MUTEX(ashmem_mutex);
-
-static struct kmem_cache *ashmem_area_cachep __read_mostly;
-static struct kmem_cache *ashmem_range_cachep __read_mostly;
-
-#define range_size(range) \
- ((range)->pgend - (range)->pgstart + 1)
-
-#define range_on_lru(range) \
- ((range)->purged == ASHMEM_NOT_PURGED)
-
-#define page_range_subsumes_range(range, start, end) \
- (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
-
-#define page_range_subsumed_by_range(range, start, end) \
- (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
-
-#define page_in_range(range, page) \
- (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
-
-#define page_range_in_range(range, start, end) \
- (page_in_range(range, start) || page_in_range(range, end) || \
- page_range_subsumes_range(range, start, end))
-
-#define range_before_page(range, page) \
- ((range)->pgend < (page))
-
-#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
-
-/**
- * lru_add() - Adds a range of memory to the LRU list
- * @range: The memory range being added.
- *
- * The range is first added to the end (tail) of the LRU list.
- * After this, the size of the range is added to @lru_count
- */
-static inline void lru_add(struct ashmem_range *range)
-{
- list_add_tail(&range->lru, &ashmem_lru_list);
- lru_count += range_size(range);
-}
-
-/**
- * lru_del() - Removes a range of memory from the LRU list
- * @range: The memory range being removed
- *
- * The range is first deleted from the LRU list.
- * After this, the size of the range is removed from @lru_count
- */
-static inline void lru_del(struct ashmem_range *range)
-{
- list_del(&range->lru);
- lru_count -= range_size(range);
-}
-
-/**
- * range_alloc() - Allocates and initializes a new ashmem_range structure
- * @asma: The associated ashmem_area
- * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
- * @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
- * @start: The starting page (inclusive)
- * @end: The ending page (inclusive)
- *
- * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
- */
-static int range_alloc(struct ashmem_area *asma,
- struct ashmem_range *prev_range, unsigned int purged,
- size_t start, size_t end)
-{
- struct ashmem_range *range;
-
- range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
- if (unlikely(!range))
- return -ENOMEM;
-
- range->asma = asma;
- range->pgstart = start;
- range->pgend = end;
- range->purged = purged;
-
- list_add_tail(&range->unpinned, &prev_range->unpinned);
-
- if (range_on_lru(range))
- lru_add(range);
-
- return 0;
-}
-
-/**
- * range_del() - Deletes and deallocates an ashmem_range structure
- * @range: The associated ashmem_range that has previously been allocated
- */
-static void range_del(struct ashmem_range *range)
-{
- list_del(&range->unpinned);
- if (range_on_lru(range))
- lru_del(range);
- kmem_cache_free(ashmem_range_cachep, range);
-}
-
-/**
- * range_shrink() - Shrinks an ashmem_range
- * @range: The associated ashmem_range being shrunk
- * @start: The starting byte of the new range
- * @end: The ending byte of the new range
- *
- * This does not modify the data inside the existing range in any way - It
- * simply shrinks the boundaries of the range.
- *
- * Theoretically, with a little tweaking, this could eventually be changed
- * to range_resize, and expand the lru_count if the new range is larger.
- */
-static inline void range_shrink(struct ashmem_range *range,
- size_t start, size_t end)
-{
- size_t pre = range_size(range);
-
- range->pgstart = start;
- range->pgend = end;
-
- if (range_on_lru(range))
- lru_count -= pre - range_size(range);
-}
-
-/**
- * ashmem_open() - Opens an Anonymous Shared Memory structure
- * @inode: The backing file's index node(?)
- * @file: The backing file
- *
- * Please note that the ashmem_area is not returned by this function - It is
- * instead written to "file->private_data".
- *
- * Return: 0 if successful, or another code if unsuccessful.
- */
-static int ashmem_open(struct inode *inode, struct file *file)
-{
- struct ashmem_area *asma;
- int ret;
-
- ret = generic_file_open(inode, file);
- if (unlikely(ret))
- return ret;
-
- asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
- if (unlikely(!asma))
- return -ENOMEM;
-
- INIT_LIST_HEAD(&asma->unpinned_list);
- memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
- asma->prot_mask = PROT_MASK;
- file->private_data = asma;
-
- return 0;
-}
-
-/**
- * ashmem_release() - Releases an Anonymous Shared Memory structure
- * @ignored: The backing file's Index Node(?) - It is ignored here.
- * @file: The backing file
- *
- * Return: 0 if successful. If it is anything else, go have a coffee and
- * try again.
- */
-static int ashmem_release(struct inode *ignored, struct file *file)
-{
- struct ashmem_area *asma = file->private_data;
- struct ashmem_range *range, *next;
-
- mutex_lock(&ashmem_mutex);
- list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
- range_del(range);
- mutex_unlock(&ashmem_mutex);
-
- if (asma->file)
- fput(asma->file);
- kmem_cache_free(ashmem_area_cachep, asma);
-
- return 0;
-}
-
-/**
- * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
- * @file: The associated backing file.
- * @buf: The buffer of data being written to
- * @len: The number of bytes being read
- * @pos: The position of the first byte to read.
- *
- * Return: 0 if successful, or another return code if not.
- */
-static ssize_t ashmem_read(struct file *file, char __user *buf,
- size_t len, loff_t *pos)
-{
- struct ashmem_area *asma = file->private_data;
- int ret = 0;
-
- mutex_lock(&ashmem_mutex);
-
- /* If size is not set, or set to 0, always return EOF. */
- if (asma->size == 0)
- goto out_unlock;
-
- if (!asma->file) {
- ret = -EBADF;
- goto out_unlock;
- }
-
- mutex_unlock(&ashmem_mutex);
-
- /*
- * asma and asma->file are used outside the lock here. We assume
- * once asma->file is set it will never be changed, and will not
- * be destroyed until all references to the file are dropped and
- * ashmem_release is called.
- */
- ret = __vfs_read(asma->file, buf, len, pos);
- if (ret >= 0)
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
- return ret;
-
-out_unlock:
- mutex_unlock(&ashmem_mutex);
- return ret;
-}
-
-static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
-{
- struct ashmem_area *asma = file->private_data;
- int ret;
-
- mutex_lock(&ashmem_mutex);
-
- if (asma->size == 0) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!asma->file) {
- ret = -EBADF;
- goto out;
- }
-
- ret = vfs_llseek(asma->file, offset, origin);
- if (ret < 0)
- goto out;
-
- /** Copy f_pos from backing file, since f_ops->llseek() sets it */
- file->f_pos = asma->file->f_pos;
-
-out:
- mutex_unlock(&ashmem_mutex);
- return ret;
-}
-
-static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
-{
- return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
- _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
- _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
-}
-
-static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct ashmem_area *asma = file->private_data;
- int ret = 0;
-
- mutex_lock(&ashmem_mutex);
-
- /* user needs to SET_SIZE before mapping */
- if (unlikely(!asma->size)) {
- ret = -EINVAL;
- goto out;
- }
-
- /* requested protection bits must match our allowed protection mask */
- if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
- calc_vm_prot_bits(PROT_MASK))) {
- ret = -EPERM;
- goto out;
- }
- vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
-
- if (!asma->file) {
- char *name = ASHMEM_NAME_DEF;
- struct file *vmfile;
-
- if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
- name = asma->name;
-
- /* ... and allocate the backing shmem file */
- vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
- if (IS_ERR(vmfile)) {
- ret = PTR_ERR(vmfile);
- goto out;
- }
- asma->file = vmfile;
- }
- get_file(asma->file);
-
- /*
- * XXX - Reworked to use shmem_zero_setup() instead of
- * shmem_set_file while we're in staging. -jstultz
- */
- if (vma->vm_flags & VM_SHARED) {
- ret = shmem_zero_setup(vma);
- if (ret) {
- fput(asma->file);
- goto out;
- }
- }
-
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = asma->file;
-
-out:
- mutex_unlock(&ashmem_mutex);
- return ret;
-}
-
-/*
- * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
- *
- * 'nr_to_scan' is the number of objects to scan for freeing.
- *
- * 'gfp_mask' is the mask of the allocation that got us into this mess.
- *
- * Return value is the number of objects freed or -1 if we cannot
- * proceed without risk of deadlock (due to gfp_mask).
- *
- * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
- * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
- * pages freed.
- */
-static unsigned long
-ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
- struct ashmem_range *range, *next;
- unsigned long freed = 0;
-
- /* We might recurse into filesystem code, so bail out if necessary */
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
-
- mutex_lock(&ashmem_mutex);
- list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
- loff_t start = range->pgstart * PAGE_SIZE;
- loff_t end = (range->pgend + 1) * PAGE_SIZE;
-
- vfs_fallocate(range->asma->file,
- FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- start, end - start);
- range->purged = ASHMEM_WAS_PURGED;
- lru_del(range);
-
- freed += range_size(range);
- if (--sc->nr_to_scan <= 0)
- break;
- }
- mutex_unlock(&ashmem_mutex);
- return freed;
-}
-
-static unsigned long
-ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- /*
- * note that lru_count is count of pages on the lru, not a count of
- * objects on the list. This means the scan function needs to return the
- * number of pages freed, not the number of objects scanned.
- */
- return lru_count;
-}
-
-static struct shrinker ashmem_shrinker = {
- .count_objects = ashmem_shrink_count,
- .scan_objects = ashmem_shrink_scan,
- /*
- * XXX (dchinner): I wish people would comment on why they need
- * significant changes to the default value here
- */
- .seeks = DEFAULT_SEEKS * 4,
-};
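The comment block before ashmem_shrink_scan() spells out the shrinker contract this follows: count_objects() reports how much is reclaimable, and scan_objects() returns how much it actually freed, measured here in pages because that is what lru_count tracks. A minimal, hypothetical shrinker of the same era following that pattern would look like the sketch below (all demo_* names are invented for illustration):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/shrinker.h>

/* hypothetical cache that knows how many pages it could give back */
static atomic_long_t demo_cached_pages;

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return atomic_long_read(&demo_cached_pages);   /* "how much could you free?" */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long have = atomic_long_read(&demo_cached_pages);
	unsigned long freed = min(sc->nr_to_scan, have);

	/* ... actually release 'freed' pages of cache here ... */
	atomic_long_sub(freed, &demo_cached_pages);
	return freed;                                  /* pages freed, not objects visited */
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_count,
	.scan_objects  = demo_scan,
	.seeks         = DEFAULT_SEEKS,
};
/* registered with register_shrinker(&demo_shrinker) at init, as ashmem_init() does */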
-
-static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
-{
- int ret = 0;
-
- mutex_lock(&ashmem_mutex);
-
- /* the user can only remove, not add, protection bits */
- if (unlikely((asma->prot_mask & prot) != prot)) {
- ret = -EINVAL;
- goto out;
- }
-
- /* does the application expect PROT_READ to imply PROT_EXEC? */
- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
- prot |= PROT_EXEC;
-
- asma->prot_mask = prot;
-
-out:
- mutex_unlock(&ashmem_mutex);
- return ret;
-}
-
-static int set_name(struct ashmem_area *asma, void __user *name)
-{
- int len;
- int ret = 0;
- char local_name[ASHMEM_NAME_LEN];
-
- /*
- * Holding the ashmem_mutex while doing a copy_from_user might cause
- * a data abort, which would try to access mmap_sem. If another
- * thread has invoked ashmem_mmap then it will be holding the
- * semaphore and will be waiting for ashmem_mutex, thereby leading to
- * deadlock. So we first copy the name into a local variable that
- * needs no protection, and only then take the mutex and copy the
- * local variable into the structure member.
- */
- len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
- if (len < 0)
- return len;
- if (len == ASHMEM_NAME_LEN)
- local_name[ASHMEM_NAME_LEN - 1] = '\0';
- mutex_lock(&ashmem_mutex);
- /* cannot change an existing mapping's name */
- if (unlikely(asma->file))
- ret = -EINVAL;
- else
- strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
-
- mutex_unlock(&ashmem_mutex);
- return ret;
-}
-
-static int get_name(struct ashmem_area *asma, void __user *name)
-{
- int ret = 0;
- size_t len;
- /*
- * Have a local variable to which we'll copy the content
- * from asma with the lock held. Later we can copy this to the user
- * space safely without holding any locks. So even if we proceed to
- * wait for mmap_sem, it won't lead to deadlock.
- */
- char local_name[ASHMEM_NAME_LEN];
-
- mutex_lock(&ashmem_mutex);
- if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
- /*
- * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
- * prevents us from revealing one user's stack to another.
- */
- len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
- memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
- } else {
- len = sizeof(ASHMEM_NAME_DEF);
- memcpy(local_name, ASHMEM_NAME_DEF, len);
- }
- mutex_unlock(&ashmem_mutex);
-
- /*
- * Now we are just copying from the stack variable to userland
- * No lock held
- */
- if (unlikely(copy_to_user(name, local_name, len)))
- ret = -EFAULT;
- return ret;
-}
-
-/*
- * ashmem_pin - pin the given ashmem region, returning whether it was
- * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
- *
- * Caller must hold ashmem_mutex.
- */
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
-{
- struct ashmem_range *range, *next;
- int ret = ASHMEM_NOT_PURGED;
-
- list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
- /* moved past last applicable page; we can short circuit */
- if (range_before_page(range, pgstart))
- break;
-
- /*
- * The user can ask us to pin pages that span multiple ranges,
- * or to pin pages that aren't even unpinned, so this is messy.
- *
- * Four cases:
- * 1. The requested range subsumes an existing range, so we
- * just remove the entire matching range.
- * 2. The requested range overlaps the start of an existing
- * range, so we just update that range.
- * 3. The requested range overlaps the end of an existing
- * range, so we just update that range.
- * 4. The requested range punches a hole in an existing range,
- * so we have to update one side of the range and then
- * create a new range for the other side.
- */
- if (page_range_in_range(range, pgstart, pgend)) {
- ret |= range->purged;
-
- /* Case #1: Easy. Just nuke the whole thing. */
- if (page_range_subsumes_range(range, pgstart, pgend)) {
- range_del(range);
- continue;
- }
-
- /* Case #2: We overlap from the start, so adjust it */
- if (range->pgstart >= pgstart) {
- range_shrink(range, pgend + 1, range->pgend);
- continue;
- }
-
- /* Case #3: We overlap from the rear, so adjust it */
- if (range->pgend <= pgend) {
- range_shrink(range, range->pgstart,
- pgstart - 1);
- continue;
- }
-
- /*
- * Case #4: We eat a chunk out of the middle. A bit
- * more complicated, we allocate a new range for the
- * second half and adjust the first chunk's endpoint.
- */
- range_alloc(asma, range, range->purged,
- pgend + 1, range->pgend);
- range_shrink(range, range->pgstart, pgstart - 1);
- break;
- }
- }
-
- return ret;
-}
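The four cases enumerated in the comment above are easiest to see with concrete numbers. If pages 0-9 of a region sit in one unpinned range and userspace pins pages 3-5, that is case #4: the existing range is shrunk to 0-2 and a new range 6-9 is allocated for the remainder. The sketch below reproduces just that bookkeeping with plain structs, detached from the kernel types:

#include <stdio.h>

struct page_range { size_t start, end; };        /* inclusive page numbers */

int main(void)
{
	struct page_range unpinned = { 0, 9 };   /* one unpinned range, pages 0..9 */
	size_t pin_start = 3, pin_end = 5;       /* request: pin pages 3..5 */

	/* case #4: the request punches a hole in the middle of the range */
	struct page_range second_half = { pin_end + 1, unpinned.end };   /* 6..9 */
	unpinned.end = pin_start - 1;                                    /* 0..2 */

	printf("still unpinned: %zu-%zu and %zu-%zu\n",
	       unpinned.start, unpinned.end, second_half.start, second_half.end);
	return 0;
}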
-
-/*
- * ashmem_unpin - unpin the given range of pages. Returns zero on success.
- *
- * Caller must hold ashmem_mutex.
- */
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
-{
- struct ashmem_range *range, *next;
- unsigned int purged = ASHMEM_NOT_PURGED;
-
-restart:
- list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
- /* short circuit: this is our insertion point */
- if (range_before_page(range, pgstart))
- break;
-
- /*
- * The user can ask us to unpin pages that are already entirely
- * or partially pinned. We handle those two cases here.
- */
- if (page_range_subsumed_by_range(range, pgstart, pgend))
- return 0;
- if (page_range_in_range(range, pgstart, pgend)) {
- pgstart = min_t(size_t, range->pgstart, pgstart);
- pgend = max_t(size_t, range->pgend, pgend);
- purged |= range->purged;
- range_del(range);
- goto restart;
- }
- }
-
- return range_alloc(asma, range, purged, pgstart, pgend);
-}
-
-/*
- * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
- * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
- *
- * Caller must hold ashmem_mutex.
- */
-static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
- size_t pgend)
-{
- struct ashmem_range *range;
- int ret = ASHMEM_IS_PINNED;
-
- list_for_each_entry(range, &asma->unpinned_list, unpinned) {
- if (range_before_page(range, pgstart))
- break;
- if (page_range_in_range(range, pgstart, pgend)) {
- ret = ASHMEM_IS_UNPINNED;
- break;
- }
- }
-
- return ret;
-}
-
-static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
- void __user *p)
-{
- struct ashmem_pin pin;
- size_t pgstart, pgend;
- int ret = -EINVAL;
-
- if (unlikely(!asma->file))
- return -EINVAL;
-
- if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
- return -EFAULT;
-
- /* per custom, you can pass zero for len to mean "everything onward" */
- if (!pin.len)
- pin.len = PAGE_ALIGN(asma->size) - pin.offset;
-
- if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
- return -EINVAL;
-
- if (unlikely(((__u32)-1) - pin.offset < pin.len))
- return -EINVAL;
-
- if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
- return -EINVAL;
-
- pgstart = pin.offset / PAGE_SIZE;
- pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
-
- mutex_lock(&ashmem_mutex);
-
- switch (cmd) {
- case ASHMEM_PIN:
- ret = ashmem_pin(asma, pgstart, pgend);
- break;
- case ASHMEM_UNPIN:
- ret = ashmem_unpin(asma, pgstart, pgend);
- break;
- case ASHMEM_GET_PIN_STATUS:
- ret = ashmem_get_pin_status(asma, pgstart, pgend);
- break;
- }
-
- mutex_unlock(&ashmem_mutex);
-
- return ret;
-}
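To make the page arithmetic above concrete: with 4 KiB pages, an ASHMEM_PIN request of offset 8192 and len 16384 passes the alignment and overflow checks and yields pgstart = 8192 / 4096 = 2 and pgend = 2 + (16384 / 4096) - 1 = 5, i.e. the inclusive page range 2..5 that the pin/unpin helpers above operate on. A len of 0 is shorthand for "from offset to the end of the region".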
-
-static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct ashmem_area *asma = file->private_data;
- long ret = -ENOTTY;
-
- switch (cmd) {
- case ASHMEM_SET_NAME:
- ret = set_name(asma, (void __user *)arg);
- break;
- case ASHMEM_GET_NAME:
- ret = get_name(asma, (void __user *)arg);
- break;
- case ASHMEM_SET_SIZE:
- ret = -EINVAL;
- if (!asma->file) {
- ret = 0;
- asma->size = (size_t)arg;
- }
- break;
- case ASHMEM_GET_SIZE:
- ret = asma->size;
- break;
- case ASHMEM_SET_PROT_MASK:
- ret = set_prot_mask(asma, arg);
- break;
- case ASHMEM_GET_PROT_MASK:
- ret = asma->prot_mask;
- break;
- case ASHMEM_PIN:
- case ASHMEM_UNPIN:
- case ASHMEM_GET_PIN_STATUS:
- ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
- break;
- case ASHMEM_PURGE_ALL_CACHES:
- ret = -EPERM;
- if (capable(CAP_SYS_ADMIN)) {
- struct shrink_control sc = {
- .gfp_mask = GFP_KERNEL,
- .nr_to_scan = LONG_MAX,
- };
- ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
- ashmem_shrink_scan(&ashmem_shrinker, &sc);
- }
- break;
- }
-
- return ret;
-}
-
-/* support of 32bit userspace on 64bit platforms */
-#ifdef CONFIG_COMPAT
-static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- switch (cmd) {
- case COMPAT_ASHMEM_SET_SIZE:
- cmd = ASHMEM_SET_SIZE;
- break;
- case COMPAT_ASHMEM_SET_PROT_MASK:
- cmd = ASHMEM_SET_PROT_MASK;
- break;
- }
- return ashmem_ioctl(file, cmd, arg);
-}
-#endif
-
-static const struct file_operations ashmem_fops = {
- .owner = THIS_MODULE,
- .open = ashmem_open,
- .release = ashmem_release,
- .read = ashmem_read,
- .llseek = ashmem_llseek,
- .mmap = ashmem_mmap,
- .unlocked_ioctl = ashmem_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_ashmem_ioctl,
-#endif
-};
-
-static struct miscdevice ashmem_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "ashmem",
- .fops = &ashmem_fops,
-};
-
-static int __init ashmem_init(void)
-{
- int ret;
-
- ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
- sizeof(struct ashmem_area),
- 0, 0, NULL);
- if (unlikely(!ashmem_area_cachep)) {
- pr_err("failed to create slab cache\n");
- return -ENOMEM;
- }
-
- ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
- sizeof(struct ashmem_range),
- 0, 0, NULL);
- if (unlikely(!ashmem_range_cachep)) {
- pr_err("failed to create slab cache\n");
- return -ENOMEM;
- }
-
- ret = misc_register(&ashmem_misc);
- if (unlikely(ret)) {
- pr_err("failed to register misc device!\n");
- return ret;
- }
-
- register_shrinker(&ashmem_shrinker);
-
- pr_info("initialized\n");
-
- return 0;
-}
-
-static void __exit ashmem_exit(void)
-{
- unregister_shrinker(&ashmem_shrinker);
-
- misc_deregister(&ashmem_misc);
- kmem_cache_destroy(ashmem_range_cachep);
- kmem_cache_destroy(ashmem_area_cachep);
-
- pr_info("unloaded\n");
-}
-
-module_init(ashmem_init);
-module_exit(ashmem_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
deleted file mode 100644
index 5abcfd7aa706..000000000000
--- a/drivers/staging/android/ashmem.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * include/linux/ashmem.h
- *
- * Copyright 2008 Google Inc.
- * Author: Robert Love
- *
- * This file is dual licensed. It may be redistributed and/or modified
- * under the terms of the Apache 2.0 License OR version 2 of the GNU
- * General Public License.
- */
-
-#ifndef _LINUX_ASHMEM_H
-#define _LINUX_ASHMEM_H
-
-#include <linux/limits.h>
-#include <linux/ioctl.h>
-#include <linux/compat.h>
-
-#include "uapi/ashmem.h"
-
-/* support of 32bit userspace on 64bit platforms */
-#ifdef CONFIG_COMPAT
-#define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t)
-#define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int)
-#endif
-
-#endif /* _LINUX_ASHMEM_H */
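Since this header pulls in the uapi definitions, it is also the natural place to recall how the /dev/ashmem device was driven from userspace. The sketch below is illustrative only: the include path is the one Android's libc ships, the checks are minimal, and none of it is part of this tree.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>   /* ASHMEM_* ioctls and struct ashmem_pin (uapi) */

int main(void)
{
	size_t size = 4 * 4096;
	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
	char *p;
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0)
		return 1;

	/* the name and size must be set before the first mmap() */
	ioctl(fd, ASHMEM_SET_NAME, "example-region");
	ioctl(fd, ASHMEM_SET_SIZE, size);

	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* unpinned pages may be reclaimed by the shrinker under memory pressure ... */
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* ... and pinning again reports whether that actually happened */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		memset(p, 0, pin.len);      /* contents were dropped; reinitialize */

	munmap(p, size);
	close(fd);
	return 0;
}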
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
deleted file mode 100644
index 345234624492..000000000000
--- a/drivers/staging/android/ion/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-menuconfig ION
- bool "Ion Memory Manager"
- depends on HAVE_MEMBLOCK && HAS_DMA && MMU
- select GENERIC_ALLOCATOR
- select DMA_SHARED_BUFFER
- ---help---
- Choose this option to enable the ION Memory Manager,
- used by Android to efficiently allocate buffers
- from userspace that can be shared between drivers.
- If you're not using Android it's probably safe to
- say N here.
-
-config ION_TEST
- tristate "Ion Test Device"
- depends on ION
- help
- Choose this option to create a device that can be used to test the
- kernel and device side ION functions.
-
-config ION_DUMMY
- bool "Dummy Ion driver"
- depends on ION
- help
- Provides a dummy ION driver that registers the
- /dev/ion device and some basic heaps. This can
- be used for testing the ION infrastructure if
- one doesn't have access to hardware drivers that
- use ION.
-
-config ION_TEGRA
- tristate "Ion for Tegra"
- depends on ARCH_TEGRA && ION
- help
- Choose this option if you wish to use ion on an nVidia Tegra.
-
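The help text above describes ION as a user-space-facing allocator whose buffers get shared between drivers. Concretely, a client allocated a handle and then exported it as a dma-buf fd. The sketch below targets the legacy handle-based ABI in the deleted uapi/ion.h; the include path, heap mask and flags are assumptions for illustration, not reference usage.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "uapi/ion.h"        /* ION_IOC_ALLOC/SHARE/FREE and their structs */

int main(void)
{
	struct ion_allocation_data alloc;
	struct ion_fd_data share;
	struct ion_handle_data handle;
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return 1;

	memset(&alloc, 0, sizeof(alloc));
	alloc.len = 1 << 20;                 /* 1 MiB */
	alloc.heap_id_mask = ~0u;            /* accept any heap the device exposes */
	alloc.flags = 0;                     /* no ION_FLAG_CACHED etc. */
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return 1;

	/* export the client-local handle as a dma-buf fd other devices can import */
	memset(&share, 0, sizeof(share));
	share.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_SHARE, &share);
	/* share.fd can now be mmap'd here or handed to another driver */

	handle.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &handle);

	close(share.fd);
	close(ion_fd);
	return 0;
}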
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
deleted file mode 100644
index b56fd2bf2b4f..000000000000
--- a/drivers/staging/android/ion/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
- ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
-obj-$(CONFIG_ION_TEST) += ion_test.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_ION) += compat_ion.o
-endif
-
-obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
-obj-$(CONFIG_ION_TEGRA) += tegra/
-
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
deleted file mode 100644
index a402fdaf54ca..000000000000
--- a/drivers/staging/android/ion/compat_ion.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * drivers/staging/android/ion/compat_ion.c
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "ion.h"
-#include "compat_ion.h"
-
-/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
-struct compat_ion_allocation_data {
- compat_size_t len;
- compat_size_t align;
- compat_uint_t heap_id_mask;
- compat_uint_t flags;
- compat_int_t handle;
-};
-
-struct compat_ion_custom_data {
- compat_uint_t cmd;
- compat_ulong_t arg;
-};
-
-struct compat_ion_handle_data {
- compat_int_t handle;
-};
-
-#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
- struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
- struct compat_ion_handle_data)
-#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
- struct compat_ion_custom_data)
-
-static int compat_get_ion_allocation_data(
- struct compat_ion_allocation_data __user *data32,
- struct ion_allocation_data __user *data)
-{
- compat_size_t s;
- compat_uint_t u;
- compat_int_t i;
- int err;
-
- err = get_user(s, &data32->len);
- err |= put_user(s, &data->len);
- err |= get_user(s, &data32->align);
- err |= put_user(s, &data->align);
- err |= get_user(u, &data32->heap_id_mask);
- err |= put_user(u, &data->heap_id_mask);
- err |= get_user(u, &data32->flags);
- err |= put_user(u, &data->flags);
- err |= get_user(i, &data32->handle);
- err |= put_user(i, &data->handle);
-
- return err;
-}
-
-static int compat_get_ion_handle_data(
- struct compat_ion_handle_data __user *data32,
- struct ion_handle_data __user *data)
-{
- compat_int_t i;
- int err;
-
- err = get_user(i, &data32->handle);
- err |= put_user(i, &data->handle);
-
- return err;
-}
-
-static int compat_put_ion_allocation_data(
- struct compat_ion_allocation_data __user *data32,
- struct ion_allocation_data __user *data)
-{
- compat_size_t s;
- compat_uint_t u;
- compat_int_t i;
- int err;
-
- err = get_user(s, &data->len);
- err |= put_user(s, &data32->len);
- err |= get_user(s, &data->align);
- err |= put_user(s, &data32->align);
- err |= get_user(u, &data->heap_id_mask);
- err |= put_user(u, &data32->heap_id_mask);
- err |= get_user(u, &data->flags);
- err |= put_user(u, &data32->flags);
- err |= get_user(i, &data->handle);
- err |= put_user(i, &data32->handle);
-
- return err;
-}
-
-static int compat_get_ion_custom_data(
- struct compat_ion_custom_data __user *data32,
- struct ion_custom_data __user *data)
-{
- compat_uint_t cmd;
- compat_ulong_t arg;
- int err;
-
- err = get_user(cmd, &data32->cmd);
- err |= put_user(cmd, &data->cmd);
- err |= get_user(arg, &data32->arg);
- err |= put_user(arg, &data->arg);
-
- return err;
-};
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (!filp->f_op->unlocked_ioctl)
- return -ENOTTY;
-
- switch (cmd) {
- case COMPAT_ION_IOC_ALLOC:
- {
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_ion_allocation_data(data32, data);
- if (err)
- return err;
- ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
- (unsigned long)data);
- err = compat_put_ion_allocation_data(data32, data);
- return ret ? ret : err;
- }
- case COMPAT_ION_IOC_FREE:
- {
- struct compat_ion_handle_data __user *data32;
- struct ion_handle_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_ion_handle_data(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
- (unsigned long)data);
- }
- case COMPAT_ION_IOC_CUSTOM: {
- struct compat_ion_custom_data __user *data32;
- struct ion_custom_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_ion_custom_data(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
- (unsigned long)data);
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- case ION_IOC_IMPORT:
- case ION_IOC_SYNC:
- return filp->f_op->unlocked_ioctl(filp, cmd,
- (unsigned long)compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-}
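What this file does is the stock compat-ioctl pattern: the 32-bit layout (compat_size_t, compat_ulong_t, compat_int_t) differs from the native one, so each field is read from the 32-bit user copy and rewritten into a natively laid-out user buffer before the regular unlocked_ioctl runs. Reduced to a skeleton for a hypothetical two-field struct (names invented, kernel-side sketch only):

#include <linux/compat.h>
#include <linux/uaccess.h>

struct demo_data {              /* native layout, what the driver expects */
	size_t len;
	unsigned long arg;
};

struct compat_demo_data {       /* what a 32-bit process actually passed in */
	compat_size_t len;
	compat_ulong_t arg;
};

static int compat_get_demo_data(struct compat_demo_data __user *data32,
				struct demo_data __user *data)
{
	compat_size_t len;
	compat_ulong_t arg;
	int err;

	err = get_user(len, &data32->len);    /* read the narrow 32-bit field ... */
	err |= put_user(len, &data->len);     /* ... and widen it into the native copy */
	err |= get_user(arg, &data32->arg);
	err |= put_user(arg, &data->arg);

	return err;
}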
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
deleted file mode 100644
index 9da8f917670b..000000000000
--- a/drivers/staging/android/ion/compat_ion.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * drivers/staging/android/ion/compat_ion.h
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_COMPAT_ION_H
-#define _LINUX_COMPAT_ION_H
-
-#if IS_ENABLED(CONFIG_COMPAT)
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-#else
-
-#define compat_ion_ioctl NULL
-
-#endif /* CONFIG_COMPAT */
-#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
deleted file mode 100644
index df3539865fb4..000000000000
--- a/drivers/staging/android/ion/ion.c
+++ /dev/null
@@ -1,1655 +0,0 @@
-/*
- *
- * drivers/staging/android/ion/ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/fs.h>
-#include <linux/anon_inodes.h>
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/memblock.h>
-#include <linux/miscdevice.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/dma-buf.h>
-#include <linux/idr.h>
-
-#include "ion.h"
-#include "ion_priv.h"
-#include "compat_ion.h"
-
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @user_clients: list of all the clients created from userspace
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @display_name: used for debugging (unique version of @name)
- * @display_serial: used for debugging (to make display_name unique)
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both the handles tree
- * and the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- const char *name;
- char *display_name;
- int display_serial;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
-{
- return (buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
-}
-
-bool ion_buffer_cached(struct ion_buffer *buffer)
-{
- return !!(buffer->flags & ION_FLAG_CACHED);
-}
-
-static inline struct page *ion_buffer_page(struct page *page)
-{
- return (struct page *)((unsigned long)page & ~(1UL));
-}
-
-static inline bool ion_buffer_page_is_dirty(struct page *page)
-{
- return !!((unsigned long)page & 1UL);
-}
-
-static inline void ion_buffer_page_dirty(struct page **page)
-{
- *page = (struct page *)((unsigned long)(*page) | 1UL);
-}
-
-static inline void ion_buffer_page_clean(struct page **page)
-{
- *page = (struct page *)((unsigned long)(*page) & ~(1UL));
-}
-
-/* this function should only be called while dev->lock is held */
-static void ion_buffer_add(struct ion_device *dev,
- struct ion_buffer *buffer)
-{
- struct rb_node **p = &dev->buffers.rb_node;
- struct rb_node *parent = NULL;
- struct ion_buffer *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_buffer, node);
-
- if (buffer < entry) {
- p = &(*p)->rb_left;
- } else if (buffer > entry) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: buffer already found.", __func__);
- BUG();
- }
- }
-
- rb_link_node(&buffer->node, parent, p);
- rb_insert_color(&buffer->node, &dev->buffers);
-}
-
-/* this function should only be called while dev->lock is held */
-static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- struct ion_device *dev,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
-{
- struct ion_buffer *buffer;
- struct sg_table *table;
- struct scatterlist *sg;
- int i, ret;
-
- buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
-
- buffer->heap = heap;
- buffer->flags = flags;
- kref_init(&buffer->ref);
-
- ret = heap->ops->allocate(heap, buffer, len, align, flags);
-
- if (ret) {
- if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
- goto err2;
-
- ion_heap_freelist_drain(heap, 0);
- ret = heap->ops->allocate(heap, buffer, len, align,
- flags);
- if (ret)
- goto err2;
- }
-
- buffer->dev = dev;
- buffer->size = len;
-
- table = heap->ops->map_dma(heap, buffer);
- if (WARN_ONCE(table == NULL,
- "heap->ops->map_dma should return ERR_PTR on error"))
- table = ERR_PTR(-EINVAL);
- if (IS_ERR(table)) {
- heap->ops->free(buffer);
- kfree(buffer);
- return ERR_CAST(table);
- }
- buffer->sg_table = table;
- if (ion_buffer_fault_user_mappings(buffer)) {
- int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct scatterlist *sg;
- int i, j, k = 0;
-
- buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
- if (!buffer->pages) {
- ret = -ENOMEM;
- goto err1;
- }
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
-
- for (j = 0; j < sg->length / PAGE_SIZE; j++)
- buffer->pages[k++] = page++;
- }
-
- if (ret)
- goto err;
- }
-
- INIT_LIST_HEAD(&buffer->vmas);
- mutex_init(&buffer->lock);
- /*
- * this will set up dma addresses for the sglist -- it is not
- * technically correct as per the dma api -- a specific
- * device isn't really taking ownership here. However, in practice on
- * our systems the only dma_address space is physical addresses.
- * Additionally, we can't afford the overhead of invalidating every
- * allocation via dma_map_sg. The implicit contract here is that
-	 * memory coming from the heaps is ready for dma, i.e. if it has a
-	 * cached mapping, that mapping has been invalidated
- */
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
- sg_dma_address(sg) = sg_phys(sg);
- mutex_lock(&dev->buffer_lock);
- ion_buffer_add(dev, buffer);
- mutex_unlock(&dev->buffer_lock);
- return buffer;
-
-err:
- heap->ops->unmap_dma(heap, buffer);
- heap->ops->free(buffer);
-err1:
- vfree(buffer->pages);
-err2:
- kfree(buffer);
- return ERR_PTR(ret);
-}
-
-void ion_buffer_destroy(struct ion_buffer *buffer)
-{
- if (WARN_ON(buffer->kmap_cnt > 0))
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
- buffer->heap->ops->free(buffer);
- vfree(buffer->pages);
- kfree(buffer);
-}
-
-static void _ion_buffer_destroy(struct kref *kref)
-{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
- struct ion_heap *heap = buffer->heap;
- struct ion_device *dev = buffer->dev;
-
- mutex_lock(&dev->buffer_lock);
- rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->buffer_lock);
-
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- ion_heap_freelist_add(heap, buffer);
- else
- ion_buffer_destroy(buffer);
-}
-
-static void ion_buffer_get(struct ion_buffer *buffer)
-{
- kref_get(&buffer->ref);
-}
-
-static int ion_buffer_put(struct ion_buffer *buffer)
-{
- return kref_put(&buffer->ref, _ion_buffer_destroy);
-}
-
-static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
-{
- mutex_lock(&buffer->lock);
- buffer->handle_count++;
- mutex_unlock(&buffer->lock);
-}
-
-static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
-{
- /*
- * when a buffer is removed from a handle, if it is not in
- * any other handles, copy the taskcomm and the pid of the
- * process it's being removed from into the buffer. At this
- * point there will be no way to track what processes this buffer is
-	 * being used by; it only exists as a dma_buf file descriptor.
- * The taskcomm and pid can provide a debug hint as to where this fd
- * is in the system
- */
- mutex_lock(&buffer->lock);
- buffer->handle_count--;
- BUG_ON(buffer->handle_count < 0);
- if (!buffer->handle_count) {
- struct task_struct *task;
-
- task = current->group_leader;
- get_task_comm(buffer->task_comm, task);
- buffer->pid = task_pid_nr(task);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle;
-
- handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
- if (!handle)
- return ERR_PTR(-ENOMEM);
- kref_init(&handle->ref);
- RB_CLEAR_NODE(&handle->node);
- handle->client = client;
- ion_buffer_get(buffer);
- ion_buffer_add_to_handle(buffer);
- handle->buffer = buffer;
-
- return handle;
-}
-
-static void ion_handle_kmap_put(struct ion_handle *);
-
-static void ion_handle_destroy(struct kref *kref)
-{
- struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- struct ion_client *client = handle->client;
- struct ion_buffer *buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
- while (handle->kmap_cnt)
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
-
- idr_remove(&client->idr, handle->id);
- if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &client->handles);
-
- ion_buffer_remove_from_handle(buffer);
- ion_buffer_put(buffer);
-
- kfree(handle);
-}
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
-{
- return handle->buffer;
-}
-
-static void ion_handle_get(struct ion_handle *handle)
-{
- kref_get(&handle->ref);
-}
-
-static int ion_handle_put(struct ion_handle *handle)
-{
- struct ion_client *client = handle->client;
- int ret;
-
- mutex_lock(&client->lock);
- ret = kref_put(&handle->ref, ion_handle_destroy);
- mutex_unlock(&client->lock);
-
- return ret;
-}
-
-static struct ion_handle *ion_handle_lookup(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct rb_node *n = client->handles.rb_node;
-
- while (n) {
- struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
-
- if (buffer < entry->buffer)
- n = n->rb_left;
- else if (buffer > entry->buffer)
- n = n->rb_right;
- else
- return entry;
- }
- return ERR_PTR(-EINVAL);
-}
-
-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = idr_find(&client->idr, id);
- if (handle)
- ion_handle_get(handle);
- mutex_unlock(&client->lock);
-
- return handle ? handle : ERR_PTR(-EINVAL);
-}
-
-static bool ion_handle_validate(struct ion_client *client,
- struct ion_handle *handle)
-{
- WARN_ON(!mutex_is_locked(&client->lock));
- return idr_find(&client->idr, handle->id) == handle;
-}
-
-static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
-{
- int id;
- struct rb_node **p = &client->handles.rb_node;
- struct rb_node *parent = NULL;
- struct ion_handle *entry;
-
- id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
- if (id < 0)
- return id;
-
- handle->id = id;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_handle, node);
-
- if (handle->buffer < entry->buffer)
- p = &(*p)->rb_left;
- else if (handle->buffer > entry->buffer)
- p = &(*p)->rb_right;
- else
- WARN(1, "%s: buffer already found.", __func__);
- }
-
- rb_link_node(&handle->node, parent, p);
- rb_insert_color(&handle->node, &client->handles);
-
- return 0;
-}
-
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags)
-{
- struct ion_handle *handle;
- struct ion_device *dev = client->dev;
- struct ion_buffer *buffer = NULL;
- struct ion_heap *heap;
- int ret;
-
- pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
- len, align, heap_id_mask, flags);
- /*
- * traverse the list of heaps available in this system in priority
-	 * order. If the heap type is supported by the client and matches the
-	 * request of the caller, allocate from it. Repeat until allocate has
- * succeeded or all heaps have been tried
- */
- len = PAGE_ALIGN(len);
-
- if (!len)
- return ERR_PTR(-EINVAL);
-
- down_read(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- /* if the caller didn't specify this heap id */
- if (!((1 << heap->id) & heap_id_mask))
- continue;
- buffer = ion_buffer_create(heap, dev, len, align, flags);
- if (!IS_ERR(buffer))
- break;
- }
- up_read(&dev->lock);
-
- if (buffer == NULL)
- return ERR_PTR(-ENODEV);
-
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- handle = ion_handle_create(client, buffer);
-
- /*
- * ion_buffer_create will create a buffer with a ref_cnt of 1,
- * and ion_handle_create will take a second reference, drop one here
- */
- ion_buffer_put(buffer);
-
- if (IS_ERR(handle))
- return handle;
-
- mutex_lock(&client->lock);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
- return handle;
-}
-EXPORT_SYMBOL(ion_alloc);
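
For reference, a hedged sketch of an in-kernel caller. It assumes a client obtained earlier from ion_client_create() and a heap registered with id MY_HEAP_ID; both names are illustrative. Because heap_id_mask is a bit mask, a single heap is selected with 1 << id:

	struct ion_handle *handle;

	/* try only the heap whose id is MY_HEAP_ID, and ask for a cached buffer */
	handle = ion_alloc(client, 4 * PAGE_SIZE, PAGE_SIZE,
			   1 << MY_HEAP_ID, ION_FLAG_CACHED);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... hand the handle to ion_map_kernel()/ion_sg_table() as needed ... */

	ion_free(client, handle);
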
-
-void ion_free(struct ion_client *client, struct ion_handle *handle)
-{
- bool valid_handle;
-
- BUG_ON(client != handle->client);
-
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
-
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to free.\n", __func__);
- mutex_unlock(&client->lock);
- return;
- }
- mutex_unlock(&client->lock);
- ion_handle_put(handle);
-}
-EXPORT_SYMBOL(ion_free);
-
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_buffer *buffer;
- int ret;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
-
- buffer = handle->buffer;
-
- if (!buffer->heap->ops->phys) {
- pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
- __func__, buffer->heap->name, buffer->heap->type);
- mutex_unlock(&client->lock);
- return -ENODEV;
- }
- mutex_unlock(&client->lock);
- ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
- return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
-{
- void *vaddr;
-
- if (buffer->kmap_cnt) {
- buffer->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
- if (WARN_ONCE(vaddr == NULL,
- "heap->ops->map_kernel should return ERR_PTR on error"))
- return ERR_PTR(-EINVAL);
- if (IS_ERR(vaddr))
- return vaddr;
- buffer->vaddr = vaddr;
- buffer->kmap_cnt++;
- return vaddr;
-}
-
-static void *ion_handle_kmap_get(struct ion_handle *handle)
-{
- struct ion_buffer *buffer = handle->buffer;
- void *vaddr;
-
- if (handle->kmap_cnt) {
- handle->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR(vaddr))
- return vaddr;
- handle->kmap_cnt++;
- return vaddr;
-}
-
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
-{
- buffer->kmap_cnt--;
- if (!buffer->kmap_cnt) {
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->vaddr = NULL;
- }
-}
-
-static void ion_handle_kmap_put(struct ion_handle *handle)
-{
- struct ion_buffer *buffer = handle->buffer;
-
- if (!handle->kmap_cnt) {
- WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
- return;
- }
- handle->kmap_cnt--;
- if (!handle->kmap_cnt)
- ion_buffer_kmap_put(buffer);
-}
-
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- void *vaddr;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
-
- buffer = handle->buffer;
-
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&buffer->lock);
- vaddr = ion_handle_kmap_get(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return vaddr;
-}
-EXPORT_SYMBOL(ion_map_kernel);
-
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-static int ion_debug_client_show(struct seq_file *s, void *unused)
-{
- struct ion_client *client = s->private;
- struct rb_node *n;
- size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {NULL};
- int i;
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- unsigned int id = handle->buffer->heap->id;
-
- if (!names[id])
- names[id] = handle->buffer->heap->name;
- sizes[id] += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
-
- seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
- for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
- if (!names[i])
- continue;
- seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
- }
- return 0;
-}
-
-static int ion_debug_client_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_client_show, inode->i_private);
-}
-
-static const struct file_operations debug_client_fops = {
- .open = ion_debug_client_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
-{
- int serial = -1;
- struct rb_node *node;
-
- for (node = rb_first(root); node; node = rb_next(node)) {
- struct ion_client *client = rb_entry(node, struct ion_client,
- node);
-
- if (strcmp(client->name, name))
- continue;
- serial = max(serial, client->display_serial);
- }
- return serial + 1;
-}
-
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name)
-{
- struct ion_client *client;
- struct task_struct *task;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct ion_client *entry;
- pid_t pid;
-
- if (!name) {
- pr_err("%s: Name cannot be null\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- get_task_struct(current->group_leader);
- task_lock(current->group_leader);
- pid = task_pid_nr(current->group_leader);
- /*
- * don't bother to store task struct for kernel threads,
- * they can't be killed anyway
- */
- if (current->group_leader->flags & PF_KTHREAD) {
- put_task_struct(current->group_leader);
- task = NULL;
- } else {
- task = current->group_leader;
- }
- task_unlock(current->group_leader);
-
- client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
- if (!client)
- goto err_put_task_struct;
-
- client->dev = dev;
- client->handles = RB_ROOT;
- idr_init(&client->idr);
- mutex_init(&client->lock);
- client->task = task;
- client->pid = pid;
- client->name = kstrdup(name, GFP_KERNEL);
- if (!client->name)
- goto err_free_client;
-
- down_write(&dev->lock);
- client->display_serial = ion_get_client_serial(&dev->clients, name);
- client->display_name = kasprintf(
- GFP_KERNEL, "%s-%d", name, client->display_serial);
- if (!client->display_name) {
- up_write(&dev->lock);
- goto err_free_client_name;
- }
- p = &dev->clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
-
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->clients);
-
- client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
- if (!client->debug_root) {
- char buf[256], *path;
-
- path = dentry_path(dev->clients_debug_root, buf, 256);
- pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
- }
-
- up_write(&dev->lock);
-
- return client;
-
-err_free_client_name:
- kfree(client->name);
-err_free_client:
- kfree(client);
-err_put_task_struct:
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(ion_client_create);
-
-void ion_client_destroy(struct ion_client *client)
-{
- struct ion_device *dev = client->dev;
- struct rb_node *n;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- while ((n = rb_first(&client->handles))) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- ion_handle_destroy(&handle->ref);
- }
-
- idr_destroy(&client->idr);
-
- down_write(&dev->lock);
- if (client->task)
- put_task_struct(client->task);
- rb_erase(&client->node, &dev->clients);
- debugfs_remove_recursive(client->debug_root);
- up_write(&dev->lock);
-
- kfree(client->display_name);
- kfree(client->name);
- kfree(client);
-}
-EXPORT_SYMBOL(ion_client_destroy);
-
-struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- struct sg_table *table;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- table = buffer->sg_table;
- mutex_unlock(&client->lock);
- return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction direction);
-
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
-{
- struct dma_buf *dmabuf = attachment->dmabuf;
- struct ion_buffer *buffer = dmabuf->priv;
-
- ion_buffer_sync_for_device(buffer, attachment->dev, direction);
- return buffer->sg_table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
-{
-}
-
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
-{
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, size, 0);
- /*
- * This is not correct - sg_dma_address needs a dma_addr_t that is valid
- * for the targeted device, but this works on the currently targeted
- * hardware.
- */
- sg_dma_address(&sg) = page_to_phys(page);
- dma_sync_sg_for_device(dev, &sg, 1, dir);
-}
-
-struct ion_vma_list {
- struct list_head list;
- struct vm_area_struct *vma;
-};
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction dir)
-{
- struct ion_vma_list *vma_list;
- int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- int i;
-
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
-
- if (!ion_buffer_fault_user_mappings(buffer))
- return;
-
- mutex_lock(&buffer->lock);
- for (i = 0; i < pages; i++) {
- struct page *page = buffer->pages[i];
-
- if (ion_buffer_page_is_dirty(page))
- ion_pages_sync_for_device(dev, ion_buffer_page(page),
- PAGE_SIZE, dir);
-
- ion_buffer_page_clean(buffer->pages + i);
- }
- list_for_each_entry(vma_list, &buffer->vmas, list) {
- struct vm_area_struct *vma = vma_list->vma;
-
- zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
- NULL);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- unsigned long pfn;
- int ret;
-
- mutex_lock(&buffer->lock);
-	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
-	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
-
- pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
- mutex_unlock(&buffer->lock);
- if (ret)
- return VM_FAULT_ERROR;
-
- return VM_FAULT_NOPAGE;
-}
-
-static void ion_vm_open(struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list;
-
- vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
- if (!vma_list)
- return;
- vma_list->vma = vma;
- mutex_lock(&buffer->lock);
- list_add(&vma_list->list, &buffer->vmas);
- mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %p\n", __func__, vma);
-}
-
-static void ion_vm_close(struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list, *tmp;
-
- pr_debug("%s\n", __func__);
- mutex_lock(&buffer->lock);
- list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
- if (vma_list->vma != vma)
- continue;
- list_del(&vma_list->list);
- kfree(vma_list);
- pr_debug("%s: deleting %p\n", __func__, vma);
- break;
- }
- mutex_unlock(&buffer->lock);
-}
-
-static const struct vm_operations_struct ion_vma_ops = {
- .open = ion_vm_open,
- .close = ion_vm_close,
- .fault = ion_vm_fault,
-};
-
-static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- int ret = 0;
-
- if (!buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping to userspace\n",
- __func__);
- return -EINVAL;
- }
-
- if (ion_buffer_fault_user_mappings(buffer)) {
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
- VM_DONTDUMP;
- vma->vm_private_data = buffer;
- vma->vm_ops = &ion_vma_ops;
- ion_vm_open(vma);
- return 0;
- }
-
- if (!(buffer->flags & ION_FLAG_CACHED))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- mutex_lock(&buffer->lock);
- /* now map it to userspace */
- ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
- mutex_unlock(&buffer->lock);
-
- if (ret)
- pr_err("%s: failure mapping buffer to userspace\n",
- __func__);
-
- return ret;
-}
-
-static void ion_dma_buf_release(struct dma_buf *dmabuf)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- ion_buffer_put(buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
-{
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
- enum dma_data_direction direction)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- void *vaddr;
-
- if (!buffer->heap->ops->map_kernel) {
- pr_err("%s: map kernel is not implemented by this heap.\n",
- __func__);
- return -ENODEV;
- }
-
- mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
- mutex_unlock(&buffer->lock);
- return PTR_ERR_OR_ZERO(vaddr);
-}
-
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
- enum dma_data_direction direction)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
-}
-
-static struct dma_buf_ops dma_buf_ops = {
- .map_dma_buf = ion_map_dma_buf,
- .unmap_dma_buf = ion_unmap_dma_buf,
- .mmap = ion_mmap,
- .release = ion_dma_buf_release,
- .begin_cpu_access = ion_dma_buf_begin_cpu_access,
- .end_cpu_access = ion_dma_buf_end_cpu_access,
- .kmap_atomic = ion_dma_buf_kmap,
- .kunmap_atomic = ion_dma_buf_kunmap,
- .kmap = ion_dma_buf_kmap,
- .kunmap = ion_dma_buf_kunmap,
-};
-
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
-{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct ion_buffer *buffer;
- struct dma_buf *dmabuf;
- bool valid_handle;
-
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to share.\n", __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- ion_buffer_get(buffer);
- mutex_unlock(&client->lock);
-
- exp_info.ops = &dma_buf_ops;
- exp_info.size = buffer->size;
- exp_info.flags = O_RDWR;
- exp_info.priv = buffer;
-
- dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(dmabuf)) {
- ion_buffer_put(buffer);
- return dmabuf;
- }
-
- return dmabuf;
-}
-EXPORT_SYMBOL(ion_share_dma_buf);
-
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
-{
- struct dma_buf *dmabuf;
- int fd;
-
- dmabuf = ion_share_dma_buf(client, handle);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- fd = dma_buf_fd(dmabuf, O_CLOEXEC);
- if (fd < 0)
- dma_buf_put(dmabuf);
-
- return fd;
-}
-EXPORT_SYMBOL(ion_share_dma_buf_fd);
-
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
- struct ion_handle *handle;
- int ret;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return ERR_CAST(dmabuf);
- /* if this memory came from ion */
-
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not import dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return ERR_PTR(-EINVAL);
- }
- buffer = dmabuf->priv;
-
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR(handle)) {
- ion_handle_get(handle);
- mutex_unlock(&client->lock);
- goto end;
- }
-
- handle = ion_handle_create(client, buffer);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- goto end;
- }
-
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
-end:
- dma_buf_put(dmabuf);
- return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf);
-
-static int ion_sync_for_device(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- /* if this memory came from ion */
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not sync dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return -EINVAL;
- }
- buffer = dmabuf->priv;
-
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
- dma_buf_put(dmabuf);
- return 0;
-}
-
-/* fix up the cases where the ioctl direction bits are incorrect */
-static unsigned int ion_ioctl_dir(unsigned int cmd)
-{
- switch (cmd) {
- case ION_IOC_SYNC:
- case ION_IOC_FREE:
- case ION_IOC_CUSTOM:
- return _IOC_WRITE;
- default:
- return _IOC_DIR(cmd);
- }
-}
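
Seen from user space, the three commands above only pass data into the kernel, so forcing the direction to _IOC_WRITE spares a pointless copy back out. A hedged user-space sketch for ION_IOC_FREE, assuming the uapi ion.h, <sys/ioctl.h> and <stdio.h> are included, and that ion_fd and handle_id came from an earlier open() and ION_IOC_ALLOC:

	struct ion_handle_data req = { .handle = handle_id };

	/* req is copied into the kernel; nothing is copied back to user space */
	if (ioctl(ion_fd, ION_IOC_FREE, &req) < 0)
		perror("ION_IOC_FREE");
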
-
-static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ion_client *client = filp->private_data;
- struct ion_device *dev = client->dev;
- struct ion_handle *cleanup_handle = NULL;
- int ret = 0;
- unsigned int dir;
-
- union {
- struct ion_fd_data fd;
- struct ion_allocation_data allocation;
- struct ion_handle_data handle;
- struct ion_custom_data custom;
- } data;
-
- dir = ion_ioctl_dir(cmd);
-
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
-
- if (dir & _IOC_WRITE)
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
-
- switch (cmd) {
- case ION_IOC_ALLOC:
- {
- struct ion_handle *handle;
-
- handle = ion_alloc(client, data.allocation.len,
- data.allocation.align,
- data.allocation.heap_id_mask,
- data.allocation.flags);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- data.allocation.handle = handle->id;
-
- cleanup_handle = handle;
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle *handle;
-
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ion_free(client, handle);
- ion_handle_put(handle);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_handle *handle;
-
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.fd.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
- if (data.fd.fd < 0)
- ret = data.fd.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_handle *handle;
-
- handle = ion_import_dma_buf(client, data.fd.fd);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
- break;
- }
- case ION_IOC_SYNC:
- {
- ret = ion_sync_for_device(client, data.fd.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- if (!dev->custom_ioctl)
- return -ENOTTY;
- ret = dev->custom_ioctl(client, data.custom.cmd,
- data.custom.arg);
- break;
- }
- default:
- return -ENOTTY;
- }
-
- if (dir & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
- if (cleanup_handle)
- ion_free(client, cleanup_handle);
- return -EFAULT;
- }
- }
- return ret;
-}
-
-static int ion_release(struct inode *inode, struct file *file)
-{
- struct ion_client *client = file->private_data;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_destroy(client);
- return 0;
-}
-
-static int ion_open(struct inode *inode, struct file *file)
-{
- struct miscdevice *miscdev = file->private_data;
- struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
- struct ion_client *client;
- char debug_name[64];
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, debug_name);
- if (IS_ERR(client))
- return PTR_ERR(client);
- file->private_data = client;
-
- return 0;
-}
-
-static const struct file_operations ion_fops = {
- .owner = THIS_MODULE,
- .open = ion_open,
- .release = ion_release,
- .unlocked_ioctl = ion_ioctl,
- .compat_ioctl = compat_ion_ioctl,
-};
-
-static size_t ion_debug_heap_total(struct ion_client *client,
- unsigned int id)
-{
- size_t size = 0;
- struct rb_node *n;
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n,
- struct ion_handle,
- node);
- if (handle->buffer->heap->id == id)
- size += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
- return size;
-}
-
-static int ion_debug_heap_show(struct seq_file *s, void *unused)
-{
- struct ion_heap *heap = s->private;
- struct ion_device *dev = heap->dev;
- struct rb_node *n;
- size_t total_size = 0;
- size_t total_orphaned_size = 0;
-
- seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
- seq_puts(s, "----------------------------------------------------\n");
-
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- size_t size = ion_debug_heap_total(client, heap->id);
-
- if (!size)
- continue;
- if (client->task) {
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16s %16u %16zu\n", task_comm,
- client->pid, size);
- } else {
- seq_printf(s, "%16s %16u %16zu\n", client->name,
- client->pid, size);
- }
- }
- seq_puts(s, "----------------------------------------------------\n");
- seq_puts(s, "orphaned allocations (info is from last known client):\n");
- mutex_lock(&dev->buffer_lock);
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
- node);
- if (buffer->heap->id != heap->id)
- continue;
- total_size += buffer->size;
- if (!buffer->handle_count) {
- seq_printf(s, "%16s %16u %16zu %d %d\n",
- buffer->task_comm, buffer->pid,
- buffer->size, buffer->kmap_cnt,
- atomic_read(&buffer->ref.refcount));
- total_orphaned_size += buffer->size;
- }
- }
- mutex_unlock(&dev->buffer_lock);
- seq_puts(s, "----------------------------------------------------\n");
- seq_printf(s, "%16s %16zu\n", "total orphaned",
- total_orphaned_size);
- seq_printf(s, "%16s %16zu\n", "total ", total_size);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
- seq_puts(s, "----------------------------------------------------\n");
-
- if (heap->debug_show)
- heap->debug_show(heap, s, unused);
-
- return 0;
-}
-
-static int ion_debug_heap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_heap_show, inode->i_private);
-}
-
-static const struct file_operations debug_heap_fops = {
- .open = ion_debug_heap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int debug_shrink_set(void *data, u64 val)
-{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
-
- sc.gfp_mask = -1;
- sc.nr_to_scan = val;
-
- if (!val) {
- objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
- sc.nr_to_scan = objs;
- }
-
- heap->shrinker.scan_objects(&heap->shrinker, &sc);
- return 0;
-}
-
-static int debug_shrink_get(void *data, u64 *val)
-{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
-
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
-
- objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
- *val = objs;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
- debug_shrink_set, "%llu\n");
-
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
-{
- struct dentry *debug_file;
-
- if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
- !heap->ops->unmap_dma)
- pr_err("%s: can not add heap with invalid ops struct.\n",
- __func__);
-
- spin_lock_init(&heap->free_lock);
- heap->free_list_size = 0;
-
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- ion_heap_init_deferred_free(heap);
-
- if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
- ion_heap_init_shrinker(heap);
-
- heap->dev = dev;
- down_write(&dev->lock);
- /*
- * use negative heap->id to reverse the priority -- when traversing
-	 * the list later, attempt higher id numbers first
- */
- plist_node_init(&heap->node, -heap->id);
- plist_add(&heap->node, &dev->heaps);
- debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
-
- if (!debug_file) {
- char buf[256], *path;
-
- path = dentry_path(dev->heaps_debug_root, buf, 256);
- pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
- }
-
- if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
- char debug_name[64];
-
- snprintf(debug_name, 64, "%s_shrink", heap->name);
- debug_file = debugfs_create_file(
- debug_name, 0644, dev->heaps_debug_root, heap,
- &debug_shrink_fops);
- if (!debug_file) {
- char buf[256], *path;
-
- path = dentry_path(dev->heaps_debug_root, buf, 256);
- pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
- path, debug_name);
- }
- }
-
- up_write(&dev->lock);
-}
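
The negative priority noted in the comment inside this function works because a plist is kept sorted by ascending prio, so the heap with the largest id ends up first and is visited first by plist_for_each_entry() in ion_alloc(). A tiny hedged illustration; heap_a, heap_b and dev are hypothetical:

	plist_node_init(&heap_a->node, -2);	/* heap id 2 -> prio -2 */
	plist_node_init(&heap_b->node, -5);	/* heap id 5 -> prio -5 */
	plist_add(&heap_a->node, &dev->heaps);
	plist_add(&heap_b->node, &dev->heaps);
	/* ascending prio order puts -5 before -2, so heap id 5 is tried first */
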
-
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg))
-{
- struct ion_device *idev;
- int ret;
-
- idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
- if (!idev)
- return ERR_PTR(-ENOMEM);
-
- idev->dev.minor = MISC_DYNAMIC_MINOR;
- idev->dev.name = "ion";
- idev->dev.fops = &ion_fops;
- idev->dev.parent = NULL;
- ret = misc_register(&idev->dev);
- if (ret) {
- pr_err("ion: failed to register misc device.\n");
- kfree(idev);
- return ERR_PTR(ret);
- }
-
- idev->debug_root = debugfs_create_dir("ion", NULL);
- if (!idev->debug_root) {
- pr_err("ion: failed to create debugfs root directory.\n");
- goto debugfs_done;
- }
- idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
- if (!idev->heaps_debug_root) {
- pr_err("ion: failed to create debugfs heaps directory.\n");
- goto debugfs_done;
- }
- idev->clients_debug_root = debugfs_create_dir("clients",
- idev->debug_root);
- if (!idev->clients_debug_root)
- pr_err("ion: failed to create debugfs clients directory.\n");
-
-debugfs_done:
-
- idev->custom_ioctl = custom_ioctl;
- idev->buffers = RB_ROOT;
- mutex_init(&idev->buffer_lock);
- init_rwsem(&idev->lock);
- plist_head_init(&idev->heaps);
- idev->clients = RB_ROOT;
- return idev;
-}
-
-void ion_device_destroy(struct ion_device *dev)
-{
- misc_deregister(&dev->dev);
- debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
- kfree(dev);
-}
-
-void __init ion_reserve(struct ion_platform_data *data)
-{
- int i;
-
- for (i = 0; i < data->nr; i++) {
- if (data->heaps[i].size == 0)
- continue;
-
- if (data->heaps[i].base == 0) {
- phys_addr_t paddr;
-
- paddr = memblock_alloc_base(data->heaps[i].size,
- data->heaps[i].align,
- MEMBLOCK_ALLOC_ANYWHERE);
- if (!paddr) {
- pr_err("%s: error allocating memblock for heap %d\n",
- __func__, i);
- continue;
- }
- data->heaps[i].base = paddr;
- } else {
- int ret = memblock_reserve(data->heaps[i].base,
- data->heaps[i].size);
- if (ret)
- pr_err("memblock reserve of %zx@%lx failed\n",
- data->heaps[i].size,
- data->heaps[i].base);
- }
- pr_info("%s: %s reserved base %lx size %zu\n", __func__,
- data->heaps[i].name,
- data->heaps[i].base,
- data->heaps[i].size);
- }
-}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
deleted file mode 100644
index b860c5f579f5..000000000000
--- a/drivers/staging/android/ion/ion.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * drivers/staging/android/ion/ion.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_ION_H
-#define _LINUX_ION_H
-
-#include <linux/types.h>
-
-#include "../uapi/ion.h"
-
-struct ion_handle;
-struct ion_device;
-struct ion_heap;
-struct ion_mapper;
-struct ion_client;
-struct ion_buffer;
-
-/*
- * This should be removed some day when phys_addr_t's are fully
- * plumbed in the kernel, and all instances of ion_phys_addr_t should
- * be converted to phys_addr_t. For the time being many kernel interfaces
- * do not accept phys_addr_t's and would require casts everywhere.
- */
-#define ion_phys_addr_t unsigned long
-
-/**
- * struct ion_platform_heap - defines a heap in the given platform
- * @type: type of the heap from ion_heap_type enum
- * @id: unique identifier for heap. When allocating, heaps with higher
- * id numbers will be tried first. At allocation these are passed
- * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
- * @name: used for debug purposes
- * @base: base address of heap in physical memory if applicable
- * @size: size of the heap in bytes if applicable
- * @align: required alignment in physical memory if applicable
- * @priv: private info passed from the board file
- *
- * Provided by the board file.
- */
-struct ion_platform_heap {
- enum ion_heap_type type;
- unsigned int id;
- const char *name;
- ion_phys_addr_t base;
- size_t size;
- ion_phys_addr_t align;
- void *priv;
-};
-
-/**
- * struct ion_platform_data - array of platform heaps passed from board file
- * @nr: number of structures in the array
- * @heaps: array of platform_heap structures
- *
- * Provided by the board file in the form of platform data to a platform device.
- */
-struct ion_platform_data {
- int nr;
- struct ion_platform_heap *heaps;
-};
-
-/**
- * ion_reserve() - reserve memory for ion heaps if applicable
- * @data: platform data specifying starting physical address and
- * size
- *
- * Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specific sizes not
- * managed by the kernel
- */
-void ion_reserve(struct ion_platform_data *data);
-
-/**
- * ion_client_create() - allocate a client and returns it
- * @dev: the global ion device
- * @name: used for debugging
- */
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name);
-
-/**
- * ion_client_destroy() - frees a client and all its handles
- * @client: the client
- *
- * Free the provided client and all its resources including
- * any handles it is holding.
- */
-void ion_client_destroy(struct ion_client *client);
-
-/**
- * ion_alloc - allocate ion memory
- * @client: the client
- * @len: size of the allocation
- * @align: requested allocation alignment, lots of hardware blocks
- * have alignment requirements of some kind
- * @heap_id_mask: mask of heaps to allocate from; if multiple bits are set,
- * heaps will be tried in order from highest to lowest
- * id
- * @flags: heap flags, the low 16 bits are consumed by ion, the
- * high 16 bits are passed on to the respective heap and
- * can be heap custom
- *
- * Allocate memory in one of the heaps provided in heap mask and return
- * an opaque handle to it.
- */
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags);
-
-/**
- * ion_free - free a handle
- * @client: the client
- * @handle: the handle to free
- *
- * Free the provided handle.
- */
-void ion_free(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_phys - returns the physical address and len of a handle
- * @client: the client
- * @handle: the handle
- * @addr: a pointer to put the address in
- * @len: a pointer to put the length in
- *
- * This function queries the heap for a particular handle to get the
- * handle's physical address. Its output is only correct if
- * a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_sg_table should be used
- * instead. Returns -EINVAL if the handle is invalid. This has
- * no implications on the reference counting of the handle --
- * the returned value may not be valid if the caller is not
- * holding a reference.
- */
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len);
-
-/**
- * ion_sg_table() - return an sg_table describing a handle
- * @client: the client
- * @handle: the handle
- *
- * This function returns the sg_table describing
- * a particular ion handle.
- */
-struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
- * ion_map_kernel - create mapping for the given handle
- * @client: the client
- * @handle: handle to map
- *
- * Map the given handle into the kernel and return a kernel address that
- * can be used to access the buffer's memory.
- */
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_unmap_kernel() - destroy a kernel mapping for a handle
- * @client: the client
- * @handle: handle to unmap
- */
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_share_dma_buf() - share buffer as dma-buf
- * @client: the client
- * @handle: the handle
- */
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
- * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
- * @client: the client
- * @handle: the handle
- */
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
- * @client: the client
- * @fd: the dma-buf fd
- *
- * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf(),
- * import that fd and return a handle representing it. If a dma-buf from
- * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
- */
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
-
-#endif /* _LINUX_ION_H */
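
Taken together, the declarations above support the following hedged in-kernel lifecycle sketch. The ion device idev and the heap id MY_HEAP_ID are assumptions, and the IS_ERR()/error paths are omitted for brevity:

	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int fd;

	client = ion_client_create(idev, "my-driver");	/* name shows up in debugfs */
	handle = ion_alloc(client, 4 * PAGE_SIZE, PAGE_SIZE,
			   1 << MY_HEAP_ID, ION_FLAG_CACHED);

	vaddr = ion_map_kernel(client, handle);		/* CPU access */
	/* ... fill or read the buffer through vaddr ... */
	ion_unmap_kernel(client, handle);

	fd = ion_share_dma_buf_fd(client, handle);	/* export to user space */

	ion_free(client, handle);	/* the dma-buf fd keeps the buffer alive */
	ion_client_destroy(client);
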
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
deleted file mode 100644
index 9156d8238c97..000000000000
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_carveout_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion.h"
-#include "ion_priv.h"
-
-struct ion_carveout_heap {
- struct ion_heap heap;
- struct gen_pool *pool;
- ion_phys_addr_t base;
-};
-
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
- unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
-
- if (!offset)
- return ION_CARVEOUT_ALLOCATE_FAIL;
-
- return offset;
-}
-
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
- return;
- gen_pool_free(carveout_heap->pool, addr, size);
-}
-
-static int ion_carveout_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
-
- *addr = paddr;
- *len = buffer->size;
- return 0;
-}
-
-static int ion_carveout_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- struct sg_table *table;
- ion_phys_addr_t paddr;
- int ret;
-
- if (align > PAGE_SIZE)
- return -EINVAL;
-
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret)
- goto err_free;
-
- paddr = ion_carveout_allocate(heap, size, align);
- if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
- ret = -ENOMEM;
- goto err_free_table;
- }
-
- sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
- buffer->priv_virt = table;
-
- return 0;
-
-err_free_table:
- sg_free_table(table);
-err_free:
- kfree(table);
- return ret;
-}
-
-static void ion_carveout_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
-
- ion_heap_buffer_zero(buffer);
-
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
- ion_carveout_free(heap, paddr, buffer->size);
- sg_free_table(table);
- kfree(table);
-}
-
-static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static struct ion_heap_ops carveout_heap_ops = {
- .allocate = ion_carveout_heap_allocate,
- .free = ion_carveout_heap_free,
- .phys = ion_carveout_heap_phys,
- .map_dma = ion_carveout_heap_map_dma,
- .unmap_dma = ion_carveout_heap_unmap_dma,
- .map_user = ion_heap_map_user,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
-};
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_carveout_heap *carveout_heap;
- int ret;
-
- struct page *page;
- size_t size;
-
- page = pfn_to_page(PFN_DOWN(heap_data->base));
- size = heap_data->size;
-
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
- ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
- if (ret)
- return ERR_PTR(ret);
-
- carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
- if (!carveout_heap)
- return ERR_PTR(-ENOMEM);
-
- carveout_heap->pool = gen_pool_create(12, -1);
- if (!carveout_heap->pool) {
- kfree(carveout_heap);
- return ERR_PTR(-ENOMEM);
- }
- carveout_heap->base = heap_data->base;
- gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
- -1);
- carveout_heap->heap.ops = &carveout_heap_ops;
- carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
- carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-
- return &carveout_heap->heap;
-}
-
-void ion_carveout_heap_destroy(struct ion_heap *heap)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- gen_pool_destroy(carveout_heap->pool);
- kfree(carveout_heap);
- carveout_heap = NULL;
-}
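
A board file would typically point this heap at a region set aside with memblock/ion_reserve() and then register it on the ion device. A hedged sketch; MY_CARVEOUT_ID, the base address and idev are assumptions:

	struct ion_platform_heap pheap = {
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id   = MY_CARVEOUT_ID,
		.name = "carveout",
		.base = 0x88000000,	/* physical base of the reserved region */
		.size = SZ_4M,
	};
	struct ion_heap *heap;

	heap = ion_carveout_heap_create(&pheap);
	if (!IS_ERR(heap))
		ion_device_add_heap(idev, heap);
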
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
deleted file mode 100644
index 195c41d7bd53..000000000000
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_chunk_heap.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion.h"
-#include "ion_priv.h"
-
-struct ion_chunk_heap {
- struct ion_heap heap;
- struct gen_pool *pool;
- ion_phys_addr_t base;
- unsigned long chunk_size;
- unsigned long size;
- unsigned long allocated;
-};
-
-static int ion_chunk_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table;
- struct scatterlist *sg;
- int ret, i;
- unsigned long num_chunks;
- unsigned long allocated_size;
-
- if (align > chunk_heap->chunk_size)
- return -EINVAL;
-
- allocated_size = ALIGN(size, chunk_heap->chunk_size);
- num_chunks = allocated_size / chunk_heap->chunk_size;
-
- if (allocated_size > chunk_heap->size - chunk_heap->allocated)
- return -ENOMEM;
-
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
- ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ret;
- }
-
- sg = table->sgl;
- for (i = 0; i < num_chunks; i++) {
- unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
- chunk_heap->chunk_size);
- if (!paddr)
- goto err;
- sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
- chunk_heap->chunk_size, 0);
- sg = sg_next(sg);
- }
-
- buffer->priv_virt = table;
- chunk_heap->allocated += allocated_size;
- return 0;
-err:
- sg = table->sgl;
- for (i -= 1; i >= 0; i--) {
- gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
- sg->length);
- sg = sg_next(sg);
- }
- sg_free_table(table);
- kfree(table);
- return -ENOMEM;
-}
-
-static void ion_chunk_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table = buffer->priv_virt;
- struct scatterlist *sg;
- int i;
- unsigned long allocated_size;
-
- allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
-
- ion_heap_buffer_zero(buffer);
-
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
- sg->length);
- }
- chunk_heap->allocated -= allocated_size;
- sg_free_table(table);
- kfree(table);
-}
-
-static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static struct ion_heap_ops chunk_heap_ops = {
- .allocate = ion_chunk_heap_allocate,
- .free = ion_chunk_heap_free,
- .map_dma = ion_chunk_heap_map_dma,
- .unmap_dma = ion_chunk_heap_unmap_dma,
- .map_user = ion_heap_map_user,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
-};
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_chunk_heap *chunk_heap;
- int ret;
- struct page *page;
- size_t size;
-
- page = pfn_to_page(PFN_DOWN(heap_data->base));
- size = heap_data->size;
-
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
- ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
- if (ret)
- return ERR_PTR(ret);
-
- chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
- if (!chunk_heap)
- return ERR_PTR(-ENOMEM);
-
- chunk_heap->chunk_size = (unsigned long)heap_data->priv;
- chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
- PAGE_SHIFT, -1);
- if (!chunk_heap->pool) {
- ret = -ENOMEM;
- goto error_gen_pool_create;
- }
- chunk_heap->base = heap_data->base;
- chunk_heap->size = heap_data->size;
- chunk_heap->allocated = 0;
-
- gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
- chunk_heap->heap.ops = &chunk_heap_ops;
- chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
- chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
-
- return &chunk_heap->heap;
-
-error_gen_pool_create:
- kfree(chunk_heap);
- return ERR_PTR(ret);
-}
-
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
-
- gen_pool_destroy(chunk_heap->pool);
- kfree(chunk_heap);
- chunk_heap = NULL;
-}
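
Registration mirrors the carveout case, except that the chunk size travels in the priv field of the platform heap (the dummy driver further down in this patch passes SZ_16K the same way). A hedged sketch; MY_CHUNK_ID, the base address and idev are assumptions:

	struct ion_platform_heap pheap = {
		.type = ION_HEAP_TYPE_CHUNK,
		.id   = MY_CHUNK_ID,
		.name = "chunk",
		.base = 0x90000000,	/* physical base of the reserved region */
		.size = SZ_4M,
		.priv = (void *)SZ_16K,	/* read back as the chunk size */
	};
	struct ion_heap *heap;

	heap = ion_chunk_heap_create(&pheap);
	if (!IS_ERR(heap))
		ion_device_add_heap(idev, heap);
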
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
deleted file mode 100644
index a3446da4fdc2..000000000000
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_cma_heap.c
- *
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-
-#include "ion.h"
-#include "ion_priv.h"
-
-#define ION_CMA_ALLOCATE_FAILED -1
-
-struct ion_cma_heap {
- struct ion_heap heap;
- struct device *dev;
-};
-
-#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-
-struct ion_cma_buffer_info {
- void *cpu_addr;
- dma_addr_t handle;
- struct sg_table *table;
-};
-
-
-/* ION CMA heap operations functions */
-static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
- unsigned long flags)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info;
-
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
- if (buffer->flags & ION_FLAG_CACHED)
- return -EINVAL;
-
- if (align > PAGE_SIZE)
- return -EINVAL;
-
- info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
- if (!info)
- return ION_CMA_ALLOCATE_FAILED;
-
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
- GFP_HIGHUSER | __GFP_ZERO);
-
- if (!info->cpu_addr) {
- dev_err(dev, "Fail to allocate buffer\n");
- goto err;
- }
-
- info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!info->table)
- goto free_mem;
-
- if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
- len))
- goto free_table;
- /* keep this for memory release */
- buffer->priv_virt = info;
- dev_dbg(dev, "Allocate buffer %p\n", buffer);
- return 0;
-
-free_table:
- kfree(info->table);
-free_mem:
- dma_free_coherent(dev, len, info->cpu_addr, info->handle);
-err:
- kfree(info);
- return ION_CMA_ALLOCATE_FAILED;
-}
-
-static void ion_cma_free(struct ion_buffer *buffer)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Release buffer %p\n", buffer);
- /* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
- /* release sg table */
- sg_free_table(info->table);
- kfree(info->table);
- kfree(info);
-}
-
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
- &info->handle);
-
- *addr = info->handle;
- *len = buffer->size;
-
- return 0;
-}
-
-static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return info->table;
-}
-
-static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
- buffer->size);
-}
-
-static void *ion_cma_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
- /* kernel memory mapping has been done at allocation time */
- return info->cpu_addr;
-}
-
-static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static struct ion_heap_ops ion_cma_ops = {
- .allocate = ion_cma_allocate,
- .free = ion_cma_free,
- .map_dma = ion_cma_heap_map_dma,
- .unmap_dma = ion_cma_heap_unmap_dma,
- .phys = ion_cma_phys,
- .map_user = ion_cma_mmap,
- .map_kernel = ion_cma_map_kernel,
- .unmap_kernel = ion_cma_unmap_kernel,
-};
-
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
-{
- struct ion_cma_heap *cma_heap;
-
- cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
-
- if (!cma_heap)
- return ERR_PTR(-ENOMEM);
-
- cma_heap->heap.ops = &ion_cma_ops;
- /*
- * get device from private heaps data, later it will be
- * used to make the link with reserved CMA memory
- */
- cma_heap->dev = data->priv;
- cma_heap->heap.type = ION_HEAP_TYPE_DMA;
- return &cma_heap->heap;
-}
-
-void ion_cma_heap_destroy(struct ion_heap *heap)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-
- kfree(cma_heap);
-}
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
deleted file mode 100644
index 5678870bff48..000000000000
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * drivers/gpu/ion/ion_dummy_driver.c
- *
- * Copyright (C) 2013 Linaro, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/memblock.h>
-#include <linux/sizes.h>
-#include <linux/io.h>
-#include "ion.h"
-#include "ion_priv.h"
-
-static struct ion_device *idev;
-static struct ion_heap **heaps;
-
-static void *carveout_ptr;
-static void *chunk_ptr;
-
-static struct ion_platform_heap dummy_heaps[] = {
- {
- .id = ION_HEAP_TYPE_SYSTEM,
- .type = ION_HEAP_TYPE_SYSTEM,
- .name = "system",
- },
- {
- .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
- .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
- .name = "system contig",
- },
- {
- .id = ION_HEAP_TYPE_CARVEOUT,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = "carveout",
- .size = SZ_4M,
- },
- {
- .id = ION_HEAP_TYPE_CHUNK,
- .type = ION_HEAP_TYPE_CHUNK,
- .name = "chunk",
- .size = SZ_4M,
- .align = SZ_16K,
- .priv = (void *)(SZ_16K),
- },
-};
-
-static struct ion_platform_data dummy_ion_pdata = {
- .nr = ARRAY_SIZE(dummy_heaps),
- .heaps = dummy_heaps,
-};
-
-static int __init ion_dummy_init(void)
-{
- int i, err;
-
- idev = ion_device_create(NULL);
- heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
- GFP_KERNEL);
- if (!heaps)
- return -ENOMEM;
-
-
- /* Allocate a dummy carveout heap */
- carveout_ptr = alloc_pages_exact(
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
- GFP_KERNEL);
- if (carveout_ptr)
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
- virt_to_phys(carveout_ptr);
- else
- pr_err("ion_dummy: Could not allocate carveout\n");
-
- /* Allocate a dummy chunk heap */
- chunk_ptr = alloc_pages_exact(
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
- GFP_KERNEL);
- if (chunk_ptr)
- dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
- else
- pr_err("ion_dummy: Could not allocate chunk\n");
-
- for (i = 0; i < dummy_ion_pdata.nr; i++) {
- struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
-
- if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
- !heap_data->base)
- continue;
-
- if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
- continue;
-
- heaps[i] = ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err;
- }
- ion_device_add_heap(idev, heaps[i]);
- }
- return 0;
-err:
- for (i = 0; i < dummy_ion_pdata.nr; ++i)
- ion_heap_destroy(heaps[i]);
- kfree(heaps);
-
- if (carveout_ptr) {
- free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
- carveout_ptr = NULL;
- }
- if (chunk_ptr) {
- free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
- chunk_ptr = NULL;
- }
- return err;
-}
-device_initcall(ion_dummy_init);
-
-static void __exit ion_dummy_exit(void)
-{
- int i;
-
- ion_device_destroy(idev);
-
- for (i = 0; i < dummy_ion_pdata.nr; i++)
- ion_heap_destroy(heaps[i]);
- kfree(heaps);
-
- if (carveout_ptr) {
- free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
- carveout_ptr = NULL;
- }
- if (chunk_ptr) {
- free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
- chunk_ptr = NULL;
- }
-}
-__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
deleted file mode 100644
index fd13d05b538a..000000000000
--- a/drivers/staging/android/ion/ion_heap.c
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/rtmutex.h>
-#include <linux/sched.h>
-#include <linux/scatterlist.h>
-#include <linux/vmalloc.h>
-#include "ion.h"
-#include "ion_priv.h"
-
-void *ion_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct scatterlist *sg;
- int i, j;
- void *vaddr;
- pgprot_t pgprot;
- struct sg_table *table = buffer->sg_table;
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct page **pages = vmalloc(sizeof(struct page *) * npages);
- struct page **tmp = pages;
-
- if (!pages)
- return NULL;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
- struct page *page = sg_page(sg);
-
- BUG_ON(i >= npages);
- for (j = 0; j < npages_this_entry; j++)
- *(tmp++) = page++;
- }
- vaddr = vmap(pages, npages, VM_MAP, pgprot);
- vfree(pages);
-
- if (vaddr == NULL)
- return ERR_PTR(-ENOMEM);
-
- return vaddr;
-}
-
-void ion_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- vunmap(buffer->vaddr);
-}
-
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct sg_table *table = buffer->sg_table;
- unsigned long addr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
- struct scatterlist *sg;
- int i;
- int ret;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long remainder = vma->vm_end - addr;
- unsigned long len = sg->length;
-
- if (offset >= sg->length) {
- offset -= sg->length;
- continue;
- } else if (offset) {
- page += offset / PAGE_SIZE;
- len = sg->length - offset;
- offset = 0;
- }
- len = min(len, remainder);
- ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
- if (ret)
- return ret;
- addr += len;
- if (addr >= vma->vm_end)
- return 0;
- }
- return 0;
-}
-
-static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
-{
- void *addr = vm_map_ram(pages, num, -1, pgprot);
-
- if (!addr)
- return -ENOMEM;
- memset(addr, 0, PAGE_SIZE * num);
- vm_unmap_ram(addr, num);
-
- return 0;
-}
-
-static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
- pgprot_t pgprot)
-{
- int p = 0;
- int ret = 0;
- struct sg_page_iter piter;
- struct page *pages[32];
-
- for_each_sg_page(sgl, &piter, nents, 0) {
- pages[p++] = sg_page_iter_page(&piter);
- if (p == ARRAY_SIZE(pages)) {
- ret = ion_heap_clear_pages(pages, p, pgprot);
- if (ret)
- return ret;
- p = 0;
- }
- }
- if (p)
- ret = ion_heap_clear_pages(pages, p, pgprot);
-
- return ret;
-}
-
-int ion_heap_buffer_zero(struct ion_buffer *buffer)
-{
- struct sg_table *table = buffer->sg_table;
- pgprot_t pgprot;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
-}
-
-int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
-{
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, size, 0);
- return ion_heap_sglist_zero(&sg, 1, pgprot);
-}
-
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
-{
- spin_lock(&heap->free_lock);
- list_add(&buffer->list, &heap->free_list);
- heap->free_list_size += buffer->size;
- spin_unlock(&heap->free_lock);
- wake_up(&heap->waitqueue);
-}
-
-size_t ion_heap_freelist_size(struct ion_heap *heap)
-{
- size_t size;
-
- spin_lock(&heap->free_lock);
- size = heap->free_list_size;
- spin_unlock(&heap->free_lock);
-
- return size;
-}
-
-static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
- bool skip_pools)
-{
- struct ion_buffer *buffer;
- size_t total_drained = 0;
-
- if (ion_heap_freelist_size(heap) == 0)
- return 0;
-
- spin_lock(&heap->free_lock);
- if (size == 0)
- size = heap->free_list_size;
-
- while (!list_empty(&heap->free_list)) {
- if (total_drained >= size)
- break;
- buffer = list_first_entry(&heap->free_list, struct ion_buffer,
- list);
- list_del(&buffer->list);
- heap->free_list_size -= buffer->size;
- if (skip_pools)
- buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
- total_drained += buffer->size;
- spin_unlock(&heap->free_lock);
- ion_buffer_destroy(buffer);
- spin_lock(&heap->free_lock);
- }
- spin_unlock(&heap->free_lock);
-
- return total_drained;
-}
-
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
-{
- return _ion_heap_freelist_drain(heap, size, false);
-}
-
-size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
-{
- return _ion_heap_freelist_drain(heap, size, true);
-}
-
-static int ion_heap_deferred_free(void *data)
-{
- struct ion_heap *heap = data;
-
- while (true) {
- struct ion_buffer *buffer;
-
- wait_event_freezable(heap->waitqueue,
- ion_heap_freelist_size(heap) > 0);
-
- spin_lock(&heap->free_lock);
- if (list_empty(&heap->free_list)) {
- spin_unlock(&heap->free_lock);
- continue;
- }
- buffer = list_first_entry(&heap->free_list, struct ion_buffer,
- list);
- list_del(&buffer->list);
- heap->free_list_size -= buffer->size;
- spin_unlock(&heap->free_lock);
- ion_buffer_destroy(buffer);
- }
-
- return 0;
-}
-
-int ion_heap_init_deferred_free(struct ion_heap *heap)
-{
- struct sched_param param = { .sched_priority = 0 };
-
- INIT_LIST_HEAD(&heap->free_list);
- init_waitqueue_head(&heap->waitqueue);
- heap->task = kthread_run(ion_heap_deferred_free, heap,
- "%s", heap->name);
- if (IS_ERR(heap->task)) {
- pr_err("%s: creating thread for deferred free failed\n",
- __func__);
- return PTR_ERR_OR_ZERO(heap->task);
- }
- sched_setscheduler(heap->task, SCHED_IDLE, &param);
- return 0;
-}
-
-static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
- struct shrink_control *sc)
-{
- struct ion_heap *heap = container_of(shrinker, struct ion_heap,
- shrinker);
- int total = 0;
-
- total = ion_heap_freelist_size(heap) / PAGE_SIZE;
- if (heap->ops->shrink)
- total += heap->ops->shrink(heap, sc->gfp_mask, 0);
- return total;
-}
-
-static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
-{
- struct ion_heap *heap = container_of(shrinker, struct ion_heap,
- shrinker);
- int freed = 0;
- int to_scan = sc->nr_to_scan;
-
- if (to_scan == 0)
- return 0;
-
- /*
- * shrink the free list first, no point in zeroing the memory if we're
- * just going to reclaim it. Also, skip any possible page pooling.
- */
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
- PAGE_SIZE;
-
- to_scan -= freed;
- if (to_scan <= 0)
- return freed;
-
- if (heap->ops->shrink)
- freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
- return freed;
-}
-
-void ion_heap_init_shrinker(struct ion_heap *heap)
-{
- heap->shrinker.count_objects = ion_heap_shrink_count;
- heap->shrinker.scan_objects = ion_heap_shrink_scan;
- heap->shrinker.seeks = DEFAULT_SEEKS;
- heap->shrinker.batch = 0;
- register_shrinker(&heap->shrinker);
-}
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_heap *heap = NULL;
-
- switch (heap_data->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- heap = ion_system_contig_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- heap = ion_system_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- heap = ion_carveout_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CHUNK:
- heap = ion_chunk_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_DMA:
- heap = ion_cma_heap_create(heap_data);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap_data->type);
- return ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
- __func__, heap_data->name, heap_data->type,
- heap_data->base, heap_data->size);
- return ERR_PTR(-EINVAL);
- }
-
- heap->name = heap_data->name;
- heap->id = heap_data->id;
- return heap;
-}
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
- if (!heap)
- return;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- ion_system_contig_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- ion_system_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- ion_carveout_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CHUNK:
- ion_chunk_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap->type);
- }
-}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
deleted file mode 100644
index 19ad3aba499a..000000000000
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_mem_pool.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include "ion_priv.h"
-
-static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
-{
- struct page *page = alloc_pages(pool->gfp_mask, pool->order);
-
- if (!page)
- return NULL;
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
- return page;
-}
-
-static void ion_page_pool_free_pages(struct ion_page_pool *pool,
- struct page *page)
-{
- __free_pages(page, pool->order);
-}
-
-static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
-{
- mutex_lock(&pool->mutex);
- if (PageHighMem(page)) {
- list_add_tail(&page->lru, &pool->high_items);
- pool->high_count++;
- } else {
- list_add_tail(&page->lru, &pool->low_items);
- pool->low_count++;
- }
- mutex_unlock(&pool->mutex);
- return 0;
-}
-
-static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
-{
- struct page *page;
-
- if (high) {
- BUG_ON(!pool->high_count);
- page = list_first_entry(&pool->high_items, struct page, lru);
- pool->high_count--;
- } else {
- BUG_ON(!pool->low_count);
- page = list_first_entry(&pool->low_items, struct page, lru);
- pool->low_count--;
- }
-
- list_del(&page->lru);
- return page;
-}
-
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
-{
- struct page *page = NULL;
-
- BUG_ON(!pool);
-
- mutex_lock(&pool->mutex);
- if (pool->high_count)
- page = ion_page_pool_remove(pool, true);
- else if (pool->low_count)
- page = ion_page_pool_remove(pool, false);
- mutex_unlock(&pool->mutex);
-
- if (!page)
- page = ion_page_pool_alloc_pages(pool);
-
- return page;
-}
-
-void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
-{
- int ret;
-
- BUG_ON(pool->order != compound_order(page));
-
- ret = ion_page_pool_add(pool, page);
- if (ret)
- ion_page_pool_free_pages(pool, page);
-}
-
-static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
-{
- int count = pool->low_count;
-
- if (high)
- count += pool->high_count;
-
- return count << pool->order;
-}
-
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan)
-{
- int freed = 0;
- bool high;
-
- if (current_is_kswapd())
- high = true;
- else
- high = !!(gfp_mask & __GFP_HIGHMEM);
-
- if (nr_to_scan == 0)
- return ion_page_pool_total(pool, high);
-
- while (freed < nr_to_scan) {
- struct page *page;
-
- mutex_lock(&pool->mutex);
- if (pool->low_count) {
- page = ion_page_pool_remove(pool, false);
- } else if (high && pool->high_count) {
- page = ion_page_pool_remove(pool, true);
- } else {
- mutex_unlock(&pool->mutex);
- break;
- }
- mutex_unlock(&pool->mutex);
- ion_page_pool_free_pages(pool, page);
- freed += (1 << pool->order);
- }
-
- return freed;
-}
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
-{
- struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
- GFP_KERNEL);
- if (!pool)
- return NULL;
- pool->high_count = 0;
- pool->low_count = 0;
- INIT_LIST_HEAD(&pool->low_items);
- INIT_LIST_HEAD(&pool->high_items);
- pool->gfp_mask = gfp_mask | __GFP_COMP;
- pool->order = order;
- mutex_init(&pool->mutex);
- plist_node_init(&pool->list, order);
-
- return pool;
-}
-
-void ion_page_pool_destroy(struct ion_page_pool *pool)
-{
- kfree(pool);
-}
-
-static int __init ion_page_pool_init(void)
-{
- return 0;
-}
-
-static void __exit ion_page_pool_exit(void)
-{
-}
-
-module_init(ion_page_pool_init);
-module_exit(ion_page_pool_exit);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
deleted file mode 100644
index 0239883bffb7..000000000000
--- a/drivers/staging/android/ion/ion_priv.h
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_priv.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ION_PRIV_H
-#define _ION_PRIV_H
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/shrinker.h>
-#include <linux/types.h>
-
-#include "ion.h"
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
-
-/**
- * struct ion_buffer - metadata for a particular buffer
- * @ref: reference count
- * @node: node in the ion_device buffers tree
- * @dev: back pointer to the ion_device
- * @heap: back pointer to the heap the buffer came from
- * @flags: buffer specific flags
- * @private_flags: internal buffer specific flags
- * @size: size of the buffer
- * @priv_virt: private data to the buffer representable as
- * a void *
- * @priv_phys: private data to the buffer representable as
- * an ion_phys_addr_t (and someday a phys_addr_t)
- * @lock: protects the buffers cnt fields
- * @kmap_cnt: number of times the buffer is mapped to the kernel
- * @vaddr: the kernel mapping if kmap_cnt is not zero
- * @dmap_cnt: number of times the buffer is mapped for dma
- * @sg_table: the sg table for the buffer if dmap_cnt is not zero
- * @pages: flat array of pages in the buffer -- used by fault
- * handler and only valid for buffers that are faulted in
- * @vmas: list of vma's mapping this buffer
- * @handle_count: count of handles referencing this buffer
- * @task_comm: taskcomm of last client to reference this buffer in a
- * handle, used for debugging
- * @pid: pid of last client to reference this buffer in a
- * handle, used for debugging
-*/
-struct ion_buffer {
- struct kref ref;
- union {
- struct rb_node node;
- struct list_head list;
- };
- struct ion_device *dev;
- struct ion_heap *heap;
- unsigned long flags;
- unsigned long private_flags;
- size_t size;
- union {
- void *priv_virt;
- ion_phys_addr_t priv_phys;
- };
- struct mutex lock;
- int kmap_cnt;
- void *vaddr;
- int dmap_cnt;
- struct sg_table *sg_table;
- struct page **pages;
- struct list_head vmas;
- /* used to track orphaned buffers */
- int handle_count;
- char task_comm[TASK_COMM_LEN];
- pid_t pid;
-};
-void ion_buffer_destroy(struct ion_buffer *buffer);
-
-/**
- * struct ion_heap_ops - ops to operate on a given heap
- * @allocate: allocate memory
- * @free: free memory
- * @phys:		get physical address of a buffer (only defined on
- *			physically contiguous heaps)
- * @map_dma:		map the memory for dma to a scatterlist
- * @unmap_dma:		unmap the memory for dma
- * @map_kernel:		map memory to the kernel
- * @unmap_kernel:	unmap memory from the kernel
- * @map_user:		map memory to userspace
- *
- * allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return a pointer on success, ERR_PTR on
- * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
- * the buffer's private_flags when called from a shrinker. In that
- * case, the pages being free'd must be truly free'd back to the
- * system, not put in a page pool or otherwise cached.
- */
-struct ion_heap_ops {
- int (*allocate)(struct ion_heap *heap,
- struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
- void (*free)(struct ion_buffer *buffer);
- int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len);
- struct sg_table * (*map_dma)(struct ion_heap *heap,
- struct ion_buffer *buffer);
- void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
- void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
- void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
- int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
- int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
-};
-
-/**
- * heap flags - flags between the heaps and core ion code
- */
-#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
-
-/**
- * private flags - flags internal to ion
- */
-/*
- * Buffer is being freed from a shrinker function. Skip any possible
- * heap-specific caching mechanism (e.g. page pools). Guarantees that
- * any buffer storage that came from the system allocator will be
- * returned to the system allocator.
- */
-#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
-
-/**
- * struct ion_heap - represents a heap in the system
- * @node: rb node to put the heap on the device's tree of heaps
- * @dev: back pointer to the ion_device
- * @type: type of heap
- * @ops: ops struct as above
- * @flags: flags
- * @id: id of heap, also indicates priority of this heap when
- * allocating. These are specified by platform data and
- * MUST be unique
- * @name: used for debugging
- * @shrinker: a shrinker for the heap
- * @free_list: free list head if deferred free is used
- * @free_list_size:	size of the deferred free list in bytes
- * @lock: protects the free list
- * @waitqueue: queue to wait on from deferred free thread
- * @task: task struct of deferred free thread
- * @debug_show: called when heap debug file is read to add any
- * heap specific debug info to output
- *
- * Represents a pool of memory from which buffers can be made. In some
- * systems the only heap is regular system memory allocated via vmalloc.
- * On others, some blocks might require large physically contiguous buffers
- * that are allocated from a specially reserved heap.
- */
-struct ion_heap {
- struct plist_node node;
- struct ion_device *dev;
- enum ion_heap_type type;
- struct ion_heap_ops *ops;
- unsigned long flags;
- unsigned int id;
- const char *name;
- struct shrinker shrinker;
- struct list_head free_list;
- size_t free_list_size;
- spinlock_t free_lock;
- wait_queue_head_t waitqueue;
- struct task_struct *task;
-
- int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
-};
-
-/**
- * ion_buffer_cached - check whether this ion buffer is cached
- * @buffer: buffer
- *
- * indicates whether this ion buffer is cached
- */
-bool ion_buffer_cached(struct ion_buffer *buffer);
-
-/**
- * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
- * @buffer: buffer
- *
- * indicates whether userspace mappings of this buffer will be faulted
- * in; this can affect how buffers are allocated from the heap.
- */
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-
-/**
- * ion_device_create - allocates and returns an ion device
- * @custom_ioctl: arch specific ioctl function if applicable
- *
- * returns a valid device or an ERR_PTR on failure
- */
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg));
-
-/**
- * ion_device_destroy - free a device and its resources
- * @dev: the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
-/**
- * ion_device_add_heap - adds a heap to the ion device
- * @dev: the device
- * @heap: the heap to add
- */
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-
-/**
- * some helpers for common operations on buffers using the sg_table
- * and vaddr fields
- */
-void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
-void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
-int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
- struct vm_area_struct *);
-int ion_heap_buffer_zero(struct ion_buffer *buffer);
-int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
-
-/**
- * ion_heap_init_shrinker
- * @heap: the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
- * this function will be called to set up a shrinker to shrink the freelists
- * and call the heap's shrink op.
- */
-void ion_heap_init_shrinker(struct ion_heap *heap);
-
-/**
- * ion_heap_init_deferred_free -- initialize deferred free functionality
- * @heap: the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag, this function will
- * be called to set up deferred frees. Calls to free the buffer will
- * return immediately and the actual free will occur some time later.
- */
-int ion_heap_init_deferred_free(struct ion_heap *heap);
-
-/**
- * ion_heap_freelist_add - add a buffer to the deferred free list
- * @heap: the heap
- * @buffer: the buffer
- *
- * Adds an item to the deferred freelist.
- */
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
-
-/**
- * ion_heap_freelist_drain - drain the deferred free list
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- */
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
-
-/**
- * ion_heap_freelist_shrink - drain the deferred free list, skipping any
- * heap-specific pooling or caching mechanisms
- *
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- *
- * Unlike with @ion_heap_freelist_drain, don't put any pages back into
- * page pools or otherwise cache the pages. Everything must be
- * genuinely free'd back to the system. If you're free'ing from a
- * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
- */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
- size_t size);
-
-/**
- * ion_heap_freelist_size - returns the size of the freelist in bytes
- * @heap: the heap
- */
-size_t ion_heap_freelist_size(struct ion_heap *heap);
-
-
-/**
- * functions for creating and destroying the built in ion heaps.
- * architectures can add their own custom architecture specific
- * heaps as appropriate.
- */
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *);
-void ion_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
-void ion_system_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
-void ion_system_contig_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
-void ion_carveout_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
-void ion_chunk_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
-void ion_cma_heap_destroy(struct ion_heap *);
-
-/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
- unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
-/**
- * functions for creating and destroying a heap pool -- allows you
- * to keep a pool of pre-allocated memory to use from your heap. Keeping
- * a pool of memory that is ready for DMA, i.e. any cached mappings have been
- * invalidated from the cache, provides a significant performance benefit on
- * many systems.
- */
-
-/**
- * struct ion_page_pool - pagepool struct
- * @high_count: number of highmem items in the pool
- * @low_count: number of lowmem items in the pool
- * @high_items: list of highmem items
- * @low_items: list of lowmem items
- * @mutex:		lock protecting this struct, especially the counts
- *			and item lists
- * @gfp_mask: gfp_mask to use from alloc
- * @order: order of pages in the pool
- * @list: plist node for list of pools
- *
- * Allows you to keep a pool of pre-allocated pages to use from your heap.
- * Keeping a pool of pages that is ready for DMA, i.e. any cached mappings have
- * been invalidated from the cache, provides a significant performance benefit
- * on many systems.
- */
-struct ion_page_pool {
- int high_count;
- int low_count;
- struct list_head high_items;
- struct list_head low_items;
- struct mutex mutex;
- gfp_t gfp_mask;
- unsigned int order;
- struct plist_node list;
-};
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
-void ion_page_pool_destroy(struct ion_page_pool *);
-struct page *ion_page_pool_alloc(struct ion_page_pool *);
-void ion_page_pool_free(struct ion_page_pool *, struct page *);
-
-/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
- * @pool: the pool
- * @gfp_mask: the memory type to reclaim
- * @nr_to_scan: number of items to shrink in pages
- *
- * returns the number of items freed in pages
- */
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan);
-
-/**
- * ion_pages_sync_for_device - cache flush pages for use with the specified
- * device
- * @dev: the device the pages will be used with
- * @page: the first page to be flushed
- * @size: size in bytes of region to be flushed
- * @dir: direction of dma transfer
- */
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir);
-
-#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
deleted file mode 100644
index ada724aab3d5..000000000000
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_system_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion.h"
-#include "ion_priv.h"
-
-static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
- __GFP_NORETRY) & ~__GFP_WAIT;
-static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
-static const unsigned int orders[] = {8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
-static int order_to_index(unsigned int order)
-{
- int i;
-
- for (i = 0; i < num_orders; i++)
- if (order == orders[i])
- return i;
- BUG();
- return -1;
-}
-
-static inline unsigned int order_to_size(int order)
-{
- return PAGE_SIZE << order;
-}
-
-struct ion_system_heap {
- struct ion_heap heap;
- struct ion_page_pool *pools[0];
-};
-
-static struct page *alloc_buffer_page(struct ion_system_heap *heap,
- struct ion_buffer *buffer,
- unsigned long order)
-{
- bool cached = ion_buffer_cached(buffer);
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
- struct page *page;
-
- if (!cached) {
- page = ion_page_pool_alloc(pool);
- } else {
- gfp_t gfp_flags = low_order_gfp_flags;
-
- if (order > 4)
- gfp_flags = high_order_gfp_flags;
- page = alloc_pages(gfp_flags | __GFP_COMP, order);
- if (!page)
- return NULL;
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
- }
-
- return page;
-}
-
-static void free_buffer_page(struct ion_system_heap *heap,
- struct ion_buffer *buffer, struct page *page)
-{
- unsigned int order = compound_order(page);
- bool cached = ion_buffer_cached(buffer);
-
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-
- ion_page_pool_free(pool, page);
- } else {
- __free_pages(page, order);
- }
-}
-
-
-static struct page *alloc_largest_available(struct ion_system_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size,
- unsigned int max_order)
-{
- struct page *page;
- int i;
-
- for (i = 0; i < num_orders; i++) {
- if (size < order_to_size(orders[i]))
- continue;
- if (max_order < orders[i])
- continue;
-
- page = alloc_buffer_page(heap, buffer, orders[i]);
- if (!page)
- continue;
-
- return page;
- }
-
- return NULL;
-}
-
-static int ion_system_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
-{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- struct sg_table *table;
- struct scatterlist *sg;
- struct list_head pages;
- struct page *page, *tmp_page;
- int i = 0;
- unsigned long size_remaining = PAGE_ALIGN(size);
- unsigned int max_order = orders[0];
-
- if (align > PAGE_SIZE)
- return -EINVAL;
-
- if (size / PAGE_SIZE > totalram_pages / 2)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&pages);
- while (size_remaining > 0) {
- page = alloc_largest_available(sys_heap, buffer, size_remaining,
- max_order);
- if (!page)
- goto free_pages;
- list_add_tail(&page->lru, &pages);
- size_remaining -= PAGE_SIZE << compound_order(page);
- max_order = compound_order(page);
- i++;
- }
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- goto free_pages;
-
- if (sg_alloc_table(table, i, GFP_KERNEL))
- goto free_table;
-
- sg = table->sgl;
- list_for_each_entry_safe(page, tmp_page, &pages, lru) {
- sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
- sg = sg_next(sg);
- list_del(&page->lru);
- }
-
- buffer->priv_virt = table;
- return 0;
-
-free_table:
- kfree(table);
-free_pages:
- list_for_each_entry_safe(page, tmp_page, &pages, lru)
- free_buffer_page(sys_heap, buffer, page);
- return -ENOMEM;
-}
-
-static void ion_system_heap_free(struct ion_buffer *buffer)
-{
- struct ion_system_heap *sys_heap = container_of(buffer->heap,
- struct ion_system_heap,
- heap);
- struct sg_table *table = buffer->sg_table;
- bool cached = ion_buffer_cached(buffer);
- struct scatterlist *sg;
- int i;
-
- /*
-	 * uncached pages come from the page pools; zero them before returning
-	 * for security purposes (other allocations are zeroed at
-	 * alloc time)
- */
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
- ion_heap_buffer_zero(buffer);
-
- for_each_sg(table->sgl, sg, table->nents, i)
- free_buffer_page(sys_heap, buffer, sg_page(sg));
- sg_free_table(table);
- kfree(table);
-}
-
-static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_system_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
- int nr_to_scan)
-{
- struct ion_system_heap *sys_heap;
- int nr_total = 0;
- int i, nr_freed;
- int only_scan = 0;
-
- sys_heap = container_of(heap, struct ion_system_heap, heap);
-
- if (!nr_to_scan)
- only_scan = 1;
-
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
-
- nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
- nr_total += nr_freed;
-
- if (!only_scan) {
- nr_to_scan -= nr_freed;
- /* shrink completed */
- if (nr_to_scan <= 0)
- break;
- }
- }
-
- return nr_total;
-}
-
-static struct ion_heap_ops system_heap_ops = {
- .allocate = ion_system_heap_allocate,
- .free = ion_system_heap_free,
- .map_dma = ion_system_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_heap_map_user,
- .shrink = ion_system_heap_shrink,
-};
-
-static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
- void *unused)
-{
-
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int i;
-
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
-
- seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
- pool->high_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
- pool->low_count, pool->order,
- (PAGE_SIZE << pool->order) * pool->low_count);
- }
- return 0;
-}
-
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
-{
- struct ion_system_heap *heap;
- int i;
-
- heap = kzalloc(sizeof(struct ion_system_heap) +
- sizeof(struct ion_page_pool *) * num_orders,
- GFP_KERNEL);
- if (!heap)
- return ERR_PTR(-ENOMEM);
- heap->heap.ops = &system_heap_ops;
- heap->heap.type = ION_HEAP_TYPE_SYSTEM;
- heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags = low_order_gfp_flags;
-
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto destroy_pools;
- heap->pools[i] = pool;
- }
-
- heap->heap.debug_show = ion_system_heap_debug_show;
- return &heap->heap;
-
-destroy_pools:
- while (i--)
- ion_page_pool_destroy(heap->pools[i]);
- kfree(heap);
- return ERR_PTR(-ENOMEM);
-}
-
-void ion_system_heap_destroy(struct ion_heap *heap)
-{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int i;
-
- for (i = 0; i < num_orders; i++)
- ion_page_pool_destroy(sys_heap->pools[i]);
- kfree(sys_heap);
-}
-
-static int ion_system_contig_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
-{
- int order = get_order(len);
- struct page *page;
- struct sg_table *table;
- unsigned long i;
- int ret;
-
- if (align > (PAGE_SIZE << order))
- return -EINVAL;
-
- page = alloc_pages(low_order_gfp_flags, order);
- if (!page)
- return -ENOMEM;
-
- split_page(page, order);
-
- len = PAGE_ALIGN(len);
- for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
- __free_page(page + i);
-
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table) {
- ret = -ENOMEM;
- goto free_pages;
- }
-
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret)
- goto free_table;
-
- sg_set_page(table->sgl, page, len, 0);
-
- buffer->priv_virt = table;
-
- ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
-
- return 0;
-
-free_table:
- kfree(table);
-free_pages:
- for (i = 0; i < len >> PAGE_SHIFT; i++)
- __free_page(page + i);
-
- return ret;
-}
-
-static void ion_system_contig_heap_free(struct ion_buffer *buffer)
-{
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
- unsigned long i;
-
- for (i = 0; i < pages; i++)
- __free_page(page + i);
- sg_free_table(table);
- kfree(table);
-}
-
-static int ion_system_contig_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- *addr = page_to_phys(page);
- *len = buffer->size;
- return 0;
-}
-
-static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
-static struct ion_heap_ops kmalloc_ops = {
- .allocate = ion_system_contig_heap_allocate,
- .free = ion_system_contig_heap_free,
- .phys = ion_system_contig_heap_phys,
- .map_dma = ion_system_contig_heap_map_dma,
- .unmap_dma = ion_system_contig_heap_unmap_dma,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_heap_map_user,
-};
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
-{
- struct ion_heap *heap;
-
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
- if (!heap)
- return ERR_PTR(-ENOMEM);
- heap->ops = &kmalloc_ops;
- heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
- return heap;
-}
-
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
-{
- kfree(heap);
-}
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
deleted file mode 100644
index b8dcf5a26cc4..000000000000
--- a/drivers/staging/android/ion/ion_test.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "ion-test: " fmt
-
-#include <linux/dma-buf.h>
-#include <linux/dma-direction.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include "ion.h"
-#include "../uapi/ion_test.h"
-
-#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
-
-struct ion_test_device {
- struct miscdevice misc;
-};
-
-struct ion_test_data {
- struct dma_buf *dma_buf;
- struct device *dev;
-};
-
-static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
- void __user *ptr, size_t offset, size_t size, bool write)
-{
- int ret = 0;
- struct dma_buf_attachment *attach;
- struct sg_table *table;
- pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
- enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- struct sg_page_iter sg_iter;
- unsigned long offset_page;
-
- attach = dma_buf_attach(dma_buf, dev);
- if (IS_ERR(attach))
- return PTR_ERR(attach);
-
- table = dma_buf_map_attachment(attach, dir);
- if (IS_ERR(table))
- return PTR_ERR(table);
-
- offset_page = offset >> PAGE_SHIFT;
- offset %= PAGE_SIZE;
-
- for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
- struct page *page = sg_page_iter_page(&sg_iter);
- void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
- size_t to_copy = PAGE_SIZE - offset;
-
- to_copy = min(to_copy, size);
- if (!vaddr) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (write)
- ret = copy_from_user(vaddr + offset, ptr, to_copy);
- else
- ret = copy_to_user(ptr, vaddr + offset, to_copy);
-
- vunmap(vaddr);
- if (ret) {
- ret = -EFAULT;
- goto err;
- }
- size -= to_copy;
- if (!size)
- break;
- ptr += to_copy;
- offset = 0;
- }
-
-err:
- dma_buf_unmap_attachment(attach, table, dir);
- dma_buf_detach(dma_buf, attach);
- return ret;
-}
-
-static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
- size_t offset, size_t size, bool write)
-{
- int ret;
- unsigned long page_offset = offset >> PAGE_SHIFT;
- size_t copy_offset = offset % PAGE_SIZE;
- size_t copy_size = size;
- enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- if (offset > dma_buf->size || size > dma_buf->size - offset)
- return -EINVAL;
-
- ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
- if (ret)
- return ret;
-
- while (copy_size > 0) {
- size_t to_copy;
- void *vaddr = dma_buf_kmap(dma_buf, page_offset);
-
- if (!vaddr)
- goto err;
-
- to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
-
- if (write)
- ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
- else
- ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
-
- dma_buf_kunmap(dma_buf, page_offset, vaddr);
- if (ret) {
- ret = -EFAULT;
- goto err;
- }
-
- copy_size -= to_copy;
- ptr += to_copy;
- page_offset++;
- copy_offset = 0;
- }
-err:
- dma_buf_end_cpu_access(dma_buf, offset, size, dir);
- return ret;
-}
-
-static long ion_test_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct ion_test_data *test_data = filp->private_data;
- int ret = 0;
-
- union {
- struct ion_test_rw_data test_rw;
- } data;
-
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
-
- if (_IOC_DIR(cmd) & _IOC_WRITE)
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
-
- switch (cmd) {
- case ION_IOC_TEST_SET_FD:
- {
- struct dma_buf *dma_buf = NULL;
- int fd = arg;
-
- if (fd >= 0) {
- dma_buf = dma_buf_get((int)arg);
- if (IS_ERR(dma_buf))
- return PTR_ERR(dma_buf);
- }
- if (test_data->dma_buf)
- dma_buf_put(test_data->dma_buf);
- test_data->dma_buf = dma_buf;
- break;
- }
- case ION_IOC_TEST_DMA_MAPPING:
- {
- ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset, data.test_rw.size,
- data.test_rw.write);
- break;
- }
- case ION_IOC_TEST_KERNEL_MAPPING:
- {
- ret = ion_handle_test_kernel(test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset, data.test_rw.size,
- data.test_rw.write);
- break;
- }
- default:
- return -ENOTTY;
- }
-
- if (_IOC_DIR(cmd) & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
- return -EFAULT;
- }
- return ret;
-}
-
-static int ion_test_open(struct inode *inode, struct file *file)
-{
- struct ion_test_data *data;
- struct miscdevice *miscdev = file->private_data;
-
- data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->dev = miscdev->parent;
-
- file->private_data = data;
-
- return 0;
-}
-
-static int ion_test_release(struct inode *inode, struct file *file)
-{
- struct ion_test_data *data = file->private_data;
-
- kfree(data);
-
- return 0;
-}
-
-static const struct file_operations ion_test_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = ion_test_ioctl,
- .compat_ioctl = ion_test_ioctl,
- .open = ion_test_open,
- .release = ion_test_release,
-};
-
-static int __init ion_test_probe(struct platform_device *pdev)
-{
- int ret;
- struct ion_test_device *testdev;
-
- testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
- GFP_KERNEL);
- if (!testdev)
- return -ENOMEM;
-
- testdev->misc.minor = MISC_DYNAMIC_MINOR;
- testdev->misc.name = "ion-test";
- testdev->misc.fops = &ion_test_fops;
- testdev->misc.parent = &pdev->dev;
- ret = misc_register(&testdev->misc);
- if (ret) {
- pr_err("failed to register misc device.\n");
- return ret;
- }
-
- platform_set_drvdata(pdev, testdev);
-
- return 0;
-}
-
-static int ion_test_remove(struct platform_device *pdev)
-{
- struct ion_test_device *testdev;
-
- testdev = platform_get_drvdata(pdev);
- if (!testdev)
- return -ENODATA;
-
- misc_deregister(&testdev->misc);
- return 0;
-}
-
-static struct platform_device *ion_test_pdev;
-static struct platform_driver ion_test_platform_driver = {
- .remove = ion_test_remove,
- .driver = {
- .name = "ion-test",
- },
-};
-
-static int __init ion_test_init(void)
-{
- ion_test_pdev = platform_device_register_simple("ion-test",
- -1, NULL, 0);
- if (!ion_test_pdev)
- return -ENODEV;
-
- return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
-}
-
-static void __exit ion_test_exit(void)
-{
- platform_driver_unregister(&ion_test_platform_driver);
- platform_device_unregister(ion_test_pdev);
-}
-
-module_init(ion_test_init);
-module_exit(ion_test_exit);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
deleted file mode 100644
index 11cd003fb08f..000000000000
--- a/drivers/staging/android/ion/tegra/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
deleted file mode 100644
index 4d3c516cc15e..000000000000
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * drivers/gpu/tegra/tegra_ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include "../ion.h"
-#include "../ion_priv.h"
-
-static struct ion_device *idev;
-static int num_heaps;
-static struct ion_heap **heaps;
-
-static int tegra_ion_probe(struct platform_device *pdev)
-{
- struct ion_platform_data *pdata = pdev->dev.platform_data;
- int err;
- int i;
-
- num_heaps = pdata->nr;
-
- heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_heap *) * pdata->nr,
- GFP_KERNEL);
-
- idev = ion_device_create(NULL);
- if (IS_ERR_OR_NULL(idev))
- return PTR_ERR(idev);
-
- /* create the heaps as specified in the board file */
- for (i = 0; i < num_heaps; i++) {
- struct ion_platform_heap *heap_data = &pdata->heaps[i];
-
- heaps[i] = ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err;
- }
- ion_device_add_heap(idev, heaps[i]);
- }
- platform_set_drvdata(pdev, idev);
- return 0;
-err:
- for (i = 0; i < num_heaps; ++i)
- ion_heap_destroy(heaps[i]);
- return err;
-}
-
-static int tegra_ion_remove(struct platform_device *pdev)
-{
- struct ion_device *idev = platform_get_drvdata(pdev);
- int i;
-
- ion_device_destroy(idev);
- for (i = 0; i < num_heaps; i++)
- ion_heap_destroy(heaps[i]);
- return 0;
-}
-
-static struct platform_driver ion_driver = {
- .probe = tegra_ion_probe,
- .remove = tegra_ion_remove,
- .driver = { .name = "ion-tegra" }
-};
-
-module_platform_driver(ion_driver);
-
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
deleted file mode 100644
index 872bd603fd0d..000000000000
--- a/drivers/staging/android/lowmemorykiller.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/* drivers/misc/lowmemorykiller.c
- *
- * The lowmemorykiller driver lets user-space specify a set of memory thresholds
- * where processes with a range of oom_score_adj values will get killed. Specify
- * the minimum oom_score_adj values in
- * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
- * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
- * separated list of numbers in ascending order.
- *
- * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
- * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
- * processes with an oom_score_adj value of 8 or higher when the free memory
- * drops below 4096 pages, and kill processes with an oom_score_adj value of 0 or
- * higher when the free memory drops below 1024 pages.
- *
- * The driver considers memory used for caches to be free, but if a large
- * percentage of the cached memory is locked this can be very inaccurate
- * and processes may not get killed until the normal oom killer is triggered.
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/oom.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-#include <linux/rcupdate.h>
-#include <linux/profile.h>
-#include <linux/notifier.h>
-
-static uint32_t lowmem_debug_level = 1;
-static short lowmem_adj[6] = {
- 0,
- 1,
- 6,
- 12,
-};
-static int lowmem_adj_size = 4;
-static int lowmem_minfree[6] = {
- 3 * 512, /* 6MB */
- 2 * 1024, /* 8MB */
- 4 * 1024, /* 16MB */
- 16 * 1024, /* 64MB */
-};
-static int lowmem_minfree_size = 4;
-
-static unsigned long lowmem_deathpending_timeout;
-
-#define lowmem_print(level, x...) \
- do { \
- if (lowmem_debug_level >= (level)) \
- pr_info(x); \
- } while (0)
-
-static unsigned long lowmem_count(struct shrinker *s,
- struct shrink_control *sc)
-{
- return global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_ANON) +
- global_page_state(NR_INACTIVE_FILE);
-}
-
-static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
-{
- struct task_struct *tsk;
- struct task_struct *selected = NULL;
- unsigned long rem = 0;
- int tasksize;
- int i;
- short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
- int selected_tasksize = 0;
- short selected_oom_score_adj;
- int array_size = ARRAY_SIZE(lowmem_adj);
- int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
- int other_file = global_page_state(NR_FILE_PAGES) -
- global_page_state(NR_SHMEM) -
- total_swapcache_pages();
-
- if (lowmem_adj_size < array_size)
- array_size = lowmem_adj_size;
- if (lowmem_minfree_size < array_size)
- array_size = lowmem_minfree_size;
- for (i = 0; i < array_size; i++) {
- if (other_free < lowmem_minfree[i] &&
- other_file < lowmem_minfree[i]) {
- min_score_adj = lowmem_adj[i];
- break;
- }
- }
-
- lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
- sc->nr_to_scan, sc->gfp_mask, other_free,
- other_file, min_score_adj);
-
- if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
- lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
- sc->nr_to_scan, sc->gfp_mask);
- return 0;
- }
-
- selected_oom_score_adj = min_score_adj;
-
- rcu_read_lock();
- for_each_process(tsk) {
- struct task_struct *p;
- short oom_score_adj;
-
- if (tsk->flags & PF_KTHREAD)
- continue;
-
- p = find_lock_task_mm(tsk);
- if (!p)
- continue;
-
- if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
- time_before_eq(jiffies, lowmem_deathpending_timeout)) {
- task_unlock(p);
- rcu_read_unlock();
- return 0;
- }
- oom_score_adj = p->signal->oom_score_adj;
- if (oom_score_adj < min_score_adj) {
- task_unlock(p);
- continue;
- }
- tasksize = get_mm_rss(p->mm);
- task_unlock(p);
- if (tasksize <= 0)
- continue;
- if (selected) {
- if (oom_score_adj < selected_oom_score_adj)
- continue;
- if (oom_score_adj == selected_oom_score_adj &&
- tasksize <= selected_tasksize)
- continue;
- }
- selected = p;
- selected_tasksize = tasksize;
- selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
- p->pid, p->comm, oom_score_adj, tasksize);
- }
- if (selected) {
- task_lock(selected);
- if (!selected->mm) {
-			/* Already exited, cannot do mark_oom_victim() */
- task_unlock(selected);
- goto out;
- }
- /*
- * FIXME: lowmemorykiller shouldn't abuse global OOM killer
- * infrastructure. There is no real reason why the selected
- * task should have access to the memory reserves.
- */
- mark_oom_victim(selected);
- task_unlock(selected);
- lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
- selected->pid, selected->comm,
- selected_oom_score_adj, selected_tasksize);
- lowmem_deathpending_timeout = jiffies + HZ;
- send_sig(SIGKILL, selected, 0);
- rem += selected_tasksize;
- }
-out:
- lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
- sc->nr_to_scan, sc->gfp_mask, rem);
- rcu_read_unlock();
- return rem;
-}
-
-static struct shrinker lowmem_shrinker = {
- .scan_objects = lowmem_scan,
- .count_objects = lowmem_count,
- .seeks = DEFAULT_SEEKS * 16
-};
-
-static int __init lowmem_init(void)
-{
- register_shrinker(&lowmem_shrinker);
- return 0;
-}
-
-static void __exit lowmem_exit(void)
-{
- unregister_shrinker(&lowmem_shrinker);
-}
-
-module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
-module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
- S_IRUGO | S_IWUSR);
-module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
- S_IRUGO | S_IWUSR);
-module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
-
-module_init(lowmem_init);
-module_exit(lowmem_exit);
-
-MODULE_LICENSE("GPL");
-
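The thresholds described in the header comment above are ordinary module parameters under /sys/module/lowmemorykiller/parameters/, so they can be retuned at runtime. A minimal userspace sketch, assuming the module is loaded and the caller may write the parameter files:

/* Minimal sketch: applies the example thresholds from the comment at
 * the top of lowmemorykiller.c (adj in oom_score_adj units, minfree
 * in pages). Paths are the standard module-parameter locations. */
#include <stdio.h>
#include <stdlib.h>

static void write_param(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s\n", value);
	fclose(f);
}

int main(void)
{
	/* kill adj >= 0 below 1024 free pages, adj >= 8 below 4096 */
	write_param("/sys/module/lowmemorykiller/parameters/adj", "0,8");
	write_param("/sys/module/lowmemorykiller/parameters/minfree",
		    "1024,4096");
	return 0;
}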
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
deleted file mode 100644
index 29b5c3567180..000000000000
--- a/drivers/staging/android/sw_sync.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * drivers/base/sw_sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-
-#include "sw_sync.h"
-
-static int sw_sync_cmp(u32 a, u32 b)
-{
- if (a == b)
- return 0;
-
- return ((s32)a - (s32)b) < 0 ? -1 : 1;
-}
-
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
-{
- struct sw_sync_pt *pt;
-
- pt = (struct sw_sync_pt *)
- sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
-
- pt->value = value;
-
- return (struct sync_pt *)pt;
-}
-EXPORT_SYMBOL(sw_sync_pt_create);
-
-static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
-
- return (struct sync_pt *)sw_sync_pt_create(obj, pt->value);
-}
-
-static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
-
- return sw_sync_cmp(obj->value, pt->value) >= 0;
-}
-
-static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
-{
- struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
- struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
-
- return sw_sync_cmp(pt_a->value, pt_b->value);
-}
-
-static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
- void *data, int size)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
- if (size < sizeof(pt->value))
- return -ENOMEM;
-
- memcpy(data, &pt->value, sizeof(pt->value));
-
- return sizeof(pt->value);
-}
-
-static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
- char *str, int size)
-{
- struct sw_sync_timeline *timeline =
- (struct sw_sync_timeline *)sync_timeline;
- snprintf(str, size, "%d", timeline->value);
-}
-
-static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
- char *str, int size)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
- snprintf(str, size, "%d", pt->value);
-}
-
-static struct sync_timeline_ops sw_sync_timeline_ops = {
- .driver_name = "sw_sync",
- .dup = sw_sync_pt_dup,
- .has_signaled = sw_sync_pt_has_signaled,
- .compare = sw_sync_pt_compare,
- .fill_driver_data = sw_sync_fill_driver_data,
- .timeline_value_str = sw_sync_timeline_value_str,
- .pt_value_str = sw_sync_pt_value_str,
-};
-
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
-{
- struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
- sync_timeline_create(&sw_sync_timeline_ops,
- sizeof(struct sw_sync_timeline),
- name);
-
- return obj;
-}
-EXPORT_SYMBOL(sw_sync_timeline_create);
-
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
-{
- obj->value += inc;
-
- sync_timeline_signal(&obj->obj);
-}
-EXPORT_SYMBOL(sw_sync_timeline_inc);
-
-#ifdef CONFIG_SW_SYNC_USER
-/* *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync creates a new sync obj */
-static int sw_sync_open(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj;
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, current);
-
- obj = sw_sync_timeline_create(task_comm);
- if (!obj)
- return -ENOMEM;
-
- file->private_data = obj;
-
- return 0;
-}
-
-static int sw_sync_release(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- sync_timeline_destroy(&obj->obj);
- return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_pt *pt;
- struct sync_fence *fence;
- struct sw_sync_create_fence_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err;
- }
-
- pt = sw_sync_pt_create(obj, data.value);
- if (!pt) {
- err = -ENOMEM;
- goto err;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence = sync_fence_create(data.name, pt);
- if (!fence) {
- sync_pt_free(pt);
- err = -ENOMEM;
- goto err;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- sync_fence_put(fence);
- err = -EFAULT;
- goto err;
- }
-
- sync_fence_install(fence, fd);
-
- return 0;
-
-err:
- put_unused_fd(fd);
- return err;
-}
-
-static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
- u32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- sw_sync_timeline_inc(obj, value);
-
- return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- switch (cmd) {
- case SW_SYNC_IOC_CREATE_FENCE:
- return sw_sync_ioctl_create_fence(obj, arg);
-
- case SW_SYNC_IOC_INC:
- return sw_sync_ioctl_inc(obj, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sw_sync_fops = {
- .owner = THIS_MODULE,
- .open = sw_sync_open,
- .release = sw_sync_release,
- .unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
-};
-
-static struct miscdevice sw_sync_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sw_sync",
- .fops = &sw_sync_fops,
-};
-
-static int __init sw_sync_device_init(void)
-{
- return misc_register(&sw_sync_dev);
-}
-
-static void __exit sw_sync_device_remove(void)
-{
- misc_deregister(&sw_sync_dev);
-}
-
-module_init(sw_sync_device_init);
-module_exit(sw_sync_device_remove);
-
-#endif /* CONFIG_SW_SYNC_USER */
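When CONFIG_SW_SYNC_USER is enabled, the two ioctls above are the whole /dev/sw_sync interface. A hedged userspace sketch follows; struct sw_sync_create_fence_data's fields (value, name, fence) are inferred from the copy_from_user/copy_to_user calls above, and the uapi/sw_sync.h include path is an assumption.

/* Hedged test sketch: create a fence at timeline value 1 on
 * /dev/sw_sync, then increment the timeline so the fence signals.
 * SW_SYNC_IOC_* and struct sw_sync_create_fence_data come from the
 * staging uapi/sw_sync.h header; its include path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

#include "uapi/sw_sync.h"	/* assumed header location */

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int timeline_fd, fence_fd;

	timeline_fd = open("/dev/sw_sync", O_RDWR);
	if (timeline_fd < 0) {
		perror("open /dev/sw_sync");
		return 1;
	}

	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "example-fence", sizeof(data.name) - 1);
	if (ioctl(timeline_fd, SW_SYNC_IOC_CREATE_FENCE, &data) < 0) {
		perror("SW_SYNC_IOC_CREATE_FENCE");
		return 1;
	}
	fence_fd = data.fence;

	/* advance the timeline past the fence's value: the fence signals */
	if (ioctl(timeline_fd, SW_SYNC_IOC_INC, &inc) < 0)
		perror("SW_SYNC_IOC_INC");

	close(fence_fd);
	close(timeline_fd);
	return 0;
}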
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
deleted file mode 100644
index c87ae9ebf267..000000000000
--- a/drivers/staging/android/sw_sync.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * include/linux/sw_sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SW_SYNC_H
-#define _LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kconfig.h>
-#include "sync.h"
-#include "uapi/sw_sync.h"
-
-struct sw_sync_timeline {
- struct sync_timeline obj;
-
- u32 value;
-};
-
-struct sw_sync_pt {
- struct sync_pt pt;
-
- u32 value;
-};
-
-#if IS_ENABLED(CONFIG_SW_SYNC)
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
-
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-#else
-static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
-{
- return NULL;
-}
-
-static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
-{
-}
-
-static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
- u32 value)
-{
- return NULL;
-}
-#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
-
-#endif /* _LINUX_SW_SYNC_H */
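For in-kernel users, the three exported helpers declared above are the whole API. A hedged sketch of how a driver could back a hardware queue with an sw_sync timeline; the job model and names are invented for illustration.

/* Hedged in-kernel sketch using only the sw_sync API declared above.
 * Error handling is minimal and the "job" model is illustrative. */
#include <linux/errno.h>
#include <linux/kernel.h>

#include "sw_sync.h"

static struct sw_sync_timeline *example_timeline;
static u32 example_next_value;

static int example_init_timeline(void)
{
	example_timeline = sw_sync_timeline_create("example-hw");
	return example_timeline ? 0 : -ENOMEM;
}

/* returns a sync_pt that will signal when this job completes */
static struct sync_pt *example_queue_job(void)
{
	return sw_sync_pt_create(example_timeline, ++example_next_value);
}

/* called from the completion path: signals the oldest pending point */
static void example_job_done(void)
{
	sw_sync_timeline_inc(example_timeline, 1);
}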
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
deleted file mode 100644
index f83e00c78051..000000000000
--- a/drivers/staging/android/sync.c
+++ /dev/null
@@ -1,729 +0,0 @@
-/*
- * drivers/base/sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-
-#include "sync.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace/sync.h"
-
-static const struct fence_ops android_fence_ops;
-static const struct file_operations sync_fence_fops;
-
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
- int size, const char *name)
-{
- struct sync_timeline *obj;
-
- if (size < sizeof(struct sync_timeline))
- return NULL;
-
- obj = kzalloc(size, GFP_KERNEL);
- if (obj == NULL)
- return NULL;
-
- kref_init(&obj->kref);
- obj->ops = ops;
- obj->context = fence_context_alloc(1);
- strlcpy(obj->name, name, sizeof(obj->name));
-
- INIT_LIST_HEAD(&obj->child_list_head);
- INIT_LIST_HEAD(&obj->active_list_head);
- spin_lock_init(&obj->child_list_lock);
-
- sync_timeline_debug_add(obj);
-
- return obj;
-}
-EXPORT_SYMBOL(sync_timeline_create);
-
-static void sync_timeline_free(struct kref *kref)
-{
- struct sync_timeline *obj =
- container_of(kref, struct sync_timeline, kref);
-
- sync_timeline_debug_remove(obj);
-
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
- kfree(obj);
-}
-
-static void sync_timeline_get(struct sync_timeline *obj)
-{
- kref_get(&obj->kref);
-}
-
-static void sync_timeline_put(struct sync_timeline *obj)
-{
- kref_put(&obj->kref, sync_timeline_free);
-}
-
-void sync_timeline_destroy(struct sync_timeline *obj)
-{
- obj->destroyed = true;
- /*
- * Ensure timeline is marked as destroyed before
- * changing timeline's fences status.
- */
- smp_wmb();
-
- /*
- * signal any children that their parent is going away.
- */
- sync_timeline_signal(obj);
- sync_timeline_put(obj);
-}
-EXPORT_SYMBOL(sync_timeline_destroy);
-
-void sync_timeline_signal(struct sync_timeline *obj)
-{
- unsigned long flags;
- LIST_HEAD(signaled_pts);
- struct sync_pt *pt, *next;
-
- trace_sync_timeline(obj);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
-
- list_for_each_entry_safe(pt, next, &obj->active_list_head,
- active_list) {
- if (fence_is_signaled_locked(&pt->base))
- list_del_init(&pt->active_list);
- }
-
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-EXPORT_SYMBOL(sync_timeline_signal);
-
-struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
-{
- unsigned long flags;
- struct sync_pt *pt;
-
- if (size < sizeof(struct sync_pt))
- return NULL;
-
- pt = kzalloc(size, GFP_KERNEL);
- if (pt == NULL)
- return NULL;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- sync_timeline_get(obj);
- fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
- obj->context, ++obj->value);
- list_add_tail(&pt->child_list, &obj->child_list_head);
- INIT_LIST_HEAD(&pt->active_list);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- return pt;
-}
-EXPORT_SYMBOL(sync_pt_create);
-
-void sync_pt_free(struct sync_pt *pt)
-{
- fence_put(&pt->base);
-}
-EXPORT_SYMBOL(sync_pt_free);
-
-static struct sync_fence *sync_fence_alloc(int size, const char *name)
-{
- struct sync_fence *fence;
-
- fence = kzalloc(size, GFP_KERNEL);
- if (fence == NULL)
- return NULL;
-
- fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
- fence, 0);
- if (IS_ERR(fence->file))
- goto err;
-
- kref_init(&fence->kref);
- strlcpy(fence->name, name, sizeof(fence->name));
-
- init_waitqueue_head(&fence->wq);
-
- return fence;
-
-err:
- kfree(fence);
- return NULL;
-}
-
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
-{
- struct sync_fence_cb *check;
- struct sync_fence *fence;
-
- check = container_of(cb, struct sync_fence_cb, cb);
- fence = check->fence;
-
- if (atomic_dec_and_test(&fence->status))
- wake_up_all(&fence->wq);
-}
-
-/* TODO: implement a create which takes more than one sync_pt */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
-{
- struct sync_fence *fence;
-
- fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
- if (fence == NULL)
- return NULL;
-
- fence->num_fences = 1;
- atomic_set(&fence->status, 1);
-
- fence->cbs[0].sync_pt = &pt->base;
- fence->cbs[0].fence = fence;
- if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
- fence_check_cb_func))
- atomic_dec(&fence->status);
-
- sync_fence_debug_add(fence);
-
- return fence;
-}
-EXPORT_SYMBOL(sync_fence_create);
-
-struct sync_fence *sync_fence_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (file == NULL)
- return NULL;
-
- if (file->f_op != &sync_fence_fops)
- goto err;
-
- return file->private_data;
-
-err:
- fput(file);
- return NULL;
-}
-EXPORT_SYMBOL(sync_fence_fdget);
-
-void sync_fence_put(struct sync_fence *fence)
-{
- fput(fence->file);
-}
-EXPORT_SYMBOL(sync_fence_put);
-
-void sync_fence_install(struct sync_fence *fence, int fd)
-{
- fd_install(fd, fence->file);
-}
-EXPORT_SYMBOL(sync_fence_install);
-
-static void sync_fence_add_pt(struct sync_fence *fence,
- int *i, struct fence *pt)
-{
- fence->cbs[*i].sync_pt = pt;
- fence->cbs[*i].fence = fence;
-
- if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
- fence_get(pt);
- (*i)++;
- }
-}
-
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b)
-{
- int num_fences = a->num_fences + b->num_fences;
- struct sync_fence *fence;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
-
- fence = sync_fence_alloc(size, name);
- if (fence == NULL)
- return NULL;
-
- atomic_set(&fence->status, num_fences);
-
- /*
- * Assume sync_fence a and b are both ordered and have no
- * duplicates with the same context.
- *
- * If a sync_fence can only be created with sync_fence_merge
- * and sync_fence_create, this is a reasonable assumption.
- */
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].sync_pt;
- struct fence *pt_b = b->cbs[i_b].sync_pt;
-
- if (pt_a->context < pt_b->context) {
- sync_fence_add_pt(fence, &i, pt_a);
-
- i_a++;
- } else if (pt_a->context > pt_b->context) {
- sync_fence_add_pt(fence, &i, pt_b);
-
- i_b++;
- } else {
- if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_fence_add_pt(fence, &i, pt_a);
- else
- sync_fence_add_pt(fence, &i, pt_b);
-
- i_a++;
- i_b++;
- }
- }
-
- for (; i_a < a->num_fences; i_a++)
- sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
-
- for (; i_b < b->num_fences; i_b++)
- sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
-
- if (num_fences > i)
- atomic_sub(num_fences - i, &fence->status);
- fence->num_fences = i;
-
- sync_fence_debug_add(fence);
- return fence;
-}
-EXPORT_SYMBOL(sync_fence_merge);
-
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
- int wake_flags, void *key)
-{
- struct sync_fence_waiter *wait;
-
- wait = container_of(curr, struct sync_fence_waiter, work);
- list_del_init(&wait->work.task_list);
-
- wait->callback(wait->work.private, wait);
- return 1;
-}
-
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- int err = atomic_read(&fence->status);
- unsigned long flags;
-
- if (err < 0)
- return err;
-
- if (!err)
- return 1;
-
- init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
- waiter->work.private = fence;
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- err = atomic_read(&fence->status);
- if (err > 0)
- __add_wait_queue_tail(&fence->wq, &waiter->work);
- spin_unlock_irqrestore(&fence->wq.lock, flags);
-
- if (err < 0)
- return err;
-
- return !err;
-}
-EXPORT_SYMBOL(sync_fence_wait_async);
-
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- if (!list_empty(&waiter->work.task_list))
- list_del_init(&waiter->work.task_list);
- else
- ret = -ENOENT;
- spin_unlock_irqrestore(&fence->wq.lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(sync_fence_cancel_async);
-
-int sync_fence_wait(struct sync_fence *fence, long timeout)
-{
- long ret;
- int i;
-
- if (timeout < 0)
- timeout = MAX_SCHEDULE_TIMEOUT;
- else
- timeout = msecs_to_jiffies(timeout);
-
- trace_sync_wait(fence, 1);
- for (i = 0; i < fence->num_fences; ++i)
- trace_sync_pt(fence->cbs[i].sync_pt);
- ret = wait_event_interruptible_timeout(fence->wq,
- atomic_read(&fence->status) <= 0,
- timeout);
- trace_sync_wait(fence, 0);
-
- if (ret < 0) {
- return ret;
- } else if (ret == 0) {
- if (timeout) {
- pr_info("fence timeout on [%p] after %dms\n", fence,
- jiffies_to_msecs(timeout));
- sync_dump();
- }
- return -ETIME;
- }
-
- ret = atomic_read(&fence->status);
- if (ret) {
- pr_info("fence error %ld on [%p]\n", ret, fence);
- sync_dump();
- }
- return ret;
-}
-EXPORT_SYMBOL(sync_fence_wait);
-
-static const char *android_fence_get_driver_name(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- return parent->ops->driver_name;
-}
-
-static const char *android_fence_get_timeline_name(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- return parent->name;
-}
-
-static void android_fence_release(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
- unsigned long flags;
-
- spin_lock_irqsave(fence->lock, flags);
- list_del(&pt->child_list);
- if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
- list_del(&pt->active_list);
- spin_unlock_irqrestore(fence->lock, flags);
-
- if (parent->ops->free_pt)
- parent->ops->free_pt(pt);
-
- sync_timeline_put(parent);
- fence_free(&pt->base);
-}
-
-static bool android_fence_signaled(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
- int ret;
-
- ret = parent->ops->has_signaled(pt);
- if (ret < 0)
- fence->status = ret;
- return ret;
-}
-
-static bool android_fence_enable_signaling(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (android_fence_signaled(fence))
- return false;
-
- list_add_tail(&pt->active_list, &parent->active_list_head);
- return true;
-}
-
-static int android_fence_fill_driver_data(struct fence *fence,
- void *data, int size)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (!parent->ops->fill_driver_data)
- return 0;
- return parent->ops->fill_driver_data(pt, data, size);
-}
-
-static void android_fence_value_str(struct fence *fence,
- char *str, int size)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (!parent->ops->pt_value_str) {
- if (size)
- *str = 0;
- return;
- }
- parent->ops->pt_value_str(pt, str, size);
-}
-
-static void android_fence_timeline_value_str(struct fence *fence,
- char *str, int size)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (!parent->ops->timeline_value_str) {
- if (size)
- *str = 0;
- return;
- }
- parent->ops->timeline_value_str(parent, str, size);
-}
-
-static const struct fence_ops android_fence_ops = {
- .get_driver_name = android_fence_get_driver_name,
- .get_timeline_name = android_fence_get_timeline_name,
- .enable_signaling = android_fence_enable_signaling,
- .signaled = android_fence_signaled,
- .wait = fence_default_wait,
- .release = android_fence_release,
- .fill_driver_data = android_fence_fill_driver_data,
- .fence_value_str = android_fence_value_str,
- .timeline_value_str = android_fence_timeline_value_str,
-};
-
-static void sync_fence_free(struct kref *kref)
-{
- struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
- int i, status = atomic_read(&fence->status);
-
- for (i = 0; i < fence->num_fences; ++i) {
- if (status)
- fence_remove_callback(fence->cbs[i].sync_pt,
- &fence->cbs[i].cb);
- fence_put(fence->cbs[i].sync_pt);
- }
-
- kfree(fence);
-}
-
-static int sync_fence_release(struct inode *inode, struct file *file)
-{
- struct sync_fence *fence = file->private_data;
-
- sync_fence_debug_remove(fence);
-
- kref_put(&fence->kref, sync_fence_free);
- return 0;
-}
-
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
-{
- struct sync_fence *fence = file->private_data;
- int status;
-
- poll_wait(file, &fence->wq, wait);
-
- status = atomic_read(&fence->status);
-
- if (!status)
- return POLLIN;
- else if (status < 0)
- return POLLERR;
- return 0;
-}
-
-static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
-{
- __s32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- return sync_fence_wait(fence, value);
-}
-
-static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_fence *fence2, *fence3;
- struct sync_merge_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fd;
- }
-
- fence2 = sync_fence_fdget(data.fd2);
- if (fence2 == NULL) {
- err = -ENOENT;
- goto err_put_fd;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence3 = sync_fence_merge(data.name, fence, fence2);
- if (fence3 == NULL) {
- err = -ENOMEM;
- goto err_put_fence2;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fence3;
- }
-
- sync_fence_install(fence3, fd);
- sync_fence_put(fence2);
- return 0;
-
-err_put_fence3:
- sync_fence_put(fence3);
-
-err_put_fence2:
- sync_fence_put(fence2);
-
-err_put_fd:
- put_unused_fd(fd);
- return err;
-}
-
-static int sync_fill_pt_info(struct fence *fence, void *data, int size)
-{
- struct sync_pt_info *info = data;
- int ret;
-
- if (size < sizeof(struct sync_pt_info))
- return -ENOMEM;
-
- info->len = sizeof(struct sync_pt_info);
-
- if (fence->ops->fill_driver_data) {
- ret = fence->ops->fill_driver_data(fence, info->driver_data,
- size - sizeof(*info));
- if (ret < 0)
- return ret;
-
- info->len += ret;
- }
-
- strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
- sizeof(info->obj_name));
- strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
- sizeof(info->driver_name));
- if (fence_is_signaled(fence))
- info->status = fence->status >= 0 ? 1 : fence->status;
- else
- info->status = 0;
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
-
- return info->len;
-}
-
-static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
- unsigned long arg)
-{
- struct sync_fence_info_data *data;
- __u32 size;
- __u32 len = 0;
- int ret, i;
-
- if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
- return -EFAULT;
-
- if (size < sizeof(struct sync_fence_info_data))
- return -EINVAL;
-
- if (size > 4096)
- size = 4096;
-
- data = kzalloc(size, GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- strlcpy(data->name, fence->name, sizeof(data->name));
- data->status = atomic_read(&fence->status);
- if (data->status >= 0)
- data->status = !data->status;
-
- len = sizeof(struct sync_fence_info_data);
-
- for (i = 0; i < fence->num_fences; ++i) {
- struct fence *pt = fence->cbs[i].sync_pt;
-
- ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
-
- if (ret < 0)
- goto out;
-
- len += ret;
- }
-
- data->len = len;
-
- if (copy_to_user((void __user *)arg, data, len))
- ret = -EFAULT;
- else
- ret = 0;
-
-out:
- kfree(data);
-
- return ret;
-}
-
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sync_fence *fence = file->private_data;
-
- switch (cmd) {
- case SYNC_IOC_WAIT:
- return sync_fence_ioctl_wait(fence, arg);
-
- case SYNC_IOC_MERGE:
- return sync_fence_ioctl_merge(fence, arg);
-
- case SYNC_IOC_FENCE_INFO:
- return sync_fence_ioctl_fence_info(fence, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sync_fence_fops = {
- .release = sync_fence_release,
- .poll = sync_fence_poll,
- .unlocked_ioctl = sync_fence_ioctl,
- .compat_ioctl = sync_fence_ioctl,
-};
-
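The fence file descriptors produced above are consumed through the three ioctls at the end of this file. A hedged userspace sketch that merges two fence fds and waits on the result; struct sync_merge_data's fields (fd2, name, fence) are inferred from the ioctl handler above, and the uapi/sync.h include path is an assumption.

/* Hedged userspace sketch: merge two sync fence fds and wait up to
 * 1000 ms on the result. SYNC_IOC_* and struct sync_merge_data come
 * from the staging uapi/sync.h header, whose include path is assumed. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "uapi/sync.h"	/* assumed header location */

int wait_on_both(int fence_a, int fence_b)
{
	struct sync_merge_data merge;
	int merged, timeout_ms = 1000;

	memset(&merge, 0, sizeof(merge));
	merge.fd2 = fence_b;
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);
	if (ioctl(fence_a, SYNC_IOC_MERGE, &merge) < 0) {
		perror("SYNC_IOC_MERGE");
		return -1;
	}
	merged = merge.fence;

	/* blocks until every sync_pt in the merged fence has signaled */
	if (ioctl(merged, SYNC_IOC_WAIT, &timeout_ms) < 0)
		perror("SYNC_IOC_WAIT");

	close(merged);
	return 0;
}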
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
deleted file mode 100644
index 61f8a3aede96..000000000000
--- a/drivers/staging/android/sync.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * include/linux/sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SYNC_H
-#define _LINUX_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kref.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/fence.h>
-
-#include "uapi/sync.h"
-
-struct sync_timeline;
-struct sync_pt;
-struct sync_fence;
-
-/**
- * struct sync_timeline_ops - sync object implementation ops
- * @driver_name: name of the implementation
- * @dup: duplicate a sync_pt
- * @has_signaled: returns:
- * 1 if pt has signaled
- * 0 if pt has not signaled
- * <0 on error
- * @compare: returns:
- * 1 if b will signal before a
- * 0 if a and b will signal at the same time
- * -1 if a will signal before b
- * @free_pt: called before sync_pt is freed
- * @release_obj: called before sync_timeline is freed
- * @fill_driver_data: write implementation specific driver data to data.
- * should return an error if there is not enough room
- * as specified by size. This information is returned
- * to userspace by SYNC_IOC_FENCE_INFO.
- * @timeline_value_str: fill str with the value of the sync_timeline's counter
- * @pt_value_str: fill str with the value of the sync_pt
- */
-struct sync_timeline_ops {
- const char *driver_name;
-
- /* required */
- struct sync_pt * (*dup)(struct sync_pt *pt);
-
- /* required */
- int (*has_signaled)(struct sync_pt *pt);
-
- /* required */
- int (*compare)(struct sync_pt *a, struct sync_pt *b);
-
- /* optional */
- void (*free_pt)(struct sync_pt *sync_pt);
-
- /* optional */
- void (*release_obj)(struct sync_timeline *sync_timeline);
-
- /* optional */
- int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
-
- /* optional */
- void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
- int size);
-
- /* optional */
- void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
-};
-
-/**
- * struct sync_timeline - sync object
- * @kref: reference count on the sync_timeline.
- * @ops: ops that define the implementation of the sync_timeline
- * @name: name of the sync_timeline. Useful for debugging
- * @destroyed: set when sync_timeline is destroyed
- * @child_list_head: list of children sync_pts for this sync_timeline
- * @child_list_lock: lock protecting @child_list_head, destroyed, and
- * sync_pt.status
- * @active_list_head: list of active (unsignaled/errored) sync_pts
- * @sync_timeline_list: membership in global sync_timeline_list
- */
-struct sync_timeline {
- struct kref kref;
- const struct sync_timeline_ops *ops;
- char name[32];
-
- /* protected by child_list_lock */
- bool destroyed;
- int context, value;
-
- struct list_head child_list_head;
- spinlock_t child_list_lock;
-
- struct list_head active_list_head;
-
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_timeline_list;
-#endif
-};
-
-/**
- * struct sync_pt - sync point
- * @base: base fence object; its status and timestamp record whether and
- * when the point signaled
- * @child_list: membership in sync_timeline.child_list_head
- * @active_list: membership in sync_timeline.active_list_head
- */
-struct sync_pt {
- struct fence base;
-
- struct list_head child_list;
- struct list_head active_list;
-};
-
-static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
-{
- return container_of(pt->base.lock, struct sync_timeline,
- child_list_lock);
-}
-
-struct sync_fence_cb {
- struct fence_cb cb;
- struct fence *sync_pt;
- struct sync_fence *fence;
-};
-
-/**
- * struct sync_fence - sync fence
- * @file: file representing this fence
- * @kref: reference count on fence.
- * @name: name of sync_fence. Useful for debugging
- * @num_fences: number of sync_pts in the fence
- * @cbs: per-sync_pt fence callbacks; immutable once the fence is created
- * @status: 0: signaled, >0: active, <0: error
- *
- * @wq: wait queue for fence signaling
- * @sync_fence_list: membership in global fence list
- */
-struct sync_fence {
- struct file *file;
- struct kref kref;
- char name[32];
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_fence_list;
-#endif
- int num_fences;
-
- wait_queue_head_t wq;
- atomic_t status;
-
- struct sync_fence_cb cbs[];
-};
-
-struct sync_fence_waiter;
-typedef void (*sync_callback_t)(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
- * @work: wait queue entry used to hook into the fence's wait queue;
- * its private field carries the fence back to the callback
- * @callback: function pointer to call when fence signals
- */
-struct sync_fence_waiter {
- wait_queue_t work;
- sync_callback_t callback;
-};
-
-static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
- sync_callback_t callback)
-{
- INIT_LIST_HEAD(&waiter->work.task_list);
- waiter->callback = callback;
-}
-
-/*
- * API for sync_timeline implementers
- */
-
-/**
- * sync_timeline_create() - creates a sync object
- * @ops: specifies the implementation ops for the object
- * @size: size to allocate for this obj
- * @name: sync_timeline name
- *
- * Creates a new sync_timeline which will use the implementation specified by
- * @ops. @size bytes will be allocated allowing for implementation specific
- * data to be kept after the generic sync_timeline struct.
- */
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
- int size, const char *name);
-
-/**
- * sync_timeline_destroy() - destroys a sync object
- * @obj: sync_timeline to destroy
- *
- * A sync implementation should call this when the @obj is going away
- * (i.e. module unload.) @obj won't actually be freed until all its children
- * sync_pts are freed.
- */
-void sync_timeline_destroy(struct sync_timeline *obj);
-
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj: sync_timeline to signal
- *
- * A sync implementation should call this any time one of its sync_pts
- * has signaled or has an error condition.
- */
-void sync_timeline_signal(struct sync_timeline *obj);
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent: sync_pt's parent sync_timeline
- * @size: size to allocate for this pt
- *
- * Creates a new sync_pt as a child of @parent. @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct.
- */
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
-
-/**
- * sync_pt_free() - frees a sync pt
- * @pt: sync_pt to free
- *
- * This should only be called on sync_pts which have been created but
- * not added to a fence.
- */
-void sync_pt_free(struct sync_pt *pt);
-
-/**
- * sync_fence_create() - creates a sync fence
- * @name: name of fence to create
- * @pt: sync_pt to add to the fence
- *
- * Creates a fence containing @pt. Once this is called, the fence takes
- * ownership of @pt.
- */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
-
-/*
- * API for sync_fence consumers
- */
-
-/**
- * sync_fence_merge() - merge two fences
- * @name: name of new fence
- * @a: fence a
- * @b: fence b
- *
- * Creates a new fence which contains copies of all the sync_pts in both
- * @a and @b. @a and @b remain valid, independent fences.
- */
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b);
-
-/**
- * sync_fence_fdget() - get a fence from an fd
- * @fd: fd referencing a fence
- *
- * Ensures @fd references a valid fence, increments the refcount of the backing
- * file, and returns the fence.
- */
-struct sync_fence *sync_fence_fdget(int fd);
-
-/**
- * sync_fence_put() - puts a reference of a sync fence
- * @fence: fence to put
- *
- * Puts a reference on @fence. If this is the last reference, the fence and
- * all its sync_pts will be freed
- */
-void sync_fence_put(struct sync_fence *fence);
-
-/**
- * sync_fence_install() - installs a fence into a file descriptor
- * @fence: fence to install
- * @fd: file descriptor in which to install the fence
- *
- * Installs @fence into @fd. @fd should be acquired through
- * get_unused_fd_flags(O_CLOEXEC).
- */
-void sync_fence_install(struct sync_fence *fence, int fd);
-
-/**
- * sync_fence_wait_async() - registers an async wait on the fence
- * @fence: fence to wait on
- * @waiter: waiter callback struct
- *
- * Returns 1 if @fence has already signaled.
- *
- * Registers a callback to be called when @fence signals or has an error.
- * @waiter should be initialized with sync_fence_waiter_init().
- */
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_cancel_async() - cancels an async wait
- * @fence: fence to wait on
- * @waiter: waiter callback struct
- *
- * returns 0 if waiter was removed from fence's async waiter list.
- * returns -ENOENT if waiter was not found on fence's async waiter list.
- *
- * Cancels a previously registered async wait. Will fail gracefully if
- * @waiter was never registered or if @fence has already signaled @waiter.
- */
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_wait() - wait on fence
- * @fence: fence to wait on
- * @timeout: timeout in ms
- *
- * Wait for @fence to be signaled or have an error. Waits indefinitely
- * if @timeout < 0
- */
-int sync_fence_wait(struct sync_fence *fence, long timeout);
-
-#ifdef CONFIG_DEBUG_FS
-
-void sync_timeline_debug_add(struct sync_timeline *obj);
-void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_fence_debug_add(struct sync_fence *fence);
-void sync_fence_debug_remove(struct sync_fence *fence);
-void sync_dump(void);
-
-#else
-# define sync_timeline_debug_add(obj)
-# define sync_timeline_debug_remove(obj)
-# define sync_fence_debug_add(fence)
-# define sync_fence_debug_remove(fence)
-# define sync_dump()
-#endif
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
- int wake_flags, void *key);
-
-#endif /* _LINUX_SYNC_H */
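The asynchronous wait half of this API is callback based. A hedged in-kernel sketch of a consumer that bridges it to a completion; the wrapper struct and names are illustrative only.

/* Hedged in-kernel sketch of the async wait API declared above. */
#include <linux/completion.h>
#include <linux/kernel.h>

#include "sync.h"

struct example_waiter {
	struct sync_fence_waiter waiter;
	struct completion done;
};

static void example_fence_signaled(struct sync_fence *fence,
				   struct sync_fence_waiter *waiter)
{
	struct example_waiter *ew =
		container_of(waiter, struct example_waiter, waiter);

	complete(&ew->done);
}

static int example_wait_async(struct sync_fence *fence)
{
	struct example_waiter ew;
	int ret;

	init_completion(&ew.done);
	sync_fence_waiter_init(&ew.waiter, example_fence_signaled);

	ret = sync_fence_wait_async(fence, &ew.waiter);
	if (ret < 0)		/* fence is in an error state */
		return ret;
	if (ret == 1)		/* already signaled, callback never runs */
		return 0;

	wait_for_completion(&ew.done);
	return 0;
}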
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
deleted file mode 100644
index 91ed2c4cff45..000000000000
--- a/drivers/staging/android/sync_debug.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * drivers/staging/android/sync_debug.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-#include <linux/time64.h>
-#include "sync.h"
-
-#ifdef CONFIG_DEBUG_FS
-
-static LIST_HEAD(sync_timeline_list_head);
-static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_fence_list_head);
-static DEFINE_SPINLOCK(sync_fence_list_lock);
-
-void sync_timeline_debug_add(struct sync_timeline *obj)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-}
-
-void sync_timeline_debug_remove(struct sync_timeline *obj)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_del(&obj->sync_timeline_list);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-}
-
-void sync_fence_debug_add(struct sync_fence *fence)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-}
-
-void sync_fence_debug_remove(struct sync_fence *fence)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_del(&fence->sync_fence_list);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-}
-
-static const char *sync_status_str(int status)
-{
- if (status == 0)
- return "signaled";
-
- if (status > 0)
- return "active";
-
- return "error";
-}
-
-static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
-{
- int status = 1;
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (fence_is_signaled_locked(&pt->base))
- status = pt->base.status;
-
- seq_printf(s, " %s%spt %s",
- fence ? parent->name : "",
- fence ? "_" : "",
- sync_status_str(status));
-
- if (status <= 0) {
- struct timespec64 ts64 =
- ktime_to_timespec64(pt->base.timestamp);
-
- seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
- }
-
- if (parent->ops->timeline_value_str &&
- parent->ops->pt_value_str) {
- char value[64];
-
- parent->ops->pt_value_str(pt, value, sizeof(value));
- seq_printf(s, ": %s", value);
- if (fence) {
- parent->ops->timeline_value_str(parent, value,
- sizeof(value));
- seq_printf(s, " / %s", value);
- }
- }
-
- seq_puts(s, "\n");
-}
-
-static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
-{
- struct list_head *pos;
- unsigned long flags;
-
- seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
- if (obj->ops->timeline_value_str) {
- char value[64];
-
- obj->ops->timeline_value_str(obj, value, sizeof(value));
- seq_printf(s, ": %s", value);
- }
-
- seq_puts(s, "\n");
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- list_for_each(pos, &obj->child_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, child_list);
- sync_print_pt(s, pt, false);
- }
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
-{
- wait_queue_t *pos;
- unsigned long flags;
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
- sync_status_str(atomic_read(&fence->status)));
-
- for (i = 0; i < fence->num_fences; ++i) {
- struct sync_pt *pt =
- container_of(fence->cbs[i].sync_pt,
- struct sync_pt, base);
-
- sync_print_pt(s, pt, true);
- }
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- list_for_each_entry(pos, &fence->wq.task_list, task_list) {
- struct sync_fence_waiter *waiter;
-
- if (pos->func != &sync_fence_wake_up_wq)
- continue;
-
- waiter = container_of(pos, struct sync_fence_waiter, work);
-
- seq_printf(s, "waiter %pF\n", waiter->callback);
- }
- spin_unlock_irqrestore(&fence->wq.lock, flags);
-}
-
-static int sync_debugfs_show(struct seq_file *s, void *unused)
-{
- unsigned long flags;
- struct list_head *pos;
-
- seq_puts(s, "objs:\n--------------\n");
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_for_each(pos, &sync_timeline_list_head) {
- struct sync_timeline *obj =
- container_of(pos, struct sync_timeline,
- sync_timeline_list);
-
- sync_print_obj(s, obj);
- seq_puts(s, "\n");
- }
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
- seq_puts(s, "fences:\n--------------\n");
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_for_each(pos, &sync_fence_list_head) {
- struct sync_fence *fence =
- container_of(pos, struct sync_fence, sync_fence_list);
-
- sync_print_fence(s, fence);
- seq_puts(s, "\n");
- }
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
- return 0;
-}
-
-static int sync_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_debugfs_fops = {
- .open = sync_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static __init int sync_debugfs_init(void)
-{
- debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
- return 0;
-}
-late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-void sync_dump(void)
-{
- struct seq_file s = {
- .buf = sync_dump_buf,
- .size = sizeof(sync_dump_buf) - 1,
- };
- int i;
-
- sync_debugfs_show(&s, NULL);
-
- for (i = 0; i < s.count; i += DUMP_CHUNK) {
- if ((s.count - i) > DUMP_CHUNK) {
- char c = s.buf[i + DUMP_CHUNK];
-
- s.buf[i + DUMP_CHUNK] = 0;
- pr_cont("%s", s.buf + i);
- s.buf[i + DUMP_CHUNK] = c;
- } else {
- s.buf[s.count] = 0;
- pr_cont("%s", s.buf + i);
- }
- }
-}
-
-#endif
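Everything printed by sync_debugfs_show() is exposed through the single debugfs node registered in sync_debugfs_init(), so timeline and fence state can be inspected from userspace. A trivial sketch, assuming debugfs is mounted at the usual /sys/kernel/debug:

/* Sketch: dump the sync debugfs state created above. The
 * /sys/kernel/debug mount point is an assumption. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/sync", "r");

	if (!f) {
		perror("/sys/kernel/debug/sync");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}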
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
deleted file mode 100644
index ce11726f1a6c..000000000000
--- a/drivers/staging/android/timed_gpio.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/* drivers/misc/timed_gpio.c
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/hrtimer.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/ktime.h>
-
-#include "timed_output.h"
-#include "timed_gpio.h"
-
-
-struct timed_gpio_data {
- struct timed_output_dev dev;
- struct hrtimer timer;
- spinlock_t lock;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
-{
- struct timed_gpio_data *data =
- container_of(timer, struct timed_gpio_data, timer);
-
- gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
- return HRTIMER_NORESTART;
-}
-
-static int gpio_get_time(struct timed_output_dev *dev)
-{
- struct timed_gpio_data *data;
- ktime_t t;
-
- data = container_of(dev, struct timed_gpio_data, dev);
-
- if (!hrtimer_active(&data->timer))
- return 0;
-
- t = hrtimer_get_remaining(&data->timer);
-
- return ktime_to_ms(t);
-}
-
-static void gpio_enable(struct timed_output_dev *dev, int value)
-{
- struct timed_gpio_data *data =
- container_of(dev, struct timed_gpio_data, dev);
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
-
- /* cancel previous timer and set GPIO according to value */
- hrtimer_cancel(&data->timer);
- gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
-
- if (value > 0) {
- if (value > data->max_timeout)
- value = data->max_timeout;
-
- hrtimer_start(&data->timer,
- ktime_set(value / 1000, (value % 1000) * 1000000),
- HRTIMER_MODE_REL);
- }
-
- spin_unlock_irqrestore(&data->lock, flags);
-}
-
-static int timed_gpio_probe(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio *cur_gpio;
- struct timed_gpio_data *gpio_data, *gpio_dat;
- int i, ret;
-
- if (!pdata)
- return -EBUSY;
-
- gpio_data = devm_kzalloc(&pdev->dev,
- sizeof(struct timed_gpio_data) * pdata->num_gpios,
- GFP_KERNEL);
- if (!gpio_data)
- return -ENOMEM;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- cur_gpio = &pdata->gpios[i];
- gpio_dat = &gpio_data[i];
-
- hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- gpio_dat->timer.function = gpio_timer_func;
- spin_lock_init(&gpio_dat->lock);
-
- gpio_dat->dev.name = cur_gpio->name;
- gpio_dat->dev.get_time = gpio_get_time;
- gpio_dat->dev.enable = gpio_enable;
- ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
- if (ret < 0)
- goto err_out;
- ret = timed_output_dev_register(&gpio_dat->dev);
- if (ret < 0) {
- gpio_free(cur_gpio->gpio);
- goto err_out;
- }
-
- gpio_dat->gpio = cur_gpio->gpio;
- gpio_dat->max_timeout = cur_gpio->max_timeout;
- gpio_dat->active_low = cur_gpio->active_low;
- gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
- }
-
- platform_set_drvdata(pdev, gpio_data);
-
- return 0;
-
-err_out:
- while (--i >= 0) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return ret;
-}
-
-static int timed_gpio_remove(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return 0;
-}
-
-static struct platform_driver timed_gpio_driver = {
- .probe = timed_gpio_probe,
- .remove = timed_gpio_remove,
- .driver = {
- .name = TIMED_GPIO_NAME,
- },
-};
-
-module_platform_driver(timed_gpio_driver);
-
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_DESCRIPTION("timed gpio driver");
-MODULE_LICENSE("GPL");
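The probe above expects timed_gpio_platform_data from the board file; the fields it dereferences (name, gpio, max_timeout, active_low, num_gpios, gpios) are declared in timed_gpio.h below. A hedged board-file sketch for a single vibrator line, with the GPIO number and timeout as placeholders:

/* Hedged board-file sketch: declares one "vibrator" timed GPIO; the
 * device would be registered with platform_device_register() from
 * board init. GPIO number and timeout are placeholders. */
#include <linux/kernel.h>
#include <linux/platform_device.h>

#include "timed_gpio.h"

static struct timed_gpio board_timed_gpios[] = {
	{
		.name        = "vibrator",
		.gpio        = 155,	/* placeholder GPIO number */
		.max_timeout = 15000,	/* ms */
		.active_low  = 0,
	},
};

static struct timed_gpio_platform_data board_timed_gpio_pdata = {
	.num_gpios = ARRAY_SIZE(board_timed_gpios),
	.gpios     = board_timed_gpios,
};

static struct platform_device board_timed_gpio_device = {
	.name = TIMED_GPIO_NAME,
	.id   = -1,
	.dev  = {
		.platform_data = &board_timed_gpio_pdata,
	},
};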
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
deleted file mode 100644
index d29e169d7ebe..000000000000
--- a/drivers/staging/android/timed_gpio.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* include/linux/timed_gpio.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_GPIO_H
-#define _LINUX_TIMED_GPIO_H
-
-#define TIMED_GPIO_NAME "timed-gpio"
-
-struct timed_gpio {
- const char *name;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-struct timed_gpio_platform_data {
- int num_gpios;
- struct timed_gpio *gpios;
-};
-
-#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
deleted file mode 100644
index b41429f379fe..000000000000
--- a/drivers/staging/android/timed_output.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/* drivers/misc/timed_output.c
- *
- * Copyright (C) 2009 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "timed_output: " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/err.h>
-
-#include "timed_output.h"
-
-static struct class *timed_output_class;
-static atomic_t device_count;
-
-static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int remaining = tdev->get_time(tdev);
-
- return sprintf(buf, "%d\n", remaining);
-}
-
-static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int value;
- int rc;
-
- rc = kstrtoint(buf, 0, &value);
- if (rc != 0)
- return -EINVAL;
-
- tdev->enable(tdev, value);
-
- return size;
-}
-static DEVICE_ATTR_RW(enable);
-
-static struct attribute *timed_output_attrs[] = {
- &dev_attr_enable.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(timed_output);
-
-static int create_timed_output_class(void)
-{
- if (!timed_output_class) {
- timed_output_class = class_create(THIS_MODULE, "timed_output");
- if (IS_ERR(timed_output_class))
- return PTR_ERR(timed_output_class);
- atomic_set(&device_count, 0);
- timed_output_class->dev_groups = timed_output_groups;
- }
-
- return 0;
-}
-
-int timed_output_dev_register(struct timed_output_dev *tdev)
-{
- int ret;
-
- if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
- return -EINVAL;
-
- ret = create_timed_output_class();
- if (ret < 0)
- return ret;
-
- tdev->index = atomic_inc_return(&device_count);
- tdev->dev = device_create(timed_output_class, NULL,
- MKDEV(0, tdev->index), NULL, "%s", tdev->name);
- if (IS_ERR(tdev->dev))
- return PTR_ERR(tdev->dev);
-
- dev_set_drvdata(tdev->dev, tdev);
- tdev->state = 0;
- return 0;
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_register);
-
-void timed_output_dev_unregister(struct timed_output_dev *tdev)
-{
- tdev->enable(tdev, 0);
- device_destroy(timed_output_class, MKDEV(0, tdev->index));
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
-
-static int __init timed_output_init(void)
-{
- return create_timed_output_class();
-}
-
-static void __exit timed_output_exit(void)
-{
- class_destroy(timed_output_class);
-}
-
-module_init(timed_output_init);
-module_exit(timed_output_exit);
-
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_DESCRIPTION("timed output class driver");
-MODULE_LICENSE("GPL");
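Each registered device appears as /sys/class/timed_output/<name>/enable: writing a duration in milliseconds runs the output, and reading returns the remaining time via get_time(). A small userspace sketch, assuming a device registered under the name "vibrator":

/* Sketch: run the "vibrator" timed_output device for 500 ms through
 * the sysfs attribute implemented above, then read back the remaining
 * time. The device name is an assumption. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/timed_output/vibrator/enable";
	int remaining = 0;
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%d\n", 500);	/* run for 500 ms */
	fclose(f);

	f = fopen(path, "r");
	if (f && fscanf(f, "%d", &remaining) == 1)
		printf("remaining: %d ms\n", remaining);
	if (f)
		fclose(f);
	return 0;
}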
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
deleted file mode 100644
index 13d2ca51cbe8..000000000000
--- a/drivers/staging/android/timed_output.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* include/linux/timed_output.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_OUTPUT_H
-#define _LINUX_TIMED_OUTPUT_H
-
-struct timed_output_dev {
- const char *name;
-
- /* enable the output and set the timer */
- void (*enable)(struct timed_output_dev *sdev, int timeout);
-
- /* returns the current number of milliseconds remaining on the timer */
- int (*get_time)(struct timed_output_dev *sdev);
-
- /* private data */
- struct device *dev;
- int index;
- int state;
-};
-
-int timed_output_dev_register(struct timed_output_dev *dev);
-void timed_output_dev_unregister(struct timed_output_dev *dev);
-
-#endif
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
deleted file mode 100644
index 77edb977a7bf..000000000000
--- a/drivers/staging/android/trace/sync.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
-#define TRACE_SYSTEM sync
-
-#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_SYNC_H
-
-#include "../sync.h"
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(sync_timeline,
- TP_PROTO(struct sync_timeline *timeline),
-
- TP_ARGS(timeline),
-
- TP_STRUCT__entry(
- __string(name, timeline->name)
- __array(char, value, 32)
- ),
-
- TP_fast_assign(
- __assign_str(name, timeline->name);
- if (timeline->ops->timeline_value_str) {
- timeline->ops->timeline_value_str(timeline,
- __entry->value,
- sizeof(__entry->value));
- } else {
- __entry->value[0] = '\0';
- }
- ),
-
- TP_printk("name=%s value=%s", __get_str(name), __entry->value)
-);
-
-TRACE_EVENT(sync_wait,
- TP_PROTO(struct sync_fence *fence, int begin),
-
- TP_ARGS(fence, begin),
-
- TP_STRUCT__entry(
- __string(name, fence->name)
- __field(s32, status)
- __field(u32, begin)
- ),
-
- TP_fast_assign(
- __assign_str(name, fence->name);
- __entry->status = atomic_read(&fence->status);
- __entry->begin = begin;
- ),
-
- TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
- __get_str(name), __entry->status)
-);
-
-TRACE_EVENT(sync_pt,
- TP_PROTO(struct fence *pt),
-
- TP_ARGS(pt),
-
- TP_STRUCT__entry(
- __string(timeline, pt->ops->get_timeline_name(pt))
- __array(char, value, 32)
- ),
-
- TP_fast_assign(
- __assign_str(timeline, pt->ops->get_timeline_name(pt));
- if (pt->ops->fence_value_str) {
- pt->ops->fence_value_str(pt, __entry->value,
- sizeof(__entry->value));
- } else {
- __entry->value[0] = '\0';
- }
- ),
-
- TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
-);
-
-#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
deleted file mode 100644
index ba4743c71d6b..000000000000
--- a/drivers/staging/android/uapi/ashmem.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * drivers/staging/android/uapi/ashmem.h
- *
- * Copyright 2008 Google Inc.
- * Author: Robert Love
- *
- * This file is dual licensed. It may be redistributed and/or modified
- * under the terms of the Apache 2.0 License OR version 2 of the GNU
- * General Public License.
- */
-
-#ifndef _UAPI_LINUX_ASHMEM_H
-#define _UAPI_LINUX_ASHMEM_H
-
-#include <linux/ioctl.h>
-
-#define ASHMEM_NAME_LEN 256
-
-#define ASHMEM_NAME_DEF "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED 0
-#define ASHMEM_WAS_PURGED 1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED 0
-#define ASHMEM_IS_PINNED 1
-
-struct ashmem_pin {
- __u32 offset; /* offset into region, in bytes, page-aligned */
- __u32 len; /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC 0x77
-
-#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
-
-#endif /* _UAPI_LINUX_ASHMEM_H */
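
For reference, typical user-space use of the removed ashmem interface; a minimal sketch with abbreviated error handling, assuming the usual /dev/ashmem node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ashmem.h"	/* this UAPI header */

int main(void)
{
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0)
		return 1;

	/* name and size must be set before the region is mapped */
	ioctl(fd, ASHMEM_SET_NAME, "example-region");
	ioctl(fd, ASHMEM_SET_SIZE, 4096);

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* unpin the range; the kernel may purge it under memory pressure */
	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* pin it again before use and check whether it was purged meanwhile */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		printf("contents were discarded\n");

	munmap(p, 4096);
	close(fd);
	return 0;
}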
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
deleted file mode 100644
index 0a8e40f92cd7..000000000000
--- a/drivers/staging/android/uapi/ion.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * drivers/staging/android/uapi/ion.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_ION_H
-#define _UAPI_LINUX_ION_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-typedef int ion_user_handle_t;
-
-/**
- * enum ion_heap_types - list of all possible types of heaps
- * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
- * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
- * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
- * carveout heap, allocations are physically
- * contiguous
- * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
- * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
- * is used to identify the heaps, so only 32
- * total heap types are supported
- */
-enum ion_heap_type {
- ION_HEAP_TYPE_SYSTEM,
- ION_HEAP_TYPE_SYSTEM_CONTIG,
- ION_HEAP_TYPE_CARVEOUT,
- ION_HEAP_TYPE_CHUNK,
- ION_HEAP_TYPE_DMA,
- ION_HEAP_TYPE_CUSTOM, /*
- * must be last so device specific heaps always
- * are at the end of this enum
- */
- ION_NUM_HEAPS = 16,
-};
-
-#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
-#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
-#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
-#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
-
-#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
-
-/**
- * allocation flags - the lower 16 bits are used by core ion, the upper 16
- * bits are reserved for use by the heaps themselves.
- */
-#define ION_FLAG_CACHED 1 /*
- * mappings of this buffer should be
- * cached, ion will do cache
- * maintenance when the buffer is
- * mapped for dma
- */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2	/*
-					 * mappings of this buffer will be
-					 * created at mmap time; if this is
-					 * set, caches must be managed
-					 * manually
-					 */
-
-/**
- * DOC: Ion Userspace API
- *
- * Create a client by opening /dev/ion.
- * Most operations are handled via the following ioctls.
- *
- */
-
-/**
- * struct ion_allocation_data - metadata passed from userspace for allocations
- * @len: size of the allocation
- * @align: required alignment of the allocation
- * @heap_id_mask: mask of heap ids to allocate from
- * @flags: flags passed to heap
- * @handle: pointer that will be populated with a cookie to use to
- * refer to this allocation
- *
- * Provided by userspace as an argument to the ioctl
- */
-struct ion_allocation_data {
- size_t len;
- size_t align;
- unsigned int heap_id_mask;
- unsigned int flags;
- ion_user_handle_t handle;
-};
-
-/**
- * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
- * @handle: a handle
- * @fd: a file descriptor representing that handle
- *
- * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
- * the handle returned from ion alloc, and the kernel returns the file
- * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
- * provides the file descriptor and the kernel returns the handle.
- */
-struct ion_fd_data {
- ion_user_handle_t handle;
- int fd;
-};
-
-/**
- * struct ion_handle_data - a handle passed to/from the kernel
- * @handle: a handle
- */
-struct ion_handle_data {
- ion_user_handle_t handle;
-};
-
-/**
- * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
- * @cmd: the custom ioctl function to call
- * @arg: additional data to pass to the custom ioctl, typically a user
- * pointer to a predefined structure
- *
- * This works just like the regular cmd and arg fields of an ioctl.
- */
-struct ion_custom_data {
- unsigned int cmd;
- unsigned long arg;
-};
-
-#define ION_IOC_MAGIC 'I'
-
-/**
- * DOC: ION_IOC_ALLOC - allocate memory
- *
- * Takes an ion_allocation_data struct and returns it with the handle field
- * populated with the opaque handle for the allocation.
- */
-#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
- struct ion_allocation_data)
-
-/**
- * DOC: ION_IOC_FREE - free memory
- *
- * Takes an ion_handle_data struct and frees the handle.
- */
-#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
-
-/**
- * DOC: ION_IOC_MAP - get a file descriptor to mmap
- *
- * Takes an ion_fd_data struct with the handle field populated with a valid
- * opaque handle. Returns the struct with the fd field set to a file
- * descriptor open in the current address space. This file descriptor
- * can then be used as an argument to mmap.
- */
-#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
- *
- * Takes an ion_fd_data struct with the handle field populated with a valid
- * opaque handle. Returns the struct with the fd field set to a file
- * descriptor open in the current address space. This file descriptor
- * can then be passed to another process. The corresponding opaque handle can
- * be retrieved via ION_IOC_IMPORT.
- */
-#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_IMPORT - imports a shared file descriptor
- *
- * Takes an ion_fd_data struct with the fd field populated with a valid file
- * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
- * field set to the corresponding opaque handle.
- */
-#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
- *
- * Deprecated in favor of using the dma-buf API correctly (syncing
- * happens automatically when the buffer is mapped to a device).
- * If necessary, use it after touching a cached buffer from the CPU;
- * this makes the buffer coherent in memory.
- */
-#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
- *
- * Takes the argument of the architecture specific ioctl to call and
- * passes appropriate userdata for that ioctl
- */
-#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
-
-#endif /* _UAPI_LINUX_ION_H */
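
For reference, the user-space flow the DOC sections above describe; a minimal sketch with abbreviated error handling, where the heap mask and size are illustrative only:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"	/* this UAPI header */

/* allocate 1 MiB from the system heap and obtain a shareable fd */
int ion_alloc_shared(int *out_fd)
{
	int ion_fd = open("/dev/ion", O_RDWR);
	struct ion_allocation_data alloc = {
		.len		= 1024 * 1024,
		.align		= 0,
		.heap_id_mask	= ION_HEAP_SYSTEM_MASK,
		.flags		= ION_FLAG_CACHED,
	};
	struct ion_fd_data share;
	struct ion_handle_data free_data;

	if (ion_fd < 0)
		return -1;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto err;

	/* turn the opaque handle into an fd that can be mmap()ed or passed on */
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		goto err;
	*out_fd = share.fd;

	/* the handle is no longer needed once the fd exists */
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
	close(ion_fd);
	return 0;
err:
	close(ion_fd);
	return -1;
}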
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
deleted file mode 100644
index ffef06f63133..000000000000
--- a/drivers/staging/android/uapi/ion_test.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * drivers/staging/android/uapi/ion_test.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_ION_TEST_H
-#define _UAPI_LINUX_ION_TEST_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/**
- * struct ion_test_rw_data - metadata passed to the kernel to read handle
- * @ptr: a pointer to an area at least as large as size
- * @offset: offset into the ion buffer to start reading
- * @size: size to read or write
- * @write: 1 to write, 0 to read
- */
-struct ion_test_rw_data {
- __u64 ptr;
- __u64 offset;
- __u64 size;
- int write;
- int __padding;
-};
-
-#define ION_IOC_MAGIC 'I'
-
-/**
- * DOC: ION_IOC_TEST_SET_FD - attach a dma-buf fd to the test driver
- *
- * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
- * release the first fd.
- */
-#define ION_IOC_TEST_SET_FD \
- _IO(ION_IOC_MAGIC, 0xf0)
-
-/**
- * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
- *
- * Reads or writes the memory from a handle using an uncached mapping. Can be
- * used by unit tests to emulate a DMA engine as close as possible. Only
- * expected to be used for debugging and testing, may not always be available.
- */
-#define ION_IOC_TEST_DMA_MAPPING \
- _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
-
-/**
- * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
- *
- * Reads or writes the memory from a handle using a kernel mapping. Can be
- * used by unit tests to test heap map_kernel functions. Only expected to be
- * used for debugging and testing, may not always be available.
- */
-#define ION_IOC_TEST_KERNEL_MAPPING \
- _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
-
-
-#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
deleted file mode 100644
index 9b5d4869505c..000000000000
--- a/drivers/staging/android/uapi/sw_sync.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SW_SYNC_H
-#define _UAPI_LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-#endif /* _UAPI_LINUX_SW_SYNC_H */
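
For reference, a minimal user-space sketch of driving the removed sw_sync timeline (error handling abbreviated; it assumes the /dev/sw_sync node that the removed driver registered):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sw_sync.h"	/* this UAPI header */

int main(void)
{
	int timeline = open("/dev/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;

	if (timeline < 0)
		return 1;

	/* create a fence that signals when the timeline reaches value 1 */
	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "test-fence", sizeof(data.name) - 1);
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* advance the timeline by 1, signalling the fence created above */
	ioctl(timeline, SW_SYNC_IOC_INC, &inc);

	close(data.fence);
	close(timeline);
	return 0;
}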
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
deleted file mode 100644
index e964c751f6b8..000000000000
--- a/drivers/staging/android/uapi/sync.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SYNC_H
-#define _UAPI_LINUX_SYNC_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
-	__s32	fd2; /* fd of second fence */
-	char	name[32]; /* name of new fence */
-	__s32	fence; /* fd of newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implementing the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependent data
- */
-struct sync_pt_info {
- __u32 len;
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-
- __u8 driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data returned to userspace
- * including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * Pass the timeout in milliseconds. Waits indefinitely if timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
-
-#endif /* _UAPI_LINUX_SYNC_H */
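
For reference, the merge-and-wait pattern described by the DOC sections above; a minimal user-space sketch with abbreviated error handling:

#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sync.h"	/* this UAPI header */

/* merge two fence fds and wait up to 1 s for the result to signal */
int wait_for_both(int fence_a, int fence_b)
{
	struct sync_merge_data merge;
	__s32 timeout_ms = 1000;
	int ret;

	memset(&merge, 0, sizeof(merge));
	merge.fd2 = fence_b;
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);
	if (ioctl(fence_a, SYNC_IOC_MERGE, &merge) < 0)
		return -1;

	/* block on the merged fence; 0 on success, -1/errno on timeout or error */
	ret = ioctl(merge.fence, SYNC_IOC_WAIT, &timeout_ms);
	close(merge.fence);
	return ret;
}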
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
new file mode 100644
index 000000000000..f180a8e9f58a
--- /dev/null
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# "Xilinx AXI-Stream FIFO IP core driver"
+#
+config XIL_AXIS_FIFO
+ tristate "Xilinx AXI-Stream FIFO IP core driver"
+ depends on OF && HAS_IOMEM
+ help
+ This adds support for the Xilinx AXI-Stream FIFO IP core driver.
+	  The AXI Streaming FIFO allows memory-mapped access to an AXI Streaming
+	  interface. The Xilinx AXI-Stream FIFO IP core can be used to interface
+ to the AXI Ethernet without the need to use DMA.
diff --git a/drivers/staging/axis-fifo/Makefile b/drivers/staging/axis-fifo/Makefile
new file mode 100644
index 000000000000..c626005c99db
--- /dev/null
+++ b/drivers/staging/axis-fifo/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo.o
diff --git a/drivers/staging/axis-fifo/README b/drivers/staging/axis-fifo/README
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/staging/axis-fifo/README
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
new file mode 100644
index 000000000000..509d620d6ce7
--- /dev/null
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
+ *
+ * Copyright (C) 2018 Jacob Feder
+ *
+ * Authors: Jacob Feder <jacobsfeder@gmail.com>
+ *
+ * See Xilinx PG080 document for IP details
+ */
+
+/* ----------------------------
+ * includes
+ * ----------------------------
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/param.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/jiffies.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+
+/* ----------------------------
+ * driver parameters
+ * ----------------------------
+ */
+
+#define DRIVER_NAME "axis_fifo"
+
+#define READ_BUF_SIZE 128U /* read buffer length in words */
+
+#define AXIS_FIFO_DEBUG_REG_NAME_MAX_LEN 4
+
+/* ----------------------------
+ * IP register offsets
+ * ----------------------------
+ */
+
+#define XLLF_ISR_OFFSET 0x00000000 /* Interrupt Status */
+#define XLLF_IER_OFFSET 0x00000004 /* Interrupt Enable */
+
+#define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */
+#define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */
+#define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */
+#define XLLF_TLR_OFFSET 0x00000014 /* Transmit Length */
+
+#define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */
+#define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */
+#define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */
+#define XLLF_RLR_OFFSET 0x00000024 /* Receive Length */
+#define XLLF_SRR_OFFSET 0x00000028 /* Local Link Reset */
+#define XLLF_TDR_OFFSET 0x0000002C /* Transmit Destination */
+#define XLLF_RDR_OFFSET 0x00000030 /* Receive Destination */
+
+/* ----------------------------
+ * reset register masks
+ * ----------------------------
+ */
+
+#define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */
+#define XLLF_TDFR_RESET_MASK 0x000000a5 /* Transmit reset value */
+#define XLLF_SRR_RESET_MASK 0x000000a5 /* Local Link reset value */
+
+/* ----------------------------
+ * interrupt masks
+ * ----------------------------
+ */
+
+#define XLLF_INT_RPURE_MASK 0x80000000 /* Receive under-read */
+#define XLLF_INT_RPORE_MASK 0x40000000 /* Receive over-read */
+#define XLLF_INT_RPUE_MASK 0x20000000 /* Receive underrun (empty) */
+#define XLLF_INT_TPOE_MASK 0x10000000 /* Transmit overrun */
+#define XLLF_INT_TC_MASK 0x08000000 /* Transmit complete */
+#define XLLF_INT_RC_MASK 0x04000000 /* Receive complete */
+#define XLLF_INT_TSE_MASK 0x02000000 /* Transmit length mismatch */
+
+#define XLLF_INT_CLEAR_ALL GENMASK(31, 0)
+
+/* ----------------------------
+ * globals
+ * ----------------------------
+ */
+static long read_timeout = 1000; /* ms to wait before read() times out */
+static long write_timeout = 1000; /* ms to wait before write() times out */
+
+static DEFINE_IDA(axis_fifo_ida);
+
+/* ----------------------------
+ * module command-line arguments
+ * ----------------------------
+ */
+
+module_param(read_timeout, long, 0444);
+MODULE_PARM_DESC(read_timeout, "ms to wait before a blocking read() times out; set to -1 for no timeout");
+module_param(write_timeout, long, 0444);
+MODULE_PARM_DESC(write_timeout, "ms to wait before a blocking write() times out; set to -1 for no timeout");
+
+/* ----------------------------
+ * types
+ * ----------------------------
+ */
+
+struct axis_fifo {
+ int id;
+ void __iomem *base_addr; /* kernel space memory */
+
+ unsigned int rx_fifo_depth; /* max words in the receive fifo */
+ unsigned int tx_fifo_depth; /* max words in the transmit fifo */
+ int has_rx_fifo; /* whether the IP has the rx fifo enabled */
+ int has_tx_fifo; /* whether the IP has the tx fifo enabled */
+
+	wait_queue_head_t read_queue; /* wait queue for asynchronous read */
+	struct mutex read_lock; /* lock for reading */
+	wait_queue_head_t write_queue; /* wait queue for asynchronous write */
+	struct mutex write_lock; /* lock for writing */
+
+ struct device *dt_device; /* device created from the device tree */
+ struct miscdevice miscdev;
+
+ struct dentry *debugfs_dir;
+};
+
+struct axis_fifo_debug_reg {
+ const char * const name;
+ unsigned int offset;
+};
+
+/* ----------------------------
+ * implementation
+ * ----------------------------
+ */
+
+static void reset_ip_core(struct axis_fifo *fifo)
+{
+ iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
+ iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
+ iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
+ iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
+ XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
+ XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
+ fifo->base_addr + XLLF_IER_OFFSET);
+ iowrite32(XLLF_INT_CLEAR_ALL, fifo->base_addr + XLLF_ISR_OFFSET);
+}
+
+/**
+ * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to read to.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to check the device's
+ * occupancy before reading the length register and then the data. These
+ * operations must be executed atomically and in that exact order, without
+ * skipping any of them.
+ *
+ * Returns the number of bytes read from the device or negative error code
+ * on failure.
+ */
+static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
+ size_t bytes_available;
+ unsigned int words_available;
+ unsigned int copied;
+ unsigned int copy;
+ unsigned int i;
+ int ret;
+ u32 tmp_buf[READ_BUF_SIZE];
+
+ if (f->f_flags & O_NONBLOCK) {
+ /*
+ * Device opened in non-blocking mode. Try to lock it and then
+ * check if any packet is available.
+ */
+ if (!mutex_trylock(&fifo->read_lock))
+ return -EAGAIN;
+
+ if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
+ ret = -EAGAIN;
+ goto end_unlock;
+ }
+ } else {
+ /* opened in blocking mode
+ * wait for a packet available interrupt (or timeout)
+ * if nothing is currently available
+ */
+ mutex_lock(&fifo->read_lock);
+ ret = wait_event_interruptible_timeout(fifo->read_queue,
+ ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
+ read_timeout);
+
+ if (ret <= 0) {
+ if (ret == 0) {
+ ret = -EAGAIN;
+ } else if (ret != -ERESTARTSYS) {
+ dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
+ ret);
+ }
+
+ goto end_unlock;
+ }
+ }
+
+ bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
+ words_available = bytes_available / sizeof(u32);
+ if (!bytes_available) {
+ dev_err(fifo->dt_device, "received a packet of length 0\n");
+ ret = -EIO;
+ goto end_unlock;
+ }
+
+ if (bytes_available > len) {
+ dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n",
+ bytes_available, len);
+ ret = -EINVAL;
+ goto err_flush_rx;
+ }
+
+ if (bytes_available % sizeof(u32)) {
+ /* this probably can't happen unless IP
+ * registers were previously mishandled
+ */
+ dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n");
+ ret = -EIO;
+ goto err_flush_rx;
+ }
+
+ /* read data into an intermediate buffer, copying the contents
+ * to userspace when the buffer is full
+ */
+ copied = 0;
+ while (words_available > 0) {
+ copy = min(words_available, READ_BUF_SIZE);
+
+ for (i = 0; i < copy; i++) {
+ tmp_buf[i] = ioread32(fifo->base_addr +
+ XLLF_RDFD_OFFSET);
+ }
+ words_available -= copy;
+
+ if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
+ copy * sizeof(u32))) {
+ ret = -EFAULT;
+ goto err_flush_rx;
+ }
+
+ copied += copy;
+ }
+ mutex_unlock(&fifo->read_lock);
+
+ return bytes_available;
+
+err_flush_rx:
+ while (words_available--)
+ ioread32(fifo->base_addr + XLLF_RDFD_OFFSET);
+
+end_unlock:
+ mutex_unlock(&fifo->read_lock);
+
+ return ret;
+}
+
+/**
+ * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to write to the device.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to write to the device's
+ * data buffer then to the device's packet length register atomically. Also,
+ * we need to lock before checking if the device has available space to avoid
+ * any concurrency issue.
+ *
+ * Returns the number of bytes written to the device or negative error code
+ * on failure.
+ */
+static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
+ unsigned int words_to_write;
+ u32 *txbuf;
+ int ret;
+
+ if (len % sizeof(u32)) {
+ dev_err(fifo->dt_device,
+ "tried to send a packet that isn't word-aligned\n");
+ return -EINVAL;
+ }
+
+ words_to_write = len / sizeof(u32);
+
+ if (!words_to_write) {
+ dev_err(fifo->dt_device,
+ "tried to send a packet of length 0\n");
+ return -EINVAL;
+ }
+
+ /*
+ * In 'Store-and-Forward' mode, the maximum packet that can be
+ * transmitted is limited by the size of the FIFO, which is
+	 * (C_TX_FIFO_DEPTH - 4) * (data interface width / 8) bytes.
+ *
+ * Do not attempt to send a packet larger than 'tx_fifo_depth - 4',
+ * otherwise a 'Transmit Packet Overrun Error' interrupt will be
+ * raised, which requires a reset of the TX circuit to recover.
+ */
+ if (words_to_write > (fifo->tx_fifo_depth - 4))
+ return -EINVAL;
+
+ if (f->f_flags & O_NONBLOCK) {
+ /*
+ * Device opened in non-blocking mode. Try to lock it and then
+ * check if there is any room to write the given buffer.
+ */
+ if (!mutex_trylock(&fifo->write_lock))
+ return -EAGAIN;
+
+ if (words_to_write > ioread32(fifo->base_addr +
+ XLLF_TDFV_OFFSET)) {
+ ret = -EAGAIN;
+ goto end_unlock;
+ }
+ } else {
+ /* opened in blocking mode */
+
+ /* wait for an interrupt (or timeout) if there isn't
+ * currently enough room in the fifo
+ */
+ mutex_lock(&fifo->write_lock);
+ ret = wait_event_interruptible_timeout(fifo->write_queue,
+ ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
+ >= words_to_write,
+ write_timeout);
+
+ if (ret <= 0) {
+ if (ret == 0) {
+ ret = -EAGAIN;
+ } else if (ret != -ERESTARTSYS) {
+ dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
+ ret);
+ }
+
+ goto end_unlock;
+ }
+ }
+
+ txbuf = vmemdup_user(buf, len);
+ if (IS_ERR(txbuf)) {
+ ret = PTR_ERR(txbuf);
+ goto end_unlock;
+ }
+
+ for (int i = 0; i < words_to_write; ++i)
+ iowrite32(txbuf[i], fifo->base_addr + XLLF_TDFD_OFFSET);
+
+ /* write packet size to fifo */
+ iowrite32(len, fifo->base_addr + XLLF_TLR_OFFSET);
+
+ ret = len;
+ kvfree(txbuf);
+end_unlock:
+ mutex_unlock(&fifo->write_lock);
+
+ return ret;
+}
+
+static irqreturn_t axis_fifo_irq(int irq, void *dw)
+{
+ struct axis_fifo *fifo = dw;
+ u32 isr, ier, intr;
+
+ ier = ioread32(fifo->base_addr + XLLF_IER_OFFSET);
+ isr = ioread32(fifo->base_addr + XLLF_ISR_OFFSET);
+ intr = ier & isr;
+
+ if (intr & XLLF_INT_RC_MASK)
+ wake_up(&fifo->read_queue);
+
+ if (intr & XLLF_INT_TC_MASK)
+ wake_up(&fifo->write_queue);
+
+ if (intr & XLLF_INT_RPURE_MASK)
+ dev_err(fifo->dt_device, "receive under-read interrupt\n");
+
+ if (intr & XLLF_INT_RPORE_MASK)
+ dev_err(fifo->dt_device, "receive over-read interrupt\n");
+
+ if (intr & XLLF_INT_RPUE_MASK)
+ dev_err(fifo->dt_device, "receive underrun error interrupt\n");
+
+ if (intr & XLLF_INT_TPOE_MASK)
+ dev_err(fifo->dt_device, "transmit overrun error interrupt\n");
+
+ if (intr & XLLF_INT_TSE_MASK)
+ dev_err(fifo->dt_device,
+ "transmit length mismatch error interrupt\n");
+
+ iowrite32(XLLF_INT_CLEAR_ALL, fifo->base_addr + XLLF_ISR_OFFSET);
+
+ return IRQ_HANDLED;
+}
+
+static int axis_fifo_open(struct inode *inod, struct file *f)
+{
+ struct axis_fifo *fifo = container_of(f->private_data,
+ struct axis_fifo, miscdev);
+ unsigned int flags = f->f_flags & O_ACCMODE;
+
+ f->private_data = fifo;
+
+ if ((flags == O_WRONLY || flags == O_RDWR) && !fifo->has_tx_fifo)
+ return -EPERM;
+
+ if ((flags == O_RDONLY || flags == O_RDWR) && !fifo->has_rx_fifo)
+ return -EPERM;
+
+ return 0;
+}
+
+static int axis_fifo_close(struct inode *inod, struct file *f)
+{
+ f->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = axis_fifo_open,
+ .release = axis_fifo_close,
+ .read = axis_fifo_read,
+ .write = axis_fifo_write
+};
+
+static int axis_fifo_debugfs_regs_show(struct seq_file *m, void *p)
+{
+ static const struct axis_fifo_debug_reg regs[] = {
+ {"isr", XLLF_ISR_OFFSET},
+ {"ier", XLLF_IER_OFFSET},
+ {"tdfv", XLLF_TDFV_OFFSET},
+ {"rdfo", XLLF_RDFO_OFFSET},
+ { /* Sentinel */ },
+ };
+ const struct axis_fifo_debug_reg *reg;
+ struct axis_fifo *fifo = m->private;
+
+ for (reg = regs; reg->name; ++reg) {
+ u32 val = ioread32(fifo->base_addr + reg->offset);
+
+ seq_printf(m, "%*s: 0x%08x\n", AXIS_FIFO_DEBUG_REG_NAME_MAX_LEN,
+ reg->name, val);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(axis_fifo_debugfs_regs);
+
+static void axis_fifo_debugfs_init(struct axis_fifo *fifo)
+{
+ fifo->debugfs_dir = debugfs_create_dir(dev_name(fifo->dt_device), NULL);
+
+ debugfs_create_file("regs", 0444, fifo->debugfs_dir, fifo,
+ &axis_fifo_debugfs_regs_fops);
+}
+
+static int axis_fifo_parse_dt(struct axis_fifo *fifo)
+{
+ int ret;
+ unsigned int value;
+ struct device_node *node = fifo->dt_device->of_node;
+
+ ret = of_property_read_u32(node, "xlnx,axi-str-rxd-tdata-width",
+ &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,axi-str-txd-tdata-width",
+ &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,rx-fifo-depth",
+ &fifo->rx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,tx-fifo-depth",
+ &fifo->tx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,use-rx-data",
+ &fifo->has_rx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,use-tx-data",
+ &fifo->has_tx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+end:
+ return ret;
+}
+
+static int axis_fifo_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem; /* IO mem resources */
+ struct device *dev = &pdev->dev; /* OS device (from device tree) */
+ struct axis_fifo *fifo = NULL;
+ char *device_name;
+ int rc = 0; /* error return value */
+ int irq;
+
+ /* ----------------------------
+ * init wrapper device
+ * ----------------------------
+ */
+
+ device_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+ if (!device_name)
+ return -ENOMEM;
+
+ /* allocate device wrapper memory */
+ fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, fifo);
+ fifo->dt_device = dev;
+
+ init_waitqueue_head(&fifo->read_queue);
+ init_waitqueue_head(&fifo->write_queue);
+
+ mutex_init(&fifo->read_lock);
+ mutex_init(&fifo->write_lock);
+
+ /* ----------------------------
+ * init device memory space
+ * ----------------------------
+ */
+
+ /* get iospace for the device and request physical memory */
+ fifo->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
+ if (IS_ERR(fifo->base_addr))
+ return PTR_ERR(fifo->base_addr);
+
+ /* ----------------------------
+ * init IP
+ * ----------------------------
+ */
+
+ rc = axis_fifo_parse_dt(fifo);
+ if (rc)
+ return rc;
+
+ reset_ip_core(fifo);
+
+ /* ----------------------------
+ * init device interrupts
+ * ----------------------------
+ */
+
+ /* get IRQ resource */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* request IRQ */
+ rc = devm_request_irq(fifo->dt_device, irq, &axis_fifo_irq, 0,
+ DRIVER_NAME, fifo);
+ if (rc) {
+ dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
+ irq);
+ return rc;
+ }
+
+ /* ----------------------------
+ * init char device
+ * ----------------------------
+ */
+ fifo->id = ida_alloc(&axis_fifo_ida, GFP_KERNEL);
+ if (fifo->id < 0)
+ return fifo->id;
+
+ snprintf(device_name, 32, "%s%d", DRIVER_NAME, fifo->id);
+
+ /* create character device */
+ fifo->miscdev.fops = &fops;
+ fifo->miscdev.minor = MISC_DYNAMIC_MINOR;
+ fifo->miscdev.name = device_name;
+ fifo->miscdev.parent = dev;
+ rc = misc_register(&fifo->miscdev);
+ if (rc < 0) {
+ ida_free(&axis_fifo_ida, fifo->id);
+ return rc;
+ }
+
+ axis_fifo_debugfs_init(fifo);
+
+ return 0;
+}
+
+static void axis_fifo_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct axis_fifo *fifo = dev_get_drvdata(dev);
+
+ debugfs_remove(fifo->debugfs_dir);
+ misc_deregister(&fifo->miscdev);
+ ida_free(&axis_fifo_ida, fifo->id);
+}
+
+static const struct of_device_id axis_fifo_of_match[] = {
+ { .compatible = "xlnx,axi-fifo-mm-s-4.1", },
+ { .compatible = "xlnx,axi-fifo-mm-s-4.2", },
+ { .compatible = "xlnx,axi-fifo-mm-s-4.3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, axis_fifo_of_match);
+
+static struct platform_driver axis_fifo_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = axis_fifo_of_match,
+ },
+ .probe = axis_fifo_probe,
+ .remove = axis_fifo_remove,
+};
+
+static int __init axis_fifo_init(void)
+{
+ if (read_timeout >= 0)
+ read_timeout = msecs_to_jiffies(read_timeout);
+ else
+ read_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ if (write_timeout >= 0)
+ write_timeout = msecs_to_jiffies(write_timeout);
+ else
+ write_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ pr_info("axis-fifo driver loaded with parameters read_timeout = %li, write_timeout = %li\n",
+ read_timeout, write_timeout);
+ return platform_driver_register(&axis_fifo_driver);
+}
+
+module_init(axis_fifo_init);
+
+static void __exit axis_fifo_exit(void)
+{
+ platform_driver_unregister(&axis_fifo_driver);
+ ida_destroy(&axis_fifo_ida);
+}
+
+module_exit(axis_fifo_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>");
+MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO IP core driver");
diff --git a/drivers/staging/axis-fifo/axis-fifo.txt b/drivers/staging/axis-fifo/axis-fifo.txt
new file mode 100644
index 000000000000..413b81a53202
--- /dev/null
+++ b/drivers/staging/axis-fifo/axis-fifo.txt
@@ -0,0 +1,96 @@
+Xilinx AXI-Stream FIFO v4.1 IP core
+
+This IP core has read and write AXI-Stream FIFOs, the contents of which can
+be accessed from the AXI4 memory-mapped interface. This is useful for
+transferring data from a processor into the FPGA fabric. The driver creates
+a character device that can be read/written to with standard
+open/read/write/close.
+
+See Xilinx PG080 document for IP details.
+
+Currently supports only store-and-forward mode with a 32-bit
+AXI4-Lite interface. DOES NOT support:
+ - cut-through mode
+ - AXI4 (non-lite)
+
+Required properties:
+- compatible: Should be one of:
+ "xlnx,axi-fifo-mm-s-4.1"
+ "xlnx,axi-fifo-mm-s-4.2"
+ "xlnx,axi-fifo-mm-s-4.3"
+- interrupt-names: Should be "interrupt"
+- interrupt-parent: Should be <&intc>
+- interrupts: Should contain the interrupt lines.
+- reg: Should contain registers location and length.
+- xlnx,axi-str-rxd-protocol: Should be "XIL_AXI_STREAM_ETH_DATA"
+- xlnx,axi-str-rxd-tdata-width: Should be <0x20>
+- xlnx,axi-str-txc-protocol: Should be "XIL_AXI_STREAM_ETH_CTRL"
+- xlnx,axi-str-txc-tdata-width: Should be <0x20>
+- xlnx,axi-str-txd-protocol: Should be "XIL_AXI_STREAM_ETH_DATA"
+- xlnx,axi-str-txd-tdata-width: Should be <0x20>
+- xlnx,axis-tdest-width: AXI-Stream TDEST width (ignored by the driver)
+- xlnx,axis-tid-width: AXI-Stream TID width (ignored by the driver)
+- xlnx,axis-tuser-width: AXI-Stream TUSER width (ignored by the driver)
+- xlnx,data-interface-type: Should be <0x0> (ignored by the driver)
+- xlnx,has-axis-tdest: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tid: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tkeep: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tstrb: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tuser: Should be <0x0> (this feature isn't supported)
+- xlnx,rx-fifo-depth: Depth of RX FIFO in words
+- xlnx,rx-fifo-pe-threshold: RX programmable empty interrupt threshold
+ (ignored by the driver)
+- xlnx,rx-fifo-pf-threshold: RX programmable full interrupt threshold
+ (ignored by the driver)
+- xlnx,s-axi-id-width: Should be <0x4> (ignored by the driver)
+- xlnx,s-axi4-data-width: Should be <0x20> (ignored by the driver)
+- xlnx,select-xpm: Should be <0x0> (ignored by the driver)
+- xlnx,tx-fifo-depth: Depth of TX FIFO in words
+- xlnx,tx-fifo-pe-threshold: TX programmable empty interrupt threshold
+ (ignored by the driver)
+- xlnx,tx-fifo-pf-threshold: TX programmable full interrupt threshold
+ (ignored by the driver)
+- xlnx,use-rx-cut-through: Should be <0x0> (this feature isn't supported)
+- xlnx,use-rx-data: <0x1> if RX FIFO is enabled, <0x0> otherwise
+- xlnx,use-tx-ctrl: Should be <0x0> (this feature isn't supported)
+- xlnx,use-tx-cut-through: Should be <0x0> (this feature isn't supported)
+- xlnx,use-tx-data: <0x1> if TX FIFO is enabled, <0x0> otherwise
+
+Example:
+
+axi_fifo_mm_s_0: axi_fifo_mm_s@43c00000 {
+ compatible = "xlnx,axi-fifo-mm-s-4.1";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&intc>;
+ interrupts = <0 29 4>;
+ reg = <0x43c00000 0x10000>;
+ xlnx,axi-str-rxd-protocol = "XIL_AXI_STREAM_ETH_DATA";
+ xlnx,axi-str-rxd-tdata-width = <0x20>;
+ xlnx,axi-str-txc-protocol = "XIL_AXI_STREAM_ETH_CTRL";
+ xlnx,axi-str-txc-tdata-width = <0x20>;
+ xlnx,axi-str-txd-protocol = "XIL_AXI_STREAM_ETH_DATA";
+ xlnx,axi-str-txd-tdata-width = <0x20>;
+ xlnx,axis-tdest-width = <0x4>;
+ xlnx,axis-tid-width = <0x4>;
+ xlnx,axis-tuser-width = <0x4>;
+ xlnx,data-interface-type = <0x0>;
+ xlnx,has-axis-tdest = <0x0>;
+ xlnx,has-axis-tid = <0x0>;
+ xlnx,has-axis-tkeep = <0x0>;
+ xlnx,has-axis-tstrb = <0x0>;
+ xlnx,has-axis-tuser = <0x0>;
+ xlnx,rx-fifo-depth = <0x200>;
+ xlnx,rx-fifo-pe-threshold = <0x2>;
+ xlnx,rx-fifo-pf-threshold = <0x1fb>;
+ xlnx,s-axi-id-width = <0x4>;
+ xlnx,s-axi4-data-width = <0x20>;
+ xlnx,select-xpm = <0x0>;
+ xlnx,tx-fifo-depth = <0x8000>;
+ xlnx,tx-fifo-pe-threshold = <0x200>;
+ xlnx,tx-fifo-pf-threshold = <0x7ffb>;
+ xlnx,use-rx-cut-through = <0x0>;
+ xlnx,use-rx-data = <0x0>;
+ xlnx,use-tx-ctrl = <0x0>;
+ xlnx,use-tx-cut-through = <0x0>;
+ xlnx,use-tx-data = <0x1>;
+};
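
User-space example (a minimal sketch; the device node name, packet contents and buffer sizes are illustrative assumptions):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/axis_fifo0", O_RDWR);
	uint32_t tx[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	uint32_t rx[512];
	ssize_t n;

	if (fd < 0)
		return 1;

	/* each write() sends exactly one packet; the length must be a
	 * multiple of 4 bytes and fit in the TX FIFO (depth - 4 words)
	 */
	if (write(fd, tx, sizeof(tx)) != sizeof(tx))
		return 1;

	/* each read() returns exactly one received packet, or fails
	 * with EAGAIN if none arrives within read_timeout ms
	 */
	n = read(fd, rx, sizeof(rx));

	close(fd);
	return n < 0 ? 1 : 0;
}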
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
deleted file mode 100644
index 3f287c48e082..000000000000
--- a/drivers/staging/board/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-config STAGING_BOARD
- bool "Staging Board Support"
- depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
- help
- Select to enable per-board staging support code.
-
- If in doubt, say N here.
-
diff --git a/drivers/staging/board/Makefile b/drivers/staging/board/Makefile
deleted file mode 100644
index 6842745feb94..000000000000
--- a/drivers/staging/board/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y := board.o
-obj-$(CONFIG_ARCH_EMEV2) += kzm9d.o
-obj-$(CONFIG_ARCH_R8A7740) += armadillo800eva.o
diff --git a/drivers/staging/board/TODO b/drivers/staging/board/TODO
deleted file mode 100644
index 8db70e10aa67..000000000000
--- a/drivers/staging/board/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* replace platform device code with DT nodes once the driver supports DT
-* remove staging board code when no more platform devices are needed
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
deleted file mode 100644
index 9c41652ee908..000000000000
--- a/drivers/staging/board/armadillo800eva.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Staging board support for Armadillo 800 eva.
- * Enable not-yet-DT-capable devices here.
- *
- * Based on board-armadillo800eva.c
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/fb.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-
-#include <video/sh_mobile_lcdc.h>
-
-#include "board.h"
-
-
-static struct fb_videomode lcdc0_mode = {
- .name = "AMPIER/AM-800480",
- .xres = 800,
- .yres = 480,
- .left_margin = 88,
- .right_margin = 40,
- .hsync_len = 128,
- .upper_margin = 20,
- .lower_margin = 5,
- .vsync_len = 5,
- .sync = 0,
-};
-
-static struct sh_mobile_lcdc_info lcdc0_info = {
- .clock_source = LCDC_CLK_BUS,
- .ch[0] = {
- .chan = LCDC_CHAN_MAINLCD,
- .fourcc = V4L2_PIX_FMT_RGB565,
- .interface_type = RGB24,
- .clock_divider = 5,
- .flags = 0,
- .lcd_modes = &lcdc0_mode,
- .num_modes = 1,
- .panel_cfg = {
- .width = 111,
- .height = 68,
- },
- },
-};
-
-static struct resource lcdc0_resources[] = {
- [0] = {
- .name = "LCD0",
- .start = 0xfe940000,
- .end = 0xfe943fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 177 + 32,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device lcdc0_device = {
- .name = "sh_mobile_lcdc_fb",
- .num_resources = ARRAY_SIZE(lcdc0_resources),
- .resource = lcdc0_resources,
- .id = 0,
- .dev = {
- .platform_data = &lcdc0_info,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static const struct board_staging_clk lcdc0_clocks[] __initconst = {
- { "lcdc0", NULL, "sh_mobile_lcdc_fb.0" },
-};
-
-static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
- {
- .pdev = &lcdc0_device,
- .clocks = lcdc0_clocks,
- .nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
- },
-};
-
-static void __init armadillo800eva_init(void)
-{
- board_staging_gic_setup_xlate("arm,cortex-a9-gic", 32);
- board_staging_register_devices(armadillo800eva_devices,
- ARRAY_SIZE(armadillo800eva_devices));
-}
-
-board_staging("renesas,armadillo800eva", armadillo800eva_init);
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
deleted file mode 100644
index 3eb5eb8f069c..000000000000
--- a/drivers/staging/board/board.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2014 Magnus Damm
- * Copyright (C) 2015 Glider bvba
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#define pr_fmt(fmt) "board_staging: " fmt
-
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-
-#include "board.h"
-
-static struct device_node *irqc_node __initdata;
-static unsigned int irqc_base __initdata;
-
-static bool find_by_address(u64 base_address)
-{
- struct device_node *dn = of_find_all_nodes(NULL);
- struct resource res;
-
- while (dn) {
- if (!of_address_to_resource(dn, 0, &res)) {
- if (res.start == base_address) {
- of_node_put(dn);
- return true;
- }
- }
- dn = of_find_all_nodes(dn);
- }
-
- return false;
-}
-
-bool __init board_staging_dt_node_available(const struct resource *resource,
- unsigned int num_resources)
-{
- unsigned int i;
-
- for (i = 0; i < num_resources; i++) {
- const struct resource *r = resource + i;
-
- if (resource_type(r) == IORESOURCE_MEM)
- if (find_by_address(r->start))
- return true; /* DT node available */
- }
-
- return false; /* Nothing found */
-}
-
-int __init board_staging_gic_setup_xlate(const char *gic_match,
- unsigned int base)
-{
- WARN_ON(irqc_node);
-
- irqc_node = of_find_compatible_node(NULL, NULL, gic_match);
-
- WARN_ON(!irqc_node);
- if (!irqc_node)
- return -ENOENT;
-
- irqc_base = base;
- return 0;
-}
-
-static void __init gic_fixup_resource(struct resource *res)
-{
- struct of_phandle_args irq_data;
- unsigned int hwirq = res->start;
- unsigned int virq;
-
- if (resource_type(res) != IORESOURCE_IRQ || !irqc_node)
- return;
-
- irq_data.np = irqc_node;
- irq_data.args_count = 3;
- irq_data.args[0] = 0;
- irq_data.args[1] = hwirq - irqc_base;
- switch (res->flags &
- (IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE |
- IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL)) {
- case IORESOURCE_IRQ_LOWEDGE:
- irq_data.args[2] = IRQ_TYPE_EDGE_FALLING;
- break;
- case IORESOURCE_IRQ_HIGHEDGE:
- irq_data.args[2] = IRQ_TYPE_EDGE_RISING;
- break;
- case IORESOURCE_IRQ_LOWLEVEL:
- irq_data.args[2] = IRQ_TYPE_LEVEL_LOW;
- break;
- case IORESOURCE_IRQ_HIGHLEVEL:
- default:
- irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
- break;
- }
-
- virq = irq_create_of_mapping(&irq_data);
- if (WARN_ON(!virq))
- return;
-
- pr_debug("hwirq %u -> virq %u\n", hwirq, virq);
- res->start = virq;
-}
-
-void __init board_staging_gic_fixup_resources(struct resource *res,
- unsigned int nres)
-{
- unsigned int i;
-
- for (i = 0; i < nres; i++)
- gic_fixup_resource(&res[i]);
-}
-
-int __init board_staging_register_clock(const struct board_staging_clk *bsc)
-{
- int error;
-
- pr_debug("Aliasing clock %s for con_id %s dev_id %s\n", bsc->clk,
- bsc->con_id, bsc->dev_id);
- error = clk_add_alias(bsc->con_id, bsc->dev_id, bsc->clk, NULL);
- if (error)
- pr_err("Failed to alias clock %s (%d)\n", bsc->clk, error);
-
- return error;
-}
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-static int board_staging_add_dev_domain(struct platform_device *pdev,
- const char *domain)
-{
- struct of_phandle_args pd_args;
- struct generic_pm_domain *pd;
- struct device_node *np;
-
- np = of_find_node_by_path(domain);
- if (!np) {
- pr_err("Cannot find domain node %s\n", domain);
- return -ENOENT;
- }
-
- pd_args.np = np;
- pd_args.args_count = 0;
- pd = of_genpd_get_from_provider(&pd_args);
- if (IS_ERR(pd)) {
- pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
- return PTR_ERR(pd);
-
- }
- pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
-
- return pm_genpd_add_device(pd, &pdev->dev);
-}
-#else
-static inline int board_staging_add_dev_domain(struct platform_device *pdev,
- const char *domain)
-{
- return 0;
-}
-#endif
-
-int __init board_staging_register_device(const struct board_staging_dev *dev)
-{
- struct platform_device *pdev = dev->pdev;
- unsigned int i;
- int error;
-
- pr_debug("Trying to register device %s\n", pdev->name);
- if (board_staging_dt_node_available(pdev->resource,
- pdev->num_resources)) {
- pr_warn("Skipping %s, already in DT\n", pdev->name);
- return -EEXIST;
- }
-
- board_staging_gic_fixup_resources(pdev->resource, pdev->num_resources);
-
- for (i = 0; i < dev->nclocks; i++)
- board_staging_register_clock(&dev->clocks[i]);
-
- error = platform_device_register(pdev);
- if (error) {
- pr_err("Failed to register device %s (%d)\n", pdev->name,
- error);
- return error;
- }
-
- if (dev->domain)
- board_staging_add_dev_domain(pdev, dev->domain);
-
- return error;
-}
-
-void __init board_staging_register_devices(const struct board_staging_dev *devs,
- unsigned int ndevs)
-{
- unsigned int i;
-
- for (i = 0; i < ndevs; i++)
- board_staging_register_device(&devs[i]);
-}
diff --git a/drivers/staging/board/board.h b/drivers/staging/board/board.h
deleted file mode 100644
index 42ed12513220..000000000000
--- a/drivers/staging/board/board.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __BOARD_H__
-#define __BOARD_H__
-
-#include <linux/init.h>
-#include <linux/of.h>
-
-struct board_staging_clk {
- const char *clk;
- const char *con_id;
- const char *dev_id;
-};
-
-struct board_staging_dev {
- /* Platform Device */
- struct platform_device *pdev;
- /* Clocks (optional) */
- const struct board_staging_clk *clocks;
- unsigned int nclocks;
- /* Generic PM Domain (optional) */
- const char *domain;
-};
-
-struct resource;
-
-bool board_staging_dt_node_available(const struct resource *resource,
- unsigned int num_resources);
-int board_staging_gic_setup_xlate(const char *gic_match, unsigned int base);
-void board_staging_gic_fixup_resources(struct resource *res, unsigned int nres);
-int board_staging_register_clock(const struct board_staging_clk *bsc);
-int board_staging_register_device(const struct board_staging_dev *dev);
-void board_staging_register_devices(const struct board_staging_dev *devs,
- unsigned int ndevs);
-
-#define board_staging(str, fn) \
-static int __init runtime_board_check(void) \
-{ \
- if (of_machine_is_compatible(str)) \
- fn(); \
- \
- return 0; \
-} \
- \
-device_initcall(runtime_board_check)
-
-#endif /* __BOARD_H__ */
diff --git a/drivers/staging/board/kzm9d.c b/drivers/staging/board/kzm9d.c
deleted file mode 100644
index 8d1eb09bc66e..000000000000
--- a/drivers/staging/board/kzm9d.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Staging board support for KZM9D. Enable not-yet-DT-capable devices here. */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include "board.h"
-
-static struct resource usbs1_res[] __initdata = {
- DEFINE_RES_MEM(0xe2800000, 0x2000),
- DEFINE_RES_IRQ(159),
-};
-
-static void __init kzm9d_init(void)
-{
- board_staging_gic_setup_xlate("arm,cortex-a9-gic", 32);
-
- if (!board_staging_dt_node_available(usbs1_res,
- ARRAY_SIZE(usbs1_res))) {
- board_staging_gic_fixup_resources(usbs1_res,
- ARRAY_SIZE(usbs1_res));
- platform_device_register_simple("emxx_udc", -1, usbs1_res,
- ARRAY_SIZE(usbs1_res));
- }
-}
-
-board_staging("renesas,kzm9d", kzm9d_init);
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
deleted file mode 100644
index 357af02c562c..000000000000
--- a/drivers/staging/clocking-wizard/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Xilinx Clocking Wizard Driver
-#
-
-config COMMON_CLK_XLNX_CLKWZRD
- tristate "Xilinx Clocking Wizard"
- depends on COMMON_CLK && OF
- ---help---
- Support for the Xilinx Clocking Wizard IP core clock generator.
diff --git a/drivers/staging/clocking-wizard/Makefile b/drivers/staging/clocking-wizard/Makefile
deleted file mode 100644
index 5ad352f521fe..000000000000
--- a/drivers/staging/clocking-wizard/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/TODO b/drivers/staging/clocking-wizard/TODO
deleted file mode 100644
index ebe99db7d153..000000000000
--- a/drivers/staging/clocking-wizard/TODO
+++ /dev/null
@@ -1,12 +0,0 @@
-TODO:
- - support for fractional multiplier
- - support for fractional divider (output 0 only)
- - support for set_rate() operations (may benefit from Stephen Boyd's
- refactoring of the clk primitives: https://lkml.org/lkml/2014/9/5/766)
- - review arithmetic
- - overflow after multiplication?
- - maximize accuracy before divisions
-
-Patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Sören Brinkmann <soren.brinkmann@xilinx.com>
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
deleted file mode 100644
index b8e2f611fd47..000000000000
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Xilinx 'Clocking Wizard' driver
- *
- * Copyright (C) 2013 - 2014 Xilinx
- *
- * Sören Brinkmann <soren.brinkmann@xilinx.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/module.h>
-#include <linux/err.h>
-
-#define WZRD_NUM_OUTPUTS 7
-#define WZRD_ACLK_MAX_FREQ 250000000UL
-
-#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))
-
-#define WZRD_CLkOUT0_FRAC_EN BIT(18)
-#define WZRD_CLkFBOUT_FRAC_EN BIT(26)
-
-#define WZRD_CLKFBOUT_MULT_SHIFT 8
-#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
-#define WZRD_DIVCLK_DIVIDE_SHIFT 0
-#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
-#define WZRD_CLKOUT_DIVIDE_SHIFT 0
-#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
-
-enum clk_wzrd_int_clks {
- wzrd_clk_mul,
- wzrd_clk_mul_div,
- wzrd_clk_int_max
-};
-
-/**
- * struct clk_wzrd:
- * @clk_data: Clock data
- * @nb: Notifier block
- * @base: Memory base
- * @clk_in1: Handle to input clock 'clk_in1'
- * @axi_clk: Handle to input clock 's_axi_aclk'
- * @clks_internal: Internal clocks
- * @clkout: Output clocks
- * @speed_grade: Speed grade of the device
- * @suspended: Flag indicating power state of the device
- */
-struct clk_wzrd {
- struct clk_onecell_data clk_data;
- struct notifier_block nb;
- void __iomem *base;
- struct clk *clk_in1;
- struct clk *axi_clk;
- struct clk *clks_internal[wzrd_clk_int_max];
- struct clk *clkout[WZRD_NUM_OUTPUTS];
- int speed_grade;
- bool suspended;
-};
-#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
-
-/* maximum frequencies for input/output clocks per speed grade */
-static const unsigned long clk_wzrd_max_freq[] = {
- 800000000UL,
- 933000000UL,
- 1066000000UL
-};
-
-static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- unsigned long max;
- struct clk_notifier_data *ndata = data;
- struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
-
- if (clk_wzrd->suspended)
- return NOTIFY_OK;
-
- if (ndata->clk == clk_wzrd->clk_in1)
- max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
- else if (ndata->clk == clk_wzrd->axi_clk)
- max = WZRD_ACLK_MAX_FREQ;
- else
- return NOTIFY_DONE; /* should never happen */
-
- switch (event) {
- case PRE_RATE_CHANGE:
- if (ndata->new_rate > max)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- case POST_RATE_CHANGE:
- case ABORT_RATE_CHANGE:
- default:
- return NOTIFY_DONE;
- }
-}
-
-static int __maybe_unused clk_wzrd_suspend(struct device *dev)
-{
- struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
-
- clk_disable_unprepare(clk_wzrd->axi_clk);
- clk_wzrd->suspended = true;
-
- return 0;
-}
-
-static int __maybe_unused clk_wzrd_resume(struct device *dev)
-{
- int ret;
- struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
-
- ret = clk_prepare_enable(clk_wzrd->axi_clk);
- if (ret) {
- dev_err(dev, "unable to enable s_axi_aclk\n");
- return ret;
- }
-
- clk_wzrd->suspended = false;
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
- clk_wzrd_resume);
-
-static int clk_wzrd_probe(struct platform_device *pdev)
-{
- int i, ret;
- u32 reg;
- unsigned long rate;
- const char *clk_name;
- struct clk_wzrd *clk_wzrd;
- struct resource *mem;
- struct device_node *np = pdev->dev.of_node;
-
- clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
- if (!clk_wzrd)
- return -ENOMEM;
- platform_set_drvdata(pdev, clk_wzrd);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(clk_wzrd->base))
- return PTR_ERR(clk_wzrd->base);
-
- ret = of_property_read_u32(np, "speed-grade", &clk_wzrd->speed_grade);
- if (!ret) {
- if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
- dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
- clk_wzrd->speed_grade);
- clk_wzrd->speed_grade = 0;
- }
- }
-
- clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
- if (IS_ERR(clk_wzrd->clk_in1)) {
- if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "clk_in1 not found\n");
- return PTR_ERR(clk_wzrd->clk_in1);
- }
-
- clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(clk_wzrd->axi_clk)) {
- if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "s_axi_aclk not found\n");
- return PTR_ERR(clk_wzrd->axi_clk);
- }
- ret = clk_prepare_enable(clk_wzrd->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
- return ret;
- }
- rate = clk_get_rate(clk_wzrd->axi_clk);
- if (rate > WZRD_ACLK_MAX_FREQ) {
- dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
- rate);
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- /* we don't support fractional div/mul yet */
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLkFBOUT_FRAC_EN;
- reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
- WZRD_CLkOUT0_FRAC_EN;
- if (reg)
- dev_warn(&pdev->dev, "fractional div/mul not supported\n");
-
- /* register multiplier */
- reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
- clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
- clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor(
- &pdev->dev, clk_name,
- __clk_get_name(clk_wzrd->clk_in1),
- 0, reg, 1);
- kfree(clk_name);
- if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
- dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
- goto err_disable_clk;
- }
-
- /* register div */
- reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_DIVCLK_DIVIDE_MASK) >> WZRD_DIVCLK_DIVIDE_SHIFT;
- clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_rm_int_clk;
- }
-
- clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_fixed_factor(
- &pdev->dev, clk_name,
- __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
- 0, 1, reg);
- if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
- dev_err(&pdev->dev, "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
- goto err_rm_int_clk;
- }
-
- /* register div per output */
- for (i = WZRD_NUM_OUTPUTS - 1; i >= 0 ; i--) {
- const char *clkout_name;
-
- if (of_property_read_string_index(np, "clock-output-names", i,
- &clkout_name)) {
- dev_err(&pdev->dev,
- "clock output name not specified\n");
- ret = -EINVAL;
- goto err_rm_int_clks;
- }
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
- reg &= WZRD_CLKOUT_DIVIDE_MASK;
- reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
- clk_wzrd->clkout[i] = clk_register_fixed_factor(&pdev->dev,
- clkout_name, clk_name, 0, 1, reg);
- if (IS_ERR(clk_wzrd->clkout[i])) {
- int j;
-
- for (j = i + 1; j < WZRD_NUM_OUTPUTS; j++)
- clk_unregister(clk_wzrd->clkout[j]);
- dev_err(&pdev->dev,
- "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clkout[i]);
- goto err_rm_int_clks;
- }
- }
-
- kfree(clk_name);
-
- clk_wzrd->clk_data.clks = clk_wzrd->clkout;
- clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
-
- if (clk_wzrd->speed_grade) {
- clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
-
- ret = clk_notifier_register(clk_wzrd->clk_in1,
- &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
-
- ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
- }
-
- return 0;
-
-err_rm_int_clks:
- clk_unregister(clk_wzrd->clks_internal[1]);
-err_rm_int_clk:
- kfree(clk_name);
- clk_unregister(clk_wzrd->clks_internal[0]);
-err_disable_clk:
- clk_disable_unprepare(clk_wzrd->axi_clk);
-
- return ret;
-}
-
-static int clk_wzrd_remove(struct platform_device *pdev)
-{
- int i;
- struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
-
- of_clk_del_provider(pdev->dev.of_node);
-
- for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
- clk_unregister(clk_wzrd->clkout[i]);
- for (i = 0; i < wzrd_clk_int_max; i++)
- clk_unregister(clk_wzrd->clks_internal[i]);
-
- if (clk_wzrd->speed_grade) {
- clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
- clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
- }
-
- clk_disable_unprepare(clk_wzrd->axi_clk);
-
- return 0;
-}
-
-static const struct of_device_id clk_wzrd_ids[] = {
- { .compatible = "xlnx,clocking-wizard" },
- { },
-};
-MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
-
-static struct platform_driver clk_wzrd_driver = {
- .driver = {
- .name = "clk-wizard",
- .of_match_table = clk_wzrd_ids,
- .pm = &clk_wzrd_dev_pm_ops,
- },
- .probe = clk_wzrd_probe,
- .remove = clk_wzrd_remove,
-};
-module_platform_driver(clk_wzrd_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com");
-MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
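Taken together, probe() above models the Clocking Wizard as a short fixed-factor
chain: clk_in1 is multiplied by the CLKFBOUT_MULT field of configuration
register 0, divided by DIVCLK_DIVIDE, and divided once more by the per-output
CLKOUT_DIVIDE field. The standalone C sketch below shows how those fields decode
into an output rate; only the masks and shifts mirror the driver, while the
register contents and the 100 MHz input are made-up example values.

#include <stdint.h>
#include <stdio.h>

#define MULT_SHIFT	8
#define MULT_MASK	(0xffu << MULT_SHIFT)	/* cf. WZRD_CLKFBOUT_MULT_MASK */
#define DIV_MASK	0xffu			/* cf. WZRD_DIVCLK_DIVIDE_MASK */
#define ODIV_MASK	0xffu			/* cf. WZRD_CLKOUT_DIVIDE_MASK */

int main(void)
{
	uint32_t cfg0 = (10u << MULT_SHIFT) | 1u;	/* mult = 10, div = 1 */
	uint32_t cfg_out = 8u;				/* clkout divide = 8 */

	uint32_t mult = (cfg0 & MULT_MASK) >> MULT_SHIFT;
	uint32_t div = cfg0 & DIV_MASK;
	uint32_t odiv = cfg_out & ODIV_MASK;

	uint64_t clk_in1 = 100000000u;			/* 100 MHz */

	/* the same chain probe() registers: in * mult / div / odiv */
	printf("%llu Hz\n",
	       (unsigned long long)(clk_in1 * mult / div / odiv)); /* 125000000 */
	return 0;
}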
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
deleted file mode 100644
index 723271e93316..000000000000
--- a/drivers/staging/clocking-wizard/dt-binding.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Binding for Xilinx Clocking Wizard IP Core
-
-This binding uses the common clock binding[1]. Details about the devices can be
-found in the product guide[2].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Clocking Wizard Product Guide
-http://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-clk-wiz.pdf
-
-Required properties:
- - compatible: Must be 'xlnx,clocking-wizard'
- - reg: Base and size of the core's register space
- - clocks: Handles to the input clocks
- - clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
- - clock-output-names: Names for the output clocks
-
-Optional properties:
- - speed-grade: Speed grade of the device (valid values are 1..3)
-
-Example:
- clock-generator@40040000 {
- reg = <0x40040000 0x1000>;
- compatible = "xlnx,clocking-wizard";
- speed-grade = <1>;
- clock-names = "clk_in1", "s_axi_aclk";
- clocks = <&clkc 15>, <&clkc 15>;
- clock-output-names = "clk_out0", "clk_out1", "clk_out2",
- "clk_out3", "clk_out4", "clk_out5",
- "clk_out6", "clk_out7";
- };
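Since the driver exposes its outputs through of_clk_add_provider() with
of_clk_src_onecell_get, a consumer node references the wizard by phandle and
output index, e.g. clocks = <&clk_wiz 0> for the first name listed in
clock-output-names. Below is a hedged sketch of such a consumer's probe path;
the "acme,demo-periph" compatible, the driver name and the assumption that the
node's first clock phandle points at a wizard output are all illustrative, and
error/remove clean-up is omitted for brevity.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_periph_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* first (unnamed) clock phandle in the consumer node */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "clocked at %lu Hz\n", clk_get_rate(clk));
	return 0;
}

static const struct of_device_id demo_periph_of_match[] = {
	{ .compatible = "acme,demo-periph" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_periph_of_match);

static struct platform_driver demo_periph_driver = {
	.driver = {
		.name = "demo-periph",
		.of_match_table = demo_periph_of_match,
	},
	.probe = demo_periph_probe,
};
module_platform_driver(demo_periph_driver);

MODULE_LICENSE("GPL");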
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
deleted file mode 100644
index a5f2a3e45f26..000000000000
--- a/drivers/staging/comedi/Kconfig
+++ /dev/null
@@ -1,1305 +0,0 @@
-config COMEDI
- tristate "Data acquisition support (comedi)"
- depends on m
- ---help---
- Enable support for a wide range of data acquisition devices
- for Linux.
-
-if COMEDI
-
-config COMEDI_DEBUG
- bool "Comedi debugging"
- ---help---
- This is an option for use by developers; most people should
- say N here. This enables comedi core and driver debugging.
-
-config COMEDI_DEFAULT_BUF_SIZE_KB
- int "Comedi default initial asynchronous buffer size in KiB"
- default "2048"
- ---help---
- This is the default asynchronous buffer size which is used for
- commands running in the background in kernel space. This
- defaults to 2048 KiB of memory so that a 16 channel card
- running at 10 kHz has 2-4 seconds of buffer.
-
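As a quick sanity check of the 2-4 second figure above (worked arithmetic, not
kernel code, and assuming 32-bit samples): 16 channels at 10 kHz and 4 bytes per
sample stream 640,000 bytes/s, so 2048 KiB covers roughly 3.3 seconds.

#include <stdio.h>

int main(void)
{
	unsigned int channels = 16, rate_hz = 10000, bytes_per_sample = 4;
	unsigned int buf_kib = 2048;
	double bytes_per_sec = (double)channels * rate_hz * bytes_per_sample;

	/* 2048 KiB / (16 ch * 10 kHz * 4 B) ~= 3.3 seconds of buffering */
	printf("%.1f s\n", buf_kib * 1024.0 / bytes_per_sec);
	return 0;
}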
-config COMEDI_DEFAULT_BUF_MAXSIZE_KB
- int "Comedi default maximum asynchronous buffer size in KiB"
- default "20480"
- ---help---
- This is the default maximum asynchronous buffer size which can
- be requested by a userspace program without root privileges.
- This is set to 20480 KiB so that a fast I/O card with 16
- channels running at 100 kHz has 2-4 seconds of buffer.
-
-menuconfig COMEDI_MISC_DRIVERS
- bool "Comedi misc drivers"
- ---help---
- Enable comedi misc drivers to be built
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about misc non-hardware comedi drivers.
-
-if COMEDI_MISC_DRIVERS
-
-config COMEDI_BOND
- tristate "Comedi device bonding support"
- select COMEDI_KCOMEDILIB
- ---help---
- Enable support for a driver to 'bond' (merge) multiple subdevices
- from multiple devices together as one.
-
- Currently, it only handles digital I/O subdevices.
-
- To compile this driver as a module, choose M here: the module will be
- called comedi_bond.
-
-config COMEDI_TEST
- tristate "Fake waveform generator support"
- ---help---
- Enable support for the fake waveform generator.
- This driver is mainly for testing purposes, but can also be used to
- generate sample waveforms on systems that don't have data acquisition
- hardware.
-
- To compile this driver as a module, choose M here: the module will be
- called comedi_test.
-
-config COMEDI_PARPORT
- tristate "Parallel port support"
- ---help---
- Enable support for the standard parallel port.
- A cheap and easy way to get a few more digital I/O lines. Steal
- additional parallel ports from old computers or your neighbors'
- computers.
-
- To compile this driver as a module, choose M here: the module will be
- called comedi_parport.
-
-config COMEDI_SERIAL2002
- tristate "Driver for serial connected hardware"
- ---help---
- Enable support for serial connected hardware
-
- To compile this driver as a module, choose M here: the module will be
- called serial2002.
-
-config COMEDI_SSV_DNP
- tristate "SSV Embedded Systems DIL/Net-PC support"
- depends on X86_32 || COMPILE_TEST
- ---help---
- Enable support for SSV Embedded Systems DIL/Net-PC
-
- To compile this driver as a module, choose M here: the module will be
- called ssv_dnp.
-
-endif # COMEDI_MISC_DRIVERS
-
-menuconfig COMEDI_ISA_DRIVERS
- bool "Comedi ISA and PC/104 drivers"
- ---help---
- Enable comedi ISA and PC/104 drivers to be built
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about ISA and PC/104 comedi drivers.
-
-if COMEDI_ISA_DRIVERS
-
-config COMEDI_PCL711
- tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
- select COMEDI_8254
- ---help---
- Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
-
- To compile this driver as a module, choose M here: the module will be
- called pcl711.
-
-config COMEDI_PCL724
- tristate "Advantech PCL-722/724/731 and ADlink ACL-7122/7124/PET-48DIO"
- select COMEDI_8255
- ---help---
- Enable support for ISA and PC/104 based 8255 digital i/o boards. This
- driver provides a legacy comedi driver wrapper for the generic 8255
- support driver.
-
- Supported boards include:
- Advantech PCL-724 24 channels
- Advantech PCL-722 144 (or 96) channels
- Advantech PCL-731 48 channels
- ADlink ACL-7122 144 (or 96) channels
- ADlink ACL-7124 24 channels
- ADlink PET-48DIO 48 channels
- WinSystems PCM-IO48 48 channels (PC/104)
- Diamond Systems ONYX-MM-DIO 48 channels (PC/104)
-
- To compile this driver as a module, choose M here: the module will be
- called pcl724.
-
-config COMEDI_PCL726
- tristate "Advantech PCL-726 and compatible ISA card support"
- ---help---
- Enable support for Advantech PCL-726 and compatible ISA cards.
-
- To compile this driver as a module, choose M here: the module will be
- called pcl726.
-
-config COMEDI_PCL730
- tristate "Simple Digital I/O board support (8-bit ports)"
- ---help---
- Enable support for various simple ISA or PC/104 Digital I/O boards.
- These boards all use 8-bit I/O ports.
-
- Advantech PCL-730 iso - 16 in/16 out ttl - 16 in/16 out
- ICP ISO-730 iso - 16 in/16 out ttl - 16 in/16 out
- ADlink ACL-7130 iso - 16 in/16 out ttl - 16 in/16 out
- Advantech PCM-3730 iso - 8 in/8 out ttl - 16 in/16 out
- Advantech PCL-725 iso - 8 in/8 out
- ICP P8R8-DIO iso - 8 in/8 out
- ADlink ACL-7225b iso - 16 in/16 out
- ICP P16R16-DIO iso - 16 in/16 out
- Advantech PCL-733 iso - 32 in
- Advantech PCL-734 iso - 32 out
- Diamond Systems OPMM-1616-XT iso - 16 in/16 out
- Diamond Systems PEARL-MM-P iso - 16 out
- Diamond Systems IR104-PBF iso - 20 in/20 out
-
- To compile this driver as a module, choose M here: the module will be
- called pcl730.
-
-config COMEDI_PCL812
- tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
- select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
- ---help---
- Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
- ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
- A-822PGH/PGL, A-823PGH/PGL, A-826PG and ICP DAS ISO-813 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called pcl812.
-
-config COMEDI_PCL816
- tristate "Advantech PCL-814 and PCL-816 ISA card support"
- select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
- ---help---
- Enable support for Advantech PCL-814 and PCL-816 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called pcl816.
-
-config COMEDI_PCL818
- tristate "Advantech PCL-718 and PCL-818 ISA card support"
- select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
- ---help---
- Enable support for Advantech PCL-818 ISA cards
- PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
-
- To compile this driver as a module, choose M here: the module will be
- called pcl818.
-
-config COMEDI_PCM3724
- tristate "Advantech PCM-3724 PC/104 card support"
- select COMEDI_8255
- ---help---
- Enable support for Advantech PCM-3724 PC/104 cards.
-
- To compile this driver as a module, choose M here: the module will be
- called pcm3724.
-
-config COMEDI_AMPLC_DIO200_ISA
- tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
- select COMEDI_AMPLC_DIO200
- ---help---
- Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
- PC272E ISA DIO boards
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_dio200.
-
-config COMEDI_AMPLC_PC236_ISA
- tristate "Amplicon PC36AT DIO board support"
- select COMEDI_AMPLC_PC236
- ---help---
- Enable support for Amplicon PC36AT ISA DIO board.
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_pc236.
-
-config COMEDI_AMPLC_PC263_ISA
- tristate "Amplicon PC263 relay board support"
- ---help---
- Enable support for Amplicon PC263 ISA relay board. This board has
- 16 reed relay output channels.
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_pc263.
-
-config COMEDI_RTI800
- tristate "Analog Devices RTI-800/815 ISA card support"
- ---help---
- Enable support for Analog Devices RTI-800/815 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called rti800.
-
-config COMEDI_RTI802
- tristate "Analog Devices RTI-802 ISA card support"
- ---help---
- Enable support for Analog Devices RTI-802 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called rti802.
-
-config COMEDI_DAC02
- tristate "Keithley Metrabyte DAC02 compatible ISA card support"
- ---help---
- Enable support for Keithley Metrabyte DAC02 compatible ISA cards.
-
- To compile this driver as a module, choose M here: the module will be
- called dac02.
-
-config COMEDI_DAS16M1
- tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
- select COMEDI_8254
- select COMEDI_8255
- ---help---
- Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
-
- To compile this driver as a module, choose M here: the module will be
- called das16m1.
-
-config COMEDI_DAS08_ISA
- tristate "DAS-08 compatible ISA and PC/104 card support"
- select COMEDI_DAS08
- ---help---
- Enable support for Keithley Metrabyte/ComputerBoards DAS08
- and compatible ISA and PC/104 cards:
- Keithley Metrabyte/ComputerBoards DAS08, DAS08-PGM, DAS08-PGH,
- DAS08-PGL, DAS08-AOH, DAS08-AOL, DAS08-AOM, DAS08/JR-AO,
- DAS08/JR-16-AO, PC104-DAS08, DAS08/JR/16.
-
- To compile this driver as a module, choose M here: the module will be
- called das08_isa.
-
-config COMEDI_DAS16
- tristate "DAS-16 compatible ISA and PC/104 card support"
- select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
- select COMEDI_8255
- ---help---
- Enable support for Keithley Metrabyte/ComputerBoards DAS16
- and compatible ISA and PC/104 cards:
- Keithley Metrabyte DAS-16, DAS-16G, DAS-16F, DAS-1201, DAS-1202,
- DAS-1401, DAS-1402, DAS-1601, DAS-1602 and
- ComputerBoards/MeasurementComputing PC104-DAS16/JR/,
- PC104-DAS16JR/16, CIO-DAS16JR/16, CIO-DAS16/JR, CIO-DAS1401/12,
- CIO-DAS1402/12, CIO-DAS1402/16, CIO-DAS1601/12, CIO-DAS1602/12,
- CIO-DAS1602/16, CIO-DAS16/330
-
- To compile this driver as a module, choose M here: the module will be
- called das16.
-
-config COMEDI_DAS800
- tristate "DAS800 and compatible ISA card support"
- select COMEDI_8254
- ---help---
- Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
- Keithley Metrabyte DAS-800, DAS-801, DAS-802
- Measurement Computing CIO-DAS800, CIO-DAS801, CIO-DAS802 and
- CIO-DAS802/16
-
- To compile this driver as a module, choose M here: the module will be
- called das800.
-
-config COMEDI_DAS1800
- tristate "DAS1800 and compatible ISA card support"
- select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
- ---help---
- Enable support for DAS1800 and compatible ISA cards
- Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
- DAS-1702ST, DAS-1702ST-DA, DAS-1702HR, DAS-1702HR-DA, DAS-1702/AO,
- DAS-1801ST, DAS-1801ST-DA, DAS-1801HC, DAS-1801AO, DAS-1802ST,
- DAS-1802ST-DA, DAS-1802HR, DAS-1802HR-DA, DAS-1802HC and
- DAS-1802AO
-
- To compile this driver as a module, choose M here: the module will be
- called das1800.
-
-config COMEDI_DAS6402
- tristate "DAS6402 and compatible ISA card support"
- select COMEDI_8254
- ---help---
- Enable support for DAS6402 and compatible ISA cards
- Computerboards, Keithley Metrabyte DAS6402 and compatibles
-
- To compile this driver as a module, choose M here: the module will be
- called das6402.
-
-config COMEDI_DT2801
- tristate "Data Translation DT2801 ISA card support"
- ---help---
- Enable support for Data Translation DT2801 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called dt2801.
-
-config COMEDI_DT2811
- tristate "Data Translation DT2811 ISA card support"
- ---help---
- Enable support for Data Translation DT2811 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called dt2811.
-
-config COMEDI_DT2814
- tristate "Data Translation DT2814 ISA card support"
- ---help---
- Enable support for Data Translation DT2814 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called dt2814.
-
-config COMEDI_DT2815
- tristate "Data Translation DT2815 ISA card support"
- ---help---
- Enable support for Data Translation DT2815 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called dt2815.
-
-config COMEDI_DT2817
- tristate "Data Translation DT2817 ISA card support"
- ---help---
- Enable support for Data Translation DT2817 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called dt2817.
-
-config COMEDI_DT282X
- tristate "Data Translation DT2821 series and DT-EZ ISA card support"
- select COMEDI_ISADMA if ISA_DMA_API
- ---help---
- Enable support for Data Translation DT2821 series including DT-EZ
- DT2821, DT2821-F-16SE, DT2821-F-8DI, DT2821-G-16SE, DT2821-G-8DI,
- DT2823 (dt2823), DT2824-PGH, DT2824-PGL, DT2825, DT2827, DT2828,
- DT21-EZ, DT23-EZ, DT24-EZ and DT24-EZ-PGL
-
- To compile this driver as a module, choose M here: the module will be
- called dt282x.
-
-config COMEDI_DMM32AT
- tristate "Diamond Systems MM-32-AT PC/104 board support"
- select COMEDI_8255
- ---help---
- Enable support for Diamond Systems MM-32-AT PC/104 boards
-
- To compile this driver as a module, choose M here: the module will be
- called dmm32at.
-
-config COMEDI_FL512
- tristate "FL512 ISA card support"
- ---help---
- Enable support for FL512 ISA card
-
- To compile this driver as a module, choose M here: the module will be
- called fl512.
-
-config COMEDI_AIO_AIO12_8
- tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
- select COMEDI_8255
- ---help---
- Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
-
- To compile this driver as a module, choose M here: the module will be
- called aio_aio12_8.
-
-config COMEDI_AIO_IIRO_16
- tristate "I/O Products PC/104 IIRO16 Board support"
- ---help---
- Enable support for I/O Products PC/104 IIRO16 Relay And Isolated
- Input Board
-
- To compile this driver as a module, choose M here: the module will be
- called aio_iiro_16.
-
-config COMEDI_II_PCI20KC
- tristate "Intelligent Instruments PCI-20001C carrier support"
- depends on HAS_IOMEM
- ---help---
- Enable support for Intelligent Instruments PCI-20001C carrier
- PCI-20001, PCI-20006 and PCI-20341
-
- To compile this driver as a module, choose M here: the module will be
- called ii_pci20kc.
-
-config COMEDI_C6XDIGIO
- tristate "Mechatronic Systems Inc. C6x_DIGIO DSP daughter card support"
- ---help---
- Enable support for Mechatronic Systems Inc. C6x_DIGIO DSP daughter
- card
-
- To compile this driver as a module, choose M here: the module will be
- called c6xdigio.
-
-config COMEDI_MPC624
- tristate "Micro/sys MPC-624 PC/104 board support"
- ---help---
- Enable support for Micro/sys MPC-624 PC/104 board
-
- To compile this driver as a module, choose M here: the module will be
- called mpc624.
-
-config COMEDI_ADQ12B
- tristate "MicroAxial ADQ12-B data acquisition and control card support"
- ---help---
- Enable MicroAxial ADQ12-B daq and control card support.
-
- To compile this driver as a module, choose M here: the module will be
- called adq12b.
-
-config COMEDI_NI_AT_A2150
- tristate "NI AT-A2150 ISA card support"
- select COMEDI_ISADMA if ISA_DMA_API
- select COMEDI_8254
- ---help---
- Enable support for National Instruments AT-A2150 cards
-
- To compile this driver as a module, choose M here: the module will be
- called ni_at_a2150.
-
-config COMEDI_NI_AT_AO
- tristate "NI AT-AO-6/10 EISA card support"
- select COMEDI_8254
- ---help---
- Enable support for National Instruments AT-AO-6/10 cards
-
- To compile this driver as a module, choose M here: the module will be
- called ni_at_ao.
-
-config COMEDI_NI_ATMIO
- tristate "NI AT-MIO E series ISA-PNP card support"
- select COMEDI_8255
- select COMEDI_NI_TIO
- ---help---
- Enable support for National Instruments AT-MIO E series cards
- National Instruments AT-MIO-16E-1 (ni_atmio),
- AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
- AT-MIO-16XE-50, AT-MIO-16XE-10, AT-AI-16XE-10
-
- To compile this driver as a module, choose M here: the module will be
- called ni_atmio.
-
-config COMEDI_NI_ATMIO16D
- tristate "NI AT-MIO-16/AT-MIO-16D series ISA card support"
- select COMEDI_8255
- ---help---
- Enable support for National Instruments AT-MIO-16/AT-MIO-16D cards.
-
- To compile this driver as a module, choose M here: the module will be
- called ni_atmio16d.
-
-config COMEDI_NI_LABPC_ISA
- tristate "NI Lab-PC and compatibles ISA support"
- select COMEDI_NI_LABPC
- select COMEDI_NI_LABPC_ISADMA if ISA_DMA_API
- ---help---
- Enable support for National Instruments Lab-PC and compatibles
- Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
- Kernel-level ISA plug-and-play support for the lab-pc-1200 boards has
- not yet been added to the driver.
-
- To compile this driver as a module, choose M here: the module will be
- called ni_labpc.
-
-config COMEDI_PCMAD
- tristate "Winsystems PCM-A/D12 and PCM-A/D16 PC/104 board support"
- ---help---
- Enable support for Winsystems PCM-A/D12 and PCM-A/D16 PC/104 boards.
-
- To compile this driver as a module, choose M here: the module will be
- called pcmad.
-
-config COMEDI_PCMDA12
- tristate "Winsystems PCM-D/A-12 8-channel AO PC/104 board support"
- ---help---
- Enable support for Winsystems PCM-D/A-12 8-channel AO PC/104 boards.
- Note that the board is not ISA-PNP capable and thus needs the I/O
- port comedi_config parameter.
-
- To compile this driver as a module, choose M here: the module will be
- called pcmda12.
-
-config COMEDI_PCMMIO
- tristate "Winsystems PCM-MIO PC/104 board support"
- ---help---
- Enable support for Winsystems PCM-MIO multifunction PC/104 boards.
-
- To compile this driver as a module, choose M here: the module will be
- called pcmmio.
-
-config COMEDI_PCMUIO
- tristate "Winsystems PCM-UIO48A and PCM-UIO96A PC/104 board support"
- ---help---
- Enable support for PCM-UIO48A and PCM-UIO96A PC/104 boards.
-
- To compile this driver as a module, choose M here: the module will be
- called pcmuio.
-
-config COMEDI_MULTIQ3
- tristate "Quanser Consulting MultiQ-3 ISA card support"
- ---help---
- Enable support for Quanser Consulting MultiQ-3 ISA cards
-
- To compile this driver as a module, choose M here: the module will be
- called multiq3.
-
-config COMEDI_S526
- tristate "Sensoray s526 support"
- ---help---
- Enable support for Sensoray s526
-
- To compile this driver as a module, choose M here: the module will be
- called s526.
-
-endif # COMEDI_ISA_DRIVERS
-
-menuconfig COMEDI_PCI_DRIVERS
- tristate "Comedi PCI drivers"
- depends on PCI
- ---help---
- Enable support for comedi PCI drivers.
-
- To compile this support as a module, choose M here: the module will
- be called comedi_pci.
-
-if COMEDI_PCI_DRIVERS
-
-config COMEDI_8255_PCI
- tristate "Generic PCI based 8255 digital i/o board support"
- select COMEDI_8255
- ---help---
- Enable support for PCI based 8255 digital i/o boards. This driver
- provides a PCI wrapper around the generic 8255 driver.
-
- Supported boards:
- ADlink - PCI-7224, PCI-7248, and PCI-7296
- Measurement Computing - PCI-DIO24, PCI-DIO24H, PCI-DIO48H and
- PCI-DIO96H
- National Instruments - PCI-DIO-96, PCI-DIO-96B, PXI-6508, PCI-6503,
- PCI-6503B, PCI-6503X, and PXI-6503
-
- To compile this driver as a module, choose M here: the module will
- be called 8255_pci.
-
-config COMEDI_ADDI_WATCHDOG
- tristate
- ---help---
- Provides support for the watchdog subdevice found on many ADDI-DATA
- boards. This module will be automatically selected when needed. The
- module will be called addi_watchdog.
-
-config COMEDI_ADDI_APCI_1032
- tristate "ADDI-DATA APCI_1032 support"
- ---help---
- Enable support for ADDI-DATA APCI_1032 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_1032.
-
-config COMEDI_ADDI_APCI_1500
- tristate "ADDI-DATA APCI_1500 support"
- ---help---
- Enable support for ADDI-DATA APCI_1500 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_1500.
-
-config COMEDI_ADDI_APCI_1516
- tristate "ADDI-DATA APCI-1016/1516/2016 support"
- select COMEDI_ADDI_WATCHDOG
- ---help---
- Enable support for ADDI-DATA APCI-1016, APCI-1516 and APCI-2016 boards.
- These are 16 channel, optically isolated, digital I/O boards. The 1516
- and 2016 boards also have a watchdog for resetting the outputs to "0".
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_1516.
-
-config COMEDI_ADDI_APCI_1564
- tristate "ADDI-DATA APCI_1564 support"
- select COMEDI_ADDI_WATCHDOG
- ---help---
- Enable support for ADDI-DATA APCI_1564 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_1564.
-
-config COMEDI_ADDI_APCI_16XX
- tristate "ADDI-DATA APCI_16xx support"
- ---help---
- Enable support for ADDI-DATA APCI_16xx cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_16xx.
-
-config COMEDI_ADDI_APCI_2032
- tristate "ADDI-DATA APCI_2032 support"
- select COMEDI_ADDI_WATCHDOG
- ---help---
- Enable support for ADDI-DATA APCI_2032 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_2032.
-
-config COMEDI_ADDI_APCI_2200
- tristate "ADDI-DATA APCI_2200 support"
- select COMEDI_ADDI_WATCHDOG
- ---help---
- Enable support for ADDI-DATA APCI_2200 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_2200.
-
-config COMEDI_ADDI_APCI_3120
- tristate "ADDI-DATA APCI_3120/3001 support"
- depends on HAS_DMA
- ---help---
- Enable support for ADDI-DATA APCI_3120/3001 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_3120.
-
-config COMEDI_ADDI_APCI_3501
- tristate "ADDI-DATA APCI_3501 support"
- ---help---
- Enable support for ADDI-DATA APCI_3501 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_3501.
-
-config COMEDI_ADDI_APCI_3XXX
- tristate "ADDI-DATA APCI_3xxx support"
- ---help---
- Enable support for ADDI-DATA APCI_3xxx cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_3xxx.
-
-config COMEDI_ADL_PCI6208
- tristate "ADLink PCI-6208A support"
- ---help---
- Enable support for ADLink PCI-6208A cards
-
- To compile this driver as a module, choose M here: the module will be
- called adl_pci6208.
-
-config COMEDI_ADL_PCI7X3X
- tristate "ADLink PCI-723X/743X isolated digital i/o board support"
- ---help---
- Enable support for ADlink PCI-723X/743X isolated digital i/o boards.
- Supported boards include the 32-channel PCI-7230 (16 in/16 out),
- PCI-7233 (32 in), and PCI-7234 (32 out) as well as the 64-channel
- PCI-7432 (32 in/32 out), PCI-7433 (64 in), and PCI-7434 (64 out).
-
- To compile this driver as a module, choose M here: the module will be
- called adl_pci7x3x.
-
-config COMEDI_ADL_PCI8164
- tristate "ADLink PCI-8164 4 Axes Motion Control board support"
- ---help---
- Enable support for ADlink PCI-8164 4 Axes Motion Control board
-
- To compile this driver as a module, choose M here: the module will be
- called adl_pci8164.
-
-config COMEDI_ADL_PCI9111
- tristate "ADLink PCI-9111HR support"
- select COMEDI_8254
- ---help---
- Enable support for ADlink PCI9111 cards
-
- To compile this driver as a module, choose M here: the module will be
- called adl_pci9111.
-
-config COMEDI_ADL_PCI9118
- tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
- depends on HAS_DMA
- select COMEDI_8254
- ---help---
- Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
-
- To compile this driver as a module, choose M here: the module will be
- called adl_pci9118.
-
-config COMEDI_ADV_PCI1710
- tristate "Advantech PCI-171x, PCI-1720 and PCI-1731 support"
- select COMEDI_8254
- ---help---
- Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
- PCI-1713, PCI-1720 and PCI-1731
-
- To compile this driver as a module, choose M here: the module will be
- called adv_pci1710.
-
-config COMEDI_ADV_PCI1723
- tristate "Advantech PCI-1723 support"
- ---help---
- Enable support for Advantech PCI-1723 cards
-
- To compile this driver as a module, choose M here: the module will be
- called adv_pci1723.
-
-config COMEDI_ADV_PCI1724
- tristate "Advantech PCI-1724U support"
- ---help---
- Enable support for Advantech PCI-1724U cards. These are 32-channel
- analog output cards with voltage and current loop output ranges and
- 14-bit resolution.
-
- To compile this driver as a module, choose M here: the module will be
- called adv_pci1724.
-
-config COMEDI_ADV_PCI_DIO
- tristate "Advantech PCI DIO card support"
- select COMEDI_8254
- select COMEDI_8255
- ---help---
- Enable support for Advantech PCI DIO cards
- PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
- PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
- PCI-1760 and PCI-1762
-
- To compile this driver as a module, choose M here: the module will be
- called adv_pci_dio.
-
-config COMEDI_AMPLC_DIO200_PCI
- tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
- select COMEDI_AMPLC_DIO200
- ---help---
- Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
- and PCIe296 DIO boards.
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_dio200_pci.
-
-config COMEDI_AMPLC_PC236_PCI
- tristate "Amplicon PCI236 DIO board support"
- select COMEDI_AMPLC_PC236
- ---help---
- Enable support for Amplicon PCI236 DIO board.
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_pci236.
-
-config COMEDI_AMPLC_PC263_PCI
- tristate "Amplicon PCI263 relay board support"
- ---help---
- Enable support for Amplicon PCI263 relay board. This is a PCI board
- with 16 reed relay output channels.
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_pci263.
-
-config COMEDI_AMPLC_PCI224
- tristate "Amplicon PCI224 and PCI234 support"
- select COMEDI_8254
- ---help---
- Enable support for Amplicon PCI224 and PCI234 AO boards
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_pci224.
-
-config COMEDI_AMPLC_PCI230
- tristate "Amplicon PCI230 and PCI260 support"
- select COMEDI_8254
- select COMEDI_8255
- ---help---
- Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
- boards
-
- To compile this driver as a module, choose M here: the module will be
- called amplc_pci230.
-
-config COMEDI_CONTEC_PCI_DIO
- tristate "Contec PIO1616L digital I/O board support"
- ---help---
- Enable support for the Contec PIO1616L digital I/O board
-
- To compile this driver as a module, choose M here: the module will be
- called contec_pci_dio.
-
-config COMEDI_DAS08_PCI
- tristate "DAS-08 PCI support"
- select COMEDI_DAS08
- ---help---
- Enable support for PCI DAS-08 cards.
-
- To compile this driver as a module, choose M here: the module will be
- called das08_pci.
-
-config COMEDI_DT3000
- tristate "Data Translation DT3000 series support"
- ---help---
- Enable support for Data Translation DT3000 series
- DT3001, DT3001-PGL, DT3002, DT3003, DT3003-PGL, DT3004, DT3005 and
- DT3004-200
-
- To compile this driver as a module, choose M here: the module will be
- called dt3000.
-
-config COMEDI_DYNA_PCI10XX
- tristate "Dynalog PCI DAQ series support"
- ---help---
- Enable support for Dynalog PCI DAQ series
- PCI-1050
-
- To compile this driver as a module, choose M here: the module will be
- called dyna_pci10xx.
-
-config COMEDI_GSC_HPDI
- tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
- ---help---
- Enable support for General Standards Corporation high speed parallel
- digital interface rs485 boards PCI-HPDI32 and PMC-HPDI32.
- Only receive mode works; transmit is not supported.
-
- To compile this driver as a module, choose M here: the module will be
- called gsc_hpdi.
-
-config COMEDI_MF6X4
- tristate "Humusoft MF634 and MF624 DAQ Card support"
- ---help---
- This driver supports both Humusoft MF634 and MF624 data acquisition
- cards. The legacy Humusoft MF614 card is not supported.
-
-config COMEDI_ICP_MULTI
- tristate "Inova ICP_MULTI support"
- ---help---
- Enable support for Inova ICP_MULTI card
-
- To compile this driver as a module, choose M here: the module will be
- called icp_multi.
-
-config COMEDI_DAQBOARD2000
- tristate "IOtech DAQboard/2000 support"
- select COMEDI_8255
- ---help---
- Enable support for the IOtech DAQboard/2000
-
- To compile this driver as a module, choose M here: the module will be
- called daqboard2000.
-
-config COMEDI_JR3_PCI
- tristate "JR3/PCI force sensor board support"
- ---help---
- Enable support for JR3/PCI force sensor boards
-
- To compile this driver as a module, choose M here: the module will be
- called jr3_pci.
-
-config COMEDI_KE_COUNTER
- tristate "Kolter-Electronic PCI Counter 1 card support"
- ---help---
- Enable support for Kolter-Electronic PCI Counter 1 cards
-
- To compile this driver as a module, choose M here: the module will be
- called ke_counter.
-
-config COMEDI_CB_PCIDAS64
- tristate "MeasurementComputing PCI-DAS 64xx, 60xx, and 4020 support"
- select COMEDI_8255
- ---help---
- Enable support for ComputerBoards/MeasurementComputing PCI-DAS 64xx,
- 60xx, and 4020 series with the PLX 9080 PCI controller
-
- To compile this driver as a module, choose M here: the module will be
- called cb_pcidas64.
-
-config COMEDI_CB_PCIDAS
- tristate "MeasurementComputing PCI-DAS support"
- select COMEDI_8254
- select COMEDI_8255
- ---help---
- Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
- AMCC S5933 PCI controller: PCI-DAS1602/16, PCI-DAS1602/16jr,
- PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr, PCI-DAS1000, PCI-DAS1001
- and PCI-DAS1002.
-
- To compile this driver as a module, choose M here: the module will be
- called cb_pcidas.
-
-config COMEDI_CB_PCIDDA
- tristate "MeasurementComputing PCI-DDA series support"
- select COMEDI_8255
- ---help---
- Enable support for ComputerBoards/MeasurementComputing PCI-DDA
- series: PCI-DDA08/12, PCI-DDA04/12, PCI-DDA02/12, PCI-DDA08/16,
- PCI-DDA04/16 and PCI-DDA02/16
-
- To compile this driver as a module, choose M here: the module will be
- called cb_pcidda.
-
-config COMEDI_CB_PCIMDAS
- tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
- select COMEDI_8254
- select COMEDI_8255
- ---help---
- Enable support for ComputerBoards/MeasurementComputing PCI Migration
- series PCIM-DAS1602/16 and PCIe-DAS1602/16.
-
- To compile this driver as a module, choose M here: the module will be
- called cb_pcimdas.
-
-config COMEDI_CB_PCIMDDA
- tristate "MeasurementComputing PCIM-DDA06-16 support"
- select COMEDI_8255
- ---help---
- Enable support for ComputerBoards/MeasurementComputing PCIM-DDA06-16
-
- To compile this driver as a module, choose M here: the module will be
- called cb_pcimdda.
-
-config COMEDI_ME4000
- tristate "Meilhaus ME-4000 support"
- select COMEDI_8254
- ---help---
- Enable support for Meilhaus PCI data acquisition cards
- ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
-
- To compile this driver as a module, choose M here: the module will be
- called me4000.
-
-config COMEDI_ME_DAQ
- tristate "Meilhaus ME-2000i, ME-2600i, ME-3000vm1 support"
- ---help---
- Enable support for Meilhaus PCI data acquisition cards
- ME-2000i, ME-2600i and ME-3000vm1
-
- To compile this driver as a module, choose M here: the module will be
- called me_daq.
-
-config COMEDI_NI_6527
- tristate "NI 6527 support"
- ---help---
- Enable support for the National Instruments 6527 PCI card
-
- To compile this driver as a module, choose M here: the module will be
- called ni_6527.
-
-config COMEDI_NI_65XX
- tristate "NI 65xx static dio PCI card support"
- ---help---
- Enable support for National Instruments 65xx static dio boards.
- Supported devices: National Instruments PCI-6509 (ni_65xx),
- PXI-6509, PCI-6510, PCI-6511, PXI-6511, PCI-6512, PXI-6512, PCI-6513,
- PXI-6513, PCI-6514, PXI-6514, PCI-6515, PXI-6515, PCI-6516, PCI-6517,
- PCI-6518, PCI-6519, PCI-6520, PCI-6521, PXI-6521, PCI-6528, PXI-6528
-
- To compile this driver as a module, choose M here: the module will be
- called ni_65xx.
-
-config COMEDI_NI_660X
- tristate "NI 660x counter/timer PCI card support"
- depends on HAS_DMA
- select COMEDI_NI_TIOCMD
- ---help---
- Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602,
- PXI-6602, PXI-6608 and PXI-6624.
-
- To compile this driver as a module, choose M here: the module will be
- called ni_660x.
-
-config COMEDI_NI_670X
- tristate "NI 670x PCI card support"
- ---help---
- Enable support for National Instruments PCI-6703 and PCI-6704
-
- To compile this driver as a module, choose M here: the module will be
- called ni_670x.
-
-config COMEDI_NI_LABPC_PCI
- tristate "NI Lab-PC PCI-1200 support"
- select COMEDI_NI_LABPC
- ---help---
- Enable support for National Instruments Lab-PC PCI-1200.
-
- To compile this driver as a module, choose M here: the module will be
- called ni_labpc_pci.
-
-config COMEDI_NI_PCIDIO
- tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support"
- depends on HAS_DMA
- select COMEDI_MITE
- select COMEDI_8255
- ---help---
- Enable support for National Instruments PCI-DIO-32HS, PXI-6533,
- PCI-6533 and PCI-6534
-
- To compile this driver as a module, choose M here: the module will be
- called ni_pcidio.
-
-config COMEDI_NI_PCIMIO
- tristate "NI PCI-MIO-E series and M series support"
- depends on HAS_DMA
- select COMEDI_NI_TIOCMD
- select COMEDI_8255
- ---help---
- Enable support for National Instruments PCI-MIO-E series and M series
- (all boards): PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1,
- PCI-MIO-16E-4, PCI-6014, PCI-6040E, PXI-6040E, PCI-6030E, PCI-6031E,
- PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E,
- PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E, PCI-6110, PCI-6111,
- PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, PXI-6225, PCI-6229,
- PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259,
- PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289, PCI-6711, PXI-6711,
- PCI-6713, PXI-6713, PXI-6071E, PCI-6070E, PXI-6070E, PXI-6052E,
- PCI-6036E, PCI-6731, PCI-6733, PXI-6733, PCI-6143, PXI-6143
-
- To compile this driver as a module, choose M here: the module will be
- called ni_pcimio.
-
-config COMEDI_RTD520
- tristate "Real Time Devices PCI4520/DM7520 support"
- select COMEDI_8254
- ---help---
- Enable support for Real Time Devices PCI4520/DM7520
-
- To compile this driver as a module, choose M here: the module will be
- called rtd520.
-
-config COMEDI_S626
- tristate "Sensoray 626 support"
- ---help---
- Enable support for Sensoray 626
-
- To compile this driver as a module, choose M here: the module will be
- called s626.
-
-config COMEDI_MITE
- depends on HAS_DMA
- tristate
-
-config COMEDI_NI_TIOCMD
- tristate
- depends on HAS_DMA
- select COMEDI_NI_TIO
- select COMEDI_MITE
-
-endif # COMEDI_PCI_DRIVERS
-
-menuconfig COMEDI_PCMCIA_DRIVERS
- tristate "Comedi PCMCIA drivers"
- depends on PCMCIA
- ---help---
- Enable support for comedi PCMCIA drivers.
-
- To compile this support as a module, choose M here: the module will
- be called comedi_pcmcia.
-
-if COMEDI_PCMCIA_DRIVERS
-
-config COMEDI_CB_DAS16_CS
- tristate "CB DAS16 series PCMCIA support"
- select COMEDI_8254
- ---help---
- Enable support for the ComputerBoards/MeasurementComputing PCMCIA
- cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
-
- To compile this driver as a module, choose M here: the module will be
- called cb_das16_cs.
-
-config COMEDI_DAS08_CS
- tristate "CB DAS08 PCMCIA support"
- select COMEDI_DAS08
- ---help---
- Enable support for the ComputerBoards/MeasurementComputing DAS-08
- PCMCIA card
-
- To compile this driver as a module, choose M here: the module will be
- called das08_cs.
-
-config COMEDI_NI_DAQ_700_CS
- tristate "NI DAQCard-700 PCMCIA support"
- ---help---
- Enable support for the National Instruments PCMCIA DAQCard-700 DIO
-
- To compile this driver as a module, choose M here: the module will be
- called ni_daq_700.
-
-config COMEDI_NI_DAQ_DIO24_CS
- tristate "NI DAQ-Card DIO-24 PCMCIA support"
- select COMEDI_8255
- ---help---
- Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
-
- To compile this driver as a module, choose M here: the module will be
- called ni_daq_dio24.
-
-config COMEDI_NI_LABPC_CS
- tristate "NI DAQCard-1200 PCMCIA support"
- select COMEDI_NI_LABPC
- ---help---
- Enable support for the National Instruments PCMCIA DAQCard-1200
-
- To compile this driver as a module, choose M here: the module will be
- called ni_labpc_cs.
-
-config COMEDI_NI_MIO_CS
- tristate "NI DAQCard E series PCMCIA support"
- select COMEDI_NI_TIO
- select COMEDI_8255
- ---help---
- Enable support for the National Instruments PCMCIA DAQCard E series
- DAQCard-ai-16xe-50, DAQCard-ai-16e-4, DAQCard-6062E, DAQCard-6024E
- and DAQCard-6036E
-
- To compile this driver as a module, choose M here: the module will be
- called ni_mio_cs.
-
-config COMEDI_QUATECH_DAQP_CS
- tristate "Quatech DAQP PCMCIA data capture card support"
- ---help---
- Enable support for the Quatech DAQP PCMCIA data capture cards
- DAQP-208 and DAQP-308
-
- To compile this driver as a module, choose M here: the module will be
- called quatech_daqp_cs.
-
-endif # COMEDI_PCMCIA_DRIVERS
-
-menuconfig COMEDI_USB_DRIVERS
- tristate "Comedi USB drivers"
- depends on USB
- ---help---
- Enable support for comedi USB drivers.
-
- To compile this support as a module, choose M here: the module will
- be called comedi_usb.
-
-if COMEDI_USB_DRIVERS
-
-config COMEDI_DT9812
- tristate "DataTranslation DT9812 USB module support"
- ---help---
- Enable support for the Data Translation DT9812 USB module
-
- To compile this driver as a module, choose M here: the module will be
- called dt9812.
-
-config COMEDI_NI_USB6501
- tristate "NI USB-6501 support"
- ---help---
- Enable support for the National Instruments USB-6501 module.
-
- The NI USB-6501 is a Full-Speed USB 2.0 (12 Mbit/s) device that
- provides 24 digital I/O channels and one 32-bit counter.
-
- To compile this driver as a module, choose M here: the module will be
- called ni_usb6501.
-
-config COMEDI_USBDUX
- tristate "ITL USB-DUX-D support"
- ---help---
- Enable support for the Incite Technology Ltd USB-DUX-D Board
-
- To compile this driver as a module, choose M here: the module will be
- called usbdux.
-
-config COMEDI_USBDUXFAST
- tristate "ITL USB-DUXfast support"
- ---help---
- Enable support for the Incite Technology Ltd USB-DUXfast Board
-
- To compile this driver as a module, choose M here: the module will be
- called usbduxfast.
-
-config COMEDI_USBDUXSIGMA
- tristate "ITL USB-DUXsigma support"
- ---help---
- Enable support for the Incite Technology Ltd USB-DUXsigma Board
-
- To compile this driver as a module, choose M here: the module will be
- called usbduxsigma.
-
-config COMEDI_VMK80XX
- tristate "Velleman VM110/VM140 USB Board support"
- ---help---
- Build the Velleman USB Board Low-Level Driver supporting the
- K8055/K8061 aka VM110/VM140 devices
-
- To compile this driver as a module, choose M here: the module will be
- called vmk80xx.
-
-endif # COMEDI_USB_DRIVERS
-
-config COMEDI_8254
- tristate
-
-config COMEDI_8255
- tristate
-
-config COMEDI_8255_SA
- tristate "Standalone 8255 support"
- select COMEDI_8255
- ---help---
- Enable support for 8255 digital I/O as a standalone driver.
-
- You should enable compilation of this driver if you plan to use a board
- that has an 8255 chip at a known I/O base address and there are no
- other Comedi drivers for the board.
-
- Note that Comedi drivers for most multi-function boards incorporating
- an 8255 chip use the 'comedi_8255' module. Most PCI-based 8255
- boards use the 8255_pci driver as a wrapper around the 'comedi_8255'
- module.
-
- To compile this driver as a module, choose M here: the module will be
- called 8255.
-
-config COMEDI_KCOMEDILIB
- tristate "Comedi kcomedilib"
- ---help---
- Build the kcomedilib.
-
- This is a kernel module used to open and manipulate Comedi devices
- from within kernel code. It is currently only used by the
- comedi_bond driver, and its functionality has been stripped down to
- the needs of that driver, so is currently not very useful for
- anything else.
-
- To compile kcomedilib as a module, choose M here: the module will be
- called kcomedilib.
-
-config COMEDI_AMPLC_DIO200
- select COMEDI_8254
- tristate
-
-config COMEDI_AMPLC_PC236
- tristate
- select COMEDI_8255
-
-config COMEDI_DAS08
- tristate
- select COMEDI_8254
- select COMEDI_8255
-
-config COMEDI_ISADMA
- tristate
-
-config COMEDI_NI_LABPC
- tristate
- select COMEDI_8254
- select COMEDI_8255
-
-config COMEDI_NI_LABPC_ISADMA
- tristate
- select COMEDI_ISADMA
-
-config COMEDI_NI_TIO
- tristate
-
-endif # COMEDI
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
deleted file mode 100644
index 7f9dfb3923ab..000000000000
--- a/drivers/staging/comedi/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
-
-comedi-y := comedi_fops.o range.o drivers.o \
- comedi_buf.o
-comedi-$(CONFIG_PROC_FS) += proc.o
-comedi-$(CONFIG_COMPAT) += comedi_compat32.o
-
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += comedi_pcmcia.o
-obj-$(CONFIG_COMEDI_USB_DRIVERS) += comedi_usb.o
-
-obj-$(CONFIG_COMEDI) += comedi.o
-
-obj-$(CONFIG_COMEDI) += kcomedilib/
-obj-$(CONFIG_COMEDI) += drivers/
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
deleted file mode 100644
index b68fbdb5eebf..000000000000
--- a/drivers/staging/comedi/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - Lindent
- - remove all wrappers
- - audit userspace interface
- - cleanup the individual comedi drivers as well
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-copy:
- Ian Abbott <abbotti@mev.co.uk>
- H Hartley Sweeten <hsweeten@visionengravers.com>
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
deleted file mode 100644
index 66edda190b75..000000000000
--- a/drivers/staging/comedi/comedi.h
+++ /dev/null
@@ -1,937 +0,0 @@
-/*
- include/comedi.h (installed as /usr/include/comedi.h)
- header file for comedi
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU Lesser General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-#ifndef _COMEDI_H
-#define _COMEDI_H
-
-#define COMEDI_MAJORVERSION 0
-#define COMEDI_MINORVERSION 7
-#define COMEDI_MICROVERSION 76
-#define VERSION "0.7.76"
-
-/* comedi's major device number */
-#define COMEDI_MAJOR 98
-
-/*
- maximum number of minor devices. This can be increased, although
- kernel structures are currently statically allocated, thus you
- don't want this to be much more than you actually use.
- */
-#define COMEDI_NDEVICES 16
-
-/* number of config options in the config structure */
-#define COMEDI_NDEVCONFOPTS 32
-
-/*
- * NOTE: 'comedi_config --init-data' is deprecated
- *
- * The following indexes in the config options were used by
- * comedi_config to pass firmware blobs from user space to the
- * comedi drivers. The request_firmware() hotplug interface is
- * now used by all comedi drivers instead.
- */
-
-/* length of nth chunk of firmware data */
-#define COMEDI_DEVCONF_AUX_DATA3_LENGTH 25
-#define COMEDI_DEVCONF_AUX_DATA2_LENGTH 26
-#define COMEDI_DEVCONF_AUX_DATA1_LENGTH 27
-#define COMEDI_DEVCONF_AUX_DATA0_LENGTH 28
-/* most significant 32 bits of pointer address (if needed) */
-#define COMEDI_DEVCONF_AUX_DATA_HI 29
-/* least significant 32 bits of pointer address */
-#define COMEDI_DEVCONF_AUX_DATA_LO 30
-#define COMEDI_DEVCONF_AUX_DATA_LENGTH 31 /* total data length */
-
-/* max length of device and driver names */
-#define COMEDI_NAMELEN 20
-
-/* packs and unpacks a channel/range number */
-
-#define CR_PACK(chan, rng, aref) \
- ((((aref)&0x3)<<24) | (((rng)&0xff)<<16) | (chan))
-#define CR_PACK_FLAGS(chan, range, aref, flags) \
- (CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
-
-#define CR_CHAN(a) ((a)&0xffff)
-#define CR_RANGE(a) (((a)>>16)&0xff)
-#define CR_AREF(a) (((a)>>24)&0x03)
-
-#define CR_FLAGS_MASK 0xfc000000
-#define CR_ALT_FILTER (1<<26)
-#define CR_DITHER CR_ALT_FILTER
-#define CR_DEGLITCH CR_ALT_FILTER
-#define CR_ALT_SOURCE (1<<27)
-#define CR_EDGE (1<<30)
-#define CR_INVERT (1<<31)
-
-#define AREF_GROUND 0x00 /* analog ref = analog ground */
-#define AREF_COMMON 0x01 /* analog ref = analog common */
-#define AREF_DIFF 0x02 /* analog ref = differential */
-#define AREF_OTHER 0x03 /* analog ref = other (undefined) */
-
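A standalone illustration of how the chanspec packing macros above are used;
the macros are repeated (reformatted) here only so the sketch compiles on its
own, and the channel/range/reference values are arbitrary examples.

#include <stdio.h>

#define CR_PACK(chan, rng, aref) \
	((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
#define CR_CHAN(a)	((a) & 0xffff)
#define CR_RANGE(a)	(((a) >> 16) & 0xff)
#define CR_AREF(a)	(((a) >> 24) & 0x03)
#define AREF_DIFF	0x02

int main(void)
{
	unsigned int spec = CR_PACK(3, 1, AREF_DIFF);

	/* prints: chan=3 range=1 aref=2 */
	printf("chan=%u range=%u aref=%u\n",
	       CR_CHAN(spec), CR_RANGE(spec), CR_AREF(spec));
	return 0;
}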
-/* counters -- these are arbitrary values */
-#define GPCT_RESET 0x0001
-#define GPCT_SET_SOURCE 0x0002
-#define GPCT_SET_GATE 0x0004
-#define GPCT_SET_DIRECTION 0x0008
-#define GPCT_SET_OPERATION 0x0010
-#define GPCT_ARM 0x0020
-#define GPCT_DISARM 0x0040
-#define GPCT_GET_INT_CLK_FRQ 0x0080
-
-#define GPCT_INT_CLOCK 0x0001
-#define GPCT_EXT_PIN 0x0002
-#define GPCT_NO_GATE 0x0004
-#define GPCT_UP 0x0008
-#define GPCT_DOWN 0x0010
-#define GPCT_HWUD 0x0020
-#define GPCT_SIMPLE_EVENT 0x0040
-#define GPCT_SINGLE_PERIOD 0x0080
-#define GPCT_SINGLE_PW 0x0100
-#define GPCT_CONT_PULSE_OUT 0x0200
-#define GPCT_SINGLE_PULSE_OUT 0x0400
-
-/* instructions */
-
-#define INSN_MASK_WRITE 0x8000000
-#define INSN_MASK_READ 0x4000000
-#define INSN_MASK_SPECIAL 0x2000000
-
-#define INSN_READ (0 | INSN_MASK_READ)
-#define INSN_WRITE (1 | INSN_MASK_WRITE)
-#define INSN_BITS (2 | INSN_MASK_READ|INSN_MASK_WRITE)
-#define INSN_CONFIG (3 | INSN_MASK_READ|INSN_MASK_WRITE)
-#define INSN_GTOD (4 | INSN_MASK_READ|INSN_MASK_SPECIAL)
-#define INSN_WAIT (5 | INSN_MASK_WRITE|INSN_MASK_SPECIAL)
-#define INSN_INTTRIG (6 | INSN_MASK_WRITE|INSN_MASK_SPECIAL)
-
-/* trigger flags */
-/* These flags are used in comedi_trig structures */
-
-#define TRIG_DITHER 0x0002 /* enable dithering */
-#define TRIG_DEGLITCH 0x0004 /* enable deglitching */
-#define TRIG_CONFIG 0x0010 /* perform configuration, not triggering */
-
-/* command flags */
-/* These flags are used in comedi_cmd structures */
-
-#define CMDF_BOGUS 0x00000001 /* do the motions */
-
-/* try to use a real-time interrupt while performing command */
-#define CMDF_PRIORITY 0x00000008
-
-/* wake up on end-of-scan events */
-#define CMDF_WAKE_EOS 0x00000020
-
-#define CMDF_WRITE 0x00000040
-
-#define CMDF_RAWDATA 0x00000080
-
-/* timer rounding definitions */
-#define CMDF_ROUND_MASK 0x00030000
-#define CMDF_ROUND_NEAREST 0x00000000
-#define CMDF_ROUND_DOWN 0x00010000
-#define CMDF_ROUND_UP 0x00020000
-#define CMDF_ROUND_UP_NEXT 0x00030000
-
-#define COMEDI_EV_START 0x00040000
-#define COMEDI_EV_SCAN_BEGIN 0x00080000
-#define COMEDI_EV_CONVERT 0x00100000
-#define COMEDI_EV_SCAN_END 0x00200000
-#define COMEDI_EV_STOP 0x00400000
-
-/* compatibility definitions */
-#define TRIG_BOGUS CMDF_BOGUS
-#define TRIG_RT CMDF_PRIORITY
-#define TRIG_WAKE_EOS CMDF_WAKE_EOS
-#define TRIG_WRITE CMDF_WRITE
-#define TRIG_ROUND_MASK CMDF_ROUND_MASK
-#define TRIG_ROUND_NEAREST CMDF_ROUND_NEAREST
-#define TRIG_ROUND_DOWN CMDF_ROUND_DOWN
-#define TRIG_ROUND_UP CMDF_ROUND_UP
-#define TRIG_ROUND_UP_NEXT CMDF_ROUND_UP_NEXT
-
-/* trigger sources */
-
-#define TRIG_ANY 0xffffffff
-#define TRIG_INVALID 0x00000000
-
-#define TRIG_NONE 0x00000001 /* never trigger */
-#define TRIG_NOW 0x00000002 /* trigger now + N ns */
-#define TRIG_FOLLOW 0x00000004 /* trigger on next lower level trig */
-#define TRIG_TIME 0x00000008 /* trigger at time N ns */
-#define TRIG_TIMER 0x00000010 /* trigger at rate N ns */
-#define TRIG_COUNT 0x00000020 /* trigger when count reaches N */
-#define TRIG_EXT 0x00000040 /* trigger on external signal N */
-#define TRIG_INT 0x00000080 /* trigger on comedi-internal signal N */
-#define TRIG_OTHER 0x00000100 /* driver defined */
-
-/* subdevice flags */
-
-#define SDF_BUSY 0x0001 /* device is busy */
-#define SDF_BUSY_OWNER 0x0002 /* device is busy with your job */
-#define SDF_LOCKED 0x0004 /* subdevice is locked */
-#define SDF_LOCK_OWNER 0x0008 /* you own lock */
-#define SDF_MAXDATA 0x0010 /* maxdata depends on channel */
-#define SDF_FLAGS 0x0020 /* flags depend on channel */
-#define SDF_RANGETYPE 0x0040 /* range type depends on channel */
-#define SDF_MODE0 0x0080 /* can do mode 0 */
-#define SDF_MODE1 0x0100 /* can do mode 1 */
-#define SDF_MODE2 0x0200 /* can do mode 2 */
-#define SDF_MODE3 0x0400 /* can do mode 3 */
-#define SDF_MODE4 0x0800 /* can do mode 4 */
-#define SDF_CMD 0x1000 /* can do commands (deprecated) */
-#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */
-#define SDF_CMD_WRITE 0x4000 /* can do output commands */
-#define SDF_CMD_READ 0x8000 /* can do input commands */
-
-/* subdevice can be read (e.g. analog input) */
-#define SDF_READABLE 0x00010000
-/* subdevice can be written (e.g. analog output) */
-#define SDF_WRITABLE 0x00020000
-#define SDF_WRITEABLE SDF_WRITABLE /* spelling error in API */
-/* subdevice does not have externally visible lines */
-#define SDF_INTERNAL 0x00040000
-#define SDF_GROUND 0x00100000 /* can do aref=ground */
-#define SDF_COMMON 0x00200000 /* can do aref=common */
-#define SDF_DIFF 0x00400000 /* can do aref=diff */
-#define SDF_OTHER 0x00800000 /* can do aref=other */
-#define SDF_DITHER 0x01000000 /* can do dithering */
-#define SDF_DEGLITCH 0x02000000 /* can do deglitching */
-#define SDF_MMAP 0x04000000 /* can do mmap() */
-#define SDF_RUNNING 0x08000000 /* subdevice is acquiring data */
-#define SDF_LSAMPL 0x10000000 /* subdevice uses 32-bit samples */
-#define SDF_PACKED 0x20000000 /* subdevice can do packed DIO */
-/* recycle these flags for PWM */
-#define SDF_PWM_COUNTER SDF_MODE0 /* PWM can automatically switch off */
-#define SDF_PWM_HBRIDGE SDF_MODE1 /* PWM is signed (H-bridge) */
-
-/* subdevice types */
-
-enum comedi_subdevice_type {
- COMEDI_SUBD_UNUSED, /* unused by driver */
- COMEDI_SUBD_AI, /* analog input */
- COMEDI_SUBD_AO, /* analog output */
- COMEDI_SUBD_DI, /* digital input */
- COMEDI_SUBD_DO, /* digital output */
- COMEDI_SUBD_DIO, /* digital input/output */
- COMEDI_SUBD_COUNTER, /* counter */
- COMEDI_SUBD_TIMER, /* timer */
- COMEDI_SUBD_MEMORY, /* memory, EEPROM, DPRAM */
- COMEDI_SUBD_CALIB, /* calibration DACs */
- COMEDI_SUBD_PROC, /* processor, DSP */
- COMEDI_SUBD_SERIAL, /* serial IO */
- COMEDI_SUBD_PWM /* PWM */
-};
-
-/* configuration instructions */
-
-enum configuration_ids {
- INSN_CONFIG_DIO_INPUT = 0,
- INSN_CONFIG_DIO_OUTPUT = 1,
- INSN_CONFIG_DIO_OPENDRAIN = 2,
- INSN_CONFIG_ANALOG_TRIG = 16,
-/* INSN_CONFIG_WAVEFORM = 17, */
-/* INSN_CONFIG_TRIG = 18, */
-/* INSN_CONFIG_COUNTER = 19, */
- INSN_CONFIG_ALT_SOURCE = 20,
- INSN_CONFIG_DIGITAL_TRIG = 21,
- INSN_CONFIG_BLOCK_SIZE = 22,
- INSN_CONFIG_TIMER_1 = 23,
- INSN_CONFIG_FILTER = 24,
- INSN_CONFIG_CHANGE_NOTIFY = 25,
-
- INSN_CONFIG_SERIAL_CLOCK = 26, /*ALPHA*/
- INSN_CONFIG_BIDIRECTIONAL_DATA = 27,
- INSN_CONFIG_DIO_QUERY = 28,
- INSN_CONFIG_PWM_OUTPUT = 29,
- INSN_CONFIG_GET_PWM_OUTPUT = 30,
- INSN_CONFIG_ARM = 31,
- INSN_CONFIG_DISARM = 32,
- INSN_CONFIG_GET_COUNTER_STATUS = 33,
- INSN_CONFIG_RESET = 34,
-	/* Use CTR as a single pulse generator */
- INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,
-	/* Use CTR as a pulse train generator */
- INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,
- /* Use the counter as encoder */
- INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,
- INSN_CONFIG_SET_GATE_SRC = 2001, /* Set gate source */
- INSN_CONFIG_GET_GATE_SRC = 2002, /* Get gate source */
- /* Set master clock source */
- INSN_CONFIG_SET_CLOCK_SRC = 2003,
- INSN_CONFIG_GET_CLOCK_SRC = 2004, /* Get master clock source */
- INSN_CONFIG_SET_OTHER_SRC = 2005, /* Set other source */
- /* INSN_CONFIG_GET_OTHER_SRC = 2006,*//* Get other source */
- /* Get size in bytes of subdevice's on-board fifos used during
- * streaming input/output */
- INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006,
- INSN_CONFIG_SET_COUNTER_MODE = 4097,
- /* INSN_CONFIG_8254_SET_MODE is deprecated */
- INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,
- INSN_CONFIG_8254_READ_STATUS = 4098,
- INSN_CONFIG_SET_ROUTING = 4099,
- INSN_CONFIG_GET_ROUTING = 4109,
- /* PWM */
- INSN_CONFIG_PWM_SET_PERIOD = 5000, /* sets frequency */
- INSN_CONFIG_PWM_GET_PERIOD = 5001, /* gets frequency */
- INSN_CONFIG_GET_PWM_STATUS = 5002, /* is it running? */
- /* sets H bridge: duty cycle and sign bit for a relay at the
- * same time */
- INSN_CONFIG_PWM_SET_H_BRIDGE = 5003,
- /* gets H bridge data: duty cycle and the sign bit */
- INSN_CONFIG_PWM_GET_H_BRIDGE = 5004
-};
-
-/*
- * Settings for INSN_CONFIG_DIGITAL_TRIG:
- * data[0] = INSN_CONFIG_DIGITAL_TRIG
- * data[1] = trigger ID
- * data[2] = configuration operation
- * data[3] = configuration parameter 1
- * data[4] = configuration parameter 2
- * data[5] = configuration parameter 3
- *
- * operation parameter 1 parameter 2 parameter 3
- * --------------------------------- ----------- ----------- -----------
- * COMEDI_DIGITAL_TRIG_DISABLE
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES left-shift rising-edges falling-edges
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS left-shift high-levels low-levels
- *
- * COMEDI_DIGITAL_TRIG_DISABLE returns the trigger to its default, inactive,
- * unconfigured state.
- *
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES selects the rising-edge and/or
- * falling-edge inputs, any of which can fire the trigger.
- *
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS sets a combination of high and/or low
- * level inputs that can fire the trigger.
- *
- * "left-shift" is useful if the trigger has more than 32 inputs to specify the
- * first input for this configuration.
- *
- * Some sequences of INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly)
- * cumulative effect, depending on the low-level driver. This is useful
- * when setting up a trigger that has more than 32 inputs or has a combination
- * of edge and level triggered inputs.
- */
-enum comedi_digital_trig_op {
- COMEDI_DIGITAL_TRIG_DISABLE = 0,
- COMEDI_DIGITAL_TRIG_ENABLE_EDGES = 1,
- COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2
-};
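/*
 * Illustrative sketch (not part of the original header): filling in the
 * data[] block documented above to enable a trigger on rising edges of
 * inputs 0 and 2 and the falling edge of input 1, for trigger ID 0 with no
 * left-shift.  The array would be sent with an INSN_CONFIG instruction; the
 * surrounding insn setup is omitted here.
 */
static inline void example_enable_edge_trigger(unsigned int data[6])
{
	data[0] = INSN_CONFIG_DIGITAL_TRIG;
	data[1] = 0;					/* trigger ID */
	data[2] = COMEDI_DIGITAL_TRIG_ENABLE_EDGES;	/* operation */
	data[3] = 0;					/* left-shift: inputs 0..31 */
	data[4] = (1U << 0) | (1U << 2);		/* rising-edge inputs */
	data[5] = (1U << 1);				/* falling-edge inputs */
}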
-
-enum comedi_io_direction {
- COMEDI_INPUT = 0,
- COMEDI_OUTPUT = 1,
- COMEDI_OPENDRAIN = 2
-};
-
-enum comedi_support_level {
- COMEDI_UNKNOWN_SUPPORT = 0,
- COMEDI_SUPPORTED,
- COMEDI_UNSUPPORTED
-};
-
-/* ioctls */
-
-#define CIO 'd'
-#define COMEDI_DEVCONFIG _IOW(CIO, 0, struct comedi_devconfig)
-#define COMEDI_DEVINFO _IOR(CIO, 1, struct comedi_devinfo)
-#define COMEDI_SUBDINFO _IOR(CIO, 2, struct comedi_subdinfo)
-#define COMEDI_CHANINFO _IOR(CIO, 3, struct comedi_chaninfo)
-#define COMEDI_TRIG _IOWR(CIO, 4, comedi_trig)
-#define COMEDI_LOCK _IO(CIO, 5)
-#define COMEDI_UNLOCK _IO(CIO, 6)
-#define COMEDI_CANCEL _IO(CIO, 7)
-#define COMEDI_RANGEINFO _IOR(CIO, 8, struct comedi_rangeinfo)
-#define COMEDI_CMD _IOR(CIO, 9, struct comedi_cmd)
-#define COMEDI_CMDTEST _IOR(CIO, 10, struct comedi_cmd)
-#define COMEDI_INSNLIST _IOR(CIO, 11, struct comedi_insnlist)
-#define COMEDI_INSN _IOR(CIO, 12, struct comedi_insn)
-#define COMEDI_BUFCONFIG _IOR(CIO, 13, struct comedi_bufconfig)
-#define COMEDI_BUFINFO _IOWR(CIO, 14, struct comedi_bufinfo)
-#define COMEDI_POLL _IO(CIO, 15)
-#define COMEDI_SETRSUBD _IO(CIO, 16)
-#define COMEDI_SETWSUBD _IO(CIO, 17)
-
-/* structures */
-
-struct comedi_trig {
- unsigned int subdev; /* subdevice */
- unsigned int mode; /* mode */
- unsigned int flags;
- unsigned int n_chan; /* number of channels */
- unsigned int *chanlist; /* channel/range list */
- short *data; /* data list, size depends on subd flags */
- unsigned int n; /* number of scans */
- unsigned int trigsrc;
- unsigned int trigvar;
- unsigned int trigvar1;
- unsigned int data_len;
- unsigned int unused[3];
-};
-
-struct comedi_insn {
- unsigned int insn;
- unsigned int n;
- unsigned int __user *data;
- unsigned int subdev;
- unsigned int chanspec;
- unsigned int unused[3];
-};
-
-struct comedi_insnlist {
- unsigned int n_insns;
- struct comedi_insn __user *insns;
-};
-
-struct comedi_cmd {
- unsigned int subdev;
- unsigned int flags;
-
- unsigned int start_src;
- unsigned int start_arg;
-
- unsigned int scan_begin_src;
- unsigned int scan_begin_arg;
-
- unsigned int convert_src;
- unsigned int convert_arg;
-
- unsigned int scan_end_src;
- unsigned int scan_end_arg;
-
- unsigned int stop_src;
- unsigned int stop_arg;
-
- unsigned int *chanlist; /* channel/range list */
- unsigned int chanlist_len;
-
- short __user *data; /* data list, size depends on subd flags */
- unsigned int data_len;
-};
-
-struct comedi_chaninfo {
- unsigned int subdev;
- unsigned int __user *maxdata_list;
- unsigned int __user *flaglist;
- unsigned int __user *rangelist;
- unsigned int unused[4];
-};
-
-struct comedi_rangeinfo {
- unsigned int range_type;
- void __user *range_ptr;
-};
-
-struct comedi_krange {
- int min; /* fixed point, multiply by 1e-6 */
- int max; /* fixed point, multiply by 1e-6 */
- unsigned int flags;
-};
-
-struct comedi_subdinfo {
- unsigned int type;
- unsigned int n_chan;
- unsigned int subd_flags;
- unsigned int timer_type;
- unsigned int len_chanlist;
- unsigned int maxdata;
- unsigned int flags; /* channel flags */
- unsigned int range_type; /* lookup in kernel */
- unsigned int settling_time_0;
- /* see support_level enum for values */
-	unsigned int insn_bits_support;
- unsigned int unused[8];
-};
-
-struct comedi_devinfo {
- unsigned int version_code;
- unsigned int n_subdevs;
- char driver_name[COMEDI_NAMELEN];
- char board_name[COMEDI_NAMELEN];
- int read_subdevice;
- int write_subdevice;
- int unused[30];
-};
-
-struct comedi_devconfig {
- char board_name[COMEDI_NAMELEN];
- int options[COMEDI_NDEVCONFOPTS];
-};
-
-struct comedi_bufconfig {
- unsigned int subdevice;
- unsigned int flags;
-
- unsigned int maximum_size;
- unsigned int size;
-
- unsigned int unused[4];
-};
-
-struct comedi_bufinfo {
- unsigned int subdevice;
- unsigned int bytes_read;
-
- unsigned int buf_write_ptr;
- unsigned int buf_read_ptr;
- unsigned int buf_write_count;
- unsigned int buf_read_count;
-
- unsigned int bytes_written;
-
- unsigned int unused[4];
-};
-
-/* range stuff */
-
-#define __RANGE(a, b) ((((a)&0xffff)<<16)|((b)&0xffff))
-
-#define RANGE_OFFSET(a) (((a)>>16)&0xffff)
-#define RANGE_LENGTH(b) ((b)&0xffff)
-
-#define RF_UNIT(flags) ((flags)&0xff)
-#define RF_EXTERNAL (1<<8)
-
-#define UNIT_volt 0
-#define UNIT_mA 1
-#define UNIT_none 2
-
-#define COMEDI_MIN_SPEED ((unsigned int)0xffffffff)
-
-/**********************************************************/
-/* everything after this line is ALPHA */
-/**********************************************************/
-
-/*
- * 8254 specific configuration.
- *
- * It supports two config commands:
- *
- *   0 ID: INSN_CONFIG_SET_COUNTER_MODE
- *   1 8254 Mode
- *     I8254_MODE0, I8254_MODE1, ..., I8254_MODE5
- *     OR'ed with:
- *     I8254_BCD, I8254_BINARY
- *
- *   0 ID: INSN_CONFIG_8254_READ_STATUS
- *   1 <-- Status byte returned here.
- *     B7 = Output
- *     B6 = NULL Count
- *     B5 - B0 Current mode.
- */
-
-enum i8254_mode {
- I8254_MODE0 = (0 << 1), /* Interrupt on terminal count */
- I8254_MODE1 = (1 << 1), /* Hardware retriggerable one-shot */
- I8254_MODE2 = (2 << 1), /* Rate generator */
- I8254_MODE3 = (3 << 1), /* Square wave mode */
- I8254_MODE4 = (4 << 1), /* Software triggered strobe */
- I8254_MODE5 = (5 << 1), /* Hardware triggered strobe
- * (retriggerable) */
- I8254_BCD = 1, /* use binary-coded decimal instead of binary
- * (pretty useless) */
- I8254_BINARY = 0
-};
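/*
 * Illustrative sketch (not part of the original header): the two-word data
 * block described in the comment above, configuring an 8254 counter as a
 * rate generator counting in binary.  As with the previous sketch, the
 * values would be carried by an INSN_CONFIG instruction; the insn plumbing
 * is omitted.
 */
static inline void example_set_8254_rate_generator(unsigned int data[2])
{
	data[0] = INSN_CONFIG_SET_COUNTER_MODE;
	data[1] = I8254_MODE2 | I8254_BINARY;	/* rate generator, binary count */
}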
-
-#define NI_USUAL_PFI_SELECT(x) (((x) < 10) ? (0x1 + (x)) : (0xb + (x)))
-#define NI_USUAL_RTSI_SELECT(x) (((x) < 7) ? (0xb + (x)) : 0x1b)
-
-/* mode bits for NI general-purpose counters, set with
- * INSN_CONFIG_SET_COUNTER_MODE */
-#define NI_GPCT_COUNTING_MODE_SHIFT 16
-#define NI_GPCT_INDEX_PHASE_BITSHIFT 20
-#define NI_GPCT_COUNTING_DIRECTION_SHIFT 24
-enum ni_gpct_mode_bits {
- NI_GPCT_GATE_ON_BOTH_EDGES_BIT = 0x4,
- NI_GPCT_EDGE_GATE_MODE_MASK = 0x18,
- NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS = 0x0,
- NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS = 0x8,
- NI_GPCT_EDGE_GATE_STARTS_BITS = 0x10,
- NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS = 0x18,
- NI_GPCT_STOP_MODE_MASK = 0x60,
- NI_GPCT_STOP_ON_GATE_BITS = 0x00,
- NI_GPCT_STOP_ON_GATE_OR_TC_BITS = 0x20,
- NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS = 0x40,
- NI_GPCT_LOAD_B_SELECT_BIT = 0x80,
- NI_GPCT_OUTPUT_MODE_MASK = 0x300,
- NI_GPCT_OUTPUT_TC_PULSE_BITS = 0x100,
- NI_GPCT_OUTPUT_TC_TOGGLE_BITS = 0x200,
- NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS = 0x300,
- NI_GPCT_HARDWARE_DISARM_MASK = 0xc00,
- NI_GPCT_NO_HARDWARE_DISARM_BITS = 0x000,
- NI_GPCT_DISARM_AT_TC_BITS = 0x400,
- NI_GPCT_DISARM_AT_GATE_BITS = 0x800,
- NI_GPCT_DISARM_AT_TC_OR_GATE_BITS = 0xc00,
- NI_GPCT_LOADING_ON_TC_BIT = 0x1000,
- NI_GPCT_LOADING_ON_GATE_BIT = 0x4000,
- NI_GPCT_COUNTING_MODE_MASK = 0x7 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_NORMAL_BITS =
- 0x0 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS =
- 0x1 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS =
- 0x2 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS =
- 0x3 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS =
- 0x4 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS =
- 0x6 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_INDEX_PHASE_MASK = 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS =
- 0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS =
- 0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS =
- 0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS =
- 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_ENABLE_BIT = 0x400000,
- NI_GPCT_COUNTING_DIRECTION_MASK =
- 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_DOWN_BITS =
- 0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_UP_BITS =
- 0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS =
- 0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS =
- 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_RELOAD_SOURCE_MASK = 0xc000000,
- NI_GPCT_RELOAD_SOURCE_FIXED_BITS = 0x0,
- NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS = 0x4000000,
- NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS = 0x8000000,
- NI_GPCT_OR_GATE_BIT = 0x10000000,
- NI_GPCT_INVERT_OUTPUT_BIT = 0x20000000
-};
-
-/* Bits for setting a clock source with
- * INSN_CONFIG_SET_CLOCK_SRC when using NI general-purpose counters. */
-enum ni_gpct_clock_source_bits {
- NI_GPCT_CLOCK_SRC_SELECT_MASK = 0x3f,
- NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS = 0x0,
- NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS = 0x1,
- NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS = 0x2,
- NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS = 0x3,
- NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS = 0x4,
- NI_GPCT_NEXT_TC_CLOCK_SRC_BITS = 0x5,
- /* NI 660x-specific */
- NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6,
- NI_GPCT_PXI10_CLOCK_SRC_BITS = 0x7,
- NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS = 0x8,
- NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS = 0x9,
- NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK = 0x30000000,
- NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS = 0x0,
- /* divide source by 2 */
- NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000,
- /* divide source by 8 */
- NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000,
- NI_GPCT_INVERT_CLOCK_SRC_BIT = 0x80000000
-};
-
-/* NI 660x-specific */
-#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x) (0x10 + (x))
-
-#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x) (0x18 + (x))
-
-/* no pfi on NI 660x */
-#define NI_GPCT_PFI_CLOCK_SRC_BITS(x) (0x20 + (x))
-
-/*
- * Possibilities for setting a gate source with INSN_CONFIG_SET_GATE_SRC when
- * using NI general-purpose counters.  May be bitwise-or'd with CR_EDGE or
- * CR_INVERT.
- */
-enum ni_gpct_gate_select {
- /* m-series gates */
- NI_GPCT_TIMESTAMP_MUX_GATE_SELECT = 0x0,
- NI_GPCT_AI_START2_GATE_SELECT = 0x12,
- NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT = 0x13,
- NI_GPCT_NEXT_OUT_GATE_SELECT = 0x14,
- NI_GPCT_AI_START1_GATE_SELECT = 0x1c,
- NI_GPCT_NEXT_SOURCE_GATE_SELECT = 0x1d,
- NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT = 0x1e,
- NI_GPCT_LOGIC_LOW_GATE_SELECT = 0x1f,
- /* more gates for 660x */
- NI_GPCT_SOURCE_PIN_i_GATE_SELECT = 0x100,
- NI_GPCT_GATE_PIN_i_GATE_SELECT = 0x101,
- /* more gates for 660x "second gate" */
- NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT = 0x201,
- NI_GPCT_SELECTED_GATE_GATE_SELECT = 0x21e,
- /* m-series "second gate" sources are unknown,
- * we should add them here with an offset of 0x300 when
- * known. */
- NI_GPCT_DISABLED_GATE_SELECT = 0x8000,
-};
-
-#define NI_GPCT_GATE_PIN_GATE_SELECT(x) (0x102 + (x))
-#define NI_GPCT_RTSI_GATE_SELECT(x) NI_USUAL_RTSI_SELECT(x)
-#define NI_GPCT_PFI_GATE_SELECT(x) NI_USUAL_PFI_SELECT(x)
-#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x) (0x202 + (x))
-
-/*
- * Possibilities for setting a source with INSN_CONFIG_SET_OTHER_SRC when
- * using NI general-purpose counters.
- */
-enum ni_gpct_other_index {
- NI_GPCT_SOURCE_ENCODER_A,
- NI_GPCT_SOURCE_ENCODER_B,
- NI_GPCT_SOURCE_ENCODER_Z
-};
-
-enum ni_gpct_other_select {
- /* m-series gates */
- /* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
- NI_GPCT_DISABLED_OTHER_SELECT = 0x8000,
-};
-
-#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x)
-
-/*
- * Start sources for NI general-purpose counters, for use with
- * INSN_CONFIG_ARM.
- */
-enum ni_gpct_arm_source {
- NI_GPCT_ARM_IMMEDIATE = 0x0,
- NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1, /* Start both the counter
- * and the adjacent paired
- * counter simultaneously */
- /* NI doesn't document bits for selecting hardware arm triggers.
- * If the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
- * significant bits (3 bits for 660x or 5 bits for m-series)
- * through to the hardware. This will at least allow someone to
- * figure out what the bits do later. */
- NI_GPCT_ARM_UNKNOWN = 0x1000,
-};
-
-/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
-enum ni_gpct_filter_select {
- NI_GPCT_FILTER_OFF = 0x0,
- NI_GPCT_FILTER_TIMEBASE_3_SYNC = 0x1,
- NI_GPCT_FILTER_100x_TIMEBASE_1 = 0x2,
- NI_GPCT_FILTER_20x_TIMEBASE_1 = 0x3,
- NI_GPCT_FILTER_10x_TIMEBASE_1 = 0x4,
- NI_GPCT_FILTER_2x_TIMEBASE_1 = 0x5,
- NI_GPCT_FILTER_2x_TIMEBASE_3 = 0x6
-};
-
-/* PFI digital filtering options for ni m-series for use with
- * INSN_CONFIG_FILTER. */
-enum ni_pfi_filter_select {
- NI_PFI_FILTER_OFF = 0x0,
- NI_PFI_FILTER_125ns = 0x1,
- NI_PFI_FILTER_6425ns = 0x2,
- NI_PFI_FILTER_2550us = 0x3
-};
-
-/* master clock sources for ni mio boards and INSN_CONFIG_SET_CLOCK_SRC */
-enum ni_mio_clock_source {
- NI_MIO_INTERNAL_CLOCK = 0,
- NI_MIO_RTSI_CLOCK = 1, /* doesn't work for m-series, use
- NI_MIO_PLL_RTSI_CLOCK() */
- /* the NI_MIO_PLL_* sources are m-series only */
- NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK = 2,
- NI_MIO_PLL_PXI10_CLOCK = 3,
- NI_MIO_PLL_RTSI0_CLOCK = 4
-};
-
-#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x))
-
-/*
- * Signals which can be routed to an NI RTSI pin with INSN_CONFIG_SET_ROUTING.
- * The numbers assigned are not arbitrary; they correspond to the bits
- * required to program the board.
- */
-enum ni_rtsi_routing {
- NI_RTSI_OUTPUT_ADR_START1 = 0,
- NI_RTSI_OUTPUT_ADR_START2 = 1,
- NI_RTSI_OUTPUT_SCLKG = 2,
- NI_RTSI_OUTPUT_DACUPDN = 3,
- NI_RTSI_OUTPUT_DA_START1 = 4,
- NI_RTSI_OUTPUT_G_SRC0 = 5,
- NI_RTSI_OUTPUT_G_GATE0 = 6,
- NI_RTSI_OUTPUT_RGOUT0 = 7,
- NI_RTSI_OUTPUT_RTSI_BRD_0 = 8,
- NI_RTSI_OUTPUT_RTSI_OSC = 12 /* pre-m-series always have RTSI
- * clock on line 7 */
-};
-
-#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x))
-
-/* Signals which can be routed to an NI PFI pin on an m-series board with
- * INSN_CONFIG_SET_ROUTING. These numbers are also returned by
- * INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their routing
- * cannot be changed. The numbers assigned are not arbitrary; they correspond
- * to the bits required to program the board. */
-enum ni_pfi_routing {
- NI_PFI_OUTPUT_PFI_DEFAULT = 0,
- NI_PFI_OUTPUT_AI_START1 = 1,
- NI_PFI_OUTPUT_AI_START2 = 2,
- NI_PFI_OUTPUT_AI_CONVERT = 3,
- NI_PFI_OUTPUT_G_SRC1 = 4,
- NI_PFI_OUTPUT_G_GATE1 = 5,
- NI_PFI_OUTPUT_AO_UPDATE_N = 6,
- NI_PFI_OUTPUT_AO_START1 = 7,
- NI_PFI_OUTPUT_AI_START_PULSE = 8,
- NI_PFI_OUTPUT_G_SRC0 = 9,
- NI_PFI_OUTPUT_G_GATE0 = 10,
- NI_PFI_OUTPUT_EXT_STROBE = 11,
- NI_PFI_OUTPUT_AI_EXT_MUX_CLK = 12,
- NI_PFI_OUTPUT_GOUT0 = 13,
- NI_PFI_OUTPUT_GOUT1 = 14,
- NI_PFI_OUTPUT_FREQ_OUT = 15,
- NI_PFI_OUTPUT_PFI_DO = 16,
- NI_PFI_OUTPUT_I_ATRIG = 17,
- NI_PFI_OUTPUT_RTSI0 = 18,
- NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN = 26,
- NI_PFI_OUTPUT_SCXI_TRIG1 = 27,
- NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI = 28,
- NI_PFI_OUTPUT_CDI_SAMPLE = 29,
- NI_PFI_OUTPUT_CDO_UPDATE = 30
-};
-
-#define NI_PFI_OUTPUT_RTSI(x) (NI_PFI_OUTPUT_RTSI0 + (x))
-
-/*
- * Signals which can be routed to output on an NI PFI pin on a 660x board
- * with INSN_CONFIG_SET_ROUTING.  The numbers assigned are not arbitrary;
- * they correspond to the bits required to program the board.  Lines 0 to 7
- * can only be set to NI_660X_PFI_OUTPUT_DIO.  Lines 32 to 39 can only be
- * set to NI_660X_PFI_OUTPUT_COUNTER.
- */
-enum ni_660x_pfi_routing {
- NI_660X_PFI_OUTPUT_COUNTER = 1, /* counter */
- NI_660X_PFI_OUTPUT_DIO = 2, /* static digital output */
-};
-
-/* NI External Trigger lines. These values are not arbitrary, but are related
- * to the bits required to program the board (offset by 1 for historical
- * reasons). */
-#define NI_EXT_PFI(x) (NI_USUAL_PFI_SELECT(x) - 1)
-#define NI_EXT_RTSI(x) (NI_USUAL_RTSI_SELECT(x) - 1)
-
-/* status bits for INSN_CONFIG_GET_COUNTER_STATUS */
-enum comedi_counter_status_flags {
- COMEDI_COUNTER_ARMED = 0x1,
- COMEDI_COUNTER_COUNTING = 0x2,
- COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
-};
-
-/* Clock sources for CDIO subdevice on NI m-series boards. Used as the
- * scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd
- * with CR_INVERT to change polarity. */
-enum ni_m_series_cdio_scan_begin_src {
- NI_CDIO_SCAN_BEGIN_SRC_GROUND = 0,
- NI_CDIO_SCAN_BEGIN_SRC_AI_START = 18,
- NI_CDIO_SCAN_BEGIN_SRC_AI_CONVERT = 19,
- NI_CDIO_SCAN_BEGIN_SRC_PXI_STAR_TRIGGER = 20,
- NI_CDIO_SCAN_BEGIN_SRC_G0_OUT = 28,
- NI_CDIO_SCAN_BEGIN_SRC_G1_OUT = 29,
- NI_CDIO_SCAN_BEGIN_SRC_ANALOG_TRIGGER = 30,
- NI_CDIO_SCAN_BEGIN_SRC_AO_UPDATE = 31,
- NI_CDIO_SCAN_BEGIN_SRC_FREQ_OUT = 32,
- NI_CDIO_SCAN_BEGIN_SRC_DIO_CHANGE_DETECT_IRQ = 33
-};
-
-#define NI_CDIO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x)
-#define NI_CDIO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x)
-
-/* scan_begin_src for scan_begin_arg==TRIG_EXT with analog output command on NI
- * boards. These scan begin sources can also be bitwise-or'd with CR_INVERT to
- * change polarity. */
-#define NI_AO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x)
-#define NI_AO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x)
-
-/* Bits for setting a clock source with
- * INSN_CONFIG_SET_CLOCK_SRC when using NI frequency output subdevice. */
-enum ni_freq_out_clock_source_bits {
- NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC, /* 10 MHz */
-	NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC	/* 100 kHz */
-};
-
-/* Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
- * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). */
-enum amplc_dio_clock_source {
- AMPLC_DIO_CLK_CLKN, /* per channel external clock
- input/output pin (pin is only an
- input when clock source set to this
- value, otherwise it is an output) */
- AMPLC_DIO_CLK_10MHZ, /* 10 MHz internal clock */
- AMPLC_DIO_CLK_1MHZ, /* 1 MHz internal clock */
- AMPLC_DIO_CLK_100KHZ, /* 100 kHz internal clock */
- AMPLC_DIO_CLK_10KHZ, /* 10 kHz internal clock */
- AMPLC_DIO_CLK_1KHZ, /* 1 kHz internal clock */
- AMPLC_DIO_CLK_OUTNM1, /* output of preceding counter channel
- (for channel 0, preceding counter
- channel is channel 2 on preceding
- counter subdevice, for first counter
- subdevice, preceding counter
- subdevice is the last counter
- subdevice) */
- AMPLC_DIO_CLK_EXT, /* per chip external input pin */
- /* the following are "enhanced" clock sources for PCIe models */
- AMPLC_DIO_CLK_VCC, /* clock input HIGH */
- AMPLC_DIO_CLK_GND, /* clock input LOW */
- AMPLC_DIO_CLK_PAT_PRESENT, /* "pattern present" signal */
- AMPLC_DIO_CLK_20MHZ /* 20 MHz internal clock */
-};
-
-/* Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
- * timer subdevice on some Amplicon DIO PCIe boards (amplc_dio200 driver). */
-enum amplc_dio_ts_clock_src {
- AMPLC_DIO_TS_CLK_1GHZ, /* 1 ns period with 20 ns granularity */
- AMPLC_DIO_TS_CLK_1MHZ, /* 1 us period */
- AMPLC_DIO_TS_CLK_1KHZ /* 1 ms period */
-};
-
-/* Values for setting a gate source with INSN_CONFIG_SET_GATE_SRC for
- * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver). */
-enum amplc_dio_gate_source {
- AMPLC_DIO_GAT_VCC, /* internal high logic level */
- AMPLC_DIO_GAT_GND, /* internal low logic level */
- AMPLC_DIO_GAT_GATN, /* per channel external gate input */
- AMPLC_DIO_GAT_NOUTNM2, /* negated output of counter channel
- minus 2 (for channels 0 or 1,
- channel minus 2 is channel 1 or 2 on
- the preceding counter subdevice, for
- the first counter subdevice the
- preceding counter subdevice is the
- last counter subdevice) */
- AMPLC_DIO_GAT_RESERVED4,
- AMPLC_DIO_GAT_RESERVED5,
- AMPLC_DIO_GAT_RESERVED6,
- AMPLC_DIO_GAT_RESERVED7,
- /* the following are "enhanced" gate sources for PCIe models */
- AMPLC_DIO_GAT_NGATN = 6, /* negated per channel gate input */
- AMPLC_DIO_GAT_OUTNM2, /* non-negated output of counter
- channel minus 2 */
- AMPLC_DIO_GAT_PAT_PRESENT, /* "pattern present" signal */
- AMPLC_DIO_GAT_PAT_OCCURRED, /* "pattern occurred" latched */
- AMPLC_DIO_GAT_PAT_GONE, /* "pattern gone away" latched */
- AMPLC_DIO_GAT_NPAT_PRESENT, /* negated "pattern present" */
- AMPLC_DIO_GAT_NPAT_OCCURRED, /* negated "pattern occurred" */
- AMPLC_DIO_GAT_NPAT_GONE /* negated "pattern gone away" */
-};
-
-/*
- * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
- * the counter subdevice on the Kolter Electronic PCI-Counter board
- * (ke_counter driver).
- */
-enum ke_counter_clock_source {
-	KE_CLK_20MHZ,	/* internal 20 MHz (default) */
-	KE_CLK_4MHZ,	/* internal 4 MHz (option) */
- KE_CLK_EXT /* external clock on pin 21 of D-Sub */
-};
-
-#endif /* _COMEDI_H */
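A minimal standalone sketch of the chanspec packing defined in the header
above; the macro copies below repeat the header's definitions so the example
compiles on its own, and the channel, range and reference values are purely
illustrative.

#include <stdio.h>

/* Local copies of the unpacking macros defined in the deleted header above. */
#define CR_CHAN(a)	((a) & 0xffff)
#define CR_RANGE(a)	(((a) >> 16) & 0xff)
#define CR_AREF(a)	(((a) >> 24) & 0x03)

int main(void)
{
	/* channel 3, range index 1, aref 0 (AREF_GROUND), no flags */
	unsigned int spec = 3 | (1U << 16) | (0U << 24);

	printf("chan=%u range=%u aref=%u\n",
	       CR_CHAN(spec), CR_RANGE(spec), CR_AREF(spec));
	return 0;
}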
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
deleted file mode 100644
index d45a4b65e8d3..000000000000
--- a/drivers/staging/comedi/comedi_buf.c
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * comedi_buf.c
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include "comedidev.h"
-#include "comedi_internal.h"
-
-#ifdef PAGE_KERNEL_NOCACHE
-#define COMEDI_PAGE_PROTECTION PAGE_KERNEL_NOCACHE
-#else
-#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
-#endif
-
-static void comedi_buf_map_kref_release(struct kref *kref)
-{
- struct comedi_buf_map *bm =
- container_of(kref, struct comedi_buf_map, refcount);
- struct comedi_buf_page *buf;
- unsigned int i;
-
- if (bm->page_list) {
- for (i = 0; i < bm->n_pages; i++) {
- buf = &bm->page_list[i];
- clear_bit(PG_reserved,
- &(virt_to_page(buf->virt_addr)->flags));
- if (bm->dma_dir != DMA_NONE) {
-#ifdef CONFIG_HAS_DMA
- dma_free_coherent(bm->dma_hw_dev,
- PAGE_SIZE,
- buf->virt_addr,
- buf->dma_addr);
-#endif
- } else {
- free_page((unsigned long)buf->virt_addr);
- }
- }
- vfree(bm->page_list);
- }
- if (bm->dma_dir != DMA_NONE)
- put_device(bm->dma_hw_dev);
- kfree(bm);
-}
-
-static void __comedi_buf_free(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- struct comedi_buf_map *bm;
- unsigned long flags;
-
- if (async->prealloc_buf) {
- vunmap(async->prealloc_buf);
- async->prealloc_buf = NULL;
- async->prealloc_bufsz = 0;
- }
-
- spin_lock_irqsave(&s->spin_lock, flags);
- bm = async->buf_map;
- async->buf_map = NULL;
- spin_unlock_irqrestore(&s->spin_lock, flags);
- comedi_buf_map_put(bm);
-}
-
-static void __comedi_buf_alloc(struct comedi_device *dev,
- struct comedi_subdevice *s,
-			       unsigned int n_pages)
-{
- struct comedi_async *async = s->async;
- struct page **pages = NULL;
- struct comedi_buf_map *bm;
- struct comedi_buf_page *buf;
- unsigned long flags;
-	unsigned int i;
-
- if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
- dev_err(dev->class_dev,
- "dma buffer allocation not supported\n");
- return;
- }
-
- bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
- if (!bm)
- return;
-
- kref_init(&bm->refcount);
- spin_lock_irqsave(&s->spin_lock, flags);
- async->buf_map = bm;
- spin_unlock_irqrestore(&s->spin_lock, flags);
- bm->dma_dir = s->async_dma_dir;
- if (bm->dma_dir != DMA_NONE)
- /* Need ref to hardware device to free buffer later. */
- bm->dma_hw_dev = get_device(dev->hw_dev);
-
- bm->page_list = vzalloc(sizeof(*buf) * n_pages);
- if (bm->page_list)
- pages = vmalloc(sizeof(struct page *) * n_pages);
-
- if (!pages)
- return;
-
- for (i = 0; i < n_pages; i++) {
- buf = &bm->page_list[i];
- if (bm->dma_dir != DMA_NONE)
-#ifdef CONFIG_HAS_DMA
- buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
- PAGE_SIZE,
- &buf->dma_addr,
- GFP_KERNEL |
- __GFP_COMP);
-#else
- break;
-#endif
- else
- buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
- if (!buf->virt_addr)
- break;
-
- set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));
-
- pages[i] = virt_to_page(buf->virt_addr);
- }
- spin_lock_irqsave(&s->spin_lock, flags);
- bm->n_pages = i;
- spin_unlock_irqrestore(&s->spin_lock, flags);
-
- /* vmap the prealloc_buf if all the pages were allocated */
- if (i == n_pages)
- async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
- COMEDI_PAGE_PROTECTION);
-
- vfree(pages);
-}
-
-void comedi_buf_map_get(struct comedi_buf_map *bm)
-{
- if (bm)
- kref_get(&bm->refcount);
-}
-
-int comedi_buf_map_put(struct comedi_buf_map *bm)
-{
- if (bm)
- return kref_put(&bm->refcount, comedi_buf_map_kref_release);
- return 1;
-}
-
-/* returns s->async->buf_map and increments its kref refcount */
-struct comedi_buf_map *
-comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- struct comedi_buf_map *bm = NULL;
- unsigned long flags;
-
- if (!async)
- return NULL;
-
- spin_lock_irqsave(&s->spin_lock, flags);
- bm = async->buf_map;
- /* only want it if buffer pages allocated */
- if (bm && bm->n_pages)
- comedi_buf_map_get(bm);
- else
- bm = NULL;
- spin_unlock_irqrestore(&s->spin_lock, flags);
-
- return bm;
-}
-
-bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
-{
- struct comedi_buf_map *bm = s->async->buf_map;
-
- return bm && (atomic_read(&bm->refcount.refcount) > 1);
-}
-
-int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned long new_size)
-{
- struct comedi_async *async = s->async;
-
- /* Round up new_size to multiple of PAGE_SIZE */
- new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
-
- /* if no change is required, do nothing */
- if (async->prealloc_buf && async->prealloc_bufsz == new_size)
- return 0;
-
- /* deallocate old buffer */
- __comedi_buf_free(dev, s);
-
- /* allocate new buffer */
- if (new_size) {
-		unsigned int n_pages = new_size >> PAGE_SHIFT;
-
- __comedi_buf_alloc(dev, s, n_pages);
-
- if (!async->prealloc_buf) {
- /* allocation failed */
- __comedi_buf_free(dev, s);
- return -ENOMEM;
- }
- }
- async->prealloc_bufsz = new_size;
-
- return 0;
-}
-
-void comedi_buf_reset(struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
-
- async->buf_write_alloc_count = 0;
- async->buf_write_count = 0;
- async->buf_read_alloc_count = 0;
- async->buf_read_count = 0;
-
- async->buf_write_ptr = 0;
- async->buf_read_ptr = 0;
-
- async->cur_chan = 0;
- async->scans_done = 0;
- async->scan_progress = 0;
- async->munge_chan = 0;
- async->munge_count = 0;
- async->munge_ptr = 0;
-
- async->events = 0;
-}
-
-static unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
-
- return free_end - async->buf_write_alloc_count;
-}
-
-/**
- * comedi_buf_write_alloc() - Reserve buffer space for writing
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to reserve in bytes.
- *
- * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
- * data buffer associated with the subdevice. The amount reserved is limited
- * by the space available.
- *
- * Return: The amount of space reserved in bytes.
- */
-unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- struct comedi_async *async = s->async;
- unsigned int available = comedi_buf_write_n_available(s);
-
- if (nbytes > available)
- nbytes = available;
-
- async->buf_write_alloc_count += nbytes;
-
- /*
- * ensure the async buffer 'counts' are read and updated
- * before we write data to the write-alloc'ed buffer space
- */
- smp_mb();
-
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
-
-/*
- * Munging is applied to the data by the core as it passes between user
- * and kernel space.
- */
-static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
- unsigned int num_bytes)
-{
- struct comedi_async *async = s->async;
- unsigned int count = 0;
-	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
-
- if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
- async->munge_count += num_bytes;
- count = num_bytes;
- } else {
- /* don't munge partial samples */
- num_bytes -= num_bytes % num_sample_bytes;
- while (count < num_bytes) {
- int block_size = num_bytes - count;
- unsigned int buf_end;
-
- buf_end = async->prealloc_bufsz - async->munge_ptr;
- if (block_size > buf_end)
- block_size = buf_end;
-
- s->munge(s->device, s,
- async->prealloc_buf + async->munge_ptr,
- block_size, async->munge_chan);
-
- /*
- * ensure data is munged in buffer before the
- * async buffer munge_count is incremented
- */
- smp_wmb();
-
- async->munge_chan += block_size / num_sample_bytes;
- async->munge_chan %= async->cmd.chanlist_len;
- async->munge_count += block_size;
- async->munge_ptr += block_size;
- async->munge_ptr %= async->prealloc_bufsz;
- count += block_size;
- }
- }
-
- return count;
-}
-
-unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
-
- return async->buf_write_alloc_count - async->buf_write_count;
-}
-
-/**
- * comedi_buf_write_free() - Free buffer space after it is written
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to free in bytes.
- *
- * Free up to @nbytes bytes of space previously reserved for writing in the
- * COMEDI acquisition data buffer associated with the subdevice. The amount of
- * space freed is limited to the amount that was reserved. The freed space is
- * assumed to have been filled with sample data by the writer.
- *
- * If the samples in the freed space need to be "munged", do so here. The
- * freed space becomes available for allocation by the reader.
- *
- * Return: The amount of space freed in bytes.
- */
-unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- struct comedi_async *async = s->async;
- unsigned int allocated = comedi_buf_write_n_allocated(s);
-
- if (nbytes > allocated)
- nbytes = allocated;
-
- async->buf_write_count += nbytes;
- async->buf_write_ptr += nbytes;
- comedi_buf_munge(s, async->buf_write_count - async->munge_count);
- if (async->buf_write_ptr >= async->prealloc_bufsz)
- async->buf_write_ptr %= async->prealloc_bufsz;
-
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_write_free);
-
-/**
- * comedi_buf_read_n_available() - Determine amount of readable buffer space
- * @s: COMEDI subdevice.
- *
- * Determine the amount of readable buffer space in the COMEDI acquisition data
- * buffer associated with the subdevice. The readable buffer space is that
- * which has been freed by the writer and "munged" to the sample data format
- * expected by COMEDI if necessary.
- *
- * Return: The amount of readable buffer space.
- */
-unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
-	unsigned int num_bytes;
-
- if (!async)
- return 0;
-
- num_bytes = async->munge_count - async->buf_read_count;
-
- /*
- * ensure the async buffer 'counts' are read before we
- * attempt to read data from the buffer
- */
- smp_rmb();
-
- return num_bytes;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
-
-/**
- * comedi_buf_read_alloc() - Reserve buffer space for reading
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to reserve in bytes.
- *
- * Reserve up to @nbytes bytes of previously written and "munged" buffer space
- * for reading in the COMEDI acquisition data buffer associated with the
- * subdevice. The amount reserved is limited to the space available. The
- * reader can read from the reserved space and then free it. A reader is also
- * allowed to read from the space before reserving it as long as it determines
- * the amount of readable data available, but the space needs to be marked as
- * reserved before it can be freed.
- *
- * Return: The amount of space reserved in bytes.
- */
-unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- struct comedi_async *async = s->async;
- unsigned int available;
-
- available = async->munge_count - async->buf_read_alloc_count;
- if (nbytes > available)
- nbytes = available;
-
- async->buf_read_alloc_count += nbytes;
-
- /*
- * ensure the async buffer 'counts' are read before we
- * attempt to read data from the read-alloc'ed buffer space
- */
- smp_rmb();
-
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
-
-static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
-{
- return async->buf_read_alloc_count - async->buf_read_count;
-}
-
-/**
- * comedi_buf_read_free() - Free buffer space after it has been read
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to free in bytes.
- *
- * Free up to @nbytes bytes of buffer space previously reserved for reading in
- * the COMEDI acquisition data buffer associated with the subdevice. The
- * amount of space freed is limited to the amount that was reserved.
- *
- * The freed space becomes available for allocation by the writer.
- *
- * Return: The amount of space freed in bytes.
- */
-unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- struct comedi_async *async = s->async;
- unsigned int allocated;
-
- /*
- * ensure data has been read out of buffer before
- * the async read count is incremented
- */
- smp_mb();
-
- allocated = comedi_buf_read_n_allocated(async);
- if (nbytes > allocated)
- nbytes = allocated;
-
- async->buf_read_count += nbytes;
- async->buf_read_ptr += nbytes;
- async->buf_read_ptr %= async->prealloc_bufsz;
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_read_free);
-
-static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
- const void *data, unsigned int num_bytes)
-{
- struct comedi_async *async = s->async;
- unsigned int write_ptr = async->buf_write_ptr;
-
- while (num_bytes) {
- unsigned int block_size;
-
- if (write_ptr + num_bytes > async->prealloc_bufsz)
- block_size = async->prealloc_bufsz - write_ptr;
- else
- block_size = num_bytes;
-
- memcpy(async->prealloc_buf + write_ptr, data, block_size);
-
- data += block_size;
- num_bytes -= block_size;
-
- write_ptr = 0;
- }
-}
-
-static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
- void *dest, unsigned int nbytes)
-{
- void *src;
- struct comedi_async *async = s->async;
- unsigned int read_ptr = async->buf_read_ptr;
-
- while (nbytes) {
- unsigned int block_size;
-
- src = async->prealloc_buf + read_ptr;
-
- if (nbytes >= async->prealloc_bufsz - read_ptr)
- block_size = async->prealloc_bufsz - read_ptr;
- else
- block_size = nbytes;
-
- memcpy(dest, src, block_size);
- nbytes -= block_size;
- dest += block_size;
- read_ptr = 0;
- }
-}
-
-/**
- * comedi_buf_write_samples() - Write sample data to COMEDI buffer
- * @s: COMEDI subdevice.
- * @data: Pointer to source samples.
- * @nsamples: Number of samples to write.
- *
- * Write up to @nsamples samples to the COMEDI acquisition data buffer
- * associated with the subdevice, mark it as written and update the
- * acquisition scan progress. If there is not enough room for the specified
- * number of samples, the number of samples written is limited to the number
- * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
- * acquisition to terminate with an overrun error. Set the %COMEDI_CB_BLOCK
- * event flag if any samples are written to cause waiting tasks to be woken
- * when the event flags are processed.
- *
- * Return: The amount of data written in bytes.
- */
-unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
- const void *data, unsigned int nsamples)
-{
- unsigned int max_samples;
- unsigned int nbytes;
-
- /*
- * Make sure there is enough room in the buffer for all the samples.
- * If not, clamp the nsamples to the number that will fit, flag the
- * buffer overrun and add the samples that fit.
- */
- max_samples = comedi_bytes_to_samples(s,
- comedi_buf_write_n_available(s));
- if (nsamples > max_samples) {
- dev_warn(s->device->class_dev, "buffer overrun\n");
- s->async->events |= COMEDI_CB_OVERFLOW;
- nsamples = max_samples;
- }
-
- if (nsamples == 0)
- return 0;
-
- nbytes = comedi_buf_write_alloc(s,
- comedi_samples_to_bytes(s, nsamples));
- comedi_buf_memcpy_to(s, data, nbytes);
- comedi_buf_write_free(s, nbytes);
- comedi_inc_scan_progress(s, nbytes);
- s->async->events |= COMEDI_CB_BLOCK;
-
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
-
-/**
- * comedi_buf_read_samples() - Read sample data from COMEDI buffer
- * @s: COMEDI subdevice.
- * @data: Pointer to destination.
- * @nsamples: Maximum number of samples to read.
- *
- * Read up to @nsamples samples from the COMEDI acquisition data buffer
- * associated with the subdevice, mark it as read and update the acquisition
- * scan progress. Limit the number of samples read to the number available.
- * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
- * tasks to be woken when the event flags are processed.
- *
- * Return: The amount of data read in bytes.
- */
-unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
- void *data, unsigned int nsamples)
-{
- unsigned int max_samples;
- unsigned int nbytes;
-
- /* clamp nsamples to the number of full samples available */
- max_samples = comedi_bytes_to_samples(s,
- comedi_buf_read_n_available(s));
- if (nsamples > max_samples)
- nsamples = max_samples;
-
- if (nsamples == 0)
- return 0;
-
- nbytes = comedi_buf_read_alloc(s,
- comedi_samples_to_bytes(s, nsamples));
- comedi_buf_memcpy_from(s, data, nbytes);
- comedi_buf_read_free(s, nbytes);
- comedi_inc_scan_progress(s, nbytes);
- s->async->events |= COMEDI_CB_BLOCK;
-
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
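A minimal sketch of how a low-level driver would hand freshly acquired
samples to the buffer code above; it assumes the usual "comedidev.h"
declarations, and the helper name and the origin of the samples are
hypothetical.

#include "comedidev.h"

/*
 * Hypothetical helper: push one block of 16-bit samples into the subdevice's
 * acquisition buffer.  comedi_buf_write_samples() clamps the count to the
 * free space, flags COMEDI_CB_OVERFLOW on overrun and COMEDI_CB_BLOCK when
 * data was added; the driver's interrupt handler would then let the core
 * process s->async->events.
 */
static void example_push_samples(struct comedi_subdevice *s,
				 const unsigned short *samples,
				 unsigned int nsamples)
{
	comedi_buf_write_samples(s, samples, nsamples);
}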
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
deleted file mode 100644
index f356386d833a..000000000000
--- a/drivers/staging/comedi/comedi_compat32.c
+++ /dev/null
@@ -1,464 +0,0 @@
-/*
- * comedi/comedi_compat32.c
- * 32-bit ioctl compatibility for 64-bit comedi kernel module.
- *
- * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
- * Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include "comedi.h"
-#include "comedi_compat32.h"
-
-#define COMEDI32_CHANINFO _IOR(CIO, 3, struct comedi32_chaninfo_struct)
-#define COMEDI32_RANGEINFO _IOR(CIO, 8, struct comedi32_rangeinfo_struct)
-/*
- * N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR.
- * It's too late to change it now, but it only affects the command number.
- */
-#define COMEDI32_CMD _IOR(CIO, 9, struct comedi32_cmd_struct)
-/*
- * N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR.
- * It's too late to change it now, but it only affects the command number.
- */
-#define COMEDI32_CMDTEST _IOR(CIO, 10, struct comedi32_cmd_struct)
-#define COMEDI32_INSNLIST _IOR(CIO, 11, struct comedi32_insnlist_struct)
-#define COMEDI32_INSN _IOR(CIO, 12, struct comedi32_insn_struct)
-
-struct comedi32_chaninfo_struct {
- unsigned int subdev;
- compat_uptr_t maxdata_list; /* 32-bit 'unsigned int *' */
- compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */
- compat_uptr_t rangelist; /* 32-bit 'unsigned int *' */
- unsigned int unused[4];
-};
-
-struct comedi32_rangeinfo_struct {
- unsigned int range_type;
- compat_uptr_t range_ptr; /* 32-bit 'void *' */
-};
-
-struct comedi32_cmd_struct {
- unsigned int subdev;
- unsigned int flags;
- unsigned int start_src;
- unsigned int start_arg;
- unsigned int scan_begin_src;
- unsigned int scan_begin_arg;
- unsigned int convert_src;
- unsigned int convert_arg;
- unsigned int scan_end_src;
- unsigned int scan_end_arg;
- unsigned int stop_src;
- unsigned int stop_arg;
- compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */
- unsigned int chanlist_len;
- compat_uptr_t data; /* 32-bit 'short *' */
- unsigned int data_len;
-};
-
-struct comedi32_insn_struct {
- unsigned int insn;
- unsigned int n;
- compat_uptr_t data; /* 32-bit 'unsigned int *' */
- unsigned int subdev;
- unsigned int chanspec;
- unsigned int unused[3];
-};
-
-struct comedi32_insnlist_struct {
- unsigned int n_insns;
- compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */
-};
-
-/* Handle translated ioctl. */
-static int translated_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- if (file->f_op->unlocked_ioctl)
- return file->f_op->unlocked_ioctl(file, cmd, arg);
-
- return -ENOTTY;
-}
-
-/* Handle 32-bit COMEDI_CHANINFO ioctl. */
-static int compat_chaninfo(struct file *file, unsigned long arg)
-{
- struct comedi_chaninfo __user *chaninfo;
- struct comedi32_chaninfo_struct __user *chaninfo32;
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- chaninfo32 = compat_ptr(arg);
- chaninfo = compat_alloc_user_space(sizeof(*chaninfo));
-
- /* Copy chaninfo structure. Ignore unused members. */
- if (!access_ok(VERIFY_READ, chaninfo32, sizeof(*chaninfo32)) ||
- !access_ok(VERIFY_WRITE, chaninfo, sizeof(*chaninfo)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp.uint, &chaninfo32->subdev);
- err |= __put_user(temp.uint, &chaninfo->subdev);
- err |= __get_user(temp.uptr, &chaninfo32->maxdata_list);
- err |= __put_user(compat_ptr(temp.uptr), &chaninfo->maxdata_list);
- err |= __get_user(temp.uptr, &chaninfo32->flaglist);
- err |= __put_user(compat_ptr(temp.uptr), &chaninfo->flaglist);
- err |= __get_user(temp.uptr, &chaninfo32->rangelist);
- err |= __put_user(compat_ptr(temp.uptr), &chaninfo->rangelist);
- if (err)
- return -EFAULT;
-
- return translated_ioctl(file, COMEDI_CHANINFO, (unsigned long)chaninfo);
-}
-
-/* Handle 32-bit COMEDI_RANGEINFO ioctl. */
-static int compat_rangeinfo(struct file *file, unsigned long arg)
-{
- struct comedi_rangeinfo __user *rangeinfo;
- struct comedi32_rangeinfo_struct __user *rangeinfo32;
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- rangeinfo32 = compat_ptr(arg);
- rangeinfo = compat_alloc_user_space(sizeof(*rangeinfo));
-
- /* Copy rangeinfo structure. */
- if (!access_ok(VERIFY_READ, rangeinfo32, sizeof(*rangeinfo32)) ||
- !access_ok(VERIFY_WRITE, rangeinfo, sizeof(*rangeinfo)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp.uint, &rangeinfo32->range_type);
- err |= __put_user(temp.uint, &rangeinfo->range_type);
- err |= __get_user(temp.uptr, &rangeinfo32->range_ptr);
- err |= __put_user(compat_ptr(temp.uptr), &rangeinfo->range_ptr);
- if (err)
- return -EFAULT;
-
- return translated_ioctl(file, COMEDI_RANGEINFO,
- (unsigned long)rangeinfo);
-}
-
-/* Copy 32-bit cmd structure to native cmd structure. */
-static int get_compat_cmd(struct comedi_cmd __user *cmd,
- struct comedi32_cmd_struct __user *cmd32)
-{
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- /* Copy cmd structure. */
- if (!access_ok(VERIFY_READ, cmd32, sizeof(*cmd32)) ||
- !access_ok(VERIFY_WRITE, cmd, sizeof(*cmd)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp.uint, &cmd32->subdev);
- err |= __put_user(temp.uint, &cmd->subdev);
- err |= __get_user(temp.uint, &cmd32->flags);
- err |= __put_user(temp.uint, &cmd->flags);
- err |= __get_user(temp.uint, &cmd32->start_src);
- err |= __put_user(temp.uint, &cmd->start_src);
- err |= __get_user(temp.uint, &cmd32->start_arg);
- err |= __put_user(temp.uint, &cmd->start_arg);
- err |= __get_user(temp.uint, &cmd32->scan_begin_src);
- err |= __put_user(temp.uint, &cmd->scan_begin_src);
- err |= __get_user(temp.uint, &cmd32->scan_begin_arg);
- err |= __put_user(temp.uint, &cmd->scan_begin_arg);
- err |= __get_user(temp.uint, &cmd32->convert_src);
- err |= __put_user(temp.uint, &cmd->convert_src);
- err |= __get_user(temp.uint, &cmd32->convert_arg);
- err |= __put_user(temp.uint, &cmd->convert_arg);
- err |= __get_user(temp.uint, &cmd32->scan_end_src);
- err |= __put_user(temp.uint, &cmd->scan_end_src);
- err |= __get_user(temp.uint, &cmd32->scan_end_arg);
- err |= __put_user(temp.uint, &cmd->scan_end_arg);
- err |= __get_user(temp.uint, &cmd32->stop_src);
- err |= __put_user(temp.uint, &cmd->stop_src);
- err |= __get_user(temp.uint, &cmd32->stop_arg);
- err |= __put_user(temp.uint, &cmd->stop_arg);
- err |= __get_user(temp.uptr, &cmd32->chanlist);
- err |= __put_user((unsigned int __force *)compat_ptr(temp.uptr),
- &cmd->chanlist);
- err |= __get_user(temp.uint, &cmd32->chanlist_len);
- err |= __put_user(temp.uint, &cmd->chanlist_len);
- err |= __get_user(temp.uptr, &cmd32->data);
- err |= __put_user(compat_ptr(temp.uptr), &cmd->data);
- err |= __get_user(temp.uint, &cmd32->data_len);
- err |= __put_user(temp.uint, &cmd->data_len);
- return err ? -EFAULT : 0;
-}
-
-/* Copy native cmd structure to 32-bit cmd structure. */
-static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
- struct comedi_cmd __user *cmd)
-{
- int err;
- unsigned int temp;
-
- /*
- * Copy back most of cmd structure.
- *
- * Assume the pointer values are already valid.
- * (Could use ptr_to_compat() to set them.)
- */
- if (!access_ok(VERIFY_READ, cmd, sizeof(*cmd)) ||
- !access_ok(VERIFY_WRITE, cmd32, sizeof(*cmd32)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(temp, &cmd->subdev);
- err |= __put_user(temp, &cmd32->subdev);
- err |= __get_user(temp, &cmd->flags);
- err |= __put_user(temp, &cmd32->flags);
- err |= __get_user(temp, &cmd->start_src);
- err |= __put_user(temp, &cmd32->start_src);
- err |= __get_user(temp, &cmd->start_arg);
- err |= __put_user(temp, &cmd32->start_arg);
- err |= __get_user(temp, &cmd->scan_begin_src);
- err |= __put_user(temp, &cmd32->scan_begin_src);
- err |= __get_user(temp, &cmd->scan_begin_arg);
- err |= __put_user(temp, &cmd32->scan_begin_arg);
- err |= __get_user(temp, &cmd->convert_src);
- err |= __put_user(temp, &cmd32->convert_src);
- err |= __get_user(temp, &cmd->convert_arg);
- err |= __put_user(temp, &cmd32->convert_arg);
- err |= __get_user(temp, &cmd->scan_end_src);
- err |= __put_user(temp, &cmd32->scan_end_src);
- err |= __get_user(temp, &cmd->scan_end_arg);
- err |= __put_user(temp, &cmd32->scan_end_arg);
- err |= __get_user(temp, &cmd->stop_src);
- err |= __put_user(temp, &cmd32->stop_src);
- err |= __get_user(temp, &cmd->stop_arg);
- err |= __put_user(temp, &cmd32->stop_arg);
- /* Assume chanlist pointer is unchanged. */
- err |= __get_user(temp, &cmd->chanlist_len);
- err |= __put_user(temp, &cmd32->chanlist_len);
- /* Assume data pointer is unchanged. */
- err |= __get_user(temp, &cmd->data_len);
- err |= __put_user(temp, &cmd32->data_len);
- return err ? -EFAULT : 0;
-}
-
-/* Handle 32-bit COMEDI_CMD ioctl. */
-static int compat_cmd(struct file *file, unsigned long arg)
-{
- struct comedi_cmd __user *cmd;
- struct comedi32_cmd_struct __user *cmd32;
- int rc, err;
-
- cmd32 = compat_ptr(arg);
- cmd = compat_alloc_user_space(sizeof(*cmd));
-
- rc = get_compat_cmd(cmd, cmd32);
- if (rc)
- return rc;
-
- rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
- if (rc == -EAGAIN) {
- /* Special case: copy cmd back to user. */
- err = put_compat_cmd(cmd32, cmd);
- if (err)
- rc = err;
- }
-
- return rc;
-}
-
-/* Handle 32-bit COMEDI_CMDTEST ioctl. */
-static int compat_cmdtest(struct file *file, unsigned long arg)
-{
- struct comedi_cmd __user *cmd;
- struct comedi32_cmd_struct __user *cmd32;
- int rc, err;
-
- cmd32 = compat_ptr(arg);
- cmd = compat_alloc_user_space(sizeof(*cmd));
-
- rc = get_compat_cmd(cmd, cmd32);
- if (rc)
- return rc;
-
- rc = translated_ioctl(file, COMEDI_CMDTEST, (unsigned long)cmd);
- if (rc < 0)
- return rc;
-
- err = put_compat_cmd(cmd32, cmd);
- if (err)
- rc = err;
-
- return rc;
-}
-
-/* Copy 32-bit insn structure to native insn structure. */
-static int get_compat_insn(struct comedi_insn __user *insn,
- struct comedi32_insn_struct __user *insn32)
-{
- int err;
- union {
- unsigned int uint;
- compat_uptr_t uptr;
- } temp;
-
- /* Copy insn structure. Ignore the unused members. */
- err = 0;
- if (!access_ok(VERIFY_READ, insn32, sizeof(*insn32)) ||
- !access_ok(VERIFY_WRITE, insn, sizeof(*insn)))
- return -EFAULT;
-
- err |= __get_user(temp.uint, &insn32->insn);
- err |= __put_user(temp.uint, &insn->insn);
- err |= __get_user(temp.uint, &insn32->n);
- err |= __put_user(temp.uint, &insn->n);
- err |= __get_user(temp.uptr, &insn32->data);
- err |= __put_user(compat_ptr(temp.uptr), &insn->data);
- err |= __get_user(temp.uint, &insn32->subdev);
- err |= __put_user(temp.uint, &insn->subdev);
- err |= __get_user(temp.uint, &insn32->chanspec);
- err |= __put_user(temp.uint, &insn->chanspec);
- return err ? -EFAULT : 0;
-}
-
-/* Handle 32-bit COMEDI_INSNLIST ioctl. */
-static int compat_insnlist(struct file *file, unsigned long arg)
-{
- struct combined_insnlist {
- struct comedi_insnlist insnlist;
- struct comedi_insn insn[1];
- } __user *s;
- struct comedi32_insnlist_struct __user *insnlist32;
- struct comedi32_insn_struct __user *insn32;
- compat_uptr_t uptr;
- unsigned int n_insns, n;
- int err, rc;
-
- insnlist32 = compat_ptr(arg);
-
- /* Get 32-bit insnlist structure. */
- if (!access_ok(VERIFY_READ, insnlist32, sizeof(*insnlist32)))
- return -EFAULT;
-
- err = 0;
- err |= __get_user(n_insns, &insnlist32->n_insns);
- err |= __get_user(uptr, &insnlist32->insns);
- insn32 = compat_ptr(uptr);
- if (err)
- return -EFAULT;
-
- /* Allocate user memory to copy insnlist and insns into. */
- s = compat_alloc_user_space(offsetof(struct combined_insnlist,
- insn[n_insns]));
-
- /* Set native insnlist structure. */
- if (!access_ok(VERIFY_WRITE, &s->insnlist, sizeof(s->insnlist)))
- return -EFAULT;
-
- err |= __put_user(n_insns, &s->insnlist.n_insns);
- err |= __put_user(&s->insn[0], &s->insnlist.insns);
- if (err)
- return -EFAULT;
-
- /* Copy insn structures. */
- for (n = 0; n < n_insns; n++) {
- rc = get_compat_insn(&s->insn[n], &insn32[n]);
- if (rc)
- return rc;
- }
-
- return translated_ioctl(file, COMEDI_INSNLIST,
- (unsigned long)&s->insnlist);
-}
-
-/* Handle 32-bit COMEDI_INSN ioctl. */
-static int compat_insn(struct file *file, unsigned long arg)
-{
- struct comedi_insn __user *insn;
- struct comedi32_insn_struct __user *insn32;
- int rc;
-
- insn32 = compat_ptr(arg);
- insn = compat_alloc_user_space(sizeof(*insn));
-
- rc = get_compat_insn(insn, insn32);
- if (rc)
- return rc;
-
- return translated_ioctl(file, COMEDI_INSN, (unsigned long)insn);
-}
-
-/*
- * compat_ioctl file operation.
- *
- * Returns -ENOIOCTLCMD for unrecognised ioctl codes.
- */
-long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int rc;
-
- switch (cmd) {
- case COMEDI_DEVCONFIG:
- case COMEDI_DEVINFO:
- case COMEDI_SUBDINFO:
- case COMEDI_BUFCONFIG:
- case COMEDI_BUFINFO:
- /* Just need to translate the pointer argument. */
- arg = (unsigned long)compat_ptr(arg);
- rc = translated_ioctl(file, cmd, arg);
- break;
- case COMEDI_LOCK:
- case COMEDI_UNLOCK:
- case COMEDI_CANCEL:
- case COMEDI_POLL:
- case COMEDI_SETRSUBD:
- case COMEDI_SETWSUBD:
- /* No translation needed. */
- rc = translated_ioctl(file, cmd, arg);
- break;
- case COMEDI32_CHANINFO:
- rc = compat_chaninfo(file, arg);
- break;
- case COMEDI32_RANGEINFO:
- rc = compat_rangeinfo(file, arg);
- break;
- case COMEDI32_CMD:
- rc = compat_cmd(file, arg);
- break;
- case COMEDI32_CMDTEST:
- rc = compat_cmdtest(file, arg);
- break;
- case COMEDI32_INSNLIST:
- rc = compat_insnlist(file, arg);
- break;
- case COMEDI32_INSN:
- rc = compat_insn(file, arg);
- break;
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- return rc;
-}
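The handlers above exist because a structure that embeds a user pointer has a different size and layout under the 32-bit ABI than under the 64-bit kernel, so each 32-bit field has to be copied across individually, with compat_ptr() widening the 32-bit pointer handle into a usable user pointer. Below is a minimal standalone sketch of that layout mismatch, using only the members copied by get_compat_insn() above; the real comedi structures carry additional members not shown here, and the *_sketch names are illustrative, not comedi symbols.

#include <stdint.h>
#include <stdio.h>

/* Roughly how the 64-bit kernel lays out the native structure. */
struct native_insn_sketch {
	unsigned int insn;
	unsigned int n;
	unsigned int *data;		/* 8-byte pointer, 8-byte aligned */
	unsigned int subdev;
	unsigned int chanspec;
};

/* Roughly how a 32-bit caller lays out the same request. */
struct compat_insn_sketch {
	uint32_t insn;
	uint32_t n;
	uint32_t data;			/* 32-bit user pointer (compat_uptr_t) */
	uint32_t subdev;
	uint32_t chanspec;
};

int main(void)
{
	/* On a 64-bit build the sizes and field offsets differ, which is
	 * why the compat path copies members instead of reusing the buffer. */
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct native_insn_sketch),
	       sizeof(struct compat_insn_sketch));
	return 0;
}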
diff --git a/drivers/staging/comedi/comedi_compat32.h b/drivers/staging/comedi/comedi_compat32.h
deleted file mode 100644
index 5ce77f3e8c22..000000000000
--- a/drivers/staging/comedi/comedi_compat32.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * comedi/comedi_compat32.h
- * 32-bit ioctl compatibility for 64-bit comedi kernel module.
- *
- * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk>
- * Copyright (C) 2007 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _COMEDI_COMPAT32_H
-#define _COMEDI_COMPAT32_H
-
-#ifdef CONFIG_COMPAT
-
-struct file;
-long comedi_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
-
-#else /* CONFIG_COMPAT */
-
-#define comedi_compat_ioctl NULL
-
-#endif /* CONFIG_COMPAT */
-
-#endif /* _COMEDI_COMPAT32_H */
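Defining comedi_compat_ioctl to NULL when CONFIG_COMPAT is off lets a file_operations initializer reference the name unconditionally; the .compat_ioctl slot simply ends up NULL on kernels without compat support. A standalone sketch of that pattern follows, using hypothetical demo_* names rather than comedi symbols.

#include <stdio.h>

struct demo_ops {
	long (*compat_ioctl)(unsigned int cmd, unsigned long arg);
};

#ifdef DEMO_COMPAT
static long demo_compat_ioctl(unsigned int cmd, unsigned long arg)
{
	(void)cmd;
	(void)arg;
	return 0;
}
#else
/* Same trick as the header above: the handler name expands to NULL. */
#define demo_compat_ioctl NULL
#endif

static const struct demo_ops demo_fops = {
	.compat_ioctl = demo_compat_ioctl,	/* NULL unless built with -DDEMO_COMPAT */
};

int main(void)
{
	printf("compat handler %s\n",
	       demo_fops.compat_ioctl ? "present" : "absent");
	return 0;
}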
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
deleted file mode 100644
index ef4b58b2f7ef..000000000000
--- a/drivers/staging/comedi/comedi_fops.c
+++ /dev/null
@@ -1,2943 +0,0 @@
-/*
- * comedi/comedi_fops.c
- * comedi kernel module
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "comedi_compat32.h"
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/kmod.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include "comedidev.h"
-#include <linux/cdev.h>
-#include <linux/stat.h>
-
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include "comedi_internal.h"
-
-/*
- * comedi_subdevice "runflags"
- * COMEDI_SRF_RT: DEPRECATED: command is running real-time
- * COMEDI_SRF_ERROR: indicates a COMEDI_CB_ERROR event has occurred
- * since the last command was started
- * COMEDI_SRF_RUNNING: command is running
- * COMEDI_SRF_FREE_SPRIV: free s->private on detach
- *
- * COMEDI_SRF_BUSY_MASK: runflags that indicate the subdevice is "busy"
- */
-#define COMEDI_SRF_RT BIT(1)
-#define COMEDI_SRF_ERROR BIT(2)
-#define COMEDI_SRF_RUNNING BIT(27)
-#define COMEDI_SRF_FREE_SPRIV BIT(31)
-
-#define COMEDI_SRF_BUSY_MASK (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING)
-
-/**
- * struct comedi_file - Per-file private data for COMEDI device
- * @dev: COMEDI device.
- * @read_subdev: Current "read" subdevice.
- * @write_subdev: Current "write" subdevice.
- * @last_detach_count: Last known detach count.
- * @last_attached: Last known attached/detached state.
- */
-struct comedi_file {
- struct comedi_device *dev;
- struct comedi_subdevice *read_subdev;
- struct comedi_subdevice *write_subdev;
- unsigned int last_detach_count;
- bool last_attached:1;
-};
-
-#define COMEDI_NUM_MINORS 0x100
-#define COMEDI_NUM_SUBDEVICE_MINORS \
- (COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS)
-
-static int comedi_num_legacy_minors;
-module_param(comedi_num_legacy_minors, int, S_IRUGO);
-MODULE_PARM_DESC(comedi_num_legacy_minors,
- "number of comedi minor devices to reserve for non-auto-configured devices (default 0)"
- );
-
-unsigned int comedi_default_buf_size_kb = CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB;
-module_param(comedi_default_buf_size_kb, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(comedi_default_buf_size_kb,
- "default asynchronous buffer size in KiB (default "
- __MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB) ")");
-
-unsigned int comedi_default_buf_maxsize_kb
- = CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB;
-module_param(comedi_default_buf_maxsize_kb, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(comedi_default_buf_maxsize_kb,
- "default maximum size of asynchronous buffer in KiB (default "
- __MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB) ")");
-
-static DEFINE_MUTEX(comedi_board_minor_table_lock);
-static struct comedi_device
-*comedi_board_minor_table[COMEDI_NUM_BOARD_MINORS];
-
-static DEFINE_MUTEX(comedi_subdevice_minor_table_lock);
-/* Note: indexed by minor - COMEDI_NUM_BOARD_MINORS. */
-static struct comedi_subdevice
-*comedi_subdevice_minor_table[COMEDI_NUM_SUBDEVICE_MINORS];
-
-static struct class *comedi_class;
-static struct cdev comedi_cdev;
-
-static void comedi_device_init(struct comedi_device *dev)
-{
- kref_init(&dev->refcount);
- spin_lock_init(&dev->spinlock);
- mutex_init(&dev->mutex);
- init_rwsem(&dev->attach_lock);
- dev->minor = -1;
-}
-
-static void comedi_dev_kref_release(struct kref *kref)
-{
- struct comedi_device *dev =
- container_of(kref, struct comedi_device, refcount);
-
- mutex_destroy(&dev->mutex);
- put_device(dev->class_dev);
- kfree(dev);
-}
-
-/**
- * comedi_dev_put() - Release a use of a COMEDI device
- * @dev: COMEDI device.
- *
- * Must be called when a user of a COMEDI device is finished with it.
- * When the last user of the COMEDI device calls this function, the
- * COMEDI device is destroyed.
- *
- * Return: 1 if the COMEDI device is destroyed by this call or @dev is
- * NULL, otherwise return 0. Callers must not assume the COMEDI
- * device is still valid if this function returns 0.
- */
-int comedi_dev_put(struct comedi_device *dev)
-{
- if (dev)
- return kref_put(&dev->refcount, comedi_dev_kref_release);
- return 1;
-}
-EXPORT_SYMBOL_GPL(comedi_dev_put);
-
-static struct comedi_device *comedi_dev_get(struct comedi_device *dev)
-{
- if (dev)
- kref_get(&dev->refcount);
- return dev;
-}
-
-static void comedi_device_cleanup(struct comedi_device *dev)
-{
- struct module *driver_module = NULL;
-
- if (!dev)
- return;
- mutex_lock(&dev->mutex);
- if (dev->attached)
- driver_module = dev->driver->module;
- comedi_device_detach(dev);
- if (driver_module && dev->use_count)
- module_put(driver_module);
- mutex_unlock(&dev->mutex);
-}
-
-static bool comedi_clear_board_dev(struct comedi_device *dev)
-{
- unsigned int i = dev->minor;
- bool cleared = false;
-
- mutex_lock(&comedi_board_minor_table_lock);
- if (dev == comedi_board_minor_table[i]) {
- comedi_board_minor_table[i] = NULL;
- cleared = true;
- }
- mutex_unlock(&comedi_board_minor_table_lock);
- return cleared;
-}
-
-static struct comedi_device *comedi_clear_board_minor(unsigned minor)
-{
- struct comedi_device *dev;
-
- mutex_lock(&comedi_board_minor_table_lock);
- dev = comedi_board_minor_table[minor];
- comedi_board_minor_table[minor] = NULL;
- mutex_unlock(&comedi_board_minor_table_lock);
- return dev;
-}
-
-static void comedi_free_board_dev(struct comedi_device *dev)
-{
- if (dev) {
- comedi_device_cleanup(dev);
- if (dev->class_dev) {
- device_destroy(comedi_class,
- MKDEV(COMEDI_MAJOR, dev->minor));
- }
- comedi_dev_put(dev);
- }
-}
-
-static struct comedi_subdevice
-*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor)
-{
- struct comedi_subdevice *s;
- unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
-
- mutex_lock(&comedi_subdevice_minor_table_lock);
- s = comedi_subdevice_minor_table[i];
- if (s && s->device != dev)
- s = NULL;
- mutex_unlock(&comedi_subdevice_minor_table_lock);
- return s;
-}
-
-static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
-{
- struct comedi_device *dev;
-
- mutex_lock(&comedi_board_minor_table_lock);
- dev = comedi_dev_get(comedi_board_minor_table[minor]);
- mutex_unlock(&comedi_board_minor_table_lock);
- return dev;
-}
-
-static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
-{
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
-
- mutex_lock(&comedi_subdevice_minor_table_lock);
- s = comedi_subdevice_minor_table[i];
- dev = comedi_dev_get(s ? s->device : NULL);
- mutex_unlock(&comedi_subdevice_minor_table_lock);
- return dev;
-}
-
-/**
- * comedi_dev_get_from_minor() - Get COMEDI device by minor device number
- * @minor: Minor device number.
- *
- * Finds the COMEDI device associated with the minor device number, if any,
- * and increments its reference count. The COMEDI device is prevented from
- * being freed until a matching call is made to comedi_dev_put().
- *
- * Return: A pointer to the COMEDI device if it exists, with its usage
- * reference incremented. Return NULL if no COMEDI device exists with the
- * specified minor device number.
- */
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor)
-{
- if (minor < COMEDI_NUM_BOARD_MINORS)
- return comedi_dev_get_from_board_minor(minor);
-
- return comedi_dev_get_from_subdevice_minor(minor);
-}
-EXPORT_SYMBOL_GPL(comedi_dev_get_from_minor);
-
-static struct comedi_subdevice *
-comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor)
-{
- struct comedi_subdevice *s;
-
- if (minor >= COMEDI_NUM_BOARD_MINORS) {
- s = comedi_subdevice_from_minor(dev, minor);
- if (!s || (s->subdev_flags & SDF_CMD_READ))
- return s;
- }
- return dev->read_subdev;
-}
-
-static struct comedi_subdevice *
-comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor)
-{
- struct comedi_subdevice *s;
-
- if (minor >= COMEDI_NUM_BOARD_MINORS) {
- s = comedi_subdevice_from_minor(dev, minor);
- if (!s || (s->subdev_flags & SDF_CMD_WRITE))
- return s;
- }
- return dev->write_subdev;
-}
-
-static void comedi_file_reset(struct file *file)
-{
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
- struct comedi_subdevice *s, *read_s, *write_s;
- unsigned int minor = iminor(file_inode(file));
-
- read_s = dev->read_subdev;
- write_s = dev->write_subdev;
- if (minor >= COMEDI_NUM_BOARD_MINORS) {
- s = comedi_subdevice_from_minor(dev, minor);
- if (!s || s->subdev_flags & SDF_CMD_READ)
- read_s = s;
- if (!s || s->subdev_flags & SDF_CMD_WRITE)
- write_s = s;
- }
- cfp->last_attached = dev->attached;
- cfp->last_detach_count = dev->detach_count;
- ACCESS_ONCE(cfp->read_subdev) = read_s;
- ACCESS_ONCE(cfp->write_subdev) = write_s;
-}
-
-static void comedi_file_check(struct file *file)
-{
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
-
- if (cfp->last_attached != dev->attached ||
- cfp->last_detach_count != dev->detach_count)
- comedi_file_reset(file);
-}
-
-static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file)
-{
- struct comedi_file *cfp = file->private_data;
-
- comedi_file_check(file);
- return ACCESS_ONCE(cfp->read_subdev);
-}
-
-static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
-{
- struct comedi_file *cfp = file->private_data;
-
- comedi_file_check(file);
- return ACCESS_ONCE(cfp->write_subdev);
-}
-
-static int resize_async_buffer(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned new_size)
-{
- struct comedi_async *async = s->async;
- int retval;
-
- if (new_size > async->max_bufsize)
- return -EPERM;
-
- if (s->busy) {
- dev_dbg(dev->class_dev,
- "subdevice is busy, cannot resize buffer\n");
- return -EBUSY;
- }
- if (comedi_buf_is_mmapped(s)) {
- dev_dbg(dev->class_dev,
- "subdevice is mmapped, cannot resize buffer\n");
- return -EBUSY;
- }
-
- /* make sure buffer is an integral number of pages (we round up) */
- new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
-
- retval = comedi_buf_alloc(dev, s, new_size);
- if (retval < 0)
- return retval;
-
- if (s->buf_change) {
- retval = s->buf_change(dev, s);
- if (retval < 0)
- return retval;
- }
-
- dev_dbg(dev->class_dev, "subd %d buffer resized to %i bytes\n",
- s->index, async->prealloc_bufsz);
- return 0;
-}
-
-/* sysfs attribute files */
-
-static ssize_t max_read_buffer_kb_show(struct device *csdev,
- struct device_attribute *attr, char *buf)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size = 0;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_read_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_READ) && s->async)
- size = s->async->max_bufsize / 1024;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
-}
-
-static ssize_t max_read_buffer_kb_store(struct device *csdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size;
- int err;
-
- err = kstrtouint(buf, 10, &size);
- if (err)
- return err;
- if (size > (UINT_MAX / 1024))
- return -EINVAL;
- size *= 1024;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_read_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_READ) && s->async)
- s->async->max_bufsize = size;
- else
- err = -EINVAL;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return err ? err : count;
-}
-static DEVICE_ATTR_RW(max_read_buffer_kb);
-
-static ssize_t read_buffer_kb_show(struct device *csdev,
- struct device_attribute *attr, char *buf)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size = 0;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_read_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_READ) && s->async)
- size = s->async->prealloc_bufsz / 1024;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
-}
-
-static ssize_t read_buffer_kb_store(struct device *csdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size;
- int err;
-
- err = kstrtouint(buf, 10, &size);
- if (err)
- return err;
- if (size > (UINT_MAX / 1024))
- return -EINVAL;
- size *= 1024;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_read_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_READ) && s->async)
- err = resize_async_buffer(dev, s, size);
- else
- err = -EINVAL;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return err ? err : count;
-}
-static DEVICE_ATTR_RW(read_buffer_kb);
-
-static ssize_t max_write_buffer_kb_show(struct device *csdev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size = 0;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_write_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async)
- size = s->async->max_bufsize / 1024;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
-}
-
-static ssize_t max_write_buffer_kb_store(struct device *csdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size;
- int err;
-
- err = kstrtouint(buf, 10, &size);
- if (err)
- return err;
- if (size > (UINT_MAX / 1024))
- return -EINVAL;
- size *= 1024;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_write_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async)
- s->async->max_bufsize = size;
- else
- err = -EINVAL;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return err ? err : count;
-}
-static DEVICE_ATTR_RW(max_write_buffer_kb);
-
-static ssize_t write_buffer_kb_show(struct device *csdev,
- struct device_attribute *attr, char *buf)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size = 0;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_write_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async)
- size = s->async->prealloc_bufsz / 1024;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
-}
-
-static ssize_t write_buffer_kb_store(struct device *csdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int minor = MINOR(csdev->devt);
- struct comedi_device *dev;
- struct comedi_subdevice *s;
- unsigned int size;
- int err;
-
- err = kstrtouint(buf, 10, &size);
- if (err)
- return err;
- if (size > (UINT_MAX / 1024))
- return -EINVAL;
- size *= 1024;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->mutex);
- s = comedi_write_subdevice(dev, minor);
- if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async)
- err = resize_async_buffer(dev, s, size);
- else
- err = -EINVAL;
- mutex_unlock(&dev->mutex);
-
- comedi_dev_put(dev);
- return err ? err : count;
-}
-static DEVICE_ATTR_RW(write_buffer_kb);
-
-static struct attribute *comedi_dev_attrs[] = {
- &dev_attr_max_read_buffer_kb.attr,
- &dev_attr_read_buffer_kb.attr,
- &dev_attr_max_write_buffer_kb.attr,
- &dev_attr_write_buffer_kb.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(comedi_dev);
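/*
 * The four attributes above surface the buffer limits through sysfs, so
 * they can be inspected without the COMEDI_BUFCONFIG ioctl.  A small
 * userspace sketch follows; the device path is an assumption about a
 * typical comedi class device node, not something stated in this file.
 */
#include <stdio.h>

int main(void)
{
	/* Assumed sysfs location; adjust to the actual comedi device. */
	const char *path = "/sys/class/comedi/comedi0/read_buffer_kb";
	char line[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("read_buffer_kb: %s", line);	/* value is in KiB */
	fclose(f);
	return 0;
}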
-
-static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
-{
- s->runflags &= ~bits;
-}
-
-static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
-{
- s->runflags |= bits;
-}
-
-static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
- unsigned mask, unsigned bits)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&s->spin_lock, flags);
- __comedi_clear_subdevice_runflags(s, mask);
- __comedi_set_subdevice_runflags(s, bits & mask);
- spin_unlock_irqrestore(&s->spin_lock, flags);
-}
-
-static unsigned __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
-{
- return s->runflags;
-}
-
-static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
-{
- unsigned long flags;
- unsigned runflags;
-
- spin_lock_irqsave(&s->spin_lock, flags);
- runflags = __comedi_get_subdevice_runflags(s);
- spin_unlock_irqrestore(&s->spin_lock, flags);
- return runflags;
-}
-
-static bool comedi_is_runflags_running(unsigned runflags)
-{
- return runflags & COMEDI_SRF_RUNNING;
-}
-
-static bool comedi_is_runflags_in_error(unsigned runflags)
-{
- return runflags & COMEDI_SRF_ERROR;
-}
-
-/**
- * comedi_is_subdevice_running() - Check if async command running on subdevice
- * @s: COMEDI subdevice.
- *
- * Return: %true if an asynchronous COMEDI command is active on the
- * subdevice, else %false.
- */
-bool comedi_is_subdevice_running(struct comedi_subdevice *s)
-{
- unsigned runflags = comedi_get_subdevice_runflags(s);
-
- return comedi_is_runflags_running(runflags);
-}
-EXPORT_SYMBOL_GPL(comedi_is_subdevice_running);
-
-static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
-{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
-
- return comedi_is_runflags_running(runflags);
-}
-
-static bool comedi_is_subdevice_idle(struct comedi_subdevice *s)
-{
- unsigned runflags = comedi_get_subdevice_runflags(s);
-
- return !(runflags & COMEDI_SRF_BUSY_MASK);
-}
-
-bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
-{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
-
- return runflags & COMEDI_SRF_FREE_SPRIV;
-}
-
-/**
- * comedi_set_spriv_auto_free() - Mark subdevice private data as freeable
- * @s: COMEDI subdevice.
- *
- * Mark the subdevice as having a pointer to private data that can be
- * automatically freed when the COMEDI device is detached from the low-level
- * driver.
- */
-void comedi_set_spriv_auto_free(struct comedi_subdevice *s)
-{
- __comedi_set_subdevice_runflags(s, COMEDI_SRF_FREE_SPRIV);
-}
-EXPORT_SYMBOL_GPL(comedi_set_spriv_auto_free);
-
-/**
- * comedi_alloc_spriv() - Allocate memory for the subdevice private data
- * @s: COMEDI subdevice.
- * @size: Size of the memory to allocate.
- *
- * Allocate memory for the subdevice private data and point @s->private
- * to it. The memory will be freed automatically when the COMEDI device
- * is detached from the low-level driver.
- *
- * Return: A pointer to the allocated memory @s->private on success.
- * Return NULL on failure.
- */
-void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size)
-{
- s->private = kzalloc(size, GFP_KERNEL);
- if (s->private)
- comedi_set_spriv_auto_free(s);
- return s->private;
-}
-EXPORT_SYMBOL_GPL(comedi_alloc_spriv);
-
-/*
- * This function restores a subdevice to an idle state.
- */
-static void do_become_nonbusy(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
-
- comedi_update_subdevice_runflags(s, COMEDI_SRF_RUNNING, 0);
- if (async) {
- comedi_buf_reset(s);
- async->inttrig = NULL;
- kfree(async->cmd.chanlist);
- async->cmd.chanlist = NULL;
- s->busy = NULL;
- wake_up_interruptible_all(&async->wait_head);
- } else {
- dev_err(dev->class_dev,
- "BUG: (?) do_become_nonbusy called with async=NULL\n");
- s->busy = NULL;
- }
-}
-
-static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- int ret = 0;
-
- if (comedi_is_subdevice_running(s) && s->cancel)
- ret = s->cancel(dev, s);
-
- do_become_nonbusy(dev, s);
-
- return ret;
-}
-
-void comedi_device_cancel_all(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int i;
-
- if (!dev->attached)
- return;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (s->async)
- do_cancel(dev, s);
- }
-}
-
-static int is_device_busy(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int i;
-
- if (!dev->attached)
- return 0;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (s->busy)
- return 1;
- if (s->async && comedi_buf_is_mmapped(s))
- return 1;
- }
-
- return 0;
-}
-
-/*
- * COMEDI_DEVCONFIG ioctl
- * attaches (and configures) or detaches a legacy device
- *
- * arg:
- * pointer to comedi_devconfig structure (NULL if detaching)
- *
- * reads:
- * comedi_devconfig structure (if attaching)
- *
- * writes:
- * nothing
- */
-static int do_devconfig_ioctl(struct comedi_device *dev,
- struct comedi_devconfig __user *arg)
-{
- struct comedi_devconfig it;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!arg) {
- if (is_device_busy(dev))
- return -EBUSY;
- if (dev->attached) {
- struct module *driver_module = dev->driver->module;
-
- comedi_device_detach(dev);
- module_put(driver_module);
- }
- return 0;
- }
-
- if (copy_from_user(&it, arg, sizeof(it)))
- return -EFAULT;
-
- it.board_name[COMEDI_NAMELEN - 1] = 0;
-
- if (it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) {
- dev_warn(dev->class_dev,
- "comedi_config --init_data is deprecated\n");
- return -EINVAL;
- }
-
- if (dev->minor >= comedi_num_legacy_minors)
- /* don't re-use dynamically allocated comedi devices */
- return -EBUSY;
-
- /* This increments the driver module count on success. */
- return comedi_device_attach(dev, &it);
-}
-
-/*
- * COMEDI_BUFCONFIG ioctl
- * buffer configuration
- *
- * arg:
- * pointer to comedi_bufconfig structure
- *
- * reads:
- * comedi_bufconfig structure
- *
- * writes:
- * modified comedi_bufconfig structure
- */
-static int do_bufconfig_ioctl(struct comedi_device *dev,
- struct comedi_bufconfig __user *arg)
-{
- struct comedi_bufconfig bc;
- struct comedi_async *async;
- struct comedi_subdevice *s;
- int retval = 0;
-
- if (copy_from_user(&bc, arg, sizeof(bc)))
- return -EFAULT;
-
- if (bc.subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = &dev->subdevices[bc.subdevice];
- async = s->async;
-
- if (!async) {
- dev_dbg(dev->class_dev,
- "subdevice does not have async capability\n");
- bc.size = 0;
- bc.maximum_size = 0;
- goto copyback;
- }
-
- if (bc.maximum_size) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- async->max_bufsize = bc.maximum_size;
- }
-
- if (bc.size) {
- retval = resize_async_buffer(dev, s, bc.size);
- if (retval < 0)
- return retval;
- }
-
- bc.size = async->prealloc_bufsz;
- bc.maximum_size = async->max_bufsize;
-
-copyback:
- if (copy_to_user(arg, &bc, sizeof(bc)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * COMEDI_DEVINFO ioctl
- * device info
- *
- * arg:
- * pointer to comedi_devinfo structure
- *
- * reads:
- * nothing
- *
- * writes:
- * comedi_devinfo structure
- */
-static int do_devinfo_ioctl(struct comedi_device *dev,
- struct comedi_devinfo __user *arg,
- struct file *file)
-{
- struct comedi_subdevice *s;
- struct comedi_devinfo devinfo;
-
- memset(&devinfo, 0, sizeof(devinfo));
-
- /* fill devinfo structure */
- devinfo.version_code = COMEDI_VERSION_CODE;
- devinfo.n_subdevs = dev->n_subdevices;
- strlcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
- strlcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
-
- s = comedi_file_read_subdevice(file);
- if (s)
- devinfo.read_subdevice = s->index;
- else
- devinfo.read_subdevice = -1;
-
- s = comedi_file_write_subdevice(file);
- if (s)
- devinfo.write_subdevice = s->index;
- else
- devinfo.write_subdevice = -1;
-
- if (copy_to_user(arg, &devinfo, sizeof(devinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * COMEDI_SUBDINFO ioctl
- * subdevices info
- *
- * arg:
- * pointer to array of comedi_subdinfo structures
- *
- * reads:
- * nothing
- *
- * writes:
- * array of comedi_subdinfo structures
- */
-static int do_subdinfo_ioctl(struct comedi_device *dev,
- struct comedi_subdinfo __user *arg, void *file)
-{
- int ret, i;
- struct comedi_subdinfo *tmp, *us;
- struct comedi_subdevice *s;
-
- tmp = kcalloc(dev->n_subdevices, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- /* fill subdinfo structs */
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- us = tmp + i;
-
- us->type = s->type;
- us->n_chan = s->n_chan;
- us->subd_flags = s->subdev_flags;
- if (comedi_is_subdevice_running(s))
- us->subd_flags |= SDF_RUNNING;
-#define TIMER_nanosec 5 /* backwards compatibility */
- us->timer_type = TIMER_nanosec;
- us->len_chanlist = s->len_chanlist;
- us->maxdata = s->maxdata;
- if (s->range_table) {
- us->range_type =
- (i << 24) | (0 << 16) | (s->range_table->length);
- } else {
- us->range_type = 0; /* XXX */
- }
-
- if (s->busy)
- us->subd_flags |= SDF_BUSY;
- if (s->busy == file)
- us->subd_flags |= SDF_BUSY_OWNER;
- if (s->lock)
- us->subd_flags |= SDF_LOCKED;
- if (s->lock == file)
- us->subd_flags |= SDF_LOCK_OWNER;
- if (!s->maxdata && s->maxdata_list)
- us->subd_flags |= SDF_MAXDATA;
- if (s->range_table_list)
- us->subd_flags |= SDF_RANGETYPE;
- if (s->do_cmd)
- us->subd_flags |= SDF_CMD;
-
- if (s->insn_bits != &insn_inval)
- us->insn_bits_support = COMEDI_SUPPORTED;
- else
- us->insn_bits_support = COMEDI_UNSUPPORTED;
- }
-
- ret = copy_to_user(arg, tmp, dev->n_subdevices * sizeof(*tmp));
-
- kfree(tmp);
-
- return ret ? -EFAULT : 0;
-}
-
-/*
- * COMEDI_CHANINFO ioctl
- * subdevice channel info
- *
- * arg:
- * pointer to comedi_chaninfo structure
- *
- * reads:
- * comedi_chaninfo structure
- *
- * writes:
- * array of maxdata values to chaninfo->maxdata_list if requested
- * array of range table lengths to chaninfo->range_table_list if requested
- */
-static int do_chaninfo_ioctl(struct comedi_device *dev,
- struct comedi_chaninfo __user *arg)
-{
- struct comedi_subdevice *s;
- struct comedi_chaninfo it;
-
- if (copy_from_user(&it, arg, sizeof(it)))
- return -EFAULT;
-
- if (it.subdev >= dev->n_subdevices)
- return -EINVAL;
- s = &dev->subdevices[it.subdev];
-
- if (it.maxdata_list) {
- if (s->maxdata || !s->maxdata_list)
- return -EINVAL;
- if (copy_to_user(it.maxdata_list, s->maxdata_list,
- s->n_chan * sizeof(unsigned int)))
- return -EFAULT;
- }
-
- if (it.flaglist)
- return -EINVAL; /* flaglist not supported */
-
- if (it.rangelist) {
- int i;
-
- if (!s->range_table_list)
- return -EINVAL;
- for (i = 0; i < s->n_chan; i++) {
- int x;
-
- x = (dev->minor << 28) | (it.subdev << 24) | (i << 16) |
- (s->range_table_list[i]->length);
- if (put_user(x, it.rangelist + i))
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
-/*
- * COMEDI_BUFINFO ioctl
- * buffer information
- *
- * arg:
- * pointer to comedi_bufinfo structure
- *
- * reads:
- * comedi_bufinfo structure
- *
- * writes:
- * modified comedi_bufinfo structure
- */
-static int do_bufinfo_ioctl(struct comedi_device *dev,
- struct comedi_bufinfo __user *arg, void *file)
-{
- struct comedi_bufinfo bi;
- struct comedi_subdevice *s;
- struct comedi_async *async;
-
- if (copy_from_user(&bi, arg, sizeof(bi)))
- return -EFAULT;
-
- if (bi.subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = &dev->subdevices[bi.subdevice];
-
- async = s->async;
-
- if (!async) {
- dev_dbg(dev->class_dev,
- "subdevice does not have async capability\n");
- bi.buf_write_ptr = 0;
- bi.buf_read_ptr = 0;
- bi.buf_write_count = 0;
- bi.buf_read_count = 0;
- bi.bytes_read = 0;
- bi.bytes_written = 0;
- goto copyback;
- }
- if (!s->busy) {
- bi.bytes_read = 0;
- bi.bytes_written = 0;
- goto copyback_position;
- }
- if (s->busy != file)
- return -EACCES;
-
- if (bi.bytes_read && !(async->cmd.flags & CMDF_WRITE)) {
- bi.bytes_read = comedi_buf_read_alloc(s, bi.bytes_read);
- comedi_buf_read_free(s, bi.bytes_read);
-
- if (comedi_is_subdevice_idle(s) &&
- comedi_buf_n_bytes_ready(s) == 0) {
- do_become_nonbusy(dev, s);
- }
- }
-
- if (bi.bytes_written && (async->cmd.flags & CMDF_WRITE)) {
- bi.bytes_written =
- comedi_buf_write_alloc(s, bi.bytes_written);
- comedi_buf_write_free(s, bi.bytes_written);
- }
-
-copyback_position:
- bi.buf_write_count = async->buf_write_count;
- bi.buf_write_ptr = async->buf_write_ptr;
- bi.buf_read_count = async->buf_read_count;
- bi.buf_read_ptr = async->buf_read_ptr;
-
-copyback:
- if (copy_to_user(arg, &bi, sizeof(bi)))
- return -EFAULT;
-
- return 0;
-}
-
-static int check_insn_config_length(struct comedi_insn *insn,
- unsigned int *data)
-{
- if (insn->n < 1)
- return -EINVAL;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- case INSN_CONFIG_DIO_INPUT:
- case INSN_CONFIG_DISARM:
- case INSN_CONFIG_RESET:
- if (insn->n == 1)
- return 0;
- break;
- case INSN_CONFIG_ARM:
- case INSN_CONFIG_DIO_QUERY:
- case INSN_CONFIG_BLOCK_SIZE:
- case INSN_CONFIG_FILTER:
- case INSN_CONFIG_SERIAL_CLOCK:
- case INSN_CONFIG_BIDIRECTIONAL_DATA:
- case INSN_CONFIG_ALT_SOURCE:
- case INSN_CONFIG_SET_COUNTER_MODE:
- case INSN_CONFIG_8254_READ_STATUS:
- case INSN_CONFIG_SET_ROUTING:
- case INSN_CONFIG_GET_ROUTING:
- case INSN_CONFIG_GET_PWM_STATUS:
- case INSN_CONFIG_PWM_SET_PERIOD:
- case INSN_CONFIG_PWM_GET_PERIOD:
- if (insn->n == 2)
- return 0;
- break;
- case INSN_CONFIG_SET_GATE_SRC:
- case INSN_CONFIG_GET_GATE_SRC:
- case INSN_CONFIG_SET_CLOCK_SRC:
- case INSN_CONFIG_GET_CLOCK_SRC:
- case INSN_CONFIG_SET_OTHER_SRC:
- case INSN_CONFIG_GET_COUNTER_STATUS:
- case INSN_CONFIG_PWM_SET_H_BRIDGE:
- case INSN_CONFIG_PWM_GET_H_BRIDGE:
- case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
- if (insn->n == 3)
- return 0;
- break;
- case INSN_CONFIG_PWM_OUTPUT:
- case INSN_CONFIG_ANALOG_TRIG:
- if (insn->n == 5)
- return 0;
- break;
- case INSN_CONFIG_DIGITAL_TRIG:
- if (insn->n == 6)
- return 0;
- break;
- /*
- * by default we allow the insn since we don't have checks for
- * all possible cases yet
- */
- default:
- pr_warn("No check for data length of config insn id %i is implemented\n",
- data[0]);
- pr_warn("Add a check to %s in %s\n", __func__, __FILE__);
- pr_warn("Assuming n=%i is correct\n", insn->n);
- return 0;
- }
- return -EINVAL;
-}
-
-static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
- unsigned int *data, void *file)
-{
- struct comedi_subdevice *s;
- int ret = 0;
- int i;
-
- if (insn->insn & INSN_MASK_SPECIAL) {
- /* a non-subdevice instruction */
-
- switch (insn->insn) {
- case INSN_GTOD:
- {
- struct timeval tv;
-
- if (insn->n != 2) {
- ret = -EINVAL;
- break;
- }
-
- do_gettimeofday(&tv);
- data[0] = tv.tv_sec;
- data[1] = tv.tv_usec;
- ret = 2;
-
- break;
- }
- case INSN_WAIT:
- if (insn->n != 1 || data[0] >= 100000) {
- ret = -EINVAL;
- break;
- }
- udelay(data[0] / 1000);
- ret = 1;
- break;
- case INSN_INTTRIG:
- if (insn->n != 1) {
- ret = -EINVAL;
- break;
- }
- if (insn->subdev >= dev->n_subdevices) {
- dev_dbg(dev->class_dev,
- "%d not usable subdevice\n",
- insn->subdev);
- ret = -EINVAL;
- break;
- }
- s = &dev->subdevices[insn->subdev];
- if (!s->async) {
- dev_dbg(dev->class_dev, "no async\n");
- ret = -EINVAL;
- break;
- }
- if (!s->async->inttrig) {
- dev_dbg(dev->class_dev, "no inttrig\n");
- ret = -EAGAIN;
- break;
- }
- ret = s->async->inttrig(dev, s, data[0]);
- if (ret >= 0)
- ret = 1;
- break;
- default:
- dev_dbg(dev->class_dev, "invalid insn\n");
- ret = -EINVAL;
- break;
- }
- } else {
- /* a subdevice instruction */
- unsigned int maxdata;
-
- if (insn->subdev >= dev->n_subdevices) {
- dev_dbg(dev->class_dev, "subdevice %d out of range\n",
- insn->subdev);
- ret = -EINVAL;
- goto out;
- }
- s = &dev->subdevices[insn->subdev];
-
- if (s->type == COMEDI_SUBD_UNUSED) {
- dev_dbg(dev->class_dev, "%d not usable subdevice\n",
- insn->subdev);
- ret = -EIO;
- goto out;
- }
-
- /* are we locked? (ioctl lock) */
- if (s->lock && s->lock != file) {
- dev_dbg(dev->class_dev, "device locked\n");
- ret = -EACCES;
- goto out;
- }
-
- ret = comedi_check_chanlist(s, 1, &insn->chanspec);
- if (ret < 0) {
- ret = -EINVAL;
- dev_dbg(dev->class_dev, "bad chanspec\n");
- goto out;
- }
-
- if (s->busy) {
- ret = -EBUSY;
- goto out;
- }
- /* This looks arbitrary. It is. */
- s->busy = &parse_insn;
- switch (insn->insn) {
- case INSN_READ:
- ret = s->insn_read(dev, s, insn, data);
- if (ret == -ETIMEDOUT) {
- dev_dbg(dev->class_dev,
- "subdevice %d read instruction timed out\n",
- s->index);
- }
- break;
- case INSN_WRITE:
- maxdata = s->maxdata_list
- ? s->maxdata_list[CR_CHAN(insn->chanspec)]
- : s->maxdata;
- for (i = 0; i < insn->n; ++i) {
- if (data[i] > maxdata) {
- ret = -EINVAL;
- dev_dbg(dev->class_dev,
- "bad data value(s)\n");
- break;
- }
- }
- if (ret == 0) {
- ret = s->insn_write(dev, s, insn, data);
- if (ret == -ETIMEDOUT) {
- dev_dbg(dev->class_dev,
- "subdevice %d write instruction timed out\n",
- s->index);
- }
- }
- break;
- case INSN_BITS:
- if (insn->n != 2) {
- ret = -EINVAL;
- } else {
- /*
- * Most drivers ignore the base channel in
- * insn->chanspec. Fix this here if
- * the subdevice has <= 32 channels.
- */
- unsigned int orig_mask = data[0];
- unsigned int shift = 0;
-
- if (s->n_chan <= 32) {
- shift = CR_CHAN(insn->chanspec);
- if (shift > 0) {
- insn->chanspec = 0;
- data[0] <<= shift;
- data[1] <<= shift;
- }
- }
- ret = s->insn_bits(dev, s, insn, data);
- data[0] = orig_mask;
- if (shift > 0)
- data[1] >>= shift;
- }
- break;
- case INSN_CONFIG:
- ret = check_insn_config_length(insn, data);
- if (ret)
- break;
- ret = s->insn_config(dev, s, insn, data);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- s->busy = NULL;
- }
-
-out:
- return ret;
-}
-
-/*
- * COMEDI_INSNLIST ioctl
- * synchronous instruction list
- *
- * arg:
- * pointer to comedi_insnlist structure
- *
- * reads:
- * comedi_insnlist structure
- * array of comedi_insn structures from insnlist->insns pointer
- * data (for writes) from insns[].data pointers
- *
- * writes:
- * data (for reads) to insns[].data pointers
- */
-/* arbitrary limits */
-#define MAX_SAMPLES 256
-static int do_insnlist_ioctl(struct comedi_device *dev,
- struct comedi_insnlist __user *arg, void *file)
-{
- struct comedi_insnlist insnlist;
- struct comedi_insn *insns = NULL;
- unsigned int *data = NULL;
- int i = 0;
- int ret = 0;
-
- if (copy_from_user(&insnlist, arg, sizeof(insnlist)))
- return -EFAULT;
-
- data = kmalloc_array(MAX_SAMPLES, sizeof(unsigned int), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto error;
- }
-
- insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
- if (!insns) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (copy_from_user(insns, insnlist.insns,
- sizeof(*insns) * insnlist.n_insns)) {
- dev_dbg(dev->class_dev, "copy_from_user failed\n");
- ret = -EFAULT;
- goto error;
- }
-
- for (i = 0; i < insnlist.n_insns; i++) {
- if (insns[i].n > MAX_SAMPLES) {
- dev_dbg(dev->class_dev,
- "number of samples too large\n");
- ret = -EINVAL;
- goto error;
- }
- if (insns[i].insn & INSN_MASK_WRITE) {
- if (copy_from_user(data, insns[i].data,
- insns[i].n * sizeof(unsigned int))) {
- dev_dbg(dev->class_dev,
- "copy_from_user failed\n");
- ret = -EFAULT;
- goto error;
- }
- }
- ret = parse_insn(dev, insns + i, data, file);
- if (ret < 0)
- goto error;
- if (insns[i].insn & INSN_MASK_READ) {
- if (copy_to_user(insns[i].data, data,
- insns[i].n * sizeof(unsigned int))) {
- dev_dbg(dev->class_dev,
- "copy_to_user failed\n");
- ret = -EFAULT;
- goto error;
- }
- }
- if (need_resched())
- schedule();
- }
-
-error:
- kfree(insns);
- kfree(data);
-
- if (ret < 0)
- return ret;
- return i;
-}
-
-/*
- * COMEDI_INSN ioctl
- * synchronous instruction
- *
- * arg:
- * pointer to comedi_insn structure
- *
- * reads:
- * comedi_insn structure
- * data (for writes) from insn->data pointer
- *
- * writes:
- * data (for reads) to insn->data pointer
- */
-static int do_insn_ioctl(struct comedi_device *dev,
- struct comedi_insn __user *arg, void *file)
-{
- struct comedi_insn insn;
- unsigned int *data = NULL;
- int ret = 0;
-
- data = kmalloc_array(MAX_SAMPLES, sizeof(unsigned int), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (copy_from_user(&insn, arg, sizeof(insn))) {
- ret = -EFAULT;
- goto error;
- }
-
- /* This is where the behavior of insn and insnlist deviates. */
- if (insn.n > MAX_SAMPLES)
- insn.n = MAX_SAMPLES;
- if (insn.insn & INSN_MASK_WRITE) {
- if (copy_from_user(data,
- insn.data,
- insn.n * sizeof(unsigned int))) {
- ret = -EFAULT;
- goto error;
- }
- }
- ret = parse_insn(dev, &insn, data, file);
- if (ret < 0)
- goto error;
- if (insn.insn & INSN_MASK_READ) {
- if (copy_to_user(insn.data,
- data,
- insn.n * sizeof(unsigned int))) {
- ret = -EFAULT;
- goto error;
- }
- }
- ret = insn.n;
-
-error:
- kfree(data);
-
- return ret;
-}
-
-static int __comedi_get_user_cmd(struct comedi_device *dev,
- struct comedi_cmd __user *arg,
- struct comedi_cmd *cmd)
-{
- struct comedi_subdevice *s;
-
- if (copy_from_user(cmd, arg, sizeof(*cmd))) {
- dev_dbg(dev->class_dev, "bad cmd address\n");
- return -EFAULT;
- }
-
- if (cmd->subdev >= dev->n_subdevices) {
- dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd->subdev);
- return -ENODEV;
- }
-
- s = &dev->subdevices[cmd->subdev];
-
- if (s->type == COMEDI_SUBD_UNUSED) {
- dev_dbg(dev->class_dev, "%d not valid subdevice\n",
- cmd->subdev);
- return -EIO;
- }
-
- if (!s->do_cmd || !s->do_cmdtest || !s->async) {
- dev_dbg(dev->class_dev,
- "subdevice %d does not support commands\n",
- cmd->subdev);
- return -EIO;
- }
-
- /* make sure channel/gain list isn't too long */
- if (cmd->chanlist_len > s->len_chanlist) {
- dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n",
- cmd->chanlist_len, s->len_chanlist);
- return -EINVAL;
- }
-
- /*
- * Set the CMDF_WRITE flag to the correct state if the subdevice
- * supports only "read" commands or only "write" commands.
- */
- switch (s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) {
- case SDF_CMD_READ:
- cmd->flags &= ~CMDF_WRITE;
- break;
- case SDF_CMD_WRITE:
- cmd->flags |= CMDF_WRITE;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int __comedi_get_user_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int __user *user_chanlist,
- struct comedi_cmd *cmd)
-{
- unsigned int *chanlist;
- int ret;
-
- cmd->chanlist = NULL;
- chanlist = memdup_user(user_chanlist,
- cmd->chanlist_len * sizeof(unsigned int));
- if (IS_ERR(chanlist))
- return PTR_ERR(chanlist);
-
- /* make sure each element in channel/gain list is valid */
- ret = comedi_check_chanlist(s, cmd->chanlist_len, chanlist);
- if (ret < 0) {
- kfree(chanlist);
- return ret;
- }
-
- cmd->chanlist = chanlist;
-
- return 0;
-}
-
-/*
- * COMEDI_CMD ioctl
- * asynchronous acquisition command set-up
- *
- * arg:
- * pointer to comedi_cmd structure
- *
- * reads:
- * comedi_cmd structure
- * channel/range list from cmd->chanlist pointer
- *
- * writes:
- * possibly modified comedi_cmd structure (when -EAGAIN returned)
- */
-static int do_cmd_ioctl(struct comedi_device *dev,
- struct comedi_cmd __user *arg, void *file)
-{
- struct comedi_cmd cmd;
- struct comedi_subdevice *s;
- struct comedi_async *async;
- unsigned int __user *user_chanlist;
- int ret;
-
- /* get the user's cmd and do some simple validation */
- ret = __comedi_get_user_cmd(dev, arg, &cmd);
- if (ret)
- return ret;
-
- /* save user's chanlist pointer so it can be restored later */
- user_chanlist = (unsigned int __user *)cmd.chanlist;
-
- s = &dev->subdevices[cmd.subdev];
- async = s->async;
-
- /* are we locked? (ioctl lock) */
- if (s->lock && s->lock != file) {
- dev_dbg(dev->class_dev, "subdevice locked\n");
- return -EACCES;
- }
-
- /* are we busy? */
- if (s->busy) {
- dev_dbg(dev->class_dev, "subdevice busy\n");
- return -EBUSY;
- }
-
- /* make sure channel/gain list isn't too short */
- if (cmd.chanlist_len < 1) {
- dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n",
- cmd.chanlist_len);
- return -EINVAL;
- }
-
- async->cmd = cmd;
- async->cmd.data = NULL;
-
- /* load channel/gain list */
- ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &async->cmd);
- if (ret)
- goto cleanup;
-
- ret = s->do_cmdtest(dev, s, &async->cmd);
-
- if (async->cmd.flags & CMDF_BOGUS || ret) {
- dev_dbg(dev->class_dev, "test returned %d\n", ret);
- cmd = async->cmd;
- /* restore chanlist pointer before copying back */
- cmd.chanlist = (unsigned int __force *)user_chanlist;
- cmd.data = NULL;
- if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- dev_dbg(dev->class_dev, "fault writing cmd\n");
- ret = -EFAULT;
- goto cleanup;
- }
- ret = -EAGAIN;
- goto cleanup;
- }
-
- if (!async->prealloc_bufsz) {
- ret = -ENOMEM;
- dev_dbg(dev->class_dev, "no buffer (?)\n");
- goto cleanup;
- }
-
- comedi_buf_reset(s);
-
- async->cb_mask = COMEDI_CB_BLOCK | COMEDI_CB_CANCEL_MASK;
- if (async->cmd.flags & CMDF_WAKE_EOS)
- async->cb_mask |= COMEDI_CB_EOS;
-
- comedi_update_subdevice_runflags(s, COMEDI_SRF_BUSY_MASK,
- COMEDI_SRF_RUNNING);
-
- /*
- * Set s->busy _after_ setting COMEDI_SRF_RUNNING flag to avoid
- * race with comedi_read() or comedi_write().
- */
- s->busy = file;
- ret = s->do_cmd(dev, s);
- if (ret == 0)
- return 0;
-
-cleanup:
- do_become_nonbusy(dev, s);
-
- return ret;
-}
-
-/*
- * COMEDI_CMDTEST ioctl
- * asynchronous acquisition command testing
- *
- * arg:
- * pointer to comedi_cmd structure
- *
- * reads:
- * comedi_cmd structure
- * channel/range list from cmd->chanlist pointer
- *
- * writes:
- * possibly modified comedi_cmd structure
- */
-static int do_cmdtest_ioctl(struct comedi_device *dev,
- struct comedi_cmd __user *arg, void *file)
-{
- struct comedi_cmd cmd;
- struct comedi_subdevice *s;
- unsigned int __user *user_chanlist;
- int ret;
-
- /* get the user's cmd and do some simple validation */
- ret = __comedi_get_user_cmd(dev, arg, &cmd);
- if (ret)
- return ret;
-
- /* save user's chanlist pointer so it can be restored later */
- user_chanlist = (unsigned int __user *)cmd.chanlist;
-
- s = &dev->subdevices[cmd.subdev];
-
- /* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */
- if (user_chanlist) {
- /* load channel/gain list */
- ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
- if (ret)
- return ret;
- }
-
- ret = s->do_cmdtest(dev, s, &cmd);
-
- kfree(cmd.chanlist); /* free kernel copy of user chanlist */
-
- /* restore chanlist pointer before copying back */
- cmd.chanlist = (unsigned int __force *)user_chanlist;
-
- if (copy_to_user(arg, &cmd, sizeof(cmd))) {
- dev_dbg(dev->class_dev, "bad cmd address\n");
- ret = -EFAULT;
- }
-
- return ret;
-}
-
-/*
- * COMEDI_LOCK ioctl
- * lock subdevice
- *
- * arg:
- * subdevice number
- *
- * reads:
- * nothing
- *
- * writes:
- * nothing
- */
-static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg,
- void *file)
-{
- int ret = 0;
- unsigned long flags;
- struct comedi_subdevice *s;
-
- if (arg >= dev->n_subdevices)
- return -EINVAL;
- s = &dev->subdevices[arg];
-
- spin_lock_irqsave(&s->spin_lock, flags);
- if (s->busy || s->lock)
- ret = -EBUSY;
- else
- s->lock = file;
- spin_unlock_irqrestore(&s->spin_lock, flags);
-
- return ret;
-}
-
-/*
- * COMEDI_UNLOCK ioctl
- * unlock subdevice
- *
- * arg:
- * subdevice number
- *
- * reads:
- * nothing
- *
- * writes:
- * nothing
- */
-static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg,
- void *file)
-{
- struct comedi_subdevice *s;
-
- if (arg >= dev->n_subdevices)
- return -EINVAL;
- s = &dev->subdevices[arg];
-
- if (s->busy)
- return -EBUSY;
-
- if (s->lock && s->lock != file)
- return -EACCES;
-
- if (s->lock == file)
- s->lock = NULL;
-
- return 0;
-}
-
-/*
- * COMEDI_CANCEL ioctl
- * cancel asynchronous acquisition
- *
- * arg:
- * subdevice number
- *
- * reads:
- * nothing
- *
- * writes:
- * nothing
- */
-static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
- void *file)
-{
- struct comedi_subdevice *s;
-
- if (arg >= dev->n_subdevices)
- return -EINVAL;
- s = &dev->subdevices[arg];
- if (!s->async)
- return -EINVAL;
-
- if (!s->busy)
- return 0;
-
- if (s->busy != file)
- return -EBUSY;
-
- return do_cancel(dev, s);
-}
-
-/*
- * COMEDI_POLL ioctl
- * instructs driver to synchronize buffers
- *
- * arg:
- * subdevice number
- *
- * reads:
- * nothing
- *
- * writes:
- * nothing
- */
-static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg,
- void *file)
-{
- struct comedi_subdevice *s;
-
- if (arg >= dev->n_subdevices)
- return -EINVAL;
- s = &dev->subdevices[arg];
-
- if (!s->busy)
- return 0;
-
- if (s->busy != file)
- return -EBUSY;
-
- if (s->poll)
- return s->poll(dev, s);
-
- return -EINVAL;
-}
-
-/*
- * COMEDI_SETRSUBD ioctl
- * sets the current "read" subdevice on a per-file basis
- *
- * arg:
- * subdevice number
- *
- * reads:
- * nothing
- *
- * writes:
- * nothing
- */
-static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
- struct file *file)
-{
- struct comedi_file *cfp = file->private_data;
- struct comedi_subdevice *s_old, *s_new;
-
- if (arg >= dev->n_subdevices)
- return -EINVAL;
-
- s_new = &dev->subdevices[arg];
- s_old = comedi_file_read_subdevice(file);
- if (s_old == s_new)
- return 0; /* no change */
-
- if (!(s_new->subdev_flags & SDF_CMD_READ))
- return -EINVAL;
-
- /*
- * Check the file isn't still busy handling a "read" command on the
- * old subdevice (if any).
- */
- if (s_old && s_old->busy == file && s_old->async &&
- !(s_old->async->cmd.flags & CMDF_WRITE))
- return -EBUSY;
-
- ACCESS_ONCE(cfp->read_subdev) = s_new;
- return 0;
-}
-
-/*
- * COMEDI_SETWSUBD ioctl
- * sets the current "write" subdevice on a per-file basis
- *
- * arg:
- * subdevice number
- *
- * reads:
- * nothing
- *
- * writes:
- * nothing
- */
-static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
- struct file *file)
-{
- struct comedi_file *cfp = file->private_data;
- struct comedi_subdevice *s_old, *s_new;
-
- if (arg >= dev->n_subdevices)
- return -EINVAL;
-
- s_new = &dev->subdevices[arg];
- s_old = comedi_file_write_subdevice(file);
- if (s_old == s_new)
- return 0; /* no change */
-
- if (!(s_new->subdev_flags & SDF_CMD_WRITE))
- return -EINVAL;
-
- /*
- * Check the file isn't still busy handling a "write" command on the
- * old subdevice (if any).
- */
- if (s_old && s_old->busy == file && s_old->async &&
- (s_old->async->cmd.flags & CMDF_WRITE))
- return -EBUSY;
-
- ACCESS_ONCE(cfp->write_subdev) = s_new;
- return 0;
-}
-
-static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- unsigned minor = iminor(file_inode(file));
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
- int rc;
-
- mutex_lock(&dev->mutex);
-
- /*
- * Device config is special, because it must work on
- * an unconfigured device.
- */
- if (cmd == COMEDI_DEVCONFIG) {
- if (minor >= COMEDI_NUM_BOARD_MINORS) {
- /* Device config not appropriate on non-board minors. */
- rc = -ENOTTY;
- goto done;
- }
- rc = do_devconfig_ioctl(dev,
- (struct comedi_devconfig __user *)arg);
- if (rc == 0) {
- if (arg == 0 &&
- dev->minor >= comedi_num_legacy_minors) {
- /*
- * Successfully unconfigured a dynamically
- * allocated device. Try and remove it.
- */
- if (comedi_clear_board_dev(dev)) {
- mutex_unlock(&dev->mutex);
- comedi_free_board_dev(dev);
- return rc;
- }
- }
- }
- goto done;
- }
-
- if (!dev->attached) {
- dev_dbg(dev->class_dev, "no driver attached\n");
- rc = -ENODEV;
- goto done;
- }
-
- switch (cmd) {
- case COMEDI_BUFCONFIG:
- rc = do_bufconfig_ioctl(dev,
- (struct comedi_bufconfig __user *)arg);
- break;
- case COMEDI_DEVINFO:
- rc = do_devinfo_ioctl(dev, (struct comedi_devinfo __user *)arg,
- file);
- break;
- case COMEDI_SUBDINFO:
- rc = do_subdinfo_ioctl(dev,
- (struct comedi_subdinfo __user *)arg,
- file);
- break;
- case COMEDI_CHANINFO:
- rc = do_chaninfo_ioctl(dev, (void __user *)arg);
- break;
- case COMEDI_RANGEINFO:
- rc = do_rangeinfo_ioctl(dev, (void __user *)arg);
- break;
- case COMEDI_BUFINFO:
- rc = do_bufinfo_ioctl(dev,
- (struct comedi_bufinfo __user *)arg,
- file);
- break;
- case COMEDI_LOCK:
- rc = do_lock_ioctl(dev, arg, file);
- break;
- case COMEDI_UNLOCK:
- rc = do_unlock_ioctl(dev, arg, file);
- break;
- case COMEDI_CANCEL:
- rc = do_cancel_ioctl(dev, arg, file);
- break;
- case COMEDI_CMD:
- rc = do_cmd_ioctl(dev, (struct comedi_cmd __user *)arg, file);
- break;
- case COMEDI_CMDTEST:
- rc = do_cmdtest_ioctl(dev, (struct comedi_cmd __user *)arg,
- file);
- break;
- case COMEDI_INSNLIST:
- rc = do_insnlist_ioctl(dev,
- (struct comedi_insnlist __user *)arg,
- file);
- break;
- case COMEDI_INSN:
- rc = do_insn_ioctl(dev, (struct comedi_insn __user *)arg,
- file);
- break;
- case COMEDI_POLL:
- rc = do_poll_ioctl(dev, arg, file);
- break;
- case COMEDI_SETRSUBD:
- rc = do_setrsubd_ioctl(dev, arg, file);
- break;
- case COMEDI_SETWSUBD:
- rc = do_setwsubd_ioctl(dev, arg, file);
- break;
- default:
- rc = -ENOTTY;
- break;
- }
-
-done:
- mutex_unlock(&dev->mutex);
- return rc;
-}
-
-static void comedi_vm_open(struct vm_area_struct *area)
-{
- struct comedi_buf_map *bm;
-
- bm = area->vm_private_data;
- comedi_buf_map_get(bm);
-}
-
-static void comedi_vm_close(struct vm_area_struct *area)
-{
- struct comedi_buf_map *bm;
-
- bm = area->vm_private_data;
- comedi_buf_map_put(bm);
-}
-
-static const struct vm_operations_struct comedi_vm_ops = {
- .open = comedi_vm_open,
- .close = comedi_vm_close,
-};
-
-static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
- struct comedi_subdevice *s;
- struct comedi_async *async;
- struct comedi_buf_map *bm = NULL;
- unsigned long start = vma->vm_start;
- unsigned long size;
- int n_pages;
- int i;
- int retval;
-
- /*
- * 'trylock' avoids a circular dependency with current->mm->mmap_sem,
- * and down-reading &dev->attach_lock should normally succeed without
- * contention unless the device is in the process of being attached
- * or detached.
- */
- if (!down_read_trylock(&dev->attach_lock))
- return -EAGAIN;
-
- if (!dev->attached) {
- dev_dbg(dev->class_dev, "no driver attached\n");
- retval = -ENODEV;
- goto done;
- }
-
- if (vma->vm_flags & VM_WRITE)
- s = comedi_file_write_subdevice(file);
- else
- s = comedi_file_read_subdevice(file);
- if (!s) {
- retval = -EINVAL;
- goto done;
- }
-
- async = s->async;
- if (!async) {
- retval = -EINVAL;
- goto done;
- }
-
- if (vma->vm_pgoff != 0) {
- dev_dbg(dev->class_dev, "mmap() offset must be 0.\n");
- retval = -EINVAL;
- goto done;
- }
-
- size = vma->vm_end - vma->vm_start;
- if (size > async->prealloc_bufsz) {
- retval = -EFAULT;
- goto done;
- }
- if (size & (~PAGE_MASK)) {
- retval = -EFAULT;
- goto done;
- }
-
- n_pages = size >> PAGE_SHIFT;
-
- /* get reference to current buf map (if any) */
- bm = comedi_buf_map_from_subdev_get(s);
- if (!bm || n_pages > bm->n_pages) {
- retval = -EINVAL;
- goto done;
- }
- for (i = 0; i < n_pages; ++i) {
- struct comedi_buf_page *buf = &bm->page_list[i];
-
- if (remap_pfn_range(vma, start,
- page_to_pfn(virt_to_page(buf->virt_addr)),
- PAGE_SIZE, PAGE_SHARED)) {
- retval = -EAGAIN;
- goto done;
- }
- start += PAGE_SIZE;
- }
-
- vma->vm_ops = &comedi_vm_ops;
- vma->vm_private_data = bm;
-
- vma->vm_ops->open(vma);
-
- retval = 0;
-done:
- up_read(&dev->attach_lock);
- comedi_buf_map_put(bm); /* put reference to buf map - okay if NULL */
- return retval;
-}
-
-static unsigned int comedi_poll(struct file *file, poll_table *wait)
-{
- unsigned int mask = 0;
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
- struct comedi_subdevice *s;
-
- mutex_lock(&dev->mutex);
-
- if (!dev->attached) {
- dev_dbg(dev->class_dev, "no driver attached\n");
- goto done;
- }
-
- s = comedi_file_read_subdevice(file);
- if (s && s->async) {
- poll_wait(file, &s->async->wait_head, wait);
- if (!s->busy || !comedi_is_subdevice_running(s) ||
- (s->async->cmd.flags & CMDF_WRITE) ||
- comedi_buf_read_n_available(s) > 0)
- mask |= POLLIN | POLLRDNORM;
- }
-
- s = comedi_file_write_subdevice(file);
- if (s && s->async) {
- unsigned int bps = comedi_bytes_per_sample(s);
-
- poll_wait(file, &s->async->wait_head, wait);
- comedi_buf_write_alloc(s, s->async->prealloc_bufsz);
- if (!s->busy || !comedi_is_subdevice_running(s) ||
- !(s->async->cmd.flags & CMDF_WRITE) ||
- comedi_buf_write_n_allocated(s) >= bps)
- mask |= POLLOUT | POLLWRNORM;
- }
-
-done:
- mutex_unlock(&dev->mutex);
- return mask;
-}
-
-static ssize_t comedi_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *offset)
-{
- struct comedi_subdevice *s;
- struct comedi_async *async;
- int n, m, count = 0, retval = 0;
- DECLARE_WAITQUEUE(wait, current);
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
- bool on_wait_queue = false;
- bool attach_locked;
- unsigned int old_detach_count;
-
- /* Protect against device detachment during operation. */
- down_read(&dev->attach_lock);
- attach_locked = true;
- old_detach_count = dev->detach_count;
-
- if (!dev->attached) {
- dev_dbg(dev->class_dev, "no driver attached\n");
- retval = -ENODEV;
- goto out;
- }
-
- s = comedi_file_write_subdevice(file);
- if (!s || !s->async) {
- retval = -EIO;
- goto out;
- }
-
- async = s->async;
-
- if (!s->busy || !nbytes)
- goto out;
- if (s->busy != file) {
- retval = -EACCES;
- goto out;
- }
- if (!(async->cmd.flags & CMDF_WRITE)) {
- retval = -EINVAL;
- goto out;
- }
-
- add_wait_queue(&async->wait_head, &wait);
- on_wait_queue = true;
- while (nbytes > 0 && !retval) {
- unsigned runflags;
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- runflags = comedi_get_subdevice_runflags(s);
- if (!comedi_is_runflags_running(runflags)) {
- if (count == 0) {
- struct comedi_subdevice *new_s;
-
- if (comedi_is_runflags_in_error(runflags))
- retval = -EPIPE;
- else
- retval = 0;
- /*
- * To avoid deadlock, cannot acquire dev->mutex
- * while dev->attach_lock is held. Need to
- * remove task from the async wait queue before
- * releasing dev->attach_lock, as it might not
- * be valid afterwards.
- */
- remove_wait_queue(&async->wait_head, &wait);
- on_wait_queue = false;
- up_read(&dev->attach_lock);
- attach_locked = false;
- mutex_lock(&dev->mutex);
- /*
- * Become non-busy unless things have changed
-				 * behind our back. Checking that
-				 * dev->detach_count is unchanged ought to be
-				 * sufficient (unless there have been 2**32
-				 * detaches in the meantime!), but check the
-				 * subdevice pointer as well just in case.
- */
- new_s = comedi_file_write_subdevice(file);
- if (dev->attached &&
- old_detach_count == dev->detach_count &&
- s == new_s && new_s->async == async)
- do_become_nonbusy(dev, s);
- mutex_unlock(&dev->mutex);
- }
- break;
- }
-
- n = nbytes;
-
- m = n;
- if (async->buf_write_ptr + m > async->prealloc_bufsz)
- m = async->prealloc_bufsz - async->buf_write_ptr;
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
- if (m > comedi_buf_write_n_allocated(s))
- m = comedi_buf_write_n_allocated(s);
- if (m < n)
- n = m;
-
- if (n == 0) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
- schedule();
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- if (!s->busy)
- break;
- if (s->busy != file) {
- retval = -EACCES;
- break;
- }
- if (!(async->cmd.flags & CMDF_WRITE)) {
- retval = -EINVAL;
- break;
- }
- continue;
- }
-
- m = copy_from_user(async->prealloc_buf + async->buf_write_ptr,
- buf, n);
- if (m) {
- n -= m;
- retval = -EFAULT;
- }
- comedi_buf_write_free(s, n);
-
- count += n;
- nbytes -= n;
-
- buf += n;
- break; /* makes device work like a pipe */
- }
-out:
- if (on_wait_queue)
- remove_wait_queue(&async->wait_head, &wait);
- set_current_state(TASK_RUNNING);
- if (attach_locked)
- up_read(&dev->attach_lock);
-
- return count ? count : retval;
-}
-
-static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
- loff_t *offset)
-{
- struct comedi_subdevice *s;
- struct comedi_async *async;
- int n, m, count = 0, retval = 0;
- DECLARE_WAITQUEUE(wait, current);
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
- unsigned int old_detach_count;
- bool become_nonbusy = false;
- bool attach_locked;
-
- /* Protect against device detachment during operation. */
- down_read(&dev->attach_lock);
- attach_locked = true;
- old_detach_count = dev->detach_count;
-
- if (!dev->attached) {
- dev_dbg(dev->class_dev, "no driver attached\n");
- retval = -ENODEV;
- goto out;
- }
-
- s = comedi_file_read_subdevice(file);
- if (!s || !s->async) {
- retval = -EIO;
- goto out;
- }
-
- async = s->async;
- if (!s->busy || !nbytes)
- goto out;
- if (s->busy != file) {
- retval = -EACCES;
- goto out;
- }
- if (async->cmd.flags & CMDF_WRITE) {
- retval = -EINVAL;
- goto out;
- }
-
- add_wait_queue(&async->wait_head, &wait);
- while (nbytes > 0 && !retval) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- n = nbytes;
-
- m = comedi_buf_read_n_available(s);
- if (async->buf_read_ptr + m > async->prealloc_bufsz)
- m = async->prealloc_bufsz - async->buf_read_ptr;
- if (m < n)
- n = m;
-
- if (n == 0) {
- unsigned runflags = comedi_get_subdevice_runflags(s);
-
- if (!comedi_is_runflags_running(runflags)) {
- if (comedi_is_runflags_in_error(runflags))
- retval = -EPIPE;
- else
- retval = 0;
- become_nonbusy = true;
- break;
- }
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
- schedule();
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- if (!s->busy) {
- retval = 0;
- break;
- }
- if (s->busy != file) {
- retval = -EACCES;
- break;
- }
- if (async->cmd.flags & CMDF_WRITE) {
- retval = -EINVAL;
- break;
- }
- continue;
- }
- m = copy_to_user(buf, async->prealloc_buf +
- async->buf_read_ptr, n);
- if (m) {
- n -= m;
- retval = -EFAULT;
- }
-
- comedi_buf_read_alloc(s, n);
- comedi_buf_read_free(s, n);
-
- count += n;
- nbytes -= n;
-
- buf += n;
- break; /* makes device work like a pipe */
- }
- remove_wait_queue(&async->wait_head, &wait);
- set_current_state(TASK_RUNNING);
- if (become_nonbusy || comedi_is_subdevice_idle(s)) {
- struct comedi_subdevice *new_s;
-
- /*
- * To avoid deadlock, cannot acquire dev->mutex
- * while dev->attach_lock is held.
- */
- up_read(&dev->attach_lock);
- attach_locked = false;
- mutex_lock(&dev->mutex);
- /*
- * Check device hasn't become detached behind our back.
-	 * Checking that dev->detach_count is unchanged ought to be
- * sufficient (unless there have been 2**32 detaches in the
- * meantime!), but check the subdevice pointer as well just in
- * case.
- */
- new_s = comedi_file_read_subdevice(file);
- if (dev->attached && old_detach_count == dev->detach_count &&
- s == new_s && new_s->async == async) {
- if (become_nonbusy || comedi_buf_n_bytes_ready(s) == 0)
- do_become_nonbusy(dev, s);
- }
- mutex_unlock(&dev->mutex);
- }
-out:
- if (attach_locked)
- up_read(&dev->attach_lock);
-
- return count ? count : retval;
-}
-
-static int comedi_open(struct inode *inode, struct file *file)
-{
- const unsigned minor = iminor(inode);
- struct comedi_file *cfp;
- struct comedi_device *dev = comedi_dev_get_from_minor(minor);
- int rc;
-
- if (!dev) {
- pr_debug("invalid minor number\n");
- return -ENODEV;
- }
-
- cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
- if (!cfp)
- return -ENOMEM;
-
- cfp->dev = dev;
-
- mutex_lock(&dev->mutex);
- if (!dev->attached && !capable(CAP_SYS_ADMIN)) {
- dev_dbg(dev->class_dev, "not attached and not CAP_SYS_ADMIN\n");
- rc = -ENODEV;
- goto out;
- }
- if (dev->attached && dev->use_count == 0) {
- if (!try_module_get(dev->driver->module)) {
- rc = -ENXIO;
- goto out;
- }
- if (dev->open) {
- rc = dev->open(dev);
- if (rc < 0) {
- module_put(dev->driver->module);
- goto out;
- }
- }
- }
-
- dev->use_count++;
- file->private_data = cfp;
- comedi_file_reset(file);
- rc = 0;
-
-out:
- mutex_unlock(&dev->mutex);
- if (rc) {
- comedi_dev_put(dev);
- kfree(cfp);
- }
- return rc;
-}
-
-static int comedi_fasync(int fd, struct file *file, int on)
-{
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
-
- return fasync_helper(fd, file, on, &dev->async_queue);
-}
-
-static int comedi_close(struct inode *inode, struct file *file)
-{
- struct comedi_file *cfp = file->private_data;
- struct comedi_device *dev = cfp->dev;
- struct comedi_subdevice *s = NULL;
- int i;
-
- mutex_lock(&dev->mutex);
-
- if (dev->subdevices) {
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
-
- if (s->busy == file)
- do_cancel(dev, s);
- if (s->lock == file)
- s->lock = NULL;
- }
- }
- if (dev->attached && dev->use_count == 1) {
- if (dev->close)
- dev->close(dev);
- module_put(dev->driver->module);
- }
-
- dev->use_count--;
-
- mutex_unlock(&dev->mutex);
- comedi_dev_put(dev);
- kfree(cfp);
-
- return 0;
-}
-
-static const struct file_operations comedi_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = comedi_unlocked_ioctl,
- .compat_ioctl = comedi_compat_ioctl,
- .open = comedi_open,
- .release = comedi_close,
- .read = comedi_read,
- .write = comedi_write,
- .mmap = comedi_mmap,
- .poll = comedi_poll,
- .fasync = comedi_fasync,
- .llseek = noop_llseek,
-};
-
-/**
- * comedi_event() - Handle events for asynchronous COMEDI command
- * @dev: COMEDI device.
- * @s: COMEDI subdevice.
- * Context: in_interrupt() (usually), @s->spin_lock spin-lock not held.
- *
- * If an asynchronous COMEDI command is active on the subdevice, process
- * any %COMEDI_CB_... event flags that have been set, usually by an
- * interrupt handler. These may change the run state of the asynchronous
- * command, wake a task, and/or send a %SIGIO signal.
- */
-void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- unsigned int events;
- int si_code = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&s->spin_lock, flags);
-
- events = async->events;
- async->events = 0;
- if (!__comedi_is_subdevice_running(s)) {
- spin_unlock_irqrestore(&s->spin_lock, flags);
- return;
- }
-
- if (events & COMEDI_CB_CANCEL_MASK)
- __comedi_clear_subdevice_runflags(s, COMEDI_SRF_RUNNING);
-
- /*
- * Remember if an error event has occurred, so an error can be
- * returned the next time the user does a read() or write().
- */
- if (events & COMEDI_CB_ERROR_MASK)
- __comedi_set_subdevice_runflags(s, COMEDI_SRF_ERROR);
-
- if (async->cb_mask & events) {
- wake_up_interruptible(&async->wait_head);
- si_code = async->cmd.flags & CMDF_WRITE ? POLL_OUT : POLL_IN;
- }
-
- spin_unlock_irqrestore(&s->spin_lock, flags);
-
- if (si_code)
- kill_fasync(&dev->async_queue, SIGIO, si_code);
-}
-EXPORT_SYMBOL_GPL(comedi_event);
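
As an illustration of the flow described above, a low-level driver's interrupt handler typically pushes data into the buffer, sets any extra event flags, and then calls comedi_event() to do the waking and signalling. The sketch below is hypothetical: the xyz names and the stubbed-out sample stand in for real hardware access, and comedi_buf_write_samples() itself sets %COMEDI_CB_BLOCK as a side effect.

static irqreturn_t xyz_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	unsigned short sample = 0;	/* would be read from the hardware */

	/* copies the sample into the buffer and flags COMEDI_CB_BLOCK */
	comedi_buf_write_samples(s, &sample, 1);

	if (async->cmd.stop_src == TRIG_COUNT &&
	    async->scans_done >= async->cmd.stop_arg)
		async->events |= COMEDI_CB_EOA;

	comedi_event(dev, s);
	return IRQ_HANDLED;
}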
-
-/* Note: the ->mutex is pre-locked on successful return */
-struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
-{
- struct comedi_device *dev;
- struct device *csdev;
- unsigned i;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
- comedi_device_init(dev);
- comedi_set_hw_dev(dev, hardware_device);
- mutex_lock(&dev->mutex);
- mutex_lock(&comedi_board_minor_table_lock);
- for (i = hardware_device ? comedi_num_legacy_minors : 0;
- i < COMEDI_NUM_BOARD_MINORS; ++i) {
- if (!comedi_board_minor_table[i]) {
- comedi_board_minor_table[i] = dev;
- break;
- }
- }
- mutex_unlock(&comedi_board_minor_table_lock);
- if (i == COMEDI_NUM_BOARD_MINORS) {
- mutex_unlock(&dev->mutex);
- comedi_device_cleanup(dev);
- comedi_dev_put(dev);
- dev_err(hardware_device,
- "ran out of minor numbers for board device files\n");
- return ERR_PTR(-EBUSY);
- }
- dev->minor = i;
- csdev = device_create(comedi_class, hardware_device,
- MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i);
- if (!IS_ERR(csdev))
- dev->class_dev = get_device(csdev);
-
- /* Note: dev->mutex needs to be unlocked by the caller. */
- return dev;
-}
-
-void comedi_release_hardware_device(struct device *hardware_device)
-{
- int minor;
- struct comedi_device *dev;
-
- for (minor = comedi_num_legacy_minors; minor < COMEDI_NUM_BOARD_MINORS;
- minor++) {
- mutex_lock(&comedi_board_minor_table_lock);
- dev = comedi_board_minor_table[minor];
- if (dev && dev->hw_dev == hardware_device) {
- comedi_board_minor_table[minor] = NULL;
- mutex_unlock(&comedi_board_minor_table_lock);
- comedi_free_board_dev(dev);
- break;
- }
- mutex_unlock(&comedi_board_minor_table_lock);
- }
-}
-
-int comedi_alloc_subdevice_minor(struct comedi_subdevice *s)
-{
- struct comedi_device *dev = s->device;
- struct device *csdev;
- unsigned i;
-
- mutex_lock(&comedi_subdevice_minor_table_lock);
- for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) {
- if (!comedi_subdevice_minor_table[i]) {
- comedi_subdevice_minor_table[i] = s;
- break;
- }
- }
- mutex_unlock(&comedi_subdevice_minor_table_lock);
- if (i == COMEDI_NUM_SUBDEVICE_MINORS) {
- dev_err(dev->class_dev,
- "ran out of minor numbers for subdevice files\n");
- return -EBUSY;
- }
- i += COMEDI_NUM_BOARD_MINORS;
- s->minor = i;
- csdev = device_create(comedi_class, dev->class_dev,
- MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i_subd%i",
- dev->minor, s->index);
- if (!IS_ERR(csdev))
- s->class_dev = csdev;
-
- return 0;
-}
-
-void comedi_free_subdevice_minor(struct comedi_subdevice *s)
-{
- unsigned int i;
-
- if (!s)
- return;
- if (s->minor < COMEDI_NUM_BOARD_MINORS ||
- s->minor >= COMEDI_NUM_MINORS)
- return;
-
- i = s->minor - COMEDI_NUM_BOARD_MINORS;
- mutex_lock(&comedi_subdevice_minor_table_lock);
- if (s == comedi_subdevice_minor_table[i])
- comedi_subdevice_minor_table[i] = NULL;
- mutex_unlock(&comedi_subdevice_minor_table_lock);
- if (s->class_dev) {
- device_destroy(comedi_class, MKDEV(COMEDI_MAJOR, s->minor));
- s->class_dev = NULL;
- }
-}
-
-static void comedi_cleanup_board_minors(void)
-{
- struct comedi_device *dev;
- unsigned i;
-
- for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- dev = comedi_clear_board_minor(i);
- comedi_free_board_dev(dev);
- }
-}
-
-static int __init comedi_init(void)
-{
- int i;
- int retval;
-
- pr_info("version " COMEDI_RELEASE " - http://www.comedi.org\n");
-
- if (comedi_num_legacy_minors < 0 ||
- comedi_num_legacy_minors > COMEDI_NUM_BOARD_MINORS) {
- pr_err("invalid value for module parameter \"comedi_num_legacy_minors\". Valid values are 0 through %i.\n",
- COMEDI_NUM_BOARD_MINORS);
- return -EINVAL;
- }
-
- retval = register_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS, "comedi");
- if (retval)
- return -EIO;
- cdev_init(&comedi_cdev, &comedi_fops);
- comedi_cdev.owner = THIS_MODULE;
-
- retval = kobject_set_name(&comedi_cdev.kobj, "comedi");
- if (retval) {
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS);
- return retval;
- }
-
- if (cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS)) {
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS);
- return -EIO;
- }
- comedi_class = class_create(THIS_MODULE, "comedi");
- if (IS_ERR(comedi_class)) {
- pr_err("failed to create class\n");
- cdev_del(&comedi_cdev);
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS);
- return PTR_ERR(comedi_class);
- }
-
- comedi_class->dev_groups = comedi_dev_groups;
-
- /* XXX requires /proc interface */
- comedi_proc_init();
-
-	/* create device files for legacy/manual use */
- for (i = 0; i < comedi_num_legacy_minors; i++) {
- struct comedi_device *dev;
-
- dev = comedi_alloc_board_minor(NULL);
- if (IS_ERR(dev)) {
- comedi_cleanup_board_minors();
- cdev_del(&comedi_cdev);
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
- COMEDI_NUM_MINORS);
- return PTR_ERR(dev);
- }
- /* comedi_alloc_board_minor() locked the mutex */
- mutex_unlock(&dev->mutex);
- }
-
- return 0;
-}
-module_init(comedi_init);
-
-static void __exit comedi_cleanup(void)
-{
- comedi_cleanup_board_minors();
- class_destroy(comedi_class);
- cdev_del(&comedi_cdev);
- unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
-
- comedi_proc_cleanup();
-}
-module_exit(comedi_cleanup);
-
-MODULE_AUTHOR("http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi core module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
deleted file mode 100644
index cd9437f72c35..000000000000
--- a/drivers/staging/comedi/comedi_internal.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef _COMEDI_INTERNAL_H
-#define _COMEDI_INTERNAL_H
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-
-/*
- * various internal comedi stuff
- */
-
-struct comedi_buf_map;
-struct comedi_devconfig;
-struct comedi_device;
-struct comedi_insn;
-struct comedi_rangeinfo;
-struct comedi_subdevice;
-struct device;
-
-int do_rangeinfo_ioctl(struct comedi_device *dev,
- struct comedi_rangeinfo __user *arg);
-struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device);
-void comedi_release_hardware_device(struct device *hardware_device);
-int comedi_alloc_subdevice_minor(struct comedi_subdevice *s);
-void comedi_free_subdevice_minor(struct comedi_subdevice *s);
-
-int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned long new_size);
-void comedi_buf_reset(struct comedi_subdevice *s);
-bool comedi_buf_is_mmapped(struct comedi_subdevice *s);
-void comedi_buf_map_get(struct comedi_buf_map *bm);
-int comedi_buf_map_put(struct comedi_buf_map *bm);
-struct comedi_buf_map *comedi_buf_map_from_subdev_get(
- struct comedi_subdevice *s);
-unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s);
-void comedi_device_cancel_all(struct comedi_device *dev);
-bool comedi_can_auto_free_spriv(struct comedi_subdevice *s);
-
-extern unsigned int comedi_default_buf_size_kb;
-extern unsigned int comedi_default_buf_maxsize_kb;
-
-/* drivers.c */
-
-extern struct comedi_driver *comedi_drivers;
-extern struct mutex comedi_drivers_list_lock;
-
-int insn_inval(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
-void comedi_device_detach(struct comedi_device *);
-int comedi_device_attach(struct comedi_device *, struct comedi_devconfig *);
-
-#ifdef CONFIG_PROC_FS
-
-/* proc.c */
-
-void comedi_proc_init(void);
-void comedi_proc_cleanup(void);
-#else
-static inline void comedi_proc_init(void)
-{
-}
-
-static inline void comedi_proc_cleanup(void)
-{
-}
-#endif
-
-#endif /* _COMEDI_INTERNAL_H */
diff --git a/drivers/staging/comedi/comedi_pci.c b/drivers/staging/comedi/comedi_pci.c
deleted file mode 100644
index 0601049d8923..000000000000
--- a/drivers/staging/comedi/comedi_pci.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * comedi_pci.c
- * Comedi PCI driver specific functions.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "comedi_pci.h"
-
-/**
- * comedi_to_pci_dev() - Return PCI device attached to COMEDI device
- * @dev: COMEDI device.
- *
- * If @dev->hw_dev is non-%NULL, it is assumed to point to a
- * &struct device embedded in a &struct pci_dev.
- *
- * Returns: Attached PCI device if @dev->hw_dev is non-%NULL.
- * Returns %NULL if @dev->hw_dev is %NULL.
- */
-struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev)
-{
- return dev->hw_dev ? to_pci_dev(dev->hw_dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(comedi_to_pci_dev);
-
-/**
- * comedi_pci_enable() - Enable the PCI device and request the regions
- * @dev: COMEDI device.
- *
- * If @dev->hw_dev is non-%NULL, it is assumed to point to a
- * &struct device embedded in a &struct pci_dev. Enable the PCI device
- * and request its regions. Set @dev->ioenabled to %true if successful,
- * otherwise undo what was done.
- *
- * Calls to comedi_pci_enable() and comedi_pci_disable() cannot be nested.
- *
- * Returns:
- * 0 on success,
- * -%ENODEV if @dev->hw_dev is %NULL,
- * -%EBUSY if regions busy,
- * or some negative error number if failed to enable PCI device.
- *
- */
-int comedi_pci_enable(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- int rc;
-
- if (!pcidev)
- return -ENODEV;
-
- rc = pci_enable_device(pcidev);
- if (rc < 0)
- return rc;
-
- rc = pci_request_regions(pcidev, dev->board_name);
- if (rc < 0)
- pci_disable_device(pcidev);
- else
- dev->ioenabled = true;
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(comedi_pci_enable);
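
For illustration, an "auto_attach" handler would normally call this early on and pair it with comedi_pci_detach() as the driver's "detach" handler. A minimal sketch with hypothetical xyz names and an arbitrarily chosen BAR:

static int xyz_pci_auto_attach(struct comedi_device *dev,
			       unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	int ret;

	ret = comedi_pci_enable(dev);	/* enable device, request regions */
	if (ret)
		return ret;

	/* BAR 2 is arbitrary here; a real driver knows its own layout */
	dev->iobase = pci_resource_start(pcidev, 2);

	/* subdevice setup omitted for brevity */
	return comedi_alloc_subdevices(dev, 1);
}

Using comedi_pci_detach() as the matching "detach" handler then releases the regions and disables the device again when the COMEDI device is torn down.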
-
-/**
- * comedi_pci_disable() - Release the regions and disable the PCI device
- * @dev: COMEDI device.
- *
- * If @dev->hw_dev is non-%NULL, it is assumed to point to a
- * &struct device embedded in a &struct pci_dev. If the earlier call
- * to comedi_pci_enable() was successful, release the PCI device's regions
- * and disable it. Reset @dev->ioenabled back to %false.
- */
-void comedi_pci_disable(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
-
- if (pcidev && dev->ioenabled) {
- pci_release_regions(pcidev);
- pci_disable_device(pcidev);
- }
- dev->ioenabled = false;
-}
-EXPORT_SYMBOL_GPL(comedi_pci_disable);
-
-/**
- * comedi_pci_detach() - A generic "detach" handler for PCI COMEDI drivers
- * @dev: COMEDI device.
- *
- * COMEDI drivers for PCI devices that need no special clean-up of private data
- * and have no ioremapped regions other than that pointed to by @dev->mmio may
- * use this function as its "detach" handler called by the COMEDI core when a
- * COMEDI device is being detached from the low-level driver. It may be also
- * called from a more specific "detach" handler that does additional clean-up.
- *
- * Free the IRQ if @dev->irq is non-zero, iounmap @dev->mmio if it is
- * non-%NULL, and call comedi_pci_disable() to release the PCI device's regions
- * and disable it.
- */
-void comedi_pci_detach(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
-
- if (!pcidev || !dev->ioenabled)
- return;
-
- if (dev->irq) {
- free_irq(dev->irq, dev);
- dev->irq = 0;
- }
- if (dev->mmio) {
- iounmap(dev->mmio);
- dev->mmio = NULL;
- }
- comedi_pci_disable(dev);
-}
-EXPORT_SYMBOL_GPL(comedi_pci_detach);
-
-/**
- * comedi_pci_auto_config() - Configure/probe a PCI COMEDI device
- * @pcidev: PCI device.
- * @driver: Registered COMEDI driver.
- * @context: Driver specific data, passed to comedi_auto_config().
- *
- * Typically called from the pci_driver (*probe) function. Auto-configure
- * a COMEDI device, using the &struct device embedded in *@pcidev as the
- * hardware device. The @context value gets passed through to @driver's
- * "auto_attach" handler. The "auto_attach" handler may call
- * comedi_to_pci_dev() on the passed in COMEDI device to recover @pcidev.
- *
- * Returns: The result of calling comedi_auto_config() (0 on success, or
- * a negative error number on failure).
- */
-int comedi_pci_auto_config(struct pci_dev *pcidev,
- struct comedi_driver *driver,
- unsigned long context)
-{
- return comedi_auto_config(&pcidev->dev, driver, context);
-}
-EXPORT_SYMBOL_GPL(comedi_pci_auto_config);
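
In practice the (*probe) function is usually a one-liner that passes the device and a driver-chosen context value straight through; xyz_driver and the use of @id->driver_data below are hypothetical:

static int xyz_pci_probe(struct pci_dev *pcidev,
			 const struct pci_device_id *id)
{
	return comedi_pci_auto_config(pcidev, &xyz_driver, id->driver_data);
}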
-
-/**
- * comedi_pci_auto_unconfig() - Unconfigure/remove a PCI COMEDI device
- * @pcidev: PCI device.
- *
- * Typically called from the pci_driver (*remove) function. Auto-unconfigure
- * a COMEDI device attached to this PCI device, using a pointer to the
- * &struct device embedded in *@pcidev as the hardware device. The COMEDI
- * driver's "detach" handler will be called during unconfiguration of the
- * COMEDI device.
- *
- * Note that the COMEDI device may have already been unconfigured using the
- * %COMEDI_DEVCONFIG ioctl, in which case this attempt to unconfigure it
- * again should be ignored.
- */
-void comedi_pci_auto_unconfig(struct pci_dev *pcidev)
-{
- comedi_auto_unconfig(&pcidev->dev);
-}
-EXPORT_SYMBOL_GPL(comedi_pci_auto_unconfig);
-
-/**
- * comedi_pci_driver_register() - Register a PCI COMEDI driver
- * @comedi_driver: COMEDI driver to be registered.
- * @pci_driver: PCI driver to be registered.
- *
- * This function is called from the module_init() of PCI COMEDI driver modules
- * to register the COMEDI driver and the PCI driver. Do not call it directly,
- * use the module_comedi_pci_driver() helper macro instead.
- *
- * Returns: 0 on success, or a negative error number on failure.
- */
-int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
- struct pci_driver *pci_driver)
-{
- int ret;
-
- ret = comedi_driver_register(comedi_driver);
- if (ret < 0)
- return ret;
-
- ret = pci_register_driver(pci_driver);
- if (ret < 0) {
- comedi_driver_unregister(comedi_driver);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_pci_driver_register);
-
-/**
- * comedi_pci_driver_unregister() - Unregister a PCI COMEDI driver
- * @comedi_driver: COMEDI driver to be unregistered.
- * @pci_driver: PCI driver to be unregistered.
- *
- * This function is called from the module_exit() of PCI COMEDI driver modules
- * to unregister the PCI driver and the COMEDI driver. Do not call it
- * directly, use the module_comedi_pci_driver() helper macro instead.
- */
-void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
- struct pci_driver *pci_driver)
-{
- pci_unregister_driver(pci_driver);
- comedi_driver_unregister(comedi_driver);
-}
-EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
-
-static int __init comedi_pci_init(void)
-{
- return 0;
-}
-module_init(comedi_pci_init);
-
-static void __exit comedi_pci_exit(void)
-{
-}
-module_exit(comedi_pci_exit);
-
-MODULE_AUTHOR("http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi PCI interface module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_pci.h b/drivers/staging/comedi/comedi_pci.h
deleted file mode 100644
index 4005cc9cf7f1..000000000000
--- a/drivers/staging/comedi/comedi_pci.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * comedi_pci.h
- * header file for Comedi PCI drivers
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _COMEDI_PCI_H
-#define _COMEDI_PCI_H
-
-#include <linux/pci.h>
-
-#include "comedidev.h"
-
-/*
- * PCI Vendor IDs not in <linux/pci_ids.h>
- */
-#define PCI_VENDOR_ID_KOLTER 0x1001
-#define PCI_VENDOR_ID_ICP 0x104c
-#define PCI_VENDOR_ID_DT 0x1116
-#define PCI_VENDOR_ID_IOTECH 0x1616
-#define PCI_VENDOR_ID_CONTEC 0x1221
-#define PCI_VENDOR_ID_RTD 0x1435
-#define PCI_VENDOR_ID_HUMUSOFT 0x186c
-
-struct pci_dev *comedi_to_pci_dev(struct comedi_device *);
-
-int comedi_pci_enable(struct comedi_device *);
-void comedi_pci_disable(struct comedi_device *);
-void comedi_pci_detach(struct comedi_device *);
-
-int comedi_pci_auto_config(struct pci_dev *, struct comedi_driver *,
- unsigned long context);
-void comedi_pci_auto_unconfig(struct pci_dev *);
-
-int comedi_pci_driver_register(struct comedi_driver *, struct pci_driver *);
-void comedi_pci_driver_unregister(struct comedi_driver *, struct pci_driver *);
-
-/**
- * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
- * @__comedi_driver: comedi_driver struct
- * @__pci_driver: pci_driver struct
- *
- * Helper macro for comedi PCI drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
- module_driver(__comedi_driver, comedi_pci_driver_register, \
- comedi_pci_driver_unregister, &(__pci_driver))
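
A sketch of typical usage, reusing the hypothetical xyz names from the comedi_pci.c examples above (the PCI ID table and the &struct comedi_driver definition are assumed to exist elsewhere in the driver):

static struct pci_driver xyz_pci_driver = {
	.name		= "xyz",
	.id_table	= xyz_pci_table,
	.probe		= xyz_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(xyz_driver, xyz_pci_driver);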
-
-#endif /* _COMEDI_PCI_H */
diff --git a/drivers/staging/comedi/comedi_pcmcia.c b/drivers/staging/comedi/comedi_pcmcia.c
deleted file mode 100644
index 7e784399a16f..000000000000
--- a/drivers/staging/comedi/comedi_pcmcia.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * comedi_pcmcia.c
- * Comedi PCMCIA driver specific functions.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include "comedi_pcmcia.h"
-
-/**
- * comedi_to_pcmcia_dev() - comedi_device pointer to pcmcia_device pointer.
- * @dev: comedi_device struct
- */
-struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev)
-{
- return dev->hw_dev ? to_pcmcia_dev(dev->hw_dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(comedi_to_pcmcia_dev);
-
-static int comedi_pcmcia_conf_check(struct pcmcia_device *link,
- void *priv_data)
-{
- if (link->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(link);
-}
-
-/**
- * comedi_pcmcia_enable() - Request the regions and enable the PCMCIA device.
- * @dev: comedi_device struct
- * @conf_check: optional callback to check the pcmcia_device configuration
- *
- * The comedi PCMCIA driver needs to set the link->config_flags, as
- * appropriate for that driver, before calling this function in order
- * to allow pcmcia_loop_config() to do its internal autoconfiguration.
- */
-int comedi_pcmcia_enable(struct comedi_device *dev,
- int (*conf_check)(struct pcmcia_device *, void *))
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- int ret;
-
- if (!link)
- return -ENODEV;
-
- if (!conf_check)
- conf_check = comedi_pcmcia_conf_check;
-
- ret = pcmcia_loop_config(link, conf_check, NULL);
- if (ret)
- return ret;
-
- return pcmcia_enable_device(link);
-}
-EXPORT_SYMBOL_GPL(comedi_pcmcia_enable);
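
A hypothetical "auto_attach" sketch (the xyz_cs names, the chosen config_flags and the use of the first I/O resource are illustrative only):

static int xyz_cs_auto_attach(struct comedi_device *dev,
			      unsigned long context)
{
	struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
	int ret;

	/* the flags must be set before the autoconfiguration loop runs */
	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	ret = comedi_pcmcia_enable(dev, NULL);	/* NULL: default conf_check */
	if (ret)
		return ret;
	dev->iobase = link->resource[0]->start;

	/* subdevice setup omitted for brevity */
	return comedi_alloc_subdevices(dev, 1);
}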
-
-/**
- * comedi_pcmcia_disable() - Disable the PCMCIA device and release the regions.
- * @dev: comedi_device struct
- */
-void comedi_pcmcia_disable(struct comedi_device *dev)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
-
- if (link)
- pcmcia_disable_device(link);
-}
-EXPORT_SYMBOL_GPL(comedi_pcmcia_disable);
-
-/**
- * comedi_pcmcia_auto_config() - Configure/probe a comedi PCMCIA driver.
- * @link: pcmcia_device struct
- * @driver: comedi_driver struct
- *
- * Typically called from the pcmcia_driver (*probe) function.
- */
-int comedi_pcmcia_auto_config(struct pcmcia_device *link,
- struct comedi_driver *driver)
-{
- return comedi_auto_config(&link->dev, driver, 0);
-}
-EXPORT_SYMBOL_GPL(comedi_pcmcia_auto_config);
-
-/**
- * comedi_pcmcia_auto_unconfig() - Unconfigure/remove a comedi PCMCIA driver.
- * @link: pcmcia_device struct
- *
- * Typically called from the pcmcia_driver (*remove) function.
- */
-void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link)
-{
- comedi_auto_unconfig(&link->dev);
-}
-EXPORT_SYMBOL_GPL(comedi_pcmcia_auto_unconfig);
-
-/**
- * comedi_pcmcia_driver_register() - Register a comedi PCMCIA driver.
- * @comedi_driver: comedi_driver struct
- * @pcmcia_driver: pcmcia_driver struct
- *
- * This function is used for the module_init() of comedi PCMCIA drivers.
- * Do not call it directly, use the module_comedi_pcmcia_driver() helper
- * macro instead.
- */
-int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver,
- struct pcmcia_driver *pcmcia_driver)
-{
- int ret;
-
- ret = comedi_driver_register(comedi_driver);
- if (ret < 0)
- return ret;
-
- ret = pcmcia_register_driver(pcmcia_driver);
- if (ret < 0) {
- comedi_driver_unregister(comedi_driver);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_pcmcia_driver_register);
-
-/**
- * comedi_pcmcia_driver_unregister() - Unregister a comedi PCMCIA driver.
- * @comedi_driver: comedi_driver struct
- * @pcmcia_driver: pcmcia_driver struct
- *
- * This function is used for the module_exit() of comedi PCMCIA drivers.
- * Do not call it directly, use the module_comedi_pcmcia_driver() helper
- * macro instead.
- */
-void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
- struct pcmcia_driver *pcmcia_driver)
-{
- pcmcia_unregister_driver(pcmcia_driver);
- comedi_driver_unregister(comedi_driver);
-}
-EXPORT_SYMBOL_GPL(comedi_pcmcia_driver_unregister);
-
-static int __init comedi_pcmcia_init(void)
-{
- return 0;
-}
-module_init(comedi_pcmcia_init);
-
-static void __exit comedi_pcmcia_exit(void)
-{
-}
-module_exit(comedi_pcmcia_exit);
-
-MODULE_AUTHOR("http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi PCMCIA interface module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/drivers/staging/comedi/comedi_pcmcia.h
deleted file mode 100644
index 5d3db2b9b4a1..000000000000
--- a/drivers/staging/comedi/comedi_pcmcia.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * comedi_pcmcia.h
- * header file for Comedi PCMCIA drivers
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _COMEDI_PCMCIA_H
-#define _COMEDI_PCMCIA_H
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
-#include "comedidev.h"
-
-struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *);
-
-int comedi_pcmcia_enable(struct comedi_device *,
- int (*conf_check)(struct pcmcia_device *, void *));
-void comedi_pcmcia_disable(struct comedi_device *);
-
-int comedi_pcmcia_auto_config(struct pcmcia_device *, struct comedi_driver *);
-void comedi_pcmcia_auto_unconfig(struct pcmcia_device *);
-
-int comedi_pcmcia_driver_register(struct comedi_driver *,
- struct pcmcia_driver *);
-void comedi_pcmcia_driver_unregister(struct comedi_driver *,
- struct pcmcia_driver *);
-
-/**
- * module_comedi_pcmcia_driver() - Helper macro for registering a comedi PCMCIA driver
- * @__comedi_driver: comedi_driver struct
- * @__pcmcia_driver: pcmcia_driver struct
- *
- * Helper macro for comedi PCMCIA drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
- module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
- comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
-
-#endif /* _COMEDI_PCMCIA_H */
diff --git a/drivers/staging/comedi/comedi_usb.c b/drivers/staging/comedi/comedi_usb.c
deleted file mode 100644
index 68b75e8feec0..000000000000
--- a/drivers/staging/comedi/comedi_usb.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * comedi_usb.c
- * Comedi USB driver specific functions.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-
-#include "comedi_usb.h"
-
-/**
- * comedi_to_usb_interface() - comedi_device pointer to usb_interface pointer.
- * @dev: comedi_device struct
- */
-struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev)
-{
- return dev->hw_dev ? to_usb_interface(dev->hw_dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(comedi_to_usb_interface);
-
-/**
- * comedi_to_usb_dev() - comedi_device pointer to usb_device pointer.
- * @dev: comedi_device struct
- */
-struct usb_device *comedi_to_usb_dev(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
-
- return intf ? interface_to_usbdev(intf) : NULL;
-}
-EXPORT_SYMBOL_GPL(comedi_to_usb_dev);
-
-/**
- * comedi_usb_auto_config() - Configure/probe a comedi USB driver.
- * @intf: usb_interface struct
- * @driver: comedi_driver struct
- * @context: driver specific data, passed to comedi_auto_config()
- *
- * Typically called from the usb_driver (*probe) function.
- */
-int comedi_usb_auto_config(struct usb_interface *intf,
- struct comedi_driver *driver,
- unsigned long context)
-{
- return comedi_auto_config(&intf->dev, driver, context);
-}
-EXPORT_SYMBOL_GPL(comedi_usb_auto_config);
-
-/**
- * comedi_usb_auto_unconfig() - Unconfigure/disconnect a comedi USB driver.
- * @intf: usb_interface struct
- *
- * Typically called from the usb_driver (*disconnect) function.
- */
-void comedi_usb_auto_unconfig(struct usb_interface *intf)
-{
- comedi_auto_unconfig(&intf->dev);
-}
-EXPORT_SYMBOL_GPL(comedi_usb_auto_unconfig);
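
Together with module_comedi_usb_driver() (declared in comedi_usb.h), the USB glue of a driver usually reduces to something like the hypothetical sketch below (the xyz names and the ID table are placeholders):

static int xyz_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	return comedi_usb_auto_config(intf, &xyz_driver, id->driver_info);
}

static struct usb_driver xyz_usb_driver = {
	.name		= "xyz",
	.id_table	= xyz_usb_table,
	.probe		= xyz_usb_probe,
	.disconnect	= comedi_usb_auto_unconfig,
};
module_comedi_usb_driver(xyz_driver, xyz_usb_driver);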
-
-/**
- * comedi_usb_driver_register() - Register a comedi USB driver.
- * @comedi_driver: comedi_driver struct
- * @usb_driver: usb_driver struct
- *
- * This function is used for the module_init() of comedi USB drivers.
- * Do not call it directly, use the module_comedi_usb_driver() helper
- * macro instead.
- */
-int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
- struct usb_driver *usb_driver)
-{
- int ret;
-
- ret = comedi_driver_register(comedi_driver);
- if (ret < 0)
- return ret;
-
- ret = usb_register(usb_driver);
- if (ret < 0) {
- comedi_driver_unregister(comedi_driver);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_usb_driver_register);
-
-/**
- * comedi_usb_driver_unregister() - Unregister a comedi USB driver.
- * @comedi_driver: comedi_driver struct
- * @usb_driver: usb_driver struct
- *
- * This function is used for the module_exit() of comedi USB drivers.
- * Do not call it directly, use the module_comedi_usb_driver() helper
- * macro instead.
- */
-void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
- struct usb_driver *usb_driver)
-{
- usb_deregister(usb_driver);
- comedi_driver_unregister(comedi_driver);
-}
-EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
-
-static int __init comedi_usb_init(void)
-{
- return 0;
-}
-module_init(comedi_usb_init);
-
-static void __exit comedi_usb_exit(void)
-{
-}
-module_exit(comedi_usb_exit);
-
-MODULE_AUTHOR("http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi USB interface module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_usb.h b/drivers/staging/comedi/comedi_usb.h
deleted file mode 100644
index 721128bece3c..000000000000
--- a/drivers/staging/comedi/comedi_usb.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * comedi_usb.h
- * header file for USB Comedi drivers
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _COMEDI_USB_H
-#define _COMEDI_USB_H
-
-#include <linux/usb.h>
-
-#include "comedidev.h"
-
-struct usb_interface *comedi_to_usb_interface(struct comedi_device *);
-struct usb_device *comedi_to_usb_dev(struct comedi_device *);
-
-int comedi_usb_auto_config(struct usb_interface *, struct comedi_driver *,
- unsigned long context);
-void comedi_usb_auto_unconfig(struct usb_interface *);
-
-int comedi_usb_driver_register(struct comedi_driver *, struct usb_driver *);
-void comedi_usb_driver_unregister(struct comedi_driver *, struct usb_driver *);
-
-/**
- * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
- * @__comedi_driver: comedi_driver struct
- * @__usb_driver: usb_driver struct
- *
- * Helper macro for comedi USB drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
- module_driver(__comedi_driver, comedi_usb_driver_register, \
- comedi_usb_driver_unregister, &(__usb_driver))
-
-#endif /* _COMEDI_USB_H */
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
deleted file mode 100644
index 6062493a7146..000000000000
--- a/drivers/staging/comedi/comedidev.h
+++ /dev/null
@@ -1,1034 +0,0 @@
-/*
- * comedidev.h
- * header file for kernel-only structures, variables, and constants
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _COMEDIDEV_H
-#define _COMEDIDEV_H
-
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/spinlock_types.h>
-#include <linux/rwsem.h>
-#include <linux/kref.h>
-
-#include "comedi.h"
-
-#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
-#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
- COMEDI_MINORVERSION, COMEDI_MICROVERSION)
-#define COMEDI_RELEASE VERSION
-
-#define COMEDI_NUM_BOARD_MINORS 0x30
-
-/**
- * struct comedi_subdevice - Working data for a COMEDI subdevice
- * @device: COMEDI device to which this subdevice belongs. (Initialized by
- * comedi_alloc_subdevices().)
- * @index: Index of this subdevice within device's array of subdevices.
- * (Initialized by comedi_alloc_subdevices().)
- * @type: Type of subdevice from &enum comedi_subdevice_type. (Initialized by
- * the low-level driver.)
- * @n_chan: Number of channels the subdevice supports. (Initialized by the
- * low-level driver.)
- * @subdev_flags: Various "SDF" flags indicating aspects of the subdevice to
- * the COMEDI core and user application. (Initialized by the low-level
- * driver.)
- * @len_chanlist: Maximum length of a channel list if the subdevice supports
- * asynchronous acquisition commands. (Optionally initialized by the
- * low-level driver, or changed from 0 to 1 during post-configuration.)
- * @private: Private data pointer which is either set by the low-level driver
- * itself, or by a call to comedi_alloc_spriv() which allocates storage.
- * In the latter case, the storage is automatically freed after the
- * low-level driver's "detach" handler is called for the device.
- * (Initialized by the low-level driver.)
- * @async: Pointer to &struct comedi_async if the subdevice supports
- * asynchronous acquisition commands. (Allocated and initialized during
- * post-configuration if needed.)
- * @lock: Pointer to a file object that performed a %COMEDI_LOCK ioctl on the
- * subdevice. (Initially NULL.)
- * @busy: Pointer to a file object that is performing an asynchronous
- * acquisition command on the subdevice. (Initially NULL.)
- * @runflags: Internal flags for use by COMEDI core, mostly indicating whether
- * an asynchronous acquisition command is running.
- * @spin_lock: Generic spin-lock for use by the COMEDI core and the low-level
- * driver. (Initialized by comedi_alloc_subdevices().)
- * @io_bits: Bit-mask indicating the channel directions for a DIO subdevice
- * with no more than 32 channels. A '1' at a bit position indicates the
- * corresponding channel is configured as an output. (Initialized by the
- * low-level driver for a DIO subdevice. Forced to all-outputs during
- * post-configuration for a digital output subdevice.)
- * @maxdata: If non-zero, this is the maximum raw data value of each channel.
- * If zero, the maximum data value is channel-specific. (Initialized by
- * the low-level driver.)
- * @maxdata_list: If the maximum data value is channel-specific, this points
- * to an array of maximum data values indexed by channel index.
- * (Initialized by the low-level driver.)
- * @range_table: If non-NULL, this points to a COMEDI range table for the
- * subdevice. If NULL, the range table is channel-specific. (Initialized
- * by the low-level driver, will be set to an "invalid" range table during
- * post-configuration if @range_table and @range_table_list are both
- * NULL.)
- * @range_table_list: If the COMEDI range table is channel-specific, this
- * points to an array of pointers to COMEDI range tables indexed by
- * channel number. (Initialized by the low-level driver.)
- * @chanlist: Not used.
- * @insn_read: Optional pointer to a handler for the %INSN_READ instruction.
- * (Initialized by the low-level driver, or set to a default handler
- * during post-configuration.)
- * @insn_write: Optional pointer to a handler for the %INSN_WRITE instruction.
- * (Initialized by the low-level driver, or set to a default handler
- * during post-configuration.)
- * @insn_bits: Optional pointer to a handler for the %INSN_BITS instruction
- * for a digital input, digital output or digital input/output subdevice.
- * (Initialized by the low-level driver, or set to a default handler
- * during post-configuration.)
- * @insn_config: Optional pointer to a handler for the %INSN_CONFIG
- * instruction. (Initialized by the low-level driver, or set to a default
- * handler during post-configuration.)
- * @do_cmd: If the subdevice supports asynchronous acquisition commands, this
- * points to a handler to set it up in hardware. (Initialized by the
- * low-level driver.)
- * @do_cmdtest: If the subdevice supports asynchronous acquisition commands,
- * this points to a handler used to check and possibly tweak a prospective
- * acquisition command without setting it up in hardware. (Initialized by
- * the low-level driver.)
- * @poll: If the subdevice supports asynchronous acquisition commands, this
- * is an optional pointer to a handler for the %COMEDI_POLL ioctl which
- * instructs the low-level driver to synchronize buffers. (Initialized by
- * the low-level driver if needed.)
- * @cancel: If the subdevice supports asynchronous acquisition commands, this
- * points to a handler used to terminate a running command. (Initialized
- * by the low-level driver.)
- * @buf_change: If the subdevice supports asynchronous acquisition commands,
- * this is an optional pointer to a handler that is called when the data
- * buffer for handling asynchronous commands is allocated or reallocated.
- * (Initialized by the low-level driver if needed.)
- * @munge: If the subdevice supports asynchronous acquisition commands and
- * uses DMA to transfer data from the hardware to the acquisition buffer,
- * this points to a function used to "munge" the data values from the
- * hardware into the format expected by COMEDI. (Initialized by the
- * low-level driver if needed.)
- * @async_dma_dir: If the subdevice supports asynchronous acquisition commands
- * and uses DMA to transfer data from the hardware to the acquisition
- * buffer, this sets the DMA direction for the buffer. (Initialized to
- * %DMA_NONE by comedi_alloc_subdevices() and changed by the low-level
- * driver if necessary.)
- * @state: Handy bit-mask indicating the output states for a DIO or digital
- * output subdevice with no more than 32 channels. (Initialized by the
- * low-level driver.)
- * @class_dev: If the subdevice supports asynchronous acquisition commands,
- * this points to a sysfs comediX_subdY device where X is the minor device
- * number of the COMEDI device and Y is the subdevice number. The minor
- * device number for the sysfs device is allocated dynamically in the
- * range 48 to 255. This is used to allow the COMEDI device to be opened
- * with a different default read or write subdevice. (Allocated during
- * post-configuration if needed.)
- * @minor: If @class_dev is set, this is its dynamically allocated minor
- * device number. (Set during post-configuration if necessary.)
- * @readback: Optional pointer to memory allocated by
- * comedi_alloc_subdev_readback() used to hold the values written to
- * analog output channels so they can be read back. The storage is
- * automatically freed after the low-level driver's "detach" handler is
- * called for the device. (Initialized by the low-level driver.)
- *
- * This is the main control structure for a COMEDI subdevice. If the subdevice
- * supports asynchronous acquisition commands, additional information is stored
- * in the &struct comedi_async pointed to by @async.
- *
- * Most of the subdevice is initialized by the low-level driver's "attach" or
- * "auto_attach" handlers but parts of it are initialized by
- * comedi_alloc_subdevices(), and other parts are initialized during
- * post-configuration on return from that handler.
- *
- * A low-level driver that sets @insn_bits for a digital input, digital output,
- * or DIO subdevice may leave @insn_read and @insn_write uninitialized, in
- * which case they will be set to a default handler during post-configuration
- * that uses @insn_bits to emulate the %INSN_READ and %INSN_WRITE instructions.
- */
-struct comedi_subdevice {
- struct comedi_device *device;
- int index;
- int type;
- int n_chan;
- int subdev_flags;
- int len_chanlist; /* maximum length of channel/gain list */
-
- void *private;
-
- struct comedi_async *async;
-
- void *lock;
- void *busy;
- unsigned runflags;
- spinlock_t spin_lock;
-
- unsigned int io_bits;
-
- unsigned int maxdata; /* if maxdata==0, use list */
- const unsigned int *maxdata_list; /* list is channel specific */
-
- const struct comedi_lrange *range_table;
- const struct comedi_lrange *const *range_table_list;
-
- unsigned int *chanlist; /* driver-owned chanlist (not used) */
-
- int (*insn_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- int (*do_cmd)(struct comedi_device *, struct comedi_subdevice *);
- int (*do_cmdtest)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_cmd *);
- int (*poll)(struct comedi_device *, struct comedi_subdevice *);
- int (*cancel)(struct comedi_device *, struct comedi_subdevice *);
-
- /* called when the buffer changes */
- int (*buf_change)(struct comedi_device *, struct comedi_subdevice *);
-
- void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
- void *data, unsigned int num_bytes,
- unsigned int start_chan_index);
- enum dma_data_direction async_dma_dir;
-
- unsigned int state;
-
- struct device *class_dev;
- int minor;
-
- unsigned int *readback;
-};
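
To make those initialization responsibilities concrete, here is a hypothetical sketch of an "auto_attach" handler filling in a single digital-input subdevice after comedi_alloc_subdevices(); the xyz names, channel count and register access are placeholders:

static int xyz_di_insn_bits(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	data[1] = inb(dev->iobase);	/* hypothetical register read */
	return insn->n;
}

static int xyz_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct comedi_subdevice *s;
	int ret;

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	s->type		= COMEDI_SUBD_DI;
	s->subdev_flags	= SDF_READABLE;
	s->n_chan	= 8;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= xyz_di_insn_bits;

	return 0;
}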
-
-/**
- * struct comedi_buf_page - Describe a page of a COMEDI buffer
- * @virt_addr: Kernel address of page.
- * @dma_addr: DMA address of page if in DMA coherent memory.
- */
-struct comedi_buf_page {
- void *virt_addr;
- dma_addr_t dma_addr;
-};
-
-/**
- * struct comedi_buf_map - Describe pages in a COMEDI buffer
- * @dma_hw_dev: Low-level hardware &struct device pointer copied from the
- * COMEDI device's hw_dev member.
- * @page_list: Pointer to array of &struct comedi_buf_page, one for each
- * page in the buffer.
- * @n_pages: Number of pages in the buffer.
- * @dma_dir: DMA direction used to allocate pages of DMA coherent memory,
- * or %DMA_NONE if pages allocated from regular memory.
- * @refcount: &struct kref reference counter used to free the buffer.
- *
- * A COMEDI data buffer is allocated as individual pages, either in
- * conventional memory or DMA coherent memory, depending on the attached,
- * low-level hardware device. (The buffer pages also get mapped into the
- * kernel's contiguous virtual address space pointed to by the 'prealloc_buf'
- * member of &struct comedi_async.)
- *
- * The buffer is normally freed when the COMEDI device is detached from the
- * low-level driver (which may happen due to device removal), but if it happens
- * to be mmapped at the time, the pages cannot be freed until the buffer has
- * been munmapped. That is what the reference counter is for. (The virtual
- * address space pointed to by 'prealloc_buf' is freed when the COMEDI
- * device is detached.)
- */
-struct comedi_buf_map {
- struct device *dma_hw_dev;
- struct comedi_buf_page *page_list;
- unsigned int n_pages;
- enum dma_data_direction dma_dir;
- struct kref refcount;
-};
-
-/**
- * struct comedi_async - Control data for asynchronous COMEDI commands
- * @prealloc_buf: Kernel virtual address of allocated acquisition buffer.
- * @prealloc_bufsz: Buffer size (in bytes).
- * @buf_map: Map of buffer pages.
- * @max_bufsize: Maximum allowed buffer size (in bytes).
- * @buf_write_count: "Write completed" count (in bytes, modulo 2**32).
- * @buf_write_alloc_count: "Allocated for writing" count (in bytes,
- * modulo 2**32).
- * @buf_read_count: "Read completed" count (in bytes, modulo 2**32).
- * @buf_read_alloc_count: "Allocated for reading" count (in bytes,
- * modulo 2**32).
- * @buf_write_ptr: Buffer position for writer.
- * @buf_read_ptr: Buffer position for reader.
- * @cur_chan: Current position in chanlist for scan (for those drivers that
- * use it).
- * @scans_done: The number of scans completed.
- * @scan_progress: Amount received or sent for current scan (in bytes).
- * @munge_chan: Current position in chanlist for "munging".
- * @munge_count: "Munge" count (in bytes, modulo 2**32).
- * @munge_ptr: Buffer position for "munging".
- * @events: Bit-vector of events that have occurred.
- * @cmd: Details of comedi command in progress.
- * @wait_head: Task wait queue for file reader or writer.
- * @cb_mask: Bit-vector of events that should wake waiting tasks.
- * @inttrig: Software trigger function for command, or NULL.
- *
- * Note about the ..._count and ..._ptr members:
- *
- * Think of the _Count values as integers of unlimited size, indexing
- * into a buffer of infinite length (though only an advancing portion
- * of the buffer of fixed length prealloc_bufsz is accessible at any
- * time). Then:
- *
- * Buf_Read_Count <= Buf_Read_Alloc_Count <= Munge_Count <=
- * Buf_Write_Count <= Buf_Write_Alloc_Count <=
- * (Buf_Read_Count + prealloc_bufsz)
- *
- * (Those aren't the actual members, apart from prealloc_bufsz.) When the
- * buffer is reset, those _Count values start at 0 and only increase in value,
- * maintaining the above inequalities until the next time the buffer is
- * reset. The buffer is divided into the following regions by the inequalities:
- *
- * [0, Buf_Read_Count):
- * old region no longer accessible
- *
- * [Buf_Read_Count, Buf_Read_Alloc_Count):
- * filled and munged region allocated for reading but not yet read
- *
- * [Buf_Read_Alloc_Count, Munge_Count):
- * filled and munged region not yet allocated for reading
- *
- * [Munge_Count, Buf_Write_Count):
- * filled region not yet munged
- *
- * [Buf_Write_Count, Buf_Write_Alloc_Count):
- * unfilled region allocated for writing but not yet written
- *
- * [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz):
- * unfilled region not yet allocated for writing
- *
- * [Buf_Read_Count + prealloc_bufsz, infinity):
- * unfilled region not yet accessible
- *
- * Data needs to be written into the buffer before it can be read out,
- * and may need to be converted (or "munged") between the two
- * operations. Extra unfilled buffer space may need to be allocated for
- * writing (advancing Buf_Write_Alloc_Count) before new data is written.
- * After writing new data, the newly filled space needs to be released
- * (advancing Buf_Write_Count). This also results in the new data being
- * "munged" (advancing Munge_Count). Before data is read out of the
- * buffer, extra space may need to be allocated for reading (advancing
- * Buf_Read_Alloc_Count). After the data has been read out, the space
- * needs to be released (advancing Buf_Read_Count).
- *
- * The actual members, buf_read_count, buf_read_alloc_count,
- * munge_count, buf_write_count, and buf_write_alloc_count take the
- * value of the corresponding capitalized _Count values modulo 2^32
- * (UINT_MAX+1). Subtracting a "lower" _count value from a "higher"
- * _count value gives the same answer as subtracting the "lower" _Count
- * value from the "higher" _Count value because prealloc_bufsz < UINT_MAX+1.
- * The modulo operation is done implicitly.
- *
- * The buf_read_ptr, munge_ptr, and buf_write_ptr members take the value
- * of the corresponding capitalized _Count values modulo prealloc_bufsz.
- * These correspond to byte indices in the physical buffer. The modulo
- * operation is done by subtracting prealloc_bufsz when the value
- * exceeds prealloc_bufsz (assuming prealloc_bufsz plus the increment is
- * less than or equal to UINT_MAX).
- */
-struct comedi_async {
- void *prealloc_buf;
- unsigned int prealloc_bufsz;
- struct comedi_buf_map *buf_map;
- unsigned int max_bufsize;
- unsigned int buf_write_count;
- unsigned int buf_write_alloc_count;
- unsigned int buf_read_count;
- unsigned int buf_read_alloc_count;
- unsigned int buf_write_ptr;
- unsigned int buf_read_ptr;
- unsigned int cur_chan;
- unsigned int scans_done;
- unsigned int scan_progress;
- unsigned int munge_chan;
- unsigned int munge_count;
- unsigned int munge_ptr;
- unsigned int events;
- struct comedi_cmd cmd;
- wait_queue_head_t wait_head;
- unsigned int cb_mask;
- int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned int x);
-};
-
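As an illustration of the wrap-around arithmetic described above, the two helpers below (hypothetical names, not part of the COMEDI API) compute the quantities drivers care about most, relying only on unsigned subtraction of the _count members:

/* Illustrative only: wrap-safe arithmetic on the comedi_async counters. */
static inline unsigned int example_bytes_unread(struct comedi_async *async)
{
	/* Buf_Write_Count - Buf_Read_Count: filled data not yet read */
	return async->buf_write_count - async->buf_read_count;
}

static inline unsigned int example_bytes_unalloced(struct comedi_async *async)
{
	/* Unfilled space not yet allocated for writing */
	return async->prealloc_bufsz -
	       (async->buf_write_alloc_count - async->buf_read_count);
}

Both subtractions take a "lower" counter from a "higher" one, so the unsigned results equal the true differences even after the 32-bit counters wrap.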
-/**
- * enum comedi_cb - &struct comedi_async callback "events"
- * @COMEDI_CB_EOS: end-of-scan
- * @COMEDI_CB_EOA: end-of-acquisition/output
- * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
- * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
- * @COMEDI_CB_ERROR: card error during acquisition
- * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
- * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
- * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
- */
-enum comedi_cb {
- COMEDI_CB_EOS = BIT(0),
- COMEDI_CB_EOA = BIT(1),
- COMEDI_CB_BLOCK = BIT(2),
- COMEDI_CB_EOBUF = BIT(3),
- COMEDI_CB_ERROR = BIT(4),
- COMEDI_CB_OVERFLOW = BIT(5),
- /* masks */
- COMEDI_CB_ERROR_MASK = (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW),
- COMEDI_CB_CANCEL_MASK = (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
-};
-
-/**
- * struct comedi_driver - COMEDI driver registration
- * @driver_name: Name of driver.
- * @module: Owning module.
- * @attach: The optional "attach" handler for manually configured COMEDI
- * devices.
- * @detach: The "detach" handler for deconfiguring COMEDI devices.
- * @auto_attach: The optional "auto_attach" handler for automatically
- * configured COMEDI devices.
- * @num_names: Optional number of "board names" supported.
- * @board_name: Optional pointer to a pointer to a board name. The pointer
- * to a board name is embedded in an element of a driver-defined array
- * of static, read-only board type information.
- * @offset: Optional size of each element of the driver-defined array of
- * static, read-only board type information, i.e. the offset between each
- * pointer to a board name.
- *
- * This is used with comedi_driver_register() and comedi_driver_unregister() to
- * register and unregister a low-level COMEDI driver with the COMEDI core.
- *
- * If @num_names is non-zero, @board_name should be non-NULL, and @offset
- * should be at least sizeof(*board_name). These are used by the handler for
- * the %COMEDI_DEVCONFIG ioctl to match a hardware device and its driver by
- * board name. If @num_names is zero, the %COMEDI_DEVCONFIG ioctl matches a
- * hardware device and its driver by driver name. This is only useful if the
- * @attach handler is set. If @num_names is non-zero, the driver's @attach
- * handler will be called with the COMEDI device structure's board_ptr member
- * pointing to the matched pointer to a board name within the driver's private
- * array of static, read-only board type information.
- */
-struct comedi_driver {
- /* private: */
- struct comedi_driver *next; /* Next in list of COMEDI drivers. */
- /* public: */
- const char *driver_name;
- struct module *module;
- int (*attach)(struct comedi_device *, struct comedi_devconfig *);
- void (*detach)(struct comedi_device *);
- int (*auto_attach)(struct comedi_device *, unsigned long);
- unsigned int num_names;
- const char *const *board_name;
- int offset;
-};
-
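For illustration only, a hypothetical driver "foo" could wire up the board-name matching like this; the board structure, names and attach handler are invented. @board_name points at the name member of the first array element and @offset gives the stride to the next one:

struct foo_board {
	const char *name;	/* matched against the COMEDI_DEVCONFIG name */
	int n_ai_chan;
};

static const struct foo_board foo_boards[] = {
	{ .name = "foo-16", .n_ai_chan = 16 },
	{ .name = "foo-32", .n_ai_chan = 32 },
};

static int foo_attach(struct comedi_device *dev, struct comedi_devconfig *it);

static struct comedi_driver foo_driver = {
	.driver_name	= "foo",
	.module		= THIS_MODULE,
	.attach		= foo_attach,
	.detach		= comedi_legacy_detach,
	.board_name	= &foo_boards[0].name,
	.num_names	= ARRAY_SIZE(foo_boards),
	.offset		= sizeof(struct foo_board),
};

Because 'name' is the first member of struct foo_board, foo_attach() can recover the matched entry by simply casting dev->board_ptr to const struct foo_board *.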
-/**
- * struct comedi_device - Working data for a COMEDI device
- * @use_count: Number of open file objects.
- * @driver: Low-level COMEDI driver attached to this COMEDI device.
- * @pacer: Optional pointer to a dynamically allocated acquisition pacer
- * control. It is freed automatically after the COMEDI device is
- * detached from the low-level driver.
- * @private: Optional pointer to private data allocated by the low-level
- * driver. It is freed automatically after the COMEDI device is
- * detached from the low-level driver.
- * @class_dev: Sysfs comediX device.
- * @minor: Minor device number of COMEDI char device (0-47).
- * @detach_count: Counter incremented every time the COMEDI device is detached.
- * Used for checking a previous attachment is still valid.
- * @hw_dev: Optional pointer to the low-level hardware &struct device. It is
- * required for automatically configured COMEDI devices and optional for
- * COMEDI devices configured by the %COMEDI_DEVCONFIG ioctl, although
- * the bus-specific COMEDI functions only work if it is set correctly.
- * It is also passed to dma_alloc_coherent() for COMEDI subdevices that
- * have their 'async_dma_dir' member set to something other than
- * %DMA_NONE.
- * @board_name: Pointer to a COMEDI board name or a COMEDI driver name. When
- * the low-level driver's "attach" handler is called by the handler for
- * the %COMEDI_DEVCONFIG ioctl, it points to a matched board name
- * string if the 'num_names' member of the &struct comedi_driver is
- * non-zero, otherwise it points to the low-level driver name string.
- * When the low-level driver's "auto_attach" handler is called for an
- * automatically configured COMEDI device, it points to the low-level
- * driver name string. The low-level driver is free to change it in its
- * "attach" or "auto_attach" handler if it wishes.
- * @board_ptr: Optional pointer to private, read-only board type information in
- * the low-level driver. If the 'num_names' member of the &struct
- * comedi_driver is non-zero, the handler for the %COMEDI_DEVCONFIG ioctl
- * will point it to a pointer to a matched board name string within the
- * driver's private array of static, read-only board type information when
- * calling the driver's "attach" handler. The low-level driver is free to
- * change it.
- * @attached: Flag indicating that the COMEDI device is attached to a low-level
- * driver.
- * @ioenabled: Flag used to indicate that a PCI device has been enabled and
- * its regions requested.
- * @spinlock: Generic spin-lock for use by the low-level driver.
- * @mutex: Generic mutex for use by the COMEDI core module.
- * @attach_lock: &struct rw_semaphore used to guard against the COMEDI device
- * being detached while an operation is in progress. The down_write()
- * operation is only allowed while @mutex is held and is used when
- * changing @attached and @detach_count and calling the low-level driver's
- * "detach" handler. The down_read() operation is generally used without
- * holding @mutex.
- * @refcount: &struct kref reference counter for freeing COMEDI device.
- * @n_subdevices: Number of COMEDI subdevices allocated by the low-level
- * driver for this device.
- * @subdevices: Dynamically allocated array of COMEDI subdevices.
- * @mmio: Optional pointer to a remapped MMIO region set by the low-level
- * driver.
- * @iobase: Optional base of an I/O port region requested by the low-level
- * driver.
- * @iolen: Length of I/O port region requested at @iobase.
- * @irq: Optional IRQ number requested by the low-level driver.
- * @read_subdev: Optional pointer to a default COMEDI subdevice operated on by
- * the read() file operation. Set by the low-level driver.
- * @write_subdev: Optional pointer to a default COMEDI subdevice operated on by
- * the write() file operation. Set by the low-level driver.
- * @async_queue: Storage for fasync_helper().
- * @open: Optional pointer to a function set by the low-level driver to be
- * called when @use_count changes from 0 to 1.
- * @close: Optional pointer to a function set by the low-level driver to be
- * called when @use_count changes from 1 to 0.
- *
- * This is the main control data structure for a COMEDI device (as far as the
- * COMEDI core is concerned). There are two groups of COMEDI devices -
- * "legacy" devices that are configured by the handler for the
- * %COMEDI_DEVCONFIG ioctl, and automatically configured devices resulting
- * from a call to comedi_auto_config() as a result of a bus driver probe in
- * a low-level COMEDI driver. The "legacy" COMEDI devices are allocated
- * during module initialization if the "comedi_num_legacy_minors" module
- * parameter is non-zero and use minor device numbers from 0 to
- * comedi_num_legacy_minors minus one. The automatically configured COMEDI
- * devices are allocated on demand and use minor device numbers from
- * comedi_num_legacy_minors to 47.
- */
-struct comedi_device {
- int use_count;
- struct comedi_driver *driver;
- struct comedi_8254 *pacer;
- void *private;
-
- struct device *class_dev;
- int minor;
- unsigned int detach_count;
- struct device *hw_dev;
-
- const char *board_name;
- const void *board_ptr;
- bool attached:1;
- bool ioenabled:1;
- spinlock_t spinlock;
- struct mutex mutex;
- struct rw_semaphore attach_lock;
- struct kref refcount;
-
- int n_subdevices;
- struct comedi_subdevice *subdevices;
-
- /* dumb */
- void __iomem *mmio;
- unsigned long iobase;
- unsigned long iolen;
- unsigned int irq;
-
- struct comedi_subdevice *read_subdev;
- struct comedi_subdevice *write_subdev;
-
- struct fasync_struct *async_queue;
-
- int (*open)(struct comedi_device *dev);
- void (*close)(struct comedi_device *dev);
-};
-
-/*
- * function prototypes
- */
-
-void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
-
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
-int comedi_dev_put(struct comedi_device *dev);
-
-bool comedi_is_subdevice_running(struct comedi_subdevice *s);
-
-void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
-void comedi_set_spriv_auto_free(struct comedi_subdevice *s);
-
-int comedi_check_chanlist(struct comedi_subdevice *s,
- int n,
- unsigned int *chanlist);
-
-/* range stuff */
-
-#define RANGE(a, b) {(a)*1e6, (b)*1e6, 0}
-#define RANGE_ext(a, b) {(a)*1e6, (b)*1e6, RF_EXTERNAL}
-#define RANGE_mA(a, b) {(a)*1e6, (b)*1e6, UNIT_mA}
-#define RANGE_unitless(a, b) {(a)*1e6, (b)*1e6, 0}
-#define BIP_RANGE(a) {-(a)*1e6, (a)*1e6, 0}
-#define UNI_RANGE(a) {0, (a)*1e6, 0}
-
-extern const struct comedi_lrange range_bipolar10;
-extern const struct comedi_lrange range_bipolar5;
-extern const struct comedi_lrange range_bipolar2_5;
-extern const struct comedi_lrange range_unipolar10;
-extern const struct comedi_lrange range_unipolar5;
-extern const struct comedi_lrange range_unipolar2_5;
-extern const struct comedi_lrange range_0_20mA;
-extern const struct comedi_lrange range_4_20mA;
-extern const struct comedi_lrange range_0_32mA;
-extern const struct comedi_lrange range_unknown;
-
-#define range_digital range_unipolar5
-
-#if __GNUC__ >= 3
-#define GCC_ZERO_LENGTH_ARRAY
-#else
-#define GCC_ZERO_LENGTH_ARRAY 0
-#endif
-
-/**
- * struct comedi_lrange - Describes a COMEDI range table
- * @length: Number of entries in the range table.
- * @range: Array of &struct comedi_krange, one for each range.
- *
- * Each element of @range[] describes the minimum and maximum of the physical
- * range and the type of units. Typically, the type of unit is %UNIT_volt
- * (i.e. volts) and the minimum and maximum are in millionths of a volt.
- * There may also be a flag that indicates the minimum and maximum are merely
- * scale factors for an unknown, external reference.
- */
-struct comedi_lrange {
- int length;
- struct comedi_krange range[GCC_ZERO_LENGTH_ARRAY];
-};
-
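A range table built with the helper macros above might look like the following sketch (the name and the particular ranges are invented); each entry stores its limits in millionths of a volt or milliamp:

static const struct comedi_lrange foo_ai_range = {
	4, {
		BIP_RANGE(10),	/* -10 V to +10 V */
		BIP_RANGE(5),	/*  -5 V to  +5 V */
		UNI_RANGE(10),	/*   0 V to +10 V */
		UNI_RANGE(5)	/*   0 V to  +5 V */
	}
};

A subdevice would then reference it with s->range_table = &foo_ai_range;.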
-/**
- * comedi_range_is_bipolar() - Test if subdevice range is bipolar
- * @s: COMEDI subdevice.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is bipolar by checking whether its minimum value
- * is negative.
- *
- * Assumes @range is valid. Does not work for subdevices using a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is bipolar.
- * %false if the range is unipolar.
- */
-static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s,
- unsigned int range)
-{
- return s->range_table->range[range].min < 0;
-}
-
-/**
- * comedi_range_is_unipolar() - Test if subdevice range is unipolar
- * @s: COMEDI subdevice.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is unipolar by checking whether its minimum value
- * is at least 0.
- *
- * Assumes @range is valid. Does not work for subdevices using a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is unipolar.
- * %false if the range is bipolar.
- */
-static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
- unsigned int range)
-{
- return s->range_table->range[range].min >= 0;
-}
-
-/**
- * comedi_range_is_external() - Test if subdevice range is external
- * @s: COMEDI subdevice.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is externally referenced by checking whether its
- * %RF_EXTERNAL flag is set.
- *
- * Assumes @range is valid. Does not work for subdevices using a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is external.
- * %false if the range is internal.
- */
-static inline bool comedi_range_is_external(struct comedi_subdevice *s,
- unsigned int range)
-{
- return !!(s->range_table->range[range].flags & RF_EXTERNAL);
-}
-
-/**
- * comedi_chan_range_is_bipolar() - Test if channel-specific range is bipolar
- * @s: COMEDI subdevice.
- * @chan: The channel number.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is bipolar by checking whether its minimum value
- * is negative.
- *
- * Assumes @chan and @range are valid. Only works for subdevices with a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is bipolar.
- * %false if the range is unipolar.
- */
-static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int range)
-{
- return s->range_table_list[chan]->range[range].min < 0;
-}
-
-/**
- * comedi_chan_range_is_unipolar() - Test if channel-specific range is unipolar
- * @s: COMEDI subdevice.
- * @chan: The channel number.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is unipolar by checking whether its minimum value
- * is at least 0.
- *
- * Assumes @chan and @range are valid. Only works for subdevices with a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is unipolar.
- * %false if the range is bipolar.
- */
-static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int range)
-{
- return s->range_table_list[chan]->range[range].min >= 0;
-}
-
-/**
- * comedi_chan_range_is_external() - Test if channel-specific range is external
- * @s: COMEDI subdevice.
- * @chan: The channel number.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is externally referenced by checking whether its
- * %RF_EXTERNAL flag is set.
- *
- * Assumes @chan and @range are valid. Only works for subdevices with a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is external.
- * %false if the range is internal.
- */
-static inline bool comedi_chan_range_is_external(struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int range)
-{
- return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL);
-}
-
-/**
- * comedi_offset_munge() - Convert between offset binary and 2's complement
- * @s: COMEDI subdevice.
- * @val: Value to be converted.
- *
- * Toggles the highest bit of a sample value to toggle between offset binary
- * and 2's complement. Assumes that @s->maxdata is a power of 2 minus 1.
- *
- * Return: The converted value.
- */
-static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
- unsigned int val)
-{
- return val ^ s->maxdata ^ (s->maxdata >> 1);
-}
-
-/**
- * comedi_bytes_per_sample() - Determine subdevice sample size
- * @s: COMEDI subdevice.
- *
- * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
- * whether the %SDF_LSAMPL subdevice flag is set or not.
- *
- * Return: The subdevice sample size.
- */
-static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
-{
- return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
-}
-
-/**
- * comedi_sample_shift() - Determine log2 of subdevice sample size
- * @s: COMEDI subdevice.
- *
- * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
- * whether the %SDF_LSAMPL subdevice flag is set or not. The log2 of the
- * sample size will be 2 or 1 and can be used as the right operand of a
- * bit-shift operator to multiply or divide something by the sample size.
- *
- * Return: log2 of the subdevice sample size.
- */
-static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
-{
- return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
-}
-
-/**
- * comedi_bytes_to_samples() - Convert a number of bytes to a number of samples
- * @s: COMEDI subdevice.
- * @nbytes: Number of bytes
- *
- * Return: The number of bytes divided by the subdevice sample size.
- */
-static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- return nbytes >> comedi_sample_shift(s);
-}
-
-/**
- * comedi_samples_to_bytes() - Convert a number of samples to a number of bytes
- * @s: COMEDI subdevice.
- * @nsamples: Number of samples.
- *
- * Return: The number of samples multiplied by the subdevice sample size.
- * (Does not check for arithmetic overflow.)
- */
-static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
- unsigned int nsamples)
-{
- return nsamples << comedi_sample_shift(s);
-}
-
-/**
- * comedi_check_trigger_src() - Trivially validate a comedi_cmd trigger source
- * @src: Pointer to the trigger source to validate.
- * @flags: Bitmask of valid %TRIG_* for the trigger.
- *
- * This is used in "step 1" of the do_cmdtest functions of comedi drivers
- * to validate the comedi_cmd triggers. Masking @src against @flags
- * allows the userspace comedilib to pass all the comedi_cmd
- * triggers as %TRIG_ANY and get back a bitmask of the valid trigger sources.
- *
- * Return:
- * 0 if trigger sources in *@src are all supported.
- * -EINVAL if any trigger source in *@src is unsupported.
- */
-static inline int comedi_check_trigger_src(unsigned int *src,
- unsigned int flags)
-{
- unsigned int orig_src = *src;
-
- *src = orig_src & flags;
- if (*src == TRIG_INVALID || *src != orig_src)
- return -EINVAL;
- return 0;
-}
-
-/**
- * comedi_check_trigger_is_unique() - Make sure a trigger source is unique
- * @src: The trigger source to check.
- *
- * Return:
- * 0 if no more than one trigger source is set.
- * -EINVAL if more than one trigger source is set.
- */
-static inline int comedi_check_trigger_is_unique(unsigned int src)
-{
- /* this test is true if more than one _src bit is set */
- if ((src & (src - 1)) != 0)
- return -EINVAL;
- return 0;
-}
-
-/**
- * comedi_check_trigger_arg_is() - Trivially validate a trigger argument
- * @arg: Pointer to the trigger arg to validate.
- * @val: The value the argument should be.
- *
- * Forces *@arg to be @val.
- *
- * Return:
- * 0 if *@arg was already @val.
- * -EINVAL if *@arg differed from @val.
- */
-static inline int comedi_check_trigger_arg_is(unsigned int *arg,
- unsigned int val)
-{
- if (*arg != val) {
- *arg = val;
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * comedi_check_trigger_arg_min() - Trivially validate a trigger argument min
- * @arg: Pointer to the trigger arg to validate.
- * @val: The minimum value the argument should be.
- *
- * Forces *@arg to be at least @val, setting it to @val if necessary.
- *
- * Return:
- * 0 if *@arg was already at least @val.
- * -EINVAL if *@arg was less than @val.
- */
-static inline int comedi_check_trigger_arg_min(unsigned int *arg,
- unsigned int val)
-{
- if (*arg < val) {
- *arg = val;
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * comedi_check_trigger_arg_max() - Trivially validate a trigger argument max
- * @arg: Pointer to the trigger arg to validate.
- * @val: The maximum value the argument should be.
- *
- * Forces *@arg to be no more than @val, setting it to @val if necessary.
- *
- * Return:
- * 0 if *@arg was already no more than @val.
- * -EINVAL if *@arg was greater than @val.
- */
-static inline int comedi_check_trigger_arg_max(unsigned int *arg,
- unsigned int val)
-{
- if (*arg > val) {
- *arg = val;
- return -EINVAL;
- }
- return 0;
-}
-
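These trigger helpers are meant to be OR-ed together in the early steps of a (*do_cmdtest) handler. The sketch below shows only steps 1 to 3 for an invented analog input subdevice; the supported trigger sources and the 10 us minimum convert time are assumptions for the example, not requirements of the API:

static int foo_ai_cmdtest(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_cmd *cmd)
{
	int err = 0;

	/* Step 1 : check if triggers are trivially valid */
	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
	err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
	err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */
	err |= comedi_check_trigger_is_unique(cmd->stop_src);
	if (err)
		return 2;

	/* Step 3 : check if arguments are trivially valid */
	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
	err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
					   cmd->chanlist_len);
	if (err)
		return 3;

	return 0;
}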
-/*
- * Must set dev->hw_dev if you wish to DMA directly into COMEDI's buffer.
- * Also useful for retrieving a previously configured hardware device of
- * known bus type. Set automatically for auto-configured devices.
- * Automatically set to NULL when detaching the hardware device.
- */
-int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev);
-
-/**
- * comedi_buf_n_bytes_ready - Determine amount of unread data in buffer
- * @s: COMEDI subdevice.
- *
- * Determines the number of bytes of unread data in the asynchronous
- * acquisition data buffer for a subdevice. The data in question might not
- * have been fully "munged" yet.
- *
- * Return: The amount of unread data in bytes.
- */
-static inline unsigned int comedi_buf_n_bytes_ready(struct comedi_subdevice *s)
-{
- return s->async->buf_write_count - s->async->buf_read_count;
-}
-
-unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n);
-unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n);
-
-unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
-unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
-unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
-
-unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
- const void *data, unsigned int nsamples);
-unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
- void *data, unsigned int nsamples);
-
-/* drivers.c - general comedi driver functions */
-
-#define COMEDI_TIMEOUT_MS 1000
-
-int comedi_timeout(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *,
- int (*cb)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned long context),
- unsigned long context);
-
-unsigned int comedi_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *s);
-
-int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *data,
- unsigned int mask);
-unsigned int comedi_dio_update_state(struct comedi_subdevice *,
- unsigned int *data);
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
-unsigned int comedi_nscans_left(struct comedi_subdevice *s,
- unsigned int nscans);
-unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
- unsigned int nsamples);
-void comedi_inc_scan_progress(struct comedi_subdevice *s,
- unsigned int num_bytes);
-
-void *comedi_alloc_devpriv(struct comedi_device *, size_t);
-int comedi_alloc_subdevices(struct comedi_device *, int);
-int comedi_alloc_subdev_readback(struct comedi_subdevice *);
-
-int comedi_readback_insn_read(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *data);
-
-int comedi_load_firmware(struct comedi_device *, struct device *,
- const char *name,
- int (*cb)(struct comedi_device *,
- const u8 *data, size_t size,
- unsigned long context),
- unsigned long context);
-
-int __comedi_request_region(struct comedi_device *,
- unsigned long start, unsigned long len);
-int comedi_request_region(struct comedi_device *,
- unsigned long start, unsigned long len);
-void comedi_legacy_detach(struct comedi_device *);
-
-int comedi_auto_config(struct device *, struct comedi_driver *,
- unsigned long context);
-void comedi_auto_unconfig(struct device *);
-
-int comedi_driver_register(struct comedi_driver *);
-void comedi_driver_unregister(struct comedi_driver *);
-
-/**
- * module_comedi_driver() - Helper macro for registering a comedi driver
- * @__comedi_driver: comedi_driver struct
- *
- * Helper macro for comedi drivers which do not do anything special in module
- * init/exit. This eliminates a lot of boilerplate. Each module may only use
- * this macro once, and calling it replaces module_init() and module_exit().
- */
-#define module_comedi_driver(__comedi_driver) \
- module_driver(__comedi_driver, comedi_driver_register, \
- comedi_driver_unregister)
-
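For the hypothetical foo_driver shown earlier, the module boilerplate then collapses to a single statement:

/* Replaces module_init()/module_exit(): register on load, unregister on unload. */
module_comedi_driver(foo_driver);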
-#endif /* _COMEDIDEV_H */
diff --git a/drivers/staging/comedi/comedilib.h b/drivers/staging/comedi/comedilib.h
deleted file mode 100644
index 56baf852ecf5..000000000000
--- a/drivers/staging/comedi/comedilib.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- linux/include/comedilib.h
- header file for kcomedilib
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-#ifndef _LINUX_COMEDILIB_H
-#define _LINUX_COMEDILIB_H
-
-struct comedi_device *comedi_open(const char *path);
-int comedi_close(struct comedi_device *dev);
-int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
- unsigned int chan, unsigned int *io);
-int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
- unsigned int chan, unsigned int io);
-int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
- unsigned int mask, unsigned int *bits,
- unsigned int base_channel);
-int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
- unsigned int subd);
-int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
-
-#endif
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
deleted file mode 100644
index aae94815b593..000000000000
--- a/drivers/staging/comedi/drivers.c
+++ /dev/null
@@ -1,1137 +0,0 @@
-/*
- * module/drivers.c
- * functions for manipulating drivers
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/dma-direction.h>
-#include <linux/interrupt.h>
-#include <linux/firmware.h>
-
-#include "comedidev.h"
-#include "comedi_internal.h"
-
-struct comedi_driver *comedi_drivers;
-/* protects access to comedi_drivers */
-DEFINE_MUTEX(comedi_drivers_list_lock);
-
-/**
- * comedi_set_hw_dev() - Set hardware device associated with COMEDI device
- * @dev: COMEDI device.
- * @hw_dev: Hardware device.
- *
- * For automatically configured COMEDI devices (resulting from a call to
- * comedi_auto_config() or one of its wrappers from the low-level COMEDI
- * driver), comedi_set_hw_dev() is called automatically by the COMEDI core
- * to associate the COMEDI device with the hardware device. It can also be
- * called directly by "legacy" low-level COMEDI drivers that rely on the
- * %COMEDI_DEVCONFIG ioctl to configure the hardware as long as the hardware
- * has a &struct device.
- *
- * If @dev->hw_dev is NULL, it gets a reference to @hw_dev and sets
- * @dev->hw_dev, otherwise, it does nothing. Calling it multiple times
- * with the same hardware device is not considered an error. If it gets
- * a reference to the hardware device, it will be automatically 'put' when
- * the device is detached from COMEDI.
- *
- * Returns 0 if @dev->hw_dev was NULL or the same as @hw_dev, otherwise
- * returns -EEXIST.
- */
-int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev)
-{
- if (hw_dev == dev->hw_dev)
- return 0;
- if (dev->hw_dev)
- return -EEXIST;
- dev->hw_dev = get_device(hw_dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_set_hw_dev);
-
-static void comedi_clear_hw_dev(struct comedi_device *dev)
-{
- put_device(dev->hw_dev);
- dev->hw_dev = NULL;
-}
-
-/**
- * comedi_alloc_devpriv() - Allocate memory for the device private data
- * @dev: COMEDI device.
- * @size: Size of the memory to allocate.
- *
- * The allocated memory is zero-filled. @dev->private points to it on
- * return. The memory will be automatically freed when the COMEDI device is
- * "detached".
- *
- * Returns a pointer to the allocated memory, or NULL on failure.
- */
-void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size)
-{
- dev->private = kzalloc(size, GFP_KERNEL);
- return dev->private;
-}
-EXPORT_SYMBOL_GPL(comedi_alloc_devpriv);
-
-/**
- * comedi_alloc_subdevices() - Allocate subdevices for COMEDI device
- * @dev: COMEDI device.
- * @num_subdevices: Number of subdevices to allocate.
- *
- * Allocates and initializes an array of &struct comedi_subdevice for the
- * COMEDI device. If successful, sets @dev->subdevices to point to the
- * first one and @dev->n_subdevices to the number.
- *
- * Returns 0 on success, -EINVAL if @num_subdevices is < 1, or -ENOMEM if
- * the memory allocation failed.
- */
-int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
-{
- struct comedi_subdevice *s;
- int i;
-
- if (num_subdevices < 1)
- return -EINVAL;
-
- s = kcalloc(num_subdevices, sizeof(*s), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
- dev->subdevices = s;
- dev->n_subdevices = num_subdevices;
-
- for (i = 0; i < num_subdevices; ++i) {
- s = &dev->subdevices[i];
- s->device = dev;
- s->index = i;
- s->async_dma_dir = DMA_NONE;
- spin_lock_init(&s->spin_lock);
- s->minor = -1;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_alloc_subdevices);
-
-/**
- * comedi_alloc_subdev_readback() - Allocate memory for the subdevice readback
- * @s: COMEDI subdevice.
- *
- * This is called by low-level COMEDI drivers to allocate an array to record
- * the last values written to a subdevice's analog output channels (at least
- * by the %INSN_WRITE instruction), to allow them to be read back by an
- * %INSN_READ instruction. It also provides a default handler for the
- * %INSN_READ instruction unless one has already been set.
- *
- * On success, @s->readback points to the first element of the array, which
- * is zero-filled. The low-level driver is responsible for updating its
- * contents. @s->insn_read will be set to comedi_readback_insn_read()
- * unless it is already non-NULL.
- *
- * Returns 0 on success, -EINVAL if the subdevice has no channels, or
- * -ENOMEM on allocation failure.
- */
-int comedi_alloc_subdev_readback(struct comedi_subdevice *s)
-{
- if (!s->n_chan)
- return -EINVAL;
-
- s->readback = kcalloc(s->n_chan, sizeof(*s->readback), GFP_KERNEL);
- if (!s->readback)
- return -ENOMEM;
-
- if (!s->insn_read)
- s->insn_read = comedi_readback_insn_read;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_alloc_subdev_readback);
-
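A compact, illustrative (*auto_attach) handler showing how the allocation helpers fit together; the private structure, the single analog output subdevice and its write handler are all invented for the sketch:

struct foo_private {
	unsigned int cfg;		/* invented device state */
};

static int foo_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct foo_private *devpriv;
	struct comedi_subdevice *s;
	int ret;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	/* Analog output subdevice with last-written-value readback */
	s = &dev->subdevices[0];
	s->type		= COMEDI_SUBD_AO;
	s->subdev_flags	= SDF_WRITABLE;
	s->n_chan	= 2;
	s->maxdata	= 0xffff;
	s->range_table	= &range_bipolar10;
	s->insn_write	= foo_ao_insn_write;	/* invented handler */

	/* s->insn_read will default to comedi_readback_insn_read() */
	return comedi_alloc_subdev_readback(s);
}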
-static void comedi_device_detach_cleanup(struct comedi_device *dev)
-{
- int i;
- struct comedi_subdevice *s;
-
- if (dev->subdevices) {
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (comedi_can_auto_free_spriv(s))
- kfree(s->private);
- comedi_free_subdevice_minor(s);
- if (s->async) {
- comedi_buf_alloc(dev, s, 0);
- kfree(s->async);
- }
- kfree(s->readback);
- }
- kfree(dev->subdevices);
- dev->subdevices = NULL;
- dev->n_subdevices = 0;
- }
- kfree(dev->private);
- kfree(dev->pacer);
- dev->private = NULL;
- dev->pacer = NULL;
- dev->driver = NULL;
- dev->board_name = NULL;
- dev->board_ptr = NULL;
- dev->mmio = NULL;
- dev->iobase = 0;
- dev->iolen = 0;
- dev->ioenabled = false;
- dev->irq = 0;
- dev->read_subdev = NULL;
- dev->write_subdev = NULL;
- dev->open = NULL;
- dev->close = NULL;
- comedi_clear_hw_dev(dev);
-}
-
-void comedi_device_detach(struct comedi_device *dev)
-{
- comedi_device_cancel_all(dev);
- down_write(&dev->attach_lock);
- dev->attached = false;
- dev->detach_count++;
- if (dev->driver)
- dev->driver->detach(dev);
- comedi_device_detach_cleanup(dev);
- up_write(&dev->attach_lock);
-}
-
-static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- return -EINVAL;
-}
-
-int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return -EINVAL;
-}
-
-/**
- * comedi_readback_insn_read() - A generic (*insn_read) for subdevice readback.
- * @dev: COMEDI device.
- * @s: COMEDI subdevice.
- * @insn: COMEDI instruction.
- * @data: Pointer to return the readback data.
- *
- * Handles the %INSN_READ instruction for subdevices that use the readback
- * array allocated by comedi_alloc_subdev_readback(). It may be used
- * directly as the subdevice's handler (@s->insn_read) or called via a
- * wrapper.
- *
- * @insn->n is normally 1, which will read a single value. If higher, the
- * same element of the readback array will be read multiple times.
- *
- * Returns @insn->n on success, or -EINVAL if @s->readback is NULL.
- */
-int comedi_readback_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- if (!s->readback)
- return -EINVAL;
-
- for (i = 0; i < insn->n; i++)
- data[i] = s->readback[chan];
-
- return insn->n;
-}
-EXPORT_SYMBOL_GPL(comedi_readback_insn_read);
-
-/**
- * comedi_timeout() - Busy-wait for a driver condition to occur
- * @dev: COMEDI device.
- * @s: COMEDI subdevice.
- * @insn: COMEDI instruction.
- * @cb: Callback to check for the condition.
- * @context: Private context from the driver.
- *
- * Busy-waits for up to a second (%COMEDI_TIMEOUT_MS) for the condition or
- * some error (other than -EBUSY) to occur. The parameters @dev, @s, @insn,
- * and @context are passed to the callback function, which returns -EBUSY to
- * continue waiting or some other value to stop waiting (generally 0 if the
- * condition occurred, or some error value).
- *
- * Returns -ETIMEDOUT if timed out, otherwise the return value from the
- * callback function.
- */
-int comedi_timeout(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- int (*cb)(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context),
- unsigned long context)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(COMEDI_TIMEOUT_MS);
- int ret;
-
- while (time_before(jiffies, timeout)) {
- ret = cb(dev, s, insn, context);
- if (ret != -EBUSY)
- return ret; /* success (0) or non EBUSY errno */
- cpu_relax();
- }
- return -ETIMEDOUT;
-}
-EXPORT_SYMBOL_GPL(comedi_timeout);
-
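A common use is polling for an end-of-conversion bit from an (*insn_read) handler. Everything hardware-specific below (the register offset, the status bit, and the use of port I/O) is invented for the sketch:

#define FOO_STATUS_REG	0x02		/* invented register offset */
#define FOO_STATUS_EOC	BIT(7)		/* invented end-of-conversion bit */

static int foo_ai_eoc(struct comedi_device *dev,
		      struct comedi_subdevice *s,
		      struct comedi_insn *insn,
		      unsigned long context)
{
	unsigned int status;

	status = inb(dev->iobase + FOO_STATUS_REG);
	if (status & FOO_STATUS_EOC)
		return 0;		/* condition met, stop waiting */
	return -EBUSY;			/* not ready yet, keep polling */
}

/*
 * In the (*insn_read) handler, after starting a conversion:
 *
 *	ret = comedi_timeout(dev, s, insn, foo_ai_eoc, 0);
 *	if (ret)
 *		return ret;
 */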
-/**
- * comedi_dio_insn_config() - Boilerplate (*insn_config) for DIO subdevices
- * @dev: COMEDI device.
- * @s: COMEDI subdevice.
- * @insn: COMEDI instruction.
- * @data: Instruction parameters and return data.
- * @mask: io_bits mask for grouped channels, or 0 for single channel.
- *
- * If @mask is 0, it is replaced with a single-bit mask corresponding to the
- * channel number specified by @insn->chanspec. Otherwise, @mask
- * corresponds to a group of channels (which should include the specified
- * channel) that are always configured together as inputs or outputs.
- *
- * Partially handles the %INSN_CONFIG_DIO_INPUT, %INSN_CONFIG_DIO_OUTPUT,
- * and %INSN_CONFIG_DIO_QUERY instructions. The first two update
- * @s->io_bits to record the directions of the masked channels. The last
- * one sets @data[1] to the current direction of the group of channels
- * (%COMEDI_INPUT or %COMEDI_OUTPUT) as recorded in @s->io_bits.
- *
- * The caller is responsible for updating the DIO direction in the hardware
- * registers if this function returns 0.
- *
- * Returns 0 for a %INSN_CONFIG_DIO_INPUT or %INSN_CONFIG_DIO_OUTPUT
- * instruction, @insn->n (> 0) for a %INSN_CONFIG_DIO_QUERY instruction, or
- * -EINVAL for some other instruction.
- */
-int comedi_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data,
- unsigned int mask)
-{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
-
- if (!mask)
- mask = chan_mask;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
-
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
-
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
-
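A driver's (*insn_config) handler typically lets this helper maintain s->io_bits and only touches the hardware when a direction actually needs programming; the 8-channel grouping and the FOO_DIR_REG register are invented:

static int foo_dio_insn_config(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	int ret;

	/* All 8 channels share one direction register in this example */
	ret = comedi_dio_insn_config(dev, s, insn, data, 0xff);
	if (ret)
		return ret;	/* QUERY result (insn->n) or an error */

	/* INPUT/OUTPUT: push the updated s->io_bits to the hardware */
	outb(s->io_bits, dev->iobase + FOO_DIR_REG);	/* invented register */

	return insn->n;
}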
-/**
- * comedi_dio_update_state() - Update the internal state of DIO subdevices
- * @s: COMEDI subdevice.
- * @data: The channel mask and bits to update.
- *
- * Updates @s->state which holds the internal state of the outputs for DIO
- * or DO subdevices (up to 32 channels). @data[0] contains a bit-mask of
- * the channels to be updated. @data[1] contains a bit-mask of those
- * channels to be set to '1'. The caller is responsible for updating the
- * outputs in hardware according to @s->state. As a minimum, the channels
- * in the returned bit-mask need to be updated.
- *
- * Returns the update mask (@data[0]) with non-existent channels removed.
- */
-unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
- unsigned int *data)
-{
- unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
- : 0xffffffff;
- unsigned int mask = data[0] & chanmask;
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
- }
-
- return mask;
-}
-EXPORT_SYMBOL_GPL(comedi_dio_update_state);
-
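The matching (*insn_bits) pattern for a digital output subdevice; only the FOO_DO_REG register is invented, the rest is the conventional read-back of s->state:

static int foo_do_insn_bits(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn,
			    unsigned int *data)
{
	/* Update s->state; write the hardware only if something changed */
	if (comedi_dio_update_state(s, data))
		outb(s->state, dev->iobase + FOO_DO_REG);	/* invented reg */

	data[1] = s->state;	/* report the current output state */

	return insn->n;
}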
-/**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
- * @s: COMEDI subdevice.
- *
- * Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
- *
- * For digital input, output or input/output subdevices, samples for
- * multiple channels are assumed to be packed into one or more unsigned
- * short or unsigned int values according to the subdevice's %SDF_LSAMPL
- * flag. For other types of subdevice, samples are assumed to occupy a
- * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
- *
- * Returns the overall scan length in bytes.
- */
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int num_samples;
- unsigned int bits_per_sample;
-
- switch (s->type) {
- case COMEDI_SUBD_DI:
- case COMEDI_SUBD_DO:
- case COMEDI_SUBD_DIO:
- bits_per_sample = 8 * comedi_bytes_per_sample(s);
- num_samples = DIV_ROUND_UP(cmd->scan_end_arg, bits_per_sample);
- break;
- default:
- num_samples = cmd->scan_end_arg;
- break;
- }
- return comedi_samples_to_bytes(s, num_samples);
-}
-EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
-
-/**
- * comedi_nscans_left() - Return the number of scans left in the command
- * @s: COMEDI subdevice.
- * @nscans: The expected number of scans or 0 for all available scans.
- *
- * If @nscans is 0, it is set to the number of scans available in the
- * async buffer.
- *
- * If the async command has a stop_src of %TRIG_COUNT, the @nscans will be
- * checked against the number of scans remaining to complete the command.
- *
- * The return value will then be either the expected number of scans or the
- * number of scans remaining to complete the command, whichever is fewer.
- */
-unsigned int comedi_nscans_left(struct comedi_subdevice *s,
- unsigned int nscans)
-{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
-
- if (nscans == 0) {
- unsigned int nbytes = comedi_buf_read_n_available(s);
-
- nscans = nbytes / comedi_bytes_per_scan(s);
- }
-
- if (cmd->stop_src == TRIG_COUNT) {
- unsigned int scans_left = 0;
-
- if (async->scans_done < cmd->stop_arg)
- scans_left = cmd->stop_arg - async->scans_done;
-
- if (nscans > scans_left)
- nscans = scans_left;
- }
- return nscans;
-}
-EXPORT_SYMBOL_GPL(comedi_nscans_left);
-
-/**
- * comedi_nsamples_left() - Return the number of samples left in the command
- * @s: COMEDI subdevice.
- * @nsamples: The expected number of samples.
- *
- * Returns the number of samples remaining to complete the command, or the
- * specified expected number of samples (@nsamples), whichever is fewer.
- */
-unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
- unsigned int nsamples)
-{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
-
- if (cmd->stop_src == TRIG_COUNT) {
- /* +1 to force comedi_nscans_left() to return the scans left */
- unsigned int nscans = (nsamples / cmd->scan_end_arg) + 1;
- unsigned int scans_left = comedi_nscans_left(s, nscans);
- unsigned int scan_pos =
- comedi_bytes_to_samples(s, async->scan_progress);
- unsigned long long samples_left = 0;
-
- if (scans_left) {
- samples_left = ((unsigned long long)scans_left *
- cmd->scan_end_arg) - scan_pos;
- }
-
- if (samples_left < nsamples)
- nsamples = samples_left;
- }
- return nsamples;
-}
-EXPORT_SYMBOL_GPL(comedi_nsamples_left);
-
-/**
- * comedi_inc_scan_progress() - Update scan progress in asynchronous command
- * @s: COMEDI subdevice.
- * @num_bytes: Amount of data in bytes to increment scan progress.
- *
- * Increments the scan progress by the number of bytes specified by @num_bytes.
- * If the scan progress reaches or exceeds the scan length in bytes, reduce
- * it modulo the scan length in bytes and set the "end of scan" asynchronous
- * event flag (%COMEDI_CB_EOS) to be processed later.
- */
-void comedi_inc_scan_progress(struct comedi_subdevice *s,
- unsigned int num_bytes)
-{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int scan_length = comedi_bytes_per_scan(s);
-
- /* track the 'cur_chan' for non-SDF_PACKED subdevices */
- if (!(s->subdev_flags & SDF_PACKED)) {
- async->cur_chan += comedi_bytes_to_samples(s, num_bytes);
- async->cur_chan %= cmd->chanlist_len;
- }
-
- async->scan_progress += num_bytes;
- if (async->scan_progress >= scan_length) {
- unsigned int nscans = async->scan_progress / scan_length;
-
- if (async->scans_done < (UINT_MAX - nscans))
- async->scans_done += nscans;
- else
- async->scans_done = UINT_MAX;
-
- async->scan_progress %= scan_length;
- async->events |= COMEDI_CB_EOS;
- }
-}
-EXPORT_SYMBOL_GPL(comedi_inc_scan_progress);
-
-/**
- * comedi_handle_events() - Handle events and possibly stop acquisition
- * @dev: COMEDI device.
- * @s: COMEDI subdevice.
- *
- * Handles outstanding asynchronous acquisition event flags associated
- * with the subdevice. Call the subdevice's @s->cancel() handler if the
- * "end of acquisition", "error" or "overflow" event flags are set in order
- * to stop the acquisition at the driver level.
- *
- * Calls comedi_event() to further process the event flags, which may mark
- * the asynchronous command as no longer running, possibly terminated with
- * an error, and may wake up tasks.
- *
- * Returns a bit-mask of the handled events.
- */
-unsigned int comedi_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int events = s->async->events;
-
- if (events == 0)
- return events;
-
- if (events & COMEDI_CB_CANCEL_MASK)
- s->cancel(dev, s);
-
- comedi_event(dev, s);
-
- return events;
-}
-EXPORT_SYMBOL_GPL(comedi_handle_events);
-
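Tying the event flags together, an interrupt handler for an analog input command often follows this shape; the FIFO register and the assumption of one sample per interrupt are invented for the sketch:

static irqreturn_t foo_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned short sample;

	sample = inw(dev->iobase + FOO_AI_FIFO_REG);	/* invented register */
	comedi_buf_write_samples(s, &sample, 1);

	if (cmd->stop_src == TRIG_COUNT &&
	    s->async->scans_done >= cmd->stop_arg)
		s->async->events |= COMEDI_CB_EOA;

	comedi_handle_events(dev, s);	/* may call s->cancel() and wake readers */

	return IRQ_HANDLED;
}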
-static int insn_rw_emulate_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct comedi_insn new_insn;
- int ret;
- static const unsigned channels_per_bitfield = 32;
-
- unsigned chan = CR_CHAN(insn->chanspec);
- const unsigned base_bitfield_channel =
- (chan < channels_per_bitfield) ? 0 : chan;
- unsigned int new_data[2];
-
- memset(new_data, 0, sizeof(new_data));
- memset(&new_insn, 0, sizeof(new_insn));
- new_insn.insn = INSN_BITS;
- new_insn.chanspec = base_bitfield_channel;
- new_insn.n = 2;
- new_insn.subdev = insn->subdev;
-
- if (insn->insn == INSN_WRITE) {
- if (!(s->subdev_flags & SDF_WRITABLE))
- return -EINVAL;
- new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
- new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel))
- : 0; /* bits */
- }
-
- ret = s->insn_bits(dev, s, &new_insn, new_data);
- if (ret < 0)
- return ret;
-
- if (insn->insn == INSN_READ)
- data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1;
-
- return 1;
-}
-
-static int __comedi_device_postconfig_async(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async;
- unsigned int buf_size;
- int ret;
-
- if ((s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) == 0) {
- dev_warn(dev->class_dev,
- "async subdevices must support SDF_CMD_READ or SDF_CMD_WRITE\n");
- return -EINVAL;
- }
- if (!s->do_cmdtest) {
- dev_warn(dev->class_dev,
- "async subdevices must have a do_cmdtest() function\n");
- return -EINVAL;
- }
-
- async = kzalloc(sizeof(*async), GFP_KERNEL);
- if (!async)
- return -ENOMEM;
-
- init_waitqueue_head(&async->wait_head);
- s->async = async;
-
- async->max_bufsize = comedi_default_buf_maxsize_kb * 1024;
- buf_size = comedi_default_buf_size_kb * 1024;
- if (buf_size > async->max_bufsize)
- buf_size = async->max_bufsize;
-
- if (comedi_buf_alloc(dev, s, buf_size) < 0) {
- dev_warn(dev->class_dev, "Buffer allocation failed\n");
- return -ENOMEM;
- }
- if (s->buf_change) {
- ret = s->buf_change(dev, s);
- if (ret < 0)
- return ret;
- }
-
- comedi_alloc_subdevice_minor(s);
-
- return 0;
-}
-
-static int __comedi_device_postconfig(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
-
- if (s->type == COMEDI_SUBD_UNUSED)
- continue;
-
- if (s->type == COMEDI_SUBD_DO) {
- if (s->n_chan < 32)
- s->io_bits = (1 << s->n_chan) - 1;
- else
- s->io_bits = 0xffffffff;
- }
-
- if (s->len_chanlist == 0)
- s->len_chanlist = 1;
-
- if (s->do_cmd) {
- ret = __comedi_device_postconfig_async(dev, s);
- if (ret)
- return ret;
- }
-
- if (!s->range_table && !s->range_table_list)
- s->range_table = &range_unknown;
-
- if (!s->insn_read && s->insn_bits)
- s->insn_read = insn_rw_emulate_bits;
- if (!s->insn_write && s->insn_bits)
- s->insn_write = insn_rw_emulate_bits;
-
- if (!s->insn_read)
- s->insn_read = insn_inval;
- if (!s->insn_write)
- s->insn_write = insn_inval;
- if (!s->insn_bits)
- s->insn_bits = insn_inval;
- if (!s->insn_config)
- s->insn_config = insn_inval;
-
- if (!s->poll)
- s->poll = poll_invalid;
- }
-
- return 0;
-}
-
-/* do a little post-config cleanup */
-static int comedi_device_postconfig(struct comedi_device *dev)
-{
- int ret;
-
- ret = __comedi_device_postconfig(dev);
- if (ret < 0)
- return ret;
- down_write(&dev->attach_lock);
- dev->attached = true;
- up_write(&dev->attach_lock);
- return 0;
-}
-
-/*
- * Generic recognize function for drivers that register their supported
- * board names.
- *
- * 'driv->board_name' points to a 'const char *' member within the
- * zeroth element of an array of some private board information
- * structure, say 'struct foo_board' containing a member 'const char
- * *board_name' that is initialized to point to a board name string that
- * is one of the candidates matched against this function's 'name'
- * parameter.
- *
- * 'driv->offset' is the size of the private board information
- * structure, say 'sizeof(struct foo_board)', and 'driv->num_names' is
- * the length of the array of private board information structures.
- *
- * If one of the board names in the array of private board information
- * structures matches the name supplied to this function, the function
- * returns a pointer to the pointer to the board name, otherwise it
- * returns NULL. The return value ends up in the 'board_ptr' member of
- * a 'struct comedi_device' that the low-level comedi driver's
- * 'attach()' hook can convert to a pointer to a particular element of its
- * array of private board information structures by subtracting the
- * offset of the member that points to the board name. (No subtraction
- * is required if the board name pointer is the first member of the
- * private board information structure, which is generally the case.)
- */
-static void *comedi_recognize(struct comedi_driver *driv, const char *name)
-{
- char **name_ptr = (char **)driv->board_name;
- int i;
-
- for (i = 0; i < driv->num_names; i++) {
- if (strcmp(*name_ptr, name) == 0)
- return name_ptr;
- name_ptr = (void *)name_ptr + driv->offset;
- }
-
- return NULL;
-}
-
-static void comedi_report_boards(struct comedi_driver *driv)
-{
- unsigned int i;
- const char *const *name_ptr;
-
- pr_info("comedi: valid board names for %s driver are:\n",
- driv->driver_name);
-
- name_ptr = driv->board_name;
- for (i = 0; i < driv->num_names; i++) {
- pr_info(" %s\n", *name_ptr);
- name_ptr = (const char **)((char *)name_ptr + driv->offset);
- }
-
- if (driv->num_names == 0)
- pr_info(" %s\n", driv->driver_name);
-}
-
-/**
- * comedi_load_firmware() - Request and load firmware for a device
- * @dev: COMEDI device.
- * @device: Hardware device.
- * @name: The name of the firmware image.
- * @cb: Callback to upload the firmware image.
- * @context: Private context from the driver.
- *
- * Sends a firmware request for the hardware device and waits for it. Calls
- * the callback function to upload the firmware to the device, then releases
- * the firmware.
- *
- * Returns 0 on success, -EINVAL if @cb is NULL, or a negative error number
- * from the firmware request or the callback function.
- */
-int comedi_load_firmware(struct comedi_device *dev,
- struct device *device,
- const char *name,
- int (*cb)(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context),
- unsigned long context)
-{
- const struct firmware *fw;
- int ret;
-
- if (!cb)
- return -EINVAL;
-
- ret = request_firmware(&fw, name, device);
- if (ret == 0) {
- ret = cb(dev, fw->data, fw->size, context);
- release_firmware(fw);
- }
-
- return ret < 0 ? ret : 0;
-}
-EXPORT_SYMBOL_GPL(comedi_load_firmware);
-
-/**
- * __comedi_request_region() - Request an I/O region for a legacy driver
- * @dev: COMEDI device.
- * @start: Base address of the I/O region.
- * @len: Length of the I/O region.
- *
- * Requests the specified I/O port region which must start at a non-zero
- * address.
- *
- * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request
- * fails.
- */
-int __comedi_request_region(struct comedi_device *dev,
- unsigned long start, unsigned long len)
-{
- if (!start) {
- dev_warn(dev->class_dev,
-			 "%s: an I/O base address must be specified\n",
- dev->board_name);
- return -EINVAL;
- }
-
- if (!request_region(start, len, dev->board_name)) {
- dev_warn(dev->class_dev, "%s: I/O port conflict (%#lx,%lu)\n",
- dev->board_name, start, len);
- return -EIO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__comedi_request_region);
-
-/**
- * comedi_request_region() - Request an I/O region for a legacy driver
- * @dev: COMEDI device.
- * @start: Base address of the I/O region.
- * @len: Length of the I/O region.
- *
- * Requests the specified I/O port region which must start at a non-zero
- * address.
- *
- * On success, @dev->iobase is set to the base address of the region and
- * @dev->iolen is set to its length.
- *
- * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request
- * fails.
- */
-int comedi_request_region(struct comedi_device *dev,
- unsigned long start, unsigned long len)
-{
- int ret;
-
- ret = __comedi_request_region(dev, start, len);
- if (ret == 0) {
- dev->iobase = start;
- dev->iolen = len;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(comedi_request_region);
-
-/**
- * comedi_legacy_detach() - A generic (*detach) function for legacy drivers
- * @dev: COMEDI device.
- *
- * This is a simple, generic 'detach' handler for legacy COMEDI devices that
- * just use a single I/O port region and possibly an IRQ and that don't need
- * any special clean-up for their private device or subdevice storage. It
- * can also be called by a driver-specific 'detach' handler.
- *
- * If @dev->irq is non-zero, the IRQ will be freed. If @dev->iobase and
- * @dev->iolen are both non-zero, the I/O port region will be released.
- */
-void comedi_legacy_detach(struct comedi_device *dev)
-{
- if (dev->irq) {
- free_irq(dev->irq, dev);
- dev->irq = 0;
- }
- if (dev->iobase && dev->iolen) {
- release_region(dev->iobase, dev->iolen);
- dev->iobase = 0;
- dev->iolen = 0;
- }
-}
-EXPORT_SYMBOL_GPL(comedi_legacy_detach);
-
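A legacy (*attach) handler pairs naturally with comedi_legacy_detach(); the region length, the meaning of the config options, and the interrupt handler below are assumptions made for the sketch:

static int foo_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	int ret;

	/* options[0] conventionally carries the I/O base address */
	ret = comedi_request_region(dev, it->options[0], 0x10);
	if (ret)
		return ret;

	/* options[1] conventionally carries the IRQ; 0 means "no IRQ" */
	if (it->options[1]) {
		ret = request_irq(it->options[1], foo_interrupt, 0,
				  dev->board_name, dev);
		if (ret == 0)
			dev->irq = it->options[1];
	}

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	/* ... subdevice setup as in the earlier sketches ... */

	return 0;
}

/* In the driver definition: .detach = comedi_legacy_detach, */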
-int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct comedi_driver *driv;
- int ret;
-
- if (dev->attached)
- return -EBUSY;
-
- mutex_lock(&comedi_drivers_list_lock);
- for (driv = comedi_drivers; driv; driv = driv->next) {
- if (!try_module_get(driv->module))
- continue;
- if (driv->num_names) {
- dev->board_ptr = comedi_recognize(driv, it->board_name);
- if (dev->board_ptr)
- break;
- } else if (strcmp(driv->driver_name, it->board_name) == 0) {
- break;
- }
- module_put(driv->module);
- }
- if (!driv) {
- /* recognize has failed if we get here */
- /* report valid board names before returning error */
- for (driv = comedi_drivers; driv; driv = driv->next) {
- if (!try_module_get(driv->module))
- continue;
- comedi_report_boards(driv);
- module_put(driv->module);
- }
- ret = -EIO;
- goto out;
- }
- if (!driv->attach) {
- /* driver does not support manual configuration */
- dev_warn(dev->class_dev,
- "driver '%s' does not support attach using comedi_config\n",
- driv->driver_name);
- module_put(driv->module);
- ret = -EIO;
- goto out;
- }
- dev->driver = driv;
- dev->board_name = dev->board_ptr ? *(const char **)dev->board_ptr
- : dev->driver->driver_name;
- ret = driv->attach(dev, it);
- if (ret >= 0)
- ret = comedi_device_postconfig(dev);
- if (ret < 0) {
- comedi_device_detach(dev);
- module_put(driv->module);
- }
- /* On success, the driver module count has been incremented. */
-out:
- mutex_unlock(&comedi_drivers_list_lock);
- return ret;
-}
-
-/**
- * comedi_auto_config() - Create a COMEDI device for a hardware device
- * @hardware_device: Hardware device.
- * @driver: COMEDI low-level driver for the hardware device.
- * @context: Driver context for the auto_attach handler.
- *
- * Allocates a new COMEDI device for the hardware device and calls the
- * low-level driver's 'auto_attach' handler to set-up the hardware and
- * allocate the COMEDI subdevices. Additional "post-configuration" setting
- * up is performed on successful return from the 'auto_attach' handler.
- * If the 'auto_attach' handler fails, the low-level driver's 'detach'
- * handler will be called as part of the clean-up.
- *
- * This is usually called from a wrapper function in a bus-specific COMEDI
- * module, which in turn is usually called from a bus device 'probe'
- * function in the low-level driver.
- *
- * Returns 0 on success, -EINVAL if the parameters are invalid or the
- * post-configuration determines the driver has set the COMEDI device up
- * incorrectly, -ENOMEM if memory allocation failed, -EBUSY if no free
- * COMEDI minor device numbers are available, or some negative error number
- * returned by the driver's 'auto_attach' handler.
- */
-int comedi_auto_config(struct device *hardware_device,
- struct comedi_driver *driver, unsigned long context)
-{
- struct comedi_device *dev;
- int ret;
-
- if (!hardware_device) {
- pr_warn("BUG! comedi_auto_config called with NULL hardware_device\n");
- return -EINVAL;
- }
- if (!driver) {
- dev_warn(hardware_device,
- "BUG! comedi_auto_config called with NULL comedi driver\n");
- return -EINVAL;
- }
-
- if (!driver->auto_attach) {
- dev_warn(hardware_device,
- "BUG! comedi driver '%s' has no auto_attach handler\n",
- driver->driver_name);
- return -EINVAL;
- }
-
- dev = comedi_alloc_board_minor(hardware_device);
- if (IS_ERR(dev)) {
- dev_warn(hardware_device,
- "driver '%s' could not create device.\n",
- driver->driver_name);
- return PTR_ERR(dev);
- }
- /* Note: comedi_alloc_board_minor() locked dev->mutex. */
-
- dev->driver = driver;
- dev->board_name = dev->driver->driver_name;
- ret = driver->auto_attach(dev, context);
- if (ret >= 0)
- ret = comedi_device_postconfig(dev);
- mutex_unlock(&dev->mutex);
-
- if (ret < 0) {
- dev_warn(hardware_device,
- "driver '%s' failed to auto-configure device.\n",
- driver->driver_name);
- comedi_release_hardware_device(hardware_device);
- } else {
- /*
- * class_dev should be set properly here
- * after a successful auto config
- */
- dev_info(dev->class_dev,
- "driver '%s' has successfully auto-configured '%s'.\n",
- driver->driver_name, dev->board_name);
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(comedi_auto_config);
-
-/**
- * comedi_auto_unconfig() - Unconfigure auto-allocated COMEDI device
- * @hardware_device: Hardware device previously passed to
- * comedi_auto_config().
- *
- * Cleans up and eventually destroys the COMEDI device allocated by
- * comedi_auto_config() for the same hardware device. As part of this
- * clean-up, the low-level COMEDI driver's 'detach' handler will be called.
- * (The COMEDI device itself will persist in an unattached state if it is
- * still open, until it is released, and any mmapped buffers will persist
- * until they are munmapped.)
- *
- * This is usually called from a wrapper function in a bus-specific COMEDI
- * module, which in turn is usually set as the bus device 'remove' function
- * in the low-level COMEDI driver.
- */
-void comedi_auto_unconfig(struct device *hardware_device)
-{
- if (!hardware_device)
- return;
- comedi_release_hardware_device(hardware_device);
-}
-EXPORT_SYMBOL_GPL(comedi_auto_unconfig);
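
[Editor's note] The "wrapper function in a bus-specific COMEDI module" referred to above is normally just a thin forwarding call that picks the generic struct device out of the bus-specific device. A rough sketch for a hypothetical bus type (my_bus_device and its dev member are illustrative); the in-tree PCI and USB wrappers follow the same shape around comedi_auto_config() and comedi_auto_unconfig().

static int my_bus_auto_config(struct my_bus_device *bdev,
			      struct comedi_driver *driver,
			      unsigned long context)
{
	/* hand the embedded struct device to the COMEDI core */
	return comedi_auto_config(&bdev->dev, driver, context);
}

static void my_bus_auto_unconfig(struct my_bus_device *bdev)
{
	comedi_auto_unconfig(&bdev->dev);
}
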
-
-/**
- * comedi_driver_register() - Register a low-level COMEDI driver
- * @driver: Low-level COMEDI driver.
- *
- * The low-level COMEDI driver is added to the list of registered COMEDI
- * drivers. This is used by the handler for the "/proc/comedi" file and is
- * also used by the handler for the %COMEDI_DEVCONFIG ioctl to configure
- * "legacy" COMEDI devices (for those low-level drivers that support it).
- *
- * Returns 0.
- */
-int comedi_driver_register(struct comedi_driver *driver)
-{
- mutex_lock(&comedi_drivers_list_lock);
- driver->next = comedi_drivers;
- comedi_drivers = driver;
- mutex_unlock(&comedi_drivers_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_driver_register);
-
-/**
- * comedi_driver_unregister() - Unregister a low-level COMEDI driver
- * @driver: Low-level COMEDI driver.
- *
- * The low-level COMEDI driver is removed from the list of registered COMEDI
- * drivers. Detaches any COMEDI devices attached to the driver, which will
- * result in the low-level driver's 'detach' handler being called for those
- * devices before this function returns.
- */
-void comedi_driver_unregister(struct comedi_driver *driver)
-{
- struct comedi_driver *prev;
- int i;
-
- /* unlink the driver */
- mutex_lock(&comedi_drivers_list_lock);
- if (comedi_drivers == driver) {
- comedi_drivers = driver->next;
- } else {
- for (prev = comedi_drivers; prev->next; prev = prev->next) {
- if (prev->next == driver) {
- prev->next = driver->next;
- break;
- }
- }
- }
- mutex_unlock(&comedi_drivers_list_lock);
-
- /* check for devices using this driver */
- for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device *dev = comedi_dev_get_from_minor(i);
-
- if (!dev)
- continue;
-
- mutex_lock(&dev->mutex);
- if (dev->attached && dev->driver == driver) {
- if (dev->use_count)
- dev_warn(dev->class_dev,
- "BUG! detaching device with use_count=%d\n",
- dev->use_count);
- comedi_device_detach(dev);
- }
- mutex_unlock(&dev->mutex);
- comedi_dev_put(dev);
- }
-}
-EXPORT_SYMBOL_GPL(comedi_driver_unregister);
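
[Editor's note] For completeness, the module_comedi_driver() helper used by most of the drivers removed below is shorthand for registering and unregistering from module init/exit with the two functions above. A minimal sketch (the attach/detach handlers a real driver would supply are omitted here):

static struct comedi_driver my_driver = {
	.driver_name	= "my_driver",
	.module		= THIS_MODULE,
	/* .attach/.detach handlers omitted from this sketch */
};

static int __init my_driver_init(void)
{
	return comedi_driver_register(&my_driver);
}
module_init(my_driver_init);

static void __exit my_driver_exit(void)
{
	comedi_driver_unregister(&my_driver);
}
module_exit(my_driver_exit);

/* module_comedi_driver(my_driver) expands to an equivalent init/exit pair. */
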
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
deleted file mode 100644
index b79d3764a8a0..000000000000
--- a/drivers/staging/comedi/drivers/8255.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * comedi/drivers/8255.c
- * Driver for 8255
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: 8255
- * Description: generic 8255 support
- * Devices: [standard] 8255 (8255)
- * Author: ds
- * Status: works
- * Updated: Fri, 7 Jun 2002 12:56:45 -0700
- *
- * The classic in digital I/O. The 8255 appears in Comedi as a single
- * digital I/O subdevice with 24 channels. Channel 0 corresponds
- * to the 8255's port A, bit 0; channel 23 corresponds to port C, bit
- * 7. Direction configuration is done in blocks, with channels 0-7,
- * 8-15, 16-19, and 20-23 making up the 4 blocks. The only 8255 mode
- * supported is mode 0.
- *
- * You should enable compilation of this driver if you plan to use a board
- * that has an 8255 chip. For multifunction boards, the main driver will
- * configure the 8255 subdevice automatically.
- *
- * This driver also works independently with ISA and PCI cards that
- * directly map the 8255 registers to I/O ports, including cards with
- * multiple 8255 chips. To configure the driver for such a card, the
- * option list should be a list of the I/O port bases for each of the
- * 8255 chips. For example,
- *
- * comedi_config /dev/comedi0 8255 0x200,0x204,0x208,0x20c
- *
- * Note that most PCI 8255 boards do NOT work with this driver, and
- * need a separate driver as a wrapper. For those that do work, the
- * I/O port base address can be found in the output of 'lspci -v'.
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-
-static int dev_8255_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- unsigned long iobase;
- int ret;
- int i;
-
- for (i = 0; i < COMEDI_NDEVCONFOPTS; i++) {
- iobase = it->options[i];
- if (!iobase)
- break;
- }
- if (i == 0) {
- dev_warn(dev->class_dev, "no devices specified\n");
- return -EINVAL;
- }
-
- ret = comedi_alloc_subdevices(dev, i);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- iobase = it->options[i];
-
- /*
- * __comedi_request_region() does not set dev->iobase.
- *
- * For 8255 devices that are manually attached using
- * comedi_config, the 'iobase' is the actual I/O port
- * base address of the chip.
- */
- ret = __comedi_request_region(dev, iobase, I8255_SIZE);
- if (ret) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- ret = subdev_8255_init(dev, s, NULL, iobase);
- if (ret) {
- /*
- * Release the I/O port region here, as the
- * "detach" handler cannot find it.
- */
- release_region(iobase, I8255_SIZE);
- s->type = COMEDI_SUBD_UNUSED;
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static void dev_8255_detach(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int i;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (s->type != COMEDI_SUBD_UNUSED) {
- unsigned long regbase = subdev_8255_regbase(s);
-
- release_region(regbase, I8255_SIZE);
- }
- }
-}
-
-static struct comedi_driver dev_8255_driver = {
- .driver_name = "8255",
- .module = THIS_MODULE,
- .attach = dev_8255_attach,
- .detach = dev_8255_detach,
-};
-module_comedi_driver(dev_8255_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for standalone 8255 devices");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/8255.h b/drivers/staging/comedi/drivers/8255.h
deleted file mode 100644
index 41823de69b77..000000000000
--- a/drivers/staging/comedi/drivers/8255.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * module/8255.h
- * Header file for 8255
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _8255_H
-#define _8255_H
-
-#define I8255_SIZE 0x04
-
-#define I8255_DATA_A_REG 0x00
-#define I8255_DATA_B_REG 0x01
-#define I8255_DATA_C_REG 0x02
-#define I8255_CTRL_REG 0x03
-#define I8255_CTRL_C_LO_IO (1 << 0)
-#define I8255_CTRL_B_IO (1 << 1)
-#define I8255_CTRL_B_MODE (1 << 2)
-#define I8255_CTRL_C_HI_IO (1 << 3)
-#define I8255_CTRL_A_IO (1 << 4)
-#define I8255_CTRL_A_MODE(x) ((x) << 5)
-#define I8255_CTRL_CW (1 << 7)
-
-struct comedi_device;
-struct comedi_subdevice;
-
-int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase);
-
-int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase);
-
-unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
-
-#endif
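
[Editor's note] The io callback declared above lets a driver override the helper module's default port I/O when the 8255 registers are not reachable by a plain inb()/outb(). A minimal sketch showing only the calling convention (dir is treated as non-zero for a write, as the built-in handlers do); the function name and MYBOARD_8255_OFFSET are illustrative placeholders.

static int myboard_8255_io(struct comedi_device *dev, int dir, int port,
			   int data, unsigned long regbase)
{
	if (dir) {
		/* write 'data' to the selected 8255 register */
		outb(data, dev->iobase + regbase + port);
		return 0;
	}
	/* read and return the selected 8255 register */
	return inb(dev->iobase + regbase + port);
}

	/* in the driver's (*attach) or (*auto_attach) handler: */
	ret = subdev_8255_init(dev, s, myboard_8255_io, MYBOARD_8255_OFFSET);

Passing a NULL io callback, as the drivers in this patch do, selects the helper's built-in port or memory-mapped access instead.
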
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
deleted file mode 100644
index bb9854b56807..000000000000
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * COMEDI driver for generic PCI based 8255 digital i/o boards
- * Copyright (C) 2012 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * Based on the tested adl_pci7296 driver written by:
- * Jon Grierson <jd@renko.co.uk>
- * and the experimental cb_pcidio driver written by:
- * Yoshiya Matsuzaka
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: 8255_pci
- * Description: Generic PCI based 8255 Digital I/O boards
- * Devices: [ADLink] PCI-7224 (adl_pci-7224), PCI-7248 (adl_pci-7248),
- * PCI-7296 (adl_pci-7296),
- * [Measurement Computing] PCI-DIO24 (cb_pci-dio24),
- * PCI-DIO24H (cb_pci-dio24h), PCI-DIO48H (cb_pci-dio48h),
- * PCI-DIO96H (cb_pci-dio96h),
- * [National Instruments] PCI-DIO-96 (ni_pci-dio-96),
- * PCI-DIO-96B (ni_pci-dio-96b), PXI-6508 (ni_pxi-6508),
- * PCI-6503 (ni_pci-6503), PCI-6503B (ni_pci-6503b),
- * PCI-6503X (ni_pci-6503x), PXI-6503 (ni_pxi-6503)
- * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
- * Updated: Wed, 12 Sep 2012 11:52:01 -0700
- * Status: untested
- *
- * These boards have one or more 8255 digital I/O chips, each of which
- * is supported as a separate 24-channel DIO subdevice.
- *
- * Boards with 24 DIO channels (1 DIO subdevice):
- *
- * PCI-7224, PCI-DIO24, PCI-DIO24H, PCI-6503, PCI-6503B, PCI-6503X,
- * PXI-6503
- *
- * Boards with 48 DIO channels (2 DIO subdevices):
- *
- * PCI-7248, PCI-DIO48H
- *
- * Boards with 96 DIO channels (4 DIO subdevices):
- *
- * PCI-7296, PCI-DIO96H, PCI-DIO-96, PCI-DIO-96B, PXI-6508
- *
- * Some of these boards also have an 8254 programmable timer/counter
- * chip. This chip is not currently supported by this driver.
- *
- * Interrupt support for these boards is also not currently supported.
- *
- * Configuration Options: not applicable, uses PCI auto config.
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-
-enum pci_8255_boardid {
- BOARD_ADLINK_PCI7224,
- BOARD_ADLINK_PCI7248,
- BOARD_ADLINK_PCI7296,
- BOARD_CB_PCIDIO24,
- BOARD_CB_PCIDIO24H,
- BOARD_CB_PCIDIO48H_OLD,
- BOARD_CB_PCIDIO48H_NEW,
- BOARD_CB_PCIDIO96H,
- BOARD_NI_PCIDIO96,
- BOARD_NI_PCIDIO96B,
- BOARD_NI_PXI6508,
- BOARD_NI_PCI6503,
- BOARD_NI_PCI6503B,
- BOARD_NI_PCI6503X,
- BOARD_NI_PXI_6503,
-};
-
-struct pci_8255_boardinfo {
- const char *name;
- int dio_badr;
- int n_8255;
- unsigned int has_mite:1;
-};
-
-static const struct pci_8255_boardinfo pci_8255_boards[] = {
- [BOARD_ADLINK_PCI7224] = {
- .name = "adl_pci-7224",
- .dio_badr = 2,
- .n_8255 = 1,
- },
- [BOARD_ADLINK_PCI7248] = {
- .name = "adl_pci-7248",
- .dio_badr = 2,
- .n_8255 = 2,
- },
- [BOARD_ADLINK_PCI7296] = {
- .name = "adl_pci-7296",
- .dio_badr = 2,
- .n_8255 = 4,
- },
- [BOARD_CB_PCIDIO24] = {
- .name = "cb_pci-dio24",
- .dio_badr = 2,
- .n_8255 = 1,
- },
- [BOARD_CB_PCIDIO24H] = {
- .name = "cb_pci-dio24h",
- .dio_badr = 2,
- .n_8255 = 1,
- },
- [BOARD_CB_PCIDIO48H_OLD] = {
- .name = "cb_pci-dio48h",
- .dio_badr = 1,
- .n_8255 = 2,
- },
- [BOARD_CB_PCIDIO48H_NEW] = {
- .name = "cb_pci-dio48h",
- .dio_badr = 2,
- .n_8255 = 2,
- },
- [BOARD_CB_PCIDIO96H] = {
- .name = "cb_pci-dio96h",
- .dio_badr = 2,
- .n_8255 = 4,
- },
- [BOARD_NI_PCIDIO96] = {
- .name = "ni_pci-dio-96",
- .dio_badr = 1,
- .n_8255 = 4,
- .has_mite = 1,
- },
- [BOARD_NI_PCIDIO96B] = {
- .name = "ni_pci-dio-96b",
- .dio_badr = 1,
- .n_8255 = 4,
- .has_mite = 1,
- },
- [BOARD_NI_PXI6508] = {
- .name = "ni_pxi-6508",
- .dio_badr = 1,
- .n_8255 = 4,
- .has_mite = 1,
- },
- [BOARD_NI_PCI6503] = {
- .name = "ni_pci-6503",
- .dio_badr = 1,
- .n_8255 = 1,
- .has_mite = 1,
- },
- [BOARD_NI_PCI6503B] = {
- .name = "ni_pci-6503b",
- .dio_badr = 1,
- .n_8255 = 1,
- .has_mite = 1,
- },
- [BOARD_NI_PCI6503X] = {
- .name = "ni_pci-6503x",
- .dio_badr = 1,
- .n_8255 = 1,
- .has_mite = 1,
- },
- [BOARD_NI_PXI_6503] = {
- .name = "ni_pxi-6503",
- .dio_badr = 1,
- .n_8255 = 1,
- .has_mite = 1,
- },
-};
-
-/* ripped from mite.h and mite_setup2() to avoid mite dependency */
-#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
-#define WENAB (1 << 7) /* window enable */
-
-static int pci_8255_mite_init(struct pci_dev *pcidev)
-{
- void __iomem *mite_base;
- u32 main_phys_addr;
-
- /* ioremap the MITE registers (BAR 0) temporarily */
- mite_base = pci_ioremap_bar(pcidev, 0);
- if (!mite_base)
- return -ENOMEM;
-
- /* set data window to main registers (BAR 1) */
- main_phys_addr = pci_resource_start(pcidev, 1);
- writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
-
- /* finished with MITE registers */
- iounmap(mite_base);
- return 0;
-}
-
-static int pci_8255_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct pci_8255_boardinfo *board = NULL;
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- if (context < ARRAY_SIZE(pci_8255_boards))
- board = &pci_8255_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- if (board->has_mite) {
- ret = pci_8255_mite_init(pcidev);
- if (ret)
- return ret;
- }
-
- if ((pci_resource_flags(pcidev, board->dio_badr) & IORESOURCE_MEM)) {
- dev->mmio = pci_ioremap_bar(pcidev, board->dio_badr);
- if (!dev->mmio)
- return -ENOMEM;
- } else {
- dev->iobase = pci_resource_start(pcidev, board->dio_badr);
- }
-
- /*
- * One, two, or four subdevices are set up by this driver depending
- * on the number of channels provided by the board. Each subdevice
- * has 24 channels supported by the 8255 module.
- */
- ret = comedi_alloc_subdevices(dev, board->n_8255);
- if (ret)
- return ret;
-
- for (i = 0; i < board->n_8255; i++) {
- s = &dev->subdevices[i];
- if (dev->mmio)
- ret = subdev_8255_mm_init(dev, s, NULL, i * I8255_SIZE);
- else
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct comedi_driver pci_8255_driver = {
- .driver_name = "8255_pci",
- .module = THIS_MODULE,
- .auto_attach = pci_8255_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int pci_8255_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &pci_8255_driver, id->driver_data);
-}
-
-static const struct pci_device_id pci_8255_pci_table[] = {
- { PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 },
- { PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 },
- { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
- { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
- { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
- .driver_data = BOARD_CB_PCIDIO48H_OLD },
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
- .driver_data = BOARD_CB_PCIDIO48H_NEW },
- { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
- { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
- { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
- { PCI_VDEVICE(NI, 0x13c0), BOARD_NI_PXI6508 },
- { PCI_VDEVICE(NI, 0x0400), BOARD_NI_PCI6503 },
- { PCI_VDEVICE(NI, 0x1250), BOARD_NI_PCI6503B },
- { PCI_VDEVICE(NI, 0x17d0), BOARD_NI_PCI6503X },
- { PCI_VDEVICE(NI, 0x1800), BOARD_NI_PXI_6503 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, pci_8255_pci_table);
-
-static struct pci_driver pci_8255_pci_driver = {
- .name = "8255_pci",
- .id_table = pci_8255_pci_table,
- .probe = pci_8255_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(pci_8255_driver, pci_8255_pci_driver);
-
-MODULE_DESCRIPTION("COMEDI - Generic PCI based 8255 Digital I/O boards");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
deleted file mode 100644
index c3b8f2d7611b..000000000000
--- a/drivers/staging/comedi/drivers/Makefile
+++ /dev/null
@@ -1,145 +0,0 @@
-# Makefile for individual comedi drivers
-#
-ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
-
-# Comedi "helper" modules
-obj-$(CONFIG_COMEDI_8254) += comedi_8254.o
-obj-$(CONFIG_COMEDI_ISADMA) += comedi_isadma.o
-
-# Comedi misc drivers
-obj-$(CONFIG_COMEDI_BOND) += comedi_bond.o
-obj-$(CONFIG_COMEDI_TEST) += comedi_test.o
-obj-$(CONFIG_COMEDI_PARPORT) += comedi_parport.o
-obj-$(CONFIG_COMEDI_SERIAL2002) += serial2002.o
-
-# Comedi ISA drivers
-obj-$(CONFIG_COMEDI_AMPLC_DIO200_ISA) += amplc_dio200.o
-obj-$(CONFIG_COMEDI_AMPLC_PC236_ISA) += amplc_pc236.o
-obj-$(CONFIG_COMEDI_AMPLC_PC263_ISA) += amplc_pc263.o
-obj-$(CONFIG_COMEDI_PCL711) += pcl711.o
-obj-$(CONFIG_COMEDI_PCL724) += pcl724.o
-obj-$(CONFIG_COMEDI_PCL726) += pcl726.o
-obj-$(CONFIG_COMEDI_PCL730) += pcl730.o
-obj-$(CONFIG_COMEDI_PCL812) += pcl812.o
-obj-$(CONFIG_COMEDI_PCL816) += pcl816.o
-obj-$(CONFIG_COMEDI_PCL818) += pcl818.o
-obj-$(CONFIG_COMEDI_PCM3724) += pcm3724.o
-obj-$(CONFIG_COMEDI_RTI800) += rti800.o
-obj-$(CONFIG_COMEDI_RTI802) += rti802.o
-obj-$(CONFIG_COMEDI_DAC02) += dac02.o
-obj-$(CONFIG_COMEDI_DAS16M1) += das16m1.o
-obj-$(CONFIG_COMEDI_DAS08_ISA) += das08_isa.o
-obj-$(CONFIG_COMEDI_DAS16) += das16.o
-obj-$(CONFIG_COMEDI_DAS800) += das800.o
-obj-$(CONFIG_COMEDI_DAS1800) += das1800.o
-obj-$(CONFIG_COMEDI_DAS6402) += das6402.o
-obj-$(CONFIG_COMEDI_DT2801) += dt2801.o
-obj-$(CONFIG_COMEDI_DT2811) += dt2811.o
-obj-$(CONFIG_COMEDI_DT2814) += dt2814.o
-obj-$(CONFIG_COMEDI_DT2815) += dt2815.o
-obj-$(CONFIG_COMEDI_DT2817) += dt2817.o
-obj-$(CONFIG_COMEDI_DT282X) += dt282x.o
-obj-$(CONFIG_COMEDI_DMM32AT) += dmm32at.o
-obj-$(CONFIG_COMEDI_FL512) += fl512.o
-obj-$(CONFIG_COMEDI_AIO_AIO12_8) += aio_aio12_8.o
-obj-$(CONFIG_COMEDI_AIO_IIRO_16) += aio_iiro_16.o
-obj-$(CONFIG_COMEDI_II_PCI20KC) += ii_pci20kc.o
-obj-$(CONFIG_COMEDI_C6XDIGIO) += c6xdigio.o
-obj-$(CONFIG_COMEDI_MPC624) += mpc624.o
-obj-$(CONFIG_COMEDI_ADQ12B) += adq12b.o
-obj-$(CONFIG_COMEDI_NI_AT_A2150) += ni_at_a2150.o
-obj-$(CONFIG_COMEDI_NI_AT_AO) += ni_at_ao.o
-obj-$(CONFIG_COMEDI_NI_ATMIO) += ni_atmio.o
-obj-$(CONFIG_COMEDI_NI_ATMIO16D) += ni_atmio16d.o
-obj-$(CONFIG_COMEDI_NI_LABPC_ISA) += ni_labpc.o
-obj-$(CONFIG_COMEDI_PCMAD) += pcmad.o
-obj-$(CONFIG_COMEDI_PCMDA12) += pcmda12.o
-obj-$(CONFIG_COMEDI_PCMMIO) += pcmmio.o
-obj-$(CONFIG_COMEDI_PCMUIO) += pcmuio.o
-obj-$(CONFIG_COMEDI_MULTIQ3) += multiq3.o
-obj-$(CONFIG_COMEDI_S526) += s526.o
-
-# Comedi PCI drivers
-obj-$(CONFIG_COMEDI_8255_PCI) += 8255_pci.o
-obj-$(CONFIG_COMEDI_ADDI_WATCHDOG) += addi_watchdog.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_1032) += addi_apci_1032.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_1500) += addi_apci_1500.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_1516) += addi_apci_1516.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_1564) += addi_apci_1564.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_16XX) += addi_apci_16xx.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_2032) += addi_apci_2032.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_2200) += addi_apci_2200.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_3120) += addi_apci_3120.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_3501) += addi_apci_3501.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_3XXX) += addi_apci_3xxx.o
-obj-$(CONFIG_COMEDI_ADL_PCI6208) += adl_pci6208.o
-obj-$(CONFIG_COMEDI_ADL_PCI7X3X) += adl_pci7x3x.o
-obj-$(CONFIG_COMEDI_ADL_PCI8164) += adl_pci8164.o
-obj-$(CONFIG_COMEDI_ADL_PCI9111) += adl_pci9111.o
-obj-$(CONFIG_COMEDI_ADL_PCI9118) += adl_pci9118.o
-obj-$(CONFIG_COMEDI_ADV_PCI1710) += adv_pci1710.o
-obj-$(CONFIG_COMEDI_ADV_PCI1723) += adv_pci1723.o
-obj-$(CONFIG_COMEDI_ADV_PCI1724) += adv_pci1724.o
-obj-$(CONFIG_COMEDI_ADV_PCI_DIO) += adv_pci_dio.o
-obj-$(CONFIG_COMEDI_AMPLC_DIO200_PCI) += amplc_dio200_pci.o
-obj-$(CONFIG_COMEDI_AMPLC_PC236_PCI) += amplc_pci236.o
-obj-$(CONFIG_COMEDI_AMPLC_PC263_PCI) += amplc_pci263.o
-obj-$(CONFIG_COMEDI_AMPLC_PCI224) += amplc_pci224.o
-obj-$(CONFIG_COMEDI_AMPLC_PCI230) += amplc_pci230.o
-obj-$(CONFIG_COMEDI_CONTEC_PCI_DIO) += contec_pci_dio.o
-obj-$(CONFIG_COMEDI_DAS08_PCI) += das08_pci.o
-obj-$(CONFIG_COMEDI_DT3000) += dt3000.o
-obj-$(CONFIG_COMEDI_DYNA_PCI10XX) += dyna_pci10xx.o
-obj-$(CONFIG_COMEDI_GSC_HPDI) += gsc_hpdi.o
-obj-$(CONFIG_COMEDI_ICP_MULTI) += icp_multi.o
-obj-$(CONFIG_COMEDI_DAQBOARD2000) += daqboard2000.o
-obj-$(CONFIG_COMEDI_JR3_PCI) += jr3_pci.o
-obj-$(CONFIG_COMEDI_KE_COUNTER) += ke_counter.o
-obj-$(CONFIG_COMEDI_CB_PCIDAS64) += cb_pcidas64.o
-obj-$(CONFIG_COMEDI_CB_PCIDAS) += cb_pcidas.o
-obj-$(CONFIG_COMEDI_CB_PCIDDA) += cb_pcidda.o
-obj-$(CONFIG_COMEDI_CB_PCIMDAS) += cb_pcimdas.o
-obj-$(CONFIG_COMEDI_CB_PCIMDDA) += cb_pcimdda.o
-obj-$(CONFIG_COMEDI_ME4000) += me4000.o
-obj-$(CONFIG_COMEDI_ME_DAQ) += me_daq.o
-obj-$(CONFIG_COMEDI_NI_6527) += ni_6527.o
-obj-$(CONFIG_COMEDI_NI_65XX) += ni_65xx.o
-obj-$(CONFIG_COMEDI_NI_660X) += ni_660x.o
-obj-$(CONFIG_COMEDI_NI_670X) += ni_670x.o
-obj-$(CONFIG_COMEDI_NI_LABPC_PCI) += ni_labpc_pci.o
-obj-$(CONFIG_COMEDI_NI_PCIDIO) += ni_pcidio.o
-obj-$(CONFIG_COMEDI_NI_PCIMIO) += ni_pcimio.o
-obj-$(CONFIG_COMEDI_RTD520) += rtd520.o
-obj-$(CONFIG_COMEDI_S626) += s626.o
-obj-$(CONFIG_COMEDI_SSV_DNP) += ssv_dnp.o
-obj-$(CONFIG_COMEDI_MF6X4) += mf6x4.o
-
-# Comedi PCMCIA drivers
-obj-$(CONFIG_COMEDI_CB_DAS16_CS) += cb_das16_cs.o
-obj-$(CONFIG_COMEDI_DAS08_CS) += das08_cs.o
-obj-$(CONFIG_COMEDI_NI_DAQ_700_CS) += ni_daq_700.o
-obj-$(CONFIG_COMEDI_NI_DAQ_DIO24_CS) += ni_daq_dio24.o
-obj-$(CONFIG_COMEDI_NI_LABPC_CS) += ni_labpc_cs.o
-obj-$(CONFIG_COMEDI_NI_MIO_CS) += ni_mio_cs.o
-obj-$(CONFIG_COMEDI_QUATECH_DAQP_CS) += quatech_daqp_cs.o
-
-# Comedi USB drivers
-obj-$(CONFIG_COMEDI_DT9812) += dt9812.o
-obj-$(CONFIG_COMEDI_NI_USB6501) += ni_usb6501.o
-obj-$(CONFIG_COMEDI_USBDUX) += usbdux.o
-obj-$(CONFIG_COMEDI_USBDUXFAST) += usbduxfast.o
-obj-$(CONFIG_COMEDI_USBDUXSIGMA) += usbduxsigma.o
-obj-$(CONFIG_COMEDI_VMK80XX) += vmk80xx.o
-
-# Comedi NI drivers
-obj-$(CONFIG_COMEDI_MITE) += mite.o
-obj-$(CONFIG_COMEDI_NI_TIO) += ni_tio.o
-obj-$(CONFIG_COMEDI_NI_TIOCMD) += ni_tiocmd.o
-obj-$(CONFIG_COMEDI_NI_LABPC) += ni_labpc_common.o
-obj-$(CONFIG_COMEDI_NI_LABPC_ISADMA) += ni_labpc_isadma.o
-
-obj-$(CONFIG_COMEDI_8255) += comedi_8255.o
-obj-$(CONFIG_COMEDI_8255_SA) += 8255.o
-obj-$(CONFIG_COMEDI_AMPLC_DIO200) += amplc_dio200_common.o
-obj-$(CONFIG_COMEDI_AMPLC_PC236) += amplc_pc236_common.o
-obj-$(CONFIG_COMEDI_DAS08) += das08.o
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
deleted file mode 100644
index f0c0d58383ca..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ /dev/null
@@ -1,187 +0,0 @@
-static int apci1564_timer_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int ctrl;
-
- devpriv->tsk_current = current;
-
- /* Stop the timer */
- ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_ENA);
- outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- if (data[1] == 1) {
- /* Enable timer int & disable all the other int sources */
- outl(ADDI_TCW_CTRL_IRQ_ENA,
- devpriv->timer + ADDI_TCW_CTRL_REG);
- outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
- outl(0x0, dev->iobase + APCI1564_DO_IRQ_REG);
- outl(0x0, dev->iobase + APCI1564_WDOG_IRQ_REG);
- if (devpriv->counters) {
- unsigned long iobase;
-
- iobase = devpriv->counters + ADDI_TCW_IRQ_REG;
- outl(0x0, iobase + APCI1564_COUNTER(0));
- outl(0x0, iobase + APCI1564_COUNTER(1));
- outl(0x0, iobase + APCI1564_COUNTER(2));
- }
- } else {
- /* disable Timer interrupt */
- outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
- }
-
- /* Loading Timebase */
- outl(data[2], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
-
- /* Loading the Reload value */
- outl(data[3], devpriv->timer + ADDI_TCW_RELOAD_REG);
-
- ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
- ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
- ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
- ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
- outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_timer_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int ctrl;
-
- ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
- switch (data[1]) {
- case 0: /* Stop The Timer */
- ctrl &= ~ADDI_TCW_CTRL_ENA;
- break;
- case 1: /* Enable the Timer */
- ctrl |= ADDI_TCW_CTRL_ENA;
- break;
- }
- outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_timer_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
-
- /* Stores the status of the Timer */
- data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) &
- ADDI_TCW_STATUS_OVERFLOW;
-
- /* Stores the Actual value of the Timer */
- data[1] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
-
- return insn->n;
-}
-
-static int apci1564_counter_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
- unsigned int ctrl;
-
- devpriv->tsk_current = current;
-
- /* Stop The Timer */
- ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_ENA);
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- /* Set the reload value */
- outl(data[3], iobase + ADDI_TCW_RELOAD_REG);
-
- /* Set the mode */
- ctrl &= ~(ADDI_TCW_CTRL_EXT_CLK_MASK | ADDI_TCW_CTRL_MODE_MASK |
- ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
- ADDI_TCW_CTRL_WARN_ENA);
- ctrl |= ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE(data[4]);
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- /* Enable or Disable Interrupt */
- if (data[1])
- ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
- else
- ctrl &= ~ADDI_TCW_CTRL_IRQ_ENA;
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- /* Set the Up/Down selection */
- if (data[6])
- ctrl |= ADDI_TCW_CTRL_CNT_UP;
- else
- ctrl &= ~ADDI_TCW_CTRL_CNT_UP;
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_counter_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
- unsigned int ctrl;
-
- ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
- switch (data[1]) {
- case 0: /* Stops the Counter subdevice */
- ctrl = 0;
- break;
- case 1: /* Start the Counter subdevice */
- ctrl |= ADDI_TCW_CTRL_ENA;
- break;
- case 2: /* Clears the Counter subdevice */
- ctrl |= ADDI_TCW_CTRL_GATE;
- break;
- }
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_counter_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
- unsigned int status;
-
- /* Read the Counter Actual Value. */
- data[0] = inl(iobase + ADDI_TCW_VAL_REG);
-
- status = inl(iobase + ADDI_TCW_STATUS_REG);
- data[1] = (status & ADDI_TCW_STATUS_SOFT_TRIG) ? 1 : 0;
- data[2] = (status & ADDI_TCW_STATUS_HARDWARE_TRIG) ? 1 : 0;
- data[3] = (status & ADDI_TCW_STATUS_SOFT_CLR) ? 1 : 0;
- data[4] = (status & ADDI_TCW_STATUS_OVERFLOW) ? 1 : 0;
-
- return insn->n;
-}
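
[Editor's note] These handlers are intended to be #included and wired into COMEDI subdevices by the parent addi_apci_1564 driver. A hedged sketch of what that hookup looks like for the timer; the subdevice index, flags, maxdata and range table here are illustrative rather than copied from the removed driver.

	s = &dev->subdevices[2];		/* assumed timer subdevice slot */
	s->type		= COMEDI_SUBD_TIMER;
	s->subdev_flags	= SDF_WRITABLE | SDF_READABLE;
	s->n_chan	= 1;
	s->maxdata	= 0x0fff;		/* assumed reload-value range */
	s->range_table	= &range_unknown;
	s->insn_config	= apci1564_timer_insn_config;
	s->insn_write	= apci1564_timer_insn_write;
	s->insn_read	= apci1564_timer_insn_read;
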
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
deleted file mode 100644
index 375707497896..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Watchdog Related Defines */
-
-#define ADDIDATA_TIMER 0
-#define ADDIDATA_WATCHDOG 2
-
-/*
- * (*insn_config) for the timer subdevice
- *
- * Configures The Timer, Counter or Watchdog
- * Data Pointer contains configuration parameters as below
- * data[0] : 0 Configure As Timer
- * 1 Configure As Counter
- * 2 Configure As Watchdog
- * data[1] : 1 Enable Interrupt
- * 0 Disable Interrupt
- * data[2] : Time Unit
- * data[3] : Reload Value
- */
-static int apci3501_config_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3501_private *devpriv = dev->private;
- unsigned int ctrl;
-
- if (data[0] != ADDIDATA_WATCHDOG &&
- data[0] != ADDIDATA_TIMER)
- return -EINVAL;
-
- devpriv->tsk_Current = current;
-
- devpriv->timer_mode = data[0];
-
- /* first, disable the watchdog or stop the timer */
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
- ctrl = 0;
- } else {
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_ENA);
- }
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- /* enable/disable the timer interrupt */
- ctrl = (data[1] == 1) ? ADDI_TCW_CTRL_IRQ_ENA : 0;
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- outl(data[2], devpriv->tcw + ADDI_TCW_TIMEBASE_REG);
- outl(data[3], devpriv->tcw + ADDI_TCW_RELOAD_REG);
-
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
- /* Set the mode (e2->e0) NOTE: this doesn't look correct */
- ctrl |= ~(ADDI_TCW_CTRL_CNT_UP | ADDI_TCW_CTRL_EXT_CLK_MASK |
- ADDI_TCW_CTRL_MODE_MASK | ADDI_TCW_CTRL_GATE |
- ADDI_TCW_CTRL_TRIG | ADDI_TCW_CTRL_TIMER_ENA |
- ADDI_TCW_CTRL_RESET_ENA | ADDI_TCW_CTRL_WARN_ENA |
- ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_ENA);
- } else {
- /* mode 2 */
- ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
- ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
- ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
- ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
- }
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-/*
- * (*insn_write) for the timer subdevice
- *
- * Start / Stop The Selected Timer, Counter or Watchdog
- * Data Pointer contains configuration parameters as below
- * data[0] : 0 Timer
- * 1 Counter
- * 2 Watchdog
- * data[1] : 1 Start
- * 0 Stop
- * 2 Trigger
- */
-static int apci3501_write_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3501_private *devpriv = dev->private;
- unsigned int ctrl;
-
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG ||
- devpriv->timer_mode == ADDIDATA_TIMER) {
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
-
- if (data[1] == 1) { /* enable */
- ctrl |= ADDI_TCW_CTRL_ENA;
- } else if (data[1] == 0) { /* stop */
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG)
- ctrl = 0;
- else
- ctrl &= ~ADDI_TCW_CTRL_ENA;
- } else if (data[1] == 2) { /* trigger */
- ctrl |= ADDI_TCW_CTRL_TRIG;
- }
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
- }
-
- inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
- return insn->n;
-}
-
-/*
- * (*insn_read) for the timer subdevice
- *
- * Read The Selected Timer, Counter or Watchdog
- * Data Pointer contains configuration parameters as below
- * data[0] : 0 Timer
- * 1 Counter
- * 2 Watchdog
- * data[1] : Timer Counter Watchdog Number
- */
-static int apci3501_read_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3501_private *devpriv = dev->private;
-
- if (devpriv->timer_mode != ADDIDATA_TIMER &&
- devpriv->timer_mode != ADDIDATA_WATCHDOG)
- return -EINVAL;
-
- data[0] = inl(devpriv->tcw + ADDI_TCW_STATUS_REG) &
- ADDI_TCW_STATUS_OVERFLOW;
- data[1] = inl(devpriv->tcw + ADDI_TCW_VAL_REG);
-
- return insn->n;
-}
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
deleted file mode 100644
index b37166d57b64..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * addi_apci_1032.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: Eric Stolz
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Driver: addi_apci_1032
- * Description: ADDI-DATA APCI-1032 Digital Input Board
- * Author: ADDI-DATA GmbH <info@addi-data.com>,
- * H Hartley Sweeten <hsweeten@visionengravers.com>
- * Status: untested
- * Devices: [ADDI-DATA] APCI-1032 (addi_apci_1032)
- *
- * Configuration options:
- * None; devices are configured automatically.
- *
- * This driver models the APCI-1032 as a 32-channel digital input subdevice
- * plus an additional digital input subdevice to handle change-of-state (COS)
- * interrupts (if an interrupt handler can be set up successfully).
- *
- * The COS subdevice supports comedi asynchronous read commands.
- *
- * Change-Of-State (COS) interrupt configuration:
- *
- * Channels 0 to 15 are interruptible. These channels can be configured
- * to generate interrupts based on AND/OR logic for the desired channels.
- *
- * OR logic:
- * - reacts to rising or falling edges
- * - interrupt is generated when any enabled channel meets the desired
- * interrupt condition
- *
- * AND logic:
- * - reacts to changes in level of the selected inputs
- * - interrupt is generated when all enabled channels meet the desired
- * interrupt condition
- * - after an interrupt, a change in level must occur on the selected
- * inputs to release the IRQ logic
- *
- * The COS subdevice must be configured before setting up a comedi
- * asynchronous command:
- *
- * data[0] : INSN_CONFIG_DIGITAL_TRIG
- * data[1] : trigger number (= 0)
- * data[2] : configuration operation:
- * - COMEDI_DIGITAL_TRIG_DISABLE = no interrupts
- * - COMEDI_DIGITAL_TRIG_ENABLE_EDGES = OR (edge) interrupts
- * - COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = AND (level) interrupts
- * data[3] : left-shift for data[4] and data[5]
- * data[4] : rising-edge/high level channels
- * data[5] : falling-edge/low level channels
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-#include "amcc_s5933.h"
-
-/*
- * I/O Register Map
- */
-#define APCI1032_DI_REG 0x00
-#define APCI1032_MODE1_REG 0x04
-#define APCI1032_MODE2_REG 0x08
-#define APCI1032_STATUS_REG 0x0c
-#define APCI1032_CTRL_REG 0x10
-#define APCI1032_CTRL_INT_OR (0 << 1)
-#define APCI1032_CTRL_INT_AND (1 << 1)
-#define APCI1032_CTRL_INT_ENA (1 << 2)
-
-struct apci1032_private {
- unsigned long amcc_iobase; /* base of AMCC I/O registers */
- unsigned int mode1; /* rising-edge/high level channels */
- unsigned int mode2; /* falling-edge/low level channels */
- unsigned int ctrl; /* interrupt mode: OR (edge) / AND (level) */
-};
-
-static int apci1032_reset(struct comedi_device *dev)
-{
- /* disable the interrupts */
- outl(0x0, dev->iobase + APCI1032_CTRL_REG);
- /* Reset the interrupt status register */
- inl(dev->iobase + APCI1032_STATUS_REG);
- /* Disable the and/or interrupt */
- outl(0x0, dev->iobase + APCI1032_MODE1_REG);
- outl(0x0, dev->iobase + APCI1032_MODE2_REG);
-
- return 0;
-}
-
-static int apci1032_cos_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1032_private *devpriv = dev->private;
- unsigned int shift, oldmask;
-
- switch (data[0]) {
- case INSN_CONFIG_DIGITAL_TRIG:
- if (data[1] != 0)
- return -EINVAL;
- shift = data[3];
- oldmask = (1U << shift) - 1;
- switch (data[2]) {
- case COMEDI_DIGITAL_TRIG_DISABLE:
- devpriv->ctrl = 0;
- devpriv->mode1 = 0;
- devpriv->mode2 = 0;
- apci1032_reset(dev);
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
- if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA |
- APCI1032_CTRL_INT_OR)) {
- /* switching to 'OR' mode */
- devpriv->ctrl = APCI1032_CTRL_INT_ENA |
- APCI1032_CTRL_INT_OR;
- /* wipe old channels */
- devpriv->mode1 = 0;
- devpriv->mode2 = 0;
- } else {
- /* preserve unspecified channels */
- devpriv->mode1 &= oldmask;
- devpriv->mode2 &= oldmask;
- }
- /* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
- if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA |
- APCI1032_CTRL_INT_AND)) {
- /* switching to 'AND' mode */
- devpriv->ctrl = APCI1032_CTRL_INT_ENA |
- APCI1032_CTRL_INT_AND;
- /* wipe old channels */
- devpriv->mode1 = 0;
- devpriv->mode2 = 0;
- } else {
- /* preserve unspecified channels */
- devpriv->mode1 &= oldmask;
- devpriv->mode2 &= oldmask;
- }
- /* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static int apci1032_cos_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = s->state;
-
- return 0;
-}
-
-static int apci1032_cos_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-/*
- * Change-Of-State (COS) 'do_cmd' operation
- *
- * Enable the COS interrupt as configured by apci1032_cos_insn_config().
- */
-static int apci1032_cos_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci1032_private *devpriv = dev->private;
-
- if (!devpriv->ctrl) {
- dev_warn(dev->class_dev,
- "Interrupts disabled due to mode configuration!\n");
- return -EINVAL;
- }
-
- outl(devpriv->mode1, dev->iobase + APCI1032_MODE1_REG);
- outl(devpriv->mode2, dev->iobase + APCI1032_MODE2_REG);
- outl(devpriv->ctrl, dev->iobase + APCI1032_CTRL_REG);
-
- return 0;
-}
-
-static int apci1032_cos_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- return apci1032_reset(dev);
-}
-
-static irqreturn_t apci1032_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct apci1032_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int ctrl;
-
- /* check interrupt is from this device */
- if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) &
- INTCSR_INTR_ASSERTED) == 0)
- return IRQ_NONE;
-
- /* check interrupt is enabled */
- ctrl = inl(dev->iobase + APCI1032_CTRL_REG);
- if ((ctrl & APCI1032_CTRL_INT_ENA) == 0)
- return IRQ_HANDLED;
-
- /* disable the interrupt */
- outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG);
-
- s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff;
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
-
- /* enable the interrupt */
- outl(ctrl, dev->iobase + APCI1032_CTRL_REG);
-
- return IRQ_HANDLED;
-}
-
-static int apci1032_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inl(dev->iobase + APCI1032_DI_REG);
-
- return insn->n;
-}
-
-static int apci1032_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct apci1032_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv->amcc_iobase = pci_resource_start(pcidev, 0);
- dev->iobase = pci_resource_start(pcidev, 1);
- apci1032_reset(dev);
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci1032_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- /* Allocate and Initialise DI Subdevice Structures */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1032_di_insn_bits;
-
- /* Change-Of-State (COS) interrupt subdevice */
- s = &dev->subdevices[1];
- if (dev->irq) {
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = apci1032_cos_insn_config;
- s->insn_bits = apci1032_cos_insn_bits;
- s->len_chanlist = 1;
- s->do_cmdtest = apci1032_cos_cmdtest;
- s->do_cmd = apci1032_cos_cmd;
- s->cancel = apci1032_cos_cancel;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- return 0;
-}
-
-static void apci1032_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- apci1032_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver apci1032_driver = {
- .driver_name = "addi_apci_1032",
- .module = THIS_MODULE,
- .auto_attach = apci1032_auto_attach,
- .detach = apci1032_detach,
-};
-
-static int apci1032_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci1032_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci1032_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1003) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci1032_pci_table);
-
-static struct pci_driver apci1032_pci_driver = {
- .name = "addi_apci_1032",
- .id_table = apci1032_pci_table,
- .probe = apci1032_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci1032_driver, apci1032_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("ADDI-DATA APCI-1032, 32 channel DI boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
deleted file mode 100644
index 63991c49ff23..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
- * addi_apci_1500.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-#include "amcc_s5933.h"
-#include "z8536.h"
-
-/*
- * PCI Bar 0 Register map (devpriv->amcc)
- * see amcc_s5933.h for register and bit defines
- */
-
-/*
- * PCI Bar 1 Register map (dev->iobase)
- * see z8536.h for Z8536 internal registers and bit defines
- */
-#define APCI1500_Z8536_PORTC_REG 0x00
-#define APCI1500_Z8536_PORTB_REG 0x01
-#define APCI1500_Z8536_PORTA_REG 0x02
-#define APCI1500_Z8536_CTRL_REG 0x03
-
-/*
- * PCI Bar 2 Register map (devpriv->addon)
- */
-#define APCI1500_CLK_SEL_REG 0x00
-#define APCI1500_DI_REG 0x00
-#define APCI1500_DO_REG 0x02
-
-struct apci1500_private {
- unsigned long amcc;
- unsigned long addon;
-
- unsigned int clk_src;
-
- /* Digital trigger configuration [0]=AND [1]=OR */
- unsigned int pm[2]; /* Pattern Mask */
- unsigned int pt[2]; /* Pattern Transition */
- unsigned int pp[2]; /* Pattern Polarity */
-};
-
-static unsigned int z8536_read(struct comedi_device *dev, unsigned int reg)
-{
- unsigned long flags;
- unsigned int val;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
- val = inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return val;
-}
-
-static void z8536_write(struct comedi_device *dev,
- unsigned int val, unsigned int reg)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- outb(reg, dev->iobase + APCI1500_Z8536_CTRL_REG);
- outb(val, dev->iobase + APCI1500_Z8536_CTRL_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static void z8536_reset(struct comedi_device *dev)
-{
- unsigned long flags;
-
- /*
- * Even if the state of the Z8536 is not known, the following
- * sequence will reset it and put it in State 0.
- */
- spin_lock_irqsave(&dev->spinlock, flags);
- inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
- outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
- inb(dev->iobase + APCI1500_Z8536_CTRL_REG);
- outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
- outb(1, dev->iobase + APCI1500_Z8536_CTRL_REG);
- outb(0, dev->iobase + APCI1500_Z8536_CTRL_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* Disable all Ports and Counter/Timers */
- z8536_write(dev, 0x00, Z8536_CFG_CTRL_REG);
-
- /*
- * Port A is connected to Digital Input channels 0-7.
- * Configure the port to allow interrupt detection.
- */
- z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
- Z8536_PAB_MODE_SB |
- Z8536_PAB_MODE_PMS_DISABLE,
- Z8536_PA_MODE_REG);
- z8536_write(dev, 0xff, Z8536_PB_DPP_REG);
- z8536_write(dev, 0xff, Z8536_PA_DD_REG);
-
- /*
- * Port B is connected to Digital Input channels 8-13.
- * Configure the port to allow interrupt detection.
- *
- * NOTE: Bits 7 and 6 of Port B are connected to internal
- * diagnostic signals and bit 7 is inverted.
- */
- z8536_write(dev, Z8536_PAB_MODE_PTS_BIT |
- Z8536_PAB_MODE_SB |
- Z8536_PAB_MODE_PMS_DISABLE,
- Z8536_PB_MODE_REG);
- z8536_write(dev, 0x7f, Z8536_PB_DPP_REG);
- z8536_write(dev, 0xff, Z8536_PB_DD_REG);
-
- /*
- * Not sure what Port C is connected to...
- */
- z8536_write(dev, 0x09, Z8536_PC_DPP_REG);
- z8536_write(dev, 0x0e, Z8536_PC_DD_REG);
-
- /*
- * Clear and disable all interrupt sources.
- *
- * Just in case, the reset of the Z8536 should have already
- * done this.
- */
- z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PA_CMDSTAT_REG);
- z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG);
-
- z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_PB_CMDSTAT_REG);
- z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG);
-
- z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(0));
- z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(0));
-
- z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(1));
- z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(1));
-
- z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(2));
- z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_CT_CMDSTAT_REG(2));
-
- /* Disable all interrupts */
- z8536_write(dev, 0x00, Z8536_INT_CTRL_REG);
-}
-
-static void apci1500_port_enable(struct comedi_device *dev, bool enable)
-{
- unsigned int cfg;
-
- cfg = z8536_read(dev, Z8536_CFG_CTRL_REG);
- if (enable)
- cfg |= (Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE);
- else
- cfg &= ~(Z8536_CFG_CTRL_PAE | Z8536_CFG_CTRL_PBE);
- z8536_write(dev, cfg, Z8536_CFG_CTRL_REG);
-}
-
-static void apci1500_timer_enable(struct comedi_device *dev,
- unsigned int chan, bool enable)
-{
- unsigned int bit;
- unsigned int cfg;
-
- if (chan == 0)
- bit = Z8536_CFG_CTRL_CT1E;
- else if (chan == 1)
- bit = Z8536_CFG_CTRL_CT2E;
- else
- bit = Z8536_CFG_CTRL_PCE_CT3E;
-
- cfg = z8536_read(dev, Z8536_CFG_CTRL_REG);
- if (enable) {
- cfg |= bit;
- } else {
- cfg &= ~bit;
- z8536_write(dev, 0x00, Z8536_CT_CMDSTAT_REG(chan));
- }
- z8536_write(dev, cfg, Z8536_CFG_CTRL_REG);
-}
-
-static bool apci1500_ack_irq(struct comedi_device *dev,
- unsigned int reg)
-{
- unsigned int val;
-
- val = z8536_read(dev, reg);
- if ((val & Z8536_STAT_IE_IP) == Z8536_STAT_IE_IP) {
- val &= 0x0f; /* preserve any write bits */
- val |= Z8536_CMD_CLR_IP_IUS;
- z8536_write(dev, val, reg);
-
- return true;
- }
- return false;
-}
-
-static irqreturn_t apci1500_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct apci1500_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status = 0;
- unsigned int val;
-
- val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
- if (!(val & INTCSR_INTR_ASSERTED))
- return IRQ_NONE;
-
- if (apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG))
- status |= 0x01; /* port a event (inputs 0-7) */
-
- if (apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG)) {
- /* Tests if this is an external error */
- val = inb(dev->iobase + APCI1500_Z8536_PORTB_REG);
- val &= 0xc0;
- if (val) {
- if (val & 0x80) /* voltage error */
- status |= 0x40;
- if (val & 0x40) /* short circuit error */
- status |= 0x80;
- } else {
- status |= 0x02; /* port b event (inputs 8-13) */
- }
- }
-
- /*
- * NOTE: The 'status' returned by the sample matches the
- * interrupt mask information from the APCI-1500 Users Manual.
- *
- * Mask Meaning
- * ---------- ------------------------------------------
- * 0x00000001 Event 1 has occurred
- * 0x00000010 Event 2 has occurred
- * 0x00000100 Counter/timer 1 has run down (not implemented)
- * 0x00001000 Counter/timer 2 has run down (not implemented)
- * 0x00010000 Counter 3 has run down (not implemented)
- * 0x00100000 Watchdog has run down (not implemented)
- * 0x01000000 Voltage error
- * 0x10000000 Short-circuit error
- */
- comedi_buf_write_samples(s, &status, 1);
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int apci1500_di_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- /* Disables the main interrupt on the board */
- z8536_write(dev, 0x00, Z8536_INT_CTRL_REG);
-
- /* Disable Ports A & B */
- apci1500_port_enable(dev, false);
-
- /* Ack any pending interrupts */
- apci1500_ack_irq(dev, Z8536_PA_CMDSTAT_REG);
- apci1500_ack_irq(dev, Z8536_PB_CMDSTAT_REG);
-
- /* Disable pattern interrupts */
- z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PA_CMDSTAT_REG);
- z8536_write(dev, Z8536_CMD_CLR_IE, Z8536_PB_CMDSTAT_REG);
-
- /* Enable Ports A & B */
- apci1500_port_enable(dev, true);
-
- return 0;
-}
-
-static int apci1500_di_inttrig_start(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct apci1500_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int pa_mode = Z8536_PAB_MODE_PMS_DISABLE;
- unsigned int pb_mode = Z8536_PAB_MODE_PMS_DISABLE;
- unsigned int pa_trig = trig_num & 0x01;
- unsigned int pb_trig = (trig_num >> 1) & 0x01;
- bool valid_trig = false;
- unsigned int val;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- /* Disable Ports A & B */
- apci1500_port_enable(dev, false);
-
- /* Set Port A for selected trigger pattern */
- z8536_write(dev, devpriv->pm[pa_trig] & 0xff, Z8536_PA_PM_REG);
- z8536_write(dev, devpriv->pt[pa_trig] & 0xff, Z8536_PA_PT_REG);
- z8536_write(dev, devpriv->pp[pa_trig] & 0xff, Z8536_PA_PP_REG);
-
- /* Set Port B for selected trigger pattern */
- z8536_write(dev, (devpriv->pm[pb_trig] >> 8) & 0xff, Z8536_PB_PM_REG);
- z8536_write(dev, (devpriv->pt[pb_trig] >> 8) & 0xff, Z8536_PB_PT_REG);
- z8536_write(dev, (devpriv->pp[pb_trig] >> 8) & 0xff, Z8536_PB_PP_REG);
-
- /* Set Port A trigger mode (if enabled) and enable interrupt */
- if (devpriv->pm[pa_trig] & 0xff) {
- pa_mode = pa_trig ? Z8536_PAB_MODE_PMS_AND
- : Z8536_PAB_MODE_PMS_OR;
-
- val = z8536_read(dev, Z8536_PA_MODE_REG);
- val &= ~Z8536_PAB_MODE_PMS_MASK;
- val |= (pa_mode | Z8536_PAB_MODE_IMO);
- z8536_write(dev, val, Z8536_PA_MODE_REG);
-
- z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PA_CMDSTAT_REG);
-
- valid_trig = true;
-
- dev_dbg(dev->class_dev,
- "Port A configured for %s mode pattern detection\n",
- pa_trig ? "AND" : "OR");
- }
-
- /* Set Port B trigger mode (if enabled) and enable interrupt */
- if (devpriv->pm[pb_trig] & 0xff00) {
- pb_mode = pb_trig ? Z8536_PAB_MODE_PMS_AND
- : Z8536_PAB_MODE_PMS_OR;
-
- val = z8536_read(dev, Z8536_PB_MODE_REG);
- val &= ~Z8536_PAB_MODE_PMS_MASK;
- val |= (pb_mode | Z8536_PAB_MODE_IMO);
- z8536_write(dev, val, Z8536_PB_MODE_REG);
-
- z8536_write(dev, Z8536_CMD_SET_IE, Z8536_PB_CMDSTAT_REG);
-
- valid_trig = true;
-
- dev_dbg(dev->class_dev,
- "Port B configured for %s mode pattern detection\n",
- pb_trig ? "AND" : "OR");
- }
-
- /* Enable Ports A & B */
- apci1500_port_enable(dev, true);
-
- if (!valid_trig) {
- dev_dbg(dev->class_dev,
- "digital trigger %d is not configured\n", trig_num);
- return -EINVAL;
- }
-
- /* Authorizes the main interrupt on the board */
- z8536_write(dev, Z8536_INT_CTRL_MIE | Z8536_INT_CTRL_DLC,
- Z8536_INT_CTRL_REG);
-
- return 0;
-}
-
-static int apci1500_di_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- s->async->inttrig = apci1500_di_inttrig_start;
-
- return 0;
-}
-
-static int apci1500_di_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- /*
- * Internal start source triggers:
- *
- * 0 AND mode for Port A (digital inputs 0-7)
- * AND mode for Port B (digital inputs 8-13 and internal signals)
- *
- * 1 OR mode for Port A (digital inputs 0-7)
- * AND mode for Port B (digital inputs 8-13 and internal signals)
- *
- * 2 AND mode for Port A (digital inputs 0-7)
- * OR mode for Port B (digital inputs 8-13 and internal signals)
- *
- * 3 OR mode for Port A (digital inputs 0-7)
- * OR mode for Port B (digital inputs 8-13 and internal signals)
- */
- err |= comedi_check_trigger_arg_max(&cmd->start_arg, 3);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-/*
- * The pattern-recognition logic must be configured before the digital
- * input async command is started.
- *
- * Digital input channels 0 to 13 can generate interrupts. Channels 14
- * and 15 are connected to internal board status/diagnostic signals.
- *
- * Channel 14 - Voltage error (the external supply is < 5V)
- * Channel 15 - Short-circuit/overtemperature error
- *
- * data[0] : INSN_CONFIG_DIGITAL_TRIG
- * data[1] : trigger number
- * 0 = AND mode
- * 1 = OR mode
- * data[2] : configuration operation:
- * COMEDI_DIGITAL_TRIG_DISABLE = no interrupts
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES = edge interrupts
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = level interrupts
- * data[3] : left-shift for data[4] and data[5]
- * data[4] : rising-edge/high level channels
- * data[5] : falling-edge/low level channels
- */
-static int apci1500_di_cfg_trig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- unsigned int trig = data[1];
- unsigned int shift = data[3];
- unsigned int hi_mask = data[4] << shift;
- unsigned int lo_mask = data[5] << shift;
- unsigned int chan_mask = hi_mask | lo_mask;
- unsigned int old_mask = (1 << shift) - 1;
- unsigned int pm = devpriv->pm[trig] & old_mask;
- unsigned int pt = devpriv->pt[trig] & old_mask;
- unsigned int pp = devpriv->pp[trig] & old_mask;
-
- if (trig > 1) {
- dev_dbg(dev->class_dev,
- "invalid digital trigger number (0=AND, 1=OR)\n");
- return -EINVAL;
- }
-
- if (chan_mask > 0xffff) {
- dev_dbg(dev->class_dev, "invalid digital trigger channel\n");
- return -EINVAL;
- }
-
- switch (data[2]) {
- case COMEDI_DIGITAL_TRIG_DISABLE:
- /* clear trigger configuration */
- pm = 0;
- pt = 0;
- pp = 0;
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
- pm |= chan_mask; /* enable channels */
- pt |= chan_mask; /* enable edge detection */
- pp |= hi_mask; /* rising-edge channels */
- pp &= ~lo_mask; /* falling-edge channels */
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
- pm |= chan_mask; /* enable channels */
- pt &= ~chan_mask; /* enable level detection */
- pp |= hi_mask; /* high level channels */
- pp &= ~lo_mask; /* low level channels */
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * The AND mode trigger can only have one channel (max) enabled
- * for edge detection.
- */
- if (trig == 0) {
- int ret = 0;
- unsigned int src;
-
- src = pt & 0xff;
- if (src)
- ret |= comedi_check_trigger_is_unique(src);
-
- src = (pt >> 8) & 0xff;
- if (src)
- ret |= comedi_check_trigger_is_unique(src);
-
- if (ret) {
- dev_dbg(dev->class_dev,
- "invalid AND trigger configuration\n");
- return ret;
- }
- }
-
- /* save the trigger configuration */
- devpriv->pm[trig] = pm;
- devpriv->pt[trig] = pt;
- devpriv->pp[trig] = pp;
-
- return insn->n;
-}
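A minimal userspace sketch of driving this interface with comedilib, assuming /dev/comedi0 is the APCI-1500, subdevice 0 is the DI subdevice, and that the channel masks, the choice of trigger 1 and the helper name are illustrative only. It stores an edge pattern as trigger 1, sets up the async command, then releases it with the matching internal trigger number so Port A uses the trigger-1 pattern:

#include <comedilib.h>

static int apci1500_arm_pattern(comedi_t *dev)
{
	lsampl_t cfg[6] = {
		INSN_CONFIG_DIGITAL_TRIG,		/* data[0] */
		1,					/* data[1]: trigger 1 */
		COMEDI_DIGITAL_TRIG_ENABLE_EDGES,	/* data[2] */
		0,					/* data[3]: no left-shift */
		0x01,					/* data[4]: rising edge on input 0 */
		0x08,					/* data[5]: falling edge on input 3 */
	};
	comedi_insn insn = {
		.insn = INSN_CONFIG,
		.n = 6,
		.data = cfg,
		.subdev = 0,
		.chanspec = 0,
	};
	unsigned int chanlist[1] = { CR_PACK(0, 0, 0) };
	comedi_cmd cmd = {
		.subdev = 0,
		.start_src = TRIG_INT,
		.start_arg = 1,		/* bit 0 selects the trigger-1 pattern for Port A */
		.scan_begin_src = TRIG_EXT,
		.convert_src = TRIG_FOLLOW,
		.scan_end_src = TRIG_COUNT,
		.scan_end_arg = 1,
		.stop_src = TRIG_NONE,
		.chanlist = chanlist,
		.chanlist_len = 1,
	};

	if (comedi_do_insn(dev, &insn) < 0)
		return -1;
	if (comedi_command(dev, &cmd) < 0)
		return -1;

	/* The trigger number passed here must equal cmd.start_arg. */
	return comedi_internal_trigger(dev, 0, 1);
}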
-
-static int apci1500_di_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- switch (data[0]) {
- case INSN_CONFIG_DIGITAL_TRIG:
- return apci1500_di_cfg_trig(dev, s, insn, data);
- default:
- return -EINVAL;
- }
-}
-
-static int apci1500_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
-
- data[1] = inw(devpriv->addon + APCI1500_DI_REG);
-
- return insn->n;
-}
-
-static int apci1500_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
-
- if (comedi_dio_update_state(s, data))
- outw(s->state, devpriv->addon + APCI1500_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci1500_timer_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1500_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- val = data[1] & s->maxdata;
- z8536_write(dev, val & 0xff, Z8536_CT_RELOAD_LSB_REG(chan));
- z8536_write(dev, (val >> 8) & 0xff,
- Z8536_CT_RELOAD_MSB_REG(chan));
-
- apci1500_timer_enable(dev, chan, true);
- z8536_write(dev, Z8536_CT_CMDSTAT_GCB,
- Z8536_CT_CMDSTAT_REG(chan));
- break;
- case INSN_CONFIG_DISARM:
- apci1500_timer_enable(dev, chan, false);
- break;
-
- case INSN_CONFIG_GET_COUNTER_STATUS:
- data[1] = 0;
- val = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
- if (val & Z8536_CT_STAT_CIP)
- data[1] |= COMEDI_COUNTER_COUNTING;
- if (val & Z8536_CT_CMDSTAT_GCB)
- data[1] |= COMEDI_COUNTER_ARMED;
- if (val & Z8536_STAT_IP) {
- data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
- apci1500_ack_irq(dev, Z8536_CT_CMDSTAT_REG(chan));
- }
- data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
- COMEDI_COUNTER_TERMINAL_COUNT;
- break;
-
- case INSN_CONFIG_SET_COUNTER_MODE:
- /* Simulate the 8254 timer modes */
- switch (data[1]) {
- case I8254_MODE0:
- /* Interrupt on Terminal Count */
- val = Z8536_CT_MODE_ECE |
- Z8536_CT_MODE_DCS_ONESHOT;
- break;
- case I8254_MODE1:
- /* Hardware Retriggerable One-Shot */
- val = Z8536_CT_MODE_ETE |
- Z8536_CT_MODE_DCS_ONESHOT;
- break;
- case I8254_MODE2:
- /* Rate Generator */
- val = Z8536_CT_MODE_CSC |
- Z8536_CT_MODE_DCS_PULSE;
- break;
- case I8254_MODE3:
- /* Square Wave Mode */
- val = Z8536_CT_MODE_CSC |
- Z8536_CT_MODE_DCS_SQRWAVE;
- break;
- case I8254_MODE4:
- /* Software Triggered Strobe */
- val = Z8536_CT_MODE_REB |
- Z8536_CT_MODE_DCS_PULSE;
- break;
- case I8254_MODE5:
- /* Hardware Triggered Strobe (watchdog) */
- val = Z8536_CT_MODE_EOE |
- Z8536_CT_MODE_ETE |
- Z8536_CT_MODE_REB |
- Z8536_CT_MODE_DCS_PULSE;
- break;
- default:
- return -EINVAL;
- }
- apci1500_timer_enable(dev, chan, false);
- z8536_write(dev, val, Z8536_CT_MODE_REG(chan));
- break;
-
- case INSN_CONFIG_SET_CLOCK_SRC:
- if (data[1] > 2)
- return -EINVAL;
- devpriv->clk_src = data[1];
- if (devpriv->clk_src == 2)
- devpriv->clk_src = 3;
- outw(devpriv->clk_src, devpriv->addon + APCI1500_CLK_SEL_REG);
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- switch (devpriv->clk_src) {
- case 0:
- data[1] = 0; /* 111.86 kHz / 2 */
- data[2] = 17879; /* 17879 ns (approx) */
- break;
- case 1:
- data[1] = 1; /* 3.49 kHz / 2 */
- data[2] = 573066; /* 573066 ns (approx) */
- break;
- case 3:
- data[1] = 2; /* 1.747 kHz / 2 */
- data[2] = 1164822; /* 1164822 ns (approx) */
- break;
- default:
- return -EINVAL;
- }
- break;
-
- case INSN_CONFIG_SET_GATE_SRC:
- if (chan == 0)
- return -EINVAL;
-
- val = z8536_read(dev, Z8536_CT_MODE_REG(chan));
- val &= ~Z8536_CT_MODE_EGE;
- if (data[1] == 1)
- val |= Z8536_CT_MODE_EGE;
- else if (data[1] > 1)
- return -EINVAL;
- z8536_write(dev, val, Z8536_CT_MODE_REG(chan));
- break;
- case INSN_CONFIG_GET_GATE_SRC:
- if (chan == 0)
- return -EINVAL;
- break;
-
- default:
- return -EINVAL;
- }
- return insn->n;
-}
-
-static int apci1500_timer_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int cmd;
-
- cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
- cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */
- cmd |= Z8536_CT_CMD_TCB; /* set trigger */
-
- /* software trigger a timer, it only makes sense to do one write */
- if (insn->n)
- z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan));
-
- return insn->n;
-}
-
-static int apci1500_timer_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int cmd;
- unsigned int val;
- int i;
-
- cmd = z8536_read(dev, Z8536_CT_CMDSTAT_REG(chan));
- cmd &= Z8536_CT_CMDSTAT_GCB; /* preserve gate */
- cmd |= Z8536_CT_CMD_RCC; /* set RCC */
-
- for (i = 0; i < insn->n; i++) {
- z8536_write(dev, cmd, Z8536_CT_CMDSTAT_REG(chan));
-
- val = z8536_read(dev, Z8536_CT_VAL_MSB_REG(chan)) << 8;
- val |= z8536_read(dev, Z8536_CT_VAL_LSB_REG(chan));
-
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int apci1500_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct apci1500_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->amcc = pci_resource_start(pcidev, 0);
- devpriv->addon = pci_resource_start(pcidev, 2);
-
- z8536_reset(dev);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci1500_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Digital Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1500_di_insn_bits;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 1;
- s->insn_config = apci1500_di_insn_config;
- s->do_cmdtest = apci1500_di_cmdtest;
- s->do_cmd = apci1500_di_cmd;
- s->cancel = apci1500_di_cancel;
- }
-
- /* Digital Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1500_do_insn_bits;
-
- /* reset all the digital outputs */
- outw(0x0, devpriv->addon + APCI1500_DO_REG);
-
- /* Counter/Timer(Watchdog) subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 3;
- s->maxdata = 0xffff;
- s->range_table = &range_unknown;
- s->insn_config = apci1500_timer_insn_config;
- s->insn_write = apci1500_timer_insn_write;
- s->insn_read = apci1500_timer_insn_read;
-
- /* Enable the PCI interrupt */
- if (dev->irq) {
- outl(0x2000 | INTCSR_INBOX_FULL_INT,
- devpriv->amcc + AMCC_OP_REG_INTCSR);
- inl(devpriv->amcc + AMCC_OP_REG_IMB1);
- inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
- outl(INTCSR_INBOX_INTR_STATUS | 0x2000 | INTCSR_INBOX_FULL_INT,
- devpriv->amcc + AMCC_OP_REG_INTCSR);
- }
-
- return 0;
-}
-
-static void apci1500_detach(struct comedi_device *dev)
-{
- struct apci1500_private *devpriv = dev->private;
-
- if (devpriv->amcc)
- outl(0x0, devpriv->amcc + AMCC_OP_REG_INTCSR);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver apci1500_driver = {
- .driver_name = "addi_apci_1500",
- .module = THIS_MODULE,
- .auto_attach = apci1500_auto_attach,
- .detach = apci1500_detach,
-};
-
-static int apci1500_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci1500_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci1500_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMCC, 0x80fc) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci1500_pci_table);
-
-static struct pci_driver apci1500_pci_driver = {
- .name = "addi_apci_1500",
- .id_table = apci1500_pci_table,
- .probe = apci1500_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci1500_driver, apci1500_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("ADDI-DATA APCI-1500, 16 channel DI / 16 channel DO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
deleted file mode 100644
index f1f8b1c422a7..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * addi_apci_1516.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: Eric Stolz
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-#include "addi_watchdog.h"
-
-/*
- * PCI bar 1 I/O Register map - Digital input/output
- */
-#define APCI1516_DI_REG 0x00
-#define APCI1516_DO_REG 0x04
-
-/*
- * PCI bar 2 I/O Register map - Watchdog (APCI-1516 and APCI-2016)
- */
-#define APCI1516_WDOG_REG 0x00
-
-enum apci1516_boardid {
- BOARD_APCI1016,
- BOARD_APCI1516,
- BOARD_APCI2016,
-};
-
-struct apci1516_boardinfo {
- const char *name;
- int di_nchan;
- int do_nchan;
- int has_wdog;
-};
-
-static const struct apci1516_boardinfo apci1516_boardtypes[] = {
- [BOARD_APCI1016] = {
- .name = "apci1016",
- .di_nchan = 16,
- },
- [BOARD_APCI1516] = {
- .name = "apci1516",
- .di_nchan = 8,
- .do_nchan = 8,
- .has_wdog = 1,
- },
- [BOARD_APCI2016] = {
- .name = "apci2016",
- .do_nchan = 16,
- .has_wdog = 1,
- },
-};
-
-struct apci1516_private {
- unsigned long wdog_iobase;
-};
-
-static int apci1516_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inw(dev->iobase + APCI1516_DI_REG);
-
- return insn->n;
-}
-
-static int apci1516_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- s->state = inw(dev->iobase + APCI1516_DO_REG);
-
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + APCI1516_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci1516_reset(struct comedi_device *dev)
-{
- const struct apci1516_boardinfo *board = dev->board_ptr;
- struct apci1516_private *devpriv = dev->private;
-
- if (!board->has_wdog)
- return 0;
-
- outw(0x0, dev->iobase + APCI1516_DO_REG);
-
- addi_watchdog_reset(devpriv->wdog_iobase);
-
- return 0;
-}
-
-static int apci1516_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct apci1516_boardinfo *board = NULL;
- struct apci1516_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- if (context < ARRAY_SIZE(apci1516_boardtypes))
- board = &apci1516_boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->wdog_iobase = pci_resource_start(pcidev, 2);
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Initialize the digital input subdevice */
- s = &dev->subdevices[0];
- if (board->di_nchan) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->di_nchan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1516_di_insn_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Initialize the digital output subdevice */
- s = &dev->subdevices[1];
- if (board->do_nchan) {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->do_nchan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1516_do_insn_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Initialize the watchdog subdevice */
- s = &dev->subdevices[2];
- if (board->has_wdog) {
- ret = addi_watchdog_init(s, devpriv->wdog_iobase);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- apci1516_reset(dev);
- return 0;
-}
-
-static void apci1516_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- apci1516_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver apci1516_driver = {
- .driver_name = "addi_apci_1516",
- .module = THIS_MODULE,
- .auto_attach = apci1516_auto_attach,
- .detach = apci1516_detach,
-};
-
-static int apci1516_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci1516_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci1516_pci_table[] = {
- { PCI_VDEVICE(ADDIDATA, 0x1000), BOARD_APCI1016 },
- { PCI_VDEVICE(ADDIDATA, 0x1001), BOARD_APCI1516 },
- { PCI_VDEVICE(ADDIDATA, 0x1002), BOARD_APCI2016 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci1516_pci_table);
-
-static struct pci_driver apci1516_pci_driver = {
- .name = "addi_apci_1516",
- .id_table = apci1516_pci_table,
- .probe = apci1516_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci1516_driver, apci1516_pci_driver);
-
-MODULE_DESCRIPTION("ADDI-DATA APCI-1016/1516/2016, 16 channel DIO boards");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
deleted file mode 100644
index f1ccfbd4c578..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * addi_apci_1564.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-
-#include "../comedi_pci.h"
-#include "addi_tcw.h"
-#include "addi_watchdog.h"
-
-/*
- * PCI BAR 0
- *
- * PLD Revision 1.0 I/O Mapping
- * 0x00 93C76 EEPROM
- * 0x04 - 0x18 Timer 12-Bit
- *
- * PLD Revision 2.x I/O Mapping
- * 0x00 93C76 EEPROM
- * 0x04 - 0x14 Digital Input
- * 0x18 - 0x25 Digital Output
- * 0x28 - 0x44 Watchdog 8-Bit
- * 0x48 - 0x64 Timer 12-Bit
- */
-#define APCI1564_EEPROM_REG 0x00
-#define APCI1564_EEPROM_VCC_STATUS BIT(8)
-#define APCI1564_EEPROM_TO_REV(x) (((x) >> 4) & 0xf)
-#define APCI1564_EEPROM_DI BIT(3)
-#define APCI1564_EEPROM_DO BIT(2)
-#define APCI1564_EEPROM_CS BIT(1)
-#define APCI1564_EEPROM_CLK BIT(0)
-#define APCI1564_REV1_TIMER_IOBASE 0x04
-#define APCI1564_REV2_MAIN_IOBASE 0x04
-#define APCI1564_REV2_TIMER_IOBASE 0x48
-
-/*
- * PCI BAR 1
- *
- * PLD Revision 1.0 I/O Mapping
- * 0x00 - 0x10 Digital Input
- * 0x14 - 0x20 Digital Output
- * 0x24 - 0x3c Watchdog 8-Bit
- *
- * PLD Revision 2.x I/O Mapping
- * 0x00 Counter_0
- * 0x20 Counter_1
- * 0x30 Counter_3
- */
-#define APCI1564_REV1_MAIN_IOBASE 0x00
-
-/*
- * dev->iobase Register Map
- * PLD Revision 1.0 - PCI BAR 1 + 0x00
- * PLD Revision 2.x - PCI BAR 0 + 0x04
- */
-#define APCI1564_DI_REG 0x00
-#define APCI1564_DI_INT_MODE1_REG 0x04
-#define APCI1564_DI_INT_MODE2_REG 0x08
-#define APCI1564_DI_INT_STATUS_REG 0x0c
-#define APCI1564_DI_IRQ_REG 0x10
-#define APCI1564_DI_IRQ_ENA BIT(2)
-#define APCI1564_DI_IRQ_MODE BIT(1) /* 1=AND, 0=OR */
-#define APCI1564_DO_REG 0x14
-#define APCI1564_DO_INT_CTRL_REG 0x18
-#define APCI1564_DO_INT_CTRL_CC_INT_ENA BIT(1)
-#define APCI1564_DO_INT_CTRL_VCC_INT_ENA BIT(0)
-#define APCI1564_DO_INT_STATUS_REG 0x1c
-#define APCI1564_DO_INT_STATUS_CC BIT(1)
-#define APCI1564_DO_INT_STATUS_VCC BIT(0)
-#define APCI1564_DO_IRQ_REG 0x20
-#define APCI1564_DO_IRQ_INTR BIT(0)
-#define APCI1564_WDOG_REG 0x24
-#define APCI1564_WDOG_RELOAD_REG 0x28
-#define APCI1564_WDOG_TIMEBASE_REG 0x2c
-#define APCI1564_WDOG_CTRL_REG 0x30
-#define APCI1564_WDOG_STATUS_REG 0x34
-#define APCI1564_WDOG_IRQ_REG 0x38
-#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x3c
-#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x40
-
-/*
- * devpriv->timer Register Map (see addi_tcw.h for register/bit defines)
- * PLD Revision 1.0 - PCI BAR 0 + 0x04
- * PLD Revision 2.x - PCI BAR 0 + 0x48
- */
-
-/*
- * devpriv->counters Register Map (see addi_tcw.h for register/bit defines)
- * PLD Revision 2.x - PCI BAR 1 + 0x00
- */
-#define APCI1564_COUNTER(x) ((x) * 0x20)
-
-struct apci1564_private {
- unsigned long eeprom; /* base address of EEPROM register */
- unsigned long timer; /* base address of 12-bit timer */
- unsigned long counters; /* base address of 32-bit counters */
- unsigned int mode1; /* rising-edge/high level channels */
- unsigned int mode2; /* falling-edge/low level channels */
- unsigned int ctrl; /* interrupt mode OR (edge) / AND (level) */
- struct task_struct *tsk_current;
-};
-
-#include "addi-data/hwdrv_apci1564.c"
-
-static int apci1564_reset(struct comedi_device *dev)
-{
- struct apci1564_private *devpriv = dev->private;
-
- /* Disable the input interrupts and reset status register */
- outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
- inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
-
- /* Reset the output channels and disable interrupts */
- outl(0x0, dev->iobase + APCI1564_DO_REG);
- outl(0x0, dev->iobase + APCI1564_DO_INT_CTRL_REG);
-
- /* Reset the watchdog registers */
- addi_watchdog_reset(dev->iobase + APCI1564_WDOG_REG);
-
- /* Reset the timer registers */
- outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
- outl(0x0, devpriv->timer + ADDI_TCW_RELOAD_REG);
-
- if (devpriv->counters) {
- unsigned long iobase = devpriv->counters + ADDI_TCW_CTRL_REG;
-
- /* Reset the counter registers */
- outl(0x0, iobase + APCI1564_COUNTER(0));
- outl(0x0, iobase + APCI1564_COUNTER(1));
- outl(0x0, iobase + APCI1564_COUNTER(2));
- }
-
- return 0;
-}
-
-static irqreturn_t apci1564_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct apci1564_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status;
- unsigned int ctrl;
- unsigned int chan;
-
- status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
- if (status & APCI1564_DI_IRQ_ENA) {
- /* disable the interrupt */
- outl(status & ~APCI1564_DI_IRQ_ENA,
- dev->iobase + APCI1564_DI_IRQ_REG);
-
- s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
- 0xffff;
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
-
- /* enable the interrupt */
- outl(status, dev->iobase + APCI1564_DI_IRQ_REG);
- }
-
- status = inl(devpriv->timer + ADDI_TCW_IRQ_REG);
- if (status & 0x01) {
- /* Disable Timer Interrupt */
- ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Timer Interrupt */
- outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
- }
-
- if (devpriv->counters) {
- for (chan = 0; chan < 4; chan++) {
- unsigned long iobase;
-
- iobase = devpriv->counters + APCI1564_COUNTER(chan);
-
- status = inl(iobase + ADDI_TCW_IRQ_REG);
- if (status & 0x01) {
- /* Disable Counter Interrupt */
- ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
- outl(0x0, iobase + ADDI_TCW_CTRL_REG);
-
- /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Counter Interrupt */
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
- }
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int apci1564_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inl(dev->iobase + APCI1564_DI_REG);
-
- return insn->n;
-}
-
-static int apci1564_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- s->state = inl(dev->iobase + APCI1564_DO_REG);
-
- if (comedi_dio_update_state(s, data))
- outl(s->state, dev->iobase + APCI1564_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci1564_diag_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inl(dev->iobase + APCI1564_DO_INT_STATUS_REG) & 3;
-
- return insn->n;
-}
-
-/*
- * Change-Of-State (COS) interrupt configuration
- *
- * Channels 0 to 15 are interruptible. These channels can be configured
- * to generate interrupts based on AND/OR logic for the desired channels.
- *
- * OR logic
- * - reacts to rising or falling edges
- * - interrupt is generated when any enabled channel
- * meets the desired interrupt condition
- *
- * AND logic
- * - reacts to changes in level of the selected inputs
- * - interrupt is generated when all enabled channels
- * meet the desired interrupt condition
- * - after an interrupt, a change in level must occur on
- * the selected inputs to release the IRQ logic
- *
- * The COS interrupt must be configured before it can be enabled.
- *
- * data[0] : INSN_CONFIG_DIGITAL_TRIG
- * data[1] : trigger number (= 0)
- * data[2] : configuration operation:
- * COMEDI_DIGITAL_TRIG_DISABLE = no interrupts
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES = OR (edge) interrupts
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = AND (level) interrupts
- * data[3] : left-shift for data[4] and data[5]
- * data[4] : rising-edge/high level channels
- * data[5] : falling-edge/low level channels
- */
-static int apci1564_cos_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int shift, oldmask;
-
- switch (data[0]) {
- case INSN_CONFIG_DIGITAL_TRIG:
- if (data[1] != 0)
- return -EINVAL;
- shift = data[3];
- oldmask = (1U << shift) - 1;
- switch (data[2]) {
- case COMEDI_DIGITAL_TRIG_DISABLE:
- devpriv->ctrl = 0;
- devpriv->mode1 = 0;
- devpriv->mode2 = 0;
- outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
- inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
- if (devpriv->ctrl != APCI1564_DI_IRQ_ENA) {
- /* switching to 'OR' mode */
- devpriv->ctrl = APCI1564_DI_IRQ_ENA;
- /* wipe old channels */
- devpriv->mode1 = 0;
- devpriv->mode2 = 0;
- } else {
- /* preserve unspecified channels */
- devpriv->mode1 &= oldmask;
- devpriv->mode2 &= oldmask;
- }
- /* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
- if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA |
- APCI1564_DI_IRQ_MODE)) {
- /* switching to 'AND' mode */
- devpriv->ctrl = APCI1564_DI_IRQ_ENA |
- APCI1564_DI_IRQ_MODE;
- /* wipe old channels */
- devpriv->mode1 = 0;
- devpriv->mode2 = 0;
- } else {
- /* preserve unspecified channels */
- devpriv->mode1 &= oldmask;
- devpriv->mode2 &= oldmask;
- }
- /* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
- return insn->n;
-}
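A short userspace sketch of this call for the level (AND) mode, assuming subdevice 2 is the COS subdevice set up below; the channel masks and the helper name are illustrative only. Inputs 0 and 1 must be high and input 2 low before the interrupt fires:

#include <comedilib.h>

static int apci1564_cos_config_levels(comedi_t *dev)
{
	lsampl_t cfg[6] = {
		INSN_CONFIG_DIGITAL_TRIG,
		0,					/* data[1]: trigger number, must be 0 */
		COMEDI_DIGITAL_TRIG_ENABLE_LEVELS,	/* AND (level) interrupts */
		0,					/* data[3]: no left-shift */
		0x03,					/* data[4]: inputs 0-1 high */
		0x04,					/* data[5]: input 2 low */
	};
	comedi_insn insn = {
		.insn = INSN_CONFIG,
		.n = 6,
		.data = cfg,
		.subdev = 2,
		.chanspec = 0,
	};

	return comedi_do_insn(dev, &insn);
}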
-
-static int apci1564_cos_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = s->state;
-
- return 0;
-}
-
-static int apci1564_cos_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-/*
- * Change-Of-State (COS) 'do_cmd' operation
- *
- * Enable the COS interrupt as configured by apci1564_cos_insn_config().
- */
-static int apci1564_cos_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci1564_private *devpriv = dev->private;
-
- if (!devpriv->ctrl) {
- dev_warn(dev->class_dev,
- "Interrupts disabled due to mode configuration!\n");
- return -EINVAL;
- }
-
- outl(devpriv->mode1, dev->iobase + APCI1564_DI_INT_MODE1_REG);
- outl(devpriv->mode2, dev->iobase + APCI1564_DI_INT_MODE2_REG);
- outl(devpriv->ctrl, dev->iobase + APCI1564_DI_IRQ_REG);
-
- return 0;
-}
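Once the channels have been enabled as above, a hedged sketch of running the command and reading one latched sample, again assuming /dev/comedi0 and subdevice 2; the helper name is illustrative only:

#include <unistd.h>
#include <comedilib.h>

static int apci1564_wait_for_cos(comedi_t *dev)
{
	unsigned int chanlist[1] = { CR_PACK(0, 0, 0) };
	comedi_cmd cmd = {
		.subdev = 2,			/* COS subdevice */
		.start_src = TRIG_NOW,		/* interrupts enabled immediately */
		.scan_begin_src = TRIG_EXT,
		.convert_src = TRIG_FOLLOW,
		.scan_end_src = TRIG_COUNT,
		.scan_end_arg = 1,
		.stop_src = TRIG_NONE,
		.chanlist = chanlist,
		.chanlist_len = 1,
	};
	unsigned short sample;

	if (comedi_command(dev, &cmd) < 0)
		return -1;

	/* Each sample is the 16-bit DI interrupt status latched in the handler. */
	if (read(comedi_fileno(dev), &sample, sizeof(sample)) != sizeof(sample))
		return -1;

	comedi_cancel(dev, 2);
	return sample;
}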
-
-static int apci1564_cos_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
- inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
-
- return 0;
-}
-
-static int apci1564_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct apci1564_private *devpriv;
- struct comedi_subdevice *s;
- unsigned int val;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- /* read the EEPROM register and check the I/O map revision */
- devpriv->eeprom = pci_resource_start(pcidev, 0);
- val = inl(devpriv->eeprom + APCI1564_EEPROM_REG);
- if (APCI1564_EEPROM_TO_REV(val) == 0) {
- /* PLD Revision 1.0 I/O Mapping */
- dev->iobase = pci_resource_start(pcidev, 1) +
- APCI1564_REV1_MAIN_IOBASE;
- devpriv->timer = devpriv->eeprom + APCI1564_REV1_TIMER_IOBASE;
- } else {
- /* PLD Revision 2.x I/O Mapping */
- dev->iobase = devpriv->eeprom + APCI1564_REV2_MAIN_IOBASE;
- devpriv->timer = devpriv->eeprom + APCI1564_REV2_TIMER_IOBASE;
- devpriv->counters = pci_resource_start(pcidev, 1);
- }
-
- apci1564_reset(dev);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci1564_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 7);
- if (ret)
- return ret;
-
- /* Allocate and Initialise DI Subdevice Structures */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1564_di_insn_bits;
-
- /* Allocate and Initialise DO Subdevice Structures */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1564_do_insn_bits;
-
- /* Change-Of-State (COS) interrupt subdevice */
- s = &dev->subdevices[2];
- if (dev->irq) {
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->len_chanlist = 1;
- s->insn_config = apci1564_cos_insn_config;
- s->insn_bits = apci1564_cos_insn_bits;
- s->do_cmdtest = apci1564_cos_cmdtest;
- s->do_cmd = apci1564_cos_cmd;
- s->cancel = apci1564_cos_cancel;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Timer subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 1;
- s->maxdata = 0x0fff;
- s->range_table = &range_digital;
- s->insn_config = apci1564_timer_insn_config;
- s->insn_write = apci1564_timer_insn_write;
- s->insn_read = apci1564_timer_insn_read;
-
- /* Counter subdevice */
- s = &dev->subdevices[4];
- if (devpriv->counters) {
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE | SDF_LSAMPL;
- s->n_chan = 3;
- s->maxdata = 0xffffffff;
- s->range_table = &range_digital;
- s->insn_config = apci1564_counter_insn_config;
- s->insn_write = apci1564_counter_insn_write;
- s->insn_read = apci1564_counter_insn_read;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Initialize the watchdog subdevice */
- s = &dev->subdevices[5];
- ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_REG);
- if (ret)
- return ret;
-
- /* Initialize the diagnostic status subdevice */
- s = &dev->subdevices[6];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 2;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci1564_diag_insn_bits;
-
- return 0;
-}
-
-static void apci1564_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- apci1564_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver apci1564_driver = {
- .driver_name = "addi_apci_1564",
- .module = THIS_MODULE,
- .auto_attach = apci1564_auto_attach,
- .detach = apci1564_detach,
-};
-
-static int apci1564_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci1564_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci1564_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1006) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci1564_pci_table);
-
-static struct pci_driver apci1564_pci_driver = {
- .name = "addi_apci_1564",
- .id_table = apci1564_pci_table,
- .probe = apci1564_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci1564_driver, apci1564_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("ADDI-DATA APCI-1564, 32 channel DI / 32 channel DO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
deleted file mode 100644
index c63133a12a4e..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * addi_apci_16xx.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: S. Weber
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-/*
- * Register I/O map
- */
-#define APCI16XX_IN_REG(x) (((x) * 4) + 0x08)
-#define APCI16XX_OUT_REG(x) (((x) * 4) + 0x14)
-#define APCI16XX_DIR_REG(x) (((x) * 4) + 0x20)
-
-enum apci16xx_boardid {
- BOARD_APCI1648,
- BOARD_APCI1696,
-};
-
-struct apci16xx_boardinfo {
- const char *name;
- int n_chan;
-};
-
-static const struct apci16xx_boardinfo apci16xx_boardtypes[] = {
- [BOARD_APCI1648] = {
- .name = "apci1648",
- .n_chan = 48, /* 2 subdevices */
- },
- [BOARD_APCI1696] = {
- .name = "apci1696",
- .n_chan = 96, /* 3 subdevices */
- },
-};
-
-static int apci16xx_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 8)
- mask = 0x000000ff;
- else if (chan < 16)
- mask = 0x0000ff00;
- else if (chan < 24)
- mask = 0x00ff0000;
- else
- mask = 0xff000000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(s->index));
-
- return insn->n;
-}
-
-static int apci16xx_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outl(s->state, dev->iobase + APCI16XX_OUT_REG(s->index));
-
- data[1] = inl(dev->iobase + APCI16XX_IN_REG(s->index));
-
- return insn->n;
-}
-
-static int apci16xx_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct apci16xx_boardinfo *board = NULL;
- struct comedi_subdevice *s;
- unsigned int n_subdevs;
- unsigned int last;
- int i;
- int ret;
-
- if (context < ARRAY_SIZE(apci16xx_boardtypes))
- board = &apci16xx_boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->iobase = pci_resource_start(pcidev, 0);
-
- /*
- * Work out the number of subdevices needed to support all the
- * digital i/o channels on the board. Each subdevice supports
- * up to 32 channels.
- */
- n_subdevs = board->n_chan / 32;
- if ((n_subdevs * 32) < board->n_chan) {
- last = board->n_chan - (n_subdevs * 32);
- n_subdevs++;
- } else {
- last = 0;
- }
-
- ret = comedi_alloc_subdevices(dev, n_subdevs);
- if (ret)
- return ret;
-
- /* Initialize the TTL digital i/o subdevices */
- for (i = 0; i < n_subdevs; i++) {
- s = &dev->subdevices[i];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = (((i + 1) * 32) <= board->n_chan) ? 32 : last;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = apci16xx_insn_config;
- s->insn_bits = apci16xx_dio_insn_bits;
-
- /* Default all channels to inputs */
- s->io_bits = 0;
- outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(i));
- }
-
- return 0;
-}
-
-static struct comedi_driver apci16xx_driver = {
- .driver_name = "addi_apci_16xx",
- .module = THIS_MODULE,
- .auto_attach = apci16xx_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int apci16xx_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci16xx_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci16xx_pci_table[] = {
- { PCI_VDEVICE(ADDIDATA, 0x1009), BOARD_APCI1648 },
- { PCI_VDEVICE(ADDIDATA, 0x100a), BOARD_APCI1696 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci16xx_pci_table);
-
-static struct pci_driver apci16xx_pci_driver = {
- .name = "addi_apci_16xx",
- .id_table = apci16xx_pci_table,
- .probe = apci16xx_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci16xx_driver, apci16xx_pci_driver);
-
-MODULE_DESCRIPTION("ADDI-DATA APCI-1648/1696, TTL I/O boards");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
deleted file mode 100644
index ad715253bdcc..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * addi_apci_2032.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: Eric Stolz
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include "../comedi_pci.h"
-#include "addi_watchdog.h"
-
-/*
- * PCI bar 1 I/O Register map
- */
-#define APCI2032_DO_REG 0x00
-#define APCI2032_INT_CTRL_REG 0x04
-#define APCI2032_INT_CTRL_VCC_ENA (1 << 0)
-#define APCI2032_INT_CTRL_CC_ENA (1 << 1)
-#define APCI2032_INT_STATUS_REG 0x08
-#define APCI2032_INT_STATUS_VCC (1 << 0)
-#define APCI2032_INT_STATUS_CC (1 << 1)
-#define APCI2032_STATUS_REG 0x0c
-#define APCI2032_STATUS_IRQ (1 << 0)
-#define APCI2032_WDOG_REG 0x10
-
-struct apci2032_int_private {
- spinlock_t spinlock;
- bool active;
- unsigned char enabled_isns;
-};
-
-static int apci2032_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- s->state = inl(dev->iobase + APCI2032_DO_REG);
-
- if (comedi_dio_update_state(s, data))
- outl(s->state, dev->iobase + APCI2032_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci2032_int_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inl(dev->iobase + APCI2032_INT_STATUS_REG) & 3;
- return insn->n;
-}
-
-static void apci2032_int_stop(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci2032_int_private *subpriv = s->private;
-
- subpriv->active = false;
- subpriv->enabled_isns = 0;
- outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG);
-}
-
-static int apci2032_int_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-static int apci2032_int_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- struct apci2032_int_private *subpriv = s->private;
- unsigned char enabled_isns;
- unsigned int n;
- unsigned long flags;
-
- enabled_isns = 0;
- for (n = 0; n < cmd->chanlist_len; n++)
- enabled_isns |= 1 << CR_CHAN(cmd->chanlist[n]);
-
- spin_lock_irqsave(&subpriv->spinlock, flags);
-
- subpriv->enabled_isns = enabled_isns;
- subpriv->active = true;
- outl(enabled_isns, dev->iobase + APCI2032_INT_CTRL_REG);
-
- spin_unlock_irqrestore(&subpriv->spinlock, flags);
-
- return 0;
-}
-
-static int apci2032_int_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci2032_int_private *subpriv = s->private;
- unsigned long flags;
-
- spin_lock_irqsave(&subpriv->spinlock, flags);
- if (subpriv->active)
- apci2032_int_stop(dev, s);
- spin_unlock_irqrestore(&subpriv->spinlock, flags);
-
- return 0;
-}
-
-static irqreturn_t apci2032_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
- struct apci2032_int_private *subpriv;
- unsigned int val;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- /* Check if VCC OR CC interrupt has occurred */
- val = inl(dev->iobase + APCI2032_STATUS_REG) & APCI2032_STATUS_IRQ;
- if (!val)
- return IRQ_NONE;
-
- subpriv = s->private;
- spin_lock(&subpriv->spinlock);
-
- val = inl(dev->iobase + APCI2032_INT_STATUS_REG) & 3;
- /* Disable triggered interrupt sources. */
- outl(~val & 3, dev->iobase + APCI2032_INT_CTRL_REG);
- /*
- * Note: We don't reenable the triggered interrupt sources because they
- * are level-sensitive, hardware error status interrupt sources and
- * they'd keep triggering interrupts repeatedly.
- */
-
- if (subpriv->active && (val & subpriv->enabled_isns) != 0) {
- unsigned short bits = 0;
- int i;
-
- /* Bits in scan data correspond to indices in channel list. */
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (val & (1 << chan))
- bits |= (1 << i);
- }
-
- comedi_buf_write_samples(s, &bits, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
- }
-
- spin_unlock(&subpriv->spinlock);
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
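A hedged userspace sketch of using this subdevice to wait for one VCC/CC diagnostic interrupt, assuming /dev/comedi0 and that the interrupt subdevice is number 2 as set up below; the helper name is illustrative only. With SDF_PACKED, each bit of the 16-bit sample corresponds to an index in the channel list:

#include <unistd.h>
#include <comedilib.h>

static int apci2032_wait_diag_irq(comedi_t *dev)
{
	unsigned int chanlist[2] = {
		CR_PACK(0, 0, 0),	/* sample bit 0: VCC interrupt */
		CR_PACK(1, 0, 0),	/* sample bit 1: CC interrupt */
	};
	comedi_cmd cmd = {
		.subdev = 2,		/* interrupt subdevice */
		.start_src = TRIG_NOW,
		.scan_begin_src = TRIG_EXT,
		.convert_src = TRIG_NOW,
		.scan_end_src = TRIG_COUNT,
		.scan_end_arg = 2,
		.stop_src = TRIG_COUNT,
		.stop_arg = 1,		/* end of acquisition after one scan */
		.chanlist = chanlist,
		.chanlist_len = 2,
	};
	unsigned short sample;

	if (comedi_command(dev, &cmd) < 0)
		return -1;

	if (read(comedi_fileno(dev), &sample, sizeof(sample)) != sizeof(sample))
		return -1;

	return sample & 0x3;	/* which of VCC/CC triggered */
}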
-
-static int apci2032_reset(struct comedi_device *dev)
-{
- outl(0x0, dev->iobase + APCI2032_DO_REG);
- outl(0x0, dev->iobase + APCI2032_INT_CTRL_REG);
-
- addi_watchdog_reset(dev->iobase + APCI2032_WDOG_REG);
-
- return 0;
-}
-
-static int apci2032_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 1);
- apci2032_reset(dev);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci2032_interrupt,
- IRQF_SHARED, dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Initialize the digital output subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci2032_do_insn_bits;
-
- /* Initialize the watchdog subdevice */
- s = &dev->subdevices[1];
- ret = addi_watchdog_init(s, dev->iobase + APCI2032_WDOG_REG);
- if (ret)
- return ret;
-
- /* Initialize the interrupt subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 2;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci2032_int_insn_bits;
- if (dev->irq) {
- struct apci2032_int_private *subpriv;
-
- dev->read_subdev = s;
- subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
- if (!subpriv)
- return -ENOMEM;
- spin_lock_init(&subpriv->spinlock);
- s->private = subpriv;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_PACKED;
- s->len_chanlist = 2;
- s->do_cmdtest = apci2032_int_cmdtest;
- s->do_cmd = apci2032_int_cmd;
- s->cancel = apci2032_int_cancel;
- }
-
- return 0;
-}
-
-static void apci2032_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- apci2032_reset(dev);
- comedi_pci_detach(dev);
- if (dev->read_subdev)
- kfree(dev->read_subdev->private);
-}
-
-static struct comedi_driver apci2032_driver = {
- .driver_name = "addi_apci_2032",
- .module = THIS_MODULE,
- .auto_attach = apci2032_auto_attach,
- .detach = apci2032_detach,
-};
-
-static int apci2032_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci2032_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci2032_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1004) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci2032_pci_table);
-
-static struct pci_driver apci2032_pci_driver = {
- .name = "addi_apci_2032",
- .id_table = apci2032_pci_table,
- .probe = apci2032_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci2032_driver, apci2032_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("ADDI-DATA APCI-2032, 32 channel DO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
deleted file mode 100644
index 2b382a52d80d..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * addi_apci_2200.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: Eric Stolz
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-#include "addi_watchdog.h"
-
-/*
- * I/O Register Map
- */
-#define APCI2200_DI_REG 0x00
-#define APCI2200_DO_REG 0x04
-#define APCI2200_WDOG_REG 0x08
-
-static int apci2200_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inw(dev->iobase + APCI2200_DI_REG);
-
- return insn->n;
-}
-
-static int apci2200_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- s->state = inw(dev->iobase + APCI2200_DO_REG);
-
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + APCI2200_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci2200_reset(struct comedi_device *dev)
-{
- outw(0x0, dev->iobase + APCI2200_DO_REG);
-
- addi_watchdog_reset(dev->iobase + APCI2200_WDOG_REG);
-
- return 0;
-}
-
-static int apci2200_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->iobase = pci_resource_start(pcidev, 1);
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Initialize the digital input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci2200_di_insn_bits;
-
- /* Initialize the digital output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci2200_do_insn_bits;
-
- /* Initialize the watchdog subdevice */
- s = &dev->subdevices[2];
- ret = addi_watchdog_init(s, dev->iobase + APCI2200_WDOG_REG);
- if (ret)
- return ret;
-
- apci2200_reset(dev);
- return 0;
-}
-
-static void apci2200_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- apci2200_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver apci2200_driver = {
- .driver_name = "addi_apci_2200",
- .module = THIS_MODULE,
- .auto_attach = apci2200_auto_attach,
- .detach = apci2200_detach,
-};
-
-static int apci2200_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci2200_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci2200_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1005) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci2200_pci_table);
-
-static struct pci_driver apci2200_pci_driver = {
- .name = "addi_apci_2200",
- .id_table = apci2200_pci_table,
- .probe = apci2200_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci2200_driver, apci2200_pci_driver);
-
-MODULE_DESCRIPTION("ADDI-DATA APCI-2200 Relay board, optically isolated");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
deleted file mode 100644
index 651b39319a3f..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ /dev/null
@@ -1,1124 +0,0 @@
-/*
- * addi_apci_3120.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-#include "amcc_s5933.h"
-
-/*
- * PCI BAR 0 register map (devpriv->amcc)
- * see amcc_s5933.h for register and bit defines
- */
-#define APCI3120_FIFO_ADVANCE_ON_BYTE_2 (1 << 29)
-
-/*
- * PCI BAR 1 register map (dev->iobase)
- */
-#define APCI3120_AI_FIFO_REG 0x00
-#define APCI3120_CTRL_REG 0x00
-#define APCI3120_CTRL_EXT_TRIG (1 << 15)
-#define APCI3120_CTRL_GATE(x) (1 << (12 + (x)))
-#define APCI3120_CTRL_PR(x) (((x) & 0xf) << 8)
-#define APCI3120_CTRL_PA(x) (((x) & 0xf) << 0)
-#define APCI3120_AI_SOFTTRIG_REG 0x02
-#define APCI3120_STATUS_REG 0x02
-#define APCI3120_STATUS_EOC_INT (1 << 15)
-#define APCI3120_STATUS_AMCC_INT (1 << 14)
-#define APCI3120_STATUS_EOS_INT (1 << 13)
-#define APCI3120_STATUS_TIMER2_INT (1 << 12)
-#define APCI3120_STATUS_INT_MASK (0xf << 12)
-#define APCI3120_STATUS_TO_DI_BITS(x) (((x) >> 8) & 0xf)
-#define APCI3120_STATUS_TO_VERSION(x) (((x) >> 4) & 0xf)
-#define APCI3120_STATUS_FIFO_FULL (1 << 2)
-#define APCI3120_STATUS_FIFO_EMPTY (1 << 1)
-#define APCI3120_STATUS_DA_READY (1 << 0)
-#define APCI3120_TIMER_REG 0x04
-#define APCI3120_CHANLIST_REG 0x06
-#define APCI3120_CHANLIST_INDEX(x) (((x) & 0xf) << 8)
-#define APCI3120_CHANLIST_UNIPOLAR (1 << 7)
-#define APCI3120_CHANLIST_GAIN(x) (((x) & 0x3) << 4)
-#define APCI3120_CHANLIST_MUX(x) (((x) & 0xf) << 0)
-#define APCI3120_AO_REG(x) (0x08 + (((x) / 4) * 2))
-#define APCI3120_AO_MUX(x) (((x) & 0x3) << 14)
-#define APCI3120_AO_DATA(x) ((x) << 0)
-#define APCI3120_TIMER_MODE_REG 0x0c
-#define APCI3120_TIMER_MODE(_t, _m) ((_m) << ((_t) * 2))
-#define APCI3120_TIMER_MODE0 0 /* I8254_MODE0 */
-#define APCI3120_TIMER_MODE2 1 /* I8254_MODE2 */
-#define APCI3120_TIMER_MODE4 2 /* I8254_MODE4 */
-#define APCI3120_TIMER_MODE5 3 /* I8254_MODE5 */
-#define APCI3120_TIMER_MODE_MASK(_t) (3 << ((_t) * 2))
-#define APCI3120_CTR0_REG 0x0d
-#define APCI3120_CTR0_DO_BITS(x) ((x) << 4)
-#define APCI3120_CTR0_TIMER_SEL(x) ((x) << 0)
-#define APCI3120_MODE_REG 0x0e
-#define APCI3120_MODE_TIMER2_CLK_OSC (0 << 6)
-#define APCI3120_MODE_TIMER2_CLK_OUT1 (1 << 6)
-#define APCI3120_MODE_TIMER2_CLK_EOC (2 << 6)
-#define APCI3120_MODE_TIMER2_CLK_EOS (3 << 6)
-#define APCI3120_MODE_TIMER2_CLK_MASK (3 << 6)
-#define APCI3120_MODE_TIMER2_AS_TIMER (0 << 4)
-#define APCI3120_MODE_TIMER2_AS_COUNTER (1 << 4)
-#define APCI3120_MODE_TIMER2_AS_WDOG (2 << 4)
-#define APCI3120_MODE_TIMER2_AS_MASK (3 << 4) /* sets AS_TIMER */
-#define APCI3120_MODE_SCAN_ENA (1 << 3)
-#define APCI3120_MODE_TIMER2_IRQ_ENA (1 << 2)
-#define APCI3120_MODE_EOS_IRQ_ENA (1 << 1)
-#define APCI3120_MODE_EOC_IRQ_ENA (1 << 0)
-
-/*
- * PCI BAR 2 register map (devpriv->addon)
- */
-#define APCI3120_ADDON_ADDR_REG 0x00
-#define APCI3120_ADDON_DATA_REG 0x02
-#define APCI3120_ADDON_CTRL_REG 0x04
-#define APCI3120_ADDON_CTRL_AMWEN_ENA (1 << 1)
-#define APCI3120_ADDON_CTRL_A2P_FIFO_ENA (1 << 0)
-
-/*
- * Board revisions
- */
-#define APCI3120_REVA 0xa
-#define APCI3120_REVB 0xb
-#define APCI3120_REVA_OSC_BASE 70 /* 70ns = 14.29MHz */
-#define APCI3120_REVB_OSC_BASE 50 /* 50ns = 20MHz */
-
-static const struct comedi_lrange apci3120_ai_range = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-enum apci3120_boardid {
- BOARD_APCI3120,
- BOARD_APCI3001,
-};
-
-struct apci3120_board {
- const char *name;
- unsigned int ai_is_16bit:1;
- unsigned int has_ao:1;
-};
-
-static const struct apci3120_board apci3120_boardtypes[] = {
- [BOARD_APCI3120] = {
- .name = "apci3120",
- .ai_is_16bit = 1,
- .has_ao = 1,
- },
- [BOARD_APCI3001] = {
- .name = "apci3001",
- },
-};
-
-struct apci3120_dmabuf {
- unsigned short *virt;
- dma_addr_t hw;
- unsigned int size;
- unsigned int use_size;
-};
-
-struct apci3120_private {
- unsigned long amcc;
- unsigned long addon;
- unsigned int osc_base;
- unsigned int use_dma:1;
- unsigned int use_double_buffer:1;
- unsigned int cur_dmabuf:1;
- struct apci3120_dmabuf dmabuf[2];
- unsigned char do_bits;
- unsigned char timer_mode;
- unsigned char mode;
- unsigned short ctrl;
-};
-
-static void apci3120_addon_write(struct comedi_device *dev,
- unsigned int val, unsigned int reg)
-{
- struct apci3120_private *devpriv = dev->private;
-
- /* 16-bit interface for AMCC add-on registers */
-
- outw(reg, devpriv->addon + APCI3120_ADDON_ADDR_REG);
- outw(val & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
-
- outw(reg + 2, devpriv->addon + APCI3120_ADDON_ADDR_REG);
- outw((val >> 16) & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
-}
-
-static void apci3120_init_dma(struct comedi_device *dev,
- struct apci3120_dmabuf *dmabuf)
-{
- struct apci3120_private *devpriv = dev->private;
-
- /* AMCC - enable transfer count and reset A2P FIFO */
- outl(AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
- devpriv->amcc + AMCC_OP_REG_AGCSTS);
-
- /* Add-On - enable transfer count and reset A2P FIFO */
- apci3120_addon_write(dev, AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
- AMCC_OP_REG_AGCSTS);
-
- /* AMCC - enable transfers and reset A2P flags */
- outl(RESET_A2P_FLAGS | EN_A2P_TRANSFERS,
- devpriv->amcc + AMCC_OP_REG_MCSR);
-
- /* Add-On - DMA start address */
- apci3120_addon_write(dev, dmabuf->hw, AMCC_OP_REG_AMWAR);
-
- /* Add-On - Number of acquisitions */
- apci3120_addon_write(dev, dmabuf->use_size, AMCC_OP_REG_AMWTC);
-
- /* AMCC - enable write complete (DMA) and set FIFO advance */
- outl(APCI3120_FIFO_ADVANCE_ON_BYTE_2 | AINT_WRITE_COMPL,
- devpriv->amcc + AMCC_OP_REG_INTCSR);
-
- /* Add-On - enable DMA */
- outw(APCI3120_ADDON_CTRL_AMWEN_ENA | APCI3120_ADDON_CTRL_A2P_FIFO_ENA,
- devpriv->addon + APCI3120_ADDON_CTRL_REG);
-}
-
-static void apci3120_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci3120_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- struct apci3120_dmabuf *dmabuf0 = &devpriv->dmabuf[0];
- struct apci3120_dmabuf *dmabuf1 = &devpriv->dmabuf[1];
- unsigned int dmalen0 = dmabuf0->size;
- unsigned int dmalen1 = dmabuf1->size;
- unsigned int scan_bytes;
-
- scan_bytes = comedi_samples_to_bytes(s, cmd->scan_end_arg);
-
- if (cmd->stop_src == TRIG_COUNT) {
- /*
-		 * Should the first buffer be filled completely? And should
-		 * the second buffer be filled completely once the first is?
- */
- if (dmalen0 > (cmd->stop_arg * scan_bytes))
- dmalen0 = cmd->stop_arg * scan_bytes;
- else if (dmalen1 > (cmd->stop_arg * scan_bytes - dmalen0))
- dmalen1 = cmd->stop_arg * scan_bytes - dmalen0;
- }
-
- if (cmd->flags & CMDF_WAKE_EOS) {
-		/* limit each DMA buffer to one scan so we wake up every scan */
- if (dmalen0 > scan_bytes) {
- dmalen0 = scan_bytes;
- if (cmd->scan_end_arg & 1)
- dmalen0 += 2;
- }
- if (dmalen1 > scan_bytes) {
- dmalen1 = scan_bytes;
- if (cmd->scan_end_arg & 1)
- dmalen1 -= 2;
- if (dmalen1 < 4)
- dmalen1 = 4;
- }
- } else {
-		/* don't let the DMA buffers exceed the comedi output buffer */
- if (dmalen0 > s->async->prealloc_bufsz)
- dmalen0 = s->async->prealloc_bufsz;
- if (dmalen1 > s->async->prealloc_bufsz)
- dmalen1 = s->async->prealloc_bufsz;
- }
- dmabuf0->use_size = dmalen0;
- dmabuf1->use_size = dmalen1;
-
- apci3120_init_dma(dev, dmabuf0);
-}
-
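The buffer clamping for TRIG_COUNT commands above can be exercised on its own. A minimal standalone sketch that mirrors it in userspace (buffer and scan sizes are invented for illustration):

#include <stdio.h>

/* mirrors the TRIG_COUNT clamping in apci3120_setup_dma() */
static void clamp_for_trig_count(unsigned int *len0, unsigned int *len1,
				 unsigned int stop_arg,
				 unsigned int scan_bytes)
{
	if (*len0 > stop_arg * scan_bytes)
		*len0 = stop_arg * scan_bytes;
	else if (*len1 > stop_arg * scan_bytes - *len0)
		*len1 = stop_arg * scan_bytes - *len0;
}

int main(void)
{
	unsigned int scan_bytes = 8 * 2;		/* invented: 8 chans, 2 bytes each */
	unsigned int len0 = 16384, len1 = 16384;	/* invented DMA buffer sizes */

	clamp_for_trig_count(&len0, &len1, 100, scan_bytes);
	printf("dmabuf0 use_size = %u, dmabuf1 use_size = %u\n", len0, len1);
	return 0;
}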
-/*
- * There are three timers on the board. They all use the same base
- * clock with a fixed prescaler for each timer. The base clock used
- * depends on the board version and type.
- *
- * APCI-3120 Rev A boards OSC = 14.29MHz base clock (~70ns)
- * APCI-3120 Rev B boards OSC = 20MHz base clock (50ns)
- * APCI-3001 boards OSC = 20MHz base clock (50ns)
- *
- * The prescalers for each timer are:
- * Timer 0 CLK = OSC/10
- * Timer 1 CLK = OSC/1000
- * Timer 2 CLK = OSC/1000
- */
-static unsigned int apci3120_ns_to_timer(struct comedi_device *dev,
- unsigned int timer,
- unsigned int ns,
- unsigned int flags)
-{
- struct apci3120_private *devpriv = dev->private;
- unsigned int prescale = (timer == 0) ? 10 : 1000;
- unsigned int timer_base = devpriv->osc_base * prescale;
- unsigned int divisor;
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_UP:
- divisor = DIV_ROUND_UP(ns, timer_base);
- break;
- case CMDF_ROUND_DOWN:
- divisor = ns / timer_base;
- break;
- case CMDF_ROUND_NEAREST:
- default:
- divisor = DIV_ROUND_CLOSEST(ns, timer_base);
- break;
- }
-
- if (timer == 2) {
- /* timer 2 is 24-bits */
- if (divisor > 0x00ffffff)
- divisor = 0x00ffffff;
- } else {
- /* timers 0 and 1 are 16-bits */
- if (divisor > 0xffff)
- divisor = 0xffff;
- }
- /* the timers require a minimum divisor of 2 */
- if (divisor < 2)
- divisor = 2;
-
- return divisor;
-}
-
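The divisor arithmetic above is easy to sanity-check in isolation. A minimal standalone sketch with the rounding helper declared locally, assuming a Rev B 50 ns base clock and timer 1's /1000 prescaler (the 1 ms request is illustrative):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

#define OSC_BASE_NS	50u	/* APCI3120_REVB_OSC_BASE */
#define PRESCALE	1000u	/* timer 1 prescaler */

int main(void)
{
	unsigned int ns = 1000000;	/* illustrative 1 ms scan interval */
	unsigned int timer_base = OSC_BASE_NS * PRESCALE;
	unsigned int divisor = DIV_ROUND_CLOSEST(ns, timer_base);

	if (divisor > 0xffff)	/* timers 0 and 1 are 16-bit */
		divisor = 0xffff;
	if (divisor < 2)	/* hardware minimum */
		divisor = 2;

	printf("1 ms with a %u ns * %u time base -> divisor %u\n",
	       OSC_BASE_NS, PRESCALE, divisor);	/* prints 20 */
	return 0;
}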
-static void apci3120_clr_timer2_interrupt(struct comedi_device *dev)
-{
- /* a dummy read of APCI3120_CTR0_REG clears the timer 2 interrupt */
- inb(dev->iobase + APCI3120_CTR0_REG);
-}
-
-static void apci3120_timer_write(struct comedi_device *dev,
- unsigned int timer, unsigned int val)
-{
- struct apci3120_private *devpriv = dev->private;
-
- /* write 16-bit value to timer (lower 16-bits of timer 2) */
- outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
- APCI3120_CTR0_TIMER_SEL(timer),
- dev->iobase + APCI3120_CTR0_REG);
- outw(val & 0xffff, dev->iobase + APCI3120_TIMER_REG);
-
- if (timer == 2) {
- /* write upper 16-bits to timer 2 */
- outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
- APCI3120_CTR0_TIMER_SEL(timer + 1),
- dev->iobase + APCI3120_CTR0_REG);
- outw((val >> 16) & 0xffff, dev->iobase + APCI3120_TIMER_REG);
- }
-}
-
-static unsigned int apci3120_timer_read(struct comedi_device *dev,
- unsigned int timer)
-{
- struct apci3120_private *devpriv = dev->private;
- unsigned int val;
-
- /* read 16-bit value from timer (lower 16-bits of timer 2) */
- outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
- APCI3120_CTR0_TIMER_SEL(timer),
- dev->iobase + APCI3120_CTR0_REG);
- val = inw(dev->iobase + APCI3120_TIMER_REG);
-
- if (timer == 2) {
- /* read upper 16-bits from timer 2 */
- outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
- APCI3120_CTR0_TIMER_SEL(timer + 1),
- dev->iobase + APCI3120_CTR0_REG);
- val |= (inw(dev->iobase + APCI3120_TIMER_REG) << 16);
- }
-
- return val;
-}
-
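Timer 2's 24-bit value is simply split across two 16-bit accesses and reassembled on read-back. A minimal standalone sketch of that round trip (no hardware access, the value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x00abcdef;	/* illustrative 24-bit timer 2 value */
	unsigned short lo, hi;
	unsigned int readback;

	/* the two 16-bit halves apci3120_timer_write() sends via outw() */
	lo = val & 0xffff;
	hi = (val >> 16) & 0xffff;

	/* the reassembly apci3120_timer_read() performs from two inw() reads */
	readback = lo | ((unsigned int)hi << 16);

	printf("wrote 0x%06x -> lo 0x%04x, hi 0x%04x -> read back 0x%06x\n",
	       val, lo, hi, readback);
	return 0;
}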
-static void apci3120_timer_set_mode(struct comedi_device *dev,
- unsigned int timer, unsigned int mode)
-{
- struct apci3120_private *devpriv = dev->private;
-
- devpriv->timer_mode &= ~APCI3120_TIMER_MODE_MASK(timer);
- devpriv->timer_mode |= APCI3120_TIMER_MODE(timer, mode);
- outb(devpriv->timer_mode, dev->iobase + APCI3120_TIMER_MODE_REG);
-}
-
-static void apci3120_timer_enable(struct comedi_device *dev,
- unsigned int timer, bool enable)
-{
- struct apci3120_private *devpriv = dev->private;
-
- if (enable)
- devpriv->ctrl |= APCI3120_CTRL_GATE(timer);
- else
- devpriv->ctrl &= ~APCI3120_CTRL_GATE(timer);
- outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
-}
-
-static void apci3120_exttrig_enable(struct comedi_device *dev, bool enable)
-{
- struct apci3120_private *devpriv = dev->private;
-
- if (enable)
- devpriv->ctrl |= APCI3120_CTRL_EXT_TRIG;
- else
- devpriv->ctrl &= ~APCI3120_CTRL_EXT_TRIG;
- outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
-}
-
-static void apci3120_set_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int n_chan, unsigned int *chanlist)
-{
- struct apci3120_private *devpriv = dev->private;
- int i;
-
- /* set chanlist for scan */
- for (i = 0; i < n_chan; i++) {
- unsigned int chan = CR_CHAN(chanlist[i]);
- unsigned int range = CR_RANGE(chanlist[i]);
- unsigned int val;
-
- val = APCI3120_CHANLIST_MUX(chan) |
- APCI3120_CHANLIST_GAIN(range) |
- APCI3120_CHANLIST_INDEX(i);
-
- if (comedi_range_is_unipolar(s, range))
- val |= APCI3120_CHANLIST_UNIPOLAR;
-
- outw(val, dev->iobase + APCI3120_CHANLIST_REG);
- }
-
- /* a dummy read of APCI3120_TIMER_MODE_REG resets the ai FIFO */
- inw(dev->iobase + APCI3120_TIMER_MODE_REG);
-
- /* set scan length (PR) and scan start (PA) */
- devpriv->ctrl = APCI3120_CTRL_PR(n_chan - 1) | APCI3120_CTRL_PA(0);
- outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
-
- /* enable chanlist scanning if necessary */
- if (n_chan > 1)
- devpriv->mode |= APCI3120_MODE_SCAN_ENA;
-}
-
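Each entry written to APCI3120_CHANLIST_REG packs mux, gain and slot index into one word. A minimal standalone sketch of that packing (macros copied from the defines above; the unipolar test is simplified from comedi_range_is_unipolar() and the channel/range values are illustrative):

#include <stdio.h>

#define APCI3120_CHANLIST_INDEX(x)	(((x) & 0xf) << 8)
#define APCI3120_CHANLIST_UNIPOLAR	(1 << 7)
#define APCI3120_CHANLIST_GAIN(x)	(((x) & 0x3) << 4)
#define APCI3120_CHANLIST_MUX(x)	(((x) & 0xf) << 0)

int main(void)
{
	unsigned int chan = 3, range = 5, index = 0;	/* illustrative values */
	unsigned int val;

	val = APCI3120_CHANLIST_MUX(chan) |
	      APCI3120_CHANLIST_GAIN(range) |
	      APCI3120_CHANLIST_INDEX(index);

	/* ranges 4..7 of apci3120_ai_range are the unipolar ones */
	if (range >= 4)
		val |= APCI3120_CHANLIST_UNIPOLAR;

	printf("chanlist entry for chan %u, range %u: 0x%04x\n",
	       chan, range, val);	/* prints 0x0093 */
	return 0;
}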
-static void apci3120_interrupt_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci3120_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- struct apci3120_dmabuf *dmabuf;
- unsigned int nbytes;
- unsigned int nsamples;
-
- dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
-
- nbytes = dmabuf->use_size - inl(devpriv->amcc + AMCC_OP_REG_MWTC);
-
- if (nbytes < dmabuf->use_size)
- dev_err(dev->class_dev, "Interrupted DMA transfer!\n");
- if (nbytes & 1) {
- dev_err(dev->class_dev, "Odd count of bytes in DMA ring!\n");
- async->events |= COMEDI_CB_ERROR;
- return;
- }
-
- nsamples = comedi_bytes_to_samples(s, nbytes);
- if (nsamples) {
- comedi_buf_write_samples(s, dmabuf->virt, nsamples);
-
- if (!(cmd->flags & CMDF_WAKE_EOS))
- async->events |= COMEDI_CB_EOS;
- }
-
- if ((async->events & COMEDI_CB_CANCEL_MASK) ||
- (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg))
- return;
-
- if (devpriv->use_double_buffer) {
- /* switch DMA buffers for next interrupt */
- devpriv->cur_dmabuf = !devpriv->cur_dmabuf;
- dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
- apci3120_init_dma(dev, dmabuf);
- } else {
- /* restart DMA if not using double buffering */
- apci3120_init_dma(dev, dmabuf);
- }
-}
-
-static irqreturn_t apci3120_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct apci3120_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int status;
- unsigned int int_amcc;
-
- status = inw(dev->iobase + APCI3120_STATUS_REG);
- int_amcc = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
-
- if (!(status & APCI3120_STATUS_INT_MASK) &&
- !(int_amcc & ANY_S593X_INT)) {
- dev_err(dev->class_dev, "IRQ from unknown source\n");
- return IRQ_NONE;
- }
-
- outl(int_amcc | AINT_INT_MASK, devpriv->amcc + AMCC_OP_REG_INTCSR);
-
- if (devpriv->ctrl & APCI3120_CTRL_EXT_TRIG)
- apci3120_exttrig_enable(dev, false);
-
- if (int_amcc & MASTER_ABORT_INT)
- dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
- if (int_amcc & TARGET_ABORT_INT)
- dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
-
- if ((status & APCI3120_STATUS_EOS_INT) &&
- (devpriv->mode & APCI3120_MODE_EOS_IRQ_ENA)) {
- unsigned short val;
- int i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- val = inw(dev->iobase + APCI3120_AI_FIFO_REG);
- comedi_buf_write_samples(s, &val, 1);
- }
-
- devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
- outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
- }
-
- if (status & APCI3120_STATUS_TIMER2_INT) {
- /*
- * for safety...
- * timer2 interrupts are not enabled in the driver
- */
- apci3120_clr_timer2_interrupt(dev);
- }
-
- if (status & APCI3120_STATUS_AMCC_INT) {
- /* AMCC- Clear write complete interrupt (DMA) */
- outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
-
- /* do some data transfer */
- apci3120_interrupt_dma(dev, s);
- }
-
- if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int apci3120_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci3120_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int divisor;
-
- /* set default mode bits */
- devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
- APCI3120_MODE_TIMER2_AS_TIMER;
-
- /* AMCC- Clear write complete interrupt (DMA) */
- outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
-
- devpriv->cur_dmabuf = 0;
-
- /* load chanlist for command scan */
- apci3120_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist);
-
- if (cmd->start_src == TRIG_EXT)
- apci3120_exttrig_enable(dev, true);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /*
- * Timer 1 is used in MODE2 (rate generator) to set the
- * start time for each scan.
- */
- divisor = apci3120_ns_to_timer(dev, 1, cmd->scan_begin_arg,
- cmd->flags);
- apci3120_timer_set_mode(dev, 1, APCI3120_TIMER_MODE2);
- apci3120_timer_write(dev, 1, divisor);
- }
-
- /*
- * Timer 0 is used in MODE2 (rate generator) to set the conversion
- * time for each acquisition.
- */
- divisor = apci3120_ns_to_timer(dev, 0, cmd->convert_arg, cmd->flags);
- apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE2);
- apci3120_timer_write(dev, 0, divisor);
-
- if (devpriv->use_dma)
- apci3120_setup_dma(dev, s);
- else
- devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
-
- /* set mode to enable acquisition */
- outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
-
- if (cmd->scan_begin_src == TRIG_TIMER)
- apci3120_timer_enable(dev, 1, true);
- apci3120_timer_enable(dev, 0, true);
-
- return 0;
-}
-
-static int apci3120_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int arg;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) { /* Test Delay timing */
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- 100000);
- }
-
- /* minimum conversion time per sample is 10us */
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* scan begin must be larger than the scan time */
- arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
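Step 4 above only enforces that a full scan's worth of conversions fits inside the requested scan interval. A minimal standalone sketch of that fix-up with illustrative timings (the real helper also flags the command so another cmdtest pass is requested):

#include <stdio.h>

int main(void)
{
	unsigned int convert_ns = 10000;	/* illustrative: 10 us per conversion */
	unsigned int scan_end = 8;		/* illustrative: 8 channels per scan */
	unsigned int scan_begin_ns = 50000;	/* illustrative requested scan interval */
	unsigned int min_scan_ns = convert_ns * scan_end;

	if (scan_begin_ns < min_scan_ns) {
		printf("scan_begin_arg raised from %u to %u ns\n",
		       scan_begin_ns, min_scan_ns);
		scan_begin_ns = min_scan_ns;
	} else {
		printf("scan_begin_arg of %u ns already covers %u ns of conversions\n",
		       scan_begin_ns, min_scan_ns);
	}
	return 0;
}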
-static int apci3120_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci3120_private *devpriv = dev->private;
-
- /* Add-On - disable DMA */
- outw(0, devpriv->addon + 4);
-
- /* Add-On - disable bus master */
- apci3120_addon_write(dev, 0, AMCC_OP_REG_AGCSTS);
-
- /* AMCC - disable bus master */
- outl(0, devpriv->amcc + AMCC_OP_REG_MCSR);
-
- /* disable all counters, ext trigger, and reset scan */
- devpriv->ctrl = 0;
- outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
-
- /* DISABLE_ALL_INTERRUPT */
- devpriv->mode = 0;
- outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
-
- inw(dev->iobase + APCI3120_STATUS_REG);
- devpriv->cur_dmabuf = 0;
-
- return 0;
-}
-
-static int apci3120_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + APCI3120_STATUS_REG);
- if ((status & APCI3120_STATUS_EOC_INT) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int apci3120_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3120_private *devpriv = dev->private;
- unsigned int divisor;
- int ret;
- int i;
-
- /* set mode for A/D conversions by software trigger with timer 0 */
- devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
- APCI3120_MODE_TIMER2_AS_TIMER;
- outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
-
- /* load chanlist for single channel scan */
- apci3120_set_chanlist(dev, s, 1, &insn->chanspec);
-
- /*
- * Timer 0 is used in MODE4 (software triggered strobe) to set the
- * conversion time for each acquisition. Each conversion is triggered
-	 * when the divisor is written to the timer. The conversion is done
- * when the EOC bit in the status register is '0'.
- */
- apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE4);
- apci3120_timer_enable(dev, 0, true);
-
- /* fixed conversion time of 10 us */
- divisor = apci3120_ns_to_timer(dev, 0, 10000, CMDF_ROUND_NEAREST);
-
- for (i = 0; i < insn->n; i++) {
- /* trigger conversion */
- apci3120_timer_write(dev, 0, divisor);
-
- ret = comedi_timeout(dev, s, insn, apci3120_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[i] = inw(dev->iobase + APCI3120_AI_FIFO_REG);
- }
-
- return insn->n;
-}
-
-static int apci3120_ao_ready(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + APCI3120_STATUS_REG);
- if (status & APCI3120_STATUS_DA_READY)
- return 0;
- return -EBUSY;
-}
-
-static int apci3120_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
- int ret;
-
- ret = comedi_timeout(dev, s, insn, apci3120_ao_ready, 0);
- if (ret)
- return ret;
-
- outw(APCI3120_AO_MUX(chan) | APCI3120_AO_DATA(val),
- dev->iobase + APCI3120_AO_REG(chan));
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int apci3120_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int status;
-
- status = inw(dev->iobase + APCI3120_STATUS_REG);
- data[1] = APCI3120_STATUS_TO_DI_BITS(status);
-
- return insn->n;
-}
-
-static int apci3120_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3120_private *devpriv = dev->private;
-
- if (comedi_dio_update_state(s, data)) {
- devpriv->do_bits = s->state;
- outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits),
- dev->iobase + APCI3120_CTR0_REG);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci3120_timer_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3120_private *devpriv = dev->private;
- unsigned int divisor;
- unsigned int status;
- unsigned int mode;
- unsigned int timer_mode;
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- apci3120_clr_timer2_interrupt(dev);
- divisor = apci3120_ns_to_timer(dev, 2, data[1],
- CMDF_ROUND_DOWN);
- apci3120_timer_write(dev, 2, divisor);
- apci3120_timer_enable(dev, 2, true);
- break;
-
- case INSN_CONFIG_DISARM:
- apci3120_timer_enable(dev, 2, false);
- apci3120_clr_timer2_interrupt(dev);
- break;
-
- case INSN_CONFIG_GET_COUNTER_STATUS:
- data[1] = 0;
- data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
- COMEDI_COUNTER_TERMINAL_COUNT;
-
- if (devpriv->ctrl & APCI3120_CTRL_GATE(2)) {
- data[1] |= COMEDI_COUNTER_ARMED;
- data[1] |= COMEDI_COUNTER_COUNTING;
- }
- status = inw(dev->iobase + APCI3120_STATUS_REG);
- if (status & APCI3120_STATUS_TIMER2_INT) {
- data[1] &= ~COMEDI_COUNTER_COUNTING;
- data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
- }
- break;
-
- case INSN_CONFIG_SET_COUNTER_MODE:
- switch (data[1]) {
- case I8254_MODE0:
- mode = APCI3120_MODE_TIMER2_AS_COUNTER;
- timer_mode = APCI3120_TIMER_MODE0;
- break;
- case I8254_MODE2:
- mode = APCI3120_MODE_TIMER2_AS_TIMER;
- timer_mode = APCI3120_TIMER_MODE2;
- break;
- case I8254_MODE4:
- mode = APCI3120_MODE_TIMER2_AS_TIMER;
- timer_mode = APCI3120_TIMER_MODE4;
- break;
- case I8254_MODE5:
- mode = APCI3120_MODE_TIMER2_AS_WDOG;
- timer_mode = APCI3120_TIMER_MODE5;
- break;
- default:
- return -EINVAL;
- }
- apci3120_timer_enable(dev, 2, false);
- apci3120_clr_timer2_interrupt(dev);
- apci3120_timer_set_mode(dev, 2, timer_mode);
- devpriv->mode &= ~APCI3120_MODE_TIMER2_AS_MASK;
- devpriv->mode |= mode;
- outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
- break;
-
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static int apci3120_timer_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = apci3120_timer_read(dev, 2);
-
- return insn->n;
-}
-
-static void apci3120_dma_alloc(struct comedi_device *dev)
-{
- struct apci3120_private *devpriv = dev->private;
- struct apci3120_dmabuf *dmabuf;
- int order;
- int i;
-
- for (i = 0; i < 2; i++) {
- dmabuf = &devpriv->dmabuf[i];
- for (order = 2; order >= 0; order--) {
- dmabuf->virt = dma_alloc_coherent(dev->hw_dev,
- PAGE_SIZE << order,
- &dmabuf->hw,
- GFP_KERNEL);
- if (dmabuf->virt)
- break;
- }
- if (!dmabuf->virt)
- break;
- dmabuf->size = PAGE_SIZE << order;
-
- if (i == 0)
- devpriv->use_dma = 1;
- if (i == 1)
- devpriv->use_double_buffer = 1;
- }
-}
-
-static void apci3120_dma_free(struct comedi_device *dev)
-{
- struct apci3120_private *devpriv = dev->private;
- struct apci3120_dmabuf *dmabuf;
- int i;
-
- if (!devpriv)
- return;
-
- for (i = 0; i < 2; i++) {
- dmabuf = &devpriv->dmabuf[i];
- if (dmabuf->virt) {
- dma_free_coherent(dev->hw_dev, dmabuf->size,
- dmabuf->virt, dmabuf->hw);
- }
- }
-}
-
-static void apci3120_reset(struct comedi_device *dev)
-{
- /* disable all interrupt sources */
- outb(0, dev->iobase + APCI3120_MODE_REG);
-
- /* disable all counters, ext trigger, and reset scan */
- outw(0, dev->iobase + APCI3120_CTRL_REG);
-
- /* clear interrupt status */
- inw(dev->iobase + APCI3120_STATUS_REG);
-}
-
-static int apci3120_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct apci3120_board *board = NULL;
- struct apci3120_private *devpriv;
- struct comedi_subdevice *s;
- unsigned int status;
- int ret;
-
- if (context < ARRAY_SIZE(apci3120_boardtypes))
- board = &apci3120_boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- pci_set_master(pcidev);
-
- dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->amcc = pci_resource_start(pcidev, 0);
- devpriv->addon = pci_resource_start(pcidev, 2);
-
- apci3120_reset(dev);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci3120_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0) {
- dev->irq = pcidev->irq;
-
- apci3120_dma_alloc(dev);
- }
- }
-
- status = inw(dev->iobase + APCI3120_STATUS_REG);
- if (APCI3120_STATUS_TO_VERSION(status) == APCI3120_REVB ||
- context == BOARD_APCI3001)
- devpriv->osc_base = APCI3120_REVB_OSC_BASE;
- else
- devpriv->osc_base = APCI3120_REVA_OSC_BASE;
-
- ret = comedi_alloc_subdevices(dev, 5);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
- s->n_chan = 16;
- s->maxdata = board->ai_is_16bit ? 0xffff : 0x0fff;
- s->range_table = &apci3120_ai_range;
- s->insn_read = apci3120_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = apci3120_ai_cmdtest;
- s->do_cmd = apci3120_ai_cmd;
- s->cancel = apci3120_cancel;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- if (board->has_ao) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 8;
- s->maxdata = 0x3fff;
- s->range_table = &range_bipolar10;
- s->insn_write = apci3120_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Digital Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci3120_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci3120_do_insn_bits;
-
- /* Timer subdevice */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 1;
- s->maxdata = 0x00ffffff;
- s->insn_config = apci3120_timer_insn_config;
- s->insn_read = apci3120_timer_insn_read;
-
- return 0;
-}
-
-static void apci3120_detach(struct comedi_device *dev)
-{
- comedi_pci_detach(dev);
- apci3120_dma_free(dev);
-}
-
-static struct comedi_driver apci3120_driver = {
- .driver_name = "addi_apci_3120",
- .module = THIS_MODULE,
- .auto_attach = apci3120_auto_attach,
- .detach = apci3120_detach,
-};
-
-static int apci3120_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci3120_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci3120_pci_table[] = {
- { PCI_VDEVICE(AMCC, 0x818d), BOARD_APCI3120 },
- { PCI_VDEVICE(AMCC, 0x828d), BOARD_APCI3001 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci3120_pci_table);
-
-static struct pci_driver apci3120_pci_driver = {
- .name = "addi_apci_3120",
- .id_table = apci3120_pci_table,
- .probe = apci3120_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci3120_driver, apci3120_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("ADDI-DATA APCI-3120, Analog input board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
deleted file mode 100644
index 40ff91411139..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * addi_apci_3501.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: Eric Stolz
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-
-#include "../comedi_pci.h"
-#include "addi_tcw.h"
-#include "amcc_s5933.h"
-
-/*
- * PCI bar 1 register I/O map
- */
-#define APCI3501_AO_CTRL_STATUS_REG 0x00
-#define APCI3501_AO_CTRL_BIPOLAR BIT(0)
-#define APCI3501_AO_STATUS_READY BIT(8)
-#define APCI3501_AO_DATA_REG 0x04
-#define APCI3501_AO_DATA_CHAN(x) ((x) << 0)
-#define APCI3501_AO_DATA_VAL(x) ((x) << 8)
-#define APCI3501_AO_DATA_BIPOLAR BIT(31)
-#define APCI3501_AO_TRIG_SCS_REG 0x08
-#define APCI3501_TIMER_BASE 0x20
-#define APCI3501_DO_REG 0x40
-#define APCI3501_DI_REG 0x50
-
-/*
- * AMCC S5933 NVRAM
- */
-#define NVRAM_USER_DATA_START 0x100
-
-#define NVCMD_BEGIN_READ (0x7 << 5)
-#define NVCMD_LOAD_LOW (0x4 << 5)
-#define NVCMD_LOAD_HIGH (0x5 << 5)
-
-/*
- * Function types stored in the eeprom
- */
-#define EEPROM_DIGITALINPUT 0
-#define EEPROM_DIGITALOUTPUT 1
-#define EEPROM_ANALOGINPUT 2
-#define EEPROM_ANALOGOUTPUT 3
-#define EEPROM_TIMER 4
-#define EEPROM_WATCHDOG 5
-#define EEPROM_TIMER_WATCHDOG_COUNTER 10
-
-struct apci3501_private {
- unsigned long amcc;
- unsigned long tcw;
- struct task_struct *tsk_Current;
- unsigned char timer_mode;
-};
-
-static struct comedi_lrange apci3501_ao_range = {
- 2, {
- BIP_RANGE(10),
- UNI_RANGE(10)
- }
-};
-
-static int apci3501_wait_for_dac(struct comedi_device *dev)
-{
- unsigned int status;
-
- do {
- status = inl(dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
- } while (!(status & APCI3501_AO_STATUS_READY));
-
- return 0;
-}
-
-static int apci3501_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int cfg = APCI3501_AO_DATA_CHAN(chan);
- int ret;
- int i;
-
- /*
- * All analog output channels have the same output range.
-	 * 14-bit bipolar: +/-10V
-	 * 13-bit unipolar: 0-10V
- * Changing the range of one channel changes all of them!
- */
- if (range) {
- outl(0, dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
- } else {
- cfg |= APCI3501_AO_DATA_BIPOLAR;
- outl(APCI3501_AO_CTRL_BIPOLAR,
- dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
- }
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- if (range == 1) {
- if (data[i] > 0x1fff) {
- dev_err(dev->class_dev,
- "Unipolar resolution is only 13-bits\n");
- return -EINVAL;
- }
- }
-
- ret = apci3501_wait_for_dac(dev);
- if (ret)
- return ret;
-
- outl(cfg | APCI3501_AO_DATA_VAL(val),
- dev->iobase + APCI3501_AO_DATA_REG);
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
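The value written to APCI3501_AO_DATA_REG packs channel, raw code and the bipolar flag into one 32-bit word. A minimal standalone sketch of that packing (macros copied from the defines above; the channel, code and range choice are illustrative):

#include <stdio.h>

#define APCI3501_AO_DATA_CHAN(x)	((x) << 0)
#define APCI3501_AO_DATA_VAL(x)		((x) << 8)
#define APCI3501_AO_DATA_BIPOLAR	(1u << 31)	/* BIT(31) in the driver */

int main(void)
{
	unsigned int chan = 2;		/* illustrative channel */
	unsigned int val = 0x2000;	/* illustrative mid-scale 14-bit code */
	int bipolar = 1;		/* range index 0 of apci3501_ao_range */
	unsigned int word;

	word = APCI3501_AO_DATA_CHAN(chan) | APCI3501_AO_DATA_VAL(val);
	if (bipolar)
		word |= APCI3501_AO_DATA_BIPOLAR;

	printf("word for APCI3501_AO_DATA_REG: 0x%08x\n", word);	/* 0x80200002 */
	return 0;
}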
-#include "addi-data/hwdrv_apci3501.c"
-
-static int apci3501_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inl(dev->iobase + APCI3501_DI_REG) & 0x3;
-
- return insn->n;
-}
-
-static int apci3501_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- s->state = inl(dev->iobase + APCI3501_DO_REG);
-
- if (comedi_dio_update_state(s, data))
- outl(s->state, dev->iobase + APCI3501_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void apci3501_eeprom_wait(unsigned long iobase)
-{
- unsigned char val;
-
- do {
- val = inb(iobase + AMCC_OP_REG_MCSR_NVCMD);
- } while (val & 0x80);
-}
-
-static unsigned short apci3501_eeprom_readw(unsigned long iobase,
- unsigned short addr)
-{
- unsigned short val = 0;
- unsigned char tmp;
- unsigned char i;
-
- /* Add the offset to the start of the user data */
- addr += NVRAM_USER_DATA_START;
-
- for (i = 0; i < 2; i++) {
- /* Load the low 8 bit address */
- outb(NVCMD_LOAD_LOW, iobase + AMCC_OP_REG_MCSR_NVCMD);
- apci3501_eeprom_wait(iobase);
- outb((addr + i) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
- apci3501_eeprom_wait(iobase);
-
- /* Load the high 8 bit address */
- outb(NVCMD_LOAD_HIGH, iobase + AMCC_OP_REG_MCSR_NVCMD);
- apci3501_eeprom_wait(iobase);
- outb(((addr + i) >> 8) & 0xff,
- iobase + AMCC_OP_REG_MCSR_NVDATA);
- apci3501_eeprom_wait(iobase);
-
- /* Read the eeprom data byte */
- outb(NVCMD_BEGIN_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
- apci3501_eeprom_wait(iobase);
- tmp = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
- apci3501_eeprom_wait(iobase);
-
- if (i == 0)
- val |= tmp;
- else
- val |= (tmp << 8);
- }
-
- return val;
-}
-
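The word read above reduces to two byte reads at addr and addr+1 inside the NVRAM user-data area, assembled little-endian. A minimal standalone sketch against an in-memory mock of the EEPROM (no hardware access, contents invented for illustration):

#include <stdio.h>

#define NVRAM_USER_DATA_START	0x100

/* in-memory stand-in for the S5933 serial EEPROM */
static unsigned char nvram[0x200];

static unsigned short mock_eeprom_readw(unsigned short addr)
{
	unsigned short val = 0;
	unsigned char i;

	addr += NVRAM_USER_DATA_START;	/* offset into the user data area */
	for (i = 0; i < 2; i++) {
		unsigned char tmp = nvram[addr + i];

		val |= (i == 0) ? tmp : (tmp << 8);	/* little-endian */
	}
	return val;
}

int main(void)
{
	/* pretend the word at user-data offset 10 holds 0x1234 */
	nvram[NVRAM_USER_DATA_START + 10] = 0x34;
	nvram[NVRAM_USER_DATA_START + 11] = 0x12;

	printf("eeprom word at offset 10: 0x%04x\n", mock_eeprom_readw(10));
	return 0;
}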
-static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
-{
- struct apci3501_private *devpriv = dev->private;
- unsigned char nfuncs;
- int i;
-
- nfuncs = apci3501_eeprom_readw(devpriv->amcc, 10) & 0xff;
-
- /* Read functionality details */
- for (i = 0; i < nfuncs; i++) {
- unsigned short offset = i * 4;
- unsigned short addr;
- unsigned char func;
- unsigned short val;
-
- func = apci3501_eeprom_readw(devpriv->amcc, 12 + offset) & 0x3f;
- addr = apci3501_eeprom_readw(devpriv->amcc, 14 + offset);
-
- if (func == EEPROM_ANALOGOUTPUT) {
- val = apci3501_eeprom_readw(devpriv->amcc, addr + 10);
- return (val >> 4) & 0x3ff;
- }
- }
- return 0;
-}
-
-static int apci3501_eeprom_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3501_private *devpriv = dev->private;
- unsigned short addr = CR_CHAN(insn->chanspec);
-
- data[0] = apci3501_eeprom_readw(devpriv->amcc, 2 * addr);
-
- return insn->n;
-}
-
-static irqreturn_t apci3501_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct apci3501_private *devpriv = dev->private;
- unsigned int status;
- unsigned int ctrl;
-
- /* Disable Interrupt */
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_IRQ_ENA);
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- status = inl(devpriv->tcw + ADDI_TCW_IRQ_REG);
- if (!(status & ADDI_TCW_IRQ)) {
- dev_err(dev->class_dev, "IRQ from unknown source\n");
- return IRQ_NONE;
- }
-
-	/* Send a signal from kernel to user space and re-enable the interrupt */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_IRQ_ENA);
- ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
- inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
-
- return IRQ_HANDLED;
-}
-
-static int apci3501_reset(struct comedi_device *dev)
-{
- unsigned int val;
- int chan;
- int ret;
-
- /* Reset all digital outputs to "0" */
- outl(0x0, dev->iobase + APCI3501_DO_REG);
-
- /* Default all analog outputs to 0V (bipolar) */
- outl(APCI3501_AO_CTRL_BIPOLAR,
- dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
- val = APCI3501_AO_DATA_BIPOLAR | APCI3501_AO_DATA_VAL(0);
-
- /* Set all analog output channels */
- for (chan = 0; chan < 8; chan++) {
- ret = apci3501_wait_for_dac(dev);
- if (ret) {
- dev_warn(dev->class_dev,
- "%s: DAC not-ready for channel %i\n",
- __func__, chan);
- } else {
- outl(val | APCI3501_AO_DATA_CHAN(chan),
- dev->iobase + APCI3501_AO_DATA_REG);
- }
- }
-
- return 0;
-}
-
-static int apci3501_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct apci3501_private *devpriv;
- struct comedi_subdevice *s;
- int ao_n_chan;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv->amcc = pci_resource_start(pcidev, 0);
- dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->tcw = dev->iobase + APCI3501_TIMER_BASE;
-
- ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci3501_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 5);
- if (ret)
- return ret;
-
- /* Initialize the analog output subdevice */
- s = &dev->subdevices[0];
- if (ao_n_chan) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = ao_n_chan;
- s->maxdata = 0x3fff;
- s->range_table = &apci3501_ao_range;
- s->insn_write = apci3501_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Initialize the digital input subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 2;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci3501_di_insn_bits;
-
- /* Initialize the digital output subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci3501_do_insn_bits;
-
- /* Initialize the timer/watchdog subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
- s->insn_write = apci3501_write_insn_timer;
- s->insn_read = apci3501_read_insn_timer;
- s->insn_config = apci3501_config_insn_timer;
-
- /* Initialize the eeprom subdevice */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
- s->n_chan = 256;
- s->maxdata = 0xffff;
- s->insn_read = apci3501_eeprom_insn_read;
-
- apci3501_reset(dev);
- return 0;
-}
-
-static void apci3501_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- apci3501_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver apci3501_driver = {
- .driver_name = "addi_apci_3501",
- .module = THIS_MODULE,
- .auto_attach = apci3501_auto_attach,
- .detach = apci3501_detach,
-};
-
-static int apci3501_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci3501_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci3501_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci3501_pci_table);
-
-static struct pci_driver apci3501_pci_driver = {
- .name = "addi_apci_3501",
- .id_table = apci3501_pci_table,
- .probe = apci3501_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci3501_driver, apci3501_pci_driver);
-
-MODULE_DESCRIPTION("ADDI-DATA APCI-3501 Analog output board");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
deleted file mode 100644
index bef6efc84efd..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ /dev/null
@@ -1,968 +0,0 @@
-/*
- * addi_apci_3xxx.c
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: S. Weber
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +49(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#define CONV_UNIT_NS (1 << 0)
-#define CONV_UNIT_US (1 << 1)
-#define CONV_UNIT_MS (1 << 2)
-
-static const struct comedi_lrange apci3xxx_ai_range = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-static const struct comedi_lrange apci3xxx_ao_range = {
- 2, {
- BIP_RANGE(10),
- UNI_RANGE(10)
- }
-};
-
-enum apci3xxx_boardid {
- BOARD_APCI3000_16,
- BOARD_APCI3000_8,
- BOARD_APCI3000_4,
- BOARD_APCI3006_16,
- BOARD_APCI3006_8,
- BOARD_APCI3006_4,
- BOARD_APCI3010_16,
- BOARD_APCI3010_8,
- BOARD_APCI3010_4,
- BOARD_APCI3016_16,
- BOARD_APCI3016_8,
- BOARD_APCI3016_4,
- BOARD_APCI3100_16_4,
- BOARD_APCI3100_8_4,
- BOARD_APCI3106_16_4,
- BOARD_APCI3106_8_4,
- BOARD_APCI3110_16_4,
- BOARD_APCI3110_8_4,
- BOARD_APCI3116_16_4,
- BOARD_APCI3116_8_4,
- BOARD_APCI3003,
- BOARD_APCI3002_16,
- BOARD_APCI3002_8,
- BOARD_APCI3002_4,
- BOARD_APCI3500,
-};
-
-struct apci3xxx_boardinfo {
- const char *name;
- int ai_subdev_flags;
- int ai_n_chan;
- unsigned int ai_maxdata;
- unsigned char ai_conv_units;
- unsigned int ai_min_acq_ns;
- unsigned int has_ao:1;
- unsigned int has_dig_in:1;
- unsigned int has_dig_out:1;
- unsigned int has_ttl_io:1;
-};
-
-static const struct apci3xxx_boardinfo apci3xxx_boardtypes[] = {
- [BOARD_APCI3000_16] = {
- .name = "apci3000-16",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3000_8] = {
- .name = "apci3000-8",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3000_4] = {
- .name = "apci3000-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 4,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3006_16] = {
- .name = "apci3006-16",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3006_8] = {
- .name = "apci3006-8",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3006_4] = {
- .name = "apci3006-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 4,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3010_16] = {
- .name = "apci3010-16",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3010_8] = {
- .name = "apci3010-8",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3010_4] = {
- .name = "apci3010-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 4,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3016_16] = {
- .name = "apci3016-16",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3016_8] = {
- .name = "apci3016-8",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3016_4] = {
- .name = "apci3016-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 4,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3100_16_4] = {
- .name = "apci3100-16-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ao = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3100_8_4] = {
- .name = "apci3100-8-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ao = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3106_16_4] = {
- .name = "apci3106-16-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ao = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3106_8_4] = {
- .name = "apci3106-8-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 10000,
- .has_ao = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3110_16_4] = {
- .name = "apci3110-16-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_ao = 1,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3110_8_4] = {
- .name = "apci3110-8-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0x0fff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_ao = 1,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3116_16_4] = {
- .name = "apci3116-16-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_ao = 1,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3116_8_4] = {
- .name = "apci3116-8-4",
- .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_ao = 1,
- .has_dig_in = 1,
- .has_dig_out = 1,
- .has_ttl_io = 1,
- },
- [BOARD_APCI3003] = {
- .name = "apci3003",
- .ai_subdev_flags = SDF_DIFF,
- .ai_n_chan = 4,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US |
- CONV_UNIT_NS,
- .ai_min_acq_ns = 2500,
- .has_dig_in = 1,
- .has_dig_out = 1,
- },
- [BOARD_APCI3002_16] = {
- .name = "apci3002-16",
- .ai_subdev_flags = SDF_DIFF,
- .ai_n_chan = 16,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- },
- [BOARD_APCI3002_8] = {
- .name = "apci3002-8",
- .ai_subdev_flags = SDF_DIFF,
- .ai_n_chan = 8,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- },
- [BOARD_APCI3002_4] = {
- .name = "apci3002-4",
- .ai_subdev_flags = SDF_DIFF,
- .ai_n_chan = 4,
- .ai_maxdata = 0xffff,
- .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
- .ai_min_acq_ns = 5000,
- .has_dig_in = 1,
- .has_dig_out = 1,
- },
- [BOARD_APCI3500] = {
- .name = "apci3500",
- .has_ao = 1,
- .has_ttl_io = 1,
- },
-};
-
-struct apci3xxx_private {
- unsigned int ai_timer;
- unsigned char ai_time_base;
-};
-
-static irqreturn_t apci3xxx_irq_handler(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status;
- unsigned int val;
-
-	/* Check whether an interrupt has occurred */
- status = readl(dev->mmio + 16);
- if ((status & 0x2) == 0x2) {
- /* Reset the interrupt */
- writel(status, dev->mmio + 16);
-
- val = readl(dev->mmio + 28);
- comedi_buf_write_samples(s, &val, 1);
-
- s->async->events |= COMEDI_CB_EOA;
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
-}
-
-static int apci3xxx_ai_started(struct comedi_device *dev)
-{
- if ((readl(dev->mmio + 8) & 0x80000) == 0x80000)
- return 1;
-
- return 0;
-}
-
-static int apci3xxx_ai_setup(struct comedi_device *dev, unsigned int chanspec)
-{
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
- unsigned int delay_mode;
- unsigned int val;
-
- if (apci3xxx_ai_started(dev))
- return -EBUSY;
-
- /* Clear the FIFO */
- writel(0x10000, dev->mmio + 12);
-
- /* Get and save the delay mode */
- delay_mode = readl(dev->mmio + 4);
- delay_mode &= 0xfffffef0;
-
- /* Channel configuration selection */
- writel(delay_mode, dev->mmio + 4);
-
-	/* Write the channel configuration */
- val = (range & 3) | ((range >> 2) << 6) |
- ((aref == AREF_DIFF) << 7);
- writel(val, dev->mmio + 0);
-
- /* Channel selection */
- writel(delay_mode | 0x100, dev->mmio + 4);
- writel(chan, dev->mmio + 0);
-
- /* Restore delay mode */
- writel(delay_mode, dev->mmio + 4);
-
-	/* Set the number of sequences to 1 */
- writel(1, dev->mmio + 48);
-
- return 0;
-}
-
-static int apci3xxx_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readl(dev->mmio + 20);
- if (status & 0x1)
- return 0;
- return -EBUSY;
-}
-
-static int apci3xxx_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
- int i;
-
- ret = apci3xxx_ai_setup(dev, insn->chanspec);
- if (ret)
- return ret;
-
- for (i = 0; i < insn->n; i++) {
- /* Start the conversion */
- writel(0x80000, dev->mmio + 8);
-
-		/* Wait for the end of conversion */
- ret = comedi_timeout(dev, s, insn, apci3xxx_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* Read the analog value */
- data[i] = readl(dev->mmio + 28);
- }
-
- return insn->n;
-}
-
-static int apci3xxx_ai_ns_to_timer(struct comedi_device *dev,
- unsigned int *ns, unsigned int flags)
-{
- const struct apci3xxx_boardinfo *board = dev->board_ptr;
- struct apci3xxx_private *devpriv = dev->private;
- unsigned int base;
- unsigned int timer;
- int time_base;
-
- /* time_base: 0 = ns, 1 = us, 2 = ms */
- for (time_base = 0; time_base < 3; time_base++) {
- /* skip unsupported time bases */
- if (!(board->ai_conv_units & (1 << time_base)))
- continue;
-
- switch (time_base) {
- case 0:
- base = 1;
- break;
- case 1:
- base = 1000;
- break;
- case 2:
- base = 1000000;
- break;
- }
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- timer = (*ns + base / 2) / base;
- break;
- case CMDF_ROUND_DOWN:
- timer = *ns / base;
- break;
- case CMDF_ROUND_UP:
- timer = (*ns + base - 1) / base;
- break;
- }
-
- if (timer < 0x10000) {
- devpriv->ai_time_base = time_base;
- devpriv->ai_timer = timer;
-			*ns = timer * base;
- return 0;
- }
- }
- return -EINVAL;
-}
-
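The search above picks the coarsest supported unit in which the requested period still fits a 16-bit timer. A minimal standalone sketch of that selection (conversion-unit flags copied from the defines above, the 250 us request is illustrative; the sketch reports the rounded period as timer * base nanoseconds):

#include <stdio.h>

#define CONV_UNIT_NS	(1 << 0)
#define CONV_UNIT_US	(1 << 1)
#define CONV_UNIT_MS	(1 << 2)

int main(void)
{
	unsigned int conv_units = CONV_UNIT_MS | CONV_UNIT_US;	/* e.g. an apci3000 */
	unsigned int ns = 250000;	/* illustrative 250 us conversion period */
	static const unsigned int base[3] = { 1, 1000, 1000000 };
	int time_base;

	for (time_base = 0; time_base < 3; time_base++) {
		unsigned int timer;

		if (!(conv_units & (1 << time_base)))
			continue;	/* unit not supported by this board */

		timer = (ns + base[time_base] / 2) / base[time_base];
		if (timer < 0x10000) {
			printf("time_base %d, timer %u, period %u ns\n",
			       time_base, timer, timer * base[time_base]);
			return 0;
		}
	}
	printf("no usable time base for %u ns\n", ns);
	return 1;
}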
-static int apci3xxx_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct apci3xxx_boardinfo *board = dev->board_ptr;
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_min_acq_ns);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- arg = cmd->convert_arg;
- err |= apci3xxx_ai_ns_to_timer(dev, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int apci3xxx_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct apci3xxx_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- ret = apci3xxx_ai_setup(dev, cmd->chanlist[0]);
- if (ret)
- return ret;
-
- /* Set the convert timing unit */
- writel(devpriv->ai_time_base, dev->mmio + 36);
-
- /* Set the convert timing */
- writel(devpriv->ai_timer, dev->mmio + 32);
-
- /* Start the conversion */
- writel(0x180000, dev->mmio + 8);
-
- return 0;
-}
-
-static int apci3xxx_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- return 0;
-}
-
-static int apci3xxx_ao_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readl(dev->mmio + 96);
- if (status & 0x100)
- return 0;
- return -EBUSY;
-}
-
-static int apci3xxx_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int ret;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- /* Set the range selection */
- writel(range, dev->mmio + 96);
-
- /* Write the analog value to the selected channel */
- writel((val << 8) | chan, dev->mmio + 100);
-
- /* Wait the end of transfer */
- ret = comedi_timeout(dev, s, insn, apci3xxx_ao_eoc, 0);
- if (ret)
- return ret;
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int apci3xxx_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inl(dev->iobase + 32) & 0xf;
-
- return insn->n;
-}
-
-static int apci3xxx_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- s->state = inl(dev->iobase + 48) & 0xf;
-
- if (comedi_dio_update_state(s, data))
- outl(s->state, dev->iobase + 48);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci3xxx_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = 0;
- int ret;
-
- /*
- * Port 0 (channels 0-7) are always inputs
- * Port 1 (channels 8-15) are always outputs
- * Port 2 (channels 16-23) are programmable i/o
- */
- if (data[0] != INSN_CONFIG_DIO_QUERY) {
- /* ignore all other instructions for ports 0 and 1 */
- if (chan < 16)
- return -EINVAL;
-
- /* changing any channel in port 2 changes the entire port */
- mask = 0xff0000;
- }
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- /* update port 2 configuration */
- outl((s->io_bits >> 24) & 0xff, dev->iobase + 224);
-
- return insn->n;
-}
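
The comment in apci3xxx_dio_insn_config() above explains that only port 2 (channels 16-23) is programmable and that configuring any one of its channels reconfigures the whole port. A hedged userspace sketch of that configuration using comedilib follows; the device path and subdevice index are assumptions made for the example, not values taken from the driver:

/* Hedged userspace sketch (comedilib), not part of the driver. */
#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	int ttl_subdev = 4;	/* assumed index of the TTL DIO subdevice */

	if (!dev)
		return 1;

	/* Configuring channel 16 switches the whole of port 2 to output. */
	if (comedi_dio_config(dev, ttl_subdev, 16, COMEDI_OUTPUT) < 0)
		fprintf(stderr, "dio_config failed\n");

	comedi_close(dev);
	return 0;
}
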
-
-static int apci3xxx_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0xff)
- outl(s->state & 0xff, dev->iobase + 80);
- if (mask & 0xff0000)
- outl((s->state >> 16) & 0xff, dev->iobase + 112);
- }
-
- val = inl(dev->iobase + 80);
- val |= (inl(dev->iobase + 64) << 8);
- if (s->io_bits & 0xff0000)
- val |= (inl(dev->iobase + 112) << 16);
- else
- val |= (inl(dev->iobase + 96) << 16);
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int apci3xxx_reset(struct comedi_device *dev)
-{
- unsigned int val;
- int i;
-
- /* Disable the interrupt */
- disable_irq(dev->irq);
-
- /* Clear the start command */
- writel(0, dev->mmio + 8);
-
- /* Reset the interrupt flags */
- val = readl(dev->mmio + 16);
- writel(val, dev->mmio + 16);
-
- /* clear the EOS */
- readl(dev->mmio + 20);
-
- /* Clear the FIFO */
- for (i = 0; i < 16; i++)
- val = readl(dev->mmio + 28);
-
- /* Enable the interrupt */
- enable_irq(dev->irq);
-
- return 0;
-}
-
-static int apci3xxx_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct apci3xxx_boardinfo *board = NULL;
- struct apci3xxx_private *devpriv;
- struct comedi_subdevice *s;
- int n_subdevices;
- int subdev;
- int ret;
-
- if (context < ARRAY_SIZE(apci3xxx_boardtypes))
- board = &apci3xxx_boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->iobase = pci_resource_start(pcidev, 2);
- dev->mmio = pci_ioremap_bar(pcidev, 3);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci3xxx_irq_handler,
- IRQF_SHARED, dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- n_subdevices = (board->ai_n_chan ? 0 : 1) + board->has_ao +
- board->has_dig_in + board->has_dig_out +
- board->has_ttl_io;
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return ret;
-
- subdev = 0;
-
- /* Analog Input subdevice */
- if (board->ai_n_chan) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | board->ai_subdev_flags;
- s->n_chan = board->ai_n_chan;
- s->maxdata = board->ai_maxdata;
- s->range_table = &apci3xxx_ai_range;
- s->insn_read = apci3xxx_ai_insn_read;
- if (dev->irq) {
- /*
- * FIXME: The hardware supports multiple scan modes
- * but the original addi-data driver only supported
- * reading a single channel with interrupts. Need a
- * proper datasheet to fix this.
- *
- * The following scan modes are supported by the
- * hardware:
- * 1) Single software scan
- * 2) Single hardware triggered scan
- * 3) Continuous software scan
- * 4) Continuous software scan with timer delay
- * 5) Continuous hardware triggered scan
- * 6) Continuous hardware triggered scan with timer
- * delay
- *
- * For now, limit the chanlist to a single channel.
- */
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 1;
- s->do_cmdtest = apci3xxx_ai_cmdtest;
- s->do_cmd = apci3xxx_ai_cmd;
- s->cancel = apci3xxx_ai_cancel;
- }
-
- subdev++;
- }
-
- /* Analog Output subdevice */
- if (board->has_ao) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->range_table = &apci3xxx_ao_range;
- s->insn_write = apci3xxx_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- subdev++;
- }
-
- /* Digital Input subdevice */
- if (board->has_dig_in) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci3xxx_di_insn_bits;
-
- subdev++;
- }
-
- /* Digital Output subdevice */
- if (board->has_dig_out) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = apci3xxx_do_insn_bits;
-
- subdev++;
- }
-
- /* TTL Digital I/O subdevice */
- if (board->has_ttl_io) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->io_bits = 0xff; /* channels 0-7 are always outputs */
- s->range_table = &range_digital;
- s->insn_config = apci3xxx_dio_insn_config;
- s->insn_bits = apci3xxx_dio_insn_bits;
-
- subdev++;
- }
-
- apci3xxx_reset(dev);
- return 0;
-}
-
-static void apci3xxx_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- apci3xxx_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver apci3xxx_driver = {
- .driver_name = "addi_apci_3xxx",
- .module = THIS_MODULE,
- .auto_attach = apci3xxx_auto_attach,
- .detach = apci3xxx_detach,
-};
-
-static int apci3xxx_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci3xxx_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci3xxx_pci_table[] = {
- { PCI_VDEVICE(ADDIDATA, 0x3010), BOARD_APCI3000_16 },
- { PCI_VDEVICE(ADDIDATA, 0x300f), BOARD_APCI3000_8 },
- { PCI_VDEVICE(ADDIDATA, 0x300e), BOARD_APCI3000_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3013), BOARD_APCI3006_16 },
- { PCI_VDEVICE(ADDIDATA, 0x3014), BOARD_APCI3006_8 },
- { PCI_VDEVICE(ADDIDATA, 0x3015), BOARD_APCI3006_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3016), BOARD_APCI3010_16 },
- { PCI_VDEVICE(ADDIDATA, 0x3017), BOARD_APCI3010_8 },
- { PCI_VDEVICE(ADDIDATA, 0x3018), BOARD_APCI3010_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3019), BOARD_APCI3016_16 },
- { PCI_VDEVICE(ADDIDATA, 0x301a), BOARD_APCI3016_8 },
- { PCI_VDEVICE(ADDIDATA, 0x301b), BOARD_APCI3016_4 },
- { PCI_VDEVICE(ADDIDATA, 0x301c), BOARD_APCI3100_16_4 },
- { PCI_VDEVICE(ADDIDATA, 0x301d), BOARD_APCI3100_8_4 },
- { PCI_VDEVICE(ADDIDATA, 0x301e), BOARD_APCI3106_16_4 },
- { PCI_VDEVICE(ADDIDATA, 0x301f), BOARD_APCI3106_8_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3020), BOARD_APCI3110_16_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3021), BOARD_APCI3110_8_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3022), BOARD_APCI3116_16_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3023), BOARD_APCI3116_8_4 },
- { PCI_VDEVICE(ADDIDATA, 0x300B), BOARD_APCI3003 },
- { PCI_VDEVICE(ADDIDATA, 0x3002), BOARD_APCI3002_16 },
- { PCI_VDEVICE(ADDIDATA, 0x3003), BOARD_APCI3002_8 },
- { PCI_VDEVICE(ADDIDATA, 0x3004), BOARD_APCI3002_4 },
- { PCI_VDEVICE(ADDIDATA, 0x3024), BOARD_APCI3500 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci3xxx_pci_table);
-
-static struct pci_driver apci3xxx_pci_driver = {
- .name = "addi_apci_3xxx",
- .id_table = apci3xxx_pci_table,
- .probe = apci3xxx_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci3xxx_driver, apci3xxx_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_tcw.h b/drivers/staging/comedi/drivers/addi_tcw.h
deleted file mode 100644
index db6d5a4e8889..000000000000
--- a/drivers/staging/comedi/drivers/addi_tcw.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef _ADDI_TCW_H
-#define _ADDI_TCW_H
-
-/*
- * Following are the generic definitions for the ADDI-DATA timer/counter/
- * watchdog (TCW) registers and bits. Some of the registers are not used
- * depending on the use of the TCW.
- */
-
-#define ADDI_TCW_VAL_REG 0x00
-
-#define ADDI_TCW_SYNC_REG 0x00
-#define ADDI_TCW_SYNC_CTR_TRIG BIT(8)
-#define ADDI_TCW_SYNC_CTR_DIS BIT(7)
-#define ADDI_TCW_SYNC_CTR_ENA BIT(6)
-#define ADDI_TCW_SYNC_TIMER_TRIG BIT(5)
-#define ADDI_TCW_SYNC_TIMER_DIS BIT(4)
-#define ADDI_TCW_SYNC_TIMER_ENA BIT(3)
-#define ADDI_TCW_SYNC_WDOG_TRIG BIT(2)
-#define ADDI_TCW_SYNC_WDOG_DIS BIT(1)
-#define ADDI_TCW_SYNC_WDOG_ENA BIT(0)
-
-#define ADDI_TCW_RELOAD_REG 0x04
-
-#define ADDI_TCW_TIMEBASE_REG 0x08
-
-#define ADDI_TCW_CTRL_REG 0x0c
-#define ADDI_TCW_CTRL_EXT_CLK_STATUS BIT(21)
-#define ADDI_TCW_CTRL_CASCADE BIT(20)
-#define ADDI_TCW_CTRL_CNTR_ENA BIT(19)
-#define ADDI_TCW_CTRL_CNT_UP BIT(18)
-#define ADDI_TCW_CTRL_EXT_CLK(x) (((x) & 3) << 16)
-#define ADDI_TCW_CTRL_EXT_CLK_MASK ADDI_TCW_CTRL_EXT_CLK(3)
-#define ADDI_TCW_CTRL_MODE(x) (((x) & 7) << 13)
-#define ADDI_TCW_CTRL_MODE_MASK ADDI_TCW_CTRL_MODE(7)
-#define ADDI_TCW_CTRL_OUT(x) (((x) & 3) << 11)
-#define ADDI_TCW_CTRL_OUT_MASK ADDI_TCW_CTRL_OUT(3)
-#define ADDI_TCW_CTRL_GATE BIT(10)
-#define ADDI_TCW_CTRL_TRIG BIT(9)
-#define ADDI_TCW_CTRL_EXT_GATE(x) (((x) & 3) << 7)
-#define ADDI_TCW_CTRL_EXT_GATE_MASK ADDI_TCW_CTRL_EXT_GATE(3)
-#define ADDI_TCW_CTRL_EXT_TRIG(x) (((x) & 3) << 5)
-#define ADDI_TCW_CTRL_EXT_TRIG_MASK ADDI_TCW_CTRL_EXT_TRIG(3)
-#define ADDI_TCW_CTRL_TIMER_ENA BIT(4)
-#define ADDI_TCW_CTRL_RESET_ENA BIT(3)
-#define ADDI_TCW_CTRL_WARN_ENA BIT(2)
-#define ADDI_TCW_CTRL_IRQ_ENA BIT(1)
-#define ADDI_TCW_CTRL_ENA BIT(0)
-
-#define ADDI_TCW_STATUS_REG 0x10
-#define ADDI_TCW_STATUS_SOFT_CLR BIT(3)
-#define ADDI_TCW_STATUS_HARDWARE_TRIG BIT(2)
-#define ADDI_TCW_STATUS_SOFT_TRIG BIT(1)
-#define ADDI_TCW_STATUS_OVERFLOW BIT(0)
-
-#define ADDI_TCW_IRQ_REG 0x14
-#define ADDI_TCW_IRQ BIT(0)
-
-#define ADDI_TCW_WARN_TIMEVAL_REG 0x18
-
-#define ADDI_TCW_WARN_TIMEBASE_REG 0x1c
-
-#endif
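
As a hedged, standalone illustration of how the TCW bit macros defined above compose a control word (the mode value and the userspace BIT() stand-in are assumptions for the example, not part of any driver):

#include <stdio.h>

#define BIT(n)	(1U << (n))	/* stand-in for the kernel's BIT() macro */
#include "addi_tcw.h"		/* the header shown above */

int main(void)
{
	/* Timer mode 2, timer and IRQ enabled; the mode value is illustrative. */
	unsigned int ctrl = ADDI_TCW_CTRL_MODE(2) |
			    ADDI_TCW_CTRL_TIMER_ENA |
			    ADDI_TCW_CTRL_IRQ_ENA |
			    ADDI_TCW_CTRL_ENA;

	printf("TCW control word: 0x%08x\n", ctrl);
	return 0;
}
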
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.c b/drivers/staging/comedi/drivers/addi_watchdog.c
deleted file mode 100644
index 9d9853fe54a0..000000000000
--- a/drivers/staging/comedi/drivers/addi_watchdog.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * COMEDI driver for the watchdog subdevice found on some addi-data boards
- * Copyright (c) 2013 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * Based on implementations in various addi-data COMEDI drivers.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-#include "addi_tcw.h"
-#include "addi_watchdog.h"
-
-struct addi_watchdog_private {
- unsigned long iobase;
- unsigned int wdog_ctrl;
-};
-
-/*
- * The watchdog subdevice is configured with two INSN_CONFIG instructions:
- *
- * Enable the watchdog and set the reload timeout:
- * data[0] = INSN_CONFIG_ARM
- * data[1] = timeout reload value
- *
- * Disable the watchdog:
- * data[0] = INSN_CONFIG_DISARM
- */
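
The comment above describes the two INSN_CONFIG instructions the watchdog subdevice accepts. A minimal userspace sketch of arming it through comedilib is shown below; the device path, subdevice index, and reload value are assumptions chosen only to illustrate the calling convention:

/* Hedged userspace sketch (comedilib), not part of the driver. */
#include <stdio.h>
#include <string.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	int wdog_subdev = 2;		/* assumed watchdog subdevice index */
	lsampl_t data[2] = { INSN_CONFIG_ARM, 50 };	/* ~1s at 20ms per tick */
	comedi_insn insn;

	if (!dev)
		return 1;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = wdog_subdev;
	insn.chanspec = CR_PACK(0, 0, 0);
	insn.n = 2;
	insn.data = data;

	if (comedi_do_insn(dev, &insn) < 0)
		fprintf(stderr, "failed to arm watchdog\n");

	comedi_close(dev);
	return 0;
}
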
-static int addi_watchdog_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_watchdog_private *spriv = s->private;
- unsigned int reload;
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- spriv->wdog_ctrl = ADDI_TCW_CTRL_ENA;
- reload = data[1] & s->maxdata;
- outl(reload, spriv->iobase + ADDI_TCW_RELOAD_REG);
-
- /* Time base is 20ms, let the user know the timeout */
- dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n",
- 20 * reload + 20);
- break;
- case INSN_CONFIG_DISARM:
- spriv->wdog_ctrl = 0;
- break;
- default:
- return -EINVAL;
- }
-
- outl(spriv->wdog_ctrl, spriv->iobase + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int addi_watchdog_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_watchdog_private *spriv = s->private;
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = inl(spriv->iobase + ADDI_TCW_STATUS_REG);
-
- return insn->n;
-}
-
-static int addi_watchdog_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_watchdog_private *spriv = s->private;
- int i;
-
- if (spriv->wdog_ctrl == 0) {
- dev_warn(dev->class_dev, "watchdog is disabled\n");
- return -EINVAL;
- }
-
- /* "ping" the watchdog */
- for (i = 0; i < insn->n; i++) {
- outl(spriv->wdog_ctrl | ADDI_TCW_CTRL_TRIG,
- spriv->iobase + ADDI_TCW_CTRL_REG);
- }
-
- return insn->n;
-}
-
-void addi_watchdog_reset(unsigned long iobase)
-{
- outl(0x0, iobase + ADDI_TCW_CTRL_REG);
- outl(0x0, iobase + ADDI_TCW_RELOAD_REG);
-}
-EXPORT_SYMBOL_GPL(addi_watchdog_reset);
-
-int addi_watchdog_init(struct comedi_subdevice *s, unsigned long iobase)
-{
- struct addi_watchdog_private *spriv;
-
- spriv = comedi_alloc_spriv(s, sizeof(*spriv));
- if (!spriv)
- return -ENOMEM;
-
- spriv->iobase = iobase;
-
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 1;
- s->maxdata = 0xff;
- s->insn_config = addi_watchdog_insn_config;
- s->insn_read = addi_watchdog_insn_read;
- s->insn_write = addi_watchdog_insn_write;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(addi_watchdog_init);
-
-static int __init addi_watchdog_module_init(void)
-{
- return 0;
-}
-module_init(addi_watchdog_module_init);
-
-static void __exit addi_watchdog_module_exit(void)
-{
-}
-module_exit(addi_watchdog_module_exit);
-
-MODULE_DESCRIPTION("ADDI-DATA Watchdog subdevice");
-MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.h b/drivers/staging/comedi/drivers/addi_watchdog.h
deleted file mode 100644
index 3f8e7388bbca..000000000000
--- a/drivers/staging/comedi/drivers/addi_watchdog.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ADDI_WATCHDOG_H
-#define _ADDI_WATCHDOG_H
-
-struct comedi_subdevice;
-
-void addi_watchdog_reset(unsigned long iobase);
-int addi_watchdog_init(struct comedi_subdevice *, unsigned long iobase);
-
-#endif
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
deleted file mode 100644
index 7ed3fd6fbd3e..000000000000
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * adl_pci6208.c
- * Comedi driver for ADLink 6208 series cards
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: adl_pci6208
- * Description: ADLink PCI-6208/6216 Series Multi-channel Analog Output Cards
- * Devices: [ADLink] PCI-6208 (adl_pci6208), PCI-6216
- * Author: nsyeow <nsyeow@pd.jaring.my>
- * Updated: Wed, 11 Feb 2015 11:37:18 +0000
- * Status: untested
- *
- * Configuration Options: not applicable, uses PCI auto config
- *
- * All supported devices share the same PCI device ID and are treated as a
- * PCI-6216 with 16 analog output channels. On a PCI-6208, the upper 8
- * channels exist in registers, but don't go to DAC chips.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
-/*
- * PCI-6208/6216-GL register map
- */
-#define PCI6208_AO_CONTROL(x) (0x00 + (2 * (x)))
-#define PCI6208_AO_STATUS 0x00
-#define PCI6208_AO_STATUS_DATA_SEND (1 << 0)
-#define PCI6208_DIO 0x40
-#define PCI6208_DIO_DO_MASK (0x0f)
-#define PCI6208_DIO_DO_SHIFT (0)
-#define PCI6208_DIO_DI_MASK (0xf0)
-#define PCI6208_DIO_DI_SHIFT (4)
-
-static int pci6208_ao_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + PCI6208_AO_STATUS);
- if ((status & PCI6208_AO_STATUS_DATA_SEND) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int pci6208_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int ret;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- /* D/A transfer rate is 2.2us */
- ret = comedi_timeout(dev, s, insn, pci6208_ao_eoc, 0);
- if (ret)
- return ret;
-
- /* the hardware expects two's complement values */
- outw(comedi_offset_munge(s, val),
- dev->iobase + PCI6208_AO_CONTROL(chan));
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
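
The comment in pci6208_ao_insn_write() above notes that the hardware expects two's complement values, which is why the offset-binary sample is munged before the outw(). A hedged worked example of that conversion is sketched below; it assumes the munge simply flips the sign bit, which matches the effect of comedi_offset_munge() for the 16-bit, +/-10V output configured in this driver:

#include <stdio.h>

/* Flip the sign bit: offset binary <-> two's complement for a 16-bit DAC. */
static unsigned int offset_munge(unsigned int val, unsigned int maxdata)
{
	return val ^ ((maxdata + 1) >> 1);
}

int main(void)
{
	/* 0x0000 (-10V), 0x8000 (0V) and 0xffff (+10V) in offset binary */
	unsigned int codes[] = { 0x0000, 0x8000, 0xffff };
	int i;

	for (i = 0; i < 3; i++)
		printf("0x%04x -> 0x%04x\n", codes[i],
		       offset_munge(codes[i], 0xffff));
	return 0;
}
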
-
-static int pci6208_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int val;
-
- val = inw(dev->iobase + PCI6208_DIO);
- val = (val & PCI6208_DIO_DI_MASK) >> PCI6208_DIO_DI_SHIFT;
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int pci6208_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + PCI6208_DIO);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pci6208_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- unsigned int val;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 2);
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16; /* Only 8 usable on PCI-6208 */
- s->maxdata = 0xffff;
- s->range_table = &range_bipolar10;
- s->insn_write = pci6208_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[1];
- /* digital input subdevice */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci6208_di_insn_bits;
-
- s = &dev->subdevices[2];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci6208_do_insn_bits;
-
- /*
- * Get the read back signals from the digital outputs
- * and save it as the initial state for the subdevice.
- */
- val = inw(dev->iobase + PCI6208_DIO);
- val = (val & PCI6208_DIO_DO_MASK) >> PCI6208_DIO_DO_SHIFT;
- s->state = val;
-
- return 0;
-}
-
-static struct comedi_driver adl_pci6208_driver = {
- .driver_name = "adl_pci6208",
- .module = THIS_MODULE,
- .auto_attach = pci6208_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int adl_pci6208_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adl_pci6208_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id adl_pci6208_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x6208) },
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
- 0x9999, 0x6208) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adl_pci6208_pci_table);
-
-static struct pci_driver adl_pci6208_pci_driver = {
- .name = "adl_pci6208",
- .id_table = adl_pci6208_pci_table,
- .probe = adl_pci6208_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adl_pci6208_driver, adl_pci6208_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for ADLink 6208 series cards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
deleted file mode 100644
index b0fc027cf485..000000000000
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * COMEDI driver for the ADLINK PCI-723x/743x series boards.
- * Copyright (C) 2012 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * Based on the adl_pci7230 driver written by:
- * David Fernandez <dfcastelao@gmail.com>
- * and the adl_pci7432 driver written by:
- * Michel Lachaine <mike@mikelachaine.ca>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: adl_pci7x3x
- * Description: 32/64-Channel Isolated Digital I/O Boards
- * Devices: [ADLink] PCI-7230 (adl_pci7230), PCI-7233 (adl_pci7233),
- * PCI-7234 (adl_pci7234), PCI-7432 (adl_pci7432), PCI-7433 (adl_pci7433),
- * PCI-7434 (adl_pci7434)
- * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
- * Updated: Thu, 02 Aug 2012 14:27:46 -0700
- * Status: untested
- *
- * One or two subdevices are setup by this driver depending on
- * the number of digital inputs and/or outputs provided by the
- * board. Each subdevice has a maximum of 32 channels.
- *
- * PCI-7230 - 2 subdevices: 0 - 16 input, 1 - 16 output
- * PCI-7233 - 1 subdevice: 0 - 32 input
- * PCI-7234 - 1 subdevice: 0 - 32 output
- * PCI-7432 - 2 subdevices: 0 - 32 input, 1 - 32 output
- * PCI-7433 - 2 subdevices: 0 - 32 input, 1 - 32 input
- * PCI-7434 - 2 subdevices: 0 - 32 output, 1 - 32 output
- *
- * The PCI-7230, PCI-7432 and PCI-7433 boards also support external
- * interrupt signals on digital input channels 0 and 1. The PCI-7233
- * has dual-interrupt sources for change-of-state (COS) on any 16
- * digital input channels of LSB and for COS on any 16 digital input
- * lines of MSB. Interrupts are not currently supported by this
- * driver.
- *
- * Configuration Options: not applicable, uses comedi PCI auto config
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-/*
- * Register I/O map (32-bit access only)
- */
-#define PCI7X3X_DIO_REG 0x00
-#define PCI743X_DIO_REG 0x04
-
-enum adl_pci7x3x_boardid {
- BOARD_PCI7230,
- BOARD_PCI7233,
- BOARD_PCI7234,
- BOARD_PCI7432,
- BOARD_PCI7433,
- BOARD_PCI7434,
-};
-
-struct adl_pci7x3x_boardinfo {
- const char *name;
- int nsubdevs;
- int di_nchan;
- int do_nchan;
-};
-
-static const struct adl_pci7x3x_boardinfo adl_pci7x3x_boards[] = {
- [BOARD_PCI7230] = {
- .name = "adl_pci7230",
- .nsubdevs = 2,
- .di_nchan = 16,
- .do_nchan = 16,
- },
- [BOARD_PCI7233] = {
- .name = "adl_pci7233",
- .nsubdevs = 1,
- .di_nchan = 32,
- },
- [BOARD_PCI7234] = {
- .name = "adl_pci7234",
- .nsubdevs = 1,
- .do_nchan = 32,
- },
- [BOARD_PCI7432] = {
- .name = "adl_pci7432",
- .nsubdevs = 2,
- .di_nchan = 32,
- .do_nchan = 32,
- },
- [BOARD_PCI7433] = {
- .name = "adl_pci7433",
- .nsubdevs = 2,
- .di_nchan = 64,
- },
- [BOARD_PCI7434] = {
- .name = "adl_pci7434",
- .nsubdevs = 2,
- .do_nchan = 64,
- }
-};
-
-static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long reg = (unsigned long)s->private;
-
- if (comedi_dio_update_state(s, data)) {
- unsigned int val = s->state;
-
- if (s->n_chan == 16) {
- /*
- * It seems the PCI-7230 needs the 16-bit DO state
- * to be shifted left by 16 bits before being written
- * to the 32-bit register. Set the value in both
- * halves of the register to be sure.
- */
- val |= val << 16;
- }
- outl(val, dev->iobase + reg);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int adl_pci7x3x_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long reg = (unsigned long)s->private;
-
- data[1] = inl(dev->iobase + reg);
-
- return insn->n;
-}
-
-static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct adl_pci7x3x_boardinfo *board = NULL;
- struct comedi_subdevice *s;
- int subdev;
- int nchan;
- int ret;
-
- if (context < ARRAY_SIZE(adl_pci7x3x_boards))
- board = &adl_pci7x3x_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 2);
-
- ret = comedi_alloc_subdevices(dev, board->nsubdevs);
- if (ret)
- return ret;
-
- subdev = 0;
-
- if (board->di_nchan) {
- nchan = min(board->di_nchan, 32);
-
- s = &dev->subdevices[subdev];
- /* Isolated digital inputs 0 to 15/31 */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = nchan;
- s->maxdata = 1;
- s->insn_bits = adl_pci7x3x_di_insn_bits;
- s->range_table = &range_digital;
-
- s->private = (void *)PCI7X3X_DIO_REG;
-
- subdev++;
-
- nchan = board->di_nchan - nchan;
- if (nchan) {
- s = &dev->subdevices[subdev];
- /* Isolated digital inputs 32 to 63 */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = nchan;
- s->maxdata = 1;
-		/* Wait for the end of transfer */
- s->range_table = &range_digital;
-
- s->private = (void *)PCI743X_DIO_REG;
-
- subdev++;
- }
- }
-
- if (board->do_nchan) {
- nchan = min(board->do_nchan, 32);
-
- s = &dev->subdevices[subdev];
- /* Isolated digital outputs 0 to 15/31 */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = nchan;
- s->maxdata = 1;
- s->insn_bits = adl_pci7x3x_do_insn_bits;
- s->range_table = &range_digital;
-
- s->private = (void *)PCI7X3X_DIO_REG;
-
- subdev++;
-
- nchan = board->do_nchan - nchan;
- if (nchan) {
- s = &dev->subdevices[subdev];
- /* Isolated digital outputs 32 to 63 */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = nchan;
- s->maxdata = 1;
- s->insn_bits = adl_pci7x3x_do_insn_bits;
- s->range_table = &range_digital;
-
- s->private = (void *)PCI743X_DIO_REG;
-
- subdev++;
- }
- }
-
- return 0;
-}
-
-static struct comedi_driver adl_pci7x3x_driver = {
- .driver_name = "adl_pci7x3x",
- .module = THIS_MODULE,
- .auto_attach = adl_pci7x3x_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int adl_pci7x3x_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adl_pci7x3x_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id adl_pci7x3x_pci_table[] = {
- { PCI_VDEVICE(ADLINK, 0x7230), BOARD_PCI7230 },
- { PCI_VDEVICE(ADLINK, 0x7233), BOARD_PCI7233 },
- { PCI_VDEVICE(ADLINK, 0x7234), BOARD_PCI7234 },
- { PCI_VDEVICE(ADLINK, 0x7432), BOARD_PCI7432 },
- { PCI_VDEVICE(ADLINK, 0x7433), BOARD_PCI7433 },
- { PCI_VDEVICE(ADLINK, 0x7434), BOARD_PCI7434 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adl_pci7x3x_pci_table);
-
-static struct pci_driver adl_pci7x3x_pci_driver = {
- .name = "adl_pci7x3x",
- .id_table = adl_pci7x3x_pci_table,
- .probe = adl_pci7x3x_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adl_pci7x3x_driver, adl_pci7x3x_pci_driver);
-
-MODULE_DESCRIPTION("ADLINK PCI-723x/743x Isolated Digital I/O boards");
-MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
deleted file mode 100644
index da901c8dec0e..000000000000
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * comedi/drivers/adl_pci8164.c
- *
- * Hardware comedi driver for PCI-8164 Adlink card
- * Copyright (C) 2004 Michel Lachine <mike@mikelachaine.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: adl_pci8164
- * Description: Driver for the Adlink PCI-8164 4 Axes Motion Control board
- * Devices: [ADLink] PCI-8164 (adl_pci8164)
- * Author: Michel Lachaine <mike@mikelachaine.ca>
- * Status: experimental
- * Updated: Mon, 14 Apr 2008 15:10:32 +0100
- *
- * Configuration Options: not applicable, uses PCI auto config
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#define PCI8164_AXIS(x) ((x) * 0x08)
-#define PCI8164_CMD_MSTS_REG 0x00
-#define PCI8164_OTP_SSTS_REG 0x02
-#define PCI8164_BUF0_REG 0x04
-#define PCI8164_BUF1_REG 0x06
-
-static int adl_pci8164_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long offset = (unsigned long)s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = inw(dev->iobase + PCI8164_AXIS(chan) + offset);
-
- return insn->n;
-}
-
-static int adl_pci8164_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long offset = (unsigned long)s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++)
- outw(data[i], dev->iobase + PCI8164_AXIS(chan) + offset);
-
- return insn->n;
-}
-
-static int adl_pci8164_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 2);
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* read MSTS register / write CMD register for each axis (channel) */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_PROC;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->len_chanlist = 4;
- s->insn_read = adl_pci8164_insn_read;
- s->insn_write = adl_pci8164_insn_write;
- s->private = (void *)PCI8164_CMD_MSTS_REG;
-
- /* read SSTS register / write OTP register for each axis (channel) */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_PROC;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->len_chanlist = 4;
- s->insn_read = adl_pci8164_insn_read;
- s->insn_write = adl_pci8164_insn_write;
- s->private = (void *)PCI8164_OTP_SSTS_REG;
-
- /* read/write BUF0 register for each axis (channel) */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_PROC;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->len_chanlist = 4;
- s->insn_read = adl_pci8164_insn_read;
- s->insn_write = adl_pci8164_insn_write;
- s->private = (void *)PCI8164_BUF0_REG;
-
- /* read/write BUF1 register for each axis (channel) */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_PROC;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->len_chanlist = 4;
- s->insn_read = adl_pci8164_insn_read;
- s->insn_write = adl_pci8164_insn_write;
- s->private = (void *)PCI8164_BUF1_REG;
-
- return 0;
-}
-
-static struct comedi_driver adl_pci8164_driver = {
- .driver_name = "adl_pci8164",
- .module = THIS_MODULE,
- .auto_attach = adl_pci8164_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int adl_pci8164_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adl_pci8164_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id adl_pci8164_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x8164) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adl_pci8164_pci_table);
-
-static struct pci_driver adl_pci8164_pci_driver = {
- .name = "adl_pci8164",
- .id_table = adl_pci8164_pci_table,
- .probe = adl_pci8164_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adl_pci8164_driver, adl_pci8164_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
deleted file mode 100644
index c9df3afe97f6..000000000000
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ /dev/null
@@ -1,775 +0,0 @@
-/*
-
-comedi/drivers/adl_pci9111.c
-
-Hardware driver for PCI9111 ADLink cards:
-
-PCI-9111HR
-
-Copyright (C) 2002-2005 Emmanuel Pacaud <emmanuel.pacaud@univ-poitiers.fr>
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-*/
-
-/*
-Driver: adl_pci9111
-Description: Adlink PCI-9111HR
-Author: Emmanuel Pacaud <emmanuel.pacaud@univ-poitiers.fr>
-Devices: [ADLink] PCI-9111HR (adl_pci9111)
-Status: experimental
-
-Supports:
-
- - ai_insn read
- - ao_insn read/write
- - di_insn read
- - do_insn read/write
- - ai_do_cmd mode with the following sources:
-
- - start_src TRIG_NOW
- - scan_begin_src TRIG_FOLLOW TRIG_TIMER TRIG_EXT
- - convert_src TRIG_TIMER TRIG_EXT
- - scan_end_src TRIG_COUNT
- - stop_src TRIG_COUNT TRIG_NONE
-
-The scanned channels must be consecutive and start from 0. They must
-all have the same range and aref.
-
-Configuration options: not applicable, uses PCI auto config
-*/
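
The driver comment above requires the scanned channels to be consecutive from 0 with a single range and aref (which pci9111_ai_check_chanlist() below enforces). A hedged userspace sketch of building such a chanlist follows; the channel count and range index are arbitrary choices for the example:

#include <stdio.h>
#include <comedilib.h>

#define N_CHAN	4

int main(void)
{
	unsigned int chanlist[N_CHAN];
	unsigned int range = 0;		/* first entry of the AI range table (+/-10V) */
	unsigned int aref = AREF_GROUND;
	int i;

	/* Consecutive channels starting at 0, all with the same range/aref */
	for (i = 0; i < N_CHAN; i++)
		chanlist[i] = CR_PACK(i, range, aref);

	for (i = 0; i < N_CHAN; i++)
		printf("chanlist[%d] = 0x%08x\n", i, chanlist[i]);
	return 0;
}
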
-
-/*
-CHANGELOG:
-
-2005/02/17 Extend AI streaming capabilities. Now, scan_begin_arg can be
-a multiple of chanlist_len*convert_arg.
-2002/02/19 Fixed the two's complement conversion in pci9111_(hr_)ai_get_data.
-2002/02/18 Added external trigger support for analog input.
-
-TODO:
-
- - Really test implemented functionality.
- - Add support for the PCI-9111DG with a probe routine to identify
- the card type (perhaps with the help of the channel number readback
- of the A/D Data register).
- - Add external multiplexer support.
-
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "plx9052.h"
-#include "comedi_8254.h"
-
-#define PCI9111_FIFO_HALF_SIZE 512
-
-#define PCI9111_AI_ACQUISITION_PERIOD_MIN_NS 10000
-
-#define PCI9111_RANGE_SETTING_DELAY 10
-#define PCI9111_AI_INSTANT_READ_UDELAY_US 2
-
-/*
- * IO address map and bit defines
- */
-#define PCI9111_AI_FIFO_REG 0x00
-#define PCI9111_AO_REG 0x00
-#define PCI9111_DIO_REG 0x02
-#define PCI9111_EDIO_REG 0x04
-#define PCI9111_AI_CHANNEL_REG 0x06
-#define PCI9111_AI_RANGE_STAT_REG 0x08
-#define PCI9111_AI_STAT_AD_BUSY (1 << 7)
-#define PCI9111_AI_STAT_FF_FF (1 << 6)
-#define PCI9111_AI_STAT_FF_HF (1 << 5)
-#define PCI9111_AI_STAT_FF_EF (1 << 4)
-#define PCI9111_AI_RANGE_MASK (7 << 0)
-#define PCI9111_AI_TRIG_CTRL_REG 0x0a
-#define PCI9111_AI_TRIG_CTRL_TRGEVENT (1 << 5)
-#define PCI9111_AI_TRIG_CTRL_POTRG (1 << 4)
-#define PCI9111_AI_TRIG_CTRL_PTRG (1 << 3)
-#define PCI9111_AI_TRIG_CTRL_ETIS (1 << 2)
-#define PCI9111_AI_TRIG_CTRL_TPST (1 << 1)
-#define PCI9111_AI_TRIG_CTRL_ASCAN (1 << 0)
-#define PCI9111_INT_CTRL_REG 0x0c
-#define PCI9111_INT_CTRL_ISC2 (1 << 3)
-#define PCI9111_INT_CTRL_FFEN (1 << 2)
-#define PCI9111_INT_CTRL_ISC1 (1 << 1)
-#define PCI9111_INT_CTRL_ISC0 (1 << 0)
-#define PCI9111_SOFT_TRIG_REG 0x0e
-#define PCI9111_8254_BASE_REG 0x40
-#define PCI9111_INT_CLR_REG 0x48
-
-/* PLX 9052 Local Interrupt 1 enabled and active */
-#define PCI9111_LI1_ACTIVE (PLX9052_INTCSR_LI1ENAB | \
- PLX9052_INTCSR_LI1STAT)
-
-/* PLX 9052 Local Interrupt 2 enabled and active */
-#define PCI9111_LI2_ACTIVE (PLX9052_INTCSR_LI2ENAB | \
- PLX9052_INTCSR_LI2STAT)
-
-static const struct comedi_lrange pci9111_ai_range = {
- 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-struct pci9111_private_data {
- unsigned long lcr_io_base;
-
- unsigned int scan_delay;
- unsigned int chunk_counter;
- unsigned int chunk_num_samples;
-
- unsigned short ai_bounce_buffer[2 * PCI9111_FIFO_HALF_SIZE];
-};
-
-static void plx9050_interrupt_control(unsigned long io_base,
- bool LINTi1_enable,
- bool LINTi1_active_high,
- bool LINTi2_enable,
- bool LINTi2_active_high,
- bool interrupt_enable)
-{
- int flags = 0;
-
- if (LINTi1_enable)
- flags |= PLX9052_INTCSR_LI1ENAB;
- if (LINTi1_active_high)
- flags |= PLX9052_INTCSR_LI1POL;
- if (LINTi2_enable)
- flags |= PLX9052_INTCSR_LI2ENAB;
- if (LINTi2_active_high)
- flags |= PLX9052_INTCSR_LI2POL;
-
- if (interrupt_enable)
- flags |= PLX9052_INTCSR_PCIENAB;
-
- outb(flags, io_base + PLX9052_INTCSR);
-}
-
-enum pci9111_ISC0_sources {
- irq_on_eoc,
- irq_on_fifo_half_full
-};
-
-enum pci9111_ISC1_sources {
- irq_on_timer_tick,
- irq_on_external_trigger
-};
-
-static void pci9111_interrupt_source_set(struct comedi_device *dev,
- enum pci9111_ISC0_sources irq_0_source,
- enum pci9111_ISC1_sources irq_1_source)
-{
- int flags;
-
- /* Read the current interrupt control bits */
- flags = inb(dev->iobase + PCI9111_AI_TRIG_CTRL_REG);
- /* Shift the bits so they are compatible with the write register */
- flags >>= 4;
- /* Mask off the ISCx bits */
- flags &= 0xc0;
-
- /* Now set the new ISCx bits */
- if (irq_0_source == irq_on_fifo_half_full)
- flags |= PCI9111_INT_CTRL_ISC0;
-
- if (irq_1_source == irq_on_external_trigger)
- flags |= PCI9111_INT_CTRL_ISC1;
-
- outb(flags, dev->iobase + PCI9111_INT_CTRL_REG);
-}
-
-static void pci9111_fifo_reset(struct comedi_device *dev)
-{
- unsigned long int_ctrl_reg = dev->iobase + PCI9111_INT_CTRL_REG;
-
- /* To reset the FIFO, set FFEN sequence as 0 -> 1 -> 0 */
- outb(0, int_ctrl_reg);
- outb(PCI9111_INT_CTRL_FFEN, int_ctrl_reg);
- outb(0, int_ctrl_reg);
-}
-
-static int pci9111_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci9111_private_data *dev_private = dev->private;
-
- /* Disable interrupts */
- plx9050_interrupt_control(dev_private->lcr_io_base, true, true, true,
- true, false);
-
- /* disable A/D triggers (software trigger mode) and auto scan off */
- outb(0, dev->iobase + PCI9111_AI_TRIG_CTRL_REG);
-
- pci9111_fifo_reset(dev);
-
- return 0;
-}
-
-static int pci9111_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
-
- if (chan != i) {
- dev_dbg(dev->class_dev,
-				"entries in chanlist must be consecutive channels, counting upwards from 0\n");
- return -EINVAL;
- }
-
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must all have the same gain\n");
- return -EINVAL;
- }
-
- if (aref != aref0) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must all have the same reference\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int pci9111_ai_do_cmd_test(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src,
- TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->scan_begin_src != TRIG_FOLLOW) {
- if (cmd->scan_begin_src != cmd->convert_src)
- err |= -EINVAL;
- }
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- PCI9111_AI_ACQUISITION_PERIOD_MIN_NS);
- } else { /* TRIG_EXT */
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- PCI9111_AI_ACQUISITION_PERIOD_MIN_NS);
- } else { /* TRIG_FOLLOW || TRIG_EXT */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- /*
- * There's only one timer on this card, so the scan_begin timer
- * must be a multiple of chanlist_len*convert_arg
- */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->chanlist_len * cmd->convert_arg;
-
- if (arg < cmd->scan_begin_arg)
- arg *= (cmd->scan_begin_arg / arg);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
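
A hedged worked example of the fix-up above (all values invented for illustration): with chanlist_len = 4 and convert_arg = 10000 ns, a requested scan_begin_arg of 95000 ns is pulled back to the nearest lower multiple of 40000 ns.

#include <stdio.h>

int main(void)
{
	unsigned int chanlist_len = 4;
	unsigned int convert_arg = 10000;	/* ns */
	unsigned int scan_begin_arg = 95000;	/* ns, as requested */
	unsigned int arg = chanlist_len * convert_arg;	/* 40000 ns */

	/* Same adjustment as the cmdtest step above */
	if (arg < scan_begin_arg)
		arg *= (scan_begin_arg / arg);

	printf("scan_begin_arg fixed up to %u ns\n", arg);	/* prints 80000 */
	return 0;
}
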
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= pci9111_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int pci9111_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci9111_private_data *dev_private = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int last_chan = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]);
- unsigned int trig = 0;
-
- /* Set channel scan limit */
- /* PCI9111 allows only scanning from channel 0 to channel n */
- /* TODO: handle the case of an external multiplexer */
-
- if (cmd->chanlist_len > 1)
- trig |= PCI9111_AI_TRIG_CTRL_ASCAN;
-
- outb(last_chan, dev->iobase + PCI9111_AI_CHANNEL_REG);
-
- /* Set gain */
- /* This is the same gain on every channel */
-
- outb(CR_RANGE(cmd->chanlist[0]) & PCI9111_AI_RANGE_MASK,
- dev->iobase + PCI9111_AI_RANGE_STAT_REG);
-
- /* Set timer pacer */
- dev_private->scan_delay = 0;
- if (cmd->convert_src == TRIG_TIMER) {
- trig |= PCI9111_AI_TRIG_CTRL_TPST;
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- pci9111_fifo_reset(dev);
- pci9111_interrupt_source_set(dev, irq_on_fifo_half_full,
- irq_on_timer_tick);
- plx9050_interrupt_control(dev_private->lcr_io_base, true, true,
- false, true, true);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- dev_private->scan_delay = (cmd->scan_begin_arg /
- (cmd->convert_arg * cmd->chanlist_len)) - 1;
- }
- } else { /* TRIG_EXT */
- trig |= PCI9111_AI_TRIG_CTRL_ETIS;
- pci9111_fifo_reset(dev);
- pci9111_interrupt_source_set(dev, irq_on_fifo_half_full,
- irq_on_timer_tick);
- plx9050_interrupt_control(dev_private->lcr_io_base, true, true,
- false, true, true);
- }
- outb(trig, dev->iobase + PCI9111_AI_TRIG_CTRL_REG);
-
- dev_private->chunk_counter = 0;
- dev_private->chunk_num_samples = cmd->chanlist_len *
- (1 + dev_private->scan_delay);
-
- return 0;
-}
-
-static void pci9111_ai_munge(struct comedi_device *dev,
- struct comedi_subdevice *s, void *data,
- unsigned int num_bytes,
- unsigned int start_chan_index)
-{
- unsigned short *array = data;
- unsigned int maxdata = s->maxdata;
- unsigned int invert = (maxdata + 1) >> 1;
- unsigned int shift = (maxdata == 0xffff) ? 0 : 4;
- unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
- unsigned int i;
-
- for (i = 0; i < num_samples; i++)
- array[i] = ((array[i] >> shift) & maxdata) ^ invert;
-}
-
-static void pci9111_handle_fifo_half_full(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci9111_private_data *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int samples;
-
- samples = comedi_nsamples_left(s, PCI9111_FIFO_HALF_SIZE);
- insw(dev->iobase + PCI9111_AI_FIFO_REG,
- devpriv->ai_bounce_buffer, samples);
-
- if (devpriv->scan_delay < 1) {
- comedi_buf_write_samples(s, devpriv->ai_bounce_buffer, samples);
- } else {
- unsigned int pos = 0;
- unsigned int to_read;
-
- while (pos < samples) {
- if (devpriv->chunk_counter < cmd->chanlist_len) {
- to_read = cmd->chanlist_len -
- devpriv->chunk_counter;
-
- if (to_read > samples - pos)
- to_read = samples - pos;
-
- comedi_buf_write_samples(s,
- devpriv->ai_bounce_buffer + pos,
- to_read);
- } else {
- to_read = devpriv->chunk_num_samples -
- devpriv->chunk_counter;
-
- if (to_read > samples - pos)
- to_read = samples - pos;
- }
-
- pos += to_read;
- devpriv->chunk_counter += to_read;
-
- if (devpriv->chunk_counter >=
- devpriv->chunk_num_samples)
- devpriv->chunk_counter = 0;
- }
- }
-}
-
-static irqreturn_t pci9111_interrupt(int irq, void *p_device)
-{
- struct comedi_device *dev = p_device;
- struct pci9111_private_data *dev_private = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
- unsigned int status;
- unsigned long irq_flags;
- unsigned char intcsr;
-
- if (!dev->attached) {
- /* Ignore interrupt before device fully attached. */
- /* Might not even have allocated subdevices yet! */
- return IRQ_NONE;
- }
-
- async = s->async;
- cmd = &async->cmd;
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
-
- /* Check if we are source of interrupt */
- intcsr = inb(dev_private->lcr_io_base + PLX9052_INTCSR);
- if (!(((intcsr & PLX9052_INTCSR_PCIENAB) != 0) &&
- (((intcsr & PCI9111_LI1_ACTIVE) == PCI9111_LI1_ACTIVE) ||
- ((intcsr & PCI9111_LI2_ACTIVE) == PCI9111_LI2_ACTIVE)))) {
- /* Not the source of the interrupt. */
- /* (N.B. not using PLX9052_INTCSR_SOFTINT) */
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- return IRQ_NONE;
- }
-
- if ((intcsr & PCI9111_LI1_ACTIVE) == PCI9111_LI1_ACTIVE) {
- /* Interrupt comes from fifo_half-full signal */
-
- status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG);
-
- /* '0' means FIFO is full, data may have been lost */
- if (!(status & PCI9111_AI_STAT_FF_FF)) {
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- dev_dbg(dev->class_dev, "fifo overflow\n");
- outb(0, dev->iobase + PCI9111_INT_CLR_REG);
- async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
- }
-
- /* '0' means FIFO is half-full */
- if (!(status & PCI9111_AI_STAT_FF_HF))
- pci9111_handle_fifo_half_full(dev, s);
- }
-
- if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
-
- outb(0, dev->iobase + PCI9111_INT_CLR_REG);
-
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int pci9111_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG);
- if (status & PCI9111_AI_STAT_FF_EF)
- return 0;
- return -EBUSY;
-}
-
-static int pci9111_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int maxdata = s->maxdata;
- unsigned int invert = (maxdata + 1) >> 1;
- unsigned int shift = (maxdata == 0xffff) ? 0 : 4;
- unsigned int status;
- int ret;
- int i;
-
- outb(chan, dev->iobase + PCI9111_AI_CHANNEL_REG);
-
- status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG);
- if ((status & PCI9111_AI_RANGE_MASK) != range) {
- outb(range & PCI9111_AI_RANGE_MASK,
- dev->iobase + PCI9111_AI_RANGE_STAT_REG);
- }
-
- pci9111_fifo_reset(dev);
-
- for (i = 0; i < insn->n; i++) {
- /* Generate a software trigger */
- outb(0, dev->iobase + PCI9111_SOFT_TRIG_REG);
-
- ret = comedi_timeout(dev, s, insn, pci9111_ai_eoc, 0);
- if (ret) {
- pci9111_fifo_reset(dev);
- return ret;
- }
-
- data[i] = inw(dev->iobase + PCI9111_AI_FIFO_REG);
- data[i] = ((data[i] >> shift) & maxdata) ^ invert;
- }
-
- return i;
-}
-
-static int pci9111_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outw(val, dev->iobase + PCI9111_AO_REG);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pci9111_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inw(dev->iobase + PCI9111_DIO_REG);
-
- return insn->n;
-}
-
-static int pci9111_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + PCI9111_DIO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pci9111_reset(struct comedi_device *dev)
-{
- struct pci9111_private_data *dev_private = dev->private;
-
- /* Set trigger source to software */
- plx9050_interrupt_control(dev_private->lcr_io_base, true, true, true,
- true, false);
-
- /* disable A/D triggers (software trigger mode) and auto scan off */
- outb(0, dev->iobase + PCI9111_AI_TRIG_CTRL_REG);
-
- return 0;
-}
-
-static int pci9111_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pci9111_private_data *dev_private;
- struct comedi_subdevice *s;
- int ret;
-
- dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
- if (!dev_private)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev_private->lcr_io_base = pci_resource_start(pcidev, 1);
- dev->iobase = pci_resource_start(pcidev, 2);
-
- pci9111_reset(dev);
-
- if (pcidev->irq) {
- ret = request_irq(pcidev->irq, pci9111_interrupt,
- IRQF_SHARED, dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- dev->pacer = comedi_8254_init(dev->iobase + PCI9111_8254_BASE_REG,
- I8254_OSC_BASE_2MHZ, I8254_IO16, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON;
- s->n_chan = 16;
- s->maxdata = 0xffff;
- s->range_table = &pci9111_ai_range;
- s->insn_read = pci9111_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = pci9111_ai_do_cmd_test;
- s->do_cmd = pci9111_ai_do_cmd;
- s->cancel = pci9111_ai_cancel;
- s->munge = pci9111_ai_munge;
- }
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_COMMON;
- s->n_chan = 1;
- s->maxdata = 0x0fff;
- s->len_chanlist = 1;
- s->range_table = &range_bipolar10;
- s->insn_write = pci9111_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci9111_di_insn_bits;
-
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci9111_do_insn_bits;
-
- return 0;
-}
-
-static void pci9111_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- pci9111_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver adl_pci9111_driver = {
- .driver_name = "adl_pci9111",
- .module = THIS_MODULE,
- .auto_attach = pci9111_auto_attach,
- .detach = pci9111_detach,
-};
-
-static int pci9111_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adl_pci9111_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id pci9111_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x9111) },
- /* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, pci9111_pci_table);
-
-static struct pci_driver adl_pci9111_pci_driver = {
- .name = "adl_pci9111",
- .id_table = pci9111_pci_table,
- .probe = pci9111_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adl_pci9111_driver, adl_pci9111_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
deleted file mode 100644
index fb3043dcfff1..000000000000
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ /dev/null
@@ -1,1761 +0,0 @@
-/*
- * comedi/drivers/adl_pci9118.c
- *
- * hardware driver for ADLink cards:
- * card: PCI-9118DG, PCI-9118HG, PCI-9118HR
- * driver: pci9118dg, pci9118hg, pci9118hr
- *
- * Author: Michal Dobes <dobes@tesnet.cz>
- *
- */
-
-/*
- * Driver: adl_pci9118
- * Description: Adlink PCI-9118DG, PCI-9118HG, PCI-9118HR
- * Author: Michal Dobes <dobes@tesnet.cz>
- * Devices: [ADLink] PCI-9118DG (pci9118dg), PCI-9118HG (pci9118hg),
- * PCI-9118HR (pci9118hr)
- * Status: works
- *
- * This driver supports AI, AO, DI and DO subdevices.
- * AI subdevice supports cmd and insn interface,
- * other subdevices support only insn interface.
- * For AI:
- * - If cmd->scan_begin_src=TRIG_EXT then trigger input is TGIN (pin 46).
- * - If cmd->convert_src=TRIG_EXT then trigger input is EXTTRG (pin 44).
- * - If cmd->start_src/stop_src=TRIG_EXT then trigger input is TGIN (pin 46).
- * - It is not necessary to have cmd.scan_end_arg=cmd.chanlist_len, but
- *   cmd.scan_end_arg modulo cmd.chanlist_len must be 0.
- * - If the return value of cmdtest is 5, the channel list is bad
- *   (it is not possible to mix S.E. and DIFF inputs or bipolar and
- *   unipolar ranges).
- *
- * There are some hardware limitations:
- * a) You can't mix unipolar/bipolar ranges or differential/single-ended
- *  inputs.
- * b) DMA transfers must have a length aligned to two samples (32 bits),
- * so there are problems if cmd->chanlist_len is odd. This driver tries to
- * bypass this by adding one sample to the end of every scan and discarding
- * it on output, but this can't be used if cmd->scan_begin_src=TRIG_FOLLOW
- * and the CMDF_WAKE_EOS flag is used; then the driver switches to an
- * interrupt-driven mode with an interrupt after every sample.
- * c) If DMA is not used, then only the mode where
- * cmd->scan_begin_src=TRIG_FOLLOW can be used.
- *
- * Configuration options:
- * [0] - PCI bus of device (optional)
- * [1] - PCI slot of device (optional)
- * If bus/slot is not specified, then first available PCI
- * card will be used.
- * [2] - 0= standard 8 DIFF/16 SE channels configuration
- * n = external multiplexer connected, 1 <= n <= 256
- * [3] - ignored
- * [4] - sample&hold signal - the card can generate a signal for an external
- *       S&H board
- *       0 = use SSHO (pin 45); the signal is generated by the onboard
- *           hardware S&H logic
- *       0 != use ADCHN7 (pin 23); the signal is generated by the driver, the
- *            number says how long a delay is requested in ns and its sign
- *            gives the polarity of the hold (in this case the external
- *            multiplexer can serve only 128 channels)
- * [5] - ignored
- */
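-
-/*
- * Purely illustrative sketch (not part of this driver): how a user-space
- * program might set up the AI cmd interface described above, using the
- * comedilib API (comedi_open(), comedi_command(), CR_PACK()). The device
- * path, channel list and timing values are assumptions; a real program
- * would normally run comedi_command_test() on the cmd first.
- *
- *	#include <comedilib.h>
- *
- *	static unsigned int chanlist[4];
- *
- *	int pci9118_start_ai(const char *path)
- *	{
- *		comedi_t *it = comedi_open(path);
- *		comedi_cmd cmd = { 0 };
- *		int i;
- *
- *		if (!it)
- *			return -1;
- *		for (i = 0; i < 4; i++)
- *			chanlist[i] = CR_PACK(i, 0, AREF_GROUND);
- *		cmd.subdev = 0;
- *		cmd.start_src = TRIG_NOW;
- *		cmd.scan_begin_src = TRIG_FOLLOW;
- *		cmd.convert_src = TRIG_TIMER;
- *		cmd.convert_arg = 10000;
- *		cmd.scan_end_src = TRIG_COUNT;
- *		cmd.scan_end_arg = 4;
- *		cmd.stop_src = TRIG_NONE;
- *		cmd.chanlist = chanlist;
- *		cmd.chanlist_len = 4;
- *		return comedi_command(it, &cmd);
- *	}
- */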
-
-/*
- * FIXME
- *
- * All the supported boards have the same PCI vendor and device IDs, so
- * auto-attachment of PCI devices will always find the first board type.
- *
- * Perhaps the boards have different subdevice IDs that we could use to
- * distinguish them?
- *
- * Need some device attributes so the board type can be corrected after
- * attachment if necessary, and possibly to set other options supported by
- * manual attachment.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include "../comedi_pci.h"
-
-#include "amcc_s5933.h"
-#include "comedi_8254.h"
-
-#define IORANGE_9118 64 /* I hope */
-#define PCI9118_CHANLEN 255 /*
- * length of chanlist; some sources say 256,
- * but reality looks like 255 :-(
- */
-
-/*
- * PCI BAR2 Register map (dev->iobase)
- */
-#define PCI9118_TIMER_BASE 0x00
-#define PCI9118_AI_FIFO_REG 0x10
-#define PCI9118_AO_REG(x) (0x10 + ((x) * 4))
-#define PCI9118_AI_STATUS_REG 0x18
-#define PCI9118_AI_STATUS_NFULL (1 << 8) /* 0=FIFO full (fatal) */
-#define PCI9118_AI_STATUS_NHFULL (1 << 7) /* 0=FIFO half full */
-#define PCI9118_AI_STATUS_NEPTY (1 << 6) /* 0=FIFO empty */
-#define PCI9118_AI_STATUS_ACMP (1 << 5) /* 1=about trigger complete */
-#define PCI9118_AI_STATUS_DTH (1 << 4) /* 1=ext. digital trigger */
-#define PCI9118_AI_STATUS_BOVER (1 << 3) /* 1=burst overrun (fatal) */
-#define PCI9118_AI_STATUS_ADOS (1 << 2) /* 1=A/D over speed (warn) */
-#define PCI9118_AI_STATUS_ADOR (1 << 1) /* 1=A/D overrun (fatal) */
-#define PCI9118_AI_STATUS_ADRDY (1 << 0) /* 1=A/D ready */
-#define PCI9118_AI_CTRL_REG 0x18
-#define PCI9118_AI_CTRL_UNIP (1 << 7) /* 1=unipolar */
-#define PCI9118_AI_CTRL_DIFF (1 << 6) /* 1=differential inputs */
-#define PCI9118_AI_CTRL_SOFTG (1 << 5) /* 1=8254 software gate */
-#define PCI9118_AI_CTRL_EXTG (1 << 4) /* 1=8254 TGIN(pin 46) gate */
-#define PCI9118_AI_CTRL_EXTM (1 << 3) /* 1=ext. trigger (pin 44) */
-#define PCI9118_AI_CTRL_TMRTR (1 << 2) /* 1=8254 is trigger source */
-#define PCI9118_AI_CTRL_INT (1 << 1) /* 1=enable interrupt */
-#define PCI9118_AI_CTRL_DMA (1 << 0) /* 1=enable DMA */
-#define PCI9118_DIO_REG 0x1c
-#define PCI9118_SOFTTRG_REG 0x20
-#define PCI9118_AI_CHANLIST_REG 0x24
-#define PCI9118_AI_CHANLIST_RANGE(x) (((x) & 0x3) << 8)
-#define PCI9118_AI_CHANLIST_CHAN(x) ((x) << 0)
-#define PCI9118_AI_BURST_NUM_REG 0x28
-#define PCI9118_AI_AUTOSCAN_MODE_REG 0x2c
-#define PCI9118_AI_CFG_REG 0x30
-#define PCI9118_AI_CFG_PDTRG (1 << 7) /* 1=positive trigger */
-#define PCI9118_AI_CFG_PETRG (1 << 6) /* 1=positive ext. trigger */
-#define PCI9118_AI_CFG_BSSH (1 << 5) /* 1=with sample & hold */
-#define PCI9118_AI_CFG_BM (1 << 4) /* 1=burst mode */
-#define PCI9118_AI_CFG_BS (1 << 3) /* 1=burst mode start */
-#define PCI9118_AI_CFG_PM (1 << 2) /* 1=post trigger */
-#define PCI9118_AI_CFG_AM (1 << 1) /* 1=about trigger */
-#define PCI9118_AI_CFG_START (1 << 0) /* 1=trigger start */
-#define PCI9118_FIFO_RESET_REG 0x34
-#define PCI9118_INT_CTRL_REG 0x38
-#define PCI9118_INT_CTRL_TIMER (1 << 3) /* timer interrupt */
-#define PCI9118_INT_CTRL_ABOUT (1 << 2) /* about trigger complete */
-#define PCI9118_INT_CTRL_HFULL (1 << 1) /* A/D FIFO half full */
-#define PCI9118_INT_CTRL_DTRG (1 << 0) /* ext. digital trigger */
-
-#define START_AI_EXT 0x01 /* start measure on external trigger */
-#define STOP_AI_EXT 0x02 /* stop measure on external trigger */
-#define STOP_AI_INT 0x08 /* stop measure on internal trigger */
-
-#define PCI9118_HALF_FIFO_SZ (1024 / 2)
-
-static const struct comedi_lrange pci9118_ai_range = {
- 8, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange pci9118hg_ai_range = {
- 8, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
-};
-
-#define PCI9118_BIPOLAR_RANGES 4 /*
- * used for test on mixture
- * of BIP/UNI ranges
- */
-
-enum pci9118_boardid {
- BOARD_PCI9118DG,
- BOARD_PCI9118HG,
- BOARD_PCI9118HR,
-};
-
-struct pci9118_boardinfo {
- const char *name;
- unsigned int ai_is_16bit:1;
- unsigned int is_hg:1;
-};
-
-static const struct pci9118_boardinfo pci9118_boards[] = {
- [BOARD_PCI9118DG] = {
- .name = "pci9118dg",
- },
- [BOARD_PCI9118HG] = {
- .name = "pci9118hg",
- .is_hg = 1,
- },
- [BOARD_PCI9118HR] = {
- .name = "pci9118hr",
- .ai_is_16bit = 1,
- },
-};
-
-struct pci9118_dmabuf {
- unsigned short *virt; /* virtual address of buffer */
- dma_addr_t hw; /* hardware (bus) address of buffer */
- unsigned int size; /* size of dma buffer in bytes */
- unsigned int use_size; /* which size we may now use for transfer */
-};
-
-struct pci9118_private {
- unsigned long iobase_a; /* base+size for AMCC chip */
- unsigned int master:1;
- unsigned int dma_doublebuf:1;
- unsigned int ai_neverending:1;
- unsigned int usedma:1;
- unsigned int usemux:1;
- unsigned char ai_ctrl;
- unsigned char int_ctrl;
- unsigned char ai_cfg;
- unsigned int ai_do; /* what is AI doing? 0=nothing, mode 1 to 4 */
- unsigned int ai_n_realscanlen; /*
- * what we must transfer for one
- * outgoing scan, including front/back adds
- */
- unsigned int ai_act_dmapos; /* position in actual real stream */
- unsigned int ai_add_front; /*
- * how many channels we must add
- * before scan to satisfy S&H?
- */
- unsigned int ai_add_back; /*
- * how many channels we must add
- * after the scan to satisfy DMA?
- */
- unsigned int ai_flags;
- char ai12_startstop; /*
- * measure can start/stop
- * on external trigger
- */
- unsigned int dma_actbuf; /* which buffer is used now */
- struct pci9118_dmabuf dmabuf[2];
- int softsshdelay; /*
- * >0 use software S&H,
- * number is the requested delay in ns
- */
- unsigned char softsshsample; /*
- * polarity of S&H signal
- * in sample state
- */
- unsigned char softsshhold; /*
- * polarity of S&H signal
- * in hold state
- */
- unsigned int ai_ns_min;
-};
-
-static void pci9118_amcc_setup_dma(struct comedi_device *dev, unsigned int buf)
-{
- struct pci9118_private *devpriv = dev->private;
- struct pci9118_dmabuf *dmabuf = &devpriv->dmabuf[buf];
-
- /* set the master write address and transfer count */
- outl(dmabuf->hw, devpriv->iobase_a + AMCC_OP_REG_MWAR);
- outl(dmabuf->use_size, devpriv->iobase_a + AMCC_OP_REG_MWTC);
-}
-
-static void pci9118_amcc_dma_ena(struct comedi_device *dev, bool enable)
-{
- struct pci9118_private *devpriv = dev->private;
- unsigned int mcsr;
-
- mcsr = inl(devpriv->iobase_a + AMCC_OP_REG_MCSR);
- if (enable)
- mcsr |= RESET_A2P_FLAGS | A2P_HI_PRIORITY | EN_A2P_TRANSFERS;
- else
- mcsr &= ~EN_A2P_TRANSFERS;
- outl(mcsr, devpriv->iobase_a + AMCC_OP_REG_MCSR);
-}
-
-static void pci9118_amcc_int_ena(struct comedi_device *dev, bool enable)
-{
- struct pci9118_private *devpriv = dev->private;
- unsigned int intcsr;
-
- /* enable/disable interrupt for AMCC Incoming Mailbox 4 (32-bit) */
- intcsr = inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR);
- if (enable)
- intcsr |= 0x1f00;
- else
- intcsr &= ~0x1f00;
- outl(intcsr, devpriv->iobase_a + AMCC_OP_REG_INTCSR);
-}
-
-static void pci9118_ai_reset_fifo(struct comedi_device *dev)
-{
- /* writing any value resets the A/D FIFO */
- outl(0, dev->iobase + PCI9118_FIFO_RESET_REG);
-}
-
-static int check_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s, int n_chan,
- unsigned int *chanlist, int frontadd, int backadd)
-{
- struct pci9118_private *devpriv = dev->private;
- unsigned int i, differencial = 0, bipolar = 0;
-
- /* correct channel and range numbers are checked by comedi/range.c itself */
- if (n_chan < 1) {
- dev_err(dev->class_dev, "range/channel list is empty!\n");
- return 0;
- }
- if ((frontadd + n_chan + backadd) > s->len_chanlist) {
- dev_err(dev->class_dev,
- "range/channel list is too long for actual configuration!\n");
- return 0;
- }
-
- if (CR_AREF(chanlist[0]) == AREF_DIFF)
- differencial = 1; /* all input must be diff */
- if (CR_RANGE(chanlist[0]) < PCI9118_BIPOLAR_RANGES)
- bipolar = 1; /* all input must be bipolar */
- if (n_chan > 1)
- for (i = 1; i < n_chan; i++) { /* check S.E/diff */
- if ((CR_AREF(chanlist[i]) == AREF_DIFF) !=
- (differencial)) {
- dev_err(dev->class_dev,
- "Differential and single ended inputs can't be mixed!\n");
- return 0;
- }
- if ((CR_RANGE(chanlist[i]) < PCI9118_BIPOLAR_RANGES) !=
- (bipolar)) {
- dev_err(dev->class_dev,
- "Bipolar and unipolar ranges can't be mixed!\n");
- return 0;
- }
- if (!devpriv->usemux && differencial &&
- (CR_CHAN(chanlist[i]) >= (s->n_chan / 2))) {
- dev_err(dev->class_dev,
- "AREF_DIFF is only available for the first 8 channels!\n");
- return 0;
- }
- }
-
- return 1;
-}
-
-static void pci9118_set_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int n_chan, unsigned int *chanlist,
- int frontadd, int backadd)
-{
- struct pci9118_private *devpriv = dev->private;
- unsigned int chan0 = CR_CHAN(chanlist[0]);
- unsigned int range0 = CR_RANGE(chanlist[0]);
- unsigned int aref0 = CR_AREF(chanlist[0]);
- unsigned int ssh = 0x00;
- unsigned int val;
- int i;
-
- /*
- * Configure analog input based on the first chanlist entry.
- * All entries are either unipolar or bipolar and single-ended
- * or differential.
- */
- devpriv->ai_ctrl = 0;
- if (comedi_range_is_unipolar(s, range0))
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_UNIP;
- if (aref0 == AREF_DIFF)
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_DIFF;
- outl(devpriv->ai_ctrl, dev->iobase + PCI9118_AI_CTRL_REG);
-
- /* gods know why this sequence! */
- outl(2, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
- outl(0, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
- outl(1, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
-
- /* insert channels for S&H */
- if (frontadd) {
- val = PCI9118_AI_CHANLIST_CHAN(chan0) |
- PCI9118_AI_CHANLIST_RANGE(range0);
- ssh = devpriv->softsshsample;
- for (i = 0; i < frontadd; i++) {
- outl(val | ssh, dev->iobase + PCI9118_AI_CHANLIST_REG);
- ssh = devpriv->softsshhold;
- }
- }
-
- /* store chanlist */
- for (i = 0; i < n_chan; i++) {
- unsigned int chan = CR_CHAN(chanlist[i]);
- unsigned int range = CR_RANGE(chanlist[i]);
-
- val = PCI9118_AI_CHANLIST_CHAN(chan) |
- PCI9118_AI_CHANLIST_RANGE(range);
- outl(val | ssh, dev->iobase + PCI9118_AI_CHANLIST_REG);
- }
-
- /* insert channels to fit onto 32bit DMA */
- if (backadd) {
- val = PCI9118_AI_CHANLIST_CHAN(chan0) |
- PCI9118_AI_CHANLIST_RANGE(range0);
- for (i = 0; i < backadd; i++)
- outl(val | ssh, dev->iobase + PCI9118_AI_CHANLIST_REG);
- }
- /* close scan queue */
- outl(0, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
- /* udelay(100); important delay, or first sample will be crippled */
-}
-
-static void interrupt_pci9118_ai_mode4_switch(struct comedi_device *dev,
- unsigned int next_buf)
-{
- struct pci9118_private *devpriv = dev->private;
- struct pci9118_dmabuf *dmabuf = &devpriv->dmabuf[next_buf];
-
- devpriv->ai_cfg = PCI9118_AI_CFG_PDTRG | PCI9118_AI_CFG_PETRG |
- PCI9118_AI_CFG_AM;
- outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
- comedi_8254_load(dev->pacer, 0, dmabuf->hw >> 1,
- I8254_MODE0 | I8254_BINARY);
- devpriv->ai_cfg |= PCI9118_AI_CFG_START;
- outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
-}
-
-static unsigned int valid_samples_in_act_dma_buf(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int n_raw_samples)
-{
- struct pci9118_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int start_pos = devpriv->ai_add_front;
- unsigned int stop_pos = start_pos + cmd->chanlist_len;
- unsigned int span_len = stop_pos + devpriv->ai_add_back;
- unsigned int dma_pos = devpriv->ai_act_dmapos;
- unsigned int whole_spans, n_samples, x;
-
- if (span_len == cmd->chanlist_len)
- return n_raw_samples; /* use all samples */
-
- /*
- * Not all samples are to be used. Buffer contents consist of a
- * possibly non-whole number of spans and a region of each span
- * is to be used.
- *
- * Account for samples in whole number of spans.
- */
- whole_spans = n_raw_samples / span_len;
- n_samples = whole_spans * cmd->chanlist_len;
- n_raw_samples -= whole_spans * span_len;
-
- /*
- * Deal with remaining samples which could overlap up to two spans.
- */
- while (n_raw_samples) {
- if (dma_pos < start_pos) {
- /* Skip samples before start position. */
- x = start_pos - dma_pos;
- if (x > n_raw_samples)
- x = n_raw_samples;
- dma_pos += x;
- n_raw_samples -= x;
- if (!n_raw_samples)
- break;
- }
- if (dma_pos < stop_pos) {
- /* Include samples before stop position. */
- x = stop_pos - dma_pos;
- if (x > n_raw_samples)
- x = n_raw_samples;
- n_samples += x;
- dma_pos += x;
- n_raw_samples -= x;
- }
- /* Advance to next span. */
- start_pos += span_len;
- stop_pos += span_len;
- }
- return n_samples;
-}
-
-static void move_block_from_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dma_buffer,
- unsigned int n_raw_samples)
-{
- struct pci9118_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int start_pos = devpriv->ai_add_front;
- unsigned int stop_pos = start_pos + cmd->chanlist_len;
- unsigned int span_len = stop_pos + devpriv->ai_add_back;
- unsigned int dma_pos = devpriv->ai_act_dmapos;
- unsigned int x;
-
- if (span_len == cmd->chanlist_len) {
- /* All samples are to be copied. */
- comedi_buf_write_samples(s, dma_buffer, n_raw_samples);
- dma_pos += n_raw_samples;
- } else {
- /*
- * Not all samples are to be copied. Buffer contents consist
- * of a possibly non-whole number of spans and a region of
- * each span is to be copied.
- */
- while (n_raw_samples) {
- if (dma_pos < start_pos) {
- /* Skip samples before start position. */
- x = start_pos - dma_pos;
- if (x > n_raw_samples)
- x = n_raw_samples;
- dma_pos += x;
- n_raw_samples -= x;
- if (!n_raw_samples)
- break;
- }
- if (dma_pos < stop_pos) {
- /* Copy samples before stop position. */
- x = stop_pos - dma_pos;
- if (x > n_raw_samples)
- x = n_raw_samples;
- comedi_buf_write_samples(s, dma_buffer, x);
- dma_pos += x;
- n_raw_samples -= x;
- }
- /* Advance to next span. */
- start_pos += span_len;
- stop_pos += span_len;
- }
- }
- /* Update position in span for next time. */
- devpriv->ai_act_dmapos = dma_pos % span_len;
-}
-
-static void pci9118_exttrg_enable(struct comedi_device *dev, bool enable)
-{
- struct pci9118_private *devpriv = dev->private;
-
- if (enable)
- devpriv->int_ctrl |= PCI9118_INT_CTRL_DTRG;
- else
- devpriv->int_ctrl &= ~PCI9118_INT_CTRL_DTRG;
- outl(devpriv->int_ctrl, dev->iobase + PCI9118_INT_CTRL_REG);
-
- if (devpriv->int_ctrl)
- pci9118_amcc_int_ena(dev, true);
- else
- pci9118_amcc_int_ena(dev, false);
-}
-
-static void pci9118_calc_divisors(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *tim1, unsigned int *tim2,
- unsigned int flags, int chans,
- unsigned int *div1, unsigned int *div2,
- unsigned int chnsshfront)
-{
- struct comedi_8254 *pacer = dev->pacer;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- *div1 = *tim2 / pacer->osc_base; /* convert timer (burst) */
- *div2 = *tim1 / pacer->osc_base; /* scan timer */
- *div2 = *div2 / *div1; /* major timer is c1*c2 */
- if (*div2 < chans)
- *div2 = chans;
-
- *tim2 = *div1 * pacer->osc_base; /* real convert timer */
-
- if (cmd->convert_src == TRIG_NOW && !chnsshfront) {
- /* use BSSH signal */
- if (*div2 < (chans + 2))
- *div2 = chans + 2;
- }
-
- *tim1 = *div1 * *div2 * pacer->osc_base;
-}
-
-static void pci9118_start_pacer(struct comedi_device *dev, int mode)
-{
- if (mode == 1 || mode == 2 || mode == 4)
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
-}
-
-static int pci9118_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci9118_private *devpriv = dev->private;
-
- if (devpriv->usedma)
- pci9118_amcc_dma_ena(dev, false);
- pci9118_exttrg_enable(dev, false);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, false);
- /* set default config (disable burst and triggers) */
- devpriv->ai_cfg = PCI9118_AI_CFG_PDTRG | PCI9118_AI_CFG_PETRG;
- outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
- /* reset acquisition control */
- devpriv->ai_ctrl = 0;
- outl(devpriv->ai_ctrl, dev->iobase + PCI9118_AI_CTRL_REG);
- outl(0, dev->iobase + PCI9118_AI_BURST_NUM_REG);
- /* reset scan queue */
- outl(1, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
- outl(2, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
- pci9118_ai_reset_fifo(dev);
-
- devpriv->int_ctrl = 0;
- outl(devpriv->int_ctrl, dev->iobase + PCI9118_INT_CTRL_REG);
- pci9118_amcc_int_ena(dev, false);
-
- devpriv->ai_do = 0;
- devpriv->usedma = 0;
-
- devpriv->ai_act_dmapos = 0;
- s->async->inttrig = NULL;
- devpriv->ai_neverending = 0;
- devpriv->dma_actbuf = 0;
-
- return 0;
-}
-
-static void pci9118_ai_munge(struct comedi_device *dev,
- struct comedi_subdevice *s, void *data,
- unsigned int num_bytes,
- unsigned int start_chan_index)
-{
- struct pci9118_private *devpriv = dev->private;
- unsigned short *array = data;
- unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
- unsigned int i;
-
- for (i = 0; i < num_samples; i++) {
- if (devpriv->usedma)
- array[i] = be16_to_cpu(array[i]);
- if (s->maxdata == 0xffff)
- array[i] ^= 0x8000;
- else
- array[i] = (array[i] >> 4) & 0x0fff;
- }
-}
-
-static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci9118_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned short sampl;
-
- sampl = inl(dev->iobase + PCI9118_AI_FIFO_REG);
-
- comedi_buf_write_samples(s, &sampl, 1);
-
- if (!devpriv->ai_neverending) {
- if (s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
- }
-}
-
-static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci9118_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- struct pci9118_dmabuf *dmabuf = &devpriv->dmabuf[devpriv->dma_actbuf];
- unsigned int n_all = comedi_bytes_to_samples(s, dmabuf->use_size);
- unsigned int n_valid;
- bool more_dma;
-
- /* determine whether more DMA buffers to do after this one */
- n_valid = valid_samples_in_act_dma_buf(dev, s, n_all);
- more_dma = n_valid < comedi_nsamples_left(s, n_valid + 1);
-
- /* switch DMA buffers and restart DMA if double buffering */
- if (more_dma && devpriv->dma_doublebuf) {
- devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
- pci9118_amcc_setup_dma(dev, devpriv->dma_actbuf);
- if (devpriv->ai_do == 4) {
- interrupt_pci9118_ai_mode4_switch(dev,
- devpriv->dma_actbuf);
- }
- }
-
- if (n_all)
- move_block_from_dma(dev, s, dmabuf->virt, n_all);
-
- if (!devpriv->ai_neverending) {
- if (s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
- }
-
- if (s->async->events & COMEDI_CB_CANCEL_MASK)
- more_dma = false;
-
- /* restart DMA if not double buffering */
- if (more_dma && !devpriv->dma_doublebuf) {
- pci9118_amcc_setup_dma(dev, 0);
- if (devpriv->ai_do == 4)
- interrupt_pci9118_ai_mode4_switch(dev, 0);
- }
-}
-
-static irqreturn_t pci9118_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct pci9118_private *devpriv = dev->private;
- unsigned int intsrc; /* IRQ reasons from card */
- unsigned int intcsr; /* INT register from AMCC chip */
- unsigned int adstat; /* STATUS register */
-
- if (!dev->attached)
- return IRQ_NONE;
-
- intsrc = inl(dev->iobase + PCI9118_INT_CTRL_REG) & 0xf;
- intcsr = inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR);
-
- if (!intsrc && !(intcsr & ANY_S593X_INT))
- return IRQ_NONE;
-
- outl(intcsr | 0x00ff0000, devpriv->iobase_a + AMCC_OP_REG_INTCSR);
-
- if (intcsr & MASTER_ABORT_INT) {
- dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
- s->async->events |= COMEDI_CB_ERROR;
- goto interrupt_exit;
- }
-
- if (intcsr & TARGET_ABORT_INT) {
- dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
- s->async->events |= COMEDI_CB_ERROR;
- goto interrupt_exit;
- }
-
- adstat = inl(dev->iobase + PCI9118_AI_STATUS_REG);
- if ((adstat & PCI9118_AI_STATUS_NFULL) == 0) {
- dev_err(dev->class_dev,
- "A/D FIFO Full status (Fatal Error!)\n");
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
- goto interrupt_exit;
- }
- if (adstat & PCI9118_AI_STATUS_BOVER) {
- dev_err(dev->class_dev,
- "A/D Burst Mode Overrun Status (Fatal Error!)\n");
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
- goto interrupt_exit;
- }
- if (adstat & PCI9118_AI_STATUS_ADOS) {
- dev_err(dev->class_dev, "A/D Over Speed Status (Warning!)\n");
- s->async->events |= COMEDI_CB_ERROR;
- goto interrupt_exit;
- }
- if (adstat & PCI9118_AI_STATUS_ADOR) {
- dev_err(dev->class_dev, "A/D Overrun Status (Fatal Error!)\n");
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
- goto interrupt_exit;
- }
-
- if (!devpriv->ai_do)
- return IRQ_HANDLED;
-
- if (devpriv->ai12_startstop) {
- if ((adstat & PCI9118_AI_STATUS_DTH) &&
- (intsrc & PCI9118_INT_CTRL_DTRG)) {
- /* start/stop of measure */
- if (devpriv->ai12_startstop & START_AI_EXT) {
- /* deactivate EXT trigger */
- devpriv->ai12_startstop &= ~START_AI_EXT;
- if (!(devpriv->ai12_startstop & STOP_AI_EXT))
- pci9118_exttrg_enable(dev, false);
-
- /* start pacer */
- pci9118_start_pacer(dev, devpriv->ai_do);
- outl(devpriv->ai_ctrl,
- dev->iobase + PCI9118_AI_CTRL_REG);
- } else if (devpriv->ai12_startstop & STOP_AI_EXT) {
- /* deactivate EXT trigger */
- devpriv->ai12_startstop &= ~STOP_AI_EXT;
- pci9118_exttrg_enable(dev, false);
-
- /* on next interrupt measure will stop */
- devpriv->ai_neverending = 0;
- }
- }
- }
-
- if (devpriv->usedma)
- interrupt_pci9118_ai_dma(dev, s);
- else
- interrupt_pci9118_ai_onesample(dev, s);
-
-interrupt_exit:
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static void pci9118_ai_cmd_start(struct comedi_device *dev)
-{
- struct pci9118_private *devpriv = dev->private;
-
- outl(devpriv->int_ctrl, dev->iobase + PCI9118_INT_CTRL_REG);
- outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
- if (devpriv->ai_do != 3) {
- pci9118_start_pacer(dev, devpriv->ai_do);
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_SOFTG;
- }
- outl(devpriv->ai_ctrl, dev->iobase + PCI9118_AI_CTRL_REG);
-}
-
-static int pci9118_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- s->async->inttrig = NULL;
- pci9118_ai_cmd_start(dev);
-
- return 1;
-}
-
-static int Compute_and_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci9118_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- struct pci9118_dmabuf *dmabuf0 = &devpriv->dmabuf[0];
- struct pci9118_dmabuf *dmabuf1 = &devpriv->dmabuf[1];
- unsigned int dmalen0, dmalen1, i;
-
- dmalen0 = dmabuf0->size;
- dmalen1 = dmabuf1->size;
- /* is the output buffer smaller than our DMA buffer? */
- if (dmalen0 > s->async->prealloc_bufsz) {
- /* align to 32bit down */
- dmalen0 = s->async->prealloc_bufsz & ~3L;
- }
- if (dmalen1 > s->async->prealloc_bufsz) {
- /* align to 32bit down */
- dmalen1 = s->async->prealloc_bufsz & ~3L;
- }
-
- /* do we want a wake-up every scan? */
- if (devpriv->ai_flags & CMDF_WAKE_EOS) {
- if (dmalen0 < (devpriv->ai_n_realscanlen << 1)) {
- /* uff, too short DMA buffer, disable EOS support! */
- devpriv->ai_flags &= (~CMDF_WAKE_EOS);
- dev_info(dev->class_dev,
- "WAR: DMA0 buf too short, can't support CMDF_WAKE_EOS (%d<%d)\n",
- dmalen0, devpriv->ai_n_realscanlen << 1);
- } else {
- /* short first DMA buffer to one scan */
- dmalen0 = devpriv->ai_n_realscanlen << 1;
- if (dmalen0 < 4) {
- dev_info(dev->class_dev,
- "ERR: DMA0 buf len bug? (%d<4)\n",
- dmalen0);
- dmalen0 = 4;
- }
- }
- }
- if (devpriv->ai_flags & CMDF_WAKE_EOS) {
- if (dmalen1 < (devpriv->ai_n_realscanlen << 1)) {
- /* uff, too short DMA buffer, disable EOS support! */
- devpriv->ai_flags &= (~CMDF_WAKE_EOS);
- dev_info(dev->class_dev,
- "WAR: DMA1 buf too short, can't support CMDF_WAKE_EOS (%d<%d)\n",
- dmalen1, devpriv->ai_n_realscanlen << 1);
- } else {
- /* short second DMA buffer to one scan */
- dmalen1 = devpriv->ai_n_realscanlen << 1;
- if (dmalen1 < 4) {
- dev_info(dev->class_dev,
- "ERR: DMA1 buf len bug? (%d<4)\n",
- dmalen1);
- dmalen1 = 4;
- }
- }
- }
-
- /* transfer without CMDF_WAKE_EOS */
- if (!(devpriv->ai_flags & CMDF_WAKE_EOS)) {
- /* if it's possible then align DMA buffers to length of scan */
- i = dmalen0;
- dmalen0 =
- (dmalen0 / (devpriv->ai_n_realscanlen << 1)) *
- (devpriv->ai_n_realscanlen << 1);
- dmalen0 &= ~3L;
- if (!dmalen0)
- dmalen0 = i; /* uff. very long scan? */
- i = dmalen1;
- dmalen1 =
- (dmalen1 / (devpriv->ai_n_realscanlen << 1)) *
- (devpriv->ai_n_realscanlen << 1);
- dmalen1 &= ~3L;
- if (!dmalen1)
- dmalen1 = i; /* uff. very long scan? */
- /*
- * if the measurement isn't never-ending, test whether it fits
- * wholly into one or two DMA buffers
- */
- if (!devpriv->ai_neverending) {
- /* does the whole measurement fit into one DMA buffer? */
- if (dmalen0 >
- ((devpriv->ai_n_realscanlen << 1) *
- cmd->stop_arg)) {
- dmalen0 =
- (devpriv->ai_n_realscanlen << 1) *
- cmd->stop_arg;
- dmalen0 &= ~3L;
- } else { /*
- * does the whole measurement fit
- * into two DMA buffers?
- */
- if (dmalen1 >
- ((devpriv->ai_n_realscanlen << 1) *
- cmd->stop_arg - dmalen0))
- dmalen1 =
- (devpriv->ai_n_realscanlen << 1) *
- cmd->stop_arg - dmalen0;
- dmalen1 &= ~3L;
- }
- }
- }
-
- /* these DMA buffer sizes will be used */
- devpriv->dma_actbuf = 0;
- dmabuf0->use_size = dmalen0;
- dmabuf1->use_size = dmalen1;
-
- pci9118_amcc_dma_ena(dev, false);
- pci9118_amcc_setup_dma(dev, 0);
- /* init DMA transfer */
- outl(0x00000000 | AINT_WRITE_COMPL,
- devpriv->iobase_a + AMCC_OP_REG_INTCSR);
-/* outl(0x02000000|AINT_WRITE_COMPL, devpriv->iobase_a+AMCC_OP_REG_INTCSR); */
- pci9118_amcc_dma_ena(dev, true);
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | EN_A2P_TRANSFERS,
- devpriv->iobase_a + AMCC_OP_REG_INTCSR);
- /* allow bus mastering */
-
- return 0;
-}
-
-static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pci9118_private *devpriv = dev->private;
- struct comedi_8254 *pacer = dev->pacer;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int addchans = 0;
-
- devpriv->ai12_startstop = 0;
- devpriv->ai_flags = cmd->flags;
- devpriv->ai_add_front = 0;
- devpriv->ai_add_back = 0;
-
- /* prepare for start/stop conditions */
- if (cmd->start_src == TRIG_EXT)
- devpriv->ai12_startstop |= START_AI_EXT;
- if (cmd->stop_src == TRIG_EXT) {
- devpriv->ai_neverending = 1;
- devpriv->ai12_startstop |= STOP_AI_EXT;
- }
- if (cmd->stop_src == TRIG_NONE)
- devpriv->ai_neverending = 1;
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ai_neverending = 0;
-
- /*
- * use additional sample at end of every scan
- * to satisfy DMA 32 bit transfer?
- */
- devpriv->ai_add_front = 0;
- devpriv->ai_add_back = 0;
- if (devpriv->master) {
- devpriv->usedma = 1;
- if ((cmd->flags & CMDF_WAKE_EOS) &&
- (cmd->scan_end_arg == 1)) {
- if (cmd->convert_src == TRIG_NOW)
- devpriv->ai_add_back = 1;
- if (cmd->convert_src == TRIG_TIMER) {
- devpriv->usedma = 0;
- /*
- * use INT transfer if the scanlist
- * has only one channel
- */
- }
- }
- if ((cmd->flags & CMDF_WAKE_EOS) &&
- (cmd->scan_end_arg & 1) &&
- (cmd->scan_end_arg > 1)) {
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- devpriv->usedma = 0;
- /*
- * XXX maybe can be corrected to use 16 bit DMA
- */
- } else { /*
- * well, we must insert one sample
- * at the end of the scan to meet the 32 bit transfer
- */
- devpriv->ai_add_back = 1;
- }
- }
- } else { /* interrupt transfer don't need any correction */
- devpriv->usedma = 0;
- }
-
- /*
- * do we need a software S&H signal?
- * It adds at least two samples before every scan
- */
- if (cmd->convert_src == TRIG_NOW && devpriv->softsshdelay) {
- devpriv->ai_add_front = 2;
- if ((devpriv->usedma == 1) && (devpriv->ai_add_back == 1)) {
- /* move it to front */
- devpriv->ai_add_front++;
- devpriv->ai_add_back = 0;
- }
- if (cmd->convert_arg < devpriv->ai_ns_min)
- cmd->convert_arg = devpriv->ai_ns_min;
- addchans = devpriv->softsshdelay / cmd->convert_arg;
- if (devpriv->softsshdelay % cmd->convert_arg)
- addchans++;
- if (addchans > (devpriv->ai_add_front - 1)) {
- /* uff, still short */
- devpriv->ai_add_front = addchans + 1;
- if (devpriv->usedma == 1)
- if ((devpriv->ai_add_front +
- cmd->chanlist_len +
- devpriv->ai_add_back) & 1)
- devpriv->ai_add_front++;
- /* round up to 32 bit */
- }
- }
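-	/*
-	 * Worked example with assumed values: softsshdelay = 10000 ns and
-	 * cmd->convert_arg = 3000 ns give addchans = 4 (10000 / 3000 rounded
-	 * up), so ai_add_front becomes 5; with DMA, chanlist_len = 8 and
-	 * ai_add_back = 0, the total of 13 is odd, so ai_add_front is bumped
-	 * to 6 to keep the transfer 32-bit aligned.
-	 */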
- /* well, we now know what must be all added */
- devpriv->ai_n_realscanlen = /*
- * how much we must really take from the card
- * to get cmd->scan_end_arg samples on output
- */
- (devpriv->ai_add_front + cmd->chanlist_len +
- devpriv->ai_add_back) * (cmd->scan_end_arg /
- cmd->chanlist_len);
-
- /* check and setup channel list */
- if (!check_channel_list(dev, s, cmd->chanlist_len,
- cmd->chanlist, devpriv->ai_add_front,
- devpriv->ai_add_back))
- return -EINVAL;
-
- /*
- * Configure analog input and load the chanlist.
- * The acquisition control bits are enabled later.
- */
- pci9118_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist,
- devpriv->ai_add_front, devpriv->ai_add_back);
-
- /* Determine acquisition mode and calculate timing */
- devpriv->ai_do = 0;
- if (cmd->scan_begin_src != TRIG_TIMER &&
- cmd->convert_src == TRIG_TIMER) {
- /* cascaded timers 1 and 2 are used for convert timing */
- if (cmd->scan_begin_src == TRIG_EXT)
- devpriv->ai_do = 4;
- else
- devpriv->ai_do = 1;
-
- comedi_8254_cascade_ns_to_timer(pacer, &cmd->convert_arg,
- devpriv->ai_flags &
- CMDF_ROUND_NEAREST);
- comedi_8254_update_divisors(pacer);
-
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_TMRTR;
-
- if (!devpriv->usedma) {
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_INT;
- devpriv->int_ctrl |= PCI9118_INT_CTRL_TIMER;
- }
-
- if (cmd->scan_begin_src == TRIG_EXT) {
- struct pci9118_dmabuf *dmabuf = &devpriv->dmabuf[0];
-
- devpriv->ai_cfg |= PCI9118_AI_CFG_AM;
- outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
- comedi_8254_load(pacer, 0, dmabuf->hw >> 1,
- I8254_MODE0 | I8254_BINARY);
- devpriv->ai_cfg |= PCI9118_AI_CFG_START;
- }
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->convert_src != TRIG_EXT) {
- if (!devpriv->usedma) {
- dev_err(dev->class_dev,
- "cmd->scan_begin_src=TRIG_TIMER works only with bus mastering!\n");
- return -EIO;
- }
-
- /* double timed action */
- devpriv->ai_do = 2;
-
- pci9118_calc_divisors(dev, s,
- &cmd->scan_begin_arg, &cmd->convert_arg,
- devpriv->ai_flags,
- devpriv->ai_n_realscanlen,
- &pacer->divisor1,
- &pacer->divisor2,
- devpriv->ai_add_front);
-
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_TMRTR;
- devpriv->ai_cfg |= PCI9118_AI_CFG_BM | PCI9118_AI_CFG_BS;
- if (cmd->convert_src == TRIG_NOW && !devpriv->softsshdelay)
- devpriv->ai_cfg |= PCI9118_AI_CFG_BSSH;
- outl(devpriv->ai_n_realscanlen,
- dev->iobase + PCI9118_AI_BURST_NUM_REG);
- }
-
- if (cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_EXT) {
- /* external trigger conversion */
- devpriv->ai_do = 3;
-
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_EXTM;
- }
-
- if (devpriv->ai_do == 0) {
- dev_err(dev->class_dev,
- "Unable to determine acquisition mode! BUG in (*do_cmdtest)?\n");
- return -EINVAL;
- }
-
- if (devpriv->usedma)
- devpriv->ai_ctrl |= PCI9118_AI_CTRL_DMA;
-
- /* set default config (disable burst and triggers) */
- devpriv->ai_cfg = PCI9118_AI_CFG_PDTRG | PCI9118_AI_CFG_PETRG;
- outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
- udelay(1);
- pci9118_ai_reset_fifo(dev);
-
- /* clear A/D and INT status registers */
- inl(dev->iobase + PCI9118_AI_STATUS_REG);
- inl(dev->iobase + PCI9118_INT_CTRL_REG);
-
- devpriv->ai_act_dmapos = 0;
-
- if (devpriv->usedma) {
- Compute_and_setup_dma(dev, s);
-
- outl(0x02000000 | AINT_WRITE_COMPL,
- devpriv->iobase_a + AMCC_OP_REG_INTCSR);
- } else {
- pci9118_amcc_int_ena(dev, true);
- }
-
- /* start async command now or wait for internal trigger */
- if (cmd->start_src == TRIG_NOW)
- pci9118_ai_cmd_start(dev);
- else if (cmd->start_src == TRIG_INT)
- s->async->inttrig = pci9118_ai_inttrig;
-
- /* enable external trigger for command start/stop */
- if (cmd->start_src == TRIG_EXT || cmd->stop_src == TRIG_EXT)
- pci9118_exttrg_enable(dev, true);
-
- return 0;
-}
-
-static int pci9118_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct pci9118_private *devpriv = dev->private;
- int err = 0;
- unsigned int flags;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src,
- TRIG_NOW | TRIG_EXT | TRIG_INT);
-
- flags = TRIG_FOLLOW;
- if (devpriv->master)
- flags |= TRIG_TIMER | TRIG_EXT;
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, flags);
-
- flags = TRIG_TIMER | TRIG_EXT;
- if (devpriv->master)
- flags |= TRIG_NOW;
- err |= comedi_check_trigger_src(&cmd->convert_src, flags);
-
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src,
- TRIG_COUNT | TRIG_NONE | TRIG_EXT);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->start_src == TRIG_EXT && cmd->scan_begin_src == TRIG_EXT)
- err |= -EINVAL;
-
- if (cmd->start_src == TRIG_INT && cmd->scan_begin_src == TRIG_INT)
- err |= -EINVAL;
-
- if ((cmd->scan_begin_src & (TRIG_TIMER | TRIG_EXT)) &&
- (!(cmd->convert_src & (TRIG_TIMER | TRIG_NOW))))
- err |= -EINVAL;
-
- if ((cmd->scan_begin_src == TRIG_FOLLOW) &&
- (!(cmd->convert_src & (TRIG_TIMER | TRIG_EXT))))
- err |= -EINVAL;
-
- if (cmd->stop_src == TRIG_EXT && cmd->scan_begin_src == TRIG_EXT)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- case TRIG_EXT:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_INT:
- /* start_arg is the internal trigger (any value) */
- break;
- }
-
- if (cmd->scan_begin_src & (TRIG_FOLLOW | TRIG_EXT))
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if ((cmd->scan_begin_src == TRIG_TIMER) &&
- (cmd->convert_src == TRIG_TIMER) && (cmd->scan_end_arg == 1)) {
- cmd->scan_begin_src = TRIG_FOLLOW;
- cmd->convert_arg = cmd->scan_begin_arg;
- cmd->scan_begin_arg = 0;
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- devpriv->ai_ns_min);
- }
-
- if (cmd->scan_begin_src == TRIG_EXT) {
- if (cmd->scan_begin_arg) {
- cmd->scan_begin_arg = 0;
- err |= -EINVAL;
- err |= comedi_check_trigger_arg_max(&cmd->scan_end_arg,
- 65535);
- }
- }
-
- if (cmd->convert_src & (TRIG_TIMER | TRIG_NOW)) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- devpriv->ai_ns_min);
- }
-
- if (cmd->convert_src == TRIG_EXT)
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
-
- err |= comedi_check_trigger_arg_min(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if ((cmd->scan_end_arg % cmd->chanlist_len)) {
- cmd->scan_end_arg =
- cmd->chanlist_len * (cmd->scan_end_arg / cmd->chanlist_len);
- err |= -EINVAL;
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (cmd->convert_src & (TRIG_TIMER | TRIG_NOW)) {
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->convert_src == TRIG_NOW) {
- if (cmd->convert_arg == 0) {
- arg = devpriv->ai_ns_min *
- (cmd->scan_end_arg + 2);
- } else {
- arg = cmd->convert_arg * cmd->chanlist_len;
- }
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- arg);
- }
- }
-
- if (err)
- return 4;
-
- if (cmd->chanlist)
- if (!check_channel_list(dev, s, cmd->chanlist_len,
- cmd->chanlist, 0, 0))
- return 5; /* incorrect channels list */
-
- return 0;
-}
-
-static int pci9118_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inl(dev->iobase + PCI9118_AI_STATUS_REG);
- if (status & PCI9118_AI_STATUS_ADRDY)
- return 0;
- return -EBUSY;
-}
-
-static void pci9118_ai_start_conv(struct comedi_device *dev)
-{
- /* writing any value triggers an A/D conversion */
- outl(0, dev->iobase + PCI9118_SOFTTRG_REG);
-}
-
-static int pci9118_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pci9118_private *devpriv = dev->private;
- unsigned int val;
- int ret;
- int i;
-
- /*
- * Configure analog input based on the chanspec.
- * Acquisition is software controlled without interrupts.
- */
- pci9118_set_chanlist(dev, s, 1, &insn->chanspec, 0, 0);
-
- /* set default config (disable burst and triggers) */
- devpriv->ai_cfg = PCI9118_AI_CFG_PDTRG | PCI9118_AI_CFG_PETRG;
- outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
-
- pci9118_ai_reset_fifo(dev);
-
- for (i = 0; i < insn->n; i++) {
- pci9118_ai_start_conv(dev);
-
- ret = comedi_timeout(dev, s, insn, pci9118_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = inl(dev->iobase + PCI9118_AI_FIFO_REG);
- if (s->maxdata == 0xffff)
- data[i] = (val & 0xffff) ^ 0x8000;
- else
- data[i] = (val >> 4) & 0xfff;
- }
-
- return insn->n;
-}
-
-static int pci9118_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outl(val, dev->iobase + PCI9118_AO_REG(chan));
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pci9118_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- /*
- * The digital inputs and outputs share the read register.
- * bits [7:4] are the digital outputs
- * bits [3:0] are the digital inputs
- */
- data[1] = inl(dev->iobase + PCI9118_DIO_REG) & 0xf;
-
- return insn->n;
-}
-
-static int pci9118_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- /*
- * The digital outputs are set with the same register that
- * the digital inputs and outputs are read from. But the
- * outputs are set with bits [3:0] so we can simply write
- * the s->state to set them.
- */
- if (comedi_dio_update_state(s, data))
- outl(s->state, dev->iobase + PCI9118_DIO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void pci9118_reset(struct comedi_device *dev)
-{
- /* reset analog input subsystem */
- outl(0, dev->iobase + PCI9118_INT_CTRL_REG);
- outl(0, dev->iobase + PCI9118_AI_CTRL_REG);
- outl(0, dev->iobase + PCI9118_AI_CFG_REG);
- pci9118_ai_reset_fifo(dev);
-
- /* clear any pending interrupts and status */
- inl(dev->iobase + PCI9118_INT_CTRL_REG);
- inl(dev->iobase + PCI9118_AI_STATUS_REG);
-
- /* reset DMA and scan queue */
- outl(0, dev->iobase + PCI9118_AI_BURST_NUM_REG);
- outl(1, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
- outl(2, dev->iobase + PCI9118_AI_AUTOSCAN_MODE_REG);
-
- /* reset analog outputs to 0V */
- outl(2047, dev->iobase + PCI9118_AO_REG(0));
- outl(2047, dev->iobase + PCI9118_AO_REG(1));
-}
-
-static struct pci_dev *pci9118_find_pci(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct pci_dev *pcidev = NULL;
- int bus = it->options[0];
- int slot = it->options[1];
-
- for_each_pci_dev(pcidev) {
- if (pcidev->vendor != PCI_VENDOR_ID_AMCC)
- continue;
- if (pcidev->device != 0x80d9)
- continue;
- if (bus || slot) {
- /* requested particular bus/slot */
- if (pcidev->bus->number != bus ||
- PCI_SLOT(pcidev->devfn) != slot)
- continue;
- }
- return pcidev;
- }
- dev_err(dev->class_dev,
- "no supported board found! (req. bus/slot : %d/%d)\n",
- bus, slot);
- return NULL;
-}
-
-static void pci9118_alloc_dma(struct comedi_device *dev)
-{
- struct pci9118_private *devpriv = dev->private;
- struct pci9118_dmabuf *dmabuf;
- int order;
- int i;
-
- for (i = 0; i < 2; i++) {
- dmabuf = &devpriv->dmabuf[i];
- for (order = 2; order >= 0; order--) {
- dmabuf->virt =
- dma_alloc_coherent(dev->hw_dev, PAGE_SIZE << order,
- &dmabuf->hw, GFP_KERNEL);
- if (dmabuf->virt)
- break;
- }
- if (!dmabuf->virt)
- break;
- dmabuf->size = PAGE_SIZE << order;
-
- if (i == 0)
- devpriv->master = 1;
- if (i == 1)
- devpriv->dma_doublebuf = 1;
- }
-}
-
-static void pci9118_free_dma(struct comedi_device *dev)
-{
- struct pci9118_private *devpriv = dev->private;
- struct pci9118_dmabuf *dmabuf;
- int i;
-
- if (!devpriv)
- return;
-
- for (i = 0; i < 2; i++) {
- dmabuf = &devpriv->dmabuf[i];
- if (dmabuf->virt) {
- dma_free_coherent(dev->hw_dev, dmabuf->size,
- dmabuf->virt, dmabuf->hw);
- }
- }
-}
-
-static int pci9118_common_attach(struct comedi_device *dev,
- int ext_mux, int softsshdelay)
-{
- const struct pci9118_boardinfo *board = dev->board_ptr;
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pci9118_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
- int i;
- u16 u16w;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- pci_set_master(pcidev);
-
- devpriv->iobase_a = pci_resource_start(pcidev, 0);
- dev->iobase = pci_resource_start(pcidev, 2);
-
- dev->pacer = comedi_8254_init(dev->iobase + PCI9118_TIMER_BASE,
- I8254_OSC_BASE_4MHZ, I8254_IO32, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- pci9118_reset(dev);
-
- if (pcidev->irq) {
- ret = request_irq(pcidev->irq, pci9118_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0) {
- dev->irq = pcidev->irq;
-
- pci9118_alloc_dma(dev);
- }
- }
-
- if (ext_mux > 0) {
- if (ext_mux > 256)
- ext_mux = 256; /* max 256 channels! */
- if (softsshdelay > 0)
- if (ext_mux > 128)
- ext_mux = 128;
- devpriv->usemux = 1;
- } else {
- devpriv->usemux = 0;
- }
-
- if (softsshdelay < 0) {
- /* select sample&hold signal polarity */
- devpriv->softsshdelay = -softsshdelay;
- devpriv->softsshsample = 0x80;
- devpriv->softsshhold = 0x00;
- } else {
- devpriv->softsshdelay = softsshdelay;
- devpriv->softsshsample = 0x00;
- devpriv->softsshhold = 0x80;
- }
-
- pci_read_config_word(pcidev, PCI_COMMAND, &u16w);
- pci_write_config_word(pcidev, PCI_COMMAND, u16w | 64);
- /* Enable parity check for parity error */
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
- s->n_chan = (devpriv->usemux) ? ext_mux : 16;
- s->maxdata = board->ai_is_16bit ? 0xffff : 0x0fff;
- s->range_table = board->is_hg ? &pci9118hg_ai_range
- : &pci9118_ai_range;
- s->insn_read = pci9118_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = PCI9118_CHANLEN;
- s->do_cmdtest = pci9118_ai_cmdtest;
- s->do_cmd = pci9118_ai_cmd;
- s->cancel = pci9118_ai_cancel;
- s->munge = pci9118_ai_munge;
- }
-
- if (s->maxdata == 0xffff) {
- /*
- * 16-bit samples are from an ADS7805 A/D converter.
- * Minimum sampling rate is 10us.
- */
- devpriv->ai_ns_min = 10000;
- } else {
- /*
- * 12-bit samples are from an ADS7800 A/D converter.
- * Minimum sampling rate is 3us.
- */
- devpriv->ai_ns_min = 3000;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table = &range_bipolar10;
- s->insn_write = pci9118_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* the analog outputs were reset to 0V, make the readback match */
- for (i = 0; i < s->n_chan; i++)
- s->readback[i] = 2047;
-
- /* Digital Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci9118_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci9118_do_insn_bits;
-
- /* get the current state of the digital outputs */
- s->state = inl(dev->iobase + PCI9118_DIO_REG) >> 4;
-
- return 0;
-}
-
-static int pci9118_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct pci_dev *pcidev;
- int ext_mux, softsshdelay;
-
- ext_mux = it->options[2];
- softsshdelay = it->options[4];
-
- pcidev = pci9118_find_pci(dev, it);
- if (!pcidev)
- return -EIO;
- comedi_set_hw_dev(dev, &pcidev->dev);
-
- return pci9118_common_attach(dev, ext_mux, softsshdelay);
-}
-
-static int pci9118_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct pci9118_boardinfo *board = NULL;
-
- if (context < ARRAY_SIZE(pci9118_boards))
- board = &pci9118_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- /*
- * Need to 'get' the PCI device to match the 'put' in pci9118_detach().
- * (The 'put' also matches the implicit 'get' by pci9118_find_pci().)
- */
- pci_dev_get(pcidev);
- /* no external mux, no sample-hold delay */
- return pci9118_common_attach(dev, 0, 0);
-}
-
-static void pci9118_detach(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
-
- if (dev->iobase)
- pci9118_reset(dev);
- comedi_pci_detach(dev);
- pci9118_free_dma(dev);
- if (pcidev)
- pci_dev_put(pcidev);
-}
-
-static struct comedi_driver adl_pci9118_driver = {
- .driver_name = "adl_pci9118",
- .module = THIS_MODULE,
- .attach = pci9118_attach,
- .auto_attach = pci9118_auto_attach,
- .detach = pci9118_detach,
- .num_names = ARRAY_SIZE(pci9118_boards),
- .board_name = &pci9118_boards[0].name,
- .offset = sizeof(struct pci9118_boardinfo),
-};
-
-static int adl_pci9118_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adl_pci9118_driver,
- id->driver_data);
-}
-
-/* FIXME: All the supported board types have the same device ID! */
-static const struct pci_device_id adl_pci9118_pci_table[] = {
- { PCI_VDEVICE(AMCC, 0x80d9), BOARD_PCI9118DG },
-/* { PCI_VDEVICE(AMCC, 0x80d9), BOARD_PCI9118HG }, */
-/* { PCI_VDEVICE(AMCC, 0x80d9), BOARD_PCI9118HR }, */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adl_pci9118_pci_table);
-
-static struct pci_driver adl_pci9118_pci_driver = {
- .name = "adl_pci9118",
- .id_table = adl_pci9118_pci_table,
- .probe = adl_pci9118_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adl_pci9118_driver, adl_pci9118_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
deleted file mode 100644
index bc5f97f50f9a..000000000000
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- comedi/drivers/adq12b.c
- driver for MicroAxial ADQ12-B data acquisition and control card
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: adq12b
-Description: driver for MicroAxial ADQ12-B data acquisition and control card
-Devices: [MicroAxial] ADQ12-B (adq12b)
-Author: jeremy theler <thelerg@ib.cnea.gov.ar>
-Updated: Thu, 21 Feb 2008 02:56:27 -0300
-Status: works
-
-Driver for the acquisition card ADQ12-B (without any add-on).
-
- - Analog input is subdevice 0 (16 channels single-ended or 8 differential)
- - Digital input is subdevice 1 (5 channels)
- - Digital output is subdevice 1 (8 channels)
- - The PACER is not supported in this version
-
-If you do not specify any options, they will default to
-
- # comedi_config /dev/comedi0 adq12b 0x300,0,0
-
- option 1: I/O base address. The following table is provided as a help
- for setting the hardware jumpers.
-
- address jumper JADR
- 0x300 1 (factory default)
- 0x320 2
- 0x340 3
- 0x360 4
- 0x380 5
- 0x3A0 6
-
- option 2: unipolar/bipolar ADC selection: 0 -> bipolar, 1 -> unipolar
-
- selection comedi_config option JUB
- bipolar 0 2-3 (factory default)
- unipolar 1 1-2
-
- option 3: single-ended/differential AI selection: 0 -> SE, 1 -> differential
-
- selection comedi_config option JCHA JCHB
- single-ended 0 1-2 1-2 (factory default)
- differential 1 2-3 2-3
-
- written by jeremy theler <thelerg@ib.cnea.gov.ar>
-
- instituto balseiro
- commission nacional de energia atomica
- universidad nacional de cuyo
- argentina
-
- 21-feb-2008
- + changed supported devices string (misused the [] and ())
-
- 13-oct-2007
- + first try
-*/
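-
-/*
- * Illustrative example (values assumed from the option table above): a board
- * jumpered for base address 0x320 (JADR = 2), unipolar ranges (JUB = 1-2) and
- * differential inputs (JCHA/JCHB = 2-3) would be attached with:
- *
- *   # comedi_config /dev/comedi0 adq12b 0x320,1,1
- */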
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include "../comedidev.h"
-
-/* address scheme (page 2.17 of the manual) */
-#define ADQ12B_CTREG 0x00
-#define ADQ12B_CTREG_MSKP (1 << 7) /* enable pacer interrupt */
-#define ADQ12B_CTREG_GTP (1 << 6) /* enable pacer */
-#define ADQ12B_CTREG_RANGE(x) ((x) << 4)
-#define ADQ12B_CTREG_CHAN(x) ((x) << 0)
-#define ADQ12B_STINR 0x00
-#define ADQ12B_STINR_OUT2 (1 << 7) /* timer 2 output state */
-#define ADQ12B_STINR_OUTP (1 << 6) /* pacer output state */
-#define ADQ12B_STINR_EOC (1 << 5) /* A/D end-of-conversion */
-#define ADQ12B_STINR_IN_MASK (0x1f << 0)
-#define ADQ12B_OUTBR 0x04
-#define ADQ12B_ADLOW 0x08
-#define ADQ12B_ADHIG 0x09
-#define ADQ12B_TIMER_BASE 0x0c
-
-/* available ranges through the PGA gains */
-static const struct comedi_lrange range_adq12b_ai_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5)
- }
-};
-
-static const struct comedi_lrange range_adq12b_ai_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5)
- }
-};
-
-struct adq12b_private {
- unsigned int last_ctreg;
-};
-
-static int adq12b_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned char status;
-
- status = inb(dev->iobase + ADQ12B_STINR);
- if (status & ADQ12B_STINR_EOC)
- return 0;
- return -EBUSY;
-}
-
-static int adq12b_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct adq12b_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
- int ret;
- int i;
-
- /* change channel and range only if it is different from the previous */
- val = ADQ12B_CTREG_RANGE(range) | ADQ12B_CTREG_CHAN(chan);
- if (val != devpriv->last_ctreg) {
- outb(val, dev->iobase + ADQ12B_CTREG);
- devpriv->last_ctreg = val;
- udelay(50); /* wait for the mux to settle */
- }
-
- val = inb(dev->iobase + ADQ12B_ADLOW); /* trigger A/D */
-
- for (i = 0; i < insn->n; i++) {
- ret = comedi_timeout(dev, s, insn, adq12b_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = inb(dev->iobase + ADQ12B_ADHIG) << 8;
- val |= inb(dev->iobase + ADQ12B_ADLOW); /* retriggers A/D */
-
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int adq12b_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- /* only bits 0-4 have information about digital inputs */
- data[1] = (inb(dev->iobase + ADQ12B_STINR) & ADQ12B_STINR_IN_MASK);
-
- return insn->n;
-}
-
-static int adq12b_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
- unsigned int chan;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- for (chan = 0; chan < 8; chan++) {
- if ((mask >> chan) & 0x01) {
- val = (s->state >> chan) & 0x01;
- outb((val << 3) | chan,
- dev->iobase + ADQ12B_OUTBR);
- }
- }
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int adq12b_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct adq12b_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- devpriv->last_ctreg = -1; /* force ctreg update */
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- if (it->options[2]) {
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = 8;
- } else {
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 16;
- }
- s->maxdata = 0xfff;
- s->range_table = it->options[1] ? &range_adq12b_ai_unipolar
- : &range_adq12b_ai_bipolar;
- s->insn_read = adq12b_ai_insn_read;
-
- /* Digital Input subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 5;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = adq12b_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = adq12b_do_insn_bits;
-
- return 0;
-}
-
-static struct comedi_driver adq12b_driver = {
- .driver_name = "adq12b",
- .module = THIS_MODULE,
- .attach = adq12b_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(adq12b_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
deleted file mode 100644
index 0c6aa964c884..000000000000
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ /dev/null
@@ -1,1100 +0,0 @@
-/*
- * comedi/drivers/adv_pci1710.c
- *
- * Author: Michal Dobes <dobes@tesnet.cz>
- *
- * Thanks to ZhenGang Shang <ZhenGang.Shang@Advantech.com.cn>
- * for testing and information.
- *
- * hardware driver for Advantech cards:
- * card: PCI-1710, PCI-1710HG, PCI-1711, PCI-1713, PCI-1720, PCI-1731
- * driver: pci1710, pci1710hg, pci1711, pci1713, pci1720, pci1731
- *
- * Options:
- * [0] - PCI bus number - if bus number and slot number are 0,
- *          then the driver searches for the first unused card
- * [1] - PCI slot number
- *
-*/
-/*
-Driver: adv_pci1710
-Description: Advantech PCI-1710, PCI-1710HG, PCI-1711, PCI-1713,
- Advantech PCI-1720, PCI-1731
-Author: Michal Dobes <dobes@tesnet.cz>
-Devices: [Advantech] PCI-1710 (adv_pci1710), PCI-1710HG (pci1710hg),
- PCI-1711 (adv_pci1710), PCI-1713, PCI-1720,
- PCI-1731
-Status: works
-
-This driver supports AI, AO, DI and DO subdevices.
-The AI subdevice supports both the cmd and insn interfaces;
-the other subdevices support only the insn interface.
-(A userspace sketch of the insn interface follows this comment block.)
-
-The PCI-1710 and PCI-1710HG have the same PCI device ID, so the
-driver cannot automatically distinguish between them as a PCI
-driver normally would.
-
-Configuration options:
- [0] - PCI bus of device (optional)
- [1] - PCI slot of device (optional)
- If bus/slot is not specified, the first available PCI
- device will be used.
-*/
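As noted in the driver description above, the AI subdevice supports both the cmd and insn interfaces. The following is a minimal userspace sketch of the insn path through the comedilib API; it assumes comedilib is available, that the board was attached as /dev/comedi0, and that AI is subdevice 0 with range index 0 -- those particulars are illustrative, not values taken from this driver.

	/* ai_read.c - hedged comedilib sketch; build with: gcc ai_read.c -lcomedi */
	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *it = comedi_open("/dev/comedi0");	/* assumed device node */
		lsampl_t sample;
		int ret;

		if (!it) {
			fprintf(stderr, "comedi_open failed\n");
			return 1;
		}

		/* subdevice 0 (AI), channel 0, range index 0, ground reference */
		ret = comedi_data_read(it, 0, 0, 0, AREF_GROUND, &sample);
		if (ret < 0)
			fprintf(stderr, "comedi_data_read failed\n");
		else
			printf("raw 12-bit sample: %u\n", sample);

		comedi_close(it);
		return ret < 0 ? 1 : 0;
	}

The raw value corresponds to the 12-bit sample that pci171x_ai_insn_read() below fetches from PCI171x_AD_DATA; conversion to volts depends on the selected range.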
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "amcc_s5933.h"
-
-#define PCI171x_AD_DATA 0 /* R: A/D data */
-#define PCI171x_SOFTTRG 0 /* W: soft trigger for A/D */
-#define PCI171x_RANGE 2 /* W: A/D gain/range register */
-#define PCI171x_MUX 4 /* W: A/D multiplexor control */
-#define PCI171x_STATUS 6 /* R: status register */
-#define PCI171x_CONTROL 6 /* W: control register */
-#define PCI171x_CLRINT 8 /* W: clear interrupts request */
-#define PCI171x_CLRFIFO 9 /* W: clear FIFO */
-#define PCI171x_DA1 10 /* W: D/A register */
-#define PCI171x_DA2 12 /* W: D/A register */
-#define PCI171x_DAREF 14 /* W: D/A reference control */
-#define PCI171x_DI 16 /* R: digital inputs */
-#define PCI171x_DO 16 /* W: digital outputs */
-
-#define PCI171X_TIMER_BASE 0x18
-
-/* upper bits of the status register (PCI171x_STATUS); the lower bits are the
- * same as the control register */
-#define Status_FE 0x0100 /* 1=FIFO is empty */
-#define Status_FH 0x0200 /* 1=FIFO is half full */
-#define Status_FF 0x0400 /* 1=FIFO is full, fatal error */
-#define Status_IRQ 0x0800 /* 1=IRQ occurred */
-/* bits from control register (PCI171x_CONTROL) */
-#define Control_CNT0 0x0040 /* 1=CNT0 has an external source,
- * 0=internal 100 kHz source */
-#define Control_ONEFH 0x0020 /* 1=IRQ on FIFO is half full, 0=every sample */
-#define Control_IRQEN 0x0010 /* 1=enable IRQ */
-#define Control_GATE 0x0008 /* 1=enable external trigger GATE (8254?) */
-#define Control_EXT 0x0004 /* 1=external trigger source */
-#define Control_PACER 0x0002 /* 1=enable internal 8254 trigger source */
-#define Control_SW 0x0001 /* 1=enable software trigger source */
-
-#define PCI1720_DA0 0 /* W: D/A register 0 */
-#define PCI1720_DA1 2 /* W: D/A register 1 */
-#define PCI1720_DA2 4 /* W: D/A register 2 */
-#define PCI1720_DA3 6 /* W: D/A register 3 */
-#define PCI1720_RANGE 8 /* R/W: D/A range register */
-#define PCI1720_SYNCOUT 9 /* W: D/A synchronized output register */
-#define PCI1720_SYNCONT 15 /* R/W: D/A synchronized control */
-
-/* D/A synchronized control (PCI1720_SYNCONT) */
-#define Syncont_SC0 1 /* set synchronous output mode */
-
-static const struct comedi_lrange range_pci1710_3 = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(10),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const char range_codes_pci1710_3[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
- 0x10, 0x11, 0x12, 0x13 };
-
-static const struct comedi_lrange range_pci1710hg = {
- 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
-};
-
-static const char range_codes_pci1710hg[] = { 0x00, 0x01, 0x02, 0x03, 0x04,
- 0x05, 0x06, 0x07, 0x10, 0x11,
- 0x12, 0x13 };
-
-static const struct comedi_lrange range_pci17x1 = {
- 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const char range_codes_pci17x1[] = { 0x00, 0x01, 0x02, 0x03, 0x04 };
-
-static const struct comedi_lrange pci1720_ao_range = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
-};
-
-static const struct comedi_lrange pci171x_ao_range = {
- 2, {
- UNI_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-enum pci1710_boardid {
- BOARD_PCI1710,
- BOARD_PCI1710HG,
- BOARD_PCI1711,
- BOARD_PCI1713,
- BOARD_PCI1720,
- BOARD_PCI1731,
-};
-
-struct boardtype {
- const char *name; /* board name */
- int n_aichan; /* num of A/D chans */
- const struct comedi_lrange *rangelist_ai; /* rangelist for A/D */
- const char *rangecode_ai; /* range codes for programming */
- unsigned int is_pci1713:1;
- unsigned int is_pci1720:1;
- unsigned int has_irq:1;
- unsigned int has_large_fifo:1; /* 4K or 1K FIFO */
- unsigned int has_diff_ai:1;
- unsigned int has_ao:1;
- unsigned int has_di_do:1;
- unsigned int has_counter:1;
-};
-
-static const struct boardtype boardtypes[] = {
- [BOARD_PCI1710] = {
- .name = "pci1710",
- .n_aichan = 16,
- .rangelist_ai = &range_pci1710_3,
- .rangecode_ai = range_codes_pci1710_3,
- .has_irq = 1,
- .has_large_fifo = 1,
- .has_diff_ai = 1,
- .has_ao = 1,
- .has_di_do = 1,
- .has_counter = 1,
- },
- [BOARD_PCI1710HG] = {
- .name = "pci1710hg",
- .n_aichan = 16,
- .rangelist_ai = &range_pci1710hg,
- .rangecode_ai = range_codes_pci1710hg,
- .has_irq = 1,
- .has_large_fifo = 1,
- .has_diff_ai = 1,
- .has_ao = 1,
- .has_di_do = 1,
- .has_counter = 1,
- },
- [BOARD_PCI1711] = {
- .name = "pci1711",
- .n_aichan = 16,
- .rangelist_ai = &range_pci17x1,
- .rangecode_ai = range_codes_pci17x1,
- .has_irq = 1,
- .has_ao = 1,
- .has_di_do = 1,
- .has_counter = 1,
- },
- [BOARD_PCI1713] = {
- .name = "pci1713",
- .n_aichan = 32,
- .rangelist_ai = &range_pci1710_3,
- .rangecode_ai = range_codes_pci1710_3,
- .is_pci1713 = 1,
- .has_irq = 1,
- .has_large_fifo = 1,
- .has_diff_ai = 1,
- },
- [BOARD_PCI1720] = {
- .name = "pci1720",
- .is_pci1720 = 1,
- .has_ao = 1,
- },
- [BOARD_PCI1731] = {
- .name = "pci1731",
- .n_aichan = 16,
- .rangelist_ai = &range_pci17x1,
- .rangecode_ai = range_codes_pci17x1,
- .has_irq = 1,
- .has_di_do = 1,
- },
-};
-
-struct pci1710_private {
- unsigned int max_samples;
- unsigned int CntrlReg; /* Control register */
- unsigned char ai_et;
- unsigned int ai_et_CntrlReg;
- unsigned int ai_et_MuxVal;
-	unsigned int act_chanlist[32];	/* list of scanned channels */
-	unsigned char saved_seglen;	/* length of the non-repeating chanlist */
-	unsigned char da_ranges;	/* copy of D/A output range register */
-};
-
-static int pci171x_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct pci1710_private *devpriv = dev->private;
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int last_aref = CR_AREF(cmd->chanlist[0]);
- unsigned int next_chan = (chan0 + 1) % s->n_chan;
- unsigned int chansegment[32];
- unsigned int seglen;
- int i;
-
- if (cmd->chanlist_len == 1) {
- devpriv->saved_seglen = cmd->chanlist_len;
- return 0;
- }
-
- /* first channel is always ok */
- chansegment[0] = cmd->chanlist[0];
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
-
- if (cmd->chanlist[0] == cmd->chanlist[i])
- break; /* we detected a loop, stop */
-
- if (aref == AREF_DIFF && (chan & 1)) {
- dev_err(dev->class_dev,
- "Odd channel cannot be differential input!\n");
- return -EINVAL;
- }
-
- if (last_aref == AREF_DIFF)
- next_chan = (next_chan + 1) % s->n_chan;
- if (chan != next_chan) {
- dev_err(dev->class_dev,
- "channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
- i, chan, next_chan, chan0);
- return -EINVAL;
- }
-
- /* next correct channel in list */
- chansegment[i] = cmd->chanlist[i];
- last_aref = aref;
- }
- seglen = i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- if (cmd->chanlist[i] != chansegment[i % seglen]) {
- dev_err(dev->class_dev,
- "bad channel, reference or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
- i, CR_CHAN(chansegment[i]),
- CR_RANGE(chansegment[i]),
- CR_AREF(chansegment[i]),
- CR_CHAN(cmd->chanlist[i % seglen]),
- CR_RANGE(cmd->chanlist[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
- return -EINVAL;
- }
- }
- devpriv->saved_seglen = seglen;
-
- return 0;
-}
-
-static void pci171x_ai_setup_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist,
- unsigned int n_chan,
- unsigned int seglen)
-{
- const struct boardtype *board = dev->board_ptr;
- struct pci1710_private *devpriv = dev->private;
- unsigned int first_chan = CR_CHAN(chanlist[0]);
- unsigned int last_chan = CR_CHAN(chanlist[seglen - 1]);
- unsigned int i;
-
- for (i = 0; i < seglen; i++) { /* store range list to card */
- unsigned int chan = CR_CHAN(chanlist[i]);
- unsigned int range = CR_RANGE(chanlist[i]);
- unsigned int aref = CR_AREF(chanlist[i]);
- unsigned int rangeval;
-
- rangeval = board->rangecode_ai[range];
- if (aref == AREF_DIFF)
- rangeval |= 0x0020;
-
- /* select channel and set range */
- outw(chan | (chan << 8), dev->iobase + PCI171x_MUX);
- outw(rangeval, dev->iobase + PCI171x_RANGE);
-
- devpriv->act_chanlist[i] = chan;
- }
- for ( ; i < n_chan; i++) /* store remainder of channel list */
- devpriv->act_chanlist[i] = CR_CHAN(chanlist[i]);
-
- /* select channel interval to scan */
- devpriv->ai_et_MuxVal = first_chan | (last_chan << 8);
- outw(devpriv->ai_et_MuxVal, dev->iobase + PCI171x_MUX);
-}
-
-static int pci171x_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + PCI171x_STATUS);
- if ((status & Status_FE) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int pci171x_ai_read_sample(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int cur_chan,
- unsigned int *val)
-{
- const struct boardtype *board = dev->board_ptr;
- struct pci1710_private *devpriv = dev->private;
- unsigned int sample;
- unsigned int chan;
-
- sample = inw(dev->iobase + PCI171x_AD_DATA);
- if (!board->is_pci1713) {
- /*
- * The upper 4 bits of the 16-bit sample are the channel number
- * that the sample was acquired from. Verify that this channel
- * number matches the expected channel number.
- */
- chan = sample >> 12;
- if (chan != devpriv->act_chanlist[cur_chan]) {
- dev_err(dev->class_dev,
- "A/D data droput: received from channel %d, expected %d\n",
- chan, devpriv->act_chanlist[cur_chan]);
- return -ENODATA;
- }
- }
- *val = sample & s->maxdata;
- return 0;
-}
-
-static int pci171x_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pci1710_private *devpriv = dev->private;
- int ret = 0;
- int i;
-
- devpriv->CntrlReg &= Control_CNT0;
- devpriv->CntrlReg |= Control_SW; /* set software trigger */
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
-
- pci171x_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1);
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val;
-
- outw(0, dev->iobase + PCI171x_SOFTTRG); /* start conversion */
-
- ret = comedi_timeout(dev, s, insn, pci171x_ai_eoc, 0);
- if (ret)
- break;
-
- ret = pci171x_ai_read_sample(dev, s, 0, &val);
- if (ret)
- break;
-
- data[i] = val;
- }
-
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
-
- return ret ? ret : insn->n;
-}
-
-static int pci171x_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pci1710_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int reg = chan ? PCI171x_DA2 : PCI171x_DA1;
- unsigned int val = s->readback[chan];
- int i;
-
- devpriv->da_ranges &= ~(1 << (chan << 1));
- devpriv->da_ranges |= (range << (chan << 1));
- outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outw(val, dev->iobase + reg);
- }
-
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pci171x_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inw(dev->iobase + PCI171x_DI);
-
- return insn->n;
-}
-
-static int pci171x_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + PCI171x_DO);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pci1720_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pci1710_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
- int i;
-
- val = devpriv->da_ranges & (~(0x03 << (chan << 1)));
- val |= (range << (chan << 1));
- if (val != devpriv->da_ranges) {
- outb(val, dev->iobase + PCI1720_RANGE);
- devpriv->da_ranges = val;
- }
-
- val = s->readback[chan];
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
- outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
- }
-
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pci171x_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci1710_private *devpriv = dev->private;
-
- devpriv->CntrlReg &= Control_CNT0;
- devpriv->CntrlReg |= Control_SW;
- /* reset any operations */
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, false);
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
-
- return 0;
-}
-
-static void pci1710_handle_every_sample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int status;
- unsigned int val;
- int ret;
-
- status = inw(dev->iobase + PCI171x_STATUS);
- if (status & Status_FE) {
- dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", status);
- s->async->events |= COMEDI_CB_ERROR;
- return;
- }
- if (status & Status_FF) {
- dev_dbg(dev->class_dev,
- "A/D FIFO Full status (Fatal Error!) (%4x)\n", status);
- s->async->events |= COMEDI_CB_ERROR;
- return;
- }
-
- outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
-
-	while (!(inw(dev->iobase + PCI171x_STATUS) & Status_FE)) {
- ret = pci171x_ai_read_sample(dev, s, s->async->cur_chan, &val);
- if (ret) {
- s->async->events |= COMEDI_CB_ERROR;
- break;
- }
-
- comedi_buf_write_samples(s, &val, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
- }
-
- outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
-}
-
-static void pci1710_handle_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci1710_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int status;
- int i;
-
- status = inw(dev->iobase + PCI171x_STATUS);
- if (!(status & Status_FH)) {
- dev_dbg(dev->class_dev, "A/D FIFO not half full!\n");
- async->events |= COMEDI_CB_ERROR;
- return;
- }
- if (status & Status_FF) {
- dev_dbg(dev->class_dev,
- "A/D FIFO Full status (Fatal Error!)\n");
- async->events |= COMEDI_CB_ERROR;
- return;
- }
-
- for (i = 0; i < devpriv->max_samples; i++) {
- unsigned int val;
- int ret;
-
- ret = pci171x_ai_read_sample(dev, s, s->async->cur_chan, &val);
- if (ret) {
- s->async->events |= COMEDI_CB_ERROR;
- break;
- }
-
- if (!comedi_buf_write_samples(s, &val, 1))
- break;
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- break;
- }
- }
-
- outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
-}
-
-static irqreturn_t interrupt_service_pci1710(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct pci1710_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- struct comedi_cmd *cmd;
-
- if (!dev->attached) /* is device attached? */
- return IRQ_NONE; /* no, exit */
-
- s = dev->read_subdev;
- cmd = &s->async->cmd;
-
- /* is this interrupt from our board? */
- if (!(inw(dev->iobase + PCI171x_STATUS) & Status_IRQ))
- return IRQ_NONE; /* no, exit */
-
- if (devpriv->ai_et) { /* Switch from initial TRIG_EXT to TRIG_xxx. */
- devpriv->ai_et = 0;
- devpriv->CntrlReg &= Control_CNT0;
- devpriv->CntrlReg |= Control_SW; /* set software trigger */
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
- devpriv->CntrlReg = devpriv->ai_et_CntrlReg;
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
- outw(devpriv->ai_et_MuxVal, dev->iobase + PCI171x_MUX);
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- return IRQ_HANDLED;
- }
-
- if (cmd->flags & CMDF_WAKE_EOS)
- pci1710_handle_every_sample(dev, s);
- else
- pci1710_handle_fifo(dev, s);
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int pci171x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pci1710_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- pci171x_ai_setup_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len,
- devpriv->saved_seglen);
-
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
-
- devpriv->CntrlReg &= Control_CNT0;
- if ((cmd->flags & CMDF_WAKE_EOS) == 0)
- devpriv->CntrlReg |= Control_ONEFH;
-
- if (cmd->convert_src == TRIG_TIMER) {
- comedi_8254_update_divisors(dev->pacer);
-
- devpriv->CntrlReg |= Control_PACER | Control_IRQEN;
- if (cmd->start_src == TRIG_EXT) {
- devpriv->ai_et_CntrlReg = devpriv->CntrlReg;
- devpriv->CntrlReg &=
- ~(Control_PACER | Control_ONEFH | Control_GATE);
- devpriv->CntrlReg |= Control_EXT;
- devpriv->ai_et = 1;
- } else { /* TRIG_NOW */
- devpriv->ai_et = 0;
- }
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
-
- if (cmd->start_src == TRIG_NOW)
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- } else { /* TRIG_EXT */
- devpriv->CntrlReg |= Control_EXT | Control_IRQEN;
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
- }
-
- return 0;
-}
-
-static int pci171x_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* step 2a: make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* step 2b: and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER)
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
- else /* TRIG_FOLLOW */
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int arg = cmd->convert_arg;
-
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list */
-
- err |= pci171x_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int pci171x_insn_counter_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pci1710_private *devpriv = dev->private;
-
- switch (data[0]) {
- case INSN_CONFIG_SET_CLOCK_SRC:
- switch (data[1]) {
- case 0: /* internal */
- devpriv->ai_et_CntrlReg &= ~Control_CNT0;
- break;
- case 1: /* external */
- devpriv->ai_et_CntrlReg |= Control_CNT0;
- break;
- default:
- return -EINVAL;
- }
- outw(devpriv->ai_et_CntrlReg, dev->iobase + PCI171x_CONTROL);
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- if (devpriv->ai_et_CntrlReg & Control_CNT0) {
- data[1] = 1;
- data[2] = 0;
- } else {
- data[1] = 0;
- data[2] = I8254_OSC_BASE_10MHZ;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static int pci171x_reset(struct comedi_device *dev)
-{
- const struct boardtype *board = dev->board_ptr;
- struct pci1710_private *devpriv = dev->private;
-
- /* Software trigger, CNT0=external */
- devpriv->CntrlReg = Control_SW | Control_CNT0;
- /* reset any operations */
- outw(devpriv->CntrlReg, dev->iobase + PCI171x_CONTROL);
- outb(0, dev->iobase + PCI171x_CLRFIFO); /* clear FIFO */
- outb(0, dev->iobase + PCI171x_CLRINT); /* clear INT request */
- devpriv->da_ranges = 0;
- if (board->has_ao) {
- /* set DACs to 0..5V */
- outb(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
- outw(0, dev->iobase + PCI171x_DA1); /* set DA outputs to 0V */
- outw(0, dev->iobase + PCI171x_DA2);
- }
- outw(0, dev->iobase + PCI171x_DO); /* digital outputs to 0 */
- outb(0, dev->iobase + PCI171x_CLRFIFO); /* clear FIFO */
- outb(0, dev->iobase + PCI171x_CLRINT); /* clear INT request */
-
- return 0;
-}
-
-static int pci1720_reset(struct comedi_device *dev)
-{
- struct pci1710_private *devpriv = dev->private;
- /* set synchronous output mode */
- outb(Syncont_SC0, dev->iobase + PCI1720_SYNCONT);
- devpriv->da_ranges = 0xAA;
- /* set all ranges to +/-5V */
- outb(devpriv->da_ranges, dev->iobase + PCI1720_RANGE);
- outw(0x0800, dev->iobase + PCI1720_DA0); /* set outputs to 0V */
- outw(0x0800, dev->iobase + PCI1720_DA1);
- outw(0x0800, dev->iobase + PCI1720_DA2);
- outw(0x0800, dev->iobase + PCI1720_DA3);
- outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
-
- return 0;
-}
-
-static int pci1710_reset(struct comedi_device *dev)
-{
- const struct boardtype *board = dev->board_ptr;
-
- if (board->is_pci1720)
- return pci1720_reset(dev);
-
- return pci171x_reset(dev);
-}
-
-static int pci1710_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct boardtype *board = NULL;
- struct pci1710_private *devpriv;
- struct comedi_subdevice *s;
- int ret, subdev, n_subdevices;
-
- if (context < ARRAY_SIZE(boardtypes))
- board = &boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 2);
-
- dev->pacer = comedi_8254_init(dev->iobase + PCI171X_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO16, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- n_subdevices = 0;
- if (board->n_aichan)
- n_subdevices++;
- if (board->has_ao)
- n_subdevices++;
- if (board->has_di_do)
- n_subdevices += 2;
- if (board->has_counter)
- n_subdevices++;
-
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return ret;
-
- pci1710_reset(dev);
-
- if (board->has_irq && pcidev->irq) {
- ret = request_irq(pcidev->irq, interrupt_service_pci1710,
- IRQF_SHARED, dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- subdev = 0;
-
- if (board->n_aichan) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND;
- if (board->has_diff_ai)
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = board->n_aichan;
- s->maxdata = 0x0fff;
- s->range_table = board->rangelist_ai;
- s->insn_read = pci171x_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = pci171x_ai_cmdtest;
- s->do_cmd = pci171x_ai_cmd;
- s->cancel = pci171x_ai_cancel;
- }
- subdev++;
- }
-
- if (board->has_ao) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->maxdata = 0x0fff;
- if (board->is_pci1720) {
- s->n_chan = 4;
- s->range_table = &pci1720_ao_range;
- s->insn_write = pci1720_ao_insn_write;
- } else {
- s->n_chan = 2;
- s->range_table = &pci171x_ao_range;
- s->insn_write = pci171x_ao_insn_write;
- }
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* initialize the readback values to match the board reset */
- if (board->is_pci1720) {
- int i;
-
- for (i = 0; i < s->n_chan; i++)
- s->readback[i] = 0x0800;
- }
-
- subdev++;
- }
-
- if (board->has_di_do) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci171x_di_insn_bits;
- subdev++;
-
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci171x_do_insn_bits;
- subdev++;
- }
-
- /* Counter subdevice (8254) */
- if (board->has_counter) {
- s = &dev->subdevices[subdev];
- comedi_8254_subdevice_init(s, dev->pacer);
-
- dev->pacer->insn_config = pci171x_insn_counter_config;
-
- /* counters 1 and 2 are used internally for the pacer */
- comedi_8254_set_busy(dev->pacer, 1, true);
- comedi_8254_set_busy(dev->pacer, 2, true);
-
- subdev++;
- }
-
- /* max_samples is half the FIFO size (2 bytes/sample) */
- devpriv->max_samples = (board->has_large_fifo) ? 2048 : 512;
-
- return 0;
-}
-
-static void pci1710_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- pci1710_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver adv_pci1710_driver = {
- .driver_name = "adv_pci1710",
- .module = THIS_MODULE,
- .auto_attach = pci1710_auto_attach,
- .detach = pci1710_detach,
-};
-
-static int adv_pci1710_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adv_pci1710_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id adv_pci1710_pci_table[] = {
- {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050),
- .driver_data = BOARD_PCI1710,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0x0000),
- .driver_data = BOARD_PCI1710,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xb100),
- .driver_data = BOARD_PCI1710,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xb200),
- .driver_data = BOARD_PCI1710,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xc100),
- .driver_data = BOARD_PCI1710,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xc200),
- .driver_data = BOARD_PCI1710,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710, 0x1000, 0xd100),
- .driver_data = BOARD_PCI1710,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0x0002),
- .driver_data = BOARD_PCI1710HG,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xb102),
- .driver_data = BOARD_PCI1710HG,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xb202),
- .driver_data = BOARD_PCI1710HG,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xc102),
- .driver_data = BOARD_PCI1710HG,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710,
- PCI_VENDOR_ID_ADVANTECH, 0xc202),
- .driver_data = BOARD_PCI1710HG,
- }, {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADVANTECH, 0x1710, 0x1000, 0xd102),
- .driver_data = BOARD_PCI1710HG,
- },
- { PCI_VDEVICE(ADVANTECH, 0x1711), BOARD_PCI1711 },
- { PCI_VDEVICE(ADVANTECH, 0x1713), BOARD_PCI1713 },
- { PCI_VDEVICE(ADVANTECH, 0x1720), BOARD_PCI1720 },
- { PCI_VDEVICE(ADVANTECH, 0x1731), BOARD_PCI1731 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adv_pci1710_pci_table);
-
-static struct pci_driver adv_pci1710_pci_driver = {
- .name = "adv_pci1710",
- .id_table = adv_pci1710_pci_table,
- .probe = adv_pci1710_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adv_pci1710_driver, adv_pci1710_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi: Advantech PCI-1710 Series Multifunction DAS Cards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
deleted file mode 100644
index 1921a97cc5ca..000000000000
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * adv_pci1723.c
- * Comedi driver for the Advantech PCI-1723 card.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: adv_pci1723
- * Description: Advantech PCI-1723
- * Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk>
- * Devices: [Advantech] PCI-1723 (adv_pci1723)
- * Updated: Mon, 14 Apr 2008 15:12:56 +0100
- * Status: works
- *
- * Configuration Options: not applicable, uses comedi PCI auto config
- *
- * Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V.
- *
- * Subdevice 1 is 16-channel DIO. The channels are configurable as
- * input or output in 2 groups (0 to 7, 8 to 15). Configuring any
- * channel implicitly configures all channels in the same group; see
- * the userspace sketch after this comment block.
- *
- * TODO:
- * 1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA,
- * 4 to 20 mA).
- * 2. Read the initial ranges and values of the AO subdevice at
- * start-up instead of reinitializing them.
- * 3. Implement calibration.
- */
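To make the subdevice layout described above concrete, here is a hedged comedilib sketch that writes mid-scale to AO channel 0 and drives DIO channel 0 high; configuring that one channel switches the whole low group (0 to 7) to output, per the comment above. The device node, range index and the availability of comedilib are assumptions for illustration only.

	#include <comedilib.h>

	int main(void)
	{
		comedi_t *it = comedi_open("/dev/comedi0");	/* assumed device node */

		if (!it)
			return 1;

		/* AO subdevice 0: 16-bit, +/-10 V; 0x8000 is mid-scale (about 0 V) */
		comedi_data_write(it, 0, 0, 0, AREF_GROUND, 0x8000);

		/* DIO subdevice 1: configuring channel 0 as an output implicitly
		 * configures the whole 0-7 group, as described above */
		comedi_dio_config(it, 1, 0, COMEDI_OUTPUT);
		comedi_dio_write(it, 1, 0, 1);

		comedi_close(it);
		return 0;
	}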
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-/*
- * PCI Bar 2 I/O Register map (dev->iobase)
- */
-#define PCI1723_AO_REG(x) (0x00 + ((x) * 2))
-#define PCI1723_BOARD_ID_REG 0x10
-#define PCI1723_BOARD_ID_MASK (0xf << 0)
-#define PCI1723_SYNC_CTRL_REG 0x12
-#define PCI1723_SYNC_CTRL_ASYNC (0 << 0)
-#define PCI1723_SYNC_CTRL_SYNC (1 << 0)
-#define PCI1723_CTRL_REG 0x14
-#define PCI1723_CTRL_BUSY (1 << 15)
-#define PCI1723_CTRL_INIT (1 << 14)
-#define PCI1723_CTRL_SELF (1 << 8)
-#define PCI1723_CTRL_IDX(x) (((x) & 0x3) << 6)
-#define PCI1723_CTRL_RANGE(x) (((x) & 0x3) << 4)
-#define PCI1723_CTRL_GAIN (0 << 3)
-#define PCI1723_CTRL_OFFSET (1 << 3)
-#define PCI1723_CTRL_CHAN(x) (((x) & 0x7) << 0)
-#define PCI1723_CALIB_CTRL_REG 0x16
-#define PCI1723_CALIB_CTRL_CS (1 << 2)
-#define PCI1723_CALIB_CTRL_DAT (1 << 1)
-#define PCI1723_CALIB_CTRL_CLK (1 << 0)
-#define PCI1723_CALIB_STROBE_REG 0x18
-#define PCI1723_DIO_CTRL_REG 0x1a
-#define PCI1723_DIO_CTRL_HDIO (1 << 1)
-#define PCI1723_DIO_CTRL_LDIO (1 << 0)
-#define PCI1723_DIO_DATA_REG 0x1c
-#define PCI1723_CALIB_DATA_REG 0x1e
-#define PCI1723_SYNC_STROBE_REG 0x20
-#define PCI1723_RESET_AO_STROBE_REG 0x22
-#define PCI1723_RESET_CALIB_STROBE_REG 0x24
-#define PCI1723_RANGE_STROBE_REG 0x26
-#define PCI1723_VREF_REG 0x28
-#define PCI1723_VREF_NEG10V (0 << 0)
-#define PCI1723_VREF_0V (1 << 0)
-#define PCI1723_VREF_POS10V (3 << 0)
-
-static int pci1723_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- outw(val, dev->iobase + PCI1723_AO_REG(chan));
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int pci1723_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = (chan < 8) ? 0x00ff : 0xff00;
- unsigned short mode = 0x0000; /* assume output */
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- if (!(s->io_bits & 0x00ff))
- mode |= PCI1723_DIO_CTRL_LDIO; /* low byte input */
- if (!(s->io_bits & 0xff00))
- mode |= PCI1723_DIO_CTRL_HDIO; /* high byte input */
- outw(mode, dev->iobase + PCI1723_DIO_CTRL_REG);
-
- return insn->n;
-}
-
-static int pci1723_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + PCI1723_DIO_DATA_REG);
-
- data[1] = inw(dev->iobase + PCI1723_DIO_DATA_REG);
-
- return insn->n;
-}
-
-static int pci1723_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- unsigned int val;
- int ret;
- int i;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 2);
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 8;
- s->maxdata = 0xffff;
- s->range_table = &range_bipolar10;
- s->insn_write = pci1723_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* synchronously reset all analog outputs to 0V, +/-10V range */
- outw(PCI1723_SYNC_CTRL_SYNC, dev->iobase + PCI1723_SYNC_CTRL_REG);
- for (i = 0; i < s->n_chan; i++) {
- outw(PCI1723_CTRL_RANGE(0) | PCI1723_CTRL_CHAN(i),
-		     dev->iobase + PCI1723_CTRL_REG);
- outw(0, dev->iobase + PCI1723_RANGE_STROBE_REG);
-
- outw(0x8000, dev->iobase + PCI1723_AO_REG(i));
- s->readback[i] = 0x8000;
- }
- outw(0, dev->iobase + PCI1723_SYNC_STROBE_REG);
-
-	/* disable synchronous control */
- outw(PCI1723_SYNC_CTRL_ASYNC, dev->iobase + PCI1723_SYNC_CTRL_REG);
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = pci1723_dio_insn_config;
- s->insn_bits = pci1723_dio_insn_bits;
-
- /* get initial DIO direction and state */
- val = inw(dev->iobase + PCI1723_DIO_CTRL_REG);
- if (!(val & PCI1723_DIO_CTRL_LDIO))
- s->io_bits |= 0x00ff; /* low byte output */
- if (!(val & PCI1723_DIO_CTRL_HDIO))
- s->io_bits |= 0xff00; /* high byte output */
- s->state = inw(dev->iobase + PCI1723_DIO_DATA_REG);
-
- return 0;
-}
-
-static struct comedi_driver adv_pci1723_driver = {
- .driver_name = "adv_pci1723",
- .module = THIS_MODULE,
- .auto_attach = pci1723_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int adv_pci1723_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adv_pci1723_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id adv_pci1723_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adv_pci1723_pci_table);
-
-static struct pci_driver adv_pci1723_pci_driver = {
- .name = "adv_pci1723",
- .id_table = adv_pci1723_pci_table,
- .probe = adv_pci1723_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adv_pci1723_driver, adv_pci1723_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Advantech PCI-1723 Comedi driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
deleted file mode 100644
index 9677111f9ab2..000000000000
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * adv_pci1724.c
- * Comedi driver for the Advantech PCI-1724U card.
- *
- * Author: Frank Mori Hess <fmh6jj@gmail.com>
- * Copyright (C) 2013 GnuBIO Inc
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: adv_pci1724
- * Description: Advantech PCI-1724U
- * Devices: [Advantech] PCI-1724U (adv_pci1724)
- * Author: Frank Mori Hess <fmh6jj@gmail.com>
- * Updated: 2013-02-09
- * Status: works
- *
- * Configuration Options: not applicable, uses comedi PCI auto config
- *
- * Subdevice 0 is the analog output.
- * Subdevice 1 is the offset calibration for the analog output.
- * Subdevice 2 is the gain calibration for the analog output.
- *
- * The calibration offset and gain values have a large effect on the
- * analog output, so it is possible to adjust the analog output to
- * have an output range significantly different from the board's
- * nominal output ranges. For a calibrated +/-10V range, the analog
- * output's offset will be set somewhere near mid-range (0x2000) and
- * its gain will be near maximum (0x3fff).
- *
- * There is no functional difference between the board's documented
- * 0-20mA and 4-20mA output ranges. Selecting one or the other is
- * simply a matter of adjusting the offset and gain calibration until
- * the board outputs in the desired range. A userspace sketch of
- * writing these subdevices follows this comment block.
- */
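A hedged comedilib sketch of the three subdevices described above: it nudges the offset and gain calibration of one channel toward the values the comment quotes for a calibrated +/-10V range, then writes mid-scale to the analog output. The device node, range index and the presence of comedilib are assumptions for illustration only.

	#include <comedilib.h>

	int main(void)
	{
		comedi_t *it = comedi_open("/dev/comedi0");	/* assumed device node */
		unsigned int chan = 0;

		if (!it)
			return 1;

		/* subdevice 1: offset calibration (near mid-range for +/-10V) */
		comedi_data_write(it, 1, chan, 0, AREF_GROUND, 0x2000);
		/* subdevice 2: gain calibration (near maximum for +/-10V) */
		comedi_data_write(it, 2, chan, 0, AREF_GROUND, 0x3fff);
		/* subdevice 0: analog output, 14-bit; 0x2000 is mid-scale */
		comedi_data_write(it, 0, chan, 0, AREF_GROUND, 0x2000);

		comedi_close(it);
		return 0;
	}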
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-/*
- * PCI bar 2 Register I/O map (dev->iobase)
- */
-#define PCI1724_DAC_CTRL_REG 0x00
-#define PCI1724_DAC_CTRL_GX(x) (1 << (20 + ((x) / 8)))
-#define PCI1724_DAC_CTRL_CX(x) (((x) % 8) << 16)
-#define PCI1724_DAC_CTRL_MODE_GAIN (1 << 14)
-#define PCI1724_DAC_CTRL_MODE_OFFSET (2 << 14)
-#define PCI1724_DAC_CTRL_MODE_NORMAL (3 << 14)
-#define PCI1724_DAC_CTRL_MODE_MASK (3 << 14)
-#define PCI1724_DAC_CTRL_DATA(x) (((x) & 0x3fff) << 0)
-#define PCI1724_SYNC_CTRL_REG 0x04
-#define PCI1724_SYNC_CTRL_DACSTAT (1 << 1)
-#define PCI1724_SYNC_CTRL_SYN (1 << 0)
-#define PCI1724_EEPROM_CTRL_REG 0x08
-#define PCI1724_SYNC_TRIG_REG 0x0c /* any value works */
-#define PCI1724_BOARD_ID_REG 0x10
-#define PCI1724_BOARD_ID_MASK (0xf << 0)
-
-static const struct comedi_lrange adv_pci1724_ao_ranges = {
- 4, {
- BIP_RANGE(10),
- RANGE_mA(0, 20),
- RANGE_mA(4, 20),
- RANGE_unitless(0, 1)
- }
-};
-
-static int adv_pci1724_dac_idle(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inl(dev->iobase + PCI1724_SYNC_CTRL_REG);
- if ((status & PCI1724_SYNC_CTRL_DACSTAT) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int adv_pci1724_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long mode = (unsigned long)s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int ctrl;
- int ret;
- int i;
-
- ctrl = PCI1724_DAC_CTRL_GX(chan) | PCI1724_DAC_CTRL_CX(chan) | mode;
-
- /* turn off synchronous mode */
- outl(0, dev->iobase + PCI1724_SYNC_CTRL_REG);
-
- for (i = 0; i < insn->n; ++i) {
- unsigned int val = data[i];
-
- ret = comedi_timeout(dev, s, insn, adv_pci1724_dac_idle, 0);
- if (ret)
- return ret;
-
- outl(ctrl | PCI1724_DAC_CTRL_DATA(val),
- dev->iobase + PCI1724_DAC_CTRL_REG);
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int adv_pci1724_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- unsigned int board_id;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->iobase = pci_resource_start(pcidev, 2);
- board_id = inl(dev->iobase + PCI1724_BOARD_ID_REG);
- dev_info(dev->class_dev, "board id: %d\n",
- board_id & PCI1724_BOARD_ID_MASK);
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = 32;
- s->maxdata = 0x3fff;
- s->range_table = &adv_pci1724_ao_ranges;
- s->insn_write = adv_pci1724_insn_write;
- s->private = (void *)PCI1724_DAC_CTRL_MODE_NORMAL;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Offset Calibration subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 32;
- s->maxdata = 0x3fff;
- s->insn_write = adv_pci1724_insn_write;
- s->private = (void *)PCI1724_DAC_CTRL_MODE_OFFSET;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Gain Calibration subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 32;
- s->maxdata = 0x3fff;
- s->insn_write = adv_pci1724_insn_write;
- s->private = (void *)PCI1724_DAC_CTRL_MODE_GAIN;
-
- return comedi_alloc_subdev_readback(s);
-}
-
-static struct comedi_driver adv_pci1724_driver = {
- .driver_name = "adv_pci1724",
- .module = THIS_MODULE,
- .auto_attach = adv_pci1724_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int adv_pci1724_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &adv_pci1724_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id adv_pci1724_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1724) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adv_pci1724_pci_table);
-
-static struct pci_driver adv_pci1724_pci_driver = {
- .name = "adv_pci1724",
- .id_table = adv_pci1724_pci_table,
- .probe = adv_pci1724_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adv_pci1724_driver, adv_pci1724_pci_driver);
-
-MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
-MODULE_DESCRIPTION("Advantech PCI-1724U Comedi driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
deleted file mode 100644
index f1b3c5aa8d79..000000000000
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ /dev/null
@@ -1,1113 +0,0 @@
-/*
- * comedi/drivers/adv_pci_dio.c
- *
- * Author: Michal Dobes <dobes@tesnet.cz>
- *
- * Hardware driver for Advantech PCI DIO cards.
-*/
-/*
-Driver: adv_pci_dio
-Description: Advantech PCI-1730, PCI-1733, PCI-1734, PCI-1735U,
- PCI-1736UP, PCI-1739U, PCI-1750, PCI-1751, PCI-1752,
- PCI-1753/E, PCI-1754, PCI-1756, PCI-1760, PCI-1762
-Author: Michal Dobes <dobes@tesnet.cz>
-Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
- PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
- PCI-1751, PCI-1752, PCI-1753,
- PCI-1753+PCI-1753E, PCI-1754, PCI-1756,
- PCI-1760, PCI-1762
-Status: untested
-Updated: Mon, 09 Jan 2012 12:40:46 +0000
-
-This driver currently supports only the insn interface for DI/DO/DIO
-(see the userspace sketch after this comment block).
-
-Configuration options:
- [0] - PCI bus of device (optional)
- [1] - PCI slot of device (optional)
- If bus/slot is not specified, the first available PCI
- device will be used.
-
-*/
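Since only the insn interface is provided for the DI/DO/DIO subdevices, a userspace sketch via comedilib is correspondingly simple. The subdevice numbers below are placeholders -- which subdevice is DI and which is DO depends on the card type table further down -- and comedilib itself is an assumed dependency.

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *it = comedi_open("/dev/comedi0");	/* assumed device node */
		unsigned int bit;

		if (!it)
			return 1;

		if (comedi_dio_read(it, 0, 0, &bit) >= 0)	/* placeholder DI subdevice */
			printf("DI channel 0 = %u\n", bit);

		comedi_dio_write(it, 1, 0, 1);	/* placeholder DO subdevice: drive channel 0 high */

		comedi_close(it);
		return 0;
	}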
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
-
-/* hardware types of the cards */
-enum hw_cards_id {
- TYPE_PCI1730, TYPE_PCI1733, TYPE_PCI1734, TYPE_PCI1735, TYPE_PCI1736,
- TYPE_PCI1739,
- TYPE_PCI1750,
- TYPE_PCI1751,
- TYPE_PCI1752,
- TYPE_PCI1753, TYPE_PCI1753E,
- TYPE_PCI1754, TYPE_PCI1756,
- TYPE_PCI1760,
- TYPE_PCI1762
-};
-
-/* which I/O instructions to use */
-enum hw_io_access {
- IO_8b, IO_16b
-};
-
-#define MAX_DI_SUBDEVS 2 /* max number of DI subdevices per card */
-#define MAX_DO_SUBDEVS 2 /* max number of DO subdevices per card */
-#define MAX_DIO_SUBDEVG 2 /* max number of DIO subdevice groups per
- * card */
-
-#define PCIDIO_MAINREG 2 /* main I/O region for all Advantech cards? */
-
-/* Register offset definitions */
-/* Advantech PCI-1730/3/4 */
-#define PCI1730_IDI 0 /* R: Isolated digital input 0-15 */
-#define PCI1730_IDO 0 /* W: Isolated digital output 0-15 */
-#define PCI1730_DI 2 /* R: Digital input 0-15 */
-#define PCI1730_DO 2 /* W: Digital output 0-15 */
-#define PCI1733_IDI 0 /* R: Isolated digital input 0-31 */
-#define PCI1730_3_INT_EN 0x08 /* R/W: enable/disable interrupts */
-#define PCI1730_3_INT_RF 0x0c /* R/W: set falling/rising edge for
- * interrupts */
-#define PCI1730_3_INT_CLR 0x10 /* R/W: clear interrupts */
-#define PCI1734_IDO 0 /* W: Isolated digital output 0-31 */
-#define PCI173x_BOARDID 4 /* R: Board I/D switch for 1730/3/4 */
-
-/* Advantech PCI-1735U */
-#define PCI1735_DI 0 /* R: Digital input 0-31 */
-#define PCI1735_DO 0 /* W: Digital output 0-31 */
-#define PCI1735_C8254 4 /* R/W: 8254 counter */
-#define PCI1735_BOARDID 8 /* R: Board I/D switch for 1735U */
-
-/* Advantech PCI-1736UP */
-#define PCI1736_IDI 0 /* R: Isolated digital input 0-15 */
-#define PCI1736_IDO 0 /* W: Isolated digital output 0-15 */
-#define PCI1736_3_INT_EN 0x08 /* R/W: enable/disable interrupts */
-#define PCI1736_3_INT_RF 0x0c /* R/W: set falling/rising edge for
- * interrupts */
-#define PCI1736_3_INT_CLR 0x10 /* R/W: clear interrupts */
-#define PCI1736_BOARDID 4 /* R: Board I/D switch for 1736UP */
-#define PCI1736_MAINREG 0 /* Normal register (2) doesn't work */
-
-/* Advantech PCI-1739U */
-#define PCI1739_DIO 0 /* R/W: begin of 8255 registers block */
-#define PCI1739_ICR 32 /* W: Interrupt control register */
-#define PCI1739_ISR 32 /* R: Interrupt status register */
-#define PCI1739_BOARDID 8 /* R: Board I/D switch for 1739U */
-
-/* Advantech PCI-1750 */
-#define PCI1750_IDI 0 /* R: Isolated digital input 0-15 */
-#define PCI1750_IDO 0 /* W: Isolated digital output 0-15 */
-#define PCI1750_ICR 32 /* W: Interrupt control register */
-#define PCI1750_ISR 32 /* R: Interrupt status register */
-
-/* Advantech PCI-1751/3/3E */
-#define PCI1751_DIO 0 /* R/W: begin of 8255 registers block */
-#define PCI1751_CNT 24 /* R/W: begin of 8254 registers block */
-#define PCI1751_ICR 32 /* W: Interrupt control register */
-#define PCI1751_ISR 32 /* R: Interrupt status register */
-#define PCI1753_DIO 0 /* R/W: begin of 8255 registers block */
-#define PCI1753_ICR0 16 /* R/W: Interrupt control register group 0 */
-#define PCI1753_ICR1 17 /* R/W: Interrupt control register group 1 */
-#define PCI1753_ICR2 18 /* R/W: Interrupt control register group 2 */
-#define PCI1753_ICR3 19 /* R/W: Interrupt control register group 3 */
-#define PCI1753E_DIO 32 /* R/W: begin of 8255 registers block */
-#define PCI1753E_ICR0 48 /* R/W: Interrupt control register group 0 */
-#define PCI1753E_ICR1 49 /* R/W: Interrupt control register group 1 */
-#define PCI1753E_ICR2 50 /* R/W: Interrupt control register group 2 */
-#define PCI1753E_ICR3 51 /* R/W: Interrupt control register group 3 */
-
-/* Advantech PCI-1752/4/6 */
-#define PCI1752_IDO 0 /* R/W: Digital output 0-31 */
-#define PCI1752_IDO2 4 /* R/W: Digital output 32-63 */
-#define PCI1754_IDI 0 /* R: Digital input 0-31 */
-#define PCI1754_IDI2 4 /* R: Digital input 32-63 */
-#define PCI1756_IDI 0 /* R: Digital input 0-31 */
-#define PCI1756_IDO 4 /* R/W: Digital output 0-31 */
-#define PCI1754_6_ICR0 0x08 /* R/W: Interrupt control register group 0 */
-#define PCI1754_6_ICR1 0x0a /* R/W: Interrupt control register group 1 */
-#define PCI1754_ICR2 0x0c /* R/W: Interrupt control register group 2 */
-#define PCI1754_ICR3 0x0e /* R/W: Interrupt control register group 3 */
-#define PCI1752_6_CFC 0x12 /* R/W: set/read channel freeze function */
-#define PCI175x_BOARDID 0x10 /* R: Board I/D switch for 1752/4/6 */
-
-/* Advantech PCI-1762 registers */
-#define PCI1762_RO 0 /* R/W: Relays status/output */
-#define PCI1762_IDI 2 /* R: Isolated input status */
-#define PCI1762_BOARDID 4 /* R: Board I/D switch */
-#define PCI1762_ICR 6 /* W: Interrupt control register */
-#define PCI1762_ISR 6 /* R: Interrupt status register */
-
-/* Advantech PCI-1760 registers */
-#define OMB0 0x0c /* W: Mailbox outgoing registers */
-#define OMB1 0x0d
-#define OMB2 0x0e
-#define OMB3 0x0f
-#define IMB0 0x1c /* R: Mailbox incoming registers */
-#define IMB1 0x1d
-#define IMB2 0x1e
-#define IMB3 0x1f
-#define INTCSR0 0x38 /* R/W: Interrupt control registers */
-#define INTCSR1 0x39
-#define INTCSR2 0x3a
-#define INTCSR3 0x3b
-
-/* PCI-1760 mailbox commands */
-#define CMD_ClearIMB2 0x00 /* Clear IMB2 status and return actual
- * DI status in IMB3 */
-#define CMD_SetRelaysOutput 0x01 /* Set relay output from OMB0 */
-#define CMD_GetRelaysStatus 0x02 /* Get relay status to IMB0 */
-#define CMD_ReadCurrentStatus 0x07 /* Read the current status of the
- * register in OMB0, result in IMB0 */
-#define CMD_ReadFirmwareVersion 0x0e /* Read the firmware ver., result in
- * IMB1.IMB0 */
-#define CMD_ReadHardwareVersion 0x0f /* Read the hardware ver., result in
- * IMB1.IMB0 */
-#define CMD_EnableIDIFilters 0x20 /* Enable IDI filters based on bits in
- * OMB0 */
-#define CMD_EnableIDIPatternMatch 0x21 /* Enable IDI pattern match based on
- * bits in OMB0 */
-#define CMD_SetIDIPatternMatch 0x22 /* Set the IDI pattern match value from
- * bits in OMB0 */
-#define CMD_EnableIDICounters 0x28 /* Enable IDI counters based on bits in
- * OMB0 */
-#define CMD_ResetIDICounters 0x29 /* Reset IDI counters based on bits in
- * OMB0 to its reset values */
-#define CMD_OverflowIDICounters 0x2a /* Enable IDI counters overflow
- * interrupts based on bits in OMB0 */
-#define CMD_MatchIntIDICounters 0x2b /* Enable IDI counters match value
- * interrupts based on bits in OMB0 */
-#define CMD_EdgeIDICounters 0x2c /* Set IDI up counters count edge (bit=0
- * - rising, =1 - falling) */
-#define CMD_GetIDICntCurValue 0x2f /* Read IDI{OMB0} up counter current
- * value */
-#define CMD_SetIDI0CntResetValue 0x40 /* Set IDI0 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI1CntResetValue 0x41 /* Set IDI1 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI2CntResetValue 0x42 /* Set IDI2 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI3CntResetValue 0x43 /* Set IDI3 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI4CntResetValue 0x44 /* Set IDI4 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI5CntResetValue 0x45 /* Set IDI5 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI6CntResetValue 0x46 /* Set IDI6 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI7CntResetValue 0x47 /* Set IDI7 Counter Reset Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI0CntMatchValue 0x48 /* Set IDI0 Counter Match Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI1CntMatchValue 0x49 /* Set IDI1 Counter Match Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI2CntMatchValue 0x4a /* Set IDI2 Counter Match Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI3CntMatchValue 0x4b /* Set IDI3 Counter Match Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI4CntMatchValue 0x4c /* Set IDI4 Counter Match Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI5CntMatchValue 0x4d /* Set IDI5 Counter Match Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI6CntMatchValue 0x4e /* Set IDI6 Counter Match Value
- * 256*OMB1+OMB0 */
-#define CMD_SetIDI7CntMatchValue 0x4f /* Set IDI7 Counter Match Value
- * 256*OMB1+OMB0 */
-
-#define OMBCMD_RETRY 0x03 /* retry a mailbox request up to 3 times before failing */
-
-struct diosubd_data {
- int chans; /* num of chans */
-	int addr;		/* PCI address offset */
-	int regs;		/* number of registers to read, or number
-				   of 8255 subdevices */
- unsigned int specflags; /* addon subdevice flags */
-};
-
-struct dio_boardtype {
- const char *name; /* board name */
- int main_pci_region; /* main I/O PCI region */
- enum hw_cards_id cardtype;
- int nsubdevs;
- struct diosubd_data sdi[MAX_DI_SUBDEVS]; /* DI chans */
- struct diosubd_data sdo[MAX_DO_SUBDEVS]; /* DO chans */
- struct diosubd_data sdio[MAX_DIO_SUBDEVG]; /* DIO 8255 chans */
- struct diosubd_data boardid; /* card supports board ID switch */
- unsigned long timer_regbase;
- enum hw_io_access io_access;
-};
-
-static const struct dio_boardtype boardtypes[] = {
- [TYPE_PCI1730] = {
- .name = "pci1730",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1730,
- .nsubdevs = 5,
- .sdi[0] = { 16, PCI1730_DI, 2, 0, },
- .sdi[1] = { 16, PCI1730_IDI, 2, 0, },
- .sdo[0] = { 16, PCI1730_DO, 2, 0, },
- .sdo[1] = { 16, PCI1730_IDO, 2, 0, },
- .boardid = { 4, PCI173x_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1733] = {
- .name = "pci1733",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1733,
- .nsubdevs = 2,
- .sdi[1] = { 32, PCI1733_IDI, 4, 0, },
- .boardid = { 4, PCI173x_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1734] = {
- .name = "pci1734",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1734,
- .nsubdevs = 2,
- .sdo[1] = { 32, PCI1734_IDO, 4, 0, },
- .boardid = { 4, PCI173x_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1735] = {
- .name = "pci1735",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1735,
- .nsubdevs = 4,
- .sdi[0] = { 32, PCI1735_DI, 4, 0, },
- .sdo[0] = { 32, PCI1735_DO, 4, 0, },
- .boardid = { 4, PCI1735_BOARDID, 1, SDF_INTERNAL, },
- .timer_regbase = PCI1735_C8254,
- .io_access = IO_8b,
- },
- [TYPE_PCI1736] = {
- .name = "pci1736",
- .main_pci_region = PCI1736_MAINREG,
- .cardtype = TYPE_PCI1736,
- .nsubdevs = 3,
- .sdi[1] = { 16, PCI1736_IDI, 2, 0, },
- .sdo[1] = { 16, PCI1736_IDO, 2, 0, },
- .boardid = { 4, PCI1736_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1739] = {
- .name = "pci1739",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1739,
- .nsubdevs = 2,
- .sdio[0] = { 48, PCI1739_DIO, 2, 0, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1750] = {
- .name = "pci1750",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1750,
- .nsubdevs = 2,
- .sdi[1] = { 16, PCI1750_IDI, 2, 0, },
- .sdo[1] = { 16, PCI1750_IDO, 2, 0, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1751] = {
- .name = "pci1751",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1751,
- .nsubdevs = 3,
- .sdio[0] = { 48, PCI1751_DIO, 2, 0, },
- .timer_regbase = PCI1751_CNT,
- .io_access = IO_8b,
- },
- [TYPE_PCI1752] = {
- .name = "pci1752",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1752,
- .nsubdevs = 3,
- .sdo[0] = { 32, PCI1752_IDO, 2, 0, },
- .sdo[1] = { 32, PCI1752_IDO2, 2, 0, },
- .boardid = { 4, PCI175x_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_16b,
- },
- [TYPE_PCI1753] = {
- .name = "pci1753",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1753,
- .nsubdevs = 4,
- .sdio[0] = { 96, PCI1753_DIO, 4, 0, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1753E] = {
- .name = "pci1753e",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1753E,
- .nsubdevs = 8,
- .sdio[0] = { 96, PCI1753_DIO, 4, 0, },
- .sdio[1] = { 96, PCI1753E_DIO, 4, 0, },
- .io_access = IO_8b,
- },
- [TYPE_PCI1754] = {
- .name = "pci1754",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1754,
- .nsubdevs = 3,
- .sdi[0] = { 32, PCI1754_IDI, 2, 0, },
- .sdi[1] = { 32, PCI1754_IDI2, 2, 0, },
- .boardid = { 4, PCI175x_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_16b,
- },
- [TYPE_PCI1756] = {
- .name = "pci1756",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1756,
- .nsubdevs = 3,
- .sdi[1] = { 32, PCI1756_IDI, 2, 0, },
- .sdo[1] = { 32, PCI1756_IDO, 2, 0, },
- .boardid = { 4, PCI175x_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_16b,
- },
- [TYPE_PCI1760] = {
- /* This card has its own 'attach' */
- .name = "pci1760",
- .main_pci_region = 0,
- .cardtype = TYPE_PCI1760,
- .nsubdevs = 4,
- .io_access = IO_8b,
- },
- [TYPE_PCI1762] = {
- .name = "pci1762",
- .main_pci_region = PCIDIO_MAINREG,
- .cardtype = TYPE_PCI1762,
- .nsubdevs = 3,
- .sdi[1] = { 16, PCI1762_IDI, 1, 0, },
- .sdo[1] = { 16, PCI1762_RO, 1, 0, },
- .boardid = { 4, PCI1762_BOARDID, 1, SDF_INTERNAL, },
- .io_access = IO_16b,
- },
-};
-
-struct pci_dio_private {
- char GlobalIrqEnabled; /* 1= any IRQ source is enabled */
- /* PCI-1760 specific data */
- unsigned char IDICntEnable; /* counter's counting enable status */
- unsigned char IDICntOverEnable; /* counter's overflow interrupts enable
- * status */
- unsigned char IDICntMatchEnable; /* counter's match interrupts
- * enable status */
- unsigned char IDICntEdge; /* counter's count edge value
- * (bit=0 - rising, =1 - falling) */
- unsigned short CntResValue[8]; /* counters' reset value */
- unsigned short CntMatchValue[8]; /* counters' match interrupt value */
- unsigned char IDIFiltersEn; /* IDI's digital filters enable status */
- unsigned char IDIPatMatchEn; /* IDI's pattern match enable status */
- unsigned char IDIPatMatchValue; /* IDI's pattern match value */
- unsigned short IDIFiltrLow[8]; /* IDI's filter value low signal */
- unsigned short IDIFiltrHigh[8]; /* IDI's filter value high signal */
-};
-
-/*
-==============================================================================
-*/
-static int pci_dio_insn_bits_di_b(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct diosubd_data *d = (const struct diosubd_data *)s->private;
- int i;
-
- data[1] = 0;
- for (i = 0; i < d->regs; i++)
- data[1] |= inb(dev->iobase + d->addr + i) << (8 * i);
-
- return insn->n;
-}
-
-/*
-==============================================================================
-*/
-static int pci_dio_insn_bits_di_w(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct diosubd_data *d = (const struct diosubd_data *)s->private;
- int i;
-
- data[1] = 0;
- for (i = 0; i < d->regs; i++)
- data[1] |= inw(dev->iobase + d->addr + 2 * i) << (16 * i);
-
- return insn->n;
-}
-
-static int pci_dio_insn_bits_do_b(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct diosubd_data *d = (const struct diosubd_data *)s->private;
- int i;
-
- if (comedi_dio_update_state(s, data)) {
- for (i = 0; i < d->regs; i++)
- outb((s->state >> (8 * i)) & 0xff,
- dev->iobase + d->addr + i);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pci_dio_insn_bits_do_w(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct diosubd_data *d = (const struct diosubd_data *)s->private;
- int i;
-
- if (comedi_dio_update_state(s, data)) {
- for (i = 0; i < d->regs; i++)
- outw((s->state >> (16 * i)) & 0xffff,
- dev->iobase + d->addr + 2 * i);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-/*
-==============================================================================
-*/
-static int pci1760_unchecked_mbxrequest(struct comedi_device *dev,
- unsigned char *omb, unsigned char *imb,
- int repeats)
-{
- int cnt, tout, ok = 0;
-
- for (cnt = 0; cnt < repeats; cnt++) {
- outb(omb[0], dev->iobase + OMB0);
- outb(omb[1], dev->iobase + OMB1);
- outb(omb[2], dev->iobase + OMB2);
- outb(omb[3], dev->iobase + OMB3);
- for (tout = 0; tout < 251; tout++) {
- imb[2] = inb(dev->iobase + IMB2);
- if (imb[2] == omb[2]) {
- imb[0] = inb(dev->iobase + IMB0);
- imb[1] = inb(dev->iobase + IMB1);
- imb[3] = inb(dev->iobase + IMB3);
- ok = 1;
- break;
- }
- udelay(1);
- }
- if (ok)
- return 0;
- }
-
- dev_err(dev->class_dev, "PCI-1760 mailbox request timeout!\n");
- return -ETIME;
-}
-
-static int pci1760_clear_imb2(struct comedi_device *dev)
-{
- unsigned char omb[4] = { 0x0, 0x0, CMD_ClearIMB2, 0x0 };
- unsigned char imb[4];
- /* check if imb2 is already clear */
- if (inb(dev->iobase + IMB2) == CMD_ClearIMB2)
- return 0;
- return pci1760_unchecked_mbxrequest(dev, omb, imb, OMBCMD_RETRY);
-}
-
-static int pci1760_mbxrequest(struct comedi_device *dev,
- unsigned char *omb, unsigned char *imb)
-{
- if (omb[2] == CMD_ClearIMB2) {
- dev_err(dev->class_dev,
- "bug! this function should not be used for CMD_ClearIMB2 command\n");
- return -EINVAL;
- }
- if (inb(dev->iobase + IMB2) == omb[2]) {
- int retval;
-
- retval = pci1760_clear_imb2(dev);
- if (retval < 0)
- return retval;
- }
- return pci1760_unchecked_mbxrequest(dev, omb, imb, OMBCMD_RETRY);
-}
-
-/*
-==============================================================================
-*/
-static int pci1760_insn_bits_di(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inb(dev->iobase + IMB3);
-
- return insn->n;
-}
-
-static int pci1760_insn_bits_do(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
- unsigned char omb[4] = {
- 0x00,
- 0x00,
- CMD_SetRelaysOutput,
- 0x00
- };
- unsigned char imb[4];
-
- if (comedi_dio_update_state(s, data)) {
- omb[0] = s->state;
- ret = pci1760_mbxrequest(dev, omb, imb);
-		if (ret)
-			return ret;
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-/*
-==============================================================================
-*/
-static int pci1760_insn_cnt_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int ret, n;
- unsigned char omb[4] = {
- CR_CHAN(insn->chanspec) & 0x07,
- 0x00,
- CMD_GetIDICntCurValue,
- 0x00
- };
- unsigned char imb[4];
-
- for (n = 0; n < insn->n; n++) {
- ret = pci1760_mbxrequest(dev, omb, imb);
-		if (ret)
-			return ret;
- data[n] = (imb[1] << 8) + imb[0];
- }
-
- return n;
-}
-
-/*
-==============================================================================
-*/
-static int pci1760_insn_cnt_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pci_dio_private *devpriv = dev->private;
- int ret;
- unsigned char chan = CR_CHAN(insn->chanspec) & 0x07;
- unsigned char bitmask = 1 << chan;
- unsigned char omb[4] = {
- data[0] & 0xff,
- (data[0] >> 8) & 0xff,
- CMD_SetIDI0CntResetValue + chan,
- 0x00
- };
- unsigned char imb[4];
-
- /* Set reset value if different */
- if (devpriv->CntResValue[chan] != (data[0] & 0xffff)) {
- ret = pci1760_mbxrequest(dev, omb, imb);
-		if (ret)
-			return ret;
- devpriv->CntResValue[chan] = data[0] & 0xffff;
- }
-
-	omb[0] = bitmask;	/* reset counter to its reset value */
- omb[2] = CMD_ResetIDICounters;
- ret = pci1760_mbxrequest(dev, omb, imb);
-	if (ret)
-		return ret;
-
-	/* start the counter if it isn't running */
- if (!(bitmask & devpriv->IDICntEnable)) {
- omb[0] = bitmask;
- omb[2] = CMD_EnableIDICounters;
- ret = pci1760_mbxrequest(dev, omb, imb);
-		if (ret)
-			return ret;
- devpriv->IDICntEnable |= bitmask;
- }
- return 1;
-}
-
-/*
-==============================================================================
-*/
-static int pci1760_reset(struct comedi_device *dev)
-{
- struct pci_dio_private *devpriv = dev->private;
- int i;
- unsigned char omb[4] = { 0x00, 0x00, 0x00, 0x00 };
- unsigned char imb[4];
-
- outb(0, dev->iobase + INTCSR0); /* disable IRQ */
- outb(0, dev->iobase + INTCSR1);
- outb(0, dev->iobase + INTCSR2);
- outb(0, dev->iobase + INTCSR3);
- devpriv->GlobalIrqEnabled = 0;
-
- omb[0] = 0x00;
- omb[2] = CMD_SetRelaysOutput; /* reset relay outputs */
- pci1760_mbxrequest(dev, omb, imb);
-
- omb[0] = 0x00;
- omb[2] = CMD_EnableIDICounters; /* disable IDI up counters */
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->IDICntEnable = 0;
-
- omb[0] = 0x00;
-	omb[2] = CMD_OverflowIDICounters;	/* disable counters' overflow
-						 * interrupts */
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->IDICntOverEnable = 0;
-
- omb[0] = 0x00;
-	omb[2] = CMD_MatchIntIDICounters;	/* disable counters' match value
-						 * interrupts */
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->IDICntMatchEnable = 0;
-
- omb[0] = 0x00;
- omb[1] = 0x80;
- for (i = 0; i < 8; i++) { /* set IDI up counters match value */
- omb[2] = CMD_SetIDI0CntMatchValue + i;
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->CntMatchValue[i] = 0x8000;
- }
-
- omb[0] = 0x00;
- omb[1] = 0x00;
- for (i = 0; i < 8; i++) { /* set IDI up counters reset value */
- omb[2] = CMD_SetIDI0CntResetValue + i;
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->CntResValue[i] = 0x0000;
- }
-
- omb[0] = 0xff;
- omb[2] = CMD_ResetIDICounters; /* reset IDI up counters to reset
- * values */
- pci1760_mbxrequest(dev, omb, imb);
-
- omb[0] = 0x00;
- omb[2] = CMD_EdgeIDICounters; /* set IDI up counters count edge */
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->IDICntEdge = 0x00;
-
- omb[0] = 0x00;
- omb[2] = CMD_EnableIDIFilters; /* disable all digital in filters */
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->IDIFiltersEn = 0x00;
-
- omb[0] = 0x00;
- omb[2] = CMD_EnableIDIPatternMatch; /* disable pattern matching */
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->IDIPatMatchEn = 0x00;
-
- omb[0] = 0x00;
- omb[2] = CMD_SetIDIPatternMatch; /* set pattern match value */
- pci1760_mbxrequest(dev, omb, imb);
- devpriv->IDIPatMatchValue = 0x00;
-
- return 0;
-}
-
-/*
-==============================================================================
-*/
-static int pci_dio_reset(struct comedi_device *dev)
-{
- const struct dio_boardtype *board = dev->board_ptr;
-
- switch (board->cardtype) {
- case TYPE_PCI1730:
- outb(0, dev->iobase + PCI1730_DO); /* clear outputs */
- outb(0, dev->iobase + PCI1730_DO + 1);
- outb(0, dev->iobase + PCI1730_IDO);
- outb(0, dev->iobase + PCI1730_IDO + 1);
- /* fallthrough */
- case TYPE_PCI1733:
- /* disable interrupts */
- outb(0, dev->iobase + PCI1730_3_INT_EN);
- /* clear interrupts */
- outb(0x0f, dev->iobase + PCI1730_3_INT_CLR);
- /* set rising edge trigger */
- outb(0, dev->iobase + PCI1730_3_INT_RF);
- break;
- case TYPE_PCI1734:
- outb(0, dev->iobase + PCI1734_IDO); /* clear outputs */
- outb(0, dev->iobase + PCI1734_IDO + 1);
- outb(0, dev->iobase + PCI1734_IDO + 2);
- outb(0, dev->iobase + PCI1734_IDO + 3);
- break;
- case TYPE_PCI1735:
- outb(0, dev->iobase + PCI1735_DO); /* clear outputs */
- outb(0, dev->iobase + PCI1735_DO + 1);
- outb(0, dev->iobase + PCI1735_DO + 2);
- outb(0, dev->iobase + PCI1735_DO + 3);
- break;
-
- case TYPE_PCI1736:
- outb(0, dev->iobase + PCI1736_IDO);
- outb(0, dev->iobase + PCI1736_IDO + 1);
- /* disable interrupts */
- outb(0, dev->iobase + PCI1736_3_INT_EN);
- /* clear interrupts */
- outb(0x0f, dev->iobase + PCI1736_3_INT_CLR);
- /* set rising edge trigger */
- outb(0, dev->iobase + PCI1736_3_INT_RF);
- break;
-
- case TYPE_PCI1739:
- /* disable & clear interrupts */
- outb(0x88, dev->iobase + PCI1739_ICR);
- break;
-
- case TYPE_PCI1750:
- case TYPE_PCI1751:
- /* disable & clear interrupts */
- outb(0x88, dev->iobase + PCI1750_ICR);
- break;
- case TYPE_PCI1752:
- outw(0, dev->iobase + PCI1752_6_CFC); /* disable channel freeze
- * function */
- outw(0, dev->iobase + PCI1752_IDO); /* clear outputs */
- outw(0, dev->iobase + PCI1752_IDO + 2);
- outw(0, dev->iobase + PCI1752_IDO2);
- outw(0, dev->iobase + PCI1752_IDO2 + 2);
- break;
- case TYPE_PCI1753E:
- outb(0x88, dev->iobase + PCI1753E_ICR0); /* disable & clear
- * interrupts */
- outb(0x80, dev->iobase + PCI1753E_ICR1);
- outb(0x80, dev->iobase + PCI1753E_ICR2);
- outb(0x80, dev->iobase + PCI1753E_ICR3);
- /* fallthrough */
- case TYPE_PCI1753:
- outb(0x88, dev->iobase + PCI1753_ICR0); /* disable & clear
- * interrupts */
- outb(0x80, dev->iobase + PCI1753_ICR1);
- outb(0x80, dev->iobase + PCI1753_ICR2);
- outb(0x80, dev->iobase + PCI1753_ICR3);
- break;
- case TYPE_PCI1754:
- outw(0x08, dev->iobase + PCI1754_6_ICR0); /* disable and clear
- * interrupts */
- outw(0x08, dev->iobase + PCI1754_6_ICR1);
- outw(0x08, dev->iobase + PCI1754_ICR2);
- outw(0x08, dev->iobase + PCI1754_ICR3);
- break;
- case TYPE_PCI1756:
- outw(0, dev->iobase + PCI1752_6_CFC); /* disable channel freeze
- * function */
- outw(0x08, dev->iobase + PCI1754_6_ICR0); /* disable and clear
- * interrupts */
- outw(0x08, dev->iobase + PCI1754_6_ICR1);
- outw(0, dev->iobase + PCI1756_IDO); /* clear outputs */
- outw(0, dev->iobase + PCI1756_IDO + 2);
- break;
- case TYPE_PCI1760:
- pci1760_reset(dev);
- break;
- case TYPE_PCI1762:
- outw(0x0101, dev->iobase + PCI1762_ICR); /* disable & clear
- * interrupts */
- break;
- }
-
- return 0;
-}
-
-/*
-==============================================================================
-*/
-static int pci1760_attach(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_digital;
- s->insn_bits = pci1760_insn_bits_di;
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_digital;
- s->state = 0;
- s->insn_bits = pci1760_insn_bits_do;
-
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITABLE | SDF_LSAMPL;
- s->n_chan = 2;
- s->maxdata = 0xffffffff;
- s->len_chanlist = 2;
-/* s->insn_config=pci1760_insn_pwm_cfg; */
-
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 0xffff;
- s->len_chanlist = 8;
- s->insn_read = pci1760_insn_cnt_read;
- s->insn_write = pci1760_insn_cnt_write;
-/* s->insn_config=pci1760_insn_cnt_cfg; */
-
- return 0;
-}
-
-/*
-==============================================================================
-*/
-static int pci_dio_add_di(struct comedi_device *dev,
- struct comedi_subdevice *s,
- const struct diosubd_data *d)
-{
- const struct dio_boardtype *board = dev->board_ptr;
-
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | d->specflags;
- if (d->chans > 16)
- s->subdev_flags |= SDF_LSAMPL;
- s->n_chan = d->chans;
- s->maxdata = 1;
- s->len_chanlist = d->chans;
- s->range_table = &range_digital;
- switch (board->io_access) {
- case IO_8b:
- s->insn_bits = pci_dio_insn_bits_di_b;
- break;
- case IO_16b:
- s->insn_bits = pci_dio_insn_bits_di_w;
- break;
- }
- s->private = (void *)d;
-
- return 0;
-}
-
-/*
-==============================================================================
-*/
-static int pci_dio_add_do(struct comedi_device *dev,
- struct comedi_subdevice *s,
- const struct diosubd_data *d)
-{
- const struct dio_boardtype *board = dev->board_ptr;
-
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- if (d->chans > 16)
- s->subdev_flags |= SDF_LSAMPL;
- s->n_chan = d->chans;
- s->maxdata = 1;
- s->len_chanlist = d->chans;
- s->range_table = &range_digital;
- s->state = 0;
- switch (board->io_access) {
- case IO_8b:
- s->insn_bits = pci_dio_insn_bits_do_b;
- break;
- case IO_16b:
- s->insn_bits = pci_dio_insn_bits_do_w;
- break;
- }
- s->private = (void *)d;
-
- return 0;
-}
-
-static unsigned long pci_dio_override_cardtype(struct pci_dev *pcidev,
- unsigned long cardtype)
-{
- /*
-	 * Change cardtype from TYPE_PCI1753 to TYPE_PCI1753E if an expansion
-	 * board is present. The PCI device needs to be enabled and the main
-	 * registers' PCI BAR requested temporarily to perform the test.
- */
- if (cardtype != TYPE_PCI1753)
- return cardtype;
- if (pci_enable_device(pcidev) < 0)
- return cardtype;
- if (pci_request_region(pcidev, PCIDIO_MAINREG, "adv_pci_dio") == 0) {
- /*
- * This test is based on Advantech's "advdaq" driver source
- * (which declares its module licence as "GPL" although the
- * driver source does not include a "COPYING" file).
- */
- unsigned long reg =
- pci_resource_start(pcidev, PCIDIO_MAINREG) + 53;
-
- outb(0x05, reg);
- if ((inb(reg) & 0x07) == 0x02) {
- outb(0x02, reg);
- if ((inb(reg) & 0x07) == 0x05)
- cardtype = TYPE_PCI1753E;
- }
- pci_release_region(pcidev, PCIDIO_MAINREG);
- }
- pci_disable_device(pcidev);
- return cardtype;
-}
-
-static int pci_dio_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct dio_boardtype *board = NULL;
- struct pci_dio_private *devpriv;
- struct comedi_subdevice *s;
- int ret, subdev, i, j;
-
- if (context < ARRAY_SIZE(boardtypes))
- board = &boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, board->main_pci_region);
-
- ret = comedi_alloc_subdevices(dev, board->nsubdevs);
- if (ret)
- return ret;
-
- subdev = 0;
- for (i = 0; i < MAX_DI_SUBDEVS; i++)
- if (board->sdi[i].chans) {
- s = &dev->subdevices[subdev];
- pci_dio_add_di(dev, s, &board->sdi[i]);
- subdev++;
- }
-
- for (i = 0; i < MAX_DO_SUBDEVS; i++)
- if (board->sdo[i].chans) {
- s = &dev->subdevices[subdev];
- pci_dio_add_do(dev, s, &board->sdo[i]);
- subdev++;
- }
-
- for (i = 0; i < MAX_DIO_SUBDEVG; i++)
- for (j = 0; j < board->sdio[i].regs; j++) {
- s = &dev->subdevices[subdev];
- ret = subdev_8255_init(dev, s, NULL,
- board->sdio[i].addr +
- j * I8255_SIZE);
- if (ret)
- return ret;
- subdev++;
- }
-
- if (board->boardid.chans) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DI;
- pci_dio_add_di(dev, s, &board->boardid);
- subdev++;
- }
-
- if (board->timer_regbase) {
- s = &dev->subdevices[subdev];
-
- dev->pacer = comedi_8254_init(dev->iobase +
- board->timer_regbase,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- comedi_8254_subdevice_init(s, dev->pacer);
-
- subdev++;
- }
-
- if (board->cardtype == TYPE_PCI1760)
- pci1760_attach(dev);
-
- pci_dio_reset(dev);
-
- return 0;
-}
-
-static void pci_dio_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- pci_dio_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver adv_pci_dio_driver = {
- .driver_name = "adv_pci_dio",
- .module = THIS_MODULE,
- .auto_attach = pci_dio_auto_attach,
- .detach = pci_dio_detach,
-};
-
-static int adv_pci_dio_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- unsigned long cardtype;
-
- cardtype = pci_dio_override_cardtype(dev, id->driver_data);
- return comedi_pci_auto_config(dev, &adv_pci_dio_driver, cardtype);
-}
-
-static const struct pci_device_id adv_pci_dio_pci_table[] = {
- { PCI_VDEVICE(ADVANTECH, 0x1730), TYPE_PCI1730 },
- { PCI_VDEVICE(ADVANTECH, 0x1733), TYPE_PCI1733 },
- { PCI_VDEVICE(ADVANTECH, 0x1734), TYPE_PCI1734 },
- { PCI_VDEVICE(ADVANTECH, 0x1735), TYPE_PCI1735 },
- { PCI_VDEVICE(ADVANTECH, 0x1736), TYPE_PCI1736 },
- { PCI_VDEVICE(ADVANTECH, 0x1739), TYPE_PCI1739 },
- { PCI_VDEVICE(ADVANTECH, 0x1750), TYPE_PCI1750 },
- { PCI_VDEVICE(ADVANTECH, 0x1751), TYPE_PCI1751 },
- { PCI_VDEVICE(ADVANTECH, 0x1752), TYPE_PCI1752 },
- { PCI_VDEVICE(ADVANTECH, 0x1753), TYPE_PCI1753 },
- { PCI_VDEVICE(ADVANTECH, 0x1754), TYPE_PCI1754 },
- { PCI_VDEVICE(ADVANTECH, 0x1756), TYPE_PCI1756 },
- { PCI_VDEVICE(ADVANTECH, 0x1760), TYPE_PCI1760 },
- { PCI_VDEVICE(ADVANTECH, 0x1762), TYPE_PCI1762 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, adv_pci_dio_pci_table);
-
-static struct pci_driver adv_pci_dio_pci_driver = {
- .name = "adv_pci_dio",
- .id_table = adv_pci_dio_pci_table,
- .probe = adv_pci_dio_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(adv_pci_dio_driver, adv_pci_dio_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
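The pci1760_unchecked_mbxrequest() helper above implements the PCI-1760 mailbox handshake: the four command bytes are written to OMB0-OMB3, IMB2 is polled until it echoes the command code from OMB2, and only then are the reply bytes read back. Below is a minimal user-space sketch of that handshake, not driver code: reg_read()/reg_write() and the fake firmware echo stand in for inb()/outb() on real hardware, the command code 0x2a is a placeholder, and the driver's outer OMBCMD_RETRY loop is omitted.

/*
 * Minimal user-space sketch of the PCI-1760 mailbox handshake performed
 * by pci1760_unchecked_mbxrequest().  reg_read()/reg_write() and the
 * fake "firmware" below stand in for inb()/outb() on real hardware.
 */
#include <stdio.h>

enum { OMB0, OMB1, OMB2, OMB3, IMB0, IMB1, IMB2, IMB3, NREGS };

static unsigned char regs[NREGS];

static void reg_write(int reg, unsigned char val)
{
	regs[reg] = val;
	/* Fake firmware: once OMB3 is written, post a dummy reply and
	 * echo the command code into IMB2 to signal completion. */
	if (reg == OMB3) {
		regs[IMB0] = 0x34;
		regs[IMB1] = 0x12;
		regs[IMB3] = 0x00;
		regs[IMB2] = regs[OMB2];
	}
}

static unsigned char reg_read(int reg)
{
	return regs[reg];
}

/*
 * Same structure as the driver: write OMB0..OMB3, then poll IMB2 until
 * it echoes the command byte before reading the reply.  (The driver
 * additionally retries the whole request OMBCMD_RETRY times.)
 */
static int mbxrequest(const unsigned char omb[4], unsigned char imb[4])
{
	int tout;

	reg_write(OMB0, omb[0]);
	reg_write(OMB1, omb[1]);
	reg_write(OMB2, omb[2]);
	reg_write(OMB3, omb[3]);
	for (tout = 0; tout < 251; tout++) {
		imb[2] = reg_read(IMB2);
		if (imb[2] == omb[2]) {
			imb[0] = reg_read(IMB0);
			imb[1] = reg_read(IMB1);
			imb[3] = reg_read(IMB3);
			return 0;
		}
	}
	return -1;	/* timeout */
}

int main(void)
{
	unsigned char omb[4] = { 0x01, 0x00, 0x2a /* placeholder command */, 0x00 };
	unsigned char imb[4];

	if (mbxrequest(omb, imb) == 0)
		printf("reply: 0x%02x%02x\n", imb[1], imb[0]);
	return 0;
}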
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
deleted file mode 100644
index fbc3e5aa94cb..000000000000
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
-
- comedi/drivers/aio_aio12_8.c
-
- Driver for Access I/O Products PC-104 AIO12-8 Analog I/O Board
- Copyright (C) 2006 C&C Technologies, Inc.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
-
-Driver: aio_aio12_8
-Description: Access I/O Products PC-104 AIO12-8 Analog I/O Board
-Author: Pablo Mejia <pablo.mejia@cctechnol.com>
-Devices: [Access I/O] PC-104 AIO12-8 (aio_aio12_8)
- [Access I/O] PC-104 AI12-8 (aio_ai12_8)
- [Access I/O] PC-104 AO12-8 (aio_ao12_8)
-Status: experimental
-
-Configuration Options:
- [0] - I/O port base address
-
-Notes:
-
- Only synchronous operations are supported.
-
-*/
-
-#include <linux/module.h>
-#include "../comedidev.h"
-#include "8255.h"
-
-/*
- * Register map
- */
-#define AIO12_8_STATUS_REG 0x00
-#define AIO12_8_STATUS_ADC_EOC (1 << 7)
-#define AIO12_8_STATUS_PORT_C_COS (1 << 6)
-#define AIO12_8_STATUS_IRQ_ENA (1 << 2)
-#define AIO12_8_INTERRUPT_REG 0x01
-#define AIO12_8_INTERRUPT_ADC (1 << 7)
-#define AIO12_8_INTERRUPT_COS (1 << 6)
-#define AIO12_8_INTERRUPT_COUNTER1 (1 << 5)
-#define AIO12_8_INTERRUPT_PORT_C3 (1 << 4)
-#define AIO12_8_INTERRUPT_PORT_C0 (1 << 3)
-#define AIO12_8_INTERRUPT_ENA (1 << 2)
-#define AIO12_8_ADC_REG 0x02
-#define AIO12_8_ADC_MODE_NORMAL (0 << 6)
-#define AIO12_8_ADC_MODE_INT_CLK (1 << 6)
-#define AIO12_8_ADC_MODE_STANDBY (2 << 6)
-#define AIO12_8_ADC_MODE_POWERDOWN (3 << 6)
-#define AIO12_8_ADC_ACQ_3USEC (0 << 5)
-#define AIO12_8_ADC_ACQ_PROGRAM (1 << 5)
-#define AIO12_8_ADC_RANGE(x) ((x) << 3)
-#define AIO12_8_ADC_CHAN(x) ((x) << 0)
-#define AIO12_8_DAC_REG(x) (0x04 + (x) * 2)
-#define AIO12_8_8254_BASE_REG 0x0c
-#define AIO12_8_8255_BASE_REG 0x10
-#define AIO12_8_DIO_CONTROL_REG 0x14
-#define AIO12_8_DIO_CONTROL_TST (1 << 0)
-#define AIO12_8_ADC_TRIGGER_REG 0x15
-#define AIO12_8_ADC_TRIGGER_RANGE(x) ((x) << 3)
-#define AIO12_8_ADC_TRIGGER_CHAN(x) ((x) << 0)
-#define AIO12_8_TRIGGER_REG 0x16
-#define AIO12_8_TRIGGER_ADTRIG (1 << 1)
-#define AIO12_8_TRIGGER_DACTRIG (1 << 0)
-#define AIO12_8_COS_REG 0x17
-#define AIO12_8_DAC_ENABLE_REG 0x18
-#define AIO12_8_DAC_ENABLE_REF_ENA (1 << 0)
-
-struct aio12_8_boardtype {
- const char *name;
- int ai_nchan;
- int ao_nchan;
-};
-
-static const struct aio12_8_boardtype board_types[] = {
- {
- .name = "aio_aio12_8",
- .ai_nchan = 8,
- .ao_nchan = 4,
- }, {
- .name = "aio_ai12_8",
- .ai_nchan = 8,
- }, {
- .name = "aio_ao12_8",
- .ao_nchan = 4,
- },
-};
-
-static int aio_aio12_8_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + AIO12_8_STATUS_REG);
- if (status & AIO12_8_STATUS_ADC_EOC)
- return 0;
- return -EBUSY;
-}
-
-static int aio_aio12_8_ai_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned char control;
- int ret;
- int n;
-
- /*
-	 * Set up the control byte for the internal 2 MHz clock and 3 us
-	 * conversion time, at the desired range of the requested channel.
- */
- control = AIO12_8_ADC_MODE_NORMAL | AIO12_8_ADC_ACQ_3USEC |
- AIO12_8_ADC_RANGE(range) | AIO12_8_ADC_CHAN(chan);
-
- /* Read status to clear EOC latch */
- inb(dev->iobase + AIO12_8_STATUS_REG);
-
- for (n = 0; n < insn->n; n++) {
-		/* Set up and start the conversion */
- outb(control, dev->iobase + AIO12_8_ADC_REG);
-
- /* Wait for conversion to complete */
- ret = comedi_timeout(dev, s, insn, aio_aio12_8_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[n] = inw(dev->iobase + AIO12_8_ADC_REG) & s->maxdata;
- }
-
- return insn->n;
-}
-
-static int aio_aio12_8_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- /* enable DACs */
- outb(AIO12_8_DAC_ENABLE_REF_ENA, dev->iobase + AIO12_8_DAC_ENABLE_REG);
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outw(val, dev->iobase + AIO12_8_DAC_REG(chan));
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static const struct comedi_lrange range_aio_aio12_8 = {
- 4, {
- UNI_RANGE(5),
- BIP_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(10)
- }
-};
-
-static int aio_aio12_8_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct aio12_8_boardtype *board = dev->board_ptr;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 32);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- if (board->ai_nchan) {
- /* Analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = board->ai_nchan;
- s->maxdata = 0x0fff;
- s->range_table = &range_aio_aio12_8;
- s->insn_read = aio_aio12_8_ai_read;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[1];
- if (board->ao_nchan) {
- /* Analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->range_table = &range_aio_aio12_8;
- s->insn_write = aio_aio12_8_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[2];
- /* 8255 Digital i/o subdevice */
- ret = subdev_8255_init(dev, s, NULL, AIO12_8_8255_BASE_REG);
- if (ret)
- return ret;
-
- s = &dev->subdevices[3];
- /* 8254 counter/timer subdevice */
- s->type = COMEDI_SUBD_UNUSED;
-
- return 0;
-}
-
-static struct comedi_driver aio_aio12_8_driver = {
- .driver_name = "aio_aio12_8",
- .module = THIS_MODULE,
- .attach = aio_aio12_8_attach,
- .detach = comedi_legacy_detach,
- .board_name = &board_types[0].name,
- .num_names = ARRAY_SIZE(board_types),
- .offset = sizeof(struct aio12_8_boardtype),
-};
-module_comedi_driver(aio_aio12_8_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
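For reference, aio_aio12_8_ai_read() above composes the ADC control byte purely from the register-map macros. The standalone snippet below recomputes the byte for an arbitrary channel and range index; the relevant macro values are copied from the register map so it builds outside the driver.

/*
 * Standalone recomputation of the ADC control byte built in
 * aio_aio12_8_ai_read().  Macro values are copied from the register
 * map above so this compiles outside the driver.
 */
#include <stdio.h>

#define AIO12_8_ADC_MODE_NORMAL		(0 << 6)
#define AIO12_8_ADC_ACQ_3USEC		(0 << 5)
#define AIO12_8_ADC_RANGE(x)		((x) << 3)
#define AIO12_8_ADC_CHAN(x)		((x) << 0)

int main(void)
{
	unsigned int chan = 5;		/* analog input channel 5 */
	unsigned int range = 2;		/* third entry of the range table */
	unsigned char control;

	control = AIO12_8_ADC_MODE_NORMAL | AIO12_8_ADC_ACQ_3USEC |
		  AIO12_8_ADC_RANGE(range) | AIO12_8_ADC_CHAN(chan);

	/* range 2 -> bits [4:3], channel 5 -> bits [2:0], so 0x15 */
	printf("control byte: 0x%02x\n", control);
	return 0;
}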
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
deleted file mode 100644
index 35b2f98f0de9..000000000000
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * aio_iiro_16.c
- * Comedi driver for Access I/O Products 104-IIRO-16 board
- * Copyright (C) 2006 C&C Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: aio_iiro_16
- * Description: Access I/O Products PC/104 Isolated Input/Relay Output Board
- * Author: Zachary Ware <zach.ware@cctechnol.com>
- * Devices: [Access I/O] 104-IIRO-16 (aio_iiro_16)
- * Status: experimental
- *
- * Configuration Options:
- * [0] - I/O port base address
- * [1] - IRQ (optional)
- *
- * The board supports interrupts on change of state of the digital inputs.
- * The sample data returned by the async command indicates which inputs
- * changed state and the current state of the inputs:
- *
- * Bit 23 - IRQ Enable (1) / Disable (0)
- * Bit 17 - Input 8-15 Changed State (1 = Changed, 0 = No Change)
- * Bit 16 - Input 0-7 Changed State (1 = Changed, 0 = No Change)
- * Bit 15 - Digital input 15
- * ...
- * Bit 0 - Digital input 0
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#define AIO_IIRO_16_RELAY_0_7 0x00
-#define AIO_IIRO_16_INPUT_0_7 0x01
-#define AIO_IIRO_16_IRQ 0x02
-#define AIO_IIRO_16_RELAY_8_15 0x04
-#define AIO_IIRO_16_INPUT_8_15 0x05
-#define AIO_IIRO_16_STATUS 0x07
-#define AIO_IIRO_16_STATUS_IRQE BIT(7)
-#define AIO_IIRO_16_STATUS_INPUT_8_15 BIT(1)
-#define AIO_IIRO_16_STATUS_INPUT_0_7 BIT(0)
-
-static unsigned int aio_iiro_16_read_inputs(struct comedi_device *dev)
-{
- unsigned int val;
-
- val = inb(dev->iobase + AIO_IIRO_16_INPUT_0_7);
- val |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8;
-
- return val;
-}
-
-static irqreturn_t aio_iiro_16_cos(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status;
- unsigned int val;
-
- status = inb(dev->iobase + AIO_IIRO_16_STATUS);
- if (!(status & AIO_IIRO_16_STATUS_IRQE))
- return IRQ_NONE;
-
- val = aio_iiro_16_read_inputs(dev);
- val |= (status << 16);
-
- comedi_buf_write_samples(s, &val, 1);
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static void aio_iiro_enable_irq(struct comedi_device *dev, bool enable)
-{
- if (enable)
- inb(dev->iobase + AIO_IIRO_16_IRQ);
- else
- outb(0, dev->iobase + AIO_IIRO_16_IRQ);
-}
-
-static int aio_iiro_16_cos_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- aio_iiro_enable_irq(dev, false);
-
- return 0;
-}
-
-static int aio_iiro_16_cos_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- aio_iiro_enable_irq(dev, true);
-
- return 0;
-}
-
-static int aio_iiro_16_cos_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-static int aio_iiro_16_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7);
- outb((s->state >> 8) & 0xff,
- dev->iobase + AIO_IIRO_16_RELAY_8_15);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int aio_iiro_16_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = aio_iiro_16_read_inputs(dev);
-
- return insn->n;
-}
-
-static int aio_iiro_16_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x8);
- if (ret)
- return ret;
-
- aio_iiro_enable_irq(dev, false);
-
- /*
- * Digital input change of state interrupts are optionally supported
- * using IRQ 2-7, 10-12, 14, or 15.
- */
- if ((1 << it->options[1]) & 0xdcfc) {
- ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = aio_iiro_16_do_insn_bits;
-
- /* get the initial state of the relays */
- s->state = inb(dev->iobase + AIO_IIRO_16_RELAY_0_7) |
- (inb(dev->iobase + AIO_IIRO_16_RELAY_8_15) << 8);
-
- /* Digital Input subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = aio_iiro_16_di_insn_bits;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL;
- s->len_chanlist = 1;
- s->do_cmdtest = aio_iiro_16_cos_cmdtest;
- s->do_cmd = aio_iiro_16_cos_cmd;
- s->cancel = aio_iiro_16_cos_cancel;
- }
-
- return 0;
-}
-
-static struct comedi_driver aio_iiro_16_driver = {
- .driver_name = "aio_iiro_16",
- .module = THIS_MODULE,
- .attach = aio_iiro_16_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(aio_iiro_16_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Access I/O Products 104-IIRO-16 board");
-MODULE_LICENSE("GPL");
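The driver comment above documents the 24-bit sample word produced by the change-of-state command (bit 23 IRQ enable, bits 17/16 change flags, bits 15..0 input states). Below is a small standalone decoder for that layout; the struct and field names are illustrative only and are not part of the driver or the comedi API.

/*
 * Decoder for the 24-bit change-of-state sample documented above.
 * The struct and field names are illustrative, not part of the driver.
 */
#include <stdio.h>
#include <stdbool.h>

struct iiro16_sample {
	bool irq_enabled;	/* bit 23 */
	bool upper_changed;	/* bit 17: inputs 8-15 changed state */
	bool lower_changed;	/* bit 16: inputs 0-7 changed state */
	unsigned int inputs;	/* bits 15..0: current input states */
};

static struct iiro16_sample iiro16_decode(unsigned int raw)
{
	struct iiro16_sample s;

	s.irq_enabled = (raw >> 23) & 1;
	s.upper_changed = (raw >> 17) & 1;
	s.lower_changed = (raw >> 16) & 1;
	s.inputs = raw & 0xffff;
	return s;
}

int main(void)
{
	/* e.g. IRQ enabled, inputs 0-7 changed, inputs 0 and 3 high */
	unsigned int raw = (1u << 23) | (1u << 16) | 0x0009;
	struct iiro16_sample s = iiro16_decode(raw);

	printf("irq=%d upper=%d lower=%d inputs=0x%04x\n",
	       s.irq_enabled, s.upper_changed, s.lower_changed, s.inputs);
	return 0;
}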
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
deleted file mode 100644
index d4b8c0195bd3..000000000000
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- comedi/drivers/amcc_s5933.h
-
- Stuff for AMCC S5933 PCI Controller
-
- Author: Michal Dobes <dobes@tesnet.cz>
-
- Inspired by the general-purpose AMCC S5933 PCI Matchmaker driver
- made by Andrea Cisternino <acister@pcape1.pi.infn.it>
- and as a result of espionage from the MITE code made by David A. Schleef.
- Thanks to AMCC for their on-line documentation and bus master DMA
- example.
-*/
-
-#ifndef _AMCC_S5933_H_
-#define _AMCC_S5933_H_
-
-/****************************************************************************/
-/* AMCC Operation Register Offsets - PCI */
-/****************************************************************************/
-
-#define AMCC_OP_REG_OMB1 0x00
-#define AMCC_OP_REG_OMB2 0x04
-#define AMCC_OP_REG_OMB3 0x08
-#define AMCC_OP_REG_OMB4 0x0c
-#define AMCC_OP_REG_IMB1 0x10
-#define AMCC_OP_REG_IMB2 0x14
-#define AMCC_OP_REG_IMB3 0x18
-#define AMCC_OP_REG_IMB4 0x1c
-#define AMCC_OP_REG_FIFO 0x20
-#define AMCC_OP_REG_MWAR 0x24
-#define AMCC_OP_REG_MWTC 0x28
-#define AMCC_OP_REG_MRAR 0x2c
-#define AMCC_OP_REG_MRTC 0x30
-#define AMCC_OP_REG_MBEF 0x34
-#define AMCC_OP_REG_INTCSR 0x38
-#define AMCC_OP_REG_INTCSR_SRC (AMCC_OP_REG_INTCSR + 2) /* INT source */
-#define AMCC_OP_REG_INTCSR_FEC (AMCC_OP_REG_INTCSR + 3) /* FIFO ctrl */
-#define AMCC_OP_REG_MCSR 0x3c
-#define AMCC_OP_REG_MCSR_NVDATA (AMCC_OP_REG_MCSR + 2) /* Data in byte 2 */
-#define AMCC_OP_REG_MCSR_NVCMD (AMCC_OP_REG_MCSR + 3) /* Command in byte 3 */
-
-#define AMCC_FIFO_DEPTH_DWORD 8
-#define AMCC_FIFO_DEPTH_BYTES (8 * sizeof(u32))
-
-/****************************************************************************/
-/* AMCC - PCI Interrupt Control/Status Register */
-/****************************************************************************/
-#define INTCSR_OUTBOX_BYTE(x) ((x) & 0x3)
-#define INTCSR_OUTBOX_SELECT(x) (((x) & 0x3) << 2)
-#define INTCSR_OUTBOX_EMPTY_INT 0x10 /* enable outbox empty interrupt */
-#define INTCSR_INBOX_BYTE(x) (((x) & 0x3) << 8)
-#define INTCSR_INBOX_SELECT(x) (((x) & 0x3) << 10)
-#define INTCSR_INBOX_FULL_INT 0x1000 /* enable inbox full interrupt */
-/* read, or write clear inbox full interrupt */
-#define INTCSR_INBOX_INTR_STATUS 0x20000
-/* read only, interrupt asserted */
-#define INTCSR_INTR_ASSERTED 0x800000
-
-/****************************************************************************/
-/* AMCC - PCI non-volatile ram command register (byte 3 of master control/status register) */
-/****************************************************************************/
-#define MCSR_NV_LOAD_LOW_ADDR 0x0
-#define MCSR_NV_LOAD_HIGH_ADDR 0x20
-#define MCSR_NV_WRITE 0x40
-#define MCSR_NV_READ 0x60
-#define MCSR_NV_MASK 0x60
-#define MCSR_NV_ENABLE 0x80
-#define MCSR_NV_BUSY MCSR_NV_ENABLE
-
-/****************************************************************************/
-/* AMCC Operation Registers Size - PCI */
-/****************************************************************************/
-
-#define AMCC_OP_REG_SIZE 64 /* in bytes */
-
-/****************************************************************************/
-/* AMCC Operation Register Offsets - Add-on */
-/****************************************************************************/
-
-#define AMCC_OP_REG_AIMB1 0x00
-#define AMCC_OP_REG_AIMB2 0x04
-#define AMCC_OP_REG_AIMB3 0x08
-#define AMCC_OP_REG_AIMB4 0x0c
-#define AMCC_OP_REG_AOMB1 0x10
-#define AMCC_OP_REG_AOMB2 0x14
-#define AMCC_OP_REG_AOMB3 0x18
-#define AMCC_OP_REG_AOMB4 0x1c
-#define AMCC_OP_REG_AFIFO 0x20
-#define AMCC_OP_REG_AMWAR 0x24
-#define AMCC_OP_REG_APTA 0x28
-#define AMCC_OP_REG_APTD 0x2c
-#define AMCC_OP_REG_AMRAR 0x30
-#define AMCC_OP_REG_AMBEF 0x34
-#define AMCC_OP_REG_AINT 0x38
-#define AMCC_OP_REG_AGCSTS 0x3c
-#define AMCC_OP_REG_AMWTC 0x58
-#define AMCC_OP_REG_AMRTC 0x5c
-
-/****************************************************************************/
-/* AMCC - Add-on General Control/Status Register */
-/****************************************************************************/
-
-#define AGCSTS_CONTROL_MASK 0xfffff000
-#define AGCSTS_NV_ACC_MASK 0xe0000000
-#define AGCSTS_RESET_MASK 0x0e000000
-#define AGCSTS_NV_DA_MASK 0x00ff0000
-#define AGCSTS_BIST_MASK 0x0000f000
-#define AGCSTS_STATUS_MASK 0x000000ff
-#define AGCSTS_TCZERO_MASK 0x000000c0
-#define AGCSTS_FIFO_ST_MASK 0x0000003f
-
-#define AGCSTS_TC_ENABLE 0x10000000
-
-#define AGCSTS_RESET_MBFLAGS 0x08000000
-#define AGCSTS_RESET_P2A_FIFO 0x04000000
-#define AGCSTS_RESET_A2P_FIFO 0x02000000
-#define AGCSTS_RESET_FIFOS (AGCSTS_RESET_A2P_FIFO | AGCSTS_RESET_P2A_FIFO)
-
-#define AGCSTS_A2P_TCOUNT 0x00000080
-#define AGCSTS_P2A_TCOUNT 0x00000040
-
-#define AGCSTS_FS_P2A_EMPTY 0x00000020
-#define AGCSTS_FS_P2A_HALF 0x00000010
-#define AGCSTS_FS_P2A_FULL 0x00000008
-
-#define AGCSTS_FS_A2P_EMPTY 0x00000004
-#define AGCSTS_FS_A2P_HALF 0x00000002
-#define AGCSTS_FS_A2P_FULL 0x00000001
-
-/****************************************************************************/
-/* AMCC - Add-on Interrupt Control/Status Register */
-/****************************************************************************/
-
-#define AINT_INT_MASK 0x00ff0000
-#define AINT_SEL_MASK 0x0000ffff
-#define AINT_IS_ENSEL_MASK 0x00001f1f
-
-#define AINT_INT_ASSERTED 0x00800000
-#define AINT_BM_ERROR 0x00200000
-#define AINT_BIST_INT 0x00100000
-
-#define AINT_RT_COMPLETE 0x00080000
-#define AINT_WT_COMPLETE 0x00040000
-
-#define AINT_OUT_MB_INT 0x00020000
-#define AINT_IN_MB_INT 0x00010000
-
-#define AINT_READ_COMPL 0x00008000
-#define AINT_WRITE_COMPL 0x00004000
-
-#define AINT_OMB_ENABLE 0x00001000
-#define AINT_OMB_SELECT 0x00000c00
-#define AINT_OMB_BYTE 0x00000300
-
-#define AINT_IMB_ENABLE 0x00000010
-#define AINT_IMB_SELECT 0x0000000c
-#define AINT_IMB_BYTE 0x00000003
-
-/* these are bits from various registers; needs cleanup XXX */
-/* Enable Bus Mastering */
-#define EN_A2P_TRANSFERS 0x00000400
-/* FIFO Flag Reset */
-#define RESET_A2P_FLAGS 0x04000000L
-/* FIFO Relative Priority */
-#define A2P_HI_PRIORITY 0x00000100L
-/* Identify Interrupt Sources */
-#define ANY_S593X_INT 0x00800000L
-#define READ_TC_INT 0x00080000L
-#define WRITE_TC_INT 0x00040000L
-#define IN_MB_INT 0x00020000L
-#define MASTER_ABORT_INT 0x00100000L
-#define TARGET_ABORT_INT 0x00200000L
-#define BUS_MASTER_INT 0x00200000L
-
-#endif
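As a quick illustration of the INTCSR bit macros, the standalone snippet below builds a register value that enables the inbox-full interrupt for one mailbox/byte combination and then tests the status bits of a simulated readback. The macro definitions are copied verbatim from the header above; the choice of inbox 1, byte 3 is arbitrary.

/*
 * Build an INTCSR value that enables the inbox-full interrupt for
 * inbox 1, byte 3, then test the status bits of a simulated readback.
 * Macro definitions are copied verbatim from the header above.
 */
#include <stdio.h>

#define INTCSR_INBOX_BYTE(x)		(((x) & 0x3) << 8)
#define INTCSR_INBOX_SELECT(x)		(((x) & 0x3) << 10)
#define INTCSR_INBOX_FULL_INT		0x1000
#define INTCSR_INBOX_INTR_STATUS	0x20000
#define INTCSR_INTR_ASSERTED		0x800000

int main(void)
{
	unsigned int intcsr;
	unsigned int status;

	intcsr = INTCSR_INBOX_SELECT(1) | INTCSR_INBOX_BYTE(3) |
		 INTCSR_INBOX_FULL_INT;
	printf("INTCSR to write: 0x%06x\n", intcsr);

	/* simulated readback with the interrupt pending */
	status = intcsr | INTCSR_INTR_ASSERTED | INTCSR_INBOX_INTR_STATUS;
	if ((status & INTCSR_INTR_ASSERTED) &&
	    (status & INTCSR_INBOX_INTR_STATUS))
		printf("inbox-full interrupt is pending\n");
	return 0;
}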
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
deleted file mode 100644
index f5cfa71a90c6..000000000000
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * comedi/drivers/amplc_dio200.c
- *
- * Driver for Amplicon PC212E, PC214E, PC215E, PC218E, PC272E.
- *
- * Copyright (C) 2005-2013 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: amplc_dio200
- * Description: Amplicon 200 Series ISA Digital I/O
- * Author: Ian Abbott <abbotti@mev.co.uk>
- * Devices: [Amplicon] PC212E (pc212e), PC214E (pc214e), PC215E (pc215e),
- * PC218E (pc218e), PC272E (pc272e)
- * Updated: Mon, 18 Mar 2013 14:40:41 +0000
- *
- * Status: works
- *
- * Configuration options:
- * [0] - I/O port base address
- * [1] - IRQ (optional, but commands won't work without it)
- *
- * Passing a zero for an option is the same as leaving it unspecified.
- *
- * SUBDEVICES
- *
- * PC212E PC214E PC215E
- * ------------- ------------- -------------
- * Subdevices 6 4 5
- * 0 PPI-X PPI-X PPI-X
- * 1 CTR-Y1 PPI-Y PPI-Y
- * 2 CTR-Y2 CTR-Z1* CTR-Z1
- * 3 CTR-Z1 INTERRUPT* CTR-Z2
- * 4 CTR-Z2 INTERRUPT
- * 5 INTERRUPT
- *
- * PC218E PC272E
- * ------------- -------------
- * Subdevices 7 4
- * 0 CTR-X1 PPI-X
- * 1 CTR-X2 PPI-Y
- * 2 CTR-Y1 PPI-Z
- * 3 CTR-Y2 INTERRUPT
- * 4 CTR-Z1
- * 5 CTR-Z2
- * 6 INTERRUPT
- *
- * Each PPI is an 8255 chip providing 24 DIO channels. The DIO channels
- * are configurable as inputs or outputs in four groups:
- *
- * Port A - channels 0 to 7
- * Port B - channels 8 to 15
- * Port CL - channels 16 to 19
- * Port CH - channels 20 to 23
- *
- * Only mode 0 of the 8255 chips is supported.
- *
- * Each CTR is an 8254 chip providing 3 16-bit counter channels. Each
- * channel is configured individually with INSN_CONFIG instructions. The
- * specific type of configuration instruction is specified in data[0].
- * Some configuration instructions expect an additional parameter in
- * data[1]; others return a value in data[1]. The following configuration
- * instructions are supported:
- *
- * INSN_CONFIG_SET_COUNTER_MODE. Sets the counter channel's mode and
- * BCD/binary setting specified in data[1].
- *
- * INSN_CONFIG_8254_READ_STATUS. Reads the status register value for the
- * counter channel into data[1].
- *
- * INSN_CONFIG_SET_CLOCK_SRC. Sets the counter channel's clock source as
- * specified in data[1] (this is a hardware-specific value). Not
- * supported on PC214E. For the other boards, valid clock sources are
- * 0 to 7 as follows:
- *
- * 0. CLK n, the counter channel's dedicated CLK input from the SK1
- * connector. (N.B. for other values, the counter channel's CLKn
- * pin on the SK1 connector is an output!)
- * 1. Internal 10 MHz clock.
- * 2. Internal 1 MHz clock.
- * 3. Internal 100 kHz clock.
- * 4. Internal 10 kHz clock.
- * 5. Internal 1 kHz clock.
- * 6. OUT n-1, the output of counter channel n-1 (see note 1 below).
- * 7. Ext Clock, the counter chip's dedicated Ext Clock input from
- * the SK1 connector. This pin is shared by all three counter
- * channels on the chip.
- *
- * INSN_CONFIG_GET_CLOCK_SRC. Returns the counter channel's current
- * clock source in data[1]. For internal clock sources, data[2] is set
- * to the period in ns.
- *
- * INSN_CONFIG_SET_GATE_SRC. Sets the counter channel's gate source as
- * specified in data[2] (this is a hardware-specific value). Not
- * supported on PC214E. For the other boards, valid gate sources are 0
- * to 7 as follows:
- *
- * 0. VCC (internal +5V d.c.), i.e. gate permanently enabled.
- * 1. GND (internal 0V d.c.), i.e. gate permanently disabled.
- * 2. GAT n, the counter channel's dedicated GAT input from the SK1
- * connector. (N.B. for other values, the counter channel's GATn
- * pin on the SK1 connector is an output!)
- * 3. /OUT n-2, the inverted output of counter channel n-2 (see note
- * 2 below).
- * 4. Reserved.
- * 5. Reserved.
- * 6. Reserved.
- * 7. Reserved.
- *
- * INSN_CONFIG_GET_GATE_SRC. Returns the counter channel's current gate
- * source in data[2].
- *
- * Clock and gate interconnection notes:
- *
- * 1. Clock source OUT n-1 is the output of the preceding channel on the
- * same counter subdevice if n > 0, or the output of channel 2 on the
- * preceding counter subdevice (see note 3) if n = 0.
- *
- * 2. Gate source /OUT n-2 is the inverted output of channel 0 on the
- * same counter subdevice if n = 2, or the inverted output of channel n+1
- * on the preceding counter subdevice (see note 3) if n < 2.
- *
- * 3. The counter subdevices are connected in a ring, so the highest
- * counter subdevice precedes the lowest.
- *
- * The 'INTERRUPT' subdevice pretends to be a digital input subdevice. The
- * digital inputs come from the interrupt status register. The number of
- * channels matches the number of interrupt sources. The PC214E does not
- * have an interrupt status register; see notes on 'INTERRUPT SOURCES'
- * below.
- *
- * INTERRUPT SOURCES
- *
- * PC212E PC214E PC215E
- * ------------- ------------- -------------
- * Sources 6 1 6
- * 0 PPI-X-C0 JUMPER-J5 PPI-X-C0
- * 1 PPI-X-C3 PPI-X-C3
- * 2 CTR-Y1-OUT1 PPI-Y-C0
- * 3 CTR-Y2-OUT1 PPI-Y-C3
- * 4 CTR-Z1-OUT1 CTR-Z1-OUT1
- * 5 CTR-Z2-OUT1 CTR-Z2-OUT1
- *
- * PC218E PC272E
- * ------------- -------------
- * Sources 6 6
- * 0 CTR-X1-OUT1 PPI-X-C0
- * 1 CTR-X2-OUT1 PPI-X-C3
- * 2 CTR-Y1-OUT1 PPI-Y-C0
- * 3 CTR-Y2-OUT1 PPI-Y-C3
- * 4 CTR-Z1-OUT1 PPI-Z-C0
- * 5 CTR-Z2-OUT1 PPI-Z-C3
- *
- * When an interrupt source is enabled in the interrupt source enable
- * register, a rising edge on the source signal latches the corresponding
- * bit to 1 in the interrupt status register.
- *
- * When the interrupt status register value as a whole (actually, just the
- * 6 least significant bits) goes from zero to non-zero, the board will
- * generate an interrupt. No further interrupts will occur until the
- * interrupt status register is cleared to zero. To clear a bit to zero in
- * the interrupt status register, the corresponding interrupt source must
- * be disabled in the interrupt source enable register (there is no
- * separate interrupt clear register).
- *
- * The PC214E does not have an interrupt source enable register or an
- * interrupt status register; its 'INTERRUPT' subdevice has a single
- * channel and its interrupt source is selected by the position of jumper
- * J5.
- *
- * COMMANDS
- *
- * The driver supports a read streaming acquisition command on the
- * 'INTERRUPT' subdevice. The channel list selects the interrupt sources
- * to be enabled. All channels will be sampled together (convert_src ==
- * TRIG_NOW). The scan begins a short time after the hardware interrupt
- * occurs, subject to interrupt latencies (scan_begin_src == TRIG_EXT,
- * scan_begin_arg == 0). The value read from the interrupt status register
- * is packed into a short value, one bit per requested channel, in the
- * order they appear in the channel list.
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "amplc_dio200.h"
-
-/*
- * Board descriptions.
- */
-static const struct dio200_board dio200_isa_boards[] = {
- {
- .name = "pc212e",
- .n_subdevs = 6,
- .sdtype = {
- sd_8255, sd_8254, sd_8254, sd_8254, sd_8254, sd_intr
- },
- .sdinfo = { 0x00, 0x08, 0x0c, 0x10, 0x14, 0x3f },
- .has_int_sce = true,
- .has_clk_gat_sce = true,
- }, {
- .name = "pc214e",
- .n_subdevs = 4,
- .sdtype = {
- sd_8255, sd_8255, sd_8254, sd_intr
- },
- .sdinfo = { 0x00, 0x08, 0x10, 0x01 },
- }, {
- .name = "pc215e",
- .n_subdevs = 5,
- .sdtype = {
- sd_8255, sd_8255, sd_8254, sd_8254, sd_intr
- },
- .sdinfo = { 0x00, 0x08, 0x10, 0x14, 0x3f },
- .has_int_sce = true,
- .has_clk_gat_sce = true,
- }, {
- .name = "pc218e",
- .n_subdevs = 7,
- .sdtype = {
- sd_8254, sd_8254, sd_8255, sd_8254, sd_8254, sd_intr
- },
- .sdinfo = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x3f },
- .has_int_sce = true,
- .has_clk_gat_sce = true,
- }, {
- .name = "pc272e",
- .n_subdevs = 4,
- .sdtype = {
- sd_8255, sd_8255, sd_8255, sd_intr
- },
- .sdinfo = { 0x00, 0x08, 0x10, 0x3f },
- .has_int_sce = true,
- },
-};
-
-static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x20);
- if (ret)
- return ret;
-
- return amplc_dio200_common_attach(dev, it->options[1], 0);
-}
-
-static struct comedi_driver amplc_dio200_driver = {
- .driver_name = "amplc_dio200",
- .module = THIS_MODULE,
- .attach = dio200_attach,
- .detach = comedi_legacy_detach,
- .board_name = &dio200_isa_boards[0].name,
- .offset = sizeof(struct dio200_board),
- .num_names = ARRAY_SIZE(dio200_isa_boards),
-};
-module_comedi_driver(amplc_dio200_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon 200 Series ISA DIO boards");
-MODULE_LICENSE("GPL");
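The driver comment above describes counter configuration through INSN_CONFIG instructions, with the sub-instruction in data[0] and its argument in data[1]. The sketch below shows how such an instruction might be issued from user space, assuming comedilib (comedi_open(), comedi_do_insn()); the device path /dev/comedi0, subdevice number 1 and clock source 2 (internal 1 MHz) are example values, not something mandated by this driver.

/*
 * User-space sketch (assuming comedilib) of the INSN_CONFIG_SET_CLOCK_SRC
 * instruction described above: select the internal 1 MHz clock (source 2)
 * for channel 0 of a counter subdevice.  Device path and subdevice number
 * are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* placeholder path */
	comedi_insn insn;
	lsampl_t data[2];

	if (!dev)
		return 1;

	data[0] = INSN_CONFIG_SET_CLOCK_SRC;
	data[1] = 2;			/* internal 1 MHz clock */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 2;
	insn.data = data;
	insn.subdev = 1;		/* e.g. CTR-Y1 on a pc212e */
	insn.chanspec = CR_PACK(0, 0, 0);	/* counter channel 0 */

	if (comedi_do_insn(dev, &insn) < 0)
		fprintf(stderr, "INSN_CONFIG failed\n");

	comedi_close(dev);
	return 0;
}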
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.h b/drivers/staging/comedi/drivers/amplc_dio200.h
deleted file mode 100644
index 53fb86d59fc3..000000000000
--- a/drivers/staging/comedi/drivers/amplc_dio200.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * comedi/drivers/amplc_dio200.h
- *
- * Header for amplc_dio200.c, amplc_dio200_common.c and
- * amplc_dio200_pci.c.
- *
- * Copyright (C) 2005-2013 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef AMPLC_DIO200_H_INCLUDED
-#define AMPLC_DIO200_H_INCLUDED
-
-#include <linux/types.h>
-
-struct comedi_device;
-
-/*
- * Subdevice types.
- */
-enum dio200_sdtype { sd_none, sd_intr, sd_8255, sd_8254, sd_timer };
-
-#define DIO200_MAX_SUBDEVS 8
-#define DIO200_MAX_ISNS 6
-
-struct dio200_board {
- const char *name;
- unsigned char mainbar;
- unsigned short n_subdevs; /* number of subdevices */
- unsigned char sdtype[DIO200_MAX_SUBDEVS]; /* enum dio200_sdtype */
- unsigned char sdinfo[DIO200_MAX_SUBDEVS]; /* depends on sdtype */
- bool has_int_sce:1; /* has interrupt enable/status reg */
- bool has_clk_gat_sce:1; /* has clock/gate selection registers */
- bool is_pcie:1; /* has enhanced features */
-};
-
-int amplc_dio200_common_attach(struct comedi_device *dev, unsigned int irq,
- unsigned long req_irq_flags);
-
-/* Used by initialization of PCIe boards. */
-void amplc_dio200_set_enhance(struct comedi_device *dev, unsigned char val);
-
-#endif
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
deleted file mode 100644
index d1539e798ffd..000000000000
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ /dev/null
@@ -1,879 +0,0 @@
-/*
- * comedi/drivers/amplc_dio200_common.c
- *
- * Common support code for "amplc_dio200" and "amplc_dio200_pci".
- *
- * Copyright (C) 2005-2013 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "amplc_dio200.h"
-#include "comedi_8254.h"
-#include "8255.h" /* only for register defines */
-
-/* 200 series registers */
-#define DIO200_IO_SIZE 0x20
-#define DIO200_PCIE_IO_SIZE 0x4000
-#define DIO200_CLK_SCE(x) (0x18 + (x)) /* Group X/Y/Z clock sel reg */
-#define DIO200_GAT_SCE(x) (0x1b + (x)) /* Group X/Y/Z gate sel reg */
-#define DIO200_INT_SCE 0x1e /* Interrupt enable/status register */
-/* Extra registers for new PCIe boards */
-#define DIO200_ENHANCE 0x20 /* 1 to enable enhanced features */
-#define DIO200_VERSION 0x24 /* Hardware version register */
-#define DIO200_TS_CONFIG 0x600 /* Timestamp timer config register */
-#define DIO200_TS_COUNT 0x602 /* Timestamp timer count register */
-
-/*
- * Functions for constructing values for the DIO200_CLK_SCE(x) and
- * DIO200_GAT_SCE(x) registers:
- *
- * 'which' is: 0 for CTR-X1, CTR-Y1, CTR-Z1; 1 for CTR-X2, CTR-Y2 or CTR-Z2.
- * 'chan' is the channel: 0, 1 or 2.
- * 'source' is the signal source: 0 to 7, or 0 to 31 for "enhanced" boards.
- */
-static unsigned char clk_gat_sce(unsigned int which, unsigned int chan,
- unsigned int source)
-{
- return (which << 5) | (chan << 3) |
- ((source & 030) << 3) | (source & 007);
-}
-
-static unsigned char clk_sce(unsigned int which, unsigned int chan,
- unsigned int source)
-{
- return clk_gat_sce(which, chan, source);
-}
-
-static unsigned char gat_sce(unsigned int which, unsigned int chan,
- unsigned int source)
-{
- return clk_gat_sce(which, chan, source);
-}
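The clk_gat_sce() helper above packs the source-select byte as follows: bit 5 selects the first or second counter chip of a group (e.g. CTR-X1 vs CTR-X2), bits 4-3 the counter channel, and the source number is split across bits 7-6 and 2-0. The standalone snippet below reproduces the packing with arbitrary example values so the layout can be checked in isolation.

/*
 * Standalone check of the source-select packing done by clk_gat_sce():
 * bit 5 = which chip, bits 4-3 = channel, source split across bits 7-6
 * and 2-0.  Example values are arbitrary.
 */
#include <stdio.h>

static unsigned char clk_gat_sce(unsigned int which, unsigned int chan,
				 unsigned int source)
{
	return (which << 5) | (chan << 3) |
	       ((source & 030) << 3) | (source & 007);
}

int main(void)
{
	/* CTR-?2 chip (which = 1), channel 2, enhanced source 19 */
	printf("sce byte: 0x%02x\n", clk_gat_sce(1, 2, 19));	/* 0xb3 */
	return 0;
}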
-
-/*
- * Periods of the internal clock sources in nanoseconds.
- */
-static const unsigned int clock_period[32] = {
- [1] = 100, /* 10 MHz */
- [2] = 1000, /* 1 MHz */
- [3] = 10000, /* 100 kHz */
- [4] = 100000, /* 10 kHz */
- [5] = 1000000, /* 1 kHz */
- [11] = 50, /* 20 MHz (enhanced boards) */
- /* clock sources 12 and later reserved for enhanced boards */
-};
-
-/*
- * Timestamp timer configuration register (for new PCIe boards).
- */
-#define TS_CONFIG_RESET 0x100 /* Reset counter to zero. */
-#define TS_CONFIG_CLK_SRC_MASK 0x0FF /* Clock source. */
-#define TS_CONFIG_MAX_CLK_SRC 2 /* Maximum clock source value. */
-
-/*
- * Periods of the timestamp timer clock sources in nanoseconds.
- */
-static const unsigned int ts_clock_period[TS_CONFIG_MAX_CLK_SRC + 1] = {
- 1, /* 1 nanosecond (but with 20 ns granularity). */
- 1000, /* 1 microsecond. */
- 1000000, /* 1 millisecond. */
-};
-
-struct dio200_subdev_8255 {
- unsigned int ofs; /* DIO base offset */
-};
-
-struct dio200_subdev_intr {
- spinlock_t spinlock;
- unsigned int ofs;
- unsigned int valid_isns;
- unsigned int enabled_isns;
- bool active:1;
-};
-
-static unsigned char dio200_read8(struct comedi_device *dev,
- unsigned int offset)
-{
- const struct dio200_board *board = dev->board_ptr;
-
- if (board->is_pcie)
- offset <<= 3;
-
- if (dev->mmio)
- return readb(dev->mmio + offset);
- return inb(dev->iobase + offset);
-}
-
-static void dio200_write8(struct comedi_device *dev,
- unsigned int offset, unsigned char val)
-{
- const struct dio200_board *board = dev->board_ptr;
-
- if (board->is_pcie)
- offset <<= 3;
-
- if (dev->mmio)
- writeb(val, dev->mmio + offset);
- else
- outb(val, dev->iobase + offset);
-}
-
-static unsigned int dio200_read32(struct comedi_device *dev,
- unsigned int offset)
-{
- const struct dio200_board *board = dev->board_ptr;
-
- if (board->is_pcie)
- offset <<= 3;
-
- if (dev->mmio)
- return readl(dev->mmio + offset);
- return inl(dev->iobase + offset);
-}
-
-static void dio200_write32(struct comedi_device *dev,
- unsigned int offset, unsigned int val)
-{
- const struct dio200_board *board = dev->board_ptr;
-
- if (board->is_pcie)
- offset <<= 3;
-
- if (dev->mmio)
- writel(val, dev->mmio + offset);
- else
- outl(val, dev->iobase + offset);
-}
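
On the PCIe boards the registers are spaced eight bytes apart, which is why the
helpers above shift the nominal offset left by 3 when board->is_pcie is set. A
one-line illustration of that arithmetic (not driver code):

unsigned int pcie_reg_offset(unsigned int nominal)
{
	return nominal << 3;	/* e.g. DIO200_CLK_SCE(0) = 0x18 -> 0xc0 */
}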
-
-static unsigned int dio200_subdev_8254_offset(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct comedi_8254 *i8254 = s->private;
- unsigned int offset;
-
- /* get the offset that was passed to comedi_8254_*_init() */
- if (dev->mmio)
- offset = i8254->mmio - dev->mmio;
- else
- offset = i8254->iobase - dev->iobase;
-
- /* remove the shift that was added for PCIe boards */
- if (board->is_pcie)
- offset >>= 3;
-
- /* this offset now works for the dio200_{read,write} helpers */
- return offset;
-}
-
-static int dio200_subdev_intr_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct dio200_subdev_intr *subpriv = s->private;
-
- if (board->has_int_sce) {
- /* Just read the interrupt status register. */
- data[1] = dio200_read8(dev, subpriv->ofs) & subpriv->valid_isns;
- } else {
- /* No interrupt status register. */
- data[0] = 0;
- }
-
- return insn->n;
-}
-
-static void dio200_stop_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct dio200_subdev_intr *subpriv = s->private;
-
- subpriv->active = false;
- subpriv->enabled_isns = 0;
- if (board->has_int_sce)
- dio200_write8(dev, subpriv->ofs, 0);
-}
-
-static void dio200_start_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct dio200_subdev_intr *subpriv = s->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int n;
- unsigned isn_bits;
-
- /* Determine interrupt sources to enable. */
- isn_bits = 0;
- if (cmd->chanlist) {
- for (n = 0; n < cmd->chanlist_len; n++)
- isn_bits |= (1U << CR_CHAN(cmd->chanlist[n]));
- }
- isn_bits &= subpriv->valid_isns;
- /* Enable interrupt sources. */
- subpriv->enabled_isns = isn_bits;
- if (board->has_int_sce)
- dio200_write8(dev, subpriv->ofs, isn_bits);
-}
-
-static int dio200_inttrig_start_intr(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct dio200_subdev_intr *subpriv = s->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long flags;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- spin_lock_irqsave(&subpriv->spinlock, flags);
- s->async->inttrig = NULL;
- if (subpriv->active)
- dio200_start_intr(dev, s);
-
- spin_unlock_irqrestore(&subpriv->spinlock, flags);
-
- return 1;
-}
-
-static void dio200_read_scan_intr(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int triggered)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned short val;
- unsigned int n, ch;
-
- val = 0;
- for (n = 0; n < cmd->chanlist_len; n++) {
- ch = CR_CHAN(cmd->chanlist[n]);
- if (triggered & (1U << ch))
- val |= (1U << n);
- }
-
- comedi_buf_write_samples(s, &val, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
-}
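
On the user-space side, each packed sample can be decoded with the same channel
list that was passed in the command, since bit n of the sample corresponds to
chanlist entry n. A hedged sketch (the function name and the surrounding read
loop are assumptions):

#include <stdio.h>
#include <comedilib.h>

/* 'sample' is one value read from the 'INTERRUPT' subdevice stream and
 * 'chanlist' is the channel list passed in the comedi command. */
static void report_triggered(unsigned short sample,
			     const unsigned int *chanlist, unsigned int len)
{
	unsigned int n;

	for (n = 0; n < len; n++)
		if (sample & (1U << n))
			printf("interrupt source %u fired\n",
			       CR_CHAN(chanlist[n]));
}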
-
-static int dio200_handle_read_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct dio200_subdev_intr *subpriv = s->private;
- unsigned triggered;
- unsigned intstat;
- unsigned cur_enabled;
- unsigned long flags;
-
- triggered = 0;
-
- spin_lock_irqsave(&subpriv->spinlock, flags);
- if (board->has_int_sce) {
- /*
- * Collect interrupt sources that have triggered and disable
- * them temporarily. Loop around until no extra interrupt
- * sources have triggered, at which point, the valid part of
- * the interrupt status register will read zero, clearing the
- * cause of the interrupt.
- *
- * Mask off interrupt sources already seen to avoid infinite
- * loop in case of misconfiguration.
- */
- cur_enabled = subpriv->enabled_isns;
- while ((intstat = (dio200_read8(dev, subpriv->ofs) &
- subpriv->valid_isns & ~triggered)) != 0) {
- triggered |= intstat;
- cur_enabled &= ~triggered;
- dio200_write8(dev, subpriv->ofs, cur_enabled);
- }
- } else {
- /*
- * No interrupt status register. Assume the single interrupt
- * source has triggered.
- */
- triggered = subpriv->enabled_isns;
- }
-
- if (triggered) {
- /*
- * Some interrupt sources have triggered and have been
- * temporarily disabled to clear the cause of the interrupt.
- *
- * Reenable them NOW to minimize the time they are disabled.
- */
- cur_enabled = subpriv->enabled_isns;
- if (board->has_int_sce)
- dio200_write8(dev, subpriv->ofs, cur_enabled);
-
- if (subpriv->active) {
- /*
- * The command is still active.
- *
- * Ignore interrupt sources that the command isn't
- * interested in (just in case there's a race
- * condition).
- */
- if (triggered & subpriv->enabled_isns) {
- /* Collect scan data. */
- dio200_read_scan_intr(dev, s, triggered);
- }
- }
- }
- spin_unlock_irqrestore(&subpriv->spinlock, flags);
-
- comedi_handle_events(dev, s);
-
- return (triggered != 0);
-}
-
-static int dio200_subdev_intr_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct dio200_subdev_intr *subpriv = s->private;
- unsigned long flags;
-
- spin_lock_irqsave(&subpriv->spinlock, flags);
- if (subpriv->active)
- dio200_stop_intr(dev, s);
-
- spin_unlock_irqrestore(&subpriv->spinlock, flags);
-
- return 0;
-}
-
-static int dio200_subdev_intr_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- /* if (err) return 4; */
-
- return 0;
-}
-
-static int dio200_subdev_intr_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- struct dio200_subdev_intr *subpriv = s->private;
- unsigned long flags;
-
- spin_lock_irqsave(&subpriv->spinlock, flags);
-
- subpriv->active = true;
-
- if (cmd->start_src == TRIG_INT)
- s->async->inttrig = dio200_inttrig_start_intr;
- else /* TRIG_NOW */
- dio200_start_intr(dev, s);
-
- spin_unlock_irqrestore(&subpriv->spinlock, flags);
-
- return 0;
-}
-
-static int dio200_subdev_intr_init(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int offset,
- unsigned valid_isns)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct dio200_subdev_intr *subpriv;
-
- subpriv = comedi_alloc_spriv(s, sizeof(*subpriv));
- if (!subpriv)
- return -ENOMEM;
-
- subpriv->ofs = offset;
- subpriv->valid_isns = valid_isns;
- spin_lock_init(&subpriv->spinlock);
-
- if (board->has_int_sce)
- /* Disable interrupt sources. */
- dio200_write8(dev, subpriv->ofs, 0);
-
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_PACKED;
- if (board->has_int_sce) {
- s->n_chan = DIO200_MAX_ISNS;
- s->len_chanlist = DIO200_MAX_ISNS;
- } else {
- /* No interrupt source register. Support single channel. */
- s->n_chan = 1;
- s->len_chanlist = 1;
- }
- s->range_table = &range_digital;
- s->maxdata = 1;
- s->insn_bits = dio200_subdev_intr_insn_bits;
- s->do_cmdtest = dio200_subdev_intr_cmdtest;
- s->do_cmd = dio200_subdev_intr_cmd;
- s->cancel = dio200_subdev_intr_cancel;
-
- return 0;
-}
-
-static irqreturn_t dio200_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- int handled;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- handled = dio200_handle_read_intr(dev, s);
-
- return IRQ_RETVAL(handled);
-}
-
-static void dio200_subdev_8254_set_gate_src(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int src)
-{
- unsigned int offset = dio200_subdev_8254_offset(dev, s);
-
- dio200_write8(dev, DIO200_GAT_SCE(offset >> 3),
- gat_sce((offset >> 2) & 1, chan, src));
-}
-
-static void dio200_subdev_8254_set_clock_src(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int src)
-{
- unsigned int offset = dio200_subdev_8254_offset(dev, s);
-
- dio200_write8(dev, DIO200_CLK_SCE(offset >> 3),
- clk_sce((offset >> 2) & 1, chan, src));
-}
-
-static int dio200_subdev_8254_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct comedi_8254 *i8254 = s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int max_src = board->is_pcie ? 31 : 7;
- unsigned int src;
-
- if (!board->has_clk_gat_sce)
- return -EINVAL;
-
- switch (data[0]) {
- case INSN_CONFIG_SET_GATE_SRC:
- src = data[2];
- if (src > max_src)
- return -EINVAL;
-
- dio200_subdev_8254_set_gate_src(dev, s, chan, src);
- i8254->gate_src[chan] = src;
- break;
- case INSN_CONFIG_GET_GATE_SRC:
- data[2] = i8254->gate_src[chan];
- break;
- case INSN_CONFIG_SET_CLOCK_SRC:
- src = data[1];
- if (src > max_src)
- return -EINVAL;
-
- dio200_subdev_8254_set_clock_src(dev, s, chan, src);
- i8254->clock_src[chan] = src;
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- data[1] = i8254->clock_src[chan];
- data[2] = clock_period[i8254->clock_src[chan]];
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static int dio200_subdev_8254_init(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int offset)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct comedi_8254 *i8254;
- unsigned int regshift;
- int chan;
-
- /*
- * PCIe boards need the offset shifted in order to get the
- * correct base address of the timer.
- */
- if (board->is_pcie) {
- offset <<= 3;
- regshift = 3;
- } else {
- regshift = 0;
- }
-
- if (dev->mmio) {
- i8254 = comedi_8254_mm_init(dev->mmio + offset,
- 0, I8254_IO8, regshift);
- } else {
- i8254 = comedi_8254_init(dev->iobase + offset,
- 0, I8254_IO8, regshift);
- }
- if (!i8254)
- return -ENOMEM;
-
- comedi_8254_subdevice_init(s, i8254);
-
- i8254->insn_config = dio200_subdev_8254_config;
-
- /*
- * There could be multiple timers so this driver does not
- * use dev->pacer to save the i8254 pointer. Instead,
- * comedi_8254_subdevice_init() saved the i8254 pointer in
- * s->private. Mark the subdevice as having private data
- * to be automatically freed when the device is detached.
- */
- comedi_set_spriv_auto_free(s);
-
- /* Initialize channels. */
- if (board->has_clk_gat_sce) {
- for (chan = 0; chan < 3; chan++) {
- /* Gate source 0 is VCC (logic 1). */
- dio200_subdev_8254_set_gate_src(dev, s, chan, 0);
- /* Clock source 0 is the dedicated clock input. */
- dio200_subdev_8254_set_clock_src(dev, s, chan, 0);
- }
- }
-
- return 0;
-}
-
-static void dio200_subdev_8255_set_dir(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct dio200_subdev_8255 *subpriv = s->private;
- int config;
-
- config = I8255_CTRL_CW;
- /* 1 in io_bits indicates output, 1 in config indicates input */
- if (!(s->io_bits & 0x0000ff))
- config |= I8255_CTRL_A_IO;
- if (!(s->io_bits & 0x00ff00))
- config |= I8255_CTRL_B_IO;
- if (!(s->io_bits & 0x0f0000))
- config |= I8255_CTRL_C_LO_IO;
- if (!(s->io_bits & 0xf00000))
- config |= I8255_CTRL_C_HI_IO;
- dio200_write8(dev, subpriv->ofs + I8255_CTRL_REG, config);
-}
-
-static int dio200_subdev_8255_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dio200_subdev_8255 *subpriv = s->private;
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0xff) {
- dio200_write8(dev, subpriv->ofs + I8255_DATA_A_REG,
- s->state & 0xff);
- }
- if (mask & 0xff00) {
- dio200_write8(dev, subpriv->ofs + I8255_DATA_B_REG,
- (s->state >> 8) & 0xff);
- }
- if (mask & 0xff0000) {
- dio200_write8(dev, subpriv->ofs + I8255_DATA_C_REG,
- (s->state >> 16) & 0xff);
- }
- }
-
- val = dio200_read8(dev, subpriv->ofs + I8255_DATA_A_REG);
- val |= dio200_read8(dev, subpriv->ofs + I8255_DATA_B_REG) << 8;
- val |= dio200_read8(dev, subpriv->ofs + I8255_DATA_C_REG) << 16;
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int dio200_subdev_8255_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 8)
- mask = 0x0000ff;
- else if (chan < 16)
- mask = 0x00ff00;
- else if (chan < 20)
- mask = 0x0f0000;
- else
- mask = 0xf00000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- dio200_subdev_8255_set_dir(dev, s);
-
- return insn->n;
-}
-
-static int dio200_subdev_8255_init(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int offset)
-{
- struct dio200_subdev_8255 *subpriv;
-
- subpriv = comedi_alloc_spriv(s, sizeof(*subpriv));
- if (!subpriv)
- return -ENOMEM;
-
- subpriv->ofs = offset;
-
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->range_table = &range_digital;
- s->maxdata = 1;
- s->insn_bits = dio200_subdev_8255_bits;
- s->insn_config = dio200_subdev_8255_config;
- dio200_subdev_8255_set_dir(dev, s);
- return 0;
-}
-
-static int dio200_subdev_timer_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int n;
-
- for (n = 0; n < insn->n; n++)
- data[n] = dio200_read32(dev, DIO200_TS_COUNT);
- return n;
-}
-
-static void dio200_subdev_timer_reset(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int clock;
-
- clock = dio200_read32(dev, DIO200_TS_CONFIG) & TS_CONFIG_CLK_SRC_MASK;
- dio200_write32(dev, DIO200_TS_CONFIG, clock | TS_CONFIG_RESET);
- dio200_write32(dev, DIO200_TS_CONFIG, clock);
-}
-
-static void dio200_subdev_timer_get_clock_src(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *src,
- unsigned int *period)
-{
- unsigned int clk;
-
- clk = dio200_read32(dev, DIO200_TS_CONFIG) & TS_CONFIG_CLK_SRC_MASK;
- *src = clk;
- *period = (clk < ARRAY_SIZE(ts_clock_period)) ?
- ts_clock_period[clk] : 0;
-}
-
-static int dio200_subdev_timer_set_clock_src(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int src)
-{
- if (src > TS_CONFIG_MAX_CLK_SRC)
- return -EINVAL;
- dio200_write32(dev, DIO200_TS_CONFIG, src);
- return 0;
-}
-
-static int dio200_subdev_timer_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret = 0;
-
- switch (data[0]) {
- case INSN_CONFIG_RESET:
- dio200_subdev_timer_reset(dev, s);
- break;
- case INSN_CONFIG_SET_CLOCK_SRC:
- ret = dio200_subdev_timer_set_clock_src(dev, s, data[1]);
- if (ret < 0)
- ret = -EINVAL;
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- dio200_subdev_timer_get_clock_src(dev, s, &data[1], &data[2]);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret < 0 ? ret : insn->n;
-}
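
From user space the free-running timestamp timer can be read like any other
single-channel subdevice. A hedged sketch using comedilib (the device node and
the subdevice number are assumptions; the timer is subdevice 6 on the PCIe
boards handled by amplc_dio200_pci):

#include <stdio.h>
#include <unistd.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	unsigned int subdev = 6;			/* assumed TIMER subdevice */
	lsampl_t t0 = 0, t1 = 0;

	if (!dev)
		return 1;
	comedi_data_read(dev, subdev, 0, 0, AREF_GROUND, &t0);
	usleep(10000);
	comedi_data_read(dev, subdev, 0, 0, AREF_GROUND, &t1);
	/* Elapsed time is (t1 - t0) * period; the period in ns is returned in
	 * data[2] by INSN_CONFIG_GET_CLOCK_SRC on this subdevice. */
	printf("counts elapsed: %u\n", t1 - t0);
	comedi_close(dev);
	return 0;
}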
-
-void amplc_dio200_set_enhance(struct comedi_device *dev, unsigned char val)
-{
- dio200_write8(dev, DIO200_ENHANCE, val);
-}
-EXPORT_SYMBOL_GPL(amplc_dio200_set_enhance);
-
-int amplc_dio200_common_attach(struct comedi_device *dev, unsigned int irq,
- unsigned long req_irq_flags)
-{
- const struct dio200_board *board = dev->board_ptr;
- struct comedi_subdevice *s;
- unsigned int n;
- int ret;
-
- ret = comedi_alloc_subdevices(dev, board->n_subdevs);
- if (ret)
- return ret;
-
- for (n = 0; n < dev->n_subdevices; n++) {
- s = &dev->subdevices[n];
- switch (board->sdtype[n]) {
- case sd_8254:
- /* counter subdevice (8254) */
- ret = dio200_subdev_8254_init(dev, s,
- board->sdinfo[n]);
- if (ret < 0)
- return ret;
- break;
- case sd_8255:
- /* digital i/o subdevice (8255) */
- ret = dio200_subdev_8255_init(dev, s,
- board->sdinfo[n]);
- if (ret < 0)
- return ret;
- break;
- case sd_intr:
- /* 'INTERRUPT' subdevice */
- if (irq && !dev->read_subdev) {
- ret = dio200_subdev_intr_init(dev, s,
- DIO200_INT_SCE,
- board->sdinfo[n]);
- if (ret < 0)
- return ret;
- dev->read_subdev = s;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
- break;
- case sd_timer:
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
- s->n_chan = 1;
- s->maxdata = 0xffffffff;
- s->insn_read = dio200_subdev_timer_read;
- s->insn_config = dio200_subdev_timer_config;
- break;
- default:
- s->type = COMEDI_SUBD_UNUSED;
- break;
- }
- }
-
- if (irq && dev->read_subdev) {
- if (request_irq(irq, dio200_interrupt, req_irq_flags,
- dev->board_name, dev) >= 0) {
- dev->irq = irq;
- } else {
- dev_warn(dev->class_dev,
- "warning! irq %u unavailable!\n", irq);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(amplc_dio200_common_attach);
-
-static int __init amplc_dio200_common_init(void)
-{
- return 0;
-}
-module_init(amplc_dio200_common_init);
-
-static void __exit amplc_dio200_common_exit(void)
-{
-}
-module_exit(amplc_dio200_common_exit);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi helper for amplc_dio200 and amplc_dio200_pci");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_pci.c b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
deleted file mode 100644
index 2598e6e7d47d..000000000000
--- a/drivers/staging/comedi/drivers/amplc_dio200_pci.c
+++ /dev/null
@@ -1,424 +0,0 @@
-/* comedi/drivers/amplc_dio200_pci.c
- *
- * Driver for Amplicon PCI215, PCI272, PCIe215, PCIe236, PCIe296.
- *
- * Copyright (C) 2005-2013 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: amplc_dio200_pci
- * Description: Amplicon 200 Series PCI Digital I/O
- * Author: Ian Abbott <abbotti@mev.co.uk>
- * Devices: [Amplicon] PCI215 (amplc_dio200_pci), PCIe215, PCIe236,
- * PCI272, PCIe296
- * Updated: Mon, 18 Mar 2013 15:03:50 +0000
- * Status: works
- *
- * Configuration options:
- * none
- *
- * Manual configuration of PCI(e) cards is not supported; they are configured
- * automatically.
- *
- * SUBDEVICES
- *
- * PCI215 PCIe215 PCIe236
- * ------------- ------------- -------------
- * Subdevices 5 8 8
- * 0 PPI-X PPI-X PPI-X
- * 1 PPI-Y UNUSED UNUSED
- * 2 CTR-Z1 PPI-Y UNUSED
- * 3 CTR-Z2 UNUSED UNUSED
- * 4 INTERRUPT CTR-Z1 CTR-Z1
- * 5 CTR-Z2 CTR-Z2
- * 6 TIMER TIMER
- * 7 INTERRUPT INTERRUPT
- *
- *
- * PCI272 PCIe296
- * ------------- -------------
- * Subdevices 4 8
- * 0 PPI-X PPI-X1
- * 1 PPI-Y PPI-X2
- * 2 PPI-Z PPI-Y1
- * 3 INTERRUPT PPI-Y2
- * 4 CTR-Z1
- * 5 CTR-Z2
- * 6 TIMER
- * 7 INTERRUPT
- *
- * Each PPI is an 8255 chip providing 24 DIO channels. The DIO channels
- * are configurable as inputs or outputs in four groups:
- *
- * Port A - channels 0 to 7
- * Port B - channels 8 to 15
- * Port CL - channels 16 to 19
- * Port CH - channels 20 to 23
- *
- * Only mode 0 of the 8255 chips is supported.
- *
- * Each CTR is an 8254 chip providing three 16-bit counter channels. Each
- * channel is configured individually with INSN_CONFIG instructions. The
- * specific type of configuration instruction is specified in data[0].
- * Some configuration instructions expect an additional parameter in
- * data[1]; others return a value in data[1]. The following configuration
- * instructions are supported:
- *
- * INSN_CONFIG_SET_COUNTER_MODE. Sets the counter channel's mode and
- * BCD/binary setting specified in data[1].
- *
- * INSN_CONFIG_8254_READ_STATUS. Reads the status register value for the
- * counter channel into data[1].
- *
- * INSN_CONFIG_SET_CLOCK_SRC. Sets the counter channel's clock source as
- * specified in data[1] (this is a hardware-specific value). Not
- * supported on PC214E. For the other boards, valid clock sources are
- * 0 to 7 as follows:
- *
- * 0. CLK n, the counter channel's dedicated CLK input from the SK1
- * connector. (N.B. for other values, the counter channel's CLKn
- * pin on the SK1 connector is an output!)
- * 1. Internal 10 MHz clock.
- * 2. Internal 1 MHz clock.
- * 3. Internal 100 kHz clock.
- * 4. Internal 10 kHz clock.
- * 5. Internal 1 kHz clock.
- * 6. OUT n-1, the output of counter channel n-1 (see note 1 below).
- * 7. Ext Clock, the counter chip's dedicated Ext Clock input from
- * the SK1 connector. This pin is shared by all three counter
- * channels on the chip.
- *
- * For the PCIe boards, clock sources in the range 0 to 31 are allowed
- * and the following additional clock sources are defined:
- *
- * 8. HIGH logic level.
- * 9. LOW logic level.
- * 10. "Pattern present" signal.
- * 11. Internal 20 MHz clock.
- *
- * INSN_CONFIG_GET_CLOCK_SRC. Returns the counter channel's current
- * clock source in data[1]. For internal clock sources, data[2] is set
- * to the period in ns.
- *
- * INSN_CONFIG_SET_GATE_SRC. Sets the counter channel's gate source as
- * specified in data[2] (this is a hardware-specific value). Not
- * supported on PC214E. For the other boards, valid gate sources are 0
- * to 7 as follows:
- *
- * 0. VCC (internal +5V d.c.), i.e. gate permanently enabled.
- * 1. GND (internal 0V d.c.), i.e. gate permanently disabled.
- * 2. GAT n, the counter channel's dedicated GAT input from the SK1
- * connector. (N.B. for other values, the counter channel's GATn
- * pin on the SK1 connector is an output!)
- * 3. /OUT n-2, the inverted output of counter channel n-2 (see note
- * 2 below).
- * 4. Reserved.
- * 5. Reserved.
- * 6. Reserved.
- * 7. Reserved.
- *
- * For the PCIe boards, gate sources in the range 0 to 31 are allowed;
- * the following additional gate sources are defined, and gate sources
- * 6 and 7 are (re)defined:
- *
- * 6. /GAT n, negated version of the counter channel's dedicated
- * GAT input (negated version of gate source 2).
- * 7. OUT n-2, the non-inverted output of counter channel n-2
- * (negated version of gate source 3).
- * 8. "Pattern present" signal, HIGH while pattern present.
- * 9. "Pattern occurred" latched signal, latches HIGH when pattern
- * occurs.
- * 10. "Pattern gone away" latched signal, latches LOW when pattern
- * goes away after it occurred.
- * 11. Negated "pattern present" signal, LOW while pattern present
- * (negated version of gate source 8).
- * 12. Negated "pattern occurred" latched signal, latches LOW when
- * pattern occurs (negated version of gate source 9).
- * 13. Negated "pattern gone away" latched signal, latches HIGH when
- * pattern goes away after it occurred (negated version of gate
- * source 10).
- *
- * INSN_CONFIG_GET_GATE_SRC. Returns the counter channel's current gate
- * source in data[2].
- *
- * Clock and gate interconnection notes:
- *
- * 1. Clock source OUT n-1 is the output of the preceding channel on the
- * same counter subdevice if n > 0, or the output of channel 2 on the
- * preceding counter subdevice (see note 3) if n = 0.
- *
- * 2. Gate source /OUT n-2 is the inverted output of channel 0 on the
- * same counter subdevice if n = 2, or the inverted output of channel n+1
- * on the preceding counter subdevice (see note 3) if n < 2.
- *
- * 3. The counter subdevices are connected in a ring, so the highest
- * counter subdevice precedes the lowest.
- *
- * The 'TIMER' subdevice provides a free-running 32-bit timestamp timer.
- *
- * The 'INTERRUPT' subdevice pretends to be a digital input subdevice. The
- * digital inputs come from the interrupt status register. The number of
- * channels matches the number of interrupt sources. The PC214E does not
- * have an interrupt status register; see notes on 'INTERRUPT SOURCES'
- * below.
- *
- * INTERRUPT SOURCES
- *
- * PCI215 PCIe215 PCIe236
- * ------------- ------------- -------------
- * Sources 6 6 6
- * 0 PPI-X-C0 PPI-X-C0 PPI-X-C0
- * 1 PPI-X-C3 PPI-X-C3 PPI-X-C3
- * 2 PPI-Y-C0 PPI-Y-C0 unused
- * 3 PPI-Y-C3 PPI-Y-C3 unused
- * 4 CTR-Z1-OUT1 CTR-Z1-OUT1 CTR-Z1-OUT1
- * 5 CTR-Z2-OUT1 CTR-Z2-OUT1 CTR-Z2-OUT1
- *
- * PCI272 PCIe296
- * ------------- -------------
- * Sources 6 6
- * 0 PPI-X-C0 PPI-X1-C0
- * 1 PPI-X-C3 PPI-X1-C3
- * 2 PPI-Y-C0 PPI-Y1-C0
- * 3 PPI-Y-C3 PPI-Y1-C3
- * 4 PPI-Z-C0 CTR-Z1-OUT1
- * 5 PPI-Z-C3 CTR-Z2-OUT1
- *
- * When an interrupt source is enabled in the interrupt source enable
- * register, a rising edge on the source signal latches the corresponding
- * bit to 1 in the interrupt status register.
- *
- * When the interrupt status register value as a whole (actually, just the
- * 6 least significant bits) goes from zero to non-zero, the board will
- * generate an interrupt. The interrupt will remain asserted until the
- * interrupt status register is cleared to zero. To clear a bit to zero in
- * the interrupt status register, the corresponding interrupt source must
- * be disabled in the interrupt source enable register (there is no
- * separate interrupt clear register).
- *
- * COMMANDS
- *
- * The driver supports a read streaming acquisition command on the
- * 'INTERRUPT' subdevice. The channel list selects the interrupt sources
- * to be enabled. All channels will be sampled together (convert_src ==
- * TRIG_NOW). The scan begins a short time after the hardware interrupt
- * occurs, subject to interrupt latencies (scan_begin_src == TRIG_EXT,
- * scan_begin_arg == 0). The value read from the interrupt status register
- * is packed into a short value, one bit per requested channel, in the
- * order they appear in the channel list.
- */
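
As a concrete illustration of the INSN_CONFIG_*_CLOCK_SRC instructions described
above, here is a hedged user-space sketch using comedilib. The device node and
the choice of subdevice 4 (CTR-Z1 on a PCIe215 per the table above) are
assumptions:

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	comedi_insn insn;
	lsampl_t data[3];

	if (!dev)
		return 1;

	/* Select the internal 10 MHz clock (source 1) for counter channel 0. */
	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = 4;			/* assumed counter subdevice */
	insn.chanspec = CR_PACK(0, 0, 0);
	data[0] = INSN_CONFIG_SET_CLOCK_SRC;
	data[1] = 1;
	insn.n = 2;
	insn.data = data;
	if (comedi_do_insn(dev, &insn) < 0)
		comedi_perror("set clock source");

	/* Read it back; the driver also returns the period in ns in data[2]. */
	data[0] = INSN_CONFIG_GET_CLOCK_SRC;
	data[1] = 0;
	data[2] = 0;
	insn.n = 3;
	if (comedi_do_insn(dev, &insn) >= 0)
		printf("clock source %u, period %u ns\n", data[1], data[2]);

	comedi_close(dev);
	return 0;
}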
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "amplc_dio200.h"
-
-/*
- * Board descriptions.
- */
-
-enum dio200_pci_model {
- pci215_model,
- pci272_model,
- pcie215_model,
- pcie236_model,
- pcie296_model
-};
-
-static const struct dio200_board dio200_pci_boards[] = {
- [pci215_model] = {
- .name = "pci215",
- .mainbar = 2,
- .n_subdevs = 5,
- .sdtype = {
- sd_8255, sd_8255, sd_8254, sd_8254, sd_intr
- },
- .sdinfo = { 0x00, 0x08, 0x10, 0x14, 0x3f },
- .has_int_sce = true,
- .has_clk_gat_sce = true,
- },
- [pci272_model] = {
- .name = "pci272",
- .mainbar = 2,
- .n_subdevs = 4,
- .sdtype = {
- sd_8255, sd_8255, sd_8255, sd_intr
- },
- .sdinfo = { 0x00, 0x08, 0x10, 0x3f },
- .has_int_sce = true,
- },
- [pcie215_model] = {
- .name = "pcie215",
- .mainbar = 1,
- .n_subdevs = 8,
- .sdtype = {
- sd_8255, sd_none, sd_8255, sd_none,
- sd_8254, sd_8254, sd_timer, sd_intr
- },
- .sdinfo = {
- 0x00, 0x00, 0x08, 0x00, 0x10, 0x14, 0x00, 0x3f
- },
- .has_int_sce = true,
- .has_clk_gat_sce = true,
- .is_pcie = true,
- },
- [pcie236_model] = {
- .name = "pcie236",
- .mainbar = 1,
- .n_subdevs = 8,
- .sdtype = {
- sd_8255, sd_none, sd_none, sd_none,
- sd_8254, sd_8254, sd_timer, sd_intr
- },
- .sdinfo = {
- 0x00, 0x00, 0x00, 0x00, 0x10, 0x14, 0x00, 0x3f
- },
- .has_int_sce = true,
- .has_clk_gat_sce = true,
- .is_pcie = true,
- },
- [pcie296_model] = {
- .name = "pcie296",
- .mainbar = 1,
- .n_subdevs = 8,
- .sdtype = {
- sd_8255, sd_8255, sd_8255, sd_8255,
- sd_8254, sd_8254, sd_timer, sd_intr
- },
- .sdinfo = {
- 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x00, 0x3f
- },
- .has_int_sce = true,
- .has_clk_gat_sce = true,
- .is_pcie = true,
- },
-};
-
-/*
- * This function does some special set-up for the PCIe boards
- * PCIe215, PCIe236, PCIe296.
- */
-static int dio200_pcie_board_setup(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- void __iomem *brbase;
-
- /*
- * The board uses Altera Cyclone IV with PCI-Express hard IP.
- * The FPGA configuration has the PCI-Express Avalon-MM Bridge
- * Control registers in PCI BAR 0, offset 0, and the length of
- * these registers is 0x4000.
- *
- * We need to write 0x80 to the "Avalon-MM to PCI-Express Interrupt
- * Enable" register at offset 0x50 to allow generation of PCIe
- * interrupts when RXmlrq_i is asserted in the SOPC Builder system.
- */
- if (pci_resource_len(pcidev, 0) < 0x4000) {
- dev_err(dev->class_dev, "error! bad PCI region!\n");
- return -EINVAL;
- }
- brbase = pci_ioremap_bar(pcidev, 0);
- if (!brbase) {
- dev_err(dev->class_dev, "error! failed to map registers!\n");
- return -ENOMEM;
- }
- writel(0x80, brbase + 0x50);
- iounmap(brbase);
- /* Enable "enhanced" features of board. */
- amplc_dio200_set_enhance(dev, 1);
- return 0;
-}
-
-static int dio200_pci_auto_attach(struct comedi_device *dev,
- unsigned long context_model)
-{
- struct pci_dev *pci_dev = comedi_to_pci_dev(dev);
- const struct dio200_board *board = NULL;
- unsigned int bar;
- int ret;
-
- if (context_model < ARRAY_SIZE(dio200_pci_boards))
- board = &dio200_pci_boards[context_model];
- if (!board)
- return -EINVAL;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- dev_info(dev->class_dev, "%s: attach pci %s (%s)\n",
- dev->driver->driver_name, pci_name(pci_dev), dev->board_name);
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- bar = board->mainbar;
- if (pci_resource_flags(pci_dev, bar) & IORESOURCE_MEM) {
- dev->mmio = pci_ioremap_bar(pci_dev, bar);
- if (!dev->mmio) {
- dev_err(dev->class_dev,
- "error! cannot remap registers\n");
- return -ENOMEM;
- }
- } else {
- dev->iobase = pci_resource_start(pci_dev, bar);
- }
-
- if (board->is_pcie) {
- ret = dio200_pcie_board_setup(dev);
- if (ret < 0)
- return ret;
- }
-
- return amplc_dio200_common_attach(dev, pci_dev->irq, IRQF_SHARED);
-}
-
-static struct comedi_driver dio200_pci_comedi_driver = {
- .driver_name = "amplc_dio200_pci",
- .module = THIS_MODULE,
- .auto_attach = dio200_pci_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static const struct pci_device_id dio200_pci_table[] = {
- { PCI_VDEVICE(AMPLICON, 0x000b), pci215_model },
- { PCI_VDEVICE(AMPLICON, 0x000a), pci272_model },
- { PCI_VDEVICE(AMPLICON, 0x0011), pcie236_model },
- { PCI_VDEVICE(AMPLICON, 0x0012), pcie215_model },
- { PCI_VDEVICE(AMPLICON, 0x0014), pcie296_model },
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, dio200_pci_table);
-
-static int dio200_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &dio200_pci_comedi_driver,
- id->driver_data);
-}
-
-static struct pci_driver dio200_pci_pci_driver = {
- .name = "amplc_dio200_pci",
- .id_table = dio200_pci_table,
- .probe = dio200_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(dio200_pci_comedi_driver, dio200_pci_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon 200 Series PCI(e) DIO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
deleted file mode 100644
index 875cc19cb969..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * comedi/drivers/amplc_pc236.c
- * Driver for Amplicon PC36AT DIO boards.
- *
- * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-/*
- * Driver: amplc_pc236
- * Description: Amplicon PC36AT
- * Author: Ian Abbott <abbotti@mev.co.uk>
- * Devices: [Amplicon] PC36AT (pc36at)
- * Updated: Fri, 25 Jul 2014 15:32:40 +0000
- * Status: works
- *
- * Configuration options - PC36AT:
- * [0] - I/O port base address
- * [1] - IRQ (optional)
- *
- * The PC36AT board has a single 8255 appearing as subdevice 0.
- *
- * Subdevice 1 pretends to be a digital input device, but it always returns
- * 0 when read. However, if you run a command with scan_begin_src=TRIG_EXT,
- * a rising edge on port C bit 3 acts as an external trigger, which can be
- * used to wake up tasks. This is like the comedi_parport device, but the
- * only way to physically disable the interrupt on the PC36AT is to remove
- * the IRQ jumper. If no interrupt is connected, then subdevice 1 is
- * unused.
- */
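
A hedged user-space sketch of using subdevice 1 as an external trigger, built to
match the command constraints enforced by pc236_intr_cmdtest() later in this
patch (the device node and minimal error handling are assumptions):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	unsigned int chanlist[1] = { CR_PACK(0, 0, AREF_GROUND) };
	comedi_cmd cmd;
	unsigned short sample;

	if (!dev)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = 1;			/* the external-trigger subdevice */
	cmd.start_src = TRIG_NOW;	/* all args left at 0, as required */
	cmd.scan_begin_src = TRIG_EXT;	/* rising edge on port C bit 3 */
	cmd.convert_src = TRIG_FOLLOW;
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = 1;		/* == chanlist_len */
	cmd.stop_src = TRIG_NONE;	/* run until cancelled */
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 1;

	if (comedi_command(dev, &cmd) < 0) {
		comedi_perror("comedi_command");
		return 1;
	}
	/* Each external trigger pushes one (dummy) 16-bit sample. */
	if (read(comedi_fileno(dev), &sample, sizeof(sample)) == sizeof(sample))
		printf("triggered\n");
	comedi_close(dev);
	return 0;
}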
-
-#include <linux/module.h>
-
-#include "../comedidev.h"
-
-#include "amplc_pc236.h"
-
-static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct pc236_private *devpriv;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x4);
- if (ret)
- return ret;
-
- return amplc_pc236_common_attach(dev, dev->iobase, it->options[1], 0);
-}
-
-static const struct pc236_board pc236_boards[] = {
- {
- .name = "pc36at",
- },
-};
-
-static struct comedi_driver amplc_pc236_driver = {
- .driver_name = "amplc_pc236",
- .module = THIS_MODULE,
- .attach = pc236_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pc236_boards[0].name,
- .offset = sizeof(struct pc236_board),
- .num_names = ARRAY_SIZE(pc236_boards),
-};
-
-module_comedi_driver(amplc_pc236_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon PC36AT DIO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.h b/drivers/staging/comedi/drivers/amplc_pc236.h
deleted file mode 100644
index 91d6d9c065b5..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pc236.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * comedi/drivers/amplc_pc236.h
- * Header for "amplc_pc236", "amplc_pci236" and "amplc_pc236_common".
- *
- * Copyright (C) 2002-2014 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef AMPLC_PC236_H_INCLUDED
-#define AMPLC_PC236_H_INCLUDED
-
-#include <linux/types.h>
-
-struct comedi_device;
-
-struct pc236_board {
- const char *name;
- void (*intr_update_cb)(struct comedi_device *dev, bool enable);
- bool (*intr_chk_clr_cb)(struct comedi_device *dev);
-};
-
-struct pc236_private {
- unsigned long lcr_iobase; /* PLX PCI9052 config registers in PCIBAR1 */
- bool enable_irq;
-};
-
-int amplc_pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
- unsigned int irq, unsigned long req_irq_flags);
-
-#endif
diff --git a/drivers/staging/comedi/drivers/amplc_pc236_common.c b/drivers/staging/comedi/drivers/amplc_pc236_common.c
deleted file mode 100644
index 0c02d3245679..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pc236_common.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * comedi/drivers/amplc_pc236_common.c
- * Common support code for "amplc_pc236" and "amplc_pci236".
- *
- * Copyright (C) 2002-2014 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "amplc_pc236.h"
-#include "8255.h"
-
-static void pc236_intr_update(struct comedi_device *dev, bool enable)
-{
- const struct pc236_board *board = dev->board_ptr;
- struct pc236_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->enable_irq = enable;
- if (board->intr_update_cb)
- board->intr_update_cb(dev, enable);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-/*
- * This function is called when an interrupt occurs to check whether
- * the interrupt has been marked as enabled and was generated by the
- * board. If so, the function prepares the hardware for the next
- * interrupt.
- * Returns false if the interrupt should be ignored.
- */
-static bool pc236_intr_check(struct comedi_device *dev)
-{
- const struct pc236_board *board = dev->board_ptr;
- struct pc236_private *devpriv = dev->private;
- bool retval = false;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- if (devpriv->enable_irq) {
- if (board->intr_chk_clr_cb)
- retval = board->intr_chk_clr_cb(dev);
- else
- retval = true;
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return retval;
-}
-
-static int pc236_intr_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = 0;
- return insn->n;
-}
-
-static int pc236_intr_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
-	/* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-static int pc236_intr_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- pc236_intr_update(dev, true);
-
- return 0;
-}
-
-static int pc236_intr_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- pc236_intr_update(dev, false);
-
- return 0;
-}
-
-static irqreturn_t pc236_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- bool handled;
-
- handled = pc236_intr_check(dev);
- if (dev->attached && handled) {
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
- }
- return IRQ_RETVAL(handled);
-}
-
-int amplc_pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
- unsigned int irq, unsigned long req_irq_flags)
-{
- struct comedi_subdevice *s;
- int ret;
-
- dev->iobase = iobase;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* digital i/o subdevice (8255) */
- ret = subdev_8255_init(dev, s, NULL, 0x00);
- if (ret)
- return ret;
-
- s = &dev->subdevices[1];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_UNUSED;
- pc236_intr_update(dev, false);
- if (irq) {
- if (request_irq(irq, pc236_interrupt, req_irq_flags,
- dev->board_name, dev) >= 0) {
- dev->irq = irq;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pc236_intr_insn;
- s->len_chanlist = 1;
- s->do_cmdtest = pc236_intr_cmdtest;
- s->do_cmd = pc236_intr_cmd;
- s->cancel = pc236_intr_cancel;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(amplc_pc236_common_attach);
-
-static int __init amplc_pc236_common_init(void)
-{
- return 0;
-}
-module_init(amplc_pc236_common_init);
-
-static void __exit amplc_pc236_common_exit(void)
-{
-}
-module_exit(amplc_pc236_common_exit);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi helper for amplc_pc236 and amplc_pci236");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
deleted file mode 100644
index b1946ce6ecc1..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- comedi/drivers/amplc_pc263.c
- Driver for Amplicon PC263 and PCI263 relay boards.
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: amplc_pc263
-Description: Amplicon PC263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PC263 (pc263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options:
- [0] - I/O port base address
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
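
A hedged user-space sketch (the device node is an assumption): close one relay,
then read the output states back through the same subdevice:

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	unsigned int bit;

	if (!dev)
		return 1;
	comedi_dio_write(dev, 0, 3, 1);		/* output 1 closes the relay contacts */
	comedi_dio_read(dev, 0, 3, &bit);	/* read back the output state */
	printf("relay 3 state: %u\n", bit);
	comedi_close(dev);
	return 0;
}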
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-/* PC263 registers */
-
-/*
- * Board descriptions for Amplicon PC263.
- */
-
-struct pc263_board {
- const char *name;
-};
-
-static const struct pc263_board pc263_boards[] = {
- {
- .name = "pc263",
- },
-};
-
-static int pc263_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x2);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pc263_do_insn_bits;
- /* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
-
- return 0;
-}
-
-static struct comedi_driver amplc_pc263_driver = {
- .driver_name = "amplc_pc263",
- .module = THIS_MODULE,
- .attach = pc263_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pc263_boards[0].name,
- .offset = sizeof(struct pc263_board),
- .num_names = ARRAY_SIZE(pc263_boards),
-};
-
-module_comedi_driver(amplc_pc263_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon PC263 relay board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
deleted file mode 100644
index b2f7679a0116..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ /dev/null
@@ -1,1136 +0,0 @@
-/*
- * comedi/drivers/amplc_pci224.c
- * Driver for Amplicon PCI224 and PCI234 AO boards.
- *
- * Copyright (C) 2005 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: amplc_pci224
- * Description: Amplicon PCI224, PCI234
- * Author: Ian Abbott <abbotti@mev.co.uk>
- * Devices: [Amplicon] PCI224 (amplc_pci224), PCI234
- * Updated: Thu, 31 Jul 2014 11:08:03 +0000
- * Status: works, but see caveats
- *
- * Supports:
- *
- * - ao_insn read/write
- * - ao_do_cmd mode with the following sources:
- *
- * - start_src TRIG_INT TRIG_EXT
- * - scan_begin_src TRIG_TIMER TRIG_EXT
- * - convert_src TRIG_NOW
- * - scan_end_src TRIG_COUNT
- * - stop_src TRIG_COUNT TRIG_EXT TRIG_NONE
- *
- * The channel list must contain at least one channel with no repeated
- * channels. The scan end count must equal the number of channels in
- * the channel list.
- *
- * There is only one external trigger source so only one of start_src,
- * scan_begin_src or stop_src may use TRIG_EXT.
- *
- * Configuration options:
- * none
- *
- * Manual configuration of PCI cards is not supported; they are configured
- * automatically.
- *
- * Output range selection - PCI224:
- *
- * Output ranges on PCI224 are partly software-selectable and partly
- * hardware-selectable according to jumper LK1. All channels are set
- * to the same range:
- *
- * - LK1 position 1-2 (factory default) corresponds to the following
- * comedi ranges:
- *
- * 0: [-10V,+10V]; 1: [-5V,+5V]; 2: [-2.5V,+2.5V], 3: [-1.25V,+1.25V],
- * 4: [0,+10V], 5: [0,+5V], 6: [0,+2.5V], 7: [0,+1.25V]
- *
- * - LK1 position 2-3 corresponds to the following Comedi ranges, using
- * an external voltage reference:
- *
- * 0: [-Vext,+Vext],
- * 1: [0,+Vext]
- *
- * Output range selection - PCI234:
- *
- * Output ranges on PCI234 are hardware-selectable according to jumper
- * LK1 which affects all channels, and jumpers LK2, LK3, LK4 and LK5
- * which affect channels 0, 1, 2 and 3 individually. LK1 chooses between
- * an internal 5V reference and an external voltage reference (Vext).
- * LK2/3/4/5 choose (per channel) to double the reference or not according
- * to the following table:
- *
- * LK1 position LK2/3/4/5 pos Comedi range
- * ------------- ------------- --------------
- * 2-3 (factory) 1-2 (factory) 0: [-10V,+10V]
- * 2-3 (factory) 2-3 1: [-5V,+5V]
- * 1-2 1-2 (factory) 2: [-2*Vext,+2*Vext]
- * 1-2 2-3 3: [-Vext,+Vext]
- *
- * Caveats:
- *
- * 1) All channels on the PCI224 share the same range. Any change to the
- * range as a result of insn_write or a streaming command will affect
- * the output voltages of all channels, including those not specified
- * by the instruction or command.
- *
- * 2) For the analog output command, the first scan may be triggered
- * falsely at the start of acquisition. This occurs when the DAC scan
- * trigger source is switched from 'none' to 'timer' (scan_begin_src =
- * TRIG_TIMER) or 'external' (scan_begin_src == TRIG_EXT) at the start
- * of acquisition and the trigger source is at logic level 1 at the
- * time of the switch. This is very likely for TRIG_TIMER. For
- * TRIG_EXT, it depends on the state of the external line and whether
- * the CR_INVERT flag has been set. The remaining scans are triggered
- * correctly.
- */
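
A hedged user-space sketch of a single analog output write with comedilib; the
device node and the assumption that the AO subdevice is number 0 are not taken
from this driver text. Per caveat 1 above, selecting a range affects all
channels on the PCI224:

#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	unsigned int subdev = 0, chan = 0, range = 0;	/* range 0: [-10V,+10V] */
	comedi_range *rng;
	lsampl_t maxdata, value;

	if (!dev)
		return 1;
	rng = comedi_get_range(dev, subdev, chan, range);
	maxdata = comedi_get_maxdata(dev, subdev, chan);
	value = comedi_from_phys(2.5, rng, maxdata);	/* volts -> raw code */
	comedi_data_write(dev, subdev, chan, range, AREF_GROUND, value);
	comedi_close(dev);
	return 0;
}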
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-
-/*
- * PCI224/234 i/o space 1 (PCIBAR2) registers.
- */
-#define PCI224_Z2_BASE 0x14 /* 82C54 counter/timer */
-#define PCI224_ZCLK_SCE 0x1A /* Group Z Clock Configuration Register */
-#define PCI224_ZGAT_SCE 0x1D /* Group Z Gate Configuration Register */
-#define PCI224_INT_SCE 0x1E /* ISR Interrupt source mask register */
- /* /Interrupt status */
-
-/*
- * PCI224/234 i/o space 2 (PCIBAR3) 16-bit registers.
- */
-#define PCI224_DACDATA 0x00 /* (w-o) DAC FIFO data. */
-#define PCI224_SOFTTRIG 0x00 /* (r-o) DAC software scan trigger. */
-#define PCI224_DACCON 0x02 /* (r/w) DAC status/configuration. */
-#define PCI224_FIFOSIZ 0x04 /* (w-o) FIFO size for wraparound mode. */
-#define PCI224_DACCEN 0x06 /* (w-o) DAC channel enable register. */
-
-/*
- * DACCON values.
- */
-/* (r/w) Scan trigger. */
-#define PCI224_DACCON_TRIG_MASK (7 << 0)
-#define PCI224_DACCON_TRIG_NONE (0 << 0) /* none */
-#define PCI224_DACCON_TRIG_SW (1 << 0) /* software trig */
-#define PCI224_DACCON_TRIG_EXTP (2 << 0) /* ext +ve edge */
-#define PCI224_DACCON_TRIG_EXTN (3 << 0) /* ext -ve edge */
-#define PCI224_DACCON_TRIG_Z2CT0 (4 << 0) /* Z2 CT0 out */
-#define PCI224_DACCON_TRIG_Z2CT1 (5 << 0) /* Z2 CT1 out */
-#define PCI224_DACCON_TRIG_Z2CT2 (6 << 0) /* Z2 CT2 out */
-/* (r/w) Polarity (PCI224 only, PCI234 always bipolar!). */
-#define PCI224_DACCON_POLAR_MASK (1 << 3)
-#define PCI224_DACCON_POLAR_UNI (0 << 3) /* range [0,Vref] */
-#define PCI224_DACCON_POLAR_BI (1 << 3) /* range [-Vref,Vref] */
-/* (r/w) Internal Vref (PCI224 only, when LK1 in position 1-2). */
-#define PCI224_DACCON_VREF_MASK (3 << 4)
-#define PCI224_DACCON_VREF_1_25 (0 << 4) /* Vref = 1.25V */
-#define PCI224_DACCON_VREF_2_5 (1 << 4) /* Vref = 2.5V */
-#define PCI224_DACCON_VREF_5 (2 << 4) /* Vref = 5V */
-#define PCI224_DACCON_VREF_10 (3 << 4) /* Vref = 10V */
-/* (r/w) Wraparound mode enable (to play back stored waveform). */
-#define PCI224_DACCON_FIFOWRAP (1 << 7)
-/* (r/w) FIFO enable. It MUST be set! */
-#define PCI224_DACCON_FIFOENAB (1 << 8)
-/* (r/w) FIFO interrupt trigger level (most values are not very useful). */
-#define PCI224_DACCON_FIFOINTR_MASK (7 << 9)
-#define PCI224_DACCON_FIFOINTR_EMPTY (0 << 9) /* when empty */
-#define PCI224_DACCON_FIFOINTR_NEMPTY (1 << 9) /* when not empty */
-#define PCI224_DACCON_FIFOINTR_NHALF (2 << 9) /* when not half full */
-#define PCI224_DACCON_FIFOINTR_HALF (3 << 9) /* when half full */
-#define PCI224_DACCON_FIFOINTR_NFULL (4 << 9) /* when not full */
-#define PCI224_DACCON_FIFOINTR_FULL (5 << 9) /* when full */
-/* (r-o) FIFO fill level. */
-#define PCI224_DACCON_FIFOFL_MASK (7 << 12)
-#define PCI224_DACCON_FIFOFL_EMPTY (1 << 12) /* 0 */
-#define PCI224_DACCON_FIFOFL_ONETOHALF (0 << 12) /* [1,2048] */
-#define PCI224_DACCON_FIFOFL_HALFTOFULL (4 << 12) /* [2049,4095] */
-#define PCI224_DACCON_FIFOFL_FULL (6 << 12) /* 4096 */
-/* (r-o) DAC busy flag. */
-#define PCI224_DACCON_BUSY (1 << 15)
-/* (w-o) FIFO reset. */
-#define PCI224_DACCON_FIFORESET (1 << 12)
-/* (w-o) Global reset (not sure what it does). */
-#define PCI224_DACCON_GLOBALRESET (1 << 13)
-
-/*
- * DAC FIFO size.
- */
-#define PCI224_FIFO_SIZE 4096
-
-/*
- * DAC FIFO guaranteed minimum room available, depending on reported fill level.
- * The maximum room available depends on the reported fill level and how much
- * has been written!
- */
-#define PCI224_FIFO_ROOM_EMPTY PCI224_FIFO_SIZE
-#define PCI224_FIFO_ROOM_ONETOHALF (PCI224_FIFO_SIZE / 2)
-#define PCI224_FIFO_ROOM_HALFTOFULL 1
-#define PCI224_FIFO_ROOM_FULL 0
-
-/*
- * Counter/timer clock input configuration sources.
- */
-#define CLK_CLK 0 /* reserved (channel-specific clock) */
-#define CLK_10MHZ 1 /* internal 10 MHz clock */
-#define CLK_1MHZ 2 /* internal 1 MHz clock */
-#define CLK_100KHZ 3 /* internal 100 kHz clock */
-#define CLK_10KHZ 4 /* internal 10 kHz clock */
-#define CLK_1KHZ 5 /* internal 1 kHz clock */
-#define CLK_OUTNM1 6 /* output of channel-1 modulo total */
-#define CLK_EXT 7 /* external clock */
-/* Macro to construct clock input configuration register value. */
-#define CLK_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
-
-/*
- * Counter/timer gate input configuration sources.
- */
-#define GAT_VCC 0 /* VCC (i.e. enabled) */
-#define GAT_GND 1 /* GND (i.e. disabled) */
-#define GAT_EXT 2 /* reserved (external gate input) */
-#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
-/* Macro to construct gate input configuration register value. */
-#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
-
-/*
- * Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI224 and PCI234:
- *
- * Channel's Channel's
- * clock input gate input
- * Channel CLK_OUTNM1 GAT_NOUTNM2
- * ------- ---------- -----------
- * Z2-CT0 Z2-CT2-OUT /Z2-CT1-OUT
- * Z2-CT1 Z2-CT0-OUT /Z2-CT2-OUT
- * Z2-CT2 Z2-CT1-OUT /Z2-CT0-OUT
- */
-
-/*
- * Interrupt enable/status bits
- */
-#define PCI224_INTR_EXT 0x01 /* rising edge on external input */
-#define PCI224_INTR_DAC 0x04 /* DAC (FIFO) interrupt */
-#define PCI224_INTR_Z2CT1 0x20 /* rising edge on Z2-CT1 output */
-
-#define PCI224_INTR_EDGE_BITS (PCI224_INTR_EXT | PCI224_INTR_Z2CT1)
-#define PCI224_INTR_LEVEL_BITS	PCI224_INTR_DAC
-
-/*
- * Handy macros.
- */
-
-/* Combine old and new bits. */
-#define COMBINE(old, new, mask) (((old) & ~(mask)) | ((new) & (mask)))
-
-/* Current CPU. XXX should this be hard_smp_processor_id()? */
-#define THISCPU smp_processor_id()
-
-/* State bits for use with atomic bit operations. */
-#define AO_CMD_STARTED 0
-
-/*
- * Range tables.
- */
-
-/*
- * The ranges for PCI224.
- *
- * These are partly hardware-selectable by jumper LK1 and partly
- * software-selectable.
- *
- * All channels share the same hardware range.
- */
-static const struct comedi_lrange range_pci224 = {
- 10, {
- /* jumper LK1 in position 1-2 (factory default) */
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- /* jumper LK1 in position 2-3 */
- RANGE_ext(-1, 1), /* bipolar [-Vext,+Vext] */
- RANGE_ext(0, 1), /* unipolar [0,+Vext] */
- }
-};
-
-static const unsigned short hwrange_pci224[10] = {
- /* jumper LK1 in position 1-2 (factory default) */
- PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_10,
- PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_5,
- PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_2_5,
- PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_1_25,
- PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_10,
- PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_5,
- PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_2_5,
- PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_1_25,
- /* jumper LK1 in position 2-3 */
- PCI224_DACCON_POLAR_BI,
- PCI224_DACCON_POLAR_UNI,
-};
-
-/* Used to check all channels set to the same range on PCI224. */
-static const unsigned char range_check_pci224[10] = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
-};
-
-/*
- * The ranges for PCI234.
- *
- * These are all hardware-selectable by jumper LK1 affecting all channels,
- * and jumpers LK2, LK3, LK4 and LK5 affecting channels 0, 1, 2 and 3
- * individually.
- */
-static const struct comedi_lrange range_pci234 = {
- 4, {
-		/* LK1: 1-2 (fact def), LK2/3/4/5: 2-3 (fact def) */
- BIP_RANGE(10),
- /* LK1: 1-2 (fact def), LK2/3/4/5: 1-2 */
- BIP_RANGE(5),
-		/* LK1: 2-3, LK2/3/4/5: 2-3 (fact def) */
- RANGE_ext(-2, 2), /* bipolar [-2*Vext,+2*Vext] */
- /* LK1: 2-3, LK2/3/4/5: 1-2 */
- RANGE_ext(-1, 1), /* bipolar [-Vext,+Vext] */
- }
-};
-
-/* N.B. PCI234 ignores the polarity bit, but software uses it. */
-static const unsigned short hwrange_pci234[4] = {
- PCI224_DACCON_POLAR_BI,
- PCI224_DACCON_POLAR_BI,
- PCI224_DACCON_POLAR_BI,
- PCI224_DACCON_POLAR_BI,
-};
-
-/* Used to check all channels use same LK1 setting on PCI234. */
-static const unsigned char range_check_pci234[4] = {
- 0, 0, 1, 1,
-};
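-
-/*
- * For example, a PCI234 chanlist mixing ranges 0 and 1 is accepted (both
- * need LK1 in position 1-2; LK2-LK5 are per-channel), but mixing range 1
- * with range 2 is rejected because range 2 needs LK1 in position 2-3.
- */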
-
-/*
- * Board descriptions.
- */
-
-enum pci224_model { pci224_model, pci234_model };
-
-struct pci224_board {
- const char *name;
- unsigned int ao_chans;
- unsigned int ao_bits;
- const struct comedi_lrange *ao_range;
- const unsigned short *ao_hwrange;
- const unsigned char *ao_range_check;
-};
-
-static const struct pci224_board pci224_boards[] = {
- [pci224_model] = {
- .name = "pci224",
- .ao_chans = 16,
- .ao_bits = 12,
- .ao_range = &range_pci224,
- .ao_hwrange = &hwrange_pci224[0],
- .ao_range_check = &range_check_pci224[0],
- },
- [pci234_model] = {
- .name = "pci234",
- .ao_chans = 4,
- .ao_bits = 16,
- .ao_range = &range_pci234,
- .ao_hwrange = &hwrange_pci234[0],
- .ao_range_check = &range_check_pci234[0],
- },
-};
-
-struct pci224_private {
- unsigned long iobase1;
- unsigned long state;
- spinlock_t ao_spinlock; /* spinlock for AO command handling */
- unsigned short *ao_scan_vals;
- unsigned char *ao_scan_order;
- int intr_cpuid;
- short intr_running;
- unsigned short daccon;
- unsigned short ao_enab; /* max 16 channels so 'short' will do */
- unsigned char intsce;
-};
-
-/*
- * Called from the 'insn_write' function to perform a single write.
- */
-static void
-pci224_ao_set_data(struct comedi_device *dev, int chan, int range,
- unsigned int data)
-{
- const struct pci224_board *board = dev->board_ptr;
- struct pci224_private *devpriv = dev->private;
- unsigned short mangled;
-
- /* Enable the channel. */
- outw(1 << chan, dev->iobase + PCI224_DACCEN);
- /* Set range and reset FIFO. */
- devpriv->daccon = COMBINE(devpriv->daccon, board->ao_hwrange[range],
- PCI224_DACCON_POLAR_MASK |
- PCI224_DACCON_VREF_MASK);
- outw(devpriv->daccon | PCI224_DACCON_FIFORESET,
- dev->iobase + PCI224_DACCON);
- /*
- * Mangle the data. The hardware expects:
- * - bipolar: 16-bit 2's complement
- * - unipolar: 16-bit unsigned
- */
- mangled = (unsigned short)data << (16 - board->ao_bits);
- if ((devpriv->daccon & PCI224_DACCON_POLAR_MASK) ==
- PCI224_DACCON_POLAR_BI) {
- mangled ^= 0x8000;
- }
- /* Write mangled data to the FIFO. */
- outw(mangled, dev->iobase + PCI224_DACDATA);
- /* Trigger the conversion. */
- inw(dev->iobase + PCI224_SOFTTRIG);
-}
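-
-/*
- * For example, on the 12-bit PCI224 a comedi sample of 0x000 is shifted to
- * 0x0000 and, for a bipolar range, XORed with 0x8000 to give 0x8000 (the
- * most negative two's complement code); full-scale 0xFFF becomes 0x7FF0.
- */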
-
-static int pci224_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- pci224_ao_set_data(dev, chan, range, val);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-/*
- * Kills a command running on the AO subdevice.
- */
-static void pci224_ao_stop(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci224_private *devpriv = dev->private;
- unsigned long flags;
-
- if (!test_and_clear_bit(AO_CMD_STARTED, &devpriv->state))
- return;
-
- spin_lock_irqsave(&devpriv->ao_spinlock, flags);
- /* Kill the interrupts. */
- devpriv->intsce = 0;
- outb(0, devpriv->iobase1 + PCI224_INT_SCE);
- /*
- * Interrupt routine may or may not be running. We may or may not
- * have been called from the interrupt routine (directly or
- * indirectly via a comedi_events() callback routine). It's highly
- * unlikely that we've been called from some other interrupt routine
- * but who knows what strange things coders get up to!
- *
- * If the interrupt routine is currently running, wait for it to
- * finish, unless we appear to have been called via the interrupt
- * routine.
- */
- while (devpriv->intr_running && devpriv->intr_cpuid != THISCPU) {
- spin_unlock_irqrestore(&devpriv->ao_spinlock, flags);
- spin_lock_irqsave(&devpriv->ao_spinlock, flags);
- }
- spin_unlock_irqrestore(&devpriv->ao_spinlock, flags);
- /* Reconfigure DAC for insn_write usage. */
- outw(0, dev->iobase + PCI224_DACCEN); /* Disable channels. */
- devpriv->daccon =
- COMBINE(devpriv->daccon,
- PCI224_DACCON_TRIG_SW | PCI224_DACCON_FIFOINTR_EMPTY,
- PCI224_DACCON_TRIG_MASK | PCI224_DACCON_FIFOINTR_MASK);
- outw(devpriv->daccon | PCI224_DACCON_FIFORESET,
- dev->iobase + PCI224_DACCON);
-}
-
-/*
- * Handles start of acquisition for the AO subdevice.
- */
-static void pci224_ao_start(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci224_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long flags;
-
- set_bit(AO_CMD_STARTED, &devpriv->state);
-
- /* Enable interrupts. */
- spin_lock_irqsave(&devpriv->ao_spinlock, flags);
- if (cmd->stop_src == TRIG_EXT)
- devpriv->intsce = PCI224_INTR_EXT | PCI224_INTR_DAC;
- else
- devpriv->intsce = PCI224_INTR_DAC;
-
- outb(devpriv->intsce, devpriv->iobase1 + PCI224_INT_SCE);
- spin_unlock_irqrestore(&devpriv->ao_spinlock, flags);
-}
-
-/*
- * Handles interrupts from the DAC FIFO.
- */
-static void pci224_ao_handle_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci224_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int num_scans = comedi_nscans_left(s, 0);
- unsigned int room;
- unsigned short dacstat;
- unsigned int i, n;
-
- /* Determine how much room is in the FIFO (in samples). */
- dacstat = inw(dev->iobase + PCI224_DACCON);
- switch (dacstat & PCI224_DACCON_FIFOFL_MASK) {
- case PCI224_DACCON_FIFOFL_EMPTY:
- room = PCI224_FIFO_ROOM_EMPTY;
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- /* FIFO empty at end of counted acquisition. */
- s->async->events |= COMEDI_CB_EOA;
- comedi_handle_events(dev, s);
- return;
- }
- break;
- case PCI224_DACCON_FIFOFL_ONETOHALF:
- room = PCI224_FIFO_ROOM_ONETOHALF;
- break;
- case PCI224_DACCON_FIFOFL_HALFTOFULL:
- room = PCI224_FIFO_ROOM_HALFTOFULL;
- break;
- default:
- room = PCI224_FIFO_ROOM_FULL;
- break;
- }
- if (room >= PCI224_FIFO_ROOM_ONETOHALF) {
- /* FIFO is less than half-full. */
- if (num_scans == 0) {
- /* Nothing left to put in the FIFO. */
- dev_err(dev->class_dev, "AO buffer underrun\n");
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
- }
- /* Determine how many new scans can be put in the FIFO. */
- room /= cmd->chanlist_len;
-
- /* Determine how many scans to process. */
- if (num_scans > room)
- num_scans = room;
-
- /* Process scans. */
- for (n = 0; n < num_scans; n++) {
- comedi_buf_read_samples(s, &devpriv->ao_scan_vals[0],
- cmd->chanlist_len);
- for (i = 0; i < cmd->chanlist_len; i++) {
- outw(devpriv->ao_scan_vals[devpriv->ao_scan_order[i]],
- dev->iobase + PCI224_DACDATA);
- }
- }
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- /*
- * Change FIFO interrupt trigger level to wait
- * until FIFO is empty.
- */
- devpriv->daccon = COMBINE(devpriv->daccon,
- PCI224_DACCON_FIFOINTR_EMPTY,
- PCI224_DACCON_FIFOINTR_MASK);
- outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
- }
- if ((devpriv->daccon & PCI224_DACCON_TRIG_MASK) ==
- PCI224_DACCON_TRIG_NONE) {
- unsigned short trig;
-
- /*
- * This is the initial DAC FIFO interrupt at the
- * start of the acquisition. The DAC's scan trigger
- * has been set to 'none' up until now.
- *
- * Now that data has been written to the FIFO, the
- * DAC's scan trigger source can be set to the
- * correct value.
- *
- * BUG: The first scan will be triggered immediately
- * if the scan trigger source is at logic level 1.
- */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- trig = PCI224_DACCON_TRIG_Z2CT0;
- } else {
- /* cmd->scan_begin_src == TRIG_EXT */
- if (cmd->scan_begin_arg & CR_INVERT)
- trig = PCI224_DACCON_TRIG_EXTN;
- else
- trig = PCI224_DACCON_TRIG_EXTP;
- }
- devpriv->daccon =
- COMBINE(devpriv->daccon, trig, PCI224_DACCON_TRIG_MASK);
- outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
- }
-
- comedi_handle_events(dev, s);
-}
-
-static int pci224_ao_inttrig_start(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- s->async->inttrig = NULL;
- pci224_ao_start(dev, s);
-
- return 1;
-}
-
-static int pci224_ao_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct pci224_board *board = dev->board_ptr;
- unsigned int range_check_0;
- unsigned int chan_mask = 0;
- int i;
-
- range_check_0 = board->ao_range_check[CR_RANGE(cmd->chanlist[0])];
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (chan_mask & (1 << chan)) {
- dev_dbg(dev->class_dev,
-				"%s: chanlist must not contain duplicate channels\n",
- __func__);
- return -EINVAL;
- }
- chan_mask |= 1 << chan;
-
- if (board->ao_range_check[CR_RANGE(cmd->chanlist[i])] !=
- range_check_0) {
- dev_dbg(dev->class_dev,
- "%s: entries in chanlist have incompatible ranges\n",
- __func__);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-#define MAX_SCAN_PERIOD 0xFFFFFFFFU
-#define MIN_SCAN_PERIOD 2500
-#define CONVERT_PERIOD 625
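-
-/*
- * The minimum scan period enforced below is chanlist_len * CONVERT_PERIOD,
- * but never less than MIN_SCAN_PERIOD; for example, a 16-channel scan needs
- * at least 16 * 625 ns = 10000 ns (a 100 kHz scan rate), while four channels
- * or fewer hit the 2500 ns floor.
- */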
-
-/*
- * 'do_cmdtest' function for AO subdevice.
- */
-static int
-pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_EXT | TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src,
- TRIG_COUNT | TRIG_EXT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- /*
- * There's only one external trigger signal (which makes these
- * tests easier). Only one thing can use it.
- */
- arg = 0;
- if (cmd->start_src & TRIG_EXT)
- arg++;
- if (cmd->scan_begin_src & TRIG_EXT)
- arg++;
- if (cmd->stop_src & TRIG_EXT)
- arg++;
- if (arg > 1)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_INT:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- /* Force to external trigger 0. */
- if (cmd->start_arg & ~CR_FLAGS_MASK) {
- cmd->start_arg =
- COMBINE(cmd->start_arg, 0, ~CR_FLAGS_MASK);
- err |= -EINVAL;
- }
- /* The only flag allowed is CR_EDGE, which is ignored. */
- if (cmd->start_arg & CR_FLAGS_MASK & ~CR_EDGE) {
- cmd->start_arg = COMBINE(cmd->start_arg, 0,
- CR_FLAGS_MASK & ~CR_EDGE);
- err |= -EINVAL;
- }
- break;
- }
-
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- MAX_SCAN_PERIOD);
-
- arg = cmd->chanlist_len * CONVERT_PERIOD;
- if (arg < MIN_SCAN_PERIOD)
- arg = MIN_SCAN_PERIOD;
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
- break;
- case TRIG_EXT:
- /* Force to external trigger 0. */
- if (cmd->scan_begin_arg & ~CR_FLAGS_MASK) {
- cmd->scan_begin_arg =
- COMBINE(cmd->scan_begin_arg, 0, ~CR_FLAGS_MASK);
- err |= -EINVAL;
- }
- /* Only allow flags CR_EDGE and CR_INVERT. Ignore CR_EDGE. */
- if (cmd->scan_begin_arg & CR_FLAGS_MASK &
- ~(CR_EDGE | CR_INVERT)) {
- cmd->scan_begin_arg =
- COMBINE(cmd->scan_begin_arg, 0,
- CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT));
- err |= -EINVAL;
- }
- break;
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- break;
- case TRIG_EXT:
- /* Force to external trigger 0. */
- if (cmd->stop_arg & ~CR_FLAGS_MASK) {
- cmd->stop_arg =
- COMBINE(cmd->stop_arg, 0, ~CR_FLAGS_MASK);
- err |= -EINVAL;
- }
- /* The only flag allowed is CR_EDGE, which is ignored. */
- if (cmd->stop_arg & CR_FLAGS_MASK & ~CR_EDGE) {
- cmd->stop_arg =
- COMBINE(cmd->stop_arg, 0, CR_FLAGS_MASK & ~CR_EDGE);
- }
- break;
- case TRIG_NONE:
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
- break;
- }
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments. */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- /* Use two timers. */
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= pci224_ao_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static void pci224_ao_start_pacer(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci224_private *devpriv = dev->private;
-
- /*
- * The output of timer Z2-0 will be used as the scan trigger
- * source.
- */
- /* Make sure Z2-0 is gated on. */
- outb(GAT_CONFIG(0, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE);
- /* Cascading with Z2-2. */
- /* Make sure Z2-2 is gated on. */
- outb(GAT_CONFIG(2, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE);
- /* Z2-2 needs 10 MHz clock. */
- outb(CLK_CONFIG(2, CLK_10MHZ), devpriv->iobase1 + PCI224_ZCLK_SCE);
- /* Z2-0 is clocked from Z2-2's output. */
- outb(CLK_CONFIG(0, CLK_OUTNM1), devpriv->iobase1 + PCI224_ZCLK_SCE);
-
- comedi_8254_pacer_enable(dev->pacer, 2, 0, false);
-}
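-
-/*
- * For example, a 1 ms scan period corresponds to Z2-2 dividing the 10 MHz
- * clock by 100 and Z2-0 dividing Z2-2's output by a further 100
- * (100 * 100 * 100 ns = 1 ms); the divisor pair itself is chosen from
- * scan_begin_arg by comedi_8254_cascade_ns_to_timer() in the cmdtest.
- */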
-
-static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- const struct pci224_board *board = dev->board_ptr;
- struct pci224_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int range;
- unsigned int i, j;
- unsigned int ch;
- unsigned int rank;
- unsigned long flags;
-
- /* Cannot handle null/empty chanlist. */
- if (!cmd->chanlist || cmd->chanlist_len == 0)
- return -EINVAL;
-
- /* Determine which channels are enabled and their load order. */
- devpriv->ao_enab = 0;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- ch = CR_CHAN(cmd->chanlist[i]);
- devpriv->ao_enab |= 1U << ch;
- rank = 0;
- for (j = 0; j < cmd->chanlist_len; j++) {
- if (CR_CHAN(cmd->chanlist[j]) < ch)
- rank++;
- }
- devpriv->ao_scan_order[rank] = i;
- }
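-
-	/*
-	 * For example, chanlist {3, 1, 2} yields ao_scan_order {1, 2, 0},
-	 * so pci224_ao_handle_fifo() writes each scan to the FIFO in
-	 * ascending channel order: entry 1 (channel 1) first, then entry 2
-	 * (channel 2), then entry 0 (channel 3).
-	 */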
-
- /* Set enabled channels. */
- outw(devpriv->ao_enab, dev->iobase + PCI224_DACCEN);
-
- /* Determine range and polarity. All channels the same. */
- range = CR_RANGE(cmd->chanlist[0]);
-
- /*
- * Set DAC range and polarity.
- * Set DAC scan trigger source to 'none'.
- * Set DAC FIFO interrupt trigger level to 'not half full'.
- * Reset DAC FIFO.
- *
- * N.B. DAC FIFO interrupts are currently disabled.
- */
- devpriv->daccon =
- COMBINE(devpriv->daccon,
- board->ao_hwrange[range] | PCI224_DACCON_TRIG_NONE |
- PCI224_DACCON_FIFOINTR_NHALF,
- PCI224_DACCON_POLAR_MASK | PCI224_DACCON_VREF_MASK |
- PCI224_DACCON_TRIG_MASK | PCI224_DACCON_FIFOINTR_MASK);
- outw(devpriv->daccon | PCI224_DACCON_FIFORESET,
- dev->iobase + PCI224_DACCON);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- comedi_8254_update_divisors(dev->pacer);
- pci224_ao_start_pacer(dev, s);
- }
-
- spin_lock_irqsave(&devpriv->ao_spinlock, flags);
- if (cmd->start_src == TRIG_INT) {
- s->async->inttrig = pci224_ao_inttrig_start;
- } else { /* TRIG_EXT */
- /* Enable external interrupt trigger to start acquisition. */
- devpriv->intsce |= PCI224_INTR_EXT;
- outb(devpriv->intsce, devpriv->iobase1 + PCI224_INT_SCE);
- }
- spin_unlock_irqrestore(&devpriv->ao_spinlock, flags);
-
- return 0;
-}
-
-/*
- * 'cancel' function for AO subdevice.
- */
-static int pci224_ao_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- pci224_ao_stop(dev, s);
- return 0;
-}
-
-/*
- * 'munge' data for AO command.
- */
-static void
-pci224_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
- void *data, unsigned int num_bytes, unsigned int chan_index)
-{
- const struct pci224_board *board = dev->board_ptr;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned short *array = data;
- unsigned int length = num_bytes / sizeof(*array);
- unsigned int offset;
- unsigned int shift;
- unsigned int i;
-
- /* The hardware expects 16-bit numbers. */
- shift = 16 - board->ao_bits;
- /* Channels will be all bipolar or all unipolar. */
- if ((board->ao_hwrange[CR_RANGE(cmd->chanlist[0])] &
- PCI224_DACCON_POLAR_MASK) == PCI224_DACCON_POLAR_UNI) {
- /* Unipolar */
- offset = 0;
- } else {
- /* Bipolar */
- offset = 32768;
- }
- /* Munge the data. */
- for (i = 0; i < length; i++)
- array[i] = (array[i] << shift) - offset;
-}
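-
-/*
- * Modulo 16 bits, subtracting 32768 here is equivalent to the XOR with
- * 0x8000 in pci224_ao_set_data(): for example 0x0000 - 32768 wraps to
- * 0x8000 and 0xFFF0 - 32768 gives 0x7FF0.
- */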
-
-/*
- * Interrupt handler.
- */
-static irqreturn_t pci224_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct pci224_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- struct comedi_cmd *cmd;
- unsigned char intstat, valid_intstat;
- unsigned char curenab;
- int retval = 0;
- unsigned long flags;
-
- intstat = inb(devpriv->iobase1 + PCI224_INT_SCE) & 0x3F;
- if (intstat) {
- retval = 1;
- spin_lock_irqsave(&devpriv->ao_spinlock, flags);
- valid_intstat = devpriv->intsce & intstat;
- /* Temporarily disable interrupt sources. */
- curenab = devpriv->intsce & ~intstat;
- outb(curenab, devpriv->iobase1 + PCI224_INT_SCE);
- devpriv->intr_running = 1;
- devpriv->intr_cpuid = THISCPU;
- spin_unlock_irqrestore(&devpriv->ao_spinlock, flags);
- if (valid_intstat) {
- cmd = &s->async->cmd;
- if (valid_intstat & PCI224_INTR_EXT) {
- devpriv->intsce &= ~PCI224_INTR_EXT;
- if (cmd->start_src == TRIG_EXT)
- pci224_ao_start(dev, s);
- else if (cmd->stop_src == TRIG_EXT)
- pci224_ao_stop(dev, s);
- }
- if (valid_intstat & PCI224_INTR_DAC)
- pci224_ao_handle_fifo(dev, s);
- }
- /* Reenable interrupt sources. */
- spin_lock_irqsave(&devpriv->ao_spinlock, flags);
- if (curenab != devpriv->intsce) {
- outb(devpriv->intsce,
- devpriv->iobase1 + PCI224_INT_SCE);
- }
- devpriv->intr_running = 0;
- spin_unlock_irqrestore(&devpriv->ao_spinlock, flags);
- }
- return IRQ_RETVAL(retval);
-}
-
-static int
-pci224_auto_attach(struct comedi_device *dev, unsigned long context_model)
-{
- struct pci_dev *pci_dev = comedi_to_pci_dev(dev);
- const struct pci224_board *board = NULL;
- struct pci224_private *devpriv;
- struct comedi_subdevice *s;
- unsigned int irq;
- int ret;
-
- if (context_model < ARRAY_SIZE(pci224_boards))
- board = &pci224_boards[context_model];
- if (!board || !board->name) {
- dev_err(dev->class_dev,
- "amplc_pci224: BUG! cannot determine board type!\n");
- return -EINVAL;
- }
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- dev_info(dev->class_dev, "amplc_pci224: attach pci %s - %s\n",
- pci_name(pci_dev), dev->board_name);
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- spin_lock_init(&devpriv->ao_spinlock);
-
- devpriv->iobase1 = pci_resource_start(pci_dev, 2);
- dev->iobase = pci_resource_start(pci_dev, 3);
- irq = pci_dev->irq;
-
- /* Allocate buffer to hold values for AO channel scan. */
- devpriv->ao_scan_vals = kmalloc(sizeof(devpriv->ao_scan_vals[0]) *
- board->ao_chans, GFP_KERNEL);
- if (!devpriv->ao_scan_vals)
- return -ENOMEM;
-
- /* Allocate buffer to hold AO channel scan order. */
- devpriv->ao_scan_order = kmalloc(sizeof(devpriv->ao_scan_order[0]) *
- board->ao_chans, GFP_KERNEL);
- if (!devpriv->ao_scan_order)
- return -ENOMEM;
-
- /* Disable interrupt sources. */
- devpriv->intsce = 0;
- outb(0, devpriv->iobase1 + PCI224_INT_SCE);
-
- /* Initialize the DAC hardware. */
- outw(PCI224_DACCON_GLOBALRESET, dev->iobase + PCI224_DACCON);
- outw(0, dev->iobase + PCI224_DACCEN);
- outw(0, dev->iobase + PCI224_FIFOSIZ);
- devpriv->daccon = PCI224_DACCON_TRIG_SW | PCI224_DACCON_POLAR_BI |
- PCI224_DACCON_FIFOENAB | PCI224_DACCON_FIFOINTR_EMPTY;
- outw(devpriv->daccon | PCI224_DACCON_FIFORESET,
- dev->iobase + PCI224_DACCON);
-
- dev->pacer = comedi_8254_init(devpriv->iobase1 + PCI224_Z2_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* Analog output subdevice. */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
- s->n_chan = board->ao_chans;
- s->maxdata = (1 << board->ao_bits) - 1;
- s->range_table = board->ao_range;
- s->insn_write = pci224_ao_insn_write;
- s->len_chanlist = s->n_chan;
- dev->write_subdev = s;
- s->do_cmd = pci224_ao_cmd;
- s->do_cmdtest = pci224_ao_cmdtest;
- s->cancel = pci224_ao_cancel;
- s->munge = pci224_ao_munge;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- if (irq) {
- ret = request_irq(irq, pci224_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "error! unable to allocate irq %u\n", irq);
- return ret;
- }
- dev->irq = irq;
- }
-
- return 0;
-}
-
-static void pci224_detach(struct comedi_device *dev)
-{
- struct pci224_private *devpriv = dev->private;
-
- comedi_pci_detach(dev);
- if (devpriv) {
- kfree(devpriv->ao_scan_vals);
- kfree(devpriv->ao_scan_order);
- }
-}
-
-static struct comedi_driver amplc_pci224_driver = {
- .driver_name = "amplc_pci224",
- .module = THIS_MODULE,
- .detach = pci224_detach,
- .auto_attach = pci224_auto_attach,
- .board_name = &pci224_boards[0].name,
- .offset = sizeof(struct pci224_board),
- .num_names = ARRAY_SIZE(pci224_boards),
-};
-
-static int amplc_pci224_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &amplc_pci224_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id amplc_pci224_pci_table[] = {
- { PCI_VDEVICE(AMPLICON, 0x0007), pci224_model },
- { PCI_VDEVICE(AMPLICON, 0x0008), pci234_model },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, amplc_pci224_pci_table);
-
-static struct pci_driver amplc_pci224_pci_driver = {
- .name = "amplc_pci224",
- .id_table = amplc_pci224_pci_table,
- .probe = amplc_pci224_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(amplc_pci224_driver, amplc_pci224_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon PCI224 and PCI234 AO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
deleted file mode 100644
index 5c5c4e2ec3d5..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ /dev/null
@@ -1,2568 +0,0 @@
-/*
- * comedi/drivers/amplc_pci230.c
- * Driver for Amplicon PCI230 and PCI260 Multifunction I/O boards.
- *
- * Copyright (C) 2001 Allan Willcox <allanwillcox@ozemail.com.au>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: amplc_pci230
- * Description: Amplicon PCI230, PCI260 Multifunction I/O boards
- * Author: Allan Willcox <allanwillcox@ozemail.com.au>,
- * Steve D Sharples <steve.sharples@nottingham.ac.uk>,
- * Ian Abbott <abbotti@mev.co.uk>
- * Updated: Mon, 01 Sep 2014 10:09:16 +0000
- * Devices: [Amplicon] PCI230 (amplc_pci230), PCI230+, PCI260, PCI260+
- * Status: works
- *
- * Configuration options:
- * none
- *
- * Manual configuration of PCI cards is not supported; they are configured
- * automatically.
- *
- * The PCI230+ and PCI260+ have the same PCI device IDs as the PCI230 and
- * PCI260, but can be distinguished by the size of the PCI regions. A
- * card will be configured as a "+" model if detected as such.
- *
- * Subdevices:
- *
- * PCI230(+) PCI260(+)
- * --------- ---------
- * Subdevices 3 1
- * 0 AI AI
- * 1 AO
- * 2 DIO
- *
- * AI Subdevice:
- *
- * The AI subdevice has 16 single-ended channels or 8 differential
- * channels.
- *
- * The PCI230 and PCI260 cards have 12-bit resolution. The PCI230+ and
- * PCI260+ cards have 16-bit resolution.
- *
- * For differential mode, use inputs 2N and 2N+1 for channel N (e.g. use
- * inputs 14 and 15 for channel 7). If the card is physically a PCI230
- * or PCI260 then it actually uses a "pseudo-differential" mode where the
- * inputs are sampled a few microseconds apart. The PCI230+ and PCI260+
- * use true differential sampling. Another difference is that if the
- * card is physically a PCI230 or PCI260, the inverting input is 2N,
- * whereas for a PCI230+ or PCI260+ the inverting input is 2N+1. So if a
- * PCI230 is physically replaced by a PCI230+ (or a PCI260 with a
- * PCI260+) and differential mode is used, the differential inputs need
- * to be physically swapped on the connector.
- *
- * The following input ranges are supported:
- *
- * 0 => [-10, +10] V
- * 1 => [-5, +5] V
- * 2 => [-2.5, +2.5] V
- * 3 => [-1.25, +1.25] V
- * 4 => [0, 10] V
- * 5 => [0, 5] V
- * 6 => [0, 2.5] V
- *
- * AI Commands:
- *
- * +=========+==============+===========+============+==========+
- * |start_src|scan_begin_src|convert_src|scan_end_src| stop_src |
- * +=========+==============+===========+============+==========+
- * |TRIG_NOW | TRIG_FOLLOW |TRIG_TIMER | TRIG_COUNT |TRIG_NONE |
- * |TRIG_INT | |TRIG_EXT(3)| |TRIG_COUNT|
- * | | |TRIG_INT | | |
- * | |--------------|-----------| | |
- * | | TRIG_TIMER(1)|TRIG_TIMER | | |
- * | | TRIG_EXT(2) | | | |
- * | | TRIG_INT | | | |
- * +---------+--------------+-----------+------------+----------+
- *
- * Note 1: If AI command and AO command are used simultaneously, only
- * one may have scan_begin_src == TRIG_TIMER.
- *
- * Note 2: For PCI230 and PCI230+, scan_begin_src == TRIG_EXT uses
- * DIO channel 16 (pin 49) which will need to be configured as
- * a digital input. For PCI260+, the EXTTRIG/EXTCONVCLK input
- * (pin 17) is used instead. For PCI230, scan_begin_src ==
- * TRIG_EXT is not supported. The trigger is a rising edge
- * on the input.
- *
- * Note 3: For convert_src == TRIG_EXT, the EXTTRIG/EXTCONVCLK input
- * (pin 25 on PCI230(+), pin 17 on PCI260(+)) is used. The
- * convert_arg value is interpreted as follows:
- *
- * convert_arg == (CR_EDGE | 0) => rising edge
- * convert_arg == (CR_EDGE | CR_INVERT | 0) => falling edge
- * convert_arg == 0 => falling edge (backwards compatibility)
- * convert_arg == 1 => rising edge (backwards compatibility)
- *
- * All entries in the channel list must use the same analogue reference.
- * If the analogue reference is not AREF_DIFF (not differential) each
- * pair of channel numbers (0 and 1, 2 and 3, etc.) must use the same
- * input range. The input ranges used in the sequence must be all
- * bipolar (ranges 0 to 3) or all unipolar (ranges 4 to 6). The channel
- * sequence must consist of 1 or more identical subsequences. Within the
- * subsequence, channels must be in ascending order with no repeated
- * channels. For example, the following sequences are valid: 0 1 2 3
- * (single valid subsequence), 0 2 3 5 0 2 3 5 (repeated valid
- * subsequence), 1 1 1 1 (repeated valid subsequence). The following
- * sequences are invalid: 0 3 2 1 (invalid subsequence), 0 2 3 5 0 2 3
- * (incompletely repeated subsequence). Some versions of the PCI230+ and
- * PCI260+ have a bug that requires any subsequence longer than one
- * entry to include channel 0.
- *
- * AO Subdevice:
- *
- * The AO subdevice has 2 channels with 12-bit resolution.
- * The following output ranges are supported:
- * 0 => [0, 10] V
- * 1 => [-10, +10] V
- *
- * AO Commands:
- *
- * +=========+==============+===========+============+==========+
- * |start_src|scan_begin_src|convert_src|scan_end_src| stop_src |
- * +=========+==============+===========+============+==========+
- * |TRIG_INT | TRIG_TIMER(1)| TRIG_NOW | TRIG_COUNT |TRIG_NONE |
- * | | TRIG_EXT(2) | | |TRIG_COUNT|
- * | | TRIG_INT | | | |
- * +---------+--------------+-----------+------------+----------+
- *
- * Note 1: If AI command and AO command are used simultaneously, only
- * one may have scan_begin_src == TRIG_TIMER.
- *
- * Note 2: scan_begin_src == TRIG_EXT is only supported if the card is
- * configured as a PCI230+ and is only supported on later
- * versions of the card. As a card configured as a PCI230+ is
- * not guaranteed to support external triggering, please consider
- * this support to be a bonus. It uses the EXTTRIG/EXTCONVCLK
- * input (PCI230+ pin 25). Triggering will be on the rising edge
- * unless the CR_INVERT flag is set in scan_begin_arg.
- *
- * The channels in the channel sequence must be in ascending order with
- * no repeats. All entries in the channel sequence must use the same
- * output range.
- *
- * DIO Subdevice:
- *
- * The DIO subdevice is an 8255 chip providing 24 DIO channels. The DIO
- * channels are configurable as inputs or outputs in four groups:
- *
- * Port A - channels 0 to 7
- * Port B - channels 8 to 15
- * Port CL - channels 16 to 19
- * Port CH - channels 20 to 23
- *
- * Only mode 0 of the 8255 chip is supported.
- *
- * Bit 0 of port C (DIO channel 16) is also used as an external scan
- * trigger input for AI commands on PCI230 and PCI230+, so would need to
- * be configured as an input to use it for that purpose.
- */
-
-/*
- * Extra triggered scan functionality, interrupt bug-fix added by Steve
- * Sharples. Support for PCI230+/260+, more triggered scan functionality,
- * and workarounds for (or detection of) various hardware problems added
- * by Ian Abbott.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
-
-/*
- * PCI230 PCI configuration register information
- */
-#define PCI_DEVICE_ID_PCI230 0x0000
-#define PCI_DEVICE_ID_PCI260 0x0006
-
-/*
- * PCI230 i/o space 1 registers.
- */
-#define PCI230_PPI_X_BASE 0x00 /* User PPI (82C55) base */
-#define PCI230_PPI_X_A 0x00 /* User PPI (82C55) port A */
-#define PCI230_PPI_X_B 0x01 /* User PPI (82C55) port B */
-#define PCI230_PPI_X_C 0x02 /* User PPI (82C55) port C */
-#define PCI230_PPI_X_CMD 0x03 /* User PPI (82C55) control word */
-#define PCI230_Z2_CT_BASE 0x14 /* 82C54 counter/timer base */
-#define PCI230_ZCLK_SCE 0x1A /* Group Z Clock Configuration */
-#define PCI230_ZGAT_SCE 0x1D /* Group Z Gate Configuration */
-#define PCI230_INT_SCE 0x1E /* Interrupt source mask (w) */
-#define PCI230_INT_STAT 0x1E /* Interrupt status (r) */
-
-/*
- * PCI230 i/o space 2 registers.
- */
-#define PCI230_DACCON 0x00 /* DAC control */
-#define PCI230_DACOUT1 0x02 /* DAC channel 0 (w) */
-#define PCI230_DACOUT2 0x04 /* DAC channel 1 (w) (not FIFO mode) */
-#define PCI230_ADCDATA 0x08 /* ADC data (r) */
-#define PCI230_ADCSWTRIG 0x08 /* ADC software trigger (w) */
-#define PCI230_ADCCON 0x0A /* ADC control */
-#define PCI230_ADCEN 0x0C /* ADC channel enable bits */
-#define PCI230_ADCG 0x0E /* ADC gain control bits */
-/* PCI230+ i/o space 2 additional registers. */
-#define PCI230P_ADCTRIG 0x10 /* ADC start acquisition trigger */
-#define PCI230P_ADCTH 0x12 /* ADC analog trigger threshold */
-#define PCI230P_ADCFFTH 0x14 /* ADC FIFO interrupt threshold */
-#define PCI230P_ADCFFLEV 0x16 /* ADC FIFO level (r) */
-#define PCI230P_ADCPTSC 0x18 /* ADC pre-trigger sample count (r) */
-#define PCI230P_ADCHYST 0x1A /* ADC analog trigger hysteresis */
-#define PCI230P_EXTFUNC 0x1C /* Extended functions */
-#define PCI230P_HWVER 0x1E /* Hardware version (r) */
-/* PCI230+ hardware version 2 onwards. */
-#define PCI230P2_DACDATA 0x02 /* DAC data (FIFO mode) (w) */
-#define PCI230P2_DACSWTRIG 0x02 /* DAC soft trigger (FIFO mode) (r) */
-#define PCI230P2_DACEN 0x06 /* DAC channel enable (FIFO mode) */
-
-/*
- * DACCON read-write values.
- */
-#define PCI230_DAC_OR_UNI (0 << 0) /* Output range unipolar */
-#define PCI230_DAC_OR_BIP (1 << 0) /* Output range bipolar */
-#define PCI230_DAC_OR_MASK (1 << 0)
-/*
- * The following applies only if DAC FIFO support is enabled in the EXTFUNC
- * register (and only for PCI230+ hardware version 2 onwards).
- */
-#define PCI230P2_DAC_FIFO_EN (1 << 8) /* FIFO enable */
-/*
- * The following apply only if the DAC FIFO is enabled (and only for PCI230+
- * hardware version 2 onwards).
- */
-#define PCI230P2_DAC_TRIG_NONE (0 << 2) /* No trigger */
-#define PCI230P2_DAC_TRIG_SW (1 << 2) /* Software trigger */
-#define PCI230P2_DAC_TRIG_EXTP (2 << 2) /* EXTTRIG +ve edge trigger */
-#define PCI230P2_DAC_TRIG_EXTN (3 << 2) /* EXTTRIG -ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT0 (4 << 2) /* CT0-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT1 (5 << 2) /* CT1-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT2 (6 << 2) /* CT2-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_MASK (7 << 2)
-#define PCI230P2_DAC_FIFO_WRAP (1 << 7) /* FIFO wraparound mode */
-#define PCI230P2_DAC_INT_FIFO_EMPTY (0 << 9) /* FIFO interrupt empty */
-#define PCI230P2_DAC_INT_FIFO_NEMPTY (1 << 9)
-#define PCI230P2_DAC_INT_FIFO_NHALF (2 << 9) /* FIFO intr not half full */
-#define PCI230P2_DAC_INT_FIFO_HALF (3 << 9)
-#define PCI230P2_DAC_INT_FIFO_NFULL (4 << 9) /* FIFO interrupt not full */
-#define PCI230P2_DAC_INT_FIFO_FULL (5 << 9)
-#define PCI230P2_DAC_INT_FIFO_MASK (7 << 9)
-
-/*
- * DACCON read-only values.
- */
-#define PCI230_DAC_BUSY (1 << 1) /* DAC busy. */
-/*
- * The following apply only if the DAC FIFO is enabled (and only for PCI230+
- * hardware version 2 onwards).
- */
-#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED (1 << 5) /* Underrun error */
-#define PCI230P2_DAC_FIFO_EMPTY (1 << 13) /* FIFO empty */
-#define PCI230P2_DAC_FIFO_FULL (1 << 14) /* FIFO full */
-#define PCI230P2_DAC_FIFO_HALF (1 << 15) /* FIFO half full */
-
-/*
- * DACCON write-only, transient values.
- */
-/*
- * The following apply only if the DAC FIFO is enabled (and only for PCI230+
- * hardware version 2 onwards).
- */
-#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR (1 << 5) /* Clear underrun */
-#define PCI230P2_DAC_FIFO_RESET (1 << 12) /* FIFO reset */
-
-/*
- * PCI230+ hardware version 2 DAC FIFO levels.
- */
-#define PCI230P2_DAC_FIFOLEVEL_HALF 512
-#define PCI230P2_DAC_FIFOLEVEL_FULL 1024
-/* Free space in DAC FIFO. */
-#define PCI230P2_DAC_FIFOROOM_EMPTY PCI230P2_DAC_FIFOLEVEL_FULL
-#define PCI230P2_DAC_FIFOROOM_ONETOHALF \
- (PCI230P2_DAC_FIFOLEVEL_FULL - PCI230P2_DAC_FIFOLEVEL_HALF)
-#define PCI230P2_DAC_FIFOROOM_HALFTOFULL 1
-#define PCI230P2_DAC_FIFOROOM_FULL 0
-
-/*
- * ADCCON read/write values.
- */
-#define PCI230_ADC_TRIG_NONE (0 << 0) /* No trigger */
-#define PCI230_ADC_TRIG_SW (1 << 0) /* Software trigger */
-#define PCI230_ADC_TRIG_EXTP (2 << 0) /* EXTTRIG +ve edge trigger */
-#define PCI230_ADC_TRIG_EXTN (3 << 0) /* EXTTRIG -ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT0 (4 << 0) /* CT0-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT1 (5 << 0) /* CT1-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT2 (6 << 0) /* CT2-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_MASK (7 << 0)
-#define PCI230_ADC_IR_UNI (0 << 3) /* Input range unipolar */
-#define PCI230_ADC_IR_BIP (1 << 3) /* Input range bipolar */
-#define PCI230_ADC_IR_MASK (1 << 3)
-#define PCI230_ADC_IM_SE (0 << 4) /* Input mode single ended */
-#define PCI230_ADC_IM_DIF (1 << 4) /* Input mode differential */
-#define PCI230_ADC_IM_MASK (1 << 4)
-#define PCI230_ADC_FIFO_EN (1 << 8) /* FIFO enable */
-#define PCI230_ADC_INT_FIFO_EMPTY (0 << 9)
-#define PCI230_ADC_INT_FIFO_NEMPTY (1 << 9) /* FIFO interrupt not empty */
-#define PCI230_ADC_INT_FIFO_NHALF (2 << 9)
-#define PCI230_ADC_INT_FIFO_HALF (3 << 9) /* FIFO interrupt half full */
-#define PCI230_ADC_INT_FIFO_NFULL (4 << 9)
-#define PCI230_ADC_INT_FIFO_FULL (5 << 9) /* FIFO interrupt full */
-#define PCI230P_ADC_INT_FIFO_THRESH (7 << 9) /* FIFO interrupt threshold */
-#define PCI230_ADC_INT_FIFO_MASK (7 << 9)
-
-/*
- * ADCCON write-only, transient values.
- */
-#define PCI230_ADC_FIFO_RESET (1 << 12) /* FIFO reset */
-#define PCI230_ADC_GLOB_RESET (1 << 13) /* Global reset */
-
-/*
- * ADCCON read-only values.
- */
-#define PCI230_ADC_BUSY (1 << 15) /* ADC busy */
-#define PCI230_ADC_FIFO_EMPTY (1 << 12) /* FIFO empty */
-#define PCI230_ADC_FIFO_FULL (1 << 13) /* FIFO full */
-#define PCI230_ADC_FIFO_HALF (1 << 14) /* FIFO half full */
-#define PCI230_ADC_FIFO_FULL_LATCHED (1 << 5) /* FIFO overrun occurred */
-
-/*
- * PCI230 ADC FIFO levels.
- */
-#define PCI230_ADC_FIFOLEVEL_HALFFULL 2049 /* Value for FIFO half full */
-#define PCI230_ADC_FIFOLEVEL_FULL 4096 /* FIFO size */
-
-/*
- * PCI230+ EXTFUNC values.
- */
-/* Route EXTTRIG pin to external gate inputs. */
-#define PCI230P_EXTFUNC_GAT_EXTTRIG (1 << 0)
-/* PCI230+ hardware version 2 values. */
-/* Allow DAC FIFO to be enabled. */
-#define PCI230P2_EXTFUNC_DACFIFO (1 << 1)
-
-/*
- * Counter/timer clock input configuration sources.
- */
-#define CLK_CLK 0 /* reserved (channel-specific clock) */
-#define CLK_10MHZ 1 /* internal 10 MHz clock */
-#define CLK_1MHZ 2 /* internal 1 MHz clock */
-#define CLK_100KHZ 3 /* internal 100 kHz clock */
-#define CLK_10KHZ 4 /* internal 10 kHz clock */
-#define CLK_1KHZ 5 /* internal 1 kHz clock */
-#define CLK_OUTNM1 6 /* output of channel-1 modulo total */
-#define CLK_EXT 7 /* external clock */
-/* Macro to construct clock input configuration register value. */
-#define CLK_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
-
-/*
- * Counter/timer gate input configuration sources.
- */
-#define GAT_VCC 0 /* VCC (i.e. enabled) */
-#define GAT_GND 1 /* GND (i.e. disabled) */
-#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */
-#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
-/* Macro to construct gate input configuration register value. */
-#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
-
-/*
- * Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI230 and PCI260:
- *
- * Channel's Channel's
- * clock input gate input
- * Channel CLK_OUTNM1 GAT_NOUTNM2
- * ------- ---------- -----------
- * Z2-CT0 Z2-CT2-OUT /Z2-CT1-OUT
- * Z2-CT1 Z2-CT0-OUT /Z2-CT2-OUT
- * Z2-CT2 Z2-CT1-OUT /Z2-CT0-OUT
- */
-
-/*
- * Interrupt enables/status register values.
- */
-#define PCI230_INT_DISABLE 0
-#define PCI230_INT_PPI_C0 (1 << 0)
-#define PCI230_INT_PPI_C3 (1 << 1)
-#define PCI230_INT_ADC (1 << 2)
-#define PCI230_INT_ZCLK_CT1 (1 << 5)
-/* For PCI230+ hardware version 2 when DAC FIFO enabled. */
-#define PCI230P2_INT_DAC (1 << 4)
-
-/*
- * (Potentially) shared resources and their owners
- */
-enum {
- RES_Z2CT0 = (1U << 0), /* Z2-CT0 */
- RES_Z2CT1 = (1U << 1), /* Z2-CT1 */
- RES_Z2CT2 = (1U << 2) /* Z2-CT2 */
-};
-
-enum {
- OWNER_AICMD, /* Owned by AI command */
- OWNER_AOCMD, /* Owned by AO command */
- NUM_OWNERS /* Number of owners */
-};
-
-/*
- * Handy macros.
- */
-
-/* Combine old and new bits. */
-#define COMBINE(old, new, mask) (((old) & ~(mask)) | ((new) & (mask)))
-
-/* Current CPU. XXX should this be hard_smp_processor_id()? */
-#define THISCPU smp_processor_id()
-
-/*
- * Board descriptions for the two boards supported.
- */
-
-struct pci230_board {
- const char *name;
- unsigned short id;
- unsigned char ai_bits;
- unsigned char ao_bits;
- unsigned char min_hwver; /* Minimum hardware version supported. */
- bool have_dio:1;
-};
-
-static const struct pci230_board pci230_boards[] = {
- {
- .name = "pci230+",
- .id = PCI_DEVICE_ID_PCI230,
- .ai_bits = 16,
- .ao_bits = 12,
- .have_dio = true,
- .min_hwver = 1,
- },
- {
- .name = "pci260+",
- .id = PCI_DEVICE_ID_PCI260,
- .ai_bits = 16,
- .min_hwver = 1,
- },
- {
- .name = "pci230",
- .id = PCI_DEVICE_ID_PCI230,
- .ai_bits = 12,
- .ao_bits = 12,
- .have_dio = true,
- },
- {
- .name = "pci260",
- .id = PCI_DEVICE_ID_PCI260,
- .ai_bits = 12,
- },
-};
-
-struct pci230_private {
- spinlock_t isr_spinlock; /* Interrupt spin lock */
- spinlock_t res_spinlock; /* Shared resources spin lock */
- spinlock_t ai_stop_spinlock; /* Spin lock for stopping AI command */
- spinlock_t ao_stop_spinlock; /* Spin lock for stopping AO command */
- unsigned long daqio; /* PCI230's DAQ I/O space */
- int intr_cpuid; /* ID of CPU running ISR */
- unsigned short hwver; /* Hardware version (for '+' models) */
- unsigned short adccon; /* ADCCON register value */
- unsigned short daccon; /* DACCON register value */
- unsigned short adcfifothresh; /* ADC FIFO threshold (PCI230+/260+) */
- unsigned short adcg; /* ADCG register value */
- unsigned char ier; /* Interrupt enable bits */
- unsigned char res_owned[NUM_OWNERS]; /* Owned resources */
- bool intr_running:1; /* Flag set in interrupt routine */
- bool ai_bipolar:1; /* Flag AI range is bipolar */
- bool ao_bipolar:1; /* Flag AO range is bipolar */
- bool ai_cmd_started:1; /* Flag AI command started */
- bool ao_cmd_started:1; /* Flag AO command started */
-};
-
-/* PCI230 clock source periods in ns */
-static const unsigned int pci230_timebase[8] = {
- [CLK_10MHZ] = I8254_OSC_BASE_10MHZ,
- [CLK_1MHZ] = I8254_OSC_BASE_1MHZ,
- [CLK_100KHZ] = I8254_OSC_BASE_100KHZ,
- [CLK_10KHZ] = I8254_OSC_BASE_10KHZ,
- [CLK_1KHZ] = I8254_OSC_BASE_1KHZ,
-};
-
-/* PCI230 analogue input range table */
-static const struct comedi_lrange pci230_ai_range = {
- 7, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5)
- }
-};
-
-/* PCI230 analogue gain bits for each input range. */
-static const unsigned char pci230_ai_gain[7] = { 0, 1, 2, 3, 1, 2, 3 };
-
-/* PCI230 analogue output range table */
-static const struct comedi_lrange pci230_ao_range = {
- 2, {
- UNI_RANGE(10),
- BIP_RANGE(10)
- }
-};
-
-static unsigned short pci230_ai_read(struct comedi_device *dev)
-{
- const struct pci230_board *board = dev->board_ptr;
- struct pci230_private *devpriv = dev->private;
- unsigned short data;
-
- /* Read sample. */
- data = inw(devpriv->daqio + PCI230_ADCDATA);
- /*
- * PCI230 is 12 bit - stored in upper bits of 16 bit register
- * (lower four bits reserved for expansion). PCI230+ is 16 bit AI.
- *
- * If a bipolar range was specified, mangle it
- * (twos complement->straight binary).
- */
- if (devpriv->ai_bipolar)
- data ^= 0x8000;
- data >>= (16 - board->ai_bits);
- return data;
-}
-
-static unsigned short pci230_ao_mangle_datum(struct comedi_device *dev,
- unsigned short datum)
-{
- const struct pci230_board *board = dev->board_ptr;
- struct pci230_private *devpriv = dev->private;
-
- /*
- * PCI230 is 12 bit - stored in upper bits of 16 bit register (lower
- * four bits reserved for expansion). PCI230+ is also 12 bit AO.
- */
- datum <<= (16 - board->ao_bits);
- /*
- * If a bipolar range was specified, mangle it
- * (straight binary->twos complement).
- */
- if (devpriv->ao_bipolar)
- datum ^= 0x8000;
- return datum;
-}
-
-static void pci230_ao_write_nofifo(struct comedi_device *dev,
- unsigned short datum, unsigned int chan)
-{
- struct pci230_private *devpriv = dev->private;
-
- /* Write mangled datum to appropriate DACOUT register. */
- outw(pci230_ao_mangle_datum(dev, datum),
- devpriv->daqio + ((chan == 0) ? PCI230_DACOUT1 : PCI230_DACOUT2));
-}
-
-static void pci230_ao_write_fifo(struct comedi_device *dev,
- unsigned short datum, unsigned int chan)
-{
- struct pci230_private *devpriv = dev->private;
-
- /* Write mangled datum to appropriate DACDATA register. */
- outw(pci230_ao_mangle_datum(dev, datum),
- devpriv->daqio + PCI230P2_DACDATA);
-}
-
-static bool pci230_claim_shared(struct comedi_device *dev,
- unsigned char res_mask, unsigned int owner)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned int o;
- unsigned long irqflags;
-
- spin_lock_irqsave(&devpriv->res_spinlock, irqflags);
- for (o = 0; o < NUM_OWNERS; o++) {
- if (o == owner)
- continue;
- if (devpriv->res_owned[o] & res_mask) {
- spin_unlock_irqrestore(&devpriv->res_spinlock,
- irqflags);
- return false;
- }
- }
- devpriv->res_owned[owner] |= res_mask;
- spin_unlock_irqrestore(&devpriv->res_spinlock, irqflags);
- return true;
-}
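-
-/*
- * For instance, an AO command that drives its scan timing from Z2-CT1
- * would typically claim it first with something like
- * pci230_claim_shared(dev, RES_Z2CT1, OWNER_AOCMD) and give it back in
- * pci230_ao_stop() via pci230_release_all_resources(dev, OWNER_AOCMD).
- */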
-
-static void pci230_release_shared(struct comedi_device *dev,
- unsigned char res_mask, unsigned int owner)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&devpriv->res_spinlock, irqflags);
- devpriv->res_owned[owner] &= ~res_mask;
- spin_unlock_irqrestore(&devpriv->res_spinlock, irqflags);
-}
-
-static void pci230_release_all_resources(struct comedi_device *dev,
- unsigned int owner)
-{
- pci230_release_shared(dev, (unsigned char)~0, owner);
-}
-
-static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
- unsigned int flags)
-{
- uint64_t div;
- unsigned int rem;
-
- div = ns;
- rem = do_div(div, timebase);
- switch (flags & CMDF_ROUND_MASK) {
- default:
- case CMDF_ROUND_NEAREST:
- div += (rem + (timebase / 2)) / timebase;
- break;
- case CMDF_ROUND_DOWN:
- break;
- case CMDF_ROUND_UP:
- div += (rem + timebase - 1) / timebase;
- break;
- }
- return div > UINT_MAX ? UINT_MAX : (unsigned int)div;
-}
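-
-/*
- * For example, dividing 2500 ns by the 1 MHz timebase (1000 ns) gives 2
- * remainder 500: CMDF_ROUND_DOWN returns 2, while CMDF_ROUND_NEAREST and
- * CMDF_ROUND_UP both return 3.
- */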
-
-/*
- * Given desired period in ns, returns the required internal clock source
- * and gets the initial count.
- */
-static unsigned int pci230_choose_clk_count(uint64_t ns, unsigned int *count,
- unsigned int flags)
-{
- unsigned int clk_src, cnt;
-
- for (clk_src = CLK_10MHZ;; clk_src++) {
- cnt = pci230_divide_ns(ns, pci230_timebase[clk_src], flags);
- if (cnt <= 65536 || clk_src == CLK_1KHZ)
- break;
- }
- *count = cnt;
- return clk_src;
-}
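-
-/*
- * For example, a requested period of 1 s overflows the 16-bit count with
- * the 10 MHz, 1 MHz and 100 kHz timebases, so the 10 kHz clock (100000 ns
- * per tick) is chosen with an initial count of 10000.
- */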
-
-static void pci230_ns_to_single_timer(unsigned int *ns, unsigned int flags)
-{
- unsigned int count;
- unsigned int clk_src;
-
- clk_src = pci230_choose_clk_count(*ns, &count, flags);
- *ns = count * pci230_timebase[clk_src];
-}
-
-static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
- unsigned int mode, uint64_t ns,
- unsigned int flags)
-{
- unsigned int clk_src;
- unsigned int count;
-
- /* Set mode. */
- comedi_8254_set_mode(dev->pacer, ct, mode);
- /* Determine clock source and count. */
- clk_src = pci230_choose_clk_count(ns, &count, flags);
- /* Program clock source. */
- outb(CLK_CONFIG(ct, clk_src), dev->iobase + PCI230_ZCLK_SCE);
- /* Set initial count. */
- if (count >= 65536)
- count = 0;
-
- comedi_8254_write(dev->pacer, ct, count);
-}
-
-static void pci230_cancel_ct(struct comedi_device *dev, unsigned int ct)
-{
- /* Counter ct, 8254 mode 1, initial count not written. */
- comedi_8254_set_mode(dev->pacer, ct, I8254_MODE1);
-}
-
-static int pci230_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned int status;
-
- status = inw(devpriv->daqio + PCI230_ADCCON);
- if ((status & PCI230_ADC_FIFO_EMPTY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int pci230_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned int n;
- unsigned int chan, range, aref;
- unsigned int gainshift;
- unsigned short adccon, adcen;
- int ret;
-
- /* Unpack channel and range. */
- chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
- aref = CR_AREF(insn->chanspec);
- if (aref == AREF_DIFF) {
- /* Differential. */
- if (chan >= s->n_chan / 2) {
- dev_dbg(dev->class_dev,
- "%s: differential channel number out of range 0 to %u\n",
- __func__, (s->n_chan / 2) - 1);
- return -EINVAL;
- }
- }
-
- /*
- * Use Z2-CT2 as a conversion trigger instead of the built-in
- * software trigger, as otherwise triggering of differential channels
- * doesn't work properly for some versions of PCI230/260. Also set
- * FIFO mode because the ADC busy bit only works for software triggers.
- */
- adccon = PCI230_ADC_TRIG_Z2CT2 | PCI230_ADC_FIFO_EN;
- /* Set Z2-CT2 output low to avoid any false triggers. */
- comedi_8254_set_mode(dev->pacer, 2, I8254_MODE0);
- devpriv->ai_bipolar = comedi_range_is_bipolar(s, range);
- if (aref == AREF_DIFF) {
- /* Differential. */
- gainshift = chan * 2;
- if (devpriv->hwver == 0) {
- /*
- * Original PCI230/260 expects both inputs of the
- * differential channel to be enabled.
- */
- adcen = 3 << gainshift;
- } else {
- /*
- * PCI230+/260+ expects only one input of the
- * differential channel to be enabled.
- */
- adcen = 1 << gainshift;
- }
- adccon |= PCI230_ADC_IM_DIF;
- } else {
- /* Single ended. */
- adcen = 1 << chan;
- gainshift = chan & ~1;
- adccon |= PCI230_ADC_IM_SE;
- }
- devpriv->adcg = (devpriv->adcg & ~(3 << gainshift)) |
- (pci230_ai_gain[range] << gainshift);
- if (devpriv->ai_bipolar)
- adccon |= PCI230_ADC_IR_BIP;
- else
- adccon |= PCI230_ADC_IR_UNI;
-
- /*
- * Enable only this channel in the scan list - otherwise by default
- * we'll get one sample from each channel.
- */
- outw(adcen, devpriv->daqio + PCI230_ADCEN);
-
- /* Set gain for channel. */
- outw(devpriv->adcg, devpriv->daqio + PCI230_ADCG);
-
- /* Specify uni/bip, se/diff, conversion source, and reset FIFO. */
- devpriv->adccon = adccon;
- outw(adccon | PCI230_ADC_FIFO_RESET, devpriv->daqio + PCI230_ADCCON);
-
- /* Convert n samples */
- for (n = 0; n < insn->n; n++) {
- /*
- * Trigger conversion by toggling Z2-CT2 output
- * (finish with output high).
- */
- comedi_8254_set_mode(dev->pacer, 2, I8254_MODE0);
- comedi_8254_set_mode(dev->pacer, 2, I8254_MODE1);
-
- /* wait for conversion to end */
- ret = comedi_timeout(dev, s, insn, pci230_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* read data */
- data[n] = pci230_ai_read(dev);
- }
-
- /* return the number of samples read/written */
- return n;
-}
-
-static int pci230_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- /*
- * Set range - see analogue output range table; 0 => unipolar 10V,
- * 1 => bipolar +/-10V range scale
- */
- devpriv->ao_bipolar = comedi_range_is_bipolar(s, range);
- outw(range, devpriv->daqio + PCI230_DACCON);
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- pci230_ao_write_nofifo(dev, val, chan);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pci230_ao_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int prev_chan = CR_CHAN(cmd->chanlist[0]);
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
-
- if (chan < prev_chan) {
- dev_dbg(dev->class_dev,
- "%s: channel numbers must increase\n",
- __func__);
- return -EINVAL;
- }
-
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "%s: channels must have the same range\n",
- __func__);
- return -EINVAL;
- }
-
- prev_chan = chan;
- }
-
- return 0;
-}
-
-static int pci230_ao_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- const struct pci230_board *board = dev->board_ptr;
- struct pci230_private *devpriv = dev->private;
- int err = 0;
- unsigned int tmp;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
-
- tmp = TRIG_TIMER | TRIG_INT;
- if (board->min_hwver > 0 && devpriv->hwver >= 2) {
- /*
- * For PCI230+ hardware version 2 onwards, allow external
- * trigger from EXTTRIG/EXTCONVCLK input (PCI230+ pin 25).
- *
- * FIXME: The permitted scan_begin_src values shouldn't depend
- * on devpriv->hwver (the detected card's actual hardware
- * version). They should only depend on board->min_hwver
- * (the static capabilities of the configured card). To fix
- * it, a new card model, e.g. "pci230+2" would have to be
- * defined with min_hwver set to 2. It doesn't seem worth it
- * for this alone. At the moment, please consider
- * scan_begin_src==TRIG_EXT support to be a bonus rather than a
- * guarantee!
- */
- tmp |= TRIG_EXT;
- }
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, tmp);
-
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
-#define MAX_SPEED_AO 8000 /* 8000 ns => 125 kHz */
-/*
- * Comedi limit due to unsigned int cmd. Driver limit =
- * 2^16 (16-bit counter) * 1000000 ns (1 kHz onboard clock) = 65.536 s
- */
-#define MIN_SPEED_AO 4294967295u /* 4294967295ns = 4.29s */
-
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SPEED_AO);
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- MIN_SPEED_AO);
- break;
- case TRIG_EXT:
- /*
- * External trigger - for PCI230+ hardware version 2 onwards.
- */
- /* Trigger number must be 0. */
- if (cmd->scan_begin_arg & ~CR_FLAGS_MASK) {
- cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0,
- ~CR_FLAGS_MASK);
- err |= -EINVAL;
- }
- /*
- * The only flags allowed are CR_EDGE and CR_INVERT.
- * The CR_EDGE flag is ignored.
- */
- if (cmd->scan_begin_arg & CR_FLAGS_MASK &
- ~(CR_EDGE | CR_INVERT)) {
- cmd->scan_begin_arg =
- COMBINE(cmd->scan_begin_arg, 0,
- CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT));
- err |= -EINVAL;
- }
- break;
- default:
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- break;
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- tmp = cmd->scan_begin_arg;
- pci230_ns_to_single_timer(&cmd->scan_begin_arg, cmd->flags);
- if (tmp != cmd->scan_begin_arg)
- err++;
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= pci230_ao_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static void pci230_ao_stop(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned long irqflags;
- unsigned char intsrc;
- bool started;
- struct comedi_cmd *cmd;
-
- spin_lock_irqsave(&devpriv->ao_stop_spinlock, irqflags);
- started = devpriv->ao_cmd_started;
- devpriv->ao_cmd_started = false;
- spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
- if (!started)
- return;
- cmd = &s->async->cmd;
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* Stop scan rate generator. */
- pci230_cancel_ct(dev, 1);
- }
- /* Determine interrupt source. */
- if (devpriv->hwver < 2) {
- /* Not using DAC FIFO. Using CT1 interrupt. */
- intsrc = PCI230_INT_ZCLK_CT1;
- } else {
- /* Using DAC FIFO interrupt. */
- intsrc = PCI230P2_INT_DAC;
- }
- /*
- * Disable interrupt and wait for interrupt routine to finish running
- * unless we are called from the interrupt routine.
- */
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- devpriv->ier &= ~intsrc;
- while (devpriv->intr_running && devpriv->intr_cpuid != THISCPU) {
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- }
- outb(devpriv->ier, dev->iobase + PCI230_INT_SCE);
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
- if (devpriv->hwver >= 2) {
- /*
- * Using DAC FIFO. Reset FIFO, clear underrun error,
- * disable FIFO.
- */
- devpriv->daccon &= PCI230_DAC_OR_MASK;
- outw(devpriv->daccon | PCI230P2_DAC_FIFO_RESET |
- PCI230P2_DAC_FIFO_UNDERRUN_CLEAR,
- devpriv->daqio + PCI230_DACCON);
- }
- /* Release resources. */
- pci230_release_all_resources(dev, OWNER_AOCMD);
-}
-
-static void pci230_handle_ao_nofifo(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned short data;
- int i;
-
- if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
- return;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (!comedi_buf_read_samples(s, &data, 1)) {
- async->events |= COMEDI_CB_OVERFLOW;
- return;
- }
- pci230_ao_write_nofifo(dev, data, chan);
- s->readback[chan] = data;
- }
-
- if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
-}
-
-/*
- * Loads DAC FIFO (if using it) from buffer.
- * Returns false if AO finished due to completion or error, true if still going.
- */
-static bool pci230_handle_ao_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int num_scans = comedi_nscans_left(s, 0);
- unsigned int room;
- unsigned short dacstat;
- unsigned int i, n;
- unsigned int events = 0;
-
- /* Get DAC FIFO status. */
- dacstat = inw(devpriv->daqio + PCI230_DACCON);
-
- if (cmd->stop_src == TRIG_COUNT && num_scans == 0)
- events |= COMEDI_CB_EOA;
-
- if (events == 0) {
- /* Check for FIFO underrun. */
- if (dacstat & PCI230P2_DAC_FIFO_UNDERRUN_LATCHED) {
- dev_err(dev->class_dev, "AO FIFO underrun\n");
- events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
- }
- /*
- * Check for buffer underrun if FIFO less than half full
- * (otherwise there will be loads of "DAC FIFO not half full"
- * interrupts).
- */
- if (num_scans == 0 &&
- (dacstat & PCI230P2_DAC_FIFO_HALF) == 0) {
- dev_err(dev->class_dev, "AO buffer underrun\n");
- events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
- }
- }
- if (events == 0) {
- /* Determine how much room is in the FIFO (in samples). */
- if (dacstat & PCI230P2_DAC_FIFO_FULL)
- room = PCI230P2_DAC_FIFOROOM_FULL;
- else if (dacstat & PCI230P2_DAC_FIFO_HALF)
- room = PCI230P2_DAC_FIFOROOM_HALFTOFULL;
- else if (dacstat & PCI230P2_DAC_FIFO_EMPTY)
- room = PCI230P2_DAC_FIFOROOM_EMPTY;
- else
- room = PCI230P2_DAC_FIFOROOM_ONETOHALF;
- /* Convert room to number of scans that can be added. */
- room /= cmd->chanlist_len;
- /* Determine number of scans to process. */
- if (num_scans > room)
- num_scans = room;
- /* Process scans. */
- for (n = 0; n < num_scans; n++) {
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned short datum;
-
- comedi_buf_read_samples(s, &datum, 1);
- pci230_ao_write_fifo(dev, datum, chan);
- s->readback[chan] = datum;
- }
- }
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- /*
- * All data for the command has been written
- * to FIFO. Set FIFO interrupt trigger level
- * to 'empty'.
- */
- devpriv->daccon &= ~PCI230P2_DAC_INT_FIFO_MASK;
- devpriv->daccon |= PCI230P2_DAC_INT_FIFO_EMPTY;
- outw(devpriv->daccon, devpriv->daqio + PCI230_DACCON);
- }
- /* Check if FIFO underrun occurred while writing to FIFO. */
- dacstat = inw(devpriv->daqio + PCI230_DACCON);
- if (dacstat & PCI230P2_DAC_FIFO_UNDERRUN_LATCHED) {
- dev_err(dev->class_dev, "AO FIFO underrun\n");
- events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
- }
- }
- async->events |= events;
- return !(async->events & COMEDI_CB_CANCEL_MASK);
-}
-
-static int pci230_ao_inttrig_scan_begin(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned long irqflags;
-
- if (trig_num)
- return -EINVAL;
-
- spin_lock_irqsave(&devpriv->ao_stop_spinlock, irqflags);
- if (!devpriv->ao_cmd_started) {
- spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
- return 1;
- }
- /* Perform scan. */
- if (devpriv->hwver < 2) {
- /* Not using DAC FIFO. */
- spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
- pci230_handle_ao_nofifo(dev, s);
- comedi_handle_events(dev, s);
- } else {
- /* Using DAC FIFO. */
- /* Read DACSWTRIG register to trigger conversion. */
- inw(devpriv->daqio + PCI230P2_DACSWTRIG);
- spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
- }
- /* Delay. Should driver be responsible for this? */
- /* XXX TODO: See if DAC busy bit can be used. */
- udelay(8);
- return 1;
-}
-
-static void pci230_ao_start(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned long irqflags;
-
- devpriv->ao_cmd_started = true;
-
- if (devpriv->hwver >= 2) {
- /* Using DAC FIFO. */
- unsigned short scantrig;
- bool run;
-
- /* Preload FIFO data. */
- run = pci230_handle_ao_fifo(dev, s);
- comedi_handle_events(dev, s);
- if (!run) {
- /* Stopped. */
- return;
- }
- /* Set scan trigger source. */
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- scantrig = PCI230P2_DAC_TRIG_Z2CT1;
- break;
- case TRIG_EXT:
- /* Trigger on EXTTRIG/EXTCONVCLK pin. */
- if ((cmd->scan_begin_arg & CR_INVERT) == 0) {
- /* +ve edge */
- scantrig = PCI230P2_DAC_TRIG_EXTP;
- } else {
- /* -ve edge */
- scantrig = PCI230P2_DAC_TRIG_EXTN;
- }
- break;
- case TRIG_INT:
- scantrig = PCI230P2_DAC_TRIG_SW;
- break;
- default:
- /* Shouldn't get here. */
- scantrig = PCI230P2_DAC_TRIG_NONE;
- break;
- }
- devpriv->daccon =
- (devpriv->daccon & ~PCI230P2_DAC_TRIG_MASK) | scantrig;
- outw(devpriv->daccon, devpriv->daqio + PCI230_DACCON);
- }
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- if (devpriv->hwver < 2) {
- /* Not using DAC FIFO. */
- /* Enable CT1 timer interrupt. */
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- devpriv->ier |= PCI230_INT_ZCLK_CT1;
- outb(devpriv->ier, dev->iobase + PCI230_INT_SCE);
- spin_unlock_irqrestore(&devpriv->isr_spinlock,
- irqflags);
- }
- /* Set CT1 gate high to start counting. */
- outb(GAT_CONFIG(1, GAT_VCC), dev->iobase + PCI230_ZGAT_SCE);
- break;
- case TRIG_INT:
- async->inttrig = pci230_ao_inttrig_scan_begin;
- break;
- }
- if (devpriv->hwver >= 2) {
- /* Using DAC FIFO. Enable DAC FIFO interrupt. */
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- devpriv->ier |= PCI230P2_INT_DAC;
- outb(devpriv->ier, dev->iobase + PCI230_INT_SCE);
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
- }
-}
-
-static int pci230_ao_inttrig_start(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
-	if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- s->async->inttrig = NULL;
- pci230_ao_start(dev, s);
-
- return 1;
-}
-
-static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned short daccon;
- unsigned int range;
-
- /* Get the command. */
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* Claim Z2-CT1. */
- if (!pci230_claim_shared(dev, RES_Z2CT1, OWNER_AOCMD))
- return -EBUSY;
- }
-
- /*
- * Set range - see analogue output range table; 0 => unipolar 10V,
- * 1 => bipolar +/-10V range scale
- */
- range = CR_RANGE(cmd->chanlist[0]);
- devpriv->ao_bipolar = comedi_range_is_bipolar(s, range);
- daccon = devpriv->ao_bipolar ? PCI230_DAC_OR_BIP : PCI230_DAC_OR_UNI;
- /* Use DAC FIFO for hardware version 2 onwards. */
- if (devpriv->hwver >= 2) {
- unsigned short dacen;
- unsigned int i;
-
- dacen = 0;
- for (i = 0; i < cmd->chanlist_len; i++)
- dacen |= 1 << CR_CHAN(cmd->chanlist[i]);
-
- /* Set channel scan list. */
- outw(dacen, devpriv->daqio + PCI230P2_DACEN);
- /*
- * Enable DAC FIFO.
- * Set DAC scan source to 'none'.
- * Set DAC FIFO interrupt trigger level to 'not half full'.
- * Reset DAC FIFO and clear underrun.
- *
- * N.B. DAC FIFO interrupts are currently disabled.
- */
- daccon |= PCI230P2_DAC_FIFO_EN | PCI230P2_DAC_FIFO_RESET |
- PCI230P2_DAC_FIFO_UNDERRUN_CLEAR |
- PCI230P2_DAC_TRIG_NONE | PCI230P2_DAC_INT_FIFO_NHALF;
- }
-
- /* Set DACCON. */
- outw(daccon, devpriv->daqio + PCI230_DACCON);
- /* Preserve most of DACCON apart from write-only, transient bits. */
- devpriv->daccon = daccon & ~(PCI230P2_DAC_FIFO_RESET |
- PCI230P2_DAC_FIFO_UNDERRUN_CLEAR);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /*
- * Set the counter timer 1 to the specified scan frequency.
- * cmd->scan_begin_arg is sampling period in ns.
- * Gate it off for now.
- */
- outb(GAT_CONFIG(1, GAT_GND), dev->iobase + PCI230_ZGAT_SCE);
- pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
- cmd->scan_begin_arg,
- cmd->flags);
- }
-
- /* N.B. cmd->start_src == TRIG_INT */
- s->async->inttrig = pci230_ao_inttrig_start;
-
- return 0;
-}
-
-static int pci230_ao_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- pci230_ao_stop(dev, s);
- return 0;
-}
-
-static int pci230_ai_check_scan_period(struct comedi_cmd *cmd)
-{
- unsigned int min_scan_period, chanlist_len;
- int err = 0;
-
- chanlist_len = cmd->chanlist_len;
- if (cmd->chanlist_len == 0)
- chanlist_len = 1;
-
- min_scan_period = chanlist_len * cmd->convert_arg;
- if (min_scan_period < chanlist_len ||
- min_scan_period < cmd->convert_arg) {
- /* Arithmetic overflow. */
- min_scan_period = UINT_MAX;
- err++;
- }
- if (cmd->scan_begin_arg < min_scan_period) {
- cmd->scan_begin_arg = min_scan_period;
- err++;
- }
-
- return !err;
-}
-
-static int pci230_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned int max_diff_chan = (s->n_chan / 2) - 1;
- unsigned int prev_chan = 0;
- unsigned int prev_range = 0;
- unsigned int prev_aref = 0;
- bool prev_bipolar = false;
- unsigned int subseq_len = 0;
- int i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chanspec = cmd->chanlist[i];
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
- bool bipolar = comedi_range_is_bipolar(s, range);
-
- if (aref == AREF_DIFF && chan >= max_diff_chan) {
- dev_dbg(dev->class_dev,
- "%s: differential channel number out of range 0 to %u\n",
- __func__, max_diff_chan);
- return -EINVAL;
- }
-
- if (i > 0) {
- /*
- * Channel numbers must strictly increase or
- * subsequence must repeat exactly.
- */
- if (chan <= prev_chan && subseq_len == 0)
- subseq_len = i;
-
- if (subseq_len > 0 &&
- cmd->chanlist[i % subseq_len] != chanspec) {
- dev_dbg(dev->class_dev,
- "%s: channel numbers must increase or sequence must repeat exactly\n",
- __func__);
- return -EINVAL;
- }
-
- if (aref != prev_aref) {
- dev_dbg(dev->class_dev,
- "%s: channel sequence analogue references must be all the same (single-ended or differential)\n",
- __func__);
- return -EINVAL;
- }
-
- if (bipolar != prev_bipolar) {
- dev_dbg(dev->class_dev,
- "%s: channel sequence ranges must be all bipolar or all unipolar\n",
- __func__);
- return -EINVAL;
- }
-
- if (aref != AREF_DIFF && range != prev_range &&
- ((chan ^ prev_chan) & ~1) == 0) {
- dev_dbg(dev->class_dev,
- "%s: single-ended channel pairs must have the same range\n",
- __func__);
- return -EINVAL;
- }
- }
- prev_chan = chan;
- prev_range = range;
- prev_aref = aref;
- prev_bipolar = bipolar;
- }
-
- if (subseq_len == 0)
- subseq_len = cmd->chanlist_len;
-
- if (cmd->chanlist_len % subseq_len) {
- dev_dbg(dev->class_dev,
- "%s: sequence must repeat exactly\n", __func__);
- return -EINVAL;
- }
-
- /*
- * Buggy PCI230+ or PCI260+ requires channel 0 to be (first) in the
- * sequence if the sequence contains more than one channel. Hardware
- * versions 1 and 2 have the bug. There is no hardware version 3.
- *
- * Actually, there are two firmwares that report themselves as
- * hardware version 1 (the boards have different ADC chips with
- * slightly different timing requirements, which was supposed to
- * be invisible to software). The first one doesn't seem to have
- * the bug, but the second one does, and we can't tell them apart!
- */
- if (devpriv->hwver > 0 && devpriv->hwver < 4) {
- if (subseq_len > 1 && CR_CHAN(cmd->chanlist[0])) {
- dev_info(dev->class_dev,
- "amplc_pci230: ai_cmdtest: Buggy PCI230+/260+ h/w version %u requires first channel of multi-channel sequence to be 0 (corrected in h/w version 4)\n",
- devpriv->hwver);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int pci230_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- const struct pci230_board *board = dev->board_ptr;
- struct pci230_private *devpriv = dev->private;
- int err = 0;
- unsigned int tmp;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
-
- tmp = TRIG_FOLLOW | TRIG_TIMER | TRIG_INT;
- if (board->have_dio || board->min_hwver > 0) {
- /*
- * Unfortunately, we cannot trigger a scan off an external
- * source on the PCI260 board, since it uses the PPIC0 (DIO)
- * input, which isn't present on the PCI260. For PCI260+
- * we can use the EXTTRIG/EXTCONVCLK input on pin 17 instead.
- */
- tmp |= TRIG_EXT;
- }
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, tmp);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_INT | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- /*
- * If scan_begin_src is not TRIG_FOLLOW, then a monostable will be
- * set up to generate a fixed number of timed conversion pulses.
- */
- if (cmd->scan_begin_src != TRIG_FOLLOW &&
- cmd->convert_src != TRIG_TIMER)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
-#define MAX_SPEED_AI_SE 3200 /* PCI230 SE: 3200 ns => 312.5 kHz */
-#define MAX_SPEED_AI_DIFF 8000 /* PCI230 DIFF: 8000 ns => 125 kHz */
-#define MAX_SPEED_AI_PLUS 4000 /* PCI230+: 4000 ns => 250 kHz */
-/*
- * Comedi limit due to unsigned int cmd. Driver limit =
- * 2^16 (16-bit counter) * 1000000ns (1kHz onboard clock) = 65.536s
- */
-#define MIN_SPEED_AI 4294967295u /* 4294967295ns = 4.29s */
-
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int max_speed_ai;
-
- if (devpriv->hwver == 0) {
- /*
- * PCI230 or PCI260. Max speed depends whether
- * single-ended or pseudo-differential.
- */
- if (cmd->chanlist && cmd->chanlist_len > 0) {
- /* Peek analogue reference of first channel. */
- if (CR_AREF(cmd->chanlist[0]) == AREF_DIFF)
- max_speed_ai = MAX_SPEED_AI_DIFF;
- else
- max_speed_ai = MAX_SPEED_AI_SE;
-
- } else {
- /* No channel list. Assume single-ended. */
- max_speed_ai = MAX_SPEED_AI_SE;
- }
- } else {
- /* PCI230+ or PCI260+. */
- max_speed_ai = MAX_SPEED_AI_PLUS;
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- max_speed_ai);
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg,
- MIN_SPEED_AI);
- } else if (cmd->convert_src == TRIG_EXT) {
- /*
- * external trigger
- *
- * convert_arg == (CR_EDGE | 0)
- * => trigger on +ve edge.
- * convert_arg == (CR_EDGE | CR_INVERT | 0)
- * => trigger on -ve edge.
- */
- if (cmd->convert_arg & CR_FLAGS_MASK) {
- /* Trigger number must be 0. */
- if (cmd->convert_arg & ~CR_FLAGS_MASK) {
- cmd->convert_arg = COMBINE(cmd->convert_arg, 0,
- ~CR_FLAGS_MASK);
- err |= -EINVAL;
- }
- /*
- * The only flags allowed are CR_INVERT and CR_EDGE.
- * CR_EDGE is required.
- */
- if ((cmd->convert_arg & CR_FLAGS_MASK & ~CR_INVERT) !=
- CR_EDGE) {
- /* Set CR_EDGE, preserve CR_INVERT. */
- cmd->convert_arg =
-				    COMBINE(cmd->convert_arg, CR_EDGE | 0,
- CR_FLAGS_MASK & ~CR_INVERT);
- err |= -EINVAL;
- }
- } else {
- /*
- * Backwards compatibility with previous versions:
- * convert_arg == 0 => trigger on -ve edge.
- * convert_arg == 1 => trigger on +ve edge.
- */
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg,
- 1);
- }
- } else {
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_EXT) {
- /*
- * external "trigger" to begin each scan:
- * scan_begin_arg==0 => use PPC0 input -> gate of CT0 -> gate
- * of CT2 (sample convert trigger is CT2)
- */
- if (cmd->scan_begin_arg & ~CR_FLAGS_MASK) {
- cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0,
- ~CR_FLAGS_MASK);
- err |= -EINVAL;
- }
- /* The only flag allowed is CR_EDGE, which is ignored. */
- if (cmd->scan_begin_arg & CR_FLAGS_MASK & ~CR_EDGE) {
- cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0,
- CR_FLAGS_MASK & ~CR_EDGE);
- err |= -EINVAL;
- }
- } else if (cmd->scan_begin_src == TRIG_TIMER) {
-		/* N.B. cmd->convert_src is also TRIG_TIMER */
- if (!pci230_ai_check_scan_period(cmd))
- err |= -EINVAL;
-
- } else {
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- }
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- tmp = cmd->convert_arg;
- pci230_ns_to_single_timer(&cmd->convert_arg, cmd->flags);
- if (tmp != cmd->convert_arg)
- err++;
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
-		/* N.B. cmd->convert_src is also TRIG_TIMER */
- tmp = cmd->scan_begin_arg;
- pci230_ns_to_single_timer(&cmd->scan_begin_arg, cmd->flags);
- if (!pci230_ai_check_scan_period(cmd)) {
- /* Was below minimum required. Round up. */
- pci230_ns_to_single_timer(&cmd->scan_begin_arg,
- CMDF_ROUND_UP);
- pci230_ai_check_scan_period(cmd);
- }
- if (tmp != cmd->scan_begin_arg)
- err++;
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= pci230_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static void pci230_ai_update_fifo_trigger_level(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int wake;
- unsigned short triglev;
- unsigned short adccon;
-
- if (cmd->flags & CMDF_WAKE_EOS)
- wake = cmd->scan_end_arg - s->async->cur_chan;
- else
- wake = comedi_nsamples_left(s, PCI230_ADC_FIFOLEVEL_HALFFULL);
-
- if (wake >= PCI230_ADC_FIFOLEVEL_HALFFULL) {
- triglev = PCI230_ADC_INT_FIFO_HALF;
- } else if (wake > 1 && devpriv->hwver > 0) {
- /* PCI230+/260+ programmable FIFO interrupt level. */
- if (devpriv->adcfifothresh != wake) {
- devpriv->adcfifothresh = wake;
- outw(wake, devpriv->daqio + PCI230P_ADCFFTH);
- }
- triglev = PCI230P_ADC_INT_FIFO_THRESH;
- } else {
- triglev = PCI230_ADC_INT_FIFO_NEMPTY;
- }
- adccon = (devpriv->adccon & ~PCI230_ADC_INT_FIFO_MASK) | triglev;
- if (adccon != devpriv->adccon) {
- devpriv->adccon = adccon;
- outw(adccon, devpriv->daqio + PCI230_ADCCON);
- }
-}
-
-static int pci230_ai_inttrig_convert(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned long irqflags;
- unsigned int delayus;
-
- if (trig_num)
- return -EINVAL;
-
- spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
- if (!devpriv->ai_cmd_started) {
- spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
- return 1;
- }
- /*
- * Trigger conversion by toggling Z2-CT2 output.
- * Finish with output high.
- */
- comedi_8254_set_mode(dev->pacer, 2, I8254_MODE0);
- comedi_8254_set_mode(dev->pacer, 2, I8254_MODE1);
- /*
- * Delay. Should driver be responsible for this? An
- * alternative would be to wait until conversion is complete,
- * but we can't tell when it's complete because the ADC busy
- * bit has a different meaning when FIFO enabled (and when
- * FIFO not enabled, it only works for software triggers).
- */
- if ((devpriv->adccon & PCI230_ADC_IM_MASK) == PCI230_ADC_IM_DIF &&
- devpriv->hwver == 0) {
- /* PCI230/260 in differential mode */
- delayus = 8;
- } else {
- /* single-ended or PCI230+/260+ */
- delayus = 4;
- }
- spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
- udelay(delayus);
- return 1;
-}
-
-static int pci230_ai_inttrig_scan_begin(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned long irqflags;
- unsigned char zgat;
-
- if (trig_num)
- return -EINVAL;
-
- spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
- if (devpriv->ai_cmd_started) {
- /* Trigger scan by waggling CT0 gate source. */
- zgat = GAT_CONFIG(0, GAT_GND);
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- zgat = GAT_CONFIG(0, GAT_VCC);
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- }
- spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
-
- return 1;
-}
-
-static void pci230_ai_stop(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned long irqflags;
- struct comedi_cmd *cmd;
- bool started;
-
- spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
- started = devpriv->ai_cmd_started;
- devpriv->ai_cmd_started = false;
- spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
- if (!started)
- return;
- cmd = &s->async->cmd;
- if (cmd->convert_src == TRIG_TIMER) {
- /* Stop conversion rate generator. */
- pci230_cancel_ct(dev, 2);
- }
- if (cmd->scan_begin_src != TRIG_FOLLOW) {
- /* Stop scan period monostable. */
- pci230_cancel_ct(dev, 0);
- }
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- /*
- * Disable ADC interrupt and wait for interrupt routine to finish
- * running unless we are called from the interrupt routine.
- */
- devpriv->ier &= ~PCI230_INT_ADC;
- while (devpriv->intr_running && devpriv->intr_cpuid != THISCPU) {
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- }
- outb(devpriv->ier, dev->iobase + PCI230_INT_SCE);
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
- /*
- * Reset FIFO, disable FIFO and set start conversion source to none.
- * Keep se/diff and bip/uni settings.
- */
- devpriv->adccon =
- (devpriv->adccon & (PCI230_ADC_IR_MASK | PCI230_ADC_IM_MASK)) |
- PCI230_ADC_TRIG_NONE;
- outw(devpriv->adccon | PCI230_ADC_FIFO_RESET,
- devpriv->daqio + PCI230_ADCCON);
- /* Release resources. */
- pci230_release_all_resources(dev, OWNER_AICMD);
-}
-
-static void pci230_ai_start(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned long irqflags;
- unsigned short conv;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
-
- devpriv->ai_cmd_started = true;
-
- /* Enable ADC FIFO trigger level interrupt. */
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- devpriv->ier |= PCI230_INT_ADC;
- outb(devpriv->ier, dev->iobase + PCI230_INT_SCE);
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
-
- /*
- * Update conversion trigger source which is currently set
- * to CT2 output, which is currently stuck high.
- */
- switch (cmd->convert_src) {
- default:
- conv = PCI230_ADC_TRIG_NONE;
- break;
- case TRIG_TIMER:
- /* Using CT2 output. */
- conv = PCI230_ADC_TRIG_Z2CT2;
- break;
- case TRIG_EXT:
- if (cmd->convert_arg & CR_EDGE) {
- if ((cmd->convert_arg & CR_INVERT) == 0) {
- /* Trigger on +ve edge. */
- conv = PCI230_ADC_TRIG_EXTP;
- } else {
- /* Trigger on -ve edge. */
- conv = PCI230_ADC_TRIG_EXTN;
- }
- } else {
- /* Backwards compatibility. */
- if (cmd->convert_arg) {
- /* Trigger on +ve edge. */
- conv = PCI230_ADC_TRIG_EXTP;
- } else {
- /* Trigger on -ve edge. */
- conv = PCI230_ADC_TRIG_EXTN;
- }
- }
- break;
- case TRIG_INT:
- /*
- * Use CT2 output for software trigger due to problems
- * in differential mode on PCI230/260.
- */
- conv = PCI230_ADC_TRIG_Z2CT2;
- break;
- }
- devpriv->adccon = (devpriv->adccon & ~PCI230_ADC_TRIG_MASK) | conv;
- outw(devpriv->adccon, devpriv->daqio + PCI230_ADCCON);
- if (cmd->convert_src == TRIG_INT)
- async->inttrig = pci230_ai_inttrig_convert;
-
- /*
- * Update FIFO interrupt trigger level, which is currently
- * set to "full".
- */
- pci230_ai_update_fifo_trigger_level(dev, s);
- if (cmd->convert_src == TRIG_TIMER) {
- /* Update timer gates. */
- unsigned char zgat;
-
- if (cmd->scan_begin_src != TRIG_FOLLOW) {
- /*
- * Conversion timer CT2 needs to be gated by
-			 * inverted output of monostable CT0.
- */
- zgat = GAT_CONFIG(2, GAT_NOUTNM2);
- } else {
- /*
- * Conversion timer CT2 needs to be gated on
- * continuously.
- */
- zgat = GAT_CONFIG(2, GAT_VCC);
- }
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- if (cmd->scan_begin_src != TRIG_FOLLOW) {
- /* Set monostable CT0 trigger source. */
- switch (cmd->scan_begin_src) {
- default:
- zgat = GAT_CONFIG(0, GAT_VCC);
- break;
- case TRIG_EXT:
- /*
- * For CT0 on PCI230, the external trigger
- * (gate) signal comes from PPC0, which is
- * channel 16 of the DIO subdevice. The
- * application needs to configure this as an
- * input in order to use it as an external scan
- * trigger.
- */
- zgat = GAT_CONFIG(0, GAT_EXT);
- break;
- case TRIG_TIMER:
- /*
- * Monostable CT0 triggered by rising edge on
- * inverted output of CT1 (falling edge on CT1).
- */
- zgat = GAT_CONFIG(0, GAT_NOUTNM2);
- break;
- case TRIG_INT:
- /*
- * Monostable CT0 is triggered by inttrig
- * function waggling the CT0 gate source.
- */
- zgat = GAT_CONFIG(0, GAT_VCC);
- break;
- }
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- /*
- * Scan period timer CT1 needs to be
- * gated on to start counting.
- */
- zgat = GAT_CONFIG(1, GAT_VCC);
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- break;
- case TRIG_INT:
- async->inttrig = pci230_ai_inttrig_scan_begin;
- break;
- }
- }
- } else if (cmd->convert_src != TRIG_INT) {
- /* No longer need Z2-CT2. */
- pci230_release_shared(dev, RES_Z2CT2, OWNER_AICMD);
- }
-}
-
-static int pci230_ai_inttrig_start(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- s->async->inttrig = NULL;
- pci230_ai_start(dev, s);
-
- return 1;
-}
-
-static void pci230_handle_ai(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int status_fifo;
- unsigned int i;
- unsigned int nsamples;
- unsigned int fifoamount;
- unsigned short val;
-
- /* Determine number of samples to read. */
- nsamples = comedi_nsamples_left(s, PCI230_ADC_FIFOLEVEL_HALFFULL);
- if (nsamples == 0)
- return;
-
- fifoamount = 0;
- for (i = 0; i < nsamples; i++) {
- if (fifoamount == 0) {
- /* Read FIFO state. */
- status_fifo = inw(devpriv->daqio + PCI230_ADCCON);
- if (status_fifo & PCI230_ADC_FIFO_FULL_LATCHED) {
- /*
- * Report error otherwise FIFO overruns will go
- * unnoticed by the caller.
- */
- dev_err(dev->class_dev, "AI FIFO overrun\n");
- async->events |= COMEDI_CB_ERROR;
- break;
- } else if (status_fifo & PCI230_ADC_FIFO_EMPTY) {
- /* FIFO empty. */
- break;
- } else if (status_fifo & PCI230_ADC_FIFO_HALF) {
- /* FIFO half full. */
- fifoamount = PCI230_ADC_FIFOLEVEL_HALFFULL;
- } else if (devpriv->hwver > 0) {
- /* Read PCI230+/260+ ADC FIFO level. */
- fifoamount = inw(devpriv->daqio +
- PCI230P_ADCFFLEV);
- if (fifoamount == 0)
- break; /* Shouldn't happen. */
- } else {
- /* FIFO not empty. */
- fifoamount = 1;
- }
- }
-
- val = pci230_ai_read(dev);
- if (!comedi_buf_write_samples(s, &val, 1))
- break;
-
- fifoamount--;
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- break;
- }
- }
-
- /* update FIFO interrupt trigger level if still running */
- if (!(async->events & COMEDI_CB_CANCEL_MASK))
- pci230_ai_update_fifo_trigger_level(dev, s);
-}
-
-static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pci230_private *devpriv = dev->private;
- unsigned int i, chan, range, diff;
- unsigned int res_mask;
- unsigned short adccon, adcen;
- unsigned char zgat;
-
- /* Get the command. */
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
-
- /*
- * Determine which shared resources are needed.
- */
- res_mask = 0;
- /*
- * Need Z2-CT2 to supply a conversion trigger source at a high
- * logic level, even if not doing timed conversions.
- */
- res_mask |= RES_Z2CT2;
- if (cmd->scan_begin_src != TRIG_FOLLOW) {
- /* Using Z2-CT0 monostable to gate Z2-CT2 conversion timer */
- res_mask |= RES_Z2CT0;
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* Using Z2-CT1 for scan frequency */
- res_mask |= RES_Z2CT1;
- }
- }
- /* Claim resources. */
- if (!pci230_claim_shared(dev, res_mask, OWNER_AICMD))
- return -EBUSY;
-
- /*
- * Steps:
- * - Set channel scan list.
- * - Set channel gains.
- * - Enable and reset FIFO, specify uni/bip, se/diff, and set
- * start conversion source to point to something at a high logic
-	 *    level (we use the output of counter/timer 2 for this purpose).
- * - PAUSE to allow things to settle down.
- * - Reset the FIFO again because it needs resetting twice and there
- * may have been a false conversion trigger on some versions of
- * PCI230/260 due to the start conversion source being set to a
- * high logic level.
- * - Enable ADC FIFO level interrupt.
- * - Set actual conversion trigger source and FIFO interrupt trigger
- * level.
- * - If convert_src is TRIG_TIMER, set up the timers.
- */
-
- adccon = PCI230_ADC_FIFO_EN;
- adcen = 0;
-
- if (CR_AREF(cmd->chanlist[0]) == AREF_DIFF) {
- /* Differential - all channels must be differential. */
- diff = 1;
- adccon |= PCI230_ADC_IM_DIF;
- } else {
- /* Single ended - all channels must be single-ended. */
- diff = 0;
- adccon |= PCI230_ADC_IM_SE;
- }
-
- range = CR_RANGE(cmd->chanlist[0]);
- devpriv->ai_bipolar = comedi_range_is_bipolar(s, range);
- if (devpriv->ai_bipolar)
- adccon |= PCI230_ADC_IR_BIP;
- else
- adccon |= PCI230_ADC_IR_UNI;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int gainshift;
-
- chan = CR_CHAN(cmd->chanlist[i]);
- range = CR_RANGE(cmd->chanlist[i]);
- if (diff) {
- gainshift = 2 * chan;
- if (devpriv->hwver == 0) {
- /*
- * Original PCI230/260 expects both inputs of
- * the differential channel to be enabled.
- */
- adcen |= 3 << gainshift;
- } else {
- /*
- * PCI230+/260+ expects only one input of the
- * differential channel to be enabled.
- */
- adcen |= 1 << gainshift;
- }
- } else {
- gainshift = chan & ~1;
- adcen |= 1 << chan;
- }
- devpriv->adcg = (devpriv->adcg & ~(3 << gainshift)) |
- (pci230_ai_gain[range] << gainshift);
- }
-
- /* Set channel scan list. */
- outw(adcen, devpriv->daqio + PCI230_ADCEN);
-
- /* Set channel gains. */
- outw(devpriv->adcg, devpriv->daqio + PCI230_ADCG);
-
- /*
- * Set counter/timer 2 output high for use as the initial start
- * conversion source.
- */
- comedi_8254_set_mode(dev->pacer, 2, I8254_MODE1);
-
- /*
- * Temporarily use CT2 output as conversion trigger source and
- * temporarily set FIFO interrupt trigger level to 'full'.
- */
- adccon |= PCI230_ADC_INT_FIFO_FULL | PCI230_ADC_TRIG_Z2CT2;
-
- /*
- * Enable and reset FIFO, specify FIFO trigger level full, specify
- * uni/bip, se/diff, and temporarily set the start conversion source
- * to CT2 output. Note that CT2 output is currently high, and this
- * will produce a false conversion trigger on some versions of the
- * PCI230/260, but that will be dealt with later.
- */
- devpriv->adccon = adccon;
- outw(adccon | PCI230_ADC_FIFO_RESET, devpriv->daqio + PCI230_ADCCON);
-
- /*
- * Delay -
-	 * Failure to include this will result in the first few channels' worth
-	 * of data being corrupt, normally manifesting itself as large negative
- * voltages. It seems the board needs time to settle between the first
- * FIFO reset (above) and the second FIFO reset (below). Setting the
- * channel gains and scan list _before_ the first FIFO reset also
- * helps, though only slightly.
- */
- usleep_range(25, 100);
-
- /* Reset FIFO again. */
- outw(adccon | PCI230_ADC_FIFO_RESET, devpriv->daqio + PCI230_ADCCON);
-
- if (cmd->convert_src == TRIG_TIMER) {
- /*
- * Set up CT2 as conversion timer, but gate it off for now.
- * Note, counter/timer output 2 can be monitored on the
- * connector: PCI230 pin 21, PCI260 pin 18.
- */
- zgat = GAT_CONFIG(2, GAT_GND);
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- /* Set counter/timer 2 to the specified conversion period. */
- pci230_ct_setup_ns_mode(dev, 2, I8254_MODE3, cmd->convert_arg,
- cmd->flags);
- if (cmd->scan_begin_src != TRIG_FOLLOW) {
- /*
- * Set up monostable on CT0 output for scan timing. A
- * rising edge on the trigger (gate) input of CT0 will
- * trigger the monostable, causing its output to go low
- * for the configured period. The period depends on
- * the conversion period and the number of conversions
- * in the scan.
- *
- * Set the trigger high before setting up the
- * monostable to stop it triggering. The trigger
- * source will be changed later.
- */
- zgat = GAT_CONFIG(0, GAT_VCC);
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- pci230_ct_setup_ns_mode(dev, 0, I8254_MODE1,
- ((uint64_t)cmd->convert_arg *
- cmd->scan_end_arg),
- CMDF_ROUND_UP);
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /*
- * Monostable on CT0 will be triggered by
- * output of CT1 at configured scan frequency.
- *
- * Set up CT1 but gate it off for now.
- */
- zgat = GAT_CONFIG(1, GAT_GND);
- outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
- cmd->scan_begin_arg,
- cmd->flags);
- }
- }
- }
-
- if (cmd->start_src == TRIG_INT)
- s->async->inttrig = pci230_ai_inttrig_start;
- else /* TRIG_NOW */
- pci230_ai_start(dev, s);
-
- return 0;
-}
-
-static int pci230_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- pci230_ai_stop(dev, s);
- return 0;
-}
-
-/* Interrupt handler */
-static irqreturn_t pci230_interrupt(int irq, void *d)
-{
- unsigned char status_int, valid_status_int, temp_ier;
- struct comedi_device *dev = (struct comedi_device *)d;
- struct pci230_private *devpriv = dev->private;
- struct comedi_subdevice *s_ao = dev->write_subdev;
- struct comedi_subdevice *s_ai = dev->read_subdev;
- unsigned long irqflags;
-
- /* Read interrupt status/enable register. */
- status_int = inb(dev->iobase + PCI230_INT_STAT);
-
- if (status_int == PCI230_INT_DISABLE)
- return IRQ_NONE;
-
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- valid_status_int = devpriv->ier & status_int;
- /*
- * Disable triggered interrupts.
-	 * (Only those interrupts that need re-enabling are re-enabled later
-	 * in the handler.)
- */
- temp_ier = devpriv->ier & ~status_int;
- outb(temp_ier, dev->iobase + PCI230_INT_SCE);
- devpriv->intr_running = true;
- devpriv->intr_cpuid = THISCPU;
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
-
- /*
- * Check the source of interrupt and handle it.
- * The PCI230 can cope with concurrent ADC, DAC, PPI C0 and C3
-	 * interrupts. However, at present Comedi (0.7.60) does not allow
- * concurrent execution of commands, instructions or a mixture of the
- * two.
- */
-
- if (valid_status_int & PCI230_INT_ZCLK_CT1)
- pci230_handle_ao_nofifo(dev, s_ao);
-
- if (valid_status_int & PCI230P2_INT_DAC)
- pci230_handle_ao_fifo(dev, s_ao);
-
- if (valid_status_int & PCI230_INT_ADC)
- pci230_handle_ai(dev, s_ai);
-
- /* Reenable interrupts. */
- spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
- if (devpriv->ier != temp_ier)
- outb(devpriv->ier, dev->iobase + PCI230_INT_SCE);
- devpriv->intr_running = false;
- spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
-
- comedi_handle_events(dev, s_ao);
- comedi_handle_events(dev, s_ai);
-
- return IRQ_HANDLED;
-}
-
-/* Check if PCI device matches a specific board. */
-static bool pci230_match_pci_board(const struct pci230_board *board,
- struct pci_dev *pci_dev)
-{
- /* assume pci_dev->device != PCI_DEVICE_ID_INVALID */
- if (board->id != pci_dev->device)
- return false;
- if (board->min_hwver == 0)
- return true;
- /* Looking for a '+' model. First check length of registers. */
- if (pci_resource_len(pci_dev, 3) < 32)
- return false; /* Not a '+' model. */
- /*
- * TODO: temporarily enable PCI device and read the hardware version
- * register. For now, assume it's okay.
- */
- return true;
-}
-
-/* Look for board matching PCI device. */
-static const struct pci230_board *pci230_find_pci_board(struct pci_dev *pci_dev)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(pci230_boards); i++)
- if (pci230_match_pci_board(&pci230_boards[i], pci_dev))
- return &pci230_boards[i];
- return NULL;
-}
-
-static int pci230_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pci_dev = comedi_to_pci_dev(dev);
- const struct pci230_board *board;
- struct pci230_private *devpriv;
- struct comedi_subdevice *s;
- int rc;
-
- dev_info(dev->class_dev, "amplc_pci230: attach pci %s\n",
- pci_name(pci_dev));
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- spin_lock_init(&devpriv->isr_spinlock);
- spin_lock_init(&devpriv->res_spinlock);
- spin_lock_init(&devpriv->ai_stop_spinlock);
- spin_lock_init(&devpriv->ao_stop_spinlock);
-
- board = pci230_find_pci_board(pci_dev);
- if (!board) {
- dev_err(dev->class_dev,
- "amplc_pci230: BUG! cannot determine board type!\n");
- return -EINVAL;
- }
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- rc = comedi_pci_enable(dev);
- if (rc)
- return rc;
-
- /*
- * Read base addresses of the PCI230's two I/O regions from PCI
- * configuration register.
- */
- dev->iobase = pci_resource_start(pci_dev, 2);
- devpriv->daqio = pci_resource_start(pci_dev, 3);
- dev_dbg(dev->class_dev,
- "%s I/O region 1 0x%04lx I/O region 2 0x%04lx\n",
- dev->board_name, dev->iobase, devpriv->daqio);
- /* Read bits of DACCON register - only the output range. */
- devpriv->daccon = inw(devpriv->daqio + PCI230_DACCON) &
- PCI230_DAC_OR_MASK;
- /*
- * Read hardware version register and set extended function register
- * if they exist.
- */
- if (pci_resource_len(pci_dev, 3) >= 32) {
- unsigned short extfunc = 0;
-
- devpriv->hwver = inw(devpriv->daqio + PCI230P_HWVER);
- if (devpriv->hwver < board->min_hwver) {
- dev_err(dev->class_dev,
- "%s - bad hardware version - got %u, need %u\n",
- dev->board_name, devpriv->hwver,
- board->min_hwver);
- return -EIO;
- }
- if (devpriv->hwver > 0) {
- if (!board->have_dio) {
- /*
- * No DIO ports. Route counters' external gates
- * to the EXTTRIG signal (PCI260+ pin 17).
- * (Otherwise, they would be routed to DIO
- * inputs PC0, PC1 and PC2 which don't exist
- * on PCI260[+].)
- */
- extfunc |= PCI230P_EXTFUNC_GAT_EXTTRIG;
- }
- if (board->ao_bits && devpriv->hwver >= 2) {
- /* Enable DAC FIFO functionality. */
- extfunc |= PCI230P2_EXTFUNC_DACFIFO;
- }
- }
- outw(extfunc, devpriv->daqio + PCI230P_EXTFUNC);
- if (extfunc & PCI230P2_EXTFUNC_DACFIFO) {
- /*
- * Temporarily enable DAC FIFO, reset it and disable
- * FIFO wraparound.
- */
- outw(devpriv->daccon | PCI230P2_DAC_FIFO_EN |
- PCI230P2_DAC_FIFO_RESET,
- devpriv->daqio + PCI230_DACCON);
- /* Clear DAC FIFO channel enable register. */
- outw(0, devpriv->daqio + PCI230P2_DACEN);
- /* Disable DAC FIFO. */
- outw(devpriv->daccon, devpriv->daqio + PCI230_DACCON);
- }
- }
- /* Disable board's interrupts. */
- outb(0, dev->iobase + PCI230_INT_SCE);
- /* Set ADC to a reasonable state. */
- devpriv->adcg = 0;
- devpriv->adccon = PCI230_ADC_TRIG_NONE | PCI230_ADC_IM_SE |
- PCI230_ADC_IR_BIP;
- outw(1 << 0, devpriv->daqio + PCI230_ADCEN);
- outw(devpriv->adcg, devpriv->daqio + PCI230_ADCG);
- outw(devpriv->adccon | PCI230_ADC_FIFO_RESET,
- devpriv->daqio + PCI230_ADCCON);
-
- if (pci_dev->irq) {
- rc = request_irq(pci_dev->irq, pci230_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (rc == 0)
- dev->irq = pci_dev->irq;
- }
-
- dev->pacer = comedi_8254_init(dev->iobase + PCI230_Z2_CT_BASE,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- rc = comedi_alloc_subdevices(dev, 3);
- if (rc)
- return rc;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
- s->n_chan = 16;
- s->maxdata = (1 << board->ai_bits) - 1;
- s->range_table = &pci230_ai_range;
- s->insn_read = pci230_ai_insn_read;
- s->len_chanlist = 256; /* but there are restrictions. */
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmd = pci230_ai_cmd;
- s->do_cmdtest = pci230_ai_cmdtest;
- s->cancel = pci230_ai_cancel;
- }
-
- s = &dev->subdevices[1];
- /* analog output subdevice */
- if (board->ao_bits) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = 2;
- s->maxdata = (1 << board->ao_bits) - 1;
- s->range_table = &pci230_ao_range;
- s->insn_write = pci230_ao_insn_write;
- s->len_chanlist = 2;
- if (dev->irq) {
- dev->write_subdev = s;
- s->subdev_flags |= SDF_CMD_WRITE;
- s->do_cmd = pci230_ao_cmd;
- s->do_cmdtest = pci230_ao_cmdtest;
- s->cancel = pci230_ao_cancel;
- }
-
- rc = comedi_alloc_subdev_readback(s);
- if (rc)
- return rc;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[2];
- /* digital i/o subdevice */
- if (board->have_dio) {
- rc = subdev_8255_init(dev, s, NULL, PCI230_PPI_X_BASE);
- if (rc)
- return rc;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- return 0;
-}
-
-static struct comedi_driver amplc_pci230_driver = {
- .driver_name = "amplc_pci230",
- .module = THIS_MODULE,
- .auto_attach = pci230_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int amplc_pci230_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &amplc_pci230_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id amplc_pci230_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, amplc_pci230_pci_table);
-
-static struct pci_driver amplc_pci230_pci_driver = {
- .name = "amplc_pci230",
- .id_table = amplc_pci230_pci_table,
- .probe = amplc_pci230_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(amplc_pci230_driver, amplc_pci230_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon PCI230(+) and PCI260(+)");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pci236.c b/drivers/staging/comedi/drivers/amplc_pci236.c
deleted file mode 100644
index 31cc38b4bcad..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pci236.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * comedi/drivers/amplc_pci236.c
- * Driver for Amplicon PCI236 DIO boards.
- *
- * Copyright (C) 2002-2014 MEV Ltd. <http://www.mev.co.uk/>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-/*
- * Driver: amplc_pci236
- * Description: Amplicon PCI236
- * Author: Ian Abbott <abbotti@mev.co.uk>
- * Devices: [Amplicon] PCI236 (amplc_pci236)
- * Updated: Fri, 25 Jul 2014 15:32:40 +0000
- * Status: works
- *
- * Configuration options:
- * none
- *
- * Manual configuration of PCI board (PCI236) is not supported; it is
- * configured automatically.
- *
- * The PCI236 board has a single 8255 appearing as subdevice 0.
- *
- * Subdevice 1 pretends to be a digital input device, but it always
- * returns 0 when read. However, if you run a command with
- * scan_begin_src=TRIG_EXT, a rising edge on port C bit 3 acts as an
- * external trigger, which can be used to wake up tasks. This is like
- * the comedi_parport device. If no interrupt is connected, then
- * subdevice 1 is unused.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "amplc_pc236.h"
-#include "plx9052.h"
-
-/* Disable, and clear, interrupts */
-#define PCI236_INTR_DISABLE (PLX9052_INTCSR_LI1POL | \
- PLX9052_INTCSR_LI2POL | \
- PLX9052_INTCSR_LI1SEL | \
- PLX9052_INTCSR_LI1CLRINT)
-
-/* Enable, and clear, interrupts */
-#define PCI236_INTR_ENABLE (PLX9052_INTCSR_LI1ENAB | \
- PLX9052_INTCSR_LI1POL | \
- PLX9052_INTCSR_LI2POL | \
- PLX9052_INTCSR_PCIENAB | \
- PLX9052_INTCSR_LI1SEL | \
- PLX9052_INTCSR_LI1CLRINT)
-
-static void pci236_intr_update_cb(struct comedi_device *dev, bool enable)
-{
- struct pc236_private *devpriv = dev->private;
-
- /* this will also clear the "local interrupt 1" latch */
- outl(enable ? PCI236_INTR_ENABLE : PCI236_INTR_DISABLE,
- devpriv->lcr_iobase + PLX9052_INTCSR);
-}
-
-static bool pci236_intr_chk_clr_cb(struct comedi_device *dev)
-{
- struct pc236_private *devpriv = dev->private;
-
- /* check if interrupt occurred */
- if (!(inl(devpriv->lcr_iobase + PLX9052_INTCSR) &
- PLX9052_INTCSR_LI1STAT))
- return false;
- /* clear the interrupt */
- pci236_intr_update_cb(dev, devpriv->enable_irq);
- return true;
-}
-
-static const struct pc236_board pc236_pci_board = {
- .name = "pci236",
- .intr_update_cb = pci236_intr_update_cb,
- .intr_chk_clr_cb = pci236_intr_chk_clr_cb,
-};
-
-static int pci236_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pci_dev = comedi_to_pci_dev(dev);
- struct pc236_private *devpriv;
- unsigned long iobase;
- int ret;
-
- dev_info(dev->class_dev, "amplc_pci236: attach pci %s\n",
- pci_name(pci_dev));
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- dev->board_ptr = &pc236_pci_board;
- dev->board_name = pc236_pci_board.name;
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv->lcr_iobase = pci_resource_start(pci_dev, 1);
- iobase = pci_resource_start(pci_dev, 2);
- return amplc_pc236_common_attach(dev, iobase, pci_dev->irq,
- IRQF_SHARED);
-}
-
-static struct comedi_driver amplc_pci236_driver = {
- .driver_name = "amplc_pci236",
- .module = THIS_MODULE,
- .auto_attach = pci236_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static const struct pci_device_id pci236_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, 0x0009) },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, pci236_pci_table);
-
-static int amplc_pci236_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &amplc_pci236_driver,
- id->driver_data);
-}
-
-static struct pci_driver amplc_pci236_pci_driver = {
- .name = "amplc_pci236",
- .id_table = pci236_pci_table,
- .probe = &amplc_pci236_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-
-module_comedi_pci_driver(amplc_pci236_driver, amplc_pci236_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon PCI236 DIO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
deleted file mode 100644
index b6768aa90547..000000000000
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- comedi/drivers/amplc_pci263.c
- Driver for Amplicon PCI263 relay board.
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: amplc_pci263
-Description: Amplicon PCI263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PCI263 (amplc_pci263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options: not applicable, uses PCI auto config
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-static int pci263_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pci263_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pci_dev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->iobase = pci_resource_start(pci_dev, 2);
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci263_do_insn_bits;
- /* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
-
- return 0;
-}
-
-static struct comedi_driver amplc_pci263_driver = {
- .driver_name = "amplc_pci263",
- .module = THIS_MODULE,
- .auto_attach = pci263_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static const struct pci_device_id pci263_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, 0x000c) },
- {0}
-};
-MODULE_DEVICE_TABLE(pci, pci263_pci_table);
-
-static int amplc_pci263_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &amplc_pci263_driver,
- id->driver_data);
-}
-
-static struct pci_driver amplc_pci263_pci_driver = {
- .name = "amplc_pci263",
- .id_table = pci263_pci_table,
- .probe = &amplc_pci263_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(amplc_pci263_driver, amplc_pci263_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Amplicon PCI263 relay board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
deleted file mode 100644
index 1a109e30d8ff..000000000000
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * c6xdigio.c
- * Hardware driver for Mechatronic Systems Inc. C6x_DIGIO DSP daughter card.
- * http://web.archive.org/web/%2A/http://robot0.ge.uiuc.edu/~spong/mecha/
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1999 Dan Block
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: c6xdigio
- * Description: Mechatronic Systems Inc. C6x_DIGIO DSP daughter card
- * Author: Dan Block
- * Status: unknown
- * Devices: [Mechatronic Systems Inc.] C6x_DIGIO DSP daughter card (c6xdigio)
- * Updated: Sun Nov 20 20:18:34 EST 2005
- *
- * Configuration Options:
- * [0] - base address
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/timer.h>
-#include <linux/io.h>
-#include <linux/pnp.h>
-
-#include "../comedidev.h"
-
-/*
- * Register I/O map
- */
-#define C6XDIGIO_DATA_REG 0x00
-#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4)
-#define C6XDIGIO_DATA_PWM (1 << 5)
-#define C6XDIGIO_DATA_ENCODER (1 << 6)
-#define C6XDIGIO_STATUS_REG 0x01
-#define C6XDIGIO_CTRL_REG 0x02
-
-#define C6XDIGIO_TIME_OUT 20
-
-static int c6xdigio_chk_status(struct comedi_device *dev, unsigned long context)
-{
- unsigned int status;
- int timeout = 0;
-
- do {
- status = inb(dev->iobase + C6XDIGIO_STATUS_REG);
- if ((status & 0x80) != context)
- return 0;
- timeout++;
- } while (timeout < C6XDIGIO_TIME_OUT);
-
- return -EBUSY;
-}
-
-static int c6xdigio_write_data(struct comedi_device *dev,
- unsigned int val, unsigned int status)
-{
- outb_p(val, dev->iobase + C6XDIGIO_DATA_REG);
- return c6xdigio_chk_status(dev, status);
-}
-
-static int c6xdigio_get_encoder_bits(struct comedi_device *dev,
- unsigned int *bits,
- unsigned int cmd,
- unsigned int status)
-{
- unsigned int val;
-
- val = inb(dev->iobase + C6XDIGIO_STATUS_REG);
- val >>= 3;
- val &= 0x07;
-
- *bits = val;
-
- return c6xdigio_write_data(dev, cmd, status);
-}
-
-static void c6xdigio_pwm_write(struct comedi_device *dev,
- unsigned int chan, unsigned int val)
-{
- unsigned int cmd = C6XDIGIO_DATA_PWM | C6XDIGIO_DATA_CHAN(chan);
- unsigned int bits;
-
- if (val > 498)
- val = 498;
- if (val < 2)
- val = 2;
-
- bits = (val >> 0) & 0x03;
- c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
- bits = (val >> 2) & 0x03;
- c6xdigio_write_data(dev, cmd | bits | (1 << 2), 0x80);
- bits = (val >> 4) & 0x03;
- c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
- bits = (val >> 6) & 0x03;
- c6xdigio_write_data(dev, cmd | bits | (1 << 2), 0x80);
- bits = (val >> 8) & 0x03;
- c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
-
- c6xdigio_write_data(dev, 0x00, 0x80);
-}
-
-static int c6xdigio_encoder_read(struct comedi_device *dev,
- unsigned int chan)
-{
- unsigned int cmd = C6XDIGIO_DATA_ENCODER | C6XDIGIO_DATA_CHAN(chan);
- unsigned int val = 0;
- unsigned int bits;
-
- c6xdigio_write_data(dev, cmd, 0x00);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
- val |= (bits << 0);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
- val |= (bits << 3);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
- val |= (bits << 6);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
- val |= (bits << 9);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
- val |= (bits << 12);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
- val |= (bits << 15);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
- val |= (bits << 18);
-
- c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
- val |= (bits << 21);
-
- c6xdigio_write_data(dev, 0x00, 0x80);
-
- return val;
-}
-
-static int c6xdigio_pwm_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = (s->state >> (16 * chan)) & 0xffff;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- c6xdigio_pwm_write(dev, chan, val);
- }
-
- /*
- * There are only 2 PWM channels and they have a maxdata of 500.
- * Instead of allocating private data to save the values in for
- * readback this driver just packs the values for the two channels
- * in the s->state.
- */
-	s->state &= ~(0xffff << (16 * chan));
- s->state |= (val << (16 * chan));
-
- return insn->n;
-}
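
A minimal stand-alone sketch of the s->state layout used above (channel 0 in
bits 0..15, channel 1 in bits 16..31); the helper names are illustrative only
and are not part of the driver:

	static inline unsigned int pwm_state_pack(unsigned int state,
						  unsigned int chan,
						  unsigned int val)
	{
		/* clear this channel's 16-bit field, keep the other channel */
		state &= ~(0xffffU << (16 * chan));
		/* store the new value in this channel's field */
		return state | ((val & 0xffff) << (16 * chan));
	}

	static inline unsigned int pwm_state_unpack(unsigned int state,
						    unsigned int chan)
	{
		return (state >> (16 * chan)) & 0xffff;
	}
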
-
-static int c6xdigio_pwm_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- int i;
-
- val = (s->state >> (16 * chan)) & 0xffff;
-
- for (i = 0; i < insn->n; i++)
- data[i] = val;
-
- return insn->n;
-}
-
-static int c6xdigio_encoder_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = c6xdigio_encoder_read(dev, chan);
-
- /* munge two's complement value to offset binary */
- data[i] = comedi_offset_munge(s, val);
- }
-
- return insn->n;
-}
-
-static void c6xdigio_init(struct comedi_device *dev)
-{
- /* Initialize the PWM */
- c6xdigio_write_data(dev, 0x70, 0x00);
- c6xdigio_write_data(dev, 0x74, 0x80);
- c6xdigio_write_data(dev, 0x70, 0x00);
- c6xdigio_write_data(dev, 0x00, 0x80);
-
- /* Reset the encoders */
- c6xdigio_write_data(dev, 0x68, 0x00);
- c6xdigio_write_data(dev, 0x6c, 0x80);
- c6xdigio_write_data(dev, 0x68, 0x00);
- c6xdigio_write_data(dev, 0x00, 0x80);
-}
-
-static const struct pnp_device_id c6xdigio_pnp_tbl[] = {
- /* Standard LPT Printer Port */
- {.id = "PNP0400", .driver_data = 0},
- /* ECP Printer Port */
- {.id = "PNP0401", .driver_data = 0},
- {}
-};
-
-static struct pnp_driver c6xdigio_pnp_driver = {
- .name = "c6xdigio",
- .id_table = c6xdigio_pnp_tbl,
-};
-
-static int c6xdigio_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x03);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- /* Make sure that PnP ports get activated */
- pnp_register_driver(&c6xdigio_pnp_driver);
-
- s = &dev->subdevices[0];
- /* pwm output subdevice */
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 500;
- s->range_table = &range_unknown;
- s->insn_write = c6xdigio_pwm_insn_write;
- s->insn_read = c6xdigio_pwm_insn_read;
-
- s = &dev->subdevices[1];
- /* encoder (counter) subdevice */
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
- s->n_chan = 2;
- s->maxdata = 0xffffff;
- s->range_table = &range_unknown;
- s->insn_read = c6xdigio_encoder_insn_read;
-
-	/*
-	 * Call this init anyway, but more than likely the DSP board
-	 * will not be connected when the device driver is loaded.
-	 */
- c6xdigio_init(dev);
-
- return 0;
-}
-
-static void c6xdigio_detach(struct comedi_device *dev)
-{
- comedi_legacy_detach(dev);
- pnp_unregister_driver(&c6xdigio_pnp_driver);
-}
-
-static struct comedi_driver c6xdigio_driver = {
- .driver_name = "c6xdigio",
- .module = THIS_MODULE,
- .attach = c6xdigio_attach,
- .detach = c6xdigio_detach,
-};
-module_comedi_driver(c6xdigio_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for the C6x_DIGIO DSP daughter card");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
deleted file mode 100644
index ae84f2c0cc9c..000000000000
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- comedi/drivers/das16cs.c
- Driver for Computer Boards PC-CARD DAS16/16.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000, 2001, 2002 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- PCMCIA support code for this driver is adapted from the dummy_cs.c
- driver of the Linux PCMCIA Card Services package.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
-*/
-/*
-Driver: cb_das16_cs
-Description: Computer Boards PC-CARD DAS16/16
-Devices: [ComputerBoards] PC-CARD DAS16/16 (cb_das16_cs), PC-CARD DAS16/16-AO
-Author: ds
-Updated: Mon, 04 Nov 2002 20:04:21 -0800
-Status: experimental
-*/
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include "../comedi_pcmcia.h"
-
-#include "comedi_8254.h"
-
-#define DAS16CS_ADC_DATA 0
-#define DAS16CS_DIO_MUX 2
-#define DAS16CS_MISC1 4
-#define DAS16CS_MISC2 6
-#define DAS16CS_TIMER_BASE 8
-#define DAS16CS_DIO 16
-
-struct das16cs_board {
- const char *name;
- int device_id;
- int n_ao_chans;
-};
-
-static const struct das16cs_board das16cs_boards[] = {
- {
- .name = "PC-CARD DAS16/16-AO",
- .device_id = 0x0039,
- .n_ao_chans = 2,
- }, {
- .name = "PCM-DAS16s/16",
- .device_id = 0x4009,
- .n_ao_chans = 0,
- }, {
- .name = "PC-CARD DAS16/16",
- .device_id = 0x0000, /* unknown */
- .n_ao_chans = 0,
- },
-};
-
-struct das16cs_private {
- unsigned short status1;
- unsigned short status2;
-};
-
-static const struct comedi_lrange das16cs_ai_range = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- }
-};
-
-static int das16cs_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + DAS16CS_MISC1);
- if (status & 0x0080)
- return 0;
- return -EBUSY;
-}
-
-static int das16cs_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct das16cs_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int range = CR_RANGE(insn->chanspec);
- int aref = CR_AREF(insn->chanspec);
- int ret;
- int i;
-
- outw(chan, dev->iobase + DAS16CS_DIO_MUX);
-
- devpriv->status1 &= ~0xf320;
- devpriv->status1 |= (aref == AREF_DIFF) ? 0 : 0x0020;
- outw(devpriv->status1, dev->iobase + DAS16CS_MISC1);
-
- devpriv->status2 &= ~0xff00;
- switch (range) {
- case 0:
- devpriv->status2 |= 0x800;
- break;
- case 1:
- devpriv->status2 |= 0x000;
- break;
- case 2:
- devpriv->status2 |= 0x100;
- break;
- case 3:
- devpriv->status2 |= 0x200;
- break;
- }
- outw(devpriv->status2, dev->iobase + DAS16CS_MISC2);
-
- for (i = 0; i < insn->n; i++) {
- outw(0, dev->iobase + DAS16CS_ADC_DATA);
-
- ret = comedi_timeout(dev, s, insn, das16cs_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[i] = inw(dev->iobase + DAS16CS_ADC_DATA);
- }
-
- return i;
-}
-
-static int das16cs_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct das16cs_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- unsigned short status1;
- int bit;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- outw(devpriv->status1, dev->iobase + DAS16CS_MISC1);
- udelay(1);
-
- status1 = devpriv->status1 & ~0xf;
- if (chan)
- status1 |= 0x0001;
- else
- status1 |= 0x0008;
-
- outw(status1, dev->iobase + DAS16CS_MISC1);
- udelay(1);
-
- for (bit = 15; bit >= 0; bit--) {
- int b = (val >> bit) & 0x1;
-
- b <<= 1;
- outw(status1 | b | 0x0000, dev->iobase + DAS16CS_MISC1);
- udelay(1);
- outw(status1 | b | 0x0004, dev->iobase + DAS16CS_MISC1);
- udelay(1);
- }
- /*
- * Make both DAC0CS and DAC1CS high to load
-		 * the new data and update the analog output
- */
- outw(status1 | 0x9, dev->iobase + DAS16CS_MISC1);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int das16cs_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + DAS16CS_DIO);
-
- data[1] = inw(dev->iobase + DAS16CS_DIO);
-
- return insn->n;
-}
-
-static int das16cs_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct das16cs_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 4)
- mask = 0x0f;
- else
- mask = 0xf0;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- devpriv->status2 &= ~0x00c0;
- devpriv->status2 |= (s->io_bits & 0xf0) ? 0x0080 : 0;
- devpriv->status2 |= (s->io_bits & 0x0f) ? 0x0040 : 0;
-
- outw(devpriv->status2, dev->iobase + DAS16CS_MISC2);
-
- return insn->n;
-}
-
-static const void *das16cs_find_boardinfo(struct comedi_device *dev,
- struct pcmcia_device *link)
-{
- const struct das16cs_board *board;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(das16cs_boards); i++) {
- board = &das16cs_boards[i];
- if (board->device_id == link->card_id)
- return board;
- }
-
- return NULL;
-}
-
-static int das16cs_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- const struct das16cs_board *board;
- struct das16cs_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- board = das16cs_find_boardinfo(dev, link);
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
- ret = comedi_pcmcia_enable(dev, NULL);
- if (ret)
- return ret;
- dev->iobase = link->resource[0]->start;
-
- link->priv = dev;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- dev->pacer = comedi_8254_init(dev->iobase + DAS16CS_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO16, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
- s->n_chan = 16;
- s->maxdata = 0xffff;
- s->range_table = &das16cs_ai_range;
- s->len_chanlist = 16;
- s->insn_read = das16cs_ai_rinsn;
-
- s = &dev->subdevices[1];
- /* analog output subdevice */
- if (board->n_ao_chans) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_ao_chans;
- s->maxdata = 0xffff;
- s->range_table = &range_bipolar10;
- s->insn_write = &das16cs_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[2];
- /* digital i/o subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16cs_dio_insn_bits;
- s->insn_config = das16cs_dio_insn_config;
-
- return 0;
-}
-
-static struct comedi_driver driver_das16cs = {
- .driver_name = "cb_das16_cs",
- .module = THIS_MODULE,
- .auto_attach = das16cs_auto_attach,
- .detach = comedi_pcmcia_disable,
-};
-
-static int das16cs_pcmcia_attach(struct pcmcia_device *link)
-{
- return comedi_pcmcia_auto_config(link, &driver_das16cs);
-}
-
-static const struct pcmcia_device_id das16cs_id_table[] = {
- PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x0039),
- PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x4009),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, das16cs_id_table);
-
-static struct pcmcia_driver das16cs_driver = {
- .name = "cb_das16_cs",
- .owner = THIS_MODULE,
- .id_table = das16cs_id_table,
- .probe = das16cs_pcmcia_attach,
- .remove = comedi_pcmcia_auto_unconfig,
-};
-module_comedi_pcmcia_driver(driver_das16cs, das16cs_driver);
-
-MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
-MODULE_DESCRIPTION("Comedi driver for Computer Boards PC-CARD DAS16/16");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
deleted file mode 100644
index b43e836575fd..000000000000
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- comedi/drivers/cb_pcidas.c
-
- Developed by Ivan Martinez and Frank Mori Hess, with valuable help from
- David Schleef and the rest of the Comedi developers community.
-
- Copyright (C) 2001-2003 Ivan Martinez <imr@oersted.dtu.dk>
- Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: cb_pcidas
-Description: Measurement Computing PCI-DAS series
- with the AMCC S5933 PCI controller
-Author: Ivan Martinez <imr@oersted.dtu.dk>,
- Frank Mori Hess <fmhess@users.sourceforge.net>
-Updated: 2003-3-11
-Devices: [Measurement Computing] PCI-DAS1602/16 (cb_pcidas),
- PCI-DAS1602/16jr, PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr,
-  PCI-DAS1000, PCI-DAS1001, PCI-DAS1002
-
-Status:
- There are many reports of the driver being used with most of the
-  supported cards. Although no detailed log is maintained, the driver
-  can be considered well tested and stable.
-
- The boards may be autocalibrated using the comedi_calibrate
- utility.
-
-Configuration options: not applicable, uses PCI auto config
-
-For commands, the scanned channels must be consecutive
-(i.e. 4-5-6-7, 2-3-4,...), and must all have the same
-range and aref.
-
-AI Triggering:
- For start_src == TRIG_EXT, the A/D EXTERNAL TRIGGER IN (pin 45) is used.
- For 1602 series, the start_arg is interpreted as follows:
- start_arg == 0 => gated trigger (level high)
- start_arg == CR_INVERT => gated trigger (level low)
- start_arg == CR_EDGE => Rising edge
- start_arg == CR_EDGE | CR_INVERT => Falling edge
- For the other boards the trigger will be done on rising edge
-*/
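
A minimal user-space sketch of the external-trigger options described above,
using the comedilib command interface. The subdevice number, channels, range
index and timing values are illustrative assumptions, not taken from this
driver:

	#include <string.h>
	#include <comedilib.h>

	/* Illustrative only: request a rising-edge externally triggered AI command. */
	static int setup_ext_trigger_cmd(comedi_t *dev)
	{
		static unsigned int chanlist[2];
		comedi_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));

		/* consecutive channels, same range and aref, as required above */
		chanlist[0] = CR_PACK(2, 0, AREF_GROUND);
		chanlist[1] = CR_PACK(3, 0, AREF_GROUND);

		cmd.subdev = 0;			/* assumed AI subdevice */
		cmd.start_src = TRIG_EXT;
		cmd.start_arg = CR_EDGE;	/* CR_EDGE | CR_INVERT => falling edge */
		cmd.scan_begin_src = TRIG_FOLLOW;
		cmd.scan_begin_arg = 0;
		cmd.convert_src = TRIG_TIMER;
		cmd.convert_arg = 10000;	/* 10 us between conversions */
		cmd.scan_end_src = TRIG_COUNT;
		cmd.scan_end_arg = 2;
		cmd.stop_src = TRIG_NONE;
		cmd.stop_arg = 0;
		cmd.chanlist = chanlist;
		cmd.chanlist_len = 2;

		/* let the driver's cmdtest round the timing arguments */
		return comedi_command_test(dev, &cmd);
	}
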
-/*
-
-TODO:
-
-analog triggering on 1602 series
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
-#include "amcc_s5933.h"
-
-#define AI_BUFFER_SIZE 1024 /* max ai fifo size */
-#define AO_BUFFER_SIZE 1024 /* max ao fifo size */
-#define NUM_CHANNELS_8800 8
-#define NUM_CHANNELS_7376 1
-#define NUM_CHANNELS_8402 2
-#define NUM_CHANNELS_DAC08 1
-
-/* Control/Status registers */
-#define INT_ADCFIFO 0 /* INTERRUPT / ADC FIFO register */
-#define INT_EOS 0x1 /* int end of scan */
-#define INT_FHF 0x2 /* int fifo half full */
-#define INT_FNE 0x3 /* int fifo not empty */
-#define INT_MASK 0x3 /* mask of int select bits */
-#define INTE 0x4 /* int enable */
-#define DAHFIE 0x8 /* dac half full int enable */
-#define EOAIE 0x10 /* end of acq. int enable */
-#define DAHFI 0x20 /* dac half full status / clear */
-#define EOAI 0x40 /* end of acq. int status / clear */
-#define INT 0x80 /* int status / clear */
-#define EOBI 0x200 /* end of burst int status */
-#define ADHFI 0x400 /* half-full int status */
-#define ADNEI 0x800 /* fifo not empty int status (latch) */
-#define ADNE 0x1000 /* fifo not empty status (realtime) */
-#define DAEMIE 0x1000 /* dac empty int enable */
-#define LADFUL 0x2000 /* fifo overflow / clear */
-#define DAEMI 0x4000 /* dac fifo empty int status / clear */
-
-#define ADCMUX_CONT 2 /* ADC CHANNEL MUX AND CONTROL reg */
-#define BEGIN_SCAN(x) ((x) & 0xf)
-#define END_SCAN(x) (((x) & 0xf) << 4)
-#define GAIN_BITS(x) (((x) & 0x3) << 8)
-#define UNIP 0x800 /* Analog front-end unipolar mode */
-#define SE 0x400 /* Inputs in single-ended mode */
-#define PACER_MASK 0x3000 /* pacer source bits */
-#define PACER_INT 0x1000 /* int. pacer */
-#define PACER_EXT_FALL 0x2000 /* ext. falling edge */
-#define PACER_EXT_RISE 0x3000 /* ext. rising edge */
-#define EOC 0x4000 /* adc not busy */
-
-#define TRIG_CONTSTAT 4 /* TRIGGER CONTROL/STATUS register */
-#define SW_TRIGGER 0x1 /* software start trigger */
-#define EXT_TRIGGER 0x2 /* ext. start trigger */
-#define ANALOG_TRIGGER 0x3 /* ext. analog trigger */
-#define TRIGGER_MASK 0x3 /* start trigger mask */
-#define TGPOL 0x04 /* invert trigger (1602 only) */
-#define TGSEL		0x08	/* edge/level triggered (1602 only) */
-#define TGEN 0x10 /* enable external start trigger */
-#define BURSTE 0x20 /* burst mode enable */
-#define XTRCL 0x80 /* clear external trigger */
-
-#define CALIBRATION_REG 6 /* CALIBRATION register */
-#define SELECT_8800_BIT 0x100 /* select 8800 caldac */
-#define SELECT_TRIMPOT_BIT 0x200 /* select ad7376 trim pot */
-#define SELECT_DAC08_BIT 0x400 /* select dac08 caldac */
-#define CAL_SRC_BITS(x) (((x) & 0x7) << 11)
-#define CAL_EN_BIT 0x4000 /* calibration source enable */
-#define SERIAL_DATA_IN_BIT 0x8000 /* serial data bit going to caldac */
-
-#define DAC_CSR 0x8 /* dac control and status register */
-#define DACEN 0x02 /* dac enable */
-#define DAC_MODE_UPDATE_BOTH 0x80 /* update both dacs */
-
-static inline unsigned int DAC_RANGE(unsigned int channel, unsigned int range)
-{
- return (range & 0x3) << (8 + 2 * (channel & 0x1));
-}
-
-static inline unsigned int DAC_RANGE_MASK(unsigned int channel)
-{
- return 0x3 << (8 + 2 * (channel & 0x1));
-}
-
-/* bits for 1602 series only */
-#define DAC_EMPTY 0x1 /* fifo empty, read, write clear */
-#define DAC_START 0x4 /* start/arm fifo operations */
-#define DAC_PACER_MASK 0x18 /* bits that set pacer source */
-#define DAC_PACER_INT 0x8 /* int. pacing */
-#define DAC_PACER_EXT_FALL 0x10 /* ext. pacing, falling edge */
-#define DAC_PACER_EXT_RISE 0x18 /* ext. pacing, rising edge */
-
-static inline unsigned int DAC_CHAN_EN(unsigned int channel)
-{
- return 1 << (5 + (channel & 0x1)); /* enable channel 0 or 1 */
-}
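
Taken together, the definitions and helpers above imply the following DAC
control/status register layout; this summary is derived purely from the code
shown here, not from the board manual:

	/*
	 * DAC_CSR bit usage as implied by the definitions above:
	 *   bit 0      DAC_EMPTY (1602 only)
	 *   bit 1      DACEN
	 *   bit 2      DAC_START (1602 only)
	 *   bits 4:3   DAC pacer source (1602 only)
	 *   bit 5      DAC_CHAN_EN(0), bit 6  DAC_CHAN_EN(1) (1602 only)
	 *   bit 7      DAC_MODE_UPDATE_BOTH
	 *   bits 9:8   DAC_RANGE(0, r), bits 11:10  DAC_RANGE(1, r)
	 */
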
-
-/* analog input fifo */
-#define ADCDATA 0 /* ADC DATA register */
-#define ADCFIFOCLR 2 /* ADC FIFO CLEAR */
-
-/* pacer, counter, dio registers */
-#define ADC8254 0
-#define DIO_8255 4
-#define DAC8254 8
-
-/* analog output registers for 100x, 1200 series */
-static inline unsigned int DAC_DATA_REG(unsigned int channel)
-{
- return 2 * (channel & 0x1);
-}
-
-/* analog output registers for 1602 series*/
-#define DACDATA 0 /* DAC DATA register */
-#define DACFIFOCLR 2 /* DAC FIFO CLEAR */
-
-#define IS_UNIPOLAR 0x4 /* unipolar range mask */
-
-/* analog input ranges for most boards */
-static const struct comedi_lrange cb_pcidas_ranges = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-/* pci-das1001 input ranges */
-static const struct comedi_lrange cb_pcidas_alt_ranges = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
-};
-
-/* analog output ranges */
-static const struct comedi_lrange cb_pcidas_ao_ranges = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-enum trimpot_model {
- AD7376,
- AD8402,
-};
-
-enum cb_pcidas_boardid {
- BOARD_PCIDAS1602_16,
- BOARD_PCIDAS1200,
- BOARD_PCIDAS1602_12,
- BOARD_PCIDAS1200_JR,
- BOARD_PCIDAS1602_16_JR,
- BOARD_PCIDAS1000,
- BOARD_PCIDAS1001,
- BOARD_PCIDAS1002,
-};
-
-struct cb_pcidas_board {
- const char *name;
- int ai_nchan; /* Inputs in single-ended mode */
- int ai_bits; /* analog input resolution */
- int ai_speed; /* fastest conversion period in ns */
- int ao_nchan; /* number of analog out channels */
- int has_ao_fifo; /* analog output has fifo */
- int ao_scan_speed; /* analog output scan speed for 1602 series */
- int fifo_size; /* number of samples fifo can hold */
- const struct comedi_lrange *ranges;
- enum trimpot_model trimpot;
- unsigned has_dac08:1;
- unsigned is_1602:1;
-};
-
-static const struct cb_pcidas_board cb_pcidas_boards[] = {
- [BOARD_PCIDAS1602_16] = {
- .name = "pci-das1602/16",
- .ai_nchan = 16,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 2,
- .has_ao_fifo = 1,
- .ao_scan_speed = 10000,
- .fifo_size = 512,
- .ranges = &cb_pcidas_ranges,
- .trimpot = AD8402,
- .has_dac08 = 1,
- .is_1602 = 1,
- },
- [BOARD_PCIDAS1200] = {
- .name = "pci-das1200",
- .ai_nchan = 16,
- .ai_bits = 12,
- .ai_speed = 3200,
- .ao_nchan = 2,
- .fifo_size = 1024,
- .ranges = &cb_pcidas_ranges,
- .trimpot = AD7376,
- },
- [BOARD_PCIDAS1602_12] = {
- .name = "pci-das1602/12",
- .ai_nchan = 16,
- .ai_bits = 12,
- .ai_speed = 3200,
- .ao_nchan = 2,
- .has_ao_fifo = 1,
- .ao_scan_speed = 4000,
- .fifo_size = 1024,
- .ranges = &cb_pcidas_ranges,
- .trimpot = AD7376,
- .is_1602 = 1,
- },
- [BOARD_PCIDAS1200_JR] = {
- .name = "pci-das1200/jr",
- .ai_nchan = 16,
- .ai_bits = 12,
- .ai_speed = 3200,
- .fifo_size = 1024,
- .ranges = &cb_pcidas_ranges,
- .trimpot = AD7376,
- },
- [BOARD_PCIDAS1602_16_JR] = {
- .name = "pci-das1602/16/jr",
- .ai_nchan = 16,
- .ai_bits = 16,
- .ai_speed = 5000,
- .fifo_size = 512,
- .ranges = &cb_pcidas_ranges,
- .trimpot = AD8402,
- .has_dac08 = 1,
- .is_1602 = 1,
- },
- [BOARD_PCIDAS1000] = {
- .name = "pci-das1000",
- .ai_nchan = 16,
- .ai_bits = 12,
- .ai_speed = 4000,
- .fifo_size = 1024,
- .ranges = &cb_pcidas_ranges,
- .trimpot = AD7376,
- },
- [BOARD_PCIDAS1001] = {
- .name = "pci-das1001",
- .ai_nchan = 16,
- .ai_bits = 12,
- .ai_speed = 6800,
- .ao_nchan = 2,
- .fifo_size = 1024,
- .ranges = &cb_pcidas_alt_ranges,
- .trimpot = AD7376,
- },
- [BOARD_PCIDAS1002] = {
- .name = "pci-das1002",
- .ai_nchan = 16,
- .ai_bits = 12,
- .ai_speed = 6800,
- .ao_nchan = 2,
- .fifo_size = 1024,
- .ranges = &cb_pcidas_ranges,
- .trimpot = AD7376,
- },
-};
-
-struct cb_pcidas_private {
- struct comedi_8254 *ao_pacer;
- /* base addresses */
- unsigned long s5933_config;
- unsigned long control_status;
- unsigned long adc_fifo;
- unsigned long ao_registers;
- /* bits to write to registers */
- unsigned int adc_fifo_bits;
- unsigned int s5933_intcsr_bits;
- unsigned int ao_control_bits;
- /* fifo buffers */
- unsigned short ai_buffer[AI_BUFFER_SIZE];
- unsigned short ao_buffer[AO_BUFFER_SIZE];
- unsigned int calibration_source;
-};
-
-static inline unsigned int cal_enable_bits(struct comedi_device *dev)
-{
- struct cb_pcidas_private *devpriv = dev->private;
-
- return CAL_EN_BIT | CAL_SRC_BITS(devpriv->calibration_source);
-}
-
-static int cb_pcidas_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned int status;
-
- status = inw(devpriv->control_status + ADCMUX_CONT);
- if (status & EOC)
- return 0;
- return -EBUSY;
-}
-
-static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int aref = CR_AREF(insn->chanspec);
- unsigned int bits;
- int ret;
- int n;
-
- /* enable calibration input if appropriate */
- if (insn->chanspec & CR_ALT_SOURCE) {
- outw(cal_enable_bits(dev),
- devpriv->control_status + CALIBRATION_REG);
- chan = 0;
- } else {
- outw(0, devpriv->control_status + CALIBRATION_REG);
- }
-
- /* set mux limits and gain */
- bits = BEGIN_SCAN(chan) | END_SCAN(chan) | GAIN_BITS(range);
- /* set unipolar/bipolar */
- if (range & IS_UNIPOLAR)
- bits |= UNIP;
- /* set single-ended/differential */
- if (aref != AREF_DIFF)
- bits |= SE;
- outw(bits, devpriv->control_status + ADCMUX_CONT);
-
- /* clear fifo */
- outw(0, devpriv->adc_fifo + ADCFIFOCLR);
-
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- /* trigger conversion */
- outw(0, devpriv->adc_fifo + ADCDATA);
-
- /* wait for conversion to end */
- ret = comedi_timeout(dev, s, insn, cb_pcidas_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* read data */
- data[n] = inw(devpriv->adc_fifo + ADCDATA);
- }
-
- /* return the number of samples read/written */
- return n;
-}
-
-static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- int id = data[0];
- unsigned int source = data[1];
-
- switch (id) {
- case INSN_CONFIG_ALT_SOURCE:
- if (source >= 8) {
- dev_err(dev->class_dev,
- "invalid calibration source: %i\n",
- source);
- return -EINVAL;
- }
- devpriv->calibration_source = source;
- break;
- default:
- return -EINVAL;
- }
- return insn->n;
-}
-
-/* analog output insn for pcidas-1000 and 1200 series */
-static int cb_pcidas_ao_nofifo_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned long flags;
-
- /* set channel and range */
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->ao_control_bits &= (~DAC_MODE_UPDATE_BOTH &
- ~DAC_RANGE_MASK(chan));
- devpriv->ao_control_bits |= (DACEN | DAC_RANGE(chan, range));
- outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* remember value for readback */
- s->readback[chan] = data[0];
-
- /* send data */
- outw(data[0], devpriv->ao_registers + DAC_DATA_REG(chan));
-
- return insn->n;
-}
-
-/* analog output insn for pcidas-1602 series */
-static int cb_pcidas_ao_fifo_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned long flags;
-
- /* clear dac fifo */
- outw(0, devpriv->ao_registers + DACFIFOCLR);
-
- /* set channel and range */
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->ao_control_bits &= (~DAC_CHAN_EN(0) & ~DAC_CHAN_EN(1) &
- ~DAC_RANGE_MASK(chan) & ~DAC_PACER_MASK);
- devpriv->ao_control_bits |= (DACEN | DAC_RANGE(chan, range) |
- DAC_CHAN_EN(chan) | DAC_START);
- outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* remember value for readback */
- s->readback[chan] = data[0];
-
- /* send data */
- outw(data[0], devpriv->ao_registers + DACDATA);
-
- return insn->n;
-}
-
-static int wait_for_nvram_ready(unsigned long s5933_base_addr)
-{
- static const int timeout = 1000;
- unsigned int i;
-
- for (i = 0; i < timeout; i++) {
-		if ((inb(s5933_base_addr + AMCC_OP_REG_MCSR_NVCMD) &
-		     MCSR_NV_BUSY) == 0)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-static int nvram_read(struct comedi_device *dev, unsigned int address,
- uint8_t *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned long iobase = devpriv->s5933_config;
-
- if (wait_for_nvram_ready(iobase) < 0)
- return -ETIMEDOUT;
-
- outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_LOW_ADDR,
- iobase + AMCC_OP_REG_MCSR_NVCMD);
- outb(address & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
- outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_HIGH_ADDR,
- iobase + AMCC_OP_REG_MCSR_NVCMD);
- outb((address >> 8) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
- outb(MCSR_NV_ENABLE | MCSR_NV_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
-
- if (wait_for_nvram_ready(iobase) < 0)
- return -ETIMEDOUT;
-
- *data = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
-
- return 0;
-}
-
-static int eeprom_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- uint8_t nvram_data;
- int retval;
-
- retval = nvram_read(dev, CR_CHAN(insn->chanspec), &nvram_data);
- if (retval < 0)
- return retval;
-
- data[0] = nvram_data;
-
- return 1;
-}
-
-static void write_calibration_bitstream(struct comedi_device *dev,
- unsigned int register_bits,
- unsigned int bitstream,
- unsigned int bitstream_length)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- static const int write_delay = 1;
- unsigned int bit;
-
- for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
- if (bitstream & bit)
- register_bits |= SERIAL_DATA_IN_BIT;
- else
- register_bits &= ~SERIAL_DATA_IN_BIT;
- udelay(write_delay);
- outw(register_bits, devpriv->control_status + CALIBRATION_REG);
- }
-}
-
-static void caldac_8800_write(struct comedi_device *dev,
- unsigned int chan, uint8_t val)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- static const int bitstream_length = 11;
- unsigned int bitstream = ((chan & 0x7) << 8) | val;
- static const int caldac_8800_udelay = 1;
-
- write_calibration_bitstream(dev, cal_enable_bits(dev), bitstream,
- bitstream_length);
-
- udelay(caldac_8800_udelay);
- outw(cal_enable_bits(dev) | SELECT_8800_BIT,
- devpriv->control_status + CALIBRATION_REG);
- udelay(caldac_8800_udelay);
- outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);
-}
-
-static int cb_pcidas_caldac_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- if (insn->n) {
- unsigned int val = data[insn->n - 1];
-
- if (s->readback[chan] != val) {
- caldac_8800_write(dev, chan, val);
- s->readback[chan] = val;
- }
- }
-
- return insn->n;
-}
-
-/* 1602/16 pregain offset */
-static void dac08_write(struct comedi_device *dev, unsigned int value)
-{
- struct cb_pcidas_private *devpriv = dev->private;
-
- value &= 0xff;
- value |= cal_enable_bits(dev);
-
- /* latch the new value into the caldac */
- outw(value, devpriv->control_status + CALIBRATION_REG);
- udelay(1);
- outw(value | SELECT_DAC08_BIT,
- devpriv->control_status + CALIBRATION_REG);
- udelay(1);
- outw(value, devpriv->control_status + CALIBRATION_REG);
- udelay(1);
-}
-
-static int cb_pcidas_dac08_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- if (insn->n) {
- unsigned int val = data[insn->n - 1];
-
- if (s->readback[chan] != val) {
- dac08_write(dev, val);
- s->readback[chan] = val;
- }
- }
-
- return insn->n;
-}
-
-static int trimpot_7376_write(struct comedi_device *dev, uint8_t value)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- static const int bitstream_length = 7;
- unsigned int bitstream = value & 0x7f;
- unsigned int register_bits;
- static const int ad7376_udelay = 1;
-
- register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT;
- udelay(ad7376_udelay);
- outw(register_bits, devpriv->control_status + CALIBRATION_REG);
-
- write_calibration_bitstream(dev, register_bits, bitstream,
- bitstream_length);
-
- udelay(ad7376_udelay);
- outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);
-
- return 0;
-}
-
-/*
- * For 1602/16 only:
- *   ch 0 : adc gain
- *   ch 1 : adc postgain offset
- */
-static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel,
- uint8_t value)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- static const int bitstream_length = 10;
- unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff);
- unsigned int register_bits;
- static const int ad8402_udelay = 1;
-
- register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT;
- udelay(ad8402_udelay);
- outw(register_bits, devpriv->control_status + CALIBRATION_REG);
-
- write_calibration_bitstream(dev, register_bits, bitstream,
- bitstream_length);
-
- udelay(ad8402_udelay);
- outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);
-
- return 0;
-}
-
-static void cb_pcidas_trimpot_write(struct comedi_device *dev,
- unsigned int chan, unsigned int val)
-{
- const struct cb_pcidas_board *board = dev->board_ptr;
-
- switch (board->trimpot) {
- case AD7376:
- trimpot_7376_write(dev, val);
- break;
- case AD8402:
- trimpot_8402_write(dev, chan, val);
- break;
- default:
- dev_err(dev->class_dev, "driver bug?\n");
- break;
- }
-}
-
-static int cb_pcidas_trimpot_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- if (insn->n) {
- unsigned int val = data[insn->n - 1];
-
- if (s->readback[chan] != val) {
- cb_pcidas_trimpot_write(dev, chan, val);
- s->readback[chan] = val;
- }
- }
-
- return insn->n;
-}
-
-static int cb_pcidas_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
-
- if (chan != (chan0 + i) % s->n_chan) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must be consecutive channels, counting upwards\n");
- return -EINVAL;
- }
-
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must all have the same gain\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static int cb_pcidas_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct cb_pcidas_board *board = dev->board_ptr;
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW)
- err |= -EINVAL;
- if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_NOW)
- err |= -EINVAL;
- if (cmd->start_src == TRIG_EXT &&
- (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT))
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- /* External trigger, only CR_EDGE and CR_INVERT flags allowed */
- if ((cmd->start_arg
- & (CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT))) != 0) {
- cmd->start_arg &= ~(CR_FLAGS_MASK &
- ~(CR_EDGE | CR_INVERT));
- err |= -EINVAL;
- }
- if (!board->is_1602 && (cmd->start_arg & CR_INVERT)) {
- cmd->start_arg &= (CR_FLAGS_MASK & ~CR_INVERT);
- err |= -EINVAL;
- }
- break;
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ai_speed *
- cmd->chanlist_len);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= cb_pcidas_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int cb_pcidas_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct cb_pcidas_board *board = dev->board_ptr;
- struct cb_pcidas_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int bits;
- unsigned long flags;
-
- /* make sure CAL_EN_BIT is disabled */
- outw(0, devpriv->control_status + CALIBRATION_REG);
-	/* initialize before setting pacer source and count values */
- outw(0, devpriv->control_status + TRIG_CONTSTAT);
- /* clear fifo */
- outw(0, devpriv->adc_fifo + ADCFIFOCLR);
-
- /* set mux limits, gain and pacer source */
- bits = BEGIN_SCAN(CR_CHAN(cmd->chanlist[0])) |
- END_SCAN(CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])) |
- GAIN_BITS(CR_RANGE(cmd->chanlist[0]));
- /* set unipolar/bipolar */
- if (CR_RANGE(cmd->chanlist[0]) & IS_UNIPOLAR)
- bits |= UNIP;
-	/* set single-ended/differential */
- if (CR_AREF(cmd->chanlist[0]) != AREF_DIFF)
- bits |= SE;
- /* set pacer source */
- if (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT)
- bits |= PACER_EXT_RISE;
- else
- bits |= PACER_INT;
- outw(bits, devpriv->control_status + ADCMUX_CONT);
-
- /* load counters */
- if (cmd->scan_begin_src == TRIG_TIMER ||
- cmd->convert_src == TRIG_TIMER) {
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- }
-
- /* enable interrupts */
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->adc_fifo_bits |= INTE;
- devpriv->adc_fifo_bits &= ~INT_MASK;
- if (cmd->flags & CMDF_WAKE_EOS) {
- if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1) {
- /* interrupt end of burst */
- devpriv->adc_fifo_bits |= INT_EOS;
- } else {
- /* interrupt fifo not empty */
- devpriv->adc_fifo_bits |= INT_FNE;
- }
- } else {
- /* interrupt fifo half full */
- devpriv->adc_fifo_bits |= INT_FHF;
- }
-
- /* enable (and clear) interrupts */
- outw(devpriv->adc_fifo_bits | EOAI | INT | LADFUL,
- devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* set start trigger and burst mode */
- bits = 0;
- if (cmd->start_src == TRIG_NOW) {
- bits |= SW_TRIGGER;
- } else { /* TRIG_EXT */
- bits |= EXT_TRIGGER | TGEN | XTRCL;
- if (board->is_1602) {
- if (cmd->start_arg & CR_INVERT)
- bits |= TGPOL;
- if (cmd->start_arg & CR_EDGE)
- bits |= TGSEL;
- }
- }
- if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1)
- bits |= BURSTE;
- outw(bits, devpriv->control_status + TRIG_CONTSTAT);
-
- return 0;
-}
-
-static int cb_pcidas_ao_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
-
- if (cmd->chanlist_len > 1) {
- unsigned int chan1 = CR_CHAN(cmd->chanlist[1]);
-
- if (chan0 != 0 || chan1 != 1) {
- dev_dbg(dev->class_dev,
- "channels must be ordered channel 0, channel 1 in chanlist\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int cb_pcidas_ao_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct cb_pcidas_board *board = dev->board_ptr;
- struct cb_pcidas_private *devpriv = dev->private;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ao_scan_speed);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- unsigned int arg = cmd->scan_begin_arg;
-
- comedi_8254_cascade_ns_to_timer(devpriv->ao_pacer,
- &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= cb_pcidas_ao_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-/* cancel analog input command */
-static int cb_pcidas_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- /* disable interrupts */
- devpriv->adc_fifo_bits &= ~INTE & ~EOAIE;
- outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* disable start trigger source and burst mode */
- outw(0, devpriv->control_status + TRIG_CONTSTAT);
- /* software pacer source */
- outw(0, devpriv->control_status + ADCMUX_CONT);
-
- return 0;
-}
-
-static void cb_pcidas_ao_load_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int nsamples)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned int nbytes;
-
- nsamples = comedi_nsamples_left(s, nsamples);
- nbytes = comedi_buf_read_samples(s, devpriv->ao_buffer, nsamples);
-
- nsamples = comedi_bytes_to_samples(s, nbytes);
- outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, nsamples);
-}
-
-static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- const struct cb_pcidas_board *board = dev->board_ptr;
- struct cb_pcidas_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned long flags;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- cb_pcidas_ao_load_fifo(dev, s, board->fifo_size);
-
- /* enable dac half-full and empty interrupts */
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->adc_fifo_bits |= DAEMIE | DAHFIE;
-
- /* enable and clear interrupts */
- outw(devpriv->adc_fifo_bits | DAEMI | DAHFI,
- devpriv->control_status + INT_ADCFIFO);
-
- /* start dac */
- devpriv->ao_control_bits |= DAC_START | DACEN | DAC_EMPTY;
- outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- async->inttrig = NULL;
-
- return 0;
-}
-
-static int cb_pcidas_ao_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int i;
- unsigned long flags;
-
- /* set channel limits, gain */
- spin_lock_irqsave(&dev->spinlock, flags);
- for (i = 0; i < cmd->chanlist_len; i++) {
- /* enable channel */
- devpriv->ao_control_bits |=
- DAC_CHAN_EN(CR_CHAN(cmd->chanlist[i]));
- /* set range */
-		devpriv->ao_control_bits |=
-			DAC_RANGE(CR_CHAN(cmd->chanlist[i]),
-				  CR_RANGE(cmd->chanlist[i]));
- }
-
-	/* disable analog out before setting pacer source and count values */
- outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* clear fifo */
- outw(0, devpriv->ao_registers + DACFIFOCLR);
-
- /* load counters */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- comedi_8254_update_divisors(devpriv->ao_pacer);
- comedi_8254_pacer_enable(devpriv->ao_pacer, 1, 2, true);
- }
-
- /* set pacer source */
- spin_lock_irqsave(&dev->spinlock, flags);
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- devpriv->ao_control_bits |= DAC_PACER_INT;
- break;
- case TRIG_EXT:
- devpriv->ao_control_bits |= DAC_PACER_EXT_RISE;
- break;
- default:
- spin_unlock_irqrestore(&dev->spinlock, flags);
- dev_err(dev->class_dev, "error setting dac pacer source\n");
- return -1;
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- async->inttrig = cb_pcidas_ao_inttrig;
-
- return 0;
-}
-
-/* cancel analog output command */
-static int cb_pcidas_ao_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- /* disable interrupts */
- devpriv->adc_fifo_bits &= ~DAHFIE & ~DAEMIE;
- outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO);
-
- /* disable output */
- devpriv->ao_control_bits &= ~DACEN & ~DAC_PACER_MASK;
- outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return 0;
-}
-
-static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
-{
- const struct cb_pcidas_board *board = dev->board_ptr;
- struct cb_pcidas_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned long flags;
-
- if (status & DAEMI) {
- /* clear dac empty interrupt latch */
- spin_lock_irqsave(&dev->spinlock, flags);
- outw(devpriv->adc_fifo_bits | DAEMI,
- devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
- if (inw(devpriv->ao_registers + DAC_CSR) & DAC_EMPTY) {
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- } else {
- dev_err(dev->class_dev, "dac fifo underflow\n");
- async->events |= COMEDI_CB_ERROR;
- }
- }
- } else if (status & DAHFI) {
- cb_pcidas_ao_load_fifo(dev, s, board->fifo_size / 2);
-
- /* clear half-full interrupt latch */
- spin_lock_irqsave(&dev->spinlock, flags);
- outw(devpriv->adc_fifo_bits | DAHFI,
- devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
- }
-
- comedi_handle_events(dev, s);
-}
-
-static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- const struct cb_pcidas_board *board = dev->board_ptr;
- struct cb_pcidas_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
- int status, s5933_status;
- int half_fifo = board->fifo_size / 2;
- unsigned int num_samples, i;
- static const int timeout = 10000;
- unsigned long flags;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- async = s->async;
- cmd = &async->cmd;
-
- s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR);
-
- if ((INTCSR_INTR_ASSERTED & s5933_status) == 0)
- return IRQ_NONE;
-
- /* make sure mailbox 4 is empty */
- inl_p(devpriv->s5933_config + AMCC_OP_REG_IMB4);
- /* clear interrupt on amcc s5933 */
- outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS,
- devpriv->s5933_config + AMCC_OP_REG_INTCSR);
-
- status = inw(devpriv->control_status + INT_ADCFIFO);
-
- /* check for analog output interrupt */
- if (status & (DAHFI | DAEMI))
- handle_ao_interrupt(dev, status);
- /* check for analog input interrupts */
- /* if fifo half-full */
- if (status & ADHFI) {
- /* read data */
- num_samples = comedi_nsamples_left(s, half_fifo);
- insw(devpriv->adc_fifo + ADCDATA, devpriv->ai_buffer,
- num_samples);
- comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
-
- /* clear half-full interrupt latch */
- spin_lock_irqsave(&dev->spinlock, flags);
- outw(devpriv->adc_fifo_bits | INT,
- devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
- /* else if fifo not empty */
- } else if (status & (ADNEI | EOBI)) {
- for (i = 0; i < timeout; i++) {
- unsigned short val;
-
- /* break if fifo is empty */
- if ((ADNE & inw(devpriv->control_status +
- INT_ADCFIFO)) == 0)
- break;
- val = inw(devpriv->adc_fifo);
- comedi_buf_write_samples(s, &val, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- break;
- }
- }
- /* clear not-empty interrupt latch */
- spin_lock_irqsave(&dev->spinlock, flags);
- outw(devpriv->adc_fifo_bits | INT,
- devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
- } else if (status & EOAI) {
- dev_err(dev->class_dev,
- "bug! encountered end of acquisition interrupt?\n");
- /* clear EOA interrupt latch */
- spin_lock_irqsave(&dev->spinlock, flags);
- outw(devpriv->adc_fifo_bits | EOAI,
- devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
- }
- /* check for fifo overflow */
- if (status & LADFUL) {
- dev_err(dev->class_dev, "fifo overflow\n");
- /* clear overflow interrupt latch */
- spin_lock_irqsave(&dev->spinlock, flags);
- outw(devpriv->adc_fifo_bits | LADFUL,
- devpriv->control_status + INT_ADCFIFO);
- spin_unlock_irqrestore(&dev->spinlock, flags);
- async->events |= COMEDI_CB_ERROR;
- }
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int cb_pcidas_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct cb_pcidas_board *board = NULL;
- struct cb_pcidas_private *devpriv;
- struct comedi_subdevice *s;
- int i;
- int ret;
-
- if (context < ARRAY_SIZE(cb_pcidas_boards))
- board = &cb_pcidas_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv->s5933_config = pci_resource_start(pcidev, 0);
- devpriv->control_status = pci_resource_start(pcidev, 1);
- devpriv->adc_fifo = pci_resource_start(pcidev, 2);
- dev->iobase = pci_resource_start(pcidev, 3);
- if (board->ao_nchan)
- devpriv->ao_registers = pci_resource_start(pcidev, 4);
-
- /* disable and clear interrupts on amcc s5933 */
- outl(INTCSR_INBOX_INTR_STATUS,
- devpriv->s5933_config + AMCC_OP_REG_INTCSR);
-
- ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret) {
- dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
- pcidev->irq);
- return ret;
- }
- dev->irq = pcidev->irq;
-
- dev->pacer = comedi_8254_init(dev->iobase + ADC8254,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- devpriv->ao_pacer = comedi_8254_init(dev->iobase + DAC8254,
- I8254_OSC_BASE_10MHZ,
- I8254_IO8, 0);
- if (!devpriv->ao_pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 7);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
- /* WARNING: Number of inputs in differential mode is ignored */
- s->n_chan = board->ai_nchan;
- s->len_chanlist = board->ai_nchan;
- s->maxdata = (1 << board->ai_bits) - 1;
- s->range_table = board->ranges;
- s->insn_read = cb_pcidas_ai_rinsn;
- s->insn_config = ai_config_insn;
- s->do_cmd = cb_pcidas_ai_cmd;
- s->do_cmdtest = cb_pcidas_ai_cmdtest;
- s->cancel = cb_pcidas_cancel;
-
- /* analog output subdevice */
- s = &dev->subdevices[1];
- if (board->ao_nchan) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->ao_nchan;
- /*
- * analog out resolution is the same as
- * analog input resolution, so use ai_bits
- */
- s->maxdata = (1 << board->ai_bits) - 1;
- s->range_table = &cb_pcidas_ao_ranges;
- /* default to no fifo (*insn_write) */
- s->insn_write = cb_pcidas_ao_nofifo_winsn;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- if (board->has_ao_fifo) {
- dev->write_subdev = s;
- s->subdev_flags |= SDF_CMD_WRITE;
- /* use fifo (*insn_write) instead */
- s->insn_write = cb_pcidas_ao_fifo_winsn;
- s->do_cmdtest = cb_pcidas_ao_cmdtest;
- s->do_cmd = cb_pcidas_ao_cmd;
- s->cancel = cb_pcidas_ao_cancel;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* 8255 */
- s = &dev->subdevices[2];
- ret = subdev_8255_init(dev, s, NULL, DIO_8255);
- if (ret)
- return ret;
-
- /* serial EEPROM, */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
- s->n_chan = 256;
- s->maxdata = 0xff;
- s->insn_read = eeprom_read_insn;
-
- /* 8800 caldac */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = NUM_CHANNELS_8800;
- s->maxdata = 0xff;
- s->insn_write = cb_pcidas_caldac_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++) {
- caldac_8800_write(dev, i, s->maxdata / 2);
- s->readback[i] = s->maxdata / 2;
- }
-
- /* trim potentiometer */
- s = &dev->subdevices[5];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- if (board->trimpot == AD7376) {
- s->n_chan = NUM_CHANNELS_7376;
- s->maxdata = 0x7f;
- } else {
- s->n_chan = NUM_CHANNELS_8402;
- s->maxdata = 0xff;
- }
- s->insn_write = cb_pcidas_trimpot_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++) {
- cb_pcidas_trimpot_write(dev, i, s->maxdata / 2);
- s->readback[i] = s->maxdata / 2;
- }
-
- /* dac08 caldac */
- s = &dev->subdevices[6];
- if (board->has_dac08) {
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = NUM_CHANNELS_DAC08;
- s->maxdata = 0xff;
- s->insn_write = cb_pcidas_dac08_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++) {
- dac08_write(dev, s->maxdata / 2);
- s->readback[i] = s->maxdata / 2;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* make sure mailbox 4 is empty */
- inl(devpriv->s5933_config + AMCC_OP_REG_IMB4);
- /* Set bits to enable incoming mailbox interrupts on amcc s5933. */
- devpriv->s5933_intcsr_bits =
- INTCSR_INBOX_BYTE(3) | INTCSR_INBOX_SELECT(3) |
- INTCSR_INBOX_FULL_INT;
- /* clear and enable interrupt on amcc s5933 */
- outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS,
- devpriv->s5933_config + AMCC_OP_REG_INTCSR);
-
- return 0;
-}
-
-static void cb_pcidas_detach(struct comedi_device *dev)
-{
- struct cb_pcidas_private *devpriv = dev->private;
-
- if (devpriv) {
- if (devpriv->s5933_config)
- outl(INTCSR_INBOX_INTR_STATUS,
- devpriv->s5933_config + AMCC_OP_REG_INTCSR);
- kfree(devpriv->ao_pacer);
- }
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver cb_pcidas_driver = {
- .driver_name = "cb_pcidas",
- .module = THIS_MODULE,
- .auto_attach = cb_pcidas_auto_attach,
- .detach = cb_pcidas_detach,
-};
-
-static int cb_pcidas_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &cb_pcidas_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id cb_pcidas_pci_table[] = {
- { PCI_VDEVICE(CB, 0x0001), BOARD_PCIDAS1602_16 },
- { PCI_VDEVICE(CB, 0x000f), BOARD_PCIDAS1200 },
- { PCI_VDEVICE(CB, 0x0010), BOARD_PCIDAS1602_12 },
- { PCI_VDEVICE(CB, 0x0019), BOARD_PCIDAS1200_JR },
- { PCI_VDEVICE(CB, 0x001c), BOARD_PCIDAS1602_16_JR },
- { PCI_VDEVICE(CB, 0x004c), BOARD_PCIDAS1000 },
- { PCI_VDEVICE(CB, 0x001a), BOARD_PCIDAS1001 },
- { PCI_VDEVICE(CB, 0x001b), BOARD_PCIDAS1002 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, cb_pcidas_pci_table);
-
-static struct pci_driver cb_pcidas_pci_driver = {
- .name = "cb_pcidas",
- .id_table = cb_pcidas_pci_table,
- .probe = cb_pcidas_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(cb_pcidas_driver, cb_pcidas_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
deleted file mode 100644
index d33b8fe872a7..000000000000
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ /dev/null
@@ -1,4129 +0,0 @@
-/*
- comedi/drivers/cb_pcidas64.c
- This is a driver for the ComputerBoards/MeasurementComputing PCI-DAS
- 64xx, 60xx, and 4020 cards.
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2001, 2002 Frank Mori Hess
-
- Thanks also go to the following people:
-
- Steve Rosenbluth, for providing the source code for
- his pci-das6402 driver, and source code for working QNX pci-6402
- drivers by Greg Laird and Mariusz Bogacz. None of the code was
- used directly here, but it was useful as an additional source of
- documentation on how to program the boards.
-
- John Sims, for much testing and feedback on pcidas-4020 support.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
- * Driver: cb_pcidas64
- * Description: MeasurementComputing PCI-DAS64xx, 60XX, and 4020 series
- * with the PLX 9080 PCI controller
- * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- * Status: works
- * Updated: Fri, 02 Nov 2012 18:58:55 +0000
- * Devices: [Measurement Computing] PCI-DAS6402/16 (cb_pcidas64),
- * PCI-DAS6402/12, PCI-DAS64/M1/16, PCI-DAS64/M2/16,
- * PCI-DAS64/M3/16, PCI-DAS6402/16/JR, PCI-DAS64/M1/16/JR,
- * PCI-DAS64/M2/16/JR, PCI-DAS64/M3/16/JR, PCI-DAS64/M1/14,
- * PCI-DAS64/M2/14, PCI-DAS64/M3/14, PCI-DAS6013, PCI-DAS6014,
- * PCI-DAS6023, PCI-DAS6025, PCI-DAS6030,
- * PCI-DAS6031, PCI-DAS6032, PCI-DAS6033, PCI-DAS6034,
- * PCI-DAS6035, PCI-DAS6036, PCI-DAS6040, PCI-DAS6052,
- * PCI-DAS6070, PCI-DAS6071, PCI-DAS4020/12
- *
- * Configuration options:
- * None.
- *
- * Manual attachment of PCI cards with the comedi_config utility is not
- * supported by this driver; they are attached automatically.
- *
- * These boards may be autocalibrated with the comedi_calibrate utility.
- *
- * To select the bnc trigger input on the 4020 (instead of the dio input),
- * specify a nonzero channel in the chanspec. If you wish to use an external
- * master clock on the 4020, you may do so by setting the scan_begin_src
- * to TRIG_OTHER, and using an INSN_CONFIG_TIMER_1 configuration insn
- * to configure the divisor to use for the external clock.
- *
- * Some devices are not identified because the PCI device IDs are not yet
- * known. If you have such a board, please let the maintainers know.
- */
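/*
 * Example (user-space, comedilib): a minimal sketch of how the
 * INSN_CONFIG_TIMER_1 option mentioned above might be issued before
 * running a command with scan_begin_src = TRIG_OTHER on a 4020.  The
 * helper name, device handle, and subdevice index are placeholders, and
 * only the data[] layout is taken from the driver's config handler.
 */
#include <string.h>
#include <comedilib.h>

static int set_4020_ext_clock(comedi_t *dev, unsigned int ai_subdev,
			      unsigned int clock_chanspec, unsigned int divisor)
{
	comedi_insn insn;
	lsampl_t data[5] = { 0 };

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = ai_subdev;
	insn.n = 5;
	insn.data = data;

	data[0] = INSN_CONFIG_TIMER_1;
	data[1] = COMEDI_EV_SCAN_BEGIN;	/* stored as the external scan clock */
	data[2] = clock_chanspec;	/* chanspec of the clock input */
	data[4] = divisor;		/* driver clamps this to >= 2 */

	return comedi_do_insn(dev, &insn);	/* negative on error */
}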
-
-/*
-
-TODO:
-	make it return an error if the user attempts an ai command that uses
-	the external queue and an ao command at the same time
-	add support for the user counter subdevice
-	there are a number of boards this driver will support when they are
-	fully released, but it does not yet, since their pci device ids are
-	not yet available.
-
- support prescaled 100khz clock for slow pacing (not available on 6000
- series?)
-
- make ao fifo size adjustable like ai fifo
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-#include "plx9080.h"
-
-#define TIMER_BASE 25 /* 40MHz master clock */
-/* 100kHz 'prescaled' clock for slow acquisition,
- * maybe I'll support this someday */
-#define PRESCALED_TIMER_BASE 10000
-#define DMA_BUFFER_SIZE 0x1000
-
-/* maximum value that can be loaded into board's 24-bit counters */
-static const int max_counter_value = 0xffffff;
-
-/* PCI-DAS64xxx base addresses */
-
-/* devpriv->main_iobase registers */
-enum write_only_registers {
- INTR_ENABLE_REG = 0x0, /* interrupt enable register */
- HW_CONFIG_REG = 0x2, /* hardware config register */
- DAQ_SYNC_REG = 0xc,
- DAQ_ATRIG_LOW_4020_REG = 0xc,
- ADC_CONTROL0_REG = 0x10, /* adc control register 0 */
- ADC_CONTROL1_REG = 0x12, /* adc control register 1 */
- CALIBRATION_REG = 0x14,
- /* lower 16 bits of adc sample interval counter */
- ADC_SAMPLE_INTERVAL_LOWER_REG = 0x16,
- /* upper 8 bits of adc sample interval counter */
- ADC_SAMPLE_INTERVAL_UPPER_REG = 0x18,
- /* lower 16 bits of delay interval counter */
- ADC_DELAY_INTERVAL_LOWER_REG = 0x1a,
- /* upper 8 bits of delay interval counter */
- ADC_DELAY_INTERVAL_UPPER_REG = 0x1c,
- /* lower 16 bits of hardware conversion/scan counter */
- ADC_COUNT_LOWER_REG = 0x1e,
- /* upper 8 bits of hardware conversion/scan counter */
- ADC_COUNT_UPPER_REG = 0x20,
- ADC_START_REG = 0x22, /* software trigger to start acquisition */
- ADC_CONVERT_REG = 0x24, /* initiates single conversion */
- ADC_QUEUE_CLEAR_REG = 0x26, /* clears adc queue */
- ADC_QUEUE_LOAD_REG = 0x28, /* loads adc queue */
- ADC_BUFFER_CLEAR_REG = 0x2a,
-	/* high channel for internal queue, use adc_chan_bits() inline below */
- ADC_QUEUE_HIGH_REG = 0x2c,
- DAC_CONTROL0_REG = 0x50, /* dac control register 0 */
-	DAC_CONTROL1_REG = 0x52,	/* dac control register 1 */
- /* lower 16 bits of dac sample interval counter */
- DAC_SAMPLE_INTERVAL_LOWER_REG = 0x54,
- /* upper 8 bits of dac sample interval counter */
- DAC_SAMPLE_INTERVAL_UPPER_REG = 0x56,
- DAC_SELECT_REG = 0x60,
- DAC_START_REG = 0x64,
- DAC_BUFFER_CLEAR_REG = 0x66, /* clear dac buffer */
-};
-
-static inline unsigned int dac_convert_reg(unsigned int channel)
-{
- return 0x70 + (2 * (channel & 0x1));
-}
-
-static inline unsigned int dac_lsb_4020_reg(unsigned int channel)
-{
- return 0x70 + (4 * (channel & 0x1));
-}
-
-static inline unsigned int dac_msb_4020_reg(unsigned int channel)
-{
- return 0x72 + (4 * (channel & 0x1));
-}
-
-enum read_only_registers {
- /*
- * hardware status register,
- * reading this apparently clears pending interrupts as well
- */
- HW_STATUS_REG = 0x0,
- PIPE1_READ_REG = 0x4,
- ADC_READ_PNTR_REG = 0x8,
- LOWER_XFER_REG = 0x10,
- ADC_WRITE_PNTR_REG = 0xc,
- PREPOST_REG = 0x14,
-};
-
-enum read_write_registers {
- I8255_4020_REG = 0x48, /* 8255 offset, for 4020 only */
- /* external channel/gain queue, uses same bits as ADC_QUEUE_LOAD_REG */
- ADC_QUEUE_FIFO_REG = 0x100,
- ADC_FIFO_REG = 0x200, /* adc data fifo */
- /* dac data fifo, has weird interactions with external channel queue */
- DAC_FIFO_REG = 0x300,
-};
-
-/* dev->mmio registers */
-enum dio_counter_registers {
- DIO_8255_OFFSET = 0x0,
- DO_REG = 0x20,
- DI_REG = 0x28,
- DIO_DIRECTION_60XX_REG = 0x40,
- DIO_DATA_60XX_REG = 0x48,
-};
-
-/* bit definitions for write-only registers */
-
-enum intr_enable_contents {
- ADC_INTR_SRC_MASK = 0x3, /* adc interrupt source mask */
- ADC_INTR_QFULL_BITS = 0x0, /* interrupt fifo quarter full */
- ADC_INTR_EOC_BITS = 0x1, /* interrupt end of conversion */
- ADC_INTR_EOSCAN_BITS = 0x2, /* interrupt end of scan */
- ADC_INTR_EOSEQ_BITS = 0x3, /* interrupt end of sequence mask */
- EN_ADC_INTR_SRC_BIT = 0x4, /* enable adc interrupt source */
- EN_ADC_DONE_INTR_BIT = 0x8, /* enable adc acquisition done intr */
- DAC_INTR_SRC_MASK = 0x30,
- DAC_INTR_QEMPTY_BITS = 0x0,
- DAC_INTR_HIGH_CHAN_BITS = 0x10,
- EN_DAC_INTR_SRC_BIT = 0x40, /* enable dac interrupt source */
- EN_DAC_DONE_INTR_BIT = 0x80,
- EN_ADC_ACTIVE_INTR_BIT = 0x200, /* enable adc active interrupt */
- EN_ADC_STOP_INTR_BIT = 0x400, /* enable adc stop trigger interrupt */
- EN_DAC_ACTIVE_INTR_BIT = 0x800, /* enable dac active interrupt */
- EN_DAC_UNDERRUN_BIT = 0x4000, /* enable dac underrun status bit */
- EN_ADC_OVERRUN_BIT = 0x8000, /* enable adc overrun status bit */
-};
-
-enum hw_config_contents {
- MASTER_CLOCK_4020_MASK = 0x3, /* master clock source mask for 4020 */
- INTERNAL_CLOCK_4020_BITS = 0x1, /* use 40 MHz internal master clock */
- BNC_CLOCK_4020_BITS = 0x2, /* use BNC input for master clock */
- EXT_CLOCK_4020_BITS = 0x3, /* use dio input for master clock */
- EXT_QUEUE_BIT = 0x200, /* use external channel/gain queue */
- /* use 225 nanosec strobe when loading dac instead of 50 nanosec */
- SLOW_DAC_BIT = 0x400,
-	/* bit with an unknown function, but given as a default value in the
-	 * pci-das64 manual */
- HW_CONFIG_DUMMY_BITS = 0x2000,
- /* bit selects channels 1/0 for analog input/output, otherwise 0/1 */
- DMA_CH_SELECT_BIT = 0x8000,
- FIFO_SIZE_REG = 0x4, /* allows adjustment of fifo sizes */
- DAC_FIFO_SIZE_MASK = 0xff00, /* bits that set dac fifo size */
- DAC_FIFO_BITS = 0xf800, /* 8k sample ao fifo */
-};
-#define DAC_FIFO_SIZE 0x2000
-
-enum daq_atrig_low_4020_contents {
- /* use trig/ext clk bnc input for analog gate signal */
- EXT_AGATE_BNC_BIT = 0x8000,
- /* use trig/ext clk bnc input for external stop trigger signal */
- EXT_STOP_TRIG_BNC_BIT = 0x4000,
- /* use trig/ext clk bnc input for external start trigger signal */
- EXT_START_TRIG_BNC_BIT = 0x2000,
-};
-
-static inline uint16_t analog_trig_low_threshold_bits(uint16_t threshold)
-{
- return threshold & 0xfff;
-}
-
-enum adc_control0_contents {
- ADC_GATE_SRC_MASK = 0x3, /* bits that select gate */
- ADC_SOFT_GATE_BITS = 0x1, /* software gate */
- ADC_EXT_GATE_BITS = 0x2, /* external digital gate */
- ADC_ANALOG_GATE_BITS = 0x3, /* analog level gate */
- /* level-sensitive gate (for digital) */
- ADC_GATE_LEVEL_BIT = 0x4,
- ADC_GATE_POLARITY_BIT = 0x8, /* gate active low */
- ADC_START_TRIG_SOFT_BITS = 0x10,
- ADC_START_TRIG_EXT_BITS = 0x20,
- ADC_START_TRIG_ANALOG_BITS = 0x30,
- ADC_START_TRIG_MASK = 0x30,
- ADC_START_TRIG_FALLING_BIT = 0x40, /* trig 1 uses falling edge */
- /* external pacing uses falling edge */
- ADC_EXT_CONV_FALLING_BIT = 0x800,
- /* enable hardware scan counter */
- ADC_SAMPLE_COUNTER_EN_BIT = 0x1000,
- ADC_DMA_DISABLE_BIT = 0x4000, /* disables dma */
- ADC_ENABLE_BIT = 0x8000, /* master adc enable */
-};
-
-enum adc_control1_contents {
- /* should be set for boards with > 16 channels */
- ADC_QUEUE_CONFIG_BIT = 0x1,
- CONVERT_POLARITY_BIT = 0x10,
- EOC_POLARITY_BIT = 0x20,
- ADC_SW_GATE_BIT = 0x40, /* software gate of adc */
- ADC_DITHER_BIT = 0x200, /* turn on extra noise for dithering */
- RETRIGGER_BIT = 0x800,
- ADC_LO_CHANNEL_4020_MASK = 0x300,
- ADC_HI_CHANNEL_4020_MASK = 0xc00,
- TWO_CHANNEL_4020_BITS = 0x1000, /* two channel mode for 4020 */
- FOUR_CHANNEL_4020_BITS = 0x2000, /* four channel mode for 4020 */
- CHANNEL_MODE_4020_MASK = 0x3000,
- ADC_MODE_MASK = 0xf000,
-};
-
-static inline uint16_t adc_lo_chan_4020_bits(unsigned int channel)
-{
- return (channel & 0x3) << 8;
-};
-
-static inline uint16_t adc_hi_chan_4020_bits(unsigned int channel)
-{
- return (channel & 0x3) << 10;
-};
-
-static inline uint16_t adc_mode_bits(unsigned int mode)
-{
- return (mode & 0xf) << 12;
-};
-
-enum calibration_contents {
- SELECT_8800_BIT = 0x1,
- SELECT_8402_64XX_BIT = 0x2,
- SELECT_1590_60XX_BIT = 0x2,
- CAL_EN_64XX_BIT = 0x40, /* calibration enable for 64xx series */
- SERIAL_DATA_IN_BIT = 0x80,
- SERIAL_CLOCK_BIT = 0x100,
- CAL_EN_60XX_BIT = 0x200, /* calibration enable for 60xx series */
- CAL_GAIN_BIT = 0x800,
-};
-
-/*
- * calibration sources for 6025 are:
- * 0 : ground
- * 1 : 10V
- * 2 : 5V
- * 3 : 0.5V
- * 4 : 0.05V
- * 5 : ground
- * 6 : dac channel 0
- * 7 : dac channel 1
- */
-
-static inline uint16_t adc_src_bits(unsigned int source)
-{
- return (source & 0xf) << 3;
-};
-
-static inline uint16_t adc_convert_chan_4020_bits(unsigned int channel)
-{
- return (channel & 0x3) << 8;
-};
-
-enum adc_queue_load_contents {
- UNIP_BIT = 0x800, /* unipolar/bipolar bit */
-	ADC_SE_DIFF_BIT = 0x1000,	/* single-ended/differential bit */
- /* non-referenced single-ended (common-mode input) */
- ADC_COMMON_BIT = 0x2000,
- QUEUE_EOSEQ_BIT = 0x4000, /* queue end of sequence */
- QUEUE_EOSCAN_BIT = 0x8000, /* queue end of scan */
-};
-
-static inline uint16_t adc_chan_bits(unsigned int channel)
-{
- return channel & 0x3f;
-};
-
-enum dac_control0_contents {
- DAC_ENABLE_BIT = 0x8000, /* dac controller enable bit */
- DAC_CYCLIC_STOP_BIT = 0x4000,
- DAC_WAVEFORM_MODE_BIT = 0x100,
- DAC_EXT_UPDATE_FALLING_BIT = 0x80,
- DAC_EXT_UPDATE_ENABLE_BIT = 0x40,
- WAVEFORM_TRIG_MASK = 0x30,
- WAVEFORM_TRIG_DISABLED_BITS = 0x0,
- WAVEFORM_TRIG_SOFT_BITS = 0x10,
- WAVEFORM_TRIG_EXT_BITS = 0x20,
- WAVEFORM_TRIG_ADC1_BITS = 0x30,
- WAVEFORM_TRIG_FALLING_BIT = 0x8,
- WAVEFORM_GATE_LEVEL_BIT = 0x4,
- WAVEFORM_GATE_ENABLE_BIT = 0x2,
- WAVEFORM_GATE_SELECT_BIT = 0x1,
-};
-
-enum dac_control1_contents {
- DAC_WRITE_POLARITY_BIT = 0x800, /* board-dependent setting */
- DAC1_EXT_REF_BIT = 0x200,
- DAC0_EXT_REF_BIT = 0x100,
- DAC_OUTPUT_ENABLE_BIT = 0x80, /* dac output enable bit */
- DAC_UPDATE_POLARITY_BIT = 0x40, /* board-dependent setting */
- DAC_SW_GATE_BIT = 0x20,
- DAC1_UNIPOLAR_BIT = 0x8,
- DAC0_UNIPOLAR_BIT = 0x2,
-};
-
-/* bit definitions for read-only registers */
-enum hw_status_contents {
- DAC_UNDERRUN_BIT = 0x1,
- ADC_OVERRUN_BIT = 0x2,
- DAC_ACTIVE_BIT = 0x4,
- ADC_ACTIVE_BIT = 0x8,
- DAC_INTR_PENDING_BIT = 0x10,
- ADC_INTR_PENDING_BIT = 0x20,
- DAC_DONE_BIT = 0x40,
- ADC_DONE_BIT = 0x80,
- EXT_INTR_PENDING_BIT = 0x100,
- ADC_STOP_BIT = 0x200,
-};
-
-static inline uint16_t pipe_full_bits(uint16_t hw_status_bits)
-{
- return (hw_status_bits >> 10) & 0x3;
-};
-
-static inline unsigned int dma_chain_flag_bits(uint16_t prepost_bits)
-{
- return (prepost_bits >> 6) & 0x3;
-}
-
-static inline unsigned int adc_upper_read_ptr_code(uint16_t prepost_bits)
-{
- return (prepost_bits >> 12) & 0x3;
-}
-
-static inline unsigned int adc_upper_write_ptr_code(uint16_t prepost_bits)
-{
- return (prepost_bits >> 14) & 0x3;
-}
-
-/* I2C addresses for 4020 */
-enum i2c_addresses {
- RANGE_CAL_I2C_ADDR = 0x20,
- CALDAC0_I2C_ADDR = 0xc,
- CALDAC1_I2C_ADDR = 0xd,
-};
-
-enum range_cal_i2c_contents {
- /* bits that set what source the adc converter measures */
- ADC_SRC_4020_MASK = 0x70,
- /* make bnc trig/ext clock threshold 0V instead of 2.5V */
- BNC_TRIG_THRESHOLD_0V_BIT = 0x80,
-};
-
-static inline uint8_t adc_src_4020_bits(unsigned int source)
-{
- return (source << 4) & ADC_SRC_4020_MASK;
-};
-
-static inline uint8_t attenuate_bit(unsigned int channel)
-{
- /* attenuate channel (+-5V input range) */
- return 1 << (channel & 0x3);
-};
-
-/* analog input ranges for 64xx boards */
-static const struct comedi_lrange ai_ranges_64xx = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const uint8_t ai_range_code_64xx[8] = {
-	0x0, 0x1, 0x2, 0x3,	/* bipolar 10, 5, 2.5, 1.25 */
- 0x8, 0x9, 0xa, 0xb /* unipolar 10, 5, 2.5, 1.25 */
-};
-
-/* analog input ranges for 64-Mx boards */
-static const struct comedi_lrange ai_ranges_64_mx = {
- 7, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const uint8_t ai_range_code_64_mx[7] = {
- 0x0, 0x1, 0x2, 0x3, /* bipolar 5, 2.5, 1.25, 0.625 */
- 0x9, 0xa, 0xb /* unipolar 5, 2.5, 1.25 */
-};
-
-/* analog input ranges for 60xx boards */
-static const struct comedi_lrange ai_ranges_60xx = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05)
- }
-};
-
-static const uint8_t ai_range_code_60xx[4] = {
- 0x0, 0x1, 0x4, 0x7 /* bipolar 10, 5, 0.5, 0.05 */
-};
-
-/* analog input ranges for 6030, etc boards */
-static const struct comedi_lrange ai_ranges_6030 = {
- 14, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.2),
- BIP_RANGE(0.1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1)
- }
-};
-
-static const uint8_t ai_range_code_6030[14] = {
- 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */
- 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */
-};
-
-/* analog input ranges for 6052, etc boards */
-static const struct comedi_lrange ai_ranges_6052 = {
- 15, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1)
- }
-};
-
-static const uint8_t ai_range_code_6052[15] = {
- 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, /* bipolar 10 ... 0.05 */
- 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* unipolar 10 ... 0.1 */
-};
-
-/* analog input ranges for 4020 board */
-static const struct comedi_lrange ai_ranges_4020 = {
- 2, {
- BIP_RANGE(5),
- BIP_RANGE(1)
- }
-};
-
-/* analog output ranges */
-static const struct comedi_lrange ao_ranges_64xx = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-static const int ao_range_code_64xx[] = {
- 0x0,
- 0x1,
- 0x2,
- 0x3,
-};
-
-static const int ao_range_code_60xx[] = {
- 0x0,
-};
-
-static const struct comedi_lrange ao_ranges_6030 = {
- 2, {
- BIP_RANGE(10),
- UNI_RANGE(10)
- }
-};
-
-static const int ao_range_code_6030[] = {
- 0x0,
- 0x2,
-};
-
-static const struct comedi_lrange ao_ranges_4020 = {
- 2, {
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
-};
-
-static const int ao_range_code_4020[] = {
- 0x1,
- 0x0,
-};
-
-enum register_layout {
- LAYOUT_60XX,
- LAYOUT_64XX,
- LAYOUT_4020,
-};
-
-struct hw_fifo_info {
- unsigned int num_segments;
- unsigned int max_segment_length;
- unsigned int sample_packing_ratio;
- uint16_t fifo_size_reg_mask;
-};
-
-enum pcidas64_boardid {
- BOARD_PCIDAS6402_16,
- BOARD_PCIDAS6402_12,
- BOARD_PCIDAS64_M1_16,
- BOARD_PCIDAS64_M2_16,
- BOARD_PCIDAS64_M3_16,
- BOARD_PCIDAS6013,
- BOARD_PCIDAS6014,
- BOARD_PCIDAS6023,
- BOARD_PCIDAS6025,
- BOARD_PCIDAS6030,
- BOARD_PCIDAS6031,
- BOARD_PCIDAS6032,
- BOARD_PCIDAS6033,
- BOARD_PCIDAS6034,
- BOARD_PCIDAS6035,
- BOARD_PCIDAS6036,
- BOARD_PCIDAS6040,
- BOARD_PCIDAS6052,
- BOARD_PCIDAS6070,
- BOARD_PCIDAS6071,
- BOARD_PCIDAS4020_12,
- BOARD_PCIDAS6402_16_JR,
- BOARD_PCIDAS64_M1_16_JR,
- BOARD_PCIDAS64_M2_16_JR,
- BOARD_PCIDAS64_M3_16_JR,
- BOARD_PCIDAS64_M1_14,
- BOARD_PCIDAS64_M2_14,
- BOARD_PCIDAS64_M3_14,
-};
-
-struct pcidas64_board {
- const char *name;
- int ai_se_chans; /* number of ai inputs in single-ended mode */
- int ai_bits; /* analog input resolution */
- int ai_speed; /* fastest conversion period in ns */
- const struct comedi_lrange *ai_range_table;
- const uint8_t *ai_range_code;
- int ao_nchan; /* number of analog out channels */
- int ao_bits; /* analog output resolution */
- int ao_scan_speed; /* analog output scan speed */
- const struct comedi_lrange *ao_range_table;
- const int *ao_range_code;
- const struct hw_fifo_info *const ai_fifo;
- /* different board families have slightly different registers */
- enum register_layout layout;
- unsigned has_8255:1;
-};
-
-static const struct hw_fifo_info ai_fifo_4020 = {
- .num_segments = 2,
- .max_segment_length = 0x8000,
- .sample_packing_ratio = 2,
- .fifo_size_reg_mask = 0x7f,
-};
-
-static const struct hw_fifo_info ai_fifo_64xx = {
- .num_segments = 4,
- .max_segment_length = 0x800,
- .sample_packing_ratio = 1,
- .fifo_size_reg_mask = 0x3f,
-};
-
-static const struct hw_fifo_info ai_fifo_60xx = {
- .num_segments = 4,
- .max_segment_length = 0x800,
- .sample_packing_ratio = 1,
- .fifo_size_reg_mask = 0x7f,
-};
-
-/*
- * maximum number of dma transfers we will chain together into a ring
- * (and the maximum number of dma buffers we maintain)
- */
-#define MAX_AI_DMA_RING_COUNT (0x80000 / DMA_BUFFER_SIZE)
-#define MIN_AI_DMA_RING_COUNT (0x10000 / DMA_BUFFER_SIZE)
-#define AO_DMA_RING_COUNT (0x10000 / DMA_BUFFER_SIZE)
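/*
 * With DMA_BUFFER_SIZE of 0x1000, these evaluate to a ring of 128 ai
 * descriptors on the 4020, 16 ai descriptors on the other boards, and
 * 16 ao descriptors.
 */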
-static inline unsigned int ai_dma_ring_count(const struct pcidas64_board *board)
-{
- if (board->layout == LAYOUT_4020)
- return MAX_AI_DMA_RING_COUNT;
-
- return MIN_AI_DMA_RING_COUNT;
-}
-
-static const int bytes_in_sample = 2;
-
-static const struct pcidas64_board pcidas64_boards[] = {
- [BOARD_PCIDAS6402_16] = {
- .name = "pci-das6402/16",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
- .ai_range_code = ai_range_code_64xx,
- .ao_range_table = &ao_ranges_64xx,
- .ao_range_code = ao_range_code_64xx,
- .ai_fifo = &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS6402_12] = {
- .name = "pci-das6402/12", /* XXX check */
- .ai_se_chans = 64,
- .ai_bits = 12,
- .ai_speed = 5000,
- .ao_nchan = 2,
- .ao_bits = 12,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
- .ai_range_code = ai_range_code_64xx,
- .ao_range_table = &ao_ranges_64xx,
- .ao_range_code = ao_range_code_64xx,
- .ai_fifo = &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M1_16] = {
- .name = "pci-das64/m1/16",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 1000,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
- .ao_range_table = &ao_ranges_64xx,
- .ao_range_code = ao_range_code_64xx,
- .ai_fifo = &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M2_16] = {
- .name = "pci-das64/m2/16",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 500,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
- .ao_range_table = &ao_ranges_64xx,
- .ao_range_code = ao_range_code_64xx,
- .ai_fifo = &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M3_16] = {
- .name = "pci-das64/m3/16",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 333,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
- .ao_range_table = &ao_ranges_64xx,
- .ao_range_code = ao_range_code_64xx,
- .ai_fifo = &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS6013] = {
- .name = "pci-das6013",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 0,
- .ao_bits = 16,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_60xx,
- .ai_range_code = ai_range_code_60xx,
- .ao_range_table = &range_bipolar10,
- .ao_range_code = ao_range_code_60xx,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6014] = {
- .name = "pci-das6014",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 100000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_60xx,
- .ai_range_code = ai_range_code_60xx,
- .ao_range_table = &range_bipolar10,
- .ao_range_code = ao_range_code_60xx,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6023] = {
- .name = "pci-das6023",
- .ai_se_chans = 16,
- .ai_bits = 12,
- .ai_speed = 5000,
- .ao_nchan = 0,
- .ao_scan_speed = 100000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_60xx,
- .ai_range_code = ai_range_code_60xx,
- .ao_range_table = &range_bipolar10,
- .ao_range_code = ao_range_code_60xx,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS6025] = {
- .name = "pci-das6025",
- .ai_se_chans = 16,
- .ai_bits = 12,
- .ai_speed = 5000,
- .ao_nchan = 2,
- .ao_bits = 12,
- .ao_scan_speed = 100000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_60xx,
- .ai_range_code = ai_range_code_60xx,
- .ao_range_table = &range_bipolar10,
- .ao_range_code = ao_range_code_60xx,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS6030] = {
- .name = "pci-das6030",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 10000,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6030,
- .ai_range_code = ai_range_code_6030,
- .ao_range_table = &ao_ranges_6030,
- .ao_range_code = ao_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6031] = {
- .name = "pci-das6031",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 10000,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6030,
- .ai_range_code = ai_range_code_6030,
- .ao_range_table = &ao_ranges_6030,
- .ao_range_code = ao_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6032] = {
- .name = "pci-das6032",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 10000,
- .ao_nchan = 0,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6030,
- .ai_range_code = ai_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6033] = {
- .name = "pci-das6033",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 10000,
- .ao_nchan = 0,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6030,
- .ai_range_code = ai_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6034] = {
- .name = "pci-das6034",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 0,
- .ao_scan_speed = 0,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_60xx,
- .ai_range_code = ai_range_code_60xx,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6035] = {
- .name = "pci-das6035",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 2,
- .ao_bits = 12,
- .ao_scan_speed = 100000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_60xx,
- .ai_range_code = ai_range_code_60xx,
- .ao_range_table = &range_bipolar10,
- .ao_range_code = ao_range_code_60xx,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6036] = {
- .name = "pci-das6036",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 100000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_60xx,
- .ai_range_code = ai_range_code_60xx,
- .ao_range_table = &range_bipolar10,
- .ao_range_code = ao_range_code_60xx,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6040] = {
- .name = "pci-das6040",
- .ai_se_chans = 16,
- .ai_bits = 12,
- .ai_speed = 2000,
- .ao_nchan = 2,
- .ao_bits = 12,
- .ao_scan_speed = 1000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6052,
- .ai_range_code = ai_range_code_6052,
- .ao_range_table = &ao_ranges_6030,
- .ao_range_code = ao_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6052] = {
- .name = "pci-das6052",
- .ai_se_chans = 16,
- .ai_bits = 16,
- .ai_speed = 3333,
- .ao_nchan = 2,
- .ao_bits = 16,
- .ao_scan_speed = 3333,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6052,
- .ai_range_code = ai_range_code_6052,
- .ao_range_table = &ao_ranges_6030,
- .ao_range_code = ao_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6070] = {
- .name = "pci-das6070",
- .ai_se_chans = 16,
- .ai_bits = 12,
- .ai_speed = 800,
- .ao_nchan = 2,
- .ao_bits = 12,
- .ao_scan_speed = 1000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6052,
- .ai_range_code = ai_range_code_6052,
- .ao_range_table = &ao_ranges_6030,
- .ao_range_code = ao_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS6071] = {
- .name = "pci-das6071",
- .ai_se_chans = 64,
- .ai_bits = 12,
- .ai_speed = 800,
- .ao_nchan = 2,
- .ao_bits = 12,
- .ao_scan_speed = 1000,
- .layout = LAYOUT_60XX,
- .ai_range_table = &ai_ranges_6052,
- .ai_range_code = ai_range_code_6052,
- .ao_range_table = &ao_ranges_6030,
- .ao_range_code = ao_range_code_6030,
- .ai_fifo = &ai_fifo_60xx,
- .has_8255 = 0,
- },
- [BOARD_PCIDAS4020_12] = {
- .name = "pci-das4020/12",
- .ai_se_chans = 4,
- .ai_bits = 12,
- .ai_speed = 50,
- .ao_bits = 12,
- .ao_nchan = 2,
- .ao_scan_speed = 0, /* no hardware pacing on ao */
- .layout = LAYOUT_4020,
- .ai_range_table = &ai_ranges_4020,
- .ao_range_table = &ao_ranges_4020,
- .ao_range_code = ao_range_code_4020,
- .ai_fifo = &ai_fifo_4020,
- .has_8255 = 1,
- },
-#if 0
- /*
- * The device id for these boards is unknown
- */
-
- [BOARD_PCIDAS6402_16_JR] = {
- .name = "pci-das6402/16/jr",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 5000,
- .ao_nchan = 0,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64xx,
- .ai_range_code = ai_range_code_64xx,
-		.ai_fifo	= &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M1_16_JR] = {
- .name = "pci-das64/m1/16/jr",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 1000,
- .ao_nchan = 0,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
-		.ai_fifo	= &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M2_16_JR] = {
- .name = "pci-das64/m2/16/jr",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 500,
- .ao_nchan = 0,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
-		.ai_fifo	= &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M3_16_JR] = {
- .name = "pci-das64/m3/16/jr",
- .ai_se_chans = 64,
- .ai_bits = 16,
- .ai_speed = 333,
- .ao_nchan = 0,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
-		.ai_fifo	= &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M1_14] = {
- .name = "pci-das64/m1/14",
- .ai_se_chans = 64,
- .ai_bits = 14,
- .ai_speed = 1000,
- .ao_nchan = 2,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
-		.ai_fifo	= &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M2_14] = {
- .name = "pci-das64/m2/14",
- .ai_se_chans = 64,
- .ai_bits = 14,
- .ai_speed = 500,
- .ao_nchan = 2,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
-		.ai_fifo	= &ai_fifo_64xx,
- .has_8255 = 1,
- },
- [BOARD_PCIDAS64_M3_14] = {
- .name = "pci-das64/m3/14",
- .ai_se_chans = 64,
- .ai_bits = 14,
- .ai_speed = 333,
- .ao_nchan = 2,
- .ao_scan_speed = 10000,
- .layout = LAYOUT_64XX,
- .ai_range_table = &ai_ranges_64_mx,
- .ai_range_code = ai_range_code_64_mx,
-		.ai_fifo	= &ai_fifo_64xx,
- .has_8255 = 1,
- },
-#endif
-};
-
-static inline unsigned short se_diff_bit_6xxx(struct comedi_device *dev,
- int use_differential)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- if ((board->layout == LAYOUT_64XX && !use_differential) ||
- (board->layout == LAYOUT_60XX && use_differential))
- return ADC_SE_DIFF_BIT;
-
- return 0;
-}
-
-struct ext_clock_info {
- /* master clock divisor to use for scans with external master clock */
- unsigned int divisor;
- /* chanspec for master clock input when used as scan begin src */
- unsigned int chanspec;
-};
-
-/* this structure is for data unique to this hardware driver. */
-struct pcidas64_private {
- /* base addresses (physical) */
- resource_size_t main_phys_iobase;
- resource_size_t dio_counter_phys_iobase;
- /* base addresses (ioremapped) */
- void __iomem *plx9080_iobase;
- void __iomem *main_iobase;
- /* local address (used by dma controller) */
- uint32_t local0_iobase;
- uint32_t local1_iobase;
- /* dma buffers for analog input */
- uint16_t *ai_buffer[MAX_AI_DMA_RING_COUNT];
- /* physical addresses of ai dma buffers */
- dma_addr_t ai_buffer_bus_addr[MAX_AI_DMA_RING_COUNT];
- /* array of ai dma descriptors read by plx9080,
- * allocated to get proper alignment */
- struct plx_dma_desc *ai_dma_desc;
- /* physical address of ai dma descriptor array */
- dma_addr_t ai_dma_desc_bus_addr;
- /* index of the ai dma descriptor/buffer
- * that is currently being used */
- unsigned int ai_dma_index;
- /* dma buffers for analog output */
- uint16_t *ao_buffer[AO_DMA_RING_COUNT];
- /* physical addresses of ao dma buffers */
- dma_addr_t ao_buffer_bus_addr[AO_DMA_RING_COUNT];
- struct plx_dma_desc *ao_dma_desc;
- dma_addr_t ao_dma_desc_bus_addr;
- /* keeps track of buffer where the next ao sample should go */
- unsigned int ao_dma_index;
- unsigned int hw_revision; /* stc chip hardware revision number */
- /* last bits sent to INTR_ENABLE_REG register */
- unsigned int intr_enable_bits;
- /* last bits sent to ADC_CONTROL1_REG register */
- uint16_t adc_control1_bits;
- /* last bits sent to FIFO_SIZE_REG register */
- uint16_t fifo_size_bits;
- /* last bits sent to HW_CONFIG_REG register */
- uint16_t hw_config_bits;
- uint16_t dac_control1_bits;
- /* last bits written to plx9080 control register */
- uint32_t plx_control_bits;
- /* last bits written to plx interrupt control and status register */
- uint32_t plx_intcsr_bits;
- /* index of calibration source readable through ai ch0 */
- int calibration_source;
- /* bits written to i2c calibration/range register */
- uint8_t i2c_cal_range_bits;
- /* configure digital triggers to trigger on falling edge */
- unsigned int ext_trig_falling;
- short ai_cmd_running;
- unsigned int ai_fifo_segment_length;
- struct ext_clock_info ext_clock;
- unsigned short ao_bounce_buffer[DAC_FIFO_SIZE];
-};
-
-static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
- unsigned int range_index)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- return board->ai_range_code[range_index] << 8;
-}
-
-static unsigned int hw_revision(const struct comedi_device *dev,
- uint16_t hw_status_bits)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- if (board->layout == LAYOUT_4020)
- return (hw_status_bits >> 13) & 0x7;
-
- return (hw_status_bits >> 12) & 0xf;
-}
-
-static void set_dac_range_bits(struct comedi_device *dev,
- uint16_t *bits, unsigned int channel,
- unsigned int range)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- unsigned int code = board->ao_range_code[range];
-
- if (channel > 1)
- dev_err(dev->class_dev, "bug! bad channel?\n");
- if (code & ~0x3)
- dev_err(dev->class_dev, "bug! bad range code?\n");
-
- *bits &= ~(0x3 << (2 * channel));
- *bits |= code << (2 * channel);
-};
-
-static inline int ao_cmd_is_supported(const struct pcidas64_board *board)
-{
- return board->ao_nchan && board->layout != LAYOUT_4020;
-}
-
-static void abort_dma(struct comedi_device *dev, unsigned int channel)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned long flags;
-
- /* spinlock for plx dma control/status reg */
- spin_lock_irqsave(&dev->spinlock, flags);
-
- plx9080_abort_dma(devpriv->plx9080_iobase, channel);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static void disable_plx_interrupts(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- devpriv->plx_intcsr_bits = 0;
- writel(devpriv->plx_intcsr_bits,
- devpriv->plx9080_iobase + PLX_INTRCS_REG);
-}
-
-static void disable_ai_interrupts(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->intr_enable_bits &=
- ~EN_ADC_INTR_SRC_BIT & ~EN_ADC_DONE_INTR_BIT &
- ~EN_ADC_ACTIVE_INTR_BIT & ~EN_ADC_STOP_INTR_BIT &
- ~EN_ADC_OVERRUN_BIT & ~ADC_INTR_SRC_MASK;
- writew(devpriv->intr_enable_bits,
- devpriv->main_iobase + INTR_ENABLE_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static void enable_ai_interrupts(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- uint32_t bits;
- unsigned long flags;
-
- bits = EN_ADC_OVERRUN_BIT | EN_ADC_DONE_INTR_BIT |
- EN_ADC_ACTIVE_INTR_BIT | EN_ADC_STOP_INTR_BIT;
- /*
- * Use pio transfer and interrupt on end of conversion
- * if CMDF_WAKE_EOS flag is set.
- */
- if (cmd->flags & CMDF_WAKE_EOS) {
- /* 4020 doesn't support pio transfers except for fifo dregs */
- if (board->layout != LAYOUT_4020)
- bits |= ADC_INTR_EOSCAN_BITS | EN_ADC_INTR_SRC_BIT;
- }
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->intr_enable_bits |= bits;
- writew(devpriv->intr_enable_bits,
- devpriv->main_iobase + INTR_ENABLE_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-/* initialize plx9080 chip */
-static void init_plx9080(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- uint32_t bits;
- void __iomem *plx_iobase = devpriv->plx9080_iobase;
-
- devpriv->plx_control_bits =
- readl(devpriv->plx9080_iobase + PLX_CONTROL_REG);
-
-#ifdef __BIG_ENDIAN
- bits = BIGEND_DMA0 | BIGEND_DMA1;
-#else
- bits = 0;
-#endif
- writel(bits, devpriv->plx9080_iobase + PLX_BIGEND_REG);
-
- disable_plx_interrupts(dev);
-
- abort_dma(dev, 0);
- abort_dma(dev, 1);
-
- /* configure dma0 mode */
- bits = 0;
- /* enable ready input, not sure if this is necessary */
- bits |= PLX_DMA_EN_READYIN_BIT;
- /* enable bterm, not sure if this is necessary */
- bits |= PLX_EN_BTERM_BIT;
- /* enable dma chaining */
- bits |= PLX_EN_CHAIN_BIT;
- /* enable interrupt on dma done
- * (probably don't need this, since chain never finishes) */
- bits |= PLX_EN_DMA_DONE_INTR_BIT;
- /* don't increment local address during transfers
- * (we are transferring from a fixed fifo register) */
- bits |= PLX_LOCAL_ADDR_CONST_BIT;
- /* route dma interrupt to pci bus */
- bits |= PLX_DMA_INTR_PCI_BIT;
- /* enable demand mode */
- bits |= PLX_DEMAND_MODE_BIT;
- /* enable local burst mode */
- bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
- /* 4020 uses 32 bit dma */
- if (board->layout == LAYOUT_4020)
- bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
- else /* localspace0 bus is 16 bits wide */
- bits |= PLX_LOCAL_BUS_16_WIDE_BITS;
- writel(bits, plx_iobase + PLX_DMA1_MODE_REG);
- if (ao_cmd_is_supported(board))
- writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
-
- /* enable interrupts on plx 9080 */
- devpriv->plx_intcsr_bits |=
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E | ICS_DMA1_E;
- writel(devpriv->plx_intcsr_bits,
- devpriv->plx9080_iobase + PLX_INTRCS_REG);
-}
-
-static void disable_ai_pacing(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned long flags;
-
- disable_ai_interrupts(dev);
-
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->adc_control1_bits &= ~ADC_SW_GATE_BIT;
- writew(devpriv->adc_control1_bits,
- devpriv->main_iobase + ADC_CONTROL1_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* disable pacing, triggering, etc */
- writew(ADC_DMA_DISABLE_BIT | ADC_SOFT_GATE_BITS | ADC_GATE_LEVEL_BIT,
- devpriv->main_iobase + ADC_CONTROL0_REG);
-}
-
-static int set_ai_fifo_segment_length(struct comedi_device *dev,
- unsigned int num_entries)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- static const int increment_size = 0x100;
- const struct hw_fifo_info *const fifo = board->ai_fifo;
- unsigned int num_increments;
- uint16_t bits;
-
- if (num_entries < increment_size)
- num_entries = increment_size;
- if (num_entries > fifo->max_segment_length)
- num_entries = fifo->max_segment_length;
-
- /* 1 == 256 entries, 2 == 512 entries, etc */
- num_increments = (num_entries + increment_size / 2) / increment_size;
-
- bits = (~(num_increments - 1)) & fifo->fifo_size_reg_mask;
- devpriv->fifo_size_bits &= ~fifo->fifo_size_reg_mask;
- devpriv->fifo_size_bits |= bits;
- writew(devpriv->fifo_size_bits,
- devpriv->main_iobase + FIFO_SIZE_REG);
-
- devpriv->ai_fifo_segment_length = num_increments * increment_size;
-
- return devpriv->ai_fifo_segment_length;
-}
-
-/*
- * adjusts the size of hardware fifo (which determines block size for dma xfers)
- */
-static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- unsigned int num_fifo_entries;
- int retval;
- const struct hw_fifo_info *const fifo = board->ai_fifo;
-
- num_fifo_entries = num_samples / fifo->sample_packing_ratio;
-
- retval = set_ai_fifo_segment_length(dev,
- num_fifo_entries /
- fifo->num_segments);
- if (retval < 0)
- return retval;
-
- num_samples = retval * fifo->num_segments * fifo->sample_packing_ratio;
-
- return num_samples;
-}
-
-/* query length of fifo */
-static unsigned int ai_fifo_size(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
-
- return devpriv->ai_fifo_segment_length *
- board->ai_fifo->num_segments *
- board->ai_fifo->sample_packing_ratio;
-}
-
-static void init_stc_registers(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- uint16_t bits;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
-
- /*
- * bit should be set for 6025,
- * although docs say boards with <= 16 chans should be cleared XXX
- */
- if (1)
- devpriv->adc_control1_bits |= ADC_QUEUE_CONFIG_BIT;
- writew(devpriv->adc_control1_bits,
- devpriv->main_iobase + ADC_CONTROL1_REG);
-
- /* 6402/16 manual says this register must be initialized to 0xff? */
- writew(0xff, devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
-
- bits = SLOW_DAC_BIT | DMA_CH_SELECT_BIT;
- if (board->layout == LAYOUT_4020)
- bits |= INTERNAL_CLOCK_4020_BITS;
- devpriv->hw_config_bits |= bits;
- writew(devpriv->hw_config_bits,
- devpriv->main_iobase + HW_CONFIG_REG);
-
- writew(0, devpriv->main_iobase + DAQ_SYNC_REG);
- writew(0, devpriv->main_iobase + CALIBRATION_REG);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* set fifos to maximum size */
- devpriv->fifo_size_bits |= DAC_FIFO_BITS;
- set_ai_fifo_segment_length(dev, board->ai_fifo->max_segment_length);
-
- devpriv->dac_control1_bits = DAC_OUTPUT_ENABLE_BIT;
- devpriv->intr_enable_bits =
- /* EN_DAC_INTR_SRC_BIT | DAC_INTR_QEMPTY_BITS | */
- EN_DAC_DONE_INTR_BIT | EN_DAC_UNDERRUN_BIT;
- writew(devpriv->intr_enable_bits,
- devpriv->main_iobase + INTR_ENABLE_REG);
-
- disable_ai_pacing(dev);
-};
-
-static int alloc_and_init_dma_members(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pcidas64_private *devpriv = dev->private;
- int i;
-
- /* allocate pci dma buffers */
- for (i = 0; i < ai_dma_ring_count(board); i++) {
- devpriv->ai_buffer[i] =
- pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
- &devpriv->ai_buffer_bus_addr[i]);
- if (!devpriv->ai_buffer[i])
- return -ENOMEM;
- }
- for (i = 0; i < AO_DMA_RING_COUNT; i++) {
- if (ao_cmd_is_supported(board)) {
- devpriv->ao_buffer[i] =
- pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
- &devpriv->
- ao_buffer_bus_addr[i]);
- if (!devpriv->ao_buffer[i])
- return -ENOMEM;
- }
- }
- /* allocate dma descriptors */
- devpriv->ai_dma_desc =
- pci_alloc_consistent(pcidev, sizeof(struct plx_dma_desc) *
- ai_dma_ring_count(board),
- &devpriv->ai_dma_desc_bus_addr);
- if (!devpriv->ai_dma_desc)
- return -ENOMEM;
-
- if (ao_cmd_is_supported(board)) {
- devpriv->ao_dma_desc =
- pci_alloc_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- AO_DMA_RING_COUNT,
- &devpriv->ao_dma_desc_bus_addr);
- if (!devpriv->ao_dma_desc)
- return -ENOMEM;
- }
- /* initialize dma descriptors */
- for (i = 0; i < ai_dma_ring_count(board); i++) {
- devpriv->ai_dma_desc[i].pci_start_addr =
- cpu_to_le32(devpriv->ai_buffer_bus_addr[i]);
- if (board->layout == LAYOUT_4020)
- devpriv->ai_dma_desc[i].local_start_addr =
- cpu_to_le32(devpriv->local1_iobase +
- ADC_FIFO_REG);
- else
- devpriv->ai_dma_desc[i].local_start_addr =
- cpu_to_le32(devpriv->local0_iobase +
- ADC_FIFO_REG);
- devpriv->ai_dma_desc[i].transfer_size = cpu_to_le32(0);
- devpriv->ai_dma_desc[i].next =
- cpu_to_le32((devpriv->ai_dma_desc_bus_addr +
- ((i + 1) % ai_dma_ring_count(board)) *
- sizeof(devpriv->ai_dma_desc[0])) |
- PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI);
- }
- if (ao_cmd_is_supported(board)) {
- for (i = 0; i < AO_DMA_RING_COUNT; i++) {
- devpriv->ao_dma_desc[i].pci_start_addr =
- cpu_to_le32(devpriv->ao_buffer_bus_addr[i]);
- devpriv->ao_dma_desc[i].local_start_addr =
- cpu_to_le32(devpriv->local0_iobase +
- DAC_FIFO_REG);
- devpriv->ao_dma_desc[i].transfer_size = cpu_to_le32(0);
- devpriv->ao_dma_desc[i].next =
- cpu_to_le32((devpriv->ao_dma_desc_bus_addr +
- ((i + 1) % (AO_DMA_RING_COUNT)) *
- sizeof(devpriv->ao_dma_desc[0])) |
- PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT);
- }
- }
- return 0;
-}
-
-static void cb_pcidas64_free_dma(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pcidas64_private *devpriv = dev->private;
- int i;
-
- if (!devpriv)
- return;
-
- /* free pci dma buffers */
- for (i = 0; i < ai_dma_ring_count(board); i++) {
- if (devpriv->ai_buffer[i])
- pci_free_consistent(pcidev,
- DMA_BUFFER_SIZE,
- devpriv->ai_buffer[i],
- devpriv->ai_buffer_bus_addr[i]);
- }
- for (i = 0; i < AO_DMA_RING_COUNT; i++) {
- if (devpriv->ao_buffer[i])
- pci_free_consistent(pcidev,
- DMA_BUFFER_SIZE,
- devpriv->ao_buffer[i],
- devpriv->ao_buffer_bus_addr[i]);
- }
- /* free dma descriptors */
- if (devpriv->ai_dma_desc)
- pci_free_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- ai_dma_ring_count(board),
- devpriv->ai_dma_desc,
- devpriv->ai_dma_desc_bus_addr);
- if (devpriv->ao_dma_desc)
- pci_free_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- AO_DMA_RING_COUNT,
- devpriv->ao_dma_desc,
- devpriv->ao_dma_desc_bus_addr);
-}
-
-static inline void warn_external_queue(struct comedi_device *dev)
-{
- dev_err(dev->class_dev,
- "AO command and AI external channel queue cannot be used simultaneously\n");
- dev_err(dev->class_dev,
- "Use internal AI channel queue (channels must be consecutive and use same range/aref)\n");
-}
-
-/*
- * the i2c bus on these boards requires a huge delay on setting clock or data high for some reason
- */
-static const int i2c_high_udelay = 1000;
-static const int i2c_low_udelay = 10;
-
-/* set i2c data line high or low */
-static void i2c_set_sda(struct comedi_device *dev, int state)
-{
- struct pcidas64_private *devpriv = dev->private;
- static const int data_bit = CTL_EE_W;
- void __iomem *plx_control_addr = devpriv->plx9080_iobase +
- PLX_CONTROL_REG;
-
- if (state) {
- /* set data line high */
- devpriv->plx_control_bits &= ~data_bit;
- writel(devpriv->plx_control_bits, plx_control_addr);
- udelay(i2c_high_udelay);
- } else { /* set data line low */
-
- devpriv->plx_control_bits |= data_bit;
- writel(devpriv->plx_control_bits, plx_control_addr);
- udelay(i2c_low_udelay);
- }
-}
-
-/* set i2c clock line high or low */
-static void i2c_set_scl(struct comedi_device *dev, int state)
-{
- struct pcidas64_private *devpriv = dev->private;
- static const int clock_bit = CTL_USERO;
- void __iomem *plx_control_addr = devpriv->plx9080_iobase +
- PLX_CONTROL_REG;
-
- if (state) {
- /* set clock line high */
- devpriv->plx_control_bits &= ~clock_bit;
- writel(devpriv->plx_control_bits, plx_control_addr);
- udelay(i2c_high_udelay);
- } else { /* set clock line low */
-
- devpriv->plx_control_bits |= clock_bit;
- writel(devpriv->plx_control_bits, plx_control_addr);
- udelay(i2c_low_udelay);
- }
-}
-
-static void i2c_write_byte(struct comedi_device *dev, uint8_t byte)
-{
- uint8_t bit;
- unsigned int num_bits = 8;
-
- for (bit = 1 << (num_bits - 1); bit; bit >>= 1) {
- i2c_set_scl(dev, 0);
- if ((byte & bit))
- i2c_set_sda(dev, 1);
- else
- i2c_set_sda(dev, 0);
- i2c_set_scl(dev, 1);
- }
-}
-
-/* we can't really read the lines, so fake it */
-static int i2c_read_ack(struct comedi_device *dev)
-{
- i2c_set_scl(dev, 0);
- i2c_set_sda(dev, 1);
- i2c_set_scl(dev, 1);
-
- return 0; /* return fake acknowledge bit */
-}
-
-/* send start bit */
-static void i2c_start(struct comedi_device *dev)
-{
- i2c_set_scl(dev, 1);
- i2c_set_sda(dev, 1);
- i2c_set_sda(dev, 0);
-}
-
-/* send stop bit */
-static void i2c_stop(struct comedi_device *dev)
-{
- i2c_set_scl(dev, 0);
- i2c_set_sda(dev, 0);
- i2c_set_scl(dev, 1);
- i2c_set_sda(dev, 1);
-}
-
-static void i2c_write(struct comedi_device *dev, unsigned int address,
- const uint8_t *data, unsigned int length)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int i;
- uint8_t bitstream;
- static const int read_bit = 0x1;
-
- /*
- * XXX need mutex to prevent simultaneous attempts to access
- * eeprom and i2c bus
- */
-
-	/* make sure we don't send anything to eeprom */
- devpriv->plx_control_bits &= ~CTL_EE_CS;
-
- i2c_stop(dev);
- i2c_start(dev);
-
- /* send address and write bit */
- bitstream = (address << 1) & ~read_bit;
- i2c_write_byte(dev, bitstream);
-
- /* get acknowledge */
- if (i2c_read_ack(dev) != 0) {
- dev_err(dev->class_dev, "failed: no acknowledge\n");
- i2c_stop(dev);
- return;
- }
- /* write data bytes */
- for (i = 0; i < length; i++) {
- i2c_write_byte(dev, data[i]);
- if (i2c_read_ack(dev) != 0) {
- dev_err(dev->class_dev, "failed: no acknowledge\n");
- i2c_stop(dev);
- return;
- }
- }
- i2c_stop(dev);
-}
-
-static int cb_pcidas64_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- unsigned int status;
-
- status = readw(devpriv->main_iobase + HW_STATUS_REG);
- if (board->layout == LAYOUT_4020) {
- status = readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG);
- if (status)
- return 0;
- } else {
- if (pipe_full_bits(status))
- return 0;
- }
- return -EBUSY;
-}
-
-static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- unsigned int bits = 0, n;
- unsigned int channel, range, aref;
- unsigned long flags;
- int ret;
-
- channel = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
- aref = CR_AREF(insn->chanspec);
-
- /* disable card's analog input interrupt sources and pacing */
- /* 4020 generates dac done interrupts even though they are disabled */
- disable_ai_pacing(dev);
-
- spin_lock_irqsave(&dev->spinlock, flags);
- if (insn->chanspec & CR_ALT_FILTER)
- devpriv->adc_control1_bits |= ADC_DITHER_BIT;
- else
- devpriv->adc_control1_bits &= ~ADC_DITHER_BIT;
- writew(devpriv->adc_control1_bits,
- devpriv->main_iobase + ADC_CONTROL1_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- if (board->layout != LAYOUT_4020) {
- /* use internal queue */
- devpriv->hw_config_bits &= ~EXT_QUEUE_BIT;
- writew(devpriv->hw_config_bits,
- devpriv->main_iobase + HW_CONFIG_REG);
-
- /* ALT_SOURCE is internal calibration reference */
- if (insn->chanspec & CR_ALT_SOURCE) {
- unsigned int cal_en_bit;
-
- if (board->layout == LAYOUT_60XX)
- cal_en_bit = CAL_EN_60XX_BIT;
- else
- cal_en_bit = CAL_EN_64XX_BIT;
- /*
- * select internal reference source to connect
- * to channel 0
- */
- writew(cal_en_bit |
- adc_src_bits(devpriv->calibration_source),
- devpriv->main_iobase + CALIBRATION_REG);
- } else {
- /*
- * make sure internal calibration source
- * is turned off
- */
- writew(0, devpriv->main_iobase + CALIBRATION_REG);
- }
- /* load internal queue */
- bits = 0;
- /* set gain */
- bits |= ai_range_bits_6xxx(dev, CR_RANGE(insn->chanspec));
- /* set single-ended / differential */
- bits |= se_diff_bit_6xxx(dev, aref == AREF_DIFF);
- if (aref == AREF_COMMON)
- bits |= ADC_COMMON_BIT;
- bits |= adc_chan_bits(channel);
- /* set stop channel */
- writew(adc_chan_bits(channel),
- devpriv->main_iobase + ADC_QUEUE_HIGH_REG);
- /* set start channel, and rest of settings */
- writew(bits, devpriv->main_iobase + ADC_QUEUE_LOAD_REG);
- } else {
- uint8_t old_cal_range_bits = devpriv->i2c_cal_range_bits;
-
- devpriv->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
- if (insn->chanspec & CR_ALT_SOURCE) {
- devpriv->i2c_cal_range_bits |=
- adc_src_4020_bits(devpriv->calibration_source);
- } else { /* select BNC inputs */
- devpriv->i2c_cal_range_bits |= adc_src_4020_bits(4);
- }
- /* select range */
- if (range == 0)
- devpriv->i2c_cal_range_bits |= attenuate_bit(channel);
- else
- devpriv->i2c_cal_range_bits &= ~attenuate_bit(channel);
- /*
- * update calibration/range i2c register only if necessary,
- * as it is very slow
- */
- if (old_cal_range_bits != devpriv->i2c_cal_range_bits) {
- uint8_t i2c_data = devpriv->i2c_cal_range_bits;
-
- i2c_write(dev, RANGE_CAL_I2C_ADDR, &i2c_data,
- sizeof(i2c_data));
- }
-
- /*
-		 * The 4020 manual asks that the sample interval register be
-		 * set before writing to the convert register.
- * Using somewhat arbitrary setting of 4 master clock ticks
- * = 0.1 usec
- */
- writew(0, devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
- writew(2, devpriv->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
- }
-
- for (n = 0; n < insn->n; n++) {
- /* clear adc buffer (inside loop for 4020 sake) */
- writew(0, devpriv->main_iobase + ADC_BUFFER_CLEAR_REG);
-
- /* trigger conversion, bits sent only matter for 4020 */
- writew(adc_convert_chan_4020_bits(CR_CHAN(insn->chanspec)),
- devpriv->main_iobase + ADC_CONVERT_REG);
-
- /* wait for data */
- ret = comedi_timeout(dev, s, insn, cb_pcidas64_ai_eoc, 0);
- if (ret)
- return ret;
-
- if (board->layout == LAYOUT_4020)
- data[n] = readl(dev->mmio + ADC_FIFO_REG) & 0xffff;
- else
- data[n] = readw(devpriv->main_iobase + PIPE1_READ_REG);
- }
-
- return n;
-}
-
-static int ai_config_calibration_source(struct comedi_device *dev,
- unsigned int *data)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- unsigned int source = data[1];
- int num_calibration_sources;
-
- if (board->layout == LAYOUT_60XX)
- num_calibration_sources = 16;
- else
- num_calibration_sources = 8;
- if (source >= num_calibration_sources) {
- dev_dbg(dev->class_dev, "invalid calibration source: %i\n",
- source);
- return -EINVAL;
- }
-
- devpriv->calibration_source = source;
-
- return 2;
-}
-
-static int ai_config_block_size(struct comedi_device *dev, unsigned int *data)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- int fifo_size;
- const struct hw_fifo_info *const fifo = board->ai_fifo;
- unsigned int block_size, requested_block_size;
- int retval;
-
- requested_block_size = data[1];
-
- if (requested_block_size) {
- fifo_size = requested_block_size * fifo->num_segments /
- bytes_in_sample;
-
- retval = set_ai_fifo_size(dev, fifo_size);
- if (retval < 0)
- return retval;
- }
-
- block_size = ai_fifo_size(dev) / fifo->num_segments * bytes_in_sample;
-
- data[1] = block_size;
-
- return 2;
-}
-
-static int ai_config_master_clock_4020(struct comedi_device *dev,
- unsigned int *data)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int divisor = data[4];
- int retval = 0;
-
- if (divisor < 2) {
- divisor = 2;
- retval = -EAGAIN;
- }
-
- switch (data[1]) {
- case COMEDI_EV_SCAN_BEGIN:
- devpriv->ext_clock.divisor = divisor;
- devpriv->ext_clock.chanspec = data[2];
- break;
- default:
- return -EINVAL;
- }
-
- data[4] = divisor;
-
- return retval ? retval : 5;
-}
-
-/* XXX could add support for 60xx series */
-static int ai_config_master_clock(struct comedi_device *dev, unsigned int *data)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- switch (board->layout) {
- case LAYOUT_4020:
- return ai_config_master_clock_4020(dev, data);
- default:
- return -EINVAL;
- }
-
- return -EINVAL;
-}
-
-static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int id = data[0];
-
- switch (id) {
- case INSN_CONFIG_ALT_SOURCE:
- return ai_config_calibration_source(dev, data);
- case INSN_CONFIG_BLOCK_SIZE:
- return ai_config_block_size(dev, data);
- case INSN_CONFIG_TIMER_1:
- return ai_config_master_clock(dev, data);
- default:
- return -EINVAL;
- }
- return -EINVAL;
-}
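/*
 * Example (user-space, comedilib): minimal sketches of the other two
 * configuration options dispatched above.  The helper names are
 * placeholders; only the data[] layout comes from the handlers.
 */
#include <string.h>
#include <comedilib.h>

/* Select a calibration source; read it back on channel 0 with
 * CR_ALT_SOURCE set in the chanspec. */
static int select_cal_source(comedi_t *dev, unsigned int ai_subdev,
			     unsigned int source)
{
	comedi_insn insn;
	lsampl_t data[2] = { INSN_CONFIG_ALT_SOURCE, source };

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = ai_subdev;
	insn.n = 2;
	insn.data = data;
	return comedi_do_insn(dev, &insn);
}

/* Request a dma block size in bytes; the driver writes back the size it
 * actually configured (pass 0 to just query the current size). */
static int set_ai_block_size(comedi_t *dev, unsigned int ai_subdev,
			     unsigned int *bytes)
{
	comedi_insn insn;
	lsampl_t data[2] = { INSN_CONFIG_BLOCK_SIZE, *bytes };
	int ret;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = ai_subdev;
	insn.n = 2;
	insn.data = data;
	ret = comedi_do_insn(dev, &insn);
	if (ret >= 0)
		*bytes = data[1];
	return ret;
}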
-
-/*
- * Gets nearest achievable timing given master clock speed, does not
- * take into account possible minimum/maximum divisor values. Used
- * by other timing checking functions.
- */
-static unsigned int get_divisor(unsigned int ns, unsigned int flags)
-{
- unsigned int divisor;
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_UP:
- divisor = (ns + TIMER_BASE - 1) / TIMER_BASE;
- break;
- case CMDF_ROUND_DOWN:
- divisor = ns / TIMER_BASE;
- break;
- case CMDF_ROUND_NEAREST:
- default:
- divisor = (ns + TIMER_BASE / 2) / TIMER_BASE;
- break;
- }
- return divisor;
-}
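As a rough illustration of the rounding above, assume TIMER_BASE is 25 ns (an
assumption made only for this arithmetic; the real constant is defined
elsewhere in the driver): a requested 1015 ns interval becomes divisor 41
under nearest rounding, 40 rounding down, and 41 rounding up.  A minimal
stand-alone sketch, with the CMDF_ROUND_* flags modelled by a local enum so
it compiles outside the kernel:

#include <stdio.h>

#define TIMER_BASE 25	/* assumed master-clock period in ns */

enum round_mode { ROUND_NEAREST, ROUND_DOWN, ROUND_UP };

static unsigned int example_divisor(unsigned int ns, enum round_mode mode)
{
	switch (mode) {
	case ROUND_UP:
		return (ns + TIMER_BASE - 1) / TIMER_BASE;
	case ROUND_DOWN:
		return ns / TIMER_BASE;
	case ROUND_NEAREST:
	default:
		return (ns + TIMER_BASE / 2) / TIMER_BASE;
	}
}

int main(void)
{
	/* 1015 ns is not a multiple of 25 ns, so the modes differ */
	printf("nearest: %u\n", example_divisor(1015, ROUND_NEAREST)); /* 41 */
	printf("down:    %u\n", example_divisor(1015, ROUND_DOWN));    /* 40 */
	printf("up:      %u\n", example_divisor(1015, ROUND_UP));      /* 41 */
	return 0;
}

The pacing registers written later take this divisor minus a small fixed
offset (minus 3 on the 6xxx boards and minus 2 on the 4020, per the comments
further down).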
-
-/*
- * Utility function that rounds the desired timing to an achievable time and
- * sets cmd members appropriately.
- * The adc paces conversions from the master clock by dividing by (x + 3),
- * where x is a 24-bit number.
- */
-static void check_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- unsigned long long convert_divisor = 0;
- unsigned int scan_divisor;
- static const int min_convert_divisor = 3;
- static const int max_convert_divisor =
- max_counter_value + min_convert_divisor;
- static const int min_scan_divisor_4020 = 2;
- unsigned long long max_scan_divisor, min_scan_divisor;
-
- if (cmd->convert_src == TRIG_TIMER) {
- if (board->layout == LAYOUT_4020) {
- cmd->convert_arg = 0;
- } else {
- convert_divisor = get_divisor(cmd->convert_arg,
- cmd->flags);
- if (convert_divisor > max_convert_divisor)
- convert_divisor = max_convert_divisor;
- if (convert_divisor < min_convert_divisor)
- convert_divisor = min_convert_divisor;
- cmd->convert_arg = convert_divisor * TIMER_BASE;
- }
- } else if (cmd->convert_src == TRIG_NOW) {
- cmd->convert_arg = 0;
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- scan_divisor = get_divisor(cmd->scan_begin_arg, cmd->flags);
- if (cmd->convert_src == TRIG_TIMER) {
- min_scan_divisor = convert_divisor * cmd->chanlist_len;
- max_scan_divisor =
- (convert_divisor * cmd->chanlist_len - 1) +
- max_counter_value;
- } else {
- min_scan_divisor = min_scan_divisor_4020;
- max_scan_divisor = max_counter_value + min_scan_divisor;
- }
- if (scan_divisor > max_scan_divisor)
- scan_divisor = max_scan_divisor;
- if (scan_divisor < min_scan_divisor)
- scan_divisor = min_scan_divisor;
- cmd->scan_begin_arg = scan_divisor * TIMER_BASE;
- }
-}
-
-static int cb_pcidas64_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
-
- if (aref != aref0) {
- dev_dbg(dev->class_dev,
- "all elements in chanlist must use the same analog reference\n");
- return -EINVAL;
- }
- }
-
- if (board->layout == LAYOUT_4020) {
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (chan != (chan0 + i)) {
- dev_dbg(dev->class_dev,
- "chanlist must use consecutive channels\n");
- return -EINVAL;
- }
- }
- if (cmd->chanlist_len == 3) {
- dev_dbg(dev->class_dev,
- "chanlist cannot be 3 channels long, use 1, 2, or 4 channels\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- int err = 0;
- unsigned int tmp_arg, tmp_arg2;
- unsigned int triggers;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
-
- triggers = TRIG_TIMER;
- if (board->layout == LAYOUT_4020)
- triggers |= TRIG_OTHER;
- else
- triggers |= TRIG_FOLLOW;
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, triggers);
-
- triggers = TRIG_TIMER;
- if (board->layout == LAYOUT_4020)
- triggers |= TRIG_NOW;
- else
- triggers |= TRIG_EXT;
- err |= comedi_check_trigger_src(&cmd->convert_src, triggers);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src,
- TRIG_COUNT | TRIG_EXT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->convert_src == TRIG_EXT && cmd->scan_begin_src == TRIG_TIMER)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- /*
- * start_arg is the CR_CHAN | CR_INVERT of the
- * external trigger.
- */
- break;
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- if (board->layout == LAYOUT_4020) {
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg,
- 0);
- } else {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- /*
- * if scans are timed faster than conversion rate
- * allows
- */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(
- &cmd->scan_begin_arg,
- cmd->convert_arg *
- cmd->chanlist_len);
- }
- }
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- switch (cmd->stop_src) {
- case TRIG_EXT:
- break;
- case TRIG_COUNT:
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- break;
- case TRIG_NONE:
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
- break;
- default:
- break;
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- tmp_arg = cmd->convert_arg;
- tmp_arg2 = cmd->scan_begin_arg;
- check_adc_timing(dev, cmd);
- if (tmp_arg != cmd->convert_arg)
- err++;
- if (tmp_arg2 != cmd->scan_begin_arg)
- err++;
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= cb_pcidas64_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int use_hw_sample_counter(struct comedi_cmd *cmd)
-{
-	/* disable for now until I work out a race */
- return 0;
-
- if (cmd->stop_src == TRIG_COUNT && cmd->stop_arg <= max_counter_value)
- return 1;
-
- return 0;
-}
-
-static void setup_sample_counters(struct comedi_device *dev,
- struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- /* load hardware conversion counter */
- if (use_hw_sample_counter(cmd)) {
- writew(cmd->stop_arg & 0xffff,
- devpriv->main_iobase + ADC_COUNT_LOWER_REG);
- writew((cmd->stop_arg >> 16) & 0xff,
- devpriv->main_iobase + ADC_COUNT_UPPER_REG);
- } else {
- writew(1, devpriv->main_iobase + ADC_COUNT_LOWER_REG);
- }
-}
-
-static inline unsigned int dma_transfer_size(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- unsigned int num_samples;
-
- num_samples = devpriv->ai_fifo_segment_length *
- board->ai_fifo->sample_packing_ratio;
- if (num_samples > DMA_BUFFER_SIZE / sizeof(uint16_t))
- num_samples = DMA_BUFFER_SIZE / sizeof(uint16_t);
-
- return num_samples;
-}
-
-static uint32_t ai_convert_counter_6xxx(const struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- /* supposed to load counter with desired divisor minus 3 */
- return cmd->convert_arg / TIMER_BASE - 3;
-}
-
-static uint32_t ai_scan_counter_6xxx(struct comedi_device *dev,
- struct comedi_cmd *cmd)
-{
- uint32_t count;
-
- /* figure out how long we need to delay at end of scan */
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- count = (cmd->scan_begin_arg -
- (cmd->convert_arg * (cmd->chanlist_len - 1))) /
- TIMER_BASE;
- break;
- case TRIG_FOLLOW:
- count = cmd->convert_arg / TIMER_BASE;
- break;
- default:
- return 0;
- }
- return count - 3;
-}
-
-static uint32_t ai_convert_counter_4020(struct comedi_device *dev,
- struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int divisor;
-
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- divisor = cmd->scan_begin_arg / TIMER_BASE;
- break;
- case TRIG_OTHER:
- divisor = devpriv->ext_clock.divisor;
- break;
- default: /* should never happen */
- dev_err(dev->class_dev, "bug! failed to set ai pacing!\n");
- divisor = 1000;
- break;
- }
-
- /* supposed to load counter with desired divisor minus 2 for 4020 */
- return divisor - 2;
-}
-
-static void select_master_clock_4020(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- /* select internal/external master clock */
- devpriv->hw_config_bits &= ~MASTER_CLOCK_4020_MASK;
- if (cmd->scan_begin_src == TRIG_OTHER) {
- int chanspec = devpriv->ext_clock.chanspec;
-
- if (CR_CHAN(chanspec))
- devpriv->hw_config_bits |= BNC_CLOCK_4020_BITS;
- else
- devpriv->hw_config_bits |= EXT_CLOCK_4020_BITS;
- } else {
- devpriv->hw_config_bits |= INTERNAL_CLOCK_4020_BITS;
- }
- writew(devpriv->hw_config_bits,
- devpriv->main_iobase + HW_CONFIG_REG);
-}
-
-static void select_master_clock(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- switch (board->layout) {
- case LAYOUT_4020:
- select_master_clock_4020(dev, cmd);
- break;
- default:
- break;
- }
-}
-
-static inline void dma_start_sync(struct comedi_device *dev,
- unsigned int channel)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned long flags;
-
- /* spinlock for plx dma control/status reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- if (channel)
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
- PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- else
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
- PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- uint32_t convert_counter = 0, scan_counter = 0;
-
- check_adc_timing(dev, cmd);
-
- select_master_clock(dev, cmd);
-
- if (board->layout == LAYOUT_4020) {
- convert_counter = ai_convert_counter_4020(dev, cmd);
- } else {
- convert_counter = ai_convert_counter_6xxx(dev, cmd);
- scan_counter = ai_scan_counter_6xxx(dev, cmd);
- }
-
- /* load lower 16 bits of convert interval */
- writew(convert_counter & 0xffff,
- devpriv->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
- /* load upper 8 bits of convert interval */
- writew((convert_counter >> 16) & 0xff,
- devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
- /* load lower 16 bits of scan delay */
- writew(scan_counter & 0xffff,
- devpriv->main_iobase + ADC_DELAY_INTERVAL_LOWER_REG);
- /* load upper 8 bits of scan delay */
- writew((scan_counter >> 16) & 0xff,
- devpriv->main_iobase + ADC_DELAY_INTERVAL_UPPER_REG);
-}
-
-static int use_internal_queue_6xxx(const struct comedi_cmd *cmd)
-{
- int i;
-
- for (i = 0; i + 1 < cmd->chanlist_len; i++) {
- if (CR_CHAN(cmd->chanlist[i + 1]) !=
- CR_CHAN(cmd->chanlist[i]) + 1)
- return 0;
- if (CR_RANGE(cmd->chanlist[i + 1]) !=
- CR_RANGE(cmd->chanlist[i]))
- return 0;
- if (CR_AREF(cmd->chanlist[i + 1]) != CR_AREF(cmd->chanlist[i]))
- return 0;
- }
- return 1;
-}
-
-static int setup_channel_queue(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- unsigned short bits;
- int i;
-
- if (board->layout != LAYOUT_4020) {
- if (use_internal_queue_6xxx(cmd)) {
- devpriv->hw_config_bits &= ~EXT_QUEUE_BIT;
- writew(devpriv->hw_config_bits,
- devpriv->main_iobase + HW_CONFIG_REG);
- bits = 0;
- /* set channel */
- bits |= adc_chan_bits(CR_CHAN(cmd->chanlist[0]));
- /* set gain */
- bits |= ai_range_bits_6xxx(dev,
- CR_RANGE(cmd->chanlist[0]));
- /* set single-ended / differential */
- bits |= se_diff_bit_6xxx(dev,
- CR_AREF(cmd->chanlist[0]) ==
- AREF_DIFF);
- if (CR_AREF(cmd->chanlist[0]) == AREF_COMMON)
- bits |= ADC_COMMON_BIT;
- /* set stop channel */
- writew(adc_chan_bits
- (CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])),
- devpriv->main_iobase + ADC_QUEUE_HIGH_REG);
- /* set start channel, and rest of settings */
- writew(bits,
- devpriv->main_iobase + ADC_QUEUE_LOAD_REG);
- } else {
- /* use external queue */
- if (dev->write_subdev && dev->write_subdev->busy) {
- warn_external_queue(dev);
- return -EBUSY;
- }
- devpriv->hw_config_bits |= EXT_QUEUE_BIT;
- writew(devpriv->hw_config_bits,
- devpriv->main_iobase + HW_CONFIG_REG);
- /* clear DAC buffer to prevent weird interactions */
- writew(0,
- devpriv->main_iobase + DAC_BUFFER_CLEAR_REG);
- /* clear queue pointer */
- writew(0, devpriv->main_iobase + ADC_QUEUE_CLEAR_REG);
- /* load external queue */
-			for (i = 0; i < cmd->chanlist_len; i++) {
-				unsigned int chanspec = cmd->chanlist[i];
-
-				bits = 0;
-				/* set channel */
-				bits |= adc_chan_bits(CR_CHAN(chanspec));
-				/* set gain */
-				bits |= ai_range_bits_6xxx(dev,
-							   CR_RANGE(chanspec));
-				/* set single-ended / differential */
-				bits |= se_diff_bit_6xxx(dev,
-							 CR_AREF(chanspec) ==
-							 AREF_DIFF);
-				if (CR_AREF(chanspec) == AREF_COMMON)
-					bits |= ADC_COMMON_BIT;
- /* mark end of queue */
- if (i == cmd->chanlist_len - 1)
- bits |= QUEUE_EOSCAN_BIT |
- QUEUE_EOSEQ_BIT;
- writew(bits,
- devpriv->main_iobase +
- ADC_QUEUE_FIFO_REG);
- }
- /*
- * doing a queue clear is not specified in board docs,
- * but required for reliable operation
- */
- writew(0, devpriv->main_iobase + ADC_QUEUE_CLEAR_REG);
- /* prime queue holding register */
- writew(0, devpriv->main_iobase + ADC_QUEUE_LOAD_REG);
- }
- } else {
- unsigned short old_cal_range_bits = devpriv->i2c_cal_range_bits;
-
- devpriv->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
- /* select BNC inputs */
- devpriv->i2c_cal_range_bits |= adc_src_4020_bits(4);
- /* select ranges */
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int channel = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
-
- if (range == 0)
- devpriv->i2c_cal_range_bits |=
- attenuate_bit(channel);
- else
- devpriv->i2c_cal_range_bits &=
- ~attenuate_bit(channel);
- }
- /*
- * update calibration/range i2c register only if necessary,
- * as it is very slow
- */
- if (old_cal_range_bits != devpriv->i2c_cal_range_bits) {
- uint8_t i2c_data = devpriv->i2c_cal_range_bits;
-
- i2c_write(dev, RANGE_CAL_I2C_ADDR, &i2c_data,
- sizeof(i2c_data));
- }
- }
- return 0;
-}
-
-static inline void load_first_dma_descriptor(struct comedi_device *dev,
- unsigned int dma_channel,
- unsigned int descriptor_bits)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- /*
- * The transfer size, pci address, and local address registers
- * are supposedly unused during chained dma,
- * but I have found that left over values from last operation
- * occasionally cause problems with transfer of first dma
- * block. Initializing them to zero seems to fix the problem.
- */
- if (dma_channel) {
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA1_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG);
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA1_LOCAL_ADDRESS_REG);
- writel(descriptor_bits,
- devpriv->plx9080_iobase + PLX_DMA1_DESCRIPTOR_REG);
- } else {
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA0_LOCAL_ADDRESS_REG);
- writel(descriptor_bits,
- devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
- }
-}
-
-static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- uint32_t bits;
- unsigned int i;
- unsigned long flags;
- int retval;
-
- disable_ai_pacing(dev);
- abort_dma(dev, 1);
-
- retval = setup_channel_queue(dev, cmd);
- if (retval < 0)
- return retval;
-
- /* make sure internal calibration source is turned off */
- writew(0, devpriv->main_iobase + CALIBRATION_REG);
-
- set_ai_pacing(dev, cmd);
-
- setup_sample_counters(dev, cmd);
-
- enable_ai_interrupts(dev, cmd);
-
- spin_lock_irqsave(&dev->spinlock, flags);
- /* set mode, allow conversions through software gate */
- devpriv->adc_control1_bits |= ADC_SW_GATE_BIT;
- devpriv->adc_control1_bits &= ~ADC_DITHER_BIT;
- if (board->layout != LAYOUT_4020) {
- devpriv->adc_control1_bits &= ~ADC_MODE_MASK;
- if (cmd->convert_src == TRIG_EXT)
- /* good old mode 13 */
- devpriv->adc_control1_bits |= adc_mode_bits(13);
- else
- /* mode 8. What else could you need? */
- devpriv->adc_control1_bits |= adc_mode_bits(8);
- } else {
- devpriv->adc_control1_bits &= ~CHANNEL_MODE_4020_MASK;
- if (cmd->chanlist_len == 4)
- devpriv->adc_control1_bits |= FOUR_CHANNEL_4020_BITS;
- else if (cmd->chanlist_len == 2)
- devpriv->adc_control1_bits |= TWO_CHANNEL_4020_BITS;
- devpriv->adc_control1_bits &= ~ADC_LO_CHANNEL_4020_MASK;
- devpriv->adc_control1_bits |=
- adc_lo_chan_4020_bits(CR_CHAN(cmd->chanlist[0]));
- devpriv->adc_control1_bits &= ~ADC_HI_CHANNEL_4020_MASK;
-		devpriv->adc_control1_bits |= adc_hi_chan_4020_bits(
-				CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]));
- }
- writew(devpriv->adc_control1_bits,
- devpriv->main_iobase + ADC_CONTROL1_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* clear adc buffer */
- writew(0, devpriv->main_iobase + ADC_BUFFER_CLEAR_REG);
-
- if ((cmd->flags & CMDF_WAKE_EOS) == 0 ||
- board->layout == LAYOUT_4020) {
- devpriv->ai_dma_index = 0;
-
- /* set dma transfer size */
- for (i = 0; i < ai_dma_ring_count(board); i++)
- devpriv->ai_dma_desc[i].transfer_size =
- cpu_to_le32(dma_transfer_size(dev) *
- sizeof(uint16_t));
-
- /* give location of first dma descriptor */
- load_first_dma_descriptor(dev, 1,
- devpriv->ai_dma_desc_bus_addr |
- PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI);
-
- dma_start_sync(dev, 1);
- }
-
- if (board->layout == LAYOUT_4020) {
- /* set source for external triggers */
- bits = 0;
- if (cmd->start_src == TRIG_EXT && CR_CHAN(cmd->start_arg))
- bits |= EXT_START_TRIG_BNC_BIT;
- if (cmd->stop_src == TRIG_EXT && CR_CHAN(cmd->stop_arg))
- bits |= EXT_STOP_TRIG_BNC_BIT;
- writew(bits, devpriv->main_iobase + DAQ_ATRIG_LOW_4020_REG);
- }
-
- spin_lock_irqsave(&dev->spinlock, flags);
-
- /* enable pacing, triggering, etc */
- bits = ADC_ENABLE_BIT | ADC_SOFT_GATE_BITS | ADC_GATE_LEVEL_BIT;
- if (cmd->flags & CMDF_WAKE_EOS)
- bits |= ADC_DMA_DISABLE_BIT;
- /* set start trigger */
- if (cmd->start_src == TRIG_EXT) {
- bits |= ADC_START_TRIG_EXT_BITS;
- if (cmd->start_arg & CR_INVERT)
- bits |= ADC_START_TRIG_FALLING_BIT;
- } else if (cmd->start_src == TRIG_NOW) {
- bits |= ADC_START_TRIG_SOFT_BITS;
- }
- if (use_hw_sample_counter(cmd))
- bits |= ADC_SAMPLE_COUNTER_EN_BIT;
- writew(bits, devpriv->main_iobase + ADC_CONTROL0_REG);
-
- devpriv->ai_cmd_running = 1;
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* start acquisition */
- if (cmd->start_src == TRIG_NOW)
- writew(0, devpriv->main_iobase + ADC_START_REG);
-
- return 0;
-}
-
-/* read num_samples from 16 bit wide ai fifo */
-static void pio_drain_ai_fifo_16(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int i;
- uint16_t prepost_bits;
- int read_segment, read_index, write_segment, write_index;
- int num_samples;
-
- do {
- /* get least significant 15 bits */
- read_index = readw(devpriv->main_iobase + ADC_READ_PNTR_REG) &
- 0x7fff;
- write_index = readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG) &
- 0x7fff;
- /*
-		 * Get most significant bits (Gray code).
- * Different boards use different code so use a scheme
- * that doesn't depend on encoding. This read must
- * occur after reading least significant 15 bits to avoid race
- * with fifo switching to next segment.
- */
- prepost_bits = readw(devpriv->main_iobase + PREPOST_REG);
-
- /*
- * if read and write pointers are not on the same fifo segment,
- * read to the end of the read segment
- */
- read_segment = adc_upper_read_ptr_code(prepost_bits);
- write_segment = adc_upper_write_ptr_code(prepost_bits);
-
- if (read_segment != write_segment)
- num_samples =
- devpriv->ai_fifo_segment_length - read_index;
- else
- num_samples = write_index - read_index;
- if (num_samples < 0) {
-			dev_err(dev->class_dev, "bug! num_samples < 0\n");
- break;
- }
-
- num_samples = comedi_nsamples_left(s, num_samples);
- if (num_samples == 0)
- break;
-
- for (i = 0; i < num_samples; i++) {
- unsigned short val;
-
- val = readw(devpriv->main_iobase + ADC_FIFO_REG);
- comedi_buf_write_samples(s, &val, 1);
- }
-
- } while (read_segment != write_segment);
-}
-
-/*
- * Read from 32 bit wide ai fifo of 4020 - deal with insane Gray coding of
- * pointers. The pci-4020 hardware only supports dma transfers (it only
- * supports the use of pio for draining the last remaining points from the
- * fifo when a data acquisition operation has completed).
- */
-static void pio_drain_ai_fifo_32(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int nsamples;
- unsigned int i;
- uint32_t fifo_data;
- int write_code =
- readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG) & 0x7fff;
- int read_code =
- readw(devpriv->main_iobase + ADC_READ_PNTR_REG) & 0x7fff;
-
- nsamples = comedi_nsamples_left(s, 100000);
- for (i = 0; read_code != write_code && i < nsamples;) {
- unsigned short val;
-
- fifo_data = readl(dev->mmio + ADC_FIFO_REG);
- val = fifo_data & 0xffff;
- comedi_buf_write_samples(s, &val, 1);
- i++;
- if (i < nsamples) {
- val = (fifo_data >> 16) & 0xffff;
- comedi_buf_write_samples(s, &val, 1);
- i++;
- }
- read_code = readw(devpriv->main_iobase + ADC_READ_PNTR_REG) &
- 0x7fff;
- }
-}
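Each 32-bit word read from the 4020 fifo above packs two 16-bit samples, low
half first, which is exactly how the loop splits it.  A tiny stand-alone
sketch of the unpacking (the fifo word value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fifo_data = 0xbeef1234;	/* made-up fifo word */
	uint16_t first = fifo_data & 0xffff;		/* 0x1234, stored first */
	uint16_t second = (fifo_data >> 16) & 0xffff;	/* 0xbeef, stored second */

	printf("first=0x%04x second=0x%04x\n", first, second);
	return 0;
}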
-
-/* empty fifo */
-static void pio_drain_ai_fifo(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- if (board->layout == LAYOUT_4020)
- pio_drain_ai_fifo_32(dev);
- else
- pio_drain_ai_fifo_16(dev);
-}
-
-static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- uint32_t next_transfer_addr;
- int j;
- int num_samples = 0;
- void __iomem *pci_addr_reg;
-
- if (channel)
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG;
- else
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
-
- /* loop until we have read all the full buffers */
- for (j = 0, next_transfer_addr = readl(pci_addr_reg);
- (next_transfer_addr <
- devpriv->ai_buffer_bus_addr[devpriv->ai_dma_index] ||
- next_transfer_addr >=
- devpriv->ai_buffer_bus_addr[devpriv->ai_dma_index] +
- DMA_BUFFER_SIZE) && j < ai_dma_ring_count(board); j++) {
- /* transfer data from dma buffer to comedi buffer */
- num_samples = comedi_nsamples_left(s, dma_transfer_size(dev));
- comedi_buf_write_samples(s,
- devpriv->ai_buffer[devpriv->ai_dma_index],
- num_samples);
- devpriv->ai_dma_index = (devpriv->ai_dma_index + 1) %
- ai_dma_ring_count(board);
- }
- /*
- * XXX check for dma ring buffer overrun
- * (use end-of-chain bit to mark last unused buffer)
- */
-}
-
-static void handle_ai_interrupt(struct comedi_device *dev,
- unsigned short status,
- unsigned int plx_status)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- uint8_t dma1_status;
- unsigned long flags;
-
- /* check for fifo overrun */
- if (status & ADC_OVERRUN_BIT) {
- dev_err(dev->class_dev, "fifo overrun\n");
- async->events |= COMEDI_CB_ERROR;
- }
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) { /* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
-
- if (dma1_status & PLX_DMA_EN_BIT)
- drain_dma_buffers(dev, 1);
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* drain fifo with pio */
- if ((status & ADC_DONE_BIT) ||
- ((cmd->flags & CMDF_WAKE_EOS) &&
- (status & ADC_INTR_PENDING_BIT) &&
- (board->layout != LAYOUT_4020))) {
- spin_lock_irqsave(&dev->spinlock, flags);
- if (devpriv->ai_cmd_running) {
- spin_unlock_irqrestore(&dev->spinlock, flags);
- pio_drain_ai_fifo(dev);
- } else {
- spin_unlock_irqrestore(&dev->spinlock, flags);
- }
- }
-	/* if we have all the data, then quit */
- if ((cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) ||
- (cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT)))
- async->events |= COMEDI_CB_EOA;
-
- comedi_handle_events(dev, s);
-}
-
-static inline unsigned int prev_ao_dma_index(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int buffer_index;
-
- if (devpriv->ao_dma_index == 0)
- buffer_index = AO_DMA_RING_COUNT - 1;
- else
- buffer_index = devpriv->ao_dma_index - 1;
- return buffer_index;
-}
-
-static int last_ao_dma_load_completed(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int buffer_index;
- unsigned int transfer_address;
- unsigned short dma_status;
-
- buffer_index = prev_ao_dma_index(dev);
- dma_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if ((dma_status & PLX_DMA_DONE_BIT) == 0)
- return 0;
-
- transfer_address =
- readl(devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
- if (transfer_address != devpriv->ao_buffer_bus_addr[buffer_index])
- return 0;
-
- return 1;
-}
-
-static inline int ao_dma_needs_restart(struct comedi_device *dev,
- unsigned short dma_status)
-{
- if ((dma_status & PLX_DMA_DONE_BIT) == 0 ||
- (dma_status & PLX_DMA_EN_BIT) == 0)
- return 0;
- if (last_ao_dma_load_completed(dev))
- return 0;
-
- return 1;
-}
-
-static void restart_ao_dma(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int dma_desc_bits;
-
- dma_desc_bits =
- readl(devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
- dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
- load_first_dma_descriptor(dev, 0, dma_desc_bits);
-
- dma_start_sync(dev, 0);
-}
-
-static unsigned int cb_pcidas64_ao_fill_buffer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dest,
- unsigned int max_bytes)
-{
- unsigned int nsamples = comedi_bytes_to_samples(s, max_bytes);
- unsigned int actual_bytes;
-
- nsamples = comedi_nsamples_left(s, nsamples);
- actual_bytes = comedi_buf_read_samples(s, dest, nsamples);
-
- return comedi_bytes_to_samples(s, actual_bytes);
-}
-
-static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- unsigned int buffer_index = devpriv->ao_dma_index;
- unsigned int prev_buffer_index = prev_ao_dma_index(dev);
- unsigned int nsamples;
- unsigned int nbytes;
- unsigned int next_bits;
-
- nsamples = cb_pcidas64_ao_fill_buffer(dev, s,
- devpriv->ao_buffer[buffer_index],
- DMA_BUFFER_SIZE);
- if (nsamples == 0)
- return 0;
-
- nbytes = comedi_samples_to_bytes(s, nsamples);
- devpriv->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(nbytes);
- /* set end of chain bit so we catch underruns */
- next_bits = le32_to_cpu(devpriv->ao_dma_desc[buffer_index].next);
- next_bits |= PLX_END_OF_CHAIN_BIT;
- devpriv->ao_dma_desc[buffer_index].next = cpu_to_le32(next_bits);
- /*
- * clear end of chain bit on previous buffer now that we have set it
- * for the last buffer
- */
- next_bits = le32_to_cpu(devpriv->ao_dma_desc[prev_buffer_index].next);
- next_bits &= ~PLX_END_OF_CHAIN_BIT;
- devpriv->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits);
-
- devpriv->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT;
-
- return nbytes;
-}
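The buffer just loaded gets the end-of-chain mark and the previously loaded
buffer loses it, so exactly one descriptor in the ring is ever marked and a
dma engine that runs past the freshly loaded data stops instead of wrapping
into stale samples.  A stand-alone sketch of that invariant, where RING_COUNT
and the eoc field are illustrative stand-ins rather than the driver's own
types:

#include <stdio.h>
#include <stdbool.h>

#define RING_COUNT 4	/* stands in for AO_DMA_RING_COUNT */

struct fake_desc {
	bool eoc;	/* stands in for the end-of-chain bit in ->next */
};

static struct fake_desc ring[RING_COUNT];
static unsigned int idx;	/* stands in for devpriv->ao_dma_index */

/* mark the buffer just loaded as last-in-chain, clear the previous mark */
static void load_one_buffer(void)
{
	unsigned int prev = (idx + RING_COUNT - 1) % RING_COUNT;

	ring[idx].eoc = true;
	ring[prev].eoc = false;
	idx = (idx + 1) % RING_COUNT;
}

int main(void)
{
	int i, j;

	for (i = 0; i < 6; i++) {
		load_one_buffer();
		for (j = 0; j < RING_COUNT; j++)
			putchar(ring[j].eoc ? 'E' : '.');
		putchar('\n');	/* exactly one 'E' per line */
	}
	return 0;
}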
-
-static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int num_bytes;
- unsigned int next_transfer_addr;
- void __iomem *pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
- unsigned int buffer_index;
-
- do {
- buffer_index = devpriv->ao_dma_index;
- /* don't overwrite data that hasn't been transferred yet */
- next_transfer_addr = readl(pci_addr_reg);
- if (next_transfer_addr >=
- devpriv->ao_buffer_bus_addr[buffer_index] &&
- next_transfer_addr <
- devpriv->ao_buffer_bus_addr[buffer_index] +
- DMA_BUFFER_SIZE)
- return;
- num_bytes = load_ao_dma_buffer(dev, cmd);
- } while (num_bytes >= DMA_BUFFER_SIZE);
-}
-
-static void handle_ao_interrupt(struct comedi_device *dev,
- unsigned short status, unsigned int plx_status)
-{
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
- uint8_t dma0_status;
- unsigned long flags;
-
- /* board might not support ao, in which case write_subdev is NULL */
- if (!s)
- return;
- async = s->async;
- cmd = &async->cmd;
-
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */
- if ((dma0_status & PLX_DMA_EN_BIT) &&
- !(dma0_status & PLX_DMA_DONE_BIT))
- writeb(PLX_DMA_EN_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- else
- writeb(PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
- if (dma0_status & PLX_DMA_EN_BIT) {
- load_ao_dma(dev, cmd);
- /* try to recover from dma end-of-chain event */
- if (ao_dma_needs_restart(dev, dma0_status))
- restart_ao_dma(dev);
- }
- } else {
- spin_unlock_irqrestore(&dev->spinlock, flags);
- }
-
- if ((status & DAC_DONE_BIT)) {
- if ((cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) ||
- last_ao_dma_load_completed(dev))
- async->events |= COMEDI_CB_EOA;
- else
- async->events |= COMEDI_CB_ERROR;
- }
- comedi_handle_events(dev, s);
-}
-
-static irqreturn_t handle_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct pcidas64_private *devpriv = dev->private;
- unsigned short status;
- uint32_t plx_status;
- uint32_t plx_bits;
-
- plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
- status = readw(devpriv->main_iobase + HW_STATUS_REG);
-
- /*
- * an interrupt before all the postconfig stuff gets done could
- * cause a NULL dereference if we continue through the
- * interrupt handler
- */
- if (!dev->attached)
- return IRQ_HANDLED;
-
- handle_ai_interrupt(dev, status, plx_status);
- handle_ao_interrupt(dev, status, plx_status);
-
- /* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- }
-
- return IRQ_HANDLED;
-}
-
-static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- if (devpriv->ai_cmd_running == 0) {
- spin_unlock_irqrestore(&dev->spinlock, flags);
- return 0;
- }
- devpriv->ai_cmd_running = 0;
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- disable_ai_pacing(dev);
-
- abort_dma(dev, 1);
-
- return 0;
-}
-
-static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int range = CR_RANGE(insn->chanspec);
-
- /* do some initializing */
- writew(0, devpriv->main_iobase + DAC_CONTROL0_REG);
-
- /* set range */
- set_dac_range_bits(dev, &devpriv->dac_control1_bits, chan, range);
- writew(devpriv->dac_control1_bits,
- devpriv->main_iobase + DAC_CONTROL1_REG);
-
- /* write to channel */
- if (board->layout == LAYOUT_4020) {
- writew(data[0] & 0xff,
- devpriv->main_iobase + dac_lsb_4020_reg(chan));
- writew((data[0] >> 8) & 0xf,
- devpriv->main_iobase + dac_msb_4020_reg(chan));
- } else {
- writew(data[0], devpriv->main_iobase + dac_convert_reg(chan));
- }
-
- /* remember output value */
- s->readback[chan] = data[0];
-
- return 1;
-}
-
-static void set_dac_control0_reg(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int bits = DAC_ENABLE_BIT | WAVEFORM_GATE_LEVEL_BIT |
- WAVEFORM_GATE_ENABLE_BIT | WAVEFORM_GATE_SELECT_BIT;
-
- if (cmd->start_src == TRIG_EXT) {
- bits |= WAVEFORM_TRIG_EXT_BITS;
- if (cmd->start_arg & CR_INVERT)
- bits |= WAVEFORM_TRIG_FALLING_BIT;
- } else {
- bits |= WAVEFORM_TRIG_SOFT_BITS;
- }
- if (cmd->scan_begin_src == TRIG_EXT) {
- bits |= DAC_EXT_UPDATE_ENABLE_BIT;
- if (cmd->scan_begin_arg & CR_INVERT)
- bits |= DAC_EXT_UPDATE_FALLING_BIT;
- }
- writew(bits, devpriv->main_iobase + DAC_CONTROL0_REG);
-}
-
-static void set_dac_control1_reg(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- int i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- int channel, range;
-
- channel = CR_CHAN(cmd->chanlist[i]);
- range = CR_RANGE(cmd->chanlist[i]);
- set_dac_range_bits(dev, &devpriv->dac_control1_bits, channel,
- range);
- }
- devpriv->dac_control1_bits |= DAC_SW_GATE_BIT;
- writew(devpriv->dac_control1_bits,
- devpriv->main_iobase + DAC_CONTROL1_REG);
-}
-
-static void set_dac_select_reg(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- uint16_t bits;
- unsigned int first_channel, last_channel;
-
- first_channel = CR_CHAN(cmd->chanlist[0]);
- last_channel = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]);
- if (last_channel < first_channel)
- dev_err(dev->class_dev,
- "bug! last ao channel < first ao channel\n");
-
- bits = (first_channel & 0x7) | (last_channel & 0x7) << 3;
-
- writew(bits, devpriv->main_iobase + DAC_SELECT_REG);
-}
-
-static unsigned int get_ao_divisor(unsigned int ns, unsigned int flags)
-{
- return get_divisor(ns, flags) - 2;
-}
-
-static void set_dac_interval_regs(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int divisor;
-
- if (cmd->scan_begin_src != TRIG_TIMER)
- return;
-
- divisor = get_ao_divisor(cmd->scan_begin_arg, cmd->flags);
- if (divisor > max_counter_value) {
- dev_err(dev->class_dev, "bug! ao divisor too big\n");
- divisor = max_counter_value;
- }
- writew(divisor & 0xffff,
- devpriv->main_iobase + DAC_SAMPLE_INTERVAL_LOWER_REG);
- writew((divisor >> 16) & 0xff,
- devpriv->main_iobase + DAC_SAMPLE_INTERVAL_UPPER_REG);
-}
-
-static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- unsigned int nsamples;
- unsigned int nbytes;
- int i;
-
- /*
- * clear queue pointer too, since external queue has
- * weird interactions with ao fifo
- */
- writew(0, devpriv->main_iobase + ADC_QUEUE_CLEAR_REG);
- writew(0, devpriv->main_iobase + DAC_BUFFER_CLEAR_REG);
-
- nsamples = cb_pcidas64_ao_fill_buffer(dev, s,
- devpriv->ao_bounce_buffer,
- DAC_FIFO_SIZE);
- if (nsamples == 0)
- return -1;
-
- for (i = 0; i < nsamples; i++) {
- writew(devpriv->ao_bounce_buffer[i],
- devpriv->main_iobase + DAC_FIFO_REG);
- }
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- return 0;
-
- nbytes = load_ao_dma_buffer(dev, cmd);
- if (nbytes == 0)
- return -1;
- load_ao_dma(dev, cmd);
-
- dma_start_sync(dev, 0);
-
- return 0;
-}
-
-static inline int external_ai_queue_in_use(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- if (s->busy)
- return 0;
- if (board->layout == LAYOUT_4020)
- return 0;
- else if (use_internal_queue_6xxx(cmd))
- return 0;
- return 1;
-}
-
-static int ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int retval;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- retval = prep_ao_dma(dev, cmd);
- if (retval < 0)
- return -EPIPE;
-
- set_dac_control0_reg(dev, cmd);
-
- if (cmd->start_src == TRIG_INT)
- writew(0, devpriv->main_iobase + DAC_START_REG);
-
- s->async->inttrig = NULL;
-
- return 0;
-}
-
-static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (external_ai_queue_in_use(dev, s, cmd)) {
- warn_external_queue(dev);
- return -EBUSY;
- }
- /* disable analog output system during setup */
- writew(0x0, devpriv->main_iobase + DAC_CONTROL0_REG);
-
- devpriv->ao_dma_index = 0;
-
- set_dac_select_reg(dev, cmd);
- set_dac_interval_regs(dev, cmd);
- load_first_dma_descriptor(dev, 0, devpriv->ao_dma_desc_bus_addr |
- PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT);
-
- set_dac_control1_reg(dev, cmd);
- s->async->inttrig = ao_inttrig;
-
- return 0;
-}
-
-static int cb_pcidas64_ao_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (chan != (chan0 + i)) {
- dev_dbg(dev->class_dev,
- "chanlist must use consecutive channels\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- int err = 0;
- unsigned int tmp_arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->convert_src == TRIG_EXT && cmd->scan_begin_src == TRIG_TIMER)
- err |= -EINVAL;
- if (cmd->stop_src != TRIG_COUNT &&
- cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ao_scan_speed);
- if (get_ao_divisor(cmd->scan_begin_arg, cmd->flags) >
- max_counter_value) {
- cmd->scan_begin_arg = (max_counter_value + 2) *
- TIMER_BASE;
- err |= -EINVAL;
- }
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- tmp_arg = cmd->scan_begin_arg;
- cmd->scan_begin_arg = get_divisor(cmd->scan_begin_arg,
- cmd->flags) * TIMER_BASE;
- if (tmp_arg != cmd->scan_begin_arg)
- err++;
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= cb_pcidas64_ao_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- writew(0x0, devpriv->main_iobase + DAC_CONTROL0_REG);
- abort_dma(dev, 0);
- return 0;
-}
-
-static int dio_callback_4020(struct comedi_device *dev,
- int dir, int port, int data, unsigned long iobase)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- if (dir) {
- writew(data, devpriv->main_iobase + iobase + 2 * port);
- return 0;
- }
- return readw(devpriv->main_iobase + iobase + 2 * port);
-}
-
-static int di_rbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int bits;
-
- bits = readb(dev->mmio + DI_REG);
- bits &= 0xf;
- data[1] = bits;
- data[0] = 0;
-
- return insn->n;
-}
-
-static int do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- writeb(s->state, dev->mmio + DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int dio_60xx_config_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- writeb(s->io_bits, dev->mmio + DIO_DIRECTION_60XX_REG);
-
- return insn->n;
-}
-
-static int dio_60xx_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- writeb(s->state, dev->mmio + DIO_DATA_60XX_REG);
-
- data[1] = readb(dev->mmio + DIO_DATA_60XX_REG);
-
- return insn->n;
-}
-
-/*
- * pci-6025 8800 caldac:
- * address 0 == dac channel 0 offset
- * address 1 == dac channel 0 gain
- * address 2 == dac channel 1 offset
- * address 3 == dac channel 1 gain
- * address 4 == fine adc offset
- * address 5 == coarse adc offset
- * address 6 == coarse adc gain
- * address 7 == fine adc gain
- */
-/*
- * pci-6402/16 uses all 8 channels for dac:
- * address 0 == dac channel 0 fine gain
- * address 1 == dac channel 0 coarse gain
- * address 2 == dac channel 0 coarse offset
- * address 3 == dac channel 1 coarse offset
- * address 4 == dac channel 1 fine gain
- * address 5 == dac channel 1 coarse gain
- * address 6 == dac channel 0 fine offset
- * address 7 == dac channel 1 fine offset
- */
-
-static int caldac_8800_write(struct comedi_device *dev, unsigned int address,
- uint8_t value)
-{
- struct pcidas64_private *devpriv = dev->private;
- static const int num_caldac_channels = 8;
- static const int bitstream_length = 11;
- unsigned int bitstream = ((address & 0x7) << 8) | value;
- unsigned int bit, register_bits;
- static const int caldac_8800_udelay = 1;
-
- if (address >= num_caldac_channels) {
- dev_err(dev->class_dev, "illegal caldac channel\n");
- return -1;
- }
- for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
- register_bits = 0;
- if (bitstream & bit)
- register_bits |= SERIAL_DATA_IN_BIT;
- udelay(caldac_8800_udelay);
- writew(register_bits, devpriv->main_iobase + CALIBRATION_REG);
- register_bits |= SERIAL_CLOCK_BIT;
- udelay(caldac_8800_udelay);
- writew(register_bits, devpriv->main_iobase + CALIBRATION_REG);
- }
- udelay(caldac_8800_udelay);
- writew(SELECT_8800_BIT, devpriv->main_iobase + CALIBRATION_REG);
- udelay(caldac_8800_udelay);
- writew(0, devpriv->main_iobase + CALIBRATION_REG);
- udelay(caldac_8800_udelay);
- return 0;
-}
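The word shifted out above is 11 bits long: the 3-bit caldac channel address
sits in bits 10..8 and the 8-bit value in bits 7..0, clocked out MSB first.
A small stand-alone sketch of that framing, using made-up inputs:

#include <stdio.h>

#define BITSTREAM_LENGTH 11

/* 3-bit channel address in bits 10..8, 8-bit value in bits 7..0 */
static unsigned int caldac_8800_word(unsigned int address, unsigned int value)
{
	return ((address & 0x7) << 8) | (value & 0xff);
}

int main(void)
{
	unsigned int word = caldac_8800_word(2, 0x80);	/* made-up inputs */
	unsigned int bit;

	/* print the bits in the order they would be clocked out, MSB first */
	for (bit = 1 << (BITSTREAM_LENGTH - 1); bit; bit >>= 1)
		putchar((word & bit) ? '1' : '0');
	putchar('\n');	/* prints 01010000000 */
	return 0;
}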
-
-/* 4020 caldacs */
-static int caldac_i2c_write(struct comedi_device *dev,
- unsigned int caldac_channel, unsigned int value)
-{
- uint8_t serial_bytes[3];
- uint8_t i2c_addr;
- enum pointer_bits {
- /* manual has gain and offset bits switched */
- OFFSET_0_2 = 0x1,
- GAIN_0_2 = 0x2,
- OFFSET_1_3 = 0x4,
- GAIN_1_3 = 0x8,
- };
- enum data_bits {
- NOT_CLEAR_REGISTERS = 0x20,
- };
-
- switch (caldac_channel) {
- case 0: /* chan 0 offset */
- i2c_addr = CALDAC0_I2C_ADDR;
- serial_bytes[0] = OFFSET_0_2;
- break;
- case 1: /* chan 1 offset */
- i2c_addr = CALDAC0_I2C_ADDR;
- serial_bytes[0] = OFFSET_1_3;
- break;
- case 2: /* chan 2 offset */
- i2c_addr = CALDAC1_I2C_ADDR;
- serial_bytes[0] = OFFSET_0_2;
- break;
- case 3: /* chan 3 offset */
- i2c_addr = CALDAC1_I2C_ADDR;
- serial_bytes[0] = OFFSET_1_3;
- break;
- case 4: /* chan 0 gain */
- i2c_addr = CALDAC0_I2C_ADDR;
- serial_bytes[0] = GAIN_0_2;
- break;
- case 5: /* chan 1 gain */
- i2c_addr = CALDAC0_I2C_ADDR;
- serial_bytes[0] = GAIN_1_3;
- break;
- case 6: /* chan 2 gain */
- i2c_addr = CALDAC1_I2C_ADDR;
- serial_bytes[0] = GAIN_0_2;
- break;
- case 7: /* chan 3 gain */
- i2c_addr = CALDAC1_I2C_ADDR;
- serial_bytes[0] = GAIN_1_3;
- break;
- default:
- dev_err(dev->class_dev, "invalid caldac channel\n");
- return -1;
- }
- serial_bytes[1] = NOT_CLEAR_REGISTERS | ((value >> 8) & 0xf);
- serial_bytes[2] = value & 0xff;
- i2c_write(dev, i2c_addr, serial_bytes, 3);
- return 0;
-}
-
-static void caldac_write(struct comedi_device *dev, unsigned int channel,
- unsigned int value)
-{
- const struct pcidas64_board *board = dev->board_ptr;
-
- switch (board->layout) {
- case LAYOUT_60XX:
- case LAYOUT_64XX:
- caldac_8800_write(dev, channel, value);
- break;
- case LAYOUT_4020:
- caldac_i2c_write(dev, channel, value);
- break;
- default:
- break;
- }
-}
-
-static int cb_pcidas64_calib_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /*
- * Programming the calib device is slow. Only write the
- * last data value if the value has changed.
- */
- if (insn->n) {
- unsigned int val = data[insn->n - 1];
-
- if (s->readback[chan] != val) {
- caldac_write(dev, chan, val);
- s->readback[chan] = val;
- }
- }
-
- return insn->n;
-}
-
-static void ad8402_write(struct comedi_device *dev, unsigned int channel,
- unsigned int value)
-{
- struct pcidas64_private *devpriv = dev->private;
- static const int bitstream_length = 10;
- unsigned int bit, register_bits;
- unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff);
- static const int ad8402_udelay = 1;
-
- register_bits = SELECT_8402_64XX_BIT;
- udelay(ad8402_udelay);
- writew(register_bits, devpriv->main_iobase + CALIBRATION_REG);
-
- for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
- if (bitstream & bit)
- register_bits |= SERIAL_DATA_IN_BIT;
- else
- register_bits &= ~SERIAL_DATA_IN_BIT;
- udelay(ad8402_udelay);
- writew(register_bits, devpriv->main_iobase + CALIBRATION_REG);
- udelay(ad8402_udelay);
- writew(register_bits | SERIAL_CLOCK_BIT,
- devpriv->main_iobase + CALIBRATION_REG);
- }
-
- udelay(ad8402_udelay);
- writew(0, devpriv->main_iobase + CALIBRATION_REG);
-}
-
-/* for pci-das6402/16, channel 0 is analog input gain and channel 1 is offset */
-static int cb_pcidas64_ad8402_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /*
- * Programming the calib device is slow. Only write the
- * last data value if the value has changed.
- */
- if (insn->n) {
- unsigned int val = data[insn->n - 1];
-
- if (s->readback[chan] != val) {
- ad8402_write(dev, chan, val);
- s->readback[chan] = val;
- }
- }
-
- return insn->n;
-}
-
-static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
-{
- struct pcidas64_private *devpriv = dev->private;
- static const int bitstream_length = 11;
- static const int read_command = 0x6;
- unsigned int bitstream = (read_command << 8) | address;
- unsigned int bit;
- void __iomem * const plx_control_addr =
- devpriv->plx9080_iobase + PLX_CONTROL_REG;
- uint16_t value;
- static const int value_length = 16;
- static const int eeprom_udelay = 1;
-
- udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK & ~CTL_EE_CS;
- /* make sure we don't send anything to the i2c bus on 4020 */
- devpriv->plx_control_bits |= CTL_USERO;
- writel(devpriv->plx_control_bits, plx_control_addr);
- /* activate serial eeprom */
- udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CS;
- writel(devpriv->plx_control_bits, plx_control_addr);
-
- /* write read command and desired memory address */
- for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
- /* set bit to be written */
- udelay(eeprom_udelay);
- if (bitstream & bit)
- devpriv->plx_control_bits |= CTL_EE_W;
- else
- devpriv->plx_control_bits &= ~CTL_EE_W;
- writel(devpriv->plx_control_bits, plx_control_addr);
- /* clock in bit */
- udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CLK;
- writel(devpriv->plx_control_bits, plx_control_addr);
- udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK;
- writel(devpriv->plx_control_bits, plx_control_addr);
- }
- /* read back value from eeprom memory location */
- value = 0;
- for (bit = 1 << (value_length - 1); bit; bit >>= 1) {
- /* clock out bit */
- udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CLK;
- writel(devpriv->plx_control_bits, plx_control_addr);
- udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK;
- writel(devpriv->plx_control_bits, plx_control_addr);
- udelay(eeprom_udelay);
- if (readl(plx_control_addr) & CTL_EE_R)
- value |= bit;
- }
-
- /* deactivate eeprom serial input */
- udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CS;
- writel(devpriv->plx_control_bits, plx_control_addr);
-
- return value;
-}
-
-static int eeprom_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[0] = read_eeprom(dev, CR_CHAN(insn->chanspec));
-
- return 1;
-}
-
-/*
- * Allocate and initialize the subdevice structures.
- */
-static int setup_subdevices(struct comedi_device *dev)
-{
- const struct pcidas64_board *board = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- int i;
- int ret;
-
- ret = comedi_alloc_subdevices(dev, 10);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DITHER | SDF_CMD_READ;
- if (board->layout == LAYOUT_60XX)
- s->subdev_flags |= SDF_COMMON | SDF_DIFF;
- else if (board->layout == LAYOUT_64XX)
- s->subdev_flags |= SDF_DIFF;
- /* XXX Number of inputs in differential mode is ignored */
- s->n_chan = board->ai_se_chans;
- s->len_chanlist = 0x2000;
- s->maxdata = (1 << board->ai_bits) - 1;
- s->range_table = board->ai_range_table;
- s->insn_read = ai_rinsn;
- s->insn_config = ai_config_insn;
- s->do_cmd = ai_cmd;
- s->do_cmdtest = ai_cmdtest;
- s->cancel = ai_cancel;
- if (board->layout == LAYOUT_4020) {
- uint8_t data;
- /*
- * set adc to read from inputs
- * (not internal calibration sources)
- */
- devpriv->i2c_cal_range_bits = adc_src_4020_bits(4);
- /* set channels to +-5 volt input ranges */
- for (i = 0; i < s->n_chan; i++)
- devpriv->i2c_cal_range_bits |= attenuate_bit(i);
- data = devpriv->i2c_cal_range_bits;
- i2c_write(dev, RANGE_CAL_I2C_ADDR, &data, sizeof(data));
- }
-
- /* analog output subdevice */
- s = &dev->subdevices[1];
- if (board->ao_nchan) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
- SDF_GROUND | SDF_CMD_WRITE;
- s->n_chan = board->ao_nchan;
- s->maxdata = (1 << board->ao_bits) - 1;
- s->range_table = board->ao_range_table;
- s->insn_write = ao_winsn;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- if (ao_cmd_is_supported(board)) {
- dev->write_subdev = s;
- s->do_cmdtest = ao_cmdtest;
- s->do_cmd = ao_cmd;
- s->len_chanlist = board->ao_nchan;
- s->cancel = ao_cancel;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* digital input */
- s = &dev->subdevices[2];
- if (board->layout == LAYOUT_64XX) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = di_rbits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* digital output */
- if (board->layout == LAYOUT_64XX) {
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = do_wbits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* 8255 */
- s = &dev->subdevices[4];
- if (board->has_8255) {
- if (board->layout == LAYOUT_4020) {
- ret = subdev_8255_init(dev, s, dio_callback_4020,
- I8255_4020_REG);
- } else {
- ret = subdev_8255_mm_init(dev, s, NULL,
- DIO_8255_OFFSET);
- }
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* 8 channel dio for 60xx */
- s = &dev->subdevices[5];
- if (board->layout == LAYOUT_60XX) {
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = dio_60xx_config_insn;
- s->insn_bits = dio_60xx_wbits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* caldac */
- s = &dev->subdevices[6];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 8;
- if (board->layout == LAYOUT_4020)
- s->maxdata = 0xfff;
- else
- s->maxdata = 0xff;
- s->insn_write = cb_pcidas64_calib_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++) {
- caldac_write(dev, i, s->maxdata / 2);
- s->readback[i] = s->maxdata / 2;
- }
-
- /* 2 channel ad8402 potentiometer */
- s = &dev->subdevices[7];
- if (board->layout == LAYOUT_64XX) {
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 2;
- s->maxdata = 0xff;
- s->insn_write = cb_pcidas64_ad8402_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++) {
- ad8402_write(dev, i, s->maxdata / 2);
- s->readback[i] = s->maxdata / 2;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* serial EEPROM, if present */
- s = &dev->subdevices[8];
- if (readl(devpriv->plx9080_iobase + PLX_CONTROL_REG) & CTL_EECHK) {
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
- s->n_chan = 128;
- s->maxdata = 0xffff;
- s->insn_read = eeprom_read_insn;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* user counter subd XXX */
- s = &dev->subdevices[9];
- s->type = COMEDI_SUBD_UNUSED;
-
- return 0;
-}
-
-static int auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct pcidas64_board *board = NULL;
- struct pcidas64_private *devpriv;
- uint32_t local_range, local_decode;
- int retval;
-
- if (context < ARRAY_SIZE(pcidas64_boards))
- board = &pcidas64_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- retval = comedi_pci_enable(dev);
- if (retval)
- return retval;
- pci_set_master(pcidev);
-
- /* Initialize dev->board_name */
- dev->board_name = board->name;
-
- devpriv->main_phys_iobase = pci_resource_start(pcidev, 2);
- devpriv->dio_counter_phys_iobase = pci_resource_start(pcidev, 3);
-
- devpriv->plx9080_iobase = pci_ioremap_bar(pcidev, 0);
- devpriv->main_iobase = pci_ioremap_bar(pcidev, 2);
- dev->mmio = pci_ioremap_bar(pcidev, 3);
-
- if (!devpriv->plx9080_iobase || !devpriv->main_iobase || !dev->mmio) {
- dev_warn(dev->class_dev, "failed to remap io memory\n");
- return -ENOMEM;
- }
-
- /* figure out what local addresses are */
- local_range = readl(devpriv->plx9080_iobase + PLX_LAS0RNG_REG) &
- LRNG_MEM_MASK;
- local_decode = readl(devpriv->plx9080_iobase + PLX_LAS0MAP_REG) &
- local_range & LMAP_MEM_MASK;
- devpriv->local0_iobase = ((uint32_t)devpriv->main_phys_iobase &
- ~local_range) | local_decode;
- local_range = readl(devpriv->plx9080_iobase + PLX_LAS1RNG_REG) &
- LRNG_MEM_MASK;
- local_decode = readl(devpriv->plx9080_iobase + PLX_LAS1MAP_REG) &
- local_range & LMAP_MEM_MASK;
- devpriv->local1_iobase = ((uint32_t)devpriv->dio_counter_phys_iobase &
- ~local_range) | local_decode;
-
- retval = alloc_and_init_dma_members(dev);
- if (retval < 0)
- return retval;
-
- devpriv->hw_revision =
- hw_revision(dev, readw(devpriv->main_iobase + HW_STATUS_REG));
- dev_dbg(dev->class_dev, "stc hardware revision %i\n",
- devpriv->hw_revision);
- init_plx9080(dev);
- init_stc_registers(dev);
-
- retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (retval) {
- dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
- pcidev->irq);
- return retval;
- }
- dev->irq = pcidev->irq;
- dev_dbg(dev->class_dev, "irq %u\n", dev->irq);
-
- retval = setup_subdevices(dev);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-static void detach(struct comedi_device *dev)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- if (dev->irq)
- free_irq(dev->irq, dev);
- if (devpriv) {
- if (devpriv->plx9080_iobase) {
- disable_plx_interrupts(dev);
- iounmap(devpriv->plx9080_iobase);
- }
- if (devpriv->main_iobase)
- iounmap(devpriv->main_iobase);
- if (dev->mmio)
- iounmap(dev->mmio);
- }
- comedi_pci_disable(dev);
- cb_pcidas64_free_dma(dev);
-}
-
-static struct comedi_driver cb_pcidas64_driver = {
- .driver_name = "cb_pcidas64",
- .module = THIS_MODULE,
- .auto_attach = auto_attach,
- .detach = detach,
-};
-
-static int cb_pcidas64_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &cb_pcidas64_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id cb_pcidas64_pci_table[] = {
- { PCI_VDEVICE(CB, 0x001d), BOARD_PCIDAS6402_16 },
- { PCI_VDEVICE(CB, 0x001e), BOARD_PCIDAS6402_12 },
- { PCI_VDEVICE(CB, 0x0035), BOARD_PCIDAS64_M1_16 },
- { PCI_VDEVICE(CB, 0x0036), BOARD_PCIDAS64_M2_16 },
- { PCI_VDEVICE(CB, 0x0037), BOARD_PCIDAS64_M3_16 },
- { PCI_VDEVICE(CB, 0x0052), BOARD_PCIDAS4020_12 },
- { PCI_VDEVICE(CB, 0x005d), BOARD_PCIDAS6023 },
- { PCI_VDEVICE(CB, 0x005e), BOARD_PCIDAS6025 },
- { PCI_VDEVICE(CB, 0x005f), BOARD_PCIDAS6030 },
- { PCI_VDEVICE(CB, 0x0060), BOARD_PCIDAS6031 },
- { PCI_VDEVICE(CB, 0x0061), BOARD_PCIDAS6032 },
- { PCI_VDEVICE(CB, 0x0062), BOARD_PCIDAS6033 },
- { PCI_VDEVICE(CB, 0x0063), BOARD_PCIDAS6034 },
- { PCI_VDEVICE(CB, 0x0064), BOARD_PCIDAS6035 },
- { PCI_VDEVICE(CB, 0x0065), BOARD_PCIDAS6040 },
- { PCI_VDEVICE(CB, 0x0066), BOARD_PCIDAS6052 },
- { PCI_VDEVICE(CB, 0x0067), BOARD_PCIDAS6070 },
- { PCI_VDEVICE(CB, 0x0068), BOARD_PCIDAS6071 },
- { PCI_VDEVICE(CB, 0x006f), BOARD_PCIDAS6036 },
- { PCI_VDEVICE(CB, 0x0078), BOARD_PCIDAS6013 },
- { PCI_VDEVICE(CB, 0x0079), BOARD_PCIDAS6014 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, cb_pcidas64_pci_table);
-
-static struct pci_driver cb_pcidas64_pci_driver = {
- .name = "cb_pcidas64",
- .id_table = cb_pcidas64_pci_table,
- .probe = cb_pcidas64_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(cb_pcidas64_driver, cb_pcidas64_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
deleted file mode 100644
index b00a36a5cb36..000000000000
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * comedi/drivers/cb_pcidda.c
- * Driver for the ComputerBoards / MeasurementComputing PCI-DDA series.
- *
- * Copyright (C) 2001 Ivan Martinez <ivanmr@altavista.com>
- * Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: cb_pcidda
- * Description: MeasurementComputing PCI-DDA series
- * Devices: [Measurement Computing] PCI-DDA08/12 (pci-dda08/12),
- * PCI-DDA04/12 (pci-dda04/12), PCI-DDA02/12 (pci-dda02/12),
- * PCI-DDA08/16 (pci-dda08/16), PCI-DDA04/16 (pci-dda04/16),
- * PCI-DDA02/16 (pci-dda02/16)
- * Author: Ivan Martinez <ivanmr@altavista.com>
- * Frank Mori Hess <fmhess@users.sourceforge.net>
- * Status: works
- *
- * Configuration options: not applicable, uses PCI auto config
- *
- * Only simple analog output writing is supported.
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-
-#define EEPROM_SIZE 128 /* number of entries in eeprom */
-/* maximum number of ao channels for supported boards */
-#define MAX_AO_CHANNELS 8
-
-/* Digital I/O registers */
-#define CB_DDA_DIO0_8255_BASE 0x00
-#define CB_DDA_DIO1_8255_BASE 0x04
-
-/* DAC registers */
-#define CB_DDA_DA_CTRL_REG 0x00 /* D/A Control Register */
-#define CB_DDA_DA_CTRL_SU (1 << 0) /* Simultaneous update */
-#define CB_DDA_DA_CTRL_EN (1 << 1) /* Enable specified DAC */
-#define CB_DDA_DA_CTRL_DAC(x) ((x) << 2) /* Specify DAC channel */
-#define CB_DDA_DA_CTRL_RANGE2V5 (0 << 6) /* 2.5V range */
-#define CB_DDA_DA_CTRL_RANGE5V (2 << 6) /* 5V range */
-#define CB_DDA_DA_CTRL_RANGE10V (3 << 6) /* 10V range */
-#define CB_DDA_DA_CTRL_UNIP (1 << 8) /* Unipolar range */
-
-#define DACALIBRATION1 4 /* D/A CALIBRATION REGISTER 1 */
-/* write bits */
-/* serial data input for eeprom, caldacs, reference dac */
-#define SERIAL_IN_BIT 0x1
-#define CAL_CHANNEL_MASK (0x7 << 1)
-#define CAL_CHANNEL_BITS(channel) (((channel) << 1) & CAL_CHANNEL_MASK)
-/* read bits */
-#define CAL_COUNTER_MASK 0x1f
-/* calibration counter overflow status bit */
-#define CAL_COUNTER_OVERFLOW_BIT 0x20
-/* analog output is less than reference dac voltage */
-#define AO_BELOW_REF_BIT 0x40
-#define SERIAL_OUT_BIT 0x80 /* serial data out, for reading from eeprom */
-
-#define DACALIBRATION2 6 /* D/A CALIBRATION REGISTER 2 */
-#define SELECT_EEPROM_BIT 0x1 /* send serial data in to eeprom */
-/* don't send serial data to MAX542 reference dac */
-#define DESELECT_REF_DAC_BIT 0x2
-/* don't send serial data to caldac n */
-#define DESELECT_CALDAC_BIT(n) (0x4 << (n))
-/* manual says to set this bit with no explanation */
-#define DUMMY_BIT 0x40
-
-#define CB_DDA_DA_DATA_REG(x) (0x08 + ((x) * 2))
-
-/* Offsets for the caldac channels */
-#define CB_DDA_CALDAC_FINE_GAIN 0
-#define CB_DDA_CALDAC_COURSE_GAIN 1
-#define CB_DDA_CALDAC_COURSE_OFFSET 2
-#define CB_DDA_CALDAC_FINE_OFFSET 3
-
-static const struct comedi_lrange cb_pcidda_ranges = {
- 6, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5)
- }
-};
-
-enum cb_pcidda_boardid {
- BOARD_DDA02_12,
- BOARD_DDA04_12,
- BOARD_DDA08_12,
- BOARD_DDA02_16,
- BOARD_DDA04_16,
- BOARD_DDA08_16,
-};
-
-struct cb_pcidda_board {
- const char *name;
- int ao_chans;
- int ao_bits;
-};
-
-static const struct cb_pcidda_board cb_pcidda_boards[] = {
- [BOARD_DDA02_12] = {
- .name = "pci-dda02/12",
- .ao_chans = 2,
- .ao_bits = 12,
- },
- [BOARD_DDA04_12] = {
- .name = "pci-dda04/12",
- .ao_chans = 4,
- .ao_bits = 12,
- },
- [BOARD_DDA08_12] = {
- .name = "pci-dda08/12",
- .ao_chans = 8,
- .ao_bits = 12,
- },
- [BOARD_DDA02_16] = {
- .name = "pci-dda02/16",
- .ao_chans = 2,
- .ao_bits = 16,
- },
- [BOARD_DDA04_16] = {
- .name = "pci-dda04/16",
- .ao_chans = 4,
- .ao_bits = 16,
- },
- [BOARD_DDA08_16] = {
- .name = "pci-dda08/16",
- .ao_chans = 8,
- .ao_bits = 16,
- },
-};
-
-struct cb_pcidda_private {
- unsigned long daqio;
- /* bits last written to da calibration register 1 */
- unsigned int dac_cal1_bits;
- /* current range settings for output channels */
- unsigned int ao_range[MAX_AO_CHANNELS];
- u16 eeprom_data[EEPROM_SIZE]; /* software copy of board's eeprom */
-};
-
-/* lowlevel read from eeprom */
-static unsigned int cb_pcidda_serial_in(struct comedi_device *dev)
-{
- struct cb_pcidda_private *devpriv = dev->private;
- unsigned int value = 0;
- int i;
- const int value_width = 16; /* number of bits wide values are */
-
- for (i = 1; i <= value_width; i++) {
- /* read bits most significant bit first */
- if (inw_p(devpriv->daqio + DACALIBRATION1) & SERIAL_OUT_BIT)
- value |= 1 << (value_width - i);
- }
-
- return value;
-}
-
-/* lowlevel write to eeprom/dac */
-static void cb_pcidda_serial_out(struct comedi_device *dev, unsigned int value,
- unsigned int num_bits)
-{
- struct cb_pcidda_private *devpriv = dev->private;
- int i;
-
- for (i = 1; i <= num_bits; i++) {
- /* send bits most significant bit first */
- if (value & (1 << (num_bits - i)))
- devpriv->dac_cal1_bits |= SERIAL_IN_BIT;
- else
- devpriv->dac_cal1_bits &= ~SERIAL_IN_BIT;
- outw_p(devpriv->dac_cal1_bits, devpriv->daqio + DACALIBRATION1);
- }
-}
-
-/* reads a 16 bit value from board's eeprom */
-static unsigned int cb_pcidda_read_eeprom(struct comedi_device *dev,
- unsigned int address)
-{
- struct cb_pcidda_private *devpriv = dev->private;
- unsigned int i;
- unsigned int cal2_bits;
- unsigned int value;
- /* one caldac for every two dac channels */
- const int max_num_caldacs = 4;
- /* bits to send to tell eeprom we want to read */
- const int read_instruction = 0x6;
- const int instruction_length = 3;
- const int address_length = 8;
-
- /* send serial output stream to eeprom */
- cal2_bits = SELECT_EEPROM_BIT | DESELECT_REF_DAC_BIT | DUMMY_BIT;
- /* deactivate caldacs (one caldac for every two channels) */
- for (i = 0; i < max_num_caldacs; i++)
- cal2_bits |= DESELECT_CALDAC_BIT(i);
- outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
-
- /* tell eeprom we want to read */
- cb_pcidda_serial_out(dev, read_instruction, instruction_length);
- /* send address we want to read from */
- cb_pcidda_serial_out(dev, address, address_length);
-
- value = cb_pcidda_serial_in(dev);
-
- /* deactivate eeprom */
- cal2_bits &= ~SELECT_EEPROM_BIT;
- outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
-
- return value;
-}
-
-/* writes to 8 bit calibration dacs */
-static void cb_pcidda_write_caldac(struct comedi_device *dev,
- unsigned int caldac, unsigned int channel,
- unsigned int value)
-{
- struct cb_pcidda_private *devpriv = dev->private;
- unsigned int cal2_bits;
- unsigned int i;
- /* caldacs use 3 bit channel specification */
- const int num_channel_bits = 3;
- const int num_caldac_bits = 8; /* 8 bit calibration dacs */
- /* one caldac for every two dac channels */
- const int max_num_caldacs = 4;
-
- /* write 3 bit channel */
- cb_pcidda_serial_out(dev, channel, num_channel_bits);
- /* write 8 bit caldac value */
- cb_pcidda_serial_out(dev, value, num_caldac_bits);
-
-	/*
-	 * Latch the stream into the appropriate caldac and deselect the
-	 * reference dac.
-	 */
- cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
- /* deactivate caldacs (one caldac for every two channels) */
- for (i = 0; i < max_num_caldacs; i++)
- cal2_bits |= DESELECT_CALDAC_BIT(i);
- /* activate the caldac we want */
- cal2_bits &= ~DESELECT_CALDAC_BIT(caldac);
- outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
- /* deactivate caldac */
- cal2_bits |= DESELECT_CALDAC_BIT(caldac);
- outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
-}
-
-/* set caldacs to eeprom values for given channel and range */
-static void cb_pcidda_calibrate(struct comedi_device *dev, unsigned int channel,
- unsigned int range)
-{
- struct cb_pcidda_private *devpriv = dev->private;
- unsigned int caldac = channel / 2; /* two caldacs per channel */
- unsigned int chan = 4 * (channel % 2); /* caldac channel base */
- unsigned int index = 2 * range + 12 * channel;
- unsigned int offset;
- unsigned int gain;
-
- /* save range so we can tell when we need to readjust calibration */
- devpriv->ao_range[channel] = range;
-
- /* get values from eeprom data */
- offset = devpriv->eeprom_data[0x7 + index];
- gain = devpriv->eeprom_data[0x8 + index];
-
- /* set caldacs */
- cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_COURSE_OFFSET,
- (offset >> 8) & 0xff);
- cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_FINE_OFFSET,
- offset & 0xff);
- cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_COURSE_GAIN,
- (gain >> 8) & 0xff);
- cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_FINE_GAIN,
- gain & 0xff);
-}
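
As a worked illustration of the index arithmetic above (channel and range chosen arbitrarily): for AO channel 1 at range index 2, index = 2 * 2 + 12 * 1 = 16, so the offset word comes from eeprom_data[0x17] and the gain word from eeprom_data[0x18]; the high byte of each word feeds the coarse caldac channel and the low byte the fine one, matching the four caldac writes above.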
-
-static int cb_pcidda_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcidda_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int ctrl;
-
- if (range != devpriv->ao_range[channel])
- cb_pcidda_calibrate(dev, channel, range);
-
- ctrl = CB_DDA_DA_CTRL_EN | CB_DDA_DA_CTRL_DAC(channel);
-
- switch (range) {
- case 0:
- case 3:
- ctrl |= CB_DDA_DA_CTRL_RANGE10V;
- break;
- case 1:
- case 4:
- ctrl |= CB_DDA_DA_CTRL_RANGE5V;
- break;
- case 2:
- case 5:
- ctrl |= CB_DDA_DA_CTRL_RANGE2V5;
- break;
- }
-
- if (range > 2)
- ctrl |= CB_DDA_DA_CTRL_UNIP;
-
- outw(ctrl, devpriv->daqio + CB_DDA_DA_CTRL_REG);
-
- outw(data[0], devpriv->daqio + CB_DDA_DA_DATA_REG(channel));
-
- return insn->n;
-}
-
-static int cb_pcidda_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct cb_pcidda_board *board = NULL;
- struct cb_pcidda_private *devpriv;
- struct comedi_subdevice *s;
- int i;
- int ret;
-
- if (context < ARRAY_SIZE(cb_pcidda_boards))
- board = &cb_pcidda_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 2);
- devpriv->daqio = pci_resource_start(pcidev, 3);
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->ao_chans;
- s->maxdata = (1 << board->ao_bits) - 1;
- s->range_table = &cb_pcidda_ranges;
- s->insn_write = cb_pcidda_ao_insn_write;
-
- /* two 8255 digital io subdevices */
- for (i = 0; i < 2; i++) {
- s = &dev->subdevices[1 + i];
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
- if (ret)
- return ret;
- }
-
- /* Read the caldac eeprom data */
- for (i = 0; i < EEPROM_SIZE; i++)
- devpriv->eeprom_data[i] = cb_pcidda_read_eeprom(dev, i);
-
- /* set calibrations dacs */
- for (i = 0; i < board->ao_chans; i++)
- cb_pcidda_calibrate(dev, i, devpriv->ao_range[i]);
-
- return 0;
-}
-
-static struct comedi_driver cb_pcidda_driver = {
- .driver_name = "cb_pcidda",
- .module = THIS_MODULE,
- .auto_attach = cb_pcidda_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int cb_pcidda_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &cb_pcidda_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id cb_pcidda_pci_table[] = {
- { PCI_VDEVICE(CB, 0x0020), BOARD_DDA02_12 },
- { PCI_VDEVICE(CB, 0x0021), BOARD_DDA04_12 },
- { PCI_VDEVICE(CB, 0x0022), BOARD_DDA08_12 },
- { PCI_VDEVICE(CB, 0x0023), BOARD_DDA02_16 },
- { PCI_VDEVICE(CB, 0x0024), BOARD_DDA04_16 },
- { PCI_VDEVICE(CB, 0x0025), BOARD_DDA08_16 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, cb_pcidda_pci_table);
-
-static struct pci_driver cb_pcidda_pci_driver = {
- .name = "cb_pcidda",
- .id_table = cb_pcidda_pci_table,
- .probe = cb_pcidda_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(cb_pcidda_driver, cb_pcidda_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
deleted file mode 100644
index 47e38398921e..000000000000
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * comedi/drivers/cb_pcimdas.c
- * Comedi driver for Computer Boards PCIM-DAS1602/16 and PCIe-DAS1602/16
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: cb_pcimdas
- * Description: Measurement Computing PCI Migration series boards
- * Devices: [ComputerBoards] PCIM-DAS1602/16 (cb_pcimdas), PCIe-DAS1602/16
- * Author: Richard Bytheway
- * Updated: Mon, 13 Oct 2014 11:57:39 +0000
- * Status: experimental
- *
- * Written to support the PCIM-DAS1602/16 and PCIe-DAS1602/16.
- *
- * Configuration Options:
- * none
- *
- * Manual configuration of PCI(e) cards is not supported; they are configured
- * automatically.
- *
- * Developed from cb_pcidas and skel by Richard Bytheway (mocelet@sucs.org).
- * Only DIO, AO and simple AI are supported in its present form.
- * There is no support for interrupts, multi-channel or FIFO AI,
- * although the card looks like it could support them.
- *
- * http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf
- * http://www.mccdaq.com/PDFs/Manuals/pcie-das1602-16.pdf
- */
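
As a usage sketch only (not part of the driver), a one-shot analog input read through comedilib might look like the following; the device node, channel, range and reference are assumptions, while subdevice 0 matches the AI subdevice set up in (*auto_attach) below.

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
		lsampl_t sample;

		if (!dev)
			return 1;
		/* subdevice 0 = AI; channel 3, range 0, ground reference (illustrative) */
		if (comedi_data_read(dev, 0, 3, 0, AREF_GROUND, &sample) < 0) {
			comedi_perror("comedi_data_read");
			comedi_close(dev);
			return 1;
		}
		printf("raw sample: %u\n", sample);
		comedi_close(dev);
		return 0;
	}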
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "plx9052.h"
-#include "8255.h"
-
-/*
- * PCI Bar 1 Register map
- * see plx9052.h for register and bit defines
- */
-
-/*
- * PCI Bar 2 Register map (devpriv->daqio)
- */
-#define PCIMDAS_AI_REG 0x00
-#define PCIMDAS_AI_SOFTTRIG_REG 0x00
-#define PCIMDAS_AO_REG(x) (0x02 + ((x) * 2))
-
-/*
- * PCI Bar 3 Register map (devpriv->BADR3)
- */
-#define PCIMDAS_MUX_REG 0x00
-#define PCIMDAS_MUX(_lo, _hi) ((_lo) | ((_hi) << 4))
-#define PCIMDAS_DI_DO_REG 0x01
-#define PCIMDAS_STATUS_REG 0x02
-#define PCIMDAS_STATUS_EOC BIT(7)
-#define PCIMDAS_STATUS_UB BIT(6)
-#define PCIMDAS_STATUS_MUX BIT(5)
-#define PCIMDAS_STATUS_CLK BIT(4)
-#define PCIMDAS_STATUS_TO_CURR_MUX(x) ((x) & 0xf)
-#define PCIMDAS_CONV_STATUS_REG 0x03
-#define PCIMDAS_CONV_STATUS_EOC BIT(7)
-#define PCIMDAS_CONV_STATUS_EOB BIT(6)
-#define PCIMDAS_CONV_STATUS_EOA BIT(5)
-#define PCIMDAS_CONV_STATUS_FNE BIT(4)
-#define PCIMDAS_CONV_STATUS_FHF BIT(3)
-#define PCIMDAS_CONV_STATUS_OVERRUN BIT(2)
-#define PCIMDAS_IRQ_REG 0x04
-#define PCIMDAS_IRQ_INTE BIT(7)
-#define PCIMDAS_IRQ_INT BIT(6)
-#define PCIMDAS_IRQ_OVERRUN BIT(4)
-#define PCIMDAS_IRQ_EOA BIT(3)
-#define PCIMDAS_IRQ_EOA_INT_SEL BIT(2)
-#define PCIMDAS_IRQ_INTSEL(x) ((x) << 0)
-#define PCIMDAS_IRQ_INTSEL_EOC PCIMDAS_IRQ_INTSEL(0)
-#define PCIMDAS_IRQ_INTSEL_FNE PCIMDAS_IRQ_INTSEL(1)
-#define PCIMDAS_IRQ_INTSEL_EOB PCIMDAS_IRQ_INTSEL(2)
-#define PCIMDAS_IRQ_INTSEL_FHF_EOA PCIMDAS_IRQ_INTSEL(3)
-#define PCIMDAS_PACER_REG 0x05
-#define PCIMDAS_PACER_GATE_STATUS BIT(6)
-#define PCIMDAS_PACER_GATE_POL BIT(5)
-#define PCIMDAS_PACER_GATE_LATCH BIT(4)
-#define PCIMDAS_PACER_GATE_EN BIT(3)
-#define PCIMDAS_PACER_EXT_PACER_POL BIT(2)
-#define PCIMDAS_PACER_SRC(x) ((x) << 0)
-#define PCIMDAS_PACER_SRC_POLLED PCIMDAS_PACER_SRC(0)
-#define PCIMDAS_PACER_SRC_EXT PCIMDAS_PACER_SRC(2)
-#define PCIMDAS_PACER_SRC_INT PCIMDAS_PACER_SRC(3)
-#define PCIMDAS_PACER_SRC_MASK (3 << 0)
-#define PCIMDAS_BURST_REG 0x06
-#define PCIMDAS_BURST_BME BIT(1)
-#define PCIMDAS_BURST_CONV_EN BIT(0)
-#define PCIMDAS_GAIN_REG 0x07
-#define PCIMDAS_8254_BASE 0x08
-#define PCIMDAS_USER_CNTR_REG 0x0c
-#define PCIMDAS_USER_CNTR_CTR1_CLK_SEL BIT(0)
-#define PCIMDAS_RESIDUE_MSB_REG 0x0d
-#define PCIMDAS_RESIDUE_LSB_REG 0x0e
-
-/*
- * PCI Bar 4 Register map (dev->iobase)
- */
-#define PCIMDAS_8255_BASE 0x00
-
-static const struct comedi_lrange cb_pcimdas_ai_bip_range = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange cb_pcimdas_ai_uni_range = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-/*
- * The Analog Output range is not programmable. The DAC ranges are
- * jumper-settable on the board. The settings are not software-readable.
- */
-static const struct comedi_lrange cb_pcimdas_ao_range = {
- 6, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- UNI_RANGE(10),
- UNI_RANGE(5),
- RANGE_ext(-1, 1),
- RANGE_ext(0, 1)
- }
-};
-
-/*
- * This structure holds data unique to this hardware driver. If several
- * hardware drivers keep similar information in this structure, feel free
- * to suggest moving the variable to struct comedi_device.
- */
-struct cb_pcimdas_private {
- /* base addresses */
- unsigned long daqio;
- unsigned long BADR3;
-};
-
-static int cb_pcimdas_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int status;
-
- status = inb(devpriv->BADR3 + PCIMDAS_STATUS_REG);
- if ((status & PCIMDAS_STATUS_EOC) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int cb_pcimdas_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int n;
- unsigned int d;
- int ret;
-
- /* only support sw initiated reads from a single channel */
-
- /* configure for sw initiated read */
- d = inb(devpriv->BADR3 + PCIMDAS_PACER_REG);
- if ((d & PCIMDAS_PACER_SRC_MASK) != PCIMDAS_PACER_SRC_POLLED) {
- d &= ~PCIMDAS_PACER_SRC_MASK;
- d |= PCIMDAS_PACER_SRC_POLLED;
- outb(d, devpriv->BADR3 + PCIMDAS_PACER_REG);
- }
-
- /* set bursting off, conversions on */
- outb(PCIMDAS_BURST_CONV_EN, devpriv->BADR3 + PCIMDAS_BURST_REG);
-
- /* set range */
- outb(range, devpriv->BADR3 + PCIMDAS_GAIN_REG);
-
- /* set mux for single channel scan */
- outb(PCIMDAS_MUX(chan, chan), devpriv->BADR3 + PCIMDAS_MUX_REG);
-
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- /* trigger conversion */
- outw(0, devpriv->daqio + PCIMDAS_AI_SOFTTRIG_REG);
-
- /* wait for conversion to end */
- ret = comedi_timeout(dev, s, insn, cb_pcimdas_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* read data */
- data[n] = inw(devpriv->daqio + PCIMDAS_AI_REG);
- }
-
- /* return the number of samples read/written */
- return n;
-}
-
-static int cb_pcimdas_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outw(val, devpriv->daqio + PCIMDAS_AO_REG(chan));
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int cb_pcimdas_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int val;
-
- val = inb(devpriv->BADR3 + PCIMDAS_DI_DO_REG);
-
- data[1] = val & 0x0f;
-
- return insn->n;
-}
-
-static int cb_pcimdas_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
-
- if (comedi_dio_update_state(s, data))
- outb(s->state, devpriv->BADR3 + PCIMDAS_DI_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int cb_pcimdas_counter_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int ctrl;
-
- switch (data[0]) {
- case INSN_CONFIG_SET_CLOCK_SRC:
- switch (data[1]) {
- case 0: /* internal 100 kHz clock */
- ctrl = PCIMDAS_USER_CNTR_CTR1_CLK_SEL;
- break;
- case 1: /* external clk on pin 21 */
- ctrl = 0;
- break;
- default:
- return -EINVAL;
- }
- outb(ctrl, devpriv->BADR3 + PCIMDAS_USER_CNTR_REG);
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- ctrl = inb(devpriv->BADR3 + PCIMDAS_USER_CNTR_REG);
- if (ctrl & PCIMDAS_USER_CNTR_CTR1_CLK_SEL) {
- data[1] = 0;
- data[2] = I8254_OSC_BASE_100KHZ;
- } else {
- data[1] = 1;
- data[2] = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
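
A hedged user-space sketch of reaching this handler through the instruction interface; everything below is illustrative, the counter subdevice is number 5 as set up in (*auto_attach) further down, and the comedi core expects three data words for this instruction.

	#include <string.h>
	#include <comedilib.h>

	/* select the internal 100 kHz clock for the user counter (sketch, untested) */
	static int select_internal_counter_clock(comedi_t *dev)
	{
		comedi_insn insn;
		lsampl_t data[3];

		memset(&insn, 0, sizeof(insn));
		data[0] = INSN_CONFIG_SET_CLOCK_SRC;
		data[1] = 0;		/* 0 = internal 100 kHz, 1 = external clk on pin 21 */
		data[2] = 0;		/* period in ns; ignored by this driver */
		insn.insn = INSN_CONFIG;
		insn.n = 3;
		insn.data = data;
		insn.subdev = 5;	/* 8254 counter subdevice */
		insn.chanspec = CR_PACK(0, 0, 0);

		return comedi_do_insn(dev, &insn);
	}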
-
-static unsigned int cb_pcimdas_pacer_clk(struct comedi_device *dev)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int status;
-
- /* The Pacer Clock jumper selects a 10 MHz or 1 MHz clock */
- status = inb(devpriv->BADR3 + PCIMDAS_STATUS_REG);
- if (status & PCIMDAS_STATUS_CLK)
- return I8254_OSC_BASE_10MHZ;
- return I8254_OSC_BASE_1MHZ;
-}
-
-static bool cb_pcimdas_is_ai_se(struct comedi_device *dev)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int status;
-
- /*
- * The number of Analog Input channels is set with the
- * Analog Input Mode Switch on the board. The board can
- * have 16 single-ended or 8 differential channels.
- */
- status = inb(devpriv->BADR3 + PCIMDAS_STATUS_REG);
- return status & PCIMDAS_STATUS_MUX;
-}
-
-static bool cb_pcimdas_is_ai_uni(struct comedi_device *dev)
-{
- struct cb_pcimdas_private *devpriv = dev->private;
- unsigned int status;
-
- /*
- * The Analog Input range polarity is set with the
- * Analog Input Polarity Switch on the board. The
- * inputs can be set to Unipolar or Bipolar ranges.
- */
- status = inb(devpriv->BADR3 + PCIMDAS_STATUS_REG);
- return status & PCIMDAS_STATUS_UB;
-}
-
-static int cb_pcimdas_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct cb_pcimdas_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv->daqio = pci_resource_start(pcidev, 2);
- devpriv->BADR3 = pci_resource_start(pcidev, 3);
- dev->iobase = pci_resource_start(pcidev, 4);
-
- dev->pacer = comedi_8254_init(devpriv->BADR3 + PCIMDAS_8254_BASE,
- cb_pcimdas_pacer_clk(dev),
- I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 6);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- if (cb_pcimdas_is_ai_se(dev)) {
- s->subdev_flags |= SDF_GROUND;
- s->n_chan = 16;
- } else {
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = 8;
- }
- s->maxdata = 0xffff;
- s->range_table = cb_pcimdas_is_ai_uni(dev) ? &cb_pcimdas_ai_uni_range
- : &cb_pcimdas_ai_bip_range;
- s->insn_read = cb_pcimdas_ai_insn_read;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0xfff;
- s->range_table = &cb_pcimdas_ao_range;
- s->insn_write = cb_pcimdas_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[2];
- ret = subdev_8255_init(dev, s, NULL, PCIMDAS_8255_BASE);
- if (ret)
- return ret;
-
- /* Digital Input subdevice (main connector) */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = cb_pcimdas_di_insn_bits;
-
- /* Digital Output subdevice (main connector) */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = cb_pcimdas_do_insn_bits;
-
- /* Counter subdevice (8254) */
- s = &dev->subdevices[5];
- comedi_8254_subdevice_init(s, dev->pacer);
-
- dev->pacer->insn_config = cb_pcimdas_counter_insn_config;
-
- /* counters 1 and 2 are used internally for the pacer */
- comedi_8254_set_busy(dev->pacer, 1, true);
- comedi_8254_set_busy(dev->pacer, 2, true);
-
- return 0;
-}
-
-static struct comedi_driver cb_pcimdas_driver = {
- .driver_name = "cb_pcimdas",
- .module = THIS_MODULE,
- .auto_attach = cb_pcimdas_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int cb_pcimdas_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &cb_pcimdas_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id cb_pcimdas_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) }, /* PCIM-DAS1602/16 */
- { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0115) }, /* PCIe-DAS1602/16 */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, cb_pcimdas_pci_table);
-
-static struct pci_driver cb_pcimdas_pci_driver = {
- .name = "cb_pcimdas",
- .id_table = cb_pcimdas_pci_table,
- .probe = cb_pcimdas_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(cb_pcimdas_driver, cb_pcimdas_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for PCIM-DAS1602/16 and PCIe-DAS1602/16");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
deleted file mode 100644
index 19210d89f2b2..000000000000
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- comedi/drivers/cb_pcimdda.c
- Computer Boards PCIM-DDA06-16 Comedi driver
- Author: Calin Culianu <calin@ajvar.org>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: cb_pcimdda
-Description: Measurement Computing PCIM-DDA06-16
-Devices: [Measurement Computing] PCIM-DDA06-16 (cb_pcimdda)
-Author: Calin Culianu <calin@ajvar.org>
-Updated: Mon, 14 Apr 2008 15:15:51 +0100
-Status: works
-
-All features of the PCIM-DDA06-16 board are supported. This board
-has 6 16-bit AO channels and the usual 8255 DIO setup (24 channels,
-configurable in banks of 8 and 4, etc.). This board does not support commands.
-
-The board has a peculiar way of specifying AO gain/range settings -- you have
-one jumper bank on the card, which makes all 6 AO channels either
-5V unipolar, 5V bipolar, 10V unipolar or 10V bipolar.
-
-Since there is absolutely _no_ way to tell in software how this jumper is set
-(well, at least according to the rather thin spec. from Measurement Computing
- that comes with the board), the driver assumes the jumper is at its factory
-default setting of +/-5V.
-
-Also of note is the fact that this board features another jumper, whose
-state is also completely invisible to software. It toggles two possible AO
-output modes on the board:
-
- - Update Mode: Writing to an AO channel instantaneously updates the actual
- signal output by the DAC on the board (this is the factory default).
- - Simultaneous XFER Mode: Writing to an AO channel has no effect until
- you read from any one of the AO channels. This is useful for loading
- all 6 AO values, and then reading from any one of the AO channels on the
- device to instantly update all 6 AO values in unison. Useful for some
- control apps, I would assume? If your jumper is in this setting, then you
- need to issue your comedi_data_write()s to load all the values you want,
- then issue one comedi_data_read() on any channel on the AO subdevice
- to initiate the simultaneous XFER.
-
-Configuration Options: not applicable, uses PCI auto config
-*/
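
For illustration only, a comedilib sketch of the Simultaneous XFER usage described above; the device handle and raw values are assumptions, and subdevice 0 is the AO subdevice this driver registers.

	#include <comedilib.h>

	/* load all six AO channels, then trigger the simultaneous transfer (sketch) */
	static int update_all_outputs(comedi_t *dev, const lsampl_t vals[6])
	{
		lsampl_t dummy;
		int i;

		for (i = 0; i < 6; i++) {
			/* with the XFER jumper set, these writes only latch the values */
			if (comedi_data_write(dev, 0, i, 0, AREF_GROUND, vals[i]) < 0)
				return -1;
		}
		/* reading back any AO channel updates all six DACs at once */
		return comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &dummy);
	}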
-
-/*
- This is a driver for the Computer Boards PCIM-DDA06-16 Analog Output
- card. This board has a unique register layout and as such probably
- deserves its own driver file.
-
- It is theoretically possible to integrate this board into the cb_pcidda
- file, but since that isn't my code, I didn't want to significantly
- modify that file to support this board (I thought it impolite to do so).
-
- At any rate, if you feel ambitious, please feel free to take
- the code out of this file and combine it with a more unified driver
- file.
-
- I would like to thank Timothy Curry <Timothy.Curry@rdec.redstone.army.mil>
- for lending me a board so that I could write this driver.
-
- -Calin Culianu <calin@ajvar.org>
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-
-/* device ids of the cards we support -- currently only 1 card supported */
-#define PCI_ID_PCIM_DDA06_16 0x0053
-
-/*
- * Register map, 8-bit access only
- */
-#define PCIMDDA_DA_CHAN(x) (0x00 + (x) * 2)
-#define PCIMDDA_8255_BASE_REG 0x0c
-
-static int cb_pcimdda_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned long offset = dev->iobase + PCIMDDA_DA_CHAN(chan);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- /*
- * Write the LSB then MSB.
- *
- * If the simultaneous xfer mode is selected by the
- * jumper on the card, a read instruction is needed
- * in order to initiate the simultaneous transfer.
- * Otherwise, the DAC will be updated when the MSB
- * is written.
- */
- outb(val & 0x00ff, offset);
- outb((val >> 8) & 0x00ff, offset + 1);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int cb_pcimdda_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /* Initiate the simultaneous transfer */
- inw(dev->iobase + PCIMDDA_DA_CHAN(chan));
-
- return comedi_readback_insn_read(dev, s, insn, data);
-}
-
-static int cb_pcimdda_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 3);
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 6;
- s->maxdata = 0xffff;
- s->range_table = &range_bipolar5;
- s->insn_write = cb_pcimdda_ao_insn_write;
- s->insn_read = cb_pcimdda_ao_insn_read;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[1];
- /* digital i/o subdevice */
- return subdev_8255_init(dev, s, NULL, PCIMDDA_8255_BASE_REG);
-}
-
-static struct comedi_driver cb_pcimdda_driver = {
- .driver_name = "cb_pcimdda",
- .module = THIS_MODULE,
- .auto_attach = cb_pcimdda_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int cb_pcimdda_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &cb_pcimdda_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id cb_pcimdda_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CB, PCI_ID_PCIM_DDA06_16) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, cb_pcimdda_pci_table);
-
-static struct pci_driver cb_pcimdda_driver_pci_driver = {
- .name = "cb_pcimdda",
- .id_table = cb_pcimdda_pci_table,
- .probe = cb_pcimdda_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(cb_pcimdda_driver, cb_pcimdda_driver_pci_driver);
-
-MODULE_AUTHOR("Calin A. Culianu <calin@rtlab.org>");
-MODULE_DESCRIPTION("Comedi low-level driver for the Computerboards PCIM-DDA "
- "series. Currently only supports PCIM-DDA06-16 (which "
- "also happens to be the only board in this series. :) ) ");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_8254.c b/drivers/staging/comedi/drivers/comedi_8254.c
deleted file mode 100644
index 0d5d56b61f60..000000000000
--- a/drivers/staging/comedi/drivers/comedi_8254.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * comedi_8254.c
- * Generic 8254 timer/counter support
- * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * Based on 8253.h and various subdevice implementations in comedi drivers.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Module: comedi_8254
- * Description: Generic 8254 timer/counter support
- * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
- * Updated: Thu Jan 8 16:45:45 MST 2015
- * Status: works
- *
- * This module is not used directly by end-users. Rather, it is used by other
- * drivers to provide support for an 8254 Programmable Interval Timer. These
- * counters are typically used to generate the pacer clock used for data
- * acquisition. Some drivers also expose the counters for general purpose use.
- *
- * This module provides the following basic functions:
- *
- * comedi_8254_init() / comedi_8254_mm_init()
- * Initializes this module to access the 8254 registers. The _mm version
- * sets up the module for MMIO register access, the other for PIO access.
- * The pointer returned from these functions is normally stored in the
- * comedi_device dev->pacer and will be freed by the comedi core during
- * the driver (*detach). If a driver has multiple 8254 devices, they need
- * to be stored in the driver's private data and freed when the driver is
- * detached.
- *
- * NOTE: The counters are reset by setting them to I8254_MODE0 as part of
- * this initialization.
- *
- * comedi_8254_set_mode()
- * Sets a counter's operation mode:
- * I8254_MODE0 Interrupt on terminal count
- * I8254_MODE1 Hardware retriggerable one-shot
- * I8254_MODE2 Rate generator
- * I8254_MODE3 Square wave mode
- * I8254_MODE4 Software triggered strobe
- * I8254_MODE5 Hardware triggered strobe (retriggerable)
- *
- * In addition I8254_BCD and I8254_BINARY specify the counting mode:
- * I8254_BCD BCD counting
- * I8254_BINARY Binary counting
- *
- * comedi_8254_write()
- * Writes an initial value to a counter.
- *
- * The largest possible initial count is 0; this is equivalent to 2^16
- * for binary counting and 10^4 for BCD counting.
- *
- * NOTE: The counter does not stop when it reaches zero. In Mode 0, 1, 4,
- * and 5 the counter "wraps around" to the highest count, either 0xffff
- * for binary counting or 9999 for BCD counting, and continues counting.
- * Modes 2 and 3 are periodic; the counter reloads itself with the initial
- * count and continues counting from there.
- *
- * comedi_8254_read()
- * Reads the current value from a counter.
- *
- * comedi_8254_status()
- * Reads the status of a counter.
- *
- * comedi_8254_load()
- * Sets a counter's operation mode and writes the initial value.
- *
- * Typically the pacer clock is generated by cascading two of the 16-bit
- * counters to create a 32-bit rate generator (I8254_MODE2). These functions are
- * provided to handle the cascaded counters:
- *
- * comedi_8254_ns_to_timer()
- * Calculates the divisor value needed for a single counter to generate
- * ns timing.
- *
- * comedi_8254_cascade_ns_to_timer()
- * Calculates the two divisor values needed to generate the pacer
- * clock (in ns).
- *
- * comedi_8254_update_divisors()
- * Transfers the intermediate divisor values to the current divisors.
- *
- * comedi_8254_pacer_enable()
- * Programs the mode of the cascaded counters and writes the current
- * divisor values.
- *
- * To expose the counters as a subdevice for general-purpose use, the following
- * functions are provided:
- *
- * comedi_8254_subdevice_init()
- * Initializes a comedi_subdevice to use the 8254 timer.
- *
- * comedi_8254_set_busy()
- * Internally flags a counter as "busy". This is done to protect the
- * counters that are used for the cascaded 32-bit pacer.
- *
- * The subdevice provides (*insn_read) and (*insn_write) operations to read
- * the current value and write an initial value to a counter. A (*insn_config)
- * operation is also provided to handle the following comedi instructions:
- *
- * INSN_CONFIG_SET_COUNTER_MODE calls comedi_8254_set_mode()
- * INSN_CONFIG_8254_READ_STATUS calls comedi_8254_status()
- *
- * The (*insn_config) member of comedi_8254 can be initialized by the external
- * driver to handle any additional instructions.
- *
- * NOTE: Gate control, clock routing, and any interrupt handling for the
- * counters is not handled by this module. These features are driver dependent.
- */
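
A minimal sketch of the flow described above, assuming a PIO-mapped board whose cascaded pair is counters 1 and 2 and whose comedi_8254 was created with comedi_8254_init() in the driver's (*auto_attach); none of this is taken from a specific driver.

	/* hedged sketch: program the cascaded pacer for a requested period in ns */
	static int example_start_pacer(struct comedi_device *dev,
				       unsigned int *period_ns, unsigned int flags)
	{
		if (!dev->pacer)		/* normally allocated in (*auto_attach) */
			return -ENODEV;

		/* pick divisors for the requested period, rounded per CMDF_ROUND_* */
		comedi_8254_cascade_ns_to_timer(dev->pacer, period_ns, flags);
		/* commit the intermediate divisors */
		comedi_8254_update_divisors(dev->pacer);
		/* set MODE2 on both counters and load the divisors */
		comedi_8254_pacer_enable(dev->pacer, 1, 2, true);

		return 0;
	}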
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-
-static unsigned int __i8254_read(struct comedi_8254 *i8254, unsigned int reg)
-{
- unsigned int reg_offset = (reg * i8254->iosize) << i8254->regshift;
- unsigned int val;
-
- switch (i8254->iosize) {
- default:
- case I8254_IO8:
- if (i8254->mmio)
- val = readb(i8254->mmio + reg_offset);
- else
- val = inb(i8254->iobase + reg_offset);
- break;
- case I8254_IO16:
- if (i8254->mmio)
- val = readw(i8254->mmio + reg_offset);
- else
- val = inw(i8254->iobase + reg_offset);
- break;
- case I8254_IO32:
- if (i8254->mmio)
- val = readl(i8254->mmio + reg_offset);
- else
- val = inl(i8254->iobase + reg_offset);
- break;
- }
- return val & 0xff;
-}
-
-static void __i8254_write(struct comedi_8254 *i8254,
- unsigned int val, unsigned int reg)
-{
- unsigned int reg_offset = (reg * i8254->iosize) << i8254->regshift;
-
- switch (i8254->iosize) {
- default:
- case I8254_IO8:
- if (i8254->mmio)
- writeb(val, i8254->mmio + reg_offset);
- else
- outb(val, i8254->iobase + reg_offset);
- break;
- case I8254_IO16:
- if (i8254->mmio)
- writew(val, i8254->mmio + reg_offset);
- else
- outw(val, i8254->iobase + reg_offset);
- break;
- case I8254_IO32:
- if (i8254->mmio)
- writel(val, i8254->mmio + reg_offset);
- else
- outl(val, i8254->iobase + reg_offset);
- break;
- }
-}
-
-/**
- * comedi_8254_status - return the status of a counter
- * @i8254: comedi_8254 struct for the timer
- * @counter: the counter number
- */
-unsigned int comedi_8254_status(struct comedi_8254 *i8254, unsigned int counter)
-{
- unsigned int cmd;
-
- if (counter > 2)
- return 0;
-
- cmd = I8254_CTRL_READBACK_STATUS | I8254_CTRL_READBACK_SEL_CTR(counter);
- __i8254_write(i8254, cmd, I8254_CTRL_REG);
-
- return __i8254_read(i8254, counter);
-}
-EXPORT_SYMBOL_GPL(comedi_8254_status);
-
-/**
- * comedi_8254_read - read the current counter value
- * @i8254: comedi_8254 struct for the timer
- * @counter: the counter number
- */
-unsigned int comedi_8254_read(struct comedi_8254 *i8254, unsigned int counter)
-{
- unsigned int val;
-
- if (counter > 2)
- return 0;
-
- /* latch counter */
- __i8254_write(i8254, I8254_CTRL_SEL_CTR(counter) | I8254_CTRL_LATCH,
- I8254_CTRL_REG);
-
- /* read LSB then MSB */
- val = __i8254_read(i8254, counter);
- val |= (__i8254_read(i8254, counter) << 8);
-
- return val;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_read);
-
-/**
- * comedi_8254_write - load a 16-bit initial counter value
- * @i8254: comedi_8254 struct for the timer
- * @counter: the counter number
- * @val: the initial value
- */
-void comedi_8254_write(struct comedi_8254 *i8254,
- unsigned int counter, unsigned int val)
-{
- unsigned int byte;
-
- if (counter > 2)
- return;
- if (val > 0xffff)
- return;
-
- /* load LSB then MSB */
- byte = val & 0xff;
- __i8254_write(i8254, byte, counter);
- byte = (val >> 8) & 0xff;
- __i8254_write(i8254, byte, counter);
-}
-EXPORT_SYMBOL_GPL(comedi_8254_write);
-
-/**
- * comedi_8254_set_mode - set the mode of a counter
- * @i8254: comedi_8254 struct for the timer
- * @counter: the counter number
- * @mode: the I8254_MODEx and I8254_BCD|I8254_BINARY
- */
-int comedi_8254_set_mode(struct comedi_8254 *i8254, unsigned int counter,
- unsigned int mode)
-{
- unsigned int byte;
-
- if (counter > 2)
- return -EINVAL;
- if (mode > (I8254_MODE5 | I8254_BCD))
- return -EINVAL;
-
- byte = I8254_CTRL_SEL_CTR(counter) | /* select counter */
- I8254_CTRL_LSB_MSB | /* load LSB then MSB */
- mode; /* mode and BCD|binary */
- __i8254_write(i8254, byte, I8254_CTRL_REG);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_set_mode);
-
-/**
- * comedi_8254_load - program the mode and initial count of a counter
- * @i8254: comedi_8254 struct for the timer
- * @counter: the counter number
- * @mode: the I8254_MODEx and I8254_BCD|I8254_BINARY
- * @val: the initial value
- */
-int comedi_8254_load(struct comedi_8254 *i8254, unsigned int counter,
- unsigned int val, unsigned int mode)
-{
- if (counter > 2)
- return -EINVAL;
- if (val > 0xffff)
- return -EINVAL;
- if (mode > (I8254_MODE5 | I8254_BCD))
- return -EINVAL;
-
- comedi_8254_set_mode(i8254, counter, mode);
- comedi_8254_write(i8254, counter, val);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_load);
-
-/**
- * comedi_8254_pacer_enable - set the mode and load the cascaded counters
- * @i8254: comedi_8254 struct for the timer
- * @counter1: the counter number for the first divisor
- * @counter2: the counter number for the second divisor
- * @enable: flag to enable (load) the counters
- */
-void comedi_8254_pacer_enable(struct comedi_8254 *i8254,
- unsigned int counter1,
- unsigned int counter2,
- bool enable)
-{
- unsigned int mode;
-
- if (counter1 > 2 || counter2 > 2 || counter1 == counter2)
- return;
-
- if (enable)
- mode = I8254_MODE2 | I8254_BINARY;
- else
- mode = I8254_MODE0 | I8254_BINARY;
-
- comedi_8254_set_mode(i8254, counter1, mode);
- comedi_8254_set_mode(i8254, counter2, mode);
-
- if (enable) {
- /*
- * Divisors are loaded second counter then first counter to
- * avoid possible issues with the first counter expiring
- * before the second counter is loaded.
- */
- comedi_8254_write(i8254, counter2, i8254->divisor2);
- comedi_8254_write(i8254, counter1, i8254->divisor1);
- }
-}
-EXPORT_SYMBOL_GPL(comedi_8254_pacer_enable);
-
-/**
- * comedi_8254_update_divisors - update the divisors for the cascaded counters
- * @i8254: comedi_8254 struct for the timer
- */
-void comedi_8254_update_divisors(struct comedi_8254 *i8254)
-{
- /* masking is done since counter maps zero to 0x10000 */
- i8254->divisor = i8254->next_div & 0xffff;
- i8254->divisor1 = i8254->next_div1 & 0xffff;
- i8254->divisor2 = i8254->next_div2 & 0xffff;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_update_divisors);
-
-/**
- * comedi_8254_cascade_ns_to_timer - calculate the cascaded divisor values
- * @i8254: comedi_8254 struct for the timer
- * @nanosec: the desired ns time
- * @flags: comedi_cmd flags
- */
-void comedi_8254_cascade_ns_to_timer(struct comedi_8254 *i8254,
- unsigned int *nanosec,
- unsigned int flags)
-{
- unsigned int d1 = i8254->next_div1 ? i8254->next_div1 : I8254_MAX_COUNT;
- unsigned int d2 = i8254->next_div2 ? i8254->next_div2 : I8254_MAX_COUNT;
- unsigned int div = d1 * d2;
- unsigned int ns_lub = 0xffffffff;
- unsigned int ns_glb = 0;
- unsigned int d1_lub = 0;
- unsigned int d1_glb = 0;
- unsigned int d2_lub = 0;
- unsigned int d2_glb = 0;
- unsigned int start;
- unsigned int ns;
- unsigned int ns_low;
- unsigned int ns_high;
-
- /* exit early if everything is already correct */
- if (div * i8254->osc_base == *nanosec &&
- d1 > 1 && d1 <= I8254_MAX_COUNT &&
- d2 > 1 && d2 <= I8254_MAX_COUNT &&
- /* check for overflow */
- div > d1 && div > d2 &&
- div * i8254->osc_base > div &&
- div * i8254->osc_base > i8254->osc_base)
- return;
-
- div = *nanosec / i8254->osc_base;
- d2 = I8254_MAX_COUNT;
- start = div / d2;
- if (start < 2)
- start = 2;
- for (d1 = start; d1 <= div / d1 + 1 && d1 <= I8254_MAX_COUNT; d1++) {
- for (d2 = div / d1;
- d1 * d2 <= div + d1 + 1 && d2 <= I8254_MAX_COUNT; d2++) {
- ns = i8254->osc_base * d1 * d2;
- if (ns <= *nanosec && ns > ns_glb) {
- ns_glb = ns;
- d1_glb = d1;
- d2_glb = d2;
- }
- if (ns >= *nanosec && ns < ns_lub) {
- ns_lub = ns;
- d1_lub = d1;
- d2_lub = d2;
- }
- }
- }
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- ns_high = d1_lub * d2_lub * i8254->osc_base;
- ns_low = d1_glb * d2_glb * i8254->osc_base;
- if (ns_high - *nanosec < *nanosec - ns_low) {
- d1 = d1_lub;
- d2 = d2_lub;
- } else {
- d1 = d1_glb;
- d2 = d2_glb;
- }
- break;
- case CMDF_ROUND_UP:
- d1 = d1_lub;
- d2 = d2_lub;
- break;
- case CMDF_ROUND_DOWN:
- d1 = d1_glb;
- d2 = d2_glb;
- break;
- }
-
- *nanosec = d1 * d2 * i8254->osc_base;
- i8254->next_div1 = d1;
- i8254->next_div2 = d2;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_cascade_ns_to_timer);
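
A worked example with illustrative numbers: with osc_base = I8254_OSC_BASE_10MHZ (100 ns) and a requested *nanosec of 1,000,000 ns, div = 10,000 and the search settles on d1 = d2 = 100, giving exactly 100 ns * 100 * 100 = 1,000,000 ns; for a period that cannot be hit exactly, the CMDF_ROUND_* flag picks between the nearest achievable products above and below the request.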
-
-/**
- * comedi_8254_ns_to_timer - calculate the divisor value for nanosec timing
- * @i8254: comedi_8254 struct for the timer
- * @nanosec: the desired ns time
- * @flags: comedi_cmd flags
- */
-void comedi_8254_ns_to_timer(struct comedi_8254 *i8254,
- unsigned int *nanosec, unsigned int flags)
-{
- unsigned int divisor;
-
- switch (flags & CMDF_ROUND_MASK) {
- default:
- case CMDF_ROUND_NEAREST:
- divisor = DIV_ROUND_CLOSEST(*nanosec, i8254->osc_base);
- break;
- case CMDF_ROUND_UP:
- divisor = DIV_ROUND_UP(*nanosec, i8254->osc_base);
- break;
- case CMDF_ROUND_DOWN:
- divisor = *nanosec / i8254->osc_base;
- break;
- }
- if (divisor < 2)
- divisor = 2;
- if (divisor > I8254_MAX_COUNT)
- divisor = I8254_MAX_COUNT;
-
- *nanosec = divisor * i8254->osc_base;
- i8254->next_div = divisor;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_ns_to_timer);
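
For example (illustrative numbers): with a 10 MHz base (osc_base = 100 ns), a requested 25,050 ns yields a divisor of 251 under CMDF_ROUND_NEAREST or CMDF_ROUND_UP (so *nanosec becomes 25,100 ns) and 250 under CMDF_ROUND_DOWN (25,000 ns).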
-
-/**
- * comedi_8254_set_busy - set/clear the "busy" flag for a given counter
- * @i8254: comedi_8254 struct for the timer
- * @counter: the counter number
- * @busy: set/clear flag
- */
-void comedi_8254_set_busy(struct comedi_8254 *i8254,
- unsigned int counter, bool busy)
-{
- if (counter < 3)
- i8254->busy[counter] = busy;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_set_busy);
-
-static int comedi_8254_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_8254 *i8254 = s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- if (i8254->busy[chan])
- return -EBUSY;
-
- for (i = 0; i < insn->n; i++)
- data[i] = comedi_8254_read(i8254, chan);
-
- return insn->n;
-}
-
-static int comedi_8254_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_8254 *i8254 = s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- if (i8254->busy[chan])
- return -EBUSY;
-
- if (insn->n)
- comedi_8254_write(i8254, chan, data[insn->n - 1]);
-
- return insn->n;
-}
-
-static int comedi_8254_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_8254 *i8254 = s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int ret;
-
- if (i8254->busy[chan])
- return -EBUSY;
-
- switch (data[0]) {
- case INSN_CONFIG_RESET:
- ret = comedi_8254_set_mode(i8254, chan,
- I8254_MODE0 | I8254_BINARY);
- if (ret)
- return ret;
- break;
- case INSN_CONFIG_SET_COUNTER_MODE:
- ret = comedi_8254_set_mode(i8254, chan, data[1]);
- if (ret)
- return ret;
- break;
- case INSN_CONFIG_8254_READ_STATUS:
- data[1] = comedi_8254_status(i8254, chan);
- break;
- default:
- /*
- * If available, call the driver provided (*insn_config)
- * to handle any driver implemented instructions.
- */
- if (i8254->insn_config)
- return i8254->insn_config(dev, s, insn, data);
-
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-/**
- * comedi_8254_subdevice_init - initialize a comedi_subdevice for the 8254 timer
- * @s: comedi_subdevice struct
- * @i8254: comedi_8254 struct for the timer
- */
-void comedi_8254_subdevice_init(struct comedi_subdevice *s,
- struct comedi_8254 *i8254)
-{
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 3;
- s->maxdata = 0xffff;
- s->range_table = &range_unknown;
- s->insn_read = comedi_8254_insn_read;
- s->insn_write = comedi_8254_insn_write;
- s->insn_config = comedi_8254_insn_config;
-
- s->private = i8254;
-}
-EXPORT_SYMBOL_GPL(comedi_8254_subdevice_init);
-
-static struct comedi_8254 *__i8254_init(unsigned long iobase,
- void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift)
-{
- struct comedi_8254 *i8254;
- int i;
-
- /* sanity check that the iosize is valid */
- if (!(iosize == I8254_IO8 || iosize == I8254_IO16 ||
- iosize == I8254_IO32))
- return NULL;
-
- i8254 = kzalloc(sizeof(*i8254), GFP_KERNEL);
- if (!i8254)
- return NULL;
-
- i8254->iobase = iobase;
- i8254->mmio = mmio;
- i8254->iosize = iosize;
- i8254->regshift = regshift;
-
- /* default osc_base to the max speed of a generic 8254 timer */
- i8254->osc_base = osc_base ? osc_base : I8254_OSC_BASE_10MHZ;
-
- /* reset all the counters by setting them to I8254_MODE0 */
- for (i = 0; i < 3; i++)
- comedi_8254_set_mode(i8254, i, I8254_MODE0 | I8254_BINARY);
-
- return i8254;
-}
-
-/**
- * comedi_8254_init - allocate and initialize the 8254 device for pio access
- * @iobase: port I/O base address
- * @osc_base: base time of the counter in ns
- * OPTIONAL - only used by comedi_8254_cascade_ns_to_timer()
- * @iosize: I/O register size
- * @regshift: register gap shift
- */
-struct comedi_8254 *comedi_8254_init(unsigned long iobase,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift)
-{
- return __i8254_init(iobase, NULL, osc_base, iosize, regshift);
-}
-EXPORT_SYMBOL_GPL(comedi_8254_init);
-
-/**
- * comedi_8254_mm_init - allocate and initialize the 8254 device for mmio access
- * @mmio: memory mapped I/O base address
- * @osc_base: base time of the counter in ns
- * OPTIONAL - only used by comedi_8254_cascade_ns_to_timer()
- * @iosize: I/O register size
- * @regshift: register gap shift
- */
-struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift)
-{
- return __i8254_init(0, mmio, osc_base, iosize, regshift);
-}
-EXPORT_SYMBOL_GPL(comedi_8254_mm_init);
-
-static int __init comedi_8254_module_init(void)
-{
- return 0;
-}
-module_init(comedi_8254_module_init);
-
-static void __exit comedi_8254_module_exit(void)
-{
-}
-module_exit(comedi_8254_module_exit);
-
-MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_DESCRIPTION("Comedi: Generic 8254 timer/counter support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_8254.h b/drivers/staging/comedi/drivers/comedi_8254.h
deleted file mode 100644
index f4610ead6172..000000000000
--- a/drivers/staging/comedi/drivers/comedi_8254.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * comedi_8254.h
- * Generic 8254 timer/counter support
- * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _COMEDI_8254_H
-#define _COMEDI_8254_H
-
-#include <linux/types.h>
-
-struct comedi_device;
-struct comedi_insn;
-struct comedi_subdevice;
-
-/*
- * Common oscillator base values in nanoseconds
- */
-#define I8254_OSC_BASE_10MHZ 100
-#define I8254_OSC_BASE_5MHZ 200
-#define I8254_OSC_BASE_4MHZ 250
-#define I8254_OSC_BASE_2MHZ 500
-#define I8254_OSC_BASE_1MHZ 1000
-#define I8254_OSC_BASE_100KHZ 10000
-#define I8254_OSC_BASE_10KHZ 100000
-#define I8254_OSC_BASE_1KHZ 1000000
-
-/*
- * I/O access size used to read/write registers
- */
-#define I8254_IO8 1
-#define I8254_IO16 2
-#define I8254_IO32 4
-
-/*
- * Register map for generic 8254 timer (I8254_IO8 with 0 regshift)
- */
-#define I8254_COUNTER0_REG 0x00
-#define I8254_COUNTER1_REG 0x01
-#define I8254_COUNTER2_REG 0x02
-#define I8254_CTRL_REG 0x03
-#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
-#define I8254_CTRL_READBACK_COUNT ((3 << 6) | (1 << 4))
-#define I8254_CTRL_READBACK_STATUS ((3 << 6) | (1 << 5))
-#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
-#define I8254_CTRL_LATCH (0 << 4)
-#define I8254_CTRL_LSB_ONLY (1 << 4)
-#define I8254_CTRL_MSB_ONLY (2 << 4)
-#define I8254_CTRL_LSB_MSB (3 << 4)
-
-/* counter maps zero to 0x10000 */
-#define I8254_MAX_COUNT 0x10000
-
-/**
- * struct comedi_8254 - private data used by this module
- * @iobase: PIO base address of the registers (in/out)
- * @mmio: MMIO base address of the registers (read/write)
- * @iosize: I/O size used to access the registers (b/w/l)
- * @regshift: register gap shift
- * @osc_base: cascaded oscillator speed in ns
- * @divisor: divisor for single counter
- * @divisor1: divisor loaded into first cascaded counter
- * @divisor2: divisor loaded into second cascaded counter
- * @next_div: next divisor for single counter
- * @next_div1: next divisor to use for first cascaded counter
- * @next_div2: next divisor to use for second cascaded counter
- * @clock_src: current clock source for each counter (driver specific)
- * @gate_src: current gate source for each counter (driver specific)
- * @busy: flags used to indicate that a counter is "busy"
- * @insn_config: driver specific (*insn_config) callback
- */
-struct comedi_8254 {
- unsigned long iobase;
- void __iomem *mmio;
- unsigned int iosize;
- unsigned int regshift;
- unsigned int osc_base;
- unsigned int divisor;
- unsigned int divisor1;
- unsigned int divisor2;
- unsigned int next_div;
- unsigned int next_div1;
- unsigned int next_div2;
- unsigned int clock_src[3];
- unsigned int gate_src[3];
- bool busy[3];
-
- int (*insn_config)(struct comedi_device *, struct comedi_subdevice *s,
- struct comedi_insn *, unsigned int *data);
-};
-
-unsigned int comedi_8254_status(struct comedi_8254 *, unsigned int counter);
-unsigned int comedi_8254_read(struct comedi_8254 *, unsigned int counter);
-void comedi_8254_write(struct comedi_8254 *,
- unsigned int counter, unsigned int val);
-
-int comedi_8254_set_mode(struct comedi_8254 *,
- unsigned int counter, unsigned int mode);
-int comedi_8254_load(struct comedi_8254 *,
- unsigned int counter, unsigned int val, unsigned int mode);
-
-void comedi_8254_pacer_enable(struct comedi_8254 *,
- unsigned int counter1, unsigned int counter2,
- bool enable);
-void comedi_8254_update_divisors(struct comedi_8254 *);
-void comedi_8254_cascade_ns_to_timer(struct comedi_8254 *,
- unsigned int *nanosec, unsigned int flags);
-void comedi_8254_ns_to_timer(struct comedi_8254 *,
- unsigned int *nanosec, unsigned int flags);
-
-void comedi_8254_set_busy(struct comedi_8254 *,
- unsigned int counter, bool busy);
-
-void comedi_8254_subdevice_init(struct comedi_subdevice *,
- struct comedi_8254 *);
-
-struct comedi_8254 *comedi_8254_init(unsigned long iobase,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
-struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
-
-#endif /* _COMEDI_8254_H */
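For orientation, here is a minimal sketch of how a board driver would consume the interface declared above. The board name, the 0x08 register offset and the 10 MHz clock are placeholders, and it assumes the driver keeps its timer state in dev->pacer as comedi drivers conventionally do; only the comedi_8254_* calls and constants come from this header.

#include <linux/module.h>

#include "../comedidev.h"
#include "comedi_8254.h"

/* Hypothetical: 8254 pacer registers live at dev->iobase + 0x08,
 * clocked at 10 MHz, 8-bit I/O with no register gap.
 */
static int myboard_alloc_pacer(struct comedi_device *dev)
{
	dev->pacer = comedi_8254_init(dev->iobase + 0x08,
				      I8254_OSC_BASE_10MHZ,
				      I8254_IO8, 0);
	if (!dev->pacer)
		return -ENOMEM;
	return 0;
}

/* Program the cascaded counters 1 and 2 for a requested scan period
 * (in nanoseconds) and start the pacer.
 */
static void myboard_start_pacer(struct comedi_device *dev,
				unsigned int *nanosec, unsigned int flags)
{
	comedi_8254_cascade_ns_to_timer(dev->pacer, nanosec, flags);
	comedi_8254_update_divisors(dev->pacer);
	comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
}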
diff --git a/drivers/staging/comedi/drivers/comedi_8255.c b/drivers/staging/comedi/drivers/comedi_8255.c
deleted file mode 100644
index b2441efc61cc..000000000000
--- a/drivers/staging/comedi/drivers/comedi_8255.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * comedi_8255.c
- * Generic 8255 digital I/O support
- *
- * Split from the Comedi "8255" driver module.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Module: comedi_8255
- * Description: Generic 8255 support
- * Author: ds
- * Updated: Fri, 22 May 2015 12:14:17 +0000
- * Status: works
- *
- * This module is not used directly by end-users. Rather, it is used by
- * other drivers to provide support for an 8255 "Programmable Peripheral
- * Interface" (PPI) chip.
- *
- * The classic in digital I/O. The 8255 appears in Comedi as a single
- * digital I/O subdevice with 24 channels. Channel 0 corresponds to
- * the 8255's port A, bit 0; channel 23 corresponds to port C, bit 7.
- * Direction configuration is done in blocks, with channels 0-7, 8-15,
- * 16-19, and 20-23 making up the 4 blocks. The only 8255 mode
- * supported is mode 0.
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-
-struct subdev_8255_private {
- unsigned long regbase;
- int (*io)(struct comedi_device *dev, int dir, int port, int data,
- unsigned long regbase);
-};
-
-static int subdev_8255_io(struct comedi_device *dev,
- int dir, int port, int data, unsigned long regbase)
-{
- if (dir) {
- outb(data, dev->iobase + regbase + port);
- return 0;
- }
- return inb(dev->iobase + regbase + port);
-}
-
-static int subdev_8255_mmio(struct comedi_device *dev,
- int dir, int port, int data, unsigned long regbase)
-{
- if (dir) {
- writeb(data, dev->mmio + regbase + port);
- return 0;
- }
- return readb(dev->mmio + regbase + port);
-}
-
-static int subdev_8255_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct subdev_8255_private *spriv = s->private;
- unsigned long regbase = spriv->regbase;
- unsigned int mask;
- unsigned int v;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0xff)
- spriv->io(dev, 1, I8255_DATA_A_REG,
- s->state & 0xff, regbase);
- if (mask & 0xff00)
- spriv->io(dev, 1, I8255_DATA_B_REG,
- (s->state >> 8) & 0xff, regbase);
- if (mask & 0xff0000)
- spriv->io(dev, 1, I8255_DATA_C_REG,
- (s->state >> 16) & 0xff, regbase);
- }
-
- v = spriv->io(dev, 0, I8255_DATA_A_REG, 0, regbase);
- v |= (spriv->io(dev, 0, I8255_DATA_B_REG, 0, regbase) << 8);
- v |= (spriv->io(dev, 0, I8255_DATA_C_REG, 0, regbase) << 16);
-
- data[1] = v;
-
- return insn->n;
-}
-
-static void subdev_8255_do_config(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct subdev_8255_private *spriv = s->private;
- unsigned long regbase = spriv->regbase;
- int config;
-
- config = I8255_CTRL_CW;
- /* 1 in io_bits indicates output, 1 in config indicates input */
- if (!(s->io_bits & 0x0000ff))
- config |= I8255_CTRL_A_IO;
- if (!(s->io_bits & 0x00ff00))
- config |= I8255_CTRL_B_IO;
- if (!(s->io_bits & 0x0f0000))
- config |= I8255_CTRL_C_LO_IO;
- if (!(s->io_bits & 0xf00000))
- config |= I8255_CTRL_C_HI_IO;
-
- spriv->io(dev, 1, I8255_CTRL_REG, config, regbase);
-}
-
-static int subdev_8255_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 8)
- mask = 0x0000ff;
- else if (chan < 16)
- mask = 0x00ff00;
- else if (chan < 20)
- mask = 0x0f0000;
- else
- mask = 0xf00000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- subdev_8255_do_config(dev, s);
-
- return insn->n;
-}
-
-static int __subdev_8255_init(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev,
- int dir, int port, int data,
- unsigned long regbase),
- unsigned long regbase,
- bool is_mmio)
-{
- struct subdev_8255_private *spriv;
-
- spriv = comedi_alloc_spriv(s, sizeof(*spriv));
- if (!spriv)
- return -ENOMEM;
-
- if (io)
- spriv->io = io;
- else if (is_mmio)
- spriv->io = subdev_8255_mmio;
- else
- spriv->io = subdev_8255_io;
- spriv->regbase = regbase;
-
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->range_table = &range_digital;
- s->maxdata = 1;
- s->insn_bits = subdev_8255_insn;
- s->insn_config = subdev_8255_insn_config;
-
- subdev_8255_do_config(dev, s);
-
- return 0;
-}
-
-/**
- * subdev_8255_init - initialize DIO subdevice for driving I/O mapped 8255
- * @dev: comedi device owning subdevice
- * @s: comedi subdevice to initialize
- * @io: (optional) register I/O call-back function
- * @regbase: offset of 8255 registers from dev->iobase, or call-back context
- *
- * Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
- *
- * If the optional I/O call-back function is provided, its prototype is of
- * the following form:
- *
- * int my_8255_callback(struct comedi_device *dev, int dir, int port,
- * int data, unsigned long regbase);
- *
- * where 'dev' and 'regbase' match the values passed to this function,
- * 'port' is the 8255 port number 0 to 3 (including the control port), 'dir'
- * is the direction (0 for read, 1 for write) and 'data' is the value to be
- * written. It should return 0 if writing or the value read if reading.
- *
- * If the optional I/O call-back function is not provided, an internal
- * call-back function is used which uses consecutive I/O port addresses
- * starting at dev->iobase + regbase.
- *
- * Return: -ENOMEM if failed to allocate memory, zero on success.
- */
-int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase)
-{
- return __subdev_8255_init(dev, s, io, regbase, false);
-}
-EXPORT_SYMBOL_GPL(subdev_8255_init);
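To make the callback contract above concrete, here is a minimal sketch of a driver supplying its own I/O routine. The 16-bit register spacing and the 0x20 offset are invented for illustration; only subdev_8255_init() and the dir/port/data convention come from the code above.

/* Hypothetical board that maps the four 8255 ports onto 16-bit
 * registers spaced two bytes apart at dev->iobase + regbase.
 */
static int myboard_8255_io(struct comedi_device *dev, int dir, int port,
			   int data, unsigned long regbase)
{
	unsigned long addr = dev->iobase + regbase + 2 * port;

	if (dir) {
		outw(data, addr);
		return 0;
	}
	return inw(addr) & 0xff;
}

static int myboard_init_dio(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	/* the 0x20 passed here comes back as 'regbase' in the callback */
	return subdev_8255_init(dev, s, myboard_8255_io, 0x20);
}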
-
-/**
- * subdev_8255_mm_init - initialize DIO subdevice for driving mmio-mapped 8255
- * @dev: comedi device owning subdevice
- * @s: comedi subdevice to initialize
- * @io: (optional) register I/O call-back function
- * @regbase: offset of 8255 registers from dev->mmio, or call-back context
- *
- * Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
- *
- * If the optional I/O call-back function is provided, its prototype is of
- * the following form:
- *
- * int my_8255_callback(struct comedi_device *dev, int dir, int port,
- * int data, unsigned long regbase);
- *
- * where 'dev' and 'regbase' match the values passed to this function,
- * 'port' is the 8255 port number 0 to 3 (including the control port), 'dir'
- * is the direction (0 for read, 1 for write) and 'data' is the value to be
- * written. It should return 0 if writing or the value read if reading.
- *
- * If the optional I/O call-back function is not provided, an internal
- * call-back function is used which uses consecutive MMIO virtual addresses
- * starting at dev->mmio + regbase.
- *
- * Return: -ENOMEM if failed to allocate memory, zero on success.
- */
-int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase)
-{
- return __subdev_8255_init(dev, s, io, regbase, true);
-}
-EXPORT_SYMBOL_GPL(subdev_8255_mm_init);
-
-/**
- * subdev_8255_regbase - get offset of 8255 registers or call-back context
- * @s: comedi subdevice
- *
- * Returns the 'regbase' parameter that was previously passed to
- * subdev_8255_init() or subdev_8255_mm_init() to set up the subdevice.
- * Only valid if the subdevice was set up successfully.
- */
-unsigned long subdev_8255_regbase(struct comedi_subdevice *s)
-{
- struct subdev_8255_private *spriv = s->private;
-
- return spriv->regbase;
-}
-EXPORT_SYMBOL_GPL(subdev_8255_regbase);
-
-static int __init comedi_8255_module_init(void)
-{
- return 0;
-}
-module_init(comedi_8255_module_init);
-
-static void __exit comedi_8255_module_exit(void)
-{
-}
-module_exit(comedi_8255_module_exit);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi: Generic 8255 digital I/O support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
deleted file mode 100644
index 50b76eccb7d7..000000000000
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * comedi_bond.c
- * A Comedi driver to 'bond' or merge multiple drivers and devices as one.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- * Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: comedi_bond
- * Description: A driver to 'bond' (merge) multiple subdevices from multiple
- * devices together as one.
- * Devices:
- * Author: ds
- * Updated: Mon, 10 Oct 00:18:25 -0500
- * Status: works
- *
- * This driver allows you to 'bond' (merge) multiple comedi subdevices
- * (coming from possibly different boards and/or drivers) together. For
- * example, if you had a board with 2 different DIO subdevices, and
- * another with 1 DIO subdevice, you could 'bond' them with this driver
- * so that they look like one big fat DIO subdevice. This makes writing
- * applications slightly easier as you don't have to worry about managing
- * different subdevices in the application -- you just worry about
- * indexing one linear array of channel id's.
- *
- * Right now only DIO subdevices are supported as that's the personal itch
- * I am scratching with this driver. If you want to add support for AI and AO
- * subdevs, go right on ahead and do so!
- *
- * Commands aren't supported -- although it would be cool if they were.
- *
- * Configuration Options:
- * List of comedi-minors to bond. All subdevices of the same type
- * within each minor will be concatenated together in the order given here.
- */
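Once the bonded device has been configured, it is used from userspace like any other single DIO subdevice. A minimal comedilib sketch (device path and channel numbers are arbitrary) that crosses the boundary between the underlying boards without caring where each channel physically lives:

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *it = comedi_open("/dev/comedi2");	/* the bonded device */
	unsigned int bit;

	if (!it)
		return 1;
	/* Channel 30 may sit on the second bonded board, but the driver
	 * presents one linear array of channel ids.
	 */
	comedi_dio_config(it, 0, 30, COMEDI_OUTPUT);
	comedi_dio_write(it, 0, 30, 1);
	comedi_dio_config(it, 0, 3, COMEDI_INPUT);
	comedi_dio_read(it, 0, 3, &bit);
	printf("channel 3 reads %u\n", bit);
	comedi_close(it);
	return 0;
}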
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
-
-struct bonded_device {
- struct comedi_device *dev;
- unsigned minor;
- unsigned subdev;
- unsigned nchans;
-};
-
-struct comedi_bond_private {
- char name[256];
- struct bonded_device **devs;
- unsigned ndevs;
- unsigned nchans;
-};
-
-static int bonding_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct comedi_bond_private *devpriv = dev->private;
- unsigned int n_left, n_done, base_chan;
- unsigned int write_mask, data_bits;
- struct bonded_device **devs;
-
- write_mask = data[0];
- data_bits = data[1];
- base_chan = CR_CHAN(insn->chanspec);
- /* do a maximum of 32 channels, starting from base_chan. */
- n_left = devpriv->nchans - base_chan;
- if (n_left > 32)
- n_left = 32;
-
- n_done = 0;
- devs = devpriv->devs;
- do {
- struct bonded_device *bdev = *devs++;
-
- if (base_chan < bdev->nchans) {
- /* base channel falls within bonded device */
- unsigned int b_chans, b_mask, b_write_mask, b_data_bits;
- int ret;
-
- /*
- * Get num channels to do for bonded device and set
- * up mask and data bits for bonded device.
- */
- b_chans = bdev->nchans - base_chan;
- if (b_chans > n_left)
- b_chans = n_left;
- b_mask = (b_chans < 32) ? ((1 << b_chans) - 1)
- : 0xffffffff;
- b_write_mask = (write_mask >> n_done) & b_mask;
- b_data_bits = (data_bits >> n_done) & b_mask;
- /* Read/Write the new digital lines. */
- ret = comedi_dio_bitfield2(bdev->dev, bdev->subdev,
- b_write_mask, &b_data_bits,
- base_chan);
- if (ret < 0)
- return ret;
- /* Place read bits into data[1]. */
- data[1] &= ~(b_mask << n_done);
- data[1] |= (b_data_bits & b_mask) << n_done;
- /*
- * Set up for following bonded device (if still have
- * channels to read/write).
- */
- base_chan = 0;
- n_done += b_chans;
- n_left -= b_chans;
- } else {
- /* Skip bonded devices before base channel. */
- base_chan -= bdev->nchans;
- }
- } while (n_left);
-
- return insn->n;
-}
-
-static int bonding_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct comedi_bond_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int ret;
- struct bonded_device *bdev;
- struct bonded_device **devs;
-
- /*
- * Locate bonded subdevice and adjust channel.
- */
- devs = devpriv->devs;
- for (bdev = *devs++; chan >= bdev->nchans; bdev = *devs++)
- chan -= bdev->nchans;
-
- /*
- * The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * configuration instruction INSN_CONFIG_DIO_OUTPUT,
- * INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_QUERY.
- *
- * Note that INSN_CONFIG_DIO_OUTPUT == COMEDI_OUTPUT,
- * and INSN_CONFIG_DIO_INPUT == COMEDI_INPUT. This is deliberate ;)
- */
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- case INSN_CONFIG_DIO_INPUT:
- ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, data[0]);
- break;
- case INSN_CONFIG_DIO_QUERY:
- ret = comedi_dio_get_config(bdev->dev, bdev->subdev, chan,
- &data[1]);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret >= 0)
- ret = insn->n;
- return ret;
-}
-
-static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct comedi_bond_private *devpriv = dev->private;
- DECLARE_BITMAP(devs_opened, COMEDI_NUM_BOARD_MINORS);
- int i;
-
- memset(&devs_opened, 0, sizeof(devs_opened));
- devpriv->name[0] = 0;
- /*
- * Loop through all comedi devices specified on the command-line,
- * building our device list.
- */
- for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
- char file[sizeof("/dev/comediXXXXXX")];
- int minor = it->options[i];
- struct comedi_device *d;
- int sdev = -1, nchans;
- struct bonded_device *bdev;
- struct bonded_device **devs;
-
- if (minor < 0 || minor >= COMEDI_NUM_BOARD_MINORS) {
- dev_err(dev->class_dev,
- "Minor %d is invalid!\n", minor);
- return -EINVAL;
- }
- if (minor == dev->minor) {
- dev_err(dev->class_dev,
- "Cannot bond this driver to itself!\n");
- return -EINVAL;
- }
- if (test_and_set_bit(minor, devs_opened)) {
- dev_err(dev->class_dev,
- "Minor %d specified more than once!\n", minor);
- return -EINVAL;
- }
-
- snprintf(file, sizeof(file), "/dev/comedi%d", minor);
- file[sizeof(file) - 1] = 0;
-
- d = comedi_open(file);
-
- if (!d) {
- dev_err(dev->class_dev,
- "Minor %u could not be opened\n", minor);
- return -ENODEV;
- }
-
- /* Do DIO, as that's all we support now.. */
- while ((sdev = comedi_find_subdevice_by_type(d, COMEDI_SUBD_DIO,
- sdev + 1)) > -1) {
- nchans = comedi_get_n_channels(d, sdev);
- if (nchans <= 0) {
- dev_err(dev->class_dev,
- "comedi_get_n_channels() returned %d on minor %u subdev %d!\n",
- nchans, minor, sdev);
- return -EINVAL;
- }
- bdev = kmalloc(sizeof(*bdev), GFP_KERNEL);
- if (!bdev)
- return -ENOMEM;
-
- bdev->dev = d;
- bdev->minor = minor;
- bdev->subdev = sdev;
- bdev->nchans = nchans;
- devpriv->nchans += nchans;
-
- /*
- * Now put bdev pointer at end of devpriv->devs array
- * list..
- */
-
- /* ergh.. ugly.. we need to realloc :( */
- devs = krealloc(devpriv->devs,
- (devpriv->ndevs + 1) * sizeof(*devs),
- GFP_KERNEL);
- if (!devs) {
- dev_err(dev->class_dev,
- "Could not allocate memory. Out of memory?\n");
- kfree(bdev);
- return -ENOMEM;
- }
- devpriv->devs = devs;
- devpriv->devs[devpriv->ndevs++] = bdev;
- {
- /* Append dev:subdev to devpriv->name */
- char buf[20];
-
- snprintf(buf, sizeof(buf), "%u:%u ",
- bdev->minor, bdev->subdev);
- strlcat(devpriv->name, buf,
- sizeof(devpriv->name));
- }
- }
- }
-
- if (!devpriv->nchans) {
- dev_err(dev->class_dev, "No channels found!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int bonding_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_bond_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /*
- * Setup our bonding from config params.. sets up our private struct..
- */
- ret = do_dev_config(dev, it);
- if (ret)
- return ret;
-
- dev->board_name = devpriv->name;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = devpriv->nchans;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = bonding_dio_insn_bits;
- s->insn_config = bonding_dio_insn_config;
-
- dev_info(dev->class_dev,
- "%s: %s attached, %u channels from %u devices\n",
- dev->driver->driver_name, dev->board_name,
- devpriv->nchans, devpriv->ndevs);
-
- return 0;
-}
-
-static void bonding_detach(struct comedi_device *dev)
-{
- struct comedi_bond_private *devpriv = dev->private;
-
- if (devpriv && devpriv->devs) {
- DECLARE_BITMAP(devs_closed, COMEDI_NUM_BOARD_MINORS);
-
- memset(&devs_closed, 0, sizeof(devs_closed));
- while (devpriv->ndevs--) {
- struct bonded_device *bdev;
-
- bdev = devpriv->devs[devpriv->ndevs];
- if (!bdev)
- continue;
- if (!test_and_set_bit(bdev->minor, devs_closed))
- comedi_close(bdev->dev);
- kfree(bdev);
- }
- kfree(devpriv->devs);
- devpriv->devs = NULL;
- }
-}
-
-static struct comedi_driver bonding_driver = {
- .driver_name = "comedi_bond",
- .module = THIS_MODULE,
- .attach = bonding_attach,
- .detach = bonding_detach,
-};
-module_comedi_driver(bonding_driver);
-
-MODULE_AUTHOR("Calin A. Culianu");
-MODULE_DESCRIPTION("comedi_bond: A driver for COMEDI to bond multiple COMEDI devices together as one.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
deleted file mode 100644
index 6ba71d114a95..000000000000
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * COMEDI ISA DMA support functions
- * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <asm/dma.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-
-/**
- * comedi_isadma_program - program and enable an ISA DMA transfer
- * @desc: the ISA DMA cookie to program and enable
- */
-void comedi_isadma_program(struct comedi_isadma_desc *desc)
-{
- unsigned long flags;
-
- flags = claim_dma_lock();
- clear_dma_ff(desc->chan);
- set_dma_mode(desc->chan, desc->mode);
- set_dma_addr(desc->chan, desc->hw_addr);
- set_dma_count(desc->chan, desc->size);
- enable_dma(desc->chan);
- release_dma_lock(flags);
-}
-EXPORT_SYMBOL_GPL(comedi_isadma_program);
-
-/**
- * comedi_isadma_disable - disable the ISA DMA channel
- * @dma_chan: the DMA channel to disable
- *
- * Returns the residue (remaining bytes) left in the DMA transfer.
- */
-unsigned int comedi_isadma_disable(unsigned int dma_chan)
-{
- unsigned long flags;
- unsigned int residue;
-
- flags = claim_dma_lock();
- disable_dma(dma_chan);
- residue = get_dma_residue(dma_chan);
- release_dma_lock(flags);
-
- return residue;
-}
-EXPORT_SYMBOL_GPL(comedi_isadma_disable);
-
-/**
- * comedi_isadma_disable_on_sample - disable the ISA DMA channel on a sample boundary
- * @dma_chan: the DMA channel to disable
- * @size: the sample size (in bytes)
- *
- * Returns the residue (remaining bytes) left in the DMA transfer.
- */
-unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
- unsigned int size)
-{
- int stalled = 0;
- unsigned long flags;
- unsigned int residue;
- unsigned int new_residue;
-
- residue = comedi_isadma_disable(dma_chan);
- while (residue % size) {
- /* residue is a partial sample, enable DMA to allow more data */
- flags = claim_dma_lock();
- enable_dma(dma_chan);
- release_dma_lock(flags);
-
- udelay(2);
- new_residue = comedi_isadma_disable(dma_chan);
-
- /* is DMA stalled? */
- if (new_residue == residue) {
- stalled++;
- if (stalled > 10)
- break;
- } else {
- residue = new_residue;
- stalled = 0;
- }
- }
- return residue;
-}
-EXPORT_SYMBOL_GPL(comedi_isadma_disable_on_sample);
-
-/**
- * comedi_isadma_poll - poll the current DMA transfer
- * @dma: the ISA DMA to poll
- *
- * Returns the position (in bytes) of the current DMA transfer.
- */
-unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
-{
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned long flags;
- unsigned int result;
- unsigned int result1;
-
- flags = claim_dma_lock();
- clear_dma_ff(desc->chan);
- if (!isa_dma_bridge_buggy)
- disable_dma(desc->chan);
- result = get_dma_residue(desc->chan);
-	/*
-	 * Read the counter again and choose the higher value, to avoid
-	 * reading while the counter's lower byte rolls over when
-	 * isa_dma_bridge_buggy is set.
-	 */
- result1 = get_dma_residue(desc->chan);
- if (!isa_dma_bridge_buggy)
- enable_dma(desc->chan);
- release_dma_lock(flags);
-
- if (result < result1)
- result = result1;
- if (result >= desc->size || result == 0)
- return 0;
- else
- return desc->size - result;
-}
-EXPORT_SYMBOL_GPL(comedi_isadma_poll);
-
-/**
- * comedi_isadma_set_mode - set the ISA DMA transfer direction
- * @desc: the ISA DMA cookie to set
- * @dma_dir: the DMA direction
- */
-void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir)
-{
- desc->mode = (dma_dir == COMEDI_ISADMA_READ) ? DMA_MODE_READ
- : DMA_MODE_WRITE;
-}
-EXPORT_SYMBOL_GPL(comedi_isadma_set_mode);
-
-/**
- * comedi_isadma_alloc - allocate and initialize the ISA DMA
- * @dev: comedi_device struct
- * @n_desc: the number of cookies to allocate
- * @dma_chan1: DMA channel for the first cookie
- * @dma_chan2: DMA channel for the second cookie
- * @maxsize: the size of the buffer to allocate for each cookie
- * @dma_dir: the DMA direction
- *
- * Returns the allocated and initialized ISA DMA or NULL if anything fails.
- */
-struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
- int n_desc, unsigned int dma_chan1,
- unsigned int dma_chan2,
- unsigned int maxsize, char dma_dir)
-{
- struct comedi_isadma *dma = NULL;
- struct comedi_isadma_desc *desc;
- unsigned int dma_chans[2];
- int i;
-
- if (n_desc < 1 || n_desc > 2)
- goto no_dma;
-
- dma = kzalloc(sizeof(*dma), GFP_KERNEL);
- if (!dma)
- goto no_dma;
-
- desc = kcalloc(n_desc, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- goto no_dma;
- dma->desc = desc;
- dma->n_desc = n_desc;
-
- dma_chans[0] = dma_chan1;
- if (dma_chan2 == 0 || dma_chan2 == dma_chan1)
- dma_chans[1] = dma_chan1;
- else
- dma_chans[1] = dma_chan2;
-
- if (request_dma(dma_chans[0], dev->board_name))
- goto no_dma;
- dma->chan = dma_chans[0];
- if (dma_chans[1] != dma_chans[0]) {
- if (request_dma(dma_chans[1], dev->board_name))
- goto no_dma;
- }
- dma->chan2 = dma_chans[1];
-
- for (i = 0; i < n_desc; i++) {
- desc = &dma->desc[i];
- desc->chan = dma_chans[i];
- desc->maxsize = maxsize;
- desc->virt_addr = dma_alloc_coherent(NULL, desc->maxsize,
- &desc->hw_addr,
- GFP_KERNEL);
- if (!desc->virt_addr)
- goto no_dma;
- comedi_isadma_set_mode(desc, dma_dir);
- }
-
- return dma;
-
-no_dma:
- comedi_isadma_free(dma);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(comedi_isadma_alloc);
-
-/**
- * comedi_isadma_free - free the ISA DMA
- * @dma: the ISA DMA to free
- */
-void comedi_isadma_free(struct comedi_isadma *dma)
-{
- struct comedi_isadma_desc *desc;
- int i;
-
- if (!dma)
- return;
-
- if (dma->desc) {
- for (i = 0; i < dma->n_desc; i++) {
- desc = &dma->desc[i];
- if (desc->virt_addr)
- dma_free_coherent(NULL, desc->maxsize,
- desc->virt_addr,
- desc->hw_addr);
- }
- kfree(dma->desc);
- }
- if (dma->chan2 && dma->chan2 != dma->chan)
- free_dma(dma->chan2);
- if (dma->chan)
- free_dma(dma->chan);
- kfree(dma);
-}
-EXPORT_SYMBOL_GPL(comedi_isadma_free);
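Putting the helpers above together, a minimal sketch of one DMA transfer as a board driver might run it. The DMA channel, buffer size and direction are placeholders; only the comedi_isadma_* calls and the desc->size convention come from this file.

static int myboard_dma_once(struct comedi_device *dev)
{
	struct comedi_isadma *dma;
	struct comedi_isadma_desc *desc;
	unsigned int residue;

	/* one cookie on DMA channel 3, one page, device-to-memory */
	dma = comedi_isadma_alloc(dev, 1, 3, 0, PAGE_SIZE,
				  COMEDI_ISADMA_READ);
	if (!dma)
		return -ENOMEM;

	desc = &dma->desc[0];
	desc->size = desc->maxsize;	/* transfer the whole buffer */
	comedi_isadma_program(desc);

	/* ... the board fills the buffer and signals completion ... */

	residue = comedi_isadma_disable(desc->chan);
	dev_dbg(dev->class_dev, "%u bytes left untransferred\n", residue);

	comedi_isadma_free(dma);
	return 0;
}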
-
-static int __init comedi_isadma_init(void)
-{
- return 0;
-}
-module_init(comedi_isadma_init);
-
-static void __exit comedi_isadma_exit(void)
-{
-}
-module_exit(comedi_isadma_exit);
-
-MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_DESCRIPTION("Comedi ISA DMA support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.h b/drivers/staging/comedi/drivers/comedi_isadma.h
deleted file mode 100644
index 2fb6573ba9e4..000000000000
--- a/drivers/staging/comedi/drivers/comedi_isadma.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * COMEDI ISA DMA support functions
- * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _COMEDI_ISADMA_H
-#define _COMEDI_ISADMA_H
-
-#include <linux/types.h>
-
-struct comedi_device;
-
-/*
- * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
- * defines are not available.
- */
-#define COMEDI_ISADMA_READ 0
-#define COMEDI_ISADMA_WRITE 1
-
-/**
- * struct comedi_isadma_desc - cookie for ISA DMA
- * @virt_addr: virtual address of buffer
- * @hw_addr: hardware (bus) address of buffer
- * @chan: DMA channel
- * @maxsize: allocated size of buffer (in bytes)
- * @size: transfer size (in bytes)
- * @mode: DMA_MODE_READ or DMA_MODE_WRITE
- */
-struct comedi_isadma_desc {
- void *virt_addr;
- dma_addr_t hw_addr;
- unsigned int chan;
- unsigned int maxsize;
- unsigned int size;
- char mode;
-};
-
-/**
- * struct comedi_isadma - ISA DMA data
- * @desc: cookie for each DMA buffer
- * @n_desc: the number of cookies
- * @cur_dma: the current cookie in use
- * @chan: the first DMA channel requested
- * @chan2: the second DMA channel requested
- */
-struct comedi_isadma {
- struct comedi_isadma_desc *desc;
- int n_desc;
- int cur_dma;
- unsigned int chan;
- unsigned int chan2;
-};
-
-#if IS_ENABLED(CONFIG_ISA_DMA_API)
-
-void comedi_isadma_program(struct comedi_isadma_desc *);
-unsigned int comedi_isadma_disable(unsigned int dma_chan);
-unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
- unsigned int size);
-unsigned int comedi_isadma_poll(struct comedi_isadma *);
-void comedi_isadma_set_mode(struct comedi_isadma_desc *, char dma_dir);
-
-struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *,
- int n_desc, unsigned int dma_chan1,
- unsigned int dma_chan2,
- unsigned int maxsize, char dma_dir);
-void comedi_isadma_free(struct comedi_isadma *);
-
-#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
-
-static inline void comedi_isadma_program(struct comedi_isadma_desc *desc)
-{
-}
-
-static inline unsigned int comedi_isadma_disable(unsigned int dma_chan)
-{
- return 0;
-}
-
-static inline unsigned int
-comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size)
-{
- return 0;
-}
-
-static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
-{
- return 0;
-}
-
-static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc,
- char dma_dir)
-{
-}
-
-static inline struct comedi_isadma *
-comedi_isadma_alloc(struct comedi_device *dev, int n_desc,
- unsigned int dma_chan1, unsigned int dma_chan2,
- unsigned int maxsize, char dma_dir)
-{
- return NULL;
-}
-
-static inline void comedi_isadma_free(struct comedi_isadma *dma)
-{
-}
-
-#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
-
-#endif /* #ifndef _COMEDI_ISADMA_H */
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
deleted file mode 100644
index 15a4093efda1..000000000000
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * comedi_parport.c
- * Comedi driver for standard parallel port
- *
- * For more information see:
- * http://retired.beyondlogic.org/spp/parallel.htm
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: comedi_parport
- * Description: Standard PC parallel port
- * Author: ds
- * Status: works in immediate mode
- * Devices: [standard] parallel port (comedi_parport)
- * Updated: Tue, 30 Apr 2002 21:11:45 -0700
- *
- * A cheap and easy way to get a few more digital I/O lines. Steal
- * additional parallel ports from old computers or your neighbors'
- * computers.
- *
- * Option list:
- * 0: I/O port base for the parallel port.
- * 1: IRQ (optional)
- *
- * Parallel Port Lines:
- *
- * pin subdev chan type name
- * ----- ------ ---- ---- --------------
- * 1 2 0 DO strobe
- * 2 0 0 DIO data 0
- * 3 0 1 DIO data 1
- * 4 0 2 DIO data 2
- * 5 0 3 DIO data 3
- * 6 0 4 DIO data 4
- * 7 0 5 DIO data 5
- * 8 0 6 DIO data 6
- * 9 0 7 DIO data 7
- * 10 1 3 DI ack
- * 11 1 4 DI busy
- * 12 1 2 DI paper out
- * 13 1 1 DI select in
- * 14 2 1 DO auto LF
- * 15 1 0 DI error
- * 16 2 2 DO init
- * 17 2 3 DO select printer
- * 18-25 ground
- *
- * When an IRQ is configured, subdevice 3 pretends to be a digital
- * input subdevice, but it always returns 0 when read. However, if
- * you run a command with scan_begin_src=TRIG_EXT, it uses pin 10
- * as an external trigger, which can be used to wake up tasks.
- */
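Read against the pin table above, a minimal comedilib sketch (device path arbitrary) that drives the eight data pins high and samples the 'busy' line on pin 11:

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *it = comedi_open("/dev/comedi0");
	unsigned int busy;
	int chan;

	if (!it)
		return 1;
	/* subdevice 0 is the DATA register: pins 2-9, channels 0-7 */
	for (chan = 0; chan < 8; chan++) {
		comedi_dio_config(it, 0, chan, COMEDI_OUTPUT);
		comedi_dio_write(it, 0, chan, 1);
	}
	/* subdevice 1, channel 4 is the STATUS 'busy' bit (pin 11) */
	comedi_dio_read(it, 1, 4, &busy);
	printf("busy = %u\n", busy);
	comedi_close(it);
	return 0;
}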
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-/*
- * Register map
- */
-#define PARPORT_DATA_REG 0x00
-#define PARPORT_STATUS_REG 0x01
-#define PARPORT_CTRL_REG 0x02
-#define PARPORT_CTRL_IRQ_ENA (1 << 4)
-#define PARPORT_CTRL_BIDIR_ENA (1 << 5)
-
-static int parport_data_reg_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + PARPORT_DATA_REG);
-
- data[1] = inb(dev->iobase + PARPORT_DATA_REG);
-
- return insn->n;
-}
-
-static int parport_data_reg_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int ctrl;
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0xff);
- if (ret)
- return ret;
-
- ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
- if (s->io_bits)
- ctrl &= ~PARPORT_CTRL_BIDIR_ENA;
- else
- ctrl |= PARPORT_CTRL_BIDIR_ENA;
- outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
-
- return insn->n;
-}
-
-static int parport_status_reg_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inb(dev->iobase + PARPORT_STATUS_REG) >> 3;
-
- return insn->n;
-}
-
-static int parport_ctrl_reg_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int ctrl;
-
- if (comedi_dio_update_state(s, data)) {
- ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
- ctrl &= (PARPORT_CTRL_IRQ_ENA | PARPORT_CTRL_BIDIR_ENA);
- ctrl |= s->state;
- outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int parport_intr_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = 0;
- return insn->n;
-}
-
-static int parport_intr_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-static int parport_intr_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int ctrl;
-
- ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
- ctrl |= PARPORT_CTRL_IRQ_ENA;
- outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
-
- return 0;
-}
-
-static int parport_intr_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int ctrl;
-
- ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
- ctrl &= ~PARPORT_CTRL_IRQ_ENA;
- outb(ctrl, dev->iobase + PARPORT_CTRL_REG);
-
- return 0;
-}
-
-static irqreturn_t parport_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int ctrl;
-
- ctrl = inb(dev->iobase + PARPORT_CTRL_REG);
- if (!(ctrl & PARPORT_CTRL_IRQ_ENA))
- return IRQ_NONE;
-
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int parport_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x03);
- if (ret)
- return ret;
-
- if (it->options[1]) {
- ret = request_irq(it->options[1], parport_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- ret = comedi_alloc_subdevices(dev, dev->irq ? 4 : 3);
- if (ret)
- return ret;
-
- /* Digital I/O subdevice - Parallel port DATA register */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_data_reg_insn_bits;
- s->insn_config = parport_data_reg_insn_config;
-
- /* Digital Input subdevice - Parallel port STATUS register */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 5;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_status_reg_insn_bits;
-
- /* Digital Output subdevice - Parallel port CONTROL register */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_ctrl_reg_insn_bits;
-
- if (dev->irq) {
- /* Digital Input subdevice - Interrupt support */
- s = &dev->subdevices[3];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = parport_intr_insn_bits;
- s->len_chanlist = 1;
- s->do_cmdtest = parport_intr_cmdtest;
- s->do_cmd = parport_intr_cmd;
- s->cancel = parport_intr_cancel;
- }
-
- outb(0, dev->iobase + PARPORT_DATA_REG);
- outb(0, dev->iobase + PARPORT_CTRL_REG);
-
- return 0;
-}
-
-static struct comedi_driver parport_driver = {
- .driver_name = "comedi_parport",
- .module = THIS_MODULE,
- .attach = parport_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(parport_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi: Standard parallel port driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
deleted file mode 100644
index 80d613c0fbc6..000000000000
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- comedi/drivers/comedi_test.c
-
- Generates fake waveform signals that can be read through
- the command interface. It does _not_ read from any board;
- it just generates deterministic waveforms.
- Useful for various testing purposes.
-
- Copyright (C) 2002 Joachim Wuttke <Joachim.Wuttke@icn.siemens.de>
- Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: comedi_test
-Description: generates fake waveforms
-Author: Joachim Wuttke <Joachim.Wuttke@icn.siemens.de>, Frank Mori Hess
- <fmhess@users.sourceforge.net>, ds
-Devices:
-Status: works
-Updated: Sat, 16 Mar 2002 17:34:48 -0800
-
-This driver is mainly for testing purposes, but can also be used to
-generate sample waveforms on systems that don't have data acquisition
-hardware.
-
-Configuration options:
- [0] - Amplitude in microvolts for fake waveforms (default 1 volt)
- [1] - Period in microseconds for fake waveforms (default 0.1 sec)
-
-Generates a sawtooth wave on channel 0 and a square wave on channel 1;
-additional waveforms could be added to other channels (currently they
-return flatline zero volts).
-
-*/
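For a quick sanity check of the driver from userspace, a minimal comedilib sketch (device path arbitrary). Note that synchronous reads on the AI subdevice return the loopback value last written to the AO subdevice, as implemented further down; the fake waveforms themselves are only delivered through the streaming command interface.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *it = comedi_open("/dev/comedi0");
	lsampl_t readback;

	if (!it)
		return 1;
	/* write to AO (subdevice 1) channel 0, read it back on AI (0) */
	comedi_data_write(it, 1, 0, 0, AREF_GROUND, 12345);
	comedi_data_read(it, 0, 0, 0, AREF_GROUND, &readback);
	printf("loopback readback: %u\n", readback);
	comedi_close(it);
	return 0;
}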
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <asm/div64.h>
-
-#include <linux/timer.h>
-#include <linux/ktime.h>
-
-#define N_CHANS 8
-
-enum waveform_state_bits {
- WAVEFORM_AI_RUNNING = 0
-};
-
-/* Data unique to this driver */
-struct waveform_private {
- struct timer_list timer;
- ktime_t last; /* time last timer interrupt occurred */
- unsigned int uvolt_amplitude; /* waveform amplitude in microvolts */
- unsigned long usec_period; /* waveform period in microseconds */
- unsigned long usec_current; /* current time (mod waveform period) */
- unsigned long usec_remainder; /* usec since last scan */
- unsigned long state_bits;
- unsigned int scan_period; /* scan period in usec */
- unsigned int convert_period; /* conversion period in usec */
- unsigned int ao_loopbacks[N_CHANS];
-};
-
-/* 1000 nanosec in a microsec */
-static const int nano_per_micro = 1000;
-
-/* fake analog input ranges */
-static const struct comedi_lrange waveform_ai_ranges = {
- 2, {
- BIP_RANGE(10),
- BIP_RANGE(5)
- }
-};
-
-static unsigned short fake_sawtooth(struct comedi_device *dev,
- unsigned int range_index,
- unsigned long current_time)
-{
- struct waveform_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int offset = s->maxdata / 2;
- u64 value;
- const struct comedi_krange *krange =
- &s->range_table->range[range_index];
- u64 binary_amplitude;
-
- binary_amplitude = s->maxdata;
- binary_amplitude *= devpriv->uvolt_amplitude;
- do_div(binary_amplitude, krange->max - krange->min);
-
- current_time %= devpriv->usec_period;
- value = current_time;
- value *= binary_amplitude * 2;
- do_div(value, devpriv->usec_period);
- value -= binary_amplitude; /* get rid of sawtooth's dc offset */
-
- return offset + value;
-}
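As a worked example of the scaling in fake_sawtooth() above, assuming the comedi krange limits are expressed in microvolts for these voltage ranges: with the defaults of maxdata = 0xffff (65535) and uvolt_amplitude = 1000000 (1 V) on the +/-10 V range (a 20000000 uV span), binary_amplitude = 65535 * 1000000 / 20000000, which truncates to 3276 counts, so the sawtooth sweeps roughly +/-3276 counts around the mid-scale offset of 32767.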
-
-static unsigned short fake_squarewave(struct comedi_device *dev,
- unsigned int range_index,
- unsigned long current_time)
-{
- struct waveform_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int offset = s->maxdata / 2;
- u64 value;
- const struct comedi_krange *krange =
- &s->range_table->range[range_index];
- current_time %= devpriv->usec_period;
-
- value = s->maxdata;
- value *= devpriv->uvolt_amplitude;
- do_div(value, krange->max - krange->min);
-
- if (current_time < devpriv->usec_period / 2)
- value *= -1;
-
- return offset + value;
-}
-
-static unsigned short fake_flatline(struct comedi_device *dev,
- unsigned int range_index,
- unsigned long current_time)
-{
- return dev->read_subdev->maxdata / 2;
-}
-
-/* generates a different waveform depending on what channel is read */
-static unsigned short fake_waveform(struct comedi_device *dev,
- unsigned int channel, unsigned int range,
- unsigned long current_time)
-{
- enum {
- SAWTOOTH_CHAN,
- SQUARE_CHAN,
- };
- switch (channel) {
- case SAWTOOTH_CHAN:
- return fake_sawtooth(dev, range, current_time);
- case SQUARE_CHAN:
- return fake_squarewave(dev, range, current_time);
- default:
- break;
- }
-
- return fake_flatline(dev, range, current_time);
-}
-
-/*
- This is the background routine that generates the fake waveform data.
- It runs repeatedly in the background, scheduled by a kernel timer
- rather than by a hardware interrupt.
-*/
-static void waveform_ai_interrupt(unsigned long arg)
-{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct waveform_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int i, j;
- /* all times in microsec */
- unsigned long elapsed_time;
- unsigned int num_scans;
- ktime_t now;
-
- /* check command is still active */
- if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
- return;
-
- now = ktime_get();
-
- elapsed_time = ktime_to_us(ktime_sub(now, devpriv->last));
- devpriv->last = now;
- num_scans =
- (devpriv->usec_remainder + elapsed_time) / devpriv->scan_period;
- devpriv->usec_remainder =
- (devpriv->usec_remainder + elapsed_time) % devpriv->scan_period;
-
- num_scans = comedi_nscans_left(s, num_scans);
- for (i = 0; i < num_scans; i++) {
- for (j = 0; j < cmd->chanlist_len; j++) {
- unsigned short sample;
-
- sample = fake_waveform(dev, CR_CHAN(cmd->chanlist[j]),
- CR_RANGE(cmd->chanlist[j]),
- devpriv->usec_current +
- i * devpriv->scan_period +
- j * devpriv->convert_period);
- comedi_buf_write_samples(s, &sample, 1);
- }
- }
-
- devpriv->usec_current += elapsed_time;
- devpriv->usec_current %= devpriv->usec_period;
-
- if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
- else
- mod_timer(&devpriv->timer, jiffies + 1);
-
- comedi_handle_events(dev, s);
-}
-
-static int waveform_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_NOW | TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->convert_src == TRIG_NOW)
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- nano_per_micro);
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- cmd->convert_arg *
- cmd->chanlist_len);
- }
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- /* round to nearest microsec */
- arg = nano_per_micro *
- ((arg + (nano_per_micro / 2)) / nano_per_micro);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- /* round to nearest microsec */
- arg = nano_per_micro *
- ((arg + (nano_per_micro / 2)) / nano_per_micro);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int waveform_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct waveform_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (cmd->flags & CMDF_PRIORITY) {
- dev_err(dev->class_dev,
- "commands at RT priority not supported in this driver\n");
- return -1;
- }
-
- devpriv->scan_period = cmd->scan_begin_arg / nano_per_micro;
-
- if (cmd->convert_src == TRIG_NOW)
- devpriv->convert_period = 0;
- else /* TRIG_TIMER */
- devpriv->convert_period = cmd->convert_arg / nano_per_micro;
-
- devpriv->last = ktime_get();
- devpriv->usec_current =
- ((u32)ktime_to_us(devpriv->last)) % devpriv->usec_period;
- devpriv->usec_remainder = 0;
-
- devpriv->timer.expires = jiffies + 1;
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- add_timer(&devpriv->timer);
- return 0;
-}
-
-static int waveform_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct waveform_private *devpriv = dev->private;
-
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->timer);
- return 0;
-}
-
-static int waveform_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct waveform_private *devpriv = dev->private;
- int i, chan = CR_CHAN(insn->chanspec);
-
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_loopbacks[chan];
-
- return insn->n;
-}
-
-static int waveform_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct waveform_private *devpriv = dev->private;
- int i, chan = CR_CHAN(insn->chanspec);
-
- for (i = 0; i < insn->n; i++)
- devpriv->ao_loopbacks[chan] = data[i];
-
- return insn->n;
-}
-
-static int waveform_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct waveform_private *devpriv;
- struct comedi_subdevice *s;
- int amplitude = it->options[0];
- int period = it->options[1];
- int i;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* set default amplitude and period */
- if (amplitude <= 0)
- amplitude = 1000000; /* 1 volt */
- if (period <= 0)
- period = 100000; /* 0.1 sec */
-
- devpriv->uvolt_amplitude = amplitude;
- devpriv->usec_period = period;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- /* analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
- s->n_chan = N_CHANS;
- s->maxdata = 0xffff;
- s->range_table = &waveform_ai_ranges;
- s->len_chanlist = s->n_chan * 2;
- s->insn_read = waveform_ai_insn_read;
- s->do_cmd = waveform_ai_cmd;
- s->do_cmdtest = waveform_ai_cmdtest;
- s->cancel = waveform_ai_cancel;
-
- s = &dev->subdevices[1];
- dev->write_subdev = s;
- /* analog output subdevice (loopback) */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = N_CHANS;
- s->maxdata = 0xffff;
- s->range_table = &waveform_ai_ranges;
- s->insn_write = waveform_ao_insn_write;
-
- /* Our default loopback value is just a 0V flatline */
- for (i = 0; i < s->n_chan; i++)
- devpriv->ao_loopbacks[i] = s->maxdata / 2;
-
- setup_timer(&devpriv->timer, waveform_ai_interrupt,
- (unsigned long)dev);
-
- dev_info(dev->class_dev,
- "%s: %i microvolt, %li microsecond waveform attached\n",
- dev->board_name,
- devpriv->uvolt_amplitude, devpriv->usec_period);
-
- return 0;
-}
-
-static void waveform_detach(struct comedi_device *dev)
-{
- struct waveform_private *devpriv = dev->private;
-
- if (devpriv)
- del_timer_sync(&devpriv->timer);
-}
-
-static struct comedi_driver waveform_driver = {
- .driver_name = "comedi_test",
- .module = THIS_MODULE,
- .attach = waveform_attach,
- .detach = waveform_detach,
-};
-module_comedi_driver(waveform_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
deleted file mode 100644
index 4956a49a6140..000000000000
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- comedi/drivers/contec_pci_dio.c
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: contec_pci_dio
-Description: Contec PIO1616L digital I/O board
-Devices: [Contec] PIO1616L (contec_pci_dio)
-Author: Stefano Rivoir <s.rivoir@gts.it>
-Updated: Wed, 27 Jun 2007 13:00:06 +0100
-Status: works
-
-Configuration Options: not applicable, uses comedi PCI auto config
-*/
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-/*
- * Register map
- */
-#define PIO1616L_DI_REG 0x00
-#define PIO1616L_DO_REG 0x02
-
-static int contec_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + PIO1616L_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int contec_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inw(dev->iobase + PIO1616L_DI_REG);
-
- return insn->n;
-}
-
-static int contec_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 0);
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = contec_di_insn_bits;
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = contec_do_insn_bits;
-
- return 0;
-}
-
-static struct comedi_driver contec_pci_dio_driver = {
- .driver_name = "contec_pci_dio",
- .module = THIS_MODULE,
- .auto_attach = contec_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int contec_pci_dio_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &contec_pci_dio_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id contec_pci_dio_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CONTEC, 0x8172) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, contec_pci_dio_pci_table);
-
-static struct pci_driver contec_pci_dio_pci_driver = {
- .name = "contec_pci_dio",
- .id_table = contec_pci_dio_pci_table,
- .probe = contec_pci_dio_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(contec_pci_dio_driver, contec_pci_dio_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
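For context on how the two subdevices registered above would be driven from user space, here is a minimal comedilib sketch (the device node /dev/comedi0 and the channel numbers are assumptions; error handling is kept to the bare minimum):

#include <stdio.h>
#include <comedilib.h>

/*
 * Exercise the subdevice layout registered above:
 * DI = subdevice 0, DO = subdevice 1.
 */
int main(void)
{
        comedi_t *dev;
        unsigned int bit;

        dev = comedi_open("/dev/comedi0");      /* assumed device node */
        if (!dev)
                return 1;

        comedi_dio_write(dev, 1, 0, 1);         /* set DO channel 0 high */
        comedi_dio_read(dev, 0, 0, &bit);       /* read DI channel 0 */
        printf("DI0 = %u\n", bit);

        comedi_close(dev);
        return 0;
}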
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
deleted file mode 100644
index a562df498b01..000000000000
--- a/drivers/staging/comedi/drivers/dac02.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * dac02.c
- * Comedi driver for DAC02 compatible boards
- * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * Based on the poc driver
- * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
- * Copyright (C) 2001 David A. Schleef <ds@schleef.org>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: dac02
- * Description: Comedi driver for DAC02 compatible boards
- * Devices: [Keithley Metrabyte] DAC-02 (dac02)
- * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
- * Updated: Tue, 11 Mar 2014 11:27:19 -0700
- * Status: unknown
- *
- * Configuration options:
- * [0] - I/O port base
- */
-
-#include <linux/module.h>
-
-#include "../comedidev.h"
-
-/*
- * The output range is selected by jumpering pins on the I/O connector.
- *
- * Range Chan # Jumper pins Output
- * ------------- ------ ------------- -----------------
- * 0 to 5V 0 21 to 22 24
- * 1 15 to 16 18
- * 0 to 10V 0 20 to 22 24
- * 1 14 to 16 18
- * +/-5V 0 21 to 22 23
- * 1 15 to 16 17
- * +/-10V 0 20 to 22 23
- * 1 14 to 16 17
- * 4 to 20mA 0 21 to 22 25
- * 1 15 to 16 19
- * AC reference 0 In on pin 22 24 (2-quadrant)
- * In on pin 22 23 (4-quadrant)
- * 1 In on pin 16 18 (2-quadrant)
- * In on pin 16 17 (4-quadrant)
- */
-static const struct comedi_lrange das02_ao_ranges = {
- 6, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10),
- RANGE_mA(4, 20),
- RANGE_ext(0, 1)
- }
-};
-
-/*
- * Register I/O map
- */
-#define DAC02_AO_LSB(x) (0x00 + ((x) * 2))
-#define DAC02_AO_MSB(x) (0x01 + ((x) * 2))
-
-static int dac02_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- s->readback[chan] = val;
-
- /*
- * Unipolar outputs are true binary encoding.
- * Bipolar outputs are complementary offset binary
- * (that is, 0 = +full scale, maxdata = -full scale).
- */
- if (comedi_range_is_bipolar(s, range))
- val = s->maxdata - val;
-
- /*
- * DACs are double-buffered.
- * Write LSB then MSB to latch output.
- */
- outb((val << 4) & 0xf0, dev->iobase + DAC02_AO_LSB(chan));
- outb((val >> 4) & 0xff, dev->iobase + DAC02_AO_MSB(chan));
- }
-
- return insn->n;
-}
-
-static int dac02_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x08);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table = &das02_ao_ranges;
- s->insn_write = dac02_ao_insn_write;
-
- return comedi_alloc_subdev_readback(s);
-}
-
-static struct comedi_driver dac02_driver = {
- .driver_name = "dac02",
- .module = THIS_MODULE,
- .attach = dac02_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(dac02_driver);
-
-MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_DESCRIPTION("Comedi driver for DAC02 compatible boards");
-MODULE_LICENSE("GPL");
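The write loop above combines two conventions worth seeing in isolation: bipolar ranges use complementary offset binary, and the 12-bit value is split across the LSB/MSB register pair. A standalone sketch of the same arithmetic, with the port writes replaced by printf (illustration only, not driver code):

#include <stdio.h>

/*
 * Mirror of the dac02 write arithmetic: complement the value for bipolar
 * ranges (0 = +full scale) and split it into the LSB/MSB register pair.
 */
static void dac02_words(unsigned int val, unsigned int maxdata, int bipolar)
{
        if (bipolar)
                val = maxdata - val;    /* complementary offset binary */

        printf("LSB reg <- 0x%02x\n", (val << 4) & 0xf0);
        printf("MSB reg <- 0x%02x\n", (val >> 4) & 0xff);
}

int main(void)
{
        dac02_words(0x800, 0xfff, 1);   /* midscale on a bipolar range */
        return 0;
}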
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
deleted file mode 100644
index 611b0a3ef5d7..000000000000
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * comedi/drivers/daqboard2000.c
- * hardware driver for IOtech DAQboard/2000
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-/*
- * Driver: daqboard2000
- * Description: IOTech DAQBoard/2000
- * Author: Anders Blomdell <anders.blomdell@control.lth.se>
- * Status: works
- * Updated: Mon, 14 Apr 2008 15:28:52 +0100
- * Devices: [IOTech] DAQBoard/2000 (daqboard2000)
- *
- * Much of the functionality of this driver was determined from reading
- * the source code for the Windows driver.
- *
- * The FPGA on the board requires firmware, which is available from
- * http://www.comedi.org in the comedi_nonfree_firmware tarball.
- *
- * Configuration options: not applicable, uses PCI auto config
- */
-/*
- * This card was obviously never intended to leave the Windows world,
- * since it lacked all kinds of hardware documentation (except for cable
- * pinouts, plug and pray has something to catch up with yet).
- *
- * With some help from our Swedish distributor, we got the Windows source code
- * for the card, and here are the findings so far.
- *
- * 1. A good document that describes the PCI interface chip is 9080db-106.pdf
- * available from http://www.plxtech.com/products/io/pci9080
- *
- * 2. The initialization done so far is:
- * a. program the FPGA (windows code sans a lot of error messages)
- * b.
- *
- * 3. Analog out seems to work OK with DACs disabled; if DACs are enabled,
- *    you have to output values to all enabled DACs before a result appears. I
- * guess that it has something to do with pacer clocks, but the source
- * gives me no clues. I'll keep it simple so far.
- *
- * 4. Analog in.
- * Each channel in the scanlist seems to be controlled by four
- * control words:
- *
- * Word0:
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * ! | | | ! | | | ! | | | ! | | | !
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * Word1:
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * ! | | | ! | | | ! | | | ! | | | !
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | |
- * +------+------+ | | | | +-- Digital input (??)
- * | | | | +---- 10 us settling time
- * | | | +------ Suspend acquisition (last to scan)
- * | | +-------- Simultaneous sample and hold
- * | +---------- Signed data format
- * +------------------------- Correction offset low
- *
- * Word2:
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * ! | | | ! | | | ! | | | ! | | | !
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | | | | |
- * +-----+ +--+--+ +++ +++ +--+--+
- * | | | | +----- Expansion channel
- * | | | +----------- Expansion gain
- * | | +--------------- Channel (low)
- * | +--------------------- Correction offset high
- * +----------------------------- Correction gain low
- * Word3:
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * ! | | | ! | | | ! | | | ! | | | !
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | | | |
- * +------+------+ | | +-+-+ | | +-- Low bank enable
- * | | | | | +---- High bank enable
- * | | | | +------ Hi/low select
- * | | | +---------- Gain (1,?,2,4,8,16,32,64)
- * | | +-------------- differential/single ended
- * | +---------------- Unipolar
- * +------------------------- Correction gain high
- *
- * 999. The card seems to have an incredible amount of capabilities, but
- * trying to reverse engineer them from the Windows source is beyond my
- * patience.
- *
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-
-#define DAQBOARD2000_FIRMWARE "daqboard2000_firmware.bin"
-
-#define DAQBOARD2000_SUBSYSTEM_IDS2 0x0002 /* Daqboard/2000 - 2 Dacs */
-#define DAQBOARD2000_SUBSYSTEM_IDS4 0x0004 /* Daqboard/2000 - 4 Dacs */
-
-/* Initialization bits for the Serial EEPROM Control Register */
-#define DAQBOARD2000_SECRProgPinHi 0x8001767e
-#define DAQBOARD2000_SECRProgPinLo 0x8000767e
-#define DAQBOARD2000_SECRLocalBusHi 0xc000767e
-#define DAQBOARD2000_SECRLocalBusLo 0x8000767e
-#define DAQBOARD2000_SECRReloadHi 0xa000767e
-#define DAQBOARD2000_SECRReloadLo 0x8000767e
-
-/* SECR status bits */
-#define DAQBOARD2000_EEPROM_PRESENT 0x10000000
-
-/* CPLD status bits */
-#define DAQBOARD2000_CPLD_INIT 0x0002
-#define DAQBOARD2000_CPLD_DONE 0x0004
-
-static const struct comedi_lrange range_daqboard2000_ai = {
- 13, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125),
- BIP_RANGE(0.156),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625),
- UNI_RANGE(0.3125)
- }
-};
-
-/*
- * Register Memory Map
- */
-#define acqControl 0x00 /* u16 */
-#define acqScanListFIFO 0x02 /* u16 */
-#define acqPacerClockDivLow 0x04 /* u32 */
-#define acqScanCounter 0x08 /* u16 */
-#define acqPacerClockDivHigh 0x0a /* u16 */
-#define acqTriggerCount 0x0c /* u16 */
-#define acqResultsFIFO 0x10 /* u16 */
-#define acqResultsShadow 0x14 /* u16 */
-#define acqAdcResult 0x18 /* u16 */
-#define dacScanCounter 0x1c /* u16 */
-#define dacControl 0x20 /* u16 */
-#define dacFIFO 0x24 /* s16 */
-#define dacPacerClockDiv 0x2a /* u16 */
-#define refDacs 0x2c /* u16 */
-#define dioControl 0x30 /* u16 */
-#define dioP3hsioData 0x32 /* s16 */
-#define dioP3Control 0x34 /* u16 */
-#define calEepromControl 0x36 /* u16 */
-#define dacSetting(x) (0x38 + (x)*2) /* s16 */
-#define dioP2ExpansionIO8Bit 0x40 /* s16 */
-#define ctrTmrControl 0x80 /* u16 */
-#define ctrInput(x) (0x88 + (x)*2) /* s16 */
-#define timerDivisor(x) (0xa0 + (x)*2) /* u16 */
-#define dmaControl 0xb0 /* u16 */
-#define trigControl 0xb2 /* u16 */
-#define calEeprom 0xb8 /* u16 */
-#define acqDigitalMark 0xba /* u16 */
-#define trigDacs 0xbc /* u16 */
-#define dioP2ExpansionIO16Bit(x) (0xc0 + (x)*2) /* s16 */
-
-/* Scan Sequencer programming */
-#define DAQBOARD2000_SeqStartScanList 0x0011
-#define DAQBOARD2000_SeqStopScanList 0x0010
-
-/* Prepare for acquisition */
-#define DAQBOARD2000_AcqResetScanListFifo 0x0004
-#define DAQBOARD2000_AcqResetResultsFifo 0x0002
-#define DAQBOARD2000_AcqResetConfigPipe 0x0001
-
-/* Acquisition status bits */
-#define DAQBOARD2000_AcqResultsFIFOMore1Sample 0x0001
-#define DAQBOARD2000_AcqResultsFIFOHasValidData 0x0002
-#define DAQBOARD2000_AcqResultsFIFOOverrun 0x0004
-#define DAQBOARD2000_AcqLogicScanning 0x0008
-#define DAQBOARD2000_AcqConfigPipeFull 0x0010
-#define DAQBOARD2000_AcqScanListFIFOEmpty 0x0020
-#define DAQBOARD2000_AcqAdcNotReady 0x0040
-#define DAQBOARD2000_ArbitrationFailure 0x0080
-#define DAQBOARD2000_AcqPacerOverrun 0x0100
-#define DAQBOARD2000_DacPacerOverrun 0x0200
-#define DAQBOARD2000_AcqHardwareError 0x01c0
-
-/* Pacer Clock Control */
-#define DAQBOARD2000_AdcPacerInternal 0x0030
-#define DAQBOARD2000_AdcPacerExternal 0x0032
-#define DAQBOARD2000_AdcPacerEnable 0x0031
-#define DAQBOARD2000_AdcPacerEnableDacPacer 0x0034
-#define DAQBOARD2000_AdcPacerDisable 0x0030
-#define DAQBOARD2000_AdcPacerNormalMode 0x0060
-#define DAQBOARD2000_AdcPacerCompatibilityMode 0x0061
-#define DAQBOARD2000_AdcPacerInternalOutEnable 0x0008
-#define DAQBOARD2000_AdcPacerExternalRising 0x0100
-
-/* DAC status */
-#define DAQBOARD2000_DacFull 0x0001
-#define DAQBOARD2000_RefBusy 0x0002
-#define DAQBOARD2000_TrgBusy 0x0004
-#define DAQBOARD2000_CalBusy 0x0008
-#define DAQBOARD2000_Dac0Busy 0x0010
-#define DAQBOARD2000_Dac1Busy 0x0020
-#define DAQBOARD2000_Dac2Busy 0x0040
-#define DAQBOARD2000_Dac3Busy 0x0080
-
-/* DAC control */
-#define DAQBOARD2000_Dac0Enable 0x0021
-#define DAQBOARD2000_Dac1Enable 0x0031
-#define DAQBOARD2000_Dac2Enable 0x0041
-#define DAQBOARD2000_Dac3Enable 0x0051
-#define DAQBOARD2000_DacEnableBit 0x0001
-#define DAQBOARD2000_Dac0Disable 0x0020
-#define DAQBOARD2000_Dac1Disable 0x0030
-#define DAQBOARD2000_Dac2Disable 0x0040
-#define DAQBOARD2000_Dac3Disable 0x0050
-#define DAQBOARD2000_DacResetFifo 0x0004
-#define DAQBOARD2000_DacPatternDisable 0x0060
-#define DAQBOARD2000_DacPatternEnable 0x0061
-#define DAQBOARD2000_DacSelectSignedData 0x0002
-#define DAQBOARD2000_DacSelectUnsignedData 0x0000
-
-/* Trigger Control */
-#define DAQBOARD2000_TrigAnalog 0x0000
-#define DAQBOARD2000_TrigTTL 0x0010
-#define DAQBOARD2000_TrigTransHiLo 0x0004
-#define DAQBOARD2000_TrigTransLoHi 0x0000
-#define DAQBOARD2000_TrigAbove 0x0000
-#define DAQBOARD2000_TrigBelow 0x0004
-#define DAQBOARD2000_TrigLevelSense 0x0002
-#define DAQBOARD2000_TrigEdgeSense 0x0000
-#define DAQBOARD2000_TrigEnable 0x0001
-#define DAQBOARD2000_TrigDisable 0x0000
-
-/* Reference Dac Selection */
-#define DAQBOARD2000_PosRefDacSelect 0x0100
-#define DAQBOARD2000_NegRefDacSelect 0x0000
-
-struct daq200_boardtype {
- const char *name;
- int id;
-};
-static const struct daq200_boardtype boardtypes[] = {
- {"ids2", DAQBOARD2000_SUBSYSTEM_IDS2},
- {"ids4", DAQBOARD2000_SUBSYSTEM_IDS4},
-};
-
-struct daqboard2000_private {
- enum {
- card_daqboard_2000
- } card;
- void __iomem *plx;
-};
-
-static void writeAcqScanListEntry(struct comedi_device *dev, u16 entry)
-{
- /* udelay(4); */
- writew(entry & 0x00ff, dev->mmio + acqScanListFIFO);
- /* udelay(4); */
- writew((entry >> 8) & 0x00ff, dev->mmio + acqScanListFIFO);
-}
-
-static void setup_sampling(struct comedi_device *dev, int chan, int gain)
-{
- u16 word0, word1, word2, word3;
-
- /* Channel 0-7 diff, channel 8-23 single ended */
- word0 = 0;
- word1 = 0x0004; /* Last scan */
- word2 = (chan << 6) & 0x00c0;
- switch (chan / 4) {
- case 0:
- word3 = 0x0001;
- break;
- case 1:
- word3 = 0x0002;
- break;
- case 2:
- word3 = 0x0005;
- break;
- case 3:
- word3 = 0x0006;
- break;
- case 4:
- word3 = 0x0041;
- break;
- case 5:
- word3 = 0x0042;
- break;
- default:
- word3 = 0;
- break;
- }
-/*
- dev->eeprom.correctionDACSE[i][j][k].offset = 0x800;
- dev->eeprom.correctionDACSE[i][j][k].gain = 0xc00;
-*/
- /* These should be read from EEPROM */
- word2 |= 0x0800;
- word3 |= 0xc000;
- writeAcqScanListEntry(dev, word0);
- writeAcqScanListEntry(dev, word1);
- writeAcqScanListEntry(dev, word2);
- writeAcqScanListEntry(dev, word3);
-}
-
-static int daqboard2000_ai_status(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readw(dev->mmio + acqControl);
- if (status & context)
- return 0;
- return -EBUSY;
-}
-
-static int daqboard2000_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int gain, chan;
- int ret;
- int i;
-
- writew(DAQBOARD2000_AcqResetScanListFifo |
- DAQBOARD2000_AcqResetResultsFifo |
- DAQBOARD2000_AcqResetConfigPipe, dev->mmio + acqControl);
-
- /*
- * If pacer clock is not set to some high value (> 10 us), we
- * risk multiple samples to be put into the result FIFO.
- */
- /* 1 second, should be long enough */
- writel(1000000, dev->mmio + acqPacerClockDivLow);
- writew(0, dev->mmio + acqPacerClockDivHigh);
-
- gain = CR_RANGE(insn->chanspec);
- chan = CR_CHAN(insn->chanspec);
-
- /* This doesn't look efficient. I decided to take the conservative
- * approach when I did the insn conversion. Perhaps it would be
- * better to have broken it completely, then someone would have been
- * forced to fix it. --ds */
- for (i = 0; i < insn->n; i++) {
- setup_sampling(dev, chan, gain);
- /* Enable reading from the scanlist FIFO */
- writew(DAQBOARD2000_SeqStartScanList, dev->mmio + acqControl);
-
- ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqConfigPipeFull);
- if (ret)
- return ret;
-
- writew(DAQBOARD2000_AdcPacerEnable, dev->mmio + acqControl);
-
- ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqLogicScanning);
- if (ret)
- return ret;
-
- ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqResultsFIFOHasValidData);
- if (ret)
- return ret;
-
- data[i] = readw(dev->mmio + acqResultsFIFO);
- writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
- writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
- }
-
- return i;
-}
-
-static int daqboard2000_ao_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int status;
-
- status = readw(dev->mmio + dacControl);
- if ((status & ((chan + 1) * 0x0010)) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int daqboard2000_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
- int ret;
-
- writew(val, dev->mmio + dacSetting(chan));
-
- ret = comedi_timeout(dev, s, insn, daqboard2000_ao_eoc, 0);
- if (ret)
- return ret;
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static void daqboard2000_resetLocalBus(struct comedi_device *dev)
-{
- struct daqboard2000_private *devpriv = dev->private;
-
- writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c);
- mdelay(10);
- writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c);
- mdelay(10);
-}
-
-static void daqboard2000_reloadPLX(struct comedi_device *dev)
-{
- struct daqboard2000_private *devpriv = dev->private;
-
- writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
- mdelay(10);
- writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c);
- mdelay(10);
- writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
- mdelay(10);
-}
-
-static void daqboard2000_pulseProgPin(struct comedi_device *dev)
-{
- struct daqboard2000_private *devpriv = dev->private;
-
- writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
- mdelay(10);
- writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
- mdelay(10); /* Not in the original code, but I like symmetry... */
-}
-
-static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
-{
- int result = 0;
- int i;
- int cpld;
-
- /* timeout after 50 tries -> 5ms */
- for (i = 0; i < 50; i++) {
- cpld = readw(dev->mmio + 0x1000);
- if ((cpld & mask) == mask) {
- result = 1;
- break;
- }
- udelay(100);
- }
- udelay(5);
- return result;
-}
-
-static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
-{
- int result = 0;
-
- udelay(10);
- writew(data, dev->mmio + 0x1000);
- if ((readw(dev->mmio + 0x1000) & DAQBOARD2000_CPLD_INIT) ==
- DAQBOARD2000_CPLD_INIT) {
- result = 1;
- }
- return result;
-}
-
-static int initialize_daqboard2000(struct comedi_device *dev,
- const u8 *cpld_array, size_t len,
- unsigned long context)
-{
- struct daqboard2000_private *devpriv = dev->private;
- int result = -EIO;
- /* Read the serial EEPROM control register */
- int secr;
- int retry;
- size_t i;
-
- /* Check to make sure the serial eeprom is present on the board */
- secr = readl(devpriv->plx + 0x6c);
- if (!(secr & DAQBOARD2000_EEPROM_PRESENT))
- return -EIO;
-
- for (retry = 0; retry < 3; retry++) {
- daqboard2000_resetLocalBus(dev);
- daqboard2000_reloadPLX(dev);
- daqboard2000_pulseProgPin(dev);
- if (daqboard2000_pollCPLD(dev, DAQBOARD2000_CPLD_INIT)) {
- for (i = 0; i < len; i++) {
- if (cpld_array[i] == 0xff &&
- cpld_array[i + 1] == 0x20)
- break;
- }
- for (; i < len; i += 2) {
- int data =
- (cpld_array[i] << 8) + cpld_array[i + 1];
- if (!daqboard2000_writeCPLD(dev, data))
- break;
- }
- if (i >= len) {
- daqboard2000_resetLocalBus(dev);
- daqboard2000_reloadPLX(dev);
- result = 0;
- break;
- }
- }
- }
- return result;
-}
-
-static void daqboard2000_adcStopDmaTransfer(struct comedi_device *dev)
-{
-}
-
-static void daqboard2000_adcDisarm(struct comedi_device *dev)
-{
- /* Disable hardware triggers */
- udelay(2);
- writew(DAQBOARD2000_TrigAnalog | DAQBOARD2000_TrigDisable,
- dev->mmio + trigControl);
- udelay(2);
- writew(DAQBOARD2000_TrigTTL | DAQBOARD2000_TrigDisable,
- dev->mmio + trigControl);
-
- /* Stop the scan list FIFO from loading the configuration pipe */
- udelay(2);
- writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
-
- /* Stop the pacer clock */
- udelay(2);
- writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
-
- /* Stop the input dma (abort channel 1) */
- daqboard2000_adcStopDmaTransfer(dev);
-}
-
-static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
-{
- unsigned int val;
- int timeout;
-
- /* Set the + reference dac value in the FPGA */
- writew(0x80 | DAQBOARD2000_PosRefDacSelect, dev->mmio + refDacs);
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(dev->mmio + dacControl);
- if ((val & DAQBOARD2000_RefBusy) == 0)
- break;
- udelay(2);
- }
-
- /* Set the - reference dac value in the FPGA */
- writew(0x80 | DAQBOARD2000_NegRefDacSelect, dev->mmio + refDacs);
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(dev->mmio + dacControl);
- if ((val & DAQBOARD2000_RefBusy) == 0)
- break;
- udelay(2);
- }
-}
-
-static void daqboard2000_initializeCtrs(struct comedi_device *dev)
-{
-}
-
-static void daqboard2000_initializeTmrs(struct comedi_device *dev)
-{
-}
-
-static void daqboard2000_dacDisarm(struct comedi_device *dev)
-{
-}
-
-static void daqboard2000_initializeAdc(struct comedi_device *dev)
-{
- daqboard2000_adcDisarm(dev);
- daqboard2000_activateReferenceDacs(dev);
- daqboard2000_initializeCtrs(dev);
- daqboard2000_initializeTmrs(dev);
-}
-
-static void daqboard2000_initializeDac(struct comedi_device *dev)
-{
- daqboard2000_dacDisarm(dev);
-}
-
-static int daqboard2000_8255_cb(struct comedi_device *dev,
- int dir, int port, int data,
- unsigned long iobase)
-{
- if (dir) {
- writew(data, dev->mmio + iobase + port * 2);
- return 0;
- }
- return readw(dev->mmio + iobase + port * 2);
-}
-
-static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
- struct pci_dev *pcidev)
-{
- const struct daq200_boardtype *board;
- int i;
-
- if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
- board = &boardtypes[i];
- if (pcidev->subsystem_device == board->id)
- return board;
- }
- return NULL;
-}
-
-static int daqboard2000_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct daq200_boardtype *board;
- struct daqboard2000_private *devpriv;
- struct comedi_subdevice *s;
- int result;
-
- board = daqboard2000_find_boardinfo(dev, pcidev);
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- result = comedi_pci_enable(dev);
- if (result)
- return result;
-
- devpriv->plx = pci_ioremap_bar(pcidev, 0);
- dev->mmio = pci_ioremap_bar(pcidev, 2);
- if (!devpriv->plx || !dev->mmio)
- return -ENOMEM;
-
- result = comedi_alloc_subdevices(dev, 3);
- if (result)
- return result;
-
- readl(devpriv->plx + 0x6c);
-
- result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
- DAQBOARD2000_FIRMWARE,
- initialize_daqboard2000, 0);
- if (result < 0)
- return result;
-
- daqboard2000_initializeAdc(dev);
- daqboard2000_initializeDac(dev);
-
- s = &dev->subdevices[0];
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 24;
- s->maxdata = 0xffff;
- s->insn_read = daqboard2000_ai_insn_read;
- s->range_table = &range_daqboard2000_ai;
-
- s = &dev->subdevices[1];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0xffff;
- s->insn_write = daqboard2000_ao_insn_write;
- s->range_table = &range_bipolar10;
-
- result = comedi_alloc_subdev_readback(s);
- if (result)
- return result;
-
- s = &dev->subdevices[2];
- result = subdev_8255_init(dev, s, daqboard2000_8255_cb,
- dioP2ExpansionIO8Bit);
- if (result)
- return result;
-
- return 0;
-}
-
-static void daqboard2000_detach(struct comedi_device *dev)
-{
- struct daqboard2000_private *devpriv = dev->private;
-
- if (devpriv && devpriv->plx)
- iounmap(devpriv->plx);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver daqboard2000_driver = {
- .driver_name = "daqboard2000",
- .module = THIS_MODULE,
- .auto_attach = daqboard2000_auto_attach,
- .detach = daqboard2000_detach,
-};
-
-static int daqboard2000_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &daqboard2000_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id daqboard2000_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_IOTECH, 0x0409) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, daqboard2000_pci_table);
-
-static struct pci_driver daqboard2000_pci_driver = {
- .name = "daqboard2000",
- .id_table = daqboard2000_pci_table,
- .probe = daqboard2000_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(daqboard2000_driver, daqboard2000_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(DAQBOARD2000_FIRMWARE);
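As a usage note for the AI subdevice set up above, a minimal comedilib sketch that reads one sample and converts it to a physical value; /dev/comedi0, channel 0 and range index 0 (+/-10 V in the table above) are assumptions:

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
        comedi_t *dev;
        comedi_range *rng;
        lsampl_t raw, maxdata;

        dev = comedi_open("/dev/comedi0");      /* assumed device node */
        if (!dev)
                return 1;

        rng = comedi_get_range(dev, 0, 0, 0);
        maxdata = comedi_get_maxdata(dev, 0, 0);

        if (!rng || comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &raw) < 0) {
                comedi_close(dev);
                return 1;
        }
        printf("raw=%u  value=%g\n", raw, comedi_to_phys(raw, rng, maxdata));

        comedi_close(dev);
        return 0;
}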
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
deleted file mode 100644
index 3d8fc6ad44df..000000000000
--- a/drivers/staging/comedi/drivers/das08.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * comedi/drivers/das08.c
- * comedi module for common DAS08 support (used by ISA/PCI/PCMCIA drivers)
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- * Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- * Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-
-#include "../comedidev.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
-#include "das08.h"
-
-/*
- * Data format of DAS08_AI_LSB_REG and DAS08_AI_MSB_REG depends on
- * 'ai_encoding' member of board structure:
- *
- * das08_encode12 : DATA[11..4] = MSB[7..0], DATA[3..0] = LSB[7..4].
- * das08_pcm_encode12 : DATA[11..8] = MSB[3..0], DATA[7..0] = LSB[7..0].
- * das08_encode16 : SIGN = MSB[7], MAGNITUDE[14..8] = MSB[6..0],
- * MAGNITUDE[7..0] = LSB[7..0].
- * SIGN==0 for negative input, SIGN==1 for positive input.
- * Note: when read a second time after conversion
- * complete, MSB[7] is an "over-range" bit.
- */
-#define DAS08_AI_LSB_REG 0x00 /* (R) AI least significant bits */
-#define DAS08_AI_MSB_REG 0x01 /* (R) AI most significant bits */
-#define DAS08_AI_TRIG_REG 0x01 /* (W) AI software trigger */
-#define DAS08_STATUS_REG 0x02 /* (R) status */
-#define DAS08_STATUS_AI_BUSY BIT(7) /* AI conversion in progress */
-/*
- * The IRQ status bit is set to 1 by a rising edge on the external interrupt
- * input (which may be jumpered to the pacer output). It is cleared by
- * setting the INTE control bit to 0. Not present on "JR" boards.
- */
-#define DAS08_STATUS_IRQ BIT(3) /* latched interrupt input */
-/* digital inputs (not "JR" boards) */
-#define DAS08_STATUS_DI(x) (((x) & 0x70) >> 4)
-#define DAS08_CONTROL_REG 0x02 /* (W) control */
-/*
- * Note: The AI multiplexor channel can also be read from status register using
- * the same mask.
- */
-#define DAS08_CONTROL_MUX_MASK 0x7 /* multiplexor channel mask */
-#define DAS08_CONTROL_MUX(x) ((x) & DAS08_CONTROL_MUX_MASK) /* mux channel */
-#define DAS08_CONTROL_INTE BIT(3) /* interrupt enable (not "JR" boards) */
-#define DAS08_CONTROL_DO_MASK 0xf0 /* digital outputs mask (not "JR") */
-/* digital outputs (not "JR" boards) */
-#define DAS08_CONTROL_DO(x) (((x) << 4) & DAS08_CONTROL_DO_MASK)
-/*
- * (R/W) programmable AI gain ("PGx" and "AOx" boards):
- * + bits 3..0 (R/W) show/set the gain for the current AI mux channel
- * + bits 6..4 (R) show the current AI mux channel
- * + bit 7 (R) not used
- */
-#define DAS08_GAIN_REG 0x03
-
-#define DAS08JR_DI_REG 0x03 /* (R) digital inputs ("JR" boards) */
-#define DAS08JR_DO_REG 0x03 /* (W) digital outputs ("JR" boards) */
-/* (W) analog output l.s.b. registers for 2 channels ("JR" boards) */
-#define DAS08JR_AO_LSB_REG(x) ((x) ? 0x06 : 0x04)
-/* (W) analog output m.s.b. registers for 2 channels ("JR" boards) */
-#define DAS08JR_AO_MSB_REG(x) ((x) ? 0x07 : 0x05)
-/*
- * (R) update analog outputs ("JR" boards set for simultaneous output)
- * (same register as digital inputs)
- */
-#define DAS08JR_AO_UPDATE_REG 0x03
-
-/* (W) analog output l.s.b. registers for 2 channels ("AOx" boards) */
-#define DAS08AOX_AO_LSB_REG(x) ((x) ? 0x0a : 0x08)
-/* (W) analog output m.s.b. registers for 2 channels ("AOx" boards) */
-#define DAS08AOX_AO_MSB_REG(x) ((x) ? 0x0b : 0x09)
-/*
- * (R) update analog outputs ("AOx" boards set for simultaneous output)
- * (any of the analog output registers could be used for this)
- */
-#define DAS08AOX_AO_UPDATE_REG 0x08
-
-/* gainlist same as _pgx_ below */
-
-static const struct comedi_lrange das08_pgl_ai_range = {
- 9, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange das08_pgh_ai_range = {
- 12, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange das08_pgm_ai_range = {
- 9, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange *const das08_ai_lranges[] = {
- [das08_pg_none] = &range_unknown,
- [das08_bipolar5] = &range_bipolar5,
- [das08_pgh] = &das08_pgh_ai_range,
- [das08_pgl] = &das08_pgl_ai_range,
- [das08_pgm] = &das08_pgm_ai_range,
-};
-
-static const int das08_pgh_ai_gainlist[] = {
- 8, 0, 10, 2, 12, 4, 14, 6, 1, 3, 5, 7
-};
-static const int das08_pgl_ai_gainlist[] = { 8, 0, 2, 4, 6, 1, 3, 5, 7 };
-static const int das08_pgm_ai_gainlist[] = { 8, 0, 10, 12, 14, 9, 11, 13, 15 };
-
-static const int *const das08_ai_gainlists[] = {
- [das08_pg_none] = NULL,
- [das08_bipolar5] = NULL,
- [das08_pgh] = das08_pgh_ai_gainlist,
- [das08_pgl] = das08_pgl_ai_gainlist,
- [das08_pgm] = das08_pgm_ai_gainlist,
-};
-
-static int das08_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DAS08_STATUS_REG);
- if ((status & DAS08_STATUS_AI_BUSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int das08_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct das08_board_struct *board = dev->board_ptr;
- struct das08_private_struct *devpriv = dev->private;
- int n;
- int chan;
- int range;
- int lsb, msb;
- int ret;
-
- chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
-
- /* clear crap */
- inb(dev->iobase + DAS08_AI_LSB_REG);
- inb(dev->iobase + DAS08_AI_MSB_REG);
-
- /* set multiplexer */
- /* lock to prevent race with digital output */
- spin_lock(&dev->spinlock);
- devpriv->do_mux_bits &= ~DAS08_CONTROL_MUX_MASK;
- devpriv->do_mux_bits |= DAS08_CONTROL_MUX(chan);
- outb(devpriv->do_mux_bits, dev->iobase + DAS08_CONTROL_REG);
- spin_unlock(&dev->spinlock);
-
- if (devpriv->pg_gainlist) {
- /* set gain/range */
- range = CR_RANGE(insn->chanspec);
- outb(devpriv->pg_gainlist[range],
- dev->iobase + DAS08_GAIN_REG);
- }
-
- for (n = 0; n < insn->n; n++) {
- /* clear over-range bits for 16-bit boards */
- if (board->ai_nbits == 16)
- if (inb(dev->iobase + DAS08_AI_MSB_REG) & 0x80)
- dev_info(dev->class_dev, "over-range\n");
-
- /* trigger conversion */
- outb_p(0, dev->iobase + DAS08_AI_TRIG_REG);
-
- ret = comedi_timeout(dev, s, insn, das08_ai_eoc, 0);
- if (ret)
- return ret;
-
- msb = inb(dev->iobase + DAS08_AI_MSB_REG);
- lsb = inb(dev->iobase + DAS08_AI_LSB_REG);
- if (board->ai_encoding == das08_encode12) {
- data[n] = (lsb >> 4) | (msb << 4);
- } else if (board->ai_encoding == das08_pcm_encode12) {
- data[n] = (msb << 8) + lsb;
- } else if (board->ai_encoding == das08_encode16) {
- /*
- * "JR" 16-bit boards are sign-magnitude.
- *
- * XXX The manual seems to imply that 0 is full-scale
- * negative and 65535 is full-scale positive, but the
- * original COMEDI patch to add support for the
- * DAS08/JR/16 and DAS08/JR/16-AO boards has it
- * encoded as sign-magnitude. Assume the original
- * COMEDI code is correct for now.
- */
- unsigned int magnitude = lsb | ((msb & 0x7f) << 8);
-
- /*
- * MSB bit 7 is 0 for negative, 1 for positive voltage.
- * COMEDI 16-bit bipolar data value for 0V is 0x8000.
- */
- if (msb & 0x80)
- data[n] = (1 << 15) + magnitude;
- else
- data[n] = (1 << 15) - magnitude;
- } else {
- dev_err(dev->class_dev, "bug! unknown ai encoding\n");
- return -1;
- }
- }
-
- return n;
-}
-
-static int das08_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[0] = 0;
- data[1] = DAS08_STATUS_DI(inb(dev->iobase + DAS08_STATUS_REG));
-
- return insn->n;
-}
-
-static int das08_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct das08_private_struct *devpriv = dev->private;
-
- if (comedi_dio_update_state(s, data)) {
- /* prevent race with setting of analog input mux */
- spin_lock(&dev->spinlock);
- devpriv->do_mux_bits &= ~DAS08_CONTROL_DO_MASK;
- devpriv->do_mux_bits |= DAS08_CONTROL_DO(s->state);
- outb(devpriv->do_mux_bits, dev->iobase + DAS08_CONTROL_REG);
- spin_unlock(&dev->spinlock);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int das08jr_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[0] = 0;
- data[1] = inb(dev->iobase + DAS08JR_DI_REG);
-
- return insn->n;
-}
-
-static int das08jr_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAS08JR_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void das08_ao_set_data(struct comedi_device *dev,
- unsigned int chan, unsigned int data)
-{
- const struct das08_board_struct *board = dev->board_ptr;
- unsigned char lsb;
- unsigned char msb;
-
- lsb = data & 0xff;
- msb = (data >> 8) & 0xff;
- if (board->is_jr) {
- outb(lsb, dev->iobase + DAS08JR_AO_LSB_REG(chan));
- outb(msb, dev->iobase + DAS08JR_AO_MSB_REG(chan));
- /* load DACs */
- inb(dev->iobase + DAS08JR_AO_UPDATE_REG);
- } else {
- outb(lsb, dev->iobase + DAS08AOX_AO_LSB_REG(chan));
- outb(msb, dev->iobase + DAS08AOX_AO_MSB_REG(chan));
- /* load DACs */
- inb(dev->iobase + DAS08AOX_AO_UPDATE_REG);
- }
-}
-
-static int das08_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- das08_ao_set_data(dev, chan, val);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
-{
- const struct das08_board_struct *board = dev->board_ptr;
- struct das08_private_struct *devpriv = dev->private;
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- dev->iobase = iobase;
-
- dev->board_name = board->name;
-
- ret = comedi_alloc_subdevices(dev, 6);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* ai */
- if (board->ai_nbits) {
- s->type = COMEDI_SUBD_AI;
- /*
- * XXX some boards actually have differential
- * inputs instead of single ended.
- * The driver does nothing with arefs though,
- * so it's no big deal.
- */
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8;
- s->maxdata = (1 << board->ai_nbits) - 1;
- s->range_table = das08_ai_lranges[board->ai_pg];
- s->insn_read = das08_ai_insn_read;
- devpriv->pg_gainlist = das08_ai_gainlists[board->ai_pg];
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[1];
- /* ao */
- if (board->ao_nbits) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = (1 << board->ao_nbits) - 1;
- s->range_table = &range_bipolar5;
- s->insn_write = das08_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* initialize all channels to 0V */
- for (i = 0; i < s->n_chan; i++) {
- s->readback[i] = s->maxdata / 2;
- das08_ao_set_data(dev, i, s->readback[i]);
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[2];
- /* di */
- if (board->di_nchan) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->di_nchan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = board->is_jr ? das08jr_di_insn_bits :
- das08_di_insn_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[3];
- /* do */
- if (board->do_nchan) {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->do_nchan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = board->is_jr ? das08jr_do_insn_bits :
- das08_do_insn_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[4];
- /* 8255 */
- if (board->i8255_offset != 0) {
- ret = subdev_8255_init(dev, s, NULL, board->i8255_offset);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Counter subdevice (8254) */
- s = &dev->subdevices[5];
- if (board->i8254_offset) {
- dev->pacer = comedi_8254_init(dev->iobase + board->i8254_offset,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- comedi_8254_subdevice_init(s, dev->pacer);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(das08_common_attach);
-
-static int __init das08_init(void)
-{
- return 0;
-}
-module_init(das08_init);
-
-static void __exit das08_exit(void)
-{
-}
-module_exit(das08_exit);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi common DAS08 support module");
-MODULE_LICENSE("GPL");
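The three AI encodings documented above can be exercised outside the driver; the following standalone sketch mirrors the decode logic in das08_ai_insn_read() (illustration only, not part of the deleted file):

#include <stdio.h>

static unsigned int das08_decode12(unsigned int msb, unsigned int lsb)
{
        return (lsb >> 4) | (msb << 4);         /* 12-bit, left-justified LSB */
}

static unsigned int das08_pcm_decode12(unsigned int msb, unsigned int lsb)
{
        return (msb << 8) + lsb;                /* 12-bit, right-justified */
}

static unsigned int das08_decode16(unsigned int msb, unsigned int lsb)
{
        unsigned int magnitude = lsb | ((msb & 0x7f) << 8);

        /* sign-magnitude -> COMEDI offset binary (0x8000 == 0 V) */
        return (msb & 0x80) ? (1 << 15) + magnitude : (1 << 15) - magnitude;
}

int main(void)
{
        printf("0x%03x\n", das08_decode12(0x7f, 0xf0));         /* 0x7ff */
        printf("0x%03x\n", das08_pcm_decode12(0x07, 0xff));     /* 0x7ff */
        printf("0x%04x\n", das08_decode16(0x80, 0x00));         /* 0 V -> 0x8000 */
        printf("0x%04x\n", das08_decode16(0xff, 0xff));         /* +full scale */
        return 0;
}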
diff --git a/drivers/staging/comedi/drivers/das08.h b/drivers/staging/comedi/drivers/das08.h
deleted file mode 100644
index d27044cb7158..000000000000
--- a/drivers/staging/comedi/drivers/das08.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * das08.h
- *
- * Header for common DAS08 support (used by ISA/PCI/PCMCIA drivers)
- *
- * Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DAS08_H
-#define _DAS08_H
-
-#include <linux/types.h>
-
-struct comedi_device;
-
-/* different ways ai data is encoded in first two registers */
-enum das08_ai_encoding { das08_encode12, das08_encode16, das08_pcm_encode12 };
-/* types of ai range table used by different boards */
-enum das08_lrange {
- das08_pg_none, das08_bipolar5, das08_pgh, das08_pgl, das08_pgm
-};
-
-struct das08_board_struct {
- const char *name;
- bool is_jr; /* true for 'JR' boards */
- unsigned int ai_nbits;
- enum das08_lrange ai_pg;
- enum das08_ai_encoding ai_encoding;
- unsigned int ao_nbits;
- unsigned int di_nchan;
- unsigned int do_nchan;
- unsigned int i8255_offset;
- unsigned int i8254_offset;
- unsigned int iosize; /* number of ioports used */
-};
-
-struct das08_private_struct {
- /* bits for do/mux register on boards without separate do register */
- unsigned int do_mux_bits;
- const unsigned int *pg_gainlist;
-};
-
-int das08_common_attach(struct comedi_device *dev, unsigned long iobase);
-
-#endif /* _DAS08_H */
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
deleted file mode 100644
index 9c02b17a2834..000000000000
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- comedi/drivers/das08_cs.c
- DAS08 driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- PCMCIA support code for this driver is adapted from the dummy_cs.c
- driver of the Linux PCMCIA Card Services package.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-*/
-/*
-Driver: das08_cs
-Description: DAS-08 PCMCIA boards
-Author: Warren Jasper, ds, Frank Hess
-Devices: [ComputerBoards] PCM-DAS08 (pcm-das08)
-Status: works
-
-This is the PCMCIA-specific support split off from the
-das08 driver.
-
-Options (for pcm-das08):
- NONE
-
-Command support does not exist, but could be added for this board.
-*/
-
-#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
-
-#include "das08.h"
-
-static const struct das08_board_struct das08_cs_boards[] = {
- {
- .name = "pcm-das08",
- .ai_nbits = 12,
- .ai_pg = das08_bipolar5,
- .ai_encoding = das08_pcm_encode12,
- .di_nchan = 3,
- .do_nchan = 3,
- .iosize = 16,
- },
-};
-
-static int das08_cs_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- struct das08_private_struct *devpriv;
- unsigned long iobase;
- int ret;
-
- /* The das08 driver needs the board_ptr */
- dev->board_ptr = &das08_cs_boards[0];
-
- link->config_flags |= CONF_AUTO_SET_IO;
- ret = comedi_pcmcia_enable(dev, NULL);
- if (ret)
- return ret;
- iobase = link->resource[0]->start;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- return das08_common_attach(dev, iobase);
-}
-
-static struct comedi_driver driver_das08_cs = {
- .driver_name = "das08_cs",
- .module = THIS_MODULE,
- .auto_attach = das08_cs_auto_attach,
- .detach = comedi_pcmcia_disable,
-};
-
-static int das08_pcmcia_attach(struct pcmcia_device *link)
-{
- return comedi_pcmcia_auto_config(link, &driver_das08_cs);
-}
-
-static const struct pcmcia_device_id das08_cs_id_table[] = {
- PCMCIA_DEVICE_MANF_CARD(0x01c5, 0x4001),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, das08_cs_id_table);
-
-static struct pcmcia_driver das08_cs_driver = {
- .name = "pcm-das08",
- .owner = THIS_MODULE,
- .id_table = das08_cs_id_table,
- .probe = das08_pcmcia_attach,
- .remove = comedi_pcmcia_auto_unconfig,
-};
-module_comedi_pcmcia_driver(driver_das08_cs, das08_cs_driver);
-
-MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
-MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
-MODULE_DESCRIPTION("Comedi driver for ComputerBoards DAS-08 PCMCIA boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das08_isa.c b/drivers/staging/comedi/drivers/das08_isa.c
deleted file mode 100644
index cdefc99b6db3..000000000000
--- a/drivers/staging/comedi/drivers/das08_isa.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * das08_isa.c
- * comedi driver for DAS08 ISA/PC-104 boards
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- * Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- * Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: das08_isa
- * Description: DAS-08 ISA/PC-104 compatible boards
- * Devices: [Keithley Metrabyte] DAS08 (isa-das08),
- * [ComputerBoards] DAS08 (isa-das08), DAS08-PGM (das08-pgm),
- * DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
- * DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
- * DAS08/JR-16-AO (das08jr-16-ao), PC104-DAS08 (pc104-das08),
- * DAS08/JR/16 (das08jr/16)
- * Author: Warren Jasper, ds, Frank Hess
- * Updated: Fri, 31 Aug 2012 19:19:06 +0100
- * Status: works
- *
- * This is the ISA/PC-104-specific support split off from the das08 driver.
- *
- * Configuration Options:
- * [0] - base io address
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "das08.h"
-
-static const struct das08_board_struct das08_isa_boards[] = {
- {
- /* cio-das08.pdf */
- .name = "isa-das08",
- .ai_nbits = 12,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 8,
- .i8254_offset = 4,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08pgx.pdf */
- .name = "das08-pgm",
- .ai_nbits = 12,
- .ai_pg = das08_pgm,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08pgx.pdf */
- .name = "das08-pgh",
- .ai_nbits = 12,
- .ai_pg = das08_pgh,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08pgx.pdf */
- .name = "das08-pgl",
- .ai_nbits = 12,
- .ai_pg = das08_pgl,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08_aox.pdf */
- .name = "das08-aoh",
- .ai_nbits = 12,
- .ai_pg = das08_pgh,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0x0c,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08_aox.pdf */
- .name = "das08-aol",
- .ai_nbits = 12,
- .ai_pg = das08_pgl,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0x0c,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08_aox.pdf */
- .name = "das08-aom",
- .ai_nbits = 12,
- .ai_pg = das08_pgm,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8255_offset = 0x0c,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08-jr-ao.pdf */
- .name = "das08/jr-ao",
- .is_jr = true,
- .ai_nbits = 12,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode12,
- .ao_nbits = 12,
- .di_nchan = 8,
- .do_nchan = 8,
- .iosize = 16, /* unchecked */
- }, {
- /* cio-das08jr-16-ao.pdf */
- .name = "das08jr-16-ao",
- .is_jr = true,
- .ai_nbits = 16,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode16,
- .ao_nbits = 16,
- .di_nchan = 8,
- .do_nchan = 8,
- .i8254_offset = 0x04,
- .iosize = 16, /* unchecked */
- }, {
- .name = "pc104-das08",
- .ai_nbits = 12,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 4,
- .iosize = 16, /* unchecked */
- }, {
- .name = "das08jr/16",
- .is_jr = true,
- .ai_nbits = 16,
- .ai_pg = das08_pg_none,
- .ai_encoding = das08_encode16,
- .di_nchan = 8,
- .do_nchan = 8,
- .iosize = 16, /* unchecked */
- },
-};
-
-static int das08_isa_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct das08_board_struct *board = dev->board_ptr;
- struct das08_private_struct *devpriv;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], board->iosize);
- if (ret)
- return ret;
-
- return das08_common_attach(dev, dev->iobase);
-}
-
-static struct comedi_driver das08_isa_driver = {
- .driver_name = "isa-das08",
- .module = THIS_MODULE,
- .attach = das08_isa_attach,
- .detach = comedi_legacy_detach,
- .board_name = &das08_isa_boards[0].name,
- .num_names = ARRAY_SIZE(das08_isa_boards),
- .offset = sizeof(das08_isa_boards[0]),
-};
-module_comedi_driver(das08_isa_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
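The .board_name/.num_names/.offset fields above let the comedi core pick a board entry by the name passed at configuration time. A simplified sketch of that table walk, under the assumption that entries are laid out back to back and matched by strcmp (not the actual core code):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct boardinfo {
        const char *name;
        unsigned int iosize;
};

static const struct boardinfo boards[] = {
        { .name = "isa-das08", .iosize = 16 },
        { .name = "das08-pgm", .iosize = 16 },
};

/* Walk the table in steps of 'offset' bytes and match by name. */
static const struct boardinfo *find_board(const struct boardinfo *tbl,
                                          int num, size_t offset,
                                          const char *want)
{
        int i;

        for (i = 0; i < num; i++) {
                const struct boardinfo *b =
                        (const void *)((const char *)tbl + i * offset);

                if (strcmp(b->name, want) == 0)
                        return b;
        }
        return NULL;
}

int main(void)
{
        const struct boardinfo *b = find_board(boards, 2, sizeof(boards[0]),
                                               "das08-pgm");

        printf("%s: iosize %u\n", b ? b->name : "not found",
               b ? b->iosize : 0);
        return 0;
}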
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
deleted file mode 100644
index d8d27fa44491..000000000000
--- a/drivers/staging/comedi/drivers/das08_pci.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * das08_pci.c
- * comedi driver for DAS08 PCI boards
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- * Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- * Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: das08_pci
- * Description: DAS-08 PCI compatible boards
- * Devices: [ComputerBoards] PCI-DAS08 (pci-das08)
- * Author: Warren Jasper, ds, Frank Hess
- * Updated: Fri, 31 Aug 2012 19:19:06 +0100
- * Status: works
- *
- * This is the PCI-specific support split off from the das08 driver.
- *
- * Configuration Options: not applicable, uses PCI auto config
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "das08.h"
-
-static const struct das08_board_struct das08_pci_boards[] = {
- {
- .name = "pci-das08",
- .ai_nbits = 12,
- .ai_pg = das08_bipolar5,
- .ai_encoding = das08_encode12,
- .di_nchan = 3,
- .do_nchan = 4,
- .i8254_offset = 4,
- .iosize = 8,
- },
-};
-
-static int das08_pci_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pdev = comedi_to_pci_dev(dev);
- struct das08_private_struct *devpriv;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* The das08 driver needs the board_ptr */
- dev->board_ptr = &das08_pci_boards[0];
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pdev, 2);
-
- return das08_common_attach(dev, dev->iobase);
-}
-
-static struct comedi_driver das08_pci_comedi_driver = {
- .driver_name = "pci-das08",
- .module = THIS_MODULE,
- .auto_attach = das08_pci_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int das08_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &das08_pci_comedi_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id das08_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0029) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, das08_pci_table);
-
-static struct pci_driver das08_pci_driver = {
- .name = "pci-das08",
- .id_table = das08_pci_table,
- .probe = das08_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(das08_pci_comedi_driver, das08_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
deleted file mode 100644
index 056bca9c67d5..000000000000
--- a/drivers/staging/comedi/drivers/das16.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-/*
- * das16.c
- * DAS16 driver
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- * Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
- * Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: das16
- * Description: DAS16 compatible boards
- * Author: Sam Moore, Warren Jasper, ds, Chris Baugher, Frank Hess, Roman Fietze
- * Devices: [Keithley Metrabyte] DAS-16 (das-16), DAS-16G (das-16g),
- * DAS-16F (das-16f), DAS-1201 (das-1201), DAS-1202 (das-1202),
- * DAS-1401 (das-1401), DAS-1402 (das-1402), DAS-1601 (das-1601),
- * DAS-1602 (das-1602),
- * [ComputerBoards] PC104-DAS16/JR (pc104-das16jr),
- * PC104-DAS16JR/16 (pc104-das16jr/16), CIO-DAS16 (cio-das16),
- * CIO-DAS16F (cio-das16/f), CIO-DAS16/JR (cio-das16/jr),
- * CIO-DAS16JR/16 (cio-das16jr/16), CIO-DAS1401/12 (cio-das1401/12),
- * CIO-DAS1402/12 (cio-das1402/12), CIO-DAS1402/16 (cio-das1402/16),
- * CIO-DAS1601/12 (cio-das1601/12), CIO-DAS1602/12 (cio-das1602/12),
- * CIO-DAS1602/16 (cio-das1602/16), CIO-DAS16/330 (cio-das16/330)
- * Status: works
- * Updated: 2003-10-12
- *
- * A rewrite of the das16 and das1600 drivers.
- *
- * Options:
- * [0] - base io address
- * [1] - irq (does nothing, irq is not used anymore)
- * [2] - dma channel (optional, required for comedi_command support)
- * [3] - master clock speed in MHz (optional, 1 or 10, ignored if
- * board can probe clock, defaults to 1)
- * [4] - analog input range lowest voltage in microvolts (optional,
- * only useful if your board does not have software
- * programmable gain)
- * [5] - analog input range highest voltage in microvolts (optional,
- * only useful if board does not have software programmable
- * gain)
- * [6] - analog output range lowest voltage in microvolts (optional)
- * [7] - analog output range highest voltage in microvolts (optional)
- *
- * Passing a zero for an option is the same as leaving it unspecified.
- */
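For illustration only (the values below are assumptions, not taken from any particular setup): an option list of 0x260,0,3,10 would select base address 0x260, no IRQ, DMA channel 3 for comedi_command support, and a 10 MHz master clock, leaving the analog input and output ranges at their defaults.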
-
-/*
- * Testing and debugging help provided by Daniel Koch.
- *
- * Keithley Manuals:
- * 2309.PDF (das16)
- * 4919.PDF (das1400, 1600)
- * 4922.PDF (das-1400)
- * 4923.PDF (das1200, 1400, 1600)
- *
- * ComputerBoards manuals are also available from their website,
- * www.measurementcomputing.com
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-#include "8255.h"
-
-#define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */
-
-/*
- * Register I/O map
- */
-#define DAS16_TRIG_REG 0x00
-#define DAS16_AI_LSB_REG 0x00
-#define DAS16_AI_MSB_REG 0x01
-#define DAS16_MUX_REG 0x02
-#define DAS16_DIO_REG 0x03
-#define DAS16_AO_LSB_REG(x) ((x) ? 0x06 : 0x04)
-#define DAS16_AO_MSB_REG(x) ((x) ? 0x07 : 0x05)
-#define DAS16_STATUS_REG 0x08
-#define DAS16_STATUS_BUSY (1 << 7)
-#define DAS16_STATUS_UNIPOLAR (1 << 6)
-#define DAS16_STATUS_MUXBIT (1 << 5)
-#define DAS16_STATUS_INT (1 << 4)
-#define DAS16_CTRL_REG 0x09
-#define DAS16_CTRL_INTE (1 << 7)
-#define DAS16_CTRL_IRQ(x) (((x) & 0x7) << 4)
-#define DAS16_CTRL_DMAE (1 << 2)
-#define DAS16_CTRL_PACING_MASK (3 << 0)
-#define DAS16_CTRL_INT_PACER (3 << 0)
-#define DAS16_CTRL_EXT_PACER (2 << 0)
-#define DAS16_CTRL_SOFT_PACER (0 << 0)
-#define DAS16_PACER_REG 0x0a
-#define DAS16_PACER_BURST_LEN(x) (((x) & 0xf) << 4)
-#define DAS16_PACER_CTR0 (1 << 1)
-#define DAS16_PACER_TRIG0 (1 << 0)
-#define DAS16_GAIN_REG 0x0b
-#define DAS16_TIMER_BASE_REG 0x0c /* to 0x0f */
-
-#define DAS1600_CONV_REG 0x404
-#define DAS1600_CONV_DISABLE (1 << 6)
-#define DAS1600_BURST_REG 0x405
-#define DAS1600_BURST_VAL (1 << 6)
-#define DAS1600_ENABLE_REG 0x406
-#define DAS1600_ENABLE_VAL (1 << 6)
-#define DAS1600_STATUS_REG 0x407
-#define DAS1600_STATUS_BME (1 << 6)
-#define DAS1600_STATUS_ME (1 << 5)
-#define DAS1600_STATUS_CD (1 << 4)
-#define DAS1600_STATUS_WS (1 << 1)
-#define DAS1600_STATUS_CLK_10MHZ (1 << 0)
-
-static const struct comedi_lrange range_das1x01_bip = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_das1x01_unip = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_das1x02_bip = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_das1x02_unip = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_das16jr = {
- 9, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_das16jr_16 = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const int das16jr_gainlist[] = { 8, 0, 1, 2, 3, 4, 5, 6, 7 };
-static const int das16jr_16_gainlist[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
-static const int das1600_gainlist[] = { 0, 1, 2, 3 };
-
-enum {
- das16_pg_none = 0,
- das16_pg_16jr,
- das16_pg_16jr_16,
- das16_pg_1601,
- das16_pg_1602,
-};
-static const int *const das16_gainlists[] = {
- NULL,
- das16jr_gainlist,
- das16jr_16_gainlist,
- das1600_gainlist,
- das1600_gainlist,
-};
-
-static const struct comedi_lrange *const das16_ai_uni_lranges[] = {
- &range_unknown,
- &range_das16jr,
- &range_das16jr_16,
- &range_das1x01_unip,
- &range_das1x02_unip,
-};
-
-static const struct comedi_lrange *const das16_ai_bip_lranges[] = {
- &range_unknown,
- &range_das16jr,
- &range_das16jr_16,
- &range_das1x01_bip,
- &range_das1x02_bip,
-};
-
-struct das16_board {
- const char *name;
- unsigned int ai_maxdata;
- unsigned int ai_speed; /* max conversion speed in nanosec */
- unsigned int ai_pg;
- unsigned int has_ao:1;
- unsigned int has_8255:1;
-
- unsigned int i8255_offset;
-
- unsigned int size;
- unsigned int id;
-};
-
-static const struct das16_board das16_boards[] = {
- {
- .name = "das-16",
- .ai_maxdata = 0x0fff,
- .ai_speed = 15000,
- .ai_pg = das16_pg_none,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x10,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "das-16g",
- .ai_maxdata = 0x0fff,
- .ai_speed = 15000,
- .ai_pg = das16_pg_none,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x10,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "das-16f",
- .ai_maxdata = 0x0fff,
- .ai_speed = 8500,
- .ai_pg = das16_pg_none,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x10,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "cio-das16",
- .ai_maxdata = 0x0fff,
- .ai_speed = 20000,
- .ai_pg = das16_pg_none,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x10,
- .size = 0x14,
- .id = 0x80,
- }, {
- .name = "cio-das16/f",
- .ai_maxdata = 0x0fff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_none,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x10,
- .size = 0x14,
- .id = 0x80,
- }, {
- .name = "cio-das16/jr",
- .ai_maxdata = 0x0fff,
- .ai_speed = 7692,
- .ai_pg = das16_pg_16jr,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "pc104-das16jr",
- .ai_maxdata = 0x0fff,
- .ai_speed = 3300,
- .ai_pg = das16_pg_16jr,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "cio-das16jr/16",
- .ai_maxdata = 0xffff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_16jr_16,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "pc104-das16jr/16",
- .ai_maxdata = 0xffff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_16jr_16,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "das-1201",
- .ai_maxdata = 0x0fff,
- .ai_speed = 20000,
- .ai_pg = das16_pg_none,
- .has_8255 = 1,
- .i8255_offset = 0x400,
- .size = 0x408,
- .id = 0x20,
- }, {
- .name = "das-1202",
- .ai_maxdata = 0x0fff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_none,
- .has_8255 = 1,
- .i8255_offset = 0x400,
- .size = 0x408,
- .id = 0x20,
- }, {
- .name = "das-1401",
- .ai_maxdata = 0x0fff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1601,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1402",
- .ai_maxdata = 0x0fff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1601",
- .ai_maxdata = 0x0fff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1601,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x400,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1602",
- .ai_maxdata = 0x0fff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x400,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1401/12",
- .ai_maxdata = 0x0fff,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1601,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1402/12",
- .ai_maxdata = 0x0fff,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1602,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1402/16",
- .ai_maxdata = 0xffff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1601/12",
- .ai_maxdata = 0x0fff,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1601,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x400,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1602/12",
- .ai_maxdata = 0x0fff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x400,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1602/16",
- .ai_maxdata = 0xffff,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .has_ao = 1,
- .has_8255 = 1,
- .i8255_offset = 0x400,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das16/330",
- .ai_maxdata = 0x0fff,
- .ai_speed = 3030,
- .ai_pg = das16_pg_16jr,
- .size = 0x14,
- .id = 0xf0,
- },
-};
-
-/* Period for timer interrupt in jiffies. It's a function
- * to deal with the possibility of dynamic HZ patches */
-static inline int timer_period(void)
-{
- return HZ / 20;
-}
-
-struct das16_private_struct {
- struct comedi_isadma *dma;
- unsigned int clockbase;
- unsigned int ctrl_reg;
- unsigned int divisor1;
- unsigned int divisor2;
- struct timer_list timer;
- unsigned long extra_iobase;
- unsigned int can_burst:1;
- unsigned int timer_running:1;
-};
-
-static void das16_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int unread_samples)
-{
- struct das16_private_struct *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned int max_samples = comedi_bytes_to_samples(s, desc->maxsize);
- unsigned int nsamples;
-
- /*
- * Determine dma size based on the buffer size plus the number of
- * unread samples and the number of samples remaining in the command.
- */
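	/*
	 * Illustrative numbers (assumed, not from hardware): if the
	 * descriptor can hold 1024 samples, 100 samples are still unread
	 * and only 500 samples remain in the command, then
	 * comedi_nsamples_left() returns 500 and the next transfer is
	 * programmed for 500 - 100 = 400 samples.
	 */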
- nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
- if (nsamples > unread_samples) {
- nsamples -= unread_samples;
- desc->size = comedi_samples_to_bytes(s, nsamples);
- comedi_isadma_program(desc);
- }
-}
-
-static void das16_interrupt(struct comedi_device *dev)
-{
- struct das16_private_struct *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned long spin_flags;
- unsigned int residue;
- unsigned int nbytes;
- unsigned int nsamples;
-
- spin_lock_irqsave(&dev->spinlock, spin_flags);
- if (!(devpriv->ctrl_reg & DAS16_CTRL_DMAE)) {
- spin_unlock_irqrestore(&dev->spinlock, spin_flags);
- return;
- }
-
- /*
- * The pc104-das16jr (at least) has problems if the dma
- * transfer is interrupted in the middle of transferring
- * a 16 bit sample.
- */
- residue = comedi_isadma_disable_on_sample(desc->chan,
- comedi_bytes_per_sample(s));
-
- /* figure out how many samples to read */
- if (residue > desc->size) {
- dev_err(dev->class_dev, "residue > transfer size!\n");
- async->events |= COMEDI_CB_ERROR;
- nbytes = 0;
- } else {
- nbytes = desc->size - residue;
- }
- nsamples = comedi_bytes_to_samples(s, nbytes);
-
- /* restart DMA if more samples are needed */
- if (nsamples) {
- dma->cur_dma = 1 - dma->cur_dma;
- das16_ai_setup_dma(dev, s, nsamples);
- }
-
- spin_unlock_irqrestore(&dev->spinlock, spin_flags);
-
- comedi_buf_write_samples(s, desc->virt_addr, nsamples);
-
- if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
-
- comedi_handle_events(dev, s);
-}
-
-static void das16_timer_interrupt(unsigned long arg)
-{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct das16_private_struct *devpriv = dev->private;
- unsigned long flags;
-
- das16_interrupt(dev);
-
- spin_lock_irqsave(&dev->spinlock, flags);
- if (devpriv->timer_running)
- mod_timer(&devpriv->timer, jiffies + timer_period());
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static void das16_ai_set_mux_range(struct comedi_device *dev,
- unsigned int first_chan,
- unsigned int last_chan,
- unsigned int range)
-{
- const struct das16_board *board = dev->board_ptr;
-
- /* set multiplexer */
- outb(first_chan | (last_chan << 4), dev->iobase + DAS16_MUX_REG);
-
- /* some boards do not have programmable gain */
- if (board->ai_pg == das16_pg_none)
- return;
-
- /*
-	 * Set gain (this is also the burst rate register, but according to
-	 * the ComputerBoards manual, the burst rate does nothing, even on
-	 * Keithley cards).
- */
- outb((das16_gainlists[board->ai_pg])[range],
- dev->iobase + DAS16_GAIN_REG);
-}
-
-static int das16_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
-
- if (chan != ((chan0 + i) % s->n_chan)) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must be consecutive channels, counting upwards\n");
- return -EINVAL;
- }
-
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must all have the same gain\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct das16_board *board = dev->board_ptr;
- struct das16_private_struct *devpriv = dev->private;
- int err = 0;
- unsigned int trig_mask;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
-
- trig_mask = TRIG_FOLLOW;
- if (devpriv->can_burst)
- trig_mask |= TRIG_TIMER | TRIG_EXT;
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, trig_mask);
-
- trig_mask = TRIG_TIMER | TRIG_EXT;
- if (devpriv->can_burst)
- trig_mask |= TRIG_NOW;
- err |= comedi_check_trigger_src(&cmd->convert_src, trig_mask);
-
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
-	/* make sure scan_begin_src and convert_src don't conflict */
- if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW)
- err |= -EINVAL;
- if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_NOW)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- /* check against maximum frequency */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ai_speed *
- cmd->chanlist_len);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- }
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up arguments */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= das16_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static unsigned int das16_set_pacer(struct comedi_device *dev, unsigned int ns,
- unsigned int flags)
-{
- comedi_8254_cascade_ns_to_timer(dev->pacer, &ns, flags);
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
-
- return ns;
-}
-
-static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct das16_private_struct *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int first_chan = CR_CHAN(cmd->chanlist[0]);
- unsigned int last_chan = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]);
- unsigned int range = CR_RANGE(cmd->chanlist[0]);
- unsigned int byte;
- unsigned long flags;
-
- if (cmd->flags & CMDF_PRIORITY) {
- dev_err(dev->class_dev,
- "isa dma transfers cannot be performed with CMDF_PRIORITY, aborting\n");
- return -1;
- }
-
- if (devpriv->can_burst)
- outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV_REG);
-
- /* set mux and range for chanlist scan */
- das16_ai_set_mux_range(dev, first_chan, last_chan, range);
-
- /* set counter mode and counts */
- cmd->convert_arg = das16_set_pacer(dev, cmd->convert_arg, cmd->flags);
-
- /* enable counters */
- byte = 0;
- if (devpriv->can_burst) {
- if (cmd->convert_src == TRIG_NOW) {
- outb(DAS1600_BURST_VAL,
- dev->iobase + DAS1600_BURST_REG);
- /* set burst length */
- byte |= DAS16_PACER_BURST_LEN(cmd->chanlist_len - 1);
- } else {
- outb(0, dev->iobase + DAS1600_BURST_REG);
- }
- }
- outb(byte, dev->iobase + DAS16_PACER_REG);
-
- /* set up dma transfer */
- dma->cur_dma = 0;
- das16_ai_setup_dma(dev, s, 0);
-
- /* set up timer */
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->timer_running = 1;
- devpriv->timer.expires = jiffies + timer_period();
- add_timer(&devpriv->timer);
-
- /* enable DMA interrupt with external or internal pacing */
- devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE | DAS16_CTRL_PACING_MASK);
- devpriv->ctrl_reg |= DAS16_CTRL_DMAE;
- if (cmd->convert_src == TRIG_EXT)
- devpriv->ctrl_reg |= DAS16_CTRL_EXT_PACER;
- else
- devpriv->ctrl_reg |= DAS16_CTRL_INT_PACER;
- outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
-
- if (devpriv->can_burst)
- outb(0, dev->iobase + DAS1600_CONV_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return 0;
-}
-
-static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct das16_private_struct *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
-
- /* disable interrupts, dma and pacer clocked conversions */
- devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE | DAS16_CTRL_DMAE |
- DAS16_CTRL_PACING_MASK);
- outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
-
- comedi_isadma_disable(dma->chan);
-
- /* disable SW timer */
- if (devpriv->timer_running) {
- devpriv->timer_running = 0;
- del_timer(&devpriv->timer);
- }
-
- if (devpriv->can_burst)
- outb(0, dev->iobase + DAS1600_BURST_REG);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return 0;
-}
-
-static void das16_ai_munge(struct comedi_device *dev,
- struct comedi_subdevice *s, void *array,
- unsigned int num_bytes,
- unsigned int start_chan_index)
-{
- unsigned short *data = array;
- unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
- unsigned int i;
-
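	/*
	 * Samples arrive little-endian; as the shift below implies, 12-bit
	 * boards left-justify each sample in the 16-bit word, so e.g. a raw
	 * word of 0x3ab0 is munged to 0x03ab.
	 */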
- for (i = 0; i < num_samples; i++) {
- data[i] = le16_to_cpu(data[i]);
- if (s->maxdata == 0x0fff)
- data[i] >>= 4;
- data[i] &= s->maxdata;
- }
-}
-
-static int das16_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DAS16_STATUS_REG);
- if ((status & DAS16_STATUS_BUSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int das16_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
- int ret;
- int i;
-
- /* set mux and range for single channel */
- das16_ai_set_mux_range(dev, chan, chan, range);
-
- for (i = 0; i < insn->n; i++) {
- /* trigger conversion */
- outb_p(0, dev->iobase + DAS16_TRIG_REG);
-
- ret = comedi_timeout(dev, s, insn, das16_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = inb(dev->iobase + DAS16_AI_MSB_REG) << 8;
- val |= inb(dev->iobase + DAS16_AI_LSB_REG);
- if (s->maxdata == 0x0fff)
- val >>= 4;
- val &= s->maxdata;
-
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int das16_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- val <<= 4;
-
- outb(val & 0xff, dev->iobase + DAS16_AO_LSB_REG(chan));
- outb((val >> 8) & 0xff, dev->iobase + DAS16_AO_MSB_REG(chan));
- }
-
- return insn->n;
-}
-
-static int das16_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inb(dev->iobase + DAS16_DIO_REG) & 0xf;
-
- return insn->n;
-}
-
-static int das16_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAS16_DIO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int das16_probe(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct das16_board *board = dev->board_ptr;
- int diobits;
-
-	/* the upper four bits of the DIO register identify the board */
- diobits = inb(dev->iobase + DAS16_DIO_REG) & 0xf0;
- if (board->id != diobits) {
- dev_err(dev->class_dev,
- "requested board's id bits are incorrect (0x%x != 0x%x)\n",
- board->id, diobits);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void das16_reset(struct comedi_device *dev)
-{
- outb(0, dev->iobase + DAS16_STATUS_REG);
- outb(0, dev->iobase + DAS16_CTRL_REG);
- outb(0, dev->iobase + DAS16_PACER_REG);
-}
-
-static void das16_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
-{
- struct das16_private_struct *devpriv = dev->private;
-
- /* only DMA channels 3 and 1 are valid */
- if (!(dma_chan == 1 || dma_chan == 3))
- return;
-
- /* DMA uses two buffers */
- devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
- DAS16_DMA_SIZE, COMEDI_ISADMA_READ);
- if (devpriv->dma) {
- setup_timer(&devpriv->timer, das16_timer_interrupt,
- (unsigned long)dev);
- }
-}
-
-static void das16_free_dma(struct comedi_device *dev)
-{
- struct das16_private_struct *devpriv = dev->private;
-
- if (devpriv) {
- if (devpriv->timer.data)
- del_timer_sync(&devpriv->timer);
- comedi_isadma_free(devpriv->dma);
- }
-}
-
-static const struct comedi_lrange *das16_ai_range(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_devconfig *it,
- unsigned int pg_type,
- unsigned int status)
-{
- unsigned int min = it->options[4];
- unsigned int max = it->options[5];
-
- /* get any user-defined input range */
- if (pg_type == das16_pg_none && (min || max)) {
- struct comedi_lrange *lrange;
- struct comedi_krange *krange;
-
- /* allocate single-range range table */
- lrange = comedi_alloc_spriv(s,
- sizeof(*lrange) + sizeof(*krange));
- if (!lrange)
- return &range_unknown;
-
- /* initialize ai range */
- lrange->length = 1;
- krange = lrange->range;
- krange->min = min;
- krange->max = max;
- krange->flags = UNIT_volt;
-
- return lrange;
- }
-
- /* use software programmable range */
- if (status & DAS16_STATUS_UNIPOLAR)
- return das16_ai_uni_lranges[pg_type];
- return das16_ai_bip_lranges[pg_type];
-}
-
-static const struct comedi_lrange *das16_ao_range(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_devconfig *it)
-{
- unsigned int min = it->options[6];
- unsigned int max = it->options[7];
-
- /* get any user-defined output range */
- if (min || max) {
- struct comedi_lrange *lrange;
- struct comedi_krange *krange;
-
- /* allocate single-range range table */
- lrange = comedi_alloc_spriv(s,
- sizeof(*lrange) + sizeof(*krange));
- if (!lrange)
- return &range_unknown;
-
- /* initialize ao range */
- lrange->length = 1;
- krange = lrange->range;
- krange->min = min;
- krange->max = max;
- krange->flags = UNIT_volt;
-
- return lrange;
- }
-
- return &range_unknown;
-}
-
-static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct das16_board *board = dev->board_ptr;
- struct das16_private_struct *devpriv;
- struct comedi_subdevice *s;
- unsigned int osc_base;
- unsigned int status;
- int ret;
-
- /* check that clock setting is valid */
- if (it->options[3]) {
- if (it->options[3] != 1 && it->options[3] != 10) {
- dev_err(dev->class_dev,
- "Invalid option. Master clock must be set to 1 or 10 (MHz)\n");
- return -EINVAL;
- }
- }
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- if (board->size < 0x400) {
- ret = comedi_request_region(dev, it->options[0], board->size);
- if (ret)
- return ret;
- } else {
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
- /* Request an additional region for the 8255 */
- ret = __comedi_request_region(dev, dev->iobase + 0x400,
- board->size & 0x3ff);
- if (ret)
- return ret;
- devpriv->extra_iobase = dev->iobase + 0x400;
- devpriv->can_burst = 1;
- }
-
- /* probe id bits to make sure they are consistent */
- if (das16_probe(dev, it))
- return -EINVAL;
-
- /* get master clock speed */
- osc_base = I8254_OSC_BASE_1MHZ;
- if (devpriv->can_burst) {
- status = inb(dev->iobase + DAS1600_STATUS_REG);
- if (status & DAS1600_STATUS_CLK_10MHZ)
- osc_base = I8254_OSC_BASE_10MHZ;
- } else {
- if (it->options[3])
- osc_base = I8254_OSC_BASE_1MHZ / it->options[3];
- }
-
- dev->pacer = comedi_8254_init(dev->iobase + DAS16_TIMER_BASE_REG,
- osc_base, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- das16_alloc_dma(dev, it->options[2]);
-
- ret = comedi_alloc_subdevices(dev, 4 + board->has_8255);
- if (ret)
- return ret;
-
- status = inb(dev->iobase + DAS16_STATUS_REG);
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- if (status & DAS16_STATUS_MUXBIT) {
- s->subdev_flags |= SDF_GROUND;
- s->n_chan = 16;
- } else {
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = 8;
- }
- s->len_chanlist = s->n_chan;
- s->maxdata = board->ai_maxdata;
- s->range_table = das16_ai_range(dev, s, it, board->ai_pg, status);
- s->insn_read = das16_ai_insn_read;
- if (devpriv->dma) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmdtest = das16_cmd_test;
- s->do_cmd = das16_cmd_exec;
- s->cancel = das16_cancel;
- s->munge = das16_ai_munge;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- if (board->has_ao) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table = das16_ao_range(dev, s, it);
- s->insn_write = das16_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Digital Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16_do_insn_bits;
-
- /* initialize digital output lines */
- outb(s->state, dev->iobase + DAS16_DIO_REG);
-
- /* 8255 Digital I/O subdevice */
- if (board->has_8255) {
- s = &dev->subdevices[4];
- ret = subdev_8255_init(dev, s, NULL, board->i8255_offset);
- if (ret)
- return ret;
- }
-
- das16_reset(dev);
- /* set the interrupt level */
- devpriv->ctrl_reg = DAS16_CTRL_IRQ(dev->irq);
- outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
-
- if (devpriv->can_burst) {
- outb(DAS1600_ENABLE_VAL, dev->iobase + DAS1600_ENABLE_REG);
- outb(0, dev->iobase + DAS1600_CONV_REG);
- outb(0, dev->iobase + DAS1600_BURST_REG);
- }
-
- return 0;
-}
-
-static void das16_detach(struct comedi_device *dev)
-{
- const struct das16_board *board = dev->board_ptr;
- struct das16_private_struct *devpriv = dev->private;
-
- if (devpriv) {
- if (dev->iobase)
- das16_reset(dev);
- das16_free_dma(dev);
-
- if (devpriv->extra_iobase)
- release_region(devpriv->extra_iobase,
- board->size & 0x3ff);
- }
-
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver das16_driver = {
- .driver_name = "das16",
- .module = THIS_MODULE,
- .attach = das16_attach,
- .detach = das16_detach,
- .board_name = &das16_boards[0].name,
- .num_names = ARRAY_SIZE(das16_boards),
- .offset = sizeof(das16_boards[0]),
-};
-module_comedi_driver(das16_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for DAS16 compatible boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
deleted file mode 100644
index 3a37373fbb6f..000000000000
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- comedi/drivers/das16m1.c
- CIO-DAS16/M1 driver
- Author: Frank Mori Hess, based on code from the das16
- driver.
- Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: das16m1
-Description: CIO-DAS16/M1
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
-Status: works
-
-This driver supports a single board - the CIO-DAS16/M1.
-As far as I know, there are no other boards that have
-the same register layout. Even the CIO-DAS16/M1/16 is
-significantly different.
-
-I was _barely_ able to reach the full 1 MHz capability
-of this board, using a hard real-time interrupt
-(set the TRIG_RT flag in your struct comedi_cmd and use
-rtlinux or RTAI). The board can't do dma, so the bottleneck is
-pulling the data across the ISA bus. I timed the interrupt
-handler, and it took my computer ~470 microseconds to pull 512
-samples from the board. So at a 1 MHz sampling rate,
-expect your CPU to be spending almost all of its
-time in the interrupt handler.
-
-This board has some unusual restrictions for its channel/gain list. If the
-list has 2 or more channels in it, then two conditions must be satisfied:
-(1) - even/odd channels must appear at even/odd indices in the list
-(2) - the list must have an even number of entries.
-
-Options:
- [0] - base io address
- [1] - irq (optional, but you probably want it)
-
-irq can be omitted, although the cmd interface will not work without it.
-*/
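As a hedged illustration of these restrictions (the channel and range values are arbitrary), a user-space chanlist such as the following would be accepted, using the standard CR_PACK() and AREF_DIFF macros from comedi.h: even channels at even indices, odd channels at odd indices, and an even number of entries.

unsigned int chanlist[4];

chanlist[0] = CR_PACK(0, 0, AREF_DIFF);	/* even channel at even index */
chanlist[1] = CR_PACK(1, 0, AREF_DIFF);	/* odd channel at odd index */
chanlist[2] = CR_PACK(2, 0, AREF_DIFF);
chanlist[3] = CR_PACK(3, 0, AREF_DIFF);	/* even length: 4 entries */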
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
-
-#define DAS16M1_SIZE2 8
-
-#define FIFO_SIZE 1024 /* 1024 sample fifo */
-
-/*
- CIO-DAS16_M1.pdf
-
- "cio-das16/m1"
-
- 0 a/d bits 0-3, mux start 12 bit
- 1 a/d bits 4-11 unused
- 2 status control
- 3 di 4 bit do 4 bit
- 4 unused clear interrupt
- 5 interrupt, pacer
- 6 channel/gain queue address
- 7 channel/gain queue data
- 89ab 8254
- cdef 8254
- 400 8255
- 404-407 8254
-
-*/
-
-#define DAS16M1_AI 0 /* 16-bit wide register */
-#define AI_CHAN(x) ((x) & 0xf)
-#define DAS16M1_CS 2
-#define EXT_TRIG_BIT 0x1
-#define OVRUN 0x20
-#define IRQDATA 0x80
-#define DAS16M1_DIO 3
-#define DAS16M1_CLEAR_INTR 4
-#define DAS16M1_INTR_CONTROL 5
-#define EXT_PACER 0x2
-#define INT_PACER 0x3
-#define PACER_MASK 0x3
-#define INTE 0x80
-#define DAS16M1_QUEUE_ADDR 6
-#define DAS16M1_QUEUE_DATA 7
-#define Q_CHAN(x) ((x) & 0x7)
-#define Q_RANGE(x) (((x) & 0xf) << 4)
-#define UNIPOLAR 0x40
-#define DAS16M1_8254_FIRST 0x8
-#define DAS16M1_8254_SECOND 0xc
-#define DAS16M1_82C55 0x400
-#define DAS16M1_8254_THIRD 0x404
-
-static const struct comedi_lrange range_das16m1 = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10)
- }
-};
-
-struct das16m1_private_struct {
- struct comedi_8254 *counter;
- unsigned int control_state;
- unsigned int adc_count; /* number of samples completed */
- /* initial value in lower half of hardware conversion counter,
- * needed to keep track of whether new count has been loaded into
- * counter yet (loaded by first sample conversion) */
- u16 initial_hw_count;
- unsigned short ai_buffer[FIFO_SIZE];
- unsigned long extra_iobase;
-};
-
-static inline unsigned short munge_sample(unsigned short data)
-{
- return (data >> 4) & 0xfff;
-}
-
-static void munge_sample_array(unsigned short *array, unsigned int num_elements)
-{
- unsigned int i;
-
- for (i = 0; i < num_elements; i++)
- array[i] = munge_sample(array[i]);
-}
-
-static int das16m1_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int i;
-
- if (cmd->chanlist_len == 1)
- return 0;
-
- if ((cmd->chanlist_len % 2) != 0) {
- dev_dbg(dev->class_dev,
- "chanlist must be of even length or length 1\n");
- return -EINVAL;
- }
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if ((i % 2) != (chan % 2)) {
- dev_dbg(dev->class_dev,
-				"even/odd channels must have even/odd chanlist indices\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int das16m1_cmd_test(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER)
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 1000);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int arg = cmd->convert_arg;
-
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= das16m1_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int das16m1_cmd_exec(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct das16m1_private_struct *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int byte, i;
-
- /* disable interrupts and internal pacer */
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
- /* set software count */
- devpriv->adc_count = 0;
-
- /*
- * Initialize lower half of hardware counter, used to determine how
- * many samples are in fifo. Value doesn't actually load into counter
- * until counter's next clock (the next a/d conversion).
- */
- comedi_8254_set_mode(devpriv->counter, 1, I8254_MODE2 | I8254_BINARY);
- comedi_8254_write(devpriv->counter, 1, 0);
-
- /*
- * Remember current reading of counter so we know when counter has
- * actually been loaded.
- */
- devpriv->initial_hw_count = comedi_8254_read(devpriv->counter, 1);
-
- /* setup channel/gain queue */
- for (i = 0; i < cmd->chanlist_len; i++) {
- outb(i, dev->iobase + DAS16M1_QUEUE_ADDR);
- byte =
- Q_CHAN(CR_CHAN(cmd->chanlist[i])) |
- Q_RANGE(CR_RANGE(cmd->chanlist[i]));
- outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
- }
-
- /* enable interrupts and set internal pacer counter mode and counts */
- devpriv->control_state &= ~PACER_MASK;
- if (cmd->convert_src == TRIG_TIMER) {
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- devpriv->control_state |= INT_PACER;
- } else { /* TRIG_EXT */
- devpriv->control_state |= EXT_PACER;
- }
-
- /* set control & status register */
- byte = 0;
-	/* if we are using an external start trigger (the board dislikes having
-	 * both the start and conversion triggers external simultaneously) */
- if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
- byte |= EXT_TRIG_BIT;
-
- outb(byte, dev->iobase + DAS16M1_CS);
- /* clear interrupt bit */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
-
- devpriv->control_state |= INTE;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
- return 0;
-}
-
-static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct das16m1_private_struct *devpriv = dev->private;
-
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
- return 0;
-}
-
-static int das16m1_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DAS16M1_CS);
- if (status & IRQDATA)
- return 0;
- return -EBUSY;
-}
-
-static int das16m1_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct das16m1_private_struct *devpriv = dev->private;
- int ret;
- int n;
- int byte;
-
- /* disable interrupts and internal pacer */
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
- /* setup channel/gain queue */
- outb(0, dev->iobase + DAS16M1_QUEUE_ADDR);
- byte =
- Q_CHAN(CR_CHAN(insn->chanspec)) | Q_RANGE(CR_RANGE(insn->chanspec));
- outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
-
- for (n = 0; n < insn->n; n++) {
- /* clear IRQDATA bit */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
- /* trigger conversion */
- outb(0, dev->iobase);
-
- ret = comedi_timeout(dev, s, insn, das16m1_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[n] = munge_sample(inw(dev->iobase));
- }
-
- return n;
-}
-
-static int das16m1_di_rbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int bits;
-
- bits = inb(dev->iobase + DAS16M1_DIO) & 0xf;
- data[1] = bits;
- data[0] = 0;
-
- return insn->n;
-}
-
-static int das16m1_do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAS16M1_DIO);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void das16m1_handler(struct comedi_device *dev, unsigned int status)
-{
- struct das16m1_private_struct *devpriv = dev->private;
- struct comedi_subdevice *s;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
- u16 num_samples;
- u16 hw_counter;
-
- s = dev->read_subdev;
- async = s->async;
- cmd = &async->cmd;
-
- /* figure out how many samples are in fifo */
- hw_counter = comedi_8254_read(devpriv->counter, 1);
- /* make sure hardware counter reading is not bogus due to initial value
- * not having been loaded yet */
- if (devpriv->adc_count == 0 &&
- hw_counter == devpriv->initial_hw_count) {
- num_samples = 0;
- } else {
-		/* The calculation of num_samples looks odd, but it uses the
-		 * following facts. The 16-bit hardware counter is initialized
-		 * with a value of zero (which really means 0x10000). The
-		 * counter decrements by one on each conversion (when the
-		 * counter decrements from zero it goes to 0xffff). num_samples
-		 * is a 16-bit variable, so it rolls over in a similar fashion
-		 * to the hardware counter. Work it out, and this is what you
-		 * get. */
- num_samples = -hw_counter - devpriv->adc_count;
- }
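	/*
	 * Worked example with illustrative values: after 48 total
	 * conversions the counter has decremented from 0 to 0xffd0, so
	 * -hw_counter is 48 as a 16-bit value.  If adc_count is 32
	 * (samples already read), num_samples = 48 - 32 = 16 samples are
	 * waiting in the fifo.
	 */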
- /* check if we only need some of the points */
- if (cmd->stop_src == TRIG_COUNT) {
- if (num_samples > cmd->stop_arg * cmd->chanlist_len)
- num_samples = cmd->stop_arg * cmd->chanlist_len;
- }
-	/* make sure we don't try to get too many points if the fifo has overrun */
- if (num_samples > FIFO_SIZE)
- num_samples = FIFO_SIZE;
- insw(dev->iobase, devpriv->ai_buffer, num_samples);
- munge_sample_array(devpriv->ai_buffer, num_samples);
- comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
- devpriv->adc_count += num_samples;
-
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) {
- /* end of acquisition */
- async->events |= COMEDI_CB_EOA;
- }
- }
-
- /* this probably won't catch overruns since the card doesn't generate
- * overrun interrupts, but we might as well try */
- if (status & OVRUN) {
- async->events |= COMEDI_CB_ERROR;
- dev_err(dev->class_dev, "fifo overflow\n");
- }
-
- comedi_handle_events(dev, s);
-}
-
-static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- unsigned long flags;
- unsigned int status;
-
- /* prevent race with interrupt handler */
- spin_lock_irqsave(&dev->spinlock, flags);
- status = inb(dev->iobase + DAS16M1_CS);
- das16m1_handler(dev, status);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return comedi_buf_n_bytes_ready(s);
-}
-
-static irqreturn_t das16m1_interrupt(int irq, void *d)
-{
- int status;
- struct comedi_device *dev = d;
-
- if (!dev->attached) {
- dev_err(dev->class_dev, "premature interrupt\n");
- return IRQ_HANDLED;
- }
- /* prevent race with comedi_poll() */
- spin_lock(&dev->spinlock);
-
- status = inb(dev->iobase + DAS16M1_CS);
-
- if ((status & (IRQDATA | OVRUN)) == 0) {
- dev_err(dev->class_dev, "spurious interrupt\n");
- spin_unlock(&dev->spinlock);
- return IRQ_NONE;
- }
-
- das16m1_handler(dev, status);
-
- /* clear interrupt */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
-
- spin_unlock(&dev->spinlock);
- return IRQ_HANDLED;
-}
-
-static int das16m1_irq_bits(unsigned int irq)
-{
- switch (irq) {
- case 10:
- return 0x0;
- case 11:
- return 0x1;
- case 12:
- return 0x2;
- case 15:
- return 0x3;
- case 2:
- return 0x4;
- case 3:
- return 0x5;
- case 5:
- return 0x6;
- case 7:
- return 0x7;
- default:
- return 0x0;
- }
-}
-
-/*
- * Options list:
- * 0 I/O base
- * 1 IRQ
- */
-static int das16m1_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct das16m1_private_struct *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
- /* Request an additional region for the 8255 */
- ret = __comedi_request_region(dev, dev->iobase + DAS16M1_82C55,
- DAS16M1_SIZE2);
- if (ret)
- return ret;
- devpriv->extra_iobase = dev->iobase + DAS16M1_82C55;
-
- /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
- if ((1 << it->options[1]) & 0xdcfc) {
- ret = request_irq(it->options[1], das16m1_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_SECOND,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_FIRST,
- 0, I8254_IO8, 0);
- if (!devpriv->counter)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* ai */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = 8;
- s->maxdata = (1 << 12) - 1;
- s->range_table = &range_das16m1;
- s->insn_read = das16m1_ai_rinsn;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 256;
- s->do_cmdtest = das16m1_cmd_test;
- s->do_cmd = das16m1_cmd_exec;
- s->cancel = das16m1_cancel;
- s->poll = das16m1_poll;
- }
-
- s = &dev->subdevices[1];
- /* di */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16m1_di_rbits;
-
- s = &dev->subdevices[2];
- /* do */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16m1_do_wbits;
-
- s = &dev->subdevices[3];
- /* 8255 */
- ret = subdev_8255_init(dev, s, NULL, DAS16M1_82C55);
- if (ret)
- return ret;
-
- /* initialize digital output lines */
- outb(0, dev->iobase + DAS16M1_DIO);
-
- /* set the interrupt level */
- devpriv->control_state = das16m1_irq_bits(dev->irq) << 4;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
- return 0;
-}
-
-static void das16m1_detach(struct comedi_device *dev)
-{
- struct das16m1_private_struct *devpriv = dev->private;
-
- if (devpriv) {
- if (devpriv->extra_iobase)
- release_region(devpriv->extra_iobase, DAS16M1_SIZE2);
- kfree(devpriv->counter);
- }
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver das16m1_driver = {
- .driver_name = "das16m1",
- .module = THIS_MODULE,
- .attach = das16m1_attach,
- .detach = das16m1_detach,
-};
-module_comedi_driver(das16m1_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
deleted file mode 100644
index 940781183fac..000000000000
--- a/drivers/staging/comedi/drivers/das1800.c
+++ /dev/null
@@ -1,1458 +0,0 @@
-/*
- comedi/drivers/das1800.c
- Driver for Keithley das1700/das1800 series boards
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: das1800
-Description: Keithley Metrabyte DAS1800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
- DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
- DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
- DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
- DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
- DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
- DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
- DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
- DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
- DAS-1802AO (das-1802ao)
-Status: works
-
-The waveform analog output on the 'ao' cards is not supported.
-If you need it, send me (Frank Hess) an email.
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed or externally triggered conversions)
- [2] - DMA0 (optional, requires irq)
- [3] - DMA1 (optional, requires irq and dma0)
-*/
-/*
-
-This driver supports the following Keithley boards:
-
-das-1701st
-das-1701st-da
-das-1701ao
-das-1702st
-das-1702st-da
-das-1702hr
-das-1702hr-da
-das-1702ao
-das-1801st
-das-1801st-da
-das-1801hc
-das-1801ao
-das-1802st
-das-1802st-da
-das-1802hr
-das-1802hr-da
-das-1802hc
-das-1802ao
-
-Options:
- [0] - base io address
- [1] - irq (optional, required for timed or externally triggered conversions)
- [2] - dma0 (optional, requires irq)
- [3] - dma1 (optional, requires irq and dma0)
-
-irq can be omitted, although the cmd interface will not work without it.
-
-analog input cmd triggers supported:
- start_src: TRIG_NOW | TRIG_EXT
- scan_begin_src: TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT
- scan_end_src: TRIG_COUNT
- convert_src: TRIG_TIMER | TRIG_EXT (TRIG_EXT requires scan_begin_src == TRIG_FOLLOW)
- stop_src: TRIG_COUNT | TRIG_EXT | TRIG_NONE
-
-scan_begin_src triggers TRIG_TIMER and TRIG_EXT use the card's
-'burst mode' which limits the valid conversion time to 64 microseconds
-(convert_arg <= 64000). This limitation does not apply if scan_begin_src
-is TRIG_FOLLOW.
-
-NOTES:
-Only the DAS-1801ST has been tested by me.
-Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
-
-TODO:
- Make it automatically allocate irq and dma channels if they are not specified
- Add support for analog out on 'ao' cards
- read insn for analog out
-*/
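A minimal user-space sketch of a burst-mode command consistent with the triggers listed above; the field names come from the standard comedi interface (struct comedi_cmd, CR_PACK, AREF_GROUND), but the subdevice number, channels, ranges and timing values are assumptions chosen for illustration, not values mandated by this driver.

unsigned int chanlist[2] = {
	CR_PACK(0, 0, AREF_GROUND),
	CR_PACK(1, 0, AREF_GROUND),	/* same polarity for both entries */
};

struct comedi_cmd cmd = {
	.subdev         = 0,		/* assumed analog input subdevice */
	.start_src      = TRIG_NOW,
	.start_arg      = 0,
	.scan_begin_src = TRIG_TIMER,	/* burst mode */
	.scan_begin_arg = 1000000,	/* 1 ms between scans */
	.convert_src    = TRIG_TIMER,
	.convert_arg    = 10000,	/* 10 us, within the 64 us burst limit */
	.scan_end_src   = TRIG_COUNT,
	.scan_end_arg   = 2,
	.stop_src       = TRIG_COUNT,
	.stop_arg       = 100,		/* stop after 100 scans */
	.chanlist       = chanlist,
	.chanlist_len   = 2,
};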
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-
-/* misc. defines */
-#define DAS1800_SIZE 16 /* uses 16 io addresses */
-#define FIFO_SIZE 1024 /* 1024 sample fifo */
-#define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */
-#define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */
-
-/* Registers for the das1800 */
-#define DAS1800_FIFO 0x0
-#define DAS1800_QRAM 0x0
-#define DAS1800_DAC 0x0
-#define DAS1800_SELECT 0x2
-#define ADC 0x0
-#define QRAM 0x1
-#define DAC(a) (0x2 + a)
-#define DAS1800_DIGITAL 0x3
-#define DAS1800_CONTROL_A 0x4
-#define FFEN 0x1
-#define CGEN 0x4
-#define CGSL 0x8
-#define TGEN 0x10
-#define TGSL 0x20
-#define ATEN 0x80
-#define DAS1800_CONTROL_B 0x5
-#define DMA_CH5 0x1
-#define DMA_CH6 0x2
-#define DMA_CH7 0x3
-#define DMA_CH5_CH6 0x5
-#define DMA_CH6_CH7 0x6
-#define DMA_CH7_CH5 0x7
-#define DMA_ENABLED 0x3 /* mask used to determine if dma is enabled */
-#define DMA_DUAL 0x4
-#define IRQ3 0x8
-#define IRQ5 0x10
-#define IRQ7 0x18
-#define IRQ10 0x28
-#define IRQ11 0x30
-#define IRQ15 0x38
-#define FIMD 0x40
-#define DAS1800_CONTROL_C 0X6
-#define IPCLK 0x1
-#define XPCLK 0x3
-#define BMDE 0x4
-#define CMEN 0x8
-#define UQEN 0x10
-#define SD 0x40
-#define UB 0x80
-#define DAS1800_STATUS 0x7
-/* bits that prevent interrupt status bits (and CVEN) from being cleared on write */
-#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
-#define INT 0x1
-#define DMATC 0x2
-#define CT0TC 0x8
-#define OVF 0x10
-#define FHF 0x20
-#define FNE 0x40
-#define CVEN_MASK 0x40 /* masks CVEN on write */
-#define CVEN 0x80
-#define DAS1800_BURST_LENGTH 0x8
-#define DAS1800_BURST_RATE 0x9
-#define DAS1800_QRAM_ADDRESS 0xa
-#define DAS1800_COUNTER 0xc
-
-#define IOBASE2 0x400 /* offset of additional ioports used on 'ao' cards */
-
-enum {
- das1701st, das1701st_da, das1702st, das1702st_da, das1702hr,
- das1702hr_da,
- das1701ao, das1702ao, das1801st, das1801st_da, das1802st, das1802st_da,
- das1802hr, das1802hr_da, das1801hc, das1802hc, das1801ao, das1802ao
-};
-
-/* analog input ranges */
-static const struct comedi_lrange range_ai_das1801 = {
- 8, {
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02),
- UNI_RANGE(5),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
- }
-};
-
-static const struct comedi_lrange range_ai_das1802 = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-struct das1800_board {
- const char *name;
- int ai_speed; /* max conversion period in nanoseconds */
- int resolution; /* bits of ai resolution */
- int qram_len; /* length of card's channel / gain queue */
- int common; /* supports AREF_COMMON flag */
- int do_n_chan; /* number of digital output channels */
- int ao_ability; /* 0 == no analog out, 1 == basic analog out, 2 == waveform analog out */
- int ao_n_chan; /* number of analog out channels */
- const struct comedi_lrange *range_ai; /* available input ranges */
-};
-
-/* Warning: the maximum conversion speeds listed below are
- * not always achievable depending on board setup (see the
- * user manual).
- */
-static const struct das1800_board das1800_boards[] = {
- {
- .name = "das-1701st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1701st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr-da",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1701ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1801st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr-da",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
-};
-
-struct das1800_private {
- struct comedi_isadma *dma;
- int irq_dma_bits; /* bits for control register b */
- /* dma bits for control register b, stored so that dma can be
- * turned on and off */
- int dma_bits;
- uint16_t *fifo_buf; /* bounce buffer for analog input FIFO */
- unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */
- unsigned short ao_update_bits; /* remembers the last write to the
- * 'update' dac */
-};
-
-/* analog out range for 'ao' boards */
-/*
-static const struct comedi_lrange range_ao_2 = {
- 2, {
- BIP_RANGE(10),
- BIP_RANGE(5)
- }
-};
-*/
-
-static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev,
- uint16_t sample)
-{
- const struct das1800_board *board = dev->board_ptr;
-
- sample += 1 << (board->resolution - 1);
- return sample;
-}
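
Side note on the helper above: in bipolar modes the ADC returns two's complement codes, and adding half of full scale turns them into the unsigned offset-binary values comedi expects. A minimal standalone sketch of that arithmetic (illustration only, not part of this patch; the helper name is made up), assuming 12-bit samples:

#include <stdint.h>
#include <stdio.h>

/* Convert a two's complement ADC code to offset binary by adding
 * half of full scale, as munge_bipolar_sample() does. */
static uint16_t to_offset_binary(int16_t code, unsigned int resolution)
{
	return (uint16_t)(code + (1 << (resolution - 1)));
}

int main(void)
{
	/* 12-bit example: -2048 -> 0, 0 -> 2048, +2047 -> 4095 */
	printf("%u %u %u\n",
	       (unsigned int)to_offset_binary(-2048, 12),
	       (unsigned int)to_offset_binary(0, 12),
	       (unsigned int)to_offset_binary(2047, 12));
	return 0;
}
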
-
-static void munge_data(struct comedi_device *dev, uint16_t *array,
- unsigned int num_elements)
-{
- unsigned int i;
- int unipolar;
-
- /* see if card is using a unipolar or bipolar range so we can munge data correctly */
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
-
- /* convert to unsigned type if we are in a bipolar mode */
- if (!unipolar) {
- for (i = 0; i < num_elements; i++)
- array[i] = munge_bipolar_sample(dev, array[i]);
- }
-}
-
-static void das1800_handle_fifo_half_full(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct das1800_private *devpriv = dev->private;
- unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
-
- insw(dev->iobase + DAS1800_FIFO, devpriv->fifo_buf, nsamples);
- munge_data(dev, devpriv->fifo_buf, nsamples);
- comedi_buf_write_samples(s, devpriv->fifo_buf, nsamples);
-}
-
-static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned short dpnt;
- int unipolar;
-
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
-
- while (inb(dev->iobase + DAS1800_STATUS) & FNE) {
- dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* convert to unsigned type */
- dpnt = munge_bipolar_sample(dev, dpnt);
- comedi_buf_write_samples(s, &dpnt, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- break;
- }
-}
-
-/* Utility function used by das1800_flush_dma() and das1800_handle_dma() */
-static void das1800_flush_dma_channel(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_isadma_desc *desc)
-{
- unsigned int residue = comedi_isadma_disable(desc->chan);
- unsigned int nbytes = desc->size - residue;
- unsigned int nsamples;
-
- /* figure out how many points to read */
- nsamples = comedi_bytes_to_samples(s, nbytes);
- nsamples = comedi_nsamples_left(s, nsamples);
-
- munge_data(dev, desc->virt_addr, nsamples);
- comedi_buf_write_samples(s, desc->virt_addr, nsamples);
-}
-
-/* flushes remaining data from board when external trigger has stopped acquisition
- * and we are using dma transfers */
-static void das1800_flush_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct das1800_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL;
-
- das1800_flush_dma_channel(dev, s, desc);
-
- if (dual_dma) {
- /* switch to other channel and flush it */
- dma->cur_dma = 1 - dma->cur_dma;
- desc = &dma->desc[dma->cur_dma];
- das1800_flush_dma_channel(dev, s, desc);
- }
-
- /* get any remaining samples in fifo */
- das1800_handle_fifo_not_empty(dev, s);
-}
-
-static void das1800_handle_dma(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int status)
-{
- struct das1800_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL;
-
- das1800_flush_dma_channel(dev, s, desc);
-
- /* re-enable dma channel */
- comedi_isadma_program(desc);
-
- if (status & DMATC) {
- /* clear DMATC interrupt bit */
- outb(CLEAR_INTR_MASK & ~DMATC, dev->iobase + DAS1800_STATUS);
- /* switch dma channels for next time, if appropriate */
- if (dual_dma)
- dma->cur_dma = 1 - dma->cur_dma;
- }
-}
-
-static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct das1800_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc;
- int i;
-
- outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */
- outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */
- outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */
-
- for (i = 0; i < 2; i++) {
- desc = &dma->desc[i];
- if (desc->chan)
- comedi_isadma_disable(desc->chan);
- }
-
- return 0;
-}
-
-/* the guts of the interrupt handler, that is shared with das1800_ai_poll */
-static void das1800_ai_handler(struct comedi_device *dev)
-{
- struct das1800_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int status = inb(dev->iobase + DAS1800_STATUS);
-
- /* select adc for base address + 0 */
- outb(ADC, dev->iobase + DAS1800_SELECT);
- /* dma buffer full */
- if (devpriv->irq_dma_bits & DMA_ENABLED) {
- /* look for data from dma transfer even if dma terminal count hasn't happened yet */
- das1800_handle_dma(dev, s, status);
- } else if (status & FHF) { /* if fifo half full */
- das1800_handle_fifo_half_full(dev, s);
- } else if (status & FNE) { /* if fifo not empty */
- das1800_handle_fifo_not_empty(dev, s);
- }
-
- /* if the card's fifo has overflowed */
- if (status & OVF) {
- /* clear OVF interrupt bit */
- outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS);
- dev_err(dev->class_dev, "FIFO overflow\n");
- async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- return;
- }
- /* stop taking data if appropriate */
- /* stop_src TRIG_EXT */
- if (status & CT0TC) {
- /* clear CT0TC interrupt bit */
- outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS);
- /* make sure we get all remaining data from board before quitting */
- if (devpriv->irq_dma_bits & DMA_ENABLED)
- das1800_flush_dma(dev, s);
- else
- das1800_handle_fifo_not_empty(dev, s);
- async->events |= COMEDI_CB_EOA;
- } else if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- }
-
- comedi_handle_events(dev, s);
-}
-
-static int das1800_ai_poll(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned long flags;
-
- /* prevent race with interrupt handler */
- spin_lock_irqsave(&dev->spinlock, flags);
- das1800_ai_handler(dev);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return comedi_buf_n_bytes_ready(s);
-}
-
-static irqreturn_t das1800_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- unsigned int status;
-
- if (!dev->attached) {
- dev_err(dev->class_dev, "premature interrupt\n");
- return IRQ_HANDLED;
- }
-
- /* Prevent race with das1800_ai_poll() on multi processor systems.
- * Also protects indirect addressing in das1800_ai_handler */
- spin_lock(&dev->spinlock);
- status = inb(dev->iobase + DAS1800_STATUS);
-
- /* if interrupt was not caused by das-1800 */
- if (!(status & INT)) {
- spin_unlock(&dev->spinlock);
- return IRQ_NONE;
- }
- /* clear the interrupt status bit INT */
- outb(CLEAR_INTR_MASK & ~INT, dev->iobase + DAS1800_STATUS);
- /* handle interrupt */
- das1800_ai_handler(dev);
-
- spin_unlock(&dev->spinlock);
- return IRQ_HANDLED;
-}
-
-/* converts requested conversion timing to timing compatible with
- * hardware, used only when card is in 'burst mode'
- */
-static unsigned int burst_convert_arg(unsigned int convert_arg, int flags)
-{
- unsigned int micro_sec;
-
- /* in burst mode, the maximum conversion time is 64 microseconds */
- if (convert_arg > 64000)
- convert_arg = 64000;
-
- /* the conversion time must be an integral number of microseconds */
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- micro_sec = (convert_arg + 500) / 1000;
- break;
- case CMDF_ROUND_DOWN:
- micro_sec = convert_arg / 1000;
- break;
- case CMDF_ROUND_UP:
- micro_sec = (convert_arg - 1) / 1000 + 1;
- break;
- }
-
- /* return number of nanoseconds */
- return micro_sec * 1000;
-}
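
To make the rounding above concrete, here is a tiny standalone sketch (not driver code) that reproduces the burst_convert_arg() rounding arithmetic for a requested period of 6250 ns:

#include <stdio.h>

int main(void)
{
	unsigned int ns = 6250;	/* requested conversion period in ns */

	/* nearest: (6250 + 500) / 1000 = 6 -> 6000 ns */
	printf("nearest: %u ns\n", ((ns + 500) / 1000) * 1000);
	/* down:    6250 / 1000 = 6 -> 6000 ns */
	printf("down:    %u ns\n", (ns / 1000) * 1000);
	/* up:      (6250 - 1) / 1000 + 1 = 7 -> 7000 ns */
	printf("up:      %u ns\n", ((ns - 1) / 1000 + 1) * 1000);
	return 0;
}
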
-
-static int das1800_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int unipolar0 = CR_RANGE(cmd->chanlist[0]) & UNIPOLAR;
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int unipolar = CR_RANGE(cmd->chanlist[i]) & UNIPOLAR;
-
- if (unipolar != unipolar0) {
- dev_dbg(dev->class_dev,
- "unipolar and bipolar ranges cannot be mixed in the chanlist\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/* test analog input cmd */
-static int das1800_ai_do_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct das1800_board *board = dev->board_ptr;
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src,
- TRIG_COUNT | TRIG_EXT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->scan_begin_src != TRIG_FOLLOW &&
- cmd->convert_src != TRIG_TIMER)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- break;
- case TRIG_NONE:
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
- break;
- default:
- break;
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- /* we are not in burst mode */
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- } else if (cmd->convert_src == TRIG_TIMER) {
- /* we are in burst mode */
- arg = burst_convert_arg(cmd->convert_arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->chanlist_len;
- err |= comedi_check_trigger_arg_max(&cmd->
- scan_begin_arg,
- arg);
-
- arg = cmd->scan_begin_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg,
- cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg,
- arg);
- }
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= das1800_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-/* returns appropriate bits for control register a, depending on command */
-static int control_a_bits(const struct comedi_cmd *cmd)
-{
- int control_a;
-
- control_a = FFEN; /* enable fifo */
- if (cmd->stop_src == TRIG_EXT)
- control_a |= ATEN;
- switch (cmd->start_src) {
- case TRIG_EXT:
- control_a |= TGEN | CGSL;
- break;
- case TRIG_NOW:
- control_a |= CGEN;
- break;
- default:
- break;
- }
-
- return control_a;
-}
-
-/* returns appropriate bits for control register c, depending on command */
-static int control_c_bits(const struct comedi_cmd *cmd)
-{
- int control_c;
- int aref;
-
- /* set clock source to internal or external, select analog reference,
- * select unipolar / bipolar
- */
- aref = CR_AREF(cmd->chanlist[0]);
- control_c = UQEN; /* enable upper qram addresses */
- if (aref != AREF_DIFF)
- control_c |= SD;
- if (aref == AREF_COMMON)
- control_c |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(cmd->chanlist[0]) & UNIPOLAR)
- control_c |= UB;
- switch (cmd->scan_begin_src) {
- case TRIG_FOLLOW: /* not in burst mode */
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- /* trig on cascaded counters */
- control_c |= IPCLK;
- break;
- case TRIG_EXT:
- /* trig on falling edge of external trigger */
- control_c |= XPCLK;
- break;
- default:
- break;
- }
- break;
- case TRIG_TIMER:
- /* burst mode with internal pacer clock */
- control_c |= BMDE | IPCLK;
- break;
- case TRIG_EXT:
- /* burst mode with external trigger */
- control_c |= BMDE | XPCLK;
- break;
- default:
- break;
- }
-
- return control_c;
-}
-
-static unsigned int das1800_ai_transfer_size(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int maxbytes,
- unsigned int ns)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int max_samples = comedi_bytes_to_samples(s, maxbytes);
- unsigned int samples;
-
- samples = max_samples;
-
- /* for timed modes, make dma buffer fill in 'ns' time */
- switch (cmd->scan_begin_src) {
- case TRIG_FOLLOW: /* not in burst mode */
- if (cmd->convert_src == TRIG_TIMER)
- samples = ns / cmd->convert_arg;
- break;
- case TRIG_TIMER:
- samples = ns / (cmd->scan_begin_arg * cmd->chanlist_len);
- break;
- }
-
- /* limit samples to what is remaining in the command */
- samples = comedi_nsamples_left(s, samples);
-
- if (samples > max_samples)
- samples = max_samples;
- if (samples < 1)
- samples = 1;
-
- return comedi_samples_to_bytes(s, samples);
-}
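
The sizing logic above aims to have each DMA block complete in roughly 0.3 s. A standalone sketch of the core arithmetic for the non-burst (TRIG_FOLLOW + TRIG_TIMER) case; the clamping against samples remaining in the command is omitted, and the buffer size here is a made-up value:

#include <stdio.h>

int main(void)
{
	unsigned int convert_ns = 10000;	/* 10 us per conversion, 100 kS/s */
	unsigned int fill_ns = 300000000;	/* fill the buffer in ~0.3 s */
	unsigned int bytes_per_sample = 2;	/* 16-bit FIFO words */
	unsigned int max_bytes = 16384;		/* hypothetical DMA descriptor size */
	unsigned int samples = fill_ns / convert_ns;		/* 30000 samples */
	unsigned int bytes = samples * bytes_per_sample;	/* 60000 bytes */

	if (bytes > max_bytes)	/* clamp to the descriptor size */
		bytes = max_bytes;
	printf("transfer size: %u bytes\n", bytes);	/* 16384 */
	return 0;
}
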
-
-static void das1800_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct das1800_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[0];
- unsigned int bytes;
-
- if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
- return;
-
- dma->cur_dma = 0;
-
- /* determine a dma transfer size to fill buffer in 0.3 sec */
- bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
-
- desc->size = bytes;
- comedi_isadma_program(desc);
-
- /* set up dual dma if appropriate */
- if (devpriv->irq_dma_bits & DMA_DUAL) {
- desc = &dma->desc[1];
- desc->size = bytes;
- comedi_isadma_program(desc);
- }
-}
-
-/* programs channel/gain list into card */
-static void program_chanlist(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- int i, n, chan_range;
- unsigned long irq_flags;
- const int range_mask = 0x3; /* masks unipolar/bipolar bit off range */
- const int range_bitshift = 8;
-
- n = cmd->chanlist_len;
- /* spinlock protects indirect addressing */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*set QRAM address start */
- /* make channel / gain list */
- for (i = 0; i < n; i++) {
- chan_range =
- CR_CHAN(cmd->chanlist[i]) |
- ((CR_RANGE(cmd->chanlist[i]) & range_mask) <<
- range_bitshift);
- outw(chan_range, dev->iobase + DAS1800_QRAM);
- }
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-}
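
Each QRAM entry written above packs one channel/gain pair into a 16-bit word: bits 0-7 carry the channel and bits 8-9 the gain code, with the unipolar/bipolar bit of the comedi range masked off. A standalone sketch of that encoding (illustration only; the helper name is made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t qram_entry(unsigned int chan, unsigned int range)
{
	return (uint16_t)(chan | ((range & 0x3) << 8));
}

int main(void)
{
	/* channel 5 with gain code 2 -> 0x0205 */
	printf("0x%04x\n", (unsigned int)qram_entry(5, 2));
	return 0;
}
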
-
-/* analog input do_cmd */
-static int das1800_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct das1800_private *devpriv = dev->private;
- int control_a, control_c;
- struct comedi_async *async = s->async;
- const struct comedi_cmd *cmd = &async->cmd;
-
- /* disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY
- * (because dma in handler is unsafe at hard real-time priority) */
- if (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY))
- devpriv->irq_dma_bits &= ~DMA_ENABLED;
- else
- devpriv->irq_dma_bits |= devpriv->dma_bits;
- /* interrupt on end of conversion for CMDF_WAKE_EOS */
- if (cmd->flags & CMDF_WAKE_EOS) {
- /* interrupt fifo not empty */
- devpriv->irq_dma_bits &= ~FIMD;
- } else {
- /* interrupt fifo half full */
- devpriv->irq_dma_bits |= FIMD;
- }
-
- das1800_cancel(dev, s);
-
- /* determine proper bits for control registers */
- control_a = control_a_bits(cmd);
- control_c = control_c_bits(cmd);
-
- /* setup card and start */
- program_chanlist(dev, cmd);
-
- /* setup cascaded counters for conversion/scan frequency */
- if ((cmd->scan_begin_src == TRIG_FOLLOW ||
- cmd->scan_begin_src == TRIG_TIMER) &&
- cmd->convert_src == TRIG_TIMER) {
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- }
-
- /* setup counter 0 for 'about triggering' */
- if (cmd->stop_src == TRIG_EXT)
- comedi_8254_load(dev->pacer, 0, 1, I8254_MODE0 | I8254_BINARY);
-
- das1800_ai_setup_dma(dev, s);
- outb(control_c, dev->iobase + DAS1800_CONTROL_C);
- /* set conversion rate and length for burst mode */
- if (control_c & BMDE) {
- /* program conversion period with number of microseconds minus 1 */
- outb(cmd->convert_arg / 1000 - 1,
- dev->iobase + DAS1800_BURST_RATE);
- outb(cmd->chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH);
- }
- outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B); /* enable irq/dma */
- outb(control_a, dev->iobase + DAS1800_CONTROL_A); /* enable fifo and triggering */
- outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
-
- return 0;
-}
-
-/* read analog input */
-static int das1800_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct das1800_board *board = dev->board_ptr;
- int i, n;
- int chan, range, aref, chan_range;
- int timeout = 1000;
- unsigned short dpnt;
- int conv_flags = 0;
- unsigned long irq_flags;
-
- /* set up analog reference and unipolar / bipolar mode */
- aref = CR_AREF(insn->chanspec);
- conv_flags |= UQEN;
- if (aref != AREF_DIFF)
- conv_flags |= SD;
- if (aref == AREF_COMMON)
- conv_flags |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(insn->chanspec) & UNIPOLAR)
- conv_flags |= UB;
-
- outb(conv_flags, dev->iobase + DAS1800_CONTROL_C); /* software conversion enabled */
- outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
- outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */
- outb(FFEN, dev->iobase + DAS1800_CONTROL_A);
-
- chan = CR_CHAN(insn->chanspec);
- /* mask of unipolar/bipolar bit from range */
- range = CR_RANGE(insn->chanspec) & 0x3;
- chan_range = chan | (range << 8);
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /* set QRAM address start */
- outw(chan_range, dev->iobase + DAS1800_QRAM);
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- outb(ADC, dev->iobase + DAS1800_SELECT); /* select ADC for baseAddress + 0x0 */
-
- for (n = 0; n < insn->n; n++) {
- /* trigger conversion */
- outb(0, dev->iobase + DAS1800_FIFO);
- for (i = 0; i < timeout; i++) {
- if (inb(dev->iobase + DAS1800_STATUS) & FNE)
- break;
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "timeout\n");
- n = -ETIME;
- goto exit;
- }
- dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* shift data to offset binary for bipolar ranges */
- if ((conv_flags & UB) == 0)
- dpnt += 1 << (board->resolution - 1);
- data[n] = dpnt;
- }
-exit:
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- return n;
-}
-
-/* writes to an analog output channel */
-static int das1800_ao_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct das1800_board *board = dev->board_ptr;
- struct das1800_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
-/* int range = CR_RANGE(insn->chanspec); */
- int update_chan = board->ao_n_chan - 1;
- unsigned short output;
- unsigned long irq_flags;
-
- /* card expects two's complement data */
- output = data[0] - (1 << (board->resolution - 1));
- /* if the write is to the 'update' channel, we need to remember its value */
- if (chan == update_chan)
- devpriv->ao_update_bits = output;
- /* write to channel */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(DAC(chan), dev->iobase + DAS1800_SELECT); /* select dac channel for baseAddress + 0x0 */
- outw(output, dev->iobase + DAS1800_DAC);
- /* now we need to write to 'update' channel to update all dac channels */
- if (chan != update_chan) {
- outb(DAC(update_chan), dev->iobase + DAS1800_SELECT); /* select 'update' channel for baseAddress + 0x0 */
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
- }
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- return 1;
-}
-
-/* reads from digital input channels */
-static int das1800_di_rbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf;
- data[0] = 0;
-
- return insn->n;
-}
-
-static int das1800_do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAS1800_DIGITAL);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void das1800_init_dma(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct das1800_private *devpriv = dev->private;
- unsigned int *dma_chan;
-
- /*
- * it->options[2] is DMA channel 0
- * it->options[3] is DMA channel 1
- *
- * Encode the DMA channels into 2 digit hexadecimal for switch.
- */
- dma_chan = &it->options[2];
-
- switch ((dma_chan[0] & 0x7) | (dma_chan[1] << 4)) {
- case 0x5: /* dma0 == 5 */
- devpriv->dma_bits = DMA_CH5;
- break;
- case 0x6: /* dma0 == 6 */
- devpriv->dma_bits = DMA_CH6;
- break;
- case 0x7: /* dma0 == 7 */
- devpriv->dma_bits = DMA_CH7;
- break;
- case 0x65: /* dma0 == 5, dma1 == 6 */
- devpriv->dma_bits = DMA_CH5_CH6;
- break;
- case 0x76: /* dma0 == 6, dma1 == 7 */
- devpriv->dma_bits = DMA_CH6_CH7;
- break;
- case 0x57: /* dma0 == 7, dma1 == 5 */
- devpriv->dma_bits = DMA_CH7_CH5;
- break;
- default:
- return;
- }
-
- /* DMA can use 1 or 2 buffers, each with a separate channel */
- devpriv->dma = comedi_isadma_alloc(dev, dma_chan[1] ? 2 : 1,
- dma_chan[0], dma_chan[1],
- DMA_BUF_SIZE, COMEDI_ISADMA_READ);
- if (!devpriv->dma)
- devpriv->dma_bits = 0;
-}
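
The switch above works by packing the two requested DMA channels into one small key: the low nibble holds dma0 and the high nibble holds dma1. A standalone sketch of the encoding (illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int dma0 = 5, dma1 = 6;	/* e.g. it->options[2] and [3] */
	unsigned int key = (dma0 & 0x7) | (dma1 << 4);

	printf("0x%02x\n", key);	/* 0x65, matching the DMA_CH5_CH6 case */
	return 0;
}
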
-
-static void das1800_free_dma(struct comedi_device *dev)
-{
- struct das1800_private *devpriv = dev->private;
-
- if (devpriv)
- comedi_isadma_free(devpriv->dma);
-}
-
-static const struct das1800_board *das1800_probe(struct comedi_device *dev)
-{
- const struct das1800_board *board = dev->board_ptr;
- int index = board ? board - das1800_boards : -EINVAL;
- int id;
-
- /*
- * The dev->board_ptr will be set by comedi_device_attach() if the
- * board name provided by the user matches a board->name in this
- * driver. If so, this function sanity checks the id to verify that
- * the board is correct.
- *
- * If the dev->board_ptr is not set, the user is trying to attach
- * an unspecified board to this driver. In this case the id is used
- * to 'probe' for the correct dev->board_ptr.
- */
- id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
- switch (id) {
- case 0x3:
- if (index == das1801st_da || index == das1802st_da ||
- index == das1701st_da || index == das1702st_da)
- return board;
- index = das1801st;
- break;
- case 0x4:
- if (index == das1802hr_da || index == das1702hr_da)
- return board;
- index = das1802hr;
- break;
- case 0x5:
- if (index == das1801ao || index == das1802ao ||
- index == das1701ao || index == das1702ao)
- return board;
- index = das1801ao;
- break;
- case 0x6:
- if (index == das1802hr || index == das1702hr)
- return board;
- index = das1802hr;
- break;
- case 0x7:
- if (index == das1801st || index == das1802st ||
- index == das1701st || index == das1702st)
- return board;
- index = das1801st;
- break;
- case 0x8:
- if (index == das1801hc || index == das1802hc)
- return board;
- index = das1801hc;
- break;
- default:
- dev_err(dev->class_dev,
- "Board model: probe returned 0x%x (unknown, please report)\n",
- id);
- return NULL;
- }
- dev_err(dev->class_dev,
- "Board model (probed, not recommended): %s series\n",
- das1800_boards[index].name);
-
- return &das1800_boards[index];
-}
-
-static int das1800_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct das1800_board *board;
- struct das1800_private *devpriv;
- struct comedi_subdevice *s;
- unsigned int irq = it->options[1];
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], DAS1800_SIZE);
- if (ret)
- return ret;
-
- board = das1800_probe(dev);
- if (!board) {
- dev_err(dev->class_dev, "unable to determine board type\n");
- return -ENODEV;
- }
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- /* if it is an 'ao' board with fancy analog out then we need extra io ports */
- if (board->ao_ability == 2) {
- unsigned long iobase2 = dev->iobase + IOBASE2;
-
- ret = __comedi_request_region(dev, iobase2, DAS1800_SIZE);
- if (ret)
- return ret;
- devpriv->iobase2 = iobase2;
- }
-
- if (irq == 3 || irq == 5 || irq == 7 || irq == 10 || irq == 11 ||
- irq == 15) {
- ret = request_irq(irq, das1800_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0) {
- dev->irq = irq;
-
- switch (irq) {
- case 3:
- devpriv->irq_dma_bits |= 0x8;
- break;
- case 5:
- devpriv->irq_dma_bits |= 0x10;
- break;
- case 7:
- devpriv->irq_dma_bits |= 0x18;
- break;
- case 10:
- devpriv->irq_dma_bits |= 0x28;
- break;
- case 11:
- devpriv->irq_dma_bits |= 0x30;
- break;
- case 15:
- devpriv->irq_dma_bits |= 0x38;
- break;
- }
- }
- }
-
-	/* an irq and at least one dma channel are required to use dma */
-	if (dev->irq && it->options[2])
- das1800_init_dma(dev, it);
-
- devpriv->fifo_buf = kmalloc_array(FIFO_SIZE, sizeof(uint16_t), GFP_KERNEL);
- if (!devpriv->fifo_buf)
- return -ENOMEM;
-
- dev->pacer = comedi_8254_init(dev->iobase + DAS1800_COUNTER,
- I8254_OSC_BASE_5MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* analog input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
- if (board->common)
- s->subdev_flags |= SDF_COMMON;
- s->n_chan = board->qram_len;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = board->range_ai;
- s->insn_read = das1800_ai_rinsn;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmd = das1800_ai_do_cmd;
- s->do_cmdtest = das1800_ai_do_cmdtest;
- s->poll = das1800_ai_poll;
- s->cancel = das1800_cancel;
- }
-
- /* analog out */
- s = &dev->subdevices[1];
- if (board->ao_ability == 1) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->ao_n_chan;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = &range_bipolar10;
- s->insn_write = das1800_ao_winsn;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* di */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_di_rbits;
-
- /* do */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->do_n_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_do_wbits;
-
- das1800_cancel(dev, dev->read_subdev);
-
- /* initialize digital out channels */
- outb(0, dev->iobase + DAS1800_DIGITAL);
-
- /* initialize analog out channels */
- if (board->ao_ability == 1) {
- /* select 'update' dac channel for baseAddress + 0x0 */
- outb(DAC(board->ao_n_chan - 1),
- dev->iobase + DAS1800_SELECT);
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
- }
-
- return 0;
-};
-
-static void das1800_detach(struct comedi_device *dev)
-{
- struct das1800_private *devpriv = dev->private;
-
- das1800_free_dma(dev);
- if (devpriv) {
- kfree(devpriv->fifo_buf);
- if (devpriv->iobase2)
- release_region(devpriv->iobase2, DAS1800_SIZE);
- }
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver das1800_driver = {
- .driver_name = "das1800",
- .module = THIS_MODULE,
- .attach = das1800_attach,
- .detach = das1800_detach,
- .num_names = ARRAY_SIZE(das1800_boards),
- .board_name = &das1800_boards[0].name,
- .offset = sizeof(struct das1800_board),
-};
-module_comedi_driver(das1800_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
deleted file mode 100644
index 1701294b79cd..000000000000
--- a/drivers/staging/comedi/drivers/das6402.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * das6402.c
- * Comedi driver for DAS6402 compatible boards
- * Copyright(c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * Rewrite of an experimental driver by:
- * Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: das6402
- * Description: Keithley Metrabyte DAS6402 (& compatibles)
- * Devices: [Keithley Metrabyte] DAS6402-12 (das6402-12),
- * DAS6402-16 (das6402-16)
- * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
- * Updated: Fri, 14 Mar 2014 10:18:43 -0700
- * Status: unknown
- *
- * Configuration Options:
- * [0] - I/O base address
- * [1] - IRQ (optional, needed for async command support)
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-
-/*
- * Register I/O map
- */
-#define DAS6402_AI_DATA_REG 0x00
-#define DAS6402_AI_MUX_REG 0x02
-#define DAS6402_AI_MUX_LO(x) (((x) & 0x3f) << 0)
-#define DAS6402_AI_MUX_HI(x) (((x) & 0x3f) << 8)
-#define DAS6402_DI_DO_REG 0x03
-#define DAS6402_AO_DATA_REG(x) (0x04 + ((x) * 2))
-#define DAS6402_AO_LSB_REG(x) (0x04 + ((x) * 2))
-#define DAS6402_AO_MSB_REG(x) (0x05 + ((x) * 2))
-#define DAS6402_STATUS_REG 0x08
-#define DAS6402_STATUS_FFNE (1 << 0)
-#define DAS6402_STATUS_FHALF (1 << 1)
-#define DAS6402_STATUS_FFULL (1 << 2)
-#define DAS6402_STATUS_XINT (1 << 3)
-#define DAS6402_STATUS_INT (1 << 4)
-#define DAS6402_STATUS_XTRIG (1 << 5)
-#define DAS6402_STATUS_INDGT (1 << 6)
-#define DAS6402_STATUS_10MHZ (1 << 7)
-#define DAS6402_STATUS_W_CLRINT (1 << 0)
-#define DAS6402_STATUS_W_CLRXTR (1 << 1)
-#define DAS6402_STATUS_W_CLRXIN (1 << 2)
-#define DAS6402_STATUS_W_EXTEND (1 << 4)
-#define DAS6402_STATUS_W_ARMED (1 << 5)
-#define DAS6402_STATUS_W_POSTMODE (1 << 6)
-#define DAS6402_STATUS_W_10MHZ (1 << 7)
-#define DAS6402_CTRL_REG 0x09
-#define DAS6402_CTRL_SOFT_TRIG (0 << 0)
-#define DAS6402_CTRL_EXT_FALL_TRIG (1 << 0)
-#define DAS6402_CTRL_EXT_RISE_TRIG (2 << 0)
-#define DAS6402_CTRL_PACER_TRIG (3 << 0)
-#define DAS6402_CTRL_BURSTEN (1 << 2)
-#define DAS6402_CTRL_XINTE (1 << 3)
-#define DAS6402_CTRL_IRQ(x) ((x) << 4)
-#define DAS6402_CTRL_INTE (1 << 7)
-#define DAS6402_TRIG_REG 0x0a
-#define DAS6402_TRIG_TGEN (1 << 0)
-#define DAS6402_TRIG_TGSEL (1 << 1)
-#define DAS6402_TRIG_TGPOL (1 << 2)
-#define DAS6402_TRIG_PRETRIG (1 << 3)
-#define DAS6402_AO_RANGE(_chan, _range) ((_range) << ((_chan) ? 6 : 4))
-#define DAS6402_AO_RANGE_MASK(_chan) (3 << ((_chan) ? 6 : 4))
-#define DAS6402_MODE_REG 0x0b
-#define DAS6402_MODE_RANGE(x) ((x) << 0)
-#define DAS6402_MODE_POLLED (0 << 2)
-#define DAS6402_MODE_FIFONEPTY (1 << 2)
-#define DAS6402_MODE_FIFOHFULL (2 << 2)
-#define DAS6402_MODE_EOB (3 << 2)
-#define DAS6402_MODE_ENHANCED (1 << 4)
-#define DAS6402_MODE_SE (1 << 5)
-#define DAS6402_MODE_UNI (1 << 6)
-#define DAS6402_MODE_DMA1 (0 << 7)
-#define DAS6402_MODE_DMA3 (1 << 7)
-#define DAS6402_TIMER_BASE 0x0c
-
-static const struct comedi_lrange das6402_ai_ranges = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-/*
- * Analog output ranges are programmable on the DAS6402/12.
- * For the DAS6402/16 the range bits have no function, the
- * DAC ranges are selected by switches on the board.
- */
-static const struct comedi_lrange das6402_ao_ranges = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-struct das6402_boardinfo {
- const char *name;
- unsigned int maxdata;
-};
-
-static struct das6402_boardinfo das6402_boards[] = {
- {
- .name = "das6402-12",
- .maxdata = 0x0fff,
- }, {
- .name = "das6402-16",
- .maxdata = 0xffff,
- },
-};
-
-struct das6402_private {
- unsigned int irq;
- unsigned int ao_range;
-};
-
-static void das6402_set_mode(struct comedi_device *dev,
- unsigned int mode)
-{
- outb(DAS6402_MODE_ENHANCED | mode, dev->iobase + DAS6402_MODE_REG);
-}
-
-static void das6402_set_extended(struct comedi_device *dev,
- unsigned int val)
-{
- outb(DAS6402_STATUS_W_EXTEND, dev->iobase + DAS6402_STATUS_REG);
- outb(DAS6402_STATUS_W_EXTEND | val, dev->iobase + DAS6402_STATUS_REG);
- outb(val, dev->iobase + DAS6402_STATUS_REG);
-}
-
-static void das6402_clear_all_interrupts(struct comedi_device *dev)
-{
- outb(DAS6402_STATUS_W_CLRINT |
- DAS6402_STATUS_W_CLRXTR |
- DAS6402_STATUS_W_CLRXIN, dev->iobase + DAS6402_STATUS_REG);
-}
-
-static void das6402_ai_clear_eoc(struct comedi_device *dev)
-{
- outb(DAS6402_STATUS_W_CLRINT, dev->iobase + DAS6402_STATUS_REG);
-}
-
-static unsigned int das6402_ai_read_sample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int val;
-
- val = inw(dev->iobase + DAS6402_AI_DATA_REG);
- if (s->maxdata == 0x0fff)
- val >>= 4;
- return val;
-}
-
-static irqreturn_t das6402_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int status;
-
- status = inb(dev->iobase + DAS6402_STATUS_REG);
- if ((status & DAS6402_STATUS_INT) == 0)
- return IRQ_NONE;
-
- if (status & DAS6402_STATUS_FFULL) {
- async->events |= COMEDI_CB_OVERFLOW;
- } else if (status & DAS6402_STATUS_FFNE) {
- unsigned int val;
-
- val = das6402_ai_read_sample(dev, s);
- comedi_buf_write_samples(s, &val, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
- }
-
- das6402_clear_all_interrupts(dev);
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static void das6402_ai_set_mode(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chanspec,
- unsigned int mode)
-{
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
-
- mode |= DAS6402_MODE_RANGE(range);
- if (aref == AREF_GROUND)
- mode |= DAS6402_MODE_SE;
- if (comedi_range_is_unipolar(s, range))
- mode |= DAS6402_MODE_UNI;
-
- das6402_set_mode(dev, mode);
-}
-
-static int das6402_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct das6402_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan_lo = CR_CHAN(cmd->chanlist[0]);
- unsigned int chan_hi = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]);
-
- das6402_ai_set_mode(dev, s, cmd->chanlist[0], DAS6402_MODE_FIFONEPTY);
-
- /* load the mux for chanlist conversion */
- outw(DAS6402_AI_MUX_HI(chan_hi) | DAS6402_AI_MUX_LO(chan_lo),
- dev->iobase + DAS6402_AI_MUX_REG);
-
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
-
- /* enable interrupt and pacer trigger */
- outb(DAS6402_CTRL_INTE |
- DAS6402_CTRL_IRQ(devpriv->irq) |
- DAS6402_CTRL_PACER_TRIG, dev->iobase + DAS6402_CTRL_REG);
-
- return 0;
-}
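
The mux word written above carries the first and last channels of the (consecutive) chanlist in its low and high bytes, matching the DAS6402_AI_MUX_LO()/_HI() macros. A standalone sketch (illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int chan_lo = 0, chan_hi = 7;	/* chanlist covers channels 0-7 */
	unsigned int mux = ((chan_hi & 0x3f) << 8) | ((chan_lo & 0x3f) << 0);

	printf("0x%04x\n", mux);	/* 0x0700 */
	return 0;
}
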
-
-static int das6402_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
-
- if (chan != chan0 + i) {
- dev_dbg(dev->class_dev,
- "chanlist must be consecutive\n");
- return -EINVAL;
- }
-
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "chanlist must have the same range\n");
- return -EINVAL;
- }
-
- if (aref != aref0) {
- dev_dbg(dev->class_dev,
- "chanlist must have the same reference\n");
- return -EINVAL;
- }
-
- if (aref0 == AREF_DIFF && chan > (s->n_chan / 2)) {
- dev_dbg(dev->class_dev,
-				"chanlist differential channel too large\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static int das6402_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= das6402_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int das6402_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
-
- return 0;
-}
-
-static void das6402_ai_soft_trig(struct comedi_device *dev)
-{
- outw(0, dev->iobase + DAS6402_AI_DATA_REG);
-}
-
-static int das6402_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DAS6402_STATUS_REG);
- if (status & DAS6402_STATUS_FFNE)
- return 0;
- return -EBUSY;
-}
-
-static int das6402_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int aref = CR_AREF(insn->chanspec);
- int ret;
- int i;
-
- if (aref == AREF_DIFF && chan > (s->n_chan / 2))
- return -EINVAL;
-
- /* enable software conversion trigger */
- outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
-
- das6402_ai_set_mode(dev, s, insn->chanspec, DAS6402_MODE_POLLED);
-
- /* load the mux for single channel conversion */
- outw(DAS6402_AI_MUX_HI(chan) | DAS6402_AI_MUX_LO(chan),
- dev->iobase + DAS6402_AI_MUX_REG);
-
- for (i = 0; i < insn->n; i++) {
- das6402_ai_clear_eoc(dev);
- das6402_ai_soft_trig(dev);
-
- ret = comedi_timeout(dev, s, insn, das6402_ai_eoc, 0);
- if (ret)
- break;
-
- data[i] = das6402_ai_read_sample(dev, s);
- }
-
- das6402_ai_clear_eoc(dev);
-
- return insn->n;
-}
-
-static int das6402_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct das6402_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
- int i;
-
- /* set the range for this channel */
- val = devpriv->ao_range;
- val &= ~DAS6402_AO_RANGE_MASK(chan);
- val |= DAS6402_AO_RANGE(chan, range);
- if (val != devpriv->ao_range) {
- devpriv->ao_range = val;
- outb(val, dev->iobase + DAS6402_TRIG_REG);
- }
-
- /*
- * The DAS6402/16 has a jumper to select either individual
-	 * update (UPDATE) or simultaneous update (XFER) of both
-	 * DACs. In UPDATE mode, the DAC is updated when its MSB is
-	 * written. In XFER mode, after both DACs are loaded, a read
-	 * cycle of any DAC register updates both DACs simultaneously.
-	 *
-	 * If XFER mode is enabled, an (*insn_read) must be performed
-	 * to update the DACs with the last value written.
- */
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- s->readback[chan] = val;
-
- if (s->maxdata == 0x0fff) {
- /*
-			 * The DAS6402/12 combines the two 8-bit DAC registers
-			 * into one left-justified word (the 4 LSBs are don't
-			 * cares), so the data can be written as a single word.
- */
- val <<= 4;
- outw(val, dev->iobase + DAS6402_AO_DATA_REG(chan));
- } else {
- /*
-			 * The DAS6402/16 uses both 8-bit DAC registers; they
-			 * must be written LSB first, then MSB.
- */
- outb(val & 0xff,
- dev->iobase + DAS6402_AO_LSB_REG(chan));
- outb((val >> 8) & 0xff,
-			     dev->iobase + DAS6402_AO_MSB_REG(chan));
- }
- }
-
- return insn->n;
-}
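
The two write paths above differ only in how the value is packed: the 12-bit board wants the code left-justified in a single 16-bit word, while the 16-bit board wants the low byte followed by the high byte. A standalone sketch of the packing (illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x0abc;

	printf("DAS6402/12 word: 0x%04x\n", val << 4);	/* 0xabc0 */
	printf("DAS6402/16 LSB: 0x%02x MSB: 0x%02x\n",
	       val & 0xff, (val >> 8) & 0xff);		/* 0xbc, 0x0a */
	return 0;
}
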
-
-static int das6402_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /*
- * If XFER mode is enabled, reading any DAC register
-	 * will update both DACs simultaneously.
- */
- inw(dev->iobase + DAS6402_AO_LSB_REG(chan));
-
- return comedi_readback_insn_read(dev, s, insn, data);
-}
-
-static int das6402_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inb(dev->iobase + DAS6402_DI_DO_REG);
-
- return insn->n;
-}
-
-static int das6402_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAS6402_DI_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void das6402_reset(struct comedi_device *dev)
-{
- struct das6402_private *devpriv = dev->private;
-
- /* enable "Enhanced" mode */
- outb(DAS6402_MODE_ENHANCED, dev->iobase + DAS6402_MODE_REG);
-
- /* enable 10MHz pacer clock */
- das6402_set_extended(dev, DAS6402_STATUS_W_10MHZ);
-
- /* enable software conversion trigger */
- outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
-
- /* default ADC to single-ended unipolar 10V inputs */
- das6402_set_mode(dev, DAS6402_MODE_RANGE(0) |
- DAS6402_MODE_POLLED |
- DAS6402_MODE_SE |
- DAS6402_MODE_UNI);
-
- /* default mux for single channel conversion (channel 0) */
- outw(DAS6402_AI_MUX_HI(0) | DAS6402_AI_MUX_LO(0),
- dev->iobase + DAS6402_AI_MUX_REG);
-
-	/* set both DACs for unipolar 5V output range */
- devpriv->ao_range = DAS6402_AO_RANGE(0, 2) | DAS6402_AO_RANGE(1, 2);
- outb(devpriv->ao_range, dev->iobase + DAS6402_TRIG_REG);
-
-	/* set both DACs to 0V */
-	outw(0, dev->iobase + DAS6402_AO_DATA_REG(0));
-	outw(0, dev->iobase + DAS6402_AO_DATA_REG(1));
- inw(dev->iobase + DAS6402_AO_LSB_REG(0));
-
- /* set all digital outputs low */
- outb(0, dev->iobase + DAS6402_DI_DO_REG);
-
- das6402_clear_all_interrupts(dev);
-}
-
-static int das6402_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct das6402_boardinfo *board = dev->board_ptr;
- struct das6402_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- das6402_reset(dev);
-
- /* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
- if ((1 << it->options[1]) & 0x8cec) {
- ret = request_irq(it->options[1], das6402_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0) {
- dev->irq = it->options[1];
-
- switch (dev->irq) {
- case 10:
- devpriv->irq = 4;
- break;
- case 11:
- devpriv->irq = 1;
- break;
- case 15:
- devpriv->irq = 6;
- break;
- default:
- devpriv->irq = dev->irq;
- break;
- }
- }
- }
-
- dev->pacer = comedi_8254_init(dev->iobase + DAS6402_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 64;
- s->maxdata = board->maxdata;
- s->range_table = &das6402_ai_ranges;
- s->insn_read = das6402_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = das6402_ai_cmdtest;
- s->do_cmd = das6402_ai_cmd;
- s->cancel = das6402_ai_cancel;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = board->maxdata;
- s->range_table = &das6402_ao_ranges;
- s->insn_write = das6402_ao_insn_write;
- s->insn_read = das6402_ao_insn_read;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das6402_di_insn_bits;
-
-	/* Digital Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das6402_do_insn_bits;
-
- return 0;
-}
-
-static struct comedi_driver das6402_driver = {
- .driver_name = "das6402",
- .module = THIS_MODULE,
- .attach = das6402_attach,
- .detach = comedi_legacy_detach,
- .board_name = &das6402_boards[0].name,
- .num_names = ARRAY_SIZE(das6402_boards),
- .offset = sizeof(struct das6402_boardinfo),
-};
-module_comedi_driver(das6402_driver)
-
-MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
-MODULE_DESCRIPTION("Comedi driver for DAS6402 compatible boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
deleted file mode 100644
index b02f12201cf7..000000000000
--- a/drivers/staging/comedi/drivers/das800.c
+++ /dev/null
@@ -1,753 +0,0 @@
-/*
- comedi/drivers/das800.c
- Driver for Keithley das800 series boards and compatibles
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: das800
-Description: Keithley Metrabyte DAS800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
- DAS-802 (das-802),
- [Measurement Computing] CIO-DAS800 (cio-das800),
- CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
- CIO-DAS802/16 (cio-das802/16)
-Status: works, cio-das802/16 untested - email me if you have tested it
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed or externally triggered conversions)
-
-Notes:
- IRQ can be omitted, although the cmd interface will not work without it.
-
- All entries in the channel/gain list must use the same gain and be
- consecutive channels counting upwards in channel number (these are
- hardware limitations.)
-
- I've never tested the gain setting stuff since I only have a
- DAS-800 board with fixed gain.
-
- The cio-das802/16 does not have a fifo-empty status bit! Therefore
- only fifo-half-full transfers are possible with this card.
-
-cmd triggers supported:
- start_src: TRIG_NOW | TRIG_EXT
- scan_begin_src: TRIG_FOLLOW
- scan_end_src: TRIG_COUNT
- convert_src: TRIG_TIMER | TRIG_EXT
- stop_src: TRIG_NONE | TRIG_COUNT
-*/
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-
-#define N_CHAN_AI 8 /* number of analog input channels */
-
-/* Registers for the das800 */
-
-#define DAS800_LSB 0
-#define FIFO_EMPTY 0x1
-#define FIFO_OVF 0x2
-#define DAS800_MSB 1
-#define DAS800_CONTROL1 2
-#define CONTROL1_INTE 0x8
-#define DAS800_CONV_CONTROL 2
-#define ITE 0x1
-#define CASC 0x2
-#define DTEN 0x4
-#define IEOC 0x8
-#define EACS 0x10
-#define CONV_HCEN 0x80
-#define DAS800_SCAN_LIMITS 2
-#define DAS800_STATUS 2
-#define IRQ 0x8
-#define BUSY 0x80
-#define DAS800_GAIN 3
-#define CIO_FFOV 0x8 /* cio-das802/16 fifo overflow */
-#define CIO_ENHF 0x90 /* cio-das802/16 fifo half full int ena */
-#define CONTROL1 0x80
-#define CONV_CONTROL 0xa0
-#define SCAN_LIMITS 0xc0
-#define ID 0xe0
-#define DAS800_8254 4
-#define DAS800_STATUS2 7
-#define STATUS2_HCEN 0x80
-#define STATUS2_INTE 0X20
-#define DAS800_ID 7
-
-#define DAS802_16_HALF_FIFO_SZ 128
-
-struct das800_board {
- const char *name;
- int ai_speed;
- const struct comedi_lrange *ai_range;
- int resolution;
-};
-
-static const struct comedi_lrange range_das801_ai = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(10),
- BIP_RANGE(0.5),
- UNI_RANGE(1),
- BIP_RANGE(0.05),
- UNI_RANGE(0.1),
- BIP_RANGE(0.01),
- UNI_RANGE(0.02)
- }
-};
-
-static const struct comedi_lrange range_cio_das801_ai = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(10),
- BIP_RANGE(0.5),
- UNI_RANGE(1),
- BIP_RANGE(0.05),
- UNI_RANGE(0.1),
- BIP_RANGE(0.005),
- UNI_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_das802_ai = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(10),
- BIP_RANGE(2.5),
- UNI_RANGE(5),
- BIP_RANGE(1.25),
- UNI_RANGE(2.5),
- BIP_RANGE(0.625),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_das80216_ai = {
- 8, {
- BIP_RANGE(10),
- UNI_RANGE(10),
- BIP_RANGE(5),
- UNI_RANGE(5),
- BIP_RANGE(2.5),
- UNI_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(1.25)
- }
-};
-
-enum das800_boardinfo {
- BOARD_DAS800,
- BOARD_CIODAS800,
- BOARD_DAS801,
- BOARD_CIODAS801,
- BOARD_DAS802,
- BOARD_CIODAS802,
- BOARD_CIODAS80216,
-};
-
-static const struct das800_board das800_boards[] = {
- [BOARD_DAS800] = {
- .name = "das-800",
- .ai_speed = 25000,
- .ai_range = &range_bipolar5,
- .resolution = 12,
- },
- [BOARD_CIODAS800] = {
- .name = "cio-das800",
- .ai_speed = 20000,
- .ai_range = &range_bipolar5,
- .resolution = 12,
- },
- [BOARD_DAS801] = {
- .name = "das-801",
- .ai_speed = 25000,
- .ai_range = &range_das801_ai,
- .resolution = 12,
- },
- [BOARD_CIODAS801] = {
- .name = "cio-das801",
- .ai_speed = 20000,
- .ai_range = &range_cio_das801_ai,
- .resolution = 12,
- },
- [BOARD_DAS802] = {
- .name = "das-802",
- .ai_speed = 25000,
- .ai_range = &range_das802_ai,
- .resolution = 12,
- },
- [BOARD_CIODAS802] = {
- .name = "cio-das802",
- .ai_speed = 20000,
- .ai_range = &range_das802_ai,
- .resolution = 12,
- },
- [BOARD_CIODAS80216] = {
- .name = "cio-das802/16",
- .ai_speed = 10000,
- .ai_range = &range_das80216_ai,
- .resolution = 16,
- },
-};
-
-struct das800_private {
- unsigned int do_bits; /* digital output bits */
-};
-
-static void das800_ind_write(struct comedi_device *dev,
- unsigned val, unsigned reg)
-{
- /*
- * Select dev->iobase + 2 to be desired register
- * then write to that register.
- */
- outb(reg, dev->iobase + DAS800_GAIN);
- outb(val, dev->iobase + 2);
-}
-
-static unsigned das800_ind_read(struct comedi_device *dev, unsigned reg)
-{
- /*
- * Select dev->iobase + 7 to be desired register
- * then read from that register.
- */
- outb(reg, dev->iobase + DAS800_GAIN);
- return inb(dev->iobase + 7);
-}
-
-static void das800_enable(struct comedi_device *dev)
-{
- const struct das800_board *board = dev->board_ptr;
- struct das800_private *devpriv = dev->private;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- /* enable fifo-half full interrupts for cio-das802/16 */
- if (board->resolution == 16)
- outb(CIO_ENHF, dev->iobase + DAS800_GAIN);
- /* enable hardware triggering */
- das800_ind_write(dev, CONV_HCEN, CONV_CONTROL);
- /* enable card's interrupt */
- das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits, CONTROL1);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-}
-
-static void das800_disable(struct comedi_device *dev)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- /* disable hardware triggering of conversions */
- das800_ind_write(dev, 0x0, CONV_CONTROL);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-}
-
-static int das800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- das800_disable(dev);
- return 0;
-}
-
-static int das800_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
-
- if (chan != (chan0 + i) % s->n_chan) {
- dev_dbg(dev->class_dev,
- "chanlist must be consecutive, counting upwards\n");
- return -EINVAL;
- }
-
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "chanlist must all have the same gain\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
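
The check above allows the chanlist to wrap past the last channel, as long as the channels keep counting upwards modulo the board's channel count. A standalone sketch (illustration only) showing that 6, 7, 0, 1 is accepted on an 8-channel board:

#include <stdio.h>

int main(void)
{
	unsigned int chanlist[] = { 6, 7, 0, 1 };	/* wraps past channel 7 */
	unsigned int n_chan = 8, chan0 = chanlist[0];
	unsigned int i, ok = 1;

	for (i = 1; i < 4; i++)
		if (chanlist[i] != (chan0 + i) % n_chan)
			ok = 0;
	printf("%s\n", ok ? "valid" : "invalid");	/* valid */
	return 0;
}
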
-
-static int das800_ai_do_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct das800_board *board = dev->board_ptr;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int arg = cmd->convert_arg;
-
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= das800_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int das800_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct das800_board *board = dev->board_ptr;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int gain = CR_RANGE(cmd->chanlist[0]);
- unsigned int start_chan = CR_CHAN(cmd->chanlist[0]);
- unsigned int end_chan = (start_chan + cmd->chanlist_len - 1) % 8;
- unsigned int scan_chans = (end_chan << 3) | start_chan;
- int conv_bits;
- unsigned long irq_flags;
-
- das800_disable(dev);
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- /* set scan limits */
- das800_ind_write(dev, scan_chans, SCAN_LIMITS);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- /* set gain */
- if (board->resolution == 12 && gain > 0)
- gain += 0x7;
- gain &= 0xf;
- outb(gain, dev->iobase + DAS800_GAIN);
-
- /* enable auto channel scan, send interrupts on end of conversion
- * and set clock source to internal or external
- */
- conv_bits = 0;
- conv_bits |= EACS | IEOC;
- if (cmd->start_src == TRIG_EXT)
- conv_bits |= DTEN;
- if (cmd->convert_src == TRIG_TIMER) {
- conv_bits |= CASC | ITE;
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- }
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- das800_ind_write(dev, conv_bits, CONV_CONTROL);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- das800_enable(dev);
- return 0;
-}
-
-static unsigned int das800_ai_get_sample(struct comedi_device *dev)
-{
- unsigned int lsb = inb(dev->iobase + DAS800_LSB);
- unsigned int msb = inb(dev->iobase + DAS800_MSB);
-
- return (msb << 8) | lsb;
-}
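
A note on the raw word assembled above: on the 12-bit boards the conversion
result occupies the upper twelve bits and FIFO status rides along in the same
word, which is why das800_interrupt() below tests FIFO_EMPTY/FIFO_OVF and then
shifts the value right by four before masking with s->maxdata. A minimal
sketch of that decoding (plain C, not part of the original driver; the helper
name is invented for illustration):

    /* Decode a raw das800 sample word for a 12- or 16-bit board. */
    static unsigned int das800_decode_sample(unsigned int raw,
                                             unsigned int maxdata)
    {
            if (maxdata == 0x0fff)  /* 12-bit boards: sample in bits 15..4 */
                    raw >>= 4;
            return raw & maxdata;   /* 16-bit boards use the whole word */
    }

    /* Example: raw 0xabc3 on a 12-bit board decodes to sample 0xabc. */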
-
-static irqreturn_t das800_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct das800_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
- unsigned long irq_flags;
- unsigned int status;
- unsigned int val;
- bool fifo_empty;
- bool fifo_overflow;
- int i;
-
- status = inb(dev->iobase + DAS800_STATUS);
- if (!(status & IRQ))
- return IRQ_NONE;
- if (!dev->attached)
- return IRQ_HANDLED;
-
- async = s->async;
- cmd = &async->cmd;
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- status = das800_ind_read(dev, CONTROL1) & STATUS2_HCEN;
- /*
- * Don't release spinlock yet since we want to make sure
- * no one else disables hardware conversions.
- */
-
- /* if hardware conversions are not enabled, then quit */
- if (status == 0) {
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- return IRQ_HANDLED;
- }
-
- for (i = 0; i < DAS802_16_HALF_FIFO_SZ; i++) {
- val = das800_ai_get_sample(dev);
- if (s->maxdata == 0x0fff) {
- fifo_empty = !!(val & FIFO_EMPTY);
- fifo_overflow = !!(val & FIFO_OVF);
- } else {
- /* cio-das802/16 has no fifo empty status bit */
- fifo_empty = false;
- fifo_overflow = !!(inb(dev->iobase + DAS800_GAIN) &
- CIO_FFOV);
- }
- if (fifo_empty || fifo_overflow)
- break;
-
- if (s->maxdata == 0x0fff)
- val >>= 4; /* 12-bit sample */
-
- val &= s->maxdata;
- comedi_buf_write_samples(s, &val, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- break;
- }
- }
-
- if (fifo_overflow) {
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
- }
-
- if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
- /*
- * Re-enable card's interrupt.
- * We already hold the spinlock, so indirect addressing is safe
- */
- das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
- CONTROL1);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- } else {
- /* otherwise, stop taking data */
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- das800_disable(dev);
- }
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static int das800_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DAS800_STATUS);
- if ((status & BUSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int das800_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct das800_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned long irq_flags;
- unsigned int val;
- int ret;
- int i;
-
- das800_disable(dev);
-
- /* set multiplexer */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- das800_ind_write(dev, chan | devpriv->do_bits, CONTROL1);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- /* set gain / range */
- if (s->maxdata == 0x0fff && range)
- range += 0x7;
- range &= 0xf;
- outb(range, dev->iobase + DAS800_GAIN);
-
- udelay(5);
-
- for (i = 0; i < insn->n; i++) {
- /* trigger conversion */
- outb_p(0, dev->iobase + DAS800_MSB);
-
- ret = comedi_timeout(dev, s, insn, das800_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = das800_ai_get_sample(dev);
- if (s->maxdata == 0x0fff)
- val >>= 4; /* 12-bit sample */
- data[i] = val & s->maxdata;
- }
-
- return insn->n;
-}
-
-static int das800_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = (inb(dev->iobase + DAS800_STATUS) >> 4) & 0x7;
-
- return insn->n;
-}
-
-static int das800_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct das800_private *devpriv = dev->private;
- unsigned long irq_flags;
-
- if (comedi_dio_update_state(s, data)) {
- devpriv->do_bits = s->state << 4;
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
- CONTROL1);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static const struct das800_board *das800_probe(struct comedi_device *dev)
-{
- const struct das800_board *board = dev->board_ptr;
- int index = board ? board - das800_boards : -EINVAL;
- int id_bits;
- unsigned long irq_flags;
-
- /*
- * The dev->board_ptr will be set by comedi_device_attach() if the
- * board name provided by the user matches a board->name in this
- * driver. If so, this function sanity checks the id_bits to verify
- * that the board is correct.
- *
- * If the dev->board_ptr is not set, the user is trying to attach
- * an unspecified board to this driver. In this case the id_bits
- * are used to 'probe' for the correct dev->board_ptr.
- */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- id_bits = das800_ind_read(dev, ID) & 0x3;
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- switch (id_bits) {
- case 0x0:
- if (index == BOARD_DAS800 || index == BOARD_CIODAS800)
- return board;
- index = BOARD_DAS800;
- break;
- case 0x2:
- if (index == BOARD_DAS801 || index == BOARD_CIODAS801)
- return board;
- index = BOARD_DAS801;
- break;
- case 0x3:
- if (index == BOARD_DAS802 || index == BOARD_CIODAS802 ||
- index == BOARD_CIODAS80216)
- return board;
- index = BOARD_DAS802;
- break;
- default:
- dev_dbg(dev->class_dev, "Board model: 0x%x (unknown)\n",
- id_bits);
- return NULL;
- }
- dev_dbg(dev->class_dev, "Board model (probed): %s series\n",
- das800_boards[index].name);
-
- return &das800_boards[index];
-}
-
-static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct das800_board *board;
- struct das800_private *devpriv;
- struct comedi_subdevice *s;
- unsigned int irq = it->options[1];
- unsigned long irq_flags;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x8);
- if (ret)
- return ret;
-
- board = das800_probe(dev);
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- if (irq > 1 && irq <= 7) {
- ret = request_irq(irq, das800_interrupt, 0, dev->board_name,
- dev);
- if (ret == 0)
- dev->irq = irq;
- }
-
- dev->pacer = comedi_8254_init(dev->iobase + DAS800_8254,
- I8254_OSC_BASE_1MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = board->ai_range;
- s->insn_read = das800_ai_insn_read;
- if (dev->irq) {
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 8;
- s->do_cmdtest = das800_ai_do_cmdtest;
- s->do_cmd = das800_ai_do_cmd;
- s->cancel = das800_cancel;
- }
-
- /* Digital Input subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 3;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das800_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das800_do_insn_bits;
-
- das800_disable(dev);
-
- /* initialize digital out channels */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits, CONTROL1);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
-
- return 0;
-};
-
-static struct comedi_driver driver_das800 = {
- .driver_name = "das800",
- .module = THIS_MODULE,
- .attach = das800_attach,
- .detach = comedi_legacy_detach,
- .num_names = ARRAY_SIZE(das800_boards),
- .board_name = &das800_boards[0].name,
- .offset = sizeof(struct das800_board),
-};
-module_comedi_driver(driver_das800);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
deleted file mode 100644
index 958c0d4aae5c..000000000000
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ /dev/null
@@ -1,623 +0,0 @@
-/*
- * dmm32at.c
- * Diamond Systems Diamond-MM-32-AT Comedi driver
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: dmm32at
- * Description: Diamond Systems Diamond-MM-32-AT
- * Devices: [Diamond Systems] Diamond-MM-32-AT (dmm32at)
- * Author: Perry J. Piplani <perry.j.piplani@nasa.gov>
- * Updated: Fri Jun 4 09:13:24 CDT 2004
- * Status: experimental
- *
- * Configuration Options:
- * comedi_config /dev/comedi0 dmm32at baseaddr,irq
- *
- * This driver is for the Diamond Systems MM-32-AT board
- * http://www.diamondsystems.com/products/diamondmm32at
- *
- * It is being used on several projects inside NASA, without
- * problems so far. For analog input commands, TRIG_EXT is not
- * yet supported.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-
-/* Board register addresses */
-#define DMM32AT_AI_START_CONV_REG 0x00
-#define DMM32AT_AI_LSB_REG 0x00
-#define DMM32AT_AUX_DOUT_REG 0x01
-#define DMM32AT_AUX_DOUT2 (1 << 2) /* J3.42 - OUT2 (OUT2EN) */
-#define DMM32AT_AUX_DOUT1 (1 << 1) /* J3.43 */
-#define DMM32AT_AUX_DOUT0 (1 << 0) /* J3.44 - OUT0 (OUT0EN) */
-#define DMM32AT_AI_MSB_REG 0x01
-#define DMM32AT_AI_LO_CHAN_REG 0x02
-#define DMM32AT_AI_HI_CHAN_REG 0x03
-#define DMM32AT_AUX_DI_REG 0x04
-#define DMM32AT_AUX_DI_DACBUSY (1 << 7)
-#define DMM32AT_AUX_DI_CALBUSY (1 << 6)
-#define DMM32AT_AUX_DI3 (1 << 3) /* J3.45 - ADCLK (CLKSEL) */
-#define DMM32AT_AUX_DI2 (1 << 2) /* J3.46 - GATE12 (GT12EN) */
-#define DMM32AT_AUX_DI1 (1 << 1) /* J3.47 - GATE0 (GT0EN) */
-#define DMM32AT_AUX_DI0 (1 << 0) /* J3.48 - CLK0 (SRC0) */
-#define DMM32AT_AO_LSB_REG 0x04
-#define DMM32AT_AO_MSB_REG 0x05
-#define DMM32AT_AO_MSB_DACH(x) ((x) << 6)
-#define DMM32AT_FIFO_DEPTH_REG 0x06
-#define DMM32AT_FIFO_CTRL_REG 0x07
-#define DMM32AT_FIFO_CTRL_FIFOEN (1 << 3)
-#define DMM32AT_FIFO_CTRL_SCANEN (1 << 2)
-#define DMM32AT_FIFO_CTRL_FIFORST (1 << 1)
-#define DMM32AT_FIFO_STATUS_REG 0x07
-#define DMM32AT_FIFO_STATUS_EF (1 << 7)
-#define DMM32AT_FIFO_STATUS_HF (1 << 6)
-#define DMM32AT_FIFO_STATUS_FF (1 << 5)
-#define DMM32AT_FIFO_STATUS_OVF (1 << 4)
-#define DMM32AT_FIFO_STATUS_FIFOEN (1 << 3)
-#define DMM32AT_FIFO_STATUS_SCANEN (1 << 2)
-#define DMM32AT_FIFO_STATUS_PAGE_MASK (3 << 0)
-#define DMM32AT_CTRL_REG 0x08
-#define DMM32AT_CTRL_RESETA (1 << 5)
-#define DMM32AT_CTRL_RESETD (1 << 4)
-#define DMM32AT_CTRL_INTRST (1 << 3)
-#define DMM32AT_CTRL_PAGE_8254 (0 << 0)
-#define DMM32AT_CTRL_PAGE_8255 (1 << 0)
-#define DMM32AT_CTRL_PAGE_CALIB (3 << 0)
-#define DMM32AT_AI_STATUS_REG 0x08
-#define DMM32AT_AI_STATUS_STS (1 << 7)
-#define DMM32AT_AI_STATUS_SD1 (1 << 6)
-#define DMM32AT_AI_STATUS_SD0 (1 << 5)
-#define DMM32AT_AI_STATUS_ADCH_MASK (0x1f << 0)
-#define DMM32AT_INTCLK_REG 0x09
-#define DMM32AT_INTCLK_ADINT (1 << 7)
-#define DMM32AT_INTCLK_DINT (1 << 6)
-#define DMM32AT_INTCLK_TINT (1 << 5)
-#define DMM32AT_INTCLK_CLKEN (1 << 1) /* 1=see below 0=software */
-#define DMM32AT_INTCLK_CLKSEL (1 << 0) /* 1=OUT2 0=EXTCLK */
-#define DMM32AT_CTRDIO_CFG_REG 0x0a
-#define DMM32AT_CTRDIO_CFG_FREQ12 (1 << 7) /* CLK12 1=100KHz 0=10MHz */
-#define DMM32AT_CTRDIO_CFG_FREQ0 (1 << 6) /* CLK0 1=10KHz 0=10MHz */
-#define DMM32AT_CTRDIO_CFG_OUT2EN (1 << 5) /* J3.42 1=OUT2 is DOUT2 */
-#define DMM32AT_CTRDIO_CFG_OUT0EN (1 << 4) /* J3,44 1=OUT0 is DOUT0 */
-#define DMM32AT_CTRDIO_CFG_GT0EN (1 << 2) /* J3.47 1=DIN1 is GATE0 */
-#define DMM32AT_CTRDIO_CFG_SRC0 (1 << 1) /* CLK0 is 0=FREQ0 1=J3.48 */
-#define DMM32AT_CTRDIO_CFG_GT12EN (1 << 0) /* J3.46 1=DIN2 is GATE12 */
-#define DMM32AT_AI_CFG_REG 0x0b
-#define DMM32AT_AI_CFG_SCINT_20US (0 << 4)
-#define DMM32AT_AI_CFG_SCINT_15US (1 << 4)
-#define DMM32AT_AI_CFG_SCINT_10US (2 << 4)
-#define DMM32AT_AI_CFG_SCINT_5US (3 << 4)
-#define DMM32AT_AI_CFG_RANGE (1 << 3) /* 0=5V 1=10V */
-#define DMM32AT_AI_CFG_ADBU (1 << 2) /* 0=bipolar 1=unipolar */
-#define DMM32AT_AI_CFG_GAIN(x) ((x) << 0)
-#define DMM32AT_AI_READBACK_REG 0x0b
-#define DMM32AT_AI_READBACK_WAIT (1 << 7) /* DMM32AT_AI_STATUS_STS */
-#define DMM32AT_AI_READBACK_RANGE (1 << 3)
-#define DMM32AT_AI_READBACK_ADBU (1 << 2)
-#define DMM32AT_AI_READBACK_GAIN_MASK (3 << 0)
-
-#define DMM32AT_CLK1 0x0d
-#define DMM32AT_CLK2 0x0e
-#define DMM32AT_CLKCT 0x0f
-
-#define DMM32AT_8255_IOBASE 0x0c /* Page 1 registers */
-
-/* Board register values. */
-
-/* DMM32AT_AI_CFG_REG 0x0b */
-#define DMM32AT_RANGE_U10 0x0c
-#define DMM32AT_RANGE_U5 0x0d
-#define DMM32AT_RANGE_B10 0x08
-#define DMM32AT_RANGE_B5 0x00
-
-/* DMM32AT_CLKCT 0x0f */
-#define DMM32AT_CLKCT1 0x56 /* mode3 counter 1 - write low byte only */
-#define DMM32AT_CLKCT2 0xb6 /* mode3 counter 2 - write high and low byte */
-
-/* board AI ranges in comedi structure */
-static const struct comedi_lrange dmm32at_airanges = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- BIP_RANGE(10),
- BIP_RANGE(5)
- }
-};
-
-/* register values for above ranges */
-static const unsigned char dmm32at_rangebits[] = {
- DMM32AT_RANGE_U10,
- DMM32AT_RANGE_U5,
- DMM32AT_RANGE_B10,
- DMM32AT_RANGE_B5,
-};
-
-/* only one of these ranges is valid, as set by a jumper on the
- * board. The application should only use the range set by the jumper
- */
-static const struct comedi_lrange dmm32at_aoranges = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- BIP_RANGE(10),
- BIP_RANGE(5)
- }
-};
-
-static void dmm32at_ai_set_chanspec(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chanspec, int nchan)
-{
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int last_chan = (chan + nchan - 1) % s->n_chan;
-
- outb(DMM32AT_FIFO_CTRL_FIFORST, dev->iobase + DMM32AT_FIFO_CTRL_REG);
-
- if (nchan > 1)
- outb(DMM32AT_FIFO_CTRL_SCANEN,
- dev->iobase + DMM32AT_FIFO_CTRL_REG);
-
- outb(chan, dev->iobase + DMM32AT_AI_LO_CHAN_REG);
- outb(last_chan, dev->iobase + DMM32AT_AI_HI_CHAN_REG);
- outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AI_CFG_REG);
-}
-
-static unsigned int dmm32at_ai_get_sample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int val;
-
- val = inb(dev->iobase + DMM32AT_AI_LSB_REG);
- val |= (inb(dev->iobase + DMM32AT_AI_MSB_REG) << 8);
-
- /* munge two's complement value to offset binary */
- return comedi_offset_munge(s, val);
-}
-
-static int dmm32at_ai_status(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned char status;
-
- status = inb(dev->iobase + context);
- if ((status & DMM32AT_AI_STATUS_STS) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int dmm32at_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
- int i;
-
- dmm32at_ai_set_chanspec(dev, s, insn->chanspec, 1);
-
- /* wait for circuit to settle */
- ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
- DMM32AT_AI_READBACK_REG);
- if (ret)
- return ret;
-
- for (i = 0; i < insn->n; i++) {
- outb(0xff, dev->iobase + DMM32AT_AI_START_CONV_REG);
-
- ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
- DMM32AT_AI_STATUS_REG);
- if (ret)
- return ret;
-
- data[i] = dmm32at_ai_get_sample(dev, s);
- }
-
- return insn->n;
-}
-
-static int dmm32at_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- int i;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
-
- if (chan != (chan0 + i) % s->n_chan) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must be consecutive channels, counting upwards\n");
- return -EINVAL;
- }
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must all have the same gain\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int dmm32at_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000);
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 1000000000);
-
- if (cmd->convert_arg >= 17500)
- cmd->convert_arg = 20000;
- else if (cmd->convert_arg >= 12500)
- cmd->convert_arg = 15000;
- else if (cmd->convert_arg >= 7500)
- cmd->convert_arg = 10000;
- else
- cmd->convert_arg = 5000;
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= dmm32at_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
-{
- unsigned char lo1, lo2, hi2;
- unsigned short both2;
-
- /* based on the 10 MHz clock */
- lo1 = 200;
- both2 = nansec / 20000;
- hi2 = (both2 & 0xff00) >> 8;
- lo2 = both2 & 0x00ff;
-
- /* set counter clocks to 10MHz, disable all aux dio */
- outb(0, dev->iobase + DMM32AT_CTRDIO_CFG_REG);
-
- /* get access to the clock regs */
- outb(DMM32AT_CTRL_PAGE_8254, dev->iobase + DMM32AT_CTRL_REG);
-
- /* write the counter 1 control word and low byte to counter */
- outb(DMM32AT_CLKCT1, dev->iobase + DMM32AT_CLKCT);
- outb(lo1, dev->iobase + DMM32AT_CLK1);
-
- /* write the counter 2 control word and low byte then to counter */
- outb(DMM32AT_CLKCT2, dev->iobase + DMM32AT_CLKCT);
- outb(lo2, dev->iobase + DMM32AT_CLK2);
- outb(hi2, dev->iobase + DMM32AT_CLK2);
-
- /* enable the ai conversion interrupt and the clock to start scans */
- outb(DMM32AT_INTCLK_ADINT |
- DMM32AT_INTCLK_CLKEN | DMM32AT_INTCLK_CLKSEL,
- dev->iobase + DMM32AT_INTCLK_REG);
-}
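
In the pacer setup above, counter 1 runs in mode 3 with a fixed divisor of 200
(one output tick every 20 us from the 10 MHz clock) and counter 2 divides
those ticks down to the requested scan period, hence both2 = nansec / 20000.
A standalone sketch of that arithmetic, useful for sanity-checking a requested
scan_begin_arg (not part of the original driver; the helper name is invented):

    /* Counter-2 divisor for a requested scan period in nanoseconds. */
    static unsigned int dmm32at_scan_divisor(unsigned int scan_ns)
    {
            /* 10 MHz / 200 = 50 kHz, i.e. one counter-2 tick per 20000 ns */
            return scan_ns / 20000;
    }

    /*
     * Example: a 1 ms scan period gives a divisor of 50,
     * i.e. 10 MHz / (200 * 50) = 1 kHz scan rate.
     */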
-
-static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- dmm32at_ai_set_chanspec(dev, s, cmd->chanlist[0], cmd->chanlist_len);
-
- /* reset the interrupt just in case */
- outb(DMM32AT_CTRL_INTRST, dev->iobase + DMM32AT_CTRL_REG);
-
- /*
- * wait for circuit to settle
- * we don't have the 'insn' here but it's not needed
- */
- ret = comedi_timeout(dev, s, NULL, dmm32at_ai_status,
- DMM32AT_AI_READBACK_REG);
- if (ret)
- return ret;
-
- if (cmd->stop_src == TRIG_NONE || cmd->stop_arg > 1) {
- /* start the clock and enable the interrupts */
- dmm32at_setaitimer(dev, cmd->scan_begin_arg);
- } else {
- /* start the interrupts and initiate a single scan */
- outb(DMM32AT_INTCLK_ADINT, dev->iobase + DMM32AT_INTCLK_REG);
- outb(0xff, dev->iobase + DMM32AT_AI_START_CONV_REG);
- }
-
- return 0;
-}
-
-static int dmm32at_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- /* disable further interrupts and clocks */
- outb(0x0, dev->iobase + DMM32AT_INTCLK_REG);
- return 0;
-}
-
-static irqreturn_t dmm32at_isr(int irq, void *d)
-{
- struct comedi_device *dev = d;
- unsigned char intstat;
- unsigned int val;
- int i;
-
- if (!dev->attached) {
- dev_err(dev->class_dev, "spurious interrupt\n");
- return IRQ_HANDLED;
- }
-
- intstat = inb(dev->iobase + DMM32AT_INTCLK_REG);
-
- if (intstat & DMM32AT_INTCLK_ADINT) {
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- val = dmm32at_ai_get_sample(dev, s);
- comedi_buf_write_samples(s, &val, 1);
- }
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
-
- comedi_handle_events(dev, s);
- }
-
- /* reset the interrupt */
- outb(DMM32AT_CTRL_INTRST, dev->iobase + DMM32AT_CTRL_REG);
- return IRQ_HANDLED;
-}
-
-static int dmm32at_ao_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned char status;
-
- status = inb(dev->iobase + DMM32AT_AUX_DI_REG);
- if ((status & DMM32AT_AUX_DI_DACBUSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int dmm32at_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
- int ret;
-
- /* write LSB then MSB + chan to load DAC */
- outb(val & 0xff, dev->iobase + DMM32AT_AO_LSB_REG);
- outb((val >> 8) | DMM32AT_AO_MSB_DACH(chan),
- dev->iobase + DMM32AT_AO_MSB_REG);
-
- /* wait for circuit to settle */
- ret = comedi_timeout(dev, s, insn, dmm32at_ao_eoc, 0);
- if (ret)
- return ret;
-
- /* dummy read to update DAC */
- inb(dev->iobase + DMM32AT_AO_MSB_REG);
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int dmm32at_8255_io(struct comedi_device *dev,
- int dir, int port, int data, unsigned long regbase)
-{
- /* get access to the DIO regs */
- outb(DMM32AT_CTRL_PAGE_8255, dev->iobase + DMM32AT_CTRL_REG);
-
- if (dir) {
- outb(data, dev->iobase + regbase + port);
- return 0;
- }
- return inb(dev->iobase + regbase + port);
-}
-
-/* Make sure the board is there and put it to a known state */
-static int dmm32at_reset(struct comedi_device *dev)
-{
- unsigned char aihi, ailo, fifostat, aistat, intstat, airback;
-
- /* reset the board */
- outb(DMM32AT_CTRL_RESETA, dev->iobase + DMM32AT_CTRL_REG);
-
- /* allow a millisecond to reset */
- udelay(1000);
-
- /* zero scan and fifo control */
- outb(0x0, dev->iobase + DMM32AT_FIFO_CTRL_REG);
-
- /* zero interrupt and clock control */
- outb(0x0, dev->iobase + DMM32AT_INTCLK_REG);
-
- /* write a test channel range, the high 3 bits should drop */
- outb(0x80, dev->iobase + DMM32AT_AI_LO_CHAN_REG);
- outb(0xff, dev->iobase + DMM32AT_AI_HI_CHAN_REG);
-
- /* set the range at 10v unipolar */
- outb(DMM32AT_RANGE_U10, dev->iobase + DMM32AT_AI_CFG_REG);
-
- /* should take 10 us to settle, here's a hundred */
- udelay(100);
-
- /* read back the values */
- ailo = inb(dev->iobase + DMM32AT_AI_LO_CHAN_REG);
- aihi = inb(dev->iobase + DMM32AT_AI_HI_CHAN_REG);
- fifostat = inb(dev->iobase + DMM32AT_FIFO_STATUS_REG);
- aistat = inb(dev->iobase + DMM32AT_AI_STATUS_REG);
- intstat = inb(dev->iobase + DMM32AT_INTCLK_REG);
- airback = inb(dev->iobase + DMM32AT_AI_READBACK_REG);
-
- /*
- * NOTE: The (DMM32AT_AI_STATUS_SD1 | DMM32AT_AI_STATUS_SD0)
- * test makes this driver only work if the board is configured
- * with all A/D channels set for single-ended operation.
- */
- if (ailo != 0x00 || aihi != 0x1f ||
- fifostat != DMM32AT_FIFO_STATUS_EF ||
- aistat != (DMM32AT_AI_STATUS_SD1 | DMM32AT_AI_STATUS_SD0) ||
- intstat != 0x00 || airback != 0x0c)
- return -EIO;
-
- return 0;
-}
-
-static int dmm32at_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- ret = dmm32at_reset(dev);
- if (ret) {
- dev_err(dev->class_dev, "board detection failed\n");
- return ret;
- }
-
- if (it->options[1]) {
- ret = request_irq(it->options[1], dmm32at_isr, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 32;
- s->maxdata = 0xffff;
- s->range_table = &dmm32at_airanges;
- s->insn_read = dmm32at_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmd = dmm32at_ai_cmd;
- s->do_cmdtest = dmm32at_ai_cmdtest;
- s->cancel = dmm32at_ai_cancel;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->range_table = &dmm32at_aoranges;
- s->insn_write = dmm32at_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[2];
- return subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
-}
-
-static struct comedi_driver dmm32at_driver = {
- .driver_name = "dmm32at",
- .module = THIS_MODULE,
- .attach = dmm32at_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(dmm32at_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi: Diamond Systems Diamond-MM-32-AT");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
deleted file mode 100644
index 80e38dedd359..000000000000
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ /dev/null
@@ -1,639 +0,0 @@
-/*
- * comedi/drivers/dt2801.c
- * Device Driver for Data Translation DT2801
- *
- */
-/*
-Driver: dt2801
-Description: Data Translation DT2801 series and DT01-EZ
-Author: ds
-Status: works
-Devices: [Data Translation] DT2801 (dt2801), DT2801-A, DT2801/5716A,
- DT2805, DT2805/5716A, DT2808, DT2818, DT2809, DT01-EZ
-
-This driver can autoprobe the type of board.
-
-Configuration options:
- [0] - I/O port base address
- [1] - unused
- [2] - A/D reference 0=differential, 1=single-ended
- [3] - A/D range
- 0 = [-10, 10]
- 1 = [0,10]
- [4] - D/A 0 range
- 0 = [-10, 10]
- 1 = [-5,5]
- 2 = [-2.5,2.5]
- 3 = [0,10]
- 4 = [0,5]
- [5] - D/A 1 range (same choices)
-*/
-
-#include <linux/module.h>
-#include "../comedidev.h"
-#include <linux/delay.h>
-
-#define DT2801_TIMEOUT 1000
-
-/* Hardware Configuration */
-/* ====================== */
-
-#define DT2801_MAX_DMA_SIZE (64 * 1024)
-
-/* define's */
-/* ====================== */
-
-/* Commands */
-#define DT_C_RESET 0x0
-#define DT_C_CLEAR_ERR 0x1
-#define DT_C_READ_ERRREG 0x2
-#define DT_C_SET_CLOCK 0x3
-
-#define DT_C_TEST 0xb
-#define DT_C_STOP 0xf
-
-#define DT_C_SET_DIGIN 0x4
-#define DT_C_SET_DIGOUT 0x5
-#define DT_C_READ_DIG 0x6
-#define DT_C_WRITE_DIG 0x7
-
-#define DT_C_WRITE_DAIM 0x8
-#define DT_C_SET_DA 0x9
-#define DT_C_WRITE_DA 0xa
-
-#define DT_C_READ_ADIM 0xc
-#define DT_C_SET_AD 0xd
-#define DT_C_READ_AD 0xe
-
-/* Command modifiers (only used with read/write), EXTTRIG can be
- used with some other commands.
-*/
-#define DT_MOD_DMA (1<<4)
-#define DT_MOD_CONT (1<<5)
-#define DT_MOD_EXTCLK (1<<6)
-#define DT_MOD_EXTTRIG (1<<7)
-
-/* Bits in status register */
-#define DT_S_DATA_OUT_READY (1<<0)
-#define DT_S_DATA_IN_FULL (1<<1)
-#define DT_S_READY (1<<2)
-#define DT_S_COMMAND (1<<3)
-#define DT_S_COMPOSITE_ERROR (1<<7)
-
-/* registers */
-#define DT2801_DATA 0
-#define DT2801_STATUS 1
-#define DT2801_CMD 1
-
-#if 0
-/* ignore 'defined but not used' warning */
-static const struct comedi_lrange range_dt2801_ai_pgh_bipolar = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-#endif
-static const struct comedi_lrange range_dt2801_ai_pgl_bipolar = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02)
- }
-};
-
-#if 0
-/* ignore 'defined but not used' warning */
-static const struct comedi_lrange range_dt2801_ai_pgh_unipolar = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-#endif
-static const struct comedi_lrange range_dt2801_ai_pgl_unipolar = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
- }
-};
-
-struct dt2801_board {
- const char *name;
- int boardcode;
- int ad_diff;
- int ad_chan;
- int adbits;
- int adrangetype;
- int dabits;
-};
-
-/* Type IDs for the different boards of the DT2801 series
- (taken from the test software that comes with the board)
- */
-static const struct dt2801_board boardtypes[] = {
- {
- .name = "dt2801",
- .boardcode = 0x09,
- .ad_diff = 2,
- .ad_chan = 16,
- .adbits = 12,
- .adrangetype = 0,
- .dabits = 12},
- {
- .name = "dt2801-a",
- .boardcode = 0x52,
- .ad_diff = 2,
- .ad_chan = 16,
- .adbits = 12,
- .adrangetype = 0,
- .dabits = 12},
- {
- .name = "dt2801/5716a",
- .boardcode = 0x82,
- .ad_diff = 1,
- .ad_chan = 16,
- .adbits = 16,
- .adrangetype = 1,
- .dabits = 12},
- {
- .name = "dt2805",
- .boardcode = 0x12,
- .ad_diff = 1,
- .ad_chan = 16,
- .adbits = 12,
- .adrangetype = 0,
- .dabits = 12},
- {
- .name = "dt2805/5716a",
- .boardcode = 0x92,
- .ad_diff = 1,
- .ad_chan = 16,
- .adbits = 16,
- .adrangetype = 1,
- .dabits = 12},
- {
- .name = "dt2808",
- .boardcode = 0x20,
- .ad_diff = 0,
- .ad_chan = 16,
- .adbits = 12,
- .adrangetype = 2,
- .dabits = 8},
- {
- .name = "dt2818",
- .boardcode = 0xa2,
- .ad_diff = 0,
- .ad_chan = 4,
- .adbits = 12,
- .adrangetype = 0,
- .dabits = 12},
- {
- .name = "dt2809",
- .boardcode = 0xb0,
- .ad_diff = 0,
- .ad_chan = 8,
- .adbits = 12,
- .adrangetype = 1,
- .dabits = 12},
-};
-
-struct dt2801_private {
- const struct comedi_lrange *dac_range_types[2];
-};
-
-/* These are the low-level routines:
- writecommand: write a command to the board
- writedata: write data byte
- readdata: read data byte
- */
-
-/* Only checks the DataOutReady flag, not the Ready flag as is done
- in the examples in the manual. I don't see why that should be
- necessary. */
-static int dt2801_readdata(struct comedi_device *dev, int *data)
-{
- int stat = 0;
- int timeout = DT2801_TIMEOUT;
-
- do {
- stat = inb_p(dev->iobase + DT2801_STATUS);
- if (stat & (DT_S_COMPOSITE_ERROR | DT_S_READY))
- return stat;
- if (stat & DT_S_DATA_OUT_READY) {
- *data = inb_p(dev->iobase + DT2801_DATA);
- return 0;
- }
- } while (--timeout > 0);
-
- return -ETIME;
-}
-
-static int dt2801_readdata2(struct comedi_device *dev, int *data)
-{
- int lb = 0;
- int hb = 0;
- int ret;
-
- ret = dt2801_readdata(dev, &lb);
- if (ret)
- return ret;
- ret = dt2801_readdata(dev, &hb);
- if (ret)
- return ret;
-
- *data = (hb << 8) + lb;
- return 0;
-}
-
-static int dt2801_writedata(struct comedi_device *dev, unsigned int data)
-{
- int stat = 0;
- int timeout = DT2801_TIMEOUT;
-
- do {
- stat = inb_p(dev->iobase + DT2801_STATUS);
-
- if (stat & DT_S_COMPOSITE_ERROR)
- return stat;
- if (!(stat & DT_S_DATA_IN_FULL)) {
- outb_p(data & 0xff, dev->iobase + DT2801_DATA);
- return 0;
- }
- } while (--timeout > 0);
-
- return -ETIME;
-}
-
-static int dt2801_writedata2(struct comedi_device *dev, unsigned int data)
-{
- int ret;
-
- ret = dt2801_writedata(dev, data & 0xff);
- if (ret < 0)
- return ret;
- ret = dt2801_writedata(dev, data >> 8);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int dt2801_wait_for_ready(struct comedi_device *dev)
-{
- int timeout = DT2801_TIMEOUT;
- int stat;
-
- stat = inb_p(dev->iobase + DT2801_STATUS);
- if (stat & DT_S_READY)
- return 0;
- do {
- stat = inb_p(dev->iobase + DT2801_STATUS);
-
- if (stat & DT_S_COMPOSITE_ERROR)
- return stat;
- if (stat & DT_S_READY)
- return 0;
- } while (--timeout > 0);
-
- return -ETIME;
-}
-
-static void dt2801_writecmd(struct comedi_device *dev, int command)
-{
- int stat;
-
- dt2801_wait_for_ready(dev);
-
- stat = inb_p(dev->iobase + DT2801_STATUS);
- if (stat & DT_S_COMPOSITE_ERROR) {
- dev_dbg(dev->class_dev,
- "composite-error in %s, ignoring\n", __func__);
- }
- if (!(stat & DT_S_READY))
- dev_dbg(dev->class_dev, "!ready in %s, ignoring\n", __func__);
- outb_p(command, dev->iobase + DT2801_CMD);
-}
-
-static int dt2801_reset(struct comedi_device *dev)
-{
- int board_code = 0;
- unsigned int stat;
- int timeout;
-
- /* pull random data from data port */
- inb_p(dev->iobase + DT2801_DATA);
- inb_p(dev->iobase + DT2801_DATA);
- inb_p(dev->iobase + DT2801_DATA);
- inb_p(dev->iobase + DT2801_DATA);
-
- /* dt2801_writecmd(dev,DT_C_STOP); */
- outb_p(DT_C_STOP, dev->iobase + DT2801_CMD);
-
- /* dt2801_wait_for_ready(dev); */
- udelay(100);
- timeout = 10000;
- do {
- stat = inb_p(dev->iobase + DT2801_STATUS);
- if (stat & DT_S_READY)
- break;
- } while (timeout--);
- if (!timeout)
- dev_dbg(dev->class_dev, "timeout 1 status=0x%02x\n", stat);
-
- /* dt2801_readdata(dev,&board_code); */
-
- outb_p(DT_C_RESET, dev->iobase + DT2801_CMD);
- /* dt2801_writecmd(dev,DT_C_RESET); */
-
- udelay(100);
- timeout = 10000;
- do {
- stat = inb_p(dev->iobase + DT2801_STATUS);
- if (stat & DT_S_READY)
- break;
- } while (timeout--);
- if (!timeout)
- dev_dbg(dev->class_dev, "timeout 2 status=0x%02x\n", stat);
-
- dt2801_readdata(dev, &board_code);
-
- return board_code;
-}
-
-static int probe_number_of_ai_chans(struct comedi_device *dev)
-{
- int n_chans;
- int stat;
- int data;
-
- for (n_chans = 0; n_chans < 16; n_chans++) {
- dt2801_writecmd(dev, DT_C_READ_ADIM);
- dt2801_writedata(dev, 0);
- dt2801_writedata(dev, n_chans);
- stat = dt2801_readdata2(dev, &data);
-
- if (stat)
- break;
- }
-
- dt2801_reset(dev);
- dt2801_reset(dev);
-
- return n_chans;
-}
-
-static const struct comedi_lrange *dac_range_table[] = {
- &range_bipolar10,
- &range_bipolar5,
- &range_bipolar2_5,
- &range_unipolar10,
- &range_unipolar5
-};
-
-static const struct comedi_lrange *dac_range_lkup(int opt)
-{
- if (opt < 0 || opt >= 5)
- return &range_unknown;
- return dac_range_table[opt];
-}
-
-static const struct comedi_lrange *ai_range_lkup(int type, int opt)
-{
- switch (type) {
- case 0:
- return (opt) ?
- &range_dt2801_ai_pgl_unipolar :
- &range_dt2801_ai_pgl_bipolar;
- case 1:
- return (opt) ? &range_unipolar10 : &range_bipolar10;
- case 2:
- return &range_unipolar5;
- }
- return &range_unknown;
-}
-
-static int dt2801_error(struct comedi_device *dev, int stat)
-{
- if (stat < 0) {
- if (stat == -ETIME)
- dev_dbg(dev->class_dev, "timeout\n");
- else
- dev_dbg(dev->class_dev, "error %d\n", stat);
- return stat;
- }
- dev_dbg(dev->class_dev, "error status 0x%02x, resetting...\n", stat);
-
- dt2801_reset(dev);
- dt2801_reset(dev);
-
- return -EIO;
-}
-
-static int dt2801_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int d;
- int stat;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- dt2801_writecmd(dev, DT_C_READ_ADIM);
- dt2801_writedata(dev, CR_RANGE(insn->chanspec));
- dt2801_writedata(dev, CR_CHAN(insn->chanspec));
- stat = dt2801_readdata2(dev, &d);
-
- if (stat != 0)
- return dt2801_error(dev, stat);
-
- data[i] = d;
- }
-
- return i;
-}
-
-static int dt2801_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- dt2801_writecmd(dev, DT_C_WRITE_DAIM);
- dt2801_writedata(dev, chan);
- dt2801_writedata2(dev, data[0]);
-
- s->readback[chan] = data[0];
-
- return 1;
-}
-
-static int dt2801_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int which = (s == &dev->subdevices[3]) ? 1 : 0;
- unsigned int val = 0;
-
- if (comedi_dio_update_state(s, data)) {
- dt2801_writecmd(dev, DT_C_WRITE_DIG);
- dt2801_writedata(dev, which);
- dt2801_writedata(dev, s->state);
- }
-
- dt2801_writecmd(dev, DT_C_READ_DIG);
- dt2801_writedata(dev, which);
- dt2801_readdata(dev, &val);
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int dt2801_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0xff);
- if (ret)
- return ret;
-
- dt2801_writecmd(dev, s->io_bits ? DT_C_SET_DIGOUT : DT_C_SET_DIGIN);
- dt2801_writedata(dev, (s == &dev->subdevices[3]) ? 1 : 0);
-
- return insn->n;
-}
-
-/*
- options:
- [0] - i/o base
- [1] - unused
- [2] - a/d 0=differential, 1=single-ended
- [3] - a/d range 0=[-10,10], 1=[0,10]
- [4] - dac0 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
- [5] - dac1 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
-*/
-static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct dt2801_board *board;
- struct dt2801_private *devpriv;
- struct comedi_subdevice *s;
- int board_code, type;
- int ret = 0;
- int n_ai_chans;
-
- ret = comedi_request_region(dev, it->options[0], 0x2);
- if (ret)
- return ret;
-
- /* do some checking */
-
- board_code = dt2801_reset(dev);
-
- /* heh. if it didn't work, try it again. */
- if (!board_code)
- board_code = dt2801_reset(dev);
-
- for (type = 0; type < ARRAY_SIZE(boardtypes); type++) {
- if (boardtypes[type].boardcode == board_code)
- goto havetype;
- }
- dev_dbg(dev->class_dev,
- "unrecognized board code=0x%02x, contact author\n", board_code);
- type = 0;
-
-havetype:
- dev->board_ptr = boardtypes + type;
- board = dev->board_ptr;
-
- n_ai_chans = probe_number_of_ai_chans(dev);
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- goto out;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- dev->board_name = board->name;
-
- s = &dev->subdevices[0];
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
-#if 1
- s->n_chan = n_ai_chans;
-#else
- if (it->options[2])
- s->n_chan = board->ad_chan;
- else
- s->n_chan = board->ad_chan / 2;
-#endif
- s->maxdata = (1 << board->adbits) - 1;
- s->range_table = ai_range_lkup(board->adrangetype, it->options[3]);
- s->insn_read = dt2801_ai_insn_read;
-
- s = &dev->subdevices[1];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = (1 << board->dabits) - 1;
- s->range_table_list = devpriv->dac_range_types;
- devpriv->dac_range_types[0] = dac_range_lkup(it->options[4]);
- devpriv->dac_range_types[1] = dac_range_lkup(it->options[5]);
- s->insn_write = dt2801_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- /* 1st digital subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = dt2801_dio_insn_bits;
- s->insn_config = dt2801_dio_insn_config;
-
- s = &dev->subdevices[3];
- /* 2nd digital subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = dt2801_dio_insn_bits;
- s->insn_config = dt2801_dio_insn_config;
-
- ret = 0;
-out:
- return ret;
-}
-
-static struct comedi_driver dt2801_driver = {
- .driver_name = "dt2801",
- .module = THIS_MODULE,
- .attach = dt2801_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(dt2801_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
deleted file mode 100644
index a80773291fdc..000000000000
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- comedi/drivers/dt2811.c
- Hardware driver for Data Translation DT2811
-
- COMEDI - Linux Control and Measurement Device Interface
- History:
- Base Version - David A. Schleef <ds@schleef.org>
- December 1998 - Updated to work. David does not have a DT2811
- board any longer so this was suffering from bitrot.
- Update performed by ...
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
-/*
-Driver: dt2811
-Description: Data Translation DT2811
-Author: ds
-Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
-Status: works
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ, although this is currently unused
- [2] - A/D reference
- 0 = single-ended
- 1 = differential
- 2 = pseudo-differential (common reference)
- [3] - A/D range
- 0 = [-5, 5]
- 1 = [-2.5, 2.5]
- 2 = [0, 5]
- [4] - D/A 0 range (same choices)
- [5] - D/A 1 range (same choices)
-*/
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
- 4, {
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125)
- }
-};
-
-static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(0.5),
- UNI_RANGE(0.05),
- UNI_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
- 4, {
- BIP_RANGE(2.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.025),
- BIP_RANGE(0.005)
- }
-};
-
-static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01)
- }
-};
-
-/*
-
- 0x00 ADCSR R/W A/D Control/Status Register
- bit 7 - (R) 1 indicates A/D conversion done
- reading ADDAT clears bit
- (W) ignored
- bit 6 - (R) 1 indicates A/D error
- (W) ignored
- bit 5 - (R) 1 indicates A/D busy, cleared at end
- of conversion
- (W) ignored
- bit 4 - (R) 0
- (W) writing 1 clears the error flag (DT2811_CLRERROR)
- bit 3 - (R) 0
- bit 2 - (R/W) 1 indicates interrupts enabled
- bits 1,0 - (R/W) mode bits
- 00 single conversion on ADGCR load
- 01 continuous conversion, internal clock,
- (clock enabled on ADGCR load)
- 10 continuous conversion, internal clock,
- external trigger
- 11 continuous conversion, external clock,
- external trigger
-
- 0x01 ADGCR R/W A/D Gain/Channel Register
- bit 6,7 - (R/W) gain select
- 00 gain=1, both PGH, PGL models
- 01 gain=2 PGH, 10 PGL
- 10 gain=4 PGH, 100 PGL
- 11 gain=8 PGH, 500 PGL
- bit 4,5 - reserved
- bit 3-0 - (R/W) channel select
- channel number from 0-15
-
- 0x02,0x03 (R) ADDAT A/D Data Register
- (W) DADAT0 D/A Data Register 0
- 0x02 low byte
- 0x03 high byte
-
- 0x04,0x05 (W) DADAT0 D/A Data Register 1
-
- 0x06 (R) DIO0 Digital Input Port 0
- (W) DIO1 Digital Output Port 1
-
- 0x07 TMRCTR (R/W) Timer/Counter Register
- bits 6,7 - reserved
- bits 5-3 - Timer frequency control (mantissa)
- 543 divisor frequency (kHz)
- 000 1 600
- 001 10 60
- 010 2 300
- 011 3 200
- 100 4 150
- 101 5 120
- 110 6 100
- 111 12 50
- bits 2-0 - Timer frequency control (exponent)
- 210 multiply divisor/divide frequency by
- 000 1
- 001 10
- 010 100
- 011 1000
- 100 10000
- 101 100000
- 110 1000000
- 111 10000000
-
- */
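
To make the TMRCTR encoding above concrete, here is a hypothetical helper (not
taken from the driver) that packs the 3-bit mantissa code into bits 5-3 and
the 3-bit exponent code into bits 2-0, exactly as the table describes:

    static unsigned int dt2811_tmrctr(unsigned int mantissa_code,
                                      unsigned int exponent_code)
    {
            return ((mantissa_code & 0x7) << 3) | (exponent_code & 0x7);
    }

    /*
     * Example: mantissa code 001 (divisor 10, 60 kHz) with exponent code 011
     * (multiply the divisor by 1000) gives a total divisor of 10000, a 60 Hz
     * clock, and a register value of (1 << 3) | 3 = 0x0b.
     */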
-
-#define TIMEOUT 10000
-
-#define DT2811_ADCSR 0
-#define DT2811_ADGCR 1
-#define DT2811_ADDATLO 2
-#define DT2811_ADDATHI 3
-#define DT2811_DADAT0LO 2
-#define DT2811_DADAT0HI 3
-#define DT2811_DADAT1LO 4
-#define DT2811_DADAT1HI 5
-#define DT2811_DIO 6
-#define DT2811_TMRCTR 7
-
-/*
- * flags
- */
-
-/* ADCSR */
-
-#define DT2811_ADDONE 0x80
-#define DT2811_ADERROR 0x40
-#define DT2811_ADBUSY 0x20
-#define DT2811_CLRERROR 0x10
-#define DT2811_INTENB 0x04
-#define DT2811_ADMODE 0x03
-
-struct dt2811_board {
- const char *name;
- const struct comedi_lrange *bip_5;
- const struct comedi_lrange *bip_2_5;
- const struct comedi_lrange *unip_5;
-};
-
-enum { card_2811_pgh, card_2811_pgl };
-
-struct dt2811_private {
- int ntrig;
- int curadchan;
- enum {
- adc_singleended, adc_diff, adc_pseudo_diff
- } adc_mux;
- enum {
- dac_bipolar_5, dac_bipolar_2_5, dac_unipolar_5
- } dac_range[2];
- const struct comedi_lrange *range_type_list[2];
-};
-
-static const struct comedi_lrange *dac_range_types[] = {
- &range_bipolar5,
- &range_bipolar2_5,
- &range_unipolar5
-};
-
-static int dt2811_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DT2811_ADCSR);
- if ((status & DT2811_ADBUSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int chan = CR_CHAN(insn->chanspec);
- int ret;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- outb(chan, dev->iobase + DT2811_ADGCR);
-
- ret = comedi_timeout(dev, s, insn, dt2811_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[i] = inb(dev->iobase + DT2811_ADDATLO);
- data[i] |= inb(dev->iobase + DT2811_ADDATHI) << 8;
- data[i] &= 0xfff;
- }
-
- return i;
-}
-
-static int dt2811_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outb(val & 0xff, dev->iobase + DT2811_DADAT0LO + 2 * chan);
- outb((val >> 8) & 0xff,
- dev->iobase + DT2811_DADAT0HI + 2 * chan);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int dt2811_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inb(dev->iobase + DT2811_DIO);
-
- return insn->n;
-}
-
-static int dt2811_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DT2811_DIO);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-/*
- options[0] Board base address
- options[1] IRQ
- options[2] Input configuration
- 0 == single-ended
- 1 == differential
- 2 == pseudo-differential
- options[3] Analog input range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
- options[4] Analog output 0 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
- options[5] Analog output 1 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
-*/
-static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- /* int i; */
- const struct dt2811_board *board = dev->board_ptr;
- struct dt2811_private *devpriv;
- int ret;
- struct comedi_subdevice *s;
-
- ret = comedi_request_region(dev, it->options[0], 0x8);
- if (ret)
- return ret;
-
-#if 0
- outb(0, dev->iobase + DT2811_ADCSR);
- udelay(100);
- i = inb(dev->iobase + DT2811_ADDATLO);
- i = inb(dev->iobase + DT2811_ADDATHI);
-#endif
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- switch (it->options[2]) {
- case 0:
- devpriv->adc_mux = adc_singleended;
- break;
- case 1:
- devpriv->adc_mux = adc_diff;
- break;
- case 2:
- devpriv->adc_mux = adc_pseudo_diff;
- break;
- default:
- devpriv->adc_mux = adc_singleended;
- break;
- }
- switch (it->options[4]) {
- case 0:
- devpriv->dac_range[0] = dac_bipolar_5;
- break;
- case 1:
- devpriv->dac_range[0] = dac_bipolar_2_5;
- break;
- case 2:
- devpriv->dac_range[0] = dac_unipolar_5;
- break;
- default:
- devpriv->dac_range[0] = dac_bipolar_5;
- break;
- }
- switch (it->options[5]) {
- case 0:
- devpriv->dac_range[1] = dac_bipolar_5;
- break;
- case 1:
- devpriv->dac_range[1] = dac_bipolar_2_5;
- break;
- case 2:
- devpriv->dac_range[1] = dac_unipolar_5;
- break;
- default:
- devpriv->dac_range[1] = dac_bipolar_5;
- break;
- }
-
- s = &dev->subdevices[0];
- /* initialize the ADC subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = devpriv->adc_mux == adc_diff ? 8 : 16;
- s->insn_read = dt2811_ai_insn;
- s->maxdata = 0xfff;
- switch (it->options[3]) {
- case 0:
- default:
- s->range_table = board->bip_5;
- break;
- case 1:
- s->range_table = board->bip_2_5;
- break;
- case 2:
- s->range_table = board->unip_5;
- break;
- }
-
- s = &dev->subdevices[1];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0xfff;
- s->range_table_list = devpriv->range_type_list;
- devpriv->range_type_list[0] = dac_range_types[devpriv->dac_range[0]];
- devpriv->range_type_list[1] = dac_range_types[devpriv->dac_range[1]];
- s->insn_write = dt2811_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- /* di subdevice */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->insn_bits = dt2811_di_insn_bits;
- s->maxdata = 1;
- s->range_table = &range_digital;
-
- s = &dev->subdevices[3];
- /* do subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->insn_bits = dt2811_do_insn_bits;
- s->maxdata = 1;
- s->state = 0;
- s->range_table = &range_digital;
-
- return 0;
-}
-
-static const struct dt2811_board boardtypes[] = {
- {
- .name = "dt2811-pgh",
- .bip_5 = &range_dt2811_pgh_ai_5_bipolar,
- .bip_2_5 = &range_dt2811_pgh_ai_2_5_bipolar,
- .unip_5 = &range_dt2811_pgh_ai_5_unipolar,
- }, {
- .name = "dt2811-pgl",
- .bip_5 = &range_dt2811_pgl_ai_5_bipolar,
- .bip_2_5 = &range_dt2811_pgl_ai_2_5_bipolar,
- .unip_5 = &range_dt2811_pgl_ai_5_unipolar,
- },
-};
-
-static struct comedi_driver dt2811_driver = {
- .driver_name = "dt2811",
- .module = THIS_MODULE,
- .attach = dt2811_attach,
- .detach = comedi_legacy_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
- .offset = sizeof(struct dt2811_board),
-};
-module_comedi_driver(dt2811_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
deleted file mode 100644
index 66705f9a0621..000000000000
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- comedi/drivers/dt2814.c
- Hardware driver for Data Translation DT2814
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: dt2814
-Description: Data Translation DT2814
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2814 (dt2814)
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ
-
-This card has 16 analog inputs multiplexed onto a 12 bit ADC. There
-is a minimally useful onboard clock. The base frequency for the
-clock is selected by jumpers, and the clock divider can be selected
-via programmed I/O. Unfortunately, the clock divider can only be
-a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
-addition, the clock does not seem to be very accurate.
-*/
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include <linux/delay.h>
-
-#define DT2814_CSR 0
-#define DT2814_DATA 1
-
-/*
- * flags
- */
-
-#define DT2814_FINISH 0x80
-#define DT2814_ERR 0x40
-#define DT2814_BUSY 0x20
-#define DT2814_ENB 0x10
-#define DT2814_CHANMASK 0x0f
-
-struct dt2814_private {
- int ntrig;
- int curadchan;
-};
-
-#define DT2814_TIMEOUT 10
-#define DT2814_MAX_SPEED 100000 /* Arbitrary 10 kHz limit */
-
-static int dt2814_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DT2814_CSR);
- if (status & DT2814_FINISH)
- return 0;
- return -EBUSY;
-}
-
-static int dt2814_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int n, hi, lo;
- int chan;
- int ret;
-
- for (n = 0; n < insn->n; n++) {
- chan = CR_CHAN(insn->chanspec);
-
- outb(chan, dev->iobase + DT2814_CSR);
-
- ret = comedi_timeout(dev, s, insn, dt2814_ai_eoc, 0);
- if (ret)
- return ret;
-
- hi = inb(dev->iobase + DT2814_DATA);
- lo = inb(dev->iobase + DT2814_DATA);
-
- data[n] = (hi << 4) | (lo >> 4);
- }
-
- return n;
-}
-
-static int dt2814_ns_to_timer(unsigned int *ns, unsigned int flags)
-{
- int i;
- unsigned int f;
-
- /* XXX ignores flags */
-
- f = 10000; /* ns */
- for (i = 0; i < 8; i++) {
- if ((2 * (*ns)) < (f * 11))
- break;
- f *= 10;
- }
-
- *ns = f;
-
- return i;
-}
-
-static int dt2814_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 1000000000);
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- DT2814_MAX_SPEED);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 2);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- arg = cmd->scan_begin_arg;
- dt2814_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int dt2814_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct dt2814_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int chan;
- int trigvar;
-
- trigvar = dt2814_ns_to_timer(&cmd->scan_begin_arg, cmd->flags);
-
- chan = CR_CHAN(cmd->chanlist[0]);
-
- devpriv->ntrig = cmd->stop_arg;
- outb(chan | DT2814_ENB | (trigvar << 5), dev->iobase + DT2814_CSR);
-
- return 0;
-}
-
-static irqreturn_t dt2814_interrupt(int irq, void *d)
-{
- int lo, hi;
- struct comedi_device *dev = d;
- struct dt2814_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- int data;
-
- if (!dev->attached) {
- dev_err(dev->class_dev, "spurious interrupt\n");
- return IRQ_HANDLED;
- }
-
- hi = inb(dev->iobase + DT2814_DATA);
- lo = inb(dev->iobase + DT2814_DATA);
-
- data = (hi << 4) | (lo >> 4);
-
- if (!(--devpriv->ntrig)) {
- int i;
-
- outb(0, dev->iobase + DT2814_CSR);
- /* note: turning off timed mode triggers another
- sample. */
-
- for (i = 0; i < DT2814_TIMEOUT; i++) {
- if (inb(dev->iobase + DT2814_CSR) & DT2814_FINISH)
- break;
- }
- inb(dev->iobase + DT2814_DATA);
- inb(dev->iobase + DT2814_DATA);
-
- s->async->events |= COMEDI_CB_EOA;
- }
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct dt2814_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- ret = comedi_request_region(dev, it->options[0], 0x2);
- if (ret)
- return ret;
-
- outb(0, dev->iobase + DT2814_CSR);
- udelay(100);
- if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) {
- dev_err(dev->class_dev, "reset error (fatal)\n");
- return -EIO;
- }
- i = inb(dev->iobase + DT2814_DATA);
- i = inb(dev->iobase + DT2814_DATA);
-
- if (it->options[1]) {
- ret = request_irq(it->options[1], dt2814_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 16; /* XXX */
- s->insn_read = dt2814_ai_insn_read;
- s->maxdata = 0xfff;
- s->range_table = &range_unknown; /* XXX */
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 1;
- s->do_cmd = dt2814_ai_cmd;
- s->do_cmdtest = dt2814_ai_cmdtest;
- }
-
- return 0;
-}
-
-static struct comedi_driver dt2814_driver = {
- .driver_name = "dt2814",
- .module = THIS_MODULE,
- .attach = dt2814_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(dt2814_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
deleted file mode 100644
index fb08569c1ac1..000000000000
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- comedi/drivers/dt2815.c
- Hardware driver for Data Translation DT2815
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
-/*
-Driver: dt2815
-Description: Data Translation DT2815
-Author: ds
-Status: mostly complete, untested
-Devices: [Data Translation] DT2815 (dt2815)
-
-I'm not sure anyone has ever tested this board. If you have information
-to the contrary, please update.
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (unused)
- [2] - Voltage unipolar/bipolar configuration
- 0 == unipolar 5V (0V -- +5V)
- 1 == bipolar 5V (-5V -- +5V)
- [3] - Current offset configuration
- 0 == disabled (0 mA -- +32 mA)
- 1 == enabled (+4 mA -- +20 mA)
- [4] - Firmware program configuration
- 0 == program 1 (see manual table 5-4)
- 1 == program 2 (see manual table 5-4)
- 2 == program 3 (see manual table 5-4)
- 3 == program 4 (see manual table 5-4)
- [5] - Analog output 0 range configuration
- 0 == voltage
- 1 == current
- [6] - Analog output 1 range configuration (same options)
- [7] - Analog output 2 range configuration (same options)
- [8] - Analog output 3 range configuration (same options)
- [9] - Analog output 4 range configuration (same options)
- [10] - Analog output 5 range configuration (same options)
- [11] - Analog output 6 range configuration (same options)
- [12] - Analog output 7 range configuration (same options)
-*/
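[Editorial note, not part of the deleted file] For reference, the data format behind the options above is a 12-bit sample split across two byte writes. This standalone sketch (hypothetical names, not driver code) reproduces the packing performed in dt2815_ao_insn() further down; bit 0 of the low byte is always set by the driver.

#include <stdio.h>

static void dt2815_pack(unsigned int chan, unsigned int sample,
                        unsigned char *lo, unsigned char *hi)
{
    *lo = ((sample & 0x0f) << 4) | (chan << 1) | 0x01;  /* low nibble, channel, bit 0 */
    *hi = (sample & 0xff0) >> 4;                        /* upper eight bits */
}

int main(void)
{
    unsigned char lo, hi;

    dt2815_pack(3, 0xabc, &lo, &hi);
    printf("lo=0x%02x hi=0x%02x\n", lo, hi);    /* lo=0xc7 hi=0xab */
    return 0;
}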
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <linux/delay.h>
-
-#define DT2815_DATA 0
-#define DT2815_STATUS 1
-
-struct dt2815_private {
- const struct comedi_lrange *range_type_list[8];
- unsigned int ao_readback[8];
-};
-
-static int dt2815_ao_status(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + DT2815_STATUS);
- if (status == context)
- return 0;
- return -EBUSY;
-}
-
-static int dt2815_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct dt2815_private *devpriv = dev->private;
- int i;
- int chan = CR_CHAN(insn->chanspec);
-
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_readback[chan];
-
- return i;
-}
-
-static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct dt2815_private *devpriv = dev->private;
- int i;
- int chan = CR_CHAN(insn->chanspec);
- unsigned int lo, hi;
- int ret;
-
- for (i = 0; i < insn->n; i++) {
- lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
- hi = (data[i] & 0xff0) >> 4;
-
- ret = comedi_timeout(dev, s, insn, dt2815_ao_status, 0x00);
- if (ret)
- return ret;
-
- outb(lo, dev->iobase + DT2815_DATA);
-
- ret = comedi_timeout(dev, s, insn, dt2815_ao_status, 0x10);
- if (ret)
- return ret;
-
- devpriv->ao_readback[chan] = data[i];
- }
- return i;
-}
-
-/*
- options[0] Board base address
- options[1] IRQ (not applicable)
- options[2] Voltage unipolar/bipolar configuration
- 0 == unipolar 5V (0V -- +5V)
- 1 == bipolar 5V (-5V -- +5V)
- options[3] Current offset configuration
- 0 == disabled (0 mA -- +32 mA)
- 1 == enabled (+4 mA -- +20 mA)
- options[4] Firmware program configuration
- 0 == program 1 (see manual table 5-4)
- 1 == program 2 (see manual table 5-4)
- 2 == program 3 (see manual table 5-4)
- 3 == program 4 (see manual table 5-4)
- options[5] Analog output 0 range configuration
- 0 == voltage
- 1 == current
- options[6] Analog output 1 range configuration
- ...
- options[12] Analog output 7 range configuration
- 0 == voltage
- 1 == current
- */
-
-static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct dt2815_private *devpriv;
- struct comedi_subdevice *s;
- int i;
- const struct comedi_lrange *current_range_type, *voltage_range_type;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x2);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- s = &dev->subdevices[0];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 0xfff;
- s->n_chan = 8;
- s->insn_write = dt2815_ao_insn;
- s->insn_read = dt2815_ao_insn_read;
- s->range_table_list = devpriv->range_type_list;
-
- current_range_type = (it->options[3])
- ? &range_4_20mA : &range_0_32mA;
- voltage_range_type = (it->options[2])
- ? &range_bipolar5 : &range_unipolar5;
- for (i = 0; i < 8; i++) {
- devpriv->range_type_list[i] = (it->options[5 + i])
- ? current_range_type : voltage_range_type;
- }
-
- /* Init the 2815 */
- outb(0x00, dev->iobase + DT2815_STATUS);
- for (i = 0; i < 100; i++) {
- /* This is incredibly slow (approx 20 ms) */
- unsigned int status;
-
- udelay(1000);
- status = inb(dev->iobase + DT2815_STATUS);
- if (status == 4) {
- unsigned int program;
-
- program = (it->options[4] & 0x3) << 3 | 0x7;
- outb(program, dev->iobase + DT2815_DATA);
- dev_dbg(dev->class_dev, "program: 0x%x (@t=%d)\n",
- program, i);
- break;
- } else if (status != 0x00) {
- dev_dbg(dev->class_dev,
- "unexpected status 0x%x (@t=%d)\n",
- status, i);
- if (status & 0x60)
- outb(0x00, dev->iobase + DT2815_STATUS);
- }
- }
-
- return 0;
-}
-
-static struct comedi_driver dt2815_driver = {
- .driver_name = "dt2815",
- .module = THIS_MODULE,
- .attach = dt2815_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(dt2815_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
deleted file mode 100644
index 5131deebf66f..000000000000
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- comedi/drivers/dt2817.c
- Hardware driver for Data Translation DT2817
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: dt2817
-Description: Data Translation DT2817
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2817 (dt2817)
-
-A very simple digital I/O card. Four banks of 8 lines, each bank
-is configurable for input or output. One wonders why it takes a
-50 page manual to describe this thing.
-
-The driver (which, btw, is much less than 50 pages) has 1 subdevice
-with 32 channels, configurable in groups of 8.
-
-Configuration options:
- [0] - I/O port base address
-*/
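[Editorial note, not part of the deleted file] The group-of-8 configuration mentioned above boils down to one output-enable bit per bank in the CR register. Below is a standalone sketch (hypothetical helper, not driver code) of the mapping that dt2817_dio_insn_config() performs from the 32-bit per-channel direction mask.

#include <stdio.h>

static unsigned int dt2817_oe_bits(unsigned int io_bits)
{
    unsigned int oe = 0;
    int bank;

    for (bank = 0; bank < 4; bank++) {
        /* any output line in a bank makes the whole bank an output */
        if (io_bits & (0xffu << (8 * bank)))
            oe |= 1u << bank;
    }
    return oe;
}

int main(void)
{
    /* banks 0 and 2 configured as outputs */
    printf("oe = 0x%x\n", dt2817_oe_bits(0x00ff00ff));  /* prints 0x5 */
    return 0;
}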
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#define DT2817_CR 0
-#define DT2817_DATA 1
-
-static int dt2817_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int oe = 0;
- unsigned int mask;
- int ret;
-
- if (chan < 8)
- mask = 0x000000ff;
- else if (chan < 16)
- mask = 0x0000ff00;
- else if (chan < 24)
- mask = 0x00ff0000;
- else
- mask = 0xff000000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- if (s->io_bits & 0x000000ff)
- oe |= 0x1;
- if (s->io_bits & 0x0000ff00)
- oe |= 0x2;
- if (s->io_bits & 0x00ff0000)
- oe |= 0x4;
- if (s->io_bits & 0xff000000)
- oe |= 0x8;
-
- outb(oe, dev->iobase + DT2817_CR);
-
- return insn->n;
-}
-
-static int dt2817_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long iobase = dev->iobase + DT2817_DATA;
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0x000000ff)
- outb(s->state & 0xff, iobase + 0);
- if (mask & 0x0000ff00)
- outb((s->state >> 8) & 0xff, iobase + 1);
- if (mask & 0x00ff0000)
- outb((s->state >> 16) & 0xff, iobase + 2);
- if (mask & 0xff000000)
- outb((s->state >> 24) & 0xff, iobase + 3);
- }
-
- val = inb(iobase + 0);
- val |= (inb(iobase + 1) << 8);
- val |= (inb(iobase + 2) << 16);
- val |= (inb(iobase + 3) << 24);
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int dt2817_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- int ret;
- struct comedi_subdevice *s;
-
- ret = comedi_request_region(dev, it->options[0], 0x5);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
-
- s->n_chan = 32;
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->range_table = &range_digital;
- s->maxdata = 1;
- s->insn_bits = dt2817_dio_insn_bits;
- s->insn_config = dt2817_dio_insn_config;
-
- s->state = 0;
- outb(0, dev->iobase + DT2817_CR);
-
- return 0;
-}
-
-static struct comedi_driver dt2817_driver = {
- .driver_name = "dt2817",
- .module = THIS_MODULE,
- .attach = dt2817_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(dt2817_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
deleted file mode 100644
index 5a536a00066f..000000000000
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ /dev/null
@@ -1,1211 +0,0 @@
-/*
- * dt282x.c
- * Comedi driver for Data Translation DT2821 series
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: dt282x
- * Description: Data Translation DT2821 series (including DT-EZ)
- * Author: ds
- * Devices: [Data Translation] DT2821 (dt2821), DT2821-F-16SE (dt2821-f),
- * DT2821-F-8DI (dt2821-f), DT2821-G-16SE (dt2821-g),
- * DT2821-G-8DI (dt2821-g), DT2823 (dt2823), DT2824-PGH (dt2824-pgh),
- * DT2824-PGL (dt2824-pgl), DT2825 (dt2825), DT2827 (dt2827),
- * DT2828 (dt2828), DT2829 (dt2829), DT21-EZ (dt21-ez), DT23-EZ (dt23-ez),
- * DT24-EZ (dt24-ez), DT24-EZ-PGL (dt24-ez-pgl)
- * Status: complete
- * Updated: Wed, 22 Aug 2001 17:11:34 -0700
- *
- * Configuration options:
- * [0] - I/O port base address
- * [1] - IRQ (optional, required for async command support)
- * [2] - DMA 1 (optional, required for async command support)
- * [3] - DMA 2 (optional, required for async command support)
- * [4] - AI jumpered for 0=single ended, 1=differential
- * [5] - AI jumpered for 0=straight binary, 1=2's complement
- * [6] - AO 0 data format (deprecated, see below)
- * [7] - AO 1 data format (deprecated, see below)
- * [8] - AI jumpered for 0=[-10,10]V, 1=[0,10], 2=[-5,5], 3=[0,5]
- * [9] - AO channel 0 range (deprecated, see below)
- * [10]- AO channel 1 range (deprecated, see below)
- *
- * Notes:
- * - AO commands might be broken.
- * - If you try to run a command on both the AI and AO subdevices
- * simultaneously, bad things will happen. The driver needs to
- * be fixed to check for this situation and return an error.
- * - AO range is not programmable. The AO subdevice has a range_table
- * containing all the possible analog output ranges. Use the range
- * that matches your board configuration to convert between data
- * values and physical units. The format of the data written to the
- * board is handled automatically based on the unipolar/bipolar
- * range that is selected.
- */
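[Editorial note, not part of the deleted file] Regarding the automatic data-format handling mentioned in the notes above: for a bipolar range the driver flips the sign (most significant) bit to convert between straight binary and two's complement, via the comedi_offset_munge() calls in the AI read and AO write paths below. A minimal standalone illustration for a 12-bit converter (this is not the kernel helper itself):

#include <stdio.h>

static unsigned int flip_sign_bit(unsigned int val, unsigned int maxdata)
{
    return val ^ ((maxdata + 1) >> 1);  /* 0x800 for a 12-bit converter */
}

int main(void)
{
    /* mid-scale offset-binary code (0 V on a bipolar range) maps to 0x000 */
    printf("0x%03x -> 0x%03x\n", 0x800, flip_sign_bit(0x800, 0xfff));   /* 0x000 */
    printf("0x%03x -> 0x%03x\n", 0xfff, flip_sign_bit(0xfff, 0xfff));   /* 0x7ff */
    return 0;
}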
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-
-/*
- * Register map
- */
-#define DT2821_ADCSR_REG 0x00
-#define DT2821_ADCSR_ADERR (1 << 15)
-#define DT2821_ADCSR_ADCLK (1 << 9)
-#define DT2821_ADCSR_MUXBUSY (1 << 8)
-#define DT2821_ADCSR_ADDONE (1 << 7)
-#define DT2821_ADCSR_IADDONE (1 << 6)
-#define DT2821_ADCSR_GS(x) (((x) & 0x3) << 4)
-#define DT2821_ADCSR_CHAN(x) (((x) & 0xf) << 0)
-#define DT2821_CHANCSR_REG 0x02
-#define DT2821_CHANCSR_LLE (1 << 15)
-#define DT2821_CHANCSR_PRESLA(x) (((x) & 0xf) >> 8)
-#define DT2821_CHANCSR_NUMB(x) ((((x) - 1) & 0xf) << 0)
-#define DT2821_ADDAT_REG 0x04
-#define DT2821_DACSR_REG 0x06
-#define DT2821_DACSR_DAERR (1 << 15)
-#define DT2821_DACSR_YSEL(x) ((x) << 9)
-#define DT2821_DACSR_SSEL (1 << 8)
-#define DT2821_DACSR_DACRDY (1 << 7)
-#define DT2821_DACSR_IDARDY (1 << 6)
-#define DT2821_DACSR_DACLK (1 << 5)
-#define DT2821_DACSR_HBOE (1 << 1)
-#define DT2821_DACSR_LBOE (1 << 0)
-#define DT2821_DADAT_REG 0x08
-#define DT2821_DIODAT_REG 0x0a
-#define DT2821_SUPCSR_REG 0x0c
-#define DT2821_SUPCSR_DMAD (1 << 15)
-#define DT2821_SUPCSR_ERRINTEN (1 << 14)
-#define DT2821_SUPCSR_CLRDMADNE (1 << 13)
-#define DT2821_SUPCSR_DDMA (1 << 12)
-#define DT2821_SUPCSR_DS_PIO (0 << 10)
-#define DT2821_SUPCSR_DS_AD_CLK (1 << 10)
-#define DT2821_SUPCSR_DS_DA_CLK (2 << 10)
-#define DT2821_SUPCSR_DS_AD_TRIG (3 << 10)
-#define DT2821_SUPCSR_BUFFB (1 << 9)
-#define DT2821_SUPCSR_SCDN (1 << 8)
-#define DT2821_SUPCSR_DACON (1 << 7)
-#define DT2821_SUPCSR_ADCINIT (1 << 6)
-#define DT2821_SUPCSR_DACINIT (1 << 5)
-#define DT2821_SUPCSR_PRLD (1 << 4)
-#define DT2821_SUPCSR_STRIG (1 << 3)
-#define DT2821_SUPCSR_XTRIG (1 << 2)
-#define DT2821_SUPCSR_XCLK (1 << 1)
-#define DT2821_SUPCSR_BDINIT (1 << 0)
-#define DT2821_TMRCTR_REG 0x0e
-
-static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_dt282x_ai_lo_unipolar = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_dt282x_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_dt282x_ai_5_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_dt282x_ai_hi_bipolar = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02)
- }
-};
-
-static const struct comedi_lrange range_dt282x_ai_hi_unipolar = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
- }
-};
-
-/*
- * The Analog Output range is set per-channel using jumpers on the board.
- * All of these ranges may not be available on some DT2821 series boards.
- * The default jumper setting has both channels set for +/-10V output.
- */
-static const struct comedi_lrange dt282x_ao_range = {
- 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- UNI_RANGE(10),
- UNI_RANGE(5),
- }
-};
-
-struct dt282x_board {
- const char *name;
- unsigned int ai_maxdata;
- int adchan_se;
- int adchan_di;
- int ai_speed;
- int ispgl;
- int dachan;
- unsigned int ao_maxdata;
-};
-
-static const struct dt282x_board boardtypes[] = {
- {
- .name = "dt2821",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 20000,
- .dachan = 2,
- .ao_maxdata = 0x0fff,
- }, {
- .name = "dt2821-f",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 6500,
- .dachan = 2,
- .ao_maxdata = 0x0fff,
- }, {
- .name = "dt2821-g",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 4000,
- .dachan = 2,
- .ao_maxdata = 0x0fff,
- }, {
- .name = "dt2823",
- .ai_maxdata = 0xffff,
- .adchan_di = 4,
- .ai_speed = 10000,
- .dachan = 2,
- .ao_maxdata = 0xffff,
- }, {
- .name = "dt2824-pgh",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 20000,
- }, {
- .name = "dt2824-pgl",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 20000,
- .ispgl = 1,
- }, {
- .name = "dt2825",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 20000,
- .ispgl = 1,
- .dachan = 2,
- .ao_maxdata = 0x0fff,
- }, {
- .name = "dt2827",
- .ai_maxdata = 0xffff,
- .adchan_di = 4,
- .ai_speed = 10000,
- .dachan = 2,
- .ao_maxdata = 0x0fff,
- }, {
- .name = "dt2828",
- .ai_maxdata = 0x0fff,
- .adchan_se = 4,
- .ai_speed = 10000,
- .dachan = 2,
- .ao_maxdata = 0x0fff,
- }, {
- .name = "dt2829",
- .ai_maxdata = 0xffff,
- .adchan_se = 8,
- .ai_speed = 33250,
- .dachan = 2,
- .ao_maxdata = 0xffff,
- }, {
- .name = "dt21-ez",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 10000,
- .dachan = 2,
- .ao_maxdata = 0x0fff,
- }, {
- .name = "dt23-ez",
- .ai_maxdata = 0xffff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 10000,
- }, {
- .name = "dt24-ez",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 10000,
- }, {
- .name = "dt24-ez-pgl",
- .ai_maxdata = 0x0fff,
- .adchan_se = 16,
- .adchan_di = 8,
- .ai_speed = 10000,
- .ispgl = 1,
- },
-};
-
-struct dt282x_private {
- struct comedi_isadma *dma;
- unsigned int ad_2scomp:1;
- unsigned int divisor;
- int dacsr; /* software copies of registers */
- int adcsr;
- int supcsr;
- int ntrig;
- int nread;
- int dma_dir;
-};
-
-static int dt282x_prep_ai_dma(struct comedi_device *dev, int dma_index, int n)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma_index];
-
- if (!devpriv->ntrig)
- return 0;
-
- if (n == 0)
- n = desc->maxsize;
- if (n > devpriv->ntrig * 2)
- n = devpriv->ntrig * 2;
- devpriv->ntrig -= n / 2;
-
- desc->size = n;
- comedi_isadma_set_mode(desc, devpriv->dma_dir);
-
- comedi_isadma_program(desc);
-
- return n;
-}
-
-static int dt282x_prep_ao_dma(struct comedi_device *dev, int dma_index, int n)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma_index];
-
- desc->size = n;
- comedi_isadma_set_mode(desc, devpriv->dma_dir);
-
- comedi_isadma_program(desc);
-
- return n;
-}
-
-static void dt282x_disable_dma(struct comedi_device *dev)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc;
- int i;
-
- for (i = 0; i < 2; i++) {
- desc = &dma->desc[i];
- comedi_isadma_disable(desc->chan);
- }
-}
-
-static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
-{
- unsigned int prescale, base, divider;
-
- for (prescale = 0; prescale < 16; prescale++) {
- if (prescale == 1)
- continue;
- base = 250 * (1 << prescale);
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- divider = (*ns + base / 2) / base;
- break;
- case CMDF_ROUND_DOWN:
- divider = (*ns) / base;
- break;
- case CMDF_ROUND_UP:
- divider = (*ns + base - 1) / base;
- break;
- }
- if (divider < 256) {
- *ns = divider * base;
- return (prescale << 8) | (255 - divider);
- }
- }
- base = 250 * (1 << 15);
- divider = 255;
- *ns = divider * base;
- return (15 << 8) | (255 - divider);
-}
-
-static void dt282x_munge(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *buf,
- unsigned int nbytes)
-{
- struct dt282x_private *devpriv = dev->private;
- unsigned int val;
- int i;
-
- if (nbytes % 2)
- dev_err(dev->class_dev,
- "bug! odd number of bytes from dma xfer\n");
-
- for (i = 0; i < nbytes / 2; i++) {
- val = buf[i];
- val &= s->maxdata;
- if (devpriv->ad_2scomp)
- val = comedi_offset_munge(s, val);
-
- buf[i] = val;
- }
-}
-
-static unsigned int dt282x_ao_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int cur_dma)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[cur_dma];
- unsigned int nsamples = comedi_bytes_to_samples(s, desc->maxsize);
- unsigned int nbytes;
-
- nbytes = comedi_buf_read_samples(s, desc->virt_addr, nsamples);
- if (nbytes)
- dt282x_prep_ao_dma(dev, cur_dma, nbytes);
- else
- dev_err(dev->class_dev, "AO underrun\n");
-
- return nbytes;
-}
-
-static void dt282x_ao_dma_interrupt(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
-
- outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
- dev->iobase + DT2821_SUPCSR_REG);
-
- comedi_isadma_disable(desc->chan);
-
- if (!dt282x_ao_setup_dma(dev, s, dma->cur_dma))
- s->async->events |= COMEDI_CB_OVERFLOW;
-
- dma->cur_dma = 1 - dma->cur_dma;
-}
-
-static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned int nsamples = comedi_bytes_to_samples(s, desc->size);
- int ret;
-
- outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
- dev->iobase + DT2821_SUPCSR_REG);
-
- comedi_isadma_disable(desc->chan);
-
- dt282x_munge(dev, s, desc->virt_addr, desc->size);
- ret = comedi_buf_write_samples(s, desc->virt_addr, nsamples);
- if (ret != desc->size)
- return;
-
- devpriv->nread -= nsamples;
- if (devpriv->nread < 0) {
- dev_info(dev->class_dev, "nread off by one\n");
- devpriv->nread = 0;
- }
- if (!devpriv->nread) {
- s->async->events |= COMEDI_CB_EOA;
- return;
- }
-#if 0
- /* clear the dual dma flag, making this the last dma segment */
- /* XXX probably wrong */
- if (!devpriv->ntrig) {
- devpriv->supcsr &= ~DT2821_SUPCSR_DDMA;
- outw(devpriv->supcsr, dev->iobase + DT2821_SUPCSR_REG);
- }
-#endif
- /* restart the channel */
- dt282x_prep_ai_dma(dev, dma->cur_dma, 0);
-
- dma->cur_dma = 1 - dma->cur_dma;
-}
-
-static irqreturn_t dt282x_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct dt282x_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_subdevice *s_ao = dev->write_subdev;
- unsigned int supcsr, adcsr, dacsr;
- int handled = 0;
-
- if (!dev->attached) {
- dev_err(dev->class_dev, "spurious interrupt\n");
- return IRQ_HANDLED;
- }
-
- adcsr = inw(dev->iobase + DT2821_ADCSR_REG);
- dacsr = inw(dev->iobase + DT2821_DACSR_REG);
- supcsr = inw(dev->iobase + DT2821_SUPCSR_REG);
- if (supcsr & DT2821_SUPCSR_DMAD) {
- if (devpriv->dma_dir == COMEDI_ISADMA_READ)
- dt282x_ai_dma_interrupt(dev, s);
- else
- dt282x_ao_dma_interrupt(dev, s_ao);
- handled = 1;
- }
- if (adcsr & DT2821_ADCSR_ADERR) {
- if (devpriv->nread != 0) {
- dev_err(dev->class_dev, "A/D error\n");
- s->async->events |= COMEDI_CB_ERROR;
- }
- handled = 1;
- }
- if (dacsr & DT2821_DACSR_DAERR) {
- dev_err(dev->class_dev, "D/A error\n");
- s_ao->async->events |= COMEDI_CB_ERROR;
- handled = 1;
- }
-#if 0
- if (adcsr & DT2821_ADCSR_ADDONE) {
- unsigned short data;
-
- data = inw(dev->iobase + DT2821_ADDAT_REG);
- data &= s->maxdata;
- if (devpriv->ad_2scomp)
- data = comedi_offset_munge(s, data);
-
- comedi_buf_write_samples(s, &data, 1);
-
- devpriv->nread--;
- if (!devpriv->nread) {
- s->async->events |= COMEDI_CB_EOA;
- } else {
- if (supcsr & DT2821_SUPCSR_SCDN)
- outw(devpriv->supcsr | DT2821_SUPCSR_STRIG,
- dev->iobase + DT2821_SUPCSR_REG);
- }
- handled = 1;
- }
-#endif
- comedi_handle_events(dev, s);
- comedi_handle_events(dev, s_ao);
-
- return IRQ_RETVAL(handled);
-}
-
-static void dt282x_load_changain(struct comedi_device *dev, int n,
- unsigned int *chanlist)
-{
- struct dt282x_private *devpriv = dev->private;
- int i;
-
- outw(DT2821_CHANCSR_LLE | DT2821_CHANCSR_NUMB(n),
- dev->iobase + DT2821_CHANCSR_REG);
- for (i = 0; i < n; i++) {
- unsigned int chan = CR_CHAN(chanlist[i]);
- unsigned int range = CR_RANGE(chanlist[i]);
-
- outw(devpriv->adcsr |
- DT2821_ADCSR_GS(range) |
- DT2821_ADCSR_CHAN(chan),
- dev->iobase + DT2821_ADCSR_REG);
- }
- outw(DT2821_CHANCSR_NUMB(n), dev->iobase + DT2821_CHANCSR_REG);
-}
-
-static int dt282x_ai_timeout(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + DT2821_ADCSR_REG);
- switch (context) {
- case DT2821_ADCSR_MUXBUSY:
- if ((status & DT2821_ADCSR_MUXBUSY) == 0)
- return 0;
- break;
- case DT2821_ADCSR_ADDONE:
- if (status & DT2821_ADCSR_ADDONE)
- return 0;
- break;
- default:
- return -EINVAL;
- }
- return -EBUSY;
-}
-
-/*
- * Performs a single A/D conversion.
- * - Put channel/gain into channel-gain list
- * - preload multiplexer
- * - trigger conversion and wait for it to finish
- */
-static int dt282x_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dt282x_private *devpriv = dev->private;
- unsigned int val;
- int ret;
- int i;
-
- /* XXX should we really be enabling the ad clock here? */
- devpriv->adcsr = DT2821_ADCSR_ADCLK;
- outw(devpriv->adcsr, dev->iobase + DT2821_ADCSR_REG);
-
- dt282x_load_changain(dev, 1, &insn->chanspec);
-
- outw(devpriv->supcsr | DT2821_SUPCSR_PRLD,
- dev->iobase + DT2821_SUPCSR_REG);
- ret = comedi_timeout(dev, s, insn,
- dt282x_ai_timeout, DT2821_ADCSR_MUXBUSY);
- if (ret)
- return ret;
-
- for (i = 0; i < insn->n; i++) {
- outw(devpriv->supcsr | DT2821_SUPCSR_STRIG,
- dev->iobase + DT2821_SUPCSR_REG);
-
- ret = comedi_timeout(dev, s, insn,
- dt282x_ai_timeout, DT2821_ADCSR_ADDONE);
- if (ret)
- return ret;
-
- val = inw(dev->iobase + DT2821_ADDAT_REG);
- val &= s->maxdata;
- if (devpriv->ad_2scomp)
- val = comedi_offset_munge(s, val);
-
- data[i] = val;
- }
-
- return i;
-}
-
-static int dt282x_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct dt282x_board *board = dev->board_ptr;
- struct dt282x_private *devpriv = dev->private;
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_FOLLOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 4000);
-
-#define SLOWEST_TIMER (250*(1<<15)*255)
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER);
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, board->ai_speed);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_EXT | TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- arg = cmd->convert_arg;
- devpriv->divisor = dt282x_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- dt282x_disable_dma(dev);
-
- outw(devpriv->divisor, dev->iobase + DT2821_TMRCTR_REG);
-
- devpriv->supcsr = DT2821_SUPCSR_ERRINTEN;
- if (cmd->scan_begin_src == TRIG_FOLLOW)
- devpriv->supcsr = DT2821_SUPCSR_DS_AD_CLK;
- else
- devpriv->supcsr = DT2821_SUPCSR_DS_AD_TRIG;
- outw(devpriv->supcsr |
- DT2821_SUPCSR_CLRDMADNE |
- DT2821_SUPCSR_BUFFB |
- DT2821_SUPCSR_ADCINIT,
- dev->iobase + DT2821_SUPCSR_REG);
-
- devpriv->ntrig = cmd->stop_arg * cmd->scan_end_arg;
- devpriv->nread = devpriv->ntrig;
-
- devpriv->dma_dir = COMEDI_ISADMA_READ;
- dma->cur_dma = 0;
- dt282x_prep_ai_dma(dev, 0, 0);
- if (devpriv->ntrig) {
- dt282x_prep_ai_dma(dev, 1, 0);
- devpriv->supcsr |= DT2821_SUPCSR_DDMA;
- outw(devpriv->supcsr, dev->iobase + DT2821_SUPCSR_REG);
- }
-
- devpriv->adcsr = 0;
-
- dt282x_load_changain(dev, cmd->chanlist_len, cmd->chanlist);
-
- devpriv->adcsr = DT2821_ADCSR_ADCLK | DT2821_ADCSR_IADDONE;
- outw(devpriv->adcsr, dev->iobase + DT2821_ADCSR_REG);
-
- outw(devpriv->supcsr | DT2821_SUPCSR_PRLD,
- dev->iobase + DT2821_SUPCSR_REG);
- ret = comedi_timeout(dev, s, NULL,
- dt282x_ai_timeout, DT2821_ADCSR_MUXBUSY);
- if (ret)
- return ret;
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- outw(devpriv->supcsr | DT2821_SUPCSR_STRIG,
- dev->iobase + DT2821_SUPCSR_REG);
- } else {
- devpriv->supcsr |= DT2821_SUPCSR_XTRIG;
- outw(devpriv->supcsr, dev->iobase + DT2821_SUPCSR_REG);
- }
-
- return 0;
-}
-
-static int dt282x_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct dt282x_private *devpriv = dev->private;
-
- dt282x_disable_dma(dev);
-
- devpriv->adcsr = 0;
- outw(devpriv->adcsr, dev->iobase + DT2821_ADCSR_REG);
-
- devpriv->supcsr = 0;
- outw(devpriv->supcsr | DT2821_SUPCSR_ADCINIT,
- dev->iobase + DT2821_SUPCSR_REG);
-
- return 0;
-}
-
-static int dt282x_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dt282x_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int i;
-
- devpriv->dacsr |= DT2821_DACSR_SSEL | DT2821_DACSR_YSEL(chan);
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- if (comedi_range_is_bipolar(s, range))
- val = comedi_offset_munge(s, val);
-
- outw(devpriv->dacsr, dev->iobase + DT2821_DACSR_REG);
-
- outw(val, dev->iobase + DT2821_DADAT_REG);
-
- outw(devpriv->supcsr | DT2821_SUPCSR_DACON,
- dev->iobase + DT2821_SUPCSR_REG);
- }
-
- return insn->n;
-}
-
-static int dt282x_ao_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct dt282x_private *devpriv = dev->private;
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 5000);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_EXT | TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- arg = cmd->scan_begin_arg;
- devpriv->divisor = dt282x_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int dt282x_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (trig_num != cmd->start_src)
- return -EINVAL;
-
- if (!dt282x_ao_setup_dma(dev, s, 0))
- return -EPIPE;
-
- if (!dt282x_ao_setup_dma(dev, s, 1))
- return -EPIPE;
-
- outw(devpriv->supcsr | DT2821_SUPCSR_STRIG,
- dev->iobase + DT2821_SUPCSR_REG);
- s->async->inttrig = NULL;
-
- return 1;
-}
-
-static int dt282x_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct dt282x_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- dt282x_disable_dma(dev);
-
- devpriv->supcsr = DT2821_SUPCSR_ERRINTEN |
- DT2821_SUPCSR_DS_DA_CLK |
- DT2821_SUPCSR_DDMA;
- outw(devpriv->supcsr |
- DT2821_SUPCSR_CLRDMADNE |
- DT2821_SUPCSR_BUFFB |
- DT2821_SUPCSR_DACINIT,
- dev->iobase + DT2821_SUPCSR_REG);
-
- devpriv->ntrig = cmd->stop_arg * cmd->chanlist_len;
- devpriv->nread = devpriv->ntrig;
-
- devpriv->dma_dir = COMEDI_ISADMA_WRITE;
- dma->cur_dma = 0;
-
- outw(devpriv->divisor, dev->iobase + DT2821_TMRCTR_REG);
-
- /* clear all bits but the DIO direction bits */
- devpriv->dacsr &= (DT2821_DACSR_LBOE | DT2821_DACSR_HBOE);
-
- devpriv->dacsr |= (DT2821_DACSR_SSEL |
- DT2821_DACSR_DACLK |
- DT2821_DACSR_IDARDY);
- outw(devpriv->dacsr, dev->iobase + DT2821_DACSR_REG);
-
- s->async->inttrig = dt282x_ao_inttrig;
-
- return 0;
-}
-
-static int dt282x_ao_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct dt282x_private *devpriv = dev->private;
-
- dt282x_disable_dma(dev);
-
- /* clear all bits but the DIO direction bits */
- devpriv->dacsr &= (DT2821_DACSR_LBOE | DT2821_DACSR_HBOE);
-
- outw(devpriv->dacsr, dev->iobase + DT2821_DACSR_REG);
-
- devpriv->supcsr = 0;
- outw(devpriv->supcsr | DT2821_SUPCSR_DACINIT,
- dev->iobase + DT2821_SUPCSR_REG);
-
- return 0;
-}
-
-static int dt282x_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + DT2821_DIODAT_REG);
-
- data[1] = inw(dev->iobase + DT2821_DIODAT_REG);
-
- return insn->n;
-}
-
-static int dt282x_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dt282x_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 8)
- mask = 0x00ff;
- else
- mask = 0xff00;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- devpriv->dacsr &= ~(DT2821_DACSR_LBOE | DT2821_DACSR_HBOE);
- if (s->io_bits & 0x00ff)
- devpriv->dacsr |= DT2821_DACSR_LBOE;
- if (s->io_bits & 0xff00)
- devpriv->dacsr |= DT2821_DACSR_HBOE;
-
- outw(devpriv->dacsr, dev->iobase + DT2821_DACSR_REG);
-
- return insn->n;
-}
-
-static const struct comedi_lrange *const ai_range_table[] = {
- &range_dt282x_ai_lo_bipolar,
- &range_dt282x_ai_lo_unipolar,
- &range_dt282x_ai_5_bipolar,
- &range_dt282x_ai_5_unipolar
-};
-
-static const struct comedi_lrange *const ai_range_pgl_table[] = {
- &range_dt282x_ai_hi_bipolar,
- &range_dt282x_ai_hi_unipolar
-};
-
-static const struct comedi_lrange *opt_ai_range_lkup(int ispgl, int x)
-{
- if (ispgl) {
- if (x < 0 || x >= 2)
- x = 0;
- return ai_range_pgl_table[x];
- }
-
- if (x < 0 || x >= 4)
- x = 0;
- return ai_range_table[x];
-}
-
-static void dt282x_alloc_dma(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct dt282x_private *devpriv = dev->private;
- unsigned int irq_num = it->options[1];
- unsigned int dma_chan[2];
-
- if (it->options[2] < it->options[3]) {
- dma_chan[0] = it->options[2];
- dma_chan[1] = it->options[3];
- } else {
- dma_chan[0] = it->options[3];
- dma_chan[1] = it->options[2];
- }
-
- if (!irq_num || dma_chan[0] == dma_chan[1] ||
- dma_chan[0] < 5 || dma_chan[0] > 7 ||
- dma_chan[1] < 5 || dma_chan[1] > 7)
- return;
-
- if (request_irq(irq_num, dt282x_interrupt, 0, dev->board_name, dev))
- return;
-
- /* DMA uses two 4K buffers with separate DMA channels */
- devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan[0], dma_chan[1],
- PAGE_SIZE, 0);
- if (!devpriv->dma)
- free_irq(irq_num, dev);
-}
-
-static void dt282x_free_dma(struct comedi_device *dev)
-{
- struct dt282x_private *devpriv = dev->private;
-
- if (devpriv)
- comedi_isadma_free(devpriv->dma);
-}
-
-static int dt282x_initialize(struct comedi_device *dev)
-{
- /* Initialize board */
- outw(DT2821_SUPCSR_BDINIT, dev->iobase + DT2821_SUPCSR_REG);
- inw(dev->iobase + DT2821_ADCSR_REG);
-
- /*
- * At power up, some registers are in a well-known state.
- * Check them to see if a DT2821 series board is present.
- */
- if (((inw(dev->iobase + DT2821_ADCSR_REG) & 0xfff0) != 0x7c00) ||
- ((inw(dev->iobase + DT2821_CHANCSR_REG) & 0xf0f0) != 0x70f0) ||
- ((inw(dev->iobase + DT2821_DACSR_REG) & 0x7c93) != 0x7c90) ||
- ((inw(dev->iobase + DT2821_SUPCSR_REG) & 0xf8ff) != 0x0000) ||
- ((inw(dev->iobase + DT2821_TMRCTR_REG) & 0xff00) != 0xf000)) {
- dev_err(dev->class_dev, "board not found\n");
- return -EIO;
- }
- return 0;
-}
-
-/*
- options:
- 0 i/o base
- 1 irq
- 2 dma1
- 3 dma2
- 4 0=single ended, 1=differential
- 5 ai 0=straight binary, 1=2's comp
- 6 ao0 0=straight binary, 1=2's comp
- 7 ao1 0=straight binary, 1=2's comp
- 8 ai 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V
- 9 ao0 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- 10 ao1 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- */
-static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct dt282x_board *board = dev->board_ptr;
- struct dt282x_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- ret = dt282x_initialize(dev);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* an IRQ and 2 DMA channels are required for async command support */
- dt282x_alloc_dma(dev, it);
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- if ((it->options[4] && board->adchan_di) || board->adchan_se == 0) {
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = board->adchan_di;
- } else {
- s->subdev_flags |= SDF_COMMON;
- s->n_chan = board->adchan_se;
- }
- s->maxdata = board->ai_maxdata;
-
- s->range_table = opt_ai_range_lkup(board->ispgl, it->options[8]);
- devpriv->ad_2scomp = it->options[5] ? 1 : 0;
-
- s->insn_read = dt282x_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = dt282x_ai_cmdtest;
- s->do_cmd = dt282x_ai_cmd;
- s->cancel = dt282x_ai_cancel;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- if (board->dachan) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->dachan;
- s->maxdata = board->ao_maxdata;
- /* ranges are per-channel, set by jumpers on the board */
- s->range_table = &dt282x_ao_range;
- s->insn_write = dt282x_ao_insn_write;
- if (dev->irq) {
- dev->write_subdev = s;
- s->subdev_flags |= SDF_CMD_WRITE;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = dt282x_ao_cmdtest;
- s->do_cmd = dt282x_ao_cmd;
- s->cancel = dt282x_ao_cancel;
- }
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = dt282x_dio_insn_bits;
- s->insn_config = dt282x_dio_insn_config;
-
- return 0;
-}
-
-static void dt282x_detach(struct comedi_device *dev)
-{
- dt282x_free_dma(dev);
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver dt282x_driver = {
- .driver_name = "dt282x",
- .module = THIS_MODULE,
- .attach = dt282x_attach,
- .detach = dt282x_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
- .offset = sizeof(struct dt282x_board),
-};
-module_comedi_driver(dt282x_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Data Translation DT2821 series");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
deleted file mode 100644
index 8c4f284d1919..000000000000
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ /dev/null
@@ -1,769 +0,0 @@
-/*
- comedi/drivers/dt3000.c
- Data Translation DT3000 series driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: dt3000
-Description: Data Translation DT3000 series
-Author: ds
-Devices: [Data Translation] DT3001 (dt3000), DT3001-PGL, DT3002, DT3003,
- DT3003-PGL, DT3004, DT3005, DT3004-200
-Updated: Mon, 14 Apr 2008 15:41:24 +0100
-Status: works
-
-Configuration Options: not applicable, uses PCI auto config
-
-There is code to support AI commands, but it may not work.
-
-AO commands are not supported.
-*/
-
-/*
- The DT3000 series is Data Translation's attempt to make a PCI
- data acquisition board. The design of this series is very nice,
- since each board has an on-board DSP (Texas Instruments TMS320C52).
- However, a few details are a little annoying. The boards lack
- bus-mastering DMA, which eliminates them from serious work.
- They also are not capable of autocalibration, which is a common
- feature in modern hardware. The default firmware is pretty bad,
- making it nearly impossible to write an RT compatible driver.
- It would make an interesting project to write a decent firmware
- for these boards.
-
- Data Translation originally wanted an NDA for the documentation
- for the 3k series. However, if you ask nicely, they might send
- you the docs without one.
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-static const struct comedi_lrange range_dt3000_ai = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_dt3000_ai_pgl = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02)
- }
-};
-
-enum dt3k_boardid {
- BOARD_DT3001,
- BOARD_DT3001_PGL,
- BOARD_DT3002,
- BOARD_DT3003,
- BOARD_DT3003_PGL,
- BOARD_DT3004,
- BOARD_DT3005,
-};
-
-struct dt3k_boardtype {
- const char *name;
- int adchan;
- int adbits;
- int ai_speed;
- const struct comedi_lrange *adrange;
- int dachan;
- int dabits;
-};
-
-static const struct dt3k_boardtype dt3k_boardtypes[] = {
- [BOARD_DT3001] = {
- .name = "dt3001",
- .adchan = 16,
- .adbits = 12,
- .adrange = &range_dt3000_ai,
- .ai_speed = 3000,
- .dachan = 2,
- .dabits = 12,
- },
- [BOARD_DT3001_PGL] = {
- .name = "dt3001-pgl",
- .adchan = 16,
- .adbits = 12,
- .adrange = &range_dt3000_ai_pgl,
- .ai_speed = 3000,
- .dachan = 2,
- .dabits = 12,
- },
- [BOARD_DT3002] = {
- .name = "dt3002",
- .adchan = 32,
- .adbits = 12,
- .adrange = &range_dt3000_ai,
- .ai_speed = 3000,
- },
- [BOARD_DT3003] = {
- .name = "dt3003",
- .adchan = 64,
- .adbits = 12,
- .adrange = &range_dt3000_ai,
- .ai_speed = 3000,
- .dachan = 2,
- .dabits = 12,
- },
- [BOARD_DT3003_PGL] = {
- .name = "dt3003-pgl",
- .adchan = 64,
- .adbits = 12,
- .adrange = &range_dt3000_ai_pgl,
- .ai_speed = 3000,
- .dachan = 2,
- .dabits = 12,
- },
- [BOARD_DT3004] = {
- .name = "dt3004",
- .adchan = 16,
- .adbits = 16,
- .adrange = &range_dt3000_ai,
- .ai_speed = 10000,
- .dachan = 2,
- .dabits = 12,
- },
- [BOARD_DT3005] = {
- .name = "dt3005", /* a.k.a. 3004-200 */
- .adchan = 16,
- .adbits = 16,
- .adrange = &range_dt3000_ai,
- .ai_speed = 5000,
- .dachan = 2,
- .dabits = 12,
- },
-};
-
-/* dual-ported RAM location definitions */
-
-#define DPR_DAC_buffer (4*0x000)
-#define DPR_ADC_buffer (4*0x800)
-#define DPR_Command (4*0xfd3)
-#define DPR_SubSys (4*0xfd3)
-#define DPR_Encode (4*0xfd4)
-#define DPR_Params(a) (4*(0xfd5+(a)))
-#define DPR_Tick_Reg_Lo (4*0xff5)
-#define DPR_Tick_Reg_Hi (4*0xff6)
-#define DPR_DA_Buf_Front (4*0xff7)
-#define DPR_DA_Buf_Rear (4*0xff8)
-#define DPR_AD_Buf_Front (4*0xff9)
-#define DPR_AD_Buf_Rear (4*0xffa)
-#define DPR_Int_Mask (4*0xffb)
-#define DPR_Intr_Flag (4*0xffc)
-#define DPR_Response_Mbx (4*0xffe)
-#define DPR_Command_Mbx (4*0xfff)
-
-#define AI_FIFO_DEPTH 2003
-#define AO_FIFO_DEPTH 2048
-
-/* command list */
-
-#define CMD_GETBRDINFO 0
-#define CMD_CONFIG 1
-#define CMD_GETCONFIG 2
-#define CMD_START 3
-#define CMD_STOP 4
-#define CMD_READSINGLE 5
-#define CMD_WRITESINGLE 6
-#define CMD_CALCCLOCK 7
-#define CMD_READEVENTS 8
-#define CMD_WRITECTCTRL 16
-#define CMD_READCTCTRL 17
-#define CMD_WRITECT 18
-#define CMD_READCT 19
-#define CMD_WRITEDATA 32
-#define CMD_READDATA 33
-#define CMD_WRITEIO 34
-#define CMD_READIO 35
-#define CMD_WRITECODE 36
-#define CMD_READCODE 37
-#define CMD_EXECUTE 38
-#define CMD_HALT 48
-
-#define SUBS_AI 0
-#define SUBS_AO 1
-#define SUBS_DIN 2
-#define SUBS_DOUT 3
-#define SUBS_MEM 4
-#define SUBS_CT 5
-
-/* interrupt flags */
-#define DT3000_CMDONE 0x80
-#define DT3000_CTDONE 0x40
-#define DT3000_DAHWERR 0x20
-#define DT3000_DASWERR 0x10
-#define DT3000_DAEMPTY 0x08
-#define DT3000_ADHWERR 0x04
-#define DT3000_ADSWERR 0x02
-#define DT3000_ADFULL 0x01
-
-#define DT3000_COMPLETION_MASK 0xff00
-#define DT3000_COMMAND_MASK 0x00ff
-#define DT3000_NOTPROCESSED 0x0000
-#define DT3000_NOERROR 0x5500
-#define DT3000_ERROR 0xaa00
-#define DT3000_NOTSUPPORTED 0xff00
-
-#define DT3000_EXTERNAL_CLOCK 1
-#define DT3000_RISING_EDGE 2
-
-#define TMODE_MASK 0x1c
-
-#define DT3000_AD_TRIG_INTERNAL (0<<2)
-#define DT3000_AD_TRIG_EXTERNAL (1<<2)
-#define DT3000_AD_RETRIG_INTERNAL (2<<2)
-#define DT3000_AD_RETRIG_EXTERNAL (3<<2)
-#define DT3000_AD_EXTRETRIG (4<<2)
-
-#define DT3000_CHANNEL_MODE_SE 0
-#define DT3000_CHANNEL_MODE_DI 1
-
-struct dt3k_private {
- unsigned int lock;
- unsigned int ai_front;
- unsigned int ai_rear;
-};
-
-#define TIMEOUT 100
-
-static void dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd)
-{
- int i;
- unsigned int status = 0;
-
- writew(cmd, dev->mmio + DPR_Command_Mbx);
-
- for (i = 0; i < TIMEOUT; i++) {
- status = readw(dev->mmio + DPR_Command_Mbx);
- if ((status & DT3000_COMPLETION_MASK) != DT3000_NOTPROCESSED)
- break;
- udelay(1);
- }
-
- if ((status & DT3000_COMPLETION_MASK) != DT3000_NOERROR)
- dev_dbg(dev->class_dev, "%s: timeout/error status=0x%04x\n",
- __func__, status);
-}
-
-static unsigned int dt3k_readsingle(struct comedi_device *dev,
- unsigned int subsys, unsigned int chan,
- unsigned int gain)
-{
- writew(subsys, dev->mmio + DPR_SubSys);
-
- writew(chan, dev->mmio + DPR_Params(0));
- writew(gain, dev->mmio + DPR_Params(1));
-
- dt3k_send_cmd(dev, CMD_READSINGLE);
-
- return readw(dev->mmio + DPR_Params(2));
-}
-
-static void dt3k_writesingle(struct comedi_device *dev, unsigned int subsys,
- unsigned int chan, unsigned int data)
-{
- writew(subsys, dev->mmio + DPR_SubSys);
-
- writew(chan, dev->mmio + DPR_Params(0));
- writew(0, dev->mmio + DPR_Params(1));
- writew(data, dev->mmio + DPR_Params(2));
-
- dt3k_send_cmd(dev, CMD_WRITESINGLE);
-}
-
-static void dt3k_ai_empty_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct dt3k_private *devpriv = dev->private;
- int front;
- int rear;
- int count;
- int i;
- unsigned short data;
-
- front = readw(dev->mmio + DPR_AD_Buf_Front);
- count = front - devpriv->ai_front;
- if (count < 0)
- count += AI_FIFO_DEPTH;
-
- rear = devpriv->ai_rear;
-
- for (i = 0; i < count; i++) {
- data = readw(dev->mmio + DPR_ADC_buffer + rear);
- comedi_buf_write_samples(s, &data, 1);
- rear++;
- if (rear >= AI_FIFO_DEPTH)
- rear = 0;
- }
-
- devpriv->ai_rear = rear;
- writew(rear, dev->mmio + DPR_AD_Buf_Rear);
-}
-
-static int dt3k_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- writew(SUBS_AI, dev->mmio + DPR_SubSys);
- dt3k_send_cmd(dev, CMD_STOP);
-
- writew(0, dev->mmio + DPR_Int_Mask);
-
- return 0;
-}
-
-static int debug_n_ints;
-
-/* FIXME! Assumes shared interrupt is for this card. */
-/* What's this debug_n_ints stuff? Obviously needs some work... */
-static irqreturn_t dt3k_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- status = readw(dev->mmio + DPR_Intr_Flag);
-
- if (status & DT3000_ADFULL)
- dt3k_ai_empty_fifo(dev, s);
-
- if (status & (DT3000_ADSWERR | DT3000_ADHWERR))
- s->async->events |= COMEDI_CB_ERROR;
-
- debug_n_ints++;
- if (debug_n_ints >= 10)
- s->async->events |= COMEDI_CB_EOA;
-
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
- unsigned int flags)
-{
- int divider, base, prescale;
-
- /* This function needs improvement */
- /* Don't know if divider==0 works. */
-
- for (prescale = 0; prescale < 16; prescale++) {
- base = timer_base * (prescale + 1);
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- divider = (*nanosec + base / 2) / base;
- break;
- case CMDF_ROUND_DOWN:
- divider = (*nanosec) / base;
- break;
- case CMDF_ROUND_UP:
- divider = (*nanosec + base - 1) / base;
- break;
- }
- if (divider < 65536) {
- *nanosec = divider * base;
- return (prescale << 16) | (divider);
- }
- }
-
- prescale = 15;
- base = timer_base * (1 << prescale);
- divider = 65535;
- *nanosec = divider * base;
- return (prescale << 16) | (divider);
-}
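[Editorial note, not part of the deleted file] A standalone check of the encoding above can help when picking scan and convert timings. The sketch below (hypothetical name, user-space only) reproduces the nearest-rounding case of dt3k_ns_to_timer() for the 100 ns scan-timer base used in dt3k_ai_cmd().

#include <stdio.h>

static unsigned int dt3k_encode_ns(unsigned int timer_base, unsigned int *ns)
{
    unsigned int prescale, base, divider;

    for (prescale = 0; prescale < 16; prescale++) {
        base = timer_base * (prescale + 1);
        divider = (*ns + base / 2) / base;  /* nearest rounding */
        if (divider < 65536) {
            *ns = divider * base;
            return (prescale << 16) | divider;
        }
    }
    return 0;   /* out of range for this sketch */
}

int main(void)
{
    unsigned int ns = 1000000;  /* 1 ms scan period */
    unsigned int code = dt3k_encode_ns(100, &ns);

    printf("code=0x%08x actual=%u ns\n", code, ns); /* prescale 0, divider 10000 */
    return 0;
}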
-
-static int dt3k_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- const struct dt3k_boardtype *board = dev->board_ptr;
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ai_speed);
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- 100 * 16 * 65535);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg,
- 50 * 16 * 65535);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- dt3k_ns_to_timer(100, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- dt3k_ns_to_timer(50, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- arg);
- }
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- int i;
- unsigned int chan, range, aref;
- unsigned int divider;
- unsigned int tscandiv;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- chan = CR_CHAN(cmd->chanlist[i]);
- range = CR_RANGE(cmd->chanlist[i]);
-
- writew((range << 6) | chan, dev->mmio + DPR_ADC_buffer + i);
- }
- aref = CR_AREF(cmd->chanlist[0]);
-
- writew(cmd->scan_end_arg, dev->mmio + DPR_Params(0));
-
- if (cmd->convert_src == TRIG_TIMER) {
- divider = dt3k_ns_to_timer(50, &cmd->convert_arg, cmd->flags);
- writew((divider >> 16), dev->mmio + DPR_Params(1));
- writew((divider & 0xffff), dev->mmio + DPR_Params(2));
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- tscandiv = dt3k_ns_to_timer(100, &cmd->scan_begin_arg,
- cmd->flags);
- writew((tscandiv >> 16), dev->mmio + DPR_Params(3));
- writew((tscandiv & 0xffff), dev->mmio + DPR_Params(4));
- }
-
- writew(DT3000_AD_RETRIG_INTERNAL, dev->mmio + DPR_Params(5));
- writew(aref == AREF_DIFF, dev->mmio + DPR_Params(6));
-
- writew(AI_FIFO_DEPTH / 2, dev->mmio + DPR_Params(7));
-
- writew(SUBS_AI, dev->mmio + DPR_SubSys);
- dt3k_send_cmd(dev, CMD_CONFIG);
-
- writew(DT3000_ADFULL | DT3000_ADSWERR | DT3000_ADHWERR,
- dev->mmio + DPR_Int_Mask);
-
- debug_n_ints = 0;
-
- writew(SUBS_AI, dev->mmio + DPR_SubSys);
- dt3k_send_cmd(dev, CMD_START);
-
- return 0;
-}
-
-static int dt3k_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int i;
- unsigned int chan, gain, aref;
-
- chan = CR_CHAN(insn->chanspec);
- gain = CR_RANGE(insn->chanspec);
- /* XXX docs don't explain how to select aref */
- aref = CR_AREF(insn->chanspec);
-
- for (i = 0; i < insn->n; i++)
- data[i] = dt3k_readsingle(dev, SUBS_AI, chan, gain);
-
- return i;
-}
-
-static int dt3k_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- dt3k_writesingle(dev, SUBS_AO, chan, val);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static void dt3k_dio_config(struct comedi_device *dev, int bits)
-{
- /* XXX */
- writew(SUBS_DOUT, dev->mmio + DPR_SubSys);
-
- writew(bits, dev->mmio + DPR_Params(0));
-#if 0
- /* don't know */
- writew(0, dev->mmio + DPR_Params(1));
- writew(0, dev->mmio + DPR_Params(2));
-#endif
-
- dt3k_send_cmd(dev, CMD_CONFIG);
-}
-
-static int dt3k_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 4)
- mask = 0x0f;
- else
- mask = 0xf0;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- dt3k_dio_config(dev, (s->io_bits & 0x01) | ((s->io_bits & 0x10) >> 3));
-
- return insn->n;
-}
-
-static int dt3k_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- dt3k_writesingle(dev, SUBS_DOUT, 0, s->state);
-
- data[1] = dt3k_readsingle(dev, SUBS_DIN, 0, 0);
-
- return insn->n;
-}
-
-static int dt3k_mem_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int addr = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- writew(SUBS_MEM, dev->mmio + DPR_SubSys);
- writew(addr, dev->mmio + DPR_Params(0));
- writew(1, dev->mmio + DPR_Params(1));
-
- dt3k_send_cmd(dev, CMD_READCODE);
-
- data[i] = readw(dev->mmio + DPR_Params(2));
- }
-
- return i;
-}
-
-static int dt3000_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct dt3k_boardtype *board = NULL;
- struct dt3k_private *devpriv;
- struct comedi_subdevice *s;
- int ret = 0;
-
- if (context < ARRAY_SIZE(dt3k_boardtypes))
- board = &dt3k_boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret < 0)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 0);
- if (!dev->mmio)
- return -ENOMEM;
-
- if (pcidev->irq) {
- ret = request_irq(pcidev->irq, dt3k_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = board->adchan;
- s->insn_read = dt3k_ai_insn;
- s->maxdata = (1 << board->adbits) - 1;
- s->range_table = &range_dt3000_ai; /* XXX */
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 512;
- s->do_cmd = dt3k_ai_cmd;
- s->do_cmdtest = dt3k_ai_cmdtest;
- s->cancel = dt3k_ai_cancel;
- }
-
- s = &dev->subdevices[1];
- /* ao subsystem */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = (1 << board->dabits) - 1;
- s->len_chanlist = 1;
- s->range_table = &range_bipolar10;
- s->insn_write = dt3k_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- /* dio subsystem */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->insn_config = dt3k_dio_insn_config;
- s->insn_bits = dt3k_dio_insn_bits;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_digital;
-
- s = &dev->subdevices[3];
- /* mem subsystem */
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 0x1000;
- s->insn_read = dt3k_mem_insn_read;
- s->maxdata = 0xff;
- s->len_chanlist = 1;
- s->range_table = &range_unknown;
-
-#if 0
- s = &dev->subdevices[4];
- /* proc subsystem */
- s->type = COMEDI_SUBD_PROC;
-#endif
-
- return 0;
-}
-
-static struct comedi_driver dt3000_driver = {
- .driver_name = "dt3000",
- .module = THIS_MODULE,
- .auto_attach = dt3000_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int dt3000_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &dt3000_driver, id->driver_data);
-}
-
-static const struct pci_device_id dt3000_pci_table[] = {
- { PCI_VDEVICE(DT, 0x0022), BOARD_DT3001 },
- { PCI_VDEVICE(DT, 0x0023), BOARD_DT3002 },
- { PCI_VDEVICE(DT, 0x0024), BOARD_DT3003 },
- { PCI_VDEVICE(DT, 0x0025), BOARD_DT3004 },
- { PCI_VDEVICE(DT, 0x0026), BOARD_DT3005 },
- { PCI_VDEVICE(DT, 0x0027), BOARD_DT3001_PGL },
- { PCI_VDEVICE(DT, 0x0028), BOARD_DT3003_PGL },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, dt3000_pci_table);
-
-static struct pci_driver dt3000_pci_driver = {
- .name = "dt3000",
- .id_table = dt3000_pci_table,
- .probe = dt3000_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(dt3000_driver, dt3000_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
deleted file mode 100644
index e11c216a4c85..000000000000
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ /dev/null
@@ -1,885 +0,0 @@
-/*
- * comedi/drivers/dt9812.c
- * COMEDI driver for DataTranslation DT9812 USB module
- *
- * Copyright (C) 2005 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
-Driver: dt9812
-Description: Data Translation DT9812 USB module
-Author: anders.blomdell@control.lth.se (Anders Blomdell)
-Status: in development
-Devices: [Data Translation] DT9812 (dt9812)
-Updated: Sun Nov 20 20:18:34 EST 2005
-
-This driver works, but bulk transfers are not implemented. It might be a starting
-point for someone else. I found out too late that USB has too high latencies
-(>1 ms) for my needs.
-*/
-
-/*
- * Nota Bene:
- * 1. All writes to the command pipe have to be 32 bytes (ISP1181B SHRTP=0 ?)
- * 2. The DDK source (as of sep 2005) is in error regarding the
- * input MUX bits (example code says P4, but firmware schematics
- * says P1).
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/uaccess.h>
-
-#include "../comedi_usb.h"
-
-#define DT9812_DIAGS_BOARD_INFO_ADDR 0xFBFF
-#define DT9812_MAX_WRITE_CMD_PIPE_SIZE 32
-#define DT9812_MAX_READ_CMD_PIPE_SIZE 32
-
-/* usb_bulk_msg() timeout in milliseconds */
-#define DT9812_USB_TIMEOUT 1000
-
-/*
- * See Silicon Laboratories C8051F020/1/2/3 manual
- */
-#define F020_SFR_P4 0x84
-#define F020_SFR_P1 0x90
-#define F020_SFR_P2 0xa0
-#define F020_SFR_P3 0xb0
-#define F020_SFR_AMX0CF 0xba
-#define F020_SFR_AMX0SL 0xbb
-#define F020_SFR_ADC0CF 0xbc
-#define F020_SFR_ADC0L 0xbe
-#define F020_SFR_ADC0H 0xbf
-#define F020_SFR_DAC0L 0xd2
-#define F020_SFR_DAC0H 0xd3
-#define F020_SFR_DAC0CN 0xd4
-#define F020_SFR_DAC1L 0xd5
-#define F020_SFR_DAC1H 0xd6
-#define F020_SFR_DAC1CN 0xd7
-#define F020_SFR_ADC0CN 0xe8
-
-#define F020_MASK_ADC0CF_AMP0GN0 0x01
-#define F020_MASK_ADC0CF_AMP0GN1 0x02
-#define F020_MASK_ADC0CF_AMP0GN2 0x04
-
-#define F020_MASK_ADC0CN_AD0EN 0x80
-#define F020_MASK_ADC0CN_AD0INT 0x20
-#define F020_MASK_ADC0CN_AD0BUSY 0x10
-
-#define F020_MASK_DACxCN_DACxEN 0x80
-
-enum {
- /* A/D D/A DI DO CT */
- DT9812_DEVID_DT9812_10, /* 8 2 8 8 1 +/- 10V */
- DT9812_DEVID_DT9812_2PT5, /* 8 2 8 8 1 0-2.44V */
-};
-
-enum dt9812_gain {
- DT9812_GAIN_0PT25 = 1,
- DT9812_GAIN_0PT5 = 2,
- DT9812_GAIN_1 = 4,
- DT9812_GAIN_2 = 8,
- DT9812_GAIN_4 = 16,
- DT9812_GAIN_8 = 32,
- DT9812_GAIN_16 = 64,
-};
-
-enum {
- DT9812_LEAST_USB_FIRMWARE_CMD_CODE = 0,
- /* Write Flash memory */
- DT9812_W_FLASH_DATA = 0,
- /* Read Flash memory misc config info */
- DT9812_R_FLASH_DATA = 1,
-
- /*
- * Register read/write commands for processor
- */
-
- /* Read a single byte of USB memory */
- DT9812_R_SINGLE_BYTE_REG = 2,
- /* Write a single byte of USB memory */
- DT9812_W_SINGLE_BYTE_REG = 3,
- /* Multiple Reads of USB memory */
- DT9812_R_MULTI_BYTE_REG = 4,
- /* Multiple Writes of USB memory */
- DT9812_W_MULTI_BYTE_REG = 5,
- /* Read, (AND) with mask, OR value, then write (single) */
- DT9812_RMW_SINGLE_BYTE_REG = 6,
- /* Read, (AND) with mask, OR value, then write (multiple) */
- DT9812_RMW_MULTI_BYTE_REG = 7,
-
- /*
- * Register read/write commands for SMBus
- */
-
- /* Read a single byte of SMBus */
- DT9812_R_SINGLE_BYTE_SMBUS = 8,
- /* Write a single byte of SMBus */
- DT9812_W_SINGLE_BYTE_SMBUS = 9,
- /* Multiple Reads of SMBus */
- DT9812_R_MULTI_BYTE_SMBUS = 10,
- /* Multiple Writes of SMBus */
- DT9812_W_MULTI_BYTE_SMBUS = 11,
-
- /*
- * Register read/write commands for a device
- */
-
- /* Read a single byte of a device */
- DT9812_R_SINGLE_BYTE_DEV = 12,
- /* Write a single byte of a device */
- DT9812_W_SINGLE_BYTE_DEV = 13,
- /* Multiple Reads of a device */
- DT9812_R_MULTI_BYTE_DEV = 14,
- /* Multiple Writes of a device */
- DT9812_W_MULTI_BYTE_DEV = 15,
-
- /* Not sure if we'll need this */
- DT9812_W_DAC_THRESHOLD = 16,
-
- /* Set interrupt on change mask */
- DT9812_W_INT_ON_CHANGE_MASK = 17,
-
- /* Write (or Clear) the CGL for the ADC */
- DT9812_W_CGL = 18,
- /* Multiple Reads of USB memory */
- DT9812_R_MULTI_BYTE_USBMEM = 19,
- /* Multiple Writes to USB memory */
- DT9812_W_MULTI_BYTE_USBMEM = 20,
-
- /* Issue a start command to a given subsystem */
- DT9812_START_SUBSYSTEM = 21,
- /* Issue a stop command to a given subsystem */
- DT9812_STOP_SUBSYSTEM = 22,
-
- /* calibrate the board using CAL_POT_CMD */
- DT9812_CALIBRATE_POT = 23,
- /* set the DAC FIFO size */
- DT9812_W_DAC_FIFO_SIZE = 24,
- /* Write or Clear the CGL for the DAC */
- DT9812_W_CGL_DAC = 25,
- /* Read a single value from a subsystem */
- DT9812_R_SINGLE_VALUE_CMD = 26,
- /* Write a single value to a subsystem */
- DT9812_W_SINGLE_VALUE_CMD = 27,
- /* Valid DT9812_USB_FIRMWARE_CMD_CODE's will be less than this number */
- DT9812_MAX_USB_FIRMWARE_CMD_CODE,
-};
-
-struct dt9812_flash_data {
- __le16 numbytes;
- __le16 address;
-};
-
-#define DT9812_MAX_NUM_MULTI_BYTE_RDS \
- ((DT9812_MAX_WRITE_CMD_PIPE_SIZE - 4 - 1) / sizeof(u8))
-
-struct dt9812_read_multi {
- u8 count;
- u8 address[DT9812_MAX_NUM_MULTI_BYTE_RDS];
-};
-
-struct dt9812_write_byte {
- u8 address;
- u8 value;
-};
-
-#define DT9812_MAX_NUM_MULTI_BYTE_WRTS \
- ((DT9812_MAX_WRITE_CMD_PIPE_SIZE - 4 - 1) / \
- sizeof(struct dt9812_write_byte))
-
-struct dt9812_write_multi {
- u8 count;
- struct dt9812_write_byte write[DT9812_MAX_NUM_MULTI_BYTE_WRTS];
-};
-
-struct dt9812_rmw_byte {
- u8 address;
- u8 and_mask;
- u8 or_value;
-};
-
-#define DT9812_MAX_NUM_MULTI_BYTE_RMWS \
- ((DT9812_MAX_WRITE_CMD_PIPE_SIZE - 4 - 1) / \
- sizeof(struct dt9812_rmw_byte))
-
-struct dt9812_rmw_multi {
- u8 count;
- struct dt9812_rmw_byte rmw[DT9812_MAX_NUM_MULTI_BYTE_RMWS];
-};
-
-struct dt9812_usb_cmd {
- __le32 cmd;
- union {
- struct dt9812_flash_data flash_data_info;
- struct dt9812_read_multi read_multi_info;
- struct dt9812_write_multi write_multi_info;
- struct dt9812_rmw_multi rmw_multi_info;
- } u;
-};
-
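As a rough sketch of how these structures are meant to be used (this is not code from the original driver; 'usb' and 'wr_addr' stand in for the USB device pointer and the bulk-out endpoint address that the attach code below stores in the private data), a single-register write command could be assembled and pushed down the fixed 32-byte command pipe like this:

	struct dt9812_usb_cmd cmd = { 0 };
	int count;

	cmd.cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG);
	cmd.u.write_multi_info.count = 1;
	cmd.u.write_multi_info.write[0].address = F020_SFR_P2;	/* digital output port */
	cmd.u.write_multi_info.write[0].value = 0xff;
	/* the device only accepts fixed 32-byte packets on this pipe */
	usb_bulk_msg(usb, usb_sndbulkpipe(usb, wr_addr),
		     &cmd, 32, &count, DT9812_USB_TIMEOUT);

The helpers that follow (dt9812_read_info(), dt9812_write_multiple_registers() and friends) do this kind of packing on behalf of the rest of the driver.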
-struct dt9812_private {
- struct semaphore sem;
- struct {
- __u8 addr;
- size_t size;
- } cmd_wr, cmd_rd;
- u16 device;
-};
-
-static int dt9812_read_info(struct comedi_device *dev,
- int offset, void *buf, size_t buf_size)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
- int count, ret;
-
- cmd.cmd = cpu_to_le32(DT9812_R_FLASH_DATA);
- cmd.u.flash_data_info.address =
- cpu_to_le16(DT9812_DIAGS_BOARD_INFO_ADDR + offset);
- cmd.u.flash_data_info.numbytes = cpu_to_le16(buf_size);
-
- /* DT9812 only responds to 32 byte writes!! */
- ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
- if (ret)
- return ret;
-
- return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
- buf, buf_size, &count, DT9812_USB_TIMEOUT);
-}
-
-static int dt9812_read_multiple_registers(struct comedi_device *dev,
- int reg_count, u8 *address,
- u8 *value)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
- int i, count, ret;
-
- cmd.cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG);
- cmd.u.read_multi_info.count = reg_count;
- for (i = 0; i < reg_count; i++)
- cmd.u.read_multi_info.address[i] = address[i];
-
- /* DT9812 only responds to 32 byte writes!! */
- ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
- if (ret)
- return ret;
-
- return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
- value, reg_count, &count, DT9812_USB_TIMEOUT);
-}
-
-static int dt9812_write_multiple_registers(struct comedi_device *dev,
- int reg_count, u8 *address,
- u8 *value)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
- int i, count;
-
- cmd.cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG);
-	cmd.u.write_multi_info.count = reg_count;
- for (i = 0; i < reg_count; i++) {
- cmd.u.write_multi_info.write[i].address = address[i];
- cmd.u.write_multi_info.write[i].value = value[i];
- }
-
- /* DT9812 only responds to 32 byte writes!! */
- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
-}
-
-static int dt9812_rmw_multiple_registers(struct comedi_device *dev,
- int reg_count,
- struct dt9812_rmw_byte *rmw)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct dt9812_private *devpriv = dev->private;
- struct dt9812_usb_cmd cmd;
- int i, count;
-
- cmd.cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG);
- cmd.u.rmw_multi_info.count = reg_count;
- for (i = 0; i < reg_count; i++)
- cmd.u.rmw_multi_info.rmw[i] = rmw[i];
-
- /* DT9812 only responds to 32 byte writes!! */
- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
- &cmd, 32, &count, DT9812_USB_TIMEOUT);
-}
-
-static int dt9812_digital_in(struct comedi_device *dev, u8 *bits)
-{
- struct dt9812_private *devpriv = dev->private;
- u8 reg[2] = { F020_SFR_P3, F020_SFR_P1 };
- u8 value[2];
- int ret;
-
- down(&devpriv->sem);
- ret = dt9812_read_multiple_registers(dev, 2, reg, value);
- if (ret == 0) {
- /*
-		 * bits 0-6 of F020_SFR_P3 are bits 0-6 of the digital
-		 * input port; bit 3 of F020_SFR_P1 is bit 7 of the
-		 * digital input port.
- */
- *bits = (value[0] & 0x7f) | ((value[1] & 0x08) << 4);
- }
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int dt9812_digital_out(struct comedi_device *dev, u8 bits)
-{
- struct dt9812_private *devpriv = dev->private;
- u8 reg[1] = { F020_SFR_P2 };
- u8 value[1] = { bits };
- int ret;
-
- down(&devpriv->sem);
- ret = dt9812_write_multiple_registers(dev, 1, reg, value);
- up(&devpriv->sem);
-
- return ret;
-}
-
-static void dt9812_configure_mux(struct comedi_device *dev,
- struct dt9812_rmw_byte *rmw, int channel)
-{
- struct dt9812_private *devpriv = dev->private;
-
- if (devpriv->device == DT9812_DEVID_DT9812_10) {
- /* In the DT9812/10V MUX is selected by P1.5-7 */
- rmw->address = F020_SFR_P1;
- rmw->and_mask = 0xe0;
- rmw->or_value = channel << 5;
- } else {
- /* In the DT9812/2.5V, internal mux is selected by bits 0:2 */
- rmw->address = F020_SFR_AMX0SL;
- rmw->and_mask = 0xff;
- rmw->or_value = channel & 0x07;
- }
-}
-
-static void dt9812_configure_gain(struct comedi_device *dev,
- struct dt9812_rmw_byte *rmw,
- enum dt9812_gain gain)
-{
- struct dt9812_private *devpriv = dev->private;
-
- /* In the DT9812/10V, there is an external gain of 0.5 */
- if (devpriv->device == DT9812_DEVID_DT9812_10)
- gain <<= 1;
-
- rmw->address = F020_SFR_ADC0CF;
- rmw->and_mask = F020_MASK_ADC0CF_AMP0GN2 |
- F020_MASK_ADC0CF_AMP0GN1 |
- F020_MASK_ADC0CF_AMP0GN0;
-
- switch (gain) {
- /*
- * 000 -> Gain = 1
- * 001 -> Gain = 2
- * 010 -> Gain = 4
- * 011 -> Gain = 8
- * 10x -> Gain = 16
- * 11x -> Gain = 0.5
- */
- case DT9812_GAIN_0PT5:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 |
- F020_MASK_ADC0CF_AMP0GN1;
- break;
- default:
- /* this should never happen, just use a gain of 1 */
- case DT9812_GAIN_1:
- rmw->or_value = 0x00;
- break;
- case DT9812_GAIN_2:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN0;
- break;
- case DT9812_GAIN_4:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN1;
- break;
- case DT9812_GAIN_8:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 |
- F020_MASK_ADC0CF_AMP0GN0;
- break;
- case DT9812_GAIN_16:
- rmw->or_value = F020_MASK_ADC0CF_AMP0GN2;
- break;
- }
-}
-
-static int dt9812_analog_in(struct comedi_device *dev,
- int channel, u16 *value, enum dt9812_gain gain)
-{
- struct dt9812_private *devpriv = dev->private;
- struct dt9812_rmw_byte rmw[3];
- u8 reg[3] = {
- F020_SFR_ADC0CN,
- F020_SFR_ADC0H,
- F020_SFR_ADC0L
- };
- u8 val[3];
- int ret;
-
- down(&devpriv->sem);
-
- /* 1 select the gain */
- dt9812_configure_gain(dev, &rmw[0], gain);
-
- /* 2 set the MUX to select the channel */
- dt9812_configure_mux(dev, &rmw[1], channel);
-
- /* 3 start conversion */
- rmw[2].address = F020_SFR_ADC0CN;
- rmw[2].and_mask = 0xff;
- rmw[2].or_value = F020_MASK_ADC0CN_AD0EN | F020_MASK_ADC0CN_AD0BUSY;
-
- ret = dt9812_rmw_multiple_registers(dev, 3, rmw);
- if (ret)
- goto exit;
-
- /* read the status and ADC */
- ret = dt9812_read_multiple_registers(dev, 3, reg, val);
- if (ret)
- goto exit;
-
- /*
-	 * An ADC conversion takes 16 SAR clock cycles, i.e. about 9us.
- * Therefore, between the instant that AD0BUSY was set via
- * dt9812_rmw_multiple_registers and the read of AD0BUSY via
- * dt9812_read_multiple_registers, the conversion should be complete
- * since these two operations require two USB transactions each taking
-	 * at least a millisecond to complete. However, let's make sure that
- * conversion is finished.
- */
- if ((val[0] & (F020_MASK_ADC0CN_AD0INT | F020_MASK_ADC0CN_AD0BUSY)) ==
- F020_MASK_ADC0CN_AD0INT) {
- switch (devpriv->device) {
- case DT9812_DEVID_DT9812_10:
- /*
-			 * For the DT9812-10V the personality module sets the
-			 * encoding to 2's complement, so convert it before
-			 * returning it.
- */
- *value = ((val[1] << 8) | val[2]) + 0x800;
- break;
- case DT9812_DEVID_DT9812_2PT5:
- *value = (val[1] << 8) | val[2];
- break;
- }
- }
-
-exit:
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int dt9812_analog_out(struct comedi_device *dev, int channel, u16 value)
-{
- struct dt9812_private *devpriv = dev->private;
- struct dt9812_rmw_byte rmw[3];
- int ret;
-
- down(&devpriv->sem);
-
- switch (channel) {
- case 0:
- /* 1. Set DAC mode */
- rmw[0].address = F020_SFR_DAC0CN;
- rmw[0].and_mask = 0xff;
- rmw[0].or_value = F020_MASK_DACxCN_DACxEN;
-
- /* 2 load low byte of DAC value first */
- rmw[1].address = F020_SFR_DAC0L;
- rmw[1].and_mask = 0xff;
- rmw[1].or_value = value & 0xff;
-
- /* 3 load high byte of DAC value next to latch the
- 12-bit value */
- rmw[2].address = F020_SFR_DAC0H;
- rmw[2].and_mask = 0xff;
- rmw[2].or_value = (value >> 8) & 0xf;
- break;
-
- case 1:
- /* 1. Set DAC mode */
- rmw[0].address = F020_SFR_DAC1CN;
- rmw[0].and_mask = 0xff;
- rmw[0].or_value = F020_MASK_DACxCN_DACxEN;
-
- /* 2 load low byte of DAC value first */
- rmw[1].address = F020_SFR_DAC1L;
- rmw[1].and_mask = 0xff;
- rmw[1].or_value = value & 0xff;
-
- /* 3 load high byte of DAC value next to latch the
- 12-bit value */
- rmw[2].address = F020_SFR_DAC1H;
- rmw[2].and_mask = 0xff;
- rmw[2].or_value = (value >> 8) & 0xf;
- break;
- }
- ret = dt9812_rmw_multiple_registers(dev, 3, rmw);
-
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int dt9812_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- u8 bits = 0;
- int ret;
-
- ret = dt9812_digital_in(dev, &bits);
- if (ret)
- return ret;
-
- data[1] = bits;
-
- return insn->n;
-}
-
-static int dt9812_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- dt9812_digital_out(dev, s->state);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int dt9812_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- u16 val = 0;
- int ret;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- ret = dt9812_analog_in(dev, chan, &val, DT9812_GAIN_1);
- if (ret)
- return ret;
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int dt9812_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dt9812_private *devpriv = dev->private;
- int ret;
-
- down(&devpriv->sem);
- ret = comedi_readback_insn_read(dev, s, insn, data);
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int dt9812_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
- int ret;
-
- ret = dt9812_analog_out(dev, chan, val);
- if (ret)
- return ret;
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int dt9812_find_endpoints(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usb_host_interface *host = intf->cur_altsetting;
- struct dt9812_private *devpriv = dev->private;
- struct usb_endpoint_descriptor *ep;
- int i;
-
- if (host->desc.bNumEndpoints != 5) {
- dev_err(dev->class_dev, "Wrong number of endpoints\n");
- return -ENODEV;
- }
-
- for (i = 0; i < host->desc.bNumEndpoints; ++i) {
- int dir = -1;
-
- ep = &host->endpoint[i].desc;
- switch (i) {
- case 0:
- /* unused message pipe */
- dir = USB_DIR_IN;
- break;
- case 1:
- dir = USB_DIR_OUT;
- devpriv->cmd_wr.addr = ep->bEndpointAddress;
- devpriv->cmd_wr.size = le16_to_cpu(ep->wMaxPacketSize);
- break;
- case 2:
- dir = USB_DIR_IN;
- devpriv->cmd_rd.addr = ep->bEndpointAddress;
- devpriv->cmd_rd.size = le16_to_cpu(ep->wMaxPacketSize);
- break;
- case 3:
- /* unused write stream */
- dir = USB_DIR_OUT;
- break;
- case 4:
- /* unused read stream */
- dir = USB_DIR_IN;
- break;
- }
- if ((ep->bEndpointAddress & USB_DIR_IN) != dir) {
- dev_err(dev->class_dev,
- "Endpoint has wrong direction\n");
- return -ENODEV;
- }
- }
- return 0;
-}
-
-static int dt9812_reset_device(struct comedi_device *dev)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct dt9812_private *devpriv = dev->private;
- u32 serial;
- u16 vendor;
- u16 product;
- u8 tmp8;
- __le16 tmp16;
- __le32 tmp32;
- int ret;
- int i;
-
- ret = dt9812_read_info(dev, 0, &tmp8, sizeof(tmp8));
- if (ret) {
- /*
-		 * Seems like a configuration reset is necessary if the driver
-		 * is reloaded while the device is attached.
- */
- usb_reset_configuration(usb);
- for (i = 0; i < 10; i++) {
- ret = dt9812_read_info(dev, 1, &tmp8, sizeof(tmp8));
- if (ret == 0)
- break;
- }
- if (ret) {
- dev_err(dev->class_dev,
- "unable to reset configuration\n");
- return ret;
- }
- }
-
- ret = dt9812_read_info(dev, 1, &tmp16, sizeof(tmp16));
- if (ret) {
- dev_err(dev->class_dev, "failed to read vendor id\n");
- return ret;
- }
- vendor = le16_to_cpu(tmp16);
-
- ret = dt9812_read_info(dev, 3, &tmp16, sizeof(tmp16));
- if (ret) {
- dev_err(dev->class_dev, "failed to read product id\n");
- return ret;
- }
- product = le16_to_cpu(tmp16);
-
- ret = dt9812_read_info(dev, 5, &tmp16, sizeof(tmp16));
- if (ret) {
- dev_err(dev->class_dev, "failed to read device id\n");
- return ret;
- }
- devpriv->device = le16_to_cpu(tmp16);
-
- ret = dt9812_read_info(dev, 7, &tmp32, sizeof(tmp32));
- if (ret) {
- dev_err(dev->class_dev, "failed to read serial number\n");
- return ret;
- }
- serial = le32_to_cpu(tmp32);
-
- /* let the user know what node this device is now attached to */
- dev_info(dev->class_dev, "USB DT9812 (%4.4x.%4.4x.%4.4x) #0x%8.8x\n",
- vendor, product, devpriv->device, serial);
-
- if (devpriv->device != DT9812_DEVID_DT9812_10 &&
- devpriv->device != DT9812_DEVID_DT9812_2PT5) {
- dev_err(dev->class_dev, "Unsupported device!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dt9812_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct dt9812_private *devpriv;
- struct comedi_subdevice *s;
- bool is_unipolar;
- int ret;
- int i;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- sema_init(&devpriv->sem, 1);
- usb_set_intfdata(intf, devpriv);
-
- ret = dt9812_find_endpoints(dev);
- if (ret)
- return ret;
-
- ret = dt9812_reset_device(dev);
- if (ret)
- return ret;
-
- is_unipolar = (devpriv->device == DT9812_DEVID_DT9812_2PT5);
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* Digital Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = dt9812_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = dt9812_do_insn_bits;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8;
- s->maxdata = 0x0fff;
- s->range_table = is_unipolar ? &range_unipolar2_5 : &range_bipolar10;
- s->insn_read = dt9812_ai_insn_read;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table = is_unipolar ? &range_unipolar2_5 : &range_bipolar10;
- s->insn_write = dt9812_ao_insn_write;
- s->insn_read = dt9812_ao_insn_read;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++)
- s->readback[i] = is_unipolar ? 0x0000 : 0x0800;
-
- return 0;
-}
-
-static void dt9812_detach(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct dt9812_private *devpriv = dev->private;
-
- if (!devpriv)
- return;
-
- down(&devpriv->sem);
-
- usb_set_intfdata(intf, NULL);
-
- up(&devpriv->sem);
-}
-
-static struct comedi_driver dt9812_driver = {
- .driver_name = "dt9812",
- .module = THIS_MODULE,
- .auto_attach = dt9812_auto_attach,
- .detach = dt9812_detach,
-};
-
-static int dt9812_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- return comedi_usb_auto_config(intf, &dt9812_driver, id->driver_info);
-}
-
-static const struct usb_device_id dt9812_usb_table[] = {
- { USB_DEVICE(0x0867, 0x9812) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, dt9812_usb_table);
-
-static struct usb_driver dt9812_usb_driver = {
- .name = "dt9812",
- .id_table = dt9812_usb_table,
- .probe = dt9812_usb_probe,
- .disconnect = comedi_usb_auto_unconfig,
-};
-module_comedi_usb_driver(dt9812_driver, dt9812_usb_driver);
-
-MODULE_AUTHOR("Anders Blomdell <anders.blomdell@control.lth.se>");
-MODULE_DESCRIPTION("Comedi DT9812 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
deleted file mode 100644
index c9eb26fab44e..000000000000
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * comedi/drivers/dyna_pci10xx.c
- * Copyright (C) 2011 Prashant Shah, pshah.mumbai@gmail.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: dyna_pci10xx
- * Description: Dynalog India PCI DAQ Cards, http://www.dynalogindia.com/
- * Devices: [Dynalog] PCI-1050 (dyna_pci1050)
- * Author: Prashant Shah <pshah.mumbai@gmail.com>
- * Status: Stable
- *
- * Developed at Automation Labs, Chemical Dept., IIT Bombay, India.
- * Prof. Kannan Moudgalya <kannan@iitb.ac.in>
- * http://www.iitb.ac.in
- *
- * Notes :
- * - Dynalog India Pvt. Ltd. does not have a registered PCI Vendor ID and
- *   they are using the PLX Technologies Vendor ID since that is the PCI chip
- * used in the card.
- * - Dynalog India Pvt. Ltd. has provided the internal register specification
- * for their cards in their manuals.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-
-#include "../comedi_pci.h"
-
-#define READ_TIMEOUT 50
-
-static const struct comedi_lrange range_pci1050_ai = {
- 3, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-static const char range_codes_pci1050_ai[] = { 0x00, 0x10, 0x30 };
-
-struct dyna_pci10xx_private {
- struct mutex mutex;
- unsigned long BADR3;
-};
-
-static int dyna_pci10xx_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw_p(dev->iobase);
- if (status & (1 << 15))
- return 0;
- return -EBUSY;
-}
-
-static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dyna_pci10xx_private *devpriv = dev->private;
- int n;
- u16 d = 0;
- int ret = 0;
- unsigned int chan, range;
-
- /* get the channel number and range */
- chan = CR_CHAN(insn->chanspec);
- range = range_codes_pci1050_ai[CR_RANGE((insn->chanspec))];
-
- mutex_lock(&devpriv->mutex);
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- /* trigger conversion */
- smp_mb();
- outw_p(0x0000 + range + chan, dev->iobase + 2);
- udelay(10);
-
- ret = comedi_timeout(dev, s, insn, dyna_pci10xx_ai_eoc, 0);
- if (ret)
- break;
-
- /* read data */
- d = inw_p(dev->iobase);
- /* mask the first 4 bits - EOC bits */
- d &= 0x0FFF;
- data[n] = d;
- }
- mutex_unlock(&devpriv->mutex);
-
- /* return the number of samples read/written */
- return ret ? ret : n;
-}
-
-/* analog output callback */
-static int dyna_pci10xx_insn_write_ao(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dyna_pci10xx_private *devpriv = dev->private;
- int n;
- unsigned int chan, range;
-
- chan = CR_CHAN(insn->chanspec);
- range = range_codes_pci1050_ai[CR_RANGE((insn->chanspec))];
-
- mutex_lock(&devpriv->mutex);
- for (n = 0; n < insn->n; n++) {
- smp_mb();
- /* trigger conversion and write data */
- outw_p(data[n], dev->iobase);
- udelay(10);
- }
- mutex_unlock(&devpriv->mutex);
- return n;
-}
-
-/* digital input bit interface */
-static int dyna_pci10xx_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dyna_pci10xx_private *devpriv = dev->private;
- u16 d = 0;
-
- mutex_lock(&devpriv->mutex);
- smp_mb();
- d = inw_p(devpriv->BADR3);
- udelay(10);
-
- /* on return the data[0] contains output and data[1] contains input */
- data[1] = d;
- data[0] = s->state;
- mutex_unlock(&devpriv->mutex);
- return insn->n;
-}
-
-static int dyna_pci10xx_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dyna_pci10xx_private *devpriv = dev->private;
-
- mutex_lock(&devpriv->mutex);
- if (comedi_dio_update_state(s, data)) {
- smp_mb();
- outw_p(s->state, devpriv->BADR3);
- udelay(10);
- }
-
- data[1] = s->state;
- mutex_unlock(&devpriv->mutex);
-
- return insn->n;
-}
-
-static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct dyna_pci10xx_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 2);
- devpriv->BADR3 = pci_resource_start(pcidev, 3);
-
- mutex_init(&devpriv->mutex);
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* analog input */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 16;
- s->maxdata = 0x0FFF;
- s->range_table = &range_pci1050_ai;
- s->len_chanlist = 16;
- s->insn_read = dyna_pci10xx_insn_read_ai;
-
- /* analog output */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 0x0FFF;
- s->range_table = &range_unipolar10;
- s->len_chanlist = 16;
- s->insn_write = dyna_pci10xx_insn_write_ao;
-
- /* digital input */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->len_chanlist = 16;
- s->insn_bits = dyna_pci10xx_di_insn_bits;
-
- /* digital output */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->len_chanlist = 16;
- s->state = 0;
- s->insn_bits = dyna_pci10xx_do_insn_bits;
-
- return 0;
-}
-
-static void dyna_pci10xx_detach(struct comedi_device *dev)
-{
- struct dyna_pci10xx_private *devpriv = dev->private;
-
- comedi_pci_detach(dev);
- if (devpriv)
- mutex_destroy(&devpriv->mutex);
-}
-
-static struct comedi_driver dyna_pci10xx_driver = {
- .driver_name = "dyna_pci10xx",
- .module = THIS_MODULE,
- .auto_attach = dyna_pci10xx_auto_attach,
- .detach = dyna_pci10xx_detach,
-};
-
-static int dyna_pci10xx_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &dyna_pci10xx_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id dyna_pci10xx_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_PLX, 0x1050) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, dyna_pci10xx_pci_table);
-
-static struct pci_driver dyna_pci10xx_pci_driver = {
- .name = "dyna_pci10xx",
- .id_table = dyna_pci10xx_pci_table,
- .probe = dyna_pci10xx_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(dyna_pci10xx_driver, dyna_pci10xx_pci_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Prashant Shah <pshah.mumbai@gmail.com>");
-MODULE_DESCRIPTION("Comedi based drivers for Dynalog PCI DAQ cards");
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
deleted file mode 100644
index 55cae61458cb..000000000000
--- a/drivers/staging/comedi/drivers/fl512.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * fl512.c
- * Anders Gnistrup <ex18@kalman.iau.dtu.dk>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: fl512
- * Description: unknown
- * Author: Anders Gnistrup <ex18@kalman.iau.dtu.dk>
- * Devices: [unknown] FL512 (fl512)
- * Status: unknown
- *
- * Digital I/O is not supported.
- *
- * Configuration options:
- * [0] - I/O port base address
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <linux/delay.h>
-
-/*
- * Register I/O map
- */
-#define FL512_AI_LSB_REG 0x02
-#define FL512_AI_MSB_REG 0x03
-#define FL512_AI_MUX_REG 0x02
-#define FL512_AI_START_CONV_REG 0x03
-#define FL512_AO_DATA_REG(x) (0x04 + ((x) * 2))
-#define FL512_AO_TRIG_REG(x) (0x04 + ((x) * 2))
-
-static const struct comedi_lrange range_fl512 = {
- 4, {
- BIP_RANGE(0.5),
- BIP_RANGE(1),
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-static int fl512_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- int i;
-
- outb(chan, dev->iobase + FL512_AI_MUX_REG);
-
- for (i = 0; i < insn->n; i++) {
- outb(0, dev->iobase + FL512_AI_START_CONV_REG);
-
- /* XXX should test "done" flag instead of delay */
- udelay(30);
-
- val = inb(dev->iobase + FL512_AI_LSB_REG);
- val |= (inb(dev->iobase + FL512_AI_MSB_REG) << 8);
- val &= s->maxdata;
-
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int fl512_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- /* write LSB, MSB then trigger conversion */
- outb(val & 0x0ff, dev->iobase + FL512_AO_DATA_REG(chan));
- outb((val >> 8) & 0xf, dev->iobase + FL512_AO_DATA_REG(chan));
- inb(dev->iobase + FL512_AO_TRIG_REG(chan));
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 16;
- s->maxdata = 0x0fff;
- s->range_table = &range_fl512;
- s->insn_read = fl512_ai_insn_read;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table = &range_fl512;
- s->insn_write = fl512_ao_insn_write;
-
- return comedi_alloc_subdev_readback(s);
-}
-
-static struct comedi_driver fl512_driver = {
- .driver_name = "fl512",
- .module = THIS_MODULE,
- .attach = fl512_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(fl512_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
deleted file mode 100644
index e9296182236e..000000000000
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/*
- * gsc_hpdi.c
- * Comedi driver for the General Standards Corporation
- * High Speed Parallel Digital Interface rs485 boards.
- *
- * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- * Copyright (C) 2003 Coherent Imaging Systems
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: gsc_hpdi
- * Description: General Standards Corporation High
- * Speed Parallel Digital Interface rs485 boards
- * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- * Status: only receive mode works, transmit not supported
- * Updated: Thu, 01 Nov 2012 16:17:38 +0000
- * Devices: [General Standards Corporation] PCI-HPDI32 (gsc_hpdi),
- * PMC-HPDI32
- *
- * Configuration options:
- * None.
- *
- * Manual configuration of supported devices is not supported; they are
- * configured automatically.
- *
- * There are some additional hpdi models available from GSC for which
- * support could be added to this driver.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "plx9080.h"
-
-/*
- * PCI BAR2 Register map (dev->mmio)
- */
-#define FIRMWARE_REV_REG 0x00
-#define FEATURES_REG_PRESENT_BIT BIT(15)
-#define BOARD_CONTROL_REG 0x04
-#define BOARD_RESET_BIT BIT(0)
-#define TX_FIFO_RESET_BIT BIT(1)
-#define RX_FIFO_RESET_BIT BIT(2)
-#define TX_ENABLE_BIT BIT(4)
-#define RX_ENABLE_BIT BIT(5)
-#define DEMAND_DMA_DIRECTION_TX_BIT BIT(6) /* ch 0 only */
-#define LINE_VALID_ON_STATUS_VALID_BIT BIT(7)
-#define START_TX_BIT BIT(8)
-#define CABLE_THROTTLE_ENABLE_BIT BIT(9)
-#define TEST_MODE_ENABLE_BIT BIT(31)
-#define BOARD_STATUS_REG 0x08
-#define COMMAND_LINE_STATUS_MASK (0x7f << 0)
-#define TX_IN_PROGRESS_BIT BIT(7)
-#define TX_NOT_EMPTY_BIT BIT(8)
-#define TX_NOT_ALMOST_EMPTY_BIT BIT(9)
-#define TX_NOT_ALMOST_FULL_BIT BIT(10)
-#define TX_NOT_FULL_BIT BIT(11)
-#define RX_NOT_EMPTY_BIT BIT(12)
-#define RX_NOT_ALMOST_EMPTY_BIT BIT(13)
-#define RX_NOT_ALMOST_FULL_BIT BIT(14)
-#define RX_NOT_FULL_BIT BIT(15)
-#define BOARD_JUMPER0_INSTALLED_BIT BIT(16)
-#define BOARD_JUMPER1_INSTALLED_BIT BIT(17)
-#define TX_OVERRUN_BIT BIT(21)
-#define RX_UNDERRUN_BIT BIT(22)
-#define RX_OVERRUN_BIT BIT(23)
-#define TX_PROG_ALMOST_REG 0x0c
-#define RX_PROG_ALMOST_REG 0x10
-#define ALMOST_EMPTY_BITS(x) (((x) & 0xffff) << 0)
-#define ALMOST_FULL_BITS(x) (((x) & 0xff) << 16)
-#define FEATURES_REG 0x14
-#define FIFO_SIZE_PRESENT_BIT BIT(0)
-#define FIFO_WORDS_PRESENT_BIT BIT(1)
-#define LEVEL_EDGE_INTERRUPTS_PRESENT_BIT BIT(2)
-#define GPIO_SUPPORTED_BIT BIT(3)
-#define PLX_DMA_CH1_SUPPORTED_BIT BIT(4)
-#define OVERRUN_UNDERRUN_SUPPORTED_BIT BIT(5)
-#define FIFO_REG 0x18
-#define TX_STATUS_COUNT_REG 0x1c
-#define TX_LINE_VALID_COUNT_REG	0x20
-#define TX_LINE_INVALID_COUNT_REG 0x24
-#define RX_STATUS_COUNT_REG 0x28
-#define RX_LINE_COUNT_REG 0x2c
-#define INTERRUPT_CONTROL_REG 0x30
-#define FRAME_VALID_START_INTR BIT(0)
-#define FRAME_VALID_END_INTR BIT(1)
-#define TX_FIFO_EMPTY_INTR BIT(8)
-#define TX_FIFO_ALMOST_EMPTY_INTR BIT(9)
-#define TX_FIFO_ALMOST_FULL_INTR BIT(10)
-#define TX_FIFO_FULL_INTR BIT(11)
-#define RX_EMPTY_INTR BIT(12)
-#define RX_ALMOST_EMPTY_INTR BIT(13)
-#define RX_ALMOST_FULL_INTR BIT(14)
-#define RX_FULL_INTR BIT(15)
-#define INTERRUPT_STATUS_REG 0x34
-#define TX_CLOCK_DIVIDER_REG 0x38
-#define TX_FIFO_SIZE_REG 0x40
-#define RX_FIFO_SIZE_REG 0x44
-#define FIFO_SIZE_MASK (0xfffff << 0)
-#define TX_FIFO_WORDS_REG 0x48
-#define RX_FIFO_WORDS_REG 0x4c
-#define INTERRUPT_EDGE_LEVEL_REG 0x50
-#define INTERRUPT_POLARITY_REG 0x54
-
-#define TIMER_BASE 50 /* 20MHz master clock */
-#define DMA_BUFFER_SIZE 0x10000
-#define NUM_DMA_BUFFERS 4
-#define NUM_DMA_DESCRIPTORS 256
-
-struct hpdi_private {
- void __iomem *plx9080_mmio;
- uint32_t *dio_buffer[NUM_DMA_BUFFERS]; /* dma buffers */
- /* physical addresses of dma buffers */
- dma_addr_t dio_buffer_phys_addr[NUM_DMA_BUFFERS];
- /*
- * array of dma descriptors read by plx9080, allocated to get proper
- * alignment
- */
- struct plx_dma_desc *dma_desc;
- /* physical address of dma descriptor array */
- dma_addr_t dma_desc_phys_addr;
- unsigned int num_dma_descriptors;
- /* pointer to start of buffers indexed by descriptor */
- uint32_t *desc_dio_buffer[NUM_DMA_DESCRIPTORS];
- /* index of the dma descriptor that is currently being used */
- unsigned int dma_desc_index;
- unsigned int tx_fifo_size;
- unsigned int rx_fifo_size;
- unsigned long dio_count;
- /* number of bytes at which to generate COMEDI_CB_BLOCK events */
- unsigned int block_size;
-};
-
-static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
-{
- struct hpdi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int idx;
- unsigned int start;
- unsigned int desc;
- unsigned int size;
- unsigned int next;
-
- if (channel)
- next = readl(devpriv->plx9080_mmio + PLX_DMA1_PCI_ADDRESS_REG);
- else
- next = readl(devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
-
- idx = devpriv->dma_desc_index;
- start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
- /* loop until we have read all the full buffers */
- for (desc = 0; (next < start || next >= start + devpriv->block_size) &&
- desc < devpriv->num_dma_descriptors; desc++) {
- /* transfer data from dma buffer to comedi buffer */
- size = devpriv->block_size / sizeof(uint32_t);
- if (cmd->stop_src == TRIG_COUNT) {
- if (size > devpriv->dio_count)
- size = devpriv->dio_count;
- devpriv->dio_count -= size;
- }
- comedi_buf_write_samples(s, devpriv->desc_dio_buffer[idx],
- size);
- idx++;
- idx %= devpriv->num_dma_descriptors;
- start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
-
- devpriv->dma_desc_index = idx;
- }
- /* XXX check for buffer overrun somehow */
-}
-
-static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct hpdi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- uint32_t hpdi_intr_status, hpdi_board_status;
- uint32_t plx_status;
- uint32_t plx_bits;
- uint8_t dma0_status, dma1_status;
- unsigned long flags;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- plx_status = readl(devpriv->plx9080_mmio + PLX_INTRCS_REG);
- if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
- return IRQ_NONE;
-
- hpdi_intr_status = readl(dev->mmio + INTERRUPT_STATUS_REG);
- hpdi_board_status = readl(dev->mmio + BOARD_STATUS_REG);
-
- if (hpdi_intr_status)
- writel(hpdi_intr_status, dev->mmio + INTERRUPT_STATUS_REG);
-
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) {
- /* dma chan 0 interrupt */
- writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
-
- if (dma0_status & PLX_DMA_EN_BIT)
- gsc_hpdi_drain_dma(dev, 0);
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) {
- /* XXX */ /* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) {
- /* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
- }
-
- if (hpdi_board_status & RX_OVERRUN_BIT) {
- dev_err(dev->class_dev, "rx fifo overrun\n");
- async->events |= COMEDI_CB_ERROR;
- }
-
- if (hpdi_board_status & RX_UNDERRUN_BIT) {
- dev_err(dev->class_dev, "rx fifo underrun\n");
- async->events |= COMEDI_CB_ERROR;
- }
-
- if (devpriv->dio_count == 0)
- async->events |= COMEDI_CB_EOA;
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static void gsc_hpdi_abort_dma(struct comedi_device *dev, unsigned int channel)
-{
- struct hpdi_private *devpriv = dev->private;
- unsigned long flags;
-
- /* spinlock for plx dma control/status reg */
- spin_lock_irqsave(&dev->spinlock, flags);
-
- plx9080_abort_dma(devpriv->plx9080_mmio, channel);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static int gsc_hpdi_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- writel(0, dev->mmio + BOARD_CONTROL_REG);
- writel(0, dev->mmio + INTERRUPT_CONTROL_REG);
-
- gsc_hpdi_abort_dma(dev, 0);
-
- return 0;
-}
-
-static int gsc_hpdi_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct hpdi_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned long flags;
- uint32_t bits;
-
- if (s->io_bits)
- return -EINVAL;
-
- writel(RX_FIFO_RESET_BIT, dev->mmio + BOARD_CONTROL_REG);
-
- gsc_hpdi_abort_dma(dev, 0);
-
- devpriv->dma_desc_index = 0;
-
- /*
-	 * These registers are supposedly unused during chained dma,
-	 * but I have found that left-over values from the last operation
-	 * occasionally cause problems with the transfer of the first dma
-	 * block.  Initializing them to zero seems to fix the problem.
- */
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_LOCAL_ADDRESS_REG);
-
- /* give location of first dma descriptor */
- bits = devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
- writel(bits, devpriv->plx9080_mmio + PLX_DMA0_DESCRIPTOR_REG);
-
- /* enable dma transfer */
- spin_lock_irqsave(&dev->spinlock, flags);
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->dio_count = cmd->stop_arg;
- else
- devpriv->dio_count = 1;
-
- /* clear over/under run status flags */
- writel(RX_UNDERRUN_BIT | RX_OVERRUN_BIT, dev->mmio + BOARD_STATUS_REG);
-
- /* enable interrupts */
- writel(RX_FULL_INTR, dev->mmio + INTERRUPT_CONTROL_REG);
-
- writel(RX_ENABLE_BIT, dev->mmio + BOARD_CONTROL_REG);
-
- return 0;
-}
-
-static int gsc_hpdi_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (chan != i) {
- dev_dbg(dev->class_dev,
- "chanlist must be ch 0 to 31 in order\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int gsc_hpdi_cmd_test(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- if (s->io_bits)
- return -EINVAL;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (!cmd->chanlist_len || !cmd->chanlist) {
- cmd->chanlist_len = 32;
- err |= -EINVAL;
- }
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= gsc_hpdi_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-/* set up dma descriptors so a link completes every 'len' bytes */
-static int gsc_hpdi_setup_dma_descriptors(struct comedi_device *dev,
- unsigned int len)
-{
- struct hpdi_private *devpriv = dev->private;
- dma_addr_t phys_addr = devpriv->dma_desc_phys_addr;
- uint32_t next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI;
- unsigned int offset = 0;
- unsigned int idx = 0;
- unsigned int i;
-
- if (len > DMA_BUFFER_SIZE)
- len = DMA_BUFFER_SIZE;
- len -= len % sizeof(uint32_t);
- if (len == 0)
- return -EINVAL;
-
- for (i = 0; i < NUM_DMA_DESCRIPTORS && idx < NUM_DMA_BUFFERS; i++) {
- devpriv->dma_desc[i].pci_start_addr =
- cpu_to_le32(devpriv->dio_buffer_phys_addr[idx] + offset);
- devpriv->dma_desc[i].local_start_addr = cpu_to_le32(FIFO_REG);
- devpriv->dma_desc[i].transfer_size = cpu_to_le32(len);
- devpriv->dma_desc[i].next = cpu_to_le32((phys_addr +
- (i + 1) * sizeof(devpriv->dma_desc[0])) | next_bits);
-
- devpriv->desc_dio_buffer[i] = devpriv->dio_buffer[idx] +
- (offset / sizeof(uint32_t));
-
- offset += len;
- if (len + offset > DMA_BUFFER_SIZE) {
- offset = 0;
- idx++;
- }
- }
- devpriv->num_dma_descriptors = i;
- /* fix last descriptor to point back to first */
- devpriv->dma_desc[i - 1].next = cpu_to_le32(phys_addr | next_bits);
-
- devpriv->block_size = len;
-
- return len;
-}
-
-static int gsc_hpdi_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- switch (data[0]) {
- case INSN_CONFIG_BLOCK_SIZE:
- ret = gsc_hpdi_setup_dma_descriptors(dev, data[1]);
- if (ret)
- return ret;
-
- data[1] = ret;
- break;
- default:
- ret = comedi_dio_insn_config(dev, s, insn, data, 0xffffffff);
- if (ret)
- return ret;
- break;
- }
-
- return insn->n;
-}
-
-static void gsc_hpdi_free_dma(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct hpdi_private *devpriv = dev->private;
- int i;
-
- if (!devpriv)
- return;
-
- /* free pci dma buffers */
- for (i = 0; i < NUM_DMA_BUFFERS; i++) {
- if (devpriv->dio_buffer[i])
- pci_free_consistent(pcidev,
- DMA_BUFFER_SIZE,
- devpriv->dio_buffer[i],
- devpriv->dio_buffer_phys_addr[i]);
- }
- /* free dma descriptors */
- if (devpriv->dma_desc)
- pci_free_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- NUM_DMA_DESCRIPTORS,
- devpriv->dma_desc,
- devpriv->dma_desc_phys_addr);
-}
-
-static int gsc_hpdi_init(struct comedi_device *dev)
-{
- struct hpdi_private *devpriv = dev->private;
- uint32_t plx_intcsr_bits;
-
- /* wait 10usec after reset before accessing fifos */
- writel(BOARD_RESET_BIT, dev->mmio + BOARD_CONTROL_REG);
- usleep_range(10, 1000);
-
- writel(ALMOST_EMPTY_BITS(32) | ALMOST_FULL_BITS(32),
- dev->mmio + RX_PROG_ALMOST_REG);
- writel(ALMOST_EMPTY_BITS(32) | ALMOST_FULL_BITS(32),
- dev->mmio + TX_PROG_ALMOST_REG);
-
- devpriv->tx_fifo_size = readl(dev->mmio + TX_FIFO_SIZE_REG) &
- FIFO_SIZE_MASK;
- devpriv->rx_fifo_size = readl(dev->mmio + RX_FIFO_SIZE_REG) &
- FIFO_SIZE_MASK;
-
- writel(0, dev->mmio + INTERRUPT_CONTROL_REG);
-
- /* enable interrupts */
- plx_intcsr_bits =
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E;
- writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_INTRCS_REG);
-
- return 0;
-}
-
-static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
-{
- struct hpdi_private *devpriv = dev->private;
- uint32_t bits;
- void __iomem *plx_iobase = devpriv->plx9080_mmio;
-
-#ifdef __BIG_ENDIAN
- bits = BIGEND_DMA0 | BIGEND_DMA1;
-#else
- bits = 0;
-#endif
- writel(bits, devpriv->plx9080_mmio + PLX_BIGEND_REG);
-
- writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
-
- gsc_hpdi_abort_dma(dev, 0);
- gsc_hpdi_abort_dma(dev, 1);
-
- /* configure dma0 mode */
- bits = 0;
- /* enable ready input */
- bits |= PLX_DMA_EN_READYIN_BIT;
- /* enable dma chaining */
- bits |= PLX_EN_CHAIN_BIT;
- /*
- * enable interrupt on dma done
- * (probably don't need this, since chain never finishes)
- */
- bits |= PLX_EN_DMA_DONE_INTR_BIT;
- /*
- * don't increment local address during transfers
- * (we are transferring from a fixed fifo register)
- */
- bits |= PLX_LOCAL_ADDR_CONST_BIT;
- /* route dma interrupt to pci bus */
- bits |= PLX_DMA_INTR_PCI_BIT;
- /* enable demand mode */
- bits |= PLX_DEMAND_MODE_BIT;
- /* enable local burst mode */
- bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
- bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
- writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
-}
-
-static int gsc_hpdi_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct hpdi_private *devpriv;
- struct comedi_subdevice *s;
- int i;
- int retval;
-
- dev->board_name = "pci-hpdi32";
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- retval = comedi_pci_enable(dev);
- if (retval)
- return retval;
- pci_set_master(pcidev);
-
- devpriv->plx9080_mmio = pci_ioremap_bar(pcidev, 0);
- dev->mmio = pci_ioremap_bar(pcidev, 2);
- if (!devpriv->plx9080_mmio || !dev->mmio) {
- dev_warn(dev->class_dev, "failed to remap io memory\n");
- return -ENOMEM;
- }
-
- gsc_hpdi_init_plx9080(dev);
-
- /* get irq */
- if (request_irq(pcidev->irq, gsc_hpdi_interrupt, IRQF_SHARED,
- dev->board_name, dev)) {
- dev_warn(dev->class_dev,
- "unable to allocate irq %u\n", pcidev->irq);
- return -EINVAL;
- }
- dev->irq = pcidev->irq;
-
- dev_dbg(dev->class_dev, " irq %u\n", dev->irq);
-
- /* allocate pci dma buffers */
- for (i = 0; i < NUM_DMA_BUFFERS; i++) {
- devpriv->dio_buffer[i] =
- pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
- &devpriv->dio_buffer_phys_addr[i]);
- }
- /* allocate dma descriptors */
- devpriv->dma_desc = pci_alloc_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- NUM_DMA_DESCRIPTORS,
- &devpriv->dma_desc_phys_addr);
- if (devpriv->dma_desc_phys_addr & 0xf) {
- dev_warn(dev->class_dev,
- " dma descriptors not quad-word aligned (bug)\n");
- return -EIO;
- }
-
- retval = gsc_hpdi_setup_dma_descriptors(dev, 0x1000);
- if (retval < 0)
- return retval;
-
- retval = comedi_alloc_subdevices(dev, 1);
- if (retval)
- return retval;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL |
- SDF_CMD_READ;
- s->n_chan = 32;
- s->len_chanlist = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = gsc_hpdi_dio_insn_config;
- s->do_cmd = gsc_hpdi_cmd;
- s->do_cmdtest = gsc_hpdi_cmd_test;
- s->cancel = gsc_hpdi_cancel;
-
- return gsc_hpdi_init(dev);
-}
-
-static void gsc_hpdi_detach(struct comedi_device *dev)
-{
- struct hpdi_private *devpriv = dev->private;
-
- if (dev->irq)
- free_irq(dev->irq, dev);
- if (devpriv) {
- if (devpriv->plx9080_mmio) {
- writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
- iounmap(devpriv->plx9080_mmio);
- }
- if (dev->mmio)
- iounmap(dev->mmio);
- }
- comedi_pci_disable(dev);
- gsc_hpdi_free_dma(dev);
-}
-
-static struct comedi_driver gsc_hpdi_driver = {
- .driver_name = "gsc_hpdi",
- .module = THIS_MODULE,
- .auto_attach = gsc_hpdi_auto_attach,
- .detach = gsc_hpdi_detach,
-};
-
-static int gsc_hpdi_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &gsc_hpdi_driver, id->driver_data);
-}
-
-static const struct pci_device_id gsc_hpdi_pci_table[] = {
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9080,
- PCI_VENDOR_ID_PLX, 0x2400) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, gsc_hpdi_pci_table);
-
-static struct pci_driver gsc_hpdi_pci_driver = {
- .name = "gsc_hpdi",
- .id_table = gsc_hpdi_pci_table,
- .probe = gsc_hpdi_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(gsc_hpdi_driver, gsc_hpdi_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for General Standards PCI-HPDI32/PMC-HPDI32");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
deleted file mode 100644
index 1e104ebf8057..000000000000
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- comedi/drivers/icp_multi.c
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
-Driver: icp_multi
-Description: Inova ICP_MULTI
-Author: Anne Smorthit <anne.smorthit@sfwte.ch>
-Devices: [Inova] ICP_MULTI (icp_multi)
-Status: works
-
-The driver supports analogue and digital input and output. It does not
-work with interrupts or with the counters. There is currently no support
-for DMA.
-
-It has 16 single-ended or 8 differential Analogue Input channels with 12-bit
-resolution. Ranges : 5V, 10V, +/-5V, +/-10V, 0..20mA and 4..20mA. Input
-ranges can be individually programmed for each channel. Voltage or current
-measurement is selected by jumper.
-
-There are 4 x 12-bit Analogue Outputs. Ranges : 5V, 10V, +/-5V, +/-10V
-
-16 x Digital Inputs, 24V
-
-8 x Digital Outputs, 24V, 1A
-
-4 x 16-bit counters
-
-Configuration options: not applicable, uses PCI auto config
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#define ICP_MULTI_ADC_CSR 0 /* R/W: ADC command/status register */
-#define ICP_MULTI_AI 2 /* R: Analogue input data */
-#define ICP_MULTI_DAC_CSR 4 /* R/W: DAC command/status register */
-#define ICP_MULTI_AO 6 /* R/W: Analogue output data */
-#define ICP_MULTI_DI 8 /* R/W: Digital inputs */
-#define ICP_MULTI_DO 0x0A /* R/W: Digital outputs */
-#define ICP_MULTI_INT_EN 0x0C /* R/W: Interrupt enable register */
-#define ICP_MULTI_INT_STAT 0x0E /* R/W: Interrupt status register */
-#define ICP_MULTI_CNTR0 0x10 /* R/W: Counter 0 */
-#define ICP_MULTI_CNTR1 0x12 /* R/W: counter 1 */
-#define ICP_MULTI_CNTR2 0x14 /* R/W: Counter 2 */
-#define ICP_MULTI_CNTR3 0x16 /* R/W: Counter 3 */
-
-/* Define bits from ADC command/status register */
-#define ADC_ST 0x0001 /* Start ADC */
-#define ADC_BSY 0x0001 /* ADC busy */
-#define ADC_BI 0x0010 /* Bipolar input range 1 = bipolar */
-#define ADC_RA 0x0020 /* Input range 0 = 5V, 1 = 10V */
-#define ADC_DI 0x0040 /* Differential input mode 1 = differential */
-
-/* Define bits from DAC command/status register */
-#define DAC_ST 0x0001 /* Start DAC */
-#define DAC_BSY 0x0001 /* DAC busy */
-#define DAC_BI 0x0010 /* Bipolar input range 1 = bipolar */
-#define DAC_RA 0x0020 /* Input range 0 = 5V, 1 = 10V */
-
-/* Define bits from interrupt enable/status registers */
-#define ADC_READY 0x0001 /* A/d conversion ready interrupt */
-#define DAC_READY 0x0002 /* D/a conversion ready interrupt */
-#define DOUT_ERROR 0x0004 /* Digital output error interrupt */
-#define DIN_STATUS 0x0008 /* Digital input status change interrupt */
-#define CIE0 0x0010 /* Counter 0 overrun interrupt */
-#define CIE1 0x0020 /* Counter 1 overrun interrupt */
-#define CIE2 0x0040 /* Counter 2 overrun interrupt */
-#define CIE3 0x0080 /* Counter 3 overrun interrupt */
-
-/* Useful definitions */
-#define Status_IRQ 0x00ff /* All interrupts */
-
-/* Define analogue range */
-static const struct comedi_lrange range_analog = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
-};
-
-static const char range_codes_analog[] = { 0x00, 0x20, 0x10, 0x30 };
-
-/*
-==============================================================================
- Data & Structure declarations
-==============================================================================
-*/
-
-struct icp_multi_private {
- unsigned int AdcCmdStatus; /* ADC Command/Status register */
- unsigned int DacCmdStatus; /* DAC Command/Status register */
- unsigned int IntEnable; /* Interrupt Enable register */
- unsigned int IntStatus; /* Interrupt Status register */
-	unsigned int act_chanlist[32];	/* list of scanned channels */
- unsigned char act_chanlist_len; /* len of scanlist */
- unsigned char act_chanlist_pos; /* actual position in MUX list */
-	unsigned int *ai_chanlist;	/* actual chanlist */
- unsigned int do_data; /* Remember digital output data */
-};
-
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan)
-{
- struct icp_multi_private *devpriv = dev->private;
- unsigned int i, range, chanprog;
- unsigned int diff;
-
- devpriv->act_chanlist_len = n_chan;
- devpriv->act_chanlist_pos = 0;
-
- for (i = 0; i < n_chan; i++) {
- /* Get channel */
- chanprog = CR_CHAN(chanlist[i]);
-
- /* Determine if it is a differential channel (Bit 15 = 1) */
- if (CR_AREF(chanlist[i]) == AREF_DIFF) {
- diff = 1;
- chanprog &= 0x0007;
- } else {
- diff = 0;
- chanprog &= 0x000f;
- }
-
- /* Clear channel, range and input mode bits
- * in A/D command/status register */
- devpriv->AdcCmdStatus &= 0xf00f;
-
- /* Set channel number and differential mode status bit */
- if (diff) {
- /* Set channel number, bits 9-11 & mode, bit 6 */
- devpriv->AdcCmdStatus |= (chanprog << 9);
- devpriv->AdcCmdStatus |= ADC_DI;
- } else
- /* Set channel number, bits 8-11 */
- devpriv->AdcCmdStatus |= (chanprog << 8);
-
- /* Get range for current channel */
- range = range_codes_analog[CR_RANGE(chanlist[i])];
- /* Set range. bits 4-5 */
- devpriv->AdcCmdStatus |= range;
-
- /* Output channel, range, mode to ICP Multi */
- writew(devpriv->AdcCmdStatus, dev->mmio + ICP_MULTI_ADC_CSR);
- }
-}
-
-static int icp_multi_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readw(dev->mmio + ICP_MULTI_ADC_CSR);
- if ((status & ADC_BSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int icp_multi_insn_read_ai(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct icp_multi_private *devpriv = dev->private;
- int ret = 0;
- int n;
-
- /* Disable A/D conversion ready interrupt */
- devpriv->IntEnable &= ~ADC_READY;
- writew(devpriv->IntEnable, dev->mmio + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= ADC_READY;
- writew(devpriv->IntStatus, dev->mmio + ICP_MULTI_INT_STAT);
-
-	/* Set up appropriate channel, mode and range data for specified ch */
- setup_channel_list(dev, s, &insn->chanspec, 1);
-
- for (n = 0; n < insn->n; n++) {
- /* Set start ADC bit */
- devpriv->AdcCmdStatus |= ADC_ST;
- writew(devpriv->AdcCmdStatus, dev->mmio + ICP_MULTI_ADC_CSR);
- devpriv->AdcCmdStatus &= ~ADC_ST;
-
- udelay(1);
-
- /* Wait for conversion to complete, or get fed up waiting */
- ret = comedi_timeout(dev, s, insn, icp_multi_ai_eoc, 0);
- if (ret)
- break;
-
- data[n] = (readw(dev->mmio + ICP_MULTI_AI) >> 4) & 0x0fff;
- }
-
- /* Disable interrupt */
- devpriv->IntEnable &= ~ADC_READY;
- writew(devpriv->IntEnable, dev->mmio + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= ADC_READY;
- writew(devpriv->IntStatus, dev->mmio + ICP_MULTI_INT_STAT);
-
- return ret ? ret : n;
-}
-
-static int icp_multi_ao_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readw(dev->mmio + ICP_MULTI_DAC_CSR);
- if ((status & DAC_BSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int icp_multi_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct icp_multi_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int i;
-
- /* Disable D/A conversion ready interrupt */
- devpriv->IntEnable &= ~DAC_READY;
- writew(devpriv->IntEnable, dev->mmio + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= DAC_READY;
- writew(devpriv->IntStatus, dev->mmio + ICP_MULTI_INT_STAT);
-
- /* Set up range and channel data */
- /* Bit 4 = 1 : Bipolar */
- /* Bit 5 = 0 : 5V */
- /* Bit 5 = 1 : 10V */
- /* Bits 8-9 : Channel number */
- devpriv->DacCmdStatus &= 0xfccf;
- devpriv->DacCmdStatus |= range_codes_analog[range];
- devpriv->DacCmdStatus |= (chan << 8);
-
- writew(devpriv->DacCmdStatus, dev->mmio + ICP_MULTI_DAC_CSR);
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
- int ret;
-
- /* Wait for analogue output data register to be
- * ready for new data, or get fed up waiting */
- ret = comedi_timeout(dev, s, insn, icp_multi_ao_eoc, 0);
- if (ret) {
- /* Disable interrupt */
- devpriv->IntEnable &= ~DAC_READY;
- writew(devpriv->IntEnable,
- dev->mmio + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= DAC_READY;
- writew(devpriv->IntStatus,
- dev->mmio + ICP_MULTI_INT_STAT);
-
- return ret;
- }
-
- writew(val, dev->mmio + ICP_MULTI_AO);
-
- /* Set DAC_ST bit to write the data to selected channel */
- devpriv->DacCmdStatus |= DAC_ST;
- writew(devpriv->DacCmdStatus, dev->mmio + ICP_MULTI_DAC_CSR);
- devpriv->DacCmdStatus &= ~DAC_ST;
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int icp_multi_insn_bits_di(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = readw(dev->mmio + ICP_MULTI_DI);
-
- return insn->n;
-}
-
-static int icp_multi_insn_bits_do(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- writew(s->state, dev->mmio + ICP_MULTI_DO);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int icp_multi_insn_read_ctr(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- return 0;
-}
-
-static int icp_multi_insn_write_ctr(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- return 0;
-}
-
-static irqreturn_t interrupt_service_icp_multi(int irq, void *d)
-{
- struct comedi_device *dev = d;
- int int_no;
-
- /* Is this interrupt from our board? */
- int_no = readw(dev->mmio + ICP_MULTI_INT_STAT) & Status_IRQ;
- if (!int_no)
- /* No, exit */
- return IRQ_NONE;
-
- /* Determine which interrupt is active & handle it */
- switch (int_no) {
- case ADC_READY:
- break;
- case DAC_READY:
- break;
- case DOUT_ERROR:
- break;
- case DIN_STATUS:
- break;
- case CIE0:
- break;
- case CIE1:
- break;
- case CIE2:
- break;
- case CIE3:
- break;
- default:
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-#if 0
-static int check_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan)
-{
- unsigned int i;
-
- /* Check that we at least have one channel to check */
- if (n_chan < 1) {
- dev_err(dev->class_dev, "range/channel list is empty!\n");
- return 0;
- }
- /* Check all channels */
- for (i = 0; i < n_chan; i++) {
- /* Check that channel number is < maximum */
- if (CR_AREF(chanlist[i]) == AREF_DIFF) {
-			if (CR_CHAN(chanlist[i]) > (s->n_chan / 2)) {
- dev_err(dev->class_dev,
- "Incorrect differential ai ch-nr\n");
- return 0;
- }
- } else {
- if (CR_CHAN(chanlist[i]) > s->n_chan) {
- dev_err(dev->class_dev,
- "Incorrect ai channel number\n");
- return 0;
- }
- }
- }
- return 1;
-}
-#endif
-
-static int icp_multi_reset(struct comedi_device *dev)
-{
- struct icp_multi_private *devpriv = dev->private;
- unsigned int i;
-
- /* Clear INT enables and requests */
- writew(0, dev->mmio + ICP_MULTI_INT_EN);
- writew(0x00ff, dev->mmio + ICP_MULTI_INT_STAT);
-
- /* Set DACs to 0..5V range and 0V output */
- for (i = 0; i < 4; i++) {
- devpriv->DacCmdStatus &= 0xfcce;
-
- /* Set channel number */
- devpriv->DacCmdStatus |= (i << 8);
-
- /* Output 0V */
- writew(0, dev->mmio + ICP_MULTI_AO);
-
- /* Set start conversion bit */
- devpriv->DacCmdStatus |= DAC_ST;
-
- /* Output to command / status register */
- writew(devpriv->DacCmdStatus, dev->mmio + ICP_MULTI_DAC_CSR);
-
- /* Delay to allow DAC time to recover */
- udelay(1);
- }
-
- /* Digital outputs to 0 */
- writew(0, dev->mmio + ICP_MULTI_DO);
-
- return 0;
-}
-
-static int icp_multi_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct icp_multi_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 2);
- if (!dev->mmio)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 5);
- if (ret)
- return ret;
-
- icp_multi_reset(dev);
-
- if (pcidev->irq) {
- ret = request_irq(pcidev->irq, interrupt_service_icp_multi,
- IRQF_SHARED, dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
- s->n_chan = 16;
- s->maxdata = 0x0fff;
- s->len_chanlist = 16;
- s->range_table = &range_analog;
- s->insn_read = icp_multi_insn_read_ai;
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->len_chanlist = 4;
- s->range_table = &range_analog;
- s->insn_write = icp_multi_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->len_chanlist = 16;
- s->range_table = &range_digital;
- s->insn_bits = icp_multi_insn_bits_di;
-
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_digital;
- s->insn_bits = icp_multi_insn_bits_do;
-
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->len_chanlist = 4;
- s->state = 0;
- s->insn_read = icp_multi_insn_read_ctr;
- s->insn_write = icp_multi_insn_write_ctr;
-
- return 0;
-}
-
-static void icp_multi_detach(struct comedi_device *dev)
-{
- if (dev->mmio)
- icp_multi_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver icp_multi_driver = {
- .driver_name = "icp_multi",
- .module = THIS_MODULE,
- .auto_attach = icp_multi_auto_attach,
- .detach = icp_multi_detach,
-};
-
-static int icp_multi_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &icp_multi_driver, id->driver_data);
-}
-
-static const struct pci_device_id icp_multi_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ICP, 0x8000) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, icp_multi_pci_table);
-
-static struct pci_driver icp_multi_pci_driver = {
- .name = "icp_multi",
- .id_table = icp_multi_pci_table,
- .probe = icp_multi_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(icp_multi_driver, icp_multi_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
deleted file mode 100644
index 14ef1f67dd42..000000000000
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * ii_pci20kc.c
- * Driver for Intelligent Instruments PCI-20001C carrier board and modules.
- *
- * Copyright (C) 2000 Markus Kempf <kempf@matsci.uni-sb.de>
- * with suggestions from David Schleef 16.06.2000
- */
-
-/*
- * Driver: ii_pci20kc
- * Description: Intelligent Instruments PCI-20001C carrier board
- * Devices: [Intelligent Instrumentation] PCI-20001C (ii_pci20kc)
- * Author: Markus Kempf <kempf@matsci.uni-sb.de>
- * Status: works
- *
- * Supports the PCI-20001C-1a and PCI-20001C-2a carrier boards. The
- * -2a version has 32 on-board DIO channels. Three add-on modules
- * can be added to the carrier board for additional functionality.
- *
- * Supported add-on modules:
- * PCI-20006M-1 1 channel, 16-bit analog output module
- * PCI-20006M-2 2 channel, 16-bit analog output module
- * PCI-20341M-1A 4 channel, 16-bit analog input module
- *
- * Options:
- * 0 Board base address
- * 1 IRQ (not-used)
- */
-
-#include <linux/module.h>
-#include <linux/io.h>
-#include "../comedidev.h"
-
-/*
- * Register I/O map
- */
-#define II20K_SIZE 0x400
-#define II20K_MOD_OFFSET 0x100
-#define II20K_ID_REG 0x00
-#define II20K_ID_MOD1_EMPTY (1 << 7)
-#define II20K_ID_MOD2_EMPTY (1 << 6)
-#define II20K_ID_MOD3_EMPTY (1 << 5)
-#define II20K_ID_MASK 0x1f
-#define II20K_ID_PCI20001C_1A 0x1b /* no on-board DIO */
-#define II20K_ID_PCI20001C_2A 0x1d /* on-board DIO */
-#define II20K_MOD_STATUS_REG 0x40
-#define II20K_MOD_STATUS_IRQ_MOD1 (1 << 7)
-#define II20K_MOD_STATUS_IRQ_MOD2 (1 << 6)
-#define II20K_MOD_STATUS_IRQ_MOD3 (1 << 5)
-#define II20K_DIO0_REG 0x80
-#define II20K_DIO1_REG 0x81
-#define II20K_DIR_ENA_REG 0x82
-#define II20K_DIR_DIO3_OUT (1 << 7)
-#define II20K_DIR_DIO2_OUT (1 << 6)
-#define II20K_BUF_DISAB_DIO3 (1 << 5)
-#define II20K_BUF_DISAB_DIO2 (1 << 4)
-#define II20K_DIR_DIO1_OUT (1 << 3)
-#define II20K_DIR_DIO0_OUT (1 << 2)
-#define II20K_BUF_DISAB_DIO1 (1 << 1)
-#define II20K_BUF_DISAB_DIO0 (1 << 0)
-#define II20K_CTRL01_REG 0x83
-#define II20K_CTRL01_SET (1 << 7)
-#define II20K_CTRL01_DIO0_IN (1 << 4)
-#define II20K_CTRL01_DIO1_IN (1 << 1)
-#define II20K_DIO2_REG 0xc0
-#define II20K_DIO3_REG 0xc1
-#define II20K_CTRL23_REG 0xc3
-#define II20K_CTRL23_SET (1 << 7)
-#define II20K_CTRL23_DIO2_IN (1 << 4)
-#define II20K_CTRL23_DIO3_IN (1 << 1)
-
-#define II20K_ID_PCI20006M_1 0xe2 /* 1 AO channels */
-#define II20K_ID_PCI20006M_2 0xe3 /* 2 AO channels */
-#define II20K_AO_STRB_REG(x) (0x0b + ((x) * 0x08))
-#define II20K_AO_LSB_REG(x) (0x0d + ((x) * 0x08))
-#define II20K_AO_MSB_REG(x) (0x0e + ((x) * 0x08))
-#define II20K_AO_STRB_BOTH_REG 0x1b
-
-#define II20K_ID_PCI20341M_1 0x77 /* 4 AI channels */
-#define II20K_AI_STATUS_CMD_REG 0x01
-#define II20K_AI_STATUS_CMD_BUSY (1 << 7)
-#define II20K_AI_STATUS_CMD_HW_ENA (1 << 1)
-#define II20K_AI_STATUS_CMD_EXT_START (1 << 0)
-#define II20K_AI_LSB_REG 0x02
-#define II20K_AI_MSB_REG 0x03
-#define II20K_AI_PACER_RESET_REG 0x04
-#define II20K_AI_16BIT_DATA_REG 0x06
-#define II20K_AI_CONF_REG 0x10
-#define II20K_AI_CONF_ENA (1 << 2)
-#define II20K_AI_OPT_REG 0x11
-#define II20K_AI_OPT_TRIG_ENA (1 << 5)
-#define II20K_AI_OPT_TRIG_INV (1 << 4)
-#define II20K_AI_OPT_TIMEBASE(x) (((x) & 0x3) << 1)
-#define II20K_AI_OPT_BURST_MODE (1 << 0)
-#define II20K_AI_STATUS_REG 0x12
-#define II20K_AI_STATUS_INT (1 << 7)
-#define II20K_AI_STATUS_TRIG (1 << 6)
-#define II20K_AI_STATUS_TRIG_ENA (1 << 5)
-#define II20K_AI_STATUS_PACER_ERR (1 << 2)
-#define II20K_AI_STATUS_DATA_ERR (1 << 1)
-#define II20K_AI_STATUS_SET_TIME_ERR (1 << 0)
-#define II20K_AI_LAST_CHAN_ADDR_REG 0x13
-#define II20K_AI_CUR_ADDR_REG 0x14
-#define II20K_AI_SET_TIME_REG 0x15
-#define II20K_AI_DELAY_LSB_REG 0x16
-#define II20K_AI_DELAY_MSB_REG 0x17
-#define II20K_AI_CHAN_ADV_REG 0x18
-#define II20K_AI_CHAN_RESET_REG 0x19
-#define II20K_AI_START_TRIG_REG 0x1a
-#define II20K_AI_COUNT_RESET_REG 0x1b
-#define II20K_AI_CHANLIST_REG 0x80
-#define II20K_AI_CHANLIST_ONBOARD_ONLY (1 << 5)
-#define II20K_AI_CHANLIST_GAIN(x) (((x) & 0x3) << 3)
-#define II20K_AI_CHANLIST_MUX_ENA (1 << 2)
-#define II20K_AI_CHANLIST_CHAN(x) (((x) & 0x3) << 0)
-#define II20K_AI_CHANLIST_LEN 0x80
-
-/* the AO range is set by jumpers on the 20006M module */
-static const struct comedi_lrange ii20k_ao_ranges = {
- 3, {
- BIP_RANGE(5), /* Chan 0 - W1/W3 in Chan 1 - W2/W4 in */
- UNI_RANGE(10), /* Chan 0 - W1/W3 out Chan 1 - W2/W4 in */
- BIP_RANGE(10) /* Chan 0 - W1/W3 in Chan 1 - W2/W4 out */
- }
-};
-
-static const struct comedi_lrange ii20k_ai_ranges = {
- 4, {
- BIP_RANGE(5), /* gain 1 */
- BIP_RANGE(0.5), /* gain 10 */
- BIP_RANGE(0.05), /* gain 100 */
- BIP_RANGE(0.025) /* gain 200 */
- },
-};
-
-static void __iomem *ii20k_module_iobase(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- return dev->mmio + (s->index + 1) * II20K_MOD_OFFSET;
-}
-
-static int ii20k_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- void __iomem *iobase = ii20k_module_iobase(dev, s);
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- /* munge data */
- val += ((s->maxdata + 1) >> 1);
- val &= s->maxdata;
-
- writeb(val & 0xff, iobase + II20K_AO_LSB_REG(chan));
- writeb((val >> 8) & 0xff, iobase + II20K_AO_MSB_REG(chan));
- writeb(0x00, iobase + II20K_AO_STRB_REG(chan));
- }
-
- return insn->n;
-}
-
-static int ii20k_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- void __iomem *iobase = ii20k_module_iobase(dev, s);
- unsigned char status;
-
- status = readb(iobase + II20K_AI_STATUS_REG);
- if ((status & II20K_AI_STATUS_INT) == 0)
- return 0;
- return -EBUSY;
-}
-
-static void ii20k_ai_setup(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chanspec)
-{
- void __iomem *iobase = ii20k_module_iobase(dev, s);
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned char val;
-
- /* initialize module */
- writeb(II20K_AI_CONF_ENA, iobase + II20K_AI_CONF_REG);
-
- /* software conversion */
- writeb(0, iobase + II20K_AI_STATUS_CMD_REG);
-
- /* set the time base for the settling time counter based on the gain */
- val = (range < 3) ? II20K_AI_OPT_TIMEBASE(0) : II20K_AI_OPT_TIMEBASE(2);
- writeb(val, iobase + II20K_AI_OPT_REG);
-
- /* set the settling time counter based on the gain */
- val = (range < 2) ? 0x58 : (range < 3) ? 0x93 : 0x99;
- writeb(val, iobase + II20K_AI_SET_TIME_REG);
-
- /* set number of input channels */
- writeb(1, iobase + II20K_AI_LAST_CHAN_ADDR_REG);
-
- /* set the channel list byte */
- val = II20K_AI_CHANLIST_ONBOARD_ONLY |
- II20K_AI_CHANLIST_MUX_ENA |
- II20K_AI_CHANLIST_GAIN(range) |
- II20K_AI_CHANLIST_CHAN(chan);
- writeb(val, iobase + II20K_AI_CHANLIST_REG);
-
- /* reset settling time counter and trigger delay counter */
- writeb(0, iobase + II20K_AI_COUNT_RESET_REG);
-
- /* reset channel scanner */
- writeb(0, iobase + II20K_AI_CHAN_RESET_REG);
-}
-
-static int ii20k_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- void __iomem *iobase = ii20k_module_iobase(dev, s);
- int ret;
- int i;
-
- ii20k_ai_setup(dev, s, insn->chanspec);
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val;
-
- /* generate a software start convert signal */
- readb(iobase + II20K_AI_PACER_RESET_REG);
-
- ret = comedi_timeout(dev, s, insn, ii20k_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = readb(iobase + II20K_AI_LSB_REG);
- val |= (readb(iobase + II20K_AI_MSB_REG) << 8);
-
- /* munge two's complement data */
- val += ((s->maxdata + 1) >> 1);
- val &= s->maxdata;
-
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static void ii20k_dio_config(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned char ctrl01 = 0;
- unsigned char ctrl23 = 0;
- unsigned char dir_ena = 0;
-
- /* port 0 - channels 0-7 */
- if (s->io_bits & 0x000000ff) {
- /* output port */
- ctrl01 &= ~II20K_CTRL01_DIO0_IN;
- dir_ena &= ~II20K_BUF_DISAB_DIO0;
- dir_ena |= II20K_DIR_DIO0_OUT;
- } else {
- /* input port */
- ctrl01 |= II20K_CTRL01_DIO0_IN;
- dir_ena &= ~II20K_DIR_DIO0_OUT;
- }
-
- /* port 1 - channels 8-15 */
- if (s->io_bits & 0x0000ff00) {
- /* output port */
- ctrl01 &= ~II20K_CTRL01_DIO1_IN;
- dir_ena &= ~II20K_BUF_DISAB_DIO1;
- dir_ena |= II20K_DIR_DIO1_OUT;
- } else {
- /* input port */
- ctrl01 |= II20K_CTRL01_DIO1_IN;
- dir_ena &= ~II20K_DIR_DIO1_OUT;
- }
-
- /* port 2 - channels 16-23 */
- if (s->io_bits & 0x00ff0000) {
- /* output port */
- ctrl23 &= ~II20K_CTRL23_DIO2_IN;
- dir_ena &= ~II20K_BUF_DISAB_DIO2;
- dir_ena |= II20K_DIR_DIO2_OUT;
- } else {
- /* input port */
- ctrl23 |= II20K_CTRL23_DIO2_IN;
- dir_ena &= ~II20K_DIR_DIO2_OUT;
- }
-
- /* port 3 - channels 24-31 */
- if (s->io_bits & 0xff000000) {
- /* output port */
- ctrl23 &= ~II20K_CTRL23_DIO3_IN;
- dir_ena &= ~II20K_BUF_DISAB_DIO3;
- dir_ena |= II20K_DIR_DIO3_OUT;
- } else {
- /* input port */
- ctrl23 |= II20K_CTRL23_DIO3_IN;
- dir_ena &= ~II20K_DIR_DIO3_OUT;
- }
-
- ctrl23 |= II20K_CTRL01_SET;
- ctrl23 |= II20K_CTRL23_SET;
-
- /* order is important */
- writeb(ctrl01, dev->mmio + II20K_CTRL01_REG);
- writeb(ctrl23, dev->mmio + II20K_CTRL23_REG);
- writeb(dir_ena, dev->mmio + II20K_DIR_ENA_REG);
-}
-
-static int ii20k_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 8)
- mask = 0x000000ff;
- else if (chan < 16)
- mask = 0x0000ff00;
- else if (chan < 24)
- mask = 0x00ff0000;
- else
- mask = 0xff000000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- ii20k_dio_config(dev, s);
-
- return insn->n;
-}
-
-static int ii20k_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0x000000ff)
- writeb((s->state >> 0) & 0xff,
- dev->mmio + II20K_DIO0_REG);
- if (mask & 0x0000ff00)
- writeb((s->state >> 8) & 0xff,
- dev->mmio + II20K_DIO1_REG);
- if (mask & 0x00ff0000)
- writeb((s->state >> 16) & 0xff,
- dev->mmio + II20K_DIO2_REG);
- if (mask & 0xff000000)
- writeb((s->state >> 24) & 0xff,
- dev->mmio + II20K_DIO3_REG);
- }
-
- data[1] = readb(dev->mmio + II20K_DIO0_REG);
- data[1] |= readb(dev->mmio + II20K_DIO1_REG) << 8;
- data[1] |= readb(dev->mmio + II20K_DIO2_REG) << 16;
- data[1] |= readb(dev->mmio + II20K_DIO3_REG) << 24;
-
- return insn->n;
-}
-
-static int ii20k_init_module(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- void __iomem *iobase = ii20k_module_iobase(dev, s);
- unsigned char id;
- int ret;
-
- id = readb(iobase + II20K_ID_REG);
- switch (id) {
- case II20K_ID_PCI20006M_1:
- case II20K_ID_PCI20006M_2:
- /* Analog Output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = (id == II20K_ID_PCI20006M_2) ? 2 : 1;
- s->maxdata = 0xffff;
- s->range_table = &ii20k_ao_ranges;
- s->insn_write = ii20k_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- break;
- case II20K_ID_PCI20341M_1:
- /* Analog Input subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->range_table = &ii20k_ai_ranges;
- s->insn_read = ii20k_ai_insn_read;
- break;
- default:
- s->type = COMEDI_SUBD_UNUSED;
- break;
- }
-
- return 0;
-}
-
-static int ii20k_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- unsigned int membase;
- unsigned char id;
- bool has_dio;
- int ret;
-
- membase = it->options[0];
- if (!membase || (membase & ~(0x100000 - II20K_SIZE))) {
- dev_warn(dev->class_dev,
- "%s: invalid memory address specified\n",
- dev->board_name);
- return -EINVAL;
- }
-
- if (!request_mem_region(membase, II20K_SIZE, dev->board_name)) {
- dev_warn(dev->class_dev, "%s: I/O mem conflict (%#x,%u)\n",
- dev->board_name, membase, II20K_SIZE);
- return -EIO;
- }
- dev->iobase = membase; /* actually, a memory address */
-
- dev->mmio = ioremap(membase, II20K_SIZE);
- if (!dev->mmio)
- return -ENOMEM;
-
- id = readb(dev->mmio + II20K_ID_REG);
- switch (id & II20K_ID_MASK) {
- case II20K_ID_PCI20001C_1A:
- has_dio = false;
- break;
- case II20K_ID_PCI20001C_2A:
- has_dio = true;
- break;
- default:
- return -ENODEV;
- }
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- if (id & II20K_ID_MOD1_EMPTY) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- ret = ii20k_init_module(dev, s);
- if (ret)
- return ret;
- }
-
- s = &dev->subdevices[1];
- if (id & II20K_ID_MOD2_EMPTY) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- ret = ii20k_init_module(dev, s);
- if (ret)
- return ret;
- }
-
- s = &dev->subdevices[2];
- if (id & II20K_ID_MOD3_EMPTY) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- ret = ii20k_init_module(dev, s);
- if (ret)
- return ret;
- }
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[3];
- if (has_dio) {
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ii20k_dio_insn_bits;
- s->insn_config = ii20k_dio_insn_config;
-
- /* default all channels to input */
- ii20k_dio_config(dev, s);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- return 0;
-}
-
-static void ii20k_detach(struct comedi_device *dev)
-{
- if (dev->mmio)
- iounmap(dev->mmio);
- if (dev->iobase) /* actually, a memory address */
- release_mem_region(dev->iobase, II20K_SIZE);
-}
-
-static struct comedi_driver ii20k_driver = {
- .driver_name = "ii_pci20kc",
- .module = THIS_MODULE,
- .attach = ii20k_attach,
- .detach = ii20k_detach,
-};
-module_comedi_driver(ii20k_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
deleted file mode 100644
index b87192e0f9aa..000000000000
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ /dev/null
@@ -1,829 +0,0 @@
-/*
- comedi/drivers/jr3_pci.c
- hardware driver for JR3/PCI force sensor board
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
- * Driver: jr3_pci
- * Description: JR3/PCI force sensor board
- * Author: Anders Blomdell <anders.blomdell@control.lth.se>
- * Updated: Thu, 01 Nov 2012 17:34:55 +0000
- * Status: works
- * Devices: [JR3] PCI force sensor board (jr3_pci)
- *
- * Configuration options:
- * None
- *
- * Manual configuration of comedi devices is not supported by this
- * driver; supported PCI devices are configured as comedi devices
- * automatically.
- *
- * The DSP on the board requires initialization code, which can be
- * loaded by placing it in /lib/firmware/comedi. The initialization
- * code should be somewhere on the media you got with your card. One
- * version is available from http://www.comedi.org in the
- * comedi_nonfree_firmware tarball. The file is called "jr3pci.idm".
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/jiffies.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-
-#include "../comedi_pci.h"
-
-#include "jr3_pci.h"
-
-#define PCI_VENDOR_ID_JR3 0x1762
-
-enum jr3_pci_boardid {
- BOARD_JR3_1,
- BOARD_JR3_2,
- BOARD_JR3_3,
- BOARD_JR3_4,
-};
-
-struct jr3_pci_board {
- const char *name;
- int n_subdevs;
-};
-
-static const struct jr3_pci_board jr3_pci_boards[] = {
- [BOARD_JR3_1] = {
- .name = "jr3_pci_1",
- .n_subdevs = 1,
- },
- [BOARD_JR3_2] = {
- .name = "jr3_pci_2",
- .n_subdevs = 2,
- },
- [BOARD_JR3_3] = {
- .name = "jr3_pci_3",
- .n_subdevs = 3,
- },
- [BOARD_JR3_4] = {
- .name = "jr3_pci_4",
- .n_subdevs = 4,
- },
-};
-
-struct jr3_pci_transform {
- struct {
- u16 link_type;
- s16 link_amount;
- } link[8];
-};
-
-struct jr3_pci_poll_delay {
- int min;
- int max;
-};
-
-struct jr3_pci_dev_private {
- struct jr3_t __iomem *iobase;
- struct timer_list timer;
-};
-
-struct jr3_pci_subdev_private {
- struct jr3_channel __iomem *channel;
- unsigned long next_time_min;
- unsigned long next_time_max;
- enum { state_jr3_poll,
- state_jr3_init_wait_for_offset,
- state_jr3_init_transform_complete,
- state_jr3_init_set_full_scale_complete,
- state_jr3_init_use_offset_complete,
- state_jr3_done
- } state;
- int serial_no;
- int model_no;
- struct {
- int length;
- struct comedi_krange range;
- } range[9];
- const struct comedi_lrange *range_table_list[8 * 7 + 2];
- unsigned int maxdata_list[8 * 7 + 2];
- u16 errors;
- int retries;
-};
-
-static struct jr3_pci_poll_delay poll_delay_min_max(int min, int max)
-{
- struct jr3_pci_poll_delay result;
-
- result.min = min;
- result.max = max;
- return result;
-}
-
-static int is_complete(struct jr3_channel __iomem *channel)
-{
- return get_s16(&channel->command_word0) == 0;
-}
-
-static void set_transforms(struct jr3_channel __iomem *channel,
- struct jr3_pci_transform transf, short num)
-{
- int i;
-
- num &= 0x000f; /* Make sure that 0 <= num <= 15 */
- for (i = 0; i < 8; i++) {
- set_u16(&channel->transforms[num].link[i].link_type,
- transf.link[i].link_type);
- udelay(1);
- set_s16(&channel->transforms[num].link[i].link_amount,
- transf.link[i].link_amount);
- udelay(1);
- if (transf.link[i].link_type == end_x_form)
- break;
- }
-}
-
-static void use_transform(struct jr3_channel __iomem *channel,
- short transf_num)
-{
- set_s16(&channel->command_word0, 0x0500 + (transf_num & 0x000f));
-}
-
-static void use_offset(struct jr3_channel __iomem *channel, short offset_num)
-{
- set_s16(&channel->command_word0, 0x0600 + (offset_num & 0x000f));
-}
-
-static void set_offset(struct jr3_channel __iomem *channel)
-{
- set_s16(&channel->command_word0, 0x0700);
-}
-
-struct six_axis_t {
- s16 fx;
- s16 fy;
- s16 fz;
- s16 mx;
- s16 my;
- s16 mz;
-};
-
-static void set_full_scales(struct jr3_channel __iomem *channel,
- struct six_axis_t full_scale)
-{
- set_s16(&channel->full_scale.fx, full_scale.fx);
- set_s16(&channel->full_scale.fy, full_scale.fy);
- set_s16(&channel->full_scale.fz, full_scale.fz);
- set_s16(&channel->full_scale.mx, full_scale.mx);
- set_s16(&channel->full_scale.my, full_scale.my);
- set_s16(&channel->full_scale.mz, full_scale.mz);
- set_s16(&channel->command_word0, 0x0a00);
-}
-
-static struct six_axis_t get_min_full_scales(struct jr3_channel __iomem
- *channel)
-{
- struct six_axis_t result;
-
- result.fx = get_s16(&channel->min_full_scale.fx);
- result.fy = get_s16(&channel->min_full_scale.fy);
- result.fz = get_s16(&channel->min_full_scale.fz);
- result.mx = get_s16(&channel->min_full_scale.mx);
- result.my = get_s16(&channel->min_full_scale.my);
- result.mz = get_s16(&channel->min_full_scale.mz);
- return result;
-}
-
-static struct six_axis_t get_max_full_scales(struct jr3_channel __iomem
- *channel)
-{
- struct six_axis_t result;
-
- result.fx = get_s16(&channel->max_full_scale.fx);
- result.fy = get_s16(&channel->max_full_scale.fy);
- result.fz = get_s16(&channel->max_full_scale.fz);
- result.mx = get_s16(&channel->max_full_scale.mx);
- result.my = get_s16(&channel->max_full_scale.my);
- result.mz = get_s16(&channel->max_full_scale.mz);
- return result;
-}
-
-static unsigned int jr3_pci_ai_read_chan(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan)
-{
- struct jr3_pci_subdev_private *spriv = s->private;
- unsigned int val = 0;
-
- if (spriv->state != state_jr3_done)
- return 0;
-
- if (chan < 56) {
- unsigned int axis = chan % 8;
- unsigned filter = chan / 8;
-
- switch (axis) {
- case 0:
- val = get_s16(&spriv->channel->filter[filter].fx);
- break;
- case 1:
- val = get_s16(&spriv->channel->filter[filter].fy);
- break;
- case 2:
- val = get_s16(&spriv->channel->filter[filter].fz);
- break;
- case 3:
- val = get_s16(&spriv->channel->filter[filter].mx);
- break;
- case 4:
- val = get_s16(&spriv->channel->filter[filter].my);
- break;
- case 5:
- val = get_s16(&spriv->channel->filter[filter].mz);
- break;
- case 6:
- val = get_s16(&spriv->channel->filter[filter].v1);
- break;
- case 7:
- val = get_s16(&spriv->channel->filter[filter].v2);
- break;
- }
- val += 0x4000;
- } else if (chan == 56) {
- val = get_u16(&spriv->channel->model_no);
- } else if (chan == 57) {
- val = get_u16(&spriv->channel->serial_no);
- }
-
- return val;
-}
-
-static int jr3_pci_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct jr3_pci_subdev_private *spriv = s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- u16 errors;
- int i;
-
- if (!spriv)
- return -EINVAL;
-
- errors = get_u16(&spriv->channel->errors);
- if (spriv->state != state_jr3_done ||
- (errors & (watch_dog | watch_dog2 | sensor_change))) {
- /* No sensor or sensor changed */
- if (spriv->state == state_jr3_done) {
- /* Restart polling */
- spriv->state = state_jr3_poll;
- }
- return -EAGAIN;
- }
-
- for (i = 0; i < insn->n; i++)
- data[i] = jr3_pci_ai_read_chan(dev, s, chan);
-
- return insn->n;
-}
-
-static int jr3_pci_open(struct comedi_device *dev)
-{
- struct jr3_pci_subdev_private *spriv;
- struct comedi_subdevice *s;
- int i;
-
- dev_dbg(dev->class_dev, "jr3_pci_open\n");
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- spriv = s->private;
- if (spriv)
- dev_dbg(dev->class_dev, "serial: %p %d (%d)\n",
- spriv, spriv->serial_no, s->index);
- }
- return 0;
-}
-
-static int read_idm_word(const u8 *data, size_t size, int *pos,
- unsigned int *val)
-{
- int result = 0;
- int value;
-
- if (pos && val) {
- /* Skip over non hex */
- for (; *pos < size && !isxdigit(data[*pos]); (*pos)++)
- ;
- /* Collect value */
- *val = 0;
- for (; *pos < size; (*pos)++) {
- value = hex_to_bin(data[*pos]);
- if (value >= 0) {
- result = 1;
- *val = (*val << 4) + value;
- } else {
- break;
- }
- }
- }
- return result;
-}
-
-static int jr3_check_firmware(struct comedi_device *dev,
- const u8 *data, size_t size)
-{
- int more = 1;
- int pos = 0;
-
- /*
- * IDM file format is:
- * { count, address, data <count> } *
- * ffff
- */
- while (more) {
- unsigned int count = 0;
- unsigned int addr = 0;
-
- more = more && read_idm_word(data, size, &pos, &count);
- if (more && count == 0xffff)
- return 0;
-
- more = more && read_idm_word(data, size, &pos, &addr);
- while (more && count > 0) {
- unsigned int dummy = 0;
-
- more = more && read_idm_word(data, size, &pos, &dummy);
- count--;
- }
- }
-
- return -ENODATA;
-}
-
-static void jr3_write_firmware(struct comedi_device *dev,
- int subdev, const u8 *data, size_t size)
-{
- struct jr3_pci_dev_private *devpriv = dev->private;
- struct jr3_t __iomem *iobase = devpriv->iobase;
- u32 __iomem *lo;
- u32 __iomem *hi;
- int more = 1;
- int pos = 0;
-
- while (more) {
- unsigned int count = 0;
- unsigned int addr = 0;
-
- more = more && read_idm_word(data, size, &pos, &count);
- if (more && count == 0xffff)
- return;
-
- more = more && read_idm_word(data, size, &pos, &addr);
-
- dev_dbg(dev->class_dev, "Loading#%d %4.4x bytes at %4.4x\n",
- subdev, count, addr);
-
- while (more && count > 0) {
- if (addr & 0x4000) {
- /* 16 bit data, never seen in real life!! */
- unsigned int data1 = 0;
-
- more = more &&
- read_idm_word(data, size, &pos, &data1);
- count--;
- /* jr3[addr + 0x20000 * pnum] = data1; */
- } else {
- /* Download 24 bit program */
- unsigned int data1 = 0;
- unsigned int data2 = 0;
-
- lo = &iobase->channel[subdev].program_lo[addr];
- hi = &iobase->channel[subdev].program_hi[addr];
-
- more = more &&
- read_idm_word(data, size, &pos, &data1);
- more = more &&
- read_idm_word(data, size, &pos, &data2);
- count -= 2;
- if (more) {
- set_u16(lo, data1);
- udelay(1);
- set_u16(hi, data2);
- udelay(1);
- }
- }
- addr++;
- }
- }
-}
-
-static int jr3_download_firmware(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
-{
- int subdev;
- int ret;
-
- /* verify IDM file format */
- ret = jr3_check_firmware(dev, data, size);
- if (ret)
- return ret;
-
- /* write firmware to each subdevice */
- for (subdev = 0; subdev < dev->n_subdevices; subdev++)
- jr3_write_firmware(dev, subdev, data, size);
-
- return 0;
-}
-
-static struct jr3_pci_poll_delay jr3_pci_poll_subdevice(struct comedi_subdevice *s)
-{
- struct jr3_pci_subdev_private *spriv = s->private;
- struct jr3_pci_poll_delay result = poll_delay_min_max(1000, 2000);
- struct jr3_channel __iomem *channel;
- u16 model_no;
- u16 serial_no;
- int errors;
- int i;
-
- if (!spriv)
- return result;
-
- channel = spriv->channel;
- errors = get_u16(&channel->errors);
-
- if (errors != spriv->errors)
- spriv->errors = errors;
-
- /* Sensor communication lost? force poll mode */
- if (errors & (watch_dog | watch_dog2 | sensor_change))
- spriv->state = state_jr3_poll;
-
- switch (spriv->state) {
- case state_jr3_poll:
- model_no = get_u16(&channel->model_no);
- serial_no = get_u16(&channel->serial_no);
-
- if ((errors & (watch_dog | watch_dog2)) ||
- model_no == 0 || serial_no == 0) {
- /*
- * Still no sensor, keep on polling.
- * Since it takes up to 10 seconds for offsets to
- * stabilize, polling each second should suffice.
- */
- } else {
- spriv->retries = 0;
- spriv->state = state_jr3_init_wait_for_offset;
- }
- break;
- case state_jr3_init_wait_for_offset:
- spriv->retries++;
- if (spriv->retries < 10) {
- /*
-			 * Wait for offset to stabilize
- * (< 10 s according to manual)
- */
- } else {
- struct jr3_pci_transform transf;
-
- spriv->model_no = get_u16(&channel->model_no);
- spriv->serial_no = get_u16(&channel->serial_no);
-
- /* Transformation all zeros */
- for (i = 0; i < ARRAY_SIZE(transf.link); i++) {
- transf.link[i].link_type = (enum link_types)0;
- transf.link[i].link_amount = 0;
- }
-
- set_transforms(channel, transf, 0);
- use_transform(channel, 0);
- spriv->state = state_jr3_init_transform_complete;
- /* Allow 20 ms for completion */
- result = poll_delay_min_max(20, 100);
- }
- break;
- case state_jr3_init_transform_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- /* Set full scale */
- struct six_axis_t min_full_scale;
- struct six_axis_t max_full_scale;
-
- min_full_scale = get_min_full_scales(channel);
- max_full_scale = get_max_full_scales(channel);
- set_full_scales(channel, max_full_scale);
-
- spriv->state = state_jr3_init_set_full_scale_complete;
- /* Allow 20 ms for completion */
- result = poll_delay_min_max(20, 100);
- }
- break;
- case state_jr3_init_set_full_scale_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- struct force_array __iomem *fs = &channel->full_scale;
-
- /* Use ranges in kN or we will overflow around 2000N! */
- spriv->range[0].range.min = -get_s16(&fs->fx) * 1000;
- spriv->range[0].range.max = get_s16(&fs->fx) * 1000;
- spriv->range[1].range.min = -get_s16(&fs->fy) * 1000;
- spriv->range[1].range.max = get_s16(&fs->fy) * 1000;
- spriv->range[2].range.min = -get_s16(&fs->fz) * 1000;
- spriv->range[2].range.max = get_s16(&fs->fz) * 1000;
- spriv->range[3].range.min = -get_s16(&fs->mx) * 100;
- spriv->range[3].range.max = get_s16(&fs->mx) * 100;
- spriv->range[4].range.min = -get_s16(&fs->my) * 100;
- spriv->range[4].range.max = get_s16(&fs->my) * 100;
- spriv->range[5].range.min = -get_s16(&fs->mz) * 100;
- /* the next five are questionable */
- spriv->range[5].range.max = get_s16(&fs->mz) * 100;
- spriv->range[6].range.min = -get_s16(&fs->v1) * 100;
- spriv->range[6].range.max = get_s16(&fs->v1) * 100;
- spriv->range[7].range.min = -get_s16(&fs->v2) * 100;
- spriv->range[7].range.max = get_s16(&fs->v2) * 100;
- spriv->range[8].range.min = 0;
- spriv->range[8].range.max = 65535;
-
- use_offset(channel, 0);
- spriv->state = state_jr3_init_use_offset_complete;
- /* Allow 40 ms for completion */
- result = poll_delay_min_max(40, 100);
- }
- break;
- case state_jr3_init_use_offset_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- set_s16(&channel->offsets.fx, 0);
- set_s16(&channel->offsets.fy, 0);
- set_s16(&channel->offsets.fz, 0);
- set_s16(&channel->offsets.mx, 0);
- set_s16(&channel->offsets.my, 0);
- set_s16(&channel->offsets.mz, 0);
-
- set_offset(channel);
-
- spriv->state = state_jr3_done;
- }
- break;
- case state_jr3_done:
- result = poll_delay_min_max(10000, 20000);
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static void jr3_pci_poll_dev(unsigned long data)
-{
- struct comedi_device *dev = (struct comedi_device *)data;
- struct jr3_pci_dev_private *devpriv = dev->private;
- struct jr3_pci_subdev_private *spriv;
- struct comedi_subdevice *s;
- unsigned long flags;
- unsigned long now;
- int delay;
- int i;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- delay = 1000;
- now = jiffies;
-
- /* Poll all channels that are ready to be polled */
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- spriv = s->private;
-
- if (now > spriv->next_time_min) {
- struct jr3_pci_poll_delay sub_delay;
-
- sub_delay = jr3_pci_poll_subdevice(s);
-
- spriv->next_time_min = jiffies +
- msecs_to_jiffies(sub_delay.min);
- spriv->next_time_max = jiffies +
- msecs_to_jiffies(sub_delay.max);
-
- if (sub_delay.max && sub_delay.max < delay)
- /*
- * Wake up as late as possible ->
- * poll as many channels as possible at once.
- */
- delay = sub_delay.max;
- }
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- devpriv->timer.expires = jiffies + msecs_to_jiffies(delay);
- add_timer(&devpriv->timer);
-}
-
-static struct jr3_pci_subdev_private *
-jr3_pci_alloc_spriv(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct jr3_pci_dev_private *devpriv = dev->private;
- struct jr3_pci_subdev_private *spriv;
- int j;
- int k;
-
- spriv = comedi_alloc_spriv(s, sizeof(*spriv));
- if (!spriv)
- return NULL;
-
- spriv->channel = &devpriv->iobase->channel[s->index].data;
-
- for (j = 0; j < 8; j++) {
- spriv->range[j].length = 1;
- spriv->range[j].range.min = -1000000;
- spriv->range[j].range.max = 1000000;
-
- for (k = 0; k < 7; k++) {
- spriv->range_table_list[j + k * 8] =
- (struct comedi_lrange *)&spriv->range[j];
- spriv->maxdata_list[j + k * 8] = 0x7fff;
- }
- }
- spriv->range[8].length = 1;
- spriv->range[8].range.min = 0;
- spriv->range[8].range.max = 65536;
-
- spriv->range_table_list[56] = (struct comedi_lrange *)&spriv->range[8];
- spriv->range_table_list[57] = (struct comedi_lrange *)&spriv->range[8];
- spriv->maxdata_list[56] = 0xffff;
- spriv->maxdata_list[57] = 0xffff;
-
- dev_dbg(dev->class_dev, "p->channel %p %p (%tx)\n",
- spriv->channel, devpriv->iobase,
- ((char __iomem *)spriv->channel -
- (char __iomem *)devpriv->iobase));
-
- return spriv;
-}
-
-static int jr3_pci_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- static const struct jr3_pci_board *board;
- struct jr3_pci_dev_private *devpriv;
- struct jr3_pci_subdev_private *spriv;
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- if (sizeof(struct jr3_channel) != 0xc00) {
- dev_err(dev->class_dev,
- "sizeof(struct jr3_channel) = %x [expected %x]\n",
- (unsigned)sizeof(struct jr3_channel), 0xc00);
- return -EINVAL;
- }
-
- if (context < ARRAY_SIZE(jr3_pci_boards))
- board = &jr3_pci_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv->iobase = pci_ioremap_bar(pcidev, 0);
- if (!devpriv->iobase)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, board->n_subdevs);
- if (ret)
- return ret;
-
- dev->open = jr3_pci_open;
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8 * 7 + 2;
- s->insn_read = jr3_pci_ai_insn_read;
-
- spriv = jr3_pci_alloc_spriv(dev, s);
- if (spriv) {
- /* Channel specific range and maxdata */
- s->range_table_list = spriv->range_table_list;
- s->maxdata_list = spriv->maxdata_list;
- }
- }
-
- /* Reset DSP card */
- writel(0, &devpriv->iobase->channel[0].reset);
-
- ret = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
- "comedi/jr3pci.idm",
- jr3_download_firmware, 0);
-	dev_dbg(dev->class_dev, "Firmware load %d\n", ret);
- if (ret < 0)
- return ret;
- /*
- * TODO: use firmware to load preferred offset tables. Suggested
- * format:
- * model serial Fx Fy Fz Mx My Mz\n
- *
- * comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
- * "comedi/jr3_offsets_table",
- * jr3_download_firmware, 1);
- */
-
- /*
-	 * It takes a few milliseconds for the software to settle; after that
-	 * we can read the firmware version.
- */
- msleep_interruptible(25);
- for (i = 0; i < 0x18; i++) {
- dev_dbg(dev->class_dev, "%c\n",
- get_u16(&devpriv->iobase->channel[0].
- data.copyright[i]) >> 8);
- }
-
- /* Start card timer */
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- spriv = s->private;
-
- spriv->next_time_min = jiffies + msecs_to_jiffies(500);
- spriv->next_time_max = jiffies + msecs_to_jiffies(2000);
- }
-
- setup_timer(&devpriv->timer, jr3_pci_poll_dev, (unsigned long)dev);
- devpriv->timer.expires = jiffies + msecs_to_jiffies(1000);
- add_timer(&devpriv->timer);
-
- return 0;
-}
-
-static void jr3_pci_detach(struct comedi_device *dev)
-{
- struct jr3_pci_dev_private *devpriv = dev->private;
-
- if (devpriv) {
- del_timer_sync(&devpriv->timer);
-
- if (devpriv->iobase)
- iounmap(devpriv->iobase);
- }
- comedi_pci_disable(dev);
-}
-
-static struct comedi_driver jr3_pci_driver = {
- .driver_name = "jr3_pci",
- .module = THIS_MODULE,
- .auto_attach = jr3_pci_auto_attach,
- .detach = jr3_pci_detach,
-};
-
-static int jr3_pci_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &jr3_pci_driver, id->driver_data);
-}
-
-static const struct pci_device_id jr3_pci_pci_table[] = {
- { PCI_VDEVICE(JR3, 0x1111), BOARD_JR3_1 },
- { PCI_VDEVICE(JR3, 0x3111), BOARD_JR3_1 },
- { PCI_VDEVICE(JR3, 0x3112), BOARD_JR3_2 },
- { PCI_VDEVICE(JR3, 0x3113), BOARD_JR3_3 },
- { PCI_VDEVICE(JR3, 0x3114), BOARD_JR3_4 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, jr3_pci_pci_table);
-
-static struct pci_driver jr3_pci_pci_driver = {
- .name = "jr3_pci",
- .id_table = jr3_pci_pci_table,
- .probe = jr3_pci_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(jr3_pci_driver, jr3_pci_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("comedi/jr3pci.idm");
diff --git a/drivers/staging/comedi/drivers/jr3_pci.h b/drivers/staging/comedi/drivers/jr3_pci.h
deleted file mode 100644
index 356811defaf4..000000000000
--- a/drivers/staging/comedi/drivers/jr3_pci.h
+++ /dev/null
@@ -1,682 +0,0 @@
-/* Helper types to take care of the fact that the DSP card memory
- * is 16 bits, but aligned on a 32 bit PCI boundary
- */
-
-static inline u16 get_u16(const u32 __iomem *p)
-{
- return (u16)readl(p);
-}
-
-static inline void set_u16(u32 __iomem *p, u16 val)
-{
- writel(val, p);
-}
-
-static inline s16 get_s16(const s32 __iomem *p)
-{
- return (s16)readl(p);
-}
-
-static inline void set_s16(s32 __iomem *p, s16 val)
-{
- writel(val, p);
-}
-
-/* The raw data is stored in a format which facilitates rapid
- * processing by the JR3 DSP chip. The raw_channel structure shows the
- * format for a single channel of data. Each channel takes four,
- * two-byte words.
- *
- * Raw_time is an unsigned integer which shows the value of the JR3
- * DSP's internal clock at the time the sample was received. The clock
- * runs at 1/10 the JR3 DSP cycle time. JR3's slowest DSP runs at 10
- * MHz. At 10 MHz, raw_time would therefore clock at 1 MHz.
- *
- * Raw_data is the raw data received directly from the sensor. The
- * sensor data stream is capable of representing 16 different
- * channels. Channel 0 shows the excitation voltage at the sensor. It
- * is used to regulate the voltage over various cable lengths.
- * Channels 1-6 contain the coupled force data Fx through Mz. Channel
- * 7 contains the sensor's calibration data. The use of channels 8-15
- * varies with different sensors.
- */
-
-struct raw_channel {
- u32 raw_time;
- s32 raw_data;
- s32 reserved[2];
-};
-
-/* The force_array structure shows the layout for the decoupled and
- * filtered force data.
- */
-struct force_array {
- s32 fx;
- s32 fy;
- s32 fz;
- s32 mx;
- s32 my;
- s32 mz;
- s32 v1;
- s32 v2;
-};
-
-/* The six_axis_array structure shows the layout for the offsets and
- * the full scales.
- */
-struct six_axis_array {
- s32 fx;
- s32 fy;
- s32 fz;
- s32 mx;
- s32 my;
- s32 mz;
-};
-
-/* VECT_BITS */
-/* The vect_bits structure shows the layout for indicating
- * which axes to use in computing the vectors. Each bit signifies
- * selection of a single axis. The V1x axis bit corresponds to a hex
- * value of 0x0001 and the V2z bit corresponds to a hex value of
- * 0x0020. Example: to specify the axes V1x, V1y, V2x, and V2z the
- * pattern would be 0x002b. Vector 1 defaults to a force vector and
- * vector 2 defaults to a moment vector. It is possible to change one
- * or the other so that two force vectors or two moment vectors are
- * calculated. Setting the changeV1 bit or the changeV2 bit will
- * change that vector to be the opposite of its default. Therefore to
- * have two force vectors, set changeV1 to 1.
- */
-
-/* vect_bits appears to be unused at this time */
-enum {
- fx = 0x0001,
- fy = 0x0002,
- fz = 0x0004,
- mx = 0x0008,
- my = 0x0010,
- mz = 0x0020,
- changeV2 = 0x0040,
- changeV1 = 0x0080
-};
-
-/* WARNING_BITS */
-/* The warning_bits structure shows the bit pattern for the warning
- * word. The bit fields are shown from bit 0 (lsb) to bit 15 (msb).
- */
-
-/* XX_NEAR_SAT */
-/* The xx_near_sat bits signify that the indicated axis has reached or
- * exceeded the near saturation value.
- */
-
-enum {
- fx_near_sat = 0x0001,
- fy_near_sat = 0x0002,
- fz_near_sat = 0x0004,
- mx_near_sat = 0x0008,
- my_near_sat = 0x0010,
- mz_near_sat = 0x0020
-};
-
-/* ERROR_BITS */
-/* XX_SAT */
-/* MEMORY_ERROR */
-/* SENSOR_CHANGE */
-
-/* The error_bits structure shows the bit pattern for the error word.
- * The bit fields are shown from bit 0 (lsb) to bit 15 (msb). The
- * xx_sat bits signify that the indicated axis has reached or exceeded
- * the saturation value. The memory_error bit indicates that a problem
- * was detected in the on-board RAM during the power-up
- * initialization. The sensor_change bit indicates that a sensor other
- * than the one originally plugged in has passed its CRC check. This
- * bit latches, and must be reset by the user.
- *
- */
-
-/* SYSTEM_BUSY */
-
-/* The system_busy bit indicates that the JR3 DSP is currently busy
- * and is not calculating force data. This occurs when a new
- * coordinate transformation, or new sensor full scale is set by the
- * user. A very fast system using the force data for feedback might
- * become unstable during the approximately 4 ms needed to accomplish
- * these calculations. This bit will also become active when a new
- * sensor is plugged in and the system needs to recalculate the
- * calibration CRC.
- */
-
-/* CAL_CRC_BAD */
-
-/* The cal_crc_bad bit indicates that the calibration CRC has not
- * calculated to zero. CRC is short for cyclic redundancy code. It is
- * a method for determining the integrity of messages in data
- * communication. The calibration data stored inside the sensor is
- * transmitted to the JR3 DSP along with the sensor data. The
- * calibration data has a CRC attached to the end of it, to assist in
- * determining the completeness and integrity of the calibration data
- * received from the sensor. There are two reasons the CRC may not
- * have calculated to zero. The first is that all the calibration data
- * has not yet been received, the second is that the calibration data
- * has been corrupted. A typical sensor transmits the entire contents
- * of its calibration matrix over 30 times a second. Therefore, if
- * this bit is not zero within a couple of seconds after the sensor
- * has been plugged in, there is a problem with the sensor's
- * calibration data.
- */
-
-/* WATCH_DOG */
-/* WATCH_DOG2 */
-
-/* The watch_dog and watch_dog2 bits are sensor, not processor, watch
- * dog bits. Watch_dog indicates that the sensor data line seems to be
- * acting correctly, while watch_dog2 indicates that sensor data and
- * clock are being received. It is possible for watch_dog2 to go off
- * while watch_dog does not. This would indicate an improper clock
- * signal, while data is acting correctly. If either watch dog barks,
- * the sensor data is not being received correctly.
- */
-
-enum error_bits_t {
- fx_sat = 0x0001,
- fy_sat = 0x0002,
- fz_sat = 0x0004,
- mx_sat = 0x0008,
- my_sat = 0x0010,
- mz_sat = 0x0020,
- memory_error = 0x0400,
- sensor_change = 0x0800,
- system_busy = 0x1000,
- cal_crc_bad = 0x2000,
- watch_dog2 = 0x4000,
- watch_dog = 0x8000
-};
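
Since the warning and error words are plain bit masks, decoding them only needs the values from the enums above. A minimal standalone sketch (the sample error word passed in main() is invented):

#include <stdio.h>

/*
 * Decode a JR3 error word using the bit values from enum error_bits_t
 * above.
 */
static void print_errors(unsigned int errors)
{
	if (errors & 0x003f)
		printf("axis saturation bits: 0x%04x\n", errors & 0x003f);
	if (errors & 0x0400)
		printf("memory_error: on-board RAM fault at power-up\n");
	if (errors & 0x0800)
		printf("sensor_change: a different sensor passed its CRC (latched)\n");
	if (errors & 0x1000)
		printf("system_busy: DSP is recalculating, force data paused\n");
	if (errors & 0x2000)
		printf("cal_crc_bad: calibration data incomplete or corrupt\n");
	if (errors & 0xc000)
		printf("watch_dog/watch_dog2: sensor data or clock problem\n");
}

int main(void)
{
	print_errors(0x2001);	/* e.g. fx_sat | cal_crc_bad */
	return 0;
}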
-
-/* THRESH_STRUCT */
-
-/* This structure shows the layout for a single threshold packet inside of a
- * load envelope. Each load envelope can contain several threshold structures.
- * 1. data_address contains the address of the data for that threshold. This
- * includes filtered, unfiltered, raw, rate, counters, error and warning data
- * 2. threshold is the is the value at which, if data is above or below, the
- * 2. threshold is the value at which, if data is above or below, the
- * bits will be set ... (p. 24).
- * met or exceeded.
- */
-
-struct thresh_struct {
- s32 data_address;
- s32 threshold;
- s32 bit_pattern;
-};
-
-/* LE_STRUCT */
-
-/* Layout of a load envelope packet. Four thresholds are shown ... for more
- * see the manual (p. 25).
- * 1. latch_bits is a bit pattern that shows which bits the user wants to latch.
- *    The latched bits will not be reset once the threshold which set them is
- *    no longer true. In that case the user must reset them using the reset_bit
- *    command.
- * 2. number_of_xx_thresholds specifies how many GE/LE thresholds there are.
- */
-struct le_struct {
- s32 latch_bits;
- s32 number_of_ge_thresholds;
- s32 number_of_le_thresholds;
- struct thresh_struct thresholds[4];
- s32 reserved;
-};
-
-/* LINK_TYPES */
-/* Link types is an enumerated value showing the different possible transform
- * link types.
- * 0 - end transform packet
- * 1 - translate along X axis (TX)
- * 2 - translate along Y axis (TY)
- * 3 - translate along Z axis (TZ)
- * 4 - rotate about X axis (RX)
- * 5 - rotate about Y axis (RY)
- * 6 - rotate about Z axis (RZ)
- * 7 - negate all axes (NEG)
- */
-
-enum link_types {
- end_x_form,
- tx,
- ty,
- tz,
- rx,
- ry,
- rz,
- neg
-};
-
-/* TRANSFORM */
-/* Structure used to describe a transform. */
-struct intern_transform {
- struct {
- u32 link_type;
- s32 link_amount;
- } link[8];
-};
-
-/* JR3 force/torque sensor data definition. For more information see sensor
- * and hardware manuals.
- */
-
-struct jr3_channel {
- /* Raw_channels is the area used to store the raw data coming from */
- /* the sensor. */
-
- struct raw_channel raw_channels[16]; /* offset 0x0000 */
-
- /* Copyright is a null terminated ASCII string containing the JR3 */
- /* copyright notice. */
-
- u32 copyright[0x0018]; /* offset 0x0040 */
- s32 reserved1[0x0008]; /* offset 0x0058 */
-
- /* Shunts contains the sensor shunt readings. Some JR3 sensors have
- * the ability to have their gains adjusted. This allows the
- * hardware full scales to be adjusted to potentially allow
- * better resolution or dynamic range. For sensors that have
- * this ability, the gain of each sensor channel is measured at
- * the time of calibration using a shunt resistor. The shunt
- * resistor is placed across one arm of the resistor bridge, and
- * the resulting change in the output of that channel is
- * measured. This measurement is called the shunt reading, and
-	 * is recorded here. If the user has changed the gain of the
-	 * sensor, and made new shunt measurements, those shunt
-	 * measurements can be placed here. The JR3 DSP will then scale
-	 * the calibration matrix so that the gains are again
- * proper for the indicated shunt readings. If shunts is 0, then
- * the sensor cannot have its gain changed. For details on
- * changing the sensor gain, and making shunts readings, please
- * see the sensor manual. To make these values take effect the
- * user must call either command (5) use transform # (pg. 33) or
- * command (10) set new full scales (pg. 38).
- */
-
- struct six_axis_array shunts; /* offset 0x0060 */
- s32 reserved2[2]; /* offset 0x0066 */
-
- /* Default_FS contains the full scale that is used if the user does */
- /* not set a full scale. */
-
- struct six_axis_array default_FS; /* offset 0x0068 */
- s32 reserved3; /* offset 0x006e */
-
- /* Load_envelope_num is the load envelope number that is currently
- * in use. This value is set by the user after one of the load
- * envelopes has been initialized.
- */
-
- s32 load_envelope_num; /* offset 0x006f */
-
-	/* Min_full_scale is the recommended minimum full scale. */
-
-	/* These values, in conjunction with max_full_scale (pg. 9), help
- * determine the appropriate value for setting the full scales. The
- * software allows the user to set the sensor full scale to an
- * arbitrary value. But setting the full scales has some hazards. If
- * the full scale is set too low, the data will saturate
- * prematurely, and dynamic range will be lost. If the full scale is
- * set too high, then resolution is lost as the data is shifted to
- * the right and the least significant bits are lost. Therefore the
- * maximum full scale is the maximum value at which no resolution is
- * lost, and the minimum full scale is the value at which the data
- * will not saturate prematurely. These values are calculated
- * whenever a new coordinate transformation is calculated. It is
- * possible for the recommended maximum to be less than the
- * recommended minimum. This comes about primarily when using
- * coordinate translations. If this is the case, it means that any
- * full scale selection will be a compromise between dynamic range
- * and resolution. It is usually recommended to compromise in favor
- * of resolution which means that the recommend maximum full scale
-	 * of resolution, which means that the recommended maximum full scale
- *
- * WARNING: Be sure that the full scale is no less than 0.4% of the
- * recommended minimum full scale. Full scales below this value will
- * cause erroneous results.
- */
-
- struct six_axis_array min_full_scale; /* offset 0x0070 */
- s32 reserved4; /* offset 0x0076 */
-
- /* Transform_num is the transform number that is currently in use.
- * This value is set by the JR3 DSP after the user has used command
- * (5) use transform # (pg. 33).
- */
-
- s32 transform_num; /* offset 0x0077 */
-
- /* Max_full_scale is the recommended maximum full scale. See */
- /* min_full_scale (pg. 9) for more details. */
-
- struct six_axis_array max_full_scale; /* offset 0x0078 */
- s32 reserved5; /* offset 0x007e */
-
- /* Peak_address is the address of the data which will be monitored
- * by the peak routine. This value is set by the user. The peak
- * routine will monitor any 8 contiguous addresses for peak values.
- * (ex. to watch filter3 data for peaks, set this value to 0x00a8).
- */
-
- s32 peak_address; /* offset 0x007f */
-
- /* Full_scale is the sensor full scales which are currently in use.
- * Decoupled and filtered data is scaled so that +/- 16384 is equal
- * to the full scales. The engineering units used are indicated by
- * the units value discussed on page 16. The full scales for Fx, Fy,
- * Fz, Mx, My and Mz can be written by the user prior to calling
- * command (10) set new full scales (pg. 38). The full scales for V1
- * and V2 are set whenever the full scales are changed or when the
- * axes used to calculate the vectors are changed. The full scale of
- * V1 and V2 will always be equal to the largest full scale of the
- * axes used for each vector respectively.
- */
-
- struct force_array full_scale; /* offset 0x0080 */
-
- /* Offsets contains the sensor offsets. These values are subtracted from
- * the sensor data to obtain the decoupled data. The offsets are set a
- * few seconds (< 10) after the calibration data has been received.
- * They are set so that the output data will be zero. These values
- * can be written as well as read. The JR3 DSP will use the values
- * written here within 2 ms of being written. To set future
- * decoupled data to zero, add these values to the current decoupled
- * data values and place the sum here. The JR3 DSP will change these
- * values when a new transform is applied. So if the offsets are
- * such that FX is 5 and all other values are zero, after rotating
- * about Z by 90 degrees, FY would be 5 and all others would be zero.
- */
-
- struct six_axis_array offsets; /* offset 0x0088 */
-
- /* Offset_num is the number of the offset currently in use. This
- * value is set by the JR3 DSP after the user has executed the use
- * offset # command (pg. 34). It can vary between 0 and 15.
- */
-
- s32 offset_num; /* offset 0x008e */
-
- /* Vect_axes is a bit map showing which of the axes are being used
- * in the vector calculations. This value is set by the JR3 DSP
- * after the user has executed the set vector axes command (pg. 37).
- */
-
- u32 vect_axes; /* offset 0x008f */
-
- /* Filter0 is the decoupled, unfiltered data from the JR3 sensor.
- * This data has had the offsets removed.
- *
- * These force_arrays hold the filtered data. The decoupled data is
- * passed through cascaded low pass filters. Each succeeding filter
- * has a cutoff frequency of 1/4 of the preceding filter. The cutoff
- * frequency of filter1 is 1/16 of the sample rate from the sensor.
- * For a typical sensor with a sample rate of 8 kHz, the cutoff
- * frequency of filter1 would be 500 Hz. The following filters would
- * cutoff at 125 Hz, 31.25 Hz, 7.813 Hz, 1.953 Hz and 0.4883 Hz.
- */
-
- struct force_array filter[7]; /* offset 0x0090,
- offset 0x0098,
- offset 0x00a0,
- offset 0x00a8,
- offset 0x00b0,
- offset 0x00b8 ,
- offset 0x00c0 */
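
The cutoff cascade described in the comment is easy to tabulate; a small standalone sketch, assuming the typical 8 kHz sample rate mentioned above:

#include <stdio.h>

/*
 * Tabulate the cutoff cascade described above: filter1 cuts off at
 * sample_rate / 16 and each following filter at 1/4 of its
 * predecessor (filter0 is the unfiltered, decoupled data).
 */
int main(void)
{
	double rate_hz = 8000.0;	/* typical sensor sample rate */
	double cutoff = rate_hz / 16.0;
	int i;

	for (i = 1; i <= 6; i++) {
		printf("filter%d cutoff: %.4f Hz\n", i, cutoff);
		cutoff /= 4.0;
	}
	return 0;
}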
-
- /* Rate_data is the calculated rate data. It is a first derivative
- * calculation. It is calculated at a frequency specified by the
- * variable rate_divisor (pg. 12). The data on which the rate is
- * calculated is specified by the variable rate_address (pg. 12).
- */
-
- struct force_array rate_data; /* offset 0x00c8 */
-
- /* Minimum_data & maximum_data are the minimum and maximum (peak)
- * data values. The JR3 DSP can monitor any 8 contiguous data items
- * for minimums and maximums at full sensor bandwidth. This area is
- * only updated at user request. This is done so that the user does
- * not miss any peaks. To read the data, use either the read peaks
- * command (pg. 40), or the read and reset peaks command (pg. 39).
- * The address of the data to watch for peaks is stored in the
- * variable peak_address (pg. 10). Peak data is lost when executing
- * a coordinate transformation or a full scale change. Peak data is
- * also lost when plugging in a new sensor.
- */
-
- struct force_array minimum_data; /* offset 0x00d0 */
- struct force_array maximum_data; /* offset 0x00d8 */
-
- /* Near_sat_value & sat_value contain the value used to determine if
- * the raw sensor is saturated. Because of decoupling and offset
- * removal, it is difficult to tell from the processed data if the
- * sensor is saturated. These values, in conjunction with the error
- * and warning words (pg. 14), provide this critical information.
- * These two values may be set by the host processor. These values
- * are positive signed values, since the saturation logic uses the
- * absolute values of the raw data. The near_sat_value defaults to
- * approximately 80% of the ADC's full scale, which is 26214, while
- * sat_value defaults to the ADC's full scale:
- *
- * sat_value = 32768 - 2^(16 - ADC bits)
- */
-
- s32 near_sat_value; /* offset 0x00e0 */
- s32 sat_value; /* offset 0x00e1 */
-
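
A quick standalone calculation of the defaults given in the comment; the ADC widths are illustrative:

#include <stdio.h>

/*
 * Default saturation thresholds implied by the comment above:
 * sat_value = 32768 - 2^(16 - ADC bits), and near_sat_value defaults
 * to roughly 80% of full scale (26214).
 */
int main(void)
{
	int bits;

	printf("near_sat_value default: %d (~80%% of 32768)\n",
	       (int)(32768 * 0.8));
	for (bits = 12; bits <= 16; bits += 2)
		printf("%2d-bit ADC: sat_value = %d\n",
		       bits, 32768 - (1 << (16 - bits)));
	return 0;
}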
- /* Rate_address, rate_divisor & rate_count contain the data used to
- * control the calculations of the rates. Rate_address is the
- * address of the data used for the rate calculation. The JR3 DSP
- * will calculate rates for any 8 contiguous values (ex. to
- * calculate rates for filter3 data set rate_address to 0x00a8).
- * Rate_divisor is how often the rate is calculated. If rate_divisor
- * is 1, the rates are calculated at full sensor bandwidth. If
- * rate_divisor is 200, rates are calculated every 200 samples.
- * Rate_divisor can be any value between 1 and 65536. Set
- * rate_divisor to 0 to calculate rates every 65536 samples.
- * Rate_count starts at zero and counts until it equals
- * rate_divisor, at which point the rates are calculated, and
- * rate_count is reset to 0. When setting a new rate divisor, it is
- * a good idea to set rate_count to one less than rate divisor. This
- * will minimize the time necessary to start the rate calculations.
- */
-
- s32 rate_address; /* offset 0x00e2 */
- u32 rate_divisor; /* offset 0x00e3 */
- u32 rate_count; /* offset 0x00e4 */
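
To see how rate_divisor trades update rate for load, a small standalone sketch using the typical 8 kHz sample rate:

#include <stdio.h>

/*
 * Effective rate-calculation frequency for a given sensor sample rate
 * and rate_divisor, per the description above (a divisor of 0 means
 * rates are calculated every 65536 samples).
 */
static double rate_update_hz(double sample_hz, unsigned int divisor)
{
	if (divisor == 0)
		divisor = 65536;
	return sample_hz / divisor;
}

int main(void)
{
	printf("divisor 1:   %.1f Hz\n", rate_update_hz(8000.0, 1));
	printf("divisor 200: %.1f Hz\n", rate_update_hz(8000.0, 200));
	printf("divisor 0:   %.3f Hz\n", rate_update_hz(8000.0, 0));
	return 0;
}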
-
- /* Command_word2 through command_word0 are the locations used to
- * send commands to the JR3 DSP. Their usage varies with the command
- * and is detailed later in the Command Definitions section (pg.
- * 29). In general the user places values into various memory
- * locations, and then places the command word into command_word0.
- * The JR3 DSP will process the command and place a 0 into
- * command_word0 to indicate successful completion. Alternatively
- * the JR3 DSP will place a negative number into command_word0 to
- * indicate an error condition. Please note the command locations
- * are numbered backwards. (I.E. command_word2 comes before
- * command_word1).
- */
-
- s32 command_word2; /* offset 0x00e5 */
- s32 command_word1; /* offset 0x00e6 */
- s32 command_word0; /* offset 0x00e7 */
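
A minimal sketch of the handshake the comment describes, assuming a mapped struct jr3_channel pointer and the get_s16()/set_s16() helpers from the top of this header; the 1 ms poll interval, the timeout handling and the function itself are illustrative and not part of the driver (it would also need <linux/delay.h> and <linux/errno.h>):

/*
 * Illustrative only: issue a command to the JR3 DSP and wait for it to
 * acknowledge by writing 0 (success) or a negative value (error) back
 * into command_word0.  Assumes 'ch' points at the mapped jr3_channel;
 * the poll interval and timeout are arbitrary.
 */
static int jr3_issue_command(struct jr3_channel __iomem *ch, s16 cmd,
			     unsigned int timeout_ms)
{
	unsigned int i;
	s16 status;

	set_s16(&ch->command_word0, cmd);
	for (i = 0; i < timeout_ms; i++) {
		status = get_s16(&ch->command_word0);
		if (status <= 0)
			return status;	/* 0 = done, negative = error */
		msleep(1);
	}
	return -ETIMEDOUT;
}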
-
- /* Count1 through count6 are unsigned counters which are incremented
- * every time the matching filters are calculated. Filter1 is
- * calculated at the sensor data bandwidth. So this counter would
- * increment at 8 kHz for a typical sensor. The rest of the counters
- * are incremented at 1/4 the interval of the counter immediately
- * preceding it, so they would count at 2 kHz, 500 Hz, 125 Hz etc.
- * These counters can be used to wait for data. Each time the
- * counter changes, the corresponding data set can be sampled, and
-	 * this will ensure that the user gets each sample, once, and only
- * once.
- */
-
- u32 count1; /* offset 0x00e8 */
- u32 count2; /* offset 0x00e9 */
- u32 count3; /* offset 0x00ea */
- u32 count4; /* offset 0x00eb */
- u32 count5; /* offset 0x00ec */
- u32 count6; /* offset 0x00ed */
-
- /* Error_count is a running count of data reception errors. If this
- * counter is changing rapidly, it probably indicates a bad sensor
- * cable connection or other hardware problem. In most installations
- * error_count should not change at all. But it is possible in an
- * extremely noisy environment to experience occasional errors even
- * without a hardware problem. If the sensor is well grounded, this
- * is probably unavoidable in these environments. On the occasions
- * where this counter counts a bad sample, that sample is ignored.
- */
-
- u32 error_count; /* offset 0x00ee */
-
- /* Count_x is a counter which is incremented every time the JR3 DSP
- * searches its job queues and finds nothing to do. It indicates the
- * amount of idle time the JR3 DSP has available. It can also be
- * used to determine if the JR3 DSP is alive. See the Performance
- * Issues section on pg. 49 for more details.
- */
-
- u32 count_x; /* offset 0x00ef */
-
- /* Warnings & errors contain the warning and error bits
- * respectively. The format of these two words is discussed on page
- * 21 under the headings warnings_bits and error_bits.
- */
-
- u32 warnings; /* offset 0x00f0 */
- u32 errors; /* offset 0x00f1 */
-
- /* Threshold_bits is a word containing the bits that are set by the
- * load envelopes. See load_envelopes (pg. 17) and thresh_struct
- * (pg. 23) for more details.
- */
-
- s32 threshold_bits; /* offset 0x00f2 */
-
- /* Last_crc is the value that shows the actual calculated CRC. CRC
- * is short for cyclic redundancy code. It should be zero. See the
- * description for cal_crc_bad (pg. 21) for more information.
- */
-
- s32 last_CRC; /* offset 0x00f3 */
-
- /* EEProm_ver_no contains the version number of the sensor EEProm.
- * EEProm version numbers can vary between 0 and 255.
- * Software_ver_no contains the software version number. Version
- * 3.02 would be stored as 302.
- */
-
- s32 eeprom_ver_no; /* offset 0x00f4 */
- s32 software_ver_no; /* offset 0x00f5 */
-
- /* Software_day & software_year are the release date of the software
- * the JR3 DSP is currently running. Day is the day of the year,
-	 * with January 1 being 1, and December 31 being 365 for non-leap
- * years.
- */
-
- s32 software_day; /* offset 0x00f6 */
- s32 software_year; /* offset 0x00f7 */
-
- /* Serial_no & model_no are the two values which uniquely identify a
- * sensor. This model number does not directly correspond to the JR3
- * model number, but it will provide a unique identifier for
- * different sensor configurations.
- */
-
- u32 serial_no; /* offset 0x00f8 */
- u32 model_no; /* offset 0x00f9 */
-
- /* Cal_day & cal_year are the sensor calibration date. Day is the
-	 * day of the year, with January 1 being 1, and December 31 being
- * 366 for leap years.
- */
-
- s32 cal_day; /* offset 0x00fa */
- s32 cal_year; /* offset 0x00fb */
-
- /* Units is an enumerated read only value defining the engineering
- * units used in the sensor full scale. The meanings of particular
- * values are discussed in the section detailing the force_units
-	 * structure on page 22. The engineering units are set to customer
- * specifications during sensor manufacture and cannot be changed by
- * writing to Units.
- *
- * Bits contains the number of bits of resolution of the ADC
- * currently in use.
- *
- * Channels is a bit field showing which channels the current sensor
- * is capable of sending. If bit 0 is active, this sensor can send
- * channel 0, if bit 13 is active, this sensor can send channel 13,
- * etc. This bit can be active, even if the sensor is not currently
- * sending this channel. Some sensors are configurable as to which
- * channels to send, and this field only contains information on the
- * channels available to send, not on the current configuration. To
- * find which channels are currently being sent, monitor the
- * Raw_time fields (pg. 19) in the raw_channels array (pg. 7). If
- * the time is changing periodically, then that channel is being
- * received.
- */
-
- u32 units; /* offset 0x00fc */
- s32 bits; /* offset 0x00fd */
- s32 channels; /* offset 0x00fe */
-
- /* Thickness specifies the overall thickness of the sensor from
- * flange to flange. The engineering units for this value are
- * contained in units (pg. 16). The sensor calibration is relative
- * to the center of the sensor. This value allows easy coordinate
- * transformation from the center of the sensor to either flange.
- */
-
- s32 thickness; /* offset 0x00ff */
-
- /* Load_envelopes is a table containing the load envelope
- * descriptions. There are 16 possible load envelope slots in the
- * table. The slots are on 16 word boundaries and are numbered 0-15.
- * Each load envelope needs to start at the beginning of a slot but
- * need not be fully contained in that slot. That is to say that a
- * single load envelope can be larger than a single slot. The
- * software has been tested and ran satisfactorily with 50
- * thresholds active. A single load envelope this large would take
- * up 5 of the 16 slots. The load envelope data is laid out in an
- * order that is most efficient for the JR3 DSP. The structure is
- * detailed later in the section showing the definition of the
- * le_struct structure (pg. 23).
- */
-
- struct le_struct load_envelopes[0x10]; /* offset 0x0100 */
-
- /* Transforms is a table containing the transform descriptions.
- * There are 16 possible transform slots in the table. The slots are
- * on 16 word boundaries and are numbered 0-15. Each transform needs
- * to start at the beginning of a slot but need not be fully
- * contained in that slot. That is to say that a single transform
- * can be larger than a single slot. A transform is 2 * no of links
- * + 1 words in length. So a single slot can contain a transform
- * with 7 links. Two slots can contain a transform that is 15 links.
- * The layout is detailed later in the section showing the
- * definition of the transform structure (pg. 26).
- */
-
- struct intern_transform transforms[0x10]; /* offset 0x0200 */
-};
-
-struct jr3_t {
- struct {
- u32 program_lo[0x4000]; /* 0x00000 - 0x10000 */
- struct jr3_channel data; /* 0x10000 - 0x10c00 */
- char pad2[0x30000 - 0x00c00]; /* 0x10c00 - 0x40000 */
- u32 program_hi[0x8000]; /* 0x40000 - 0x60000 */
- u32 reset; /* 0x60000 - 0x60004 */
- char pad3[0x20000 - 0x00004]; /* 0x60004 - 0x80000 */
- } channel[4];
-};
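
The per-channel slices of this layout work out as follows; a small standalone sketch:

#include <stdio.h>

/*
 * Byte offset of each channel's jr3_channel data block within BAR 0,
 * following the jr3_t layout above: every channel occupies 0x80000
 * bytes and its data block starts 0x10000 bytes into that slice.
 */
int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 4; chan++)
		printf("channel %u data at BAR0 + 0x%06x\n",
		       chan, chan * 0x80000 + 0x10000);
	return 0;
}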
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
deleted file mode 100644
index dc642edf4f65..000000000000
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * ke_counter.c
- * Comedi driver for Kolter-Electronic PCI Counter 1 Card
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ke_counter
- * Description: Driver for Kolter Electronic Counter Card
- * Devices: [Kolter Electronic] PCI Counter Card (ke_counter)
- * Author: Michael Hillmann
- * Updated: Mon, 14 Apr 2008 15:42:42 +0100
- * Status: tested
- *
- * Configuration Options: not applicable, uses PCI auto config
- */
-
-#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-/*
- * PCI BAR 0 Register I/O map
- */
-#define KE_RESET_REG(x) (0x00 + ((x) * 0x20))
-#define KE_LATCH_REG(x) (0x00 + ((x) * 0x20))
-#define KE_LSB_REG(x) (0x04 + ((x) * 0x20))
-#define KE_MID_REG(x) (0x08 + ((x) * 0x20))
-#define KE_MSB_REG(x) (0x0c + ((x) * 0x20))
-#define KE_SIGN_REG(x) (0x10 + ((x) * 0x20))
-#define KE_OSC_SEL_REG 0xf8
-#define KE_OSC_SEL_EXT (1 << 0)
-#define KE_OSC_SEL_4MHZ (2 << 0)
-#define KE_OSC_SEL_20MHZ (3 << 0)
-#define KE_DO_REG 0xfc
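
The register macros above stride by 0x20 bytes per counter channel; a small standalone sketch of the resulting BAR 0 offsets:

#include <stdio.h>

/*
 * Print the BAR 0 offsets produced by the KE_*_REG(x) macros above:
 * each of the three counter channels occupies a 0x20-byte slice, with
 * the latch/reset register at +0x00 and LSB/MID/MSB/SIGN at
 * +0x04/+0x08/+0x0c/+0x10.
 */
int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 3; chan++)
		printf("chan %u: latch/reset 0x%02x lsb 0x%02x mid 0x%02x msb 0x%02x sign 0x%02x\n",
		       chan, chan * 0x20, 0x04 + chan * 0x20,
		       0x08 + chan * 0x20, 0x0c + chan * 0x20,
		       0x10 + chan * 0x20);
	return 0;
}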
-
-static int ke_counter_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[0];
-
- /* Order matters */
- outb((val >> 24) & 0xff, dev->iobase + KE_SIGN_REG(chan));
- outb((val >> 16) & 0xff, dev->iobase + KE_MSB_REG(chan));
- outb((val >> 8) & 0xff, dev->iobase + KE_MID_REG(chan));
- outb((val >> 0) & 0xff, dev->iobase + KE_LSB_REG(chan));
- }
-
- return insn->n;
-}
-
-static int ke_counter_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- /* Order matters */
- inb(dev->iobase + KE_LATCH_REG(chan));
-
- val = inb(dev->iobase + KE_LSB_REG(chan));
- val |= (inb(dev->iobase + KE_MID_REG(chan)) << 8);
- val |= (inb(dev->iobase + KE_MSB_REG(chan)) << 16);
- val |= (inb(dev->iobase + KE_SIGN_REG(chan)) << 24);
-
- data[i] = val;
- }
-
- return insn->n;
-}
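
From user space these counters are read through the usual comedi instruction path; a minimal comedilib sketch, where the device path, subdevice and channel numbers are assumptions for illustration:

#include <stdio.h>
#include <comedilib.h>

/*
 * Read counter channel 0 through comedilib.  The device path and the
 * subdevice number (0, the counter subdevice set up in the attach
 * routine below) are illustrative assumptions.
 */
int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	lsampl_t value;

	if (!dev) {
		fprintf(stderr, "cannot open comedi device\n");
		return 1;
	}
	if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &value) < 0) {
		comedi_perror("comedi_data_read");
		comedi_close(dev);
		return 1;
	}
	printf("counter 0 = %u\n", value);
	comedi_close(dev);
	return 0;
}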
-
-static void ke_counter_reset(struct comedi_device *dev)
-{
- unsigned int chan;
-
- for (chan = 0; chan < 3; chan++)
- outb(0, dev->iobase + KE_RESET_REG(chan));
-}
-
-static int ke_counter_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned char src;
-
- switch (data[0]) {
- case INSN_CONFIG_SET_CLOCK_SRC:
- switch (data[1]) {
- case KE_CLK_20MHZ: /* default */
- src = KE_OSC_SEL_20MHZ;
- break;
- case KE_CLK_4MHZ: /* option */
- src = KE_OSC_SEL_4MHZ;
- break;
- case KE_CLK_EXT: /* Pin 21 on D-sub */
- src = KE_OSC_SEL_EXT;
- break;
- default:
- return -EINVAL;
- }
- outb(src, dev->iobase + KE_OSC_SEL_REG);
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- src = inb(dev->iobase + KE_OSC_SEL_REG);
- switch (src) {
- case KE_OSC_SEL_20MHZ:
- data[1] = KE_CLK_20MHZ;
- data[2] = 50; /* 50ns */
- break;
- case KE_OSC_SEL_4MHZ:
- data[1] = KE_CLK_4MHZ;
- data[2] = 250; /* 250ns */
- break;
- case KE_OSC_SEL_EXT:
- data[1] = KE_CLK_EXT;
- data[2] = 0; /* Unknown */
- break;
- default:
- return -EINVAL;
- }
- break;
- case INSN_CONFIG_RESET:
- ke_counter_reset(dev);
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static int ke_counter_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + KE_DO_REG);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int ke_counter_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- dev->iobase = pci_resource_start(pcidev, 0);
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 3;
- s->maxdata = 0x01ffffff;
- s->range_table = &range_unknown;
- s->insn_read = ke_counter_insn_read;
- s->insn_write = ke_counter_insn_write;
- s->insn_config = ke_counter_insn_config;
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 3;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ke_counter_do_insn_bits;
-
- outb(KE_OSC_SEL_20MHZ, dev->iobase + KE_OSC_SEL_REG);
-
- ke_counter_reset(dev);
-
- return 0;
-}
-
-static struct comedi_driver ke_counter_driver = {
- .driver_name = "ke_counter",
- .module = THIS_MODULE,
- .auto_attach = ke_counter_auto_attach,
- .detach = comedi_pci_detach,
-};
-
-static int ke_counter_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &ke_counter_driver,
- id->driver_data);
-}
-
-static const struct pci_device_id ke_counter_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, 0x0014) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ke_counter_pci_table);
-
-static struct pci_driver ke_counter_pci_driver = {
- .name = "ke_counter",
- .id_table = ke_counter_pci_table,
- .probe = ke_counter_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(ke_counter_driver, ke_counter_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Kolter Electronic Counter Card");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
deleted file mode 100644
index 15a53204a36a..000000000000
--- a/drivers/staging/comedi/drivers/me4000.c
+++ /dev/null
@@ -1,1287 +0,0 @@
-/*
- * me4000.c
- * Source code for the Meilhaus ME-4000 board family.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: me4000
- * Description: Meilhaus ME-4000 series boards
- * Devices: [Meilhaus] ME-4650 (me4000), ME-4670i, ME-4680, ME-4680i,
- * ME-4680is
- * Author: gg (Guenter Gebhardt <g.gebhardt@meilhaus.com>)
- * Updated: Mon, 18 Mar 2002 15:34:01 -0800
- * Status: untested
- *
- * Supports:
- * - Analog Input
- * - Analog Output
- * - Digital I/O
- * - Counter
- *
- * Configuration Options: not applicable, uses PCI auto config
- *
- * The firmware required by these boards is available in the
- * comedi_nonfree_firmware tarball available from
- * http://www.comedi.org.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "plx9052.h"
-
-#define ME4000_FIRMWARE "me4000_firmware.bin"
-
-/*
- * ME4000 Register map and bit defines
- */
-#define ME4000_AO_CHAN(x) ((x) * 0x18)
-
-#define ME4000_AO_CTRL_REG(x) (0x00 + ME4000_AO_CHAN(x))
-#define ME4000_AO_CTRL_MODE_0 BIT(0)
-#define ME4000_AO_CTRL_MODE_1 BIT(1)
-#define ME4000_AO_CTRL_STOP BIT(2)
-#define ME4000_AO_CTRL_ENABLE_FIFO BIT(3)
-#define ME4000_AO_CTRL_ENABLE_EX_TRIG BIT(4)
-#define ME4000_AO_CTRL_EX_TRIG_EDGE BIT(5)
-#define ME4000_AO_CTRL_IMMEDIATE_STOP BIT(7)
-#define ME4000_AO_CTRL_ENABLE_DO BIT(8)
-#define ME4000_AO_CTRL_ENABLE_IRQ BIT(9)
-#define ME4000_AO_CTRL_RESET_IRQ BIT(10)
-#define ME4000_AO_STATUS_REG(x) (0x04 + ME4000_AO_CHAN(x))
-#define ME4000_AO_STATUS_FSM BIT(0)
-#define ME4000_AO_STATUS_FF BIT(1)
-#define ME4000_AO_STATUS_HF BIT(2)
-#define ME4000_AO_STATUS_EF BIT(3)
-#define ME4000_AO_FIFO_REG(x) (0x08 + ME4000_AO_CHAN(x))
-#define ME4000_AO_SINGLE_REG(x) (0x0c + ME4000_AO_CHAN(x))
-#define ME4000_AO_TIMER_REG(x) (0x10 + ME4000_AO_CHAN(x))
-#define ME4000_AI_CTRL_REG 0x74
-#define ME4000_AI_STATUS_REG 0x74
-#define ME4000_AI_CTRL_MODE_0 BIT(0)
-#define ME4000_AI_CTRL_MODE_1 BIT(1)
-#define ME4000_AI_CTRL_MODE_2 BIT(2)
-#define ME4000_AI_CTRL_SAMPLE_HOLD BIT(3)
-#define ME4000_AI_CTRL_IMMEDIATE_STOP BIT(4)
-#define ME4000_AI_CTRL_STOP BIT(5)
-#define ME4000_AI_CTRL_CHANNEL_FIFO BIT(6)
-#define ME4000_AI_CTRL_DATA_FIFO BIT(7)
-#define ME4000_AI_CTRL_FULLSCALE BIT(8)
-#define ME4000_AI_CTRL_OFFSET BIT(9)
-#define ME4000_AI_CTRL_EX_TRIG_ANALOG BIT(10)
-#define ME4000_AI_CTRL_EX_TRIG BIT(11)
-#define ME4000_AI_CTRL_EX_TRIG_FALLING BIT(12)
-#define ME4000_AI_CTRL_EX_IRQ BIT(13)
-#define ME4000_AI_CTRL_EX_IRQ_RESET BIT(14)
-#define ME4000_AI_CTRL_LE_IRQ BIT(15)
-#define ME4000_AI_CTRL_LE_IRQ_RESET BIT(16)
-#define ME4000_AI_CTRL_HF_IRQ BIT(17)
-#define ME4000_AI_CTRL_HF_IRQ_RESET BIT(18)
-#define ME4000_AI_CTRL_SC_IRQ BIT(19)
-#define ME4000_AI_CTRL_SC_IRQ_RESET BIT(20)
-#define ME4000_AI_CTRL_SC_RELOAD BIT(21)
-#define ME4000_AI_STATUS_EF_CHANNEL BIT(22)
-#define ME4000_AI_STATUS_HF_CHANNEL BIT(23)
-#define ME4000_AI_STATUS_FF_CHANNEL BIT(24)
-#define ME4000_AI_STATUS_EF_DATA BIT(25)
-#define ME4000_AI_STATUS_HF_DATA BIT(26)
-#define ME4000_AI_STATUS_FF_DATA BIT(27)
-#define ME4000_AI_STATUS_LE BIT(28)
-#define ME4000_AI_STATUS_FSM BIT(29)
-#define ME4000_AI_CTRL_EX_TRIG_BOTH BIT(31)
-#define ME4000_AI_CHANNEL_LIST_REG 0x78
-#define ME4000_AI_LIST_INPUT_DIFFERENTIAL BIT(5)
-#define ME4000_AI_LIST_RANGE(x) ((3 - ((x) & 3)) << 6)
-#define ME4000_AI_LIST_LAST_ENTRY BIT(8)
-#define ME4000_AI_DATA_REG 0x7c
-#define ME4000_AI_CHAN_TIMER_REG 0x80
-#define ME4000_AI_CHAN_PRE_TIMER_REG 0x84
-#define ME4000_AI_SCAN_TIMER_LOW_REG 0x88
-#define ME4000_AI_SCAN_TIMER_HIGH_REG 0x8c
-#define ME4000_AI_SCAN_PRE_TIMER_LOW_REG 0x90
-#define ME4000_AI_SCAN_PRE_TIMER_HIGH_REG 0x94
-#define ME4000_AI_START_REG 0x98
-#define ME4000_IRQ_STATUS_REG 0x9c
-#define ME4000_IRQ_STATUS_EX BIT(0)
-#define ME4000_IRQ_STATUS_LE BIT(1)
-#define ME4000_IRQ_STATUS_AI_HF BIT(2)
-#define ME4000_IRQ_STATUS_AO_0_HF BIT(3)
-#define ME4000_IRQ_STATUS_AO_1_HF BIT(4)
-#define ME4000_IRQ_STATUS_AO_2_HF BIT(5)
-#define ME4000_IRQ_STATUS_AO_3_HF BIT(6)
-#define ME4000_IRQ_STATUS_SC BIT(7)
-#define ME4000_DIO_PORT_0_REG 0xa0
-#define ME4000_DIO_PORT_1_REG 0xa4
-#define ME4000_DIO_PORT_2_REG 0xa8
-#define ME4000_DIO_PORT_3_REG 0xac
-#define ME4000_DIO_DIR_REG 0xb0
-#define ME4000_AO_LOADSETREG_XX 0xb4
-#define ME4000_DIO_CTRL_REG 0xb8
-#define ME4000_DIO_CTRL_MODE_0 BIT(0)
-#define ME4000_DIO_CTRL_MODE_1 BIT(1)
-#define ME4000_DIO_CTRL_MODE_2 BIT(2)
-#define ME4000_DIO_CTRL_MODE_3 BIT(3)
-#define ME4000_DIO_CTRL_MODE_4 BIT(4)
-#define ME4000_DIO_CTRL_MODE_5 BIT(5)
-#define ME4000_DIO_CTRL_MODE_6 BIT(6)
-#define ME4000_DIO_CTRL_MODE_7 BIT(7)
-#define ME4000_DIO_CTRL_FUNCTION_0 BIT(8)
-#define ME4000_DIO_CTRL_FUNCTION_1 BIT(9)
-#define ME4000_DIO_CTRL_FIFO_HIGH_0 BIT(10)
-#define ME4000_DIO_CTRL_FIFO_HIGH_1 BIT(11)
-#define ME4000_DIO_CTRL_FIFO_HIGH_2 BIT(12)
-#define ME4000_DIO_CTRL_FIFO_HIGH_3 BIT(13)
-#define ME4000_AO_DEMUX_ADJUST_REG 0xbc
-#define ME4000_AO_DEMUX_ADJUST_VALUE 0x4c
-#define ME4000_AI_SAMPLE_COUNTER_REG 0xc0
-
-#define ME4000_AI_FIFO_COUNT 2048
-
-#define ME4000_AI_MIN_TICKS 66
-#define ME4000_AI_MIN_SAMPLE_TIME 2000
-
-#define ME4000_AI_CHANNEL_LIST_COUNT 1024
-
-struct me4000_private {
- unsigned long plx_regbase;
- unsigned int ai_ctrl_mode;
- unsigned int ai_init_ticks;
- unsigned int ai_scan_ticks;
- unsigned int ai_chan_ticks;
-};
-
-enum me4000_boardid {
- BOARD_ME4650,
- BOARD_ME4660,
- BOARD_ME4660I,
- BOARD_ME4660S,
- BOARD_ME4660IS,
- BOARD_ME4670,
- BOARD_ME4670I,
- BOARD_ME4670S,
- BOARD_ME4670IS,
- BOARD_ME4680,
- BOARD_ME4680I,
- BOARD_ME4680S,
- BOARD_ME4680IS,
-};
-
-struct me4000_board {
- const char *name;
- int ai_nchan;
- unsigned int can_do_diff_ai:1;
- unsigned int can_do_sh_ai:1; /* sample & hold (8 channels) */
- unsigned int ex_trig_analog:1;
- unsigned int has_ao:1;
- unsigned int has_ao_fifo:1;
- unsigned int has_counter:1;
-};
-
-static const struct me4000_board me4000_boards[] = {
- [BOARD_ME4650] = {
- .name = "ME-4650",
- .ai_nchan = 16,
- },
- [BOARD_ME4660] = {
- .name = "ME-4660",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .has_counter = 1,
- },
- [BOARD_ME4660I] = {
- .name = "ME-4660i",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .has_counter = 1,
- },
- [BOARD_ME4660S] = {
- .name = "ME-4660s",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .can_do_sh_ai = 1,
- .has_counter = 1,
- },
- [BOARD_ME4660IS] = {
- .name = "ME-4660is",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .can_do_sh_ai = 1,
- .has_counter = 1,
- },
- [BOARD_ME4670] = {
- .name = "ME-4670",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_counter = 1,
- },
- [BOARD_ME4670I] = {
- .name = "ME-4670i",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_counter = 1,
- },
- [BOARD_ME4670S] = {
- .name = "ME-4670s",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .can_do_sh_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_counter = 1,
- },
- [BOARD_ME4670IS] = {
- .name = "ME-4670is",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .can_do_sh_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_counter = 1,
- },
- [BOARD_ME4680] = {
- .name = "ME-4680",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_ao_fifo = 1,
- .has_counter = 1,
- },
- [BOARD_ME4680I] = {
- .name = "ME-4680i",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_ao_fifo = 1,
- .has_counter = 1,
- },
- [BOARD_ME4680S] = {
- .name = "ME-4680s",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .can_do_sh_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_ao_fifo = 1,
- .has_counter = 1,
- },
- [BOARD_ME4680IS] = {
- .name = "ME-4680is",
- .ai_nchan = 32,
- .can_do_diff_ai = 1,
- .can_do_sh_ai = 1,
- .ex_trig_analog = 1,
- .has_ao = 1,
- .has_ao_fifo = 1,
- .has_counter = 1,
- },
-};
-
-/*
- * NOTE: the ranges here are inverted compared to the values
- * written to the ME4000_AI_CHANNEL_LIST_REG,
- *
- * The ME4000_AI_LIST_RANGE() macro handles the inversion.
- */
-static const struct comedi_lrange me4000_ai_range = {
- 4, {
- UNI_RANGE(2.5),
- UNI_RANGE(10),
- BIP_RANGE(2.5),
- BIP_RANGE(10)
- }
-};
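
A small standalone sketch of the inversion the NOTE above describes, mirroring the ME4000_AI_LIST_RANGE() macro:

#include <stdio.h>

/*
 * Show the inversion handled by ME4000_AI_LIST_RANGE(): the comedi
 * range index is mapped to (3 - (range & 3)) << 6 before being written
 * to ME4000_AI_CHANNEL_LIST_REG.  Range names follow me4000_ai_range.
 */
int main(void)
{
	static const char * const names[4] = {
		"0..2.5V", "0..10V", "-2.5..+2.5V", "-10..+10V"
	};
	unsigned int r;

	for (r = 0; r < 4; r++)
		printf("range %u (%-11s) -> list bits 0x%03x\n",
		       r, names[r], (3 - (r & 3)) << 6);
	return 0;
}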
-
-static int me4000_xilinx_download(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct me4000_private *devpriv = dev->private;
- unsigned long xilinx_iobase = pci_resource_start(pcidev, 5);
- unsigned int file_length;
- unsigned int val;
- unsigned int i;
-
- if (!xilinx_iobase)
- return -ENODEV;
-
- /*
- * Set PLX local interrupt 2 polarity to high.
-	 * The interrupt is asserted by the /INIT pin of the Xilinx.
- */
- outl(PLX9052_INTCSR_LI2POL, devpriv->plx_regbase + PLX9052_INTCSR);
-
- /* Set /CS and /WRITE of the Xilinx */
- val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
- val |= PLX9052_CNTRL_UIO2_DATA;
- outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
-
- /* Init Xilinx with CS1 */
- inb(xilinx_iobase + 0xC8);
-
- /* Wait until /INIT pin is set */
- usleep_range(20, 1000);
- val = inl(devpriv->plx_regbase + PLX9052_INTCSR);
- if (!(val & PLX9052_INTCSR_LI2STAT)) {
- dev_err(dev->class_dev, "Can't init Xilinx\n");
- return -EIO;
- }
-
- /* Reset /CS and /WRITE of the Xilinx */
- val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
- val &= ~PLX9052_CNTRL_UIO2_DATA;
- outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
-
- /* Download Xilinx firmware */
- file_length = (((unsigned int)data[0] & 0xff) << 24) +
- (((unsigned int)data[1] & 0xff) << 16) +
- (((unsigned int)data[2] & 0xff) << 8) +
- ((unsigned int)data[3] & 0xff);
- usleep_range(10, 1000);
-
- for (i = 0; i < file_length; i++) {
- outb(data[16 + i], xilinx_iobase);
- usleep_range(10, 1000);
-
- /* Check if BUSY flag is low */
- val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
- if (val & PLX9052_CNTRL_UIO1_DATA) {
- dev_err(dev->class_dev,
- "Xilinx is still busy (i = %d)\n", i);
- return -EIO;
- }
- }
-
-	/* If the DONE flag is high, the download was successful */
- val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
- if (!(val & PLX9052_CNTRL_UIO0_DATA)) {
- dev_err(dev->class_dev, "DONE flag is not set\n");
- dev_err(dev->class_dev, "Download not successful\n");
- return -EIO;
- }
-
- /* Set /CS and /WRITE */
- val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
- val |= PLX9052_CNTRL_UIO2_DATA;
- outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
-
- return 0;
-}
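
The routine above trusts the firmware image to carry its own length; a minimal standalone sketch of that header layout (the sample bytes are invented):

#include <stdio.h>
#include <stdint.h>

/*
 * Parse the firmware header expected by me4000_xilinx_download(): a
 * big-endian 32-bit payload length in bytes 0-3, with the bitstream
 * itself starting at byte 16.
 */
static uint32_t fw_payload_length(const uint8_t *data)
{
	return ((uint32_t)data[0] << 24) | ((uint32_t)data[1] << 16) |
	       ((uint32_t)data[2] << 8) | (uint32_t)data[3];
}

int main(void)
{
	static const uint8_t header[16] = { 0x00, 0x01, 0x86, 0xa0 };

	printf("payload: %u bytes, bitstream starts at offset 16\n",
	       fw_payload_length(header));
	return 0;
}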
-
-static void me4000_ai_reset(struct comedi_device *dev)
-{
- unsigned int ctrl;
-
- /* Stop any running conversion */
- ctrl = inl(dev->iobase + ME4000_AI_CTRL_REG);
- ctrl |= ME4000_AI_CTRL_STOP | ME4000_AI_CTRL_IMMEDIATE_STOP;
- outl(ctrl, dev->iobase + ME4000_AI_CTRL_REG);
-
- /* Clear the control register */
- outl(0x0, dev->iobase + ME4000_AI_CTRL_REG);
-}
-
-static void me4000_reset(struct comedi_device *dev)
-{
- struct me4000_private *devpriv = dev->private;
- unsigned int val;
- int chan;
-
- /* Disable interrupts on the PLX */
- outl(0, devpriv->plx_regbase + PLX9052_INTCSR);
-
- /* Software reset the PLX */
- val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
- val |= PLX9052_CNTRL_PCI_RESET;
- outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
- val &= ~PLX9052_CNTRL_PCI_RESET;
- outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
-
- /* 0x8000 to the DACs means an output voltage of 0V */
- for (chan = 0; chan < 4; chan++)
- outl(0x8000, dev->iobase + ME4000_AO_SINGLE_REG(chan));
-
- me4000_ai_reset(dev);
-
- /* Set both stop bits in the analog output control register */
- val = ME4000_AO_CTRL_IMMEDIATE_STOP | ME4000_AO_CTRL_STOP;
- for (chan = 0; chan < 4; chan++)
- outl(val, dev->iobase + ME4000_AO_CTRL_REG(chan));
-
-	/* Set the adjustment register for AO demux */
- outl(ME4000_AO_DEMUX_ADJUST_VALUE,
- dev->iobase + ME4000_AO_DEMUX_ADJUST_REG);
-
- /*
- * Set digital I/O direction for port 0
- * to output on isolated versions
- */
- if (!(inl(dev->iobase + ME4000_DIO_DIR_REG) & 0x1))
- outl(0x1, dev->iobase + ME4000_DIO_CTRL_REG);
-}
-
-static unsigned int me4000_ai_get_sample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int val;
-
- /* read two's complement value and munge to offset binary */
- val = inl(dev->iobase + ME4000_AI_DATA_REG);
- return comedi_offset_munge(s, val);
-}
-
-static int me4000_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inl(dev->iobase + ME4000_AI_STATUS_REG);
- if (status & ME4000_AI_STATUS_EF_DATA)
- return 0;
- return -EBUSY;
-}
-
-static int me4000_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int aref = CR_AREF(insn->chanspec);
- unsigned int entry;
- int ret = 0;
- int i;
-
- entry = chan | ME4000_AI_LIST_RANGE(range);
- if (aref == AREF_DIFF) {
- if (!(s->subdev_flags & SDF_DIFF)) {
- dev_err(dev->class_dev,
- "Differential inputs are not available\n");
- return -EINVAL;
- }
-
- if (!comedi_range_is_bipolar(s, range)) {
- dev_err(dev->class_dev,
- "Range must be bipolar when aref = diff\n");
- return -EINVAL;
- }
-
- if (chan >= (s->n_chan / 2)) {
- dev_err(dev->class_dev,
- "Analog input is not available\n");
- return -EINVAL;
- }
- entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
- }
-
- entry |= ME4000_AI_LIST_LAST_ENTRY;
-
- /* Enable channel list and data fifo for single acquisition mode */
- outl(ME4000_AI_CTRL_CHANNEL_FIFO | ME4000_AI_CTRL_DATA_FIFO,
- dev->iobase + ME4000_AI_CTRL_REG);
-
- /* Generate channel list entry */
- outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
-
- /* Set the timer to maximum sample rate */
- outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_TIMER_REG);
- outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
-
- for (i = 0; i < insn->n; i++) {
- /* start conversion by dummy read */
- inl(dev->iobase + ME4000_AI_START_REG);
-
- ret = comedi_timeout(dev, s, insn, me4000_ai_eoc, 0);
- if (ret)
- break;
-
-		data[i] = me4000_ai_get_sample(dev, s);
- }
-
- me4000_ai_reset(dev);
-
- return ret ? ret : insn->n;
-}
-
-static int me4000_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- me4000_ai_reset(dev);
-
- return 0;
-}
-
-static int me4000_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
- int i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
-
- if (aref != aref0) {
- dev_dbg(dev->class_dev,
- "Mode is not equal for all entries\n");
- return -EINVAL;
- }
-
- if (aref == AREF_DIFF) {
- if (!(s->subdev_flags & SDF_DIFF)) {
- dev_err(dev->class_dev,
- "Differential inputs are not available\n");
- return -EINVAL;
- }
-
- if (chan >= (s->n_chan / 2)) {
- dev_dbg(dev->class_dev,
-					"Channel number too high\n");
- return -EINVAL;
- }
-
- if (!comedi_range_is_bipolar(s, range)) {
- dev_dbg(dev->class_dev,
- "Bipolar is not selected in differential mode\n");
- return -EINVAL;
- }
- }
- }
-
- return 0;
-}
-
-static void me4000_ai_round_cmd_args(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct me4000_private *devpriv = dev->private;
- int rest;
-
- devpriv->ai_init_ticks = 0;
- devpriv->ai_scan_ticks = 0;
- devpriv->ai_chan_ticks = 0;
-
- if (cmd->start_arg) {
- devpriv->ai_init_ticks = (cmd->start_arg * 33) / 1000;
- rest = (cmd->start_arg * 33) % 1000;
-
- if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
- if (rest > 33)
- devpriv->ai_init_ticks++;
- } else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
- if (rest)
- devpriv->ai_init_ticks++;
- }
- }
-
- if (cmd->scan_begin_arg) {
- devpriv->ai_scan_ticks = (cmd->scan_begin_arg * 33) / 1000;
- rest = (cmd->scan_begin_arg * 33) % 1000;
-
- if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
- if (rest > 33)
- devpriv->ai_scan_ticks++;
- } else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
- if (rest)
- devpriv->ai_scan_ticks++;
- }
- }
-
- if (cmd->convert_arg) {
- devpriv->ai_chan_ticks = (cmd->convert_arg * 33) / 1000;
- rest = (cmd->convert_arg * 33) % 1000;
-
- if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
- if (rest > 33)
- devpriv->ai_chan_ticks++;
- } else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
- if (rest)
- devpriv->ai_chan_ticks++;
- }
- }
-}
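
The conversion above maps nanoseconds onto a 33 MHz tick clock; a small standalone sketch of the CMDF_ROUND_NEAREST case:

#include <stdio.h>

/*
 * Convert a comedi timing argument in nanoseconds into 33 MHz timer
 * ticks the way me4000_ai_round_cmd_args() does for CMDF_ROUND_NEAREST:
 * ticks = ns * 33 / 1000, incremented when the remainder exceeds 33.
 */
static unsigned int ns_to_ticks_nearest(unsigned int ns)
{
	unsigned int ticks = (ns * 33) / 1000;

	if ((ns * 33) % 1000 > 33)
		ticks++;
	return ticks;
}

int main(void)
{
	printf("2000 ns  -> %u ticks (ME4000_AI_MIN_TICKS is 66)\n",
	       ns_to_ticks_nearest(2000));
	printf("10000 ns -> %u ticks\n", ns_to_ticks_nearest(10000));
	return 0;
}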
-
-static void me4000_ai_write_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
- unsigned int entry;
-
- entry = chan | ME4000_AI_LIST_RANGE(range);
-
- if (aref == AREF_DIFF)
- entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
-
- if (i == (cmd->chanlist_len - 1))
- entry |= ME4000_AI_LIST_LAST_ENTRY;
-
- outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
- }
-}
-
-static int me4000_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct me4000_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int ctrl;
-
- /* Write timer arguments */
- outl(devpriv->ai_init_ticks - 1,
- dev->iobase + ME4000_AI_SCAN_PRE_TIMER_LOW_REG);
- outl(0x0, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_HIGH_REG);
-
- if (devpriv->ai_scan_ticks) {
- outl(devpriv->ai_scan_ticks - 1,
- dev->iobase + ME4000_AI_SCAN_TIMER_LOW_REG);
- outl(0x0, dev->iobase + ME4000_AI_SCAN_TIMER_HIGH_REG);
- }
-
- outl(devpriv->ai_chan_ticks - 1,
- dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
- outl(devpriv->ai_chan_ticks - 1,
- dev->iobase + ME4000_AI_CHAN_TIMER_REG);
-
- /* Start sources */
- ctrl = devpriv->ai_ctrl_mode |
- ME4000_AI_CTRL_CHANNEL_FIFO |
- ME4000_AI_CTRL_DATA_FIFO;
-
- /* Stop triggers */
- if (cmd->stop_src == TRIG_COUNT) {
- outl(cmd->chanlist_len * cmd->stop_arg,
- dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
- ctrl |= ME4000_AI_CTRL_SC_IRQ;
- } else if (cmd->stop_src == TRIG_NONE &&
- cmd->scan_end_src == TRIG_COUNT) {
- outl(cmd->scan_end_arg,
- dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
- ctrl |= ME4000_AI_CTRL_SC_IRQ;
- }
- ctrl |= ME4000_AI_CTRL_HF_IRQ;
-
- /* Write the setup to the control register */
- outl(ctrl, dev->iobase + ME4000_AI_CTRL_REG);
-
- /* Write the channel list */
- me4000_ai_write_chanlist(dev, s, cmd);
-
-	/* Start acquisition by dummy read */
- inl(dev->iobase + ME4000_AI_START_REG);
-
- return 0;
-}
-
-static int me4000_ai_do_cmd_test(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct me4000_private *devpriv = dev->private;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src,
- TRIG_NONE | TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE | TRIG_COUNT);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_end_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->start_src == TRIG_NOW &&
- cmd->scan_begin_src == TRIG_TIMER &&
- cmd->convert_src == TRIG_TIMER) {
- devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0;
- } else if (cmd->start_src == TRIG_NOW &&
- cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0;
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_TIMER &&
- cmd->convert_src == TRIG_TIMER) {
- devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_1;
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_1;
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_EXT &&
- cmd->convert_src == TRIG_TIMER) {
- devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_2;
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_EXT &&
- cmd->convert_src == TRIG_EXT) {
- devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0 |
- ME4000_AI_CTRL_MODE_1;
- } else {
- err |= -EINVAL;
- }
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->chanlist_len < 1) {
- cmd->chanlist_len = 1;
- err |= -EINVAL;
- }
-
- /* Round the timer arguments */
- me4000_ai_round_cmd_args(dev, s, cmd);
-
- if (devpriv->ai_init_ticks < 66) {
- cmd->start_arg = 2000;
- err |= -EINVAL;
- }
- if (devpriv->ai_scan_ticks && devpriv->ai_scan_ticks < 67) {
- cmd->scan_begin_arg = 2031;
- err |= -EINVAL;
- }
- if (devpriv->ai_chan_ticks < 66) {
- cmd->convert_arg = 2000;
- err |= -EINVAL;
- }
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
-	/*
-	 * Step 4: check for argument conflicts.
-	 */
- if (cmd->start_src == TRIG_NOW &&
- cmd->scan_begin_src == TRIG_TIMER &&
- cmd->convert_src == TRIG_TIMER) {
- /* Check timer arguments */
- if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid start arg\n");
- cmd->start_arg = 2000; /* 66 ticks at least */
- err++;
- }
- if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid convert arg\n");
- cmd->convert_arg = 2000; /* 66 ticks at least */
- err++;
- }
- if (devpriv->ai_scan_ticks <=
- cmd->chanlist_len * devpriv->ai_chan_ticks) {
- dev_err(dev->class_dev, "Invalid scan end arg\n");
-
- /* At least one tick more */
- cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
- err++;
- }
- } else if (cmd->start_src == TRIG_NOW &&
- cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- /* Check timer arguments */
- if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid start arg\n");
- cmd->start_arg = 2000; /* 66 ticks at least */
- err++;
- }
- if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid convert arg\n");
- cmd->convert_arg = 2000; /* 66 ticks at least */
- err++;
- }
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_TIMER &&
- cmd->convert_src == TRIG_TIMER) {
- /* Check timer arguments */
- if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid start arg\n");
- cmd->start_arg = 2000; /* 66 ticks at least */
- err++;
- }
- if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid convert arg\n");
- cmd->convert_arg = 2000; /* 66 ticks at least */
- err++;
- }
- if (devpriv->ai_scan_ticks <=
- cmd->chanlist_len * devpriv->ai_chan_ticks) {
- dev_err(dev->class_dev, "Invalid scan end arg\n");
-
- /* At least one tick more */
- cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
- err++;
- }
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- /* Check timer arguments */
- if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid start arg\n");
- cmd->start_arg = 2000; /* 66 ticks at least */
- err++;
- }
- if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid convert arg\n");
- cmd->convert_arg = 2000; /* 66 ticks at least */
- err++;
- }
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_EXT &&
- cmd->convert_src == TRIG_TIMER) {
- /* Check timer arguments */
- if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid start arg\n");
- cmd->start_arg = 2000; /* 66 ticks at least */
- err++;
- }
- if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid convert arg\n");
- cmd->convert_arg = 2000; /* 66 ticks at least */
- err++;
- }
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_EXT &&
- cmd->convert_src == TRIG_EXT) {
- /* Check timer arguments */
- if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
- dev_err(dev->class_dev, "Invalid start arg\n");
- cmd->start_arg = 2000; /* 66 ticks at least */
- err++;
- }
- }
- if (cmd->scan_end_src == TRIG_COUNT) {
- if (cmd->scan_end_arg == 0) {
- dev_err(dev->class_dev, "Invalid scan end arg\n");
- cmd->scan_end_arg = 1;
- err++;
- }
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= me4000_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
-{
- unsigned int tmp;
- struct comedi_device *dev = dev_id;
- struct comedi_subdevice *s = dev->read_subdev;
- int i;
- int c = 0;
- unsigned int lval;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
- ME4000_IRQ_STATUS_AI_HF) {
- /* Read status register to find out what happened */
- tmp = inl(dev->iobase + ME4000_AI_STATUS_REG);
-
- if (!(tmp & ME4000_AI_STATUS_FF_DATA) &&
- !(tmp & ME4000_AI_STATUS_HF_DATA) &&
- (tmp & ME4000_AI_STATUS_EF_DATA)) {
- dev_err(dev->class_dev, "FIFO overflow\n");
- s->async->events |= COMEDI_CB_ERROR;
- c = ME4000_AI_FIFO_COUNT;
- } else if ((tmp & ME4000_AI_STATUS_FF_DATA) &&
- !(tmp & ME4000_AI_STATUS_HF_DATA) &&
- (tmp & ME4000_AI_STATUS_EF_DATA)) {
- c = ME4000_AI_FIFO_COUNT / 2;
- } else {
- dev_err(dev->class_dev, "Undefined FIFO state\n");
- s->async->events |= COMEDI_CB_ERROR;
- c = 0;
- }
-
- for (i = 0; i < c; i++) {
- lval = me4000_ai_get_sample(dev, s);
- if (!comedi_buf_write_samples(s, &lval, 1))
- break;
- }
-
- /* Work is done, so reset the interrupt */
- tmp |= ME4000_AI_CTRL_HF_IRQ_RESET;
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- tmp &= ~ME4000_AI_CTRL_HF_IRQ_RESET;
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- }
-
- if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
- ME4000_IRQ_STATUS_SC) {
- /* Acquisition is complete */
- s->async->events |= COMEDI_CB_EOA;
-
- /* Poll data until fifo empty */
- while (inl(dev->iobase + ME4000_AI_STATUS_REG) &
- ME4000_AI_STATUS_EF_DATA) {
- lval = me4000_ai_get_sample(dev, s);
- if (!comedi_buf_write_samples(s, &lval, 1))
- break;
- }
-
- /* Work is done, so reset the interrupt */
- tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
- tmp |= ME4000_AI_CTRL_SC_IRQ_RESET;
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- tmp &= ~ME4000_AI_CTRL_SC_IRQ_RESET;
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- }
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int me4000_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int tmp;
-
- /* Stop any running conversion */
- tmp = inl(dev->iobase + ME4000_AO_CTRL_REG(chan));
- tmp |= ME4000_AO_CTRL_IMMEDIATE_STOP;
- outl(tmp, dev->iobase + ME4000_AO_CTRL_REG(chan));
-
- /* Clear control register and set to single mode */
- outl(0x0, dev->iobase + ME4000_AO_CTRL_REG(chan));
-
- /* Write data value */
- outl(data[0], dev->iobase + ME4000_AO_SINGLE_REG(chan));
-
- /* Store in the mirror */
- s->readback[chan] = data[0];
-
- return 1;
-}
-
-static int me4000_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- outl((s->state >> 0) & 0xFF,
- dev->iobase + ME4000_DIO_PORT_0_REG);
- outl((s->state >> 8) & 0xFF,
- dev->iobase + ME4000_DIO_PORT_1_REG);
- outl((s->state >> 16) & 0xFF,
- dev->iobase + ME4000_DIO_PORT_2_REG);
- outl((s->state >> 24) & 0xFF,
- dev->iobase + ME4000_DIO_PORT_3_REG);
- }
-
- data[1] = ((inl(dev->iobase + ME4000_DIO_PORT_0_REG) & 0xFF) << 0) |
- ((inl(dev->iobase + ME4000_DIO_PORT_1_REG) & 0xFF) << 8) |
- ((inl(dev->iobase + ME4000_DIO_PORT_2_REG) & 0xFF) << 16) |
- ((inl(dev->iobase + ME4000_DIO_PORT_3_REG) & 0xFF) << 24);
-
- return insn->n;
-}
-
-static int me4000_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- unsigned int tmp;
- int ret;
-
- if (chan < 8)
- mask = 0x000000ff;
- else if (chan < 16)
- mask = 0x0000ff00;
- else if (chan < 24)
- mask = 0x00ff0000;
- else
- mask = 0xff000000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- tmp = inl(dev->iobase + ME4000_DIO_CTRL_REG);
- tmp &= ~(ME4000_DIO_CTRL_MODE_0 | ME4000_DIO_CTRL_MODE_1 |
- ME4000_DIO_CTRL_MODE_2 | ME4000_DIO_CTRL_MODE_3 |
- ME4000_DIO_CTRL_MODE_4 | ME4000_DIO_CTRL_MODE_5 |
- ME4000_DIO_CTRL_MODE_6 | ME4000_DIO_CTRL_MODE_7);
- if (s->io_bits & 0x000000ff)
- tmp |= ME4000_DIO_CTRL_MODE_0;
- if (s->io_bits & 0x0000ff00)
- tmp |= ME4000_DIO_CTRL_MODE_2;
- if (s->io_bits & 0x00ff0000)
- tmp |= ME4000_DIO_CTRL_MODE_4;
- if (s->io_bits & 0xff000000)
- tmp |= ME4000_DIO_CTRL_MODE_6;
-
- /*
- * Check for optoisolated ME-4000 version.
-	 * If so, the first port is a fixed output
-	 * port and the second is a fixed input port.
- */
- if (inl(dev->iobase + ME4000_DIO_DIR_REG)) {
- s->io_bits |= 0x000000ff;
- s->io_bits &= ~0x0000ff00;
- tmp |= ME4000_DIO_CTRL_MODE_0;
- tmp &= ~(ME4000_DIO_CTRL_MODE_2 | ME4000_DIO_CTRL_MODE_3);
- }
-
- outl(tmp, dev->iobase + ME4000_DIO_CTRL_REG);
-
- return insn->n;
-}
-
-static int me4000_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct me4000_board *board = NULL;
- struct me4000_private *devpriv;
- struct comedi_subdevice *s;
- int result;
-
- if (context < ARRAY_SIZE(me4000_boards))
- board = &me4000_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- result = comedi_pci_enable(dev);
- if (result)
- return result;
-
- devpriv->plx_regbase = pci_resource_start(pcidev, 1);
- dev->iobase = pci_resource_start(pcidev, 2);
- if (!devpriv->plx_regbase || !dev->iobase)
- return -ENODEV;
-
- result = comedi_load_firmware(dev, &pcidev->dev, ME4000_FIRMWARE,
- me4000_xilinx_download, 0);
- if (result < 0)
- return result;
-
- me4000_reset(dev);
-
- if (pcidev->irq > 0) {
- result = request_irq(pcidev->irq, me4000_ai_isr, IRQF_SHARED,
- dev->board_name, dev);
- if (result == 0) {
- dev->irq = pcidev->irq;
-
- /* Enable interrupts on the PLX */
- outl(PLX9052_INTCSR_LI1ENAB | PLX9052_INTCSR_LI1POL |
- PLX9052_INTCSR_PCIENAB,
- devpriv->plx_regbase + PLX9052_INTCSR);
- }
- }
-
- result = comedi_alloc_subdevices(dev, 4);
- if (result)
- return result;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND;
- if (board->can_do_diff_ai)
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = board->ai_nchan;
- s->maxdata = 0xffff;
- s->len_chanlist = ME4000_AI_CHANNEL_LIST_COUNT;
- s->range_table = &me4000_ai_range;
- s->insn_read = me4000_ai_insn_read;
-
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->cancel = me4000_ai_cancel;
- s->do_cmdtest = me4000_ai_do_cmd_test;
- s->do_cmd = me4000_ai_do_cmd;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- if (board->has_ao) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->range_table = &range_bipolar10;
- s->insn_write = me4000_ao_insn_write;
-
- result = comedi_alloc_subdev_readback(s);
- if (result)
- return result;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = me4000_dio_insn_bits;
- s->insn_config = me4000_dio_insn_config;
-
- /*
-	 * Check for optoisolated ME-4000 version. If so, the first
-	 * port is a fixed output port and the second is a fixed input port.
- */
- if (!inl(dev->iobase + ME4000_DIO_DIR_REG)) {
- s->io_bits |= 0xFF;
- outl(ME4000_DIO_CTRL_MODE_0,
- dev->iobase + ME4000_DIO_DIR_REG);
- }
-
- /* Counter subdevice (8254) */
- s = &dev->subdevices[3];
- if (board->has_counter) {
- unsigned long timer_base = pci_resource_start(pcidev, 3);
-
- if (!timer_base)
- return -ENODEV;
-
- dev->pacer = comedi_8254_init(timer_base, 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- comedi_8254_subdevice_init(s, dev->pacer);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- return 0;
-}
-
-static void me4000_detach(struct comedi_device *dev)
-{
- if (dev->irq) {
- struct me4000_private *devpriv = dev->private;
-
- /* Disable interrupts on the PLX */
- outl(0, devpriv->plx_regbase + PLX9052_INTCSR);
- }
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver me4000_driver = {
- .driver_name = "me4000",
- .module = THIS_MODULE,
- .auto_attach = me4000_auto_attach,
- .detach = me4000_detach,
-};
-
-static int me4000_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &me4000_driver, id->driver_data);
-}
-
-static const struct pci_device_id me4000_pci_table[] = {
- { PCI_VDEVICE(MEILHAUS, 0x4650), BOARD_ME4650 },
- { PCI_VDEVICE(MEILHAUS, 0x4660), BOARD_ME4660 },
- { PCI_VDEVICE(MEILHAUS, 0x4661), BOARD_ME4660I },
- { PCI_VDEVICE(MEILHAUS, 0x4662), BOARD_ME4660S },
- { PCI_VDEVICE(MEILHAUS, 0x4663), BOARD_ME4660IS },
- { PCI_VDEVICE(MEILHAUS, 0x4670), BOARD_ME4670 },
- { PCI_VDEVICE(MEILHAUS, 0x4671), BOARD_ME4670I },
- { PCI_VDEVICE(MEILHAUS, 0x4672), BOARD_ME4670S },
- { PCI_VDEVICE(MEILHAUS, 0x4673), BOARD_ME4670IS },
- { PCI_VDEVICE(MEILHAUS, 0x4680), BOARD_ME4680 },
- { PCI_VDEVICE(MEILHAUS, 0x4681), BOARD_ME4680I },
- { PCI_VDEVICE(MEILHAUS, 0x4682), BOARD_ME4680S },
- { PCI_VDEVICE(MEILHAUS, 0x4683), BOARD_ME4680IS },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, me4000_pci_table);
-
-static struct pci_driver me4000_pci_driver = {
- .name = "me4000",
- .id_table = me4000_pci_table,
- .probe = me4000_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(me4000_driver, me4000_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Meilhaus ME-4000 series boards");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(ME4000_FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
deleted file mode 100644
index 9ea1ba4b1b6f..000000000000
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ /dev/null
@@ -1,582 +0,0 @@
-/*
- * comedi/drivers/me_daq.c
- * Hardware driver for Meilhaus data acquisition cards:
- * ME-2000i, ME-2600i, ME-3000vm1
- *
- * Copyright (C) 2002 Michael Hillmann <hillmann@syscongroup.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: me_daq
- * Description: Meilhaus PCI data acquisition cards
- * Devices: [Meilhaus] ME-2600i (me-2600i), ME-2000i (me-2000i)
- * Author: Michael Hillmann <hillmann@syscongroup.de>
- * Status: experimental
- *
- * Configuration options: not applicable, uses PCI auto config
- *
- * Supports:
- * Analog Input, Analog Output, Digital I/O
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-
-#include "../comedi_pci.h"
-
-#include "plx9052.h"
-
-#define ME2600_FIRMWARE "me2600_firmware.bin"
-
-#define XILINX_DOWNLOAD_RESET 0x42 /* Xilinx registers */
-
-#define ME_CONTROL_1 0x0000 /* - | W */
-#define INTERRUPT_ENABLE (1<<15)
-#define COUNTER_B_IRQ (1<<12)
-#define COUNTER_A_IRQ (1<<11)
-#define CHANLIST_READY_IRQ (1<<10)
-#define EXT_IRQ (1<<9)
-#define ADFIFO_HALFFULL_IRQ (1<<8)
-#define SCAN_COUNT_ENABLE (1<<5)
-#define SIMULTANEOUS_ENABLE (1<<4)
-#define TRIGGER_FALLING_EDGE (1<<3)
-#define CONTINUOUS_MODE (1<<2)
-#define DISABLE_ADC (0<<0)
-#define SOFTWARE_TRIGGERED_ADC (1<<0)
-#define SCAN_TRIGGERED_ADC (2<<0)
-#define EXT_TRIGGERED_ADC (3<<0)
-#define ME_ADC_START 0x0000 /* R | - */
-#define ME_CONTROL_2 0x0002 /* - | W */
-#define ENABLE_ADFIFO (1<<10)
-#define ENABLE_CHANLIST (1<<9)
-#define ENABLE_PORT_B (1<<7)
-#define ENABLE_PORT_A (1<<6)
-#define ENABLE_COUNTER_B (1<<4)
-#define ENABLE_COUNTER_A (1<<3)
-#define ENABLE_DAC (1<<1)
-#define BUFFERED_DAC (1<<0)
-#define ME_DAC_UPDATE 0x0002 /* R | - */
-#define ME_STATUS 0x0004 /* R | - */
-#define COUNTER_B_IRQ_PENDING (1<<12)
-#define COUNTER_A_IRQ_PENDING (1<<11)
-#define CHANLIST_READY_IRQ_PENDING (1<<10)
-#define EXT_IRQ_PENDING (1<<9)
-#define ADFIFO_HALFFULL_IRQ_PENDING (1<<8)
-#define ADFIFO_FULL (1<<4)
-#define ADFIFO_HALFFULL (1<<3)
-#define ADFIFO_EMPTY (1<<2)
-#define CHANLIST_FULL (1<<1)
-#define FST_ACTIVE (1<<0)
-#define ME_RESET_INTERRUPT 0x0004 /* - | W */
-#define ME_DIO_PORT_A 0x0006 /* R | W */
-#define ME_DIO_PORT_B 0x0008 /* R | W */
-#define ME_TIMER_DATA_0 0x000A /* - | W */
-#define ME_TIMER_DATA_1 0x000C /* - | W */
-#define ME_TIMER_DATA_2 0x000E /* - | W */
-#define ME_CHANNEL_LIST 0x0010 /* - | W */
-#define ADC_UNIPOLAR (1<<6)
-#define ADC_GAIN_0 (0<<4)
-#define ADC_GAIN_1 (1<<4)
-#define ADC_GAIN_2 (2<<4)
-#define ADC_GAIN_3 (3<<4)
-#define ME_READ_AD_FIFO 0x0010 /* R | - */
-#define ME_DAC_CONTROL 0x0012 /* - | W */
-#define DAC_UNIPOLAR_D (0<<4)
-#define DAC_BIPOLAR_D (1<<4)
-#define DAC_UNIPOLAR_C (0<<5)
-#define DAC_BIPOLAR_C (1<<5)
-#define DAC_UNIPOLAR_B (0<<6)
-#define DAC_BIPOLAR_B (1<<6)
-#define DAC_UNIPOLAR_A (0<<7)
-#define DAC_BIPOLAR_A (1<<7)
-#define DAC_GAIN_0_D (0<<8)
-#define DAC_GAIN_1_D (1<<8)
-#define DAC_GAIN_0_C (0<<9)
-#define DAC_GAIN_1_C (1<<9)
-#define DAC_GAIN_0_B (0<<10)
-#define DAC_GAIN_1_B (1<<10)
-#define DAC_GAIN_0_A (0<<11)
-#define DAC_GAIN_1_A (1<<11)
-#define ME_DAC_CONTROL_UPDATE 0x0012 /* R | - */
-#define ME_DAC_DATA_A 0x0014 /* - | W */
-#define ME_DAC_DATA_B 0x0016 /* - | W */
-#define ME_DAC_DATA_C 0x0018 /* - | W */
-#define ME_DAC_DATA_D 0x001A /* - | W */
-#define ME_COUNTER_ENDDATA_A 0x001C /* - | W */
-#define ME_COUNTER_ENDDATA_B 0x001E /* - | W */
-#define ME_COUNTER_STARTDATA_A 0x0020 /* - | W */
-#define ME_COUNTER_VALUE_A 0x0020 /* R | - */
-#define ME_COUNTER_STARTDATA_B 0x0022 /* - | W */
-#define ME_COUNTER_VALUE_B 0x0022 /* R | - */
-
-static const struct comedi_lrange me_ai_range = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange me_ao_range = {
- 3, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-enum me_boardid {
- BOARD_ME2600,
- BOARD_ME2000,
-};
-
-struct me_board {
- const char *name;
- int needs_firmware;
- int has_ao;
-};
-
-static const struct me_board me_boards[] = {
- [BOARD_ME2600] = {
- .name = "me-2600i",
- .needs_firmware = 1,
- .has_ao = 1,
- },
- [BOARD_ME2000] = {
- .name = "me-2000i",
- },
-};
-
-struct me_private_data {
- void __iomem *plx_regbase; /* PLX configuration base address */
-
- unsigned short control_1; /* Mirror of CONTROL_1 register */
- unsigned short control_2; /* Mirror of CONTROL_2 register */
- unsigned short dac_control; /* Mirror of the DAC_CONTROL register */
-};
-
-static inline void sleep(unsigned sec)
-{
- schedule_timeout_interruptible(sec * HZ);
-}
-
-static int me_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct me_private_data *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 16)
- mask = 0x0000ffff;
- else
- mask = 0xffff0000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- if (s->io_bits & 0x0000ffff)
- devpriv->control_2 |= ENABLE_PORT_A;
- else
- devpriv->control_2 &= ~ENABLE_PORT_A;
- if (s->io_bits & 0xffff0000)
- devpriv->control_2 |= ENABLE_PORT_B;
- else
- devpriv->control_2 &= ~ENABLE_PORT_B;
-
- writew(devpriv->control_2, dev->mmio + ME_CONTROL_2);
-
- return insn->n;
-}
-
-static int me_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- void __iomem *mmio_porta = dev->mmio + ME_DIO_PORT_A;
- void __iomem *mmio_portb = dev->mmio + ME_DIO_PORT_B;
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0x0000ffff)
- writew((s->state & 0xffff), mmio_porta);
- if (mask & 0xffff0000)
- writew(((s->state >> 16) & 0xffff), mmio_portb);
- }
-
- if (s->io_bits & 0x0000ffff)
- val = s->state & 0xffff;
- else
- val = readw(mmio_porta);
-
- if (s->io_bits & 0xffff0000)
- val |= (s->state & 0xffff0000);
- else
- val |= (readw(mmio_portb) << 16);
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int me_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readw(dev->mmio + ME_STATUS);
- if ((status & 0x0004) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int me_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct me_private_data *dev_private = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int rang = CR_RANGE(insn->chanspec);
- unsigned int aref = CR_AREF(insn->chanspec);
- unsigned short val;
- int ret;
-
- /* stop any running conversion */
- dev_private->control_1 &= 0xFFFC;
- writew(dev_private->control_1, dev->mmio + ME_CONTROL_1);
-
- /* clear chanlist and ad fifo */
- dev_private->control_2 &= ~(ENABLE_ADFIFO | ENABLE_CHANLIST);
- writew(dev_private->control_2, dev->mmio + ME_CONTROL_2);
-
- /* reset any pending interrupt */
- writew(0x00, dev->mmio + ME_RESET_INTERRUPT);
-
- /* enable the chanlist and ADC fifo */
- dev_private->control_2 |= (ENABLE_ADFIFO | ENABLE_CHANLIST);
- writew(dev_private->control_2, dev->mmio + ME_CONTROL_2);
-
- /* write to channel list fifo */
- val = chan & 0x0f; /* b3:b0 channel */
- val |= (rang & 0x03) << 4; /* b5:b4 gain */
- val |= (rang & 0x04) << 4; /* b6 polarity */
- val |= ((aref & AREF_DIFF) ? 0x80 : 0); /* b7 differential */
- writew(val & 0xff, dev->mmio + ME_CHANNEL_LIST);
-
- /* set ADC mode to software trigger */
- dev_private->control_1 |= SOFTWARE_TRIGGERED_ADC;
- writew(dev_private->control_1, dev->mmio + ME_CONTROL_1);
-
- /* start conversion by reading from ADC_START */
- readw(dev->mmio + ME_ADC_START);
-
- /* wait for ADC fifo not empty flag */
- ret = comedi_timeout(dev, s, insn, me_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* get value from ADC fifo */
- val = readw(dev->mmio + ME_READ_AD_FIFO);
- val = (val ^ 0x800) & 0x0fff;
- data[0] = val;
-
- /* stop any running conversion */
- dev_private->control_1 &= 0xFFFC;
- writew(dev_private->control_1, dev->mmio + ME_CONTROL_1);
-
- return 1;
-}
-
-static int me_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct me_private_data *dev_private = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int rang = CR_RANGE(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- /* Enable all DAC */
- dev_private->control_2 |= ENABLE_DAC;
- writew(dev_private->control_2, dev->mmio + ME_CONTROL_2);
-
- /* and set DAC to "buffered" mode */
- dev_private->control_2 |= BUFFERED_DAC;
- writew(dev_private->control_2, dev->mmio + ME_CONTROL_2);
-
- /* Set dac-control register */
- for (i = 0; i < insn->n; i++) {
- /* clear bits for this channel */
- dev_private->dac_control &= ~(0x0880 >> chan);
- if (rang == 0)
- dev_private->dac_control |=
- ((DAC_BIPOLAR_A | DAC_GAIN_1_A) >> chan);
- else if (rang == 1)
- dev_private->dac_control |=
- ((DAC_BIPOLAR_A | DAC_GAIN_0_A) >> chan);
- }
- writew(dev_private->dac_control, dev->mmio + ME_DAC_CONTROL);
-
- /* Update dac-control register */
- readw(dev->mmio + ME_DAC_CONTROL_UPDATE);
-
- /* Set data register */
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- writew(val, dev->mmio + ME_DAC_DATA_A + (chan << 1));
- }
- s->readback[chan] = val;
-
- /* Update dac with data registers */
- readw(dev->mmio + ME_DAC_UPDATE);
-
- return insn->n;
-}
-
-static int me2600_xilinx_download(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
-{
- struct me_private_data *dev_private = dev->private;
- unsigned int value;
- unsigned int file_length;
- unsigned int i;
-
- /* disable irq's on PLX */
- writel(0x00, dev_private->plx_regbase + PLX9052_INTCSR);
-
- /* First, make a dummy read to reset xilinx */
- value = readw(dev->mmio + XILINX_DOWNLOAD_RESET);
-
- /* Wait until reset is over */
- sleep(1);
-
- /* Write a dummy value to Xilinx */
- writeb(0x00, dev->mmio + 0x0);
- sleep(1);
-
- /*
- * Format of the firmware
- * Build longs from the byte-wise coded header
- * Byte 1-3: length of the array
- * Byte 4-7: version
- * Byte 8-11: date
- * Byte 12-15: reserved
- */
- if (size < 16)
- return -EINVAL;
-
- file_length = (((unsigned int)data[0] & 0xff) << 24) +
- (((unsigned int)data[1] & 0xff) << 16) +
- (((unsigned int)data[2] & 0xff) << 8) +
- ((unsigned int)data[3] & 0xff);
-
- /*
- * Loop for writing firmware byte by byte to xilinx
- * Firmware data start at offset 16
- */
- for (i = 0; i < file_length; i++)
- writeb((data[16 + i] & 0xff), dev->mmio + 0x0);
-
- /* Write 5 dummy values to xilinx */
- for (i = 0; i < 5; i++)
- writeb(0x00, dev->mmio + 0x0);
-
-	/* Test if there was an error during download -> INTB was asserted */
- value = readl(dev_private->plx_regbase + PLX9052_INTCSR);
- if (value & PLX9052_INTCSR_LI2STAT) {
- /* Disable interrupt */
- writel(0x00, dev_private->plx_regbase + PLX9052_INTCSR);
- dev_err(dev->class_dev, "Xilinx download failed\n");
- return -EIO;
- }
-
- /* Wait until the Xilinx is ready for real work */
- sleep(1);
-
- /* Enable PLX-Interrupts */
- writel(PLX9052_INTCSR_LI1ENAB |
- PLX9052_INTCSR_LI1POL |
- PLX9052_INTCSR_PCIENAB,
- dev_private->plx_regbase + PLX9052_INTCSR);
-
- return 0;
-}
-
-static int me_reset(struct comedi_device *dev)
-{
- struct me_private_data *dev_private = dev->private;
-
- /* Reset board */
- writew(0x00, dev->mmio + ME_CONTROL_1);
- writew(0x00, dev->mmio + ME_CONTROL_2);
- writew(0x00, dev->mmio + ME_RESET_INTERRUPT);
- writew(0x00, dev->mmio + ME_DAC_CONTROL);
-
- /* Save values in the board context */
- dev_private->dac_control = 0;
- dev_private->control_1 = 0;
- dev_private->control_2 = 0;
-
- return 0;
-}
-
-static int me_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct me_board *board = NULL;
- struct me_private_data *dev_private;
- struct comedi_subdevice *s;
- int ret;
-
- if (context < ARRAY_SIZE(me_boards))
- board = &me_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
- if (!dev_private)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev_private->plx_regbase = pci_ioremap_bar(pcidev, 0);
- if (!dev_private->plx_regbase)
- return -ENOMEM;
-
- dev->mmio = pci_ioremap_bar(pcidev, 2);
- if (!dev->mmio)
- return -ENOMEM;
-
- /* Download firmware and reset card */
- if (board->needs_firmware) {
- ret = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
- ME2600_FIRMWARE,
- me2600_xilinx_download, 0);
- if (ret < 0)
- return ret;
- }
- me_reset(dev);
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_COMMON;
- s->n_chan = 16;
- s->maxdata = 0x0fff;
- s->len_chanlist = 16;
- s->range_table = &me_ai_range;
- s->insn_read = me_ai_insn_read;
-
- s = &dev->subdevices[1];
- if (board->has_ao) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->len_chanlist = 4;
- s->range_table = &me_ao_range;
- s->insn_write = me_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 32;
- s->maxdata = 1;
- s->len_chanlist = 32;
- s->range_table = &range_digital;
- s->insn_bits = me_dio_insn_bits;
- s->insn_config = me_dio_insn_config;
-
- return 0;
-}
-
-static void me_detach(struct comedi_device *dev)
-{
- struct me_private_data *dev_private = dev->private;
-
- if (dev_private) {
- if (dev->mmio)
- me_reset(dev);
- if (dev_private->plx_regbase)
- iounmap(dev_private->plx_regbase);
- }
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver me_daq_driver = {
- .driver_name = "me_daq",
- .module = THIS_MODULE,
- .auto_attach = me_auto_attach,
- .detach = me_detach,
-};
-
-static int me_daq_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &me_daq_driver, id->driver_data);
-}
-
-static const struct pci_device_id me_daq_pci_table[] = {
- { PCI_VDEVICE(MEILHAUS, 0x2600), BOARD_ME2600 },
- { PCI_VDEVICE(MEILHAUS, 0x2000), BOARD_ME2000 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, me_daq_pci_table);
-
-static struct pci_driver me_daq_pci_driver = {
- .name = "me_daq",
- .id_table = me_daq_pci_table,
- .probe = me_daq_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(me_daq_driver, me_daq_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(ME2600_FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
deleted file mode 100644
index a675e2ef9b45..000000000000
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * comedi/drivers/mf6x4.c
- * Driver for Humusoft MF634 and MF624 Data acquisition cards
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-/*
- * Driver: mf6x4
- * Description: Humusoft MF634 and MF624 Data acquisition card driver
- * Devices: [Humusoft] MF634 (mf634), MF624 (mf624)
- * Author: Rostislav Lisovy <lisovy@gmail.com>
- * Status: works
- * Updated:
- * Configuration Options: none
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
-/* Registers present in BAR0 memory region */
-#define MF624_GPIOC_R 0x54
-
-#define MF6X4_GPIOC_EOLC /* End Of Last Conversion */ (1 << 17)
-#define MF6X4_GPIOC_LDAC /* Load DACs */ (1 << 23)
-#define MF6X4_GPIOC_DACEN (1 << 26)
-
-/* BAR1 registers */
-#define MF6X4_DIN_R 0x10
-#define MF6X4_DIN_M 0xff
-#define MF6X4_DOUT_R 0x10
-#define MF6X4_DOUT_M 0xff
-
-#define MF6X4_ADSTART_R 0x20
-#define MF6X4_ADDATA_R 0x00
-#define MF6X4_ADCTRL_R 0x00
-#define MF6X4_ADCTRL_M 0xff
-
-#define MF6X4_DA0_R 0x20
-#define MF6X4_DA1_R 0x22
-#define MF6X4_DA2_R 0x24
-#define MF6X4_DA3_R 0x26
-#define MF6X4_DA4_R 0x28
-#define MF6X4_DA5_R 0x2a
-#define MF6X4_DA6_R 0x2c
-#define MF6X4_DA7_R 0x2e
-/* Map DAC channel id to real HW-dependent offset value */
-#define MF6X4_DAC_R(x) (0x20 + ((x) * 2))
-
-/* BAR2 registers */
-#define MF634_GPIOC_R 0x68
-
-enum mf6x4_boardid {
- BOARD_MF634,
- BOARD_MF624,
-};
-
-struct mf6x4_board {
- const char *name;
- unsigned int bar_nums[3]; /* We need to keep track of the
- order of BARs used by the cards */
-};
-
-static const struct mf6x4_board mf6x4_boards[] = {
- [BOARD_MF634] = {
- .name = "mf634",
- .bar_nums = {0, 2, 3},
- },
- [BOARD_MF624] = {
- .name = "mf624",
- .bar_nums = {0, 2, 4},
- },
-};
-
-struct mf6x4_private {
- /*
- * Documentation for both MF634 and MF624 describes registers
- * present in BAR0, 1 and 2 regions.
- * The real (i.e. in HW) BAR numbers are different for MF624
- * and MF634 yet we will call them 0, 1, 2
- */
- void __iomem *bar0_mem;
- void __iomem *bar2_mem;
-
- /*
- * This configuration register has the same function and fields
-	 * for both cards; however, it lies in different BARs at different
-	 * offsets -- this variable makes the access easier
- */
- void __iomem *gpioc_R;
-};
-
-static int mf6x4_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = ioread16(dev->mmio + MF6X4_DIN_R) & MF6X4_DIN_M;
-
- return insn->n;
-}
-
-static int mf6x4_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- iowrite16(s->state & MF6X4_DOUT_M, dev->mmio + MF6X4_DOUT_R);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int mf6x4_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- struct mf6x4_private *devpriv = dev->private;
- unsigned int status;
-
- status = ioread32(devpriv->gpioc_R);
- if (status & MF6X4_GPIOC_EOLC)
- return 0;
- return -EBUSY;
-}
-
-static int mf6x4_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int chan = CR_CHAN(insn->chanspec);
- int ret;
- int i;
- int d;
-
- /* Set the ADC channel number in the scan list */
- iowrite16((1 << chan) & MF6X4_ADCTRL_M, dev->mmio + MF6X4_ADCTRL_R);
-
- for (i = 0; i < insn->n; i++) {
- /* Trigger ADC conversion by reading ADSTART */
- ioread16(dev->mmio + MF6X4_ADSTART_R);
-
- ret = comedi_timeout(dev, s, insn, mf6x4_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* Read the actual value */
- d = ioread16(dev->mmio + MF6X4_ADDATA_R);
- d &= s->maxdata;
- data[i] = d;
- }
-
- iowrite16(0x0, dev->mmio + MF6X4_ADCTRL_R);
-
- return insn->n;
-}
-
-static int mf6x4_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct mf6x4_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- uint32_t gpioc;
- int i;
-
- /* Enable instantaneous update of converters outputs + Enable DACs */
- gpioc = ioread32(devpriv->gpioc_R);
- iowrite32((gpioc & ~MF6X4_GPIOC_LDAC) | MF6X4_GPIOC_DACEN,
- devpriv->gpioc_R);
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- iowrite16(val, dev->mmio + MF6X4_DAC_R(chan));
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int mf6x4_auto_attach(struct comedi_device *dev, unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct mf6x4_board *board = NULL;
- struct mf6x4_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- if (context < ARRAY_SIZE(mf6x4_boards))
- board = &mf6x4_boards[context];
- else
- return -ENODEV;
-
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- devpriv->bar0_mem = pci_ioremap_bar(pcidev, board->bar_nums[0]);
- if (!devpriv->bar0_mem)
- return -ENODEV;
-
- dev->mmio = pci_ioremap_bar(pcidev, board->bar_nums[1]);
- if (!dev->mmio)
- return -ENODEV;
-
- devpriv->bar2_mem = pci_ioremap_bar(pcidev, board->bar_nums[2]);
- if (!devpriv->bar2_mem)
- return -ENODEV;
-
- if (board == &mf6x4_boards[BOARD_MF634])
- devpriv->gpioc_R = devpriv->bar2_mem + MF634_GPIOC_R;
- else
- devpriv->gpioc_R = devpriv->bar0_mem + MF624_GPIOC_R;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* ADC */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8;
- s->maxdata = 0x3fff; /* 14 bits ADC */
- s->range_table = &range_bipolar10;
- s->insn_read = mf6x4_ai_insn_read;
-
- /* DAC */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 0x3fff; /* 14 bits DAC */
- s->range_table = &range_bipolar10;
- s->insn_write = mf6x4_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* DIN */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = mf6x4_di_insn_bits;
-
- /* DOUT */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = mf6x4_do_insn_bits;
-
- return 0;
-}
-
-static void mf6x4_detach(struct comedi_device *dev)
-{
- struct mf6x4_private *devpriv = dev->private;
-
- if (devpriv) {
- if (devpriv->bar0_mem)
- iounmap(devpriv->bar0_mem);
- if (devpriv->bar2_mem)
- iounmap(devpriv->bar2_mem);
- }
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver mf6x4_driver = {
- .driver_name = "mf6x4",
- .module = THIS_MODULE,
- .auto_attach = mf6x4_auto_attach,
- .detach = mf6x4_detach,
-};
-
-static int mf6x4_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &mf6x4_driver, id->driver_data);
-}
-
-static const struct pci_device_id mf6x4_pci_table[] = {
- { PCI_VDEVICE(HUMUSOFT, 0x0634), BOARD_MF634 },
- { PCI_VDEVICE(HUMUSOFT, 0x0624), BOARD_MF624 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, mf6x4_pci_table);
-
-static struct pci_driver mf6x4_pci_driver = {
- .name = "mf6x4",
- .id_table = mf6x4_pci_table,
- .probe = mf6x4_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-
-module_comedi_pci_driver(mf6x4_driver, mf6x4_pci_driver);
-
-MODULE_AUTHOR("Rostislav Lisovy <lisovy@gmail.com>");
-MODULE_DESCRIPTION("Comedi MF634 and MF624 DAQ cards driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
deleted file mode 100644
index fa7ae2c04556..000000000000
--- a/drivers/staging/comedi/drivers/mite.c
+++ /dev/null
@@ -1,628 +0,0 @@
-/*
- * comedi/drivers/mite.c
- * Hardware driver for NI Mite PCI interface chip
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * The PCI-MIO E series driver was originally written by
- * Tomasz Motylewski <...>, and ported to comedi by ds.
- *
- * References for specifications:
- *
- * 321747b.pdf Register Level Programmer Manual (obsolete)
- * 321747c.pdf Register Level Programmer Manual (new)
- * DAQ-STC reference manual
- *
- * Other possibly relevant info:
- *
- * 320517c.pdf User manual (obsolete)
- * 320517f.pdf User manual (new)
- * 320889a.pdf delete
- * 320906c.pdf maximum signal ratings
- * 321066a.pdf about 16x
- * 321791a.pdf discontinuation of at-mio-16e-10 rev. c
- * 321808a.pdf about at-mio-16e-10 rev P
- * 321837a.pdf discontinuation of at-mio-16de-10 rev d
- * 321838a.pdf about at-mio-16de-10 rev N
- *
- * ISSUES:
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "../comedi_pci.h"
-
-#include "mite.h"
-
-#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
-
-struct mite_struct *mite_alloc(struct pci_dev *pcidev)
-{
- struct mite_struct *mite;
- unsigned int i;
-
- mite = kzalloc(sizeof(*mite), GFP_KERNEL);
- if (mite) {
- spin_lock_init(&mite->lock);
- mite->pcidev = pcidev;
- for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
- mite->channels[i].mite = mite;
- mite->channels[i].channel = i;
- mite->channels[i].done = 1;
- }
- }
- return mite;
-}
-EXPORT_SYMBOL_GPL(mite_alloc);
-
-static void dump_chip_signature(u32 csigr_bits)
-{
- pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
- mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
- mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
- pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
- mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
- mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
-}
-
-static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
-{
- unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
- unsigned empty_count = (fcr_bits >> 16) & 0xff;
- unsigned full_count = fcr_bits & 0xff;
-
- return empty_count + full_count;
-}
-
-int mite_setup2(struct comedi_device *dev,
- struct mite_struct *mite, bool use_win1)
-{
- unsigned long length;
- int i;
- u32 csigr_bits;
- unsigned unknown_dma_burst_bits;
-
- pci_set_master(mite->pcidev);
-
- mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
- if (!mite->mite_io_addr) {
- dev_err(dev->class_dev,
- "Failed to remap mite io memory address\n");
- return -ENOMEM;
- }
- mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);
-
- dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
- if (!dev->mmio) {
- dev_err(dev->class_dev,
- "Failed to remap daq io memory address\n");
- return -ENOMEM;
- }
- mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
- length = pci_resource_len(mite->pcidev, 1);
-
- if (use_win1) {
- writel(0, mite->mite_io_addr + MITE_IODWBSR);
- dev_info(dev->class_dev,
- "using I/O Window Base Size register 1\n");
- writel(mite->daq_phys_addr | WENAB |
- MITE_IODWBSR_1_WSIZE_bits(length),
- mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0, mite->mite_io_addr + MITE_IODWCR_1);
- } else {
- writel(mite->daq_phys_addr | WENAB,
- mite->mite_io_addr + MITE_IODWBSR);
- }
- /*
- * Make sure dma bursts work. I got this from running a bus analyzer
- * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
- * of 0x61f and bursts worked. 6281 powered up with register value of
- * 0x1f and bursts didn't work. The NI windows driver reads the
- * register, then does a bitwise-or of 0x600 with it and writes it back.
- */
- unknown_dma_burst_bits =
- readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
- unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
- writel(unknown_dma_burst_bits,
- mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
-
- csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
- mite->num_channels = mite_csigr_dmac(csigr_bits);
- if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
- dev_warn(dev->class_dev,
- "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
- mite->num_channels, MAX_MITE_DMA_CHANNELS);
- mite->num_channels = MAX_MITE_DMA_CHANNELS;
- }
- dump_chip_signature(csigr_bits);
- for (i = 0; i < mite->num_channels; i++) {
- writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
- /* disable interrupts */
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(i));
- }
- mite->fifo_size = mite_fifo_size(mite, 0);
- dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
- return 0;
-}
-EXPORT_SYMBOL_GPL(mite_setup2);
-
-void mite_detach(struct mite_struct *mite)
-{
- if (!mite)
- return;
-
- if (mite->mite_io_addr)
- iounmap(mite->mite_io_addr);
-
- kfree(mite);
-}
-EXPORT_SYMBOL_GPL(mite_detach);
-
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
-{
- struct mite_dma_descriptor_ring *ring =
- kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);
-
- if (!ring)
- return NULL;
- ring->hw_dev = get_device(&mite->pcidev->dev);
- if (!ring->hw_dev) {
- kfree(ring);
- return NULL;
- }
- ring->n_links = 0;
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- return ring;
-};
-EXPORT_SYMBOL_GPL(mite_alloc_ring);
-
-void mite_free_ring(struct mite_dma_descriptor_ring *ring)
-{
- if (ring) {
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
- }
- put_device(ring->hw_dev);
- kfree(ring);
- }
-};
-EXPORT_SYMBOL_GPL(mite_free_ring);
-
-struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite,
- struct
- mite_dma_descriptor_ring
- *ring, unsigned min_channel,
- unsigned max_channel)
-{
- int i;
- unsigned long flags;
- struct mite_channel *channel = NULL;
-
- /*
- * spin lock so mite_release_channel can be called safely
- * from interrupts
- */
- spin_lock_irqsave(&mite->lock, flags);
- for (i = min_channel; i <= max_channel; ++i) {
- if (mite->channel_allocated[i] == 0) {
- mite->channel_allocated[i] = 1;
- channel = &mite->channels[i];
- channel->ring = ring;
- break;
- }
- }
- spin_unlock_irqrestore(&mite->lock, flags);
- return channel;
-}
-EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
-
-void mite_release_channel(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
-
- /* spin lock to prevent races with mite_request_channel */
- spin_lock_irqsave(&mite->lock, flags);
- if (mite->channel_allocated[mite_chan->channel]) {
- mite_dma_disarm(mite_chan);
- mite_dma_reset(mite_chan);
- /*
-		 * disable all of the channel's interrupts (do it after disarm/reset so
- * MITE_CHCR reg isn't changed while dma is still active!)
- */
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
- CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
- CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
- mite->channel_allocated[mite_chan->channel] = 0;
- mite_chan->ring = NULL;
- mmiowb();
- }
- spin_unlock_irqrestore(&mite->lock, flags);
-}
-EXPORT_SYMBOL_GPL(mite_release_channel);
-
-void mite_dma_arm(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
- int chor;
- unsigned long flags;
-
- /*
-	 * memory barrier is intended to ensure any twiddling with the buffer
- * is done before writing to the mite to arm dma transfer
- */
- smp_mb();
- /* arm */
- chor = CHOR_START;
- spin_lock_irqsave(&mite->lock, flags);
- mite_chan->done = 0;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
- mmiowb();
- spin_unlock_irqrestore(&mite->lock, flags);
- /* mite_dma_tcr(mite, channel); */
-}
-EXPORT_SYMBOL_GPL(mite_dma_arm);
-
-/**************************************/
-
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- unsigned int n_links;
- int i;
-
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
- }
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- ring->n_links = 0;
-
- if (async->prealloc_bufsz == 0)
- return 0;
-
- n_links = async->prealloc_bufsz >> PAGE_SHIFT;
-
- ring->descriptors =
- dma_alloc_coherent(ring->hw_dev,
- n_links * sizeof(struct mite_dma_descriptor),
- &ring->descriptors_dma_addr, GFP_KERNEL);
- if (!ring->descriptors) {
- dev_err(s->device->class_dev,
- "mite: ring buffer allocation failed\n");
- return -ENOMEM;
- }
- ring->n_links = n_links;
-
- for (i = 0; i < n_links; i++) {
- ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
- ring->descriptors[i].addr =
- cpu_to_le32(async->buf_map->page_list[i].dma_addr);
- ring->descriptors[i].next =
- cpu_to_le32(ring->descriptors_dma_addr + (i +
- 1) *
- sizeof(struct mite_dma_descriptor));
- }
- ring->descriptors[n_links - 1].next =
- cpu_to_le32(ring->descriptors_dma_addr);
- /*
-	 * barrier is meant to ensure that all the writes to the dma descriptors
- * have completed before the dma controller is commanded to read them
- */
- smp_wmb();
- return 0;
-}
-EXPORT_SYMBOL_GPL(mite_buf_change);
-
-void mite_prep_dma(struct mite_channel *mite_chan,
- unsigned int num_device_bits, unsigned int num_memory_bits)
-{
- unsigned int chor, chcr, mcr, dcr, lkcr;
- struct mite_struct *mite = mite_chan->mite;
-
- /* reset DMA and FIFO */
- chor = CHOR_DMARESET | CHOR_FRESET;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
-
- /* short link chaining mode */
- chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
- CHCR_BURSTEN;
- /*
- * Link Complete Interrupt: interrupt every time a link
- * in MITE_RING is completed. This can generate a lot of
- * extra interrupts, but right now we update the values
- * of buf_int_ptr and buf_int_count at each interrupt. A
- * better method is to poll the MITE before each user
- * "read()" to calculate the number of bytes available.
- */
- chcr |= CHCR_SET_LC_IE;
- if (num_memory_bits == 32 && num_device_bits == 16) {
- /*
- * Doing a combined 32 and 16 bit byteswap gets the 16 bit
- * samples into the fifo in the right order. Tested doing 32 bit
- * memory to 16 bit device transfers to the analog out of a
- * pxi-6281, which has mite version = 1, type = 4. This also
- * works for dma reads from the counters on e-series boards.
- */
- chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
- }
- if (mite_chan->dir == COMEDI_INPUT)
- chcr |= CHCR_DEV_TO_MEM;
-
- writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
-
- /* to/from memory */
- mcr = CR_RL(64) | CR_ASEQUP;
- switch (num_memory_bits) {
- case 8:
- mcr |= CR_PSIZE8;
- break;
- case 16:
- mcr |= CR_PSIZE16;
- break;
- case 32:
- mcr |= CR_PSIZE32;
- break;
- default:
- pr_warn("bug! invalid mem bit width for dma transfer\n");
- break;
- }
- writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));
-
- /* from/to device */
- dcr = CR_RL(64) | CR_ASEQUP;
- dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
- switch (num_device_bits) {
- case 8:
- dcr |= CR_PSIZE8;
- break;
- case 16:
- dcr |= CR_PSIZE16;
- break;
- case 32:
- dcr |= CR_PSIZE32;
- break;
- default:
- pr_warn("bug! invalid dev bit width for dma transfer\n");
- break;
- }
- writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));
-
- /* reset the DAR */
- writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));
-
- /* the link is 32bits */
- lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
- writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));
-
- /* starting address for link chaining */
- writel(mite_chan->ring->descriptors_dma_addr,
- mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
-}
-EXPORT_SYMBOL_GPL(mite_prep_dma);
-
-static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
-
- return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
-}
-
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
-
- return readl(mite->mite_io_addr +
- MITE_FCR(mite_chan->channel)) & 0x000000FF;
-}
-EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
-
-/* returns lower bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
-{
- u32 device_byte_count;
-
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count - mite_bytes_in_transit(mite_chan);
-}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);
-
-/* returns upper bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
-{
- u32 in_transit_count;
-
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) - in_transit_count;
-}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);
-
-/* returns lower bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
-{
- u32 device_byte_count;
-
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count + mite_bytes_in_transit(mite_chan);
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);
-
-/* returns upper bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
-{
- u32 in_transit_count;
-
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) + in_transit_count;
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
-
-unsigned mite_dma_tcr(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
-
- return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
-}
-EXPORT_SYMBOL_GPL(mite_dma_tcr);
-
-void mite_dma_disarm(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
- unsigned chor;
-
- /* disarm */
- chor = CHOR_ABORT;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
-}
-EXPORT_SYMBOL_GPL(mite_dma_disarm);
-
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- int count;
- unsigned int nbytes, old_alloc_count;
-
- old_alloc_count = async->buf_write_alloc_count;
- /* write alloc as much as we can */
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
-
- nbytes = mite_bytes_written_to_memory_lb(mite_chan);
- if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
- old_alloc_count) > 0) {
- dev_warn(s->device->class_dev,
- "mite: DMA overwrite of free area\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
- }
-
- count = nbytes - async->buf_write_count;
- /*
- * it's possible count will be negative due to conservative value
- * returned by mite_bytes_written_to_memory_lb
- */
- if (count <= 0)
- return 0;
-
- comedi_buf_write_free(s, count);
- comedi_inc_scan_progress(s, count);
- async->events |= COMEDI_CB_BLOCK;
- return 0;
-}
-EXPORT_SYMBOL_GPL(mite_sync_input_dma);
-
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
-{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
- unsigned int old_alloc_count = async->buf_read_alloc_count;
- u32 nbytes_ub, nbytes_lb;
- int count;
-
- /* read alloc as much as we can */
- comedi_buf_read_alloc(s, async->prealloc_bufsz);
- nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
- nbytes_lb = stop_count;
- nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
- nbytes_ub = stop_count;
- if ((int)(nbytes_ub - old_alloc_count) > 0) {
- dev_warn(s->device->class_dev, "mite: DMA underrun\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
- }
- count = nbytes_lb - async->buf_read_count;
- if (count <= 0)
- return 0;
-
- if (count) {
- comedi_buf_read_free(s, count);
- async->events |= COMEDI_CB_BLOCK;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(mite_sync_output_dma);
-
-unsigned mite_get_status(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
- unsigned status;
- unsigned long flags;
-
- spin_lock_irqsave(&mite->lock, flags);
- status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
- if (status & CHSR_DONE) {
- mite_chan->done = 1;
- writel(CHOR_CLRDONE,
- mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
- }
- mmiowb();
- spin_unlock_irqrestore(&mite->lock, flags);
- return status;
-}
-EXPORT_SYMBOL_GPL(mite_get_status);
-
-int mite_done(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
- int done;
-
- mite_get_status(mite_chan);
- spin_lock_irqsave(&mite->lock, flags);
- done = mite_chan->done;
- spin_unlock_irqrestore(&mite->lock, flags);
- return done;
-}
-EXPORT_SYMBOL_GPL(mite_done);
-
-static int __init mite_module_init(void)
-{
- return 0;
-}
-
-static void __exit mite_module_exit(void)
-{
-}
-
-module_init(mite_module_init);
-module_exit(mite_module_exit);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi helper for NI Mite PCI interface chip");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
deleted file mode 100644
index c32d4e4ddccc..000000000000
--- a/drivers/staging/comedi/drivers/mite.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * module/mite.h
- * Hardware driver for NI Mite PCI interface chip
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MITE_H_
-#define _MITE_H_
-
-#include <linux/io.h>
-#include <linux/log2.h>
-#include <linux/spinlock.h>
-
-#define MAX_MITE_DMA_CHANNELS 8
-
-struct comedi_device;
-struct comedi_subdevice;
-struct device;
-struct pci_dev;
-
-struct mite_dma_descriptor {
- __le32 count;
- __le32 addr;
- __le32 next;
- u32 dar;
-};
-
-struct mite_dma_descriptor_ring {
- struct device *hw_dev;
- unsigned int n_links;
- struct mite_dma_descriptor *descriptors;
- dma_addr_t descriptors_dma_addr;
-};
-
-struct mite_channel {
- struct mite_struct *mite;
- unsigned channel;
- int dir;
- int done;
- struct mite_dma_descriptor_ring *ring;
-};
-
-struct mite_struct {
- struct pci_dev *pcidev;
- resource_size_t mite_phys_addr;
- void __iomem *mite_io_addr;
- resource_size_t daq_phys_addr;
- struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
- short channel_allocated[MAX_MITE_DMA_CHANNELS];
- int num_channels;
- unsigned fifo_size;
- spinlock_t lock;
-};
-
-struct mite_struct *mite_alloc(struct pci_dev *pcidev);
-
-int mite_setup2(struct comedi_device *, struct mite_struct *, bool use_win1);
-
-static inline int mite_setup(struct comedi_device *dev,
- struct mite_struct *mite)
-{
- return mite_setup2(dev, mite, false);
-}
-
-void mite_detach(struct mite_struct *mite);
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite);
-void mite_free_ring(struct mite_dma_descriptor_ring *ring);
-struct mite_channel *
-mite_request_channel_in_range(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring,
- unsigned min_channel, unsigned max_channel);
-static inline struct mite_channel *
-mite_request_channel(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring)
-{
- return mite_request_channel_in_range(mite, ring, 0,
- mite->num_channels - 1);
-}
-
-void mite_release_channel(struct mite_channel *mite_chan);
-
-unsigned mite_dma_tcr(struct mite_channel *mite_chan);
-void mite_dma_arm(struct mite_channel *mite_chan);
-void mite_dma_disarm(struct mite_channel *mite_chan);
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan);
-unsigned mite_get_status(struct mite_channel *mite_chan);
-int mite_done(struct mite_channel *mite_chan);
-
-void mite_prep_dma(struct mite_channel *mite_chan,
- unsigned int num_device_bits, unsigned int num_memory_bits);
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s);
-
-enum mite_registers {
- /*
- * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
- * written and read back. The bits 0x1f always read as 1.
- * The rest always read as zero.
- */
- MITE_UNKNOWN_DMA_BURST_REG = 0x28,
- MITE_IODWBSR = 0xc0, /* IO Device Window Base Size Register */
- MITE_IODWBSR_1 = 0xc4, /* IO Device Window Base Size Register 1 */
- MITE_IODWCR_1 = 0xf4,
- MITE_PCI_CONFIG_OFFSET = 0x300,
- MITE_CSIGR = 0x460 /* chip signature */
-};
-
-#define MITE_CHAN(x) (0x500 + 0x100 * (x))
-#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
-#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
-#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
-#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory configuration */
-#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
-#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device configuration */
-#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
-#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link configuration */
-#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
-#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
-#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
-#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
-#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
-#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
-#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
-#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
-#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
-
-enum MITE_IODWBSR_bits {
- WENAB = 0x80, /* window enable */
-};
-
-static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
-{
- unsigned order = 0;
-
- BUG_ON(size == 0);
- order = ilog2(size);
- BUG_ON(order < 1);
- return (order - 1) & 0x1f;
-}
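
Editorial note, not part of the removed header: the helper above encodes the window size as ilog2(size) - 1 in the low five bits. A minimal user-space sketch of that arithmetic (function name is hypothetical):

/* Sketch of the MITE_IODWBSR_1_WSIZE_bits() encoding, user space. */
#include <assert.h>

static unsigned int wsize_bits(unsigned int size)
{
	unsigned int order = 0;

	/* open-coded ilog2(), since linux/log2.h is not available here */
	while ((1u << (order + 1)) <= size)
		order++;
	return (order - 1) & 0x1f;
}

int main(void)
{
	assert(wsize_bits(0x1000) == 11);	/* 4 KiB window: ilog2 = 12 */
	assert(wsize_bits(0x10000) == 15);	/* 64 KiB window: ilog2 = 16 */
	return 0;
}
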
-
-enum MITE_UNKNOWN_DMA_BURST_bits {
- UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
-};
-
-static inline int mite_csigr_version(u32 csigr_bits)
-{
- return csigr_bits & 0xf;
-};
-
-static inline int mite_csigr_type(u32 csigr_bits)
-{ /* original mite = 0, minimite = 1 */
- return (csigr_bits >> 4) & 0xf;
-};
-
-static inline int mite_csigr_mmode(u32 csigr_bits)
-{ /* mite mode, minimite = 1 */
- return (csigr_bits >> 8) & 0x3;
-};
-
-static inline int mite_csigr_imode(u32 csigr_bits)
-{ /* cpu port interface mode, pci = 0x3 */
- return (csigr_bits >> 12) & 0x3;
-};
-
-static inline int mite_csigr_dmac(u32 csigr_bits)
-{ /* number of dma channels */
- return (csigr_bits >> 16) & 0xf;
-};
-
-static inline int mite_csigr_wpdep(u32 csigr_bits)
-{ /* write post fifo depth */
- unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7;
-
- return (wpdep_bits) ? (1 << (wpdep_bits - 1)) : 0;
-}
-
-static inline int mite_csigr_wins(u32 csigr_bits)
-{
- return (csigr_bits >> 24) & 0x1f;
-};
-
-static inline int mite_csigr_iowins(u32 csigr_bits)
-{ /* number of io windows */
- return (csigr_bits >> 29) & 0x7;
-};
-
-enum MITE_MCR_bits {
- MCRPON = 0,
-};
-
-enum MITE_DCR_bits {
- DCR_NORMAL = (1 << 29),
- DCRPON = 0,
-};
-
-enum MITE_CHOR_bits {
- CHOR_DMARESET = (1 << 31),
- CHOR_SET_SEND_TC = (1 << 11),
- CHOR_CLR_SEND_TC = (1 << 10),
- CHOR_SET_LPAUSE = (1 << 9),
- CHOR_CLR_LPAUSE = (1 << 8),
- CHOR_CLRDONE = (1 << 7),
- CHOR_CLRRB = (1 << 6),
- CHOR_CLRLC = (1 << 5),
- CHOR_FRESET = (1 << 4),
- CHOR_ABORT = (1 << 3), /* stop without emptying fifo */
- CHOR_STOP = (1 << 2), /* stop after emptying fifo */
- CHOR_CONT = (1 << 1),
- CHOR_START = (1 << 0),
- CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
-};
-
-enum MITE_CHCR_bits {
- CHCR_SET_DMA_IE = (1 << 31),
- CHCR_CLR_DMA_IE = (1 << 30),
- CHCR_SET_LINKP_IE = (1 << 29),
- CHCR_CLR_LINKP_IE = (1 << 28),
- CHCR_SET_SAR_IE = (1 << 27),
- CHCR_CLR_SAR_IE = (1 << 26),
- CHCR_SET_DONE_IE = (1 << 25),
- CHCR_CLR_DONE_IE = (1 << 24),
- CHCR_SET_MRDY_IE = (1 << 23),
- CHCR_CLR_MRDY_IE = (1 << 22),
- CHCR_SET_DRDY_IE = (1 << 21),
- CHCR_CLR_DRDY_IE = (1 << 20),
- CHCR_SET_LC_IE = (1 << 19),
- CHCR_CLR_LC_IE = (1 << 18),
- CHCR_SET_CONT_RB_IE = (1 << 17),
- CHCR_CLR_CONT_RB_IE = (1 << 16),
- CHCR_FIFODIS = (1 << 15),
- CHCR_FIFO_ON = 0,
- CHCR_BURSTEN = (1 << 14),
- CHCR_NO_BURSTEN = 0,
- CHCR_BYTE_SWAP_DEVICE = (1 << 6),
- CHCR_BYTE_SWAP_MEMORY = (1 << 4),
- CHCR_DIR = (1 << 3),
- CHCR_DEV_TO_MEM = CHCR_DIR,
- CHCR_MEM_TO_DEV = 0,
- CHCR_NORMAL = (0 << 0),
- CHCR_CONTINUE = (1 << 0),
- CHCR_RINGBUFF = (2 << 0),
- CHCR_LINKSHORT = (4 << 0),
- CHCR_LINKLONG = (5 << 0),
- CHCRPON =
- (CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
-};
-
-enum ConfigRegister_bits {
- CR_REQS_MASK = 0x7 << 16,
- CR_ASEQDONT = 0x0 << 10,
- CR_ASEQUP = 0x1 << 10,
- CR_ASEQDOWN = 0x2 << 10,
- CR_ASEQ_MASK = 0x3 << 10,
- CR_PSIZE8 = (1 << 8),
- CR_PSIZE16 = (2 << 8),
- CR_PSIZE32 = (3 << 8),
- CR_PORTCPU = (0 << 6),
- CR_PORTIO = (1 << 6),
- CR_PORTVXI = (2 << 6),
- CR_PORTMXI = (3 << 6),
- CR_AMDEVICE = (1 << 0),
-};
-
-static inline int CR_REQS(int source)
-{
- return (source & 0x7) << 16;
-};
-
-static inline int CR_REQSDRQ(unsigned drq_line)
-{
- /* This also works on m-series when using channels (drq_line) 4 or 5. */
- return CR_REQS((drq_line & 0x3) | 0x4);
-}
-
-static inline int CR_RL(unsigned int retry_limit)
-{
- int value = 0;
-
- if (retry_limit)
- value = 1 + ilog2(retry_limit);
- if (value > 0x7)
- value = 0x7;
- return (value & 0x7) << 21;
-}
-
-enum CHSR_bits {
- CHSR_INT = (1 << 31),
- CHSR_LPAUSES = (1 << 29),
- CHSR_SARS = (1 << 27),
- CHSR_DONE = (1 << 25),
- CHSR_MRDY = (1 << 23),
- CHSR_DRDY = (1 << 21),
- CHSR_LINKC = (1 << 19),
- CHSR_CONTS_RB = (1 << 17),
- CHSR_ERROR = (1 << 15),
- CHSR_SABORT = (1 << 14),
- CHSR_HABORT = (1 << 13),
- CHSR_STOPS = (1 << 12),
- CHSR_OPERR_mask = (3 << 10),
- CHSR_OPERR_NOERROR = (0 << 10),
- CHSR_OPERR_FIFOERROR = (1 << 10),
- CHSR_OPERR_LINKERROR = (1 << 10), /* ??? */
- CHSR_XFERR = (1 << 9),
- CHSR_END = (1 << 8),
- CHSR_DRQ1 = (1 << 7),
- CHSR_DRQ0 = (1 << 6),
- CHSR_LxERR_mask = (3 << 4),
- CHSR_LBERR = (1 << 4),
- CHSR_LRERR = (2 << 4),
- CHSR_LOERR = (3 << 4),
- CHSR_MxERR_mask = (3 << 2),
- CHSR_MBERR = (1 << 2),
- CHSR_MRERR = (2 << 2),
- CHSR_MOERR = (3 << 2),
- CHSR_DxERR_mask = (3 << 0),
- CHSR_DBERR = (1 << 0),
- CHSR_DRERR = (2 << 0),
- CHSR_DOERR = (3 << 0),
-};
-
-static inline void mite_dma_reset(struct mite_channel *mite_chan)
-{
- writel(CHOR_DMARESET | CHOR_FRESET,
- mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
-};
-
-#endif
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
deleted file mode 100644
index 8e9204150eb7..000000000000
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- comedi/drivers/mpc624.c
- Hardware driver for a Micro/sys inc. MPC-624 PC/104 board
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: mpc624
-Description: Micro/sys MPC-624 PC/104 board
-Devices: [Micro/sys] MPC-624 (mpc624)
-Author: Stanislaw Raczynski <sraczynski@op.pl>
-Updated: Thu, 15 Sep 2005 12:01:18 +0200
-Status: working
-
- The Micro/sys MPC-624 board is based on the LTC2440 24-bit sigma-delta
- ADC chip.
-
- Subdevices supported by the driver:
- - Analog In: supported
- - Digital I/O: not supported
- - LEDs: not supported
- - EEPROM: not supported
-
-Configuration Options:
- [0] - I/O base address
- [1] - conversion rate
- Conversion rate RMS noise Effective Number Of Bits
- 0 3.52kHz 23uV 17
- 1 1.76kHz 3.5uV 20
- 2 880Hz 2uV 21.3
- 3 440Hz 1.4uV 21.8
- 4 220Hz 1uV 22.4
- 5 110Hz 750nV 22.9
- 6 55Hz 510nV 23.4
- 7 27.5Hz 375nV 24
- 8 13.75Hz 250nV 24.4
- 9 6.875Hz 200nV 24.6
- [2] - voltage range
- 0 -1.01V .. +1.01V
- 1 -10.1V .. +10.1V
-*/
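
Editorial aside, not part of the removed driver: the conversion-rate option above maps one-to-one onto the LTC2440 over-sample-ratio bits, so the switch in mpc624_attach() further down can also be read as a lookup table. A self-contained user-space sketch (the OSR bit values mirror the MPC624_OSR* defines below):

/* Option [1] -> LTC2440 OSR bit pattern, expressed as a table. */
#include <assert.h>
#include <stdint.h>

#define OSR4 (1u << 31)
#define OSR3 (1u << 30)
#define OSR2 (1u << 29)
#define OSR1 (1u << 28)
#define OSR0 (1u << 27)

static const uint32_t mpc624_osr_table[] = {
	OSR4 | OSR0,			/* 0: 3.52 kHz */
	OSR4 | OSR1,			/* 1: 1.76 kHz */
	OSR4 | OSR1 | OSR0,		/* 2: 880 Hz   */
	OSR4 | OSR2,			/* 3: 440 Hz   */
	OSR4 | OSR2 | OSR0,		/* 4: 220 Hz   */
	OSR4 | OSR2 | OSR1,		/* 5: 110 Hz   */
	OSR4 | OSR2 | OSR1 | OSR0,	/* 6: 55 Hz    */
	OSR4 | OSR3,			/* 7: 27.5 Hz  */
	OSR4 | OSR3 | OSR0,		/* 8: 13.75 Hz */
	OSR4 | OSR3 | OSR2 | OSR1 | OSR0, /* 9: 6.875 Hz */
};

int main(void)
{
	/* Out-of-range options fall back to 3.52 kHz, as in mpc624_attach(). */
	unsigned int opt = 42;
	uint32_t rate = (opt < 10) ? mpc624_osr_table[opt] : mpc624_osr_table[0];

	assert(rate == (OSR4 | OSR0));
	return 0;
}
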
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <linux/delay.h>
-
-/* Offsets of different ports */
-#define MPC624_MASTER_CONTROL 0 /* not used */
-#define MPC624_GNMUXCH 1 /* Gain, Mux, Channel of ADC */
-#define MPC624_ADC 2 /* read/write to/from ADC */
-#define MPC624_EE 3 /* read/write to/from serial EEPROM via I2C */
-#define MPC624_LEDS 4 /* write to LEDs */
-#define MPC624_DIO 5 /* read/write to/from digital I/O ports */
-#define MPC624_IRQ_MASK 6 /* IRQ masking enable/disable */
-
-/* Register bits' names */
-#define MPC624_ADBUSY BIT(5)
-#define MPC624_ADSDO BIT(4)
-#define MPC624_ADFO BIT(3)
-#define MPC624_ADCS BIT(2)
-#define MPC624_ADSCK BIT(1)
-#define MPC624_ADSDI BIT(0)
-
-/* SDI Speed/Resolution Programming bits */
-#define MPC624_OSR4 BIT(31)
-#define MPC624_OSR3 BIT(30)
-#define MPC624_OSR2 BIT(29)
-#define MPC624_OSR1 BIT(28)
-#define MPC624_OSR0 BIT(27)
-
-/* 32-bit output value bits' names */
-#define MPC624_EOC_BIT BIT(31)
-#define MPC624_DMY_BIT BIT(30)
-#define MPC624_SGN_BIT BIT(29)
-
-/* Conversion speeds */
-/* OSR4 OSR3 OSR2 OSR1 OSR0 Conversion rate RMS noise ENOB^
- * X 0 0 0 1 3.52kHz 23uV 17
- * X 0 0 1 0 1.76kHz 3.5uV 20
- * X 0 0 1 1 880Hz 2uV 21.3
- * X 0 1 0 0 440Hz 1.4uV 21.8
- * X 0 1 0 1 220Hz 1uV 22.4
- * X 0 1 1 0 110Hz 750nV 22.9
- * X 0 1 1 1 55Hz 510nV 23.4
- * X 1 0 0 0 27.5Hz 375nV 24
- * X 1 0 0 1 13.75Hz 250nV 24.4
- * X 1 1 1 1 6.875Hz 200nV 24.6
- *
- * ^ - Effective Number Of Bits
- */
-
-#define MPC624_SPEED_3_52_kHz (MPC624_OSR4 | MPC624_OSR0)
-#define MPC624_SPEED_1_76_kHz (MPC624_OSR4 | MPC624_OSR1)
-#define MPC624_SPEED_880_Hz (MPC624_OSR4 | MPC624_OSR1 | MPC624_OSR0)
-#define MPC624_SPEED_440_Hz (MPC624_OSR4 | MPC624_OSR2)
-#define MPC624_SPEED_220_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR0)
-#define MPC624_SPEED_110_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1)
-#define MPC624_SPEED_55_Hz \
- (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0)
-#define MPC624_SPEED_27_5_Hz (MPC624_OSR4 | MPC624_OSR3)
-#define MPC624_SPEED_13_75_Hz (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR0)
-#define MPC624_SPEED_6_875_Hz \
- (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0)
-/* -------------------------------------------------------------------------- */
-struct mpc624_private {
- /* set by mpc624_attach() from driver's parameters */
- unsigned long int ulConvertionRate;
-};
-
-/* -------------------------------------------------------------------------- */
-static const struct comedi_lrange range_mpc624_bipolar1 = {
- 1,
- {
-/* BIP_RANGE(1.01) this is correct, */
- /* but my MPC-624 actually seems to have a range of 2.02 */
- BIP_RANGE(2.02)
- }
-};
-
-static const struct comedi_lrange range_mpc624_bipolar10 = {
- 1,
- {
-/* BIP_RANGE(10.1) this is correct, */
- /* but my MPC-624 actually seems to have a range of 20.2 */
- BIP_RANGE(20.2)
- }
-};
-
-static int mpc624_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned char status;
-
- status = inb(dev->iobase + MPC624_ADC);
- if ((status & MPC624_ADBUSY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int mpc624_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct mpc624_private *devpriv = dev->private;
- int n, i;
- unsigned long int data_in, data_out;
- int ret;
-
- /*
- * WARNING:
- * We always write 0 to the GNSWA bit, so the channel range is +/-10.1Vdc
- */
- outb(insn->chanspec, dev->iobase + MPC624_GNMUXCH);
-
- for (n = 0; n < insn->n; n++) {
- /* Trigger the conversion */
- outb(MPC624_ADSCK, dev->iobase + MPC624_ADC);
- udelay(1);
- outb(MPC624_ADCS | MPC624_ADSCK, dev->iobase + MPC624_ADC);
- udelay(1);
- outb(0, dev->iobase + MPC624_ADC);
- udelay(1);
-
- /* Wait for the conversion to end */
- ret = comedi_timeout(dev, s, insn, mpc624_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* Start reading data */
- data_in = 0;
- data_out = devpriv->ulConvertionRate;
- udelay(1);
- for (i = 0; i < 32; i++) {
- /* Set the clock low */
- outb(0, dev->iobase + MPC624_ADC);
- udelay(1);
-
- if (data_out & BIT(31)) { /* the next bit is a 1 */
- /* Set the ADSDI line (send to MPC624) */
- outb(MPC624_ADSDI, dev->iobase + MPC624_ADC);
- udelay(1);
- /* Set the clock high */
- outb(MPC624_ADSCK | MPC624_ADSDI,
- dev->iobase + MPC624_ADC);
- } else { /* the next bit is a 0 */
-
- /* Set the ADSDI line (send to MPC624) */
- outb(0, dev->iobase + MPC624_ADC);
- udelay(1);
- /* Set the clock high */
- outb(MPC624_ADSCK, dev->iobase + MPC624_ADC);
- }
- /* Read ADSDO on high clock (receive from MPC624) */
- udelay(1);
- data_in <<= 1;
- data_in |=
- (inb(dev->iobase + MPC624_ADC) & MPC624_ADSDO) >> 4;
- udelay(1);
-
- data_out <<= 1;
- }
-
- /*
- * The received 32-bit value consists of:
- * 31: EOC -
- * (End Of Conversion) bit - should be 0
- * 30: DMY
- * (Dummy) bit - should be 0
- * 29: SGN
- * (Sign) bit - 1 if the voltage is positive,
- * 0 if negative
- * 28: MSB
- * (Most Significant Bit) - the first bit of
- * the conversion result
- * ....
- * 05: LSB
- * (Least Significant Bit)- the last bit of the
- * conversion result
- * 04-00: sub-LSB
- * - sub-LSBs are basically noise, but when
- * averaged properly, they can increase conversion
- * precision up to 29 bits; they can be discarded
- * without loss of resolution.
- */
-
- if (data_in & MPC624_EOC_BIT)
- dev_dbg(dev->class_dev,
- "EOC bit is set (data_in=%lu)!", data_in);
- if (data_in & MPC624_DMY_BIT)
- dev_dbg(dev->class_dev,
- "DMY bit is set (data_in=%lu)!", data_in);
- if (data_in & MPC624_SGN_BIT) { /* Voltage is positive */
- /*
- * comedi operates on unsigned numbers, so mask off EOC
- * and DMY and don't clear the SGN bit
- */
- data_in &= 0x3FFFFFFF;
- data[n] = data_in;
- } else { /* The voltage is negative */
- /*
- * data_in contains a number in 30-bit two's complement
- * code and we must deal with it
- */
- data_in |= MPC624_SGN_BIT;
- data_in = ~data_in;
- data_in += 1;
- data_in &= ~(MPC624_EOC_BIT | MPC624_DMY_BIT);
- /* clear EOC and DMY bits */
- data_in = 0x20000000 - data_in;
- data[n] = data_in;
- }
- }
-
- /* Return the number of samples read/written */
- return n;
-}
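
Editorial aside: if the arithmetic is followed through, the positive and negative branches above both amount to clearing the EOC and DMY bits, leaving an offset-binary sample with 0x20000000 at 0 V. A hedged user-space sketch of that reading (my derivation, not a statement by the original author; bit names mirror the MPC624_*_BIT defines above):

/* Decode a raw LTC2440 word as laid out in the comment above. */
#include <assert.h>
#include <stdint.h>

#define EOC_BIT (1u << 31)	/* mirrors MPC624_EOC_BIT */
#define DMY_BIT (1u << 30)	/* mirrors MPC624_DMY_BIT */
#define SGN_BIT (1u << 29)	/* mirrors MPC624_SGN_BIT */

static uint32_t mpc624_decode(uint32_t raw)
{
	/* with EOC and DMY both zero this equals the driver's result */
	return raw & ~(EOC_BIT | DMY_BIT);
}

int main(void)
{
	assert(mpc624_decode(SGN_BIT | 5) == 0x20000000 + 5);	/* +5 counts */
	assert(mpc624_decode(0x1ffffffb) == 0x20000000 - 5);	/* -5 counts */
	return 0;
}
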
-
-static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct mpc624_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- switch (it->options[1]) {
- case 0:
- devpriv->ulConvertionRate = MPC624_SPEED_3_52_kHz;
- break;
- case 1:
- devpriv->ulConvertionRate = MPC624_SPEED_1_76_kHz;
- break;
- case 2:
- devpriv->ulConvertionRate = MPC624_SPEED_880_Hz;
- break;
- case 3:
- devpriv->ulConvertionRate = MPC624_SPEED_440_Hz;
- break;
- case 4:
- devpriv->ulConvertionRate = MPC624_SPEED_220_Hz;
- break;
- case 5:
- devpriv->ulConvertionRate = MPC624_SPEED_110_Hz;
- break;
- case 6:
- devpriv->ulConvertionRate = MPC624_SPEED_55_Hz;
- break;
- case 7:
- devpriv->ulConvertionRate = MPC624_SPEED_27_5_Hz;
- break;
- case 8:
- devpriv->ulConvertionRate = MPC624_SPEED_13_75_Hz;
- break;
- case 9:
- devpriv->ulConvertionRate = MPC624_SPEED_6_875_Hz;
- break;
- default:
- devpriv->ulConvertionRate = MPC624_SPEED_3_52_kHz;
- }
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = 8;
- s->maxdata = 0x3FFFFFFF;
-
- switch (it->options[1]) {
- case 0:
- s->range_table = &range_mpc624_bipolar1;
- break;
- default:
- s->range_table = &range_mpc624_bipolar10;
- }
- s->len_chanlist = 1;
- s->insn_read = mpc624_ai_rinsn;
-
- return 0;
-}
-
-static struct comedi_driver mpc624_driver = {
- .driver_name = "mpc624",
- .module = THIS_MODULE,
- .attach = mpc624_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(mpc624_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
deleted file mode 100644
index 8471219210b6..000000000000
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- comedi/drivers/multiq3.c
- Hardware driver for Quanser Consulting MultiQ-3 board
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
-/*
-Driver: multiq3
-Description: Quanser Consulting MultiQ-3
-Author: Anders Blomdell <anders.blomdell@control.lth.se>
-Status: works
-Devices: [Quanser Consulting] MultiQ-3 (multiq3)
-
-*/
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-/*
- * MULTIQ-3 port offsets
- */
-#define MULTIQ3_DIGIN_PORT 0
-#define MULTIQ3_DIGOUT_PORT 0
-#define MULTIQ3_DAC_DATA 2
-#define MULTIQ3_AD_DATA 4
-#define MULTIQ3_AD_CS 4
-#define MULTIQ3_STATUS 6
-#define MULTIQ3_CONTROL 6
-#define MULTIQ3_CLK_DATA 8
-#define MULTIQ3_ENC_DATA 12
-#define MULTIQ3_ENC_CONTROL 14
-
-/*
- * flags for CONTROL register
- */
-#define MULTIQ3_AD_MUX_EN 0x0040
-#define MULTIQ3_AD_AUTOZ 0x0080
-#define MULTIQ3_AD_AUTOCAL 0x0100
-#define MULTIQ3_AD_SH 0x0200
-#define MULTIQ3_AD_CLOCK_4M 0x0400
-#define MULTIQ3_DA_LOAD 0x1800
-
-#define MULTIQ3_CONTROL_MUST 0x0600
-
-/*
- * flags for STATUS register
- */
-#define MULTIQ3_STATUS_EOC 0x008
-#define MULTIQ3_STATUS_EOC_I 0x010
-
-/*
- * flags for encoder control
- */
-#define MULTIQ3_CLOCK_DATA 0x00
-#define MULTIQ3_CLOCK_SETUP 0x18
-#define MULTIQ3_INPUT_SETUP 0x41
-#define MULTIQ3_QUAD_X4 0x38
-#define MULTIQ3_BP_RESET 0x01
-#define MULTIQ3_CNTR_RESET 0x02
-#define MULTIQ3_TRSFRPR_CTR 0x08
-#define MULTIQ3_TRSFRCNTR_OL 0x10
-#define MULTIQ3_EFLAG_RESET 0x06
-
-#define MULTIQ3_TIMEOUT 30
-
-static int multiq3_ai_status(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + MULTIQ3_STATUS);
- if (status & context)
- return 0;
- return -EBUSY;
-}
-
-static int multiq3_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int n;
- int chan;
- unsigned int hi, lo;
- int ret;
-
- chan = CR_CHAN(insn->chanspec);
- outw(MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3),
- dev->iobase + MULTIQ3_CONTROL);
-
- ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
- MULTIQ3_STATUS_EOC);
- if (ret)
- return ret;
-
- for (n = 0; n < insn->n; n++) {
- outw(0, dev->iobase + MULTIQ3_AD_CS);
-
- ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
- MULTIQ3_STATUS_EOC_I);
- if (ret)
- return ret;
-
- hi = inb(dev->iobase + MULTIQ3_AD_CS);
- lo = inb(dev->iobase + MULTIQ3_AD_CS);
- data[n] = (((hi << 8) | lo) + 0x1000) & 0x1fff;
- }
-
- return n;
-}
-
-static int multiq3_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outw(MULTIQ3_CONTROL_MUST | MULTIQ3_DA_LOAD | chan,
- dev->iobase + MULTIQ3_CONTROL);
- outw(val, dev->iobase + MULTIQ3_DAC_DATA);
- outw(MULTIQ3_CONTROL_MUST, dev->iobase + MULTIQ3_CONTROL);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int multiq3_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inw(dev->iobase + MULTIQ3_DIGIN_PORT);
-
- return insn->n;
-}
-
-static int multiq3_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + MULTIQ3_DIGOUT_PORT);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int multiq3_encoder_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int chan = CR_CHAN(insn->chanspec);
- int control = MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3);
- int value;
- int n;
-
- for (n = 0; n < insn->n; n++) {
- outw(control, dev->iobase + MULTIQ3_CONTROL);
- outb(MULTIQ3_BP_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
- outb(MULTIQ3_TRSFRCNTR_OL, dev->iobase + MULTIQ3_ENC_CONTROL);
- value = inb(dev->iobase + MULTIQ3_ENC_DATA);
- value |= (inb(dev->iobase + MULTIQ3_ENC_DATA) << 8);
- value |= (inb(dev->iobase + MULTIQ3_ENC_DATA) << 16);
- data[n] = (value + 0x800000) & 0xffffff;
- }
-
- return n;
-}
-
-static void encoder_reset(struct comedi_device *dev)
-{
- struct comedi_subdevice *s = &dev->subdevices[4];
- int chan;
-
- for (chan = 0; chan < s->n_chan; chan++) {
- int control =
- MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3);
- outw(control, dev->iobase + MULTIQ3_CONTROL);
- outb(MULTIQ3_EFLAG_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
- outb(MULTIQ3_BP_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
- outb(MULTIQ3_CLOCK_DATA, dev->iobase + MULTIQ3_ENC_DATA);
- outb(MULTIQ3_CLOCK_SETUP, dev->iobase + MULTIQ3_ENC_CONTROL);
- outb(MULTIQ3_INPUT_SETUP, dev->iobase + MULTIQ3_ENC_CONTROL);
- outb(MULTIQ3_QUAD_X4, dev->iobase + MULTIQ3_ENC_CONTROL);
- outb(MULTIQ3_CNTR_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
- }
-}
-
-static int multiq3_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 5);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8;
- s->insn_read = multiq3_ai_insn_read;
- s->maxdata = 0x1fff;
- s->range_table = &range_bipolar5;
-
- s = &dev->subdevices[1];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 0xfff;
- s->range_table = &range_bipolar5;
- s->insn_write = multiq3_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- /* di subdevice */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->insn_bits = multiq3_di_insn_bits;
- s->maxdata = 1;
- s->range_table = &range_digital;
-
- s = &dev->subdevices[3];
- /* do subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->insn_bits = multiq3_do_insn_bits;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->state = 0;
-
- s = &dev->subdevices[4];
- /* encoder (counter) subdevice */
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
- s->n_chan = it->options[2] * 2;
- s->insn_read = multiq3_encoder_insn_read;
- s->maxdata = 0xffffff;
- s->range_table = &range_unknown;
-
- encoder_reset(dev);
-
- return 0;
-}
-
-static struct comedi_driver multiq3_driver = {
- .driver_name = "multiq3",
- .module = THIS_MODULE,
- .attach = multiq3_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(multiq3_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
deleted file mode 100644
index 62a817e4cd64..000000000000
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * ni_6527.c
- * Comedi driver for National Instruments PCI-6527
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ni_6527
- * Description: National Instruments 6527
- * Devices: [National Instruments] PCI-6527 (pci-6527), PXI-6527 (pxi-6527)
- * Author: David A. Schleef <ds@schleef.org>
- * Updated: Sat, 25 Jan 2003 13:24:40 -0800
- * Status: works
- *
- * Configuration Options: not applicable, uses PCI auto config
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-/*
- * PCI BAR1 - Register memory map
- *
- * Manuals (available from ftp://ftp.natinst.com/support/manuals)
- * 370106b.pdf 6527 Register Level Programmer Manual
- */
-#define NI6527_DI_REG(x) (0x00 + (x))
-#define NI6527_DO_REG(x) (0x03 + (x))
-#define NI6527_ID_REG 0x06
-#define NI6527_CLR_REG 0x07
-#define NI6527_CLR_EDGE (1 << 3)
-#define NI6527_CLR_OVERFLOW (1 << 2)
-#define NI6527_CLR_FILT (1 << 1)
-#define NI6527_CLR_INTERVAL (1 << 0)
-#define NI6527_CLR_IRQS (NI6527_CLR_EDGE | NI6527_CLR_OVERFLOW)
-#define NI6527_CLR_RESET_FILT (NI6527_CLR_FILT | NI6527_CLR_INTERVAL)
-#define NI6527_FILT_INTERVAL_REG(x) (0x08 + (x))
-#define NI6527_FILT_ENA_REG(x) (0x0c + (x))
-#define NI6527_STATUS_REG 0x14
-#define NI6527_STATUS_IRQ (1 << 2)
-#define NI6527_STATUS_OVERFLOW (1 << 1)
-#define NI6527_STATUS_EDGE (1 << 0)
-#define NI6527_CTRL_REG 0x15
-#define NI6527_CTRL_FALLING (1 << 4)
-#define NI6527_CTRL_RISING (1 << 3)
-#define NI6527_CTRL_IRQ (1 << 2)
-#define NI6527_CTRL_OVERFLOW (1 << 1)
-#define NI6527_CTRL_EDGE (1 << 0)
-#define NI6527_CTRL_DISABLE_IRQS 0
-#define NI6527_CTRL_ENABLE_IRQS (NI6527_CTRL_FALLING | \
- NI6527_CTRL_RISING | \
- NI6527_CTRL_IRQ | NI6527_CTRL_EDGE)
-#define NI6527_RISING_EDGE_REG(x) (0x18 + (x))
-#define NI6527_FALLING_EDGE_REG(x) (0x20 + (x))
-
-enum ni6527_boardid {
- BOARD_PCI6527,
- BOARD_PXI6527,
-};
-
-struct ni6527_board {
- const char *name;
-};
-
-static const struct ni6527_board ni6527_boards[] = {
- [BOARD_PCI6527] = {
- .name = "pci-6527",
- },
- [BOARD_PXI6527] = {
- .name = "pxi-6527",
- },
-};
-
-struct ni6527_private {
- unsigned int filter_interval;
- unsigned int filter_enable;
-};
-
-static void ni6527_set_filter_interval(struct comedi_device *dev,
- unsigned int val)
-{
- struct ni6527_private *devpriv = dev->private;
-
- if (val != devpriv->filter_interval) {
- writeb(val & 0xff, dev->mmio + NI6527_FILT_INTERVAL_REG(0));
- writeb((val >> 8) & 0xff,
- dev->mmio + NI6527_FILT_INTERVAL_REG(1));
- writeb((val >> 16) & 0x0f,
- dev->mmio + NI6527_FILT_INTERVAL_REG(2));
-
- writeb(NI6527_CLR_INTERVAL, dev->mmio + NI6527_CLR_REG);
-
- devpriv->filter_interval = val;
- }
-}
-
-static void ni6527_set_filter_enable(struct comedi_device *dev,
- unsigned int val)
-{
- writeb(val & 0xff, dev->mmio + NI6527_FILT_ENA_REG(0));
- writeb((val >> 8) & 0xff, dev->mmio + NI6527_FILT_ENA_REG(1));
- writeb((val >> 16) & 0xff, dev->mmio + NI6527_FILT_ENA_REG(2));
-}
-
-static int ni6527_di_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni6527_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int interval;
-
- switch (data[0]) {
- case INSN_CONFIG_FILTER:
- /*
- * The deglitch filter interval is specified in nanoseconds.
- * The hardware supports intervals in 200ns increments. Round to
- * the nearest 200ns step and return the actual interval used.
- */
- interval = (data[1] + 100) / 200;
- data[1] = interval * 200;
-
- if (interval) {
- ni6527_set_filter_interval(dev, interval);
- devpriv->filter_enable |= 1 << chan;
- } else {
- devpriv->filter_enable &= ~(1 << chan);
- }
- ni6527_set_filter_enable(dev, devpriv->filter_enable);
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
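
Editorial check of the interval arithmetic above: the requested nanosecond value is quantised to the nearest 200 ns step, and anything below 100 ns disables filtering. A minimal user-space sketch:

/* Same quantisation as the INSN_CONFIG_FILTER case above. */
#include <assert.h>

static unsigned int filt_ns(unsigned int ns)
{
	unsigned int interval = (ns + 100) / 200;

	return interval * 200;		/* actual interval reported back */
}

int main(void)
{
	assert(filt_ns(250) == 200);	/* rounds down to the nearest step */
	assert(filt_ns(350) == 400);	/* rounds up to the nearest step */
	assert(filt_ns(99) == 0);	/* too short: filter disabled */
	return 0;
}
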
-
-static int ni6527_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int val;
-
- val = readb(dev->mmio + NI6527_DI_REG(0));
- val |= (readb(dev->mmio + NI6527_DI_REG(1)) << 8);
- val |= (readb(dev->mmio + NI6527_DI_REG(2)) << 16);
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int ni6527_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- /* Outputs are inverted */
- unsigned int val = s->state ^ 0xffffff;
-
- if (mask & 0x0000ff)
- writeb(val & 0xff, dev->mmio + NI6527_DO_REG(0));
- if (mask & 0x00ff00)
- writeb((val >> 8) & 0xff,
- dev->mmio + NI6527_DO_REG(1));
- if (mask & 0xff0000)
- writeb((val >> 16) & 0xff,
- dev->mmio + NI6527_DO_REG(2));
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static irqreturn_t ni6527_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status;
-
- status = readb(dev->mmio + NI6527_STATUS_REG);
- if (!(status & NI6527_STATUS_IRQ))
- return IRQ_NONE;
-
- if (status & NI6527_STATUS_EDGE) {
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
- }
-
- writeb(NI6527_CLR_IRQS, dev->mmio + NI6527_CLR_REG);
-
- return IRQ_HANDLED;
-}
-
-static int ni6527_intr_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_OTHER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-static int ni6527_intr_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- writeb(NI6527_CLR_IRQS, dev->mmio + NI6527_CLR_REG);
- writeb(NI6527_CTRL_ENABLE_IRQS, dev->mmio + NI6527_CTRL_REG);
-
- return 0;
-}
-
-static int ni6527_intr_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- writeb(NI6527_CTRL_DISABLE_IRQS, dev->mmio + NI6527_CTRL_REG);
-
- return 0;
-}
-
-static int ni6527_intr_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = 0;
- return insn->n;
-}
-
-static void ni6527_set_edge_detection(struct comedi_device *dev,
- unsigned int mask,
- unsigned int rising,
- unsigned int falling)
-{
- unsigned int i;
-
- rising &= mask;
- falling &= mask;
- for (i = 0; i < 2; i++) {
- if (mask & 0xff) {
- if (~mask & 0xff) {
- /* preserve rising-edge detection channels */
- rising |= readb(dev->mmio +
- NI6527_RISING_EDGE_REG(i)) &
- (~mask & 0xff);
- /* preserve falling-edge detection channels */
- falling |= readb(dev->mmio +
- NI6527_FALLING_EDGE_REG(i)) &
- (~mask & 0xff);
- }
- /* update rising-edge detection channels */
- writeb(rising & 0xff,
- dev->mmio + NI6527_RISING_EDGE_REG(i));
- /* update falling-edge detection channels */
- writeb(falling & 0xff,
- dev->mmio + NI6527_FALLING_EDGE_REG(i));
- }
- rising >>= 8;
- falling >>= 8;
- mask >>= 8;
- }
-}
-
-static int ni6527_intr_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask = 0xffffffff;
- unsigned int rising, falling, shift;
-
- switch (data[0]) {
- case INSN_CONFIG_CHANGE_NOTIFY:
- /* check_insn_config_length() does not check this instruction */
- if (insn->n != 3)
- return -EINVAL;
- rising = data[1];
- falling = data[2];
- ni6527_set_edge_detection(dev, mask, rising, falling);
- break;
- case INSN_CONFIG_DIGITAL_TRIG:
- /* check trigger number */
- if (data[1] != 0)
- return -EINVAL;
- /* check digital trigger operation */
- switch (data[2]) {
- case COMEDI_DIGITAL_TRIG_DISABLE:
- rising = 0;
- falling = 0;
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
- /* check shift amount */
- shift = data[3];
- if (shift >= s->n_chan) {
- mask = 0;
- rising = 0;
- falling = 0;
- } else {
- mask <<= shift;
- rising = data[4] << shift;
- falling = data[5] << shift;
- }
- break;
- default:
- return -EINVAL;
- }
- ni6527_set_edge_detection(dev, mask, rising, falling);
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static void ni6527_reset(struct comedi_device *dev)
-{
- /* disable deglitch filters on all channels */
- ni6527_set_filter_enable(dev, 0);
-
- /* disable edge detection */
- ni6527_set_edge_detection(dev, 0xffffffff, 0, 0);
-
- writeb(NI6527_CLR_IRQS | NI6527_CLR_RESET_FILT,
- dev->mmio + NI6527_CLR_REG);
- writeb(NI6527_CTRL_DISABLE_IRQS, dev->mmio + NI6527_CTRL_REG);
-}
-
-static int ni6527_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct ni6527_board *board = NULL;
- struct ni6527_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- if (context < ARRAY_SIZE(ni6527_boards))
- board = &ni6527_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 1);
- if (!dev->mmio)
- return -ENOMEM;
-
- /* make sure this is actually a 6527 device */
- if (readb(dev->mmio + NI6527_ID_REG) != 0x27)
- return -ENODEV;
-
- ni6527_reset(dev);
-
- ret = request_irq(pcidev->irq, ni6527_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- /* Digital Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = ni6527_di_insn_config;
- s->insn_bits = ni6527_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni6527_do_insn_bits;
-
- /* Edge detection interrupt subdevice */
- s = &dev->subdevices[2];
- if (dev->irq) {
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = ni6527_intr_insn_config;
- s->insn_bits = ni6527_intr_insn_bits;
- s->len_chanlist = 1;
- s->do_cmdtest = ni6527_intr_cmdtest;
- s->do_cmd = ni6527_intr_cmd;
- s->cancel = ni6527_intr_cancel;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- return 0;
-}
-
-static void ni6527_detach(struct comedi_device *dev)
-{
- if (dev->mmio)
- ni6527_reset(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver ni6527_driver = {
- .driver_name = "ni_6527",
- .module = THIS_MODULE,
- .auto_attach = ni6527_auto_attach,
- .detach = ni6527_detach,
-};
-
-static int ni6527_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &ni6527_driver, id->driver_data);
-}
-
-static const struct pci_device_id ni6527_pci_table[] = {
- { PCI_VDEVICE(NI, 0x2b10), BOARD_PXI6527 },
- { PCI_VDEVICE(NI, 0x2b20), BOARD_PCI6527 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ni6527_pci_table);
-
-static struct pci_driver ni6527_pci_driver = {
- .name = "ni_6527",
- .id_table = ni6527_pci_table,
- .probe = ni6527_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(ni6527_driver, ni6527_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for National Instruments PCI-6527");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
deleted file mode 100644
index 800d57426070..000000000000
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ /dev/null
@@ -1,831 +0,0 @@
-/*
- * ni_65xx.c
- * Comedi driver for National Instruments PCI-65xx static dio boards
- *
- * Copyright (C) 2006 Jon Grierson <jd@renko.co.uk>
- * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ni_65xx
- * Description: National Instruments 65xx static dio boards
- * Author: Jon Grierson <jd@renko.co.uk>,
- * Frank Mori Hess <fmhess@users.sourceforge.net>
- * Status: testing
- * Devices: [National Instruments] PCI-6509 (pci-6509), PXI-6509 (pxi-6509),
- * PCI-6510 (pci-6510), PCI-6511 (pci-6511), PXI-6511 (pxi-6511),
- * PCI-6512 (pci-6512), PXI-6512 (pxi-6512), PCI-6513 (pci-6513),
- * PXI-6513 (pxi-6513), PCI-6514 (pci-6514), PXI-6514 (pxi-6514),
- * PCI-6515 (pci-6515), PXI-6515 (pxi-6515), PCI-6516 (pci-6516),
- * PCI-6517 (pci-6517), PCI-6518 (pci-6518), PCI-6519 (pci-6519),
- * PCI-6520 (pci-6520), PCI-6521 (pci-6521), PXI-6521 (pxi-6521),
- * PCI-6528 (pci-6528), PXI-6528 (pxi-6528)
- * Updated: Mon, 21 Jul 2014 12:49:58 +0000
- *
- * Configuration Options: not applicable, uses PCI auto config
- *
- * Based on the PCI-6527 driver by ds.
- * The interrupt subdevice (subdevice 3) is probably broken for all
- * boards except maybe the 6514.
- *
- * This driver previously inverted the outputs on PCI-6513 through to
- * PCI-6519 and on PXI-6513 through to PXI-6515. It no longer inverts
- * outputs on those cards by default as it didn't make much sense. If
- * you require the outputs to be inverted on those cards for legacy
- * reasons, set the module parameter "legacy_invert_outputs=true" when
- * loading the module, or set "ni_65xx.legacy_invert_outputs=true" on
- * the kernel command line if the driver is built in to the kernel.
- */
-
-/*
- * Manuals (available from ftp://ftp.natinst.com/support/manuals)
- *
- * 370106b.pdf 6514 Register Level Programmer Manual
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-/*
- * PCI BAR1 Register Map
- */
-
-/* Non-recurring Registers (8-bit except where noted) */
-#define NI_65XX_ID_REG 0x00
-#define NI_65XX_CLR_REG 0x01
-#define NI_65XX_CLR_WDOG_INT (1 << 6)
-#define NI_65XX_CLR_WDOG_PING (1 << 5)
-#define NI_65XX_CLR_WDOG_EXP (1 << 4)
-#define NI_65XX_CLR_EDGE_INT (1 << 3)
-#define NI_65XX_CLR_OVERFLOW_INT (1 << 2)
-#define NI_65XX_STATUS_REG 0x02
-#define NI_65XX_STATUS_WDOG_INT (1 << 5)
-#define NI_65XX_STATUS_FALL_EDGE (1 << 4)
-#define NI_65XX_STATUS_RISE_EDGE (1 << 3)
-#define NI_65XX_STATUS_INT (1 << 2)
-#define NI_65XX_STATUS_OVERFLOW_INT (1 << 1)
-#define NI_65XX_STATUS_EDGE_INT (1 << 0)
-#define NI_65XX_CTRL_REG 0x03
-#define NI_65XX_CTRL_WDOG_ENA (1 << 5)
-#define NI_65XX_CTRL_FALL_EDGE_ENA (1 << 4)
-#define NI_65XX_CTRL_RISE_EDGE_ENA (1 << 3)
-#define NI_65XX_CTRL_INT_ENA (1 << 2)
-#define NI_65XX_CTRL_OVERFLOW_ENA (1 << 1)
-#define NI_65XX_CTRL_EDGE_ENA (1 << 0)
-#define NI_65XX_REV_REG 0x04 /* 32-bit */
-#define NI_65XX_FILTER_REG 0x08 /* 32-bit */
-#define NI_65XX_RTSI_ROUTE_REG 0x0c /* 16-bit */
-#define NI_65XX_RTSI_EDGE_REG 0x0e /* 16-bit */
-#define NI_65XX_RTSI_WDOG_REG 0x10 /* 16-bit */
-#define NI_65XX_RTSI_TRIG_REG 0x12 /* 16-bit */
-#define NI_65XX_AUTO_CLK_SEL_REG 0x14 /* PXI-6528 only */
-#define NI_65XX_AUTO_CLK_SEL_STATUS (1 << 1)
-#define NI_65XX_AUTO_CLK_SEL_DISABLE (1 << 0)
-#define NI_65XX_WDOG_CTRL_REG 0x15
-#define NI_65XX_WDOG_CTRL_ENA (1 << 0)
-#define NI_65XX_RTSI_CFG_REG 0x16
-#define NI_65XX_RTSI_CFG_RISE_SENSE (1 << 2)
-#define NI_65XX_RTSI_CFG_FALL_SENSE (1 << 1)
-#define NI_65XX_RTSI_CFG_SYNC_DETECT (1 << 0)
-#define NI_65XX_WDOG_STATUS_REG 0x17
-#define NI_65XX_WDOG_STATUS_EXP (1 << 0)
-#define NI_65XX_WDOG_INTERVAL_REG 0x18 /* 32-bit */
-
-/* Recurring port registers (8-bit) */
-#define NI_65XX_PORT(x) ((x) * 0x10)
-#define NI_65XX_IO_DATA_REG(x) (0x40 + NI_65XX_PORT(x))
-#define NI_65XX_IO_SEL_REG(x) (0x41 + NI_65XX_PORT(x))
-#define NI_65XX_IO_SEL_OUTPUT (0 << 0)
-#define NI_65XX_IO_SEL_INPUT (1 << 0)
-#define NI_65XX_RISE_EDGE_ENA_REG(x) (0x42 + NI_65XX_PORT(x))
-#define NI_65XX_FALL_EDGE_ENA_REG(x) (0x43 + NI_65XX_PORT(x))
-#define NI_65XX_FILTER_ENA(x) (0x44 + NI_65XX_PORT(x))
-#define NI_65XX_WDOG_HIZ_REG(x) (0x46 + NI_65XX_PORT(x))
-#define NI_65XX_WDOG_ENA(x) (0x47 + NI_65XX_PORT(x))
-#define NI_65XX_WDOG_HI_LO_REG(x) (0x48 + NI_65XX_PORT(x))
-#define NI_65XX_RTSI_ENA(x) (0x49 + NI_65XX_PORT(x))
-
-#define NI_65XX_PORT_TO_CHAN(x) ((x) * 8)
-#define NI_65XX_CHAN_TO_PORT(x) ((x) / 8)
-#define NI_65XX_CHAN_TO_MASK(x) (1 << ((x) % 8))
-
-enum ni_65xx_boardid {
- BOARD_PCI6509,
- BOARD_PXI6509,
- BOARD_PCI6510,
- BOARD_PCI6511,
- BOARD_PXI6511,
- BOARD_PCI6512,
- BOARD_PXI6512,
- BOARD_PCI6513,
- BOARD_PXI6513,
- BOARD_PCI6514,
- BOARD_PXI6514,
- BOARD_PCI6515,
- BOARD_PXI6515,
- BOARD_PCI6516,
- BOARD_PCI6517,
- BOARD_PCI6518,
- BOARD_PCI6519,
- BOARD_PCI6520,
- BOARD_PCI6521,
- BOARD_PXI6521,
- BOARD_PCI6528,
- BOARD_PXI6528,
-};
-
-struct ni_65xx_board {
- const char *name;
- unsigned num_dio_ports;
- unsigned num_di_ports;
- unsigned num_do_ports;
- unsigned legacy_invert:1;
-};
-
-static const struct ni_65xx_board ni_65xx_boards[] = {
- [BOARD_PCI6509] = {
- .name = "pci-6509",
- .num_dio_ports = 12,
- },
- [BOARD_PXI6509] = {
- .name = "pxi-6509",
- .num_dio_ports = 12,
- },
- [BOARD_PCI6510] = {
- .name = "pci-6510",
- .num_di_ports = 4,
- },
- [BOARD_PCI6511] = {
- .name = "pci-6511",
- .num_di_ports = 8,
- },
- [BOARD_PXI6511] = {
- .name = "pxi-6511",
- .num_di_ports = 8,
- },
- [BOARD_PCI6512] = {
- .name = "pci-6512",
- .num_do_ports = 8,
- },
- [BOARD_PXI6512] = {
- .name = "pxi-6512",
- .num_do_ports = 8,
- },
- [BOARD_PCI6513] = {
- .name = "pci-6513",
- .num_do_ports = 8,
- .legacy_invert = 1,
- },
- [BOARD_PXI6513] = {
- .name = "pxi-6513",
- .num_do_ports = 8,
- .legacy_invert = 1,
- },
- [BOARD_PCI6514] = {
- .name = "pci-6514",
- .num_di_ports = 4,
- .num_do_ports = 4,
- .legacy_invert = 1,
- },
- [BOARD_PXI6514] = {
- .name = "pxi-6514",
- .num_di_ports = 4,
- .num_do_ports = 4,
- .legacy_invert = 1,
- },
- [BOARD_PCI6515] = {
- .name = "pci-6515",
- .num_di_ports = 4,
- .num_do_ports = 4,
- .legacy_invert = 1,
- },
- [BOARD_PXI6515] = {
- .name = "pxi-6515",
- .num_di_ports = 4,
- .num_do_ports = 4,
- .legacy_invert = 1,
- },
- [BOARD_PCI6516] = {
- .name = "pci-6516",
- .num_do_ports = 4,
- .legacy_invert = 1,
- },
- [BOARD_PCI6517] = {
- .name = "pci-6517",
- .num_do_ports = 4,
- .legacy_invert = 1,
- },
- [BOARD_PCI6518] = {
- .name = "pci-6518",
- .num_di_ports = 2,
- .num_do_ports = 2,
- .legacy_invert = 1,
- },
- [BOARD_PCI6519] = {
- .name = "pci-6519",
- .num_di_ports = 2,
- .num_do_ports = 2,
- .legacy_invert = 1,
- },
- [BOARD_PCI6520] = {
- .name = "pci-6520",
- .num_di_ports = 1,
- .num_do_ports = 1,
- },
- [BOARD_PCI6521] = {
- .name = "pci-6521",
- .num_di_ports = 1,
- .num_do_ports = 1,
- },
- [BOARD_PXI6521] = {
- .name = "pxi-6521",
- .num_di_ports = 1,
- .num_do_ports = 1,
- },
- [BOARD_PCI6528] = {
- .name = "pci-6528",
- .num_di_ports = 3,
- .num_do_ports = 3,
- },
- [BOARD_PXI6528] = {
- .name = "pxi-6528",
- .num_di_ports = 3,
- .num_do_ports = 3,
- },
-};
-
-static bool ni_65xx_legacy_invert_outputs;
-module_param_named(legacy_invert_outputs, ni_65xx_legacy_invert_outputs,
- bool, 0444);
-MODULE_PARM_DESC(legacy_invert_outputs,
- "invert outputs of PCI/PXI-6513/6514/6515/6516/6517/6518/6519 for compatibility with old user code");
-
-static unsigned int ni_65xx_num_ports(struct comedi_device *dev)
-{
- const struct ni_65xx_board *board = dev->board_ptr;
-
- return board->num_dio_ports + board->num_di_ports + board->num_do_ports;
-}
-
-static void ni_65xx_disable_input_filters(struct comedi_device *dev)
-{
- unsigned int num_ports = ni_65xx_num_ports(dev);
- int i;
-
- /* disable input filtering on all ports */
- for (i = 0; i < num_ports; ++i)
- writeb(0x00, dev->mmio + NI_65XX_FILTER_ENA(i));
-
- /* set filter interval to 0 (32bit reg) */
- writel(0x00000000, dev->mmio + NI_65XX_FILTER_REG);
-}
-
-/* updates edge detection for base_chan to base_chan+31 */
-static void ni_65xx_update_edge_detection(struct comedi_device *dev,
- unsigned int base_chan,
- unsigned int rising,
- unsigned int falling)
-{
- unsigned int num_ports = ni_65xx_num_ports(dev);
- unsigned int port;
-
- if (base_chan >= NI_65XX_PORT_TO_CHAN(num_ports))
- return;
-
- for (port = NI_65XX_CHAN_TO_PORT(base_chan); port < num_ports; port++) {
- int bitshift = (int)(NI_65XX_PORT_TO_CHAN(port) - base_chan);
- unsigned int port_mask, port_rising, port_falling;
-
- if (bitshift >= 32)
- break;
-
- if (bitshift >= 0) {
- port_mask = ~0U >> bitshift;
- port_rising = rising >> bitshift;
- port_falling = falling >> bitshift;
- } else {
- port_mask = ~0U << -bitshift;
- port_rising = rising << -bitshift;
- port_falling = falling << -bitshift;
- }
- if (port_mask & 0xff) {
- if (~port_mask & 0xff) {
- port_rising |=
- readb(dev->mmio +
- NI_65XX_RISE_EDGE_ENA_REG(port)) &
- ~port_mask;
- port_falling |=
- readb(dev->mmio +
- NI_65XX_FALL_EDGE_ENA_REG(port)) &
- ~port_mask;
- }
- writeb(port_rising & 0xff,
- dev->mmio + NI_65XX_RISE_EDGE_ENA_REG(port));
- writeb(port_falling & 0xff,
- dev->mmio + NI_65XX_FALL_EDGE_ENA_REG(port));
- }
- }
-}
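
Editorial worked example for the shifting above (my reading of the code, hedged): with base_chan = 12 the 32-bit masks cover channels 12..43, so port 1 takes only its top nibble, ports 2..4 take full bytes, and port 5 takes its bottom nibble. A simplified user-space sketch of the per-port mask calculation (it ignores base_port for brevity):

/* Per-port byte mask for a 32-bit edge mask starting at base_chan. */
#include <assert.h>

static unsigned int port_mask_for(unsigned int port, unsigned int base_chan)
{
	int bitshift = (int)(port * 8) - (int)base_chan;

	if (bitshift >= 32)
		return 0;	/* the driver's loop stops at this point */
	return (bitshift >= 0 ? ~0u >> bitshift : ~0u << -bitshift) & 0xff;
}

int main(void)
{
	assert(port_mask_for(1, 12) == 0xf0);	/* channels 12..15 */
	assert(port_mask_for(2, 12) == 0xff);	/* channels 16..23 */
	assert(port_mask_for(5, 12) == 0x0f);	/* channels 40..43 */
	assert(port_mask_for(6, 12) == 0x00);	/* past the 32-bit mask */
	return 0;
}
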
-
-static void ni_65xx_disable_edge_detection(struct comedi_device *dev)
-{
- /* clear edge detection for channels 0 to 31 */
- ni_65xx_update_edge_detection(dev, 0, 0, 0);
- /* clear edge detection for channels 32 to 63 */
- ni_65xx_update_edge_detection(dev, 32, 0, 0);
- /* clear edge detection for channels 64 to 95 */
- ni_65xx_update_edge_detection(dev, 64, 0, 0);
-}
-
-static int ni_65xx_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long base_port = (unsigned long)s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int chan_mask = NI_65XX_CHAN_TO_MASK(chan);
- unsigned port = base_port + NI_65XX_CHAN_TO_PORT(chan);
- unsigned int interval;
- unsigned int val;
-
- switch (data[0]) {
- case INSN_CONFIG_FILTER:
- /*
- * The deglitch filter interval is specified in nanoseconds.
- * The hardware supports intervals in 200ns increments. Round to
- * the nearest 200ns step and return the actual interval used.
- */
- interval = (data[1] + 100) / 200;
- if (interval > 0xfffff)
- interval = 0xfffff;
- data[1] = interval * 200;
-
- /*
- * Enable/disable the channel for deglitch filtering. Note
- * that the filter interval is never set to '0'. This is done
- * because other channels might still be enabled for filtering.
- */
- val = readb(dev->mmio + NI_65XX_FILTER_ENA(port));
- if (interval) {
- writel(interval, dev->mmio + NI_65XX_FILTER_REG);
- val |= chan_mask;
- } else {
- val &= ~chan_mask;
- }
- writeb(val, dev->mmio + NI_65XX_FILTER_ENA(port));
- break;
-
- case INSN_CONFIG_DIO_OUTPUT:
- if (s->type != COMEDI_SUBD_DIO)
- return -EINVAL;
- writeb(NI_65XX_IO_SEL_OUTPUT,
- dev->mmio + NI_65XX_IO_SEL_REG(port));
- break;
-
- case INSN_CONFIG_DIO_INPUT:
- if (s->type != COMEDI_SUBD_DIO)
- return -EINVAL;
- writeb(NI_65XX_IO_SEL_INPUT,
- dev->mmio + NI_65XX_IO_SEL_REG(port));
- break;
-
- case INSN_CONFIG_DIO_QUERY:
- if (s->type != COMEDI_SUBD_DIO)
- return -EINVAL;
- val = readb(dev->mmio + NI_65XX_IO_SEL_REG(port));
- data[1] = (val == NI_65XX_IO_SEL_INPUT) ? COMEDI_INPUT
- : COMEDI_OUTPUT;
- break;
-
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long base_port = (unsigned long)s->private;
- unsigned int base_chan = CR_CHAN(insn->chanspec);
- int last_port_offset = NI_65XX_CHAN_TO_PORT(s->n_chan - 1);
- unsigned read_bits = 0;
- int port_offset;
-
- for (port_offset = NI_65XX_CHAN_TO_PORT(base_chan);
- port_offset <= last_port_offset; port_offset++) {
- unsigned port = base_port + port_offset;
- int base_port_channel = NI_65XX_PORT_TO_CHAN(port_offset);
- unsigned port_mask, port_data, bits;
- int bitshift = base_port_channel - base_chan;
-
- if (bitshift >= 32)
- break;
- port_mask = data[0];
- port_data = data[1];
- if (bitshift > 0) {
- port_mask >>= bitshift;
- port_data >>= bitshift;
- } else {
- port_mask <<= -bitshift;
- port_data <<= -bitshift;
- }
- port_mask &= 0xff;
- port_data &= 0xff;
-
- /* update the outputs */
- if (port_mask) {
- bits = readb(dev->mmio + NI_65XX_IO_DATA_REG(port));
- bits ^= s->io_bits; /* invert if necessary */
- bits &= ~port_mask;
- bits |= (port_data & port_mask);
- bits ^= s->io_bits; /* invert back */
- writeb(bits, dev->mmio + NI_65XX_IO_DATA_REG(port));
- }
-
- /* read back the actual state */
- bits = readb(dev->mmio + NI_65XX_IO_DATA_REG(port));
- bits ^= s->io_bits; /* invert if necessary */
- if (bitshift > 0)
- bits <<= bitshift;
- else
- bits >>= -bitshift;
-
- read_bits |= bits;
- }
- data[1] = read_bits;
- return insn->n;
-}
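
Editorial note on the two XORs above: s->io_bits is used here as a per-channel inversion mask (see the legacy_invert boards and the legacy_invert_outputs parameter described in the driver comment), so the port state is converted to logical polarity, updated, and converted back before being written. A minimal user-space sketch of that pattern:

/* Invert, read-modify-write, invert back - as in the port update above. */
#include <assert.h>

static unsigned char update_port(unsigned char hw_bits, unsigned char invert,
				 unsigned char mask, unsigned char data)
{
	unsigned char bits = hw_bits ^ invert;	/* to logical polarity */

	bits = (bits & ~mask) | (data & mask);	/* masked update */
	return bits ^ invert;			/* back to hardware polarity */
}

int main(void)
{
	/* all lines inverted: writing logical 1s clears the hardware bits */
	assert(update_port(0xff, 0xff, 0x0f, 0x0f) == 0xf0);
	/* no inversion: plain masked write */
	assert(update_port(0x00, 0x00, 0x0f, 0x0a) == 0x0a);
	return 0;
}
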
-
-static irqreturn_t ni_65xx_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status;
-
- status = readb(dev->mmio + NI_65XX_STATUS_REG);
- if ((status & NI_65XX_STATUS_INT) == 0)
- return IRQ_NONE;
- if ((status & NI_65XX_STATUS_EDGE_INT) == 0)
- return IRQ_NONE;
-
- writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
- dev->mmio + NI_65XX_CLR_REG);
-
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int ni_65xx_intr_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_OTHER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-static int ni_65xx_intr_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
- dev->mmio + NI_65XX_CLR_REG);
- writeb(NI_65XX_CTRL_FALL_EDGE_ENA | NI_65XX_CTRL_RISE_EDGE_ENA |
- NI_65XX_CTRL_INT_ENA | NI_65XX_CTRL_EDGE_ENA,
- dev->mmio + NI_65XX_CTRL_REG);
-
- return 0;
-}
-
-static int ni_65xx_intr_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- writeb(0x00, dev->mmio + NI_65XX_CTRL_REG);
-
- return 0;
-}
-
-static int ni_65xx_intr_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = 0;
- return insn->n;
-}
-
-static int ni_65xx_intr_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- switch (data[0]) {
- case INSN_CONFIG_CHANGE_NOTIFY:
- /* add instruction to check_insn_config_length() */
- if (insn->n != 3)
- return -EINVAL;
-
- /* update edge detection for channels 0 to 31 */
- ni_65xx_update_edge_detection(dev, 0, data[1], data[2]);
- /* clear edge detection for channels 32 to 63 */
- ni_65xx_update_edge_detection(dev, 32, 0, 0);
- /* clear edge detection for channels 64 to 95 */
- ni_65xx_update_edge_detection(dev, 64, 0, 0);
- break;
- case INSN_CONFIG_DIGITAL_TRIG:
- /* check trigger number */
- if (data[1] != 0)
- return -EINVAL;
- /* check digital trigger operation */
- switch (data[2]) {
- case COMEDI_DIGITAL_TRIG_DISABLE:
- ni_65xx_disable_edge_detection(dev);
- break;
- case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
- /*
- * update edge detection for channels data[3]
- * to (data[3] + 31)
- */
- ni_65xx_update_edge_detection(dev, data[3],
- data[4], data[5]);
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-/* ripped from mite.h and mite_setup2() to avoid mite dependency */
-#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
-#define WENAB (1 << 7) /* window enable */
-
-static int ni_65xx_mite_init(struct pci_dev *pcidev)
-{
- void __iomem *mite_base;
- u32 main_phys_addr;
-
- /* ioremap the MITE registers (BAR 0) temporarily */
- mite_base = pci_ioremap_bar(pcidev, 0);
- if (!mite_base)
- return -ENOMEM;
-
- /* set data window to main registers (BAR 1) */
- main_phys_addr = pci_resource_start(pcidev, 1);
- writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
-
- /* finished with MITE registers */
- iounmap(mite_base);
- return 0;
-}
-
-static int ni_65xx_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct ni_65xx_board *board = NULL;
- struct comedi_subdevice *s;
- unsigned i;
- int ret;
-
- if (context < ARRAY_SIZE(ni_65xx_boards))
- board = &ni_65xx_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- ret = ni_65xx_mite_init(pcidev);
- if (ret)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 1);
- if (!dev->mmio)
- return -ENOMEM;
-
- writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
- dev->mmio + NI_65XX_CLR_REG);
- writeb(0x00, dev->mmio + NI_65XX_CTRL_REG);
-
- if (pcidev->irq) {
- ret = request_irq(pcidev->irq, ni_65xx_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
-	dev_info(dev->class_dev, "board: %s, ID=0x%02x\n", dev->board_name,
- readb(dev->mmio + NI_65XX_ID_REG));
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- if (board->num_di_ports) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = NI_65XX_PORT_TO_CHAN(board->num_di_ports);
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_65xx_dio_insn_bits;
- s->insn_config = ni_65xx_dio_insn_config;
-
- /* the input ports always start at port 0 */
- s->private = (void *)0;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[1];
- if (board->num_do_ports) {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = NI_65XX_PORT_TO_CHAN(board->num_do_ports);
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_65xx_dio_insn_bits;
-
- /* the output ports always start after the input ports */
- s->private = (void *)(unsigned long)board->num_di_ports;
-
- /*
- * Use the io_bits to handle the inverted outputs. Inverted
- * outputs are only supported if the "legacy_invert_outputs"
- * module parameter is set to "true".
- */
- if (ni_65xx_legacy_invert_outputs && board->legacy_invert)
- s->io_bits = 0xff;
-
- /* reset all output ports to comedi '0' */
- for (i = 0; i < board->num_do_ports; ++i) {
- writeb(s->io_bits, /* inverted if necessary */
- dev->mmio +
- NI_65XX_IO_DATA_REG(board->num_di_ports + i));
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[2];
- if (board->num_dio_ports) {
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = NI_65XX_PORT_TO_CHAN(board->num_dio_ports);
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_65xx_dio_insn_bits;
- s->insn_config = ni_65xx_dio_insn_config;
-
- /* the input/output ports always start at port 0 */
- s->private = (void *)0;
-
- /* configure all ports for input */
- for (i = 0; i < board->num_dio_ports; ++i) {
- writeb(NI_65XX_IO_SEL_INPUT,
- dev->mmio + NI_65XX_IO_SEL_REG(i));
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_65xx_intr_insn_bits;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 1;
- s->insn_config = ni_65xx_intr_insn_config;
- s->do_cmdtest = ni_65xx_intr_cmdtest;
- s->do_cmd = ni_65xx_intr_cmd;
- s->cancel = ni_65xx_intr_cancel;
- }
-
- ni_65xx_disable_input_filters(dev);
- ni_65xx_disable_edge_detection(dev);
-
- return 0;
-}
-
-static void ni_65xx_detach(struct comedi_device *dev)
-{
- if (dev->mmio)
- writeb(0x00, dev->mmio + NI_65XX_CTRL_REG);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver ni_65xx_driver = {
- .driver_name = "ni_65xx",
- .module = THIS_MODULE,
- .auto_attach = ni_65xx_auto_attach,
- .detach = ni_65xx_detach,
-};
-
-static int ni_65xx_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &ni_65xx_driver, id->driver_data);
-}
-
-static const struct pci_device_id ni_65xx_pci_table[] = {
- { PCI_VDEVICE(NI, 0x1710), BOARD_PXI6509 },
- { PCI_VDEVICE(NI, 0x7085), BOARD_PCI6509 },
- { PCI_VDEVICE(NI, 0x7086), BOARD_PXI6528 },
- { PCI_VDEVICE(NI, 0x7087), BOARD_PCI6515 },
- { PCI_VDEVICE(NI, 0x7088), BOARD_PCI6514 },
- { PCI_VDEVICE(NI, 0x70a9), BOARD_PCI6528 },
- { PCI_VDEVICE(NI, 0x70c3), BOARD_PCI6511 },
- { PCI_VDEVICE(NI, 0x70c8), BOARD_PCI6513 },
- { PCI_VDEVICE(NI, 0x70c9), BOARD_PXI6515 },
- { PCI_VDEVICE(NI, 0x70cc), BOARD_PCI6512 },
- { PCI_VDEVICE(NI, 0x70cd), BOARD_PXI6514 },
- { PCI_VDEVICE(NI, 0x70d1), BOARD_PXI6513 },
- { PCI_VDEVICE(NI, 0x70d2), BOARD_PXI6512 },
- { PCI_VDEVICE(NI, 0x70d3), BOARD_PXI6511 },
- { PCI_VDEVICE(NI, 0x7124), BOARD_PCI6510 },
- { PCI_VDEVICE(NI, 0x7125), BOARD_PCI6516 },
- { PCI_VDEVICE(NI, 0x7126), BOARD_PCI6517 },
- { PCI_VDEVICE(NI, 0x7127), BOARD_PCI6518 },
- { PCI_VDEVICE(NI, 0x7128), BOARD_PCI6519 },
- { PCI_VDEVICE(NI, 0x718b), BOARD_PCI6521 },
- { PCI_VDEVICE(NI, 0x718c), BOARD_PXI6521 },
- { PCI_VDEVICE(NI, 0x71c5), BOARD_PCI6520 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ni_65xx_pci_table);
-
-static struct pci_driver ni_65xx_pci_driver = {
- .name = "ni_65xx",
- .id_table = ni_65xx_pci_table,
- .probe = ni_65xx_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(ni_65xx_driver, ni_65xx_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for NI PCI-65xx static dio boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
deleted file mode 100644
index 46647c64f369..000000000000
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ /dev/null
@@ -1,1222 +0,0 @@
-/*
- comedi/drivers/ni_660x.c
- Hardware driver for NI 660x devices
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
- * Driver: ni_660x
- * Description: National Instruments 660x counter/timer boards
- * Devices: [National Instruments] PCI-6601 (ni_660x), PCI-6602, PXI-6602,
- * PXI-6608, PXI-6624
- * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
- * Herman.Bruyninckx@mech.kuleuven.ac.be,
- * Wim.Meeussen@mech.kuleuven.ac.be,
- * Klaas.Gadeyne@mech.kuleuven.ac.be,
- * Frank Mori Hess <fmhess@users.sourceforge.net>
- * Updated: Fri, 15 Mar 2013 10:47:56 +0000
- * Status: experimental
- *
- * Encoders work. Pulse generation (both single pulse and pulse train)
- * works. Buffered commands work for input but not output.
- *
- * References:
- * DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
- * DAQ 6601/6602 User Manual (NI 322137B-01)
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "mite.h"
-#include "ni_tio.h"
-
-enum ni_660x_constants {
- min_counter_pfi_chan = 8,
- max_dio_pfi_chan = 31,
- counters_per_chip = 4
-};
-
-#define NUM_PFI_CHANNELS 40
-/*
- * really there are only up to 3 dma channels, but the register layout
- * allows for 4
- */
-#define MAX_DMA_CHANNEL 4
-
-/* See Register-Level Programmer Manual page 3.1 */
-enum ni_660x_register {
- NI660X_G0_INT_ACK,
- NI660X_G0_STATUS,
- NI660X_G1_INT_ACK,
- NI660X_G1_STATUS,
- NI660X_G01_STATUS,
- NI660X_G0_CMD,
- NI660X_STC_DIO_PARALLEL_INPUT,
- NI660X_G1_CMD,
- NI660X_G0_HW_SAVE,
- NI660X_G1_HW_SAVE,
- NI660X_STC_DIO_OUTPUT,
- NI660X_STC_DIO_CONTROL,
- NI660X_G0_SW_SAVE,
- NI660X_G1_SW_SAVE,
- NI660X_G0_MODE,
- NI660X_G01_STATUS1,
- NI660X_G1_MODE,
- NI660X_STC_DIO_SERIAL_INPUT,
- NI660X_G0_LOADA,
- NI660X_G01_STATUS2,
- NI660X_G0_LOADB,
- NI660X_G1_LOADA,
- NI660X_G1_LOADB,
- NI660X_G0_INPUT_SEL,
- NI660X_G1_INPUT_SEL,
- NI660X_G0_AUTO_INC,
- NI660X_G1_AUTO_INC,
- NI660X_G01_RESET,
- NI660X_G0_INT_ENA,
- NI660X_G1_INT_ENA,
- NI660X_G0_CNT_MODE,
- NI660X_G1_CNT_MODE,
- NI660X_G0_GATE2,
- NI660X_G1_GATE2,
- NI660X_G0_DMA_CFG,
- NI660X_G0_DMA_STATUS,
- NI660X_G1_DMA_CFG,
- NI660X_G1_DMA_STATUS,
- NI660X_G2_INT_ACK,
- NI660X_G2_STATUS,
- NI660X_G3_INT_ACK,
- NI660X_G3_STATUS,
- NI660X_G23_STATUS,
- NI660X_G2_CMD,
- NI660X_G3_CMD,
- NI660X_G2_HW_SAVE,
- NI660X_G3_HW_SAVE,
- NI660X_G2_SW_SAVE,
- NI660X_G3_SW_SAVE,
- NI660X_G2_MODE,
- NI660X_G23_STATUS1,
- NI660X_G3_MODE,
- NI660X_G2_LOADA,
- NI660X_G23_STATUS2,
- NI660X_G2_LOADB,
- NI660X_G3_LOADA,
- NI660X_G3_LOADB,
- NI660X_G2_INPUT_SEL,
- NI660X_G3_INPUT_SEL,
- NI660X_G2_AUTO_INC,
- NI660X_G3_AUTO_INC,
- NI660X_G23_RESET,
- NI660X_G2_INT_ENA,
- NI660X_G3_INT_ENA,
- NI660X_G2_CNT_MODE,
- NI660X_G3_CNT_MODE,
- NI660X_G3_GATE2,
- NI660X_G2_GATE2,
- NI660X_G2_DMA_CFG,
- NI660X_G2_DMA_STATUS,
- NI660X_G3_DMA_CFG,
- NI660X_G3_DMA_STATUS,
- NI660X_DIO32_INPUT,
- NI660X_DIO32_OUTPUT,
- NI660X_CLK_CFG,
- NI660X_GLOBAL_INT_STATUS,
- NI660X_DMA_CFG,
- NI660X_GLOBAL_INT_CFG,
- NI660X_IO_CFG_0_1,
- NI660X_IO_CFG_2_3,
- NI660X_IO_CFG_4_5,
- NI660X_IO_CFG_6_7,
- NI660X_IO_CFG_8_9,
- NI660X_IO_CFG_10_11,
- NI660X_IO_CFG_12_13,
- NI660X_IO_CFG_14_15,
- NI660X_IO_CFG_16_17,
- NI660X_IO_CFG_18_19,
- NI660X_IO_CFG_20_21,
- NI660X_IO_CFG_22_23,
- NI660X_IO_CFG_24_25,
- NI660X_IO_CFG_26_27,
- NI660X_IO_CFG_28_29,
- NI660X_IO_CFG_30_31,
- NI660X_IO_CFG_32_33,
- NI660X_IO_CFG_34_35,
- NI660X_IO_CFG_36_37,
- NI660X_IO_CFG_38_39,
- NI660X_NUM_REGS,
-};
-
-static inline unsigned IOConfigReg(unsigned pfi_channel)
-{
- unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2;
-
- BUG_ON(reg > NI660X_IO_CFG_38_39);
- return reg;
-}
-
-enum ni_660x_register_width {
- DATA_1B,
- DATA_2B,
- DATA_4B
-};
-
-enum ni_660x_register_direction {
- NI_660x_READ,
- NI_660x_WRITE,
- NI_660x_READ_WRITE
-};
-
-enum ni_660x_pfi_output_select {
- pfi_output_select_high_Z = 0,
- pfi_output_select_counter = 1,
- pfi_output_select_do = 2,
- num_pfi_output_selects
-};
-
-enum ni_660x_subdevices {
- NI_660X_DIO_SUBDEV = 1,
- NI_660X_GPCT_SUBDEV_0 = 2
-};
-static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index)
-{
- return NI_660X_GPCT_SUBDEV_0 + index;
-}
-
-struct NI_660xRegisterData {
- const char *name; /* Register Name */
- int offset; /* Offset from base address from GPCT chip */
- enum ni_660x_register_direction direction;
- enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */
-};
-
-static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = {
- {"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
- {"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
- {"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
- {"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
- {"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
- {"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
- {"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
- {"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
- {"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
- {"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
- {"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
- {"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
- {"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
- {"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
- {"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
- {"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
- {"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
- {"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
- {"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
- {"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
- {"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
- {"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
- {"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
- {"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
- {"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
- {"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
- {"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
- {"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
- {"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
- {"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
- {"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
- {"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
- {"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
- {"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
- {"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
- {"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
- {"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
- {"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
- {"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
- {"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
- {"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
- {"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
- {"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
- {"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
- {"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
- {"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
- {"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
- {"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
- {"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
- {"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
- {"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
- {"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
- {"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
- {"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
- {"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
- {"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
- {"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
- {"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
- {"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
- {"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
- {"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
- {"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
- {"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
- {"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
- {"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
- {"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
- {"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
- {"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
- {"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
-};
-
-/*
- * Clock config register bit: CounterSwap must be set on the second TIO
- * chip so that it does not use the same pins as the first chip.
- */
-enum clock_config_register_bits {
- CounterSwap = 0x1 << 21
-};
-
-/* ioconfigreg */
-static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
-{
- return (pfi_channel % 2) ? 0 : 8;
-}
-
-static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
-{
- return 0x3 << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
- unsigned output_select)
-{
- return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
-{
- return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
-}
-
-static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
- unsigned input_select)
-{
- return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
-}
-
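/*
 * A minimal sketch of how the helpers above pack two PFI channels into one
 * 16-bit IO config register (hypothetical helper, illustrative values only):
 * even channels use the high byte, odd channels the low byte, with the
 * output select in the low two bits of that byte and the input/filter
 * select in bits 6:4 of that byte.
 */
static inline unsigned int ni_660x_example_io_cfg(void)
{
	/* PFI 6 is even: output select lands in bits 9:8, filter in 14:12 */
	return pfi_output_select_bits(6, pfi_output_select_do) |
	       pfi_input_select_bits(6, 1);
}
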
-/* dma configuration register bits */
-static inline unsigned dma_select_mask(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x1f << (8 * dma_channel);
-}
-
-enum dma_selection {
- dma_selection_none = 0x1f,
-};
-
-static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
-}
-
-static inline unsigned dma_reset_bit(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x80 << (8 * dma_channel);
-}
-
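/*
 * Sketch of how a DMA configuration word is composed from the helpers
 * above (hypothetical helper; counter/channel numbers are illustrative).
 * Each MITE channel owns a 5-bit selector at (8 * channel); 0x1f parks the
 * channel, and the per-channel 0x80 bit pulses its reset.
 */
static inline unsigned int ni_660x_example_dma_cfg(void)
{
	unsigned int cfg = 0;
	unsigned int i;

	/* park MITE channels 1..3, then point channel 0 at counter 2 */
	for (i = 1; i < MAX_DMA_CHANNEL; i++)
		cfg |= dma_select_bits(i, dma_selection_none);
	cfg |= dma_select_bits(0, 2);
	return cfg | dma_reset_bit(0);
}
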
-enum global_interrupt_status_register_bits {
- Counter_0_Int_Bit = 0x100,
- Counter_1_Int_Bit = 0x200,
- Counter_2_Int_Bit = 0x400,
- Counter_3_Int_Bit = 0x800,
- Cascade_Int_Bit = 0x20000000,
- Global_Int_Bit = 0x80000000
-};
-
-enum global_interrupt_config_register_bits {
- Cascade_Int_Enable_Bit = 0x20000000,
- Global_Int_Polarity_Bit = 0x40000000,
- Global_Int_Enable_Bit = 0x80000000
-};
-
-/* Offset of the GPCT chips from the base-address of the card */
-/* First chip is at base-address + 0x00, etc. */
-static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
-
-enum ni_660x_boardid {
- BOARD_PCI6601,
- BOARD_PCI6602,
- BOARD_PXI6602,
- BOARD_PXI6608,
- BOARD_PXI6624
-};
-
-struct ni_660x_board {
- const char *name;
- unsigned n_chips; /* total number of TIO chips */
-};
-
-static const struct ni_660x_board ni_660x_boards[] = {
- [BOARD_PCI6601] = {
- .name = "PCI-6601",
- .n_chips = 1,
- },
- [BOARD_PCI6602] = {
- .name = "PCI-6602",
- .n_chips = 2,
- },
- [BOARD_PXI6602] = {
- .name = "PXI-6602",
- .n_chips = 2,
- },
- [BOARD_PXI6608] = {
- .name = "PXI-6608",
- .n_chips = 2,
- },
- [BOARD_PXI6624] = {
- .name = "PXI-6624",
- .n_chips = 2,
- },
-};
-
-#define NI_660X_MAX_NUM_CHIPS 2
-#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
-
-struct ni_660x_private {
- struct mite_struct *mite;
- struct ni_gpct_device *counter_dev;
- uint64_t pfi_direction_bits;
- struct mite_dma_descriptor_ring
- *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
- spinlock_t mite_channel_lock;
- /* interrupt_lock prevents races between interrupt and comedi_poll */
- spinlock_t interrupt_lock;
- unsigned dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS];
- spinlock_t soft_reg_copy_lock;
- unsigned short pfi_output_selects[NUM_PFI_CHANNELS];
-};
-
-static inline unsigned ni_660x_num_counters(struct comedi_device *dev)
-{
- const struct ni_660x_board *board = dev->board_ptr;
-
- return board->n_chips * counters_per_chip;
-}
-
-static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg)
-{
- switch (reg) {
- case NITIO_G0_AUTO_INC:
- return NI660X_G0_AUTO_INC;
- case NITIO_G1_AUTO_INC:
- return NI660X_G1_AUTO_INC;
- case NITIO_G2_AUTO_INC:
- return NI660X_G2_AUTO_INC;
- case NITIO_G3_AUTO_INC:
- return NI660X_G3_AUTO_INC;
- case NITIO_G0_CMD:
- return NI660X_G0_CMD;
- case NITIO_G1_CMD:
- return NI660X_G1_CMD;
- case NITIO_G2_CMD:
- return NI660X_G2_CMD;
- case NITIO_G3_CMD:
- return NI660X_G3_CMD;
- case NITIO_G0_HW_SAVE:
- return NI660X_G0_HW_SAVE;
- case NITIO_G1_HW_SAVE:
- return NI660X_G1_HW_SAVE;
- case NITIO_G2_HW_SAVE:
- return NI660X_G2_HW_SAVE;
- case NITIO_G3_HW_SAVE:
- return NI660X_G3_HW_SAVE;
- case NITIO_G0_SW_SAVE:
- return NI660X_G0_SW_SAVE;
- case NITIO_G1_SW_SAVE:
- return NI660X_G1_SW_SAVE;
- case NITIO_G2_SW_SAVE:
- return NI660X_G2_SW_SAVE;
- case NITIO_G3_SW_SAVE:
- return NI660X_G3_SW_SAVE;
- case NITIO_G0_MODE:
- return NI660X_G0_MODE;
- case NITIO_G1_MODE:
- return NI660X_G1_MODE;
- case NITIO_G2_MODE:
- return NI660X_G2_MODE;
- case NITIO_G3_MODE:
- return NI660X_G3_MODE;
- case NITIO_G0_LOADA:
- return NI660X_G0_LOADA;
- case NITIO_G1_LOADA:
- return NI660X_G1_LOADA;
- case NITIO_G2_LOADA:
- return NI660X_G2_LOADA;
- case NITIO_G3_LOADA:
- return NI660X_G3_LOADA;
- case NITIO_G0_LOADB:
- return NI660X_G0_LOADB;
- case NITIO_G1_LOADB:
- return NI660X_G1_LOADB;
- case NITIO_G2_LOADB:
- return NI660X_G2_LOADB;
- case NITIO_G3_LOADB:
- return NI660X_G3_LOADB;
- case NITIO_G0_INPUT_SEL:
- return NI660X_G0_INPUT_SEL;
- case NITIO_G1_INPUT_SEL:
- return NI660X_G1_INPUT_SEL;
- case NITIO_G2_INPUT_SEL:
- return NI660X_G2_INPUT_SEL;
- case NITIO_G3_INPUT_SEL:
- return NI660X_G3_INPUT_SEL;
- case NITIO_G01_STATUS:
- return NI660X_G01_STATUS;
- case NITIO_G23_STATUS:
- return NI660X_G23_STATUS;
- case NITIO_G01_RESET:
- return NI660X_G01_RESET;
- case NITIO_G23_RESET:
- return NI660X_G23_RESET;
- case NITIO_G01_STATUS1:
- return NI660X_G01_STATUS1;
- case NITIO_G23_STATUS1:
- return NI660X_G23_STATUS1;
- case NITIO_G01_STATUS2:
- return NI660X_G01_STATUS2;
- case NITIO_G23_STATUS2:
- return NI660X_G23_STATUS2;
- case NITIO_G0_CNT_MODE:
- return NI660X_G0_CNT_MODE;
- case NITIO_G1_CNT_MODE:
- return NI660X_G1_CNT_MODE;
- case NITIO_G2_CNT_MODE:
- return NI660X_G2_CNT_MODE;
- case NITIO_G3_CNT_MODE:
- return NI660X_G3_CNT_MODE;
- case NITIO_G0_GATE2:
- return NI660X_G0_GATE2;
- case NITIO_G1_GATE2:
- return NI660X_G1_GATE2;
- case NITIO_G2_GATE2:
- return NI660X_G2_GATE2;
- case NITIO_G3_GATE2:
- return NI660X_G3_GATE2;
- case NITIO_G0_DMA_CFG:
- return NI660X_G0_DMA_CFG;
- case NITIO_G0_DMA_STATUS:
- return NI660X_G0_DMA_STATUS;
- case NITIO_G1_DMA_CFG:
- return NI660X_G1_DMA_CFG;
- case NITIO_G1_DMA_STATUS:
- return NI660X_G1_DMA_STATUS;
- case NITIO_G2_DMA_CFG:
- return NI660X_G2_DMA_CFG;
- case NITIO_G2_DMA_STATUS:
- return NI660X_G2_DMA_STATUS;
- case NITIO_G3_DMA_CFG:
- return NI660X_G3_DMA_CFG;
- case NITIO_G3_DMA_STATUS:
- return NI660X_G3_DMA_STATUS;
- case NITIO_G0_INT_ACK:
- return NI660X_G0_INT_ACK;
- case NITIO_G1_INT_ACK:
- return NI660X_G1_INT_ACK;
- case NITIO_G2_INT_ACK:
- return NI660X_G2_INT_ACK;
- case NITIO_G3_INT_ACK:
- return NI660X_G3_INT_ACK;
- case NITIO_G0_STATUS:
- return NI660X_G0_STATUS;
- case NITIO_G1_STATUS:
- return NI660X_G1_STATUS;
- case NITIO_G2_STATUS:
- return NI660X_G2_STATUS;
- case NITIO_G3_STATUS:
- return NI660X_G3_STATUS;
- case NITIO_G0_INT_ENA:
- return NI660X_G0_INT_ENA;
- case NITIO_G1_INT_ENA:
- return NI660X_G1_INT_ENA;
- case NITIO_G2_INT_ENA:
- return NI660X_G2_INT_ENA;
- case NITIO_G3_INT_ENA:
- return NI660X_G3_INT_ENA;
- default:
- BUG();
- return 0;
- }
-}
-
-static inline void ni_660x_write_register(struct comedi_device *dev,
- unsigned chip, unsigned bits,
- enum ni_660x_register reg)
-{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
-
- switch (registerData[reg].size) {
- case DATA_2B:
- writew(bits, dev->mmio + addr);
- break;
- case DATA_4B:
- writel(bits, dev->mmio + addr);
- break;
- default:
- BUG();
- break;
- }
-}
-
-static inline unsigned ni_660x_read_register(struct comedi_device *dev,
- unsigned chip,
- enum ni_660x_register reg)
-{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
-
- switch (registerData[reg].size) {
- case DATA_2B:
- return readw(dev->mmio + addr);
- case DATA_4B:
- return readl(dev->mmio + addr);
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
-{
- struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
-
- ni_660x_write_register(dev, chip, bits, ni_660x_register);
-}
-
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
-{
- struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
-
- return ni_660x_read_register(dev, chip, ni_660x_register);
-}
-
-static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
- *priv,
- struct ni_gpct
- *counter)
-{
- unsigned chip = counter->chip_index;
-
- return priv->mite_rings[chip][counter->counter_index];
-}
-
-static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
- struct ni_gpct *counter)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, counter->counter_index);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip] |
- dma_reset_bit(mite_channel), NI660X_DMA_CFG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
-
-static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
- struct ni_gpct *counter)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, dma_selection_none);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip],
- NI660X_DMA_CFG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
-
-static int ni_660x_request_mite_channel(struct comedi_device *dev,
- struct ni_gpct *counter,
- enum comedi_io_direction direction)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned long flags;
- struct mite_channel *mite_chan;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(counter->mite_chan);
- mite_chan = mite_request_channel(devpriv->mite,
- mite_ring(devpriv, counter));
- if (!mite_chan) {
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- dev_err(dev->class_dev,
- "failed to reserve mite dma channel for counter\n");
- return -EBUSY;
- }
- mite_chan->dir = direction;
- ni_tio_set_mite_channel(counter, mite_chan);
- ni_660x_set_dma_channel(dev, mite_chan->channel, counter);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- return 0;
-}
-
-static void ni_660x_release_mite_channel(struct comedi_device *dev,
- struct ni_gpct *counter)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (counter->mite_chan) {
- struct mite_channel *mite_chan = counter->mite_chan;
-
- ni_660x_unset_dma_channel(dev, mite_chan->channel, counter);
- ni_tio_set_mite_channel(counter, NULL);
- mite_release_channel(mite_chan);
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-
-static int ni_660x_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
- int retval;
-
- retval = ni_660x_request_mite_channel(dev, counter, COMEDI_INPUT);
- if (retval) {
- dev_err(dev->class_dev,
- "no dma channel available for use by counter\n");
- return retval;
- }
- ni_tio_acknowledge(counter);
-
- return ni_tio_cmd(dev, s);
-}
-
-static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
- int retval;
-
- retval = ni_tio_cancel(counter);
- ni_660x_release_mite_channel(dev, counter);
- return retval;
-}
-
-static void set_tio_counterswap(struct comedi_device *dev, int chip)
-{
- unsigned bits = 0;
-
- /*
- * See P. 3.5 of the Register-Level Programming manual.
- * The CounterSwap bit has to be set on the second chip,
- * otherwise it will try to use the same pins as the
- * first chip.
- */
- if (chip)
- bits = CounterSwap;
-
- ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG);
-}
-
-static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
-
- ni_tio_handle_interrupt(counter, s);
- comedi_handle_events(dev, s);
-}
-
-static irqreturn_t ni_660x_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct ni_660x_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- unsigned i;
- unsigned long flags;
-
- if (!dev->attached)
- return IRQ_NONE;
- /* lock to avoid race with comedi_poll */
- spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- smp_mb();
- for (i = 0; i < ni_660x_num_counters(dev); ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- ni_660x_handle_gpct_interrupt(dev, s);
- }
- spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
- return IRQ_HANDLED;
-}
-
-static int ni_660x_input_poll(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_660x_private *devpriv = dev->private;
- struct ni_gpct *counter = s->private;
- unsigned long flags;
-
- /* lock to avoid race with comedi_poll */
- spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- mite_sync_input_dma(counter->mite_chan, s);
- spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
- return comedi_buf_read_n_available(s);
-}
-
-static int ni_660x_buf_change(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_660x_private *devpriv = dev->private;
- struct ni_gpct *counter = s->private;
- int ret;
-
- ret = mite_buf_change(mite_ring(devpriv, counter), s);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int ni_660x_allocate_private(struct comedi_device *dev)
-{
- struct ni_660x_private *devpriv;
- unsigned i;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- spin_lock_init(&devpriv->mite_channel_lock);
- spin_lock_init(&devpriv->interrupt_lock);
- spin_lock_init(&devpriv->soft_reg_copy_lock);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- devpriv->pfi_output_selects[i] = pfi_output_select_counter;
-
- return 0;
-}
-
-static int ni_660x_alloc_mite_rings(struct comedi_device *dev)
-{
- const struct ni_660x_board *board = dev->board_ptr;
- struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
-
- for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j) {
- devpriv->mite_rings[i][j] =
- mite_alloc_ring(devpriv->mite);
- if (!devpriv->mite_rings[i][j])
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void ni_660x_free_mite_rings(struct comedi_device *dev)
-{
- const struct ni_660x_board *board = dev->board_ptr;
- struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
-
- for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j)
- mite_free_ring(devpriv->mite_rings[i][j]);
- }
-}
-
-static void init_tio_chip(struct comedi_device *dev, int chipset)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned i;
-
- /* init dma configuration register */
- devpriv->dma_configuration_soft_copies[chipset] = 0;
- for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
- devpriv->dma_configuration_soft_copies[chipset] |=
- dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
- }
- ni_660x_write_register(dev, chipset,
- devpriv->dma_configuration_soft_copies[chipset],
- NI660X_DMA_CFG);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
-}
-
-static int ni_660x_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned base_bitfield_channel = CR_CHAN(insn->chanspec);
-
- /* Check if we have to write some bits */
- if (data[0]) {
- s->state &= ~(data[0] << base_bitfield_channel);
- s->state |= (data[0] & data[1]) << base_bitfield_channel;
- /* Write out the new digital output lines */
- ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT);
- }
- /* on return, data[1] contains the value of the digital
- * input and output lines. */
- data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >>
- base_bitfield_channel);
-
- return insn->n;
-}
-
-static void ni_660x_select_pfi_output(struct comedi_device *dev,
- unsigned pfi_channel,
- unsigned output_select)
-{
- const struct ni_660x_board *board = dev->board_ptr;
- static const unsigned counter_4_7_first_pfi = 8;
- static const unsigned counter_4_7_last_pfi = 23;
- unsigned active_chipset = 0;
- unsigned idle_chipset = 0;
- unsigned active_bits;
- unsigned idle_bits;
-
- if (board->n_chips > 1) {
- if (output_select == pfi_output_select_counter &&
- pfi_channel >= counter_4_7_first_pfi &&
- pfi_channel <= counter_4_7_last_pfi) {
- active_chipset = 1;
- idle_chipset = 0;
- } else {
- active_chipset = 0;
- idle_chipset = 1;
- }
- }
-
- if (idle_chipset != active_chipset) {
- idle_bits =
- ni_660x_read_register(dev, idle_chipset,
- IOConfigReg(pfi_channel));
- idle_bits &= ~pfi_output_select_mask(pfi_channel);
- idle_bits |=
- pfi_output_select_bits(pfi_channel,
- pfi_output_select_high_Z);
- ni_660x_write_register(dev, idle_chipset, idle_bits,
- IOConfigReg(pfi_channel));
- }
-
- active_bits =
- ni_660x_read_register(dev, active_chipset,
- IOConfigReg(pfi_channel));
- active_bits &= ~pfi_output_select_mask(pfi_channel);
- active_bits |= pfi_output_select_bits(pfi_channel, output_select);
- ni_660x_write_register(dev, active_chipset, active_bits,
- IOConfigReg(pfi_channel));
-}
-
-static int ni_660x_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
-{
- struct ni_660x_private *devpriv = dev->private;
-
- if (source > num_pfi_output_selects)
- return -EINVAL;
- if (source == pfi_output_select_high_Z)
- return -EINVAL;
- if (chan < min_counter_pfi_chan) {
- if (source == pfi_output_select_counter)
- return -EINVAL;
- } else if (chan > max_dio_pfi_chan) {
- if (source == pfi_output_select_do)
- return -EINVAL;
- }
-
- devpriv->pfi_output_selects[chan] = source;
- if (devpriv->pfi_direction_bits & (((uint64_t) 1) << chan))
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
- return 0;
-}
-
-static int ni_660x_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- uint64_t bit = 1ULL << chan;
- unsigned int val;
- int ret;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- devpriv->pfi_direction_bits |= bit;
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
- break;
-
- case INSN_CONFIG_DIO_INPUT:
- devpriv->pfi_direction_bits &= ~bit;
- ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z);
- break;
-
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (devpriv->pfi_direction_bits & bit) ? COMEDI_OUTPUT
- : COMEDI_INPUT;
- break;
-
- case INSN_CONFIG_SET_ROUTING:
- ret = ni_660x_set_pfi_routing(dev, chan, data[1]);
- if (ret)
- return ret;
- break;
-
- case INSN_CONFIG_GET_ROUTING:
- data[1] = devpriv->pfi_output_selects[chan];
- break;
-
- case INSN_CONFIG_FILTER:
- val = ni_660x_read_register(dev, 0, IOConfigReg(chan));
- val &= ~pfi_input_select_mask(chan);
- val |= pfi_input_select_bits(chan, data[1]);
- ni_660x_write_register(dev, 0, val, IOConfigReg(chan));
- break;
-
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static int ni_660x_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct ni_660x_board *board = NULL;
- struct ni_660x_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
- unsigned i;
- unsigned global_interrupt_config_bits;
-
- if (context < ARRAY_SIZE(ni_660x_boards))
- board = &ni_660x_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- ret = ni_660x_allocate_private(dev);
- if (ret < 0)
- return ret;
- devpriv = dev->private;
-
- devpriv->mite = mite_alloc(pcidev);
- if (!devpriv->mite)
- return -ENOMEM;
-
- ret = mite_setup2(dev, devpriv->mite, true);
- if (ret < 0)
- return ret;
-
- ret = ni_660x_alloc_mite_rings(dev);
- if (ret < 0)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 2 + NI_660X_MAX_NUM_COUNTERS);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
-	/* Old GENERAL-PURPOSE COUNTER/TIMER (GPCT) subdevice, no longer used */
- s->type = COMEDI_SUBD_UNUSED;
-
- s = &dev->subdevices[NI_660X_DIO_SUBDEV];
- /* DIGITAL I/O SUBDEVICE */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = NUM_PFI_CHANNELS;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_660x_dio_insn_bits;
- s->insn_config = ni_660x_dio_insn_config;
-	/*
-	 * we use the ioconfig registers to control dio direction, so zero
-	 * output enables in stc dio control reg
-	 */
- ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL);
-
- devpriv->counter_dev = ni_gpct_device_construct(dev,
- &ni_gpct_write_register,
- &ni_gpct_read_register,
- ni_gpct_variant_660x,
- ni_660x_num_counters
- (dev));
- if (!devpriv->counter_dev)
- return -ENOMEM;
- for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- if (i < ni_660x_num_counters(dev)) {
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
- SDF_LSAMPL | SDF_CMD_READ;
- s->n_chan = 3;
- s->maxdata = 0xffffffff;
- s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_write;
- s->insn_config = ni_tio_insn_config;
- s->do_cmd = &ni_660x_cmd;
- s->len_chanlist = 1;
- s->do_cmdtest = ni_tio_cmdtest;
- s->cancel = &ni_660x_cancel;
- s->poll = &ni_660x_input_poll;
- s->async_dma_dir = DMA_BIDIRECTIONAL;
- s->buf_change = &ni_660x_buf_change;
- s->private = &devpriv->counter_dev->counters[i];
-
- devpriv->counter_dev->counters[i].chip_index =
- i / counters_per_chip;
- devpriv->counter_dev->counters[i].counter_index =
- i % counters_per_chip;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
- }
- for (i = 0; i < board->n_chips; ++i)
- init_tio_chip(dev, i);
-
- for (i = 0; i < ni_660x_num_counters(dev); ++i)
- ni_tio_init_counter(&devpriv->counter_dev->counters[i]);
-
- for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
- if (i < min_counter_pfi_chan)
- ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
- else
- ni_660x_set_pfi_routing(dev, i,
- pfi_output_select_counter);
- ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
- }
-	/*
-	 * to be safe, set counterswap bits on tio chips after all the counter
-	 * outputs have been set to high impedance mode
-	 */
- for (i = 0; i < board->n_chips; ++i)
- set_tio_counterswap(dev, i);
-
- ret = request_irq(pcidev->irq, ni_660x_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret < 0) {
-		dev_warn(dev->class_dev, "irq not available\n");
- return ret;
- }
- dev->irq = pcidev->irq;
- global_interrupt_config_bits = Global_Int_Enable_Bit;
- if (board->n_chips > 1)
- global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
- ni_660x_write_register(dev, 0, global_interrupt_config_bits,
- NI660X_GLOBAL_INT_CFG);
-
- return 0;
-}
-
-static void ni_660x_detach(struct comedi_device *dev)
-{
- struct ni_660x_private *devpriv = dev->private;
-
- if (dev->irq)
- free_irq(dev->irq, dev);
- if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
- ni_660x_free_mite_rings(dev);
- mite_detach(devpriv->mite);
- }
- if (dev->mmio)
- iounmap(dev->mmio);
- comedi_pci_disable(dev);
-}
-
-static struct comedi_driver ni_660x_driver = {
- .driver_name = "ni_660x",
- .module = THIS_MODULE,
- .auto_attach = ni_660x_auto_attach,
- .detach = ni_660x_detach,
-};
-
-static int ni_660x_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &ni_660x_driver, id->driver_data);
-}
-
-static const struct pci_device_id ni_660x_pci_table[] = {
- { PCI_VDEVICE(NI, 0x1310), BOARD_PCI6602 },
- { PCI_VDEVICE(NI, 0x1360), BOARD_PXI6602 },
- { PCI_VDEVICE(NI, 0x2c60), BOARD_PCI6601 },
- { PCI_VDEVICE(NI, 0x2cc0), BOARD_PXI6608 },
- { PCI_VDEVICE(NI, 0x1e40), BOARD_PXI6624 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ni_660x_pci_table);
-
-static struct pci_driver ni_660x_pci_driver = {
- .name = "ni_660x",
- .id_table = ni_660x_pci_table,
- .probe = ni_660x_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
deleted file mode 100644
index f4c580f65a89..000000000000
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- comedi/drivers/ni_670x.c
- Hardware driver for NI 670x devices
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_670x
-Description: National Instruments 670x
-Author: Bart Joris <bjoris@advalvas.be>
-Updated: Wed, 11 Dec 2002 18:25:35 -0800
-Devices: [National Instruments] PCI-6703 (ni_670x), PCI-6704
-Status: unknown
-
-Commands are not supported.
-*/
-
-/*
- Bart Joris <bjoris@advalvas.be> Last updated on 20/08/2001
-
- Manuals:
-
- 322110a.pdf PCI/PXI-6704 User Manual
- 322110b.pdf PCI/PXI-6703/6704 User Manual
-
-*/
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include "../comedi_pci.h"
-
-#define AO_VALUE_OFFSET 0x00
-#define AO_CHAN_OFFSET 0x0c
-#define AO_STATUS_OFFSET 0x10
-#define AO_CONTROL_OFFSET 0x10
-#define DIO_PORT0_DIR_OFFSET 0x20
-#define DIO_PORT0_DATA_OFFSET 0x24
-#define DIO_PORT1_DIR_OFFSET 0x28
-#define DIO_PORT1_DATA_OFFSET 0x2c
-#define MISC_STATUS_OFFSET 0x14
-#define MISC_CONTROL_OFFSET 0x14
-
-enum ni_670x_boardid {
- BOARD_PCI6703,
- BOARD_PXI6704,
- BOARD_PCI6704,
-};
-
-struct ni_670x_board {
- const char *name;
- unsigned short ao_chans;
-};
-
-static const struct ni_670x_board ni_670x_boards[] = {
- [BOARD_PCI6703] = {
- .name = "PCI-6703",
- .ao_chans = 16,
- },
- [BOARD_PXI6704] = {
- .name = "PXI-6704",
- .ao_chans = 32,
- },
- [BOARD_PCI6704] = {
- .name = "PCI-6704",
- .ao_chans = 32,
- },
-};
-
-struct ni_670x_private {
- int boardtype;
- int dio;
-};
-
-static int ni_670x_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- /*
- * Channel number mapping:
- *
- * NI 6703/ NI 6704 | NI 6704 Only
- * -------------------------------
- * vch(0) : 0 | ich(16) : 1
- * vch(1) : 2 | ich(17) : 3
- * ... | ...
- * vch(15) : 30 | ich(31) : 31
- */
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- /* First write in channel register which channel to use */
- writel(((chan & 15) << 1) | ((chan & 16) >> 4),
- dev->mmio + AO_CHAN_OFFSET);
- /* write channel value */
- writel(val, dev->mmio + AO_VALUE_OFFSET);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
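/*
 * The channel-code expression above implements the mapping table in the
 * comment: voltage channels map to even codes, current channels (6704
 * only) to odd codes.  A standalone sketch with illustrative values
 * (hypothetical helper, not used by the driver):
 */
static inline unsigned int ni_670x_ao_chan_code(unsigned int chan)
{
	/* vch(1) = chan 1 -> code 2; ich(17) = chan 17 -> code 3 */
	return ((chan & 15) << 1) | ((chan & 16) >> 4);
}
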
-static int ni_670x_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- writel(s->state, dev->mmio + DIO_PORT0_DATA_OFFSET);
-
- data[1] = readl(dev->mmio + DIO_PORT0_DATA_OFFSET);
-
- return insn->n;
-}
-
-static int ni_670x_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- writel(s->io_bits, dev->mmio + DIO_PORT0_DIR_OFFSET);
-
- return insn->n;
-}
-
-/* ripped from mite.h and mite_setup2() to avoid mite dependency */
-#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
-#define WENAB (1 << 7) /* window enable */
-
-static int ni_670x_mite_init(struct pci_dev *pcidev)
-{
- void __iomem *mite_base;
- u32 main_phys_addr;
-
- /* ioremap the MITE registers (BAR 0) temporarily */
- mite_base = pci_ioremap_bar(pcidev, 0);
- if (!mite_base)
- return -ENOMEM;
-
- /* set data window to main registers (BAR 1) */
- main_phys_addr = pci_resource_start(pcidev, 1);
- writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
-
- /* finished with MITE registers */
- iounmap(mite_base);
- return 0;
-}
-
-static int ni_670x_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct ni_670x_board *board = NULL;
- struct ni_670x_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- if (context < ARRAY_SIZE(ni_670x_boards))
- board = &ni_670x_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = ni_670x_mite_init(pcidev);
- if (ret)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 1);
- if (!dev->mmio)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->ao_chans;
- s->maxdata = 0xffff;
- if (s->n_chan == 32) {
- const struct comedi_lrange **range_table_list;
-
- range_table_list = kmalloc(sizeof(struct comedi_lrange *) * 32,
- GFP_KERNEL);
- if (!range_table_list)
- return -ENOMEM;
- s->range_table_list = range_table_list;
- for (i = 0; i < 16; i++) {
- range_table_list[i] = &range_bipolar10;
- range_table_list[16 + i] = &range_0_20mA;
- }
- } else {
- s->range_table = &range_bipolar10;
- }
- s->insn_write = ni_670x_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[1];
- /* digital i/o subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_670x_dio_insn_bits;
- s->insn_config = ni_670x_dio_insn_config;
-
- /* Config of misc registers */
- writel(0x10, dev->mmio + MISC_CONTROL_OFFSET);
- /* Config of ao registers */
- writel(0x00, dev->mmio + AO_CONTROL_OFFSET);
-
- return 0;
-}
-
-static void ni_670x_detach(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
-
- comedi_pci_detach(dev);
- if (dev->n_subdevices) {
- s = &dev->subdevices[0];
- if (s)
- kfree(s->range_table_list);
- }
-}
-
-static struct comedi_driver ni_670x_driver = {
- .driver_name = "ni_670x",
- .module = THIS_MODULE,
- .auto_attach = ni_670x_auto_attach,
- .detach = ni_670x_detach,
-};
-
-static int ni_670x_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &ni_670x_driver, id->driver_data);
-}
-
-static const struct pci_device_id ni_670x_pci_table[] = {
- { PCI_VDEVICE(NI, 0x1290), BOARD_PCI6704 },
- { PCI_VDEVICE(NI, 0x1920), BOARD_PXI6704 },
- { PCI_VDEVICE(NI, 0x2c90), BOARD_PCI6703 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ni_670x_pci_table);
-
-static struct pci_driver ni_670x_pci_driver = {
- .name = "ni_670x",
- .id_table = ni_670x_pci_table,
- .probe = ni_670x_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(ni_670x_driver, ni_670x_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
deleted file mode 100644
index 9b444f8c4e33..000000000000
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ /dev/null
@@ -1,796 +0,0 @@
-/*
- comedi/drivers/ni_at_a2150.c
- Driver for National Instruments AT-A2150 boards
- Copyright (C) 2001, 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_at_a2150
-Description: National Instruments AT-A2150
-Author: Frank Mori Hess
-Status: works
-Devices: [National Instruments] AT-A2150C (at_a2150c), AT-A2150S (at_a2150s)
-
-If you want to AC-couple the board's inputs, use AREF_OTHER.
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed conversions)
- [2] - DMA (optional, required for timed conversions)
-
-*/
-/*
-Yet another driver for obsolete hardware brought to you by Frank Hess.
-Testing and debugging help provided by Dave Andruczyk.
-
-This driver supports the boards:
-
-AT-A2150C
-AT-A2150S
-
-The only difference is their master clock frequencies.
-
-Options:
- [0] - base io address
- [1] - irq
- [2] - dma channel
-
-References (from ftp://ftp.natinst.com/support/manuals):
-
- 320360.pdf AT-A2150 User Manual
-
-TODO:
-
-analog level triggering
-TRIG_WAKE_EOS
-
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-
-#define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */
-
-/* Registers and bits */
-#define CONFIG_REG 0x0
-#define CHANNEL_BITS(x) ((x) & 0x7)
-#define CHANNEL_MASK 0x7
-#define CLOCK_SELECT_BITS(x) (((x) & 0x3) << 3)
-#define CLOCK_DIVISOR_BITS(x) (((x) & 0x3) << 5)
-#define CLOCK_MASK (0xf << 3)
-#define ENABLE0_BIT 0x80 /* enable (don't internally ground) channels 0 and 1 */
-#define ENABLE1_BIT 0x100 /* enable (don't internally ground) channels 2 and 3 */
-#define AC0_BIT 0x200 /* ac couple channels 0,1 */
-#define AC1_BIT 0x400 /* ac couple channels 2,3 */
-#define APD_BIT 0x800 /* analog power down */
-#define DPD_BIT 0x1000 /* digital power down */
-#define TRIGGER_REG 0x2 /* trigger config register */
-#define POST_TRIGGER_BITS 0x2
-#define DELAY_TRIGGER_BITS 0x3
-#define HW_TRIG_EN 0x10 /* enable hardware trigger */
-#define FIFO_START_REG		0x6	/* software start acquisition trigger */
-#define FIFO_RESET_REG 0x8 /* clears fifo + fifo flags */
-#define FIFO_DATA_REG 0xa /* read data */
-#define DMA_TC_CLEAR_REG 0xe /* clear dma terminal count interrupt */
-#define STATUS_REG 0x12 /* read only */
-#define FNE_BIT 0x1 /* fifo not empty */
-#define OVFL_BIT 0x8 /* fifo overflow */
-#define EDAQ_BIT 0x10 /* end of acquisition interrupt */
-#define DCAL_BIT 0x20 /* offset calibration in progress */
-#define INTR_BIT 0x40 /* interrupt has occurred */
-#define DMA_TC_BIT 0x80 /* dma terminal count interrupt has occurred */
-#define ID_BITS(x) (((x) >> 8) & 0x3)
-#define IRQ_DMA_CNTRL_REG 0x12 /* write only */
-#define DMA_CHAN_BITS(x) ((x) & 0x7) /* sets dma channel */
-#define DMA_EN_BIT 0x8 /* enables dma */
-#define IRQ_LVL_BITS(x) (((x) & 0xf) << 4) /* sets irq level */
-#define FIFO_INTR_EN_BIT 0x100 /* enable fifo interrupts */
-#define FIFO_INTR_FHF_BIT 0x200 /* interrupt fifo half full */
-#define DMA_INTR_EN_BIT 0x800 /* enable interrupt on dma terminal count */
-#define DMA_DEM_EN_BIT 0x1000 /* enables demand mode dma */
-#define I8253_BASE_REG 0x14
-
-struct a2150_board {
- const char *name;
- int clock[4]; /* master clock periods, in nanoseconds */
- int num_clocks; /* number of available master clock speeds */
- int ai_speed; /* maximum conversion rate in nanoseconds */
-};
-
-/* analog input range */
-static const struct comedi_lrange range_a2150 = {
- 1, {
- BIP_RANGE(2.828)
- }
-};
-
-/* enum must match board indices */
-enum { a2150_c, a2150_s };
-static const struct a2150_board a2150_boards[] = {
- {
- .name = "at-a2150c",
- .clock = {31250, 22676, 20833, 19531},
- .num_clocks = 4,
- .ai_speed = 19531,
- },
- {
- .name = "at-a2150s",
- .clock = {62500, 50000, 41667, 0},
- .num_clocks = 3,
- .ai_speed = 41667,
- },
-};
-
-struct a2150_private {
- struct comedi_isadma *dma;
- unsigned int count; /* number of data points left to be taken */
- int irq_dma_bits; /* irq/dma register bits */
- int config_bits; /* config register bits */
-};
-
-/* interrupt service routine */
-static irqreturn_t a2150_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct a2150_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[0];
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned short *buf = desc->virt_addr;
- unsigned int max_points, num_points, residue, leftover;
- unsigned short dpnt;
- int status;
- int i;
-
- if (!dev->attached)
- return IRQ_HANDLED;
-
- status = inw(dev->iobase + STATUS_REG);
- if ((status & INTR_BIT) == 0)
- return IRQ_NONE;
-
- if (status & OVFL_BIT) {
- async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- }
-
- if ((status & DMA_TC_BIT) == 0) {
- async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
- }
-
- /*
- * residue is the number of bytes left to be done on the dma
- * transfer. It should always be zero at this point unless
- * the stop_src is set to external triggering.
- */
- residue = comedi_isadma_disable(desc->chan);
-
- /* figure out how many points to read */
- max_points = comedi_bytes_to_samples(s, desc->size);
- num_points = max_points - comedi_bytes_to_samples(s, residue);
- if (devpriv->count < num_points && cmd->stop_src == TRIG_COUNT)
- num_points = devpriv->count;
-
- /* figure out how many points will be stored next time */
- leftover = 0;
- if (cmd->stop_src == TRIG_NONE) {
- leftover = comedi_bytes_to_samples(s, desc->size);
- } else if (devpriv->count > max_points) {
- leftover = devpriv->count - max_points;
- if (leftover > max_points)
- leftover = max_points;
- }
- /* there should only be a residue if collection was stopped by having
- * the stop_src set to an external trigger, in which case there
- * will be no more data
- */
- if (residue)
- leftover = 0;
-
- for (i = 0; i < num_points; i++) {
- /* write data point to comedi buffer */
- dpnt = buf[i];
- /* convert from 2's complement to unsigned coding */
- dpnt ^= 0x8000;
- comedi_buf_write_samples(s, &dpnt, 1);
- if (cmd->stop_src == TRIG_COUNT) {
- if (--devpriv->count == 0) { /* end of acquisition */
- async->events |= COMEDI_CB_EOA;
- break;
- }
- }
- }
- /* re-enable dma */
- if (leftover) {
- desc->size = comedi_samples_to_bytes(s, leftover);
- comedi_isadma_program(desc);
- }
-
- comedi_handle_events(dev, s);
-
- /* clear interrupt */
- outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
-
- return IRQ_HANDLED;
-}
-
-static int a2150_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct a2150_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[0];
-
- /* disable dma on card */
- devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT;
- outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
-
- /* disable computer's dma */
- comedi_isadma_disable(desc->chan);
-
- /* clear fifo and reset triggering circuitry */
- outw(0, dev->iobase + FIFO_RESET_REG);
-
- return 0;
-}
-
-/*
- * sets bits in devpriv->config_bits to nearest approximation of requested
- * period, adjusts requested period to actual timing.
- */
-static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
- unsigned int flags)
-{
- const struct a2150_board *board = dev->board_ptr;
- struct a2150_private *devpriv = dev->private;
- int lub, glb, temp;
- int lub_divisor_shift, lub_index, glb_divisor_shift, glb_index;
- int i, j;
-
- /* initialize greatest lower and least upper bounds */
- lub_divisor_shift = 3;
- lub_index = 0;
- lub = board->clock[lub_index] * (1 << lub_divisor_shift);
- glb_divisor_shift = 0;
- glb_index = board->num_clocks - 1;
- glb = board->clock[glb_index] * (1 << glb_divisor_shift);
-
- /* make sure period is in available range */
- if (*period < glb)
- *period = glb;
- if (*period > lub)
- *period = lub;
-
- /* we can multiply period by 1, 2, 4, or 8, using (1 << i) */
- for (i = 0; i < 4; i++) {
- /* there are a maximum of 4 master clocks */
- for (j = 0; j < board->num_clocks; j++) {
- /* temp is the period in nanosec we are evaluating */
- temp = board->clock[j] * (1 << i);
- /* if it is the best match yet */
- if (temp < lub && temp >= *period) {
- lub_divisor_shift = i;
- lub_index = j;
- lub = temp;
- }
- if (temp > glb && temp <= *period) {
- glb_divisor_shift = i;
- glb_index = j;
- glb = temp;
- }
- }
- }
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- /* if least upper bound is better approximation */
- if (lub - *period < *period - glb)
- *period = lub;
- else
- *period = glb;
- break;
- case CMDF_ROUND_UP:
- *period = lub;
- break;
- case CMDF_ROUND_DOWN:
- *period = glb;
- break;
- }
-
- /* set clock bits for config register appropriately */
- devpriv->config_bits &= ~CLOCK_MASK;
- if (*period == lub) {
- devpriv->config_bits |=
- CLOCK_SELECT_BITS(lub_index) |
- CLOCK_DIVISOR_BITS(lub_divisor_shift);
- } else {
- devpriv->config_bits |=
- CLOCK_SELECT_BITS(glb_index) |
- CLOCK_DIVISOR_BITS(glb_divisor_shift);
- }
-
- return 0;
-}
-
-static int a2150_set_chanlist(struct comedi_device *dev,
- unsigned int start_channel,
- unsigned int num_channels)
-{
- struct a2150_private *devpriv = dev->private;
-
- if (start_channel + num_channels > 4)
- return -1;
-
- devpriv->config_bits &= ~CHANNEL_MASK;
-
- switch (num_channels) {
- case 1:
- devpriv->config_bits |= CHANNEL_BITS(0x4 | start_channel);
- break;
- case 2:
- if (start_channel == 0)
- devpriv->config_bits |= CHANNEL_BITS(0x2);
- else if (start_channel == 2)
- devpriv->config_bits |= CHANNEL_BITS(0x3);
- else
- return -1;
- break;
- case 4:
- devpriv->config_bits |= CHANNEL_BITS(0x1);
- break;
- default:
- return -1;
- }
-
- return 0;
-}
-
-static int a2150_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
- int i;
-
- if (cmd->chanlist_len == 2 && (chan0 == 1 || chan0 == 3)) {
- dev_dbg(dev->class_dev,
- "length 2 chanlist must be channels 0,1 or channels 2,3\n");
- return -EINVAL;
- }
-
- if (cmd->chanlist_len == 3) {
- dev_dbg(dev->class_dev,
- "chanlist must have 1,2 or 4 channels\n");
- return -EINVAL;
- }
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
-
- if (chan != (chan0 + i)) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must be consecutive channels, counting upwards\n");
- return -EINVAL;
- }
-
- if (chan == 2)
- aref0 = aref;
- if (aref != aref0) {
- dev_dbg(dev->class_dev,
- "channels 0/1 and 2/3 must have the same analog reference\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int a2150_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- const struct a2150_board *board = dev->board_ptr;
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- a2150_get_timing(dev, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= a2150_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct a2150_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[0];
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int old_config_bits = devpriv->config_bits;
- unsigned int trigger_bits;
-
- if (cmd->flags & CMDF_PRIORITY) {
- dev_err(dev->class_dev,
- "dma incompatible with hard real-time interrupt (CMDF_PRIORITY), aborting\n");
- return -1;
- }
- /* clear fifo and reset triggering circuitry */
- outw(0, dev->iobase + FIFO_RESET_REG);
-
- /* setup chanlist */
- if (a2150_set_chanlist(dev, CR_CHAN(cmd->chanlist[0]),
- cmd->chanlist_len) < 0)
- return -1;
-
- /* setup ac/dc coupling */
- if (CR_AREF(cmd->chanlist[0]) == AREF_OTHER)
- devpriv->config_bits |= AC0_BIT;
- else
- devpriv->config_bits &= ~AC0_BIT;
- if (CR_AREF(cmd->chanlist[2]) == AREF_OTHER)
- devpriv->config_bits |= AC1_BIT;
- else
- devpriv->config_bits &= ~AC1_BIT;
-
- /* setup timing */
- a2150_get_timing(dev, &cmd->scan_begin_arg, cmd->flags);
-
- /* send timing, channel, config bits */
- outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
-
- /* initialize number of samples remaining */
- devpriv->count = cmd->stop_arg * cmd->chanlist_len;
-
- comedi_isadma_disable(desc->chan);
-
- /* set size of transfer to fill in 1/3 second */
-#define ONE_THIRD_SECOND 333333333
- desc->size = comedi_bytes_per_sample(s) * cmd->chanlist_len *
- ONE_THIRD_SECOND / cmd->scan_begin_arg;
- if (desc->size > desc->maxsize)
- desc->size = desc->maxsize;
- if (desc->size < comedi_bytes_per_sample(s))
- desc->size = comedi_bytes_per_sample(s);
- desc->size -= desc->size % comedi_bytes_per_sample(s);
-
- comedi_isadma_program(desc);
-
- /* clear dma interrupt before enabling it, to try and get rid of that
- * one spurious interrupt that has been happening */
- outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
-
- /* enable dma on card */
- devpriv->irq_dma_bits |= DMA_INTR_EN_BIT | DMA_EN_BIT;
- outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
-
- /* may need to wait 72 sampling periods if timing was changed */
- comedi_8254_load(dev->pacer, 2, 72, I8254_MODE0 | I8254_BINARY);
-
- /* setup start triggering */
- trigger_bits = 0;
- /* decide if we need to wait 72 periods for valid data */
- if (cmd->start_src == TRIG_NOW &&
- (old_config_bits & CLOCK_MASK) !=
- (devpriv->config_bits & CLOCK_MASK)) {
- /* set trigger source to delay trigger */
- trigger_bits |= DELAY_TRIGGER_BITS;
- } else {
- /* otherwise no delay */
- trigger_bits |= POST_TRIGGER_BITS;
- }
- /* enable external hardware trigger */
- if (cmd->start_src == TRIG_EXT) {
- trigger_bits |= HW_TRIG_EN;
- } else if (cmd->start_src == TRIG_OTHER) {
- /* XXX add support for level/slope start trigger using TRIG_OTHER */
- dev_err(dev->class_dev, "you shouldn't see this?\n");
- }
- /* send trigger config bits */
- outw(trigger_bits, dev->iobase + TRIGGER_REG);
-
- /* start acquisition for soft trigger */
- if (cmd->start_src == TRIG_NOW)
- outw(0, dev->iobase + FIFO_START_REG);
-
- return 0;
-}
-
-static int a2150_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + STATUS_REG);
- if (status & FNE_BIT)
- return 0;
- return -EBUSY;
-}
-
-static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct a2150_private *devpriv = dev->private;
- unsigned int n;
- int ret;
-
- /* clear fifo and reset triggering circuitry */
- outw(0, dev->iobase + FIFO_RESET_REG);
-
- /* setup chanlist */
- if (a2150_set_chanlist(dev, CR_CHAN(insn->chanspec), 1) < 0)
- return -1;
-
- /* set dc coupling */
- devpriv->config_bits &= ~AC0_BIT;
- devpriv->config_bits &= ~AC1_BIT;
-
- /* send timing, channel, config bits */
- outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
-
- /* disable dma on card */
- devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT;
- outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
-
- /* setup start triggering */
- outw(0, dev->iobase + TRIGGER_REG);
-
- /* start acquisition for soft trigger */
- outw(0, dev->iobase + FIFO_START_REG);
-
- /*
- * there is a 35.6 sample delay for data to get through the
- * antialias filter
- */
- for (n = 0; n < 36; n++) {
- ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
- if (ret)
- return ret;
-
- inw(dev->iobase + FIFO_DATA_REG);
- }
-
- /* read data */
- for (n = 0; n < insn->n; n++) {
- ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[n] = inw(dev->iobase + FIFO_DATA_REG);
- data[n] ^= 0x8000;
- }
-
- /* clear fifo and reset triggering circuitry */
- outw(0, dev->iobase + FIFO_RESET_REG);
-
- return n;
-}
-
-static void a2150_alloc_irq_and_dma(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct a2150_private *devpriv = dev->private;
- unsigned int irq_num = it->options[1];
- unsigned int dma_chan = it->options[2];
-
- /*
- * Only IRQs 15, 14, 12-9, and 7-3 are valid.
- * Only DMA channels 7-5 and 3-0 are valid.
- */
- if (irq_num > 15 || dma_chan > 7 ||
- !((1 << irq_num) & 0xdef8) || !((1 << dma_chan) & 0xef))
- return;
-
- if (request_irq(irq_num, a2150_interrupt, 0, dev->board_name, dev))
- return;
-
- /* DMA uses 1 buffer */
- devpriv->dma = comedi_isadma_alloc(dev, 1, dma_chan, dma_chan,
- A2150_DMA_BUFFER_SIZE,
- COMEDI_ISADMA_READ);
- if (!devpriv->dma) {
- free_irq(irq_num, dev);
- } else {
- dev->irq = irq_num;
- devpriv->irq_dma_bits = IRQ_LVL_BITS(irq_num) |
- DMA_CHAN_BITS(dma_chan);
- }
-}
-
-static void a2150_free_dma(struct comedi_device *dev)
-{
- struct a2150_private *devpriv = dev->private;
-
- if (devpriv)
- comedi_isadma_free(devpriv->dma);
-}
-
-static const struct a2150_board *a2150_probe(struct comedi_device *dev)
-{
- int id = ID_BITS(inw(dev->iobase + STATUS_REG));
-
- if (id >= ARRAY_SIZE(a2150_boards))
- return NULL;
-
- return &a2150_boards[id];
-}
-
-static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct a2150_board *board;
- struct a2150_private *devpriv;
- struct comedi_subdevice *s;
- static const int timeout = 2000;
- int i;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x1c);
- if (ret)
- return ret;
-
- board = a2150_probe(dev);
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- /* an IRQ and DMA are required to support async commands */
- a2150_alloc_irq_and_dma(dev, it);
-
- dev->pacer = comedi_8254_init(dev->iobase + I8253_BASE_REG,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- /* analog input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->range_table = &range_a2150;
- s->insn_read = a2150_ai_rinsn;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmd = a2150_ai_cmd;
- s->do_cmdtest = a2150_ai_cmdtest;
- s->cancel = a2150_cancel;
- }
-
- /* set card's irq and dma levels */
- outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
-
- /* reset and sync adc clock circuitry */
- outw_p(DPD_BIT | APD_BIT, dev->iobase + CONFIG_REG);
- outw_p(DPD_BIT, dev->iobase + CONFIG_REG);
- /* initialize configuration register */
- devpriv->config_bits = 0;
- outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
- /* wait until offset calibration is done, then enable analog inputs */
- for (i = 0; i < timeout; i++) {
- if ((DCAL_BIT & inw(dev->iobase + STATUS_REG)) == 0)
- break;
- udelay(1000);
- }
- if (i == timeout) {
- dev_err(dev->class_dev,
- "timed out waiting for offset calibration to complete\n");
- return -ETIME;
- }
- devpriv->config_bits |= ENABLE0_BIT | ENABLE1_BIT;
- outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
-
- return 0;
-}
-
-static void a2150_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- outw(APD_BIT | DPD_BIT, dev->iobase + CONFIG_REG);
- a2150_free_dma(dev);
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver ni_at_a2150_driver = {
- .driver_name = "ni_at_a2150",
- .module = THIS_MODULE,
- .attach = a2150_attach,
- .detach = a2150_detach,
-};
-module_comedi_driver(ni_at_a2150_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
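
A quick sanity check of the 1/3-second DMA sizing done in a2150_ai_cmd() above: the sketch below (not part of the deleted driver; the sample values are illustrative assumptions) reproduces the same arithmetic for a full-speed four-channel scan on an AT-A2150C and shows the result being clamped to the driver's DMA buffer size.

#include <stdio.h>

#define ONE_THIRD_SECOND 333333333	/* nanoseconds, as in the driver */

int main(void)
{
	unsigned long long bytes_per_sample = 2;	/* 16-bit samples */
	unsigned long long chanlist_len = 4;		/* all four channels */
	unsigned long long scan_begin_arg = 19531;	/* ns per scan, AT-A2150C max rate */
	unsigned long long maxsize = 0xff00;		/* A2150_DMA_BUFFER_SIZE */
	unsigned long long size;

	size = bytes_per_sample * chanlist_len * ONE_THIRD_SECOND / scan_begin_arg;
	if (size > maxsize)
		size = maxsize;			/* clamp to the DMA buffer */
	if (size < bytes_per_sample)
		size = bytes_per_sample;	/* at least one sample */
	size -= size % bytes_per_sample;	/* whole samples only */

	printf("DMA transfer size: %llu bytes\n", size);
	return 0;
}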
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
deleted file mode 100644
index f27aa0e82234..000000000000
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * ni_at_ao.c
- * Driver for NI AT-AO-6/10 boards
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000,2002 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ni_at_ao
- * Description: National Instruments AT-AO-6/10
- * Devices: [National Instruments] AT-AO-6 (at-ao-6), AT-AO-10 (at-ao-10)
- * Status: should work
- * Author: David A. Schleef <ds@schleef.org>
- * Updated: Sun Dec 26 12:26:28 EST 2004
- *
- * Configuration options:
- * [0] - I/O port base address
- * [1] - IRQ (unused)
- * [2] - DMA (unused)
- * [3] - analog output range, set by jumpers on hardware
- * 0 for -10 to 10V bipolar
- * 1 for 0V to 10V unipolar
- */
-
-#include <linux/module.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-
-/*
- * Register map
- *
- * Register-level programming information can be found in NI
- * document 320379.pdf.
- */
-#define ATAO_DIO_REG 0x00
-#define ATAO_CFG2_REG 0x02
-#define ATAO_CFG2_CALLD_NOP (0 << 14)
-#define ATAO_CFG2_CALLD(x) ((((x) >> 3) + 1) << 14)
-#define ATAO_CFG2_FFRTEN (1 << 13)
-#define ATAO_CFG2_DACS(x) (1 << (((x) / 2) + 8))
-#define ATAO_CFG2_LDAC(x) (1 << (((x) / 2) + 3))
-#define ATAO_CFG2_PROMEN (1 << 2)
-#define ATAO_CFG2_SCLK (1 << 1)
-#define ATAO_CFG2_SDATA (1 << 0)
-#define ATAO_CFG3_REG 0x04
-#define ATAO_CFG3_DMAMODE (1 << 6)
-#define ATAO_CFG3_CLKOUT (1 << 5)
-#define ATAO_CFG3_RCLKEN (1 << 4)
-#define ATAO_CFG3_DOUTEN2 (1 << 3)
-#define ATAO_CFG3_DOUTEN1 (1 << 2)
-#define ATAO_CFG3_EN2_5V (1 << 1)
-#define ATAO_CFG3_SCANEN (1 << 0)
-#define ATAO_82C53_BASE 0x06
-#define ATAO_CFG1_REG 0x0a
-#define ATAO_CFG1_EXTINT2EN (1 << 15)
-#define ATAO_CFG1_EXTINT1EN (1 << 14)
-#define ATAO_CFG1_CNTINT2EN (1 << 13)
-#define ATAO_CFG1_CNTINT1EN (1 << 12)
-#define ATAO_CFG1_TCINTEN (1 << 11)
-#define ATAO_CFG1_CNT1SRC (1 << 10)
-#define ATAO_CFG1_CNT2SRC (1 << 9)
-#define ATAO_CFG1_FIFOEN (1 << 8)
-#define ATAO_CFG1_GRP2WR (1 << 7)
-#define ATAO_CFG1_EXTUPDEN (1 << 6)
-#define ATAO_CFG1_DMARQ (1 << 5)
-#define ATAO_CFG1_DMAEN (1 << 4)
-#define ATAO_CFG1_CH(x) (((x) & 0xf) << 0)
-#define ATAO_STATUS_REG 0x0a
-#define ATAO_STATUS_FH (1 << 6)
-#define ATAO_STATUS_FE (1 << 5)
-#define ATAO_STATUS_FF (1 << 4)
-#define ATAO_STATUS_INT2 (1 << 3)
-#define ATAO_STATUS_INT1 (1 << 2)
-#define ATAO_STATUS_TCINT (1 << 1)
-#define ATAO_STATUS_PROMOUT (1 << 0)
-#define ATAO_FIFO_WRITE_REG 0x0c
-#define ATAO_FIFO_CLEAR_REG 0x0c
-#define ATAO_AO_REG(x) (0x0c + ((x) * 2))
-
-/* registers with _2_ are accessed when GRP2WR is set in CFG1 */
-#define ATAO_2_DMATCCLR_REG 0x00
-#define ATAO_2_INT1CLR_REG 0x02
-#define ATAO_2_INT2CLR_REG 0x04
-#define ATAO_2_RTSISHFT_REG 0x06
-#define ATAO_2_RTSISHFT_RSI (1 << 0)
-#define ATAO_2_RTSISTRB_REG 0x07
-
-struct atao_board {
- const char *name;
- int n_ao_chans;
-};
-
-static const struct atao_board atao_boards[] = {
- {
- .name = "at-ao-6",
- .n_ao_chans = 6,
- }, {
- .name = "at-ao-10",
- .n_ao_chans = 10,
- },
-};
-
-struct atao_private {
- unsigned short cfg1;
- unsigned short cfg3;
-
- /* Used for caldac readback */
- unsigned char caldac[21];
-};
-
-static void atao_select_reg_group(struct comedi_device *dev, int group)
-{
- struct atao_private *devpriv = dev->private;
-
- if (group)
- devpriv->cfg1 |= ATAO_CFG1_GRP2WR;
- else
- devpriv->cfg1 &= ~ATAO_CFG1_GRP2WR;
- outw(devpriv->cfg1, dev->iobase + ATAO_CFG1_REG);
-}
-
-static int atao_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- if (chan == 0)
- atao_select_reg_group(dev, 1);
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- /* the hardware expects two's complement values */
- outw(comedi_offset_munge(s, val),
- dev->iobase + ATAO_AO_REG(chan));
- }
- s->readback[chan] = val;
-
- if (chan == 0)
- atao_select_reg_group(dev, 0);
-
- return insn->n;
-}
-
-static int atao_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + ATAO_DIO_REG);
-
- data[1] = inw(dev->iobase + ATAO_DIO_REG);
-
- return insn->n;
-}
-
-static int atao_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct atao_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 4)
- mask = 0x0f;
- else
- mask = 0xf0;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- if (s->io_bits & 0x0f)
- devpriv->cfg3 |= ATAO_CFG3_DOUTEN1;
- else
- devpriv->cfg3 &= ~ATAO_CFG3_DOUTEN1;
- if (s->io_bits & 0xf0)
- devpriv->cfg3 |= ATAO_CFG3_DOUTEN2;
- else
- devpriv->cfg3 &= ~ATAO_CFG3_DOUTEN2;
-
- outw(devpriv->cfg3, dev->iobase + ATAO_CFG3_REG);
-
- return insn->n;
-}
-
-/*
- * There are three DAC8800 TrimDACs on the board. These are 8-channel,
- * 8-bit DACs that are used to calibrate the Analog Output channels.
- * The factory default calibration values are stored in the EEPROM.
- * The TrimDACs, and EEPROM addresses, are mapped as:
- *
- * Channel EEPROM Description
- * ----------------- ------ -----------------------------------
- * 0 - DAC0 Chan 0 0x30 AO Channel 0 Offset
- * 1 - DAC0 Chan 1 0x31 AO Channel 0 Gain
- * 2 - DAC0 Chan 2 0x32 AO Channel 1 Offset
- * 3 - DAC0 Chan 3 0x33 AO Channel 1 Gain
- * 4 - DAC0 Chan 4 0x34 AO Channel 2 Offset
- * 5 - DAC0 Chan 5 0x35 AO Channel 2 Gain
- * 6 - DAC0 Chan 6 0x36 AO Channel 3 Offset
- * 7 - DAC0 Chan 7 0x37 AO Channel 3 Gain
- * 8 - DAC1 Chan 0 0x38 AO Channel 4 Offset
- * 9 - DAC1 Chan 1 0x39 AO Channel 4 Gain
- * 10 - DAC1 Chan 2 0x3a AO Channel 5 Offset
- * 11 - DAC1 Chan 3 0x3b AO Channel 5 Gain
- * 12 - DAC1 Chan 4 0x3c 2.5V Offset
- * 13 - DAC1 Chan 5 0x3d AO Channel 6 Offset (at-ao-10 only)
- * 14 - DAC1 Chan 6 0x3e AO Channel 6 Gain (at-ao-10 only)
- * 15 - DAC1 Chan 7 0x3f AO Channel 7 Offset (at-ao-10 only)
- * 16 - DAC2 Chan 0 0x40 AO Channel 7 Gain (at-ao-10 only)
- * 17 - DAC2 Chan 1 0x41 AO Channel 8 Offset (at-ao-10 only)
- * 18 - DAC2 Chan 2 0x42 AO Channel 8 Gain (at-ao-10 only)
- * 19 - DAC2 Chan 3 0x43 AO Channel 9 Offset (at-ao-10 only)
- * 20 - DAC2 Chan 4 0x44 AO Channel 9 Gain (at-ao-10 only)
- * DAC2 Chan 5 0x45 Reserved
- * DAC2 Chan 6 0x46 Reserved
- * DAC2 Chan 7 0x47 Reserved
- */
-static int atao_calib_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- if (insn->n) {
- unsigned int val = data[insn->n - 1];
- unsigned int bitstring = ((chan & 0x7) << 8) | val;
- unsigned int bits;
- int bit;
-
- /* write the channel and last data value to the caldac */
- /* clock the bitstring to the caldac; MSB -> LSB */
- for (bit = 1 << 10; bit; bit >>= 1) {
- bits = (bit & bitstring) ? ATAO_CFG2_SDATA : 0;
-
- outw(bits, dev->iobase + ATAO_CFG2_REG);
- outw(bits | ATAO_CFG2_SCLK,
- dev->iobase + ATAO_CFG2_REG);
- }
-
- /* strobe the caldac to load the value */
- outw(ATAO_CFG2_CALLD(chan), dev->iobase + ATAO_CFG2_REG);
- outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static void atao_reset(struct comedi_device *dev)
-{
- struct atao_private *devpriv = dev->private;
-
- /* This is the reset sequence described in the manual */
-
- devpriv->cfg1 = 0;
- outw(devpriv->cfg1, dev->iobase + ATAO_CFG1_REG);
-
- /* Put outputs of counter 1 and counter 2 in a high state */
- comedi_8254_set_mode(dev->pacer, 0, I8254_MODE4 | I8254_BINARY);
- comedi_8254_set_mode(dev->pacer, 1, I8254_MODE4 | I8254_BINARY);
- comedi_8254_write(dev->pacer, 0, 0x0003);
-
- outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
-
- devpriv->cfg3 = 0;
- outw(devpriv->cfg3, dev->iobase + ATAO_CFG3_REG);
-
- inw(dev->iobase + ATAO_FIFO_CLEAR_REG);
-
- atao_select_reg_group(dev, 1);
- outw(0, dev->iobase + ATAO_2_INT1CLR_REG);
- outw(0, dev->iobase + ATAO_2_INT2CLR_REG);
- outw(0, dev->iobase + ATAO_2_DMATCCLR_REG);
- atao_select_reg_group(dev, 0);
-}
-
-static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct atao_board *board = dev->board_ptr;
- struct atao_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x20);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- dev->pacer = comedi_8254_init(dev->iobase + ATAO_82C53_BASE,
- 0, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_ao_chans;
- s->maxdata = 0x0fff;
- s->range_table = it->options[3] ? &range_unipolar10 : &range_bipolar10;
- s->insn_write = atao_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = atao_dio_insn_bits;
- s->insn_config = atao_dio_insn_config;
-
- /* caldac subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = (board->n_ao_chans * 2) + 1;
- s->maxdata = 0xff;
- s->insn_write = atao_calib_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* EEPROM subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_UNUSED;
-
- atao_reset(dev);
-
- return 0;
-}
-
-static struct comedi_driver ni_at_ao_driver = {
- .driver_name = "ni_at_ao",
- .module = THIS_MODULE,
- .attach = atao_attach,
- .detach = comedi_legacy_detach,
- .board_name = &atao_boards[0].name,
- .offset = sizeof(struct atao_board),
- .num_names = ARRAY_SIZE(atao_boards),
-};
-module_comedi_driver(ni_at_ao_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for NI AT-AO-6/10 boards");
-MODULE_LICENSE("GPL");
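
The bit-banged caldac write in atao_calib_insn_write() above is easier to follow with the serial word spelled out. This standalone sketch (not part of the deleted driver; the channel and value are arbitrary examples) builds the same 11-bit string, a 3-bit channel address followed by an 8-bit value, and prints it MSB first, which is the order the driver shifts it out on SDATA/SCLK.

#include <stdio.h>

int main(void)
{
	unsigned int chan = 5;		/* example caldac channel */
	unsigned int val = 0x80;	/* example 8-bit calibration value */
	unsigned int bitstring = ((chan & 0x7) << 8) | val;
	unsigned int bit;

	printf("bitstring = 0x%03x, shifted out MSB first: ", bitstring);
	for (bit = 1 << 10; bit; bit >>= 1)
		putchar((bitstring & bit) ? '1' : '0');
	putchar('\n');
	return 0;
}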
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
deleted file mode 100644
index 95435b81aa55..000000000000
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- comedi/drivers/ni_atmio.c
- Hardware driver for NI AT-MIO E series cards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_atmio
-Description: National Instruments AT-MIO-E series
-Author: ds
-Devices: [National Instruments] AT-MIO-16E-1 (ni_atmio),
- AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
- AT-MIO-16XE-50, AT-MIO-16XE-10, AT-AI-16XE-10
-Status: works
-Updated: Thu May 1 20:03:02 CDT 2003
-
-The driver has 2.6 kernel isapnp support, and
-will automatically probe for a supported board if the
-I/O base is left unspecified with comedi_config.
-However, many of
-the isapnp id numbers are unknown. If your board is not
-recognized, please send the output of 'cat /proc/isapnp'
-(you may need to modprobe the isa-pnp module for
-/proc/isapnp to exist) so the
-id numbers for your board can be added to the driver.
-
-Otherwise, you can use the isapnptools package to configure
-your board. Use isapnp to
-configure the I/O base and IRQ for the board, and then pass
-the same values as
-parameters in comedi_config. A sample isapnp.conf file is included
-in the etc/ directory of Comedilib.
-
-Comedilib includes a utility to autocalibrate these boards. The
-boards seem to boot into a state where all the calibration DACs
-are at one extreme of their range, thus the default calibration
-is terrible. Calibration at boot is strongly encouraged.
-
-To use the extended digital I/O on some of the boards, enable the
-8255 driver when configuring the Comedi source tree.
-
-External triggering is supported for some events. The channel index
-(scan_begin_arg, etc.) maps to PFI0 - PFI9.
-
-Some of the more esoteric triggering possibilities of these boards
-are not supported.
-*/
-/*
- The real guts of the driver are in ni_mio_common.c, which is included
- both here and in ni_pcimio.c
-
- Interrupt support added by Truxton Fulton <trux@truxton.com>
-
- References for specifications:
-
- 340747b.pdf Register Level Programmer Manual (obsolete)
- 340747c.pdf Register Level Programmer Manual (new)
- DAQ-STC reference manual
-
- Other possibly relevant info:
-
- 320517c.pdf User manual (obsolete)
- 320517f.pdf User manual (new)
- 320889a.pdf delete
- 320906c.pdf maximum signal ratings
- 321066a.pdf about 16x
- 321791a.pdf discontinuation of at-mio-16e-10 rev. c
- 321808a.pdf about at-mio-16e-10 rev P
- 321837a.pdf discontinuation of at-mio-16de-10 rev d
- 321838a.pdf about at-mio-16de-10 rev N
-
- ISSUES:
-
- need to deal with external reference for DAC, and other DAC
- properties in board properties
-
- deal with at-mio-16de-10 revision D to N changes, etc.
-
-*/
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include <linux/isapnp.h>
-
-#include "ni_stc.h"
-#include "8255.h"
-
-/*
- * AT specific setup
- */
-
-static const struct ni_board_struct ni_boards[] = {
- {
- .name = "at-mio-16e-1",
- .device_id = 44,
- .isapnp_id = 0x0000, /* XXX unknown */
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 8192,
- .gainlkup = ai_gain_16,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { mb88341 },
- }, {
- .name = "at-mio-16e-2",
- .device_id = 25,
- .isapnp_id = 0x1900,
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 2048,
- .gainlkup = ai_gain_16,
- .ai_speed = 2000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { mb88341 },
- }, {
- .name = "at-mio-16e-10",
- .device_id = 36,
- .isapnp_id = 0x2400,
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_16,
- .ai_speed = 10000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 10000,
- .caldac = { ad8804_debug },
- }, {
- .name = "at-mio-16de-10",
- .device_id = 37,
- .isapnp_id = 0x2500,
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_16,
- .ai_speed = 10000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 10000,
- .caldac = { ad8804_debug },
- .has_8255 = 1,
- }, {
- .name = "at-mio-64e-3",
- .device_id = 38,
- .isapnp_id = 0x2600,
- .n_adchan = 64,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 2048,
- .gainlkup = ai_gain_16,
- .ai_speed = 2000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { ad8804_debug },
- }, {
- .name = "at-mio-16xe-50",
- .device_id = 39,
- .isapnp_id = 0x2700,
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_8,
- .ai_speed = 50000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 50000,
- .caldac = { dac8800, dac8043 },
- }, {
- .name = "at-mio-16xe-10",
- .device_id = 50,
- .isapnp_id = 0x0000, /* XXX unknown */
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { dac8800, dac8043, ad8522 },
- }, {
- .name = "at-ai-16xe-10",
- .device_id = 51,
- .isapnp_id = 0x0000, /* XXX unknown */
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1, /* unknown */
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .caldac = { dac8800, dac8043, ad8522 },
- },
-};
-
-static const int ni_irqpin[] = {
- -1, -1, -1, 0, 1, 2, -1, 3, -1, -1, 4, 5, 6, -1, -1, 7
-};
-
-#include "ni_mio_common.c"
-
-static struct pnp_device_id device_ids[] = {
- {.id = "NIC1900", .driver_data = 0},
- {.id = "NIC2400", .driver_data = 0},
- {.id = "NIC2500", .driver_data = 0},
- {.id = "NIC2600", .driver_data = 0},
- {.id = "NIC2700", .driver_data = 0},
- {.id = ""}
-};
-
-MODULE_DEVICE_TABLE(pnp, device_ids);
-
-static int ni_isapnp_find_board(struct pnp_dev **dev)
-{
- struct pnp_dev *isapnp_dev = NULL;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
- isapnp_dev = pnp_find_dev(NULL,
- ISAPNP_VENDOR('N', 'I', 'C'),
- ISAPNP_FUNCTION(ni_boards[i].
- isapnp_id), NULL);
-
- if (!isapnp_dev || !isapnp_dev->card)
- continue;
-
- if (pnp_device_attach(isapnp_dev) < 0)
- continue;
-
- if (pnp_activate_dev(isapnp_dev) < 0) {
- pnp_device_detach(isapnp_dev);
- return -EAGAIN;
- }
-
- if (!pnp_port_valid(isapnp_dev, 0) ||
- !pnp_irq_valid(isapnp_dev, 0)) {
- pnp_device_detach(isapnp_dev);
- return -ENOMEM;
- }
- break;
- }
- if (i == ARRAY_SIZE(ni_boards))
- return -ENODEV;
- *dev = isapnp_dev;
- return 0;
-}
-
-static const struct ni_board_struct *ni_atmio_probe(struct comedi_device *dev)
-{
- int device_id = ni_read_eeprom(dev, 511);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
- const struct ni_board_struct *board = &ni_boards[i];
-
- if (board->device_id == device_id)
- return board;
- }
- if (device_id == 255)
- dev_err(dev->class_dev, "can't find board\n");
- else if (device_id == 0)
- dev_err(dev->class_dev,
- "EEPROM read error (?) or device not found\n");
- else
- dev_err(dev->class_dev,
- "unknown device ID %d -- contact author\n", device_id);
-
- return NULL;
-}
-
-static int ni_atmio_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct ni_board_struct *board;
- struct pnp_dev *isapnp_dev;
- int ret;
- unsigned long iobase;
- unsigned int irq;
-
- ret = ni_alloc_private(dev);
- if (ret)
- return ret;
-
- iobase = it->options[0];
- irq = it->options[1];
- isapnp_dev = NULL;
- if (iobase == 0) {
- ret = ni_isapnp_find_board(&isapnp_dev);
- if (ret < 0)
- return ret;
-
- iobase = pnp_port_start(isapnp_dev, 0);
- irq = pnp_irq(isapnp_dev, 0);
- comedi_set_hw_dev(dev, &isapnp_dev->dev);
- }
-
- ret = comedi_request_region(dev, iobase, 0x20);
- if (ret)
- return ret;
-
- board = ni_atmio_probe(dev);
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- /* irq stuff */
-
- if (irq != 0) {
- if (irq > 15 || ni_irqpin[irq] == -1)
- return -EINVAL;
- ret = request_irq(irq, ni_E_interrupt, 0,
- dev->board_name, dev);
- if (ret < 0)
- return -EINVAL;
- dev->irq = irq;
- }
-
- /* generic E series stuff in ni_mio_common.c */
-
- ret = ni_E_init(dev, ni_irqpin[dev->irq], 0);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static void ni_atmio_detach(struct comedi_device *dev)
-{
- struct pnp_dev *isapnp_dev;
-
- mio_common_detach(dev);
- comedi_legacy_detach(dev);
-
- isapnp_dev = dev->hw_dev ? to_pnp_dev(dev->hw_dev) : NULL;
- if (isapnp_dev)
- pnp_device_detach(isapnp_dev);
-}
-
-static struct comedi_driver ni_atmio_driver = {
- .driver_name = "ni_atmio",
- .module = THIS_MODULE,
- .attach = ni_atmio_attach,
- .detach = ni_atmio_detach,
-};
-module_comedi_driver(ni_atmio_driver);
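
The IRQ handling in ni_atmio_attach() above hinges on the ni_irqpin[] table, which maps usable ISA IRQ lines to E-series interrupt pins and marks the rest as invalid. The sketch below (not part of the deleted driver) simply copies that table and prints the mapping, making the irq > 15 and ni_irqpin[irq] == -1 validation easy to see.

#include <stdio.h>

static const int ni_irqpin[] = {
	-1, -1, -1, 0, 1, 2, -1, 3, -1, -1, 4, 5, 6, -1, -1, 7
};

int main(void)
{
	unsigned int irq;

	for (irq = 0; irq < 16; irq++) {
		if (ni_irqpin[irq] == -1)
			printf("IRQ %2u: not usable\n", irq);
		else
			printf("IRQ %2u: maps to interrupt pin %d\n",
			       irq, ni_irqpin[irq]);
	}
	return 0;
}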
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
deleted file mode 100644
index c3eb54622bc3..000000000000
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ /dev/null
@@ -1,760 +0,0 @@
-/*
- comedi/drivers/ni_atmio16d.c
- Hardware driver for National Instruments AT-MIO16D board
- Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
-/*
-Driver: ni_atmio16d
-Description: National Instruments AT-MIO-16D
-Author: Chris R. Baugher <baugher@enteract.com>
-Status: unknown
-Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d)
-*/
-/*
- * I must give credit here to Michal Dobes <dobes@tesnet.cz> who
- * wrote the driver for Advantech's pcl812 boards. I used the interrupt
- * handling code from his driver as an example for this one.
- *
- * Chris Baugher
- * 5/1/2000
- *
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-
-/* Configuration and Status Registers */
-#define COM_REG_1 0x00 /* wo 16 */
-#define STAT_REG 0x00 /* ro 16 */
-#define COM_REG_2 0x02 /* wo 16 */
-/* Event Strobe Registers */
-#define START_CONVERT_REG 0x08 /* wo 16 */
-#define START_DAQ_REG 0x0A /* wo 16 */
-#define AD_CLEAR_REG 0x0C /* wo 16 */
-#define EXT_STROBE_REG 0x0E /* wo 16 */
-/* Analog Output Registers */
-#define DAC0_REG 0x10 /* wo 16 */
-#define DAC1_REG 0x12 /* wo 16 */
-#define INT2CLR_REG 0x14 /* wo 16 */
-/* Analog Input Registers */
-#define MUX_CNTR_REG 0x04 /* wo 16 */
-#define MUX_GAIN_REG 0x06 /* wo 16 */
-#define AD_FIFO_REG 0x16 /* ro 16 */
-#define DMA_TC_INT_CLR_REG 0x16 /* wo 16 */
-/* AM9513A Counter/Timer Registers */
-#define AM9513A_DATA_REG 0x18 /* rw 16 */
-#define AM9513A_COM_REG 0x1A /* wo 16 */
-#define AM9513A_STAT_REG 0x1A /* ro 16 */
-/* MIO-16 Digital I/O Registers */
-#define MIO_16_DIG_IN_REG 0x1C /* ro 16 */
-#define MIO_16_DIG_OUT_REG 0x1C /* wo 16 */
-/* RTSI Switch Registers */
-#define RTSI_SW_SHIFT_REG 0x1E /* wo 8 */
-#define RTSI_SW_STROBE_REG 0x1F /* wo 8 */
-/* DIO-24 Registers */
-#define DIO_24_PORTA_REG 0x00 /* rw 8 */
-#define DIO_24_PORTB_REG 0x01 /* rw 8 */
-#define DIO_24_PORTC_REG 0x02 /* rw 8 */
-#define DIO_24_CNFG_REG 0x03 /* wo 8 */
-
-/* Command Register bits */
-#define COMREG1_2SCADC 0x0001
-#define COMREG1_1632CNT 0x0002
-#define COMREG1_SCANEN 0x0008
-#define COMREG1_DAQEN 0x0010
-#define COMREG1_DMAEN 0x0020
-#define COMREG1_CONVINTEN 0x0080
-#define COMREG2_SCN2 0x0010
-#define COMREG2_INTEN 0x0080
-#define COMREG2_DOUTEN0 0x0100
-#define COMREG2_DOUTEN1 0x0200
-/* Status Register bits */
-#define STAT_AD_OVERRUN 0x0100
-#define STAT_AD_OVERFLOW 0x0200
-#define STAT_AD_DAQPROG 0x0800
-#define STAT_AD_CONVAVAIL 0x2000
-#define STAT_AD_DAQSTOPINT 0x4000
-/* AM9513A Counter/Timer defines */
-#define CLOCK_1_MHZ 0x8B25
-#define CLOCK_100_KHZ 0x8C25
-#define CLOCK_10_KHZ 0x8D25
-#define CLOCK_1_KHZ 0x8E25
-#define CLOCK_100_HZ 0x8F25
-
-struct atmio16_board_t {
- const char *name;
- int has_8255;
-};
-
-/* range structs */
-static const struct comedi_lrange range_atmio16d_ai_10_bipolar = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02)
- }
-};
-
-static const struct comedi_lrange range_atmio16d_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_atmio16d_ai_unipolar = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
- }
-};
-
-/* private data struct */
-struct atmio16d_private {
- enum { adc_diff, adc_singleended } adc_mux;
- enum { adc_bipolar10, adc_bipolar5, adc_unipolar10 } adc_range;
- enum { adc_2comp, adc_straight } adc_coding;
- enum { dac_bipolar, dac_unipolar } dac0_range, dac1_range;
- enum { dac_internal, dac_external } dac0_reference, dac1_reference;
- enum { dac_2comp, dac_straight } dac0_coding, dac1_coding;
- const struct comedi_lrange *ao_range_type_list[2];
- unsigned int com_reg_1_state; /* current state of command register 1 */
- unsigned int com_reg_2_state; /* current state of command register 2 */
-};
-
-static void reset_counters(struct comedi_device *dev)
-{
- /* Counter 2 */
- outw(0xFFC2, dev->iobase + AM9513A_COM_REG);
- outw(0xFF02, dev->iobase + AM9513A_COM_REG);
- outw(0x4, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0A, dev->iobase + AM9513A_COM_REG);
- outw(0x3, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF42, dev->iobase + AM9513A_COM_REG);
- outw(0xFF42, dev->iobase + AM9513A_COM_REG);
- /* Counter 3 */
- outw(0xFFC4, dev->iobase + AM9513A_COM_REG);
- outw(0xFF03, dev->iobase + AM9513A_COM_REG);
- outw(0x4, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0B, dev->iobase + AM9513A_COM_REG);
- outw(0x3, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF44, dev->iobase + AM9513A_COM_REG);
- outw(0xFF44, dev->iobase + AM9513A_COM_REG);
- /* Counter 4 */
- outw(0xFFC8, dev->iobase + AM9513A_COM_REG);
- outw(0xFF04, dev->iobase + AM9513A_COM_REG);
- outw(0x4, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0C, dev->iobase + AM9513A_COM_REG);
- outw(0x3, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF48, dev->iobase + AM9513A_COM_REG);
- outw(0xFF48, dev->iobase + AM9513A_COM_REG);
- /* Counter 5 */
- outw(0xFFD0, dev->iobase + AM9513A_COM_REG);
- outw(0xFF05, dev->iobase + AM9513A_COM_REG);
- outw(0x4, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0D, dev->iobase + AM9513A_COM_REG);
- outw(0x3, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF50, dev->iobase + AM9513A_COM_REG);
- outw(0xFF50, dev->iobase + AM9513A_COM_REG);
-
- outw(0, dev->iobase + AD_CLEAR_REG);
-}
-
-static void reset_atmio16d(struct comedi_device *dev)
-{
- struct atmio16d_private *devpriv = dev->private;
- int i;
-
- /* now we need to initialize the board */
- outw(0, dev->iobase + COM_REG_1);
- outw(0, dev->iobase + COM_REG_2);
- outw(0, dev->iobase + MUX_GAIN_REG);
- /* init AM9513A timer */
- outw(0xFFFF, dev->iobase + AM9513A_COM_REG);
- outw(0xFFEF, dev->iobase + AM9513A_COM_REG);
- outw(0xFF17, dev->iobase + AM9513A_COM_REG);
- outw(0xF000, dev->iobase + AM9513A_DATA_REG);
- for (i = 1; i <= 5; ++i) {
- outw(0xFF00 + i, dev->iobase + AM9513A_COM_REG);
- outw(0x0004, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF08 + i, dev->iobase + AM9513A_COM_REG);
- outw(0x3, dev->iobase + AM9513A_DATA_REG);
- }
- outw(0xFF5F, dev->iobase + AM9513A_COM_REG);
- /* timer init done */
- outw(0, dev->iobase + AD_CLEAR_REG);
- outw(0, dev->iobase + INT2CLR_REG);
- /* select straight binary mode for Analog Input */
- devpriv->com_reg_1_state |= 1;
- outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
- devpriv->adc_coding = adc_straight;
- /* zero the analog outputs */
- outw(2048, dev->iobase + DAC0_REG);
- outw(2048, dev->iobase + DAC1_REG);
-}
-
-static irqreturn_t atmio16d_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned short val;
-
- val = inw(dev->iobase + AD_FIFO_REG);
- comedi_buf_write_samples(s, &val, 1);
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int atmio16d_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_FOLLOW | TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- /* internal trigger */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- } else {
-#if 0
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-#endif
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
-#if 0
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER);
-#endif
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- return 0;
-}
-
-static int atmio16d_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct atmio16d_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int timer, base_clock;
- unsigned int sample_count, tmp, chan, gain;
- int i;
-
- /* This is slowly becoming a working command interface. *
- * It is still uber-experimental */
-
- reset_counters(dev);
-
- /* check if scanning multiple channels */
- if (cmd->chanlist_len < 2) {
- devpriv->com_reg_1_state &= ~COMREG1_SCANEN;
- outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
- } else {
- devpriv->com_reg_1_state |= COMREG1_SCANEN;
- devpriv->com_reg_2_state |= COMREG2_SCN2;
- outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
- outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2);
- }
-
- /* Setup the Mux-Gain Counter */
- for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- gain = CR_RANGE(cmd->chanlist[i]);
- outw(i, dev->iobase + MUX_CNTR_REG);
- tmp = chan | (gain << 6);
- if (i == cmd->scan_end_arg - 1)
- tmp |= 0x0010; /* set LASTONE bit */
- outw(tmp, dev->iobase + MUX_GAIN_REG);
- }
-
- /* Now program the sample interval timer */
- /* Figure out which clock to use then get an
- * appropriate timer value */
- if (cmd->convert_arg < 65536000) {
- base_clock = CLOCK_1_MHZ;
- timer = cmd->convert_arg / 1000;
- } else if (cmd->convert_arg < 655360000) {
- base_clock = CLOCK_100_KHZ;
- timer = cmd->convert_arg / 10000;
- } else /* cmd->convert_arg < 6553600000 */ {
- base_clock = CLOCK_10_KHZ;
- timer = cmd->convert_arg / 100000;
- }
- outw(0xFF03, dev->iobase + AM9513A_COM_REG);
- outw(base_clock, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0B, dev->iobase + AM9513A_COM_REG);
- outw(0x2, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF44, dev->iobase + AM9513A_COM_REG);
- outw(0xFFF3, dev->iobase + AM9513A_COM_REG);
- outw(timer, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF24, dev->iobase + AM9513A_COM_REG);
-
- /* Now figure out how many samples to get */
- /* and program the sample counter */
- sample_count = cmd->stop_arg * cmd->scan_end_arg;
- outw(0xFF04, dev->iobase + AM9513A_COM_REG);
- outw(0x1025, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0C, dev->iobase + AM9513A_COM_REG);
- if (sample_count < 65536) {
- /* use only Counter 4 */
- outw(sample_count, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF48, dev->iobase + AM9513A_COM_REG);
- outw(0xFFF4, dev->iobase + AM9513A_COM_REG);
- outw(0xFF28, dev->iobase + AM9513A_COM_REG);
- devpriv->com_reg_1_state &= ~COMREG1_1632CNT;
- outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
- } else {
- /* Counter 4 and 5 are needed */
-
- tmp = sample_count & 0xFFFF;
- if (tmp)
- outw(tmp - 1, dev->iobase + AM9513A_DATA_REG);
- else
- outw(0xFFFF, dev->iobase + AM9513A_DATA_REG);
-
- outw(0xFF48, dev->iobase + AM9513A_COM_REG);
- outw(0, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF28, dev->iobase + AM9513A_COM_REG);
- outw(0xFF05, dev->iobase + AM9513A_COM_REG);
- outw(0x25, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0D, dev->iobase + AM9513A_COM_REG);
- tmp = sample_count & 0xFFFF;
- if ((tmp == 0) || (tmp == 1)) {
- outw((sample_count >> 16) & 0xFFFF,
- dev->iobase + AM9513A_DATA_REG);
- } else {
- outw(((sample_count >> 16) & 0xFFFF) + 1,
- dev->iobase + AM9513A_DATA_REG);
- }
- outw(0xFF70, dev->iobase + AM9513A_COM_REG);
- devpriv->com_reg_1_state |= COMREG1_1632CNT;
- outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
- }
-
- /* Program the scan interval timer ONLY IF SCANNING IS ENABLED */
- /* Figure out which clock to use then get an
- * appropriate timer value */
- if (cmd->chanlist_len > 1) {
- if (cmd->scan_begin_arg < 65536000) {
- base_clock = CLOCK_1_MHZ;
- timer = cmd->scan_begin_arg / 1000;
- } else if (cmd->scan_begin_arg < 655360000) {
- base_clock = CLOCK_100_KHZ;
- timer = cmd->scan_begin_arg / 10000;
- } else /* cmd->scan_begin_arg < 6553600000 */ {
- base_clock = CLOCK_10_KHZ;
- timer = cmd->scan_begin_arg / 100000;
- }
- outw(0xFF02, dev->iobase + AM9513A_COM_REG);
- outw(base_clock, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF0A, dev->iobase + AM9513A_COM_REG);
- outw(0x2, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF42, dev->iobase + AM9513A_COM_REG);
- outw(0xFFF2, dev->iobase + AM9513A_COM_REG);
- outw(timer, dev->iobase + AM9513A_DATA_REG);
- outw(0xFF22, dev->iobase + AM9513A_COM_REG);
- }
-
- /* Clear the A/D FIFO and reset the MUX counter */
- outw(0, dev->iobase + AD_CLEAR_REG);
- outw(0, dev->iobase + MUX_CNTR_REG);
- outw(0, dev->iobase + INT2CLR_REG);
- /* enable this acquisition operation */
- devpriv->com_reg_1_state |= COMREG1_DAQEN;
- outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
- /* enable interrupts for conversion completion */
- devpriv->com_reg_1_state |= COMREG1_CONVINTEN;
- devpriv->com_reg_2_state |= COMREG2_INTEN;
- outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
- outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2);
- /* apply a trigger. this starts the counters! */
- outw(0, dev->iobase + START_DAQ_REG);
-
- return 0;
-}
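
The base-clock selection in atmio16d_ai_cmd() above picks the fastest AM9513A source clock whose 16-bit load value still covers the requested period. The standalone sketch below (not part of the deleted driver; the 250 us period is an arbitrary example) repeats that decision and shows the resulting timer count.

#include <stdio.h>

int main(void)
{
	unsigned int convert_arg = 250000;	/* example: 250 us per conversion, in ns */
	const char *base_clock;
	unsigned int timer;

	if (convert_arg < 65536000) {		/* fits a 16-bit count at 1 MHz */
		base_clock = "1 MHz";
		timer = convert_arg / 1000;
	} else if (convert_arg < 655360000) {	/* fits a 16-bit count at 100 kHz */
		base_clock = "100 kHz";
		timer = convert_arg / 10000;
	} else {				/* fall back to the 10 kHz clock */
		base_clock = "10 kHz";
		timer = convert_arg / 100000;
	}
	printf("base clock %s, timer count %u\n", base_clock, timer);
	return 0;
}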
-
-/* This will cancel a running acquisition operation */
-static int atmio16d_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- reset_atmio16d(dev);
-
- return 0;
-}
-
-static int atmio16d_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + STAT_REG);
- if (status & STAT_AD_CONVAVAIL)
- return 0;
- if (status & STAT_AD_OVERFLOW) {
- outw(0, dev->iobase + AD_CLEAR_REG);
- return -EOVERFLOW;
- }
- return -EBUSY;
-}
-
-static int atmio16d_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct atmio16d_private *devpriv = dev->private;
- int i;
- int chan;
- int gain;
- int ret;
-
- chan = CR_CHAN(insn->chanspec);
- gain = CR_RANGE(insn->chanspec);
-
- /* reset the Analog input circuitry */
- /* outw( 0, dev->iobase+AD_CLEAR_REG ); */
- /* reset the Analog Input MUX Counter to 0 */
- /* outw( 0, dev->iobase+MUX_CNTR_REG ); */
-
- /* set the Input MUX gain */
- outw(chan | (gain << 6), dev->iobase + MUX_GAIN_REG);
-
- for (i = 0; i < insn->n; i++) {
- /* start the conversion */
- outw(0, dev->iobase + START_CONVERT_REG);
-
- /* wait for it to finish */
- ret = comedi_timeout(dev, s, insn, atmio16d_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* read the data now */
- data[i] = inw(dev->iobase + AD_FIFO_REG);
- /* change to two's complement if need be */
- if (devpriv->adc_coding == adc_2comp)
- data[i] ^= 0x800;
- }
-
- return i;
-}
-
-static int atmio16d_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct atmio16d_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int reg = (chan) ? DAC1_REG : DAC0_REG;
- bool munge = false;
- int i;
-
- if (chan == 0 && devpriv->dac0_coding == dac_2comp)
- munge = true;
- if (chan == 1 && devpriv->dac1_coding == dac_2comp)
- munge = true;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- if (munge)
- val ^= 0x800;
-
- outw(val, dev->iobase + reg);
- }
-
- return insn->n;
-}
-
-static int atmio16d_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + MIO_16_DIG_OUT_REG);
-
- data[1] = inw(dev->iobase + MIO_16_DIG_IN_REG);
-
- return insn->n;
-}
-
-static int atmio16d_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct atmio16d_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 4)
- mask = 0x0f;
- else
- mask = 0xf0;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- devpriv->com_reg_2_state &= ~(COMREG2_DOUTEN0 | COMREG2_DOUTEN1);
- if (s->io_bits & 0x0f)
- devpriv->com_reg_2_state |= COMREG2_DOUTEN0;
- if (s->io_bits & 0xf0)
- devpriv->com_reg_2_state |= COMREG2_DOUTEN1;
- outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2);
-
- return insn->n;
-}
-
-/*
- options[0] - I/O port
- options[1] - MIO irq
- 0 == no irq
- N == irq N {3,4,5,6,7,9,10,11,12,14,15}
- options[2] - DIO irq
- 0 == no irq
- N == irq N {3,4,5,6,7,9}
- options[3] - DMA1 channel
- 0 == no DMA
- N == DMA N {5,6,7}
- options[4] - DMA2 channel
- 0 == no DMA
- N == DMA N {5,6,7}
-
- options[5] - a/d mux
- 0=differential, 1=single
- options[6] - a/d range
- 0=bipolar10, 1=bipolar5, 2=unipolar10
-
- options[7] - dac0 range
- 0=bipolar, 1=unipolar
- options[8] - dac0 reference
- 0=internal, 1=external
- options[9] - dac0 coding
- 0=2's comp, 1=straight binary
-
- options[10] - dac1 range
- options[11] - dac1 reference
- options[12] - dac1 coding
- */
-
-static int atmio16d_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct atmio16_board_t *board = dev->board_ptr;
- struct atmio16d_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x20);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* reset the atmio16d hardware */
- reset_atmio16d(dev);
-
- if (it->options[1]) {
- ret = request_irq(it->options[1], atmio16d_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- /* set device options */
- devpriv->adc_mux = it->options[5];
- devpriv->adc_range = it->options[6];
-
- devpriv->dac0_range = it->options[7];
- devpriv->dac0_reference = it->options[8];
- devpriv->dac0_coding = it->options[9];
- devpriv->dac1_range = it->options[10];
- devpriv->dac1_reference = it->options[11];
- devpriv->dac1_coding = it->options[12];
-
- /* setup sub-devices */
- s = &dev->subdevices[0];
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = (devpriv->adc_mux ? 16 : 8);
- s->insn_read = atmio16d_ai_insn_read;
- s->maxdata = 0xfff; /* 4095 decimal */
- switch (devpriv->adc_range) {
- case adc_bipolar10:
- s->range_table = &range_atmio16d_ai_10_bipolar;
- break;
- case adc_bipolar5:
- s->range_table = &range_atmio16d_ai_5_bipolar;
- break;
- case adc_unipolar10:
- s->range_table = &range_atmio16d_ai_unipolar;
- break;
- }
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 16;
- s->do_cmdtest = atmio16d_ai_cmdtest;
- s->do_cmd = atmio16d_ai_cmd;
- s->cancel = atmio16d_ai_cancel;
- }
-
- /* ao subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0xfff; /* 4095 decimal */
- s->range_table_list = devpriv->ao_range_type_list;
- switch (devpriv->dac0_range) {
- case dac_bipolar:
- devpriv->ao_range_type_list[0] = &range_bipolar10;
- break;
- case dac_unipolar:
- devpriv->ao_range_type_list[0] = &range_unipolar10;
- break;
- }
- switch (devpriv->dac1_range) {
- case dac_bipolar:
- devpriv->ao_range_type_list[1] = &range_bipolar10;
- break;
- case dac_unipolar:
- devpriv->ao_range_type_list[1] = &range_unipolar10;
- break;
- }
- s->insn_write = atmio16d_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital I/O */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 8;
- s->insn_bits = atmio16d_dio_insn_bits;
- s->insn_config = atmio16d_dio_insn_config;
- s->maxdata = 1;
- s->range_table = &range_digital;
-
- /* 8255 subdevice */
- s = &dev->subdevices[3];
- if (board->has_8255) {
- ret = subdev_8255_init(dev, s, NULL, 0x00);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
-/* don't yet know how to deal with counter/timers */
-#if 0
- s = &dev->subdevices[4];
- /* do */
- s->type = COMEDI_SUBD_TIMER;
- s->n_chan = 0;
-	s->maxdata = 0;
-#endif
-
- return 0;
-}
-
-static void atmio16d_detach(struct comedi_device *dev)
-{
- reset_atmio16d(dev);
- comedi_legacy_detach(dev);
-}
-
-static const struct atmio16_board_t atmio16_boards[] = {
- {
- .name = "atmio16",
- .has_8255 = 0,
- }, {
- .name = "atmio16d",
- .has_8255 = 1,
- },
-};
-
-static struct comedi_driver atmio16d_driver = {
- .driver_name = "atmio16",
- .module = THIS_MODULE,
- .attach = atmio16d_attach,
- .detach = atmio16d_detach,
- .board_name = &atmio16_boards[0].name,
- .num_names = ARRAY_SIZE(atmio16_boards),
- .offset = sizeof(struct atmio16_board_t),
-};
-module_comedi_driver(atmio16d_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
deleted file mode 100644
index 8f6396edd21c..000000000000
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * comedi/drivers/ni_daq_700.c
- * Driver for DAQCard-700 DIO/AI
- * copied from 8255
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ni_daq_700
- * Description: National Instruments PCMCIA DAQCard-700
- * Author: Fred Brooks <nsaspook@nsaspook.com>,
- * based on ni_daq_dio24 by Daniel Vecino Castel <dvecino@able.es>
- * Devices: [National Instruments] PCMCIA DAQ-Card-700 (ni_daq_700)
- * Status: works
- * Updated: Wed, 21 May 2014 12:07:20 +0000
- *
- * The daqcard-700 appears in Comedi as a digital I/O subdevice (0) with
- * 16 channels and an analog input subdevice (1) with 16 single-ended channels
- * or 8 differential channels, and three input ranges.
- *
- * Digital: Channel 0 corresponds to the daqcard-700's output
- * port, bit 0; channel 8 corresponds to the input port, bit 0.
- *
- * Digital direction configuration: channels 0-7 output, 8-15 input.
- *
- * Analog: The sample data range is 0 to 4095, with a default input range of -10 to +10 volts.
- * Valid ranges:
- * 0 for -10 to 10V bipolar
- * 1 for -5 to 5V bipolar
- * 2 for -2.5 to 2.5V bipolar
- *
- * IRQ is assigned but not used.
- *
- * Manuals: Register level: http://www.ni.com/pdf/manuals/340698.pdf
- * User Manual: http://www.ni.com/pdf/manuals/320676d.pdf
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pcmcia.h"
-
-/* daqcard700 registers */
-#define DIO_W 0x04 /* WO 8bit */
-#define DIO_R 0x05 /* RO 8bit */
-#define CMD_R1 0x00 /* WO 8bit */
-#define CMD_R2 0x07 /* RW 8bit */
-#define CMD_R3		0x05	/* WO 8bit */
-#define STA_R1 0x00 /* RO 8bit */
-#define STA_R2 0x01 /* RO 8bit */
-#define ADFIFO_R 0x02 /* RO 16bit */
-#define ADCLEAR_R 0x01 /* WO 8bit */
-#define CDA_R0 0x08 /* RW 8bit */
-#define CDA_R1 0x09 /* RW 8bit */
-#define CDA_R2 0x0A /* RW 8bit */
-#define CMO_R 0x0B /* RO 8bit */
-#define TIC_R 0x06 /* WO 8bit */
-/* daqcard700 modes */
-#define CMD_R3_DIFF 0x04 /* diff mode */
-
-static const struct comedi_lrange range_daq700_ai = {
- 3,
- {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5)
- }
-};
-
-static int daq700_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0xff)
- outb(s->state & 0xff, dev->iobase + DIO_W);
- }
-
- val = s->state & 0xff;
- val |= inb(dev->iobase + DIO_R) << 8;
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int daq700_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- /* The DIO channels are not configurable, fix the io_bits */
- s->io_bits = 0x00ff;
-
- return insn->n;
-}
-
-static int daq700_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + STA_R2);
- if ((status & 0x03))
- return -EOVERFLOW;
- status = inb(dev->iobase + STA_R1);
- if ((status & 0x02))
- return -ENODATA;
- if ((status & 0x11) == 0x01)
- return 0;
- return -EBUSY;
-}
-
-static int daq700_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- int n;
- int d;
- int ret;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int aref = CR_AREF(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int r3_bits = 0;
-
- /* set channel input modes */
- if (aref == AREF_DIFF)
- r3_bits |= CMD_R3_DIFF;
- /* write channel mode/range */
- if (range >= 1)
- range++; /* convert range to hardware value */
- outb(r3_bits | (range & 0x03), dev->iobase + CMD_R3);
-
- /* write channel to multiplexer */
- /* set mask scan bit high to disable scanning */
- outb(chan | 0x80, dev->iobase + CMD_R1);
- /* mux needs 2us to really settle [Fred Brooks]. */
- udelay(2);
-
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- /* trigger conversion with out0 L to H */
- outb(0x00, dev->iobase + CMD_R2); /* enable ADC conversions */
- outb(0x30, dev->iobase + CMO_R); /* mode 0 out0 L, from H */
- outb(0x00, dev->iobase + ADCLEAR_R); /* clear the ADC FIFO */
- /* read 16bit junk from FIFO to clear */
- inw(dev->iobase + ADFIFO_R);
- /* mode 1 out0 H, L to H, start conversion */
- outb(0x32, dev->iobase + CMO_R);
-
- /* wait for conversion to end */
- ret = comedi_timeout(dev, s, insn, daq700_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* read data */
- d = inw(dev->iobase + ADFIFO_R);
- /* mangle the data as necessary */
- /* Bipolar Offset Binary: 0 to 4095 for -10 to +10 */
- d &= 0x0fff;
- d ^= 0x0800;
- data[n] = d;
- }
- return n;
-}
-
-/*
- * Data acquisition is enabled.
- * The counter 0 output is high.
- * The I/O connector pin CLK1 drives counter 1 source.
- * Multiple-channel scanning is disabled.
- * All interrupts are disabled.
- * The analog input range is set to +-10 V
- * The analog input mode is single-ended.
- * The analog input circuitry is initialized to channel 0.
- * The A/D FIFO is cleared.
- */
-static void daq700_ai_config(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned long iobase = dev->iobase;
-
- outb(0x80, iobase + CMD_R1); /* disable scanning, ADC to chan 0 */
- outb(0x00, iobase + CMD_R2); /* clear all bits */
- outb(0x00, iobase + CMD_R3); /* set +-10 range */
- outb(0x32, iobase + CMO_R); /* config counter mode1, out0 to H */
- outb(0x00, iobase + TIC_R); /* clear counter interrupt */
- outb(0x00, iobase + ADCLEAR_R); /* clear the ADC FIFO */
- inw(iobase + ADFIFO_R); /* read 16bit junk from FIFO to clear */
-}
-
-static int daq700_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- link->config_flags |= CONF_AUTO_SET_IO;
- ret = comedi_pcmcia_enable(dev, NULL);
- if (ret)
- return ret;
- dev->iobase = link->resource[0]->start;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- /* DAQCard-700 dio */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 16;
- s->range_table = &range_digital;
- s->maxdata = 1;
- s->insn_bits = daq700_dio_insn_bits;
- s->insn_config = daq700_dio_insn_config;
- s->io_bits = 0x00ff;
-
- /* DAQCard-700 ai */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 16;
- s->maxdata = (1 << 12) - 1;
- s->range_table = &range_daq700_ai;
- s->insn_read = daq700_ai_rinsn;
- daq700_ai_config(dev, s);
-
- return 0;
-}
-
-static struct comedi_driver daq700_driver = {
- .driver_name = "ni_daq_700",
- .module = THIS_MODULE,
- .auto_attach = daq700_auto_attach,
- .detach = comedi_pcmcia_disable,
-};
-
-static int daq700_cs_attach(struct pcmcia_device *link)
-{
- return comedi_pcmcia_auto_config(link, &daq700_driver);
-}
-
-static const struct pcmcia_device_id daq700_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x4743),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, daq700_cs_ids);
-
-static struct pcmcia_driver daq700_cs_driver = {
- .name = "ni_daq_700",
- .owner = THIS_MODULE,
- .id_table = daq700_cs_ids,
- .probe = daq700_cs_attach,
- .remove = comedi_pcmcia_auto_unconfig,
-};
-module_comedi_pcmcia_driver(daq700_driver, daq700_cs_driver);
-
-MODULE_AUTHOR("Fred Brooks <nsaspook@nsaspook.com>");
-MODULE_DESCRIPTION(
- "Comedi driver for National Instruments PCMCIA DAQCard-700 DIO/AI");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
deleted file mode 100644
index d9de83ab0267..000000000000
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- comedi/drivers/ni_daq_dio24.c
- Driver for National Instruments PCMCIA DAQ-Card DIO-24
- Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es>
-
- PCMCIA crap at end of file is adapted from dummy_cs.c 1.31
- 2001/08/24 12:13:13 from the pcmcia package.
- The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_daq_dio24
-Description: National Instruments PCMCIA DAQ-Card DIO-24
-Author: Daniel Vecino Castel <dvecino@able.es>
-Devices: [National Instruments] PCMCIA DAQ-Card DIO-24 (ni_daq_dio24)
-Status: ?
-Updated: Thu, 07 Nov 2002 21:53:06 -0800
-
-This is just a wrapper around the 8255.o driver to properly handle
-the PCMCIA interface.
-*/
-
-#include <linux/module.h>
-#include "../comedi_pcmcia.h"
-
-#include "8255.h"
-
-static int dio24_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- struct comedi_subdevice *s;
- int ret;
-
- link->config_flags |= CONF_AUTO_SET_IO;
- ret = comedi_pcmcia_enable(dev, NULL);
- if (ret)
- return ret;
- dev->iobase = link->resource[0]->start;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- /* 8255 dio */
- s = &dev->subdevices[0];
- return subdev_8255_init(dev, s, NULL, 0x00);
-}
-
-static struct comedi_driver driver_dio24 = {
- .driver_name = "ni_daq_dio24",
- .module = THIS_MODULE,
- .auto_attach = dio24_auto_attach,
- .detach = comedi_pcmcia_disable,
-};
-
-static int dio24_cs_attach(struct pcmcia_device *link)
-{
- return comedi_pcmcia_auto_config(link, &driver_dio24);
-}
-
-static const struct pcmcia_device_id dio24_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x475c), /* daqcard-dio24 */
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, dio24_cs_ids);
-
-static struct pcmcia_driver dio24_cs_driver = {
- .name = "ni_daq_dio24",
- .owner = THIS_MODULE,
- .id_table = dio24_cs_ids,
- .probe = dio24_cs_attach,
- .remove = comedi_pcmcia_auto_unconfig,
-};
-module_comedi_pcmcia_driver(driver_dio24, dio24_cs_driver);
-
-MODULE_AUTHOR("Daniel Vecino Castel <dvecino@able.es>");
-MODULE_DESCRIPTION(
- "Comedi driver for National Instruments PCMCIA DAQ-Card DIO-24");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
deleted file mode 100644
index 51e5e942b442..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * comedi/drivers/ni_labpc.c
- * Driver for National Instruments Lab-PC series boards and compatibles
- * Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ni_labpc
- * Description: National Instruments Lab-PC (& compatibles)
- * Devices: [National Instruments] Lab-PC-1200 (lab-pc-1200),
- * Lab-PC-1200AI (lab-pc-1200ai), Lab-PC+ (lab-pc+)
- * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- * Status: works
- *
- * Configuration options - ISA boards:
- * [0] - I/O port base address
- * [1] - IRQ (optional, required for timed or externally triggered
- * conversions)
- * [2] - DMA channel (optional)
- *
- * Tested with lab-pc-1200. For the older Lab-PC+, not all input
- * ranges and analog references will work, the available ranges/arefs
- * will depend on how you have configured the jumpers on your board
- * (see your owner's manual).
- *
- * Kernel-level ISA plug-and-play support for the lab-pc-1200 boards
- * has not yet been added to the driver, mainly due to the fact that
- * I don't know the device id numbers. If you have one of these boards,
- * please file a bug report at http://comedi.org/ so I can get the
- * necessary information from you.
- *
- * The 1200 series boards have onboard calibration dacs for correcting
- * analog input/output offsets and gains. The proper settings for these
- * caldacs are stored on the board's eeprom. To read the caldac values
- * from the eeprom and store them into a file that can then be used
- * by comedilib, use the comedi_calibrate program.
- *
- * The Lab-pc+ has quirky chanlist requirements when scanning multiple
- * channels. A multiple-channel scan sequence must start at the highest channel,
- * then decrement down to channel 0. The rest of the cards can scan down
- * like lab-pc+ or scan up from channel zero. Chanlists consisting of all
- * one channel are also legal, and allow you to pace conversions in bursts.
- *
- * NI manuals:
- * 341309a (labpc-1200 register manual)
- * 320502b (lab-pc+)
- */
-
-#include <linux/module.h>
-
-#include "../comedidev.h"
-
-#include "ni_labpc.h"
-#include "ni_labpc_isadma.h"
-
-static const struct labpc_boardinfo labpc_boards[] = {
- {
- .name = "lab-pc-1200",
- .ai_speed = 10000,
- .ai_scan_up = 1,
- .has_ao = 1,
- .is_labpc1200 = 1,
- }, {
- .name = "lab-pc-1200ai",
- .ai_speed = 10000,
- .ai_scan_up = 1,
- .is_labpc1200 = 1,
- }, {
- .name = "lab-pc+",
- .ai_speed = 12000,
- .has_ao = 1,
- },
-};
-
-static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- unsigned int irq = it->options[1];
- unsigned int dma_chan = it->options[2];
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x20);
- if (ret)
- return ret;
-
- ret = labpc_common_attach(dev, irq, 0);
- if (ret)
- return ret;
-
- if (dev->irq)
- labpc_init_dma_chan(dev, dma_chan);
-
- return 0;
-}
-
-static void labpc_detach(struct comedi_device *dev)
-{
- labpc_free_dma_chan(dev);
- labpc_common_detach(dev);
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver labpc_driver = {
- .driver_name = "ni_labpc",
- .module = THIS_MODULE,
- .attach = labpc_attach,
- .detach = labpc_detach,
- .num_names = ARRAY_SIZE(labpc_boards),
- .board_name = &labpc_boards[0].name,
- .offset = sizeof(struct labpc_boardinfo),
-};
-module_comedi_driver(labpc_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for NI Lab-PC ISA boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
deleted file mode 100644
index 83f878adbd53..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- ni_labpc.h
-
- Header for ni_labpc.c and ni_labpc_cs.c
-
- Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-#ifndef _NI_LABPC_H
-#define _NI_LABPC_H
-
-#define EEPROM_SIZE 256 /* 256 byte eeprom */
-#define NUM_AO_CHAN 2 /* boards have two analog output channels */
-
-enum transfer_type { fifo_not_empty_transfer, fifo_half_full_transfer,
- isa_dma_transfer
-};
-
-struct labpc_boardinfo {
- const char *name;
- int ai_speed; /* maximum input speed in ns */
- unsigned ai_scan_up:1; /* can auto scan up in ai channels */
- unsigned has_ao:1; /* has analog outputs */
- unsigned is_labpc1200:1; /* has extra regs compared to pc+ */
-};
-
-struct labpc_private {
- struct comedi_isadma *dma;
- struct comedi_8254 *counter;
-
- /* number of data points left to be taken */
- unsigned long long count;
-	/* software copies of bits written to command registers */
- unsigned int cmd1;
- unsigned int cmd2;
- unsigned int cmd3;
- unsigned int cmd4;
- unsigned int cmd5;
- unsigned int cmd6;
- /* store last read of board status registers */
- unsigned int stat1;
- unsigned int stat2;
-
- /* we are using dma/fifo-half-full/etc. */
- enum transfer_type current_transfer;
- /*
- * function pointers so we can use inb/outb or readb/writeb as
- * appropriate
- */
- unsigned int (*read_byte)(struct comedi_device *, unsigned long reg);
- void (*write_byte)(struct comedi_device *,
- unsigned int byte, unsigned long reg);
-};
-
-int labpc_common_attach(struct comedi_device *dev,
- unsigned int irq, unsigned long isr_flags);
-void labpc_common_detach(struct comedi_device *dev);
-
-#endif /* _NI_LABPC_H */
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
deleted file mode 100644
index 863afb28ee28..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ /dev/null
@@ -1,1355 +0,0 @@
-/*
- * comedi/drivers/ni_labpc_common.c
- *
- * Common support code for "ni_labpc", "ni_labpc_pci" and "ni_labpc_cs".
- *
- * Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
-#include "ni_labpc.h"
-#include "ni_labpc_regs.h"
-#include "ni_labpc_isadma.h"
-
-enum scan_mode {
- MODE_SINGLE_CHAN,
- MODE_SINGLE_CHAN_INTERVAL,
- MODE_MULT_CHAN_UP,
- MODE_MULT_CHAN_DOWN,
-};
-
-static const struct comedi_lrange range_labpc_plus_ai = {
- 16, {
- BIP_RANGE(5),
- BIP_RANGE(4),
- BIP_RANGE(2.5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- UNI_RANGE(10),
- UNI_RANGE(8),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1)
- }
-};
-
-static const struct comedi_lrange range_labpc_1200_ai = {
- 14, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1)
- }
-};
-
-static const struct comedi_lrange range_labpc_ao = {
- 2, {
- BIP_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-/* functions that do inb/outb and readb/writeb so we can use
- * function pointers to decide which to use */
-static unsigned int labpc_inb(struct comedi_device *dev, unsigned long reg)
-{
- return inb(dev->iobase + reg);
-}
-
-static void labpc_outb(struct comedi_device *dev,
- unsigned int byte, unsigned long reg)
-{
- outb(byte, dev->iobase + reg);
-}
-
-static unsigned int labpc_readb(struct comedi_device *dev, unsigned long reg)
-{
- return readb(dev->mmio + reg);
-}
-
-static void labpc_writeb(struct comedi_device *dev,
- unsigned int byte, unsigned long reg)
-{
- writeb(byte, dev->mmio + reg);
-}
-
-static int labpc_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->cmd2 &= ~(CMD2_SWTRIG | CMD2_HWTRIG | CMD2_PRETRIG);
- devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- devpriv->cmd3 = 0;
- devpriv->write_byte(dev, devpriv->cmd3, CMD3_REG);
-
- return 0;
-}
-
-static void labpc_ai_set_chan_and_gain(struct comedi_device *dev,
- enum scan_mode mode,
- unsigned int chan,
- unsigned int range,
- unsigned int aref)
-{
- const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv = dev->private;
-
- if (board->is_labpc1200) {
- /*
- * The LabPC-1200 boards do not have a gain
- * of '0x10'. Skip the range values that would
- * result in this gain.
- */
- range += (range > 0) + (range > 7);
- }
-
- /* munge channel bits for differential/scan disabled mode */
- if ((mode == MODE_SINGLE_CHAN || mode == MODE_SINGLE_CHAN_INTERVAL) &&
- aref == AREF_DIFF)
- chan *= 2;
- devpriv->cmd1 = CMD1_MA(chan);
- devpriv->cmd1 |= CMD1_GAIN(range);
-
- devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG);
-}
-
-static void labpc_setup_cmd6_reg(struct comedi_device *dev,
- struct comedi_subdevice *s,
- enum scan_mode mode,
- enum transfer_type xfer,
- unsigned int range,
- unsigned int aref,
- bool ena_intr)
-{
- const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv = dev->private;
-
- if (!board->is_labpc1200)
- return;
-
- /* reference inputs to ground or common? */
- if (aref != AREF_GROUND)
- devpriv->cmd6 |= CMD6_NRSE;
- else
- devpriv->cmd6 &= ~CMD6_NRSE;
-
- /* bipolar or unipolar range? */
- if (comedi_range_is_unipolar(s, range))
- devpriv->cmd6 |= CMD6_ADCUNI;
- else
- devpriv->cmd6 &= ~CMD6_ADCUNI;
-
- /* interrupt on fifo half full? */
- if (xfer == fifo_half_full_transfer)
- devpriv->cmd6 |= CMD6_HFINTEN;
- else
- devpriv->cmd6 &= ~CMD6_HFINTEN;
-
- /* enable interrupt on counter a1 terminal count? */
- if (ena_intr)
- devpriv->cmd6 |= CMD6_DQINTEN;
- else
- devpriv->cmd6 &= ~CMD6_DQINTEN;
-
- /* are we scanning up or down through channels? */
- if (mode == MODE_MULT_CHAN_UP)
- devpriv->cmd6 |= CMD6_SCANUP;
- else
- devpriv->cmd6 &= ~CMD6_SCANUP;
-
- devpriv->write_byte(dev, devpriv->cmd6, CMD6_REG);
-}
-
-static unsigned int labpc_read_adc_fifo(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned int lsb = devpriv->read_byte(dev, ADC_FIFO_REG);
- unsigned int msb = devpriv->read_byte(dev, ADC_FIFO_REG);
-
- return (msb << 8) | lsb;
-}
-
-static void labpc_clear_adc_fifo(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
-
- devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
- labpc_read_adc_fifo(dev);
-}
-
-static int labpc_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- struct labpc_private *devpriv = dev->private;
-
- devpriv->stat1 = devpriv->read_byte(dev, STAT1_REG);
- if (devpriv->stat1 & STAT1_DAVAIL)
- return 0;
- return -EBUSY;
-}
-
-static int labpc_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int aref = CR_AREF(insn->chanspec);
- int ret;
- int i;
-
- /* disable timed conversions, interrupt generation and dma */
- labpc_cancel(dev, s);
-
- labpc_ai_set_chan_and_gain(dev, MODE_SINGLE_CHAN, chan, range, aref);
-
- labpc_setup_cmd6_reg(dev, s, MODE_SINGLE_CHAN, fifo_not_empty_transfer,
- range, aref, false);
-
- /* setup cmd4 register */
- devpriv->cmd4 = 0;
- devpriv->cmd4 |= CMD4_ECLKRCV;
- /* single-ended/differential */
- if (aref == AREF_DIFF)
- devpriv->cmd4 |= CMD4_SEDIFF;
- devpriv->write_byte(dev, devpriv->cmd4, CMD4_REG);
-
- /* initialize pacer counter to prevent any problems */
- comedi_8254_set_mode(devpriv->counter, 0, I8254_MODE2 | I8254_BINARY);
-
- labpc_clear_adc_fifo(dev);
-
- for (i = 0; i < insn->n; i++) {
- /* trigger conversion */
- devpriv->write_byte(dev, 0x1, ADC_START_CONVERT_REG);
-
- ret = comedi_timeout(dev, s, insn, labpc_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[i] = labpc_read_adc_fifo(dev);
- }
-
- return insn->n;
-}
-
-static bool labpc_use_continuous_mode(const struct comedi_cmd *cmd,
- enum scan_mode mode)
-{
- if (mode == MODE_SINGLE_CHAN || cmd->scan_begin_src == TRIG_FOLLOW)
- return true;
-
- return false;
-}
-
-static unsigned int labpc_ai_convert_period(const struct comedi_cmd *cmd,
- enum scan_mode mode)
-{
- if (cmd->convert_src != TRIG_TIMER)
- return 0;
-
- if (mode == MODE_SINGLE_CHAN && cmd->scan_begin_src == TRIG_TIMER)
- return cmd->scan_begin_arg;
-
- return cmd->convert_arg;
-}
-
-static void labpc_set_ai_convert_period(struct comedi_cmd *cmd,
- enum scan_mode mode, unsigned int ns)
-{
- if (cmd->convert_src != TRIG_TIMER)
- return;
-
- if (mode == MODE_SINGLE_CHAN &&
- cmd->scan_begin_src == TRIG_TIMER) {
- cmd->scan_begin_arg = ns;
- if (cmd->convert_arg > cmd->scan_begin_arg)
- cmd->convert_arg = cmd->scan_begin_arg;
- } else {
- cmd->convert_arg = ns;
- }
-}
-
-static unsigned int labpc_ai_scan_period(const struct comedi_cmd *cmd,
- enum scan_mode mode)
-{
- if (cmd->scan_begin_src != TRIG_TIMER)
- return 0;
-
- if (mode == MODE_SINGLE_CHAN && cmd->convert_src == TRIG_TIMER)
- return 0;
-
- return cmd->scan_begin_arg;
-}
-
-static void labpc_set_ai_scan_period(struct comedi_cmd *cmd,
- enum scan_mode mode, unsigned int ns)
-{
- if (cmd->scan_begin_src != TRIG_TIMER)
- return;
-
- if (mode == MODE_SINGLE_CHAN && cmd->convert_src == TRIG_TIMER)
- return;
-
- cmd->scan_begin_arg = ns;
-}
-
-/* figures out what counter values to use based on command */
-static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd,
- enum scan_mode mode)
-{
- struct comedi_8254 *pacer = dev->pacer;
- unsigned int convert_period = labpc_ai_convert_period(cmd, mode);
- unsigned int scan_period = labpc_ai_scan_period(cmd, mode);
- unsigned int base_period;
-
- /*
- * If both convert and scan triggers are TRIG_TIMER, then they
- * both rely on counter b0. If only one TRIG_TIMER is used, we
- * can use the generic cascaded timing functions.
- */
- if (convert_period && scan_period) {
- /*
- * pick the lowest divisor value we can (for maximum input
- * clock speed on convert and scan counters)
- */
- pacer->next_div1 = (scan_period - 1) /
- (pacer->osc_base * I8254_MAX_COUNT) + 1;
-
- comedi_check_trigger_arg_min(&pacer->next_div1, 2);
- comedi_check_trigger_arg_max(&pacer->next_div1,
- I8254_MAX_COUNT);
-
- base_period = pacer->osc_base * pacer->next_div1;
-
- /* set a0 for conversion frequency and b1 for scan frequency */
- switch (cmd->flags & CMDF_ROUND_MASK) {
- default:
- case CMDF_ROUND_NEAREST:
- pacer->next_div = DIV_ROUND_CLOSEST(convert_period,
- base_period);
- pacer->next_div2 = DIV_ROUND_CLOSEST(scan_period,
- base_period);
- break;
- case CMDF_ROUND_UP:
- pacer->next_div = DIV_ROUND_UP(convert_period,
- base_period);
- pacer->next_div2 = DIV_ROUND_UP(scan_period,
- base_period);
- break;
- case CMDF_ROUND_DOWN:
- pacer->next_div = convert_period / base_period;
- pacer->next_div2 = scan_period / base_period;
- break;
- }
- /* make sure a0 and b1 values are acceptable */
- comedi_check_trigger_arg_min(&pacer->next_div, 2);
- comedi_check_trigger_arg_max(&pacer->next_div, I8254_MAX_COUNT);
- comedi_check_trigger_arg_min(&pacer->next_div2, 2);
- comedi_check_trigger_arg_max(&pacer->next_div2,
- I8254_MAX_COUNT);
-
- /* write corrected timings to command */
- labpc_set_ai_convert_period(cmd, mode,
- base_period * pacer->next_div);
- labpc_set_ai_scan_period(cmd, mode,
- base_period * pacer->next_div2);
- } else if (scan_period) {
- /*
- * calculate cascaded counter values
- * that give desired scan timing
- * (pacer->next_div2 / pacer->next_div1)
- */
- comedi_8254_cascade_ns_to_timer(pacer, &scan_period,
- cmd->flags);
- labpc_set_ai_scan_period(cmd, mode, scan_period);
- } else if (convert_period) {
- /*
- * calculate cascaded counter values
- * that give desired conversion timing
- * (pacer->next_div / pacer->next_div1)
- */
- comedi_8254_cascade_ns_to_timer(pacer, &convert_period,
- cmd->flags);
- /* transfer div2 value so correct timer gets updated */
- pacer->next_div = pacer->next_div2;
- labpc_set_ai_convert_period(cmd, mode, convert_period);
- }
-}
-
-static enum scan_mode labpc_ai_scan_mode(const struct comedi_cmd *cmd)
-{
- unsigned int chan0;
- unsigned int chan1;
-
- if (cmd->chanlist_len == 1)
- return MODE_SINGLE_CHAN;
-
- /* chanlist may be NULL during cmdtest */
- if (!cmd->chanlist)
- return MODE_MULT_CHAN_UP;
-
- chan0 = CR_CHAN(cmd->chanlist[0]);
- chan1 = CR_CHAN(cmd->chanlist[1]);
-
- if (chan0 < chan1)
- return MODE_MULT_CHAN_UP;
-
- if (chan0 > chan1)
- return MODE_MULT_CHAN_DOWN;
-
- return MODE_SINGLE_CHAN_INTERVAL;
-}
-
-static int labpc_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- enum scan_mode mode = labpc_ai_scan_mode(cmd);
- unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
- unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
- int i;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- unsigned int aref = CR_AREF(cmd->chanlist[i]);
-
- switch (mode) {
- case MODE_SINGLE_CHAN:
- break;
- case MODE_SINGLE_CHAN_INTERVAL:
- if (chan != chan0) {
- dev_dbg(dev->class_dev,
- "channel scanning order specified in chanlist is not supported by hardware\n");
- return -EINVAL;
- }
- break;
- case MODE_MULT_CHAN_UP:
- if (chan != i) {
- dev_dbg(dev->class_dev,
- "channel scanning order specified in chanlist is not supported by hardware\n");
- return -EINVAL;
- }
- break;
- case MODE_MULT_CHAN_DOWN:
- if (chan != (cmd->chanlist_len - i - 1)) {
- dev_dbg(dev->class_dev,
- "channel scanning order specified in chanlist is not supported by hardware\n");
- return -EINVAL;
- }
- break;
- }
-
- if (range != range0) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must all have the same range\n");
- return -EINVAL;
- }
-
- if (aref != aref0) {
- dev_dbg(dev->class_dev,
- "entries in chanlist must all have the same reference\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int labpc_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- const struct labpc_boardinfo *board = dev->board_ptr;
- int err = 0;
- int tmp, tmp2;
- unsigned int stop_mask;
- enum scan_mode mode;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
-
- stop_mask = TRIG_COUNT | TRIG_NONE;
- if (board->is_labpc1200)
- stop_mask |= TRIG_EXT;
- err |= comedi_check_trigger_src(&cmd->stop_src, stop_mask);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- /* can't have external stop and start triggers at once */
- if (cmd->start_src == TRIG_EXT && cmd->stop_src == TRIG_EXT)
- err++;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- /* start_arg value is ignored */
- break;
- }
-
- if (!cmd->chanlist_len)
- err |= -EINVAL;
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- }
-
- /* make sure scan timing is not too fast */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- cmd->convert_arg *
- cmd->chanlist_len);
- }
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ai_speed *
- cmd->chanlist_len);
- }
-
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- break;
- case TRIG_NONE:
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
- break;
- /*
- * TRIG_EXT doesn't care since it doesn't
- * trigger off a numbered channel
- */
- default:
- break;
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- tmp = cmd->convert_arg;
- tmp2 = cmd->scan_begin_arg;
- mode = labpc_ai_scan_mode(cmd);
- labpc_adc_timing(dev, cmd, mode);
- if (tmp != cmd->convert_arg || tmp2 != cmd->scan_begin_arg)
- err++;
-
- if (err)
- return 4;
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= labpc_ai_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- enum scan_mode mode = labpc_ai_scan_mode(cmd);
- unsigned int chanspec = (mode == MODE_MULT_CHAN_UP) ?
- cmd->chanlist[cmd->chanlist_len - 1] :
- cmd->chanlist[0];
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
- enum transfer_type xfer;
- unsigned long flags;
-
- /* make sure board is disabled before setting up acquisition */
- labpc_cancel(dev, s);
-
- /* initialize software conversion count */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count = cmd->stop_arg * cmd->chanlist_len;
-
- /* setup hardware conversion counter */
- if (cmd->stop_src == TRIG_EXT) {
- /*
- * load counter a1 with count of 3
- * (pc+ manual says this is minimum allowed) using mode 0
- */
- comedi_8254_load(devpriv->counter, 1,
- 3, I8254_MODE0 | I8254_BINARY);
- } else {
- /* just put counter a1 in mode 0 to set its output low */
- comedi_8254_set_mode(devpriv->counter, 1,
- I8254_MODE0 | I8254_BINARY);
- }
-
- /* figure out what method we will use to transfer data */
- if (devpriv->dma &&
- /* dma unsafe at RT priority,
- * and too much setup time for CMDF_WAKE_EOS */
- (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0)
- xfer = isa_dma_transfer;
- else if (/* pc-plus has no fifo-half full interrupt */
- board->is_labpc1200 &&
- /* wake-end-of-scan should interrupt on fifo not empty */
- (cmd->flags & CMDF_WAKE_EOS) == 0 &&
- /* make sure we are taking more than just a few points */
- (cmd->stop_src != TRIG_COUNT || devpriv->count > 256))
- xfer = fifo_half_full_transfer;
- else
- xfer = fifo_not_empty_transfer;
- devpriv->current_transfer = xfer;
-
- labpc_ai_set_chan_and_gain(dev, mode, chan, range, aref);
-
- labpc_setup_cmd6_reg(dev, s, mode, xfer, range, aref,
- (cmd->stop_src == TRIG_EXT));
-
- /* manual says to set scan enable bit on second pass */
- if (mode == MODE_MULT_CHAN_UP || mode == MODE_MULT_CHAN_DOWN) {
- devpriv->cmd1 |= CMD1_SCANEN;
-		/* need a brief delay before enabling scan, or the scan
-		 * list will get corrupted when you switch
-		 * between scan up and scan down mode - dunno why */
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG);
- }
-
- devpriv->write_byte(dev, cmd->chanlist_len, INTERVAL_COUNT_REG);
- /* load count */
- devpriv->write_byte(dev, 0x1, INTERVAL_STROBE_REG);
-
- if (cmd->convert_src == TRIG_TIMER ||
- cmd->scan_begin_src == TRIG_TIMER) {
- struct comedi_8254 *pacer = dev->pacer;
- struct comedi_8254 *counter = devpriv->counter;
-
- comedi_8254_update_divisors(pacer);
-
- /* set up pacing */
- comedi_8254_load(pacer, 0, pacer->divisor1,
- I8254_MODE3 | I8254_BINARY);
-
- /* set up conversion pacing */
- comedi_8254_set_mode(counter, 0, I8254_MODE2 | I8254_BINARY);
- if (labpc_ai_convert_period(cmd, mode))
- comedi_8254_write(counter, 0, pacer->divisor);
-
- /* set up scan pacing */
- if (labpc_ai_scan_period(cmd, mode))
- comedi_8254_load(pacer, 1, pacer->divisor2,
- I8254_MODE2 | I8254_BINARY);
- }
-
- labpc_clear_adc_fifo(dev);
-
- if (xfer == isa_dma_transfer)
- labpc_setup_dma(dev, s);
-
- /* enable error interrupts */
- devpriv->cmd3 |= CMD3_ERRINTEN;
- /* enable fifo not empty interrupt? */
- if (xfer == fifo_not_empty_transfer)
- devpriv->cmd3 |= CMD3_FIFOINTEN;
- devpriv->write_byte(dev, devpriv->cmd3, CMD3_REG);
-
- /* setup any external triggering/pacing (cmd4 register) */
- devpriv->cmd4 = 0;
- if (cmd->convert_src != TRIG_EXT)
- devpriv->cmd4 |= CMD4_ECLKRCV;
- /* XXX should discard first scan when using interval scanning
- * since manual says it is not synced with scan clock */
- if (!labpc_use_continuous_mode(cmd, mode)) {
- devpriv->cmd4 |= CMD4_INTSCAN;
- if (cmd->scan_begin_src == TRIG_EXT)
- devpriv->cmd4 |= CMD4_EOIRCV;
- }
- /* single-ended/differential */
- if (aref == AREF_DIFF)
- devpriv->cmd4 |= CMD4_SEDIFF;
- devpriv->write_byte(dev, devpriv->cmd4, CMD4_REG);
-
- /* startup acquisition */
-
- spin_lock_irqsave(&dev->spinlock, flags);
-
- /* use 2 cascaded counters for pacing */
- devpriv->cmd2 |= CMD2_TBSEL;
-
- devpriv->cmd2 &= ~(CMD2_SWTRIG | CMD2_HWTRIG | CMD2_PRETRIG);
- if (cmd->start_src == TRIG_EXT)
- devpriv->cmd2 |= CMD2_HWTRIG;
- else
- devpriv->cmd2 |= CMD2_SWTRIG;
- if (cmd->stop_src == TRIG_EXT)
- devpriv->cmd2 |= (CMD2_HWTRIG | CMD2_PRETRIG);
-
- devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return 0;
-}
-
-/* read all available samples from ai fifo */
-static int labpc_drain_fifo(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- struct comedi_async *async = dev->read_subdev->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned short data;
- const int timeout = 10000;
- unsigned int i;
-
- devpriv->stat1 = devpriv->read_byte(dev, STAT1_REG);
-
- for (i = 0; (devpriv->stat1 & STAT1_DAVAIL) && i < timeout;
- i++) {
- /* quit if we have all the data we want */
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->count == 0)
- break;
- devpriv->count--;
- }
- data = labpc_read_adc_fifo(dev);
- comedi_buf_write_samples(dev->read_subdev, &data, 1);
- devpriv->stat1 = devpriv->read_byte(dev, STAT1_REG);
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "ai timeout, fifo never empties\n");
- async->events |= COMEDI_CB_ERROR;
- return -1;
- }
-
- return 0;
-}
-
-/* makes sure all data acquired by board is transferred to comedi (used
- * when acquisition is terminated by stop_src == TRIG_EXT). */
-static void labpc_drain_dregs(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
-
- if (devpriv->current_transfer == isa_dma_transfer)
- labpc_drain_dma(dev);
-
- labpc_drain_fifo(dev);
-}
-
-/* interrupt service routine */
-static irqreturn_t labpc_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
-
- if (!dev->attached) {
- dev_err(dev->class_dev, "premature interrupt\n");
- return IRQ_HANDLED;
- }
-
- async = s->async;
- cmd = &async->cmd;
-
- /* read board status */
- devpriv->stat1 = devpriv->read_byte(dev, STAT1_REG);
- if (board->is_labpc1200)
- devpriv->stat2 = devpriv->read_byte(dev, STAT2_REG);
-
- if ((devpriv->stat1 & (STAT1_GATA0 | STAT1_CNTINT | STAT1_OVERFLOW |
- STAT1_OVERRUN | STAT1_DAVAIL)) == 0 &&
- (devpriv->stat2 & STAT2_OUTA1) == 0 &&
- (devpriv->stat2 & STAT2_FIFONHF)) {
- return IRQ_NONE;
- }
-
- if (devpriv->stat1 & STAT1_OVERRUN) {
- /* clear error interrupt */
- devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
- async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- dev_err(dev->class_dev, "overrun\n");
- return IRQ_HANDLED;
- }
-
- if (devpriv->current_transfer == isa_dma_transfer)
- labpc_handle_dma_status(dev);
- else
- labpc_drain_fifo(dev);
-
- if (devpriv->stat1 & STAT1_CNTINT) {
- dev_err(dev->class_dev, "handled timer interrupt?\n");
- /* clear it */
- devpriv->write_byte(dev, 0x1, TIMER_CLEAR_REG);
- }
-
- if (devpriv->stat1 & STAT1_OVERFLOW) {
- /* clear error interrupt */
- devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
- async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- dev_err(dev->class_dev, "overflow\n");
- return IRQ_HANDLED;
- }
- /* handle external stop trigger */
- if (cmd->stop_src == TRIG_EXT) {
- if (devpriv->stat2 & STAT2_OUTA1) {
- labpc_drain_dregs(dev);
- async->events |= COMEDI_CB_EOA;
- }
- }
-
- /* TRIG_COUNT end of acquisition */
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->count == 0)
- async->events |= COMEDI_CB_EOA;
- }
-
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static void labpc_ao_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan, unsigned int val)
-{
- struct labpc_private *devpriv = dev->private;
-
- devpriv->write_byte(dev, val & 0xff, DAC_LSB_REG(chan));
- devpriv->write_byte(dev, (val >> 8) & 0xff, DAC_MSB_REG(chan));
-
- s->readback[chan] = val;
-}
-
-static int labpc_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv = dev->private;
- int channel, range;
- unsigned long flags;
-
- channel = CR_CHAN(insn->chanspec);
-
- /* turn off pacing of analog output channel */
- /* note: hardware bug in daqcard-1200 means pacing cannot
-	 * be independently enabled/disabled for its two channels */
- spin_lock_irqsave(&dev->spinlock, flags);
- devpriv->cmd2 &= ~CMD2_LDAC(channel);
- devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* set range */
- if (board->is_labpc1200) {
- range = CR_RANGE(insn->chanspec);
- if (comedi_range_is_unipolar(s, range))
- devpriv->cmd6 |= CMD6_DACUNI(channel);
- else
- devpriv->cmd6 &= ~CMD6_DACUNI(channel);
- /* write to register */
- devpriv->write_byte(dev, devpriv->cmd6, CMD6_REG);
- }
- /* send data */
- labpc_ao_write(dev, s, channel, data[0]);
-
- return 1;
-}
-
-/* lowlevel write to eeprom/dac */
-static void labpc_serial_out(struct comedi_device *dev, unsigned int value,
- unsigned int value_width)
-{
- struct labpc_private *devpriv = dev->private;
- int i;
-
- for (i = 1; i <= value_width; i++) {
- /* clear serial clock */
- devpriv->cmd5 &= ~CMD5_SCLK;
- /* send bits most significant bit first */
- if (value & (1 << (value_width - i)))
- devpriv->cmd5 |= CMD5_SDATA;
- else
- devpriv->cmd5 &= ~CMD5_SDATA;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- /* set clock to load bit */
- devpriv->cmd5 |= CMD5_SCLK;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- }
-}
-
-/* lowlevel read from eeprom */
-static unsigned int labpc_serial_in(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned int value = 0;
- int i;
- const int value_width = 8; /* number of bits wide values are */
-
- for (i = 1; i <= value_width; i++) {
- /* set serial clock */
- devpriv->cmd5 |= CMD5_SCLK;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- /* clear clock bit */
- devpriv->cmd5 &= ~CMD5_SCLK;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- /* read bits most significant bit first */
- udelay(1);
- devpriv->stat2 = devpriv->read_byte(dev, STAT2_REG);
- if (devpriv->stat2 & STAT2_PROMOUT)
- value |= 1 << (value_width - i);
- }
-
- return value;
-}
-
-static unsigned int labpc_eeprom_read(struct comedi_device *dev,
- unsigned int address)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned int value;
- /* bits to tell eeprom to expect a read */
- const int read_instruction = 0x3;
- /* 8 bit write lengths to eeprom */
- const int write_length = 8;
-
- /* enable read/write to eeprom */
- devpriv->cmd5 &= ~CMD5_EEPROMCS;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- devpriv->cmd5 |= (CMD5_EEPROMCS | CMD5_WRTPRT);
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- /* send read instruction */
- labpc_serial_out(dev, read_instruction, write_length);
- /* send 8 bit address to read from */
- labpc_serial_out(dev, address, write_length);
- /* read result */
- value = labpc_serial_in(dev);
-
- /* disable read/write to eeprom */
- devpriv->cmd5 &= ~(CMD5_EEPROMCS | CMD5_WRTPRT);
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- return value;
-}
-
-static unsigned int labpc_eeprom_read_status(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned int value;
- const int read_status_instruction = 0x5;
- const int write_length = 8; /* 8 bit write lengths to eeprom */
-
- /* enable read/write to eeprom */
- devpriv->cmd5 &= ~CMD5_EEPROMCS;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- devpriv->cmd5 |= (CMD5_EEPROMCS | CMD5_WRTPRT);
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- /* send read status instruction */
- labpc_serial_out(dev, read_status_instruction, write_length);
- /* read result */
- value = labpc_serial_in(dev);
-
- /* disable read/write to eeprom */
- devpriv->cmd5 &= ~(CMD5_EEPROMCS | CMD5_WRTPRT);
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- return value;
-}
-
-static void labpc_eeprom_write(struct comedi_device *dev,
- unsigned int address, unsigned int value)
-{
- struct labpc_private *devpriv = dev->private;
- const int write_enable_instruction = 0x6;
- const int write_instruction = 0x2;
- const int write_length = 8; /* 8 bit write lengths to eeprom */
-
- /* enable read/write to eeprom */
- devpriv->cmd5 &= ~CMD5_EEPROMCS;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- devpriv->cmd5 |= (CMD5_EEPROMCS | CMD5_WRTPRT);
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- /* send write_enable instruction */
- labpc_serial_out(dev, write_enable_instruction, write_length);
- devpriv->cmd5 &= ~CMD5_EEPROMCS;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- /* send write instruction */
- devpriv->cmd5 |= CMD5_EEPROMCS;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- labpc_serial_out(dev, write_instruction, write_length);
- /* send 8 bit address to write to */
- labpc_serial_out(dev, address, write_length);
- /* write value */
- labpc_serial_out(dev, value, write_length);
- devpriv->cmd5 &= ~CMD5_EEPROMCS;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- /* disable read/write to eeprom */
- devpriv->cmd5 &= ~(CMD5_EEPROMCS | CMD5_WRTPRT);
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-}
-
-/* writes to 8 bit calibration dacs */
-static void write_caldac(struct comedi_device *dev, unsigned int channel,
- unsigned int value)
-{
- struct labpc_private *devpriv = dev->private;
-
- /* clear caldac load bit and make sure we don't write to eeprom */
- devpriv->cmd5 &= ~(CMD5_CALDACLD | CMD5_EEPROMCS | CMD5_WRTPRT);
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- /* write 4 bit channel */
- labpc_serial_out(dev, channel, 4);
- /* write 8 bit caldac value */
- labpc_serial_out(dev, value, 8);
-
- /* set and clear caldac bit to load caldac value */
- devpriv->cmd5 |= CMD5_CALDACLD;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- devpriv->cmd5 &= ~CMD5_CALDACLD;
- udelay(1);
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-}
-
-static int labpc_calib_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /*
- * Only write the last data value to the caldac. Preceding
- * data would be overwritten anyway.
- */
- if (insn->n > 0) {
- unsigned int val = data[insn->n - 1];
-
- if (s->readback[chan] != val) {
- write_caldac(dev, chan, val);
- s->readback[chan] = val;
- }
- }
-
- return insn->n;
-}
-
-static int labpc_eeprom_ready(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- /* make sure there isn't already a write in progress */
- status = labpc_eeprom_read_status(dev);
- if ((status & 0x1) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int labpc_eeprom_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int ret;
-
- /* only allow writes to user area of eeprom */
- if (chan < 16 || chan > 127)
- return -EINVAL;
-
- /*
- * Only write the last data value to the eeprom. Preceding
- * data would be overwritten anyway.
- */
- if (insn->n > 0) {
- unsigned int val = data[insn->n - 1];
-
- ret = comedi_timeout(dev, s, insn, labpc_eeprom_ready, 0);
- if (ret)
- return ret;
-
- labpc_eeprom_write(dev, chan, val);
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-int labpc_common_attach(struct comedi_device *dev,
- unsigned int irq, unsigned long isr_flags)
-{
- const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- if (dev->mmio) {
- devpriv->read_byte = labpc_readb;
- devpriv->write_byte = labpc_writeb;
- } else {
- devpriv->read_byte = labpc_inb;
- devpriv->write_byte = labpc_outb;
- }
-
- /* initialize board's command registers */
- devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG);
- devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
- devpriv->write_byte(dev, devpriv->cmd3, CMD3_REG);
- devpriv->write_byte(dev, devpriv->cmd4, CMD4_REG);
- if (board->is_labpc1200) {
- devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
- devpriv->write_byte(dev, devpriv->cmd6, CMD6_REG);
- }
-
- if (irq) {
- ret = request_irq(irq, labpc_interrupt, isr_flags,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = irq;
- }
-
- if (dev->mmio) {
- dev->pacer = comedi_8254_mm_init(dev->mmio + COUNTER_B_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- devpriv->counter = comedi_8254_mm_init(dev->mmio +
- COUNTER_A_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- } else {
- dev->pacer = comedi_8254_init(dev->iobase + COUNTER_B_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- devpriv->counter = comedi_8254_init(dev->iobase +
- COUNTER_A_BASE_REG,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- }
- if (!dev->pacer || !devpriv->counter)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 5);
- if (ret)
- return ret;
-
- /* analog input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON | SDF_DIFF;
- s->n_chan = 8;
- s->len_chanlist = 8;
- s->maxdata = 0x0fff;
- s->range_table = board->is_labpc1200 ?
- &range_labpc_1200_ai : &range_labpc_plus_ai;
- s->insn_read = labpc_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmd = labpc_ai_cmd;
- s->do_cmdtest = labpc_ai_cmdtest;
- s->cancel = labpc_cancel;
- }
-
- /* analog output */
- s = &dev->subdevices[1];
- if (board->has_ao) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = NUM_AO_CHAN;
- s->maxdata = 0x0fff;
- s->range_table = &range_labpc_ao;
- s->insn_write = labpc_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* initialize analog outputs to a known value */
- for (i = 0; i < s->n_chan; i++)
- labpc_ao_write(dev, s, i, s->maxdata / 2);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* 8255 dio */
- s = &dev->subdevices[2];
- if (dev->mmio)
- ret = subdev_8255_mm_init(dev, s, NULL, DIO_BASE_REG);
- else
- ret = subdev_8255_init(dev, s, NULL, DIO_BASE_REG);
- if (ret)
- return ret;
-
-	/* calibration subdevice for boards that have one */
- s = &dev->subdevices[3];
- if (board->is_labpc1200) {
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 16;
- s->maxdata = 0xff;
- s->insn_write = labpc_calib_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++) {
- write_caldac(dev, i, s->maxdata / 2);
- s->readback[i] = s->maxdata / 2;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* EEPROM */
- s = &dev->subdevices[4];
- if (board->is_labpc1200) {
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = EEPROM_SIZE;
- s->maxdata = 0xff;
- s->insn_write = labpc_eeprom_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- for (i = 0; i < s->n_chan; i++)
- s->readback[i] = labpc_eeprom_read(dev, i);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(labpc_common_attach);
-
-void labpc_common_detach(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
-
- if (devpriv)
- kfree(devpriv->counter);
-}
-EXPORT_SYMBOL_GPL(labpc_common_detach);
-
-static int __init labpc_common_init(void)
-{
- return 0;
-}
-module_init(labpc_common_init);
-
-static void __exit labpc_common_exit(void)
-{
-}
-module_exit(labpc_common_exit);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi helper for ni_labpc, ni_labpc_pci, ni_labpc_cs");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
deleted file mode 100644
index a1c69ac075d5..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- comedi/drivers/ni_labpc_cs.c
- Driver for National Instruments daqcard-1200 boards
- Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
- from the pcmcia package.
- The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_labpc_cs
-Description: National Instruments Lab-PC (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
-Status: works
-
-Thanks go to Fredrik Lingvall for much testing and perseverance in
-helping to debug daqcard-1200 support.
-
-The 1200 series boards have onboard calibration dacs for correcting
-analog input/output offsets and gains. The proper settings for these
-caldacs are stored on the board's eeprom. To read the caldac values
-from the eeprom and store them in a file that can then be used by
-comedilib, use the comedi_calibrate program.
-
-Configuration options:
- none
-
-The daqcard-1200 has quirky chanlist requirements when scanning
-multiple channels: the scan sequence must start at the highest
-channel and decrement down to channel 0. Chanlists consisting of
-a single repeated channel are also legal, and allow you to pace
-conversions in bursts.
-
-*/
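
A minimal userspace sketch of a chanlist that satisfies this ordering rule, assuming a
comedilib consumer: CR_PACK() and AREF_GROUND come from the comedi UAPI headers, while
the range index 0 and the helper name are placeholder choices.

#include <comedilib.h>

/* Fill chanlist[] for an nchan-channel scan on the daqcard-1200:
 * highest channel first, decrementing down to channel 0.
 * The range index (0) and AREF_GROUND are illustrative defaults.
 */
void build_daqcard1200_chanlist(unsigned int *chanlist, unsigned int nchan)
{
	unsigned int i;

	for (i = 0; i < nchan; i++)
		chanlist[i] = CR_PACK(nchan - 1 - i, 0, AREF_GROUND);
}

The resulting array would then typically be hooked up via cmd->chanlist and
cmd->chanlist_len before the command is submitted with comedi_command().
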
-
-/*
-
-NI manuals:
-340988a (daqcard-1200)
-
-*/
-
-#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
-
-#include "ni_labpc.h"
-
-static const struct labpc_boardinfo labpc_cs_boards[] = {
- {
- .name = "daqcard-1200",
- .ai_speed = 10000,
- .has_ao = 1,
- .is_labpc1200 = 1,
- },
-};
-
-static int labpc_cs_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- int ret;
-
- /* The ni_labpc driver needs the board_ptr */
- dev->board_ptr = &labpc_cs_boards[0];
-
- link->config_flags |= CONF_AUTO_SET_IO |
- CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
- ret = comedi_pcmcia_enable(dev, NULL);
- if (ret)
- return ret;
- dev->iobase = link->resource[0]->start;
-
- if (!link->irq)
- return -EINVAL;
-
- return labpc_common_attach(dev, link->irq, IRQF_SHARED);
-}
-
-static void labpc_cs_detach(struct comedi_device *dev)
-{
- labpc_common_detach(dev);
- comedi_pcmcia_disable(dev);
-}
-
-static struct comedi_driver driver_labpc_cs = {
- .driver_name = "ni_labpc_cs",
- .module = THIS_MODULE,
- .auto_attach = labpc_cs_auto_attach,
- .detach = labpc_cs_detach,
-};
-
-static int labpc_cs_attach(struct pcmcia_device *link)
-{
- return comedi_pcmcia_auto_config(link, &driver_labpc_cs);
-}
-
-static const struct pcmcia_device_id labpc_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0103), /* daqcard-1200 */
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, labpc_cs_ids);
-
-static struct pcmcia_driver labpc_cs_driver = {
- .name = "daqcard-1200",
- .owner = THIS_MODULE,
- .id_table = labpc_cs_ids,
- .probe = labpc_cs_attach,
- .remove = comedi_pcmcia_auto_unconfig,
-};
-module_comedi_pcmcia_driver(driver_labpc_cs, labpc_cs_driver);
-
-MODULE_DESCRIPTION("Comedi driver for National Instruments Lab-PC");
-MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.c b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
deleted file mode 100644
index 29dbdf5ec25d..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * comedi/drivers/ni_labpc_isadma.c
- * ISA DMA support for National Instruments Lab-PC series boards and
- * compatibles.
- *
- * Extracted from ni_labpc.c:
- * Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "ni_labpc.h"
-#include "ni_labpc_regs.h"
-#include "ni_labpc_isadma.h"
-
-/* size in bytes of dma buffer */
-#define LABPC_ISADMA_BUFFER_SIZE 0xff00
-
-/* utility function that suggests a dma transfer size in bytes */
-static unsigned int labpc_suggest_transfer_size(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int maxbytes)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int sample_size = comedi_bytes_per_sample(s);
- unsigned int size;
- unsigned int freq;
-
- if (cmd->convert_src == TRIG_TIMER)
- freq = 1000000000 / cmd->convert_arg;
- else
- /* return some default value */
- freq = 0xffffffff;
-
- /* make buffer fill in no more than 1/3 second */
- size = (freq / 3) * sample_size;
-
- /* set a minimum and maximum size allowed */
- if (size > maxbytes)
- size = maxbytes;
- else if (size < sample_size)
- size = sample_size;
-
- return size;
-}
-
-void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct labpc_private *devpriv = dev->private;
- struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int sample_size = comedi_bytes_per_sample(s);
-
- /* set appropriate size of transfer */
- desc->size = labpc_suggest_transfer_size(dev, s, desc->maxsize);
- if (cmd->stop_src == TRIG_COUNT &&
- devpriv->count * sample_size < desc->size)
- desc->size = devpriv->count * sample_size;
-
- comedi_isadma_program(desc);
-
- /* set CMD3 bits for caller to enable DMA and interrupt */
- devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
-}
-EXPORT_SYMBOL_GPL(labpc_setup_dma);
-
-void labpc_drain_dma(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int max_samples = comedi_bytes_to_samples(s, desc->size);
- unsigned int residue;
- unsigned int nsamples;
- unsigned int leftover;
-
- /*
- * residue is the number of bytes left to be done on the dma
- * transfer. It should always be zero at this point unless
- * the stop_src is set to external triggering.
- */
- residue = comedi_isadma_disable(desc->chan);
-
- /*
- * Figure out how many samples to read for this transfer and
- * how many will be stored for next time.
- */
- nsamples = max_samples - comedi_bytes_to_samples(s, residue);
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->count <= nsamples) {
- nsamples = devpriv->count;
- leftover = 0;
- } else {
- leftover = devpriv->count - nsamples;
- if (leftover > max_samples)
- leftover = max_samples;
- }
- devpriv->count -= nsamples;
- } else {
- leftover = max_samples;
- }
- desc->size = comedi_samples_to_bytes(s, leftover);
-
- comedi_buf_write_samples(s, desc->virt_addr, nsamples);
-}
-EXPORT_SYMBOL_GPL(labpc_drain_dma);
-
-static void handle_isa_dma(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];
-
- labpc_drain_dma(dev);
-
- if (desc->size)
- comedi_isadma_program(desc);
-
- /* clear dma tc interrupt */
- devpriv->write_byte(dev, 0x1, DMATC_CLEAR_REG);
-}
-
-void labpc_handle_dma_status(struct comedi_device *dev)
-{
- const struct labpc_boardinfo *board = dev->board_ptr;
- struct labpc_private *devpriv = dev->private;
-
- /*
-	 * if a dma terminal count or external stop trigger
-	 * has occurred
- */
- if (devpriv->stat1 & STAT1_GATA0 ||
- (board->is_labpc1200 && devpriv->stat2 & STAT2_OUTA1))
- handle_isa_dma(dev);
-}
-EXPORT_SYMBOL_GPL(labpc_handle_dma_status);
-
-void labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
-{
- struct labpc_private *devpriv = dev->private;
-
- /* only DMA channels 3 and 1 are valid */
- if (dma_chan != 1 && dma_chan != 3)
- return;
-
- /* DMA uses 1 buffer */
- devpriv->dma = comedi_isadma_alloc(dev, 1, dma_chan, dma_chan,
- LABPC_ISADMA_BUFFER_SIZE,
- COMEDI_ISADMA_READ);
-}
-EXPORT_SYMBOL_GPL(labpc_init_dma_chan);
-
-void labpc_free_dma_chan(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
-
- if (devpriv)
- comedi_isadma_free(devpriv->dma);
-}
-EXPORT_SYMBOL_GPL(labpc_free_dma_chan);
-
-static int __init ni_labpc_isadma_init_module(void)
-{
- return 0;
-}
-module_init(ni_labpc_isadma_init_module);
-
-static void __exit ni_labpc_isadma_cleanup_module(void)
-{
-}
-module_exit(ni_labpc_isadma_cleanup_module);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi NI Lab-PC ISA DMA support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.h b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
deleted file mode 100644
index b8a1b0ee6290..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * ni_labpc ISA DMA support.
- */
-
-#ifndef _NI_LABPC_ISADMA_H
-#define _NI_LABPC_ISADMA_H
-
-#if IS_ENABLED(CONFIG_COMEDI_NI_LABPC_ISADMA)
-
-void labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan);
-void labpc_free_dma_chan(struct comedi_device *dev);
-void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s);
-void labpc_drain_dma(struct comedi_device *dev);
-void labpc_handle_dma_status(struct comedi_device *dev);
-
-#else
-
-static inline void labpc_init_dma_chan(struct comedi_device *dev,
- unsigned int dma_chan)
-{
-}
-
-static inline void labpc_free_dma_chan(struct comedi_device *dev)
-{
-}
-
-static inline void labpc_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
-}
-
-static inline void labpc_drain_dma(struct comedi_device *dev)
-{
-}
-
-static inline void labpc_handle_dma_status(struct comedi_device *dev)
-{
-}
-
-#endif
-
-#endif /* _NI_LABPC_ISADMA_H */
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
deleted file mode 100644
index 77d403801db5..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * comedi/drivers/ni_labpc_pci.c
- * Driver for National Instruments Lab-PC PCI-1200
- * Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ni_labpc_pci
- * Description: National Instruments Lab-PC PCI-1200
- * Devices: [National Instruments] PCI-1200 (ni_pci-1200)
- * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- * Status: works
- *
- * This is the PCI-specific support split off from the ni_labpc driver.
- *
- * Configuration Options: not applicable, uses PCI auto config
- *
- * NI manuals:
- * 340914a (pci-1200)
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "ni_labpc.h"
-
-enum labpc_pci_boardid {
- BOARD_NI_PCI1200,
-};
-
-static const struct labpc_boardinfo labpc_pci_boards[] = {
- [BOARD_NI_PCI1200] = {
- .name = "ni_pci-1200",
- .ai_speed = 10000,
- .ai_scan_up = 1,
- .has_ao = 1,
- .is_labpc1200 = 1,
- },
-};
-
-/* ripped from mite.h and mite_setup2() to avoid mite dependency */
-#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
-#define WENAB (1 << 7) /* window enable */
-
-static int labpc_pci_mite_init(struct pci_dev *pcidev)
-{
- void __iomem *mite_base;
- u32 main_phys_addr;
-
- /* ioremap the MITE registers (BAR 0) temporarily */
- mite_base = pci_ioremap_bar(pcidev, 0);
- if (!mite_base)
- return -ENOMEM;
-
- /* set data window to main registers (BAR 1) */
- main_phys_addr = pci_resource_start(pcidev, 1);
- writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
-
- /* finished with MITE registers */
- iounmap(mite_base);
- return 0;
-}
-
-static int labpc_pci_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct labpc_boardinfo *board = NULL;
- int ret;
-
- if (context < ARRAY_SIZE(labpc_pci_boards))
- board = &labpc_pci_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- ret = labpc_pci_mite_init(pcidev);
- if (ret)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 1);
- if (!dev->mmio)
- return -ENOMEM;
-
- return labpc_common_attach(dev, pcidev->irq, IRQF_SHARED);
-}
-
-static void labpc_pci_detach(struct comedi_device *dev)
-{
- labpc_common_detach(dev);
- comedi_pci_detach(dev);
-}
-
-static struct comedi_driver labpc_pci_comedi_driver = {
- .driver_name = "labpc_pci",
- .module = THIS_MODULE,
- .auto_attach = labpc_pci_auto_attach,
- .detach = labpc_pci_detach,
-};
-
-static const struct pci_device_id labpc_pci_table[] = {
- { PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, labpc_pci_table);
-
-static int labpc_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &labpc_pci_comedi_driver,
- id->driver_data);
-}
-
-static struct pci_driver labpc_pci_driver = {
- .name = "labpc_pci",
- .id_table = labpc_pci_table,
- .probe = labpc_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(labpc_pci_comedi_driver, labpc_pci_driver);
-
-MODULE_DESCRIPTION("Comedi: National Instruments Lab-PC PCI-1200 driver");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
deleted file mode 100644
index 2a274a3e4e73..000000000000
--- a/drivers/staging/comedi/drivers/ni_labpc_regs.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * ni_labpc register definitions.
- */
-
-#ifndef _NI_LABPC_REGS_H
-#define _NI_LABPC_REGS_H
-
-/*
- * Register map (all registers are 8-bit)
- */
-#define STAT1_REG 0x00 /* R: Status 1 reg */
-#define STAT1_DAVAIL (1 << 0)
-#define STAT1_OVERRUN (1 << 1)
-#define STAT1_OVERFLOW (1 << 2)
-#define STAT1_CNTINT (1 << 3)
-#define STAT1_GATA0 (1 << 5)
-#define STAT1_EXTGATA0 (1 << 6)
-#define CMD1_REG 0x00 /* W: Command 1 reg */
-#define CMD1_MA(x) (((x) & 0x7) << 0)
-#define CMD1_TWOSCMP (1 << 3)
-#define CMD1_GAIN(x) (((x) & 0x7) << 4)
-#define CMD1_SCANEN (1 << 7)
-#define CMD2_REG 0x01 /* W: Command 2 reg */
-#define CMD2_PRETRIG (1 << 0)
-#define CMD2_HWTRIG (1 << 1)
-#define CMD2_SWTRIG (1 << 2)
-#define CMD2_TBSEL (1 << 3)
-#define CMD2_2SDAC0 (1 << 4)
-#define CMD2_2SDAC1 (1 << 5)
-#define CMD2_LDAC(x) (1 << (6 + (x)))
-#define CMD3_REG 0x02 /* W: Command 3 reg */
-#define CMD3_DMAEN (1 << 0)
-#define CMD3_DIOINTEN (1 << 1)
-#define CMD3_DMATCINTEN (1 << 2)
-#define CMD3_CNTINTEN (1 << 3)
-#define CMD3_ERRINTEN (1 << 4)
-#define CMD3_FIFOINTEN (1 << 5)
-#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
-#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
-#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
-#define ADC_FIFO_CLEAR_REG 0x08 /* W: A/D FIFO Clear reg */
-#define ADC_FIFO_REG 0x0a /* R: A/D FIFO reg */
-#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
-#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
-#define CMD6_REG 0x0e /* W: Command 6 reg */
-#define CMD6_NRSE (1 << 0)
-#define CMD6_ADCUNI (1 << 1)
-#define CMD6_DACUNI(x) (1 << (2 + (x)))
-#define CMD6_HFINTEN (1 << 5)
-#define CMD6_DQINTEN (1 << 6)
-#define CMD6_SCANUP (1 << 7)
-#define CMD4_REG		0x0f	/* W: Command 4 reg */
-#define CMD4_INTSCAN (1 << 0)
-#define CMD4_EOIRCV (1 << 1)
-#define CMD4_ECLKDRV (1 << 2)
-#define CMD4_SEDIFF (1 << 3)
-#define CMD4_ECLKRCV (1 << 4)
-#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
-#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
-#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
-#define CMD5_REG 0x1c /* W: Command 5 reg */
-#define CMD5_WRTPRT (1 << 2)
-#define CMD5_DITHEREN (1 << 3)
-#define CMD5_CALDACLD (1 << 4)
-#define CMD5_SCLK (1 << 5)
-#define CMD5_SDATA (1 << 6)
-#define CMD5_EEPROMCS (1 << 7)
-#define STAT2_REG 0x1d /* R: Status 2 reg */
-#define STAT2_PROMOUT (1 << 0)
-#define STAT2_OUTA1 (1 << 1)
-#define STAT2_FIFONHF (1 << 2)
-#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
-#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
-
-#endif /* _NI_LABPC_REGS_H */
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
deleted file mode 100644
index 6cc304a4c59b..000000000000
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ /dev/null
@@ -1,5415 +0,0 @@
-/*
- comedi/drivers/ni_mio_common.c
- Hardware driver for DAQ-STC based boards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
- Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
- This file is meant to be included by another file, e.g.,
- ni_atmio.c or ni_pcimio.c.
-
- Interrupt support originally added by Truxton Fulton
- <trux@truxton.com>
-
- References (from ftp://ftp.natinst.com/support/manuals):
-
- 340747b.pdf AT-MIO E series Register Level Programmer Manual
- 341079b.pdf PCI E Series RLPM
- 340934b.pdf DAQ-STC reference manual
- 67xx and 611x registers (from ftp://ftp.ni.com/support/daq/mhddk/documentation/)
- release_ni611x.pdf
- release_ni67xx.pdf
- Other possibly relevant info:
-
- 320517c.pdf User manual (obsolete)
- 320517f.pdf User manual (new)
- 320889a.pdf delete
- 320906c.pdf maximum signal ratings
- 321066a.pdf about 16x
- 321791a.pdf discontinuation of at-mio-16e-10 rev. c
- 321808a.pdf about at-mio-16e-10 rev P
- 321837a.pdf discontinuation of at-mio-16de-10 rev d
- 321838a.pdf about at-mio-16de-10 rev N
-
- ISSUES:
-
- - the interrupt routine needs to be cleaned up
-
- 2006-02-07: S-Series PCI-6143: Support has been added but is not
- fully tested as yet. Terry Barnaby, BEAM Ltd.
-*/
-
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include "8255.h"
-#include "mite.h"
-
-/* A timeout count */
-#define NI_TIMEOUT 1000
-
-/* Note: this table must match the ai_gain_* definitions */
-static const short ni_gainlkup[][16] = {
- [ai_gain_16] = {0, 1, 2, 3, 4, 5, 6, 7,
- 0x100, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107},
- [ai_gain_8] = {1, 2, 4, 7, 0x101, 0x102, 0x104, 0x107},
- [ai_gain_14] = {1, 2, 3, 4, 5, 6, 7,
- 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107},
- [ai_gain_4] = {0, 1, 4, 7},
- [ai_gain_611x] = {0x00a, 0x00b, 0x001, 0x002,
- 0x003, 0x004, 0x005, 0x006},
- [ai_gain_622x] = {0, 1, 4, 5},
- [ai_gain_628x] = {1, 2, 3, 4, 5, 6, 7},
- [ai_gain_6143] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
-};
-
-static const struct comedi_lrange range_ni_E_ai = {
- 16, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.1),
- BIP_RANGE(0.05),
- UNI_RANGE(20),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1)
- }
-};
-
-static const struct comedi_lrange range_ni_E_ai_limited = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(1),
- UNI_RANGE(0.1)
- }
-};
-
-static const struct comedi_lrange range_ni_E_ai_limited14 = {
- 14, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.2),
- BIP_RANGE(0.1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1),
- UNI_RANGE(0.5),
- UNI_RANGE(0.2),
- UNI_RANGE(0.1)
- }
-};
-
-static const struct comedi_lrange range_ni_E_ai_bipolar4 = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05)
- }
-};
-
-static const struct comedi_lrange range_ni_E_ai_611x = {
- 8, {
- BIP_RANGE(50),
- BIP_RANGE(20),
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.2)
- }
-};
-
-static const struct comedi_lrange range_ni_M_ai_622x = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.2)
- }
-};
-
-static const struct comedi_lrange range_ni_M_ai_628x = {
- 7, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- BIP_RANGE(0.5),
- BIP_RANGE(0.2),
- BIP_RANGE(0.1)
- }
-};
-
-static const struct comedi_lrange range_ni_E_ao_ext = {
- 4, {
- BIP_RANGE(10),
- UNI_RANGE(10),
- RANGE_ext(-1, 1),
- RANGE_ext(0, 1)
- }
-};
-
-static const struct comedi_lrange *const ni_range_lkup[] = {
- [ai_gain_16] = &range_ni_E_ai,
- [ai_gain_8] = &range_ni_E_ai_limited,
- [ai_gain_14] = &range_ni_E_ai_limited14,
- [ai_gain_4] = &range_ni_E_ai_bipolar4,
- [ai_gain_611x] = &range_ni_E_ai_611x,
- [ai_gain_622x] = &range_ni_M_ai_622x,
- [ai_gain_628x] = &range_ni_M_ai_628x,
- [ai_gain_6143] = &range_bipolar5
-};
-
-enum aimodes {
- AIMODE_NONE = 0,
- AIMODE_HALF_FULL = 1,
- AIMODE_SCAN = 2,
- AIMODE_SAMPLE = 3,
-};
-
-enum ni_common_subdevices {
- NI_AI_SUBDEV,
- NI_AO_SUBDEV,
- NI_DIO_SUBDEV,
- NI_8255_DIO_SUBDEV,
- NI_UNUSED_SUBDEV,
- NI_CALIBRATION_SUBDEV,
- NI_EEPROM_SUBDEV,
- NI_PFI_DIO_SUBDEV,
- NI_CS5529_CALIBRATION_SUBDEV,
- NI_SERIAL_SUBDEV,
- NI_RTSI_SUBDEV,
- NI_GPCT0_SUBDEV,
- NI_GPCT1_SUBDEV,
- NI_FREQ_OUT_SUBDEV,
- NI_NUM_SUBDEVICES
-};
-static inline unsigned NI_GPCT_SUBDEV(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NI_GPCT0_SUBDEV;
- case 1:
- return NI_GPCT1_SUBDEV;
- default:
- break;
- }
- BUG();
- return NI_GPCT0_SUBDEV;
-}
-
-enum timebase_nanoseconds {
- TIMEBASE_1_NS = 50,
- TIMEBASE_2_NS = 10000
-};
-
-#define SERIAL_DISABLED 0
-#define SERIAL_600NS 600
-#define SERIAL_1_2US 1200
-#define SERIAL_10US 10000
-
-static const int num_adc_stages_611x = 3;
-
-static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
-{
- if (dev->mmio)
- writel(data, dev->mmio + reg);
-
- outl(data, dev->iobase + reg);
-}
-
-static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
-{
- if (dev->mmio)
- writew(data, dev->mmio + reg);
-
- outw(data, dev->iobase + reg);
-}
-
-static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
-{
- if (dev->mmio)
- writeb(data, dev->mmio + reg);
-
- outb(data, dev->iobase + reg);
-}
-
-static uint32_t ni_readl(struct comedi_device *dev, int reg)
-{
- if (dev->mmio)
- return readl(dev->mmio + reg);
-
- return inl(dev->iobase + reg);
-}
-
-static uint16_t ni_readw(struct comedi_device *dev, int reg)
-{
- if (dev->mmio)
- return readw(dev->mmio + reg);
-
- return inw(dev->iobase + reg);
-}
-
-static uint8_t ni_readb(struct comedi_device *dev, int reg)
-{
- if (dev->mmio)
- return readb(dev->mmio + reg);
-
- return inb(dev->iobase + reg);
-}
-
-/*
- * We automatically take advantage of STC registers that can be
- * read/written directly in the I/O space of the board.
- *
- * The AT-MIO and DAQCard devices map the low 8 STC registers to
- * iobase+reg*2.
- *
- * Most PCIMIO devices also map the low 8 STC registers but the
- * 611x devices map the read registers to iobase+(addr-1)*2.
- * For now non-windowed STC access is disabled if a PCIMIO device
- * is detected (devpriv->mite has been initialized).
- *
- * The M series devices do not use windowed registers for the
- * STC registers. The functions below handle the mapping of the
- * windowed STC registers to the m series register offsets.
- */
-
-struct mio_regmap {
- unsigned int mio_reg;
- int size;
-};
-
-static const struct mio_regmap m_series_stc_write_regmap[] = {
- [NISTC_INTA_ACK_REG] = { 0x104, 2 },
- [NISTC_INTB_ACK_REG] = { 0x106, 2 },
- [NISTC_AI_CMD2_REG] = { 0x108, 2 },
- [NISTC_AO_CMD2_REG] = { 0x10a, 2 },
- [NISTC_G0_CMD_REG] = { 0x10c, 2 },
- [NISTC_G1_CMD_REG] = { 0x10e, 2 },
- [NISTC_AI_CMD1_REG] = { 0x110, 2 },
- [NISTC_AO_CMD1_REG] = { 0x112, 2 },
- /*
- * NISTC_DIO_OUT_REG maps to:
- * { NI_M_DIO_REG, 4 } and { NI_M_SCXI_SER_DO_REG, 1 }
- */
- [NISTC_DIO_OUT_REG] = { 0, 0 }, /* DOES NOT MAP CLEANLY */
- [NISTC_DIO_CTRL_REG] = { 0, 0 }, /* DOES NOT MAP CLEANLY */
- [NISTC_AI_MODE1_REG] = { 0x118, 2 },
- [NISTC_AI_MODE2_REG] = { 0x11a, 2 },
- [NISTC_AI_SI_LOADA_REG] = { 0x11c, 4 },
- [NISTC_AI_SI_LOADB_REG] = { 0x120, 4 },
- [NISTC_AI_SC_LOADA_REG] = { 0x124, 4 },
- [NISTC_AI_SC_LOADB_REG] = { 0x128, 4 },
- [NISTC_AI_SI2_LOADA_REG] = { 0x12c, 4 },
- [NISTC_AI_SI2_LOADB_REG] = { 0x130, 4 },
- [NISTC_G0_MODE_REG] = { 0x134, 2 },
- [NISTC_G1_MODE_REG] = { 0x136, 2 },
- [NISTC_G0_LOADA_REG] = { 0x138, 4 },
- [NISTC_G0_LOADB_REG] = { 0x13c, 4 },
- [NISTC_G1_LOADA_REG] = { 0x140, 4 },
- [NISTC_G1_LOADB_REG] = { 0x144, 4 },
- [NISTC_G0_INPUT_SEL_REG] = { 0x148, 2 },
- [NISTC_G1_INPUT_SEL_REG] = { 0x14a, 2 },
- [NISTC_AO_MODE1_REG] = { 0x14c, 2 },
- [NISTC_AO_MODE2_REG] = { 0x14e, 2 },
- [NISTC_AO_UI_LOADA_REG] = { 0x150, 4 },
- [NISTC_AO_UI_LOADB_REG] = { 0x154, 4 },
- [NISTC_AO_BC_LOADA_REG] = { 0x158, 4 },
- [NISTC_AO_BC_LOADB_REG] = { 0x15c, 4 },
- [NISTC_AO_UC_LOADA_REG] = { 0x160, 4 },
- [NISTC_AO_UC_LOADB_REG] = { 0x164, 4 },
- [NISTC_CLK_FOUT_REG] = { 0x170, 2 },
- [NISTC_IO_BIDIR_PIN_REG] = { 0x172, 2 },
- [NISTC_RTSI_TRIG_DIR_REG] = { 0x174, 2 },
- [NISTC_INT_CTRL_REG] = { 0x176, 2 },
- [NISTC_AI_OUT_CTRL_REG] = { 0x178, 2 },
- [NISTC_ATRIG_ETC_REG] = { 0x17a, 2 },
- [NISTC_AI_START_STOP_REG] = { 0x17c, 2 },
- [NISTC_AI_TRIG_SEL_REG] = { 0x17e, 2 },
- [NISTC_AI_DIV_LOADA_REG] = { 0x180, 4 },
- [NISTC_AO_START_SEL_REG] = { 0x184, 2 },
- [NISTC_AO_TRIG_SEL_REG] = { 0x186, 2 },
- [NISTC_G0_AUTOINC_REG] = { 0x188, 2 },
- [NISTC_G1_AUTOINC_REG] = { 0x18a, 2 },
- [NISTC_AO_MODE3_REG] = { 0x18c, 2 },
- [NISTC_RESET_REG] = { 0x190, 2 },
- [NISTC_INTA_ENA_REG] = { 0x192, 2 },
- [NISTC_INTA2_ENA_REG] = { 0, 0 }, /* E-Series only */
- [NISTC_INTB_ENA_REG] = { 0x196, 2 },
- [NISTC_INTB2_ENA_REG] = { 0, 0 }, /* E-Series only */
- [NISTC_AI_PERSONAL_REG] = { 0x19a, 2 },
- [NISTC_AO_PERSONAL_REG] = { 0x19c, 2 },
- [NISTC_RTSI_TRIGA_OUT_REG] = { 0x19e, 2 },
- [NISTC_RTSI_TRIGB_OUT_REG] = { 0x1a0, 2 },
- [NISTC_RTSI_BOARD_REG] = { 0, 0 }, /* Unknown */
- [NISTC_CFG_MEM_CLR_REG] = { 0x1a4, 2 },
- [NISTC_ADC_FIFO_CLR_REG] = { 0x1a6, 2 },
- [NISTC_DAC_FIFO_CLR_REG] = { 0x1a8, 2 },
- [NISTC_AO_OUT_CTRL_REG] = { 0x1ac, 2 },
- [NISTC_AI_MODE3_REG] = { 0x1ae, 2 },
-};
-
-static void m_series_stc_write(struct comedi_device *dev,
- unsigned int data, unsigned int reg)
-{
- const struct mio_regmap *regmap;
-
- if (reg < ARRAY_SIZE(m_series_stc_write_regmap)) {
- regmap = &m_series_stc_write_regmap[reg];
- } else {
- dev_warn(dev->class_dev, "%s: unhandled register=0x%x\n",
- __func__, reg);
- return;
- }
-
- switch (regmap->size) {
- case 4:
- ni_writel(dev, data, regmap->mio_reg);
- break;
- case 2:
- ni_writew(dev, data, regmap->mio_reg);
- break;
- default:
- dev_warn(dev->class_dev, "%s: unmapped register=0x%x\n",
- __func__, reg);
- break;
- }
-}
-
-static const struct mio_regmap m_series_stc_read_regmap[] = {
- [NISTC_AI_STATUS1_REG] = { 0x104, 2 },
- [NISTC_AO_STATUS1_REG] = { 0x106, 2 },
- [NISTC_G01_STATUS_REG] = { 0x108, 2 },
- [NISTC_AI_STATUS2_REG] = { 0, 0 }, /* Unknown */
- [NISTC_AO_STATUS2_REG] = { 0x10c, 2 },
- [NISTC_DIO_IN_REG] = { 0, 0 }, /* Unknown */
- [NISTC_G0_HW_SAVE_REG] = { 0x110, 4 },
- [NISTC_G1_HW_SAVE_REG] = { 0x114, 4 },
- [NISTC_G0_SAVE_REG] = { 0x118, 4 },
- [NISTC_G1_SAVE_REG] = { 0x11c, 4 },
- [NISTC_AO_UI_SAVE_REG] = { 0x120, 4 },
- [NISTC_AO_BC_SAVE_REG] = { 0x124, 4 },
- [NISTC_AO_UC_SAVE_REG] = { 0x128, 4 },
- [NISTC_STATUS1_REG] = { 0x136, 2 },
- [NISTC_DIO_SERIAL_IN_REG] = { 0x009, 1 },
- [NISTC_STATUS2_REG] = { 0x13a, 2 },
- [NISTC_AI_SI_SAVE_REG] = { 0x180, 4 },
- [NISTC_AI_SC_SAVE_REG] = { 0x184, 4 },
-};
-
-static unsigned int m_series_stc_read(struct comedi_device *dev,
- unsigned int reg)
-{
- const struct mio_regmap *regmap;
-
- if (reg < ARRAY_SIZE(m_series_stc_read_regmap)) {
- regmap = &m_series_stc_read_regmap[reg];
- } else {
- dev_warn(dev->class_dev, "%s: unhandled register=0x%x\n",
- __func__, reg);
- return 0;
- }
-
- switch (regmap->size) {
- case 4:
- return ni_readl(dev, regmap->mio_reg);
- case 2:
- return ni_readw(dev, regmap->mio_reg);
- case 1:
- return ni_readb(dev, regmap->mio_reg);
- default:
- dev_warn(dev->class_dev, "%s: unmapped register=0x%x\n",
- __func__, reg);
- return 0;
- }
-}
-
-static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- if (devpriv->is_m_series) {
- m_series_stc_write(dev, data, reg);
- } else {
- spin_lock_irqsave(&devpriv->window_lock, flags);
- if (!devpriv->mite && reg < 8) {
- ni_writew(dev, data, reg * 2);
- } else {
- ni_writew(dev, reg, NI_E_STC_WINDOW_ADDR_REG);
- ni_writew(dev, data, NI_E_STC_WINDOW_DATA_REG);
- }
- spin_unlock_irqrestore(&devpriv->window_lock, flags);
- }
-}
-
-static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
-{
- struct ni_private *devpriv = dev->private;
-
- if (devpriv->is_m_series) {
- m_series_stc_write(dev, data, reg);
- } else {
- ni_stc_writew(dev, data >> 16, reg);
- ni_stc_writew(dev, data & 0xffff, reg + 1);
- }
-}
-
-static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- uint16_t val;
-
- if (devpriv->is_m_series) {
- val = m_series_stc_read(dev, reg);
- } else {
- spin_lock_irqsave(&devpriv->window_lock, flags);
- if (!devpriv->mite && reg < 8) {
- val = ni_readw(dev, reg * 2);
- } else {
- ni_writew(dev, reg, NI_E_STC_WINDOW_ADDR_REG);
- val = ni_readw(dev, NI_E_STC_WINDOW_DATA_REG);
- }
- spin_unlock_irqrestore(&devpriv->window_lock, flags);
- }
- return val;
-}
-
-static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
-{
- struct ni_private *devpriv = dev->private;
- uint32_t val;
-
- if (devpriv->is_m_series) {
- val = m_series_stc_read(dev, reg);
- } else {
- val = ni_stc_readw(dev, reg) << 16;
- val |= ni_stc_readw(dev, reg + 1);
- }
- return val;
-}
-
-static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
- unsigned bit_mask, unsigned bit_values)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- switch (reg) {
- case NISTC_INTA_ENA_REG:
- devpriv->int_a_enable_reg &= ~bit_mask;
- devpriv->int_a_enable_reg |= bit_values & bit_mask;
- ni_stc_writew(dev, devpriv->int_a_enable_reg, reg);
- break;
- case NISTC_INTB_ENA_REG:
- devpriv->int_b_enable_reg &= ~bit_mask;
- devpriv->int_b_enable_reg |= bit_values & bit_mask;
- ni_stc_writew(dev, devpriv->int_b_enable_reg, reg);
- break;
- case NISTC_IO_BIDIR_PIN_REG:
- devpriv->io_bidirection_pin_reg &= ~bit_mask;
- devpriv->io_bidirection_pin_reg |= bit_values & bit_mask;
- ni_stc_writew(dev, devpriv->io_bidirection_pin_reg, reg);
- break;
- case NI_E_DMA_AI_AO_SEL_REG:
- devpriv->ai_ao_select_reg &= ~bit_mask;
- devpriv->ai_ao_select_reg |= bit_values & bit_mask;
- ni_writeb(dev, devpriv->ai_ao_select_reg, reg);
- break;
- case NI_E_DMA_G0_G1_SEL_REG:
- devpriv->g0_g1_select_reg &= ~bit_mask;
- devpriv->g0_g1_select_reg |= bit_values & bit_mask;
- ni_writeb(dev, devpriv->g0_g1_select_reg, reg);
- break;
- default:
- dev_err(dev->class_dev, "called with invalid register %d\n",
- reg);
- break;
- }
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
-
-#ifdef PCIDMA
-/* DMA channel setup */
-static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
-{
- if (channel < 4)
- return 1 << channel;
- if (channel == 4)
- return 0x3;
- if (channel == 5)
- return 0x5;
- BUG();
- return 0;
-}
-
-/* negative channel means no channel */
-static inline void ni_set_ai_dma_channel(struct comedi_device *dev, int channel)
-{
- unsigned bits = 0;
-
- if (channel >= 0)
- bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
-}
-
-/* negative channel means no channel */
-static inline void ni_set_ao_dma_channel(struct comedi_device *dev, int channel)
-{
- unsigned bits = 0;
-
- if (channel >= 0)
- bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
-}
-
-/* negative channel means no channel */
-static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
- unsigned gpct_index,
- int channel)
-{
- unsigned bits = 0;
-
- if (channel >= 0)
- bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
- NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
- NI_E_DMA_G0_G1_SEL(gpct_index, bits));
-}
-
-/* negative mite_channel means no channel */
-static inline void ni_set_cdo_dma_channel(struct comedi_device *dev,
- int mite_channel)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- unsigned bits;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
- if (mite_channel >= 0) {
- /*
- * XXX just guessing ni_stc_dma_channel_select_bitfield()
- * returns the right bits, under the assumption the cdio dma
- * selection works just like ai/ao/gpct.
- * Definitely works for dma channels 0 and 1.
- */
- bits = ni_stc_dma_channel_select_bitfield(mite_channel);
- devpriv->cdio_dma_select_reg |= NI_M_CDIO_DMA_SEL_CDO(bits);
- }
- ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
-
-static int ni_request_ai_mite_channel(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ai_mite_chan);
- devpriv->ai_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
- if (!devpriv->ai_mite_chan) {
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- dev_err(dev->class_dev,
- "failed to reserve mite dma channel for analog input\n");
- return -EBUSY;
- }
- devpriv->ai_mite_chan->dir = COMEDI_INPUT;
- ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- return 0;
-}
-
-static int ni_request_ao_mite_channel(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ao_mite_chan);
- devpriv->ao_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
- if (!devpriv->ao_mite_chan) {
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- dev_err(dev->class_dev,
-			"failed to reserve mite dma channel for analog output\n");
- return -EBUSY;
- }
- devpriv->ao_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- return 0;
-}
-
-static int ni_request_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index,
- enum comedi_io_direction direction)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- struct mite_channel *mite_chan;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan);
- mite_chan =
- mite_request_channel(devpriv->mite,
- devpriv->gpct_mite_ring[gpct_index]);
- if (!mite_chan) {
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- dev_err(dev->class_dev,
- "failed to reserve mite dma channel for counter\n");
- return -EBUSY;
- }
- mite_chan->dir = direction;
- ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index],
- mite_chan);
- ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- return 0;
-}
-
-#endif /* PCIDMA */
-
-static int ni_request_cdo_mite_channel(struct comedi_device *dev)
-{
-#ifdef PCIDMA
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->cdo_mite_chan);
- devpriv->cdo_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
- if (!devpriv->cdo_mite_chan) {
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- dev_err(dev->class_dev,
- "failed to reserve mite dma channel for correlated digital output\n");
- return -EBUSY;
- }
- devpriv->cdo_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
- return 0;
-}
-
-static void ni_release_ai_mite_channel(struct comedi_device *dev)
-{
-#ifdef PCIDMA
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ai_mite_chan) {
- ni_set_ai_dma_channel(dev, -1);
- mite_release_channel(devpriv->ai_mite_chan);
- devpriv->ai_mite_chan = NULL;
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
-}
-
-static void ni_release_ao_mite_channel(struct comedi_device *dev)
-{
-#ifdef PCIDMA
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan) {
- ni_set_ao_dma_channel(dev, -1);
- mite_release_channel(devpriv->ao_mite_chan);
- devpriv->ao_mite_chan = NULL;
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
-}
-
-#ifdef PCIDMA
-static void ni_release_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->counter_dev->counters[gpct_index].mite_chan) {
- struct mite_channel *mite_chan =
- devpriv->counter_dev->counters[gpct_index].mite_chan;
-
- ni_set_gpct_dma_channel(dev, gpct_index, -1);
- ni_tio_set_mite_channel(&devpriv->
- counter_dev->counters[gpct_index],
- NULL);
- mite_release_channel(mite_chan);
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-#endif /* PCIDMA */
-
-static void ni_release_cdo_mite_channel(struct comedi_device *dev)
-{
-#ifdef PCIDMA
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->cdo_mite_chan) {
- ni_set_cdo_dma_channel(dev, -1);
- mite_release_channel(devpriv->cdo_mite_chan);
- devpriv->cdo_mite_chan = NULL;
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
-}
-
-#ifdef PCIDMA
-static void ni_e_series_enable_second_irq(struct comedi_device *dev,
- unsigned gpct_index, short enable)
-{
- struct ni_private *devpriv = dev->private;
- uint16_t val = 0;
- int reg;
-
- if (devpriv->is_m_series || gpct_index > 1)
- return;
-
- /*
- * e-series boards use the second irq signals to generate
- * dma requests for their counters
- */
- if (gpct_index == 0) {
- reg = NISTC_INTA2_ENA_REG;
- if (enable)
- val = NISTC_INTA_ENA_G0_GATE;
- } else {
- reg = NISTC_INTB2_ENA_REG;
- if (enable)
- val = NISTC_INTB_ENA_G1_GATE;
- }
- ni_stc_writew(dev, val, reg);
-}
-#endif /* PCIDMA */
-
-static void ni_clear_ai_fifo(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- static const int timeout = 10000;
- int i;
-
- if (devpriv->is_6143) {
- /* Flush the 6143 data FIFO */
- ni_writel(dev, 0x10, NI6143_AI_FIFO_CTRL_REG);
- ni_writel(dev, 0x00, NI6143_AI_FIFO_CTRL_REG);
- /* Wait for complete */
- for (i = 0; i < timeout; i++) {
- if (!(ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) & 0x10))
- break;
- udelay(1);
- }
- if (i == timeout)
- dev_err(dev->class_dev, "FIFO flush timeout\n");
- } else {
- ni_stc_writew(dev, 1, NISTC_ADC_FIFO_CLR_REG);
- if (devpriv->is_625x) {
- ni_writeb(dev, 0, NI_M_STATIC_AI_CTRL_REG(0));
- ni_writeb(dev, 1, NI_M_STATIC_AI_CTRL_REG(0));
-#if 0
- /* the NI example code does 3 convert pulses for 625x boards,
- but that appears to be wrong in practice. */
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
- NISTC_AI_CMD1_REG);
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
- NISTC_AI_CMD1_REG);
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
- NISTC_AI_CMD1_REG);
-#endif
- }
- }
-}
-
-static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
- int addr)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->window_lock, flags);
- ni_writew(dev, addr, NI611X_AO_WINDOW_ADDR_REG);
- ni_writew(dev, data, NI611X_AO_WINDOW_DATA_REG);
- spin_unlock_irqrestore(&devpriv->window_lock, flags);
-}
-
-static inline void ni_ao_win_outl(struct comedi_device *dev, uint32_t data,
- int addr)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->window_lock, flags);
- ni_writew(dev, addr, NI611X_AO_WINDOW_ADDR_REG);
- ni_writel(dev, data, NI611X_AO_WINDOW_DATA_REG);
- spin_unlock_irqrestore(&devpriv->window_lock, flags);
-}
-
-static inline unsigned short ni_ao_win_inw(struct comedi_device *dev, int addr)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- unsigned short data;
-
- spin_lock_irqsave(&devpriv->window_lock, flags);
- ni_writew(dev, addr, NI611X_AO_WINDOW_ADDR_REG);
- data = ni_readw(dev, NI611X_AO_WINDOW_DATA_REG);
- spin_unlock_irqrestore(&devpriv->window_lock, flags);
- return data;
-}
-
-/* ni_set_bits( ) allows different parts of the ni_mio_common driver to
-* share registers (such as Interrupt_A_Register) without interfering with
-* each other.
-*
-* NOTE: the switch/case statements are optimized out for a constant argument
-* so this is actually quite fast. If you must wrap another function around
-* this, make it inline to avoid a large speed penalty.
-*
-* value should only be 1 or 0.
-*/
-static inline void ni_set_bits(struct comedi_device *dev, int reg,
- unsigned bits, unsigned value)
-{
- unsigned bit_values;
-
- if (value)
- bit_values = bits;
- else
- bit_values = 0;
- ni_set_bitfield(dev, reg, bits, bit_values);
-}
-
-#ifdef PCIDMA
-static void ni_sync_ai_dma(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ai_mite_chan)
- mite_sync_input_dma(devpriv->ai_mite_chan, s);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-
-static int ni_ai_drain_dma(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- int i;
- static const int timeout = 10000;
- unsigned long flags;
- int retval = 0;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ai_mite_chan) {
- for (i = 0; i < timeout; i++) {
- if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E)
- && mite_bytes_in_transit(devpriv->ai_mite_chan) ==
- 0)
- break;
- udelay(5);
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "timed out\n");
- dev_err(dev->class_dev,
- "mite_bytes_in_transit=%i, AI_Status1_Register=0x%x\n",
- mite_bytes_in_transit(devpriv->ai_mite_chan),
- ni_stc_readw(dev, NISTC_AI_STATUS1_REG));
- retval = -1;
- }
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-
- ni_sync_ai_dma(dev);
-
- return retval;
-}
-
-static void mite_handle_b_linkc(struct mite_struct *mite,
- struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan)
- mite_sync_output_dma(devpriv->ao_mite_chan, s);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-
-static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
-{
- static const int timeout = 10000;
- int i;
-
- for (i = 0; i < timeout; i++) {
- unsigned short b_status;
-
- b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
- if (b_status & NISTC_AO_STATUS1_FIFO_HF)
- break;
- /* if we poll too often, the pci bus activity seems
- to slow the dma transfer down */
- udelay(10);
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "timed out waiting for dma load\n");
- return -EPIPE;
- }
- return 0;
-}
-#endif /* PCIDMA */
-
-#ifndef PCIDMA
-
-static void ni_ao_fifo_load(struct comedi_device *dev,
- struct comedi_subdevice *s, int n)
-{
- struct ni_private *devpriv = dev->private;
- int i;
- unsigned short d;
- u32 packed_data;
-
- for (i = 0; i < n; i++) {
- comedi_buf_read_samples(s, &d, 1);
-
- if (devpriv->is_6xxx) {
- packed_data = d & 0xffff;
- /* 6711 only has 16 bit wide ao fifo */
- if (!devpriv->is_6711) {
- comedi_buf_read_samples(s, &d, 1);
- i++;
- packed_data |= (d << 16) & 0xffff0000;
- }
- ni_writel(dev, packed_data, NI611X_AO_FIFO_DATA_REG);
- } else {
- ni_writew(dev, d, NI_E_AO_FIFO_DATA_REG);
- }
- }
-}
-
-/*
- * There's a small problem if the FIFO gets really low and we
- * don't have the data to fill it. Basically, if after we fill
- * the FIFO with all the data available, the FIFO is _still_
- * less than half full, we never clear the interrupt. If the
- * IRQ is in edge mode, we never get another interrupt, because
- * this one wasn't cleared. If in level mode, we get flooded
- * with interrupts that we can't fulfill, because nothing ever
- * gets put into the buffer.
- *
- * This kind of situation is recoverable, but it is easier to
- * just pretend we had a FIFO underrun, since there is a good
- * chance it will happen anyway. This is _not_ the case for
- * RT code, as RT code might purposely be running close to the
- * metal. Needs to be fixed eventually.
- */
-static int ni_ao_fifo_half_empty(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- unsigned int nbytes;
- unsigned int nsamples;
-
- nbytes = comedi_buf_read_n_available(s);
- if (nbytes == 0) {
- s->async->events |= COMEDI_CB_OVERFLOW;
- return 0;
- }
-
- nsamples = comedi_bytes_to_samples(s, nbytes);
- if (nsamples > board->ao_fifo_depth / 2)
- nsamples = board->ao_fifo_depth / 2;
-
- ni_ao_fifo_load(dev, s, nsamples);
-
- return 1;
-}
-
-static int ni_ao_prep_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- unsigned int nbytes;
- unsigned int nsamples;
-
- /* reset fifo */
- ni_stc_writew(dev, 1, NISTC_DAC_FIFO_CLR_REG);
- if (devpriv->is_6xxx)
- ni_ao_win_outl(dev, 0x6, NI611X_AO_FIFO_OFFSET_LOAD_REG);
-
- /* load some data */
- nbytes = comedi_buf_read_n_available(s);
- if (nbytes == 0)
- return 0;
-
- nsamples = comedi_bytes_to_samples(s, nbytes);
- if (nsamples > board->ao_fifo_depth)
- nsamples = board->ao_fifo_depth;
-
- ni_ao_fifo_load(dev, s, nsamples);
-
- return nsamples;
-}
-
-static void ni_ai_fifo_read(struct comedi_device *dev,
- struct comedi_subdevice *s, int n)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- u32 dl;
- unsigned short data;
- int i;
-
- if (devpriv->is_611x) {
- for (i = 0; i < n / 2; i++) {
- dl = ni_readl(dev, NI611X_AI_FIFO_DATA_REG);
- /* This may get the hi/lo data in the wrong order */
- data = (dl >> 16) & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- data = dl & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
- /* Check if there's a single sample stuck in the FIFO */
- if (n % 2) {
- dl = ni_readl(dev, NI611X_AI_FIFO_DATA_REG);
- data = dl & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
- } else if (devpriv->is_6143) {
-		/* Read the FIFO assuming data is present; no FIFO status checks are performed */
- for (i = 0; i < n / 2; i++) {
- dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
-
- data = (dl >> 16) & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- data = dl & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
- if (n % 2) {
- /* Assume there is a single sample stuck in the FIFO */
- /* Get stranded sample into FIFO */
- ni_writel(dev, 0x01, NI6143_AI_FIFO_CTRL_REG);
- dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
- data = (dl >> 16) & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
- } else {
- if (n > sizeof(devpriv->ai_fifo_buffer) /
- sizeof(devpriv->ai_fifo_buffer[0])) {
- dev_err(dev->class_dev,
- "bug! ai_fifo_buffer too small\n");
- async->events |= COMEDI_CB_ERROR;
- return;
- }
- for (i = 0; i < n; i++) {
- devpriv->ai_fifo_buffer[i] =
- ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- }
- comedi_buf_write_samples(s, devpriv->ai_fifo_buffer, n);
- }
-}
-
-static void ni_handle_fifo_half_full(struct comedi_device *dev)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct comedi_subdevice *s = dev->read_subdev;
- int n;
-
- n = board->ai_fifo_depth / 2;
-
- ni_ai_fifo_read(dev, s, n);
-}
-#endif
-
-/*
- Empties the AI fifo
-*/
-static void ni_handle_fifo_dregs(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- u32 dl;
- unsigned short data;
- unsigned short fifo_empty;
- int i;
-
- if (devpriv->is_611x) {
- while ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E) == 0) {
- dl = ni_readl(dev, NI611X_AI_FIFO_DATA_REG);
-
- /* This may get the hi/lo data in the wrong order */
- data = dl >> 16;
- comedi_buf_write_samples(s, &data, 1);
- data = dl & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
- } else if (devpriv->is_6143) {
- i = 0;
- while (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) & 0x04) {
- dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
-
- /* This may get the hi/lo data in the wrong order */
- data = dl >> 16;
- comedi_buf_write_samples(s, &data, 1);
- data = dl & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- i += 2;
- }
- /* Check if stranded sample is present */
- if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) & 0x01) {
- /* Get stranded sample into FIFO */
- ni_writel(dev, 0x01, NI6143_AI_FIFO_CTRL_REG);
- dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
- data = (dl >> 16) & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
-
- } else {
- fifo_empty = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- while (fifo_empty == 0) {
- for (i = 0;
- i <
- sizeof(devpriv->ai_fifo_buffer) /
- sizeof(devpriv->ai_fifo_buffer[0]); i++) {
- fifo_empty = ni_stc_readw(dev,
- NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- if (fifo_empty)
- break;
- devpriv->ai_fifo_buffer[i] =
- ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- }
- comedi_buf_write_samples(s, devpriv->ai_fifo_buffer, i);
- }
- }
-}
-
-static void get_last_sample_611x(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned short data;
- u32 dl;
-
- if (!devpriv->is_611x)
- return;
-
- /* Check if there's a single sample stuck in the FIFO */
- if (ni_readb(dev, NI_E_STATUS_REG) & 0x80) {
- dl = ni_readl(dev, NI611X_AI_FIFO_DATA_REG);
- data = dl & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
-}
-
-static void get_last_sample_6143(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned short data;
- u32 dl;
-
- if (!devpriv->is_6143)
- return;
-
- /* Check if there's a single sample stuck in the FIFO */
- if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) & 0x01) {
- /* Get stranded sample into FIFO */
- ni_writel(dev, 0x01, NI6143_AI_FIFO_CTRL_REG);
- dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
-
- /* This may get the hi/lo data in the wrong order */
- data = (dl >> 16) & 0xffff;
- comedi_buf_write_samples(s, &data, 1);
- }
-}
-
-static void shutdown_ai_command(struct comedi_device *dev)
-{
- struct comedi_subdevice *s = dev->read_subdev;
-
-#ifdef PCIDMA
- ni_ai_drain_dma(dev);
-#endif
- ni_handle_fifo_dregs(dev);
- get_last_sample_611x(dev);
- get_last_sample_6143(dev);
-
- s->async->events |= COMEDI_CB_EOA;
-}
-
-static void ni_handle_eos(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
-
- if (devpriv->aimode == AIMODE_SCAN) {
-#ifdef PCIDMA
- static const int timeout = 10;
- int i;
-
- for (i = 0; i < timeout; i++) {
- ni_sync_ai_dma(dev);
- if ((s->async->events & COMEDI_CB_EOS))
- break;
- udelay(1);
- }
-#else
- ni_handle_fifo_dregs(dev);
- s->async->events |= COMEDI_CB_EOS;
-#endif
- }
- /* handle special case of single scan */
- if (devpriv->ai_cmd2 & NISTC_AI_CMD2_END_ON_EOS)
- shutdown_ai_command(dev);
-}
-
-static void handle_gpct_interrupt(struct comedi_device *dev,
- unsigned short counter_index)
-{
-#ifdef PCIDMA
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s;
-
- s = &dev->subdevices[NI_GPCT_SUBDEV(counter_index)];
-
- ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index],
- s);
- comedi_handle_events(dev, s);
-#endif
-}
-
-static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
-{
- unsigned short ack = 0;
-
- if (a_status & NISTC_AI_STATUS1_SC_TC)
- ack |= NISTC_INTA_ACK_AI_SC_TC;
- if (a_status & NISTC_AI_STATUS1_START1)
- ack |= NISTC_INTA_ACK_AI_START1;
- if (a_status & NISTC_AI_STATUS1_START)
- ack |= NISTC_INTA_ACK_AI_START;
- if (a_status & NISTC_AI_STATUS1_STOP)
- ack |= NISTC_INTA_ACK_AI_STOP;
- if (ack)
- ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
-}
-
-static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
- unsigned ai_mite_status)
-{
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
-
-	/*
-	 * 67xx boards don't have an AI subdevice, but their gpct0 might
-	 * generate an A interrupt.
-	 */
- if (s->type == COMEDI_SUBD_UNUSED)
- return;
-
-#ifdef PCIDMA
- if (ai_mite_status & CHSR_LINKC)
- ni_sync_ai_dma(dev);
-
- if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ai_mite_status=%08x)\n",
- ai_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- /* disable_irq(dev->irq); */
- }
-#endif
-
- /* test for all uncommon interrupt events at the same time */
- if (status & (NISTC_AI_STATUS1_ERR |
- NISTC_AI_STATUS1_SC_TC | NISTC_AI_STATUS1_START1)) {
- if (status == 0xffff) {
- dev_err(dev->class_dev, "Card removed?\n");
- /* we probably aren't even running a command now,
- * so it's a good idea to be careful. */
- if (comedi_is_subdevice_running(s)) {
- s->async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- }
- return;
- }
- if (status & NISTC_AI_STATUS1_ERR) {
- dev_err(dev->class_dev, "ai error a_status=%04x\n",
- status);
-
- shutdown_ai_command(dev);
-
- s->async->events |= COMEDI_CB_ERROR;
- if (status & NISTC_AI_STATUS1_OVER)
- s->async->events |= COMEDI_CB_OVERFLOW;
-
- comedi_handle_events(dev, s);
- return;
- }
- if (status & NISTC_AI_STATUS1_SC_TC) {
- if (cmd->stop_src == TRIG_COUNT)
- shutdown_ai_command(dev);
- }
- }
-#ifndef PCIDMA
- if (status & NISTC_AI_STATUS1_FIFO_HF) {
- int i;
- static const int timeout = 10;
-		/*
-		 * PCMCIA cards (at least the 6036) seem to stop producing
-		 * interrupts if we fail to drain the FIFO to below half
-		 * full, so loop to be sure.
-		 */
- for (i = 0; i < timeout; ++i) {
- ni_handle_fifo_half_full(dev);
- if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_HF) == 0)
- break;
- }
- }
-#endif /* !PCIDMA */
-
- if (status & NISTC_AI_STATUS1_STOP)
- ni_handle_eos(dev, s);
-
- comedi_handle_events(dev, s);
-}
-
-static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
-{
- unsigned short ack = 0;
-
- if (b_status & NISTC_AO_STATUS1_BC_TC)
- ack |= NISTC_INTB_ACK_AO_BC_TC;
- if (b_status & NISTC_AO_STATUS1_OVERRUN)
- ack |= NISTC_INTB_ACK_AO_ERR;
- if (b_status & NISTC_AO_STATUS1_START)
- ack |= NISTC_INTB_ACK_AO_START;
- if (b_status & NISTC_AO_STATUS1_START1)
- ack |= NISTC_INTB_ACK_AO_START1;
- if (b_status & NISTC_AO_STATUS1_UC_TC)
- ack |= NISTC_INTB_ACK_AO_UC_TC;
- if (b_status & NISTC_AO_STATUS1_UI2_TC)
- ack |= NISTC_INTB_ACK_AO_UI2_TC;
- if (b_status & NISTC_AO_STATUS1_UPDATE)
- ack |= NISTC_INTB_ACK_AO_UPDATE;
- if (ack)
- ni_stc_writew(dev, ack, NISTC_INTB_ACK_REG);
-}
-
-static void handle_b_interrupt(struct comedi_device *dev,
- unsigned short b_status, unsigned ao_mite_status)
-{
- struct comedi_subdevice *s = dev->write_subdev;
- /* unsigned short ack=0; */
-
-#ifdef PCIDMA
- /* Currently, mite.c requires us to handle LINKC */
- if (ao_mite_status & CHSR_LINKC) {
- struct ni_private *devpriv = dev->private;
-
- mite_handle_b_linkc(devpriv->mite, dev);
- }
-
- if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ao_mite_status=%08x)\n",
- ao_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- }
-#endif
-
- if (b_status == 0xffff)
- return;
- if (b_status & NISTC_AO_STATUS1_OVERRUN) {
- dev_err(dev->class_dev,
- "AO FIFO underrun status=0x%04x status2=0x%04x\n",
- b_status, ni_stc_readw(dev, NISTC_AO_STATUS2_REG));
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
-
- if (b_status & NISTC_AO_STATUS1_BC_TC)
- s->async->events |= COMEDI_CB_EOA;
-
-#ifndef PCIDMA
- if (b_status & NISTC_AO_STATUS1_FIFO_REQ) {
- int ret;
-
- ret = ni_ao_fifo_half_empty(dev, s);
- if (!ret) {
- dev_err(dev->class_dev, "AO buffer underrun\n");
- ni_set_bits(dev, NISTC_INTB_ENA_REG,
- NISTC_INTB_ENA_AO_FIFO |
- NISTC_INTB_ENA_AO_ERR, 0);
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
- }
-#endif
-
- comedi_handle_events(dev, s);
-}
-
-static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
- void *data, unsigned int num_bytes,
- unsigned int chan_index)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
- unsigned short *array = data;
- unsigned int *larray = data;
- unsigned int i;
-
- for (i = 0; i < nsamples; i++) {
-#ifdef PCIDMA
- if (s->subdev_flags & SDF_LSAMPL)
- larray[i] = le32_to_cpu(larray[i]);
- else
- array[i] = le16_to_cpu(array[i]);
-#endif
- if (s->subdev_flags & SDF_LSAMPL)
- larray[i] += devpriv->ai_offset[chan_index];
- else
- array[i] += devpriv->ai_offset[chan_index];
- chan_index++;
- chan_index %= cmd->chanlist_len;
- }
-}
-
-#ifdef PCIDMA
-
-static int ni_ai_setup_MITE_dma(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- int retval;
- unsigned long flags;
-
- retval = ni_request_ai_mite_channel(dev);
- if (retval)
- return retval;
-
- /* write alloc the entire buffer */
- comedi_buf_write_alloc(s, s->async->prealloc_bufsz);
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (!devpriv->ai_mite_chan) {
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- return -EIO;
- }
-
- if (devpriv->is_611x || devpriv->is_6143)
- mite_prep_dma(devpriv->ai_mite_chan, 32, 16);
- else if (devpriv->is_628x)
- mite_prep_dma(devpriv->ai_mite_chan, 32, 32);
- else
- mite_prep_dma(devpriv->ai_mite_chan, 16, 16);
-
-	/* start the MITE */
- mite_dma_arm(devpriv->ai_mite_chan);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-
- return 0;
-}
-
-static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- int retval;
- unsigned long flags;
-
- retval = ni_request_ao_mite_channel(dev);
- if (retval)
- return retval;
-
- /* read alloc the entire buffer */
- comedi_buf_read_alloc(s, s->async->prealloc_bufsz);
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan) {
- if (devpriv->is_611x || devpriv->is_6713) {
- mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
- } else {
-			/*
-			 * Doing 32-bit instead of 16-bit wide transfers from
-			 * memory makes the MITE do 32-bit PCI transfers,
-			 * doubling PCI bandwidth.
-			 */
- mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
- }
- mite_dma_arm(devpriv->ao_mite_chan);
- } else {
- retval = -EIO;
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-
- return retval;
-}
-
-#endif /* PCIDMA */
-
-/*
- * Used for both the cancel ioctl and board initialization.
- *
- * This is pretty harsh for a cancel, but it works...
- */
-
-static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
- unsigned ai_personal;
- unsigned ai_out_ctrl;
-
- ni_release_ai_mite_channel(dev);
- /* ai configuration */
- ni_stc_writew(dev, NISTC_RESET_AI_CFG_START | NISTC_RESET_AI,
- NISTC_RESET_REG);
-
- ni_set_bits(dev, NISTC_INTA_ENA_REG, NISTC_INTA_ENA_AI_MASK, 0);
-
- ni_clear_ai_fifo(dev);
-
- if (!devpriv->is_6143)
- ni_writeb(dev, NI_E_MISC_CMD_EXT_ATRIG, NI_E_MISC_CMD_REG);
-
- ni_stc_writew(dev, NISTC_AI_CMD1_DISARM, NISTC_AI_CMD1_REG);
- ni_stc_writew(dev, NISTC_AI_MODE1_START_STOP |
- NISTC_AI_MODE1_RSVD
- /*| NISTC_AI_MODE1_TRIGGER_ONCE */,
- NISTC_AI_MODE1_REG);
- ni_stc_writew(dev, 0, NISTC_AI_MODE2_REG);
- /* generate FIFO interrupts on non-empty */
- ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_NE,
- NISTC_AI_MODE3_REG);
-
- ai_personal = NISTC_AI_PERSONAL_SHIFTIN_PW |
- NISTC_AI_PERSONAL_SOC_POLARITY |
- NISTC_AI_PERSONAL_LOCALMUX_CLK_PW;
- ai_out_ctrl = NISTC_AI_OUT_CTRL_SCAN_IN_PROG_SEL(3) |
- NISTC_AI_OUT_CTRL_EXTMUX_CLK_SEL(0) |
- NISTC_AI_OUT_CTRL_LOCALMUX_CLK_SEL(2) |
- NISTC_AI_OUT_CTRL_SC_TC_SEL(3);
- if (devpriv->is_611x) {
- ai_out_ctrl |= NISTC_AI_OUT_CTRL_CONVERT_HIGH;
- } else if (devpriv->is_6143) {
- ai_out_ctrl |= NISTC_AI_OUT_CTRL_CONVERT_LOW;
- } else {
- ai_personal |= NISTC_AI_PERSONAL_CONVERT_PW;
- if (devpriv->is_622x)
- ai_out_ctrl |= NISTC_AI_OUT_CTRL_CONVERT_HIGH;
- else
- ai_out_ctrl |= NISTC_AI_OUT_CTRL_CONVERT_LOW;
- }
- ni_stc_writew(dev, ai_personal, NISTC_AI_PERSONAL_REG);
- ni_stc_writew(dev, ai_out_ctrl, NISTC_AI_OUT_CTRL_REG);
-
- /* the following registers should not be changed, because there
- * are no backup registers in devpriv. If you want to change
- * any of these, add a backup register and other appropriate code:
- * NISTC_AI_MODE1_REG
- * NISTC_AI_MODE3_REG
- * NISTC_AI_PERSONAL_REG
- * NISTC_AI_OUT_CTRL_REG
- */
-
- /* clear interrupts */
- ni_stc_writew(dev, NISTC_INTA_ACK_AI_ALL, NISTC_INTA_ACK_REG);
-
- ni_stc_writew(dev, NISTC_RESET_AI_CFG_END, NISTC_RESET_REG);
-
- return 0;
-}
-
-static int ni_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- unsigned long flags;
- int count;
-
- /* lock to avoid race with interrupt handler */
- spin_lock_irqsave(&dev->spinlock, flags);
-#ifndef PCIDMA
- ni_handle_fifo_dregs(dev);
-#else
- ni_sync_ai_dma(dev);
-#endif
- count = comedi_buf_n_bytes_ready(s);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return count;
-}
-
-static void ni_prime_channelgain_list(struct comedi_device *dev)
-{
- int i;
-
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE, NISTC_AI_CMD1_REG);
- for (i = 0; i < NI_TIMEOUT; ++i) {
- if (!(ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E)) {
- ni_stc_writew(dev, 1, NISTC_ADC_FIFO_CLR_REG);
- return;
- }
- udelay(1);
- }
- dev_err(dev->class_dev, "timeout loading channel/gain list\n");
-}
-
-static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
- unsigned int n_chan,
- unsigned int *list)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- unsigned int chan, range, aref;
- unsigned int i;
- unsigned int dither;
- unsigned range_code;
-
- ni_stc_writew(dev, 1, NISTC_CFG_MEM_CLR_REG);
-
- if ((list[0] & CR_ALT_SOURCE)) {
- unsigned bypass_bits;
-
- chan = CR_CHAN(list[0]);
- range = CR_RANGE(list[0]);
- range_code = ni_gainlkup[board->gainlkup][range];
- dither = (list[0] & CR_ALT_FILTER) != 0;
- bypass_bits = NI_M_CFG_BYPASS_FIFO |
- NI_M_CFG_BYPASS_AI_CHAN(chan) |
- NI_M_CFG_BYPASS_AI_GAIN(range_code) |
- devpriv->ai_calib_source;
- if (dither)
- bypass_bits |= NI_M_CFG_BYPASS_AI_DITHER;
- /* don't use 2's complement encoding */
- bypass_bits |= NI_M_CFG_BYPASS_AI_POLARITY;
- ni_writel(dev, bypass_bits, NI_M_CFG_BYPASS_FIFO_REG);
- } else {
- ni_writel(dev, 0, NI_M_CFG_BYPASS_FIFO_REG);
- }
- for (i = 0; i < n_chan; i++) {
- unsigned config_bits = 0;
-
- chan = CR_CHAN(list[i]);
- aref = CR_AREF(list[i]);
- range = CR_RANGE(list[i]);
- dither = (list[i] & CR_ALT_FILTER) != 0;
-
- range_code = ni_gainlkup[board->gainlkup][range];
- devpriv->ai_offset[i] = 0;
- switch (aref) {
- case AREF_DIFF:
- config_bits |= NI_M_AI_CFG_CHAN_TYPE_DIFF;
- break;
- case AREF_COMMON:
- config_bits |= NI_M_AI_CFG_CHAN_TYPE_COMMON;
- break;
- case AREF_GROUND:
- config_bits |= NI_M_AI_CFG_CHAN_TYPE_GROUND;
- break;
- case AREF_OTHER:
- break;
- }
- config_bits |= NI_M_AI_CFG_CHAN_SEL(chan);
- config_bits |= NI_M_AI_CFG_BANK_SEL(chan);
- config_bits |= NI_M_AI_CFG_GAIN(range_code);
- if (i == n_chan - 1)
- config_bits |= NI_M_AI_CFG_LAST_CHAN;
- if (dither)
- config_bits |= NI_M_AI_CFG_DITHER;
- /* don't use 2's complement encoding */
- config_bits |= NI_M_AI_CFG_POLARITY;
- ni_writew(dev, config_bits, NI_M_AI_CFG_FIFO_DATA_REG);
- }
- ni_prime_channelgain_list(dev);
-}
-
-/*
- * Notes on the 6110 and 6111:
- * These boards are slightly different from the rest of the series, since
- * they have multiple A/D converters.
- * From the driver side, the configuration memory is a
- * little different.
- * Configuration Memory Low:
- * bits 15-9: same
- * bit 8: unipolar/bipolar (should be 0 for bipolar)
- * bits 0-3: gain. This is 4 bits instead of 3 for the other boards
- * 1001 gain=0.1 (+/- 50)
- * 1010 0.2
- * 1011 0.1
- * 0001 1
- * 0010 2
- * 0011 5
- * 0100 10
- * 0101 20
- * 0110 50
- * Configuration Memory High:
- * bits 12-14: Channel Type
- * 001 for differential
- * 000 for calibration
- * bit 11: coupling (this is not currently handled)
- * 1 AC coupling
- * 0 DC coupling
- * bits 0-2: channel
- * valid channels are 0-3
- */
-static void ni_load_channelgain_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int n_chan, unsigned int *list)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- unsigned int offset = (s->maxdata + 1) >> 1;
- unsigned int chan, range, aref;
- unsigned int i;
- unsigned int hi, lo;
- unsigned int dither;
-
- if (devpriv->is_m_series) {
- ni_m_series_load_channelgain_list(dev, n_chan, list);
- return;
- }
- if (n_chan == 1 && !devpriv->is_611x && !devpriv->is_6143) {
-		if (devpriv->changain_state &&
-		    devpriv->changain_spec == list[0]) {
- /* ready to go. */
- return;
- }
- devpriv->changain_state = 1;
- devpriv->changain_spec = list[0];
- } else {
- devpriv->changain_state = 0;
- }
-
- ni_stc_writew(dev, 1, NISTC_CFG_MEM_CLR_REG);
-
- /* Set up Calibration mode if required */
- if (devpriv->is_6143) {
-		if ((list[0] & CR_ALT_SOURCE) &&
-		    !devpriv->ai_calib_source_enabled) {
- /* Strobe Relay enable bit */
- ni_writew(dev, devpriv->ai_calib_source |
- NI6143_CALIB_CHAN_RELAY_ON,
- NI6143_CALIB_CHAN_REG);
- ni_writew(dev, devpriv->ai_calib_source,
- NI6143_CALIB_CHAN_REG);
- devpriv->ai_calib_source_enabled = 1;
- msleep_interruptible(100); /* Allow relays to change */
-		} else if (!(list[0] & CR_ALT_SOURCE) &&
-			   devpriv->ai_calib_source_enabled) {
- /* Strobe Relay disable bit */
- ni_writew(dev, devpriv->ai_calib_source |
- NI6143_CALIB_CHAN_RELAY_OFF,
- NI6143_CALIB_CHAN_REG);
- ni_writew(dev, devpriv->ai_calib_source,
- NI6143_CALIB_CHAN_REG);
- devpriv->ai_calib_source_enabled = 0;
- msleep_interruptible(100); /* Allow relays to change */
- }
- }
-
- for (i = 0; i < n_chan; i++) {
- if (!devpriv->is_6143 && (list[i] & CR_ALT_SOURCE))
- chan = devpriv->ai_calib_source;
- else
- chan = CR_CHAN(list[i]);
- aref = CR_AREF(list[i]);
- range = CR_RANGE(list[i]);
- dither = (list[i] & CR_ALT_FILTER) != 0;
-
- /* fix the external/internal range differences */
- range = ni_gainlkup[board->gainlkup][range];
- if (devpriv->is_611x)
- devpriv->ai_offset[i] = offset;
- else
- devpriv->ai_offset[i] = (range & 0x100) ? 0 : offset;
-
- hi = 0;
- if ((list[i] & CR_ALT_SOURCE)) {
- if (devpriv->is_611x)
- ni_writew(dev, CR_CHAN(list[i]) & 0x0003,
- NI611X_CALIB_CHAN_SEL_REG);
- } else {
- if (devpriv->is_611x)
- aref = AREF_DIFF;
- else if (devpriv->is_6143)
- aref = AREF_OTHER;
- switch (aref) {
- case AREF_DIFF:
- hi |= NI_E_AI_CFG_HI_TYPE_DIFF;
- break;
- case AREF_COMMON:
- hi |= NI_E_AI_CFG_HI_TYPE_COMMON;
- break;
- case AREF_GROUND:
- hi |= NI_E_AI_CFG_HI_TYPE_GROUND;
- break;
- case AREF_OTHER:
- break;
- }
- }
- hi |= NI_E_AI_CFG_HI_CHAN(chan);
-
- ni_writew(dev, hi, NI_E_AI_CFG_HI_REG);
-
- if (!devpriv->is_6143) {
- lo = NI_E_AI_CFG_LO_GAIN(range);
-
- if (i == n_chan - 1)
- lo |= NI_E_AI_CFG_LO_LAST_CHAN;
- if (dither)
- lo |= NI_E_AI_CFG_LO_DITHER;
-
- ni_writew(dev, lo, NI_E_AI_CFG_LO_REG);
- }
- }
-
- /* prime the channel/gain list */
- if (!devpriv->is_611x && !devpriv->is_6143)
- ni_prime_channelgain_list(dev);
-}
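The bit layout documented in the "Notes on the 6110 and 6111" comment above maps onto a couple of small packing helpers. The sketch below is a standalone illustration of that layout only, assuming the field positions given in the comment; the helper names and example values are invented for this note and are not part of the driver.

#include <stdio.h>

/* Pack Configuration Memory Low: bit 8 = unipolar, bits 0-3 = gain code. */
static unsigned int cfg_mem_low_611x(unsigned int gain_code, int unipolar)
{
	return ((unipolar ? 1 : 0) << 8) | (gain_code & 0xf);
}

/*
 * Pack Configuration Memory High: bits 12-14 = channel type
 * (001 differential, 000 calibration), bit 11 = AC coupling,
 * bits 0-2 = channel (0-3 valid).
 */
static unsigned int cfg_mem_high_611x(unsigned int chan, int differential,
				      int ac_coupled)
{
	return ((differential ? 1 : 0) << 12) |
	       ((ac_coupled ? 1 : 0) << 11) |
	       (chan & 0x7);
}

int main(void)
{
	/* channel 2, differential, DC coupled, bipolar, gain code 0001 */
	printf("low=0x%04x high=0x%04x\n",
	       cfg_mem_low_611x(0x1, 0), cfg_mem_high_611x(2, 1, 0));
	return 0;
}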
-
-static int ni_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int mask = (s->maxdata + 1) >> 1;
- int i, n;
- unsigned signbits;
- unsigned int d;
- unsigned long dl;
-
- ni_load_channelgain_list(dev, s, 1, &insn->chanspec);
-
- ni_clear_ai_fifo(dev);
-
- signbits = devpriv->ai_offset[0];
- if (devpriv->is_611x) {
- for (n = 0; n < num_adc_stages_611x; n++) {
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
- NISTC_AI_CMD1_REG);
- udelay(1);
- }
- for (n = 0; n < insn->n; n++) {
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
- NISTC_AI_CMD1_REG);
- /* The 611x has screwy 32-bit FIFOs. */
- d = 0;
- for (i = 0; i < NI_TIMEOUT; i++) {
- if (ni_readb(dev, NI_E_STATUS_REG) & 0x80) {
- d = ni_readl(dev,
- NI611X_AI_FIFO_DATA_REG);
- d >>= 16;
- d &= 0xffff;
- break;
- }
- if (!(ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E)) {
- d = ni_readl(dev,
- NI611X_AI_FIFO_DATA_REG);
- d &= 0xffff;
- break;
- }
- }
- if (i == NI_TIMEOUT) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIME;
- }
- d += signbits;
- data[n] = d;
- }
- } else if (devpriv->is_6143) {
- for (n = 0; n < insn->n; n++) {
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
- NISTC_AI_CMD1_REG);
-
-			/*
-			 * The 6143 has 32-bit FIFOs.  You need to strobe a
-			 * bit to move a single 16-bit stranded sample into
-			 * the FIFO.
-			 */
- dl = 0;
- for (i = 0; i < NI_TIMEOUT; i++) {
- if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
- 0x01) {
- /* Get stranded sample into FIFO */
- ni_writel(dev, 0x01,
- NI6143_AI_FIFO_CTRL_REG);
- dl = ni_readl(dev,
- NI6143_AI_FIFO_DATA_REG);
- break;
- }
- }
- if (i == NI_TIMEOUT) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIME;
- }
- data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
- }
- } else {
- for (n = 0; n < insn->n; n++) {
- ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
- NISTC_AI_CMD1_REG);
- for (i = 0; i < NI_TIMEOUT; i++) {
- if (!(ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E))
- break;
- }
- if (i == NI_TIMEOUT) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIME;
- }
- if (devpriv->is_m_series) {
- dl = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
- dl &= mask;
- data[n] = dl;
- } else {
- d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- d += signbits; /* subtle: needs to be short addition */
- data[n] = d;
- }
- }
- }
- return insn->n;
-}
-
-static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
- unsigned int flags)
-{
- struct ni_private *devpriv = dev->private;
- int divider;
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns;
- break;
- case CMDF_ROUND_DOWN:
- divider = (nanosec) / devpriv->clock_ns;
- break;
- case CMDF_ROUND_UP:
- divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns;
- break;
- }
- return divider - 1;
-}
-
-static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer)
-{
- struct ni_private *devpriv = dev->private;
-
- return devpriv->clock_ns * (timer + 1);
-}
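ni_ns_to_timer() and ni_timer_to_ns() convert between a requested period in nanoseconds and the hardware divider; the register holds divider - 1, so the achieved period is clock_ns * (timer + 1). A minimal standalone sketch of the same arithmetic, with an assumed 50 ns timebase and a 1005 ns request chosen purely for illustration:

#include <stdio.h>

enum round_mode { ROUND_NEAREST, ROUND_DOWN, ROUND_UP };

/* Mirror of the divider calculation above (register value = divider - 1). */
static unsigned int ns_to_timer(unsigned int nanosec, unsigned int clock_ns,
				enum round_mode mode)
{
	unsigned int divider;

	switch (mode) {
	case ROUND_DOWN:
		divider = nanosec / clock_ns;
		break;
	case ROUND_UP:
		divider = (nanosec + clock_ns - 1) / clock_ns;
		break;
	case ROUND_NEAREST:
	default:
		divider = (nanosec + clock_ns / 2) / clock_ns;
		break;
	}
	return divider - 1;
}

int main(void)
{
	unsigned int clock_ns = 50;	/* assumed 20 MHz timebase */
	unsigned int req = 1005;	/* requested period in ns */
	enum round_mode modes[] = { ROUND_NEAREST, ROUND_DOWN, ROUND_UP };
	const char *names[] = { "nearest", "down", "up" };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int timer = ns_to_timer(req, clock_ns, modes[i]);

		/* achieved period = clock_ns * (timer + 1) */
		printf("%-7s timer=%u actual=%u ns\n",
		       names[i], timer, clock_ns * (timer + 1));
	}
	return 0;
}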
-
-static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev,
- unsigned num_channels)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
-
- /* simultaneously-sampled inputs */
- if (devpriv->is_611x || devpriv->is_6143)
- return board->ai_speed;
-
- /* multiplexed inputs */
- return board->ai_speed * num_channels;
-}
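ni_min_ai_scan_period_ns() captures the difference between the two acquisition architectures: a multiplexed board needs one conversion period per channel in the scan, while the simultaneously-sampled 611x/6143 boards convert all channels at once. A trivial standalone illustration, using an assumed 1250 ns conversion period rather than any particular board's specification:

#include <stdio.h>

int main(void)
{
	unsigned int ai_speed = 1250;	/* ns per conversion, assumed */
	unsigned int num_channels = 4;

	printf("multiplexed min scan period:  %u ns\n",
	       ai_speed * num_channels);
	printf("simultaneous min scan period: %u ns\n", ai_speed);
	return 0;
}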
-
-static int ni_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- int err = 0;
- unsigned int tmp;
- unsigned int sources;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src,
- TRIG_NOW | TRIG_INT | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
-
- sources = TRIG_TIMER | TRIG_EXT;
- if (devpriv->is_611x || devpriv->is_6143)
- sources |= TRIG_NOW;
- err |= comedi_check_trigger_src(&cmd->convert_src, sources);
-
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- case TRIG_INT:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- tmp = CR_CHAN(cmd->start_arg);
-
- if (tmp > 16)
- tmp = 16;
- tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE));
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, tmp);
- break;
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- ni_min_ai_scan_period_ns(dev, cmd->chanlist_len));
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- devpriv->clock_ns *
- 0xffffff);
- } else if (cmd->scan_begin_src == TRIG_EXT) {
- /* external trigger */
- unsigned int tmp = CR_CHAN(cmd->scan_begin_arg);
-
- if (tmp > 16)
- tmp = 16;
- tmp |= (cmd->scan_begin_arg & (CR_INVERT | CR_EDGE));
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
- } else { /* TRIG_OTHER */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- if (devpriv->is_611x || devpriv->is_6143) {
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg,
- 0);
- } else {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_speed);
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg,
- devpriv->clock_ns *
- 0xffff);
- }
- } else if (cmd->convert_src == TRIG_EXT) {
- /* external trigger */
- unsigned int tmp = CR_CHAN(cmd->convert_arg);
-
- if (tmp > 16)
- tmp = 16;
- tmp |= (cmd->convert_arg & (CR_ALT_FILTER | CR_INVERT));
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, tmp);
- } else if (cmd->convert_src == TRIG_NOW) {
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT) {
- unsigned int max_count = 0x01000000;
-
- if (devpriv->is_611x)
- max_count -= num_adc_stages_611x;
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg, max_count);
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- } else {
- /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- tmp = cmd->scan_begin_arg;
- cmd->scan_begin_arg =
- ni_timer_to_ns(dev, ni_ns_to_timer(dev,
- cmd->scan_begin_arg,
- cmd->flags));
- if (tmp != cmd->scan_begin_arg)
- err++;
- }
- if (cmd->convert_src == TRIG_TIMER) {
- if (!devpriv->is_611x && !devpriv->is_6143) {
- tmp = cmd->convert_arg;
- cmd->convert_arg =
- ni_timer_to_ns(dev, ni_ns_to_timer(dev,
- cmd->convert_arg,
- cmd->flags));
- if (tmp != cmd->convert_arg)
- err++;
- if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->scan_begin_arg <
- cmd->convert_arg * cmd->scan_end_arg) {
- cmd->scan_begin_arg =
- cmd->convert_arg * cmd->scan_end_arg;
- err++;
- }
- }
- }
-
- if (err)
- return 4;
-
- return 0;
-}
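For context, ni_ai_cmdtest() is what a user-space comedi_command_test() call ends up exercising. The following is a hedged comedilib sketch of an AI command that should satisfy the checks above; the device path, subdevice index, channels and timing values are assumptions chosen for illustration, not values taken from this driver.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed path */
	unsigned int chanlist[2];
	comedi_cmd cmd = { 0 };
	int i, ret = -1;

	if (!dev)
		return 1;

	chanlist[0] = CR_PACK(0, 0, AREF_GROUND);
	chanlist[1] = CR_PACK(1, 0, AREF_GROUND);

	cmd.subdev = 0;			/* assumed AI subdevice index */
	cmd.start_src = TRIG_NOW;
	cmd.start_arg = 0;
	cmd.scan_begin_src = TRIG_TIMER;
	cmd.scan_begin_arg = 100000;	/* 100 us per scan */
	cmd.convert_src = TRIG_TIMER;
	cmd.convert_arg = 10000;	/* 10 us between channels */
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = 2;
	cmd.stop_src = TRIG_COUNT;
	cmd.stop_arg = 1000;		/* 1000 scans, then COMEDI_CB_EOA */
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 2;

	/* cmdtest may adjust the timing arguments; rerun until it settles */
	for (i = 0; i < 4; i++) {
		ret = comedi_command_test(dev, &cmd);
		if (ret == 0)
			break;
	}
	printf("comedi_command_test() returned %d\n", ret);

	comedi_close(dev);
	return ret ? 1 : 0;
}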
-
-static int ni_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- ni_stc_writew(dev, NISTC_AI_CMD2_START1_PULSE | devpriv->ai_cmd2,
- NISTC_AI_CMD2_REG);
- s->async->inttrig = NULL;
-
- return 1;
-}
-
-static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
- const struct comedi_cmd *cmd = &s->async->cmd;
- int timer;
- int mode1 = 0; /* mode1 is needed for both stop and convert */
- int mode2 = 0;
- int start_stop_select = 0;
- unsigned int stop_count;
- int interrupt_a_enable = 0;
- unsigned ai_trig;
-
- if (dev->irq == 0) {
- dev_err(dev->class_dev, "cannot run command without an irq\n");
- return -EIO;
- }
- ni_clear_ai_fifo(dev);
-
- ni_load_channelgain_list(dev, s, cmd->chanlist_len, cmd->chanlist);
-
- /* start configuration */
- ni_stc_writew(dev, NISTC_RESET_AI_CFG_START, NISTC_RESET_REG);
-
- /* disable analog triggering for now, since it
- * interferes with the use of pfi0 */
- devpriv->an_trig_etc_reg &= ~NISTC_ATRIG_ETC_ENA;
- ni_stc_writew(dev, devpriv->an_trig_etc_reg, NISTC_ATRIG_ETC_REG);
-
- ai_trig = NISTC_AI_TRIG_START2_SEL(0) | NISTC_AI_TRIG_START1_SYNC;
- switch (cmd->start_src) {
- case TRIG_INT:
- case TRIG_NOW:
- ai_trig |= NISTC_AI_TRIG_START1_EDGE |
- NISTC_AI_TRIG_START1_SEL(0);
- break;
- case TRIG_EXT:
- ai_trig |= NISTC_AI_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) +
- 1);
-
- if (cmd->start_arg & CR_INVERT)
- ai_trig |= NISTC_AI_TRIG_START1_POLARITY;
- if (cmd->start_arg & CR_EDGE)
- ai_trig |= NISTC_AI_TRIG_START1_EDGE;
- break;
- }
- ni_stc_writew(dev, ai_trig, NISTC_AI_TRIG_SEL_REG);
-
- mode2 &= ~NISTC_AI_MODE2_PRE_TRIGGER;
- mode2 &= ~NISTC_AI_MODE2_SC_INIT_LOAD_SRC;
- mode2 &= ~NISTC_AI_MODE2_SC_RELOAD_MODE;
- ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG);
-
- if (cmd->chanlist_len == 1 || devpriv->is_611x || devpriv->is_6143) {
- /* logic low */
- start_stop_select |= NISTC_AI_STOP_POLARITY |
- NISTC_AI_STOP_SEL(31) |
- NISTC_AI_STOP_SYNC;
- } else {
- /* ai configuration memory */
- start_stop_select |= NISTC_AI_STOP_SEL(19);
- }
- ni_stc_writew(dev, start_stop_select, NISTC_AI_START_STOP_REG);
-
- devpriv->ai_cmd2 = 0;
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- stop_count = cmd->stop_arg - 1;
-
- if (devpriv->is_611x) {
- /* have to take 3 stage adc pipeline into account */
- stop_count += num_adc_stages_611x;
- }
- /* stage number of scans */
- ni_stc_writel(dev, stop_count, NISTC_AI_SC_LOADA_REG);
-
- mode1 |= NISTC_AI_MODE1_START_STOP |
- NISTC_AI_MODE1_RSVD |
- NISTC_AI_MODE1_TRIGGER_ONCE;
- ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG);
- /* load SC (Scan Count) */
- ni_stc_writew(dev, NISTC_AI_CMD1_SC_LOAD, NISTC_AI_CMD1_REG);
-
- if (stop_count == 0) {
- devpriv->ai_cmd2 |= NISTC_AI_CMD2_END_ON_EOS;
- interrupt_a_enable |= NISTC_INTA_ENA_AI_STOP;
-			/*
-			 * This is required to get the last sample for
-			 * chanlist_len > 1; not sure why.
-			 */
- if (cmd->chanlist_len > 1)
- start_stop_select |= NISTC_AI_STOP_POLARITY |
- NISTC_AI_STOP_EDGE;
- }
- break;
- case TRIG_NONE:
- /* stage number of scans */
- ni_stc_writel(dev, 0, NISTC_AI_SC_LOADA_REG);
-
- mode1 |= NISTC_AI_MODE1_START_STOP |
- NISTC_AI_MODE1_RSVD |
- NISTC_AI_MODE1_CONTINUOUS;
- ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG);
-
- /* load SC (Scan Count) */
- ni_stc_writew(dev, NISTC_AI_CMD1_SC_LOAD, NISTC_AI_CMD1_REG);
- break;
- }
-
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- /*
- * stop bits for non 611x boards
- * NISTC_AI_MODE3_SI_TRIG_DELAY=0
- * NISTC_AI_MODE2_PRE_TRIGGER=0
- * NISTC_AI_START_STOP_REG:
- * NISTC_AI_START_POLARITY=0 (?) rising edge
- * NISTC_AI_START_EDGE=1 edge triggered
- * NISTC_AI_START_SYNC=1 (?)
- * NISTC_AI_START_SEL=0 SI_TC
- * NISTC_AI_STOP_POLARITY=0 rising edge
- * NISTC_AI_STOP_EDGE=0 level
- * NISTC_AI_STOP_SYNC=1
- * NISTC_AI_STOP_SEL=19 external pin (configuration mem)
- */
- start_stop_select |= NISTC_AI_START_EDGE | NISTC_AI_START_SYNC;
- ni_stc_writew(dev, start_stop_select, NISTC_AI_START_STOP_REG);
-
- mode2 &= ~NISTC_AI_MODE2_SI_INIT_LOAD_SRC; /* A */
- mode2 |= NISTC_AI_MODE2_SI_RELOAD_MODE(0);
- /* mode2 |= NISTC_AI_MODE2_SC_RELOAD_MODE; */
- ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG);
-
- /* load SI */
- timer = ni_ns_to_timer(dev, cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST);
- ni_stc_writel(dev, timer, NISTC_AI_SI_LOADA_REG);
- ni_stc_writew(dev, NISTC_AI_CMD1_SI_LOAD, NISTC_AI_CMD1_REG);
- break;
- case TRIG_EXT:
- if (cmd->scan_begin_arg & CR_EDGE)
- start_stop_select |= NISTC_AI_START_EDGE;
- if (cmd->scan_begin_arg & CR_INVERT) /* falling edge */
- start_stop_select |= NISTC_AI_START_POLARITY;
- if (cmd->scan_begin_src != cmd->convert_src ||
- (cmd->scan_begin_arg & ~CR_EDGE) !=
- (cmd->convert_arg & ~CR_EDGE))
- start_stop_select |= NISTC_AI_START_SYNC;
- start_stop_select |=
- NISTC_AI_START_SEL(1 + CR_CHAN(cmd->scan_begin_arg));
- ni_stc_writew(dev, start_stop_select, NISTC_AI_START_STOP_REG);
- break;
- }
-
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- case TRIG_NOW:
- if (cmd->convert_arg == 0 || cmd->convert_src == TRIG_NOW)
- timer = 1;
- else
- timer = ni_ns_to_timer(dev, cmd->convert_arg,
- CMDF_ROUND_NEAREST);
- /* 0,0 does not work */
- ni_stc_writew(dev, 1, NISTC_AI_SI2_LOADA_REG);
- ni_stc_writew(dev, timer, NISTC_AI_SI2_LOADB_REG);
-
- mode2 &= ~NISTC_AI_MODE2_SI2_INIT_LOAD_SRC; /* A */
- mode2 |= NISTC_AI_MODE2_SI2_RELOAD_MODE; /* alternate */
- ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG);
-
- ni_stc_writew(dev, NISTC_AI_CMD1_SI2_LOAD, NISTC_AI_CMD1_REG);
-
- mode2 |= NISTC_AI_MODE2_SI2_INIT_LOAD_SRC; /* B */
- mode2 |= NISTC_AI_MODE2_SI2_RELOAD_MODE; /* alternate */
- ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG);
- break;
- case TRIG_EXT:
- mode1 |= NISTC_AI_MODE1_CONVERT_SRC(1 + cmd->convert_arg);
- if ((cmd->convert_arg & CR_INVERT) == 0)
- mode1 |= NISTC_AI_MODE1_CONVERT_POLARITY;
- ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG);
-
- mode2 |= NISTC_AI_MODE2_SC_GATE_ENA |
- NISTC_AI_MODE2_START_STOP_GATE_ENA;
- ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG);
-
- break;
- }
-
- if (dev->irq) {
- /* interrupt on FIFO, errors, SC_TC */
- interrupt_a_enable |= NISTC_INTA_ENA_AI_ERR |
- NISTC_INTA_ENA_AI_SC_TC;
-
-#ifndef PCIDMA
- interrupt_a_enable |= NISTC_INTA_ENA_AI_FIFO;
-#endif
-
- if ((cmd->flags & CMDF_WAKE_EOS) ||
- (devpriv->ai_cmd2 & NISTC_AI_CMD2_END_ON_EOS)) {
- /* wake on end-of-scan */
- devpriv->aimode = AIMODE_SCAN;
- } else {
- devpriv->aimode = AIMODE_HALF_FULL;
- }
-
- switch (devpriv->aimode) {
- case AIMODE_HALF_FULL:
-			/* generate FIFO interrupts and DMA requests on half-full */
-#ifdef PCIDMA
- ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF_E,
- NISTC_AI_MODE3_REG);
-#else
- ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF,
- NISTC_AI_MODE3_REG);
-#endif
- break;
- case AIMODE_SAMPLE:
-			/* generate FIFO interrupts on non-empty */
- ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_NE,
- NISTC_AI_MODE3_REG);
- break;
- case AIMODE_SCAN:
-#ifdef PCIDMA
- ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_NE,
- NISTC_AI_MODE3_REG);
-#else
- ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF,
- NISTC_AI_MODE3_REG);
-#endif
- interrupt_a_enable |= NISTC_INTA_ENA_AI_STOP;
- break;
- default:
- break;
- }
-
- /* clear interrupts */
- ni_stc_writew(dev, NISTC_INTA_ACK_AI_ALL, NISTC_INTA_ACK_REG);
-
- ni_set_bits(dev, NISTC_INTA_ENA_REG, interrupt_a_enable, 1);
- } else {
- /* interrupt on nothing */
- ni_set_bits(dev, NISTC_INTA_ENA_REG, ~0, 0);
-
- /* XXX start polling if necessary */
- }
-
- /* end configuration */
- ni_stc_writew(dev, NISTC_RESET_AI_CFG_END, NISTC_RESET_REG);
-
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- ni_stc_writew(dev, NISTC_AI_CMD1_SI2_ARM |
- NISTC_AI_CMD1_SI_ARM |
- NISTC_AI_CMD1_DIV_ARM |
- NISTC_AI_CMD1_SC_ARM,
- NISTC_AI_CMD1_REG);
- break;
- case TRIG_EXT:
- ni_stc_writew(dev, NISTC_AI_CMD1_SI2_ARM |
- NISTC_AI_CMD1_SI_ARM | /* XXX ? */
- NISTC_AI_CMD1_DIV_ARM |
- NISTC_AI_CMD1_SC_ARM,
- NISTC_AI_CMD1_REG);
- break;
- }
-
-#ifdef PCIDMA
- {
- int retval = ni_ai_setup_MITE_dma(dev);
-
- if (retval)
- return retval;
- }
-#endif
-
- if (cmd->start_src == TRIG_NOW) {
- ni_stc_writew(dev, NISTC_AI_CMD2_START1_PULSE |
- devpriv->ai_cmd2,
- NISTC_AI_CMD2_REG);
- s->async->inttrig = NULL;
- } else if (cmd->start_src == TRIG_EXT) {
- s->async->inttrig = NULL;
- } else { /* TRIG_INT */
- s->async->inttrig = ni_ai_inttrig;
- }
-
- return 0;
-}
-
-static int ni_ai_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- if (insn->n < 1)
- return -EINVAL;
-
- switch (data[0]) {
- case INSN_CONFIG_ALT_SOURCE:
- if (devpriv->is_m_series) {
- if (data[1] & ~NI_M_CFG_BYPASS_AI_CAL_MASK)
- return -EINVAL;
- devpriv->ai_calib_source = data[1];
- } else if (devpriv->is_6143) {
- unsigned int calib_source;
-
- calib_source = data[1] & 0xf;
-
- devpriv->ai_calib_source = calib_source;
- ni_writew(dev, calib_source, NI6143_CALIB_CHAN_REG);
- } else {
- unsigned int calib_source;
- unsigned int calib_source_adjust;
-
- calib_source = data[1] & 0xf;
- calib_source_adjust = (data[1] >> 4) & 0xff;
-
- if (calib_source >= 8)
- return -EINVAL;
- devpriv->ai_calib_source = calib_source;
- if (devpriv->is_611x) {
- ni_writeb(dev, calib_source_adjust,
- NI611X_CAL_GAIN_SEL_REG);
- }
- }
- return 2;
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static void ni_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
- void *data, unsigned int num_bytes,
- unsigned int chan_index)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
- unsigned short *array = data;
- unsigned int i;
-
- for (i = 0; i < nsamples; i++) {
- unsigned int range = CR_RANGE(cmd->chanlist[chan_index]);
- unsigned short val = array[i];
-
- /*
- * Munge data from unsigned to two's complement for
- * bipolar ranges.
- */
- if (comedi_range_is_bipolar(s, range))
- val = comedi_offset_munge(s, val);
-#ifdef PCIDMA
- val = cpu_to_le16(val);
-#endif
- array[i] = val;
-
- chan_index++;
- chan_index %= cmd->chanlist_len;
- }
-}
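ni_ao_munge() converts Comedi's unsigned (offset-binary) sample values into the two's-complement form the DAC expects for bipolar ranges. For a device whose maxdata is 2^N - 1 this amounts to flipping the sign bit; the helper below is a standalone stand-in used only to illustrate that mapping, not the driver's (or the comedi core's) actual comedi_offset_munge().

#include <stdio.h>

/* Stand-in for the bipolar munge, assuming maxdata is 2^N - 1. */
static unsigned int offset_to_twos_complement(unsigned int val,
					      unsigned int maxdata)
{
	/* Flipping the sign bit maps 0..maxdata onto the signed range. */
	return val ^ ((maxdata + 1) >> 1);
}

int main(void)
{
	unsigned int maxdata = 0xffff;	/* assumed 16-bit DAC */

	/*
	 * 0x0000 -> 0x8000 (most negative), 0x8000 -> 0x0000 (mid-scale),
	 * 0xffff -> 0x7fff (most positive)
	 */
	printf("0x%04x 0x%04x 0x%04x\n",
	       offset_to_twos_complement(0x0000, maxdata),
	       offset_to_twos_complement(0x8000, maxdata),
	       offset_to_twos_complement(0xffff, maxdata));
	return 0;
}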
-
-static int ni_m_series_ao_config_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chanspec[],
- unsigned int n_chans, int timed)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int range;
- unsigned int chan;
- unsigned int conf;
- int i;
- int invert = 0;
-
- if (timed) {
- for (i = 0; i < s->n_chan; ++i) {
- devpriv->ao_conf[i] &= ~NI_M_AO_CFG_BANK_UPDATE_TIMED;
- ni_writeb(dev, devpriv->ao_conf[i],
- NI_M_AO_CFG_BANK_REG(i));
- ni_writeb(dev, 0xf, NI_M_AO_WAVEFORM_ORDER_REG(i));
- }
- }
- for (i = 0; i < n_chans; i++) {
- const struct comedi_krange *krange;
-
- chan = CR_CHAN(chanspec[i]);
- range = CR_RANGE(chanspec[i]);
- krange = s->range_table->range + range;
- invert = 0;
- conf = 0;
- switch (krange->max - krange->min) {
- case 20000000:
- conf |= NI_M_AO_CFG_BANK_REF_INT_10V;
- ni_writeb(dev, 0, NI_M_AO_REF_ATTENUATION_REG(chan));
- break;
- case 10000000:
- conf |= NI_M_AO_CFG_BANK_REF_INT_5V;
- ni_writeb(dev, 0, NI_M_AO_REF_ATTENUATION_REG(chan));
- break;
- case 4000000:
- conf |= NI_M_AO_CFG_BANK_REF_INT_10V;
- ni_writeb(dev, NI_M_AO_REF_ATTENUATION_X5,
- NI_M_AO_REF_ATTENUATION_REG(chan));
- break;
- case 2000000:
- conf |= NI_M_AO_CFG_BANK_REF_INT_5V;
- ni_writeb(dev, NI_M_AO_REF_ATTENUATION_X5,
- NI_M_AO_REF_ATTENUATION_REG(chan));
- break;
- default:
- dev_err(dev->class_dev,
- "bug! unhandled ao reference voltage\n");
- break;
- }
- switch (krange->max + krange->min) {
- case 0:
- conf |= NI_M_AO_CFG_BANK_OFFSET_0V;
- break;
- case 10000000:
- conf |= NI_M_AO_CFG_BANK_OFFSET_5V;
- break;
- default:
- dev_err(dev->class_dev,
- "bug! unhandled ao offset voltage\n");
- break;
- }
- if (timed)
- conf |= NI_M_AO_CFG_BANK_UPDATE_TIMED;
- ni_writeb(dev, conf, NI_M_AO_CFG_BANK_REG(chan));
- devpriv->ao_conf[chan] = conf;
- ni_writeb(dev, i, NI_M_AO_WAVEFORM_ORDER_REG(chan));
- }
- return invert;
-}
-
-static int ni_old_ao_config_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chanspec[],
- unsigned int n_chans)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int range;
- unsigned int chan;
- unsigned int conf;
- int i;
- int invert = 0;
-
- for (i = 0; i < n_chans; i++) {
- chan = CR_CHAN(chanspec[i]);
- range = CR_RANGE(chanspec[i]);
- conf = NI_E_AO_DACSEL(chan);
-
- if (comedi_range_is_bipolar(s, range)) {
- conf |= NI_E_AO_CFG_BIP;
- invert = (s->maxdata + 1) >> 1;
- } else {
- invert = 0;
- }
- if (comedi_range_is_external(s, range))
- conf |= NI_E_AO_EXT_REF;
-
- /* not all boards can deglitch, but this shouldn't hurt */
- if (chanspec[i] & CR_DEGLITCH)
- conf |= NI_E_AO_DEGLITCH;
-
- /* analog reference */
-		/* AREF_OTHER connects AO ground to AI ground, I think */
- if (CR_AREF(chanspec[i]) == AREF_OTHER)
- conf |= NI_E_AO_GROUND_REF;
-
- ni_writew(dev, conf, NI_E_AO_CFG_REG);
- devpriv->ao_conf[chan] = conf;
- }
- return invert;
-}
-
-static int ni_ao_config_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chanspec[], unsigned int n_chans,
- int timed)
-{
- struct ni_private *devpriv = dev->private;
-
- if (devpriv->is_m_series)
- return ni_m_series_ao_config_chanlist(dev, s, chanspec, n_chans,
- timed);
- else
- return ni_old_ao_config_chanlist(dev, s, chanspec, n_chans);
-}
-
-static int ni_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int reg;
- int i;
-
- if (devpriv->is_6xxx) {
- ni_ao_win_outw(dev, 1 << chan, NI671X_AO_IMMEDIATE_REG);
-
- reg = NI671X_DAC_DIRECT_DATA_REG(chan);
- } else if (devpriv->is_m_series) {
- reg = NI_M_DAC_DIRECT_DATA_REG(chan);
- } else {
- reg = NI_E_DAC_DIRECT_DATA_REG(chan);
- }
-
- ni_ao_config_chanlist(dev, s, &insn->chanspec, 1, 0);
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- if (devpriv->is_6xxx) {
- /*
- * 6xxx boards have bipolar outputs, munge the
- * unsigned comedi values to 2's complement
- */
- val = comedi_offset_munge(s, val);
-
- ni_ao_win_outw(dev, val, reg);
- } else if (devpriv->is_m_series) {
- /*
- * M-series boards use offset binary values for
-			 * bipolar and unipolar outputs
- */
- ni_writew(dev, val, reg);
- } else {
- /*
- * Non-M series boards need two's complement values
- * for bipolar ranges.
- */
- if (comedi_range_is_bipolar(s, range))
- val = comedi_offset_munge(s, val);
-
- ni_writew(dev, val, reg);
- }
- }
-
- return insn->n;
-}
-
-static int ni_ao_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- unsigned int nbytes;
-
- switch (data[0]) {
- case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
- switch (data[1]) {
- case COMEDI_OUTPUT:
- nbytes = comedi_samples_to_bytes(s,
- board->ao_fifo_depth);
- data[2] = 1 + nbytes;
- if (devpriv->mite)
- data[2] += devpriv->mite->fifo_size;
- break;
- case COMEDI_INPUT:
- data[2] = 0;
- break;
- default:
- return -EINVAL;
- }
- return 0;
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int ni_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
- int interrupt_b_bits;
- int i;
- static const int timeout = 1000;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
-	/*
-	 * Null the inttrig handler at the beginning to prevent the AO
-	 * start trigger from executing more than once per command (and
-	 * doing things like trying to allocate the AO DMA channel
-	 * multiple times).
-	 */
- s->async->inttrig = NULL;
-
- ni_set_bits(dev, NISTC_INTB_ENA_REG,
- NISTC_INTB_ENA_AO_FIFO | NISTC_INTB_ENA_AO_ERR, 0);
- interrupt_b_bits = NISTC_INTB_ENA_AO_ERR;
-#ifdef PCIDMA
- ni_stc_writew(dev, 1, NISTC_DAC_FIFO_CLR_REG);
- if (devpriv->is_6xxx)
- ni_ao_win_outl(dev, 0x6, NI611X_AO_FIFO_OFFSET_LOAD_REG);
- ret = ni_ao_setup_MITE_dma(dev);
- if (ret)
- return ret;
- ret = ni_ao_wait_for_dma_load(dev);
- if (ret < 0)
- return ret;
-#else
- ret = ni_ao_prep_fifo(dev, s);
- if (ret == 0)
- return -EPIPE;
-
- interrupt_b_bits |= NISTC_INTB_ENA_AO_FIFO;
-#endif
-
- ni_stc_writew(dev, devpriv->ao_mode3 | NISTC_AO_MODE3_NOT_AN_UPDATE,
- NISTC_AO_MODE3_REG);
- ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
- /* wait for DACs to be loaded */
- for (i = 0; i < timeout; i++) {
- udelay(1);
- if ((ni_stc_readw(dev, NISTC_STATUS2_REG) &
- NISTC_STATUS2_AO_TMRDACWRS_IN_PROGRESS) == 0)
- break;
- }
- if (i == timeout) {
- dev_err(dev->class_dev,
- "timed out waiting for AO_TMRDACWRs_In_Progress_St to clear\n");
- return -EIO;
- }
- /*
-	 * The STC manual says we need to clear the error interrupt after
- * AO_TMRDACWRs_In_Progress_St clears
- */
- ni_stc_writew(dev, NISTC_INTB_ACK_AO_ERR, NISTC_INTB_ACK_REG);
-
- ni_set_bits(dev, NISTC_INTB_ENA_REG, interrupt_b_bits, 1);
-
- ni_stc_writew(dev, NISTC_AO_CMD1_UI_ARM |
- NISTC_AO_CMD1_UC_ARM |
- NISTC_AO_CMD1_BC_ARM |
- NISTC_AO_CMD1_DAC1_UPDATE_MODE |
- NISTC_AO_CMD1_DAC0_UPDATE_MODE |
- devpriv->ao_cmd1,
- NISTC_AO_CMD1_REG);
-
- ni_stc_writew(dev, NISTC_AO_CMD2_START1_PULSE | devpriv->ao_cmd2,
- NISTC_AO_CMD2_REG);
-
- return 0;
-}
-
-static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- const struct comedi_cmd *cmd = &s->async->cmd;
- int bits;
- int i;
- unsigned trigvar;
- unsigned val;
-
- if (dev->irq == 0) {
- dev_err(dev->class_dev, "cannot run command without an irq\n");
- return -EIO;
- }
-
- ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
-
- ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG);
-
- if (devpriv->is_6xxx) {
- ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG,
- NI611X_AO_MISC_REG);
-
- bits = 0;
- for (i = 0; i < cmd->chanlist_len; i++) {
- int chan;
-
- chan = CR_CHAN(cmd->chanlist[i]);
- bits |= 1 << chan;
- ni_ao_win_outw(dev, chan, NI611X_AO_WAVEFORM_GEN_REG);
- }
- ni_ao_win_outw(dev, bits, NI611X_AO_TIMED_REG);
- }
-
- ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1);
-
- if (cmd->stop_src == TRIG_NONE) {
- devpriv->ao_mode1 |= NISTC_AO_MODE1_CONTINUOUS;
- devpriv->ao_mode1 &= ~NISTC_AO_MODE1_TRIGGER_ONCE;
- } else {
- devpriv->ao_mode1 &= ~NISTC_AO_MODE1_CONTINUOUS;
- devpriv->ao_mode1 |= NISTC_AO_MODE1_TRIGGER_ONCE;
- }
- ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
-
- val = devpriv->ao_trigger_select;
- switch (cmd->start_src) {
- case TRIG_INT:
- case TRIG_NOW:
- val &= ~(NISTC_AO_TRIG_START1_POLARITY |
- NISTC_AO_TRIG_START1_SEL_MASK);
- val |= NISTC_AO_TRIG_START1_EDGE |
- NISTC_AO_TRIG_START1_SYNC;
- break;
- case TRIG_EXT:
- val = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1);
- if (cmd->start_arg & CR_INVERT) {
- /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
- val |= NISTC_AO_TRIG_START1_POLARITY;
- }
- if (cmd->start_arg & CR_EDGE) {
- /* 0=edge detection disabled, 1=enabled */
- val |= NISTC_AO_TRIG_START1_EDGE;
- }
- ni_stc_writew(dev, devpriv->ao_trigger_select,
- NISTC_AO_TRIG_SEL_REG);
- break;
- default:
- BUG();
- break;
- }
- devpriv->ao_trigger_select = val;
- ni_stc_writew(dev, devpriv->ao_trigger_select, NISTC_AO_TRIG_SEL_REG);
-
- devpriv->ao_mode3 &= ~NISTC_AO_MODE3_TRIG_LEN;
- ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
-
- ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
- devpriv->ao_mode2 &= ~NISTC_AO_MODE2_BC_INIT_LOAD_SRC;
- ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
- if (cmd->stop_src == TRIG_NONE)
- ni_stc_writel(dev, 0xffffff, NISTC_AO_BC_LOADA_REG);
- else
- ni_stc_writel(dev, 0, NISTC_AO_BC_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_BC_LOAD, NISTC_AO_CMD1_REG);
- devpriv->ao_mode2 &= ~NISTC_AO_MODE2_UC_INIT_LOAD_SRC;
- ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- if (devpriv->is_m_series) {
-			/*
-			 * This is how the NI example code does it for
-			 * m-series boards; verified correct with 6259.
-			 */
- ni_stc_writel(dev, cmd->stop_arg - 1,
- NISTC_AO_UC_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD,
- NISTC_AO_CMD1_REG);
- } else {
- ni_stc_writel(dev, cmd->stop_arg,
- NISTC_AO_UC_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD,
- NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, cmd->stop_arg - 1,
- NISTC_AO_UC_LOADA_REG);
- }
- break;
- case TRIG_NONE:
- ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG);
- break;
- default:
- ni_stc_writel(dev, 0, NISTC_AO_UC_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, cmd->stop_arg, NISTC_AO_UC_LOADA_REG);
- }
-
- devpriv->ao_mode1 &= ~(NISTC_AO_MODE1_UPDATE_SRC_MASK |
- NISTC_AO_MODE1_UI_SRC_MASK |
- NISTC_AO_MODE1_UPDATE_SRC_POLARITY |
- NISTC_AO_MODE1_UI_SRC_POLARITY);
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
- devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
- trigvar =
- ni_ns_to_timer(dev, cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST);
- ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
- break;
- case TRIG_EXT:
- devpriv->ao_mode1 |=
- NISTC_AO_MODE1_UPDATE_SRC(cmd->scan_begin_arg);
- if (cmd->scan_begin_arg & CR_INVERT)
- devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
- devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
- break;
- default:
- BUG();
- break;
- }
- ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
- ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
- devpriv->ao_mode2 &= ~(NISTC_AO_MODE2_UI_RELOAD_MODE(3) |
- NISTC_AO_MODE2_UI_INIT_LOAD_SRC);
- ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
-
- if (cmd->scan_end_arg > 1) {
- devpriv->ao_mode1 |= NISTC_AO_MODE1_MULTI_CHAN;
- ni_stc_writew(dev,
- NISTC_AO_OUT_CTRL_CHANS(cmd->scan_end_arg - 1) |
- NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ,
- NISTC_AO_OUT_CTRL_REG);
- } else {
- unsigned bits;
-
- devpriv->ao_mode1 &= ~NISTC_AO_MODE1_MULTI_CHAN;
- bits = NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ;
- if (devpriv->is_m_series || devpriv->is_6xxx) {
- bits |= NISTC_AO_OUT_CTRL_CHANS(0);
- } else {
- bits |=
- NISTC_AO_OUT_CTRL_CHANS(CR_CHAN(cmd->chanlist[0]));
- }
- ni_stc_writew(dev, bits, NISTC_AO_OUT_CTRL_REG);
- }
- ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
-
- ni_stc_writew(dev, NISTC_AO_CMD1_DAC1_UPDATE_MODE |
- NISTC_AO_CMD1_DAC0_UPDATE_MODE,
- NISTC_AO_CMD1_REG);
-
- devpriv->ao_mode3 |= NISTC_AO_MODE3_STOP_ON_OVERRUN_ERR;
- ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
-
- devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_MODE_MASK;
-#ifdef PCIDMA
- devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF_F;
-#else
- devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF;
-#endif
- devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_REXMIT_ENA;
- ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
-
- bits = NISTC_AO_PERSONAL_BC_SRC_SEL |
- NISTC_AO_PERSONAL_UPDATE_PW |
- NISTC_AO_PERSONAL_TMRDACWR_PW;
- if (board->ao_fifo_depth)
- bits |= NISTC_AO_PERSONAL_FIFO_ENA;
- else
- bits |= NISTC_AO_PERSONAL_DMA_PIO_CTRL;
-#if 0
- /*
- * F Hess: windows driver does not set NISTC_AO_PERSONAL_NUM_DAC bit
- * for 6281, verified with bus analyzer.
- */
- if (devpriv->is_m_series)
- bits |= NISTC_AO_PERSONAL_NUM_DAC;
-#endif
- ni_stc_writew(dev, bits, NISTC_AO_PERSONAL_REG);
- /* enable sending of ao dma requests */
- ni_stc_writew(dev, NISTC_AO_START_AOFREQ_ENA, NISTC_AO_START_SEL_REG);
-
- ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
-
- if (cmd->stop_src == TRIG_COUNT) {
- ni_stc_writew(dev, NISTC_INTB_ACK_AO_BC_TC,
- NISTC_INTB_ACK_REG);
- ni_set_bits(dev, NISTC_INTB_ENA_REG,
- NISTC_INTB_ENA_AO_BC_TC, 1);
- }
-
- s->async->inttrig = ni_ao_inttrig;
-
- return 0;
-}
-
-static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- int err = 0;
- unsigned int tmp;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_INT:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- tmp = CR_CHAN(cmd->start_arg);
-
- if (tmp > 18)
- tmp = 18;
- tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE));
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, tmp);
- break;
- }
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ao_speed);
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- devpriv->clock_ns *
- 0xffffff);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- tmp = cmd->scan_begin_arg;
- cmd->scan_begin_arg =
- ni_timer_to_ns(dev, ni_ns_to_timer(dev,
- cmd->scan_begin_arg,
- cmd->flags));
- if (tmp != cmd->scan_begin_arg)
- err++;
- }
- if (err)
- return 4;
-
- return 0;
-}
-
-static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
-
- ni_release_ao_mite_channel(dev);
-
- ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG);
- ni_set_bits(dev, NISTC_INTB_ENA_REG, ~0, 0);
- ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL, NISTC_AO_PERSONAL_REG);
- ni_stc_writew(dev, NISTC_INTB_ACK_AO_ALL, NISTC_INTB_ACK_REG);
- ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL |
- NISTC_AO_PERSONAL_UPDATE_PW |
- NISTC_AO_PERSONAL_TMRDACWR_PW,
- NISTC_AO_PERSONAL_REG);
- ni_stc_writew(dev, 0, NISTC_AO_OUT_CTRL_REG);
- ni_stc_writew(dev, 0, NISTC_AO_START_SEL_REG);
- devpriv->ao_cmd1 = 0;
- ni_stc_writew(dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG);
- devpriv->ao_cmd2 = 0;
- ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
- devpriv->ao_mode1 = 0;
- ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
- devpriv->ao_mode2 = 0;
- ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
- if (devpriv->is_m_series)
- devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE;
- else
- devpriv->ao_mode3 = 0;
- ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
- devpriv->ao_trigger_select = 0;
- ni_stc_writew(dev, devpriv->ao_trigger_select,
- NISTC_AO_TRIG_SEL_REG);
- if (devpriv->is_6xxx) {
- unsigned immediate_bits = 0;
- unsigned i;
-
- for (i = 0; i < s->n_chan; ++i)
- immediate_bits |= 1 << i;
- ni_ao_win_outw(dev, immediate_bits, NI671X_AO_IMMEDIATE_REG);
- ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG,
- NI611X_AO_MISC_REG);
- }
- ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
-
- return 0;
-}
-
-/* digital io */
-
-static int ni_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- devpriv->dio_control &= ~NISTC_DIO_CTRL_DIR_MASK;
- devpriv->dio_control |= NISTC_DIO_CTRL_DIR(s->io_bits);
- ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
-
- return insn->n;
-}
-
-static int ni_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- /* Make sure we're not using the serial part of the dio */
- if ((data[0] & (NISTC_DIO_SDIN | NISTC_DIO_SDOUT)) &&
- devpriv->serial_interval_ns)
- return -EBUSY;
-
- if (comedi_dio_update_state(s, data)) {
- devpriv->dio_output &= ~NISTC_DIO_OUT_PARALLEL_MASK;
- devpriv->dio_output |= NISTC_DIO_OUT_PARALLEL(s->state);
- ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG);
- }
-
- data[1] = ni_stc_readw(dev, NISTC_DIO_IN_REG);
-
- return insn->n;
-}
-
-static int ni_m_series_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- ni_writel(dev, s->io_bits, NI_M_DIO_DIR_REG);
-
- return insn->n;
-}
-
-static int ni_m_series_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- ni_writel(dev, s->state, NI_M_DIO_REG);
-
- data[1] = ni_readl(dev, NI_M_DIO_REG);
-
- return insn->n;
-}
-
-static int ni_cdio_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int i;
-
- for (i = 0; i < cmd->chanlist_len; ++i) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (chan != i)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ni_cdio_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
- int tmp;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- tmp = cmd->scan_begin_arg;
- tmp &= CR_PACK_FLAGS(NI_M_CDO_MODE_SAMPLE_SRC_MASK, 0, 0, CR_INVERT);
- if (tmp != cmd->scan_begin_arg)
- err |= -EINVAL;
-
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= ni_cdio_check_chanlist(dev, s, cmd);
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int ni_cdo_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- const unsigned timeout = 1000;
- int retval = 0;
- unsigned i;
-#ifdef PCIDMA
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
-#endif
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- s->async->inttrig = NULL;
-
- /* read alloc the entire buffer */
- comedi_buf_read_alloc(s, s->async->prealloc_bufsz);
-
-#ifdef PCIDMA
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->cdo_mite_chan) {
- mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
- mite_dma_arm(devpriv->cdo_mite_chan);
- } else {
- dev_err(dev->class_dev, "BUG: no cdo mite channel?\n");
- retval = -EIO;
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- if (retval < 0)
- return retval;
-#endif
- /*
- * XXX not sure what interrupt C group does
- * wait for dma to fill output fifo
- * ni_writeb(dev, NI_M_INTC_ENA, NI_M_INTC_ENA_REG);
- */
- for (i = 0; i < timeout; ++i) {
- if (ni_readl(dev, NI_M_CDIO_STATUS_REG) &
- NI_M_CDIO_STATUS_CDO_FIFO_FULL)
- break;
- udelay(10);
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "dma failed to fill cdo fifo!\n");
- s->cancel(dev, s);
- return -EIO;
- }
- ni_writel(dev, NI_M_CDO_CMD_ARM |
- NI_M_CDO_CMD_ERR_INT_ENA_SET |
- NI_M_CDO_CMD_F_E_INT_ENA_SET,
- NI_M_CDIO_CMD_REG);
- return retval;
-}
-
-static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- const struct comedi_cmd *cmd = &s->async->cmd;
- unsigned cdo_mode_bits;
- int retval;
-
- ni_writel(dev, NI_M_CDO_CMD_RESET, NI_M_CDIO_CMD_REG);
- cdo_mode_bits = NI_M_CDO_MODE_FIFO_MODE |
- NI_M_CDO_MODE_HALT_ON_ERROR |
- NI_M_CDO_MODE_SAMPLE_SRC(CR_CHAN(cmd->scan_begin_arg));
- if (cmd->scan_begin_arg & CR_INVERT)
- cdo_mode_bits |= NI_M_CDO_MODE_POLARITY;
- ni_writel(dev, cdo_mode_bits, NI_M_CDO_MODE_REG);
- if (s->io_bits) {
- ni_writel(dev, s->state, NI_M_CDO_FIFO_DATA_REG);
- ni_writel(dev, NI_M_CDO_CMD_SW_UPDATE, NI_M_CDIO_CMD_REG);
- ni_writel(dev, s->io_bits, NI_M_CDO_MASK_ENA_REG);
- } else {
- dev_err(dev->class_dev,
- "attempted to run digital output command with no lines configured as outputs\n");
- return -EIO;
- }
- retval = ni_request_cdo_mite_channel(dev);
- if (retval < 0)
- return retval;
-
- s->async->inttrig = ni_cdo_inttrig;
-
- return 0;
-}
-
-static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- ni_writel(dev, NI_M_CDO_CMD_DISARM |
- NI_M_CDO_CMD_ERR_INT_ENA_CLR |
- NI_M_CDO_CMD_F_E_INT_ENA_CLR |
- NI_M_CDO_CMD_F_REQ_INT_ENA_CLR,
- NI_M_CDIO_CMD_REG);
- /*
- * XXX not sure what interrupt C group does
- * ni_writeb(dev, 0, NI_M_INTC_ENA_REG);
- */
- ni_writel(dev, 0, NI_M_CDO_MASK_ENA_REG);
- ni_release_cdo_mite_channel(dev);
- return 0;
-}
-
-static void handle_cdio_interrupt(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- unsigned cdio_status;
- struct comedi_subdevice *s = &dev->subdevices[NI_DIO_SUBDEV];
-#ifdef PCIDMA
- unsigned long flags;
-#endif
-
- if (!devpriv->is_m_series)
- return;
-#ifdef PCIDMA
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->cdo_mite_chan) {
- unsigned cdo_mite_status =
- mite_get_status(devpriv->cdo_mite_chan);
- if (cdo_mite_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->cdo_mite_chan->channel));
- }
- mite_sync_output_dma(devpriv->cdo_mite_chan, s);
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif
-
- cdio_status = ni_readl(dev, NI_M_CDIO_STATUS_REG);
- if (cdio_status & NI_M_CDIO_STATUS_CDO_ERROR) {
- /* XXX just guessing this is needed and does something useful */
- ni_writel(dev, NI_M_CDO_CMD_ERR_INT_CONFIRM,
- NI_M_CDIO_CMD_REG);
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
- if (cdio_status & NI_M_CDIO_STATUS_CDO_FIFO_EMPTY) {
- ni_writel(dev, NI_M_CDO_CMD_F_E_INT_ENA_CLR,
- NI_M_CDIO_CMD_REG);
- /* s->async->events |= COMEDI_CB_EOA; */
- }
- comedi_handle_events(dev, s);
-}
-
-static int ni_serial_hw_readwrite8(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned char data_out,
- unsigned char *data_in)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int status1;
- int err = 0, count = 20;
-
- devpriv->dio_output &= ~NISTC_DIO_OUT_SERIAL_MASK;
- devpriv->dio_output |= NISTC_DIO_OUT_SERIAL(data_out);
- ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG);
-
- status1 = ni_stc_readw(dev, NISTC_STATUS1_REG);
- if (status1 & NISTC_STATUS1_SERIO_IN_PROG) {
- err = -EBUSY;
- goto Error;
- }
-
- devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_START;
- ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
- devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_START;
-
- /* Wait until STC says we're done, but don't loop infinitely. */
- while ((status1 = ni_stc_readw(dev, NISTC_STATUS1_REG)) &
- NISTC_STATUS1_SERIO_IN_PROG) {
- /* Delay one bit per loop */
- udelay((devpriv->serial_interval_ns + 999) / 1000);
- if (--count < 0) {
- dev_err(dev->class_dev,
- "SPI serial I/O didn't finish in time!\n");
- err = -ETIME;
- goto Error;
- }
- }
-
- /*
- * Delay for last bit. This delay is absolutely necessary, because
- * NISTC_STATUS1_SERIO_IN_PROG goes high one bit too early.
- */
- udelay((devpriv->serial_interval_ns + 999) / 1000);
-
- if (data_in)
- *data_in = ni_stc_readw(dev, NISTC_DIO_SERIAL_IN_REG);
-
-Error:
- ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
-
- return err;
-}
-
-static int ni_serial_sw_readwrite8(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned char data_out,
- unsigned char *data_in)
-{
- struct ni_private *devpriv = dev->private;
- unsigned char mask, input = 0;
-
- /* Wait for one bit before transfer */
- udelay((devpriv->serial_interval_ns + 999) / 1000);
-
- for (mask = 0x80; mask; mask >>= 1) {
-		/*
-		 * Output current bit; note that we cannot touch s->state
-		 * because it is a per-subdevice field, and serial is
-		 * a separate subdevice from DIO.
-		 */
- devpriv->dio_output &= ~NISTC_DIO_SDOUT;
- if (data_out & mask)
- devpriv->dio_output |= NISTC_DIO_SDOUT;
- ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG);
-
-		/*
-		 * Assert SDCLK (active low, inverted), wait for half of
-		 * the delay, deassert SDCLK, and wait for the other half.
-		 */
- devpriv->dio_control |= NISTC_DIO_SDCLK;
- ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
-
- udelay((devpriv->serial_interval_ns + 999) / 2000);
-
- devpriv->dio_control &= ~NISTC_DIO_SDCLK;
- ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
-
- udelay((devpriv->serial_interval_ns + 999) / 2000);
-
- /* Input current bit */
- if (ni_stc_readw(dev, NISTC_DIO_IN_REG) & NISTC_DIO_SDIN)
- input |= mask;
- }
-
- if (data_in)
- *data_in = input;
-
- return 0;
-}
-
-static int ni_serial_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned clk_fout = devpriv->clock_and_fout;
- int err = insn->n;
- unsigned char byte_out, byte_in = 0;
-
- if (insn->n != 2)
- return -EINVAL;
-
- switch (data[0]) {
- case INSN_CONFIG_SERIAL_CLOCK:
- devpriv->serial_hw_mode = 1;
- devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_ENA;
-
- if (data[1] == SERIAL_DISABLED) {
- devpriv->serial_hw_mode = 0;
- devpriv->dio_control &= ~(NISTC_DIO_CTRL_HW_SER_ENA |
- NISTC_DIO_SDCLK);
- data[1] = SERIAL_DISABLED;
- devpriv->serial_interval_ns = data[1];
- } else if (data[1] <= SERIAL_600NS) {
-			/*
-			 * Warning: this clock speed is too fast to reliably
-			 * control SCXI.
-			 */
- devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_TIMEBASE;
- clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE;
- clk_fout &= ~NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
- data[1] = SERIAL_600NS;
- devpriv->serial_interval_ns = data[1];
- } else if (data[1] <= SERIAL_1_2US) {
- devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_TIMEBASE;
- clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE |
- NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
- data[1] = SERIAL_1_2US;
- devpriv->serial_interval_ns = data[1];
- } else if (data[1] <= SERIAL_10US) {
- devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_TIMEBASE;
- clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE |
- NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
-			/*
-			 * Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
-			 * 600ns/1.2us.  If you turn divide_by_2 off with the
-			 * slow clock, you will still get 10us, except then
-			 * all your delays are wrong.
-			 */
- data[1] = SERIAL_10US;
- devpriv->serial_interval_ns = data[1];
- } else {
- devpriv->dio_control &= ~(NISTC_DIO_CTRL_HW_SER_ENA |
- NISTC_DIO_SDCLK);
- devpriv->serial_hw_mode = 0;
- data[1] = (data[1] / 1000) * 1000;
- devpriv->serial_interval_ns = data[1];
- }
- devpriv->clock_and_fout = clk_fout;
-
- ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
- ni_stc_writew(dev, devpriv->clock_and_fout, NISTC_CLK_FOUT_REG);
- return 1;
-
- case INSN_CONFIG_BIDIRECTIONAL_DATA:
-
- if (devpriv->serial_interval_ns == 0)
- return -EINVAL;
-
- byte_out = data[1] & 0xFF;
-
- if (devpriv->serial_hw_mode) {
- err = ni_serial_hw_readwrite8(dev, s, byte_out,
- &byte_in);
- } else if (devpriv->serial_interval_ns > 0) {
- err = ni_serial_sw_readwrite8(dev, s, byte_out,
- &byte_in);
- } else {
- dev_err(dev->class_dev, "serial disabled!\n");
- return -EINVAL;
- }
- if (err < 0)
- return err;
- data[1] = byte_in & 0xFF;
- return insn->n;
- default:
- return -EINVAL;
- }
-}
-
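As a usage note, the two INSN_CONFIG operations above are all that is needed to clock a byte in and out of the serial lines from user space. Below is a rough comedilib sketch; the subdevice index passed in and the choice of the 1.2 us hardware clock are assumptions rather than anything mandated by the driver.

/*
 * Hedged user-space sketch: configure the serial clock, then exchange one
 * byte over the DIO serial lines.
 */
#include <string.h>
#include <comedilib.h>

int serial_xfer(comedi_t *dev, unsigned int subdev, unsigned char out)
{
	comedi_insn insn;
	lsampl_t data[2];

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.subdev = subdev;
	insn.n = 2;
	insn.data = data;

	/* select the 1.2 us hardware serial clock (SERIAL_1_2US) */
	data[0] = INSN_CONFIG_SERIAL_CLOCK;
	data[1] = 1200;			/* ns */
	if (comedi_do_insn(dev, &insn) < 0)
		return -1;

	/* shift one byte out and read one byte back */
	data[0] = INSN_CONFIG_BIDIRECTIONAL_DATA;
	data[1] = out;
	if (comedi_do_insn(dev, &insn) < 0)
		return -1;
	return data[1];			/* byte clocked in */
}
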
-static void init_ao_67xx(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- int i;
-
- for (i = 0; i < s->n_chan; i++) {
- ni_ao_win_outw(dev, NI_E_AO_DACSEL(i) | 0x0,
- NI67XX_AO_CFG2_REG);
- }
- ni_ao_win_outw(dev, 0x0, NI67XX_AO_SP_UPDATES_REG);
-}
-
-static const struct mio_regmap ni_gpct_to_stc_regmap[] = {
- [NITIO_G0_AUTO_INC] = { NISTC_G0_AUTOINC_REG, 2 },
- [NITIO_G1_AUTO_INC] = { NISTC_G1_AUTOINC_REG, 2 },
- [NITIO_G0_CMD] = { NISTC_G0_CMD_REG, 2 },
- [NITIO_G1_CMD] = { NISTC_G1_CMD_REG, 2 },
- [NITIO_G0_HW_SAVE] = { NISTC_G0_HW_SAVE_REG, 4 },
- [NITIO_G1_HW_SAVE] = { NISTC_G1_HW_SAVE_REG, 4 },
- [NITIO_G0_SW_SAVE] = { NISTC_G0_SAVE_REG, 4 },
- [NITIO_G1_SW_SAVE] = { NISTC_G1_SAVE_REG, 4 },
- [NITIO_G0_MODE] = { NISTC_G0_MODE_REG, 2 },
- [NITIO_G1_MODE] = { NISTC_G1_MODE_REG, 2 },
- [NITIO_G0_LOADA] = { NISTC_G0_LOADA_REG, 4 },
- [NITIO_G1_LOADA] = { NISTC_G1_LOADA_REG, 4 },
- [NITIO_G0_LOADB] = { NISTC_G0_LOADB_REG, 4 },
- [NITIO_G1_LOADB] = { NISTC_G1_LOADB_REG, 4 },
- [NITIO_G0_INPUT_SEL] = { NISTC_G0_INPUT_SEL_REG, 2 },
- [NITIO_G1_INPUT_SEL] = { NISTC_G1_INPUT_SEL_REG, 2 },
- [NITIO_G0_CNT_MODE] = { 0x1b0, 2 }, /* M-Series only */
- [NITIO_G1_CNT_MODE] = { 0x1b2, 2 }, /* M-Series only */
- [NITIO_G0_GATE2] = { 0x1b4, 2 }, /* M-Series only */
- [NITIO_G1_GATE2] = { 0x1b6, 2 }, /* M-Series only */
- [NITIO_G01_STATUS] = { NISTC_G01_STATUS_REG, 2 },
- [NITIO_G01_RESET] = { NISTC_RESET_REG, 2 },
- [NITIO_G01_STATUS1] = { NISTC_STATUS1_REG, 2 },
- [NITIO_G01_STATUS2] = { NISTC_STATUS2_REG, 2 },
- [NITIO_G0_DMA_CFG] = { 0x1b8, 2 }, /* M-Series only */
- [NITIO_G1_DMA_CFG] = { 0x1ba, 2 }, /* M-Series only */
- [NITIO_G0_DMA_STATUS] = { 0x1b8, 2 }, /* M-Series only */
- [NITIO_G1_DMA_STATUS] = { 0x1ba, 2 }, /* M-Series only */
- [NITIO_G0_ABZ] = { 0x1c0, 2 }, /* M-Series only */
- [NITIO_G1_ABZ] = { 0x1c2, 2 }, /* M-Series only */
- [NITIO_G0_INT_ACK] = { NISTC_INTA_ACK_REG, 2 },
- [NITIO_G1_INT_ACK] = { NISTC_INTB_ACK_REG, 2 },
- [NITIO_G0_STATUS] = { NISTC_AI_STATUS1_REG, 2 },
- [NITIO_G1_STATUS] = { NISTC_AO_STATUS1_REG, 2 },
- [NITIO_G0_INT_ENA] = { NISTC_INTA_ENA_REG, 2 },
- [NITIO_G1_INT_ENA] = { NISTC_INTB_ENA_REG, 2 },
-};
-
-static unsigned int ni_gpct_to_stc_register(struct comedi_device *dev,
- enum ni_gpct_register reg)
-{
- const struct mio_regmap *regmap;
-
- if (reg < ARRAY_SIZE(ni_gpct_to_stc_regmap)) {
- regmap = &ni_gpct_to_stc_regmap[reg];
- } else {
- dev_warn(dev->class_dev, "%s: unhandled register=0x%x\n",
- __func__, reg);
- return 0;
- }
-
- return regmap->mio_reg;
-}
-
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
-{
- struct comedi_device *dev = counter->counter_dev->dev;
- unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
- static const unsigned gpct_interrupt_a_enable_mask =
- NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC;
- static const unsigned gpct_interrupt_b_enable_mask =
- NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC;
-
- if (stc_register == 0)
- return;
-
- switch (reg) {
- /* m-series only registers */
- case NITIO_G0_CNT_MODE:
- case NITIO_G1_CNT_MODE:
- case NITIO_G0_GATE2:
- case NITIO_G1_GATE2:
- case NITIO_G0_DMA_CFG:
- case NITIO_G1_DMA_CFG:
- case NITIO_G0_ABZ:
- case NITIO_G1_ABZ:
- ni_writew(dev, bits, stc_register);
- break;
-
- /* 32 bit registers */
- case NITIO_G0_LOADA:
- case NITIO_G1_LOADA:
- case NITIO_G0_LOADB:
- case NITIO_G1_LOADB:
- ni_stc_writel(dev, bits, stc_register);
- break;
-
- /* 16 bit registers */
- case NITIO_G0_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
- ni_set_bitfield(dev, stc_register,
- gpct_interrupt_a_enable_mask, bits);
- break;
- case NITIO_G1_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
- ni_set_bitfield(dev, stc_register,
- gpct_interrupt_b_enable_mask, bits);
- break;
- case NITIO_G01_RESET:
- BUG_ON(bits & ~(NISTC_RESET_G0 | NISTC_RESET_G1));
- /* fall-through */
- default:
- ni_stc_writew(dev, bits, stc_register);
- }
-}
-
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
-{
- struct comedi_device *dev = counter->counter_dev->dev;
- unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
-
- if (stc_register == 0)
- return 0;
-
- switch (reg) {
- /* m-series only registers */
- case NITIO_G0_DMA_STATUS:
- case NITIO_G1_DMA_STATUS:
- return ni_readw(dev, stc_register);
-
- /* 32 bit registers */
- case NITIO_G0_HW_SAVE:
- case NITIO_G1_HW_SAVE:
- case NITIO_G0_SW_SAVE:
- case NITIO_G1_SW_SAVE:
- return ni_stc_readl(dev, stc_register);
-
- /* 16 bit registers */
- default:
- return ni_stc_readw(dev, stc_register);
- }
-}
-
-static int ni_freq_out_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int val = NISTC_CLK_FOUT_TO_DIVIDER(devpriv->clock_and_fout);
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = val;
-
- return insn->n;
-}
-
-static int ni_freq_out_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- if (insn->n) {
- unsigned int val = data[insn->n - 1];
-
- devpriv->clock_and_fout &= ~NISTC_CLK_FOUT_ENA;
- ni_stc_writew(dev, devpriv->clock_and_fout, NISTC_CLK_FOUT_REG);
- devpriv->clock_and_fout &= ~NISTC_CLK_FOUT_DIVIDER_MASK;
-
- /* use the last data value to set the fout divider */
- devpriv->clock_and_fout |= NISTC_CLK_FOUT_DIVIDER(val);
-
- devpriv->clock_and_fout |= NISTC_CLK_FOUT_ENA;
- ni_stc_writew(dev, devpriv->clock_and_fout, NISTC_CLK_FOUT_REG);
- }
- return insn->n;
-}
-
-static int ni_freq_out_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- switch (data[0]) {
- case INSN_CONFIG_SET_CLOCK_SRC:
- switch (data[1]) {
- case NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC:
- devpriv->clock_and_fout &= ~NISTC_CLK_FOUT_TIMEBASE_SEL;
- break;
- case NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC:
- devpriv->clock_and_fout |= NISTC_CLK_FOUT_TIMEBASE_SEL;
- break;
- default:
- return -EINVAL;
- }
- ni_stc_writew(dev, devpriv->clock_and_fout, NISTC_CLK_FOUT_REG);
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- if (devpriv->clock_and_fout & NISTC_CLK_FOUT_TIMEBASE_SEL) {
- data[1] = NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC;
- data[2] = TIMEBASE_2_NS;
- } else {
- data[1] = NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC;
- data[2] = TIMEBASE_1_NS * 2;
- }
- break;
- default:
- return -EINVAL;
- }
- return insn->n;
-}
-
-static int ni_8255_callback(struct comedi_device *dev,
- int dir, int port, int data, unsigned long iobase)
-{
- if (dir) {
- ni_writeb(dev, data, iobase + 2 * port);
- return 0;
- }
-
- return ni_readb(dev, iobase + 2 * port);
-}
-
-static int ni_get_pwm_config(struct comedi_device *dev, unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- data[1] = devpriv->pwm_up_count * devpriv->clock_ns;
- data[2] = devpriv->pwm_down_count * devpriv->clock_ns;
- return 3;
-}
-
-static int ni_m_series_pwm_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
-
- switch (data[0]) {
- case INSN_CONFIG_PWM_OUTPUT:
- switch (data[1]) {
- case CMDF_ROUND_NEAREST:
- up_count =
- (data[2] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
- break;
- case CMDF_ROUND_DOWN:
- up_count = data[2] / devpriv->clock_ns;
- break;
- case CMDF_ROUND_UP:
- up_count =
- (data[2] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
- break;
- default:
- return -EINVAL;
- }
- switch (data[3]) {
- case CMDF_ROUND_NEAREST:
- down_count =
- (data[4] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
- break;
- case CMDF_ROUND_DOWN:
- down_count = data[4] / devpriv->clock_ns;
- break;
- case CMDF_ROUND_UP:
- down_count =
- (data[4] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
- break;
- default:
- return -EINVAL;
- }
- if (up_count * devpriv->clock_ns != data[2] ||
- down_count * devpriv->clock_ns != data[4]) {
- data[2] = up_count * devpriv->clock_ns;
- data[4] = down_count * devpriv->clock_ns;
- return -EAGAIN;
- }
- ni_writel(dev, NI_M_CAL_PWM_HIGH_TIME(up_count) |
- NI_M_CAL_PWM_LOW_TIME(down_count),
- NI_M_CAL_PWM_REG);
- devpriv->pwm_up_count = up_count;
- devpriv->pwm_down_count = down_count;
- return 5;
- case INSN_CONFIG_GET_PWM_OUTPUT:
- return ni_get_pwm_config(dev, data);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
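The rounding above quantizes the requested high and low times to whole timebase ticks and, when the request is not an exact multiple, writes the achievable values back and returns -EAGAIN. A stand-alone sketch of that arithmetic follows, assuming a 50 ns (20 MHz) calibration PWM timebase; the actual devpriv->clock_ns depends on the clock configuration.

/*
 * Minimal illustration of the CMDF_ROUND_NEAREST quantization above,
 * assuming a 50 ns timebase; the requested value is arbitrary.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int clock_ns = 50;	/* assumed timebase */
	unsigned int req_high_ns = 1010;	/* requested high time */
	unsigned int up_count = (req_high_ns + clock_ns / 2) / clock_ns;

	printf("requested %u ns -> %u counts -> %u ns actual\n",
	       req_high_ns, up_count, up_count * clock_ns);
	/*
	 * Prints "requested 1010 ns -> 20 counts -> 1000 ns actual"; the
	 * driver would return -EAGAIN with data[2] updated to 1000.
	 */
	return 0;
}
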
-static int ni_6143_pwm_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
-
- switch (data[0]) {
- case INSN_CONFIG_PWM_OUTPUT:
- switch (data[1]) {
- case CMDF_ROUND_NEAREST:
- up_count =
- (data[2] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
- break;
- case CMDF_ROUND_DOWN:
- up_count = data[2] / devpriv->clock_ns;
- break;
- case CMDF_ROUND_UP:
- up_count =
- (data[2] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
- break;
- default:
- return -EINVAL;
- }
- switch (data[3]) {
- case CMDF_ROUND_NEAREST:
- down_count =
- (data[4] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
- break;
- case CMDF_ROUND_DOWN:
- down_count = data[4] / devpriv->clock_ns;
- break;
- case CMDF_ROUND_UP:
- down_count =
- (data[4] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
- break;
- default:
- return -EINVAL;
- }
- if (up_count * devpriv->clock_ns != data[2] ||
- down_count * devpriv->clock_ns != data[4]) {
- data[2] = up_count * devpriv->clock_ns;
- data[4] = down_count * devpriv->clock_ns;
- return -EAGAIN;
- }
- ni_writel(dev, up_count, NI6143_CALIB_HI_TIME_REG);
- devpriv->pwm_up_count = up_count;
- ni_writel(dev, down_count, NI6143_CALIB_LO_TIME_REG);
- devpriv->pwm_down_count = down_count;
- return 5;
- case INSN_CONFIG_GET_PWM_OUTPUT:
- return ni_get_pwm_config(dev, data);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int pack_mb88341(int addr, int val, int *bitstring)
-{
-	/*
-	 * Fujitsu MB 88341
-	 * Note that address bits are reversed.  Thanks to
-	 * Ingo Keen for noticing this.
-	 *
-	 * Note also that the 88341 expects address values from
-	 * 1-12, whereas we use channel numbers 0-11.  The NI
-	 * docs use 1-12, also, so be careful here.
-	 */
- addr++;
- *bitstring = ((addr & 0x1) << 11) |
- ((addr & 0x2) << 9) |
- ((addr & 0x4) << 7) | ((addr & 0x8) << 5) | (val & 0xff);
- return 12;
-}
-
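The shifts above place the (incremented) address bits in reversed order just ahead of the 8 data bits, which is easy to miss on first reading. A quick stand-alone illustration for channel 2 (device address 3):

/*
 * Channel 2 becomes device address 3 (binary 0011); its bits end up
 * reversed in the top nibble of the 12-bit serial word.
 */
#include <stdio.h>

int main(void)
{
	int addr = 2, val = 0x5a, bitstring;

	addr++;				/* channels 0-11 -> addresses 1-12 */
	bitstring = ((addr & 0x1) << 11) |
		    ((addr & 0x2) << 9) |
		    ((addr & 0x4) << 7) |
		    ((addr & 0x8) << 5) | (val & 0xff);
	printf("0x%03x\n", bitstring);	/* prints 0xc5a */
	return 0;
}
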
-static int pack_dac8800(int addr, int val, int *bitstring)
-{
- *bitstring = ((addr & 0x7) << 8) | (val & 0xff);
- return 11;
-}
-
-static int pack_dac8043(int addr, int val, int *bitstring)
-{
- *bitstring = val & 0xfff;
- return 12;
-}
-
-static int pack_ad8522(int addr, int val, int *bitstring)
-{
- *bitstring = (val & 0xfff) | (addr ? 0xc000 : 0xa000);
- return 16;
-}
-
-static int pack_ad8804(int addr, int val, int *bitstring)
-{
- *bitstring = ((addr & 0xf) << 8) | (val & 0xff);
- return 12;
-}
-
-static int pack_ad8842(int addr, int val, int *bitstring)
-{
- *bitstring = ((addr + 1) << 8) | (val & 0xff);
- return 12;
-}
-
-struct caldac_struct {
- int n_chans;
- int n_bits;
- int (*packbits)(int, int, int *);
-};
-
-static struct caldac_struct caldacs[] = {
- [mb88341] = {12, 8, pack_mb88341},
- [dac8800] = {8, 8, pack_dac8800},
- [dac8043] = {1, 12, pack_dac8043},
- [ad8522] = {2, 12, pack_ad8522},
- [ad8804] = {12, 8, pack_ad8804},
- [ad8842] = {8, 8, pack_ad8842},
- [ad8804_debug] = {16, 8, pack_ad8804},
-};
-
-static void ni_write_caldac(struct comedi_device *dev, int addr, int val)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- unsigned int loadbit = 0, bits = 0, bit, bitstring = 0;
- unsigned int cmd;
- int i;
- int type;
-
- if (devpriv->caldacs[addr] == val)
- return;
- devpriv->caldacs[addr] = val;
-
- for (i = 0; i < 3; i++) {
- type = board->caldac[i];
- if (type == caldac_none)
- break;
- if (addr < caldacs[type].n_chans) {
- bits = caldacs[type].packbits(addr, val, &bitstring);
- loadbit = NI_E_SERIAL_CMD_DAC_LD(i);
- break;
- }
- addr -= caldacs[type].n_chans;
- }
-
- /* bits will be 0 if there is no caldac for the given addr */
- if (bits == 0)
- return;
-
- for (bit = 1 << (bits - 1); bit; bit >>= 1) {
- cmd = (bit & bitstring) ? NI_E_SERIAL_CMD_SDATA : 0;
- ni_writeb(dev, cmd, NI_E_SERIAL_CMD_REG);
- udelay(1);
- ni_writeb(dev, NI_E_SERIAL_CMD_SCLK | cmd, NI_E_SERIAL_CMD_REG);
- udelay(1);
- }
- ni_writeb(dev, loadbit, NI_E_SERIAL_CMD_REG);
- udelay(1);
- ni_writeb(dev, 0, NI_E_SERIAL_CMD_REG);
-}
-
-static int ni_calib_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- ni_write_caldac(dev, CR_CHAN(insn->chanspec), data[0]);
-
- return 1;
-}
-
-static int ni_calib_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- data[0] = devpriv->caldacs[CR_CHAN(insn->chanspec)];
-
- return 1;
-}
-
-static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- int i, j;
- int n_dacs;
- int n_chans = 0;
- int n_bits;
- int diffbits = 0;
- int type;
- int chan;
-
- type = board->caldac[0];
- if (type == caldac_none)
- return;
- n_bits = caldacs[type].n_bits;
- for (i = 0; i < 3; i++) {
- type = board->caldac[i];
- if (type == caldac_none)
- break;
- if (caldacs[type].n_bits != n_bits)
- diffbits = 1;
- n_chans += caldacs[type].n_chans;
- }
- n_dacs = i;
- s->n_chan = n_chans;
-
- if (diffbits) {
- unsigned int *maxdata_list;
-
- if (n_chans > MAX_N_CALDACS)
- dev_err(dev->class_dev,
- "BUG! MAX_N_CALDACS too small\n");
- s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list;
- chan = 0;
- for (i = 0; i < n_dacs; i++) {
- type = board->caldac[i];
- for (j = 0; j < caldacs[type].n_chans; j++) {
- maxdata_list[chan] =
- (1 << caldacs[type].n_bits) - 1;
- chan++;
- }
- }
-
- for (chan = 0; chan < s->n_chan; chan++)
-			ni_write_caldac(dev, chan, s->maxdata_list[chan] / 2);
- } else {
- type = board->caldac[0];
- s->maxdata = (1 << caldacs[type].n_bits) - 1;
-
- for (chan = 0; chan < s->n_chan; chan++)
-			ni_write_caldac(dev, chan, s->maxdata / 2);
- }
-}
-
-static int ni_read_eeprom(struct comedi_device *dev, int addr)
-{
- unsigned int cmd = NI_E_SERIAL_CMD_EEPROM_CS;
- int bit;
- int bitstring;
-
- bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff);
- ni_writeb(dev, cmd, NI_E_SERIAL_CMD_REG);
- for (bit = 0x8000; bit; bit >>= 1) {
- if (bit & bitstring)
- cmd |= NI_E_SERIAL_CMD_SDATA;
- else
- cmd &= ~NI_E_SERIAL_CMD_SDATA;
-
- ni_writeb(dev, cmd, NI_E_SERIAL_CMD_REG);
- ni_writeb(dev, NI_E_SERIAL_CMD_SCLK | cmd, NI_E_SERIAL_CMD_REG);
- }
- cmd = NI_E_SERIAL_CMD_EEPROM_CS;
- bitstring = 0;
- for (bit = 0x80; bit; bit >>= 1) {
- ni_writeb(dev, cmd, NI_E_SERIAL_CMD_REG);
- ni_writeb(dev, NI_E_SERIAL_CMD_SCLK | cmd, NI_E_SERIAL_CMD_REG);
- if (ni_readb(dev, NI_E_STATUS_REG) & NI_E_STATUS_PROMOUT)
- bitstring |= bit;
- }
- ni_writeb(dev, 0, NI_E_SERIAL_CMD_REG);
-
- return bitstring;
-}
-
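For what it is worth, the command word assembled above packs the ninth address bit just above the 0x0300 opcode bits before the 16-bit shift-out loop. A tiny stand-alone check of that packing (the address value is arbitrary):

/* Illustration of the EEPROM read command word built above. */
#include <stdio.h>

int main(void)
{
	int addr = 0x1a5;
	int bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff);

	printf("0x%04x\n", bitstring);	/* prints 0x0ba5 */
	return 0;
}
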
-static int ni_eeprom_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chanspec));
-
- return 1;
-}
-
-static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chanspec)];
-
- return 1;
-}
-
-static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
-{
- /* pre-m-series boards have fixed signals on pfi pins */
- switch (chan) {
- case 0:
- return NI_PFI_OUTPUT_AI_START1;
- case 1:
- return NI_PFI_OUTPUT_AI_START2;
- case 2:
- return NI_PFI_OUTPUT_AI_CONVERT;
- case 3:
- return NI_PFI_OUTPUT_G_SRC1;
- case 4:
- return NI_PFI_OUTPUT_G_GATE1;
- case 5:
- return NI_PFI_OUTPUT_AO_UPDATE_N;
- case 6:
- return NI_PFI_OUTPUT_AO_START1;
- case 7:
- return NI_PFI_OUTPUT_AI_START_PULSE;
- case 8:
- return NI_PFI_OUTPUT_G_SRC0;
- case 9:
- return NI_PFI_OUTPUT_G_GATE0;
- default:
- dev_err(dev->class_dev, "bug, unhandled case in switch.\n");
- break;
- }
- return 0;
-}
-
-static int ni_old_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
-{
- /* pre-m-series boards have fixed signals on pfi pins */
- if (source != ni_old_get_pfi_routing(dev, chan))
- return -EINVAL;
- return 2;
-}
-
-static unsigned ni_m_series_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
-{
- struct ni_private *devpriv = dev->private;
- const unsigned array_offset = chan / 3;
-
- return NI_M_PFI_OUT_SEL_TO_SRC(chan,
- devpriv->pfi_output_select_reg[array_offset]);
-}
-
-static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
-{
- struct ni_private *devpriv = dev->private;
- unsigned index = chan / 3;
- unsigned short val = devpriv->pfi_output_select_reg[index];
-
- if ((source & 0x1f) != source)
- return -EINVAL;
-
- val &= ~NI_M_PFI_OUT_SEL_MASK(chan);
- val |= NI_M_PFI_OUT_SEL(chan, source);
- ni_writew(dev, val, NI_M_PFI_OUT_SEL_REG(index));
- devpriv->pfi_output_select_reg[index] = val;
-
- return 2;
-}
-
-static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
-{
- struct ni_private *devpriv = dev->private;
-
- return (devpriv->is_m_series)
- ? ni_m_series_get_pfi_routing(dev, chan)
- : ni_old_get_pfi_routing(dev, chan);
-}
-
-static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
-{
- struct ni_private *devpriv = dev->private;
-
- return (devpriv->is_m_series)
- ? ni_m_series_set_pfi_routing(dev, chan, source)
- : ni_old_set_pfi_routing(dev, chan, source);
-}
-
-static int ni_config_filter(struct comedi_device *dev,
- unsigned pfi_channel,
- enum ni_pfi_filter_select filter)
-{
- struct ni_private *devpriv = dev->private;
- unsigned bits;
-
- if (!devpriv->is_m_series)
- return -ENOTSUPP;
-
- bits = ni_readl(dev, NI_M_PFI_FILTER_REG);
- bits &= ~NI_M_PFI_FILTER_SEL_MASK(pfi_channel);
- bits |= NI_M_PFI_FILTER_SEL(pfi_channel, filter);
- ni_writel(dev, bits, NI_M_PFI_FILTER_REG);
- return 0;
-}
-
-static int ni_pfi_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int chan;
-
- if (insn->n < 1)
- return -EINVAL;
-
- chan = CR_CHAN(insn->chanspec);
-
- switch (data[0]) {
- case COMEDI_OUTPUT:
- ni_set_bits(dev, NISTC_IO_BIDIR_PIN_REG, 1 << chan, 1);
- break;
- case COMEDI_INPUT:
- ni_set_bits(dev, NISTC_IO_BIDIR_PIN_REG, 1 << chan, 0);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (devpriv->io_bidirection_pin_reg & (1 << chan)) ?
- COMEDI_OUTPUT : COMEDI_INPUT;
- return 0;
- case INSN_CONFIG_SET_ROUTING:
- return ni_set_pfi_routing(dev, chan, data[1]);
- case INSN_CONFIG_GET_ROUTING:
- data[1] = ni_get_pfi_routing(dev, chan);
- break;
- case INSN_CONFIG_FILTER:
- return ni_config_filter(dev, chan, data[1]);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
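From user space, the INSN_CONFIG_SET_ROUTING and direction operations above are usually reached through comedilib helpers. The sketch below is hedged: the PFI subdevice number (often 7 on these boards) and the channel are assumptions for illustration only.

/*
 * Hedged comedilib sketch: route AI START1 to a PFI pin and drive it.
 */
#include <comedilib.h>

int route_pfi(comedi_t *dev)
{
	unsigned int pfi_subdev = 7;	/* assumed PFI subdevice index */
	unsigned int chan = 1;		/* PFI1 */

	if (comedi_set_routing(dev, pfi_subdev, chan,
			       NI_PFI_OUTPUT_AI_START1) < 0)
		return -1;
	/* make the pin an output: maps to COMEDI_OUTPUT in the handler above */
	return comedi_dio_config(dev, pfi_subdev, chan, COMEDI_OUTPUT);
}
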
-static int ni_pfi_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
-
- if (!devpriv->is_m_series)
- return -ENOTSUPP;
-
- if (comedi_dio_update_state(s, data))
- ni_writew(dev, s->state, NI_M_PFI_DO_REG);
-
- data[1] = ni_readw(dev, NI_M_PFI_DI_REG);
-
- return insn->n;
-}
-
-static int cs5529_wait_for_idle(struct comedi_device *dev)
-{
- unsigned short status;
- const int timeout = HZ;
- int i;
-
- for (i = 0; i < timeout; i++) {
- status = ni_ao_win_inw(dev, NI67XX_CAL_STATUS_REG);
- if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(1))
- return -EIO;
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIME;
- }
- return 0;
-}
-
-static void cs5529_command(struct comedi_device *dev, unsigned short value)
-{
- static const int timeout = 100;
- int i;
-
- ni_ao_win_outw(dev, value, NI67XX_CAL_CMD_REG);
-	/*
-	 * Give time for the command to start being serially clocked into the
-	 * cs5529.  This ensures that the NI67XX_CAL_STATUS_BUSY bit will get
-	 * properly set before we exit this function.
-	 */
- for (i = 0; i < timeout; i++) {
- if (ni_ao_win_inw(dev, NI67XX_CAL_STATUS_REG) &
- NI67XX_CAL_STATUS_BUSY)
- break;
- udelay(1);
- }
- if (i == timeout)
- dev_err(dev->class_dev,
- "possible problem - never saw adc go busy?\n");
-}
-
-static int cs5529_do_conversion(struct comedi_device *dev,
- unsigned short *data)
-{
- int retval;
- unsigned short status;
-
- cs5529_command(dev, CS5529_CMD_CB | CS5529_CMD_SINGLE_CONV);
- retval = cs5529_wait_for_idle(dev);
- if (retval) {
- dev_err(dev->class_dev,
- "timeout or signal in cs5529_do_conversion()\n");
- return -ETIME;
- }
- status = ni_ao_win_inw(dev, NI67XX_CAL_STATUS_REG);
- if (status & NI67XX_CAL_STATUS_OSC_DETECT) {
- dev_err(dev->class_dev,
- "cs5529 conversion error, status CSS_OSC_DETECT\n");
- return -EIO;
- }
- if (status & NI67XX_CAL_STATUS_OVERRANGE) {
- dev_err(dev->class_dev,
- "cs5529 conversion error, overrange (ignoring)\n");
- }
- if (data) {
- *data = ni_ao_win_inw(dev, NI67XX_CAL_DATA_REG);
- /* cs5529 returns 16 bit signed data in bipolar mode */
- *data ^= (1 << 15);
- }
- return 0;
-}
-
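The XOR with bit 15 above maps the converter's two's-complement bipolar result onto comedi's unsigned 0..0xffff convention. A quick stand-alone check of that mapping:

/* Flipping bit 15: most negative -> 0, zero -> midscale, most positive -> 0xffff. */
#include <stdio.h>

int main(void)
{
	unsigned short raw[] = { 0x8000, 0xffff, 0x0000, 0x7fff };
	int i;

	for (i = 0; i < 4; i++)		/* -32768, -1, 0, +32767 */
		printf("0x%04x -> 0x%04x\n", raw[i],
		       (unsigned short)(raw[i] ^ (1 << 15)));
	/* prints 0x0000, 0x7fff, 0x8000, 0xffff respectively */
	return 0;
}
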
-static int cs5529_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int n, retval;
- unsigned short sample;
- unsigned int channel_select;
- const unsigned int INTERNAL_REF = 0x1000;
-
-	/*
-	 * Set the calibration ADC source.  The docs lie: reference select
-	 * bits 8 to 11 do nothing.  Bit 12 seems to select the internal
-	 * reference voltage, and bit 13 causes the ADC input to go
-	 * overrange (maybe it reads the external reference?).
-	 */
- if (insn->chanspec & CR_ALT_SOURCE)
- channel_select = INTERNAL_REF;
- else
- channel_select = CR_CHAN(insn->chanspec);
- ni_ao_win_outw(dev, channel_select, NI67XX_AO_CAL_CHAN_SEL_REG);
-
- for (n = 0; n < insn->n; n++) {
- retval = cs5529_do_conversion(dev, &sample);
- if (retval < 0)
- return retval;
- data[n] = sample;
- }
- return insn->n;
-}
-
-static void cs5529_config_write(struct comedi_device *dev, unsigned int value,
- unsigned int reg_select_bits)
-{
- ni_ao_win_outw(dev, (value >> 16) & 0xff, NI67XX_CAL_CFG_HI_REG);
- ni_ao_win_outw(dev, value & 0xffff, NI67XX_CAL_CFG_LO_REG);
- reg_select_bits &= CS5529_CMD_REG_MASK;
- cs5529_command(dev, CS5529_CMD_CB | reg_select_bits);
- if (cs5529_wait_for_idle(dev))
- dev_err(dev->class_dev,
- "timeout or signal in %s\n", __func__);
-}
-
-static int init_cs5529(struct comedi_device *dev)
-{
- unsigned int config_bits = CS5529_CFG_PORT_FLAG |
- CS5529_CFG_WORD_RATE_2180;
-
-#if 1
- /* do self-calibration */
- cs5529_config_write(dev, config_bits | CS5529_CFG_CALIB_BOTH_SELF,
- CS5529_CFG_REG);
- /* need to force a conversion for calibration to run */
- cs5529_do_conversion(dev, NULL);
-#else
- /* force gain calibration to 1 */
- cs5529_config_write(dev, 0x400000, CS5529_GAIN_REG);
- cs5529_config_write(dev, config_bits | CS5529_CFG_CALIB_OFFSET_SELF,
- CS5529_CFG_REG);
- if (cs5529_wait_for_idle(dev))
- dev_err(dev->class_dev,
- "timeout or signal in %s\n", __func__);
-#endif
- return 0;
-}
-
-/*
- * Find best multiplier/divider to try and get the PLL running at 80 MHz
- * given an arbitrary frequency input clock.
- */
-static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
- unsigned *freq_divider,
- unsigned *freq_multiplier,
- unsigned *actual_period_ns)
-{
- unsigned div;
- unsigned best_div = 1;
- unsigned mult;
- unsigned best_mult = 1;
- static const unsigned pico_per_nano = 1000;
-
- const unsigned reference_picosec = reference_period_ns * pico_per_nano;
-	/*
-	 * The m-series wants the phase-locked loop to output 80 MHz, which is
-	 * divided by 4 to get the 20 MHz used for most timing clocks.
-	 */
- static const unsigned target_picosec = 12500;
- static const unsigned fudge_factor_80_to_20Mhz = 4;
- int best_period_picosec = 0;
-
- for (div = 1; div <= NI_M_PLL_MAX_DIVISOR; ++div) {
- for (mult = 1; mult <= NI_M_PLL_MAX_MULTIPLIER; ++mult) {
- unsigned new_period_ps =
- (reference_picosec * div) / mult;
- if (abs(new_period_ps - target_picosec) <
- abs(best_period_picosec - target_picosec)) {
- best_period_picosec = new_period_ps;
- best_div = div;
- best_mult = mult;
- }
- }
- }
- if (best_period_picosec == 0)
- return -EIO;
-
- *freq_divider = best_div;
- *freq_multiplier = best_mult;
- *actual_period_ns =
- (best_period_picosec * fudge_factor_80_to_20Mhz +
- (pico_per_nano / 2)) / pico_per_nano;
- return 0;
-}
-
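As a worked example, for the PXI backplane's 10 MHz reference (100 ns period) the search above lands on divider 1 and multiplier 8, i.e. exactly 12500 ps at the PLL output, which becomes the 50 ns timebase after the divide-by-4. Below is a stand-alone sketch of the same brute-force search; the divider and multiplier limits of 16 and 256 stand in for NI_M_PLL_MAX_DIVISOR and NI_M_PLL_MAX_MULTIPLIER and are assumptions.

/* Brute-force divider/multiplier search for a 100 ns (10 MHz) reference. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int reference_ps = 100 * 1000;	/* 100 ns reference */
	const unsigned int target_ps = 12500;		/* 80 MHz PLL output */
	unsigned int div, mult, best_div = 1, best_mult = 1, best_ps = 0;

	for (div = 1; div <= 16; div++)
		for (mult = 1; mult <= 256; mult++) {
			unsigned int ps = (reference_ps * div) / mult;

			if (abs((int)ps - (int)target_ps) <
			    abs((int)best_ps - (int)target_ps)) {
				best_ps = ps;
				best_div = div;
				best_mult = mult;
			}
		}
	/* prints "div=1 mult=8 -> 12500 ps", i.e. a 50 ns timebase */
	printf("div=%u mult=%u -> %u ps\n", best_div, best_mult, best_ps);
	return 0;
}
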
-static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
-{
- struct ni_private *devpriv = dev->private;
- static const unsigned min_period_ns = 50;
- static const unsigned max_period_ns = 1000;
- static const unsigned timeout = 1000;
- unsigned pll_control_bits;
- unsigned freq_divider;
- unsigned freq_multiplier;
- unsigned rtsi;
- unsigned i;
- int retval;
-
- if (source == NI_MIO_PLL_PXI10_CLOCK)
- period_ns = 100;
-	/*
-	 * These limits are somewhat arbitrary, but NI advertises a 1 to
-	 * 20 MHz range, so we'll use that.
-	 */
- if (period_ns < min_period_ns || period_ns > max_period_ns) {
- dev_err(dev->class_dev,
-			"%s: the input clock period for the phase-locked loop must be between %i and %i ns\n",
- __func__, min_period_ns, max_period_ns);
- return -EINVAL;
- }
- devpriv->rtsi_trig_direction_reg &= ~NISTC_RTSI_TRIG_USE_CLK;
- ni_stc_writew(dev, devpriv->rtsi_trig_direction_reg,
- NISTC_RTSI_TRIG_DIR_REG);
- pll_control_bits = NI_M_PLL_CTRL_ENA | NI_M_PLL_CTRL_VCO_MODE_75_150MHZ;
- devpriv->clock_and_fout2 |= NI_M_CLK_FOUT2_TIMEBASE1_PLL |
- NI_M_CLK_FOUT2_TIMEBASE3_PLL;
- devpriv->clock_and_fout2 &= ~NI_M_CLK_FOUT2_PLL_SRC_MASK;
- switch (source) {
- case NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK:
- devpriv->clock_and_fout2 |= NI_M_CLK_FOUT2_PLL_SRC_STAR;
- break;
- case NI_MIO_PLL_PXI10_CLOCK:
- /* pxi clock is 10MHz */
- devpriv->clock_and_fout2 |= NI_M_CLK_FOUT2_PLL_SRC_PXI10;
- break;
- default:
- for (rtsi = 0; rtsi <= NI_M_MAX_RTSI_CHAN; ++rtsi) {
- if (source == NI_MIO_PLL_RTSI_CLOCK(rtsi)) {
- devpriv->clock_and_fout2 |=
- NI_M_CLK_FOUT2_PLL_SRC_RTSI(rtsi);
- break;
- }
- }
- if (rtsi > NI_M_MAX_RTSI_CHAN)
- return -EINVAL;
- break;
- }
- retval = ni_mseries_get_pll_parameters(period_ns,
- &freq_divider,
- &freq_multiplier,
- &devpriv->clock_ns);
- if (retval < 0) {
- dev_err(dev->class_dev,
- "bug, failed to find pll parameters\n");
- return retval;
- }
-
- ni_writew(dev, devpriv->clock_and_fout2, NI_M_CLK_FOUT2_REG);
- pll_control_bits |= NI_M_PLL_CTRL_DIVISOR(freq_divider) |
- NI_M_PLL_CTRL_MULTIPLIER(freq_multiplier);
-
- ni_writew(dev, pll_control_bits, NI_M_PLL_CTRL_REG);
- devpriv->clock_source = source;
-	/*
-	 * It seems to typically take a few hundred microseconds for the PLL
-	 * to lock.
-	 */
- for (i = 0; i < timeout; ++i) {
- if (ni_readw(dev, NI_M_PLL_STATUS_REG) & NI_M_PLL_STATUS_LOCKED)
- break;
- udelay(1);
- }
- if (i == timeout) {
- dev_err(dev->class_dev,
- "%s: timed out waiting for PLL to lock to reference clock source %i with period %i ns\n",
- __func__, source, period_ns);
- return -ETIMEDOUT;
- }
- return 3;
-}
-
-static int ni_set_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
-{
- struct ni_private *devpriv = dev->private;
-
- if (source == NI_MIO_INTERNAL_CLOCK) {
- devpriv->rtsi_trig_direction_reg &= ~NISTC_RTSI_TRIG_USE_CLK;
- ni_stc_writew(dev, devpriv->rtsi_trig_direction_reg,
- NISTC_RTSI_TRIG_DIR_REG);
- devpriv->clock_ns = TIMEBASE_1_NS;
- if (devpriv->is_m_series) {
- devpriv->clock_and_fout2 &=
- ~(NI_M_CLK_FOUT2_TIMEBASE1_PLL |
- NI_M_CLK_FOUT2_TIMEBASE3_PLL);
- ni_writew(dev, devpriv->clock_and_fout2,
- NI_M_CLK_FOUT2_REG);
- ni_writew(dev, 0, NI_M_PLL_CTRL_REG);
- }
- devpriv->clock_source = source;
- } else {
- if (devpriv->is_m_series) {
- return ni_mseries_set_pll_master_clock(dev, source,
- period_ns);
- } else {
- if (source == NI_MIO_RTSI_CLOCK) {
- devpriv->rtsi_trig_direction_reg |=
- NISTC_RTSI_TRIG_USE_CLK;
- ni_stc_writew(dev,
- devpriv->rtsi_trig_direction_reg,
- NISTC_RTSI_TRIG_DIR_REG);
- if (period_ns == 0) {
- dev_err(dev->class_dev,
- "we don't handle an unspecified clock period correctly yet, returning error\n");
- return -EINVAL;
- }
- devpriv->clock_ns = period_ns;
- devpriv->clock_source = source;
- } else {
- return -EINVAL;
- }
- }
- }
- return 3;
-}
-
-static int ni_valid_rtsi_output_source(struct comedi_device *dev,
- unsigned chan, unsigned source)
-{
- struct ni_private *devpriv = dev->private;
-
- if (chan >= NISTC_RTSI_TRIG_NUM_CHAN(devpriv->is_m_series)) {
- if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
- if (source == NI_RTSI_OUTPUT_RTSI_OSC)
- return 1;
-
- dev_err(dev->class_dev,
- "%s: invalid source for channel=%i, channel %i is always the RTSI clock for pre-m-series boards\n",
- __func__, chan, NISTC_RTSI_TRIG_OLD_CLK_CHAN);
- return 0;
- }
- return 0;
- }
- switch (source) {
- case NI_RTSI_OUTPUT_ADR_START1:
- case NI_RTSI_OUTPUT_ADR_START2:
- case NI_RTSI_OUTPUT_SCLKG:
- case NI_RTSI_OUTPUT_DACUPDN:
- case NI_RTSI_OUTPUT_DA_START1:
- case NI_RTSI_OUTPUT_G_SRC0:
- case NI_RTSI_OUTPUT_G_GATE0:
- case NI_RTSI_OUTPUT_RGOUT0:
- case NI_RTSI_OUTPUT_RTSI_BRD_0:
- return 1;
- case NI_RTSI_OUTPUT_RTSI_OSC:
- return (devpriv->is_m_series) ? 1 : 0;
- default:
- return 0;
- }
-}
-
-static int ni_set_rtsi_routing(struct comedi_device *dev,
- unsigned chan, unsigned src)
-{
- struct ni_private *devpriv = dev->private;
-
- if (ni_valid_rtsi_output_source(dev, chan, src) == 0)
- return -EINVAL;
- if (chan < 4) {
- devpriv->rtsi_trig_a_output_reg &= ~NISTC_RTSI_TRIG_MASK(chan);
- devpriv->rtsi_trig_a_output_reg |= NISTC_RTSI_TRIG(chan, src);
- ni_stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
- NISTC_RTSI_TRIGA_OUT_REG);
- } else if (chan < 8) {
- devpriv->rtsi_trig_b_output_reg &= ~NISTC_RTSI_TRIG_MASK(chan);
- devpriv->rtsi_trig_b_output_reg |= NISTC_RTSI_TRIG(chan, src);
- ni_stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
- NISTC_RTSI_TRIGB_OUT_REG);
- }
- return 2;
-}
-
-static unsigned ni_get_rtsi_routing(struct comedi_device *dev, unsigned chan)
-{
- struct ni_private *devpriv = dev->private;
-
- if (chan < 4) {
- return NISTC_RTSI_TRIG_TO_SRC(chan,
- devpriv->rtsi_trig_a_output_reg);
- } else if (chan < NISTC_RTSI_TRIG_NUM_CHAN(devpriv->is_m_series)) {
- return NISTC_RTSI_TRIG_TO_SRC(chan,
- devpriv->rtsi_trig_b_output_reg);
- } else {
- if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN)
- return NI_RTSI_OUTPUT_RTSI_OSC;
- dev_err(dev->class_dev, "bug! should never get here?\n");
- return 0;
- }
-}
-
-static int ni_rtsi_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int max_chan = NISTC_RTSI_TRIG_NUM_CHAN(devpriv->is_m_series);
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- if (chan < max_chan) {
- devpriv->rtsi_trig_direction_reg |=
- NISTC_RTSI_TRIG_DIR(chan, devpriv->is_m_series);
- } else if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
- devpriv->rtsi_trig_direction_reg |=
- NISTC_RTSI_TRIG_DRV_CLK;
- }
- ni_stc_writew(dev, devpriv->rtsi_trig_direction_reg,
- NISTC_RTSI_TRIG_DIR_REG);
- break;
- case INSN_CONFIG_DIO_INPUT:
- if (chan < max_chan) {
- devpriv->rtsi_trig_direction_reg &=
- ~NISTC_RTSI_TRIG_DIR(chan, devpriv->is_m_series);
- } else if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
- devpriv->rtsi_trig_direction_reg &=
- ~NISTC_RTSI_TRIG_DRV_CLK;
- }
- ni_stc_writew(dev, devpriv->rtsi_trig_direction_reg,
- NISTC_RTSI_TRIG_DIR_REG);
- break;
- case INSN_CONFIG_DIO_QUERY:
- if (chan < max_chan) {
- data[1] =
- (devpriv->rtsi_trig_direction_reg &
- NISTC_RTSI_TRIG_DIR(chan, devpriv->is_m_series))
- ? INSN_CONFIG_DIO_OUTPUT
- : INSN_CONFIG_DIO_INPUT;
- } else if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
- data[1] = (devpriv->rtsi_trig_direction_reg &
- NISTC_RTSI_TRIG_DRV_CLK)
- ? INSN_CONFIG_DIO_OUTPUT
- : INSN_CONFIG_DIO_INPUT;
- }
- return 2;
- case INSN_CONFIG_SET_CLOCK_SRC:
- return ni_set_master_clock(dev, data[1], data[2]);
- case INSN_CONFIG_GET_CLOCK_SRC:
- data[1] = devpriv->clock_source;
- data[2] = devpriv->clock_ns;
- return 3;
- case INSN_CONFIG_SET_ROUTING:
- return ni_set_rtsi_routing(dev, chan, data[1]);
- case INSN_CONFIG_GET_ROUTING:
- data[1] = ni_get_rtsi_routing(dev, chan);
- return 2;
- default:
- return -EINVAL;
- }
- return 1;
-}
-
-static int ni_rtsi_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = 0;
-
- return insn->n;
-}
-
-static void ni_rtsi_init(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
-
- /* Initialises the RTSI bus signal switch to a default state */
-
- /*
- * Use 10MHz instead of 20MHz for RTSI clock frequency. Appears
- * to have no effect, at least on pxi-6281, which always uses
- * 20MHz rtsi clock frequency
- */
- devpriv->clock_and_fout2 = NI_M_CLK_FOUT2_RTSI_10MHZ;
- /* Set clock mode to internal */
- if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0)
- dev_err(dev->class_dev, "ni_set_master_clock failed, bug?\n");
- /* default internal lines routing to RTSI bus lines */
- devpriv->rtsi_trig_a_output_reg =
- NISTC_RTSI_TRIG(0, NI_RTSI_OUTPUT_ADR_START1) |
- NISTC_RTSI_TRIG(1, NI_RTSI_OUTPUT_ADR_START2) |
- NISTC_RTSI_TRIG(2, NI_RTSI_OUTPUT_SCLKG) |
- NISTC_RTSI_TRIG(3, NI_RTSI_OUTPUT_DACUPDN);
- ni_stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
- NISTC_RTSI_TRIGA_OUT_REG);
- devpriv->rtsi_trig_b_output_reg =
- NISTC_RTSI_TRIG(4, NI_RTSI_OUTPUT_DA_START1) |
- NISTC_RTSI_TRIG(5, NI_RTSI_OUTPUT_G_SRC0) |
- NISTC_RTSI_TRIG(6, NI_RTSI_OUTPUT_G_GATE0);
- if (devpriv->is_m_series)
- devpriv->rtsi_trig_b_output_reg |=
- NISTC_RTSI_TRIG(7, NI_RTSI_OUTPUT_RTSI_OSC);
- ni_stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
- NISTC_RTSI_TRIGB_OUT_REG);
-
- /*
- * Sets the source and direction of the 4 on board lines
- * ni_stc_writew(dev, 0, NISTC_RTSI_BOARD_REG);
- */
-}
-
-#ifdef PCIDMA
-static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
- int retval;
-
- retval = ni_request_gpct_mite_channel(dev, counter->counter_index,
- COMEDI_INPUT);
- if (retval) {
- dev_err(dev->class_dev,
- "no dma channel available for use by counter\n");
- return retval;
- }
- ni_tio_acknowledge(counter);
- ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
-
- return ni_tio_cmd(dev, s);
-}
-
-static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
- int retval;
-
- retval = ni_tio_cancel(counter);
- ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
- ni_release_gpct_mite_channel(dev, counter->counter_index);
- return retval;
-}
-#endif
-
-static irqreturn_t ni_E_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- unsigned short a_status;
- unsigned short b_status;
- unsigned int ai_mite_status = 0;
- unsigned int ao_mite_status = 0;
- unsigned long flags;
-#ifdef PCIDMA
- struct ni_private *devpriv = dev->private;
- struct mite_struct *mite = devpriv->mite;
-#endif
-
- if (!dev->attached)
- return IRQ_NONE;
- smp_mb(); /* make sure dev->attached is checked before handler does anything else. */
-
- /* lock to avoid race with comedi_poll */
- spin_lock_irqsave(&dev->spinlock, flags);
- a_status = ni_stc_readw(dev, NISTC_AI_STATUS1_REG);
- b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
-#ifdef PCIDMA
- if (mite) {
- struct ni_private *devpriv = dev->private;
- unsigned long flags_too;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
- if (devpriv->ai_mite_chan) {
- ai_mite_status = mite_get_status(devpriv->ai_mite_chan);
- if (ai_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ai_mite_chan->channel));
- }
- if (devpriv->ao_mite_chan) {
- ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
- if (ao_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ao_mite_chan->channel));
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too);
- }
-#endif
- ack_a_interrupt(dev, a_status);
- ack_b_interrupt(dev, b_status);
- if ((a_status & NISTC_AI_STATUS1_INTA) || (ai_mite_status & CHSR_INT))
- handle_a_interrupt(dev, a_status, ai_mite_status);
- if ((b_status & NISTC_AO_STATUS1_INTB) || (ao_mite_status & CHSR_INT))
- handle_b_interrupt(dev, b_status, ao_mite_status);
- handle_gpct_interrupt(dev, 0);
- handle_gpct_interrupt(dev, 1);
- handle_cdio_interrupt(dev);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
- return IRQ_HANDLED;
-}
-
-static int ni_alloc_private(struct comedi_device *dev)
-{
- struct ni_private *devpriv;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- spin_lock_init(&devpriv->window_lock);
- spin_lock_init(&devpriv->soft_reg_copy_lock);
- spin_lock_init(&devpriv->mite_channel_lock);
-
- return 0;
-}
-
-static int ni_E_init(struct comedi_device *dev,
- unsigned interrupt_pin, unsigned irq_polarity)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- int ret;
- int i;
-
- if (board->n_aochan > MAX_N_AO_CHAN) {
- dev_err(dev->class_dev, "bug! n_aochan > MAX_N_AO_CHAN\n");
- return -EINVAL;
- }
-
- /* initialize clock dividers */
- devpriv->clock_and_fout = NISTC_CLK_FOUT_SLOW_DIV2 |
- NISTC_CLK_FOUT_SLOW_TIMEBASE |
- NISTC_CLK_FOUT_TO_BOARD_DIV2 |
- NISTC_CLK_FOUT_TO_BOARD;
- if (!devpriv->is_6xxx) {
- /* BEAM is this needed for PCI-6143 ?? */
- devpriv->clock_and_fout |= (NISTC_CLK_FOUT_AI_OUT_DIV2 |
- NISTC_CLK_FOUT_AO_OUT_DIV2);
- }
- ni_stc_writew(dev, devpriv->clock_and_fout, NISTC_CLK_FOUT_REG);
-
- ret = comedi_alloc_subdevices(dev, NI_NUM_SUBDEVICES);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[NI_AI_SUBDEV];
- if (board->n_adchan) {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_DITHER;
- if (!devpriv->is_611x)
- s->subdev_flags |= SDF_GROUND | SDF_COMMON | SDF_OTHER;
- if (board->ai_maxdata > 0xffff)
- s->subdev_flags |= SDF_LSAMPL;
- if (devpriv->is_m_series)
- s->subdev_flags |= SDF_SOFT_CALIBRATED;
- s->n_chan = board->n_adchan;
- s->maxdata = board->ai_maxdata;
- s->range_table = ni_range_lkup[board->gainlkup];
- s->insn_read = ni_ai_insn_read;
- s->insn_config = ni_ai_insn_config;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 512;
- s->do_cmdtest = ni_ai_cmdtest;
- s->do_cmd = ni_ai_cmd;
- s->cancel = ni_ai_reset;
- s->poll = ni_ai_poll;
- s->munge = ni_ai_munge;
-
- if (devpriv->mite)
- s->async_dma_dir = DMA_FROM_DEVICE;
- }
-
- /* reset the analog input configuration */
- ni_ai_reset(dev, s);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[NI_AO_SUBDEV];
- if (board->n_aochan) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_DEGLITCH | SDF_GROUND;
- if (devpriv->is_m_series)
- s->subdev_flags |= SDF_SOFT_CALIBRATED;
- s->n_chan = board->n_aochan;
- s->maxdata = board->ao_maxdata;
- s->range_table = board->ao_range_table;
- s->insn_config = ni_ao_insn_config;
- s->insn_write = ni_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /*
- * Along with the IRQ we need either a FIFO or DMA for
- * async command support.
- */
- if (dev->irq && (board->ao_fifo_depth || devpriv->mite)) {
- dev->write_subdev = s;
- s->subdev_flags |= SDF_CMD_WRITE;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = ni_ao_cmdtest;
- s->do_cmd = ni_ao_cmd;
- s->cancel = ni_ao_reset;
- if (!devpriv->is_m_series)
- s->munge = ni_ao_munge;
-
- if (devpriv->mite)
- s->async_dma_dir = DMA_TO_DEVICE;
- }
-
- if (devpriv->is_67xx)
- init_ao_67xx(dev, s);
-
- /* reset the analog output configuration */
- ni_ao_reset(dev, s);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[NI_DIO_SUBDEV];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = board->has_32dio_chan ? 32 : 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- if (devpriv->is_m_series) {
- s->subdev_flags |= SDF_LSAMPL;
- s->insn_bits = ni_m_series_dio_insn_bits;
- s->insn_config = ni_m_series_dio_insn_config;
- if (dev->irq) {
- s->subdev_flags |= SDF_CMD_WRITE /* | SDF_CMD_READ */;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = ni_cdio_cmdtest;
- s->do_cmd = ni_cdio_cmd;
- s->cancel = ni_cdio_cancel;
-
- /* M-series boards use DMA */
- s->async_dma_dir = DMA_BIDIRECTIONAL;
- }
-
- /* reset DIO and set all channels to inputs */
- ni_writel(dev, NI_M_CDO_CMD_RESET |
- NI_M_CDI_CMD_RESET,
- NI_M_CDIO_CMD_REG);
- ni_writel(dev, s->io_bits, NI_M_DIO_DIR_REG);
- } else {
- s->insn_bits = ni_dio_insn_bits;
- s->insn_config = ni_dio_insn_config;
-
- /* set all channels to inputs */
- devpriv->dio_control = NISTC_DIO_CTRL_DIR(s->io_bits);
- ni_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
- }
-
- /* 8255 device */
- s = &dev->subdevices[NI_8255_DIO_SUBDEV];
- if (board->has_8255) {
- ret = subdev_8255_init(dev, s, ni_8255_callback,
- NI_E_8255_BASE);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* formerly general purpose counter/timer device, but no longer used */
- s = &dev->subdevices[NI_UNUSED_SUBDEV];
- s->type = COMEDI_SUBD_UNUSED;
-
- /* Calibration subdevice */
- s = &dev->subdevices[NI_CALIBRATION_SUBDEV];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_INTERNAL;
- s->n_chan = 1;
- s->maxdata = 0;
- if (devpriv->is_m_series) {
- /* internal PWM output used for AI nonlinearity calibration */
- s->insn_config = ni_m_series_pwm_config;
-
- ni_writel(dev, 0x0, NI_M_CAL_PWM_REG);
- } else if (devpriv->is_6143) {
- /* internal PWM output used for AI nonlinearity calibration */
- s->insn_config = ni_6143_pwm_config;
- } else {
- s->subdev_flags |= SDF_WRITABLE;
- s->insn_read = ni_calib_insn_read;
- s->insn_write = ni_calib_insn_write;
-
- /* setup the caldacs and find the real n_chan and maxdata */
- caldac_setup(dev, s);
- }
-
- /* EEPROM subdevice */
- s = &dev->subdevices[NI_EEPROM_SUBDEV];
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
- s->maxdata = 0xff;
- if (devpriv->is_m_series) {
- s->n_chan = M_SERIES_EEPROM_SIZE;
- s->insn_read = ni_m_series_eeprom_insn_read;
- } else {
- s->n_chan = 512;
- s->insn_read = ni_eeprom_insn_read;
- }
-
- /* Digital I/O (PFI) subdevice */
- s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->maxdata = 1;
- if (devpriv->is_m_series) {
- s->n_chan = 16;
- s->insn_bits = ni_pfi_insn_bits;
-
- ni_writew(dev, s->state, NI_M_PFI_DO_REG);
- for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
- ni_writew(dev, devpriv->pfi_output_select_reg[i],
- NI_M_PFI_OUT_SEL_REG(i));
- }
- } else {
- s->n_chan = 10;
- }
- s->insn_config = ni_pfi_insn_config;
-
- ni_set_bits(dev, NISTC_IO_BIDIR_PIN_REG, ~0, 0);
-
- /* cs5529 calibration adc */
- s = &dev->subdevices[NI_CS5529_CALIBRATION_SUBDEV];
- if (devpriv->is_67xx) {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_INTERNAL;
- /* one channel for each analog output channel */
- s->n_chan = board->n_aochan;
- s->maxdata = (1 << 16) - 1;
- s->range_table = &range_unknown; /* XXX */
- s->insn_read = cs5529_ai_insn_read;
- s->insn_config = NULL;
- init_cs5529(dev);
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Serial */
- s = &dev->subdevices[NI_SERIAL_SUBDEV];
- s->type = COMEDI_SUBD_SERIAL;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 1;
- s->maxdata = 0xff;
- s->insn_config = ni_serial_insn_config;
- devpriv->serial_interval_ns = 0;
- devpriv->serial_hw_mode = 0;
-
- /* RTSI */
- s = &dev->subdevices[NI_RTSI_SUBDEV];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = 8;
- s->maxdata = 1;
- s->insn_bits = ni_rtsi_insn_bits;
- s->insn_config = ni_rtsi_insn_config;
- ni_rtsi_init(dev);
-
- /* allocate and initialize the gpct counter device */
- devpriv->counter_dev = ni_gpct_device_construct(dev,
- ni_gpct_write_register,
- ni_gpct_read_register,
- (devpriv->is_m_series)
- ? ni_gpct_variant_m_series
- : ni_gpct_variant_e_series,
- NUM_GPCT);
- if (!devpriv->counter_dev)
- return -ENOMEM;
-
- /* Counter (gpct) subdevices */
- for (i = 0; i < NUM_GPCT; ++i) {
- struct ni_gpct *gpct = &devpriv->counter_dev->counters[i];
-
- /* setup and initialize the counter */
- gpct->chip_index = 0;
- gpct->counter_index = i;
- ni_tio_init_counter(gpct);
-
- s = &dev->subdevices[NI_GPCT_SUBDEV(i)];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
- s->n_chan = 3;
- s->maxdata = (devpriv->is_m_series) ? 0xffffffff
- : 0x00ffffff;
- s->insn_read = ni_tio_insn_read;
-		s->insn_write = ni_tio_insn_write;
- s->insn_config = ni_tio_insn_config;
-#ifdef PCIDMA
- if (dev->irq && devpriv->mite) {
- s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */;
- s->len_chanlist = 1;
- s->do_cmdtest = ni_tio_cmdtest;
- s->do_cmd = ni_gpct_cmd;
- s->cancel = ni_gpct_cancel;
-
- s->async_dma_dir = DMA_BIDIRECTIONAL;
- }
-#endif
- s->private = gpct;
- }
-
- /* Frequency output subdevice */
- s = &dev->subdevices[NI_FREQ_OUT_SUBDEV];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 1;
- s->maxdata = 0xf;
- s->insn_read = ni_freq_out_insn_read;
- s->insn_write = ni_freq_out_insn_write;
- s->insn_config = ni_freq_out_insn_config;
-
- if (dev->irq) {
- ni_stc_writew(dev,
- (irq_polarity ? NISTC_INT_CTRL_INT_POL : 0) |
- (NISTC_INT_CTRL_3PIN_INT & 0) |
- NISTC_INT_CTRL_INTA_ENA |
- NISTC_INT_CTRL_INTB_ENA |
- NISTC_INT_CTRL_INTA_SEL(interrupt_pin) |
- NISTC_INT_CTRL_INTB_SEL(interrupt_pin),
- NISTC_INT_CTRL_REG);
- }
-
- /* DMA setup */
- ni_writeb(dev, devpriv->ai_ao_select_reg, NI_E_DMA_AI_AO_SEL_REG);
- ni_writeb(dev, devpriv->g0_g1_select_reg, NI_E_DMA_G0_G1_SEL_REG);
-
- if (devpriv->is_6xxx) {
- ni_writeb(dev, 0, NI611X_MAGIC_REG);
- } else if (devpriv->is_m_series) {
- int channel;
-
- for (channel = 0; channel < board->n_aochan; ++channel) {
- ni_writeb(dev, 0xf,
- NI_M_AO_WAVEFORM_ORDER_REG(channel));
- ni_writeb(dev, 0x0,
- NI_M_AO_REF_ATTENUATION_REG(channel));
- }
- ni_writeb(dev, 0x0, NI_M_AO_CALIB_REG);
- }
-
- return 0;
-}
-
-static void mio_common_detach(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
-
- if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
- }
-}
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
deleted file mode 100644
index e3d821bf2d6a..000000000000
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- comedi/drivers/ni_mio_cs.c
- Hardware driver for NI PCMCIA MIO E series cards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_mio_cs
-Description: National Instruments DAQCard E series
-Author: ds
-Status: works
-Devices: [National Instruments] DAQCard-AI-16XE-50 (ni_mio_cs),
- DAQCard-AI-16E-4, DAQCard-6062E, DAQCard-6024E, DAQCard-6036E
-Updated: Thu Oct 23 19:43:17 CDT 2003
-
-See the notes in the ni_atmio.o driver.
-*/
-/*
- The real guts of the driver is in ni_mio_common.c, which is
- included by all the E series drivers.
-
- References for specifications:
-
- 341080a.pdf DAQCard E Series Register Level Programmer Manual
-
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include "../comedi_pcmcia.h"
-#include "ni_stc.h"
-#include "8255.h"
-
-/*
- * AT specific setup
- */
-
-static const struct ni_board_struct ni_boards[] = {
- {
- .name = "DAQCard-ai-16xe-50",
- .device_id = 0x010d,
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 1024,
- .gainlkup = ai_gain_8,
- .ai_speed = 5000,
- .caldac = { dac8800, dac8043 },
- }, {
- .name = "DAQCard-ai-16e-4",
- .device_id = 0x010c,
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 1024,
- .gainlkup = ai_gain_16,
- .ai_speed = 4000,
- .caldac = { mb88341 }, /* verified */
- }, {
- .name = "DAQCard-6062E",
- .device_id = 0x02c4,
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 8192,
- .gainlkup = ai_gain_16,
- .ai_speed = 2000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1176,
- .caldac = { ad8804_debug }, /* verified */
- }, {
- /* specs incorrect! */
- .name = "DAQCard-6024E",
- .device_id = 0x075e,
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 1024,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000000,
- .caldac = { ad8804_debug },
- }, {
- /* specs incorrect! */
- .name = "DAQCard-6036E",
- .device_id = 0x0245,
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 1024,
- .alwaysdither = 1,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000000,
- .caldac = { ad8804_debug },
- },
-#if 0
- {
- .name = "DAQCard-6715",
- .device_id = 0x0000, /* unknown */
- .n_aochan = 8,
- .ao_maxdata = 0x0fff,
- .ao_671x = 8192,
- .caldac = { mb88341, mb88341 },
- },
-#endif
-};
-
-#include "ni_mio_common.c"
-
-static const void *ni_getboardtype(struct comedi_device *dev,
- struct pcmcia_device *link)
-{
- static const struct ni_board_struct *board;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
- board = &ni_boards[i];
- if (board->device_id == link->card_id)
- return board;
- }
- return NULL;
-}
-
-static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data)
-{
- int base, ret;
-
- p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
-
- for (base = 0x000; base < 0x400; base += 0x20) {
- p_dev->resource[0]->start = base;
- ret = pcmcia_request_io(p_dev);
- if (!ret)
- return 0;
- }
- return -ENODEV;
-}
-
-static int mio_cs_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- static const struct ni_board_struct *board;
- int ret;
-
- board = ni_getboardtype(dev, link);
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
- ret = comedi_pcmcia_enable(dev, mio_pcmcia_config_loop);
- if (ret)
- return ret;
- dev->iobase = link->resource[0]->start;
-
- link->priv = dev;
- ret = pcmcia_request_irq(link, ni_E_interrupt);
- if (ret)
- return ret;
- dev->irq = link->irq;
-
- ret = ni_alloc_private(dev);
- if (ret)
- return ret;
-
- return ni_E_init(dev, 0, 1);
-}
-
-static void mio_cs_detach(struct comedi_device *dev)
-{
- mio_common_detach(dev);
- comedi_pcmcia_disable(dev);
-}
-
-static struct comedi_driver driver_ni_mio_cs = {
- .driver_name = "ni_mio_cs",
- .module = THIS_MODULE,
- .auto_attach = mio_cs_auto_attach,
- .detach = mio_cs_detach,
-};
-
-static int cs_attach(struct pcmcia_device *link)
-{
- return comedi_pcmcia_auto_config(link, &driver_ni_mio_cs);
-}
-
-static const struct pcmcia_device_id ni_mio_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010d), /* DAQCard-ai-16xe-50 */
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010c), /* DAQCard-ai-16e-4 */
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x02c4), /* DAQCard-6062E */
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x075e), /* DAQCard-6024E */
- PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0245), /* DAQCard-6036E */
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, ni_mio_cs_ids);
-
-static struct pcmcia_driver ni_mio_cs_driver = {
- .name = "ni_mio_cs",
- .owner = THIS_MODULE,
- .id_table = ni_mio_cs_ids,
- .probe = cs_attach,
- .remove = comedi_pcmcia_auto_unconfig,
-};
-module_comedi_pcmcia_driver(driver_ni_mio_cs, ni_mio_cs_driver);
-
-MODULE_DESCRIPTION("Comedi driver for National Instruments DAQCard E series");
-MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
deleted file mode 100644
index ac79099bc23e..000000000000
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ /dev/null
@@ -1,1026 +0,0 @@
-/*
- comedi/drivers/ni_pcidio.c
- driver for National Instruments PCI-DIO-32HS
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999,2002 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_pcidio
-Description: National Instruments PCI-DIO32HS, PCI-6533
-Author: ds
-Status: works
-Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio)
- [National Instruments] PXI-6533, PCI-6533 (pxi-6533)
- [National Instruments] PCI-6534 (pci-6534)
-Updated: Mon, 09 Jan 2012 14:27:23 +0000
-
-The DIO32HS board appears as one subdevice, with 32 channels.
-Each channel is individually I/O configurable. The channel order
-is 0=A0, 1=A1, 2=A2, ... 8=B0, 16=C0, 24=D0. The driver only
-supports simple digital I/O; no handshaking is supported.
-
-DMA mostly works for the PCI-DIO32HS, but only in timed input mode.
-
-The PCI-DIO-32HS/PCI-6533 has a configurable external trigger. Setting
-scan_begin_arg to 0 or CR_EDGE triggers on the leading edge. Setting
-scan_begin_arg to CR_INVERT or (CR_EDGE | CR_INVERT) triggers on the
-trailing edge.
-
-This driver could be easily modified to support AT-MIO32HS and
-AT-MIO96.
-
-The PCI-6534 requires a firmware upload after power-up to work. The
-firmware data, and instructions for loading it with comedi_config,
-are contained in the
-comedi_nonfree_firmware tarball available from http://www.comedi.org
-*/
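
As a rough illustration of the external-trigger setup described in the comment
above, a user-space comedilib sketch might look like the following. The device
node, the stop count and the way the returned file descriptor is consumed are
assumptions for illustration, not something this driver dictates:

	#include <string.h>
	#include <comedilib.h>

	#define N_CHAN 32	/* the board exposes one 32-channel DIO subdevice */

	/* Returns a file descriptor to read() packed 32-bit samples from, or -1. */
	int start_ext_triggered_input(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
		unsigned int chanlist[N_CHAN];
		comedi_cmd cmd;
		int i;

		if (!dev)
			return -1;

		for (i = 0; i < N_CHAN; i++)
			chanlist[i] = CR_PACK(i, 0, AREF_GROUND);

		memset(&cmd, 0, sizeof(cmd));
		cmd.subdev         = 0;			/* the single DIO subdevice */
		cmd.start_src      = TRIG_NOW;
		cmd.scan_begin_src = TRIG_EXT;
		cmd.scan_begin_arg = CR_EDGE | CR_INVERT;	/* trailing-edge trigger */
		cmd.convert_src    = TRIG_NOW;
		cmd.scan_end_src   = TRIG_COUNT;
		cmd.scan_end_arg   = N_CHAN;
		cmd.stop_src       = TRIG_COUNT;
		cmd.stop_arg       = 1000;		/* acquire 1000 scans, then stop */
		cmd.chanlist       = chanlist;
		cmd.chanlist_len   = N_CHAN;

		/* run the test twice so the driver can fix up the arguments */
		comedi_command_test(dev, &cmd);
		comedi_command_test(dev, &cmd);
		if (comedi_command(dev, &cmd) < 0) {
			comedi_perror("comedi_command");
			comedi_close(dev);
			return -1;
		}
		return comedi_fileno(dev);
	}
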
-
-#define USE_DMA
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-
-#include "../comedi_pci.h"
-
-#include "mite.h"
-
-/* defines for the PCI-DIO-32HS */
-
-#define Window_Address 4 /* W */
-#define Interrupt_And_Window_Status 4 /* R */
-#define IntStatus1 (1<<0)
-#define IntStatus2 (1<<1)
-#define WindowAddressStatus_mask 0x7c
-
-#define Master_DMA_And_Interrupt_Control 5 /* W */
-#define InterruptLine(x) ((x)&3)
-#define OpenInt (1<<2)
-#define Group_Status 5 /* R */
-#define DataLeft (1<<0)
-#define Req (1<<2)
-#define StopTrig (1<<3)
-
-#define Group_1_Flags 6 /* R */
-#define Group_2_Flags 7 /* R */
-#define TransferReady (1<<0)
-#define CountExpired (1<<1)
-#define Waited (1<<5)
-#define PrimaryTC (1<<6)
-#define SecondaryTC (1<<7)
- /* #define SerialRose */
- /* #define ReqRose */
- /* #define Paused */
-
-#define Group_1_First_Clear 6 /* W */
-#define Group_2_First_Clear 7 /* W */
-#define ClearWaited (1<<3)
-#define ClearPrimaryTC (1<<4)
-#define ClearSecondaryTC (1<<5)
-#define DMAReset (1<<6)
-#define FIFOReset (1<<7)
-#define ClearAll 0xf8
-
-#define Group_1_FIFO 8 /* W */
-#define Group_2_FIFO 12 /* W */
-
-#define Transfer_Count 20
-#define Chip_ID_D 24
-#define Chip_ID_I 25
-#define Chip_ID_O 26
-#define Chip_Version 27
-#define Port_IO(x) (28+(x))
-#define Port_Pin_Directions(x) (32+(x))
-#define Port_Pin_Mask(x) (36+(x))
-#define Port_Pin_Polarities(x) (40+(x))
-
-#define Master_Clock_Routing 45
-#define RTSIClocking(x) (((x)&3)<<4)
-
-#define Group_1_Second_Clear 46 /* W */
-#define Group_2_Second_Clear 47 /* W */
-#define ClearExpired (1<<0)
-
-#define Port_Pattern(x) (48+(x))
-
-#define Data_Path 64
-#define FIFOEnableA (1<<0)
-#define FIFOEnableB (1<<1)
-#define FIFOEnableC (1<<2)
-#define FIFOEnableD (1<<3)
-#define Funneling(x) (((x)&3)<<4)
-#define GroupDirection (1<<7)
-
-#define Protocol_Register_1 65
-#define OpMode Protocol_Register_1
-#define RunMode(x) ((x)&7)
-#define Numbered (1<<3)
-
-#define Protocol_Register_2 66
-#define ClockReg Protocol_Register_2
-#define ClockLine(x) (((x)&3)<<5)
-#define InvertStopTrig (1<<7)
-#define DataLatching(x) (((x)&3)<<5)
-
-#define Protocol_Register_3 67
-#define Sequence Protocol_Register_3
-
-#define Protocol_Register_14 68 /* 16 bit */
-#define ClockSpeed Protocol_Register_14
-
-#define Protocol_Register_4 70
-#define ReqReg Protocol_Register_4
-#define ReqConditioning(x) (((x)&7)<<3)
-
-#define Protocol_Register_5 71
-#define BlockMode Protocol_Register_5
-
-#define FIFO_Control 72
-#define ReadyLevel(x) ((x)&7)
-
-#define Protocol_Register_6 73
-#define LinePolarities Protocol_Register_6
-#define InvertAck (1<<0)
-#define InvertReq (1<<1)
-#define InvertClock (1<<2)
-#define InvertSerial (1<<3)
-#define OpenAck (1<<4)
-#define OpenClock (1<<5)
-
-#define Protocol_Register_7 74
-#define AckSer Protocol_Register_7
-#define AckLine(x) (((x)&3)<<2)
-#define ExchangePins (1<<7)
-
-#define Interrupt_Control 75
- /* bits same as flags */
-
-#define DMA_Line_Control_Group1 76
-#define DMA_Line_Control_Group2 108
-/* channel zero is none */
-static inline unsigned primary_DMAChannel_bits(unsigned channel)
-{
- return channel & 0x3;
-}
-
-static inline unsigned secondary_DMAChannel_bits(unsigned channel)
-{
- return (channel << 2) & 0xc;
-}
-
-#define Transfer_Size_Control 77
-#define TransferWidth(x) ((x)&3)
-#define TransferLength(x) (((x)&3)<<3)
-#define RequireRLevel (1<<5)
-
-#define Protocol_Register_15 79
-#define DAQOptions Protocol_Register_15
-#define StartSource(x) ((x)&0x3)
-#define InvertStart (1<<2)
-#define StopSource(x) (((x)&0x3)<<3)
-#define ReqStart (1<<6)
-#define PreStart (1<<7)
-
-#define Pattern_Detection 81
-#define DetectionMethod (1<<0)
-#define InvertMatch (1<<1)
-#define IE_Pattern_Detection (1<<2)
-
-#define Protocol_Register_9 82
-#define ReqDelay Protocol_Register_9
-
-#define Protocol_Register_10 83
-#define ReqNotDelay Protocol_Register_10
-
-#define Protocol_Register_11 84
-#define AckDelay Protocol_Register_11
-
-#define Protocol_Register_12 85
-#define AckNotDelay Protocol_Register_12
-
-#define Protocol_Register_13 86
-#define Data1Delay Protocol_Register_13
-
-#define Protocol_Register_8 88 /* 32 bit */
-#define StartDelay Protocol_Register_8
-
-/* Firmware files for PCI-6534 */
-#define FW_PCI_6534_MAIN "ni6534a.bin"
-#define FW_PCI_6534_SCARAB_DI "niscrb01.bin"
-#define FW_PCI_6534_SCARAB_DO "niscrb02.bin"
-MODULE_FIRMWARE(FW_PCI_6534_MAIN);
-MODULE_FIRMWARE(FW_PCI_6534_SCARAB_DI);
-MODULE_FIRMWARE(FW_PCI_6534_SCARAB_DO);
-
-enum pci_6534_firmware_registers { /* 16 bit */
- Firmware_Control_Register = 0x100,
- Firmware_Status_Register = 0x104,
- Firmware_Data_Register = 0x108,
- Firmware_Mask_Register = 0x10c,
- Firmware_Debug_Register = 0x110,
-};
-/* main fpga registers (32 bit)*/
-enum pci_6534_fpga_registers {
- FPGA_Control1_Register = 0x200,
- FPGA_Control2_Register = 0x204,
- FPGA_Irq_Mask_Register = 0x208,
- FPGA_Status_Register = 0x20c,
- FPGA_Signature_Register = 0x210,
- FPGA_SCALS_Counter_Register = 0x280, /*write-clear */
- FPGA_SCAMS_Counter_Register = 0x284, /*write-clear */
- FPGA_SCBLS_Counter_Register = 0x288, /*write-clear */
- FPGA_SCBMS_Counter_Register = 0x28c, /*write-clear */
- FPGA_Temp_Control_Register = 0x2a0,
- FPGA_DAR_Register = 0x2a8,
- FPGA_ELC_Read_Register = 0x2b8,
- FPGA_ELC_Write_Register = 0x2bc,
-};
-enum FPGA_Control_Bits {
- FPGA_Enable_Bit = 0x8000,
-};
-
-#define TIMER_BASE 50 /* nanoseconds */
-
-#ifdef USE_DMA
-#define IntEn (CountExpired|Waited|PrimaryTC|SecondaryTC)
-#else
-#define IntEn (TransferReady|CountExpired|Waited|PrimaryTC|SecondaryTC)
-#endif
-
-enum nidio_boardid {
- BOARD_PCIDIO_32HS,
- BOARD_PXI6533,
- BOARD_PCI6534,
-};
-
-struct nidio_board {
- const char *name;
- unsigned int uses_firmware:1;
-};
-
-static const struct nidio_board nidio_boards[] = {
- [BOARD_PCIDIO_32HS] = {
- .name = "pci-dio-32hs",
- },
- [BOARD_PXI6533] = {
- .name = "pxi-6533",
- },
- [BOARD_PCI6534] = {
- .name = "pci-6534",
- .uses_firmware = 1,
- },
-};
-
-struct nidio96_private {
- struct mite_struct *mite;
- int boardtype;
- int dio;
- unsigned short OpModeBits;
- struct mite_channel *di_mite_chan;
- struct mite_dma_descriptor_ring *di_mite_ring;
- spinlock_t mite_channel_lock;
-};
-
-static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
-{
- struct nidio96_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->di_mite_chan);
- devpriv->di_mite_chan =
- mite_request_channel_in_range(devpriv->mite,
- devpriv->di_mite_ring, 1, 2);
- if (!devpriv->di_mite_chan) {
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- dev_err(dev->class_dev, "failed to reserve mite dma channel\n");
- return -EBUSY;
- }
- devpriv->di_mite_chan->dir = COMEDI_INPUT;
- writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
- secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
- dev->mmio + DMA_Line_Control_Group1);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- return 0;
-}
-
-static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
-{
- struct nidio96_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->di_mite_chan) {
- mite_dma_disarm(devpriv->di_mite_chan);
- mite_dma_reset(devpriv->di_mite_chan);
- mite_release_channel(devpriv->di_mite_chan);
- devpriv->di_mite_chan = NULL;
- writeb(primary_DMAChannel_bits(0) |
- secondary_DMAChannel_bits(0),
- dev->mmio + DMA_Line_Control_Group1);
- mmiowb();
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-
-static int setup_mite_dma(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct nidio96_private *devpriv = dev->private;
- int retval;
- unsigned long flags;
-
- retval = ni_pcidio_request_di_mite_channel(dev);
- if (retval)
- return retval;
-
- /* write alloc the entire buffer */
- comedi_buf_write_alloc(s, s->async->prealloc_bufsz);
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->di_mite_chan) {
- mite_prep_dma(devpriv->di_mite_chan, 32, 32);
- mite_dma_arm(devpriv->di_mite_chan);
- } else {
- retval = -EIO;
- }
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-
- return retval;
-}
-
-static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct nidio96_private *devpriv = dev->private;
- unsigned long irq_flags;
- int count;
-
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- spin_lock(&devpriv->mite_channel_lock);
- if (devpriv->di_mite_chan)
- mite_sync_input_dma(devpriv->di_mite_chan, s);
- spin_unlock(&devpriv->mite_channel_lock);
- count = comedi_buf_n_bytes_ready(s);
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- return count;
-}
-
-static irqreturn_t nidio_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct nidio96_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct mite_struct *mite = devpriv->mite;
- unsigned int auxdata;
- int flags;
- int status;
- int work = 0;
- unsigned int m_status = 0;
-
-	/* spurious interrupts */
- if (!dev->attached) {
- /* assume it's from another card */
- return IRQ_NONE;
- }
-
- /* Lock to avoid race with comedi_poll */
- spin_lock(&dev->spinlock);
-
- status = readb(dev->mmio + Interrupt_And_Window_Status);
- flags = readb(dev->mmio + Group_1_Flags);
-
- spin_lock(&devpriv->mite_channel_lock);
- if (devpriv->di_mite_chan)
- m_status = mite_get_status(devpriv->di_mite_chan);
-
- if (m_status & CHSR_INT) {
- if (m_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- mite->mite_io_addr +
- MITE_CHOR(devpriv->di_mite_chan->channel));
- mite_sync_input_dma(devpriv->di_mite_chan, s);
- /* XXX need to byteswap */
- }
- if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY |
- CHSR_DRQ1 | CHSR_MRDY)) {
- dev_dbg(dev->class_dev,
- "unknown mite interrupt, disabling IRQ\n");
- async->events |= COMEDI_CB_ERROR;
- disable_irq(dev->irq);
- }
- }
- spin_unlock(&devpriv->mite_channel_lock);
-
- while (status & DataLeft) {
- work++;
- if (work > 20) {
- dev_dbg(dev->class_dev, "too much work in interrupt\n");
- writeb(0x00,
- dev->mmio + Master_DMA_And_Interrupt_Control);
- break;
- }
-
- flags &= IntEn;
-
- if (flags & TransferReady) {
- while (flags & TransferReady) {
- work++;
- if (work > 100) {
- dev_dbg(dev->class_dev,
- "too much work in interrupt\n");
- writeb(0x00, dev->mmio +
- Master_DMA_And_Interrupt_Control
- );
- goto out;
- }
- auxdata = readl(dev->mmio + Group_1_FIFO);
- comedi_buf_write_samples(s, &auxdata, 1);
- flags = readb(dev->mmio + Group_1_Flags);
- }
- }
-
- if (flags & CountExpired) {
- writeb(ClearExpired, dev->mmio + Group_1_Second_Clear);
- async->events |= COMEDI_CB_EOA;
-
- writeb(0x00, dev->mmio + OpMode);
- break;
- } else if (flags & Waited) {
- writeb(ClearWaited, dev->mmio + Group_1_First_Clear);
- async->events |= COMEDI_CB_ERROR;
- break;
- } else if (flags & PrimaryTC) {
- writeb(ClearPrimaryTC,
- dev->mmio + Group_1_First_Clear);
- async->events |= COMEDI_CB_EOA;
- } else if (flags & SecondaryTC) {
- writeb(ClearSecondaryTC,
- dev->mmio + Group_1_First_Clear);
- async->events |= COMEDI_CB_EOA;
- }
-
- flags = readb(dev->mmio + Group_1_Flags);
- status = readb(dev->mmio + Interrupt_And_Window_Status);
- }
-
-out:
- comedi_handle_events(dev, s);
-#if 0
- if (!tag)
- writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control);
-#endif
-
- spin_unlock(&dev->spinlock);
- return IRQ_HANDLED;
-}
-
-static int ni_pcidio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- writel(s->io_bits, dev->mmio + Port_Pin_Directions(0));
-
- return insn->n;
-}
-
-static int ni_pcidio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- writel(s->state, dev->mmio + Port_IO(0));
-
- data[1] = readl(dev->mmio + Port_IO(0));
-
- return insn->n;
-}
-
-static int ni_pcidio_ns_to_timer(int *nanosec, unsigned int flags)
-{
- int divider, base;
-
- base = TIMER_BASE;
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- divider = (*nanosec + base / 2) / base;
- break;
- case CMDF_ROUND_DOWN:
- divider = (*nanosec) / base;
- break;
- case CMDF_ROUND_UP:
- divider = (*nanosec + base - 1) / base;
- break;
- }
-
- *nanosec = base * divider;
- return divider;
-}
-
-static int ni_pcidio_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
-#define MAX_SPEED (TIMER_BASE) /* in nanoseconds */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SPEED);
- /* no minimum speed */
- } else {
- /* TRIG_EXT */
- /* should be level/edge, hi/lo specification here */
- if ((cmd->scan_begin_arg & ~(CR_EDGE | CR_INVERT)) != 0) {
- cmd->scan_begin_arg &= (CR_EDGE | CR_INVERT);
- err |= -EINVAL;
- }
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- ni_pcidio_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int ni_pcidio_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct nidio96_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- writeb(devpriv->OpModeBits, dev->mmio + OpMode);
- s->async->inttrig = NULL;
-
- return 1;
-}
-
-static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct nidio96_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- /* XXX configure ports for input */
- writel(0x0000, dev->mmio + Port_Pin_Directions(0));
-
- if (1) {
- /* enable fifos A B C D */
- writeb(0x0f, dev->mmio + Data_Path);
-
- /* set transfer width a 32 bits */
- writeb(TransferWidth(0) | TransferLength(0),
- dev->mmio + Transfer_Size_Control);
- } else {
- writeb(0x03, dev->mmio + Data_Path);
- writeb(TransferWidth(3) | TransferLength(0),
- dev->mmio + Transfer_Size_Control);
- }
-
- /* protocol configuration */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* page 4-5, "input with internal REQs" */
- writeb(0, dev->mmio + OpMode);
- writeb(0x00, dev->mmio + ClockReg);
- writeb(1, dev->mmio + Sequence);
- writeb(0x04, dev->mmio + ReqReg);
- writeb(4, dev->mmio + BlockMode);
- writeb(3, dev->mmio + LinePolarities);
- writeb(0xc0, dev->mmio + AckSer);
- writel(ni_pcidio_ns_to_timer(&cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST),
- dev->mmio + StartDelay);
- writeb(1, dev->mmio + ReqDelay);
- writeb(1, dev->mmio + ReqNotDelay);
- writeb(1, dev->mmio + AckDelay);
- writeb(0x0b, dev->mmio + AckNotDelay);
- writeb(0x01, dev->mmio + Data1Delay);
- /* manual, page 4-5: ClockSpeed comment is incorrectly listed
- * on DAQOptions */
- writew(0, dev->mmio + ClockSpeed);
- writeb(0, dev->mmio + DAQOptions);
- } else {
- /* TRIG_EXT */
- /* page 4-5, "input with external REQs" */
- writeb(0, dev->mmio + OpMode);
- writeb(0x00, dev->mmio + ClockReg);
- writeb(0, dev->mmio + Sequence);
- writeb(0x00, dev->mmio + ReqReg);
- writeb(4, dev->mmio + BlockMode);
- if (!(cmd->scan_begin_arg & CR_INVERT)) /* Leading Edge */
- writeb(0, dev->mmio + LinePolarities);
- else /* Trailing Edge */
- writeb(2, dev->mmio + LinePolarities);
- writeb(0x00, dev->mmio + AckSer);
- writel(1, dev->mmio + StartDelay);
- writeb(1, dev->mmio + ReqDelay);
- writeb(1, dev->mmio + ReqNotDelay);
- writeb(1, dev->mmio + AckDelay);
- writeb(0x0C, dev->mmio + AckNotDelay);
- writeb(0x10, dev->mmio + Data1Delay);
- writew(0, dev->mmio + ClockSpeed);
- writeb(0x60, dev->mmio + DAQOptions);
- }
-
- if (cmd->stop_src == TRIG_COUNT) {
- writel(cmd->stop_arg,
- dev->mmio + Transfer_Count);
- } else {
- /* XXX */
- }
-
-#ifdef USE_DMA
- writeb(ClearPrimaryTC | ClearSecondaryTC,
- dev->mmio + Group_1_First_Clear);
-
- {
- int retval = setup_mite_dma(dev, s);
-
- if (retval)
- return retval;
- }
-#else
- writeb(0x00, dev->mmio + DMA_Line_Control_Group1);
-#endif
- writeb(0x00, dev->mmio + DMA_Line_Control_Group2);
-
- /* clear and enable interrupts */
- writeb(0xff, dev->mmio + Group_1_First_Clear);
- /* writeb(ClearExpired, dev->mmio+Group_1_Second_Clear); */
-
- writeb(IntEn, dev->mmio + Interrupt_Control);
- writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control);
-
- if (cmd->stop_src == TRIG_NONE) {
- devpriv->OpModeBits = DataLatching(0) | RunMode(7);
- } else { /* TRIG_TIMER */
- devpriv->OpModeBits = Numbered | RunMode(7);
- }
- if (cmd->start_src == TRIG_NOW) {
- /* start */
- writeb(devpriv->OpModeBits, dev->mmio + OpMode);
- s->async->inttrig = NULL;
- } else {
- /* TRIG_INT */
- s->async->inttrig = ni_pcidio_inttrig;
- }
-
- return 0;
-}
-
-static int ni_pcidio_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- writeb(0x00, dev->mmio + Master_DMA_And_Interrupt_Control);
- ni_pcidio_release_di_mite_channel(dev);
-
- return 0;
-}
-
-static int ni_pcidio_change(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct nidio96_private *devpriv = dev->private;
- int ret;
-
- ret = mite_buf_change(devpriv->di_mite_ring, s);
- if (ret < 0)
- return ret;
-
- memset(s->async->prealloc_buf, 0xaa, s->async->prealloc_bufsz);
-
- return 0;
-}
-
-static int pci_6534_load_fpga(struct comedi_device *dev,
- const u8 *data, size_t data_len,
- unsigned long context)
-{
- static const int timeout = 1000;
- int fpga_index = context;
- int i;
- size_t j;
-
- writew(0x80 | fpga_index, dev->mmio + Firmware_Control_Register);
- writew(0xc0 | fpga_index, dev->mmio + Firmware_Control_Register);
- for (i = 0;
- (readw(dev->mmio + Firmware_Status_Register) & 0x2) == 0 &&
- i < timeout; ++i) {
- udelay(1);
- }
- if (i == timeout) {
- dev_warn(dev->class_dev,
- "ni_pcidio: failed to load fpga %i, waiting for status 0x2\n",
- fpga_index);
- return -EIO;
- }
- writew(0x80 | fpga_index, dev->mmio + Firmware_Control_Register);
- for (i = 0;
- readw(dev->mmio + Firmware_Status_Register) != 0x3 &&
- i < timeout; ++i) {
- udelay(1);
- }
- if (i == timeout) {
- dev_warn(dev->class_dev,
- "ni_pcidio: failed to load fpga %i, waiting for status 0x3\n",
- fpga_index);
- return -EIO;
- }
- for (j = 0; j + 1 < data_len;) {
- unsigned int value = data[j++];
-
- value |= data[j++] << 8;
- writew(value, dev->mmio + Firmware_Data_Register);
- for (i = 0;
- (readw(dev->mmio + Firmware_Status_Register) & 0x2) == 0
- && i < timeout; ++i) {
- udelay(1);
- }
- if (i == timeout) {
- dev_warn(dev->class_dev,
- "ni_pcidio: failed to load word into fpga %i\n",
- fpga_index);
- return -EIO;
- }
- if (need_resched())
- schedule();
- }
- writew(0x0, dev->mmio + Firmware_Control_Register);
- return 0;
-}
-
-static int pci_6534_reset_fpga(struct comedi_device *dev, int fpga_index)
-{
- return pci_6534_load_fpga(dev, NULL, 0, fpga_index);
-}
-
-static int pci_6534_reset_fpgas(struct comedi_device *dev)
-{
- int ret;
- int i;
-
- writew(0x0, dev->mmio + Firmware_Control_Register);
- for (i = 0; i < 3; ++i) {
- ret = pci_6534_reset_fpga(dev, i);
- if (ret < 0)
- break;
- }
- writew(0x0, dev->mmio + Firmware_Mask_Register);
- return ret;
-}
-
-static void pci_6534_init_main_fpga(struct comedi_device *dev)
-{
- writel(0, dev->mmio + FPGA_Control1_Register);
- writel(0, dev->mmio + FPGA_Control2_Register);
- writel(0, dev->mmio + FPGA_SCALS_Counter_Register);
- writel(0, dev->mmio + FPGA_SCAMS_Counter_Register);
- writel(0, dev->mmio + FPGA_SCBLS_Counter_Register);
- writel(0, dev->mmio + FPGA_SCBMS_Counter_Register);
-}
-
-static int pci_6534_upload_firmware(struct comedi_device *dev)
-{
- struct nidio96_private *devpriv = dev->private;
- static const char *const fw_file[3] = {
- FW_PCI_6534_SCARAB_DI, /* loaded into scarab A for DI */
- FW_PCI_6534_SCARAB_DO, /* loaded into scarab B for DO */
- FW_PCI_6534_MAIN, /* loaded into main FPGA */
- };
- int ret;
- int n;
-
- ret = pci_6534_reset_fpgas(dev);
- if (ret < 0)
- return ret;
- /* load main FPGA first, then the two scarabs */
- for (n = 2; n >= 0; n--) {
- ret = comedi_load_firmware(dev, &devpriv->mite->pcidev->dev,
- fw_file[n],
- pci_6534_load_fpga, n);
- if (ret == 0 && n == 2)
- pci_6534_init_main_fpga(dev);
- if (ret < 0)
- break;
- }
- return ret;
-}
-
-static void nidio_reset_board(struct comedi_device *dev)
-{
- writel(0, dev->mmio + Port_IO(0));
- writel(0, dev->mmio + Port_Pin_Directions(0));
- writel(0, dev->mmio + Port_Pin_Mask(0));
-
- /* disable interrupts on board */
- writeb(0, dev->mmio + Master_DMA_And_Interrupt_Control);
-}
-
-static int nidio_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct nidio_board *board = NULL;
- struct nidio96_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
- unsigned int irq;
-
- if (context < ARRAY_SIZE(nidio_boards))
- board = &nidio_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- spin_lock_init(&devpriv->mite_channel_lock);
-
- devpriv->mite = mite_alloc(pcidev);
- if (!devpriv->mite)
- return -ENOMEM;
-
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
- devpriv->di_mite_ring = mite_alloc_ring(devpriv->mite);
- if (!devpriv->di_mite_ring)
- return -ENOMEM;
-
- if (board->uses_firmware) {
- ret = pci_6534_upload_firmware(dev);
- if (ret < 0)
- return ret;
- }
-
- nidio_reset_board(dev);
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- dev_info(dev->class_dev, "%s rev=%d\n", dev->board_name,
- readb(dev->mmio + Chip_Version));
-
- s = &dev->subdevices[0];
-
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_PACKED |
- SDF_CMD_READ;
- s->n_chan = 32;
- s->range_table = &range_digital;
- s->maxdata = 1;
- s->insn_config = &ni_pcidio_insn_config;
- s->insn_bits = &ni_pcidio_insn_bits;
- s->do_cmd = &ni_pcidio_cmd;
- s->do_cmdtest = &ni_pcidio_cmdtest;
- s->cancel = &ni_pcidio_cancel;
- s->len_chanlist = 32; /* XXX */
- s->buf_change = &ni_pcidio_change;
- s->async_dma_dir = DMA_BIDIRECTIONAL;
- s->poll = &ni_pcidio_poll;
-
- irq = pcidev->irq;
- if (irq) {
- ret = request_irq(irq, nidio_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = irq;
- }
-
- return 0;
-}
-
-static void nidio_detach(struct comedi_device *dev)
-{
- struct nidio96_private *devpriv = dev->private;
-
- if (dev->irq)
- free_irq(dev->irq, dev);
- if (devpriv) {
- if (devpriv->di_mite_ring) {
- mite_free_ring(devpriv->di_mite_ring);
- devpriv->di_mite_ring = NULL;
- }
- mite_detach(devpriv->mite);
- }
- if (dev->mmio)
- iounmap(dev->mmio);
- comedi_pci_disable(dev);
-}
-
-static struct comedi_driver ni_pcidio_driver = {
- .driver_name = "ni_pcidio",
- .module = THIS_MODULE,
- .auto_attach = nidio_auto_attach,
- .detach = nidio_detach,
-};
-
-static int ni_pcidio_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &ni_pcidio_driver, id->driver_data);
-}
-
-static const struct pci_device_id ni_pcidio_pci_table[] = {
- { PCI_VDEVICE(NI, 0x1150), BOARD_PCIDIO_32HS },
- { PCI_VDEVICE(NI, 0x12b0), BOARD_PCI6534 },
- { PCI_VDEVICE(NI, 0x1320), BOARD_PXI6533 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ni_pcidio_pci_table);
-
-static struct pci_driver ni_pcidio_pci_driver = {
- .name = "ni_pcidio",
- .id_table = ni_pcidio_pci_table,
- .probe = ni_pcidio_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(ni_pcidio_driver, ni_pcidio_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
deleted file mode 100644
index 30a5a75d1fe7..000000000000
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ /dev/null
@@ -1,1307 +0,0 @@
-/*
- comedi/drivers/ni_pcimio.c
- Hardware driver for NI PCI-MIO E series cards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_pcimio
-Description: National Instruments PCI-MIO-E series and M series (all boards)
-Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans,
- Herman Bruyninckx, Terry Barnaby
-Status: works
-Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
- PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
- PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
- PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
- PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
- PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
- PCI-6254, PCI-6259, PCIe-6259,
- PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
- PCI-6711, PXI-6711, PCI-6713, PXI-6713,
- PXI-6071E, PCI-6070E, PXI-6070E,
- PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
- PCI-6143, PXI-6143
-Updated: Mon, 09 Jan 2012 14:52:48 +0000
-
-These boards are almost identical to the AT-MIO E series, except that
-they use the PCI bus instead of ISA (i.e., AT). See the notes for
-the ni_atmio.o driver for additional information about these boards.
-
-Autocalibration is supported on many of the devices, using the
-comedi_calibrate (or comedi_soft_calibrate for m-series) utility.
-M-Series boards do analog input and analog output calibration entirely
-in software. The software calibration corrects
-the analog input for offset, gain and
-nonlinearity. The analog outputs are corrected for offset and gain.
-See the comedilib documentation on comedi_get_softcal_converter() for
-more information.
-
-By default, the driver uses DMA to transfer analog input data to
-memory. When DMA is enabled, not all triggering features are
-supported.
-
-Digital I/O may not work on 673x.
-
-Note that the PCI-6143 is a simultaneous sampling device with 8 converters.
-With this board all of the converters perform one simultaneous sample during
-a scan interval. The period for a scan is used for the convert time in a
-Comedi cmd. The convert trigger source is set to TRIG_NOW by default.
-
-The RTSI trigger bus is supported on these cards on
-subdevice 10. See the comedilib documentation for details.
-
-Information (number of channels, bits, etc.) for some devices may be
-incorrect. Please check this and submit a bug if there are problems
-for your device.
-
-SCXI is probably broken for m-series boards.
-
-Bugs:
- - When DMA is enabled, COMEDI_EV_CONVERT does
- not work correctly.
-
-*/
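
Following the notes above, a minimal comedilib sketch of a timed analog-input
command looks roughly like this; convert_src is TRIG_NOW so the board paces the
conversions itself (as described for the simultaneously sampling PCI-6143). The
subdevice index, the scan rate and the chanlist are assumptions for illustration:

	#include <string.h>
	#include <comedilib.h>

	int start_timed_ai(comedi_t *dev, unsigned int *chanlist, int n_chan)
	{
		comedi_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.subdev         = 0;			/* AI subdevice (assumed index) */
		cmd.start_src      = TRIG_NOW;
		cmd.scan_begin_src = TRIG_TIMER;
		cmd.scan_begin_arg = 100000;		/* 100 us per scan, i.e. 10 kHz */
		cmd.convert_src    = TRIG_NOW;		/* board-paced conversions */
		cmd.scan_end_src   = TRIG_COUNT;
		cmd.scan_end_arg   = n_chan;
		cmd.stop_src       = TRIG_NONE;		/* stream until cancelled */
		cmd.chanlist       = chanlist;
		cmd.chanlist_len   = n_chan;

		/* run the test twice so the driver can round the timing arguments */
		comedi_command_test(dev, &cmd);
		comedi_command_test(dev, &cmd);
		return comedi_command(dev, &cmd);
	}
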
-/*
- The PCI-MIO E series driver was originally written by
- Tomasz Motylewski <...>, and ported to comedi by ds.
-
- References:
-
- 341079b.pdf PCI E Series Register-Level Programmer Manual
- 340934b.pdf DAQ-STC reference manual
-
- 322080b.pdf 6711/6713/6715 User Manual
-
- 320945c.pdf PCI E Series User Manual
- 322138a.pdf PCI-6052E and DAQPad-6052E User Manual
-
- ISSUES:
-
- need to deal with external reference for DAC, and other DAC
- properties in board properties
-
- deal with at-mio-16de-10 revision D to N changes, etc.
-
- need to add other CALDAC type
-
-	 need to slow down DAC loading. I don't trust NI's claim that
-	 two writes to the PCI bus slow IO enough. I would prefer to
- use udelay(). Timing specs: (clock)
- AD8522 30ns
- DAC8043 120ns
- DAC8800 60ns
- MB88341 ?
-
-*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
-#include <asm/byteorder.h>
-
-#include "ni_stc.h"
-#include "mite.h"
-
-#define PCIDMA
-
-/* These are not all the possible ao ranges for 628x boards.
-   They can do OFFSET +- REFERENCE where OFFSET can be
-   0V, 5V, APFI<0,1>, or AO<0...3> and REFERENCE can
-   be 10V, 5V, 2V, 1V, APFI<0,1>, or AO<0...3>. That's
-   63 different possibilities. An AO channel
-   cannot act as its own OFFSET or REFERENCE.
-*/
-static const struct comedi_lrange range_ni_M_628x_ao = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- RANGE(-5, 15),
- UNI_RANGE(10),
- RANGE(3, 7),
- RANGE(4, 6),
- RANGE_ext(-1, 1)
- }
-};
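
Read against the comment above, the less obvious entries in the 628x AO range
table are just OFFSET +- REFERENCE pairs: RANGE(-5, 15) is a 5V offset with a
10V reference, RANGE(3, 7) is 5V +- 2V, RANGE(4, 6) is 5V +- 1V, UNI_RANGE(10)
is 5V +- 5V, and RANGE_ext(-1, 1) presumably scales with the external (APFI)
reference.
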
-
-static const struct comedi_lrange range_ni_M_625x_ao = {
- 3, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- RANGE_ext(-1, 1)
- }
-};
-
-enum ni_pcimio_boardid {
- BOARD_PCIMIO_16XE_50,
- BOARD_PCIMIO_16XE_10,
- BOARD_PCI6014,
- BOARD_PXI6030E,
- BOARD_PCIMIO_16E_1,
- BOARD_PCIMIO_16E_4,
- BOARD_PXI6040E,
- BOARD_PCI6031E,
- BOARD_PCI6032E,
- BOARD_PCI6033E,
- BOARD_PCI6071E,
- BOARD_PCI6023E,
- BOARD_PCI6024E,
- BOARD_PCI6025E,
- BOARD_PXI6025E,
- BOARD_PCI6034E,
- BOARD_PCI6035E,
- BOARD_PCI6052E,
- BOARD_PCI6110,
- BOARD_PCI6111,
- /* BOARD_PCI6115, */
- /* BOARD_PXI6115, */
- BOARD_PCI6711,
- BOARD_PXI6711,
- BOARD_PCI6713,
- BOARD_PXI6713,
- BOARD_PCI6731,
- /* BOARD_PXI6731, */
- BOARD_PCI6733,
- BOARD_PXI6733,
- BOARD_PXI6071E,
- BOARD_PXI6070E,
- BOARD_PXI6052E,
- BOARD_PXI6031E,
- BOARD_PCI6036E,
- BOARD_PCI6220,
- BOARD_PCI6221,
- BOARD_PCI6221_37PIN,
- BOARD_PCI6224,
- BOARD_PXI6224,
- BOARD_PCI6225,
- BOARD_PXI6225,
- BOARD_PCI6229,
- BOARD_PCI6250,
- BOARD_PCI6251,
- BOARD_PCIE6251,
- BOARD_PXIE6251,
- BOARD_PCI6254,
- BOARD_PCI6259,
- BOARD_PCIE6259,
- BOARD_PCI6280,
- BOARD_PCI6281,
- BOARD_PXI6281,
- BOARD_PCI6284,
- BOARD_PCI6289,
- BOARD_PCI6143,
- BOARD_PXI6143,
-};
-
-static const struct ni_board_struct ni_boards[] = {
- [BOARD_PCIMIO_16XE_50] = {
- .name = "pci-mio-16xe-50",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 2048,
- .alwaysdither = 1,
- .gainlkup = ai_gain_8,
- .ai_speed = 50000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 50000,
- .caldac = { dac8800, dac8043 },
- },
- [BOARD_PCIMIO_16XE_10] = {
- .name = "pci-mio-16xe-10", /* aka pci-6030E */
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 10000,
- .caldac = { dac8800, dac8043, ad8522 },
- },
- [BOARD_PCI6014] = {
- .name = "pci-6014",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 100000,
- .caldac = { ad8804_debug },
- },
- [BOARD_PXI6030E] = {
- .name = "pxi-6030e",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 10000,
- .caldac = { dac8800, dac8043, ad8522 },
- },
- [BOARD_PCIMIO_16E_1] = {
- .name = "pci-mio-16e-1", /* aka pci-6070e */
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_16,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { mb88341 },
- },
- [BOARD_PCIMIO_16E_4] = {
- .name = "pci-mio-16e-4", /* aka pci-6040e */
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_16,
- /*
- * there have been reported problems with
- * full speed on this board
- */
- .ai_speed = 2000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 512,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { ad8804_debug }, /* doc says mb88341 */
- },
- [BOARD_PXI6040E] = {
- .name = "pxi-6040e",
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_16,
- .ai_speed = 2000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 512,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { mb88341 },
- },
- [BOARD_PCI6031E] = {
- .name = "pci-6031e",
- .n_adchan = 64,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 10000,
- .caldac = { dac8800, dac8043, ad8522 },
- },
- [BOARD_PCI6032E] = {
- .name = "pci-6032e",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .caldac = { dac8800, dac8043, ad8522 },
- },
- [BOARD_PCI6033E] = {
- .name = "pci-6033e",
- .n_adchan = 64,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .caldac = { dac8800, dac8043, ad8522 },
- },
- [BOARD_PCI6071E] = {
- .name = "pci-6071e",
- .n_adchan = 64,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_16,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { ad8804_debug },
- },
- [BOARD_PCI6023E] = {
- .name = "pci-6023e",
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .caldac = { ad8804_debug }, /* manual is wrong */
- },
- [BOARD_PCI6024E] = {
- .name = "pci-6024e",
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 100000,
- .caldac = { ad8804_debug }, /* manual is wrong */
- },
- [BOARD_PCI6025E] = {
- .name = "pci-6025e",
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 100000,
- .caldac = { ad8804_debug }, /* manual is wrong */
- .has_8255 = 1,
- },
- [BOARD_PXI6025E] = {
- .name = "pxi-6025e",
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 100000,
- .caldac = { ad8804_debug }, /* manual is wrong */
- .has_8255 = 1,
- },
- [BOARD_PCI6034E] = {
- .name = "pci-6034e",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .caldac = { ad8804_debug },
- },
- [BOARD_PCI6035E] = {
- .name = "pci-6035e",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 100000,
- .caldac = { ad8804_debug },
- },
- [BOARD_PCI6052E] = {
- .name = "pci-6052e",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_16,
- .ai_speed = 3000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 3000,
- /* manual is wrong */
- .caldac = { ad8804_debug, ad8804_debug, ad8522 },
- },
- [BOARD_PCI6110] = {
- .name = "pci-6110",
- .n_adchan = 4,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 8192,
- .alwaysdither = 0,
- .gainlkup = ai_gain_611x,
- .ai_speed = 200,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .reg_type = ni_reg_611x,
- .ao_range_table = &range_bipolar10,
- .ao_fifo_depth = 2048,
- .ao_speed = 250,
- .caldac = { ad8804, ad8804 },
- },
- [BOARD_PCI6111] = {
- .name = "pci-6111",
- .n_adchan = 2,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 8192,
- .gainlkup = ai_gain_611x,
- .ai_speed = 200,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .reg_type = ni_reg_611x,
- .ao_range_table = &range_bipolar10,
- .ao_fifo_depth = 2048,
- .ao_speed = 250,
- .caldac = { ad8804, ad8804 },
- },
-#if 0
- /* The 6115 boards probably need their own driver */
- [BOARD_PCI6115] = { /* .device_id = 0x2ed0, */
- .name = "pci-6115",
- .n_adchan = 4,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 8192,
- .gainlkup = ai_gain_611x,
- .ai_speed = 100,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_671x = 1,
- .ao_fifo_depth = 2048,
- .ao_speed = 250,
- .reg_611x = 1,
- /* XXX */
- .caldac = { ad8804_debug, ad8804_debug, ad8804_debug },
- },
-#endif
-#if 0
- [BOARD_PXI6115] = { /* .device_id = ????, */
- .name = "pxi-6115",
- .n_adchan = 4,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 8192,
- .gainlkup = ai_gain_611x,
- .ai_speed = 100,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_671x = 1,
- .ao_fifo_depth = 2048,
- .ao_speed = 250,
- .reg_611x = 1,
- /* XXX */
- .caldac = { ad8804_debug, ad8804_debug, ad8804_debug },
- },
-#endif
- [BOARD_PCI6711] = {
- .name = "pci-6711",
- .n_aochan = 4,
- .ao_maxdata = 0x0fff,
- /* data sheet says 8192, but fifo really holds 16384 samples */
- .ao_fifo_depth = 16384,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000,
- .reg_type = ni_reg_6711,
- .caldac = { ad8804_debug },
- },
- [BOARD_PXI6711] = {
- .name = "pxi-6711",
- .n_aochan = 4,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 16384,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000,
- .reg_type = ni_reg_6711,
- .caldac = { ad8804_debug },
- },
- [BOARD_PCI6713] = {
- .name = "pci-6713",
- .n_aochan = 8,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 16384,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000,
- .reg_type = ni_reg_6713,
- .caldac = { ad8804_debug, ad8804_debug },
- },
- [BOARD_PXI6713] = {
- .name = "pxi-6713",
- .n_aochan = 8,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 16384,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000,
- .reg_type = ni_reg_6713,
- .caldac = { ad8804_debug, ad8804_debug },
- },
- [BOARD_PCI6731] = {
- .name = "pci-6731",
- .n_aochan = 4,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8192,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000,
- .reg_type = ni_reg_6711,
- .caldac = { ad8804_debug },
- },
-#if 0
- [BOARD_PXI6731] = { /* .device_id = ????, */
- .name = "pxi-6731",
- .n_aochan = 4,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8192,
- .ao_range_table = &range_bipolar10,
- .reg_type = ni_reg_6711,
- .caldac = { ad8804_debug },
- },
-#endif
- [BOARD_PCI6733] = {
- .name = "pci-6733",
- .n_aochan = 8,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 16384,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000,
- .reg_type = ni_reg_6713,
- .caldac = { ad8804_debug, ad8804_debug },
- },
- [BOARD_PXI6733] = {
- .name = "pxi-6733",
- .n_aochan = 8,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 16384,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 1000,
- .reg_type = ni_reg_6713,
- .caldac = { ad8804_debug, ad8804_debug },
- },
- [BOARD_PXI6071E] = {
- .name = "pxi-6071e",
- .n_adchan = 64,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_16,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { ad8804_debug },
- },
- [BOARD_PXI6070E] = {
- .name = "pxi-6070e",
- .n_adchan = 16,
- .ai_maxdata = 0x0fff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_16,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0x0fff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 1000,
- .caldac = { ad8804_debug },
- },
- [BOARD_PXI6052E] = {
- .name = "pxi-6052e",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_16,
- .ai_speed = 3000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 3000,
- .caldac = { mb88341, mb88341, ad8522 },
- },
- [BOARD_PXI6031E] = {
- .name = "pxi-6031e",
- .n_adchan = 64,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_14,
- .ai_speed = 10000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 2048,
- .ao_range_table = &range_ni_E_ao_ext,
- .ao_speed = 10000,
- .caldac = { dac8800, dac8043, ad8522 },
- },
- [BOARD_PCI6036E] = {
- .name = "pci-6036e",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512,
- .alwaysdither = 1,
- .gainlkup = ai_gain_4,
- .ai_speed = 5000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_range_table = &range_bipolar10,
- .ao_speed = 100000,
- .caldac = { ad8804_debug },
- },
- [BOARD_PCI6220] = {
- .name = "pci-6220",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 512, /* FIXME: guess */
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .reg_type = ni_reg_622x,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6221] = {
- .name = "pci-6221",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_bipolar10,
- .reg_type = ni_reg_622x,
- .ao_speed = 1200,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6221_37PIN] = {
- .name = "pci-6221_37pin",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_bipolar10,
- .reg_type = ni_reg_622x,
- .ao_speed = 1200,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6224] = {
- .name = "pci-6224",
- .n_adchan = 32,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .reg_type = ni_reg_622x,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PXI6224] = {
- .name = "pxi-6224",
- .n_adchan = 32,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .reg_type = ni_reg_622x,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6225] = {
- .name = "pci-6225",
- .n_adchan = 80,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_bipolar10,
- .reg_type = ni_reg_622x,
- .ao_speed = 1200,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PXI6225] = {
- .name = "pxi-6225",
- .n_adchan = 80,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_bipolar10,
- .reg_type = ni_reg_622x,
- .ao_speed = 1200,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6229] = {
- .name = "pci-6229",
- .n_adchan = 32,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_622x,
- .ai_speed = 4000,
- .n_aochan = 4,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_bipolar10,
- .reg_type = ni_reg_622x,
- .ao_speed = 1200,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6250] = {
- .name = "pci-6250",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_628x,
- .ai_speed = 800,
- .reg_type = ni_reg_625x,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6251] = {
- .name = "pci-6251",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_628x,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_625x_ao,
- .reg_type = ni_reg_625x,
- .ao_speed = 350,
- .caldac = { caldac_none },
- },
- [BOARD_PCIE6251] = {
- .name = "pcie-6251",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_628x,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_625x_ao,
- .reg_type = ni_reg_625x,
- .ao_speed = 350,
- .caldac = { caldac_none },
- },
- [BOARD_PXIE6251] = {
- .name = "pxie-6251",
- .n_adchan = 16,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_628x,
- .ai_speed = 800,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_625x_ao,
- .reg_type = ni_reg_625x,
- .ao_speed = 350,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6254] = {
- .name = "pci-6254",
- .n_adchan = 32,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_628x,
- .ai_speed = 800,
- .reg_type = ni_reg_625x,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6259] = {
- .name = "pci-6259",
- .n_adchan = 32,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_628x,
- .ai_speed = 800,
- .n_aochan = 4,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_625x_ao,
- .reg_type = ni_reg_625x,
- .ao_speed = 350,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCIE6259] = {
- .name = "pcie-6259",
- .n_adchan = 32,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 4095,
- .gainlkup = ai_gain_628x,
- .ai_speed = 800,
- .n_aochan = 4,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_625x_ao,
- .reg_type = ni_reg_625x,
- .ao_speed = 350,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6280] = {
- .name = "pci-6280",
- .n_adchan = 16,
- .ai_maxdata = 0x3ffff,
- .ai_fifo_depth = 2047,
- .gainlkup = ai_gain_628x,
- .ai_speed = 1600,
- .ao_fifo_depth = 8191,
- .reg_type = ni_reg_628x,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6281] = {
- .name = "pci-6281",
- .n_adchan = 16,
- .ai_maxdata = 0x3ffff,
- .ai_fifo_depth = 2047,
- .gainlkup = ai_gain_628x,
- .ai_speed = 1600,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_628x_ao,
- .reg_type = ni_reg_628x,
- .ao_speed = 350,
- .caldac = { caldac_none },
- },
- [BOARD_PXI6281] = {
- .name = "pxi-6281",
- .n_adchan = 16,
- .ai_maxdata = 0x3ffff,
- .ai_fifo_depth = 2047,
- .gainlkup = ai_gain_628x,
- .ai_speed = 1600,
- .n_aochan = 2,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_628x_ao,
- .reg_type = ni_reg_628x,
- .ao_speed = 350,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6284] = {
- .name = "pci-6284",
- .n_adchan = 32,
- .ai_maxdata = 0x3ffff,
- .ai_fifo_depth = 2047,
- .gainlkup = ai_gain_628x,
- .ai_speed = 1600,
- .reg_type = ni_reg_628x,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6289] = {
- .name = "pci-6289",
- .n_adchan = 32,
- .ai_maxdata = 0x3ffff,
- .ai_fifo_depth = 2047,
- .gainlkup = ai_gain_628x,
- .ai_speed = 1600,
- .n_aochan = 4,
- .ao_maxdata = 0xffff,
- .ao_fifo_depth = 8191,
- .ao_range_table = &range_ni_M_628x_ao,
- .reg_type = ni_reg_628x,
- .ao_speed = 350,
- .has_32dio_chan = 1,
- .caldac = { caldac_none },
- },
- [BOARD_PCI6143] = {
- .name = "pci-6143",
- .n_adchan = 8,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 1024,
- .gainlkup = ai_gain_6143,
- .ai_speed = 4000,
- .reg_type = ni_reg_6143,
- .caldac = { ad8804_debug, ad8804_debug },
- },
- [BOARD_PXI6143] = {
- .name = "pxi-6143",
- .n_adchan = 8,
- .ai_maxdata = 0xffff,
- .ai_fifo_depth = 1024,
- .gainlkup = ai_gain_6143,
- .ai_speed = 4000,
- .reg_type = ni_reg_6143,
- .caldac = { ad8804_debug, ad8804_debug },
- },
-};
-
-#include "ni_mio_common.c"
-
-static int pcimio_ai_change(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
- int ret;
-
- ret = mite_buf_change(devpriv->ai_mite_ring, s);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int pcimio_ao_change(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
- int ret;
-
- ret = mite_buf_change(devpriv->ao_mite_ring, s);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int pcimio_gpct0_change(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
- int ret;
-
- ret = mite_buf_change(devpriv->gpct_mite_ring[0], s);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int pcimio_gpct1_change(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
- int ret;
-
- ret = mite_buf_change(devpriv->gpct_mite_ring[1], s);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int pcimio_dio_change(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_private *devpriv = dev->private;
- int ret;
-
- ret = mite_buf_change(devpriv->cdo_mite_ring, s);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static void m_series_init_eeprom_buffer(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- static const int Start_Cal_EEPROM = 0x400;
- static const unsigned window_size = 10;
- static const int serial_number_eeprom_offset = 0x4;
- static const int serial_number_eeprom_length = 0x4;
- unsigned old_iodwbsr_bits;
- unsigned old_iodwbsr1_bits;
- unsigned old_iodwcr1_bits;
- int i;
-
- old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
- old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
- devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0x1 | old_iodwcr1_bits,
- devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0xf, devpriv->mite->mite_io_addr + 0x30);
-
- BUG_ON(serial_number_eeprom_length > sizeof(devpriv->serial_number));
- for (i = 0; i < serial_number_eeprom_length; ++i) {
- char *byte_ptr = (char *)&devpriv->serial_number + i;
- *byte_ptr = ni_readb(dev, serial_number_eeprom_offset + i);
- }
- devpriv->serial_number = be32_to_cpu(devpriv->serial_number);
-
- for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
- devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i);
-
- writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + 0x30);
-}
-
-static void init_6143(struct comedi_device *dev)
-{
- const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
-
- /* Disable interrupts */
- ni_stc_writew(dev, 0, NISTC_INT_CTRL_REG);
-
- /* Initialise 6143 AI specific bits */
-
- /* Set G0,G1 DMA mode to E series version */
- ni_writeb(dev, 0x00, NI6143_MAGIC_REG);
- /* Set EOCMode, ADCMode and pipelinedelay */
- ni_writeb(dev, 0x80, NI6143_PIPELINE_DELAY_REG);
- /* Set EOC Delay */
- ni_writeb(dev, 0x00, NI6143_EOC_SET_REG);
-
- /* Set the FIFO half full level */
- ni_writel(dev, board->ai_fifo_depth / 2, NI6143_AI_FIFO_FLAG_REG);
-
- /* Strobe Relay disable bit */
- devpriv->ai_calib_source_enabled = 0;
- ni_writew(dev, devpriv->ai_calib_source | NI6143_CALIB_CHAN_RELAY_OFF,
- NI6143_CALIB_CHAN_REG);
- ni_writew(dev, devpriv->ai_calib_source, NI6143_CALIB_CHAN_REG);
-}
-
-static void pcimio_detach(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
-
- mio_common_detach(dev);
- if (dev->irq)
- free_irq(dev->irq, dev);
- if (devpriv) {
- mite_free_ring(devpriv->ai_mite_ring);
- mite_free_ring(devpriv->ao_mite_ring);
- mite_free_ring(devpriv->cdo_mite_ring);
- mite_free_ring(devpriv->gpct_mite_ring[0]);
- mite_free_ring(devpriv->gpct_mite_ring[1]);
- mite_detach(devpriv->mite);
- }
- if (dev->mmio)
- iounmap(dev->mmio);
- comedi_pci_disable(dev);
-}
-
-static int pcimio_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct ni_board_struct *board = NULL;
- struct ni_private *devpriv;
- unsigned int irq;
- int ret;
-
- if (context < ARRAY_SIZE(ni_boards))
- board = &ni_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- ret = ni_alloc_private(dev);
- if (ret)
- return ret;
- devpriv = dev->private;
-
- devpriv->mite = mite_alloc(pcidev);
- if (!devpriv->mite)
- return -ENOMEM;
-
- if (board->reg_type & ni_reg_m_series_mask)
- devpriv->is_m_series = 1;
- if (board->reg_type & ni_reg_6xxx_mask)
- devpriv->is_6xxx = 1;
- if (board->reg_type == ni_reg_611x)
- devpriv->is_611x = 1;
- if (board->reg_type == ni_reg_6143)
- devpriv->is_6143 = 1;
- if (board->reg_type == ni_reg_622x)
- devpriv->is_622x = 1;
- if (board->reg_type == ni_reg_625x)
- devpriv->is_625x = 1;
- if (board->reg_type == ni_reg_628x)
- devpriv->is_628x = 1;
- if (board->reg_type & ni_reg_67xx_mask)
- devpriv->is_67xx = 1;
- if (board->reg_type == ni_reg_6711)
- devpriv->is_6711 = 1;
- if (board->reg_type == ni_reg_6713)
- devpriv->is_6713 = 1;
-
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
- devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite);
- if (!devpriv->ai_mite_ring)
- return -ENOMEM;
- devpriv->ao_mite_ring = mite_alloc_ring(devpriv->mite);
- if (!devpriv->ao_mite_ring)
- return -ENOMEM;
- devpriv->cdo_mite_ring = mite_alloc_ring(devpriv->mite);
- if (!devpriv->cdo_mite_ring)
- return -ENOMEM;
- devpriv->gpct_mite_ring[0] = mite_alloc_ring(devpriv->mite);
- if (!devpriv->gpct_mite_ring[0])
- return -ENOMEM;
- devpriv->gpct_mite_ring[1] = mite_alloc_ring(devpriv->mite);
- if (!devpriv->gpct_mite_ring[1])
- return -ENOMEM;
-
- if (devpriv->is_m_series)
- m_series_init_eeprom_buffer(dev);
- if (devpriv->is_6143)
- init_6143(dev);
-
- irq = pcidev->irq;
- if (irq) {
- ret = request_irq(irq, ni_E_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = irq;
- }
-
- ret = ni_E_init(dev, 0, 1);
- if (ret < 0)
- return ret;
-
- dev->subdevices[NI_AI_SUBDEV].buf_change = &pcimio_ai_change;
- dev->subdevices[NI_AO_SUBDEV].buf_change = &pcimio_ao_change;
- dev->subdevices[NI_GPCT_SUBDEV(0)].buf_change = &pcimio_gpct0_change;
- dev->subdevices[NI_GPCT_SUBDEV(1)].buf_change = &pcimio_gpct1_change;
- dev->subdevices[NI_DIO_SUBDEV].buf_change = &pcimio_dio_change;
-
- return 0;
-}
-
-static struct comedi_driver ni_pcimio_driver = {
- .driver_name = "ni_pcimio",
- .module = THIS_MODULE,
- .auto_attach = pcimio_auto_attach,
- .detach = pcimio_detach,
-};
-
-static int ni_pcimio_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &ni_pcimio_driver, id->driver_data);
-}
-
-static const struct pci_device_id ni_pcimio_pci_table[] = {
- { PCI_VDEVICE(NI, 0x0162), BOARD_PCIMIO_16XE_50 }, /* 0x1620? */
- { PCI_VDEVICE(NI, 0x1170), BOARD_PCIMIO_16XE_10 },
- { PCI_VDEVICE(NI, 0x1180), BOARD_PCIMIO_16E_1 },
- { PCI_VDEVICE(NI, 0x1190), BOARD_PCIMIO_16E_4 },
- { PCI_VDEVICE(NI, 0x11b0), BOARD_PXI6070E },
- { PCI_VDEVICE(NI, 0x11c0), BOARD_PXI6040E },
- { PCI_VDEVICE(NI, 0x11d0), BOARD_PXI6030E },
- { PCI_VDEVICE(NI, 0x1270), BOARD_PCI6032E },
- { PCI_VDEVICE(NI, 0x1330), BOARD_PCI6031E },
- { PCI_VDEVICE(NI, 0x1340), BOARD_PCI6033E },
- { PCI_VDEVICE(NI, 0x1350), BOARD_PCI6071E },
- { PCI_VDEVICE(NI, 0x14e0), BOARD_PCI6110 },
- { PCI_VDEVICE(NI, 0x14f0), BOARD_PCI6111 },
- { PCI_VDEVICE(NI, 0x1580), BOARD_PXI6031E },
- { PCI_VDEVICE(NI, 0x15b0), BOARD_PXI6071E },
- { PCI_VDEVICE(NI, 0x1880), BOARD_PCI6711 },
- { PCI_VDEVICE(NI, 0x1870), BOARD_PCI6713 },
- { PCI_VDEVICE(NI, 0x18b0), BOARD_PCI6052E },
- { PCI_VDEVICE(NI, 0x18c0), BOARD_PXI6052E },
- { PCI_VDEVICE(NI, 0x2410), BOARD_PCI6733 },
- { PCI_VDEVICE(NI, 0x2420), BOARD_PXI6733 },
- { PCI_VDEVICE(NI, 0x2430), BOARD_PCI6731 },
- { PCI_VDEVICE(NI, 0x2890), BOARD_PCI6036E },
- { PCI_VDEVICE(NI, 0x28c0), BOARD_PCI6014 },
- { PCI_VDEVICE(NI, 0x2a60), BOARD_PCI6023E },
- { PCI_VDEVICE(NI, 0x2a70), BOARD_PCI6024E },
- { PCI_VDEVICE(NI, 0x2a80), BOARD_PCI6025E },
- { PCI_VDEVICE(NI, 0x2ab0), BOARD_PXI6025E },
- { PCI_VDEVICE(NI, 0x2b80), BOARD_PXI6713 },
- { PCI_VDEVICE(NI, 0x2b90), BOARD_PXI6711 },
- { PCI_VDEVICE(NI, 0x2c80), BOARD_PCI6035E },
- { PCI_VDEVICE(NI, 0x2ca0), BOARD_PCI6034E },
- { PCI_VDEVICE(NI, 0x70aa), BOARD_PCI6229 },
- { PCI_VDEVICE(NI, 0x70ab), BOARD_PCI6259 },
- { PCI_VDEVICE(NI, 0x70ac), BOARD_PCI6289 },
- { PCI_VDEVICE(NI, 0x70af), BOARD_PCI6221 },
- { PCI_VDEVICE(NI, 0x70b0), BOARD_PCI6220 },
- { PCI_VDEVICE(NI, 0x70b4), BOARD_PCI6250 },
- { PCI_VDEVICE(NI, 0x70b6), BOARD_PCI6280 },
- { PCI_VDEVICE(NI, 0x70b7), BOARD_PCI6254 },
- { PCI_VDEVICE(NI, 0x70b8), BOARD_PCI6251 },
- { PCI_VDEVICE(NI, 0x70bc), BOARD_PCI6284 },
- { PCI_VDEVICE(NI, 0x70bd), BOARD_PCI6281 },
- { PCI_VDEVICE(NI, 0x70bf), BOARD_PXI6281 },
- { PCI_VDEVICE(NI, 0x70c0), BOARD_PCI6143 },
- { PCI_VDEVICE(NI, 0x70f2), BOARD_PCI6224 },
- { PCI_VDEVICE(NI, 0x70f3), BOARD_PXI6224 },
- { PCI_VDEVICE(NI, 0x710d), BOARD_PXI6143 },
- { PCI_VDEVICE(NI, 0x716c), BOARD_PCI6225 },
- { PCI_VDEVICE(NI, 0x716d), BOARD_PXI6225 },
- { PCI_VDEVICE(NI, 0x717f), BOARD_PCIE6259 },
- { PCI_VDEVICE(NI, 0x71bc), BOARD_PCI6221_37PIN },
- { PCI_VDEVICE(NI, 0x717d), BOARD_PCIE6251 },
- { PCI_VDEVICE(NI, 0x72e8), BOARD_PXIE6251 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ni_pcimio_pci_table);
-
-static struct pci_driver ni_pcimio_pci_driver = {
- .name = "ni_pcimio",
- .id_table = ni_pcimio_pci_table,
- .probe = ni_pcimio_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(ni_pcimio_driver, ni_pcimio_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
deleted file mode 100644
index 1d5af25b92a8..000000000000
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ /dev/null
@@ -1,1062 +0,0 @@
-/*
- module/ni_stc.h
- Register descriptions for NI DAQ-STC chip
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
- References:
- DAQ-STC Technical Reference Manual
-*/
-
-#ifndef _COMEDI_NI_STC_H
-#define _COMEDI_NI_STC_H
-
-#include "ni_tio.h"
-
-/*
- * Registers in the National Instruments DAQ-STC chip
- */
-
-#define NISTC_INTA_ACK_REG 2
-#define NISTC_INTA_ACK_G0_GATE BIT(15)
-#define NISTC_INTA_ACK_G0_TC BIT(14)
-#define NISTC_INTA_ACK_AI_ERR BIT(13)
-#define NISTC_INTA_ACK_AI_STOP BIT(12)
-#define NISTC_INTA_ACK_AI_START BIT(11)
-#define NISTC_INTA_ACK_AI_START2 BIT(10)
-#define NISTC_INTA_ACK_AI_START1 BIT(9)
-#define NISTC_INTA_ACK_AI_SC_TC BIT(8)
-#define NISTC_INTA_ACK_AI_SC_TC_ERR BIT(7)
-#define NISTC_INTA_ACK_G0_TC_ERR BIT(6)
-#define NISTC_INTA_ACK_G0_GATE_ERR BIT(5)
-#define NISTC_INTA_ACK_AI_ALL (NISTC_INTA_ACK_AI_ERR | \
- NISTC_INTA_ACK_AI_STOP | \
- NISTC_INTA_ACK_AI_START | \
- NISTC_INTA_ACK_AI_START2 | \
- NISTC_INTA_ACK_AI_START1 | \
- NISTC_INTA_ACK_AI_SC_TC | \
- NISTC_INTA_ACK_AI_SC_TC_ERR)
-
-#define NISTC_INTB_ACK_REG 3
-#define NISTC_INTB_ACK_G1_GATE BIT(15)
-#define NISTC_INTB_ACK_G1_TC BIT(14)
-#define NISTC_INTB_ACK_AO_ERR BIT(13)
-#define NISTC_INTB_ACK_AO_STOP BIT(12)
-#define NISTC_INTB_ACK_AO_START BIT(11)
-#define NISTC_INTB_ACK_AO_UPDATE BIT(10)
-#define NISTC_INTB_ACK_AO_START1 BIT(9)
-#define NISTC_INTB_ACK_AO_BC_TC BIT(8)
-#define NISTC_INTB_ACK_AO_UC_TC BIT(7)
-#define NISTC_INTB_ACK_AO_UI2_TC BIT(6)
-#define NISTC_INTB_ACK_AO_UI2_TC_ERR BIT(5)
-#define NISTC_INTB_ACK_AO_BC_TC_ERR BIT(4)
-#define NISTC_INTB_ACK_AO_BC_TC_TRIG_ERR BIT(3)
-#define NISTC_INTB_ACK_G1_TC_ERR BIT(2)
-#define NISTC_INTB_ACK_G1_GATE_ERR BIT(1)
-#define NISTC_INTB_ACK_AO_ALL (NISTC_INTB_ACK_AO_ERR | \
- NISTC_INTB_ACK_AO_STOP | \
- NISTC_INTB_ACK_AO_START | \
- NISTC_INTB_ACK_AO_UPDATE | \
- NISTC_INTB_ACK_AO_START1 | \
- NISTC_INTB_ACK_AO_BC_TC | \
- NISTC_INTB_ACK_AO_UC_TC | \
- NISTC_INTB_ACK_AO_BC_TC_ERR | \
- NISTC_INTB_ACK_AO_BC_TC_TRIG_ERR)
-
-#define NISTC_AI_CMD2_REG 4
-#define NISTC_AI_CMD2_END_ON_SC_TC BIT(15)
-#define NISTC_AI_CMD2_END_ON_EOS BIT(14)
-#define NISTC_AI_CMD2_START1_DISABLE BIT(11)
-#define NISTC_AI_CMD2_SC_SAVE_TRACE BIT(10)
-#define NISTC_AI_CMD2_SI_SW_ON_SC_TC BIT(9)
-#define NISTC_AI_CMD2_SI_SW_ON_STOP BIT(8)
-#define NISTC_AI_CMD2_SI_SW_ON_TC BIT(7)
-#define NISTC_AI_CMD2_SC_SW_ON_TC BIT(4)
-#define NISTC_AI_CMD2_STOP_PULSE BIT(3)
-#define NISTC_AI_CMD2_START_PULSE BIT(2)
-#define NISTC_AI_CMD2_START2_PULSE BIT(1)
-#define NISTC_AI_CMD2_START1_PULSE BIT(0)
-
-#define NISTC_AO_CMD2_REG 5
-#define NISTC_AO_CMD2_END_ON_BC_TC(x) (((x) & 0x3) << 14)
-#define NISTC_AO_CMD2_START_STOP_GATE_ENA BIT(13)
-#define NISTC_AO_CMD2_UC_SAVE_TRACE BIT(12)
-#define NISTC_AO_CMD2_BC_GATE_ENA BIT(11)
-#define NISTC_AO_CMD2_BC_SAVE_TRACE BIT(10)
-#define NISTC_AO_CMD2_UI_SW_ON_BC_TC BIT(9)
-#define NISTC_AO_CMD2_UI_SW_ON_STOP BIT(8)
-#define NISTC_AO_CMD2_UI_SW_ON_TC BIT(7)
-#define NISTC_AO_CMD2_UC_SW_ON_BC_TC BIT(6)
-#define NISTC_AO_CMD2_UC_SW_ON_TC BIT(5)
-#define NISTC_AO_CMD2_BC_SW_ON_TC BIT(4)
-#define NISTC_AO_CMD2_MUTE_B BIT(3)
-#define NISTC_AO_CMD2_MUTE_A BIT(2)
-#define NISTC_AO_CMD2_UPDATE2_PULSE BIT(1)
-#define NISTC_AO_CMD2_START1_PULSE BIT(0)
-
-#define NISTC_G0_CMD_REG 6
-#define NISTC_G1_CMD_REG 7
-
-#define NISTC_AI_CMD1_REG 8
-#define NISTC_AI_CMD1_ATRIG_RESET BIT(14)
-#define NISTC_AI_CMD1_DISARM BIT(13)
-#define NISTC_AI_CMD1_SI2_ARM BIT(12)
-#define NISTC_AI_CMD1_SI2_LOAD BIT(11)
-#define NISTC_AI_CMD1_SI_ARM BIT(10)
-#define NISTC_AI_CMD1_SI_LOAD BIT(9)
-#define NISTC_AI_CMD1_DIV_ARM BIT(8)
-#define NISTC_AI_CMD1_DIV_LOAD BIT(7)
-#define NISTC_AI_CMD1_SC_ARM BIT(6)
-#define NISTC_AI_CMD1_SC_LOAD BIT(5)
-#define NISTC_AI_CMD1_SCAN_IN_PROG_PULSE BIT(4)
-#define NISTC_AI_CMD1_EXTMUX_CLK_PULSE BIT(3)
-#define NISTC_AI_CMD1_LOCALMUX_CLK_PULSE BIT(2)
-#define NISTC_AI_CMD1_SC_TC_PULSE BIT(1)
-#define NISTC_AI_CMD1_CONVERT_PULSE BIT(0)
-
-#define NISTC_AO_CMD1_REG 9
-#define NISTC_AO_CMD1_ATRIG_RESET BIT(15)
-#define NISTC_AO_CMD1_START_PULSE BIT(14)
-#define NISTC_AO_CMD1_DISARM BIT(13)
-#define NISTC_AO_CMD1_UI2_ARM_DISARM BIT(12)
-#define NISTC_AO_CMD1_UI2_LOAD BIT(11)
-#define NISTC_AO_CMD1_UI_ARM BIT(10)
-#define NISTC_AO_CMD1_UI_LOAD BIT(9)
-#define NISTC_AO_CMD1_UC_ARM BIT(8)
-#define NISTC_AO_CMD1_UC_LOAD BIT(7)
-#define NISTC_AO_CMD1_BC_ARM BIT(6)
-#define NISTC_AO_CMD1_BC_LOAD BIT(5)
-#define NISTC_AO_CMD1_DAC1_UPDATE_MODE BIT(4)
-#define NISTC_AO_CMD1_LDAC1_SRC_SEL BIT(3)
-#define NISTC_AO_CMD1_DAC0_UPDATE_MODE BIT(2)
-#define NISTC_AO_CMD1_LDAC0_SRC_SEL BIT(1)
-#define NISTC_AO_CMD1_UPDATE_PULSE BIT(0)
-
-#define NISTC_DIO_OUT_REG 10
-#define NISTC_DIO_OUT_SERIAL(x) (((x) & 0xff) << 8)
-#define NISTC_DIO_OUT_SERIAL_MASK NISTC_DIO_OUT_SERIAL(0xff)
-#define NISTC_DIO_OUT_PARALLEL(x) ((x) & 0xff)
-#define NISTC_DIO_OUT_PARALLEL_MASK NISTC_DIO_OUT_PARALLEL(0xff)
-#define NISTC_DIO_SDIN BIT(4)
-#define NISTC_DIO_SDOUT BIT(0)
-
-#define NISTC_DIO_CTRL_REG 11
-#define NISTC_DIO_SDCLK BIT(11)
-#define NISTC_DIO_CTRL_HW_SER_TIMEBASE BIT(10)
-#define NISTC_DIO_CTRL_HW_SER_ENA BIT(9)
-#define NISTC_DIO_CTRL_HW_SER_START BIT(8)
-#define NISTC_DIO_CTRL_DIR(x) ((x) & 0xff)
-#define NISTC_DIO_CTRL_DIR_MASK NISTC_DIO_CTRL_DIR(0xff)
-
-#define NISTC_AI_MODE1_REG 12
-#define NISTC_AI_MODE1_CONVERT_SRC(x) (((x) & 0x1f) << 11)
-#define NISTC_AI_MODE1_SI_SRC(x) (((x) & 0x1f) << 6)
-#define NISTC_AI_MODE1_CONVERT_POLARITY BIT(5)
-#define NISTC_AI_MODE1_SI_POLARITY BIT(4)
-#define NISTC_AI_MODE1_START_STOP BIT(3)
-#define NISTC_AI_MODE1_RSVD BIT(2)
-#define NISTC_AI_MODE1_CONTINUOUS BIT(1)
-#define NISTC_AI_MODE1_TRIGGER_ONCE BIT(0)
-
-#define NISTC_AI_MODE2_REG 13
-#define NISTC_AI_MODE2_SC_GATE_ENA BIT(15)
-#define NISTC_AI_MODE2_START_STOP_GATE_ENA BIT(14)
-#define NISTC_AI_MODE2_PRE_TRIGGER BIT(13)
-#define NISTC_AI_MODE2_EXTMUX_PRESENT BIT(12)
-#define NISTC_AI_MODE2_SI2_INIT_LOAD_SRC BIT(9)
-#define NISTC_AI_MODE2_SI2_RELOAD_MODE BIT(8)
-#define NISTC_AI_MODE2_SI_INIT_LOAD_SRC BIT(7)
-#define NISTC_AI_MODE2_SI_RELOAD_MODE(x) (((x) & 0x7) << 4)
-#define NISTC_AI_MODE2_SI_WR_SWITCH BIT(3)
-#define NISTC_AI_MODE2_SC_INIT_LOAD_SRC BIT(2)
-#define NISTC_AI_MODE2_SC_RELOAD_MODE BIT(1)
-#define NISTC_AI_MODE2_SC_WR_SWITCH BIT(0)
-
-#define NISTC_AI_SI_LOADA_REG 14
-#define NISTC_AI_SI_LOADB_REG 16
-#define NISTC_AI_SC_LOADA_REG 18
-#define NISTC_AI_SC_LOADB_REG 20
-#define NISTC_AI_SI2_LOADA_REG 23
-#define NISTC_AI_SI2_LOADB_REG 25
-
-#define NISTC_G0_MODE_REG 26
-#define NISTC_G1_MODE_REG 27
-#define NISTC_G0_LOADA_REG 28
-#define NISTC_G0_LOADB_REG 30
-#define NISTC_G1_LOADA_REG 32
-#define NISTC_G1_LOADB_REG 34
-#define NISTC_G0_INPUT_SEL_REG 36
-#define NISTC_G1_INPUT_SEL_REG 37
-
-#define NISTC_AO_MODE1_REG 38
-#define NISTC_AO_MODE1_UPDATE_SRC(x) (((x) & 0x1f) << 11)
-#define NISTC_AO_MODE1_UPDATE_SRC_MASK NISTC_AO_MODE1_UPDATE_SRC(0x1f)
-#define NISTC_AO_MODE1_UI_SRC(x) (((x) & 0x1f) << 6)
-#define NISTC_AO_MODE1_UI_SRC_MASK NISTC_AO_MODE1_UI_SRC(0x1f)
-#define NISTC_AO_MODE1_MULTI_CHAN BIT(5)
-#define NISTC_AO_MODE1_UPDATE_SRC_POLARITY BIT(4)
-#define NISTC_AO_MODE1_UI_SRC_POLARITY BIT(3)
-#define NISTC_AO_MODE1_UC_SW_EVERY_TC BIT(2)
-#define NISTC_AO_MODE1_CONTINUOUS BIT(1)
-#define NISTC_AO_MODE1_TRIGGER_ONCE BIT(0)
-
-#define NISTC_AO_MODE2_REG 39
-#define NISTC_AO_MODE2_FIFO_MODE(x) (((x) & 0x3) << 14)
-#define NISTC_AO_MODE2_FIFO_MODE_MASK NISTC_AO_MODE2_FIFO_MODE(3)
-#define NISTC_AO_MODE2_FIFO_MODE_E NISTC_AO_MODE2_FIFO_MODE(0)
-#define NISTC_AO_MODE2_FIFO_MODE_HF NISTC_AO_MODE2_FIFO_MODE(1)
-#define NISTC_AO_MODE2_FIFO_MODE_F NISTC_AO_MODE2_FIFO_MODE(2)
-#define NISTC_AO_MODE2_FIFO_MODE_HF_F NISTC_AO_MODE2_FIFO_MODE(3)
-#define NISTC_AO_MODE2_FIFO_REXMIT_ENA BIT(13)
-#define NISTC_AO_MODE2_START1_DISABLE BIT(12)
-#define NISTC_AO_MODE2_UC_INIT_LOAD_SRC BIT(11)
-#define NISTC_AO_MODE2_UC_WR_SWITCH BIT(10)
-#define NISTC_AO_MODE2_UI2_INIT_LOAD_SRC BIT(9)
-#define NISTC_AO_MODE2_UI2_RELOAD_MODE BIT(8)
-#define NISTC_AO_MODE2_UI_INIT_LOAD_SRC BIT(7)
-#define NISTC_AO_MODE2_UI_RELOAD_MODE(x) (((x) & 0x7) << 4)
-#define NISTC_AO_MODE2_UI_WR_SWITCH BIT(3)
-#define NISTC_AO_MODE2_BC_INIT_LOAD_SRC BIT(2)
-#define NISTC_AO_MODE2_BC_RELOAD_MODE BIT(1)
-#define NISTC_AO_MODE2_BC_WR_SWITCH BIT(0)
-
-#define NISTC_AO_UI_LOADA_REG 40
-#define NISTC_AO_UI_LOADB_REG 42
-#define NISTC_AO_BC_LOADA_REG 44
-#define NISTC_AO_BC_LOADB_REG 46
-#define NISTC_AO_UC_LOADA_REG 48
-#define NISTC_AO_UC_LOADB_REG 50
-
-#define NISTC_CLK_FOUT_REG 56
-#define NISTC_CLK_FOUT_ENA BIT(15)
-#define NISTC_CLK_FOUT_TIMEBASE_SEL BIT(14)
-#define NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 BIT(13)
-#define NISTC_CLK_FOUT_SLOW_DIV2 BIT(12)
-#define NISTC_CLK_FOUT_SLOW_TIMEBASE BIT(11)
-#define NISTC_CLK_FOUT_G_SRC_DIV2 BIT(10)
-#define NISTC_CLK_FOUT_TO_BOARD_DIV2 BIT(9)
-#define NISTC_CLK_FOUT_TO_BOARD BIT(8)
-#define NISTC_CLK_FOUT_AI_OUT_DIV2 BIT(7)
-#define NISTC_CLK_FOUT_AI_SRC_DIV2 BIT(6)
-#define NISTC_CLK_FOUT_AO_OUT_DIV2 BIT(5)
-#define NISTC_CLK_FOUT_AO_SRC_DIV2 BIT(4)
-#define NISTC_CLK_FOUT_DIVIDER(x) (((x) & 0xf) << 0)
-#define NISTC_CLK_FOUT_TO_DIVIDER(x) (((x) >> 0) & 0xf)
-#define NISTC_CLK_FOUT_DIVIDER_MASK NISTC_CLK_FOUT_DIVIDER(0xf)
-
-#define NISTC_IO_BIDIR_PIN_REG 57
-
-#define NISTC_RTSI_TRIG_DIR_REG 58
-#define NISTC_RTSI_TRIG_OLD_CLK_CHAN 7
-#define NISTC_RTSI_TRIG_NUM_CHAN(_m) ((_m) ? 8 : 7)
-#define NISTC_RTSI_TRIG_DIR(_c, _m) ((_m) ? BIT(8 + (_c)) : BIT(7 + (_c)))
-#define NISTC_RTSI_TRIG_USE_CLK BIT(1)
-#define NISTC_RTSI_TRIG_DRV_CLK BIT(0)
-
-#define NISTC_INT_CTRL_REG 59
-#define NISTC_INT_CTRL_INTB_ENA BIT(15)
-#define NISTC_INT_CTRL_INTB_SEL(x) (((x) & 0x7) << 12)
-#define NISTC_INT_CTRL_INTA_ENA BIT(11)
-#define NISTC_INT_CTRL_INTA_SEL(x) (((x) & 0x7) << 8)
-#define NISTC_INT_CTRL_PASSTHRU0_POL BIT(3)
-#define NISTC_INT_CTRL_PASSTHRU1_POL BIT(2)
-#define NISTC_INT_CTRL_3PIN_INT BIT(1)
-#define NISTC_INT_CTRL_INT_POL BIT(0)
-
-#define NISTC_AI_OUT_CTRL_REG 60
-#define NISTC_AI_OUT_CTRL_START_SEL BIT(10)
-#define NISTC_AI_OUT_CTRL_SCAN_IN_PROG_SEL(x) (((x) & 0x3) << 8)
-#define NISTC_AI_OUT_CTRL_EXTMUX_CLK_SEL(x) (((x) & 0x3) << 6)
-#define NISTC_AI_OUT_CTRL_LOCALMUX_CLK_SEL(x) (((x) & 0x3) << 4)
-#define NISTC_AI_OUT_CTRL_SC_TC_SEL(x) (((x) & 0x3) << 2)
-#define NISTC_AI_OUT_CTRL_CONVERT_SEL(x) (((x) & 0x3) << 0)
-#define NISTC_AI_OUT_CTRL_CONVERT_HIGH_Z NISTC_AI_OUT_CTRL_CONVERT_SEL(0)
-#define NISTC_AI_OUT_CTRL_CONVERT_GND NISTC_AI_OUT_CTRL_CONVERT_SEL(1)
-#define NISTC_AI_OUT_CTRL_CONVERT_LOW NISTC_AI_OUT_CTRL_CONVERT_SEL(2)
-#define NISTC_AI_OUT_CTRL_CONVERT_HIGH NISTC_AI_OUT_CTRL_CONVERT_SEL(3)
-
-#define NISTC_ATRIG_ETC_REG 61
-#define NISTC_ATRIG_ETC_GPFO_1_ENA BIT(15)
-#define NISTC_ATRIG_ETC_GPFO_0_ENA BIT(14)
-#define NISTC_ATRIG_ETC_GPFO_0_SEL(x) (((x) & 0x3) << 11)
-#define NISTC_ATRIG_ETC_GPFO_1_SEL BIT(7)
-#define NISTC_ATRIG_ETC_DRV BIT(4)
-#define NISTC_ATRIG_ETC_ENA BIT(3)
-#define NISTC_ATRIG_ETC_MODE(x) (((x) & 0x7) << 0)
-
-#define NISTC_AI_START_STOP_REG 62
-#define NISTC_AI_START_POLARITY BIT(15)
-#define NISTC_AI_STOP_POLARITY BIT(14)
-#define NISTC_AI_STOP_SYNC BIT(13)
-#define NISTC_AI_STOP_EDGE BIT(12)
-#define NISTC_AI_STOP_SEL(x) (((x) & 0x1f) << 7)
-#define NISTC_AI_START_SYNC BIT(6)
-#define NISTC_AI_START_EDGE BIT(5)
-#define NISTC_AI_START_SEL(x) (((x) & 0x1f) << 0)
-
-#define NISTC_AI_TRIG_SEL_REG 63
-#define NISTC_AI_TRIG_START1_POLARITY BIT(15)
-#define NISTC_AI_TRIG_START2_POLARITY BIT(14)
-#define NISTC_AI_TRIG_START2_SYNC BIT(13)
-#define NISTC_AI_TRIG_START2_EDGE BIT(12)
-#define NISTC_AI_TRIG_START2_SEL(x) (((x) & 0x1f) << 7)
-#define NISTC_AI_TRIG_START1_SYNC BIT(6)
-#define NISTC_AI_TRIG_START1_EDGE BIT(5)
-#define NISTC_AI_TRIG_START1_SEL(x) (((x) & 0x1f) << 0)
-
-#define NISTC_AI_DIV_LOADA_REG 64
-
-#define NISTC_AO_START_SEL_REG 66
-#define NISTC_AO_START_UI2_SW_GATE BIT(15)
-#define NISTC_AO_START_UI2_EXT_GATE_POL BIT(14)
-#define NISTC_AO_START_POLARITY BIT(13)
-#define NISTC_AO_START_AOFREQ_ENA BIT(12)
-#define NISTC_AO_START_UI2_EXT_GATE_SEL(x) (((x) & 0x1f) << 7)
-#define NISTC_AO_START_SYNC BIT(6)
-#define NISTC_AO_START_EDGE BIT(5)
-#define NISTC_AO_START_SEL(x) (((x) & 0x1f) << 0)
-
-#define NISTC_AO_TRIG_SEL_REG 67
-#define NISTC_AO_TRIG_UI2_EXT_GATE_ENA BIT(15)
-#define NISTC_AO_TRIG_DELAYED_START1 BIT(14)
-#define NISTC_AO_TRIG_START1_POLARITY BIT(13)
-#define NISTC_AO_TRIG_UI2_SRC_POLARITY BIT(12)
-#define NISTC_AO_TRIG_UI2_SRC_SEL(x) (((x) & 0x1f) << 7)
-#define NISTC_AO_TRIG_START1_SYNC BIT(6)
-#define NISTC_AO_TRIG_START1_EDGE BIT(5)
-#define NISTC_AO_TRIG_START1_SEL(x) (((x) & 0x1f) << 0)
-#define NISTC_AO_TRIG_START1_SEL_MASK NISTC_AO_TRIG_START1_SEL(0x1f)
-
-#define NISTC_G0_AUTOINC_REG 68
-#define NISTC_G1_AUTOINC_REG 69
-
-#define NISTC_AO_MODE3_REG 70
-#define NISTC_AO_MODE3_UI2_SW_NEXT_TC BIT(13)
-#define NISTC_AO_MODE3_UC_SW_EVERY_BC_TC BIT(12)
-#define NISTC_AO_MODE3_TRIG_LEN BIT(11)
-#define NISTC_AO_MODE3_STOP_ON_OVERRUN_ERR BIT(5)
-#define NISTC_AO_MODE3_STOP_ON_BC_TC_TRIG_ERR BIT(4)
-#define NISTC_AO_MODE3_STOP_ON_BC_TC_ERR BIT(3)
-#define NISTC_AO_MODE3_NOT_AN_UPDATE BIT(2)
-#define NISTC_AO_MODE3_SW_GATE BIT(1)
-#define NISTC_AO_MODE3_LAST_GATE_DISABLE BIT(0) /* M-Series only */
-
-#define NISTC_RESET_REG 72
-#define NISTC_RESET_SOFTWARE BIT(11)
-#define NISTC_RESET_AO_CFG_END BIT(9)
-#define NISTC_RESET_AI_CFG_END BIT(8)
-#define NISTC_RESET_AO_CFG_START BIT(5)
-#define NISTC_RESET_AI_CFG_START BIT(4)
-#define NISTC_RESET_G1 BIT(3)
-#define NISTC_RESET_G0 BIT(2)
-#define NISTC_RESET_AO BIT(1)
-#define NISTC_RESET_AI BIT(0)
-
-#define NISTC_INTA_ENA_REG 73
-#define NISTC_INTA2_ENA_REG 74
-#define NISTC_INTA_ENA_PASSTHRU0 BIT(9)
-#define NISTC_INTA_ENA_G0_GATE BIT(8)
-#define NISTC_INTA_ENA_AI_FIFO BIT(7)
-#define NISTC_INTA_ENA_G0_TC BIT(6)
-#define NISTC_INTA_ENA_AI_ERR BIT(5)
-#define NISTC_INTA_ENA_AI_STOP BIT(4)
-#define NISTC_INTA_ENA_AI_START BIT(3)
-#define NISTC_INTA_ENA_AI_START2 BIT(2)
-#define NISTC_INTA_ENA_AI_START1 BIT(1)
-#define NISTC_INTA_ENA_AI_SC_TC BIT(0)
-#define NISTC_INTA_ENA_AI_MASK (NISTC_INTA_ENA_AI_FIFO | \
- NISTC_INTA_ENA_AI_ERR | \
- NISTC_INTA_ENA_AI_STOP | \
- NISTC_INTA_ENA_AI_START | \
- NISTC_INTA_ENA_AI_START2 | \
- NISTC_INTA_ENA_AI_START1 | \
- NISTC_INTA_ENA_AI_SC_TC)
-
-#define NISTC_INTB_ENA_REG 75
-#define NISTC_INTB2_ENA_REG 76
-#define NISTC_INTB_ENA_PASSTHRU1 BIT(11)
-#define NISTC_INTB_ENA_G1_GATE BIT(10)
-#define NISTC_INTB_ENA_G1_TC BIT(9)
-#define NISTC_INTB_ENA_AO_FIFO BIT(8)
-#define NISTC_INTB_ENA_AO_UI2_TC BIT(7)
-#define NISTC_INTB_ENA_AO_UC_TC BIT(6)
-#define NISTC_INTB_ENA_AO_ERR BIT(5)
-#define NISTC_INTB_ENA_AO_STOP BIT(4)
-#define NISTC_INTB_ENA_AO_START BIT(3)
-#define NISTC_INTB_ENA_AO_UPDATE BIT(2)
-#define NISTC_INTB_ENA_AO_START1 BIT(1)
-#define NISTC_INTB_ENA_AO_BC_TC BIT(0)
-
-#define NISTC_AI_PERSONAL_REG 77
-#define NISTC_AI_PERSONAL_SHIFTIN_PW BIT(15)
-#define NISTC_AI_PERSONAL_EOC_POLARITY BIT(14)
-#define NISTC_AI_PERSONAL_SOC_POLARITY BIT(13)
-#define NISTC_AI_PERSONAL_SHIFTIN_POL BIT(12)
-#define NISTC_AI_PERSONAL_CONVERT_TIMEBASE BIT(11)
-#define NISTC_AI_PERSONAL_CONVERT_PW BIT(10)
-#define NISTC_AI_PERSONAL_CONVERT_ORIG_PULSE BIT(9)
-#define NISTC_AI_PERSONAL_FIFO_FLAGS_POL BIT(8)
-#define NISTC_AI_PERSONAL_OVERRUN_MODE BIT(7)
-#define NISTC_AI_PERSONAL_EXTMUX_CLK_PW BIT(6)
-#define NISTC_AI_PERSONAL_LOCALMUX_CLK_PW BIT(5)
-#define NISTC_AI_PERSONAL_AIFREQ_POL BIT(4)
-
-#define NISTC_AO_PERSONAL_REG 78
-#define NISTC_AO_PERSONAL_MULTI_DACS BIT(15) /* M-Series only */
-#define NISTC_AO_PERSONAL_NUM_DAC BIT(14) /* 1:single; 0:dual */
-#define NISTC_AO_PERSONAL_FAST_CPU BIT(13) /* M-Series reserved */
-#define NISTC_AO_PERSONAL_TMRDACWR_PW BIT(12)
-#define NISTC_AO_PERSONAL_FIFO_FLAGS_POL BIT(11) /* M-Series reserved */
-#define NISTC_AO_PERSONAL_FIFO_ENA BIT(10)
-#define NISTC_AO_PERSONAL_AOFREQ_POL BIT(9) /* M-Series reserved */
-#define NISTC_AO_PERSONAL_DMA_PIO_CTRL BIT(8) /* M-Series reserved */
-#define NISTC_AO_PERSONAL_UPDATE_ORIG_PULSE BIT(7)
-#define NISTC_AO_PERSONAL_UPDATE_TIMEBASE BIT(6)
-#define NISTC_AO_PERSONAL_UPDATE_PW BIT(5)
-#define NISTC_AO_PERSONAL_BC_SRC_SEL BIT(4)
-#define NISTC_AO_PERSONAL_INTERVAL_BUFFER_MODE BIT(3)
-
-#define NISTC_RTSI_TRIGA_OUT_REG 79
-#define NISTC_RTSI_TRIGB_OUT_REG 80
-#define NISTC_RTSI_TRIGB_SUB_SEL1 BIT(15) /* not for M-Series */
-#define NISTC_RTSI_TRIG(_c, _s) (((_s) & 0xf) << (((_c) % 4) * 4))
-#define NISTC_RTSI_TRIG_MASK(_c) NISTC_RTSI_TRIG((_c), 0xf)
-#define NISTC_RTSI_TRIG_TO_SRC(_c, _b) (((_b) >> (((_c) % 4) * 4)) & 0xf)
-
-#define NISTC_RTSI_BOARD_REG 81
-
-#define NISTC_CFG_MEM_CLR_REG 82
-#define NISTC_ADC_FIFO_CLR_REG 83
-#define NISTC_DAC_FIFO_CLR_REG 84
-#define NISTC_WR_STROBE3_REG 85
-
-#define NISTC_AO_OUT_CTRL_REG 86
-#define NISTC_AO_OUT_CTRL_EXT_GATE_ENA BIT(15)
-#define NISTC_AO_OUT_CTRL_EXT_GATE_SEL(x) (((x) & 0x1f) << 10)
-#define NISTC_AO_OUT_CTRL_CHANS(x) (((x) & 0xf) << 6)
-#define NISTC_AO_OUT_CTRL_UPDATE2_SEL(x) (((x) & 0x3) << 4)
-#define NISTC_AO_OUT_CTRL_EXT_GATE_POL BIT(3)
-#define NISTC_AO_OUT_CTRL_UPDATE2_TOGGLE BIT(2)
-#define NISTC_AO_OUT_CTRL_UPDATE_SEL(x) (((x) & 0x3) << 0)
-#define NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ NISTC_AO_OUT_CTRL_UPDATE_SEL(0)
-#define NISTC_AO_OUT_CTRL_UPDATE_SEL_GND NISTC_AO_OUT_CTRL_UPDATE_SEL(1)
-#define NISTC_AO_OUT_CTRL_UPDATE_SEL_LOW NISTC_AO_OUT_CTRL_UPDATE_SEL(2)
-#define NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGH NISTC_AO_OUT_CTRL_UPDATE_SEL(3)
-
-#define NISTC_AI_MODE3_REG 87
-#define NISTC_AI_MODE3_TRIG_LEN BIT(15)
-#define NISTC_AI_MODE3_DELAY_START BIT(14)
-#define NISTC_AI_MODE3_SOFTWARE_GATE BIT(13)
-#define NISTC_AI_MODE3_SI_TRIG_DELAY BIT(12)
-#define NISTC_AI_MODE3_SI2_SRC_SEL BIT(11)
-#define NISTC_AI_MODE3_DELAYED_START2 BIT(10)
-#define NISTC_AI_MODE3_DELAYED_START1 BIT(9)
-#define NISTC_AI_MODE3_EXT_GATE_MODE BIT(8)
-#define NISTC_AI_MODE3_FIFO_MODE(x) (((x) & 0x3) << 6)
-#define NISTC_AI_MODE3_FIFO_MODE_NE NISTC_AI_MODE3_FIFO_MODE(0)
-#define NISTC_AI_MODE3_FIFO_MODE_HF NISTC_AI_MODE3_FIFO_MODE(1)
-#define NISTC_AI_MODE3_FIFO_MODE_F NISTC_AI_MODE3_FIFO_MODE(2)
-#define NISTC_AI_MODE3_FIFO_MODE_HF_E NISTC_AI_MODE3_FIFO_MODE(3)
-#define NISTC_AI_MODE3_EXT_GATE_POL BIT(5)
-#define NISTC_AI_MODE3_EXT_GATE_SEL(x) (((x) & 0x1f) << 0)
-
-#define NISTC_AI_STATUS1_REG 2
-#define NISTC_AI_STATUS1_INTA BIT(15)
-#define NISTC_AI_STATUS1_FIFO_F BIT(14)
-#define NISTC_AI_STATUS1_FIFO_HF BIT(13)
-#define NISTC_AI_STATUS1_FIFO_E BIT(12)
-#define NISTC_AI_STATUS1_OVERRUN BIT(11)
-#define NISTC_AI_STATUS1_OVERFLOW BIT(10)
-#define NISTC_AI_STATUS1_SC_TC_ERR BIT(9)
-#define NISTC_AI_STATUS1_OVER (NISTC_AI_STATUS1_OVERRUN | \
- NISTC_AI_STATUS1_OVERFLOW)
-#define NISTC_AI_STATUS1_ERR (NISTC_AI_STATUS1_OVER | \
- NISTC_AI_STATUS1_SC_TC_ERR)
-#define NISTC_AI_STATUS1_START2 BIT(8)
-#define NISTC_AI_STATUS1_START1 BIT(7)
-#define NISTC_AI_STATUS1_SC_TC BIT(6)
-#define NISTC_AI_STATUS1_START BIT(5)
-#define NISTC_AI_STATUS1_STOP BIT(4)
-#define NISTC_AI_STATUS1_G0_TC BIT(3)
-#define NISTC_AI_STATUS1_G0_GATE BIT(2)
-#define NISTC_AI_STATUS1_FIFO_REQ BIT(1)
-#define NISTC_AI_STATUS1_PASSTHRU0 BIT(0)
-
-#define NISTC_AO_STATUS1_REG 3
-#define NISTC_AO_STATUS1_INTB BIT(15)
-#define NISTC_AO_STATUS1_FIFO_F BIT(14)
-#define NISTC_AO_STATUS1_FIFO_HF BIT(13)
-#define NISTC_AO_STATUS1_FIFO_E BIT(12)
-#define NISTC_AO_STATUS1_BC_TC_ERR BIT(11)
-#define NISTC_AO_STATUS1_START BIT(10)
-#define NISTC_AO_STATUS1_OVERRUN BIT(9)
-#define NISTC_AO_STATUS1_START1 BIT(8)
-#define NISTC_AO_STATUS1_BC_TC BIT(7)
-#define NISTC_AO_STATUS1_UC_TC BIT(6)
-#define NISTC_AO_STATUS1_UPDATE BIT(5)
-#define NISTC_AO_STATUS1_UI2_TC BIT(4)
-#define NISTC_AO_STATUS1_G1_TC BIT(3)
-#define NISTC_AO_STATUS1_G1_GATE BIT(2)
-#define NISTC_AO_STATUS1_FIFO_REQ BIT(1)
-#define NISTC_AO_STATUS1_PASSTHRU1 BIT(0)
-
-#define NISTC_G01_STATUS_REG 4
-
-#define NISTC_AI_STATUS2_REG 5
-
-#define NISTC_AO_STATUS2_REG 6
-
-#define NISTC_DIO_IN_REG 7
-
-#define NISTC_G0_HW_SAVE_REG 8
-#define NISTC_G1_HW_SAVE_REG 10
-
-#define NISTC_G0_SAVE_REG 12
-#define NISTC_G1_SAVE_REG 14
-
-#define NISTC_AO_UI_SAVE_REG 16
-#define NISTC_AO_BC_SAVE_REG 18
-#define NISTC_AO_UC_SAVE_REG 20
-
-#define NISTC_STATUS1_REG 27
-#define NISTC_STATUS1_SERIO_IN_PROG BIT(12)
-
-#define NISTC_DIO_SERIAL_IN_REG 28
-
-#define NISTC_STATUS2_REG 29
-#define NISTC_STATUS2_AO_TMRDACWRS_IN_PROGRESS BIT(5)
-
-#define NISTC_AI_SI_SAVE_REG 64
-#define NISTC_AI_SC_SAVE_REG 66
-
-/*
- * PCI E Series Registers
- */
-#define NI_E_STC_WINDOW_ADDR_REG 0x00 /* rw16 */
-#define NI_E_STC_WINDOW_DATA_REG 0x02 /* rw16 */
-
-#define NI_E_STATUS_REG 0x01 /* r8 */
-#define NI_E_STATUS_AI_FIFO_LOWER_NE BIT(3)
-#define NI_E_STATUS_PROMOUT BIT(0)
-
-#define NI_E_DMA_AI_AO_SEL_REG 0x09 /* w8 */
-#define NI_E_DMA_AI_SEL(x) (((x) & 0xf) << 0)
-#define NI_E_DMA_AI_SEL_MASK NI_E_DMA_AI_SEL(0xf)
-#define NI_E_DMA_AO_SEL(x) (((x) & 0xf) << 4)
-#define NI_E_DMA_AO_SEL_MASK NI_E_DMA_AO_SEL(0xf)
-
-#define NI_E_DMA_G0_G1_SEL_REG 0x0b /* w8 */
-#define NI_E_DMA_G0_G1_SEL(_g, _c) (((_c) & 0xf) << ((_g) * 4))
-#define NI_E_DMA_G0_G1_SEL_MASK(_g) NI_E_DMA_G0_G1_SEL((_g), 0xf)
-
-#define NI_E_SERIAL_CMD_REG 0x0d /* w8 */
-#define NI_E_SERIAL_CMD_DAC_LD(x) BIT(3 + (x))
-#define NI_E_SERIAL_CMD_EEPROM_CS BIT(2)
-#define NI_E_SERIAL_CMD_SDATA BIT(1)
-#define NI_E_SERIAL_CMD_SCLK BIT(0)
-
-#define NI_E_MISC_CMD_REG 0x0f /* w8 */
-#define NI_E_MISC_CMD_INTEXT_ATRIG(x) (((x) & 0x1) << 7)
-#define NI_E_MISC_CMD_EXT_ATRIG NI_E_MISC_CMD_INTEXT_ATRIG(0)
-#define NI_E_MISC_CMD_INT_ATRIG NI_E_MISC_CMD_INTEXT_ATRIG(1)
-
-#define NI_E_AI_CFG_LO_REG 0x10 /* w16 */
-#define NI_E_AI_CFG_LO_LAST_CHAN BIT(15)
-#define NI_E_AI_CFG_LO_GEN_TRIG BIT(12)
-#define NI_E_AI_CFG_LO_DITHER BIT(9)
-#define NI_E_AI_CFG_LO_UNI BIT(8)
-#define NI_E_AI_CFG_LO_GAIN(x) ((x) << 0)
-
-#define NI_E_AI_CFG_HI_REG 0x12 /* w16 */
-#define NI_E_AI_CFG_HI_TYPE(x) (((x) & 0x7) << 12)
-#define NI_E_AI_CFG_HI_TYPE_DIFF NI_E_AI_CFG_HI_TYPE(1)
-#define NI_E_AI_CFG_HI_TYPE_COMMON NI_E_AI_CFG_HI_TYPE(2)
-#define NI_E_AI_CFG_HI_TYPE_GROUND NI_E_AI_CFG_HI_TYPE(3)
-#define NI_E_AI_CFG_HI_AC_COUPLE BIT(11)
-#define NI_E_AI_CFG_HI_CHAN(x) (((x) & 0x3f) << 0)
-
-#define NI_E_AO_CFG_REG 0x16 /* w16 */
-#define NI_E_AO_DACSEL(x) ((x) << 8)
-#define NI_E_AO_GROUND_REF BIT(3)
-#define NI_E_AO_EXT_REF BIT(2)
-#define NI_E_AO_DEGLITCH BIT(1)
-#define NI_E_AO_CFG_BIP BIT(0)
-
-#define NI_E_DAC_DIRECT_DATA_REG(x) (0x18 + ((x) * 2)) /* w16 */
-
-#define NI_E_8255_BASE 0x19 /* rw8 */
-
-#define NI_E_AI_FIFO_DATA_REG 0x1c /* r16 */
-
-#define NI_E_AO_FIFO_DATA_REG 0x1e /* w16 */
-
-/*
- * 611x registers (these boards differ from the e-series)
- */
-#define NI611X_MAGIC_REG 0x19 /* w8 (new) */
-#define NI611X_CALIB_CHAN_SEL_REG 0x1a /* w16 (new) */
-#define NI611X_AI_FIFO_DATA_REG 0x1c /* r32 (incompatible) */
-#define NI611X_AI_FIFO_OFFSET_LOAD_REG 0x05 /* r8 (new) */
-#define NI611X_AO_FIFO_DATA_REG 0x14 /* w32 (incompatible) */
-#define NI611X_CAL_GAIN_SEL_REG 0x05 /* w8 (new) */
-
-#define NI611X_AO_WINDOW_ADDR_REG 0x18
-#define NI611X_AO_WINDOW_DATA_REG 0x1e
-
-/*
- * 6143 registers
- */
-#define NI6143_MAGIC_REG 0x19 /* w8 */
-#define NI6143_DMA_G0_G1_SEL_REG 0x0b /* w8 */
-#define NI6143_PIPELINE_DELAY_REG 0x1f /* w8 */
-#define NI6143_EOC_SET_REG 0x1d /* w8 */
-#define NI6143_DMA_AI_SEL_REG 0x09 /* w8 */
-#define NI6143_AI_FIFO_DATA_REG 0x8c /* r32 */
-#define NI6143_AI_FIFO_FLAG_REG 0x84 /* w32 */
-#define NI6143_AI_FIFO_CTRL_REG 0x88 /* w32 */
-#define NI6143_AI_FIFO_STATUS_REG 0x88 /* r32 */
-#define NI6143_AI_FIFO_DMA_THRESH_REG 0x90 /* w32 */
-#define NI6143_AI_FIFO_WORDS_AVAIL_REG 0x94 /* w32 */
-
-#define NI6143_CALIB_CHAN_REG 0x42 /* w16 */
-#define NI6143_CALIB_CHAN_RELAY_ON BIT(15)
-#define NI6143_CALIB_CHAN_RELAY_OFF BIT(14)
-#define NI6143_CALIB_CHAN(x) (((x) & 0xf) << 0)
-#define NI6143_CALIB_CHAN_GND_GND NI6143_CALIB_CHAN(0) /* Offset Cal */
-#define NI6143_CALIB_CHAN_2V5_GND NI6143_CALIB_CHAN(2) /* 2.5V ref */
-#define NI6143_CALIB_CHAN_PWM_GND NI6143_CALIB_CHAN(5) /* +-5V Self Cal */
-#define NI6143_CALIB_CHAN_2V5_PWM NI6143_CALIB_CHAN(10) /* PWM Cal */
-#define NI6143_CALIB_CHAN_PWM_PWM NI6143_CALIB_CHAN(13) /* CMRR */
-#define NI6143_CALIB_CHAN_GND_PWM NI6143_CALIB_CHAN(14) /* PWM Cal */
-#define NI6143_CALIB_LO_TIME_REG 0x20 /* w16 */
-#define NI6143_CALIB_HI_TIME_REG 0x22 /* w16 */
-#define NI6143_RELAY_COUNTER_LOAD_REG 0x4c /* w32 */
-#define NI6143_SIGNATURE_REG 0x50 /* w32 */
-#define NI6143_RELEASE_DATE_REG 0x54 /* w32 */
-#define NI6143_RELEASE_OLDEST_DATE_REG 0x58 /* w32 */
-
-/*
- * 671x, 611x windowed ao registers
- */
-#define NI671X_DAC_DIRECT_DATA_REG(x) (0x00 + (x)) /* w16 */
-#define NI611X_AO_TIMED_REG 0x10 /* w16 */
-#define NI671X_AO_IMMEDIATE_REG 0x11 /* w16 */
-#define NI611X_AO_FIFO_OFFSET_LOAD_REG 0x13 /* w32 */
-#define NI67XX_AO_SP_UPDATES_REG 0x14 /* w16 */
-#define NI611X_AO_WAVEFORM_GEN_REG 0x15 /* w16 */
-#define NI611X_AO_MISC_REG 0x16 /* w16 */
-#define NI611X_AO_MISC_CLEAR_WG BIT(0)
-#define NI67XX_AO_CAL_CHAN_SEL_REG 0x17 /* w16 */
-#define NI67XX_AO_CFG2_REG 0x18 /* w16 */
-#define NI67XX_CAL_CMD_REG 0x19 /* w16 */
-#define NI67XX_CAL_STATUS_REG 0x1a /* r8 */
-#define NI67XX_CAL_STATUS_BUSY BIT(0)
-#define NI67XX_CAL_STATUS_OSC_DETECT BIT(1)
-#define NI67XX_CAL_STATUS_OVERRANGE BIT(2)
-#define NI67XX_CAL_DATA_REG 0x1b /* r16 */
-#define NI67XX_CAL_CFG_HI_REG 0x1c /* rw16 */
-#define NI67XX_CAL_CFG_LO_REG 0x1d /* rw16 */
-
-#define CS5529_CMD_CB BIT(7)
-#define CS5529_CMD_SINGLE_CONV BIT(6)
-#define CS5529_CMD_CONT_CONV BIT(5)
-#define CS5529_CMD_READ BIT(4)
-#define CS5529_CMD_REG(x) (((x) & 0x7) << 1)
-#define CS5529_CMD_REG_MASK CS5529_CMD_REG(7)
-#define CS5529_CMD_PWR_SAVE BIT(0)
-
-#define CS5529_OFFSET_REG CS5529_CMD_REG(0)
-#define CS5529_GAIN_REG CS5529_CMD_REG(1)
-#define CS5529_CONV_DATA_REG CS5529_CMD_REG(3)
-#define CS5529_SETUP_REG CS5529_CMD_REG(4)
-
-#define CS5529_CFG_REG CS5529_CMD_REG(2)
-#define CS5529_CFG_AOUT(x) BIT(22 + (x))
-#define CS5529_CFG_DOUT(x) BIT(18 + (x))
-#define CS5529_CFG_LOW_PWR_MODE BIT(16)
-#define CS5529_CFG_WORD_RATE(x) (((x) & 0x7) << 13)
-#define CS5529_CFG_WORD_RATE_MASK CS5529_CFG_WORD_RATE(0x7)
-#define CS5529_CFG_WORD_RATE_2180 CS5529_CFG_WORD_RATE(0)
-#define CS5529_CFG_WORD_RATE_1092 CS5529_CFG_WORD_RATE(1)
-#define CS5529_CFG_WORD_RATE_532 CS5529_CFG_WORD_RATE(2)
-#define CS5529_CFG_WORD_RATE_388 CS5529_CFG_WORD_RATE(3)
-#define CS5529_CFG_WORD_RATE_324 CS5529_CFG_WORD_RATE(4)
-#define CS5529_CFG_WORD_RATE_17444 CS5529_CFG_WORD_RATE(5)
-#define CS5529_CFG_WORD_RATE_8724 CS5529_CFG_WORD_RATE(6)
-#define CS5529_CFG_WORD_RATE_4364 CS5529_CFG_WORD_RATE(7)
-#define CS5529_CFG_UNIPOLAR BIT(12)
-#define CS5529_CFG_RESET BIT(7)
-#define CS5529_CFG_RESET_VALID BIT(6)
-#define CS5529_CFG_PORT_FLAG BIT(5)
-#define CS5529_CFG_PWR_SAVE_SEL BIT(4)
-#define CS5529_CFG_DONE_FLAG BIT(3)
-#define CS5529_CFG_CALIB(x) (((x) & 0x7) << 0)
-#define CS5529_CFG_CALIB_NONE CS5529_CFG_CALIB(0)
-#define CS5529_CFG_CALIB_OFFSET_SELF CS5529_CFG_CALIB(1)
-#define CS5529_CFG_CALIB_GAIN_SELF CS5529_CFG_CALIB(2)
-#define CS5529_CFG_CALIB_BOTH_SELF CS5529_CFG_CALIB(3)
-#define CS5529_CFG_CALIB_OFFSET_SYS CS5529_CFG_CALIB(5)
-#define CS5529_CFG_CALIB_GAIN_SYS CS5529_CFG_CALIB(6)
-
-/*
- * M-Series specific registers not handled by the DAQ-STC and GPCT register
- * remapping.
- */
-#define NI_M_CDIO_DMA_SEL_REG 0x007
-#define NI_M_CDIO_DMA_SEL_CDO(x) (((x) & 0xf) << 4)
-#define NI_M_CDIO_DMA_SEL_CDO_MASK NI_M_CDIO_DMA_SEL_CDO(0xf)
-#define NI_M_CDIO_DMA_SEL_CDI(x) (((x) & 0xf) << 0)
-#define NI_M_CDIO_DMA_SEL_CDI_MASK NI_M_CDIO_DMA_SEL_CDI(0xf)
-#define NI_M_SCXI_STATUS_REG 0x007
-#define NI_M_AI_AO_SEL_REG 0x009
-#define NI_M_G0_G1_SEL_REG 0x00b
-#define NI_M_MISC_CMD_REG 0x00f
-#define NI_M_SCXI_SER_DO_REG 0x011
-#define NI_M_SCXI_CTRL_REG 0x013
-#define NI_M_SCXI_OUT_ENA_REG 0x015
-#define NI_M_AI_FIFO_DATA_REG 0x01c
-#define NI_M_DIO_REG 0x024
-#define NI_M_DIO_DIR_REG 0x028
-#define NI_M_CAL_PWM_REG 0x040
-#define NI_M_CAL_PWM_HIGH_TIME(x) (((x) & 0xffff) << 16)
-#define NI_M_CAL_PWM_LOW_TIME(x) (((x) & 0xffff) << 0)
-#define NI_M_GEN_PWM_REG(x) (0x044 + ((x) * 2))
-#define NI_M_AI_CFG_FIFO_DATA_REG 0x05e
-#define NI_M_AI_CFG_LAST_CHAN BIT(14)
-#define NI_M_AI_CFG_DITHER BIT(13)
-#define NI_M_AI_CFG_POLARITY BIT(12)
-#define NI_M_AI_CFG_GAIN(x) (((x) & 0x7) << 9)
-#define NI_M_AI_CFG_CHAN_TYPE(x) (((x) & 0x7) << 6)
-#define NI_M_AI_CFG_CHAN_TYPE_MASK NI_M_AI_CFG_CHAN_TYPE(7)
-#define NI_M_AI_CFG_CHAN_TYPE_CALIB NI_M_AI_CFG_CHAN_TYPE(0)
-#define NI_M_AI_CFG_CHAN_TYPE_DIFF NI_M_AI_CFG_CHAN_TYPE(1)
-#define NI_M_AI_CFG_CHAN_TYPE_COMMON NI_M_AI_CFG_CHAN_TYPE(2)
-#define NI_M_AI_CFG_CHAN_TYPE_GROUND NI_M_AI_CFG_CHAN_TYPE(3)
-#define NI_M_AI_CFG_CHAN_TYPE_AUX NI_M_AI_CFG_CHAN_TYPE(5)
-#define NI_M_AI_CFG_CHAN_TYPE_GHOST NI_M_AI_CFG_CHAN_TYPE(7)
-#define NI_M_AI_CFG_BANK_SEL(x) ((((x) & 0x40) << 4) | ((x) & 0x30))
-#define NI_M_AI_CFG_CHAN_SEL(x) (((x) & 0xf) << 0)
-#define NI_M_INTC_ENA_REG 0x088
-#define NI_M_INTC_ENA BIT(0)
-#define NI_M_INTC_STATUS_REG 0x088
-#define NI_M_INTC_STATUS BIT(0)
-#define NI_M_ATRIG_CTRL_REG 0x08c
-#define NI_M_AO_SER_INT_ENA_REG 0x0a0
-#define NI_M_AO_SER_INT_ACK_REG 0x0a1
-#define NI_M_AO_SER_INT_STATUS_REG 0x0a1
-#define NI_M_AO_CALIB_REG 0x0a3
-#define NI_M_AO_FIFO_DATA_REG 0x0a4
-#define NI_M_PFI_FILTER_REG 0x0b0
-#define NI_M_PFI_FILTER_SEL(_c, _f) (((_f) & 0x3) << ((_c) * 2))
-#define NI_M_PFI_FILTER_SEL_MASK(_c) NI_M_PFI_FILTER_SEL((_c), 0x3)
-#define NI_M_RTSI_FILTER_REG 0x0b4
-#define NI_M_SCXI_LEGACY_COMPAT_REG 0x0bc
-#define NI_M_DAC_DIRECT_DATA_REG(x) (0x0c0 + ((x) * 4))
-#define NI_M_AO_WAVEFORM_ORDER_REG(x) (0x0c2 + ((x) * 4))
-#define NI_M_AO_CFG_BANK_REG(x) (0x0c3 + ((x) * 4))
-#define NI_M_AO_CFG_BANK_BIPOLAR BIT(7)
-#define NI_M_AO_CFG_BANK_UPDATE_TIMED BIT(6)
-#define NI_M_AO_CFG_BANK_REF(x) (((x) & 0x7) << 3)
-#define NI_M_AO_CFG_BANK_REF_MASK NI_M_AO_CFG_BANK_REF(7)
-#define NI_M_AO_CFG_BANK_REF_INT_10V NI_M_AO_CFG_BANK_REF(0)
-#define NI_M_AO_CFG_BANK_REF_INT_5V NI_M_AO_CFG_BANK_REF(1)
-#define NI_M_AO_CFG_BANK_OFFSET(x) (((x) & 0x7) << 0)
-#define NI_M_AO_CFG_BANK_OFFSET_MASK NI_M_AO_CFG_BANK_OFFSET(7)
-#define NI_M_AO_CFG_BANK_OFFSET_0V NI_M_AO_CFG_BANK_OFFSET(0)
-#define NI_M_AO_CFG_BANK_OFFSET_5V NI_M_AO_CFG_BANK_OFFSET(1)
-#define NI_M_RTSI_SHARED_MUX_REG 0x1a2
-#define NI_M_CLK_FOUT2_REG 0x1c4
-#define NI_M_CLK_FOUT2_RTSI_10MHZ BIT(7)
-#define NI_M_CLK_FOUT2_TIMEBASE3_PLL BIT(6)
-#define NI_M_CLK_FOUT2_TIMEBASE1_PLL BIT(5)
-#define NI_M_CLK_FOUT2_PLL_SRC(x) (((x) & 0x1f) << 0)
-#define NI_M_CLK_FOUT2_PLL_SRC_MASK NI_M_CLK_FOUT2_PLL_SRC(0x1f)
-#define NI_M_MAX_RTSI_CHAN 7
-#define NI_M_CLK_FOUT2_PLL_SRC_RTSI(x) (((x) == NI_M_MAX_RTSI_CHAN) \
- ? NI_M_CLK_FOUT2_PLL_SRC(0x1b) \
- : NI_M_CLK_FOUT2_PLL_SRC(0xb + (x)))
-#define NI_M_CLK_FOUT2_PLL_SRC_STAR NI_M_CLK_FOUT2_PLL_SRC(0x14)
-#define NI_M_CLK_FOUT2_PLL_SRC_PXI10 NI_M_CLK_FOUT2_PLL_SRC(0x1d)
-#define NI_M_PLL_CTRL_REG 0x1c6
-#define NI_M_PLL_CTRL_VCO_MODE(x) (((x) & 0x3) << 13)
-#define NI_M_PLL_CTRL_VCO_MODE_200_325MHZ NI_M_PLL_CTRL_VCO_MODE(0)
-#define NI_M_PLL_CTRL_VCO_MODE_175_225MHZ NI_M_PLL_CTRL_VCO_MODE(1)
-#define NI_M_PLL_CTRL_VCO_MODE_100_225MHZ NI_M_PLL_CTRL_VCO_MODE(2)
-#define NI_M_PLL_CTRL_VCO_MODE_75_150MHZ NI_M_PLL_CTRL_VCO_MODE(3)
-#define NI_M_PLL_CTRL_ENA BIT(12)
-#define NI_M_PLL_MAX_DIVISOR 0x10
-#define NI_M_PLL_CTRL_DIVISOR(x) (((x) & 0xf) << 8)
-#define NI_M_PLL_MAX_MULTIPLIER 0x100
-#define NI_M_PLL_CTRL_MULTIPLIER(x) (((x) & 0xff) << 0)
-#define NI_M_PLL_STATUS_REG 0x1c8
-#define NI_M_PLL_STATUS_LOCKED BIT(0)
-#define NI_M_PFI_OUT_SEL_REG(x) (0x1d0 + ((x) * 2))
-#define NI_M_PFI_CHAN(_c) (((_c) % 3) * 5)
-#define NI_M_PFI_OUT_SEL(_c, _s) (((_s) & 0x1f) << NI_M_PFI_CHAN(_c))
-#define NI_M_PFI_OUT_SEL_MASK(_c) (0x1f << NI_M_PFI_CHAN(_c))
-#define NI_M_PFI_OUT_SEL_TO_SRC(_c, _b) (((_b) >> NI_M_PFI_CHAN(_c)) & 0x1f)
-#define NI_M_PFI_DI_REG 0x1dc
-#define NI_M_PFI_DO_REG 0x1de
-#define NI_M_CFG_BYPASS_FIFO_REG 0x218
-#define NI_M_CFG_BYPASS_FIFO BIT(31)
-#define NI_M_CFG_BYPASS_AI_POLARITY BIT(22)
-#define NI_M_CFG_BYPASS_AI_DITHER BIT(21)
-#define NI_M_CFG_BYPASS_AI_GAIN(x) (((x) & 0x7) << 18)
-#define NI_M_CFG_BYPASS_AO_CAL(x) (((x) & 0xf) << 15)
-#define NI_M_CFG_BYPASS_AO_CAL_MASK NI_M_CFG_BYPASS_AO_CAL(0xf)
-#define NI_M_CFG_BYPASS_AI_MODE_MUX(x) (((x) & 0x3) << 13)
-#define NI_M_CFG_BYPASS_AI_MODE_MUX_MASK NI_M_CFG_BYPASS_AI_MODE_MUX(3)
-#define NI_M_CFG_BYPASS_AI_CAL_NEG(x) (((x) & 0x7) << 10)
-#define NI_M_CFG_BYPASS_AI_CAL_NEG_MASK NI_M_CFG_BYPASS_AI_CAL_NEG(7)
-#define NI_M_CFG_BYPASS_AI_CAL_POS(x) (((x) & 0x7) << 7)
-#define NI_M_CFG_BYPASS_AI_CAL_POS_MASK NI_M_CFG_BYPASS_AI_CAL_POS(7)
-#define NI_M_CFG_BYPASS_AI_CAL_MASK (NI_M_CFG_BYPASS_AI_CAL_POS_MASK | \
- NI_M_CFG_BYPASS_AI_CAL_NEG_MASK | \
- NI_M_CFG_BYPASS_AI_MODE_MUX_MASK | \
- NI_M_CFG_BYPASS_AO_CAL_MASK)
-#define NI_M_CFG_BYPASS_AI_BANK(x) (((x) & 0xf) << 3)
-#define NI_M_CFG_BYPASS_AI_BANK_MASK NI_M_CFG_BYPASS_AI_BANK(0xf)
-#define NI_M_CFG_BYPASS_AI_CHAN(x) (((x) & 0x7) << 0)
-#define NI_M_CFG_BYPASS_AI_CHAN_MASK NI_M_CFG_BYPASS_AI_CHAN(7)
-#define NI_M_SCXI_DIO_ENA_REG 0x21c
-#define NI_M_CDI_FIFO_DATA_REG 0x220
-#define NI_M_CDO_FIFO_DATA_REG 0x220
-#define NI_M_CDIO_STATUS_REG 0x224
-#define NI_M_CDIO_STATUS_CDI_OVERFLOW BIT(20)
-#define NI_M_CDIO_STATUS_CDI_OVERRUN BIT(19)
-#define NI_M_CDIO_STATUS_CDI_ERROR (NI_M_CDIO_STATUS_CDI_OVERFLOW | \
- NI_M_CDIO_STATUS_CDI_OVERRUN)
-#define NI_M_CDIO_STATUS_CDI_FIFO_REQ BIT(18)
-#define NI_M_CDIO_STATUS_CDI_FIFO_FULL BIT(17)
-#define NI_M_CDIO_STATUS_CDI_FIFO_EMPTY BIT(16)
-#define NI_M_CDIO_STATUS_CDO_UNDERFLOW BIT(4)
-#define NI_M_CDIO_STATUS_CDO_OVERRUN BIT(3)
-#define NI_M_CDIO_STATUS_CDO_ERROR (NI_M_CDIO_STATUS_CDO_UNDERFLOW | \
- NI_M_CDIO_STATUS_CDO_OVERRUN)
-#define NI_M_CDIO_STATUS_CDO_FIFO_REQ BIT(2)
-#define NI_M_CDIO_STATUS_CDO_FIFO_FULL BIT(1)
-#define NI_M_CDIO_STATUS_CDO_FIFO_EMPTY BIT(0)
-#define NI_M_CDIO_CMD_REG 0x224
-#define NI_M_CDI_CMD_SW_UPDATE BIT(20)
-#define NI_M_CDO_CMD_SW_UPDATE BIT(19)
-#define NI_M_CDO_CMD_F_E_INT_ENA_CLR BIT(17)
-#define NI_M_CDO_CMD_F_E_INT_ENA_SET BIT(16)
-#define NI_M_CDI_CMD_ERR_INT_CONFIRM BIT(15)
-#define NI_M_CDO_CMD_ERR_INT_CONFIRM BIT(14)
-#define NI_M_CDI_CMD_F_REQ_INT_ENA_CLR BIT(13)
-#define NI_M_CDI_CMD_F_REQ_INT_ENA_SET BIT(12)
-#define NI_M_CDO_CMD_F_REQ_INT_ENA_CLR BIT(11)
-#define NI_M_CDO_CMD_F_REQ_INT_ENA_SET BIT(10)
-#define NI_M_CDI_CMD_ERR_INT_ENA_CLR BIT(9)
-#define NI_M_CDI_CMD_ERR_INT_ENA_SET BIT(8)
-#define NI_M_CDO_CMD_ERR_INT_ENA_CLR BIT(7)
-#define NI_M_CDO_CMD_ERR_INT_ENA_SET BIT(6)
-#define NI_M_CDI_CMD_RESET BIT(5)
-#define NI_M_CDO_CMD_RESET BIT(4)
-#define NI_M_CDI_CMD_ARM BIT(3)
-#define NI_M_CDI_CMD_DISARM BIT(2)
-#define NI_M_CDO_CMD_ARM BIT(1)
-#define NI_M_CDO_CMD_DISARM BIT(0)
-#define NI_M_CDI_MODE_REG 0x228
-#define NI_M_CDI_MODE_DATA_LANE(x) (((x) & 0x3) << 12)
-#define NI_M_CDI_MODE_DATA_LANE_MASK NI_M_CDI_MODE_DATA_LANE(3)
-#define NI_M_CDI_MODE_DATA_LANE_0_15 NI_M_CDI_MODE_DATA_LANE(0)
-#define NI_M_CDI_MODE_DATA_LANE_16_31 NI_M_CDI_MODE_DATA_LANE(1)
-#define NI_M_CDI_MODE_DATA_LANE_0_7 NI_M_CDI_MODE_DATA_LANE(0)
-#define NI_M_CDI_MODE_DATA_LANE_8_15 NI_M_CDI_MODE_DATA_LANE(1)
-#define NI_M_CDI_MODE_DATA_LANE_16_23 NI_M_CDI_MODE_DATA_LANE(2)
-#define NI_M_CDI_MODE_DATA_LANE_24_31 NI_M_CDI_MODE_DATA_LANE(3)
-#define NI_M_CDI_MODE_FIFO_MODE BIT(11)
-#define NI_M_CDI_MODE_POLARITY BIT(10)
-#define NI_M_CDI_MODE_HALT_ON_ERROR BIT(9)
-#define NI_M_CDI_MODE_SAMPLE_SRC(x) (((x) & 0x3f) << 0)
-#define NI_M_CDI_MODE_SAMPLE_SRC_MASK NI_M_CDI_MODE_SAMPLE_SRC(0x3f)
-#define NI_M_CDO_MODE_REG 0x22c
-#define NI_M_CDO_MODE_DATA_LANE(x) (((x) & 0x3) << 12)
-#define NI_M_CDO_MODE_DATA_LANE_MASK NI_M_CDO_MODE_DATA_LANE(3)
-#define NI_M_CDO_MODE_DATA_LANE_0_15 NI_M_CDO_MODE_DATA_LANE(0)
-#define NI_M_CDO_MODE_DATA_LANE_16_31 NI_M_CDO_MODE_DATA_LANE(1)
-#define NI_M_CDO_MODE_DATA_LANE_0_7 NI_M_CDO_MODE_DATA_LANE(0)
-#define NI_M_CDO_MODE_DATA_LANE_8_15 NI_M_CDO_MODE_DATA_LANE(1)
-#define NI_M_CDO_MODE_DATA_LANE_16_23 NI_M_CDO_MODE_DATA_LANE(2)
-#define NI_M_CDO_MODE_DATA_LANE_24_31 NI_M_CDO_MODE_DATA_LANE(3)
-#define NI_M_CDO_MODE_FIFO_MODE BIT(11)
-#define NI_M_CDO_MODE_POLARITY BIT(10)
-#define NI_M_CDO_MODE_HALT_ON_ERROR BIT(9)
-#define NI_M_CDO_MODE_RETRANSMIT BIT(8)
-#define NI_M_CDO_MODE_SAMPLE_SRC(x) (((x) & 0x3f) << 0)
-#define NI_M_CDO_MODE_SAMPLE_SRC_MASK NI_M_CDO_MODE_SAMPLE_SRC(0x3f)
-#define NI_M_CDI_MASK_ENA_REG 0x230
-#define NI_M_CDO_MASK_ENA_REG 0x234
-#define NI_M_STATIC_AI_CTRL_REG(x) ((x) ? (0x260 + (x)) : 0x064)
-#define NI_M_AO_REF_ATTENUATION_REG(x) (0x264 + (x))
-#define NI_M_AO_REF_ATTENUATION_X5 BIT(0)
-
-enum {
- ai_gain_16 = 0,
- ai_gain_8,
- ai_gain_14,
- ai_gain_4,
- ai_gain_611x,
- ai_gain_622x,
- ai_gain_628x,
- ai_gain_6143
-};
-
-enum caldac_enum {
- caldac_none = 0,
- mb88341,
- dac8800,
- dac8043,
- ad8522,
- ad8804,
- ad8842,
- ad8804_debug
-};
-
-enum ni_reg_type {
- ni_reg_normal = 0x0,
- ni_reg_611x = 0x1,
- ni_reg_6711 = 0x2,
- ni_reg_6713 = 0x4,
- ni_reg_67xx_mask = 0x6,
- ni_reg_6xxx_mask = 0x7,
- ni_reg_622x = 0x8,
- ni_reg_625x = 0x10,
- ni_reg_628x = 0x18,
- ni_reg_m_series_mask = 0x18,
- ni_reg_6143 = 0x20
-};
-
-struct ni_board_struct {
- const char *name;
- int device_id;
- int isapnp_id;
-
- int n_adchan;
- unsigned int ai_maxdata;
-
- int ai_fifo_depth;
- unsigned int alwaysdither:1;
- int gainlkup;
- int ai_speed;
-
- int n_aochan;
- unsigned int ao_maxdata;
- int ao_fifo_depth;
- const struct comedi_lrange *ao_range_table;
- unsigned ao_speed;
-
- int reg_type;
- unsigned int has_8255:1;
- unsigned int has_32dio_chan:1;
-
- enum caldac_enum caldac[3];
-};
-
-#define MAX_N_CALDACS 34
-#define MAX_N_AO_CHAN 8
-#define NUM_GPCT 2
-
-#define NUM_PFI_OUTPUT_SELECT_REGS 6
-
-#define M_SERIES_EEPROM_SIZE 1024
-
-struct ni_private {
- unsigned short dio_output;
- unsigned short dio_control;
- int aimode;
- unsigned int ai_calib_source;
- unsigned int ai_calib_source_enabled;
- /* protects access to windowed registers */
- spinlock_t window_lock;
- /* protects interrupt/dma register access */
- spinlock_t soft_reg_copy_lock;
- /* protects mite DMA channel request/release */
- spinlock_t mite_channel_lock;
-
- int changain_state;
- unsigned int changain_spec;
-
- unsigned int caldac_maxdata_list[MAX_N_CALDACS];
- unsigned short caldacs[MAX_N_CALDACS];
-
- unsigned short ai_cmd2;
-
- unsigned short ao_conf[MAX_N_AO_CHAN];
- unsigned short ao_mode1;
- unsigned short ao_mode2;
- unsigned short ao_mode3;
- unsigned short ao_cmd1;
- unsigned short ao_cmd2;
- unsigned short ao_trigger_select;
-
- struct ni_gpct_device *counter_dev;
- unsigned short an_trig_etc_reg;
-
- unsigned ai_offset[512];
-
- unsigned long serial_interval_ns;
- unsigned char serial_hw_mode;
- unsigned short clock_and_fout;
- unsigned short clock_and_fout2;
-
- unsigned short int_a_enable_reg;
- unsigned short int_b_enable_reg;
- unsigned short io_bidirection_pin_reg;
- unsigned short rtsi_trig_direction_reg;
- unsigned short rtsi_trig_a_output_reg;
- unsigned short rtsi_trig_b_output_reg;
- unsigned short pfi_output_select_reg[NUM_PFI_OUTPUT_SELECT_REGS];
- unsigned short ai_ao_select_reg;
- unsigned short g0_g1_select_reg;
- unsigned short cdio_dma_select_reg;
-
- unsigned clock_ns;
- unsigned clock_source;
-
- unsigned short pwm_up_count;
- unsigned short pwm_down_count;
-
- unsigned short ai_fifo_buffer[0x2000];
- uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE];
- __be32 serial_number;
-
- struct mite_struct *mite;
- struct mite_channel *ai_mite_chan;
- struct mite_channel *ao_mite_chan;
- struct mite_channel *cdo_mite_chan;
- struct mite_dma_descriptor_ring *ai_mite_ring;
- struct mite_dma_descriptor_ring *ao_mite_ring;
- struct mite_dma_descriptor_ring *cdo_mite_ring;
- struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT];
-
- /* ni_pcimio board type flags (based on the boardinfo reg_type) */
- unsigned int is_m_series:1;
- unsigned int is_6xxx:1;
- unsigned int is_611x:1;
- unsigned int is_6143:1;
- unsigned int is_622x:1;
- unsigned int is_625x:1;
- unsigned int is_628x:1;
- unsigned int is_67xx:1;
- unsigned int is_6711:1;
- unsigned int is_6713:1;
-};
-
-static const struct comedi_lrange range_ni_E_ao_ext;
-
-#endif /* _COMEDI_NI_STC_H */
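The NI_E_STC_WINDOW_ADDR_REG/NI_E_STC_WINDOW_DATA_REG pair above, together with the "protects access to windowed registers" comment on window_lock in struct ni_private, describe how E-series boards reach the DAQ-STC registers: the STC register number is written through the address window and the value through the data window while holding the spinlock. The sketch below is a reconstruction for illustration; the real helpers live in ni_mio_common.c (included by ni_pcimio.c above) and are not shown in this diff.

/*
 * Illustration only: windowed write to a DAQ-STC register on an E-series
 * board, based on the NI_E_STC_WINDOW_*_REG offsets and the window_lock
 * comment above.  ni_writew() is assumed to be the driver's 16-bit
 * register write helper, as used elsewhere in this diff.
 */
static void example_stc_win_write(struct comedi_device *dev,
				  unsigned short data, int reg)
{
	struct ni_private *devpriv = dev->private;
	unsigned long flags;

	spin_lock_irqsave(&devpriv->window_lock, flags);
	/* select which DAQ-STC register the data window points at */
	ni_writew(dev, reg, NI_E_STC_WINDOW_ADDR_REG);
	/* then write the value through the data window */
	ni_writew(dev, data, NI_E_STC_WINDOW_DATA_REG);
	spin_unlock_irqrestore(&devpriv->window_lock, flags);
}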
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
deleted file mode 100644
index c20c51bef3e7..000000000000
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ /dev/null
@@ -1,1436 +0,0 @@
-/*
- comedi/drivers/ni_tio.c
- Support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
- * Module: ni_tio
- * Description: National Instruments general purpose counters
- * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
- * Herman.Bruyninckx@mech.kuleuven.ac.be,
- * Wim.Meeussen@mech.kuleuven.ac.be,
- * Klaas.Gadeyne@mech.kuleuven.ac.be,
- * Frank Mori Hess <fmhess@users.sourceforge.net>
- * Updated: Thu Nov 16 09:50:32 EST 2006
- * Status: works
- *
- * This module is not used directly by end-users. Rather, it
- * is used by other drivers (for example ni_660x and ni_pcimio)
- * to provide support for NI's general purpose counters. It was
- * originally based on the counter code from ni_660x.c and
- * ni_mio_common.c.
- *
- * References:
- * DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
- * DAQ 6601/6602 User Manual (NI 322137B-01)
- * 340934b.pdf DAQ-STC reference manual
- */
-
-/*
-TODO:
- Support use of both banks X and Y
-*/
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "ni_tio_internal.h"
-
-/*
- * clock sources for ni e and m series boards,
- * get bits with GI_SRC_SEL()
- */
-#define NI_M_TIMEBASE_1_CLK 0x0 /* 20MHz */
-#define NI_M_PFI_CLK(x) (((x) < 10) ? (1 + (x)) : (0xb + (x)))
-#define NI_M_RTSI_CLK(x) (((x) == 7) ? 0x1b : (0xb + (x)))
-#define NI_M_TIMEBASE_2_CLK 0x12 /* 100KHz */
-#define NI_M_NEXT_TC_CLK 0x13
-#define NI_M_NEXT_GATE_CLK 0x14 /* Gi_Src_SubSelect=0 */
-#define NI_M_PXI_STAR_TRIGGER_CLK 0x14 /* Gi_Src_SubSelect=1 */
-#define NI_M_PXI10_CLK 0x1d
-#define NI_M_TIMEBASE_3_CLK 0x1e /* 80MHz, Gi_Src_SubSelect=0 */
-#define NI_M_ANALOG_TRIGGER_OUT_CLK 0x1e /* Gi_Src_SubSelect=1 */
-#define NI_M_LOGIC_LOW_CLK 0x1f
-#define NI_M_MAX_PFI_CHAN 15
-#define NI_M_MAX_RTSI_CHAN 7
-
-/*
- * clock sources for ni_660x boards,
- * get bits with GI_SRC_SEL()
- */
-#define NI_660X_TIMEBASE_1_CLK 0x0 /* 20MHz */
-#define NI_660X_SRC_PIN_I_CLK 0x1
-#define NI_660X_SRC_PIN_CLK(x) (0x2 + (x))
-#define NI_660X_NEXT_GATE_CLK 0xa
-#define NI_660X_RTSI_CLK(x) (0xb + (x))
-#define NI_660X_TIMEBASE_2_CLK 0x12 /* 100KHz */
-#define NI_660X_NEXT_TC_CLK 0x13
-#define NI_660X_TIMEBASE_3_CLK 0x1e /* 80MHz */
-#define NI_660X_LOGIC_LOW_CLK 0x1f
-#define NI_660X_MAX_SRC_PIN 7
-#define NI_660X_MAX_RTSI_CHAN 6
-
-/* ni m series gate_select */
-#define NI_M_TIMESTAMP_MUX_GATE_SEL 0x0
-#define NI_M_PFI_GATE_SEL(x) (((x) < 10) ? (1 + (x)) : (0xb + (x)))
-#define NI_M_RTSI_GATE_SEL(x) (((x) == 7) ? 0x1b : (0xb + (x)))
-#define NI_M_AI_START2_GATE_SEL 0x12
-#define NI_M_PXI_STAR_TRIGGER_GATE_SEL 0x13
-#define NI_M_NEXT_OUT_GATE_SEL 0x14
-#define NI_M_AI_START1_GATE_SEL 0x1c
-#define NI_M_NEXT_SRC_GATE_SEL 0x1d
-#define NI_M_ANALOG_TRIG_OUT_GATE_SEL 0x1e
-#define NI_M_LOGIC_LOW_GATE_SEL 0x1f
-
-/* ni_660x gate select */
-#define NI_660X_SRC_PIN_I_GATE_SEL 0x0
-#define NI_660X_GATE_PIN_I_GATE_SEL 0x1
-#define NI_660X_PIN_GATE_SEL(x) (0x2 + (x))
-#define NI_660X_NEXT_SRC_GATE_SEL 0xa
-#define NI_660X_RTSI_GATE_SEL(x) (0xb + (x))
-#define NI_660X_NEXT_OUT_GATE_SEL 0x14
-#define NI_660X_LOGIC_LOW_GATE_SEL 0x1f
-#define NI_660X_MAX_GATE_PIN 7
-
-/* ni_660x second gate select */
-#define NI_660X_SRC_PIN_I_GATE2_SEL 0x0
-#define NI_660X_UD_PIN_I_GATE2_SEL 0x1
-#define NI_660X_UD_PIN_GATE2_SEL(x) (0x2 + (x))
-#define NI_660X_NEXT_SRC_GATE2_SEL 0xa
-#define NI_660X_RTSI_GATE2_SEL(x) (0xb + (x))
-#define NI_660X_NEXT_OUT_GATE2_SEL 0x14
-#define NI_660X_SELECTED_GATE2_SEL 0x1e
-#define NI_660X_LOGIC_LOW_GATE2_SEL 0x1f
-#define NI_660X_MAX_UP_DOWN_PIN 7
-
-static inline unsigned GI_ALT_SYNC(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_ALT_SYNC;
- case ni_gpct_variant_660x:
- return GI_660X_ALT_SYNC;
- }
-}
-
-static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_PRESCALE_X2;
- case ni_gpct_variant_660x:
- return GI_660X_PRESCALE_X2;
- }
-}
-
-static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_PRESCALE_X8;
- case ni_gpct_variant_660x:
- return GI_660X_PRESCALE_X8;
- }
-}
-
-static inline unsigned GI_HW_ARM_SEL_MASK(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_HW_ARM_SEL_MASK;
- case ni_gpct_variant_660x:
- return GI_660X_HW_ARM_SEL_MASK;
- }
-}
-
-static int ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
-{
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- return 1;
- }
-}
-
-static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
-{
- unsigned cidx = counter->counter_index;
-
- write_register(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
-}
-
-static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
- unsigned generic_clock_source)
-{
- uint64_t clock_period_ps;
-
- switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
- case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
- clock_period_ps = 50000;
- break;
- case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
- clock_period_ps = 10000000;
- break;
- case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
- clock_period_ps = 12500;
- break;
- case NI_GPCT_PXI10_CLOCK_SRC_BITS:
- clock_period_ps = 100000;
- break;
- default:
- /*
- * clock period is specified by user with prescaling
- * already taken into account.
- */
- return counter->clock_period_ps;
- }
-
- switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
- case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
- break;
- case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
- clock_period_ps *= 2;
- break;
- case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
- clock_period_ps *= 8;
- break;
- default:
- BUG();
- break;
- }
- return clock_period_ps;
-}
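A quick worked example of the arithmetic above: the 80 MHz timebase has a 12500 ps period, so with the x2 prescaler the effective period is 12500 * 2 = 25000 ps. That is exactly the min_normal_sync_period_ps threshold used by ni_tio_set_sync_mode() further down, which forces alternate synchronization only for periods strictly below it.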
-
-static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_bits =
- ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
- unsigned bits = 0;
-
- if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
- GI_SRC_POL_INVERT)
- bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT;
- if (counting_mode_bits & GI_PRESCALE_X2(counter_dev->variant))
- bits |= NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS;
- if (counting_mode_bits & GI_PRESCALE_X8(counter_dev->variant))
- bits |= NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS;
- return bits;
-}
-
-static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
- unsigned clock_source = 0;
- unsigned src;
- unsigned i;
-
- src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
- NITIO_INPUT_SEL_REG(cidx)));
-
- switch (src) {
- case NI_M_TIMEBASE_1_CLK:
- clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
- break;
- case NI_M_TIMEBASE_2_CLK:
- clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
- break;
- case NI_M_TIMEBASE_3_CLK:
- if (counter_dev->regs[second_gate_reg] & GI_SRC_SUBSEL)
- clock_source =
- NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS;
- else
- clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
- break;
- case NI_M_LOGIC_LOW_CLK:
- clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
- break;
- case NI_M_NEXT_GATE_CLK:
- if (counter_dev->regs[second_gate_reg] & GI_SRC_SUBSEL)
- clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS;
- else
- clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
- break;
- case NI_M_PXI10_CLK:
- clock_source = NI_GPCT_PXI10_CLOCK_SRC_BITS;
- break;
- case NI_M_NEXT_TC_CLK:
- clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
- break;
- default:
- for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
- if (src == NI_M_RTSI_CLK(i)) {
- clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
- break;
- }
- }
- if (i <= NI_M_MAX_RTSI_CHAN)
- break;
- for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
- if (src == NI_M_PFI_CLK(i)) {
- clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(i);
- break;
- }
- }
- if (i <= NI_M_MAX_PFI_CHAN)
- break;
- BUG();
- break;
- }
- clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
-}
-
-static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
-{
- unsigned clock_source = 0;
- unsigned cidx = counter->counter_index;
- unsigned src;
- unsigned i;
-
- src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
- NITIO_INPUT_SEL_REG(cidx)));
-
- switch (src) {
- case NI_660X_TIMEBASE_1_CLK:
- clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
- break;
- case NI_660X_TIMEBASE_2_CLK:
- clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
- break;
- case NI_660X_TIMEBASE_3_CLK:
- clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
- break;
- case NI_660X_LOGIC_LOW_CLK:
- clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
- break;
- case NI_660X_SRC_PIN_I_CLK:
- clock_source = NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS;
- break;
- case NI_660X_NEXT_GATE_CLK:
- clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
- break;
- case NI_660X_NEXT_TC_CLK:
- clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
- break;
- default:
- for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (src == NI_660X_RTSI_CLK(i)) {
- clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
- break;
- }
- }
- if (i <= NI_660X_MAX_RTSI_CHAN)
- break;
- for (i = 0; i <= NI_660X_MAX_SRC_PIN; ++i) {
- if (src == NI_660X_SRC_PIN_CLK(i)) {
- clock_source =
- NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i);
- break;
- }
- }
- if (i <= NI_660X_MAX_SRC_PIN)
- break;
- BUG();
- break;
- }
- clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
-}
-
-static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter)
-{
- switch (counter->counter_dev->variant) {
- case ni_gpct_variant_e_series:
- case ni_gpct_variant_m_series:
- default:
- return ni_m_series_clock_src_select(counter);
- case ni_gpct_variant_660x:
- return ni_660x_clock_src_select(counter);
- }
-}
-
-static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx);
- static const uint64_t min_normal_sync_period_ps = 25000;
- unsigned mode;
- uint64_t clock_period_ps;
-
- if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
- return;
-
- mode = ni_tio_get_soft_copy(counter, counting_mode_reg);
- switch (mode & GI_CNT_MODE_MASK) {
- case GI_CNT_MODE_QUADX1:
- case GI_CNT_MODE_QUADX2:
- case GI_CNT_MODE_QUADX4:
- case GI_CNT_MODE_SYNC_SRC:
- force_alt_sync = 1;
- break;
- default:
- break;
- }
-
- clock_period_ps = ni_tio_clock_period_ps(counter,
- ni_tio_generic_clock_src_select(counter));
-
- /*
- * It's not clear what we should do if clock_period is unknown, so we
- * are not using the alt sync bit in that case, but allow the caller
- * to decide by using the force_alt_sync parameter.
- */
- if (force_alt_sync ||
- (clock_period_ps && clock_period_ps < min_normal_sync_period_ps)) {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- GI_ALT_SYNC(counter_dev->variant));
- } else {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- 0x0);
- }
-}
-
-static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode_reg_mask;
- unsigned mode_reg_values;
- unsigned input_select_bits = 0;
- /* these bits map directly on to the mode register */
- static const unsigned mode_reg_direct_mask =
- NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
- NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
- NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
- NI_GPCT_LOADING_ON_GATE_BIT | NI_GPCT_LOAD_B_SELECT_BIT;
-
- mode_reg_mask = mode_reg_direct_mask | GI_RELOAD_SRC_SWITCHING;
- mode_reg_values = mode & mode_reg_direct_mask;
- switch (mode & NI_GPCT_RELOAD_SOURCE_MASK) {
- case NI_GPCT_RELOAD_SOURCE_FIXED_BITS:
- break;
- case NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS:
- mode_reg_values |= GI_RELOAD_SRC_SWITCHING;
- break;
- case NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS:
- input_select_bits |= GI_GATE_SEL_LOAD_SRC;
- mode_reg_mask |= GI_GATING_MODE_MASK;
- mode_reg_values |= GI_LEVEL_GATING;
- break;
- default:
- break;
- }
- ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
- mode_reg_mask, mode_reg_values);
-
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
-
- bits |= GI_CNT_MODE(mode >> NI_GPCT_COUNTING_MODE_SHIFT);
- bits |= GI_INDEX_PHASE((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT));
- if (mode & NI_GPCT_INDEX_ENABLE_BIT)
- bits |= GI_INDEX_MODE;
- ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
- GI_CNT_MODE_MASK | GI_INDEX_PHASE_MASK |
- GI_INDEX_MODE, bits);
- ni_tio_set_sync_mode(counter, 0);
- }
-
- ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_CNT_DIR_MASK,
- GI_CNT_DIR(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT));
-
- if (mode & NI_GPCT_OR_GATE_BIT)
- input_select_bits |= GI_OR_GATE;
- if (mode & NI_GPCT_INVERT_OUTPUT_BIT)
- input_select_bits |= GI_OUTPUT_POL_INVERT;
- ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
- GI_GATE_SEL_LOAD_SRC | GI_OR_GATE |
- GI_OUTPUT_POL_INVERT, input_select_bits);
-
- return 0;
-}
-
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned command_transient_bits = 0;
-
- if (arm) {
- switch (start_trigger) {
- case NI_GPCT_ARM_IMMEDIATE:
- command_transient_bits |= GI_ARM;
- break;
- case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- command_transient_bits |= GI_ARM | GI_ARM_COPY;
- break;
- default:
- break;
- }
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
- unsigned sel_mask;
-
- sel_mask = GI_HW_ARM_SEL_MASK(counter_dev->variant);
-
- switch (start_trigger) {
- case NI_GPCT_ARM_IMMEDIATE:
- case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- break;
- default:
- if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
-				/*
-				 * pass through the least significant bits
-				 * so we can figure out which select to
-				 * use later
-				 */
- bits |= GI_HW_ARM_ENA |
- (GI_HW_ARM_SEL(start_trigger) &
- sel_mask);
- } else {
- return -EINVAL;
- }
- break;
- }
- ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
- GI_HW_ARM_ENA | sel_mask, bits);
- }
- } else {
- command_transient_bits |= GI_DISARM;
- }
- ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
- 0, 0, command_transient_bits);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ni_tio_arm);
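A minimal caller sketch for the exported ni_tio_arm(); the example_* function names are illustrative only, not from the tree:

	/* software-triggered start; returns 0 or a negative errno */
	static int example_start(struct ni_gpct *counter)
	{
		return ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
	}

	/* stop counting; start_trigger is ignored when disarming */
	static void example_stop(struct ni_gpct *counter)
	{
		ni_tio_arm(counter, 0, 0);
	}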
-
-static unsigned ni_660x_clk_src(unsigned int clock_source)
-{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_660x_clock;
- unsigned i;
-
- switch (clk_src) {
- case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
- ni_660x_clock = NI_660X_TIMEBASE_1_CLK;
- break;
- case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
- ni_660x_clock = NI_660X_TIMEBASE_2_CLK;
- break;
- case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
- ni_660x_clock = NI_660X_TIMEBASE_3_CLK;
- break;
- case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
- ni_660x_clock = NI_660X_LOGIC_LOW_CLK;
- break;
- case NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS:
- ni_660x_clock = NI_660X_SRC_PIN_I_CLK;
- break;
- case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
- ni_660x_clock = NI_660X_NEXT_GATE_CLK;
- break;
- case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
- ni_660x_clock = NI_660X_NEXT_TC_CLK;
- break;
- default:
- for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (clk_src == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
- ni_660x_clock = NI_660X_RTSI_CLK(i);
- break;
- }
- }
- if (i <= NI_660X_MAX_RTSI_CHAN)
- break;
- for (i = 0; i <= NI_660X_MAX_SRC_PIN; ++i) {
- if (clk_src == NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i)) {
- ni_660x_clock = NI_660X_SRC_PIN_CLK(i);
- break;
- }
- }
- if (i <= NI_660X_MAX_SRC_PIN)
- break;
- ni_660x_clock = 0;
- BUG();
- break;
- }
- return GI_SRC_SEL(ni_660x_clock);
-}
-
-static unsigned ni_m_clk_src(unsigned int clock_source)
-{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_m_series_clock;
- unsigned i;
-
- switch (clk_src) {
- case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_TIMEBASE_1_CLK;
- break;
- case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_TIMEBASE_2_CLK;
- break;
- case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_TIMEBASE_3_CLK;
- break;
- case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_LOGIC_LOW_CLK;
- break;
- case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_NEXT_GATE_CLK;
- break;
- case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_NEXT_TC_CLK;
- break;
- case NI_GPCT_PXI10_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_PXI10_CLK;
- break;
- case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_PXI_STAR_TRIGGER_CLK;
- break;
- case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
- ni_m_series_clock = NI_M_ANALOG_TRIGGER_OUT_CLK;
- break;
- default:
- for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
- if (clk_src == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
- ni_m_series_clock = NI_M_RTSI_CLK(i);
- break;
- }
- }
- if (i <= NI_M_MAX_RTSI_CHAN)
- break;
- for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
- if (clk_src == NI_GPCT_PFI_CLOCK_SRC_BITS(i)) {
- ni_m_series_clock = NI_M_PFI_CLK(i);
- break;
- }
- }
- if (i <= NI_M_MAX_PFI_CHAN)
- break;
- pr_err("invalid clock source 0x%lx\n",
- (unsigned long)clock_source);
- BUG();
- ni_m_series_clock = 0;
- break;
- }
- return GI_SRC_SEL(ni_m_series_clock);
-}
-
-static void ni_tio_set_source_subselect(struct ni_gpct *counter,
- unsigned int clock_source)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
-
- if (counter_dev->variant != ni_gpct_variant_m_series)
- return;
- switch (clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
- /* Gi_Source_Subselect is zero */
- case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
- case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
- counter_dev->regs[second_gate_reg] &= ~GI_SRC_SUBSEL;
- break;
- /* Gi_Source_Subselect is one */
- case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
- case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
- counter_dev->regs[second_gate_reg] |= GI_SRC_SUBSEL;
- break;
- /* Gi_Source_Subselect doesn't matter */
- default:
- return;
- }
- write_register(counter, counter_dev->regs[second_gate_reg],
- second_gate_reg);
-}
-
-static int ni_tio_set_clock_src(struct ni_gpct *counter,
- unsigned int clock_source,
- unsigned int period_ns)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned bits = 0;
-
- /* FIXME: validate clock source */
- switch (counter_dev->variant) {
- case ni_gpct_variant_660x:
- bits |= ni_660x_clk_src(clock_source);
- break;
- case ni_gpct_variant_e_series:
- case ni_gpct_variant_m_series:
- default:
- bits |= ni_m_clk_src(clock_source);
- break;
- }
- if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
- bits |= GI_SRC_POL_INVERT;
- ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
- GI_SRC_SEL_MASK | GI_SRC_POL_INVERT, bits);
- ni_tio_set_source_subselect(counter, clock_source);
-
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- bits = 0;
- switch (clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
- case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
- break;
- case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
- bits |= GI_PRESCALE_X2(counter_dev->variant);
- break;
- case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
- bits |= GI_PRESCALE_X8(counter_dev->variant);
- break;
- default:
- return -EINVAL;
- }
- ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
- GI_PRESCALE_X2(counter_dev->variant) |
- GI_PRESCALE_X8(counter_dev->variant), bits);
- }
- counter->clock_period_ps = period_ns * 1000;
- ni_tio_set_sync_mode(counter, 0);
- return 0;
-}
-
-static void ni_tio_get_clock_src(struct ni_gpct *counter,
- unsigned int *clock_source,
- unsigned int *period_ns)
-{
- uint64_t temp64;
-
- *clock_source = ni_tio_generic_clock_src_select(counter);
- temp64 = ni_tio_clock_period_ps(counter, *clock_source);
- do_div(temp64, 1000); /* ps to ns */
- *period_ns = temp64;
-}
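A worked example of the ps/ns bookkeeping above, assuming an external 10 MHz clock wired to PFI0 (the numbers are illustrative):

	/*
	 * Caller requests clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(0)
	 * with period_ns = 100.  ni_tio_set_clock_src() stores
	 * 100 * 1000 = 100000 ps in counter->clock_period_ps, and
	 * ni_tio_get_clock_src() later reports 100000 / 1000 = 100 ns.
	 */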
-
-static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
-{
- unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
-
- switch (chan) {
- case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
- gate_sel = NI_660X_NEXT_SRC_GATE_SEL;
- break;
- case NI_GPCT_NEXT_OUT_GATE_SELECT:
- case NI_GPCT_LOGIC_LOW_GATE_SELECT:
- case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
- case NI_GPCT_GATE_PIN_i_GATE_SELECT:
- gate_sel = chan & 0x1f;
- break;
- default:
- for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (chan == NI_GPCT_RTSI_GATE_SELECT(i)) {
- gate_sel = chan & 0x1f;
- break;
- }
- }
- if (i <= NI_660X_MAX_RTSI_CHAN)
- break;
- for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
- if (chan == NI_GPCT_GATE_PIN_GATE_SELECT(i)) {
- gate_sel = chan & 0x1f;
- break;
- }
- }
- if (i <= NI_660X_MAX_GATE_PIN)
- break;
- return -EINVAL;
- }
- ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
- GI_GATE_SEL_MASK, GI_GATE_SEL(gate_sel));
- return 0;
-}
-
-static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
-{
- unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
-
- switch (chan) {
- case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
- case NI_GPCT_AI_START2_GATE_SELECT:
- case NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT:
- case NI_GPCT_NEXT_OUT_GATE_SELECT:
- case NI_GPCT_AI_START1_GATE_SELECT:
- case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
- case NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT:
- case NI_GPCT_LOGIC_LOW_GATE_SELECT:
- gate_sel = chan & 0x1f;
- break;
- default:
- for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
- if (chan == NI_GPCT_RTSI_GATE_SELECT(i)) {
- gate_sel = chan & 0x1f;
- break;
- }
- }
- if (i <= NI_M_MAX_RTSI_CHAN)
- break;
- for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
- if (chan == NI_GPCT_PFI_GATE_SELECT(i)) {
- gate_sel = chan & 0x1f;
- break;
- }
- }
- if (i <= NI_M_MAX_PFI_CHAN)
- break;
- return -EINVAL;
- }
- ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
- GI_GATE_SEL_MASK, GI_GATE_SEL(gate_sel));
- return 0;
-}
-
-static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
- unsigned i;
-
- switch (chan) {
- case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
- case NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT:
- case NI_GPCT_SELECTED_GATE_GATE_SELECT:
- case NI_GPCT_NEXT_OUT_GATE_SELECT:
- case NI_GPCT_LOGIC_LOW_GATE_SELECT:
- gate2_sel = chan & 0x1f;
- break;
- case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
- gate2_sel = NI_660X_NEXT_SRC_GATE2_SEL;
- break;
- default:
- for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (chan == NI_GPCT_RTSI_GATE_SELECT(i)) {
- gate2_sel = chan & 0x1f;
- break;
- }
- }
- if (i <= NI_660X_MAX_RTSI_CHAN)
- break;
- for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
- if (chan == NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i)) {
- gate2_sel = chan & 0x1f;
- break;
- }
- }
- if (i <= NI_660X_MAX_UP_DOWN_PIN)
- break;
- return -EINVAL;
- }
- counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
- counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
- counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
- return 0;
-}
-
-static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
-
- /*
- * FIXME: We don't know what the m-series second gate codes are,
- * so we'll just pass the bits through for now.
- */
- switch (chan) {
- default:
- gate2_sel = chan & 0x1f;
- break;
- }
- counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
- counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
- counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
- return 0;
-}
-
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned mode = 0;
-
- switch (gate_index) {
- case 0:
- if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
- ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
- GI_GATING_MODE_MASK,
- GI_GATING_DISABLED);
- return 0;
- }
- if (gate_source & CR_INVERT)
- mode |= GI_GATE_POL_INVERT;
- if (gate_source & CR_EDGE)
- mode |= GI_RISING_EDGE_GATING;
- else
- mode |= GI_LEVEL_GATING;
- ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
- GI_GATE_POL_INVERT | GI_GATING_MODE_MASK,
- mode);
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- case ni_gpct_variant_m_series:
- default:
- return ni_m_set_gate(counter, gate_source);
- case ni_gpct_variant_660x:
- return ni_660x_set_gate(counter, gate_source);
- }
- break;
- case 1:
- if (!ni_tio_has_gate2_registers(counter_dev))
- return -EINVAL;
-
- if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
- counter_dev->regs[gate2_reg] &= ~GI_GATE2_MODE;
- write_register(counter, counter_dev->regs[gate2_reg],
- gate2_reg);
- return 0;
- }
- if (gate_source & CR_INVERT)
- counter_dev->regs[gate2_reg] |= GI_GATE2_POL_INVERT;
- else
- counter_dev->regs[gate2_reg] &= ~GI_GATE2_POL_INVERT;
- switch (counter_dev->variant) {
- case ni_gpct_variant_m_series:
- return ni_m_set_gate2(counter, gate_source);
- case ni_gpct_variant_660x:
- return ni_660x_set_gate2(counter, gate_source);
- default:
- BUG();
- break;
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
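A minimal caller sketch for ni_tio_set_gate_src() on an m-series counter: rising-edge gating from AI START1 on gate 0, second gate disabled. example_route_gate() is an illustrative name, not a function from the tree:

	static int example_route_gate(struct ni_gpct *counter)
	{
		int ret;

		ret = ni_tio_set_gate_src(counter, 0,
					  NI_GPCT_AI_START1_GATE_SELECT |
					  CR_EDGE);
		if (ret)
			return ret;
		return ni_tio_set_gate_src(counter, 1,
					   NI_GPCT_DISABLED_GATE_SELECT);
	}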
-
-static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
- unsigned int source)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned int abz_reg, shift, mask;
-
- if (counter_dev->variant != ni_gpct_variant_m_series)
- return -EINVAL;
-
- abz_reg = NITIO_ABZ_REG(cidx);
- switch (index) {
- case NI_GPCT_SOURCE_ENCODER_A:
- shift = 10;
- break;
- case NI_GPCT_SOURCE_ENCODER_B:
- shift = 5;
- break;
- case NI_GPCT_SOURCE_ENCODER_Z:
- shift = 0;
- break;
- default:
- return -EINVAL;
- }
- mask = 0x1f << shift;
- if (source > 0x1f)
- source = 0x1f; /* Disable gate */
-
- counter_dev->regs[abz_reg] &= ~mask;
- counter_dev->regs[abz_reg] |= (source << shift) & mask;
- write_register(counter, counter_dev->regs[abz_reg], abz_reg);
- return 0;
-}
-
-static unsigned ni_660x_gate_to_generic_gate(unsigned gate)
-{
- unsigned i;
-
- switch (gate) {
- case NI_660X_SRC_PIN_I_GATE_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
- case NI_660X_GATE_PIN_I_GATE_SEL:
- return NI_GPCT_GATE_PIN_i_GATE_SELECT;
- case NI_660X_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
- case NI_660X_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
- case NI_660X_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
- default:
- for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
- }
- for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
- if (gate == NI_660X_PIN_GATE_SEL(i))
- return NI_GPCT_GATE_PIN_GATE_SELECT(i);
- }
- BUG();
- break;
- }
- return 0;
-}
-
-static unsigned ni_m_gate_to_generic_gate(unsigned gate)
-{
- unsigned i;
-
- switch (gate) {
- case NI_M_TIMESTAMP_MUX_GATE_SEL:
- return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
- case NI_M_AI_START2_GATE_SEL:
- return NI_GPCT_AI_START2_GATE_SELECT;
- case NI_M_PXI_STAR_TRIGGER_GATE_SEL:
- return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
- case NI_M_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
- case NI_M_AI_START1_GATE_SEL:
- return NI_GPCT_AI_START1_GATE_SELECT;
- case NI_M_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
- case NI_M_ANALOG_TRIG_OUT_GATE_SEL:
- return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
- case NI_M_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
- default:
- for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_M_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
- }
- for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
- if (gate == NI_M_PFI_GATE_SEL(i))
- return NI_GPCT_PFI_GATE_SELECT(i);
- }
- BUG();
- break;
- }
- return 0;
-}
-
-static unsigned ni_660x_gate2_to_generic_gate(unsigned gate)
-{
- unsigned i;
-
- switch (gate) {
- case NI_660X_SRC_PIN_I_GATE2_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
- case NI_660X_UD_PIN_I_GATE2_SEL:
- return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
- case NI_660X_NEXT_SRC_GATE2_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
- case NI_660X_NEXT_OUT_GATE2_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
- case NI_660X_SELECTED_GATE2_SEL:
- return NI_GPCT_SELECTED_GATE_GATE_SELECT;
- case NI_660X_LOGIC_LOW_GATE2_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
- default:
- for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE2_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
- }
- for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
- if (gate == NI_660X_UD_PIN_GATE2_SEL(i))
- return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
- }
- BUG();
- break;
- }
- return 0;
-}
-
-static unsigned ni_m_gate2_to_generic_gate(unsigned gate)
-{
- /*
- * FIXME: the second gate sources for the m series are undocumented,
- * so we just return the raw bits for now.
- */
- switch (gate) {
- default:
- return gate;
- }
- return 0;
-}
-
-static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int *gate_source)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate;
-
- switch (gate_index) {
- case 0:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter,
- NITIO_INPUT_SEL_REG(cidx)));
-
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- case ni_gpct_variant_m_series:
- default:
- *gate_source = ni_m_gate_to_generic_gate(gate);
- break;
- case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate_to_generic_gate(gate);
- break;
- }
- if (mode & GI_GATE_POL_INVERT)
- *gate_source |= CR_INVERT;
- if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
- *gate_source |= CR_EDGE;
- break;
- case 1:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED ||
- !(counter_dev->regs[gate2_reg] & GI_GATE2_MODE)) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE2(counter_dev->regs[gate2_reg]);
-
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- case ni_gpct_variant_m_series:
- default:
- *gate_source = ni_m_gate2_to_generic_gate(gate);
- break;
- case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate2_to_generic_gate(gate);
- break;
- }
- if (counter_dev->regs[gate2_reg] & GI_GATE2_POL_INVERT)
- *gate_source |= CR_INVERT;
- /* second gate can't have edge/level mode set independently */
- if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
- *gate_source |= CR_EDGE;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int ni_tio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
- unsigned status;
-
- switch (data[0]) {
- case INSN_CONFIG_SET_COUNTER_MODE:
- return ni_tio_set_counter_mode(counter, data[1]);
- case INSN_CONFIG_ARM:
- return ni_tio_arm(counter, 1, data[1]);
- case INSN_CONFIG_DISARM:
- ni_tio_arm(counter, 0, 0);
- return 0;
- case INSN_CONFIG_GET_COUNTER_STATUS:
- data[1] = 0;
- status = read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
- if (status & GI_ARMED(cidx)) {
- data[1] |= COMEDI_COUNTER_ARMED;
- if (status & GI_COUNTING(cidx))
- data[1] |= COMEDI_COUNTER_COUNTING;
- }
- data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
- return 0;
- case INSN_CONFIG_SET_CLOCK_SRC:
- return ni_tio_set_clock_src(counter, data[1], data[2]);
- case INSN_CONFIG_GET_CLOCK_SRC:
- ni_tio_get_clock_src(counter, &data[1], &data[2]);
- return 0;
- case INSN_CONFIG_SET_GATE_SRC:
- return ni_tio_set_gate_src(counter, data[1], data[2]);
- case INSN_CONFIG_GET_GATE_SRC:
- return ni_tio_get_gate_src(counter, data[1], &data[2]);
- case INSN_CONFIG_SET_OTHER_SRC:
- return ni_tio_set_other_src(counter, data[1], data[2]);
- case INSN_CONFIG_RESET:
- ni_tio_reset_count_and_disarm(counter);
- return 0;
- default:
- break;
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(ni_tio_insn_config);
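For reference, the data[] conventions handled by the switch above (a summary of the code, not new behaviour):

	/*
	 * INSN_CONFIG_SET_CLOCK_SRC:      data[1] = clock source bits,
	 *                                 data[2] = period in ns
	 * INSN_CONFIG_GET_COUNTER_STATUS: data[1] = COMEDI_COUNTER_* flags,
	 *                                 data[2] = mask of valid flag bits
	 * INSN_CONFIG_SET_GATE_SRC:       data[1] = gate index (0 or 1),
	 *                                 data[2] = gate source chanspec
	 */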
-
-static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
- unsigned int val;
-
- ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
- ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
- GI_SAVE_TRACE, GI_SAVE_TRACE);
-
- /*
- * The count doesn't get latched until the next clock edge, so it is
- * possible the count may change (once) while we are reading. Since
- * the read of the SW_Save_Reg isn't atomic (apparently even when it's
- * a 32 bit register according to 660x docs), we need to read twice
- * and make sure the reading hasn't changed. If it has, a third read
- * will be correct since the count value will definitely have latched
- * by then.
- */
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
- if (val != read_register(counter, NITIO_SW_SAVE_REG(cidx)))
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
-
- return val;
-}
-
-int ni_tio_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned int channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- switch (channel) {
- case 0:
- data[i] = ni_tio_read_sw_save_reg(dev, s);
- break;
- case 1:
- data[i] = counter_dev->regs[NITIO_LOADA_REG(cidx)];
- break;
- case 2:
- data[i] = counter_dev->regs[NITIO_LOADB_REG(cidx)];
- break;
- }
- }
- return insn->n;
-}
-EXPORT_SYMBOL_GPL(ni_tio_insn_read);
-
-static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
-{
- unsigned cidx = counter->counter_index;
- const unsigned bits =
- read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
-
- return (bits & GI_NEXT_LOAD_SRC(cidx))
- ? NITIO_LOADB_REG(cidx)
- : NITIO_LOADA_REG(cidx);
-}
-
-int ni_tio_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct ni_gpct *counter = s->private;
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
- unsigned load_reg;
-
- if (insn->n < 1)
- return 0;
- switch (channel) {
- case 0:
- /*
- * Unsafe if counter is armed.
- * Should probably check status and return -EBUSY if armed.
- */
-
- /*
- * Don't disturb load source select, just use whichever
- * load register is already selected.
- */
- load_reg = ni_tio_next_load_register(counter);
- write_register(counter, data[0], load_reg);
- ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
- 0, 0, GI_LOAD);
- /* restore load reg */
- write_register(counter, counter_dev->regs[load_reg], load_reg);
- break;
- case 1:
- counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADA_REG(cidx));
- break;
- case 2:
- counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADB_REG(cidx));
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(ni_tio_insn_write);
-
-void ni_tio_init_counter(struct ni_gpct *counter)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
-
- ni_tio_reset_count_and_disarm(counter);
-
- /* initialize counter registers */
- counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
-
- ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
- ~0, GI_SYNC_GATE);
-
- ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
-
- counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADA_REG(cidx));
-
- counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADB_REG(cidx));
-
- ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
-
- if (ni_tio_counting_mode_registers_present(counter_dev))
- ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), ~0, 0);
-
- if (ni_tio_has_gate2_registers(counter_dev)) {
- counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_GATE2_REG(cidx));
- }
-
- ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
-
- ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx), ~0, 0x0);
-}
-EXPORT_SYMBOL_GPL(ni_tio_init_counter);
-
-struct ni_gpct_device *
-ni_gpct_device_construct(struct comedi_device *dev,
- void (*write_register)(struct ni_gpct *counter,
- unsigned bits,
- enum ni_gpct_register reg),
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg),
- enum ni_gpct_variant variant,
- unsigned num_counters)
-{
- struct ni_gpct_device *counter_dev;
- struct ni_gpct *counter;
- unsigned i;
-
- if (num_counters == 0)
- return NULL;
-
- counter_dev = kzalloc(sizeof(*counter_dev), GFP_KERNEL);
- if (!counter_dev)
- return NULL;
-
- counter_dev->dev = dev;
- counter_dev->write_register = write_register;
- counter_dev->read_register = read_register;
- counter_dev->variant = variant;
-
- spin_lock_init(&counter_dev->regs_lock);
-
- counter_dev->counters = kcalloc(num_counters, sizeof(*counter),
- GFP_KERNEL);
- if (!counter_dev->counters) {
- kfree(counter_dev);
- return NULL;
- }
-
- for (i = 0; i < num_counters; ++i) {
- counter = &counter_dev->counters[i];
- counter->counter_dev = counter_dev;
- spin_lock_init(&counter->lock);
- }
- counter_dev->num_counters = num_counters;
-
- return counter_dev;
-}
-EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
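A hypothetical probe-time sketch showing how a board driver would use ni_gpct_device_construct(); my_gpct_write(), my_gpct_read() and example_probe() are illustrative names, not functions from the tree:

	static void my_gpct_write(struct ni_gpct *counter, unsigned bits,
				  enum ni_gpct_register reg)
	{
		/* board-specific register write keyed off 'reg' goes here */
	}

	static unsigned my_gpct_read(struct ni_gpct *counter,
				     enum ni_gpct_register reg)
	{
		return 0;	/* board-specific register read */
	}

	static int example_probe(struct comedi_device *dev)
	{
		struct ni_gpct_device *gpct;
		unsigned i;

		gpct = ni_gpct_device_construct(dev, my_gpct_write,
						my_gpct_read,
						ni_gpct_variant_m_series, 4);
		if (!gpct)
			return -ENOMEM;
		for (i = 0; i < gpct->num_counters; i++)
			ni_tio_init_counter(&gpct->counters[i]);
		/* ... set up comedi subdevices ... */
		return 0;
	}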
-
-void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
-{
- if (!counter_dev->counters)
- return;
- kfree(counter_dev->counters);
- kfree(counter_dev);
-}
-EXPORT_SYMBOL_GPL(ni_gpct_device_destroy);
-
-static int __init ni_tio_init_module(void)
-{
- return 0;
-}
-module_init(ni_tio_init_module);
-
-static void __exit ni_tio_cleanup_module(void)
-{
-}
-module_exit(ni_tio_cleanup_module);
-
-MODULE_AUTHOR("Comedi <comedi@comedi.org>");
-MODULE_DESCRIPTION("Comedi support for NI general-purpose counters");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
deleted file mode 100644
index 25aedd0e5867..000000000000
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- drivers/ni_tio.h
- Header file for NI general purpose counter support code (ni_tio.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-#ifndef _COMEDI_NI_TIO_H
-#define _COMEDI_NI_TIO_H
-
-#include "../comedidev.h"
-
-/* forward declarations */
-struct mite_struct;
-struct ni_gpct_device;
-
-enum ni_gpct_register {
- NITIO_G0_AUTO_INC,
- NITIO_G1_AUTO_INC,
- NITIO_G2_AUTO_INC,
- NITIO_G3_AUTO_INC,
- NITIO_G0_CMD,
- NITIO_G1_CMD,
- NITIO_G2_CMD,
- NITIO_G3_CMD,
- NITIO_G0_HW_SAVE,
- NITIO_G1_HW_SAVE,
- NITIO_G2_HW_SAVE,
- NITIO_G3_HW_SAVE,
- NITIO_G0_SW_SAVE,
- NITIO_G1_SW_SAVE,
- NITIO_G2_SW_SAVE,
- NITIO_G3_SW_SAVE,
- NITIO_G0_MODE,
- NITIO_G1_MODE,
- NITIO_G2_MODE,
- NITIO_G3_MODE,
- NITIO_G0_LOADA,
- NITIO_G1_LOADA,
- NITIO_G2_LOADA,
- NITIO_G3_LOADA,
- NITIO_G0_LOADB,
- NITIO_G1_LOADB,
- NITIO_G2_LOADB,
- NITIO_G3_LOADB,
- NITIO_G0_INPUT_SEL,
- NITIO_G1_INPUT_SEL,
- NITIO_G2_INPUT_SEL,
- NITIO_G3_INPUT_SEL,
- NITIO_G0_CNT_MODE,
- NITIO_G1_CNT_MODE,
- NITIO_G2_CNT_MODE,
- NITIO_G3_CNT_MODE,
- NITIO_G0_GATE2,
- NITIO_G1_GATE2,
- NITIO_G2_GATE2,
- NITIO_G3_GATE2,
- NITIO_G01_STATUS,
- NITIO_G23_STATUS,
- NITIO_G01_RESET,
- NITIO_G23_RESET,
- NITIO_G01_STATUS1,
- NITIO_G23_STATUS1,
- NITIO_G01_STATUS2,
- NITIO_G23_STATUS2,
- NITIO_G0_DMA_CFG,
- NITIO_G1_DMA_CFG,
- NITIO_G2_DMA_CFG,
- NITIO_G3_DMA_CFG,
- NITIO_G0_DMA_STATUS,
- NITIO_G1_DMA_STATUS,
- NITIO_G2_DMA_STATUS,
- NITIO_G3_DMA_STATUS,
- NITIO_G0_ABZ,
- NITIO_G1_ABZ,
- NITIO_G0_INT_ACK,
- NITIO_G1_INT_ACK,
- NITIO_G2_INT_ACK,
- NITIO_G3_INT_ACK,
- NITIO_G0_STATUS,
- NITIO_G1_STATUS,
- NITIO_G2_STATUS,
- NITIO_G3_STATUS,
- NITIO_G0_INT_ENA,
- NITIO_G1_INT_ENA,
- NITIO_G2_INT_ENA,
- NITIO_G3_INT_ENA,
- NITIO_NUM_REGS,
-};
-
-enum ni_gpct_variant {
- ni_gpct_variant_e_series,
- ni_gpct_variant_m_series,
- ni_gpct_variant_660x
-};
-
-struct ni_gpct {
- struct ni_gpct_device *counter_dev;
- unsigned counter_index;
- unsigned chip_index;
- uint64_t clock_period_ps; /* clock period in picoseconds */
- struct mite_channel *mite_chan;
- spinlock_t lock;
-};
-
-struct ni_gpct_device {
- struct comedi_device *dev;
- void (*write_register)(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg);
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg);
- enum ni_gpct_variant variant;
- struct ni_gpct *counters;
- unsigned num_counters;
- unsigned regs[NITIO_NUM_REGS];
- spinlock_t regs_lock;
-};
-
-struct ni_gpct_device *
-ni_gpct_device_construct(struct comedi_device *,
- void (*write_register)(struct ni_gpct *,
- unsigned bits,
- enum ni_gpct_register),
- unsigned (*read_register)(struct ni_gpct *,
- enum ni_gpct_register),
- enum ni_gpct_variant,
- unsigned num_counters);
-void ni_gpct_device_destroy(struct ni_gpct_device *);
-void ni_tio_init_counter(struct ni_gpct *);
-int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *data);
-int ni_tio_insn_config(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *data);
-int ni_tio_insn_write(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *data);
-int ni_tio_cmd(struct comedi_device *, struct comedi_subdevice *);
-int ni_tio_cmdtest(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_cmd *);
-int ni_tio_cancel(struct ni_gpct *);
-void ni_tio_handle_interrupt(struct ni_gpct *, struct comedi_subdevice *);
-void ni_tio_set_mite_channel(struct ni_gpct *, struct mite_channel *);
-void ni_tio_acknowledge(struct ni_gpct *);
-
-#endif /* _COMEDI_NI_TIO_H */
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
deleted file mode 100644
index 2bceae493e23..000000000000
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- drivers/ni_tio_internal.h
- Header file for NI general purpose counter support code (ni_tio.c and
- ni_tiocmd.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-#ifndef _COMEDI_NI_TIO_INTERNAL_H
-#define _COMEDI_NI_TIO_INTERNAL_H
-
-#include "ni_tio.h"
-
-#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x))
-#define GI_AUTO_INC_MASK 0xff
-#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x))
-#define GI_ARM (1 << 0)
-#define GI_SAVE_TRACE (1 << 1)
-#define GI_LOAD (1 << 2)
-#define GI_DISARM (1 << 4)
-#define GI_CNT_DIR(x) (((x) & 0x3) << 5)
-#define GI_CNT_DIR_MASK (3 << 5)
-#define GI_WRITE_SWITCH (1 << 7)
-#define GI_SYNC_GATE (1 << 8)
-#define GI_LITTLE_BIG_ENDIAN (1 << 9)
-#define GI_BANK_SWITCH_START (1 << 10)
-#define GI_BANK_SWITCH_MODE (1 << 11)
-#define GI_BANK_SWITCH_ENABLE (1 << 12)
-#define GI_ARM_COPY (1 << 13)
-#define GI_SAVE_TRACE_COPY (1 << 14)
-#define GI_DISARM_COPY (1 << 15)
-#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x))
-#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x))
-#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x))
-#define GI_GATING_DISABLED (0 << 0)
-#define GI_LEVEL_GATING (1 << 0)
-#define GI_RISING_EDGE_GATING (2 << 0)
-#define GI_FALLING_EDGE_GATING (3 << 0)
-#define GI_GATING_MODE_MASK (3 << 0)
-#define GI_GATE_ON_BOTH_EDGES (1 << 2)
-#define GI_EDGE_GATE_STARTS_STOPS (0 << 3)
-#define GI_EDGE_GATE_STOPS_STARTS (1 << 3)
-#define GI_EDGE_GATE_STARTS (2 << 3)
-#define GI_EDGE_GATE_NO_STARTS_OR_STOPS (3 << 3)
-#define GI_EDGE_GATE_MODE_MASK (3 << 3)
-#define GI_STOP_ON_GATE (0 << 5)
-#define GI_STOP_ON_GATE_OR_TC (1 << 5)
-#define GI_STOP_ON_GATE_OR_SECOND_TC (2 << 5)
-#define GI_STOP_MODE_MASK (3 << 5)
-#define GI_LOAD_SRC_SEL (1 << 7)
-#define GI_OUTPUT_TC_PULSE (1 << 8)
-#define GI_OUTPUT_TC_TOGGLE (2 << 8)
-#define GI_OUTPUT_TC_OR_GATE_TOGGLE (3 << 8)
-#define GI_OUTPUT_MODE_MASK (3 << 8)
-#define GI_NO_HARDWARE_DISARM (0 << 10)
-#define GI_DISARM_AT_TC (1 << 10)
-#define GI_DISARM_AT_GATE (2 << 10)
-#define GI_DISARM_AT_TC_OR_GATE (3 << 10)
-#define GI_COUNTING_ONCE_MASK (3 << 10)
-#define GI_LOADING_ON_TC (1 << 12)
-#define GI_GATE_POL_INVERT (1 << 13)
-#define GI_LOADING_ON_GATE (1 << 14)
-#define GI_RELOAD_SRC_SWITCHING (1 << 15)
-#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x))
-#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x))
-#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x))
-#define GI_READ_ACKS_IRQ (1 << 0)
-#define GI_WRITE_ACKS_IRQ (1 << 1)
-#define GI_BITS_TO_SRC(x) (((x) >> 2) & 0x1f)
-#define GI_SRC_SEL(x) (((x) & 0x1f) << 2)
-#define GI_SRC_SEL_MASK (0x1f << 2)
-#define GI_BITS_TO_GATE(x) (((x) >> 7) & 0x1f)
-#define GI_GATE_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE_SEL_MASK (0x1f << 7)
-#define GI_GATE_SEL_LOAD_SRC (1 << 12)
-#define GI_OR_GATE (1 << 13)
-#define GI_OUTPUT_POL_INVERT (1 << 14)
-#define GI_SRC_POL_INVERT (1 << 15)
-#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x))
-#define GI_CNT_MODE(x) (((x) & 0x7) << 0)
-#define GI_CNT_MODE_NORMAL GI_CNT_MODE(0)
-#define GI_CNT_MODE_QUADX1 GI_CNT_MODE(1)
-#define GI_CNT_MODE_QUADX2 GI_CNT_MODE(2)
-#define GI_CNT_MODE_QUADX4 GI_CNT_MODE(3)
-#define GI_CNT_MODE_TWO_PULSE GI_CNT_MODE(4)
-#define GI_CNT_MODE_SYNC_SRC GI_CNT_MODE(6)
-#define GI_CNT_MODE_MASK (7 << 0)
-#define GI_INDEX_MODE (1 << 4)
-#define GI_INDEX_PHASE(x) (((x) & 0x3) << 5)
-#define GI_INDEX_PHASE_MASK (3 << 5)
-#define GI_HW_ARM_ENA (1 << 7)
-#define GI_HW_ARM_SEL(x) ((x) << 8)
-#define GI_660X_HW_ARM_SEL_MASK (0x7 << 8)
-#define GI_M_HW_ARM_SEL_MASK (0x1f << 8)
-#define GI_660X_PRESCALE_X8 (1 << 12)
-#define GI_M_PRESCALE_X8 (1 << 13)
-#define GI_660X_ALT_SYNC (1 << 13)
-#define GI_M_ALT_SYNC (1 << 14)
-#define GI_660X_PRESCALE_X2 (1 << 14)
-#define GI_M_PRESCALE_X2 (1 << 15)
-#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x))
-#define GI_GATE2_MODE (1 << 0)
-#define GI_BITS_TO_GATE2(x) (((x) >> 7) & 0x1f)
-#define GI_GATE2_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE2_SEL_MASK (0x1f << 7)
-#define GI_GATE2_POL_INVERT (1 << 13)
-#define GI_GATE2_SUBSEL (1 << 14)
-#define GI_SRC_SUBSEL (1 << 15)
-#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2))
-#define GI_SAVE(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_COUNTING(x) (((x) % 2) ? (1 << 3) : (1 << 2))
-#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? (1 << 5) : (1 << 4))
-#define GI_STALE_DATA(x) (((x) % 2) ? (1 << 7) : (1 << 6))
-#define GI_ARMED(x) (((x) % 2) ? (1 << 9) : (1 << 8))
-#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? (1 << 11) : (1 << 10))
-#define GI_TC_ERROR(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_GATE_ERROR(x) (((x) % 2) ? (1 << 15) : (1 << 14))
-#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2))
-#define GI_RESET(x) (1 << (2 + ((x) % 2)))
-#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2))
-#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2))
-#define GI_OUTPUT(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_HW_SAVE(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_PERMANENT_STALE(x) (((x) % 2) ? (1 << 15) : (1 << 14))
-#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x))
-#define GI_DMA_ENABLE (1 << 0)
-#define GI_DMA_WRITE (1 << 1)
-#define GI_DMA_INT_ENA (1 << 2)
-#define GI_DMA_RESET (1 << 3)
-#define GI_DMA_BANKSW_ERROR (1 << 4)
-#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x))
-#define GI_DMA_READBANK (1 << 13)
-#define GI_DRQ_ERROR (1 << 14)
-#define GI_DRQ_STATUS (1 << 15)
-#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x))
-#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x))
-#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 1) : (1 << 5))
-#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 2) : (1 << 6))
-#define GI_TC_INTERRUPT_ACK (1 << 14)
-#define GI_GATE_INTERRUPT_ACK (1 << 15)
-#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x))
-#define GI_GATE_INTERRUPT (1 << 2)
-#define GI_TC (1 << 3)
-#define GI_INTERRUPT (1 << 15)
-#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x))
-#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 9) : (1 << 6))
-#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 10) : (1 << 8))
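A short worked example of the shared-register indexing above: counters 0/1 share the G01 registers and counters 2/3 the G23 registers, with odd-numbered counters using the upper bit of each pair.

	/* NITIO_SHARED_STATUS_REG(3) == NITIO_G01_STATUS + 3 / 2
	 *                            == NITIO_G23_STATUS
	 * GI_ARMED(2) == (1 << 8), GI_ARMED(3) == (1 << 9)
	 */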
-
-static inline void write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- counter->counter_dev->write_register(counter, bits, reg);
-}
-
-static inline unsigned read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- return counter->counter_dev->read_register(counter, reg);
-}
-
-static inline int ni_tio_counting_mode_registers_present(const struct
- ni_gpct_device
- *counter_dev)
-{
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- return 0;
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- return 1;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline void ni_tio_set_bits_transient(struct ni_gpct *counter,
- enum ni_gpct_register
- register_index, unsigned bit_mask,
- unsigned bit_values,
- unsigned transient_bit_values)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- counter_dev->regs[register_index] &= ~bit_mask;
- counter_dev->regs[register_index] |= (bit_values & bit_mask);
- write_register(counter,
- counter_dev->regs[register_index] | transient_bit_values,
- register_index);
- mmiowb();
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
-}
-
-/*
- * ni_tio_set_bits() is for safely writing to registers whose bits may be
- * twiddled in interrupt context, or whose software copy may be read in
- * interrupt context.
- */
-static inline void ni_tio_set_bits(struct ni_gpct *counter,
- enum ni_gpct_register register_index,
- unsigned bit_mask, unsigned bit_values)
-{
- ni_tio_set_bits_transient(counter, register_index, bit_mask, bit_values,
- 0x0);
-}
-
-/*
- * ni_tio_get_soft_copy() is for safely reading the software copy of a
- * register whose bits might be modified in interrupt context, or whose
- * software copy might need to be read in interrupt context.
- */
-static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter,
- enum ni_gpct_register
- register_index)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
- unsigned value;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- value = counter_dev->regs[register_index];
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
- return value;
-}
-
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger);
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source);
-
-#endif /* _COMEDI_NI_TIO_INTERNAL_H */
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
deleted file mode 100644
index 437f723bb34d..000000000000
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- comedi/drivers/ni_tiocmd.c
- Command support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-/*
- * Module: ni_tiocmd
- * Description: National Instruments general purpose counters command support
- * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
- * Herman.Bruyninckx@mech.kuleuven.ac.be,
- * Wim.Meeussen@mech.kuleuven.ac.be,
- * Klaas.Gadeyne@mech.kuleuven.ac.be,
- * Frank Mori Hess <fmhess@users.sourceforge.net>
- * Updated: Fri, 11 Apr 2008 12:32:35 +0100
- * Status: works
- *
- * This module is not used directly by end-users. Rather, it
- * is used by other drivers (for example ni_660x and ni_pcimio)
- * to provide command support for NI's general purpose counters.
- * It was originally split out of ni_tio.c to stop the 'ni_tio'
- * module depending on the 'mite' module.
- *
- * References:
- * DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
- * DAQ 6601/6602 User Manual (NI 322137B-01)
- * 340934b.pdf DAQ-STC reference manual
- */
-
-/*
- * TODO:
- *	Support use of both banks X and Y
- */
-
-#include <linux/module.h>
-#include "ni_tio_internal.h"
-#include "mite.h"
-
-static void ni_tio_configure_dma(struct ni_gpct *counter,
- bool enable, bool read)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mask;
- unsigned bits;
-
- mask = GI_READ_ACKS_IRQ | GI_WRITE_ACKS_IRQ;
- bits = 0;
-
- if (enable) {
- if (read)
- bits |= GI_READ_ACKS_IRQ;
- else
- bits |= GI_WRITE_ACKS_IRQ;
- }
- ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), mask, bits);
-
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- break;
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- mask = GI_DMA_ENABLE | GI_DMA_INT_ENA | GI_DMA_WRITE;
- bits = 0;
-
- if (enable)
- bits |= GI_DMA_ENABLE | GI_DMA_INT_ENA;
- if (!read)
- bits |= GI_DMA_WRITE;
- ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), mask, bits);
- break;
- }
-}
-
-static int ni_tio_input_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct ni_gpct *counter = s->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long flags;
- int ret = 0;
-
- if (trig_num != cmd->start_src)
- return -EINVAL;
-
- spin_lock_irqsave(&counter->lock, flags);
- if (counter->mite_chan)
- mite_dma_arm(counter->mite_chan);
- else
- ret = -EIO;
- spin_unlock_irqrestore(&counter->lock, flags);
- if (ret < 0)
- return ret;
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
- s->async->inttrig = NULL;
-
- return ret;
-}
-
-static int ni_tio_input_cmd(struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- int ret = 0;
-
- /* write alloc the entire buffer */
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
- counter->mite_chan->dir = COMEDI_INPUT;
- switch (counter_dev->variant) {
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- mite_prep_dma(counter->mite_chan, 32, 32);
- break;
- case ni_gpct_variant_e_series:
- mite_prep_dma(counter->mite_chan, 16, 32);
- break;
- default:
- BUG();
- break;
- }
- ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
- ni_tio_configure_dma(counter, true, true);
-
- if (cmd->start_src == TRIG_INT) {
- async->inttrig = &ni_tio_input_inttrig;
- } else { /* TRIG_NOW || TRIG_EXT || TRIG_OTHER */
- async->inttrig = NULL;
- mite_dma_arm(counter->mite_chan);
-
- if (cmd->start_src == TRIG_NOW)
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
- else if (cmd->start_src == TRIG_EXT)
- ret = ni_tio_arm(counter, 1, cmd->start_arg);
- }
- return ret;
-}
-
-static int ni_tio_output_cmd(struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
-
- dev_err(counter->counter_dev->dev->class_dev,
- "output commands not yet implemented.\n");
- return -ENOTSUPP;
-}
-
-static int ni_tio_cmd_setup(struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
- int set_gate_source = 0;
- unsigned gate_source;
- int retval = 0;
-
- if (cmd->scan_begin_src == TRIG_EXT) {
- set_gate_source = 1;
- gate_source = cmd->scan_begin_arg;
- } else if (cmd->convert_src == TRIG_EXT) {
- set_gate_source = 1;
- gate_source = cmd->convert_arg;
- }
- if (set_gate_source)
- retval = ni_tio_set_gate_src(counter, 0, gate_source);
- if (cmd->flags & CMDF_WAKE_EOS) {
- ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx),
- GI_GATE_INTERRUPT_ENABLE(cidx),
- GI_GATE_INTERRUPT_ENABLE(cidx));
- }
- return retval;
-}
-
-int ni_tio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct ni_gpct *counter = s->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- int retval = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&counter->lock, flags);
- if (!counter->mite_chan) {
- dev_err(counter->counter_dev->dev->class_dev,
- "commands only supported with DMA. ");
- dev_err(counter->counter_dev->dev->class_dev,
- "Interrupt-driven commands not yet implemented.\n");
- retval = -EIO;
- } else {
- retval = ni_tio_cmd_setup(s);
- if (retval == 0) {
- if (cmd->flags & CMDF_WRITE)
- retval = ni_tio_output_cmd(s);
- else
- retval = ni_tio_input_cmd(s);
- }
- }
- spin_unlock_irqrestore(&counter->lock, flags);
- return retval;
-}
-EXPORT_SYMBOL_GPL(ni_tio_cmd);
-
-int ni_tio_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct ni_gpct *counter = s->private;
- int err = 0;
- unsigned int sources;
-
- /* Step 1 : check if triggers are trivially valid */
-
- sources = TRIG_NOW | TRIG_INT | TRIG_OTHER;
- if (ni_tio_counting_mode_registers_present(counter->counter_dev))
- sources |= TRIG_EXT;
- err |= comedi_check_trigger_src(&cmd->start_src, sources);
-
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_FOLLOW | TRIG_EXT | TRIG_OTHER);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_NOW | TRIG_EXT | TRIG_OTHER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
-
- /* Step 2b : and mutually compatible */
-
- if (cmd->convert_src != TRIG_NOW && cmd->scan_begin_src != TRIG_FOLLOW)
- err |= -EINVAL;
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- case TRIG_INT:
- case TRIG_OTHER:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- /* start_arg is the start_trigger passed to ni_tio_arm() */
- break;
- }
-
- if (cmd->scan_begin_src != TRIG_EXT)
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->convert_src != TRIG_EXT)
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
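As a concrete illustration, a command of the following shape satisfies every check above: buffered reads latched on an external gate edge.

	/*
	 *   start_src      = TRIG_NOW,    start_arg      = 0
	 *   scan_begin_src = TRIG_EXT,    scan_begin_arg = gate chanspec
	 *   convert_src    = TRIG_NOW,    convert_arg    = 0
	 *   scan_end_src   = TRIG_COUNT,  scan_end_arg   = chanlist_len
	 *   stop_src       = TRIG_NONE,   stop_arg       = 0
	 */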
-
-int ni_tio_cancel(struct ni_gpct *counter)
-{
- unsigned cidx = counter->counter_index;
- unsigned long flags;
-
- ni_tio_arm(counter, 0, 0);
- spin_lock_irqsave(&counter->lock, flags);
- if (counter->mite_chan)
- mite_dma_disarm(counter->mite_chan);
- spin_unlock_irqrestore(&counter->lock, flags);
- ni_tio_configure_dma(counter, false, false);
-
- ni_tio_set_bits(counter, NITIO_INT_ENA_REG(cidx),
- GI_GATE_INTERRUPT_ENABLE(cidx), 0x0);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ni_tio_cancel);
-
-/*
- * During buffered input counter operation for e-series, the gate
- * interrupt is acked automatically by the dma controller, due to the
- * Gi_Read/Write_Acknowledges_IRQ bits in the input select register.
- */
-static int should_ack_gate(struct ni_gpct *counter)
-{
- unsigned long flags;
- int retval = 0;
-
- switch (counter->counter_dev->variant) {
- case ni_gpct_variant_m_series:
-		/*
-		 * Not sure if the 660x really supports gate interrupts
-		 * (the bits are not listed in the register-level manual).
-		 */
- case ni_gpct_variant_660x:
- return 1;
- case ni_gpct_variant_e_series:
- spin_lock_irqsave(&counter->lock, flags);
- {
- if (!counter->mite_chan ||
- counter->mite_chan->dir != COMEDI_INPUT ||
- (mite_done(counter->mite_chan))) {
- retval = 1;
- }
- }
- spin_unlock_irqrestore(&counter->lock, flags);
- break;
- }
- return retval;
-}
-
-static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
- int *gate_error,
- int *tc_error,
- int *perm_stale_data,
- int *stale_data)
-{
- unsigned cidx = counter->counter_index;
- const unsigned short gxx_status = read_register(counter,
- NITIO_SHARED_STATUS_REG(cidx));
- const unsigned short gi_status = read_register(counter,
- NITIO_STATUS_REG(cidx));
- unsigned ack = 0;
-
- if (gate_error)
- *gate_error = 0;
- if (tc_error)
- *tc_error = 0;
- if (perm_stale_data)
- *perm_stale_data = 0;
- if (stale_data)
- *stale_data = 0;
-
- if (gxx_status & GI_GATE_ERROR(cidx)) {
- ack |= GI_GATE_ERROR_CONFIRM(cidx);
- if (gate_error) {
-			/*
-			 * The 660x doesn't support automatic acknowledgment
-			 * of the gate interrupt via DMA read/write and
-			 * reports bogus gate errors.
-			 */
- if (counter->counter_dev->variant !=
- ni_gpct_variant_660x)
- *gate_error = 1;
- }
- }
- if (gxx_status & GI_TC_ERROR(cidx)) {
- ack |= GI_TC_ERROR_CONFIRM(cidx);
- if (tc_error)
- *tc_error = 1;
- }
- if (gi_status & GI_TC)
- ack |= GI_TC_INTERRUPT_ACK;
- if (gi_status & GI_GATE_INTERRUPT) {
- if (should_ack_gate(counter))
- ack |= GI_GATE_INTERRUPT_ACK;
- }
- if (ack)
- write_register(counter, ack, NITIO_INT_ACK_REG(cidx));
- if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) &
- GI_LOADING_ON_GATE) {
- if (gxx_status & GI_STALE_DATA(cidx)) {
- if (stale_data)
- *stale_data = 1;
- }
- if (read_register(counter, NITIO_STATUS2_REG(cidx)) &
- GI_PERMANENT_STALE(cidx)) {
- dev_info(counter->counter_dev->dev->class_dev,
- "%s: Gi_Permanent_Stale_Data detected.\n",
- __func__);
- if (perm_stale_data)
- *perm_stale_data = 1;
- }
- }
-}
-
-void ni_tio_acknowledge(struct ni_gpct *counter)
-{
- ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(ni_tio_acknowledge);
-
-void ni_tio_handle_interrupt(struct ni_gpct *counter,
- struct comedi_subdevice *s)
-{
- unsigned cidx = counter->counter_index;
- unsigned gpct_mite_status;
- unsigned long flags;
- int gate_error;
- int tc_error;
- int perm_stale_data;
-
- ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
- &perm_stale_data, NULL);
- if (gate_error) {
- dev_notice(counter->counter_dev->dev->class_dev,
- "%s: Gi_Gate_Error detected.\n", __func__);
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
- if (perm_stale_data)
- s->async->events |= COMEDI_CB_ERROR;
- switch (counter->counter_dev->variant) {
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) &
- GI_DRQ_ERROR) {
- dev_notice(counter->counter_dev->dev->class_dev,
- "%s: Gi_DRQ_Error detected.\n", __func__);
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
- break;
- case ni_gpct_variant_e_series:
- break;
- }
- spin_lock_irqsave(&counter->lock, flags);
- if (!counter->mite_chan) {
- spin_unlock_irqrestore(&counter->lock, flags);
- return;
- }
- gpct_mite_status = mite_get_status(counter->mite_chan);
- if (gpct_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- counter->mite_chan->mite->mite_io_addr +
- MITE_CHOR(counter->mite_chan->channel));
- mite_sync_input_dma(counter->mite_chan, s);
- spin_unlock_irqrestore(&counter->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
-
-void ni_tio_set_mite_channel(struct ni_gpct *counter,
- struct mite_channel *mite_chan)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&counter->lock, flags);
- counter->mite_chan = mite_chan;
- spin_unlock_irqrestore(&counter->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ni_tio_set_mite_channel);
-
-static int __init ni_tiocmd_init_module(void)
-{
- return 0;
-}
-module_init(ni_tiocmd_init_module);
-
-static void __exit ni_tiocmd_cleanup_module(void)
-{
-}
-module_exit(ni_tiocmd_cleanup_module);
-
-MODULE_AUTHOR("Comedi <comedi@comedi.org>");
-MODULE_DESCRIPTION("Comedi command support for NI general-purpose counters");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
deleted file mode 100644
index 88de8da3eff3..000000000000
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ /dev/null
@@ -1,615 +0,0 @@
-/*
- * comedi/drivers/ni_usb6501.c
- * Comedi driver for National Instruments USB-6501
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2014 Luca Ellero <luca.ellero@brickedbrain.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ni_usb6501
- * Description: National Instruments USB-6501 module
- * Devices: [National Instruments] USB-6501 (ni_usb6501)
- * Author: Luca Ellero <luca.ellero@brickedbrain.com>
- * Updated: 8 Sep 2014
- * Status: works
- *
- *
- * Configuration Options:
- * none
- */
-
-/*
- * NI-6501 - USB PROTOCOL DESCRIPTION
- *
- * Every command is composed by two USB packets:
- * - request (out)
- * - response (in)
- *
- * Every packet is at least 12 bytes long, here is the meaning of
- * every field (all values are hex):
- *
- * byte 0 is always 00
- * byte 1 is always 01
- * byte 2 is always 00
- * byte 3 is the total packet length
- *
- * byte 4 is always 00
- * byte 5 is the total packet length - 4
- * byte 6 is always 01
- * byte 7 is the command
- *
- * byte 8 is 02 (request) or 00 (response)
- * byte 9 is 00 (response) or 10 (port request) or 20 (counter request)
- * byte 10 is always 00
- * byte 11 is 00 (request) or 02 (response)
- *
- * PORT PACKETS
- *
- * CMD: 0xE READ_PORT
- * REQ: 00 01 00 10 00 0C 01 0E 02 10 00 00 00 03 <PORT> 00
- * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 00 03 <BMAP> 00
- *
- * CMD: 0xF WRITE_PORT
- * REQ: 00 01 00 14 00 10 01 0F 02 10 00 00 00 03 <PORT> 00 03 <BMAP> 00 00
- * RES: 00 01 00 0C 00 08 01 00 00 00 00 02
- *
- * CMD: 0x12 SET_PORT_DIR (0 = input, 1 = output)
- * REQ: 00 01 00 18 00 14 01 12 02 10 00 00
- * 00 05 <PORT 0> <PORT 1> <PORT 2> 00 05 00 00 00 00 00
- * RES: 00 01 00 0C 00 08 01 00 00 00 00 02
- *
- * COUNTER PACKETS
- *
- * CMD 0x9: START_COUNTER
- * REQ: 00 01 00 0C 00 08 01 09 02 20 00 00
- * RES: 00 01 00 0C 00 08 01 00 00 00 00 02
- *
- * CMD 0xC: STOP_COUNTER
- * REQ: 00 01 00 0C 00 08 01 0C 02 20 00 00
- * RES: 00 01 00 0C 00 08 01 00 00 00 00 02
- *
- * CMD 0xE: READ_COUNTER
- * REQ: 00 01 00 0C 00 08 01 0E 02 20 00 00
- * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 <u32 counter value, Big Endian>
- *
- * CMD 0xF: WRITE_COUNTER
- * REQ: 00 01 00 10 00 0C 01 0F 02 20 00 00 <u32 counter value, Big Endian>
- * RES: 00 01 00 0C 00 08 01 00 00 00 00 02
- *
- *
- * Please visit http://www.brickedbrain.com if you need
- * additional information or have any questions.
- *
- */
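
A minimal sketch tying the byte-offset tables above to actual buffers; the helper names below are hypothetical, and the byte values are taken directly from the READ_PORT request/response lines documented above (byte 14 carries <PORT> in the request and <BMAP> in the response):

/*
 * Editor's illustration only (not part of the driver); the helper
 * names are made up, the byte layout follows the tables above.
 */
#include <stdint.h>
#include <string.h>

/* Build a 16-byte READ_PORT request for port 0, 1 or 2. */
static void ni6501_sketch_build_read_port(uint8_t buf[16], uint8_t port)
{
	static const uint8_t tmpl[16] = {
		0x00, 0x01, 0x00, 0x10,	/* bytes 0-3: header, total length */
		0x00, 0x0C, 0x01, 0x0E,	/* bytes 4-7: length - 4, cmd 0x0E */
		0x02, 0x10, 0x00, 0x00,	/* bytes 8-11: request, port class */
		0x00, 0x03, 0x00, 0x00,	/* bytes 12-15: byte 14 = <PORT>   */
	};

	memcpy(buf, tmpl, sizeof(tmpl));
	buf[14] = port;
}

/* The READ_PORT response carries the port bitmap in byte 14. */
static uint8_t ni6501_sketch_read_port_bitmap(const uint8_t rsp[16])
{
	return rsp[14];
}

This is exactly what the driver's READ_PORT_REQUEST template and ni6501_port_command() below do, just stripped of the USB plumbing.
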
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "../comedi_usb.h"
-
-#define NI6501_TIMEOUT 1000
-
-/* Port request packets */
-static const u8 READ_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x10,
- 0x00, 0x0C, 0x01, 0x0E,
- 0x02, 0x10, 0x00, 0x00,
- 0x00, 0x03, 0x00, 0x00};
-
-static const u8 WRITE_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x14,
- 0x00, 0x10, 0x01, 0x0F,
- 0x02, 0x10, 0x00, 0x00,
- 0x00, 0x03, 0x00, 0x00,
- 0x03, 0x00, 0x00, 0x00};
-
-static const u8 SET_PORT_DIR_REQUEST[] = {0x00, 0x01, 0x00, 0x18,
- 0x00, 0x14, 0x01, 0x12,
- 0x02, 0x10, 0x00, 0x00,
- 0x00, 0x05, 0x00, 0x00,
- 0x00, 0x00, 0x05, 0x00,
- 0x00, 0x00, 0x00, 0x00};
-
-/* Counter request packets */
-static const u8 START_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C,
- 0x00, 0x08, 0x01, 0x09,
- 0x02, 0x20, 0x00, 0x00};
-
-static const u8 STOP_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C,
- 0x00, 0x08, 0x01, 0x0C,
- 0x02, 0x20, 0x00, 0x00};
-
-static const u8 READ_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C,
- 0x00, 0x08, 0x01, 0x0E,
- 0x02, 0x20, 0x00, 0x00};
-
-static const u8 WRITE_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x10,
- 0x00, 0x0C, 0x01, 0x0F,
- 0x02, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00};
-
-/* Response packets */
-static const u8 GENERIC_RESPONSE[] = {0x00, 0x01, 0x00, 0x0C,
- 0x00, 0x08, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x02};
-
-static const u8 READ_PORT_RESPONSE[] = {0x00, 0x01, 0x00, 0x10,
- 0x00, 0x0C, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x02,
- 0x00, 0x03, 0x00, 0x00};
-
-static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10,
- 0x00, 0x0C, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x02,
- 0x00, 0x00, 0x00, 0x00};
-
-enum commands {
- READ_PORT,
- WRITE_PORT,
- SET_PORT_DIR,
- START_COUNTER,
- STOP_COUNTER,
- READ_COUNTER,
- WRITE_COUNTER
-};
-
-struct ni6501_private {
- struct usb_endpoint_descriptor *ep_rx;
- struct usb_endpoint_descriptor *ep_tx;
- struct semaphore sem;
- u8 *usb_rx_buf;
- u8 *usb_tx_buf;
-};
-
-static int ni6501_port_command(struct comedi_device *dev, int command,
- unsigned int val, u8 *bitmap)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct ni6501_private *devpriv = dev->private;
- int request_size, response_size;
- u8 *tx = devpriv->usb_tx_buf;
- int ret;
-
- if (command != SET_PORT_DIR && !bitmap)
- return -EINVAL;
-
- down(&devpriv->sem);
-
- switch (command) {
- case READ_PORT:
- request_size = sizeof(READ_PORT_REQUEST);
- response_size = sizeof(READ_PORT_RESPONSE);
- memcpy(tx, READ_PORT_REQUEST, request_size);
- tx[14] = val & 0xff;
- break;
- case WRITE_PORT:
- request_size = sizeof(WRITE_PORT_REQUEST);
- response_size = sizeof(GENERIC_RESPONSE);
- memcpy(tx, WRITE_PORT_REQUEST, request_size);
- tx[14] = val & 0xff;
- tx[17] = *bitmap;
- break;
- case SET_PORT_DIR:
- request_size = sizeof(SET_PORT_DIR_REQUEST);
- response_size = sizeof(GENERIC_RESPONSE);
- memcpy(tx, SET_PORT_DIR_REQUEST, request_size);
- tx[14] = val & 0xff;
- tx[15] = (val >> 8) & 0xff;
- tx[16] = (val >> 16) & 0xff;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
-
- ret = usb_bulk_msg(usb,
- usb_sndbulkpipe(usb,
- devpriv->ep_tx->bEndpointAddress),
- devpriv->usb_tx_buf,
- request_size,
- NULL,
- NI6501_TIMEOUT);
- if (ret)
- goto end;
-
- ret = usb_bulk_msg(usb,
- usb_rcvbulkpipe(usb,
- devpriv->ep_rx->bEndpointAddress),
- devpriv->usb_rx_buf,
- response_size,
- NULL,
- NI6501_TIMEOUT);
- if (ret)
- goto end;
-
- /* Check if results are valid */
-
- if (command == READ_PORT) {
- *bitmap = devpriv->usb_rx_buf[14];
- /* mask bitmap for comparing */
- devpriv->usb_rx_buf[14] = 0x00;
-
- if (memcmp(devpriv->usb_rx_buf, READ_PORT_RESPONSE,
- sizeof(READ_PORT_RESPONSE))) {
- ret = -EINVAL;
- }
- } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE,
- sizeof(GENERIC_RESPONSE))) {
- ret = -EINVAL;
- }
-end:
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int ni6501_counter_command(struct comedi_device *dev, int command,
- u32 *val)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct ni6501_private *devpriv = dev->private;
- int request_size, response_size;
- u8 *tx = devpriv->usb_tx_buf;
- int ret;
-
- if ((command == READ_COUNTER || command == WRITE_COUNTER) && !val)
- return -EINVAL;
-
- down(&devpriv->sem);
-
- switch (command) {
- case START_COUNTER:
- request_size = sizeof(START_COUNTER_REQUEST);
- response_size = sizeof(GENERIC_RESPONSE);
- memcpy(tx, START_COUNTER_REQUEST, request_size);
- break;
- case STOP_COUNTER:
- request_size = sizeof(STOP_COUNTER_REQUEST);
- response_size = sizeof(GENERIC_RESPONSE);
- memcpy(tx, STOP_COUNTER_REQUEST, request_size);
- break;
- case READ_COUNTER:
- request_size = sizeof(READ_COUNTER_REQUEST);
- response_size = sizeof(READ_COUNTER_RESPONSE);
- memcpy(tx, READ_COUNTER_REQUEST, request_size);
- break;
- case WRITE_COUNTER:
- request_size = sizeof(WRITE_COUNTER_REQUEST);
- response_size = sizeof(GENERIC_RESPONSE);
- memcpy(tx, WRITE_COUNTER_REQUEST, request_size);
-		/*
-		 * Setup tx packet: bytes 12,13,14,15 hold the
-		 * u32 counter value (Big Endian).
-		 */
- *((__be32 *)&tx[12]) = cpu_to_be32(*val);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
-
- ret = usb_bulk_msg(usb,
- usb_sndbulkpipe(usb,
- devpriv->ep_tx->bEndpointAddress),
- devpriv->usb_tx_buf,
- request_size,
- NULL,
- NI6501_TIMEOUT);
- if (ret)
- goto end;
-
- ret = usb_bulk_msg(usb,
- usb_rcvbulkpipe(usb,
- devpriv->ep_rx->bEndpointAddress),
- devpriv->usb_rx_buf,
- response_size,
- NULL,
- NI6501_TIMEOUT);
- if (ret)
- goto end;
-
- /* Check if results are valid */
-
- if (command == READ_COUNTER) {
- int i;
-
-		/*
-		 * Read counter value: bytes 12,13,14,15 of the rx packet
-		 * hold the u32 counter value (Big Endian).
-		 */
- *val = be32_to_cpu(*((__be32 *)&devpriv->usb_rx_buf[12]));
-
- /* mask counter value for comparing */
- for (i = 12; i < sizeof(READ_COUNTER_RESPONSE); ++i)
- devpriv->usb_rx_buf[i] = 0x00;
-
- if (memcmp(devpriv->usb_rx_buf, READ_COUNTER_RESPONSE,
- sizeof(READ_COUNTER_RESPONSE))) {
- ret = -EINVAL;
- }
- } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE,
- sizeof(GENERIC_RESPONSE))) {
- ret = -EINVAL;
- }
-end:
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int ni6501_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- ret = ni6501_port_command(dev, SET_PORT_DIR, s->io_bits, NULL);
- if (ret)
- return ret;
-
- return insn->n;
-}
-
-static int ni6501_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
- int ret;
- u8 port;
- u8 bitmap;
-
- mask = comedi_dio_update_state(s, data);
-
- for (port = 0; port < 3; port++) {
- if (mask & (0xFF << port * 8)) {
- bitmap = (s->state >> port * 8) & 0xFF;
- ret = ni6501_port_command(dev, WRITE_PORT,
- port, &bitmap);
- if (ret)
- return ret;
- }
- }
-
- data[1] = 0;
-
- for (port = 0; port < 3; port++) {
- ret = ni6501_port_command(dev, READ_PORT, port, &bitmap);
- if (ret)
- return ret;
- data[1] |= bitmap << port * 8;
- }
-
- return insn->n;
-}
-
-static int ni6501_cnt_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
- u32 val = 0;
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- ret = ni6501_counter_command(dev, START_COUNTER, NULL);
- break;
- case INSN_CONFIG_DISARM:
- ret = ni6501_counter_command(dev, STOP_COUNTER, NULL);
- break;
- case INSN_CONFIG_RESET:
- ret = ni6501_counter_command(dev, STOP_COUNTER, NULL);
- if (ret)
- break;
- ret = ni6501_counter_command(dev, WRITE_COUNTER, &val);
- break;
- default:
- return -EINVAL;
- }
-
- return ret ? ret : insn->n;
-}
-
-static int ni6501_cnt_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
- u32 val;
- unsigned int i;
-
- for (i = 0; i < insn->n; i++) {
- ret = ni6501_counter_command(dev, READ_COUNTER, &val);
- if (ret)
- return ret;
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int ni6501_cnt_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- if (insn->n) {
- u32 val = data[insn->n - 1];
-
- ret = ni6501_counter_command(dev, WRITE_COUNTER, &val);
- if (ret)
- return ret;
- }
-
- return insn->n;
-}
-
-static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
-{
- struct ni6501_private *devpriv = dev->private;
- size_t size;
-
- size = le16_to_cpu(devpriv->ep_rx->wMaxPacketSize);
- devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_rx_buf)
- return -ENOMEM;
-
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
- devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int ni6501_find_endpoints(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct ni6501_private *devpriv = dev->private;
- struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2) {
- dev_err(dev->class_dev, "Wrong number of endpoints\n");
- return -ENODEV;
- }
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
-
- if (usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
-
- if (!devpriv->ep_rx || !devpriv->ep_tx)
- return -ENODEV;
-
- return 0;
-}
-
-static int ni6501_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct ni6501_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = ni6501_find_endpoints(dev);
- if (ret)
- return ret;
-
- ret = ni6501_alloc_usb_buffers(dev);
- if (ret)
- return ret;
-
- sema_init(&devpriv->sem, 1);
- usb_set_intfdata(intf, devpriv);
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- /* Digital Input/Output subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni6501_dio_insn_bits;
- s->insn_config = ni6501_dio_insn_config;
-
- /* Counter subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
- s->n_chan = 1;
- s->maxdata = 0xffffffff;
- s->insn_read = ni6501_cnt_insn_read;
- s->insn_write = ni6501_cnt_insn_write;
- s->insn_config = ni6501_cnt_insn_config;
-
- return 0;
-}
-
-static void ni6501_detach(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct ni6501_private *devpriv = dev->private;
-
- if (!devpriv)
- return;
-
- down(&devpriv->sem);
-
- usb_set_intfdata(intf, NULL);
-
- kfree(devpriv->usb_rx_buf);
- kfree(devpriv->usb_tx_buf);
-
- up(&devpriv->sem);
-}
-
-static struct comedi_driver ni6501_driver = {
- .module = THIS_MODULE,
- .driver_name = "ni6501",
- .auto_attach = ni6501_auto_attach,
- .detach = ni6501_detach,
-};
-
-static int ni6501_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- return comedi_usb_auto_config(intf, &ni6501_driver, id->driver_info);
-}
-
-static const struct usb_device_id ni6501_usb_table[] = {
- { USB_DEVICE(0x3923, 0x718a) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, ni6501_usb_table);
-
-static struct usb_driver ni6501_usb_driver = {
- .name = "ni6501",
- .id_table = ni6501_usb_table,
- .probe = ni6501_usb_probe,
- .disconnect = comedi_usb_auto_unconfig,
-};
-module_comedi_usb_driver(ni6501_driver, ni6501_usb_driver);
-
-MODULE_AUTHOR("Luca Ellero");
-MODULE_DESCRIPTION("Comedi driver for National Instruments USB-6501");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
deleted file mode 100644
index cfc3a627d0ca..000000000000
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * pcl711.c
- * Comedi driver for PC-LabCard PCL-711 and AdSys ACL-8112 and compatibles
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- * Janne Jalkanen <jalkanen@cs.hut.fi>
- * Eric Bunn <ebu@cs.hut.fi>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: pcl711
- * Description: Advantech PCL-711 and 711b, ADLink ACL-8112
- * Devices: [Advantech] PCL-711 (pcl711), PCL-711B (pcl711b),
- * [ADLink] ACL-8112HG (acl8112hg), ACL-8112DG (acl8112dg)
- * Author: David A. Schleef <ds@schleef.org>
- * Janne Jalkanen <jalkanen@cs.hut.fi>
- * Eric Bunn <ebu@cs.hut.fi>
- * Updated:
- * Status: mostly complete
- *
- * Configuration Options:
- * [0] - I/O port base
- * [1] - IRQ, optional
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-
-/*
- * I/O port register map
- */
-#define PCL711_TIMER_BASE 0x00
-#define PCL711_AI_LSB_REG 0x04
-#define PCL711_AI_MSB_REG 0x05
-#define PCL711_AI_MSB_DRDY (1 << 4)
-#define PCL711_AO_LSB_REG(x) (0x04 + ((x) * 2))
-#define PCL711_AO_MSB_REG(x) (0x05 + ((x) * 2))
-#define PCL711_DI_LSB_REG 0x06
-#define PCL711_DI_MSB_REG 0x07
-#define PCL711_INT_STAT_REG 0x08
-#define PCL711_INT_STAT_CLR (0 << 0) /* any value will work */
-#define PCL711_AI_GAIN_REG 0x09
-#define PCL711_AI_GAIN(x) (((x) & 0xf) << 0)
-#define PCL711_MUX_REG 0x0a
-#define PCL711_MUX_CHAN(x) (((x) & 0xf) << 0)
-#define PCL711_MUX_CS0 (1 << 4)
-#define PCL711_MUX_CS1 (1 << 5)
-#define PCL711_MUX_DIFF (PCL711_MUX_CS0 | PCL711_MUX_CS1)
-#define PCL711_MODE_REG 0x0b
-#define PCL711_MODE_DEFAULT (0 << 0)
-#define PCL711_MODE_SOFTTRIG (1 << 0)
-#define PCL711_MODE_EXT (2 << 0)
-#define PCL711_MODE_EXT_IRQ (3 << 0)
-#define PCL711_MODE_PACER (4 << 0)
-#define PCL711_MODE_PACER_IRQ (6 << 0)
-#define PCL711_MODE_IRQ(x) (((x) & 0x7) << 4)
-#define PCL711_SOFTTRIG_REG 0x0c
-#define PCL711_SOFTTRIG (0 << 0) /* any value will work */
-#define PCL711_DO_LSB_REG 0x0d
-#define PCL711_DO_MSB_REG 0x0e
-
-static const struct comedi_lrange range_pcl711b_ai = {
- 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125)
- }
-};
-
-static const struct comedi_lrange range_acl8112hg_ai = {
- 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_acl8112dg_ai = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10)
- }
-};
-
-struct pcl711_board {
- const char *name;
- int n_aichan;
- int n_aochan;
- int maxirq;
- const struct comedi_lrange *ai_range_type;
-};
-
-static const struct pcl711_board boardtypes[] = {
- {
- .name = "pcl711",
- .n_aichan = 8,
- .n_aochan = 1,
- .ai_range_type = &range_bipolar5,
- }, {
- .name = "pcl711b",
- .n_aichan = 8,
- .n_aochan = 1,
- .maxirq = 7,
- .ai_range_type = &range_pcl711b_ai,
- }, {
- .name = "acl8112hg",
- .n_aichan = 16,
- .n_aochan = 2,
- .maxirq = 15,
- .ai_range_type = &range_acl8112hg_ai,
- }, {
- .name = "acl8112dg",
- .n_aichan = 16,
- .n_aochan = 2,
- .maxirq = 15,
- .ai_range_type = &range_acl8112dg_ai,
- },
-};
-
-static void pcl711_ai_set_mode(struct comedi_device *dev, unsigned int mode)
-{
- /*
- * The pcl711b board uses bits in the mode register to select the
- * interrupt. The other boards supported by this driver all use
- * jumpers on the board.
- *
- * Enables the interrupt when needed on the pcl711b board. These
- * bits do nothing on the other boards.
- */
- if (mode == PCL711_MODE_EXT_IRQ || mode == PCL711_MODE_PACER_IRQ)
- mode |= PCL711_MODE_IRQ(dev->irq);
-
- outb(mode, dev->iobase + PCL711_MODE_REG);
-}
-
-static unsigned int pcl711_ai_get_sample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int val;
-
- val = inb(dev->iobase + PCL711_AI_MSB_REG) << 8;
- val |= inb(dev->iobase + PCL711_AI_LSB_REG);
-
- return val & s->maxdata;
-}
-
-static int pcl711_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
- pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
- return 0;
-}
-
-static irqreturn_t pcl711_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int data;
-
- if (!dev->attached) {
- dev_err(dev->class_dev, "spurious interrupt\n");
- return IRQ_HANDLED;
- }
-
- data = pcl711_ai_get_sample(dev, s);
-
- outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
-
- comedi_buf_write_samples(s, &data, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static void pcl711_set_changain(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chanspec)
-{
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
- unsigned int mux = 0;
-
- outb(PCL711_AI_GAIN(range), dev->iobase + PCL711_AI_GAIN_REG);
-
- if (s->n_chan > 8) {
- /* Select the correct MPC508A chip */
- if (aref == AREF_DIFF) {
- chan &= 0x7;
- mux |= PCL711_MUX_DIFF;
- } else {
- if (chan < 8)
- mux |= PCL711_MUX_CS0;
- else
- mux |= PCL711_MUX_CS1;
- }
- }
- outb(mux | PCL711_MUX_CHAN(chan), dev->iobase + PCL711_MUX_REG);
-}
-
-static int pcl711_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + PCL711_AI_MSB_REG);
- if ((status & PCL711_AI_MSB_DRDY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static int pcl711_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
- int i;
-
- pcl711_set_changain(dev, s, insn->chanspec);
-
- pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
-
- for (i = 0; i < insn->n; i++) {
- outb(PCL711_SOFTTRIG, dev->iobase + PCL711_SOFTTRIG_REG);
-
- ret = comedi_timeout(dev, s, insn, pcl711_ai_eoc, 0);
- if (ret)
- return ret;
-
- data[i] = pcl711_ai_get_sample(dev, s);
- }
-
- return insn->n;
-}
-
-static int pcl711_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_EXT) {
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- } else {
-#define MAX_SPEED 1000
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SPEED);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4 */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- unsigned int arg = cmd->scan_begin_arg;
-
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int pcl711_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
- pcl711_set_changain(dev, s, cmd->chanlist[0]);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
- pcl711_ai_set_mode(dev, PCL711_MODE_PACER_IRQ);
- } else {
- pcl711_ai_set_mode(dev, PCL711_MODE_EXT_IRQ);
- }
-
- return 0;
-}
-
-static void pcl711_ao_write(struct comedi_device *dev,
- unsigned int chan, unsigned int val)
-{
- outb(val & 0xff, dev->iobase + PCL711_AO_LSB_REG(chan));
- outb((val >> 8) & 0xff, dev->iobase + PCL711_AO_MSB_REG(chan));
-}
-
-static int pcl711_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- pcl711_ao_write(dev, chan, val);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pcl711_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int val;
-
- val = inb(dev->iobase + PCL711_DI_LSB_REG);
- val |= (inb(dev->iobase + PCL711_DI_MSB_REG) << 8);
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int pcl711_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0x00ff)
- outb(s->state & 0xff, dev->iobase + PCL711_DO_LSB_REG);
- if (mask & 0xff00)
- outb((s->state >> 8), dev->iobase + PCL711_DO_MSB_REG);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct pcl711_board *board = dev->board_ptr;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- if (it->options[1] && it->options[1] <= board->maxirq) {
- ret = request_irq(it->options[1], pcl711_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- dev->pacer = comedi_8254_init(dev->iobase + PCL711_TIMER_BASE,
- I8254_OSC_BASE_2MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- if (board->n_aichan > 8)
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = board->n_aichan;
- s->maxdata = 0xfff;
- s->range_table = board->ai_range_type;
- s->insn_read = pcl711_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 1;
- s->do_cmdtest = pcl711_ai_cmdtest;
- s->do_cmd = pcl711_ai_cmd;
- s->cancel = pcl711_ai_cancel;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_aochan;
- s->maxdata = 0xfff;
- s->range_table = &range_bipolar5;
- s->insn_write = pcl711_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl711_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl711_do_insn_bits;
-
- /* clear DAC */
- pcl711_ao_write(dev, 0, 0x0);
- pcl711_ao_write(dev, 1, 0x0);
-
- return 0;
-}
-
-static struct comedi_driver pcl711_driver = {
- .driver_name = "pcl711",
- .module = THIS_MODULE,
- .attach = pcl711_attach,
- .detach = comedi_legacy_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
- .offset = sizeof(struct pcl711_board),
-};
-module_comedi_driver(pcl711_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for PCL-711 compatible boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl724.c b/drivers/staging/comedi/drivers/pcl724.c
deleted file mode 100644
index 74b07e1744c7..000000000000
--- a/drivers/staging/comedi/drivers/pcl724.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * pcl724.c
- * Comedi driver for 8255 based ISA and PC/104 DIO boards
- *
- * Michal Dobes <dobes@tesnet.cz>
- */
-
-/*
- * Driver: pcl724
- * Description: Comedi driver for 8255 based ISA DIO boards
- * Devices: [Advantech] PCL-724 (pcl724), PCL-722 (pcl722), PCL-731 (pcl731),
- * [ADLink] ACL-7122 (acl7122), ACL-7124 (acl7124), PET-48DIO (pet48dio),
- * [WinSystems] PCM-IO48 (pcmio48),
- * [Diamond Systems] ONYX-MM-DIO (onyx-mm-dio)
- * Author: Michal Dobes <dobes@tesnet.cz>
- * Status: untested
- *
- * Configuration options:
- * [0] - IO Base
- * [1] - IRQ (not supported)
- * [2] - number of DIO (pcl722 and acl7122 boards)
- * 0, 144: 144 DIO configuration
- * 1, 96: 96 DIO configuration
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-
-struct pcl724_board {
- const char *name;
- unsigned int io_range;
- unsigned int can_have96:1;
- unsigned int is_pet48:1;
- int numofports;
-};
-
-static const struct pcl724_board boardtypes[] = {
- {
- .name = "pcl724",
- .io_range = 0x04,
- .numofports = 1, /* 24 DIO channels */
- }, {
- .name = "pcl722",
- .io_range = 0x20,
- .can_have96 = 1,
- .numofports = 6, /* 144 (or 96) DIO channels */
- }, {
- .name = "pcl731",
- .io_range = 0x08,
- .numofports = 2, /* 48 DIO channels */
- }, {
- .name = "acl7122",
- .io_range = 0x20,
- .can_have96 = 1,
- .numofports = 6, /* 144 (or 96) DIO channels */
- }, {
- .name = "acl7124",
- .io_range = 0x04,
- .numofports = 1, /* 24 DIO channels */
- }, {
- .name = "pet48dio",
- .io_range = 0x02,
- .is_pet48 = 1,
- .numofports = 2, /* 48 DIO channels */
- }, {
- .name = "pcmio48",
- .io_range = 0x08,
- .numofports = 2, /* 48 DIO channels */
- }, {
- .name = "onyx-mm-dio",
- .io_range = 0x10,
- .numofports = 2, /* 48 DIO channels */
- },
-};
-
-static int pcl724_8255mapped_io(struct comedi_device *dev,
- int dir, int port, int data,
- unsigned long iobase)
-{
- int movport = I8255_SIZE * (iobase >> 12);
-
- iobase &= 0x0fff;
-
- outb(port + movport, iobase);
- if (dir) {
- outb(data, iobase + 1);
- return 0;
- }
- return inb(iobase + 1);
-}
-
-static int pcl724_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct pcl724_board *board = dev->board_ptr;
- struct comedi_subdevice *s;
- unsigned long iobase;
- unsigned int iorange;
- int n_subdevices;
- int ret;
- int i;
-
- iorange = board->io_range;
- n_subdevices = board->numofports;
-
-	/* Handle PCL-722 and ACL-7122 boards in the 96 DIO configuration */
- if (board->can_have96 &&
- (it->options[2] == 1 || it->options[2] == 96)) {
- iorange = 0x10;
- n_subdevices = 4;
- }
-
- ret = comedi_request_region(dev, it->options[0], iorange);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (board->is_pet48) {
- iobase = dev->iobase + (i * 0x1000);
- ret = subdev_8255_init(dev, s, pcl724_8255mapped_io,
- iobase);
- } else {
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
- }
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct comedi_driver pcl724_driver = {
- .driver_name = "pcl724",
- .module = THIS_MODULE,
- .attach = pcl724_attach,
- .detach = comedi_legacy_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
- .offset = sizeof(struct pcl724_board),
-};
-module_comedi_driver(pcl724_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for 8255 based ISA and PC/104 DIO boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
deleted file mode 100644
index 256850ccb6fa..000000000000
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * pcl726.c
- * Comedi driver for 6/12-Channel D/A Output and DIO cards
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: pcl726
- * Description: Advantech PCL-726 & compatibles
- * Author: David A. Schleef <ds@schleef.org>
- * Status: untested
- * Devices: [Advantech] PCL-726 (pcl726), PCL-727 (pcl727), PCL-728 (pcl728),
- * [ADLink] ACL-6126 (acl6126), ACL-6128 (acl6128)
- *
- * Configuration Options:
- * [0] - IO Base
- * [1] - IRQ (ACL-6126 only)
- * [2] - D/A output range for channel 0
- * [3] - D/A output range for channel 1
- *
- * Boards with > 2 analog output channels:
- * [4] - D/A output range for channel 2
- * [5] - D/A output range for channel 3
- * [6] - D/A output range for channel 4
- * [7] - D/A output range for channel 5
- *
- * Boards with > 6 analog output channels:
- * [8] - D/A output range for channel 6
- * [9] - D/A output range for channel 7
- * [10] - D/A output range for channel 8
- * [11] - D/A output range for channel 9
- * [12] - D/A output range for channel 10
- * [13] - D/A output range for channel 11
- *
- * For PCL-726 the D/A output ranges are:
- * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V, 4: 4-20mA, 5: unknown
- *
- * For PCL-727:
- * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: 4-20mA
- *
- * For PCL-728 and ACL-6128:
- * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V, 4: 4-20mA, 5: 0-20mA
- *
- * For ACL-6126:
- * 0: 0-5V, 1: 0-10V, 2: +/-5V, 3: +/-10V, 4: 4-20mA
- */
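
To make the per-channel range options concrete, here is one possible devconfig options array, written the way the driver receives it in it->options[]; the base address and chosen range codes are purely illustrative:

/*
 * Editor's illustration (hypothetical values): options for a PCL-726
 * with channels 0-1 on +/-10V and channels 2-5 on 0-5V.
 */
static const int pcl726_example_options[] = {
	0x2c0,		/* [0] I/O base address (assumed)        */
	0,		/* [1] IRQ not used                      */
	3, 3,		/* [2],[3] channels 0-1: +/-10V (code 3) */
	0, 0, 0, 0,	/* [4]..[7] channels 2-5: 0-5V  (code 0) */
};

Option [2 + i] selects the entry of the board's range list used for AO channel i, which is what the rangelist setup loop in pcl726_attach() below implements.
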
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#define PCL726_AO_MSB_REG(x) (0x00 + ((x) * 2))
-#define PCL726_AO_LSB_REG(x) (0x01 + ((x) * 2))
-#define PCL726_DO_MSB_REG 0x0c
-#define PCL726_DO_LSB_REG 0x0d
-#define PCL726_DI_MSB_REG 0x0e
-#define PCL726_DI_LSB_REG 0x0f
-
-#define PCL727_DI_MSB_REG 0x00
-#define PCL727_DI_LSB_REG 0x01
-#define PCL727_DO_MSB_REG 0x18
-#define PCL727_DO_LSB_REG 0x19
-
-static const struct comedi_lrange *const rangelist_726[] = {
- &range_unipolar5,
- &range_unipolar10,
- &range_bipolar5,
- &range_bipolar10,
- &range_4_20mA,
- &range_unknown
-};
-
-static const struct comedi_lrange *const rangelist_727[] = {
- &range_unipolar5,
- &range_unipolar10,
- &range_bipolar5,
- &range_4_20mA
-};
-
-static const struct comedi_lrange *const rangelist_728[] = {
- &range_unipolar5,
- &range_unipolar10,
- &range_bipolar5,
- &range_bipolar10,
- &range_4_20mA,
- &range_0_20mA
-};
-
-struct pcl726_board {
- const char *name;
- unsigned long io_len;
- unsigned int irq_mask;
- const struct comedi_lrange *const *ao_ranges;
- int ao_num_ranges;
- int ao_nchan;
- unsigned int have_dio:1;
- unsigned int is_pcl727:1;
-};
-
-static const struct pcl726_board pcl726_boards[] = {
- {
- .name = "pcl726",
- .io_len = 0x10,
- .ao_ranges = &rangelist_726[0],
- .ao_num_ranges = ARRAY_SIZE(rangelist_726),
- .ao_nchan = 6,
- .have_dio = 1,
- }, {
- .name = "pcl727",
- .io_len = 0x20,
- .ao_ranges = &rangelist_727[0],
- .ao_num_ranges = ARRAY_SIZE(rangelist_727),
- .ao_nchan = 12,
- .have_dio = 1,
- .is_pcl727 = 1,
- }, {
- .name = "pcl728",
- .io_len = 0x08,
- .ao_num_ranges = ARRAY_SIZE(rangelist_728),
- .ao_ranges = &rangelist_728[0],
- .ao_nchan = 2,
- }, {
- .name = "acl6126",
- .io_len = 0x10,
- .irq_mask = 0x96e8,
- .ao_num_ranges = ARRAY_SIZE(rangelist_726),
- .ao_ranges = &rangelist_726[0],
- .ao_nchan = 6,
- .have_dio = 1,
- }, {
- .name = "acl6128",
- .io_len = 0x08,
- .ao_num_ranges = ARRAY_SIZE(rangelist_728),
- .ao_ranges = &rangelist_728[0],
- .ao_nchan = 2,
- },
-};
-
-struct pcl726_private {
- const struct comedi_lrange *rangelist[12];
- unsigned int cmd_running:1;
-};
-
-static int pcl726_intr_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = 0;
- return insn->n;
-}
-
-static int pcl726_intr_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
- /* Step 2b : and mutually compatible */
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
-
- return 0;
-}
-
-static int pcl726_intr_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl726_private *devpriv = dev->private;
-
- devpriv->cmd_running = 1;
-
- return 0;
-}
-
-static int pcl726_intr_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl726_private *devpriv = dev->private;
-
- devpriv->cmd_running = 0;
-
- return 0;
-}
-
-static irqreturn_t pcl726_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct pcl726_private *devpriv = dev->private;
-
- if (devpriv->cmd_running) {
- pcl726_intr_cancel(dev, s);
-
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
- }
-
- return IRQ_HANDLED;
-}
-
-static int pcl726_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- /* bipolar data to the DAC is two's complement */
- if (comedi_chan_range_is_bipolar(s, chan, range))
- val = comedi_offset_munge(s, val);
-
- /* order is important, MSB then LSB */
- outb((val >> 8) & 0xff, dev->iobase + PCL726_AO_MSB_REG(chan));
- outb(val & 0xff, dev->iobase + PCL726_AO_LSB_REG(chan));
- }
-
- return insn->n;
-}
-
-static int pcl726_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct pcl726_board *board = dev->board_ptr;
- unsigned int val;
-
- if (board->is_pcl727) {
- val = inb(dev->iobase + PCL727_DI_LSB_REG);
- val |= (inb(dev->iobase + PCL727_DI_MSB_REG) << 8);
- } else {
- val = inb(dev->iobase + PCL726_DI_LSB_REG);
- val |= (inb(dev->iobase + PCL726_DI_MSB_REG) << 8);
- }
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int pcl726_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct pcl726_board *board = dev->board_ptr;
- unsigned long io = dev->iobase;
- unsigned int mask;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (board->is_pcl727) {
- if (mask & 0x00ff)
- outb(s->state & 0xff, io + PCL727_DO_LSB_REG);
- if (mask & 0xff00)
- outb((s->state >> 8), io + PCL727_DO_MSB_REG);
- } else {
- if (mask & 0x00ff)
- outb(s->state & 0xff, io + PCL726_DO_LSB_REG);
- if (mask & 0xff00)
- outb((s->state >> 8), io + PCL726_DO_MSB_REG);
- }
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pcl726_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct pcl726_board *board = dev->board_ptr;
- struct pcl726_private *devpriv;
- struct comedi_subdevice *s;
- int subdev;
- int ret;
- int i;
-
- ret = comedi_request_region(dev, it->options[0], board->io_len);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /*
- * Hook up the external trigger source interrupt only if the
- * user config option is valid and the board supports interrupts.
- */
- if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
- ret = request_irq(it->options[1], pcl726_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0) {
- /* External trigger source is from Pin-17 of CN3 */
- dev->irq = it->options[1];
- }
- }
-
- /* setup the per-channel analog output range_table_list */
- for (i = 0; i < 12; i++) {
- unsigned int opt = it->options[2 + i];
-
- if (opt < board->ao_num_ranges && i < board->ao_nchan)
- devpriv->rangelist[i] = board->ao_ranges[opt];
- else
- devpriv->rangelist[i] = &range_unknown;
- }
-
- subdev = board->have_dio ? 3 : 1;
- if (dev->irq)
- subdev++;
- ret = comedi_alloc_subdevices(dev, subdev);
- if (ret)
- return ret;
-
- subdev = 0;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[subdev++];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->ao_nchan;
- s->maxdata = 0x0fff;
- s->range_table_list = devpriv->rangelist;
- s->insn_write = pcl726_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- if (board->have_dio) {
- /* Digital Input subdevice */
- s = &dev->subdevices[subdev++];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->insn_bits = pcl726_di_insn_bits;
- s->range_table = &range_digital;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[subdev++];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->insn_bits = pcl726_do_insn_bits;
- s->range_table = &range_digital;
- }
-
- if (dev->irq) {
-		/* Digital Input subdevice - Interrupt support */
- s = &dev->subdevices[subdev++];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- s->n_chan = 1;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl726_intr_insn_bits;
- s->len_chanlist = 1;
- s->do_cmdtest = pcl726_intr_cmdtest;
- s->do_cmd = pcl726_intr_cmd;
- s->cancel = pcl726_intr_cancel;
- }
-
- return 0;
-}
-
-static struct comedi_driver pcl726_driver = {
- .driver_name = "pcl726",
- .module = THIS_MODULE,
- .attach = pcl726_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pcl726_boards[0].name,
- .num_names = ARRAY_SIZE(pcl726_boards),
- .offset = sizeof(struct pcl726_board),
-};
-module_comedi_driver(pcl726_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Advantech PCL-726 & compatibles");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
deleted file mode 100644
index ce958eef2a61..000000000000
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * comedi/drivers/pcl730.c
- * Driver for Advantech PCL-730 and clones
- * José Luis Sánchez
- */
-
-/*
- * Driver: pcl730
- * Description: Advantech PCL-730 (& compatibles)
- * Devices: [Advantech] PCL-730 (pcl730), PCM-3730 (pcm3730), PCL-725 (pcl725),
- * PCL-733 (pcl733), PCL-734 (pcl734),
- * [ADLink] ACL-7130 (acl7130), ACL-7225b (acl7225b),
- * [ICP] ISO-730 (iso730), P8R8-DIO (p8r8dio), P16R16-DIO (p16r16dio),
- * [Diamond Systems] OPMM-1616-XT (opmm-1616-xt), PEARL-MM-P (pearl-mm-p),
- * IR104-PBF (ir104-pbf),
- * Author: José Luis Sánchez (jsanchezv@teleline.es)
- * Status: untested
- *
- * Configuration options:
- * [0] - I/O port base
- *
- * Interrupts are not supported.
- * The ACL-7130 card has an 8254 timer/counter not supported by this driver.
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-/*
- * Register map
- *
- * The register map varies slightly depending on the board type but
- * all registers are 8-bit.
- *
- * The boardinfo 'io_range' is used to allow comedi to request the
- * proper range required by the board.
- *
- * The comedi_subdevice 'private' data is used to pass the register
- * offset to the (*insn_bits) functions to read/write the correct
- * registers.
- *
- * The basic register mapping looks like this:
- *
- * BASE+0 Isolated outputs 0-7 (write) / inputs 0-7 (read)
- * BASE+1 Isolated outputs 8-15 (write) / inputs 8-15 (read)
- * BASE+2 TTL outputs 0-7 (write) / inputs 0-7 (read)
- * BASE+3 TTL outputs 8-15 (write) / inputs 8-15 (read)
- *
- * The pcm3730 board does not have register BASE+1.
- *
- * The pcl725 and p8r8dio only have registers BASE+0 and BASE+1:
- *
- * BASE+0 Isolated outputs 0-7 (write) (read back on p8r8dio)
- * BASE+1 Isolated inputs 0-7 (read)
- *
- * The acl7225b and p16r16dio boards have this register mapping:
- *
- * BASE+0 Isolated outputs 0-7 (write) (read back)
- * BASE+1 Isolated outputs 8-15 (write) (read back)
- * BASE+2 Isolated inputs 0-7 (read)
- * BASE+3 Isolated inputs 8-15 (read)
- *
- * The pcl733 and pcl734 boards have this register mapping:
- *
- * BASE+0 Isolated outputs 0-7 (write) or inputs 0-7 (read)
- * BASE+1 Isolated outputs 8-15 (write) or inputs 8-15 (read)
- * BASE+2 Isolated outputs 16-23 (write) or inputs 16-23 (read)
- * BASE+3 Isolated outputs 24-31 (write) or inputs 24-31 (read)
- *
- * The opmm-1616-xt board has this register mapping:
- *
- * BASE+0 Isolated outputs 0-7 (write) (read back)
- * BASE+1 Isolated outputs 8-15 (write) (read back)
- * BASE+2 Isolated inputs 0-7 (read)
- * BASE+3 Isolated inputs 8-15 (read)
- *
- * These registers are not currently supported:
- *
- * BASE+2 Relay select register (write)
- * BASE+3 Board reset control register (write)
- * BASE+4 Interrupt control register (write)
- * BASE+4 Change detect 7-0 status register (read)
- * BASE+5 LED control register (write)
- * BASE+5 Change detect 15-8 status register (read)
- *
- * The pearl-mm-p board has this register mapping:
- *
- * BASE+0 Isolated outputs 0-7 (write)
- * BASE+1 Isolated outputs 8-15 (write)
- *
- * The ir104-pbf board has this register mapping:
- *
- * BASE+0 Isolated outputs 0-7 (write) (read back)
- * BASE+1 Isolated outputs 8-15 (write) (read back)
- * BASE+2 Isolated outputs 16-19 (write) (read back)
- * BASE+4 Isolated inputs 0-7 (read)
- * BASE+5 Isolated inputs 8-15 (read)
- * BASE+6 Isolated inputs 16-19 (read)
- */
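
The mapping above boils down to "register = per-board base offset + chan / 8, bit = chan % 8". A minimal sketch follows (editor's illustration; the helper name is hypothetical, and the base offset is what the driver stores in s->private):

/*
 * Editor's illustration (hypothetical helper): resolve a channel number
 * to its port register and bit, given a per-board base register offset.
 */
static unsigned long pcl730_sketch_chan_reg(unsigned long base_reg,
					    unsigned int chan,
					    unsigned int *bit)
{
	*bit = chan % 8;		/* bit position within the 8-bit port */
	return base_reg + chan / 8;	/* BASE+N register holding the channel */
}

For the ir104-pbf, for example, isolated input 18 resolves to BASE+4 + 2 = BASE+6, bit 2, matching the table above; pcl730_get_bits() below applies the same arithmetic a whole register at a time.
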
-
-struct pcl730_board {
- const char *name;
- unsigned int io_range;
- unsigned is_pcl725:1;
- unsigned is_acl7225b:1;
- unsigned is_ir104:1;
- unsigned has_readback:1;
- unsigned has_ttl_io:1;
- int n_subdevs;
- int n_iso_out_chan;
- int n_iso_in_chan;
- int n_ttl_chan;
-};
-
-static const struct pcl730_board pcl730_boards[] = {
- {
- .name = "pcl730",
- .io_range = 0x04,
- .has_ttl_io = 1,
- .n_subdevs = 4,
- .n_iso_out_chan = 16,
- .n_iso_in_chan = 16,
- .n_ttl_chan = 16,
- }, {
- .name = "iso730",
- .io_range = 0x04,
- .n_subdevs = 4,
- .n_iso_out_chan = 16,
- .n_iso_in_chan = 16,
- .n_ttl_chan = 16,
- }, {
- .name = "acl7130",
- .io_range = 0x08,
- .has_ttl_io = 1,
- .n_subdevs = 4,
- .n_iso_out_chan = 16,
- .n_iso_in_chan = 16,
- .n_ttl_chan = 16,
- }, {
- .name = "pcm3730",
- .io_range = 0x04,
- .has_ttl_io = 1,
- .n_subdevs = 4,
- .n_iso_out_chan = 8,
- .n_iso_in_chan = 8,
- .n_ttl_chan = 16,
- }, {
- .name = "pcl725",
- .io_range = 0x02,
- .is_pcl725 = 1,
- .n_subdevs = 2,
- .n_iso_out_chan = 8,
- .n_iso_in_chan = 8,
- }, {
- .name = "p8r8dio",
- .io_range = 0x02,
- .is_pcl725 = 1,
- .has_readback = 1,
- .n_subdevs = 2,
- .n_iso_out_chan = 8,
- .n_iso_in_chan = 8,
- }, {
- .name = "acl7225b",
- .io_range = 0x08, /* only 4 are used */
- .is_acl7225b = 1,
- .has_readback = 1,
- .n_subdevs = 2,
- .n_iso_out_chan = 16,
- .n_iso_in_chan = 16,
- }, {
- .name = "p16r16dio",
- .io_range = 0x04,
- .is_acl7225b = 1,
- .has_readback = 1,
- .n_subdevs = 2,
- .n_iso_out_chan = 16,
- .n_iso_in_chan = 16,
- }, {
- .name = "pcl733",
- .io_range = 0x04,
- .n_subdevs = 1,
- .n_iso_in_chan = 32,
- }, {
- .name = "pcl734",
- .io_range = 0x04,
- .n_subdevs = 1,
- .n_iso_out_chan = 32,
- }, {
- .name = "opmm-1616-xt",
- .io_range = 0x10,
- .is_acl7225b = 1,
- .has_readback = 1,
- .n_subdevs = 2,
- .n_iso_out_chan = 16,
- .n_iso_in_chan = 16,
- }, {
- .name = "pearl-mm-p",
- .io_range = 0x02,
- .n_subdevs = 1,
- .n_iso_out_chan = 16,
- }, {
- .name = "ir104-pbf",
- .io_range = 0x08,
- .is_ir104 = 1,
- .has_readback = 1,
- .n_iso_out_chan = 20,
- .n_iso_in_chan = 20,
- },
-};
-
-static int pcl730_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long reg = (unsigned long)s->private;
- unsigned int mask;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- if (mask & 0x00ff)
- outb(s->state & 0xff, dev->iobase + reg);
- if ((mask & 0xff00) && (s->n_chan > 8))
- outb((s->state >> 8) & 0xff, dev->iobase + reg + 1);
- if ((mask & 0xff0000) && (s->n_chan > 16))
- outb((s->state >> 16) & 0xff, dev->iobase + reg + 2);
- if ((mask & 0xff000000) && (s->n_chan > 24))
- outb((s->state >> 24) & 0xff, dev->iobase + reg + 3);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static unsigned int pcl730_get_bits(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned long reg = (unsigned long)s->private;
- unsigned int val;
-
- val = inb(dev->iobase + reg);
- if (s->n_chan > 8)
- val |= (inb(dev->iobase + reg + 1) << 8);
- if (s->n_chan > 16)
- val |= (inb(dev->iobase + reg + 2) << 16);
- if (s->n_chan > 24)
- val |= (inb(dev->iobase + reg + 3) << 24);
-
- return val;
-}
-
-static int pcl730_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = pcl730_get_bits(dev, s);
-
- return insn->n;
-}
-
-static int pcl730_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct pcl730_board *board = dev->board_ptr;
- struct comedi_subdevice *s;
- int subdev;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], board->io_range);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, board->n_subdevs);
- if (ret)
- return ret;
-
- subdev = 0;
-
- if (board->n_iso_out_chan) {
- /* Isolated Digital Outputs */
- s = &dev->subdevices[subdev++];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_iso_out_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl730_do_insn_bits;
- s->private = (void *)0;
-
- /* get the initial state if supported */
- if (board->has_readback)
- s->state = pcl730_get_bits(dev, s);
- }
-
- if (board->n_iso_in_chan) {
- /* Isolated Digital Inputs */
- s = &dev->subdevices[subdev++];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_iso_in_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl730_di_insn_bits;
- s->private = board->is_ir104 ? (void *)4 :
- board->is_acl7225b ? (void *)2 :
- board->is_pcl725 ? (void *)1 : (void *)0;
- }
-
- if (board->has_ttl_io) {
- /* TTL Digital Outputs */
- s = &dev->subdevices[subdev++];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_ttl_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl730_do_insn_bits;
- s->private = (void *)2;
-
- /* TTL Digital Inputs */
- s = &dev->subdevices[subdev++];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_ttl_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl730_di_insn_bits;
- s->private = (void *)2;
- }
-
- return 0;
-}
-
-static struct comedi_driver pcl730_driver = {
- .driver_name = "pcl730",
- .module = THIS_MODULE,
- .attach = pcl730_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pcl730_boards[0].name,
- .num_names = ARRAY_SIZE(pcl730_boards),
- .offset = sizeof(struct pcl730_board),
-};
-module_comedi_driver(pcl730_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
deleted file mode 100644
index 48f6cdf440b9..000000000000
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ /dev/null
@@ -1,1317 +0,0 @@
-/*
- * comedi/drivers/pcl812.c
- *
- * Author: Michal Dobes <dobes@tesnet.cz>
- *
- * hardware driver for Advantech cards
- * card: PCL-812, PCL-812PG, PCL-813, PCL-813B
- * driver: pcl812, pcl812pg, pcl813, pcl813b
- * and for ADlink cards
- * card: ACL-8112DG, ACL-8112HG, ACL-8112PG, ACL-8113, ACL-8216
- * driver: acl8112dg, acl8112hg, acl8112pg, acl8113, acl8216
- * and for ICP DAS cards
- * card: ISO-813, A-821PGH, A-821PGL, A-821PGL-NDA, A-822PGH, A-822PGL,
- * driver: iso813, a821pgh, a821pgl, a821pglnda, a822pgh, a822pgl,
- * card: A-823PGH, A-823PGL, A-826PG
- * driver: a823pgh, a823pgl, a826pg
- */
-
-/*
- * Driver: pcl812
- * Description: Advantech PCL-812/PG, PCL-813/B,
- * ADLink ACL-8112DG/HG/PG, ACL-8113, ACL-8216,
- * ICP DAS A-821PGH/PGL/PGL-NDA, A-822PGH/PGL, A-823PGH/PGL, A-826PG,
- * ICP DAS ISO-813
- * Author: Michal Dobes <dobes@tesnet.cz>
- * Devices: [Advantech] PCL-812 (pcl812), PCL-812PG (pcl812pg),
- * PCL-813 (pcl813), PCL-813B (pcl813b), [ADLink] ACL-8112DG (acl8112dg),
- * ACL-8112HG (acl8112hg), ACL-8113 (acl8113), ACL-8216 (acl8216),
- * [ICP] ISO-813 (iso813), A-821PGH (a821pgh), A-821PGL (a821pgl),
- * A-821PGL-NDA (a821pglnda), A-822PGH (a822pgh), A-822PGL (a822pgl),
- * A-823PGH (a823pgh), A-823PGL (a823pgl), A-826PG (a826pg)
- * Updated: Mon, 06 Aug 2007 12:03:15 +0100
- * Status: works (I hope. My board burned up under my hands
- *   and I can't test all features.)
- *
- * This driver supports the insn and cmd interfaces. Some boards support only
- * insn because their hardware doesn't allow more (PCL-813/B, ACL-8113,
- * ISO-813). Data transfer over DMA is supported only when a single channel
- * is measured; this too is a hardware limitation of these boards.
- *
- * Options for PCL-812:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
- * [2] - DMA (0=disable, 1, 3)
- * [3] - 0=trigger source is internal 8253 with 2MHz clock
- * 1=trigger source is external
- * [4] - 0=A/D input range is +/-10V
- * 1=A/D input range is +/-5V
- * 2=A/D input range is +/-2.5V
- * 3=A/D input range is +/-1.25V
- * 4=A/D input range is +/-0.625V
- * 5=A/D input range is +/-0.3125V
- * [5] - 0=D/A outputs 0-5V (internal reference -5V)
- * 1=D/A outputs 0-10V (internal reference -10V)
- * 2=D/A outputs unknown (external reference)
- *
- * Options for PCL-812PG, ACL-8112PG:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
- * [2] - DMA (0=disable, 1, 3)
- * [3] - 0=trigger source is internal 8253 with 2MHz clock
- * 1=trigger source is external
- * [4] - 0=A/D have max +/-5V input
- * 1=A/D have max +/-10V input
- * [5] - 0=D/A outputs 0-5V (internal reference -5V)
- * 1=D/A outputs 0-10V (internal reference -10V)
- * 2=D/A outputs unknown (external reference)
- *
- * Options for ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH, ACL-8216, A-826PG:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
- * [2] - DMA (0=disable, 1, 3)
- * [3] - 0=trigger source is internal 8253 with 2MHz clock
- * 1=trigger source is external
- * [4] - 0=A/D channels are S.E.
- * 1=A/D channels are DIFF
- * [5] - 0=D/A outputs 0-5V (internal reference -5V)
- * 1=D/A outputs 0-10V (internal reference -10V)
- * 2=D/A outputs unknown (external reference)
- *
- * Options for A-821PGL/PGH:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- * [2] - 0=A/D channels are S.E.
- * 1=A/D channels are DIFF
- * [3] - 0=D/A output 0-5V (internal reference -5V)
- * 1=D/A output 0-10V (internal reference -10V)
- *
- * Options for A-821PGL-NDA:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- * [2] - 0=A/D channels are S.E.
- * 1=A/D channels are DIFF
- *
- * Options for PCL-813:
- * [0] - IO Base
- *
- * Options for PCL-813B:
- * [0] - IO Base
- * [1] - 0= bipolar inputs
- * 1= unipolar inputs
- *
- * Options for ACL-8113, ISO-813:
- * [0] - IO Base
- * [1] - 0= 10V bipolar inputs
- * 1= 10V unipolar inputs
- * 2= 20V bipolar inputs
- * 3= 20V unipolar inputs
- */
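
The bracketed numbers above are positions in the option array that userspace (normally comedi_config) hands to the legacy attach routine as a comedi_devconfig. A rough sketch of how such a list is read back out, with a hypothetical helper name and only the first few slots shown (the real parsing lives in pcl812_attach() further down):

/* illustrative only -- not part of the driver */
static void example_show_options(struct comedi_devconfig *it)
{
	pr_info("io=0x%x irq=%d dma=%d trigger=%s\n",
		it->options[0],		/* [0] - IO Base */
		it->options[1],		/* [1] - IRQ, 0 = disabled */
		it->options[2],		/* [2] - DMA channel, 0 = disabled */
		it->options[3] ? "external" : "internal 8253");
}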
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-
-/* hardware types of the cards */
-#define boardPCL812PG 0 /* and ACL-8112PG */
-#define boardPCL813B 1
-#define boardPCL812 2
-#define boardPCL813 3
-#define boardISO813 5
-#define boardACL8113 6
-#define boardACL8112 7 /* ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH */
-#define boardACL8216 8 /* and ICP DAS A-826PG */
-#define boardA821 9 /* PGH, PGL, PGL/NDA versions */
-
-/*
- * Register I/O map
- */
-#define PCL812_TIMER_BASE 0x00
-#define PCL812_AI_LSB_REG 0x04
-#define PCL812_AI_MSB_REG 0x05
-#define PCL812_AI_MSB_DRDY (1 << 4)
-#define PCL812_AO_LSB_REG(x) (0x04 + ((x) * 2))
-#define PCL812_AO_MSB_REG(x) (0x05 + ((x) * 2))
-#define PCL812_DI_LSB_REG 0x06
-#define PCL812_DI_MSB_REG 0x07
-#define PCL812_STATUS_REG 0x08
-#define PCL812_STATUS_DRDY (1 << 5)
-#define PCL812_RANGE_REG 0x09
-#define PCL812_MUX_REG 0x0a
-#define PCL812_MUX_CHAN(x) ((x) << 0)
-#define PCL812_MUX_CS0 (1 << 4)
-#define PCL812_MUX_CS1 (1 << 5)
-#define PCL812_CTRL_REG 0x0b
-#define PCL812_CTRL_DISABLE_TRIG (0 << 0)
-#define PCL812_CTRL_SOFT_TRIG (1 << 0)
-#define PCL812_CTRL_PACER_DMA_TRIG (2 << 0)
-#define PCL812_CTRL_PACER_EOC_TRIG (6 << 0)
-#define PCL812_SOFTTRIG_REG 0x0c
-#define PCL812_DO_LSB_REG 0x0d
-#define PCL812_DO_MSB_REG 0x0e
-
-#define MAX_CHANLIST_LEN 256 /* length of scan list */
-
-static const struct comedi_lrange range_pcl812pg_ai = {
- 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125)
- }
-};
-
-static const struct comedi_lrange range_pcl812pg2_ai = {
- 5, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range812_bipolar1_25 = {
- 1, {
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range812_bipolar0_625 = {
- 1, {
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range812_bipolar0_3125 = {
- 1, {
- BIP_RANGE(0.3125)
- }
-};
-
-static const struct comedi_lrange range_pcl813b_ai = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_pcl813b2_ai = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_iso813_1_ai = {
- 5, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125)
- }
-};
-
-static const struct comedi_lrange range_iso813_1_2_ai = {
- 5, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_iso813_2_ai = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_iso813_2_2_ai = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_acl8113_1_ai = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_acl8113_1_2_ai = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_acl8113_2_ai = {
- 3, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range_acl8113_2_2_ai = {
- 3, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5)
- }
-};
-
-static const struct comedi_lrange range_acl8112dg_ai = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10)
- }
-};
-
-static const struct comedi_lrange range_acl8112hg_ai = {
- 12, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_a821pgh_ai = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005)
- }
-};
-
-struct pcl812_board {
- const char *name;
- int board_type;
- int n_aichan;
- int n_aochan;
- unsigned int ai_ns_min;
- const struct comedi_lrange *rangelist_ai;
- unsigned int IRQbits;
- unsigned int has_dma:1;
- unsigned int has_16bit_ai:1;
- unsigned int has_mpc508_mux:1;
- unsigned int has_dio:1;
-};
-
-static const struct pcl812_board boardtypes[] = {
- {
- .name = "pcl812",
- .board_type = boardPCL812,
- .n_aichan = 16,
- .n_aochan = 2,
- .ai_ns_min = 33000,
- .rangelist_ai = &range_bipolar10,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_dio = 1,
- }, {
- .name = "pcl812pg",
- .board_type = boardPCL812PG,
- .n_aichan = 16,
- .n_aochan = 2,
- .ai_ns_min = 33000,
- .rangelist_ai = &range_pcl812pg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_dio = 1,
- }, {
- .name = "acl8112pg",
- .board_type = boardPCL812PG,
- .n_aichan = 16,
- .n_aochan = 2,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_pcl812pg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_dio = 1,
- }, {
- .name = "acl8112dg",
- .board_type = boardACL8112,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_acl8112dg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_mpc508_mux = 1,
- .has_dio = 1,
- }, {
- .name = "acl8112hg",
- .board_type = boardACL8112,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_acl8112hg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_mpc508_mux = 1,
- .has_dio = 1,
- }, {
- .name = "a821pgl",
- .board_type = boardA821,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 1,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_pcl813b_ai,
- .IRQbits = 0x000c,
- .has_dio = 1,
- }, {
- .name = "a821pglnda",
- .board_type = boardA821,
- .n_aichan = 16, /* 8 differential */
- .ai_ns_min = 10000,
- .rangelist_ai = &range_pcl813b_ai,
- .IRQbits = 0x000c,
- }, {
- .name = "a821pgh",
- .board_type = boardA821,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 1,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_a821pgh_ai,
- .IRQbits = 0x000c,
- .has_dio = 1,
- }, {
- .name = "a822pgl",
- .board_type = boardACL8112,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_acl8112dg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_dio = 1,
- }, {
- .name = "a822pgh",
- .board_type = boardACL8112,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_acl8112hg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_dio = 1,
- }, {
- .name = "a823pgl",
- .board_type = boardACL8112,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 8000,
- .rangelist_ai = &range_acl8112dg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_dio = 1,
- }, {
- .name = "a823pgh",
- .board_type = boardACL8112,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 8000,
- .rangelist_ai = &range_acl8112hg_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_dio = 1,
- }, {
- .name = "pcl813",
- .board_type = boardPCL813,
- .n_aichan = 32,
- .rangelist_ai = &range_pcl813b_ai,
- }, {
- .name = "pcl813b",
- .board_type = boardPCL813B,
- .n_aichan = 32,
- .rangelist_ai = &range_pcl813b_ai,
- }, {
- .name = "acl8113",
- .board_type = boardACL8113,
- .n_aichan = 32,
- .rangelist_ai = &range_acl8113_1_ai,
- }, {
- .name = "iso813",
- .board_type = boardISO813,
- .n_aichan = 32,
- .rangelist_ai = &range_iso813_1_ai,
- }, {
- .name = "acl8216",
- .board_type = boardACL8216,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_pcl813b2_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_16bit_ai = 1,
- .has_mpc508_mux = 1,
- .has_dio = 1,
- }, {
- .name = "a826pg",
- .board_type = boardACL8216,
- .n_aichan = 16, /* 8 differential */
- .n_aochan = 2,
- .ai_ns_min = 10000,
- .rangelist_ai = &range_pcl813b2_ai,
- .IRQbits = 0xdcfc,
- .has_dma = 1,
- .has_16bit_ai = 1,
- .has_dio = 1,
- },
-};
-
-struct pcl812_private {
- struct comedi_isadma *dma;
-	unsigned char range_correction;	/* if 1, add 1 to the range number */
- unsigned int last_ai_chanspec;
- unsigned char mode_reg_int; /* stored INT number for some cards */
-	unsigned int ai_poll_ptr;	/* samples already transferred by ai_poll */
- unsigned int max_812_ai_mode0_rangewait; /* settling time for gain */
- unsigned int use_diff:1;
- unsigned int use_mpc508:1;
- unsigned int use_ext_trg:1;
- unsigned int ai_dma:1;
- unsigned int ai_eos:1;
-};
-
-static void pcl812_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int unread_samples)
-{
- struct pcl812_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned int bytes;
- unsigned int max_samples;
- unsigned int nsamples;
-
- comedi_isadma_disable(dma->chan);
-
- /* if using EOS, adapt DMA buffer to one scan */
- bytes = devpriv->ai_eos ? comedi_bytes_per_scan(s) : desc->maxsize;
- max_samples = comedi_bytes_to_samples(s, bytes);
-
- /*
- * Determine dma size based on the buffer size plus the number of
- * unread samples and the number of samples remaining in the command.
- */
- nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
- if (nsamples > unread_samples) {
- nsamples -= unread_samples;
- desc->size = comedi_samples_to_bytes(s, nsamples);
- comedi_isadma_program(desc);
- }
-}
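
For a TRIG_COUNT stop source the sizing above works out to min(buffer capacity + unread, samples still owed to the command) minus the unread samples, since comedi_nsamples_left() clamps to what the command still owes. A small stand-alone check of that arithmetic with made-up numbers (illustrative only, not the driver's code path):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* how many new samples get programmed into the next DMA transfer */
static unsigned int next_dma_samples(unsigned int buf_capacity,
				     unsigned int unread,
				     unsigned int samples_left_in_cmd)
{
	unsigned int nsamples = min_u(buf_capacity + unread,
				      samples_left_in_cmd);

	return nsamples > unread ? nsamples - unread : 0;
}

int main(void)
{
	/* plenty left: fill a whole 4096-sample buffer */
	printf("%u\n", next_dma_samples(4096, 100, 100000));	/* 4096 */
	/* near the end of the command: only program what is still owed */
	printf("%u\n", next_dma_samples(4096, 100, 1000));	/* 900 */
	/* everything still owed already sits in the old buffer: program 0 */
	printf("%u\n", next_dma_samples(4096, 100, 60));	/* 0 */
	return 0;
}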
-
-static void pcl812_ai_set_chan_range(struct comedi_device *dev,
- unsigned int chanspec, char wait)
-{
- struct pcl812_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int mux = 0;
-
- if (chanspec == devpriv->last_ai_chanspec)
- return;
-
- devpriv->last_ai_chanspec = chanspec;
-
- if (devpriv->use_mpc508) {
- if (devpriv->use_diff) {
- mux |= PCL812_MUX_CS0 | PCL812_MUX_CS1;
- } else {
- if (chan < 8)
- mux |= PCL812_MUX_CS0;
- else
- mux |= PCL812_MUX_CS1;
- }
- }
-
- outb(mux | PCL812_MUX_CHAN(chan), dev->iobase + PCL812_MUX_REG);
- outb(range + devpriv->range_correction, dev->iobase + PCL812_RANGE_REG);
-
- if (wait)
- /*
- * XXX this depends on selected range and can be very long for
- * some high gain ranges!
- */
- udelay(devpriv->max_812_ai_mode0_rangewait);
-}
-
-static void pcl812_ai_clear_eoc(struct comedi_device *dev)
-{
- /* writing any value clears the interrupt request */
- outb(0, dev->iobase + PCL812_STATUS_REG);
-}
-
-static void pcl812_ai_soft_trig(struct comedi_device *dev)
-{
- /* writing any value triggers a software conversion */
- outb(255, dev->iobase + PCL812_SOFTTRIG_REG);
-}
-
-static unsigned int pcl812_ai_get_sample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int val;
-
- val = inb(dev->iobase + PCL812_AI_MSB_REG) << 8;
- val |= inb(dev->iobase + PCL812_AI_LSB_REG);
-
- return val & s->maxdata;
-}
-
-static int pcl812_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- if (s->maxdata > 0x0fff) {
- status = inb(dev->iobase + PCL812_STATUS_REG);
- if ((status & PCL812_STATUS_DRDY) == 0)
- return 0;
- } else {
- status = inb(dev->iobase + PCL812_AI_MSB_REG);
- if ((status & PCL812_AI_MSB_DRDY) == 0)
- return 0;
- }
- return -EBUSY;
-}
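
This callback is written for the convention comedi_timeout() expects (it is used that way in pcl812_ai_insn_read() below): return 0 as soon as the conversion is ready and -EBUSY while it is still pending, and the core keeps polling until its deadline. A rough stand-alone sketch of that pattern, with illustrative names rather than the comedi implementation:

#include <errno.h>

/* callback contract: 0 = ready, -EBUSY = keep waiting, other = hard error */
typedef int (*ready_fn)(void *ctx);

int poll_until_ready(ready_fn is_ready, void *ctx, unsigned long tries)
{
	while (tries--) {
		int ret = is_ready(ctx);

		if (ret != -EBUSY)
			return ret;	/* 0 on success or a real error code */
	}
	return -ETIMEDOUT;		/* condition never came true */
}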
-
-static int pcl812_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- const struct pcl812_board *board = dev->board_ptr;
- struct pcl812_private *devpriv = dev->private;
- int err = 0;
- unsigned int flags;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
-
- if (devpriv->use_ext_trg)
- flags = TRIG_EXT;
- else
- flags = TRIG_TIMER;
- err |= comedi_check_trigger_src(&cmd->convert_src, flags);
-
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_ns_min);
- } else { /* TRIG_EXT */
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- }
-
- err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int arg = cmd->convert_arg;
-
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcl812_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int ctrl = 0;
- unsigned int i;
-
- pcl812_ai_set_chan_range(dev, cmd->chanlist[0], 1);
-
- if (dma) { /* check if we can use DMA transfer */
- devpriv->ai_dma = 1;
- for (i = 1; i < cmd->chanlist_len; i++)
- if (cmd->chanlist[0] != cmd->chanlist[i]) {
-				/* we can't use DMA :-( */
- devpriv->ai_dma = 0;
- break;
- }
- } else {
- devpriv->ai_dma = 0;
- }
-
- devpriv->ai_poll_ptr = 0;
-
-	/* do we want to wake up on every scan? */
- if (cmd->flags & CMDF_WAKE_EOS) {
- devpriv->ai_eos = 1;
-
- /* DMA is useless for this situation */
- if (cmd->chanlist_len == 1)
- devpriv->ai_dma = 0;
- }
-
- if (devpriv->ai_dma) {
- /* setup and enable dma for the first buffer */
- dma->cur_dma = 0;
- pcl812_ai_setup_dma(dev, s, 0);
- }
-
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- break;
- }
-
- if (devpriv->ai_dma)
- ctrl |= PCL812_CTRL_PACER_DMA_TRIG;
- else
- ctrl |= PCL812_CTRL_PACER_EOC_TRIG;
- outb(devpriv->mode_reg_int | ctrl, dev->iobase + PCL812_CTRL_REG);
-
- return 0;
-}
-
-static bool pcl812_ai_next_chan(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- s->async->events |= COMEDI_CB_EOA;
- return false;
- }
-
- return true;
-}
-
-static void pcl812_handle_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan = s->async->cur_chan;
- unsigned int next_chan;
- unsigned short val;
-
- if (pcl812_ai_eoc(dev, s, NULL, 0)) {
- dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
- s->async->events |= COMEDI_CB_ERROR;
- return;
- }
-
- val = pcl812_ai_get_sample(dev, s);
- comedi_buf_write_samples(s, &val, 1);
-
- /* Set up next channel. Added by abbotti 2010-01-20, but untested. */
- next_chan = s->async->cur_chan;
- if (cmd->chanlist[chan] != cmd->chanlist[next_chan])
- pcl812_ai_set_chan_range(dev, cmd->chanlist[next_chan], 0);
-
- pcl812_ai_next_chan(dev, s);
-}
-
-static void transfer_from_dma_buf(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *ptr,
- unsigned int bufptr, unsigned int len)
-{
- unsigned int i;
- unsigned short val;
-
- for (i = len; i; i--) {
- val = ptr[bufptr++];
- comedi_buf_write_samples(s, &val, 1);
-
- if (!pcl812_ai_next_chan(dev, s))
- break;
- }
-}
-
-static void pcl812_handle_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl812_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned int nsamples;
- int bufptr;
-
- nsamples = comedi_bytes_to_samples(s, desc->size) -
- devpriv->ai_poll_ptr;
- bufptr = devpriv->ai_poll_ptr;
- devpriv->ai_poll_ptr = 0;
-
- /* restart dma with the next buffer */
- dma->cur_dma = 1 - dma->cur_dma;
- pcl812_ai_setup_dma(dev, s, nsamples);
-
- transfer_from_dma_buf(dev, s, desc->virt_addr, bufptr, nsamples);
-}
-
-static irqreturn_t pcl812_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct pcl812_private *devpriv = dev->private;
-
- if (!dev->attached) {
- pcl812_ai_clear_eoc(dev);
- return IRQ_HANDLED;
- }
-
- if (devpriv->ai_dma)
- pcl812_handle_dma(dev, s);
- else
- pcl812_handle_eoc(dev, s);
-
- pcl812_ai_clear_eoc(dev);
-
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcl812_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc;
- unsigned long flags;
- unsigned int poll;
- int ret;
-
- /* poll is valid only for DMA transfer */
- if (!devpriv->ai_dma)
- return 0;
-
- spin_lock_irqsave(&dev->spinlock, flags);
-
- poll = comedi_isadma_poll(dma);
- poll = comedi_bytes_to_samples(s, poll);
- if (poll > devpriv->ai_poll_ptr) {
- desc = &dma->desc[dma->cur_dma];
- transfer_from_dma_buf(dev, s, desc->virt_addr,
- devpriv->ai_poll_ptr,
- poll - devpriv->ai_poll_ptr);
- /* new buffer position */
- devpriv->ai_poll_ptr = poll;
-
- ret = comedi_buf_n_bytes_ready(s);
- } else {
- /* no new samples */
- ret = 0;
- }
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return ret;
-}
-
-static int pcl812_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl812_private *devpriv = dev->private;
-
- if (devpriv->ai_dma)
- comedi_isadma_disable(devpriv->dma->chan);
-
- outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
- dev->iobase + PCL812_CTRL_REG);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, false);
- pcl812_ai_clear_eoc(dev);
- return 0;
-}
-
-static int pcl812_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pcl812_private *devpriv = dev->private;
- int ret = 0;
- int i;
-
- outb(devpriv->mode_reg_int | PCL812_CTRL_SOFT_TRIG,
- dev->iobase + PCL812_CTRL_REG);
-
- pcl812_ai_set_chan_range(dev, insn->chanspec, 1);
-
- for (i = 0; i < insn->n; i++) {
- pcl812_ai_clear_eoc(dev);
- pcl812_ai_soft_trig(dev);
-
- ret = comedi_timeout(dev, s, insn, pcl812_ai_eoc, 0);
- if (ret)
- break;
-
- data[i] = pcl812_ai_get_sample(dev, s);
- }
- outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
- dev->iobase + PCL812_CTRL_REG);
- pcl812_ai_clear_eoc(dev);
-
- return ret ? ret : insn->n;
-}
-
-static int pcl812_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outb(val & 0xff, dev->iobase + PCL812_AO_LSB_REG(chan));
- outb((val >> 8) & 0x0f, dev->iobase + PCL812_AO_MSB_REG(chan));
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pcl812_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inb(dev->iobase + PCL812_DI_LSB_REG) |
- (inb(dev->iobase + PCL812_DI_MSB_REG) << 8);
-
- return insn->n;
-}
-
-static int pcl812_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + PCL812_DO_LSB_REG);
- outb((s->state >> 8), dev->iobase + PCL812_DO_MSB_REG);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void pcl812_reset(struct comedi_device *dev)
-{
- const struct pcl812_board *board = dev->board_ptr;
- struct pcl812_private *devpriv = dev->private;
- unsigned int chan;
-
- /* disable analog input trigger */
- outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
- dev->iobase + PCL812_CTRL_REG);
- pcl812_ai_clear_eoc(dev);
-
- /*
- * Invalidate last_ai_chanspec then set analog input to
- * known channel/range.
- */
- devpriv->last_ai_chanspec = CR_PACK(16, 0, 0);
- pcl812_ai_set_chan_range(dev, CR_PACK(0, 0, 0), 0);
-
- /* set analog output channels to 0V */
- for (chan = 0; chan < board->n_aochan; chan++) {
- outb(0, dev->iobase + PCL812_AO_LSB_REG(chan));
- outb(0, dev->iobase + PCL812_AO_MSB_REG(chan));
- }
-
- /* set all digital outputs low */
- if (board->has_dio) {
- outb(0, dev->iobase + PCL812_DO_MSB_REG);
- outb(0, dev->iobase + PCL812_DO_LSB_REG);
- }
-}
-
-static void pcl812_set_ai_range_table(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_devconfig *it)
-{
- const struct pcl812_board *board = dev->board_ptr;
- struct pcl812_private *devpriv = dev->private;
-
- /* default to the range table from the boardinfo */
- s->range_table = board->rangelist_ai;
-
- /* now check the user config option based on the boardtype */
- switch (board->board_type) {
- case boardPCL812PG:
- if (it->options[4] == 1)
- s->range_table = &range_pcl812pg2_ai;
- break;
- case boardPCL812:
- switch (it->options[4]) {
- case 0:
- s->range_table = &range_bipolar10;
- break;
- case 1:
- s->range_table = &range_bipolar5;
- break;
- case 2:
- s->range_table = &range_bipolar2_5;
- break;
- case 3:
- s->range_table = &range812_bipolar1_25;
- break;
- case 4:
- s->range_table = &range812_bipolar0_625;
- break;
- case 5:
- s->range_table = &range812_bipolar0_3125;
- break;
- default:
- s->range_table = &range_bipolar10;
- break;
- }
- break;
- case boardPCL813B:
- if (it->options[1] == 1)
- s->range_table = &range_pcl813b2_ai;
- break;
- case boardISO813:
- switch (it->options[1]) {
- case 0:
- s->range_table = &range_iso813_1_ai;
- break;
- case 1:
- s->range_table = &range_iso813_1_2_ai;
- break;
- case 2:
- s->range_table = &range_iso813_2_ai;
- devpriv->range_correction = 1;
- break;
- case 3:
- s->range_table = &range_iso813_2_2_ai;
- devpriv->range_correction = 1;
- break;
- default:
- s->range_table = &range_iso813_1_ai;
- break;
- }
- break;
- case boardACL8113:
- switch (it->options[1]) {
- case 0:
- s->range_table = &range_acl8113_1_ai;
- break;
- case 1:
- s->range_table = &range_acl8113_1_2_ai;
- break;
- case 2:
- s->range_table = &range_acl8113_2_ai;
- devpriv->range_correction = 1;
- break;
- case 3:
- s->range_table = &range_acl8113_2_2_ai;
- devpriv->range_correction = 1;
- break;
- default:
- s->range_table = &range_acl8113_1_ai;
- break;
- }
- break;
- }
-}
-
-static void pcl812_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
-{
- struct pcl812_private *devpriv = dev->private;
-
- /* only DMA channels 3 and 1 are valid */
- if (!(dma_chan == 3 || dma_chan == 1))
- return;
-
- /* DMA uses two 8K buffers */
- devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
- PAGE_SIZE * 2, COMEDI_ISADMA_READ);
-}
-
-static void pcl812_free_dma(struct comedi_device *dev)
-{
- struct pcl812_private *devpriv = dev->private;
-
- if (devpriv)
- comedi_isadma_free(devpriv->dma);
-}
-
-static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct pcl812_board *board = dev->board_ptr;
- struct pcl812_private *devpriv;
- struct comedi_subdevice *s;
- int n_subdevices;
- int subdev;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- if (board->IRQbits) {
- dev->pacer = comedi_8254_init(dev->iobase + PCL812_TIMER_BASE,
- I8254_OSC_BASE_2MHZ,
- I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- if ((1 << it->options[1]) & board->IRQbits) {
- ret = request_irq(it->options[1], pcl812_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
- }
-
- /* we need an IRQ to do DMA on channel 3 or 1 */
- if (dev->irq && board->has_dma)
- pcl812_alloc_dma(dev, it->options[2]);
-
- /* differential analog inputs? */
- switch (board->board_type) {
- case boardA821:
- if (it->options[2] == 1)
- devpriv->use_diff = 1;
- break;
- case boardACL8112:
- case boardACL8216:
- if (it->options[4] == 1)
- devpriv->use_diff = 1;
- break;
- }
-
- n_subdevices = 1; /* all boardtypes have analog inputs */
- if (board->n_aochan > 0)
- n_subdevices++;
- if (board->has_dio)
- n_subdevices += 2;
-
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return ret;
-
- subdev = 0;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- if (devpriv->use_diff) {
- s->subdev_flags |= SDF_DIFF;
- s->n_chan = board->n_aichan / 2;
- } else {
- s->subdev_flags |= SDF_GROUND;
- s->n_chan = board->n_aichan;
- }
- s->maxdata = board->has_16bit_ai ? 0xffff : 0x0fff;
-
- pcl812_set_ai_range_table(dev, s, it);
-
- s->insn_read = pcl812_ai_insn_read;
-
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = MAX_CHANLIST_LEN;
- s->do_cmdtest = pcl812_ai_cmdtest;
- s->do_cmd = pcl812_ai_cmd;
- s->poll = pcl812_ai_poll;
- s->cancel = pcl812_ai_cancel;
- }
-
- devpriv->use_mpc508 = board->has_mpc508_mux;
-
- subdev++;
-
- /* analog output */
- if (board->n_aochan > 0) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
- s->maxdata = 0xfff;
- s->range_table = &range_unipolar5;
- switch (board->board_type) {
- case boardA821:
- if (it->options[3] == 1)
- s->range_table = &range_unipolar10;
- break;
- case boardPCL812:
- case boardACL8112:
- case boardPCL812PG:
- case boardACL8216:
- if (it->options[5] == 1)
- s->range_table = &range_unipolar10;
- if (it->options[5] == 2)
- s->range_table = &range_unknown;
- break;
- }
- s->insn_write = pcl812_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- subdev++;
- }
-
- if (board->has_dio) {
- /* Digital Input subdevice */
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl812_di_insn_bits;
- subdev++;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl812_do_insn_bits;
- subdev++;
- }
-
- switch (board->board_type) {
- case boardACL8216:
- case boardPCL812PG:
- case boardPCL812:
- case boardACL8112:
- devpriv->max_812_ai_mode0_rangewait = 1;
- if (it->options[3] > 0)
- /* we use external trigger */
- devpriv->use_ext_trg = 1;
- break;
- case boardA821:
- devpriv->max_812_ai_mode0_rangewait = 1;
- devpriv->mode_reg_int = (dev->irq << 4) & 0xf0;
- break;
- case boardPCL813B:
- case boardPCL813:
- case boardISO813:
- case boardACL8113:
-		/* maybe a longer timeout is needed here */
- devpriv->max_812_ai_mode0_rangewait = 5;
- break;
- }
-
- pcl812_reset(dev);
-
- return 0;
-}
-
-static void pcl812_detach(struct comedi_device *dev)
-{
- pcl812_free_dma(dev);
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver pcl812_driver = {
- .driver_name = "pcl812",
- .module = THIS_MODULE,
- .attach = pcl812_attach,
- .detach = pcl812_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
- .offset = sizeof(struct pcl812_board),
-};
-module_comedi_driver(pcl812_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
deleted file mode 100644
index a353d1b155bb..000000000000
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ /dev/null
@@ -1,712 +0,0 @@
-/*
- comedi/drivers/pcl816.c
-
- Author: Juan Grigera <juan@grigera.com.ar>
- based on pcl818 by Michal Dobes <dobes@tesnet.cz> and bits of pcl812
-
- hardware driver for Advantech cards:
-  card: PCL-816, PCL-814B
- driver: pcl816
-*/
-/*
-Driver: pcl816
-Description: Advantech PCL-816 cards, PCL-814
-Author: Juan Grigera <juan@grigera.com.ar>
-Devices: [Advantech] PCL-816 (pcl816), PCL-814B (pcl814b)
-Status: works
-Updated: Tue, 2 Apr 2002 23:15:21 -0800
-
-The PCL-816 and PCL-814B have 16 SE/DIFF ADCs, 16 DACs, 16 DI and 16 DO.
-The difference is in the resolution (16 vs 12 bits).
-
-The driver supports the AI command mode. The DI and DO subdevices provide
-simple insn access; analog output is not implemented (that subdevice is
-marked unused).
-
-Configuration Options:
- [0] - IO Base
- [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- [2] - DMA (0=disable, 1, 3)
- [3] - 0, 10=10MHz clock for 8254
- 1= 1MHz clock for 8254
-
-*/
-
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-
-/*
- * Register I/O map
- */
-#define PCL816_DO_DI_LSB_REG 0x00
-#define PCL816_DO_DI_MSB_REG 0x01
-#define PCL816_TIMER_BASE 0x04
-#define PCL816_AI_LSB_REG 0x08
-#define PCL816_AI_MSB_REG 0x09
-#define PCL816_RANGE_REG 0x09
-#define PCL816_CLRINT_REG 0x0a
-#define PCL816_MUX_REG 0x0b
-#define PCL816_MUX_SCAN(_first, _last) (((_last) << 4) | (_first))
-#define PCL816_CTRL_REG 0x0c
-#define PCL816_CTRL_DISABLE_TRIG (0 << 0)
-#define PCL816_CTRL_SOFT_TRIG (1 << 0)
-#define PCL816_CTRL_PACER_TRIG (1 << 1)
-#define PCL816_CTRL_EXT_TRIG (1 << 2)
-#define PCL816_CTRL_POE (1 << 3)
-#define PCL816_CTRL_DMAEN (1 << 4)
-#define PCL816_CTRL_INTEN (1 << 5)
-#define PCL816_CTRL_DMASRC_SLOT0 (0 << 6)
-#define PCL816_CTRL_DMASRC_SLOT1 (1 << 6)
-#define PCL816_CTRL_DMASRC_SLOT2 (2 << 6)
-#define PCL816_STATUS_REG 0x0d
-#define PCL816_STATUS_NEXT_CHAN_MASK (0xf << 0)
-#define PCL816_STATUS_INTSRC_MASK (3 << 4)
-#define PCL816_STATUS_INTSRC_SLOT0 (0 << 4)
-#define PCL816_STATUS_INTSRC_SLOT1 (1 << 4)
-#define PCL816_STATUS_INTSRC_SLOT2 (2 << 4)
-#define PCL816_STATUS_INTSRC_DMA (3 << 4)
-#define PCL816_STATUS_INTACT (1 << 6)
-#define PCL816_STATUS_DRDY (1 << 7)
-
-#define MAGIC_DMA_WORD 0x5a5a
-
-static const struct comedi_lrange range_pcl816 = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
- }
-};
-
-struct pcl816_board {
- const char *name;
- int ai_maxdata;
- int ao_maxdata;
- int ai_chanlist;
-};
-
-static const struct pcl816_board boardtypes[] = {
- {
- .name = "pcl816",
- .ai_maxdata = 0xffff,
- .ao_maxdata = 0xffff,
- .ai_chanlist = 1024,
- }, {
- .name = "pcl814b",
- .ai_maxdata = 0x3fff,
- .ao_maxdata = 0x3fff,
- .ai_chanlist = 1024,
- },
-};
-
-struct pcl816_private {
- struct comedi_isadma *dma;
-	unsigned int ai_poll_ptr;	/* samples already transferred by ai_poll */
- unsigned int ai_cmd_running:1;
- unsigned int ai_cmd_canceled:1;
-};
-
-static void pcl816_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int unread_samples)
-{
- struct pcl816_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned int max_samples = comedi_bytes_to_samples(s, desc->maxsize);
- unsigned int nsamples;
-
- comedi_isadma_disable(dma->chan);
-
- /*
- * Determine dma size based on the buffer maxsize plus the number of
- * unread samples and the number of samples remaining in the command.
- */
- nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
- if (nsamples > unread_samples) {
- nsamples -= unread_samples;
- desc->size = comedi_samples_to_bytes(s, nsamples);
- comedi_isadma_program(desc);
- }
-}
-
-static void pcl816_ai_set_chan_range(struct comedi_device *dev,
- unsigned int chan,
- unsigned int range)
-{
- outb(chan, dev->iobase + PCL816_MUX_REG);
- outb(range, dev->iobase + PCL816_RANGE_REG);
-}
-
-static void pcl816_ai_set_chan_scan(struct comedi_device *dev,
- unsigned int first_chan,
- unsigned int last_chan)
-{
- outb(PCL816_MUX_SCAN(first_chan, last_chan),
- dev->iobase + PCL816_MUX_REG);
-}
-
-static void pcl816_ai_setup_chanlist(struct comedi_device *dev,
- unsigned int *chanlist,
- unsigned int seglen)
-{
- unsigned int first_chan = CR_CHAN(chanlist[0]);
- unsigned int last_chan;
- unsigned int range;
- unsigned int i;
-
- /* store range list to card */
- for (i = 0; i < seglen; i++) {
- last_chan = CR_CHAN(chanlist[i]);
- range = CR_RANGE(chanlist[i]);
-
- pcl816_ai_set_chan_range(dev, last_chan, range);
- }
-
- udelay(1);
-
- pcl816_ai_set_chan_scan(dev, first_chan, last_chan);
-}
-
-static void pcl816_ai_clear_eoc(struct comedi_device *dev)
-{
- /* writing any value clears the interrupt request */
- outb(0, dev->iobase + PCL816_CLRINT_REG);
-}
-
-static void pcl816_ai_soft_trig(struct comedi_device *dev)
-{
- /* writing any value triggers a software conversion */
- outb(0, dev->iobase + PCL816_AI_LSB_REG);
-}
-
-static unsigned int pcl816_ai_get_sample(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int val;
-
- val = inb(dev->iobase + PCL816_AI_MSB_REG) << 8;
- val |= inb(dev->iobase + PCL816_AI_LSB_REG);
-
- return val & s->maxdata;
-}
-
-static int pcl816_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + PCL816_STATUS_REG);
- if ((status & PCL816_STATUS_DRDY) == 0)
- return 0;
- return -EBUSY;
-}
-
-static bool pcl816_ai_next_chan(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- s->async->events |= COMEDI_CB_EOA;
- return false;
- }
-
- return true;
-}
-
-static void transfer_from_dma_buf(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *ptr,
- unsigned int bufptr, unsigned int len)
-{
- unsigned short val;
- int i;
-
- for (i = 0; i < len; i++) {
- val = ptr[bufptr++];
- comedi_buf_write_samples(s, &val, 1);
-
- if (!pcl816_ai_next_chan(dev, s))
- return;
- }
-}
-
-static irqreturn_t pcl816_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct pcl816_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned int nsamples;
- unsigned int bufptr;
-
- if (!dev->attached || !devpriv->ai_cmd_running) {
- pcl816_ai_clear_eoc(dev);
- return IRQ_HANDLED;
- }
-
- if (devpriv->ai_cmd_canceled) {
- devpriv->ai_cmd_canceled = 0;
- pcl816_ai_clear_eoc(dev);
- return IRQ_HANDLED;
- }
-
- nsamples = comedi_bytes_to_samples(s, desc->size) -
- devpriv->ai_poll_ptr;
- bufptr = devpriv->ai_poll_ptr;
- devpriv->ai_poll_ptr = 0;
-
- /* restart dma with the next buffer */
- dma->cur_dma = 1 - dma->cur_dma;
- pcl816_ai_setup_dma(dev, s, nsamples);
-
- transfer_from_dma_buf(dev, s, desc->virt_addr, bufptr, nsamples);
-
- pcl816_ai_clear_eoc(dev);
-
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static int check_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist,
- unsigned int chanlen)
-{
- unsigned int chansegment[16];
- unsigned int i, nowmustbechan, seglen, segpos;
-
-	/* correct channel and range numbers are checked by comedi/range.c itself */
- if (chanlen < 1) {
- dev_err(dev->class_dev, "range/channel list is empty!\n");
- return 0;
- }
-
- if (chanlen > 1) {
-		/* the first channel is always OK */
- chansegment[0] = chanlist[0];
- for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
-			/* we detected a loop; the segment ends here */
- if (chanlist[0] == chanlist[i])
- break;
- nowmustbechan =
- (CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
- if (nowmustbechan != CR_CHAN(chanlist[i])) {
- /* channel list isn't continuous :-( */
- dev_dbg(dev->class_dev,
- "channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
- i, CR_CHAN(chanlist[i]), nowmustbechan,
- CR_CHAN(chanlist[0]));
- return 0;
- }
-			/* well, this is the next correct channel in the list */
- chansegment[i] = chanlist[i];
- }
-
- /* check whole chanlist */
- for (i = 0, segpos = 0; i < chanlen; i++) {
- if (chanlist[i] != chansegment[i % seglen]) {
- dev_dbg(dev->class_dev,
- "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
- i, CR_CHAN(chansegment[i]),
- CR_RANGE(chansegment[i]),
- CR_AREF(chansegment[i]),
- CR_CHAN(chanlist[i % seglen]),
- CR_RANGE(chanlist[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
- return 0; /* chan/gain list is strange */
- }
- }
- } else {
- seglen = 1;
- }
-
- return seglen; /* we can serve this with MUX logic */
-}
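
In other words, the channel list must be an initial run of consecutive channels repeated whole: {2,3,4,2,3,4} is accepted with a segment length of 3, while {2,4,...} is rejected. A simplified stand-alone version of that rule (channel numbers only, ranges/arefs ignored; illustrative, not the driver function):

#include <stdio.h>

static unsigned int segment_length(const unsigned int *chan, unsigned int len)
{
	unsigned int seglen, i;

	if (len < 2)
		return len;

	/* find the first repeat of chan[0]; channels must count up by one */
	for (seglen = 1; seglen < len; seglen++) {
		if (chan[seglen] == chan[0])
			break;
		if (chan[seglen] != (chan[seglen - 1] + 1) % len)
			return 0;
	}

	/* the rest of the list must repeat that segment exactly */
	for (i = 0; i < len; i++)
		if (chan[i] != chan[i % seglen])
			return 0;

	return seglen;
}

int main(void)
{
	unsigned int good[] = { 2, 3, 4, 2, 3, 4 };
	unsigned int bad[] = { 2, 4, 2, 4 };

	printf("good: seglen=%u\n", segment_length(good, 6));	/* 3 */
	printf("bad:  seglen=%u\n", segment_length(bad, 4));	/* 0 */
	return 0;
}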
-
-static int pcl816_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_EXT | TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER)
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
- else /* TRIG_EXT */
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int arg = cmd->convert_arg;
-
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* step 5: complain about special chanlist considerations */
-
- if (cmd->chanlist) {
- if (!check_channel_list(dev, s, cmd->chanlist,
- cmd->chanlist_len))
- return 5; /* incorrect channels list */
- }
-
- return 0;
-}
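
Step 4 above rounds convert_arg to what the cascaded 8254 can actually produce. With the 10 MHz base clock that pcl816_attach() passes to comedi_8254_init() below, one tick is 100 ns, so the 10000 ns minimum period corresponds to a total divisor of 100, split across the two cascaded counters (e.g. 2 x 50). A quick stand-alone check of that arithmetic; the exact split chosen by comedi_8254_cascade_ns_to_timer() may differ:

#include <stdio.h>

#define I8254_TICK_NS	100	/* 10 MHz base clock -> 100 ns per tick */

int main(void)
{
	unsigned int convert_ns = 10000;			/* fastest rate allowed above */
	unsigned int total_div = convert_ns / I8254_TICK_NS;	/* 100 ticks */
	unsigned int div1 = 2, div2 = total_div / div1;		/* one possible 2 x 50 split */

	printf("%u ns = %u ticks = %u * %u -> %u ns\n",
	       convert_ns, total_div, div1, div2,
	       div1 * div2 * I8254_TICK_NS);
	return 0;
}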
-
-static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcl816_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int ctrl;
- unsigned int seglen;
-
- if (devpriv->ai_cmd_running)
- return -EBUSY;
-
- seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
- if (seglen < 1)
- return -EINVAL;
- pcl816_ai_setup_chanlist(dev, cmd->chanlist, seglen);
- udelay(1);
-
- devpriv->ai_cmd_running = 1;
- devpriv->ai_poll_ptr = 0;
- devpriv->ai_cmd_canceled = 0;
-
- /* setup and enable dma for the first buffer */
- dma->cur_dma = 0;
- pcl816_ai_setup_dma(dev, s, 0);
-
- comedi_8254_set_mode(dev->pacer, 0, I8254_MODE1 | I8254_BINARY);
- comedi_8254_write(dev->pacer, 0, 0x0ff);
- udelay(1);
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
-
- ctrl = PCL816_CTRL_INTEN | PCL816_CTRL_DMAEN | PCL816_CTRL_DMASRC_SLOT0;
- if (cmd->convert_src == TRIG_TIMER)
- ctrl |= PCL816_CTRL_PACER_TRIG;
- else /* TRIG_EXT */
- ctrl |= PCL816_CTRL_EXT_TRIG;
-
- outb(ctrl, dev->iobase + PCL816_CTRL_REG);
- outb((dma->chan << 4) | dev->irq,
- dev->iobase + PCL816_STATUS_REG);
-
- return 0;
-}
-
-static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcl816_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc;
- unsigned long flags;
- unsigned int poll;
- int ret;
-
- spin_lock_irqsave(&dev->spinlock, flags);
-
- poll = comedi_isadma_poll(dma);
- poll = comedi_bytes_to_samples(s, poll);
- if (poll > devpriv->ai_poll_ptr) {
- desc = &dma->desc[dma->cur_dma];
- transfer_from_dma_buf(dev, s, desc->virt_addr,
- devpriv->ai_poll_ptr,
- poll - devpriv->ai_poll_ptr);
- /* new buffer position */
- devpriv->ai_poll_ptr = poll;
-
- comedi_handle_events(dev, s);
-
- ret = comedi_buf_n_bytes_ready(s);
- } else {
- /* no new samples */
- ret = 0;
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- return ret;
-}
-
-static int pcl816_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl816_private *devpriv = dev->private;
-
- if (!devpriv->ai_cmd_running)
- return 0;
-
- outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
- pcl816_ai_clear_eoc(dev);
-
- comedi_8254_pacer_enable(dev->pacer, 1, 2, false);
-
- devpriv->ai_cmd_running = 0;
- devpriv->ai_cmd_canceled = 1;
-
- return 0;
-}
-
-static int pcl816_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int ret = 0;
- int i;
-
- outb(PCL816_CTRL_SOFT_TRIG, dev->iobase + PCL816_CTRL_REG);
-
- pcl816_ai_set_chan_range(dev, chan, range);
- pcl816_ai_set_chan_scan(dev, chan, chan);
-
- for (i = 0; i < insn->n; i++) {
- pcl816_ai_clear_eoc(dev);
- pcl816_ai_soft_trig(dev);
-
- ret = comedi_timeout(dev, s, insn, pcl816_ai_eoc, 0);
- if (ret)
- break;
-
- data[i] = pcl816_ai_get_sample(dev, s);
- }
- outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
- pcl816_ai_clear_eoc(dev);
-
- return ret ? ret : insn->n;
-}
-
-static int pcl816_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inb(dev->iobase + PCL816_DO_DI_LSB_REG) |
- (inb(dev->iobase + PCL816_DO_DI_MSB_REG) << 8);
-
- return insn->n;
-}
-
-static int pcl816_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + PCL816_DO_DI_LSB_REG);
- outb((s->state >> 8), dev->iobase + PCL816_DO_DI_MSB_REG);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void pcl816_reset(struct comedi_device *dev)
-{
- outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
- pcl816_ai_set_chan_range(dev, 0, 0);
- pcl816_ai_clear_eoc(dev);
-
- /* set all digital outputs low */
- outb(0, dev->iobase + PCL816_DO_DI_LSB_REG);
- outb(0, dev->iobase + PCL816_DO_DI_MSB_REG);
-}
-
-static void pcl816_alloc_irq_and_dma(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct pcl816_private *devpriv = dev->private;
- unsigned int irq_num = it->options[1];
- unsigned int dma_chan = it->options[2];
-
- /* only IRQs 2-7 and DMA channels 3 and 1 are valid */
- if (!(irq_num >= 2 && irq_num <= 7) ||
- !(dma_chan == 3 || dma_chan == 1))
- return;
-
- if (request_irq(irq_num, pcl816_interrupt, 0, dev->board_name, dev))
- return;
-
- /* DMA uses two 16K buffers */
- devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
- PAGE_SIZE * 4, COMEDI_ISADMA_READ);
- if (!devpriv->dma)
- free_irq(irq_num, dev);
- else
- dev->irq = irq_num;
-}
-
-static void pcl816_free_dma(struct comedi_device *dev)
-{
- struct pcl816_private *devpriv = dev->private;
-
- if (devpriv)
- comedi_isadma_free(devpriv->dma);
-}
-
-static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct pcl816_board *board = dev->board_ptr;
- struct pcl816_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- /* an IRQ and DMA are required to support async commands */
- pcl816_alloc_irq_and_dma(dev, it);
-
- dev->pacer = comedi_8254_init(dev->iobase + PCL816_TIMER_BASE,
- I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_CMD_READ | SDF_DIFF;
- s->n_chan = 16;
- s->maxdata = board->ai_maxdata;
- s->range_table = &range_pcl816;
- s->insn_read = pcl816_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = board->ai_chanlist;
- s->do_cmdtest = pcl816_ai_cmdtest;
- s->do_cmd = pcl816_ai_cmd;
- s->poll = pcl816_ai_poll;
- s->cancel = pcl816_ai_cancel;
- }
-
-	/* Analog Output subdevice */
-	s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_UNUSED;
-#if 0
- subdevs[1] = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = 1;
- s->maxdata = board->ao_maxdata;
- s->range_table = &range_pcl816;
-#endif
-
- /* Digital Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl816_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl816_do_insn_bits;
-
- pcl816_reset(dev);
-
- return 0;
-}
-
-static void pcl816_detach(struct comedi_device *dev)
-{
- if (dev->private) {
- pcl816_ai_cancel(dev, dev->read_subdev);
- pcl816_reset(dev);
- }
- pcl816_free_dma(dev);
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver pcl816_driver = {
- .driver_name = "pcl816",
- .module = THIS_MODULE,
- .attach = pcl816_attach,
- .detach = pcl816_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
- .offset = sizeof(struct pcl816_board),
-};
-module_comedi_driver(pcl816_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
deleted file mode 100644
index e1bdde977302..000000000000
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ /dev/null
@@ -1,1146 +0,0 @@
-/*
- * comedi/drivers/pcl818.c
- *
- * Driver: pcl818
- * Description: Advantech PCL-818 cards, PCL-718
- * Author: Michal Dobes <dobes@tesnet.cz>
- * Devices: [Advantech] PCL-818L (pcl818l), PCL-818H (pcl818h),
- * PCL-818HD (pcl818hd), PCL-818HG (pcl818hg), PCL-818 (pcl818),
- * PCL-718 (pcl718)
- * Status: works
- *
- * All cards have 16 SE/8 DIFF ADCs, one or two DACs, 16 DI and 16 DO.
- * The differences are only in the maximum sample speed, the range list and
- * FIFO support.
- * The driver supports AI modes 0, 1 and 3; the other subdevices (AO, DI, DO)
- * support only mode 0. If DMA/FIFO/INT are disabled then AI supports only
- * mode 0. The PCL-818HD and PCL-818HG have a 1 kword FIFO. The driver
- * supports this FIFO, but the code is untested.
- * A word or two about DMA. The driver supports DMA operation in two ways:
- * 1) DMA uses two buffers; after one is filled an interrupt is generated
- *    and DMA restarts with the second buffer. In this mode I'm unable to
- *    run more than 80 ksamples/s without data dropouts on a K6/233.
- * 2) DMA uses one buffer and runs in autoinit mode, and the data are moved
- *    from the DMA buffer on the fly by 2 kHz interrupts from the RTC.
- *    This mode is used if interrupt 8 is available for allocation.
- *    If not, the first DMA mode is used. With this I can run one card at
- *    full speed (100 ksamples/s) or two cards at 60 ksamples/s each
- *    (more is a problem on account of ISA limitations).
- *    To use this mode the kernel must be compiled with
- *    "Enhanced Real Time Clock Support" disabled.
- *    You may have problems if you use xntpd or similar.
- *    If you have data dropouts with DMA mode 2 then:
- *    a) disable IDE DMA
- *    b) switch the text mode console to fb.
- *
- * Options for PCL-818L:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- * [2] - DMA (0=disable, 1, 3)
- * [3] - 0, 10=10MHz clock for 8254
- * 1= 1MHz clock for 8254
- * [4] - 0, 5=A/D input -5V.. +5V
- * 1, 10=A/D input -10V..+10V
- * [5] - 0, 5=D/A output 0-5V (internal reference -5V)
- * 1, 10=D/A output 0-10V (internal reference -10V)
- * 2 =D/A output unknown (external reference)
- *
- * Options for PCL-818, PCL-818H:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- * [2] - DMA (0=disable, 1, 3)
- * [3] - 0, 10=10MHz clock for 8254
- * 1= 1MHz clock for 8254
- * [4] - 0, 5=D/A output 0-5V (internal reference -5V)
- * 1, 10=D/A output 0-10V (internal reference -10V)
- * 2 =D/A output unknown (external reference)
- *
- * Options for PCL-818HD, PCL-818HG:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- * [2] - DMA/FIFO (-1=use FIFO, 0=disable both FIFO and DMA,
- * 1=use DMA ch 1, 3=use DMA ch 3)
- * [3] - 0, 10=10MHz clock for 8254
- * 1= 1MHz clock for 8254
- * [4] - 0, 5=D/A output 0-5V (internal reference -5V)
- * 1, 10=D/A output 0-10V (internal reference -10V)
- * 2 =D/A output unknown (external reference)
- *
- * Options for PCL-718:
- * [0] - IO Base
- * [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
- * [2] - DMA (0=disable, 1, 3)
- * [3] - 0, 10=10MHz clock for 8254
- * 1= 1MHz clock for 8254
- * [4] - 0=A/D Range is +/-10V
- * 1= +/-5V
- * 2= +/-2.5V
- * 3= +/-1V
- * 4= +/-0.5V
- * 5= user defined bipolar
- * 6= 0-10V
- * 7= 0-5V
- * 8= 0-2V
- * 9= 0-1V
- * 10= user defined unipolar
- * [5] - 0, 5=D/A outputs 0-5V (internal reference -5V)
- * 1, 10=D/A outputs 0-10V (internal reference -10V)
- * 2=D/A outputs unknown (external reference)
- * [6] - 0, 60=max 60kHz A/D sampling
- * 1,100=max 100kHz A/D sampling (PCL-718 with Option 001 installed)
- *
- */
-
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-
-/* boards constants */
-
-#define boardPCL818L 0
-#define boardPCL818H 1
-#define boardPCL818HD 2
-#define boardPCL818HG 3
-#define boardPCL818 4
-#define boardPCL718 5
-
-/*
- * Register I/O map
- */
-#define PCL818_AI_LSB_REG 0x00
-#define PCL818_AI_MSB_REG 0x01
-#define PCL818_RANGE_REG 0x01
-#define PCL818_MUX_REG 0x02
-#define PCL818_MUX_SCAN(_first, _last) (((_last) << 4) | (_first))
-#define PCL818_DO_DI_LSB_REG 0x03
-#define PCL818_AO_LSB_REG(x) (0x04 + ((x) * 2))
-#define PCL818_AO_MSB_REG(x) (0x05 + ((x) * 2))
-#define PCL818_STATUS_REG 0x08
-#define PCL818_STATUS_NEXT_CHAN_MASK (0xf << 0)
-#define PCL818_STATUS_INT (1 << 4)
-#define PCL818_STATUS_MUX (1 << 5)
-#define PCL818_STATUS_UNI (1 << 6)
-#define PCL818_STATUS_EOC (1 << 7)
-#define PCL818_CTRL_REG 0x09
-#define PCL818_CTRL_DISABLE_TRIG (0 << 0)
-#define PCL818_CTRL_SOFT_TRIG (1 << 0)
-#define PCL818_CTRL_EXT_TRIG (2 << 0)
-#define PCL818_CTRL_PACER_TRIG (3 << 0)
-#define PCL818_CTRL_DMAE (1 << 2)
-#define PCL818_CTRL_IRQ(x) ((x) << 4)
-#define PCL818_CTRL_INTE (1 << 7)
-#define PCL818_CNTENABLE_REG 0x0a
-#define PCL818_CNTENABLE_PACER_ENA (0 << 0)
-#define PCL818_CNTENABLE_PACER_TRIG0 (1 << 0)
-#define PCL818_CNTENABLE_CNT0_EXT_CLK (0 << 1)
-#define PCL818_CNTENABLE_CNT0_INT_CLK (1 << 1)
-#define PCL818_DO_DI_MSB_REG 0x0b
-#define PCL818_TIMER_BASE 0x0c
-
-/* W: fifo enable/disable */
-#define PCL818_FI_ENABLE 6
-/* W: fifo interrupt clear */
-#define PCL818_FI_INTCLR 20
-/* W: fifo flush */
-#define PCL818_FI_FLUSH 25
-/* R: fifo status */
-#define PCL818_FI_STATUS 25
-/* R: one record from FIFO */
-#define PCL818_FI_DATALO 23
-#define PCL818_FI_DATAHI 24
-
-#define MAGIC_DMA_WORD 0x5a5a
-
-static const struct comedi_lrange range_pcl818h_ai = {
- 9, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- BIP_RANGE(10)
- }
-};
-
-static const struct comedi_lrange range_pcl818hg_ai = {
- 10, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.005),
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_pcl818l_l_ai = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
- }
-};
-
-static const struct comedi_lrange range_pcl818l_h_ai = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-static const struct comedi_lrange range718_bipolar1 = {
- 1, {
- BIP_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range718_bipolar0_5 = {
- 1, {
- BIP_RANGE(0.5)
- }
-};
-
-static const struct comedi_lrange range718_unipolar2 = {
- 1, {
- UNI_RANGE(2)
- }
-};
-
-static const struct comedi_lrange range718_unipolar1 = {
- 1, {
- BIP_RANGE(1)
- }
-};
-
-struct pcl818_board {
- const char *name;
- unsigned int ns_min;
- int n_aochan;
- const struct comedi_lrange *ai_range_type;
- unsigned int has_dma:1;
- unsigned int has_fifo:1;
- unsigned int is_818:1;
-};
-
-static const struct pcl818_board boardtypes[] = {
- {
- .name = "pcl818l",
- .ns_min = 25000,
- .n_aochan = 1,
- .ai_range_type = &range_pcl818l_l_ai,
- .has_dma = 1,
- .is_818 = 1,
- }, {
- .name = "pcl818h",
- .ns_min = 10000,
- .n_aochan = 1,
- .ai_range_type = &range_pcl818h_ai,
- .has_dma = 1,
- .is_818 = 1,
- }, {
- .name = "pcl818hd",
- .ns_min = 10000,
- .n_aochan = 1,
- .ai_range_type = &range_pcl818h_ai,
- .has_dma = 1,
- .has_fifo = 1,
- .is_818 = 1,
- }, {
- .name = "pcl818hg",
- .ns_min = 10000,
- .n_aochan = 1,
- .ai_range_type = &range_pcl818hg_ai,
- .has_dma = 1,
- .has_fifo = 1,
- .is_818 = 1,
- }, {
- .name = "pcl818",
- .ns_min = 10000,
- .n_aochan = 2,
- .ai_range_type = &range_pcl818h_ai,
- .has_dma = 1,
- .is_818 = 1,
- }, {
- .name = "pcl718",
- .ns_min = 16000,
- .n_aochan = 2,
- .ai_range_type = &range_unipolar5,
- .has_dma = 1,
- }, {
- .name = "pcm3718",
- .ns_min = 10000,
- .ai_range_type = &range_pcl818h_ai,
- .has_dma = 1,
- .is_818 = 1,
- },
-};
-
-struct pcl818_private {
- struct comedi_isadma *dma;
- /* minimal allowed delay between samples (in ns) for this card */
- unsigned int ns_min;
- /* MUX channel list for the current AI operation */
- unsigned int act_chanlist[16];
- unsigned int act_chanlist_len; /* length of the current MUX list */
- unsigned int act_chanlist_pos; /* current position in the MUX list */
- unsigned int usefifo:1;
- unsigned int ai_cmd_running:1;
- unsigned int ai_cmd_canceled:1;
-};
-
-static void pcl818_ai_setup_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int unread_samples)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned int max_samples = comedi_bytes_to_samples(s, desc->maxsize);
- unsigned int nsamples;
-
- comedi_isadma_disable(dma->chan);
-
- /*
- * Determine dma size based on the buffer maxsize plus the number of
- * unread samples and the number of samples remaining in the command.
- */
- nsamples = comedi_nsamples_left(s, max_samples + unread_samples);
- if (nsamples > unread_samples) {
- nsamples -= unread_samples;
- desc->size = comedi_samples_to_bytes(s, nsamples);
- comedi_isadma_program(desc);
- }
-}
-
-static void pcl818_ai_set_chan_range(struct comedi_device *dev,
- unsigned int chan,
- unsigned int range)
-{
- outb(chan, dev->iobase + PCL818_MUX_REG);
- outb(range, dev->iobase + PCL818_RANGE_REG);
-}
-
-static void pcl818_ai_set_chan_scan(struct comedi_device *dev,
- unsigned int first_chan,
- unsigned int last_chan)
-{
- outb(PCL818_MUX_SCAN(first_chan, last_chan),
- dev->iobase + PCL818_MUX_REG);
-}
-
-static void pcl818_ai_setup_chanlist(struct comedi_device *dev,
- unsigned int *chanlist,
- unsigned int seglen)
-{
- struct pcl818_private *devpriv = dev->private;
- unsigned int first_chan = CR_CHAN(chanlist[0]);
- unsigned int last_chan;
- unsigned int range;
- int i;
-
- devpriv->act_chanlist_len = seglen;
- devpriv->act_chanlist_pos = 0;
-
- /* store range list to card */
- for (i = 0; i < seglen; i++) {
- last_chan = CR_CHAN(chanlist[i]);
- range = CR_RANGE(chanlist[i]);
-
- devpriv->act_chanlist[i] = last_chan;
-
- pcl818_ai_set_chan_range(dev, last_chan, range);
- }
-
- udelay(1);
-
- pcl818_ai_set_chan_scan(dev, first_chan, last_chan);
-}
-
-static void pcl818_ai_clear_eoc(struct comedi_device *dev)
-{
- /* writing any value clears the interrupt request */
- outb(0, dev->iobase + PCL818_STATUS_REG);
-}
-
-static void pcl818_ai_soft_trig(struct comedi_device *dev)
-{
- /* writing any value triggers a software conversion */
- outb(0, dev->iobase + PCL818_AI_LSB_REG);
-}
-
-static unsigned int pcl818_ai_get_fifo_sample(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chan)
-{
- unsigned int val;
-
- val = inb(dev->iobase + PCL818_FI_DATALO);
- val |= (inb(dev->iobase + PCL818_FI_DATAHI) << 8);
-
- if (chan)
- *chan = val & 0xf;
-
- return (val >> 4) & s->maxdata;
-}
-
-static unsigned int pcl818_ai_get_sample(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chan)
-{
- unsigned int val;
-
- val = inb(dev->iobase + PCL818_AI_MSB_REG) << 8;
- val |= inb(dev->iobase + PCL818_AI_LSB_REG);
-
- if (chan)
- *chan = val & 0xf;
-
- return (val >> 4) & s->maxdata;
-}
-
-static int pcl818_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + PCL818_STATUS_REG);
- if (status & PCL818_STATUS_INT)
- return 0;
- return -EBUSY;
-}
-
-static bool pcl818_ai_write_sample(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan, unsigned int val)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int expected_chan;
-
- expected_chan = devpriv->act_chanlist[devpriv->act_chanlist_pos];
- if (chan != expected_chan) {
- dev_dbg(dev->class_dev,
- "A/D mode1/3 %s - channel dropout %d!=%d !\n",
- (devpriv->dma) ? "DMA" :
- (devpriv->usefifo) ? "FIFO" : "IRQ",
- chan, expected_chan);
- s->async->events |= COMEDI_CB_ERROR;
- return false;
- }
-
- comedi_buf_write_samples(s, &val, 1);
-
- devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
- devpriv->act_chanlist_pos = 0;
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- s->async->events |= COMEDI_CB_EOA;
- return false;
- }
-
- return true;
-}
-
-static void pcl818_handle_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int chan;
- unsigned int val;
-
- if (pcl818_ai_eoc(dev, s, NULL, 0)) {
- dev_err(dev->class_dev, "A/D mode1/3 IRQ without DRDY!\n");
- s->async->events |= COMEDI_CB_ERROR;
- return;
- }
-
- val = pcl818_ai_get_sample(dev, s, &chan);
- pcl818_ai_write_sample(dev, s, chan, val);
-}
-
-static void pcl818_handle_dma(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma];
- unsigned short *ptr = desc->virt_addr;
- unsigned int nsamples = comedi_bytes_to_samples(s, desc->size);
- unsigned int chan;
- unsigned int val;
- int i;
-
- /* restart dma with the next buffer */
- dma->cur_dma = 1 - dma->cur_dma;
- pcl818_ai_setup_dma(dev, s, nsamples);
-
- for (i = 0; i < nsamples; i++) {
- val = ptr[i];
- chan = val & 0xf;
- val = (val >> 4) & s->maxdata;
- if (!pcl818_ai_write_sample(dev, s, chan, val))
- break;
- }
-}
-
-static void pcl818_handle_fifo(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- unsigned int status;
- unsigned int chan;
- unsigned int val;
- int i, len;
-
- status = inb(dev->iobase + PCL818_FI_STATUS);
-
- if (status & 4) {
- dev_err(dev->class_dev, "A/D mode1/3 FIFO overflow!\n");
- s->async->events |= COMEDI_CB_ERROR;
- return;
- }
-
- if (status & 1) {
- dev_err(dev->class_dev,
- "A/D mode1/3 FIFO interrupt without data!\n");
- s->async->events |= COMEDI_CB_ERROR;
- return;
- }
-
- if (status & 2)
- len = 512;
- else
- len = 0;
-
- for (i = 0; i < len; i++) {
- val = pcl818_ai_get_fifo_sample(dev, s, &chan);
- if (!pcl818_ai_write_sample(dev, s, chan, val))
- break;
- }
-}
-
-static irqreturn_t pcl818_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (!dev->attached || !devpriv->ai_cmd_running) {
- pcl818_ai_clear_eoc(dev);
- return IRQ_HANDLED;
- }
-
- if (devpriv->ai_cmd_canceled) {
- /*
- * The cleanup from ai_cancel() has been delayed
- * until now because the card doesn't seem to like
- * being reprogrammed while a DMA transfer is in
- * progress.
- */
- s->async->scans_done = cmd->stop_arg;
- s->cancel(dev, s);
- return IRQ_HANDLED;
- }
-
- if (devpriv->dma)
- pcl818_handle_dma(dev, s);
- else if (devpriv->usefifo)
- pcl818_handle_fifo(dev, s);
- else
- pcl818_handle_eoc(dev, s);
-
- pcl818_ai_clear_eoc(dev);
-
- comedi_handle_events(dev, s);
- return IRQ_HANDLED;
-}
-
-static int check_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan)
-{
- unsigned int chansegment[16];
- unsigned int i, nowmustbechan, seglen, segpos;
-
- /* channel and range numbers are validated by the comedi core (comedi/range.c) */
- if (n_chan < 1) {
- dev_err(dev->class_dev, "range/channel list is empty!\n");
- return 0;
- }
-
- if (n_chan > 1) {
- /* the first channel is always OK */
- chansegment[0] = chanlist[0];
- /* build the repeating segment of the chanlist */
- for (i = 1, seglen = 1; i < n_chan; i++, seglen++) {
- /* if we are back at the first channel, the segment is complete */
-
- if (chanlist[0] == chanlist[i])
- break;
- nowmustbechan =
- (CR_CHAN(chansegment[i - 1]) + 1) % s->n_chan;
- if (nowmustbechan != CR_CHAN(chanlist[i])) {
- /* channel list isn't continuous :-( */
- dev_dbg(dev->class_dev,
- "channel list must be continuous! chanlist[%i]=%d but must be %d or %d!\n",
- i, CR_CHAN(chanlist[i]), nowmustbechan,
- CR_CHAN(chanlist[0]));
- return 0;
- }
- /* well, this is the next correct channel in the list */
- chansegment[i] = chanlist[i];
- }
-
- /* check whole chanlist */
- for (i = 0, segpos = 0; i < n_chan; i++) {
- if (chanlist[i] != chansegment[i % seglen]) {
- dev_dbg(dev->class_dev,
- "bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
- i, CR_CHAN(chanlist[i]),
- CR_RANGE(chanlist[i]),
- CR_AREF(chanlist[i]),
- CR_CHAN(chansegment[i % seglen]),
- CR_RANGE(chansegment[i % seglen]),
- CR_AREF(chansegment[i % seglen]));
- return 0; /* chan/gain list is strange */
- }
- }
- } else {
- seglen = 1;
- }
- return seglen;
-}
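To make the constraint enforced by check_channel_list() concrete, here is a small illustrative sketch (not driver code) of a chanlist it accepts versus one it rejects. CR_PACK() is the standard comedi chanspec macro; the range and aref values are placeholders.

#include <comedi.h>	/* for CR_PACK() and AREF_GROUND */

/*
 * Accepted: a contiguous segment (channels 2,3,4,5) repeated verbatim.
 * check_channel_list() would detect seglen == 4.
 */
static const unsigned int good_chanlist[] = {
	CR_PACK(2, 0, AREF_GROUND), CR_PACK(3, 0, AREF_GROUND),
	CR_PACK(4, 0, AREF_GROUND), CR_PACK(5, 0, AREF_GROUND),
	CR_PACK(2, 0, AREF_GROUND), CR_PACK(3, 0, AREF_GROUND),
	CR_PACK(4, 0, AREF_GROUND), CR_PACK(5, 0, AREF_GROUND),
};

/*
 * Rejected: channel 6 does not follow channel 2, so the list is not a
 * contiguous scan segment and check_channel_list() returns 0.
 */
static const unsigned int bad_chanlist[] = {
	CR_PACK(2, 0, AREF_GROUND), CR_PACK(6, 0, AREF_GROUND),
};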
-
-static int check_single_ended(unsigned int port)
-{
- if (inb(port + PCL818_STATUS_REG) & PCL818_STATUS_MUX)
- return 1;
- return 0;
-}
-
-static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- const struct pcl818_board *board = dev->board_ptr;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- board->ns_min);
- } else { /* TRIG_EXT */
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int arg = cmd->convert_arg;
-
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- /* step 5: complain about special chanlist considerations */
-
- if (cmd->chanlist) {
- if (!check_channel_list(dev, s, cmd->chanlist,
- cmd->chanlist_len))
- return 5; /* incorrect channels list */
- }
-
- return 0;
-}
-
-static int pcl818_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int ctrl = 0;
- unsigned int seglen;
-
- if (devpriv->ai_cmd_running)
- return -EBUSY;
-
- seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
- if (seglen < 1)
- return -EINVAL;
- pcl818_ai_setup_chanlist(dev, cmd->chanlist, seglen);
-
- devpriv->ai_cmd_running = 1;
- devpriv->ai_cmd_canceled = 0;
- devpriv->act_chanlist_pos = 0;
-
- if (cmd->convert_src == TRIG_TIMER)
- ctrl |= PCL818_CTRL_PACER_TRIG;
- else
- ctrl |= PCL818_CTRL_EXT_TRIG;
-
- outb(PCL818_CNTENABLE_PACER_ENA, dev->iobase + PCL818_CNTENABLE_REG);
-
- if (dma) {
- /* setup and enable dma for the first buffer */
- dma->cur_dma = 0;
- pcl818_ai_setup_dma(dev, s, 0);
-
- ctrl |= PCL818_CTRL_INTE | PCL818_CTRL_IRQ(dev->irq) |
- PCL818_CTRL_DMAE;
- } else if (devpriv->usefifo) {
- /* enable FIFO */
- outb(1, dev->iobase + PCL818_FI_ENABLE);
- } else {
- ctrl |= PCL818_CTRL_INTE | PCL818_CTRL_IRQ(dev->irq);
- }
- outb(ctrl, dev->iobase + PCL818_CTRL_REG);
-
- if (cmd->convert_src == TRIG_TIMER) {
- comedi_8254_update_divisors(dev->pacer);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- }
-
- return 0;
-}
-
-static int pcl818_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_isadma *dma = devpriv->dma;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (!devpriv->ai_cmd_running)
- return 0;
-
- if (dma) {
- if (cmd->stop_src == TRIG_NONE ||
- (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done < cmd->stop_arg)) {
- if (!devpriv->ai_cmd_canceled) {
- /*
- * Wait for running dma transfer to end,
- * do cleanup in interrupt.
- */
- devpriv->ai_cmd_canceled = 1;
- return 0;
- }
- }
- comedi_isadma_disable(dma->chan);
- }
-
- outb(PCL818_CTRL_DISABLE_TRIG, dev->iobase + PCL818_CTRL_REG);
- comedi_8254_pacer_enable(dev->pacer, 1, 2, false);
- pcl818_ai_clear_eoc(dev);
-
- if (devpriv->usefifo) { /* FIFO shutdown */
- outb(0, dev->iobase + PCL818_FI_INTCLR);
- outb(0, dev->iobase + PCL818_FI_FLUSH);
- outb(0, dev->iobase + PCL818_FI_ENABLE);
- }
- devpriv->ai_cmd_running = 0;
- devpriv->ai_cmd_canceled = 0;
-
- return 0;
-}
-
-static int pcl818_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int ret = 0;
- int i;
-
- outb(PCL818_CTRL_SOFT_TRIG, dev->iobase + PCL818_CTRL_REG);
-
- pcl818_ai_set_chan_range(dev, chan, range);
- pcl818_ai_set_chan_scan(dev, chan, chan);
-
- for (i = 0; i < insn->n; i++) {
- pcl818_ai_clear_eoc(dev);
- pcl818_ai_soft_trig(dev);
-
- ret = comedi_timeout(dev, s, insn, pcl818_ai_eoc, 0);
- if (ret)
- break;
-
- data[i] = pcl818_ai_get_sample(dev, s, NULL);
- }
- pcl818_ai_clear_eoc(dev);
-
- return ret ? ret : insn->n;
-}
-
-static int pcl818_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- int i;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outb((val & 0x000f) << 4,
- dev->iobase + PCL818_AO_LSB_REG(chan));
- outb((val & 0x0ff0) >> 4,
- dev->iobase + PCL818_AO_MSB_REG(chan));
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pcl818_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inb(dev->iobase + PCL818_DO_DI_LSB_REG) |
- (inb(dev->iobase + PCL818_DO_DI_MSB_REG) << 8);
-
- return insn->n;
-}
-
-static int pcl818_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + PCL818_DO_DI_LSB_REG);
- outb((s->state >> 8), dev->iobase + PCL818_DO_DI_MSB_REG);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static void pcl818_reset(struct comedi_device *dev)
-{
- const struct pcl818_board *board = dev->board_ptr;
- unsigned int chan;
-
- /* flush and disable the FIFO */
- if (board->has_fifo) {
- outb(0, dev->iobase + PCL818_FI_INTCLR);
- outb(0, dev->iobase + PCL818_FI_FLUSH);
- outb(0, dev->iobase + PCL818_FI_ENABLE);
- }
-
- /* disable analog input trigger */
- outb(PCL818_CTRL_DISABLE_TRIG, dev->iobase + PCL818_CTRL_REG);
- pcl818_ai_clear_eoc(dev);
-
- pcl818_ai_set_chan_range(dev, 0, 0);
-
- /* stop pacer */
- outb(PCL818_CNTENABLE_PACER_ENA, dev->iobase + PCL818_CNTENABLE_REG);
-
- /* set analog output channels to 0V */
- for (chan = 0; chan < board->n_aochan; chan++) {
- outb(0, dev->iobase + PCL818_AO_LSB_REG(chan));
- outb(0, dev->iobase + PCL818_AO_MSB_REG(chan));
- }
-
- /* set all digital outputs low */
- outb(0, dev->iobase + PCL818_DO_DI_MSB_REG);
- outb(0, dev->iobase + PCL818_DO_DI_LSB_REG);
-}
-
-static void pcl818_set_ai_range_table(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_devconfig *it)
-{
- const struct pcl818_board *board = dev->board_ptr;
-
- /* default to the range table from the boardinfo */
- s->range_table = board->ai_range_type;
-
- /* now check the user config option based on the boardtype */
- if (board->is_818) {
- if (it->options[4] == 1 || it->options[4] == 10) {
- /* secondary range list jumper selectable */
- s->range_table = &range_pcl818l_h_ai;
- }
- } else {
- switch (it->options[4]) {
- case 0:
- s->range_table = &range_bipolar10;
- break;
- case 1:
- s->range_table = &range_bipolar5;
- break;
- case 2:
- s->range_table = &range_bipolar2_5;
- break;
- case 3:
- s->range_table = &range718_bipolar1;
- break;
- case 4:
- s->range_table = &range718_bipolar0_5;
- break;
- case 6:
- s->range_table = &range_unipolar10;
- break;
- case 7:
- s->range_table = &range_unipolar5;
- break;
- case 8:
- s->range_table = &range718_unipolar2;
- break;
- case 9:
- s->range_table = &range718_unipolar1;
- break;
- default:
- s->range_table = &range_unknown;
- break;
- }
- }
-}
-
-static void pcl818_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
-{
- struct pcl818_private *devpriv = dev->private;
-
- /* only DMA channels 3 and 1 are valid */
- if (!(dma_chan == 3 || dma_chan == 1))
- return;
-
- /* DMA uses two 16K buffers */
- devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
- PAGE_SIZE * 4, COMEDI_ISADMA_READ);
-}
-
-static void pcl818_free_dma(struct comedi_device *dev)
-{
- struct pcl818_private *devpriv = dev->private;
-
- if (devpriv)
- comedi_isadma_free(devpriv->dma);
-}
-
-static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct pcl818_board *board = dev->board_ptr;
- struct pcl818_private *devpriv;
- struct comedi_subdevice *s;
- unsigned int osc_base;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0],
- board->has_fifo ? 0x20 : 0x10);
- if (ret)
- return ret;
-
- /* we can use IRQ 2-7 for async command support */
- if (it->options[1] >= 2 && it->options[1] <= 7) {
- ret = request_irq(it->options[1], pcl818_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- /* should we use the FIFO? */
- if (dev->irq && board->has_fifo && it->options[2] == -1)
- devpriv->usefifo = 1;
-
- /* we need an IRQ to do DMA on channel 3 or 1 */
- if (dev->irq && board->has_dma)
- pcl818_alloc_dma(dev, it->options[2]);
-
- /* use a 1 MHz or 10 MHz oscillator */
- if ((it->options[3] == 0) || (it->options[3] == 10))
- osc_base = I8254_OSC_BASE_10MHZ;
- else
- osc_base = I8254_OSC_BASE_1MHZ;
-
- dev->pacer = comedi_8254_init(dev->iobase + PCL818_TIMER_BASE,
- osc_base, I8254_IO8, 0);
- if (!dev->pacer)
- return -ENOMEM;
-
- /* maximum sampling speed (minimum convert period, in ns) */
- devpriv->ns_min = board->ns_min;
- if (!board->is_818) {
- /* PCL-718 with Option 001 installed supports 100 kHz A/D sampling */
- if ((it->options[6] == 1) || (it->options[6] == 100))
- devpriv->ns_min = 10000;
- }
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- if (check_single_ended(dev->iobase)) {
- s->n_chan = 16;
- s->subdev_flags |= SDF_COMMON | SDF_GROUND;
- } else {
- s->n_chan = 8;
- s->subdev_flags |= SDF_DIFF;
- }
- s->maxdata = 0x0fff;
-
- pcl818_set_ai_range_table(dev, s, it);
-
- s->insn_read = pcl818_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = ai_cmdtest;
- s->do_cmd = pcl818_ai_cmd;
- s->cancel = pcl818_ai_cancel;
- }
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- if (board->n_aochan) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
- s->maxdata = 0x0fff;
- s->range_table = &range_unipolar5;
- if (board->is_818) {
- if ((it->options[4] == 1) || (it->options[4] == 10))
- s->range_table = &range_unipolar10;
- if (it->options[4] == 2)
- s->range_table = &range_unknown;
- } else {
- if ((it->options[5] == 1) || (it->options[5] == 10))
- s->range_table = &range_unipolar10;
- if (it->options[5] == 2)
- s->range_table = &range_unknown;
- }
- s->insn_write = pcl818_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Digital Input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl818_di_insn_bits;
-
- /* Digital Output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl818_do_insn_bits;
-
- pcl818_reset(dev);
-
- return 0;
-}
-
-static void pcl818_detach(struct comedi_device *dev)
-{
- struct pcl818_private *devpriv = dev->private;
-
- if (devpriv) {
- pcl818_ai_cancel(dev, dev->read_subdev);
- pcl818_reset(dev);
- }
- pcl818_free_dma(dev);
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver pcl818_driver = {
- .driver_name = "pcl818",
- .module = THIS_MODULE,
- .attach = pcl818_attach,
- .detach = pcl818_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
- .offset = sizeof(struct pcl818_board),
-};
-module_comedi_driver(pcl818_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
deleted file mode 100644
index 6176dfa24801..000000000000
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- comedi/drivers/pcm3724.c
-
- Drew Csillag <drew_csillag@yahoo.com>
-
- hardware driver for Advantech card:
- card: PCM-3724
- driver: pcm3724
-
- Options for PCM-3724
- [0] - IO Base
-*/
-/*
-Driver: pcm3724
-Description: Advantech PCM-3724
-Author: Drew Csillag <drew_csillag@yahoo.com>
-Devices: [Advantech] PCM-3724 (pcm3724)
-Status: tested
-
-This is a driver for the PCM-3724 digital I/O board with 48 DIO lines.
-It needs the 8255 module for operation and only immediate mode is supported.
-See the source for configuration details.
-
-Copy/pasted/hacked from pcm724.c
-*/
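Since only immediate-mode DIO is supported, typical user-space interaction is just configuring a line's direction and setting it. The sketch below is an illustration, not part of the driver; the device node and the choice of channel 0 on the first 8255 subdevice are assumptions.

#include <comedilib.h>

int set_line_high(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */

	if (!dev)
		return -1;

	/* subdevice 0 is the first 8255 group; make channel 0 an output */
	comedi_dio_config(dev, 0, 0, COMEDI_OUTPUT);
	/* drive the line high */
	comedi_dio_write(dev, 0, 0, 1);

	comedi_close(dev);
	return 0;
}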
-/*
- * check_driver overrides:
- * struct comedi_insn
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-
-#define BUF_C0 0x1
-#define BUF_B0 0x2
-#define BUF_A0 0x4
-#define BUF_C1 0x8
-#define BUF_B1 0x10
-#define BUF_A1 0x20
-
-#define GATE_A0 0x4
-#define GATE_B0 0x2
-#define GATE_C0 0x1
-#define GATE_A1 0x20
-#define GATE_B1 0x10
-#define GATE_C1 0x8
-
-/* used to track configured dios */
-struct priv_pcm3724 {
- int dio_1;
- int dio_2;
-};
-
-static int compute_buffer(int config, int devno, struct comedi_subdevice *s)
-{
- /* 1 in io_bits indicates output */
- if (s->io_bits & 0x0000ff) {
- if (devno == 0)
- config |= BUF_A0;
- else
- config |= BUF_A1;
- }
- if (s->io_bits & 0x00ff00) {
- if (devno == 0)
- config |= BUF_B0;
- else
- config |= BUF_B1;
- }
- if (s->io_bits & 0xff0000) {
- if (devno == 0)
- config |= BUF_C0;
- else
- config |= BUF_C1;
- }
- return config;
-}
-
-static void do_3724_config(struct comedi_device *dev,
- struct comedi_subdevice *s, int chanspec)
-{
- struct comedi_subdevice *s_dio1 = &dev->subdevices[0];
- struct comedi_subdevice *s_dio2 = &dev->subdevices[1];
- int config;
- int buffer_config;
- unsigned long port_8255_cfg;
-
- config = I8255_CTRL_CW;
- buffer_config = 0;
-
- /* 1 in io_bits indicates output, 1 in config indicates input */
- if (!(s->io_bits & 0x0000ff))
- config |= I8255_CTRL_A_IO;
-
- if (!(s->io_bits & 0x00ff00))
- config |= I8255_CTRL_B_IO;
-
- if (!(s->io_bits & 0xff0000))
- config |= I8255_CTRL_C_HI_IO | I8255_CTRL_C_LO_IO;
-
- buffer_config = compute_buffer(0, 0, s_dio1);
- buffer_config = compute_buffer(buffer_config, 1, s_dio2);
-
- if (s == s_dio1)
- port_8255_cfg = dev->iobase + I8255_CTRL_REG;
- else
- port_8255_cfg = dev->iobase + I8255_SIZE + I8255_CTRL_REG;
-
- outb(buffer_config, dev->iobase + 8); /* update buffer register */
-
- outb(config, port_8255_cfg);
-}
-
-static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s,
- int chanspec)
-{
- struct priv_pcm3724 *priv = dev->private;
- struct comedi_subdevice *s_dio1 = &dev->subdevices[0];
- unsigned int mask;
- int gatecfg;
-
- gatecfg = 0;
-
- mask = 1 << CR_CHAN(chanspec);
- if (s == s_dio1)
- priv->dio_1 |= mask;
- else
- priv->dio_2 |= mask;
-
- if (priv->dio_1 & 0xff0000)
- gatecfg |= GATE_C0;
-
- if (priv->dio_1 & 0xff00)
- gatecfg |= GATE_B0;
-
- if (priv->dio_1 & 0xff)
- gatecfg |= GATE_A0;
-
- if (priv->dio_2 & 0xff0000)
- gatecfg |= GATE_C1;
-
- if (priv->dio_2 & 0xff00)
- gatecfg |= GATE_B1;
-
- if (priv->dio_2 & 0xff)
- gatecfg |= GATE_A1;
-
- outb(gatecfg, dev->iobase + 9);
-}
-
-/* overriding the 8255 insn config */
-static int subdev_3724_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- if (chan < 8)
- mask = 0x0000ff;
- else if (chan < 16)
- mask = 0x00ff00;
- else if (chan < 20)
- mask = 0x0f0000;
- else
- mask = 0xf00000;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- do_3724_config(dev, s, insn->chanspec);
- enable_chan(dev, s, insn->chanspec);
-
- return insn->n;
-}
-
-static int pcm3724_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct priv_pcm3724 *priv;
- struct comedi_subdevice *s;
- int ret, i;
-
- priv = comedi_alloc_devpriv(dev, sizeof(*priv));
- if (!priv)
- return -ENOMEM;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
- if (ret)
- return ret;
- s->insn_config = subdev_3724_insn_config;
- }
- return 0;
-}
-
-static struct comedi_driver pcm3724_driver = {
- .driver_name = "pcm3724",
- .module = THIS_MODULE,
- .attach = pcm3724_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(pcm3724_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
deleted file mode 100644
index 12f94fe82f5b..000000000000
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * pcmad.c
- * Hardware driver for Winsystems PCM-A/D12 and PCM-A/D16
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000,2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: pcmad
- * Description: Winsystems PCM-A/D12, PCM-A/D16
- * Devices: [Winsystems] PCM-A/D12 (pcmad12), PCM-A/D16 (pcmad16)
- * Author: ds
- * Status: untested
- *
- * This driver was written on a bet that I couldn't write a driver
- * in less than 2 hours. I won the bet, but never got paid. =(
- *
- * Configuration options:
- * [0] - I/O port base
- * [1] - IRQ (unused)
- * [2] - Analog input reference (must match jumpers)
- * 0 = single-ended (16 channels)
- * 1 = differential (8 channels)
- * [3] - Analog input encoding (must match jumpers)
- * 0 = straight binary (0-5V input range)
- * 1 = two's complement (+-10V input range)
- */
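Because the input range is fixed by jumpers and only mirrored by options [2]/[3], converting a raw reading to volts happens entirely in user space. The sketch below is illustrative only (the subdevice/channel indices and range index 0 are assumptions) and shows the usual comedilib conversion path.

#include <comedilib.h>

int read_volts(comedi_t *dev, unsigned int chan, double *volts)
{
	/* range index 0: whichever range the jumpers (option [3]) select */
	comedi_range *rng = comedi_get_range(dev, 0, chan, 0);
	lsampl_t maxdata = comedi_get_maxdata(dev, 0, chan);
	lsampl_t raw;

	if (!rng || comedi_data_read(dev, 0, chan, 0, AREF_GROUND, &raw) < 0)
		return -1;

	/* scale the raw sample into physical units using the range info */
	*volts = comedi_to_phys(raw, rng, maxdata);
	return 0;
}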
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#define PCMAD_STATUS 0
-#define PCMAD_LSB 1
-#define PCMAD_MSB 2
-#define PCMAD_CONVERT 1
-
-struct pcmad_board_struct {
- const char *name;
- unsigned int ai_maxdata;
-};
-
-static const struct pcmad_board_struct pcmad_boards[] = {
- {
- .name = "pcmad12",
- .ai_maxdata = 0x0fff,
- }, {
- .name = "pcmad16",
- .ai_maxdata = 0xffff,
- },
-};
-
-static int pcmad_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inb(dev->iobase + PCMAD_STATUS);
- if ((status & 0x3) == 0x3)
- return 0;
- return -EBUSY;
-}
-
-static int pcmad_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
- int ret;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- outb(chan, dev->iobase + PCMAD_CONVERT);
-
- ret = comedi_timeout(dev, s, insn, pcmad_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = inb(dev->iobase + PCMAD_LSB) |
- (inb(dev->iobase + PCMAD_MSB) << 8);
-
- /* data is shifted on the pcmad12, fix it */
- if (s->maxdata == 0x0fff)
- val >>= 4;
-
- if (comedi_range_is_bipolar(s, range)) {
- /* munge the two's complement value */
- val ^= ((s->maxdata + 1) >> 1);
- }
-
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int pcmad_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct pcmad_board_struct *board = dev->board_ptr;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x04);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- if (it->options[1]) {
- /* 8 differential channels */
- s->subdev_flags = SDF_READABLE | AREF_DIFF;
- s->n_chan = 8;
- } else {
- /* 16 single-ended channels */
- s->subdev_flags = SDF_READABLE | AREF_GROUND;
- s->n_chan = 16;
- }
- s->len_chanlist = 1;
- s->maxdata = board->ai_maxdata;
- s->range_table = it->options[2] ? &range_bipolar10 : &range_unipolar5;
- s->insn_read = pcmad_ai_insn_read;
-
- return 0;
-}
-
-static struct comedi_driver pcmad_driver = {
- .driver_name = "pcmad",
- .module = THIS_MODULE,
- .attach = pcmad_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pcmad_boards[0].name,
- .num_names = ARRAY_SIZE(pcmad_boards),
- .offset = sizeof(pcmad_boards[0]),
-};
-module_comedi_driver(pcmad_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmda12.c b/drivers/staging/comedi/drivers/pcmda12.c
deleted file mode 100644
index d86c5e2cd0c7..000000000000
--- a/drivers/staging/comedi/drivers/pcmda12.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * pcmda12.c
- * Driver for Winsystems PC-104 based PCM-D/A-12 8-channel AO board.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: pcmda12
- * Description: A driver for the Winsystems PCM-D/A-12
- * Devices: [Winsystems] PCM-D/A-12 (pcmda12)
- * Author: Calin Culianu <calin@ajvar.org>
- * Updated: Fri, 13 Jan 2006 12:01:01 -0500
- * Status: works
- *
- * A driver for the relatively straightforward-to-program PCM-D/A-12.
- * This board doesn't support commands, and the only way to set its
- * analog output range is to jumper the board. As such,
- * comedi_data_write() ignores the range value specified.
- *
- * The board uses 16 consecutive I/O addresses starting at the I/O port
- * base address. Each address corresponds to the LSB then MSB of a
- * particular channel from 0-7.
- *
- * Note that the board is not ISA-PNP capable and thus needs the I/O
- * port comedi_config parameter.
- *
- * Note that passing a nonzero value as the second config option will
- * enable "simultaneous xfer" mode for this board, in which AO writes
- * will not take effect until a subsequent read of any AO channel. This
- * is so that one can speed up programming by preloading all AO registers
- * with values before simultaneously setting them to take effect with one
- * read command.
- *
- * Configuration Options:
- * [0] - I/O port base address
- * [1] - Do Simultaneous Xfer (see description)
- */
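The "simultaneous xfer" behaviour described above maps onto ordinary comedilib calls. The sketch below is only an illustration (the device handle, eight-channel assumption and range/aref values are placeholders) of preloading all outputs and then latching them with a single read.

#include <comedilib.h>

/* Assumes the board was attached with config option [1] nonzero. */
int update_all_outputs(comedi_t *dev, const lsampl_t vals[8])
{
	lsampl_t dummy;
	unsigned int chan;

	/* preload every AO register; outputs do not change yet */
	for (chan = 0; chan < 8; chan++)
		if (comedi_data_write(dev, 0, chan, 0, AREF_GROUND,
				      vals[chan]) < 0)
			return -1;

	/* reading any AO channel makes all preloaded values take effect */
	return comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &dummy);
}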
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-/* The AO range is not configurable; it is set by jumpers on the board. */
-static const struct comedi_lrange pcmda12_ranges = {
- 3, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5)
- }
-};
-
-struct pcmda12_private {
- int simultaneous_xfer_mode;
-};
-
-static int pcmda12_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pcmda12_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- unsigned long ioreg = dev->iobase + (chan * 2);
- int i;
-
- for (i = 0; i < insn->n; ++i) {
- val = data[i];
- outb(val & 0xff, ioreg);
- outb((val >> 8) & 0xff, ioreg + 1);
-
- /*
- * Initiate the transfer if not in simultaneous xfer
- * mode by reading one of the AO registers.
- */
- if (!devpriv->simultaneous_xfer_mode)
- inb(ioreg);
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int pcmda12_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pcmda12_private *devpriv = dev->private;
-
- /*
- * In simultaneous xfer mode, reading one of the AO registers
- * triggers the transfer; all analog outputs are then updated.
- */
- if (devpriv->simultaneous_xfer_mode)
- inb(dev->iobase);
-
- return comedi_readback_insn_read(dev, s, insn, data);
-}
-
-static void pcmda12_ao_reset(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- int i;
-
- for (i = 0; i < s->n_chan; ++i) {
- outb(0, dev->iobase + (i * 2));
- outb(0, dev->iobase + (i * 2) + 1);
- }
- /* Initiate transfer by reading one of the AO registers. */
- inb(dev->iobase);
-}
-
-static int pcmda12_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct pcmda12_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- devpriv->simultaneous_xfer_mode = it->options[1];
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 0x0fff;
- s->range_table = &pcmda12_ranges;
- s->insn_write = pcmda12_ao_insn_write;
- s->insn_read = pcmda12_ao_insn_read;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- pcmda12_ao_reset(dev, s);
-
- return 0;
-}
-
-static struct comedi_driver pcmda12_driver = {
- .driver_name = "pcmda12",
- .module = THIS_MODULE,
- .attach = pcmda12_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(pcmda12_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
deleted file mode 100644
index 10472e6dd002..000000000000
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
- * pcmmio.c
- * Driver for Winsystems PC-104 based multifunction IO board.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2007 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: pcmmio
- * Description: A driver for the PCM-MIO multifunction board
- * Devices: [Winsystems] PCM-MIO (pcmmio)
- * Author: Calin Culianu <calin@ajvar.org>
- * Updated: Wed, May 16 2007 16:21:10 -0500
- * Status: works
- *
- * A driver for the PCM-MIO multifunction board from Winsystems. This
- * is a PC-104 based I/O board. It contains four subdevices:
- *
- * subdevice 0 - 16 channels of 16-bit AI
- * subdevice 1 - 8 channels of 16-bit AO
- * subdevice 2 - first 24 channels of the 48-channel DIO
- * (with edge-triggered interrupt support)
- * subdevice 3 - last 24 channels of the 48-channel DIO
- * (no interrupt support for this bank of channels)
- *
- * Some notes:
- *
- * Synchronous reads and writes are the only things implemented for analog
- * input and output. The hardware itself can do streaming acquisition, etc.
- *
- * Asynchronous I/O for the DIO subdevices *is* implemented, however! They
- * are basically edge-triggered interrupts for any configuration of the
- * channels in subdevice 2.
- *
- * Also note that this interrupt support is untested.
- *
- * A few words about edge-detection IRQ support (commands on DIO):
- *
- * To use edge-detection IRQ support for the DIO subdevice, pass the IRQ
- * of the board to the comedi_config command. The board IRQ is not jumpered
- * but rather configured through software, so any IRQ from 1-15 is OK.
- *
- * Due to the generic nature of the comedi API, you need to create a
- * special comedi_command in order to use edge-triggered interrupts for DIO.
- *
- * Use comedi_commands with TRIG_NOW. Your callback will be called each
- * time an edge is detected on the specified DIO line(s), and the data
- * values will be two sample_t's, which should be concatenated to form
- * one 32-bit unsigned int. This value is the mask of channels that had
- * edges detected from your channel list. Note that the bits positions
- * in the mask correspond to positions in your chanlist when you
- * specified the command and *not* channel id's!
- *
- * To set the polarity of the edge-detection interrupts pass a nonzero value
- * for either CR_RANGE or CR_AREF for edge-up polarity, or a zero
- * value for both CR_RANGE and CR_AREF if you want edge-down polarity.
- *
- * Configuration Options:
- * [0] - I/O port base address
- * [1] - IRQ (optional -- for edge-detect interrupt support only,
- * leave out if you don't need this feature)
- */
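As a rough illustration of the command described above, the user-space sketch below fills in a comedi_cmd the way this driver's pcmmio_cmdtest() expects (TRIG_NOW start, TRIG_EXT scan_begin, TRIG_NONE stop). The device handle, the monitored channels and the use of subdevice 2 are assumptions for the example.

#include <string.h>
#include <comedilib.h>

/* Watch rising edges on DIO channels 0 and 1 of the interrupt-capable
 * DIO subdevice (subdevice 2 on this board). */
int start_edge_detect(comedi_t *dev)
{
	static unsigned int chanlist[2];
	comedi_cmd cmd;

	/* nonzero CR_RANGE selects edge-up polarity, per the comment above */
	chanlist[0] = CR_PACK(0, 1, 0);
	chanlist[1] = CR_PACK(1, 1, 0);

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = 2;
	cmd.start_src = TRIG_NOW;	/* start immediately */
	cmd.start_arg = 0;
	cmd.scan_begin_src = TRIG_EXT;	/* each detected edge triggers a scan */
	cmd.scan_begin_arg = 0;
	cmd.convert_src = TRIG_NOW;
	cmd.convert_arg = 0;
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = 2;		/* must equal chanlist_len */
	cmd.stop_src = TRIG_NONE;	/* run until cancelled */
	cmd.stop_arg = 0;
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 2;

	return comedi_command(dev, &cmd);
}

The per-edge channel masks are then collected from the subdevice's async buffer with an ordinary read() on the comedi device file.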
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include "../comedidev.h"
-
-/*
- * Register I/O map
- */
-#define PCMMIO_AI_LSB_REG 0x00
-#define PCMMIO_AI_MSB_REG 0x01
-#define PCMMIO_AI_CMD_REG 0x02
-#define PCMMIO_AI_CMD_SE (1 << 7)
-#define PCMMIO_AI_CMD_ODD_CHAN (1 << 6)
-#define PCMMIO_AI_CMD_CHAN_SEL(x) (((x) & 0x3) << 4)
-#define PCMMIO_AI_CMD_RANGE(x) (((x) & 0x3) << 2)
-#define PCMMIO_RESOURCE_REG 0x02
-#define PCMMIO_RESOURCE_IRQ(x) (((x) & 0xf) << 0)
-#define PCMMIO_AI_STATUS_REG 0x03
-#define PCMMIO_AI_STATUS_DATA_READY (1 << 7)
-#define PCMMIO_AI_STATUS_DATA_DMA_PEND (1 << 6)
-#define PCMMIO_AI_STATUS_CMD_DMA_PEND (1 << 5)
-#define PCMMIO_AI_STATUS_IRQ_PEND (1 << 4)
-#define PCMMIO_AI_STATUS_DATA_DRQ_ENA (1 << 2)
-#define PCMMIO_AI_STATUS_REG_SEL (1 << 3)
-#define PCMMIO_AI_STATUS_CMD_DRQ_ENA (1 << 1)
-#define PCMMIO_AI_STATUS_IRQ_ENA (1 << 0)
-#define PCMMIO_AI_RES_ENA_REG 0x03
-#define PCMMIO_AI_RES_ENA_CMD_REG_ACCESS (0 << 3)
-#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS (1 << 3)
-#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS (1 << 4)
-#define PCMMIO_AI_2ND_ADC_OFFSET 0x04
-
-#define PCMMIO_AO_LSB_REG 0x08
-#define PCMMIO_AO_LSB_SPAN(x) (((x) & 0xf) << 0)
-#define PCMMIO_AO_MSB_REG 0x09
-#define PCMMIO_AO_CMD_REG 0x0a
-#define PCMMIO_AO_CMD_WR_SPAN (0x2 << 4)
-#define PCMMIO_AO_CMD_WR_CODE (0x3 << 4)
-#define PCMMIO_AO_CMD_UPDATE (0x4 << 4)
-#define PCMMIO_AO_CMD_UPDATE_ALL (0x5 << 4)
-#define PCMMIO_AO_CMD_WR_SPAN_UPDATE (0x6 << 4)
-#define PCMMIO_AO_CMD_WR_CODE_UPDATE (0x7 << 4)
-#define PCMMIO_AO_CMD_WR_SPAN_UPDATE_ALL (0x8 << 4)
-#define PCMMIO_AO_CMD_WR_CODE_UPDATE_ALL (0x9 << 4)
-#define PCMMIO_AO_CMD_RD_B1_SPAN (0xa << 4)
-#define PCMMIO_AO_CMD_RD_B1_CODE (0xb << 4)
-#define PCMMIO_AO_CMD_RD_B2_SPAN (0xc << 4)
-#define PCMMIO_AO_CMD_RD_B2_CODE (0xd << 4)
-#define PCMMIO_AO_CMD_NOP (0xf << 4)
-#define PCMMIO_AO_CMD_CHAN_SEL(x) (((x) & 0x03) << 1)
-#define PCMMIO_AO_CMD_CHAN_SEL_ALL (0x0f << 0)
-#define PCMMIO_AO_STATUS_REG 0x0b
-#define PCMMIO_AO_STATUS_DATA_READY (1 << 7)
-#define PCMMIO_AO_STATUS_DATA_DMA_PEND (1 << 6)
-#define PCMMIO_AO_STATUS_CMD_DMA_PEND (1 << 5)
-#define PCMMIO_AO_STATUS_IRQ_PEND (1 << 4)
-#define PCMMIO_AO_STATUS_DATA_DRQ_ENA (1 << 2)
-#define PCMMIO_AO_STATUS_REG_SEL (1 << 3)
-#define PCMMIO_AO_STATUS_CMD_DRQ_ENA (1 << 1)
-#define PCMMIO_AO_STATUS_IRQ_ENA (1 << 0)
-#define PCMMIO_AO_RESOURCE_ENA_REG 0x0b
-#define PCMMIO_AO_2ND_DAC_OFFSET 0x04
-
-/*
- * WinSystems WS16C48
- *
- * Offset Page 0 Page 1 Page 2 Page 3
- * ------ ----------- ----------- ----------- -----------
- * 0x10 Port 0 I/O Port 0 I/O Port 0 I/O Port 0 I/O
- * 0x11 Port 1 I/O Port 1 I/O Port 1 I/O Port 1 I/O
- * 0x12 Port 2 I/O Port 2 I/O Port 2 I/O Port 2 I/O
- * 0x13 Port 3 I/O Port 3 I/O Port 3 I/O Port 3 I/O
- * 0x14 Port 4 I/O Port 4 I/O Port 4 I/O Port 4 I/O
- * 0x15 Port 5 I/O Port 5 I/O Port 5 I/O Port 5 I/O
- * 0x16 INT_PENDING INT_PENDING INT_PENDING INT_PENDING
- * 0x17 Page/Lock Page/Lock Page/Lock Page/Lock
- * 0x18 N/A POL_0 ENAB_0 INT_ID0
- * 0x19 N/A POL_1 ENAB_1 INT_ID1
- * 0x1a N/A POL_2 ENAB_2 INT_ID2
- */
-#define PCMMIO_PORT_REG(x) (0x10 + (x))
-#define PCMMIO_INT_PENDING_REG 0x16
-#define PCMMIO_PAGE_LOCK_REG 0x17
-#define PCMMIO_LOCK_PORT(x) ((1 << (x)) & 0x3f)
-#define PCMMIO_PAGE(x) (((x) & 0x3) << 6)
-#define PCMMIO_PAGE_MASK PCMMIO_PAGE(3)
-#define PCMMIO_PAGE_POL 1
-#define PCMMIO_PAGE_ENAB 2
-#define PCMMIO_PAGE_INT_ID 3
-#define PCMMIO_PAGE_REG(x) (0x18 + (x))
-
-static const struct comedi_lrange pcmmio_ai_ranges = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-static const struct comedi_lrange pcmmio_ao_ranges = {
- 6, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10),
- BIP_RANGE(2.5),
- RANGE(-2.5, 7.5)
- }
-};
-
-struct pcmmio_private {
- spinlock_t pagelock; /* protects the page registers */
- spinlock_t spinlock; /* protects the member variables */
- unsigned int enabled_mask;
- unsigned int active:1;
-};
-
-static void pcmmio_dio_write(struct comedi_device *dev, unsigned int val,
- int page, int port)
-{
- struct pcmmio_private *devpriv = dev->private;
- unsigned long iobase = dev->iobase;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->pagelock, flags);
- if (page == 0) {
- /* Port registers are valid for any page */
- outb(val & 0xff, iobase + PCMMIO_PORT_REG(port + 0));
- outb((val >> 8) & 0xff, iobase + PCMMIO_PORT_REG(port + 1));
- outb((val >> 16) & 0xff, iobase + PCMMIO_PORT_REG(port + 2));
- } else {
- outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
- outb(val & 0xff, iobase + PCMMIO_PAGE_REG(0));
- outb((val >> 8) & 0xff, iobase + PCMMIO_PAGE_REG(1));
- outb((val >> 16) & 0xff, iobase + PCMMIO_PAGE_REG(2));
- }
- spin_unlock_irqrestore(&devpriv->pagelock, flags);
-}
-
-static unsigned int pcmmio_dio_read(struct comedi_device *dev,
- int page, int port)
-{
- struct pcmmio_private *devpriv = dev->private;
- unsigned long iobase = dev->iobase;
- unsigned long flags;
- unsigned int val;
-
- spin_lock_irqsave(&devpriv->pagelock, flags);
- if (page == 0) {
- /* Port registers are valid for any page */
- val = inb(iobase + PCMMIO_PORT_REG(port + 0));
- val |= (inb(iobase + PCMMIO_PORT_REG(port + 1)) << 8);
- val |= (inb(iobase + PCMMIO_PORT_REG(port + 2)) << 16);
- } else {
- outb(PCMMIO_PAGE(page), iobase + PCMMIO_PAGE_LOCK_REG);
- val = inb(iobase + PCMMIO_PAGE_REG(0));
- val |= (inb(iobase + PCMMIO_PAGE_REG(1)) << 8);
- val |= (inb(iobase + PCMMIO_PAGE_REG(2)) << 16);
- }
- spin_unlock_irqrestore(&devpriv->pagelock, flags);
-
- return val;
-}
-
-/*
- * Each channel can be individually programmed for input or output.
- * Writing a '0' to a channel causes the corresponding output pin
- * to go to a high-z state (pulled high by an external 10K resistor).
- * This allows it to be used as an input. When used in the input mode,
- * a read reflects the inverted state of the I/O pin, such that a
- * high on the pin will read as a '0' in the register. Writing a '1'
- * to a bit position causes the pin to sink current (up to 12mA),
- * effectively pulling it low.
- */
-static int pcmmio_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */
- int port = s->index == 2 ? 0 : 3;
- unsigned int chanmask = (1 << s->n_chan) - 1;
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- /*
- * Outputs are inverted, invert the state and
- * update the channels.
- *
- * The s->io_bits mask makes sure the input channels
- * are written as '0' so that those pins stay in a
- * high-z state and remain usable as inputs.
- */
- val = ~s->state & chanmask;
- val &= s->io_bits;
- pcmmio_dio_write(dev, val, 0, port);
- }
-
- /* get inverted state of the channels from the port */
- val = pcmmio_dio_read(dev, 0, port);
-
- /* return the true state of the channels */
- data[1] = ~val & chanmask;
-
- return insn->n;
-}
-
-static int pcmmio_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- /* subdevice 2 uses ports 0-2, subdevice 3 uses ports 3-5 */
- int port = s->index == 2 ? 0 : 3;
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- if (data[0] == INSN_CONFIG_DIO_INPUT)
- pcmmio_dio_write(dev, s->io_bits, 0, port);
-
- return insn->n;
-}
-
-static void pcmmio_reset(struct comedi_device *dev)
-{
- /* Clear all the DIO port bits */
- pcmmio_dio_write(dev, 0, 0, 0);
- pcmmio_dio_write(dev, 0, 0, 3);
-
- /* Clear all the paged registers */
- pcmmio_dio_write(dev, 0, PCMMIO_PAGE_POL, 0);
- pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0);
- pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0);
-}
-
-/* devpriv->spinlock is already locked */
-static void pcmmio_stop_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcmmio_private *devpriv = dev->private;
-
- devpriv->enabled_mask = 0;
- devpriv->active = 0;
- s->async->inttrig = NULL;
-
- /* disable all dio interrupts */
- pcmmio_dio_write(dev, 0, PCMMIO_PAGE_ENAB, 0);
-}
-
-static void pcmmio_handle_dio_intr(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int triggered)
-{
- struct pcmmio_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int val = 0;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&devpriv->spinlock, flags);
-
- if (!devpriv->active)
- goto done;
-
- if (!(triggered & devpriv->enabled_mask))
- goto done;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (triggered & (1 << chan))
- val |= (1 << i);
- }
-
- comedi_buf_write_samples(s, &val, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
-
-done:
- spin_unlock_irqrestore(&devpriv->spinlock, flags);
-
- comedi_handle_events(dev, s);
-}
-
-static irqreturn_t interrupt_pcmmio(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int triggered;
- unsigned char int_pend;
-
- /* are there any interrupts pending */
- int_pend = inb(dev->iobase + PCMMIO_INT_PENDING_REG) & 0x07;
- if (!int_pend)
- return IRQ_NONE;
-
- /* get, and clear, the pending interrupts */
- triggered = pcmmio_dio_read(dev, PCMMIO_PAGE_INT_ID, 0);
- pcmmio_dio_write(dev, 0, PCMMIO_PAGE_INT_ID, 0);
-
- pcmmio_handle_dio_intr(dev, s, triggered);
-
- return IRQ_HANDLED;
-}
-
-/* devpriv->spinlock is already locked */
-static void pcmmio_start_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcmmio_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int bits = 0;
- unsigned int pol_bits = 0;
- int i;
-
- devpriv->enabled_mask = 0;
- devpriv->active = 1;
- if (cmd->chanlist) {
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chanspec = cmd->chanlist[i];
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
-
- bits |= (1 << chan);
- pol_bits |= (((aref || range) ? 1 : 0) << chan);
- }
- }
- bits &= ((1 << s->n_chan) - 1);
- devpriv->enabled_mask = bits;
-
- /* set polarity and enable interrupts */
- pcmmio_dio_write(dev, pol_bits, PCMMIO_PAGE_POL, 0);
- pcmmio_dio_write(dev, bits, PCMMIO_PAGE_ENAB, 0);
-}
-
-static int pcmmio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcmmio_private *devpriv = dev->private;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->spinlock, flags);
- if (devpriv->active)
- pcmmio_stop_intr(dev, s);
- spin_unlock_irqrestore(&devpriv->spinlock, flags);
-
- return 0;
-}
-
-static int pcmmio_inttrig_start_intr(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct pcmmio_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long flags;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- spin_lock_irqsave(&devpriv->spinlock, flags);
- s->async->inttrig = NULL;
- if (devpriv->active)
- pcmmio_start_intr(dev, s);
- spin_unlock_irqrestore(&devpriv->spinlock, flags);
-
- return 1;
-}
-
-/*
- * 'do_cmd' function for an 'INTERRUPT' subdevice.
- */
-static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcmmio_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->spinlock, flags);
- devpriv->active = 1;
-
- /* Set up start of acquisition. */
- if (cmd->start_src == TRIG_INT)
- s->async->inttrig = pcmmio_inttrig_start_intr;
- else /* TRIG_NOW */
- pcmmio_start_intr(dev, s);
-
- spin_unlock_irqrestore(&devpriv->spinlock, flags);
-
- return 0;
-}
-
-static int pcmmio_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- /* if (err) return 4; */
-
- return 0;
-}
-
-static int pcmmio_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned char status;
-
- status = inb(dev->iobase + PCMMIO_AI_STATUS_REG);
- if (status & PCMMIO_AI_STATUS_DATA_READY)
- return 0;
- return -EBUSY;
-}
-
-static int pcmmio_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long iobase = dev->iobase;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int aref = CR_AREF(insn->chanspec);
- unsigned char cmd = 0;
- unsigned int val;
- int ret;
- int i;
-
- /*
- * The PCM-MIO uses two Linear Tech LTC1859CG 8-channel A/D converters.
- * The devices use a full duplex serial interface which transmits and
- * receives data simultaneously. An 8-bit command is shifted into the
- * ADC interface to configure it for the next conversion. At the same
- * time, the data from the previous conversion is shifted out of the
- * device. Consequently, the conversion result is delayed by one
- * conversion from the command word.
- *
- * Set up the cmd for the conversions, then do a dummy conversion to
- * flush the junk data. Then do each conversion requested by the
- * comedi_insn. Note that the last conversion will leave junk data
- * in the ADC, which will get flushed on the next comedi_insn.
- */
-
- if (chan > 7) {
- chan -= 8;
- iobase += PCMMIO_AI_2ND_ADC_OFFSET;
- }
-
- if (aref == AREF_GROUND)
- cmd |= PCMMIO_AI_CMD_SE;
- if (chan % 2)
- cmd |= PCMMIO_AI_CMD_ODD_CHAN;
- cmd |= PCMMIO_AI_CMD_CHAN_SEL(chan / 2);
- cmd |= PCMMIO_AI_CMD_RANGE(range);
-
- outb(cmd, iobase + PCMMIO_AI_CMD_REG);
-
- ret = comedi_timeout(dev, s, insn, pcmmio_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = inb(iobase + PCMMIO_AI_LSB_REG);
- val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8;
-
- for (i = 0; i < insn->n; i++) {
- outb(cmd, iobase + PCMMIO_AI_CMD_REG);
-
- ret = comedi_timeout(dev, s, insn, pcmmio_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = inb(iobase + PCMMIO_AI_LSB_REG);
- val |= inb(iobase + PCMMIO_AI_MSB_REG) << 8;
-
- /* bipolar data is two's complement */
- if (comedi_range_is_bipolar(s, range))
- val = comedi_offset_munge(s, val);
-
- data[i] = val;
- }
-
- return insn->n;
-}
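
The bipolar fix-up above is comedi's standard two's-complement to offset-binary conversion; for a 16-bit converter such as the LTC1859 it amounts to flipping the sign bit. A standalone sketch of that remapping (the helper name is invented for illustration; the driver itself calls comedi_offset_munge()):

#include <stdio.h>
#include <stdint.h>

/* Two's complement <-> offset binary on a 16-bit sample is a sign-bit flip. */
static uint16_t offset_munge16(uint16_t twos_complement)
{
	return twos_complement ^ 0x8000;
}

int main(void)
{
	/* negative full scale, zero, positive full scale as the ADC reports them */
	uint16_t raw[] = { 0x8000, 0x0000, 0x7fff };
	int i;

	for (i = 0; i < 3; i++)
		printf("raw 0x%04x -> comedi sample 0x%04x\n",
		       raw[i], offset_munge16(raw[i]));
	return 0;
}
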
-
-static int pcmmio_ao_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned char status;
-
- status = inb(dev->iobase + PCMMIO_AO_STATUS_REG);
- if (status & PCMMIO_AO_STATUS_DATA_READY)
- return 0;
- return -EBUSY;
-}
-
-static int pcmmio_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long iobase = dev->iobase;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned char cmd = 0;
- int ret;
- int i;
-
- /*
- * The PCM-MIO has two Linear Tech LTC2704 DAC devices. Each device
- * is a 4-channel converter with software-selectable output range.
- */
-
- if (chan > 3) {
- cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan - 4);
- iobase += PCMMIO_AO_2ND_DAC_OFFSET;
- } else {
- cmd |= PCMMIO_AO_CMD_CHAN_SEL(chan);
- }
-
- /* set the range for the channel */
- outb(PCMMIO_AO_LSB_SPAN(range), iobase + PCMMIO_AO_LSB_REG);
- outb(0, iobase + PCMMIO_AO_MSB_REG);
- outb(cmd | PCMMIO_AO_CMD_WR_SPAN_UPDATE, iobase + PCMMIO_AO_CMD_REG);
-
- ret = comedi_timeout(dev, s, insn, pcmmio_ao_eoc, 0);
- if (ret)
- return ret;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- /* write the data to the channel */
- outb(val & 0xff, iobase + PCMMIO_AO_LSB_REG);
- outb((val >> 8) & 0xff, iobase + PCMMIO_AO_MSB_REG);
- outb(cmd | PCMMIO_AO_CMD_WR_CODE_UPDATE,
- iobase + PCMMIO_AO_CMD_REG);
-
- ret = comedi_timeout(dev, s, insn, pcmmio_ao_eoc, 0);
- if (ret)
- return ret;
-
- s->readback[chan] = val;
- }
-
- return insn->n;
-}
-
-static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct pcmmio_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 32);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- spin_lock_init(&devpriv->pagelock);
- spin_lock_init(&devpriv->spinlock);
-
- pcmmio_reset(dev);
-
- if (it->options[1]) {
- ret = request_irq(it->options[1], interrupt_pcmmio, 0,
- dev->board_name, dev);
- if (ret == 0) {
- dev->irq = it->options[1];
-
- /* configure the interrupt routing on the board */
- outb(PCMMIO_AI_RES_ENA_DIO_RES_ACCESS,
- dev->iobase + PCMMIO_AI_RES_ENA_REG);
- outb(PCMMIO_RESOURCE_IRQ(dev->irq),
- dev->iobase + PCMMIO_RESOURCE_REG);
- }
- }
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 16;
- s->maxdata = 0xffff;
- s->range_table = &pcmmio_ai_ranges;
- s->insn_read = pcmmio_ai_insn_read;
-
- /* initialize the resource enable register by clearing it */
- outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS,
- dev->iobase + PCMMIO_AI_RES_ENA_REG);
- outb(PCMMIO_AI_RES_ENA_CMD_REG_ACCESS,
- dev->iobase + PCMMIO_AI_RES_ENA_REG + PCMMIO_AI_2ND_ADC_OFFSET);
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->maxdata = 0xffff;
- s->range_table = &pcmmio_ao_ranges;
- s->insn_write = pcmmio_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* initialize the resource enable register by clearing it */
- outb(0, dev->iobase + PCMMIO_AO_RESOURCE_ENA_REG);
- outb(0, dev->iobase + PCMMIO_AO_2ND_DAC_OFFSET +
- PCMMIO_AO_RESOURCE_ENA_REG);
-
- /* Digital I/O subdevice with interrupt support */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcmmio_dio_insn_bits;
- s->insn_config = pcmmio_dio_insn_config;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL | SDF_PACKED;
- s->len_chanlist = s->n_chan;
- s->cancel = pcmmio_cancel;
- s->do_cmd = pcmmio_cmd;
- s->do_cmdtest = pcmmio_cmdtest;
- }
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcmmio_dio_insn_bits;
- s->insn_config = pcmmio_dio_insn_config;
-
- return 0;
-}
-
-static struct comedi_driver pcmmio_driver = {
- .driver_name = "pcmmio",
- .module = THIS_MODULE,
- .attach = pcmmio_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(pcmmio_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Winsystems PCM-MIO PC/104 board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
deleted file mode 100644
index 7ea813022ff6..000000000000
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * pcmuio.c
- * Comedi driver for Winsystems PC-104 based 48/96-channel DIO boards.
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: pcmuio
- * Description: Winsystems PC-104 based 48/96-channel DIO boards.
- * Devices: [Winsystems] PCM-UIO48A (pcmuio48), PCM-UIO96A (pcmuio96)
- * Author: Calin Culianu <calin@ajvar.org>
- * Updated: Fri, 13 Jan 2006 12:01:01 -0500
- * Status: works
- *
- * A driver for the relatively straightforward-to-program PCM-UIO48A and
- * PCM-UIO96A boards from Winsystems. These boards use either one or two
- * (in the 96-DIO version) WS16C48 ASIC HighDensity I/O Chips (HDIO). This
- * chip is interesting in that each I/O line is individually programmable
- * for INPUT or OUTPUT (thus comedi_dio_config can be done on a per-channel
- * basis). Also, each chip supports edge-triggered interrupts for the first
- * 24 I/O lines. Of course, since the 96-channel version of the board has
- * two ASICs, it can detect polarity changes on up to 48 I/O lines. Since
- * this is essentially an (non-PnP) ISA board, I/O Address and IRQ selection
- * are done through jumpers on the board. You need to pass that information
- * to this driver as the first and second comedi_config option, respectively.
- * Note that the 48-channel version uses 16 bytes of IO memory and the 96-
- * channel version uses 32 bytes (in case you are worried about conflicts).
- * The 48-channel board is split into two 24-channel comedi subdevices. The
- * 96-channel board is split into 4 24-channel DIO subdevices.
- *
- * Note that IRQ support has been added, but it is untested.
- *
- * To use edge-detection IRQ support, pass the IRQs of both ASICs (for the
- * 96-channel version) or just one ASIC (for the 48-channel version). Then, use
- * comedi_commands with TRIG_NOW. Your callback will be called each time an
- * edge is triggered, and the data values will be two sample_t's, which
- * should be concatenated to form one 32-bit unsigned int. This value is
- * the mask of channels that had edges detected from your channel list. Note
- * that the bits positions in the mask correspond to positions in your
- * chanlist when you specified the command and *not* channel id's!
- *
- * To set the polarity of the edge-detection interrupts pass a nonzero value
- * for either CR_RANGE or CR_AREF for edge-up polarity, or a zero value for
- * both CR_RANGE and CR_AREF if you want edge-down polarity.
- *
- * In the 48-channel version:
- *
- * On subdev 0, the first 24 channels are edge-detect channels.
- *
- * In the 96-channel board you have the following channels that can do edge
- * detection:
- *
- * subdev 0, channels 0-23 (first 24 channels of 1st ASIC)
- * subdev 2, channels 0-23 (first 24 channels of 2nd ASIC)
- *
- * Configuration Options:
- * [0] - I/O port base address
- * [1] - IRQ (for first ASIC, or first 24 channels)
- * [2] - IRQ (for second ASIC, pcmuio96 only - IRQ for chans 48-71
- * can be the same as first irq!)
- */
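
To make the sample format described above concrete, the user-space style sketch below decodes one 32-bit edge sample against the chanlist that was passed in the command. The chanspec packing macros are reproduced locally and the chanlist contents are placeholders; the point is only that bit i of the sample refers to chanlist slot i, not to channel number i.

#include <stdio.h>
#include <stdint.h>

/* Chanspec packing as used by comedi chanlists. */
#define CR_PACK(chan, rng, aref) ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
#define CR_CHAN(a)		 ((a) & 0xffff)

int main(void)
{
	/* three edge-detect channels; nonzero CR_RANGE selects edge-up polarity */
	unsigned int chanlist[] = {
		CR_PACK(5, 1, 0),
		CR_PACK(7, 1, 0),
		CR_PACK(12, 1, 0),
	};
	unsigned int n_chan = 3;
	/* one sample as delivered by the driver: edges seen on slots 0 and 2 */
	uint32_t sample = (1u << 0) | (1u << 2);
	unsigned int i;

	for (i = 0; i < n_chan; i++)
		if (sample & (1u << i))
			printf("edge on channel %u (chanlist slot %u)\n",
			       CR_CHAN(chanlist[i]), i);
	return 0;
}
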
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-/*
- * Register I/O map
- *
- * Offset Page 0 Page 1 Page 2 Page 3
- * ------ ----------- ----------- ----------- -----------
- * 0x00 Port 0 I/O Port 0 I/O Port 0 I/O Port 0 I/O
- * 0x01 Port 1 I/O Port 1 I/O Port 1 I/O Port 1 I/O
- * 0x02 Port 2 I/O Port 2 I/O Port 2 I/O Port 2 I/O
- * 0x03 Port 3 I/O Port 3 I/O Port 3 I/O Port 3 I/O
- * 0x04 Port 4 I/O Port 4 I/O Port 4 I/O Port 4 I/O
- * 0x05 Port 5 I/O Port 5 I/O Port 5 I/O Port 5 I/O
- * 0x06 INT_PENDING INT_PENDING INT_PENDING INT_PENDING
- * 0x07 Page/Lock Page/Lock Page/Lock Page/Lock
- * 0x08 N/A POL_0 ENAB_0 INT_ID0
- * 0x09 N/A POL_1 ENAB_1 INT_ID1
- * 0x0a N/A POL_2 ENAB_2 INT_ID2
- */
-#define PCMUIO_PORT_REG(x) (0x00 + (x))
-#define PCMUIO_INT_PENDING_REG 0x06
-#define PCMUIO_PAGE_LOCK_REG 0x07
-#define PCMUIO_LOCK_PORT(x) ((1 << (x)) & 0x3f)
-#define PCMUIO_PAGE(x) (((x) & 0x3) << 6)
-#define PCMUIO_PAGE_MASK PCMUIO_PAGE(3)
-#define PCMUIO_PAGE_POL 1
-#define PCMUIO_PAGE_ENAB 2
-#define PCMUIO_PAGE_INT_ID 3
-#define PCMUIO_PAGE_REG(x) (0x08 + (x))
-
-#define PCMUIO_ASIC_IOSIZE 0x10
-#define PCMUIO_MAX_ASICS 2
-
-struct pcmuio_board {
- const char *name;
- const int num_asics;
-};
-
-static const struct pcmuio_board pcmuio_boards[] = {
- {
- .name = "pcmuio48",
- .num_asics = 1,
- }, {
- .name = "pcmuio96",
- .num_asics = 2,
- },
-};
-
-struct pcmuio_asic {
- spinlock_t pagelock; /* protects the page registers */
- spinlock_t spinlock; /* protects member variables */
- unsigned int enabled_mask;
- unsigned int active:1;
-};
-
-struct pcmuio_private {
- struct pcmuio_asic asics[PCMUIO_MAX_ASICS];
- unsigned int irq2;
-};
-
-static inline unsigned long pcmuio_asic_iobase(struct comedi_device *dev,
- int asic)
-{
- return dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
-}
-
-static inline int pcmuio_subdevice_to_asic(struct comedi_subdevice *s)
-{
- /*
- * subdevice 0 and 1 are handled by the first asic
- * subdevice 2 and 3 are handled by the second asic
- */
- return s->index / 2;
-}
-
-static inline int pcmuio_subdevice_to_port(struct comedi_subdevice *s)
-{
- /*
- * subdevice 0 and 2 use port registers 0-2
- * subdevice 1 and 3 use port registers 3-5
- */
- return (s->index % 2) ? 3 : 0;
-}
-
-static void pcmuio_write(struct comedi_device *dev, unsigned int val,
- int asic, int page, int port)
-{
- struct pcmuio_private *devpriv = dev->private;
- struct pcmuio_asic *chip = &devpriv->asics[asic];
- unsigned long iobase = pcmuio_asic_iobase(dev, asic);
- unsigned long flags;
-
- spin_lock_irqsave(&chip->pagelock, flags);
- if (page == 0) {
- /* Port registers are valid for any page */
- outb(val & 0xff, iobase + PCMUIO_PORT_REG(port + 0));
- outb((val >> 8) & 0xff, iobase + PCMUIO_PORT_REG(port + 1));
- outb((val >> 16) & 0xff, iobase + PCMUIO_PORT_REG(port + 2));
- } else {
- outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
- outb(val & 0xff, iobase + PCMUIO_PAGE_REG(0));
- outb((val >> 8) & 0xff, iobase + PCMUIO_PAGE_REG(1));
- outb((val >> 16) & 0xff, iobase + PCMUIO_PAGE_REG(2));
- }
- spin_unlock_irqrestore(&chip->pagelock, flags);
-}
-
-static unsigned int pcmuio_read(struct comedi_device *dev,
- int asic, int page, int port)
-{
- struct pcmuio_private *devpriv = dev->private;
- struct pcmuio_asic *chip = &devpriv->asics[asic];
- unsigned long iobase = pcmuio_asic_iobase(dev, asic);
- unsigned long flags;
- unsigned int val;
-
- spin_lock_irqsave(&chip->pagelock, flags);
- if (page == 0) {
- /* Port registers are valid for any page */
- val = inb(iobase + PCMUIO_PORT_REG(port + 0));
- val |= (inb(iobase + PCMUIO_PORT_REG(port + 1)) << 8);
- val |= (inb(iobase + PCMUIO_PORT_REG(port + 2)) << 16);
- } else {
- outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
- val = inb(iobase + PCMUIO_PAGE_REG(0));
- val |= (inb(iobase + PCMUIO_PAGE_REG(1)) << 8);
- val |= (inb(iobase + PCMUIO_PAGE_REG(2)) << 16);
- }
- spin_unlock_irqrestore(&chip->pagelock, flags);
-
- return val;
-}
-
-/*
- * Each channel can be individually programmed for input or output.
- * Writing a '0' to a channel causes the corresponding output pin
- * to go to a high-z state (pulled high by an external 10K resistor).
- * This allows it to be used as an input. When used in the input mode,
- * a read reflects the inverted state of the I/O pin, such that a
- * high on the pin will read as a '0' in the register. Writing a '1'
- * to a bit position causes the pin to sink current (up to 12mA),
- * effectively pulling it low.
- */
-static int pcmuio_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int asic = pcmuio_subdevice_to_asic(s);
- int port = pcmuio_subdevice_to_port(s);
- unsigned int chanmask = (1 << s->n_chan) - 1;
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- /*
- * Outputs are inverted, invert the state and
- * update the channels.
- *
- * The s->io_bits mask makes sure the input channels
- * are '0' so that the output pins stay in a high
- * z-state.
- */
- val = ~s->state & chanmask;
- val &= s->io_bits;
- pcmuio_write(dev, val, asic, 0, port);
- }
-
- /* get inverted state of the channels from the port */
- val = pcmuio_read(dev, asic, 0, port);
-
- /* return the true state of the channels */
- data[1] = ~val & chanmask;
-
- return insn->n;
-}
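
A short worked example of the inversion above, assuming channels 0-3 are outputs, the requested state is channels 0 and 2 high, and nothing external drives the input pins (all values purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int n_chan   = 24;
	unsigned int chanmask = (1u << n_chan) - 1;
	unsigned int io_bits  = 0x00000f;	/* channels 0-3 configured as outputs */
	unsigned int state    = 0x000005;	/* want channels 0 and 2 high */

	/*
	 * Invert, then mask to the outputs: inputs stay 0 (high-z), and a
	 * written '1' sinks current, i.e. drives that pin low.
	 */
	unsigned int port_val = (~state & chanmask) & io_bits;

	/*
	 * With the inputs pulled high (and undriven) the port register reads
	 * back the same value, so re-inverting gives the comedi channel state.
	 */
	unsigned int channels = ~port_val & chanmask;

	printf("write 0x%06x to the port, channels read back as 0x%06x\n",
	       port_val, channels);
	return 0;
}
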
-
-static int pcmuio_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int asic = pcmuio_subdevice_to_asic(s);
- int port = pcmuio_subdevice_to_port(s);
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- if (data[0] == INSN_CONFIG_DIO_INPUT)
- pcmuio_write(dev, s->io_bits, asic, 0, port);
-
- return insn->n;
-}
-
-static void pcmuio_reset(struct comedi_device *dev)
-{
- const struct pcmuio_board *board = dev->board_ptr;
- int asic;
-
- for (asic = 0; asic < board->num_asics; ++asic) {
- /* first, clear all the DIO port bits */
- pcmuio_write(dev, 0, asic, 0, 0);
- pcmuio_write(dev, 0, asic, 0, 3);
-
- /* Next, clear all the paged registers for each page */
- pcmuio_write(dev, 0, asic, PCMUIO_PAGE_POL, 0);
- pcmuio_write(dev, 0, asic, PCMUIO_PAGE_ENAB, 0);
- pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
- }
-}
-
-/* chip->spinlock is already locked */
-static void pcmuio_stop_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcmuio_private *devpriv = dev->private;
- int asic = pcmuio_subdevice_to_asic(s);
- struct pcmuio_asic *chip = &devpriv->asics[asic];
-
- chip->enabled_mask = 0;
- chip->active = 0;
- s->async->inttrig = NULL;
-
- /* disable all intrs for this subdev.. */
- pcmuio_write(dev, 0, asic, PCMUIO_PAGE_ENAB, 0);
-}
-
-static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned triggered)
-{
- struct pcmuio_private *devpriv = dev->private;
- int asic = pcmuio_subdevice_to_asic(s);
- struct pcmuio_asic *chip = &devpriv->asics[asic];
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int val = 0;
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&chip->spinlock, flags);
-
- if (!chip->active)
- goto done;
-
- if (!(triggered & chip->enabled_mask))
- goto done;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- if (triggered & (1 << chan))
- val |= (1 << i);
- }
-
- comedi_buf_write_samples(s, &val, 1);
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
-
-done:
- spin_unlock_irqrestore(&chip->spinlock, flags);
-
- comedi_handle_events(dev, s);
-}
-
-static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic)
-{
-	/* there could be two asics so we can't use dev->read_subdev */
- struct comedi_subdevice *s = &dev->subdevices[asic * 2];
- unsigned long iobase = pcmuio_asic_iobase(dev, asic);
- unsigned int val;
-
- /* are there any interrupts pending */
- val = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07;
- if (!val)
- return 0;
-
- /* get, and clear, the pending interrupts */
- val = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0);
- pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
-
- /* handle the pending interrupts */
- pcmuio_handle_intr_subdev(dev, s, val);
-
- return 1;
-}
-
-static irqreturn_t pcmuio_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct pcmuio_private *devpriv = dev->private;
- int handled = 0;
-
- if (irq == dev->irq)
- handled += pcmuio_handle_asic_interrupt(dev, 0);
- if (irq == devpriv->irq2)
- handled += pcmuio_handle_asic_interrupt(dev, 1);
-
- return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/* chip->spinlock is already locked */
-static void pcmuio_start_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcmuio_private *devpriv = dev->private;
- int asic = pcmuio_subdevice_to_asic(s);
- struct pcmuio_asic *chip = &devpriv->asics[asic];
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int bits = 0;
- unsigned int pol_bits = 0;
- int i;
-
- chip->enabled_mask = 0;
- chip->active = 1;
- if (cmd->chanlist) {
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chanspec = cmd->chanlist[i];
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
-
- bits |= (1 << chan);
- pol_bits |= ((aref || range) ? 1 : 0) << chan;
- }
- }
- bits &= ((1 << s->n_chan) - 1);
- chip->enabled_mask = bits;
-
- /* set pol and enab intrs for this subdev.. */
- pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0);
- pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0);
-}
-
-static int pcmuio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcmuio_private *devpriv = dev->private;
- int asic = pcmuio_subdevice_to_asic(s);
- struct pcmuio_asic *chip = &devpriv->asics[asic];
- unsigned long flags;
-
- spin_lock_irqsave(&chip->spinlock, flags);
- if (chip->active)
- pcmuio_stop_intr(dev, s);
- spin_unlock_irqrestore(&chip->spinlock, flags);
-
- return 0;
-}
-
-static int pcmuio_inttrig_start_intr(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct pcmuio_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int asic = pcmuio_subdevice_to_asic(s);
- struct pcmuio_asic *chip = &devpriv->asics[asic];
- unsigned long flags;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- spin_lock_irqsave(&chip->spinlock, flags);
- s->async->inttrig = NULL;
- if (chip->active)
- pcmuio_start_intr(dev, s);
-
- spin_unlock_irqrestore(&chip->spinlock, flags);
-
- return 1;
-}
-
-/*
- * 'do_cmd' function for an 'INTERRUPT' subdevice.
- */
-static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pcmuio_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int asic = pcmuio_subdevice_to_asic(s);
- struct pcmuio_asic *chip = &devpriv->asics[asic];
- unsigned long flags;
-
- spin_lock_irqsave(&chip->spinlock, flags);
- chip->active = 1;
-
- /* Set up start of acquisition. */
- if (cmd->start_src == TRIG_INT)
- s->async->inttrig = pcmuio_inttrig_start_intr;
- else /* TRIG_NOW */
- pcmuio_start_intr(dev, s);
-
- spin_unlock_irqrestore(&chip->spinlock, flags);
-
- return 0;
-}
-
-static int pcmuio_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- /* if (err) return 4; */
-
- return 0;
-}
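
Seen from user space, the only command shape this cmdtest accepts is an externally paced scan with zeroed timing arguments and the full chanlist per scan. A comedilib-style sketch, with the device handle, subdevice number, chanlist and error handling left as placeholders:

#include <comedilib.h>
#include <string.h>

/*
 * Minimal sketch: start edge-detection streaming on an interrupt-capable
 * DIO subdevice.  Assumes dev came from comedi_open() and chanlist[] was
 * filled with CR_PACK()ed edge channels.
 */
static int start_edge_stream(comedi_t *dev, unsigned int subdev,
			     unsigned int *chanlist, unsigned int n_chan)
{
	comedi_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev	   = subdev;
	cmd.start_src	   = TRIG_NOW;	/* TRIG_INT is also accepted */
	cmd.scan_begin_src = TRIG_EXT;	/* each detected edge is one scan */
	cmd.convert_src	   = TRIG_NOW;
	cmd.scan_end_src   = TRIG_COUNT;
	cmd.scan_end_arg   = n_chan;
	cmd.stop_src	   = TRIG_NONE;	/* run until cancelled */
	cmd.chanlist	   = chanlist;
	cmd.chanlist_len   = n_chan;

	return comedi_command(dev, &cmd);
}
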
-
-static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct pcmuio_board *board = dev->board_ptr;
- struct comedi_subdevice *s;
- struct pcmuio_private *devpriv;
- int ret;
- int i;
-
- ret = comedi_request_region(dev, it->options[0],
- board->num_asics * PCMUIO_ASIC_IOSIZE);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
- struct pcmuio_asic *chip = &devpriv->asics[i];
-
- spin_lock_init(&chip->pagelock);
- spin_lock_init(&chip->spinlock);
- }
-
- pcmuio_reset(dev);
-
- if (it->options[1]) {
- /* request the irq for the 1st asic */
- ret = request_irq(it->options[1], pcmuio_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = it->options[1];
- }
-
- if (board->num_asics == 2) {
- if (it->options[2] == dev->irq) {
- /* the same irq (or none) is used by both asics */
- devpriv->irq2 = it->options[2];
- } else if (it->options[2]) {
- /* request the irq for the 2nd asic */
- ret = request_irq(it->options[2], pcmuio_interrupt, 0,
- dev->board_name, dev);
- if (ret == 0)
- devpriv->irq2 = it->options[2];
- }
- }
-
- ret = comedi_alloc_subdevices(dev, board->num_asics * 2);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->n_subdevices; ++i) {
- s = &dev->subdevices[i];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcmuio_dio_insn_bits;
- s->insn_config = pcmuio_dio_insn_config;
-
- /* subdevices 0 and 2 can support interrupts */
- if ((i == 0 && dev->irq) || (i == 2 && devpriv->irq2)) {
- /* setup the interrupt subdevice */
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL |
- SDF_PACKED;
- s->len_chanlist = s->n_chan;
- s->cancel = pcmuio_cancel;
- s->do_cmd = pcmuio_cmd;
- s->do_cmdtest = pcmuio_cmdtest;
- }
- }
-
- return 0;
-}
-
-static void pcmuio_detach(struct comedi_device *dev)
-{
- struct pcmuio_private *devpriv = dev->private;
-
- if (devpriv) {
- pcmuio_reset(dev);
-
- /* free the 2nd irq if used, the core will free the 1st one */
- if (devpriv->irq2 && devpriv->irq2 != dev->irq)
- free_irq(devpriv->irq2, dev);
- }
- comedi_legacy_detach(dev);
-}
-
-static struct comedi_driver pcmuio_driver = {
- .driver_name = "pcmuio",
- .module = THIS_MODULE,
- .attach = pcmuio_attach,
- .detach = pcmuio_detach,
- .board_name = &pcmuio_boards[0].name,
- .offset = sizeof(struct pcmuio_board),
- .num_names = ARRAY_SIZE(pcmuio_boards),
-};
-module_comedi_driver(pcmuio_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/plx9052.h b/drivers/staging/comedi/drivers/plx9052.h
deleted file mode 100644
index fbcf25069807..000000000000
--- a/drivers/staging/comedi/drivers/plx9052.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- comedi/drivers/plx9052.h
- Definitions for the PLX-9052 PCI interface chip
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-
-#ifndef _PLX9052_H_
-#define _PLX9052_H_
-
-/*
- * INTCSR - Interrupt Control/Status register
- */
-#define PLX9052_INTCSR 0x4c
-#define PLX9052_INTCSR_LI1ENAB (1 << 0) /* LI1 enabled */
-#define PLX9052_INTCSR_LI1POL (1 << 1) /* LI1 active high */
-#define PLX9052_INTCSR_LI1STAT (1 << 2) /* LI1 active */
-#define PLX9052_INTCSR_LI2ENAB (1 << 3) /* LI2 enabled */
-#define PLX9052_INTCSR_LI2POL (1 << 4) /* LI2 active high */
-#define PLX9052_INTCSR_LI2STAT (1 << 5) /* LI2 active */
-#define PLX9052_INTCSR_PCIENAB (1 << 6) /* PCIINT enabled */
-#define PLX9052_INTCSR_SOFTINT (1 << 7) /* generate soft int */
-#define PLX9052_INTCSR_LI1SEL (1 << 8) /* LI1 edge */
-#define PLX9052_INTCSR_LI2SEL (1 << 9) /* LI2 edge */
-#define PLX9052_INTCSR_LI1CLRINT (1 << 10) /* LI1 clear int */
-#define PLX9052_INTCSR_LI2CLRINT (1 << 11) /* LI2 clear int */
-#define PLX9052_INTCSR_ISAMODE (1 << 12) /* ISA interface mode */
-
-/*
- * CNTRL - User I/O, Direct Slave Response, Serial EEPROM, and
- * Initialization Control register
- */
-#define PLX9052_CNTRL 0x50
-#define PLX9052_CNTRL_WAITO (1 << 0) /* UIO0 or WAITO# select */
-#define PLX9052_CNTRL_UIO0_DIR (1 << 1) /* UIO0 direction */
-#define PLX9052_CNTRL_UIO0_DATA (1 << 2) /* UIO0 data */
-#define PLX9052_CNTRL_LLOCKO (1 << 3) /* UIO1 or LLOCKo# select */
-#define PLX9052_CNTRL_UIO1_DIR (1 << 4) /* UIO1 direction */
-#define PLX9052_CNTRL_UIO1_DATA (1 << 5) /* UIO1 data */
-#define PLX9052_CNTRL_CS2 (1 << 6) /* UIO2 or CS2# select */
-#define PLX9052_CNTRL_UIO2_DIR (1 << 7) /* UIO2 direction */
-#define PLX9052_CNTRL_UIO2_DATA (1 << 8) /* UIO2 data */
-#define PLX9052_CNTRL_CS3 (1 << 9) /* UIO3 or CS3# select */
-#define PLX9052_CNTRL_UIO3_DIR (1 << 10) /* UIO3 direction */
-#define PLX9052_CNTRL_UIO3_DATA (1 << 11) /* UIO3 data */
-#define PLX9052_CNTRL_PCIBAR01 (0 << 12) /* bar 0 (mem) and 1 (I/O) */
-#define PLX9052_CNTRL_PCIBAR0 (1 << 12) /* bar 0 (mem) only */
-#define PLX9052_CNTRL_PCIBAR1 (2 << 12) /* bar 1 (I/O) only */
-#define PLX9052_CNTRL_PCI2_1_FEATURES (1 << 14) /* PCI r2.1 features enabled */
-#define PLX9052_CNTRL_PCI_R_W_FLUSH (1 << 15) /* read w/write flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_FLUSH (1 << 16) /* read no flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_WRITE (1 << 17) /* read no write mode */
-#define PLX9052_CNTRL_PCI_W_RELEASE (1 << 18) /* write release bus mode */
-#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* slave retry clks */
-#define PLX9052_CNTRL_LOCK_ENAB (1 << 23) /* slave LOCK# enable */
-#define PLX9052_CNTRL_EEPROM_MASK (0x1f << 24) /* EEPROM bits */
-#define PLX9052_CNTRL_EEPROM_CLK (1 << 24) /* EEPROM clock */
-#define PLX9052_CNTRL_EEPROM_CS (1 << 25) /* EEPROM chip select */
-#define PLX9052_CNTRL_EEPROM_DOUT (1 << 26) /* EEPROM write bit */
-#define PLX9052_CNTRL_EEPROM_DIN (1 << 27) /* EEPROM read bit */
-#define PLX9052_CNTRL_EEPROM_PRESENT (1 << 28) /* EEPROM present */
-#define PLX9052_CNTRL_RELOAD_CFG (1 << 29) /* reload configuration */
-#define PLX9052_CNTRL_PCI_RESET (1 << 30) /* PCI adapter reset */
-#define PLX9052_CNTRL_MASK_REV (1 << 31) /* mask revision */
-
-#endif /* _PLX9052_H_ */
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
deleted file mode 100644
index 25706531b885..000000000000
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ /dev/null
@@ -1,422 +0,0 @@
-/* plx9080.h
- *
- * Copyright (C) 2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * I modified this file from the plx9060.h header for the
- * wanXL device driver in the linux kernel,
- * for the register offsets and bit definitions. Made minor modifications,
- * added plx9080 registers and
- * stripped out stuff that was specifically for the wanXL driver.
- * Note: I've only made sure the definitions are correct as far
- * as I make use of them. There are still various plx9060-isms
- * left in this header file.
- *
- ********************************************************************
- *
- * Copyright (C) 1999 RG Studio s.c.
- * Written by Krzysztof Halasa <khc@rgstudio.com.pl>
- *
- * Portions (C) SBE Inc., used by permission.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef __COMEDI_PLX9080_H
-#define __COMEDI_PLX9080_H
-
-/* descriptor block used for chained dma transfers */
-struct plx_dma_desc {
- __le32 pci_start_addr;
- __le32 local_start_addr;
- /* transfer_size is in bytes, only first 23 bits of register are used */
- __le32 transfer_size;
- /* address of next descriptor (quad word aligned), plus some
- * additional bits (see PLX_DMA0_DESCRIPTOR_REG) */
- __le32 next;
-};
-
-/**********************************************************************
-** Register Offsets and Bit Definitions
-**
-** Note: All offsets zero relative. IE. Some standard base address
-** must be added to the Register Number to properly access the register.
-**
-**********************************************************************/
-
-#define PLX_LAS0RNG_REG 0x0000 /* L, Local Addr Space 0 Range Register */
-#define PLX_LAS1RNG_REG 0x00f0 /* L, Local Addr Space 1 Range Register */
-#define LRNG_IO 0x00000001 /* Map to: 1=I/O, 0=Mem */
-#define LRNG_ANY32 0x00000000 /* Locate anywhere in 32 bit */
-#define LRNG_LT1MB 0x00000002 /* Locate in 1st meg */
-#define LRNG_ANY64 0x00000004 /* Locate anywhere in 64 bit */
-#define LRNG_MEM_MASK 0xfffffff0 /* bits that specify range for memory io */
-#define LRNG_IO_MASK 0xfffffffa /* bits that specify range for normal io */
-
-#define PLX_LAS0MAP_REG 0x0004 /* L, Local Addr Space 0 Remap Register */
-#define PLX_LAS1MAP_REG 0x00f4 /* L, Local Addr Space 1 Remap Register */
-#define LMAP_EN 0x00000001 /* Enable slave decode */
-#define LMAP_MEM_MASK 0xfffffff0 /* bits that specify decode for memory io */
-#define LMAP_IO_MASK 0xfffffffa /* bits that specify decode bits for normal io */
-
-/* Mode/Arbitration Register.
-*/
-#define PLX_MARB_REG 0x8 /* L, Local Arbitration Register */
-#define PLX_DMAARB_REG 0xac
-enum marb_bits {
- MARB_LLT_MASK = 0x000000ff, /* Local Bus Latency Timer */
- MARB_LPT_MASK = 0x0000ff00, /* Local Bus Pause Timer */
- MARB_LTEN = 0x00010000, /* Latency Timer Enable */
- MARB_LPEN = 0x00020000, /* Pause Timer Enable */
- MARB_BREQ = 0x00040000, /* Local Bus BREQ Enable */
- MARB_DMA_PRIORITY_MASK = 0x00180000,
- MARB_LBDS_GIVE_UP_BUS_MODE = 0x00200000, /* local bus direct slave give up bus mode */
- MARB_DS_LLOCK_ENABLE = 0x00400000, /* direct slave LLOCKo# enable */
- MARB_PCI_REQUEST_MODE = 0x00800000,
- MARB_PCIv21_MODE = 0x01000000, /* pci specification v2.1 mode */
- MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
- MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
- MARB_GATE_TIMER_WITH_BREQ = 0x08000000, /* gate local bus latency timer with BREQ */
- MARB_PCI_READ_NO_FLUSH_MODE = 0x10000000,
- MARB_USE_SUBSYSTEM_IDS = 0x20000000,
-};
-
-#define PLX_BIGEND_REG 0xc
-enum bigend_bits {
- BIGEND_CONFIG = 0x1, /* use big endian ordering for configuration register accesses */
- BIGEND_DIRECT_MASTER = 0x2,
- BIGEND_DIRECT_SLAVE_LOCAL0 = 0x4,
- BIGEND_ROM = 0x8,
- BIGEND_BYTE_LANE = 0x10, /* use byte lane consisting of most significant bits instead of least significant */
- BIGEND_DIRECT_SLAVE_LOCAL1 = 0x20,
- BIGEND_DMA1 = 0x40,
- BIGEND_DMA0 = 0x80,
-};
-
-/* Note: The Expansion ROM stuff is only relevant to the PC environment.
-** This expansion ROM code is executed by the host CPU at boot time.
-** For this reason no bit definitions are provided here.
-*/
-#define PLX_ROMRNG_REG 0x0010 /* L, Expn ROM Space Range Register */
-#define PLX_ROMMAP_REG 0x0014 /* L, Local Addr Space Range Register */
-
-#define PLX_REGION0_REG 0x0018 /* L, Local Bus Region 0 Descriptor */
-#define RGN_WIDTH 0x00000002 /* Local bus width bits */
-#define RGN_8BITS 0x00000000 /* 08 bit Local Bus */
-#define RGN_16BITS 0x00000001 /* 16 bit Local Bus */
-#define RGN_32BITS 0x00000002 /* 32 bit Local Bus */
-#define RGN_MWS 0x0000003C /* Memory Access Wait States */
-#define RGN_0MWS 0x00000000
-#define RGN_1MWS 0x00000004
-#define RGN_2MWS 0x00000008
-#define RGN_3MWS 0x0000000C
-#define RGN_4MWS 0x00000010
-#define RGN_6MWS 0x00000018
-#define RGN_8MWS 0x00000020
-#define RGN_MRE 0x00000040 /* Memory Space Ready Input Enable */
-#define RGN_MBE 0x00000080 /* Memory Space Bterm Input Enable */
-#define RGN_READ_PREFETCH_DISABLE 0x00000100
-#define RGN_ROM_PREFETCH_DISABLE 0x00000200
-#define RGN_READ_PREFETCH_COUNT_ENABLE 0x00000400
-#define RGN_RWS 0x003C0000 /* Expn ROM Wait States */
-#define RGN_RRE 0x00400000 /* ROM Space Ready Input Enable */
-#define RGN_RBE 0x00800000 /* ROM Space Bterm Input Enable */
-#define RGN_MBEN 0x01000000 /* Memory Space Burst Enable */
-#define RGN_RBEN 0x04000000 /* ROM Space Burst Enable */
-#define RGN_THROT 0x08000000 /* De-assert TRDY when FIFO full */
-#define RGN_TRD 0xF0000000 /* Target Ready Delay /8 */
-
-#define PLX_REGION1_REG 0x00f8 /* L, Local Bus Region 1 Descriptor */
-
-#define PLX_DMRNG_REG 0x001C /* L, Direct Master Range Register */
-
-#define PLX_LBAPMEM_REG 0x0020 /* L, Lcl Base Addr for PCI mem space */
-
-#define PLX_LBAPIO_REG 0x0024 /* L, Lcl Base Addr for PCI I/O space */
-
-#define PLX_DMMAP_REG 0x0028 /* L, Direct Master Remap Register */
-#define DMM_MAE 0x00000001 /* Direct Mstr Memory Acc Enable */
-#define DMM_IAE 0x00000002 /* Direct Mstr I/O Acc Enable */
-#define DMM_LCK 0x00000004 /* LOCK Input Enable */
-#define DMM_PF4 0x00000008 /* Prefetch 4 Mode Enable */
-#define DMM_THROT 0x00000010 /* Assert IRDY when read FIFO full */
-#define DMM_PAF0 0x00000000 /* Programmable Almost fill level */
-#define DMM_PAF1 0x00000020 /* Programmable Almost fill level */
-#define DMM_PAF2 0x00000040 /* Programmable Almost fill level */
-#define DMM_PAF3 0x00000060 /* Programmable Almost fill level */
-#define DMM_PAF4 0x00000080 /* Programmable Almost fill level */
-#define DMM_PAF5 0x000000A0 /* Programmable Almost fill level */
-#define DMM_PAF6 0x000000C0 /* Programmable Almost fill level */
-#define DMM_PAF7 0x000000D0 /* Programmable Almost fill level */
-#define DMM_MAP 0xFFFF0000 /* Remap Address Bits */
-
-#define PLX_CAR_REG 0x002C /* L, Configuration Address Register */
-#define CAR_CT0 0x00000000 /* Config Type 0 */
-#define CAR_CT1 0x00000001 /* Config Type 1 */
-#define CAR_REG 0x000000FC /* Register Number Bits */
-#define CAR_FUN 0x00000700 /* Function Number Bits */
-#define CAR_DEV 0x0000F800 /* Device Number Bits */
-#define CAR_BUS 0x00FF0000 /* Bus Number Bits */
-#define CAR_CFG 0x80000000 /* Config Spc Access Enable */
-
-#define PLX_DBR_IN_REG 0x0060 /* L, PCI to Local Doorbell Register */
-
-#define PLX_DBR_OUT_REG 0x0064 /* L, Local to PCI Doorbell Register */
-
-#define PLX_INTRCS_REG 0x0068 /* L, Interrupt Control/Status Reg */
-#define ICS_AERR 0x00000001 /* Assert LSERR on ABORT */
-#define ICS_PERR 0x00000002 /* Assert LSERR on Parity Error */
-#define ICS_SERR 0x00000004 /* Generate PCI SERR# */
-#define ICS_MBIE 0x00000008 /* mailbox interrupt enable */
-#define ICS_PIE 0x00000100 /* PCI Interrupt Enable */
-#define ICS_PDIE 0x00000200 /* PCI Doorbell Interrupt Enable */
-#define ICS_PAIE 0x00000400 /* PCI Abort Interrupt Enable */
-#define ICS_PLIE 0x00000800 /* PCI Local Int Enable */
-#define ICS_RAE 0x00001000 /* Retry Abort Enable */
-#define ICS_PDIA 0x00002000 /* PCI Doorbell Interrupt Active */
-#define ICS_PAIA 0x00004000 /* PCI Abort Interrupt Active */
-#define ICS_LIA 0x00008000 /* Local Interrupt Active */
-#define ICS_LIE 0x00010000 /* Local Interrupt Enable */
-#define ICS_LDIE 0x00020000 /* Local Doorbell Int Enable */
-#define ICS_DMA0_E 0x00040000 /* DMA #0 Interrupt Enable */
-#define ICS_DMA1_E 0x00080000 /* DMA #1 Interrupt Enable */
-#define ICS_LDIA 0x00100000 /* Local Doorbell Int Active */
-#define ICS_DMA0_A 0x00200000 /* DMA #0 Interrupt Active */
-#define ICS_DMA1_A 0x00400000 /* DMA #1 Interrupt Active */
-#define ICS_BIA 0x00800000 /* BIST Interrupt Active */
-#define ICS_TA_DM 0x01000000 /* Target Abort - Direct Master */
-#define ICS_TA_DMA0 0x02000000 /* Target Abort - DMA #0 */
-#define ICS_TA_DMA1 0x04000000 /* Target Abort - DMA #1 */
-#define ICS_TA_RA 0x08000000 /* Target Abort - Retry Timeout */
-#define ICS_MBIA(x) (0x10000000 << ((x) & 0x3)) /* mailbox x is active */
-
-#define PLX_CONTROL_REG 0x006C /* L, EEPROM Cntl & PCI Cmd Codes */
-#define CTL_RDMA 0x0000000E /* DMA Read Command */
-#define CTL_WDMA 0x00000070 /* DMA Write Command */
-#define CTL_RMEM 0x00000600 /* Memory Read Command */
-#define CTL_WMEM 0x00007000 /* Memory Write Command */
-#define CTL_USERO 0x00010000 /* USERO output pin control bit */
-#define CTL_USERI 0x00020000 /* USERI input pin bit */
-#define CTL_EE_CLK 0x01000000 /* EEPROM Clock line */
-#define CTL_EE_CS 0x02000000 /* EEPROM Chip Select */
-#define CTL_EE_W 0x04000000 /* EEPROM Write bit */
-#define CTL_EE_R 0x08000000 /* EEPROM Read bit */
-#define CTL_EECHK 0x10000000 /* EEPROM Present bit */
-#define CTL_EERLD 0x20000000 /* EEPROM Reload Register */
-#define CTL_RESET 0x40000000 /* !! Adapter Reset !! */
-#define CTL_READY 0x80000000 /* Local Init Done */
-
-#define PLX_ID_REG 0x70 /* hard-coded plx vendor and device ids */
-
-#define PLX_REVISION_REG 0x74 /* silicon revision */
-
-#define PLX_DMA0_MODE_REG 0x80 /* dma channel 0 mode register */
-#define PLX_DMA1_MODE_REG 0x94 /* dma channel 0 mode register */
-#define PLX_LOCAL_BUS_16_WIDE_BITS 0x1
-#define PLX_LOCAL_BUS_32_WIDE_BITS 0x3
-#define PLX_LOCAL_BUS_WIDTH_MASK 0x3
-#define PLX_DMA_EN_READYIN_BIT 0x40 /* enable ready in input */
-#define PLX_EN_BTERM_BIT 0x80 /* enable BTERM# input */
-#define PLX_DMA_LOCAL_BURST_EN_BIT 0x100 /* enable local burst mode */
-#define PLX_EN_CHAIN_BIT 0x200 /* enables chaining */
-#define PLX_EN_DMA_DONE_INTR_BIT 0x400 /* enables interrupt on dma done */
-#define PLX_LOCAL_ADDR_CONST_BIT 0x800 /* hold local address constant (don't increment) */
-#define PLX_DEMAND_MODE_BIT 0x1000 /* enables demand-mode for dma transfer */
-#define PLX_EOT_ENABLE_BIT 0x4000
-#define PLX_STOP_MODE_BIT 0x8000
-#define PLX_DMA_INTR_PCI_BIT 0x20000 /* routes dma interrupt to pci bus (instead of local bus) */
-
-#define PLX_DMA0_PCI_ADDRESS_REG 0x84 /* pci address that dma transfers start at */
-#define PLX_DMA1_PCI_ADDRESS_REG 0x98
-
-#define PLX_DMA0_LOCAL_ADDRESS_REG 0x88 /* local address that dma transfers start at */
-#define PLX_DMA1_LOCAL_ADDRESS_REG 0x9c
-
-#define PLX_DMA0_TRANSFER_SIZE_REG 0x8c /* number of bytes to transfer (first 23 bits) */
-#define PLX_DMA1_TRANSFER_SIZE_REG 0xa0
-
-#define PLX_DMA0_DESCRIPTOR_REG 0x90 /* descriptor pointer register */
-#define PLX_DMA1_DESCRIPTOR_REG 0xa4
-#define PLX_DESC_IN_PCI_BIT 0x1 /* descriptor is located in pci space (not local space) */
-#define PLX_END_OF_CHAIN_BIT 0x2 /* end of chain bit */
-#define PLX_INTR_TERM_COUNT 0x4 /* interrupt when this descriptor's transfer is finished */
-#define PLX_XFER_LOCAL_TO_PCI 0x8 /* transfer from local to pci bus (not pci to local) */
-
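
When chaining is enabled (PLX_EN_CHAIN_BIT in the DMA mode register), each descriptor's 'next' word carries the bits above in addition to the quad-word-aligned address of the following descriptor. A minimal kernel-style sketch of filling a two-entry local-to-PCI chain; the coherent allocation, the 32-bit bus addresses and the buffer layout are assumptions, only the bit usage comes from this header:

/*
 * Illustrative only: desc[] sits in PCI-visible coherent memory whose bus
 * address is desc_bus, and two 4 KiB local-bus buffers are pushed to two
 * host buffers.
 */
static void fill_demo_chain(struct plx_dma_desc *desc, dma_addr_t desc_bus,
			    u32 local_addr, dma_addr_t host0, dma_addr_t host1)
{
	desc[0].pci_start_addr	 = cpu_to_le32(host0);
	desc[0].local_start_addr = cpu_to_le32(local_addr);
	desc[0].transfer_size	 = cpu_to_le32(SZ_4K);
	/* next descriptor also lives in PCI space; direction is local -> PCI */
	desc[0].next = cpu_to_le32((desc_bus + sizeof(desc[0])) |
				   PLX_DESC_IN_PCI_BIT | PLX_XFER_LOCAL_TO_PCI);

	desc[1].pci_start_addr	 = cpu_to_le32(host1);
	desc[1].local_start_addr = cpu_to_le32(local_addr + SZ_4K);
	desc[1].transfer_size	 = cpu_to_le32(SZ_4K);
	/* terminate the chain and interrupt when the last transfer finishes */
	desc[1].next = cpu_to_le32(PLX_END_OF_CHAIN_BIT | PLX_INTR_TERM_COUNT |
				   PLX_XFER_LOCAL_TO_PCI);
}
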
-#define PLX_DMA0_CS_REG 0xa8 /* command status register */
-#define PLX_DMA1_CS_REG 0xa9
-#define PLX_DMA_EN_BIT 0x1 /* enable dma channel */
-#define PLX_DMA_START_BIT 0x2 /* start dma transfer */
-#define PLX_DMA_ABORT_BIT 0x4 /* abort dma transfer */
-#define PLX_CLEAR_DMA_INTR_BIT 0x8 /* clear dma interrupt */
-#define PLX_DMA_DONE_BIT 0x10 /* transfer done status bit */
-
-#define PLX_DMA0_THRESHOLD_REG 0xb0 /* command status register */
-
-/*
- * Accesses near the end of memory can cause the PLX chip
- * to pre-fetch data from past the end of RAM.  Limit the size of
- * memory so that such host-side accesses cannot occur.
- */
-
-#define PLX_PREFETCH 32
-
-/*
- * The PCI Interface, via the PCI-9060 Chip, has up to eight (8) Mailbox
- * Registers. The PUTS (Power-Up Test Suite) handles the board-side
- * interface/interaction using the first 4 registers. Specifications for
- * the use of the full PUTS' command and status interface is contained
- * within a separate SBE PUTS Manual. The Host-Side Device Driver only
- * uses a subset of the full PUTS interface.
- */
-
-/*****************************************/
-/*** MAILBOX #(-1) - MEM ACCESS STS ***/
-/*****************************************/
-
-#define MBX_STS_VALID 0x57584744 /* 'WXGD' */
-#define MBX_STS_DILAV 0x44475857 /* swapped = 'DGXW' */
-
-/*****************************************/
-/*** MAILBOX #0 - PUTS STATUS ***/
-/*****************************************/
-
-#define MBX_STS_MASK 0x000000ff /* PUTS Status Register bits */
-#define MBX_STS_TMASK 0x0000000f /* register bits for TEST number */
-
-#define MBX_STS_PCIRESET 0x00000100 /* Host issued PCI reset request */
-#define MBX_STS_BUSY 0x00000080 /* PUTS is in progress */
-#define MBX_STS_ERROR 0x00000040 /* PUTS has failed */
-#define MBX_STS_RESERVED 0x000000c0 /* Undefined -> status in transition.
- We are in process of changing
- bits; we SET Error bit before
- RESET of Busy bit */
-
-#define MBX_RESERVED_5 0x00000020 /* FYI: reserved/unused bit */
-#define MBX_RESERVED_4 0x00000010 /* FYI: reserved/unused bit */
-
-/******************************************/
-/*** MAILBOX #1 - PUTS COMMANDS ***/
-/******************************************/
-
-/*
- * Any attempt to execute an unimplemented command results in the PUTS
- * interface executing a NOOP and continuing as if the offending command
- * completed normally. Note: this supplies a simple method to interrogate
- * mailbox command processing functionality.
- */
-
-#define MBX_CMD_MASK 0xffff0000 /* PUTS Command Register bits */
-
-#define MBX_CMD_ABORTJ 0x85000000 /* abort and jump */
-#define MBX_CMD_RESETP 0x86000000 /* reset and pause at start */
-#define MBX_CMD_PAUSE 0x87000000 /* pause immediately */
-#define MBX_CMD_PAUSEC 0x88000000 /* pause on completion */
-#define MBX_CMD_RESUME 0x89000000 /* resume operation */
-#define MBX_CMD_STEP 0x8a000000 /* single step tests */
-
-#define MBX_CMD_BSWAP 0x8c000000 /* identify byte swap scheme */
-#define MBX_CMD_BSWAP_0 0x8c000000 /* use scheme 0 */
-#define MBX_CMD_BSWAP_1 0x8c000001 /* use scheme 1 */
-
-#define MBX_CMD_SETHMS 0x8d000000 /* setup host memory access window
- size */
-#define MBX_CMD_SETHBA 0x8e000000 /* setup host memory access base
- address */
-#define MBX_CMD_MGO 0x8f000000 /* perform memory setup and continue
- (IE. Done) */
-#define MBX_CMD_NOOP 0xFF000000 /* dummy, illegal command */
-
-/*****************************************/
-/*** MAILBOX #2 - MEMORY SIZE ***/
-/*****************************************/
-
-#define MBX_MEMSZ_MASK 0xffff0000 /* PUTS Memory Size Register bits */
-
-#define MBX_MEMSZ_128KB 0x00020000 /* 128 kilobyte board */
-#define MBX_MEMSZ_256KB 0x00040000 /* 256 kilobyte board */
-#define MBX_MEMSZ_512KB 0x00080000 /* 512 kilobyte board */
-#define MBX_MEMSZ_1MB 0x00100000 /* 1 megabyte board */
-#define MBX_MEMSZ_2MB 0x00200000 /* 2 megabyte board */
-#define MBX_MEMSZ_4MB 0x00400000 /* 4 megabyte board */
-#define MBX_MEMSZ_8MB 0x00800000 /* 8 megabyte board */
-#define MBX_MEMSZ_16MB 0x01000000 /* 16 megabyte board */
-
-/***************************************/
-/*** MAILBOX #2 - BOARD TYPE ***/
-/***************************************/
-
-#define MBX_BTYPE_MASK 0x0000ffff /* PUTS Board Type Register */
-#define MBX_BTYPE_FAMILY_MASK 0x0000ff00 /* PUTS Board Family Register */
-#define MBX_BTYPE_SUBTYPE_MASK 0x000000ff /* PUTS Board Subtype */
-
-#define MBX_BTYPE_PLX9060 0x00000100 /* PLX family type */
-#define MBX_BTYPE_PLX9080 0x00000300 /* PLX wanXL100s family type */
-
-#define MBX_BTYPE_WANXL_4 0x00000104 /* wanXL400, 4-port */
-#define MBX_BTYPE_WANXL_2 0x00000102 /* wanXL200, 2-port */
-#define MBX_BTYPE_WANXL_1s 0x00000301 /* wanXL100s, 1-port */
-#define MBX_BTYPE_WANXL_1t 0x00000401 /* wanXL100T1, 1-port */
-
-/*****************************************/
-/*** MAILBOX #3 - SHMQ MAILBOX ***/
-/*****************************************/
-
-#define MBX_SMBX_MASK 0x000000ff /* PUTS SHMQ Mailbox bits */
-
-/***************************************/
-/*** GENERIC HOST-SIDE DRIVER ***/
-/***************************************/
-
-#define MBX_ERR 0
-#define MBX_OK 1
-
-/* mailbox check routine - type of testing */
-#define MBXCHK_STS 0x00 /* check for PUTS status */
-#define MBXCHK_NOWAIT 0x01 /* dont care about PUTS status */
-
-/* system allocates this many bytes for address mapping mailbox space */
-#define MBX_ADDR_SPACE_360 0x80 /* wanXL100s/200/400 */
-#define MBX_ADDR_MASK_360 (MBX_ADDR_SPACE_360-1)
-
-static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
-{
- void __iomem *dma_cs_addr;
- uint8_t dma_status;
- const int timeout = 10000;
- unsigned int i;
-
- if (channel)
- dma_cs_addr = iobase + PLX_DMA1_CS_REG;
- else
- dma_cs_addr = iobase + PLX_DMA0_CS_REG;
-
- /* abort dma transfer if necessary */
- dma_status = readb(dma_cs_addr);
- if ((dma_status & PLX_DMA_EN_BIT) == 0)
- return 0;
-
- /* wait to make sure done bit is zero */
- for (i = 0; (dma_status & PLX_DMA_DONE_BIT) && i < timeout; i++) {
- udelay(1);
- dma_status = readb(dma_cs_addr);
- }
- if (i == timeout)
- return -ETIMEDOUT;
-
- /* disable and abort channel */
- writeb(PLX_DMA_ABORT_BIT, dma_cs_addr);
- /* wait for dma done bit */
- dma_status = readb(dma_cs_addr);
- for (i = 0; (dma_status & PLX_DMA_DONE_BIT) == 0 && i < timeout; i++) {
- udelay(1);
- dma_status = readb(dma_cs_addr);
- }
- if (i == timeout)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-#endif /* __COMEDI_PLX9080_H */
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
deleted file mode 100644
index 152cb146fc16..000000000000
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ /dev/null
@@ -1,815 +0,0 @@
-/*======================================================================
-
- comedi/drivers/quatech_daqp_cs.c
-
- Quatech DAQP PCMCIA data capture cards COMEDI client driver
- Copyright (C) 2000, 2003 Brent Baccala <baccala@freesoft.org>
- The DAQP interface code in this file is released into the public domain.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- http://www.comedi.org/
-
- quatech_daqp_cs.c 1.10
-
- Documentation for the DAQP PCMCIA cards can be found on Quatech's site:
-
- ftp://ftp.quatech.com/Manuals/daqp-208.pdf
-
- This manual is for both the DAQP-208 and the DAQP-308.
-
- What works:
-
- - A/D conversion
- - 8 channels
- - 4 gain ranges
- - ground ref or differential
- - single-shot and timed both supported
- - D/A conversion, single-shot
- - digital I/O
-
- What doesn't:
-
- - any kind of triggering - external or D/A channel 1
- - the card's optional expansion board
- - the card's timer (for anything other than A/D conversion)
-  - D/A update modes other than immediate (i.e., timed)
- - fancier timing modes
- - setting card's FIFO buffer thresholds to anything but default
-
-======================================================================*/
-
-/*
-Driver: quatech_daqp_cs
-Description: Quatech DAQP PCMCIA data capture cards
-Author: Brent Baccala <baccala@freesoft.org>
-Status: works
-Devices: [Quatech] DAQP-208 (daqp), DAQP-308
-*/
-
-#include <linux/module.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-
-#include "../comedi_pcmcia.h"
-
-struct daqp_private {
- int stop;
-
- enum { semaphore, buffer } interrupt_mode;
-
- struct completion eos;
-};
-
-/* The DAQP communicates with the system through a 16 byte I/O window. */
-
-#define DAQP_FIFO_SIZE 4096
-
-#define DAQP_FIFO 0
-#define DAQP_SCANLIST 1
-#define DAQP_CONTROL 2
-#define DAQP_STATUS 2
-#define DAQP_DIGITAL_IO 3
-#define DAQP_PACER_LOW 4
-#define DAQP_PACER_MID 5
-#define DAQP_PACER_HIGH 6
-#define DAQP_COMMAND 7
-#define DAQP_DA 8
-#define DAQP_TIMER 10
-#define DAQP_AUX 15
-
-#define DAQP_SCANLIST_DIFFERENTIAL 0x4000
-#define DAQP_SCANLIST_GAIN(x) ((x)<<12)
-#define DAQP_SCANLIST_CHANNEL(x) ((x)<<8)
-#define DAQP_SCANLIST_START 0x0080
-#define DAQP_SCANLIST_EXT_GAIN(x) ((x)<<4)
-#define DAQP_SCANLIST_EXT_CHANNEL(x) (x)
-
-#define DAQP_CONTROL_PACER_100kHz 0xc0
-#define DAQP_CONTROL_PACER_1MHz 0x80
-#define DAQP_CONTROL_PACER_5MHz 0x40
-#define DAQP_CONTROL_PACER_EXTERNAL 0x00
-#define DAQP_CONTORL_EXPANSION 0x20
-#define DAQP_CONTROL_EOS_INT_ENABLE 0x10
-#define DAQP_CONTROL_FIFO_INT_ENABLE 0x08
-#define DAQP_CONTROL_TRIGGER_ONESHOT 0x00
-#define DAQP_CONTROL_TRIGGER_CONTINUOUS 0x04
-#define DAQP_CONTROL_TRIGGER_INTERNAL 0x00
-#define DAQP_CONTROL_TRIGGER_EXTERNAL 0x02
-#define DAQP_CONTROL_TRIGGER_RISING 0x00
-#define DAQP_CONTROL_TRIGGER_FALLING 0x01
-
-#define DAQP_STATUS_IDLE 0x80
-#define DAQP_STATUS_RUNNING 0x40
-#define DAQP_STATUS_EVENTS 0x38
-#define DAQP_STATUS_DATA_LOST 0x20
-#define DAQP_STATUS_END_OF_SCAN 0x10
-#define DAQP_STATUS_FIFO_THRESHOLD 0x08
-#define DAQP_STATUS_FIFO_FULL 0x04
-#define DAQP_STATUS_FIFO_NEARFULL 0x02
-#define DAQP_STATUS_FIFO_EMPTY 0x01
-
-#define DAQP_COMMAND_ARM 0x80
-#define DAQP_COMMAND_RSTF 0x40
-#define DAQP_COMMAND_RSTQ 0x20
-#define DAQP_COMMAND_STOP 0x10
-#define DAQP_COMMAND_LATCH 0x08
-#define DAQP_COMMAND_100kHz 0x00
-#define DAQP_COMMAND_50kHz 0x02
-#define DAQP_COMMAND_25kHz 0x04
-#define DAQP_COMMAND_FIFO_DATA 0x01
-#define DAQP_COMMAND_FIFO_PROGRAM 0x00
-
-#define DAQP_AUX_TRIGGER_TTL 0x00
-#define DAQP_AUX_TRIGGER_ANALOG 0x80
-#define DAQP_AUX_TRIGGER_PRETRIGGER 0x40
-#define DAQP_AUX_TIMER_INT_ENABLE 0x20
-#define DAQP_AUX_TIMER_RELOAD 0x00
-#define DAQP_AUX_TIMER_PAUSE 0x08
-#define DAQP_AUX_TIMER_GO 0x10
-#define DAQP_AUX_TIMER_GO_EXTERNAL 0x18
-#define DAQP_AUX_TIMER_EXTERNAL_SRC 0x04
-#define DAQP_AUX_TIMER_INTERNAL_SRC 0x00
-#define DAQP_AUX_DA_DIRECT 0x00
-#define DAQP_AUX_DA_OVERFLOW 0x01
-#define DAQP_AUX_DA_EXTERNAL 0x02
-#define DAQP_AUX_DA_PACER 0x03
-
-#define DAQP_AUX_RUNNING 0x80
-#define DAQP_AUX_TRIGGERED 0x40
-#define DAQP_AUX_DA_BUFFER 0x20
-#define DAQP_AUX_TIMER_OVERFLOW 0x10
-#define DAQP_AUX_CONVERSION 0x08
-#define DAQP_AUX_DATA_LOST 0x04
-#define DAQP_AUX_FIFO_NEARFULL 0x02
-#define DAQP_AUX_FIFO_EMPTY 0x01
-
-static const struct comedi_lrange range_daqp_ai = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25)
- }
-};
-
-/* Cancel a running acquisition */
-
-static int daqp_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct daqp_private *devpriv = dev->private;
-
- if (devpriv->stop)
- return -EIO;
-
- outb(DAQP_COMMAND_STOP, dev->iobase + DAQP_COMMAND);
-
-	/* flush any lingering data in FIFO - superfluous here */
- /* outb(DAQP_COMMAND_RSTF, dev->iobase+DAQP_COMMAND); */
-
- devpriv->interrupt_mode = semaphore;
-
- return 0;
-}
-
-/* Interrupt handler
- *
- * Operates in one of two modes. If devpriv->interrupt_mode is
- * 'semaphore', just signal the devpriv->eos completion and return
- * (one-shot mode). Otherwise (continuous mode), read data in from
- * the card, transfer it to the buffer provided by the higher-level
- * comedi kernel module, and signal various comedi callback routines,
- * which run pretty quickly.
- */
-static enum irqreturn daqp_interrupt(int irq, void *dev_id)
-{
- struct comedi_device *dev = dev_id;
- struct daqp_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
- int loop_limit = 10000;
- int status;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- switch (devpriv->interrupt_mode) {
- case semaphore:
- complete(&devpriv->eos);
- break;
-
- case buffer:
- while (!((status = inb(dev->iobase + DAQP_STATUS))
- & DAQP_STATUS_FIFO_EMPTY)) {
- unsigned short data;
-
- if (status & DAQP_STATUS_DATA_LOST) {
- s->async->events |= COMEDI_CB_OVERFLOW;
- dev_warn(dev->class_dev, "data lost\n");
- break;
- }
-
- data = inb(dev->iobase + DAQP_FIFO);
- data |= inb(dev->iobase + DAQP_FIFO) << 8;
- data ^= 0x8000;
-
- comedi_buf_write_samples(s, &data, 1);
-
-			/* If a stop count was requested and it has
-			 * been reached, end the acquisition.
-			 */
-
- if (cmd->stop_src == TRIG_COUNT &&
- s->async->scans_done >= cmd->stop_arg) {
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
-
- if ((loop_limit--) <= 0)
- break;
- }
-
- if (loop_limit <= 0) {
- dev_warn(dev->class_dev,
- "loop_limit reached in daqp_interrupt()\n");
- s->async->events |= COMEDI_CB_ERROR;
- }
-
- comedi_handle_events(dev, s);
- }
- return IRQ_HANDLED;
-}
-
-static void daqp_ai_set_one_scanlist_entry(struct comedi_device *dev,
- unsigned int chanspec,
- int start)
-{
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
- unsigned int val;
-
- val = DAQP_SCANLIST_CHANNEL(chan) | DAQP_SCANLIST_GAIN(range);
-
- if (aref == AREF_DIFF)
- val |= DAQP_SCANLIST_DIFFERENTIAL;
-
- if (start)
- val |= DAQP_SCANLIST_START;
-
- outb(val & 0xff, dev->iobase + DAQP_SCANLIST);
- outb((val >> 8) & 0xff, dev->iobase + DAQP_SCANLIST);
-}
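
As a concrete example of the format handled above: channel 3, gain code 2 (the ±2.5 V range), differential reference and the start-of-scan flag pack into 0x6380, which goes out as the byte 0x80 followed by 0x63. A standalone check, with the macros copied from this driver:

#include <stdio.h>

#define DAQP_SCANLIST_DIFFERENTIAL	0x4000
#define DAQP_SCANLIST_GAIN(x)		((x) << 12)
#define DAQP_SCANLIST_CHANNEL(x)	((x) << 8)
#define DAQP_SCANLIST_START		0x0080

int main(void)
{
	unsigned int val = DAQP_SCANLIST_CHANNEL(3) | DAQP_SCANLIST_GAIN(2) |
			   DAQP_SCANLIST_DIFFERENTIAL | DAQP_SCANLIST_START;

	printf("scanlist word 0x%04x -> bytes 0x%02x then 0x%02x\n",
	       val, val & 0xff, (val >> 8) & 0xff);
	return 0;
}
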
-
-/* One-shot analog data acquisition routine */
-
-static int daqp_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct daqp_private *devpriv = dev->private;
- int i;
- int v;
- int counter = 10000;
-
- if (devpriv->stop)
- return -EIO;
-
- /* Stop any running conversion */
- daqp_ai_cancel(dev, s);
-
- outb(0, dev->iobase + DAQP_AUX);
-
- /* Reset scan list queue */
- outb(DAQP_COMMAND_RSTQ, dev->iobase + DAQP_COMMAND);
-
- /* Program one scan list entry */
- daqp_ai_set_one_scanlist_entry(dev, insn->chanspec, 1);
-
- /* Reset data FIFO (see page 28 of DAQP User's Manual) */
-
- outb(DAQP_COMMAND_RSTF, dev->iobase + DAQP_COMMAND);
-
- /* Set trigger */
-
- v = DAQP_CONTROL_TRIGGER_ONESHOT | DAQP_CONTROL_TRIGGER_INTERNAL
- | DAQP_CONTROL_PACER_100kHz | DAQP_CONTROL_EOS_INT_ENABLE;
-
- outb(v, dev->iobase + DAQP_CONTROL);
-
-	/* Reset any pending interrupts (my card has a tendency to
-	 * require multiple reads on the status register to achieve this)
-	 */
-
- while (--counter
- && (inb(dev->iobase + DAQP_STATUS) & DAQP_STATUS_EVENTS))
- ;
- if (!counter) {
- dev_err(dev->class_dev,
- "couldn't clear interrupts in status register\n");
- return -1;
- }
-
- init_completion(&devpriv->eos);
- devpriv->interrupt_mode = semaphore;
-
- for (i = 0; i < insn->n; i++) {
- /* Start conversion */
- outb(DAQP_COMMAND_ARM | DAQP_COMMAND_FIFO_DATA,
- dev->iobase + DAQP_COMMAND);
-
- /* Wait for interrupt service routine to unblock completion */
- /* Maybe could use a timeout here, but it's interruptible */
- if (wait_for_completion_interruptible(&devpriv->eos))
- return -EINTR;
-
- data[i] = inb(dev->iobase + DAQP_FIFO);
- data[i] |= inb(dev->iobase + DAQP_FIFO) << 8;
- data[i] ^= 0x8000;
- }
-
- return insn->n;
-}
-
-/* This function converts ns nanoseconds to a counter value suitable
- * for programming the device. We always use the DAQP's 5 MHz clock,
- * which with its 24-bit counter, allows values up to about 3.4 seconds.
- * Also, the function adjusts ns so that it corresponds to the actual
- * time that the device will use.
- */
-
-static int daqp_ns_to_timer(unsigned int *ns, unsigned int flags)
-{
- int timer;
-
- timer = *ns / 200;
- *ns = timer * 200;
-
- return timer;
-}
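
As a quick check of the arithmetic above, here is a minimal standalone sketch (not part of the driver) of the same 200 ns truncation; the requested period is simply rounded down to a multiple of the 5 MHz clock period:

#include <stdio.h>

/* mirrors daqp_ns_to_timer(): one count per 200 ns (5 MHz clock) */
static int daqp_ns_to_timer_sketch(unsigned int *ns)
{
	int timer = *ns / 200;

	*ns = timer * 200;	/* report the period the device will really use */
	return timer;
}

int main(void)
{
	unsigned int ns = 10500;	/* ask for 10.5 us between conversions */
	int timer = daqp_ns_to_timer_sketch(&ns);

	printf("timer=%d actual=%u ns\n", timer, ns);	/* timer=52 actual=10400 ns */
	return 0;
}
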
-
-/* cmdtest tests a particular command to see if it is valid.
- * Using the cmdtest ioctl, a user can create a valid cmd
- * and then have it executed by the cmd ioctl.
- *
- * cmdtest returns 1,2,3,4 or 0, depending on which tests
- * the command passes.
- */
-
-static int daqp_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
-#define MAX_SPEED 10000 /* 100 kHz - in nanoseconds */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SPEED);
- }
-
-	/* If scan_begin and convert are both timer values, the only
- * way that can make sense is if the scan time is the number of
- * conversions times the convert time
- */
-
- if (cmd->scan_begin_src == TRIG_TIMER && cmd->convert_src == TRIG_TIMER
- && cmd->scan_begin_arg != cmd->convert_arg * cmd->scan_end_arg) {
- err |= -EINVAL;
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- MAX_SPEED);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- daqp_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- daqp_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct daqp_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int counter;
- int scanlist_start_on_every_entry;
- int threshold;
-
- int i;
- int v;
-
- if (devpriv->stop)
- return -EIO;
-
- /* Stop any running conversion */
- daqp_ai_cancel(dev, s);
-
- outb(0, dev->iobase + DAQP_AUX);
-
- /* Reset scan list queue */
- outb(DAQP_COMMAND_RSTQ, dev->iobase + DAQP_COMMAND);
-
- /* Program pacer clock
- *
-	 * There are two modes we can operate in. If convert_src is
- * TRIG_TIMER, then convert_arg specifies the time between
- * each conversion, so we program the pacer clock to that
- * frequency and set the SCANLIST_START bit on every scanlist
- * entry. Otherwise, convert_src is TRIG_NOW, which means
- * we want the fastest possible conversions, scan_begin_src
- * is TRIG_TIMER, and scan_begin_arg specifies the time between
- * each scan, so we program the pacer clock to this frequency
- * and only set the SCANLIST_START bit on the first entry.
- */
-
- if (cmd->convert_src == TRIG_TIMER) {
- counter = daqp_ns_to_timer(&cmd->convert_arg, cmd->flags);
- outb(counter & 0xff, dev->iobase + DAQP_PACER_LOW);
- outb((counter >> 8) & 0xff, dev->iobase + DAQP_PACER_MID);
- outb((counter >> 16) & 0xff, dev->iobase + DAQP_PACER_HIGH);
- scanlist_start_on_every_entry = 1;
- } else {
- counter = daqp_ns_to_timer(&cmd->scan_begin_arg, cmd->flags);
- outb(counter & 0xff, dev->iobase + DAQP_PACER_LOW);
- outb((counter >> 8) & 0xff, dev->iobase + DAQP_PACER_MID);
- outb((counter >> 16) & 0xff, dev->iobase + DAQP_PACER_HIGH);
- scanlist_start_on_every_entry = 0;
- }
-
- /* Program scan list */
- for (i = 0; i < cmd->chanlist_len; i++) {
- int start = (i == 0 || scanlist_start_on_every_entry);
-
- daqp_ai_set_one_scanlist_entry(dev, cmd->chanlist[i], start);
- }
-
- /* Now it's time to program the FIFO threshold, basically the
- * number of samples the card will buffer before it interrupts
- * the CPU.
- *
- * If we don't have a stop count, then use half the size of
- * the FIFO (the manufacturer's recommendation). Consider
- * that the FIFO can hold 2K samples (4K bytes). With the
- * threshold set at half the FIFO size, we have a margin of
- * error of 1024 samples. At the chip's maximum sample rate
- * of 100,000 Hz, the CPU would have to delay interrupt
- * service for a full 10 milliseconds in order to lose data
- * here (as opposed to higher up in the kernel). I've never
- * seen it happen. However, for slow sample rates it may
- * buffer too much data and introduce too much delay for the
- * user application.
- *
- * If we have a stop count, then things get more interesting.
- * If the stop count is less than the FIFO size (actually
- * three-quarters of the FIFO size - see below), we just use
- * the stop count itself as the threshold, the card interrupts
- * us when that many samples have been taken, and we kill the
- * acquisition at that point and are done. If the stop count
- * is larger than that, then we divide it by 2 until it's less
- * than three quarters of the FIFO size (we always leave the
- * top quarter of the FIFO as protection against sluggish CPU
- * interrupt response) and use that as the threshold. So, if
- * the stop count is 4000 samples, we divide by two twice to
- * get 1000 samples, use that as the threshold, take four
- * interrupts to get our 4000 samples and are done.
- *
- * The algorithm could be more clever. For example, if 81000
- * samples are requested, we could set the threshold to 1500
- * samples and take 54 interrupts to get 81000. But 54 isn't
- * a power of two, so this algorithm won't find that option.
- * Instead, it'll set the threshold at 1266 and take 64
- * interrupts to get 81024 samples, of which the last 24 will
- * be discarded... but we won't get the last interrupt until
- * they've been collected. To find the first option, the
- * computer could look at the prime decomposition of the
- * sample count (81000 = 3^4 * 5^3 * 2^3) and factor it into a
- * threshold (1500 = 3 * 5^3 * 2^2) and an interrupt count (54
- * = 3^3 * 2). Hmmm... a one-line while loop or prime
- * decomposition of integers... I'll leave it the way it is.
- *
- * I'll also note a mini-race condition before ignoring it in
- * the code. Let's say we're taking 4000 samples, as before.
- * After 1000 samples, we get an interrupt. But before that
- * interrupt is completely serviced, another sample is taken
- * and loaded into the FIFO. Since the interrupt handler
- * empties the FIFO before returning, it will read 1001 samples.
- * If that happens four times, we'll end up taking 4004 samples,
- * not 4000. The interrupt handler will discard the extra four
- * samples (by halting the acquisition with four samples still
- * in the FIFO), but we will have to wait for them.
- *
- * In short, this code works pretty well, but for either of
- * the two reasons noted, might end up waiting for a few more
- * samples than actually requested. Shouldn't make too much
- * of a difference.
- */
-
-	/* Compute the FIFO threshold (in bytes, not samples - that's
-	 * why we multiply the sample count by 2 = sizeof(sample))
-	 */
-
- if (cmd->stop_src == TRIG_COUNT) {
- unsigned long long nsamples;
- unsigned long long nbytes;
-
- nsamples = (unsigned long long)cmd->stop_arg *
- cmd->scan_end_arg;
- nbytes = nsamples * comedi_bytes_per_sample(s);
- while (nbytes > DAQP_FIFO_SIZE * 3 / 4)
- nbytes /= 2;
- threshold = nbytes;
- } else {
- threshold = DAQP_FIFO_SIZE / 2;
- }
-
- /* Reset data FIFO (see page 28 of DAQP User's Manual) */
-
- outb(DAQP_COMMAND_RSTF, dev->iobase + DAQP_COMMAND);
-
- /* Set FIFO threshold. First two bytes are near-empty
- * threshold, which is unused; next two bytes are near-full
- * threshold. We computed the number of bytes we want in the
-	 * FIFO when the interrupt is generated, but what the card wants
- * is actually the number of available bytes left in the FIFO
- * when the interrupt is to happen.
- */
-
- outb(0x00, dev->iobase + DAQP_FIFO);
- outb(0x00, dev->iobase + DAQP_FIFO);
-
- outb((DAQP_FIFO_SIZE - threshold) & 0xff, dev->iobase + DAQP_FIFO);
- outb((DAQP_FIFO_SIZE - threshold) >> 8, dev->iobase + DAQP_FIFO);
-
- /* Set trigger */
-
- v = DAQP_CONTROL_TRIGGER_CONTINUOUS | DAQP_CONTROL_TRIGGER_INTERNAL
- | DAQP_CONTROL_PACER_5MHz | DAQP_CONTROL_FIFO_INT_ENABLE;
-
- outb(v, dev->iobase + DAQP_CONTROL);
-
-	/* Reset any pending interrupts (my card has a tendency to
-	 * require multiple reads on the status register to achieve this)
-	 */
- counter = 100;
- while (--counter
- && (inb(dev->iobase + DAQP_STATUS) & DAQP_STATUS_EVENTS))
- ;
- if (!counter) {
- dev_err(dev->class_dev,
- "couldn't clear interrupts in status register\n");
- return -1;
- }
-
- devpriv->interrupt_mode = buffer;
-
- /* Start conversion */
- outb(DAQP_COMMAND_ARM | DAQP_COMMAND_FIFO_DATA,
- dev->iobase + DAQP_COMMAND);
-
- return 0;
-}
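
The 4000-sample case worked through in the long comment inside daqp_ai_cmd() can be reproduced with this small standalone sketch; it assumes DAQP_FIFO_SIZE is 4096 bytes (the 2K-sample / 4K-byte FIFO the comment mentions) and 2 bytes per sample:

#include <stdio.h>

#define FIFO_SIZE	4096	/* bytes; assumed value of DAQP_FIFO_SIZE */

int main(void)
{
	unsigned long long nsamples = 4000;		/* stop_arg * scan_end_arg */
	unsigned long long nbytes = nsamples * 2;	/* 2 bytes per sample */

	/* halve until the threshold fits in three quarters of the FIFO */
	while (nbytes > FIFO_SIZE * 3 / 4)
		nbytes /= 2;

	/* prints "threshold=2000 bytes (1000 samples)" -> four interrupts */
	printf("threshold=%llu bytes (%llu samples)\n", nbytes, nbytes / 2);
	return 0;
}
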
-
-static int daqp_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct daqp_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- if (devpriv->stop)
- return -EIO;
-
- /* Make sure D/A update mode is direct update */
- outb(0, dev->iobase + DAQP_AUX);
-
-	for (i = 0; i < insn->n; i++) {
- unsigned val = data[i];
-
- s->readback[chan] = val;
-
- val &= 0x0fff;
- val ^= 0x0800; /* Flip the sign */
- val |= (chan << 12);
-
- outw(val, dev->iobase + DAQP_DA);
- }
-
- return insn->n;
-}
-
-static int daqp_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct daqp_private *devpriv = dev->private;
-
- if (devpriv->stop)
- return -EIO;
-
- data[0] = inb(dev->iobase + DAQP_DIGITAL_IO);
-
- return insn->n;
-}
-
-static int daqp_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct daqp_private *devpriv = dev->private;
-
- if (devpriv->stop)
- return -EIO;
-
- if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAQP_DIGITAL_IO);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int daqp_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
- struct daqp_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
- ret = comedi_pcmcia_enable(dev, NULL);
- if (ret)
- return ret;
- dev->iobase = link->resource[0]->start;
-
- link->priv = dev;
- ret = pcmcia_request_irq(link, daqp_interrupt);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
- s->n_chan = 8;
- s->len_chanlist = 2048;
- s->maxdata = 0xffff;
- s->range_table = &range_daqp_ai;
- s->insn_read = daqp_ai_insn_read;
- s->do_cmdtest = daqp_ai_cmdtest;
- s->do_cmd = daqp_ai_cmd;
- s->cancel = daqp_ai_cancel;
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table = &range_bipolar5;
- s->insn_write = daqp_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 1;
- s->maxdata = 1;
- s->insn_bits = daqp_di_insn_bits;
-
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 1;
- s->maxdata = 1;
- s->insn_bits = daqp_do_insn_bits;
-
- return 0;
-}
-
-static struct comedi_driver driver_daqp = {
- .driver_name = "quatech_daqp_cs",
- .module = THIS_MODULE,
- .auto_attach = daqp_auto_attach,
- .detach = comedi_pcmcia_disable,
-};
-
-static int daqp_cs_suspend(struct pcmcia_device *link)
-{
- struct comedi_device *dev = link->priv;
- struct daqp_private *devpriv = dev ? dev->private : NULL;
-
- /* Mark the device as stopped, to block IO until later */
- if (devpriv)
- devpriv->stop = 1;
-
- return 0;
-}
-
-static int daqp_cs_resume(struct pcmcia_device *link)
-{
- struct comedi_device *dev = link->priv;
- struct daqp_private *devpriv = dev ? dev->private : NULL;
-
- if (devpriv)
- devpriv->stop = 0;
-
- return 0;
-}
-
-static int daqp_cs_attach(struct pcmcia_device *link)
-{
- return comedi_pcmcia_auto_config(link, &driver_daqp);
-}
-
-static const struct pcmcia_device_id daqp_cs_id_table[] = {
- PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0027),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, daqp_cs_id_table);
-
-static struct pcmcia_driver daqp_cs_driver = {
- .name = "quatech_daqp_cs",
- .owner = THIS_MODULE,
- .id_table = daqp_cs_id_table,
- .probe = daqp_cs_attach,
- .remove = comedi_pcmcia_auto_unconfig,
- .suspend = daqp_cs_suspend,
- .resume = daqp_cs_resume,
-};
-module_comedi_pcmcia_driver(driver_daqp, daqp_cs_driver);
-
-MODULE_DESCRIPTION("Comedi driver for Quatech DAQP PCMCIA data capture cards");
-MODULE_AUTHOR("Brent Baccala <baccala@freesoft.org>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
deleted file mode 100644
index 68ac02b68cb2..000000000000
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ /dev/null
@@ -1,1375 +0,0 @@
-/*
- * comedi/drivers/rtd520.c
- * Comedi driver for Real Time Devices (RTD) PCI4520/DM7520
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: rtd520
- * Description: Real Time Devices PCI4520/DM7520
- * Devices: [Real Time Devices] DM7520HR-1 (DM7520), DM7520HR-8,
- * PCI4520 (PCI4520), PCI4520-8
- * Author: Dan Christian
- * Status: Works. Only tested on DM7520-8. Not SMP safe.
- *
- * Configuration options: not applicable, uses PCI auto config
- */
-
-/*
- * Created by Dan Christian, NASA Ames Research Center.
- *
- * The PCI4520 is a PCI card. The DM7520 is a PC/104-plus card.
- * Both have:
- * 8/16 12 bit ADC with FIFO and channel gain table
- * 8 bits high speed digital out (for external MUX) (or 8 in or 8 out)
- * 8 bits high speed digital in with FIFO and interrupt on change (or 8 IO)
- * 2 12 bit DACs with FIFOs
- * 2 bits output
- * 2 bits input
- * bus mastering DMA
- * timers: ADC sample, pacer, burst, about, delay, DA1, DA2
- * sample counter
- * 3 user timer/counters (8254)
- * external interrupt
- *
- * The DM7520 has slightly fewer features (fewer gain steps).
- *
- * These boards can support external multiplexors and multi-board
- * synchronization, but this driver doesn't support that.
- *
- * Board docs: http://www.rtdusa.com/PC104/DM/analog%20IO/dm7520.htm
- * Data sheet: http://www.rtdusa.com/pdf/dm7520.pdf
- * Example source: http://www.rtdusa.com/examples/dm/dm7520.zip
- * Call them and ask for the register level manual.
- * PCI chip: http://www.plxtech.com/products/io/pci9080
- *
- * Notes:
- * This board is memory mapped. There is some IO stuff, but it isn't needed.
- *
- * I use a pretty loose naming style within the driver (rtd_blah).
- * All externally visible names should be rtd520_blah.
- * I use camelCase for structures (and inside them).
- * I may also use upper CamelCase for function names (old habit).
- *
- * This board is somewhat related to the RTD PCI4400 board.
- *
- * I borrowed heavily from the ni_mio_common, ni_atmio16d, mite, and
- * das1800, since they have the best documented code. Driver cb_pcidas64.c
- * uses the same DMA controller.
- *
- * As far as I can tell, the About interrupt doesn't work if Sample is
- * also enabled. It turns out that About really isn't needed, since
- * we always count down samples read.
- */
-
-/*
- * driver status:
- *
- * Analog-In supports instruction and command mode.
- *
- * With DMA, you can sample at 1.15 MHz with 70% idle on a 400 MHz K6-2
- * (single channel, 64K read buffer). I get random system lockups when
- * using DMA with ALI-15xx based systems. I haven't been able to test
- * any other chipsets. The lockups happen soon after the start of an
- * acquisition, not in the middle of a long run.
- *
- * Without DMA, you can do 620 kHz sampling with 20% idle on a 400 MHz K6-2
- * (with a 256K read buffer).
- *
- * Digital-IO and Analog-Out only support instruction mode.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "plx9080.h"
-
-/*
- * Local Address Space 0 Offsets
- */
-#define LAS0_USER_IO 0x0008 /* User I/O */
-#define LAS0_ADC 0x0010 /* FIFO Status/Software A/D Start */
-#define FS_DAC1_NOT_EMPTY BIT(0) /* DAC1 FIFO not empty */
-#define FS_DAC1_HEMPTY BIT(1) /* DAC1 FIFO half empty */
-#define FS_DAC1_NOT_FULL BIT(2) /* DAC1 FIFO not full */
-#define FS_DAC2_NOT_EMPTY BIT(4) /* DAC2 FIFO not empty */
-#define FS_DAC2_HEMPTY BIT(5) /* DAC2 FIFO half empty */
-#define FS_DAC2_NOT_FULL BIT(6) /* DAC2 FIFO not full */
-#define FS_ADC_NOT_EMPTY BIT(8) /* ADC FIFO not empty */
-#define FS_ADC_HEMPTY BIT(9) /* ADC FIFO half empty */
-#define FS_ADC_NOT_FULL BIT(10) /* ADC FIFO not full */
-#define FS_DIN_NOT_EMPTY BIT(12) /* DIN FIFO not empty */
-#define FS_DIN_HEMPTY BIT(13) /* DIN FIFO half empty */
-#define FS_DIN_NOT_FULL BIT(14) /* DIN FIFO not full */
-#define LAS0_UPDATE_DAC(x) (0x0014 + ((x) * 0x4)) /* D/Ax Update (w) */
-#define LAS0_DAC 0x0024 /* Software Simultaneous Update (w) */
-#define LAS0_PACER 0x0028 /* Software Pacer Start/Stop */
-#define LAS0_TIMER 0x002c /* Timer Status/HDIN Software Trig. */
-#define LAS0_IT 0x0030 /* Interrupt Status/Enable */
-#define IRQM_ADC_FIFO_WRITE BIT(0) /* ADC FIFO Write */
-#define IRQM_CGT_RESET BIT(1) /* Reset CGT */
-#define IRQM_CGT_PAUSE BIT(3) /* Pause CGT */
-#define IRQM_ADC_ABOUT_CNT BIT(4) /* About Counter out */
-#define IRQM_ADC_DELAY_CNT BIT(5) /* Delay Counter out */
-#define IRQM_ADC_SAMPLE_CNT BIT(6) /* ADC Sample Counter */
-#define IRQM_DAC1_UCNT BIT(7) /* DAC1 Update Counter */
-#define IRQM_DAC2_UCNT BIT(8) /* DAC2 Update Counter */
-#define IRQM_UTC1 BIT(9) /* User TC1 out */
-#define IRQM_UTC1_INV BIT(10) /* User TC1 out, inverted */
-#define IRQM_UTC2 BIT(11) /* User TC2 out */
-#define IRQM_DIGITAL_IT BIT(12) /* Digital Interrupt */
-#define IRQM_EXTERNAL_IT BIT(13) /* External Interrupt */
-#define IRQM_ETRIG_RISING BIT(14) /* Ext Trigger rising-edge */
-#define IRQM_ETRIG_FALLING BIT(15) /* Ext Trigger falling-edge */
-#define LAS0_CLEAR 0x0034 /* Clear/Set Interrupt Clear Mask */
-#define LAS0_OVERRUN 0x0038 /* Pending interrupts/Clear Overrun */
-#define LAS0_PCLK 0x0040 /* Pacer Clock (24bit) */
-#define LAS0_BCLK 0x0044 /* Burst Clock (10bit) */
-#define LAS0_ADC_SCNT 0x0048 /* A/D Sample counter (10bit) */
-#define LAS0_DAC1_UCNT 0x004c /* D/A1 Update counter (10 bit) */
-#define LAS0_DAC2_UCNT 0x0050 /* D/A2 Update counter (10 bit) */
-#define LAS0_DCNT 0x0054 /* Delay counter (16 bit) */
-#define LAS0_ACNT 0x0058 /* About counter (16 bit) */
-#define LAS0_DAC_CLK 0x005c /* DAC clock (16bit) */
-#define LAS0_8254_TIMER_BASE 0x0060 /* 8254 timer/counter base */
-#define LAS0_DIO0 0x0070 /* Digital I/O Port 0 */
-#define LAS0_DIO1 0x0074 /* Digital I/O Port 1 */
-#define LAS0_DIO0_CTRL 0x0078 /* Digital I/O Control */
-#define LAS0_DIO_STATUS 0x007c /* Digital I/O Status */
-#define LAS0_BOARD_RESET 0x0100 /* Board reset */
-#define LAS0_DMA0_SRC 0x0104 /* DMA 0 Sources select */
-#define LAS0_DMA1_SRC 0x0108 /* DMA 1 Sources select */
-#define LAS0_ADC_CONVERSION 0x010c /* A/D Conversion Signal select */
-#define LAS0_BURST_START 0x0110 /* Burst Clock Start Trigger select */
-#define LAS0_PACER_START 0x0114 /* Pacer Clock Start Trigger select */
-#define LAS0_PACER_STOP 0x0118 /* Pacer Clock Stop Trigger select */
-#define LAS0_ACNT_STOP_ENABLE 0x011c /* About Counter Stop Enable */
-#define LAS0_PACER_REPEAT 0x0120 /* Pacer Start Trigger Mode select */
-#define LAS0_DIN_START 0x0124 /* HiSpd DI Sampling Signal select */
-#define LAS0_DIN_FIFO_CLEAR 0x0128 /* Digital Input FIFO Clear */
-#define LAS0_ADC_FIFO_CLEAR 0x012c /* A/D FIFO Clear */
-#define LAS0_CGT_WRITE 0x0130 /* Channel Gain Table Write */
-#define LAS0_CGL_WRITE 0x0134 /* Channel Gain Latch Write */
-#define LAS0_CG_DATA 0x0138 /* Digital Table Write */
-#define LAS0_CGT_ENABLE 0x013c /* Channel Gain Table Enable */
-#define LAS0_CG_ENABLE 0x0140 /* Digital Table Enable */
-#define LAS0_CGT_PAUSE 0x0144 /* Table Pause Enable */
-#define LAS0_CGT_RESET 0x0148 /* Reset Channel Gain Table */
-#define LAS0_CGT_CLEAR 0x014c /* Clear Channel Gain Table */
-#define LAS0_DAC_CTRL(x) (0x0150 + ((x) * 0x14)) /* D/Ax type/range */
-#define LAS0_DAC_SRC(x) (0x0154 + ((x) * 0x14)) /* D/Ax update source */
-#define LAS0_DAC_CYCLE(x) (0x0158 + ((x) * 0x14)) /* D/Ax cycle mode */
-#define LAS0_DAC_RESET(x) (0x015c + ((x) * 0x14)) /* D/Ax FIFO reset */
-#define LAS0_DAC_FIFO_CLEAR(x) (0x0160 + ((x) * 0x14)) /* D/Ax FIFO clear */
-#define LAS0_ADC_SCNT_SRC 0x0178 /* A/D Sample Counter Source select */
-#define LAS0_PACER_SELECT 0x0180 /* Pacer Clock select */
-#define LAS0_SBUS0_SRC 0x0184 /* SyncBus 0 Source select */
-#define LAS0_SBUS0_ENABLE 0x0188 /* SyncBus 0 enable */
-#define LAS0_SBUS1_SRC 0x018c /* SyncBus 1 Source select */
-#define LAS0_SBUS1_ENABLE 0x0190 /* SyncBus 1 enable */
-#define LAS0_SBUS2_SRC 0x0198 /* SyncBus 2 Source select */
-#define LAS0_SBUS2_ENABLE 0x019c /* SyncBus 2 enable */
-#define LAS0_ETRG_POLARITY 0x01a4 /* Ext. Trigger polarity select */
-#define LAS0_EINT_POLARITY 0x01a8 /* Ext. Interrupt polarity select */
-#define LAS0_8254_CLK_SEL(x) (0x01ac + ((x) * 0x8)) /* 8254 clock select */
-#define LAS0_8254_GATE_SEL(x) (0x01b0 + ((x) * 0x8)) /* 8254 gate select */
-#define LAS0_UOUT0_SELECT 0x01c4 /* User Output 0 source select */
-#define LAS0_UOUT1_SELECT 0x01c8 /* User Output 1 source select */
-#define LAS0_DMA0_RESET 0x01cc /* DMA0 Request state machine reset */
-#define LAS0_DMA1_RESET 0x01d0 /* DMA1 Request state machine reset */
-
-/*
- * Local Address Space 1 Offsets
- */
-#define LAS1_ADC_FIFO 0x0000 /* A/D FIFO (16bit) */
-#define LAS1_HDIO_FIFO 0x0004 /* HiSpd DI FIFO (16bit) */
-#define LAS1_DAC_FIFO(x) (0x0008 + ((x) * 0x4)) /* D/Ax FIFO (16bit) */
-
-/*
- * Driver specific stuff (tunable)
- */
-
-/*
- * We really only need 2 buffers. More than that means being much
- * smarter about knowing which ones are full.
- */
-#define DMA_CHAIN_COUNT 2 /* max DMA segments/buffers in a ring (min 2) */
-
-/* Target period for periodic transfers. This sets the user read latency. */
-/* Note: There are certain rates where we give this up and transfer 1/2 FIFO */
-/* If this is too low, efficiency is poor */
-#define TRANS_TARGET_PERIOD 10000000 /* 10 ms (in nanoseconds) */
-
-/* Set a practical limit on how long a list to support (affects memory use) */
-/* The board supports a channel list up to the FIFO length (1K or 8K) */
-#define RTD_MAX_CHANLIST 128 /* max channel list that we allow */
-
-/*
- * Board specific stuff
- */
-
-#define RTD_CLOCK_RATE	8000000	/* 8 MHz onboard clock */
-#define RTD_CLOCK_BASE 125 /* clock period in ns */
-
-/* Note: these speeds are slower than the spec, but fit the counter resolution */
-#define RTD_MAX_SPEED 1625 /* when sampling, in nanoseconds */
-/* max speed if we don't have to wait for settling */
-#define RTD_MAX_SPEED_1 875 /* if single channel, in nanoseconds */
-
-#define RTD_MIN_SPEED 2097151875 /* (24bit counter) in nanoseconds */
-/* min speed when only 1 channel (no burst counter) */
-#define RTD_MIN_SPEED_1 5000000 /* 200Hz, in nanoseconds */
-
-/* Setup continuous ring of 1/2 FIFO transfers. See RTD manual p91 */
-#define DMA_MODE_BITS (\
- PLX_LOCAL_BUS_16_WIDE_BITS \
- | PLX_DMA_EN_READYIN_BIT \
- | PLX_DMA_LOCAL_BURST_EN_BIT \
- | PLX_EN_CHAIN_BIT \
- | PLX_DMA_INTR_PCI_BIT \
- | PLX_LOCAL_ADDR_CONST_BIT \
- | PLX_DEMAND_MODE_BIT)
-
-#define DMA_TRANSFER_BITS (\
-/* descriptors in PCI memory*/ PLX_DESC_IN_PCI_BIT \
-/* interrupt at end of block */ | PLX_INTR_TERM_COUNT \
-/* from board to PCI */ | PLX_XFER_LOCAL_TO_PCI)
-
-/*
- * Comedi specific stuff
- */
-
-/*
- * The board has 3 input modes and the gains of 1,2,4,...32 (, 64, 128)
- */
-static const struct comedi_lrange rtd_ai_7520_range = {
- 18, {
- /* +-5V input range gain steps */
- BIP_RANGE(5.0),
- BIP_RANGE(5.0 / 2),
- BIP_RANGE(5.0 / 4),
- BIP_RANGE(5.0 / 8),
- BIP_RANGE(5.0 / 16),
- BIP_RANGE(5.0 / 32),
- /* +-10V input range gain steps */
- BIP_RANGE(10.0),
- BIP_RANGE(10.0 / 2),
- BIP_RANGE(10.0 / 4),
- BIP_RANGE(10.0 / 8),
- BIP_RANGE(10.0 / 16),
- BIP_RANGE(10.0 / 32),
- /* +10V input range gain steps */
- UNI_RANGE(10.0),
- UNI_RANGE(10.0 / 2),
- UNI_RANGE(10.0 / 4),
- UNI_RANGE(10.0 / 8),
- UNI_RANGE(10.0 / 16),
- UNI_RANGE(10.0 / 32),
- }
-};
-
-/* PCI4520 has two more gains (6 more entries) */
-static const struct comedi_lrange rtd_ai_4520_range = {
- 24, {
- /* +-5V input range gain steps */
- BIP_RANGE(5.0),
- BIP_RANGE(5.0 / 2),
- BIP_RANGE(5.0 / 4),
- BIP_RANGE(5.0 / 8),
- BIP_RANGE(5.0 / 16),
- BIP_RANGE(5.0 / 32),
- BIP_RANGE(5.0 / 64),
- BIP_RANGE(5.0 / 128),
- /* +-10V input range gain steps */
- BIP_RANGE(10.0),
- BIP_RANGE(10.0 / 2),
- BIP_RANGE(10.0 / 4),
- BIP_RANGE(10.0 / 8),
- BIP_RANGE(10.0 / 16),
- BIP_RANGE(10.0 / 32),
- BIP_RANGE(10.0 / 64),
- BIP_RANGE(10.0 / 128),
- /* +10V input range gain steps */
- UNI_RANGE(10.0),
- UNI_RANGE(10.0 / 2),
- UNI_RANGE(10.0 / 4),
- UNI_RANGE(10.0 / 8),
- UNI_RANGE(10.0 / 16),
- UNI_RANGE(10.0 / 32),
- UNI_RANGE(10.0 / 64),
- UNI_RANGE(10.0 / 128),
- }
-};
-
-/* Table order matches range values */
-static const struct comedi_lrange rtd_ao_range = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(10),
- }
-};
-
-enum rtd_boardid {
- BOARD_DM7520,
- BOARD_PCI4520,
-};
-
-struct rtd_boardinfo {
- const char *name;
- int range_bip10; /* start of +-10V range */
- int range_uni10; /* start of +10V range */
- const struct comedi_lrange *ai_range;
-};
-
-static const struct rtd_boardinfo rtd520_boards[] = {
- [BOARD_DM7520] = {
- .name = "DM7520",
- .range_bip10 = 6,
- .range_uni10 = 12,
- .ai_range = &rtd_ai_7520_range,
- },
- [BOARD_PCI4520] = {
- .name = "PCI4520",
- .range_bip10 = 8,
- .range_uni10 = 16,
- .ai_range = &rtd_ai_4520_range,
- },
-};
-
-struct rtd_private {
- /* memory mapped board structures */
- void __iomem *las1;
- void __iomem *lcfg;
-
- long ai_count; /* total transfer size (samples) */
- int xfer_count; /* # to transfer data. 0->1/2FIFO */
- int flags; /* flag event modes */
- unsigned fifosz;
-
- /* 8254 Timer/Counter gate and clock sources */
- unsigned char timer_gate_src[3];
- unsigned char timer_clk_src[3];
-};
-
-/* bit defines for "flags" */
-#define SEND_EOS 0x01 /* send End Of Scan events */
-#define DMA0_ACTIVE 0x02 /* DMA0 is active */
-#define DMA1_ACTIVE 0x04 /* DMA1 is active */
-
-/*
- * Given a desired period and the clock period (both in ns), return the
- * proper counter value (divider-1). Sets the original period to be the
- * true value.
- * Note: you have to check if the value is larger than the counter range!
- */
-static int rtd_ns_to_timer_base(unsigned int *nanosec,
- unsigned int flags, int base)
-{
- int divider;
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- divider = DIV_ROUND_CLOSEST(*nanosec, base);
- break;
- case CMDF_ROUND_DOWN:
- divider = (*nanosec) / base;
- break;
- case CMDF_ROUND_UP:
- divider = DIV_ROUND_UP(*nanosec, base);
- break;
- }
- if (divider < 2)
- divider = 2; /* min is divide by 2 */
-
- /*
- * Note: we don't check for max, because different timers
- * have different ranges
- */
-
- *nanosec = base * divider;
- return divider - 1; /* countdown is divisor+1 */
-}
-
-/*
- * Given a desired period (in ns), return the proper counter value
- * (divider-1) for the internal clock. Sets the original period to
- * be the true value.
- */
-static int rtd_ns_to_timer(unsigned int *ns, unsigned int flags)
-{
- return rtd_ns_to_timer_base(ns, flags, RTD_CLOCK_BASE);
-}
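
A standalone sketch (illustrative numbers, not from the RTD documentation) of two of the rounding modes against the 125 ns base; the kernel DIV_ROUND_* helpers are replaced by plain integer arithmetic here:

#include <stdio.h>

#define CLOCK_BASE	125	/* ns per count of the 8 MHz clock */

/* same arithmetic as rtd_ns_to_timer_base(), without the kernel macros */
static int ns_to_divider(unsigned int *ns, int round_up)
{
	int divider;

	if (round_up)
		divider = (*ns + CLOCK_BASE - 1) / CLOCK_BASE;
	else
		divider = *ns / CLOCK_BASE;	/* round down */
	if (divider < 2)
		divider = 2;			/* hardware minimum */
	*ns = CLOCK_BASE * divider;
	return divider - 1;			/* register holds divisor - 1 */
}

int main(void)
{
	unsigned int down = 1100, up = 1100;

	printf("down: reg=%d actual=%u ns\n", ns_to_divider(&down, 0), down);	/* reg=7, 1000 ns */
	printf("up:   reg=%d actual=%u ns\n", ns_to_divider(&up, 1), up);	/* reg=8, 1125 ns */
	return 0;
}
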
-
-/* Convert a single comedi channel-gain entry to a RTD520 table entry */
-static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
- unsigned int chanspec, int index)
-{
- const struct rtd_boardinfo *board = dev->board_ptr;
- unsigned int chan = CR_CHAN(chanspec);
- unsigned int range = CR_RANGE(chanspec);
- unsigned int aref = CR_AREF(chanspec);
- unsigned short r = 0;
-
- r |= chan & 0xf;
-
-	/* encode the input range and gain selection */
- if (range < board->range_bip10) {
- /* +-5 range */
- r |= 0x000;
- r |= (range & 0x7) << 4;
- } else if (range < board->range_uni10) {
- /* +-10 range */
- r |= 0x100;
- r |= ((range - board->range_bip10) & 0x7) << 4;
- } else {
- /* +10 range */
- r |= 0x200;
- r |= ((range - board->range_uni10) & 0x7) << 4;
- }
-
- switch (aref) {
- case AREF_GROUND: /* on-board ground */
- break;
-
- case AREF_COMMON:
- r |= 0x80; /* ref external analog common */
- break;
-
- case AREF_DIFF:
- r |= 0x400; /* differential inputs */
- break;
-
- case AREF_OTHER: /* ??? */
- break;
- }
- return r;
-}
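
For a concrete example of the encoding above, this standalone sketch rebuilds the table entry for channel 3, range index 8 and a differential reference, using the DM7520 layout from rtd520_boards[] (range_bip10 = 6, range_uni10 = 12); range index 8 is the third entry of the +-10 V block, i.e. gain 4:

#include <stdio.h>

/* DM7520 layout from rtd520_boards[] */
#define RANGE_BIP10	6	/* first +-10 V entry */
#define RANGE_UNI10	12	/* first +10 V entry */

static unsigned short cgt_entry(unsigned int chan, unsigned int range,
				int differential)
{
	unsigned short r = chan & 0xf;

	if (range < RANGE_BIP10)
		r |= (range & 0x7) << 4;			/* +-5 V block */
	else if (range < RANGE_UNI10)
		r |= 0x100 | (((range - RANGE_BIP10) & 0x7) << 4);
	else
		r |= 0x200 | (((range - RANGE_UNI10) & 0x7) << 4);

	if (differential)
		r |= 0x400;
	return r;
}

int main(void)
{
	/* channel 3, range index 8 (+-10 V block, gain 4), differential */
	printf("0x%03x\n", cgt_entry(3, 8, 1));	/* prints 0x523 */
	return 0;
}
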
-
-/* Setup the channel-gain table from a comedi list */
-static void rtd_load_channelgain_list(struct comedi_device *dev,
- unsigned int n_chan, unsigned int *list)
-{
- if (n_chan > 1) { /* setup channel gain table */
- int ii;
-
- writel(0, dev->mmio + LAS0_CGT_CLEAR);
- writel(1, dev->mmio + LAS0_CGT_ENABLE);
- for (ii = 0; ii < n_chan; ii++) {
- writel(rtd_convert_chan_gain(dev, list[ii], ii),
- dev->mmio + LAS0_CGT_WRITE);
- }
- } else { /* just use the channel gain latch */
- writel(0, dev->mmio + LAS0_CGT_ENABLE);
- writel(rtd_convert_chan_gain(dev, list[0], 0),
- dev->mmio + LAS0_CGL_WRITE);
- }
-}
-
-/*
- * Determine fifo size by doing adc conversions until the fifo half
- * empty status flag clears.
- */
-static int rtd520_probe_fifo_depth(struct comedi_device *dev)
-{
- unsigned int chanspec = CR_PACK(0, 0, AREF_GROUND);
- unsigned i;
- static const unsigned limit = 0x2000;
- unsigned fifo_size = 0;
-
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
- rtd_load_channelgain_list(dev, 1, &chanspec);
- /* ADC conversion trigger source: SOFTWARE */
- writel(0, dev->mmio + LAS0_ADC_CONVERSION);
- /* convert samples */
- for (i = 0; i < limit; ++i) {
- unsigned fifo_status;
- /* trigger conversion */
- writew(0, dev->mmio + LAS0_ADC);
- usleep_range(1, 1000);
- fifo_status = readl(dev->mmio + LAS0_ADC);
- if ((fifo_status & FS_ADC_HEMPTY) == 0) {
- fifo_size = 2 * i;
- break;
- }
- }
- if (i == limit) {
- dev_info(dev->class_dev, "failed to probe fifo size.\n");
- return -EIO;
- }
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
- if (fifo_size != 0x400 && fifo_size != 0x2000) {
- dev_info(dev->class_dev,
- "unexpected fifo size of %i, expected 1024 or 8192.\n",
- fifo_size);
- return -EIO;
- }
- return fifo_size;
-}
-
-static int rtd_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readl(dev->mmio + LAS0_ADC);
- if (status & FS_ADC_NOT_EMPTY)
- return 0;
- return -EBUSY;
-}
-
-static int rtd_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct rtd_private *devpriv = dev->private;
- unsigned int range = CR_RANGE(insn->chanspec);
- int ret;
- int n;
-
- /* clear any old fifo data */
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
-
- /* write channel to multiplexer and clear channel gain table */
- rtd_load_channelgain_list(dev, 1, &insn->chanspec);
-
- /* ADC conversion trigger source: SOFTWARE */
- writel(0, dev->mmio + LAS0_ADC_CONVERSION);
-
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- unsigned short d;
- /* trigger conversion */
- writew(0, dev->mmio + LAS0_ADC);
-
- ret = comedi_timeout(dev, s, insn, rtd_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* read data */
- d = readw(devpriv->las1 + LAS1_ADC_FIFO);
- d >>= 3; /* low 3 bits are marker lines */
-
- /* convert bipolar data to comedi unsigned data */
- if (comedi_range_is_bipolar(s, range))
- d = comedi_offset_munge(s, d);
-
- data[n] = d & s->maxdata;
- }
-
- /* return the number of samples read/written */
- return n;
-}
-
-static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
- int count)
-{
- struct rtd_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- int ii;
-
- for (ii = 0; ii < count; ii++) {
- unsigned int range = CR_RANGE(cmd->chanlist[async->cur_chan]);
- unsigned short d;
-
- if (devpriv->ai_count == 0) { /* done */
- d = readw(devpriv->las1 + LAS1_ADC_FIFO);
- continue;
- }
-
- d = readw(devpriv->las1 + LAS1_ADC_FIFO);
- d >>= 3; /* low 3 bits are marker lines */
-
- /* convert bipolar data to comedi unsigned data */
- if (comedi_range_is_bipolar(s, range))
- d = comedi_offset_munge(s, d);
- d &= s->maxdata;
-
- if (!comedi_buf_write_samples(s, &d, 1))
- return -1;
-
- if (devpriv->ai_count > 0) /* < 0, means read forever */
- devpriv->ai_count--;
- }
- return 0;
-}
-
-static irqreturn_t rtd_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct comedi_subdevice *s = dev->read_subdev;
- struct rtd_private *devpriv = dev->private;
- u32 overrun;
- u16 status;
- u16 fifo_status;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- fifo_status = readl(dev->mmio + LAS0_ADC);
- /* check for FIFO full, this automatically halts the ADC! */
- if (!(fifo_status & FS_ADC_NOT_FULL)) /* 0 -> full */
- goto xfer_abort;
-
- status = readw(dev->mmio + LAS0_IT);
- /* if interrupt was not caused by our board, or handled above */
- if (status == 0)
- return IRQ_HANDLED;
-
- if (status & IRQM_ADC_ABOUT_CNT) { /* sample count -> read FIFO */
- /*
- * since the priority interrupt controller may have queued
- * a sample counter interrupt, even though we have already
- * finished, we must handle the possibility that there is
- * no data here
- */
- if (!(fifo_status & FS_ADC_HEMPTY)) {
- /* FIFO half full */
- if (ai_read_n(dev, s, devpriv->fifosz / 2) < 0)
- goto xfer_abort;
-
- if (devpriv->ai_count == 0)
- goto xfer_done;
- } else if (devpriv->xfer_count > 0) {
- if (fifo_status & FS_ADC_NOT_EMPTY) {
- /* FIFO not empty */
- if (ai_read_n(dev, s, devpriv->xfer_count) < 0)
- goto xfer_abort;
-
- if (devpriv->ai_count == 0)
- goto xfer_done;
- }
- }
- }
-
- overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
- if (overrun)
- goto xfer_abort;
-
- /* clear the interrupt */
- writew(status, dev->mmio + LAS0_CLEAR);
- readw(dev->mmio + LAS0_CLEAR);
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-
-xfer_abort:
- s->async->events |= COMEDI_CB_ERROR;
-
-xfer_done:
- s->async->events |= COMEDI_CB_EOA;
-
- /* clear the interrupt */
- status = readw(dev->mmio + LAS0_IT);
- writew(status, dev->mmio + LAS0_CLEAR);
- readw(dev->mmio + LAS0_CLEAR);
-
- fifo_status = readl(dev->mmio + LAS0_ADC);
- overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
-
- comedi_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static int rtd_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* Note: these are time periods, not actual rates */
- if (cmd->chanlist_len == 1) { /* no scanning */
- if (comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- RTD_MAX_SPEED_1)) {
- rtd_ns_to_timer(&cmd->scan_begin_arg,
- CMDF_ROUND_UP);
- err |= -EINVAL;
- }
- if (comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- RTD_MIN_SPEED_1)) {
- rtd_ns_to_timer(&cmd->scan_begin_arg,
- CMDF_ROUND_DOWN);
- err |= -EINVAL;
- }
- } else {
- if (comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- RTD_MAX_SPEED)) {
- rtd_ns_to_timer(&cmd->scan_begin_arg,
- CMDF_ROUND_UP);
- err |= -EINVAL;
- }
- if (comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- RTD_MIN_SPEED)) {
- rtd_ns_to_timer(&cmd->scan_begin_arg,
- CMDF_ROUND_DOWN);
- err |= -EINVAL;
- }
- }
- } else {
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- /* should specify multiple external triggers */
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- if (cmd->chanlist_len == 1) { /* no scanning */
- if (comedi_check_trigger_arg_min(&cmd->convert_arg,
- RTD_MAX_SPEED_1)) {
- rtd_ns_to_timer(&cmd->convert_arg,
- CMDF_ROUND_UP);
- err |= -EINVAL;
- }
- if (comedi_check_trigger_arg_max(&cmd->convert_arg,
- RTD_MIN_SPEED_1)) {
- rtd_ns_to_timer(&cmd->convert_arg,
- CMDF_ROUND_DOWN);
- err |= -EINVAL;
- }
- } else {
- if (comedi_check_trigger_arg_min(&cmd->convert_arg,
- RTD_MAX_SPEED)) {
- rtd_ns_to_timer(&cmd->convert_arg,
- CMDF_ROUND_UP);
- err |= -EINVAL;
- }
- if (comedi_check_trigger_arg_max(&cmd->convert_arg,
- RTD_MIN_SPEED)) {
- rtd_ns_to_timer(&cmd->convert_arg,
- CMDF_ROUND_DOWN);
- err |= -EINVAL;
- }
- }
- } else {
- /* external trigger */
- /* see above */
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, 9);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- rtd_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- rtd_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- arg);
- }
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int rtd_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct rtd_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int timer;
-
- /* stop anything currently running */
- /* pacer stop source: SOFTWARE */
- writel(0, dev->mmio + LAS0_PACER_STOP);
- writel(0, dev->mmio + LAS0_PACER); /* stop pacer */
- writel(0, dev->mmio + LAS0_ADC_CONVERSION);
- writew(0, dev->mmio + LAS0_IT);
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
- writel(0, dev->mmio + LAS0_OVERRUN);
-
- /* start configuration */
- /* load channel list and reset CGT */
- rtd_load_channelgain_list(dev, cmd->chanlist_len, cmd->chanlist);
-
- /* setup the common case and override if needed */
- if (cmd->chanlist_len > 1) {
- /* pacer start source: SOFTWARE */
- writel(0, dev->mmio + LAS0_PACER_START);
- /* burst trigger source: PACER */
- writel(1, dev->mmio + LAS0_BURST_START);
- /* ADC conversion trigger source: BURST */
- writel(2, dev->mmio + LAS0_ADC_CONVERSION);
- } else { /* single channel */
- /* pacer start source: SOFTWARE */
- writel(0, dev->mmio + LAS0_PACER_START);
- /* ADC conversion trigger source: PACER */
- writel(1, dev->mmio + LAS0_ADC_CONVERSION);
- }
- writel((devpriv->fifosz / 2 - 1) & 0xffff, dev->mmio + LAS0_ACNT);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* scan_begin_arg is in nanoseconds */
- /* find out how many samples to wait before transferring */
- if (cmd->flags & CMDF_WAKE_EOS) {
- /*
-			 * this may generate unsustainable interrupt rates
- * the application is responsible for doing the
- * right thing
- */
- devpriv->xfer_count = cmd->chanlist_len;
- devpriv->flags |= SEND_EOS;
- } else {
- /* arrange to transfer data periodically */
- devpriv->xfer_count =
- (TRANS_TARGET_PERIOD * cmd->chanlist_len) /
- cmd->scan_begin_arg;
- if (devpriv->xfer_count < cmd->chanlist_len) {
- /* transfer after each scan (and avoid 0) */
- devpriv->xfer_count = cmd->chanlist_len;
- } else { /* make a multiple of scan length */
- devpriv->xfer_count =
- (devpriv->xfer_count +
- cmd->chanlist_len - 1)
- / cmd->chanlist_len;
- devpriv->xfer_count *= cmd->chanlist_len;
- }
- devpriv->flags |= SEND_EOS;
- }
- if (devpriv->xfer_count >= (devpriv->fifosz / 2)) {
- /* out of counter range, use 1/2 fifo instead */
- devpriv->xfer_count = 0;
- devpriv->flags &= ~SEND_EOS;
- } else {
- /* interrupt for each transfer */
- writel((devpriv->xfer_count - 1) & 0xffff,
- dev->mmio + LAS0_ACNT);
- }
- } else { /* unknown timing, just use 1/2 FIFO */
- devpriv->xfer_count = 0;
- devpriv->flags &= ~SEND_EOS;
- }
- /* pacer clock source: INTERNAL 8MHz */
- writel(1, dev->mmio + LAS0_PACER_SELECT);
- /* just interrupt, don't stop */
- writel(1, dev->mmio + LAS0_ACNT_STOP_ENABLE);
-
- /* BUG??? these look like enumerated values, but they are bit fields */
-
- /* First, setup when to stop */
- switch (cmd->stop_src) {
- case TRIG_COUNT: /* stop after N scans */
- devpriv->ai_count = cmd->stop_arg * cmd->chanlist_len;
- if ((devpriv->xfer_count > 0) &&
- (devpriv->xfer_count > devpriv->ai_count)) {
- devpriv->xfer_count = devpriv->ai_count;
- }
- break;
-
- case TRIG_NONE: /* stop when cancel is called */
- devpriv->ai_count = -1; /* read forever */
- break;
- }
-
- /* Scan timing */
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER: /* periodic scanning */
- timer = rtd_ns_to_timer(&cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST);
- /* set PACER clock */
- writel(timer & 0xffffff, dev->mmio + LAS0_PCLK);
-
- break;
-
- case TRIG_EXT:
- /* pacer start source: EXTERNAL */
- writel(1, dev->mmio + LAS0_PACER_START);
- break;
- }
-
- /* Sample timing within a scan */
- switch (cmd->convert_src) {
- case TRIG_TIMER: /* periodic */
- if (cmd->chanlist_len > 1) {
- /* only needed for multi-channel */
- timer = rtd_ns_to_timer(&cmd->convert_arg,
- CMDF_ROUND_NEAREST);
- /* setup BURST clock */
- writel(timer & 0x3ff, dev->mmio + LAS0_BCLK);
- }
-
- break;
-
- case TRIG_EXT: /* external */
- /* burst trigger source: EXTERNAL */
- writel(2, dev->mmio + LAS0_BURST_START);
- break;
- }
- /* end configuration */
-
- /*
- * This doesn't seem to work. There is no way to clear an interrupt
- * that the priority controller has queued!
- */
- writew(~0, dev->mmio + LAS0_CLEAR);
- readw(dev->mmio + LAS0_CLEAR);
-
- /* TODO: allow multiple interrupt sources */
- /* transfer every N samples */
- writew(IRQM_ADC_ABOUT_CNT, dev->mmio + LAS0_IT);
-
- /* BUG: start_src is ASSUMED to be TRIG_NOW */
- /* BUG? it seems like things are running before the "start" */
- readl(dev->mmio + LAS0_PACER); /* start pacer */
- return 0;
-}
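
To illustrate the periodic-transfer sizing above with purely illustrative numbers: a 4-channel list scanned every 1 ms against an 8K-sample FIFO gives a transfer count of 40 samples, i.e. roughly one interrupt per 10 ms, as this standalone sketch shows:

#include <stdio.h>

#define TARGET_PERIOD	10000000ULL	/* 10 ms, same as TRANS_TARGET_PERIOD */

int main(void)
{
	unsigned int chanlist_len = 4;		/* illustrative values */
	unsigned int scan_begin_arg = 1000000;	/* 1 ms per scan */
	unsigned int fifosz = 8192;		/* samples */
	unsigned int xfer;

	xfer = TARGET_PERIOD * chanlist_len / scan_begin_arg;
	if (xfer < chanlist_len)
		xfer = chanlist_len;		/* at least one full scan */
	else					/* round up to a whole number of scans */
		xfer = (xfer + chanlist_len - 1) / chanlist_len * chanlist_len;

	if (xfer >= fifosz / 2)
		xfer = 0;			/* fall back to 1/2-FIFO transfers */

	printf("xfer=%u samples per interrupt\n", xfer);	/* xfer=40 */
	return 0;
}
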
-
-static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct rtd_private *devpriv = dev->private;
-
- /* pacer stop source: SOFTWARE */
- writel(0, dev->mmio + LAS0_PACER_STOP);
- writel(0, dev->mmio + LAS0_PACER); /* stop pacer */
- writel(0, dev->mmio + LAS0_ADC_CONVERSION);
- writew(0, dev->mmio + LAS0_IT);
- devpriv->ai_count = 0; /* stop and don't transfer any more */
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
- return 0;
-}
-
-static int rtd_ao_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int bit = (chan == 0) ? FS_DAC1_NOT_EMPTY : FS_DAC2_NOT_EMPTY;
- unsigned int status;
-
- status = readl(dev->mmio + LAS0_ADC);
- if (status & bit)
- return 0;
- return -EBUSY;
-}
-
-static int rtd_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct rtd_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- int ret;
- int i;
-
- /* Configure the output range (table index matches the range values) */
- writew(range & 7, dev->mmio + LAS0_DAC_CTRL(chan));
-
- for (i = 0; i < insn->n; ++i) {
- unsigned int val = data[i];
-
- /* bipolar uses 2's complement values with an extended sign */
- if (comedi_range_is_bipolar(s, range)) {
- val = comedi_offset_munge(s, val);
- val |= (val & ((s->maxdata + 1) >> 1)) << 1;
- }
-
- /* shift the 12-bit data (+ sign) to match the register */
- val <<= 3;
-
- writew(val, devpriv->las1 + LAS1_DAC_FIFO(chan));
- writew(0, dev->mmio + LAS0_UPDATE_DAC(chan));
-
- ret = comedi_timeout(dev, s, insn, rtd_ao_eoc, 0);
- if (ret)
- return ret;
-
- s->readback[chan] = data[i];
- }
-
- return insn->n;
-}
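
The bipolar conversion above can be traced with a standalone sketch for the most negative output code; it assumes comedi_offset_munge() simply flips the most significant bit (offset binary to two's complement), which is what the inline XOR below does:

#include <stdio.h>

int main(void)
{
	unsigned int maxdata = 0x0fff;	/* 12-bit DAC */
	unsigned int val = 0;		/* comedi code 0 = negative full scale */

	val ^= (maxdata + 1) >> 1;			/* offset binary -> 2's complement: 0x800 */
	val |= (val & ((maxdata + 1) >> 1)) << 1;	/* extend the sign into bit 12 */
	val <<= 3;					/* left-justify for the DAC FIFO register */

	printf("0x%04x\n", val);	/* prints 0xc000 (13-bit -2048, shifted) */
	return 0;
}
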
-
-static int rtd_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- writew(s->state & 0xff, dev->mmio + LAS0_DIO0);
-
- data[1] = readw(dev->mmio + LAS0_DIO0) & 0xff;
-
- return insn->n;
-}
-
-static int rtd_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- /* TODO support digital match interrupts and strobes */
-
- /* set direction */
- writew(0x01, dev->mmio + LAS0_DIO_STATUS);
- writew(s->io_bits & 0xff, dev->mmio + LAS0_DIO0_CTRL);
-
- /* clear interrupts */
- writew(0x00, dev->mmio + LAS0_DIO_STATUS);
-
- /* port1 can only be all input or all output */
-
- /* there are also 2 user input lines and 2 user output lines */
-
- return insn->n;
-}
-
-static int rtd_counter_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct rtd_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int max_src;
- unsigned int src;
-
- switch (data[0]) {
- case INSN_CONFIG_SET_GATE_SRC:
- /*
- * 8254 Timer/Counter gate sources:
- *
- * 0 = Not gated, free running (reset state)
- * 1 = Gated, off
- * 2 = Ext. TC Gate 1
- * 3 = Ext. TC Gate 2
- * 4 = Previous TC out (chan 1 and 2 only)
- */
- src = data[2];
- max_src = (chan == 0) ? 3 : 4;
- if (src > max_src)
- return -EINVAL;
-
- devpriv->timer_gate_src[chan] = src;
- writeb(src, dev->mmio + LAS0_8254_GATE_SEL(chan));
- break;
- case INSN_CONFIG_GET_GATE_SRC:
- data[2] = devpriv->timer_gate_src[chan];
- break;
- case INSN_CONFIG_SET_CLOCK_SRC:
- /*
- * 8254 Timer/Counter clock sources:
- *
- * 0 = 8 MHz (reset state)
- * 1 = Ext. TC Clock 1
-		 * 2 = Ext. TC Clock 2
- * 3 = Ext. Pacer Clock
- * 4 = Previous TC out (chan 1 and 2 only)
- * 5 = High-Speed Digital Input Sampling signal (chan 1 only)
- */
- src = data[1];
- switch (chan) {
- case 0:
- max_src = 3;
- break;
- case 1:
- max_src = 5;
- break;
- case 2:
- max_src = 4;
- break;
- default:
- return -EINVAL;
- }
- if (src > max_src)
- return -EINVAL;
-
- devpriv->timer_clk_src[chan] = src;
- writeb(src, dev->mmio + LAS0_8254_CLK_SEL(chan));
- break;
- case INSN_CONFIG_GET_CLOCK_SRC:
- src = devpriv->timer_clk_src[chan];
- data[1] = devpriv->timer_clk_src[chan];
- data[2] = (src == 0) ? RTD_CLOCK_BASE : 0;
- break;
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
-
-static void rtd_reset(struct comedi_device *dev)
-{
- struct rtd_private *devpriv = dev->private;
-
- writel(0, dev->mmio + LAS0_BOARD_RESET);
- usleep_range(100, 1000); /* needed? */
- writel(0, devpriv->lcfg + PLX_INTRCS_REG);
- writew(0, dev->mmio + LAS0_IT);
- writew(~0, dev->mmio + LAS0_CLEAR);
- readw(dev->mmio + LAS0_CLEAR);
-}
-
-/*
- * initialize board, per RTD spec
- * also, initialize shadow registers
- */
-static void rtd_init_board(struct comedi_device *dev)
-{
- rtd_reset(dev);
-
- writel(0, dev->mmio + LAS0_OVERRUN);
- writel(0, dev->mmio + LAS0_CGT_CLEAR);
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
- writel(0, dev->mmio + LAS0_DAC_RESET(0));
- writel(0, dev->mmio + LAS0_DAC_RESET(1));
- /* clear digital IO fifo */
- writew(0, dev->mmio + LAS0_DIO_STATUS);
- /* TODO: set user out source ??? */
-}
-
-/* The RTD driver does this */
-static void rtd_pci_latency_quirk(struct comedi_device *dev,
- struct pci_dev *pcidev)
-{
- unsigned char pci_latency;
-
- pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency);
- if (pci_latency < 32) {
- dev_info(dev->class_dev,
- "PCI latency changed from %d to %d\n",
- pci_latency, 32);
- pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, 32);
- }
-}
-
-static int rtd_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct rtd_boardinfo *board = NULL;
- struct rtd_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- if (context < ARRAY_SIZE(rtd520_boards))
- board = &rtd520_boards[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 2);
- devpriv->las1 = pci_ioremap_bar(pcidev, 3);
- devpriv->lcfg = pci_ioremap_bar(pcidev, 0);
- if (!dev->mmio || !devpriv->las1 || !devpriv->lcfg)
- return -ENOMEM;
-
- rtd_pci_latency_quirk(dev, pcidev);
-
- if (pcidev->irq) {
- ret = request_irq(pcidev->irq, rtd_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON | SDF_DIFF;
- s->n_chan = 16;
- s->maxdata = 0x0fff;
- s->range_table = board->ai_range;
- s->len_chanlist = RTD_MAX_CHANLIST;
- s->insn_read = rtd_ai_rinsn;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmd = rtd_ai_cmd;
- s->do_cmdtest = rtd_ai_cmdtest;
- s->cancel = rtd_ai_cancel;
- }
-
- s = &dev->subdevices[1];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table = &rtd_ao_range;
- s->insn_write = rtd_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- /* digital i/o subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- /* we only support port 0 right now. Ignoring port 1 and user IO */
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = rtd_dio_insn_bits;
- s->insn_config = rtd_dio_insn_config;
-
- /* 8254 Timer/Counter subdevice */
- s = &dev->subdevices[3];
- dev->pacer = comedi_8254_mm_init(dev->mmio + LAS0_8254_TIMER_BASE,
- RTD_CLOCK_BASE, I8254_IO8, 2);
- if (!dev->pacer)
- return -ENOMEM;
-
- comedi_8254_subdevice_init(s, dev->pacer);
- dev->pacer->insn_config = rtd_counter_insn_config;
-
- rtd_init_board(dev);
-
- ret = rtd520_probe_fifo_depth(dev);
- if (ret < 0)
- return ret;
- devpriv->fifosz = ret;
-
- if (dev->irq)
- writel(ICS_PIE | ICS_PLIE, devpriv->lcfg + PLX_INTRCS_REG);
-
- return 0;
-}
-
-static void rtd_detach(struct comedi_device *dev)
-{
- struct rtd_private *devpriv = dev->private;
-
- if (devpriv) {
- /* Shut down any board ops by resetting it */
- if (dev->mmio && devpriv->lcfg)
- rtd_reset(dev);
- if (dev->irq)
- free_irq(dev->irq, dev);
- if (dev->mmio)
- iounmap(dev->mmio);
- if (devpriv->las1)
- iounmap(devpriv->las1);
- if (devpriv->lcfg)
- iounmap(devpriv->lcfg);
- }
- comedi_pci_disable(dev);
-}
-
-static struct comedi_driver rtd520_driver = {
- .driver_name = "rtd520",
- .module = THIS_MODULE,
- .auto_attach = rtd_auto_attach,
- .detach = rtd_detach,
-};
-
-static int rtd520_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &rtd520_driver, id->driver_data);
-}
-
-static const struct pci_device_id rtd520_pci_table[] = {
- { PCI_VDEVICE(RTD, 0x7520), BOARD_DM7520 },
- { PCI_VDEVICE(RTD, 0x4520), BOARD_PCI4520 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, rtd520_pci_table);
-
-static struct pci_driver rtd520_pci_driver = {
- .name = "rtd520",
- .id_table = rtd520_pci_table,
- .probe = rtd520_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(rtd520_driver, rtd520_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
deleted file mode 100644
index cd61d2645af4..000000000000
--- a/drivers/staging/comedi/drivers/rti800.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * comedi/drivers/rti800.c
- * Hardware driver for Analog Devices RTI-800/815 board
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: rti800
- * Description: Analog Devices RTI-800/815
- * Devices: [Analog Devices] RTI-800 (rti800), RTI-815 (rti815)
- * Author: David A. Schleef <ds@schleef.org>
- * Status: unknown
- * Updated: Fri, 05 Sep 2008 14:50:44 +0100
- *
- * Configuration options:
- * [0] - I/O port base address
- * [1] - IRQ (not supported / unused)
- * [2] - A/D mux/reference (number of channels)
- * 0 = differential
- * 1 = pseudodifferential (common)
- * 2 = single-ended
- * [3] - A/D range
- * 0 = [-10,10]
- * 1 = [-5,5]
- * 2 = [0,10]
- * [4] - A/D encoding
- * 0 = two's complement
- * 1 = straight binary
- * [5] - DAC 0 range
- * 0 = [-10,10]
- * 1 = [0,10]
- * [6] - DAC 0 encoding
- * 0 = two's complement
- * 1 = straight binary
- * [7] - DAC 1 range (same as DAC 0)
- * [8] - DAC 1 encoding (same as DAC 0)
- */
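
The option list above is parsed once, at attach time (see rti800_attach() below); after that the board is driven through the normal comedi instruction interface. A minimal comedilib sketch for reading one raw sample from the AI subdevice, assuming the board has already been configured as /dev/comedi0 (the device node, channel and range index are illustrative assumptions, not taken from this driver):

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	lsampl_t sample;

	if (!dev)
		return 1;
	/* subdevice 0 is the AI subdevice set up in rti800_attach() */
	if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &sample) < 0)
		return 1;
	printf("raw sample: 0x%03x\n", sample);
	comedi_close(dev);
	return 0;
}
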
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-/*
- * Register map
- */
-#define RTI800_CSR 0x00
-#define RTI800_CSR_BUSY BIT(7)
-#define RTI800_CSR_DONE BIT(6)
-#define RTI800_CSR_OVERRUN BIT(5)
-#define RTI800_CSR_TCR BIT(4)
-#define RTI800_CSR_DMA_ENAB BIT(3)
-#define RTI800_CSR_INTR_TC BIT(2)
-#define RTI800_CSR_INTR_EC BIT(1)
-#define RTI800_CSR_INTR_OVRN BIT(0)
-#define RTI800_MUXGAIN 0x01
-#define RTI800_CONVERT 0x02
-#define RTI800_ADCLO 0x03
-#define RTI800_ADCHI 0x04
-#define RTI800_DAC0LO 0x05
-#define RTI800_DAC0HI 0x06
-#define RTI800_DAC1LO 0x07
-#define RTI800_DAC1HI 0x08
-#define RTI800_CLRFLAGS 0x09
-#define RTI800_DI 0x0a
-#define RTI800_DO 0x0b
-#define RTI800_9513A_DATA 0x0c
-#define RTI800_9513A_CNTRL 0x0d
-#define RTI800_9513A_STATUS 0x0d
-
-static const struct comedi_lrange range_rti800_ai_10_bipolar = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02)
- }
-};
-
-static const struct comedi_lrange range_rti800_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01)
- }
-};
-
-static const struct comedi_lrange range_rti800_ai_unipolar = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
- }
-};
-
-static const struct comedi_lrange *const rti800_ai_ranges[] = {
- &range_rti800_ai_10_bipolar,
- &range_rti800_ai_5_bipolar,
- &range_rti800_ai_unipolar,
-};
-
-static const struct comedi_lrange *const rti800_ao_ranges[] = {
- &range_bipolar10,
- &range_unipolar10,
-};
-
-struct rti800_board {
- const char *name;
- int has_ao;
-};
-
-static const struct rti800_board rti800_boardtypes[] = {
- {
- .name = "rti800",
- }, {
- .name = "rti815",
- .has_ao = 1,
- },
-};
-
-struct rti800_private {
- bool adc_2comp;
- bool dac_2comp[2];
- const struct comedi_lrange *ao_range_type_list[2];
- unsigned char muxgain_bits;
-};
-
-static int rti800_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned char status;
-
- status = inb(dev->iobase + RTI800_CSR);
- if (status & RTI800_CSR_OVERRUN) {
- outb(0, dev->iobase + RTI800_CLRFLAGS);
- return -EOVERFLOW;
- }
- if (status & RTI800_CSR_DONE)
- return 0;
- return -EBUSY;
-}
-
-static int rti800_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct rti800_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int gain = CR_RANGE(insn->chanspec);
- unsigned char muxgain_bits;
- int ret;
- int i;
-
- inb(dev->iobase + RTI800_ADCHI);
- outb(0, dev->iobase + RTI800_CLRFLAGS);
-
- muxgain_bits = chan | (gain << 5);
- if (muxgain_bits != devpriv->muxgain_bits) {
- devpriv->muxgain_bits = muxgain_bits;
- outb(devpriv->muxgain_bits, dev->iobase + RTI800_MUXGAIN);
- /*
-		 * Without a delay here, the RTI800_CSR_OVERRUN bit
- * gets set, and you will have an error.
- */
- if (insn->n > 0) {
- int delay = (gain == 0) ? 10 :
- (gain == 1) ? 20 :
- (gain == 2) ? 40 : 80;
-
- udelay(delay);
- }
- }
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val;
-
- outb(0, dev->iobase + RTI800_CONVERT);
-
- ret = comedi_timeout(dev, s, insn, rti800_ai_eoc, 0);
- if (ret)
- return ret;
-
- val = inb(dev->iobase + RTI800_ADCLO);
- val |= (inb(dev->iobase + RTI800_ADCHI) & 0xf) << 8;
-
- if (devpriv->adc_2comp)
- val = comedi_offset_munge(s, val);
-
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int rti800_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct rti800_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int reg_lo = chan ? RTI800_DAC1LO : RTI800_DAC0LO;
- int reg_hi = chan ? RTI800_DAC1HI : RTI800_DAC0HI;
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- if (devpriv->dac_2comp[chan])
- val = comedi_offset_munge(s, val);
-
- outb(val & 0xff, dev->iobase + reg_lo);
- outb((val >> 8) & 0xff, dev->iobase + reg_hi);
- }
-
- return insn->n;
-}
-
-static int rti800_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- data[1] = inb(dev->iobase + RTI800_DI);
- return insn->n;
-}
-
-static int rti800_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data)) {
- /* Outputs are inverted... */
- outb(s->state ^ 0xff, dev->iobase + RTI800_DO);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int rti800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct rti800_board *board = dev->board_ptr;
- struct rti800_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- outb(0, dev->iobase + RTI800_CSR);
- inb(dev->iobase + RTI800_ADCHI);
- outb(0, dev->iobase + RTI800_CLRFLAGS);
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- devpriv->adc_2comp = (it->options[4] == 0);
- devpriv->dac_2comp[0] = (it->options[6] == 0);
- devpriv->dac_2comp[1] = (it->options[8] == 0);
- /* invalid, forces the MUXGAIN register to be set when first used */
- devpriv->muxgain_bits = 0xff;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = (it->options[2] ? 16 : 8);
- s->insn_read = rti800_ai_insn_read;
- s->maxdata = 0x0fff;
- s->range_table = (it->options[3] < ARRAY_SIZE(rti800_ai_ranges))
- ? rti800_ai_ranges[it->options[3]]
- : &range_unknown;
-
- s = &dev->subdevices[1];
- if (board->has_ao) {
- /* ao subdevice (only on rti815) */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0x0fff;
- s->range_table_list = devpriv->ao_range_type_list;
- devpriv->ao_range_type_list[0] =
- (it->options[5] < ARRAY_SIZE(rti800_ao_ranges))
- ? rti800_ao_ranges[it->options[5]]
- : &range_unknown;
- devpriv->ao_range_type_list[1] =
- (it->options[7] < ARRAY_SIZE(rti800_ao_ranges))
- ? rti800_ao_ranges[it->options[7]]
- : &range_unknown;
- s->insn_write = rti800_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- s = &dev->subdevices[2];
- /* di */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->insn_bits = rti800_di_insn_bits;
- s->maxdata = 1;
- s->range_table = &range_digital;
-
- s = &dev->subdevices[3];
- /* do */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->insn_bits = rti800_do_insn_bits;
- s->maxdata = 1;
- s->range_table = &range_digital;
-
- /*
- * There is also an Am9513 timer on these boards. This subdevice
- * is not currently supported.
- */
-
- return 0;
-}
-
-static struct comedi_driver rti800_driver = {
- .driver_name = "rti800",
- .module = THIS_MODULE,
- .attach = rti800_attach,
- .detach = comedi_legacy_detach,
- .num_names = ARRAY_SIZE(rti800_boardtypes),
- .board_name = &rti800_boardtypes[0].name,
- .offset = sizeof(struct rti800_board),
-};
-module_comedi_driver(rti800_driver);
-
-MODULE_DESCRIPTION("Comedi: RTI-800 Multifunction Analog/Digital board");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
deleted file mode 100644
index 6db58fcfd496..000000000000
--- a/drivers/staging/comedi/drivers/rti802.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * rti802.c
- * Comedi driver for Analog Devices RTI-802 board
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: rti802
- * Description: Analog Devices RTI-802
- * Author: Anders Blomdell <anders.blomdell@control.lth.se>
- * Devices: [Analog Devices] RTI-802 (rti802)
- * Status: works
- *
- * Configuration Options:
- * [0] - i/o base
- * [1] - unused
- * [2,4,6,8,10,12,14,16] - dac#[0-7] 0=bipolar, 1=unipolar
- * [3,5,7,9,11,13,15,17] - dac#[0-7] 0=two's comp, 1=straight
- */
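
An illustrative option list for the layout above (the I/O base is an assumption; the index arithmetic mirrors rti802_attach() below, where options[2 + 2 * i] selects the range and options[3 + 2 * i] selects the coding of DAC channel i):

static const int rti802_example_options[18] = {
	0x240,	/* [0] hypothetical i/o base */
	0,	/* [1] unused */
	1, 1,	/* DAC0: unipolar, straight binary */
	0, 0,	/* DAC1: bipolar, two's complement */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* DAC2..DAC7: bipolar, two's complement */
};
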
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-/*
- * Register I/O map
- */
-#define RTI802_SELECT 0x00
-#define RTI802_DATALOW 0x01
-#define RTI802_DATAHIGH 0x02
-
-struct rti802_private {
- enum {
- dac_2comp, dac_straight
- } dac_coding[8];
- const struct comedi_lrange *range_type_list[8];
-};
-
-static int rti802_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct rti802_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- outb(chan, dev->iobase + RTI802_SELECT);
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val = data[i];
-
- s->readback[chan] = val;
-
- /* munge offset binary to two's complement if needed */
- if (devpriv->dac_coding[chan] == dac_2comp)
- val = comedi_offset_munge(s, val);
-
- outb(val & 0xff, dev->iobase + RTI802_DATALOW);
- outb((val >> 8) & 0xff, dev->iobase + RTI802_DATAHIGH);
- }
-
- return insn->n;
-}
-
-static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct rti802_private *devpriv;
- struct comedi_subdevice *s;
- int i;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x04);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 0xfff;
- s->n_chan = 8;
- s->insn_write = rti802_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s->range_table_list = devpriv->range_type_list;
- for (i = 0; i < 8; i++) {
- devpriv->dac_coding[i] = (it->options[3 + 2 * i])
- ? (dac_straight) : (dac_2comp);
- devpriv->range_type_list[i] = (it->options[2 + 2 * i])
- ? &range_unipolar10 : &range_bipolar10;
- }
-
- return 0;
-}
-
-static struct comedi_driver rti802_driver = {
- .driver_name = "rti802",
- .module = THIS_MODULE,
- .attach = rti802_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(rti802_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for Analog Devices RTI-802 board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
deleted file mode 100644
index d70c97947627..000000000000
--- a/drivers/staging/comedi/drivers/s526.c
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * s526.c
- * Sensoray s526 Comedi driver
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: s526
- * Description: Sensoray 526 driver
- * Devices: [Sensoray] 526 (s526)
- * Author: Richie
- * Everett Wang <everett.wang@everteq.com>
- * Updated: Thu, 14 Sep. 2006
- * Status: experimental
- *
- * Encoder works
- * Analog input works
- * Analog output works
- * PWM output works
- * Commands are not supported yet.
- *
- * Configuration Options:
- * [0] - I/O port base address
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-#include <asm/byteorder.h>
-
-/*
- * Register I/O map
- */
-#define S526_TIMER_REG 0x00
-#define S526_TIMER_LOAD(x) (((x) & 0xff) << 8)
-#define S526_TIMER_MODE(x)	((x) << 1)
-#define S526_TIMER_MANUAL S526_TIMER_MODE(0)
-#define S526_TIMER_AUTO S526_TIMER_MODE(1)
-#define S526_TIMER_RESTART BIT(0)
-#define S526_WDOG_REG 0x02
-#define S526_WDOG_INVERTED BIT(4)
-#define S526_WDOG_ENA BIT(3)
-#define S526_WDOG_INTERVAL(x) (((x) & 0x7) << 0)
-#define S526_AO_CTRL_REG 0x04
-#define S526_AO_CTRL_RESET BIT(3)
-#define S526_AO_CTRL_CHAN(x) (((x) & 0x3) << 1)
-#define S526_AO_CTRL_START BIT(0)
-#define S526_AI_CTRL_REG 0x06
-#define S526_AI_CTRL_DELAY BIT(15)
-#define S526_AI_CTRL_CONV(x) (1 << (5 + ((x) & 0x9)))
-#define S526_AI_CTRL_READ(x) (((x) & 0xf) << 1)
-#define S526_AI_CTRL_START BIT(0)
-#define S526_AO_REG 0x08
-#define S526_AI_REG 0x08
-#define S526_DIO_CTRL_REG 0x0a
-#define S526_DIO_CTRL_DIO3_NEG BIT(15) /* irq on DIO3 neg/pos edge */
-#define S526_DIO_CTRL_DIO2_NEG BIT(14) /* irq on DIO2 neg/pos edge */
-#define S526_DIO_CTRL_DIO1_NEG BIT(13) /* irq on DIO1 neg/pos edge */
-#define S526_DIO_CTRL_DIO0_NEG BIT(12) /* irq on DIO0 neg/pos edge */
-#define S526_DIO_CTRL_GRP2_OUT BIT(11)
-#define S526_DIO_CTRL_GRP1_OUT BIT(10)
-#define S526_DIO_CTRL_GRP2_NEG BIT(8) /* irq on DIO[4-7] neg/pos edge */
-#define S526_INT_ENA_REG 0x0c
-#define S526_INT_STATUS_REG 0x0e
-#define S526_INT_DIO(x) BIT(8 + ((x) & 0x7))
-#define S526_INT_EEPROM BIT(7) /* status only */
-#define S526_INT_CNTR(x) BIT(3 + (3 - ((x) & 0x3)))
-#define S526_INT_AI BIT(2)
-#define S526_INT_AO BIT(1)
-#define S526_INT_TIMER BIT(0)
-#define S526_MISC_REG 0x10
-#define S526_MISC_LED_OFF BIT(0)
-#define S526_GPCT_LSB_REG(x) (0x12 + ((x) * 8))
-#define S526_GPCT_MSB_REG(x) (0x14 + ((x) * 8))
-#define S526_GPCT_MODE_REG(x) (0x16 + ((x) * 8))
-#define S526_GPCT_CTRL_REG(x) (0x18 + ((x) * 8))
-#define S526_EEPROM_DATA_REG 0x32
-#define S526_EEPROM_CTRL_REG 0x34
-#define S526_EEPROM_CTRL_ADDR(x) (((x) & 0x3f) << 3)
-#define S526_EEPROM_CTRL(x) (((x) & 0x3) << 1)
-#define S526_EEPROM_CTRL_READ S526_EEPROM_CTRL(2)
-#define S526_EEPROM_CTRL_START BIT(0)
-
-struct counter_mode_register_t {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned short coutSource:1;
- unsigned short coutPolarity:1;
- unsigned short autoLoadResetRcap:3;
- unsigned short hwCtEnableSource:2;
- unsigned short ctEnableCtrl:2;
- unsigned short clockSource:2;
- unsigned short countDir:1;
- unsigned short countDirCtrl:1;
- unsigned short outputRegLatchCtrl:1;
- unsigned short preloadRegSel:1;
- unsigned short reserved:1;
- #elif defined(__BIG_ENDIAN_BITFIELD)
- unsigned short reserved:1;
- unsigned short preloadRegSel:1;
- unsigned short outputRegLatchCtrl:1;
- unsigned short countDirCtrl:1;
- unsigned short countDir:1;
- unsigned short clockSource:2;
- unsigned short ctEnableCtrl:2;
- unsigned short hwCtEnableSource:2;
- unsigned short autoLoadResetRcap:3;
- unsigned short coutPolarity:1;
- unsigned short coutSource:1;
-#else
-#error Unknown bit field order
-#endif
-};
-
-union cmReg {
- struct counter_mode_register_t reg;
- unsigned short value;
-};
-
-struct s526_private {
- unsigned int gpct_config[4];
- unsigned short ai_ctrl;
-};
-
-static void s526_gpct_write(struct comedi_device *dev,
- unsigned int chan, unsigned int val)
-{
- /* write high word then low word */
- outw((val >> 16) & 0xffff, dev->iobase + S526_GPCT_MSB_REG(chan));
- outw(val & 0xffff, dev->iobase + S526_GPCT_LSB_REG(chan));
-}
-
-static unsigned int s526_gpct_read(struct comedi_device *dev,
- unsigned int chan)
-{
- unsigned int val;
-
- /* read the low word then high word */
- val = inw(dev->iobase + S526_GPCT_LSB_REG(chan)) & 0xffff;
- val |= (inw(dev->iobase + S526_GPCT_MSB_REG(chan)) & 0xff) << 16;
-
- return val;
-}
-
-static int s526_gpct_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = s526_gpct_read(dev, chan);
-
- return insn->n;
-}
-
-static int s526_gpct_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct s526_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- union cmReg cmReg;
-
- /*
- * Check what type of Counter the user requested
- * data[0] contains the Application type
- */
- switch (data[0]) {
- case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
- /*
- * data[0]: Application Type
- * data[1]: Counter Mode Register Value
- * data[2]: Pre-load Register Value
-		 * data[3]: Counter Control Register
- */
- devpriv->gpct_config[chan] = data[0];
-
-#if 1
- /* Set Counter Mode Register */
- cmReg.value = data[1] & 0xffff;
- outw(cmReg.value, dev->iobase + S526_GPCT_MODE_REG(chan));
-
- /* Reset the counter if it is software preload */
- if (cmReg.reg.autoLoadResetRcap == 0) {
- /* Reset the counter */
- outw(0x8000, dev->iobase + S526_GPCT_CTRL_REG(chan));
- /* Load the counter from PR0
- * outw(0x4000, dev->iobase + S526_GPCT_CTRL_REG(chan));
- */
- }
-#else
- /* 0 quadrature, 1 software control */
- cmReg.reg.countDirCtrl = 0;
-
- /* data[1] contains GPCT_X1, GPCT_X2 or GPCT_X4 */
- if (data[1] == GPCT_X2)
- cmReg.reg.clockSource = 1;
- else if (data[1] == GPCT_X4)
- cmReg.reg.clockSource = 2;
- else
- cmReg.reg.clockSource = 0;
-
-		/* When to take the index pulse into account: */
- /*
- * if (data[2] == GPCT_IndexPhaseLowLow) {
- * } else if (data[2] == GPCT_IndexPhaseLowHigh) {
- * } else if (data[2] == GPCT_IndexPhaseHighLow) {
- * } else if (data[2] == GPCT_IndexPhaseHighHigh) {
- * }
- */
- /* Take into account the index pulse? */
- if (data[3] == GPCT_RESET_COUNTER_ON_INDEX)
- /* Auto load with INDEX^ */
- cmReg.reg.autoLoadResetRcap = 4;
-
- /* Set Counter Mode Register */
- cmReg.value = data[1] & 0xffff;
- outw(cmReg.value, dev->iobase + S526_GPCT_MODE_REG(chan));
-
- /* Load the pre-load register */
- s526_gpct_write(dev, chan, data[2]);
-
- /* Write the Counter Control Register */
- if (data[3])
- outw(data[3] & 0xffff,
- dev->iobase + S526_GPCT_CTRL_REG(chan));
-
- /* Reset the counter if it is software preload */
- if (cmReg.reg.autoLoadResetRcap == 0) {
- /* Reset the counter */
- outw(0x8000, dev->iobase + S526_GPCT_CTRL_REG(chan));
- /* Load the counter from PR0 */
- outw(0x4000, dev->iobase + S526_GPCT_CTRL_REG(chan));
- }
-#endif
- break;
-
- case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
- /*
- * data[0]: Application Type
- * data[1]: Counter Mode Register Value
- * data[2]: Pre-load Register 0 Value
- * data[3]: Pre-load Register 1 Value
-		 * data[4]: Counter Control Register
- */
- devpriv->gpct_config[chan] = data[0];
-
- /* Set Counter Mode Register */
- cmReg.value = data[1] & 0xffff;
- cmReg.reg.preloadRegSel = 0; /* PR0 */
- outw(cmReg.value, dev->iobase + S526_GPCT_MODE_REG(chan));
-
- /* Load the pre-load register 0 */
- s526_gpct_write(dev, chan, data[2]);
-
- /* Set Counter Mode Register */
- cmReg.value = data[1] & 0xffff;
- cmReg.reg.preloadRegSel = 1; /* PR1 */
- outw(cmReg.value, dev->iobase + S526_GPCT_MODE_REG(chan));
-
- /* Load the pre-load register 1 */
- s526_gpct_write(dev, chan, data[3]);
-
- /* Write the Counter Control Register */
- if (data[4]) {
- val = data[4] & 0xffff;
- outw(val, dev->iobase + S526_GPCT_CTRL_REG(chan));
- }
- break;
-
- case INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
- /*
- * data[0]: Application Type
- * data[1]: Counter Mode Register Value
- * data[2]: Pre-load Register 0 Value
- * data[3]: Pre-load Register 1 Value
-		 * data[4]: Counter Control Register
- */
- devpriv->gpct_config[chan] = data[0];
-
- /* Set Counter Mode Register */
- cmReg.value = data[1] & 0xffff;
- cmReg.reg.preloadRegSel = 0; /* PR0 */
- outw(cmReg.value, dev->iobase + S526_GPCT_MODE_REG(chan));
-
- /* Load the pre-load register 0 */
- s526_gpct_write(dev, chan, data[2]);
-
- /* Set Counter Mode Register */
- cmReg.value = data[1] & 0xffff;
- cmReg.reg.preloadRegSel = 1; /* PR1 */
- outw(cmReg.value, dev->iobase + S526_GPCT_MODE_REG(chan));
-
- /* Load the pre-load register 1 */
- s526_gpct_write(dev, chan, data[3]);
-
- /* Write the Counter Control Register */
- if (data[4]) {
- val = data[4] & 0xffff;
- outw(val, dev->iobase + S526_GPCT_CTRL_REG(chan));
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
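
For reference, a userspace sketch of the quadrature-encoder configuration documented above, using comedilib. The subdevice number comes from s526_attach() below; the zeroed mode/pre-load/control values are placeholders, not recommended register settings.

#include <string.h>
#include <comedilib.h>

static int s526_config_encoder(comedi_t *dev, unsigned int chan)
{
	comedi_insn insn;
	lsampl_t data[4];

	data[0] = INSN_CONFIG_GPCT_QUADRATURE_ENCODER;
	data[1] = 0;	/* counter mode register value (board specific) */
	data[2] = 0;	/* pre-load register value */
	data[3] = 0;	/* counter control register */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 4;
	insn.data = data;
	insn.subdev = 0;	/* counter subdevice per s526_attach() */
	insn.chanspec = CR_PACK(chan, 0, AREF_GROUND);

	return comedi_do_insn(dev, &insn);
}
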
-
-static int s526_gpct_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct s526_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- inw(dev->iobase + S526_GPCT_MODE_REG(chan)); /* Is this required? */
-
- /* Check what Application of Counter this channel is configured for */
- switch (devpriv->gpct_config[chan]) {
- case INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
- /*
- * data[0] contains the PULSE_WIDTH
- * data[1] contains the PULSE_PERIOD
- * @pre PULSE_PERIOD > PULSE_WIDTH > 0
- * The above periods must be expressed as a multiple of the
- * pulse frequency on the selected source
- */
- if ((data[1] <= data[0]) || !data[0])
- return -EINVAL;
-
-		/* Fall through to write the PULSE_WIDTH */
-
- case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
- case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
- s526_gpct_write(dev, chan, data[0]);
- break;
-
- default:
- return -EINVAL;
- }
-
- return insn->n;
-}
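
A userspace sketch of the pulse-train write described above (data[0] = width, data[1] = period, with period > width > 0). The subdevice number again comes from s526_attach() below and the helper is an illustrative assumption, not part of the driver.

#include <string.h>
#include <comedilib.h>

static int s526_set_pulse_train(comedi_t *dev, unsigned int chan,
				lsampl_t width, lsampl_t period)
{
	comedi_insn insn;
	lsampl_t data[2] = { width, period };

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_WRITE;
	insn.n = 2;
	insn.data = data;
	insn.subdev = 0;	/* counter subdevice per s526_attach() */
	insn.chanspec = CR_PACK(chan, 0, AREF_GROUND);

	return comedi_do_insn(dev, &insn);
}
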
-
-static int s526_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = inw(dev->iobase + S526_INT_STATUS_REG);
- if (status & context) {
- /* we got our eoc event, clear it */
- outw(context, dev->iobase + S526_INT_STATUS_REG);
- return 0;
- }
- return -EBUSY;
-}
-
-static int s526_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct s526_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int ctrl;
- unsigned int val;
- int ret;
- int i;
-
- ctrl = S526_AI_CTRL_CONV(chan) | S526_AI_CTRL_READ(chan) |
- S526_AI_CTRL_START;
- if (ctrl != devpriv->ai_ctrl) {
- /*
-		 * The multiplexer needs to change; enable the 15us
-		 * delay for the first sample.
- */
- devpriv->ai_ctrl = ctrl;
- ctrl |= S526_AI_CTRL_DELAY;
- }
-
- for (i = 0; i < insn->n; i++) {
- /* trigger conversion */
- outw(ctrl, dev->iobase + S526_AI_CTRL_REG);
- ctrl &= ~S526_AI_CTRL_DELAY;
-
- /* wait for conversion to end */
- ret = comedi_timeout(dev, s, insn, s526_eoc, S526_INT_AI);
- if (ret)
- return ret;
-
- val = inw(dev->iobase + S526_AI_REG);
- data[i] = comedi_offset_munge(s, val);
- }
-
- return insn->n;
-}
-
-static int s526_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int ctrl = S526_AO_CTRL_CHAN(chan);
- unsigned int val = s->readback[chan];
- int ret;
- int i;
-
- outw(ctrl, dev->iobase + S526_AO_CTRL_REG);
- ctrl |= S526_AO_CTRL_START;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
- outw(val, dev->iobase + S526_AO_REG);
- outw(ctrl, dev->iobase + S526_AO_CTRL_REG);
-
- /* wait for conversion to end */
- ret = comedi_timeout(dev, s, insn, s526_eoc, S526_INT_AO);
- if (ret)
- return ret;
- }
- s->readback[chan] = val;
-
- return insn->n;
-}
-
-static int s526_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + S526_DIO_CTRL_REG);
-
- data[1] = inw(dev->iobase + S526_DIO_CTRL_REG) & 0xff;
-
- return insn->n;
-}
-
-static int s526_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- int ret;
-
- /*
- * Digital I/O can be configured as inputs or outputs in
- * groups of 4; DIO group 1 (DIO0-3) and DIO group 2 (DIO4-7).
- */
- if (chan < 4)
- mask = 0x0f;
- else
- mask = 0xf0;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- if (s->io_bits & 0x0f)
- s->state |= S526_DIO_CTRL_GRP1_OUT;
- else
- s->state &= ~S526_DIO_CTRL_GRP1_OUT;
- if (s->io_bits & 0xf0)
- s->state |= S526_DIO_CTRL_GRP2_OUT;
- else
- s->state &= ~S526_DIO_CTRL_GRP2_OUT;
-
- outw(s->state, dev->iobase + S526_DIO_CTRL_REG);
-
- return insn->n;
-}
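
Because direction is selected per group of four lines, a single comedi_dio_config() call flips a whole group. An illustrative comedilib sketch (the device handle is assumed to be open; subdevice 3 is the DIO subdevice set up in s526_attach() below):

#include <comedilib.h>

static int s526_dio_group1_outputs(comedi_t *dev)
{
	/* configuring any of DIO0-3 as output makes all of DIO0-3 outputs */
	return comedi_dio_config(dev, 3, 0, COMEDI_OUTPUT);
}
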
-
-static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct s526_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], 0x40);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_alloc_subdevices(dev, 4);
- if (ret)
- return ret;
-
- /* General-Purpose Counter/Timer (GPCT) */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
- s->n_chan = 4;
- s->maxdata = 0x00ffffff;
- s->insn_read = s526_gpct_rinsn;
- s->insn_config = s526_gpct_insn_config;
- s->insn_write = s526_gpct_winsn;
-
- /*
- * Analog Input subdevice
- * channels 0 to 7 are the regular differential inputs
- * channel 8 is "reference 0" (+10V)
- * channel 9 is "reference 1" (0V)
- */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = 10;
- s->maxdata = 0xffff;
- s->range_table = &range_bipolar10;
- s->len_chanlist = 16;
- s->insn_read = s526_ai_insn_read;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->range_table = &range_bipolar10;
- s->insn_write = s526_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = s526_dio_insn_bits;
- s->insn_config = s526_dio_insn_config;
-
- return 0;
-}
-
-static struct comedi_driver s526_driver = {
- .driver_name = "s526",
- .module = THIS_MODULE,
- .attach = s526_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(s526_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
deleted file mode 100644
index 35f0f676eb28..000000000000
--- a/drivers/staging/comedi/drivers/s626.c
+++ /dev/null
@@ -1,2921 +0,0 @@
-/*
- * comedi/drivers/s626.c
- * Sensoray s626 Comedi driver
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * Based on Sensoray Model 626 Linux driver Version 0.2
- * Copyright (C) 2002-2004 Sensoray Co., Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: s626
- * Description: Sensoray 626 driver
- * Devices: [Sensoray] 626 (s626)
- * Authors: Gianluca Palli <gpalli@deis.unibo.it>,
- * Updated: Fri, 15 Feb 2008 10:28:42 +0000
- * Status: experimental
- *
- * Configuration options: not applicable, uses PCI auto config
- *
- * INSN_CONFIG instructions:
- * analog input:
- * none
- *
- * analog output:
- * none
- *
- * digital channel:
- * s626 has 3 dio subdevices (2,3 and 4) each with 16 i/o channels
- * supported configuration options:
- * INSN_CONFIG_DIO_QUERY
- * COMEDI_INPUT
- * COMEDI_OUTPUT
- *
- * encoder:
- * Every channel must be configured before reading.
- *
- * Example code
- *
- * insn.insn=INSN_CONFIG; //configuration instruction
- * insn.n=1; //number of operation (must be 1)
- * insn.data=&initialvalue; //initial value loaded into encoder
- * //during configuration
- * insn.subdev=5; //encoder subdevice
- * insn.chanspec=CR_PACK(encoder_channel,0,AREF_OTHER); //encoder_channel
- * //to configure
- *
- * comedi_do_insn(cf,&insn); //executing configuration
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "../comedi_pci.h"
-
-#include "s626.h"
-
-struct s626_buffer_dma {
- dma_addr_t physical_base;
- void *logical_base;
-};
-
-struct s626_private {
- uint8_t ai_cmd_running; /* ai_cmd is running */
- unsigned int ai_sample_timer; /* time between samples in
- * units of the timer */
- int ai_convert_count; /* conversion counter */
- unsigned int ai_convert_timer; /* time between conversion in
- * units of the timer */
- uint16_t counter_int_enabs; /* counter interrupt enable mask
- * for MISC2 register */
- uint8_t adc_items; /* number of items in ADC poll list */
- struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1)
- * program */
- struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data
- * and hold DAC data */
- uint32_t *dac_wbuf; /* pointer to logical adrs of DMA buffer
- * used to hold DAC data */
- uint16_t dacpol; /* image of DAC polarity register */
- uint8_t trim_setpoint[12]; /* images of TrimDAC setpoints */
- uint32_t i2c_adrs; /* I2C device address for onboard EEPROM
- * (board rev dependent) */
-};
-
-/* Counter overflow/index event flag masks for RDMISC2. */
-#define S626_INDXMASK(C) (1 << (((C) > 2) ? ((C) * 2 - 1) : ((C) * 2 + 4)))
-#define S626_OVERMASK(C) (1 << (((C) > 2) ? ((C) * 2 + 5) : ((C) * 2 + 10)))
-
-/*
- * Enable/disable a function or test status bit(s) that are accessed
- * through Main Control Registers 1 or 2.
- */
-static void s626_mc_enable(struct comedi_device *dev,
- unsigned int cmd, unsigned int reg)
-{
- unsigned int val = (cmd << 16) | cmd;
-
- mmiowb();
- writel(val, dev->mmio + reg);
-}
-
-static void s626_mc_disable(struct comedi_device *dev,
- unsigned int cmd, unsigned int reg)
-{
- writel(cmd << 16, dev->mmio + reg);
- mmiowb();
-}
-
-static bool s626_mc_test(struct comedi_device *dev,
- unsigned int cmd, unsigned int reg)
-{
- unsigned int val;
-
- val = readl(dev->mmio + reg);
-
- return (val & cmd) ? true : false;
-}
-
-#define S626_BUGFIX_STREG(REGADRS) ((REGADRS) - 4)
-
-/* Write a time slot control record to TSL2. */
-#define S626_VECTPORT(VECTNUM) (S626_P_TSL2 + ((VECTNUM) << 2))
-
-static const struct comedi_lrange s626_range_table = {
- 2, {
- BIP_RANGE(5),
- BIP_RANGE(10)
- }
-};
-
-/*
- * Execute a DEBI transfer. This must be called from within a critical section.
- */
-static void s626_debi_transfer(struct comedi_device *dev)
-{
- static const int timeout = 10000;
- int i;
-
- /* Initiate upload of shadow RAM to DEBI control register */
- s626_mc_enable(dev, S626_MC2_UPLD_DEBI, S626_P_MC2);
-
- /*
- * Wait for completion of upload from shadow RAM to
- * DEBI control register.
- */
- for (i = 0; i < timeout; i++) {
- if (s626_mc_test(dev, S626_MC2_UPLD_DEBI, S626_P_MC2))
- break;
- udelay(1);
- }
- if (i == timeout)
- dev_err(dev->class_dev,
- "Timeout while uploading to DEBI control register\n");
-
- /* Wait until DEBI transfer is done */
- for (i = 0; i < timeout; i++) {
- if (!(readl(dev->mmio + S626_P_PSR) & S626_PSR_DEBI_S))
- break;
- udelay(1);
- }
- if (i == timeout)
- dev_err(dev->class_dev, "DEBI transfer timeout\n");
-}
-
-/*
- * Read a value from a gate array register.
- */
-static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
-{
- /* Set up DEBI control register value in shadow RAM */
- writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
-
- /* Execute the DEBI transfer. */
- s626_debi_transfer(dev);
-
- return readl(dev->mmio + S626_P_DEBIAD);
-}
-
-/*
- * Write a value to a gate array register.
- */
-static void s626_debi_write(struct comedi_device *dev, uint16_t addr,
- uint16_t wdata)
-{
- /* Set up DEBI control register value in shadow RAM */
- writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
- writel(wdata, dev->mmio + S626_P_DEBIAD);
-
- /* Execute the DEBI transfer. */
- s626_debi_transfer(dev);
-}
-
-/*
- * Replace the specified bits in a gate array register. Imports: mask
- * specifies bits that are to be preserved, wdata is new value to be
- * or'd with the masked original.
- */
-static void s626_debi_replace(struct comedi_device *dev, unsigned int addr,
- unsigned int mask, unsigned int wdata)
-{
- unsigned int val;
-
- addr &= 0xffff;
- writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
- s626_debi_transfer(dev);
-
- writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
- val = readl(dev->mmio + S626_P_DEBIAD);
- val &= mask;
- val |= wdata;
- writel(val & 0xffff, dev->mmio + S626_P_DEBIAD);
- s626_debi_transfer(dev);
-}
-
-/* ************** EEPROM ACCESS FUNCTIONS ************** */
-
-static int s626_i2c_handshake_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- bool status;
-
- status = s626_mc_test(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
- if (status)
- return 0;
- return -EBUSY;
-}
-
-static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
-{
- unsigned int ctrl;
- int ret;
-
- /* Write I2C command to I2C Transfer Control shadow register */
- writel(val, dev->mmio + S626_P_I2CCTRL);
-
- /*
- * Upload I2C shadow registers into working registers and
- * wait for upload confirmation.
- */
- s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
- ret = comedi_timeout(dev, NULL, NULL, s626_i2c_handshake_eoc, 0);
- if (ret)
- return ret;
-
- /* Wait until I2C bus transfer is finished or an error occurs */
- do {
- ctrl = readl(dev->mmio + S626_P_I2CCTRL);
- } while ((ctrl & (S626_I2C_BUSY | S626_I2C_ERR)) == S626_I2C_BUSY);
-
- /* Return non-zero if I2C error occurred */
- return ctrl & S626_I2C_ERR;
-}
-
-/* Read uint8_t from EEPROM. */
-static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
-{
- struct s626_private *devpriv = dev->private;
-
- /*
- * Send EEPROM target address:
- * Byte2 = I2C command: write to I2C EEPROM device.
- * Byte1 = EEPROM internal target address.
- * Byte0 = Not sent.
- */
- if (s626_i2c_handshake(dev, S626_I2C_B2(S626_I2C_ATTRSTART,
- devpriv->i2c_adrs) |
- S626_I2C_B1(S626_I2C_ATTRSTOP, addr) |
- S626_I2C_B0(S626_I2C_ATTRNOP, 0)))
- /* Abort function and declare error if handshake failed. */
- return 0;
-
- /*
- * Execute EEPROM read:
- * Byte2 = I2C command: read from I2C EEPROM device.
- * Byte1 receives uint8_t from EEPROM.
- * Byte0 = Not sent.
- */
- if (s626_i2c_handshake(dev, S626_I2C_B2(S626_I2C_ATTRSTART,
- (devpriv->i2c_adrs | 1)) |
- S626_I2C_B1(S626_I2C_ATTRSTOP, 0) |
- S626_I2C_B0(S626_I2C_ATTRNOP, 0)))
- /* Abort function and declare error if handshake failed. */
- return 0;
-
- return (readl(dev->mmio + S626_P_I2CCTRL) >> 16) & 0xff;
-}
-
-/* *********** DAC FUNCTIONS *********** */
-
-/* TrimDac LogicalChan-to-PhysicalChan mapping table. */
-static const uint8_t s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
-
-/* TrimDac LogicalChan-to-EepromAdrs mapping table. */
-static const uint8_t s626_trimadrs[] = {
- 0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
-};
-
-enum {
- s626_send_dac_wait_not_mc1_a2out,
- s626_send_dac_wait_ssr_af2_out,
- s626_send_dac_wait_fb_buffer2_msb_00,
- s626_send_dac_wait_fb_buffer2_msb_ff
-};
-
-static int s626_send_dac_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- switch (context) {
- case s626_send_dac_wait_not_mc1_a2out:
- status = readl(dev->mmio + S626_P_MC1);
- if (!(status & S626_MC1_A2OUT))
- return 0;
- break;
- case s626_send_dac_wait_ssr_af2_out:
- status = readl(dev->mmio + S626_P_SSR);
- if (status & S626_SSR_AF2_OUT)
- return 0;
- break;
- case s626_send_dac_wait_fb_buffer2_msb_00:
- status = readl(dev->mmio + S626_P_FB_BUFFER2);
- if (!(status & 0xff000000))
- return 0;
- break;
- case s626_send_dac_wait_fb_buffer2_msb_ff:
- status = readl(dev->mmio + S626_P_FB_BUFFER2);
- if (status & 0xff000000)
- return 0;
- break;
- default:
- return -EINVAL;
- }
- return -EBUSY;
-}
-
-/*
- * Private helper function: Transmit serial data to DAC via Audio
- * channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
- * dacpol contains valid target image.
- */
-static int s626_send_dac(struct comedi_device *dev, uint32_t val)
-{
- struct s626_private *devpriv = dev->private;
- int ret;
-
- /* START THE SERIAL CLOCK RUNNING ------------- */
-
- /*
- * Assert DAC polarity control and enable gating of DAC serial clock
- * and audio bit stream signals. At this point in time we must be
- * assured of being in time slot 0. If we are not in slot 0, the
- * serial clock and audio stream signals will be disabled; this is
- * because the following s626_debi_write statement (which enables
- * signals to be passed through the gate array) would execute before
- * the trailing edge of WS1/WS3 (which turns off the signals), thus
- * causing the signals to be inactive during the DAC write.
- */
- s626_debi_write(dev, S626_LP_DACPOL, devpriv->dacpol);
-
- /* TRANSFER OUTPUT DWORD VALUE INTO A2'S OUTPUT FIFO ---------------- */
-
- /* Copy DAC setpoint value to DAC's output DMA buffer. */
- /* writel(val, dev->mmio + (uint32_t)devpriv->dac_wbuf); */
- *devpriv->dac_wbuf = val;
-
- /*
- * Enable the output DMA transfer. This will cause the DMAC to copy
- * the DAC's data value to A2's output FIFO. The DMA transfer will
- * then immediately terminate because the protection address is
- * reached upon transfer of the first DWORD value.
- */
- s626_mc_enable(dev, S626_MC1_A2OUT, S626_P_MC1);
-
- /* While the DMA transfer is executing ... */
-
- /*
- * Reset Audio2 output FIFO's underflow flag (along with any
- * other FIFO underflow/overflow flags). When set, this flag
- * will indicate that we have emerged from slot 0.
- */
- writel(S626_ISR_AFOU, dev->mmio + S626_P_ISR);
-
- /*
- * Wait for the DMA transfer to finish so that there will be data
- * available in the FIFO when time slot 1 tries to transfer a DWORD
- * from the FIFO to the output buffer register. We test for DMA
- * Done by polling the DMAC enable flag; this flag is automatically
- * cleared when the transfer has finished.
- */
- ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
- s626_send_dac_wait_not_mc1_a2out);
- if (ret) {
- dev_err(dev->class_dev, "DMA transfer timeout\n");
- return ret;
- }
-
- /* START THE OUTPUT STREAM TO THE TARGET DAC -------------------- */
-
- /*
- * FIFO data is now available, so we enable execution of time slots
- * 1 and higher by clearing the EOS flag in slot 0. Note that SD3
- * will be shifted in and stored in FB_BUFFER2 for end-of-slot-list
- * detection.
- */
- writel(S626_XSD2 | S626_RSD3 | S626_SIB_A2,
- dev->mmio + S626_VECTPORT(0));
-
- /*
- * Wait for slot 1 to execute to ensure that the Packet will be
- * transmitted. This is detected by polling the Audio2 output FIFO
- * underflow flag, which will be set when slot 1 execution has
- * finished transferring the DAC's data DWORD from the output FIFO
- * to the output buffer register.
- */
- ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
- s626_send_dac_wait_ssr_af2_out);
- if (ret) {
- dev_err(dev->class_dev,
- "TSL timeout waiting for slot 1 to execute\n");
- return ret;
- }
-
- /*
- * Set up to trap execution at slot 0 when the TSL sequencer cycles
- * back to slot 0 after executing the EOS in slot 5. Also,
- * simultaneously shift out and in the 0x00 that is ALWAYS the value
- * stored in the last byte to be shifted out of the FIFO's DWORD
- * buffer register.
- */
- writel(S626_XSD2 | S626_XFIFO_2 | S626_RSD2 | S626_SIB_A2 | S626_EOS,
- dev->mmio + S626_VECTPORT(0));
-
- /* WAIT FOR THE TRANSACTION TO FINISH ----------------------- */
-
- /*
- * Wait for the TSL to finish executing all time slots before
- * exiting this function. We must do this so that the next DAC
- * write doesn't start, thereby enabling clock/chip select signals:
- *
-	 * 1. Before the TSL sequence cycles back to slot 0 (which disables
-	 *    the clock/cs signal gating and traps slot list execution): if
-	 *    we have not yet finished slot 5, the clock/cs signals are
-	 *    still gated and we have not finished transmitting the stream.
- *
- * 2. While slots 2-5 are executing due to a late slot 0 trap. In
- * this case, the slot sequence is currently repeating, but with
- * clock/cs signals disabled. We must wait for slot 0 to trap
- * execution before setting up the next DAC setpoint DMA transfer
- * and enabling the clock/cs signals. To detect the end of slot 5,
- * we test for the FB_BUFFER2 MSB contents to be equal to 0xFF. If
- * the TSL has not yet finished executing slot 5 ...
- */
- if (readl(dev->mmio + S626_P_FB_BUFFER2) & 0xff000000) {
- /*
- * The trap was set on time and we are still executing somewhere
- * in slots 2-5, so we now wait for slot 0 to execute and trap
- * TSL execution. This is detected when FB_BUFFER2 MSB changes
- * from 0xFF to 0x00, which slot 0 causes to happen by shifting
- * out/in on SD2 the 0x00 that is always referenced by slot 5.
- */
- ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
- s626_send_dac_wait_fb_buffer2_msb_00);
- if (ret) {
- dev_err(dev->class_dev,
- "TSL timeout waiting for slot 0 to execute\n");
- return ret;
- }
- }
- /*
- * Either (1) we were too late setting the slot 0 trap; the TSL
- * sequencer restarted slot 0 before we could set the EOS trap flag,
- * or (2) we were not late and execution is now trapped at slot 0.
- * In either case, we must now change slot 0 so that it will store
- * value 0xFF (instead of 0x00) to FB_BUFFER2 next time it executes.
- * In order to do this, we reprogram slot 0 so that it will shift in
- * SD3, which is driven only by a pull-up resistor.
- */
- writel(S626_RSD3 | S626_SIB_A2 | S626_EOS,
- dev->mmio + S626_VECTPORT(0));
-
- /*
- * Wait for slot 0 to execute, at which time the TSL is setup for
- * the next DAC write. This is detected when FB_BUFFER2 MSB changes
- * from 0x00 to 0xFF.
- */
- ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
- s626_send_dac_wait_fb_buffer2_msb_ff);
- if (ret) {
- dev_err(dev->class_dev,
- "TSL timeout waiting for slot 0 to execute\n");
- return ret;
- }
- return 0;
-}
-
-/*
- * Private helper function: Write setpoint to an application DAC channel.
- */
-static int s626_set_dac(struct comedi_device *dev,
- uint16_t chan, int16_t dacdata)
-{
- struct s626_private *devpriv = dev->private;
- uint16_t signmask;
- uint32_t ws_image;
- uint32_t val;
-
- /*
- * Adjust DAC data polarity and set up Polarity Control Register image.
- */
- signmask = 1 << chan;
- if (dacdata < 0) {
- dacdata = -dacdata;
- devpriv->dacpol |= signmask;
- } else {
- devpriv->dacpol &= ~signmask;
- }
-
- /* Limit DAC setpoint value to valid range. */
- if ((uint16_t)dacdata > 0x1FFF)
- dacdata = 0x1FFF;
-
- /*
- * Set up TSL2 records (aka "vectors") for DAC update. Vectors V2
- * and V3 transmit the setpoint to the target DAC. V4 and V5 send
- * data to a non-existent TrimDac channel just to keep the clock
- * running after sending data to the target DAC. This is necessary
- * to eliminate the clock glitch that would otherwise occur at the
- * end of the target DAC's serial data stream. When the sequence
- * restarts at V0 (after executing V5), the gate array automatically
- * disables gating for the DAC clock and all DAC chip selects.
- */
-
- /* Choose DAC chip select to be asserted */
- ws_image = (chan & 2) ? S626_WS1 : S626_WS2;
- /* Slot 2: Transmit high data byte to target DAC */
- writel(S626_XSD2 | S626_XFIFO_1 | ws_image,
- dev->mmio + S626_VECTPORT(2));
- /* Slot 3: Transmit low data byte to target DAC */
- writel(S626_XSD2 | S626_XFIFO_0 | ws_image,
- dev->mmio + S626_VECTPORT(3));
- /* Slot 4: Transmit to non-existent TrimDac channel to keep clock */
- writel(S626_XSD2 | S626_XFIFO_3 | S626_WS3,
- dev->mmio + S626_VECTPORT(4));
-	/* Slot 5: keep the clock running after the DAC's low data byte */
- writel(S626_XSD2 | S626_XFIFO_2 | S626_WS3 | S626_EOS,
- dev->mmio + S626_VECTPORT(5));
-
- /*
- * Construct and transmit target DAC's serial packet:
- * (A10D DDDD), (DDDD DDDD), (0x0F), (0x00) where A is chan<0>,
- * and D<12:0> is the DAC setpoint. Append a WORD value (that writes
- * to a non-existent TrimDac channel) that serves to keep the clock
- * running after the packet has been sent to the target DAC.
- */
- val = 0x0F000000; /* Continue clock after target DAC data
- * (write to non-existent trimdac). */
- val |= 0x00004000; /* Address the two main dual-DAC devices
- * (TSL's chip select enables target device). */
- val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel
- * within the device. */
- val |= (uint32_t)dacdata; /* Include DAC setpoint data. */
- return s626_send_dac(dev, val);
-}
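
To make the packet layout above concrete, a small helper that packs the value exactly as s626_set_dac() does, assuming dacdata has already been clamped and its sign handled as above (illustration only, not part of the driver); for chan = 1 and dacdata = 0x0abc it yields 0x0f00cabc.

#include <stdint.h>

static inline uint32_t s626_pack_dac_word(unsigned int chan, uint16_t dacdata)
{
	return 0x0f000000 |			/* keep clock running (trimdac NOP) */
	       0x00004000 |			/* address the main dual-DAC devices */
	       ((uint32_t)(chan & 1) << 15) |	/* DAC channel within the device */
	       dacdata;				/* 13-bit setpoint */
}
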
-
-static int s626_write_trim_dac(struct comedi_device *dev,
- uint8_t logical_chan, uint8_t dac_data)
-{
- struct s626_private *devpriv = dev->private;
- uint32_t chan;
-
- /*
- * Save the new setpoint in case the application needs to read it back
- * later.
- */
- devpriv->trim_setpoint[logical_chan] = (uint8_t)dac_data;
-
- /* Map logical channel number to physical channel number. */
- chan = s626_trimchan[logical_chan];
-
- /*
- * Set up TSL2 records for TrimDac write operation. All slots shift
- * 0xFF in from pulled-up SD3 so that the end of the slot sequence
- * can be detected.
- */
-
- /* Slot 2: Send high uint8_t to target TrimDac */
- writel(S626_XSD2 | S626_XFIFO_1 | S626_WS3,
- dev->mmio + S626_VECTPORT(2));
- /* Slot 3: Send low uint8_t to target TrimDac */
- writel(S626_XSD2 | S626_XFIFO_0 | S626_WS3,
- dev->mmio + S626_VECTPORT(3));
- /* Slot 4: Send NOP high uint8_t to DAC0 to keep clock running */
- writel(S626_XSD2 | S626_XFIFO_3 | S626_WS1,
- dev->mmio + S626_VECTPORT(4));
- /* Slot 5: Send NOP low uint8_t to DAC0 */
- writel(S626_XSD2 | S626_XFIFO_2 | S626_WS1 | S626_EOS,
- dev->mmio + S626_VECTPORT(5));
-
- /*
- * Construct and transmit target DAC's serial packet:
- * (0000 AAAA), (DDDD DDDD), (0x00), (0x00) where A<3:0> is the
- * DAC channel's address, and D<7:0> is the DAC setpoint. Append a
- * WORD value (that writes a channel 0 NOP command to a non-existent
- * main DAC channel) that serves to keep the clock running after the
- * packet has been sent to the target DAC.
- */
-
- /*
- * Address the DAC channel within the trimdac device.
- * Include DAC setpoint data.
- */
- return s626_send_dac(dev, (chan << 8) | dac_data);
-}
-
-static int s626_load_trim_dacs(struct comedi_device *dev)
-{
- uint8_t i;
- int ret;
-
- /* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
- for (i = 0; i < ARRAY_SIZE(s626_trimchan); i++) {
- ret = s626_write_trim_dac(dev, i,
- s626_i2c_read(dev, s626_trimadrs[i]));
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/* ****** COUNTER FUNCTIONS ******* */
-
-/*
- * All counter functions address a specific counter by means of the
- * "Counter" argument, which is a logical counter number. The Counter
- * argument may have any of the following legal values: 0=0A, 1=1A,
- * 2=2A, 3=0B, 4=1B, 5=2B.
- */
-
-/*
- * Return/set a counter pair's latch trigger source. 0: On read
- * access, 1: A index latches A, 2: B index latches B, 3: A overflow
- * latches B.
- */
-static void s626_set_latch_source(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- s626_debi_replace(dev, S626_LP_CRB(chan),
- ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC),
- S626_SET_CRB_LATCHSRC(value));
-}
-
-/*
- * Write value into counter preload register.
- */
-static void s626_preload(struct comedi_device *dev,
- unsigned int chan, uint32_t value)
-{
- s626_debi_write(dev, S626_LP_CNTR(chan), value);
- s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16);
-}
-
-/* ****** PRIVATE COUNTER FUNCTIONS ****** */
-
-/*
- * Reset a counter's index and overflow event capture flags.
- */
-static void s626_reset_cap_flags(struct comedi_device *dev,
- unsigned int chan)
-{
- uint16_t set;
-
- set = S626_SET_CRB_INTRESETCMD(1);
- if (chan < 3)
- set |= S626_SET_CRB_INTRESET_A(1);
- else
- set |= S626_SET_CRB_INTRESET_B(1);
-
- s626_debi_replace(dev, S626_LP_CRB(chan), ~S626_CRBMSK_INTCTRL, set);
-}
-
-#ifdef unused
-/*
- * Return counter setup in a format (COUNTER_SETUP) that is consistent
- * for both A and B counters.
- */
-static uint16_t s626_get_mode_a(struct comedi_device *dev,
- unsigned int chan)
-{
- uint16_t cra;
- uint16_t crb;
- uint16_t setup;
-	unsigned int cntsrc, clkmult, clkpol, encmode;
-
- /* Fetch CRA and CRB register images. */
- cra = s626_debi_read(dev, S626_LP_CRA(chan));
- crb = s626_debi_read(dev, S626_LP_CRB(chan));
-
- /*
- * Populate the standardized counter setup bit fields.
- */
- setup =
- /* LoadSrc = LoadSrcA. */
- S626_SET_STD_LOADSRC(S626_GET_CRA_LOADSRC_A(cra)) |
- /* LatchSrc = LatchSrcA. */
- S626_SET_STD_LATCHSRC(S626_GET_CRB_LATCHSRC(crb)) |
- /* IntSrc = IntSrcA. */
- S626_SET_STD_INTSRC(S626_GET_CRA_INTSRC_A(cra)) |
- /* IndxSrc = IndxSrcA. */
- S626_SET_STD_INDXSRC(S626_GET_CRA_INDXSRC_A(cra)) |
- /* IndxPol = IndxPolA. */
- S626_SET_STD_INDXPOL(S626_GET_CRA_INDXPOL_A(cra)) |
- /* ClkEnab = ClkEnabA. */
- S626_SET_STD_CLKENAB(S626_GET_CRB_CLKENAB_A(crb));
-
- /* Adjust mode-dependent parameters. */
- cntsrc = S626_GET_CRA_CNTSRC_A(cra);
- if (cntsrc & S626_CNTSRC_SYSCLK) {
- /* Timer mode (CntSrcA<1> == 1): */
- encmode = S626_ENCMODE_TIMER;
- /* Set ClkPol to indicate count direction (CntSrcA<0>). */
- clkpol = cntsrc & 1;
- /* ClkMult must be 1x in Timer mode. */
- clkmult = S626_CLKMULT_1X;
- } else {
- /* Counter mode (CntSrcA<1> == 0): */
- encmode = S626_ENCMODE_COUNTER;
- /* Pass through ClkPol. */
- clkpol = S626_GET_CRA_CLKPOL_A(cra);
- /* Force ClkMult to 1x if not legal, else pass through. */
- clkmult = S626_GET_CRA_CLKMULT_A(cra);
- if (clkmult == S626_CLKMULT_SPECIAL)
- clkmult = S626_CLKMULT_1X;
- }
- setup |= S626_SET_STD_ENCMODE(encmode) | S626_SET_STD_CLKMULT(clkmult) |
- S626_SET_STD_CLKPOL(clkpol);
-
- /* Return adjusted counter setup. */
- return setup;
-}
-
-static uint16_t s626_get_mode_b(struct comedi_device *dev,
- unsigned int chan)
-{
- uint16_t cra;
- uint16_t crb;
- uint16_t setup;
-	unsigned int cntsrc, clkmult, clkpol, encmode;
-
- /* Fetch CRA and CRB register images. */
- cra = s626_debi_read(dev, S626_LP_CRA(chan));
- crb = s626_debi_read(dev, S626_LP_CRB(chan));
-
- /*
- * Populate the standardized counter setup bit fields.
- */
- setup =
- /* IntSrc = IntSrcB. */
- S626_SET_STD_INTSRC(S626_GET_CRB_INTSRC_B(crb)) |
- /* LatchSrc = LatchSrcB. */
- S626_SET_STD_LATCHSRC(S626_GET_CRB_LATCHSRC(crb)) |
- /* LoadSrc = LoadSrcB. */
- S626_SET_STD_LOADSRC(S626_GET_CRB_LOADSRC_B(crb)) |
- /* IndxPol = IndxPolB. */
- S626_SET_STD_INDXPOL(S626_GET_CRB_INDXPOL_B(crb)) |
- /* ClkEnab = ClkEnabB. */
- S626_SET_STD_CLKENAB(S626_GET_CRB_CLKENAB_B(crb)) |
- /* IndxSrc = IndxSrcB. */
- S626_SET_STD_INDXSRC(S626_GET_CRA_INDXSRC_B(cra));
-
- /* Adjust mode-dependent parameters. */
- cntsrc = S626_GET_CRA_CNTSRC_B(cra);
- clkmult = S626_GET_CRB_CLKMULT_B(crb);
- if (clkmult == S626_CLKMULT_SPECIAL) {
- /* Extender mode (ClkMultB == S626_CLKMULT_SPECIAL): */
- encmode = S626_ENCMODE_EXTENDER;
- /* Indicate multiplier is 1x. */
- clkmult = S626_CLKMULT_1X;
- /* Set ClkPol equal to Timer count direction (CntSrcB<0>). */
- clkpol = cntsrc & 1;
- } else if (cntsrc & S626_CNTSRC_SYSCLK) {
- /* Timer mode (CntSrcB<1> == 1): */
- encmode = S626_ENCMODE_TIMER;
- /* Indicate multiplier is 1x. */
- clkmult = S626_CLKMULT_1X;
- /* Set ClkPol equal to Timer count direction (CntSrcB<0>). */
- clkpol = cntsrc & 1;
- } else {
- /* If Counter mode (CntSrcB<1> == 0): */
- encmode = S626_ENCMODE_COUNTER;
- /* Clock multiplier is passed through. */
- /* Clock polarity is passed through. */
- clkpol = S626_GET_CRB_CLKPOL_B(crb);
- }
- setup |= S626_SET_STD_ENCMODE(encmode) | S626_SET_STD_CLKMULT(clkmult) |
- S626_SET_STD_CLKPOL(clkpol);
-
- /* Return adjusted counter setup. */
- return setup;
-}
-
-static uint16_t s626_get_mode(struct comedi_device *dev,
- unsigned int chan)
-{
- return (chan < 3) ? s626_get_mode_a(dev, chan)
- : s626_get_mode_b(dev, chan);
-}
-#endif
-
-/*
- * Set the operating mode for the specified counter. The setup
- * parameter is treated as a COUNTER_SETUP data type. The following
- * parameters are programmable (all other parms are ignored): ClkMult,
- * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
- */
-static void s626_set_mode_a(struct comedi_device *dev,
- unsigned int chan, uint16_t setup,
- uint16_t disable_int_src)
-{
- struct s626_private *devpriv = dev->private;
- uint16_t cra;
- uint16_t crb;
-	unsigned int cntsrc, clkmult, clkpol;
-
- /* Initialize CRA and CRB images. */
- /* Preload trigger is passed through. */
- cra = S626_SET_CRA_LOADSRC_A(S626_GET_STD_LOADSRC(setup));
- /* IndexSrc is passed through. */
- cra |= S626_SET_CRA_INDXSRC_A(S626_GET_STD_INDXSRC(setup));
-
- /* Reset any pending CounterA event captures. */
- crb = S626_SET_CRB_INTRESETCMD(1) | S626_SET_CRB_INTRESET_A(1);
- /* Clock enable is passed through. */
- crb |= S626_SET_CRB_CLKENAB_A(S626_GET_STD_CLKENAB(setup));
-
- /* Force IntSrc to Disabled if disable_int_src is asserted. */
- if (!disable_int_src)
- cra |= S626_SET_CRA_INTSRC_A(S626_GET_STD_INTSRC(setup));
-
- /* Populate all mode-dependent attributes of CRA & CRB images. */
- clkpol = S626_GET_STD_CLKPOL(setup);
- switch (S626_GET_STD_ENCMODE(setup)) {
- case S626_ENCMODE_EXTENDER: /* Extender Mode: */
- /* Force to Timer mode (Extender valid only for B counters). */
- /* Fall through to case S626_ENCMODE_TIMER: */
- case S626_ENCMODE_TIMER: /* Timer Mode: */
- /* CntSrcA<1> selects system clock */
- cntsrc = S626_CNTSRC_SYSCLK;
- /* Count direction (CntSrcA<0>) obtained from ClkPol. */
- cntsrc |= clkpol;
- /* ClkPolA behaves as always-on clock enable. */
- clkpol = 1;
- /* ClkMult must be 1x. */
- clkmult = S626_CLKMULT_1X;
- break;
- default: /* Counter Mode: */
- /* Select ENC_C and ENC_D as clock/direction inputs. */
- cntsrc = S626_CNTSRC_ENCODER;
- /* Clock polarity is passed through. */
- /* Force multiplier to x1 if not legal, else pass through. */
- clkmult = S626_GET_STD_CLKMULT(setup);
- if (clkmult == S626_CLKMULT_SPECIAL)
- clkmult = S626_CLKMULT_1X;
- break;
- }
- cra |= S626_SET_CRA_CNTSRC_A(cntsrc) | S626_SET_CRA_CLKPOL_A(clkpol) |
- S626_SET_CRA_CLKMULT_A(clkmult);
-
- /*
- * Force positive index polarity if IndxSrc is software-driven only,
- * otherwise pass it through.
- */
- if (S626_GET_STD_INDXSRC(setup) != S626_INDXSRC_SOFT)
- cra |= S626_SET_CRA_INDXPOL_A(S626_GET_STD_INDXPOL(setup));
-
- /*
- * If IntSrc has been forced to Disabled, update the MISC2 interrupt
- * enable mask to indicate the counter interrupt is disabled.
- */
- if (disable_int_src)
- devpriv->counter_int_enabs &= ~(S626_OVERMASK(chan) |
- S626_INDXMASK(chan));
-
- /*
- * While retaining CounterB and LatchSrc configurations, program the
- * new counter operating mode.
- */
- s626_debi_replace(dev, S626_LP_CRA(chan),
- S626_CRAMSK_INDXSRC_B | S626_CRAMSK_CNTSRC_B, cra);
- s626_debi_replace(dev, S626_LP_CRB(chan),
- ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_CLKENAB_A), crb);
-}
-
-static void s626_set_mode_b(struct comedi_device *dev,
- unsigned int chan, uint16_t setup,
- uint16_t disable_int_src)
-{
- struct s626_private *devpriv = dev->private;
- uint16_t cra;
- uint16_t crb;
- unsigned cntsrc, clkmult, clkpol;
-
- /* Initialize CRA and CRB images. */
- /* IndexSrc is passed through. */
- cra = S626_SET_CRA_INDXSRC_B(S626_GET_STD_INDXSRC(setup));
-
- /* Reset event captures and disable interrupts. */
- crb = S626_SET_CRB_INTRESETCMD(1) | S626_SET_CRB_INTRESET_B(1);
- /* Clock enable is passed through. */
- crb |= S626_SET_CRB_CLKENAB_B(S626_GET_STD_CLKENAB(setup));
- /* Preload trigger source is passed through. */
- crb |= S626_SET_CRB_LOADSRC_B(S626_GET_STD_LOADSRC(setup));
-
- /* Force IntSrc to Disabled if disable_int_src is asserted. */
- if (!disable_int_src)
- crb |= S626_SET_CRB_INTSRC_B(S626_GET_STD_INTSRC(setup));
-
- /* Populate all mode-dependent attributes of CRA & CRB images. */
- clkpol = S626_GET_STD_CLKPOL(setup);
- switch (S626_GET_STD_ENCMODE(setup)) {
- case S626_ENCMODE_TIMER: /* Timer Mode: */
- /* CntSrcB<1> selects system clock */
- cntsrc = S626_CNTSRC_SYSCLK;
- /* with direction (CntSrcB<0>) obtained from ClkPol. */
- cntsrc |= clkpol;
- /* ClkPolB behaves as always-on clock enable. */
- clkpol = 1;
- /* ClkMultB must be 1x. */
- clkmult = S626_CLKMULT_1X;
- break;
- case S626_ENCMODE_EXTENDER: /* Extender Mode: */
- /* CntSrcB source is OverflowA (same as "timer") */
- cntsrc = S626_CNTSRC_SYSCLK;
- /* with direction obtained from ClkPol. */
- cntsrc |= clkpol;
- /* ClkPolB controls IndexB -- always set to active. */
- clkpol = 1;
- /* ClkMultB selects OverflowA as the clock source. */
- clkmult = S626_CLKMULT_SPECIAL;
- break;
- default: /* Counter Mode: */
- /* Select ENC_C and ENC_D as clock/direction inputs. */
- cntsrc = S626_CNTSRC_ENCODER;
- /* ClkPol is passed through. */
- /* Force ClkMult to x1 if not legal, otherwise pass through. */
- clkmult = S626_GET_STD_CLKMULT(setup);
- if (clkmult == S626_CLKMULT_SPECIAL)
- clkmult = S626_CLKMULT_1X;
- break;
- }
- cra |= S626_SET_CRA_CNTSRC_B(cntsrc);
- crb |= S626_SET_CRB_CLKPOL_B(clkpol) | S626_SET_CRB_CLKMULT_B(clkmult);
-
- /*
- * Force positive index polarity if IndxSrc is software-driven only,
- * otherwise pass it through.
- */
- if (S626_GET_STD_INDXSRC(setup) != S626_INDXSRC_SOFT)
- crb |= S626_SET_CRB_INDXPOL_B(S626_GET_STD_INDXPOL(setup));
-
- /*
- * If IntSrc has been forced to Disabled, update the MISC2 interrupt
- * enable mask to indicate the counter interrupt is disabled.
- */
- if (disable_int_src)
- devpriv->counter_int_enabs &= ~(S626_OVERMASK(chan) |
- S626_INDXMASK(chan));
-
- /*
- * While retaining CounterA and LatchSrc configurations, program the
- * new counter operating mode.
- */
- s626_debi_replace(dev, S626_LP_CRA(chan),
- ~(S626_CRAMSK_INDXSRC_B | S626_CRAMSK_CNTSRC_B), cra);
- s626_debi_replace(dev, S626_LP_CRB(chan),
- S626_CRBMSK_CLKENAB_A | S626_CRBMSK_LATCHSRC, crb);
-}
-
-static void s626_set_mode(struct comedi_device *dev,
- unsigned int chan,
- uint16_t setup, uint16_t disable_int_src)
-{
- if (chan < 3)
- s626_set_mode_a(dev, chan, setup, disable_int_src);
- else
- s626_set_mode_b(dev, chan, setup, disable_int_src);
-}
-
-/*
- * Return/set a counter's enable. enab: 0=always enabled, 1=enabled by index.
- */
-static void s626_set_enable(struct comedi_device *dev,
- unsigned int chan, uint16_t enab)
-{
- unsigned int mask = S626_CRBMSK_INTCTRL;
- unsigned int set;
-
- if (chan < 3) {
- mask |= S626_CRBMSK_CLKENAB_A;
- set = S626_SET_CRB_CLKENAB_A(enab);
- } else {
- mask |= S626_CRBMSK_CLKENAB_B;
- set = S626_SET_CRB_CLKENAB_B(enab);
- }
- s626_debi_replace(dev, S626_LP_CRB(chan), ~mask, set);
-}
-
-#ifdef unused
-static uint16_t s626_get_enable(struct comedi_device *dev,
- unsigned int chan)
-{
- uint16_t crb = s626_debi_read(dev, S626_LP_CRB(chan));
-
- return (chan < 3) ? S626_GET_CRB_CLKENAB_A(crb)
- : S626_GET_CRB_CLKENAB_B(crb);
-}
-#endif
-
-#ifdef unused
-static uint16_t s626_get_latch_source(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_CRB_LATCHSRC(s626_debi_read(dev, S626_LP_CRB(chan)));
-}
-#endif
-
-/*
- * Return/set the event that will trigger transfer of the preload
- * register into the counter. 0=ThisCntr_Index, 1=ThisCntr_Overflow,
- * 2=OverflowA (B counters only), 3=disabled.
- */
-static void s626_set_load_trig(struct comedi_device *dev,
- unsigned int chan, uint16_t trig)
-{
- uint16_t reg;
- uint16_t mask;
- uint16_t set;
-
- if (chan < 3) {
- reg = S626_LP_CRA(chan);
- mask = S626_CRAMSK_LOADSRC_A;
- set = S626_SET_CRA_LOADSRC_A(trig);
- } else {
- reg = S626_LP_CRB(chan);
- mask = S626_CRBMSK_LOADSRC_B | S626_CRBMSK_INTCTRL;
- set = S626_SET_CRB_LOADSRC_B(trig);
- }
- s626_debi_replace(dev, reg, ~mask, set);
-}
-
-#ifdef unused
-static uint16_t s626_get_load_trig(struct comedi_device *dev,
- unsigned int chan)
-{
- if (chan < 3)
- return S626_GET_CRA_LOADSRC_A(s626_debi_read(dev,
- S626_LP_CRA(chan)));
- else
- return S626_GET_CRB_LOADSRC_B(s626_debi_read(dev,
- S626_LP_CRB(chan)));
-}
-#endif
-
-/*
- * Return/set counter interrupt source and clear any captured
- * index/overflow events. int_source: 0=Disabled, 1=OverflowOnly,
- * 2=IndexOnly, 3=IndexAndOverflow.
- */
-static void s626_set_int_src(struct comedi_device *dev,
- unsigned int chan, uint16_t int_source)
-{
- struct s626_private *devpriv = dev->private;
- uint16_t cra_reg = S626_LP_CRA(chan);
- uint16_t crb_reg = S626_LP_CRB(chan);
-
- if (chan < 3) {
- /* Reset any pending counter overflow or index captures */
- s626_debi_replace(dev, crb_reg, ~S626_CRBMSK_INTCTRL,
- S626_SET_CRB_INTRESETCMD(1) |
- S626_SET_CRB_INTRESET_A(1));
-
- /* Program counter interrupt source */
- s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A,
- S626_SET_CRA_INTSRC_A(int_source));
- } else {
- uint16_t crb;
-
- /* Cache writeable CRB register image */
- crb = s626_debi_read(dev, crb_reg);
- crb &= ~S626_CRBMSK_INTCTRL;
-
- /* Reset any pending counter overflow or index captures */
- s626_debi_write(dev, crb_reg,
- crb | S626_SET_CRB_INTRESETCMD(1) |
- S626_SET_CRB_INTRESET_B(1));
-
- /* Program counter interrupt source */
- s626_debi_write(dev, crb_reg,
- (crb & ~S626_CRBMSK_INTSRC_B) |
- S626_SET_CRB_INTSRC_B(int_source));
- }
-
- /* Update MISC2 interrupt enable mask. */
- devpriv->counter_int_enabs &= ~(S626_OVERMASK(chan) |
- S626_INDXMASK(chan));
- switch (int_source) {
- case 0:
- default:
- break;
- case 1:
- devpriv->counter_int_enabs |= S626_OVERMASK(chan);
- break;
- case 2:
- devpriv->counter_int_enabs |= S626_INDXMASK(chan);
- break;
- case 3:
- devpriv->counter_int_enabs |= (S626_OVERMASK(chan) |
- S626_INDXMASK(chan));
- break;
- }
-}
-
-#ifdef unused
-static uint16_t s626_get_int_src(struct comedi_device *dev,
- unsigned int chan)
-{
- if (chan < 3)
- return S626_GET_CRA_INTSRC_A(s626_debi_read(dev,
- S626_LP_CRA(chan)));
- else
- return S626_GET_CRB_INTSRC_B(s626_debi_read(dev,
- S626_LP_CRB(chan)));
-}
-#endif
-
-#ifdef unused
-/*
- * Return/set the clock multiplier.
- */
-static void s626_set_clk_mult(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_CLKMULT;
- mode |= S626_SET_STD_CLKMULT(value);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-static uint16_t s626_get_clk_mult(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_CLKMULT(s626_get_mode(dev, chan));
-}
-
-/*
- * Return/set the clock polarity.
- */
-static void s626_set_clk_pol(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_CLKPOL;
- mode |= S626_SET_STD_CLKPOL(value);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-static uint16_t s626_get_clk_pol(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_CLKPOL(s626_get_mode(dev, chan));
-}
-
-/*
- * Return/set the encoder mode.
- */
-static void s626_set_enc_mode(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_ENCMODE;
- mode |= S626_SET_STD_ENCMODE(value);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-static uint16_t s626_get_enc_mode(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_ENCMODE(s626_get_mode(dev, chan));
-}
-
-/*
- * Return/set the index polarity.
- */
-static void s626_set_index_pol(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_INDXPOL;
- mode |= S626_SET_STD_INDXPOL(value != 0);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-static uint16_t s626_get_index_pol(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_INDXPOL(s626_get_mode(dev, chan));
-}
-
-/*
- * Return/set the index source.
- */
-static void s626_set_index_src(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_INDXSRC;
- mode |= S626_SET_STD_INDXSRC(value != 0);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-static uint16_t s626_get_index_src(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_INDXSRC(s626_get_mode(dev, chan));
-}
-#endif
-
-/*
- * Generate an index pulse.
- */
-static void s626_pulse_index(struct comedi_device *dev,
- unsigned int chan)
-{
- if (chan < 3) {
- uint16_t cra;
-
- cra = s626_debi_read(dev, S626_LP_CRA(chan));
-
- /* Pulse index */
- s626_debi_write(dev, S626_LP_CRA(chan),
- (cra ^ S626_CRAMSK_INDXPOL_A));
- s626_debi_write(dev, S626_LP_CRA(chan), cra);
- } else {
- uint16_t crb;
-
- crb = s626_debi_read(dev, S626_LP_CRB(chan));
- crb &= ~S626_CRBMSK_INTCTRL;
-
- /* Pulse index */
- s626_debi_write(dev, S626_LP_CRB(chan),
- (crb ^ S626_CRBMSK_INDXPOL_B));
- s626_debi_write(dev, S626_LP_CRB(chan), crb);
- }
-}
-
-static unsigned int s626_ai_reg_to_uint(unsigned int data)
-{
- return ((data >> 18) & 0x3fff) ^ 0x2000;
-}
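/*
 * Illustrative note (a sketch, assuming the FB_BUFFER1 layout implied by
 * the shift above): the raw register word carries the 14-bit
 * two's-complement ADC result in bits 31:18.  The shift and mask isolate
 * those 14 bits, and XOR-ing bit 13 converts two's complement into the
 * 0..0x3fff offset-binary samples comedi expects (the AI subdevice
 * declares maxdata = 0x3fff):
 *
 *   bits 31:18 = 0x0000 (ADC code 0)       -> sample 0x2000 (mid-scale)
 *   bits 31:18 = 0x1fff (most positive)    -> sample 0x3fff
 *   bits 31:18 = 0x2000 (most negative)    -> sample 0x0000
 */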
-
-static int s626_dio_set_irq(struct comedi_device *dev, unsigned int chan)
-{
- unsigned int group = chan / 16;
- unsigned int mask = 1 << (chan - (16 * group));
- unsigned int status;
-
- /* set channel to capture positive edge */
- status = s626_debi_read(dev, S626_LP_RDEDGSEL(group));
- s626_debi_write(dev, S626_LP_WREDGSEL(group), mask | status);
-
- /* enable interrupt on selected channel */
- status = s626_debi_read(dev, S626_LP_RDINTSEL(group));
- s626_debi_write(dev, S626_LP_WRINTSEL(group), mask | status);
-
- /* enable edge capture write command */
- s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_EDCAP);
-
- /* enable edge capture on selected channel */
- status = s626_debi_read(dev, S626_LP_RDCAPSEL(group));
- s626_debi_write(dev, S626_LP_WRCAPSEL(group), mask | status);
-
- return 0;
-}
-
-static int s626_dio_reset_irq(struct comedi_device *dev, unsigned int group,
- unsigned int mask)
-{
- /* disable edge capture write command */
- s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
-
- /* enable edge capture on selected channel */
- s626_debi_write(dev, S626_LP_WRCAPSEL(group), mask);
-
- return 0;
-}
-
-static int s626_dio_clear_irq(struct comedi_device *dev)
-{
- unsigned int group;
-
- /* disable edge capture write command */
- s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
-
- /* clear all dio pending events and interrupt */
- for (group = 0; group < S626_DIO_BANKS; group++)
- s626_debi_write(dev, S626_LP_WRCAPSEL(group), 0xffff);
-
- return 0;
-}
-
-static void s626_handle_dio_interrupt(struct comedi_device *dev,
- uint16_t irqbit, uint8_t group)
-{
- struct s626_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- s626_dio_reset_irq(dev, group, irqbit);
-
- if (devpriv->ai_cmd_running) {
- /* check if interrupt is an ai acquisition start trigger */
- if ((irqbit >> (cmd->start_arg - (16 * group))) == 1 &&
- cmd->start_src == TRIG_EXT) {
- /* Start executing the RPS program */
- s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
-
- if (cmd->scan_begin_src == TRIG_EXT)
- s626_dio_set_irq(dev, cmd->scan_begin_arg);
- }
- if ((irqbit >> (cmd->scan_begin_arg - (16 * group))) == 1 &&
- cmd->scan_begin_src == TRIG_EXT) {
- /* Trigger ADC scan loop start */
- s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
-
- if (cmd->convert_src == TRIG_EXT) {
- devpriv->ai_convert_count = cmd->chanlist_len;
-
- s626_dio_set_irq(dev, cmd->convert_arg);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- devpriv->ai_convert_count = cmd->chanlist_len;
- s626_set_enable(dev, 5, S626_CLKENAB_ALWAYS);
- }
- }
- if ((irqbit >> (cmd->convert_arg - (16 * group))) == 1 &&
- cmd->convert_src == TRIG_EXT) {
- /* Trigger ADC scan loop start */
- s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
-
- devpriv->ai_convert_count--;
- if (devpriv->ai_convert_count > 0)
- s626_dio_set_irq(dev, cmd->convert_arg);
- }
- }
-}
-
-static void s626_check_dio_interrupts(struct comedi_device *dev)
-{
- uint16_t irqbit;
- uint8_t group;
-
- for (group = 0; group < S626_DIO_BANKS; group++) {
- /* read interrupt type */
- irqbit = s626_debi_read(dev, S626_LP_RDCAPFLG(group));
-
- /* check if interrupt is generated from dio channels */
- if (irqbit) {
- s626_handle_dio_interrupt(dev, irqbit, group);
- return;
- }
- }
-}
-
-static void s626_check_counter_interrupts(struct comedi_device *dev)
-{
- struct s626_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- uint16_t irqbit;
-
- /* read interrupt type */
- irqbit = s626_debi_read(dev, S626_LP_RDMISC2);
-
- /* check interrupt on counters */
- if (irqbit & S626_IRQ_COINT1A) {
- /* clear interrupt capture flag */
- s626_reset_cap_flags(dev, 0);
- }
- if (irqbit & S626_IRQ_COINT2A) {
- /* clear interrupt capture flag */
- s626_reset_cap_flags(dev, 1);
- }
- if (irqbit & S626_IRQ_COINT3A) {
- /* clear interrupt capture flag */
- s626_reset_cap_flags(dev, 2);
- }
- if (irqbit & S626_IRQ_COINT1B) {
- /* clear interrupt capture flag */
- s626_reset_cap_flags(dev, 3);
- }
- if (irqbit & S626_IRQ_COINT2B) {
- /* clear interrupt capture flag */
- s626_reset_cap_flags(dev, 4);
-
- if (devpriv->ai_convert_count > 0) {
- devpriv->ai_convert_count--;
- if (devpriv->ai_convert_count == 0)
- s626_set_enable(dev, 4, S626_CLKENAB_INDEX);
-
- if (cmd->convert_src == TRIG_TIMER) {
- /* Trigger ADC scan loop start */
- s626_mc_enable(dev, S626_MC2_ADC_RPS,
- S626_P_MC2);
- }
- }
- }
- if (irqbit & S626_IRQ_COINT3B) {
- /* clear interrupt capture flag */
- s626_reset_cap_flags(dev, 5);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* Trigger ADC scan loop start */
- s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- devpriv->ai_convert_count = cmd->chanlist_len;
- s626_set_enable(dev, 4, S626_CLKENAB_ALWAYS);
- }
- }
-}
-
-static bool s626_handle_eos_interrupt(struct comedi_device *dev)
-{
- struct s626_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- /*
- * Init ptr to DMA buffer that holds new ADC data. We skip the
- * first DWORD in the buffer because it contains junk data
- * from the final ADC of the previous poll list scan.
- */
- uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
- int i;
-
- /* get the data and hand it over to comedi */
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned short tempdata;
-
- /*
- * Convert ADC data to 16-bit integer values and copy
- * to application buffer.
- */
- tempdata = s626_ai_reg_to_uint(*readaddr);
- readaddr++;
-
- comedi_buf_write_samples(s, &tempdata, 1);
- }
-
- if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
-
- if (async->events & COMEDI_CB_CANCEL_MASK)
- devpriv->ai_cmd_running = 0;
-
- if (devpriv->ai_cmd_running && cmd->scan_begin_src == TRIG_EXT)
- s626_dio_set_irq(dev, cmd->scan_begin_arg);
-
- comedi_handle_events(dev, s);
-
- return !devpriv->ai_cmd_running;
-}
-
-static irqreturn_t s626_irq_handler(int irq, void *d)
-{
- struct comedi_device *dev = d;
- unsigned long flags;
- uint32_t irqtype, irqstatus;
-
- if (!dev->attached)
- return IRQ_NONE;
- /* lock to avoid race with comedi_poll */
- spin_lock_irqsave(&dev->spinlock, flags);
-
- /* save interrupt enable register state */
- irqstatus = readl(dev->mmio + S626_P_IER);
-
- /* read interrupt type */
- irqtype = readl(dev->mmio + S626_P_ISR);
-
- /* disable master interrupt */
- writel(0, dev->mmio + S626_P_IER);
-
- /* clear interrupt */
- writel(irqtype, dev->mmio + S626_P_ISR);
-
- switch (irqtype) {
- case S626_IRQ_RPS1: /* end_of_scan occurs */
- if (s626_handle_eos_interrupt(dev))
- irqstatus = 0;
- break;
- case S626_IRQ_GPIO3: /* check dio and counter interrupt */
- /* s626_dio_clear_irq(dev); */
- s626_check_dio_interrupts(dev);
- s626_check_counter_interrupts(dev);
- break;
- }
-
- /* enable interrupt */
- writel(irqstatus, dev->mmio + S626_P_IER);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
- return IRQ_HANDLED;
-}
-
-/*
- * This function builds the RPS program for hardware driven acquisition.
- */
-static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
-{
- struct s626_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
- uint32_t *rps;
- uint32_t jmp_adrs;
- uint16_t i;
- uint16_t n;
- uint32_t local_ppl;
-
- /* Stop RPS program in case it is currently running */
- s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
-
- /* Set starting logical address to write RPS commands. */
- rps = (uint32_t *)devpriv->rps_buf.logical_base;
-
- /* Initialize RPS instruction pointer */
- writel((uint32_t)devpriv->rps_buf.physical_base,
- dev->mmio + S626_P_RPSADDR1);
-
- /* Construct RPS program in rps_buf DMA buffer */
- if (cmd->scan_begin_src != TRIG_FOLLOW) {
- /* Wait for Start trigger. */
- *rps++ = S626_RPS_PAUSE | S626_RPS_SIGADC;
- *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_SIGADC;
- }
-
- /*
- * SAA7146 BUG WORKAROUND Do a dummy DEBI Write. This is necessary
- * because the first RPS DEBI Write following a non-RPS DEBI write
- * seems to always fail. If we don't do this dummy write, the ADC
- * gain might not be set to the value required for the first slot in
- * the poll list; the ADC gain would instead remain unchanged from
- * the previously programmed value.
- */
- /* Write DEBI Write command and address to shadow RAM. */
- *rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
- *rps++ = S626_DEBI_CMD_WRWORD | S626_LP_GSEL;
- *rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
- /* Write DEBI immediate data to shadow RAM: */
- *rps++ = S626_GSEL_BIPOLAR5V; /* arbitrary immediate data value. */
- *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
- /* Reset "shadow RAM uploaded" flag. */
- /* Invoke shadow RAM upload. */
- *rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
- /* Wait for shadow upload to finish. */
- *rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
-
- /*
- * Digitize all slots in the poll list. This is implemented as a
- * for loop to limit the slot count to 16 in case the application
- * forgot to set the S626_EOPL flag in the final slot.
- */
- for (devpriv->adc_items = 0; devpriv->adc_items < 16;
- devpriv->adc_items++) {
- /*
- * Convert application's poll list item to private board class
- * format. Each app poll list item is a uint8_t with form
- * (EOPL,x,x,RANGE,CHAN<3:0>), where RANGE code indicates 0 =
- * +-10V, 1 = +-5V, and EOPL = End of Poll List marker.
- */
- local_ppl = (*ppl << 8) | (*ppl & 0x10 ? S626_GSEL_BIPOLAR5V :
- S626_GSEL_BIPOLAR10V);
-
- /* Switch ADC analog gain. */
- /* Write DEBI command and address to shadow RAM. */
- *rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
- *rps++ = S626_DEBI_CMD_WRWORD | S626_LP_GSEL;
- /* Write DEBI immediate data to shadow RAM. */
- *rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
- *rps++ = local_ppl;
- /* Reset "shadow RAM uploaded" flag. */
- *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
- /* Invoke shadow RAM upload. */
- *rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
- /* Wait for shadow upload to finish. */
- *rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
- /* Select ADC analog input channel. */
- *rps++ = S626_RPS_LDREG | (S626_P_DEBICMD >> 2);
- /* Write DEBI command and address to shadow RAM. */
- *rps++ = S626_DEBI_CMD_WRWORD | S626_LP_ISEL;
- *rps++ = S626_RPS_LDREG | (S626_P_DEBIAD >> 2);
- /* Write DEBI immediate data to shadow RAM. */
- *rps++ = local_ppl;
- /* Reset "shadow RAM uploaded" flag. */
- *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_DEBI;
- /* Invoke shadow RAM upload. */
- *rps++ = S626_RPS_UPLOAD | S626_RPS_DEBI;
- /* Wait for shadow upload to finish. */
- *rps++ = S626_RPS_PAUSE | S626_RPS_DEBI;
-
- /*
- * Delay at least 10 microseconds for analog input settling.
- * Instead of padding with NOPs, we use S626_RPS_JUMP
- * instructions here; this allows us to produce a longer delay
- * than is possible with NOPs because each S626_RPS_JUMP
- * flushes the RPS' instruction prefetch pipeline.
- */
- jmp_adrs =
- (uint32_t)devpriv->rps_buf.physical_base +
- (uint32_t)((unsigned long)rps -
- (unsigned long)devpriv->
- rps_buf.logical_base);
- for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
- jmp_adrs += 8; /* Repeat to implement time delay: */
- /* Jump to next RPS instruction. */
- *rps++ = S626_RPS_JUMP;
- *rps++ = jmp_adrs;
- }
-
- if (cmd->convert_src != TRIG_NOW) {
- /* Wait for Start trigger. */
- *rps++ = S626_RPS_PAUSE | S626_RPS_SIGADC;
- *rps++ = S626_RPS_CLRSIGNAL | S626_RPS_SIGADC;
- }
- /* Start ADC by pulsing GPIO1. */
- /* Begin ADC Start pulse. */
- *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
- *rps++ = S626_GPIO_BASE | S626_GPIO1_LO;
- *rps++ = S626_RPS_NOP;
- /* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
- /* End ADC Start pulse. */
- *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
- *rps++ = S626_GPIO_BASE | S626_GPIO1_HI;
- /*
- * Wait for ADC to complete (GPIO2 is asserted high when ADC not
- * busy) and for data from previous conversion to shift into FB
- * BUFFER 1 register.
- */
- /* Wait for ADC done. */
- *rps++ = S626_RPS_PAUSE | S626_RPS_GPIO2;
-
- /* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
- *rps++ = S626_RPS_STREG |
- (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
- *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
- (devpriv->adc_items << 2);
-
- /*
- * If this slot's EndOfPollList flag is set, all channels have
- * now been processed.
- */
- if (*ppl++ & S626_EOPL) {
- devpriv->adc_items++; /* Adjust poll list item count. */
- break; /* Exit poll list processing loop. */
- }
- }
-
- /*
- * VERSION 2.01 CHANGE: DELAY CHANGED FROM 250NS to 2US. Allow the
- * ADC to stabilize for 2 microseconds before starting the final
- * (dummy) conversion. This delay is necessary to allow sufficient
- * time between last conversion finished and the start of the dummy
- * conversion. Without this delay, the last conversion's data value
- * is sometimes set to the previous conversion's data value.
- */
- for (n = 0; n < (2 * S626_RPSCLK_PER_US); n++)
- *rps++ = S626_RPS_NOP;
-
- /*
- * Start a dummy conversion to cause the data from the last
- * conversion of interest to be shifted in.
- */
- /* Begin ADC Start pulse. */
- *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2);
- *rps++ = S626_GPIO_BASE | S626_GPIO1_LO;
- *rps++ = S626_RPS_NOP;
- /* VERSION 2.03 CHANGE: STRETCH OUT ADC START PULSE. */
- *rps++ = S626_RPS_LDREG | (S626_P_GPIO >> 2); /* End ADC Start pulse. */
- *rps++ = S626_GPIO_BASE | S626_GPIO1_HI;
-
- /*
- * Wait for the data from the last conversion of interest to arrive
- * in FB BUFFER 1 register.
- */
- *rps++ = S626_RPS_PAUSE | S626_RPS_GPIO2; /* Wait for ADC done. */
-
- /* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
- *rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
- *rps++ = (uint32_t)devpriv->ana_buf.physical_base +
- (devpriv->adc_items << 2);
-
- /* Indicate ADC scan loop is finished. */
- /* Signal ReadADC() that scan is done. */
- /* *rps++= S626_RPS_CLRSIGNAL | S626_RPS_SIGADC; */
-
- /* invoke interrupt */
- if (devpriv->ai_cmd_running == 1)
- *rps++ = S626_RPS_IRQ;
-
- /* Restart RPS program at its beginning. */
- *rps++ = S626_RPS_JUMP; /* Branch to start of RPS program. */
- *rps++ = (uint32_t)devpriv->rps_buf.physical_base;
-
- /* End of RPS program build */
-}
-
-#ifdef unused_code
-static int s626_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct s626_private *devpriv = dev->private;
- uint8_t i;
- uint32_t *readaddr;
-
- /* Trigger ADC scan loop start */
- s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
-
- /* Wait until ADC scan loop is finished (RPS Signal 0 reset) */
- while (s626_mc_test(dev, S626_MC2_ADC_RPS, S626_P_MC2))
- ;
-
- /*
- * Init ptr to DMA buffer that holds new ADC data. We skip the
- * first DWORD in the buffer because it contains junk data from
- * the final ADC of the previous poll list scan.
- */
- readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
-
- /*
- * Convert ADC data to 16-bit integer values and
- * copy to application buffer.
- */
- for (i = 0; i < devpriv->adc_items; i++) {
- *data = s626_ai_reg_to_uint(*readaddr++);
- data++;
- }
-
- return i;
-}
-#endif
-
-static int s626_ai_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned long context)
-{
- unsigned int status;
-
- status = readl(dev->mmio + S626_P_PSR);
- if (status & S626_PSR_GPIO2)
- return 0;
- return -EBUSY;
-}
-
-static int s626_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- uint16_t chan = CR_CHAN(insn->chanspec);
- uint16_t range = CR_RANGE(insn->chanspec);
- uint16_t adc_spec = 0;
- uint32_t gpio_image;
- uint32_t tmp;
- int ret;
- int n;
-
- /*
- * Convert application's ADC specification into form
- * appropriate for register programming.
- */
- if (range == 0)
- adc_spec = (chan << 8) | (S626_GSEL_BIPOLAR5V);
- else
- adc_spec = (chan << 8) | (S626_GSEL_BIPOLAR10V);
-
- /* Switch ADC analog gain. */
- s626_debi_write(dev, S626_LP_GSEL, adc_spec); /* Set gain. */
-
- /* Select ADC analog input channel. */
- s626_debi_write(dev, S626_LP_ISEL, adc_spec); /* Select channel. */
-
- for (n = 0; n < insn->n; n++) {
- /* Delay 10 microseconds for analog input settling. */
- udelay(10);
-
- /* Start ADC by pulsing GPIO1 low */
- gpio_image = readl(dev->mmio + S626_P_GPIO);
- /* Assert ADC Start command */
- writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
- /* and stretch it out */
- writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
- writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
- /* Negate ADC Start command */
- writel(gpio_image | S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
-
- /*
- * Wait for ADC to complete (GPIO2 is asserted high when
- * ADC not busy) and for data from previous conversion to
- * shift into FB BUFFER 1 register.
- */
-
- /* Wait for ADC done */
- ret = comedi_timeout(dev, s, insn, s626_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* Fetch ADC data */
- if (n != 0) {
- tmp = readl(dev->mmio + S626_P_FB_BUFFER1);
- data[n - 1] = s626_ai_reg_to_uint(tmp);
- }
-
- /*
- * Allow the ADC to stabilize for 4 microseconds before
- * starting the next (final) conversion. This delay is
- * necessary to allow sufficient time between last
- * conversion finished and the start of the next
- * conversion. Without this delay, the last conversion's
- * data value is sometimes set to the previous
- * conversion's data value.
- */
- udelay(4);
- }
-
- /*
- * Start a dummy conversion to cause the data from the
- * previous conversion to be shifted in.
- */
- gpio_image = readl(dev->mmio + S626_P_GPIO);
- /* Assert ADC Start command */
- writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
- /* and stretch it out */
- writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
- writel(gpio_image & ~S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
- /* Negate ADC Start command */
- writel(gpio_image | S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
-
- /* Wait for the data to arrive in FB BUFFER 1 register. */
-
- /* Wait for ADC done */
- ret = comedi_timeout(dev, s, insn, s626_ai_eoc, 0);
- if (ret)
- return ret;
-
- /* Fetch ADC data from audio interface's input shift register. */
-
- /* Fetch ADC data */
- if (n != 0) {
- tmp = readl(dev->mmio + S626_P_FB_BUFFER1);
- data[n - 1] = s626_ai_reg_to_uint(tmp);
- }
-
- return n;
-}
-
-static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd)
-{
- int n;
-
- for (n = 0; n < cmd->chanlist_len; n++) {
- if (CR_RANGE(cmd->chanlist[n]) == 0)
- ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_5V;
- else
- ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_10V;
- }
- if (n != 0)
- ppl[n - 1] |= S626_EOPL;
-
- return n;
-}
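/*
 * Worked example of the poll-list encoding above (a sketch using the
 * S626_RANGE_* and S626_EOPL definitions from s626.h): a chanlist entry
 * for channel 3 on range index 0 (+/-5V) becomes ppl[n] = 3 | 0x10 = 0x13;
 * if it is the final entry, the S626_EOPL marker is OR-ed in, giving 0x93.
 */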
-
-static int s626_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- /* Start executing the RPS program */
- s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
-
- s->async->inttrig = NULL;
-
- return 1;
-}
-
-/*
- * This function doesn't require a particular form; this is just what
- * happens to be used in some of the drivers. It should convert ns
- * nanoseconds to a counter value suitable for programming the device.
- * Also, it should adjust ns so that it corresponds to the actual time
- * that the device will use.
- */
-static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
-{
- int divider, base;
-
- base = 500; /* 2MHz internal clock */
-
- switch (flags & CMDF_ROUND_MASK) {
- case CMDF_ROUND_NEAREST:
- default:
- divider = DIV_ROUND_CLOSEST(*nanosec, base);
- break;
- case CMDF_ROUND_DOWN:
- divider = (*nanosec) / base;
- break;
- case CMDF_ROUND_UP:
- divider = DIV_ROUND_UP(*nanosec, base);
- break;
- }
-
- *nanosec = base * divider;
- return divider - 1;
-}
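/*
 * Worked example (a sketch, assuming the 2 MHz timebase above): a
 * requested interval of 10300 ns with CMDF_ROUND_NEAREST gives
 * DIV_ROUND_CLOSEST(10300, 500) = 21, so *nanosec is rounded to
 * 21 * 500 = 10500 ns and the function returns 20, the tick value later
 * handed to s626_timer_load()/s626_preload().
 */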
-
-static void s626_timer_load(struct comedi_device *dev,
- unsigned int chan, int tick)
-{
- uint16_t setup =
- /* Preload upon index. */
- S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
- /* Disable hardware index. */
- S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
- /* Operating mode is Timer. */
- S626_SET_STD_ENCMODE(S626_ENCMODE_TIMER) |
- /* Count direction is Down. */
- S626_SET_STD_CLKPOL(S626_CNTDIR_DOWN) |
- /* Clock multiplier is 1x. */
- S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
- /* Enabled by index */
- S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
- uint16_t value_latchsrc = S626_LATCHSRC_A_INDXA;
- /* uint16_t enab = S626_CLKENAB_ALWAYS; */
-
- s626_set_mode(dev, chan, setup, false);
-
- /* Set the preload register */
- s626_preload(dev, chan, tick);
-
- /*
- * Software index pulse forces the preload register to load
- * into the counter
- */
- s626_set_load_trig(dev, chan, 0);
- s626_pulse_index(dev, chan);
-
- /* set reload on counter overflow */
- s626_set_load_trig(dev, chan, 1);
-
- /* set interrupt on overflow */
- s626_set_int_src(dev, chan, S626_INTSRC_OVER);
-
- s626_set_latch_source(dev, chan, value_latchsrc);
- /* s626_set_enable(dev, chan, (uint16_t)(enab != 0)); */
-}
-
-/* TO COMPLETE */
-static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct s626_private *devpriv = dev->private;
- uint8_t ppl[16];
- struct comedi_cmd *cmd = &s->async->cmd;
- int tick;
-
- if (devpriv->ai_cmd_running) {
- dev_err(dev->class_dev,
- "s626_ai_cmd: Another ai_cmd is running\n");
- return -EBUSY;
- }
- /* disable interrupt */
- writel(0, dev->mmio + S626_P_IER);
-
- /* clear interrupt request */
- writel(S626_IRQ_RPS1 | S626_IRQ_GPIO3, dev->mmio + S626_P_ISR);
-
- /* clear any pending interrupt */
- s626_dio_clear_irq(dev);
- /* s626_enc_clear_irq(dev); */
-
- /* reset ai_cmd_running flag */
- devpriv->ai_cmd_running = 0;
-
- s626_ai_load_polllist(ppl, cmd);
- devpriv->ai_cmd_running = 1;
- devpriv->ai_convert_count = 0;
-
- switch (cmd->scan_begin_src) {
- case TRIG_FOLLOW:
- break;
- case TRIG_TIMER:
- /*
- * set a counter to generate adc trigger at scan_begin_arg
- * interval
- */
- tick = s626_ns_to_timer(&cmd->scan_begin_arg, cmd->flags);
-
- /* load timer value and enable interrupt */
- s626_timer_load(dev, 5, tick);
- s626_set_enable(dev, 5, S626_CLKENAB_ALWAYS);
- break;
- case TRIG_EXT:
- /* set the digital line and interrupt for scan trigger */
- if (cmd->start_src != TRIG_EXT)
- s626_dio_set_irq(dev, cmd->scan_begin_arg);
- break;
- }
-
- switch (cmd->convert_src) {
- case TRIG_NOW:
- break;
- case TRIG_TIMER:
- /*
- * set a counter to generate adc trigger at convert_arg
- * interval
- */
- tick = s626_ns_to_timer(&cmd->convert_arg, cmd->flags);
-
- /* load timer value and enable interrupt */
- s626_timer_load(dev, 4, tick);
- s626_set_enable(dev, 4, S626_CLKENAB_INDEX);
- break;
- case TRIG_EXT:
- /* set the digital line and interrupt for convert trigger */
- if (cmd->scan_begin_src != TRIG_EXT &&
- cmd->start_src == TRIG_EXT)
- s626_dio_set_irq(dev, cmd->convert_arg);
- break;
- }
-
- s626_reset_adc(dev, ppl);
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- /* Trigger ADC scan loop start */
- /* s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2); */
-
- /* Start executing the RPS program */
- s626_mc_enable(dev, S626_MC1_ERPS1, S626_P_MC1);
- s->async->inttrig = NULL;
- break;
- case TRIG_EXT:
- /* configure DIO channel for acquisition trigger */
- s626_dio_set_irq(dev, cmd->start_arg);
- s->async->inttrig = NULL;
- break;
- case TRIG_INT:
- s->async->inttrig = s626_ai_inttrig;
- break;
- }
-
- /* enable interrupt */
- writel(S626_IRQ_GPIO3 | S626_IRQ_RPS1, dev->mmio + S626_P_IER);
-
- return 0;
-}
-
-static int s626_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src,
- TRIG_NOW | TRIG_INT | TRIG_EXT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT | TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->scan_begin_src);
- err |= comedi_check_trigger_is_unique(cmd->convert_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- case TRIG_INT:
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- err |= comedi_check_trigger_arg_max(&cmd->start_arg, 39);
- break;
- }
-
- if (cmd->scan_begin_src == TRIG_EXT)
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 39);
- if (cmd->convert_src == TRIG_EXT)
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, 39);
-
-#define S626_MAX_SPEED 200000 /* in nanoseconds */
-#define S626_MIN_SPEED 2000000000 /* in nanoseconds */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- S626_MAX_SPEED);
- err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg,
- S626_MIN_SPEED);
- } else {
- /*
- * external trigger
- * should be level/edge, hi/lo specification here
- * should specify multiple external triggers
- * err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
- */
- }
- if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
- S626_MAX_SPEED);
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg,
- S626_MIN_SPEED);
- } else {
- /*
- * external trigger - see above
- * err |= comedi_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
- */
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- s626_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- s626_ns_to_timer(&arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- arg);
- }
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int s626_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct s626_private *devpriv = dev->private;
-
- /* Stop RPS program in case it is currently running */
- s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
-
- /* disable master interrupt */
- writel(0, dev->mmio + S626_P_IER);
-
- devpriv->ai_cmd_running = 0;
-
- return 0;
-}
-
-static int s626_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- int16_t dacdata = (int16_t)data[i];
- int ret;
-
- dacdata -= (0x1fff);
-
- ret = s626_set_dac(dev, chan, dacdata);
- if (ret)
- return ret;
-
- s->readback[chan] = data[i];
- }
-
- return insn->n;
-}
-
-/* *************** DIGITAL I/O FUNCTIONS *************** */
-
-/*
- * All DIO functions address a group of DIO channels by means of
- * "group" argument. group may be 0, 1 or 2, which correspond to DIO
- * ports A, B and C, respectively.
- */
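/*
 * For example (a sketch of the chan-to-group mapping used by the DIO
 * helpers above): DIO channel 37 belongs to group 37 / 16 = 2 (port C)
 * and is addressed there as bit 37 % 16 = 5, i.e. mask 1 << 5 in that
 * group's DEBI registers.
 */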
-
-static void s626_dio_init(struct comedi_device *dev)
-{
- uint16_t group;
-
- /* Prepare to treat writes to WRCapSel as capture disables. */
- s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
-
- /* For each group of sixteen channels ... */
- for (group = 0; group < S626_DIO_BANKS; group++) {
- /* Disable all interrupts */
- s626_debi_write(dev, S626_LP_WRINTSEL(group), 0);
- /* Disable all event captures */
- s626_debi_write(dev, S626_LP_WRCAPSEL(group), 0xffff);
- /* Init all DIOs to default edge polarity */
- s626_debi_write(dev, S626_LP_WREDGSEL(group), 0);
- /* Program all outputs to inactive state */
- s626_debi_write(dev, S626_LP_WRDOUT(group), 0);
- }
-}
-
-static int s626_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long group = (unsigned long)s->private;
-
- if (comedi_dio_update_state(s, data))
- s626_debi_write(dev, S626_LP_WRDOUT(group), s->state);
-
- data[1] = s626_debi_read(dev, S626_LP_RDDIN(group));
-
- return insn->n;
-}
-
-static int s626_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long group = (unsigned long)s->private;
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- s626_debi_write(dev, S626_LP_WRDOUT(group), s->io_bits);
-
- return insn->n;
-}
-
-/*
- * This function initializes the value of the counter (data[0])
- * and sets up the subdevice. Trigger and interrupt configuration
- * still need to be completed.
- *
- * FIXME: data[0] is supposed to be an INSN_CONFIG_xxx constant indicating
- * what is being configured, but this function appears to be using data[0]
- * as a variable.
- */
-static int s626_enc_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- uint16_t setup =
- /* Preload upon index. */
- S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
- /* Disable hardware index. */
- S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
- /* Operating mode is Counter. */
- S626_SET_STD_ENCMODE(S626_ENCMODE_COUNTER) |
- /* Active high clock. */
- S626_SET_STD_CLKPOL(S626_CLKPOL_POS) |
- /* Clock multiplier is 1x. */
- S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
- /* Enabled by index */
- S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
- /* uint16_t disable_int_src = true; */
- /* uint32_t Preloadvalue; //Counter initial value */
- uint16_t value_latchsrc = S626_LATCHSRC_AB_READ;
- uint16_t enab = S626_CLKENAB_ALWAYS;
-
- /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
-
- s626_set_mode(dev, chan, setup, true);
- s626_preload(dev, chan, data[0]);
- s626_pulse_index(dev, chan);
- s626_set_latch_source(dev, chan, value_latchsrc);
- s626_set_enable(dev, chan, (enab != 0));
-
- return insn->n;
-}
-
-static int s626_enc_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- uint16_t cntr_latch_reg = S626_LP_CNTR(chan);
- int i;
-
- for (i = 0; i < insn->n; i++) {
- unsigned int val;
-
- /*
- * Read the counter's output latch LSW/MSW.
- * Latches on LSW read.
- */
- val = s626_debi_read(dev, cntr_latch_reg);
- val |= (s626_debi_read(dev, cntr_latch_reg + 2) << 16);
- data[i] = val;
- }
-
- return insn->n;
-}
-
-static int s626_enc_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /* Set the preload register */
- s626_preload(dev, chan, data[0]);
-
- /*
- * Software index pulse forces the preload register to load
- * into the counter
- */
- s626_set_load_trig(dev, chan, 0);
- s626_pulse_index(dev, chan);
- s626_set_load_trig(dev, chan, 2);
-
- return 1;
-}
-
-static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
-{
- s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE);
- s626_debi_write(dev, S626_LP_WRMISC2, new_image);
- s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WDISABLE);
-}
-
-static void s626_counters_init(struct comedi_device *dev)
-{
- int chan;
- uint16_t setup =
- /* Preload upon index. */
- S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
- /* Disable hardware index. */
- S626_SET_STD_INDXSRC(S626_INDXSRC_SOFT) |
- /* Operating mode is counter. */
- S626_SET_STD_ENCMODE(S626_ENCMODE_COUNTER) |
- /* Active high clock. */
- S626_SET_STD_CLKPOL(S626_CLKPOL_POS) |
- /* Clock multiplier is 1x. */
- S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
- /* Enabled by index */
- S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
-
- /*
- * Disable all counter interrupts and clear any captured counter events.
- */
- for (chan = 0; chan < S626_ENCODER_CHANNELS; chan++) {
- s626_set_mode(dev, chan, setup, true);
- s626_set_int_src(dev, chan, 0);
- s626_reset_cap_flags(dev, chan);
- s626_set_enable(dev, chan, S626_CLKENAB_ALWAYS);
- }
-}
-
-static int s626_allocate_dma_buffers(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct s626_private *devpriv = dev->private;
- void *addr;
- dma_addr_t appdma;
-
- addr = pci_alloc_consistent(pcidev, S626_DMABUF_SIZE, &appdma);
- if (!addr)
- return -ENOMEM;
- devpriv->ana_buf.logical_base = addr;
- devpriv->ana_buf.physical_base = appdma;
-
- addr = pci_alloc_consistent(pcidev, S626_DMABUF_SIZE, &appdma);
- if (!addr)
- return -ENOMEM;
- devpriv->rps_buf.logical_base = addr;
- devpriv->rps_buf.physical_base = appdma;
-
- return 0;
-}
-
-static void s626_free_dma_buffers(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct s626_private *devpriv = dev->private;
-
- if (!devpriv)
- return;
-
- if (devpriv->rps_buf.logical_base)
- pci_free_consistent(pcidev, S626_DMABUF_SIZE,
- devpriv->rps_buf.logical_base,
- devpriv->rps_buf.physical_base);
- if (devpriv->ana_buf.logical_base)
- pci_free_consistent(pcidev, S626_DMABUF_SIZE,
- devpriv->ana_buf.logical_base,
- devpriv->ana_buf.physical_base);
-}
-
-static int s626_initialize(struct comedi_device *dev)
-{
- struct s626_private *devpriv = dev->private;
- dma_addr_t phys_buf;
- uint16_t chan;
- int i;
- int ret;
-
- /* Enable DEBI and audio pins, enable I2C interface */
- s626_mc_enable(dev, S626_MC1_DEBI | S626_MC1_AUDIO | S626_MC1_I2C,
- S626_P_MC1);
-
- /*
- * Configure DEBI operating mode
- *
- * Local bus is 16 bits wide
- * Declare DEBI transfer timeout interval
- * Set up byte lane steering
- * Intel-compatible local bus (DEBI never times out)
- */
- writel(S626_DEBI_CFG_SLAVE16 |
- (S626_DEBI_TOUT << S626_DEBI_CFG_TOUT_BIT) | S626_DEBI_SWAP |
- S626_DEBI_CFG_INTEL, dev->mmio + S626_P_DEBICFG);
-
- /* Disable MMU paging */
- writel(S626_DEBI_PAGE_DISABLE, dev->mmio + S626_P_DEBIPAGE);
-
- /* Init GPIO so that ADC Start* is negated */
- writel(S626_GPIO_BASE | S626_GPIO1_HI, dev->mmio + S626_P_GPIO);
-
- /* I2C device address for onboard eeprom (revb) */
- devpriv->i2c_adrs = 0xA0;
-
- /*
- * Issue an I2C ABORT command to halt any I2C
- * operation in progress and reset BUSY flag.
- */
- writel(S626_I2C_CLKSEL | S626_I2C_ABORT,
- dev->mmio + S626_P_I2CSTAT);
- s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
- ret = comedi_timeout(dev, NULL, NULL, s626_i2c_handshake_eoc, 0);
- if (ret)
- return ret;
-
- /*
- * Per SAA7146 data sheet, write to STATUS
- * reg twice to reset all I2C error flags.
- */
- for (i = 0; i < 2; i++) {
- writel(S626_I2C_CLKSEL, dev->mmio + S626_P_I2CSTAT);
- s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
- ret = comedi_timeout(dev, NULL, NULL, s626_i2c_handshake_eoc, 0);
- if (ret)
- return ret;
- }
-
- /*
- * Init audio interface functional attributes: set DAC/ADC
- * serial clock rates, invert DAC serial clock so that
- * DAC data setup times are satisfied, enable DAC serial
- * clock out.
- */
- writel(S626_ACON2_INIT, dev->mmio + S626_P_ACON2);
-
- /*
- * Set up TSL1 slot list, which is used to control the
- * accumulation of ADC data: S626_RSD1 = shift data in on SD1.
- * S626_SIB_A1 = store the data byte at the next available location
- * in FB BUFFER1 register.
- */
- writel(S626_RSD1 | S626_SIB_A1, dev->mmio + S626_P_TSL1);
- writel(S626_RSD1 | S626_SIB_A1 | S626_EOS,
- dev->mmio + S626_P_TSL1 + 4);
-
- /* Enable TSL1 slot list so that it executes all the time */
- writel(S626_ACON1_ADCSTART, dev->mmio + S626_P_ACON1);
-
- /*
- * Initialize RPS registers used for ADC
- */
-
- /* Physical start of RPS program */
- writel((uint32_t)devpriv->rps_buf.physical_base,
- dev->mmio + S626_P_RPSADDR1);
- /* RPS program performs no explicit mem writes */
- writel(0, dev->mmio + S626_P_RPSPAGE1);
- /* Disable RPS timeouts */
- writel(0, dev->mmio + S626_P_RPS1_TOUT);
-
-#if 0
- /*
- * SAA7146 BUG WORKAROUND
- *
- * Initialize SAA7146 ADC interface to a known state by
- * invoking ADCs until FB BUFFER 1 register shows that it
- * is correctly receiving ADC data. This is necessary
- * because the SAA7146 ADC interface does not start up in
- * a defined state after a PCI reset.
- */
- {
- struct comedi_subdevice *s = dev->read_subdev;
- uint8_t poll_list;
- uint16_t adc_data;
- uint16_t start_val;
- uint16_t index;
- unsigned int data[16];
-
- /* Create a simple polling list for analog input channel 0 */
- poll_list = S626_EOPL;
- s626_reset_adc(dev, &poll_list);
-
- /* Get initial ADC value */
- s626_ai_rinsn(dev, s, NULL, data);
- start_val = data[0];
-
- /*
- * VERSION 2.01 CHANGE: TIMEOUT ADDED TO PREVENT HANGED
- * EXECUTION.
- *
- * Invoke ADCs until the new ADC value differs from the initial
- * value or a timeout occurs. The timeout protects against the
- * possibility that the driver is restarting and the ADC data is
- * a fixed value resulting from the applied ADC analog input
- * being unusually quiet or at the rail.
- */
- for (index = 0; index < 500; index++) {
- s626_ai_rinsn(dev, s, NULL, data);
- adc_data = data[0];
- if (adc_data != start_val)
- break;
- }
- }
-#endif /* SAA7146 BUG WORKAROUND */
-
- /*
- * Initialize the DAC interface
- */
-
- /*
- * Init Audio2's output DMAC attributes:
- * burst length = 1 DWORD
- * threshold = 1 DWORD.
- */
- writel(0, dev->mmio + S626_P_PCI_BT_A);
-
- /*
- * Init Audio2's output DMA physical addresses. The protection
- * address is set to 1 DWORD past the base address so that a
- * single DWORD will be transferred each time a DMA transfer is
- * enabled.
- */
- phys_buf = devpriv->ana_buf.physical_base +
- (S626_DAC_WDMABUF_OS * sizeof(uint32_t));
- writel((uint32_t)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
- writel((uint32_t)(phys_buf + sizeof(uint32_t)),
- dev->mmio + S626_P_PROTA2_OUT);
-
- /*
- * Cache Audio2's output DMA buffer logical address. This is
- * where DAC data is buffered for A2 output DMA transfers.
- */
- devpriv->dac_wbuf = (uint32_t *)devpriv->ana_buf.logical_base +
- S626_DAC_WDMABUF_OS;
-
- /*
- * Audio2's output channel does not use paging. The
- * protection violation handling bit is set so that the
- * DMAC will automatically halt and its PCI address pointer
- * will be reset when the protection address is reached.
- */
- writel(8, dev->mmio + S626_P_PAGEA2_OUT);
-
- /*
- * Initialize time slot list 2 (TSL2), which is used to control
- * the clock generation for and serialization of data to be sent
- * to the DAC devices. Slot 0 is a NOP that is used to trap TSL
- * execution; this permits other slots to be safely modified
- * without first turning off the TSL sequencer (which is
- * apparently impossible to do). Also, SD3 (which is driven by a
- * pull-up resistor) is shifted in and stored to the MSB of
- * FB_BUFFER2 to be used as evidence that the slot sequence has
- * not yet finished executing.
- */
-
- /* Slot 0: Trap TSL execution, shift 0xFF into FB_BUFFER2 */
- writel(S626_XSD2 | S626_RSD3 | S626_SIB_A2 | S626_EOS,
- dev->mmio + S626_VECTPORT(0));
-
- /*
- * Initialize slot 1, which is constant. Slot 1 causes a
- * DWORD to be transferred from audio channel 2's output FIFO
- * to the FIFO's output buffer so that it can be serialized
- * and sent to the DAC during subsequent slots. All remaining
- * slots are dynamically populated as required by the target
- * DAC device.
- */
-
- /* Slot 1: Fetch DWORD from Audio2's output FIFO */
- writel(S626_LF_A2, dev->mmio + S626_VECTPORT(1));
-
- /* Start DAC's audio interface (TSL2) running */
- writel(S626_ACON1_DACSTART, dev->mmio + S626_P_ACON1);
-
- /*
- * Init Trim DACs to calibrated values. Do it twice because the
- * SAA7146 audio channel does not always reset properly and
- * sometimes causes the first few TrimDAC writes to malfunction.
- */
- s626_load_trim_dacs(dev);
- ret = s626_load_trim_dacs(dev);
- if (ret)
- return ret;
-
- /*
- * Manually init all gate array hardware in case this is a soft
- * reset (we have no way of determining whether this is a warm
- * or cold start). This is necessary because the gate array will
- * reset only in response to a PCI hard reset; there is no soft
- * reset function.
- */
-
- /*
- * Init all DAC outputs to 0V and init all DAC setpoint and
- * polarity images.
- */
- for (chan = 0; chan < S626_DAC_CHANNELS; chan++) {
- ret = s626_set_dac(dev, chan, 0);
- if (ret)
- return ret;
- }
-
- /* Init counters */
- s626_counters_init(dev);
-
- /*
- * Without modifying the state of the battery backup enable, disable
- * the watchdog timer, set DIO channels 0-5 to operate in the
- * standard DIO (vs. counter overflow) mode, disable the battery
- * charger, and reset the watchdog interval selector to zero.
- */
- s626_write_misc2(dev, (s626_debi_read(dev, S626_LP_RDMISC2) &
- S626_MISC2_BATT_ENABLE));
-
- /* Initialize the digital I/O subsystem */
- s626_dio_init(dev);
-
- return 0;
-}
-
-static int s626_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct s626_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- dev->mmio = pci_ioremap_bar(pcidev, 0);
- if (!dev->mmio)
- return -ENOMEM;
-
- /* disable master interrupt */
- writel(0, dev->mmio + S626_P_IER);
-
- /* soft reset */
- writel(S626_MC1_SOFT_RESET, dev->mmio + S626_P_MC1);
-
- /* DMA FIXME DMA// */
-
- ret = s626_allocate_dma_buffers(dev);
- if (ret)
- return ret;
-
- if (pcidev->irq) {
- ret = request_irq(pcidev->irq, s626_irq_handler, IRQF_SHARED,
- dev->board_name, dev);
-
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- ret = comedi_alloc_subdevices(dev, 6);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = S626_ADC_CHANNELS;
- s->maxdata = 0x3fff;
- s->range_table = &s626_range_table;
- s->len_chanlist = S626_ADC_CHANNELS;
- s->insn_read = s626_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->do_cmd = s626_ai_cmd;
- s->do_cmdtest = s626_ai_cmdtest;
- s->cancel = s626_ai_cancel;
- }
-
- s = &dev->subdevices[1];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = S626_DAC_CHANNELS;
- s->maxdata = 0x3fff;
- s->range_table = &range_bipolar10;
- s->insn_write = s626_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- s = &dev->subdevices[2];
- /* digital I/O subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->io_bits = 0xffff;
- s->private = (void *)0; /* DIO group 0 */
- s->range_table = &range_digital;
- s->insn_config = s626_dio_insn_config;
- s->insn_bits = s626_dio_insn_bits;
-
- s = &dev->subdevices[3];
- /* digital I/O subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->io_bits = 0xffff;
- s->private = (void *)1; /* DIO group 1 */
- s->range_table = &range_digital;
- s->insn_config = s626_dio_insn_config;
- s->insn_bits = s626_dio_insn_bits;
-
- s = &dev->subdevices[4];
- /* digital I/O subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->io_bits = 0xffff;
- s->private = (void *)2; /* DIO group 2 */
- s->range_table = &range_digital;
- s->insn_config = s626_dio_insn_config;
- s->insn_bits = s626_dio_insn_bits;
-
- s = &dev->subdevices[5];
- /* encoder (counter) subdevice */
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE | SDF_LSAMPL;
- s->n_chan = S626_ENCODER_CHANNELS;
- s->maxdata = 0xffffff;
- s->range_table = &range_unknown;
- s->insn_config = s626_enc_insn_config;
- s->insn_read = s626_enc_insn_read;
- s->insn_write = s626_enc_insn_write;
-
- return s626_initialize(dev);
-}
-
-static void s626_detach(struct comedi_device *dev)
-{
- struct s626_private *devpriv = dev->private;
-
- if (devpriv) {
- /* stop ai_command */
- devpriv->ai_cmd_running = 0;
-
- if (dev->mmio) {
- /* interrupt mask */
- /* Disable master interrupt */
- writel(0, dev->mmio + S626_P_IER);
- /* Clear board's IRQ status flag */
- writel(S626_IRQ_GPIO3 | S626_IRQ_RPS1,
- dev->mmio + S626_P_ISR);
-
- /* Disable the watchdog timer and battery charger. */
- s626_write_misc2(dev, 0);
-
- /* Close all interfaces on 7146 device */
- writel(S626_MC1_SHUTDOWN, dev->mmio + S626_P_MC1);
- writel(S626_ACON1_BASE, dev->mmio + S626_P_ACON1);
- }
- }
- comedi_pci_detach(dev);
- s626_free_dma_buffers(dev);
-}
-
-static struct comedi_driver s626_driver = {
- .driver_name = "s626",
- .module = THIS_MODULE,
- .auto_attach = s626_auto_attach,
- .detach = s626_detach,
-};
-
-static int s626_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &s626_driver, id->driver_data);
-}
-
-/*
- * For devices with vendor:device id == 0x1131:0x7146 you must also
- * specify the subvendor:subdevice ids, because otherwise the driver
- * will conflict with Philips SAA7146 media/dvb based cards.
- */
-static const struct pci_device_id s626_pci_table[] = {
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146,
- 0x6000, 0x0272) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, s626_pci_table);
-
-static struct pci_driver s626_pci_driver = {
- .name = "s626",
- .id_table = s626_pci_table,
- .probe = s626_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(s626_driver, s626_pci_driver);
-
-MODULE_AUTHOR("Gianluca Palli <gpalli@deis.unibo.it>");
-MODULE_DESCRIPTION("Sensoray 626 Comedi driver module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
deleted file mode 100644
index b83424e7507b..000000000000
--- a/drivers/staging/comedi/drivers/s626.h
+++ /dev/null
@@ -1,760 +0,0 @@
-/*
- * comedi/drivers/s626.h
- * Sensoray s626 Comedi driver, header file
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * Based on Sensoray Model 626 Linux driver Version 0.2
- * Copyright (C) 2002-2004 Sensoray Co., Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef S626_H_INCLUDED
-#define S626_H_INCLUDED
-
-#define S626_DMABUF_SIZE 4096 /* 4k pages */
-
-#define S626_ADC_CHANNELS 16
-#define S626_DAC_CHANNELS 4
-#define S626_ENCODER_CHANNELS 6
-#define S626_DIO_CHANNELS 48
-#define S626_DIO_BANKS 3 /* Number of DIO groups. */
-#define S626_DIO_EXTCHANS 40 /* Number of extended-capability
- * DIO channels. */
-
-#define S626_NUM_TRIMDACS 12 /* Number of valid TrimDAC channels. */
-
-/* PCI bus interface types. */
-#define S626_INTEL 1 /* Intel bus type. */
-#define S626_MOTOROLA 2 /* Motorola bus type. */
-
-#define S626_PLATFORM S626_INTEL /* *** SELECT PLATFORM TYPE *** */
-
-#define S626_RANGE_5V 0x10 /* +/-5V range */
-#define S626_RANGE_10V 0x00 /* +/-10V range */
-
-#define S626_EOPL 0x80 /* End of ADC poll list marker. */
-#define S626_GSEL_BIPOLAR5V 0x00F0 /* S626_LP_GSEL setting 5V bipolar. */
-#define S626_GSEL_BIPOLAR10V 0x00A0 /* S626_LP_GSEL setting 10V bipolar. */
-
-/* Error codes that must be visible to this base class. */
-#define S626_ERR_ILLEGAL_PARM 0x00010000 /* Illegal function parameter
- * value was specified. */
-#define S626_ERR_I2C 0x00020000 /* I2C error. */
-#define S626_ERR_COUNTERSETUP 0x00200000 /* Illegal setup specified for
- * counter channel. */
-#define S626_ERR_DEBI_TIMEOUT 0x00400000 /* DEBI transfer timed out. */
-
-/*
- * Organization (physical order) and size (in DWORDs) of logical DMA buffers
- * contained by ANA_DMABUF.
- */
-#define S626_ADC_DMABUF_DWORDS 40 /* ADC DMA buffer must hold 16 samples,
- * plus pre/post garbage samples. */
-#define S626_DAC_WDMABUF_DWORDS 1 /* DAC output DMA buffer holds a single
- * sample. */
-
-/* All remaining space in 4KB DMA buffer is available for the RPS1 program. */
-
-/* Address offsets, in DWORDS, from base of DMA buffer. */
-#define S626_DAC_WDMABUF_OS S626_ADC_DMABUF_DWORDS
-
-/* Interrupt enable bit in ISR and IER. */
-#define S626_IRQ_GPIO3 0x00000040 /* IRQ enable for GPIO3. */
-#define S626_IRQ_RPS1 0x10000000
-#define S626_ISR_AFOU 0x00000800 /* Audio FIFO under/overflow detected. */
-
-#define S626_IRQ_COINT1A 0x0400 /* counter 1A overflow interrupt mask */
-#define S626_IRQ_COINT1B 0x0800 /* counter 1B overflow interrupt mask */
-#define S626_IRQ_COINT2A 0x1000 /* counter 2A overflow interrupt mask */
-#define S626_IRQ_COINT2B 0x2000 /* counter 2B overflow interrupt mask */
-#define S626_IRQ_COINT3A 0x4000 /* counter 3A overflow interrupt mask */
-#define S626_IRQ_COINT3B 0x8000 /* counter 3B overflow interrupt mask */
-
-/* RPS command codes. */
-#define S626_RPS_CLRSIGNAL 0x00000000 /* CLEAR SIGNAL */
-#define S626_RPS_SETSIGNAL 0x10000000 /* SET SIGNAL */
-#define S626_RPS_NOP 0x00000000 /* NOP */
-#define S626_RPS_PAUSE 0x20000000 /* PAUSE */
-#define S626_RPS_UPLOAD 0x40000000 /* UPLOAD */
-#define S626_RPS_JUMP 0x80000000 /* JUMP */
-#define S626_RPS_LDREG 0x90000100 /* LDREG (1 uint32_t only) */
-#define S626_RPS_STREG 0xA0000100 /* STREG (1 uint32_t only) */
-#define S626_RPS_STOP 0x50000000 /* STOP */
-#define S626_RPS_IRQ 0x60000000 /* IRQ */
-
-#define S626_RPS_LOGICAL_OR 0x08000000 /* Logical OR conditionals. */
-#define S626_RPS_INVERT 0x04000000 /* Test for negated
- * semaphores. */
-#define S626_RPS_DEBI 0x00000002 /* DEBI done */
-
-#define S626_RPS_SIG0 0x00200000 /* RPS semaphore 0
- * (used by ADC). */
-#define S626_RPS_SIG1 0x00400000 /* RPS semaphore 1
- * (used by DAC). */
-#define S626_RPS_SIG2 0x00800000 /* RPS semaphore 2
- * (not used). */
-#define S626_RPS_GPIO2 0x00080000 /* RPS GPIO2 */
-#define S626_RPS_GPIO3 0x00100000 /* RPS GPIO3 */
-
-#define S626_RPS_SIGADC S626_RPS_SIG0 /* Trigger/status for
- * ADC's RPS program. */
-#define S626_RPS_SIGDAC S626_RPS_SIG1 /* Trigger/status for
- * DAC's RPS program. */
-
-/* RPS clock parameters. */
-#define S626_RPSCLK_SCALAR 8 /* Apparent ratio of PCI to RPS
- * clocks (undocumented). */
-#define S626_RPSCLK_PER_US (33 / S626_RPSCLK_SCALAR)
- /* Number of RPS clocks in one
- * microsecond. */
-
-/* Event counter source addresses. */
-#define S626_SBA_RPS_A0 0x27 /* Time of RPS0 busy, in PCI clocks. */
-
-/* GPIO constants. */
-#define S626_GPIO_BASE 0x10004000 /* GPIO 0,2,3 = inputs,
- * GPIO3 = IRQ; GPIO1 = out. */
-#define S626_GPIO1_LO 0x00000000 /* GPIO1 set to LOW. */
-#define S626_GPIO1_HI 0x00001000 /* GPIO1 set to HIGH. */
-
-/* Primary Status Register (PSR) constants. */
-#define S626_PSR_DEBI_E 0x00040000 /* DEBI event flag. */
-#define S626_PSR_DEBI_S 0x00080000 /* DEBI status flag. */
-#define S626_PSR_A2_IN 0x00008000 /* Audio output DMA2 protection
- * address reached. */
-#define S626_PSR_AFOU 0x00000800 /* Audio FIFO under/overflow
- * detected. */
-#define S626_PSR_GPIO2 0x00000020 /* GPIO2 input pin: 0=AdcBusy,
- * 1=AdcIdle. */
-#define S626_PSR_EC0S 0x00000001 /* Event counter 0 threshold
- * reached. */
-
-/* Secondary Status Register (SSR) constants. */
-#define S626_SSR_AF2_OUT 0x00000200 /* Audio 2 output FIFO
- * under/overflow detected. */
-
-/* Master Control Register 1 (MC1) constants. */
-#define S626_MC1_SOFT_RESET 0x80000000 /* Invoke 7146 soft reset. */
-#define S626_MC1_SHUTDOWN 0x3FFF0000 /* Shut down all MC1-controlled
- * enables. */
-
-#define S626_MC1_ERPS1 0x2000 /* Enab/disable RPS task 1. */
-#define S626_MC1_ERPS0 0x1000 /* Enab/disable RPS task 0. */
-#define S626_MC1_DEBI 0x0800 /* Enab/disable DEBI pins. */
-#define S626_MC1_AUDIO 0x0200 /* Enab/disable audio port pins. */
-#define S626_MC1_I2C 0x0100 /* Enab/disable I2C interface. */
-#define S626_MC1_A2OUT 0x0008 /* Enab/disable transfer on A2 out. */
-#define S626_MC1_A2IN 0x0004 /* Enab/disable transfer on A2 in. */
-#define S626_MC1_A1IN 0x0001 /* Enab/disable transfer on A1 in. */
-
-/* Master Control Register 2 (MC2) constants. */
-#define S626_MC2_UPLD_DEBI 0x0002 /* Upload DEBI. */
-#define S626_MC2_UPLD_IIC 0x0001 /* Upload I2C. */
-#define S626_MC2_RPSSIG2 0x2000 /* RPS signal 2 (not used). */
-#define S626_MC2_RPSSIG1 0x1000 /* RPS signal 1 (DAC RPS busy). */
-#define S626_MC2_RPSSIG0 0x0800 /* RPS signal 0 (ADC RPS busy). */
-
-#define S626_MC2_ADC_RPS S626_MC2_RPSSIG0 /* ADC RPS busy. */
-#define S626_MC2_DAC_RPS S626_MC2_RPSSIG1 /* DAC RPS busy. */
-
-/* PCI BUS (SAA7146) REGISTER ADDRESS OFFSETS */
-#define S626_P_PCI_BT_A 0x004C /* Audio DMA burst/threshold control. */
-#define S626_P_DEBICFG 0x007C /* DEBI configuration. */
-#define S626_P_DEBICMD 0x0080 /* DEBI command. */
-#define S626_P_DEBIPAGE 0x0084 /* DEBI page. */
-#define S626_P_DEBIAD 0x0088 /* DEBI target address. */
-#define S626_P_I2CCTRL 0x008C /* I2C control. */
-#define S626_P_I2CSTAT 0x0090 /* I2C status. */
-#define S626_P_BASEA2_IN 0x00AC /* Audio input 2 base physical DMAbuf
- * address. */
-#define S626_P_PROTA2_IN 0x00B0 /* Audio input 2 physical DMAbuf
- * protection address. */
-#define S626_P_PAGEA2_IN 0x00B4 /* Audio input 2 paging attributes. */
-#define S626_P_BASEA2_OUT 0x00B8 /* Audio output 2 base physical DMAbuf
- * address. */
-#define S626_P_PROTA2_OUT 0x00BC /* Audio output 2 physical DMAbuf
- * protection address. */
-#define S626_P_PAGEA2_OUT 0x00C0 /* Audio output 2 paging attributes. */
-#define S626_P_RPSPAGE0 0x00C4 /* RPS0 page. */
-#define S626_P_RPSPAGE1 0x00C8 /* RPS1 page. */
-#define S626_P_RPS0_TOUT 0x00D4 /* RPS0 time-out. */
-#define S626_P_RPS1_TOUT 0x00D8 /* RPS1 time-out. */
-#define S626_P_IER 0x00DC /* Interrupt enable. */
-#define S626_P_GPIO 0x00E0 /* General-purpose I/O. */
-#define S626_P_EC1SSR 0x00E4 /* Event counter set 1 source select. */
-#define S626_P_ECT1R 0x00EC /* Event counter threshold set 1. */
-#define S626_P_ACON1 0x00F4 /* Audio control 1. */
-#define S626_P_ACON2 0x00F8 /* Audio control 2. */
-#define S626_P_MC1 0x00FC /* Master control 1. */
-#define S626_P_MC2 0x0100 /* Master control 2. */
-#define S626_P_RPSADDR0 0x0104 /* RPS0 instruction pointer. */
-#define S626_P_RPSADDR1 0x0108 /* RPS1 instruction pointer. */
-#define S626_P_ISR 0x010C /* Interrupt status. */
-#define S626_P_PSR 0x0110 /* Primary status. */
-#define S626_P_SSR 0x0114 /* Secondary status. */
-#define S626_P_EC1R 0x0118 /* Event counter set 1. */
-#define S626_P_ADP4 0x0138 /* Logical audio DMA pointer of audio
- * input FIFO A2_IN. */
-#define S626_P_FB_BUFFER1 0x0144 /* Audio feedback buffer 1. */
-#define S626_P_FB_BUFFER2 0x0148 /* Audio feedback buffer 2. */
-#define S626_P_TSL1 0x0180 /* Audio time slot list 1. */
-#define S626_P_TSL2 0x01C0 /* Audio time slot list 2. */
-
-/* LOCAL BUS (GATE ARRAY) REGISTER ADDRESS OFFSETS */
-/* Analog I/O registers: */
-#define S626_LP_DACPOL 0x0082 /* Write DAC polarity. */
-#define S626_LP_GSEL 0x0084 /* Write ADC gain. */
-#define S626_LP_ISEL 0x0086 /* Write ADC channel select. */
-
-/* Digital I/O registers */
-#define S626_LP_RDDIN(x) (0x0040 + (x) * 0x10) /* R: digital input */
-#define S626_LP_WRINTSEL(x) (0x0042 + (x) * 0x10) /* W: int enable */
-#define S626_LP_WREDGSEL(x) (0x0044 + (x) * 0x10) /* W: edge selection */
-#define S626_LP_WRCAPSEL(x) (0x0046 + (x) * 0x10) /* W: capture enable */
-#define S626_LP_RDCAPFLG(x) (0x0048 + (x) * 0x10) /* R: edges captured */
-#define S626_LP_WRDOUT(x) (0x0048 + (x) * 0x10) /* W: digital output */
-#define S626_LP_RDINTSEL(x) (0x004a + (x) * 0x10) /* R: int enable */
-#define S626_LP_RDEDGSEL(x) (0x004c + (x) * 0x10) /* R: edge selection */
-#define S626_LP_RDCAPSEL(x) (0x004e + (x) * 0x10) /* R: capture enable */
-
-/* Counter registers (read/write): 0A 1A 2A 0B 1B 2B */
-#define S626_LP_CRA(x) (0x0000 + (((x) % 3) * 0x4))
-#define S626_LP_CRB(x) (0x0002 + (((x) % 3) * 0x4))
-
-/* Counter PreLoad (write) and Latch (read) Registers: 0A 1A 2A 0B 1B 2B */
-#define S626_LP_CNTR(x) (0x000c + (((x) < 3) ? 0x0 : 0x4) + \
- (((x) % 3) * 0x8))
-
-/* Miscellaneous Registers (read/write): */
-#define S626_LP_MISC1 0x0088 /* Read/write Misc1. */
-#define S626_LP_WRMISC2 0x0090 /* Write Misc2. */
-#define S626_LP_RDMISC2 0x0082 /* Read Misc2. */
-
-/* Bit masks for MISC1 register that are the same for reads and writes. */
-#define S626_MISC1_WENABLE 0x8000 /* Enable writes to MISC2 (except the
- * Clear Watchdog bit). */
-#define S626_MISC1_WDISABLE 0x0000 /* Disable writes to MISC2. */
-#define S626_MISC1_EDCAP 0x1000 /* Enable edge capture on DIO chans
- * specified by S626_LP_WRCAPSELx. */
-#define S626_MISC1_NOEDCAP 0x0000 /* Disable edge capture on specified
- * DIO chans. */
-
-/* Bit masks for MISC1 register reads. */
-#define S626_RDMISC1_WDTIMEOUT 0x4000 /* Watchdog timer timed out. */
-
-/* Bit masks for MISC2 register writes. */
-#define S626_WRMISC2_WDCLEAR 0x8000 /* Reset watchdog timer to zero. */
-#define S626_WRMISC2_CHARGE_ENABLE 0x4000 /* Enable battery trickle charging. */
-
-/* Bit masks for MISC2 register that are the same for reads and writes. */
-#define S626_MISC2_BATT_ENABLE 0x0008 /* Backup battery enable. */
-#define S626_MISC2_WDENABLE 0x0004 /* Watchdog timer enable. */
-#define S626_MISC2_WDPERIOD_MASK 0x0003 /* Watchdog interval select mask. */
-
-/* Bit masks for ACON1 register. */
-#define S626_A2_RUN 0x40000000 /* Run A2 based on TSL2. */
-#define S626_A1_RUN 0x20000000 /* Run A1 based on TSL1. */
-#define S626_A1_SWAP 0x00200000 /* Use big-endian for A1. */
-#define S626_A2_SWAP 0x00100000 /* Use big-endian for A2. */
-#define S626_WS_MODES 0x00019999 /* WS0 = TSL1 trigger input,
- * WS1-WS4 = CS* outputs. */
-
-#if S626_PLATFORM == S626_INTEL /* Base ACON1 config: always run
- * A1 based on TSL1. */
-#define S626_ACON1_BASE (S626_WS_MODES | S626_A1_RUN)
-#elif S626_PLATFORM == S626_MOTOROLA
-#define S626_ACON1_BASE \
- (S626_WS_MODES | S626_A1_RUN | S626_A1_SWAP | S626_A2_SWAP)
-#endif
-
-#define S626_ACON1_ADCSTART S626_ACON1_BASE /* Start ADC: run A1
- * based on TSL1. */
-#define S626_ACON1_DACSTART (S626_ACON1_BASE | S626_A2_RUN)
-/* Start transmit to DAC: run A2 based on TSL2. */
-#define S626_ACON1_DACSTOP S626_ACON1_BASE /* Halt A2. */
-
-/* Bit masks for ACON2 register. */
-#define S626_A1_CLKSRC_BCLK1 0x00000000 /* A1 bit rate = BCLK1 (ADC). */
-#define S626_A2_CLKSRC_X1 0x00800000 /* A2 bit rate = ACLK/1
- * (DACs). */
-#define S626_A2_CLKSRC_X2 0x00C00000 /* A2 bit rate = ACLK/2
- * (DACs). */
-#define S626_A2_CLKSRC_X4 0x01400000 /* A2 bit rate = ACLK/4
- * (DACs). */
-#define S626_INVERT_BCLK2 0x00100000 /* Invert BCLK2 (DACs). */
-#define S626_BCLK2_OE 0x00040000 /* Enable BCLK2 (DACs). */
-#define S626_ACON2_XORMASK 0x000C0000 /* XOR mask for ACON2
- * active-low bits. */
-
-#define S626_ACON2_INIT (S626_ACON2_XORMASK ^ \
- (S626_A1_CLKSRC_BCLK1 | S626_A2_CLKSRC_X2 | \
- S626_INVERT_BCLK2 | S626_BCLK2_OE))
-
-/* Bit masks for timeslot records. */
-#define S626_WS1 0x40000000 /* WS output to assert. */
-#define S626_WS2 0x20000000
-#define S626_WS3 0x10000000
-#define S626_WS4 0x08000000
-#define S626_RSD1 0x01000000 /* Shift A1 data in on SD1. */
-#define S626_SDW_A1 0x00800000 /* Store rcv'd char at next char
- * slot of DWORD1 buffer. */
-#define S626_SIB_A1 0x00400000 /* Store rcv'd char at next
- * char slot of FB1 buffer. */
-#define S626_SF_A1 0x00200000 /* Write unsigned long
- * buffer to input FIFO. */
-
-/* Select parallel-to-serial converter's data source: */
-#define S626_XFIFO_0 0x00000000 /* Data fifo byte 0. */
-#define S626_XFIFO_1 0x00000010 /* Data fifo byte 1. */
-#define S626_XFIFO_2 0x00000020 /* Data fifo byte 2. */
-#define S626_XFIFO_3 0x00000030 /* Data fifo byte 3. */
-#define S626_XFB0 0x00000040 /* FB_BUFFER byte 0. */
-#define S626_XFB1 0x00000050 /* FB_BUFFER byte 1. */
-#define S626_XFB2 0x00000060 /* FB_BUFFER byte 2. */
-#define S626_XFB3 0x00000070 /* FB_BUFFER byte 3. */
-#define S626_SIB_A2 0x00000200 /* Store next dword from A2's
- * input shifter to FB2
- * buffer. */
-#define S626_SF_A2 0x00000100 /* Store next dword from A2's
- * input shifter to its input
- * fifo. */
-#define S626_LF_A2 0x00000080 /* Load next dword from A2's
- * output fifo into its
- * output dword buffer. */
-#define S626_XSD2 0x00000008 /* Shift data out on SD2. */
-#define S626_RSD3 0x00001800 /* Shift data in on SD3. */
-#define S626_RSD2 0x00001000 /* Shift data in on SD2. */
-#define S626_LOW_A2 0x00000002 /* Drive last SD low for 7 clks,
- * then tri-state. */
-#define S626_EOS 0x00000001 /* End of superframe. */
-
-/* I2C configuration constants. */
-#define S626_I2C_CLKSEL 0x0400 /* I2C bit rate =
- * PCIclk/480 = 68.75 KHz. */
-#define S626_I2C_BITRATE 68.75 /* I2C bus data bit rate
- * (determined by
- * S626_I2C_CLKSEL) in KHz. */
-#define S626_I2C_WRTIME 15.0 /* Worst case time, in msec,
- * for EEPROM internal write
- * op. */
-
-/* I2C manifest constants. */
-
-/* Max retries to wait for EEPROM write. */
-#define S626_I2C_RETRIES (S626_I2C_WRTIME * S626_I2C_BITRATE / 9.0)
-#define S626_I2C_ERR 0x0002 /* I2C control/status flag ERROR. */
-#define S626_I2C_BUSY 0x0001 /* I2C control/status flag BUSY. */
-#define S626_I2C_ABORT 0x0080 /* I2C status flag ABORT. */
-#define S626_I2C_ATTRSTART 0x3 /* I2C attribute START. */
-#define S626_I2C_ATTRCONT 0x2 /* I2C attribute CONT. */
-#define S626_I2C_ATTRSTOP 0x1 /* I2C attribute STOP. */
-#define S626_I2C_ATTRNOP 0x0 /* I2C attribute NOP. */
-
-/* Code macros used for constructing I2C command bytes. */
-#define S626_I2C_B2(ATTR, VAL) (((ATTR) << 6) | ((VAL) << 24))
-#define S626_I2C_B1(ATTR, VAL) (((ATTR) << 4) | ((VAL) << 16))
-#define S626_I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) << 8))
-
-/* DEBI command constants. */
-#define S626_DEBI_CMD_SIZE16 (2 << 17) /* Transfer size is always
- * 2 bytes. */
-#define S626_DEBI_CMD_READ 0x00010000 /* Read operation. */
-#define S626_DEBI_CMD_WRITE 0x00000000 /* Write operation. */
-
-/* Read immediate 2 bytes. */
-#define S626_DEBI_CMD_RDWORD (S626_DEBI_CMD_READ | S626_DEBI_CMD_SIZE16)
-
-/* Write immediate 2 bytes. */
-#define S626_DEBI_CMD_WRWORD (S626_DEBI_CMD_WRITE | S626_DEBI_CMD_SIZE16)
-
-/* DEBI configuration constants. */
-#define S626_DEBI_CFG_XIRQ_EN 0x80000000 /* Enable external interrupt
- * on GPIO3. */
-#define S626_DEBI_CFG_XRESUME 0x40000000 /* Resume block transfer when
- * XIRQ deasserted. */
-#define S626_DEBI_CFG_TOQ 0x03C00000 /* Timeout (15 PCI cycles). */
-#define S626_DEBI_CFG_FAST 0x10000000 /* Fast mode enable. */
-
-/* 4-bit field that specifies DEBI timeout value in PCI clock cycles: */
-#define S626_DEBI_CFG_TOUT_BIT 22 /* Finish DEBI cycle after this many
- * clocks. */
-
-/* 2-bit field that specifies Endian byte lane steering: */
-#define S626_DEBI_CFG_SWAP_NONE 0x00000000 /* Straight - don't swap any
- * bytes (Intel). */
-#define S626_DEBI_CFG_SWAP_2 0x00100000 /* 2-byte swap (Motorola). */
-#define S626_DEBI_CFG_SWAP_4 0x00200000 /* 4-byte swap. */
-#define S626_DEBI_CFG_SLAVE16 0x00080000 /* Slave is able to serve
- * 16-bit cycles. */
-#define S626_DEBI_CFG_INC 0x00040000 /* Enable address increment
- * for block transfers. */
-#define S626_DEBI_CFG_INTEL 0x00020000 /* Intel style local bus. */
-#define S626_DEBI_CFG_TIMEROFF 0x00010000 /* Disable timer. */
-
-#if S626_PLATFORM == S626_INTEL
-
-#define S626_DEBI_TOUT 7 /* Wait 7 PCI clocks (212 ns) before
- * polling RDY. */
-
-/* Intel byte lane steering (pass through all byte lanes). */
-#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_NONE
-
-#elif S626_PLATFORM == S626_MOTOROLA
-
-#define S626_DEBI_TOUT 15 /* Wait 15 PCI clocks (454 ns) maximum
- * before timing out. */
-
-/* Motorola byte lane steering. */
-#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_2
-
-#endif
-
-/* DEBI page table constants. */
-#define S626_DEBI_PAGE_DISABLE 0x00000000 /* Paging disable. */
-
-/* ******* EXTRA FROM OTHER SENSORAY *.h ******* */
-
-/* LoadSrc values: */
-#define S626_LOADSRC_INDX 0 /* Preload core in response to Index. */
-#define S626_LOADSRC_OVER 1 /* Preload core in response to
- * Overflow. */
-#define S626_LOADSRCB_OVERA 2 /* Preload B core in response to
- * A Overflow. */
-#define S626_LOADSRC_NONE 3 /* Never preload core. */
-
-/* IntSrc values: */
-#define S626_INTSRC_NONE 0 /* Interrupts disabled. */
-#define S626_INTSRC_OVER 1 /* Interrupt on Overflow. */
-#define S626_INTSRC_INDX 2 /* Interrupt on Index. */
-#define S626_INTSRC_BOTH 3 /* Interrupt on Index or Overflow. */
-
-/* LatchSrc values: */
-#define S626_LATCHSRC_AB_READ 0 /* Latch on read. */
-#define S626_LATCHSRC_A_INDXA 1 /* Latch A on A Index. */
-#define S626_LATCHSRC_B_INDXB 2 /* Latch B on B Index. */
-#define S626_LATCHSRC_B_OVERA 3 /* Latch B on A Overflow. */
-
-/* IndxSrc values: */
-#define S626_INDXSRC_ENCODER 0 /* Encoder. */
-#define S626_INDXSRC_DIGIN 1 /* Digital inputs. */
-#define S626_INDXSRC_SOFT 2 /* S/w controlled by IndxPol bit. */
-#define S626_INDXSRC_DISABLED 3 /* Index disabled. */
-
-/* IndxPol values: */
-#define S626_INDXPOL_POS 0 /* Index input is active high. */
-#define S626_INDXPOL_NEG 1 /* Index input is active low. */
-
-/* Logical encoder mode values: */
-#define S626_ENCMODE_COUNTER 0 /* Counter mode. */
-#define S626_ENCMODE_TIMER 2 /* Timer mode. */
-#define S626_ENCMODE_EXTENDER 3 /* Extender mode. */
-
-/* Physical CntSrc values (for Counter A source and Counter B source): */
-#define S626_CNTSRC_ENCODER 0 /* Encoder */
-#define S626_CNTSRC_DIGIN 1 /* Digital inputs */
-#define S626_CNTSRC_SYSCLK 2 /* System clock up */
-#define S626_CNTSRC_SYSCLK_DOWN 3 /* System clock down */
-
-/* ClkPol values: */
-#define S626_CLKPOL_POS 0 /* Counter/Extender clock is
- * active high. */
-#define S626_CLKPOL_NEG 1 /* Counter/Extender clock is
- * active low. */
-#define S626_CNTDIR_UP 0 /* Timer counts up. */
-#define S626_CNTDIR_DOWN 1 /* Timer counts down. */
-
-/* ClkEnab values: */
-#define S626_CLKENAB_ALWAYS 0 /* Clock always enabled. */
-#define S626_CLKENAB_INDEX 1 /* Clock is enabled by index. */
-
-/* ClkMult values: */
-#define S626_CLKMULT_4X 0 /* 4x clock multiplier. */
-#define S626_CLKMULT_2X 1 /* 2x clock multiplier. */
-#define S626_CLKMULT_1X 2 /* 1x clock multiplier. */
-#define S626_CLKMULT_SPECIAL 3 /* Special clock multiplier value. */
-
-/* Sanity-check limits for parameters. */
-
-#define S626_NUM_COUNTERS 6 /* Maximum valid counter
- * logical channel number. */
-#define S626_NUM_INTSOURCES 4
-#define S626_NUM_LATCHSOURCES 4
-#define S626_NUM_CLKMULTS 4
-#define S626_NUM_CLKSOURCES 4
-#define S626_NUM_CLKPOLS 2
-#define S626_NUM_INDEXPOLS 2
-#define S626_NUM_INDEXSOURCES 2
-#define S626_NUM_LOADTRIGS 4
-
-/* General macros for manipulating bitfields: */
-#define S626_MAKE(x, w, p) (((x) & ((1 << (w)) - 1)) << (p))
-#define S626_UNMAKE(v, w, p) (((v) >> (p)) & ((1 << (w)) - 1))
-
-/* Bit field positions in CRA: */
-#define S626_CRABIT_INDXSRC_B 14 /* B index source. */
-#define S626_CRABIT_CNTSRC_B 12 /* B counter source. */
-#define S626_CRABIT_INDXPOL_A 11 /* A index polarity. */
-#define S626_CRABIT_LOADSRC_A 9 /* A preload trigger. */
-#define S626_CRABIT_CLKMULT_A 7 /* A clock multiplier. */
-#define S626_CRABIT_INTSRC_A 5 /* A interrupt source. */
-#define S626_CRABIT_CLKPOL_A 4 /* A clock polarity. */
-#define S626_CRABIT_INDXSRC_A 2 /* A index source. */
-#define S626_CRABIT_CNTSRC_A 0 /* A counter source. */
-
-/* Bit field widths in CRA: */
-#define S626_CRAWID_INDXSRC_B 2
-#define S626_CRAWID_CNTSRC_B 2
-#define S626_CRAWID_INDXPOL_A 1
-#define S626_CRAWID_LOADSRC_A 2
-#define S626_CRAWID_CLKMULT_A 2
-#define S626_CRAWID_INTSRC_A 2
-#define S626_CRAWID_CLKPOL_A 1
-#define S626_CRAWID_INDXSRC_A 2
-#define S626_CRAWID_CNTSRC_A 2
-
-/* Bit field masks for CRA: */
-#define S626_CRAMSK_INDXSRC_B S626_SET_CRA_INDXSRC_B(~0)
-#define S626_CRAMSK_CNTSRC_B S626_SET_CRA_CNTSRC_B(~0)
-#define S626_CRAMSK_INDXPOL_A S626_SET_CRA_INDXPOL_A(~0)
-#define S626_CRAMSK_LOADSRC_A S626_SET_CRA_LOADSRC_A(~0)
-#define S626_CRAMSK_CLKMULT_A S626_SET_CRA_CLKMULT_A(~0)
-#define S626_CRAMSK_INTSRC_A S626_SET_CRA_INTSRC_A(~0)
-#define S626_CRAMSK_CLKPOL_A S626_SET_CRA_CLKPOL_A(~0)
-#define S626_CRAMSK_INDXSRC_A S626_SET_CRA_INDXSRC_A(~0)
-#define S626_CRAMSK_CNTSRC_A S626_SET_CRA_CNTSRC_A(~0)
-
-/* Construct parts of the CRA value: */
-#define S626_SET_CRA_INDXSRC_B(x) \
- S626_MAKE((x), S626_CRAWID_INDXSRC_B, S626_CRABIT_INDXSRC_B)
-#define S626_SET_CRA_CNTSRC_B(x) \
- S626_MAKE((x), S626_CRAWID_CNTSRC_B, S626_CRABIT_CNTSRC_B)
-#define S626_SET_CRA_INDXPOL_A(x) \
- S626_MAKE((x), S626_CRAWID_INDXPOL_A, S626_CRABIT_INDXPOL_A)
-#define S626_SET_CRA_LOADSRC_A(x) \
- S626_MAKE((x), S626_CRAWID_LOADSRC_A, S626_CRABIT_LOADSRC_A)
-#define S626_SET_CRA_CLKMULT_A(x) \
- S626_MAKE((x), S626_CRAWID_CLKMULT_A, S626_CRABIT_CLKMULT_A)
-#define S626_SET_CRA_INTSRC_A(x) \
- S626_MAKE((x), S626_CRAWID_INTSRC_A, S626_CRABIT_INTSRC_A)
-#define S626_SET_CRA_CLKPOL_A(x) \
- S626_MAKE((x), S626_CRAWID_CLKPOL_A, S626_CRABIT_CLKPOL_A)
-#define S626_SET_CRA_INDXSRC_A(x) \
- S626_MAKE((x), S626_CRAWID_INDXSRC_A, S626_CRABIT_INDXSRC_A)
-#define S626_SET_CRA_CNTSRC_A(x) \
- S626_MAKE((x), S626_CRAWID_CNTSRC_A, S626_CRABIT_CNTSRC_A)
-
-/* Extract parts of the CRA value: */
-#define S626_GET_CRA_INDXSRC_B(v) \
- S626_UNMAKE((v), S626_CRAWID_INDXSRC_B, S626_CRABIT_INDXSRC_B)
-#define S626_GET_CRA_CNTSRC_B(v) \
- S626_UNMAKE((v), S626_CRAWID_CNTSRC_B, S626_CRABIT_CNTSRC_B)
-#define S626_GET_CRA_INDXPOL_A(v) \
- S626_UNMAKE((v), S626_CRAWID_INDXPOL_A, S626_CRABIT_INDXPOL_A)
-#define S626_GET_CRA_LOADSRC_A(v) \
- S626_UNMAKE((v), S626_CRAWID_LOADSRC_A, S626_CRABIT_LOADSRC_A)
-#define S626_GET_CRA_CLKMULT_A(v) \
- S626_UNMAKE((v), S626_CRAWID_CLKMULT_A, S626_CRABIT_CLKMULT_A)
-#define S626_GET_CRA_INTSRC_A(v) \
- S626_UNMAKE((v), S626_CRAWID_INTSRC_A, S626_CRABIT_INTSRC_A)
-#define S626_GET_CRA_CLKPOL_A(v) \
- S626_UNMAKE((v), S626_CRAWID_CLKPOL_A, S626_CRABIT_CLKPOL_A)
-#define S626_GET_CRA_INDXSRC_A(v) \
- S626_UNMAKE((v), S626_CRAWID_INDXSRC_A, S626_CRABIT_INDXSRC_A)
-#define S626_GET_CRA_CNTSRC_A(v) \
- S626_UNMAKE((v), S626_CRAWID_CNTSRC_A, S626_CRABIT_CNTSRC_A)
-
-/* Bit field positions in CRB: */
-#define S626_CRBBIT_INTRESETCMD 15 /* (w) Interrupt reset command. */
-#define S626_CRBBIT_CNTDIR_B 15 /* (r) B counter direction. */
-#define S626_CRBBIT_INTRESET_B 14 /* (w) B interrupt reset enable. */
-#define S626_CRBBIT_OVERDO_A 14 /* (r) A overflow routed to dig. out. */
-#define S626_CRBBIT_INTRESET_A 13 /* (w) A interrupt reset enable. */
-#define S626_CRBBIT_OVERDO_B 13 /* (r) B overflow routed to dig. out. */
-#define S626_CRBBIT_CLKENAB_A 12 /* A clock enable. */
-#define S626_CRBBIT_INTSRC_B 10 /* B interrupt source. */
-#define S626_CRBBIT_LATCHSRC 8 /* A/B latch source. */
-#define S626_CRBBIT_LOADSRC_B 6 /* B preload trigger. */
-#define S626_CRBBIT_CLEAR_B 7 /* B cleared when A overflows. */
-#define S626_CRBBIT_CLKMULT_B 3 /* B clock multiplier. */
-#define S626_CRBBIT_CLKENAB_B 2 /* B clock enable. */
-#define S626_CRBBIT_INDXPOL_B 1 /* B index polarity. */
-#define S626_CRBBIT_CLKPOL_B 0 /* B clock polarity. */
-
-/* Bit field widths in CRB: */
-#define S626_CRBWID_INTRESETCMD 1
-#define S626_CRBWID_CNTDIR_B 1
-#define S626_CRBWID_INTRESET_B 1
-#define S626_CRBWID_OVERDO_A 1
-#define S626_CRBWID_INTRESET_A 1
-#define S626_CRBWID_OVERDO_B 1
-#define S626_CRBWID_CLKENAB_A 1
-#define S626_CRBWID_INTSRC_B 2
-#define S626_CRBWID_LATCHSRC 2
-#define S626_CRBWID_LOADSRC_B 2
-#define S626_CRBWID_CLEAR_B 1
-#define S626_CRBWID_CLKMULT_B 2
-#define S626_CRBWID_CLKENAB_B 1
-#define S626_CRBWID_INDXPOL_B 1
-#define S626_CRBWID_CLKPOL_B 1
-
-/* Bit field masks for CRB: */
-#define S626_CRBMSK_INTRESETCMD S626_SET_CRB_INTRESETCMD(~0) /* (w) */
-#define S626_CRBMSK_CNTDIR_B S626_CRBMSK_INTRESETCMD /* (r) */
-#define S626_CRBMSK_INTRESET_B S626_SET_CRB_INTRESET_B(~0) /* (w) */
-#define S626_CRBMSK_OVERDO_A S626_CRBMSK_INTRESET_B /* (r) */
-#define S626_CRBMSK_INTRESET_A S626_SET_CRB_INTRESET_A(~0) /* (w) */
-#define S626_CRBMSK_OVERDO_B S626_CRBMSK_INTRESET_A /* (r) */
-#define S626_CRBMSK_CLKENAB_A S626_SET_CRB_CLKENAB_A(~0)
-#define S626_CRBMSK_INTSRC_B S626_SET_CRB_INTSRC_B(~0)
-#define S626_CRBMSK_LATCHSRC S626_SET_CRB_LATCHSRC(~0)
-#define S626_CRBMSK_LOADSRC_B S626_SET_CRB_LOADSRC_B(~0)
-#define S626_CRBMSK_CLEAR_B S626_SET_CRB_CLEAR_B(~0)
-#define S626_CRBMSK_CLKMULT_B S626_SET_CRB_CLKMULT_B(~0)
-#define S626_CRBMSK_CLKENAB_B S626_SET_CRB_CLKENAB_B(~0)
-#define S626_CRBMSK_INDXPOL_B S626_SET_CRB_INDXPOL_B(~0)
-#define S626_CRBMSK_CLKPOL_B S626_SET_CRB_CLKPOL_B(~0)
-
-/* Interrupt reset control bits. */
-#define S626_CRBMSK_INTCTRL (S626_CRBMSK_INTRESETCMD | \
- S626_CRBMSK_INTRESET_A | \
- S626_CRBMSK_INTRESET_B)
-
-/* Construct parts of the CRB value: */
-#define S626_SET_CRB_INTRESETCMD(x) \
- S626_MAKE((x), S626_CRBWID_INTRESETCMD, S626_CRBBIT_INTRESETCMD)
-#define S626_SET_CRB_INTRESET_B(x) \
- S626_MAKE((x), S626_CRBWID_INTRESET_B, S626_CRBBIT_INTRESET_B)
-#define S626_SET_CRB_INTRESET_A(x) \
- S626_MAKE((x), S626_CRBWID_INTRESET_A, S626_CRBBIT_INTRESET_A)
-#define S626_SET_CRB_CLKENAB_A(x) \
- S626_MAKE((x), S626_CRBWID_CLKENAB_A, S626_CRBBIT_CLKENAB_A)
-#define S626_SET_CRB_INTSRC_B(x) \
- S626_MAKE((x), S626_CRBWID_INTSRC_B, S626_CRBBIT_INTSRC_B)
-#define S626_SET_CRB_LATCHSRC(x) \
- S626_MAKE((x), S626_CRBWID_LATCHSRC, S626_CRBBIT_LATCHSRC)
-#define S626_SET_CRB_LOADSRC_B(x) \
- S626_MAKE((x), S626_CRBWID_LOADSRC_B, S626_CRBBIT_LOADSRC_B)
-#define S626_SET_CRB_CLEAR_B(x) \
- S626_MAKE((x), S626_CRBWID_CLEAR_B, S626_CRBBIT_CLEAR_B)
-#define S626_SET_CRB_CLKMULT_B(x) \
- S626_MAKE((x), S626_CRBWID_CLKMULT_B, S626_CRBBIT_CLKMULT_B)
-#define S626_SET_CRB_CLKENAB_B(x) \
- S626_MAKE((x), S626_CRBWID_CLKENAB_B, S626_CRBBIT_CLKENAB_B)
-#define S626_SET_CRB_INDXPOL_B(x) \
- S626_MAKE((x), S626_CRBWID_INDXPOL_B, S626_CRBBIT_INDXPOL_B)
-#define S626_SET_CRB_CLKPOL_B(x) \
- S626_MAKE((x), S626_CRBWID_CLKPOL_B, S626_CRBBIT_CLKPOL_B)
-
-/* Extract parts of the CRB value: */
-#define S626_GET_CRB_CNTDIR_B(v) \
- S626_UNMAKE((v), S626_CRBWID_CNTDIR_B, S626_CRBBIT_CNTDIR_B)
-#define S626_GET_CRB_OVERDO_A(v) \
- S626_UNMAKE((v), S626_CRBWID_OVERDO_A, S626_CRBBIT_OVERDO_A)
-#define S626_GET_CRB_OVERDO_B(v) \
- S626_UNMAKE((v), S626_CRBWID_OVERDO_B, S626_CRBBIT_OVERDO_B)
-#define S626_GET_CRB_CLKENAB_A(v) \
- S626_UNMAKE((v), S626_CRBWID_CLKENAB_A, S626_CRBBIT_CLKENAB_A)
-#define S626_GET_CRB_INTSRC_B(v) \
- S626_UNMAKE((v), S626_CRBWID_INTSRC_B, S626_CRBBIT_INTSRC_B)
-#define S626_GET_CRB_LATCHSRC(v) \
- S626_UNMAKE((v), S626_CRBWID_LATCHSRC, S626_CRBBIT_LATCHSRC)
-#define S626_GET_CRB_LOADSRC_B(v) \
- S626_UNMAKE((v), S626_CRBWID_LOADSRC_B, S626_CRBBIT_LOADSRC_B)
-#define S626_GET_CRB_CLEAR_B(v) \
- S626_UNMAKE((v), S626_CRBWID_CLEAR_B, S626_CRBBIT_CLEAR_B)
-#define S626_GET_CRB_CLKMULT_B(v) \
- S626_UNMAKE((v), S626_CRBWID_CLKMULT_B, S626_CRBBIT_CLKMULT_B)
-#define S626_GET_CRB_CLKENAB_B(v) \
- S626_UNMAKE((v), S626_CRBWID_CLKENAB_B, S626_CRBBIT_CLKENAB_B)
-#define S626_GET_CRB_INDXPOL_B(v) \
- S626_UNMAKE((v), S626_CRBWID_INDXPOL_B, S626_CRBBIT_INDXPOL_B)
-#define S626_GET_CRB_CLKPOL_B(v) \
- S626_UNMAKE((v), S626_CRBWID_CLKPOL_B, S626_CRBBIT_CLKPOL_B)
-
-/* Bit field positions for standardized SETUP structure: */
-#define S626_STDBIT_INTSRC 13
-#define S626_STDBIT_LATCHSRC 11
-#define S626_STDBIT_LOADSRC 9
-#define S626_STDBIT_INDXSRC 7
-#define S626_STDBIT_INDXPOL 6
-#define S626_STDBIT_ENCMODE 4
-#define S626_STDBIT_CLKPOL 3
-#define S626_STDBIT_CLKMULT 1
-#define S626_STDBIT_CLKENAB 0
-
-/* Bit field widths for standardized SETUP structure: */
-#define S626_STDWID_INTSRC 2
-#define S626_STDWID_LATCHSRC 2
-#define S626_STDWID_LOADSRC 2
-#define S626_STDWID_INDXSRC 2
-#define S626_STDWID_INDXPOL 1
-#define S626_STDWID_ENCMODE 2
-#define S626_STDWID_CLKPOL 1
-#define S626_STDWID_CLKMULT 2
-#define S626_STDWID_CLKENAB 1
-
-/* Bit field masks for standardized SETUP structure: */
-#define S626_STDMSK_INTSRC S626_SET_STD_INTSRC(~0)
-#define S626_STDMSK_LATCHSRC S626_SET_STD_LATCHSRC(~0)
-#define S626_STDMSK_LOADSRC S626_SET_STD_LOADSRC(~0)
-#define S626_STDMSK_INDXSRC S626_SET_STD_INDXSRC(~0)
-#define S626_STDMSK_INDXPOL S626_SET_STD_INDXPOL(~0)
-#define S626_STDMSK_ENCMODE S626_SET_STD_ENCMODE(~0)
-#define S626_STDMSK_CLKPOL S626_SET_STD_CLKPOL(~0)
-#define S626_STDMSK_CLKMULT S626_SET_STD_CLKMULT(~0)
-#define S626_STDMSK_CLKENAB S626_SET_STD_CLKENAB(~0)
-
-/* Construct parts of standardized SETUP structure: */
-#define S626_SET_STD_INTSRC(x) \
- S626_MAKE((x), S626_STDWID_INTSRC, S626_STDBIT_INTSRC)
-#define S626_SET_STD_LATCHSRC(x) \
- S626_MAKE((x), S626_STDWID_LATCHSRC, S626_STDBIT_LATCHSRC)
-#define S626_SET_STD_LOADSRC(x) \
- S626_MAKE((x), S626_STDWID_LOADSRC, S626_STDBIT_LOADSRC)
-#define S626_SET_STD_INDXSRC(x) \
- S626_MAKE((x), S626_STDWID_INDXSRC, S626_STDBIT_INDXSRC)
-#define S626_SET_STD_INDXPOL(x) \
- S626_MAKE((x), S626_STDWID_INDXPOL, S626_STDBIT_INDXPOL)
-#define S626_SET_STD_ENCMODE(x) \
- S626_MAKE((x), S626_STDWID_ENCMODE, S626_STDBIT_ENCMODE)
-#define S626_SET_STD_CLKPOL(x) \
- S626_MAKE((x), S626_STDWID_CLKPOL, S626_STDBIT_CLKPOL)
-#define S626_SET_STD_CLKMULT(x) \
- S626_MAKE((x), S626_STDWID_CLKMULT, S626_STDBIT_CLKMULT)
-#define S626_SET_STD_CLKENAB(x) \
- S626_MAKE((x), S626_STDWID_CLKENAB, S626_STDBIT_CLKENAB)
-
-/* Extract parts of standardized SETUP structure: */
-#define S626_GET_STD_INTSRC(v) \
- S626_UNMAKE((v), S626_STDWID_INTSRC, S626_STDBIT_INTSRC)
-#define S626_GET_STD_LATCHSRC(v) \
- S626_UNMAKE((v), S626_STDWID_LATCHSRC, S626_STDBIT_LATCHSRC)
-#define S626_GET_STD_LOADSRC(v) \
- S626_UNMAKE((v), S626_STDWID_LOADSRC, S626_STDBIT_LOADSRC)
-#define S626_GET_STD_INDXSRC(v) \
- S626_UNMAKE((v), S626_STDWID_INDXSRC, S626_STDBIT_INDXSRC)
-#define S626_GET_STD_INDXPOL(v) \
- S626_UNMAKE((v), S626_STDWID_INDXPOL, S626_STDBIT_INDXPOL)
-#define S626_GET_STD_ENCMODE(v) \
- S626_UNMAKE((v), S626_STDWID_ENCMODE, S626_STDBIT_ENCMODE)
-#define S626_GET_STD_CLKPOL(v) \
- S626_UNMAKE((v), S626_STDWID_CLKPOL, S626_STDBIT_CLKPOL)
-#define S626_GET_STD_CLKMULT(v) \
- S626_UNMAKE((v), S626_STDWID_CLKMULT, S626_STDBIT_CLKMULT)
-#define S626_GET_STD_CLKENAB(v) \
- S626_UNMAKE((v), S626_STDWID_CLKENAB, S626_STDBIT_CLKENAB)
-
-#endif
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
deleted file mode 100644
index 7a1defcf2102..000000000000
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * serial2002.c
- * Comedi driver for serial connected hardware
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2002 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: serial2002
- * Description: Driver for serial connected hardware
- * Devices:
- * Author: Anders Blomdell
- * Updated: Fri, 7 Jun 2002 12:56:45 -0700
- * Status: in development
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/ktime.h>
-
-#include <linux/termios.h>
-#include <asm/ioctls.h>
-#include <linux/serial.h>
-#include <linux/poll.h>
-
-struct serial2002_range_table_t {
- /* HACK... */
- int length;
- struct comedi_krange range;
-};
-
-struct serial2002_private {
- int port; /* /dev/ttyS<port> */
- int speed; /* baudrate */
- struct file *tty;
- unsigned int ao_readback[32];
- unsigned char digital_in_mapping[32];
- unsigned char digital_out_mapping[32];
- unsigned char analog_in_mapping[32];
- unsigned char analog_out_mapping[32];
- unsigned char encoder_in_mapping[32];
- struct serial2002_range_table_t in_range[32], out_range[32];
-};
-
-struct serial_data {
- enum { is_invalid, is_digital, is_channel } kind;
- int index;
- unsigned long value;
-};
-
-/*
- * The configuration serial_data.value read from the device is
- * a bitmask that defines specific options of a channel:
- *
- * 4:0 - the channel to configure
- * 7:5 - the kind of channel
- * 9:8 - the command used to configure the channel
- *
- * The remaining bits vary in use depending on the command:
- *
- * BITS 15:10 - the channel bits (maxdata)
- * MIN/MAX 12:10 - the units multiplier for the scale
- * 13 - the sign of the scale
- * 33:14 - the base value for the range
- */
-#define S2002_CFG_CHAN(x) ((x) & 0x1f)
-#define S2002_CFG_KIND(x) (((x) >> 5) & 0x7)
-#define S2002_CFG_KIND_INVALID 0
-#define S2002_CFG_KIND_DIGITAL_IN 1
-#define S2002_CFG_KIND_DIGITAL_OUT 2
-#define S2002_CFG_KIND_ANALOG_IN 3
-#define S2002_CFG_KIND_ANALOG_OUT 4
-#define S2002_CFG_KIND_ENCODER_IN 5
-#define S2002_CFG_CMD(x) (((x) >> 8) & 0x3)
-#define S2002_CFG_CMD_BITS 0
-#define S2002_CFG_CMD_MIN 1
-#define S2002_CFG_CMD_MAX 2
-#define S2002_CFG_BITS(x) (((x) >> 10) & 0x3f)
-#define S2002_CFG_UNITS(x) (((x) >> 10) & 0x7)
-#define S2002_CFG_SIGN(x) (((x) >> 13) & 0x1)
-#define S2002_CFG_BASE(x) (((x) >> 14) & 0xfffff)
-
-static long serial2002_tty_ioctl(struct file *f, unsigned op,
- unsigned long param)
-{
- if (f->f_op->unlocked_ioctl)
- return f->f_op->unlocked_ioctl(f, op, param);
-
- return -ENOTTY;
-}
-
-static int serial2002_tty_write(struct file *f, unsigned char *buf, int count)
-{
- const char __user *p = (__force const char __user *)buf;
- int result;
- loff_t offset = 0;
- mm_segment_t oldfs;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- result = __vfs_write(f, p, count, &offset);
- set_fs(oldfs);
- return result;
-}
-
-static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
-{
- struct poll_wqueues table;
- ktime_t start, now;
-
- start = ktime_get();
- poll_initwait(&table);
- while (1) {
- long elapsed;
- int mask;
-
- mask = f->f_op->poll(f, &table.pt);
- if (mask & (POLLRDNORM | POLLRDBAND | POLLIN |
- POLLHUP | POLLERR)) {
- break;
- }
- now = ktime_get();
- elapsed = ktime_us_delta(now, start);
- if (elapsed > timeout)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(((timeout - elapsed) * HZ) / 10000);
- }
- poll_freewait(&table);
-}
-
-static int serial2002_tty_read(struct file *f, int timeout)
-{
- unsigned char ch;
- int result;
-
- result = -1;
- if (!IS_ERR(f)) {
- mm_segment_t oldfs;
- char __user *p = (__force char __user *)&ch;
- loff_t offset = 0;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- if (f->f_op->poll) {
- serial2002_tty_read_poll_wait(f, timeout);
-
- if (__vfs_read(f, p, 1, &offset) == 1)
- result = ch;
- } else {
- /* Device does not support poll, busy wait */
- int retries = 0;
-
- while (1) {
- retries++;
- if (retries >= timeout)
- break;
-
- if (__vfs_read(f, p, 1, &offset) == 1) {
- result = ch;
- break;
- }
- usleep_range(100, 1000);
- }
- }
- set_fs(oldfs);
- }
- return result;
-}
-
-static void serial2002_tty_setspeed(struct file *f, int speed)
-{
- struct termios termios;
- struct serial_struct serial;
- mm_segment_t oldfs;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
-
- /* Set speed */
- serial2002_tty_ioctl(f, TCGETS, (unsigned long)&termios);
- termios.c_iflag = 0;
- termios.c_oflag = 0;
- termios.c_lflag = 0;
- termios.c_cflag = CLOCAL | CS8 | CREAD;
- termios.c_cc[VMIN] = 0;
- termios.c_cc[VTIME] = 0;
- switch (speed) {
- case 2400:
- termios.c_cflag |= B2400;
- break;
- case 4800:
- termios.c_cflag |= B4800;
- break;
- case 9600:
- termios.c_cflag |= B9600;
- break;
- case 19200:
- termios.c_cflag |= B19200;
- break;
- case 38400:
- termios.c_cflag |= B38400;
- break;
- case 57600:
- termios.c_cflag |= B57600;
- break;
- case 115200:
- termios.c_cflag |= B115200;
- break;
- default:
- termios.c_cflag |= B9600;
- break;
- }
- serial2002_tty_ioctl(f, TCSETS, (unsigned long)&termios);
-
- /* Set low latency */
- serial2002_tty_ioctl(f, TIOCGSERIAL, (unsigned long)&serial);
- serial.flags |= ASYNC_LOW_LATENCY;
- serial2002_tty_ioctl(f, TIOCSSERIAL, (unsigned long)&serial);
-
- set_fs(oldfs);
-}
-
-static void serial2002_poll_digital(struct file *f, int channel)
-{
- char cmd;
-
- cmd = 0x40 | (channel & 0x1f);
- serial2002_tty_write(f, &cmd, 1);
-}
-
-static void serial2002_poll_channel(struct file *f, int channel)
-{
- char cmd;
-
- cmd = 0x60 | (channel & 0x1f);
- serial2002_tty_write(f, &cmd, 1);
-}
-
-static struct serial_data serial2002_read(struct file *f, int timeout)
-{
- struct serial_data result;
- int length;
-
- result.kind = is_invalid;
- result.index = 0;
- result.value = 0;
- length = 0;
- while (1) {
- int data = serial2002_tty_read(f, timeout);
-
- length++;
- if (data < 0) {
- break;
- } else if (data & 0x80) {
- result.value = (result.value << 7) | (data & 0x7f);
- } else {
- if (length == 1) {
- switch ((data >> 5) & 0x03) {
- case 0:
- result.value = 0;
- result.kind = is_digital;
- break;
- case 1:
- result.value = 1;
- result.kind = is_digital;
- break;
- }
- } else {
- result.value =
- (result.value << 2) | ((data & 0x60) >> 5);
- result.kind = is_channel;
- }
- result.index = data & 0x1f;
- break;
- }
- }
- return result;
-}
-
-static void serial2002_write(struct file *f, struct serial_data data)
-{
- if (data.kind == is_digital) {
- unsigned char ch =
- ((data.value << 5) & 0x20) | (data.index & 0x1f);
- serial2002_tty_write(f, &ch, 1);
- } else {
- unsigned char ch[6];
- int i = 0;
-
- if (data.value >= (1L << 30)) {
- ch[i] = 0x80 | ((data.value >> 30) & 0x03);
- i++;
- }
- if (data.value >= (1L << 23)) {
- ch[i] = 0x80 | ((data.value >> 23) & 0x7f);
- i++;
- }
- if (data.value >= (1L << 16)) {
- ch[i] = 0x80 | ((data.value >> 16) & 0x7f);
- i++;
- }
- if (data.value >= (1L << 9)) {
- ch[i] = 0x80 | ((data.value >> 9) & 0x7f);
- i++;
- }
- ch[i] = 0x80 | ((data.value >> 2) & 0x7f);
- i++;
- ch[i] = ((data.value << 5) & 0x60) | (data.index & 0x1f);
- i++;
- serial2002_tty_write(f, ch, i);
- }
-}
-
-struct config_t {
- short int kind;
- short int bits;
- int min;
- int max;
-};
-
-static int serial2002_setup_subdevice(struct comedi_subdevice *s,
- struct config_t *cfg,
- struct serial2002_range_table_t *range,
- unsigned char *mapping,
- int kind)
-{
- const struct comedi_lrange **range_table_list = NULL;
- unsigned int *maxdata_list;
- int j, chan;
-
- for (chan = 0, j = 0; j < 32; j++) {
- if (cfg[j].kind == kind)
- chan++;
- }
- s->n_chan = chan;
- s->maxdata = 0;
- kfree(s->maxdata_list);
- maxdata_list = kmalloc_array(s->n_chan, sizeof(unsigned int),
- GFP_KERNEL);
- if (!maxdata_list)
- return -ENOMEM;
- s->maxdata_list = maxdata_list;
- kfree(s->range_table_list);
- s->range_table = NULL;
- s->range_table_list = NULL;
- if (kind == 1 || kind == 2) {
- s->range_table = &range_digital;
- } else if (range) {
- range_table_list = kmalloc_array(s->n_chan, sizeof(*range),
- GFP_KERNEL);
- if (!range_table_list)
- return -ENOMEM;
- s->range_table_list = range_table_list;
- }
- for (chan = 0, j = 0; j < 32; j++) {
- if (cfg[j].kind == kind) {
- if (mapping)
- mapping[chan] = j;
- if (range && range_table_list) {
- range[j].length = 1;
- range[j].range.min = cfg[j].min;
- range[j].range.max = cfg[j].max;
- range_table_list[chan] =
- (const struct comedi_lrange *)&range[j];
- }
- maxdata_list[chan] = ((long long)1 << cfg[j].bits) - 1;
- chan++;
- }
- }
- return 0;
-}
-
-static int serial2002_setup_subdevs(struct comedi_device *dev)
-{
- struct serial2002_private *devpriv = dev->private;
- struct config_t *di_cfg;
- struct config_t *do_cfg;
- struct config_t *ai_cfg;
- struct config_t *ao_cfg;
- struct config_t *cfg;
- struct comedi_subdevice *s;
- int result = 0;
- int i;
-
- /* Allocate the temporary structs to hold the configuration data */
- di_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- do_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- ai_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- ao_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- if (!di_cfg || !do_cfg || !ai_cfg || !ao_cfg) {
- result = -ENOMEM;
- goto err_alloc_configs;
- }
-
- /* Read the configuration from the connected device */
- serial2002_tty_setspeed(devpriv->tty, devpriv->speed);
- serial2002_poll_channel(devpriv->tty, 31);
- while (1) {
- struct serial_data data = serial2002_read(devpriv->tty, 1000);
- int kind = S2002_CFG_KIND(data.value);
- int channel = S2002_CFG_CHAN(data.value);
- int range = S2002_CFG_BASE(data.value);
- int cmd = S2002_CFG_CMD(data.value);
-
- if (data.kind != is_channel || data.index != 31 ||
- kind == S2002_CFG_KIND_INVALID)
- break;
-
- switch (kind) {
- case S2002_CFG_KIND_DIGITAL_IN:
- cfg = di_cfg;
- break;
- case S2002_CFG_KIND_DIGITAL_OUT:
- cfg = do_cfg;
- break;
- case S2002_CFG_KIND_ANALOG_IN:
- cfg = ai_cfg;
- break;
- case S2002_CFG_KIND_ANALOG_OUT:
- cfg = ao_cfg;
- break;
- case S2002_CFG_KIND_ENCODER_IN:
- cfg = ai_cfg;
- break;
- default:
- cfg = NULL;
- break;
- }
- if (!cfg)
- continue; /* unknown kind, skip it */
-
- cfg[channel].kind = kind;
-
- switch (cmd) {
- case S2002_CFG_CMD_BITS:
- cfg[channel].bits = S2002_CFG_BITS(data.value);
- break;
- case S2002_CFG_CMD_MIN:
- case S2002_CFG_CMD_MAX:
- switch (S2002_CFG_UNITS(data.value)) {
- case 0:
- range *= 1000000;
- break;
- case 1:
- range *= 1000;
- break;
- case 2:
- range *= 1;
- break;
- }
- if (S2002_CFG_SIGN(data.value))
- range = -range;
- if (cmd == S2002_CFG_CMD_MIN)
- cfg[channel].min = range;
- else
- cfg[channel].max = range;
- break;
- }
- }
-
- /* Fill in subdevice data */
- for (i = 0; i <= 4; i++) {
- unsigned char *mapping = NULL;
- struct serial2002_range_table_t *range = NULL;
- int kind = 0;
-
- s = &dev->subdevices[i];
-
- switch (i) {
- case 0:
- cfg = di_cfg;
- mapping = devpriv->digital_in_mapping;
- kind = S2002_CFG_KIND_DIGITAL_IN;
- break;
- case 1:
- cfg = do_cfg;
- mapping = devpriv->digital_out_mapping;
- kind = S2002_CFG_KIND_DIGITAL_OUT;
- break;
- case 2:
- cfg = ai_cfg;
- mapping = devpriv->analog_in_mapping;
- range = devpriv->in_range;
- kind = S2002_CFG_KIND_ANALOG_IN;
- break;
- case 3:
- cfg = ao_cfg;
- mapping = devpriv->analog_out_mapping;
- range = devpriv->out_range;
- kind = S2002_CFG_KIND_ANALOG_OUT;
- break;
- case 4:
- cfg = ai_cfg;
- mapping = devpriv->encoder_in_mapping;
- range = devpriv->in_range;
- kind = S2002_CFG_KIND_ENCODER_IN;
- break;
- }
-
- if (serial2002_setup_subdevice(s, cfg, range, mapping, kind))
- break; /* err handled below */
- }
- if (i <= 4) {
- /*
- * Failed to allocate maxdata_list or range_table_list
- * for a subdevice that needed it.
- */
- result = -ENOMEM;
- for (i = 0; i <= 4; i++) {
- s = &dev->subdevices[i];
- kfree(s->maxdata_list);
- s->maxdata_list = NULL;
- kfree(s->range_table_list);
- s->range_table_list = NULL;
- }
- }
-
-err_alloc_configs:
- kfree(di_cfg);
- kfree(do_cfg);
- kfree(ai_cfg);
- kfree(ao_cfg);
-
- if (result) {
- if (devpriv->tty) {
- filp_close(devpriv->tty, NULL);
- devpriv->tty = NULL;
- }
- }
-
- return result;
-}
-
-static int serial2002_open(struct comedi_device *dev)
-{
- struct serial2002_private *devpriv = dev->private;
- int result;
- char port[20];
-
- sprintf(port, "/dev/ttyS%d", devpriv->port);
- devpriv->tty = filp_open(port, O_RDWR, 0);
- if (IS_ERR(devpriv->tty)) {
- result = (int)PTR_ERR(devpriv->tty);
- dev_err(dev->class_dev, "file open error = %d\n", result);
- } else {
- result = serial2002_setup_subdevs(dev);
- }
- return result;
-}
-
-static void serial2002_close(struct comedi_device *dev)
-{
- struct serial2002_private *devpriv = dev->private;
-
- if (!IS_ERR(devpriv->tty) && devpriv->tty)
- filp_close(devpriv->tty, NULL);
-}
-
-static int serial2002_di_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->digital_in_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data read;
-
- serial2002_poll_digital(devpriv->tty, chan);
- while (1) {
- read = serial2002_read(devpriv->tty, 1000);
- if (read.kind != is_digital || read.index == chan)
- break;
- }
- data[n] = read.value;
- }
- return n;
-}
-
-static int serial2002_do_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->digital_out_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data write;
-
- write.kind = is_digital;
- write.index = chan;
- write.value = data[n];
- serial2002_write(devpriv->tty, write);
- }
- return n;
-}
-
-static int serial2002_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->analog_in_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data read;
-
- serial2002_poll_channel(devpriv->tty, chan);
- while (1) {
- read = serial2002_read(devpriv->tty, 1000);
- if (read.kind != is_channel || read.index == chan)
- break;
- }
- data[n] = read.value;
- }
- return n;
-}
-
-static int serial2002_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->analog_out_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data write;
-
- write.kind = is_channel;
- write.index = chan;
- write.value = data[n];
- serial2002_write(devpriv->tty, write);
- devpriv->ao_readback[chan] = data[n];
- }
- return n;
-}
-
-static int serial2002_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
-
- return n;
-}
-
-static int serial2002_encoder_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->encoder_in_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data read;
-
- serial2002_poll_channel(devpriv->tty, chan);
- while (1) {
- read = serial2002_read(devpriv->tty, 1000);
- if (read.kind != is_channel || read.index == chan)
- break;
- }
- data[n] = read.value;
- }
- return n;
-}
-
-static int serial2002_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct serial2002_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- devpriv->port = it->options[0];
- devpriv->speed = it->options[1];
-
- ret = comedi_alloc_subdevices(dev, 5);
- if (ret)
- return ret;
-
- /* digital input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_read = serial2002_di_insn_read;
-
- /* digital output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_write = serial2002_do_insn_write;
-
- /* analog input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_read = serial2002_ai_insn_read;
-
- /* analog output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_write = serial2002_ao_insn_write;
- s->insn_read = serial2002_ao_insn_read;
-
- /* encoder input subdevice */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_read = serial2002_encoder_insn_read;
-
- dev->open = serial2002_open;
- dev->close = serial2002_close;
-
- return 0;
-}
-
-static void serial2002_detach(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int i;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- kfree(s->maxdata_list);
- kfree(s->range_table_list);
- }
-}
-
-static struct comedi_driver serial2002_driver = {
- .driver_name = "serial2002",
- .module = THIS_MODULE,
- .attach = serial2002_attach,
- .detach = serial2002_detach,
-};
-module_comedi_driver(serial2002_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
deleted file mode 100644
index f9f634fd53cf..000000000000
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * ssv_dnp.c
- * generic comedi driver for SSV Embedded Systems' DIL/Net-PCs
- * Copyright (C) 2001 Robert Schwebel <robert@schwebel.de>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: ssv_dnp
- * Description: SSV Embedded Systems DIL/Net-PC
- * Author: Robert Schwebel <robert@schwebel.de>
- * Devices: [SSV Embedded Systems] DIL/Net-PC 1486 (dnp-1486)
- * Status: unknown
- */
-
-/* include files ----------------------------------------------------------- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-/* Some global definitions: the registers of the DNP ----------------------- */
-/* */
-/* For port A and B the mode register has bits corresponding to the output */
-/* pins, where Bit-N = 0 -> input, Bit-N = 1 -> output. Note that bits */
-/* 4 to 7 correspond to pin 0..3 for port C data register. Ensure that bits */
-/* 0..3 remain unchanged! For details about Port C Mode Register see */
-/* the remarks in dnp_insn_config() below. */
-
-#define CSCIR 0x22 /* Chip Setup and Control Index Register */
-#define CSCDR 0x23 /* Chip Setup and Control Data Register */
-#define PAMR 0xa5 /* Port A Mode Register */
-#define PADR 0xa9 /* Port A Data Register */
-#define PBMR 0xa4 /* Port B Mode Register */
-#define PBDR 0xa8 /* Port B Data Register */
-#define PCMR 0xa3 /* Port C Mode Register */
-#define PCDR 0xa7 /* Port C Data Register */
-
-static int dnp_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int mask;
- unsigned int val;
-
- /*
- * Ports A and B are straightforward: each bit corresponds to an
- * output pin in the same order. Port C is different: bits 0...3
- * correspond to bits 4...7 of the output register (PCDR).
- */
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- outb(PADR, CSCIR);
- outb(s->state & 0xff, CSCDR);
-
- outb(PBDR, CSCIR);
- outb((s->state >> 8) & 0xff, CSCDR);
-
- outb(PCDR, CSCIR);
- val = inb(CSCDR) & 0x0f;
- outb(((s->state >> 12) & 0xf0) | val, CSCDR);
- }
-
- outb(PADR, CSCIR);
- val = inb(CSCDR);
- outb(PBDR, CSCIR);
- val |= (inb(CSCDR) << 8);
- outb(PCDR, CSCIR);
- val |= ((inb(CSCDR) & 0xf0) << 12);
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int dnp_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- unsigned int val;
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- if (chan < 8) { /* Port A */
- mask = 1 << chan;
- outb(PAMR, CSCIR);
- } else if (chan < 16) { /* Port B */
- mask = 1 << (chan - 8);
- outb(PBMR, CSCIR);
- } else { /* Port C */
- /*
- * We have to pay attention with port C.
- * This is the meaning of PCMR:
- * Bit in PCMR: 7 6 5 4 3 2 1 0
- * Corresponding port C pin: d 3 d 2 d 1 d 0 (d = don't touch)
- *
- * Multiplication by 2 brings bits into correct position
- * for PCMR!
- */
- mask = 1 << ((chan - 16) * 2);
- outb(PCMR, CSCIR);
- }
-
- val = inb(CSCDR);
- if (data[0] == COMEDI_OUTPUT)
- val |= mask;
- else
- val &= ~mask;
- outb(val, CSCDR);
-
- return insn->n;
-}
-
-static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- /*
- * We use I/O ports 0x22, 0x23 and 0xa3-0xa9, which are always
- * allocated for the primary 8259, so we don't need to allocate
- * them ourselves.
- */
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* digital i/o subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 20;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = dnp_dio_insn_bits;
- s->insn_config = dnp_dio_insn_config;
-
- /* configure all ports as input (default) */
- outb(PAMR, CSCIR);
- outb(0x00, CSCDR);
- outb(PBMR, CSCIR);
- outb(0x00, CSCDR);
- outb(PCMR, CSCIR);
- outb((inb(CSCDR) & 0xAA), CSCDR);
-
- return 0;
-}
-
-static void dnp_detach(struct comedi_device *dev)
-{
- outb(PAMR, CSCIR);
- outb(0x00, CSCDR);
- outb(PBMR, CSCIR);
- outb(0x00, CSCDR);
- outb(PCMR, CSCIR);
- outb((inb(CSCDR) & 0xAA), CSCDR);
-}
-
-static struct comedi_driver dnp_driver = {
- .driver_name = "dnp-1486",
- .module = THIS_MODULE,
- .attach = dnp_attach,
- .detach = dnp_detach,
-};
-module_comedi_driver(dnp_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
deleted file mode 100644
index 39710f2297a6..000000000000
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ /dev/null
@@ -1,1737 +0,0 @@
-/*
- * usbdux.c
- * Copyright (C) 2003-2014 Bernd Porr, mail@berndporr.me.uk
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: usbdux
- * Description: University of Stirling USB DAQ & INCITE Technology Limited
- * Devices: [ITL] USB-DUX (usbdux)
- * Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
- * Status: Stable
- *
- * Connection scheme for the counter at the digital port:
- * 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1.
- * The sampling rate of the counter is approximately 500Hz.
- *
- * Note that under USB2.0 the length of the channel list determines
- * the max sampling rate. If you sample only one channel you get 8kHz
- * sampling rate. If you sample two channels you get 4kHz and so on.
- */
-
-/*
- * I must give credit here to Chris Baugher who
- * wrote the driver for AT-MIO-16d. I used some parts of this
- * driver. I also must give credits to David Brownell
- * who supported me with the USB development.
- *
- * Bernd Porr
- *
- *
- * Revision history:
- * 0.94: D/A output should work now with any channel list combinations
- * 0.95: .owner commented out for kernel vers below 2.4.19
- * sanity checks in ai/ao_cmd
- * 0.96: trying to get it working with 2.6, moved all memory alloc to
- * comedi's attach; final USB IDs;
- * moved memory allocation completely to the corresponding comedi
- * functions; firmware upload is by fxload and no longer by comedi
- * (due to enumeration)
- * 0.97: USB IDs received, adjusted table
- * 0.98: SMP, locking, memory alloc: moved all usb memory alloc
- * to the usb subsystem and moved all comedi related memory
- * alloc to comedi.
- * | kernel | registration | usbdux-usb | usbdux-comedi | comedi |
- * 0.99: USB 2.0: changed protocol to isochronous transfer
- * IRQ transfer is too buggy and too risky in 2.0;
- * for high speed a working version of the ISO transfer is now
- * available
- * 0.99b: Increased the iso transfer buffer for high speed to 10 buffers.
- * Some VIA chipsets miss IRQs; deeper buffering is needed.
- * 1.00: full USB 2.0 support for the A/D converter. Now: max 8kHz sampling
- * rate.
- * Firmware vers 1.00 is needed for this.
- * Two 16 bit up/down/reset counters with a sampling rate of 1kHz
- * And loads of cleaning up, in particular streamlining the
- * bulk transfers.
- * 1.1: moved EP4 transfers to EP1 to make space for a PWM output on EP4
- * 1.2: added PWM support via EP4
- * 2.0: PWM seems to be stable and is not interfering with the other functions
- * 2.1: changed PWM API
- * 2.2: added firmware kernel request to fix a udev problem
- * 2.3: corrected a bug in bulk timeouts which were far too short
- * 2.4: fixed a bug which caused the driver to hang when it ran out of data.
- * Thanks to Jan-Matthias Braun and Ian for spotting and fixing the bug.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/fcntl.h>
-#include <linux/compiler.h>
-
-#include "../comedi_usb.h"
-
-/* constants for firmware upload and download */
-#define USBDUX_FIRMWARE "usbdux_firmware.bin"
-#define USBDUX_FIRMWARE_MAX_LEN 0x2000
-#define USBDUX_FIRMWARE_CMD 0xa0
-#define VENDOR_DIR_IN 0xc0
-#define VENDOR_DIR_OUT 0x40
-#define USBDUX_CPU_CS 0xe600
-
-/* usbdux bulk transfer commands */
-#define USBDUX_CMD_MULT_AI 0
-#define USBDUX_CMD_AO 1
-#define USBDUX_CMD_DIO_CFG 2
-#define USBDUX_CMD_DIO_BITS 3
-#define USBDUX_CMD_SINGLE_AI 4
-#define USBDUX_CMD_TIMER_RD 5
-#define USBDUX_CMD_TIMER_WR 6
-#define USBDUX_CMD_PWM_ON 7
-#define USBDUX_CMD_PWM_OFF 8
-
-/* timeout for the USB-transfer in ms */
-#define BULK_TIMEOUT 1000
-
-/* 300 Hz max frequency under PWM */
-#define MIN_PWM_PERIOD ((long)(1E9 / 300))
-
-/* Default PWM frequency */
-#define PWM_DEFAULT_PERIOD ((long)(1E9 / 100))
-
-/* Size of one A/D value */
-#define SIZEADIN ((sizeof(u16)))
-
-/*
- * Size of the input-buffer IN BYTES
- * Always multiple of 8 for 8 microframes which is needed in the highspeed mode
- */
-#define SIZEINBUF (8 * SIZEADIN)
-
-/* 16 bytes. */
-#define SIZEINSNBUF 16
-
-/* size of one value for the D/A converter: channel and value */
-#define SIZEDAOUT ((sizeof(u8) + sizeof(u16)))
-
-/*
- * Size of the output-buffer in bytes
- * Actually only the first 4 triplets are used but for the
- * high speed mode we need to pad it to 8 (microframes).
- */
-#define SIZEOUTBUF (8 * SIZEDAOUT)
-
-/*
- * Size of the buffer for the dux commands: currently the max size is
- * determined by the analogue out + command byte + panic bytes...
- */
-#define SIZEOFDUXBUFFER (8 * SIZEDAOUT + 2)
-
-/* Number of in-URBs which receive the data: min=2 */
-#define NUMOFINBUFFERSFULL 5
-
-/* Number of out-URBs which send the data: min=2 */
-#define NUMOFOUTBUFFERSFULL 5
-
-/* Number of in-URBs which receive the data: min=5 */
-/* must have more buffers due to buggy USB controllers */
-#define NUMOFINBUFFERSHIGH 10
-
-/* Number of out-URBs which send the data: min=5 */
-/* must have more buffers due to buggy USB controllers */
-#define NUMOFOUTBUFFERSHIGH 10
-
-/* number of retries to get the right dux command */
-#define RETRIES 10
-
-static const struct comedi_lrange range_usbdux_ai_range = {
- 4, {
- BIP_RANGE(4.096),
- BIP_RANGE(4.096 / 2),
- UNI_RANGE(4.096),
- UNI_RANGE(4.096 / 2)
- }
-};
-
-static const struct comedi_lrange range_usbdux_ao_range = {
- 2, {
- BIP_RANGE(4.096),
- UNI_RANGE(4.096)
- }
-};
-
-struct usbdux_private {
- /* actual number of in-buffers */
- int n_ai_urbs;
- /* actual number of out-buffers */
- int n_ao_urbs;
- /* ISO-transfer handling: buffers */
- struct urb **ai_urbs;
- struct urb **ao_urbs;
- /* pwm-transfer handling */
- struct urb *pwm_urb;
- /* PWM period */
- unsigned int pwm_period;
- /* PWM internal delay for the GPIF in the FX2 */
- u8 pwm_delay;
- /* size of the PWM buffer which holds the bit pattern */
- int pwm_buf_sz;
- /* input buffer for the ISO-transfer */
- __le16 *in_buf;
- /* input buffer for single insn */
- __le16 *insn_buf;
-
- unsigned int high_speed:1;
- unsigned int ai_cmd_running:1;
- unsigned int ao_cmd_running:1;
- unsigned int pwm_cmd_running:1;
-
- /* time between samples in units of the timer */
- unsigned int ai_timer;
- unsigned int ao_timer;
- /* counter between acquisitions */
- unsigned int ai_counter;
- unsigned int ao_counter;
- /* interval in frames/uframes */
- unsigned int ai_interval;
- /* commands */
- u8 *dux_commands;
- struct semaphore sem;
-};
-
-static void usbdux_unlink_urbs(struct urb **urbs, int num_urbs)
-{
- int i;
-
- for (i = 0; i < num_urbs; i++)
- usb_kill_urb(urbs[i]);
-}
-
-static void usbdux_ai_stop(struct comedi_device *dev, int do_unlink)
-{
- struct usbdux_private *devpriv = dev->private;
-
- if (do_unlink && devpriv->ai_urbs)
- usbdux_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs);
-
- devpriv->ai_cmd_running = 0;
-}
-
-static int usbdux_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbdux_private *devpriv = dev->private;
-
- /* prevent other CPUs from submitting new commands just now */
- down(&devpriv->sem);
- /* unlink only if the urb really has been submitted */
- usbdux_ai_stop(dev, devpriv->ai_cmd_running);
- up(&devpriv->sem);
-
- return 0;
-}
-
-static void usbduxsub_ai_handle_urb(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct urb *urb)
-{
- struct usbdux_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- int ret;
- int i;
-
- devpriv->ai_counter--;
- if (devpriv->ai_counter == 0) {
- devpriv->ai_counter = devpriv->ai_timer;
-
- /* get the data from the USB bus and hand it over to comedi */
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- u16 val = le16_to_cpu(devpriv->in_buf[i]);
-
- /* bipolar data is two's-complement */
- if (comedi_range_is_bipolar(s, range))
- val = comedi_offset_munge(s, val);
-
- /* transfer data */
- if (!comedi_buf_write_samples(s, &val, 1))
- return;
- }
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
- }
-
- /* if command is still running, resubmit urb */
- if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
- urb->dev = comedi_to_usb_dev(dev);
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "urb resubmit failed in int-context! err=%d\n",
- ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler!\n");
- async->events |= COMEDI_CB_ERROR;
- }
- }
-}
-
-static void usbduxsub_ai_isoc_irq(struct urb *urb)
-{
- struct comedi_device *dev = urb->context;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct usbdux_private *devpriv = dev->private;
-
- /* exit if not running a command, do not resubmit urb */
- if (!devpriv->ai_cmd_running)
- return;
-
- switch (urb->status) {
- case 0:
- /* copy the result in the transfer buffer */
- memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
- usbduxsub_ai_handle_urb(dev, s, urb);
- break;
-
- case -EILSEQ:
- /*
- * error in the ISOchronous data
- * we don't copy the data into the transfer buffer
- * and recycle the last data byte
- */
- dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
- usbduxsub_ai_handle_urb(dev, s, urb);
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* after an unlink command, unplug, ... etc */
- async->events |= COMEDI_CB_ERROR;
- break;
-
- default:
- /* a real error */
- dev_err(dev->class_dev,
- "Non-zero urb status received in ai intr context: %d\n",
- urb->status);
- async->events |= COMEDI_CB_ERROR;
- break;
- }
-
- /*
- * comedi_handle_events() cannot be used in this driver. The (*cancel)
- * operation would unlink the urb.
- */
- if (async->events & COMEDI_CB_CANCEL_MASK)
- usbdux_ai_stop(dev, 0);
-
- comedi_event(dev, s);
-}
-
-static void usbdux_ao_stop(struct comedi_device *dev, int do_unlink)
-{
- struct usbdux_private *devpriv = dev->private;
-
- if (do_unlink && devpriv->ao_urbs)
- usbdux_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs);
-
- devpriv->ao_cmd_running = 0;
-}
-
-static int usbdux_ao_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbdux_private *devpriv = dev->private;
-
- /* prevent other CPUs from submitting a command just now */
- down(&devpriv->sem);
- /* unlink only if it is really running */
- usbdux_ao_stop(dev, devpriv->ao_cmd_running);
- up(&devpriv->sem);
-
- return 0;
-}
-
-static void usbduxsub_ao_handle_urb(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct urb *urb)
-{
- struct usbdux_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- u8 *datap;
- int ret;
- int i;
-
- devpriv->ao_counter--;
- if (devpriv->ao_counter == 0) {
- devpriv->ao_counter = devpriv->ao_timer;
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- return;
- }
-
- /* transmit data to the USB bus */
- datap = urb->transfer_buffer;
- *datap++ = cmd->chanlist_len;
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned short val;
-
- if (!comedi_buf_read_samples(s, &val, 1)) {
- dev_err(dev->class_dev, "buffer underflow\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return;
- }
-
- /* pointer to the DA */
- *datap++ = val & 0xff;
- *datap++ = (val >> 8) & 0xff;
- *datap++ = chan << 6;
- s->readback[chan] = val;
- }
- }
-
- /* if command is still running, resubmit urb for BULK transfer */
- if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- if (devpriv->high_speed)
- urb->interval = 8; /* uframes */
- else
- urb->interval = 1; /* frames */
- urb->number_of_packets = 1;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEOUTBUF;
- urb->iso_frame_desc[0].status = 0;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "ao urb resubm failed in int-cont. ret=%d",
- ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handling!\n");
- async->events |= COMEDI_CB_ERROR;
- }
- }
-}
-
-static void usbduxsub_ao_isoc_irq(struct urb *urb)
-{
- struct comedi_device *dev = urb->context;
- struct comedi_subdevice *s = dev->write_subdev;
- struct comedi_async *async = s->async;
- struct usbdux_private *devpriv = dev->private;
-
- /* exit if not running a command, do not resubmit urb */
- if (!devpriv->ao_cmd_running)
- return;
-
- switch (urb->status) {
- case 0:
- usbduxsub_ao_handle_urb(dev, s, urb);
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* after an unlink command, unplug, ... etc */
- async->events |= COMEDI_CB_ERROR;
- break;
-
- default:
- /* a real error */
- dev_err(dev->class_dev,
- "Non-zero urb status received in ao intr context: %d\n",
- urb->status);
- async->events |= COMEDI_CB_ERROR;
- break;
- }
-
- /*
- * comedi_handle_events() cannot be used in this driver. The (*cancel)
- * operation would unlink the urb.
- */
- if (async->events & COMEDI_CB_CANCEL_MASK)
- usbdux_ao_stop(dev, 0);
-
- comedi_event(dev, s);
-}
-
-static int usbdux_submit_urbs(struct comedi_device *dev,
- struct urb **urbs, int num_urbs,
- int input_urb)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbdux_private *devpriv = dev->private;
- struct urb *urb;
- int ret;
- int i;
-
- /* Submit all URBs and start the transfer on the bus */
- for (i = 0; i < num_urbs; i++) {
- urb = urbs[i];
-
- /* in case of a resubmission after an unlink... */
- if (input_urb)
- urb->interval = devpriv->ai_interval;
- urb->context = dev;
- urb->dev = usb;
- urb->status = 0;
- urb->transfer_flags = URB_ISO_ASAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static int usbdux_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- struct usbdux_private *devpriv = dev->private;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- /* full speed does 1kHz scans every USB frame */
- unsigned int arg = 1000000;
- unsigned int min_arg = arg;
-
- if (devpriv->high_speed) {
- /*
- * In high speed mode microframes are possible.
- * However, during one microframe we can roughly
- * sample one channel. Thus, the more channels
- * are in the channel list the more time we need.
- */
- int i = 1;
-
- /* find a power of 2 for the number of channels */
- while (i < cmd->chanlist_len)
- i = i * 2;
-
- arg /= 8;
- min_arg = arg * i;
- }
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- min_arg);
- /* calc the real sampling rate with the rounding errors */
- arg = (cmd->scan_begin_arg / arg) * arg;
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- return 0;
-}
-
-/*
- * creates the ADC command for the MAX1271
- * range is the range value from comedi
- */
-static u8 create_adc_command(unsigned int chan, unsigned int range)
-{
- u8 p = (range <= 1);
- u8 r = ((range % 2) == 0);
-
- return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3);
-}
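As a hedged illustration of the bit packing above (the flag names are inferred from range_usbdux_ai_range, not from a MAX1271 datasheet), the command bytes produced for channel 0 are:

/*
 * bits 7..4 = channel, bit 3 = full-scale flag (4.096 V span, ranges 0
 * and 2), bit 2 = bipolar flag (ranges 0 and 1):
 *   range 0 (+/-4.096 V) -> 0x0c
 *   range 1 (+/-2.048 V) -> 0x04
 *   range 2 (0..4.096 V) -> 0x08
 *   range 3 (0..2.048 V) -> 0x00
 */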
-
-static int send_dux_commands(struct comedi_device *dev, unsigned int cmd_type)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbdux_private *devpriv = dev->private;
- int nsent;
-
- devpriv->dux_commands[0] = cmd_type;
-
- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1),
- devpriv->dux_commands, SIZEOFDUXBUFFER,
- &nsent, BULK_TIMEOUT);
-}
-
-static int receive_dux_commands(struct comedi_device *dev, unsigned int command)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbdux_private *devpriv = dev->private;
- int ret;
- int nrec;
- int i;
-
- for (i = 0; i < RETRIES; i++) {
- ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8),
- devpriv->insn_buf, SIZEINSNBUF,
- &nrec, BULK_TIMEOUT);
- if (ret < 0)
- return ret;
- if (le16_to_cpu(devpriv->insn_buf[0]) == command)
- return ret;
- }
- /* command not received */
- return -EFAULT;
-}
-
-static int usbdux_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- down(&devpriv->sem);
-
- if (!devpriv->ai_cmd_running) {
- devpriv->ai_cmd_running = 1;
- ret = usbdux_submit_urbs(dev, devpriv->ai_urbs,
- devpriv->n_ai_urbs, 1);
- if (ret < 0) {
- devpriv->ai_cmd_running = 0;
- goto ai_trig_exit;
- }
- s->async->inttrig = NULL;
- } else {
- ret = -EBUSY;
- }
-
-ai_trig_exit:
- up(&devpriv->sem);
- return ret;
-}
-
-static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int len = cmd->chanlist_len;
- int ret = -EBUSY;
- int i;
-
- /* block other CPUs from starting an ai_cmd */
- down(&devpriv->sem);
-
- if (devpriv->ai_cmd_running)
- goto ai_cmd_exit;
-
- devpriv->dux_commands[1] = len;
- for (i = 0; i < len; ++i) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
-
- devpriv->dux_commands[i + 2] = create_adc_command(chan, range);
- }
-
- ret = send_dux_commands(dev, USBDUX_CMD_MULT_AI);
- if (ret < 0)
- goto ai_cmd_exit;
-
- if (devpriv->high_speed) {
- /*
- * every channel gets a time window of 125us. Thus, if we
- * sample all 8 channels we need 1ms. If we sample only one
- * channel we need only 125us
- */
- devpriv->ai_interval = 1;
- /* find a power of 2 for the interval */
- while (devpriv->ai_interval < len)
- devpriv->ai_interval *= 2;
-
- devpriv->ai_timer = cmd->scan_begin_arg /
- (125000 * devpriv->ai_interval);
- } else {
- /* interval always 1ms */
- devpriv->ai_interval = 1;
- devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
- }
- if (devpriv->ai_timer < 1) {
- ret = -EINVAL;
- goto ai_cmd_exit;
- }
-
- devpriv->ai_counter = devpriv->ai_timer;
-
- if (cmd->start_src == TRIG_NOW) {
- /* enable this acquisition operation */
- devpriv->ai_cmd_running = 1;
- ret = usbdux_submit_urbs(dev, devpriv->ai_urbs,
- devpriv->n_ai_urbs, 1);
- if (ret < 0) {
- devpriv->ai_cmd_running = 0;
- /* fixme: unlink here?? */
- goto ai_cmd_exit;
- }
- s->async->inttrig = NULL;
- } else {
- /* TRIG_INT */
- /* don't enable the acquisition operation */
- /* wait for an internal signal */
- s->async->inttrig = usbdux_ai_inttrig;
- }
-
-ai_cmd_exit:
- up(&devpriv->sem);
-
- return ret;
-}
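A worked example of the high-speed timer arithmetic above, using assumed values for illustration:

/*
 * with 3 channels and scan_begin_arg = 1000000 ns, ai_interval rounds up
 * to the next power of two (4 microframes = 500 us per URB) and
 * ai_timer = 1000000 / (125000 * 4) = 2, i.e. one scan is handed to
 * comedi every second completed URB, giving the requested 1 kHz scan
 * rate.
 */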
-
-/* Mode 0 is used to get a single conversion on demand */
-static int usbdux_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbdux_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
- int ret = -EBUSY;
- int i;
-
- down(&devpriv->sem);
-
- if (devpriv->ai_cmd_running)
- goto ai_read_exit;
-
- /* set command for the first channel */
- devpriv->dux_commands[1] = create_adc_command(chan, range);
-
- /* adc commands */
- ret = send_dux_commands(dev, USBDUX_CMD_SINGLE_AI);
- if (ret < 0)
- goto ai_read_exit;
-
- for (i = 0; i < insn->n; i++) {
- ret = receive_dux_commands(dev, USBDUX_CMD_SINGLE_AI);
- if (ret < 0)
- goto ai_read_exit;
-
- val = le16_to_cpu(devpriv->insn_buf[1]);
-
- /* bipolar data is two's-complement */
- if (comedi_range_is_bipolar(s, range))
- val = comedi_offset_munge(s, val);
-
- data[i] = val;
- }
-
-ai_read_exit:
- up(&devpriv->sem);
-
- return ret ? ret : insn->n;
-}
-
-static int usbdux_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbdux_private *devpriv = dev->private;
- int ret;
-
- down(&devpriv->sem);
- ret = comedi_readback_insn_read(dev, s, insn, data);
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int usbdux_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbdux_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
- __le16 *p = (__le16 *)&devpriv->dux_commands[2];
- int ret = -EBUSY;
- int i;
-
- down(&devpriv->sem);
-
- if (devpriv->ao_cmd_running)
- goto ao_write_exit;
-
- /* number of channels: 1 */
- devpriv->dux_commands[1] = 1;
- /* channel number */
- devpriv->dux_commands[4] = chan << 6;
-
- for (i = 0; i < insn->n; i++) {
- val = data[i];
-
- /* one 16 bit value */
- *p = cpu_to_le16(val);
-
- ret = send_dux_commands(dev, USBDUX_CMD_AO);
- if (ret < 0)
- goto ao_write_exit;
-
- s->readback[chan] = val;
- }
-
-ao_write_exit:
- up(&devpriv->sem);
-
- return ret ? ret : insn->n;
-}
-
-static int usbdux_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- down(&devpriv->sem);
-
- if (!devpriv->ao_cmd_running) {
- devpriv->ao_cmd_running = 1;
- ret = usbdux_submit_urbs(dev, devpriv->ao_urbs,
- devpriv->n_ao_urbs, 0);
- if (ret < 0) {
- devpriv->ao_cmd_running = 0;
- goto ao_trig_exit;
- }
- s->async->inttrig = NULL;
- } else {
- ret = -EBUSY;
- }
-
-ao_trig_exit:
- up(&devpriv->sem);
- return ret;
-}
-
-static int usbdux_ao_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int flags;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
-
- if (0) { /* (devpriv->high_speed) */
- /* the sampling rate is set by the conversion rate */
- flags = TRIG_FOLLOW;
- } else {
- /* start a new scan (output at once) with a timer */
- flags = TRIG_TIMER;
- }
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, flags);
-
- if (0) { /* (devpriv->high_speed) */
- /*
- * in usb-2.0 only one conversion is transmitted
- * but with 8kHz/n
- */
- flags = TRIG_TIMER;
- } else {
- /*
- * all conversion events happen simultaneously with
- * a rate of 1kHz/n
- */
- flags = TRIG_NOW;
- }
- err |= comedi_check_trigger_src(&cmd->convert_src, flags);
-
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- 1000000);
- }
-
- /* not used now, is for later use */
- if (cmd->convert_src == TRIG_TIMER)
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 125000);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- return 0;
-}
-
-static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret = -EBUSY;
-
- down(&devpriv->sem);
-
- if (devpriv->ao_cmd_running)
- goto ao_cmd_exit;
-
- /* we count in steps of 1ms (125us) */
- /* 125us mode not used yet */
- if (0) { /* (devpriv->high_speed) */
- /* 125us */
- /* timing of the conversion itself: every 125 us */
- devpriv->ao_timer = cmd->convert_arg / 125000;
- } else {
- /* 1ms */
- /* timing of the scan: we get all channels at once */
- devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
- if (devpriv->ao_timer < 1) {
- ret = -EINVAL;
- goto ao_cmd_exit;
- }
- }
-
- devpriv->ao_counter = devpriv->ao_timer;
-
- if (cmd->start_src == TRIG_NOW) {
- /* enable this acquisition operation */
- devpriv->ao_cmd_running = 1;
- ret = usbdux_submit_urbs(dev, devpriv->ao_urbs,
- devpriv->n_ao_urbs, 0);
- if (ret < 0) {
- devpriv->ao_cmd_running = 0;
- /* fixme: unlink here?? */
- goto ao_cmd_exit;
- }
- s->async->inttrig = NULL;
- } else {
- /* TRIG_INT */
- /* submit the urbs later */
- /* wait for an internal signal */
- s->async->inttrig = usbdux_ao_inttrig;
- }
-
-ao_cmd_exit:
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int usbdux_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- /*
- * We don't tell the firmware here as it would take 8 frames
- * to submit the information. We do it in the insn_bits.
- */
- return insn->n;
-}
-
-static int usbdux_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbdux_private *devpriv = dev->private;
- int ret;
-
- down(&devpriv->sem);
-
- comedi_dio_update_state(s, data);
-
- /* Always update the hardware. See the (*insn_config). */
- devpriv->dux_commands[1] = s->io_bits;
- devpriv->dux_commands[2] = s->state;
-
- /*
- * This command also tells the firmware to return
- * the digital input lines.
- */
- ret = send_dux_commands(dev, USBDUX_CMD_DIO_BITS);
- if (ret < 0)
- goto dio_exit;
- ret = receive_dux_commands(dev, USBDUX_CMD_DIO_BITS);
- if (ret < 0)
- goto dio_exit;
-
- data[1] = le16_to_cpu(devpriv->insn_buf[1]);
-
-dio_exit:
- up(&devpriv->sem);
-
- return ret ? ret : insn->n;
-}
-
-static int usbdux_counter_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbdux_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int ret = 0;
- int i;
-
- down(&devpriv->sem);
-
- for (i = 0; i < insn->n; i++) {
- ret = send_dux_commands(dev, USBDUX_CMD_TIMER_RD);
- if (ret < 0)
- goto counter_read_exit;
- ret = receive_dux_commands(dev, USBDUX_CMD_TIMER_RD);
- if (ret < 0)
- goto counter_read_exit;
-
- data[i] = le16_to_cpu(devpriv->insn_buf[chan + 1]);
- }
-
-counter_read_exit:
- up(&devpriv->sem);
-
- return ret ? ret : insn->n;
-}
-
-static int usbdux_counter_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbdux_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- __le16 *p = (__le16 *)&devpriv->dux_commands[2];
- int ret = 0;
- int i;
-
- down(&devpriv->sem);
-
- devpriv->dux_commands[1] = chan;
-
- for (i = 0; i < insn->n; i++) {
- *p = cpu_to_le16(data[i]);
-
- ret = send_dux_commands(dev, USBDUX_CMD_TIMER_WR);
- if (ret < 0)
- break;
- }
-
- up(&devpriv->sem);
-
- return ret ? ret : insn->n;
-}
-
-static int usbdux_counter_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- /* nothing to do so far */
- return 2;
-}
-
-static void usbduxsub_unlink_pwm_urbs(struct comedi_device *dev)
-{
- struct usbdux_private *devpriv = dev->private;
-
- usb_kill_urb(devpriv->pwm_urb);
-}
-
-static void usbdux_pwm_stop(struct comedi_device *dev, int do_unlink)
-{
- struct usbdux_private *devpriv = dev->private;
-
- if (do_unlink)
- usbduxsub_unlink_pwm_urbs(dev);
-
- devpriv->pwm_cmd_running = 0;
-}
-
-static int usbdux_pwm_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbdux_private *devpriv = dev->private;
- int ret;
-
- down(&devpriv->sem);
- /* unlink only if it is really running */
- usbdux_pwm_stop(dev, devpriv->pwm_cmd_running);
- ret = send_dux_commands(dev, USBDUX_CMD_PWM_OFF);
- up(&devpriv->sem);
-
- return ret;
-}
-
-static void usbduxsub_pwm_irq(struct urb *urb)
-{
- struct comedi_device *dev = urb->context;
- struct usbdux_private *devpriv = dev->private;
- int ret;
-
- switch (urb->status) {
- case 0:
- /* success */
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /*
- * after an unlink command, unplug, ... etc
- * no unlink needed here. Already shutting down.
- */
- if (devpriv->pwm_cmd_running)
- usbdux_pwm_stop(dev, 0);
-
- return;
-
- default:
- /* a real error */
- if (devpriv->pwm_cmd_running) {
- dev_err(dev->class_dev,
- "Non-zero urb status received in pwm intr context: %d\n",
- urb->status);
- usbdux_pwm_stop(dev, 0);
- }
- return;
- }
-
- /* are we actually running? */
- if (!devpriv->pwm_cmd_running)
- return;
-
- urb->transfer_buffer_length = devpriv->pwm_buf_sz;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- if (devpriv->pwm_cmd_running) {
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "pwm urb resubm failed in int-cont. ret=%d",
- ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handling!\n");
-
- /* don't do an unlink here */
- usbdux_pwm_stop(dev, 0);
- }
- }
-}
-
-static int usbduxsub_submit_pwm_urbs(struct comedi_device *dev)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbdux_private *devpriv = dev->private;
- struct urb *urb = devpriv->pwm_urb;
-
- /* in case of a resubmission after an unlink... */
- usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4),
- urb->transfer_buffer,
- devpriv->pwm_buf_sz,
- usbduxsub_pwm_irq,
- dev);
-
- return usb_submit_urb(urb, GFP_ATOMIC);
-}
-
-static int usbdux_pwm_period(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int period)
-{
- struct usbdux_private *devpriv = dev->private;
- int fx2delay = 255;
-
- if (period < MIN_PWM_PERIOD)
- return -EAGAIN;
-
- fx2delay = (period / (6 * 512 * 1000 / 33)) - 6;
- if (fx2delay > 255)
- return -EAGAIN;
-
- devpriv->pwm_delay = fx2delay;
- devpriv->pwm_period = period;
-
- return 0;
-}
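A quick arithmetic check of the delay conversion above, using the constants defined in this file:

/*
 * 6 * 512 * 1000 / 33 evaluates to 93090 in integer arithmetic, so the
 * default period of 10 ms (PWM_DEFAULT_PERIOD) yields
 * fx2delay = 10000000 / 93090 - 6 = 101, and the shortest accepted
 * period, MIN_PWM_PERIOD (about 3.33 ms, i.e. 300 Hz), yields
 * fx2delay = 29.
 */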
-
-static int usbdux_pwm_start(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbdux_private *devpriv = dev->private;
- int ret = 0;
-
- down(&devpriv->sem);
-
- if (devpriv->pwm_cmd_running)
- goto pwm_start_exit;
-
- devpriv->dux_commands[1] = devpriv->pwm_delay;
- ret = send_dux_commands(dev, USBDUX_CMD_PWM_ON);
- if (ret < 0)
- goto pwm_start_exit;
-
- /* initialise the buffer */
- memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz);
-
- devpriv->pwm_cmd_running = 1;
- ret = usbduxsub_submit_pwm_urbs(dev);
- if (ret < 0)
- devpriv->pwm_cmd_running = 0;
-
-pwm_start_exit:
- up(&devpriv->sem);
-
- return ret;
-}
-
-static void usbdux_pwm_pattern(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int value,
- unsigned int sign)
-{
- struct usbdux_private *devpriv = dev->private;
- char pwm_mask = (1 << chan); /* DIO bit for the PWM data */
- char sgn_mask = (16 << chan); /* DIO bit for the sign */
- char *buf = (char *)(devpriv->pwm_urb->transfer_buffer);
- int szbuf = devpriv->pwm_buf_sz;
- int i;
-
- for (i = 0; i < szbuf; i++) {
- char c = *buf;
-
- c &= ~pwm_mask;
- if (i < value)
- c |= pwm_mask;
- if (!sign)
- c &= ~sgn_mask;
- else
- c |= sgn_mask;
- *buf++ = c;
- }
-}
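A brief sketch of the resulting bit pattern (an interpretation of the code above, with example numbers):

/*
 * each byte of the URB buffer drives the PWM outputs for one time slot
 * and the buffer is transmitted over and over; for a given channel,
 * bit `chan` is set in the first `value` bytes and cleared in the rest,
 * so the duty cycle is roughly value / pwm_buf_sz (e.g. value = 128 with
 * pwm_buf_sz = 512 gives 25%), while the second mask (16 << chan) sets
 * or clears the H-bridge sign bit.
 */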
-
-static int usbdux_pwm_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /*
- * It doesn't make sense to support more than one value here
- * because it would just overwrite the PWM buffer.
- */
- if (insn->n != 1)
- return -EINVAL;
-
- /*
- * The sign is set via a special INSN only, this gives us 8 bits
- * for normal operation, sign is 0 by default.
- */
- usbdux_pwm_pattern(dev, s, chan, data[0], 0);
-
- return insn->n;
-}
-
-static int usbdux_pwm_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbdux_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- /*
- * if not zero the PWM is limited to a certain time which is
- * not supported here
- */
- if (data[1] != 0)
- return -EINVAL;
- return usbdux_pwm_start(dev, s);
- case INSN_CONFIG_DISARM:
- return usbdux_pwm_cancel(dev, s);
- case INSN_CONFIG_GET_PWM_STATUS:
- data[1] = devpriv->pwm_cmd_running;
- return 0;
- case INSN_CONFIG_PWM_SET_PERIOD:
- return usbdux_pwm_period(dev, s, data[1]);
- case INSN_CONFIG_PWM_GET_PERIOD:
- data[1] = devpriv->pwm_period;
- return 0;
- case INSN_CONFIG_PWM_SET_H_BRIDGE:
- /*
- * data[1] = value
- * data[2] = sign (for a relay)
- */
- usbdux_pwm_pattern(dev, s, chan, data[1], (data[2] != 0));
- return 0;
- case INSN_CONFIG_PWM_GET_H_BRIDGE:
- /* values are not kept in this driver, nothing to return here */
- return -EINVAL;
- }
- return -EINVAL;
-}
-
-static int usbdux_firmware_upload(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- u8 *buf;
- u8 *tmp;
- int ret;
-
- if (!data)
- return 0;
-
- if (size > USBDUX_FIRMWARE_MAX_LEN) {
- dev_err(dev->class_dev,
- "usbdux firmware binary it too large for FX2.\n");
- return -ENOMEM;
- }
-
- /* we generate a local buffer for the firmware */
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* we need a malloc'ed buffer for usb_control_msg() */
- tmp = kmalloc(1, GFP_KERNEL);
- if (!tmp) {
- kfree(buf);
- return -ENOMEM;
- }
-
- /* stop the current firmware on the device */
- *tmp = 1; /* 7f92 to one */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUX_FIRMWARE_CMD,
- VENDOR_DIR_OUT,
- USBDUX_CPU_CS, 0x0000,
- tmp, 1,
- BULK_TIMEOUT);
- if (ret < 0) {
- dev_err(dev->class_dev, "can not stop firmware\n");
- goto done;
- }
-
- /* upload the new firmware to the device */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUX_FIRMWARE_CMD,
- VENDOR_DIR_OUT,
- 0, 0x0000,
- buf, size,
- BULK_TIMEOUT);
- if (ret < 0) {
- dev_err(dev->class_dev, "firmware upload failed\n");
- goto done;
- }
-
- /* start the new firmware on the device */
- *tmp = 0; /* 7f92 to zero */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUX_FIRMWARE_CMD,
- VENDOR_DIR_OUT,
- USBDUX_CPU_CS, 0x0000,
- tmp, 1,
- BULK_TIMEOUT);
- if (ret < 0)
- dev_err(dev->class_dev, "can not start firmware\n");
-
-done:
- kfree(tmp);
- kfree(buf);
- return ret;
-}
-
-static int usbdux_alloc_usb_buffers(struct comedi_device *dev)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbdux_private *devpriv = dev->private;
- struct urb *urb;
- int i;
-
- devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
- devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL);
- devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL);
- devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(void *),
- GFP_KERNEL);
- devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(void *),
- GFP_KERNEL);
- if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf ||
- !devpriv->ai_urbs || !devpriv->ao_urbs)
- return -ENOMEM;
-
- for (i = 0; i < devpriv->n_ai_urbs; i++) {
- /* one frame: 1ms */
- urb = usb_alloc_urb(1, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
- devpriv->ai_urbs[i] = urb;
-
- urb->dev = usb;
- urb->context = dev;
- urb->pipe = usb_rcvisocpipe(usb, 6);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!urb->transfer_buffer)
- return -ENOMEM;
-
- urb->complete = usbduxsub_ai_isoc_irq;
- urb->number_of_packets = 1;
- urb->transfer_buffer_length = SIZEINBUF;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEINBUF;
- }
-
- for (i = 0; i < devpriv->n_ao_urbs; i++) {
- /* one frame: 1ms */
- urb = usb_alloc_urb(1, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
- devpriv->ao_urbs[i] = urb;
-
- urb->dev = usb;
- urb->context = dev;
- urb->pipe = usb_sndisocpipe(usb, 2);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!urb->transfer_buffer)
- return -ENOMEM;
-
- urb->complete = usbduxsub_ao_isoc_irq;
- urb->number_of_packets = 1;
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEOUTBUF;
- if (devpriv->high_speed)
- urb->interval = 8; /* uframes */
- else
- urb->interval = 1; /* frames */
- }
-
- /* pwm */
- if (devpriv->pwm_buf_sz) {
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
- devpriv->pwm_urb = urb;
-
- /* max bulk ep size in high speed */
- urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz,
- GFP_KERNEL);
- if (!urb->transfer_buffer)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void usbdux_free_usb_buffers(struct comedi_device *dev)
-{
- struct usbdux_private *devpriv = dev->private;
- struct urb *urb;
- int i;
-
- urb = devpriv->pwm_urb;
- if (urb) {
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
- }
- if (devpriv->ao_urbs) {
- for (i = 0; i < devpriv->n_ao_urbs; i++) {
- urb = devpriv->ao_urbs[i];
- if (urb) {
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
- }
- }
- kfree(devpriv->ao_urbs);
- }
- if (devpriv->ai_urbs) {
- for (i = 0; i < devpriv->n_ai_urbs; i++) {
- urb = devpriv->ai_urbs[i];
- if (urb) {
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
- }
- }
- kfree(devpriv->ai_urbs);
- }
- kfree(devpriv->insn_buf);
- kfree(devpriv->in_buf);
- kfree(devpriv->dux_commands);
-}
-
-static int usbdux_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbdux_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- sema_init(&devpriv->sem, 1);
-
- usb_set_intfdata(intf, devpriv);
-
- devpriv->high_speed = (usb->speed == USB_SPEED_HIGH);
- if (devpriv->high_speed) {
- devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH;
- devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH;
- devpriv->pwm_buf_sz = 512;
- } else {
- devpriv->n_ai_urbs = NUMOFINBUFFERSFULL;
- devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL;
- }
-
- ret = usbdux_alloc_usb_buffers(dev);
- if (ret)
- return ret;
-
- /* setting to alternate setting 3: enabling iso ep and bulk ep. */
- ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber,
- 3);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "could not set alternate setting 3 in high speed\n");
- return ret;
- }
-
- ret = comedi_load_firmware(dev, &usb->dev, USBDUX_FIRMWARE,
- usbdux_firmware_upload, 0);
- if (ret < 0)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 5 : 4);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
- s->n_chan = 8;
- s->maxdata = 0x0fff;
- s->len_chanlist = 8;
- s->range_table = &range_usbdux_ai_range;
- s->insn_read = usbdux_ai_insn_read;
- s->do_cmdtest = usbdux_ai_cmdtest;
- s->do_cmd = usbdux_ai_cmd;
- s->cancel = usbdux_ai_cancel;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- dev->write_subdev = s;
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->len_chanlist = s->n_chan;
- s->range_table = &range_usbdux_ao_range;
- s->do_cmdtest = usbdux_ao_cmdtest;
- s->do_cmd = usbdux_ao_cmd;
- s->cancel = usbdux_ao_cancel;
- s->insn_read = usbdux_ao_insn_read;
- s->insn_write = usbdux_ao_insn_write;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = usbdux_dio_insn_bits;
- s->insn_config = usbdux_dio_insn_config;
-
- /* Counter subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 0xffff;
- s->insn_read = usbdux_counter_read;
- s->insn_write = usbdux_counter_write;
- s->insn_config = usbdux_counter_config;
-
- if (devpriv->high_speed) {
- /* PWM subdevice */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
- s->n_chan = 8;
- s->maxdata = devpriv->pwm_buf_sz;
- s->insn_write = usbdux_pwm_write;
- s->insn_config = usbdux_pwm_config;
-
- usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
- }
-
- return 0;
-}
-
-static void usbdux_detach(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usbdux_private *devpriv = dev->private;
-
- usb_set_intfdata(intf, NULL);
-
- if (!devpriv)
- return;
-
- down(&devpriv->sem);
-
- /* force unlink all urbs */
- usbdux_pwm_stop(dev, 1);
- usbdux_ao_stop(dev, 1);
- usbdux_ai_stop(dev, 1);
-
- usbdux_free_usb_buffers(dev);
-
- up(&devpriv->sem);
-}
-
-static struct comedi_driver usbdux_driver = {
- .driver_name = "usbdux",
- .module = THIS_MODULE,
- .auto_attach = usbdux_auto_attach,
- .detach = usbdux_detach,
-};
-
-static int usbdux_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- return comedi_usb_auto_config(intf, &usbdux_driver, 0);
-}
-
-static const struct usb_device_id usbdux_usb_table[] = {
- { USB_DEVICE(0x13d8, 0x0001) },
- { USB_DEVICE(0x13d8, 0x0002) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, usbdux_usb_table);
-
-static struct usb_driver usbdux_usb_driver = {
- .name = "usbdux",
- .probe = usbdux_usb_probe,
- .disconnect = comedi_usb_auto_unconfig,
- .id_table = usbdux_usb_table,
-};
-module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver);
-
-MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
-MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(USBDUX_FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
deleted file mode 100644
index c6b2a6582127..000000000000
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ /dev/null
@@ -1,1041 +0,0 @@
-/*
- * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: usbduxfast
- * Description: University of Stirling USB DAQ & INCITE Technology Limited
- * Devices: [ITL] USB-DUX-FAST (usbduxfast)
- * Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
- * Status: stable
- */
-
-/*
- * I must give credit here to Chris Baugher who
- * wrote the driver for AT-MIO-16d. I used some parts of this
- * driver. I also must give credits to David Brownell
- * who supported me with the USB development.
- *
- * Bernd Porr
- *
- *
- * Revision history:
- * 0.9: Dropping the first data packet which seems to be from the last transfer.
- * Buffer overflows in the FX2 are handed over to comedi.
- * 0.92: Now dropping 4 packets. The quad buffer has to be emptied.
- * Added insn command basically for testing. Sample rate is
- * 1MHz/16ch=62.5kHz
- * 0.99: Ian Abbott pointed out a bug which has been corrected. Thanks!
- * 0.99a: added external trigger.
- * 1.00: added firmware kernel request to the driver which fixed
- * the udev coldplug problem
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/fcntl.h>
-#include <linux/compiler.h>
-#include "../comedi_usb.h"
-
-/*
- * timeout for the USB-transfer
- */
-#define EZTIMEOUT 30
-
-/*
- * constants for "firmware" upload and download
- */
-#define FIRMWARE "usbduxfast_firmware.bin"
-#define FIRMWARE_MAX_LEN 0x2000
-#define USBDUXFASTSUB_FIRMWARE 0xA0
-#define VENDOR_DIR_IN 0xC0
-#define VENDOR_DIR_OUT 0x40
-
-/*
- * internal addresses of the 8051 processor
- */
-#define USBDUXFASTSUB_CPUCS 0xE600
-
-/*
- * max length of the transfer buffer for software upload
- */
-#define TB_LEN 0x2000
-
-/*
- * input endpoint number
- */
-#define BULKINEP 6
-
-/*
- * endpoint for the A/D channellist: bulk OUT
- */
-#define CHANNELLISTEP 4
-
-/*
- * number of channels
- */
-#define NUMCHANNELS 32
-
-/*
- * size of the waveform descriptor
- */
-#define WAVESIZE 0x20
-
-/*
- * size of one A/D value
- */
-#define SIZEADIN (sizeof(s16))
-
-/*
- * size of the input-buffer IN BYTES
- */
-#define SIZEINBUF 512
-
-/*
- * size of the insn buffer in bytes
- */
-#define SIZEINSNBUF 512
-
-/*
- * size of the buffer for the dux commands in bytes
- */
-#define SIZEOFDUXBUF 256
-
-/*
- * number of in-URBs which receive the data: min=5
- */
-#define NUMOFINBUFFERSHIGH 10
-
-/*
- * min delay steps for more than one channel
- * basically when the mux gives up ;-)
- *
- * steps at 30MHz in the FX2
- */
-#define MIN_SAMPLING_PERIOD 9
-
-/*
- * max number of 1/30MHz delay steps
- */
-#define MAX_SAMPLING_PERIOD 500
-
-/*
- * number of received packets to ignore before we start handing data
- * over to comedi; the FX2 uses quad buffering, so we have to ignore
- * 4 packets
- */
-#define PACKETS_TO_IGNORE 4
-
-/*
- * comedi constants
- */
-static const struct comedi_lrange range_usbduxfast_ai_range = {
- 2, {
- BIP_RANGE(0.75),
- BIP_RANGE(0.5)
- }
-};
-
-/*
- * private structure of one subdevice
- *
- * this is the structure which holds all the data of this driver
- * one subdevice for now: A/D
- */
-struct usbduxfast_private {
- struct urb *urb; /* BULK-transfer handling: urb */
- u8 *duxbuf;
- s8 *inbuf;
- short int ai_cmd_running; /* asynchronous command is running */
- int ignore; /* counter which ignores the first buffers */
- struct semaphore sem;
-};
-
-/*
- * bulk transfers to usbduxfast
- */
-#define SENDADCOMMANDS 0
-#define SENDINITEP6 1
-
-static int usbduxfast_send_cmd(struct comedi_device *dev, int cmd_type)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxfast_private *devpriv = dev->private;
- int nsent;
- int ret;
-
- devpriv->duxbuf[0] = cmd_type;
-
- ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, CHANNELLISTEP),
- devpriv->duxbuf, SIZEOFDUXBUF,
- &nsent, 10000);
- if (ret < 0)
- dev_err(dev->class_dev,
- "could not transmit command to the usb-device, err=%d\n",
- ret);
- return ret;
-}
-
-static void usbduxfast_cmd_data(struct comedi_device *dev, int index,
- u8 len, u8 op, u8 out, u8 log)
-{
- struct usbduxfast_private *devpriv = dev->private;
-
- /* Set the GPIF bytes, the first byte is the command byte */
- devpriv->duxbuf[1 + 0x00 + index] = len;
- devpriv->duxbuf[1 + 0x08 + index] = op;
- devpriv->duxbuf[1 + 0x10 + index] = out;
- devpriv->duxbuf[1 + 0x18 + index] = log;
-}
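A sketch of the buffer layout this helper fills, assuming the usual FX2 GPIF waveform-descriptor structure (the field names below are an interpretation of the len/op/out/log arguments, not taken from the original source):

/*
 * duxbuf[0] is the command byte; the four 8-byte blocks starting at
 * offsets 1, 9, 0x11 and 0x19 hold, for each waveform state `index`,
 * the length/branch, opcode, output and logic-function bytes
 * respectively.
 */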
-
-static int usbduxfast_ai_stop(struct comedi_device *dev, int do_unlink)
-{
- struct usbduxfast_private *devpriv = dev->private;
-
- /* stop acquisition */
- devpriv->ai_cmd_running = 0;
-
- if (do_unlink && devpriv->urb) {
- /* kill the running transfer */
- usb_kill_urb(devpriv->urb);
- }
-
- return 0;
-}
-
-static int usbduxfast_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxfast_private *devpriv = dev->private;
- int ret;
-
- down(&devpriv->sem);
- ret = usbduxfast_ai_stop(dev, 1);
- up(&devpriv->sem);
-
- return ret;
-}
-
-static void usbduxfast_ai_handle_urb(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct urb *urb)
-{
- struct usbduxfast_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- int ret;
-
- if (devpriv->ignore) {
- devpriv->ignore--;
- } else {
- unsigned int nsamples;
-
- nsamples = comedi_bytes_to_samples(s, urb->actual_length);
- nsamples = comedi_nsamples_left(s, nsamples);
- comedi_buf_write_samples(s, urb->transfer_buffer, nsamples);
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
- }
-
- /* if command is still running, resubmit urb for BULK transfer */
- if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev, "urb resubm failed: %d", ret);
- async->events |= COMEDI_CB_ERROR;
- }
- }
-}
-
-static void usbduxfast_ai_interrupt(struct urb *urb)
-{
- struct comedi_device *dev = urb->context;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct usbduxfast_private *devpriv = dev->private;
-
- /* exit if not running a command, do not resubmit urb */
- if (!devpriv->ai_cmd_running)
- return;
-
- switch (urb->status) {
- case 0:
- usbduxfast_ai_handle_urb(dev, s, urb);
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* after an unlink command, unplug, ... etc */
- async->events |= COMEDI_CB_ERROR;
- break;
-
- default:
- /* a real error */
- dev_err(dev->class_dev,
- "non-zero urb status received in ai intr context: %d\n",
- urb->status);
- async->events |= COMEDI_CB_ERROR;
- break;
- }
-
- /*
- * comedi_handle_events() cannot be used in this driver. The (*cancel)
- * operation would unlink the urb.
- */
- if (async->events & COMEDI_CB_CANCEL_MASK)
- usbduxfast_ai_stop(dev, 0);
-
- comedi_event(dev, s);
-}
-
-static int usbduxfast_submit_urb(struct comedi_device *dev)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxfast_private *devpriv = dev->private;
- int ret;
-
- usb_fill_bulk_urb(devpriv->urb, usb, usb_rcvbulkpipe(usb, BULKINEP),
- devpriv->inbuf, SIZEINBUF,
- usbduxfast_ai_interrupt, dev);
-
- ret = usb_submit_urb(devpriv->urb, GFP_ATOMIC);
- if (ret) {
- dev_err(dev->class_dev, "usb_submit_urb error %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int usbduxfast_ai_check_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int gain0 = CR_RANGE(cmd->chanlist[0]);
- int i;
-
- if (cmd->chanlist_len > 3 && cmd->chanlist_len != 16) {
- dev_err(dev->class_dev, "unsupported combination of channels\n");
- return -EINVAL;
- }
-
- for (i = 0; i < cmd->chanlist_len; ++i) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned int gain = CR_RANGE(cmd->chanlist[i]);
-
- if (chan != i) {
- dev_err(dev->class_dev,
- "channels are not consecutive\n");
- return -EINVAL;
- }
- if (gain != gain0 && cmd->chanlist_len > 3) {
- dev_err(dev->class_dev,
- "gain must be the same for all channels\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- unsigned int steps;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src,
- TRIG_NOW | TRIG_EXT | TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (!cmd->chanlist_len)
- err |= -EINVAL;
-
- /* external start trigger is only valid for 1 or 16 channels */
- if (cmd->start_src == TRIG_EXT &&
- cmd->chanlist_len != 1 && cmd->chanlist_len != 16)
- err |= -EINVAL;
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- /*
- * Validate the conversion timing:
- * for 1 channel the timing in 30MHz "steps" is:
- * steps <= MAX_SAMPLING_PERIOD
- * for all other chanlist_len it is:
- * MIN_SAMPLING_PERIOD <= steps <= MAX_SAMPLING_PERIOD
- */
- steps = (cmd->convert_arg * 30) / 1000;
- if (cmd->chanlist_len != 1)
- err |= comedi_check_trigger_arg_min(&steps,
- MIN_SAMPLING_PERIOD);
- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
- arg = (steps * 1000) / 30;
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- /* Step 5: check channel list if it exists */
- if (cmd->chanlist && cmd->chanlist_len > 0)
- err |= usbduxfast_ai_check_chanlist(dev, s, cmd);
- if (err)
- return 5;
-
- return 0;
-}
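A worked example of the convert_arg rounding above, with assumed request values:

/*
 * convert_arg = 10000 ns gives steps = 10000 * 30 / 1000 = 300, which is
 * inside the allowed 9..500 range for multi-channel lists and is handed
 * back unchanged as 300 * 1000 / 30 = 10000 ns; a request of 333 ns
 * truncates to 9 steps and is adjusted to 300 ns.
 */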
-
-static int usbduxfast_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct usbduxfast_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- down(&devpriv->sem);
-
- if (!devpriv->ai_cmd_running) {
- devpriv->ai_cmd_running = 1;
- ret = usbduxfast_submit_urb(dev);
- if (ret < 0) {
- dev_err(dev->class_dev, "urbSubmit: err=%d\n", ret);
- devpriv->ai_cmd_running = 0;
- up(&devpriv->sem);
- return ret;
- }
- s->async->inttrig = NULL;
- } else {
- dev_err(dev->class_dev, "ai is already running\n");
- }
- up(&devpriv->sem);
- return 1;
-}
-
-static int usbduxfast_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxfast_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int rngmask = 0xff;
- int j, ret;
- long steps, steps_tmp;
-
- down(&devpriv->sem);
- if (devpriv->ai_cmd_running) {
- ret = -EBUSY;
- goto cmd_exit;
- }
-
- /*
- * ignore the first buffers from the device if there
- * is an error condition
- */
- devpriv->ignore = PACKETS_TO_IGNORE;
-
- steps = (cmd->convert_arg * 30) / 1000;
-
- switch (cmd->chanlist_len) {
- case 1:
- /*
- * one channel
- */
-
- if (CR_RANGE(cmd->chanlist[0]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
-
- /*
- * for external trigger: looping in this state until
- * the RDY0 pin becomes zero
- */
-
- /* we loop here until ready has been set */
- if (cmd->start_src == TRIG_EXT) {
- /* branch back to state 0 */
-			/* decision state w/o data */
- /* RDY0 = 0 */
- usbduxfast_cmd_data(dev, 0, 0x01, 0x01, rngmask, 0x00);
- } else { /* we just proceed to state 1 */
- usbduxfast_cmd_data(dev, 0, 0x01, 0x00, rngmask, 0x00);
- }
-
- if (steps < MIN_SAMPLING_PERIOD) {
-			/* for fast single-channel acquisition without mux */
- if (steps <= 1) {
- /*
-				 * we just stay here at state 1 and re-execute
-				 * the same state; this gives us a 30MHz
-				 * sampling rate
- */
-
- /* branch back to state 1 */
-				/* decision state with data */
- /* doesn't matter */
- usbduxfast_cmd_data(dev, 1,
- 0x89, 0x03, rngmask, 0xff);
- } else {
- /*
- * we loop through two states: data and delay
- * max rate is 15MHz
- */
- /* data */
- /* doesn't matter */
- usbduxfast_cmd_data(dev, 1, steps - 1,
- 0x02, rngmask, 0x00);
-
- /* branch back to state 1 */
-				/* decision state w/o data */
- /* doesn't matter */
- usbduxfast_cmd_data(dev, 2,
- 0x09, 0x01, rngmask, 0xff);
- }
- } else {
- /*
- * we loop through 3 states: 2x delay and 1x data
- * this gives a min sampling rate of 60kHz
- */
-
- /* we have 1 state with duration 1 */
- steps = steps - 1;
-
- /* do the first part of the delay */
- usbduxfast_cmd_data(dev, 1,
- steps / 2, 0x00, rngmask, 0x00);
-
- /* and the second part */
- usbduxfast_cmd_data(dev, 2, steps - steps / 2,
- 0x00, rngmask, 0x00);
-
- /* get the data and branch back */
-
- /* branch back to state 1 */
-			/* decision state with data */
- /* doesn't matter */
- usbduxfast_cmd_data(dev, 3,
- 0x09, 0x03, rngmask, 0xff);
- }
- break;
-
- case 2:
- /*
- * two channels
- * commit data to the FIFO
- */
-
- if (CR_RANGE(cmd->chanlist[0]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
-
- /* data */
- usbduxfast_cmd_data(dev, 0, 0x01, 0x02, rngmask, 0x00);
-
- /* we have 1 state with duration 1: state 0 */
- steps_tmp = steps - 1;
-
- if (CR_RANGE(cmd->chanlist[1]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
-
- /* do the first part of the delay */
- /* count */
- usbduxfast_cmd_data(dev, 1, steps_tmp / 2,
- 0x00, 0xfe & rngmask, 0x00);
-
- /* and the second part */
- usbduxfast_cmd_data(dev, 2, steps_tmp - steps_tmp / 2,
- 0x00, rngmask, 0x00);
-
- /* data */
- usbduxfast_cmd_data(dev, 3, 0x01, 0x02, rngmask, 0x00);
-
- /*
- * we have 2 states with duration 1: step 6 and
- * the IDLE state
- */
- steps_tmp = steps - 2;
-
- if (CR_RANGE(cmd->chanlist[0]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
-
- /* do the first part of the delay */
- /* reset */
- usbduxfast_cmd_data(dev, 4, steps_tmp / 2,
- 0x00, (0xff - 0x02) & rngmask, 0x00);
-
- /* and the second part */
- usbduxfast_cmd_data(dev, 5, steps_tmp - steps_tmp / 2,
- 0x00, rngmask, 0x00);
-
- usbduxfast_cmd_data(dev, 6, 0x01, 0x00, rngmask, 0x00);
- break;
-
- case 3:
- /*
- * three channels
- */
- for (j = 0; j < 1; j++) {
- int index = j * 2;
-
- if (CR_RANGE(cmd->chanlist[j]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
- /*
- * commit data to the FIFO and do the first part
- * of the delay
- */
- /* data */
- /* no change */
- usbduxfast_cmd_data(dev, index, steps / 2,
- 0x02, rngmask, 0x00);
-
- if (CR_RANGE(cmd->chanlist[j + 1]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
-
- /* do the second part of the delay */
- /* no data */
- /* count */
- usbduxfast_cmd_data(dev, index + 1, steps - steps / 2,
- 0x00, 0xfe & rngmask, 0x00);
- }
-
-		/* 2 steps with duration 1: the idle step and step 6 */
- steps_tmp = steps - 2;
-
- /* commit data to the FIFO and do the first part of the delay */
- /* data */
- usbduxfast_cmd_data(dev, 4, steps_tmp / 2,
- 0x02, rngmask, 0x00);
-
- if (CR_RANGE(cmd->chanlist[0]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
-
- /* do the second part of the delay */
- /* no data */
- /* reset */
- usbduxfast_cmd_data(dev, 5, steps_tmp - steps_tmp / 2,
- 0x00, (0xff - 0x02) & rngmask, 0x00);
-
- usbduxfast_cmd_data(dev, 6, 0x01, 0x00, rngmask, 0x00);
- break;
-
- case 16:
- if (CR_RANGE(cmd->chanlist[0]) > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
-
- if (cmd->start_src == TRIG_EXT) {
- /*
- * we loop here until ready has been set
- */
-
- /* branch back to state 0 */
-			/* decision state w/o data */
- /* reset */
- /* RDY0 = 0 */
- usbduxfast_cmd_data(dev, 0, 0x01, 0x01,
- (0xff - 0x02) & rngmask, 0x00);
- } else {
- /*
- * we just proceed to state 1
- */
-
- /* 30us reset pulse */
- /* reset */
- usbduxfast_cmd_data(dev, 0, 0xff, 0x00,
- (0xff - 0x02) & rngmask, 0x00);
- }
-
- /* commit data to the FIFO */
- /* data */
- usbduxfast_cmd_data(dev, 1, 0x01, 0x02, rngmask, 0x00);
-
- /* we have 2 states with duration 1 */
- steps = steps - 2;
-
- /* do the first part of the delay */
- usbduxfast_cmd_data(dev, 2, steps / 2,
- 0x00, 0xfe & rngmask, 0x00);
-
- /* and the second part */
- usbduxfast_cmd_data(dev, 3, steps - steps / 2,
- 0x00, rngmask, 0x00);
-
- /* branch back to state 1 */
-		/* decision state w/o data */
- /* doesn't matter */
- usbduxfast_cmd_data(dev, 4, 0x09, 0x01, rngmask, 0xff);
-
- break;
- }
-
- /* 0 means that the AD commands are sent */
- ret = usbduxfast_send_cmd(dev, SENDADCOMMANDS);
- if (ret < 0)
- goto cmd_exit;
-
- if ((cmd->start_src == TRIG_NOW) || (cmd->start_src == TRIG_EXT)) {
- /* enable this acquisition operation */
- devpriv->ai_cmd_running = 1;
- ret = usbduxfast_submit_urb(dev);
- if (ret < 0) {
- devpriv->ai_cmd_running = 0;
- /* fixme: unlink here?? */
- goto cmd_exit;
- }
- s->async->inttrig = NULL;
- } else { /* TRIG_INT */
- s->async->inttrig = usbduxfast_ai_inttrig;
- }
-
-cmd_exit:
- up(&devpriv->sem);
-
- return ret;
-}
-
-/*
- * Mode 0 is used to get a single conversion on demand.
- */
-static int usbduxfast_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxfast_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
- u8 rngmask = range ? (0xff - 0x04) : 0xff;
- int i, j, n, actual_length;
- int ret;
-
- down(&devpriv->sem);
-
- if (devpriv->ai_cmd_running) {
- dev_err(dev->class_dev,
- "ai_insn_read not possible, async cmd is running\n");
- up(&devpriv->sem);
- return -EBUSY;
- }
-
- /* set command for the first channel */
-
- /* commit data to the FIFO */
- /* data */
- usbduxfast_cmd_data(dev, 0, 0x01, 0x02, rngmask, 0x00);
-
- /* do the first part of the delay */
- usbduxfast_cmd_data(dev, 1, 0x0c, 0x00, 0xfe & rngmask, 0x00);
- usbduxfast_cmd_data(dev, 2, 0x01, 0x00, 0xfe & rngmask, 0x00);
- usbduxfast_cmd_data(dev, 3, 0x01, 0x00, 0xfe & rngmask, 0x00);
- usbduxfast_cmd_data(dev, 4, 0x01, 0x00, 0xfe & rngmask, 0x00);
-
- /* second part */
- usbduxfast_cmd_data(dev, 5, 0x0c, 0x00, rngmask, 0x00);
- usbduxfast_cmd_data(dev, 6, 0x01, 0x00, rngmask, 0x00);
-
- ret = usbduxfast_send_cmd(dev, SENDADCOMMANDS);
- if (ret < 0) {
- up(&devpriv->sem);
- return ret;
- }
-
- for (i = 0; i < PACKETS_TO_IGNORE; i++) {
- ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, BULKINEP),
- devpriv->inbuf, SIZEINBUF,
- &actual_length, 10000);
- if (ret < 0) {
- dev_err(dev->class_dev, "insn timeout, no data\n");
- up(&devpriv->sem);
- return ret;
- }
- }
-
- for (i = 0; i < insn->n;) {
- ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, BULKINEP),
- devpriv->inbuf, SIZEINBUF,
- &actual_length, 10000);
- if (ret < 0) {
- dev_err(dev->class_dev, "insn data error: %d\n", ret);
- up(&devpriv->sem);
- return ret;
- }
- n = actual_length / sizeof(u16);
- if ((n % 16) != 0) {
- dev_err(dev->class_dev, "insn data packet corrupted\n");
- up(&devpriv->sem);
- return -EINVAL;
- }
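-		/*
-		 * Each packet interleaves all 16 channels; pick out every
-		 * 16th sample starting at the requested channel.
-		 */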
- for (j = chan; (j < n) && (i < insn->n); j = j + 16) {
- data[i] = ((u16 *)(devpriv->inbuf))[j];
- i++;
- }
- }
-
- up(&devpriv->sem);
-
- return insn->n;
-}
-
-static int usbduxfast_upload_firmware(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- u8 *buf;
- unsigned char *tmp;
- int ret;
-
- if (!data)
- return 0;
-
- if (size > FIRMWARE_MAX_LEN) {
- dev_err(dev->class_dev, "firmware binary too large for FX2\n");
- return -ENOMEM;
- }
-
- /* we generate a local buffer for the firmware */
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* we need a malloc'ed buffer for usb_control_msg() */
- tmp = kmalloc(1, GFP_KERNEL);
- if (!tmp) {
- kfree(buf);
- return -ENOMEM;
- }
-
- /* stop the current firmware on the device */
- *tmp = 1; /* 7f92 to one */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXFASTSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- USBDUXFASTSUB_CPUCS, 0x0000,
- tmp, 1,
- EZTIMEOUT);
- if (ret < 0) {
- dev_err(dev->class_dev, "can not stop firmware\n");
- goto done;
- }
-
- /* upload the new firmware to the device */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXFASTSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- 0, 0x0000,
- buf, size,
- EZTIMEOUT);
- if (ret < 0) {
- dev_err(dev->class_dev, "firmware upload failed\n");
- goto done;
- }
-
- /* start the new firmware on the device */
- *tmp = 0; /* 7f92 to zero */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXFASTSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- USBDUXFASTSUB_CPUCS, 0x0000,
- tmp, 1,
- EZTIMEOUT);
- if (ret < 0)
- dev_err(dev->class_dev, "can not start firmware\n");
-
-done:
- kfree(tmp);
- kfree(buf);
- return ret;
-}
-
-static int usbduxfast_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxfast_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- if (usb->speed != USB_SPEED_HIGH) {
- dev_err(dev->class_dev,
- "This driver needs USB 2.0 to operate. Aborting...\n");
- return -ENODEV;
- }
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- sema_init(&devpriv->sem, 1);
- usb_set_intfdata(intf, devpriv);
-
- devpriv->duxbuf = kmalloc(SIZEOFDUXBUF, GFP_KERNEL);
- if (!devpriv->duxbuf)
- return -ENOMEM;
-
- ret = usb_set_interface(usb,
- intf->altsetting->desc.bInterfaceNumber, 1);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "could not switch to alternate setting 1\n");
- return -ENODEV;
- }
-
- devpriv->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!devpriv->urb) {
- dev_err(dev->class_dev, "Could not alloc. urb\n");
- return -ENOMEM;
- }
-
- devpriv->inbuf = kmalloc(SIZEINBUF, GFP_KERNEL);
- if (!devpriv->inbuf)
- return -ENOMEM;
-
- ret = comedi_load_firmware(dev, &usb->dev, FIRMWARE,
- usbduxfast_upload_firmware, 0);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
- s->n_chan = 16;
- s->maxdata = 0x1000; /* 12-bit + 1 overflow bit */
- s->range_table = &range_usbduxfast_ai_range;
- s->insn_read = usbduxfast_ai_insn_read;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = usbduxfast_ai_cmdtest;
- s->do_cmd = usbduxfast_ai_cmd;
- s->cancel = usbduxfast_ai_cancel;
-
- return 0;
-}
-
-static void usbduxfast_detach(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usbduxfast_private *devpriv = dev->private;
-
- if (!devpriv)
- return;
-
- down(&devpriv->sem);
-
- usb_set_intfdata(intf, NULL);
-
- if (devpriv->urb) {
- /* waits until a running transfer is over */
- usb_kill_urb(devpriv->urb);
-
- kfree(devpriv->inbuf);
- usb_free_urb(devpriv->urb);
- }
-
- kfree(devpriv->duxbuf);
-
- up(&devpriv->sem);
-}
-
-static struct comedi_driver usbduxfast_driver = {
- .driver_name = "usbduxfast",
- .module = THIS_MODULE,
- .auto_attach = usbduxfast_auto_attach,
- .detach = usbduxfast_detach,
-};
-
-static int usbduxfast_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- return comedi_usb_auto_config(intf, &usbduxfast_driver, 0);
-}
-
-static const struct usb_device_id usbduxfast_usb_table[] = {
- /* { USB_DEVICE(0x4b4, 0x8613) }, testing */
- { USB_DEVICE(0x13d8, 0x0010) }, /* real ID */
- { USB_DEVICE(0x13d8, 0x0011) }, /* real ID */
- { }
-};
-MODULE_DEVICE_TABLE(usb, usbduxfast_usb_table);
-
-static struct usb_driver usbduxfast_usb_driver = {
- .name = "usbduxfast",
- .probe = usbduxfast_usb_probe,
- .disconnect = comedi_usb_auto_unconfig,
- .id_table = usbduxfast_usb_table,
-};
-module_comedi_usb_driver(usbduxfast_driver, usbduxfast_usb_driver);
-
-MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
-MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
deleted file mode 100644
index d49147bc2786..000000000000
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ /dev/null
@@ -1,1623 +0,0 @@
-/*
- * usbduxsigma.c
- * Copyright (C) 2011-2015 Bernd Porr, mail@berndporr.me.uk
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: usbduxsigma
- * Description: University of Stirling USB DAQ & INCITE Technology Limited
- * Devices: [ITL] USB-DUX-SIGMA (usbduxsigma)
- * Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 20 July 2015
- * Status: stable
- */
-
-/*
- * I must give credit here to Chris Baugher who
- * wrote the driver for AT-MIO-16d. I used some parts of this
- * driver. I also must give credits to David Brownell
- * who supported me with the USB development.
- *
- * Note: the raw data from the A/D converter is 24 bit big endian
- * everything else is little endian to/from the dux board
- *
- *
- * Revision history:
- * 0.1: initial version
- * 0.2: all basic functions implemented, digital I/O only for one port
- * 0.3: proper vendor ID and driver name
- * 0.4: fixed D/A voltage range
- * 0.5: various bug fixes, health check at startup
- * 0.6: corrected wrong input range
- * 0.7: rewrite code that urb->interval is always 1
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/fcntl.h>
-#include <linux/compiler.h>
-#include <asm/unaligned.h>
-
-#include "../comedi_usb.h"
-
-/* timeout for the USB-transfer in ms */
-#define BULK_TIMEOUT 1000
-
-/* constants for "firmware" upload and download */
-#define FIRMWARE "usbduxsigma_firmware.bin"
-#define FIRMWARE_MAX_LEN 0x4000
-#define USBDUXSUB_FIRMWARE 0xa0
-#define VENDOR_DIR_IN 0xc0
-#define VENDOR_DIR_OUT 0x40
-
-/* internal addresses of the 8051 processor */
-#define USBDUXSUB_CPUCS 0xE600
-
-/* 300 Hz max frequency under PWM */
-#define MIN_PWM_PERIOD ((long)(1E9 / 300))
-
-/* Default PWM frequency */
-#define PWM_DEFAULT_PERIOD ((long)(1E9 / 100))
-
-/* Number of channels (16 AD and offset) */
-#define NUMCHANNELS 16
-
-/* Size of one A/D value */
-#define SIZEADIN ((sizeof(u32)))
-
-/*
- * Size of the async input-buffer IN BYTES, the DIO state is transmitted
- * as the first byte.
- */
-#define SIZEINBUF (((NUMCHANNELS + 1) * SIZEADIN))
-
-/* 16 bytes. */
-#define SIZEINSNBUF 16
-
-/* Number of DA channels */
-#define NUMOUTCHANNELS 8
-
-/* size of one value for the D/A converter: channel and value */
-#define SIZEDAOUT ((sizeof(u8) + sizeof(uint16_t)))
-
-/*
- * Size of the output-buffer in bytes
- * Actually only the first 4 triplets are used but for the
- * high speed mode we need to pad it to 8 (microframes).
- */
-#define SIZEOUTBUF ((8 * SIZEDAOUT))
-
-/*
- * Size of the buffer for the dux commands: just now max size is determined
- * by the analogue out + command byte + panic bytes...
- */
-#define SIZEOFDUXBUFFER ((8 * SIZEDAOUT + 2))
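-/*
- * With sizeof(u8) == 1 and sizeof(uint16_t) == 2 this works out to
- * SIZEDAOUT = 3, SIZEOUTBUF = 24 and SIZEOFDUXBUFFER = 26 bytes.
- */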
-
-/* Number of in-URBs which receive the data: min=2 */
-#define NUMOFINBUFFERSFULL 5
-
-/* Number of out-URBs which send the data: min=2 */
-#define NUMOFOUTBUFFERSFULL 5
-
-/* Number of in-URBs which receive the data: min=5 */
-/* must have more buffers due to buggy USB controller */
-#define NUMOFINBUFFERSHIGH 10
-
-/* Number of out-URBs which send the data: min=5 */
-/* must have more buffers due to buggy USB controller */
-#define NUMOFOUTBUFFERSHIGH 10
-
-/* number of retries to get the right dux command */
-#define RETRIES 10
-
-/* bulk transfer commands to usbduxsigma */
-#define USBBUXSIGMA_AD_CMD 9
-#define USBDUXSIGMA_DA_CMD 1
-#define USBDUXSIGMA_DIO_CFG_CMD 2
-#define USBDUXSIGMA_DIO_BITS_CMD 3
-#define USBDUXSIGMA_SINGLE_AD_CMD 4
-#define USBDUXSIGMA_PWM_ON_CMD 7
-#define USBDUXSIGMA_PWM_OFF_CMD 8
-
-static const struct comedi_lrange usbduxsigma_ai_range = {
- 1, {
- BIP_RANGE(2.5 * 0x800000 / 0x780000 / 2.0)
- }
-};
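-/*
- * The expression above evaluates to roughly +/-1.33V, i.e. 2.5V scaled
- * by 0x800000/0x780000 and then halved.
- */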
-
-struct usbduxsigma_private {
- /* actual number of in-buffers */
- int n_ai_urbs;
- /* actual number of out-buffers */
- int n_ao_urbs;
- /* ISO-transfer handling: buffers */
- struct urb **ai_urbs;
- struct urb **ao_urbs;
- /* pwm-transfer handling */
- struct urb *pwm_urb;
- /* PWM period */
- unsigned int pwm_period;
- /* PWM internal delay for the GPIF in the FX2 */
- u8 pwm_delay;
- /* size of the PWM buffer which holds the bit pattern */
- int pwm_buf_sz;
- /* input buffer for the ISO-transfer */
- __be32 *in_buf;
- /* input buffer for single insn */
- u8 *insn_buf;
-
- unsigned high_speed:1;
- unsigned ai_cmd_running:1;
- unsigned ao_cmd_running:1;
- unsigned pwm_cmd_running:1;
-
- /* time between samples in units of the timer */
- unsigned int ai_timer;
- unsigned int ao_timer;
- /* counter between acquisitions */
- unsigned int ai_counter;
- unsigned int ao_counter;
- /* interval in frames/uframes */
- unsigned int ai_interval;
- /* commands */
- u8 *dux_commands;
- struct semaphore sem;
-};
-
-static void usbduxsigma_unlink_urbs(struct urb **urbs, int num_urbs)
-{
- int i;
-
- for (i = 0; i < num_urbs; i++)
- usb_kill_urb(urbs[i]);
-}
-
-static void usbduxsigma_ai_stop(struct comedi_device *dev, int do_unlink)
-{
- struct usbduxsigma_private *devpriv = dev->private;
-
- if (do_unlink && devpriv->ai_urbs)
- usbduxsigma_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs);
-
- devpriv->ai_cmd_running = 0;
-}
-
-static int usbduxsigma_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxsigma_private *devpriv = dev->private;
-
- down(&devpriv->sem);
- /* unlink only if it is really running */
- usbduxsigma_ai_stop(dev, devpriv->ai_cmd_running);
- up(&devpriv->sem);
-
- return 0;
-}
-
-static void usbduxsigma_ai_handle_urb(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct urb *urb)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- u32 val;
- int ret;
- int i;
-
- if ((urb->actual_length > 0) && (urb->status != -EXDEV)) {
- devpriv->ai_counter--;
- if (devpriv->ai_counter == 0) {
- devpriv->ai_counter = devpriv->ai_timer;
-
- /*
- * Get the data from the USB bus and hand it over
- * to comedi. Note, first byte is the DIO state.
- */
- for (i = 0; i < cmd->chanlist_len; i++) {
- val = be32_to_cpu(devpriv->in_buf[i + 1]);
- val &= 0x00ffffff; /* strip status byte */
- val = comedi_offset_munge(s, val);
- if (!comedi_buf_write_samples(s, &val, 1))
- return;
- }
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
- }
- }
-
- /* if command is still running, resubmit urb */
- if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
- urb->dev = comedi_to_usb_dev(dev);
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev, "urb resubmit failed (%d)\n",
- ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler\n");
- async->events |= COMEDI_CB_ERROR;
- }
- }
-}
-
-static void usbduxsigma_ai_urb_complete(struct urb *urb)
-{
- struct comedi_device *dev = urb->context;
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
-
- /* exit if not running a command, do not resubmit urb */
- if (!devpriv->ai_cmd_running)
- return;
-
- switch (urb->status) {
- case 0:
- /* copy the result in the transfer buffer */
- memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
- usbduxsigma_ai_handle_urb(dev, s, urb);
- break;
-
- case -EILSEQ:
- /*
-		 * error in the isochronous data
- * we don't copy the data into the transfer buffer
- * and recycle the last data byte
- */
- dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
- usbduxsigma_ai_handle_urb(dev, s, urb);
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* happens after an unlink command */
- async->events |= COMEDI_CB_ERROR;
- break;
-
- default:
- /* a real error */
- dev_err(dev->class_dev, "non-zero urb status (%d)\n",
- urb->status);
- async->events |= COMEDI_CB_ERROR;
- break;
- }
-
- /*
- * comedi_handle_events() cannot be used in this driver. The (*cancel)
- * operation would unlink the urb.
- */
- if (async->events & COMEDI_CB_CANCEL_MASK)
- usbduxsigma_ai_stop(dev, 0);
-
- comedi_event(dev, s);
-}
-
-static void usbduxsigma_ao_stop(struct comedi_device *dev, int do_unlink)
-{
- struct usbduxsigma_private *devpriv = dev->private;
-
- if (do_unlink && devpriv->ao_urbs)
- usbduxsigma_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs);
-
- devpriv->ao_cmd_running = 0;
-}
-
-static int usbduxsigma_ao_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxsigma_private *devpriv = dev->private;
-
- down(&devpriv->sem);
- /* unlink only if it is really running */
- usbduxsigma_ao_stop(dev, devpriv->ao_cmd_running);
- up(&devpriv->sem);
-
- return 0;
-}
-
-static void usbduxsigma_ao_handle_urb(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct urb *urb)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- u8 *datap;
- int ret;
- int i;
-
- devpriv->ao_counter--;
- if (devpriv->ao_counter == 0) {
- devpriv->ao_counter = devpriv->ao_timer;
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg) {
- async->events |= COMEDI_CB_EOA;
- return;
- }
-
- /* transmit data to the USB bus */
- datap = urb->transfer_buffer;
- *datap++ = cmd->chanlist_len;
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- unsigned short val;
-
- if (!comedi_buf_read_samples(s, &val, 1)) {
- dev_err(dev->class_dev, "buffer underflow\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return;
- }
-
- *datap++ = val;
- *datap++ = chan;
- s->readback[chan] = val;
- }
- }
-
- /* if command is still running, resubmit urb */
- if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- urb->interval = 1; /* (u)frames */
- urb->number_of_packets = 1;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEOUTBUF;
- urb->iso_frame_desc[0].status = 0;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev, "urb resubmit failed (%d)\n",
- ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler\n");
- async->events |= COMEDI_CB_ERROR;
- }
- }
-}
-
-static void usbduxsigma_ao_urb_complete(struct urb *urb)
-{
- struct comedi_device *dev = urb->context;
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- struct comedi_async *async = s->async;
-
- /* exit if not running a command, do not resubmit urb */
- if (!devpriv->ao_cmd_running)
- return;
-
- switch (urb->status) {
- case 0:
- usbduxsigma_ao_handle_urb(dev, s, urb);
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* happens after an unlink command */
- async->events |= COMEDI_CB_ERROR;
- break;
-
- default:
- /* a real error */
- dev_err(dev->class_dev, "non-zero urb status (%d)\n",
- urb->status);
- async->events |= COMEDI_CB_ERROR;
- break;
- }
-
- /*
- * comedi_handle_events() cannot be used in this driver. The (*cancel)
- * operation would unlink the urb.
- */
- if (async->events & COMEDI_CB_CANCEL_MASK)
- usbduxsigma_ao_stop(dev, 0);
-
- comedi_event(dev, s);
-}
-
-static int usbduxsigma_submit_urbs(struct comedi_device *dev,
- struct urb **urbs, int num_urbs,
- int input_urb)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct urb *urb;
- int ret;
- int i;
-
- /* Submit all URBs and start the transfer on the bus */
- for (i = 0; i < num_urbs; i++) {
- urb = urbs[i];
-
- /* in case of a resubmission after an unlink... */
- if (input_urb)
- urb->interval = 1;
- urb->context = dev;
- urb->dev = usb;
- urb->status = 0;
- urb->transfer_flags = URB_ISO_ASAP;
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static int usbduxsigma_chans_to_interval(int num_chan)
-{
- if (num_chan <= 2)
- return 2; /* 4kHz */
- if (num_chan <= 8)
- return 4; /* 2kHz */
- return 8; /* 1kHz */
-}
-
-static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- int high_speed = devpriv->high_speed;
- int interval = usbduxsigma_chans_to_interval(cmd->chanlist_len);
- unsigned int tmp;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (high_speed) {
- /*
- * In high speed mode microframes are possible.
- * However, during one microframe we can roughly
- * sample two channels. Thus, the more channels
- * are in the channel list the more time we need.
- */
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- (125000 * interval));
- } else {
- /* full speed */
- /* 1kHz scans every USB frame */
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- 1000000);
- }
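-	/*
-	 * E.g. a full 16-channel list gives an interval of 8, so in high
-	 * speed mode scan_begin_arg must be at least 8 * 125000 ns = 1 ms.
-	 */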
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- tmp = rounddown(cmd->scan_begin_arg, high_speed ? 125000 : 1000000);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
-
- if (err)
- return 4;
-
- return 0;
-}
-
-/*
- * creates the ADC command for the MAX1271
- * range is the range value from comedi
- */
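-/*
- * E.g. channel 3 sets bit 3 of muxsg0, channel 10 sets bit 2 of muxsg1.
- */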
-static void create_adc_command(unsigned int chan,
- u8 *muxsg0, u8 *muxsg1)
-{
- if (chan < 8)
- (*muxsg0) = (*muxsg0) | (1 << chan);
- else if (chan < 16)
- (*muxsg1) = (*muxsg1) | (1 << (chan - 8));
-}
-
-static int usbbuxsigma_send_cmd(struct comedi_device *dev, int cmd_type)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxsigma_private *devpriv = dev->private;
- int nsent;
-
- devpriv->dux_commands[0] = cmd_type;
-
- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1),
- devpriv->dux_commands, SIZEOFDUXBUFFER,
- &nsent, BULK_TIMEOUT);
-}
-
-static int usbduxsigma_receive_cmd(struct comedi_device *dev, int command)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxsigma_private *devpriv = dev->private;
- int nrec;
- int ret;
- int i;
-
- for (i = 0; i < RETRIES; i++) {
- ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8),
- devpriv->insn_buf, SIZEINSNBUF,
- &nrec, BULK_TIMEOUT);
- if (ret < 0)
- return ret;
-
- if (devpriv->insn_buf[0] == command)
- return 0;
- }
- /*
- * This is only reached if the data has been requested a
- * couple of times and the command was not received.
- */
- return -EFAULT;
-}
-
-static int usbduxsigma_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- down(&devpriv->sem);
- if (!devpriv->ai_cmd_running) {
- devpriv->ai_cmd_running = 1;
- ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs,
- devpriv->n_ai_urbs, 1);
- if (ret < 0) {
- devpriv->ai_cmd_running = 0;
- up(&devpriv->sem);
- return ret;
- }
- s->async->inttrig = NULL;
- }
- up(&devpriv->sem);
-
- return 1;
-}
-
-static int usbduxsigma_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int len = cmd->chanlist_len;
- u8 muxsg0 = 0;
- u8 muxsg1 = 0;
- u8 sysred = 0;
- int ret;
- int i;
-
- down(&devpriv->sem);
-
- if (devpriv->high_speed) {
- /*
- * every 2 channels get a time window of 125us. Thus, if we
- * sample all 16 channels we need 1ms. If we sample only one
- * channel we need only 125us
- */
- unsigned int interval = usbduxsigma_chans_to_interval(len);
-
- devpriv->ai_interval = interval;
- devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
- } else {
- /* interval always 1ms */
- devpriv->ai_interval = 1;
- devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
- }
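-	/*
-	 * E.g. at high speed with 16 channels (interval 8) a 1 ms
-	 * scan_begin_arg gives ai_timer = 1000000 / (125000 * 8) = 1.
-	 */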
-
- for (i = 0; i < len; i++) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- create_adc_command(chan, &muxsg0, &muxsg1);
- }
-
- devpriv->dux_commands[1] = devpriv->ai_interval;
- devpriv->dux_commands[2] = len; /* num channels per time step */
- devpriv->dux_commands[3] = 0x12; /* CONFIG0 */
- devpriv->dux_commands[4] = 0x03; /* CONFIG1: 23kHz sample, delay 0us */
- devpriv->dux_commands[5] = 0x00; /* CONFIG3: diff. channels off */
- devpriv->dux_commands[6] = muxsg0;
- devpriv->dux_commands[7] = muxsg1;
- devpriv->dux_commands[8] = sysred;
-
- ret = usbbuxsigma_send_cmd(dev, USBBUXSIGMA_AD_CMD);
- if (ret < 0) {
- up(&devpriv->sem);
- return ret;
- }
-
- devpriv->ai_counter = devpriv->ai_timer;
-
- if (cmd->start_src == TRIG_NOW) {
- /* enable this acquisition operation */
- devpriv->ai_cmd_running = 1;
- ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs,
- devpriv->n_ai_urbs, 1);
- if (ret < 0) {
- devpriv->ai_cmd_running = 0;
- up(&devpriv->sem);
- return ret;
- }
- s->async->inttrig = NULL;
- } else { /* TRIG_INT */
- s->async->inttrig = usbduxsigma_ai_inttrig;
- }
-
- up(&devpriv->sem);
-
- return 0;
-}
-
-static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- u8 muxsg0 = 0;
- u8 muxsg1 = 0;
- u8 sysred = 0;
- int ret;
- int i;
-
- down(&devpriv->sem);
- if (devpriv->ai_cmd_running) {
- up(&devpriv->sem);
- return -EBUSY;
- }
-
- create_adc_command(chan, &muxsg0, &muxsg1);
-
- /* Mode 0 is used to get a single conversion on demand */
- devpriv->dux_commands[1] = 0x16; /* CONFIG0: chopper on */
- devpriv->dux_commands[2] = 0x80; /* CONFIG1: 2kHz sampling rate */
- devpriv->dux_commands[3] = 0x00; /* CONFIG3: diff. channels off */
- devpriv->dux_commands[4] = muxsg0;
- devpriv->dux_commands[5] = muxsg1;
- devpriv->dux_commands[6] = sysred;
-
- /* adc commands */
- ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
- if (ret < 0) {
- up(&devpriv->sem);
- return ret;
- }
-
- for (i = 0; i < insn->n; i++) {
- u32 val;
-
- ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
- if (ret < 0) {
- up(&devpriv->sem);
- return ret;
- }
-
- /* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(get_unaligned((__be32
- *)(devpriv->insn_buf + 1)));
- val &= 0x00ffffff; /* strip status byte */
- data[i] = comedi_offset_munge(s, val);
- }
- up(&devpriv->sem);
-
- return insn->n;
-}
-
-static int usbduxsigma_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- int ret;
-
- down(&devpriv->sem);
- ret = comedi_readback_insn_read(dev, s, insn, data);
- up(&devpriv->sem);
-
- return ret;
-}
-
-static int usbduxsigma_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int ret;
- int i;
-
- down(&devpriv->sem);
- if (devpriv->ao_cmd_running) {
- up(&devpriv->sem);
- return -EBUSY;
- }
-
- for (i = 0; i < insn->n; i++) {
- devpriv->dux_commands[1] = 1; /* num channels */
- devpriv->dux_commands[2] = data[i]; /* value */
- devpriv->dux_commands[3] = chan; /* channel number */
- ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_DA_CMD);
- if (ret < 0) {
- up(&devpriv->sem);
- return ret;
- }
- s->readback[chan] = data[i];
- }
- up(&devpriv->sem);
-
- return insn->n;
-}
-
-static int usbduxsigma_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int trig_num)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- if (trig_num != cmd->start_arg)
- return -EINVAL;
-
- down(&devpriv->sem);
- if (!devpriv->ao_cmd_running) {
- devpriv->ao_cmd_running = 1;
- ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs,
- devpriv->n_ao_urbs, 0);
- if (ret < 0) {
- devpriv->ao_cmd_running = 0;
- up(&devpriv->sem);
- return ret;
- }
- s->async->inttrig = NULL;
- }
- up(&devpriv->sem);
-
- return 1;
-}
-
-static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- unsigned int tmp;
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
-
- /*
- * For now, always use "scan" timing with all channels updated at once
- * (cmd->scan_begin_src == TRIG_TIMER, cmd->convert_src == TRIG_NOW).
- *
- * In a future version, "convert" timing with channels updated
-	 * individually may be supported in high speed mode
- * (cmd->scan_begin_src == TRIG_FOLLOW, cmd->convert_src == TRIG_TIMER).
- */
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
- err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
-	if (err)
-		return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= comedi_check_trigger_is_unique(cmd->start_src);
- err |= comedi_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000);
-
- err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
- cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* Step 4: fix up any arguments */
-
- tmp = rounddown(cmd->scan_begin_arg, 1000000);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
-
- if (err)
- return 4;
-
- return 0;
-}
-
-static int usbduxsigma_ao_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int ret;
-
- down(&devpriv->sem);
-
- /*
- * For now, only "scan" timing is supported. A future version may
- * support "convert" timing in high speed mode.
- *
- * Timing of the scan: every 1ms all channels updated at once.
- */
- devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
-
- devpriv->ao_counter = devpriv->ao_timer;
-
- if (cmd->start_src == TRIG_NOW) {
- /* enable this acquisition operation */
- devpriv->ao_cmd_running = 1;
- ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs,
- devpriv->n_ao_urbs, 0);
- if (ret < 0) {
- devpriv->ao_cmd_running = 0;
- up(&devpriv->sem);
- return ret;
- }
- s->async->inttrig = NULL;
- } else { /* TRIG_INT */
- s->async->inttrig = usbduxsigma_ao_inttrig;
- }
-
- up(&devpriv->sem);
-
- return 0;
-}
-
-static int usbduxsigma_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int ret;
-
- ret = comedi_dio_insn_config(dev, s, insn, data, 0);
- if (ret)
- return ret;
-
- /*
- * We don't tell the firmware here as it would take 8 frames
- * to submit the information. We do it in the (*insn_bits).
- */
- return insn->n;
-}
-
-static int usbduxsigma_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- int ret;
-
- down(&devpriv->sem);
-
- comedi_dio_update_state(s, data);
-
- /* Always update the hardware. See the (*insn_config). */
- devpriv->dux_commands[1] = s->io_bits & 0xff;
- devpriv->dux_commands[4] = s->state & 0xff;
- devpriv->dux_commands[2] = (s->io_bits >> 8) & 0xff;
- devpriv->dux_commands[5] = (s->state >> 8) & 0xff;
- devpriv->dux_commands[3] = (s->io_bits >> 16) & 0xff;
- devpriv->dux_commands[6] = (s->state >> 16) & 0xff;
-
- ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_DIO_BITS_CMD);
- if (ret < 0)
- goto done;
- ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_DIO_BITS_CMD);
- if (ret < 0)
- goto done;
-
- s->state = devpriv->insn_buf[1] |
- (devpriv->insn_buf[2] << 8) |
- (devpriv->insn_buf[3] << 16);
-
- data[1] = s->state;
- ret = insn->n;
-
-done:
- up(&devpriv->sem);
-
- return ret;
-}
-
-static void usbduxsigma_pwm_stop(struct comedi_device *dev, int do_unlink)
-{
- struct usbduxsigma_private *devpriv = dev->private;
-
- if (do_unlink) {
- if (devpriv->pwm_urb)
- usb_kill_urb(devpriv->pwm_urb);
- }
-
- devpriv->pwm_cmd_running = 0;
-}
-
-static int usbduxsigma_pwm_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxsigma_private *devpriv = dev->private;
-
- /* unlink only if it is really running */
- usbduxsigma_pwm_stop(dev, devpriv->pwm_cmd_running);
-
- return usbbuxsigma_send_cmd(dev, USBDUXSIGMA_PWM_OFF_CMD);
-}
-
-static void usbduxsigma_pwm_urb_complete(struct urb *urb)
-{
- struct comedi_device *dev = urb->context;
- struct usbduxsigma_private *devpriv = dev->private;
- int ret;
-
- switch (urb->status) {
- case 0:
- /* success */
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* happens after an unlink command */
- if (devpriv->pwm_cmd_running)
- usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */
- return;
-
- default:
- /* a real error */
- if (devpriv->pwm_cmd_running) {
- dev_err(dev->class_dev, "non-zero urb status (%d)\n",
- urb->status);
- usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */
- }
- return;
- }
-
- if (!devpriv->pwm_cmd_running)
- return;
-
- urb->transfer_buffer_length = devpriv->pwm_buf_sz;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev, "urb resubmit failed (%d)\n", ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler\n");
- usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */
- }
-}
-
-static int usbduxsigma_submit_pwm_urb(struct comedi_device *dev)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxsigma_private *devpriv = dev->private;
- struct urb *urb = devpriv->pwm_urb;
-
- /* in case of a resubmission after an unlink... */
- usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4),
- urb->transfer_buffer, devpriv->pwm_buf_sz,
- usbduxsigma_pwm_urb_complete, dev);
-
- return usb_submit_urb(urb, GFP_ATOMIC);
-}
-
-static int usbduxsigma_pwm_period(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int period)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- int fx2delay = 255;
-
- if (period < MIN_PWM_PERIOD)
- return -EAGAIN;
-
- fx2delay = (period / (6 * 512 * 1000 / 33)) - 6;
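-	/*
-	 * E.g. for PWM_DEFAULT_PERIOD (10000000 ns) this yields
-	 * (10000000 / 93090) - 6 = 101.
-	 */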
- if (fx2delay > 255)
- return -EAGAIN;
-
- devpriv->pwm_delay = fx2delay;
- devpriv->pwm_period = period;
- return 0;
-}
-
-static int usbduxsigma_pwm_start(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- int ret;
-
- if (devpriv->pwm_cmd_running)
- return 0;
-
- devpriv->dux_commands[1] = devpriv->pwm_delay;
- ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_PWM_ON_CMD);
- if (ret < 0)
- return ret;
-
- memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz);
-
- devpriv->pwm_cmd_running = 1;
- ret = usbduxsigma_submit_pwm_urb(dev);
- if (ret < 0) {
- devpriv->pwm_cmd_running = 0;
- return ret;
- }
-
- return 0;
-}
-
-static void usbduxsigma_pwm_pattern(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int value,
- unsigned int sign)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- char pwm_mask = (1 << chan); /* DIO bit for the PWM data */
- char sgn_mask = (16 << chan); /* DIO bit for the sign */
- char *buf = (char *)(devpriv->pwm_urb->transfer_buffer);
- int szbuf = devpriv->pwm_buf_sz;
- int i;
-
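-	/*
-	 * The duty cycle is value / szbuf: the first 'value' bytes in the
-	 * buffer drive the PWM bit high, the remaining bytes drive it low.
-	 */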
- for (i = 0; i < szbuf; i++) {
- char c = *buf;
-
- c &= ~pwm_mask;
- if (i < value)
- c |= pwm_mask;
- if (!sign)
- c &= ~sgn_mask;
- else
- c |= sgn_mask;
- *buf++ = c;
- }
-}
-
-static int usbduxsigma_pwm_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- /*
- * It doesn't make sense to support more than one value here
- * because it would just overwrite the PWM buffer.
- */
- if (insn->n != 1)
- return -EINVAL;
-
- /*
-	 * The sign is set via a special INSN only; this gives us 8 bits
-	 * for normal operation. The sign is 0 by default.
- */
- usbduxsigma_pwm_pattern(dev, s, chan, data[0], 0);
-
- return insn->n;
-}
-
-static int usbduxsigma_pwm_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
-
- switch (data[0]) {
- case INSN_CONFIG_ARM:
- /*
- * if not zero the PWM is limited to a certain time which is
- * not supported here
- */
- if (data[1] != 0)
- return -EINVAL;
- return usbduxsigma_pwm_start(dev, s);
- case INSN_CONFIG_DISARM:
- return usbduxsigma_pwm_cancel(dev, s);
- case INSN_CONFIG_GET_PWM_STATUS:
- data[1] = devpriv->pwm_cmd_running;
- return 0;
- case INSN_CONFIG_PWM_SET_PERIOD:
- return usbduxsigma_pwm_period(dev, s, data[1]);
- case INSN_CONFIG_PWM_GET_PERIOD:
- data[1] = devpriv->pwm_period;
- return 0;
- case INSN_CONFIG_PWM_SET_H_BRIDGE:
- /*
- * data[1] = value
- * data[2] = sign (for a relay)
- */
- usbduxsigma_pwm_pattern(dev, s, chan, data[1], (data[2] != 0));
- return 0;
- case INSN_CONFIG_PWM_GET_H_BRIDGE:
- /* values are not kept in this driver, nothing to return */
- return -EINVAL;
- }
- return -EINVAL;
-}
-
-static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
-{
- struct comedi_subdevice *s = dev->read_subdev;
- struct usbduxsigma_private *devpriv = dev->private;
- u8 sysred;
- u32 val;
- int ret;
-
- switch (chan) {
- default:
- case 0:
- sysred = 0; /* ADC zero */
- break;
- case 1:
- sysred = 1; /* ADC offset */
- break;
- case 2:
- sysred = 4; /* VCC */
- break;
- case 3:
- sysred = 8; /* temperature */
- break;
- case 4:
- sysred = 16; /* gain */
- break;
- case 5:
- sysred = 32; /* ref */
- break;
- }
-
- devpriv->dux_commands[1] = 0x12; /* CONFIG0 */
- devpriv->dux_commands[2] = 0x80; /* CONFIG1: 2kHz sampling rate */
- devpriv->dux_commands[3] = 0x00; /* CONFIG3: diff. channels off */
- devpriv->dux_commands[4] = 0;
- devpriv->dux_commands[5] = 0;
- devpriv->dux_commands[6] = sysred;
- ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
- if (ret < 0)
- return ret;
-
- ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
- if (ret < 0)
- return ret;
-
- /* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(get_unaligned((__be32 *)(devpriv->insn_buf + 1)));
- val &= 0x00ffffff; /* strip status byte */
-
- return (int)comedi_offset_munge(s, val);
-}
-
-static int usbduxsigma_firmware_upload(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- u8 *buf;
- u8 *tmp;
- int ret;
-
- if (!data)
- return 0;
-
- if (size > FIRMWARE_MAX_LEN) {
- dev_err(dev->class_dev, "firmware binary too large for FX2\n");
- return -ENOMEM;
- }
-
- /* we generate a local buffer for the firmware */
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* we need a malloc'ed buffer for usb_control_msg() */
- tmp = kmalloc(1, GFP_KERNEL);
- if (!tmp) {
- kfree(buf);
- return -ENOMEM;
- }
-
- /* stop the current firmware on the device */
- *tmp = 1; /* 7f92 to one */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- USBDUXSUB_CPUCS, 0x0000,
- tmp, 1,
- BULK_TIMEOUT);
- if (ret < 0) {
- dev_err(dev->class_dev, "can not stop firmware\n");
- goto done;
- }
-
- /* upload the new firmware to the device */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- 0, 0x0000,
- buf, size,
- BULK_TIMEOUT);
- if (ret < 0) {
- dev_err(dev->class_dev, "firmware upload failed\n");
- goto done;
- }
-
- /* start the new firmware on the device */
- *tmp = 0; /* 7f92 to zero */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- USBDUXSUB_CPUCS, 0x0000,
- tmp, 1,
- BULK_TIMEOUT);
- if (ret < 0)
- dev_err(dev->class_dev, "can not start firmware\n");
-
-done:
- kfree(tmp);
- kfree(buf);
- return ret;
-}
-
-static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
-{
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxsigma_private *devpriv = dev->private;
- struct urb *urb;
- int i;
-
- devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
- devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL);
- devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL);
- devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(urb), GFP_KERNEL);
- devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(urb), GFP_KERNEL);
- if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf ||
- !devpriv->ai_urbs || !devpriv->ao_urbs)
- return -ENOMEM;
-
- for (i = 0; i < devpriv->n_ai_urbs; i++) {
- /* one frame: 1ms */
- urb = usb_alloc_urb(1, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
- devpriv->ai_urbs[i] = urb;
- urb->dev = usb;
-		/* will be filled later with a pointer to the comedi device */
-		/* and ONLY then should the urb be submitted */
- urb->context = NULL;
- urb->pipe = usb_rcvisocpipe(usb, 6);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!urb->transfer_buffer)
- return -ENOMEM;
- urb->complete = usbduxsigma_ai_urb_complete;
- urb->number_of_packets = 1;
- urb->transfer_buffer_length = SIZEINBUF;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEINBUF;
- }
-
- for (i = 0; i < devpriv->n_ao_urbs; i++) {
- /* one frame: 1ms */
- urb = usb_alloc_urb(1, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
- devpriv->ao_urbs[i] = urb;
- urb->dev = usb;
-		/* will be filled later with a pointer to the comedi device */
-		/* and ONLY then should the urb be submitted */
- urb->context = NULL;
- urb->pipe = usb_sndisocpipe(usb, 2);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!urb->transfer_buffer)
- return -ENOMEM;
- urb->complete = usbduxsigma_ao_urb_complete;
- urb->number_of_packets = 1;
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEOUTBUF;
- urb->interval = 1; /* (u)frames */
- }
-
- if (devpriv->pwm_buf_sz) {
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
- devpriv->pwm_urb = urb;
-
- urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz,
- GFP_KERNEL);
- if (!urb->transfer_buffer)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void usbduxsigma_free_usb_buffers(struct comedi_device *dev)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct urb *urb;
- int i;
-
- urb = devpriv->pwm_urb;
- if (urb) {
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
- }
- if (devpriv->ao_urbs) {
- for (i = 0; i < devpriv->n_ao_urbs; i++) {
- urb = devpriv->ao_urbs[i];
- if (urb) {
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
- }
- }
- kfree(devpriv->ao_urbs);
- }
- if (devpriv->ai_urbs) {
- for (i = 0; i < devpriv->n_ai_urbs; i++) {
- urb = devpriv->ai_urbs[i];
- if (urb) {
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
- }
- }
- kfree(devpriv->ai_urbs);
- }
- kfree(devpriv->insn_buf);
- kfree(devpriv->in_buf);
- kfree(devpriv->dux_commands);
-}
-
-static int usbduxsigma_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxsigma_private *devpriv;
- struct comedi_subdevice *s;
- int offset;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- sema_init(&devpriv->sem, 1);
-
- usb_set_intfdata(intf, devpriv);
-
- devpriv->high_speed = (usb->speed == USB_SPEED_HIGH);
- if (devpriv->high_speed) {
- devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH;
- devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH;
- devpriv->pwm_buf_sz = 512;
- } else {
- devpriv->n_ai_urbs = NUMOFINBUFFERSFULL;
- devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL;
- }
-
- ret = usbduxsigma_alloc_usb_buffers(dev);
- if (ret)
- return ret;
-
- /* setting to alternate setting 3: enabling iso ep and bulk ep. */
- ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber,
- 3);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "could not set alternate setting 3 in high speed\n");
- return ret;
- }
-
- ret = comedi_load_firmware(dev, &usb->dev, FIRMWARE,
- usbduxsigma_firmware_upload, 0);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 4 : 3);
- if (ret)
- return ret;
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ | SDF_LSAMPL;
- s->n_chan = NUMCHANNELS;
- s->len_chanlist = NUMCHANNELS;
- s->maxdata = 0x00ffffff;
- s->range_table = &usbduxsigma_ai_range;
- s->insn_read = usbduxsigma_ai_insn_read;
- s->do_cmdtest = usbduxsigma_ai_cmdtest;
- s->do_cmd = usbduxsigma_ai_cmd;
- s->cancel = usbduxsigma_ai_cancel;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- dev->write_subdev = s;
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
- s->n_chan = 4;
- s->len_chanlist = s->n_chan;
- s->maxdata = 0x00ff;
- s->range_table = &range_unipolar2_5;
- s->insn_write = usbduxsigma_ao_insn_write;
- s->insn_read = usbduxsigma_ao_insn_read;
- s->do_cmdtest = usbduxsigma_ao_cmdtest;
- s->do_cmd = usbduxsigma_ao_cmd;
- s->cancel = usbduxsigma_ao_cancel;
-
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = usbduxsigma_dio_insn_bits;
- s->insn_config = usbduxsigma_dio_insn_config;
-
- if (devpriv->high_speed) {
- /* Timer / pwm subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
- s->n_chan = 8;
- s->maxdata = devpriv->pwm_buf_sz;
- s->insn_write = usbduxsigma_pwm_write;
- s->insn_config = usbduxsigma_pwm_config;
-
- usbduxsigma_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
- }
-
- offset = usbduxsigma_getstatusinfo(dev, 0);
- if (offset < 0) {
- dev_err(dev->class_dev,
- "Communication to USBDUXSIGMA failed! Check firmware and cabling.\n");
- return offset;
- }
-
- dev_info(dev->class_dev, "ADC_zero = %x\n", offset);
-
- return 0;
-}
-
-static void usbduxsigma_detach(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usbduxsigma_private *devpriv = dev->private;
-
- usb_set_intfdata(intf, NULL);
-
- if (!devpriv)
- return;
-
- down(&devpriv->sem);
-
- /* force unlink all urbs */
- usbduxsigma_ai_stop(dev, 1);
- usbduxsigma_ao_stop(dev, 1);
- usbduxsigma_pwm_stop(dev, 1);
-
- usbduxsigma_free_usb_buffers(dev);
-
- up(&devpriv->sem);
-}
-
-static struct comedi_driver usbduxsigma_driver = {
- .driver_name = "usbduxsigma",
- .module = THIS_MODULE,
- .auto_attach = usbduxsigma_auto_attach,
- .detach = usbduxsigma_detach,
-};
-
-static int usbduxsigma_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- return comedi_usb_auto_config(intf, &usbduxsigma_driver, 0);
-}
-
-static const struct usb_device_id usbduxsigma_usb_table[] = {
- { USB_DEVICE(0x13d8, 0x0020) },
- { USB_DEVICE(0x13d8, 0x0021) },
- { USB_DEVICE(0x13d8, 0x0022) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, usbduxsigma_usb_table);
-
-static struct usb_driver usbduxsigma_usb_driver = {
- .name = "usbduxsigma",
- .probe = usbduxsigma_usb_probe,
- .disconnect = comedi_usb_auto_unconfig,
- .id_table = usbduxsigma_usb_table,
-};
-module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver);
-
-MODULE_AUTHOR("Bernd Porr, mail@berndporr.me.uk");
-MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- mail@berndporr.me.uk");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
deleted file mode 100644
index 8c7393ef762d..000000000000
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ /dev/null
@@ -1,889 +0,0 @@
-/*
- * vmk80xx.c
- * Velleman USB Board Low-Level Driver
- *
- * Copyright (C) 2009 Manuel Gebele <forensixs@gmx.de>, Germany
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Driver: vmk80xx
- * Description: Velleman USB Board Low-Level Driver
- * Devices: [Velleman] K8055 (K8055/VM110), K8061 (K8061/VM140),
- * VM110 (K8055/VM110), VM140 (K8061/VM140)
- * Author: Manuel Gebele <forensixs@gmx.de>
- * Updated: Sun, 10 May 2009 11:14:59 +0200
- * Status: works
- *
- * Supports:
- * - analog input
- * - analog output
- * - digital input
- * - digital output
- * - counter
- * - pwm
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/uaccess.h>
-
-#include "../comedi_usb.h"
-
-enum {
- DEVICE_VMK8055,
- DEVICE_VMK8061
-};
-
-#define VMK8055_DI_REG 0x00
-#define VMK8055_DO_REG 0x01
-#define VMK8055_AO1_REG 0x02
-#define VMK8055_AO2_REG 0x03
-#define VMK8055_AI1_REG 0x02
-#define VMK8055_AI2_REG 0x03
-#define VMK8055_CNT1_REG 0x04
-#define VMK8055_CNT2_REG 0x06
-
-#define VMK8061_CH_REG 0x01
-#define VMK8061_DI_REG 0x01
-#define VMK8061_DO_REG 0x01
-#define VMK8061_PWM_REG1 0x01
-#define VMK8061_PWM_REG2 0x02
-#define VMK8061_CNT_REG 0x02
-#define VMK8061_AO_REG 0x02
-#define VMK8061_AI_REG1 0x02
-#define VMK8061_AI_REG2 0x03
-
-#define VMK8055_CMD_RST 0x00
-#define VMK8055_CMD_DEB1_TIME 0x01
-#define VMK8055_CMD_DEB2_TIME 0x02
-#define VMK8055_CMD_RST_CNT1 0x03
-#define VMK8055_CMD_RST_CNT2 0x04
-#define VMK8055_CMD_WRT_AD 0x05
-
-#define VMK8061_CMD_RD_AI 0x00
-#define VMK8061_CMR_RD_ALL_AI 0x01 /* !non-active! */
-#define VMK8061_CMD_SET_AO 0x02
-#define VMK8061_CMD_SET_ALL_AO 0x03 /* !non-active! */
-#define VMK8061_CMD_OUT_PWM 0x04
-#define VMK8061_CMD_RD_DI 0x05
-#define VMK8061_CMD_DO 0x06 /* !non-active! */
-#define VMK8061_CMD_CLR_DO 0x07
-#define VMK8061_CMD_SET_DO 0x08
-#define VMK8061_CMD_RD_CNT 0x09 /* TODO: completely pointless? */
-#define VMK8061_CMD_RST_CNT 0x0a /* TODO: completely pointless? */
-#define VMK8061_CMD_RD_VERSION 0x0b /* internal usage */
-#define VMK8061_CMD_RD_JMP_STAT 0x0c /* TODO: not implemented yet */
-#define VMK8061_CMD_RD_PWR_STAT 0x0d /* internal usage */
-#define VMK8061_CMD_RD_DO 0x0e
-#define VMK8061_CMD_RD_AO 0x0f
-#define VMK8061_CMD_RD_PWM 0x10
-
-#define IC3_VERSION BIT(0)
-#define IC6_VERSION BIT(1)
-
-enum vmk80xx_model {
- VMK8055_MODEL,
- VMK8061_MODEL
-};
-
-static const struct comedi_lrange vmk8061_range = {
- 2, {
- UNI_RANGE(5),
- UNI_RANGE(10)
- }
-};
-
-struct vmk80xx_board {
- const char *name;
- enum vmk80xx_model model;
- const struct comedi_lrange *range;
- int ai_nchans;
- unsigned int ai_maxdata;
- int ao_nchans;
- int di_nchans;
- unsigned int cnt_maxdata;
- int pwm_nchans;
- unsigned int pwm_maxdata;
-};
-
-static const struct vmk80xx_board vmk80xx_boardinfo[] = {
- [DEVICE_VMK8055] = {
- .name = "K8055 (VM110)",
- .model = VMK8055_MODEL,
- .range = &range_unipolar5,
- .ai_nchans = 2,
- .ai_maxdata = 0x00ff,
- .ao_nchans = 2,
- .di_nchans = 6,
- .cnt_maxdata = 0xffff,
- },
- [DEVICE_VMK8061] = {
- .name = "K8061 (VM140)",
- .model = VMK8061_MODEL,
- .range = &vmk8061_range,
- .ai_nchans = 8,
- .ai_maxdata = 0x03ff,
- .ao_nchans = 8,
- .di_nchans = 8,
- .cnt_maxdata = 0, /* unknown, device is not writeable */
- .pwm_nchans = 1,
- .pwm_maxdata = 0x03ff,
- },
-};
-
-struct vmk80xx_private {
- struct usb_endpoint_descriptor *ep_rx;
- struct usb_endpoint_descriptor *ep_tx;
- struct semaphore limit_sem;
- unsigned char *usb_rx_buf;
- unsigned char *usb_tx_buf;
- enum vmk80xx_model model;
-};
-
-static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
-{
- struct vmk80xx_private *devpriv = dev->private;
- struct usb_device *usb = comedi_to_usb_dev(dev);
- __u8 tx_addr;
- __u8 rx_addr;
- unsigned int tx_pipe;
- unsigned int rx_pipe;
- size_t size;
-
- tx_addr = devpriv->ep_tx->bEndpointAddress;
- rx_addr = devpriv->ep_rx->bEndpointAddress;
- tx_pipe = usb_sndbulkpipe(usb, tx_addr);
- rx_pipe = usb_rcvbulkpipe(usb, rx_addr);
-
- /*
- * The max packet size attributes of the K8061
- * input/output endpoints are identical
- */
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
-
- usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf,
- size, NULL, devpriv->ep_tx->bInterval);
- usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, size, NULL, HZ * 10);
-}
-
-static int vmk80xx_read_packet(struct comedi_device *dev)
-{
- struct vmk80xx_private *devpriv = dev->private;
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usb_endpoint_descriptor *ep;
- unsigned int pipe;
-
- if (devpriv->model == VMK8061_MODEL) {
- vmk80xx_do_bulk_msg(dev);
- return 0;
- }
-
- ep = devpriv->ep_rx;
- pipe = usb_rcvintpipe(usb, ep->bEndpointAddress);
- return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf,
- le16_to_cpu(ep->wMaxPacketSize), NULL,
- HZ * 10);
-}
-
-static int vmk80xx_write_packet(struct comedi_device *dev, int cmd)
-{
- struct vmk80xx_private *devpriv = dev->private;
- struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usb_endpoint_descriptor *ep;
- unsigned int pipe;
-
- devpriv->usb_tx_buf[0] = cmd;
-
- if (devpriv->model == VMK8061_MODEL) {
- vmk80xx_do_bulk_msg(dev);
- return 0;
- }
-
- ep = devpriv->ep_tx;
- pipe = usb_sndintpipe(usb, ep->bEndpointAddress);
- return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf,
- le16_to_cpu(ep->wMaxPacketSize), NULL,
- HZ * 10);
-}
-
-static int vmk80xx_reset_device(struct comedi_device *dev)
-{
- struct vmk80xx_private *devpriv = dev->private;
- size_t size;
- int retval;
-
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
- memset(devpriv->usb_tx_buf, 0, size);
- retval = vmk80xx_write_packet(dev, VMK8055_CMD_RST);
- if (retval)
- return retval;
- /* set outputs to known state as we cannot read them */
- return vmk80xx_write_packet(dev, VMK8055_CMD_WRT_AD);
-}
-
-static int vmk80xx_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- int chan;
- int reg[2];
- int n;
-
- down(&devpriv->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- switch (devpriv->model) {
- case VMK8055_MODEL:
- if (!chan)
- reg[0] = VMK8055_AI1_REG;
- else
- reg[0] = VMK8055_AI2_REG;
- break;
- case VMK8061_MODEL:
- default:
- reg[0] = VMK8061_AI_REG1;
- reg[1] = VMK8061_AI_REG2;
- devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AI;
- devpriv->usb_tx_buf[VMK8061_CH_REG] = chan;
- break;
- }
-
- for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
- break;
-
- if (devpriv->model == VMK8055_MODEL) {
- data[n] = devpriv->usb_rx_buf[reg[0]];
- continue;
- }
-
- /* VMK8061_MODEL */
- data[n] = devpriv->usb_rx_buf[reg[0]] + 256 *
- devpriv->usb_rx_buf[reg[1]];
- }
-
- up(&devpriv->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- int chan;
- int cmd;
- int reg;
- int n;
-
- down(&devpriv->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- switch (devpriv->model) {
- case VMK8055_MODEL:
- cmd = VMK8055_CMD_WRT_AD;
- if (!chan)
- reg = VMK8055_AO1_REG;
- else
- reg = VMK8055_AO2_REG;
- break;
- default: /* NOTE: avoid compiler warnings */
- cmd = VMK8061_CMD_SET_AO;
- reg = VMK8061_AO_REG;
- devpriv->usb_tx_buf[VMK8061_CH_REG] = chan;
- break;
- }
-
- for (n = 0; n < insn->n; n++) {
- devpriv->usb_tx_buf[reg] = data[n];
-
- if (vmk80xx_write_packet(dev, cmd))
- break;
- }
-
- up(&devpriv->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- int chan;
- int reg;
- int n;
-
- down(&devpriv->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- reg = VMK8061_AO_REG - 1;
-
- devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AO;
-
- for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
- break;
-
- data[n] = devpriv->usb_rx_buf[reg + chan];
- }
-
- up(&devpriv->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- unsigned char *rx_buf;
- int reg;
- int retval;
-
- down(&devpriv->limit_sem);
-
- rx_buf = devpriv->usb_rx_buf;
-
- if (devpriv->model == VMK8061_MODEL) {
- reg = VMK8061_DI_REG;
- devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_DI;
- } else {
- reg = VMK8055_DI_REG;
- }
-
- retval = vmk80xx_read_packet(dev);
-
- if (!retval) {
- if (devpriv->model == VMK8055_MODEL)
- data[1] = (((rx_buf[reg] >> 4) & 0x03) |
- ((rx_buf[reg] << 2) & 0x04) |
- ((rx_buf[reg] >> 3) & 0x18));
- else
- data[1] = rx_buf[reg];
-
- retval = 2;
- }
-
- up(&devpriv->limit_sem);
-
- return retval;
-}
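
/*
 * A stand-alone illustrative sketch (not part of the removed driver) of the
 * K8055 digital-input remapping performed in vmk80xx_di_insn_bits() above:
 * the scattered hardware bits 4, 5, 0, 6 and 7 are packed into consecutive
 * channel bits 0..4. The helper name and sample value are assumptions made
 * for illustration only.
 */
#include <stdio.h>

static unsigned int k8055_di_remap(unsigned int raw)
{
	return ((raw >> 4) & 0x03) |	/* raw bits 4,5 -> channels 0,1 */
	       ((raw << 2) & 0x04) |	/* raw bit 0    -> channel 2   */
	       ((raw >> 3) & 0x18);	/* raw bits 6,7 -> channels 3,4 */
}

int main(void)
{
	printf("raw 0xf1 -> channels 0x%02x\n", k8055_di_remap(0xf1));
	return 0;
}
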
-
-static int vmk80xx_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- unsigned char *rx_buf = devpriv->usb_rx_buf;
- unsigned char *tx_buf = devpriv->usb_tx_buf;
- int reg, cmd;
- int ret = 0;
-
- if (devpriv->model == VMK8061_MODEL) {
- reg = VMK8061_DO_REG;
- cmd = VMK8061_CMD_DO;
- } else { /* VMK8055_MODEL */
- reg = VMK8055_DO_REG;
- cmd = VMK8055_CMD_WRT_AD;
- }
-
- down(&devpriv->limit_sem);
-
- if (comedi_dio_update_state(s, data)) {
- tx_buf[reg] = s->state;
- ret = vmk80xx_write_packet(dev, cmd);
- if (ret)
- goto out;
- }
-
- if (devpriv->model == VMK8061_MODEL) {
- tx_buf[0] = VMK8061_CMD_RD_DO;
- ret = vmk80xx_read_packet(dev);
- if (ret)
- goto out;
- data[1] = rx_buf[reg];
- } else {
- data[1] = s->state;
- }
-
-out:
- up(&devpriv->limit_sem);
-
- return ret ? ret : insn->n;
-}
-
-static int vmk80xx_cnt_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- int chan;
- int reg[2];
- int n;
-
- down(&devpriv->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- switch (devpriv->model) {
- case VMK8055_MODEL:
- if (!chan)
- reg[0] = VMK8055_CNT1_REG;
- else
- reg[0] = VMK8055_CNT2_REG;
- break;
- case VMK8061_MODEL:
- default:
- reg[0] = VMK8061_CNT_REG;
- reg[1] = VMK8061_CNT_REG;
- devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_CNT;
- break;
- }
-
- for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
- break;
-
- if (devpriv->model == VMK8055_MODEL)
- data[n] = devpriv->usb_rx_buf[reg[0]];
- else /* VMK8061_MODEL */
- data[n] = devpriv->usb_rx_buf[reg[0] * (chan + 1) + 1]
- + 256 * devpriv->usb_rx_buf[reg[1] * 2 + 2];
- }
-
- up(&devpriv->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_cnt_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int cmd;
- int reg;
- int ret;
-
- down(&devpriv->limit_sem);
- switch (data[0]) {
- case INSN_CONFIG_RESET:
- if (devpriv->model == VMK8055_MODEL) {
- if (!chan) {
- cmd = VMK8055_CMD_RST_CNT1;
- reg = VMK8055_CNT1_REG;
- } else {
- cmd = VMK8055_CMD_RST_CNT2;
- reg = VMK8055_CNT2_REG;
- }
- devpriv->usb_tx_buf[reg] = 0x00;
- } else {
- cmd = VMK8061_CMD_RST_CNT;
- }
- ret = vmk80xx_write_packet(dev, cmd);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- up(&devpriv->limit_sem);
-
- return ret ? ret : insn->n;
-}
-
-static int vmk80xx_cnt_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- unsigned long debtime;
- unsigned long val;
- int chan;
- int cmd;
- int n;
-
- down(&devpriv->limit_sem);
- chan = CR_CHAN(insn->chanspec);
-
- if (!chan)
- cmd = VMK8055_CMD_DEB1_TIME;
- else
- cmd = VMK8055_CMD_DEB2_TIME;
-
- for (n = 0; n < insn->n; n++) {
- debtime = data[n];
- if (debtime == 0)
- debtime = 1;
-
- /* TODO: Prevent overflows */
- if (debtime > 7450)
- debtime = 7450;
-
- val = int_sqrt(debtime * 1000 / 115);
- if (((val + 1) * val) < debtime * 1000 / 115)
- val += 1;
-
- devpriv->usb_tx_buf[6 + chan] = val;
-
- if (vmk80xx_write_packet(dev, cmd))
- break;
- }
-
- up(&devpriv->limit_sem);
-
- return n;
-}
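
/*
 * A stand-alone sketch (not part of the removed driver) of the debounce-time
 * conversion in vmk80xx_cnt_insn_write() above: the value written to the
 * board is the integer square root of debtime * 1000 / 115 with the driver's
 * own round-up adjustment, after clamping debtime to the 1..7450 range used
 * above. The helper names and the example value are illustrative assumptions.
 */
#include <stdio.h>

static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static unsigned long debtime_to_reg(unsigned long debtime)
{
	unsigned long scaled, val;

	if (debtime == 0)
		debtime = 1;
	if (debtime > 7450)		/* same clamp as the driver */
		debtime = 7450;

	scaled = debtime * 1000 / 115;
	val = isqrt(scaled);
	if ((val + 1) * val < scaled)	/* the driver's round-up step */
		val++;
	return val;
}

int main(void)
{
	printf("debtime 1000 -> register value %lu\n", debtime_to_reg(1000));
	return 0;
}
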
-
-static int vmk80xx_pwm_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- unsigned char *tx_buf;
- unsigned char *rx_buf;
- int reg[2];
- int n;
-
- down(&devpriv->limit_sem);
-
- tx_buf = devpriv->usb_tx_buf;
- rx_buf = devpriv->usb_rx_buf;
-
- reg[0] = VMK8061_PWM_REG1;
- reg[1] = VMK8061_PWM_REG2;
-
- tx_buf[0] = VMK8061_CMD_RD_PWM;
-
- for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(dev))
- break;
-
- data[n] = rx_buf[reg[0]] + 4 * rx_buf[reg[1]];
- }
-
- up(&devpriv->limit_sem);
-
- return n;
-}
-
-static int vmk80xx_pwm_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct vmk80xx_private *devpriv = dev->private;
- unsigned char *tx_buf;
- int reg[2];
- int cmd;
- int n;
-
- down(&devpriv->limit_sem);
-
- tx_buf = devpriv->usb_tx_buf;
-
- reg[0] = VMK8061_PWM_REG1;
- reg[1] = VMK8061_PWM_REG2;
-
- cmd = VMK8061_CMD_OUT_PWM;
-
- /*
- * The following piece of code was translated from the inline
- * assembler code in the DLL source code.
- *
- * asm
- * mov eax, k ; k is the value (data[n])
- * and al, 03h ; al are the lower 8 bits of eax
- * mov lo, al ; lo is the low part (tx_buf[reg[0]])
- * mov eax, k
- * shr eax, 2 ; right shift eax register by 2
- * mov hi, al ; hi is the high part (tx_buf[reg[1]])
- * end;
- */
- for (n = 0; n < insn->n; n++) {
- tx_buf[reg[0]] = (unsigned char)(data[n] & 0x03);
- tx_buf[reg[1]] = (unsigned char)(data[n] >> 2) & 0xff;
-
- if (vmk80xx_write_packet(dev, cmd))
- break;
- }
-
- up(&devpriv->limit_sem);
-
- return n;
-}
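
/*
 * A minimal user-space sketch (not part of the removed driver) of the PWM
 * value split that the assembler-derived loop above performs: the two least
 * significant bits go into the first register byte and the remaining bits
 * into the second, so the read path can reassemble the value as lo + 4 * hi
 * (see vmk80xx_pwm_insn_read() above). Names and the sample value are
 * illustrative assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned int k = 0x2a5;			/* example 10-bit PWM value */
	unsigned char lo = k & 0x03;		/* tx_buf[VMK8061_PWM_REG1] */
	unsigned char hi = (k >> 2) & 0xff;	/* tx_buf[VMK8061_PWM_REG2] */

	printf("lo=%u hi=%u reassembled=%u\n", lo, hi, lo + 4 * hi);
	return 0;
}
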
-
-static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
-{
- struct vmk80xx_private *devpriv = dev->private;
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
-
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
-
- if (!devpriv->ep_rx || !devpriv->ep_tx)
- return -ENODEV;
-
- return 0;
-}
-
-static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
-{
- struct vmk80xx_private *devpriv = dev->private;
- size_t size;
-
- size = le16_to_cpu(devpriv->ep_rx->wMaxPacketSize);
- devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_rx_buf)
- return -ENOMEM;
-
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
- devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int vmk80xx_init_subdevices(struct comedi_device *dev)
-{
- const struct vmk80xx_board *board = dev->board_ptr;
- struct vmk80xx_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- int n_subd;
- int ret;
-
- down(&devpriv->limit_sem);
-
- if (devpriv->model == VMK8055_MODEL)
- n_subd = 5;
- else
- n_subd = 6;
- ret = comedi_alloc_subdevices(dev, n_subd);
- if (ret) {
- up(&devpriv->limit_sem);
- return ret;
- }
-
- /* Analog input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = board->ai_nchans;
- s->maxdata = board->ai_maxdata;
- s->range_table = board->range;
- s->insn_read = vmk80xx_ai_insn_read;
-
- /* Analog output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->ao_nchans;
- s->maxdata = 0x00ff;
- s->range_table = board->range;
- s->insn_write = vmk80xx_ao_insn_write;
- if (devpriv->model == VMK8061_MODEL) {
- s->subdev_flags |= SDF_READABLE;
- s->insn_read = vmk80xx_ao_insn_read;
- }
-
- /* Digital input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->di_nchans;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = vmk80xx_di_insn_bits;
-
- /* Digital output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = vmk80xx_do_insn_bits;
-
- /* Counter subdevice */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 2;
- s->maxdata = board->cnt_maxdata;
- s->insn_read = vmk80xx_cnt_insn_read;
- s->insn_config = vmk80xx_cnt_insn_config;
- if (devpriv->model == VMK8055_MODEL) {
- s->subdev_flags |= SDF_WRITABLE;
- s->insn_write = vmk80xx_cnt_insn_write;
- }
-
- /* PWM subdevice */
- if (devpriv->model == VMK8061_MODEL) {
- s = &dev->subdevices[5];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = board->pwm_nchans;
- s->maxdata = board->pwm_maxdata;
- s->insn_read = vmk80xx_pwm_insn_read;
- s->insn_write = vmk80xx_pwm_insn_write;
- }
-
- up(&devpriv->limit_sem);
-
- return 0;
-}
-
-static int vmk80xx_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- const struct vmk80xx_board *board = NULL;
- struct vmk80xx_private *devpriv;
- int ret;
-
- if (context < ARRAY_SIZE(vmk80xx_boardinfo))
- board = &vmk80xx_boardinfo[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
- dev->board_name = board->name;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- devpriv->model = board->model;
-
- ret = vmk80xx_find_usb_endpoints(dev);
- if (ret)
- return ret;
-
- ret = vmk80xx_alloc_usb_buffers(dev);
- if (ret)
- return ret;
-
- sema_init(&devpriv->limit_sem, 8);
-
- usb_set_intfdata(intf, devpriv);
-
- if (devpriv->model == VMK8055_MODEL)
- vmk80xx_reset_device(dev);
-
- return vmk80xx_init_subdevices(dev);
-}
-
-static void vmk80xx_detach(struct comedi_device *dev)
-{
- struct usb_interface *intf = comedi_to_usb_interface(dev);
- struct vmk80xx_private *devpriv = dev->private;
-
- if (!devpriv)
- return;
-
- down(&devpriv->limit_sem);
-
- usb_set_intfdata(intf, NULL);
-
- kfree(devpriv->usb_rx_buf);
- kfree(devpriv->usb_tx_buf);
-
- up(&devpriv->limit_sem);
-}
-
-static struct comedi_driver vmk80xx_driver = {
- .module = THIS_MODULE,
- .driver_name = "vmk80xx",
- .auto_attach = vmk80xx_auto_attach,
- .detach = vmk80xx_detach,
-};
-
-static int vmk80xx_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- return comedi_usb_auto_config(intf, &vmk80xx_driver, id->driver_info);
-}
-
-static const struct usb_device_id vmk80xx_usb_id_table[] = {
- { USB_DEVICE(0x10cf, 0x5500), .driver_info = DEVICE_VMK8055 },
- { USB_DEVICE(0x10cf, 0x5501), .driver_info = DEVICE_VMK8055 },
- { USB_DEVICE(0x10cf, 0x5502), .driver_info = DEVICE_VMK8055 },
- { USB_DEVICE(0x10cf, 0x5503), .driver_info = DEVICE_VMK8055 },
- { USB_DEVICE(0x10cf, 0x8061), .driver_info = DEVICE_VMK8061 },
- { USB_DEVICE(0x10cf, 0x8062), .driver_info = DEVICE_VMK8061 },
- { USB_DEVICE(0x10cf, 0x8063), .driver_info = DEVICE_VMK8061 },
- { USB_DEVICE(0x10cf, 0x8064), .driver_info = DEVICE_VMK8061 },
- { USB_DEVICE(0x10cf, 0x8065), .driver_info = DEVICE_VMK8061 },
- { USB_DEVICE(0x10cf, 0x8066), .driver_info = DEVICE_VMK8061 },
- { USB_DEVICE(0x10cf, 0x8067), .driver_info = DEVICE_VMK8061 },
- { USB_DEVICE(0x10cf, 0x8068), .driver_info = DEVICE_VMK8061 },
- { }
-};
-MODULE_DEVICE_TABLE(usb, vmk80xx_usb_id_table);
-
-static struct usb_driver vmk80xx_usb_driver = {
- .name = "vmk80xx",
- .id_table = vmk80xx_usb_id_table,
- .probe = vmk80xx_usb_probe,
- .disconnect = comedi_usb_auto_unconfig,
-};
-module_comedi_usb_driver(vmk80xx_driver, vmk80xx_usb_driver);
-
-MODULE_AUTHOR("Manuel Gebele <forensixs@gmx.de>");
-MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver");
-MODULE_SUPPORTED_DEVICE("K8055/K8061 aka VM110/VM140");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/z8536.h b/drivers/staging/comedi/drivers/z8536.h
deleted file mode 100644
index 7be53109cc8d..000000000000
--- a/drivers/staging/comedi/drivers/z8536.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Z8536 CIO Internal registers
- */
-
-#ifndef _Z8536_H
-#define _Z8536_H
-
-/* Master Interrupt Control register */
-#define Z8536_INT_CTRL_REG 0x00
-#define Z8536_INT_CTRL_MIE BIT(7) /* Master Interrupt Enable */
-#define Z8536_INT_CTRL_DLC BIT(6) /* Disable Lower Chain */
-#define Z8536_INT_CTRL_NV BIT(5) /* No Vector */
-#define Z8536_INT_CTRL_PA_VIS BIT(4) /* Port A Vect Inc Status */
-#define Z8536_INT_CTRL_PB_VIS BIT(3) /* Port B Vect Inc Status */
-#define Z8536_INT_CTRL_VT_VIS BIT(2) /* C/T Vect Inc Status */
-#define Z8536_INT_CTRL_RJA BIT(1) /* Right Justified Addresses */
-#define Z8536_INT_CTRL_RESET BIT(0) /* Reset */
-
-/* Master Configuration Control register */
-#define Z8536_CFG_CTRL_REG 0x01
-#define Z8536_CFG_CTRL_PBE BIT(7) /* Port B Enable */
-#define Z8536_CFG_CTRL_CT1E BIT(6) /* C/T 1 Enable */
-#define Z8536_CFG_CTRL_CT2E BIT(5) /* C/T 2 Enable */
-#define Z8536_CFG_CTRL_PCE_CT3E BIT(4) /* Port C & C/T 3 Enable */
-#define Z8536_CFG_CTRL_PLC BIT(3) /* Port A/B Link Control */
-#define Z8536_CFG_CTRL_PAE BIT(2) /* Port A Enable */
-#define Z8536_CFG_CTRL_LC_INDEP (0 << 0)/* C/Ts Independent */
-#define Z8536_CFG_CTRL_LC_GATE (1 << 0)/* C/T 1 Out Gates C/T 2 */
-#define Z8536_CFG_CTRL_LC_TRIG (2 << 0)/* C/T 1 Out Triggers C/T 2 */
-#define Z8536_CFG_CTRL_LC_CLK (3 << 0)/* C/T 1 Out Clocks C/T 2 */
-#define Z8536_CFG_CTRL_LC_MASK (3 << 0)/* C/T Link Control mask */
-
-/* Interrupt Vector registers */
-#define Z8536_PA_INT_VECT_REG 0x02
-#define Z8536_PB_INT_VECT_REG 0x03
-#define Z8536_CT_INT_VECT_REG 0x04
-#define Z8536_CURR_INT_VECT_REG 0x1f
-
-/* Port A/B & Counter/Timer 1/2/3 Command and Status registers */
-#define Z8536_PA_CMDSTAT_REG 0x08
-#define Z8536_PB_CMDSTAT_REG 0x09
-#define Z8536_CT1_CMDSTAT_REG 0x0a
-#define Z8536_CT2_CMDSTAT_REG 0x0b
-#define Z8536_CT3_CMDSTAT_REG 0x0c
-#define Z8536_CT_CMDSTAT_REG(x) (0x0a + (x))
-#define Z8536_CMD_NULL (0 << 5)/* Null Code */
-#define Z8536_CMD_CLR_IP_IUS (1 << 5)/* Clear IP & IUS */
-#define Z8536_CMD_SET_IUS (2 << 5)/* Set IUS */
-#define Z8536_CMD_CLR_IUS (3 << 5)/* Clear IUS */
-#define Z8536_CMD_SET_IP (4 << 5)/* Set IP */
-#define Z8536_CMD_CLR_IP (5 << 5)/* Clear IP */
-#define Z8536_CMD_SET_IE (6 << 5)/* Set IE */
-#define Z8536_CMD_CLR_IE (7 << 5)/* Clear IE */
-#define Z8536_CMD_MASK (7 << 5)
-
-#define Z8536_STAT_IUS BIT(7) /* Interrupt Under Service */
-#define Z8536_STAT_IE BIT(6) /* Interrupt Enable */
-#define Z8536_STAT_IP BIT(5) /* Interrupt Pending */
-#define Z8536_STAT_ERR BIT(4) /* Interrupt Error */
-#define Z8536_STAT_IE_IP (Z8536_STAT_IE | Z8536_STAT_IP)
-
-#define Z8536_PAB_STAT_ORE BIT(3) /* Output Register Empty */
-#define Z8536_PAB_STAT_IRF BIT(2) /* Input Register Full */
-#define Z8536_PAB_STAT_PMF BIT(1) /* Pattern Match Flag */
-#define Z8536_PAB_CMDSTAT_IOE BIT(0) /* Interrupt On Error */
-
-#define Z8536_CT_CMD_RCC BIT(3) /* Read Counter Control */
-#define Z8536_CT_CMDSTAT_GCB BIT(2) /* Gate Command Bit */
-#define Z8536_CT_CMD_TCB BIT(1) /* Trigger Command Bit */
-#define Z8536_CT_STAT_CIP BIT(0) /* Count In Progress */
-
-/* Port Data registers */
-#define Z8536_PA_DATA_REG 0x0d
-#define Z8536_PB_DATA_REG 0x0e
-#define Z8536_PC_DATA_REG 0x0f
-
-/* Counter/Timer 1/2/3 Current Count registers */
-#define Z8536_CT1_VAL_MSB_REG 0x10
-#define Z8536_CT1_VAL_LSB_REG 0x11
-#define Z8536_CT2_VAL_MSB_REG 0x12
-#define Z8536_CT2_VAL_LSB_REG 0x13
-#define Z8536_CT3_VAL_MSB_REG 0x14
-#define Z8536_CT3_VAL_LSB_REG 0x15
-#define Z8536_CT_VAL_MSB_REG(x) (0x10 + ((x) * 2))
-#define Z8536_CT_VAL_LSB_REG(x) (0x11 + ((x) * 2))
-
-/* Counter/Timer 1/2/3 Time Constant registers */
-#define Z8536_CT1_RELOAD_MSB_REG 0x16
-#define Z8536_CT1_RELOAD_LSB_REG 0x17
-#define Z8536_CT2_RELOAD_MSB_REG 0x18
-#define Z8536_CT2_RELOAD_LSB_REG 0x19
-#define Z8536_CT3_RELOAD_MSB_REG 0x1a
-#define Z8536_CT3_RELOAD_LSB_REG 0x1b
-#define Z8536_CT_RELOAD_MSB_REG(x) (0x16 + ((x) * 2))
-#define Z8536_CT_RELOAD_LSB_REG(x) (0x17 + ((x) * 2))
-
-/* Counter/Timer 1/2/3 Mode Specification registers */
-#define Z8536_CT1_MODE_REG 0x1c
-#define Z8536_CT2_MODE_REG 0x1d
-#define Z8536_CT3_MODE_REG 0x1e
-#define Z8536_CT_MODE_REG(x) (0x1c + (x))
-#define Z8536_CT_MODE_CSC BIT(7) /* Continuous/Single Cycle */
-#define Z8536_CT_MODE_EOE BIT(6) /* External Output Enable */
-#define Z8536_CT_MODE_ECE BIT(5) /* External Count Enable */
-#define Z8536_CT_MODE_ETE BIT(4) /* External Trigger Enable */
-#define Z8536_CT_MODE_EGE BIT(3) /* External Gate Enable */
-#define Z8536_CT_MODE_REB BIT(2) /* Retrigger Enable Bit */
-#define Z8536_CT_MODE_DCS_PULSE (0 << 0)/* Duty Cycle - Pulse */
-#define Z8536_CT_MODE_DCS_ONESHOT (1 << 0)/* Duty Cycle - One-Shot */
-#define Z8536_CT_MODE_DCS_SQRWAVE (2 << 0)/* Duty Cycle - Square Wave */
-#define Z8536_CT_MODE_DCS_DO_NOT_USE (3 << 0)/* Duty Cycle - Do Not Use */
-#define Z8536_CT_MODE_DCS_MASK (3 << 0)/* Duty Cycle mask */
-
-/* Port A/B Mode Specification registers */
-#define Z8536_PA_MODE_REG 0x20
-#define Z8536_PB_MODE_REG 0x28
-#define Z8536_PAB_MODE_PTS_BIT (0 << 6)/* Bit Port */
-#define Z8536_PAB_MODE_PTS_INPUT (1 << 6)/* Input Port */
-#define Z8536_PAB_MODE_PTS_OUTPUT (2 << 6)/* Output Port */
-#define Z8536_PAB_MODE_PTS_BIDIR (3 << 6)/* Bidirectional Port */
-#define Z8536_PAB_MODE_PTS_MASK (3 << 6)/* Port Type Select mask */
-#define Z8536_PAB_MODE_ITB BIT(5) /* Interrupt on Two Bytes */
-#define Z8536_PAB_MODE_SB BIT(4) /* Single Buffered mode */
-#define Z8536_PAB_MODE_IMO BIT(3) /* Interrupt on Match Only */
-#define Z8536_PAB_MODE_PMS_DISABLE (0 << 1)/* Disable Pattern Match */
-#define Z8536_PAB_MODE_PMS_AND (1 << 1)/* "AND" mode */
-#define Z8536_PAB_MODE_PMS_OR (2 << 1)/* "OR" mode */
-#define Z8536_PAB_MODE_PMS_OR_PEV (3 << 1)/* "OR-Priority" mode */
-#define Z8536_PAB_MODE_PMS_MASK (3 << 1)/* Pattern Mode mask */
-#define Z8536_PAB_MODE_LPM BIT(0) /* Latch on Pattern Match */
-#define Z8536_PAB_MODE_DTE BIT(0) /* Deskew Timer Enabled */
-
-/* Port A/B Handshake Specification registers */
-#define Z8536_PA_HANDSHAKE_REG 0x21
-#define Z8536_PB_HANDSHAKE_REG 0x29
-#define Z8536_PAB_HANDSHAKE_HST_INTER (0 << 6)/* Interlocked Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_STROBED (1 << 6)/* Strobed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_PULSED (2 << 6)/* Pulsed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_3WIRE (3 << 6)/* Three-Wire Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_MASK (3 << 6)/* Handshake Type mask */
-#define Z8536_PAB_HANDSHAKE_RWS_DISABLE (0 << 3)/* Req/Wait Disabled */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT (1 << 3)/* Output Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_INWAIT (3 << 3)/* Input Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_SPREQ (4 << 3)/* Special Request */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ	(5 << 3)/* Output Request */
-#define Z8536_PAB_HANDSHAKE_RWS_INREQ (7 << 3)/* Input Request */
-#define Z8536_PAB_HANDSHAKE_RWS_MASK (7 << 3)/* Req/Wait mask */
-#define Z8536_PAB_HANDSHAKE_DESKEW(x) ((x) << 0)/* Deskew Time */
-#define Z8536_PAB_HANDSHAKE_DESKEW_MASK (3 << 0)/* Deskew Time mask */
-
-/*
- * Port A/B/C Data Path Polarity registers
- *
- * 0 = Non-Inverting
- * 1 = Inverting
- */
-#define Z8536_PA_DPP_REG 0x22
-#define Z8536_PB_DPP_REG 0x2a
-#define Z8536_PC_DPP_REG 0x05
-
-/*
- * Port A/B/C Data Direction registers
- *
- * 0 = Output bit
- * 1 = Input bit
- */
-#define Z8536_PA_DD_REG 0x23
-#define Z8536_PB_DD_REG 0x2b
-#define Z8536_PC_DD_REG 0x06
-
-/*
- * Port A/B/C Special I/O Control registers
- *
- * 0 = Normal Input or Output
- * 1 = Output with open drain or Input with 1's catcher
- */
-#define Z8536_PA_SIO_REG 0x24
-#define Z8536_PB_SIO_REG 0x2c
-#define Z8536_PC_SIO_REG 0x07
-
-/*
- * Port A/B Pattern Polarity/Transition/Mask registers
- *
- * PM PT PP Pattern Specification
- * -- -- -- -------------------------------------
- * 0 0 x Bit masked off
- * 0 1 x Any transition
- * 1 0 0 Zero (low-level)
- * 1 0 1 One (high-level)
- * 1 1 0 One-to-zero transition (falling-edge)
- * 1 1 1 Zero-to-one transition (rising-edge)
- */
-#define Z8536_PA_PP_REG 0x25
-#define Z8536_PB_PP_REG 0x2d
-
-#define Z8536_PA_PT_REG 0x26
-#define Z8536_PB_PT_REG 0x2e
-
-#define Z8536_PA_PM_REG 0x27
-#define Z8536_PB_PM_REG 0x2f
-
-#endif /* _Z8536_H */
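
/*
 * A brief illustrative composition (not part of the original header) showing
 * how the Counter/Timer mode bits above combine into a single mode byte:
 * continuous operation, external output enabled, square-wave duty cycle.
 * The include of <linux/bits.h> for BIT() and the variable name are
 * assumptions made for this sketch only.
 */
#include <linux/bits.h>
#include "z8536.h"

static const unsigned char z8536_example_ct_mode =
	Z8536_CT_MODE_CSC | Z8536_CT_MODE_EOE | Z8536_CT_MODE_DCS_SQRWAVE;
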
diff --git a/drivers/staging/comedi/kcomedilib/Makefile b/drivers/staging/comedi/kcomedilib/Makefile
deleted file mode 100644
index 3aff8ed08e2d..000000000000
--- a/drivers/staging/comedi/kcomedilib/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
-
-obj-$(CONFIG_COMEDI_KCOMEDILIB) += kcomedilib.o
-
-kcomedilib-objs := kcomedilib_main.o
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
deleted file mode 100644
index d0a8a28edd36..000000000000
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * kcomedilib/kcomedilib_main.c
- * a comedilib interface for kernel modules
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fcntl.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
-
-MODULE_AUTHOR("David Schleef <ds@schleef.org>");
-MODULE_DESCRIPTION("Comedi kernel library");
-MODULE_LICENSE("GPL");
-
-struct comedi_device *comedi_open(const char *filename)
-{
- struct comedi_device *dev, *retval = NULL;
- unsigned int minor;
-
- if (strncmp(filename, "/dev/comedi", 11) != 0)
- return NULL;
-
- if (kstrtouint(filename + 11, 0, &minor))
- return NULL;
-
- if (minor >= COMEDI_NUM_BOARD_MINORS)
- return NULL;
-
- dev = comedi_dev_get_from_minor(minor);
- if (!dev)
- return NULL;
-
- down_read(&dev->attach_lock);
- if (dev->attached)
- retval = dev;
- else
- retval = NULL;
- up_read(&dev->attach_lock);
-
- if (!retval)
- comedi_dev_put(dev);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(comedi_open);
-
-int comedi_close(struct comedi_device *dev)
-{
- comedi_dev_put(dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_close);
-
-static int comedi_do_insn(struct comedi_device *dev,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_subdevice *s;
- int ret;
-
- mutex_lock(&dev->mutex);
-
- if (!dev->attached) {
- ret = -EINVAL;
- goto error;
- }
-
- /* a subdevice instruction */
- if (insn->subdev >= dev->n_subdevices) {
- ret = -EINVAL;
- goto error;
- }
- s = &dev->subdevices[insn->subdev];
-
- if (s->type == COMEDI_SUBD_UNUSED) {
- dev_err(dev->class_dev,
- "%d not usable subdevice\n", insn->subdev);
- ret = -EIO;
- goto error;
- }
-
- /* XXX check lock */
-
- ret = comedi_check_chanlist(s, 1, &insn->chanspec);
- if (ret < 0) {
- dev_err(dev->class_dev, "bad chanspec\n");
- ret = -EINVAL;
- goto error;
- }
-
- if (s->busy) {
- ret = -EBUSY;
- goto error;
- }
- s->busy = dev;
-
- switch (insn->insn) {
- case INSN_BITS:
- ret = s->insn_bits(dev, s, insn, data);
- break;
- case INSN_CONFIG:
- /* XXX should check instruction length */
- ret = s->insn_config(dev, s, insn, data);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- s->busy = NULL;
-error:
-
- mutex_unlock(&dev->mutex);
- return ret;
-}
-
-int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
- unsigned int chan, unsigned int *io)
-{
- struct comedi_insn insn;
- unsigned int data[2];
- int ret;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_CONFIG;
- insn.n = 2;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, 0, 0);
- data[0] = INSN_CONFIG_DIO_QUERY;
- data[1] = 0;
- ret = comedi_do_insn(dev, &insn, data);
- if (ret >= 0)
- *io = data[1];
- return ret;
-}
-EXPORT_SYMBOL_GPL(comedi_dio_get_config);
-
-int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
- unsigned int chan, unsigned int io)
-{
- struct comedi_insn insn;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_CONFIG;
- insn.n = 1;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, 0, 0);
-
- return comedi_do_insn(dev, &insn, &io);
-}
-EXPORT_SYMBOL_GPL(comedi_dio_config);
-
-int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
- unsigned int mask, unsigned int *bits,
- unsigned int base_channel)
-{
- struct comedi_insn insn;
- unsigned int data[2];
- unsigned int n_chan;
- unsigned int shift;
- int ret;
-
- base_channel = CR_CHAN(base_channel);
- n_chan = comedi_get_n_channels(dev, subdev);
- if (base_channel >= n_chan)
- return -EINVAL;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_BITS;
- insn.chanspec = base_channel;
- insn.n = 2;
- insn.subdev = subdev;
-
- data[0] = mask;
- data[1] = *bits;
-
- /*
- * Most drivers ignore the base channel in insn->chanspec.
- * Fix this here if the subdevice has <= 32 channels.
- */
- if (n_chan <= 32) {
- shift = base_channel;
- if (shift) {
- insn.chanspec = 0;
- data[0] <<= shift;
- data[1] <<= shift;
- }
- } else {
- shift = 0;
- }
-
- ret = comedi_do_insn(dev, &insn, data);
- *bits = data[1] >> shift;
- return ret;
-}
-EXPORT_SYMBOL_GPL(comedi_dio_bitfield2);
-
-int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
- unsigned int subd)
-{
- struct comedi_subdevice *s;
- int ret = -ENODEV;
-
- down_read(&dev->attach_lock);
- if (dev->attached)
- for (; subd < dev->n_subdevices; subd++) {
- s = &dev->subdevices[subd];
- if (s->type == type) {
- ret = subd;
- break;
- }
- }
- up_read(&dev->attach_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(comedi_find_subdevice_by_type);
-
-int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
-{
- int n;
-
- down_read(&dev->attach_lock);
- if (!dev->attached || subdevice >= dev->n_subdevices)
- n = 0;
- else
- n = dev->subdevices[subdevice].n_chan;
- up_read(&dev->attach_lock);
-
- return n;
-}
-EXPORT_SYMBOL_GPL(comedi_get_n_channels);
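
/*
 * A hedged usage sketch (not part of this file) of the kcomedilib API
 * exported above: open the first comedi device, find a DIO subdevice,
 * configure channel 0 as an output and raise it. The device path, channel
 * choice, function name and simplified error handling are illustrative
 * assumptions, not part of the original code.
 */
#include <linux/errno.h>

#include "../comedi.h"
#include "../comedilib.h"

static int example_raise_dio_bit(void)
{
	struct comedi_device *dev;
	unsigned int bits = 1;		/* drive channel 0 high */
	int subdev, ret;

	dev = comedi_open("/dev/comedi0");
	if (!dev)
		return -ENODEV;

	subdev = comedi_find_subdevice_by_type(dev, COMEDI_SUBD_DIO, 0);
	if (subdev < 0) {
		comedi_close(dev);
		return subdev;
	}

	ret = comedi_dio_config(dev, subdev, 0, COMEDI_OUTPUT);
	if (ret >= 0)
		ret = comedi_dio_bitfield2(dev, subdev, 0x01, &bits, 0);

	comedi_close(dev);
	return ret < 0 ? ret : 0;
}
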
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
deleted file mode 100644
index 91dea25b5724..000000000000
--- a/drivers/staging/comedi/proc.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * /proc interface for comedi
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * This is some serious bloatware.
- *
- * Taken from Dave A.'s PCL-711 driver, 'cuz I thought it
- * was cool.
- */
-
-#include "comedidev.h"
-#include "comedi_internal.h"
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-static int comedi_read(struct seq_file *m, void *v)
-{
- int i;
- int devices_q = 0;
- struct comedi_driver *driv;
-
- seq_printf(m, "comedi version " COMEDI_RELEASE "\nformat string: %s\n",
- "\"%2d: %-20s %-20s %4d\", i, driver_name, board_name, n_subdevices");
-
- for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
- struct comedi_device *dev = comedi_dev_get_from_minor(i);
-
- if (!dev)
- continue;
-
- down_read(&dev->attach_lock);
- if (dev->attached) {
- devices_q = 1;
- seq_printf(m, "%2d: %-20s %-20s %4d\n",
- i, dev->driver->driver_name,
- dev->board_name, dev->n_subdevices);
- }
- up_read(&dev->attach_lock);
- comedi_dev_put(dev);
- }
- if (!devices_q)
- seq_puts(m, "no devices\n");
-
- mutex_lock(&comedi_drivers_list_lock);
- for (driv = comedi_drivers; driv; driv = driv->next) {
- seq_printf(m, "%s:\n", driv->driver_name);
- for (i = 0; i < driv->num_names; i++)
- seq_printf(m, " %s\n",
- *(char **)((char *)driv->board_name +
- i * driv->offset));
-
- if (!driv->num_names)
- seq_printf(m, " %s\n", driv->driver_name);
- }
- mutex_unlock(&comedi_drivers_list_lock);
-
- return 0;
-}
-
-/*
- * seq_file wrappers for procfile show routines.
- */
-static int comedi_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, comedi_read, NULL);
-}
-
-static const struct file_operations comedi_proc_fops = {
- .open = comedi_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void comedi_proc_init(void)
-{
- proc_create("comedi", 0644, NULL, &comedi_proc_fops);
-}
-
-void comedi_proc_cleanup(void)
-{
- remove_proc_entry("comedi", NULL);
-}
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
deleted file mode 100644
index ce3a58a7a171..000000000000
--- a/drivers/staging/comedi/range.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * comedi/range.c
- * comedi routines for voltage ranges
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/uaccess.h>
-#include "comedidev.h"
-#include "comedi_internal.h"
-
-const struct comedi_lrange range_bipolar10 = { 1, {BIP_RANGE(10)} };
-EXPORT_SYMBOL_GPL(range_bipolar10);
-const struct comedi_lrange range_bipolar5 = { 1, {BIP_RANGE(5)} };
-EXPORT_SYMBOL_GPL(range_bipolar5);
-const struct comedi_lrange range_bipolar2_5 = { 1, {BIP_RANGE(2.5)} };
-EXPORT_SYMBOL_GPL(range_bipolar2_5);
-const struct comedi_lrange range_unipolar10 = { 1, {UNI_RANGE(10)} };
-EXPORT_SYMBOL_GPL(range_unipolar10);
-const struct comedi_lrange range_unipolar5 = { 1, {UNI_RANGE(5)} };
-EXPORT_SYMBOL_GPL(range_unipolar5);
-const struct comedi_lrange range_unipolar2_5 = { 1, {UNI_RANGE(2.5)} };
-EXPORT_SYMBOL_GPL(range_unipolar2_5);
-const struct comedi_lrange range_0_20mA = { 1, {RANGE_mA(0, 20)} };
-EXPORT_SYMBOL_GPL(range_0_20mA);
-const struct comedi_lrange range_4_20mA = { 1, {RANGE_mA(4, 20)} };
-EXPORT_SYMBOL_GPL(range_4_20mA);
-const struct comedi_lrange range_0_32mA = { 1, {RANGE_mA(0, 32)} };
-EXPORT_SYMBOL_GPL(range_0_32mA);
-const struct comedi_lrange range_unknown = { 1, {{0, 1000000, UNIT_none} } };
-EXPORT_SYMBOL_GPL(range_unknown);
-
-/*
- * COMEDI_RANGEINFO ioctl
- * range information
- *
- * arg:
- * pointer to comedi_rangeinfo structure
- *
- * reads:
- * comedi_rangeinfo structure
- *
- * writes:
- * array of comedi_krange structures to rangeinfo->range_ptr pointer
- */
-int do_rangeinfo_ioctl(struct comedi_device *dev,
- struct comedi_rangeinfo __user *arg)
-{
- struct comedi_rangeinfo it;
- int subd, chan;
- const struct comedi_lrange *lr;
- struct comedi_subdevice *s;
-
- if (copy_from_user(&it, arg, sizeof(struct comedi_rangeinfo)))
- return -EFAULT;
- subd = (it.range_type >> 24) & 0xf;
- chan = (it.range_type >> 16) & 0xff;
-
- if (!dev->attached)
- return -EINVAL;
- if (subd >= dev->n_subdevices)
- return -EINVAL;
- s = &dev->subdevices[subd];
- if (s->range_table) {
- lr = s->range_table;
- } else if (s->range_table_list) {
- if (chan >= s->n_chan)
- return -EINVAL;
- lr = s->range_table_list[chan];
- } else {
- return -EINVAL;
- }
-
- if (RANGE_LENGTH(it.range_type) != lr->length) {
- dev_dbg(dev->class_dev,
- "wrong length %d should be %d (0x%08x)\n",
- RANGE_LENGTH(it.range_type),
- lr->length, it.range_type);
- return -EINVAL;
- }
-
- if (copy_to_user(it.range_ptr, lr->range,
- sizeof(struct comedi_krange) * lr->length))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * comedi_check_chanlist() - Validate each element in a chanlist.
- * @s: comedi_subdevice struct
- * @n: number of elements in the chanlist
- * @chanlist: the chanlist to validate
- *
- * Each element consists of a channel number, a range index, an analog
- * reference type and some flags, all packed into an unsigned int.
- *
- * This checks that the channel number and range index are supported by
- * the comedi subdevice. It does not check whether the analog reference
- * type and the flags are supported. Drivers that care should check those
- * themselves.
- *
- * Return: %0 if all @chanlist elements are valid (success),
- * %-EINVAL if one or more elements are invalid.
- */
-int comedi_check_chanlist(struct comedi_subdevice *s, int n,
- unsigned int *chanlist)
-{
- struct comedi_device *dev = s->device;
- unsigned int chanspec;
- int chan, range_len, i;
-
- for (i = 0; i < n; i++) {
- chanspec = chanlist[i];
- chan = CR_CHAN(chanspec);
- if (s->range_table)
- range_len = s->range_table->length;
- else if (s->range_table_list && chan < s->n_chan)
- range_len = s->range_table_list[chan]->length;
- else
- range_len = 0;
- if (chan >= s->n_chan ||
- CR_RANGE(chanspec) >= range_len) {
- dev_warn(dev->class_dev,
- "bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
- i, chanspec, chan, range_len);
- return -EINVAL;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(comedi_check_chanlist);
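
/*
 * A brief illustrative sketch (not part of range.c): a chanlist element is a
 * chanspec packed with CR_PACK() and taken apart with CR_CHAN()/CR_RANGE(),
 * which are exactly the fields comedi_check_chanlist() validates above. The
 * include path and the function name are assumptions for this sketch.
 */
#include "comedi.h"

static unsigned int example_chanspec(void)
{
	/* channel 3, range index 1, ground reference */
	unsigned int spec = CR_PACK(3, 1, AREF_GROUND);

	return (CR_CHAN(spec) == 3 && CR_RANGE(spec) == 1) ? spec : 0;
}
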
diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig
deleted file mode 100644
index 3bbe9e122365..000000000000
--- a/drivers/staging/dgap/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DGAP
- tristate "Digi EPCA PCI products"
- default n
- depends on TTY && HAS_IOMEM
- ---help---
- Driver for the Digi International EPCA PCI based product line
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
deleted file mode 100644
index 0063d044ca71..000000000000
--- a/drivers/staging/dgap/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_DGAP) += dgap.o
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
deleted file mode 100644
index e17bde7bf416..000000000000
--- a/drivers/staging/dgap/dgap.c
+++ /dev/null
@@ -1,7135 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- */
-
-/*
- * In the original out-of-kernel Digi dgap driver, firmware
- * loading was done via handshaking between user land and the driver.
- *
- * For cards that support a concentrator (port expander),
- * I believe the concentrator itself told the card which
- * concentrator is actually attached and then that info
- * was used to tell user land which concentrator firmware
- * image was to be downloaded. I think even the BIOS or
- * FEP images required could change with the connection
- * of a particular concentrator.
- *
- * Since I have no access to any of these cards or
- * concentrators, I cannot put the correct concentrator
- * firmware file names into the firmware_info structure
- * as is now done for the BIOS and FEP images.
- *
- * I think, but am not certain, that the cards supporting
- * concentrators will function without them. So support
- * of these cards has been left in this driver.
- *
- * In order to fully support those cards, they would
- * either have to be acquired for dissection or maybe
- * Digi International could provide some assistance.
- */
-#undef DIGI_CONCENTRATORS_SUPPORTED
-
-#define pr_fmt(fmt) "dgap: " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h> /* For udelay */
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/sched.h>
-
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/ctype.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h> /* For read[bwl]/write[bwl] */
-
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/kdev_t.h>
-#include <linux/firmware.h>
-
-#include "dgap.h"
-
-/*
- * File operations permitted on Control/Management major.
- */
-static const struct file_operations dgap_board_fops = {
- .owner = THIS_MODULE,
-};
-
-static uint dgap_numboards;
-static struct board_t *dgap_board[MAXBOARDS];
-static ulong dgap_poll_counter;
-static int dgap_driver_state = DRIVER_INITIALIZED;
-static int dgap_poll_tick = 20; /* Poll interval - 20 ms */
-
-static struct class *dgap_class;
-
-static uint dgap_count = 500;
-
-/*
- * Poller stuff
- */
-static DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
-static ulong dgap_poll_time; /* Time of next poll */
-static uint dgap_poll_stop; /* Used to tell poller to stop */
-static struct timer_list dgap_poll_timer;
-
-/*
- SUPPORTED PRODUCTS
-
- Card Model Number of Ports Interface
- ----------------------------------------------------------------
- Acceleport Xem 4 - 64 (EIA232 & EIA422)
- Acceleport Xr 4 & 8 (EIA232)
- Acceleport Xr 920 4 & 8 (EIA232)
- Acceleport C/X 8 - 128 (EIA232)
- Acceleport EPC/X 8 - 224 (EIA232)
- Acceleport Xr/422 4 & 8 (EIA422)
- Acceleport 2r/920 2 (EIA232)
- Acceleport 4r/920 4 (EIA232)
- Acceleport 8r/920 8 (EIA232)
-
- IBM 8-Port Asynchronous PCI Adapter (EIA232)
- IBM 128-Port Asynchronous PCI Adapter (EIA232 & EIA422)
-*/
-
-static struct pci_device_id dgap_pci_tbl[] = {
- { DIGI_VID, PCI_DEV_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { DIGI_VID, PCI_DEV_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { DIGI_VID, PCI_DEV_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { DIGI_VID, PCI_DEV_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { DIGI_VID, PCI_DEV_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
- { DIGI_VID, PCI_DEV_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
- { DIGI_VID, PCI_DEV_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
- { DIGI_VID, PCI_DEV_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
- { DIGI_VID, PCI_DEV_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
- { DIGI_VID, PCI_DEV_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
- { DIGI_VID, PCI_DEV_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
- { DIGI_VID, PCI_DEV_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
- { DIGI_VID, PCI_DEV_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
- { DIGI_VID, PCI_DEV_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
- { DIGI_VID, PCI_DEV_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
- {0,} /* 0 terminated list. */
-};
-MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
-
-/*
- * A generic list of Product names and their board configuration info.
- */
-struct board_id {
- uint config_type;
- u8 *name;
- uint maxports;
- uint dpatype;
-};
-
-static struct board_id dgap_ids[] = {
- { PPCM, PCI_DEV_XEM_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
- { PCX, PCI_DEV_CX_NAME, 128, (T_CX|T_PCIBUS) },
- { PCX, PCI_DEV_CX_IBM_NAME, 128, (T_CX|T_PCIBUS) },
- { PEPC, PCI_DEV_EPCJ_NAME, 224, (T_EPC|T_PCIBUS) },
- { APORT2_920P, PCI_DEV_920_2_NAME, 2, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { APORT4_920P, PCI_DEV_920_4_NAME, 4, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { APORT8_920P, PCI_DEV_920_8_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { PAPORT8, PCI_DEV_XR_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { PAPORT8, PCI_DEV_XRJ_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { PAPORT8, PCI_DEV_XR_422_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { PAPORT8, PCI_DEV_XR_IBM_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { PAPORT8, PCI_DEV_XR_SAIP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { PAPORT8, PCI_DEV_XR_BULL_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { APORT8_920P, PCI_DEV_920_8_HP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
- { PPCM, PCI_DEV_XEM_HP_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
- {0,} /* 0 terminated list. */
-};
-
-struct firmware_info {
- u8 *conf_name; /* dgap.conf */
- u8 *bios_name; /* BIOS filename */
- u8 *fep_name; /* FEP filename */
- u8 *con_name; /* Concentrator filename FIXME*/
- int num; /* sequence number */
-};
-
-/*
- * Firmware - BIOS, FEP, and CONC filenames
- */
-static struct firmware_info fw_info[] = {
- { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 0 },
- { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 1 },
- { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 2 },
- { "dgap/dgap.conf", "dgap/pcibios.bin", "dgap/pcifep.bin", NULL, 3 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 4 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 5 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 6 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 7 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 8 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 9 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 10 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 11 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 12 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 13 },
- { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 14 },
- {NULL,}
-};
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgap_digi_init = {
- .digi_flags = DIGI_COOK, /* Flags */
- .digi_maxcps = 100, /* Max CPS */
- .digi_maxchar = 50, /* Max chars in print queue */
- .digi_bufsize = 100, /* Printer buffer size */
- .digi_onlen = 4, /* size of printer on string */
- .digi_offlen = 4, /* size of printer off string */
- .digi_onstr = "\033[5i", /* ANSI printer on string ] */
- .digi_offstr = "\033[4i", /* ANSI printer off string ] */
- .digi_term = "ansi" /* default terminal type */
-};
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.
- *
- * This defines a raw port at 9600 baud, 8 data bits, no parity,
- * 1 stop bit.
- */
-
-static struct ktermios dgap_default_termios = {
- .c_iflag = (DEFAULT_IFLAGS), /* iflags */
- .c_oflag = (DEFAULT_OFLAGS), /* oflags */
- .c_cflag = (DEFAULT_CFLAGS), /* cflags */
- .c_lflag = (DEFAULT_LFLAGS), /* lflags */
- .c_cc = INIT_C_CC,
- .c_line = 0,
-};
-
-/*
- * Our needed internal static variables from dgap_parse.c
- */
-static struct cnode dgap_head;
-#define MAXCWORD 200
-static char dgap_cword[MAXCWORD];
-
-struct toklist {
- int token;
- char *string;
-};
-
-static struct toklist dgap_brdtype[] = {
- { PCX, "Digi_AccelePort_C/X_PCI" },
- { PEPC, "Digi_AccelePort_EPC/X_PCI" },
- { PPCM, "Digi_AccelePort_Xem_PCI" },
- { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
- { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
- { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
- { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
- { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
- { 0, NULL }
-};
-
-static struct toklist dgap_tlist[] = {
- { BEGIN, "config_begin" },
- { END, "config_end" },
- { BOARD, "board" },
- { IO, "io" },
- { PCIINFO, "pciinfo" },
- { LINE, "line" },
- { CONC, "conc" },
- { CONC, "concentrator" },
- { CX, "cx" },
- { CX, "ccon" },
- { EPC, "epccon" },
- { EPC, "epc" },
- { MOD, "module" },
- { ID, "id" },
- { STARTO, "start" },
- { SPEED, "speed" },
- { CABLE, "cable" },
- { CONNECT, "connect" },
- { METHOD, "method" },
- { STATUS, "status" },
- { CUSTOM, "Custom" },
- { BASIC, "Basic" },
- { MEM, "mem" },
- { MEM, "memory" },
- { PORTS, "ports" },
- { MODEM, "modem" },
- { NPORTS, "nports" },
- { TTYN, "ttyname" },
- { CU, "cuname" },
- { PRINT, "prname" },
- { CMAJOR, "major" },
- { ALTPIN, "altpin" },
- { USEINTR, "useintr" },
- { TTSIZ, "ttysize" },
- { CHSIZ, "chsize" },
- { BSSIZ, "boardsize" },
- { UNTSIZ, "schedsize" },
- { F2SIZ, "f2200size" },
- { VPSIZ, "vpixsize" },
- { 0, NULL }
-};
-
-/*
- * Get a word from the input stream, also keeping track of the current line number.
- * Words are separated by whitespace.
- */
-static char *dgap_getword(char **in)
-{
- char *ret_ptr = *in;
-
- char *ptr = strpbrk(*in, " \t\n");
-
- /* If no word found, return null */
- if (!ptr)
- return NULL;
-
- /* Mark new location for our buffer */
- *ptr = '\0';
- *in = ptr + 1;
-
- /* Eat any extra spaces/tabs/newlines that might be present */
- while (*in && **in && ((**in == ' ') ||
- (**in == '\t') ||
- (**in == '\n'))) {
- **in = '\0';
- *in = *in + 1;
- }
-
- return ret_ptr;
-}
-
-
-/*
- * Get a token from the input file; return 0 if end of file is reached
- */
-static int dgap_gettok(char **in)
-{
- char *w;
- struct toklist *t;
-
- if (strstr(dgap_cword, "board")) {
- w = dgap_getword(in);
- if (!w)
- return 0;
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_brdtype; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
- }
- } else {
- while ((w = dgap_getword(in))) {
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
- }
- }
- }
-
- return 0;
-}
-
-/*
- * dgap_checknode: see if all the necessary info has been supplied for a node
- * before creating the next node.
- */
-static int dgap_checknode(struct cnode *p)
-{
- switch (p->type) {
- case LNODE:
- if (p->u.line.v_speed == 0) {
- pr_err("line speed not specified");
- return 1;
- }
- return 0;
-
- case CNODE:
- if (p->u.conc.v_speed == 0) {
- pr_err("concentrator line speed not specified");
- return 1;
- }
- if (p->u.conc.v_nport == 0) {
- pr_err("number of ports on concentrator not specified");
- return 1;
- }
- if (p->u.conc.v_id == 0) {
- pr_err("concentrator id letter not specified");
- return 1;
- }
- return 0;
-
- case MNODE:
- if (p->u.module.v_nport == 0) {
- pr_err("number of ports on EBI module not specified");
- return 1;
- }
- if (p->u.module.v_id == 0) {
- pr_err("EBI module id letter not specified");
- return 1;
- }
- return 0;
- }
- return 0;
-}
-
-/*
- * Given a board pointer, returns whether we should use interrupts or not.
- */
-static uint dgap_config_get_useintr(struct board_t *bd)
-{
- struct cnode *p;
-
- if (!bd)
- return 0;
-
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == INTRNODE) {
- /*
- * check for pcxr types.
- */
- return p->u.useintr;
- }
- }
-
- /* If not found, then don't turn on interrupts. */
- return 0;
-}
-
-/*
- * Given a board pointer, returns whether we turn on altpin or not.
- */
-static uint dgap_config_get_altpin(struct board_t *bd)
-{
- struct cnode *p;
-
- if (!bd)
- return 0;
-
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == ANODE) {
- /*
- * check for pcxr types.
- */
- return p->u.altpin;
- }
- }
-
-	/* If not found, then don't turn on altpin. */
- return 0;
-}
-
-/*
- * Given a specific type of board, if found, detaches the link and
- * returns the first occurrence in the list.
- */
-static struct cnode *dgap_find_config(int type, int bus, int slot)
-{
- struct cnode *p, *prev, *prev2, *found;
-
- p = &dgap_head;
-
- while (p->next) {
- prev = p;
- p = p->next;
-
- if (p->type != BNODE)
- continue;
-
- if (p->u.board.type != type)
- continue;
-
- if (p->u.board.v_pcibus &&
- p->u.board.pcibus != bus)
- continue;
-
- if (p->u.board.v_pcislot &&
- p->u.board.pcislot != slot)
- continue;
-
- found = p;
- /*
-		 * Keep walking through the list until we
-		 * find the next board.
- */
- while (p->next) {
- prev2 = p;
- p = p->next;
-
- if (p->type != BNODE)
- continue;
-
- /*
- * Mark the end of our 1 board
- * chain of configs.
- */
- prev2->next = NULL;
-
- /*
- * Link the "next" board to the
- * previous board, effectively
- * "unlinking" our board from
- * the main config.
- */
- prev->next = p;
-
- return found;
- }
- /*
- * It must be the last board in the list.
- */
- prev->next = NULL;
- return found;
- }
- return NULL;
-}
-
-/*
- * Given a board pointer, walks the config list, counting up
- * all ports the user specified should be on the board.
- * (This does NOT mean they are all actually present right now, though.)
- */
-static uint dgap_config_get_num_prts(struct board_t *bd)
-{
- int count = 0;
- struct cnode *p;
-
- if (!bd)
- return 0;
-
- for (p = bd->bd_config; p; p = p->next) {
-
- switch (p->type) {
- case BNODE:
- /*
- * check for pcxr types.
- */
- if (p->u.board.type > EPCFE)
- count += p->u.board.nport;
- break;
- case CNODE:
- count += p->u.conc.nport;
- break;
- case MNODE:
- count += p->u.module.nport;
- break;
- }
- }
- return count;
-}
-
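-/*
- * Build the per-board config string handed to the FEP.  Each line node
- * contributes a zero byte followed by its speed.  Each concentrator
- * contributes its port count, the port counts of any EM modules hanging
- * off it (every count but the last has 0x80 set), and then its speed.
- * The string is terminated with 0xff.
- */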
-static char *dgap_create_config_string(struct board_t *bd, char *string)
-{
- char *ptr = string;
- struct cnode *p;
- struct cnode *q;
- int speed;
-
- if (!bd) {
- *ptr = 0xff;
- return string;
- }
-
- for (p = bd->bd_config; p; p = p->next) {
-
- switch (p->type) {
- case LNODE:
- *ptr = '\0';
- ptr++;
- *ptr = p->u.line.speed;
- ptr++;
- break;
- case CNODE:
- /*
- * Because the EPC/con concentrators can have EM modules
- * hanging off of them, we have to walk ahead in the
- * list and keep adding the number of ports on each EM
- * to the config. UGH!
- */
- speed = p->u.conc.speed;
- q = p->next;
- if (q && (q->type == MNODE)) {
- *ptr = (p->u.conc.nport + 0x80);
- ptr++;
- p = q;
- while (q->next && (q->next->type) == MNODE) {
- *ptr = (q->u.module.nport + 0x80);
- ptr++;
- p = q;
- q = q->next;
- }
- *ptr = q->u.module.nport;
- ptr++;
- } else {
- *ptr = p->u.conc.nport;
- ptr++;
- }
-
- *ptr = speed;
- ptr++;
- break;
- }
- }
-
- *ptr = 0xff;
- return string;
-}
-
-/*
- * Parse a configuration file read into memory as a string.
- */
-static int dgap_parsefile(char **in)
-{
- struct cnode *p, *brd, *line, *conc;
- int rc;
- char *s;
- int linecnt = 0;
-
- p = &dgap_head;
- brd = line = conc = NULL;
-
- /* perhaps we are adding to an existing list? */
- while (p->next)
- p = p->next;
-
- /* file must start with a BEGIN */
- while ((rc = dgap_gettok(in)) != BEGIN) {
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
- }
-
- for (; ;) {
- int board_type = 0;
- int conc_type = 0;
- int module_type = 0;
-
- rc = dgap_gettok(in);
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
-
- switch (rc) {
- case BEGIN: /* should only be 1 begin */
- pr_err("unexpected config_begin\n");
- return -1;
-
- case END:
- return 0;
-
- case BOARD: /* board info */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
-
- p->type = BNODE;
- p->u.board.status = kstrdup("No", GFP_KERNEL);
- line = conc = NULL;
- brd = p;
- linecnt = -1;
-
- board_type = dgap_gettok(in);
- if (board_type == 0) {
-				pr_err("board type not specified");
- return -1;
- }
-
- p->u.board.type = board_type;
-
- break;
-
- case IO: /* i/o port */
- if (p->type != BNODE) {
- pr_err("IO port only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.portstr = kstrdup(s, GFP_KERNEL);
- if (kstrtol(s, 0, &p->u.board.port)) {
- pr_err("bad number for IO port");
- return -1;
- }
- p->u.board.v_port = 1;
- break;
-
- case MEM: /* memory address */
- if (p->type != BNODE) {
- pr_err("memory address only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.addr)) {
- pr_err("bad number for memory address");
- return -1;
- }
- p->u.board.v_addr = 1;
- break;
-
- case PCIINFO: /* pci information */
- if (p->type != BNODE) {
-				pr_err("pci info only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcibus)) {
- pr_err("bad number for pci bus");
- return -1;
- }
- p->u.board.v_pcibus = 1;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcislot)) {
- pr_err("bad number for pci slot");
- return -1;
- }
- p->u.board.v_pcislot = 1;
- break;
-
- case METHOD:
- if (p->type != BNODE) {
- pr_err("install method only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.method = kstrdup(s, GFP_KERNEL);
- p->u.board.v_method = 1;
- break;
-
- case STATUS:
- if (p->type != BNODE) {
- pr_err("config status only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.status = kstrdup(s, GFP_KERNEL);
- break;
-
- case NPORTS: /* number of ports */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.board.v_nport = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.conc.v_nport = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.module.v_nport = 1;
- } else {
-				pr_err("nports only valid for boards, concentrators, or modules");
- return -1;
- }
- break;
-
- case ID: /* letter ID used in tty name */
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
-
- p->u.board.status = kstrdup(s, GFP_KERNEL);
-
- if (p->type == CNODE) {
- p->u.conc.id = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_id = 1;
- } else if (p->type == MNODE) {
- p->u.module.id = kstrdup(s, GFP_KERNEL);
- p->u.module.v_id = 1;
- } else {
- pr_err("id only valid for concentrators or modules");
- return -1;
- }
- break;
-
- case STARTO: /* start offset of ID */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.board.v_start = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.conc.v_start = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.module.v_start = 1;
- } else {
-				pr_err("start only valid for boards, concentrators, or modules");
- return -1;
- }
- break;
-
- case TTYN: /* tty name prefix */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = TNODE;
-
- s = dgap_getword(in);
- if (!s) {
-				pr_err("unexpected end of file");
- return -1;
- }
- p->u.ttyname = kstrdup(s, GFP_KERNEL);
- if (!p->u.ttyname)
- return -1;
-
- break;
-
- case CU: /* cu name prefix */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = CUNODE;
-
- s = dgap_getword(in);
- if (!s) {
-				pr_err("unexpected end of file");
- return -1;
- }
- p->u.cuname = kstrdup(s, GFP_KERNEL);
- if (!p->u.cuname)
- return -1;
-
- break;
-
- case LINE: /* line information */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board before line info");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- pr_err("line not valid for PC/em");
- return -1;
- }
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = LNODE;
- conc = NULL;
- line = p;
- linecnt++;
- break;
-
- case CONC: /* concentrator information */
- if (dgap_checknode(p))
- return -1;
- if (!line) {
- pr_err("must specify line info before concentrator");
- return -1;
- }
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = CNODE;
- conc = p;
-
- if (linecnt)
- brd->u.board.conc2++;
- else
- brd->u.board.conc1++;
-
- conc_type = dgap_gettok(in);
- if (conc_type == 0 || (conc_type != CX &&
- conc_type != EPC)) {
-				pr_err("failed to set a type of concentrator");
- return -1;
- }
-
- p->u.conc.type = conc_type;
-
- break;
-
- case MOD: /* EBI module */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board info before EBI modules");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- linecnt = 0;
- break;
- default:
- if (!conc) {
- pr_err("must specify concentrator info before EBI module");
- return -1;
- }
- }
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = MNODE;
-
- if (linecnt)
- brd->u.board.module2++;
- else
- brd->u.board.module1++;
-
- module_type = dgap_gettok(in);
- if (module_type == 0 || (module_type != PORTS &&
- module_type != MODEM)) {
- pr_err("failed to set a type of module");
- return -1;
- }
-
- p->u.module.type = module_type;
-
- break;
-
- case CABLE:
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.line.cable = kstrdup(s, GFP_KERNEL);
- p->u.line.v_cable = 1;
- }
- break;
-
- case SPEED: /* sync line speed indication */
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.line.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.line.v_speed = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.conc.v_speed = 1;
- } else {
- pr_err("speed valid only for lines or concentrators.");
- return -1;
- }
- break;
-
- case CONNECT:
- if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.conc.connect = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_connect = 1;
- }
- break;
- case PRINT: /* transparent print name prefix */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = PNODE;
-
- s = dgap_getword(in);
- if (!s) {
-				pr_err("unexpected end of file");
- return -1;
- }
- p->u.printname = kstrdup(s, GFP_KERNEL);
- if (!p->u.printname)
- return -1;
-
- break;
-
- case CMAJOR: /* major number */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = JNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.majornumber)) {
- pr_err("bad number for major number");
- return -1;
- }
- break;
-
- case ALTPIN: /* altpin setting */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = ANODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.altpin)) {
- pr_err("bad number for altpin");
- return -1;
- }
- break;
-
- case USEINTR: /* enable interrupt setting */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = INTRNODE;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.useintr)) {
- pr_err("bad number for useintr");
- return -1;
- }
- break;
-
- case TTSIZ: /* size of tty structure */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = TSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.ttysize)) {
- pr_err("bad number for ttysize");
- return -1;
- }
- break;
-
- case CHSIZ: /* channel structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = CSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.chsize)) {
- pr_err("bad number for chsize");
- return -1;
- }
- break;
-
- case BSSIZ: /* board structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = BSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.bssize)) {
- pr_err("bad number for bssize");
- return -1;
- }
- break;
-
- case UNTSIZ: /* sched structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = USNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.unsize)) {
- pr_err("bad number for schedsize");
- return -1;
- }
- break;
-
- case F2SIZ: /* f2200 structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = FSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.f2size)) {
- pr_err("bad number for f2200size");
- return -1;
- }
- break;
-
- case VPSIZ: /* vpix structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = VSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.vpixsize)) {
- pr_err("bad number for vpixsize");
- return -1;
- }
- break;
- }
- }
-}
-
-static void dgap_cleanup_nodes(void)
-{
- struct cnode *p;
-
- p = &dgap_head;
-
- while (p) {
- struct cnode *tmp = p->next;
-
- if (p->type == NULLNODE) {
- p = tmp;
- continue;
- }
-
- switch (p->type) {
- case BNODE:
- kfree(p->u.board.portstr);
- kfree(p->u.board.addrstr);
- kfree(p->u.board.pcibusstr);
- kfree(p->u.board.pcislotstr);
- kfree(p->u.board.method);
- break;
- case CNODE:
- kfree(p->u.conc.id);
- kfree(p->u.conc.connect);
- break;
- case MNODE:
- kfree(p->u.module.id);
- break;
- case TNODE:
- kfree(p->u.ttyname);
- break;
- case CUNODE:
- kfree(p->u.cuname);
- break;
- case LNODE:
- kfree(p->u.line.cable);
- break;
- case PNODE:
- kfree(p->u.printname);
- break;
- }
-
- kfree(p->u.board.status);
- kfree(p);
- p = tmp;
- }
-}
-
-/*
- * Retrieves the current custom baud rate from FEP memory
- * and returns it to the user.
- * Returns 0 on error.
- */
-static uint dgap_get_custom_baud(struct channel_t *ch)
-{
- u8 __iomem *vaddr;
- ulong offset;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
- return 0;
-
- if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
- return 0;
-
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return 0;
-
- /*
- * Go get from fep mem, what the fep
- * believes the custom baud rate is.
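-	 * The ECS_SEG value is a segment, so it is shifted left by four to
-	 * form a byte offset; each port's settings occupy 0x28 bytes, and
-	 * LINE_SPEED is the offset of the speed word within that block.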
- */
- offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
- + LINE_SPEED;
-
- return readw(vaddr + offset);
-}
-
-/*
- * Remap PCI memory.
- */
-static int dgap_remap(struct board_t *brd)
-{
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- if (!request_mem_region(brd->membase, 0x200000, "dgap"))
- return -ENOMEM;
-
- if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
- "dgap"))
- goto err_req_mem;
-
- brd->re_map_membase = ioremap(brd->membase, 0x200000);
- if (!brd->re_map_membase)
- goto err_remap_mem;
-
- brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
- if (!brd->re_map_port)
- goto err_remap_port;
-
- return 0;
-
-err_remap_port:
- iounmap(brd->re_map_membase);
-err_remap_mem:
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
-err_req_mem:
- release_mem_region(brd->membase, 0x200000);
-
- return -ENOMEM;
-}
-
-static void dgap_unmap(struct board_t *brd)
-{
- iounmap(brd->re_map_port);
- iounmap(brd->re_map_membase);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- release_mem_region(brd->membase, 0x200000);
-}
-
-/*
- * dgap_parity_scan()
- *
- * Convert the FEP5 way of reporting parity errors and breaks into
- * the Linux line discipline way.
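- *
- * The FEP escapes error information in-band with a 0xff marker byte:
- * 0xff 0xff stands for a literal 0xff data byte, while 0xff 0x00 <c>
- * flags byte <c> as a break (when <c> is 0x00) or as a parity error.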
- */
-static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
- unsigned char *fbuf, int *len)
-{
- int l = *len;
- int count = 0;
- unsigned char *in, *cout, *fout;
- unsigned char c;
-
- in = cbuf;
- cout = cbuf;
- fout = fbuf;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- while (l--) {
- c = *in++;
- switch (ch->pscan_state) {
- default:
- /* reset to sanity and fall through */
- ch->pscan_state = 0;
-
- case 0:
- /* No FF seen yet */
- if (c == (unsigned char) '\377')
- /* delete this character from stream */
- ch->pscan_state = 1;
- else {
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- }
- break;
-
- case 1:
- /* first FF seen */
- if (c == (unsigned char) '\377') {
- /* doubled ff, transform to single ff */
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- ch->pscan_state = 0;
- } else {
-				/* save value for examination in the next state */
- ch->pscan_savechar = c;
- ch->pscan_state = 2;
- }
- break;
-
- case 2:
- /* third character of ff sequence */
-
- *cout++ = c;
-
- if (ch->pscan_savechar == 0x0) {
-
- if (c == 0x0) {
- ch->ch_err_break++;
- *fout++ = TTY_BREAK;
- } else {
- ch->ch_err_parity++;
- *fout++ = TTY_PARITY;
- }
- }
-
- count += 1;
- ch->pscan_state = 0;
- }
- }
- *len = count;
-}
-
-/*=======================================================================
- *
- * dgap_input - Process received data.
- *
- * ch - Pointer to channel structure.
- *
- *=======================================================================*/
-
-static void dgap_input(struct channel_t *ch)
-{
- struct board_t *bd;
- struct bs_t __iomem *bs;
- struct tty_struct *tp;
- struct tty_ldisc *ld;
- uint rmask;
- uint head;
- uint tail;
- int data_len;
- ulong lock_flags;
- ulong lock_flags2;
- int flip_len;
- int len;
- int n;
- u8 *buf;
- u8 tmpchar;
- int s;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- tp = ch->ch_tun.un_tty;
-
- bs = ch->ch_bs;
- if (!bs)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /*
- * Figure the number of characters in the buffer.
- * Exit immediately if none.
- */
-
- rmask = ch->ch_rsize - 1;
-
- head = readw(&(bs->rx_head));
- head &= rmask;
- tail = readw(&(bs->rx_tail));
- tail &= rmask;
-
- data_len = (head - tail) & rmask;
-
- if (data_len == 0) {
- writeb(1, &(bs->idata));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- /*
- * If the device is not open, or CREAD is off, flush
- * input data and return immediately.
- */
- if ((bd->state != BOARD_READY) || !tp ||
- (tp->magic != TTY_MAGIC) ||
- !(ch->ch_tun.un_flags & UN_ISOPEN) ||
- !(tp->termios.c_cflag & CREAD) ||
- (ch->ch_tun.un_flags & UN_CLOSING)) {
-
- writew(head, &(bs->rx_tail));
- writeb(1, &(bs->idata));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- /*
- * If we are throttled, simply don't read any data.
- */
- if (ch->ch_flags & CH_RXBLOCK) {
- writeb(1, &(bs->idata));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- /*
-	 * Check for and clear receiver overruns.
- */
- tmpchar = readb(&(bs->orun));
- if (tmpchar) {
- ch->ch_err_overrun++;
- writeb(0, &(bs->orun));
- }
-
- /* Decide how much data we can send into the tty layer */
- flip_len = TTY_FLIPBUF_SIZE;
-
- /* Chop down the length, if needed */
- len = min(data_len, flip_len);
- len = min(len, (N_TTY_BUF_SIZE - 1));
-
- ld = tty_ldisc_ref(tp);
-
-#ifdef TTY_DONT_FLIP
- /*
- * If the DONT_FLIP flag is on, don't flush our buffer, and act
- * like the ld doesn't have any space to put the data right now.
- */
- if (test_bit(TTY_DONT_FLIP, &tp->flags))
- len = 0;
-#endif
-
- /*
- * If we were unable to get a reference to the ld,
- * don't flush our buffer, and act like the ld doesn't
- * have any space to put the data right now.
- */
- if (!ld) {
- len = 0;
- } else {
- /*
- * If ld doesn't have a pointer to a receive_buf function,
- * flush the data, then act like the ld doesn't have any
- * space to put the data right now.
- */
- if (!ld->ops->receive_buf) {
- writew(head, &(bs->rx_tail));
- len = 0;
- }
- }
-
- if (len <= 0) {
- writeb(1, &(bs->idata));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (ld)
- tty_ldisc_deref(ld);
- return;
- }
-
- buf = ch->ch_bd->flipbuf;
- n = len;
-
- /*
-	 * n now contains the maximum amount of data we can copy,
- * bounded either by our buffer size or the amount
- * of data the card actually has pending...
- */
- while (n) {
-
- s = ((head >= tail) ? head : ch->ch_rsize) - tail;
- s = min(s, n);
-
- if (s <= 0)
- break;
-
- memcpy_fromio(buf, ch->ch_raddr + tail, s);
-
- tail += s;
- buf += s;
-
- n -= s;
-		/* Wrap the tail pointer if needed */
- tail &= rmask;
- }
-
- writew(tail, &(bs->rx_tail));
- writeb(1, &(bs->idata));
- ch->ch_rxcount += len;
-
- /*
- * If we are completely raw, we don't need to go through a lot
- * of the tty layers that exist.
- * In this case, we take the shortest and fastest route we
- * can to relay the data to the user.
- *
- * On the other hand, if we are not raw, we need to go through
-	 * the tty layer, which has a better-defined API.
- */
- if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
- dgap_parity_scan(ch, ch->ch_bd->flipbuf,
- ch->ch_bd->flipflagbuf, &len);
-
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
- ch->ch_bd->flipflagbuf, len);
- } else {
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	/* Tell the tty layer it's okay to "eat" the data now */
- tty_flip_buffer_push(tp->port);
-
- if (ld)
- tty_ldisc_deref(ld);
-
-}
-
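-/*
- * dgap_write_wakeup()
- *
- * If the given unit has the requested flag set, clear it and, while the
- * unit is open, call the line discipline's write_wakeup() with the board
- * and channel locks dropped, then wake up any write waiters.
- */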
-static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
- struct un_t *un, u32 mask,
- unsigned long *irq_flags1,
- unsigned long *irq_flags2)
-{
- if (!(un->un_flags & mask))
- return;
-
- un->un_flags &= ~mask;
-
- if (!(un->un_flags & UN_ISOPEN))
- return;
-
- if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- un->un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
-
- (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
-
- spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
- spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
- }
- wake_up_interruptible(&un->un_tty->write_wait);
- wake_up_interruptible(&un->un_flags_wait);
-}
-
-/************************************************************************
- * Determines when CARRIER changes state and takes appropriate
- * action.
- ************************************************************************/
-static void dgap_carrier(struct channel_t *ch)
-{
- struct board_t *bd;
-
- int virt_carrier = 0;
- int phys_carrier = 0;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- /* Make sure altpin is always set correctly */
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- } else {
- ch->ch_dsr = DM_DSR;
- ch->ch_cd = DM_CD;
- }
-
- if (ch->ch_mistat & D_CD(ch))
- phys_carrier = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
- virt_carrier = 1;
-
- if (ch->ch_c_cflag & CLOCAL)
- virt_carrier = 1;
-
- /*
- * Test for a VIRTUAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL transition to low, so long as we aren't
- * currently ignoring physical transitions (which is what "virtual
- * carrier" indicates).
- *
- * The transition of the virtual carrier to low really doesn't
- * matter... it really only means "ignore carrier state", not
- * "make pretend that carrier is there".
- */
- if ((virt_carrier == 0) &&
- ((ch->ch_flags & CH_CD) != 0) &&
- (phys_carrier == 0)) {
-
- /*
- * When carrier drops:
- *
- * Drop carrier on all open units.
- *
- * Flush queues, waking up any task waiting in the
- * line discipline.
- *
- * Send a hangup to the control terminal.
- *
- * Enable all select calls.
- */
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
-
- if (ch->ch_tun.un_open_count > 0)
- tty_hangup(ch->ch_tun.un_tty);
-
- if (ch->ch_pun.un_open_count > 0)
- tty_hangup(ch->ch_pun.un_tty);
- }
-
- /*
- * Make sure that our cached values reflect the current reality.
- */
- if (virt_carrier == 1)
- ch->ch_flags |= CH_FCAR;
- else
- ch->ch_flags &= ~CH_FCAR;
-
- if (phys_carrier == 1)
- ch->ch_flags |= CH_CD;
- else
- ch->ch_flags &= ~CH_CD;
-}
-
-/*=======================================================================
- *
- * dgap_event - FEP to host event processing routine.
- *
- * bd - Board of current event.
- *
- *=======================================================================*/
-static int dgap_event(struct board_t *bd)
-{
- struct channel_t *ch;
- ulong lock_flags;
- ulong lock_flags2;
- struct bs_t __iomem *bs;
- u8 __iomem *event;
- u8 __iomem *vaddr;
- struct ev_t __iomem *eaddr;
- uint head;
- uint tail;
- int port;
- int reason;
- int modem;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- if (!vaddr) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * Forget it if pointers out of range.
- */
-
- if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
- (head | tail) & 03) {
- /* Let go of board lock */
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /*
- * Loop to process all the events in the buffer.
- */
- while (tail != head) {
-
- /*
- * Get interrupt information.
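-		 * Each event entry is four bytes: the port number, the
-		 * reason flags, the modem state, and one unused byte.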
- */
-
- event = bd->re_map_membase + tail + EVSTART;
-
- port = ioread8(event);
- reason = ioread8(event + 1);
- modem = ioread8(event + 2);
- ioread8(event + 3);
-
- /*
- * Make sure the interrupt is valid.
- */
- if (port >= bd->nasync)
- goto next;
-
- if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
- goto next;
-
- ch = bd->channels[port];
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- goto next;
-
- /*
- * If we have made it here, the event was valid.
- * Lock down the channel.
- */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- bs = ch->ch_bs;
-
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- goto next;
- }
-
- /*
- * Process received data.
- */
- if (reason & IFDATA) {
-
- /*
- * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
- * input could send some data to ld, which in turn
- * could do a callback to one of our other functions.
- */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- dgap_input(ch);
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (ch->ch_flags & CH_RACTIVE)
- ch->ch_flags |= CH_RENABLE;
- else
- writeb(1, &(bs->idata));
-
- if (ch->ch_flags & CH_RWAIT) {
- ch->ch_flags &= ~CH_RWAIT;
-
- wake_up_interruptible
- (&ch->ch_tun.un_flags_wait);
- }
- }
-
- /*
- * Process Modem change signals.
- */
- if (reason & IFMODEM) {
- ch->ch_mistat = modem;
- dgap_carrier(ch);
- }
-
- /*
- * Process break.
- */
- if (reason & IFBREAK) {
-
- if (ch->ch_tun.un_tty) {
- /* A break has been indicated */
- ch->ch_err_break++;
- tty_buffer_request_room
- (ch->ch_tun.un_tty->port, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty->port,
- 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty->port);
- }
- }
-
- /*
- * Process Transmit low.
- */
- if (reason & IFTLW) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WLOW) {
- ch->ch_flags &= ~CH_WLOW;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- /*
- * Process Transmit empty.
- */
- if (reason & IFTEM) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WEMPTY) {
- ch->ch_flags &= ~CH_WEMPTY;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-
-next:
- tail = (tail + 4) & (EVMAX - EVSTART - 4);
- }
-
- writew(tail, &(eaddr->ev_tail));
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * Our board poller function.
- */
-static void dgap_poll_tasklet(unsigned long data)
-{
- struct board_t *bd = (struct board_t *) data;
- ulong lock_flags;
- char __iomem *vaddr;
- u16 head, tail;
-
- if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
- return;
-
- if (bd->inhibit_poller)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if (bd->state == BOARD_READY) {
-
- struct ev_t __iomem *eaddr;
-
- if (!bd->re_map_membase) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
- if (!bd->re_map_port) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- if (!bd->nasync)
- goto out;
-
- eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
-		 * If there is an event pending, go service it.
- */
- if (head != tail) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_event(bd);
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- }
-
-out:
- /*
- * If board is doing interrupts, ACK the interrupt.
- */
- if (bd && bd->intr_running)
- readb(bd->re_map_port + 2);
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * dgap_found_board()
- *
- * A board has been found, init it.
- */
-static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
- int boardnum)
-{
- struct board_t *brd;
- unsigned int pci_irq;
- int i;
- int ret;
-
- /* get the board structure and prep it */
- brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
- if (!brd)
- return ERR_PTR(-ENOMEM);
-
- /* store the info for the board we've found */
- brd->magic = DGAP_BOARD_MAGIC;
- brd->boardnum = boardnum;
- brd->vendor = dgap_pci_tbl[id].vendor;
- brd->device = dgap_pci_tbl[id].device;
- brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgap_ids[id].name;
- brd->maxports = dgap_ids[id].maxports;
- brd->type = dgap_ids[id].config_type;
- brd->dpatype = dgap_ids[id].dpatype;
- brd->dpastatus = BD_NOFEP;
- init_waitqueue_head(&brd->state_wait);
-
- spin_lock_init(&brd->bd_lock);
-
- brd->inhibit_poller = FALSE;
- brd->wait_for_bios = 0;
- brd->wait_for_fep = 0;
-
- for (i = 0; i < MAXPORTS; i++)
- brd->channels[i] = NULL;
-
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
-
- pci_irq = pdev->irq;
- brd->irq = pci_irq;
-
- /* get the PCI Base Address Registers */
-
- /* Xr Jupiter and EPC use BAR 2 */
- if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
- brd->membase = pci_resource_start(pdev, 2);
- brd->membase_end = pci_resource_end(pdev, 2);
- }
- /* Everyone else uses BAR 0 */
- else {
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
- }
-
- if (!brd->membase) {
- ret = -ENODEV;
- goto free_brd;
- }
-
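-	/* Strip the PCI BAR flag bits (I/O vs. memory) from the base address. */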
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
-
- /*
-	 * On the PCI boards, there is no I/O space allocated.
-	 * The I/O registers will be in the first 3 bytes of the
-	 * upper 2MB of the 4MB memory space. The board memory
-	 * will be mapped into the low 2MB of the 4MB memory space.
- */
- brd->port = brd->membase + PCI_IO_OFFSET;
- brd->port_end = brd->port + PCI_IO_SIZE_DGAP;
-
- /*
- * Special initialization for non-PLX boards
- */
- if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
- unsigned short cmd;
-
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x46, 0);
-
- /* Limit burst length to 2 doubleword transactions */
- pci_write_config_byte(pdev, 0x42, 1);
-
- /*
- * Enable IO and mem if not already done.
- * This was needed for support on Itanium.
- */
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
-
- /* init our poll helper tasklet */
- tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
- (unsigned long) brd);
-
- ret = dgap_remap(brd);
- if (ret)
- goto free_brd;
-
- pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
- boardnum, brd->name, brd->rev, brd->irq);
-
- return brd;
-
-free_brd:
- kfree(brd);
-
- return ERR_PTR(ret);
-}
-
-/*
- * dgap_intr()
- *
- * Driver interrupt handler.
- */
-static irqreturn_t dgap_intr(int irq, void *voidbrd)
-{
- struct board_t *brd = voidbrd;
-
- if (!brd)
- return IRQ_NONE;
-
- /*
-	 * Check to make sure it's for us.
- */
- if (brd->magic != DGAP_BOARD_MAGIC)
- return IRQ_NONE;
-
- brd->intr_count++;
-
- /*
- * Schedule tasklet to run at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
- return IRQ_HANDLED;
-}
-
-/*****************************************************************************
-*
-* Function:
-*
-* dgap_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
-
-static void dgap_poll_handler(ulong dummy)
-{
- unsigned int i;
- struct board_t *brd;
- unsigned long lock_flags;
- ulong new_time;
-
- dgap_poll_counter++;
-
- /*
- * Do not start the board state machine until
-	 * the driver tells us it's up and running, and has
- * everything it needs.
- */
- if (dgap_driver_state != DRIVER_READY)
- goto schedule_poller;
-
- /*
- * If we have just 1 board, or the system is not SMP,
- * then use the typical old style poller.
- * Otherwise, use our new tasklet based poller, which should
- * speed things up for multiple boards.
- */
- if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
- for (i = 0; i < dgap_numboards; i++) {
-
- brd = dgap_board[i];
-
- if (brd->state == BOARD_FAILED)
- continue;
- if (!brd->intr_running)
- /* Call the real board poller directly */
- dgap_poll_tasklet((unsigned long) brd);
- }
- } else {
- /*
-		 * Go through each board, kicking off a
-		 * tasklet for each if needed.
- */
- for (i = 0; i < dgap_numboards; i++) {
- brd = dgap_board[i];
-
- /*
- * Attempt to grab the board lock.
- *
- * If we can't get it, no big deal, the next poll
- * will get it. Basically, I just really don't want
- * to spin in here, because I want to kick off my
- * tasklets as fast as I can, and then get out the
-			 * tasklets as fast as I can, and then get out of
-			 * the poller.
- if (!spin_trylock(&brd->bd_lock))
- continue;
-
- /*
- * If board is in a failed state, don't bother
- * scheduling a tasklet
- */
- if (brd->state == BOARD_FAILED) {
- spin_unlock(&brd->bd_lock);
- continue;
- }
-
- /* Schedule a poll helper task */
- if (!brd->intr_running)
- tasklet_schedule(&brd->helper_tasklet);
-
- /*
- * Can't do DGAP_UNLOCK here, as we don't have
- * lock_flags because we did a trylock above.
- */
- spin_unlock(&brd->bd_lock);
- }
- }
-
-schedule_poller:
-
- /*
-	 * Schedule ourselves back at the nominal wakeup interval.
- */
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
-
- new_time = dgap_poll_time - jiffies;
-
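-	/*
-	 * If the next poll time has drifted by two ticks or more,
-	 * resync to one tick from now.
-	 */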
- if ((ulong) new_time >= 2 * dgap_poll_tick) {
- dgap_poll_time =
- jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- }
-
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
- if (!dgap_poll_stop)
- add_timer(&dgap_poll_timer);
-}
-
-/*=======================================================================
- *
- * dgap_cmdb - Sends a 2 byte command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * byte1 - Integer containing first byte to be sent.
- * byte2 - Integer containing second byte to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
- u8 byte2, uint ncmds)
-{
- char __iomem *vaddr;
-	struct cm_t __iomem *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writeb(byte1, (vaddr + head + CMDSTART + 2));
- writeb(byte2, (vaddr + head + CMDSTART + 3));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-/*=======================================================================
- *
- * dgap_cmdw - Sends a 1 word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
-	struct cm_t __iomem *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16) word, (vaddr + head + CMDSTART + 2));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-/*=======================================================================
- *
- * dgap_cmdw_ext - Sends an extended word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
-	struct cm_t __iomem *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
-
- /* Write an FF to tell the FEP that we want an extended command */
- writeb((u8) 0xff, (vaddr + head + CMDSTART + 0));
-
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16) cmd, (vaddr + head + CMDSTART + 2));
-
- /*
- * If the second part of the command won't fit,
- * put it at the beginning of the circular buffer.
- */
- if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
- writew((u16) word, (vaddr + CMDSTART));
- else
- writew((u16) word, (vaddr + head + CMDSTART + 4));
-
- head = (head + 8) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-/*=======================================================================
- *
- * dgap_wmove - Write data to FEP buffer.
- *
- * ch - Pointer to channel structure.
- * buf - Pointer to characters to be moved.
- * cnt - Number of characters to move.
- *
- *=======================================================================*/
-static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
-{
- int n;
- char __iomem *taddr;
- struct bs_t __iomem *bs;
- u16 head;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check parameters.
- */
- bs = ch->ch_bs;
- head = readw(&(bs->tx_head));
-
- /*
- * If pointers are out of range, just return.
- */
- if ((cnt > ch->ch_tsize) ||
- (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
- return;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- n = ch->ch_tstart + ch->ch_tsize - head;
-
- if (cnt >= n) {
- cnt -= n;
- taddr = ch->ch_taddr + head;
- memcpy_toio(taddr, buf, n);
- head = ch->ch_tstart;
- buf += n;
- }
-
- /*
- * Move rest of data.
- */
- taddr = ch->ch_taddr + head;
- n = cnt;
- memcpy_toio(taddr, buf, n);
- head += cnt;
-
- writew(head, &(bs->tx_head));
-}
-
-/*
- * Calls the firmware to reset this channel.
- */
-static void dgap_firmware_reset_port(struct channel_t *ch)
-{
- dgap_cmdb(ch, CHRESET, 0, 0, 0);
-
- /*
- * Now that the channel is reset, we need to make sure
- * all the current settings get reapplied to the port
- * in the firmware.
- *
- * So we will set the driver's cache of firmware
- * settings all to 0, and then call param.
- */
- ch->ch_fepiflag = 0;
- ch->ch_fepcflag = 0;
- ch->ch_fepoflag = 0;
- ch->ch_fepstartc = 0;
- ch->ch_fepstopc = 0;
- ch->ch_fepastartc = 0;
- ch->ch_fepastopc = 0;
- ch->ch_mostat = 0;
- ch->ch_hflow = 0;
-}
-
-/*=======================================================================
- *
- * dgap_param - Set Digi parameters.
- *
- * ch - Pointer to channel structure.
- * bd - Pointer to board structure.
- * un_type - Unit type (DGAP_PRINT for the transparent print unit).
- *
- *=======================================================================*/
-static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
-{
- u16 head;
- u16 cflag;
- u16 iflag;
- u8 mval;
- u8 hflow;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
-
- /* flush rx */
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
-
- /* flush tx */
- head = readw(&(ch->ch_bs->tx_head));
- writew(head, &(ch->ch_bs->tx_tail));
-
- ch->ch_flags |= (CH_BAUD0);
-
- /* Drop RTS and DTR */
- ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
- mval = D_DTR(ch) | D_RTS(ch);
- ch->ch_baud_info = 0;
-
- } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
- /*
- * Tell the fep to do the command
- */
-
- dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
-
- /*
- * Now go get from fep mem, what the fep
- * believes the custom baud rate is.
- */
- ch->ch_custom_speed = dgap_get_custom_baud(ch);
- ch->ch_baud_info = ch->ch_custom_speed;
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
-
- } else {
- /*
- * Set baud rate, character size, and parity.
- */
-
-
- int iindex = 0;
- int jindex = 0;
- int baud = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 14400, 57600, 230400, 76800,
- 115200, 230400, 28800, 460800,
- 921600, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /*
- * Only use the TXPrint baud rate if the
- * terminal unit is NOT open
- */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
- un_type == DGAP_PRINT)
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) &&
- (jindex >= 0) && (jindex < 16))
- baud = bauds[iindex][jindex];
- else
- baud = 0;
-
- if (baud == 0)
- baud = 9600;
-
- ch->ch_baud_info = baud;
-
- /*
- * CBAUD has bit position 0x1000 set these days to
- * indicate Linux baud rate remap.
- * We use a different bit assignment for high speed.
- * Clear this bit out while grabbing the parts of
- * "cflag" we want.
- */
- cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
- CSTOPB | CSIZE);
-
- /*
- * HUPCL bit is used by FEP to indicate fast baud
- * table is to be used.
- */
- if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
- (ch->ch_c_cflag & CBAUDEX))
- cflag |= HUPCL;
-
- if ((ch->ch_c_cflag & CBAUDEX) &&
- !(ch->ch_digi.digi_flags & DIGI_FAST)) {
- /*
- * The below code is trying to guarantee that only
- * baud rates 115200, 230400, 460800, 921600 are
- * remapped. We use exclusive or because the various
- * baud rates share common bit positions and therefore
- * can't be tested for easily.
- */
- tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
- int baudpart = 0;
-
- /*
- * Map high speed requests to index
- * into FEP's baud table
- */
- switch (tcflag) {
- case B57600:
- baudpart = 1;
- break;
-#ifdef B76800
- case B76800:
- baudpart = 2;
- break;
-#endif
- case B115200:
- baudpart = 3;
- break;
- case B230400:
- baudpart = 9;
- break;
- case B460800:
- baudpart = 11;
- break;
-#ifdef B921600
- case B921600:
- baudpart = 12;
- break;
-#endif
- default:
- baudpart = 0;
- }
-
- if (baudpart)
- cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
- }
-
- cflag &= 0xffff;
-
- if (cflag != ch->ch_fepcflag) {
- ch->ch_fepcflag = (u16) (cflag & 0xffff);
-
- /*
- * Okay to have channel and board
- * locks held calling this
- */
- dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
- }
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
- }
-
- /*
- * Get input flags.
- */
- iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
- INPCK | ISTRIP | IXON | IXANY | IXOFF);
-
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE)) {
- iflag &= ~(IXON | IXOFF);
- ch->ch_c_iflag &= ~(IXON | IXOFF);
- }
-
- /*
- * Only the IBM Xr card can switch between
- * 232 and 422 modes on the fly
- */
- if (bd->device == PCI_DEV_XR_IBM_DID) {
- if (ch->ch_digi.digi_flags & DIGI_422)
- dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
- else
- dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
- }
-
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
- iflag |= IALTPIN;
-
- if (iflag != ch->ch_fepiflag) {
- ch->ch_fepiflag = iflag;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
- }
-
- /*
- * Select hardware handshaking.
- */
- hflow = 0;
-
- if (ch->ch_c_cflag & CRTSCTS)
- hflow |= (D_RTS(ch) | D_CTS(ch));
- if (ch->ch_digi.digi_flags & RTSPACE)
- hflow |= D_RTS(ch);
- if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
- if (ch->ch_digi.digi_flags & CTSPACE)
- hflow |= D_CTS(ch);
- if (ch->ch_digi.digi_flags & DSRPACE)
- hflow |= D_DSR(ch);
- if (ch->ch_digi.digi_flags & DCDPACE)
- hflow |= D_CD(ch);
-
- if (hflow != ch->ch_hflow) {
- ch->ch_hflow = hflow;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SHFLOW, (u8) hflow, 0xff, 0);
- }
-
- /*
- * Set RTS and/or DTR Toggle if needed,
- * but only if product is FEP5+ based.
- */
- if (bd->bd_flags & BD_FEP5PLUS) {
- u16 hflow2 = 0;
-
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
- hflow2 |= (D_RTS(ch));
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
- hflow2 |= (D_DTR(ch));
-
- dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
- }
-
- /*
- * Set modem control lines.
- */
-
- mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
-
- if (ch->ch_mostat ^ mval) {
- ch->ch_mostat = mval;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SMODEM, (u8) mval, D_RTS(ch)|D_DTR(ch), 0);
- }
-
- /*
- * Read modem signals, and then call carrier function.
- */
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
- dgap_carrier(ch);
-
- /*
- * Set the start and stop characters.
- */
- if (ch->ch_startc != ch->ch_fepstartc ||
- ch->ch_stopc != ch->ch_fepstopc) {
- ch->ch_fepstartc = ch->ch_startc;
- ch->ch_fepstopc = ch->ch_stopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
- }
-
- /*
- * Set the Auxiliary start and stop characters.
- */
- if (ch->ch_astartc != ch->ch_fepastartc ||
- ch->ch_astopc != ch->ch_fepastopc) {
- ch->ch_fepastartc = ch->ch_astartc;
- ch->ch_fepastopc = ch->ch_astopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
- }
-
- return 0;
-}
-
-/*
- * dgap_block_til_ready()
- *
- * Wait for DCD, if needed.
- */
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
- struct channel_t *ch)
-{
- int retval = 0;
- struct un_t *un;
- ulong lock_flags;
- uint old_flags;
- int sleep_on_un_flags;
-
- if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
- ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- ch->ch_wopen++;
-
- /* Loop forever */
- while (1) {
-
- sleep_on_un_flags = 0;
-
- /*
- * If board has failed somehow during our sleep,
- * bail with error.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- retval = -EIO;
- break;
- }
-
- /* If tty was hung up, break out of loop and set error. */
- if (tty_hung_up_p(file)) {
- retval = -EAGAIN;
- break;
- }
-
- /*
- * If either unit is in the middle of the fragile part of close,
- * we just cannot touch the channel safely.
- * Go back to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
- * ch_wait_flags to wake us back up.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
- UN_CLOSING)) {
-
- /*
- * Our conditions to leave cleanly and happily:
- * 1) NONBLOCKING on the tty is set.
- * 2) CLOCAL is set.
- * 3) DCD (fake or real) is active.
- */
-
- if (file->f_flags & O_NONBLOCK)
- break;
-
- if (tty->flags & (1 << TTY_IO_ERROR))
- break;
-
- if (ch->ch_flags & CH_CD)
- break;
-
- if (ch->ch_flags & CH_FCAR)
- break;
- } else {
- sleep_on_un_flags = 1;
- }
-
- /*
- * If there is a signal pending, the user probably
- * interrupted (ctrl-c) us.
- * Leave loop with error set.
- */
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-
- /*
- * Store the flags before we let go of channel lock
- */
- if (sleep_on_un_flags)
- old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
- else
- old_flags = ch->ch_flags;
-
- /*
- * Let go of channel lock before calling schedule.
- * Our poller will get any FEP events and wake us up when DCD
- * eventually goes active.
- */
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /*
- * Wait for something in the flags to change
- * from the current value.
- */
- if (sleep_on_un_flags) {
- retval = wait_event_interruptible(un->un_flags_wait,
- (old_flags != (ch->ch_tun.un_flags |
- ch->ch_pun.un_flags)));
- } else {
- retval = wait_event_interruptible(ch->ch_flags_wait,
- (old_flags != ch->ch_flags));
- }
-
- /*
- * We got woken up for some reason.
- * Before looping around, grab our channel lock.
- */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- }
-
- ch->ch_wopen--;
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return retval;
-}
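
For clarity, the loop above uses a standard lost-wakeup-safe pattern: snapshot the relevant flags while holding ch_lock, drop the lock, then sleep until the flags differ from the snapshot. The following is a minimal userspace sketch of the same idea using POSIX threads; the struct and function names are hypothetical and not part of the driver.

	#include <pthread.h>

	/* Hypothetical stand-in for the channel state guarded by ch_lock. */
	struct flag_waiter {
		pthread_mutex_t lock;		/* analog of ch->ch_lock */
		pthread_cond_t wait;		/* analog of ch->ch_flags_wait */
		unsigned int flags;		/* analog of ch->ch_flags */
	};

	/* Sleep until 'flags' changes from the value observed under the lock,
	 * mirroring the snapshot + wait_event_interruptible() sequence above. */
	static void wait_for_flags_change(struct flag_waiter *w)
	{
		unsigned int old_flags;

		pthread_mutex_lock(&w->lock);
		old_flags = w->flags;		/* snapshot while locked */
		while (w->flags == old_flags)
			pthread_cond_wait(&w->wait, &w->lock);	/* releases lock while asleep */
		pthread_mutex_unlock(&w->lock);
	}
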
-
-/*
- * dgap_tty_flush_buffer()
- *
- * Flush Tx buffer (make in == out)
- */
-static void dgap_tty_flush_buffer(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-}
-
-/*
- * dgap_tty_hangup()
- *
- * Hangup the port. Like a close, but don't wait for output to drain.
- */
-static void dgap_tty_hangup(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- /* flush the transmit queues */
- dgap_tty_flush_buffer(tty);
-}
-
-/*
- * dgap_tty_chars_in_buffer()
- *
- * Return number of characters that have not been transmitted yet.
- *
- * This routine is used by the line discipline to determine if there
- * is data waiting to be transmitted/drained/flushed or not.
- */
-static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- u8 tbusy;
- uint chars;
- u16 thead, ttail, tmask, chead, ctail;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
-
- if (!tty)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
-
- bs = ch->ch_bs;
- if (!bs)
- return 0;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- tmask = (ch->ch_tsize - 1);
-
- /* Get Transmit queue pointers */
- thead = readw(&(bs->tx_head)) & tmask;
- ttail = readw(&(bs->tx_tail)) & tmask;
-
- /* Get tbusy flag */
- tbusy = readb(&(bs->tbusy));
-
- /* Get Command queue pointers */
- chead = readw(&(ch->ch_cm->cm_head));
- ctail = readw(&(ch->ch_cm->cm_tail));
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- /*
-	 * The only way we know for sure that there is no pending
-	 * data left to be transferred is if:
- * 1) Transmit head and tail are equal (empty).
- * 2) Command queue head and tail are equal (empty).
- * 3) The "TBUSY" flag is 0. (Transmitter not busy).
- */
-
- if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
- chars = 0;
- } else {
- if (thead >= ttail)
- chars = thead - ttail;
- else
- chars = thead - ttail + ch->ch_tsize;
- /*
- * Fudge factor here.
- * If chars is zero, we know that the command queue had
- * something in it or tbusy was set. Because we cannot
- * be sure if there is still some data to be transmitted,
-		 * let's lie, and tell ld we have 1 byte left.
- */
- if (chars == 0) {
- /*
- * If TBUSY is still set, and our tx buffers are empty,
- * force the firmware to send me another wakeup after
- * TBUSY has been cleared.
- */
- if (tbusy != 0) {
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- spin_unlock_irqrestore(&ch->ch_lock,
- lock_flags);
- }
- chars = 1;
- }
- }
-
- return chars;
-}
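
The count returned above is ordinary circular-buffer arithmetic on the FEP transmit head and tail, with a one-byte fudge when the command queue or TBUSY indicates outstanding work. A self-contained sketch of just the arithmetic, with hypothetical names (the buffer size must be a power of two, as ch_tsize is):

	/* Bytes queued in a power-of-two circular transmit buffer:
	 * 'head' is where the driver writes, 'tail' is where the FEP reads.
	 * e.g. tx_bytes_pending(2, 1020, 1024) == 6 after a wrap. */
	static unsigned int tx_bytes_pending(unsigned int head, unsigned int tail,
					     unsigned int size)
	{
		unsigned int mask = size - 1;

		head &= mask;
		tail &= mask;
		if (head >= tail)
			return head - tail;
		return head - tail + size;	/* head has wrapped past the end */
	}
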
-
-static int dgap_wait_for_drain(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- int ret = 0;
- uint count = 1;
- ulong lock_flags = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- bs = ch->ch_bs;
- if (!bs)
- return -EIO;
-
- /* Loop until data is drained */
- while (count != 0) {
-
- count = dgap_tty_chars_in_buffer(tty);
-
- if (count == 0)
- break;
-
- /* Set flag waiting for drain */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /* Go to sleep till we get woken up */
- ret = wait_event_interruptible(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0));
- /* If ret is non-zero, user ctrl-c'ed us */
- if (ret)
- break;
- }
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- un->un_flags &= ~(UN_EMPTY);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return ret;
-}
-
-/*
- * dgap_maxcps_room
- *
- * Reduces bytes_available to the max number of characters
- * that can be sent currently given the maxcps value, and
- * returns the new bytes_available. This only affects printer
- * output.
- */
-static int dgap_maxcps_room(struct channel_t *ch, struct un_t *un,
- int bytes_available)
-{
- /*
-	 * If it's not the Transparent print device, return
- * the full data amount.
- */
- if (un->un_type != DGAP_PRINT)
- return bytes_available;
-
- if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
- int cps_limit = 0;
- unsigned long current_time = jiffies;
- unsigned long buffer_time = current_time +
- (HZ * ch->ch_digi.digi_bufsize) /
- ch->ch_digi.digi_maxcps;
-
- if (ch->ch_cpstime < current_time) {
- /* buffer is empty */
- ch->ch_cpstime = current_time; /* reset ch_cpstime */
- cps_limit = ch->ch_digi.digi_bufsize;
- } else if (ch->ch_cpstime < buffer_time) {
- /* still room in the buffer */
- cps_limit = ((buffer_time - ch->ch_cpstime) *
- ch->ch_digi.digi_maxcps) / HZ;
- } else {
- /* no room in the buffer */
- cps_limit = 0;
- }
-
- bytes_available = min(cps_limit, bytes_available);
- }
-
- return bytes_available;
-}
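
The maxcps handling above is effectively a token bucket measured in jiffies: ch_cpstime is the time at which the notional printer buffer will have drained, and the room granted is however many characters still fit before that buffer would overflow. A hedged, plain-C sketch of the same arithmetic; the names and the hz parameter are illustrative, not driver API, and maxcps is assumed to be positive.

	/* Characters the printer can accept now, given a budget of 'maxcps'
	 * characters per second and a notional buffer of 'bufsize' characters.
	 * 'now' and '*cpstime' are in jiffies; 'hz' is jiffies per second. */
	static int maxcps_room(long now, long *cpstime, int maxcps, int bufsize,
			       int hz)
	{
		long buffer_time = now + ((long)hz * bufsize) / maxcps;

		if (*cpstime < now) {		/* buffer already drained */
			*cpstime = now;
			return bufsize;
		}
		if (*cpstime < buffer_time)	/* partially full */
			return (int)(((buffer_time - *cpstime) * maxcps) / hz);
		return 0;			/* completely full */
	}
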
-
-static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
-{
- struct channel_t *ch;
- struct bs_t __iomem *bs;
-
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
- bs = ch->ch_bs;
- if (!bs)
- return;
-
- if ((event & UN_LOW) != 0) {
- if ((un->un_flags & UN_LOW) == 0) {
- un->un_flags |= UN_LOW;
- writeb(1, &(bs->ilow));
- }
- }
-	if ((event & UN_EMPTY) != 0) {
- if ((un->un_flags & UN_EMPTY) == 0) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- }
- }
-}
-
-/*
- * dgap_tty_write_room()
- *
- * Return space available in Tx buffer
- */
-static int dgap_tty_write_room(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- u16 head, tail, tmask;
- int ret;
- ulong lock_flags = 0;
-
- if (!tty)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- bs = ch->ch_bs;
- if (!bs)
- return 0;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- tmask = ch->ch_tsize - 1;
- head = readw(&(bs->tx_head)) & tmask;
- tail = readw(&(bs->tx_tail)) & tmask;
-
- ret = tail - head - 1;
- if (ret < 0)
- ret += ch->ch_tsize;
-
- /* Limit printer to maxcps */
- ret = dgap_maxcps_room(ch, un, ret);
-
- /*
- * If we are printer device, leave space for
- * possibly both the on and off strings.
- */
- if (un->un_type == DGAP_PRINT) {
- if (!(ch->ch_flags & CH_PRON))
- ret -= ch->ch_digi.digi_onlen;
- ret -= ch->ch_digi.digi_offlen;
- } else {
- if (ch->ch_flags & CH_PRON)
- ret -= ch->ch_digi.digi_offlen;
- }
-
- if (ret < 0)
- ret = 0;
-
- /*
- * Schedule FEP to wake us up if needed.
- *
- * TODO: This might be overkill...
- * Do we really need to schedule callbacks from the FEP
- * in every case? Can we get smarter based on ret?
- */
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return ret;
-}
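
Write room is the mirror image of chars_in_buffer: free space is tail - head - 1 (one slot is always left empty), and the printer unit must additionally reserve space for the on/off escape strings so they can always be emitted. A small sketch of that calculation with hypothetical parameter names:

	/* Free space in the transmit ring, minus room reserved for the
	 * printer on/off strings, as dgap_tty_write_room() computes above. */
	static int tx_write_room(unsigned int head, unsigned int tail,
				 unsigned int size, int is_printer, int pr_on,
				 int onlen, int offlen)
	{
		unsigned int mask = size - 1;
		int room = (int)(tail & mask) - (int)(head & mask) - 1;

		if (room < 0)
			room += (int)size;

		if (is_printer) {		/* may need to emit ON, then later OFF */
			if (!pr_on)
				room -= onlen;
			room -= offlen;
		} else if (pr_on) {		/* terminal write must emit OFF first */
			room -= offlen;
		}
		return room < 0 ? 0 : room;
	}
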
-
-/*
- * dgap_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * In here exists all the Transparent Print magic as well.
- */
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- char __iomem *vaddr;
- u16 head, tail, tmask, remain;
- int bufcount, n;
- ulong lock_flags;
-
- if (!tty)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- bs = ch->ch_bs;
- if (!bs)
- return 0;
-
- if (!count)
- return 0;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- /* Get our space available for the channel from the board */
- tmask = ch->ch_tsize - 1;
- head = readw(&(bs->tx_head)) & tmask;
- tail = readw(&(bs->tx_tail)) & tmask;
-
- bufcount = tail - head - 1;
- if (bufcount < 0)
- bufcount += ch->ch_tsize;
-
- /*
- * Limit printer output to maxcps overall, with bursts allowed
- * up to bufsize characters.
- */
- bufcount = dgap_maxcps_room(ch, un, bufcount);
-
- /*
- * Take minimum of what the user wants to send, and the
- * space available in the FEP buffer.
- */
- count = min(count, bufcount);
-
- /*
- * Bail if no space left.
- */
- if (count <= 0) {
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
- return 0;
- }
-
- /*
- * Output the printer ON string, if we are in terminal mode, but
- * need to be in printer mode.
- */
- if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_onstr,
- (int) ch->ch_digi.digi_onlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags |= CH_PRON;
- }
-
- /*
- * On the other hand, output the printer OFF string, if we are
- * currently in printer mode, but need to output to the terminal.
- */
- if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
-
- n = count;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- remain = ch->ch_tstart + ch->ch_tsize - head;
-
- if (n >= remain) {
- n -= remain;
- vaddr = ch->ch_taddr + head;
-
- memcpy_toio(vaddr, (u8 *) buf, remain);
-
- head = ch->ch_tstart;
- buf += remain;
- }
-
- if (n > 0) {
-
- /*
- * Move rest of data.
- */
- vaddr = ch->ch_taddr + head;
- remain = n;
-
- memcpy_toio(vaddr, (u8 *) buf, remain);
- head += remain;
-
- }
-
- if (count) {
- ch->ch_txcount += count;
- head &= tmask;
- writew(head, &(bs->tx_head));
- }
-
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
-
- /*
- * If this is the print device, and the
- * printer is still on, we need to turn it
- * off before going idle. If the buffer is
- * non-empty, wait until it goes empty.
- * Otherwise turn it off right now.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- tail = readw(&(bs->tx_tail)) & tmask;
-
- if (tail != head) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- } else {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
- }
-
- /* Update printer buffer empty time. */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
- && (ch->ch_digi.digi_bufsize > 0)) {
- ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return count;
-}
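
The copy path above splits a write that crosses the top of the circular transmit buffer into two pieces: one memcpy_toio() up to the wrap point, then a second from the start of the buffer. A userspace sketch of the same split, using a zero-based ring rather than the driver's ch_tstart-relative offsets; the names are hypothetical.

	#include <string.h>

	/* Copy 'count' bytes into a power-of-two circular buffer, splitting
	 * the copy at the wrap point. Returns the new head offset.
	 * Caller must ensure head < size and count <= free space. */
	static unsigned int ring_write(char *ring, unsigned int size,
				       unsigned int head, const char *buf,
				       unsigned int count)
	{
		unsigned int remain = size - head;	/* bytes before the wrap */

		if (count >= remain) {
			memcpy(ring + head, buf, remain);
			buf += remain;
			count -= remain;
			head = 0;			/* continue at the bottom */
		}
		memcpy(ring + head, buf, count);
		return (head + count) & (size - 1);
	}
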
-
-/*
- * dgap_tty_put_char()
- *
- * Put a character into ch->ch_buf
- *
- * - used by the line discipline for OPOST processing
- */
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
- /*
- * Simply call tty_write.
- */
- dgap_tty_write(tty, &c, 1);
- return 1;
-}
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_tty_tiocmget(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- int result;
- u8 mstat;
- ulong lock_flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- mstat = readb(&(ch->ch_bs->m_stat));
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- return result;
-}
-
-/*
- * dgap_tty_tiocmset()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_tty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (set & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (set & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- if (clear & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (clear & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_send_break()
- *
- * Send a Break, called by ld.
- */
-static int dgap_tty_send_break(struct tty_struct *tty, int msec)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- switch (msec) {
- case -1:
- msec = 0xFFFF;
- break;
- case 0:
- msec = 1;
- break;
- default:
- msec /= 10;
- break;
- }
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-#if 0
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-#endif
- dgap_cmdw(ch, SBREAK, (u16) msec, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
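
The switch above maps the break_ctl() millisecond argument onto the units the SBREAK command takes (apparently tens of milliseconds): -1 requests the longest break the FEP supports, 0 is rounded up to one unit, and anything else is divided by 10. A trivial sketch of that mapping, with a hypothetical helper name:

	static unsigned short break_msec_to_fep(int msec)
	{
		if (msec == -1)
			return 0xFFFF;	/* longest break the FEP allows */
		if (msec == 0)
			return 1;	/* minimum of one unit */
		return (unsigned short)(msec / 10);
	}
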
-
-/*
- * dgap_tty_wait_until_sent()
- *
- * wait until data has been transmitted, called by ld.
- */
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- dgap_wait_for_drain(tty);
-}
-
-/*
- * dgap_tty_send_xchar()
- *
- * send a high priority character, called by ld.
- */
-static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /*
- * This is technically what we should do.
- * However, the NIST tests specifically want
- * to see each XON or XOFF character that it
-	 * sends, so let's just send each character
- * by hand...
- */
-#if 0
- if (c == STOP_CHAR(tty))
- dgap_cmdw(ch, RPAUSE, 0, 0);
- else if (c == START_CHAR(tty))
- dgap_cmdw(ch, RRESUME, 0, 0);
- else
- dgap_wmove(ch, &c, 1);
-#else
- dgap_wmove(ch, &c, 1);
-#endif
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
-{
- int result;
- u8 mstat;
- ulong lock_flags;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- mstat = readb(&(ch->ch_bs->m_stat));
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- return put_user(result, value);
-}
-
-/*
- * dgap_set_modem_info()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, unsigned int command,
- unsigned int __user *value)
-{
- int ret;
- unsigned int arg;
- ulong lock_flags;
- ulong lock_flags2;
-
- ret = get_user(arg, value);
- if (ret)
- return ret;
-
- switch (command) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- break;
-
- case TIOCMBIC:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- break;
-
- case TIOCMSET:
- ch->ch_mforce = D_DTR(ch)|D_RTS(ch);
-
- if (arg & TIOCM_RTS)
- ch->ch_mval |= D_RTS(ch);
- else
- ch->ch_mval &= ~(D_RTS(ch));
-
- if (arg & TIOCM_DTR)
- ch->ch_mval |= (D_DTR(ch));
- else
- ch->ch_mval &= ~(D_DTR(ch));
-
- break;
-
- default:
- return -EINVAL;
- }
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_digigeta()
- *
- * Ioctl to get the information for ditty.
- */
-static int dgap_tty_digigeta(struct channel_t *ch,
- struct digi_t __user *retinfo)
-{
- struct digi_t tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return -EFAULT;
-
- memset(&tmp, 0, sizeof(tmp));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * dgap_tty_digiseta()
- *
- * Ioctl to set the information for ditty.
- */
-static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, struct digi_t __user *new_info)
-{
- struct digi_t new_digi;
- ulong lock_flags = 0;
- unsigned long lock_flags2;
-
- if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t)))
- return -EFAULT;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
-
- if (ch->ch_digi.digi_maxcps < 1)
- ch->ch_digi.digi_maxcps = 1;
-
- if (ch->ch_digi.digi_maxcps > 10000)
- ch->ch_digi.digi_maxcps = 10000;
-
- if (ch->ch_digi.digi_bufsize < 10)
- ch->ch_digi.digi_bufsize = 10;
-
- if (ch->ch_digi.digi_maxchar < 1)
- ch->ch_digi.digi_maxchar = 1;
-
- if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
- ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
-
- if (ch->ch_digi.digi_onlen > DIGI_PLEN)
- ch->ch_digi.digi_onlen = DIGI_PLEN;
-
- if (ch->ch_digi.digi_offlen > DIGI_PLEN)
- ch->ch_digi.digi_offlen = DIGI_PLEN;
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_digigetedelay()
- *
- * Ioctl to get the current edelay setting.
- */
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return -EFAULT;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EFAULT;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EFAULT;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EFAULT;
-
- memset(&tmp, 0, sizeof(tmp));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- tmp = readw(&(ch->ch_bs->edelay));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * dgap_tty_digisetedelay()
- *
- * Ioctl to set the EDELAY setting
- *
- */
-static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info)
-{
- int new_digi;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (copy_from_user(&new_digi, new_info, sizeof(int)))
- return -EFAULT;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- writew((u16) new_digi, &(ch->ch_bs->edelay));
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_digigetcustombaud()
- *
- * Ioctl to get the current custom baud rate setting.
- */
-static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un,
- int __user *retinfo)
-{
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return -EFAULT;
-
- memset(&tmp, 0, sizeof(tmp));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- tmp = dgap_get_custom_baud(ch);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * dgap_tty_digisetcustombaud()
- *
- * Ioctl to set the custom baud rate setting
- */
-static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info)
-{
- uint new_rate;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (copy_from_user(&new_rate, new_info, sizeof(unsigned int)))
- return -EFAULT;
-
- if (bd->bd_flags & BD_FEP5PLUS) {
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_custom_speed = new_rate;
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- }
-
- return 0;
-}
-
-/*
- * dgap_set_termios()
- */
-static void dgap_tty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long lock_flags;
- unsigned long lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- dgap_carrier(ch);
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_throttle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_flags |= (CH_RXBLOCK);
-#if 1
- dgap_cmdw(ch, RPAUSE, 0, 0);
-#endif
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-}
-
-static void dgap_tty_unthrottle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
-#if 1
- dgap_cmdw(ch, RRESUME, 0, 0);
-#endif
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static struct board_t *find_board_by_major(unsigned int major)
-{
- unsigned int i;
-
- for (i = 0; i < MAXBOARDS; i++) {
- struct board_t *brd = dgap_board[i];
-
- if (!brd)
- return NULL;
- if (major == brd->serial_driver->major ||
- major == brd->print_driver->major)
- return brd;
- }
-
- return NULL;
-}
-
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_open()
- *
- */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file)
-{
- struct board_t *brd;
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- uint major;
- uint minor;
- int rc;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head;
-
- major = MAJOR(tty_devnum(tty));
- minor = MINOR(tty_devnum(tty));
-
- brd = find_board_by_major(major);
- if (!brd)
- return -EIO;
-
- /*
-	 * If the board is not yet in the READY state, go to sleep
-	 * until it gets there or the open is cancelled.
- */
- rc = wait_event_interruptible(brd->state_wait,
- (brd->state & BOARD_READY));
-
- if (rc)
- return rc;
-
- spin_lock_irqsave(&brd->bd_lock, lock_flags);
-
- /* The wait above should guarantee this cannot happen */
- if (brd->state != BOARD_READY) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /* If opened device is greater than our number of ports, bail. */
- if (MINOR(tty_devnum(tty)) > brd->nasync) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- ch = brd->channels[minor];
- if (!ch) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /* Grab channel lock */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /* Figure out our type */
- if (major == brd->serial_driver->major) {
- un = &brd->channels[minor]->ch_tun;
- un->un_type = DGAP_SERIAL;
- } else if (major == brd->print_driver->major) {
- un = &brd->channels[minor]->ch_pun;
- un->un_type = DGAP_PRINT;
- } else {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /* Store our unit into driver_data, so we always have it available. */
- tty->driver_data = un;
-
- /*
- * Error if channel info pointer is NULL.
- */
- bs = ch->ch_bs;
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /*
- * Initialize tty's
- */
- if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
-
- /* Maybe do something here to the TTY struct as well? */
- }
-
- /*
-	 * Initialize if neither terminal nor printer is open.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
-
- ch->ch_mforce = 0;
- ch->ch_mval = 0;
-
- /*
- * Flush input queue.
- */
- head = readw(&(bs->rx_head));
- writew(head, &(bs->rx_tail));
-
- ch->ch_flags = 0;
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- /* TODO: flush our TTY struct here? */
- }
-
- dgap_carrier(ch);
- /*
- * Run param in case we changed anything
- */
- dgap_param(ch, brd, un->un_type);
-
- /*
- * follow protocol for opening port
- */
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-
- rc = dgap_block_til_ready(tty, file, ch);
-
- if (!un->un_tty)
- return -ENODEV;
-
- /* No going back now, increment our unit and channel counters */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- ch->ch_open_count++;
- un->un_open_count++;
- un->un_flags |= (UN_ISOPEN);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return rc;
-}
-
-/*
- * dgap_tty_close()
- *
- */
-static void dgap_tty_close(struct tty_struct *tty, struct file *file)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- /*
-	 * Determine whether this is the last close, and whether we agree
-	 * with the line discipline about which type of close it is.
- */
- if ((tty->count == 1) && (un->un_open_count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. un_open_count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
-		 * serial port won't be shut down.
- */
- un->un_open_count = 1;
- }
-
- if (--un->un_open_count < 0)
- un->un_open_count = 0;
-
- ch->ch_open_count--;
-
- if (ch->ch_open_count && un->un_open_count) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
- return;
- }
-
-	/* OK, it's the last close on the unit */
-
- un->un_flags |= UN_CLOSING;
-
- tty->closing = 1;
-
- /*
- * Only officially close channel if count is 0 and
- * DIGI_PRINTER bit is not set.
- */
- if ((ch->ch_open_count == 0) &&
- !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /* wait for output to drain */
- /* This will also return if we take an interrupt */
-
- dgap_wait_for_drain(tty);
-
- dgap_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- tty->closing = 0;
-
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
- if (ch->ch_c_cflag & HUPCL) {
- ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
- dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);
-
- /*
- * Go to sleep to ensure RTS/DTR
- * have been dropped for modems to see it.
- */
- spin_unlock_irqrestore(&ch->ch_lock,
- lock_flags);
-
- /* .25 second delay for dropping RTS/DTR */
- schedule_timeout_interruptible(msecs_to_jiffies(250));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- }
-
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
- ch->ch_baud_info = 0;
-
- }
-
- /*
-	 * Turn the print device off when closing the print device.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
-
- un->un_tty = NULL;
- un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
- tty->driver_data = NULL;
-
- wake_up_interruptible(&ch->ch_flags_wait);
- wake_up_interruptible(&un->un_flags_wait);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-}
-
-static void dgap_tty_start(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, RESUMETX, 0, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_stop(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, PAUSETX, 0, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * dgap_tty_flush_chars()
- *
- * Flush the cook buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data.
- * instead, it means "stop buffering and send it if you
- * haven't already." Just guess how I figured that out... SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgap_tty_flush_chars(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /* TODO: Do something here */
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
- *
- *****************************************************************************/
-
-/*
- * dgap_tty_ioctl()
- *
- * The usual assortment of ioctl's
- */
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int rc;
- u16 head;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
- void __user *uarg = (void __user *) arg;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -ENODEV;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -ENODEV;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -ENODEV;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -ENODEV;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (un->un_open_count <= 0) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- switch (cmd) {
-
- /* Here are all the standard ioctl's that we MUST implement */
-
- case TCSBRK:
- /*
- * TCSBRK is SVID version: non-zero arg --> no break
- * this behaviour is exploited by tcdrain().
- *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (rc)
- return rc;
-
- rc = dgap_wait_for_drain(tty);
-
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TCSBRKP:
-		/*
-		 * support for POSIX tcsendbreak()
-		 *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (rc)
- return rc;
-
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TIOCSBRK:
- /*
- * FEP5 doesn't support turning on a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (rc)
- return rc;
-
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TIOCCBRK:
- /*
- * FEP5 doesn't support turning off a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return 0;
-
- case TIOCGSOFTCAR:
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return put_user(C_CLOCAL(tty) ? 1 : 0,
- (unsigned long __user *) arg);
-
- case TIOCSSOFTCAR:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- rc = get_user(arg, (unsigned long __user *) arg);
- if (rc)
- return rc;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
- (arg ? CLOCAL : 0));
- dgap_param(ch, bd, un->un_type);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TIOCMGET:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_get_modem_info(ch, uarg);
-
- case TIOCMBIS:
- case TIOCMBIC:
- case TIOCMSET:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_set_modem_info(ch, bd, un, cmd, uarg);
-
- /*
- * Here are any additional ioctl's that we want to implement
- */
-
- case TCFLSH:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- rc = tty_check_change(tty);
- if (rc) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return rc;
- }
-
- if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
- if (!(un->un_type == DGAP_PRINT)) {
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
- writeb(0, &(ch->ch_bs->orun));
- }
- }
-
- if ((arg != TCOFLUSH) && (arg != TCIOFLUSH)) {
- /* pretend we didn't recognize this IOCTL */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return -ENOIOCTLCMD;
- }
-
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
-
- /* Can't hold any locks when calling tty_wakeup! */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- tty_wakeup(tty);
-
- /* pretend we didn't recognize this IOCTL */
- return -ENOIOCTLCMD;
-
- case TCSETSF:
- case TCSETSW:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- if (cmd == TCSETSF) {
- /* flush rx */
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
- }
-
- /* now wait for all the output to drain */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- /* pretend we didn't recognize this */
- return -ENOIOCTLCMD;
-
- case TCSETAW:
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- /* pretend we didn't recognize this */
- return -ENOIOCTLCMD;
-
- case TCXONC:
- /*
- * The Linux Line Discipline (LD) would do this for us if we
- * let it, but we have the special firmware options to do this
- * the "right way" regardless of hardware or software flow
-		 * control so we'll do it ourselves instead of letting the LD
- * do it.
- */
- rc = tty_check_change(tty);
- if (rc) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return rc;
- }
-
- switch (arg) {
-
- case TCOON:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_tty_start(tty);
- return 0;
- case TCOOFF:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_tty_stop(tty);
- return 0;
- case TCION:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return -ENOIOCTLCMD;
- case TCIOFF:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return -ENOIOCTLCMD;
- default:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EINVAL;
- }
-
- case DIGI_GETA:
- /* get information for ditty */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digigeta(ch, uarg);
-
- case DIGI_SETAW:
- case DIGI_SETAF:
-
- /* set information for ditty */
- if (cmd == (DIGI_SETAW)) {
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- } else
- tty_ldisc_flush(tty);
- /* fall thru */
-
- case DIGI_SETA:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digiseta(ch, bd, un, uarg);
-
- case DIGI_GEDELAY:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digigetedelay(tty, uarg);
-
- case DIGI_SEDELAY:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digisetedelay(ch, bd, un, uarg);
-
- case DIGI_GETCUSTOMBAUD:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digigetcustombaud(ch, un, uarg);
-
- case DIGI_SETCUSTOMBAUD:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digisetcustombaud(ch, bd, un, uarg);
-
- case DIGI_RESET_PORT:
- dgap_firmware_reset_port(ch);
- dgap_param(ch, bd, un->un_type);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return 0;
-
- default:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return -ENOIOCTLCMD;
- }
-}
-
-static const struct tty_operations dgap_tty_ops = {
- .open = dgap_tty_open,
- .close = dgap_tty_close,
- .write = dgap_tty_write,
- .write_room = dgap_tty_write_room,
- .flush_buffer = dgap_tty_flush_buffer,
- .chars_in_buffer = dgap_tty_chars_in_buffer,
- .flush_chars = dgap_tty_flush_chars,
- .ioctl = dgap_tty_ioctl,
- .set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
- .start = dgap_tty_start,
- .throttle = dgap_tty_throttle,
- .unthrottle = dgap_tty_unthrottle,
- .hangup = dgap_tty_hangup,
- .put_char = dgap_tty_put_char,
- .tiocmget = dgap_tty_tiocmget,
- .tiocmset = dgap_tty_tiocmset,
- .break_ctl = dgap_tty_send_break,
- .wait_until_sent = dgap_tty_wait_until_sent,
- .send_xchar = dgap_tty_send_xchar
-};
-
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-static int dgap_tty_register(struct board_t *brd)
-{
- int rc;
-
- brd->serial_driver = tty_alloc_driver(MAXPORTS,
- TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
- if (IS_ERR(brd->serial_driver))
- return PTR_ERR(brd->serial_driver);
-
- snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
- brd->boardnum);
- brd->serial_driver->name = brd->serial_name;
- brd->serial_driver->name_base = 0;
- brd->serial_driver->major = 0;
- brd->serial_driver->minor_start = 0;
- brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->serial_driver->init_termios = dgap_default_termios;
- brd->serial_driver->driver_name = DRVSTR;
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->serial_driver, &dgap_tty_ops);
-
- /*
- * If we're doing transparent print, we have to do all of the above
- * again, separately so we don't get the LD confused about what major
- * we are when we get into the dgap_tty_open() routine.
- */
- brd->print_driver = tty_alloc_driver(MAXPORTS,
- TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
- if (IS_ERR(brd->print_driver)) {
- rc = PTR_ERR(brd->print_driver);
- goto free_serial_drv;
- }
-
- snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
- brd->boardnum);
- brd->print_driver->name = brd->print_name;
- brd->print_driver->name_base = 0;
- brd->print_driver->major = 0;
- brd->print_driver->minor_start = 0;
- brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->print_driver->init_termios = dgap_default_termios;
- brd->print_driver->driver_name = DRVSTR;
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->print_driver, &dgap_tty_ops);
-
- /* Register tty devices */
- rc = tty_register_driver(brd->serial_driver);
- if (rc < 0)
- goto free_print_drv;
-
- /* Register Transparent Print devices */
- rc = tty_register_driver(brd->print_driver);
- if (rc < 0)
- goto unregister_serial_drv;
-
- return 0;
-
-unregister_serial_drv:
- tty_unregister_driver(brd->serial_driver);
-free_print_drv:
- put_tty_driver(brd->print_driver);
-free_serial_drv:
- put_tty_driver(brd->serial_driver);
-
- return rc;
-}
-
-static void dgap_tty_unregister(struct board_t *brd)
-{
- tty_unregister_driver(brd->print_driver);
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->print_driver);
- put_tty_driver(brd->serial_driver);
-}
-
-static int dgap_alloc_flipbuf(struct board_t *brd)
-{
- /*
- * allocate flip buffer for board.
- */
- brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipbuf)
- return -ENOMEM;
-
- brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipflagbuf) {
- kfree(brd->flipbuf);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void dgap_free_flipbuf(struct board_t *brd)
-{
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-}
-
-static struct board_t *dgap_verify_board(struct device *p)
-{
- struct board_t *bd;
-
- if (!p)
- return NULL;
-
- bd = dev_get_drvdata(p);
- if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY)
- return NULL;
-
- return bd;
-}
-
-static ssize_t dgap_ports_state_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_open_count ? "Open" : "Closed");
- }
- return count;
-}
-static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
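
These sysfs show() routines each build their output by appending one line per port into the single sysfs page, passing the remaining space (PAGE_SIZE - count) to snprintf() on every iteration. A userspace sketch of that accumulation pattern follows; BUF_SIZE and the helper name are illustrative, and note that snprintf() returns the would-be length, so a real implementation should also cap 'count' at the buffer size.

	#include <stdio.h>

	#define BUF_SIZE 4096	/* stand-in for PAGE_SIZE */

	/* Append one "port state" line per channel, the way the show()
	 * callbacks here accumulate their output. Returns the byte count. */
	static int format_port_states(char *buf, int nports, const int *open_counts)
	{
		int count = 0;
		int i;

		for (i = 0; i < nports; i++)
			count += snprintf(buf + count, BUF_SIZE - count, "%d %s\n",
					  i, open_counts[i] ? "Open" : "Closed");
		return count;
	}
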
-
-static ssize_t dgap_ports_baud_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %d\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_baud_info);
- }
- return count;
-}
-static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
-
-static ssize_t dgap_ports_msignals_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count)
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s %s %s %s %s %s\n",
- bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat &
- UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat &
- UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_RI) ? "RI" : "");
- else
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
- }
- return count;
-}
-static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
-
-static ssize_t dgap_ports_iflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_iflag);
- return count;
-}
-static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
-
-static ssize_t dgap_ports_cflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_cflag);
- return count;
-}
-static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
-
-static ssize_t dgap_ports_oflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_oflag);
- return count;
-}
-static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
-
-static ssize_t dgap_ports_lflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_lflag);
- return count;
-}
-static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
-
-static ssize_t dgap_ports_digi_flag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_digi.digi_flags);
- return count;
-}
-static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
-
-static ssize_t dgap_ports_rxcount_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_rxcount);
- return count;
-}
-static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
-
-static ssize_t dgap_ports_txcount_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_txcount);
- return count;
-}
-static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
-
-static ssize_t dgap_tty_state_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ?
- "Open" : "Closed");
-}
-static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
-
-static ssize_t dgap_tty_baud_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
-}
-static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
-
-static ssize_t dgap_tty_msignals_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- if (ch->ch_open_count) {
- return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- }
- return 0;
-}
-static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
-
-static ssize_t dgap_tty_iflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
-
-static ssize_t dgap_tty_cflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
-
-static ssize_t dgap_tty_oflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
-
-static ssize_t dgap_tty_lflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
-
-static ssize_t dgap_tty_digi_flag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
-
-static ssize_t dgap_tty_rxcount_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
-
-static ssize_t dgap_tty_txcount_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
-
-static ssize_t dgap_tty_name_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int cn;
- int bn;
- struct cnode *cptr;
- int found = FALSE;
- int ncount = 0;
- int starto = 0;
- int i;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- bn = bd->boardnum;
- cn = ch->ch_portnum;
-
- for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
-
- if ((cptr->type == BNODE) &&
- ((cptr->u.board.type == APORT2_920P) ||
- (cptr->u.board.type == APORT4_920P) ||
- (cptr->u.board.type == APORT8_920P) ||
- (cptr->u.board.type == PAPORT4) ||
- (cptr->u.board.type == PAPORT8))) {
-
- found = TRUE;
- if (cptr->u.board.v_start)
- starto = cptr->u.board.start;
- else
- starto = 1;
- }
-
- if (cptr->type == TNODE && found == TRUE) {
- char *ptr1;
-
- if (strstr(cptr->u.ttyname, "tty")) {
- ptr1 = cptr->u.ttyname;
- ptr1 += 3;
- } else
- ptr1 = cptr->u.ttyname;
-
- for (i = 0; i < dgap_config_get_num_prts(bd); i++) {
- if (cn != i)
- continue;
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ?
- "pr" : "tty",
- ptr1, i + starto);
- }
- }
-
- if (cptr->type == CNODE) {
-
- for (i = 0; i < cptr->u.conc.nport; i++) {
- if (cn != (i + ncount))
- continue;
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
- (un->un_type == DGAP_PRINT) ?
- "pr" : "tty",
- cptr->u.conc.id,
- i + (cptr->u.conc.v_start ?
- cptr->u.conc.start : 1));
- }
-
- ncount += cptr->u.conc.nport;
- }
-
- if (cptr->type == MNODE) {
-
- for (i = 0; i < cptr->u.module.nport; i++) {
- if (cn != (i + ncount))
- continue;
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
- (un->un_type == DGAP_PRINT) ?
- "pr" : "tty",
- cptr->u.module.id,
- i + (cptr->u.module.v_start ?
- cptr->u.module.start : 1));
- }
-
- ncount += cptr->u.module.nport;
- }
- }
-
- return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
-}
-static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
-
-static struct attribute *dgap_sysfs_tty_entries[] = {
- &dev_attr_state.attr,
- &dev_attr_baud.attr,
- &dev_attr_msignals.attr,
- &dev_attr_iflag.attr,
- &dev_attr_cflag.attr,
- &dev_attr_oflag.attr,
- &dev_attr_lflag.attr,
- &dev_attr_digi_flag.attr,
- &dev_attr_rxcount.attr,
- &dev_attr_txcount.attr,
- &dev_attr_custom_name.attr,
- NULL
-};
-
-
-/*
- * This function creates the sysfs files that export each signal's status;
- * each value is put in a separate file.
- */
-static void dgap_create_ports_sysfiles(struct board_t *bd)
-{
- dev_set_drvdata(&bd->pdev->dev, bd);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
-
-/* Remove all the sysfs files created for that board. */
-static void dgap_remove_ports_sysfiles(struct board_t *bd)
-{
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
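The ports_* attributes registered above are ordinary device attributes on the board's PCI device, so they appear as files in that device's sysfs directory. A standalone sketch (not part of the driver) that dumps them from userspace follows; the PCI address in the path is hypothetical, and the files are readable by root only (mode S_IRUSR).

#include <stdio.h>

int main(void)
{
	static const char *attrs[] = {
		"ports_state", "ports_baud", "ports_msignals",
		"ports_iflag", "ports_cflag", "ports_oflag",
		"ports_lflag", "ports_digi_flag",
		"ports_rxcount", "ports_txcount",
	};
	/* Hypothetical PCI address of a dgap board. */
	const char *dev = "/sys/bus/pci/devices/0000:01:00.0";
	char path[256], line[256];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", dev, attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute or board not present */
		printf("== %s ==\n", attrs[i]);
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}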
-
-/*
- * Copies the BIOS code from the user to the board,
- * and starts the BIOS running.
- */
-static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
-{
- u8 __iomem *addr;
- uint offset;
- unsigned int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * clear POST area
- */
- for (i = 0; i < 16; i++)
- writeb(0, addr + POSTAREA + i);
-
- /*
- * Download bios
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ubios, len);
-
- writel(0x0bf00401, addr);
- writel(0, (addr + 4));
-
- /* Clear the reset, and change states. */
- writeb(FEPCLR, brd->re_map_port);
-}
-
-/*
- * Checks to see if the BIOS completed running on the card.
- */
-static int dgap_test_bios(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + POSTAREA);
-
- /*
- * It can take 5-6 seconds for a board to
- * pass the bios self test and post results.
- * Give it 10 seconds.
- */
- brd->wait_for_bios = 0;
- while (brd->wait_for_bios < 1000) {
- /* Check to see if BIOS thinks board is good. (GD). */
- if (word == *(u16 *) "GD")
- return 0;
- msleep_interruptible(10);
- brd->wait_for_bios++;
- word = readw(addr + POSTAREA);
- }
-
-	/* Gave up on the board after waiting too long */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev, "%s failed diagnostics. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOBIOS;
-
- return -EIO;
-}
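dgap_test_bios() above (and dgap_test_fep() below) decide success by reading two ASCII status bytes as a single 16-bit word and comparing it against the first two bytes of a string literal ("GD", "OS", "5A"). A standalone sketch of what that comparison amounts to on the little-endian hosts these PCI boards are driven from:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t post[2] = { 'G', 'D' };	/* bytes as they sit in board RAM */
	uint16_t word, magic;

	/* What readw() hands back on a little-endian host. */
	memcpy(&word, post, sizeof(word));
	/* The "*(u16 *)\"GD\"" side of the comparison. */
	memcpy(&magic, "GD", sizeof(magic));

	printf("word=0x%04x magic=0x%04x match=%d\n",
	       word, magic, word == magic);
	return 0;
}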
-
-/*
- * Copies the FEP code from the user to the board,
- * and starts the FEP running.
- */
-static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
-{
- u8 __iomem *addr;
- uint offset;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * Download FEP
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ufep, len);
-
- /*
- * If board is a concentrator product, we need to give
- * it its config string describing how the concentrators look.
- */
- if ((brd->type == PCX) || (brd->type == PEPC)) {
- u8 string[100];
- u8 __iomem *config;
- u8 *xconfig;
- unsigned int i = 0;
-
- xconfig = dgap_create_config_string(brd, string);
-
- /* Write string to board memory */
- config = addr + CONFIG;
- for (; i < CONFIGSIZE; i++, config++, xconfig++) {
- writeb(*xconfig, config);
- if ((*xconfig & 0xff) == 0xff)
- break;
- }
- }
-
- writel(0xbfc01004, (addr + 0xc34));
- writel(0x3, (addr + 0xc30));
-
-}
-
-/*
- * Waits for the FEP to report that it's ready for us to use.
- */
-static int dgap_test_fep(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + FEPSTAT);
-
- /*
- * It can take 2-3 seconds for the FEP to
- * be up and running. Give it 5 secs.
- */
- brd->wait_for_fep = 0;
- while (brd->wait_for_fep < 500) {
- /* Check to see if FEP is up and running now. */
- if (word == *(u16 *) "OS") {
- /*
- * Check to see if the board can support FEP5+ commands.
- */
- word = readw(addr + FEP5_PLUS);
- if (word == *(u16 *) "5A")
- brd->bd_flags |= BD_FEP5PLUS;
-
- return 0;
- }
- msleep_interruptible(10);
- brd->wait_for_fep++;
- word = readw(addr + FEPSTAT);
- }
-
-	/* Gave up on the board after waiting too long */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev,
- "FEPOS for %s not functioning. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
-
- return -EIO;
-}
-
-/*
- * Physically forces the FEP5 card to reset itself.
- */
-static void dgap_do_reset_board(struct board_t *brd)
-{
- u8 check;
- u32 check1;
- u32 check2;
- unsigned int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
- !brd->re_map_membase || !brd->re_map_port)
- return;
-
- /* FEPRST does not vary among supported boards */
- writeb(FEPRST, brd->re_map_port);
-
- for (i = 0; i <= 1000; i++) {
- check = readb(brd->re_map_port) & 0xe;
- if (check == FEPRST)
- break;
- udelay(10);
-
- }
- if (i > 1000) {
- dev_warn(&brd->pdev->dev,
- "dgap: Board not resetting... Failing board.\n");
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- /*
- * Make sure there really is memory out there.
- */
- writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
- writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
- check1 = readl(brd->re_map_membase + LOWMEM);
- check2 = readl(brd->re_map_membase + HIGHMEM);
-
- if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
- dev_warn(&brd->pdev->dev,
- "No memory at %p for board.\n",
- brd->re_map_membase);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-}
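The memory probe above writes two distinct patterns at a low and a high offset and reads them back, so a missing or mis-mapped board cannot pass by echoing a single floating value. A standalone sketch of the same read-back test over an ordinary buffer that stands in for the mapped window (the offsets mirror the LOWMEM/HIGHMEM byte offsets):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int memory_present(volatile uint32_t *low, volatile uint32_t *high)
{
	/* Two different patterns so a stuck bus value cannot match both. */
	*low  = 0xa55a3cc3;
	*high = 0x5aa5c33c;
	return *low == 0xa55a3cc3 && *high == 0x5aa5c33c;
}

int main(void)
{
	uint32_t fake_board_ram[0x2000];	/* stands in for mapped memory */

	memset(fake_board_ram, 0, sizeof(fake_board_ram));
	printf("memory %s\n",
	       memory_present(&fake_board_ram[0x0100 / 4],
			      &fake_board_ram[0x7f00 / 4]) ?
	       "present" : "missing");
	return 0;
}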
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-/*
- * Sends a concentrator image into the FEP5 board.
- */
-static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
-{
- char __iomem *vaddr;
- u16 offset;
- struct downld_t *to_dp;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- vaddr = brd->re_map_membase;
-
- offset = readw((u16 *) (vaddr + DOWNREQ));
- to_dp = (struct downld_t *) (vaddr + (int) offset);
- memcpy_toio(to_dp, uaddr, len);
-
- /* Tell card we have data for it */
- writew(0, vaddr + (DOWNREQ));
-
- brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
-}
-#endif
-
-#define EXPANSION_ROM_SIZE (64 * 1024)
-#define FEP5_ROM_MAGIC (0xFEFFFFFF)
-
-static void dgap_get_vpd(struct board_t *brd)
-{
- u32 magic;
- u32 base_offset;
- u16 rom_offset;
- u16 vpd_offset;
- u16 image_length;
- u16 i;
- u8 byte1;
- u8 byte2;
-
- /*
- * Poke the magic number at the PCI Rom Address location.
- * If VPD is supported, the value read from that address
- * will be non-zero.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- /* VPD not supported, bail */
- if (!magic)
- return;
-
- /*
- * To get to the OTPROM memory, we have to send the boards base
- * address or'ed with 1 into the PCI Rom Address location.
- */
- magic = brd->membase | 0x01;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- byte1 = readb(brd->re_map_membase);
- byte2 = readb(brd->re_map_membase + 1);
-
- /*
- * If the board correctly swapped to the OTPROM memory,
- * the first 2 bytes (header) should be 0x55, 0xAA
- */
- if (byte1 == 0x55 && byte2 == 0xAA) {
-
- base_offset = 0;
-
- /*
- * We have to run through all the OTPROM memory looking
- * for the VPD offset.
- */
- while (base_offset <= EXPANSION_ROM_SIZE) {
-
- /*
- * Lots of magic numbers here.
- *
- * The VPD offset is located inside the ROM Data
- * Structure.
- *
- * We also have to remember the length of each
- * ROM Data Structure, so we can "hop" to the next
- * entry if the VPD isn't in the current
- * ROM Data Structure.
- */
- rom_offset = readw(brd->re_map_membase +
- base_offset + 0x18);
- image_length = readw(brd->re_map_membase +
- rom_offset + 0x10) * 512;
- vpd_offset = readw(brd->re_map_membase +
- rom_offset + 0x08);
-
- /* Found the VPD entry */
- if (vpd_offset)
- break;
-
- /* We didn't find a VPD entry, go to next ROM entry. */
- base_offset += image_length;
-
- byte1 = readb(brd->re_map_membase + base_offset);
- byte2 = readb(brd->re_map_membase + base_offset + 1);
-
- /*
- * If the new ROM offset doesn't have 0x55, 0xAA
- * as its header, we have run out of ROM.
- */
- if (byte1 != 0x55 || byte2 != 0xAA)
- break;
- }
-
- /*
- * If we have a VPD offset, then mark the board
- * as having a valid VPD, and copy VPDSIZE (512) bytes of
- * that VPD to the buffer we have in our board structure.
- */
- if (vpd_offset) {
- brd->bd_flags |= BD_HAS_VPD;
- for (i = 0; i < VPDSIZE; i++) {
- brd->vpd[i] = readb(brd->re_map_membase +
- vpd_offset + i);
- }
- }
- }
-
- /*
- * We MUST poke the magic number at the PCI Rom Address location again.
- * This makes the card report the regular board memory back to us,
- * rather than the OTPROM memory.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-}
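The ROM walk above keys off the 0x55 0xAA image header, a data-structure pointer at offset 0x18, an image length (in 512-byte units) at +0x10 of that structure, and a VPD pointer at +0x08. A standalone sketch of the same walk over a plain byte buffer; the fake ROM contents in main() are made up for illustration, and the data-structure pointer is treated as an absolute offset within the buffer, as the function above does.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t rd16(const uint8_t *rom, size_t off)
{
	return (uint16_t)rom[off] | ((uint16_t)rom[off + 1] << 8);
}

/* Returns the VPD offset, or 0 if no image advertises one. */
static uint16_t find_vpd_offset(const uint8_t *rom, size_t rom_size)
{
	size_t base = 0;

	while (base + 0x1a <= rom_size &&
	       rom[base] == 0x55 && rom[base + 1] == 0xAA) {
		uint16_t data_off = rd16(rom, base + 0x18);
		uint16_t vpd_off;
		size_t img_len;

		if ((size_t)data_off + 0x12 > rom_size)
			break;			/* malformed pointer */
		vpd_off = rd16(rom, data_off + 0x08);
		img_len = (size_t)rd16(rom, data_off + 0x10) * 512;

		if (vpd_off)
			return vpd_off;		/* found the VPD entry */
		if (!img_len)
			break;			/* avoid an infinite loop */
		base += img_len;		/* hop to the next ROM image */
	}
	return 0;
}

int main(void)
{
	uint8_t rom[1024] = { 0 };

	rom[0] = 0x55; rom[1] = 0xAA;		/* image header */
	rom[0x18] = 0x40;			/* data structure at 0x40 */
	rom[0x40 + 0x09] = 0x02;		/* VPD pointer: 0x0200 */
	rom[0x40 + 0x10] = 0x01;		/* image length: 1 * 512 */

	printf("VPD offset: 0x%x\n", find_vpd_offset(rom, sizeof(rom)));
	return 0;
}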
-
-
-static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
-
-
-static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
-
-
-static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
-
-
-static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
-}
-static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
-
-static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
-}
-
-static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
- dgap_driver_pollrate_store);
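Since the pollrate attribute is created on the PCI driver itself (see dgap_create_driver_sysfiles() below), it should appear under the driver's sysfs directory. A small userspace sketch (not part of the driver) that writes a new rate and reads it back; the path assumes the usual /sys/bus/pci/drivers/<name>/ layout, and root is needed because the attribute is mode 0600.

#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/bus/pci/drivers/dgap/pollrate";
	FILE *f;
	char buf[32];

	f = fopen(attr, "w");		/* needs root: attribute is 0600 */
	if (f) {
		fprintf(f, "%d\n", 20);	/* poll every 20 ms */
		fclose(f);
	}

	f = fopen(attr, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("pollrate is now: %s", buf);	/* e.g. "20ms" */
	if (f)
		fclose(f);
	return 0;
}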
-
-
-static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgap_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
-
- return rc;
-}
-
-static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- struct device_driver *driverfs = &dgap_driver->driver;
-
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_pollrate);
- driver_remove_file(driverfs, &driver_attr_pollcounter);
-}
-
-static struct attribute_group dgap_tty_attribute_group = {
- .name = NULL,
- .attrs = dgap_sysfs_tty_entries,
-};
-
-static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
-{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
- if (ret)
- return;
-
- dev_set_drvdata(c, un);
-
-}
-
-static void dgap_remove_tty_sysfs(struct device *c)
-{
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
-}
-
-/*
- * Create pr and tty device entries
- */
-static int dgap_tty_register_ports(struct board_t *brd)
-{
- struct channel_t *ch;
- int i;
- int ret;
-
- brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
- GFP_KERNEL);
- if (!brd->serial_ports)
- return -ENOMEM;
-
- brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
- GFP_KERNEL);
- if (!brd->printer_ports) {
- ret = -ENOMEM;
- goto free_serial_ports;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_init(&brd->serial_ports[i]);
- tty_port_init(&brd->printer_ports[i]);
- }
-
- ch = brd->channels[0];
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
-
- struct device *classp;
-
- classp = tty_port_register_device(&brd->serial_ports[i],
- brd->serial_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_tun, classp);
- ch->ch_tun.un_sysfs = classp;
-
- classp = tty_port_register_device(&brd->printer_ports[i],
- brd->print_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_pun, classp);
- ch->ch_pun.un_sysfs = classp;
- }
- dgap_create_ports_sysfiles(brd);
-
- return 0;
-
-unregister_ttys:
- while (i >= 0) {
- ch = brd->channels[i];
- if (ch->ch_tun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
- tty_unregister_device(brd->serial_driver, i);
- }
-
- if (ch->ch_pun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
- tty_unregister_device(brd->print_driver, i);
- }
- i--;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- tty_port_destroy(&brd->printer_ports[i]);
- }
-
- kfree(brd->printer_ports);
- brd->printer_ports = NULL;
-
-free_serial_ports:
- kfree(brd->serial_ports);
- brd->serial_ports = NULL;
-
- return ret;
-}
-
-/*
- * dgap_cleanup_tty()
- *
- * Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
- */
-static void dgap_cleanup_tty(struct board_t *brd)
-{
- struct device *dev;
- unsigned int i;
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- dev = brd->channels[i]->ch_tun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->serial_driver, i);
- }
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->serial_driver);
- kfree(brd->serial_ports);
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->printer_ports[i]);
- dev = brd->channels[i]->ch_pun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->print_driver, i);
- }
- tty_unregister_driver(brd->print_driver);
- put_tty_driver(brd->print_driver);
- kfree(brd->printer_ports);
-}
-
-static int dgap_request_irq(struct board_t *brd)
-{
- int rc;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -ENODEV;
-
- /*
- * Set up our interrupt handler if we are set to do interrupts.
- */
- if (dgap_config_get_useintr(brd) && brd->irq) {
-
- rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
-
- if (!rc)
- brd->intr_used = 1;
- }
- return 0;
-}
-
-static void dgap_free_irq(struct board_t *brd)
-{
- if (brd->intr_used && brd->irq)
- free_irq(brd->irq, brd);
-}
-
-static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
- struct board_t *brd)
-{
- const struct firmware *fw;
- char *tmp_ptr;
- int ret;
- char *dgap_config_buf;
-
- dgap_get_vpd(brd);
- dgap_do_reset_board(brd);
-
- if (fw_info[card_type].conf_name) {
- ret = request_firmware(&fw, fw_info[card_type].conf_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "config file %s not found\n",
- fw_info[card_type].conf_name);
- return ret;
- }
-
- dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
- if (!dgap_config_buf) {
- release_firmware(fw);
- return -ENOMEM;
- }
-
- memcpy(dgap_config_buf, fw->data, fw->size);
- release_firmware(fw);
-
- /*
- * preserve dgap_config_buf
- * as dgap_parsefile would
- * otherwise alter it.
- */
- tmp_ptr = dgap_config_buf;
-
- if (dgap_parsefile(&tmp_ptr) != 0) {
- kfree(dgap_config_buf);
- return -EINVAL;
- }
- kfree(dgap_config_buf);
- }
-
- /*
- * Match this board to a config the user created for us.
- */
- brd->bd_config =
- dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
-
- /*
- * Because the 4 port Xr products share the same PCI ID
- * as the 8 port Xr products, if we receive a NULL config
- * back, and this is a PAPORT8 board, retry with a
- * PAPORT4 attempt as well.
- */
- if (brd->type == PAPORT8 && !brd->bd_config)
- brd->bd_config =
- dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
-
- if (!brd->bd_config) {
- dev_err(&pdev->dev, "No valid configuration found\n");
- return -EINVAL;
- }
-
- if (fw_info[card_type].bios_name) {
- ret = request_firmware(&fw, fw_info[card_type].bios_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "bios file %s not found\n",
- fw_info[card_type].bios_name);
- return ret;
- }
- dgap_do_bios_load(brd, fw->data, fw->size);
- release_firmware(fw);
-
- /* Wait for BIOS to test board... */
- ret = dgap_test_bios(brd);
- if (ret)
- return ret;
- }
-
- if (fw_info[card_type].fep_name) {
- ret = request_firmware(&fw, fw_info[card_type].fep_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "dgap: fep file %s not found\n",
- fw_info[card_type].fep_name);
- return ret;
- }
- dgap_do_fep_load(brd, fw->data, fw->size);
- release_firmware(fw);
-
- /* Wait for FEP to load on board... */
- ret = dgap_test_fep(brd);
- if (ret)
- return ret;
- }
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-	/*
-	 * If this is a CX or EPCX, we need to see if the firmware
-	 * is requesting a concentrator image from us.
-	 */
-	if ((brd->type == PCX) || (brd->type == PEPC)) {
-		u8 __iomem *vaddr = brd->re_map_membase;
-		/* Nonzero if the FEP is requesting a concentrator image. */
-		u16 check = readw(vaddr + DOWNREQ);
-
-		if (fw_info[card_type].con_name && check) {
-			ret = request_firmware(&fw, fw_info[card_type].con_name,
-					       &pdev->dev);
-			if (ret) {
-				dev_err(&pdev->dev, "conc file %s not found\n",
-					fw_info[card_type].con_name);
-				return ret;
-			}
-			/*
-			 * dgap_do_conc_load() copies the image to the
-			 * offset the FEP requested via DOWNREQ.
-			 */
-			dgap_do_conc_load(brd, (u8 *)fw->data, fw->size);
-			release_firmware(fw);
-		}
-	}
-#endif
-
- return 0;
-}
-
-/*
- * dgap_tty_init()
- *
- * Init the tty subsystem. Called once per board, after the board's
- * firmware has been downloaded and initialized.
- */
-static int dgap_tty_init(struct board_t *brd)
-{
- int i;
- int tlw;
- uint true_count;
- u8 __iomem *vaddr;
- u8 modem;
- struct channel_t *ch;
- struct bs_t __iomem *bs;
- struct cm_t __iomem *cm;
- int ret;
-
- /*
- * Initialize board structure elements.
- */
-
- vaddr = brd->re_map_membase;
- true_count = readw((vaddr + NCHAN));
-
- brd->nasync = dgap_config_get_num_prts(brd);
-
- if (!brd->nasync)
- brd->nasync = brd->maxports;
-
- if (brd->nasync > brd->maxports)
- brd->nasync = brd->maxports;
-
- if (true_count != brd->nasync) {
- dev_warn(&brd->pdev->dev,
- "%s configured for %d ports, has %d ports.\n",
- brd->name, brd->nasync, true_count);
-
- if ((brd->type == PPCM) &&
- (true_count == 64 || true_count == 0)) {
- dev_warn(&brd->pdev->dev,
- "Please make SURE the EBI cable running from the card\n");
- dev_warn(&brd->pdev->dev,
- "to each EM module is plugged into EBI IN!\n");
- }
-
- brd->nasync = true_count;
-
- /* If no ports, don't bother going any further */
- if (!brd->nasync) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return -EIO;
- }
- }
-
- /*
- * Allocate channel memory that might not have been allocated
- * when the driver was first loaded.
- */
- for (i = 0; i < brd->nasync; i++) {
- brd->channels[i] =
- kzalloc(sizeof(struct channel_t), GFP_KERNEL);
- if (!brd->channels[i]) {
- ret = -ENOMEM;
- goto free_chan;
- }
- }
-
- ch = brd->channels[0];
- vaddr = brd->re_map_membase;
-
- bs = (struct bs_t __iomem *) ((ulong) vaddr + CHANBUF);
- cm = (struct cm_t __iomem *) ((ulong) vaddr + CMDBUF);
-
- brd->bd_bs = bs;
-
- /* Set up channel variables */
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
-
- spin_lock_init(&ch->ch_lock);
-
- /* Store all our magic numbers */
- ch->magic = DGAP_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGAP_UNIT_MAGIC;
- ch->ch_tun.un_type = DGAP_SERIAL;
- ch->ch_tun.un_ch = ch;
- ch->ch_tun.un_dev = i;
-
- ch->ch_pun.magic = DGAP_UNIT_MAGIC;
- ch->ch_pun.un_type = DGAP_PRINT;
- ch->ch_pun.un_ch = ch;
- ch->ch_pun.un_dev = i;
-
- ch->ch_vaddr = vaddr;
- ch->ch_bs = bs;
- ch->ch_cm = cm;
- ch->ch_bd = brd;
- ch->ch_portnum = i;
- ch->ch_digi = dgap_digi_init;
-
- /*
- * Set up digi dsr and dcd bits based on altpin flag.
- */
- if (dgap_config_get_altpin(brd)) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- ch->ch_digi.digi_flags |= DIGI_ALTPIN;
- } else {
- ch->ch_cd = DM_CD;
- ch->ch_dsr = DM_DSR;
- }
-
- ch->ch_taddr = vaddr + (ioread16(&(ch->ch_bs->tx_seg)) << 4);
- ch->ch_raddr = vaddr + (ioread16(&(ch->ch_bs->rx_seg)) << 4);
- ch->ch_tx_win = 0;
- ch->ch_rx_win = 0;
- ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
- ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
- ch->ch_tstart = 0;
- ch->ch_rstart = 0;
-
- /*
- * Set queue water marks, interrupt mask,
- * and general tty parameters.
- */
- tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
- ch->ch_tsize / 2;
- ch->ch_tlw = tlw;
-
- dgap_cmdw(ch, STLOW, tlw, 0);
-
- dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
-
- dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
-
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
-
- init_waitqueue_head(&ch->ch_flags_wait);
- init_waitqueue_head(&ch->ch_tun.un_flags_wait);
- init_waitqueue_head(&ch->ch_pun.un_flags_wait);
-
- /* Turn on all modem interrupts for now */
- modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
- writeb(modem, &(ch->ch_bs->m_int));
-
- /*
- * Set edelay to 0 if interrupts are turned on,
- * otherwise set edelay to the usual 100.
- */
- if (brd->intr_used)
- writew(0, &(ch->ch_bs->edelay));
- else
- writew(100, &(ch->ch_bs->edelay));
-
- writeb(1, &(ch->ch_bs->idata));
- }
-
- return 0;
-
-free_chan:
- while (--i >= 0) {
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- return ret;
-}
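The per-channel setup above locates the transmit and receive buffers segment-style (tx_seg/rx_seg shifted left by four and added to the mapped base) and then programs the FEP water marks from the buffer sizes. A standalone sketch of just the water-mark arithmetic, using made-up buffer sizes:

#include <stdio.h>

/* Transmit low water: 5/8 of large buffers (>= 2000 bytes), else 1/2. */
static int tx_low_water(int tsize)
{
	return tsize >= 2000 ? (tsize * 5) / 8 : tsize / 2;
}

int main(void)
{
	int sizes[] = { 1024, 2048, 4096 };	/* example buffer sizes */
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int tsize = sizes[i];
		int rsize = sizes[i];

		/* Receive low/high water are 1/2 and 7/8 of the buffer. */
		printf("size %4d: STLOW=%4d SRLOW=%4d SRHIGH=%4d\n",
		       tsize, tx_low_water(tsize), rsize / 2,
		       (7 * rsize) / 8);
	}
	return 0;
}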
-
-/*
- * dgap_tty_free()
- *
- * Free the channels which were allocated in dgap_tty_init().
- */
-static void dgap_tty_free(struct board_t *brd)
-{
- int i;
-
- for (i = 0; i < brd->nasync; i++)
- kfree(brd->channels[i]);
-}
-
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
- struct board_t *brd;
-
- if (dgap_numboards >= MAXBOARDS)
- return -EPERM;
-
- rc = pci_enable_device(pdev);
- if (rc)
- return -EIO;
-
- brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
- if (IS_ERR(brd))
- return PTR_ERR(brd);
-
- rc = dgap_firmware_load(pdev, ent->driver_data, brd);
- if (rc)
- goto cleanup_brd;
-
- rc = dgap_alloc_flipbuf(brd);
- if (rc)
- goto cleanup_brd;
-
- rc = dgap_tty_register(brd);
- if (rc)
- goto free_flipbuf;
-
- rc = dgap_request_irq(brd);
- if (rc)
- goto unregister_tty;
-
- /*
- * Do tty device initialization.
- */
- rc = dgap_tty_init(brd);
- if (rc < 0)
- goto free_irq;
-
- rc = dgap_tty_register_ports(brd);
- if (rc)
- goto tty_free;
-
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
-
- dgap_board[dgap_numboards++] = brd;
-
- return 0;
-
-tty_free:
- dgap_tty_free(brd);
-free_irq:
- dgap_free_irq(brd);
-unregister_tty:
- dgap_tty_unregister(brd);
-free_flipbuf:
- dgap_free_flipbuf(brd);
-cleanup_brd:
- dgap_cleanup_nodes();
- dgap_unmap(brd);
- kfree(brd);
-
- return rc;
-}
-
-/*
- * dgap_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgap_cleanup_board(struct board_t *brd)
-{
- unsigned int i;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return;
-
- dgap_free_irq(brd);
-
- tasklet_kill(&brd->helper_tasklet);
-
- dgap_unmap(brd);
-
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++)
- kfree(brd->channels[i]);
-
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-
- dgap_board[brd->boardnum] = NULL;
-
- kfree(brd);
-}
-
-static void dgap_stop(bool removesys, struct pci_driver *drv)
-{
- unsigned long lock_flags;
-
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_stop = 1;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
- del_timer_sync(&dgap_poll_timer);
- if (removesys)
- dgap_remove_driver_sysfiles(drv);
-
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
- class_destroy(dgap_class);
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
-}
-
-static void dgap_remove_one(struct pci_dev *dev)
-{
- unsigned int i;
- struct pci_driver *drv = to_pci_driver(dev->dev.driver);
-
- dgap_stop(true, drv);
- for (i = 0; i < dgap_numboards; ++i) {
- dgap_remove_ports_sysfiles(dgap_board[i]);
- dgap_cleanup_tty(dgap_board[i]);
- dgap_cleanup_board(dgap_board[i]);
- }
-
- dgap_cleanup_nodes();
-}
-
-static struct pci_driver dgap_driver = {
- .name = "dgap",
- .probe = dgap_init_one,
- .id_table = dgap_pci_tbl,
- .remove = dgap_remove_one,
-};
-
-/*
- * Start of driver.
- */
-static int dgap_start(void)
-{
- int rc;
- unsigned long flags;
- struct device *device;
-
- dgap_numboards = 0;
-
- pr_info("For the tools package please visit http://www.digi.com\n");
-
- /*
- * Register our base character device into the kernel.
- */
-
- /*
- * Register management/dpa devices
- */
- rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
- if (rc < 0)
- return rc;
-
- dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
- if (IS_ERR(dgap_class)) {
- rc = PTR_ERR(dgap_class);
- goto failed_class;
- }
-
- device = device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 0),
- NULL, "dgap_mgmt");
- if (IS_ERR(device)) {
- rc = PTR_ERR(device);
- goto failed_device;
- }
-
- /* Start the poller */
- spin_lock_irqsave(&dgap_poll_lock, flags);
- setup_timer(&dgap_poll_timer, dgap_poll_handler, 0);
- dgap_poll_timer.data = 0;
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, flags);
-
- add_timer(&dgap_poll_timer);
-
- return rc;
-
-failed_device:
- class_destroy(dgap_class);
-failed_class:
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
- return rc;
-}
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-/*
- * dgap_init_module()
- *
- * Module load. This is where it all starts.
- */
-static int dgap_init_module(void)
-{
- int rc;
-
- pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
-
- rc = dgap_start();
- if (rc)
- return rc;
-
- rc = pci_register_driver(&dgap_driver);
- if (rc) {
- dgap_stop(false, NULL);
- return rc;
- }
-
- rc = dgap_create_driver_sysfiles(&dgap_driver);
- if (rc)
- goto err_unregister;
-
- dgap_driver_state = DRIVER_READY;
-
- return 0;
-
-err_unregister:
- pci_unregister_driver(&dgap_driver);
- return rc;
-}
-
-/*
- * dgap_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-static void dgap_cleanup_module(void)
-{
- if (dgap_numboards)
- pci_unregister_driver(&dgap_driver);
-}
-
-module_init(dgap_init_module);
-module_exit(dgap_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgap");
diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h
deleted file mode 100644
index e707ed5fe949..000000000000
--- a/drivers/staging/dgap/dgap.h
+++ /dev/null
@@ -1,1233 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *************************************************************************
- *
- * Driver includes
- *
- *************************************************************************/
-
-#ifndef __DGAP_DRIVER_H
-#define __DGAP_DRIVER_H
-
-#include <linux/types.h>	/* To pick up the various Linux types */
-#include <linux/tty.h> /* To pick up the various tty structs/defines */
-#include <linux/interrupt.h> /* For irqreturn_t type */
-
-#ifndef TRUE
-# define TRUE 1
-#endif
-
-#ifndef FALSE
-# define FALSE 0
-#endif
-
-#if !defined(TTY_FLIPBUF_SIZE)
-# define TTY_FLIPBUF_SIZE 512
-#endif
-
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
-
-/*
- * Driver identification
- */
-#define DG_NAME "dgap-1.3-16"
-#define DG_PART "40002347_C"
-#define DRVSTR "dgap"
-
-/*
- * defines from dgap_pci.h
- */
-#define PCIMAX 32 /* maximum number of PCI boards */
-
-#define DIGI_VID 0x114F
-
-#define PCI_DEV_EPC_DID 0x0002
-#define PCI_DEV_XEM_DID 0x0004
-#define PCI_DEV_XR_DID 0x0005
-#define PCI_DEV_CX_DID 0x0006
-#define PCI_DEV_XRJ_DID 0x0009 /* PLX-based Xr adapter */
-#define PCI_DEV_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
-#define PCI_DEV_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
-#define PCI_DEV_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
-#define PCI_DEV_XR_422_DID 0x0012 /* Xr-422 */
-#define PCI_DEV_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
-#define PCI_DEV_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
-#define PCI_DEV_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
-#define PCI_DEV_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
-#define PCI_DEV_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
-#define PCI_DEV_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
-#define PCI_DEV_XEM_HP_DID 0x0059 /* HP Xem PCI */
-
-#define PCI_DEV_XEM_NAME "AccelePort XEM"
-#define PCI_DEV_CX_NAME "AccelePort CX"
-#define PCI_DEV_XR_NAME "AccelePort Xr"
-#define PCI_DEV_XRJ_NAME "AccelePort Xr (PLX)"
-#define PCI_DEV_XR_SAIP_NAME "AccelePort Xr (SAIP)"
-#define PCI_DEV_920_2_NAME "AccelePort Xr920 2 port"
-#define PCI_DEV_920_4_NAME "AccelePort Xr920 4 port"
-#define PCI_DEV_920_8_NAME "AccelePort Xr920 8 port"
-#define PCI_DEV_XR_422_NAME "AccelePort Xr 422"
-#define PCI_DEV_EPCJ_NAME "AccelePort EPC (PLX)"
-#define PCI_DEV_XR_BULL_NAME "AccelePort Xr (BULL)"
-#define PCI_DEV_XR_IBM_NAME "AccelePort Xr (IBM)"
-#define PCI_DEV_CX_IBM_NAME "AccelePort CX (IBM)"
-#define PCI_DEV_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
-#define PCI_DEV_XEM_HP_NAME "AccelePort XEM (HP)"
-
-/*
- * On the PCI boards, there is no I/O space allocated.
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space.
- */
-
-/* Potential location of PCI BIOS from E0000 to FFFFF */
-#define PCI_BIOS_SIZE 0x00020000
-
-/* Size of Memory and I/O for PCI (4MB) */
-#define PCI_RAM_SIZE 0x00400000
-
-/* Size of Memory (2MB) */
-#define PCI_MEM_SIZE 0x00200000
-
-/* Max PCI Window Size (2MB) */
-#define PCI_WIN_SIZE 0x00200000
-
-#define PCI_WIN_SHIFT 21 /* 21 bits max */
-
-/* Offset of I/O in Memory (2MB) */
-#define PCI_IO_OFFSET 0x00200000
-
-/* Size of IO (2MB) */
-#define PCI_IO_SIZE_DGAP 0x00200000
-
-/* Number of boards we support at once. */
-#define MAXBOARDS 32
-#define MAXPORTS 224
-#define MAXTTYNAMELEN 200
-
-/* Our 3 magic numbers for our board, channel and unit structs */
-#define DGAP_BOARD_MAGIC 0x5c6df104
-#define DGAP_CHANNEL_MAGIC 0x6c6df104
-#define DGAP_UNIT_MAGIC 0x7c6df104
-
-/* Serial port types */
-#define DGAP_SERIAL 0
-#define DGAP_PRINT 1
-
-#define SERIAL_TYPE_NORMAL 1
-
-/* 4 extra for alignment play space */
-#define WRITEBUFLEN ((4096) + 4)
-#define MYFLIPLEN N_TTY_BUF_SIZE
-
-#define SBREAK_TIME 0x25
-#define U2BSIZE 0x400
-
-#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
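A standalone sketch of the millisecond-to-jiffies conversion above; the HZ value is only an example (kernels commonly use 100, 250 or 1000), and the integer division truncates, which is why very small intervals can come out as zero jiffies.

#include <stdio.h>

#define HZ 250				/* example tick rate only */
#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)

int main(void)
{
	int ms[] = { 1, 3, 4, 20, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(ms) / sizeof(ms[0]); i++)
		printf("%3d ms -> %3d jiffies\n", ms[i],
		       dgap_jiffies_from_ms(ms[i]));
	return 0;
}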
-
-/*
- * Our major for the mgmt devices.
- *
- * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
- * 22 became obsolete once the "cu" devices were removed in 2.6.
- * Also, this *IS* the epca driver, just PCI only now.
- */
-#ifndef DIGI_DGAP_MAJOR
-# define DIGI_DGAP_MAJOR 22
-#endif
-
-/*
- * The parameters we use to define the periods of the moving averages.
- */
-#define MA_PERIOD (HZ / 10)
-#define SMA_DUR (1 * HZ)
-#define EMA_DUR (1 * HZ)
-#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
-#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially. This is the same structure that is defined
- * as the default in tty_io.c with the same settings overridden as in serial.c
- *
- * In short, this should match the internal serial ports' defaults.
- */
-#define DEFAULT_IFLAGS (ICRNL | IXON)
-#define DEFAULT_OFLAGS (OPOST | ONLCR)
-#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
-#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
- ECHOCTL | ECHOKE | IEXTEN)
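For reference, the same defaults expressed through the userspace termios API. This is only an illustrative sketch, not something the driver itself does; the device path is hypothetical, and the baud rate is set through cfsetispeed()/cfsetospeed() rather than the B9600 c_cflag bit.

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	int fd = open("/dev/ttyDG00", O_RDWR | O_NOCTTY);	/* hypothetical */

	if (fd < 0 || tcgetattr(fd, &tio) < 0) {
		perror("open/tcgetattr");
		return 1;
	}

	tio.c_iflag = ICRNL | IXON;
	tio.c_oflag = OPOST | ONLCR;
	tio.c_cflag = CS8 | CREAD | HUPCL | CLOCAL;
	tio.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
		      ECHOCTL | ECHOKE | IEXTEN;
	cfsetispeed(&tio, B9600);
	cfsetospeed(&tio, B9600);

	if (tcsetattr(fd, TCSANOW, &tio) < 0)
		perror("tcsetattr");
	close(fd);
	return 0;
}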
-
-#ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE ('\0')
-#endif
-
-#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
-#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
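Because SNIFF_MAX is a power of two, wrapping a sniff-buffer index is a single AND with SNIFF_MASK rather than a modulo. A tiny standalone illustration:

#include <stdio.h>

#define SNIFF_MAX	65536			/* 2^16 */
#define SNIFF_MASK	(SNIFF_MAX - 1)

int main(void)
{
	unsigned int in = 65530;		/* example write index */
	unsigned int i;

	for (i = 0; i < 10; i++, in++)
		printf("raw %u -> wrapped %u\n", in, in & SNIFF_MASK);
	return 0;
}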
-
-#define VPDSIZE (512)
-
-/************************************************************************
- * FEP memory offsets
- ************************************************************************/
-#define START 0x0004L /* Execution start address */
-
-#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
-#define CMDSTART 0x0400L /* Start of command buffer */
-#define CMDMAX 0x0800L /* End of command buffer */
-
-#define EVBUF 0x0d18L /* Event (ev_t) structure */
-#define EVSTART 0x0800L /* Start of event buffer */
-#define EVMAX 0x0c00L /* End of event buffer */
-#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' is here */
-#define ECS_SEG 0x0E44 /* Segment of the extended */
- /* channel structure */
-#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line */
- /* speed if the fep has extended */
- /* capabilities */
-
-/* BIOS MAGIC SPOTS */
-#define ERROR 0x0C14L /* BIOS error code */
-#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
-#define POSTAREA 0x0C00L /* POST complete message area */
-
-/* FEP MAGIC SPOTS */
-#define FEPSTAT POSTAREA /* OS here when FEP comes up */
-#define NCHAN 0x0C02L /* number of ports FEP sees */
-#define PANIC 0x0C10L /* PANIC area for FEP */
-#define KMEMEM 0x0C30L /* Memory for KME use */
-#define CONFIG 0x0CD0L /* Concentrator configuration info */
-#define CONFIGSIZE 0x0030 /* configuration info size */
-#define DOWNREQ 0x0D00 /* Download request buffer pointer */
-
-#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
-#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
-
-#define XEMPORTS 0xC02 /*
- * Offset in board memory where FEP5 stores
- * how many ports it has detected.
- * NOTE: FEP5 reports 64 ports when the user
- * has the cable in EBI OUT instead of EBI IN.
- */
-
-#define FEPCLR 0x00
-#define FEPMEM 0x02
-#define FEPRST 0x04
-#define FEPINT 0x08
-#define FEPMASK 0x0e
-#define FEPWIN 0x80
-
-#define LOWMEM 0x0100
-#define HIGHMEM 0x7f00
-
-#define FEPTIMEOUT 200000
-
-#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
-#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
-#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
-#define FEPPOLL 0x0c26 /* Fep event poll interval */
-
-#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
-
-/************************************************************************
- * FEP supported functions
- ************************************************************************/
-#define SRLOW 0xe0 /* Set receive low water */
-#define SRHIGH 0xe1 /* Set receive high water */
-#define FLUSHTX 0xe2 /* Flush transmit buffer */
-#define PAUSETX 0xe3 /* Pause data transmission */
-#define RESUMETX 0xe4 /* Resume data transmission */
-#define SMINT 0xe5 /* Set Modem Interrupt */
-#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
-#define SBREAK 0xe8 /* Send break */
-#define SMODEM 0xe9 /* Set 8530 modem control lines */
-#define SIFLAG 0xea /* Set UNIX iflags */
-#define SFLOWC 0xeb /* Set flow control characters */
-#define STLOW 0xec /* Set transmit low water mark */
-#define RPAUSE 0xee /* Pause receive */
-#define RRESUME 0xef /* Resume receive */
-#define CHRESET 0xf0 /* Reset Channel */
-#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
-#define SOFLAG 0xf3 /* Set UNIX oflags */
-#define SHFLOW 0xf4 /* Set hardware handshake */
-#define SCFLAG 0xf5 /* Set UNIX cflags */
-#define SVNEXT 0xf6 /* Set VNEXT character */
-#define SPINTFC 0xfc /* Reserved */
-#define SCOMMODE 0xfd /* Set RS232/422 mode */
-
-/************************************************************************
- * Modes for SCOMMODE
- ************************************************************************/
-#define MODE_232 0x00
-#define MODE_422 0x01
-
-/************************************************************************
- * Event flags.
- ************************************************************************/
-#define IFBREAK 0x01 /* Break received */
-#define IFTLW 0x02 /* Transmit low water */
-#define IFTEM 0x04 /* Transmitter empty */
-#define IFDATA 0x08 /* Receive data present */
-#define IFMODEM 0x20 /* Modem status change */
-
-/************************************************************************
- * Modem flags
- ************************************************************************/
-# define DM_RTS 0x02 /* Request to send */
-# define DM_CD 0x80 /* Carrier detect */
-# define DM_DSR 0x20 /* Data set ready */
-# define DM_CTS 0x10 /* Clear to send */
-# define DM_RI 0x40 /* Ring indicator */
-# define DM_DTR 0x01 /* Data terminal ready */
-
-/*
- * defines from dgap_conf.h
- */
-#define NULLNODE 0 /* header node, not used */
-#define BNODE 1 /* Board node */
-#define LNODE 2 /* Line node */
-#define CNODE 3 /* Concentrator node */
-#define MNODE 4 /* EBI Module node */
-#define TNODE 5 /* tty name prefix node */
-#define CUNODE 6 /* cu name prefix (non-SCO) */
-#define PNODE 7 /* trans. print prefix node */
-#define JNODE 8 /* maJor number node */
-#define ANODE 9 /* altpin */
-#define TSNODE 10 /* tty structure size */
-#define CSNODE 11 /* channel structure size */
-#define BSNODE 12 /* board structure size */
-#define USNODE 13 /* unit schedule structure size */
-#define FSNODE 14 /* f2200 structure size */
-#define VSNODE 15 /* size of VPIX structures */
-#define INTRNODE 16 /* enable interrupt */
-
-/* Enumeration of tokens */
-#define BEGIN 1
-#define END 2
-#define BOARD 10
-
-#define EPCFS 11 /* start of EPC family definitions */
-#define ICX 11
-#define MCX 13
-#define PCX 14
-#define IEPC 15
-#define EEPC 16
-#define MEPC 17
-#define IPCM 18
-#define EPCM 19
-#define MPCM 20
-#define PEPC 21
-#define PPCM 22
-#ifdef CP
-#define ICP 23
-#define ECP 24
-#define MCP 25
-#endif
-#define EPCFE 25 /* end of EPC family definitions */
-#define PC2E 26
-#define PC4E 27
-#define PC4E8K 28
-#define PC8E 29
-#define PC8E8K 30
-#define PC16E 31
-#define MC2E8K 34
-#define MC4E8K 35
-#define MC8E8K 36
-
-#define AVANFS 42 /* start of Avanstar family definitions */
-#define A8P 42
-#define A16P 43
-#define AVANFE 43 /* end of Avanstar family definitions */
-
-#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
-#define DA22 44 /* AccelePort 2002 */
-#define DA24 45 /* AccelePort 2004 */
-#define DA28 46 /* AccelePort 2008 */
-#define DA216 47 /* AccelePort 2016 */
-#define DAR4 48 /* AccelePort RAS 4 port */
-#define DAR8 49 /* AccelePort RAS 8 port */
-#define DDR24 50 /* DataFire RAS 24 port */
-#define DDR30 51 /* DataFire RAS 30 port */
-#define DDR48 52 /* DataFire RAS 48 port */
-#define DDR60 53 /* DataFire RAS 60 port */
-#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
-
-#define PCXRFS 106 /* start of PCXR family definitions */
-#define APORT4 106
-#define APORT8 107
-#define PAPORT4 108
-#define PAPORT8 109
-#define APORT4_920I 110
-#define APORT8_920I 111
-#define APORT4_920P 112
-#define APORT8_920P 113
-#define APORT2_920P 114
-#define PCXRFE 117 /* end of PCXR family definitions */
-
-#define LINE 82
-#ifdef T1
-#define T1M 83
-#define E1M 84
-#endif
-#define CONC 64
-#define CX 65
-#define EPC 66
-#define MOD 67
-#define PORTS 68
-#define METHOD 69
-#define CUSTOM 70
-#define BASIC 71
-#define STATUS 72
-#define MODEM 73
-/* The following tokens can appear in multiple places */
-#define SPEED 74
-#define NPORTS 75
-#define ID 76
-#define CABLE 77
-#define CONNECT 78
-#define IO 79
-#define MEM 80
-#define DPSZ 81
-
-#define TTYN 90
-#define CU 91
-#define PRINT 92
-#define XPRINT 93
-#define CMAJOR 94
-#define ALTPIN 95
-#define STARTO 96
-#define USEINTR 97
-#define PCIINFO 98
-
-#define TTSIZ 100
-#define CHSIZ 101
-#define BSSIZ 102
-#define UNTSIZ 103
-#define F2SIZ 104
-#define VPSIZ 105
-
-#define TOTAL_BOARD 2
-#define CURRENT_BRD 4
-#define BOARD_TYPE 6
-#define IO_ADDRESS 8
-#define MEM_ADDRESS 10
-
-#define FIELDS_PER_PAGE 18
-
-#define TB_FIELD 1
-#define CB_FIELD 3
-#define BT_FIELD 5
-#define IO_FIELD 7
-#define ID_FIELD 8
-#define ME_FIELD 9
-#define TTY_FIELD 11
-#define CU_FIELD 13
-#define PR_FIELD 15
-#define MPR_FIELD 17
-
-#define MAX_FIELD 512
-
-#define INIT 0
-#define NITEMS 128
-#define MAX_ITEM 512
-
-#define DSCRINST 1
-#define DSCRNUM 3
-#define ALTPINQ 5
-#define SSAVE 7
-
-#define DSCR "32"
-#define ONETONINE "123456789"
-#define ALL "1234567890"
-
-/*
- * All the possible states the driver can be while being loaded.
- */
-enum {
- DRIVER_INITIALIZED = 0,
- DRIVER_READY
-};
-
-/*
- * All the possible states the board can be while booting up.
- */
-enum {
- BOARD_FAILED = 0,
- BOARD_READY
-};
-
-/*
- * All the possible states that a requested concentrator image can be in.
- */
-enum {
- NO_PENDING_CONCENTRATOR_REQUESTS = 0,
- NEED_CONCENTRATOR,
- REQUESTED_CONCENTRATOR
-};
-
-/*
- * Modem line constants are defined as macros because DSR and
- * DCD are swappable using the ditty altpin option.
- */
-#define D_CD(ch) ch->ch_cd /* Carrier detect */
-#define D_DSR(ch) ch->ch_dsr /* Data set ready */
-#define D_RTS(ch) DM_RTS /* Request to send */
-#define D_CTS(ch) DM_CTS /* Clear to send */
-#define D_RI(ch) DM_RI /* Ring indicator */
-#define D_DTR(ch) DM_DTR /* Data terminal ready */
-
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
-
-/*
- * A structure to hold a statistics counter. We also
- * compute moving averages for this counter.
- */
-struct macounter {
- u32 cnt; /* Total count */
-	ulong accum;		/* Accumulator per period */
- ulong sma; /* Simple moving average */
- ulong ema; /* Exponential moving average */
-};
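struct macounter pairs a per-period accumulator with simple and exponential moving averages of that accumulator. A standalone sketch of the idea; the window length and EMA smoothing factor below are illustrative assumptions, not values taken from the driver.

#include <stdio.h>

#define NPERIODS 4			/* illustrative SMA window */

int main(void)
{
	unsigned long history[NPERIODS] = { 0 };	/* last N periods */
	unsigned long cnt = 0;
	double sma = 0.0, ema = 0.0;
	const double alpha = 2.0 / (NPERIODS + 1);	/* assumed EMA factor */
	unsigned long samples[] = { 5, 7, 9, 40, 8, 6, 5, 7 };
	unsigned int i, j;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long accum = samples[i];   /* this period's events */

		cnt += accum;			/* total count */
		history[i % NPERIODS] = accum;

		/* SMA: plain mean of the last NPERIODS periods. */
		sma = 0.0;
		for (j = 0; j < NPERIODS; j++)
			sma += history[j];
		sma /= NPERIODS;

		/* EMA: recent periods weighted more heavily. */
		ema = ema + alpha * ((double)accum - ema);

		printf("period %u: cnt=%lu sma=%.1f ema=%.1f\n",
		       i, cnt, sma, ema);
	}
	return 0;
}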
-
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
-#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
-#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
-
-/*
- * Per-board information
- */
-struct board_t {
- int magic; /* Board Magic number. */
- int boardnum; /* Board number: 0-3 */
-
- int type; /* Type of board */
- char *name; /* Product Name */
- struct pci_dev *pdev; /* Pointer to the pci_dev struct */
- u16 vendor; /* PCI vendor ID */
- u16 device; /* PCI device ID */
- u16 subvendor; /* PCI subsystem vendor ID */
- u16 subdevice; /* PCI subsystem device ID */
- u8 rev; /* PCI revision ID */
- uint pci_bus; /* PCI bus value */
- uint pci_slot; /* PCI slot value */
- u16 maxports; /* MAX ports this board can handle */
- u8 vpd[VPDSIZE]; /* VPD of board, if found */
- u32 bd_flags; /* Board flags */
-
- spinlock_t bd_lock; /* Used to protect board */
-
- u32 state; /* State of card. */
- wait_queue_head_t state_wait; /* Place to sleep on for state change */
-
- struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
-
- u32 wait_for_bios;
- u32 wait_for_fep;
-
- struct cnode *bd_config; /* Config of board */
-
- u16 nasync; /* Number of ports on card */
-
- ulong irq; /* Interrupt request number */
- ulong intr_count; /* Count of interrupts */
- u32 intr_used; /* Non-zero if using interrupts */
-	u32 intr_running;	/* Non-zero if the FEP knows it's doing */
- /* interrupts */
-
- ulong port; /* Start of base io port of the card */
- ulong port_end; /* End of base io port of the card */
- ulong membase; /* Start of base memory of the card */
- ulong membase_end; /* End of base memory of the card */
-
- u8 __iomem *re_map_port; /* Remapped io port of the card */
- u8 __iomem *re_map_membase;/* Remapped memory of the card */
-
- u8 inhibit_poller; /* Tells the poller to leave us alone */
-
- struct channel_t *channels[MAXPORTS]; /* array of pointers to our */
- /* channels. */
-
- struct tty_driver *serial_driver;
- struct tty_port *serial_ports;
- char serial_name[200];
- struct tty_driver *print_driver;
- struct tty_port *printer_ports;
- char print_name[200];
-
- struct bs_t __iomem *bd_bs; /* Base structure pointer */
-
- char *flipbuf; /* Our flip buffer, alloced if */
- /* board is found */
- char *flipflagbuf; /* Our flip flag buffer, alloced */
- /* if board is found */
-
- u16 dpatype; /* The board "type", as defined */
- /* by DPA */
- u16 dpastatus; /* The board "status", as defined */
- /* by DPA */
-
- u32 conc_dl_status; /* Status of any pending conc */
- /* download */
-};
-
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN 0x0001 /* Device is open */
-#define UN_CLOSING 0x0002 /* Line is being closed */
-#define UN_IMM 0x0004 /* Service immediately */
-#define UN_BUSY 0x0008 /* Some work this channel */
-#define UN_BREAKI 0x0010 /* Input break received */
-#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
-#define UN_TIME 0x0040 /* Waiting on time */
-#define UN_EMPTY 0x0080 /* Waiting output queue empty */
-#define UN_LOW 0x0100 /* Waiting output low water mark*/
-#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
-#define UN_WOPEN 0x0400 /* Device waiting for open */
-#define UN_WIOCTL	0x0800		/* Device waiting in ioctl */
-#define UN_HANGUP 0x8000 /* Carrier lost */
-
-struct device;
-
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
-struct un_t {
- int magic; /* Unit Magic Number. */
- struct channel_t *un_ch;
- u32 un_time;
- u32 un_type;
- int un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty;/* Pointer to unit tty structure */
- u32 un_flags; /* Unit flags */
- wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- u32 un_dev; /* Minor device number */
- tcflag_t un_oflag; /* oflags being done on board */
- tcflag_t un_lflag; /* lflags being done on board */
- struct device *un_sysfs;
-};
-
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON 0x0001 /* Printer on string */
-#define CH_OUT 0x0002 /* Dial-out device open */
-#define CH_STOP 0x0004 /* Output is stopped */
-#define CH_STOPI 0x0008 /* Input is stopped */
-#define CH_CD 0x0010 /* Carrier is present */
-#define CH_FCAR 0x0020 /* Carrier forced on */
-
-#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
-#define CH_WLOW 0x0100 /* Term waiting low event */
-#define CH_WEMPTY 0x0200 /* Term waiting empty event */
-#define CH_RENABLE 0x0400 /* Buffer just emptied */
-#define CH_RACTIVE 0x0800 /* Process active in xxread() */
-#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
-#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
-#define CH_HANGUP 0x8000 /* Hangup received */
-
-/*
- * Definitions for ch_sniff_flags
- */
-#define SNIFF_OPEN 0x1
-#define SNIFF_WAIT_DATA 0x2
-#define SNIFF_WAIT_SPACE 0x4
-
-/************************************************************************
- *** Definitions for Digi ditty(1) command.
- ************************************************************************/
-
-/************************************************************************
- * This module provides application access to special Digi
- * serial line enhancements which are not standard UNIX(tm) features.
- ************************************************************************/
-
-#if !defined(TIOCMODG)
-
-#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
-#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
-
-#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
-#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
-#endif
-
-#endif
-
-#if !defined(TIOCMSET)
-#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
-#define TIOCMGET	(('d'<<8) | 253)	/* get modem ctrl state	*/
-#endif
-
-#if !defined(TIOCMBIC)
-#define TIOCMBIC	(('d'<<8) | 254)	/* clear modem ctrl bits */
-#define TIOCMBIS	(('d'<<8) | 255)	/* set modem ctrl bits	*/
-#endif
-
-#if !defined(TIOCSDTR)
-#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
-#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
-#endif
-
-/************************************************************************
- * Ioctl command arguments for DIGI parameters.
- ************************************************************************/
-#define DIGI_GETA (('e'<<8) | 94) /* Read params */
-
-#define DIGI_SETA (('e'<<8) | 95) /* Set params */
-#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
-#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
-
-#define DIGI_KME (('e'<<8) | 98) /* Read/Write Host */
- /* Adapter Memory */
-
-#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
- /* control characters */
-#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
- /* control characters */
-#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
- /* flow control chars */
-#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
- /* flow control chars */
-
-#define DIGI_GEDELAY (('d'<<8) | 246) /* Get edelay */
-#define DIGI_SEDELAY (('d'<<8) | 247) /* Set edelay */
-
-struct digiflow_t {
- unsigned char startc; /* flow cntl start char */
- unsigned char stopc; /* flow cntl stop char */
-};
-
-#ifdef FLOW_2200
-#define F2200_GETA (('e'<<8) | 104) /* Get 2x36 flow cntl flags */
-#define F2200_SETAW (('e'<<8) | 105) /* Set 2x36 flow cntl flags */
-#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
-#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
-#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
-#define F2200_XON 0xf8
-#define P2200_XON 0xf9
-#define F2200_XOFF 0xfa
-#define P2200_XOFF 0xfb
-
-#define FXOFF_MASK 0x03 /* 2200 flow status mask */
-#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
-#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
-#endif
-
-/************************************************************************
- * Values for digi_flags
- ************************************************************************/
-#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
-#define DSRPACE 0x0010 /* DSR output flow control */
-#define DCDPACE 0x0020 /* DCD output flow control */
-#define DTRPACE 0x0040 /* DTR input flow control */
-#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
-#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
-#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
-#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
-#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
-#define DIGI_422 0x4000 /* for 422/232 selectable panel */
-#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
-
-/************************************************************************
- * These options are not supported on the comxi.
- ************************************************************************/
-#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
-
-#define DIGI_PLEN 28 /* String length */
-#define DIGI_TSIZ 10 /* Terminal string len */
-
-/************************************************************************
- * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
-struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
- unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
- unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
-};
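For illustration only, a minimal userspace sketch of the DIGI_GETA/DIGI_SETAW ioctls acting on struct digi_t; the device node name is hypothetical and the DIGI_* definitions above are assumed to be visible to the application (historically via an installed digi.h):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct digi_t dp;
	int fd = open("/dev/ttyG0_00", O_RDWR | O_NOCTTY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, DIGI_GETA, &dp) == 0) {	/* read the current parameters */
		dp.digi_flags |= DIGI_ALTPIN;	/* e.g. swap DCD/DSR on the RJ-45 */
		ioctl(fd, DIGI_SETAW, &dp);	/* drain output, then apply */
	}
	close(fd);
	return 0;
}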
-
-/************************************************************************
- * KME definitions and structures.
- ************************************************************************/
-#define RW_IDLE 0 /* Operation complete */
-#define RW_READ 1 /* Read Concentrator Memory */
-#define RW_WRITE 2 /* Write Concentrator Memory */
-
-struct rw_t {
- unsigned char rw_req; /* Request type */
- unsigned char rw_board; /* Host Adapter board number */
- unsigned char rw_conc; /* Concentrator number */
- unsigned char rw_reserved; /* Reserved for expansion */
- unsigned long rw_addr; /* Address in concentrator */
- unsigned short rw_size; /* Read/write request length */
- unsigned char rw_data[128]; /* Data to read/write */
-};
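A hedged sketch (not from the driver) of how an application might fill struct rw_t for the DIGI_KME ioctl defined earlier; the board/concentrator numbers and the open file descriptor are assumptions for the example:

#include <string.h>	/* memcpy */
#include <sys/ioctl.h>

/* Hypothetical helper: read 'len' bytes of concentrator memory via DIGI_KME.
 * 'fd' is an already-open file descriptor for a port on the board.
 */
static int kme_read(int fd, unsigned long addr, unsigned short len, unsigned char *out)
{
	struct rw_t req = { 0 };

	if (len > sizeof(req.rw_data))
		return -1;

	req.rw_req   = RW_READ;		/* read concentrator memory */
	req.rw_board = 0;		/* first host adapter */
	req.rw_conc  = 0;		/* first concentrator */
	req.rw_addr  = addr;
	req.rw_size  = len;

	if (ioctl(fd, DIGI_KME, &req) < 0)
		return -1;

	memcpy(out, req.rw_data, len);	/* data comes back in rw_data[] */
	return 0;
}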
-
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
-struct digi_dinfo {
- unsigned long dinfo_nboards; /* # boards configured */
- char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
-};
-
-#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
-
-/************************************************************************
- * Structure used with ioctl commands for per-board information
- *
- * physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
-struct digi_info {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_ioport; /* io port address */
- unsigned long info_physaddr; /* memory address */
- unsigned long info_physsize; /* Size of host mem window */
- unsigned long info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
-};
-
-#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
-
-struct digi_stat {
- unsigned int info_chan; /* Channel number (0 based) */
- unsigned int info_brd; /* Board number (0 based) */
- unsigned long info_cflag; /* cflag for channel */
- unsigned long info_iflag; /* iflag for channel */
- unsigned long info_oflag; /* oflag for channel */
- unsigned long info_mstat; /* mstat for channel */
- unsigned long info_tx_data; /* tx_data for channel */
- unsigned long info_rx_data; /* rx_data for channel */
- unsigned long info_hflow; /* hflow for channel */
- unsigned long info_reserved[8]; /* for future expansion */
-};
-
-#define DIGI_GETSTAT	(('d'<<8) | 244)	/* get channel status */
-/************************************************************************
- *
- * Structure used with ioctl commands for per-channel information
- *
- ************************************************************************/
-struct digi_ch {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_channel; /* Channel index number */
- unsigned long info_ch_cflag; /* Channel cflag */
- unsigned long info_ch_iflag; /* Channel iflag */
- unsigned long info_ch_oflag; /* Channel oflag */
- unsigned long info_chsize; /* Channel structure size */
- unsigned long info_sleep_stat; /* sleep status */
- dev_t info_dev; /* device number */
- unsigned char info_initstate; /* Channel init state */
- unsigned char info_running; /* Channel running state */
- long reserved[8]; /* reserved for future use */
-};
-
-/*
-* This structure is used with the DIGI_FEPCMD ioctl to
-* tell the driver which port to send the command to.
-*/
-struct digi_cmd {
- int cmd;
- int word;
- int ncmds;
- int chan; /* channel index (zero based) */
- int bdid; /* board index (zero based) */
-};
-
-/*
-* info_sleep_stat defines
-*/
-#define INFO_RUNWAIT 0x0001
-#define INFO_WOPEN 0x0002
-#define INFO_TTIOW 0x0004
-#define INFO_CH_RWAIT 0x0008
-#define INFO_CH_WEMPTY 0x0010
-#define INFO_CH_WLOW 0x0020
-#define INFO_XXBUF_BUSY 0x0040
-
-#define DIGI_GETCH	(('d'<<8) | 245)	/* get channel info */
-
-/* Board type definitions */
-
-#define SUBTYPE 0007
-#define T_PCXI 0000
-#define T_PCXM 0001
-#define T_PCXE 0002
-#define T_PCXR 0003
-#define T_SP 0004
-#define T_SP_PLUS 0005
-# define T_HERC 0000
-# define T_HOU 0001
-# define T_LON 0002
-# define T_CHA 0003
-#define FAMILY 0070
-#define T_COMXI 0000
-#define T_PCXX 0010
-#define T_CX 0020
-#define T_EPC 0030
-#define T_PCLITE 0040
-#define T_SPXX 0050
-#define T_AVXX 0060
-#define T_DXB 0070
-#define T_A2K_4_8 0070
-#define BUSTYPE 0700
-#define T_ISABUS 0000
-#define T_MCBUS 0100
-#define T_EISABUS 0200
-#define T_PCIBUS 0400
-
-/* Board State Definitions */
-
-#define BD_RUNNING 0x0
-#define BD_REASON 0x7f
-#define BD_NOTFOUND 0x1
-#define BD_NOIOPORT 0x2
-#define BD_NOMEM 0x3
-#define BD_NOBIOS 0x4
-#define BD_NOFEP 0x5
-#define BD_FAILED 0x6
-#define BD_ALLOCATED 0x7
-#define BD_TRIBOOT 0x8
-#define BD_BADKME 0x80
-
-#define DIGI_LOOPBACK (('d'<<8) | 252) /* Enable/disable UART */
- /* internal loopback */
-#define DIGI_SPOLL (('d'<<8) | 254) /* change poller rate */
-
-#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
-#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
-#define DIGI_RESET_PORT (('e'<<8) | 93) /* Reset port */
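Because DIGI_SETCUSTOMBAUD/DIGI_GETCUSTOMBAUD are declared with _IOW/_IOR on int, the ioctl argument is a pointer to an int holding the rate. A minimal sketch, assuming fd is an already-open port:

int rate = 250000;			/* any rate the divisor logic can reach */

if (ioctl(fd, DIGI_SETCUSTOMBAUD, &rate) == 0) {
	int current = 0;

	ioctl(fd, DIGI_GETCUSTOMBAUD, &current);
	/* 'current' now reports the custom rate stored for this port */
}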
-
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
-struct channel_t {
- int magic; /* Channel Magic Number */
- struct bs_t __iomem *ch_bs; /* Base structure pointer */
- struct cm_t __iomem *ch_cm; /* Command queue pointer */
- struct board_t *ch_bd; /* Board structure pointer */
- u8 __iomem *ch_vaddr; /* FEP memory origin */
- u8 __iomem *ch_taddr; /* Write buffer origin */
- u8 __iomem *ch_raddr; /* Read buffer origin */
- struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
-
- spinlock_t ch_lock; /* provide for serialization */
- wait_queue_head_t ch_flags_wait;
-
- u32 pscan_state;
- u8 pscan_savechar;
-
- u32 ch_portnum; /* Port number, 0 offset. */
- u32 ch_open_count; /* open count */
- u32 ch_flags; /* Channel flags */
-
- u32 ch_cpstime; /* Time for CPS calculations */
-
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
-
- u16 ch_fepiflag; /* FEP tty iflags */
- u16 ch_fepcflag; /* FEP tty cflags */
- u16 ch_fepoflag; /* FEP tty oflags */
- u16 ch_wopen; /* Waiting for open process cnt */
- u16 ch_tstart; /* Transmit buffer start */
- u16 ch_tsize; /* Transmit buffer size */
- u16 ch_rstart; /* Receive buffer start */
- u16 ch_rsize; /* Receive buffer size */
- u16 ch_rdelay; /* Receive delay time */
-
- u16 ch_tlw; /* Our currently set low water mark */
-
- u16 ch_cook; /* Output character mask */
-
- u8 ch_card; /* Card channel is on */
- u8 ch_stopc; /* Stop character */
- u8 ch_startc; /* Start character */
-
- u8 ch_mostat; /* FEP output modem status */
- u8 ch_mistat; /* FEP input modem status */
- u8 ch_mforce; /* Modem values to be forced */
- u8 ch_mval; /* Force values */
- u8 ch_fepstopc; /* FEP stop character */
- u8 ch_fepstartc; /* FEP start character */
-
- u8 ch_astopc; /* Auxiliary Stop character */
- u8 ch_astartc; /* Auxiliary Start character */
- u8 ch_fepastopc; /* Auxiliary FEP stop char */
- u8 ch_fepastartc; /* Auxiliary FEP start char */
-
- u8 ch_hflow; /* FEP hardware handshake */
- u8 ch_dsr; /* stores real dsr value */
- u8 ch_cd; /* stores real cd value */
- u8 ch_tx_win; /* channel tx buffer window */
- u8 ch_rx_win; /* channel rx buffer window */
- uint ch_custom_speed; /* Custom baud, if set */
- uint ch_baud_info; /* Current baud info for /proc output */
- ulong ch_rxcount; /* total of data received so far */
- ulong ch_txcount; /* total of data transmitted so far */
- ulong ch_err_parity; /* Count of parity errors on channel */
- ulong ch_err_frame; /* Count of framing errors on channel */
- ulong ch_err_break; /* Count of breaks on channel */
- ulong ch_err_overrun; /* Count of overruns on channel */
-};
-
-/************************************************************************
- * Command structure definition.
- ************************************************************************/
-struct cm_t {
- unsigned short cm_head; /* Command buffer head offset */
- unsigned short cm_tail; /* Command buffer tail offset */
- unsigned short cm_start; /* start offset of buffer */
- unsigned short cm_max; /* last offset of buffer */
-};
-
-/************************************************************************
- * Event structure definition.
- ************************************************************************/
-struct ev_t {
- unsigned short ev_head; /* Command buffer head offset */
- unsigned short ev_tail; /* Command buffer tail offset */
- unsigned short ev_start; /* start offset of buffer */
- unsigned short ev_max; /* last offset of buffer */
-};
-
-/************************************************************************
- * Download buffer structure.
- ************************************************************************/
-struct downld_t {
- u8 dl_type; /* Header */
- u8 dl_seq; /* Download sequence */
- ushort dl_srev; /* Software revision number */
- ushort dl_lrev; /* Low revision number */
- ushort dl_hrev; /* High revision number */
- ushort dl_seg; /* Start segment address */
- ushort dl_size; /* Number of bytes to download */
- u8 dl_data[1024]; /* Download data */
-};
-
-/************************************************************************
- * Per channel buffer structure
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * C = changed by commands only *
- * U = unknown (may be changed w/o notice) *
- ************************************************************************/
-struct bs_t {
- unsigned short tp_jmp; /* Transmit poll jump */
- unsigned short tc_jmp; /* Cooked procedure jump */
- unsigned short ri_jmp; /* Not currently used */
- unsigned short rp_jmp; /* Receive poll jump */
-
- unsigned short tx_seg; /* W Tx segment */
- unsigned short tx_head; /* W Tx buffer head offset */
- unsigned short tx_tail; /* R Tx buffer tail offset */
- unsigned short tx_max; /* W Tx buffer size - 1 */
-
- unsigned short rx_seg; /* W Rx segment */
- unsigned short rx_head; /* W Rx buffer head offset */
- unsigned short rx_tail; /* R Rx buffer tail offset */
- unsigned short rx_max; /* W Rx buffer size - 1 */
-
- unsigned short tx_lw; /* W Tx buffer low water mark */
- unsigned short rx_lw; /* W Rx buffer low water mark */
- unsigned short rx_hw; /* W Rx buffer high water mark*/
- unsigned short incr; /* W Increment to next channel*/
-
- unsigned short fepdev; /* U SCC device base address */
- unsigned short edelay; /* W Exception delay */
- unsigned short blen; /* W Break length */
- unsigned short btime; /* U Break complete time */
-
- unsigned short iflag; /* C UNIX input flags */
- unsigned short oflag; /* C UNIX output flags */
- unsigned short cflag; /* C UNIX control flags */
- unsigned short wfill[13]; /* U Reserved for expansion */
-
- unsigned char num; /* U Channel number */
- unsigned char ract; /* U Receiver active counter */
- unsigned char bstat; /* U Break status bits */
- unsigned char tbusy; /* W Transmit busy */
- unsigned char iempty; /* W Transmit empty event */
- /* enable */
- unsigned char ilow; /* W Transmit low-water event */
- /* enable */
- unsigned char idata; /* W Receive data interrupt */
- /* enable */
- unsigned char eflag; /* U Host event flags */
-
- unsigned char tflag; /* U Transmit flags */
- unsigned char rflag; /* U Receive flags */
- unsigned char xmask; /* U Transmit ready flags */
- unsigned char xval; /* U Transmit ready value */
- unsigned char m_stat; /* RC Modem status bits */
- unsigned char m_change; /* U Modem bits which changed */
- unsigned char m_int; /* W Modem interrupt enable */
- /* bits */
- unsigned char m_last; /* U Last modem status */
-
- unsigned char mtran; /* C Unreported modem trans */
- unsigned char orun; /* C Buffer overrun occurred */
- unsigned char astartc; /* W Auxiliary Xon char */
- unsigned char astopc; /* W Auxiliary Xoff char */
- unsigned char startc; /* W Xon character */
- unsigned char stopc; /* W Xoff character */
- unsigned char vnextc; /* W Vnext character */
- unsigned char hflow; /* C Software flow control */
-
- unsigned char fillc; /* U Delay Fill character */
- unsigned char ochar; /* U Saved output character */
- unsigned char omask; /* U Output character mask */
-
- unsigned char bfill[13]; /* U Reserved for expansion */
-
- unsigned char scc[16]; /* U SCC registers */
-};
-
-struct cnode {
- struct cnode *next;
- int type;
- int numbrd;
-
- union {
- struct {
- char type; /* Board Type */
- long port; /* I/O Address */
- char *portstr; /* I/O Address in string */
- long addr; /* Memory Address */
- char *addrstr; /* Memory Address in string */
- long pcibus; /* PCI BUS */
- char *pcibusstr; /* PCI BUS in string */
- long pcislot; /* PCI SLOT */
- char *pcislotstr; /* PCI SLOT in string */
- long nport; /* Number of Ports */
- char *id; /* tty id */
- long start; /* start of tty counting */
- char *method; /* Install method */
- char v_port;
- char v_addr;
- char v_pcibus;
- char v_pcislot;
- char v_nport;
- char v_id;
- char v_start;
- char v_method;
- char line1;
- char line2;
- char conc1; /* total concs in line1 */
- char conc2; /* total concs in line2 */
- char module1; /* total modules for line1 */
- char module2; /* total modules for line2 */
- char *status; /* config status */
- char *dimstatus; /* Y/N */
- int status_index; /* field pointer */
- } board;
-
- struct {
- char *cable;
- char v_cable;
- long speed;
- char v_speed;
- } line;
-
- struct {
- char type;
- char *connect;
- long speed;
- long nport;
- char *id;
- char *idstr;
- long start;
- char v_connect;
- char v_speed;
- char v_nport;
- char v_id;
- char v_start;
- } conc;
-
- struct {
- char type;
- long nport;
- char *id;
- char *idstr;
- long start;
- char v_nport;
- char v_id;
- char v_start;
- } module;
-
- char *ttyname;
- char *cuname;
- char *printname;
- long majornumber;
- long altpin;
- long ttysize;
- long chsize;
- long bssize;
- long unsize;
- long f2size;
- long vpixsize;
- long useintr;
- } u;
-};
-#endif
diff --git a/drivers/staging/dgnc/Kconfig b/drivers/staging/dgnc/Kconfig
deleted file mode 100644
index 032c2a795238..000000000000
--- a/drivers/staging/dgnc/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DGNC
- tristate "Digi Neo and Classic PCI Products"
- default n
- depends on TTY && PCI
- ---help---
- Driver for the Digi International Neo and Classic PCI based product line.
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
deleted file mode 100644
index 995c874f40eb..000000000000
--- a/drivers/staging/dgnc/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_DGNC) += dgnc.o
-
-dgnc-objs := dgnc_cls.o dgnc_driver.o\
- dgnc_mgmt.o dgnc_neo.o\
- dgnc_tty.o dgnc_sysfs.o\
- dgnc_utils.o
diff --git a/drivers/staging/dgnc/TODO b/drivers/staging/dgnc/TODO
deleted file mode 100644
index 0e0825bd70ae..000000000000
--- a/drivers/staging/dgnc/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-* checkpatch fixes
-* remove unnecessary comments
-* remove unnecessary error messages. For example, kzalloc() has its
-  own error message, so adding an extra one is useless.
-* use goto statements for error handling when appropriate
-* there is a lot of unnecessary code in the driver. It was
-  originally a standalone driver. Remove unneeded code.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Cc: Lidza Louina <lidza.louina@gmail.com>
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
deleted file mode 100644
index a629a78964ce..000000000000
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ /dev/null
@@ -1,1314 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/delay.h> /* For udelay */
-#include <linux/io.h> /* For read[bwl]/write[bwl] */
-#include <linux/serial.h> /* For struct async_serial */
-#include <linux/serial_reg.h> /* For the various UART offsets */
-#include <linux/pci.h>
-
-#include "dgnc_driver.h" /* Driver main header file */
-#include "dgnc_cls.h"
-#include "dgnc_tty.h"
-
-static inline void cls_parse_isr(struct dgnc_board *brd, uint port);
-static inline void cls_clear_break(struct channel_t *ch, int force);
-static inline void cls_set_cts_flow_control(struct channel_t *ch);
-static inline void cls_set_rts_flow_control(struct channel_t *ch);
-static inline void cls_set_ixon_flow_control(struct channel_t *ch);
-static inline void cls_set_ixoff_flow_control(struct channel_t *ch);
-static inline void cls_set_no_output_flow_control(struct channel_t *ch);
-static inline void cls_set_no_input_flow_control(struct channel_t *ch);
-static void cls_parse_modem(struct channel_t *ch, unsigned char signals);
-static void cls_tasklet(unsigned long data);
-static void cls_vpd(struct dgnc_board *brd);
-static void cls_uart_init(struct channel_t *ch);
-static void cls_uart_off(struct channel_t *ch);
-static int cls_drain(struct tty_struct *tty, uint seconds);
-static void cls_param(struct tty_struct *tty);
-static void cls_assert_modem_signals(struct channel_t *ch);
-static void cls_flush_uart_write(struct channel_t *ch);
-static void cls_flush_uart_read(struct channel_t *ch);
-static void cls_disable_receiver(struct channel_t *ch);
-static void cls_enable_receiver(struct channel_t *ch);
-static void cls_send_break(struct channel_t *ch, int msecs);
-static void cls_send_start_character(struct channel_t *ch);
-static void cls_send_stop_character(struct channel_t *ch);
-static void cls_copy_data_from_uart_to_queue(struct channel_t *ch);
-static void cls_copy_data_from_queue_to_uart(struct channel_t *ch);
-static uint cls_get_uart_bytes_left(struct channel_t *ch);
-static void cls_send_immediate_char(struct channel_t *ch, unsigned char);
-static irqreturn_t cls_intr(int irq, void *voidbrd);
-
-struct board_ops dgnc_cls_ops = {
- .tasklet = cls_tasklet,
- .intr = cls_intr,
- .uart_init = cls_uart_init,
- .uart_off = cls_uart_off,
- .drain = cls_drain,
- .param = cls_param,
- .vpd = cls_vpd,
- .assert_modem_signals = cls_assert_modem_signals,
- .flush_uart_write = cls_flush_uart_write,
- .flush_uart_read = cls_flush_uart_read,
- .disable_receiver = cls_disable_receiver,
- .enable_receiver = cls_enable_receiver,
- .send_break = cls_send_break,
- .send_start_character = cls_send_start_character,
- .send_stop_character = cls_send_stop_character,
- .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
- .get_uart_bytes_left = cls_get_uart_bytes_left,
- .send_immediate_char = cls_send_immediate_char
-};
-
-static inline void cls_set_cts_flow_control(struct channel_t *ch)
-{
- unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
- unsigned char ier = readb(&ch->ch_cls_uart->ier);
- unsigned char isr_fcr = 0;
-
- /*
- * The Enhanced Register Set may only be accessed when
- * the Line Control Register is set to 0xBFh.
- */
- writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
-
- isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Turn on CTS flow control, turn off IXON flow control */
- isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_CTSDSR);
- isr_fcr &= ~(UART_EXAR654_EFR_IXON);
-
- writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
-
- /* Write old LCR value back out, which turns enhanced access off */
- writeb(lcrb, &ch->ch_cls_uart->lcr);
-
- /*
- * Enable interrupts for CTS flow, turn off interrupts for
- * received XOFF chars
- */
- ier |= (UART_EXAR654_IER_CTSDSR);
- ier &= ~(UART_EXAR654_IER_XOFF);
- writeb(ier, &ch->ch_cls_uart->ier);
-
- /* Set the usual FIFO values */
- writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
-
- writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
- UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
- &ch->ch_cls_uart->isr_fcr);
-
- ch->ch_t_tlevel = 16;
-
-}
-
-static inline void cls_set_ixon_flow_control(struct channel_t *ch)
-{
- unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
- unsigned char ier = readb(&ch->ch_cls_uart->ier);
- unsigned char isr_fcr = 0;
-
- /*
- * The Enhanced Register Set may only be accessed when
- * the Line Control Register is set to 0xBFh.
- */
- writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
-
- isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Turn on IXON flow control, turn off CTS flow control */
- isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXON);
- isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR);
-
- writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
-
- /* Now set our current start/stop chars while in enhanced mode */
- writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
- writeb(0, &ch->ch_cls_uart->lsr);
- writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
- writeb(0, &ch->ch_cls_uart->spr);
-
- /* Write old LCR value back out, which turns enhanced access off */
- writeb(lcrb, &ch->ch_cls_uart->lcr);
-
- /*
- * Disable interrupts for CTS flow, turn on interrupts for
- * received XOFF chars
- */
- ier &= ~(UART_EXAR654_IER_CTSDSR);
- ier |= (UART_EXAR654_IER_XOFF);
- writeb(ier, &ch->ch_cls_uart->ier);
-
- /* Set the usual FIFO values */
- writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
-
- writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
- UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
- &ch->ch_cls_uart->isr_fcr);
-
-}
-
-static inline void cls_set_no_output_flow_control(struct channel_t *ch)
-{
- unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
- unsigned char ier = readb(&ch->ch_cls_uart->ier);
- unsigned char isr_fcr = 0;
-
- /*
- * The Enhanced Register Set may only be accessed when
- * the Line Control Register is set to 0xBFh.
- */
- writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
-
- isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Turn off IXON flow control, turn off CTS flow control */
- isr_fcr |= (UART_EXAR654_EFR_ECB);
- isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR | UART_EXAR654_EFR_IXON);
-
- writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
-
- /* Write old LCR value back out, which turns enhanced access off */
- writeb(lcrb, &ch->ch_cls_uart->lcr);
-
- /*
- * Disable interrupts for CTS flow, turn off interrupts for
- * received XOFF chars
- */
- ier &= ~(UART_EXAR654_IER_CTSDSR);
- ier &= ~(UART_EXAR654_IER_XOFF);
- writeb(ier, &ch->ch_cls_uart->ier);
-
- /* Set the usual FIFO values */
- writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
-
- writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
- UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
- &ch->ch_cls_uart->isr_fcr);
-
- ch->ch_r_watermark = 0;
- ch->ch_t_tlevel = 16;
- ch->ch_r_tlevel = 16;
-
-}
-
-static inline void cls_set_rts_flow_control(struct channel_t *ch)
-{
- unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
- unsigned char ier = readb(&ch->ch_cls_uart->ier);
- unsigned char isr_fcr = 0;
-
- /*
- * The Enhanced Register Set may only be accessed when
- * the Line Control Register is set to 0xBFh.
- */
- writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
-
- isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Turn on RTS flow control, turn off IXOFF flow control */
- isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_RTSDTR);
- isr_fcr &= ~(UART_EXAR654_EFR_IXOFF);
-
- writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
-
- /* Write old LCR value back out, which turns enhanced access off */
- writeb(lcrb, &ch->ch_cls_uart->lcr);
-
- /* Enable interrupts for RTS flow */
- ier |= (UART_EXAR654_IER_RTSDTR);
- writeb(ier, &ch->ch_cls_uart->ier);
-
- /* Set the usual FIFO values */
- writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
-
- writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
- UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
- &ch->ch_cls_uart->isr_fcr);
-
- ch->ch_r_watermark = 4;
- ch->ch_r_tlevel = 8;
-
-}
-
-static inline void cls_set_ixoff_flow_control(struct channel_t *ch)
-{
- unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
- unsigned char ier = readb(&ch->ch_cls_uart->ier);
- unsigned char isr_fcr = 0;
-
- /*
- * The Enhanced Register Set may only be accessed when
- * the Line Control Register is set to 0xBFh.
- */
- writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
-
- isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Turn on IXOFF flow control, turn off RTS flow control */
- isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXOFF);
- isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR);
-
- writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
-
- /* Now set our current start/stop chars while in enhanced mode */
- writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
- writeb(0, &ch->ch_cls_uart->lsr);
- writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
- writeb(0, &ch->ch_cls_uart->spr);
-
- /* Write old LCR value back out, which turns enhanced access off */
- writeb(lcrb, &ch->ch_cls_uart->lcr);
-
- /* Disable interrupts for RTS flow */
- ier &= ~(UART_EXAR654_IER_RTSDTR);
- writeb(ier, &ch->ch_cls_uart->ier);
-
- /* Set the usual FIFO values */
- writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
-
- writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
- UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
- &ch->ch_cls_uart->isr_fcr);
-
-}
-
-static inline void cls_set_no_input_flow_control(struct channel_t *ch)
-{
- unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
- unsigned char ier = readb(&ch->ch_cls_uart->ier);
- unsigned char isr_fcr = 0;
-
- /*
- * The Enhanced Register Set may only be accessed when
- * the Line Control Register is set to 0xBFh.
- */
- writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
-
- isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Turn off IXOFF flow control, turn off RTS flow control */
- isr_fcr |= (UART_EXAR654_EFR_ECB);
- isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR | UART_EXAR654_EFR_IXOFF);
-
- writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
-
- /* Write old LCR value back out, which turns enhanced access off */
- writeb(lcrb, &ch->ch_cls_uart->lcr);
-
- /* Disable interrupts for RTS flow */
- ier &= ~(UART_EXAR654_IER_RTSDTR);
- writeb(ier, &ch->ch_cls_uart->ier);
-
- /* Set the usual FIFO values */
- writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
-
- writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
- UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
- &ch->ch_cls_uart->isr_fcr);
-
- ch->ch_t_tlevel = 16;
- ch->ch_r_tlevel = 16;
-
-}
-
-/*
- * cls_clear_break.
- * Determines whether it's time to shut off the break condition.
- *
- * No locks are assumed to be held when calling this function.
- * channel lock is held and released in this function.
- */
-static inline void cls_clear_break(struct channel_t *ch, int force)
-{
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* Bail if we aren't currently sending a break. */
- if (!ch->ch_stop_sending_break) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return;
- }
-
- /* Turn break off, and unset some variables */
- if (ch->ch_flags & CH_BREAK_SENDING) {
- if (time_after(jiffies, ch->ch_stop_sending_break) || force) {
- unsigned char temp = readb(&ch->ch_cls_uart->lcr);
-
- writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
- ch->ch_flags &= ~(CH_BREAK_SENDING);
- ch->ch_stop_sending_break = 0;
- }
- }
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-/* Parse the ISR register for the specific port */
-static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
-{
- struct channel_t *ch;
- unsigned char isr = 0;
- unsigned long flags;
-
- /*
- * No need to verify board pointer, it was already
- * verified in the interrupt routine.
- */
-
- if (port >= brd->nasync)
- return;
-
- ch = brd->channels[port];
- if (ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /* Here we try to figure out what caused the interrupt to happen */
- while (1) {
-
- isr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Bail if no pending interrupt on port */
- if (isr & UART_IIR_NO_INT)
- break;
-
- /* Receive Interrupt pending */
- if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
- /* Read data from uart -> queue */
- brd->intr_rx++;
- ch->ch_intr_rx++;
- cls_copy_data_from_uart_to_queue(ch);
- dgnc_check_queue_flow_control(ch);
- }
-
- /* Transmit Hold register empty pending */
- if (isr & UART_IIR_THRI) {
- /* Transfer data (if any) from Write Queue -> UART. */
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- brd->intr_tx++;
- ch->ch_intr_tx++;
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- cls_copy_data_from_queue_to_uart(ch);
- }
-
- /* CTS/RTS change of state */
- if (isr & UART_IIR_CTSRTS) {
- brd->intr_modem++;
- ch->ch_intr_modem++;
- /*
- * Don't need to do anything, the cls_parse_modem
- * below will grab the updated modem signals.
- */
- }
-
- /* Parse any modem signal changes */
- cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
- }
-}
-
-/*
- * cls_param()
- * Send any/all changes to the line to the UART.
- */
-static void cls_param(struct tty_struct *tty)
-{
- unsigned char lcr = 0;
- unsigned char uart_lcr = 0;
- unsigned char ier = 0;
- unsigned char uart_ier = 0;
- uint baud = 9600;
- int quot = 0;
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
- ch->ch_r_head = 0;
- ch->ch_r_tail = 0;
- ch->ch_e_head = 0;
- ch->ch_e_tail = 0;
- ch->ch_w_head = 0;
- ch->ch_w_tail = 0;
-
- cls_flush_uart_write(ch);
- cls_flush_uart_read(ch);
-
- /* The baudrate is B0 so all modem lines are to be dropped. */
- ch->ch_flags |= (CH_BAUD0);
- ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
- cls_assert_modem_signals(ch);
- ch->ch_old_baud = 0;
- return;
- } else if (ch->ch_custom_speed) {
-
- baud = ch->ch_custom_speed;
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
-
- /*
- * Bring back up RTS and DTR...
- * Also handle RTS or DTR toggle if set.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
- }
-
- } else {
- int iindex = 0;
- int jindex = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 131657, 153600, 230400, 460800,
- 921600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /*
- * Only use the TXPrint baud rate if the terminal
- * unit is NOT open
- */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
- (un->un_type == DGNC_PRINT))
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) &&
- (jindex < 16)) {
- baud = bauds[iindex][jindex];
- } else {
- baud = 0;
- }
-
- if (baud == 0)
- baud = 9600;
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
-
- /*
- * Bring back up RTS and DTR...
- * Also handle RTS or DTR toggle if set.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
- }
- }
-
- if (ch->ch_c_cflag & PARENB)
- lcr |= UART_LCR_PARITY;
-
- if (!(ch->ch_c_cflag & PARODD))
- lcr |= UART_LCR_EPAR;
-
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
-#ifdef CMSPAR
- if (ch->ch_c_cflag & CMSPAR)
- lcr |= UART_LCR_SPAR;
-#endif
-
- if (ch->ch_c_cflag & CSTOPB)
- lcr |= UART_LCR_STOP;
-
- switch (ch->ch_c_cflag & CSIZE) {
- case CS5:
- lcr |= UART_LCR_WLEN5;
- break;
- case CS6:
- lcr |= UART_LCR_WLEN6;
- break;
- case CS7:
- lcr |= UART_LCR_WLEN7;
- break;
- case CS8:
- default:
- lcr |= UART_LCR_WLEN8;
- break;
- }
-
- uart_ier = readb(&ch->ch_cls_uart->ier);
- ier = uart_ier;
- uart_lcr = readb(&ch->ch_cls_uart->lcr);
-
- if (baud == 0)
- baud = 9600;
-
- quot = ch->ch_bd->bd_dividend / baud;
-
- if (quot != 0 && ch->ch_old_baud != baud) {
- ch->ch_old_baud = baud;
- writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr);
- writeb((quot & 0xff), &ch->ch_cls_uart->txrx);
- writeb((quot >> 8), &ch->ch_cls_uart->ier);
- writeb(lcr, &ch->ch_cls_uart->lcr);
- }
-
- if (uart_lcr != lcr)
- writeb(lcr, &ch->ch_cls_uart->lcr);
-
- if (ch->ch_c_cflag & CREAD)
- ier |= (UART_IER_RDI | UART_IER_RLSI);
- else
- ier &= ~(UART_IER_RDI | UART_IER_RLSI);
-
- /*
- * Have the UART interrupt on modem signal changes ONLY when
- * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
- */
- if ((ch->ch_digi.digi_flags & CTSPACE) ||
- (ch->ch_digi.digi_flags & RTSPACE) ||
- (ch->ch_c_cflag & CRTSCTS) ||
- !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
- !(ch->ch_c_cflag & CLOCAL))
- ier |= UART_IER_MSI;
- else
- ier &= ~UART_IER_MSI;
-
- ier |= UART_IER_THRI;
-
- if (ier != uart_ier)
- writeb(ier, &ch->ch_cls_uart->ier);
-
- if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
- cls_set_cts_flow_control(ch);
- } else if (ch->ch_c_iflag & IXON) {
- /*
- * If start/stop is set to disable, then we should
- * disable flow control
- */
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE))
- cls_set_no_output_flow_control(ch);
- else
- cls_set_ixon_flow_control(ch);
- } else {
- cls_set_no_output_flow_control(ch);
- }
-
- if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
- cls_set_rts_flow_control(ch);
- } else if (ch->ch_c_iflag & IXOFF) {
- /*
- * If start/stop is set to disable, then we should disable
- * flow control
- */
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE))
- cls_set_no_input_flow_control(ch);
- else
- cls_set_ixoff_flow_control(ch);
- } else {
- cls_set_no_input_flow_control(ch);
- }
-
- cls_assert_modem_signals(ch);
-
- /* Get current status of the modem signals now */
- cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
-}
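To make the divisor step above concrete: with a hypothetical board dividend of 921600 and a requested 9600 baud, quot = 921600 / 9600 = 96, so 0x60 goes to the divisor low byte (txrx with DLAB set) and 0x00 to the high byte (ier with DLAB set). The same calculation in isolation, with the dividend treated as an assumed parameter:

/* Sketch: split a divisor into the two bytes the DLAB sequence above writes. */
static void split_divisor(unsigned int dividend, unsigned int baud,
			  unsigned char *dll, unsigned char *dlm)
{
	unsigned int quot = dividend / baud;	/* e.g. 921600 / 9600 = 96 */

	*dll = quot & 0xff;		/* low byte  -> txrx while DLAB is set */
	*dlm = (quot >> 8) & 0xff;	/* high byte -> ier  while DLAB is set */
}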
-
-/*
- * Our board poller function.
- */
-static void cls_tasklet(unsigned long data)
-{
- struct dgnc_board *bd = (struct dgnc_board *) data;
- struct channel_t *ch;
- unsigned long flags;
- int i;
- int state = 0;
- int ports = 0;
-
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- /* Cache a couple board values */
- spin_lock_irqsave(&bd->bd_lock, flags);
- state = bd->state;
- ports = bd->nasync;
- spin_unlock_irqrestore(&bd->bd_lock, flags);
-
- /*
-	 * Do NOT allow the interrupt routine to read the intr registers
-	 * until we release this lock.
- */
- spin_lock_irqsave(&bd->bd_intr_lock, flags);
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if ((state == BOARD_READY) && (ports > 0)) {
-
- /* Loop on each port */
- for (i = 0; i < ports; i++) {
- ch = bd->channels[i];
-
- /*
- * NOTE: Remember you CANNOT hold any channel
- * locks when calling input.
-			 * During input processing, it's possible we
- * will call ld, which might do callbacks back
- * into us.
- */
- dgnc_input(ch);
-
- /*
- * Channel lock is grabbed and then released
- * inside this routine.
- */
- cls_copy_data_from_queue_to_uart(ch);
- dgnc_wakeup_writes(ch);
-
- /*
- * Check carrier function.
- */
- dgnc_carrier(ch);
-
- /*
- * The timing check of turning off the break is done
- * inside clear_break()
- */
- if (ch->ch_stop_sending_break)
- cls_clear_break(ch, 0);
- }
- }
-
- spin_unlock_irqrestore(&bd->bd_intr_lock, flags);
-
-}
-
-/*
- * cls_intr()
- *
- * Classic specific interrupt handler.
- */
-static irqreturn_t cls_intr(int irq, void *voidbrd)
-{
- struct dgnc_board *brd = voidbrd;
- uint i = 0;
- unsigned char poll_reg;
- unsigned long flags;
-
- /*
-	 * Check to make sure we didn't receive an interrupt with a null board
- * associated or a board pointer that wasn't ours.
- */
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return IRQ_NONE;
-
- spin_lock_irqsave(&brd->bd_intr_lock, flags);
-
- brd->intr_count++;
-
- /*
-	 * Check the board's global interrupt offset to see if we
-	 * actually do have an interrupt pending for us.
- */
- poll_reg = readb(brd->re_map_membase + UART_CLASSIC_POLL_ADDR_OFFSET);
-
- /* If 0, no interrupts pending */
- if (!poll_reg) {
- spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
- return IRQ_NONE;
- }
-
- /* Parse each port to find out what caused the interrupt */
- for (i = 0; i < brd->nasync; i++)
- cls_parse_isr(brd, i);
-
- /*
-	 * Schedule the tasklet for more in-depth servicing at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
-
- spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
-
- return IRQ_HANDLED;
-}
-
-static void cls_disable_receiver(struct channel_t *ch)
-{
- unsigned char tmp = readb(&ch->ch_cls_uart->ier);
-
- tmp &= ~(UART_IER_RDI);
- writeb(tmp, &ch->ch_cls_uart->ier);
-}
-
-static void cls_enable_receiver(struct channel_t *ch)
-{
- unsigned char tmp = readb(&ch->ch_cls_uart->ier);
-
- tmp |= (UART_IER_RDI);
- writeb(tmp, &ch->ch_cls_uart->ier);
-}
-
-static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
-{
- int qleft = 0;
- unsigned char linestatus = 0;
- unsigned char error_mask = 0;
- ushort head;
- ushort tail;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* cache head and tail of queue */
- head = ch->ch_r_head;
- tail = ch->ch_r_tail;
-
- /* Store how much space we have left in the queue */
- qleft = (tail - head - 1);
- if (qleft < 0)
- qleft += RQUEUEMASK + 1;
-
- /*
- * Create a mask to determine whether we should
- * insert the character (if any) into our queue.
- */
- if (ch->ch_c_iflag & IGNBRK)
- error_mask |= UART_LSR_BI;
-
- while (1) {
- linestatus = readb(&ch->ch_cls_uart->lsr);
-
- if (!(linestatus & (UART_LSR_DR)))
- break;
-
- /*
- * Discard character if we are ignoring the error mask.
- */
- if (linestatus & error_mask) {
- unsigned char discard;
-
- linestatus = 0;
- discard = readb(&ch->ch_cls_uart->txrx);
- continue;
- }
-
- /*
- * If our queue is full, we have no choice but to drop some
- * data. The assumption is that HWFLOW or SWFLOW should have
- * stopped things way way before we got to this point.
- *
-		 * I decided that I wanted to ditch the oldest data first;
-		 * I hope that's okay with everyone? Yes? Good.
- */
- while (qleft < 1) {
- tail = (tail + 1) & RQUEUEMASK;
- ch->ch_r_tail = tail;
- ch->ch_err_overrun++;
- qleft++;
- }
-
- ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
- | UART_LSR_FE);
- ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
-
- qleft--;
-
- if (ch->ch_equeue[head] & UART_LSR_PE)
- ch->ch_err_parity++;
- if (ch->ch_equeue[head] & UART_LSR_BI)
- ch->ch_err_break++;
- if (ch->ch_equeue[head] & UART_LSR_FE)
- ch->ch_err_frame++;
-
- /* Add to, and flip head if needed */
- head = (head + 1) & RQUEUEMASK;
- ch->ch_rxcount++;
- }
-
- /*
- * Write new final heads to channel structure.
- */
- ch->ch_r_head = head & RQUEUEMASK;
- ch->ch_e_head = head & EQUEUEMASK;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
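The space calculation above is the standard circular-buffer formula with one slot kept empty: free = (tail - head - 1) mod size. A standalone sketch; the queue size used here is hypothetical, the real masks are defined elsewhere in the driver:

/* Sketch of the free-space arithmetic above; 'mask' is size - 1 and size is
 * a power of two (e.g. a hypothetical 8 KiB queue gives mask = 8191).
 */
static int queue_free(unsigned short head, unsigned short tail, unsigned int mask)
{
	int qleft = tail - head - 1;	/* one slot is always kept empty */

	if (qleft < 0)
		qleft += mask + 1;	/* wrap around the ring */
	return qleft;
}

/*
 * head == tail              -> queue empty, queue_free() == mask
 * (head + 1) & mask == tail -> queue full,  queue_free() == 0
 */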
-
-/*
- * This function basically goes to sleep for secs, or until
- * it gets signalled that the port has fully drained.
- */
-static int cls_drain(struct tty_struct *tty, uint seconds)
-{
- unsigned long flags;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -ENXIO;
-
- un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return -ENXIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- un->un_flags |= UN_EMPTY;
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /*
- * NOTE: Do something with time passed in.
- */
-
- /* If ret is non-zero, user ctrl-c'ed us */
-
- return wait_event_interruptible(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0));
-}
-
-/* Channel lock MUST be held before calling this function! */
-static void cls_flush_uart_write(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
- &ch->ch_cls_uart->isr_fcr);
- udelay(10);
-
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-}
-
-/* Channel lock MUST be held before calling this function! */
-static void cls_flush_uart_read(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /*
- * For complete POSIX compatibility, we should be purging the
- * read FIFO in the UART here.
- *
- * However, clearing the read FIFO (UART_FCR_CLEAR_RCVR) also
-	 * incorrectly flushes write data and basically trashes the FIFO.
- *
- * Presumably, this is a bug in this UART.
- */
-
- udelay(10);
-}
-
-static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
-{
- ushort head;
- ushort tail;
- int n;
- int qlen;
- uint len_written = 0;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* No data to write to the UART */
- if (ch->ch_w_tail == ch->ch_w_head)
- goto exit_unlock;
-
- /* If port is "stopped", don't send any data to the UART */
- if ((ch->ch_flags & CH_FORCED_STOP) ||
- (ch->ch_flags & CH_BREAK_SENDING))
- goto exit_unlock;
-
- if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
- goto exit_unlock;
-
- n = 32;
-
- /* cache head and tail of queue */
- head = ch->ch_w_head & WQUEUEMASK;
- tail = ch->ch_w_tail & WQUEUEMASK;
- qlen = (head - tail) & WQUEUEMASK;
-
- /* Find minimum of the FIFO space, versus queue length */
- n = min(n, qlen);
-
- while (n > 0) {
-
- /*
- * If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has
- * completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_RTS)) {
- ch->ch_mostat |= (UART_MCR_RTS);
- cls_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- /*
- * If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has
- * completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_DTR)) {
- ch->ch_mostat |= (UART_MCR_DTR);
- cls_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
- writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
- ch->ch_w_tail++;
- ch->ch_w_tail &= WQUEUEMASK;
- ch->ch_txcount++;
- len_written++;
- n--;
- }
-
- if (len_written > 0)
- ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-
-exit_unlock:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-static void cls_parse_modem(struct channel_t *ch, unsigned char signals)
-{
- unsigned char msignals = signals;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /*
- * Do altpin switching. Altpin switches DCD and DSR.
-	 * This probably breaks DSRPACE, so we should be more clever here.
- */
- spin_lock_irqsave(&ch->ch_lock, flags);
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- unsigned char mswap = signals;
-
- if (mswap & UART_MSR_DDCD) {
- msignals &= ~UART_MSR_DDCD;
- msignals |= UART_MSR_DDSR;
- }
- if (mswap & UART_MSR_DDSR) {
- msignals &= ~UART_MSR_DDSR;
- msignals |= UART_MSR_DDCD;
- }
- if (mswap & UART_MSR_DCD) {
- msignals &= ~UART_MSR_DCD;
- msignals |= UART_MSR_DSR;
- }
- if (mswap & UART_MSR_DSR) {
- msignals &= ~UART_MSR_DSR;
- msignals |= UART_MSR_DCD;
- }
- }
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /*
-	 * Scrub off lower bits. They signify deltas, which I don't
- * care about
- */
- signals &= 0xf0;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- if (msignals & UART_MSR_DCD)
- ch->ch_mistat |= UART_MSR_DCD;
- else
- ch->ch_mistat &= ~UART_MSR_DCD;
-
- if (msignals & UART_MSR_DSR)
- ch->ch_mistat |= UART_MSR_DSR;
- else
- ch->ch_mistat &= ~UART_MSR_DSR;
-
- if (msignals & UART_MSR_RI)
- ch->ch_mistat |= UART_MSR_RI;
- else
- ch->ch_mistat &= ~UART_MSR_RI;
-
- if (msignals & UART_MSR_CTS)
- ch->ch_mistat |= UART_MSR_CTS;
- else
- ch->ch_mistat &= ~UART_MSR_CTS;
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-/* Make the UART raise any of the output signals we want up */
-static void cls_assert_modem_signals(struct channel_t *ch)
-{
- unsigned char out;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- out = ch->ch_mostat;
-
- if (ch->ch_flags & CH_LOOPBACK)
- out |= UART_MCR_LOOP;
-
- writeb(out, &ch->ch_cls_uart->mcr);
-
-	/* Give the UART time to actually raise or drop the signals */
- udelay(10);
-}
-
-static void cls_send_start_character(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- if (ch->ch_startc != _POSIX_VDISABLE) {
- ch->ch_xon_sends++;
- writeb(ch->ch_startc, &ch->ch_cls_uart->txrx);
- }
-}
-
-static void cls_send_stop_character(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- if (ch->ch_stopc != _POSIX_VDISABLE) {
- ch->ch_xoff_sends++;
- writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx);
- }
-}
-
-/* Inits UART */
-static void cls_uart_init(struct channel_t *ch)
-{
- unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
- unsigned char isr_fcr = 0;
-
- writeb(0, &ch->ch_cls_uart->ier);
-
- /*
- * The Enhanced Register Set may only be accessed when
- * the Line Control Register is set to 0xBFh.
- */
- writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
-
- isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
-
- /* Turn on Enhanced/Extended controls */
- isr_fcr |= (UART_EXAR654_EFR_ECB);
-
- writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
-
- /* Write old LCR value back out, which turns enhanced access off */
- writeb(lcrb, &ch->ch_cls_uart->lcr);
-
- /* Clear out UART and FIFO */
- readb(&ch->ch_cls_uart->txrx);
-
- writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
- &ch->ch_cls_uart->isr_fcr);
- udelay(10);
-
- ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-
- readb(&ch->ch_cls_uart->lsr);
- readb(&ch->ch_cls_uart->msr);
-}
-
-/*
- * Turns off UART.
- */
-static void cls_uart_off(struct channel_t *ch)
-{
- writeb(0, &ch->ch_cls_uart->ier);
-}
-
-/*
- * cls_get_uart_bytes_left.
- * Returns 0 if nothing is left in the FIFO, returns 1 otherwise.
- *
- * The channel lock MUST be held by the calling function.
- */
-static uint cls_get_uart_bytes_left(struct channel_t *ch)
-{
- unsigned char left = 0;
- unsigned char lsr = 0;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
-
- lsr = readb(&ch->ch_cls_uart->lsr);
-
- /* Determine whether the Transmitter is empty or not */
- if (!(lsr & UART_LSR_TEMT)) {
- if (ch->ch_flags & CH_TX_FIFO_EMPTY)
- tasklet_schedule(&ch->ch_bd->helper_tasklet);
- left = 1;
- } else {
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- left = 0;
- }
-
- return left;
-}
-
-/*
- * cls_send_break.
- * Starts sending a break thru the UART.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void cls_send_break(struct channel_t *ch, int msecs)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /*
- * If we receive a time of 0, this means turn off the break.
- */
- if (msecs == 0) {
- /* Turn break off, and unset some variables */
- if (ch->ch_flags & CH_BREAK_SENDING) {
- unsigned char temp = readb(&ch->ch_cls_uart->lcr);
-
- writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
- ch->ch_flags &= ~(CH_BREAK_SENDING);
- ch->ch_stop_sending_break = 0;
- }
- return;
- }
-
- /*
- * Set the time we should stop sending the break.
- * If we are already sending a break, toss away the existing
- * time to stop, and use this new value instead.
- */
- ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs);
-
- /* Tell the UART to start sending the break */
- if (!(ch->ch_flags & CH_BREAK_SENDING)) {
- unsigned char temp = readb(&ch->ch_cls_uart->lcr);
-
- writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
- ch->ch_flags |= (CH_BREAK_SENDING);
- }
-}
-
-/*
- * cls_send_immediate_char.
- * Sends a specific character as soon as possible to the UART,
- * jumping over any bytes that might be in the write queue.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- writeb(c, &ch->ch_cls_uart->txrx);
-}
-
-static void cls_vpd(struct dgnc_board *brd)
-{
- ulong vpdbase; /* Start of io base of the card */
- u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
- int i = 0;
-
- vpdbase = pci_resource_start(brd->pdev, 3);
-
- /* No VPD */
- if (!vpdbase)
- return;
-
- re_map_vpdbase = ioremap(vpdbase, 0x400);
-
- if (!re_map_vpdbase)
- return;
-
- /* Store the VPD into our buffer */
- for (i = 0; i < 0x40; i++) {
- brd->vpd[i] = readb(re_map_vpdbase + i);
- pr_info("%x ", brd->vpd[i]);
- }
- pr_info("\n");
-
- if (re_map_vpdbase)
- iounmap(re_map_vpdbase);
-}
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
deleted file mode 100644
index 2597e36d38c4..000000000000
--- a/drivers/staging/dgnc/dgnc_cls.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_CLS_H
-#define __DGNC_CLS_H
-
-/************************************************************************
- * Per channel/port Classic UART structure *
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * U = Unused. *
- ************************************************************************/
-
-/*
- * txrx : WR RHR/THR - Holding reg
- * ier : WR IER - Interrupt Enable Reg
- * isr_fcr : WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg
- * lcr : WR LCR - Line Control Reg
- * mcr : WR MCR - Modem Control Reg
- * lsr : WR LSR - Line Status Reg
- * msr : WR MSR - Modem Status Reg
- * spr : WR SPR - Scratch pad Reg
- */
-struct cls_uart_struct {
- u8 txrx;
- u8 ier;
- u8 isr_fcr;
- u8 lcr;
- u8 mcr;
- u8 lsr;
- u8 msr;
- u8 spr;
-};
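As an illustrative aside (not part of the original header), a channel's ch_cls_uart pointer maps onto this register layout, so individual registers are read and written with readb()/writeb() on the struct fields; the helper below is hypothetical:

	/* Hypothetical sketch: fetch one received byte if the LSR reports data ready. */
	static inline int cls_example_read_byte(struct channel_t *ch, unsigned char *out)
	{
		unsigned char lsr = readb(&ch->ch_cls_uart->lsr);

		if (!(lsr & UART_LSR_DR))
			return 0;			/* nothing pending */

		*out = readb(&ch->ch_cls_uart->txrx);	/* txrx is the RHR on read */
		return 1;
	}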
-
-/* Where to read the interrupt register (8bits) */
-#define UART_CLASSIC_POLL_ADDR_OFFSET 0x40
-
-#define UART_EXAR654_ENHANCED_REGISTER_SET 0xBF
-
-#define UART_16654_FCR_TXTRIGGER_16 0x10
-#define UART_16654_FCR_RXTRIGGER_16 0x40
-#define UART_16654_FCR_RXTRIGGER_56 0x80
-
-/* Received CTS/RTS change of state */
-#define UART_IIR_CTSRTS 0x20
-
-/* Receiver data TIMEOUT */
-#define UART_IIR_RDI_TIMEOUT 0x0C
-
-/*
- * These are the EXTENDED definitions for the Exar 654's Enhanced
- * Feature and Interrupt Enable registers.
- */
-#define UART_EXAR654_EFR_ECB 0x10 /* Enhanced control bit */
-#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
-#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
-#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
-#define UART_EXAR654_EFR_CTSDSR 0x80	/* Auto CTS/DSR Flow Control Enable */
-#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */
-#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
-#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
-
-/*
- * Our Global Variables
- */
-extern struct board_ops dgnc_cls_ops;
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
deleted file mode 100644
index 7546aff65002..000000000000
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ /dev/null
@@ -1,720 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include "dgnc_driver.h"
-#include "dgnc_pci.h"
-#include "dgnc_mgmt.h"
-#include "dgnc_tty.h"
-#include "dgnc_cls.h"
-#include "dgnc_neo.h"
-#include "dgnc_sysfs.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgnc");
-
-/**************************************************************************
- *
- * protos for this file
- *
- */
-static int dgnc_start(void);
-static int dgnc_finalize_board_init(struct dgnc_board *brd);
-static void dgnc_init_globals(void);
-static int dgnc_found_board(struct pci_dev *pdev, int id);
-static void dgnc_cleanup_board(struct dgnc_board *brd);
-static void dgnc_poll_handler(ulong dummy);
-static int dgnc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void dgnc_do_remap(struct dgnc_board *brd);
-
-/*
- * File operations permitted on Control/Management major.
- */
-static const struct file_operations dgnc_BoardFops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = dgnc_mgmt_ioctl,
- .open = dgnc_mgmt_open,
- .release = dgnc_mgmt_close
-};
-
-
-/*
- * Globals
- */
-uint dgnc_NumBoards;
-struct dgnc_board *dgnc_Board[MAXBOARDS];
-DEFINE_SPINLOCK(dgnc_global_lock);
-uint dgnc_Major;
-int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
-
-/*
- * Static vars.
- */
-static struct class *dgnc_class;
-
-/*
- * Poller stuff
- */
-static DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
-static ulong dgnc_poll_time; /* Time of next poll */
-static uint dgnc_poll_stop; /* Used to tell poller to stop */
-static struct timer_list dgnc_poll_timer;
-
-
-static const struct pci_device_id dgnc_pci_tbl[] = {
- {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_4_DID), .driver_data = 0},
- {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_4_422_DID), .driver_data = 1},
- {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_8_DID), .driver_data = 2},
- {PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_8_422_DID), .driver_data = 3},
- {0,}
-};
-MODULE_DEVICE_TABLE(pci, dgnc_pci_tbl);
-
-struct board_id {
- unsigned char *name;
- uint maxports;
- unsigned int is_pci_express;
-};
-
-static struct board_id dgnc_Ids[] = {
- { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
- { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
- { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
- { PCI_DEVICE_CLASSIC_8_422_PCI_NAME, 8, 0 },
- { PCI_DEVICE_NEO_4_PCI_NAME, 4, 0 },
- { PCI_DEVICE_NEO_8_PCI_NAME, 8, 0 },
- { PCI_DEVICE_NEO_2DB9_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_2DB9PRI_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_2RJ45_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_1_422_PCI_NAME, 1, 0 },
- { PCI_DEVICE_NEO_1_422_485_PCI_NAME, 1, 0 },
- { PCI_DEVICE_NEO_2_422_485_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME, 8, 1 },
- { PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME, 4, 1 },
- { PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME, 4, 1 },
- { PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME, 8, 1 },
- { NULL, 0, 0 }
-};
-
-static struct pci_driver dgnc_driver = {
- .name = "dgnc",
- .probe = dgnc_init_one,
- .id_table = dgnc_pci_tbl,
-};
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-/*
- * dgnc_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-static void dgnc_cleanup_module(void)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- dgnc_poll_stop = 1;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- /* Turn off poller right away. */
- del_timer_sync(&dgnc_poll_timer);
-
- dgnc_remove_driver_sysfiles(&dgnc_driver);
-
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
- class_destroy(dgnc_class);
- unregister_chrdev(dgnc_Major, "dgnc");
-
- for (i = 0; i < dgnc_NumBoards; ++i) {
- dgnc_remove_ports_sysfiles(dgnc_Board[i]);
- dgnc_tty_uninit(dgnc_Board[i]);
- dgnc_cleanup_board(dgnc_Board[i]);
- }
-
- dgnc_tty_post_uninit();
-
- if (dgnc_NumBoards)
- pci_unregister_driver(&dgnc_driver);
-}
-
-/*
- * init_module()
- *
- * Module load. This is where it all starts.
- */
-static int __init dgnc_init_module(void)
-{
- int rc = 0;
-
- /*
- * Initialize global stuff
- */
- rc = dgnc_start();
-
- if (rc < 0)
- return rc;
-
- /*
- * Find and configure all the cards
- */
- rc = pci_register_driver(&dgnc_driver);
-
- /*
- * If something went wrong in the scan, bail out of driver.
- */
- if (rc < 0) {
- /* Only unregister if it was actually registered. */
- if (dgnc_NumBoards)
- pci_unregister_driver(&dgnc_driver);
- else
- pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
-
- dgnc_cleanup_module();
- } else {
- dgnc_create_driver_sysfiles(&dgnc_driver);
- }
-
- return rc;
-}
-
-module_init(dgnc_init_module);
-module_exit(dgnc_cleanup_module);
-
-/*
- * Start of driver.
- */
-static int dgnc_start(void)
-{
- int rc = 0;
- unsigned long flags;
- struct device *dev;
-
- /* make sure that the globals are init'd before we do anything else */
- dgnc_init_globals();
-
- /*
- * Register our base character device into the kernel.
- * This allows the download daemon to connect to the downld device
- * before any of the boards are init'ed.
- *
- * Register management/dpa devices
- */
- rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
- return rc;
- }
- dgnc_Major = rc;
-
- dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
- if (IS_ERR(dgnc_class)) {
- rc = PTR_ERR(dgnc_class);
- pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
- goto failed_class;
- }
-
- dev = device_create(dgnc_class, NULL,
- MKDEV(dgnc_Major, 0),
- NULL, "dgnc_mgmt");
- if (IS_ERR(dev)) {
- rc = PTR_ERR(dev);
- pr_err(DRVSTR ": Can't create device (%d)\n", rc);
- goto failed_device;
- }
-
- /*
- * Init any global tty stuff.
- */
- rc = dgnc_tty_preinit();
-
- if (rc < 0) {
- pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
- goto failed_tty;
- }
-
- /* Start the poller */
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
- dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
- dgnc_poll_timer.expires = dgnc_poll_time;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- add_timer(&dgnc_poll_timer);
-
- return 0;
-
-failed_tty:
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
-failed_device:
- class_destroy(dgnc_class);
-failed_class:
- unregister_chrdev(dgnc_Major, "dgnc");
- return rc;
-}
-
-/* returns count (>= 0), or negative on error */
-static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
-
- /* wake up and enable device */
- rc = pci_enable_device(pdev);
-
- if (rc < 0) {
- rc = -EIO;
- } else {
- rc = dgnc_found_board(pdev, ent->driver_data);
- if (rc == 0)
- dgnc_NumBoards++;
- }
- return rc;
-}
-
-/*
- * dgnc_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgnc_cleanup_board(struct dgnc_board *brd)
-{
- int i = 0;
-
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- switch (brd->device) {
- case PCI_DEVICE_CLASSIC_4_DID:
- case PCI_DEVICE_CLASSIC_8_DID:
- case PCI_DEVICE_CLASSIC_4_422_DID:
- case PCI_DEVICE_CLASSIC_8_422_DID:
-
- /* Tell card not to interrupt anymore. */
- outb(0, brd->iobase + 0x4c);
- break;
-
- default:
- break;
- }
-
- if (brd->irq)
- free_irq(brd->irq, brd);
-
- tasklet_kill(&brd->helper_tasklet);
-
- if (brd->re_map_membase) {
- iounmap(brd->re_map_membase);
- brd->re_map_membase = NULL;
- }
-
- if (brd->msgbuf_head) {
- unsigned long flags;
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
- brd->msgbuf = NULL;
- dev_dbg(&brd->pdev->dev, "%s\n", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
- }
-
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++) {
- if (brd->channels[i]) {
- kfree(brd->channels[i]->ch_rqueue);
- kfree(brd->channels[i]->ch_equeue);
- kfree(brd->channels[i]->ch_wqueue);
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- }
-
-
- dgnc_Board[brd->boardnum] = NULL;
-
- kfree(brd);
-}
-
-
-/*
- * dgnc_found_board()
- *
- * A board has been found, init it.
- */
-static int dgnc_found_board(struct pci_dev *pdev, int id)
-{
- struct dgnc_board *brd;
- unsigned int pci_irq;
- int i = 0;
- int rc = 0;
- unsigned long flags;
-
- /* get the board structure and prep it */
- dgnc_Board[dgnc_NumBoards] = kzalloc(sizeof(*brd), GFP_KERNEL);
- brd = dgnc_Board[dgnc_NumBoards];
-
- if (!brd)
- return -ENOMEM;
-
- /* make a temporary message buffer for the boot messages */
- brd->msgbuf_head = kcalloc(8192, sizeof(u8), GFP_KERNEL);
- brd->msgbuf = brd->msgbuf_head;
-
- if (!brd->msgbuf) {
- kfree(brd);
- return -ENOMEM;
- }
-
- /* store the info for the board we've found */
- brd->magic = DGNC_BOARD_MAGIC;
- brd->boardnum = dgnc_NumBoards;
- brd->vendor = dgnc_pci_tbl[id].vendor;
- brd->device = dgnc_pci_tbl[id].device;
- brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgnc_Ids[id].name;
- brd->maxports = dgnc_Ids[id].maxports;
-	if (dgnc_Ids[id].is_pci_express)
- brd->bd_flags |= BD_IS_PCI_EXPRESS;
- brd->dpastatus = BD_NOFEP;
- init_waitqueue_head(&brd->state_wait);
-
- spin_lock_init(&brd->bd_lock);
- spin_lock_init(&brd->bd_intr_lock);
-
- brd->state = BOARD_FOUND;
-
- for (i = 0; i < MAXPORTS; i++)
- brd->channels[i] = NULL;
-
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
-
- pci_irq = pdev->irq;
- brd->irq = pci_irq;
-
-
- switch (brd->device) {
-
- case PCI_DEVICE_CLASSIC_4_DID:
- case PCI_DEVICE_CLASSIC_8_DID:
- case PCI_DEVICE_CLASSIC_4_422_DID:
- case PCI_DEVICE_CLASSIC_8_422_DID:
-
- brd->dpatype = T_CLASSIC | T_PCIBUS;
-
- /*
- * For PCI ClassicBoards
- * PCI Local Address (i.e. "resource" number) space
- * 0 PLX Memory Mapped Config
- * 1 PLX I/O Mapped Config
- * 2 I/O Mapped UARTs and Status
- * 3 Memory Mapped VPD
- * 4 Memory Mapped UARTs and Status
- */
-
-
- /* get the PCI Base Address Registers */
- brd->membase = pci_resource_start(pdev, 4);
-
- if (!brd->membase) {
- dev_err(&brd->pdev->dev,
- "Card has no PCI IO resources, failing.\n");
- return -ENODEV;
- }
-
- brd->membase_end = pci_resource_end(pdev, 4);
-
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
-
- brd->iobase = pci_resource_start(pdev, 1);
- brd->iobase_end = pci_resource_end(pdev, 1);
- brd->iobase = ((unsigned int) (brd->iobase)) & 0xFFFE;
-
- /* Assign the board_ops struct */
- brd->bd_ops = &dgnc_cls_ops;
-
- brd->bd_uart_offset = 0x8;
- brd->bd_dividend = 921600;
-
- dgnc_do_remap(brd);
-
- /* Get and store the board VPD, if it exists */
- brd->bd_ops->vpd(brd);
-
- /*
- * Enable Local Interrupt 1 (0x1),
- * Local Interrupt 1 Polarity Active high (0x2),
- * Enable PCI interrupt (0x40)
- */
- outb(0x43, brd->iobase + 0x4c);
-
- break;
-
-
- case PCI_DEVICE_NEO_4_DID:
- case PCI_DEVICE_NEO_8_DID:
- case PCI_DEVICE_NEO_2DB9_DID:
- case PCI_DEVICE_NEO_2DB9PRI_DID:
- case PCI_DEVICE_NEO_2RJ45_DID:
- case PCI_DEVICE_NEO_2RJ45PRI_DID:
- case PCI_DEVICE_NEO_1_422_DID:
- case PCI_DEVICE_NEO_1_422_485_DID:
- case PCI_DEVICE_NEO_2_422_485_DID:
- case PCI_DEVICE_NEO_EXPRESS_8_DID:
- case PCI_DEVICE_NEO_EXPRESS_4_DID:
- case PCI_DEVICE_NEO_EXPRESS_4RJ45_DID:
- case PCI_DEVICE_NEO_EXPRESS_8RJ45_DID:
-
- /*
- * This chip is set up 100% when we get to it.
- * No need to enable global interrupts or anything.
- */
- if (brd->bd_flags & BD_IS_PCI_EXPRESS)
- brd->dpatype = T_NEO_EXPRESS | T_PCIBUS;
- else
- brd->dpatype = T_NEO | T_PCIBUS;
-
- /* get the PCI Base Address Registers */
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
-
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
-
- /* Assign the board_ops struct */
- brd->bd_ops = &dgnc_neo_ops;
-
- brd->bd_uart_offset = 0x200;
- brd->bd_dividend = 921600;
-
- dgnc_do_remap(brd);
-
- if (brd->re_map_membase) {
-
- /* Read and store the dvid after remapping */
- brd->dvid = readb(brd->re_map_membase + 0x8D);
-
- /* Get and store the board VPD, if it exists */
- brd->bd_ops->vpd(brd);
- }
- break;
-
- default:
- dev_err(&brd->pdev->dev,
- "Didn't find any compatible Neo/Classic PCI boards.\n");
- return -ENXIO;
-
- }
-
- /*
- * Do tty device initialization.
- */
-
- rc = dgnc_tty_register(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
- goto failed;
- }
-
- rc = dgnc_finalize_board_init(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
- goto failed;
- }
-
- rc = dgnc_tty_init(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
- goto failed;
- }
-
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
-
- dgnc_create_ports_sysfiles(brd);
-
- /* init our poll helper tasklet */
- tasklet_init(&brd->helper_tasklet,
- brd->bd_ops->tasklet,
- (unsigned long) brd);
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
- brd->msgbuf = NULL;
- dev_dbg(&brd->pdev->dev, "%s\n", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
- wake_up_interruptible(&brd->state_wait);
-
- return 0;
-
-failed:
- dgnc_tty_uninit(brd);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
-
- return -ENXIO;
-
-}
-
-
-static int dgnc_finalize_board_init(struct dgnc_board *brd)
-{
- int rc = 0;
-
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return -ENODEV;
-
- if (brd->irq) {
- rc = request_irq(brd->irq, brd->bd_ops->intr,
- IRQF_SHARED, "DGNC", brd);
-
- if (rc) {
- dev_err(&brd->pdev->dev,
- "Failed to hook IRQ %d\n", brd->irq);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- rc = -ENODEV;
- }
- }
- return rc;
-}
-
-/*
- * Remap PCI memory.
- */
-static void dgnc_do_remap(struct dgnc_board *brd)
-{
-
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- brd->re_map_membase = ioremap(brd->membase, 0x1000);
-}
-
-
-/*****************************************************************************
-*
-* Function:
-*
-* dgnc_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
-
-static void dgnc_poll_handler(ulong dummy)
-{
- struct dgnc_board *brd;
- unsigned long flags;
- int i;
- unsigned long new_time;
-
- /* Go thru each board, kicking off a tasklet for each if needed */
- for (i = 0; i < dgnc_NumBoards; i++) {
- brd = dgnc_Board[i];
-
- spin_lock_irqsave(&brd->bd_lock, flags);
-
- /* If board is in a failed state don't schedule a tasklet */
- if (brd->state == BOARD_FAILED) {
- spin_unlock_irqrestore(&brd->bd_lock, flags);
- continue;
- }
-
- /* Schedule a poll helper task */
- tasklet_schedule(&brd->helper_tasklet);
-
- spin_unlock_irqrestore(&brd->bd_lock, flags);
- }
-
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
- spin_lock_irqsave(&dgnc_poll_lock, flags);
- dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
-
- new_time = dgnc_poll_time - jiffies;
-
- if ((ulong) new_time >= 2 * dgnc_poll_tick)
- dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
-
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
- dgnc_poll_timer.expires = dgnc_poll_time;
- spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
- if (!dgnc_poll_stop)
- add_timer(&dgnc_poll_timer);
-}
-
-/*
- * dgnc_init_globals()
- *
- * This is where we initialize the globals from the static insmod
- * configuration variables. These are declared near the head of
- * this file.
- */
-static void dgnc_init_globals(void)
-{
- int i = 0;
-
- dgnc_NumBoards = 0;
-
- for (i = 0; i < MAXBOARDS; i++)
- dgnc_Board[i] = NULL;
-
- init_timer(&dgnc_poll_timer);
-}
-
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
deleted file mode 100644
index 06ece5151fe4..000000000000
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- *************************************************************************
- *
- * Driver includes
- *
- *************************************************************************/
-
-#ifndef __DGNC_DRIVER_H
-#define __DGNC_DRIVER_H
-
-#include <linux/types.h>
-#include <linux/tty.h>
-#include <linux/interrupt.h>
-
-#include "digi.h" /* Digi specific ioctl header */
-#include "dgnc_sysfs.h" /* Support for SYSFS */
-
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
-
-/* Driver identification and error statements */
-#define PROCSTR "dgnc" /* /proc entries */
-#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
-#define DRVSTR "dgnc" /* Driver name string */
-#define DG_PART "40002369_F" /* RPM part number */
-
-#define TRC_TO_CONSOLE 1
-
-/* Number of boards we support at once. */
-#define MAXBOARDS 20
-#define MAXPORTS 8
-#define MAXTTYNAMELEN 200
-
-/* Our 3 magic numbers for our board, channel and unit structs */
-#define DGNC_BOARD_MAGIC 0x5c6df104
-#define DGNC_CHANNEL_MAGIC 0x6c6df104
-#define DGNC_UNIT_MAGIC 0x7c6df104
-
-/* Serial port types */
-#define DGNC_SERIAL 0
-#define DGNC_PRINT 1
-
-#define SERIAL_TYPE_NORMAL 1
-
-#define PORT_NUM(dev) ((dev) & 0x7f)
-#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80)
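As a worked example of this minor-number encoding (illustrative, not text from the original header), minor 0x83 and minor 0x03 both select port 3, but only the former is the transparent-print unit:

/* PORT_NUM(0x83) == (0x83 & 0x7f) == 3;  IS_PRINT(0x83) is true  (0x83 >= 0x80) */
/* PORT_NUM(0x03) == (0x03 & 0x7f) == 3;  IS_PRINT(0x03) is false (0x03 <  0x80) */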
-
-/* MAX number of stop characters we will send when our read queue is getting full */
-#define MAX_STOPS_SENT 5
-
-/* 4 extra for alignment play space */
-#define WRITEBUFLEN ((4096) + 4)
-
-#define dgnc_jiffies_from_ms(a) (((a) * HZ) / 1000)
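A small worked example (illustrative only): this is a truncating millisecond-to-jiffies conversion, so the driver's 20 ms poll tick becomes a HZ-dependent jiffy count:

/* HZ == 1000: dgnc_jiffies_from_ms(20) == (20 * 1000) / 1000 == 20 jiffies */
/* HZ ==  250: dgnc_jiffies_from_ms(20) == (20 *  250) / 1000 ==  5 jiffies */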
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially. This is the same structure that is defined
- * as the default in tty_io.c with the same settings overridden as in serial.c
- *
- * In short, this should match the internal serial ports' defaults.
- */
-#define DEFAULT_IFLAGS (ICRNL | IXON)
-#define DEFAULT_OFLAGS (OPOST | ONLCR)
-#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
-#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
- ECHOCTL | ECHOKE | IEXTEN)
-
-#ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE '\0'
-#endif
-
-
-/*
- * All the possible states the driver can be while being loaded.
- */
-enum {
- DRIVER_INITIALIZED = 0,
- DRIVER_READY
-};
-
-/*
- * All the possible states the board can be while booting up.
- */
-enum {
- BOARD_FAILED = 0,
- BOARD_FOUND,
- BOARD_READY
-};
-
-
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
-
-struct dgnc_board;
-struct channel_t;
-
-/************************************************************************
- * Per board operations structure *
- ************************************************************************/
-struct board_ops {
- void (*tasklet)(unsigned long data);
- irqreturn_t (*intr)(int irq, void *voidbrd);
- void (*uart_init)(struct channel_t *ch);
- void (*uart_off)(struct channel_t *ch);
- int (*drain)(struct tty_struct *tty, uint seconds);
- void (*param)(struct tty_struct *tty);
- void (*vpd)(struct dgnc_board *brd);
- void (*assert_modem_signals)(struct channel_t *ch);
- void (*flush_uart_write)(struct channel_t *ch);
- void (*flush_uart_read)(struct channel_t *ch);
- void (*disable_receiver)(struct channel_t *ch);
- void (*enable_receiver)(struct channel_t *ch);
- void (*send_break)(struct channel_t *ch, int);
- void (*send_start_character)(struct channel_t *ch);
- void (*send_stop_character)(struct channel_t *ch);
- void (*copy_data_from_queue_to_uart)(struct channel_t *ch);
- uint (*get_uart_bytes_left)(struct channel_t *ch);
- void (*send_immediate_char)(struct channel_t *ch, unsigned char);
-};
-
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
-#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
-
-
-/*
- * Per-board information
- */
-struct dgnc_board {
- int magic; /* Board Magic number. */
- int boardnum; /* Board number: 0-32 */
-
- int type; /* Type of board */
- char *name; /* Product Name */
- struct pci_dev *pdev; /* Pointer to the pci_dev struct */
- unsigned long bd_flags; /* Board flags */
- u16 vendor; /* PCI vendor ID */
- u16 device; /* PCI device ID */
- u16 subvendor; /* PCI subsystem vendor ID */
- u16 subdevice; /* PCI subsystem device ID */
- unsigned char rev; /* PCI revision ID */
- uint pci_bus; /* PCI bus value */
- uint pci_slot; /* PCI slot value */
- uint maxports; /* MAX ports this board can handle */
- unsigned char dvid; /* Board specific device id */
- unsigned char vpd[128]; /* VPD of board, if found */
- unsigned char serial_num[20]; /* Serial number of board, if found in VPD */
-
- spinlock_t bd_lock; /* Used to protect board */
-
- spinlock_t bd_intr_lock; /* Used to protect the poller tasklet and
- * the interrupt routine from each other.
- */
-
- uint state; /* State of card. */
- wait_queue_head_t state_wait; /* Place to sleep on for state change */
-
- struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
-
- uint nasync; /* Number of ports on card */
-
- uint irq; /* Interrupt request number */
- ulong intr_count; /* Count of interrupts */
-	ulong		intr_modem;	/* Count of modem interrupts */
-	ulong		intr_tx;	/* Count of TX interrupts */
-	ulong		intr_rx;	/* Count of RX interrupts */
-
- ulong membase; /* Start of base memory of the card */
- ulong membase_end; /* End of base memory of the card */
-
- u8 __iomem *re_map_membase;/* Remapped memory of the card */
-
- ulong iobase; /* Start of io base of the card */
- ulong iobase_end; /* End of io base of the card */
-
- uint bd_uart_offset; /* Space between each UART */
-
- struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
-
- struct tty_driver SerialDriver;
- char SerialName[200];
- struct tty_driver PrintDriver;
- char PrintName[200];
-
- bool dgnc_Major_Serial_Registered;
- bool dgnc_Major_TransparentPrint_Registered;
-
- uint dgnc_Serial_Major;
- uint dgnc_TransparentPrint_Major;
-
- uint TtyRefCnt;
-
- u16 dpatype; /* The board "type", as defined by DPA */
- u16 dpastatus; /* The board "status", as defined by DPA */
-
- /*
- * Mgmt data.
- */
- char *msgbuf_head;
- char *msgbuf;
-
- uint bd_dividend; /* Board/UARTs specific dividend */
-
- struct board_ops *bd_ops;
-
- /* /proc/<board> entries */
- struct proc_dir_entry *proc_entry_pointer;
- struct dgnc_proc_entry *dgnc_board_table;
-
-};
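To make the role of bd_ops concrete, here is a minimal, hypothetical dispatch sketch (not part of the original header); the core driver reaches hardware-specific code only through this per-board ops table, which is what lets Classic and Neo boards share one code path:

	/* Hypothetical helper: reprogram one port through the board's ops table. */
	static void example_reinit_port(struct dgnc_board *brd, struct channel_t *ch)
	{
		brd->bd_ops->uart_init(ch);		/* board-specific UART setup */
		brd->bd_ops->assert_modem_signals(ch);	/* push the cached DTR/RTS state */
	}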
-
-
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN 0x0001 /* Device is open */
-#define UN_CLOSING 0x0002 /* Line is being closed */
-#define UN_IMM 0x0004 /* Service immediately */
-#define UN_BUSY 0x0008 /* Some work this channel */
-#define UN_BREAKI 0x0010 /* Input break received */
-#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
-#define UN_TIME 0x0040 /* Waiting on time */
-#define UN_EMPTY 0x0080 /* Waiting output queue empty */
-#define UN_LOW 0x0100 /* Waiting output low water mark*/
-#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
-#define UN_WOPEN 0x0400 /* Device waiting for open */
-#define UN_WIOCTL	0x0800		/* Device waiting for ioctl */
-#define UN_HANGUP 0x8000 /* Carrier lost */
-
-struct device;
-
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
-struct un_t {
- int magic; /* Unit Magic Number. */
- struct channel_t *un_ch;
- ulong un_time;
- uint un_type;
- uint un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty;/* Pointer to unit tty structure */
- uint un_flags; /* Unit flags */
- wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- uint un_dev; /* Minor device number */
- struct device *un_sysfs;
-};
-
-
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON 0x0001 /* Printer on string */
-#define CH_STOP 0x0002 /* Output is stopped */
-#define CH_STOPI 0x0004 /* Input is stopped */
-#define CH_CD 0x0008 /* Carrier is present */
-#define CH_FCAR 0x0010 /* Carrier forced on */
-#define CH_HANGUP 0x0020 /* Hangup received */
-
-#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
-#define CH_OPENING 0x0080 /* Port in fragile open state */
-#define CH_CLOSING 0x0100 /* Port in fragile close state */
-#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
-#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
-#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
-#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
-#define CH_LOOPBACK	0x2000		/* Channel is in loopback mode */
-#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
-#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
-#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
-
-
-/* Our Read/Error/Write queue sizes */
-#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
-#define EQUEUEMASK 0x1FFF /* 8 K - 1 */
-#define WQUEUEMASK 0x0FFF /* 4 K - 1 */
-#define RQUEUESIZE (RQUEUEMASK + 1)
-#define EQUEUESIZE RQUEUESIZE
-#define WQUEUESIZE (WQUEUEMASK + 1)
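The sizes above are powers of two, so (as an illustrative note, not text from the original header) queue indices can wrap with a mask instead of a modulo:

/* e.g. advancing the read-queue head:  ch_r_head = (ch_r_head + 1) & RQUEUEMASK; */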
-
-
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
-struct channel_t {
- int magic; /* Channel Magic Number */
- struct dgnc_board *ch_bd; /* Board structure pointer */
- struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
-
- spinlock_t ch_lock; /* provide for serialization */
- wait_queue_head_t ch_flags_wait;
-
- uint ch_portnum; /* Port number, 0 offset. */
- uint ch_open_count; /* open count */
- uint ch_flags; /* Channel flags */
-
- ulong ch_close_delay; /* How long we should drop RTS/DTR for */
-
- ulong ch_cpstime; /* Time for CPS calculations */
-
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
- unsigned char ch_stopc; /* Stop character */
- unsigned char ch_startc; /* Start character */
-
- uint ch_old_baud; /* Cache of the current baud */
- uint ch_custom_speed;/* Custom baud, if set */
-
- uint ch_wopen; /* Waiting for open process cnt */
-
- unsigned char ch_mostat; /* FEP output modem status */
- unsigned char ch_mistat; /* FEP input modem status */
-
- struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the "mapped" UART struct */
- struct cls_uart_struct __iomem *ch_cls_uart; /* Pointer to the "mapped" UART struct */
-
- unsigned char ch_cached_lsr; /* Cached value of the LSR register */
-
- unsigned char *ch_rqueue; /* Our read queue buffer - malloc'ed */
- ushort ch_r_head; /* Head location of the read queue */
- ushort ch_r_tail; /* Tail location of the read queue */
-
- unsigned char *ch_equeue; /* Our error queue buffer - malloc'ed */
- ushort ch_e_head; /* Head location of the error queue */
- ushort ch_e_tail; /* Tail location of the error queue */
-
- unsigned char *ch_wqueue; /* Our write queue buffer - malloc'ed */
- ushort ch_w_head; /* Head location of the write queue */
- ushort ch_w_tail; /* Tail location of the write queue */
-
- ulong ch_rxcount; /* total of data received so far */
- ulong ch_txcount; /* total of data transmitted so far */
-
- unsigned char ch_r_tlevel; /* Receive Trigger level */
- unsigned char ch_t_tlevel; /* Transmit Trigger level */
-
- unsigned char ch_r_watermark; /* Receive Watermark */
-
- ulong ch_stop_sending_break; /* Time we should STOP sending a break */
-
- uint ch_stops_sent; /* How many times I have sent a stop character
- * to try to stop the other guy sending.
- */
- ulong ch_err_parity; /* Count of parity errors on channel */
- ulong ch_err_frame; /* Count of framing errors on channel */
- ulong ch_err_break; /* Count of breaks on channel */
- ulong ch_err_overrun; /* Count of overruns on channel */
-
- ulong ch_xon_sends; /* Count of xons transmitted */
- ulong ch_xoff_sends; /* Count of xoffs transmitted */
-
-	ulong		ch_intr_modem;	/* Count of modem interrupts */
-	ulong		ch_intr_tx;	/* Count of TX interrupts */
-	ulong		ch_intr_rx;	/* Count of RX interrupts */
-
-
- /* /proc/<board>/<channel> entries */
- struct proc_dir_entry *proc_entry_pointer;
- struct dgnc_proc_entry *dgnc_channel_table;
-
-};
-
-/*
- * Our Global Variables.
- */
-extern uint dgnc_Major; /* Our driver/mgmt major */
-extern int dgnc_poll_tick; /* Poll interval - 20 ms */
-extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
-extern uint dgnc_NumBoards; /* Total number of boards */
-extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board structs */
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
deleted file mode 100644
index 81c793af9aba..000000000000
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-/************************************************************************
- *
- * This file implements the mgmt functionality for the
- * Neo and ClassicBoard based product lines.
- *
- ************************************************************************
- */
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/serial_reg.h>
-#include <linux/termios.h>
-#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
-
-#include "dgnc_driver.h"
-#include "dgnc_pci.h"
-#include "dgnc_mgmt.h"
-
-
-/* Our "in use" variables, to enforce 1 open only */
-static int dgnc_mgmt_in_use[MAXMGMTDEVICES];
-
-
-/*
- * dgnc_mgmt_open()
- *
- * Open the mgmt/downld/dpa device
- */
-int dgnc_mgmt_open(struct inode *inode, struct file *file)
-{
- unsigned long flags;
- unsigned int minor = iminor(inode);
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
-
- /* mgmt device */
- if (minor < MAXMGMTDEVICES) {
- /* Only allow 1 open at a time on mgmt device */
- if (dgnc_mgmt_in_use[minor]) {
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
- return -EBUSY;
- }
- dgnc_mgmt_in_use[minor]++;
- } else {
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
- return -ENXIO;
- }
-
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
- return 0;
-}
-
-
-/*
- * dgnc_mgmt_close()
- *
- * Close the mgmt/dpa device
- */
-int dgnc_mgmt_close(struct inode *inode, struct file *file)
-{
- unsigned long flags;
- unsigned int minor = iminor(inode);
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
-
- /* mgmt device */
- if (minor < MAXMGMTDEVICES) {
- if (dgnc_mgmt_in_use[minor])
- dgnc_mgmt_in_use[minor] = 0;
- }
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
- return 0;
-}
-
-
-/*
- * dgnc_mgmt_ioctl()
- *
- * ioctl the mgmt/dpa device
- */
-
-long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- unsigned long flags;
- void __user *uarg = (void __user *) arg;
-
- switch (cmd) {
-
- case DIGI_GETDD:
- {
- /*
- * This returns the total number of boards
- * in the system, as well as driver version
- * and has space for a reserved entry
- */
- struct digi_dinfo ddi;
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
-
- ddi.dinfo_nboards = dgnc_NumBoards;
- sprintf(ddi.dinfo_version, "%s", DG_PART);
-
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
- if (copy_to_user(uarg, &ddi, sizeof(ddi)))
- return -EFAULT;
-
- break;
- }
-
- case DIGI_GETBD:
- {
- int brd;
-
- struct digi_info di;
-
- if (copy_from_user(&brd, uarg, sizeof(int)))
- return -EFAULT;
-
- if (brd < 0 || brd >= dgnc_NumBoards)
- return -ENODEV;
-
- memset(&di, 0, sizeof(di));
-
- di.info_bdnum = brd;
-
- spin_lock_irqsave(&dgnc_Board[brd]->bd_lock, flags);
-
- di.info_bdtype = dgnc_Board[brd]->dpatype;
- di.info_bdstate = dgnc_Board[brd]->dpastatus;
- di.info_ioport = 0;
- di.info_physaddr = (ulong) dgnc_Board[brd]->membase;
-		di.info_physsize = (ulong) dgnc_Board[brd]->membase_end
-				   - dgnc_Board[brd]->membase;
- if (dgnc_Board[brd]->state != BOARD_FAILED)
- di.info_nports = dgnc_Board[brd]->nasync;
- else
- di.info_nports = 0;
-
- spin_unlock_irqrestore(&dgnc_Board[brd]->bd_lock, flags);
-
- if (copy_to_user(uarg, &di, sizeof(di)))
- return -EFAULT;
-
- break;
- }
-
- case DIGI_GET_NI_INFO:
- {
- struct channel_t *ch;
- struct ni_info ni;
- unsigned char mstat = 0;
- uint board = 0;
- uint channel = 0;
-
- if (copy_from_user(&ni, uarg, sizeof(ni)))
- return -EFAULT;
-
- board = ni.board;
- channel = ni.channel;
-
- /* Verify boundaries on board */
- if (board >= dgnc_NumBoards)
- return -ENODEV;
-
- /* Verify boundaries on channel */
- if (channel >= dgnc_Board[board]->nasync)
- return -ENODEV;
-
- ch = dgnc_Board[board]->channels[channel];
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENODEV;
-
- memset(&ni, 0, sizeof(ni));
- ni.board = board;
- ni.channel = channel;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- mstat = (ch->ch_mostat | ch->ch_mistat);
-
- if (mstat & UART_MCR_DTR) {
- ni.mstat |= TIOCM_DTR;
- ni.dtr = TIOCM_DTR;
- }
- if (mstat & UART_MCR_RTS) {
- ni.mstat |= TIOCM_RTS;
- ni.rts = TIOCM_RTS;
- }
- if (mstat & UART_MSR_CTS) {
- ni.mstat |= TIOCM_CTS;
- ni.cts = TIOCM_CTS;
- }
- if (mstat & UART_MSR_RI) {
- ni.mstat |= TIOCM_RI;
- ni.ri = TIOCM_RI;
- }
- if (mstat & UART_MSR_DCD) {
- ni.mstat |= TIOCM_CD;
- ni.dcd = TIOCM_CD;
- }
- if (mstat & UART_MSR_DSR)
- ni.mstat |= TIOCM_DSR;
-
- ni.iflag = ch->ch_c_iflag;
- ni.oflag = ch->ch_c_oflag;
- ni.cflag = ch->ch_c_cflag;
- ni.lflag = ch->ch_c_lflag;
-
- if (ch->ch_digi.digi_flags & CTSPACE ||
- ch->ch_c_cflag & CRTSCTS)
- ni.hflow = 1;
- else
- ni.hflow = 0;
-
- if ((ch->ch_flags & CH_STOPI) ||
- (ch->ch_flags & CH_FORCED_STOPI))
- ni.recv_stopped = 1;
- else
- ni.recv_stopped = 0;
-
- if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
- ni.xmit_stopped = 1;
- else
- ni.xmit_stopped = 0;
-
- ni.curtx = ch->ch_txcount;
- ni.currx = ch->ch_rxcount;
-
- ni.baud = ch->ch_old_baud;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (copy_to_user(uarg, &ni, sizeof(ni)))
- return -EFAULT;
-
- break;
- }
-
-
- }
-
- return 0;
-}
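A hedged userspace sketch of how the DIGI_GETDD request handled above might be exercised; the /dev/dgnc_mgmt node name and the digi_dinfo fields are assumptions inferred from dgnc_start() and this handler, not something documented here:

	/* Hypothetical userspace fragment (assumes this driver's digi.h is available). */
	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include "digi.h"

	int main(void)
	{
		struct digi_dinfo ddi;
		int fd = open("/dev/dgnc_mgmt", O_RDONLY);	/* assumed node name */

		if (fd >= 0 && ioctl(fd, DIGI_GETDD, &ddi) == 0)
			printf("dgnc: %lu board(s), driver part %s\n",
			       (unsigned long)ddi.dinfo_nboards, ddi.dinfo_version);
		return 0;
	}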
diff --git a/drivers/staging/dgnc/dgnc_mgmt.h b/drivers/staging/dgnc/dgnc_mgmt.h
deleted file mode 100644
index 708abe9594d4..000000000000
--- a/drivers/staging/dgnc/dgnc_mgmt.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_MGMT_H
-#define __DGNC_MGMT_H
-
-#define MAXMGMTDEVICES 8
-
-int dgnc_mgmt_open(struct inode *inode, struct file *file);
-int dgnc_mgmt_close(struct inode *inode, struct file *file);
-long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-#endif
-
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
deleted file mode 100644
index 900e3ae55a38..000000000000
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ /dev/null
@@ -1,1846 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/delay.h> /* For udelay */
-#include <linux/io.h> /* For read[bwl]/write[bwl] */
-#include <linux/serial.h> /* For struct async_serial */
-#include <linux/serial_reg.h> /* For the various UART offsets */
-
-#include "dgnc_driver.h" /* Driver main header file */
-#include "dgnc_neo.h" /* Our header file */
-#include "dgnc_tty.h"
-
-static inline void neo_parse_lsr(struct dgnc_board *brd, uint port);
-static inline void neo_parse_isr(struct dgnc_board *brd, uint port);
-static void neo_copy_data_from_uart_to_queue(struct channel_t *ch);
-static inline void neo_clear_break(struct channel_t *ch, int force);
-static inline void neo_set_cts_flow_control(struct channel_t *ch);
-static inline void neo_set_rts_flow_control(struct channel_t *ch);
-static inline void neo_set_ixon_flow_control(struct channel_t *ch);
-static inline void neo_set_ixoff_flow_control(struct channel_t *ch);
-static inline void neo_set_no_output_flow_control(struct channel_t *ch);
-static inline void neo_set_no_input_flow_control(struct channel_t *ch);
-static inline void neo_set_new_start_stop_chars(struct channel_t *ch);
-static void neo_parse_modem(struct channel_t *ch, unsigned char signals);
-static void neo_tasklet(unsigned long data);
-static void neo_vpd(struct dgnc_board *brd);
-static void neo_uart_init(struct channel_t *ch);
-static void neo_uart_off(struct channel_t *ch);
-static int neo_drain(struct tty_struct *tty, uint seconds);
-static void neo_param(struct tty_struct *tty);
-static void neo_assert_modem_signals(struct channel_t *ch);
-static void neo_flush_uart_write(struct channel_t *ch);
-static void neo_flush_uart_read(struct channel_t *ch);
-static void neo_disable_receiver(struct channel_t *ch);
-static void neo_enable_receiver(struct channel_t *ch);
-static void neo_send_break(struct channel_t *ch, int msecs);
-static void neo_send_start_character(struct channel_t *ch);
-static void neo_send_stop_character(struct channel_t *ch);
-static void neo_copy_data_from_queue_to_uart(struct channel_t *ch);
-static uint neo_get_uart_bytes_left(struct channel_t *ch);
-static void neo_send_immediate_char(struct channel_t *ch, unsigned char c);
-static irqreturn_t neo_intr(int irq, void *voidbrd);
-
-
-struct board_ops dgnc_neo_ops = {
- .tasklet = neo_tasklet,
- .intr = neo_intr,
- .uart_init = neo_uart_init,
- .uart_off = neo_uart_off,
- .drain = neo_drain,
- .param = neo_param,
- .vpd = neo_vpd,
- .assert_modem_signals = neo_assert_modem_signals,
- .flush_uart_write = neo_flush_uart_write,
- .flush_uart_read = neo_flush_uart_read,
- .disable_receiver = neo_disable_receiver,
- .enable_receiver = neo_enable_receiver,
- .send_break = neo_send_break,
- .send_start_character = neo_send_start_character,
- .send_stop_character = neo_send_stop_character,
- .copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart,
- .get_uart_bytes_left = neo_get_uart_bytes_left,
- .send_immediate_char = neo_send_immediate_char
-};
-
-static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-
-
-/*
- * This function is called to ensure that all outstanding
- * PCI writes have been completed, by doing a PCI read against
- * a non-destructive, read-only location on the Neo card.
- *
- * In this case, we are reading the DVID (Read-only Device Identification)
- * value of the Neo card.
- */
-static inline void neo_pci_posting_flush(struct dgnc_board *bd)
-{
- readb(bd->re_map_membase + 0x8D);
-}
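The pattern used throughout the rest of this file is: perform the MMIO writes, then force them to complete with this one harmless read; a hypothetical illustration (not from the original source):

	/* Hypothetical helper: mask all UART interrupts and flush the posted write. */
	static void example_quiesce_uart(struct channel_t *ch)
	{
		writeb(0, &ch->ch_neo_uart->ier);	/* posted PCI write */
		neo_pci_posting_flush(ch->ch_bd);	/* read-back forces it out to the card */
	}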
-
-static inline void neo_set_cts_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
-
- /* Turn on auto CTS flow control */
-	ier |= UART_17158_IER_CTSDSR;
-
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR);
-
- /* Turn off auto Xon flow control */
- efr &= ~UART_17158_EFR_IXON;
-
-	/* Why? Because Exar's spec says we have to zero it out before setting it */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
-
- /* Feed the UART our trigger levels */
- writeb(8, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 8;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static inline void neo_set_rts_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn on auto RTS flow control */
-	ier |= UART_17158_IER_RTSDTR;
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR);
-
- /* Turn off auto Xoff flow control */
- ier &= ~UART_17158_IER_XOFF;
- efr &= ~UART_17158_EFR_IXOFF;
-
-	/* Why? Because Exar's spec says we have to zero it out before setting it */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
- ch->ch_r_watermark = 4;
-
- writeb(32, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 32;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- /*
- * From the Neo UART spec sheet:
- * The auto RTS/DTR function must be started by asserting
-	 * The auto RTS/DTR function must be started by asserting the
-	 * RTS/DTR# output pin (MCR bit-0 or 1) to logic 1 after
-	 * it is enabled.
- ch->ch_mostat |= UART_MCR_RTS;
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static inline void neo_set_ixon_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto CTS flow control */
- ier &= ~UART_17158_IER_CTSDSR;
- efr &= ~UART_17158_EFR_CTSDSR;
-
- /* Turn on auto Xon flow control */
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
-
-	/* Why? Because Exar's spec says we have to zero it out before setting it */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
- ch->ch_r_watermark = 4;
-
- writeb(32, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 32;
-
- /* Tell UART what start/stop chars it should be looking for */
- writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
- writeb(0, &ch->ch_neo_uart->xonchar2);
-
- writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
- writeb(0, &ch->ch_neo_uart->xoffchar2);
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto RTS flow control */
- ier &= ~UART_17158_IER_RTSDTR;
- efr &= ~UART_17158_EFR_RTSDTR;
-
- /* Turn on auto Xoff flow control */
- ier |= UART_17158_IER_XOFF;
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
-
-	/* Why? Because Exar's spec says we have to zero it out before setting it */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
-
- writeb(8, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 8;
-
- /* Tell UART what start/stop chars it should be looking for */
- writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
- writeb(0, &ch->ch_neo_uart->xonchar2);
-
- writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
- writeb(0, &ch->ch_neo_uart->xoffchar2);
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static inline void neo_set_no_input_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto RTS flow control */
- ier &= ~UART_17158_IER_RTSDTR;
- efr &= ~UART_17158_EFR_RTSDTR;
-
- /* Turn off auto Xoff flow control */
- ier &= ~UART_17158_IER_XOFF;
- if (ch->ch_c_iflag & IXON)
- efr &= ~(UART_17158_EFR_IXOFF);
- else
- efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
-
-
-	/* Why? Because Exar's spec says we have to zero it out before setting it */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
-
- ch->ch_r_watermark = 0;
-
- writeb(16, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 16;
-
- writeb(16, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 16;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static inline void neo_set_no_output_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto CTS flow control */
- ier &= ~UART_17158_IER_CTSDSR;
- efr &= ~UART_17158_EFR_CTSDSR;
-
- /* Turn off auto Xon flow control */
- if (ch->ch_c_iflag & IXOFF)
- efr &= ~UART_17158_EFR_IXON;
- else
- efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
-
-	/* Why? Because Exar's spec says we have to zero it out before setting it */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
-
- ch->ch_r_watermark = 0;
-
- writeb(16, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 16;
-
- writeb(16, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 16;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-/* change UARTs start/stop chars */
-static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
-{
-
- /* if hardware flow control is set, then skip this whole thing */
- if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS)
- return;
-
- /* Tell UART what start/stop chars it should be looking for */
- writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
- writeb(0, &ch->ch_neo_uart->xonchar2);
-
- writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
- writeb(0, &ch->ch_neo_uart->xoffchar2);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-/*
- * No locks are assumed to be held when calling this function.
- */
-static inline void neo_clear_break(struct channel_t *ch, int force)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* Bail if we aren't currently sending a break. */
- if (!ch->ch_stop_sending_break) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return;
- }
-
- /* Turn break off, and unset some variables */
- if (ch->ch_flags & CH_BREAK_SENDING) {
- if (time_after_eq(jiffies, ch->ch_stop_sending_break)
- || force) {
- unsigned char temp = readb(&ch->ch_neo_uart->lcr);
-
- writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags &= ~(CH_BREAK_SENDING);
- ch->ch_stop_sending_break = 0;
- }
- }
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-/*
- * Parse the ISR register.
- */
-static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
-{
- struct channel_t *ch;
- unsigned char isr;
- unsigned char cause;
- unsigned long flags;
-
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- if (port >= brd->maxports)
- return;
-
- ch = brd->channels[port];
- if (ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /* Here we try to figure out what caused the interrupt to happen */
- while (1) {
-
- isr = readb(&ch->ch_neo_uart->isr_fcr);
-
- /* Bail if no pending interrupt */
- if (isr & UART_IIR_NO_INT)
- break;
-
- /*
-		 * Yank off the upper 2 bits, which just show that the FIFOs are enabled.
- */
- isr &= ~(UART_17158_IIR_FIFO_ENABLED);
-
- if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
- /* Read data from uart -> queue */
- brd->intr_rx++;
- ch->ch_intr_rx++;
- neo_copy_data_from_uart_to_queue(ch);
-
- /* Call our tty layer to enforce queue flow control if needed. */
- spin_lock_irqsave(&ch->ch_lock, flags);
- dgnc_check_queue_flow_control(ch);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- }
-
- if (isr & UART_IIR_THRI) {
- brd->intr_tx++;
- ch->ch_intr_tx++;
- /* Transfer data (if any) from Write Queue -> UART. */
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- neo_copy_data_from_queue_to_uart(ch);
- }
-
- if (isr & UART_17158_IIR_XONXOFF) {
- cause = readb(&ch->ch_neo_uart->xoffchar1);
-
- /*
- * Since the UART detected either an XON or
- * XOFF match, we need to figure out which
- * one it was, so we can suspend or resume data flow.
- */
- if (cause == UART_17158_XON_DETECT) {
- /* Is output stopped right now, if so, resume it */
- if (brd->channels[port]->ch_flags & CH_STOP) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_flags &= ~(CH_STOP);
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- } else if (cause == UART_17158_XOFF_DETECT) {
- if (!(brd->channels[port]->ch_flags & CH_STOP)) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_flags |= CH_STOP;
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- }
- }
-
- if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) {
- /*
- * If we get here, this means the hardware is doing auto flow control.
- * Check to see whether RTS/DTR or CTS/DSR caused this interrupt.
- */
- brd->intr_modem++;
- ch->ch_intr_modem++;
- cause = readb(&ch->ch_neo_uart->mcr);
- /* Which pin is doing auto flow? RTS or DTR? */
- if ((cause & 0x4) == 0) {
- if (cause & UART_MCR_RTS) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat |= UART_MCR_RTS;
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- } else {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat &= ~(UART_MCR_RTS);
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- } else {
- if (cause & UART_MCR_DTR) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat |= UART_MCR_DTR;
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- } else {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat &= ~(UART_MCR_DTR);
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- }
- }
-
- /* Parse any modem signal changes */
- neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
- }
-}
-
-
-static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
-{
- struct channel_t *ch;
- int linestatus;
- unsigned long flags;
-
- /*
- * Check to make sure it didn't receive interrupt with a null board
- * associated or a board pointer that wasn't ours.
- */
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- if (port >= brd->maxports)
- return;
-
- ch = brd->channels[port];
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- linestatus = readb(&ch->ch_neo_uart->lsr);
-
- ch->ch_cached_lsr |= linestatus;
-
- if (ch->ch_cached_lsr & UART_LSR_DR) {
- brd->intr_rx++;
- ch->ch_intr_rx++;
- /* Read data from uart -> queue */
- neo_copy_data_from_uart_to_queue(ch);
- spin_lock_irqsave(&ch->ch_lock, flags);
- dgnc_check_queue_flow_control(ch);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- }
-
- /*
- * The next 3 tests should *NOT* happen, as the above test
-	 * should encapsulate all 3... At least, that's what Exar says.
- */
-
- if (linestatus & UART_LSR_PE)
- ch->ch_err_parity++;
-
- if (linestatus & UART_LSR_FE)
- ch->ch_err_frame++;
-
- if (linestatus & UART_LSR_BI)
- ch->ch_err_break++;
-
- if (linestatus & UART_LSR_OE) {
- /*
-		 * Rx overruns. Exar says that an overrun will NOT corrupt
-		 * the FIFO. It will just replace the holding register
-		 * with the new data byte, so basically just ignore this.
-		 * Probably we should eventually have an overrun stat in our driver...
- */
- ch->ch_err_overrun++;
- }
-
- if (linestatus & UART_LSR_THRE) {
- brd->intr_tx++;
- ch->ch_intr_tx++;
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /* Transfer data (if any) from Write Queue -> UART. */
- neo_copy_data_from_queue_to_uart(ch);
- } else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
- brd->intr_tx++;
- ch->ch_intr_tx++;
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /* Transfer data (if any) from Write Queue -> UART. */
- neo_copy_data_from_queue_to_uart(ch);
- }
-}
-
-
-/*
- * neo_param()
- * Send any/all changes to the line to the UART.
- */
-static void neo_param(struct tty_struct *tty)
-{
- unsigned char lcr = 0;
- unsigned char uart_lcr = 0;
- unsigned char ier = 0;
- unsigned char uart_ier = 0;
- uint baud = 9600;
- int quot = 0;
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
- ch->ch_r_head = 0;
- ch->ch_r_tail = 0;
- ch->ch_e_head = 0;
- ch->ch_e_tail = 0;
- ch->ch_w_head = 0;
- ch->ch_w_tail = 0;
-
- neo_flush_uart_write(ch);
- neo_flush_uart_read(ch);
-
- /* The baudrate is B0 so all modem lines are to be dropped. */
- ch->ch_flags |= (CH_BAUD0);
- ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
- neo_assert_modem_signals(ch);
- ch->ch_old_baud = 0;
- return;
-
- } else if (ch->ch_custom_speed) {
-
- baud = ch->ch_custom_speed;
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
-
- /*
- * Bring back up RTS and DTR...
- * Also handle RTS or DTR toggle if set.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
- }
- } else {
- int iindex = 0;
- int jindex = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 131657, 153600, 230400, 460800,
- 921600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16))
- baud = bauds[iindex][jindex];
- else
- baud = 0;
-
- if (baud == 0)
- baud = 9600;
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
-
- /*
- * Bring back up RTS and DTR...
- * Also handle RTS or DTR toggle if set.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
- }
- }
-
- if (ch->ch_c_cflag & PARENB)
- lcr |= UART_LCR_PARITY;
-
- if (!(ch->ch_c_cflag & PARODD))
- lcr |= UART_LCR_EPAR;
-
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
-#ifdef CMSPAR
- if (ch->ch_c_cflag & CMSPAR)
- lcr |= UART_LCR_SPAR;
-#endif
-
- if (ch->ch_c_cflag & CSTOPB)
- lcr |= UART_LCR_STOP;
-
- switch (ch->ch_c_cflag & CSIZE) {
- case CS5:
- lcr |= UART_LCR_WLEN5;
- break;
- case CS6:
- lcr |= UART_LCR_WLEN6;
- break;
- case CS7:
- lcr |= UART_LCR_WLEN7;
- break;
- case CS8:
- default:
- lcr |= UART_LCR_WLEN8;
- break;
- }
-
- uart_ier = readb(&ch->ch_neo_uart->ier);
- ier = uart_ier;
-
- uart_lcr = readb(&ch->ch_neo_uart->lcr);
-
- if (baud == 0)
- baud = 9600;
-
- quot = ch->ch_bd->bd_dividend / baud;
-
- if (quot != 0 && ch->ch_old_baud != baud) {
- ch->ch_old_baud = baud;
- writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr);
- writeb((quot & 0xff), &ch->ch_neo_uart->txrx);
- writeb((quot >> 8), &ch->ch_neo_uart->ier);
- writeb(lcr, &ch->ch_neo_uart->lcr);
- }
-
- if (uart_lcr != lcr)
- writeb(lcr, &ch->ch_neo_uart->lcr);
-
- if (ch->ch_c_cflag & CREAD)
- ier |= (UART_IER_RDI | UART_IER_RLSI);
- else
- ier &= ~(UART_IER_RDI | UART_IER_RLSI);
-
- /*
- * Have the UART interrupt on modem signal changes ONLY when
- * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
- */
- if ((ch->ch_digi.digi_flags & CTSPACE) ||
- (ch->ch_digi.digi_flags & RTSPACE) ||
- (ch->ch_c_cflag & CRTSCTS) ||
- !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
- !(ch->ch_c_cflag & CLOCAL))
- ier |= UART_IER_MSI;
- else
- ier &= ~UART_IER_MSI;
-
- ier |= UART_IER_THRI;
-
- if (ier != uart_ier)
- writeb(ier, &ch->ch_neo_uart->ier);
-
- /* Set new start/stop chars */
- neo_set_new_start_stop_chars(ch);
-
- if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
- neo_set_cts_flow_control(ch);
- } else if (ch->ch_c_iflag & IXON) {
- /* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
- neo_set_no_output_flow_control(ch);
- else
- neo_set_ixon_flow_control(ch);
- } else {
- neo_set_no_output_flow_control(ch);
- }
-
- if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
- neo_set_rts_flow_control(ch);
- } else if (ch->ch_c_iflag & IXOFF) {
- /* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
- neo_set_no_input_flow_control(ch);
- else
- neo_set_ixoff_flow_control(ch);
- } else {
- neo_set_no_input_flow_control(ch);
- }
-
- /*
- * Adjust the RX FIFO Trigger level if baud is less than 9600.
- * Not exactly elegant, but this is needed because of the Exar chip's
- * delay on firing off the RX FIFO interrupt on slower baud rates.
- */
- if (baud < 9600) {
- writeb(1, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 1;
- }
-
- neo_assert_modem_signals(ch);
-
- /* Get current status of the modem signals now */
- neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
-}
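
For reference, here is a minimal stand-alone sketch (not driver code) of the divisor-latch arithmetic neo_param() performs above: quot = dividend / baud, with the low byte written to DLL (the txrx offset while DLAB is set) and the high byte to DLM (the ier offset). The dividend value below is an assumed example for illustration only; the real value comes from the board's bd_dividend field.

#include <stdio.h>

int main(void)
{
	unsigned int dividend = 921600;	/* assumed example, not the board's bd_dividend */
	unsigned int bauds[] = { 9600, 38400, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++) {
		unsigned int quot = dividend / bauds[i];

		/* low byte -> DLL (txrx while DLAB is set), high byte -> DLM (ier) */
		printf("baud %6u -> quot %3u (DLL 0x%02x, DLM 0x%02x)\n",
		       bauds[i], quot, quot & 0xff, (quot >> 8) & 0xff);
	}
	return 0;
}
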
-
-
-/*
- * Our board poller function.
- */
-static void neo_tasklet(unsigned long data)
-{
- struct dgnc_board *bd = (struct dgnc_board *) data;
- struct channel_t *ch;
- unsigned long flags;
- int i;
- int state = 0;
- int ports = 0;
-
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- /* Cache a couple board values */
- spin_lock_irqsave(&bd->bd_lock, flags);
- state = bd->state;
- ports = bd->nasync;
- spin_unlock_irqrestore(&bd->bd_lock, flags);
-
- /*
- * Do NOT allow the interrupt routine to read the intr registers
- * Until we release this lock.
- */
- spin_lock_irqsave(&bd->bd_intr_lock, flags);
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if ((state == BOARD_READY) && (ports > 0)) {
- /* Loop on each port */
- for (i = 0; i < ports; i++) {
- ch = bd->channels[i];
-
- /* Just being careful... */
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- continue;
-
- /*
- * NOTE: Remember you CANNOT hold any channel
- * locks when calling the input routine.
- *
-			 * During input processing, it's possible we
-			 * will call the Linux ld, which might, in turn,
- * do a callback right back into us, resulting
- * in us trying to grab the channel lock twice!
- */
- dgnc_input(ch);
-
- /*
- * Channel lock is grabbed and then released
- * inside both of these routines, but neither
- * call anything else that could call back into us.
- */
- neo_copy_data_from_queue_to_uart(ch);
- dgnc_wakeup_writes(ch);
-
- /*
-			 * Call the carrier function, in case something
- * has changed.
- */
- dgnc_carrier(ch);
-
- /*
- * Check to see if we need to turn off a sending break.
- * The timing check is done inside clear_break()
- */
- if (ch->ch_stop_sending_break)
- neo_clear_break(ch, 0);
- }
- }
-
- /* Allow interrupt routine to access the interrupt register again */
- spin_unlock_irqrestore(&bd->bd_intr_lock, flags);
-
-}
-
-
-/*
- * dgnc_neo_intr()
- *
- * Neo specific interrupt handler.
- */
-static irqreturn_t neo_intr(int irq, void *voidbrd)
-{
- struct dgnc_board *brd = voidbrd;
- struct channel_t *ch;
- int port = 0;
- int type = 0;
- int current_port;
- u32 tmp;
- u32 uart_poll;
- unsigned long flags;
- unsigned long flags2;
-
- /*
- * Check to make sure it didn't receive interrupt with a null board
- * associated or a board pointer that wasn't ours.
- */
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return IRQ_NONE;
-
- brd->intr_count++;
-
- /* Lock out the slow poller from running on this board. */
- spin_lock_irqsave(&brd->bd_intr_lock, flags);
-
- /*
- * Read in "extended" IRQ information from the 32bit Neo register.
- * Bits 0-7: What port triggered the interrupt.
-	 * Bits 8-31: Each 3 bits indicate what type of interrupt occurred.
- */
- uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET);
-
- /*
- * If 0, no interrupts pending.
-	 * This can happen if the IRQ is shared among a couple of Neo/Classic boards.
- */
- if (!uart_poll) {
- spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
- return IRQ_NONE;
- }
-
- /* At this point, we have at least SOMETHING to service, dig further... */
-
- current_port = 0;
-
- /* Loop on each port */
- while ((uart_poll & 0xff) != 0) {
-
- tmp = uart_poll;
-
- /* Check current port to see if it has interrupt pending */
- if ((tmp & dgnc_offset_table[current_port]) != 0) {
- port = current_port;
- type = tmp >> (8 + (port * 3));
- type &= 0x7;
- } else {
- current_port++;
- continue;
- }
-
- /* Remove this port + type from uart_poll */
- uart_poll &= ~(dgnc_offset_table[port]);
-
- if (!type) {
- /* If no type, just ignore it, and move onto next port */
- continue;
- }
-
- /* Switch on type of interrupt we have */
- switch (type) {
-
- case UART_17158_RXRDY_TIMEOUT:
- /*
- * RXRDY Time-out is cleared by reading data in the
- * RX FIFO until it falls below the trigger level.
- */
-
- /* Verify the port is in range. */
- if (port >= brd->nasync)
- continue;
-
- ch = brd->channels[port];
- neo_copy_data_from_uart_to_queue(ch);
-
- /* Call our tty layer to enforce queue flow control if needed. */
- spin_lock_irqsave(&ch->ch_lock, flags2);
- dgnc_check_queue_flow_control(ch);
- spin_unlock_irqrestore(&ch->ch_lock, flags2);
-
- continue;
-
- case UART_17158_RX_LINE_STATUS:
- /*
- * RXRDY and RX LINE Status (logic OR of LSR[4:1])
- */
- neo_parse_lsr(brd, port);
- continue;
-
- case UART_17158_TXRDY:
- /*
- * TXRDY interrupt clears after reading ISR register for the UART channel.
- */
-
- /*
- * Yes, this is odd...
-			 * Why would I check EVERY possible type of
-			 * interrupt, when we know it's TXRDY?
-			 * Because for some reason, even though we got triggered for TXRDY,
-			 * it seems to be occasionally wrong. Instead of TX, which
-			 * it should be, I was getting things like RXRDY too. Weird.
- */
- neo_parse_isr(brd, port);
- continue;
-
- case UART_17158_MSR:
- /*
- * MSR or flow control was seen.
- */
- neo_parse_isr(brd, port);
- continue;
-
- default:
- /*
- * The UART triggered us with a bogus interrupt type.
- * It appears the Exar chip, when REALLY bogged down, will throw
-			 * these once in a while.
-			 * It's harmless; just ignore it and move on.
- */
- continue;
- }
- }
-
- /*
- * Schedule tasklet to more in-depth servicing at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
-
- spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
-
- return IRQ_HANDLED;
-}
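
The 32-bit poll-register layout neo_intr() decodes above (pending-port bits in the low byte, a 3-bit interrupt-type field per port starting at bit 8) can be exercised with the following stand-alone sketch. The register value is made up for illustration; the type codes (3 = TXRDY, 1 = RX line status) follow the UART_17158_* defines in dgnc_neo.h further down.

#include <stdio.h>
#include <stdint.h>

/* 3-bit interrupt-type field for a port, per the layout described above */
static unsigned int poll_type(uint32_t uart_poll, unsigned int port)
{
	return (uart_poll >> (8 + port * 3)) & 0x7;
}

int main(void)
{
	/* made-up value: ports 0 and 2 pending, types 3 (TXRDY) and 1 (RX line status) */
	uint32_t uart_poll = 0x00004305;
	unsigned int port;

	for (port = 0; port < 8; port++) {
		if (!(uart_poll & (1u << port)))
			continue;
		printf("port %u: interrupt type %u\n", port, poll_type(uart_poll, port));
	}
	return 0;
}
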
-
-
-/*
- * Neo specific way of turning off the receiver.
- * Used as a way to enforce queue flow control when in
- * hardware flow control mode.
- */
-static void neo_disable_receiver(struct channel_t *ch)
-{
- unsigned char tmp = readb(&ch->ch_neo_uart->ier);
-
- tmp &= ~(UART_IER_RDI);
- writeb(tmp, &ch->ch_neo_uart->ier);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-/*
- * Neo specific way of turning on the receiver.
- * Used as a way to release queue flow control when in
- * hardware flow control mode.
- */
-static void neo_enable_receiver(struct channel_t *ch)
-{
- unsigned char tmp = readb(&ch->ch_neo_uart->ier);
-
- tmp |= (UART_IER_RDI);
- writeb(tmp, &ch->ch_neo_uart->ier);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
-{
- int qleft = 0;
- unsigned char linestatus = 0;
- unsigned char error_mask = 0;
- int n = 0;
- int total = 0;
- ushort head;
- ushort tail;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* cache head and tail of queue */
- head = ch->ch_r_head & RQUEUEMASK;
- tail = ch->ch_r_tail & RQUEUEMASK;
-
- /* Get our cached LSR */
- linestatus = ch->ch_cached_lsr;
- ch->ch_cached_lsr = 0;
-
- /* Store how much space we have left in the queue */
- qleft = tail - head - 1;
- if (qleft < 0)
- qleft += RQUEUEMASK + 1;
-
- /*
- * If the UART is not in FIFO mode, force the FIFO copy to
- * NOT be run, by setting total to 0.
- *
- * On the other hand, if the UART IS in FIFO mode, then ask
-	 * the UART to give us an approximation of how much data it has RX'ed.
- */
- if (!(ch->ch_flags & CH_FIFO_ENABLED))
- total = 0;
- else {
- total = readb(&ch->ch_neo_uart->rfifo);
-
- /*
- * EXAR chip bug - RX FIFO COUNT - Fudge factor.
- *
- * This resolves a problem/bug with the Exar chip that sometimes
- * returns a bogus value in the rfifo register.
-		 * The count can be anywhere from 0-3 bytes "off".
- * Bizarre, but true.
- */
- if ((ch->ch_bd->dvid & 0xf0) >= UART_XR17E158_DVID)
- total -= 1;
- else
- total -= 3;
- }
-
-
- /*
- * Finally, bound the copy to make sure we don't overflow
- * our own queue...
-	 * The byte-by-byte copy loop below will
- * deal with the queue overflow possibility.
- */
- total = min(total, qleft);
-
- while (total > 0) {
-
- /*
- * Grab the linestatus register, we need to check
- * to see if there are any errors in the FIFO.
- */
- linestatus = readb(&ch->ch_neo_uart->lsr);
-
- /*
- * Break out if there is a FIFO error somewhere.
- * This will allow us to go byte by byte down below,
- * finding the exact location of the error.
- */
- if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
- break;
-
- /* Make sure we don't go over the end of our queue */
- n = min(((uint) total), (RQUEUESIZE - (uint) head));
-
- /*
- * Cut down n even further if needed, this is to fix
- * a problem with memcpy_fromio() with the Neo on the
- * IBM pSeries platform.
- * 15 bytes max appears to be the magic number.
- */
- n = min_t(uint, n, 12);
-
- /*
- * Since we are grabbing the linestatus register, which
- * will reset some bits after our read, we need to ensure
-		 * we don't miss our TX FIFO empties.
- */
- if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR))
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-
- linestatus = 0;
-
- /* Copy data from uart to the queue */
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
-
- /*
- * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
- * that all the data currently in the FIFO is free of
- * breaks and parity/frame/orun errors.
- */
- memset(ch->ch_equeue + head, 0, n);
-
- /* Add to and flip head if needed */
- head = (head + n) & RQUEUEMASK;
- total -= n;
- qleft -= n;
- ch->ch_rxcount += n;
- }
-
- /*
- * Create a mask to determine whether we should
- * insert the character (if any) into our queue.
- */
- if (ch->ch_c_iflag & IGNBRK)
- error_mask |= UART_LSR_BI;
-
- /*
- * Now cleanup any leftover bytes still in the UART.
- * Also deal with any possible queue overflow here as well.
- */
- while (1) {
-
- /*
-		 * It's possible we have a linestatus from the loop above
- * this, so we "OR" on any extra bits.
- */
- linestatus |= readb(&ch->ch_neo_uart->lsr);
-
- /*
- * If the chip tells us there is no more data pending to
- * be read, we can then leave.
- * But before we do, cache the linestatus, just in case.
- */
- if (!(linestatus & UART_LSR_DR)) {
- ch->ch_cached_lsr = linestatus;
- break;
- }
-
- /* No need to store this bit */
- linestatus &= ~UART_LSR_DR;
-
- /*
- * Since we are grabbing the linestatus register, which
- * will reset some bits after our read, we need to ensure
-		 * we don't miss our TX FIFO empties.
- */
- if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
- linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- }
-
- /*
- * Discard character if we are ignoring the error mask.
- */
- if (linestatus & error_mask) {
- unsigned char discard;
-
- linestatus = 0;
- memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1);
- continue;
- }
-
- /*
- * If our queue is full, we have no choice but to drop some data.
- * The assumption is that HWFLOW or SWFLOW should have stopped
-		 * things well before we got to this point.
-		 *
-		 * I decided to ditch the oldest data first;
-		 * I hope that's okay with everyone.
- */
- while (qleft < 1) {
- tail = (tail + 1) & RQUEUEMASK;
- ch->ch_r_tail = tail;
- ch->ch_err_overrun++;
- qleft++;
- }
-
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
- ch->ch_equeue[head] = (unsigned char) linestatus;
-
- /* Ditch any remaining linestatus value. */
- linestatus = 0;
-
- /* Add to and flip head if needed */
- head = (head + 1) & RQUEUEMASK;
-
- qleft--;
- ch->ch_rxcount++;
- }
-
- /*
- * Write new final heads to channel structure.
- */
- ch->ch_r_head = head & RQUEUEMASK;
- ch->ch_e_head = head & EQUEUEMASK;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
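
A stand-alone sketch of the circular-queue free-space computation used at the top of neo_copy_data_from_uart_to_queue(): with head == tail meaning "empty", one slot is always kept unused, so the free space is (tail - head - 1) wrapped into the queue size. The queue size below is an assumption for illustration; the driver's RQUEUESIZE/RQUEUEMASK are defined elsewhere.

#include <stdio.h>

#define RQUEUESIZE 8192			/* assumed size for illustration */
#define RQUEUEMASK (RQUEUESIZE - 1)

static int queue_space(unsigned short head, unsigned short tail)
{
	int qleft = (tail & RQUEUEMASK) - (head & RQUEUEMASK) - 1;

	if (qleft < 0)
		qleft += RQUEUEMASK + 1;
	return qleft;
}

int main(void)
{
	printf("empty:   %d bytes free\n", queue_space(0, 0));	/* size - 1 */
	printf("normal:  %d bytes free\n", queue_space(100, 50));
	printf("wrapped: %d bytes free\n", queue_space(10, 8190));
	return 0;
}
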
-
-
-/*
- * This function basically goes to sleep for the requested number of seconds, or until
- * it gets signalled that the port has fully drained.
- */
-static int neo_drain(struct tty_struct *tty, uint seconds)
-{
- unsigned long flags;
- struct channel_t *ch;
- struct un_t *un;
- int rc = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -ENXIO;
-
- un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return -ENXIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- un->un_flags |= UN_EMPTY;
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /*
- * Go to sleep waiting for the tty layer to wake me back up when
- * the empty flag goes away.
- *
- * NOTE: TODO: Do something with time passed in.
- */
- rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
-
- /* If ret is non-zero, user ctrl-c'ed us */
- return rc;
-}
-
-
-/*
- * Flush the WRITE FIFO on the Neo.
- *
- * NOTE: Channel lock MUST be held before calling this function!
- */
-static void neo_flush_uart_write(struct channel_t *ch)
-{
- unsigned char tmp = 0;
- int i = 0;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
- neo_pci_posting_flush(ch->ch_bd);
-
- for (i = 0; i < 10; i++) {
-
- /* Check to see if the UART feels it completely flushed the FIFO. */
- tmp = readb(&ch->ch_neo_uart->isr_fcr);
- if (tmp & 4)
- udelay(10);
- else
- break;
- }
-
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-}
-
-
-/*
- * Flush the READ FIFO on the Neo.
- *
- * NOTE: Channel lock MUST be held before calling this function!
- */
-static void neo_flush_uart_read(struct channel_t *ch)
-{
- unsigned char tmp = 0;
- int i = 0;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr);
- neo_pci_posting_flush(ch->ch_bd);
-
- for (i = 0; i < 10; i++) {
-
- /* Check to see if the UART feels it completely flushed the FIFO. */
- tmp = readb(&ch->ch_neo_uart->isr_fcr);
- if (tmp & 2)
- udelay(10);
- else
- break;
- }
-}
-
-
-static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
-{
- ushort head;
- ushort tail;
- int n;
- int s;
- int qlen;
- uint len_written = 0;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* No data to write to the UART */
- if (ch->ch_w_tail == ch->ch_w_head)
- goto exit_unlock;
-
- /* If port is "stopped", don't send any data to the UART */
- if ((ch->ch_flags & CH_FORCED_STOP) ||
- (ch->ch_flags & CH_BREAK_SENDING))
- goto exit_unlock;
-
- /*
-	 * If FIFOs are disabled, send data directly to the txrx register.
- */
- if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
- unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr);
-
- /* Cache the LSR bits for later parsing */
- ch->ch_cached_lsr |= lsrbits;
- if (ch->ch_cached_lsr & UART_LSR_THRE) {
- ch->ch_cached_lsr &= ~(UART_LSR_THRE);
-
- /*
- * If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_RTS)) {
- ch->ch_mostat |= (UART_MCR_RTS);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
- /*
- * If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_DTR)) {
- ch->ch_mostat |= (UART_MCR_DTR);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
- ch->ch_w_tail++;
- ch->ch_w_tail &= WQUEUEMASK;
- ch->ch_txcount++;
- }
-
- goto exit_unlock;
- }
-
- /*
- * We have to do it this way, because of the EXAR TXFIFO count bug.
- */
- if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
- if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
- goto exit_unlock;
-
- len_written = 0;
-
- n = readb(&ch->ch_neo_uart->tfifo);
-
- if ((unsigned int) n > ch->ch_t_tlevel)
- goto exit_unlock;
-
- n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
- } else {
- n = UART_17158_TX_FIFOSIZE - readb(&ch->ch_neo_uart->tfifo);
- }
-
- /* cache head and tail of queue */
- head = ch->ch_w_head & WQUEUEMASK;
- tail = ch->ch_w_tail & WQUEUEMASK;
- qlen = (head - tail) & WQUEUEMASK;
-
-	/* Use the smaller of the free FIFO space and the queue length */
- n = min(n, qlen);
-
- while (n > 0) {
-
- s = ((head >= tail) ? head : WQUEUESIZE) - tail;
- s = min(s, n);
-
- if (s <= 0)
- break;
-
- /*
- * If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_RTS)) {
- ch->ch_mostat |= (UART_MCR_RTS);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- /*
- * If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_DTR)) {
- ch->ch_mostat |= (UART_MCR_DTR);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
-
- /* Add and flip queue if needed */
- tail = (tail + s) & WQUEUEMASK;
- n -= s;
- ch->ch_txcount += s;
- len_written += s;
- }
-
- /* Update the final tail */
- ch->ch_w_tail = tail & WQUEUEMASK;
-
- if (len_written > 0) {
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- }
-
-exit_unlock:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-static void neo_parse_modem(struct channel_t *ch, unsigned char signals)
-{
- unsigned char msignals = signals;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /*
- * Do altpin switching. Altpin switches DCD and DSR.
-	 * This probably breaks DSRPACE, so we should be more clever here.
- */
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- unsigned char mswap = msignals;
-
- if (mswap & UART_MSR_DDCD) {
- msignals &= ~UART_MSR_DDCD;
- msignals |= UART_MSR_DDSR;
- }
- if (mswap & UART_MSR_DDSR) {
- msignals &= ~UART_MSR_DDSR;
- msignals |= UART_MSR_DDCD;
- }
- if (mswap & UART_MSR_DCD) {
- msignals &= ~UART_MSR_DCD;
- msignals |= UART_MSR_DSR;
- }
- if (mswap & UART_MSR_DSR) {
- msignals &= ~UART_MSR_DSR;
- msignals |= UART_MSR_DCD;
- }
- }
-
-	/* Scrub off the lower bits. They signify deltas, which we don't care about. */
- msignals &= 0xf0;
-
- if (msignals & UART_MSR_DCD)
- ch->ch_mistat |= UART_MSR_DCD;
- else
- ch->ch_mistat &= ~UART_MSR_DCD;
-
- if (msignals & UART_MSR_DSR)
- ch->ch_mistat |= UART_MSR_DSR;
- else
- ch->ch_mistat &= ~UART_MSR_DSR;
-
- if (msignals & UART_MSR_RI)
- ch->ch_mistat |= UART_MSR_RI;
- else
- ch->ch_mistat &= ~UART_MSR_RI;
-
- if (msignals & UART_MSR_CTS)
- ch->ch_mistat |= UART_MSR_CTS;
- else
- ch->ch_mistat &= ~UART_MSR_CTS;
-}
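
An equivalent, stand-alone way to express the ALTPIN swap neo_parse_modem() performs above: DCD and DSR (and their delta bits) trade places in the MSR image. The bit values are the standard 16550 MSR bits as defined in <linux/serial_reg.h>.

#include <stdio.h>

/* Standard 16550 MSR bits (as in <linux/serial_reg.h>) */
#define UART_MSR_DCD	0x80
#define UART_MSR_DSR	0x20
#define UART_MSR_DDCD	0x08
#define UART_MSR_DDSR	0x02

static unsigned char altpin_swap(unsigned char msr)
{
	unsigned char out = msr & ~(UART_MSR_DCD | UART_MSR_DSR |
				    UART_MSR_DDCD | UART_MSR_DDSR);

	/* DCD <-> DSR, and their delta bits, trade places */
	if (msr & UART_MSR_DCD)
		out |= UART_MSR_DSR;
	if (msr & UART_MSR_DSR)
		out |= UART_MSR_DCD;
	if (msr & UART_MSR_DDCD)
		out |= UART_MSR_DDSR;
	if (msr & UART_MSR_DDSR)
		out |= UART_MSR_DDCD;
	return out;
}

int main(void)
{
	/* DCD asserted, DSR clear: after the swap, DSR reads asserted instead */
	printf("0x%02x -> 0x%02x\n", UART_MSR_DCD, altpin_swap(UART_MSR_DCD));
	return 0;
}
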
-
-
-/* Make the UART raise any of the output signals we want up */
-static void neo_assert_modem_signals(struct channel_t *ch)
-{
- unsigned char out;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- out = ch->ch_mostat;
-
- if (ch->ch_flags & CH_LOOPBACK)
- out |= UART_MCR_LOOP;
-
- writeb(out, &ch->ch_neo_uart->mcr);
- neo_pci_posting_flush(ch->ch_bd);
-
- /* Give time for the UART to actually raise/drop the signals */
- udelay(10);
-}
-
-
-static void neo_send_start_character(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- if (ch->ch_startc != _POSIX_VDISABLE) {
- ch->ch_xon_sends++;
- writeb(ch->ch_startc, &ch->ch_neo_uart->txrx);
- neo_pci_posting_flush(ch->ch_bd);
- udelay(10);
- }
-}
-
-
-static void neo_send_stop_character(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- if (ch->ch_stopc != _POSIX_VDISABLE) {
- ch->ch_xoff_sends++;
- writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx);
- neo_pci_posting_flush(ch->ch_bd);
- udelay(10);
- }
-}
-
-
-/*
- * neo_uart_init
- */
-static void neo_uart_init(struct channel_t *ch)
-{
-
- writeb(0, &ch->ch_neo_uart->ier);
- writeb(0, &ch->ch_neo_uart->efr);
- writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr);
-
-
- /* Clear out UART and FIFO */
- readb(&ch->ch_neo_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
- readb(&ch->ch_neo_uart->lsr);
- readb(&ch->ch_neo_uart->msr);
-
- ch->ch_flags |= CH_FIFO_ENABLED;
-
- /* Assert any signals we want up */
- writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-/*
- * Make the UART completely turn off.
- */
-static void neo_uart_off(struct channel_t *ch)
-{
- /* Turn off UART enhanced bits */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Stop all interrupts from occurring. */
- writeb(0, &ch->ch_neo_uart->ier);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static uint neo_get_uart_bytes_left(struct channel_t *ch)
-{
- unsigned char left = 0;
- unsigned char lsr = readb(&ch->ch_neo_uart->lsr);
-
- /* We must cache the LSR as some of the bits get reset once read... */
- ch->ch_cached_lsr |= lsr;
-
- /* Determine whether the Transmitter is empty or not */
- if (!(lsr & UART_LSR_TEMT)) {
- if (ch->ch_flags & CH_TX_FIFO_EMPTY)
- tasklet_schedule(&ch->ch_bd->helper_tasklet);
- left = 1;
- } else {
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- left = 0;
- }
-
- return left;
-}
-
-
-/* Channel lock MUST be held by the calling function! */
-static void neo_send_break(struct channel_t *ch, int msecs)
-{
- /*
- * If we receive a time of 0, this means turn off the break.
- */
- if (msecs == 0) {
- if (ch->ch_flags & CH_BREAK_SENDING) {
- unsigned char temp = readb(&ch->ch_neo_uart->lcr);
-
- writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags &= ~(CH_BREAK_SENDING);
- ch->ch_stop_sending_break = 0;
- }
- return;
- }
-
- /*
- * Set the time we should stop sending the break.
- * If we are already sending a break, toss away the existing
- * time to stop, and use this new value instead.
- */
- ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs);
-
- /* Tell the UART to start sending the break */
- if (!(ch->ch_flags & CH_BREAK_SENDING)) {
- unsigned char temp = readb(&ch->ch_neo_uart->lcr);
-
- writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr);
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags |= (CH_BREAK_SENDING);
- }
-}
-
-
-/*
- * neo_send_immediate_char.
- *
- * Sends a specific character as soon as possible to the UART,
- * jumping over any bytes that might be in the write queue.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- writeb(c, &ch->ch_neo_uart->txrx);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-
-static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int address)
-{
- unsigned int enable;
- unsigned int bits;
- unsigned int databit;
- unsigned int val;
-
- /* enable chip select */
- writeb(NEO_EECS, base + NEO_EEREG);
- /* READ */
- enable = (address | 0x180);
-
- for (bits = 9; bits--; ) {
- databit = (enable & (1 << bits)) ? NEO_EEDI : 0;
- /* Set read address */
- writeb(databit | NEO_EECS, base + NEO_EEREG);
- writeb(databit | NEO_EECS | NEO_EECK, base + NEO_EEREG);
- }
-
- val = 0;
-
- for (bits = 17; bits--; ) {
- /* clock to EEPROM */
- writeb(NEO_EECS, base + NEO_EEREG);
- writeb(NEO_EECS | NEO_EECK, base + NEO_EEREG);
- val <<= 1;
- /* read EEPROM */
- if (readb(base + NEO_EEREG) & NEO_EEDO)
- val |= 1;
- }
-
- /* clock falling edge */
- writeb(NEO_EECS, base + NEO_EEREG);
-
- /* drop chip select */
- writeb(0x00, base + NEO_EEREG);
-
- return val;
-}
-
-
-static void neo_vpd(struct dgnc_board *brd)
-{
- unsigned int i = 0;
- unsigned int a;
-
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- if (!brd->re_map_membase)
- return;
-
- /* Store the VPD into our buffer */
- for (i = 0; i < NEO_VPD_IMAGESIZE; i++) {
- a = neo_read_eeprom(brd->re_map_membase, i);
- brd->vpd[i*2] = a & 0xff;
- brd->vpd[(i*2)+1] = (a >> 8) & 0xff;
- }
-
- if (((brd->vpd[0x08] != 0x82) /* long resource name tag */
- && (brd->vpd[0x10] != 0x82)) /* long resource name tag (PCI-66 files)*/
- || (brd->vpd[0x7F] != 0x78)) { /* small resource end tag */
-
- memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
- } else {
- /* Search for the serial number */
- for (i = 0; i < NEO_VPD_IMAGEBYTES - 3; i++)
- if (brd->vpd[i] == 'S' && brd->vpd[i + 1] == 'N')
- strncpy(brd->serial_num, &(brd->vpd[i + 3]), 9);
- }
-}
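
A stand-alone sketch of the serial-number scan neo_vpd() ends with: walk the VPD buffer looking for the "SN" keyword and copy the nine characters after the keyword's length byte (two-byte keyword, one-byte length, then data, per the usual PCI VPD convention). The buffer contents here are made up for illustration; 128 bytes matches NEO_VPD_IMAGEBYTES in dgnc_neo.h.

#include <stdio.h>
#include <string.h>

#define VPD_BYTES 128			/* stand-in for NEO_VPD_IMAGEBYTES */

int main(void)
{
	unsigned char vpd[VPD_BYTES] = { 0 };
	char serial_num[10] = "";
	int i;

	/* made-up VPD fragment: keyword "SN", length byte 9, then the serial */
	memcpy(&vpd[0x20], "SN\x09" "N10000123", 12);

	for (i = 0; i < VPD_BYTES - 3; i++) {
		if (vpd[i] == 'S' && vpd[i + 1] == 'N') {
			/* i + 3 skips the two keyword bytes and the length byte */
			strncpy(serial_num, (char *)&vpd[i + 3], 9);
			break;
		}
	}
	serial_num[9] = '\0';
	printf("serial: %s\n", serial_num);
	return 0;
}
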
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
deleted file mode 100644
index c528df5a0e5a..000000000000
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_NEO_H
-#define __DGNC_NEO_H
-
-#include "dgnc_driver.h"
-
-/************************************************************************
- * Per channel/port NEO UART structure *
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * U = Unused. *
- ************************************************************************/
-
-struct neo_uart_struct {
- u8 txrx; /* WR RHR/THR - Holding Reg */
- u8 ier; /* WR IER - Interrupt Enable Reg */
- u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
- u8 lcr; /* WR LCR - Line Control Reg */
- u8 mcr; /* WR MCR - Modem Control Reg */
- u8 lsr; /* WR LSR - Line Status Reg */
- u8 msr; /* WR MSR - Modem Status Reg */
- u8 spr; /* WR SPR - Scratch Pad Reg */
- u8 fctr; /* WR FCTR - Feature Control Reg */
- u8 efr; /* WR EFR - Enhanced Function Reg */
- u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
- u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
- u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
- u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
- u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
- u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
-
- u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
- u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
- u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
- u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
-};
-
-/* Where to read the extended interrupt register (32bits instead of 8bits) */
-#define UART_17158_POLL_ADDR_OFFSET 0x80
-
-/* These are the current dvid's of the Neo boards */
-#define UART_XR17C158_DVID 0x20
-#define UART_XR17D158_DVID 0x20
-#define UART_XR17E158_DVID 0x40
-
-#define NEO_EECK 0x10 /* Clock */
-#define NEO_EECS 0x20 /* Chip Select */
-#define NEO_EEDI 0x40 /* Data In is an Output Pin */
-#define NEO_EEDO 0x80 /* Data Out is an Input Pin */
-#define NEO_EEREG 0x8E /* offset to EEPROM control reg */
-
-
-#define NEO_VPD_IMAGESIZE 0x40 /* size of image to read from EEPROM in words */
-#define NEO_VPD_IMAGEBYTES (NEO_VPD_IMAGESIZE * 2)
-
-/*
- * These are the redefinitions for the FCTR on the XR17C158, since
- * Exar made them different from their earlier design (the XR16C854).
- */
-
-/* These are only applicable when table D is selected */
-#define UART_17158_FCTR_RTS_NODELAY 0x00
-#define UART_17158_FCTR_RTS_4DELAY 0x01
-#define UART_17158_FCTR_RTS_6DELAY 0x02
-#define UART_17158_FCTR_RTS_8DELAY 0x03
-#define UART_17158_FCTR_RTS_12DELAY 0x12
-#define UART_17158_FCTR_RTS_16DELAY 0x05
-#define UART_17158_FCTR_RTS_20DELAY 0x13
-#define UART_17158_FCTR_RTS_24DELAY 0x06
-#define UART_17158_FCTR_RTS_28DELAY 0x14
-#define UART_17158_FCTR_RTS_32DELAY 0x07
-#define UART_17158_FCTR_RTS_36DELAY 0x16
-#define UART_17158_FCTR_RTS_40DELAY 0x08
-#define UART_17158_FCTR_RTS_44DELAY 0x09
-#define UART_17158_FCTR_RTS_48DELAY 0x10
-#define UART_17158_FCTR_RTS_52DELAY 0x11
-
-#define UART_17158_FCTR_RTS_IRDA 0x10
-#define UART_17158_FCTR_RS485 0x20
-#define UART_17158_FCTR_TRGA 0x00
-#define UART_17158_FCTR_TRGB 0x40
-#define UART_17158_FCTR_TRGC 0x80
-#define UART_17158_FCTR_TRGD 0xC0
-
-/* 17158 trigger table selects.. */
-#define UART_17158_FCTR_BIT6 0x40
-#define UART_17158_FCTR_BIT7 0x80
-
-/* 17158 TX/RX memmapped buffer offsets */
-#define UART_17158_RX_FIFOSIZE 64
-#define UART_17158_TX_FIFOSIZE 64
-
-/* 17158 Extended IIR's */
-#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
-#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
-#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */
-#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
-
-/*
- * These are the extended interrupts that get sent
- * back to us from the UART's 32bit interrupt register
- */
-#define UART_17158_RX_LINE_STATUS 0x1 /* RX Ready */
-#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
-#define UART_17158_TXRDY 0x3 /* TX Ready */
-#define UART_17158_MSR 0x4 /* Modem State Change */
-#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */
-#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */
-
-/*
- * These are the EXTENDED definitions for the 17C158's Interrupt
- * Enable Register.
- */
-#define UART_17158_EFR_ECB 0x10 /* Enhanced control bit */
-#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
-#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
-#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
-#define UART_17158_EFR_CTSDSR	0x80	/* Auto CTS/DSR Flow Control Enable */
-
-#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
-#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
-
-#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
-#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
-#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
-#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
-
-/*
- * Our Global Variables
- */
-extern struct board_ops dgnc_neo_ops;
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_pci.h b/drivers/staging/dgnc/dgnc_pci.h
deleted file mode 100644
index 617d40d1ec19..000000000000
--- a/drivers/staging/dgnc/dgnc_pci.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_PCI_H
-#define __DGNC_PCI_H
-
-#define PCIMAX 32 /* maximum number of PCI boards */
-
-#define DIGI_VID 0x114F
-
-#define PCI_DEVICE_CLASSIC_4_DID 0x0028
-#define PCI_DEVICE_CLASSIC_8_DID 0x0029
-#define PCI_DEVICE_CLASSIC_4_422_DID 0x00D0
-#define PCI_DEVICE_CLASSIC_8_422_DID 0x00D1
-#define PCI_DEVICE_NEO_4_DID 0x00B0
-#define PCI_DEVICE_NEO_8_DID 0x00B1
-#define PCI_DEVICE_NEO_2DB9_DID 0x00C8
-#define PCI_DEVICE_NEO_2DB9PRI_DID 0x00C9
-#define PCI_DEVICE_NEO_2RJ45_DID 0x00CA
-#define PCI_DEVICE_NEO_2RJ45PRI_DID 0x00CB
-#define PCI_DEVICE_NEO_1_422_DID 0x00CC
-#define PCI_DEVICE_NEO_1_422_485_DID 0x00CD
-#define PCI_DEVICE_NEO_2_422_485_DID 0x00CE
-#define PCI_DEVICE_NEO_EXPRESS_8_DID 0x00F0
-#define PCI_DEVICE_NEO_EXPRESS_4_DID 0x00F1
-#define PCI_DEVICE_NEO_EXPRESS_4RJ45_DID 0x00F2
-#define PCI_DEVICE_NEO_EXPRESS_8RJ45_DID 0x00F3
-#define PCI_DEVICE_NEO_EXPRESS_4_IBM_DID 0x00F4
-
-#define PCI_DEVICE_CLASSIC_4_PCI_NAME "ClassicBoard 4 PCI"
-#define PCI_DEVICE_CLASSIC_8_PCI_NAME "ClassicBoard 8 PCI"
-#define PCI_DEVICE_CLASSIC_4_422_PCI_NAME "ClassicBoard 4 422 PCI"
-#define PCI_DEVICE_CLASSIC_8_422_PCI_NAME "ClassicBoard 8 422 PCI"
-#define PCI_DEVICE_NEO_4_PCI_NAME "Neo 4 PCI"
-#define PCI_DEVICE_NEO_8_PCI_NAME "Neo 8 PCI"
-#define PCI_DEVICE_NEO_2DB9_PCI_NAME "Neo 2 - DB9 Universal PCI"
-#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
-#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI"
-#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
-#define PCI_DEVICE_NEO_1_422_PCI_NAME "Neo 1 422 PCI"
-#define PCI_DEVICE_NEO_1_422_485_PCI_NAME "Neo 1 422/485 PCI"
-#define PCI_DEVICE_NEO_2_422_485_PCI_NAME "Neo 2 422/485 PCI"
-
-#define PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME "Neo 8 PCI Express"
-#define PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME "Neo 4 PCI Express"
-#define PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME "Neo 4 PCI Express RJ45"
-#define PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME "Neo 8 PCI Express RJ45"
-#define PCI_DEVICE_NEO_EXPRESS_4_IBM_PCI_NAME "Neo 4 PCI Express IBM"
-
-
-/* Size of Memory and I/O for PCI (4 K) */
-#define PCI_RAM_SIZE 0x1000
-
-/* Size of Memory (2MB) */
-#define PCI_MEM_SIZE 0x1000
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
deleted file mode 100644
index 44db8703eba4..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ /dev/null
@@ -1,735 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/serial_reg.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-
-#include "dgnc_driver.h"
-#include "dgnc_mgmt.h"
-
-
-static ssize_t dgnc_driver_version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
-
-
-static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_NumBoards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
-
-
-static ssize_t dgnc_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgnc_driver_maxboards_show, NULL);
-
-
-static ssize_t dgnc_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
-}
-
-static ssize_t dgnc_driver_pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- int ret;
-
- ret = sscanf(buf, "%d\n", &dgnc_poll_tick);
- if (ret != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgnc_driver_pollrate_show,
- dgnc_driver_pollrate_store);
-
-
-void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgnc_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- if (rc)
- pr_err("DGNC: sysfs driver_create_file failed!\n");
-}
-
-
-void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
- struct device_driver *driverfs = &dgnc_driver->driver;
-
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_pollrate);
-}
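
A minimal userspace sketch for reading one of the driver attributes created above. The sysfs path assumes the PCI driver registers under the name "dgnc"; adjust the path if the actual driver name differs.

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/bus/pci/drivers/dgnc/pollrate", "r");	/* assumed path */

	if (!f) {
		perror("open pollrate");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("poll rate: %s", buf);
	fclose(f);
	return 0;
}
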
-
-
-#define DGNC_VERIFY_BOARD(p, bd) \
- do { \
- if (!p) \
- return 0; \
- \
- bd = dev_get_drvdata(p); \
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) \
- return 0; \
- if (bd->state != BOARD_READY) \
- return 0; \
- } while (0)
-
-
-
-static ssize_t dgnc_vpd_show(struct device *p, struct device_attribute *attr,
- char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- count += sprintf(buf + count,
- "\n 0 1 2 3 4 5 6 7 8 9 A B C D E F");
- for (i = 0; i < 0x40 * 2; i++) {
- if (!(i % 16))
- count += sprintf(buf + count, "\n%04X ", i * 2);
- count += sprintf(buf + count, "%02X ", bd->vpd[i]);
- }
- count += sprintf(buf + count, "\n");
-
- return count;
-}
-static DEVICE_ATTR(vpd, S_IRUSR, dgnc_vpd_show, NULL);
-
-static ssize_t dgnc_serial_number_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- if (bd->serial_num[0] == '\0')
- count += sprintf(buf + count, "<UNKNOWN>\n");
- else
- count += sprintf(buf + count, "%s\n", bd->serial_num);
-
- return count;
-}
-static DEVICE_ATTR(serial_number, S_IRUSR, dgnc_serial_number_show, NULL);
-
-
-static ssize_t dgnc_ports_state_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_open_count ? "Open" : "Closed");
- }
- return count;
-}
-static DEVICE_ATTR(ports_state, S_IRUSR, dgnc_ports_state_show, NULL);
-
-
-static ssize_t dgnc_ports_baud_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %d\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_old_baud);
- }
- return count;
-}
-static DEVICE_ATTR(ports_baud, S_IRUSR, dgnc_ports_baud_show, NULL);
-
-
-static ssize_t dgnc_ports_msignals_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s %s %s %s %s %s\n",
- bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
- } else {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
- }
- }
- return count;
-}
-static DEVICE_ATTR(ports_msignals, S_IRUSR, dgnc_ports_msignals_show, NULL);
-
-
-static ssize_t dgnc_ports_iflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_iflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_iflag, S_IRUSR, dgnc_ports_iflag_show, NULL);
-
-
-static ssize_t dgnc_ports_cflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_cflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_cflag, S_IRUSR, dgnc_ports_cflag_show, NULL);
-
-
-static ssize_t dgnc_ports_oflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_oflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_oflag, S_IRUSR, dgnc_ports_oflag_show, NULL);
-
-
-static ssize_t dgnc_ports_lflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_lflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_lflag, S_IRUSR, dgnc_ports_lflag_show, NULL);
-
-
-static ssize_t dgnc_ports_digi_flag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_digi.digi_flags);
- }
- return count;
-}
-static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgnc_ports_digi_flag_show, NULL);
-
-
-static ssize_t dgnc_ports_rxcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_rxcount);
- }
- return count;
-}
-static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgnc_ports_rxcount_show, NULL);
-
-
-static ssize_t dgnc_ports_txcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- int count = 0;
- int i = 0;
-
- DGNC_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_txcount);
- }
- return count;
-}
-static DEVICE_ATTR(ports_txcount, S_IRUSR, dgnc_ports_txcount_show, NULL);
-
-
-/* This function creates the sysfs files that will export each signal status
- * to sysfs; each value will be put in a separate file.
- */
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd)
-{
- int rc = 0;
-
- dev_set_drvdata(&bd->pdev->dev, bd);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_vpd);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_serial_number);
- if (rc)
- dev_err(&bd->pdev->dev, "dgnc: sysfs device_create_file failed!\n");
-}
-
-
-/* Removes all the sysfs files created for that board's ports */
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
-{
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
- device_remove_file(&(bd->pdev->dev), &dev_attr_vpd);
- device_remove_file(&(bd->pdev->dev), &dev_attr_serial_number);
-}
-
-
-static ssize_t dgnc_tty_state_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s",
- un->un_open_count ? "Open" : "Closed");
-}
-static DEVICE_ATTR(state, S_IRUSR, dgnc_tty_state_show, NULL);
-
-
-static ssize_t dgnc_tty_baud_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
-}
-static DEVICE_ATTR(baud, S_IRUSR, dgnc_tty_baud_show, NULL);
-
-
-static ssize_t dgnc_tty_msignals_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- if (ch->ch_open_count) {
- return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- }
- return 0;
-}
-static DEVICE_ATTR(msignals, S_IRUSR, dgnc_tty_msignals_show, NULL);
-
-
-static ssize_t dgnc_tty_iflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR(iflag, S_IRUSR, dgnc_tty_iflag_show, NULL);
-
-
-static ssize_t dgnc_tty_cflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR(cflag, S_IRUSR, dgnc_tty_cflag_show, NULL);
-
-
-static ssize_t dgnc_tty_oflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR(oflag, S_IRUSR, dgnc_tty_oflag_show, NULL);
-
-
-static ssize_t dgnc_tty_lflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR(lflag, S_IRUSR, dgnc_tty_lflag_show, NULL);
-
-
-static ssize_t dgnc_tty_digi_flag_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR(digi_flag, S_IRUSR, dgnc_tty_digi_flag_show, NULL);
-
-
-static ssize_t dgnc_tty_rxcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR(rxcount, S_IRUSR, dgnc_tty_rxcount_show, NULL);
-
-
-static ssize_t dgnc_tty_txcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR(txcount, S_IRUSR, dgnc_tty_txcount_show, NULL);
-
-
-static ssize_t dgnc_tty_name_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
- (un->un_type == DGNC_PRINT) ? "pr" : "tty",
- bd->boardnum + 1, 'a' + ch->ch_portnum);
-}
-static DEVICE_ATTR(custom_name, S_IRUSR, dgnc_tty_name_show, NULL);
-
-
-static struct attribute *dgnc_sysfs_tty_entries[] = {
- &dev_attr_state.attr,
- &dev_attr_baud.attr,
- &dev_attr_msignals.attr,
- &dev_attr_iflag.attr,
- &dev_attr_cflag.attr,
- &dev_attr_oflag.attr,
- &dev_attr_lflag.attr,
- &dev_attr_digi_flag.attr,
- &dev_attr_rxcount.attr,
- &dev_attr_txcount.attr,
- &dev_attr_custom_name.attr,
- NULL
-};
-
-
-static struct attribute_group dgnc_tty_attribute_group = {
- .name = NULL,
- .attrs = dgnc_sysfs_tty_entries,
-};
-
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c)
-{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group);
- if (ret) {
- dev_err(c, "dgnc: failed to create sysfs tty device attributes.\n");
- sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
- return;
- }
-
- dev_set_drvdata(c, un);
-
-}
-
-
-void dgnc_remove_tty_sysfs(struct device *c)
-{
- sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
-}
-
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
deleted file mode 100644
index 7be7d55bc49e..000000000000
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_SYSFS_H
-#define __DGNC_SYSFS_H
-
-#include <linux/device.h>
-#include "dgnc_driver.h"
-
-struct dgnc_board;
-struct channel_t;
-struct un_t;
-struct pci_driver;
-struct class_device;
-
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
-
-void dgnc_create_driver_sysfiles(struct pci_driver *);
-void dgnc_remove_driver_sysfiles(struct pci_driver *);
-
-int dgnc_tty_class_init(void);
-int dgnc_tty_class_destroy(void);
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
-void dgnc_remove_tty_sysfs(struct device *c);
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
deleted file mode 100644
index fbfe79a70263..000000000000
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ /dev/null
@@ -1,2999 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-/************************************************************************
- *
- * This file implements the tty driver functionality for the
- * Neo and ClassicBoard PCI based product lines.
- *
- ************************************************************************
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/types.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/delay.h> /* For udelay */
-#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
-#include <linux/pci.h>
-#include "dgnc_driver.h"
-#include "dgnc_tty.h"
-#include "dgnc_neo.h"
-#include "dgnc_cls.h"
-#include "dgnc_sysfs.h"
-#include "dgnc_utils.h"
-
-/*
- * internal variables
- */
-static struct dgnc_board *dgnc_BoardsByMajor[256];
-static unsigned char *dgnc_TmpWriteBuf;
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgnc_digi_init = {
- .digi_flags = DIGI_COOK, /* Flags */
- .digi_maxcps = 100, /* Max CPS */
- .digi_maxchar = 50, /* Max chars in print queue */
- .digi_bufsize = 100, /* Printer buffer size */
- .digi_onlen = 4, /* size of printer on string */
- .digi_offlen = 4, /* size of printer off string */
- .digi_onstr = "\033[5i", /* ANSI printer on string ] */
- .digi_offstr = "\033[4i", /* ANSI printer off string ] */
- .digi_term = "ansi" /* default terminal type */
-};
-
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.
- *
- * This defines a raw port at 9600 baud, 8 data bits, no parity,
- * 1 stop bit.
- */
-static struct ktermios DgncDefaultTermios = {
- .c_iflag = (DEFAULT_IFLAGS), /* iflags */
- .c_oflag = (DEFAULT_OFLAGS), /* oflags */
- .c_cflag = (DEFAULT_CFLAGS), /* cflags */
- .c_lflag = (DEFAULT_LFLAGS), /* lflags */
- .c_cc = INIT_C_CC,
- .c_line = 0,
-};
-
-
-/* Our function prototypes */
-static int dgnc_tty_open(struct tty_struct *tty, struct file *file);
-static void dgnc_tty_close(struct tty_struct *tty, struct file *file);
-static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
-static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
-static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
-static int dgnc_tty_write_room(struct tty_struct *tty);
-static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c);
-static int dgnc_tty_chars_in_buffer(struct tty_struct *tty);
-static void dgnc_tty_start(struct tty_struct *tty);
-static void dgnc_tty_stop(struct tty_struct *tty);
-static void dgnc_tty_throttle(struct tty_struct *tty);
-static void dgnc_tty_unthrottle(struct tty_struct *tty);
-static void dgnc_tty_flush_chars(struct tty_struct *tty);
-static void dgnc_tty_flush_buffer(struct tty_struct *tty);
-static void dgnc_tty_hangup(struct tty_struct *tty);
-static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
-static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value);
-static int dgnc_tty_tiocmget(struct tty_struct *tty);
-static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
-static int dgnc_tty_send_break(struct tty_struct *tty, int msec);
-static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout);
-static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
-static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
-static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
-
-
-static const struct tty_operations dgnc_tty_ops = {
- .open = dgnc_tty_open,
- .close = dgnc_tty_close,
- .write = dgnc_tty_write,
- .write_room = dgnc_tty_write_room,
- .flush_buffer = dgnc_tty_flush_buffer,
- .chars_in_buffer = dgnc_tty_chars_in_buffer,
- .flush_chars = dgnc_tty_flush_chars,
- .ioctl = dgnc_tty_ioctl,
- .set_termios = dgnc_tty_set_termios,
- .stop = dgnc_tty_stop,
- .start = dgnc_tty_start,
- .throttle = dgnc_tty_throttle,
- .unthrottle = dgnc_tty_unthrottle,
- .hangup = dgnc_tty_hangup,
- .put_char = dgnc_tty_put_char,
- .tiocmget = dgnc_tty_tiocmget,
- .tiocmset = dgnc_tty_tiocmset,
- .break_ctl = dgnc_tty_send_break,
- .wait_until_sent = dgnc_tty_wait_until_sent,
- .send_xchar = dgnc_tty_send_xchar
-};
-
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgnc_tty_preinit()
- *
- * Initialize any global tty related data before we download any boards.
- */
-int dgnc_tty_preinit(void)
-{
- /*
- * Allocate a buffer for doing the copy from user space to
-	 * kernel space in dgnc_tty_write(). We only use one buffer and
- * control access to it with a semaphore. If we are paging, we
- * are already in trouble so one buffer won't hurt much anyway.
- *
- * We are okay to sleep in the malloc, as this routine
- * is only called during module load, (not in interrupt context),
- * and with no locks held.
- */
- dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
-
- if (!dgnc_TmpWriteBuf)
- return -ENOMEM;
-
- return 0;
-}
-
-
-/*
- * dgnc_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-int dgnc_tty_register(struct dgnc_board *brd)
-{
- int rc = 0;
-
- brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
-
- snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
-
- brd->SerialDriver.name = brd->SerialName;
- brd->SerialDriver.name_base = 0;
- brd->SerialDriver.major = 0;
- brd->SerialDriver.minor_start = 0;
- brd->SerialDriver.num = brd->maxports;
- brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->SerialDriver.init_termios = DgncDefaultTermios;
- brd->SerialDriver.driver_name = DRVSTR;
- brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
-
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's.
- */
- brd->SerialDriver.ttys = kcalloc(brd->maxports, sizeof(*brd->SerialDriver.ttys), GFP_KERNEL);
- if (!brd->SerialDriver.ttys)
- return -ENOMEM;
-
- kref_init(&brd->SerialDriver.kref);
- brd->SerialDriver.termios = kcalloc(brd->maxports, sizeof(*brd->SerialDriver.termios), GFP_KERNEL);
- if (!brd->SerialDriver.termios)
- return -ENOMEM;
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops);
-
- if (!brd->dgnc_Major_Serial_Registered) {
- /* Register tty devices */
- rc = tty_register_driver(&brd->SerialDriver);
- if (rc < 0) {
- dev_dbg(&brd->pdev->dev,
- "Can't register tty device (%d)\n", rc);
- return rc;
- }
- brd->dgnc_Major_Serial_Registered = true;
- }
-
- /*
- * If we're doing transparent print, we have to do all of the above
- * again, separately so we don't get the LD confused about what major
- * we are when we get into the dgnc_tty_open() routine.
- */
- brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
- snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
-
- brd->PrintDriver.name = brd->PrintName;
- brd->PrintDriver.name_base = 0;
- brd->PrintDriver.major = brd->SerialDriver.major;
- brd->PrintDriver.minor_start = 0x80;
- brd->PrintDriver.num = brd->maxports;
- brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->PrintDriver.init_termios = DgncDefaultTermios;
- brd->PrintDriver.driver_name = DRVSTR;
- brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
-
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's. Must be separated from
- * the Serial Driver so we don't get confused
- */
- brd->PrintDriver.ttys = kcalloc(brd->maxports, sizeof(*brd->PrintDriver.ttys), GFP_KERNEL);
- if (!brd->PrintDriver.ttys)
- return -ENOMEM;
- kref_init(&brd->PrintDriver.kref);
- brd->PrintDriver.termios = kcalloc(brd->maxports, sizeof(*brd->PrintDriver.termios), GFP_KERNEL);
- if (!brd->PrintDriver.termios)
- return -ENOMEM;
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops);
-
- if (!brd->dgnc_Major_TransparentPrint_Registered) {
- /* Register Transparent Print devices */
- rc = tty_register_driver(&brd->PrintDriver);
- if (rc < 0) {
- dev_dbg(&brd->pdev->dev,
- "Can't register Transparent Print device(%d)\n",
- rc);
- return rc;
- }
- brd->dgnc_Major_TransparentPrint_Registered = true;
- }
-
- dgnc_BoardsByMajor[brd->SerialDriver.major] = brd;
- brd->dgnc_Serial_Major = brd->SerialDriver.major;
- brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major;
-
- return rc;
-}
-
-
-/*
- * dgnc_tty_init()
- *
- * Init the tty subsystem. Called once per board after board has been
- * downloaded and init'ed.
- */
-int dgnc_tty_init(struct dgnc_board *brd)
-{
- int i;
- void __iomem *vaddr;
- struct channel_t *ch;
-
- if (!brd)
- return -ENXIO;
-
- /*
- * Initialize board structure elements.
- */
-
- vaddr = brd->re_map_membase;
-
- brd->nasync = brd->maxports;
-
- for (i = 0; i < brd->nasync; i++) {
- /*
- * Okay to malloc with GFP_KERNEL, we are not at
- * interrupt context, and there are no locks held.
- */
- brd->channels[i] = kzalloc(sizeof(*brd->channels[i]),
- GFP_KERNEL);
- if (!brd->channels[i])
- goto err_free_channels;
- }
-
- ch = brd->channels[0];
- vaddr = brd->re_map_membase;
-
- /* Set up channel variables */
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
- spin_lock_init(&ch->ch_lock);
-
- /* Store all our magic numbers */
- ch->magic = DGNC_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGNC_UNIT_MAGIC;
- ch->ch_tun.un_ch = ch;
- ch->ch_tun.un_type = DGNC_SERIAL;
- ch->ch_tun.un_dev = i;
-
- ch->ch_pun.magic = DGNC_UNIT_MAGIC;
- ch->ch_pun.un_ch = ch;
- ch->ch_pun.un_type = DGNC_PRINT;
- ch->ch_pun.un_dev = i + 128;
-
- if (brd->bd_uart_offset == 0x200)
- ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
- else
- ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
-
- ch->ch_bd = brd;
- ch->ch_portnum = i;
- ch->ch_digi = dgnc_digi_init;
-
- /* .25 second delay */
- ch->ch_close_delay = 250;
-
- init_waitqueue_head(&ch->ch_flags_wait);
- init_waitqueue_head(&ch->ch_tun.un_flags_wait);
- init_waitqueue_head(&ch->ch_pun.un_flags_wait);
-
- {
- struct device *classp;
-
- classp = tty_register_device(&brd->SerialDriver, i,
- &(ch->ch_bd->pdev->dev));
- ch->ch_tun.un_sysfs = classp;
- dgnc_create_tty_sysfs(&ch->ch_tun, classp);
-
- classp = tty_register_device(&brd->PrintDriver, i,
- &(ch->ch_bd->pdev->dev));
- ch->ch_pun.un_sysfs = classp;
- dgnc_create_tty_sysfs(&ch->ch_pun, classp);
- }
-
- }
-
- return 0;
-
-err_free_channels:
- for (i = i - 1; i >= 0; --i) {
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- return -ENOMEM;
-}
-
-
-/*
- * dgnc_tty_post_uninit()
- *
- * Uninitialize any global tty related data.
- */
-void dgnc_tty_post_uninit(void)
-{
- kfree(dgnc_TmpWriteBuf);
- dgnc_TmpWriteBuf = NULL;
-}
-
-
-/*
- * dgnc_tty_uninit()
- *
- * Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
- */
-void dgnc_tty_uninit(struct dgnc_board *brd)
-{
- int i = 0;
-
- if (brd->dgnc_Major_Serial_Registered) {
- dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
- brd->dgnc_Serial_Major = 0;
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_tun.un_sysfs);
- tty_unregister_device(&brd->SerialDriver, i);
- }
- tty_unregister_driver(&brd->SerialDriver);
- brd->dgnc_Major_Serial_Registered = false;
- }
-
- if (brd->dgnc_Major_TransparentPrint_Registered) {
- dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
- brd->dgnc_TransparentPrint_Major = 0;
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_pun.un_sysfs);
- tty_unregister_device(&brd->PrintDriver, i);
- }
- tty_unregister_driver(&brd->PrintDriver);
- brd->dgnc_Major_TransparentPrint_Registered = false;
- }
-
- kfree(brd->SerialDriver.ttys);
- brd->SerialDriver.ttys = NULL;
- kfree(brd->SerialDriver.termios);
- brd->SerialDriver.termios = NULL;
- kfree(brd->PrintDriver.ttys);
- brd->PrintDriver.ttys = NULL;
- kfree(brd->PrintDriver.termios);
- brd->PrintDriver.termios = NULL;
-}
-
-/*=======================================================================
- *
- * dgnc_wmove - Write data to transmit queue.
- *
- * ch - Pointer to channel structure.
- *		buf	- Pointer to characters to be moved.
- * n - Number of characters to move.
- *
- *=======================================================================*/
-static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
-{
- int remain;
- uint head;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- head = ch->ch_w_head & WQUEUEMASK;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- remain = WQUEUESIZE - head;
-
- if (n >= remain) {
- n -= remain;
- memcpy(ch->ch_wqueue + head, buf, remain);
- head = 0;
- buf += remain;
- }
-
- if (n > 0) {
- /*
- * Move rest of data.
- */
- remain = n;
- memcpy(ch->ch_wqueue + head, buf, remain);
- head += remain;
- }
-
- head &= WQUEUEMASK;
- ch->ch_w_head = head;
-}
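
The wrap handling in dgnc_wmove() above is the standard two-part copy into a power-of-two ring buffer. A minimal stand-alone sketch of the same idea, with hypothetical names (the caller is assumed to have already checked free space, as the driver does):

#include <string.h>

#define RING_SIZE 4096			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	unsigned char data[RING_SIZE];
	unsigned int head;		/* next slot to fill */
};

/* Copy n bytes in at head, splitting the copy at the wrap point. */
static void ring_put(struct ring *r, const unsigned char *src, unsigned int n)
{
	unsigned int head = r->head & RING_MASK;
	unsigned int remain = RING_SIZE - head;	/* room before the wrap */

	if (n >= remain) {
		memcpy(r->data + head, src, remain);
		src += remain;
		n -= remain;
		head = 0;
	}
	if (n)
		memcpy(r->data + head, src, n);

	r->head = (head + n) & RING_MASK;
}
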
-
-
-
-
-/*=======================================================================
- *
- * dgnc_input - Process received data.
- *
- * ch - Pointer to channel structure.
- *
- *=======================================================================*/
-void dgnc_input(struct channel_t *ch)
-{
- struct dgnc_board *bd;
- struct tty_struct *tp;
- struct tty_ldisc *ld = NULL;
- uint rmask;
- ushort head;
- ushort tail;
- int data_len;
- unsigned long flags;
- int flip_len;
- int len = 0;
- int n = 0;
- int s = 0;
- int i = 0;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- tp = ch->ch_tun.un_tty;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /*
- * Figure the number of characters in the buffer.
- * Exit immediately if none.
- */
- rmask = RQUEUEMASK;
- head = ch->ch_r_head & rmask;
- tail = ch->ch_r_tail & rmask;
- data_len = (head - tail) & rmask;
-
- if (data_len == 0)
- goto exit_unlock;
-
- /*
- * If the device is not open, or CREAD is off,
- * flush input data and return immediately.
- */
- if (!tp || (tp->magic != TTY_MAGIC) || !(ch->ch_tun.un_flags & UN_ISOPEN) ||
- !(tp->termios.c_cflag & CREAD) || (ch->ch_tun.un_flags & UN_CLOSING)) {
-
- ch->ch_r_head = tail;
-
- /* Force queue flow control to be released, if needed */
- dgnc_check_queue_flow_control(ch);
-
- goto exit_unlock;
- }
-
- /*
- * If we are throttled, simply don't read any data.
- */
- if (ch->ch_flags & CH_FORCED_STOPI)
- goto exit_unlock;
-
- flip_len = TTY_FLIPBUF_SIZE;
-
- /* Chop down the length, if needed */
- len = min(data_len, flip_len);
- len = min(len, (N_TTY_BUF_SIZE - 1));
-
- ld = tty_ldisc_ref(tp);
-
- /*
- * If we were unable to get a reference to the ld,
- * don't flush our buffer, and act like the ld doesn't
- * have any space to put the data right now.
- */
- if (!ld) {
- len = 0;
- } else {
- /*
- * If ld doesn't have a pointer to a receive_buf function,
- * flush the data, then act like the ld doesn't have any
- * space to put the data right now.
- */
- if (!ld->ops->receive_buf) {
- ch->ch_r_head = ch->ch_r_tail;
- len = 0;
- }
- }
-
- if (len <= 0)
- goto exit_unlock;
-
- /*
- * The tty layer in the kernel has changed in 2.6.16+.
- *
- * The flip buffers in the tty structure are no longer exposed,
- * and probably will be going away eventually.
- *
- * If we are completely raw, we don't need to go through a lot
- * of the tty layers that exist.
- * In this case, we take the shortest and fastest route we
- * can to relay the data to the user.
- *
- * On the other hand, if we are not raw, we need to go through
-	 * the new 2.6.16+ tty layer, which has a better-defined API.
- */
- len = tty_buffer_request_room(tp->port, len);
- n = len;
-
- /*
-	 * n now contains the maximum amount of data we can copy,
- * bounded either by how much the Linux tty layer can handle,
- * or the amount of data the card actually has pending...
- */
- while (n) {
- s = ((head >= tail) ? head : RQUEUESIZE) - tail;
- s = min(s, n);
-
- if (s <= 0)
- break;
-
- /*
- * If conditions are such that ld needs to see all
- * UART errors, we will have to walk each character
- * and error byte and send them to the buffer one at
- * a time.
- */
- if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
- for (i = 0; i < s; i++) {
- if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
- tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_BREAK);
- else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
- tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_PARITY);
- else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
- tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_FRAME);
- else
- tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
- }
- } else {
- tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s);
- }
-
- tail += s;
- n -= s;
- /* Flip queue if needed */
- tail &= rmask;
- }
-
- ch->ch_r_tail = tail & rmask;
- ch->ch_e_tail = tail & rmask;
- dgnc_check_queue_flow_control(ch);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
-	/* Tell the tty layer it's okay to "eat" the data now */
- tty_flip_buffer_push(tp->port);
-
- if (ld)
- tty_ldisc_deref(ld);
- return;
-
-exit_unlock:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (ld)
- tty_ldisc_deref(ld);
-}
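
The copy loop in dgnc_input() above repeatedly takes the largest contiguous run between the tail and the wrap point. That computation on its own, assuming head and tail are already masked as in the driver (the names here are hypothetical):

#define RQ_SIZE 8192			/* stand-in for RQUEUESIZE */

/* Bytes that can be consumed starting at 'tail' without wrapping. */
static unsigned int rq_contiguous(unsigned int head, unsigned int tail)
{
	return ((head >= tail) ? head : RQ_SIZE) - tail;
}
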
-
-
-/************************************************************************
- * Determines when CARRIER changes state and takes appropriate
- * action.
- ************************************************************************/
-void dgnc_carrier(struct channel_t *ch)
-{
- struct dgnc_board *bd;
-
- int virt_carrier = 0;
- int phys_carrier = 0;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
-
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- if (ch->ch_mistat & UART_MSR_DCD)
- phys_carrier = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
- virt_carrier = 1;
-
- if (ch->ch_c_cflag & CLOCAL)
- virt_carrier = 1;
-
- /*
- * Test for a VIRTUAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL transition to low, so long as we aren't
- * currently ignoring physical transitions (which is what "virtual
- * carrier" indicates).
- *
- * The transition of the virtual carrier to low really doesn't
- * matter... it really only means "ignore carrier state", not
-	 * "pretend that carrier is there".
- */
- if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
- (phys_carrier == 0)) {
-
- /*
- * When carrier drops:
- *
- * Drop carrier on all open units.
- *
- * Flush queues, waking up any task waiting in the
- * line discipline.
- *
- * Send a hangup to the control terminal.
- *
- * Enable all select calls.
- */
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
-
- if (ch->ch_tun.un_open_count > 0)
- tty_hangup(ch->ch_tun.un_tty);
-
- if (ch->ch_pun.un_open_count > 0)
- tty_hangup(ch->ch_pun.un_tty);
- }
-
- /*
- * Make sure that our cached values reflect the current reality.
- */
- if (virt_carrier == 1)
- ch->ch_flags |= CH_FCAR;
- else
- ch->ch_flags &= ~CH_FCAR;
-
- if (phys_carrier == 1)
- ch->ch_flags |= CH_CD;
- else
- ch->ch_flags &= ~CH_CD;
-}
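
Stripped of the wakeups, the hangup decision in dgnc_carrier() reduces to a small pure function. A sketch under the assumption that DIGI_FORCEDCD and CLOCAL are the only sources of virtual carrier, as in the code above (the struct and names are hypothetical):

#include <stdbool.h>

struct carrier_state {
	bool dcd;		/* physical DCD (UART_MSR_DCD) */
	bool forced_dcd;	/* DIGI_FORCEDCD set */
	bool clocal;		/* CLOCAL set in the cflags */
	bool had_carrier;	/* CH_CD was set on the previous pass */
};

/* True when all open units on the channel should be hung up. */
static bool carrier_lost(const struct carrier_state *s)
{
	bool virt = s->forced_dcd || s->clocal;

	/*
	 * Virtual carrier only means "ignore the physical line"; a hangup
	 * happens when nothing forces carrier, carrier was previously seen,
	 * and the physical signal is now gone.
	 */
	return !virt && s->had_carrier && !s->dcd;
}
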
-
-/*
- * Assign the custom baud rate to the channel structure
- */
-static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
-{
- int testdiv;
- int testrate_high;
- int testrate_low;
- int deltahigh;
- int deltalow;
-
- if (newrate <= 0) {
- ch->ch_custom_speed = 0;
- return;
- }
-
- /*
- * Since the divisor is stored in a 16-bit integer, we make sure
- * we don't allow any rates smaller than a 16-bit integer would allow.
- * And of course, rates above the dividend won't fly.
- */
- if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1))
- newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1);
-
- if (newrate && newrate > ch->ch_bd->bd_dividend)
- newrate = ch->ch_bd->bd_dividend;
-
- if (newrate > 0) {
- testdiv = ch->ch_bd->bd_dividend / newrate;
-
- /*
- * If we try to figure out what rate the board would use
- * with the test divisor, it will be either equal or higher
- * than the requested baud rate. If we then determine the
- * rate with a divisor one higher, we will get the next lower
- * supported rate below the requested.
- */
- testrate_high = ch->ch_bd->bd_dividend / testdiv;
- testrate_low = ch->ch_bd->bd_dividend / (testdiv + 1);
-
- /*
- * If the rate for the requested divisor is correct, just
- * use it and be done.
- */
- if (testrate_high != newrate) {
- /*
- * Otherwise, pick the rate that is closer (i.e. whichever rate
- * has a smaller delta).
- */
- deltahigh = testrate_high - newrate;
- deltalow = newrate - testrate_low;
-
- if (deltahigh < deltalow)
- newrate = testrate_high;
- else
- newrate = testrate_low;
- }
- }
-
- ch->ch_custom_speed = newrate;
-}
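
The divisor arithmetic above picks between the floor divisor and the next one, whichever yields a rate closer to the request. The same rounding as a stand-alone helper; the function name is hypothetical and "dividend" stands in for bd_dividend:

static unsigned int nearest_supported_rate(unsigned int dividend,
					   unsigned int wanted)
{
	unsigned int div, high, low;

	if (wanted == 0)
		return 0;

	/* Clamp so the divisor fits in 16 bits and never exceeds the clock. */
	if (wanted < dividend / 0xFFFF + 1)
		wanted = dividend / 0xFFFF + 1;
	if (wanted > dividend)
		wanted = dividend;

	div  = dividend / wanted;	/* rate with this divisor is >= wanted */
	high = dividend / div;
	low  = dividend / (div + 1);	/* next lower supported rate */

	if (high == wanted)
		return wanted;

	/* Otherwise pick whichever achievable rate is closer. */
	return (high - wanted < wanted - low) ? high : low;
}
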
-
-
-void dgnc_check_queue_flow_control(struct channel_t *ch)
-{
- int qleft = 0;
-
- /* Store how much space we have left in the queue */
- qleft = ch->ch_r_tail - ch->ch_r_head - 1;
- if (qleft < 0)
- qleft += RQUEUEMASK + 1;
-
- /*
- * Check to see if we should enforce flow control on our queue because
-	 * the ld (or user) isn't reading data out of our queue fast enough.
- *
- * NOTE: This is done based on what the current flow control of the
- * port is set for.
- *
- * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
- * This will cause the UART's FIFO to back up, and force
- * the RTS signal to be dropped.
- * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
- * the other side, in hopes it will stop sending data to us.
- * 3) NONE - Nothing we can do. We will simply drop any extra data
- * that gets sent into us when the queue fills up.
- */
- if (qleft < 256) {
- /* HWFLOW */
- if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
- if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
- ch->ch_bd->bd_ops->disable_receiver(ch);
- ch->ch_flags |= (CH_RECEIVER_OFF);
- }
- }
- /* SWFLOW */
- else if (ch->ch_c_iflag & IXOFF) {
- if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
- ch->ch_bd->bd_ops->send_stop_character(ch);
- ch->ch_stops_sent++;
- }
- }
- }
-
- /*
-	 * Check to see if we should stop enforcing flow control because
-	 * the ld (or user) has finally read enough data out of our queue.
- *
- * NOTE: This is done based on what the current flow control of the
- * port is set for.
- *
- * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
- * This will cause the UART's FIFO to raise RTS back up,
- * which will allow the other side to start sending data again.
- * 2) SWFLOW (IXOFF) - Send a start character to
- * the other side, so it will start sending data to us again.
- * 3) NONE - Do nothing. Since we didn't do anything to turn off the
- * other side, we don't need to do anything now.
- */
- if (qleft > (RQUEUESIZE / 2)) {
- /* HWFLOW */
- if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
- if (ch->ch_flags & CH_RECEIVER_OFF) {
- ch->ch_bd->bd_ops->enable_receiver(ch);
- ch->ch_flags &= ~(CH_RECEIVER_OFF);
- }
- }
- /* SWFLOW */
- else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
- ch->ch_stops_sent = 0;
- ch->ch_bd->bd_ops->send_start_character(ch);
- }
- }
-}
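
The two thresholds above (throttle when fewer than 256 bytes are free, unthrottle once more than half the queue is free) form a simple hysteresis, so flow control is not toggled on every byte. A minimal sketch of that shape; the names and callbacks are hypothetical stand-ins for the board operations:

#include <stdbool.h>

#define QUEUE_SIZE 8192			/* power of two, like RQUEUESIZE */
#define QUEUE_MASK (QUEUE_SIZE - 1)
#define LOW_WATER  256			/* throttle below this much free space */
#define HIGH_WATER (QUEUE_SIZE / 2)	/* unthrottle above this much free space */

struct rx_port {
	unsigned int head, tail;	/* receive queue indices */
	bool throttled;
	void (*stop_rx)(struct rx_port *);	/* e.g. disable the RX interrupt */
	void (*start_rx)(struct rx_port *);	/* e.g. re-enable the RX interrupt */
};

static void check_rx_flow(struct rx_port *p)
{
	unsigned int free = (p->tail - p->head - 1) & QUEUE_MASK;

	if (free < LOW_WATER && !p->throttled) {
		p->throttled = true;
		p->stop_rx(p);		/* FIFO backs up and RTS eventually drops */
	} else if (free > HIGH_WATER && p->throttled) {
		p->throttled = false;
		p->start_rx(p);
	}
}
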
-
-
-void dgnc_wakeup_writes(struct channel_t *ch)
-{
- int qlen = 0;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /*
- * If channel now has space, wake up anyone waiting on the condition.
- */
- qlen = ch->ch_w_head - ch->ch_w_tail;
- if (qlen < 0)
- qlen += WQUEUESIZE;
-
- if (qlen >= (WQUEUESIZE - 256)) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return;
- }
-
- if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- ch->ch_tun.un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
- spin_lock_irqsave(&ch->ch_lock, flags);
- }
-
- wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
-
- /*
- * If unit is set to wait until empty, check to make sure
- * the queue AND FIFO are both empty.
- */
- if (ch->ch_tun.un_flags & UN_EMPTY) {
- if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) {
- ch->ch_tun.un_flags &= ~(UN_EMPTY);
-
- /*
- * If RTS Toggle mode is on, whenever
-				 * the queue and UART are empty, keep RTS low.
- */
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- ch->ch_mostat &= ~(UART_MCR_RTS);
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
- }
-
- /*
- * If DTR Toggle mode is on, whenever
-				 * the queue and UART are empty, keep DTR low.
- */
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- ch->ch_mostat &= ~(UART_MCR_DTR);
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
- }
- }
- }
-
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
-
- if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- ch->ch_pun.un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
- spin_lock_irqsave(&ch->ch_lock, flags);
- }
-
- wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
-
- /*
- * If unit is set to wait until empty, check to make sure
- * the queue AND FIFO are both empty.
- */
- if (ch->ch_pun.un_flags & UN_EMPTY) {
- if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0))
- ch->ch_pun.un_flags &= ~(UN_EMPTY);
- }
-
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
-
-/*
- * dgnc_tty_open()
- *
- */
-static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
-{
- struct dgnc_board *brd;
- struct channel_t *ch;
- struct un_t *un;
- uint major = 0;
- uint minor = 0;
- int rc = 0;
- unsigned long flags;
-
- rc = 0;
-
- major = MAJOR(tty_devnum(tty));
- minor = MINOR(tty_devnum(tty));
-
- if (major > 255)
- return -ENXIO;
-
- /* Get board pointer from our array of majors we have allocated */
- brd = dgnc_BoardsByMajor[major];
- if (!brd)
- return -ENXIO;
-
- /*
-	 * If the board is not yet in the READY state, go to sleep
-	 * waiting for it to become ready or for the open to be cancelled.
- */
- rc = wait_event_interruptible(brd->state_wait,
- (brd->state & BOARD_READY));
-
- if (rc)
- return rc;
-
- spin_lock_irqsave(&brd->bd_lock, flags);
-
- /* If opened device is greater than our number of ports, bail. */
- if (PORT_NUM(minor) >= brd->nasync) {
- spin_unlock_irqrestore(&brd->bd_lock, flags);
- return -ENXIO;
- }
-
- ch = brd->channels[PORT_NUM(minor)];
- if (!ch) {
- spin_unlock_irqrestore(&brd->bd_lock, flags);
- return -ENXIO;
- }
-
- /* Drop board lock */
- spin_unlock_irqrestore(&brd->bd_lock, flags);
-
- /* Grab channel lock */
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* Figure out our type */
- if (!IS_PRINT(minor)) {
- un = &brd->channels[PORT_NUM(minor)]->ch_tun;
- un->un_type = DGNC_SERIAL;
- } else if (IS_PRINT(minor)) {
- un = &brd->channels[PORT_NUM(minor)]->ch_pun;
- un->un_type = DGNC_PRINT;
- } else {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return -ENXIO;
- }
-
- /*
- * If the port is still in a previous open, and in a state
- * where we simply cannot safely keep going, wait until the
- * state clears.
- */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- rc = wait_event_interruptible(ch->ch_flags_wait, ((ch->ch_flags & CH_OPENING) == 0));
-
- /* If ret is non-zero, user ctrl-c'ed us */
- if (rc)
- return -EINTR;
-
- /*
- * If either unit is in the middle of the fragile part of close,
- * we just cannot touch the channel safely.
- * Go to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
- * ch_flags_wait to wake us back up.
- */
- rc = wait_event_interruptible(ch->ch_flags_wait,
- (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING) == 0));
-
- /* If ret is non-zero, user ctrl-c'ed us */
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
-
- /* Store our unit into driver_data, so we always have it available. */
- tty->driver_data = un;
-
-
- /*
- * Initialize tty's
- */
- if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
-
- /* Maybe do something here to the TTY struct as well? */
- }
-
-
- /*
- * Allocate channel buffers for read/write/error.
- * Set flag, so we don't get trounced on.
- */
- ch->ch_flags |= (CH_OPENING);
-
- /* Drop locks, as malloc with GFP_KERNEL can sleep */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (!ch->ch_rqueue)
- ch->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
- if (!ch->ch_equeue)
- ch->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
- if (!ch->ch_wqueue)
- ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_flags &= ~(CH_OPENING);
- wake_up_interruptible(&ch->ch_flags_wait);
-
- /*
-	 * Initialize if neither terminal nor printer is open.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
-
- /*
- * Flush input queues.
- */
- ch->ch_r_head = 0;
- ch->ch_r_tail = 0;
- ch->ch_e_head = 0;
- ch->ch_e_tail = 0;
- ch->ch_w_head = 0;
- ch->ch_w_tail = 0;
-
- brd->bd_ops->flush_uart_write(ch);
- brd->bd_ops->flush_uart_read(ch);
-
- ch->ch_flags = 0;
- ch->ch_cached_lsr = 0;
- ch->ch_stop_sending_break = 0;
- ch->ch_stops_sent = 0;
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- /*
- * Bring up RTS and DTR...
- * Also handle RTS or DTR toggle if set.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
-
- /* Tell UART to init itself */
- brd->bd_ops->uart_init(ch);
- }
-
- /*
- * Run param in case we changed anything
- */
- brd->bd_ops->param(tty);
-
- dgnc_carrier(ch);
-
- /*
- * follow protocol for opening port
- */
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- rc = dgnc_block_til_ready(tty, file, ch);
-
- /* No going back now, increment our unit and channel counters */
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_open_count++;
- un->un_open_count++;
- un->un_flags |= (UN_ISOPEN);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return rc;
-}
-
-
-/*
- * dgnc_block_til_ready()
- *
- * Wait for DCD, if needed.
- */
-static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
-{
- int retval = 0;
- struct un_t *un = NULL;
- unsigned long flags;
- uint old_flags = 0;
- int sleep_on_un_flags = 0;
-
- if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return -ENXIO;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_wopen++;
-
- /* Loop forever */
- while (1) {
-
- sleep_on_un_flags = 0;
-
- /*
- * If board has failed somehow during our sleep, bail with error.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- retval = -ENXIO;
- break;
- }
-
- /* If tty was hung up, break out of loop and set error. */
- if (tty_hung_up_p(file)) {
- retval = -EAGAIN;
- break;
- }
-
- /*
- * If either unit is in the middle of the fragile part of close,
- * we just cannot touch the channel safely.
- * Go back to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
-		 * ch_flags_wait to wake us back up.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
-
- /*
- * Our conditions to leave cleanly and happily:
- * 1) NONBLOCKING on the tty is set.
- * 2) CLOCAL is set.
- * 3) DCD (fake or real) is active.
- */
-
- if (file->f_flags & O_NONBLOCK)
- break;
-
- if (tty->flags & (1 << TTY_IO_ERROR)) {
- retval = -EIO;
- break;
- }
-
- if (ch->ch_flags & CH_CD)
- break;
-
- if (ch->ch_flags & CH_FCAR)
- break;
- } else {
- sleep_on_un_flags = 1;
- }
-
- /*
- * If there is a signal pending, the user probably
- * interrupted (ctrl-c) us.
- * Leave loop with error set.
- */
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-
- /*
- * Store the flags before we let go of channel lock
- */
- if (sleep_on_un_flags)
- old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
- else
- old_flags = ch->ch_flags;
-
- /*
- * Let go of channel lock before calling schedule.
- * Our poller will get any FEP events and wake us up when DCD
- * eventually goes active.
- */
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /*
- * Wait for something in the flags to change from the current value.
- */
- if (sleep_on_un_flags)
- retval = wait_event_interruptible(un->un_flags_wait,
- (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
- else
- retval = wait_event_interruptible(ch->ch_flags_wait,
- (old_flags != ch->ch_flags));
-
- /*
- * We got woken up for some reason.
- * Before looping around, grab our channel lock.
- */
- spin_lock_irqsave(&ch->ch_lock, flags);
- }
-
- ch->ch_wopen--;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (retval)
- return retval;
-
- return 0;
-}
-
-
-/*
- * dgnc_tty_hangup()
- *
- * Hangup the port. Like a close, but don't wait for output to drain.
- */
-static void dgnc_tty_hangup(struct tty_struct *tty)
-{
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- /* flush the transmit queues */
- dgnc_tty_flush_buffer(tty);
-
-}
-
-
-/*
- * dgnc_tty_close()
- *
- */
-static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
-{
- struct ktermios *ts;
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
- int rc = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- ts = &tty->termios;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /*
- * Determine if this is the last close or not - and if we agree about
- * which type of close it is with the Line Discipline
- */
- if ((tty->count == 1) && (un->un_open_count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. un_open_count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
-		 * serial port won't be shut down.
- */
- dev_dbg(tty->dev,
- "tty->count is 1, un open count is %d\n",
- un->un_open_count);
- un->un_open_count = 1;
- }
-
- if (un->un_open_count)
- un->un_open_count--;
- else
- dev_dbg(tty->dev,
- "bad serial port open count of %d\n",
- un->un_open_count);
-
- ch->ch_open_count--;
-
- if (ch->ch_open_count && un->un_open_count) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return;
- }
-
-	/* OK, it's the last close on the unit */
- un->un_flags |= UN_CLOSING;
-
- tty->closing = 1;
-
-
- /*
- * Only officially close channel if count is 0 and
- * DIGI_PRINTER bit is not set.
- */
- if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
-
- ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);
-
- /*
- * turn off print device when closing print device.
- */
- if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgnc_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- /* wait for output to drain */
- /* This will also return if we take an interrupt */
-
- rc = bd->bd_ops->drain(tty, 0);
-
- dgnc_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- tty->closing = 0;
-
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
- if (ch->ch_c_cflag & HUPCL) {
-
- /* Drop RTS/DTR */
- ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
- bd->bd_ops->assert_modem_signals(ch);
-
- /*
- * Go to sleep to ensure RTS/DTR
- * have been dropped for modems to see it.
- */
- if (ch->ch_close_delay) {
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- dgnc_ms_sleep(ch->ch_close_delay);
- spin_lock_irqsave(&ch->ch_lock, flags);
- }
- }
-
- ch->ch_old_baud = 0;
-
- /* Turn off UART interrupts for this port */
- ch->ch_bd->bd_ops->uart_off(ch);
- } else {
- /*
- * turn off print device when closing print device.
- */
- if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgnc_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
- }
-
- un->un_tty = NULL;
- un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
-
- wake_up_interruptible(&ch->ch_flags_wait);
- wake_up_interruptible(&un->un_flags_wait);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-/*
- * dgnc_tty_chars_in_buffer()
- *
- * Return number of characters that have not been transmitted yet.
- *
- * This routine is used by the line discipline to determine if there
- * is data waiting to be transmitted/drained/flushed or not.
- */
-static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- ushort thead;
- ushort ttail;
- uint tmask;
- uint chars = 0;
- unsigned long flags;
-
- if (tty == NULL)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- tmask = WQUEUEMASK;
- thead = ch->ch_w_head & tmask;
- ttail = ch->ch_w_tail & tmask;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (ttail == thead) {
- chars = 0;
- } else {
- if (thead >= ttail)
- chars = thead - ttail;
- else
- chars = thead - ttail + WQUEUESIZE;
- }
-
- return chars;
-}
-
-
-/*
- * dgnc_maxcps_room
- *
- * Reduces bytes_available to the max number of characters
- * that can be sent currently given the maxcps value, and
- * returns the new bytes_available. This only affects printer
- * output.
- */
-static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
-
- if (!tty)
- return bytes_available;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return bytes_available;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return bytes_available;
-
- /*
-	 * If it's not the Transparent print device, return
- * the full data amount.
- */
- if (un->un_type != DGNC_PRINT)
- return bytes_available;
-
- if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
- int cps_limit = 0;
- unsigned long current_time = jiffies;
- unsigned long buffer_time = current_time +
- (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
-
- if (ch->ch_cpstime < current_time) {
- /* buffer is empty */
- ch->ch_cpstime = current_time; /* reset ch_cpstime */
- cps_limit = ch->ch_digi.digi_bufsize;
- } else if (ch->ch_cpstime < buffer_time) {
- /* still room in the buffer */
- cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
- } else {
- /* no room in the buffer */
- cps_limit = 0;
- }
-
- bytes_available = min(cps_limit, bytes_available);
- }
-
- return bytes_available;
-}
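
The jiffies arithmetic above models a virtual printer buffer that drains at digi_maxcps characters per second: ch_cpstime is the instant that buffer will be empty, and the room left is whatever still fits before it would overflow. A sketch of the same budget with hypothetical helper names; cps_charge() mirrors the ch_cpstime update done later in dgnc_tty_write(), and callers are assumed to ensure maxcps > 0 and bufsize > 0, as the driver does:

struct cps_state {
	unsigned long drain_time;	/* like ch_cpstime: when the virtual
					 * printer buffer will be empty */
};

/* How many characters may be queued right now. */
static int cps_budget(struct cps_state *s, unsigned long now,
		      int maxcps, int bufsize, int hz)
{
	unsigned long full_time = now + (unsigned long)hz * bufsize / maxcps;

	if (s->drain_time < now) {	/* virtual buffer already empty */
		s->drain_time = now;
		return bufsize;
	}
	if (s->drain_time < full_time)	/* partially full */
		return (int)((full_time - s->drain_time) * maxcps / hz);
	return 0;			/* no room until it drains */
}

/* Account for characters actually queued. */
static void cps_charge(struct cps_state *s, int count, int maxcps, int hz)
{
	s->drain_time += (unsigned long)hz * count / maxcps;
}
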
-
-
-/*
- * dgnc_tty_write_room()
- *
- * Return space available in Tx buffer
- */
-static int dgnc_tty_write_room(struct tty_struct *tty)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- ushort head;
- ushort tail;
- ushort tmask;
- int ret = 0;
- unsigned long flags;
-
- if (tty == NULL || dgnc_TmpWriteBuf == NULL)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- tmask = WQUEUEMASK;
- head = (ch->ch_w_head) & tmask;
- tail = (ch->ch_w_tail) & tmask;
-
- ret = tail - head - 1;
- if (ret < 0)
- ret += WQUEUESIZE;
-
- /* Limit printer to maxcps */
- ret = dgnc_maxcps_room(tty, ret);
-
- /*
-	 * If we are the printer device, leave space for
- * possibly both the on and off strings.
- */
- if (un->un_type == DGNC_PRINT) {
- if (!(ch->ch_flags & CH_PRON))
- ret -= ch->ch_digi.digi_onlen;
- ret -= ch->ch_digi.digi_offlen;
- } else {
- if (ch->ch_flags & CH_PRON)
- ret -= ch->ch_digi.digi_offlen;
- }
-
- if (ret < 0)
- ret = 0;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return ret;
-}
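
Both dgnc_tty_chars_in_buffer() and the head/tail math above are the usual power-of-two ring computations: occupancy is (head - tail) mod size and free space is (tail - head - 1) mod size, with one slot left unused so that full and empty can be told apart. As a stand-alone sketch (names are hypothetical):

#define WQ_SIZE 8192			/* stand-in for WQUEUESIZE (power of two) */
#define WQ_MASK (WQ_SIZE - 1)

/* Characters queued but not yet sent (chars_in_buffer). */
static unsigned int wq_used(unsigned int head, unsigned int tail)
{
	return (head - tail) & WQ_MASK;
}

/* Space still available to the writer (write_room, before the maxcps limit). */
static unsigned int wq_free(unsigned int head, unsigned int tail)
{
	/* One slot stays empty so that head == tail always means "empty". */
	return (tail - head - 1) & WQ_MASK;
}
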
-
-
-/*
- * dgnc_tty_put_char()
- *
- * Put a character into ch->ch_wqueue
- *
- * - used by the line discipline for OPOST processing
- */
-static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
- /*
- * Simply call tty_write.
- */
- dgnc_tty_write(tty, &c, 1);
- return 1;
-}
-
-
-/*
- * dgnc_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * In here exists all the Transparent Print magic as well.
- */
-static int dgnc_tty_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- int bufcount = 0, n = 0;
- int orig_count = 0;
- unsigned long flags;
- ushort head;
- ushort tail;
- ushort tmask;
- uint remain;
-
- if (tty == NULL || dgnc_TmpWriteBuf == NULL)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return 0;
-
- if (!count)
- return 0;
-
- /*
-	 * Store the original number of characters passed in.
- * This helps to figure out if we should ask the FEP
- * to send us an event when it has more space available.
- */
- orig_count = count;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* Get our space available for the channel from the board */
- tmask = WQUEUEMASK;
- head = (ch->ch_w_head) & tmask;
- tail = (ch->ch_w_tail) & tmask;
-
- bufcount = tail - head - 1;
- if (bufcount < 0)
- bufcount += WQUEUESIZE;
-
- /*
- * Limit printer output to maxcps overall, with bursts allowed
- * up to bufsize characters.
- */
- bufcount = dgnc_maxcps_room(tty, bufcount);
-
- /*
- * Take minimum of what the user wants to send, and the
- * space available in the FEP buffer.
- */
- count = min(count, bufcount);
-
- /*
- * Bail if no space left.
- */
- if (count <= 0)
- goto exit_retry;
-
- /*
- * Output the printer ON string, if we are in terminal mode, but
- * need to be in printer mode.
- */
- if ((un->un_type == DGNC_PRINT) && !(ch->ch_flags & CH_PRON)) {
- dgnc_wmove(ch, ch->ch_digi.digi_onstr,
- (int) ch->ch_digi.digi_onlen);
- head = (ch->ch_w_head) & tmask;
- ch->ch_flags |= CH_PRON;
- }
-
- /*
- * On the other hand, output the printer OFF string, if we are
- * currently in printer mode, but need to output to the terminal.
- */
- if ((un->un_type != DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgnc_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- head = (ch->ch_w_head) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
-
- n = count;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- remain = WQUEUESIZE - head;
-
- if (n >= remain) {
- n -= remain;
- memcpy(ch->ch_wqueue + head, buf, remain);
- head = 0;
- buf += remain;
- }
-
- if (n > 0) {
- /*
- * Move rest of data.
- */
- remain = n;
- memcpy(ch->ch_wqueue + head, buf, remain);
- head += remain;
- }
-
- if (count) {
- head &= tmask;
- ch->ch_w_head = head;
- }
-
- /* Update printer buffer empty time. */
- if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0)
- && (ch->ch_digi.digi_bufsize > 0)) {
- ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (count) {
- /*
- * Channel lock is grabbed and then released
- * inside this routine.
- */
- ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch);
- }
-
- return count;
-
-exit_retry:
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return 0;
-}
-
-
-/*
- * Return modem signals to ld.
- */
-
-static int dgnc_tty_tiocmget(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- int result = -EIO;
- unsigned char mstat = 0;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return result;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return result;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return result;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- mstat = (ch->ch_mostat | ch->ch_mistat);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- result = 0;
-
- if (mstat & UART_MCR_DTR)
- result |= TIOCM_DTR;
- if (mstat & UART_MCR_RTS)
- result |= TIOCM_RTS;
- if (mstat & UART_MSR_CTS)
- result |= TIOCM_CTS;
- if (mstat & UART_MSR_DSR)
- result |= TIOCM_DSR;
- if (mstat & UART_MSR_RI)
- result |= TIOCM_RI;
- if (mstat & UART_MSR_DCD)
- result |= TIOCM_CD;
-
- return result;
-}
-
-
-/*
- * dgnc_tty_tiocmset()
- *
- * Set modem signals, called by ld.
- */
-
-static int dgnc_tty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -EIO;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return ret;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- if (set & TIOCM_RTS)
- ch->ch_mostat |= UART_MCR_RTS;
-
- if (set & TIOCM_DTR)
- ch->ch_mostat |= UART_MCR_DTR;
-
- if (clear & TIOCM_RTS)
- ch->ch_mostat &= ~(UART_MCR_RTS);
-
- if (clear & TIOCM_DTR)
- ch->ch_mostat &= ~(UART_MCR_DTR);
-
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-}
-
-
-/*
- * dgnc_tty_send_break()
- *
- * Send a Break, called by ld.
- */
-static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -EIO;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return ret;
-
- switch (msec) {
- case -1:
- msec = 0xFFFF;
- break;
- case 0:
- msec = 0;
- break;
- default:
- break;
- }
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_bd->bd_ops->send_break(ch, msec);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
-}
-
-
-/*
- * dgnc_tty_wait_until_sent()
- *
- * Wait until data has been transmitted, called by ld.
- */
-static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- int rc;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- rc = bd->bd_ops->drain(tty, 0);
-}
-
-
-/*
- * dgnc_tty_send_xchar()
- *
- * Send a high-priority character, called by ld.
- */
-static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- dev_dbg(tty->dev, "dgnc_tty_send_xchar start\n");
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- bd->bd_ops->send_immediate_char(ch, c);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n");
-}
-
-
-
-
-/*
- * Return modem signals to ld.
- */
-static inline int dgnc_get_mstat(struct channel_t *ch)
-{
- unsigned char mstat;
- int result = -EIO;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- mstat = (ch->ch_mostat | ch->ch_mistat);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- result = 0;
-
- if (mstat & UART_MCR_DTR)
- result |= TIOCM_DTR;
- if (mstat & UART_MCR_RTS)
- result |= TIOCM_RTS;
- if (mstat & UART_MSR_CTS)
- result |= TIOCM_CTS;
- if (mstat & UART_MSR_DSR)
- result |= TIOCM_DSR;
- if (mstat & UART_MSR_RI)
- result |= TIOCM_RI;
- if (mstat & UART_MSR_DCD)
- result |= TIOCM_CD;
-
- return result;
-}
-
-
-
-/*
- * Return modem signals to ld.
- */
-static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value)
-{
- int result;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- result = dgnc_get_mstat(ch);
-
- if (result < 0)
- return -ENXIO;
-
- return put_user(result, value);
-}
-
-
-/*
- * dgnc_set_modem_info()
- *
- * Set modem signals, called by ld.
- */
-static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -ENXIO;
- unsigned int arg = 0;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return ret;
-
- ret = get_user(arg, value);
- if (ret)
- return ret;
-
- switch (command) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS)
- ch->ch_mostat |= UART_MCR_RTS;
-
- if (arg & TIOCM_DTR)
- ch->ch_mostat |= UART_MCR_DTR;
-
- break;
-
- case TIOCMBIC:
- if (arg & TIOCM_RTS)
- ch->ch_mostat &= ~(UART_MCR_RTS);
-
- if (arg & TIOCM_DTR)
- ch->ch_mostat &= ~(UART_MCR_DTR);
-
- break;
-
- case TIOCMSET:
-
- if (arg & TIOCM_RTS)
- ch->ch_mostat |= UART_MCR_RTS;
- else
- ch->ch_mostat &= ~(UART_MCR_RTS);
-
- if (arg & TIOCM_DTR)
- ch->ch_mostat |= UART_MCR_DTR;
- else
- ch->ch_mostat &= ~(UART_MCR_DTR);
-
- break;
-
- default:
- return -EINVAL;
- }
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-}
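
For orientation only (an editorial aside, not part of the deleted driver): the tiocmget/tiocmset handlers and the TIOCMGET/TIOCMSET paths above are normally driven from userspace through the standard termios modem-control ioctls. A minimal sketch, assuming a hypothetical port node /dev/ttyS0:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY); /* hypothetical port */
        int bits;

        if (fd < 0)
                return 1;

        if (ioctl(fd, TIOCMGET, &bits) == 0) {          /* read modem lines  */
                bits |= TIOCM_DTR | TIOCM_RTS;          /* raise DTR and RTS */
                ioctl(fd, TIOCMSET, &bits);
        }
        close(fd);
        return 0;
}
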
-
-
-/*
- * dgnc_tty_digigeta()
- *
- * Ioctl to get the information for ditty.
- */
-static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct digi_t tmp;
- unsigned long flags;
-
- if (!retinfo)
- return -EFAULT;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EFAULT;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return -EFAULT;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -EFAULT;
-
- memset(&tmp, 0, sizeof(tmp));
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-
-/*
- * dgnc_tty_digiseta()
- *
- * Ioctl to set the information for ditty.
- */
-static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- struct digi_t new_digi;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EFAULT;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return -EFAULT;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -EFAULT;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return -EFAULT;
-
- if (copy_from_user(&new_digi, new_info, sizeof(new_digi)))
- return -EFAULT;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /*
- * Handle transitions to and from RTS Toggle.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && (new_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat &= ~(UART_MCR_RTS);
- if ((ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && !(new_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
-
- /*
- * Handle transitions to and from DTR Toggle.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && (new_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat &= ~(UART_MCR_DTR);
- if ((ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && !(new_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
-
- memcpy(&ch->ch_digi, &new_digi, sizeof(new_digi));
-
- if (ch->ch_digi.digi_maxcps < 1)
- ch->ch_digi.digi_maxcps = 1;
-
- if (ch->ch_digi.digi_maxcps > 10000)
- ch->ch_digi.digi_maxcps = 10000;
-
- if (ch->ch_digi.digi_bufsize < 10)
- ch->ch_digi.digi_bufsize = 10;
-
- if (ch->ch_digi.digi_maxchar < 1)
- ch->ch_digi.digi_maxchar = 1;
-
- if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
- ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
-
- if (ch->ch_digi.digi_onlen > DIGI_PLEN)
- ch->ch_digi.digi_onlen = DIGI_PLEN;
-
- if (ch->ch_digi.digi_offlen > DIGI_PLEN)
- ch->ch_digi.digi_offlen = DIGI_PLEN;
-
- ch->ch_bd->bd_ops->param(tty);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-}
-
-
-/*
- * dgnc_set_termios()
- */
-static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- ch->ch_bd->bd_ops->param(tty);
- dgnc_carrier(ch);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-static void dgnc_tty_throttle(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_flags |= (CH_FORCED_STOPI);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-static void dgnc_tty_unthrottle(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_flags &= ~(CH_FORCED_STOPI);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-static void dgnc_tty_start(struct tty_struct *tty)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_flags &= ~(CH_FORCED_STOP);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-static void dgnc_tty_stop(struct tty_struct *tty)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_flags |= (CH_FORCED_STOP);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-/*
- * dgnc_tty_flush_chars()
- *
- * Flush the cooked buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data.
- * instead, it means "stop buffering and send it if you
- * haven't already." Just guess how I figured that out... SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgnc_tty_flush_chars(struct tty_struct *tty)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* Do something maybe here */
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-
-/*
- * dgnc_tty_flush_buffer()
- *
- * Flush Tx buffer (make in == out)
- */
-static void dgnc_tty_flush_buffer(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- unsigned long flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_flags &= ~CH_STOP;
-
- /* Flush our write queue */
- ch->ch_w_head = ch->ch_w_tail;
-
- /* Flush UARTs transmit FIFO */
- ch->ch_bd->bd_ops->flush_uart_write(ch);
-
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-
-
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
- *
- *****************************************************************************/
-
-/*
- * dgnc_tty_ioctl()
- *
- * The usual assortment of ioctls
- */
-static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
- int rc;
- unsigned long flags;
- void __user *uarg = (void __user *) arg;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -ENODEV;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return -ENODEV;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENODEV;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return -ENODEV;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- if (un->un_open_count <= 0) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return -EIO;
- }
-
- switch (cmd) {
-
- /* Here are all the standard ioctls that we MUST implement */
-
- case TCSBRK:
- /*
- * TCSBRK is the SVID version: a non-zero arg means no break is sent;
- * this behaviour is exploited by tcdrain().
- *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (rc)
- return rc;
-
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
-
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
- ch->ch_bd->bd_ops->send_break(ch, 250);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
-
- case TCSBRKP:
- /* Support for POSIX tcsendbreak().
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (rc)
- return rc;
-
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_bd->bd_ops->send_break(ch, 250);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
- case TIOCSBRK:
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (rc)
- return rc;
-
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_bd->bd_ops->send_break(ch, 250);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
- case TIOCCBRK:
- /* Do Nothing */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return 0;
-
- case TIOCGSOFTCAR:
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
- return rc;
-
- case TIOCSSOFTCAR:
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = get_user(arg, (unsigned long __user *) arg);
- if (rc)
- return rc;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
- ch->ch_bd->bd_ops->param(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
- case TIOCMGET:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return dgnc_get_modem_info(ch, uarg);
-
- case TIOCMBIS:
- case TIOCMBIC:
- case TIOCMSET:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return dgnc_set_modem_info(tty, cmd, uarg);
-
- /*
- * Here are any additional ioctls that we want to implement
- */
-
- case TCFLSH:
- /*
- * The Linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- rc = tty_check_change(tty);
- if (rc) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return rc;
- }
-
- if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
- ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
- /* Force queue flow control to be released, if needed */
- dgnc_check_queue_flow_control(ch);
- }
-
- if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
- if (!(un->un_type == DGNC_PRINT)) {
- ch->ch_w_head = ch->ch_w_tail;
- ch->ch_bd->bd_ops->flush_uart_write(ch);
-
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
-
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
-
- }
- }
-
- /* pretend we didn't recognize this IOCTL */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return -ENOIOCTLCMD;
- case TCSETSF:
- case TCSETSW:
- /*
- * The Linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- if (cmd == TCSETSF) {
- /* flush rx */
- ch->ch_flags &= ~CH_STOP;
- ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
- /* Force queue flow control to be released, if needed */
- dgnc_check_queue_flow_control(ch);
- }
-
- /* now wait for all the output to drain */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
- if (rc)
- return -EINTR;
-
- /* pretend we didn't recognize this */
- return -ENOIOCTLCMD;
-
- case TCSETAW:
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
- if (rc)
- return -EINTR;
-
- /* pretend we didn't recognize this */
- return -ENOIOCTLCMD;
-
- case TCXONC:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- /* Make the ld do it */
- return -ENOIOCTLCMD;
-
- case DIGI_GETA:
- /* get information for ditty */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return dgnc_tty_digigeta(tty, uarg);
-
- case DIGI_SETAW:
- case DIGI_SETAF:
-
- /* set information for ditty */
- if (cmd == (DIGI_SETAW)) {
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
-
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- } else {
- tty_ldisc_flush(tty);
- }
- /* fall thru */
-
- case DIGI_SETA:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return dgnc_tty_digiseta(tty, uarg);
-
- case DIGI_LOOPBACK:
- {
- uint loopback = 0;
- /* Let go of locks when accessing user space, could sleep */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = get_user(loopback, (unsigned int __user *) arg);
- if (rc)
- return rc;
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* Enable/disable internal loopback for this port */
- if (loopback)
- ch->ch_flags |= CH_LOOPBACK;
- else
- ch->ch_flags &= ~(CH_LOOPBACK);
-
- ch->ch_bd->bd_ops->param(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return 0;
- }
-
- case DIGI_GETCUSTOMBAUD:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = put_user(ch->ch_custom_speed, (unsigned int __user *) arg);
- return rc;
-
- case DIGI_SETCUSTOMBAUD:
- {
- int new_rate;
- /* Let go of locks when accessing user space, could sleep */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = get_user(new_rate, (int __user *) arg);
- if (rc)
- return rc;
- spin_lock_irqsave(&ch->ch_lock, flags);
- dgnc_set_custom_speed(ch, new_rate);
- ch->ch_bd->bd_ops->param(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return 0;
- }
-
- /*
- * This ioctl allows insertion of a character into the front
- * of any pending data to be transmitted.
- *
- * This ioctl is to satisfy the "Send Character Immediate"
- * call that the RealPort protocol spec requires.
- */
- case DIGI_REALPORT_SENDIMMEDIATE:
- {
- unsigned char c;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = get_user(c, (unsigned char __user *) arg);
- if (rc)
- return rc;
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_immediate_char(ch, c);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return 0;
- }
-
- /*
- * This ioctl returns all the current counts for the port.
- *
- * This ioctl is to satisfy the "Line Error Counters"
- * call that the RealPort protocol spec requires.
- */
- case DIGI_REALPORT_GETCOUNTERS:
- {
- struct digi_getcounter buf;
-
- buf.norun = ch->ch_err_overrun;
- buf.noflow = 0; /* The driver doesn't keep this stat */
- buf.nframe = ch->ch_err_frame;
- buf.nparity = ch->ch_err_parity;
- buf.nbreak = ch->ch_err_break;
- buf.rbytes = ch->ch_rxcount;
- buf.tbytes = ch->ch_txcount;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (copy_to_user(uarg, &buf, sizeof(buf)))
- return -EFAULT;
-
- return 0;
- }
-
- /*
- * This ioctl returns all current events.
- *
- * This ioctl is to satisfy the "Event Reporting"
- * call that the RealPort protocol spec requires.
- */
- case DIGI_REALPORT_GETEVENTS:
- {
- unsigned int events = 0;
-
- /* NOTE: MORE EVENTS NEED TO BE ADDED HERE */
- if (ch->ch_flags & CH_BREAK_SENDING)
- events |= EV_TXB;
- if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
- events |= (EV_OPU | EV_OPS);
-
- if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI))
- events |= (EV_IPU | EV_IPS);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = put_user(events, (unsigned int __user *) arg);
- return rc;
- }
-
- /*
- * This ioctl returns TOUT and TIN counters based
- * upon the values passed in by the RealPort Server.
- * It also reports whether the UART transmitter is empty.
- */
- case DIGI_REALPORT_GETBUFFERS:
- {
- struct digi_getbuffer buf;
- int tdist;
- int count;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /*
- * Get data from user first.
- */
- if (copy_from_user(&buf, uarg, sizeof(buf)))
- return -EFAULT;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /*
- * Figure out how much data is in our RX and TX queues.
- */
- buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
- buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
-
- /*
- * Is the UART empty? Add that value to what's in our TX queue.
- */
- count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch);
-
- /*
- * Figure out how much data the RealPort Server believes should
- * be in our TX queue.
- */
- tdist = (buf.tIn - buf.tOut) & 0xffff;
-
- /*
- * If we have more data than the RealPort Server believes we
- * should have, reduce our count to its amount.
- *
- * This count difference CAN happen because the Linux LD can
- * insert more characters into our queue for OPOST processing
- * that the RealPort Server doesn't know about.
- */
- if (buf.txbuf > tdist)
- buf.txbuf = tdist;
-
- /*
- * Report whether our queue and UART TX are completely empty.
- */
- if (count)
- buf.txdone = 0;
- else
- buf.txdone = 1;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (copy_to_user(uarg, &buf, sizeof(buf)))
- return -EFAULT;
-
- return 0;
- }
- default:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return -ENOIOCTLCMD;
- }
-}
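
For illustration only (an editorial sketch, not driver code): the DIGI_REALPORT_GETBUFFERS case above measures queue occupancy with a masked difference, (head - tail) & MASK, which stays correct across index wrap-around as long as the queue size is a power of two; RQUEUEMASK and WQUEUEMASK play that role in the driver. A standalone restatement with a hypothetical 8 KiB queue:

#include <stdio.h>

#define QUEUE_SIZE 8192u                /* hypothetical, must be a power of two */
#define QUEUE_MASK (QUEUE_SIZE - 1)

/* Bytes currently queued; valid even after the head index has wrapped. */
static unsigned int queue_used(unsigned int head, unsigned int tail)
{
        return (head - tail) & QUEUE_MASK;
}

int main(void)
{
        /* head has wrapped: 100 bytes written past the wrap, tail 50 before it */
        printf("%u\n", queue_used(100u, QUEUE_SIZE - 50u));     /* prints 150 */
        return 0;
}
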
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
deleted file mode 100644
index 21d3369b875c..000000000000
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_TTY_H
-#define __DGNC_TTY_H
-
-#include "dgnc_driver.h"
-
-int dgnc_tty_register(struct dgnc_board *brd);
-
-int dgnc_tty_preinit(void);
-int dgnc_tty_init(struct dgnc_board *);
-
-void dgnc_tty_post_uninit(void);
-void dgnc_tty_uninit(struct dgnc_board *);
-
-void dgnc_input(struct channel_t *ch);
-void dgnc_carrier(struct channel_t *ch);
-void dgnc_wakeup_writes(struct channel_t *ch);
-void dgnc_check_queue_flow_control(struct channel_t *ch);
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c
deleted file mode 100644
index f76de82908d3..000000000000
--- a/drivers/staging/dgnc/dgnc_utils.c
+++ /dev/null
@@ -1,18 +0,0 @@
-#include <linux/tty.h>
-#include <linux/sched.h>
-#include "dgnc_utils.h"
-#include "digi.h"
-
-/*
- * dgnc_ms_sleep()
- *
- * Put the driver to sleep for x ms.
- *
- * Returns 0 if timed out, !0 (showing signal) if interrupted by a signal.
- */
-int dgnc_ms_sleep(ulong ms)
-{
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((ms * HZ) / 1000);
- return signal_pending(current);
-}
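
As an aside (a sketch, assuming only the return convention matters): the same behaviour — sleep for roughly ms milliseconds and report non-zero when a signal cut the sleep short — can also be written with the stock msleep_interruptible() helper from <linux/delay.h>:

#include <linux/delay.h>

/* 0 if the full delay elapsed, non-zero if a signal ended the sleep early. */
static int example_ms_sleep(unsigned long ms)
{
        return msleep_interruptible(ms) != 0;
}
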
diff --git a/drivers/staging/dgnc/dgnc_utils.h b/drivers/staging/dgnc/dgnc_utils.h
deleted file mode 100644
index 1164c3a09c6b..000000000000
--- a/drivers/staging/dgnc/dgnc_utils.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __DGNC_UTILS_H
-#define __DGNC_UTILS_H
-
-int dgnc_ms_sleep(ulong ms);
-
-#endif
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
deleted file mode 100644
index cf9dcae7cc3f..000000000000
--- a/drivers/staging/dgnc/digi.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef __DIGI_H
-#define __DIGI_H
-
-#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
-#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
-#endif
-
-#if !defined(TIOCMSET)
-#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
-#define TIOCMGET (('d'<<8) | 253) /* get modem ctrl state */
-#endif
-
-#if !defined(TIOCMBIC)
-#define TIOCMBIC (('d'<<8) | 254) /* clear modem ctrl bits */
-#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl bits */
-#endif
-
-#define DIGI_GETA (('e'<<8) | 94) /* Read params */
-#define DIGI_SETA (('e'<<8) | 95) /* Set params */
-#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
-#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
-#define DIGI_GET_NI_INFO (('d'<<8) | 250) /* Non-intelligent state info */
-#define DIGI_LOOPBACK (('d'<<8) | 252) /*
- * Enable/disable UART
- * internal loopback
- */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
-#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
-#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl */
-#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
-#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
-#define DIGI_PLEN 28 /* String length */
-#define DIGI_TSIZ 10 /* Terminal string len */
-
-/************************************************************************
- * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
-struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
- unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
- unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
-};
-
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
-struct digi_dinfo {
- unsigned int dinfo_nboards; /* # boards configured */
- char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
-};
-
-#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
-
-/************************************************************************
- * Structure used with ioctl commands for per-board information
- *
- * physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
-struct digi_info {
- unsigned int info_bdnum; /* Board number (0 based) */
- unsigned int info_ioport; /* io port address */
- unsigned int info_physaddr; /* memory address */
- unsigned int info_physsize; /* Size of host mem window */
- unsigned int info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
-};
-
-#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
-
-struct digi_getbuffer /* Struct for holding buffer use counts */
-{
- unsigned long tIn;
- unsigned long tOut;
- unsigned long rxbuf;
- unsigned long txbuf;
- unsigned long txdone;
-};
-
-struct digi_getcounter {
- unsigned long norun; /* number of UART overrun errors */
- unsigned long noflow; /* number of buffer overflow errors */
- unsigned long nframe; /* number of framing errors */
- unsigned long nparity; /* number of parity errors */
- unsigned long nbreak; /* number of breaks received */
- unsigned long rbytes; /* number of received bytes */
- unsigned long tbytes; /* number of bytes transmitted fully */
-};
-
-/* Board State Definitions */
-#define BD_RUNNING 0x0
-#define BD_NOFEP 0x5
-
-#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
-#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
-
-#define DIGI_REALPORT_GETBUFFERS (('e'<<8) | 108)
-#define DIGI_REALPORT_SENDIMMEDIATE (('e'<<8) | 109)
-#define DIGI_REALPORT_GETCOUNTERS (('e'<<8) | 110)
-#define DIGI_REALPORT_GETEVENTS (('e'<<8) | 111)
-
-#define EV_OPU 0x0001 /* !<Output paused by client */
-#define EV_OPS 0x0002 /* !<Output paused by regular sw flowctrl */
-#define EV_IPU 0x0010 /* !<Input paused unconditionally by user */
-#define EV_IPS 0x0020 /* !<Input paused by high/low water marks */
-#define EV_TXB 0x0040 /* !<Transmit break pending */
-
-/*
- * This structure holds data needed for the intelligent <--> nonintelligent
- * DPA translation
- */
-struct ni_info {
- int board;
- int channel;
- int dtr;
- int rts;
- int cts;
- int dsr;
- int ri;
- int dcd;
- int curtx;
- int currx;
- unsigned short iflag;
- unsigned short oflag;
- unsigned short cflag;
- unsigned short lflag;
- unsigned int mstat;
- unsigned char hflow;
- unsigned char xmit_stopped;
- unsigned char recv_stopped;
- unsigned int baud;
-};
-
-#define T_CLASSIC 0002
-#define T_PCIBUS 0400
-#define T_NEO_EXPRESS 0001
-#define T_NEO 0000
-
-#define TTY_FLIPBUF_SIZE 512
-#endif /* DIGI_H */
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
deleted file mode 100644
index cc3402020487..000000000000
--- a/drivers/staging/emxx_udc/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config USB_EMXX
- bool "EMXX USB Function Device Controller"
- depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
- help
- The Emma Mobile series of SoCs from Renesas Electronics and
- former NEC Electronics include USB Function hardware.
-
- Say "y" to link the driver statically, or "m" to build a
- dynamically linked module called "emxx_udc" and force all
- gadget drivers to also be dynamically linked.
diff --git a/drivers/staging/emxx_udc/Makefile b/drivers/staging/emxx_udc/Makefile
deleted file mode 100644
index 6352724c0b57..000000000000
--- a/drivers/staging/emxx_udc/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_USB_EMXX) := emxx_udc.o
diff --git a/drivers/staging/emxx_udc/TODO b/drivers/staging/emxx_udc/TODO
deleted file mode 100644
index 1319379beb7e..000000000000
--- a/drivers/staging/emxx_udc/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-* add clock framework support (platform device with CCF needs special care)
-* break out board-specific VBUS GPIO to work with multiplatform
-* DT bindings
-* move driver into drivers/usb/gadget/
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
deleted file mode 100644
index 597b78df7c1a..000000000000
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ /dev/null
@@ -1,3410 +0,0 @@
-/*
- * drivers/usb/gadget/emxx_udc.c
- * EMXX FCD (Function Controller Driver) for USB.
- *
- * Copyright (C) 2010 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/clk.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/device.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-#include <linux/irq.h>
-#include <linux/gpio.h>
-
-#include "emxx_udc.h"
-
-#define DRIVER_DESC "EMXX UDC driver"
-#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-
-static const char driver_name[] = "emxx_udc";
-static const char driver_desc[] = DRIVER_DESC;
-
-/*===========================================================================*/
-/* Prototype */
-static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *, struct nbu2ss_ep *);
-static void _nbu2ss_ep0_enable(struct nbu2ss_udc *);
-/*static void _nbu2ss_ep0_disable(struct nbu2ss_udc *);*/
-static void _nbu2ss_ep_done(struct nbu2ss_ep *, struct nbu2ss_req *, int);
-static void _nbu2ss_set_test_mode(struct nbu2ss_udc *, u32 mode);
-static void _nbu2ss_endpoint_toggle_reset(struct nbu2ss_udc *udc, u8 ep_adrs);
-
-static int _nbu2ss_pullup(struct nbu2ss_udc *, int);
-static void _nbu2ss_fifo_flush(struct nbu2ss_udc *, struct nbu2ss_ep *);
-
-/*===========================================================================*/
-/* Macro */
-#define _nbu2ss_zero_len_pkt(udc, epnum) \
- _nbu2ss_ep_in_end(udc, epnum, 0, 0)
-
-/*===========================================================================*/
-/* Global */
-struct nbu2ss_udc udc_controller;
-
-/*-------------------------------------------------------------------------*/
-/* Read */
-static inline u32 _nbu2ss_readl(void *address)
-{
- return __raw_readl(address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Write */
-static inline void _nbu2ss_writel(void *address, u32 udata)
-{
- __raw_writel(udata, address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Set Bit */
-static inline void _nbu2ss_bitset(void *address, u32 udata)
-{
- u32 reg_dt = __raw_readl(address) | (udata);
-
- __raw_writel(reg_dt, address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Clear Bit */
-static inline void _nbu2ss_bitclr(void *address, u32 udata)
-{
- u32 reg_dt = __raw_readl(address) & ~(udata);
-
- __raw_writel(reg_dt, address);
-}
-
-#ifdef UDC_DEBUG_DUMP
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dump_register(struct nbu2ss_udc *udc)
-{
- int i;
- u32 reg_data;
-
- pr_info("=== %s()\n", __func__);
-
- if (!udc) {
- pr_err("%s udc == NULL\n", __func__);
- return;
- }
-
- spin_unlock(&udc->lock);
-
- dev_dbg(&udc->dev, "\n-USB REG-\n");
- for (i = 0x0 ; i < USB_BASE_SIZE ; i += 16) {
- reg_data = _nbu2ss_readl(
- (u32 *)IO_ADDRESS(USB_BASE_ADDRESS + i));
- dev_dbg(&udc->dev, "USB%04x =%08x", i, (int)reg_data);
-
- reg_data = _nbu2ss_readl(
- (u32 *)IO_ADDRESS(USB_BASE_ADDRESS + i + 4));
- dev_dbg(&udc->dev, " %08x", (int)reg_data);
-
- reg_data = _nbu2ss_readl(
- (u32 *)IO_ADDRESS(USB_BASE_ADDRESS + i + 8));
- dev_dbg(&udc->dev, " %08x", (int)reg_data);
-
- reg_data = _nbu2ss_readl(
- (u32 *)IO_ADDRESS(USB_BASE_ADDRESS + i + 12));
- dev_dbg(&udc->dev, " %08x\n", (int)reg_data);
-
- }
-
- spin_lock(&udc->lock);
-}
-#endif /* UDC_DEBUG_DUMP */
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 Callback (Complete) */
-static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
-{
- u8 recipient;
- u16 selector;
- u32 test_mode;
- struct usb_ctrlrequest *p_ctrl;
- struct nbu2ss_udc *udc;
-
- if ((!_ep) || (!_req))
- return;
-
- udc = (struct nbu2ss_udc *)_req->context;
- p_ctrl = &udc->ctrl;
- if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
-
- if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) {
- /*-------------------------------------------------*/
- /* SET_FEATURE */
- recipient = (u8)(p_ctrl->bRequestType & USB_RECIP_MASK);
- selector = p_ctrl->wValue;
- if ((recipient == USB_RECIP_DEVICE) &&
- (selector == USB_DEVICE_TEST_MODE)) {
- test_mode = (u32)(p_ctrl->wIndex >> 8);
- _nbu2ss_set_test_mode(udc, test_mode);
- }
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* Initialization usb_request */
-static void _nbu2ss_create_ep0_packet(
- struct nbu2ss_udc *udc,
- void *p_buf,
- unsigned length
-)
-{
- udc->ep0_req.req.buf = p_buf;
- udc->ep0_req.req.length = length;
- udc->ep0_req.req.dma = 0;
- udc->ep0_req.req.zero = TRUE;
- udc->ep0_req.req.complete = _nbu2ss_ep0_complete;
- udc->ep0_req.req.status = -EINPROGRESS;
- udc->ep0_req.req.context = udc;
- udc->ep0_req.req.actual = 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Acquisition of the first address of RAM(FIFO) */
-static u32 _nbu2ss_get_begin_ram_address(struct nbu2ss_udc *udc)
-{
- u32 num, buf_type;
- u32 data, last_ram_adr, use_ram_size;
-
- struct ep_regs *p_ep_regs;
-
- last_ram_adr = (D_RAM_SIZE_CTRL / sizeof(u32)) * 2;
- use_ram_size = 0;
-
- for (num = 0; num < NUM_ENDPOINTS - 1; num++) {
- p_ep_regs = &udc->p_regs->EP_REGS[num];
- data = _nbu2ss_readl(&p_ep_regs->EP_PCKT_ADRS);
- buf_type = _nbu2ss_readl(&p_ep_regs->EP_CONTROL) & EPn_BUF_TYPE;
- if (buf_type == 0) {
- /* Single Buffer */
- use_ram_size += (data & EPn_MPKT) / sizeof(u32);
- } else {
- /* Double Buffer */
- use_ram_size += ((data & EPn_MPKT) / sizeof(u32)) * 2;
- }
-
- if ((data >> 16) > last_ram_adr)
- last_ram_adr = data>>16;
- }
-
- return last_ram_adr + use_ram_size;
-}
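
For illustration only: the loop above charges one max-packet's worth of FIFO RAM for a single-buffered endpoint and twice that for a double-buffered one. A standalone sketch of that accounting, with hypothetical endpoint settings (max_packet_words stands in for EPn_MPKT / sizeof(u32)):

#include <stdio.h>

struct ep_cfg {
        unsigned int max_packet_words;  /* EPn_MPKT / sizeof(u32)         */
        int double_buffered;            /* non-zero when EPn_BUF_TYPE set */
};

static unsigned int fifo_words_used(const struct ep_cfg *ep, int count)
{
        unsigned int used = 0;
        int i;

        for (i = 0; i < count; i++)
                used += ep[i].max_packet_words * (ep[i].double_buffered ? 2 : 1);
        return used;
}

int main(void)
{
        struct ep_cfg eps[] = { { 16, 0 }, { 128, 1 }, { 16, 0 } };

        printf("%u\n", fifo_words_used(eps, 3));        /* 16 + 256 + 16 = 288 */
        return 0;
}
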
-
-/*-------------------------------------------------------------------------*/
-/* Construction of Endpoint */
-static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
- u32 begin_adrs;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* RAM Transfer Address */
- begin_adrs = _nbu2ss_get_begin_ram_address(udc);
- data = (begin_adrs << 16) | ep->ep.maxpacket;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, data);
-
- /*-------------------------------------------------------------*/
- /* Interrupt Enable */
- data = 1 << (ep->epnum + 8);
- _nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, data);
-
- /*-------------------------------------------------------------*/
- /* Endpoint Type(Mode) */
- /* Bulk, Interrupt, ISO */
- switch (ep->ep_type) {
- case USB_ENDPOINT_XFER_BULK:
- data = EPn_BULK;
- break;
-
- case USB_ENDPOINT_XFER_INT:
- data = EPn_BUF_SINGLE | EPn_INTERRUPT;
- break;
-
- case USB_ENDPOINT_XFER_ISOC:
- data = EPn_ISO;
- break;
-
- default:
- data = 0;
- break;
- }
-
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum|ep->direct));
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = EPn_EN | EPn_BCLR | EPn_DIR0;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = (EPn_ONAK | EPn_OSTL_EN | EPn_OSTL);
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = (EPn_OUT_EN | EPn_OUT_END_EN);
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- data = (EPn_EN | EPn_BCLR | EPn_AUTO);
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = (EPn_ISTL);
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = (EPn_IN_EN | EPn_IN_END_EN);
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Release of Endpoint */
-static int _nbu2ss_epn_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
-
- if ((ep->epnum == 0) || (udc->vbus_active == 0))
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* RAM Transfer Address */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, 0);
-
- /*-------------------------------------------------------------*/
- /* Interrupt Disable */
- data = 1 << (ep->epnum + 8);
- _nbu2ss_bitclr(&udc->p_regs->USB_INT_ENA, data);
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = EPn_ONAK | EPn_BCLR;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPn_EN | EPn_DIR0;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPn_OUT_EN | EPn_OUT_END_EN;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- data = EPn_BCLR;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPn_EN | EPn_AUTO;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPn_IN_EN | EPn_IN_END_EN;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* DMA setting (without Endpoint 0) */
-static void _nbu2ss_ep_dma_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->USBSSCONF);
- if (((ep->epnum == 0) || (data & (1 << ep->epnum)) == 0))
- return; /* Not Support DMA */
-
- num = ep->epnum - 1;
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = ep->ep.maxpacket;
- _nbu2ss_writel(&udc->p_regs->EP_DCR[num].EP_DCR2, data);
-
- /*---------------------------------------------------------*/
- /* Transfer Direct */
- data = DCR1_EPn_DIR0;
- _nbu2ss_bitset(&udc->p_regs->EP_DCR[num].EP_DCR1, data);
-
- /*---------------------------------------------------------*/
- /* DMA Mode etc. */
- data = EPn_STOP_MODE | EPn_STOP_SET | EPn_DMAMODE0;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, EPn_AUTO);
-
- /*---------------------------------------------------------*/
- /* DMA Mode etc. */
- data = EPn_BURST_SET | EPn_DMAMODE0;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* DMA setting release */
-static void _nbu2ss_ep_dma_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
- struct fc_regs *preg = udc->p_regs;
-
- if (udc->vbus_active == 0)
- return; /* VBUS OFF */
-
- data = _nbu2ss_readl(&preg->USBSSCONF);
- if ((ep->epnum == 0) || ((data & (1 << ep->epnum)) == 0))
- return; /* Not Support DMA */
-
- num = ep->epnum - 1;
-
- _nbu2ss_ep_dma_abort(udc, ep);
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, 0);
- _nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPn_DIR0);
- _nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
- _nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* Abort DMA */
-static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- struct fc_regs *preg = udc->p_regs;
-
- _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum-1].EP_DCR1, DCR1_EPn_REQEN);
- mdelay(DMA_DISABLE_TIME); /* DCR1_EPn_REQEN Clear */
- _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum-1].EP_DMA_CTRL, EPn_DMA_EN);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Start IN Transfer */
-static void _nbu2ss_ep_in_end(
- struct nbu2ss_udc *udc,
- u32 epnum,
- u32 data32,
- u32 length
-)
-{
- u32 data;
- u32 num;
- struct fc_regs *preg = udc->p_regs;
-
- if (length >= sizeof(u32))
- return;
-
- if (epnum == 0) {
- _nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_AUTO);
-
- /* Writing of 1-4 bytes */
- if (length)
- _nbu2ss_writel(&preg->EP0_WRITE, data32);
-
- data = ((length << 5) & EP0_DW) | EP0_DEND;
- _nbu2ss_writel(&preg->EP0_CONTROL, data);
-
- _nbu2ss_bitset(&preg->EP0_CONTROL, EP0_AUTO);
- } else {
- num = epnum - 1;
-
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
-
- /* Writing of 1-4 bytes */
- if (length)
- _nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
-
- data = (((((u32)length) << 5) & EPn_DW) | EPn_DEND);
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
-
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
- }
-}
-
-#ifdef USE_DMA
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dma_map_single(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- u8 direct
-)
-{
- if (req->req.dma == DMA_ADDR_INVALID) {
- if (req->unaligned)
- req->req.dma = ep->phys_buf;
- else {
- req->req.dma = dma_map_single(
- udc->gadget.dev.parent,
- req->req.buf,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- }
- req->mapped = 1;
- } else {
- if (!req->unaligned)
- dma_sync_single_for_device(
- udc->gadget.dev.parent,
- req->req.dma,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- req->mapped = 0;
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dma_unmap_single(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- u8 direct
-)
-{
- u8 data[4];
- u8 *p;
- u32 count = 0;
-
- if (direct == USB_DIR_OUT) {
- count = req->req.actual % 4;
- if (count) {
- p = req->req.buf;
- p += (req->req.actual - count);
- memcpy(data, p, count);
- }
- }
-
- if (req->mapped) {
- if (req->unaligned) {
- if (direct == USB_DIR_OUT)
- memcpy(req->req.buf, ep->virt_buf,
- req->req.actual & 0xfffffffc);
- } else
- dma_unmap_single(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- req->req.dma = DMA_ADDR_INVALID;
- req->mapped = 0;
- } else {
- if (!req->unaligned)
- dma_sync_single_for_cpu(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
-
- if (count) {
- p = req->req.buf;
- p += (req->req.actual - count);
- memcpy(p, data, count);
- }
-}
-#endif
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 OUT Transfer (PIO) */
-static int EP0_out_PIO(struct nbu2ss_udc *udc, u8 *pBuf, u32 length)
-{
- u32 i;
- int nret = 0;
- u32 iWordLength = 0;
- union usb_reg_access *pBuf32 = (union usb_reg_access *)pBuf;
-
- /*------------------------------------------------------------*/
- /* Read Length */
- iWordLength = length / sizeof(u32);
-
- /*------------------------------------------------------------*/
- /* PIO Read */
- if (iWordLength) {
- for (i = 0; i < iWordLength; i++) {
- pBuf32->dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
- pBuf32++;
- }
- nret = iWordLength * sizeof(u32);
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 OUT Transfer (PIO, OverBytes) */
-static int EP0_out_OverBytes(struct nbu2ss_udc *udc, u8 *pBuf, u32 length)
-{
- u32 i;
- u32 iReadSize = 0;
- union usb_reg_access Temp32;
- union usb_reg_access *pBuf32 = (union usb_reg_access *)pBuf;
-
- if ((0 < length) && (length < sizeof(u32))) {
- Temp32.dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
- for (i = 0 ; i < length ; i++)
- pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
- iReadSize += length;
- }
-
- return iReadSize;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 IN Transfer (PIO) */
-static int EP0_in_PIO(struct nbu2ss_udc *udc, u8 *pBuf, u32 length)
-{
- u32 i;
- u32 iMaxLength = EP0_PACKETSIZE;
- u32 iWordLength = 0;
- u32 iWriteLength = 0;
- union usb_reg_access *pBuf32 = (union usb_reg_access *)pBuf;
-
- /*------------------------------------------------------------*/
- /* Transfer Length */
- if (iMaxLength < length)
- iWordLength = iMaxLength / sizeof(u32);
- else
- iWordLength = length / sizeof(u32);
-
- /*------------------------------------------------------------*/
- /* PIO */
- for (i = 0; i < iWordLength; i++) {
- _nbu2ss_writel(&udc->p_regs->EP0_WRITE, pBuf32->dw);
- pBuf32++;
- iWriteLength += sizeof(u32);
- }
-
- return iWriteLength;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 IN Transfer (PIO, OverBytes) */
-static int EP0_in_OverBytes(struct nbu2ss_udc *udc, u8 *pBuf, u32 iRemainSize)
-{
- u32 i;
- union usb_reg_access Temp32;
- union usb_reg_access *pBuf32 = (union usb_reg_access *)pBuf;
-
- if ((0 < iRemainSize) && (iRemainSize < sizeof(u32))) {
- for (i = 0 ; i < iRemainSize ; i++)
- Temp32.byte.DATA[i] = pBuf32->byte.DATA[i];
- _nbu2ss_ep_in_end(udc, 0, Temp32.dw, iRemainSize);
-
- return iRemainSize;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Transfer NULL Packet (Endpoint 0) */
-static int EP0_send_NULL(struct nbu2ss_udc *udc, bool pid_flag)
-{
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data &= ~(u32)EP0_INAK;
-
- if (pid_flag)
- data |= (EP0_INAK_EN | EP0_PIDCLR | EP0_DEND);
- else
- data |= (EP0_INAK_EN | EP0_DEND);
-
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Receive NULL Packet (Endpoint 0) */
-static int EP0_receive_NULL(struct nbu2ss_udc *udc, bool pid_flag)
-{
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data &= ~(u32)EP0_ONAK;
-
- if (pid_flag)
- data |= EP0_PIDCLR;
-
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_ep0_in_transfer(
- struct nbu2ss_udc *udc,
- struct nbu2ss_req *req
-)
-{
- u8 *pBuffer; /* IN Data Buffer */
- u32 data;
- u32 iRemainSize = 0;
- int result = 0;
-
- /*-------------------------------------------------------------*/
- /* End confirmation */
- if (req->req.actual == req->req.length) {
- if ((req->req.actual % EP0_PACKETSIZE) == 0) {
- if (req->zero) {
- req->zero = false;
- EP0_send_NULL(udc, FALSE);
- return 1;
- }
- }
-
- return 0; /* Transfer End */
- }
-
- /*-------------------------------------------------------------*/
- /* NAK release */
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data |= EP0_INAK_EN;
- data &= ~(u32)EP0_INAK;
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- iRemainSize = req->req.length - req->req.actual;
- pBuffer = (u8 *)req->req.buf;
- pBuffer += req->req.actual;
-
- /*-------------------------------------------------------------*/
- /* Data transfer */
- result = EP0_in_PIO(udc, pBuffer, iRemainSize);
-
- req->div_len = result;
- iRemainSize -= result;
-
- if (iRemainSize == 0) {
- EP0_send_NULL(udc, FALSE);
- return result;
- }
-
- if ((iRemainSize < sizeof(u32)) && (result != EP0_PACKETSIZE)) {
- pBuffer += result;
- result += EP0_in_OverBytes(udc, pBuffer, iRemainSize);
- req->div_len = result;
- }
-
- return result;
-}
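
For illustration only: the "End confirmation" step above follows the usual USB zero-length-packet rule — a ZLP terminates the transfer only when the request finished exactly on a packet boundary and the gadget asked for explicit termination (req->zero). The same decision as a standalone predicate (max_packet stands in for EP0_PACKETSIZE):

/* Non-zero when a terminating zero-length packet must still be sent. */
static int needs_zlp(unsigned int actual, unsigned int length,
                     unsigned int max_packet, int zero_requested)
{
        return actual == length && (actual % max_packet) == 0 && zero_requested;
}
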
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_ep0_out_transfer(
- struct nbu2ss_udc *udc,
- struct nbu2ss_req *req
-)
-{
- u8 *pBuffer;
- u32 iRemainSize;
- u32 iRecvLength;
- int result = 0;
- int fRcvZero;
-
- /*-------------------------------------------------------------*/
- /* Receive data confirmation */
- iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
- if (iRecvLength != 0) {
-
- fRcvZero = 0;
-
- iRemainSize = req->req.length - req->req.actual;
- pBuffer = (u8 *)req->req.buf;
- pBuffer += req->req.actual;
-
- result = EP0_out_PIO(udc, pBuffer
- , min(iRemainSize, iRecvLength));
- if (result < 0)
- return result;
-
- req->req.actual += result;
- iRecvLength -= result;
-
- if ((0 < iRecvLength) && (iRecvLength < sizeof(u32))) {
- pBuffer += result;
- iRemainSize -= result;
-
- result = EP0_out_OverBytes(udc, pBuffer
- , min(iRemainSize, iRecvLength));
- req->req.actual += result;
- }
- } else {
- fRcvZero = 1;
- }
-
- /*-------------------------------------------------------------*/
- /* End confirmation */
- if (req->req.actual == req->req.length) {
- if ((req->req.actual % EP0_PACKETSIZE) == 0) {
- if (req->zero) {
- req->zero = false;
- EP0_receive_NULL(udc, FALSE);
- return 1;
- }
- }
-
- return 0; /* Transfer End */
- }
-
- if ((req->req.actual % EP0_PACKETSIZE) != 0)
- return 0; /* Short Packet Transfer End */
-
- if (req->req.actual > req->req.length) {
- dev_err(udc->dev, " *** Overrun Error\n");
- return -EOVERFLOW;
- }
-
- if (fRcvZero != 0) {
- iRemainSize = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- if (iRemainSize & EP0_ONAK) {
- /*---------------------------------------------------*/
- /* NACK release */
- _nbu2ss_bitclr(&udc->p_regs->EP0_CONTROL, EP0_ONAK);
- }
- result = 1;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_out_dma(
- struct nbu2ss_udc *udc,
- struct nbu2ss_req *req,
- u32 num,
- u32 length
-)
-{
- u8 *pBuffer;
- u32 mpkt;
- u32 lmpkt;
- u32 dmacnt;
- u32 burst = 1;
- u32 data;
- int result = -EINVAL;
- struct fc_regs *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- req->dma_flag = TRUE;
- pBuffer = (u8 *)req->req.dma;
- pBuffer += req->req.actual;
-
- /* DMA Address */
- _nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)pBuffer);
-
- /* Number of transfer packets */
- mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPn_MPKT;
- dmacnt = (length / mpkt);
- lmpkt = (length % mpkt) & ~(u32)0x03;
-
- if (DMA_MAX_COUNT < dmacnt) {
- dmacnt = DMA_MAX_COUNT;
- lmpkt = 0;
- } else if (0 != lmpkt) {
- if (0 == dmacnt)
- burst = 0; /* Burst OFF */
- dmacnt++;
- }
-
- data = mpkt | (lmpkt << 16);
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
-
- data = ((dmacnt & 0xff) << 16) | DCR1_EPn_DIR0 | DCR1_EPn_REQEN;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
-
- if (0 == burst) {
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, 0);
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_BURST_SET);
- } else {
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT
- , (dmacnt << 16));
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_BURST_SET);
- }
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_DMA_EN);
-
- result = length & ~(u32)0x03;
- req->div_len = result;
-
- return result;
-}
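
For illustration only: the setup above splits an OUT transfer into dmacnt full max-packet DMA packets plus a word-aligned tail packet (lmpkt), capping the count at DMA_MAX_COUNT. A standalone restatement with hypothetical sizes (EXAMPLE_MPKT and EXAMPLE_DMA_MAX are stand-ins):

#include <stdio.h>

#define EXAMPLE_MPKT    512u    /* hypothetical max packet size */
#define EXAMPLE_DMA_MAX 256u    /* stands in for DMA_MAX_COUNT  */

static void split_transfer(unsigned int length)
{
        unsigned int dmacnt = length / EXAMPLE_MPKT;
        unsigned int lmpkt  = (length % EXAMPLE_MPKT) & ~0x03u; /* word aligned */

        if (dmacnt > EXAMPLE_DMA_MAX) {
                dmacnt = EXAMPLE_DMA_MAX;       /* remainder goes in a later pass */
                lmpkt = 0;
        } else if (lmpkt) {
                dmacnt++;                       /* short tail counts as a packet  */
        }

        printf("len=%u -> %u packets, tail %u bytes\n", length, dmacnt, lmpkt);
}

int main(void)
{
        split_transfer(1300u);  /* 2 full packets + 276-byte tail -> 3 packets */
        return 0;
}
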
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_pio(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- u32 length
-)
-{
- u8 *pBuffer;
- u32 i;
- u32 data;
- u32 iWordLength;
- union usb_reg_access Temp32;
- union usb_reg_access *pBuf32;
- int result = 0;
- struct fc_regs *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- if (length == 0)
- return 0;
-
- pBuffer = (u8 *)req->req.buf;
- pBuf32 = (union usb_reg_access *)(pBuffer + req->req.actual);
-
- iWordLength = length / sizeof(u32);
- if (iWordLength > 0) {
- /*---------------------------------------------------------*/
- /* Copy of every four bytes */
- for (i = 0; i < iWordLength; i++) {
- pBuf32->dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
- pBuf32++;
- }
- result = iWordLength * sizeof(u32);
- }
-
- data = length - result;
- if (data > 0) {
- /*---------------------------------------------------------*/
- /* Copy of fraction byte */
- Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
- for (i = 0 ; i < data ; i++)
- pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
- result += data;
- }
-
- req->req.actual += result;
-
- if ((req->req.actual == req->req.length)
- || ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
- result = 0;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_data(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- u32 data_size
-)
-{
- u32 num;
- u32 iBufSize;
- int nret = 1;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- iBufSize = min((req->req.length - req->req.actual), data_size);
-
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
- && (req->req.dma != 0)
- && (iBufSize >= sizeof(u32))) {
- nret = _nbu2ss_out_dma(udc, req, num, iBufSize);
- } else {
- iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket);
- nret = _nbu2ss_epn_out_pio(udc, ep, req, iBufSize);
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_transfer(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req
-)
-{
- u32 num;
- u32 iRecvLength;
- int result = 1;
- struct fc_regs *preg = udc->p_regs;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* Receive Length */
- iRecvLength
- = _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPn_LDATA;
-
- if (iRecvLength != 0) {
- result = _nbu2ss_epn_out_data(udc, ep, req, iRecvLength);
- if (iRecvLength < ep->ep.maxpacket) {
- if (iRecvLength == result) {
- req->req.actual += result;
- result = 0;
- }
- }
- } else {
- if ((req->req.actual == req->req.length)
- || ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
- result = 0;
- }
- }
-
- if (result == 0) {
- if ((req->req.actual % ep->ep.maxpacket) == 0) {
- if (req->zero) {
- req->zero = false;
- return 1;
- }
- }
- }
-
- if (req->req.actual > req->req.length) {
- dev_err(udc->dev, " Overrun Error\n");
- dev_err(udc->dev, " actual = %d, length = %d\n",
- req->req.actual, req->req.length);
- result = -EOVERFLOW;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_in_dma(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- u32 num,
- u32 length
-)
-{
- u8 *pBuffer;
- u32 mpkt; /* MaxPacketSize */
- u32 lmpkt; /* Last Packet Data Size */
- u32 dmacnt; /* IN Data Size */
- u32 iWriteLength;
- u32 data;
- int result = -EINVAL;
- struct fc_regs *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
-#ifdef USE_DMA
- if (req->req.actual == 0)
- _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN);
-#endif
- req->dma_flag = TRUE;
-
- /* MAX Packet Size */
- mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPn_MPKT;
-
- if ((DMA_MAX_COUNT * mpkt) < length)
- iWriteLength = DMA_MAX_COUNT * mpkt;
- else
- iWriteLength = length;
-
- /*------------------------------------------------------------*/
- /* Number of transmission packets */
- if (mpkt < iWriteLength) {
- dmacnt = iWriteLength / mpkt;
- lmpkt = (iWriteLength % mpkt) & ~(u32)0x3;
- if (lmpkt != 0)
- dmacnt++;
- else
- lmpkt = mpkt & ~(u32)0x3;
-
- } else {
- dmacnt = 1;
- lmpkt = iWriteLength & ~(u32)0x3;
- }
-
- /* Packet setting */
- data = mpkt | (lmpkt << 16);
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
-
- /* Address setting */
- pBuffer = (u8 *)req->req.dma;
- pBuffer += req->req.actual;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)pBuffer);
-
- /* Packet and DMA setting */
- data = ((dmacnt & 0xff) << 16) | DCR1_EPn_REQEN;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
-
- /* Packet setting of EPC */
- data = dmacnt << 16;
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, data);
-
- /* DMA setting of EPC */
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_DMA_EN);
-
- result = iWriteLength & ~(u32)0x3;
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_pio(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- u32 length
-)
-{
- u8 *pBuffer;
- u32 i;
- u32 data;
- u32 iWordLength;
- union usb_reg_access Temp32;
- union usb_reg_access *pBuf32 = NULL;
- int result = 0;
- struct fc_regs *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- if (length > 0) {
- pBuffer = (u8 *)req->req.buf;
- pBuf32 = (union usb_reg_access *)(pBuffer + req->req.actual);
-
- iWordLength = length / sizeof(u32);
- if (iWordLength > 0) {
- for (i = 0; i < iWordLength; i++) {
- _nbu2ss_writel(
- &preg->EP_REGS[ep->epnum-1].EP_WRITE
- , pBuf32->dw
- );
-
- pBuf32++;
- }
- result = iWordLength * sizeof(u32);
- }
- }
-
- if (result != ep->ep.maxpacket) {
- data = length - result;
- Temp32.dw = 0;
- for (i = 0 ; i < data ; i++)
- Temp32.byte.DATA[i] = pBuf32->byte.DATA[i];
-
- _nbu2ss_ep_in_end(udc, ep->epnum, Temp32.dw, data);
- result += data;
- }
-
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_data(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- u32 data_size
-)
-{
- u32 num;
- int nret = 1;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
- && (req->req.dma != 0)
- && (data_size >= sizeof(u32))) {
- nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
- } else {
- data_size = min_t(u32, data_size, ep->ep.maxpacket);
- nret = _nbu2ss_epn_in_pio(udc, ep, req, data_size);
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_transfer(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req
-)
-{
- u32 num;
- u32 iBufSize;
- int result = 0;
- u32 status;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
-
- /*-------------------------------------------------------------*/
- /* State confirmation of FIFO */
- if (req->req.actual == 0) {
- if ((status & EPn_IN_EMPTY) == 0)
- return 1; /* Not Empty */
-
- } else {
- if ((status & EPn_IN_FULL) != 0)
- return 1; /* Not Empty */
- }
-
- /*-------------------------------------------------------------*/
- /* Start transfer */
- iBufSize = req->req.length - req->req.actual;
- if (iBufSize > 0)
- result = _nbu2ss_epn_in_data(udc, ep, req, iBufSize);
- else if (req->req.length == 0)
- _nbu2ss_zero_len_pkt(udc, ep->epnum);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_start_transfer(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- bool bflag)
-{
- int nret = -EINVAL;
-
- req->dma_flag = FALSE;
- req->div_len = 0;
-
- if (req->req.length == 0)
- req->zero = false;
- else {
- if ((req->req.length % ep->ep.maxpacket) == 0)
- req->zero = req->req.zero;
- else
- req->zero = false;
- }
-
- if (ep->epnum == 0) {
- /* EP0 */
- switch (udc->ep0state) {
- case EP0_IN_DATA_PHASE:
- nret = _nbu2ss_ep0_in_transfer(udc, req);
- break;
-
- case EP0_OUT_DATA_PHASE:
- nret = _nbu2ss_ep0_out_transfer(udc, req);
- break;
-
- case EP0_IN_STATUS_PHASE:
- nret = EP0_send_NULL(udc, TRUE);
- break;
-
- default:
- break;
- }
-
- } else {
- /* EPn */
- if (ep->direct == USB_DIR_OUT) {
- /* OUT */
- if (bflag == FALSE)
- nret = _nbu2ss_epn_out_transfer(udc, ep, req);
- } else {
- /* IN */
- nret = _nbu2ss_epn_in_transfer(udc, ep, req);
- }
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
-{
- u32 length;
- bool bflag = FALSE;
- struct nbu2ss_req *req;
-
- if (list_empty(&ep->queue))
- req = NULL;
- else
- req = list_entry(ep->queue.next, struct nbu2ss_req, queue);
-
- if (!req)
- return;
-
- if (ep->epnum > 0) {
- length = _nbu2ss_readl(
- &ep->udc->p_regs->EP_REGS[ep->epnum-1].EP_LEN_DCNT);
-
- length &= EPn_LDATA;
- if (length < ep->ep.maxpacket)
- bflag = TRUE;
- }
-
- _nbu2ss_start_transfer(ep->udc, ep, req, bflag);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint Toggle Reset */
-static void _nbu2ss_endpoint_toggle_reset(
- struct nbu2ss_udc *udc,
- u8 ep_adrs)
-{
- u8 num;
- u32 data;
-
- if ((ep_adrs == 0) || (ep_adrs == 0x80))
- return;
-
- num = (ep_adrs & 0x7F) - 1;
-
- if (ep_adrs & USB_DIR_IN)
- data = EPn_IPIDCLR;
- else
- data = EPn_BCLR | EPn_OPIDCLR;
-
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint STALL set */
-static void _nbu2ss_set_endpoint_stall(
- struct nbu2ss_udc *udc,
- u8 ep_adrs,
- bool bstall)
-{
- u8 num, epnum;
- u32 data;
- struct nbu2ss_ep *ep;
- struct fc_regs *preg = udc->p_regs;
-
- if ((ep_adrs == 0) || (ep_adrs == 0x80)) {
- if (bstall) {
- /* Set STALL */
- _nbu2ss_bitset(&preg->EP0_CONTROL, EP0_STL);
- } else {
- /* Clear STALL */
- _nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_STL);
- }
- } else {
- epnum = ep_adrs & USB_ENDPOINT_NUMBER_MASK;
- num = epnum - 1;
- ep = &udc->ep[epnum];
-
- if (bstall) {
- /* Set STALL */
- ep->halted = TRUE;
-
- if (ep_adrs & USB_DIR_IN)
- data = EPn_BCLR | EPn_ISTL;
- else
- data = EPn_OSTL_EN | EPn_OSTL;
-
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
- } else {
- /* Clear STALL */
- ep->stalled = FALSE;
- if (ep_adrs & USB_DIR_IN) {
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL
- , EPn_ISTL);
- } else {
- data =
- _nbu2ss_readl(&preg->EP_REGS[num].EP_CONTROL);
-
- data &= ~EPn_OSTL;
- data |= EPn_OSTL_EN;
-
- _nbu2ss_writel(&preg->EP_REGS[num].EP_CONTROL
- , data);
- }
-
- ep->stalled = FALSE;
- if (ep->halted) {
- ep->halted = FALSE;
- _nbu2ss_restert_transfer(ep);
- }
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* Device Descriptor */
-static struct usb_device_descriptor device_desc = {
- .bLength = sizeof(device_desc),
- .bDescriptorType = USB_DT_DEVICE,
- .bcdUSB = cpu_to_le16(0x0200),
- .bDeviceClass = USB_CLASS_VENDOR_SPEC,
- .bDeviceSubClass = 0x00,
- .bDeviceProtocol = 0x00,
- .bMaxPacketSize0 = 64,
- .idVendor = cpu_to_le16(0x0409),
- .idProduct = cpu_to_le16(0xfff0),
- .bcdDevice = 0xffff,
- .iManufacturer = 0x00,
- .iProduct = 0x00,
- .iSerialNumber = 0x00,
- .bNumConfigurations = 0x01,
-};
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_set_test_mode(struct nbu2ss_udc *udc, u32 mode)
-{
- u32 data;
-
- if (mode > MAX_TEST_MODE_NUM)
- return;
-
- dev_info(udc->dev, "SET FEATURE : test mode = %d\n", mode);
-
- data = _nbu2ss_readl(&udc->p_regs->USB_CONTROL);
- data &= ~TEST_FORCE_ENABLE;
- data |= mode << TEST_MODE_SHIFT;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, data);
- _nbu2ss_bitset(&udc->p_regs->TEST_CONTROL, CS_TESTMODEEN);
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_set_feature_device(
- struct nbu2ss_udc *udc,
- u16 selector,
- u16 wIndex
-)
-{
- int result = -EOPNOTSUPP;
-
- switch (selector) {
- case USB_DEVICE_REMOTE_WAKEUP:
- if (0x0000 == wIndex) {
- udc->remote_wakeup = U2F_ENABLE;
- result = 0;
- }
- break;
-
- case USB_DEVICE_TEST_MODE:
- wIndex >>= 8;
- if (wIndex <= MAX_TEST_MODE_NUM)
- result = 0;
- break;
-
- default:
- break;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
-{
- u8 epnum;
- u32 data = 0, bit_data;
- struct fc_regs *preg = udc->p_regs;
-
- epnum = ep_adrs & ~USB_ENDPOINT_DIR_MASK;
- if (epnum == 0) {
- data = _nbu2ss_readl(&preg->EP0_CONTROL);
- bit_data = EP0_STL;
-
- } else {
- data = _nbu2ss_readl(&preg->EP_REGS[epnum-1].EP_CONTROL);
- if ((data & EPn_EN) == 0)
- return -1;
-
- if (ep_adrs & USB_ENDPOINT_DIR_MASK)
- bit_data = EPn_ISTL;
- else
- bit_data = EPn_OSTL;
- }
-
- if ((data & bit_data) == 0)
- return 0;
- return 1;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_req_feature(struct nbu2ss_udc *udc, bool bset)
-{
- u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
- u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
- u16 selector = udc->ctrl.wValue;
- u16 wIndex = udc->ctrl.wIndex;
- u8 ep_adrs;
- int result = -EOPNOTSUPP;
-
- if ((0x0000 != udc->ctrl.wLength) ||
- (USB_DIR_OUT != direction)) {
- return -EINVAL;
- }
-
- switch (recipient) {
- case USB_RECIP_DEVICE:
- if (bset)
- result =
- _nbu2ss_set_feature_device(udc, selector, wIndex);
- break;
-
- case USB_RECIP_ENDPOINT:
- if (0x0000 == (wIndex & 0xFF70)) {
- if (USB_ENDPOINT_HALT == selector) {
- ep_adrs = wIndex & 0xFF;
- if (bset == FALSE) {
- _nbu2ss_endpoint_toggle_reset(
- udc, ep_adrs);
- }
-
- _nbu2ss_set_endpoint_stall(
- udc, ep_adrs, bset);
-
- result = 0;
- }
- }
- break;
-
- default:
- break;
- }
-
- if (result >= 0)
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline enum usb_device_speed _nbu2ss_get_speed(struct nbu2ss_udc *udc)
-{
- u32 data;
- enum usb_device_speed speed = USB_SPEED_FULL;
-
- data = _nbu2ss_readl(&udc->p_regs->USB_STATUS);
- if (data & HIGH_SPEED)
- speed = USB_SPEED_HIGH;
-
- return speed;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_epn_set_stall(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep
-)
-{
- u8 ep_adrs;
- u32 regdata;
- int limit_cnt = 0;
-
- struct fc_regs *preg = udc->p_regs;
-
- if (ep->direct == USB_DIR_IN) {
- for (limit_cnt = 0
- ; limit_cnt < IN_DATA_EMPTY_COUNT
- ; limit_cnt++) {
-
- regdata = _nbu2ss_readl(
- &preg->EP_REGS[ep->epnum-1].EP_STATUS);
-
- if ((regdata & EPn_IN_DATA) == 0)
- break;
-
- mdelay(1);
- }
- }
-
- ep_adrs = ep->epnum | ep->direct;
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, 1);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_get_status(struct nbu2ss_udc *udc)
-{
- u32 length;
- u16 status_data = 0;
- u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
- u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
- u8 ep_adrs;
- int result = -EINVAL;
-
- if ((0x0000 != udc->ctrl.wValue)
- || (USB_DIR_IN != direction)) {
-
- return result;
- }
-
- length = min_t(u16, udc->ctrl.wLength, sizeof(status_data));
-
- switch (recipient) {
- case USB_RECIP_DEVICE:
- if (udc->ctrl.wIndex == 0x0000) {
- if (udc->gadget.is_selfpowered)
- status_data |= (1 << USB_DEVICE_SELF_POWERED);
-
- if (udc->remote_wakeup)
- status_data |= (1 << USB_DEVICE_REMOTE_WAKEUP);
-
- result = 0;
- }
- break;
-
- case USB_RECIP_ENDPOINT:
- if (0x0000 == (udc->ctrl.wIndex & 0xFF70)) {
- ep_adrs = (u8)(udc->ctrl.wIndex & 0xFF);
- result = _nbu2ss_get_ep_stall(udc, ep_adrs);
-
- if (result > 0)
- status_data |= (1 << USB_ENDPOINT_HALT);
- }
- break;
-
- default:
- break;
- }
-
- if (result >= 0) {
- memcpy(udc->ep0_buf, &status_data, length);
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, length);
- _nbu2ss_ep0_in_transfer(udc, &udc->ep0_req);
-
- } else {
- dev_err(udc->dev, " Error GET_STATUS\n");
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_clear_feature(struct nbu2ss_udc *udc)
-{
- return _nbu2ss_req_feature(udc, FALSE);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_feature(struct nbu2ss_udc *udc)
-{
- return _nbu2ss_req_feature(udc, TRUE);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_address(struct nbu2ss_udc *udc)
-{
- int result = 0;
- u32 wValue = udc->ctrl.wValue;
-
- if ((0x00 != udc->ctrl.bRequestType) ||
- (0x0000 != udc->ctrl.wIndex) ||
- (0x0000 != udc->ctrl.wLength)) {
- return -EINVAL;
- }
-
- if (wValue != (wValue & 0x007F))
- return -EINVAL;
-
- wValue <<= USB_ADRS_SHIFT;
-
- _nbu2ss_writel(&udc->p_regs->USB_ADDRESS, wValue);
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_configuration(struct nbu2ss_udc *udc)
-{
- u32 ConfigValue = (u32)(udc->ctrl.wValue & 0x00ff);
-
- if ((0x0000 != udc->ctrl.wIndex) ||
- (0x0000 != udc->ctrl.wLength) ||
- (0x00 != udc->ctrl.bRequestType)) {
- return -EINVAL;
- }
-
- udc->curr_config = ConfigValue;
-
- if (ConfigValue > 0) {
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, CONF);
- udc->devstate = USB_STATE_CONFIGURED;
-
- } else {
- _nbu2ss_bitclr(&udc->p_regs->USB_CONTROL, CONF);
- udc->devstate = USB_STATE_ADDRESS;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_read_request_data(struct nbu2ss_udc *udc, u32 *pdata)
-{
- if ((!udc) && (!pdata))
- return;
-
- *pdata = _nbu2ss_readl(&udc->p_regs->SETUP_DATA0);
- pdata++;
- *pdata = _nbu2ss_readl(&udc->p_regs->SETUP_DATA1);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_decode_request(struct nbu2ss_udc *udc)
-{
- bool bcall_back = TRUE;
- int nret = -EINVAL;
- struct usb_ctrlrequest *p_ctrl;
-
- p_ctrl = &udc->ctrl;
- _nbu2ss_read_request_data(udc, (u32 *)p_ctrl);
-
- /* ep0 state control */
- if (p_ctrl->wLength == 0) {
- udc->ep0state = EP0_IN_STATUS_PHASE;
-
- } else {
- if (p_ctrl->bRequestType & USB_DIR_IN)
- udc->ep0state = EP0_IN_DATA_PHASE;
- else
- udc->ep0state = EP0_OUT_DATA_PHASE;
- }
-
- if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (p_ctrl->bRequest) {
- case USB_REQ_GET_STATUS:
- nret = std_req_get_status(udc);
- bcall_back = FALSE;
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- nret = std_req_clear_feature(udc);
- bcall_back = FALSE;
- break;
-
- case USB_REQ_SET_FEATURE:
- nret = std_req_set_feature(udc);
- bcall_back = FALSE;
- break;
-
- case USB_REQ_SET_ADDRESS:
- nret = std_req_set_address(udc);
- bcall_back = FALSE;
- break;
-
- case USB_REQ_SET_CONFIGURATION:
- nret = std_req_set_configuration(udc);
- break;
-
- default:
- break;
- }
- }
-
- if (bcall_back == FALSE) {
- if (udc->ep0state == EP0_IN_STATUS_PHASE) {
- if (nret >= 0) {
- /*--------------------------------------*/
- /* Status Stage */
- nret = EP0_send_NULL(udc, TRUE);
- }
- }
-
- } else {
- spin_unlock(&udc->lock);
- nret = udc->driver->setup(&udc->gadget, &udc->ctrl);
- spin_lock(&udc->lock);
- }
-
- if (nret < 0)
- udc->ep0state = EP0_IDLE;
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_in_data_stage(struct nbu2ss_udc *udc)
-{
- int nret;
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- if (list_empty(&ep->queue))
- req = NULL;
- else
- req = list_entry(ep->queue.next, struct nbu2ss_req, queue);
-
- if (!req)
- req = &udc->ep0_req;
-
- req->req.actual += req->div_len;
- req->div_len = 0;
-
- nret = _nbu2ss_ep0_in_transfer(udc, req);
- if (nret == 0) {
- udc->ep0state = EP0_OUT_STATUS_PAHSE;
- EP0_receive_NULL(udc, TRUE);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_out_data_stage(struct nbu2ss_udc *udc)
-{
- int nret;
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- if (list_empty(&ep->queue))
- req = NULL;
- else
- req = list_entry(ep->queue.next, struct nbu2ss_req, queue);
-
- if (!req)
- req = &udc->ep0_req;
-
- nret = _nbu2ss_ep0_out_transfer(udc, req);
- if (nret == 0) {
- udc->ep0state = EP0_IN_STATUS_PHASE;
- EP0_send_NULL(udc, TRUE);
-
- } else if (nret < 0) {
- _nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, EP0_BCLR);
- req->req.status = nret;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_status_stage(struct nbu2ss_udc *udc)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- if (list_empty(&ep->queue))
- req = NULL;
- else
- req = list_entry(ep->queue.next, struct nbu2ss_req, queue);
-
- if (!req) {
- req = &udc->ep0_req;
- if (req->req.complete)
- req->req.complete(&ep->ep, &req->req);
-
- } else {
- if (req->req.complete)
- _nbu2ss_ep_done(ep, req, 0);
- }
-
- udc->ep0state = EP0_IDLE;
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
-{
- int i;
- u32 status;
- u32 intr;
- int nret = -1;
-
- status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
- intr = status & EP0_STATUS_RW_BIT;
- _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~(u32)intr);
-
- status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
- | STG_END_INT | EP0_OUT_NULL_INT);
-
- if (status == 0) {
- dev_info(udc->dev, "%s Not Decode Interrupt\n", __func__);
- dev_info(udc->dev, "EP0_STATUS = 0x%08x\n", intr);
- return;
- }
-
- if (udc->gadget.speed == USB_SPEED_UNKNOWN)
- udc->gadget.speed = _nbu2ss_get_speed(udc);
-
- for (i = 0; i < EP0_END_XFER; i++) {
- switch (udc->ep0state) {
- case EP0_IDLE:
- if (status & SETUP_INT) {
- status = 0;
- nret = _nbu2ss_decode_request(udc);
- }
- break;
-
- case EP0_IN_DATA_PHASE:
- if (status & EP0_IN_INT) {
- status &= ~EP0_IN_INT;
- nret = _nbu2ss_ep0_in_data_stage(udc);
- }
- break;
-
- case EP0_OUT_DATA_PHASE:
- if (status & EP0_OUT_INT) {
- status &= ~EP0_OUT_INT;
- nret = _nbu2ss_ep0_out_data_stage(udc);
- }
- break;
-
- case EP0_IN_STATUS_PHASE:
- if ((status & STG_END_INT) || (status & SETUP_INT)) {
- status &= ~(STG_END_INT | EP0_IN_INT);
- nret = _nbu2ss_ep0_status_stage(udc);
- }
- break;
-
- case EP0_OUT_STATUS_PAHSE:
- if ((status & STG_END_INT)
- || (status & SETUP_INT)
- || (status & EP0_OUT_NULL_INT)) {
- status &= ~(STG_END_INT
- | EP0_OUT_INT
- | EP0_OUT_NULL_INT);
-
- nret = _nbu2ss_ep0_status_stage(udc);
- }
-
- break;
-
- default:
- status = 0;
- break;
- }
-
- if (status == 0)
- break;
- }
-
- if (nret < 0) {
- /* Send Stall */
- _nbu2ss_set_endpoint_stall(udc, 0, TRUE);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_ep_done(
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- int status)
-{
- struct nbu2ss_udc *udc = ep->udc;
-
- list_del_init(&req->queue);
-
- if (status == -ECONNRESET)
- _nbu2ss_fifo_flush(udc, ep);
-
- if (likely(req->req.status == -EINPROGRESS))
- req->req.status = status;
-
- if (ep->stalled)
- _nbu2ss_epn_set_stall(udc, ep);
- else {
- if (!list_empty(&ep->queue))
- _nbu2ss_restert_transfer(ep);
- }
-
-#ifdef USE_DMA
- if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) &&
- (req->req.dma != 0))
- _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT);
-#endif
-
- spin_unlock(&udc->lock);
- req->req.complete(&ep->ep, &req->req);
- spin_lock(&udc->lock);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_in_int(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int result = 0;
- u32 status;
-
- struct fc_regs *preg = udc->p_regs;
-
- if (req->dma_flag)
- return; /* DMA is forwarded */
-
- req->req.actual += req->div_len;
- req->div_len = 0;
-
- if (req->req.actual != req->req.length) {
- /*---------------------------------------------------------*/
- /* remainder of data */
- result = _nbu2ss_epn_in_transfer(udc, ep, req);
-
- } else {
- if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
-
- status =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_STATUS);
-
- if ((status & EPn_IN_FULL) == 0) {
- /*-----------------------------------------*/
- /* 0 Length Packet */
- req->zero = false;
- _nbu2ss_zero_len_pkt(udc, ep->epnum);
- }
- return;
- }
- }
-
- if (result <= 0) {
- /*---------------------------------------------------------*/
- /* Complete */
- _nbu2ss_ep_done(ep, req, result);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_out_int(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int result;
-
- result = _nbu2ss_epn_out_transfer(udc, ep, req);
- if (result <= 0)
- _nbu2ss_ep_done(ep, req, result);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_in_dma_int(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- u32 mpkt;
- u32 size;
- struct usb_request *preq;
-
- preq = &req->req;
-
- if (req->dma_flag == FALSE)
- return;
-
- preq->actual += req->div_len;
- req->div_len = 0;
- req->dma_flag = FALSE;
-
-#ifdef USE_DMA
- _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN);
-#endif
-
- if (preq->actual != preq->length) {
- _nbu2ss_epn_in_transfer(udc, ep, req);
- } else {
- mpkt = ep->ep.maxpacket;
- size = preq->actual % mpkt;
- if (size > 0) {
- if (((preq->actual & 0x03) == 0) && (size < mpkt))
- _nbu2ss_ep_in_end(udc, ep->epnum, 0, 0);
- } else {
- _nbu2ss_epn_in_int(udc, ep, req);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_out_dma_int(
- struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int i;
- u32 num;
- u32 dmacnt, ep_dmacnt;
- u32 mpkt;
- struct fc_regs *preg = udc->p_regs;
-
- num = ep->epnum - 1;
-
- if (req->req.actual == req->req.length) {
- if ((req->req.length % ep->ep.maxpacket) && !req->zero) {
- req->div_len = 0;
- req->dma_flag = FALSE;
- _nbu2ss_ep_done(ep, req, 0);
- return;
- }
- }
-
- ep_dmacnt = _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT)
- & EPn_DMACNT;
- ep_dmacnt >>= 16;
-
- for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
- dmacnt = _nbu2ss_readl(&preg->EP_DCR[num].EP_DCR1)
- & DCR1_EPn_DMACNT;
- dmacnt >>= 16;
- if (ep_dmacnt == dmacnt)
- break;
- }
-
- _nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPn_REQEN);
-
- if (dmacnt != 0) {
- mpkt = ep->ep.maxpacket;
- if ((req->div_len % mpkt) == 0)
- req->div_len -= mpkt * dmacnt;
- }
-
- if ((req->req.actual % ep->ep.maxpacket) > 0) {
- if (req->req.actual == req->div_len) {
- req->div_len = 0;
- req->dma_flag = FALSE;
- _nbu2ss_ep_done(ep, req, 0);
- return;
- }
- }
-
- req->req.actual += req->div_len;
- req->div_len = 0;
- req->dma_flag = FALSE;
-
- _nbu2ss_epn_out_int(udc, ep, req);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
-{
- u32 num;
- u32 status;
-
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[epnum];
-
- num = epnum - 1;
-
- /* Interrupt Status */
- status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
-
- /* Interrupt Clear */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~(u32)status);
-
- if (list_empty(&ep->queue))
- req = NULL;
- else
- req = list_entry(ep->queue.next, struct nbu2ss_req, queue);
-
- if (!req) {
- /* pr_warn("=== %s(%d) req == NULL\n", __func__, epnum); */
- return;
- }
-
- if (status & EPn_OUT_END_INT) {
- status &= ~EPn_OUT_INT;
- _nbu2ss_epn_out_dma_int(udc, ep, req);
- }
-
- if (status & EPn_OUT_INT)
- _nbu2ss_epn_out_int(udc, ep, req);
-
- if (status & EPn_IN_END_INT) {
- status &= ~EPn_IN_INT;
- _nbu2ss_epn_in_dma_int(udc, ep, req);
- }
-
- if (status & EPn_IN_INT)
- _nbu2ss_epn_in_int(udc, ep, req);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_ep_int(struct nbu2ss_udc *udc, u32 epnum)
-{
- if (epnum == 0)
- _nbu2ss_ep0_int(udc);
- else
- _nbu2ss_epn_int(udc, epnum);
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_ep0_enable(struct nbu2ss_udc *udc)
-{
- _nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, (EP0_AUTO | EP0_BCLR));
- _nbu2ss_writel(&udc->p_regs->EP0_INT_ENA, EP0_INT_EN_BIT);
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- int status)
-{
- struct nbu2ss_req *req;
-
- /* Endpoint Disable */
- _nbu2ss_epn_exit(udc, ep);
-
- /* DMA Disable */
- _nbu2ss_ep_dma_exit(udc, ep);
-
- if (list_empty(&ep->queue))
- return 0;
-
- /* called with irqs blocked */
- list_for_each_entry(req, &ep->queue, queue) {
- _nbu2ss_ep_done(ep, req, status);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_quiesce(struct nbu2ss_udc *udc)
-{
- struct nbu2ss_ep *ep;
-
- udc->gadget.speed = USB_SPEED_UNKNOWN;
-
- _nbu2ss_nuke(udc, &udc->ep[0], -ESHUTDOWN);
-
- /* Endpoint n */
- list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
- _nbu2ss_nuke(udc, ep, -ESHUTDOWN);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_pullup(struct nbu2ss_udc *udc, int is_on)
-{
- u32 reg_dt;
-
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- if (is_on) {
- /* D+ Pullup */
- if (udc->driver) {
- reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL)
- | PUE2) & ~(u32)CONNECTB;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
- }
-
- } else {
- /* D+ Pulldown */
- reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL) | CONNECTB)
- & ~(u32)PUE2;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_fifo_flush(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- struct fc_regs *p = udc->p_regs;
-
- if (udc->vbus_active == 0)
- return;
-
- if (ep->epnum == 0) {
- /* EP0 */
- _nbu2ss_bitset(&p->EP0_CONTROL, EP0_BCLR);
-
- } else {
- /* EPn */
- _nbu2ss_ep_dma_abort(udc, ep);
- _nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPn_BCLR);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
-{
- int waitcnt = 0;
-
- if (udc->udc_enabled)
- return 0;
-
- /*
- Reset
- */
- _nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
- udelay(EPC_RST_DISABLE_TIME); /* 1us wait */
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, DIRPD);
- mdelay(EPC_DIRPD_DISABLE_TIME); /* 1ms wait */
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
-
- _nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE);
-
- _nbu2ss_writel(&udc->p_regs->AHBMCTR,
- HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
-
- while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) {
- waitcnt++;
- udelay(1); /* 1us wait */
- if (waitcnt == EPC_PLL_LOCK_COUNT) {
- dev_err(udc->dev, "*** Reset Cancel failed\n");
- return -EINVAL;
- }
- }
-
- _nbu2ss_bitset(&udc->p_regs->UTMI_CHARACTER_1, USB_SQUSET);
-
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, (INT_SEL | SOF_RCV));
-
- /* EP0 */
- _nbu2ss_ep0_enable(udc);
-
- /* USB Interrupt Enable */
- _nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, USB_INT_EN_BIT);
-
- udc->udc_enabled = TRUE;
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_reset_controller(struct nbu2ss_udc *udc)
-{
- _nbu2ss_bitset(&udc->p_regs->EPCTR, EPC_RST);
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_disable_controller(struct nbu2ss_udc *udc)
-{
- if (udc->udc_enabled) {
- udc->udc_enabled = FALSE;
- _nbu2ss_reset_controller(udc);
- _nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
-{
- int nret;
- u32 reg_dt;
-
- /* chattering */
- mdelay(VBUS_CHATTERING_MDELAY); /* wait (ms) */
-
- /* VBUS ON check */
- reg_dt = gpio_get_value(VBUS_VALUE);
- if (reg_dt == 0) {
-
- udc->linux_suspended = 0;
-
- _nbu2ss_reset_controller(udc);
- dev_info(udc->dev, " ----- VBUS OFF\n");
-
- if (udc->vbus_active == 1) {
- /* VBUS OFF */
- udc->vbus_active = 0;
- if (udc->usb_suspended) {
- udc->usb_suspended = 0;
- /* _nbu2ss_reset_controller(udc); */
- }
- udc->devstate = USB_STATE_NOTATTACHED;
-
- _nbu2ss_quiesce(udc);
- if (udc->driver) {
- spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
- spin_lock(&udc->lock);
- }
-
- _nbu2ss_disable_controller(udc);
- }
- } else {
- mdelay(5); /* wait (5ms) */
- reg_dt = gpio_get_value(VBUS_VALUE);
- if (reg_dt == 0)
- return;
-
- dev_info(udc->dev, " ----- VBUS ON\n");
-
- if (udc->linux_suspended)
- return;
-
- if (udc->vbus_active == 0) {
- /* VBUS ON */
- udc->vbus_active = 1;
- udc->devstate = USB_STATE_POWERED;
-
- nret = _nbu2ss_enable_controller(udc);
- if (nret < 0) {
- _nbu2ss_disable_controller(udc);
- udc->vbus_active = 0;
- return;
- }
-
- _nbu2ss_pullup(udc, 1);
-
-#ifdef UDC_DEBUG_DUMP
- _nbu2ss_dump_register(udc);
-#endif /* UDC_DEBUG_DUMP */
-
- } else {
- if (udc->devstate == USB_STATE_POWERED)
- _nbu2ss_pullup(udc, 1);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_bus_reset(struct nbu2ss_udc *udc)
-{
- udc->devstate = USB_STATE_DEFAULT;
- udc->remote_wakeup = 0;
-
- _nbu2ss_quiesce(udc);
-
- udc->ep0state = EP0_IDLE;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_usb_resume(struct nbu2ss_udc *udc)
-{
- if (udc->usb_suspended == 1) {
- udc->usb_suspended = 0;
- if (udc->driver && udc->driver->resume) {
- spin_unlock(&udc->lock);
- udc->driver->resume(&udc->gadget);
- spin_lock(&udc->lock);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_usb_suspend(struct nbu2ss_udc *udc)
-{
- u32 reg_dt;
-
- if (udc->usb_suspended == 0) {
- reg_dt = gpio_get_value(VBUS_VALUE);
-
- if (reg_dt == 0)
- return;
-
- udc->usb_suspended = 1;
- if (udc->driver && udc->driver->suspend) {
- spin_unlock(&udc->lock);
- udc->driver->suspend(&udc->gadget);
- spin_lock(&udc->lock);
- }
-
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, SUSPEND);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* VBUS (GPIO153) Interrupt */
-static irqreturn_t _nbu2ss_vbus_irq(int irq, void *_udc)
-{
- struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
-
- spin_lock(&udc->lock);
- _nbu2ss_check_vbus(udc);
- spin_unlock(&udc->lock);
-
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Interrupt (udc) */
-static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
-{
- u8 suspend_flag = 0;
- u32 status;
- u32 epnum, int_bit;
-
- struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
- struct fc_regs *preg = udc->p_regs;
-
- if (gpio_get_value(VBUS_VALUE) == 0) {
- _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
- _nbu2ss_writel(&preg->USB_INT_ENA, 0);
- return IRQ_HANDLED;
- }
-
- spin_lock(&udc->lock);
-
- for (;;) {
- if (gpio_get_value(VBUS_VALUE) == 0) {
- _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
- _nbu2ss_writel(&preg->USB_INT_ENA, 0);
- status = 0;
- } else
- status = _nbu2ss_readl(&preg->USB_INT_STA);
-
- if (status == 0)
- break;
-
- _nbu2ss_writel(&preg->USB_INT_STA, ~(status & USB_INT_STA_RW));
-
- if (status & USB_RST_INT) {
- /* USB Reset */
- _nbu2ss_int_bus_reset(udc);
- }
-
- if (status & RSUM_INT) {
- /* Resume */
- _nbu2ss_int_usb_resume(udc);
- }
-
- if (status & SPND_INT) {
- /* Suspend */
- suspend_flag = 1;
- }
-
- if (status & EPn_INT) {
- /* EP INT */
- int_bit = status >> 8;
-
- for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
-
- if (0x01 & int_bit)
- _nbu2ss_ep_int(udc, epnum);
-
- int_bit >>= 1;
-
- if (int_bit == 0)
- break;
- }
- }
- }
-
- if (suspend_flag)
- _nbu2ss_int_usb_suspend(udc);
-
- spin_unlock(&udc->lock);
-
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-/* usb_ep_ops */
-static int nbu2ss_ep_enable(
- struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- u8 ep_type;
- unsigned long flags;
-
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
-
- if ((!_ep) || (!desc)) {
- pr_err(" *** %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if ((!ep) || (!ep->udc)) {
- pr_err(" *** %s, ep == NULL !!\n", __func__);
- return -EINVAL;
- }
-
- ep_type = usb_endpoint_type(desc);
- if ((ep_type == USB_ENDPOINT_XFER_CONTROL)
- || (ep_type == USB_ENDPOINT_XFER_ISOC)) {
-
- pr_err(" *** %s, bat bmAttributes\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- if ((!udc->driver)
- || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
-
- dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
- return -ESHUTDOWN;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
- ep->desc = desc;
- ep->epnum = usb_endpoint_num(desc);
- ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep->ep_type = ep_type;
- ep->wedged = 0;
- ep->halted = FALSE;
- ep->stalled = FALSE;
-
- ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
-
- /* DMA setting */
- _nbu2ss_ep_dma_init(udc, ep);
-
- /* Endpoint setting */
- _nbu2ss_ep_init(udc, ep);
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_disable(struct usb_ep *_ep)
-{
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!_ep) {
- pr_err(" *** %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if ((!ep) || (!ep->udc)) {
- pr_err("udc: *** %s, ep == NULL !!\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_nuke(udc, ep, -EINPROGRESS); /* dequeue request */
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static struct usb_request *nbu2ss_ep_alloc_request(
- struct usb_ep *ep,
- gfp_t gfp_flags)
-{
- struct nbu2ss_req *req;
-
- req = kzalloc(sizeof(*req), gfp_flags);
- if (!req)
- return 0;
-
-#ifdef USE_DMA
- req->req.dma = DMA_ADDR_INVALID;
-#endif
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_ep_free_request(
- struct usb_ep *_ep,
- struct usb_request *_req)
-{
- struct nbu2ss_req *req;
-
- if (_req) {
- req = container_of(_req, struct nbu2ss_req, req);
-
- kfree(req);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_queue(
- struct usb_ep *_ep,
- struct usb_request *_req,
- gfp_t gfp_flags)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
- bool bflag;
- int result = -EINVAL;
-
- /* catch various bogus parameters */
- if ((!_ep) || (!_req)) {
- if (!_ep)
- pr_err("udc: %s --- _ep == NULL\n", __func__);
-
- if (!_req)
- pr_err("udc: %s --- _req == NULL\n", __func__);
-
- return -EINVAL;
- }
-
- req = container_of(_req, struct nbu2ss_req, req);
- if (unlikely
- (!_req->complete || !_req->buf
- || !list_empty(&req->queue))) {
-
- if (!_req->complete)
- pr_err("udc: %s --- !_req->complete\n", __func__);
-
- if (!_req->buf)
- pr_err("udc:%s --- !_req->buf\n", __func__);
-
- if (!list_empty(&req->queue))
- pr_err("%s --- !list_empty(&req->queue)\n", __func__);
-
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- udc = ep->udc;
-
- if (udc->vbus_active == 0) {
- dev_info(udc->dev, "Can't ep_queue (VBUS OFF)\n");
- return -ESHUTDOWN;
- }
-
- if (unlikely(!udc->driver)) {
- dev_err(udc->dev, "%s, bogus device state %p\n", __func__,
- udc->driver);
- return -ESHUTDOWN;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
-#ifdef USE_DMA
- if ((u32)req->req.buf & 0x3)
- req->unaligned = TRUE;
- else
- req->unaligned = FALSE;
-
- if (req->unaligned) {
- if (!ep->virt_buf)
- ep->virt_buf = (u8 *)dma_alloc_coherent(
- NULL, PAGE_SIZE,
- &ep->phys_buf, GFP_ATOMIC | GFP_DMA);
- if (ep->epnum > 0) {
- if (ep->direct == USB_DIR_IN)
- memcpy(ep->virt_buf, req->req.buf,
- req->req.length);
- }
- }
-
- if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) &&
- (req->req.dma != 0))
- _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT);
-#endif
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- bflag = list_empty(&ep->queue);
- list_add_tail(&req->queue, &ep->queue);
-
- if ((bflag != FALSE) && (ep->stalled == FALSE)) {
-
- result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
- if (result < 0) {
- dev_err(udc->dev, " *** %s, result = %d\n", __func__,
- result);
- list_del(&req->queue);
- } else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) {
-#ifdef USE_DMA
- if (req->req.length < 4 &&
- req->req.length == req->req.actual)
-#else
- if (req->req.length == req->req.actual)
-#endif
- _nbu2ss_ep_done(ep, req, result);
- }
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_dequeue(
- struct usb_ep *_ep,
- struct usb_request *_req)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- /* catch various bogus parameters */
- if ((!_ep) || (!_req)) {
- /* pr_err("%s, bad param(1)\n", __func__); */
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep) {
- pr_err("%s, ep == NULL !!\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (!udc)
- return -EINVAL;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- /* make sure it's actually queued on this endpoint */
- list_for_each_entry(req, &ep->queue, queue) {
- if (&req->req == _req)
- break;
- }
- if (&req->req != _req) {
- spin_unlock_irqrestore(&udc->lock, flags);
- pr_debug("%s no queue(EINVAL)\n", __func__);
- return -EINVAL;
- }
-
- _nbu2ss_ep_done(ep, req, -ECONNRESET);
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_set_halt(struct usb_ep *_ep, int value)
-{
- u8 ep_adrs;
- unsigned long flags;
-
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
-
- if (!_ep) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep) {
- pr_err("%s, bad ep\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, " *** %s, bad udc\n", __func__);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
- ep_adrs = ep->epnum | ep->direct;
- if (value == 0) {
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, value);
- ep->stalled = FALSE;
- } else {
- if (list_empty(&ep->queue))
- _nbu2ss_epn_set_stall(udc, ep);
- else
- ep->stalled = TRUE;
- }
-
- if (value == 0)
- ep->wedged = 0;
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-static int nbu2ss_ep_set_wedge(struct usb_ep *_ep)
-{
- return nbu2ss_ep_set_halt(_ep, 1);
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
-{
- u32 data;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
- struct fc_regs *preg;
-
- if (!_ep) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep) {
- pr_err("%s, bad ep\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, "%s, bad udc\n", __func__);
- return -EINVAL;
- }
-
- preg = udc->p_regs;
-
- data = gpio_get_value(VBUS_VALUE);
- if (data == 0)
- return -EINVAL;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- if (ep->epnum == 0) {
- data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
-
- } else {
- data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_LEN_DCNT)
- & EPn_LDATA;
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
-{
- u32 data;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!_ep) {
- pr_err("udc: %s, bad param\n", __func__);
- return;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep) {
- pr_err("udc: %s, bad ep\n", __func__);
- return;
- }
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, "%s, bad udc\n", __func__);
- return;
- }
-
- data = gpio_get_value(VBUS_VALUE);
- if (data == 0)
- return;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_fifo_flush(udc, ep);
- spin_unlock_irqrestore(&udc->lock, flags);
-}
-
-/*-------------------------------------------------------------------------*/
-static struct usb_ep_ops nbu2ss_ep_ops = {
- .enable = nbu2ss_ep_enable,
- .disable = nbu2ss_ep_disable,
-
- .alloc_request = nbu2ss_ep_alloc_request,
- .free_request = nbu2ss_ep_free_request,
-
- .queue = nbu2ss_ep_queue,
- .dequeue = nbu2ss_ep_dequeue,
-
- .set_halt = nbu2ss_ep_set_halt,
- .set_wedge = nbu2ss_ep_set_wedge,
-
- .fifo_status = nbu2ss_ep_fifo_status,
- .fifo_flush = nbu2ss_ep_fifo_flush,
-};
-
-/*-------------------------------------------------------------------------*/
-/* usb_gadget_ops */
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
-{
- u32 data;
- struct nbu2ss_udc *udc;
-
- if (!pgadget) {
- pr_err("udc: %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
- if (!udc) {
- dev_err(&pgadget->dev, "%s, udc == NULL\n", __func__);
- return -EINVAL;
- }
-
- data = gpio_get_value(VBUS_VALUE);
- if (data == 0)
- return -EINVAL;
-
- data = _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
-
- return data;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget)
-{
- int i;
- u32 data;
-
- struct nbu2ss_udc *udc;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
- if (!udc) {
- dev_err(&pgadget->dev, "%s, udc == NULL\n", __func__);
- return -EINVAL;
- }
-
- data = gpio_get_value(VBUS_VALUE);
- if (data == 0) {
- dev_warn(&pgadget->dev, "VBUS LEVEL = %d\n", data);
- return -EINVAL;
- }
-
- _nbu2ss_bitset(&udc->p_regs->EPCTR, PLL_RESUME);
-
- for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
- data = _nbu2ss_readl(&udc->p_regs->EPCTR);
-
- if (data & PLL_LOCK)
- break;
- }
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, PLL_RESUME);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
- int is_selfpowered)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- spin_lock_irqsave(&udc->lock, flags);
- pgadget->is_selfpowered = (is_selfpowered != 0);
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
-{
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned mA)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- spin_lock_irqsave(&udc->lock, flags);
- udc->mA = mA;
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- if (!udc->driver) {
- pr_warn("%s, Not Regist Driver\n", __func__);
- return -EINVAL;
- }
-
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_pullup(udc, is_on);
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_ioctl(
- struct usb_gadget *pgadget,
- unsigned code,
- unsigned long param)
-{
- return 0;
-}
-
-static const struct usb_gadget_ops nbu2ss_gadget_ops = {
- .get_frame = nbu2ss_gad_get_frame,
- .wakeup = nbu2ss_gad_wakeup,
- .set_selfpowered = nbu2ss_gad_set_selfpowered,
- .vbus_session = nbu2ss_gad_vbus_session,
- .vbus_draw = nbu2ss_gad_vbus_draw,
- .pullup = nbu2ss_gad_pullup,
- .ioctl = nbu2ss_gad_ioctl,
-};
-
-static const struct {
- const char *name;
- const struct usb_ep_caps caps;
-} ep_info[NUM_ENDPOINTS] = {
-#define EP_INFO(_name, _caps) \
- { \
- .name = _name, \
- .caps = _caps, \
- }
-
- EP_INFO("ep0",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep1-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep2-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep3in-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep4-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep5-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep6-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep7-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep8in-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep9-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epa-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epb-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epc-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epdin-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
-
-#undef EP_INFO
-};
-
-/*-------------------------------------------------------------------------*/
-static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
-{
- int i;
-
- INIT_LIST_HEAD(&udc->gadget.ep_list);
- udc->gadget.ep0 = &udc->ep[0].ep;
-
- for (i = 0; i < NUM_ENDPOINTS; i++) {
- struct nbu2ss_ep *ep = &udc->ep[i];
-
- ep->udc = udc;
- ep->desc = NULL;
-
- ep->ep.driver_data = NULL;
- ep->ep.name = ep_info[i].name;
- ep->ep.caps = ep_info[i].caps;
- ep->ep.ops = &nbu2ss_ep_ops;
-
- usb_ep_set_maxpacket_limit(&ep->ep,
- i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
-
- list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
- INIT_LIST_HEAD(&ep->queue);
- }
-
- list_del_init(&udc->ep[0].ep.ep_list);
-}
-
-/*-------------------------------------------------------------------------*/
-/* platform_driver */
-static int __init nbu2ss_drv_contest_init(
- struct platform_device *pdev,
- struct nbu2ss_udc *udc)
-{
- spin_lock_init(&udc->lock);
- udc->dev = &pdev->dev;
-
- udc->gadget.is_selfpowered = 1;
- udc->devstate = USB_STATE_NOTATTACHED;
- udc->pdev = pdev;
- udc->mA = 0;
-
- udc->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-
- /* init Endpoint */
- nbu2ss_drv_ep_init(udc);
-
- /* init Gadget */
- udc->gadget.ops = &nbu2ss_gadget_ops;
- udc->gadget.ep0 = &udc->ep[0].ep;
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- udc->gadget.name = driver_name;
- /* udc->gadget.is_dualspeed = 1; */
-
- device_initialize(&udc->gadget.dev);
-
- dev_set_name(&udc->gadget.dev, "gadget");
- udc->gadget.dev.parent = &pdev->dev;
- udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
-
- return 0;
-}
-
-/*
- * probe - binds to the platform device
- */
-static int nbu2ss_drv_probe(struct platform_device *pdev)
-{
- int status = -ENODEV;
- struct nbu2ss_udc *udc;
- struct resource *r;
- int irq;
- void __iomem *mmio_base;
-
- udc = &udc_controller;
- memset(udc, 0, sizeof(struct nbu2ss_udc));
-
- platform_set_drvdata(pdev, udc);
-
- /* require I/O memory and IRQ to be provided as resources */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(mmio_base))
- return PTR_ERR(mmio_base);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
- return irq;
- }
- status = devm_request_irq(&pdev->dev, irq, _nbu2ss_udc_irq,
- 0, driver_name, udc);
-
- /* IO Memory */
- udc->p_regs = (struct fc_regs *)mmio_base;
-
- /* USB Function Controller Interrupt */
- if (status != 0) {
- dev_err(udc->dev, "request_irq(USB_UDC_IRQ_1) failed\n");
- return status;
- }
-
- /* Driver Initialization */
- status = nbu2ss_drv_contest_init(pdev, udc);
- if (status < 0) {
- /* Error */
- return status;
- }
-
- /* VBUS Interrupt */
- irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH);
- status = request_irq(INT_VBUS,
- _nbu2ss_vbus_irq,
- IRQF_SHARED,
- driver_name,
- udc);
-
- if (status != 0) {
- dev_err(udc->dev, "request_irq(INT_VBUS) failed\n");
- return status;
- }
-
- return status;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_drv_shutdown(struct platform_device *pdev)
-{
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return;
-
- _nbu2ss_disable_controller(udc);
-}
-
-/*-------------------------------------------------------------------------*/
-static int __exit nbu2ss_drv_remove(struct platform_device *pdev)
-{
- struct nbu2ss_udc *udc;
- struct nbu2ss_ep *ep;
- int i;
-
- udc = &udc_controller;
-
- for (i = 0; i < NUM_ENDPOINTS; i++) {
- ep = &udc->ep[i];
- if (ep->virt_buf)
- dma_free_coherent(NULL, PAGE_SIZE,
- (void *)ep->virt_buf, ep->phys_buf);
- }
-
- /* Interrupt Handler - Release */
- free_irq(INT_VBUS, udc);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return 0;
-
- if (udc->vbus_active) {
- udc->vbus_active = 0;
- udc->devstate = USB_STATE_NOTATTACHED;
- udc->linux_suspended = 1;
-
- if (udc->usb_suspended) {
- udc->usb_suspended = 0;
- _nbu2ss_reset_controller(udc);
- }
-
- _nbu2ss_quiesce(udc);
- }
- _nbu2ss_disable_controller(udc);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_drv_resume(struct platform_device *pdev)
-{
- u32 data;
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return 0;
-
- data = gpio_get_value(VBUS_VALUE);
- if (data) {
- udc->vbus_active = 1;
- udc->devstate = USB_STATE_POWERED;
- _nbu2ss_enable_controller(udc);
- _nbu2ss_pullup(udc, 1);
- }
-
- udc->linux_suspended = 0;
-
- return 0;
-}
-
-static struct platform_driver udc_driver = {
- .probe = nbu2ss_drv_probe,
- .shutdown = nbu2ss_drv_shutdown,
- .remove = __exit_p(nbu2ss_drv_remove),
- .suspend = nbu2ss_drv_suspend,
- .resume = nbu2ss_drv_resume,
- .driver = {
- .name = driver_name,
- },
-};
-
-module_platform_driver(udc_driver);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Renesas Electronics Corporation");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
deleted file mode 100644
index 4a2cc38de7b3..000000000000
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * EMXX FCD (Function Controller Driver) for USB.
- *
- * Copyright (C) 2010 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _LINUX_EMXX_H
-#define _LINUX_EMXX_H
-
-/*---------------------------------------------------------------------------*/
-/*----------------- Default undef */
-#if 0
-#define DEBUG
-#define UDC_DEBUG_DUMP
-#endif
-
-/*----------------- Default define */
-#define USE_DMA 1
-#define USE_SUSPEND_WAIT 1
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-/*------------ Board dependence(Resource) */
-#define VBUS_VALUE GPIO_VBUS
-
-/* below hacked up for staging integration */
-#define GPIO_VBUS 0 /* GPIO_P153 on KZM9D */
-#define INT_VBUS 0 /* IRQ for GPIO_P153 */
-
-/*------------ Board dependence(Wait) */
-
-/* CHATTERING wait time ms */
-#define VBUS_CHATTERING_MDELAY 1
-/* DMA Abort wait time ms */
-#define DMA_DISABLE_TIME 10
-
-/*------------ Controller dependence */
-#define NUM_ENDPOINTS 14 /* Endpoint */
-#define REG_EP_NUM 15 /* Endpoint Register */
-#define DMA_MAX_COUNT 256 /* DMA Block */
-
-#define EPC_RST_DISABLE_TIME 1 /* 1 usec */
-#define EPC_DIRPD_DISABLE_TIME 1 /* 1 msec */
-#define EPC_PLL_LOCK_COUNT 1000 /* 1000 */
-#define IN_DATA_EMPTY_COUNT 1000 /* 1000 */
-
-#define CHATGER_TIME 700 /* 700msec */
-#define USB_SUSPEND_TIME 2000 /* 2 sec */
-
-/* U2F FLAG */
-#define U2F_ENABLE 1
-#define U2F_DISABLE 0
-
-/*------- BIT */
-#define BIT00 0x00000001
-#define BIT01 0x00000002
-#define BIT02 0x00000004
-#define BIT03 0x00000008
-#define BIT04 0x00000010
-#define BIT05 0x00000020
-#define BIT06 0x00000040
-#define BIT07 0x00000080
-#define BIT08 0x00000100
-#define BIT09 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-#define TEST_FORCE_ENABLE (BIT18+BIT16)
-
-#define INT_SEL BIT10
-#define CONSTFS BIT09
-#define SOF_RCV BIT08
-#define RSUM_IN BIT07
-#define SUSPEND BIT06
-#define CONF BIT05
-#define DEFAULT BIT04
-#define CONNECTB BIT03
-#define PUE2 BIT02
-
-#define MAX_TEST_MODE_NUM 0x05
-#define TEST_MODE_SHIFT 16
-
-/*------- (0x0004) USB Status Register */
-#define SPEED_MODE BIT06
-#define HIGH_SPEED BIT06
-
-#define CONF BIT05
-#define DEFAULT BIT04
-#define USB_RST BIT03
-#define SPND_OUT BIT02
-#define RSUM_OUT BIT01
-
-/*------- (0x0008) USB Address Register */
-#define USB_ADDR 0x007F0000
-#define SOF_STATUS BIT15
-#define UFRAME (BIT14+BIT13+BIT12)
-#define FRAME 0x000007FF
-
-#define USB_ADRS_SHIFT 16
-
-/*------- (0x000C) UTMI Characteristic 1 Register */
-#define SQUSET (BIT07+BIT06+BIT05+BIT04)
-
-#define USB_SQUSET (BIT06+BIT05+BIT04)
-
-/*------- (0x0010) TEST Control Register */
-#define FORCEHS BIT02
-#define CS_TESTMODEEN BIT01
-#define LOOPBACK BIT00
-
-/*------- (0x0018) Setup Data 0 Register */
-/*------- (0x001C) Setup Data 1 Register */
-
-/*------- (0x0020) USB Interrupt Status Register */
-#define EPn_INT 0x00FFFF00
-#define EP15_INT BIT23
-#define EP14_INT BIT22
-#define EP13_INT BIT21
-#define EP12_INT BIT20
-#define EP11_INT BIT19
-#define EP10_INT BIT18
-#define EP9_INT BIT17
-#define EP8_INT BIT16
-#define EP7_INT BIT15
-#define EP6_INT BIT14
-#define EP5_INT BIT13
-#define EP4_INT BIT12
-#define EP3_INT BIT11
-#define EP2_INT BIT10
-#define EP1_INT BIT09
-#define EP0_INT BIT08
-#define SPEED_MODE_INT BIT06
-#define SOF_ERROR_INT BIT05
-#define SOF_INT BIT04
-#define USB_RST_INT BIT03
-#define SPND_INT BIT02
-#define RSUM_INT BIT01
-
-#define USB_INT_STA_RW 0x7E
-
-/*------- (0x0024) USB Interrupt Enable Register */
-#define EP15_0_EN 0x00FFFF00
-#define EP15_EN BIT23
-#define EP14_EN BIT22
-#define EP13_EN BIT21
-#define EP12_EN BIT20
-#define EP11_EN BIT19
-#define EP10_EN BIT18
-#define EP9_EN BIT17
-#define EP8_EN BIT16
-#define EP7_EN BIT15
-#define EP6_EN BIT14
-#define EP5_EN BIT13
-#define EP4_EN BIT12
-#define EP3_EN BIT11
-#define EP2_EN BIT10
-#define EP1_EN BIT09
-#define EP0_EN BIT08
-#define SPEED_MODE_EN BIT06
-#define SOF_ERROR_EN BIT05
-#define SOF_EN BIT04
-#define USB_RST_EN BIT03
-#define SPND_EN BIT02
-#define RSUM_EN BIT01
-
-#define USB_INT_EN_BIT \
- (EP0_EN|SPEED_MODE_EN|USB_RST_EN|SPND_EN|RSUM_EN)
-
-/*------- (0x0028) EP0 Control Register */
-#define EP0_STGSEL BIT18
-#define EP0_OVERSEL BIT17
-#define EP0_AUTO BIT16
-#define EP0_PIDCLR BIT09
-#define EP0_BCLR BIT08
-#define EP0_DEND BIT07
-#define EP0_DW (BIT06+BIT05)
-#define EP0_DW4 0
-#define EP0_DW3 (BIT06+BIT05)
-#define EP0_DW2 BIT06
-#define EP0_DW1 BIT05
-
-#define EP0_INAK_EN BIT04
-#define EP0_PERR_NAK_CLR BIT03
-#define EP0_STL BIT02
-#define EP0_INAK BIT01
-#define EP0_ONAK BIT00
-
-/*------- (0x002C) EP0 Status Register */
-#define EP0_PID BIT18
-#define EP0_PERR_NAK BIT17
-#define EP0_PERR_NAK_INT BIT16
-#define EP0_OUT_NAK_INT BIT15
-#define EP0_OUT_NULL BIT14
-#define EP0_OUT_FULL BIT13
-#define EP0_OUT_EMPTY BIT12
-#define EP0_IN_NAK_INT BIT11
-#define EP0_IN_DATA BIT10
-#define EP0_IN_FULL BIT09
-#define EP0_IN_EMPTY BIT08
-#define EP0_OUT_NULL_INT BIT07
-#define EP0_OUT_OR_INT BIT06
-#define EP0_OUT_INT BIT05
-#define EP0_IN_INT BIT04
-#define EP0_STALL_INT BIT03
-#define STG_END_INT BIT02
-#define STG_START_INT BIT01
-#define SETUP_INT BIT00
-
-#define EP0_STATUS_RW_BIT (BIT16|BIT15|BIT11|0xFF)
-
-/*------- (0x0030) EP0 Interrupt Enable Register */
-#define EP0_PERR_NAK_EN BIT16
-#define EP0_OUT_NAK_EN BIT15
-
-#define EP0_IN_NAK_EN BIT11
-
-#define EP0_OUT_NULL_EN BIT07
-#define EP0_OUT_OR_EN BIT06
-#define EP0_OUT_EN BIT05
-#define EP0_IN_EN BIT04
-#define EP0_STALL_EN BIT03
-#define STG_END_EN BIT02
-#define STG_START_EN BIT01
-#define SETUP_EN BIT00
-
-#define EP0_INT_EN_BIT \
- (EP0_OUT_OR_EN|EP0_OUT_EN|EP0_IN_EN|STG_END_EN|SETUP_EN)
-
-/*------- (0x0034) EP0 Length Register */
-#define EP0_LDATA 0x0000007F
-
-/*------- (0x0038) EP0 Read Register */
-/*------- (0x003C) EP0 Write Register */
-
-/*------- (0x0040:) EPn Control Register */
-#define EPn_EN BIT31
-#define EPn_BUF_TYPE BIT30
-#define EPn_BUF_SINGLE BIT30
-
-#define EPn_DIR0 BIT26
-#define EPn_MODE (BIT25+BIT24)
-#define EPn_BULK 0
-#define EPn_INTERRUPT BIT24
-#define EPn_ISO BIT25
-
-#define EPn_OVERSEL BIT17
-#define EPn_AUTO BIT16
-
-#define EPn_IPIDCLR BIT11
-#define EPn_OPIDCLR BIT10
-#define EPn_BCLR BIT09
-#define EPn_CBCLR BIT08
-#define EPn_DEND BIT07
-#define EPn_DW (BIT06+BIT05)
-#define EPn_DW4 0
-#define EPn_DW3 (BIT06+BIT05)
-#define EPn_DW2 BIT06
-#define EPn_DW1 BIT05
-
-#define EPn_OSTL_EN BIT04
-#define EPn_ISTL BIT03
-#define EPn_OSTL BIT02
-
-#define EPn_ONAK BIT00
-
-/*------- (0x0044:) EPn Status Register */
-#define EPn_ISO_PIDERR BIT29 /* R */
-#define EPn_OPID BIT28 /* R */
-#define EPn_OUT_NOTKN BIT27 /* R */
-#define EPn_ISO_OR BIT26 /* R */
-
-#define EPn_ISO_CRC BIT24 /* R */
-#define EPn_OUT_END_INT BIT23 /* RW */
-#define EPn_OUT_OR_INT BIT22 /* RW */
-#define EPn_OUT_NAK_ERR_INT BIT21 /* RW */
-#define EPn_OUT_STALL_INT BIT20 /* RW */
-#define EPn_OUT_INT BIT19 /* RW */
-#define EPn_OUT_NULL_INT BIT18 /* RW */
-#define EPn_OUT_FULL BIT17 /* R */
-#define EPn_OUT_EMPTY BIT16 /* R */
-
-#define EPn_IPID BIT10 /* R */
-#define EPn_IN_NOTKN BIT09 /* R */
-#define EPn_ISO_UR BIT08 /* R */
-#define EPn_IN_END_INT BIT07 /* RW */
-
-#define EPn_IN_NAK_ERR_INT BIT05 /* RW */
-#define EPn_IN_STALL_INT BIT04 /* RW */
-#define EPn_IN_INT BIT03 /* RW */
-#define EPn_IN_DATA BIT02 /* R */
-#define EPn_IN_FULL BIT01 /* R */
-#define EPn_IN_EMPTY BIT00 /* R */
-
-#define EPn_INT_EN \
- (EPn_OUT_END_INT|EPn_OUT_INT|EPn_IN_END_INT|EPn_IN_INT)
-
-/*------- (0x0048:) EPn Interrupt Enable Register */
-#define EPn_OUT_END_EN BIT23 /* RW */
-#define EPn_OUT_OR_EN BIT22 /* RW */
-#define EPn_OUT_NAK_ERR_EN BIT21 /* RW */
-#define EPn_OUT_STALL_EN BIT20 /* RW */
-#define EPn_OUT_EN BIT19 /* RW */
-#define EPn_OUT_NULL_EN BIT18 /* RW */
-
-#define EPn_IN_END_EN BIT07 /* RW */
-
-#define EPn_IN_NAK_ERR_EN BIT05 /* RW */
-#define EPn_IN_STALL_EN BIT04 /* RW */
-#define EPn_IN_EN BIT03 /* RW */
-
-/*------- (0x004C:) EPn Interrupt Enable Register */
-#define EPn_STOP_MODE BIT11
-#define EPn_DEND_SET BIT10
-#define EPn_BURST_SET BIT09
-#define EPn_STOP_SET BIT08
-
-#define EPn_DMA_EN BIT04
-
-#define EPn_DMAMODE0 BIT00
-
-/*------- (0x0050:) EPn MaxPacket & BaseAddress Register */
-#define EPn_BASEAD 0x1FFF0000
-#define EPn_MPKT 0x000007FF
-
-/*------- (0x0054:) EPn Length & DMA Count Register */
-#define EPn_DMACNT 0x01FF0000
-#define EPn_LDATA 0x000007FF
-
-/*------- (0x0058:) EPn Read Register */
-/*------- (0x005C:) EPn Write Register */
-
-/*------- (0x1000) AHBSCTR Register */
-#define WAIT_MODE BIT00
-
-/*------- (0x1004) AHBMCTR Register */
-#define ARBITER_CTR BIT31 /* RW */
-#define MCYCLE_RST BIT12 /* RW */
-
-#define ENDIAN_CTR (BIT09+BIT08) /* RW */
-#define ENDIAN_BYTE_SWAP BIT09
-#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
-
-#define HBUSREQ_MODE BIT05 /* RW */
-#define HTRANS_MODE BIT04 /* RW */
-
-#define WBURST_TYPE BIT02 /* RW */
-#define BURST_TYPE (BIT01+BIT00) /* RW */
-#define BURST_MAX_16 0
-#define BURST_MAX_8 BIT00
-#define BURST_MAX_4 BIT01
-#define BURST_SINGLE BURST_TYPE
-
-/*------- (0x1008) AHBBINT Register */
-#define DMA_ENDINT 0xFFFE0000 /* RW */
-
-#define AHB_VBUS_INT BIT13 /* RW */
-
-#define MBUS_ERRINT BIT06 /* RW */
-
-#define SBUS_ERRINT0 BIT04 /* RW */
-#define ERR_MASTER 0x0000000F /* R */
-
-/*------- (0x100C) AHBBINTEN Register */
-#define DMA_ENDINTEN 0xFFFE0000 /* RW */
-
-#define VBUS_INTEN BIT13 /* RW */
-
-#define MBUS_ERRINTEN BIT06 /* RW */
-
-#define SBUS_ERRINT0EN BIT04 /* RW */
-
-/*------- (0x1010) EPCTR Register */
-#define DIRPD BIT12 /* RW */
-
-#define VBUS_LEVEL BIT08 /* R */
-
-#define PLL_RESUME BIT05 /* RW */
-#define PLL_LOCK BIT04 /* R */
-
-#define EPC_RST BIT00 /* RW */
-
-/*------- (0x1014) USBF_EPTEST Register */
-#define LINESTATE (BIT09+BIT08) /* R */
-#define DM_LEVEL BIT09 /* R */
-#define DP_LEVEL BIT08 /* R */
-
-#define PHY_TST BIT01 /* RW */
-#define PHY_TSTCLK BIT00 /* RW */
-
-/*------- (0x1020) USBSSVER Register */
-#define AHBB_VER 0x00FF0000 /* R */
-#define EPC_VER 0x0000FF00 /* R */
-#define SS_VER 0x000000FF /* R */
-
-/*------- (0x1024) USBSSCONF Register */
-#define EP_AVAILABLE 0xFFFF0000 /* R */
-#define DMA_AVAILABLE 0x0000FFFF /* R */
-
-/*------- (0x1110:) EPnDCR1 Register */
-#define DCR1_EPn_DMACNT 0x00FF0000 /* RW */
-
-#define DCR1_EPn_DIR0 BIT01 /* RW */
-#define DCR1_EPn_REQEN BIT00 /* RW */
-
-/*------- (0x1114:) EPnDCR2 Register */
-#define DCR2_EPn_LMPKT 0x07FF0000 /* RW */
-
-#define DCR2_EPn_MPKT 0x000007FF /* RW */
-
-/*------- (0x1118:) EPnTADR Register */
-#define EPn_TADR 0xFFFFFFFF /* RW */
-
-/*===========================================================================*/
-/* Struct */
-/*------- ep_regs */
-struct ep_regs {
- u32 EP_CONTROL; /* EP Control */
- u32 EP_STATUS; /* EP Status */
- u32 EP_INT_ENA; /* EP Interrupt Enable */
- u32 EP_DMA_CTRL; /* EP DMA Control */
- u32 EP_PCKT_ADRS; /* EP Maxpacket & BaseAddress */
- u32 EP_LEN_DCNT; /* EP Length & DMA count */
- u32 EP_READ; /* EP Read */
- u32 EP_WRITE; /* EP Write */
-};
-
-/*------- ep_dcr */
-struct ep_dcr {
- u32 EP_DCR1; /* EP_DCR1 */
- u32 EP_DCR2; /* EP_DCR2 */
- u32 EP_TADR; /* EP_TADR */
- u32 Reserved; /* Reserved */
-};
-
-/*------- Function Registers */
-struct fc_regs {
- u32 USB_CONTROL; /* (0x0000) USB Control */
- u32 USB_STATUS; /* (0x0004) USB Status */
- u32 USB_ADDRESS; /* (0x0008) USB Address */
- u32 UTMI_CHARACTER_1; /* (0x000C) UTMI Setting */
- u32 TEST_CONTROL; /* (0x0010) TEST Control */
- u32 Reserved_14; /* (0x0014) Reserved */
- u32 SETUP_DATA0; /* (0x0018) Setup Data0 */
- u32 SETUP_DATA1; /* (0x001C) Setup Data1 */
- u32 USB_INT_STA; /* (0x0020) USB Interrupt Status */
- u32 USB_INT_ENA; /* (0x0024) USB Interrupt Enable */
- u32 EP0_CONTROL; /* (0x0028) EP0 Control */
- u32 EP0_STATUS; /* (0x002C) EP0 Status */
- u32 EP0_INT_ENA; /* (0x0030) EP0 Interrupt Enable */
- u32 EP0_LENGTH; /* (0x0034) EP0 Length */
- u32 EP0_READ; /* (0x0038) EP0 Read */
- u32 EP0_WRITE; /* (0x003C) EP0 Write */
-
- struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */
-
- u8 Reserved220[0x1000-0x220]; /* (0x0220:0x0FFF) Reserved */
-
- u32 AHBSCTR; /* (0x1000) AHBSCTR */
- u32 AHBMCTR; /* (0x1004) AHBMCTR */
- u32 AHBBINT; /* (0x1008) AHBBINT */
- u32 AHBBINTEN; /* (0x100C) AHBBINTEN */
- u32 EPCTR; /* (0x1010) EPCTR */
- u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */
-
- u8 Reserved1018[0x20-0x18]; /* (0x1018:0x101F) Reserved */
-
- u32 USBSSVER; /* (0x1020) USBSSVER */
- u32 USBSSCONF; /* (0x1024) USBSSCONF */
-
- u8 Reserved1028[0x110-0x28]; /* (0x1028:0x110F) Reserved */
-
- struct ep_dcr EP_DCR[REG_EP_NUM]; /* */
-
- u8 Reserved1200[0x1000-0x200]; /* Reserved */
-} __aligned(32);
-
-#define EP0_PACKETSIZE 64
-#define EP_PACKETSIZE 1024
-
-/* EPn RAM SIZE */
-#define D_RAM_SIZE_CTRL 64
-
-/* EPn Bulk Endpoint Max Packet Size */
-#define D_FS_RAM_SIZE_BULK 64
-#define D_HS_RAM_SIZE_BULK 512
-
-struct nbu2ss_udc;
-
-enum ep0_state {
- EP0_IDLE,
- EP0_IN_DATA_PHASE,
- EP0_OUT_DATA_PHASE,
- EP0_IN_STATUS_PHASE,
- EP0_OUT_STATUS_PAHSE,
- EP0_END_XFER,
- EP0_SUSPEND,
- EP0_STALL,
-};
-
-struct nbu2ss_req {
- struct usb_request req;
- struct list_head queue;
-
- u32 div_len;
- bool dma_flag;
- bool zero;
-
- bool unaligned;
-
- unsigned mapped:1;
-};
-
-struct nbu2ss_ep {
- struct usb_ep ep;
- struct list_head queue;
-
- struct nbu2ss_udc *udc;
-
- const struct usb_endpoint_descriptor *desc;
-
- u8 epnum;
- u8 direct;
- u8 ep_type;
-
- unsigned wedged:1;
- unsigned halted:1;
- unsigned stalled:1;
-
- u8 *virt_buf;
- dma_addr_t phys_buf;
-};
-
-struct nbu2ss_udc {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct platform_device *pdev;
- struct device *dev;
- spinlock_t lock;
- struct completion *pdone;
-
- enum ep0_state ep0state;
- enum usb_device_state devstate;
- struct usb_ctrlrequest ctrl;
- struct nbu2ss_req ep0_req;
- u8 ep0_buf[EP0_PACKETSIZE];
-
- struct nbu2ss_ep ep[NUM_ENDPOINTS];
-
- unsigned softconnect:1;
- unsigned vbus_active:1;
- unsigned linux_suspended:1;
- unsigned linux_resume:1;
- unsigned usb_suspended:1;
- unsigned remote_wakeup:1;
- unsigned udc_enabled:1;
-
- unsigned mA;
-
- u32 curr_config; /* Current Configuration Number */
-
- struct fc_regs *p_regs;
-};
-
-/* USB register access structure */
-union usb_reg_access {
- struct {
- unsigned char DATA[4];
- } byte;
- unsigned int dw;
-};
-
-/*-------------------------------------------------------------------------*/
-
-#endif /* _LINUX_EMXX_H */
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index d473010fa474..c2655768209a 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,188 +1,173 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
depends on FB && SPI
+ depends on FB_DEVICE
+ depends on BACKLIGHT_CLASS_DEVICE
depends on GPIOLIB || COMPILE_TEST
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
select FB_BACKLIGHT
+ select FB_SYSMEM_HELPERS_DEFERRED
+
+if FB_TFT
config FB_TFT_AGM1264K_FL
tristate "FB driver for the AGM1264K-FL LCD display"
- depends on FB_TFT
help
Framebuffer support for the AGM1264K-FL LCD display (two Samsung KS0108 compatible chips)
config FB_TFT_BD663474
tristate "FB driver for the BD663474 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for BD663474
config FB_TFT_HX8340BN
tristate "FB driver for the HX8340BN LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8340BN
config FB_TFT_HX8347D
tristate "FB driver for the HX8347D LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8347D
config FB_TFT_HX8353D
tristate "FB driver for the HX8353D LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8353D
config FB_TFT_HX8357D
tristate "FB driver for the HX8357D LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8357D
config FB_TFT_ILI9163
tristate "FB driver for the ILI9163 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9163
config FB_TFT_ILI9320
tristate "FB driver for the ILI9320 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9320
config FB_TFT_ILI9325
tristate "FB driver for the ILI9325 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9325
config FB_TFT_ILI9340
tristate "FB driver for the ILI9340 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9340
config FB_TFT_ILI9341
tristate "FB driver for the ILI9341 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9341
config FB_TFT_ILI9481
tristate "FB driver for the ILI9481 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9481
config FB_TFT_ILI9486
tristate "FB driver for the ILI9486 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9486
config FB_TFT_PCD8544
tristate "FB driver for the PCD8544 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for PCD8544
config FB_TFT_RA8875
- tristate "FB driver for the RA8875 LCD Controller"
- depends on FB_TFT
+ tristate "FB driver for the RA8875 LCD Controller"
help
Generic Framebuffer support for RA8875
config FB_TFT_S6D02A1
tristate "FB driver for the S6D02A1 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for S6D02A1
config FB_TFT_S6D1121
tristate "FB driver for the S6D1211 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for S6D1121
+config FB_TFT_SEPS525
+ tristate "FB driver for the SEPS525 LCD Controller"
+ help
+ Generic Framebuffer support for SEPS525
+ Say Y if you have such a display that utilizes this controller.
+
+config FB_TFT_SH1106
+ tristate "FB driver for the SH1106 OLED Controller"
+ help
+ Framebuffer support for SH1106
+
config FB_TFT_SSD1289
tristate "FB driver for the SSD1289 LCD Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1289
+config FB_TFT_SSD1305
+ tristate "FB driver for the SSD1305 OLED Controller"
+ help
+ Framebuffer support for SSD1305
+
config FB_TFT_SSD1306
tristate "FB driver for the SSD1306 OLED Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1306
config FB_TFT_SSD1331
tristate "FB driver for the SSD1331 LCD Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1331
config FB_TFT_SSD1351
tristate "FB driver for the SSD1351 LCD Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1351
config FB_TFT_ST7735R
tristate "FB driver for the ST7735R LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ST7735R
+config FB_TFT_ST7789V
+ tristate "FB driver for the ST7789V LCD Controller"
+ help
+ This enables generic framebuffer support for the Sitronix ST7789V
+ display controller. The controller is intended for small color
+ displays with a resolution of up to 320x240 pixels.
+
+ Say Y if you have such a display that utilizes this controller.
+
config FB_TFT_TINYLCD
tristate "FB driver for tinylcd.com display"
- depends on FB_TFT
help
Custom Framebuffer support for tinylcd.com display
config FB_TFT_TLS8204
tristate "FB driver for the TLS8204 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for TLS8204
config FB_TFT_UC1611
tristate "FB driver for the UC1611 LCD controller"
- depends on FB_TFT
help
Generic Framebuffer support for UC1611
config FB_TFT_UC1701
tristate "FB driver for the UC1701 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for UC1701
config FB_TFT_UPD161704
tristate "FB driver for the uPD161704 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for uPD161704
-config FB_TFT_WATTEROTT
- tristate "FB driver for the WATTEROTT LCD Controller"
- depends on FB_TFT
- help
- Generic Framebuffer support for WATTEROTT
-
-config FB_FLEX
- tristate "Generic FB driver for TFT LCD displays"
- depends on FB_TFT
- help
- Generic Framebuffer support for TFT LCD displays.
-
-config FB_TFT_FBTFT_DEVICE
- tristate "Module to for adding FBTFT devices"
- depends on FB_TFT
+endif
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index b26efdc87775..e9cdf0f0a7da 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Core module
obj-$(CONFIG_FB_TFT) += fbtft.o
fbtft-y += fbtft-core.o fbtft-sysfs.o fbtft-bus.o fbtft-io.o
@@ -20,18 +21,18 @@ obj-$(CONFIG_FB_TFT_PCD8544) += fb_pcd8544.o
obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o
obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o
obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o
+obj-$(CONFIG_FB_TFT_SEPS525) += fb_seps525.o
+obj-$(CONFIG_FB_TFT_SH1106) += fb_sh1106.o
obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o
+obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1305.o
obj-$(CONFIG_FB_TFT_SSD1306) += fb_ssd1306.o
+obj-$(CONFIG_FB_TFT_SSD1325)           += fb_ssd1325.o
obj-$(CONFIG_FB_TFT_SSD1331) += fb_ssd1331.o
obj-$(CONFIG_FB_TFT_SSD1351) += fb_ssd1351.o
obj-$(CONFIG_FB_TFT_ST7735R) += fb_st7735r.o
+obj-$(CONFIG_FB_TFT_ST7789V) += fb_st7789v.o
obj-$(CONFIG_FB_TFT_TINYLCD) += fb_tinylcd.o
obj-$(CONFIG_FB_TFT_TLS8204) += fb_tls8204.o
obj-$(CONFIG_FB_TFT_UC1611) += fb_uc1611.o
obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o
obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o
-obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o
-obj-$(CONFIG_FB_FLEX) += flexfb.o
-
-# Device modules
-obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO
new file mode 100644
index 000000000000..e72a08bf221c
--- /dev/null
+++ b/drivers/staging/fbtft/TODO
@@ -0,0 +1,3 @@
+* convert all these over to drm_simple_display_pipe and submit for inclusion
+ into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
+ drivers anymore.
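The TODO above asks for these panel drivers to be ported to drm_simple_display_pipe. A minimal sketch of the display-pipe glue such a port could use follows; it is illustrative only and not part of this patch. It assumes the standard in-tree DRM simple-KMS and damage helpers, and panel_enable(), panel_disable() and panel_flush() are hypothetical stand-ins for the per-controller code that currently lives in each fb_*.c init_display()/write_vmem() pair.

/*
 * Illustrative sketch only, not part of this patch.  Assumes the in-tree
 * DRM simple-KMS helpers; panel_enable(), panel_disable() and panel_flush()
 * are hypothetical placeholders for per-controller code.
 */
#include <linux/kernel.h>

#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>

/* hypothetical per-controller helpers (power up, power down, send pixels) */
static void panel_enable(struct drm_simple_display_pipe *pipe) { }
static void panel_disable(struct drm_simple_display_pipe *pipe) { }
static void panel_flush(struct drm_simple_display_pipe *pipe,
			struct drm_framebuffer *fb, struct drm_rect *clip) { }

static void sketch_pipe_enable(struct drm_simple_display_pipe *pipe,
			       struct drm_crtc_state *crtc_state,
			       struct drm_plane_state *plane_state)
{
	/* bring the controller up and push one full frame */
	panel_enable(pipe);
	panel_flush(pipe, plane_state->fb, NULL);
}

static void sketch_pipe_disable(struct drm_simple_display_pipe *pipe)
{
	panel_disable(pipe);
}

static void sketch_pipe_update(struct drm_simple_display_pipe *pipe,
			       struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_rect clip;

	/* like fbtft's deferred I/O, only transfer the damaged rectangle */
	if (drm_atomic_helper_damage_merged(old_state, state, &clip))
		panel_flush(pipe, state->fb, &clip);
}

static const struct drm_simple_display_pipe_funcs sketch_pipe_funcs = {
	.enable  = sketch_pipe_enable,
	.disable = sketch_pipe_disable,
	.update  = sketch_pipe_update,
};

static const u32 sketch_formats[] = { DRM_FORMAT_RGB565 };

/* the driver's probe path would have created 'drm' and 'connector' already */
static int sketch_register_pipe(struct drm_device *drm,
				struct drm_simple_display_pipe *pipe,
				struct drm_connector *connector)
{
	return drm_simple_display_pipe_init(drm, pipe, &sketch_pipe_funcs,
					    sketch_formats,
					    ARRAY_SIZE(sketch_formats),
					    NULL, connector);
}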
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 2a2d53c70501..207d578547cd 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -1,23 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for Two KS0108 LCD controllers in AGM1264K-FL display
*
* Copyright (C) 2014 ololoshka2871
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -86,19 +77,6 @@ static int init_display(struct fbtft_par *par)
return 0;
}
-static void reset(struct fbtft_par *par)
-{
- if (par->gpio.reset == -1)
- return;
-
- dev_dbg(par->info->device, "%s()\n", __func__);
-
- gpio_set_value(par->gpio.reset, 0);
- udelay(20);
- gpio_set_value(par->gpio.reset, 1);
- mdelay(120);
-}
-
/* Check if all necessary GPIOS defined */
static int verify_gpios(struct fbtft_par *par)
{
@@ -107,30 +85,30 @@ static int verify_gpios(struct fbtft_par *par)
dev_dbg(par->info->device,
"%s()\n", __func__);
- if (par->EPIN < 0) {
+ if (!par->EPIN) {
dev_err(par->info->device,
"Missing info about 'wr' (aka E) gpio. Aborting.\n");
return -EINVAL;
}
for (i = 0; i < 8; ++i) {
- if (par->gpio.db[i] < 0) {
+ if (!par->gpio.db[i]) {
dev_err(par->info->device,
"Missing info about 'db[%i]' gpio. Aborting.\n",
i);
return -EINVAL;
}
}
- if (par->CS0 < 0) {
+ if (!par->CS0) {
dev_err(par->info->device,
"Missing info about 'cs0' gpio. Aborting.\n");
return -EINVAL;
}
- if (par->CS1 < 0) {
+ if (!par->CS1) {
dev_err(par->info->device,
"Missing info about 'cs1' gpio. Aborting.\n");
return -EINVAL;
}
- if (par->RW < 0) {
+ if (!par->RW) {
dev_err(par->info->device,
"Missing info about 'rw' gpio. Aborting.\n");
return -EINVAL;
@@ -148,22 +126,22 @@ request_gpios_match(struct fbtft_par *par, const struct fbtft_gpio *gpio)
if (strcasecmp(gpio->name, "wr") == 0) {
/* left ks0108 E pin */
par->EPIN = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
+ return GPIOD_OUT_LOW;
} else if (strcasecmp(gpio->name, "cs0") == 0) {
/* left ks0108 controller pin */
par->CS0 = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
+ return GPIOD_OUT_HIGH;
} else if (strcasecmp(gpio->name, "cs1") == 0) {
/* right ks0108 controller pin */
par->CS1 = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
+ return GPIOD_OUT_HIGH;
}
/* if write (rw = 0) e(1->0) perform write */
/* if read (rw = 1) e(0->1) set data on D0-7*/
else if (strcasecmp(gpio->name, "rw") == 0) {
par->RW = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
+ return GPIOD_OUT_LOW;
}
return FBTFT_GPIO_NO_MATCH;
@@ -177,7 +155,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
{
va_list args;
int i, ret;
- u8 *buf = (u8 *)par->buf;
+ u8 *buf = par->buf;
if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) {
va_start(args, len);
@@ -185,9 +163,9 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
buf[i] = (u8)va_arg(args, unsigned int);
va_end(args);
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
- par->info->device, u8, buf, len, "%s: ", __func__);
- }
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device,
+ u8, buf, len, "%s: ", __func__);
+}
va_start(args, len);
@@ -203,15 +181,15 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
/* select chip */
if (*buf) {
/* cs1 */
- gpio_set_value(par->CS0, 1);
- gpio_set_value(par->CS1, 0);
+ gpiod_set_value(par->CS0, 0);
+ gpiod_set_value(par->CS1, 1);
} else {
/* cs0 */
- gpio_set_value(par->CS0, 0);
- gpio_set_value(par->CS1, 1);
+ gpiod_set_value(par->CS0, 1);
+ gpiod_set_value(par->CS1, 0);
}
- gpio_set_value(par->RS, 0); /* RS->0 (command mode) */
+ gpiod_set_value(par->RS, 0); /* RS->0 (command mode) */
len--;
if (len) {
@@ -246,7 +224,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static void
construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src,
- int xs, int xe, int y)
+ int xs, int xe, int y)
{
int x, i;
@@ -264,22 +242,53 @@ construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src,
}
}
+static void iterate_diffusion_matrix(u32 xres, u32 yres, int x,
+ int y, signed short *convert_buf,
+ signed short pixel, signed short error)
+{
+ u16 i, j;
+
+ /* diffusion matrix row */
+ for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
+ /* diffusion matrix column */
+ for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
+ signed short *write_pos;
+ signed char coeff;
+
+ /* skip pixels out of zone */
+ if (x + i < 0 || x + i >= xres || y + j >= yres)
+ continue;
+ write_pos = &convert_buf[(y + j) * xres + x + i];
+ coeff = diffusing_matrix[i][j];
+ if (-1 == coeff) {
+ /* pixel itself */
+ *write_pos = pixel;
+ } else {
+ signed short p = *write_pos + error * coeff;
+
+ if (p > WHITE)
+ p = WHITE;
+ if (p < BLACK)
+ p = BLACK;
+ *write_pos = p;
+ }
+ }
+}
+
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
- u16 *vmem16 = (u16 *)par->info->screen_base;
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
u8 *buf = par->txbuf.buf;
int x, y;
int ret = 0;
/* buffer to convert RGB565 -> grayscale16 -> Dithered image 1bpp */
- signed short *convert_buf = kmalloc(par->info->var.xres *
- par->info->var.yres * sizeof(signed short), GFP_NOIO);
+ signed short *convert_buf = kmalloc_array(par->info->var.xres *
+ par->info->var.yres, sizeof(signed short), GFP_NOIO);
if (!convert_buf)
return -ENOMEM;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
/* converting to grayscale16 */
for (x = 0; x < par->info->var.xres; ++x)
for (y = 0; y < par->info->var.yres; ++y) {
@@ -305,7 +314,6 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
signed short error_b = pixel - BLACK;
signed short error_w = pixel - WHITE;
signed short error;
- u16 i, j;
/* what color close? */
if (abs(error_b) >= abs(error_w)) {
@@ -320,55 +328,30 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
error /= 8;
- /* diffusion matrix row */
- for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
- /* diffusion matrix column */
- for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
- signed short *write_pos;
- signed char coeff;
-
- /* skip pixels out of zone */
- if (x + i < 0 ||
- x + i >= par->info->var.xres
- || y + j >= par->info->var.yres)
- continue;
- write_pos = &convert_buf[
- (y + j) * par->info->var.xres +
- x + i];
- coeff = diffusing_matrix[i][j];
- if (coeff == -1)
- /* pixel itself */
- *write_pos = pixel;
- else {
- signed short p = *write_pos +
- error * coeff;
-
- if (p > WHITE)
- p = WHITE;
- if (p < BLACK)
- p = BLACK;
- *write_pos = p;
- }
- }
+ iterate_diffusion_matrix(par->info->var.xres,
+ par->info->var.yres,
+ x, y, convert_buf,
+ pixel, error);
}
- /* 1 string = 2 pages */
- for (y = addr_win.ys_page; y <= addr_win.ye_page; ++y) {
+ /* 1 string = 2 pages */
+ for (y = addr_win.ys_page; y <= addr_win.ye_page; ++y) {
/* left half of display */
if (addr_win.xs < par->info->var.xres / 2) {
construct_line_bitmap(par, buf, convert_buf,
- addr_win.xs, par->info->var.xres / 2, y);
+ addr_win.xs,
+ par->info->var.xres / 2, y);
len = par->info->var.xres / 2 - addr_win.xs;
/* select left side (sc0)
* set addr
*/
- write_reg(par, 0x00, (1 << 6) | (u8)addr_win.xs);
+ write_reg(par, 0x00, BIT(6) | (u8)addr_win.xs);
write_reg(par, 0x00, (0x17 << 3) | (u8)y);
/* write bitmap */
- gpio_set_value(par->RS, 1); /* RS->1 (data mode) */
+ gpiod_set_value(par->RS, 1); /* RS->1 (data mode) */
ret = par->fbtftops.write(par, buf, len);
if (ret < 0)
dev_err(par->info->device,
@@ -378,19 +361,20 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
/* right half of display */
if (addr_win.xe >= par->info->var.xres / 2) {
construct_line_bitmap(par, buf,
- convert_buf, par->info->var.xres / 2,
- addr_win.xe + 1, y);
+ convert_buf,
+ par->info->var.xres / 2,
+ addr_win.xe + 1, y);
len = addr_win.xe + 1 - par->info->var.xres / 2;
/* select right side (sc1)
* set addr
*/
- write_reg(par, 0x01, 1 << 6);
+ write_reg(par, 0x01, BIT(6));
write_reg(par, 0x01, (0x17 << 3) | (u8)y);
/* write bitmap */
- gpio_set_value(par->RS, 1); /* RS->1 (data mode) */
+ gpiod_set_value(par->RS, 1); /* RS->1 (data mode) */
par->fbtftops.write(par, buf, len);
if (ret < 0)
dev_err(par->info->device,
@@ -400,8 +384,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
kfree(convert_buf);
- gpio_set_value(par->CS0, 1);
- gpio_set_value(par->CS1, 1);
+ gpiod_set_value(par->CS0, 0);
+ gpiod_set_value(par->CS1, 0);
return ret;
}
@@ -409,23 +393,23 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
static int write(struct fbtft_par *par, void *buf, size_t len)
{
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
- gpio_set_value(par->RW, 0); /* set write mode */
+ gpiod_set_value(par->RW, 0); /* set write mode */
while (len--) {
u8 i, data;
- data = *(u8 *) buf++;
+ data = *(u8 *)buf++;
/* set data bus */
for (i = 0; i < 8; ++i)
- gpio_set_value(par->gpio.db[i], data & (1 << i));
+ gpiod_set_value(par->gpio.db[i], data & (1 << i));
/* set E */
- gpio_set_value(par->EPIN, 1);
+ gpiod_set_value(par->EPIN, 0);
udelay(5);
/* unset E - write */
- gpio_set_value(par->EPIN, 0);
+ gpiod_set_value(par->EPIN, 1);
udelay(1);
}
@@ -442,7 +426,6 @@ static struct fbtft_display display = {
.set_addr_win = set_addr_win,
.verify_gpios = verify_gpios,
.request_gpios_match = request_gpios_match,
- .reset = reset,
.write = write,
.write_register = write_reg8_bus8,
.write_vmem = write_vmem,
diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
index 6010e6cbbd72..1629c2c440a9 100644
--- a/drivers/staging/fbtft/fb_bd663474.c
+++ b/drivers/staging/fbtft/fb_bd663474.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the uPD161704 LCD Controller
*
@@ -6,22 +7,11 @@
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -33,9 +23,6 @@
static int init_display(struct fbtft_par *par)
{
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
-
par->fbtftops.reset(par);
/* Initialization sequence from Lib_UTFT */
diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c
index e1ed177f9184..2fd7b87ea0ce 100644
--- a/drivers/staging/fbtft/fb_hx8340bn.c
+++ b/drivers/staging/fbtft/fb_hx8340bn.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8340BN LCD Controller
*
@@ -7,16 +8,6 @@
* This is done by transferring eight 9-bit words in 9 bytes.
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -25,6 +16,7 @@
#include <linux/vmalloc.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -36,7 +28,7 @@
"3 3 17 8 4 7 05 7 6 0 3 1 6 0 0 "
static bool emulate;
-module_param(emulate, bool, 0);
+module_param(emulate, bool, 0000);
MODULE_PARM_DESC(emulate, "Force emulation in 9-bit mode");
static int init_display(struct fbtft_par *par)
@@ -45,56 +37,70 @@ static int init_display(struct fbtft_par *par)
/* BTL221722-276L startup sequence, from datasheet */
- /* SETEXTCOM: Set extended command set (C1h)
- This command is used to set extended command set access enable.
- Enable: After command (C1h), must write: ffh,83h,40h */
+ /*
+ * SETEXTCOM: Set extended command set (C1h)
+ * This command is used to set extended command set access enable.
+ * Enable: After command (C1h), must write: ffh,83h,40h
+ */
write_reg(par, 0xC1, 0xFF, 0x83, 0x40);
- /* Sleep out
- This command turns off sleep mode.
- In this mode the DC/DC converter is enabled, Internal oscillator
- is started, and panel scanning is started. */
+ /*
+ * Sleep out
+ * This command turns off sleep mode.
+ * In this mode the DC/DC converter is enabled, Internal oscillator
+ * is started, and panel scanning is started.
+ */
write_reg(par, 0x11);
mdelay(150);
/* Undoc'd register? */
write_reg(par, 0xCA, 0x70, 0x00, 0xD9);
- /* SETOSC: Set Internal Oscillator (B0h)
- This command is used to set internal oscillator related settings */
- /* OSC_EN: Enable internal oscillator */
- /* Internal oscillator frequency: 125% x 2.52MHz */
+ /*
+ * SETOSC: Set Internal Oscillator (B0h)
+ * This command is used to set internal oscillator related settings
+ * OSC_EN: Enable internal oscillator
+ * Internal oscillator frequency: 125% x 2.52MHz
+ */
write_reg(par, 0xB0, 0x01, 0x11);
/* Drive ability setting */
write_reg(par, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06);
mdelay(20);
- /* SETPWCTR5: Set Power Control 5(B5h)
- This command is used to set VCOM Low and VCOM High Voltage */
- /* VCOMH 0110101 : 3.925 */
- /* VCOML 0100000 : -1.700 */
- /* 45h=69 VCOMH: "VMH" + 5d VCOML: "VMH" + 5d */
+ /*
+ * SETPWCTR5: Set Power Control 5(B5h)
+ * This command is used to set VCOM Low and VCOM High Voltage
+ * VCOMH 0110101 : 3.925
+ * VCOML 0100000 : -1.700
+ * 45h=69 VCOMH: "VMH" + 5d VCOML: "VMH" + 5d
+ */
write_reg(par, 0xB5, 0x35, 0x20, 0x45);
- /* SETPWCTR4: Set Power Control 4(B4h)
- VRH[4:0]: Specify the VREG1 voltage adjusting.
- VREG1 voltage is for gamma voltage setting.
- BT[2:0]: Switch the output factor of step-up circuit 2
- for VGH and VGL voltage generation. */
+ /*
+ * SETPWCTR4: Set Power Control 4(B4h)
+ * VRH[4:0]: Specify the VREG1 voltage adjusting.
+ * VREG1 voltage is for gamma voltage setting.
+ * BT[2:0]: Switch the output factor of step-up circuit 2
+ * for VGH and VGL voltage generation.
+ */
write_reg(par, 0xB4, 0x33, 0x25, 0x4C);
mdelay(10);
- /* Interface Pixel Format (3Ah)
- This command is used to define the format of RGB picture data,
- which is to be transfer via the system and RGB interface. */
- /* RGB interface: 16 Bit/Pixel */
- write_reg(par, 0x3A, 0x05);
-
- /* Display on (29h)
- This command is used to recover from DISPLAY OFF mode.
- Output from the Frame Memory is enabled. */
- write_reg(par, 0x29);
+ /*
+ * Interface Pixel Format (3Ah)
+ * This command is used to define the format of RGB picture data,
+	 * which is to be transferred via the system and RGB interface.
+ * RGB interface: 16 Bit/Pixel
+ */
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+ /*
+ * Display on (29h)
+ * This command is used to recover from DISPLAY OFF mode.
+ * Output from the Frame Memory is enabled.
+ */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
mdelay(10);
return 0;
@@ -102,9 +108,9 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- write_reg(par, FBTFT_CASET, 0x00, xs, 0x00, xe);
- write_reg(par, FBTFT_RASET, 0x00, ys, 0x00, ye);
- write_reg(par, FBTFT_RAMWR);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0x00, xs, 0x00, xe);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0x00, ys, 0x00, ye);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static int set_var(struct fbtft_par *par)
@@ -116,16 +122,19 @@ static int set_var(struct fbtft_par *par)
#define MV BIT(5)
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, par->bgr << 3);
break;
case 270:
- write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MV | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MY | (par->bgr << 3));
break;
case 90:
- write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MY | MV | (par->bgr << 3));
break;
}
@@ -133,16 +142,16 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma Curve selection, GC (only GC0 can be customized):
- 0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0
- Gamma string format:
- OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
- ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX GC
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma Curve selection, GC (only GC0 can be customized):
+ * 0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0
+ * Gamma string format:
+ * OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
+ * ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX GC
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x0f, 0x0f, 0x1f, 0x0f, 0x0f, 0x0f, 0x1f, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x03, 0x03, 0x0f, 0x0f, 0x1f, 0x0f, 0x0f,
0x0f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00, 0x00,
@@ -154,36 +163,38 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
for (j = 0; j < par->gamma.num_values; j++)
CURVE(i, j) &= mask[i * par->gamma.num_values + j];
- write_reg(par, 0x26, 1 << CURVE(1, 14)); /* Gamma Set (26h) */
+ /* Gamma Set (26h) */
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 1 << CURVE(1, 14));
if (CURVE(1, 14))
return 0; /* only GC0 can be customized */
write_reg(par, 0xC2,
- (CURVE(0, 8) << 4) | CURVE(0, 7),
- (CURVE(0, 10) << 4) | CURVE(0, 9),
- (CURVE(0, 12) << 4) | CURVE(0, 11),
- CURVE(0, 2),
- (CURVE(0, 4) << 4) | CURVE(0, 3),
- CURVE(0, 5),
- CURVE(0, 6),
- (CURVE(0, 1) << 4) | CURVE(0, 0),
- (CURVE(0, 14) << 2) | CURVE(0, 13));
+ (CURVE(0, 8) << 4) | CURVE(0, 7),
+ (CURVE(0, 10) << 4) | CURVE(0, 9),
+ (CURVE(0, 12) << 4) | CURVE(0, 11),
+ CURVE(0, 2),
+ (CURVE(0, 4) << 4) | CURVE(0, 3),
+ CURVE(0, 5),
+ CURVE(0, 6),
+ (CURVE(0, 1) << 4) | CURVE(0, 0),
+ (CURVE(0, 14) << 2) | CURVE(0, 13));
write_reg(par, 0xC3,
- (CURVE(1, 8) << 4) | CURVE(1, 7),
- (CURVE(1, 10) << 4) | CURVE(1, 9),
- (CURVE(1, 12) << 4) | CURVE(1, 11),
- CURVE(1, 2),
- (CURVE(1, 4) << 4) | CURVE(1, 3),
- CURVE(1, 5),
- CURVE(1, 6),
- (CURVE(1, 1) << 4) | CURVE(1, 0));
+ (CURVE(1, 8) << 4) | CURVE(1, 7),
+ (CURVE(1, 10) << 4) | CURVE(1, 9),
+ (CURVE(1, 12) << 4) | CURVE(1, 11),
+ CURVE(1, 2),
+ (CURVE(1, 4) << 4) | CURVE(1, 3),
+ CURVE(1, 5),
+ CURVE(1, 6),
+ (CURVE(1, 1) << 4) | CURVE(1, 0));
mdelay(10);
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
index 6ff76e531a37..a9b72a8b42b5 100644
--- a/drivers/staging/fbtft/fb_hx8347d.c
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8347D LCD Controller
*
* Copyright (C) 2013 Christian Vogelgsang
*
* Based on driver code found here: https://github.com/watterott/r61505u-Adapter
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -77,9 +68,6 @@ static int init_display(struct fbtft_par *par)
mdelay(40);
write_reg(par, 0x28, 0x3C);
- /* orientation */
- write_reg(par, 0x16, 0x60 | (par->bgr << 3));
-
return 0;
}
@@ -96,15 +84,40 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
write_reg(par, 0x22);
}
+#define MEM_Y BIT(7) /* MY row address order */
+#define MEM_X BIT(6) /* MX column address order */
+#define MEM_V BIT(5) /* MV row / column exchange */
+#define MEM_L BIT(4) /* ML vertical refresh order */
+#define MEM_BGR (3) /* RGB-BGR Order */
+static int set_var(struct fbtft_par *par)
+{
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x16, MEM_V | MEM_X | (par->bgr << MEM_BGR));
+ break;
+ case 270:
+ write_reg(par, 0x16, par->bgr << MEM_BGR);
+ break;
+ case 180:
+ write_reg(par, 0x16, MEM_V | MEM_Y | (par->bgr << MEM_BGR));
+ break;
+ case 90:
+ write_reg(par, 0x16, MEM_X | MEM_Y | (par->bgr << MEM_BGR));
+ break;
+ }
+
+ return 0;
+}
+
/*
- Gamma string format:
- VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
- VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma string format:
+ * VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
+ * VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x7f, 0x7f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x0f,
};
@@ -140,6 +153,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
@@ -152,6 +166,7 @@ static struct fbtft_display display = {
.fbtftops = {
.init_display = init_display,
.set_addr_win = set_addr_win,
+ .set_var = set_var,
.set_gamma = set_gamma,
},
};
diff --git a/drivers/staging/fbtft/fb_hx8353d.c b/drivers/staging/fbtft/fb_hx8353d.c
index 8552411695fa..3e73b69b6a27 100644
--- a/drivers/staging/fbtft/fb_hx8353d.c
+++ b/drivers/staging/fbtft/fb_hx8353d.c
@@ -1,24 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8353D LCD Controller
*
* Copyright (c) 2014 Petr Olivka
* Copyright (c) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -27,7 +19,6 @@
static int init_display(struct fbtft_par *par)
{
-
par->fbtftops.reset(par);
mdelay(150);
@@ -47,18 +38,18 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x3A, 0x05);
/* MEM ACCESS */
- write_reg(par, 0x36, 0xC0);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0);
/* SLPOUT - Sleep out & booster on */
- write_reg(par, 0x11);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
mdelay(150);
/* DISPON - Display On */
- write_reg(par, 0x29);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
/* RGBSET */
- write_reg(par, 0x2D,
- 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
+ write_reg(par, MIPI_DCS_WRITE_LUT,
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
@@ -87,41 +78,45 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
#define mv BIT(5)
static int set_var(struct fbtft_par *par)
{
- /* madctl - memory data access control
- rgb/bgr:
- 1. mode selection pin srgb
- rgb h/w pin for color filter setting: 0=rgb, 1=bgr
- 2. madctl rgb bit
- rgb-bgr order color filter panel: 0=rgb, 1=bgr */
+ /*
+ * madctl - memory data access control
+ * rgb/bgr:
+ * 1. mode selection pin srgb
+ * rgb h/w pin for color filter setting: 0=rgb, 1=bgr
+ * 2. madctl rgb bit
+ * rgb-bgr order color filter panel: 0=rgb, 1=bgr
+ */
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, mx | my | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ mx | my | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, my | mv | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ my | mv | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ par->bgr << 3);
break;
case 90:
- write_reg(par, 0x36, mx | mv | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ mx | mv | (par->bgr << 3));
break;
}
return 0;
}
-/*
- gamma string format:
-*/
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+/* gamma string format: */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
write_reg(par, 0xE0,
- curves[0], curves[1], curves[2], curves[3],
- curves[4], curves[5], curves[6], curves[7],
- curves[8], curves[9], curves[10], curves[11],
- curves[12], curves[13], curves[14], curves[15],
- curves[16], curves[17], curves[18]);
+ curves[0], curves[1], curves[2], curves[3],
+ curves[4], curves[5], curves[6], curves[7],
+ curves[8], curves[9], curves[10], curves[11],
+ curves[12], curves[13], curves[14], curves[15],
+ curves[16], curves[17], curves[18]);
return 0;
}
diff --git a/drivers/staging/fbtft/fb_hx8357d.c b/drivers/staging/fbtft/fb_hx8357d.c
index a381dbcf5535..94a357e8fdf6 100644
--- a/drivers/staging/fbtft/fb_hx8357d.c
+++ b/drivers/staging/fbtft/fb_hx8357d.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8357D LCD Controller
* Copyright (C) 2015 Adafruit Industries
@@ -6,22 +7,13 @@
* Copyright (C) 2013 Christian Vogelgsang
*
* Based on driver code found here: https://github.com/watterott/r61505u-Adapter
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
#include "fb_hx8357d.h"
@@ -35,7 +27,7 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
/* Reset things like Gamma */
- write_reg(par, HX8357B_SWRESET);
+ write_reg(par, MIPI_DCS_SOFT_RESET);
usleep_range(5000, 7000);
/* setextc */
@@ -55,83 +47,83 @@ static int init_display(struct fbtft_par *par)
write_reg(par, HX8357_SETPANEL, 0x05);
write_reg(par, HX8357_SETPWR1,
- 0x00, /* Not deep standby */
- 0x15, /* BT */
- 0x1C, /* VSPR */
- 0x1C, /* VSNR */
- 0x83, /* AP */
- 0xAA); /* FS */
+ 0x00, /* Not deep standby */
+ 0x15, /* BT */
+ 0x1C, /* VSPR */
+ 0x1C, /* VSNR */
+ 0x83, /* AP */
+ 0xAA); /* FS */
write_reg(par, HX8357D_SETSTBA,
- 0x50, /* OPON normal */
- 0x50, /* OPON idle */
- 0x01, /* STBA */
- 0x3C, /* STBA */
- 0x1E, /* STBA */
- 0x08); /* GEN */
+ 0x50, /* OPON normal */
+ 0x50, /* OPON idle */
+ 0x01, /* STBA */
+ 0x3C, /* STBA */
+ 0x1E, /* STBA */
+ 0x08); /* GEN */
write_reg(par, HX8357D_SETCYC,
- 0x02, /* NW 0x02 */
- 0x40, /* RTN */
- 0x00, /* DIV */
- 0x2A, /* DUM */
- 0x2A, /* DUM */
- 0x0D, /* GDON */
- 0x78); /* GDOFF */
+ 0x02, /* NW 0x02 */
+ 0x40, /* RTN */
+ 0x00, /* DIV */
+ 0x2A, /* DUM */
+ 0x2A, /* DUM */
+ 0x0D, /* GDON */
+ 0x78); /* GDOFF */
write_reg(par, HX8357D_SETGAMMA,
- 0x02,
- 0x0A,
- 0x11,
- 0x1d,
- 0x23,
- 0x35,
- 0x41,
- 0x4b,
- 0x4b,
- 0x42,
- 0x3A,
- 0x27,
- 0x1B,
- 0x08,
- 0x09,
- 0x03,
- 0x02,
- 0x0A,
- 0x11,
- 0x1d,
- 0x23,
- 0x35,
- 0x41,
- 0x4b,
- 0x4b,
- 0x42,
- 0x3A,
- 0x27,
- 0x1B,
- 0x08,
- 0x09,
- 0x03,
- 0x00,
- 0x01);
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x00,
+ 0x01);
/* 16 bit */
- write_reg(par, HX8357_COLMOD, 0x55);
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
- write_reg(par, HX8357_MADCTL, 0xC0);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0);
/* TE off */
- write_reg(par, HX8357_TEON, 0x00);
+ write_reg(par, MIPI_DCS_SET_TEAR_ON, 0x00);
/* tear line */
- write_reg(par, HX8357_TEARLINE, 0x00, 0x02);
+ write_reg(par, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
/* Exit Sleep */
- write_reg(par, HX8357_SLPOUT);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(150);
/* display on */
- write_reg(par, HX8357_DISPON);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
usleep_range(5000, 7000);
return 0;
@@ -139,18 +131,15 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column addr set */
- write_reg(par, HX8357_CASET,
- xs >> 8, xs & 0xff, /* XSTART */
- xe >> 8, xe & 0xff); /* XEND */
-
- /* Row addr set */
- write_reg(par, HX8357_PASET,
- ys >> 8, ys & 0xff, /* YSTART */
- ye >> 8, ye & 0xff); /* YEND */
-
- /* write to RAM */
- write_reg(par, HX8357_RAMWR);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, /* XSTART */
+ xe >> 8, xe & 0xff); /* XEND */
+
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, /* YSTART */
+ ye >> 8, ye & 0xff); /* YEND */
+
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define HX8357D_MADCTL_MY 0x80
@@ -182,7 +171,7 @@ static int set_var(struct fbtft_par *par)
val |= (par->bgr ? HX8357D_MADCTL_RGB : HX8357D_MADCTL_BGR);
/* Memory Access Control */
- write_reg(par, HX8357_MADCTL, val);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val);
return 0;
}
diff --git a/drivers/staging/fbtft/fb_hx8357d.h b/drivers/staging/fbtft/fb_hx8357d.h
index de05e8cdf04c..6180b093f94f 100644
--- a/drivers/staging/fbtft/fb_hx8357d.h
+++ b/drivers/staging/fbtft/fb_hx8357d.h
@@ -1,17 +1,18 @@
-/***************************************************
- This is our library for the Adafruit ILI9341 Breakout and Shield
- ----> http://www.adafruit.com/products/1651
-
- Check out the links above for our tutorials and wiring diagrams
- These displays use SPI to communicate, 4 or 5 pins are required to
- interface (RST is optional)
- Adafruit invests time and resources providing this open source code,
- please support Adafruit and open-source hardware by purchasing
- products from Adafruit!
-
- Written by Limor Fried/Ladyada for Adafruit Industries.
- MIT license, all text above must be included in any redistribution
- ****************************************************/
+/* SPDX-License-Identifier: MIT */
+/*
+ * This is our library for the Adafruit ILI9341 Breakout and Shield
+ * ----> http://www.adafruit.com/products/1651
+ *
+ * Check out the links above for our tutorials and wiring diagrams
+ * These displays use SPI to communicate, 4 or 5 pins are required to
+ * interface (RST is optional)
+ * Adafruit invests time and resources providing this open source code,
+ * please support Adafruit and open-source hardware by purchasing
+ * products from Adafruit!
+ *
+ * Written by Limor Fried/Ladyada for Adafruit Industries.
+ * MIT license, all text above must be included in any redistribution
+ */
#ifndef __HX8357_H__
#define __HX8357_H__
@@ -22,38 +23,6 @@
#define HX8357_TFTWIDTH 320
#define HX8357_TFTHEIGHT 480
-#define HX8357B_NOP 0x00
-#define HX8357B_SWRESET 0x01
-#define HX8357B_RDDID 0x04
-#define HX8357B_RDDST 0x09
-
-#define HX8357B_RDPOWMODE 0x0A
-#define HX8357B_RDMADCTL 0x0B
-#define HX8357B_RDCOLMOD 0x0C
-#define HX8357B_RDDIM 0x0D
-#define HX8357B_RDDSDR 0x0F
-
-#define HX8357_SLPIN 0x10
-#define HX8357_SLPOUT 0x11
-#define HX8357B_PTLON 0x12
-#define HX8357B_NORON 0x13
-
-#define HX8357_INVOFF 0x20
-#define HX8357_INVON 0x21
-#define HX8357_DISPOFF 0x28
-#define HX8357_DISPON 0x29
-
-#define HX8357_CASET 0x2A
-#define HX8357_PASET 0x2B
-#define HX8357_RAMWR 0x2C
-#define HX8357_RAMRD 0x2E
-
-#define HX8357B_PTLAR 0x30
-#define HX8357_TEON 0x35
-#define HX8357_TEARLINE 0x44
-#define HX8357_MADCTL 0x36
-#define HX8357_COLMOD 0x3A
-
#define HX8357_SETOSC 0xB0
#define HX8357_SETPWR1 0xB1
#define HX8357B_SETDISPLAY 0xB2
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index f31b3f4b9275..6582a2c90aaf 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9163 LCD Controller
*
@@ -5,23 +6,13 @@
*
* Based on ili9325.c by Noralf Tronnes and
* .S.U.M.O.T.O.Y. by Max MC Costa (https://github.com/sumotoy/TFT_ILI9163C).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -38,37 +29,11 @@
#endif
/* ILI9163C commands */
-#define CMD_NOP 0x00 /* Non operation*/
-#define CMD_SWRESET 0x01 /* Soft Reset */
-#define CMD_SLPIN 0x10 /* Sleep ON */
-#define CMD_SLPOUT 0x11 /* Sleep OFF */
-#define CMD_PTLON 0x12 /* Partial Mode ON */
-#define CMD_NORML 0x13 /* Normal Display ON */
-#define CMD_DINVOF 0x20 /* Display Inversion OFF */
-#define CMD_DINVON 0x21 /* Display Inversion ON */
-#define CMD_GAMMASET 0x26 /* Gamma Set (0x01[1],0x02[2],0x04[3],0x08[4]) */
-#define CMD_DISPOFF 0x28 /* Display OFF */
-#define CMD_DISPON 0x29 /* Display ON */
-#define CMD_IDLEON 0x39 /* Idle Mode ON */
-#define CMD_IDLEOF 0x38 /* Idle Mode OFF */
-#define CMD_CLMADRS 0x2A /* Column Address Set */
-#define CMD_PGEADRS 0x2B /* Page Address Set */
-
-#define CMD_RAMWR 0x2C /* Memory Write */
-#define CMD_RAMRD 0x2E /* Memory Read */
-#define CMD_CLRSPACE 0x2D /* Color Space : 4K/65K/262K */
-#define CMD_PARTAREA 0x30 /* Partial Area */
-#define CMD_VSCLLDEF 0x33 /* Vertical Scroll Definition */
-#define CMD_TEFXLON 0x34 /* Tearing Effect Line ON */
-#define CMD_TEFXLOF 0x35 /* Tearing Effect Line OFF */
-#define CMD_MADCTL 0x36 /* Memory Access Control */
-
-#define CMD_PIXFMT 0x3A /* Interface Pixel Format */
-#define CMD_FRMCTR1 0xB1 /* Frame Rate Control
- (In normal mode/Full colors) */
+#define CMD_FRMCTR1 0xB1 /* Frame Rate Control */
+ /* (In normal mode/Full colors) */
#define CMD_FRMCTR2 0xB2 /* Frame Rate Control (In Idle mode/8-colors) */
-#define CMD_FRMCTR3 0xB3 /* Frame Rate Control
- (In Partial mode/full colors) */
+#define CMD_FRMCTR3 0xB3 /* Frame Rate Control */
+ /* (In Partial mode/full colors) */
#define CMD_DINVCTR 0xB4 /* Display Inversion Control */
#define CMD_RGBBLK 0xB5 /* RGB Interface Blanking Porch setting */
#define CMD_DFUNCTR 0xB6 /* Display Function set 5 */
@@ -88,17 +53,18 @@
#define CMD_GAMRSEL 0xF2 /* GAM_R_SEL */
/*
-This display:
-http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI-Color-TFT-LCD-Display-Module-/271422122271
-This particular display has a design error! The controller has 3 pins to
-configure to constrain the memory and resolution to a fixed dimension (in
-that case 128x128) but they leaved those pins configured for 128x160 so
-there was several pixel memory addressing problems.
-I solved by setup several parameters that dinamically fix the resolution as
-needit so below the parameters for this display. If you have a strain or a
-correct display (can happen with chinese) you can copy those parameters and
-create setup for different displays.
-*/
+ * This display:
+ * http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI-Color-TFT-LCD-Display-Module-/271422122271
+ * This particular display has a design error! The controller has 3 pins
+ * that constrain the memory and resolution to a fixed dimension (128x128
+ * in this case), but they were left configured for 128x160, which caused
+ * several pixel memory addressing problems.
+ * This is worked around by several parameters that dynamically fix the
+ * resolution as needed; the parameters for this display are given below.
+ * If you have a variant or a correctly configured display (clones differ),
+ * you can copy these parameters and create setups for other displays.
+ */
#ifdef RED
#define __OFFSET 32 /*see note 2 - this is the red version */
@@ -110,19 +76,17 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
-
- write_reg(par, CMD_SWRESET); /* software reset */
+ write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
mdelay(500);
- write_reg(par, CMD_SLPOUT); /* exit sleep */
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */
mdelay(5);
- write_reg(par, CMD_PIXFMT, 0x05); /* Set Color Format 16bit */
- write_reg(par, CMD_GAMMASET, 0x02); /* default gamma curve 3 */
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ /* default gamma curve 3 */
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x02);
#ifdef GAMMA_ADJ
write_reg(par, CMD_GAMRSEL, 0x01); /* Enable Gamma adj */
#endif
- write_reg(par, CMD_NORML);
+ write_reg(par, MIPI_DCS_ENTER_NORMAL_MODE);
write_reg(par, CMD_DFUNCTR, 0xff, 0x06);
/* Frame Rate Control (In normal mode/Full colors) */
write_reg(par, CMD_FRMCTR1, 0x08, 0x02);
@@ -135,66 +99,67 @@ static int init_display(struct fbtft_par *par)
write_reg(par, CMD_VCOMCTR1, 0x50, 0x63);
write_reg(par, CMD_VCOMOFFS, 0);
- write_reg(par, CMD_CLMADRS, 0, 0, 0, WIDTH); /* Set Column Address */
- write_reg(par, CMD_PGEADRS, 0, 0, 0, HEIGHT); /* Set Page Address */
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0, 0, WIDTH);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0, 0, HEIGHT);
- write_reg(par, CMD_DISPON); /* display ON */
- write_reg(par, CMD_RAMWR); /* Memory Write */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON); /* display ON */
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START); /* Memory Write */
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys,
- int xe, int ye)
+ int xe, int ye)
{
switch (par->info->var.rotate) {
case 0:
- write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8,
- xe & 0xff);
- write_reg(par, CMD_PGEADRS,
- (ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff,
- (ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ (ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff,
+ (ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff);
break;
case 90:
- write_reg(par, CMD_CLMADRS,
- (xs + __OFFSET) >> 8, (xs + __OFFSET) & 0xff,
- (xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff);
- write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8,
- ye & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (xs + __OFFSET) >> 8, (xs + __OFFSET) & 0xff,
+ (xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
break;
case 180:
case 270:
- write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8,
- xe & 0xff);
- write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8,
- ye & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
break;
default:
- par->info->var.rotate = 0; /* Fix incorrect setting */
+ /* Fix incorrect setting */
+ par->info->var.rotate = 0;
}
- write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
/*
-7) MY: 1(bottom to top), 0(top to bottom) Row Address Order
-6) MX: 1(R to L), 0(L to R) Column Address Order
-5) MV: 1(Exchanged), 0(normal) Row/Column exchange
-4) ML: 1(bottom to top), 0(top to bottom) Vertical Refresh Order
-3) RGB: 1(BGR), 0(RGB) Color Space
-2) MH: 1(R to L), 0(L to R) Horizontal Refresh Order
-1)
-0)
-
- MY, MX, MV, ML,RGB, MH, D1, D0
- 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //normal
- 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //Y-Mirror
- 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Mirror
- 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Y-Mirror
- 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange
- 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange, Y-Mirror
- 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 //XY exchange
- 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0
-*/
+ * 7) MY: 1(bottom to top), 0(top to bottom) Row Address Order
+ * 6) MX: 1(R to L), 0(L to R) Column Address Order
+ * 5) MV: 1(Exchanged), 0(normal) Row/Column exchange
+ * 4) ML: 1(bottom to top), 0(top to bottom) Vertical Refresh Order
+ * 3) RGB: 1(BGR), 0(RGB) Color Space
+ * 2) MH: 1(R to L), 0(L to R) Horizontal Refresh Order
+ * 1)
+ * 0)
+ *
+ * MY, MX, MV, ML,RGB, MH, D1, D0
+ * 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //normal
+ * 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //Y-Mirror
+ * 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Mirror
+ * 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Y-Mirror
+ * 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange
+ * 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange, Y-Mirror
+ * 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 //XY exchange
+ * 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0
+ */
static int set_var(struct fbtft_par *par)
{
u8 mactrl_data = 0; /* Avoid compiler warning */
@@ -216,17 +181,17 @@ static int set_var(struct fbtft_par *par)
/* Colorspace */
if (par->bgr)
- mactrl_data |= (1 << 2);
- write_reg(par, CMD_MADCTL, mactrl_data);
- write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+ mactrl_data |= BIT(2);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, mactrl_data);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
return 0;
}
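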
#ifdef GAMMA_ADJ
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int gamma_adj(struct fbtft_par *par, unsigned long *curves)
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int gamma_adj(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
0x1f, 0x3f, 0x0f, 0x0f, 0x7f, 0x1f,
0x3F, 0x3F, 0x3F, 0x3F, 0x3F};
@@ -237,27 +202,28 @@ static int gamma_adj(struct fbtft_par *par, unsigned long *curves)
CURVE(i, j) &= mask[i * par->gamma.num_values + j];
write_reg(par, CMD_PGAMMAC,
- CURVE(0, 0),
- CURVE(0, 1),
- CURVE(0, 2),
- CURVE(0, 3),
- CURVE(0, 4),
- CURVE(0, 5),
- CURVE(0, 6),
- (CURVE(0, 7) << 4) | CURVE(0, 8),
- CURVE(0, 9),
- CURVE(0, 10),
- CURVE(0, 11),
- CURVE(0, 12),
- CURVE(0, 13),
- CURVE(0, 14),
- CURVE(0, 15)
- );
+ CURVE(0, 0),
+ CURVE(0, 1),
+ CURVE(0, 2),
+ CURVE(0, 3),
+ CURVE(0, 4),
+ CURVE(0, 5),
+ CURVE(0, 6),
+ (CURVE(0, 7) << 4) | CURVE(0, 8),
+ CURVE(0, 9),
+ CURVE(0, 10),
+ CURVE(0, 11),
+ CURVE(0, 12),
+ CURVE(0, 13),
+ CURVE(0, 14),
+ CURVE(0, 15));
- write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+ /* Write Data to GRAM mode */
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
return 0;
}
+
#undef CURVE
#endif
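
The parentheses added to the CURVE() macro above are standard macro hygiene: without them an expression argument binds to the multiplication inside the expansion. A minimal sketch (the constant is illustrative, not the driver's gamma.num_values field):

    #define NUM_VALUES 20
    #define CURVE_OLD(num, idx)	curves[num * NUM_VALUES + idx]
    #define CURVE_NEW(num, idx)	curves[(num) * NUM_VALUES + (idx)]

    /*
     * CURVE_OLD(i + 1, j) expands to curves[i + 1 * NUM_VALUES + j]    (wrong)
     * CURVE_NEW(i + 1, j) expands to curves[(i + 1) * NUM_VALUES + (j)] (intended)
     */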
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 3ed50febe36f..050fc2367c12 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -1,23 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9320 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -29,28 +19,25 @@
#define DEFAULT_GAMMA "07 07 6 0 0 0 5 5 4 0\n" \
"07 08 4 7 5 1 2 0 7 7"
-static unsigned read_devicecode(struct fbtft_par *par)
+static unsigned int read_devicecode(struct fbtft_par *par)
{
- int ret;
u8 rxbuf[8] = {0, };
write_reg(par, 0x0000);
- ret = par->fbtftops.read(par, rxbuf, 4);
+ par->fbtftops.read(par, rxbuf, 4);
return (rxbuf[2] << 8) | rxbuf[3];
}
static int init_display(struct fbtft_par *par)
{
- unsigned devcode;
+ unsigned int devcode;
par->fbtftops.reset(par);
devcode = read_devicecode(par);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n",
- devcode);
if ((devcode != 0x0000) && (devcode != 0x9320))
dev_warn(par->info->device,
- "Unrecognized Device code: 0x%04X (expected 0x9320)\n",
+ "Unrecognized Device code: 0x%04X (expected 0x9320)\n",
devcode);
/* Initialization sequence from ILI9320 Application Notes */
@@ -216,14 +203,14 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
- VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma string format:
+ * VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ * VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
};
@@ -248,6 +235,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index 3b3a06d8a125..16d3b17ca279 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -1,25 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9325 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
*
* Based on ili9325.c by Jeroen Domburg
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -32,74 +22,68 @@
#define DEFAULT_GAMMA "0F 00 7 2 0 0 6 5 4 1\n" \
"04 16 2 7 6 3 2 1 7 7"
-static unsigned bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
-module_param(bt, uint, 0);
+static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
+module_param(bt, uint, 0000);
MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits");
-static unsigned vc = 0x03; /* Vci1=Vci*0.80 */
-module_param(vc, uint, 0);
-MODULE_PARM_DESC(vc,
-"Sets the ratio factor of Vci to generate the reference voltages Vci1");
+static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */
+module_param(vc, uint, 0000);
+MODULE_PARM_DESC(vc, "Sets the ratio factor of Vci to generate the reference voltages Vci1");
-static unsigned vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
-module_param(vrh, uint, 0);
-MODULE_PARM_DESC(vrh,
-"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
+static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
+module_param(vrh, uint, 0000);
+MODULE_PARM_DESC(vrh, "Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
-static unsigned vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
-module_param(vdv, uint, 0);
-MODULE_PARM_DESC(vdv,
-"Select the factor of VREG1OUT to set the amplitude of Vcom");
+static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
+module_param(vdv, uint, 0000);
+MODULE_PARM_DESC(vdv, "Select the factor of VREG1OUT to set the amplitude of Vcom");
-static unsigned vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
-module_param(vcm, uint, 0);
+static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
+module_param(vcm, uint, 0000);
MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
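
A note on the 0 -> 0000 change in the module_param() calls above: both mean "no sysfs entry", 0000 merely spells the octal permission explicitly, which checkpatch prefers. The parameters remain settable at module load time, e.g. "modprobe fb_ili9325 bt=5". A minimal sketch with an illustrative parameter name, not one of the driver's:

    static unsigned int demo_factor = 6;
    module_param(demo_factor, uint, 0000);	/* not exported to sysfs */
    MODULE_PARM_DESC(demo_factor, "Illustrative parameter for the 0000 permission spelling");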
/*
-Verify that this configuration is within the Voltage limits
-
-Display module configuration: Vcc = IOVcc = Vci = 3.3V
-
- Voltages
-----------
-Vci = 3.3
-Vci1 = Vci * 0.80 = 2.64
-DDVDH = Vci1 * 2 = 5.28
-VCL = -Vci1 = -2.64
-VREG1OUT = Vci * 1.85 = 4.88
-VCOMH = VREG1OUT * 0.735 = 3.59
-VCOM amplitude = VREG1OUT * 0.98 = 4.79
-VGH = Vci * 4 = 13.2
-VGL = -Vci * 4 = -13.2
-
- Limits
---------
-Power supplies
-1.65 < IOVcc < 3.30 => 1.65 < 3.3 < 3.30
-2.40 < Vcc < 3.30 => 2.40 < 3.3 < 3.30
-2.50 < Vci < 3.30 => 2.50 < 3.3 < 3.30
-
-Source/VCOM power supply voltage
- 4.50 < DDVDH < 6.0 => 4.50 < 5.28 < 6.0
--3.0 < VCL < -2.0 => -3.0 < -2.64 < -2.0
-VCI - VCL < 6.0 => 5.94 < 6.0
-
-Gate driver output voltage
- 10 < VGH < 20 => 10 < 13.2 < 20
--15 < VGL < -5 => -15 < -13.2 < -5
-VGH - VGL < 32 => 26.4 < 32
-
-VCOM driver output voltage
-VCOMH - VCOML < 6.0 => 4.79 < 6.0
-*/
+ * Verify that this configuration is within the Voltage limits
+ *
+ * Display module configuration: Vcc = IOVcc = Vci = 3.3V
+ *
+ * Voltages
+ * ----------
+ * Vci = 3.3
+ * Vci1 = Vci * 0.80 = 2.64
+ * DDVDH = Vci1 * 2 = 5.28
+ * VCL = -Vci1 = -2.64
+ * VREG1OUT = Vci * 1.85 = 4.88
+ * VCOMH = VREG1OUT * 0.735 = 3.59
+ * VCOM amplitude = VREG1OUT * 0.98 = 4.79
+ * VGH = Vci * 4 = 13.2
+ * VGL = -Vci * 4 = -13.2
+ *
+ * Limits
+ * --------
+ * Power supplies
+ * 1.65 < IOVcc < 3.30 => 1.65 < 3.3 < 3.30
+ * 2.40 < Vcc < 3.30 => 2.40 < 3.3 < 3.30
+ * 2.50 < Vci < 3.30 => 2.50 < 3.3 < 3.30
+ *
+ * Source/VCOM power supply voltage
+ * 4.50 < DDVDH < 6.0 => 4.50 < 5.28 < 6.0
+ * -3.0 < VCL < -2.0 => -3.0 < -2.64 < -2.0
+ * VCI - VCL < 6.0 => 5.94 < 6.0
+ *
+ * Gate driver output voltage
+ * 10 < VGH < 20 => 10 < 13.2 < 20
+ * -15 < VGL < -5 => -15 < -13.2 < -5
+ * VGH - VGL < 32 => 26.4 < 32
+ *
+ * VCOM driver output voltage
+ * VCOMH - VCOML < 6.0 => 4.79 < 6.0
+ */
static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
-
bt &= 0x07;
vc &= 0x07;
vrh &= 0x0f;
@@ -129,7 +113,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x0013, 0x0000); /* VDV[4:0] for VCOM amplitude */
mdelay(200); /* Dis-charge capacitor power voltage */
write_reg(par, 0x0010, /* SAP, BT[3:0], AP, DSTB, SLP, STB */
- (1 << 12) | (bt << 8) | (1 << 7) | (0x01 << 4));
+ BIT(12) | (bt << 8) | BIT(7) | BIT(4));
write_reg(par, 0x0011, 0x220 | vc); /* DC1[2:0], DC0[2:0], VC[2:0] */
mdelay(50); /* Delay 50ms */
write_reg(par, 0x0012, vrh); /* Internal reference voltage= Vci; */
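
The BIT()-based rewrite of the register 0x0010 value in the hunk above can be read as assembling a small bitfield from the bt module parameter. The helper below is illustrative only and simply mirrors the call site; field names follow the in-line comment (SAP, BT[3:0], AP, ...):

    static u16 ili9325_power_ctrl1(unsigned int bt)
    {
    	bt &= 0x07;	/* the driver masks BT to three bits */
    	return BIT(12) | (bt << 8) | BIT(7) | BIT(4);
    }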
@@ -213,14 +197,14 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
- VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma string format:
+ * VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ * VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
};
@@ -245,6 +229,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
index e0e253989271..704236bcaf3f 100644
--- a/drivers/staging/fbtft/fb_ili9340.c
+++ b/drivers/staging/fbtft/fb_ili9340.c
@@ -1,24 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9340 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -53,7 +44,7 @@ static int init_display(struct fbtft_par *par)
/* COLMOD: Pixel Format Set */
/* 16 bits/pixel */
- write_reg(par, 0x3A, 0x55);
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
/* Frame Rate Control */
/* Division ratio = fosc, Frame Rate = 79Hz */
@@ -65,40 +56,37 @@ static int init_display(struct fbtft_par *par)
/* Gamma Function Disable */
write_reg(par, 0xF2, 0x00);
- /* Gamma curve selected */
- write_reg(par, 0x26, 0x01);
+ /* Gamma curve selection */
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
/* Positive Gamma Correction */
write_reg(par, 0xE0,
- 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1,
- 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00);
+ 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1,
+ 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00);
/* Negative Gamma Correction */
write_reg(par, 0xE1,
- 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1,
- 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F);
+ 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1,
+ 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F);
- /* Sleep OUT */
- write_reg(par, 0x11);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
mdelay(120);
- /* Display ON */
- write_reg(par, 0x29);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
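
The shift-and-mask parameters in set_addr_win() above split each 16-bit coordinate into two bytes, because MIPI_DCS_SET_COLUMN_ADDRESS / MIPI_DCS_SET_PAGE_ADDRESS take big-endian start and end values. A sketch under the same assumptions as the driver (struct fbtft_par and write_reg() from "fbtft.h"; the helper name is illustrative):

    /* e.g. xs = 0, xe = 239 sends the parameter bytes 0x00 0x00 0x00 0xEF */
    static void dcs_set_column_window(struct fbtft_par *par, u16 xs, u16 xe)
    {
    	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
    		  xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
    }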
#define ILI9340_MADCTL_MV 0x20
@@ -123,7 +111,7 @@ static int set_var(struct fbtft_par *par)
break;
}
/* Memory Access Control */
- write_reg(par, 0x36, val | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val | (par->bgr << 3));
return 0;
}
diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c
index dcee0aff5875..47e72b87d76d 100644
--- a/drivers/staging/fbtft/fb_ili9341.c
+++ b/drivers/staging/fbtft/fb_ili9341.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9341 LCD display controller
*
@@ -8,22 +9,13 @@
*
* Copyright (C) 2013 Christian Vogelgsang
* Based on adafruit22fb.c by Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -39,9 +31,9 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
/* startup sequence for MI0283QT-9A */
- write_reg(par, 0x01); /* software reset */
+ write_reg(par, MIPI_DCS_SOFT_RESET);
mdelay(5);
- write_reg(par, 0x28); /* display off */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_OFF);
/* --------------------------------------------------------- */
write_reg(par, 0xCF, 0x00, 0x83, 0x30);
write_reg(par, 0xED, 0x64, 0x03, 0x12, 0x81);
@@ -56,18 +48,18 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xC5, 0x35, 0x3E);
write_reg(par, 0xC7, 0xBE);
/* ------------memory access control------------------------ */
- write_reg(par, 0x3A, 0x55); /* 16bit pixel */
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); /* 16bit pixel */
/* ------------frame rate----------------------------------- */
write_reg(par, 0xB1, 0x00, 0x1B);
/* ------------Gamma---------------------------------------- */
/* write_reg(par, 0xF2, 0x08); */ /* Gamma Function Disable */
- write_reg(par, 0x26, 0x01);
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
/* ------------display-------------------------------------- */
write_reg(par, 0xB7, 0x07); /* entry mode set */
write_reg(par, 0xB6, 0x0A, 0x82, 0x27, 0x00);
- write_reg(par, 0x11); /* sleep out */
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
mdelay(100);
- write_reg(par, 0x29); /* display on */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
mdelay(20);
return 0;
@@ -75,40 +67,39 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address set */
- write_reg(par, 0x2A,
- (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
- /* Row address set */
- write_reg(par, 0x2B,
- (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
-#define MEM_Y (7) /* MY row address order */
-#define MEM_X (6) /* MX column address order */
-#define MEM_V (5) /* MV row / column exchange */
-#define MEM_L (4) /* ML vertical refresh order */
-#define MEM_H (2) /* MH horizontal refresh order */
+#define MEM_Y BIT(7) /* MY row address order */
+#define MEM_X BIT(6) /* MX column address order */
+#define MEM_V BIT(5) /* MV row / column exchange */
+#define MEM_L BIT(4) /* ML vertical refresh order */
+#define MEM_H BIT(2) /* MH horizontal refresh order */
#define MEM_BGR (3) /* RGB-BGR Order */
static int set_var(struct fbtft_par *par)
{
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, (1 << MEM_X) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_X | (par->bgr << MEM_BGR));
break;
case 270:
- write_reg(par, 0x36,
- (1 << MEM_V) | (1 << MEM_L) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_V | MEM_L | (par->bgr << MEM_BGR));
break;
case 180:
- write_reg(par, 0x36, (1 << MEM_Y) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_Y | (par->bgr << MEM_BGR));
break;
case 90:
- write_reg(par, 0x36, (1 << MEM_Y) | (1 << MEM_X) |
- (1 << MEM_V) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_Y | MEM_X | MEM_V | (par->bgr << MEM_BGR));
break;
}
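
The MEM_* rework above changes those defines from bit positions (shifted at each call site) to ready-made masks built with BIT(); MEM_BGR intentionally stays a position because par->bgr is a 0/1 value shifted into place. An equivalence sketch with illustrative names:

    #define MEM_X_POS	6		/* old style: used as (1 << MEM_X_POS) */
    #define MEM_X_MASK	BIT(6)		/* new style: ORed in directly */

    /* (1 << MEM_X_POS) == MEM_X_MASK == 0x40, so the MADCTL value is unchanged */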
@@ -116,25 +107,26 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- Positive: Par1 Par2 [...] Par15
- Negative: Par1 Par2 [...] Par15
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma string format:
+ * Positive: Par1 Par2 [...] Par15
+ * Negative: Par1 Par2 [...] Par15
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
int i;
for (i = 0; i < par->gamma.num_curves; i++)
write_reg(par, 0xE0 + i,
- CURVE(i, 0), CURVE(i, 1), CURVE(i, 2),
- CURVE(i, 3), CURVE(i, 4), CURVE(i, 5),
- CURVE(i, 6), CURVE(i, 7), CURVE(i, 8),
- CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
- CURVE(i, 12), CURVE(i, 13), CURVE(i, 14));
+ CURVE(i, 0), CURVE(i, 1), CURVE(i, 2),
+ CURVE(i, 3), CURVE(i, 4), CURVE(i, 5),
+ CURVE(i, 6), CURVE(i, 7), CURVE(i, 8),
+ CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
+ CURVE(i, 12), CURVE(i, 13), CURVE(i, 14));
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
@@ -153,7 +145,7 @@ static struct fbtft_display display = {
},
};
-FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9341", &display);
+FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "ilitek", "ili9341", &display);
MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 63684864f309..19eba085ea53 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -1,24 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9481 LCD Controller
*
* Copyright (c) 2014 Petr Olivka
* Copyright (c) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -26,10 +18,9 @@
#define WIDTH 320
#define HEIGHT 480
-static int default_init_sequence[] = {
-
+static const s16 default_init_sequence[] = {
/* SLP_OUT - Sleep out */
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 50,
/* Power setting */
-1, 0xD0, 0x07, 0x42, 0x18,
@@ -42,44 +33,47 @@ static int default_init_sequence[] = {
/* Frame rate & inv. */
-1, 0xC5, 0x03,
/* Pixel format */
- -1, 0x3A, 0x55,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
/* Gamma */
-1, 0xC8, 0x00, 0x32, 0x36, 0x45, 0x06, 0x16,
0x37, 0x75, 0x77, 0x54, 0x0C, 0x00,
/* DISP_ON */
- -1, 0x29,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
-3
};
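
For reference, fbtft init-sequence arrays like the one above use negative sentinels: -1 starts a command (followed by its parameter bytes), -2 is a delay in milliseconds, and -3 terminates the table. A minimal sketch using the MIPI DCS constants from <video/mipi_display.h>:

    static const s16 example_init_sequence[] = {
    	-1, MIPI_DCS_SOFT_RESET,		/* command with no parameters */
    	-2, 5,					/* wait 5 ms */
    	-1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,	/* command plus one parameter */
    	-1, MIPI_DCS_SET_DISPLAY_ON,
    	-3					/* end marker */
    };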
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* column address */
- write_reg(par, 0x2a, xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
- /* Row address */
- write_reg(par, 0x2b, ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
- /* memory write */
- write_reg(par, 0x2c);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define HFLIP 0x01
#define VFLIP 0x02
-#define ROWxCOL 0x20
+#define ROW_X_COL 0x20
static int set_var(struct fbtft_par *par)
{
switch (par->info->var.rotate) {
case 270:
- write_reg(par, 0x36, ROWxCOL | HFLIP | VFLIP | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ ROW_X_COL | HFLIP | VFLIP | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, VFLIP | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ VFLIP | (par->bgr << 3));
break;
case 90:
- write_reg(par, 0x36, ROWxCOL | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ ROW_X_COL | (par->bgr << 3));
break;
default:
- write_reg(par, 0x36, HFLIP | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ HFLIP | (par->bgr << 3));
break;
}
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index d9dfff68159b..66210a7137fc 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9486 LCD Controller
*
* Copyright (C) 2014 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -25,14 +17,13 @@
#define HEIGHT 480
/* this init sequence matches PiScreen */
-static int default_init_sequence[] = {
+static const s16 default_init_sequence[] = {
/* Interface Mode Control */
-1, 0xb0, 0x0,
- /* Sleep OUT */
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 250,
/* Interface Pixel Format */
- -1, 0x3A, 0x55,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
/* Power Control 3 */
-1, 0xC2, 0x44,
/* VCOM Control 1 */
@@ -46,40 +37,41 @@ static int default_init_sequence[] = {
/* Digital Gamma Control 1 */
-1, 0xE2, 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00,
- /* Sleep OUT */
- -1, 0x11,
- /* Display ON */
- -1, 0x29,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
/* end marker */
-3
};
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static int set_var(struct fbtft_par *par)
{
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, 0x80 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0x80 | (par->bgr << 3));
break;
case 90:
- write_reg(par, 0x36, 0x20 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0x20 | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, 0x40 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0x40 | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, 0xE0 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0xE0 | (par->bgr << 3));
break;
default:
break;
diff --git a/drivers/staging/fbtft/fb_pcd8544.c b/drivers/staging/fbtft/fb_pcd8544.c
index 828174a566a3..08f8a4bb8772 100644
--- a/drivers/staging/fbtft/fb_pcd8544.c
+++ b/drivers/staging/fbtft/fb_pcd8544.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the PCD8544 LCD Controller
*
@@ -5,22 +6,12 @@
* Any pixel value except 0 turns the pixel on.
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -32,12 +23,12 @@
#define TXBUFLEN (84 * 6)
#define DEFAULT_GAMMA "40" /* gamma controls the contrast in this driver */
-static unsigned tc;
-module_param(tc, uint, 0);
+static unsigned int tc;
+module_param(tc, uint, 0000);
MODULE_PARM_DESC(tc, "TC[1:0] Temperature coefficient: 0-3 (default: 0)");
-static unsigned bs = 4;
-module_param(bs, uint, 0);
+static unsigned int bs = 4;
+module_param(bs, uint, 0000);
MODULE_PARM_DESC(bs, "BS[2:0] Bias voltage level: 0-7 (default: 4)");
static int init_display(struct fbtft_par *par)
@@ -112,13 +103,11 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
- u16 *vmem16 = (u16 *)par->info->screen_base;
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
u8 *buf = par->txbuf.buf;
int x, y, i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
for (x = 0; x < 84; x++) {
for (y = 0; y < 6; y++) {
*buf = 0x00;
@@ -130,7 +119,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
/* Write data */
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, 6 * 84);
if (ret < 0)
dev_err(par->info->device, "write failed and returned: %d\n",
@@ -139,7 +128,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
return ret;
}
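
The gpio_set_value() -> gpiod_set_value() switch above is part of the move from the legacy integer GPIO API to GPIO descriptors: the "dc" line is held as a struct gpio_desc (typically obtained with devm_gpiod_get() in the fbtft core) and driven through the gpiod_* helpers. A minimal sketch with an assumed helper name:

    #include <linux/gpio/consumer.h>

    static void set_data_mode(struct gpio_desc *dc)
    {
    	gpiod_set_value(dc, 1);	/* 1 = data, 0 = command for these panels */
    }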
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
/* apply mask */
curves[0] &= 0x7F;
@@ -168,10 +157,10 @@ static struct fbtft_display display = {
.backlight = 1,
};
-FBTFT_REGISTER_DRIVER(DRVNAME, "philips,pdc8544", &display);
+FBTFT_REGISTER_DRIVER(DRVNAME, "philips,pcd8544", &display);
MODULE_ALIAS("spi:" DRVNAME);
-MODULE_ALIAS("spi:pdc8544");
+MODULE_ALIAS("spi:pcd8544");
MODULE_DESCRIPTION("FB driver for the PCD8544 LCD Controller");
MODULE_AUTHOR("Noralf Tronnes");
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index 5c1d93d425d0..0ab1de6647d0 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FBTFT driver for the RA8875 LCD Controller
* Copyright by Pf@nne & NOTRO
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -18,7 +9,7 @@
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include "fbtft.h"
#define DRVNAME "fb_ra8875"
@@ -33,7 +24,7 @@ static int write_spi(struct fbtft_par *par, void *buf, size_t len)
struct spi_message m;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
if (!par->spi) {
dev_err(par->info->device,
@@ -42,24 +33,13 @@ static int write_spi(struct fbtft_par *par, void *buf, size_t len)
}
spi_message_init(&m);
- if (par->txbuf.dma && buf == par->txbuf.buf) {
- t.tx_dma = par->txbuf.dma;
- m.is_dma_mapped = 1;
- }
spi_message_add_tail(&t, &m);
return spi_sync(par->spi, &m);
}
static int init_display(struct fbtft_par *par)
{
- gpio_set_value(par->gpio.dc, 1);
-
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s()\n", __func__);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "display size %dx%d\n",
- par->info->var.xres,
- par->info->var.yres);
+ gpiod_set_value(par->gpio.dc, 1);
par->fbtftops.reset(par);
@@ -204,7 +184,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
{
va_list args;
int i, ret;
- u8 *buf = (u8 *)par->buf;
+ u8 *buf = par->buf;
/* slow down spi-speed for writing registers */
par->fbtftops.write = write_spi;
@@ -215,7 +195,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
buf[i] = (u8)va_arg(args, unsigned int);
va_end(args);
fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device,
- u8, buf, len, "%s: ", __func__);
+ u8, buf, len, "%s: ", __func__);
}
va_start(args, len);
@@ -257,7 +237,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- u16 *txbuf16 = (u16 *)par->txbuf.buf;
+ __be16 *txbuf16;
size_t remain;
size_t to_copy;
size_t tx_array_size;
@@ -266,18 +246,18 @@ static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
size_t startbyte_size = 0;
fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
+ __func__, offset, len);
remain = len / 2;
- vmem16 = (u16 *)(par->info->screen_base + offset);
+ vmem16 = (u16 *)(par->info->screen_buffer + offset);
tx_array_size = par->txbuf.len / 2;
- txbuf16 = (u16 *)(par->txbuf.buf + 1);
- tx_array_size -= 2;
- *(u8 *)(par->txbuf.buf) = 0x00;
- startbyte_size = 1;
+ txbuf16 = par->txbuf.buf + 1;
+ tx_array_size -= 2;
+ *(u8 *)(par->txbuf.buf) = 0x00;
+ startbyte_size = 1;
while (remain) {
- to_copy = remain > tx_array_size ? tx_array_size : remain;
+ to_copy = min(tx_array_size, remain);
dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
to_copy, remain - to_copy);
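
The min() call above replaces the open-coded ternary; the surrounding loop is the usual chunked-transfer pattern, sketched in skeleton form below (the helper is illustrative, and min() is the type-checked kernel macro from <linux/minmax.h>):

    static void send_in_chunks(size_t remain, size_t tx_array_size)
    {
    	size_t to_copy;

    	while (remain) {
    		to_copy = min(tx_array_size, remain);
    		/* ...convert to_copy pixels into the tx buffer and write them out... */
    		remain -= to_copy;
    	}
    }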
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index da85057eb3e0..d3d6871d8c47 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -1,48 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the S6D02A1 LCD Controller
*
* Based on fb_st7735r.c by Noralf Tronnes
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
#define DRVNAME "fb_s6d02a1"
-static int default_init_sequence[] = {
-
+static const s16 default_init_sequence[] = {
-1, 0xf0, 0x5a, 0x5a,
-1, 0xfc, 0x5a, 0x5a,
- -1, 0xfa, 0x02, 0x1f, 0x00, 0x10, 0x22, 0x30, 0x38, 0x3A, 0x3A, 0x3A, 0x3A, 0x3A, 0x3d, 0x02, 0x01,
+ -1, 0xfa, 0x02, 0x1f, 0x00, 0x10, 0x22, 0x30, 0x38,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x3A, 0x3d, 0x02, 0x01,
- -1, 0xfb, 0x21, 0x00, 0x02, 0x04, 0x07, 0x0a, 0x0b, 0x0c, 0x0c, 0x16, 0x1e, 0x30, 0x3f, 0x01, 0x02,
+ -1, 0xfb, 0x21, 0x00, 0x02, 0x04, 0x07, 0x0a, 0x0b,
+ 0x0c, 0x0c, 0x16, 0x1e, 0x30, 0x3f, 0x01, 0x02,
/* power setting sequence */
- -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x01, 0x01, 0x00, 0x1f, 0x1f,
+ -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x01,
+ 0x01, 0x00, 0x1f, 0x1f,
- -1, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+ -1, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x3f,
+ 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
- -1, 0xf5, 0x00, 0x70, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x66, 0x06,
+ -1, 0xf5, 0x00, 0x70, 0x66, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x6d, 0x66, 0x06,
- -1, 0xf6, 0x02, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x01, 0x00,
+ -1, 0xf6, 0x02, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x06, 0x01, 0x00,
- -1, 0xf2, 0x00, 0x01, 0x03, 0x08, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x04, 0x08, 0x08,
+ -1, 0xf2, 0x00, 0x01, 0x03, 0x08, 0x08, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x04, 0x08, 0x08,
-1, 0xf8, 0x11,
@@ -50,7 +48,7 @@ static int default_init_sequence[] = {
-1, 0xf3, 0x00, 0x00,
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 50,
-1, 0xf3, 0x00, 0x01,
@@ -62,7 +60,8 @@ static int default_init_sequence[] = {
-1, 0xf3, 0x00, 0x0f,
-2, 50,
- -1, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+ -1, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3f, 0x3f,
+ 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
-2, 50,
-1, 0xf3, 0x00, 0x1f,
@@ -73,24 +72,26 @@ static int default_init_sequence[] = {
-1, 0xf3, 0x00, 0xff,
-2, 50,
- -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x01, 0x00, 0x16, 0x16,
+ -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00,
+ 0x01, 0x00, 0x16, 0x16,
- -1, 0xf4, 0x00, 0x09, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+ -1, 0xf4, 0x00, 0x09, 0x00, 0x00, 0x00, 0x3f, 0x3f,
+ 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
/* initializing sequence */
- -1, 0x36, 0x08,
+ -1, MIPI_DCS_SET_ADDRESS_MODE, 0x08,
- -1, 0x35, 0x00,
+ -1, MIPI_DCS_SET_TEAR_ON, 0x00,
- -1, 0x3a, 0x05,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x05,
- /* gamma setting sequence */
- -1, 0x26, 0x01, /* preset gamma curves, possible values 0x01, 0x02, 0x04, 0x08 */
+ /* gamma setting - possible values 0x01, 0x02, 0x04, 0x08 */
+ -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
-2, 150,
- -1, 0x29,
- -1, 0x2c,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
+ -1, MIPI_DCS_WRITE_MEMORY_START,
/* end marker */
-3
@@ -98,14 +99,13 @@ static int default_init_sequence[] = {
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define MY BIT(7)
@@ -113,24 +113,30 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
#define MV BIT(5)
static int set_var(struct fbtft_par *par)
{
- /* MADCTL - Memory data access control
- RGB/BGR:
- 1. Mode selection pin SRGB
- RGB H/W pin for color filter setting: 0=RGB, 1=BGR
- 2. MADCTL RGB bit
- RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
+ /*
+ * Memory data access control (0x36h)
+ * RGB/BGR:
+ * 1. Mode selection pin SRGB
+ * RGB H/W pin for color filter setting: 0=RGB, 1=BGR
+ * 2. MADCTL RGB bit
+ * RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR
+ */
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MY | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MY | MV | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ par->bgr << 3);
break;
case 90:
- write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MV | (par->bgr << 3));
break;
}
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index d6ae76b318ad..62f27172f844 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the S6D1121 LCD Controller
*
@@ -6,22 +7,11 @@
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -38,9 +28,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
-
/* Initialization sequence from Lib_UTFT */
write_reg(par, 0x0011, 0x2004);
@@ -125,14 +112,14 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- PKP0 PKP1 PKP2 PKP3 PKP4 PKP5 PKP6 PKP7 PKP8 PKP9 PKP10 PKP11 VRP0 VRP1
- PKN0 PKN1 PKN2 PKN3 PKN4 PKN5 PKN6 PKN7 PRN8 PRN9 PRN10 PRN11 VRN0 VRN1
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma string format:
+ * PKP0 PKP1 PKP2 PKP3 PKP4 PKP5 PKP6 PKP7 PKP8 PKP9 PKP10 PKP11 VRP0 VRP1
+ * PKN0 PKN1 PKN2 PKN3 PKN4 PKN5 PKN6 PKN7 PRN8 PRN9 PRN10 PRN11 VRN0 VRN1
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
0x3f, 0x3f, 0x1f, 0x1f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x1f, 0x1f,
@@ -163,6 +150,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_seps525.c b/drivers/staging/fbtft/fb_seps525.c
new file mode 100644
index 000000000000..46c257308b49
--- /dev/null
+++ b/drivers/staging/fbtft/fb_seps525.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FB driver for the NHD-1.69-160128UGC3 (Newhaven Display International, Inc.)
+ * using the SEPS525 (Syncoam) LCD Controller
+ *
+ * Copyright (C) 2016 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_seps525"
+#define WIDTH 160
+#define HEIGHT 128
+
+#define SEPS525_INDEX 0x00
+#define SEPS525_STATUS_RD 0x01
+#define SEPS525_OSC_CTL 0x02
+#define SEPS525_IREF 0x80
+#define SEPS525_CLOCK_DIV 0x03
+#define SEPS525_REDUCE_CURRENT 0x04
+#define SEPS525_SOFT_RST 0x05
+#define SEPS525_DISP_ONOFF 0x06
+#define SEPS525_PRECHARGE_TIME_R 0x08
+#define SEPS525_PRECHARGE_TIME_G 0x09
+#define SEPS525_PRECHARGE_TIME_B 0x0A
+#define SEPS525_PRECHARGE_CURRENT_R 0x0B
+#define SEPS525_PRECHARGE_CURRENT_G 0x0C
+#define SEPS525_PRECHARGE_CURRENT_B 0x0D
+#define SEPS525_DRIVING_CURRENT_R 0x10
+#define SEPS525_DRIVING_CURRENT_G 0x11
+#define SEPS525_DRIVING_CURRENT_B 0x12
+#define SEPS525_DISPLAYMODE_SET 0x13
+#define SEPS525_RGBIF 0x14
+#define SEPS525_RGB_POL 0x15
+#define SEPS525_MEMORY_WRITEMODE 0x16
+#define SEPS525_MX1_ADDR 0x17
+#define SEPS525_MX2_ADDR 0x18
+#define SEPS525_MY1_ADDR 0x19
+#define SEPS525_MY2_ADDR 0x1A
+#define SEPS525_MEMORY_ACCESS_POINTER_X 0x20
+#define SEPS525_MEMORY_ACCESS_POINTER_Y 0x21
+#define SEPS525_DDRAM_DATA_ACCESS_PORT 0x22
+#define SEPS525_GRAY_SCALE_TABLE_INDEX 0x50
+#define SEPS525_GRAY_SCALE_TABLE_DATA 0x51
+#define SEPS525_DUTY 0x28
+#define SEPS525_DSL 0x29
+#define SEPS525_D1_DDRAM_FAC 0x2E
+#define SEPS525_D1_DDRAM_FAR 0x2F
+#define SEPS525_D2_DDRAM_SAC 0x31
+#define SEPS525_D2_DDRAM_SAR 0x32
+#define SEPS525_SCR1_FX1 0x33
+#define SEPS525_SCR1_FX2 0x34
+#define SEPS525_SCR1_FY1 0x35
+#define SEPS525_SCR1_FY2 0x36
+#define SEPS525_SCR2_SX1 0x37
+#define SEPS525_SCR2_SX2 0x38
+#define SEPS525_SCR2_SY1 0x39
+#define SEPS525_SCR2_SY2 0x3A
+#define SEPS525_SCREEN_SAVER_CONTEROL 0x3B
+#define SEPS525_SS_SLEEP_TIMER 0x3C
+#define SEPS525_SCREEN_SAVER_MODE 0x3D
+#define SEPS525_SS_SCR1_FU 0x3E
+#define SEPS525_SS_SCR1_MXY 0x3F
+#define SEPS525_SS_SCR2_FU 0x40
+#define SEPS525_SS_SCR2_MXY 0x41
+#define SEPS525_MOVING_DIRECTION 0x42
+#define SEPS525_SS_SCR2_SX1 0x47
+#define SEPS525_SS_SCR2_SX2 0x48
+#define SEPS525_SS_SCR2_SY1 0x49
+#define SEPS525_SS_SCR2_SY2 0x4A
+
+/* SEPS525_DISPLAYMODE_SET */
+#define MODE_SWAP_BGR BIT(7)
+#define MODE_SM BIT(6)
+#define MODE_RD BIT(5)
+#define MODE_CD BIT(4)
+
+#define seps525_use_window 0 /* FBTFT doesn't really use it today */
+
+/* Init sequence taken from: Arduino Library for the Adafruit 2.2" display */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ usleep_range(1000, 5000);
+
+ /* Disable Oscillator Power Down */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x03);
+ usleep_range(1000, 5000);
+ /* Set Normal Driving Current */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x00);
+ usleep_range(1000, 5000);
+
+ write_reg(par, SEPS525_SCREEN_SAVER_CONTEROL, 0x00);
+ /* Set EXPORT1 Pin at Internal Clock */
+ write_reg(par, SEPS525_OSC_CTL, 0x01);
+ /* Set Clock as 120 Frames/Sec */
+ write_reg(par, SEPS525_CLOCK_DIV, 0x90);
+ /* Set Reference Voltage Controlled by External Resister */
+ write_reg(par, SEPS525_IREF, 0x01);
+
+ /* precharge time R G B */
+ write_reg(par, SEPS525_PRECHARGE_TIME_R, 0x04);
+ write_reg(par, SEPS525_PRECHARGE_TIME_G, 0x05);
+ write_reg(par, SEPS525_PRECHARGE_TIME_B, 0x05);
+
+ /* precharge current R G B (uA) */
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_R, 0x9D);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_G, 0x8C);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_B, 0x57);
+
+ /* driving current R G B (uA) */
+ write_reg(par, SEPS525_DRIVING_CURRENT_R, 0x56);
+ write_reg(par, SEPS525_DRIVING_CURRENT_G, 0x4D);
+ write_reg(par, SEPS525_DRIVING_CURRENT_B, 0x46);
+ /* Set Color Sequence */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, 0xA0);
+ write_reg(par, SEPS525_RGBIF, 0x01); /* Set MCU Interface Mode */
+ /* Set Memory Write Mode */
+ write_reg(par, SEPS525_MEMORY_WRITEMODE, 0x66);
+ write_reg(par, SEPS525_DUTY, 0x7F); /* 1/128 Duty (0x0F~0x7F) */
+ /* Set Mapping RAM Display Start Line (0x00~0x7F) */
+ write_reg(par, SEPS525_DSL, 0x00);
+ write_reg(par, SEPS525_DISP_ONOFF, 0x01); /* Display On (0x00/0x01) */
+ /* Set All Internal Register Value as Normal Mode */
+ write_reg(par, SEPS525_SOFT_RST, 0x00);
+ /* Set RGB Interface Polarity as Active Low */
+ write_reg(par, SEPS525_RGB_POL, 0x00);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
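
A short note on the usleep_range() calls in init_display() above: for waits in roughly the 10 us to 20 ms range the kernel prefers usleep_range() over busy-wait delays, since this code runs in process context and the min/max window lets the timer subsystem coalesce wakeups. Illustrative sketch (the helper name is an assumption):

    #include <linux/delay.h>

    static void settle_after_write(void)
    {
    	usleep_range(1000, 5000);	/* sleep 1-5 ms instead of spinning */
    }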
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ if (seps525_use_window) {
+ /* Set Window Xs,Ys Xe,Ye*/
+ write_reg(par, SEPS525_MX1_ADDR, xs);
+ write_reg(par, SEPS525_MX2_ADDR, xe);
+ write_reg(par, SEPS525_MY1_ADDR, ys);
+ write_reg(par, SEPS525_MY2_ADDR, ye);
+ }
+ /* start position X,Y */
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_X, xs);
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_Y, ys);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ u8 val;
+
+ switch (par->info->var.rotate) {
+ case 0:
+ val = 0;
+ break;
+ case 180:
+ val = MODE_RD | MODE_CD;
+ break;
+ case 90:
+ case 270:
+
+ default:
+ return -EINVAL;
+ }
+ /* Memory Access Control */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, val |
+ (par->bgr ? MODE_SWAP_BGR : 0));
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "syncoam,seps525", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:seps525");
+MODULE_ALIAS("platform:seps525");
+
+MODULE_DESCRIPTION("FB driver for the SEPS525 LCD Controller");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
new file mode 100644
index 000000000000..e4c50c1ffed0
--- /dev/null
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * FB driver for the SH1106 OLED Controller
+ * Based on the SSD1306 driver by Noralf Tronnes
+ *
+ * Copyright (C) 2017 Heiner Kallweit
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_sh1106"
+#define WIDTH 128
+#define HEIGHT 64
+
+/* Init sequence based on the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+ if (!par->info->var.xres || par->info->var.xres > WIDTH ||
+ !par->info->var.yres || par->info->var.yres > HEIGHT ||
+ par->info->var.yres % 8) {
+ dev_err(par->info->device, "Invalid screen size\n");
+ return -EINVAL;
+ }
+
+ if (par->info->var.rotate) {
+ dev_err(par->info->device, "Display rotation not supported\n");
+ return -EINVAL;
+ }
+
+ par->fbtftops.reset(par);
+
+ /* Set Display OFF */
+ write_reg(par, 0xAE);
+
+ /* Set Display Clock Divide Ratio/ Oscillator Frequency */
+ write_reg(par, 0xD5, 0x80);
+
+ /* Set Multiplex Ratio */
+ write_reg(par, 0xA8, par->info->var.yres - 1);
+
+ /* Set Display Offset */
+ write_reg(par, 0xD3, 0x00);
+
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+
+ /* Set Segment Re-map */
+ /* column address 127 is mapped to SEG0 */
+ write_reg(par, 0xA0 | 0x1);
+
+ /* Set COM Output Scan Direction */
+ /* remapped mode. Scan from COM[N-1] to COM0 */
+ write_reg(par, 0xC8);
+
+ /* Set COM Pins Hardware Configuration */
+ if (par->info->var.yres == 64)
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0xDA, 0x12);
+ else if (par->info->var.yres == 48)
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0xDA, 0x12);
+ else
+ /* A[4]=0b, Sequential COM pin configuration */
+ write_reg(par, 0xDA, 0x02);
+
+ /* Set Pre-charge Period */
+ write_reg(par, 0xD9, 0xF1);
+
+ /* Set VCOMH Deselect Level */
+ write_reg(par, 0xDB, 0x40);
+
+ /* Set Display ON */
+ write_reg(par, 0xAF);
+
+ msleep(150);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ write_reg(par, on ? 0xAE : 0xAF);
+
+ return 0;
+}
+
+/* Gamma is used to control Contrast */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
+{
+ /* apply mask */
+ curves[0] &= 0xFF;
+
+ /* Set Contrast Control for BANK0 */
+ write_reg(par, 0x81, curves[0]);
+
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u32 xres = par->info->var.xres;
+ int page, page_start, page_end, x, i, ret;
+ u8 *buf = par->txbuf.buf;
+
+	/* offset is a byte offset into vmem, whose elements are 2 bytes (16bpp) */
+ page_start = offset / (8 * 2 * xres);
+ page_end = DIV_ROUND_UP(offset + len, 8 * 2 * xres);
+
+ for (page = page_start; page < page_end; page++) {
+		/* set page; start at column 2 because the SH1106 RAM is 132 columns wide */
+ write_reg(par, 0xb0 | page, 0x00 | 2, 0x10 | 0);
+
+ memset(buf, 0, xres);
+ for (x = 0; x < xres; x++)
+ for (i = 0; i < 8; i++)
+ if (vmem16[(page * 8 + i) * xres + x])
+ buf[x] |= BIT(i);
+
+ /* Write data */
+ ret = fbtft_write_buf_dc(par, buf, xres, 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
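The page arithmetic in write_vmem() above maps a byte range of the 16bpp shadow buffer onto 8-row display pages. A minimal userspace sketch of that mapping follows (not part of the patch; the 128-pixel width and the dirty region are made-up values, and DIV_ROUND_UP is redefined locally so it builds outside the kernel):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int xres = 128;		/* panel width in pixels */
	size_t offset = 10 * xres * 2;		/* dirty region starts at row 10 */
	size_t len = 11 * xres * 2;		/* and spans 11 rows */
	unsigned int page_bytes = 8 * 2 * xres;	/* one page = 8 rows of u16 */
	unsigned int page_start = offset / page_bytes;
	unsigned int page_end = DIV_ROUND_UP(offset + len, page_bytes);

	/* prints "pages 1..2", i.e. rows 8..23 get rewritten */
	printf("pages %u..%u\n", page_start, page_end - 1);
	return 0;
}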
+
+static void write_register(struct fbtft_par *par, int len, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, len);
+
+ for (i = 0; i < len; i++)
+ par->buf[i] = va_arg(args, unsigned int);
+
+ /* keep DC low for all command bytes to transfer */
+ fbtft_write_buf_dc(par, par->buf, len, 0);
+
+ va_end(args);
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = WIDTH,
+ .gamma_num = 1,
+ .gamma_len = 1,
+ /* set default contrast to 0xcd = 80% */
+ .gamma = "cd",
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .write_register = write_register,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .blank = blank,
+ .set_gamma = set_gamma,
+ },
+};
+
+FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "sinowealth", "sh1106", &display);
+
+MODULE_DESCRIPTION("SH1106 OLED Driver");
+MODULE_AUTHOR("Heiner Kallweit");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index 1162c08613fa..255a6d21ca8e 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -1,25 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SSD1289 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
*
* Init sequence taken from ITDB02_Graph16.cpp - (C)2010-2011 Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include "fbtft.h"
@@ -29,17 +19,14 @@
#define DEFAULT_GAMMA "02 03 2 5 7 7 4 2 4 2\n" \
"02 03 2 5 7 5 4 2 4 2"
-static unsigned reg11 = 0x6040;
-module_param(reg11, uint, 0);
+static unsigned int reg11 = 0x6040;
+module_param(reg11, uint, 0000);
MODULE_PARM_DESC(reg11, "Register 11h value");
static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
-
write_reg(par, 0x00, 0x0001);
write_reg(par, 0x03, 0xA8A4);
write_reg(par, 0x0C, 0x0000);
@@ -47,7 +34,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x0E, 0x2B00);
write_reg(par, 0x1E, 0x00B7);
write_reg(par, 0x01,
- (1 << 13) | (par->bgr << 11) | (1 << 9) | (HEIGHT - 1));
+ BIT(13) | (par->bgr << 11) | BIT(9) | (HEIGHT - 1));
write_reg(par, 0x02, 0x0600);
write_reg(par, 0x10, 0x0000);
write_reg(par, 0x05, 0x0000);
@@ -106,9 +93,6 @@ static int set_var(struct fbtft_par *par)
{
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register 11h */
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s: skipping since custom init_display() is used\n",
- __func__);
return 0;
}
@@ -131,14 +115,14 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRP0 VRP1 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 PKP5
- VRN0 VRN1 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 PKN5
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma string format:
+ * VRP0 VRP1 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 PKP5
+ * VRN0 VRN1 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 PKN5
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
};
@@ -162,6 +146,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ssd1305.c b/drivers/staging/fbtft/fb_ssd1305.c
new file mode 100644
index 000000000000..020fe48fed0b
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1305.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * FB driver for the SSD1305 OLED Controller
+ *
+ * based on SSD1306 driver by Noralf Tronnes
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1305"
+
+#define WIDTH 128
+#define HEIGHT 64
+
+/*
+ * write_reg() caveat:
+ *
+ * This doesn't work because D/C has to be LOW for both values:
+ * write_reg(par, val1, val2);
+ *
+ * Do it like this:
+ * write_reg(par, val1);
+ * write_reg(par, val2);
+ */
+
+/* Init sequence taken from the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ if (par->gamma.curves[0] == 0) {
+ mutex_lock(&par->gamma.lock);
+ if (par->info->var.yres == 64)
+ par->gamma.curves[0] = 0xCF;
+ else
+ par->gamma.curves[0] = 0x8F;
+ mutex_unlock(&par->gamma.lock);
+ }
+
+ /* Set Display OFF */
+ write_reg(par, 0xAE);
+
+ /* Set Display Clock Divide Ratio/ Oscillator Frequency */
+ write_reg(par, 0xD5);
+ write_reg(par, 0x80);
+
+ /* Set Multiplex Ratio */
+ write_reg(par, 0xA8);
+ if (par->info->var.yres == 64)
+ write_reg(par, 0x3F);
+ else
+ write_reg(par, 0x1F);
+
+ /* Set Display Offset */
+ write_reg(par, 0xD3);
+ write_reg(par, 0x0);
+
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+
+ /* Charge Pump Setting */
+ write_reg(par, 0x8D);
+ /* A[2] = 1b, Enable charge pump during display on */
+ write_reg(par, 0x14);
+
+ /* Set Memory Addressing Mode */
+ write_reg(par, 0x20);
+ /* Vertical addressing mode */
+ write_reg(par, 0x01);
+
+ /*
+ * Set Segment Re-map
+ * column address 127 is mapped to SEG0
+ */
+ write_reg(par, 0xA0 | ((par->info->var.rotate == 180) ? 0x0 : 0x1));
+
+ /*
+ * Set COM Output Scan Direction
+ * remapped mode. Scan from COM[N-1] to COM0
+ */
+ write_reg(par, ((par->info->var.rotate == 180) ? 0xC8 : 0xC0));
+
+ /* Set COM Pins Hardware Configuration */
+ write_reg(par, 0xDA);
+ if (par->info->var.yres == 64) {
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0x12);
+ } else {
+ /* A[4]=0b, Sequential COM pin configuration */
+ write_reg(par, 0x02);
+ }
+
+ /* Set Pre-charge Period */
+ write_reg(par, 0xD9);
+ write_reg(par, 0xF1);
+
+ /*
+ * Entire Display ON
+ * Resume to RAM content display. Output follows RAM content
+ */
+ write_reg(par, 0xA4);
+
+ /*
+ * Set Normal Display
+ * 0 in RAM: OFF in display panel
+ * 1 in RAM: ON in display panel
+ */
+ write_reg(par, 0xA6);
+
+ /* Set Display ON */
+ write_reg(par, 0xAF);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ /* Set Lower Column Start Address for Page Addressing Mode */
+ write_reg(par, 0x00 | ((par->info->var.rotate == 180) ? 0x0 : 0x4));
+ /* Set Higher Column Start Address for Page Addressing Mode */
+ write_reg(par, 0x10 | 0x0);
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ if (on)
+ write_reg(par, 0xAE);
+ else
+ write_reg(par, 0xAF);
+ return 0;
+}
+
+/* Gamma is used to control Contrast */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
+{
+ curves[0] &= 0xFF;
+ /* Set Contrast Control for BANK0 */
+ write_reg(par, 0x81);
+ write_reg(par, curves[0]);
+
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u8 *buf = par->txbuf.buf;
+ int x, y, i;
+ int ret;
+
+ for (x = 0; x < par->info->var.xres; x++) {
+ for (y = 0; y < par->info->var.yres / 8; y++) {
+ *buf = 0x00;
+ for (i = 0; i < 8; i++)
+ *buf |= (vmem16[(y * 8 + i) *
+ par->info->var.xres + x] ?
+ 1 : 0) << i;
+ buf++;
+ }
+ }
+
+ /* Write data */
+ gpiod_set_value(par->gpio.dc, 1);
+ ret = par->fbtftops.write(par, par->txbuf.buf,
+ par->info->var.xres * par->info->var.yres /
+ 8);
+ if (ret < 0)
+ dev_err(par->info->device, "write failed and returned: %d\n",
+ ret);
+ return ret;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = WIDTH * HEIGHT / 8,
+ .gamma_num = 1,
+ .gamma_len = 1,
+ .gamma = "00",
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .blank = blank,
+ .set_gamma = set_gamma,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1305", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1305");
+MODULE_ALIAS("platform:ssd1305");
+
+MODULE_DESCRIPTION("SSD1305 OLED Driver");
+MODULE_AUTHOR("Alexey Mednyy");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index 7568e943ec0a..478d710469b9 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -1,23 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SSD1306 OLED Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -27,15 +18,15 @@
#define HEIGHT 64
/*
- write_reg() caveat:
-
- This doesn't work because D/C has to be LOW for both values:
- write_reg(par, val1, val2);
-
- Do it like this:
- write_reg(par, val1);
- write_reg(par, val2);
-*/
+ * write_reg() caveat:
+ *
+ * This doesn't work because D/C has to be LOW for both values:
+ * write_reg(par, val1, val2);
+ *
+ * Do it like this:
+ * write_reg(par, val1);
+ * write_reg(par, val2);
+ */
/* Init sequence taken from the Adafruit SSD1306 Arduino library */
static int init_display(struct fbtft_par *par)
@@ -62,6 +53,8 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xA8);
if (par->info->var.yres == 64)
write_reg(par, 0x3F);
+ else if (par->info->var.yres == 48)
+ write_reg(par, 0x2F);
else
write_reg(par, 0x1F);
@@ -82,7 +75,7 @@ static int init_display(struct fbtft_par *par)
/* Vertical addressing mode */
write_reg(par, 0x01);
- /*Set Segment Re-map */
+ /* Set Segment Re-map */
/* column address 127 is mapped to SEG0 */
write_reg(par, 0xA0 | 0x1);
@@ -95,6 +88,9 @@ static int init_display(struct fbtft_par *par)
if (par->info->var.yres == 64)
/* A[4]=1b, Alternative COM pin configuration */
write_reg(par, 0x12);
+ else if (par->info->var.yres == 48)
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0x12);
else
/* A[4]=0b, Sequential COM pin configuration */
write_reg(par, 0x02);
@@ -113,8 +109,9 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xA4);
/* Set Normal Display
- 0 in RAM: OFF in display panel
- 1 in RAM: ON in display panel */
+ * 0 in RAM: OFF in display panel
+ * 1 in RAM: ON in display panel
+ */
write_reg(par, 0xA6);
/* Set Display ON */
@@ -123,6 +120,19 @@ static int init_display(struct fbtft_par *par)
return 0;
}
+static void set_addr_win_64x48(struct fbtft_par *par)
+{
+ /* Set Column Address */
+ write_reg(par, 0x21);
+ write_reg(par, 0x20);
+ write_reg(par, 0x5F);
+
+ /* Set Page Address */
+ write_reg(par, 0x22);
+ write_reg(par, 0x0);
+ write_reg(par, 0x5);
+}
+
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
/* Set Lower Column Start Address for Page Addressing Mode */
@@ -131,13 +141,13 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
write_reg(par, 0x10 | 0x0);
/* Set Display Start Line */
write_reg(par, 0x40 | 0x0);
+
+ if (par->info->var.xres == 64 && par->info->var.yres == 48)
+ set_addr_win_64x48(par);
}
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xAE);
else
@@ -146,7 +156,7 @@ static int blank(struct fbtft_par *par, bool on)
}
/* Gamma is used to control Contrast */
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
/* apply mask */
curves[0] &= 0xFF;
@@ -160,29 +170,26 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
- u16 *vmem16 = (u16 *)par->info->screen_base;
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u32 xres = par->info->var.xres;
+ u32 yres = par->info->var.yres;
u8 *buf = par->txbuf.buf;
int x, y, i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
- for (x = 0; x < par->info->var.xres; x++) {
- for (y = 0; y < par->info->var.yres/8; y++) {
+ for (x = 0; x < xres; x++) {
+ for (y = 0; y < yres / 8; y++) {
*buf = 0x00;
for (i = 0; i < 8; i++)
- *buf |= (vmem16[(y * 8 + i) *
- par->info->var.xres + x] ?
- 1 : 0) << i;
+ if (vmem16[(y * 8 + i) * xres + x])
+ *buf |= BIT(i);
buf++;
}
}
/* Write data */
- gpio_set_value(par->gpio.dc, 1);
- ret = par->fbtftops.write(par, par->txbuf.buf,
- par->info->var.xres * par->info->var.yres /
- 8);
+ gpiod_set_value(par->gpio.dc, 1);
+ ret = par->fbtftops.write(par, par->txbuf.buf, xres * yres / 8);
if (ret < 0)
dev_err(par->info->device, "write failed and returned: %d\n",
ret);
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
new file mode 100644
index 000000000000..256b0b87a930
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * FB driver for the SSD1325 OLED Controller
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1325"
+
+#define WIDTH 128
+#define HEIGHT 64
+#define GAMMA_NUM 1
+#define GAMMA_LEN 15
+#define DEFAULT_GAMMA "7 1 1 1 1 2 2 3 3 4 4 5 5 6 6"
+
+/*
+ * write_reg() caveat:
+ *
+ * This doesn't work because D/C has to be LOW for both values:
+ * write_reg(par, val1, val2);
+ *
+ * Do it like this:
+ * write_reg(par, val1);
+ * write_reg(par, val2);
+ */
+
+/* Init sequence taken from the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ write_reg(par, 0xb3);
+ write_reg(par, 0xf0);
+ write_reg(par, 0xae);
+ write_reg(par, 0xa1);
+ write_reg(par, 0x00);
+ write_reg(par, 0xa8);
+ write_reg(par, 0x3f);
+ write_reg(par, 0xa0);
+ write_reg(par, 0x45);
+ write_reg(par, 0xa2);
+ write_reg(par, 0x40);
+ write_reg(par, 0x75);
+ write_reg(par, 0x00);
+ write_reg(par, 0x3f);
+ write_reg(par, 0x15);
+ write_reg(par, 0x00);
+ write_reg(par, 0x7f);
+ write_reg(par, 0xa4);
+ write_reg(par, 0xaf);
+
+ return 0;
+}
+
+static uint8_t rgb565_to_g16(u16 pixel)
+{
+ u16 b = pixel & 0x1f;
+ u16 g = (pixel & (0x3f << 5)) >> 5;
+ u16 r = (pixel & (0x1f << (5 + 6))) >> (5 + 6);
+
+ pixel = (299 * r + 587 * g + 114 * b) / 195;
+ if (pixel > 255)
+ pixel = 255;
+ return (uint8_t)pixel / 16;
+}
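For reference, a standalone userspace sketch of the conversion above (not part of the patch). It reproduces the integer arithmetic of rgb565_to_g16(): the RGB565 channels are weighted with the usual luma factors scaled by 1000, the divisor 195 brings full white (31/63/31) to roughly 255, and the final division by 16 yields the 4-bit grayscale level.

#include <stdio.h>
#include <stdint.h>

static uint8_t rgb565_to_g16(uint16_t pixel)
{
	uint16_t b = pixel & 0x1f;
	uint16_t g = (pixel >> 5) & 0x3f;
	uint16_t r = (pixel >> 11) & 0x1f;
	unsigned int luma = (299 * r + 587 * g + 114 * b) / 195;

	if (luma > 255)
		luma = 255;
	return luma / 16;
}

int main(void)
{
	/* white, red, green, blue -> GS15, GS2, GS11, GS1 */
	uint16_t samples[] = { 0xFFFF, 0xF800, 0x07E0, 0x001F };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("0x%04X -> GS%u\n", (unsigned int)samples[i],
		       (unsigned int)rgb565_to_g16(samples[i]));
	return 0;
}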
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ write_reg(par, 0x75);
+ write_reg(par, 0x00);
+ write_reg(par, 0x3f);
+ write_reg(par, 0x15);
+ write_reg(par, 0x00);
+ write_reg(par, 0x7f);
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ if (on)
+ write_reg(par, 0xAE);
+ else
+ write_reg(par, 0xAF);
+ return 0;
+}
+
+/*
+ * Grayscale Lookup Table
+ * GS1 - GS15
+ * The "Gamma curve" contains the relative values between the entries
+ * in the Lookup table.
+ *
+ * 0 <= Setting of GS1 < Setting of GS2 < Setting of GS3 ..... <
+ * Setting of GS14 < Setting of GS15
+ */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
+{
+ int i;
+
+ for (i = 0; i < GAMMA_LEN; i++) {
+ if (i > 0 && curves[i] < 1) {
+ dev_err(par->info->device,
+ "Illegal value in Grayscale Lookup Table at index %d.\n"
+ "Must be greater than 0\n", i);
+ return -EINVAL;
+ }
+ if (curves[i] > 7) {
+ dev_err(par->info->device,
+				"Illegal value(s) in Grayscale Lookup Table.\n"
+				"At index=%d, the value has exceeded 7\n",
+ i);
+ return -EINVAL;
+ }
+ }
+ write_reg(par, 0xB8);
+ for (i = 0; i < 8; i++)
+ write_reg(par, (curves[i] & 0xFF));
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u8 *buf = par->txbuf.buf;
+ u8 n1;
+ u8 n2;
+ int y, x;
+ int ret;
+
+ for (x = 0; x < par->info->var.xres; x++) {
+ if (x % 2)
+ continue;
+ for (y = 0; y < par->info->var.yres; y++) {
+ n1 = rgb565_to_g16(vmem16[y * par->info->var.xres + x]);
+ n2 = rgb565_to_g16(vmem16
+ [y * par->info->var.xres + x + 1]);
+ *buf = (n1 << 4) | n2;
+ buf++;
+ }
+ }
+
+ gpiod_set_value(par->gpio.dc, 1);
+
+ /* Write data */
+ ret = par->fbtftops.write(par, par->txbuf.buf,
+ par->info->var.xres * par->info->var.yres / 2);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "%s: write failed and returned: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = WIDTH * HEIGHT / 2,
+ .gamma_num = GAMMA_NUM,
+ .gamma_len = GAMMA_LEN,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .blank = blank,
+ .set_gamma = set_gamma,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1325", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1325");
+MODULE_ALIAS("platform:ssd1325");
+
+MODULE_DESCRIPTION("SSD1325 OLED Driver");
+MODULE_AUTHOR("Alexey Mednyy");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 53cb8e9847cf..06b7056d6c71 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -26,7 +27,13 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
write_reg(par, 0xae); /* Display Off */
- write_reg(par, 0xa0, 0x70 | (par->bgr << 2)); /* Set Colour Depth */
+
+ /* Set Column Address Mapping, COM Scan Direction and Colour Depth */
+ if (par->info->var.rotate == 180)
+ write_reg(par, 0xa0, 0x60 | (par->bgr << 2));
+ else
+ write_reg(par, 0xa0, 0x72 | (par->bgr << 2));
+
write_reg(par, 0x72); /* RGB colour */
write_reg(par, 0xa1, 0x00); /* Set Display Start Line */
write_reg(par, 0xa2, 0x00); /* Set Display Offset */
@@ -60,21 +67,21 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
{
va_list args;
int i, ret;
- u8 *buf = (u8 *)par->buf;
+ u8 *buf = par->buf;
if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) {
va_start(args, len);
for (i = 0; i < len; i++)
buf[i] = (u8)va_arg(args, unsigned int);
va_end(args);
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, u8, buf, len, "%s: ", __func__);
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device,
+ u8, buf, len, "%s: ", __func__);
}
va_start(args, len);
*buf = (u8)va_arg(args, unsigned int);
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 0);
+ gpiod_set_value(par->gpio.dc, 0);
ret = par->fbtftops.write(par, par->buf, sizeof(u8));
if (ret < 0) {
va_end(args);
@@ -96,33 +103,32 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
return;
}
}
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
va_end(args);
}
/*
- Grayscale Lookup Table
- GS1 - GS63
- The driver Gamma curve contains the relative values between the entries
- in the Lookup table.
-
- From datasheet:
- 8.8 Gray Scale Decoder
-
- there are total 180 Gamma Settings (Setting 0 to Setting 180)
- available for the Gray Scale table.
-
- The gray scale is defined in incremental way, with reference
- to the length of previous table entry:
- Setting of GS1 has to be >= 0
- Setting of GS2 has to be > Setting of GS1 +1
- Setting of GS3 has to be > Setting of GS2 +1
- :
- Setting of GS63 has to be > Setting of GS62 +1
-
-*/
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Grayscale Lookup Table
+ * GS1 - GS63
+ * The driver Gamma curve contains the relative values between the entries
+ * in the Lookup table.
+ *
+ * From datasheet:
+ * 8.8 Gray Scale Decoder
+ *
+ * there are total 180 Gamma Settings (Setting 0 to Setting 180)
+ * available for the Gray Scale table.
+ *
+ * The gray scale is defined in incremental way, with reference
+ * to the length of previous table entry:
+ * Setting of GS1 has to be >= 0
+ * Setting of GS2 has to be > Setting of GS1 +1
+ * Setting of GS3 has to be > Setting of GS2 +1
+ * :
+ * Setting of GS63 has to be > Setting of GS62 +1
+ *
+ */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
unsigned long tmp[GAMMA_NUM * GAMMA_LEN];
int i, acc = 0;
@@ -130,37 +136,37 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
for (i = 0; i < 63; i++) {
if (i > 0 && curves[i] < 2) {
dev_err(par->info->device,
- "Illegal value in Grayscale Lookup Table at index %d. " \
- "Must be greater than 1\n", i);
+ "Illegal value in Grayscale Lookup Table at index %d. Must be greater than 1\n",
+ i);
return -EINVAL;
}
acc += curves[i];
tmp[i] = acc;
if (acc > 180) {
dev_err(par->info->device,
- "Illegal value(s) in Grayscale Lookup Table. " \
- "At index=%d, the accumulated value has exceeded 180\n", i);
+ "Illegal value(s) in Grayscale Lookup Table. At index=%d, the accumulated value has exceeded 180\n",
+ i);
return -EINVAL;
}
}
write_reg(par, 0xB8,
- tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7],
- tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14], tmp[15],
- tmp[16], tmp[17], tmp[18], tmp[19], tmp[20], tmp[21], tmp[22], tmp[23],
- tmp[24], tmp[25], tmp[26], tmp[27], tmp[28], tmp[29], tmp[30], tmp[31],
- tmp[32], tmp[33], tmp[34], tmp[35], tmp[36], tmp[37], tmp[38], tmp[39],
- tmp[40], tmp[41], tmp[42], tmp[43], tmp[44], tmp[45], tmp[46], tmp[47],
- tmp[48], tmp[49], tmp[50], tmp[51], tmp[52], tmp[53], tmp[54], tmp[55],
- tmp[56], tmp[57], tmp[58], tmp[59], tmp[60], tmp[61], tmp[62]);
+ tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6],
+ tmp[7], tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13],
+ tmp[14], tmp[15], tmp[16], tmp[17], tmp[18], tmp[19], tmp[20],
+ tmp[21], tmp[22], tmp[23], tmp[24], tmp[25], tmp[26], tmp[27],
+ tmp[28], tmp[29], tmp[30], tmp[31], tmp[32], tmp[33], tmp[34],
+ tmp[35], tmp[36], tmp[37], tmp[38], tmp[39], tmp[40], tmp[41],
+ tmp[42], tmp[43], tmp[44], tmp[45], tmp[46], tmp[47], tmp[48],
+ tmp[49], tmp[50], tmp[51], tmp[52], tmp[53], tmp[54], tmp[55],
+ tmp[56], tmp[57], tmp[58], tmp[59], tmp[60], tmp[61],
+ tmp[62]);
return 0;
}
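As an illustration of the rule spelled out in the comment above, the sketch below (userspace, not part of the patch; the flat curve is a made-up example) accumulates a 63-entry increment table the same way set_gamma() does and checks the 2-per-step and 180-total limits.

#include <stdio.h>

int main(void)
{
	unsigned long curves[63], tmp[63];
	int i, acc = 0;

	curves[0] = 1;			/* GS1 increment only needs to be >= 0 */
	for (i = 1; i < 63; i++)
		curves[i] = 2;		/* minimum legal step for GS2..GS63 */

	for (i = 0; i < 63; i++) {
		if (i > 0 && curves[i] < 2)
			return 1;	/* step too small */
		acc += curves[i];
		if (acc > 180)
			return 1;	/* table overflows the 0..180 range */
		tmp[i] = acc;		/* absolute setting sent to the chip */
	}

	/* prints "GS63 = 125", well inside the 180 limit */
	printf("GS63 = %lu\n", tmp[62]);
	return 0;
}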
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
- __func__, on ? "true" : "false");
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index 83867e1593f0..6736b09b2f45 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -1,9 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "fbtft.h"
@@ -25,8 +28,8 @@ static void register_onboard_backlight(struct fbtft_par *par);
static int init_display(struct fbtft_par *par)
{
- if (par->pdata
- && par->pdata->display.backlight == FBTFT_ONBOARD_BACKLIGHT) {
+ if (par->pdata &&
+ par->pdata->display.backlight == FBTFT_ONBOARD_BACKLIGHT) {
/* module uses onboard GPIO for panel power */
par->fbtftops.register_backlight = register_onboard_backlight;
}
@@ -66,13 +69,10 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int set_var(struct fbtft_par *par)
{
- unsigned remap;
+ unsigned int remap;
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register A0h */
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s: skipping since custom init_display() is used\n",
- __func__);
return 0;
}
@@ -80,10 +80,10 @@ static int set_var(struct fbtft_par *par)
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0xA0, remap | 0x00 | 1<<4);
+ write_reg(par, 0xA0, remap | 0x00 | BIT(4));
break;
case 270:
- write_reg(par, 0xA0, remap | 0x03 | 1<<4);
+ write_reg(par, 0xA0, remap | 0x03 | BIT(4));
break;
case 180:
write_reg(par, 0xA0, remap | 0x02);
@@ -97,27 +97,27 @@ static int set_var(struct fbtft_par *par)
}
/*
- Grayscale Lookup Table
- GS1 - GS63
- The driver Gamma curve contains the relative values between the entries
- in the Lookup table.
-
- From datasheet:
- 8.8 Gray Scale Decoder
-
- there are total 180 Gamma Settings (Setting 0 to Setting 180)
- available for the Gray Scale table.
-
- The gray scale is defined in incremental way, with reference
- to the length of previous table entry:
- Setting of GS1 has to be >= 0
- Setting of GS2 has to be > Setting of GS1 +1
- Setting of GS3 has to be > Setting of GS2 +1
- :
- Setting of GS63 has to be > Setting of GS62 +1
-
-*/
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Grayscale Lookup Table
+ * GS1 - GS63
+ * The driver Gamma curve contains the relative values between the entries
+ * in the Lookup table.
+ *
+ * From datasheet:
+ * 8.8 Gray Scale Decoder
+ *
+ * there are total 180 Gamma Settings (Setting 0 to Setting 180)
+ * available for the Gray Scale table.
+ *
+ * The gray scale is defined in incremental way, with reference
+ * to the length of previous table entry:
+ * Setting of GS1 has to be >= 0
+ * Setting of GS2 has to be > Setting of GS1 +1
+ * Setting of GS3 has to be > Setting of GS2 +1
+ * :
+ * Setting of GS63 has to be > Setting of GS62 +1
+ *
+ */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
unsigned long tmp[GAMMA_NUM * GAMMA_LEN];
int i, acc = 0;
@@ -125,37 +125,45 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
for (i = 0; i < 63; i++) {
if (i > 0 && curves[i] < 2) {
dev_err(par->info->device,
- "Illegal value in Grayscale Lookup Table at index %d. " \
- "Must be greater than 1\n", i);
+ "Illegal value in Grayscale Lookup Table at index %d : %d. Must be greater than 1\n",
+ i, curves[i]);
return -EINVAL;
}
acc += curves[i];
tmp[i] = acc;
if (acc > 180) {
dev_err(par->info->device,
- "Illegal value(s) in Grayscale Lookup Table. " \
- "At index=%d, the accumulated value has exceeded 180\n", i);
+ "Illegal value(s) in Grayscale Lookup Table. At index=%d : %d, the accumulated value has exceeded 180\n",
+ i, acc);
return -EINVAL;
}
}
write_reg(par, 0xB8,
- tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7],
- tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14], tmp[15],
- tmp[16], tmp[17], tmp[18], tmp[19], tmp[20], tmp[21], tmp[22], tmp[23],
- tmp[24], tmp[25], tmp[26], tmp[27], tmp[28], tmp[29], tmp[30], tmp[31],
- tmp[32], tmp[33], tmp[34], tmp[35], tmp[36], tmp[37], tmp[38], tmp[39],
- tmp[40], tmp[41], tmp[42], tmp[43], tmp[44], tmp[45], tmp[46], tmp[47],
- tmp[48], tmp[49], tmp[50], tmp[51], tmp[52], tmp[53], tmp[54], tmp[55],
- tmp[56], tmp[57], tmp[58], tmp[59], tmp[60], tmp[61], tmp[62]);
+ tmp[0], tmp[1], tmp[2], tmp[3],
+ tmp[4], tmp[5], tmp[6], tmp[7],
+ tmp[8], tmp[9], tmp[10], tmp[11],
+ tmp[12], tmp[13], tmp[14], tmp[15],
+ tmp[16], tmp[17], tmp[18], tmp[19],
+ tmp[20], tmp[21], tmp[22], tmp[23],
+ tmp[24], tmp[25], tmp[26], tmp[27],
+ tmp[28], tmp[29], tmp[30], tmp[31],
+ tmp[32], tmp[33], tmp[34], tmp[35],
+ tmp[36], tmp[37], tmp[38], tmp[39],
+ tmp[40], tmp[41], tmp[42], tmp[43],
+ tmp[44], tmp[45], tmp[46], tmp[47],
+ tmp[48], tmp[49], tmp[50], tmp[51],
+ tmp[52], tmp[53], tmp[54], tmp[55],
+ tmp[56], tmp[57], tmp[58], tmp[59],
+ tmp[60], tmp[61], tmp[62]);
return 0;
}
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
- __func__, on ? "true" : "false");
+ fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
+ __func__, str_true_false(on));
if (on)
write_reg(par, 0xAE);
else
@@ -179,18 +187,14 @@ static struct fbtft_display display = {
},
};
-#ifdef CONFIG_FB_BACKLIGHT
static int update_onboard_backlight(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
bool on;
- fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: power=%d, fb_blank=%d\n",
- __func__, bd->props.power, bd->props.fb_blank);
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s: power=%d\n", __func__, bd->props.power);
- on = (bd->props.power == FB_BLANK_UNBLANK)
- && (bd->props.fb_blank == FB_BLANK_UNBLANK);
+ on = !backlight_is_blank(bd);
/* Onboard backlight connected to GPIO0 on SSD1351, GPIO1 unused */
write_reg(par, 0xB5, on ? 0x03 : 0x02);
@@ -206,13 +210,12 @@ static void register_onboard_backlight(struct fbtft_par *par)
struct backlight_device *bd;
struct backlight_properties bl_props = { 0, };
- fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
-
bl_props.type = BACKLIGHT_RAW;
- bl_props.power = FB_BLANK_POWERDOWN;
+ bl_props.power = BACKLIGHT_POWER_OFF;
bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &bl_ops, &bl_props);
+ par->info->device, par, &bl_ops,
+ &bl_props);
if (IS_ERR(bd)) {
dev_err(par->info->device,
"cannot register backlight device (%ld)\n",
@@ -224,9 +227,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
if (!par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
-#else
-static void register_onboard_backlight(struct fbtft_par *par) { };
-#endif
FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index a92b0d071097..9670a8989b91 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ST7735R LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -24,45 +16,51 @@
#define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \
"0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
-static int default_init_sequence[] = {
- /* SWRESET - Software reset */
- -1, 0x01,
+static const s16 default_init_sequence[] = {
+ -1, MIPI_DCS_SOFT_RESET,
-2, 150, /* delay */
- /* SLPOUT - Sleep out & booster on */
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 500, /* delay */
/* FRMCTR1 - frame rate control: normal mode
- frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D) */
+ * frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D)
+ */
-1, 0xB1, 0x01, 0x2C, 0x2D,
/* FRMCTR2 - frame rate control: idle mode
- frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D) */
+ * frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D)
+ */
-1, 0xB2, 0x01, 0x2C, 0x2D,
/* FRMCTR3 - frame rate control - partial mode
- dot inversion mode, line inversion mode */
+ * dot inversion mode, line inversion mode
+ */
-1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
/* INVCTR - display inversion control
- no inversion */
+ * no inversion
+ */
-1, 0xB4, 0x07,
/* PWCTR1 - Power Control
- -4.6V, AUTO mode */
+ * -4.6V, AUTO mode
+ */
-1, 0xC0, 0xA2, 0x02, 0x84,
/* PWCTR2 - Power Control
- VGH25 = 2.4C VGSEL = -10 VGH = 3 * AVDD */
+ * VGH25 = 2.4C VGSEL = -10 VGH = 3 * AVDD
+ */
-1, 0xC1, 0xC5,
/* PWCTR3 - Power Control
- Opamp current small, Boost frequency */
+ * Opamp current small, Boost frequency
+ */
-1, 0xC2, 0x0A, 0x00,
/* PWCTR4 - Power Control
- BCLK/2, Opamp current small & Medium low */
+ * BCLK/2, Opamp current small & Medium low
+ */
-1, 0xC3, 0x8A, 0x2A,
/* PWCTR5 - Power Control */
@@ -71,18 +69,14 @@ static int default_init_sequence[] = {
/* VMCTR1 - Power Control */
-1, 0xC5, 0x0E,
- /* INVOFF - Display inversion off */
- -1, 0x20,
+ -1, MIPI_DCS_EXIT_INVERT_MODE,
- /* COLMOD - Interface pixel format */
- -1, 0x3A, 0x05,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT,
- /* DISPON - Display On */
- -1, 0x29,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
-2, 100, /* delay */
- /* NORON - Partial off (Normal) */
- -1, 0x13,
+ -1, MIPI_DCS_ENTER_NORMAL_MODE,
-2, 10, /* delay */
/* end marker */
@@ -91,14 +85,13 @@ static int default_init_sequence[] = {
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define MY BIT(7)
@@ -107,23 +100,28 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int set_var(struct fbtft_par *par)
{
/* MADCTL - Memory data access control
- RGB/BGR:
- 1. Mode selection pin SRGB
- RGB H/W pin for color filter setting: 0=RGB, 1=BGR
- 2. MADCTL RGB bit
- RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
+ * RGB/BGR:
+ * 1. Mode selection pin SRGB
+ * RGB H/W pin for color filter setting: 0=RGB, 1=BGR
+ * 2. MADCTL RGB bit
+ * RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR
+ */
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MY | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MY | MV | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ par->bgr << 3);
break;
case 90:
- write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MV | (par->bgr << 3));
break;
}
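A small userspace sketch (not part of the patch) of how the MADCTL byte for MIPI_DCS_SET_ADDRESS_MODE is composed in set_var() above. It assumes the conventional MADCTL bit layout (MY = bit 7, MX = bit 6, MV = bit 5, RGB/BGR order = bit 3), matching the MY define visible above and the (par->bgr << 3) term in the driver.

#include <stdio.h>

#define MY	(1u << 7)	/* page (row) address order */
#define MX	(1u << 6)	/* column address order */
#define MV	(1u << 5)	/* page/column exchange */
#define BGR	(1u << 3)	/* RGB-BGR order of the colour filter */

static unsigned int madctl(unsigned int rotate, int bgr)
{
	unsigned int val;

	switch (rotate) {
	case 0:
		val = MX | MY;
		break;
	case 90:
		val = MX | MV;
		break;
	case 180:
		val = 0;
		break;
	case 270:
		val = MY | MV;
		break;
	default:
		return 0;
	}
	return val | (bgr ? BGR : 0);
}

int main(void)
{
	/* 90 degrees on a BGR panel -> 0x68 */
	printf("MADCTL = 0x%02X\n", madctl(90, 1));
	return 0;
}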
@@ -131,12 +129,12 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRF0P VOS0P PK0P PK1P PK2P PK3P PK4P PK5P PK6P PK7P PK8P PK9P SELV0P SELV1P SELV62P SELV63P
- VRF0N VOS0N PK0N PK1N PK2N PK3N PK4N PK5N PK6N PK7N PK8N PK9N SELV0N SELV1N SELV62N SELV63N
-*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+ * Gamma string format:
+ * VRF0P VOS0P PK0P PK1P PK2P PK3P PK4P PK5P PK6P PK7P PK8P PK9P SELV0P SELV1P SELV62P SELV63P
+ * VRF0N VOS0N PK0N PK1N PK2N PK3N PK4N PK5N PK6N PK7N PK8N PK9N SELV0N SELV1N SELV62N SELV63N
+ */
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
int i, j;
@@ -147,13 +145,18 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
for (i = 0; i < par->gamma.num_curves; i++)
write_reg(par, 0xE0 + i,
- CURVE(i, 0), CURVE(i, 1), CURVE(i, 2), CURVE(i, 3),
- CURVE(i, 4), CURVE(i, 5), CURVE(i, 6), CURVE(i, 7),
- CURVE(i, 8), CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
- CURVE(i, 12), CURVE(i, 13), CURVE(i, 14), CURVE(i, 15));
+ CURVE(i, 0), CURVE(i, 1),
+ CURVE(i, 2), CURVE(i, 3),
+ CURVE(i, 4), CURVE(i, 5),
+ CURVE(i, 6), CURVE(i, 7),
+ CURVE(i, 8), CURVE(i, 9),
+ CURVE(i, 10), CURVE(i, 11),
+ CURVE(i, 12), CURVE(i, 13),
+ CURVE(i, 14), CURVE(i, 15));
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
new file mode 100644
index 000000000000..861a154144e6
--- /dev/null
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * FB driver for the ST7789V LCD Controller
+ *
+ * Copyright (C) 2015 Dennis Menschel
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+
+#include <video/mipi_display.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_st7789v"
+
+#define DEFAULT_GAMMA \
+ "70 2C 2E 15 10 09 48 33 53 0B 19 18 20 25\n" \
+ "70 2C 2E 15 10 09 48 33 53 0B 19 18 20 25"
+
+#define HSD20_IPS_GAMMA \
+ "D0 05 0A 09 08 05 2E 44 45 0F 17 16 2B 33\n" \
+ "D0 05 0A 09 08 05 2E 43 45 0F 16 16 2B 33"
+
+#define HSD20_IPS 1
+
+/**
+ * enum st7789v_command - ST7789V display controller commands
+ *
+ * @PORCTRL: porch setting
+ * @GCTRL: gate control
+ * @VCOMS: VCOM setting
+ * @VDVVRHEN: VDV and VRH command enable
+ * @VRHS: VRH set
+ * @VDVS: VDV set
+ * @VCMOFSET: VCOM offset set
+ * @PWCTRL1: power control 1
+ * @PVGAMCTRL: positive voltage gamma control
+ * @NVGAMCTRL: negative voltage gamma control
+ *
+ * The command names are the same as those found in the datasheet to ease
+ * looking up their semantics and usage.
+ *
+ * Note that the ST7789V display controller offers quite a few more commands
+ * which have been omitted from this list as they are not used at the moment.
+ * Furthermore, commands that are compliant with the MIPI DCS have been left
+ * out as well to avoid duplicate entries.
+ */
+enum st7789v_command {
+ PORCTRL = 0xB2,
+ GCTRL = 0xB7,
+ VCOMS = 0xBB,
+ VDVVRHEN = 0xC2,
+ VRHS = 0xC3,
+ VDVS = 0xC4,
+ VCMOFSET = 0xC5,
+ PWCTRL1 = 0xD0,
+ PVGAMCTRL = 0xE0,
+ NVGAMCTRL = 0xE1,
+};
+
+#define MADCTL_BGR BIT(3) /* bitmask for RGB/BGR order */
+#define MADCTL_MV BIT(5) /* bitmask for page/column order */
+#define MADCTL_MX BIT(6) /* bitmask for column address order */
+#define MADCTL_MY BIT(7) /* bitmask for page address order */
+
+/* 60 Hz refresh: 16.6 ms per frame; time out after two frame periods */
+#define PANEL_TE_TIMEOUT_MS 33
+
+static struct completion panel_te; /* completion for panel TE line */
+static int irq_te; /* Linux IRQ for LCD TE line */
+
+static irqreturn_t panel_te_handler(int irq, void *data)
+{
+ complete(&panel_te);
+ return IRQ_HANDLED;
+}
+
+/*
+ * init_tearing_effect_line() - init tearing effect line.
+ * @par: FBTFT parameter object.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+static int init_tearing_effect_line(struct fbtft_par *par)
+{
+ struct device *dev = par->info->device;
+ struct gpio_desc *te;
+ int rc, irq;
+
+ te = gpiod_get_optional(dev, "te", GPIOD_IN);
+ if (IS_ERR(te))
+ return dev_err_probe(dev, PTR_ERR(te), "Failed to request te GPIO\n");
+
+	/* te == NULL means no TE line is configured, so return success */
+ if (!te) {
+ irq_te = 0;
+ return 0;
+ }
+
+ irq = gpiod_to_irq(te);
+
+ /* GPIO is locked as an IRQ, we may drop the reference */
+ gpiod_put(te);
+
+ if (irq < 0)
+ return irq;
+
+ irq_te = irq;
+ init_completion(&panel_te);
+
+	/* The TE pulse is active high and lasts no more than 1000 microseconds */
+ rc = devm_request_irq(dev, irq_te, panel_te_handler,
+ IRQF_TRIGGER_RISING, "TE_GPIO", par);
+ if (rc)
+ return dev_err_probe(dev, rc, "TE IRQ request failed.\n");
+
+ disable_irq_nosync(irq_te);
+
+ return 0;
+}
+
+/**
+ * init_display() - initialize the display controller
+ *
+ * @par: FBTFT parameter object
+ *
+ * Most of the commands in this init function set their parameters to the
+ * same default values which are already in place after the display has been
+ * powered up. (The main exception to this rule is the pixel format which
+ * would default to 18 instead of 16 bit per pixel.)
+ * Nonetheless, this sequence can be used as a template for concrete
+ * displays which usually need some adjustments.
+ *
+ * Return: 0 on success, < 0 if error occurred.
+ */
+static int init_display(struct fbtft_par *par)
+{
+ int rc;
+
+ par->fbtftops.reset(par);
+
+ rc = init_tearing_effect_line(par);
+ if (rc)
+ return rc;
+
+ /* turn off sleep mode */
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
+ mdelay(120);
+
+ /* set pixel format to RGB-565 */
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ if (HSD20_IPS)
+ write_reg(par, PORCTRL, 0x05, 0x05, 0x00, 0x33, 0x33);
+
+ else
+ write_reg(par, PORCTRL, 0x08, 0x08, 0x00, 0x22, 0x22);
+
+ /*
+ * VGH = 13.26V
+ * VGL = -10.43V
+ */
+ if (HSD20_IPS)
+ write_reg(par, GCTRL, 0x75);
+ else
+ write_reg(par, GCTRL, 0x35);
+
+ /*
+ * VDV and VRH register values come from command write
+ * (instead of NVM)
+ */
+ write_reg(par, VDVVRHEN, 0x01, 0xFF);
+
+ /*
+ * VAP = 4.1V + (VCOM + VCOM offset + 0.5 * VDV)
+ * VAN = -4.1V + (VCOM + VCOM offset + 0.5 * VDV)
+ */
+ if (HSD20_IPS)
+ write_reg(par, VRHS, 0x13);
+ else
+ write_reg(par, VRHS, 0x0B);
+
+ /* VDV = 0V */
+ write_reg(par, VDVS, 0x20);
+
+ /* VCOM = 0.9V */
+ if (HSD20_IPS)
+ write_reg(par, VCOMS, 0x22);
+ else
+ write_reg(par, VCOMS, 0x20);
+
+ /* VCOM offset = 0V */
+ write_reg(par, VCMOFSET, 0x20);
+
+ /*
+ * AVDD = 6.8V
+ * AVCL = -4.8V
+ * VDS = 2.3V
+ */
+ write_reg(par, PWCTRL1, 0xA4, 0xA1);
+
+ /* TE line output is off by default when powering on */
+ if (irq_te)
+ write_reg(par, MIPI_DCS_SET_TEAR_ON, 0x00);
+
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
+
+ if (HSD20_IPS)
+ write_reg(par, MIPI_DCS_ENTER_INVERT_MODE);
+
+ return 0;
+}
+
+/*
+ * write_vmem() - write data to display.
+ * @par: FBTFT parameter object.
+ * @offset: offset from screen_buffer.
+ * @len: the length of data to be written.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ struct device *dev = par->info->device;
+ int ret;
+
+ if (irq_te) {
+ enable_irq(irq_te);
+ reinit_completion(&panel_te);
+ ret = wait_for_completion_timeout(&panel_te,
+ msecs_to_jiffies(PANEL_TE_TIMEOUT_MS));
+ if (ret == 0)
+ dev_err(dev, "wait panel TE timeout\n");
+
+ disable_irq(irq_te);
+ }
+
+ switch (par->pdata->display.buswidth) {
+ case 8:
+ ret = fbtft_write_vmem16_bus8(par, offset, len);
+ break;
+ case 9:
+ ret = fbtft_write_vmem16_bus9(par, offset, len);
+ break;
+ case 16:
+ ret = fbtft_write_vmem16_bus16(par, offset, len);
+ break;
+ default:
+ dev_err(dev, "Unsupported buswidth %d\n",
+ par->pdata->display.buswidth);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * set_var() - apply LCD properties like rotation and BGR mode
+ *
+ * @par: FBTFT parameter object
+ *
+ * Return: 0 on success, < 0 if error occurred.
+ */
+static int set_var(struct fbtft_par *par)
+{
+ u8 madctl_par = 0;
+
+ if (par->bgr)
+ madctl_par |= MADCTL_BGR;
+ switch (par->info->var.rotate) {
+ case 0:
+ break;
+ case 90:
+ madctl_par |= (MADCTL_MV | MADCTL_MY);
+ break;
+ case 180:
+ madctl_par |= (MADCTL_MX | MADCTL_MY);
+ break;
+ case 270:
+ madctl_par |= (MADCTL_MV | MADCTL_MX);
+ break;
+ default:
+ return -EINVAL;
+ }
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, madctl_par);
+ return 0;
+}
+
+/**
+ * set_gamma() - set gamma curves
+ *
+ * @par: FBTFT parameter object
+ * @curves: gamma curves
+ *
+ * Before the gamma curves are applied, they are preprocessed with a bitmask
+ * to ensure syntactically correct input for the display controller.
+ * This implies that the curves input parameter might be changed by this
+ * function and that illegal gamma values are auto-corrected and not
+ * reported as errors.
+ *
+ * Return: 0 on success, < 0 if error occurred.
+ */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
+{
+ int i;
+ int j;
+ int c; /* curve index offset */
+
+ /*
+ * Bitmasks for gamma curve command parameters.
+ * The masks are the same for both positive and negative voltage
+ * gamma curves.
+ */
+ static const u8 gamma_par_mask[] = {
+		0xFF, /* V63[3:0], V0[3:0] */
+ 0x3F, /* V1[5:0] */
+ 0x3F, /* V2[5:0] */
+ 0x1F, /* V4[4:0] */
+ 0x1F, /* V6[4:0] */
+ 0x3F, /* J0[1:0], V13[3:0] */
+ 0x7F, /* V20[6:0] */
+ 0x77, /* V36[2:0], V27[2:0] */
+ 0x7F, /* V43[6:0] */
+ 0x3F, /* J1[1:0], V50[3:0] */
+ 0x1F, /* V57[4:0] */
+ 0x1F, /* V59[4:0] */
+ 0x3F, /* V61[5:0] */
+ 0x3F, /* V62[5:0] */
+ };
+
+ for (i = 0; i < par->gamma.num_curves; i++) {
+ c = i * par->gamma.num_values;
+ for (j = 0; j < par->gamma.num_values; j++)
+ curves[c + j] &= gamma_par_mask[j];
+ write_reg(par, PVGAMCTRL + i,
+ curves[c + 0], curves[c + 1], curves[c + 2],
+ curves[c + 3], curves[c + 4], curves[c + 5],
+ curves[c + 6], curves[c + 7], curves[c + 8],
+ curves[c + 9], curves[c + 10], curves[c + 11],
+ curves[c + 12], curves[c + 13]);
+ }
+ return 0;
+}
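To make the auto-correction described in the comment above concrete, here is a tiny userspace sketch (not part of the patch; the sample value is made up) showing how a single out-of-range gamma parameter is masked rather than rejected.

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x3F;	/* V1 is a 6-bit field */
	unsigned int curve_val = 0x7C;	/* illegal: bit 6 is set */

	/* prints "0x7C & 0x3F = 0x3C": the driver sends 0x3C, no error */
	printf("0x%02X & 0x%02X = 0x%02X\n", curve_val, mask, curve_val & mask);
	return 0;
}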
+
+/**
+ * blank() - blank the display
+ *
+ * @par: FBTFT parameter object
+ * @on: whether to enable or disable blanking the display
+ *
+ * Return: 0 on success, < 0 if error occurred.
+ */
+static int blank(struct fbtft_par *par, bool on)
+{
+ if (on)
+ write_reg(par, MIPI_DCS_SET_DISPLAY_OFF);
+ else
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = 240,
+ .height = 320,
+ .gamma_num = 2,
+ .gamma_len = 14,
+ .gamma = HSD20_IPS_GAMMA,
+ .fbtftops = {
+ .init_display = init_display,
+ .write_vmem = write_vmem,
+ .set_var = set_var,
+ .set_gamma = set_gamma,
+ .blank = blank,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "sitronix,st7789v", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:st7789v");
+MODULE_ALIAS("platform:st7789v");
+
+MODULE_DESCRIPTION("FB driver for the ST7789V LCD Controller");
+MODULE_AUTHOR("Dennis Menschel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_tinylcd.c b/drivers/staging/fbtft/fb_tinylcd.c
index caf263db436a..9469248f2c50 100644
--- a/drivers/staging/fbtft/fb_tinylcd.c
+++ b/drivers/staging/fbtft/fb_tinylcd.c
@@ -1,23 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Custom FB driver for tinylcd.com display
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -38,7 +30,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xB4, 0x02);
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
write_reg(par, 0xB7, 0x07);
- write_reg(par, 0x36, 0x58);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58);
write_reg(par, 0xF0, 0x36, 0xA5, 0xD3);
write_reg(par, 0xE5, 0x80);
write_reg(par, 0xE5, 0x01);
@@ -46,25 +38,24 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xE5, 0x00);
write_reg(par, 0xF0, 0x36, 0xA5, 0x53);
write_reg(par, 0xE0, 0x00, 0x35, 0x33, 0x00, 0x00, 0x00,
- 0x00, 0x35, 0x33, 0x00, 0x00, 0x00);
- write_reg(par, 0x3A, 0x55);
- write_reg(par, 0x11);
+ 0x00, 0x35, 0x33, 0x00, 0x00, 0x00);
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
udelay(250);
- write_reg(par, 0x29);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static int set_var(struct fbtft_par *par)
@@ -72,19 +63,19 @@ static int set_var(struct fbtft_par *par)
switch (par->info->var.rotate) {
case 270:
write_reg(par, 0xB6, 0x00, 0x02, 0x3B);
- write_reg(par, 0x36, 0x28);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x28);
break;
case 180:
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
- write_reg(par, 0x36, 0x58);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58);
break;
case 90:
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
- write_reg(par, 0x36, 0x38);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x38);
break;
default:
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
- write_reg(par, 0x36, 0x08);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x08);
break;
}
diff --git a/drivers/staging/fbtft/fb_tls8204.c b/drivers/staging/fbtft/fb_tls8204.c
index 4e16ea763d78..bec6dd0ffb01 100644
--- a/drivers/staging/fbtft/fb_tls8204.c
+++ b/drivers/staging/fbtft/fb_tls8204.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the TLS8204 LCD Controller
*
@@ -6,22 +7,12 @@
*
* Copyright (C) 2013 Noralf Tronnes
* Copyright (C) 2014 Michael Hope (adapted for the TLS8204)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -35,8 +26,8 @@
/* gamma is used to control contrast in this driver */
#define DEFAULT_GAMMA "40"
-static unsigned bs = 4;
-module_param(bs, uint, 0);
+static unsigned int bs = 4;
+module_param(bs, uint, 0000);
MODULE_PARM_DESC(bs, "BS[2:0] Bias voltage level: 0-7 (default: 4)");
static int init_display(struct fbtft_par *par)
@@ -44,21 +35,21 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
/* Enter extended command mode */
- write_reg(par, 0x21); /* 5:1 1
- 2:0 PD - Powerdown control: chip is active
- 1:0 V - Entry mode: horizontal addressing
- 0:1 H - Extended instruction set control:
- extended
- */
+ write_reg(par, 0x21); /* 5:1 1
+ * 2:0 PD - Powerdown control: chip is active
+ * 1:0 V - Entry mode: horizontal addressing
+ * 0:1 H - Extended instruction set control:
+ * extended
+ */
/* H=1 Bias system */
- write_reg(par, 0x10 | (bs & 0x7)); /*
- 4:1 1
- 3:0 0
- 2:x BS2 - Bias System
- 1:x BS1
- 0:x BS0
- */
+ write_reg(par, 0x10 | (bs & 0x7));
+ /* 4:1 1
+ * 3:0 0
+ * 2:x BS2 - Bias System
+ * 1:x BS1
+ * 0:x BS0
+ */
/* Set the address of the first display line. */
write_reg(par, 0x04 | (64 >> 6));
@@ -68,12 +59,12 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x20);
/* H=0 Display control */
- write_reg(par, 0x08 | 4); /*
- 3:1 1
- 2:1 D - DE: 10=normal mode
- 1:0 0
- 0:0 E
- */
+ write_reg(par, 0x08 | 4);
+ /* 3:1 1
+ * 2:1 D - DE: 10=normal mode
+ * 1:0 0
+ * 0:0 E
+ */
return 0;
}
@@ -81,30 +72,29 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
/* H=0 Set X address of RAM */
- write_reg(par, 0x80); /* 7:1 1
- 6-0: X[6:0] - 0x00
- */
+ write_reg(par, 0x80); /* 7:1 1
+ * 6-0: X[6:0] - 0x00
+ */
/* H=0 Set Y address of RAM */
- write_reg(par, 0x40); /* 7:0 0
- 6:1 1
- 2-0: Y[2:0] - 0x0
- */
+ write_reg(par, 0x40); /* 7:0 0
+ * 6:1 1
+ * 2-0: Y[2:0] - 0x0
+ */
}
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
- u16 *vmem16 = (u16 *)par->info->screen_base;
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
int x, y, i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
for (y = 0; y < HEIGHT / 8; y++) {
u8 *buf = par->txbuf.buf;
- /* The display is 102x68 but the LCD is 84x48. Set
- the write pointer at the start of each row. */
- gpio_set_value(par->gpio.dc, 0);
+ /* The display is 102x68 but the LCD is 84x48.
+ * Set the write pointer at the start of each row.
+ */
+ gpiod_set_value(par->gpio.dc, 0);
write_reg(par, 0x80 | 0);
write_reg(par, 0x40 | y);
@@ -119,7 +109,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
*buf++ = ch;
}
/* Write the row */
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
if (ret < 0) {
dev_err(par->info->device,
@@ -131,7 +121,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
return ret;
}
-static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+static int set_gamma(struct fbtft_par *par, u32 *curves)
{
/* apply mask */
curves[0] &= 0x7F;
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index c9bbba1e1a5d..ca35b386a12d 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -1,25 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the UltraChip UC1611 LCD controller
*
* The display is 4-bit grayscale (16 shades) 240x160.
*
* Copyright (C) 2015 Henri Chain
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -41,42 +32,48 @@
*/
/* BR -> actual ratio: 0-3 -> 5, 10, 11, 13 */
-static unsigned ratio = 2;
-module_param(ratio, uint, 0);
+static unsigned int ratio = 2;
+module_param(ratio, uint, 0000);
MODULE_PARM_DESC(ratio, "BR[1:0] Bias voltage ratio: 0-3 (default: 2)");
-static unsigned gain = 3;
-module_param(gain, uint, 0);
+static unsigned int gain = 3;
+module_param(gain, uint, 0000);
MODULE_PARM_DESC(gain, "GN[1:0] Bias voltage gain: 0-3 (default: 3)");
-static unsigned pot = 16;
-module_param(pot, uint, 0);
+static unsigned int pot = 16;
+module_param(pot, uint, 0000);
MODULE_PARM_DESC(pot, "PM[6:0] Bias voltage pot.: 0-63 (default: 16)");
/* TC -> % compensation per deg C: 0-3 -> -.05, -.10, -.015, -.20 */
-static unsigned temp;
-module_param(temp, uint, 0);
+static unsigned int temp;
+module_param(temp, uint, 0000);
MODULE_PARM_DESC(temp, "TC[1:0] Temperature compensation: 0-3 (default: 0)");
/* PC[1:0] -> LCD capacitance: 0-3 -> <20nF, 20-28 nF, 29-40 nF, 40-56 nF */
-static unsigned load = 1;
-module_param(load, uint, 0);
+static unsigned int load = 1;
+module_param(load, uint, 0000);
MODULE_PARM_DESC(load, "PC[1:0] Panel Loading: 0-3 (default: 1)");
/* PC[3:2] -> V_LCD: 0, 1, 3 -> ext., int. with ratio = 5, int. standard */
-static unsigned pump = 3;
-module_param(pump, uint, 0);
+static unsigned int pump = 3;
+module_param(pump, uint, 0000);
MODULE_PARM_DESC(pump, "PC[3:2] Pump control: 0,1,3 (default: 3)");
static int init_display(struct fbtft_par *par)
{
int ret;
- /* Set CS active high */
- par->spi->mode |= SPI_CS_HIGH;
+ /*
+ * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
+ * work with GPIO based chip selects that are logically active high
+ * but inverted inside the GPIO library, so enforce inverted
+ * semantics.
+ */
+ par->spi->mode ^= SPI_CS_HIGH;
ret = spi_setup(par->spi);
if (ret) {
- dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+ dev_err(par->info->device,
+ "Could not set inverse CS polarity\n");
return ret;
}
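
The XOR here is the key detail: with a GPIO-based chip select the GPIO layer may already invert the line, so forcing SPI_CS_HIGH on would double-invert it. A minimal sketch of the toggle, assuming only a generic spi_device and nothing fbtft-specific:

#include <linux/spi/spi.h>

/*
 * Flip the current CS polarity instead of forcing SPI_CS_HIGH on, then
 * let spi_setup() validate the new mode with the controller.
 */
static int example_invert_cs(struct spi_device *spi)
{
	spi->mode ^= SPI_CS_HIGH;
	return spi_setup(spi);
}
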
@@ -100,7 +97,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x2C | (pump & 0x03));
/* Set inverse display */
- write_reg(par, 0xA6 | (0x01 & 0x01));
+ write_reg(par, 0xA6 | 0x01);
/* Set 4-bit grayscale mode */
write_reg(par, 0xD0 | (0x02 & 0x03));
@@ -138,9 +135,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xA8 | 0x00);
else
@@ -166,8 +160,8 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
@@ -180,11 +174,11 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
+ | (0x1 << 2) /* Mirror Y ON */
| (0x0 & 0x1) << 1 /* Mirror X OFF */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
@@ -192,13 +186,13 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 2) /* Mirror Y ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
default:
@@ -206,12 +200,12 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
| (0x0 & 0x1) << 2 /* Mirror Y OFF */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
}
@@ -221,17 +215,15 @@ static int set_var(struct fbtft_par *par)
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
- u8 *vmem8 = (u8 *)(par->info->screen_base);
- u8 *buf8 = (u8 *)(par->txbuf.buf);
- u16 *buf16 = (u16 *)(par->txbuf.buf);
+ u8 *vmem8 = (u8 *)(par->info->screen_buffer);
+ u8 *buf8 = par->txbuf.buf;
+ u16 *buf16 = par->txbuf.buf;
int line_length = par->info->fix.line_length;
- int y_start = (offset / line_length);
+ int y_start = offset / line_length;
int y_end = (offset + len - 1) / line_length;
int x, y, i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
switch (par->pdata->display.buswidth) {
case 8:
switch (par->info->var.rotate) {
@@ -262,7 +254,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
break;
}
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
/* Write data */
ret = par->fbtftops.write(par, par->txbuf.buf, len / 2);
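
The pattern repeated across these drivers is the switch from legacy integer GPIOs to GPIO descriptors: par->gpio.dc becomes a struct gpio_desc *, gpio_set_value() becomes gpiod_set_value(), and the old "!= -1" guards disappear. A small sketch of why the guards can go, assuming an optional D/C descriptor as in fbtft:

#include <linux/gpio/consumer.h>

/*
 * gpiod_set_value() treats a NULL descriptor as a no-op and applies the
 * active-low polarity recorded in the descriptor, so callers only state
 * the logical level they want.
 */
static void example_set_dc(struct gpio_desc *dc, int data_mode)
{
	gpiod_set_value(dc, data_mode);	/* safe even if "dc" was not wired */
}
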
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
index 28b13cf16f1e..e4ccc73868a7 100644
--- a/drivers/staging/fbtft/fb_uc1701.c
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the UC1701 LCD Controller
*
@@ -5,22 +6,12 @@
* Any pixel value except 0 turns the pixel on.
*
* Copyright (C) 2014 Juergen Holzmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -78,11 +69,11 @@ static int init_display(struct fbtft_par *par)
mdelay(10);
/* set startpoint */
- /* LCD_START_LINE | (pos & 0x3F) */
write_reg(par, LCD_START_LINE);
/* select orientation BOTTOMVIEW */
write_reg(par, LCD_BOTTOMVIEW | 1);
+
/* output mode select (turns display upside-down) */
write_reg(par, LCD_SCAN_DIR | 0x00);
@@ -96,20 +87,14 @@ static int init_display(struct fbtft_par *par)
write_reg(par, LCD_BIAS | 0);
/* power control mode: all features on */
- /* LCD_POWER_CONTROL | (val&0x07) */
write_reg(par, LCD_POWER_CONTROL | 0x07);
/* set voltage regulator R/R */
- /* LCD_VOLTAGE | (val&0x07) */
write_reg(par, LCD_VOLTAGE | 0x07);
/* volume mode set */
- /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
write_reg(par, LCD_VOLUME_MODE);
- /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
write_reg(par, 0x09);
- /* ???? */
- /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
write_reg(par, LCD_NO_OP);
/* advanced program control */
@@ -125,29 +110,18 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
/* goto address */
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, LCD_PAGE_ADDRESS);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, 0x00);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, LCD_COL_ADDRESS);
}
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
- u16 *vmem16 = (u16 *)par->info->screen_base;
- u8 *buf = par->txbuf.buf;
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u8 *buf;
int x, y, i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
for (y = 0; y < PAGES; y++) {
buf = par->txbuf.buf;
for (x = 0; x < WIDTH; x++) {
@@ -158,21 +132,13 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
1 : 0) << i;
buf++;
}
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+
write_reg(par, LCD_PAGE_ADDRESS | (u8)y);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, 0x00);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, LCD_COL_ADDRESS);
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
- gpio_set_value(par->gpio.dc, 0);
+ gpiod_set_value(par->gpio.dc, 0);
}
if (ret < 0)
diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
index 970b8430eccf..c680160d6380 100644
--- a/drivers/staging/fbtft/fb_upd161704.c
+++ b/drivers/staging/fbtft/fb_upd161704.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the uPD161704 LCD Controller
*
@@ -6,22 +7,11 @@
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -35,9 +25,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
-
/* Initialization sequence from Lib_UTFT */
/* register reset */
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
deleted file mode 100644
index 14beefefa9ac..000000000000
--- a/drivers/staging/fbtft/fb_watterott.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * FB driver for the Watterott LCD Controller
- *
- * Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME "fb_watterott"
-#define WIDTH 320
-#define HEIGHT 240
-#define FPS 5
-#define TXBUFLEN 1024
-#define DEFAULT_BRIGHTNESS 50
-
-#define CMD_VERSION 0x01
-#define CMD_LCD_LED 0x10
-#define CMD_LCD_RESET 0x11
-#define CMD_LCD_ORIENTATION 0x20
-#define CMD_LCD_DRAWIMAGE 0x27
-#define COLOR_RGB323 8
-#define COLOR_RGB332 9
-#define COLOR_RGB233 10
-#define COLOR_RGB565 16
-
-static short mode = 565;
-module_param(mode, short, 0);
-MODULE_PARM_DESC(mode, "RGB color transfer mode: 332, 565 (default)");
-
-static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
-{
- va_list args;
- int i, ret;
- u8 *buf = par->buf;
-
- va_start(args, len);
- for (i = 0; i < len; i++)
- *buf++ = (u8)va_arg(args, unsigned int);
- va_end(args);
-
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
- par->info->device, u8, par->buf, len, "%s: ", __func__);
-
- ret = par->fbtftops.write(par, par->buf, len);
- if (ret < 0) {
- dev_err(par->info->device,
- "write() failed and returned %d\n", ret);
- return;
- }
-}
-
-static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
-{
- unsigned start_line, end_line;
- u16 *vmem16 = (u16 *)(par->info->screen_base + offset);
- u16 *pos = par->txbuf.buf + 1;
- u16 *buf16 = par->txbuf.buf + 10;
- int i, j;
- int ret = 0;
-
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
- start_line = offset / par->info->fix.line_length;
- end_line = start_line + (len / par->info->fix.line_length) - 1;
-
- /* Set command header. pos: x, y, w, h */
- ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
- pos[0] = 0;
- pos[2] = cpu_to_be16(par->info->var.xres);
- pos[3] = cpu_to_be16(1);
- ((u8 *)par->txbuf.buf)[9] = COLOR_RGB565;
-
- for (i = start_line; i <= end_line; i++) {
- pos[1] = cpu_to_be16(i);
- for (j = 0; j < par->info->var.xres; j++)
- buf16[j] = cpu_to_be16(*vmem16++);
- ret = par->fbtftops.write(par,
- par->txbuf.buf, 10 + par->info->fix.line_length);
- if (ret < 0)
- return ret;
- udelay(300);
- }
-
- return 0;
-}
-
-#define RGB565toRGB323(c) (((c&0xE000)>>8) | ((c&0600)>>6) | ((c&0x001C)>>2))
-#define RGB565toRGB332(c) (((c&0xE000)>>8) | ((c&0700)>>6) | ((c&0x0018)>>3))
-#define RGB565toRGB233(c) (((c&0xC000)>>8) | ((c&0700)>>5) | ((c&0x001C)>>2))
-
-static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
-{
- unsigned start_line, end_line;
- u16 *vmem16 = (u16 *)(par->info->screen_base + offset);
- u16 *pos = par->txbuf.buf + 1;
- u8 *buf8 = par->txbuf.buf + 10;
- int i, j;
- int ret = 0;
-
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
-
- start_line = offset / par->info->fix.line_length;
- end_line = start_line + (len / par->info->fix.line_length) - 1;
-
- /* Set command header. pos: x, y, w, h */
- ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
- pos[0] = 0;
- pos[2] = cpu_to_be16(par->info->var.xres);
- pos[3] = cpu_to_be16(1);
- ((u8 *)par->txbuf.buf)[9] = COLOR_RGB332;
-
- for (i = start_line; i <= end_line; i++) {
- pos[1] = cpu_to_be16(i);
- for (j = 0; j < par->info->var.xres; j++) {
- buf8[j] = RGB565toRGB332(*vmem16);
- vmem16++;
- }
- ret = par->fbtftops.write(par,
- par->txbuf.buf, 10 + par->info->var.xres);
- if (ret < 0)
- return ret;
- udelay(700);
- }
-
- return 0;
-}
-
-static unsigned firmware_version(struct fbtft_par *par)
-{
- u8 rxbuf[4] = {0, };
-
- write_reg(par, CMD_VERSION);
- par->fbtftops.read(par, rxbuf, 4);
- if (rxbuf[1] != '.')
- return 0;
-
- return (rxbuf[0] - '0') << 8 | (rxbuf[2] - '0') << 4 | (rxbuf[3] - '0');
-}
-
-static int init_display(struct fbtft_par *par)
-{
- int ret;
- unsigned version;
- u8 save_mode;
-
- /* enable SPI interface by having CS and MOSI low during reset */
- save_mode = par->spi->mode;
- par->spi->mode |= SPI_CS_HIGH;
- ret = spi_setup(par->spi); /* set CS inactive low */
- if (ret) {
- dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
- return ret;
- }
- write_reg(par, 0x00); /* make sure mode is set */
-
- mdelay(50);
- par->fbtftops.reset(par);
- mdelay(1000);
- par->spi->mode = save_mode;
- ret = spi_setup(par->spi);
- if (ret) {
- dev_err(par->info->device, "Could not restore SPI mode\n");
- return ret;
- }
- write_reg(par, 0x00);
-
- version = firmware_version(par);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Firmware version: %x.%02x\n",
- version >> 8, version & 0xFF);
-
- if (mode == 332)
- par->fbtftops.write_vmem = write_vmem_8bit;
- return 0;
-}
-
-static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
-{
- /* not used on this controller */
-}
-
-static int set_var(struct fbtft_par *par)
-{
- u8 rotate;
-
- /* this controller rotates clock wise */
- switch (par->info->var.rotate) {
- case 90:
- rotate = 27;
- break;
- case 180:
- rotate = 18;
- break;
- case 270:
- rotate = 9;
- break;
- default:
- rotate = 0;
- }
- write_reg(par, CMD_LCD_ORIENTATION, rotate);
-
- return 0;
-}
-
-static int verify_gpios(struct fbtft_par *par)
-{
- if (par->gpio.reset < 0) {
- dev_err(par->info->device, "Missing 'reset' gpio. Aborting.\n");
- return -EINVAL;
- }
- return 0;
-}
-
-#ifdef CONFIG_FB_BACKLIGHT
-static int backlight_chip_update_status(struct backlight_device *bd)
-{
- struct fbtft_par *par = bl_get_data(bd);
- int brightness = bd->props.brightness;
-
- fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: brightness=%d, power=%d, fb_blank=%d\n",
- __func__, bd->props.brightness, bd->props.power,
- bd->props.fb_blank);
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- write_reg(par, CMD_LCD_LED, brightness);
-
- return 0;
-}
-
-static const struct backlight_ops bl_ops = {
- .update_status = backlight_chip_update_status,
-};
-
-static void register_chip_backlight(struct fbtft_par *par)
-{
- struct backlight_device *bd;
- struct backlight_properties bl_props = { 0, };
-
- fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
-
- bl_props.type = BACKLIGHT_RAW;
- bl_props.power = FB_BLANK_POWERDOWN;
- bl_props.max_brightness = 100;
- bl_props.brightness = DEFAULT_BRIGHTNESS;
-
- bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &bl_ops, &bl_props);
- if (IS_ERR(bd)) {
- dev_err(par->info->device,
- "cannot register backlight device (%ld)\n",
- PTR_ERR(bd));
- return;
- }
- par->info->bl_dev = bd;
-
- if (!par->fbtftops.unregister_backlight)
- par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
-}
-#else
-#define register_chip_backlight NULL
-#endif
-
-static struct fbtft_display display = {
- .regwidth = 8,
- .buswidth = 8,
- .width = WIDTH,
- .height = HEIGHT,
- .fps = FPS,
- .txbuflen = TXBUFLEN,
- .fbtftops = {
- .write_register = write_reg8_bus8,
- .write_vmem = write_vmem,
- .init_display = init_display,
- .set_addr_win = set_addr_win,
- .set_var = set_var,
- .verify_gpios = verify_gpios,
- .register_backlight = register_chip_backlight,
- },
-};
-
-FBTFT_REGISTER_DRIVER(DRVNAME, "watterott,openlcd", &display);
-
-MODULE_ALIAS("spi:" DRVNAME);
-
-MODULE_DESCRIPTION("FB driver for the Watterott LCD Controller");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 97129241cab9..30e436ff19e4 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include "fbtft.h"
@@ -10,40 +11,39 @@
*
*****************************************************************************/
-#define define_fbtft_write_reg(func, type, modifier) \
+#define define_fbtft_write_reg(func, buffer_type, data_type, modifier) \
void func(struct fbtft_par *par, int len, ...) \
{ \
va_list args; \
int i, ret; \
int offset = 0; \
- type *buf = (type *)par->buf; \
+ buffer_type *buf = (buffer_type *)par->buf; \
\
if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) { \
va_start(args, len); \
for (i = 0; i < len; i++) { \
- buf[i] = (type)va_arg(args, unsigned int); \
+ buf[i] = modifier((data_type)va_arg(args, \
+ unsigned int)); \
} \
va_end(args); \
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, type, buf, len, "%s: ", __func__); \
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, \
+ par->info->device, buffer_type, buf, len, \
+ "%s: ", __func__); \
} \
\
va_start(args, len); \
\
if (par->startbyte) { \
*(u8 *)par->buf = par->startbyte; \
- buf = (type *)(par->buf + 1); \
+ buf = (buffer_type *)(par->buf + 1); \
offset = 1; \
} \
\
- *buf = modifier((type)va_arg(args, unsigned int)); \
- if (par->gpio.dc != -1) \
- gpio_set_value(par->gpio.dc, 0); \
- ret = par->fbtftops.write(par, par->buf, sizeof(type) + offset); \
- if (ret < 0) { \
- va_end(args); \
- dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret); \
- return; \
- } \
+ *buf = modifier((data_type)va_arg(args, unsigned int)); \
+ ret = fbtft_write_buf_dc(par, par->buf, sizeof(data_type) + offset, \
+ 0); \
+ if (ret < 0) \
+ goto out; \
len--; \
\
if (par->startbyte) \
@@ -51,26 +51,20 @@ void func(struct fbtft_par *par, int len, ...) \
\
if (len) { \
i = len; \
- while (i--) { \
- *buf++ = modifier((type)va_arg(args, unsigned int)); \
- } \
- if (par->gpio.dc != -1) \
- gpio_set_value(par->gpio.dc, 1); \
- ret = par->fbtftops.write(par, par->buf, \
- len * (sizeof(type) + offset)); \
- if (ret < 0) { \
- va_end(args); \
- dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret); \
- return; \
- } \
+ while (i--) \
+ *buf++ = modifier((data_type)va_arg(args, \
+ unsigned int)); \
+ fbtft_write_buf_dc(par, par->buf, \
+ len * (sizeof(data_type) + offset), 1); \
} \
+out: \
va_end(args); \
} \
EXPORT_SYMBOL(func);
-define_fbtft_write_reg(fbtft_write_reg8_bus8, u8, )
-define_fbtft_write_reg(fbtft_write_reg16_bus8, u16, cpu_to_be16)
-define_fbtft_write_reg(fbtft_write_reg16_bus16, u16, )
+define_fbtft_write_reg(fbtft_write_reg8_bus8, u8, u8, )
+define_fbtft_write_reg(fbtft_write_reg16_bus8, __be16, u16, cpu_to_be16)
+define_fbtft_write_reg(fbtft_write_reg16_bus16, u16, u16, )
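
Splitting the macro's single type parameter into a buffer type and a data type lets the byte-swapping variant store __be16 values while still taking plain u16 arguments off the va_list. Roughly what one instantiation boils down to for a single value, as a simplified sketch (not the literal macro expansion):

#include <linux/types.h>
#include <asm/byteorder.h>

static void example_store_be16(void *parbuf, unsigned int arg)
{
	__be16 *buf = parbuf;		/* buffer holds big-endian wire data */

	*buf = cpu_to_be16((u16)arg);	/* convert the CPU-order argument */
}
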
void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...)
{
@@ -85,14 +79,16 @@ void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...)
*(((u8 *)buf) + i) = (u8)va_arg(args, unsigned int);
va_end(args);
fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
- par->info->device, u8, buf, len, "%s: ", __func__);
+ par->info->device, u8, buf, len, "%s: ",
+ __func__);
}
if (len <= 0)
return;
if (par->spi && (par->spi->bits_per_word == 8)) {
/* we're emulating 9-bit, pad start of buffer with no-ops
- (assuming here that zero is a no-op) */
+ * (assuming here that zero is a no-op)
+ */
pad = (len % 4) ? 4 - (len % 4) : 0;
for (i = 0; i < pad; i++)
*buf++ = 0x000;
@@ -125,7 +121,7 @@ EXPORT_SYMBOL(fbtft_write_reg8_bus9);
int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- u16 *txbuf16 = (u16 *)par->txbuf.buf;
+ __be16 *txbuf16 = par->txbuf.buf;
size_t remain;
size_t to_copy;
size_t tx_array_size;
@@ -133,14 +129,10 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
int ret = 0;
size_t startbyte_size = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
remain = len / 2;
- vmem16 = (u16 *)(par->info->screen_base + offset);
+ vmem16 = (u16 *)(par->info->screen_buffer + offset);
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
/* non buffered write */
if (!par->txbuf.buf)
@@ -150,16 +142,16 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
tx_array_size = par->txbuf.len / 2;
if (par->startbyte) {
- txbuf16 = (u16 *)(par->txbuf.buf + 1);
+ txbuf16 = par->txbuf.buf + 1;
tx_array_size -= 2;
*(u8 *)(par->txbuf.buf) = par->startbyte | 0x2;
startbyte_size = 1;
}
while (remain) {
- to_copy = remain > tx_array_size ? tx_array_size : remain;
- dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
- to_copy, remain - to_copy);
+ to_copy = min(tx_array_size, remain);
+ dev_dbg(par->info->device, "to_copy=%zu, remain=%zu\n",
+ to_copy, remain - to_copy);
for (i = 0; i < to_copy; i++)
txbuf16[i] = cpu_to_be16(vmem16[i]);
@@ -179,7 +171,7 @@ EXPORT_SYMBOL(fbtft_write_vmem16_bus8);
/* 16 bit pixel over 9-bit SPI bus: dc + high byte, dc + low byte */
int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
{
- u8 __iomem *vmem8;
+ u8 *vmem8;
u16 *txbuf16 = par->txbuf.buf;
size_t remain;
size_t to_copy;
@@ -187,32 +179,29 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
int i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
if (!par->txbuf.buf) {
dev_err(par->info->device, "%s: txbuf.buf is NULL\n", __func__);
return -1;
}
remain = len;
- vmem8 = par->info->screen_base + offset;
+ vmem8 = par->info->screen_buffer + offset;
tx_array_size = par->txbuf.len / 2;
while (remain) {
- to_copy = remain > tx_array_size ? tx_array_size : remain;
- dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
- to_copy, remain - to_copy);
+ to_copy = min(tx_array_size, remain);
+ dev_dbg(par->info->device, "to_copy=%zu, remain=%zu\n",
+ to_copy, remain - to_copy);
#ifdef __LITTLE_ENDIAN
for (i = 0; i < to_copy; i += 2) {
- txbuf16[i] = 0x0100 | ioread8(vmem8 + i + 1);
- txbuf16[i + 1] = 0x0100 | ioread8(vmem8 + i);
+ txbuf16[i] = 0x0100 | vmem8[i + 1];
+ txbuf16[i + 1] = 0x0100 | vmem8[i];
}
#else
for (i = 0; i < to_copy; i++)
- txbuf16[i] = 0x0100 | ioread8(vmem8 + i);
+ txbuf16[i] = 0x0100 | vmem8[i];
#endif
vmem8 = vmem8 + to_copy;
ret = par->fbtftops.write(par, par->txbuf.buf, to_copy * 2);
@@ -237,15 +226,9 @@ int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
- vmem16 = (u16 *)(par->info->screen_base + offset);
-
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 1);
+ vmem16 = (u16 *)(par->info->screen_buffer + offset);
/* no need for buffered write with 16-bit bus */
- return par->fbtftops.write(par, vmem16, len);
+ return fbtft_write_buf_dc(par, vmem16, len, 1);
}
EXPORT_SYMBOL(fbtft_write_vmem16_bus16);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 4691bccaf7c4..8a5ccc8ae0a1 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 Noralf Tronnes
*
* This driver is inspired by:
* st7735fb.c, Copyright (C) 2011, Matt Porter
* broadsheetfb.c, Copyright (C) 2008, Jaya Kumar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -25,32 +16,40 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+
+#include <video/mipi_display.h>
#include "fbtft.h"
#include "internal.h"
static unsigned long debug;
-module_param(debug, ulong, 0);
+module_param(debug, ulong, 0000);
MODULE_PARM_DESC(debug, "override device debug level");
-#ifdef CONFIG_HAS_DMA
-static bool dma = true;
-module_param(dma, bool, 0);
-MODULE_PARM_DESC(dma, "Use DMA buffer");
-#endif
+int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc)
+{
+ int ret;
+
+ gpiod_set_value(par->gpio.dc, dc);
+
+ ret = par->fbtftops.write(par, buf, len);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "write() failed and returned %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_write_buf_dc);
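
fbtft_write_buf_dc() is the common helper behind most of the gpio_set_value()-plus-write() removals in this series: it drives the D/C line, performs the write, and logs failures in one place. A hedged sketch of a caller; the function and its command byte are illustrative, not taken from a specific driver:

#include "fbtft.h"

static int example_send_command(struct fbtft_par *par, u8 cmd)
{
	u8 *buf = par->buf;

	buf[0] = cmd;
	/* dc = 0 selects command mode; errors are reported by the helper */
	return fbtft_write_buf_dc(par, buf, 1, 0);
}
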
void fbtft_dbg_hex(const struct device *dev, int groupsize,
- void *buf, size_t len, const char *fmt, ...)
+ const void *buf, size_t len, const char *fmt, ...)
{
va_list args;
static char textbuf[512];
@@ -62,7 +61,7 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
va_end(args);
hex_dump_to_buffer(buf, len, 32, groupsize, text + text_len,
- 512 - text_len, false);
+ 512 - text_len, false);
if (len > 32)
dev_info(dev, "%s ...\n", text);
@@ -71,190 +70,76 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
}
EXPORT_SYMBOL(fbtft_dbg_hex);
-static unsigned long fbtft_request_gpios_match(struct fbtft_par *par,
- const struct fbtft_gpio *gpio)
-{
- int ret;
- long val;
-
- fbtft_par_dbg(DEBUG_REQUEST_GPIOS_MATCH, par, "%s('%s')\n",
- __func__, gpio->name);
-
- if (strcasecmp(gpio->name, "reset") == 0) {
- par->gpio.reset = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "dc") == 0) {
- par->gpio.dc = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- } else if (strcasecmp(gpio->name, "cs") == 0) {
- par->gpio.cs = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "wr") == 0) {
- par->gpio.wr = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "rd") == 0) {
- par->gpio.rd = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "latch") == 0) {
- par->gpio.latch = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- } else if (gpio->name[0] == 'd' && gpio->name[1] == 'b') {
- ret = kstrtol(&gpio->name[2], 10, &val);
- if (ret == 0 && val < 16) {
- par->gpio.db[val] = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- }
- } else if (strcasecmp(gpio->name, "led") == 0) {
- par->gpio.led[0] = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- } else if (strcasecmp(gpio->name, "led_") == 0) {
- par->gpio.led[0] = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- }
-
- return FBTFT_GPIO_NO_MATCH;
-}
-
-static int fbtft_request_gpios(struct fbtft_par *par)
-{
- struct fbtft_platform_data *pdata = par->pdata;
- const struct fbtft_gpio *gpio;
- unsigned long flags;
- int ret;
-
- if (!(pdata && pdata->gpios))
- return 0;
-
- gpio = pdata->gpios;
- while (gpio->name[0]) {
- flags = FBTFT_GPIO_NO_MATCH;
- /* if driver provides match function, try it first,
- if no match use our own */
- if (par->fbtftops.request_gpios_match)
- flags = par->fbtftops.request_gpios_match(par, gpio);
- if (flags == FBTFT_GPIO_NO_MATCH)
- flags = fbtft_request_gpios_match(par, gpio);
- if (flags != FBTFT_GPIO_NO_MATCH) {
- ret = devm_gpio_request_one(par->info->device,
- gpio->gpio, flags,
- par->info->device->driver->name);
- if (ret < 0) {
- dev_err(par->info->device,
- "%s: gpio_request_one('%s'=%d) failed with %d\n",
- __func__, gpio->name,
- gpio->gpio, ret);
- return ret;
- }
- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par,
- "%s: '%s' = GPIO%d\n",
- __func__, gpio->name, gpio->gpio);
- }
- gpio++;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_OF
static int fbtft_request_one_gpio(struct fbtft_par *par,
- const char *name, int index, int *gpiop)
+ const char *name, int index,
+ struct gpio_desc **gpiop)
{
struct device *dev = par->info->device;
- struct device_node *node = dev->of_node;
- int gpio, flags, ret = 0;
- enum of_gpio_flags of_flags;
- if (of_find_property(node, name, NULL)) {
- gpio = of_get_named_gpio_flags(node, name, index, &of_flags);
- if (gpio == -ENOENT)
- return 0;
- if (gpio == -EPROBE_DEFER)
- return gpio;
- if (gpio < 0) {
- dev_err(dev,
- "failed to get '%s' from DT\n", name);
- return gpio;
- }
+ *gpiop = devm_gpiod_get_index_optional(dev, name, index,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(*gpiop))
+ return dev_err_probe(dev, PTR_ERR(*gpiop), "Failed to request %s GPIO\n", name);
- /* active low translates to initially low */
- flags = (of_flags & OF_GPIO_ACTIVE_LOW) ? GPIOF_OUT_INIT_LOW :
- GPIOF_OUT_INIT_HIGH;
- ret = devm_gpio_request_one(dev, gpio, flags,
- dev->driver->name);
- if (ret) {
- dev_err(dev,
- "gpio_request_one('%s'=%d) failed with %d\n",
- name, gpio, ret);
- return ret;
- }
- if (gpiop)
- *gpiop = gpio;
- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' = GPIO%d\n",
- __func__, name, gpio);
- }
+ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
+ __func__, name);
- return ret;
+ return 0;
}
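
Requesting the pins likewise moves from of_get_named_gpio_flags()/devm_gpio_request_one() to a single devm_gpiod_get_index_optional() call; gpiolib appends the "-gpios" suffix itself, which is why the connection names below shrink from "reset-gpios" to "reset". A sketch of the pattern for a hypothetical "foo" line:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_get_foo_gpio(struct device *dev, struct gpio_desc **out)
{
	/* NULL if the firmware does not describe the line, ERR_PTR on error */
	*out = devm_gpiod_get_index_optional(dev, "foo", 0, GPIOD_OUT_LOW);
	if (IS_ERR(*out))
		return dev_err_probe(dev, PTR_ERR(*out),
				     "Failed to request foo GPIO\n");

	return 0;
}
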
-static int fbtft_request_gpios_dt(struct fbtft_par *par)
+static int fbtft_request_gpios(struct fbtft_par *par)
{
int i;
int ret;
- if (!par->info->device->of_node)
- return -EINVAL;
-
- ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset);
+ ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc);
+ ret = fbtft_request_one_gpio(par, "dc", 0, &par->gpio.dc);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd);
+ ret = fbtft_request_one_gpio(par, "rd", 0, &par->gpio.rd);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr);
+ ret = fbtft_request_one_gpio(par, "wr", 0, &par->gpio.wr);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs);
+ ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch);
+ ret = fbtft_request_one_gpio(par, "latch", 0, &par->gpio.latch);
if (ret)
return ret;
for (i = 0; i < 16; i++) {
- ret = fbtft_request_one_gpio(par, "db-gpios", i,
- &par->gpio.db[i]);
+ ret = fbtft_request_one_gpio(par, "db", i,
+ &par->gpio.db[i]);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "led-gpios", i,
- &par->gpio.led[i]);
+ ret = fbtft_request_one_gpio(par, "led", i,
+ &par->gpio.led[i]);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "aux-gpios", i,
- &par->gpio.aux[i]);
+ ret = fbtft_request_one_gpio(par, "aux", i,
+ &par->gpio.aux[i]);
if (ret)
return ret;
}
return 0;
}
-#endif
-#ifdef CONFIG_FB_BACKLIGHT
static int fbtft_backlight_update_status(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
- bool polarity = !!(bd->props.state & BL_CORE_DRIVER1);
+ bool polarity = par->polarity;
- fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: polarity=%d, power=%d, fb_blank=%d\n",
- __func__, polarity, bd->props.power, bd->props.fb_blank);
+ fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s: polarity=%d, power=%d\n", __func__,
+ polarity, bd->props.power);
- if ((bd->props.power == FB_BLANK_UNBLANK) && (bd->props.fb_blank == FB_BLANK_UNBLANK))
- gpio_set_value(par->gpio.led[0], polarity);
+ if (!backlight_is_blank(bd))
+ gpiod_set_value(par->gpio.led[0], polarity);
else
- gpio_set_value(par->gpio.led[0], !polarity);
+ gpiod_set_value(par->gpio.led[0], !polarity);
return 0;
}
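
backlight_is_blank() replaces the open-coded power/fb_blank comparison above; it folds the power state and the blanking flags into one test. A minimal sketch of an update_status() built around it (the brightness handling is illustrative; fbtft itself only toggles a GPIO with the stored polarity):

#include <linux/backlight.h>
#include <linux/printk.h>

static int example_update_status(struct backlight_device *bd)
{
	int level = backlight_is_blank(bd) ? 0 : bd->props.brightness;

	pr_debug("driving backlight at level %d\n", level);
	return 0;
}
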
@@ -266,15 +151,14 @@ static int fbtft_backlight_get_brightness(struct backlight_device *bd)
void fbtft_unregister_backlight(struct fbtft_par *par)
{
- fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
-
if (par->info->bl_dev) {
- par->info->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ par->info->bl_dev->props.power = BACKLIGHT_POWER_OFF;
backlight_update_status(par->info->bl_dev);
backlight_device_unregister(par->info->bl_dev);
par->info->bl_dev = NULL;
}
}
+EXPORT_SYMBOL(fbtft_unregister_backlight);
static const struct backlight_ops fbtft_bl_ops = {
.get_brightness = fbtft_backlight_get_brightness,
@@ -286,22 +170,21 @@ void fbtft_register_backlight(struct fbtft_par *par)
struct backlight_device *bd;
struct backlight_properties bl_props = { 0, };
- fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s()\n", __func__);
-
- if (par->gpio.led[0] == -1) {
+ if (!par->gpio.led[0]) {
fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s(): led pin not set, exiting.\n", __func__);
+ "%s(): led pin not set, exiting.\n", __func__);
return;
}
bl_props.type = BACKLIGHT_RAW;
/* Assume backlight is off, get polarity from current state of pin */
- bl_props.power = FB_BLANK_POWERDOWN;
- if (!gpio_get_value(par->gpio.led[0]))
- bl_props.state |= BL_CORE_DRIVER1;
+ bl_props.power = BACKLIGHT_POWER_OFF;
+ if (!gpiod_get_value(par->gpio.led[0]))
+ par->polarity = true;
bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &fbtft_bl_ops, &bl_props);
+ par->info->device, par,
+ &fbtft_bl_ops, &bl_props);
if (IS_ERR(bd)) {
dev_err(par->info->device,
"cannot register backlight device (%ld)\n",
@@ -313,53 +196,48 @@ void fbtft_register_backlight(struct fbtft_par *par)
if (!par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
-#else
-void fbtft_register_backlight(struct fbtft_par *par) { };
-void fbtft_unregister_backlight(struct fbtft_par *par) { };
-#endif
EXPORT_SYMBOL(fbtft_register_backlight);
-EXPORT_SYMBOL(fbtft_unregister_backlight);
static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
int ye)
{
- /* Column address set */
- write_reg(par, 0x2A,
- (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
- /* Row address set */
- write_reg(par, 0x2B,
- (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static void fbtft_reset(struct fbtft_par *par)
{
- if (par->gpio.reset == -1)
+ if (!par->gpio.reset)
return;
- fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
- gpio_set_value(par->gpio.reset, 0);
- udelay(20);
- gpio_set_value(par->gpio.reset, 1);
- mdelay(120);
+
+ gpiod_set_value_cansleep(par->gpio.reset, 1);
+ usleep_range(20, 40);
+ gpiod_set_value_cansleep(par->gpio.reset, 0);
+ msleep(120);
+
+ gpiod_set_value_cansleep(par->gpio.cs, 1); /* Activate chip */
}
-static void fbtft_update_display(struct fbtft_par *par, unsigned start_line,
- unsigned end_line)
+static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
+ unsigned int end_line)
{
size_t offset, len;
- struct timespec ts_start, ts_end, ts_fps, ts_duration;
- long fps_ms, fps_us, duration_ms, duration_us;
+ ktime_t ts_start, ts_end;
long fps, throughput;
bool timeit = false;
int ret = 0;
- if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | DEBUG_TIME_EACH_UPDATE))) {
+ if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE |
+ DEBUG_TIME_EACH_UPDATE))) {
if ((par->debug & DEBUG_TIME_EACH_UPDATE) ||
- ((par->debug & DEBUG_TIME_FIRST_UPDATE) && !par->first_update_done)) {
- getnstimeofday(&ts_start);
+ ((par->debug & DEBUG_TIME_FIRST_UPDATE) &&
+ !par->first_update_done)) {
+ ts_start = ktime_get();
timeit = true;
}
}
@@ -367,21 +245,23 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned start_line,
/* Sanity checks */
if (start_line > end_line) {
dev_warn(par->info->device,
- "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
- __func__, start_line, end_line);
+ "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
+ __func__, start_line, end_line);
start_line = 0;
end_line = par->info->var.yres - 1;
}
- if (start_line > par->info->var.yres - 1 || end_line > par->info->var.yres - 1) {
+ if (start_line > par->info->var.yres - 1 ||
+ end_line > par->info->var.yres - 1) {
dev_warn(par->info->device,
- "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
- __func__, start_line, end_line, par->info->var.yres - 1);
+ "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
+ __func__, start_line,
+ end_line, par->info->var.yres - 1);
start_line = 0;
end_line = par->info->var.yres - 1;
}
fbtft_par_dbg(DEBUG_UPDATE_DISPLAY, par, "%s(start_line=%u, end_line=%u)\n",
- __func__, start_line, end_line);
+ __func__, start_line, end_line);
if (par->fbtftops.set_addr_win)
par->fbtftops.set_addr_win(par, 0, start_line,
@@ -396,30 +276,21 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned start_line,
__func__);
if (unlikely(timeit)) {
- getnstimeofday(&ts_end);
- if (par->update_time.tv_nsec == 0 && par->update_time.tv_sec == 0) {
- par->update_time.tv_sec = ts_start.tv_sec;
- par->update_time.tv_nsec = ts_start.tv_nsec;
- }
- ts_fps = timespec_sub(ts_start, par->update_time);
- par->update_time.tv_sec = ts_start.tv_sec;
- par->update_time.tv_nsec = ts_start.tv_nsec;
- fps_ms = (ts_fps.tv_sec * 1000) + ((ts_fps.tv_nsec / 1000000) % 1000);
- fps_us = (ts_fps.tv_nsec / 1000) % 1000;
- fps = fps_ms * 1000 + fps_us;
+ ts_end = ktime_get();
+ if (!ktime_to_ns(par->update_time))
+ par->update_time = ts_start;
+
+ fps = ktime_us_delta(ts_start, par->update_time);
+ par->update_time = ts_start;
fps = fps ? 1000000 / fps : 0;
- ts_duration = timespec_sub(ts_end, ts_start);
- duration_ms = (ts_duration.tv_sec * 1000) + ((ts_duration.tv_nsec / 1000000) % 1000);
- duration_us = (ts_duration.tv_nsec / 1000) % 1000;
- throughput = duration_ms * 1000 + duration_us;
+ throughput = ktime_us_delta(ts_end, ts_start);
throughput = throughput ? (len * 1000) / throughput : 0;
throughput = throughput * 1000 / 1024;
dev_info(par->info->device,
- "Display update: %ld kB/s (%ld.%.3ld ms), fps=%ld (%ld.%.3ld ms)\n",
- throughput, duration_ms, duration_us,
- fps, fps_ms, fps_us);
+ "Display update: %ld kB/s, fps=%ld\n",
+ throughput, fps);
par->first_update_done = true;
}
}
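
The timing rewrite above is a straight timespec-to-ktime conversion: one monotonic ktime_t replaces the tv_sec/tv_nsec bookkeeping, and ktime_us_delta() yields the elapsed microseconds directly. A minimal sketch of the measurement pattern, independent of fbtft:

#include <linux/ktime.h>

static s64 example_measure_us(void (*work)(void))
{
	ktime_t start = ktime_get();	/* monotonic timestamp */

	work();
	return ktime_us_delta(ktime_get(), start);	/* elapsed microseconds */
}
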
@@ -432,7 +303,7 @@ static void fbtft_mkdirty(struct fb_info *info, int y, int height)
/* special case, needed ? */
if (y == -1) {
y = 0;
- height = info->var.yres - 1;
+ height = info->var.yres;
}
/* Mark display lines/area as dirty */
@@ -447,14 +318,12 @@ static void fbtft_mkdirty(struct fb_info *info, int y, int height)
schedule_delayed_work(&info->deferred_work, fbdefio->delay);
}
-static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
+static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
struct fbtft_par *par = info->par;
- unsigned dirty_lines_start, dirty_lines_end;
- struct page *page;
- unsigned long index;
- unsigned y_low = 0, y_high = 0;
- int count = 0;
+ unsigned int dirty_lines_start, dirty_lines_end;
+ struct fb_deferred_io_pageref *pageref;
+ unsigned int y_low = 0, y_high = 0;
spin_lock(&par->dirty_lock);
dirty_lines_start = par->dirty_lines_start;
@@ -465,14 +334,10 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
spin_unlock(&par->dirty_lock);
/* Mark display lines as dirty */
- list_for_each_entry(page, pagelist, lru) {
- count++;
- index = page->index << PAGE_SHIFT;
- y_low = index / info->fix.line_length;
- y_high = (index + PAGE_SIZE - 1) / info->fix.line_length;
- dev_dbg(info->device,
- "page->index=%lu y_low=%d y_high=%d\n",
- page->index, y_low, y_high);
+ list_for_each_entry(pageref, pagereflist, list) {
+ y_low = pageref->offset / info->fix.line_length;
+ y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length;
+ dev_dbg(info->device, "y_low=%d y_high=%d\n", y_low, y_high);
if (y_high > info->var.yres - 1)
y_high = info->var.yres - 1;
if (y_low < dirty_lines_start)
@@ -485,75 +350,19 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
dirty_lines_start, dirty_lines_end);
}
-static void fbtft_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct fbtft_par *par = info->par;
-
- dev_dbg(info->dev,
- "%s: dx=%d, dy=%d, width=%d, height=%d\n",
- __func__, rect->dx, rect->dy, rect->width, rect->height);
- sys_fillrect(info, rect);
-
- par->fbtftops.mkdirty(info, rect->dy, rect->height);
-}
-
-static void fbtft_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct fbtft_par *par = info->par;
-
- dev_dbg(info->dev,
- "%s: dx=%d, dy=%d, width=%d, height=%d\n",
- __func__, area->dx, area->dy, area->width, area->height);
- sys_copyarea(info, area);
-
- par->fbtftops.mkdirty(info, area->dy, area->height);
-}
-
-static void fbtft_fb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct fbtft_par *par = info->par;
-
- dev_dbg(info->dev,
- "%s: dx=%d, dy=%d, width=%d, height=%d\n",
- __func__, image->dx, image->dy, image->width, image->height);
- sys_imageblit(info, image);
-
- par->fbtftops.mkdirty(info, image->dy, image->height);
-}
-
-static ssize_t fbtft_fb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct fbtft_par *par = info->par;
- ssize_t res;
-
- dev_dbg(info->dev,
- "%s: count=%zd, ppos=%llu\n", __func__, count, *ppos);
- res = fb_sys_write(info, buf, count, ppos);
-
- /* TODO: only mark changed area
- update all for now */
- par->fbtftops.mkdirty(info, -1, 0);
-
- return res;
-}
-
/* from pxafb.c */
-static unsigned int chan_to_field(unsigned chan, struct fb_bitfield *bf)
+static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
{
chan &= 0xffff;
chan >>= 16 - bf->length;
return chan << bf->offset;
}
-static int fbtft_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
+static int fbtft_fb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int transp, struct fb_info *info)
{
- unsigned val;
+ unsigned int val;
int ret = 1;
dev_dbg(info->dev,
@@ -573,7 +382,6 @@ static int fbtft_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
ret = 0;
}
break;
-
}
return ret;
}
@@ -603,6 +411,32 @@ static int fbtft_fb_blank(int blank, struct fb_info *info)
return ret;
}
+static void fbtft_ops_damage_range(struct fb_info *info, off_t off, size_t len)
+{
+ struct fbtft_par *par = info->par;
+
+ /* TODO: only mark changed area update all for now */
+ par->fbtftops.mkdirty(info, -1, 0);
+}
+
+static void fbtft_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height)
+{
+ struct fbtft_par *par = info->par;
+
+ par->fbtftops.mkdirty(info, y, height);
+}
+
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(fbtft_ops,
+ fbtft_ops_damage_range,
+ fbtft_ops_damage_area)
+
+static const struct fb_ops fbtft_ops = {
+ .owner = THIS_MODULE,
+ FB_DEFAULT_DEFERRED_OPS(fbtft_ops),
+ .fb_setcolreg = fbtft_fb_setcolreg,
+ .fb_blank = fbtft_fb_blank,
+};
+
static void fbtft_merge_fbtftops(struct fbtft_ops *dst, struct fbtft_ops *src)
{
if (src->write)
@@ -646,11 +480,11 @@ static void fbtft_merge_fbtftops(struct fbtft_ops *dst, struct fbtft_ops *src)
*
* @display: pointer to structure describing the display
* @dev: pointer to the device for this fb, this can be NULL
+ * @pdata: platform data for the display in use
*
* Creates a new frame buffer info structure.
*
* Also creates and populates the following structures:
- * info->fbops
* info->fbdefio
* info->pseudo_palette
* par->fbtftops
@@ -665,23 +499,23 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
{
struct fb_info *info;
struct fbtft_par *par;
- struct fb_ops *fbops = NULL;
struct fb_deferred_io *fbdefio = NULL;
u8 *vmem = NULL;
void *txbuf = NULL;
void *buf = NULL;
- unsigned width;
- unsigned height;
+ unsigned int width;
+ unsigned int height;
int txbuflen = display->txbuflen;
- unsigned bpp = display->bpp;
- unsigned fps = display->fps;
- int vmem_size, i;
- int *init_sequence = display->init_sequence;
+ unsigned int bpp = display->bpp;
+ unsigned int fps = display->fps;
+ int vmem_size;
+ const s16 *init_sequence = display->init_sequence;
char *gamma = display->gamma;
- unsigned long *gamma_curves = NULL;
+ u32 *gamma_curves = NULL;
/* sanity check */
- if (display->gamma_num * display->gamma_len > FBTFT_GAMMA_MAX_VALUES_TOTAL) {
+ if (display->gamma_num * display->gamma_len >
+ FBTFT_GAMMA_MAX_VALUES_TOTAL) {
dev_err(dev, "FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n",
FBTFT_GAMMA_MAX_VALUES_TOTAL);
return NULL;
@@ -734,52 +568,42 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
height = display->height;
}
- vmem_size = display->width * display->height * bpp / 8;
- vmem = vzalloc(vmem_size);
- if (!vmem)
- goto alloc_fail;
-
- fbops = devm_kzalloc(dev, sizeof(struct fb_ops), GFP_KERNEL);
- if (!fbops)
- goto alloc_fail;
-
fbdefio = devm_kzalloc(dev, sizeof(struct fb_deferred_io), GFP_KERNEL);
if (!fbdefio)
- goto alloc_fail;
+ return NULL;
buf = devm_kzalloc(dev, 128, GFP_KERNEL);
if (!buf)
- goto alloc_fail;
+ return NULL;
if (display->gamma_num && display->gamma_len) {
- gamma_curves = devm_kzalloc(dev, display->gamma_num * display->gamma_len * sizeof(gamma_curves[0]),
- GFP_KERNEL);
+ gamma_curves = devm_kcalloc(dev,
+ display->gamma_num *
+ display->gamma_len,
+ sizeof(gamma_curves[0]),
+ GFP_KERNEL);
if (!gamma_curves)
- goto alloc_fail;
+ return NULL;
}
info = framebuffer_alloc(sizeof(struct fbtft_par), dev);
if (!info)
- goto alloc_fail;
+ return NULL;
- info->screen_base = (u8 __force __iomem *)vmem;
- info->fbops = fbops;
- info->fbdefio = fbdefio;
+ vmem_size = display->width * display->height * bpp / 8;
+ vmem = vzalloc(vmem_size);
+ if (!vmem)
+ goto release_framebuf;
- fbops->owner = dev->driver->owner;
- fbops->fb_read = fb_sys_read;
- fbops->fb_write = fbtft_fb_write;
- fbops->fb_fillrect = fbtft_fb_fillrect;
- fbops->fb_copyarea = fbtft_fb_copyarea;
- fbops->fb_imageblit = fbtft_fb_imageblit;
- fbops->fb_setcolreg = fbtft_fb_setcolreg;
- fbops->fb_blank = fbtft_fb_blank;
+ info->screen_buffer = vmem;
+ info->fbops = &fbtft_ops;
+ info->fbdefio = fbdefio;
- fbdefio->delay = HZ/fps;
- fbdefio->deferred_io = fbtft_deferred_io;
- fb_deferred_io_init(info);
+ fbdefio->delay = HZ / fps;
+ fbdefio->sort_pagereflist = true;
+ fbdefio->deferred_io = fbtft_deferred_io;
- strncpy(info->fix.id, dev->driver->name, 16);
+ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.xpanstep = 0;
@@ -788,6 +612,8 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
info->fix.line_length = width * bpp / 8;
info->fix.accel = FB_ACCEL_NONE;
info->fix.smem_len = vmem_size;
+ if (fb_deferred_io_init(info))
+ goto release_screen_buffer;
info->var.rotate = pdata->rotate;
info->var.xres = width;
@@ -807,7 +633,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
info->var.transp.offset = 0;
info->var.transp.length = 0;
- info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+ info->flags = FBINFO_VIRTFB;
par = info->par;
par->info = info;
@@ -825,14 +651,16 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
info->pseudo_palette = par->pseudo_palette;
if (par->gamma.curves && gamma) {
- if (fbtft_gamma_parse_str(par,
- par->gamma.curves, gamma, strlen(gamma)))
- goto alloc_fail;
+ if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
+ strlen(gamma)))
+ goto cleanup_deferred;
}
/* Transmit buffer */
if (txbuflen == -1)
txbuflen = vmem_size + 2; /* add in case startbyte is used */
+ if (txbuflen >= vmem_size + 2)
+ txbuflen = 0;
#ifdef __LITTLE_ENDIAN
if ((!txbuflen) && (bpp > 8))
@@ -840,34 +668,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
#endif
if (txbuflen > 0) {
-#ifdef CONFIG_HAS_DMA
- if (dma) {
- dev->coherent_dma_mask = ~0;
- txbuf = dmam_alloc_coherent(dev, txbuflen, &par->txbuf.dma, GFP_DMA);
- } else
-#endif
- {
- txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
- }
+ txbuf = kzalloc(txbuflen, GFP_KERNEL);
if (!txbuf)
- goto alloc_fail;
+ goto cleanup_deferred;
par->txbuf.buf = txbuf;
par->txbuf.len = txbuflen;
}
- /* Initialize gpios to disabled */
- par->gpio.reset = -1;
- par->gpio.dc = -1;
- par->gpio.rd = -1;
- par->gpio.wr = -1;
- par->gpio.cs = -1;
- par->gpio.latch = -1;
- for (i = 0; i < 16; i++) {
- par->gpio.db[i] = -1;
- par->gpio.led[i] = -1;
- par->gpio.aux[i] = -1;
- }
-
/* default fbtft operations */
par->fbtftops.write = fbtft_write_spi;
par->fbtftops.read = fbtft_read_spi;
@@ -877,7 +684,6 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
par->fbtftops.reset = fbtft_reset;
par->fbtftops.mkdirty = fbtft_mkdirty;
par->fbtftops.update_display = fbtft_update_display;
- par->fbtftops.request_gpios = fbtft_request_gpios;
if (display->backlight)
par->fbtftops.register_backlight = fbtft_register_backlight;
@@ -886,9 +692,12 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
return info;
-alloc_fail:
- vfree(vmem);
-
+cleanup_deferred:
+ fb_deferred_io_cleanup(info);
+release_screen_buffer:
+ vfree(info->screen_buffer);
+release_framebuf:
+ framebuffer_release(info);
return NULL;
}
EXPORT_SYMBOL(fbtft_framebuffer_alloc);
@@ -901,8 +710,11 @@ EXPORT_SYMBOL(fbtft_framebuffer_alloc);
*/
void fbtft_framebuffer_release(struct fb_info *info)
{
+ struct fbtft_par *par = info->par;
+
+ kfree(par->txbuf.buf);
fb_deferred_io_cleanup(info);
- vfree(info->screen_base);
+ vfree(info->screen_buffer);
framebuffer_release(info);
}
EXPORT_SYMBOL(fbtft_framebuffer_release);
@@ -976,35 +788,28 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
fbtft_sysfs_init(par);
- if (par->txbuf.buf)
- sprintf(text1, ", %zu KiB %sbuffer memory",
- par->txbuf.len >> 10, par->txbuf.dma ? "DMA " : "");
+ if (par->txbuf.buf && par->txbuf.len >= 1024)
+ sprintf(text1, ", %zu KiB buffer memory", par->txbuf.len >> 10);
if (spi)
- sprintf(text2, ", spi%d.%d at %d MHz", spi->master->bus_num,
- spi->chip_select, spi->max_speed_hz / 1000000);
+ sprintf(text2, ", spi%d.%d at %d MHz", spi->controller->bus_num,
+ spi_get_chipselect(spi, 0), spi->max_speed_hz / 1000000);
dev_info(fb_info->dev,
- "%s frame buffer, %dx%d, %d KiB video memory%s, fps=%lu%s\n",
- fb_info->fix.id, fb_info->var.xres, fb_info->var.yres,
- fb_info->fix.smem_len >> 10, text1,
- HZ / fb_info->fbdefio->delay, text2);
+ "%s frame buffer, %dx%d, %d KiB video memory%s, fps=%lu%s\n",
+ fb_info->fix.id, fb_info->var.xres, fb_info->var.yres,
+ fb_info->fix.smem_len >> 10, text1,
+ HZ / fb_info->fbdefio->delay, text2);
-#ifdef CONFIG_FB_BACKLIGHT
/* Turn on backlight if available */
if (fb_info->bl_dev) {
- fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
+ fb_info->bl_dev->props.power = BACKLIGHT_POWER_ON;
fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
}
-#endif
return 0;
reg_fail:
if (par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight(par);
- if (spi)
- spi_set_drvdata(spi, NULL);
- if (par->pdev)
- platform_set_drvdata(par->pdev, NULL);
return ret;
}
@@ -1022,63 +827,66 @@ EXPORT_SYMBOL(fbtft_register_framebuffer);
int fbtft_unregister_framebuffer(struct fb_info *fb_info)
{
struct fbtft_par *par = fb_info->par;
- struct spi_device *spi = par->spi;
- if (spi)
- spi_set_drvdata(spi, NULL);
- if (par->pdev)
- platform_set_drvdata(par->pdev, NULL);
if (par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight(par);
fbtft_sysfs_exit(par);
- return unregister_framebuffer(fb_info);
+ unregister_framebuffer(fb_info);
+
+ return 0;
}
EXPORT_SYMBOL(fbtft_unregister_framebuffer);
-#ifdef CONFIG_OF
/**
- * fbtft_init_display_dt() - Device Tree init_display() function
+ * fbtft_init_display_from_property() - Device Tree init_display() function
* @par: Driver data
*
* Return: 0 if successful, negative if error
*/
-static int fbtft_init_display_dt(struct fbtft_par *par)
+static int fbtft_init_display_from_property(struct fbtft_par *par)
{
- struct device_node *node = par->info->device->of_node;
- struct property *prop;
- const __be32 *p;
+ struct device *dev = par->info->device;
+ int buf[64], count, index, i, j, ret;
+ u32 *values;
u32 val;
- int buf[64], i, j;
- if (!node)
+ count = device_property_count_u32(dev, "init");
+ if (count < 0)
+ return count;
+ if (count == 0)
return -EINVAL;
- prop = of_find_property(node, "init", NULL);
- p = of_prop_next_u32(prop, NULL, &val);
- if (!p)
- return -EINVAL;
+ values = kmalloc_array(count + 1, sizeof(*values), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(dev, "init", values, count);
+ if (ret)
+ goto out_free;
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
- while (p) {
+ index = -1;
+ val = values[++index];
+
+ while (index < count) {
if (val & FBTFT_OF_INIT_CMD) {
val &= 0xFFFF;
i = 0;
- while (p && !(val & 0xFFFF0000)) {
+ while ((index < count) && !(val & 0xFFFF0000)) {
if (i > 63) {
- dev_err(par->info->device,
- "%s: Maximum register values exceeded\n",
- __func__);
- return -EINVAL;
+ dev_err(dev,
+ "%s: Maximum register values exceeded\n",
+ __func__);
+ ret = -EINVAL;
+ goto out_free;
}
buf[i++] = val;
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
}
/* make debug message */
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: write_register:\n");
+ "init: write_register:\n");
for (j = 0; j < i; j++)
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
"buf[%d] = %02X\n", j, buf[j]);
@@ -1102,19 +910,20 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
buf[60], buf[61], buf[62], buf[63]);
} else if (val & FBTFT_OF_INIT_DELAY) {
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: msleep(%u)\n", val & 0xFFFF);
+ "init: msleep(%u)\n", val & 0xFFFF);
msleep(val & 0xFFFF);
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
} else {
- dev_err(par->info->device, "illegal init value 0x%X\n",
- val);
- return -EINVAL;
+ dev_err(dev, "illegal init value 0x%X\n", val);
+ ret = -EINVAL;
+ goto out_free;
}
}
- return 0;
+out_free:
+ kfree(values);
+ return ret;
}
-#endif
/**
* fbtft_init_display() - Generic init_display() function
@@ -1127,9 +936,7 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
int fbtft_init_display(struct fbtft_par *par)
{
int buf[64];
- char msg[128];
- char str[16];
- int i = 0;
+ int i;
int j;
/* sanity check */
@@ -1140,9 +947,11 @@ int fbtft_init_display(struct fbtft_par *par)
}
/* make sure stop marker exists */
- for (i = 0; i < FBTFT_MAX_INIT_SEQUENCE; i++)
+ for (i = 0; i < FBTFT_MAX_INIT_SEQUENCE; i++) {
if (par->init_sequence[i] == -3)
break;
+ }
+
if (i == FBTFT_MAX_INIT_SEQUENCE) {
dev_err(par->info->device,
"missing stop marker at end of init sequence\n");
@@ -1150,8 +959,6 @@ int fbtft_init_display(struct fbtft_par *par)
}
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
i = 0;
while (i < FBTFT_MAX_INIT_SEQUENCE) {
@@ -1173,25 +980,22 @@ int fbtft_init_display(struct fbtft_par *par)
switch (par->init_sequence[i]) {
case -1:
i++;
+
/* make debug message */
- strcpy(msg, "");
- j = i + 1;
- while (par->init_sequence[j] >= 0) {
- sprintf(str, "0x%02X ", par->init_sequence[j]);
- strcat(msg, str);
- j++;
- }
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: write(0x%02X) %s\n",
- par->init_sequence[i], msg);
+ for (j = 0; par->init_sequence[i + 1 + j] >= 0; j++)
+ ;
+
+ fbtft_par_dbg_hex(DEBUG_INIT_DISPLAY, par, par->info->device,
+ s16, &par->init_sequence[i + 1], j,
+ "init: write(0x%02X)", par->init_sequence[i]);
/* Write */
j = 0;
while (par->init_sequence[i] >= 0) {
if (j > 63) {
dev_err(par->info->device,
- "%s: Maximum register values exceeded\n",
- __func__);
+ "%s: Maximum register values exceeded\n",
+ __func__);
return -EINVAL;
}
buf[j++] = par->init_sequence[i++];
@@ -1217,7 +1021,8 @@ int fbtft_init_display(struct fbtft_par *par)
case -2:
i++;
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: mdelay(%d)\n", par->init_sequence[i]);
+ "init: mdelay(%d)\n",
+ par->init_sequence[i]);
mdelay(par->init_sequence[i++]);
break;
default:
@@ -1247,10 +1052,8 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
struct fbtft_platform_data *pdata = par->pdata;
int i;
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
- if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
- par->gpio.dc < 0) {
+ if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
+ !par->gpio.dc) {
dev_err(par->info->device,
"Missing info about 'dc' gpio. Aborting.\n");
return -EINVAL;
@@ -1259,12 +1062,12 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
if (!par->pdev)
return 0;
- if (par->gpio.wr < 0) {
+ if (!par->gpio.wr) {
dev_err(par->info->device, "Missing 'wr' gpio. Aborting.\n");
return -EINVAL;
}
for (i = 0; i < pdata->display.buswidth; i++) {
- if (par->gpio.db[i] < 0) {
+ if (!par->gpio.db[i]) {
dev_err(par->info->device,
"Missing 'db%02d' gpio. Aborting.\n", i);
return -EINVAL;
@@ -1274,27 +1077,25 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
return 0;
}
-#ifdef CONFIG_OF
/* returns 0 if the property is not present */
-static u32 fbtft_of_value(struct device_node *node, const char *propname)
+static u32 fbtft_property_value(struct device *dev, const char *propname)
{
int ret;
u32 val = 0;
- ret = of_property_read_u32(node, propname, &val);
+ ret = device_property_read_u32(dev, propname, &val);
if (ret == 0)
- pr_info("%s: %s = %u\n", __func__, propname, val);
+ dev_info(dev, "%s: %s = %u\n", __func__, propname, val);
return val;
}
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
+static struct fbtft_platform_data *fbtft_properties_read(struct device *dev)
{
- struct device_node *node = dev->of_node;
struct fbtft_platform_data *pdata;
- if (!node) {
- dev_err(dev, "Missing platform data or DT\n");
+ if (!dev_fwnode(dev)) {
+ dev_err(dev, "Missing platform data or properties\n");
return ERR_PTR(-EINVAL);
}
@@ -1302,35 +1103,30 @@ static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->display.width = fbtft_of_value(node, "width");
- pdata->display.height = fbtft_of_value(node, "height");
- pdata->display.regwidth = fbtft_of_value(node, "regwidth");
- pdata->display.buswidth = fbtft_of_value(node, "buswidth");
- pdata->display.backlight = fbtft_of_value(node, "backlight");
- pdata->display.bpp = fbtft_of_value(node, "bpp");
- pdata->display.debug = fbtft_of_value(node, "debug");
- pdata->rotate = fbtft_of_value(node, "rotate");
- pdata->bgr = of_property_read_bool(node, "bgr");
- pdata->fps = fbtft_of_value(node, "fps");
- pdata->txbuflen = fbtft_of_value(node, "txbuflen");
- pdata->startbyte = fbtft_of_value(node, "startbyte");
- of_property_read_string(node, "gamma", (const char **)&pdata->gamma);
-
- if (of_find_property(node, "led-gpios", NULL))
+ pdata->display.width = fbtft_property_value(dev, "width");
+ pdata->display.height = fbtft_property_value(dev, "height");
+ pdata->display.regwidth = fbtft_property_value(dev, "regwidth");
+ pdata->display.buswidth = fbtft_property_value(dev, "buswidth");
+ pdata->display.backlight = fbtft_property_value(dev, "backlight");
+ pdata->display.bpp = fbtft_property_value(dev, "bpp");
+ pdata->display.debug = fbtft_property_value(dev, "debug");
+ pdata->rotate = fbtft_property_value(dev, "rotate");
+ pdata->bgr = device_property_read_bool(dev, "bgr");
+ pdata->fps = fbtft_property_value(dev, "fps");
+ pdata->txbuflen = fbtft_property_value(dev, "txbuflen");
+ pdata->startbyte = fbtft_property_value(dev, "startbyte");
+ device_property_read_string(dev, "gamma", (const char **)&pdata->gamma);
+
+ if (device_property_present(dev, "led-gpios"))
pdata->display.backlight = 1;
- if (of_find_property(node, "init", NULL))
- pdata->display.fbtftops.init_display = fbtft_init_display_dt;
- pdata->display.fbtftops.request_gpios = fbtft_request_gpios_dt;
+ if (device_property_present(dev, "init"))
+ pdata->display.fbtftops.init_display =
+ fbtft_init_display_from_property;
+
+ pdata->display.fbtftops.request_gpios = fbtft_request_gpios;
return pdata;
}
-#else
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
-{
- dev_err(dev, "Missing platform data\n");
- return ERR_PTR(-EINVAL);
-}
-#endif
/**
* fbtft_probe_common() - Generic device probe() helper function
@@ -1345,7 +1141,8 @@ static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
* Return: 0 if successful, negative if error
*/
int fbtft_probe_common(struct fbtft_display *display,
- struct spi_device *sdev, struct platform_device *pdev)
+ struct spi_device *sdev,
+ struct platform_device *pdev)
{
struct device *dev;
struct fb_info *info;
@@ -1358,12 +1155,9 @@ int fbtft_probe_common(struct fbtft_display *display,
else
dev = &pdev->dev;
- if (unlikely(display->debug & DEBUG_DRIVER_INIT_FUNCTIONS))
- dev_info(dev, "%s()\n", __func__);
-
pdata = dev->platform_data;
if (!pdata) {
- pdata = fbtft_probe_dt(dev);
+ pdata = fbtft_properties_read(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
@@ -1377,25 +1171,23 @@ int fbtft_probe_common(struct fbtft_display *display,
par->pdev = pdev;
if (display->buswidth == 0) {
- dev_err(dev, "buswidth is not set\n");
- return -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "buswidth is not set\n");
+ goto out_release;
}
/* write register functions */
- if (display->regwidth == 8 && display->buswidth == 8) {
+ if (display->regwidth == 8 && display->buswidth == 8)
par->fbtftops.write_register = fbtft_write_reg8_bus8;
- } else
- if (display->regwidth == 8 && display->buswidth == 9 && par->spi) {
+ else if (display->regwidth == 8 && display->buswidth == 9 && par->spi)
par->fbtftops.write_register = fbtft_write_reg8_bus9;
- } else if (display->regwidth == 16 && display->buswidth == 8) {
+ else if (display->regwidth == 16 && display->buswidth == 8)
par->fbtftops.write_register = fbtft_write_reg16_bus8;
- } else if (display->regwidth == 16 && display->buswidth == 16) {
+ else if (display->regwidth == 16 && display->buswidth == 16)
par->fbtftops.write_register = fbtft_write_reg16_bus16;
- } else {
+ else
dev_warn(dev,
- "no default functions for regwidth=%d and buswidth=%d\n",
- display->regwidth, display->buswidth);
- }
+ "no default functions for regwidth=%d and buswidth=%d\n",
+ display->regwidth, display->buswidth);
/* write_vmem() functions */
if (display->buswidth == 8)
@@ -1415,15 +1207,16 @@ int fbtft_probe_common(struct fbtft_display *display,
/* 9-bit SPI setup */
if (par->spi && display->buswidth == 9) {
- if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
+ if (par->spi->controller->bits_per_word_mask & SPI_BPW_MASK(9)) {
par->spi->bits_per_word = 9;
} else {
dev_warn(&par->spi->dev,
- "9-bit SPI not available, emulating using 8-bit.\n");
+ "9-bit SPI not available, emulating using 8-bit.\n");
/* allocate buffer with room for dc bits */
par->extra = devm_kzalloc(par->info->device,
- par->txbuf.len + (par->txbuf.len / 8) + 8,
- GFP_KERNEL);
+ par->txbuf.len +
+ (par->txbuf.len / 8) + 8,
+ GFP_KERNEL);
if (!par->extra) {
ret = -ENOMEM;
goto out_release;
@@ -1464,24 +1257,19 @@ EXPORT_SYMBOL(fbtft_probe_common);
* @info: Framebuffer
*
* Unregisters and releases the framebuffer
- *
- * Return: 0 if successful, negative if error
*/
-int fbtft_remove_common(struct device *dev, struct fb_info *info)
+void fbtft_remove_common(struct device *dev, struct fb_info *info)
{
struct fbtft_par *par;
- if (!info)
- return -EINVAL;
par = info->par;
if (par)
fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par,
- "%s()\n", __func__);
+ "%s()\n", __func__);
fbtft_unregister_framebuffer(info);
fbtft_framebuffer_release(info);
-
- return 0;
}
EXPORT_SYMBOL(fbtft_remove_common);
+MODULE_DESCRIPTION("Core FB support for small TFT LCD display modules");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index a6f091fb975c..de1904a443c2 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include "fbtft.h"
@@ -13,7 +14,7 @@ int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len)
struct spi_message m;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
if (!par->spi) {
dev_err(par->info->device,
@@ -22,10 +23,6 @@ int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len)
}
spi_message_init(&m);
- if (par->txbuf.dma && buf == par->txbuf.buf) {
- t.tx_dma = par->txbuf.dma;
- m.is_dma_mapped = 1;
- }
spi_message_add_tail(&t, &m);
return spi_sync(par->spi, &m);
}
@@ -50,7 +47,7 @@ int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len)
u64 val, dc, tmp;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
if (!par->extra) {
dev_err(par->info->device, "%s: error: par->extra is NULL\n",
@@ -75,7 +72,7 @@ int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len)
src++;
}
tmp |= ((*src & 0x0100) ? 1 : 0);
- *(u64 *)dst = cpu_to_be64(tmp);
+ *(__be64 *)dst = cpu_to_be64(tmp);
dst += 8;
*dst++ = (u8)(*src++ & 0x00FF);
added++;
@@ -112,14 +109,15 @@ int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len)
txbuf[0] = par->startbyte | 0x3;
t.tx_buf = txbuf;
fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8,
- txbuf, len, "%s(len=%d) txbuf => ", __func__, len);
+ txbuf, len, "%s(len=%zu) txbuf => ",
+ __func__, len);
}
spi_message_init(&m);
spi_message_add_tail(&t, &m);
ret = spi_sync(par->spi, &m);
fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8, buf, len,
- "%s(len=%d) buf <= ", __func__, len);
+ "%s(len=%zu) buf <= ", __func__, len);
return ret;
}
@@ -138,39 +136,39 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
while (len--) {
- data = *(u8 *) buf;
+ data = *(u8 *)buf;
/* Start writing by pulling down /WR */
- gpio_set_value(par->gpio.wr, 0);
+ gpiod_set_value(par->gpio.wr, 1);
/* Set data */
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
if (data == prev_data) {
- gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
} else {
for (i = 0; i < 8; i++) {
if ((data & 1) != (prev_data & 1))
- gpio_set_value(par->gpio.db[i],
- data & 1);
+ gpiod_set_value(par->gpio.db[i],
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
}
#else
for (i = 0; i < 8; i++) {
- gpio_set_value(par->gpio.db[i], data & 1);
+ gpiod_set_value(par->gpio.db[i], data & 1);
data >>= 1;
}
#endif
/* Pullup /WR */
- gpio_set_value(par->gpio.wr, 1);
+ gpiod_set_value(par->gpio.wr, 0);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u8 *) buf;
+ prev_data = *(u8 *)buf;
#endif
buf++;
}
@@ -188,39 +186,39 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%zu): ", __func__, len);
while (len) {
- data = *(u16 *) buf;
+ data = *(u16 *)buf;
/* Start writing by pulling down /WR */
- gpio_set_value(par->gpio.wr, 0);
+ gpiod_set_value(par->gpio.wr, 1);
/* Set data */
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
if (data == prev_data) {
- gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
} else {
for (i = 0; i < 16; i++) {
if ((data & 1) != (prev_data & 1))
- gpio_set_value(par->gpio.db[i],
- data & 1);
+ gpiod_set_value(par->gpio.db[i],
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
}
#else
for (i = 0; i < 16; i++) {
- gpio_set_value(par->gpio.db[i], data & 1);
+ gpiod_set_value(par->gpio.db[i], data & 1);
data >>= 1;
}
#endif
/* Pullup /WR */
- gpio_set_value(par->gpio.wr, 1);
+ gpiod_set_value(par->gpio.wr, 0);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
+ prev_data = *(u16 *)buf;
#endif
buf += 2;
len -= 2;
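
The parallel-bus writers above move from the legacy integer GPIO API to GPIO descriptors. gpiod_set_value() takes a logical value rather than a raw line level, so asserting the /WR strobe is written as 1 and the active-low behaviour is expected to come from the descriptor itself (for instance a GPIO_ACTIVE_LOW flag in the firmware description); that is one reading of why the 0/1 constants are inverted in these hunks. A small sketch with hypothetical names, assuming the line is described as active-low:

#include <linux/gpio/consumer.h>

/* Pulse a write strobe using logical values (illustrative only).
 * If "wr" carries the active-low flag, gpiod_set_value(wr, 1)
 * drives the physical pin low.
 */
static void example_pulse_wr(struct gpio_desc *wr)
{
	gpiod_set_value(wr, 1);		/* assert the strobe */
	gpiod_set_value(wr, 0);		/* release it */
}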
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
index 8d8bd12b90a1..e45c90a03a90 100644
--- a/drivers/staging/fbtft/fbtft-sysfs.c
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
#include "fbtft.h"
#include "internal.h"
static int get_next_ulong(char **str_p, unsigned long *val, char *sep, int base)
{
char *p_val;
- int ret;
if (!str_p || !(*str_p))
return -EINVAL;
@@ -14,29 +14,22 @@ static int get_next_ulong(char **str_p, unsigned long *val, char *sep, int base)
if (!p_val)
return -EINVAL;
- ret = kstrtoul(p_val, base, val);
- if (ret)
- return -EINVAL;
-
- return 0;
+ return kstrtoul(p_val, base, val);
}
-int fbtft_gamma_parse_str(struct fbtft_par *par, unsigned long *curves,
- const char *str, int size)
+int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves,
+ const char *str, int size)
{
char *str_p, *curve_p = NULL;
char *tmp;
unsigned long val = 0;
int ret = 0;
int curve_counter, value_counter;
-
- fbtft_par_dbg(DEBUG_SYSFS, par, "%s() str=\n", __func__);
+ int _count;
if (!str || !curves)
return -EINVAL;
- fbtft_par_dbg(DEBUG_SYSFS, par, "%s\n", str);
-
tmp = kmemdup(str, size + 1, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -72,7 +65,10 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, unsigned long *curves,
ret = get_next_ulong(&curve_p, &val, " ", 16);
if (ret)
goto out;
- curves[curve_counter * par->gamma.num_values + value_counter] = val;
+
+ _count = curve_counter * par->gamma.num_values +
+ value_counter;
+ curves[_count] = val;
value_counter++;
}
if (value_counter != par->gamma.num_values) {
@@ -94,7 +90,7 @@ out:
}
static ssize_t
-sprintf_gamma(struct fbtft_par *par, unsigned long *curves, char *buf)
+sprintf_gamma(struct fbtft_par *par, u32 *curves, char *buf)
{
ssize_t len = 0;
unsigned int i, j;
@@ -103,7 +99,7 @@ sprintf_gamma(struct fbtft_par *par, unsigned long *curves, char *buf)
for (i = 0; i < par->gamma.num_curves; i++) {
for (j = 0; j < par->gamma.num_values; j++)
len += scnprintf(&buf[len], PAGE_SIZE,
- "%04lx ", curves[i * par->gamma.num_values + j]);
+ "%04x ", curves[i * par->gamma.num_values + j]);
buf[len - 1] = '\n';
}
mutex_unlock(&par->gamma.lock);
@@ -112,12 +108,12 @@ sprintf_gamma(struct fbtft_par *par, unsigned long *curves, char *buf)
}
static ssize_t store_gamma_curve(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct fb_info *fb_info = dev_get_drvdata(device);
struct fbtft_par *par = fb_info->par;
- unsigned long tmp_curves[FBTFT_GAMMA_MAX_VALUES_TOTAL];
+ u32 tmp_curves[FBTFT_GAMMA_MAX_VALUES_TOTAL];
int ret;
ret = fbtft_gamma_parse_str(par, tmp_curves, buf, count);
@@ -130,7 +126,8 @@ static ssize_t store_gamma_curve(struct device *device,
mutex_lock(&par->gamma.lock);
memcpy(par->gamma.curves, tmp_curves,
- par->gamma.num_curves * par->gamma.num_values * sizeof(tmp_curves[0]));
+ par->gamma.num_curves * par->gamma.num_values *
+ sizeof(tmp_curves[0]));
mutex_unlock(&par->gamma.lock);
return count;
@@ -177,8 +174,8 @@ void fbtft_expand_debug_value(unsigned long *debug)
}
static ssize_t store_debug(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct fb_info *fb_info = dev_get_drvdata(device);
struct fbtft_par *par = fb_info->par;
@@ -193,15 +190,15 @@ static ssize_t store_debug(struct device *device,
}
static ssize_t show_debug(struct device *device,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct fb_info *fb_info = dev_get_drvdata(device);
struct fbtft_par *par = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%lu\n", par->debug);
+ return sysfs_emit(buf, "%lu\n", par->debug);
}
-static struct device_attribute debug_device_attr = \
+static struct device_attribute debug_device_attr =
__ATTR(debug, 0660, show_debug, store_debug);
void fbtft_sysfs_init(struct fbtft_par *par)
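
The show_debug() hunk above replaces snprintf() with sysfs_emit(), the helper preferred for sysfs ->show() callbacks because it enforces the single-PAGE_SIZE output contract and warns if the buffer is not page aligned. A short sketch of the idiom, using a hypothetical attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Typical read-only sysfs attribute using sysfs_emit() (illustrative only). */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	unsigned long value = 42;	/* stand-in for real driver state */

	return sysfs_emit(buf, "%lu\n", value);
}
static DEVICE_ATTR_RO(example);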
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 6dd42b28d594..317be17b95c1 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -1,16 +1,5 @@
-/*
- * Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2013 Noralf Tronnes */
#ifndef __LINUX_FBTFT_H
#define __LINUX_FBTFT_H
@@ -20,14 +9,6 @@
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
-#define FBTFT_NOP 0x00
-#define FBTFT_SWRESET 0x01
-#define FBTFT_RDDID 0x04
-#define FBTFT_RDDST 0x09
-#define FBTFT_CASET 0x2A
-#define FBTFT_RASET 0x2B
-#define FBTFT_RAMWR 0x2C
-
#define FBTFT_ONBOARD_BACKLIGHT 2
#define FBTFT_GPIO_NO_MATCH 0xFFFF
@@ -46,7 +27,7 @@
*/
struct fbtft_gpio {
char name[FBTFT_GPIO_NAME_SIZE];
- unsigned gpio;
+ struct gpio_desc *gpio;
};
struct fbtft_par;
@@ -83,16 +64,16 @@ struct fbtft_ops {
void (*write_register)(struct fbtft_par *par, int len, ...);
void (*set_addr_win)(struct fbtft_par *par,
- int xs, int ys, int xe, int ye);
+ int xs, int ys, int xe, int ye);
void (*reset)(struct fbtft_par *par);
void (*mkdirty)(struct fb_info *info, int from, int to);
void (*update_display)(struct fbtft_par *par,
- unsigned start_line, unsigned end_line);
+ unsigned int start_line, unsigned int end_line);
int (*init_display)(struct fbtft_par *par);
int (*blank)(struct fbtft_par *par, bool on);
unsigned long (*request_gpios_match)(struct fbtft_par *par,
- const struct fbtft_gpio *gpio);
+ const struct fbtft_gpio *gpio);
int (*request_gpios)(struct fbtft_par *par);
int (*verify_gpios)(struct fbtft_par *par);
@@ -100,7 +81,7 @@ struct fbtft_ops {
void (*unregister_backlight)(struct fbtft_par *par);
int (*set_var)(struct fbtft_par *par);
- int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
+ int (*set_gamma)(struct fbtft_par *par, u32 *curves);
};
/**
@@ -123,16 +104,16 @@ struct fbtft_ops {
* This structure is not stored by FBTFT except for init_sequence.
*/
struct fbtft_display {
- unsigned width;
- unsigned height;
- unsigned regwidth;
- unsigned buswidth;
- unsigned backlight;
+ unsigned int width;
+ unsigned int height;
+ unsigned int regwidth;
+ unsigned int buswidth;
+ unsigned int backlight;
struct fbtft_ops fbtftops;
- unsigned bpp;
- unsigned fps;
+ unsigned int bpp;
+ unsigned int fps;
int txbuflen;
- int *init_sequence;
+ const s16 *init_sequence;
char *gamma;
int gamma_num;
int gamma_len;
@@ -153,10 +134,9 @@ struct fbtft_display {
*/
struct fbtft_platform_data {
struct fbtft_display display;
- const struct fbtft_gpio *gpios;
- unsigned rotate;
+ unsigned int rotate;
bool bgr;
- unsigned fps;
+ unsigned int fps;
int txbuflen;
u8 startbyte;
char *gamma;
@@ -217,48 +197,52 @@ struct fbtft_par {
u32 pseudo_palette[16];
struct {
void *buf;
- dma_addr_t dma;
size_t len;
} txbuf;
u8 *buf;
u8 startbyte;
struct fbtft_ops fbtftops;
+ /* Spinlock to ensure thread-safe access to dirty_lines_start and dirty_lines_end */
spinlock_t dirty_lock;
- unsigned dirty_lines_start;
- unsigned dirty_lines_end;
+ unsigned int dirty_lines_start;
+ unsigned int dirty_lines_end;
struct {
- int reset;
- int dc;
- int rd;
- int wr;
- int latch;
- int cs;
- int db[16];
- int led[16];
- int aux[16];
+ struct gpio_desc *reset;
+ struct gpio_desc *dc;
+ struct gpio_desc *rd;
+ struct gpio_desc *wr;
+ struct gpio_desc *latch;
+ struct gpio_desc *cs;
+ struct gpio_desc *db[16];
+ struct gpio_desc *led[16];
+ struct gpio_desc *aux[16];
} gpio;
- int *init_sequence;
+ const s16 *init_sequence;
struct {
+ /* Mutex to synchronize access to gamma curve locking */
struct mutex lock;
- unsigned long *curves;
+ u32 *curves;
int num_values;
int num_curves;
} gamma;
unsigned long debug;
bool first_update_done;
- struct timespec update_time;
+ ktime_t update_time;
bool bgr;
void *extra;
+ bool polarity;
};
-#define NUMARGS(...) (sizeof((int[]){__VA_ARGS__})/sizeof(int))
+#define NUMARGS(...) (sizeof((int[]){__VA_ARGS__}) / sizeof(int))
-#define write_reg(par, ...) \
- par->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__)
+#define write_reg(par, ...) \
+ ((par)->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__))
/* fbtft-core.c */
+int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc);
+__printf(5, 6)
void fbtft_dbg_hex(const struct device *dev, int groupsize,
- void *buf, size_t len, const char *fmt, ...);
+ const void *buf, size_t len, const char *fmt, ...);
struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
struct device *dev,
struct fbtft_platform_data *pdata);
@@ -270,7 +254,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par);
int fbtft_init_display(struct fbtft_par *par);
int fbtft_probe_common(struct fbtft_display *display, struct spi_device *sdev,
struct platform_device *pdev);
-int fbtft_remove_common(struct device *dev, struct fb_info *info);
+void fbtft_remove_common(struct device *dev, struct fb_info *info);
/* fbtft-io.c */
int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len);
@@ -290,58 +274,63 @@ void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
+#define FBTFT_DT_TABLE(_compatible) \
+static const struct of_device_id dt_ids[] = { \
+ { .compatible = _compatible }, \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(of, dt_ids);
+
+#define FBTFT_SPI_DRIVER(_name, _compatible, _display, _spi_ids) \
+ \
+static int fbtft_driver_probe_spi(struct spi_device *spi) \
+{ \
+ return fbtft_probe_common(_display, spi, NULL); \
+} \
+ \
+static void fbtft_driver_remove_spi(struct spi_device *spi) \
+{ \
+ struct fb_info *info = spi_get_drvdata(spi); \
+ \
+ fbtft_remove_common(&spi->dev, info); \
+} \
+ \
+static struct spi_driver fbtft_driver_spi_driver = { \
+ .driver = { \
+ .name = _name, \
+ .of_match_table = dt_ids, \
+ }, \
+ .id_table = _spi_ids, \
+ .probe = fbtft_driver_probe_spi, \
+ .remove = fbtft_driver_remove_spi, \
+};
+
#define FBTFT_REGISTER_DRIVER(_name, _compatible, _display) \
\
-static int fbtft_driver_probe_spi(struct spi_device *spi) \
-{ \
- return fbtft_probe_common(_display, spi, NULL); \
-} \
- \
-static int fbtft_driver_remove_spi(struct spi_device *spi) \
-{ \
- struct fb_info *info = spi_get_drvdata(spi); \
- \
- return fbtft_remove_common(&spi->dev, info); \
-} \
- \
static int fbtft_driver_probe_pdev(struct platform_device *pdev) \
{ \
return fbtft_probe_common(_display, NULL, pdev); \
} \
\
-static int fbtft_driver_remove_pdev(struct platform_device *pdev) \
+static void fbtft_driver_remove_pdev(struct platform_device *pdev) \
{ \
struct fb_info *info = platform_get_drvdata(pdev); \
\
- return fbtft_remove_common(&pdev->dev, info); \
+ fbtft_remove_common(&pdev->dev, info); \
} \
\
-static const struct of_device_id dt_ids[] = { \
- { .compatible = _compatible }, \
- {}, \
-}; \
- \
-MODULE_DEVICE_TABLE(of, dt_ids); \
+FBTFT_DT_TABLE(_compatible) \
\
- \
-static struct spi_driver fbtft_driver_spi_driver = { \
- .driver = { \
- .name = _name, \
- .owner = THIS_MODULE, \
- .of_match_table = of_match_ptr(dt_ids), \
- }, \
- .probe = fbtft_driver_probe_spi, \
- .remove = fbtft_driver_remove_spi, \
-}; \
+FBTFT_SPI_DRIVER(_name, _compatible, _display, NULL) \
\
static struct platform_driver fbtft_driver_platform_driver = { \
.driver = { \
.name = _name, \
.owner = THIS_MODULE, \
- .of_match_table = of_match_ptr(dt_ids), \
+ .of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_pdev, \
- .remove = fbtft_driver_remove_pdev, \
+ .remove = fbtft_driver_remove_pdev, \
}; \
\
static int __init fbtft_driver_module_init(void) \
@@ -351,7 +340,10 @@ static int __init fbtft_driver_module_init(void) \
ret = spi_register_driver(&fbtft_driver_spi_driver); \
if (ret < 0) \
return ret; \
- return platform_driver_register(&fbtft_driver_platform_driver); \
+ ret = platform_driver_register(&fbtft_driver_platform_driver); \
+ if (ret < 0) \
+ spi_unregister_driver(&fbtft_driver_spi_driver); \
+ return ret; \
} \
\
static void __exit fbtft_driver_module_exit(void) \
@@ -363,50 +355,72 @@ static void __exit fbtft_driver_module_exit(void) \
module_init(fbtft_driver_module_init); \
module_exit(fbtft_driver_module_exit);
+#define FBTFT_REGISTER_SPI_DRIVER(_name, _comp_vend, _comp_dev, _display) \
+ \
+FBTFT_DT_TABLE(_comp_vend "," _comp_dev) \
+ \
+static const struct spi_device_id spi_ids[] = { \
+ { .name = _comp_dev }, \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(spi, spi_ids); \
+ \
+FBTFT_SPI_DRIVER(_name, _comp_vend "," _comp_dev, _display, spi_ids) \
+ \
+module_spi_driver(fbtft_driver_spi_driver);
+
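
FBTFT_REGISTER_SPI_DRIVER() is the SPI-only counterpart of FBTFT_REGISTER_DRIVER(): besides the OF match table it also emits a spi_device_id table, so module autoloading works from the SPI modalias as well. A hedged usage sketch for an imaginary panel driver; the driver, vendor and device names below are placeholders, not taken from this patch:

#include "fbtft.h"

static struct fbtft_display display = {
	.regwidth = 8,
	.buswidth = 8,
	/* width, height, fps, fbtftops, ... as required by the panel */
};

FBTFT_REGISTER_SPI_DRIVER("fb_example", "examplevendor", "example1234", &display);

MODULE_DESCRIPTION("FBTFT driver for an imaginary example panel");
MODULE_LICENSE("GPL");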
/* Debug macros */
/* shorthand debug levels */
#define DEBUG_LEVEL_1 DEBUG_REQUEST_GPIOS
-#define DEBUG_LEVEL_2 (DEBUG_LEVEL_1 | DEBUG_DRIVER_INIT_FUNCTIONS | DEBUG_TIME_FIRST_UPDATE)
-#define DEBUG_LEVEL_3 (DEBUG_LEVEL_2 | DEBUG_RESET | DEBUG_INIT_DISPLAY | DEBUG_BLANK | DEBUG_REQUEST_GPIOS | DEBUG_FREE_GPIOS | DEBUG_VERIFY_GPIOS | DEBUG_BACKLIGHT | DEBUG_SYSFS)
-#define DEBUG_LEVEL_4 (DEBUG_LEVEL_2 | DEBUG_FB_READ | DEBUG_FB_WRITE | DEBUG_FB_FILLRECT | DEBUG_FB_COPYAREA | DEBUG_FB_IMAGEBLIT | DEBUG_FB_BLANK)
+#define DEBUG_LEVEL_2 (DEBUG_LEVEL_1 | DEBUG_DRIVER_INIT_FUNCTIONS \
+ | DEBUG_TIME_FIRST_UPDATE)
+#define DEBUG_LEVEL_3 (DEBUG_LEVEL_2 | DEBUG_RESET | DEBUG_INIT_DISPLAY \
+ | DEBUG_BLANK | DEBUG_REQUEST_GPIOS \
+ | DEBUG_FREE_GPIOS \
+ | DEBUG_VERIFY_GPIOS \
+ | DEBUG_BACKLIGHT | DEBUG_SYSFS)
+#define DEBUG_LEVEL_4 (DEBUG_LEVEL_2 | DEBUG_FB_READ | DEBUG_FB_WRITE \
+ | DEBUG_FB_FILLRECT \
+ | DEBUG_FB_COPYAREA \
+ | DEBUG_FB_IMAGEBLIT | DEBUG_FB_BLANK)
#define DEBUG_LEVEL_5 (DEBUG_LEVEL_3 | DEBUG_UPDATE_DISPLAY)
#define DEBUG_LEVEL_6 (DEBUG_LEVEL_4 | DEBUG_LEVEL_5)
#define DEBUG_LEVEL_7 0xFFFFFFFF
-#define DEBUG_DRIVER_INIT_FUNCTIONS (1<<3)
-#define DEBUG_TIME_FIRST_UPDATE (1<<4)
-#define DEBUG_TIME_EACH_UPDATE (1<<5)
-#define DEBUG_DEFERRED_IO (1<<6)
-#define DEBUG_FBTFT_INIT_FUNCTIONS (1<<7)
+#define DEBUG_DRIVER_INIT_FUNCTIONS BIT(3)
+#define DEBUG_TIME_FIRST_UPDATE BIT(4)
+#define DEBUG_TIME_EACH_UPDATE BIT(5)
+#define DEBUG_DEFERRED_IO BIT(6)
+#define DEBUG_FBTFT_INIT_FUNCTIONS BIT(7)
/* fbops */
-#define DEBUG_FB_READ (1<<8)
-#define DEBUG_FB_WRITE (1<<9)
-#define DEBUG_FB_FILLRECT (1<<10)
-#define DEBUG_FB_COPYAREA (1<<11)
-#define DEBUG_FB_IMAGEBLIT (1<<12)
-#define DEBUG_FB_SETCOLREG (1<<13)
-#define DEBUG_FB_BLANK (1<<14)
+#define DEBUG_FB_READ BIT(8)
+#define DEBUG_FB_WRITE BIT(9)
+#define DEBUG_FB_FILLRECT BIT(10)
+#define DEBUG_FB_COPYAREA BIT(11)
+#define DEBUG_FB_IMAGEBLIT BIT(12)
+#define DEBUG_FB_SETCOLREG BIT(13)
+#define DEBUG_FB_BLANK BIT(14)
-#define DEBUG_SYSFS (1<<16)
+#define DEBUG_SYSFS BIT(16)
/* fbtftops */
-#define DEBUG_BACKLIGHT (1<<17)
-#define DEBUG_READ (1<<18)
-#define DEBUG_WRITE (1<<19)
-#define DEBUG_WRITE_VMEM (1<<20)
-#define DEBUG_WRITE_REGISTER (1<<21)
-#define DEBUG_SET_ADDR_WIN (1<<22)
-#define DEBUG_RESET (1<<23)
-#define DEBUG_MKDIRTY (1<<24)
-#define DEBUG_UPDATE_DISPLAY (1<<25)
-#define DEBUG_INIT_DISPLAY (1<<26)
-#define DEBUG_BLANK (1<<27)
-#define DEBUG_REQUEST_GPIOS (1<<28)
-#define DEBUG_FREE_GPIOS (1<<29)
-#define DEBUG_REQUEST_GPIOS_MATCH (1<<30)
-#define DEBUG_VERIFY_GPIOS (1<<31)
+#define DEBUG_BACKLIGHT BIT(17)
+#define DEBUG_READ BIT(18)
+#define DEBUG_WRITE BIT(19)
+#define DEBUG_WRITE_VMEM BIT(20)
+#define DEBUG_WRITE_REGISTER BIT(21)
+#define DEBUG_SET_ADDR_WIN BIT(22)
+#define DEBUG_RESET BIT(23)
+#define DEBUG_MKDIRTY BIT(24)
+#define DEBUG_UPDATE_DISPLAY BIT(25)
+#define DEBUG_INIT_DISPLAY BIT(26)
+#define DEBUG_BLANK BIT(27)
+#define DEBUG_REQUEST_GPIOS BIT(28)
+#define DEBUG_FREE_GPIOS BIT(29)
+#define DEBUG_REQUEST_GPIOS_MATCH BIT(30)
+#define DEBUG_VERIFY_GPIOS BIT(31)
#define fbtft_init_dbg(dev, format, arg...) \
do { \
@@ -417,14 +431,15 @@ do { \
#define fbtft_par_dbg(level, par, format, arg...) \
do { \
- if (unlikely(par->debug & level)) \
- dev_info(par->info->device, format, ##arg); \
+ if (unlikely((par)->debug & (level))) \
+ dev_info((par)->info->device, format, ##arg); \
} while (0)
#define fbtft_par_dbg_hex(level, par, dev, type, buf, num, format, arg...) \
do { \
- if (unlikely(par->debug & level)) \
- fbtft_dbg_hex(dev, sizeof(type), buf, num * sizeof(type), format, ##arg); \
+ if (unlikely((par)->debug & (level))) \
+ fbtft_dbg_hex(dev, sizeof(type), buf,\
+ (num) * sizeof(type), format, ##arg); \
} while (0)
#endif /* __LINUX_FBTFT_H */
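
The final two hunks wrap the macro parameters par and level in parentheses. That matters when a caller passes a compound expression: in the old body, par->debug & level with level expanding to DEBUG_READ | DEBUG_WRITE becomes par->debug & DEBUG_READ | DEBUG_WRITE, and since & binds tighter than |, the test is non-zero even when neither bit is set. A tiny standalone illustration (not driver code):

#include <linux/bits.h>

#define EX_READ		BIT(18)
#define EX_WRITE	BIT(19)

#define CHECK_BAD(dbg, level)	(dbg & level)		/* old, unparenthesized form */
#define CHECK_GOOD(dbg, level)	((dbg) & (level))	/* parenthesized, as the patch does */

/*
 * CHECK_BAD(debug, EX_READ | EX_WRITE) expands to
 *	(debug & EX_READ | EX_WRITE)
 * which is always true because EX_WRITE is OR-ed in after the &.
 * CHECK_GOOD() masks both bits as intended.
 */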
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
deleted file mode 100644
index a60f389122b0..000000000000
--- a/drivers/staging/fbtft/fbtft_device.c
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- *
- * Copyright (C) 2013, Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "fbtft_device: " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-
-#include "fbtft.h"
-
-#define MAX_GPIOS 32
-
-static struct spi_device *spi_device;
-static struct platform_device *p_device;
-
-static char *name;
-module_param(name, charp, 0);
-MODULE_PARM_DESC(name, "Devicename (required). name=list => list all supported devices.");
-
-static unsigned rotate;
-module_param(rotate, uint, 0);
-MODULE_PARM_DESC(rotate,
-"Angle to rotate display counter clockwise: 0, 90, 180, 270");
-
-static unsigned busnum;
-module_param(busnum, uint, 0);
-MODULE_PARM_DESC(busnum, "SPI bus number (default=0)");
-
-static unsigned cs;
-module_param(cs, uint, 0);
-MODULE_PARM_DESC(cs, "SPI chip select (default=0)");
-
-static unsigned speed;
-module_param(speed, uint, 0);
-MODULE_PARM_DESC(speed, "SPI speed (override device default)");
-
-static int mode = -1;
-module_param(mode, int, 0);
-MODULE_PARM_DESC(mode, "SPI mode (override device default)");
-
-static char *gpios;
-module_param(gpios, charp, 0);
-MODULE_PARM_DESC(gpios,
-"List of gpios. Comma separated with the form: reset:23,dc:24 (when overriding the default, all gpios must be specified)");
-
-static unsigned fps;
-module_param(fps, uint, 0);
-MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
-
-static char *gamma;
-module_param(gamma, charp, 0);
-MODULE_PARM_DESC(gamma,
-"String representation of Gamma Curve(s). Driver specific.");
-
-static int txbuflen;
-module_param(txbuflen, int, 0);
-MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)");
-
-static int bgr = -1;
-module_param(bgr, int, 0);
-MODULE_PARM_DESC(bgr,
-"BGR bit (supported by some drivers).");
-
-static unsigned startbyte;
-module_param(startbyte, uint, 0);
-MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays.");
-
-static bool custom;
-module_param(custom, bool, 0);
-MODULE_PARM_DESC(custom, "Add a custom display device. Use speed= argument to make it a SPI device, else platform_device");
-
-static unsigned width;
-module_param(width, uint, 0);
-MODULE_PARM_DESC(width, "Display width, used with the custom argument");
-
-static unsigned height;
-module_param(height, uint, 0);
-MODULE_PARM_DESC(height, "Display height, used with the custom argument");
-
-static unsigned buswidth = 8;
-module_param(buswidth, uint, 0);
-MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
-
-static int init[FBTFT_MAX_INIT_SEQUENCE];
-static int init_num;
-module_param_array(init, int, &init_num, 0);
-MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
-
-static unsigned long debug;
-module_param(debug, ulong, 0);
-MODULE_PARM_DESC(debug,
-"level: 0-7 (the remaining 29 bits is for advanced usage)");
-
-static unsigned verbose = 3;
-module_param(verbose, uint, 0);
-MODULE_PARM_DESC(verbose,
-"0 silent, >0 show gpios, >1 show devices, >2 show devices before (default=3)");
-
-struct fbtft_device_display {
- char *name;
- struct spi_board_info *spi;
- struct platform_device *pdev;
-};
-
-static void fbtft_device_pdev_release(struct device *dev);
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len);
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye);
-
-#define ADAFRUIT18_GAMMA \
- "02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \
- "03 1d 07 06 2E 2C 29 2D 2E 2E 37 3F 00 00 02 10"
-
-static int hy28b_init_sequence[] = {
- -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
- -1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
- -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
- -1, 0x0008, 0x0207, -1, 0x0009, 0x0000,
- -1, 0x000a, 0x0000, -1, 0x000c, 0x0001,
- -1, 0x000d, 0x0000, -1, 0x000f, 0x0000,
- -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
- -1, 0x0012, 0x0000, -1, 0x0013, 0x0000,
- -2, 50, -1, 0x0010, 0x1590, -1, 0x0011,
- 0x0227, -2, 50, -1, 0x0012, 0x009c, -2, 50,
- -1, 0x0013, 0x1900, -1, 0x0029, 0x0023,
- -1, 0x002b, 0x000e, -2, 50,
- -1, 0x0020, 0x0000, -1, 0x0021, 0x0000,
- -2, 50, -1, 0x0050, 0x0000,
- -1, 0x0051, 0x00ef, -1, 0x0052, 0x0000,
- -1, 0x0053, 0x013f, -1, 0x0060, 0xa700,
- -1, 0x0061, 0x0001, -1, 0x006a, 0x0000,
- -1, 0x0080, 0x0000, -1, 0x0081, 0x0000,
- -1, 0x0082, 0x0000, -1, 0x0083, 0x0000,
- -1, 0x0084, 0x0000, -1, 0x0085, 0x0000,
- -1, 0x0090, 0x0010, -1, 0x0092, 0x0000,
- -1, 0x0093, 0x0003, -1, 0x0095, 0x0110,
- -1, 0x0097, 0x0000, -1, 0x0098, 0x0000,
- -1, 0x0007, 0x0133, -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000, -2, 100, -3 };
-
-#define HY28B_GAMMA \
- "04 1F 4 7 7 0 7 7 6 0\n" \
- "0F 00 1 7 4 0 0 0 6 7"
-
-static int pitft_init_sequence[] = {
- -1, 0x01, -2, 5, -1, 0x28, -1, 0xEF,
- 0x03, 0x80, 0x02, -1, 0xCF, 0x00, 0xC1, 0x30,
- -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xE8, 0x85, 0x00, 0x78,
- -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
- -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00,
- -1, 0xC0, 0x23, -1, 0xC1, 0x10, -1, 0xC5,
- 0x3e, 0x28, -1, 0xC7, 0x86, -1, 0x3A, 0x55,
- -1, 0xB1, 0x00, 0x18, -1, 0xB6, 0x08, 0x82,
- 0x27, -1, 0xF2, 0x00, -1, 0x26, 0x01,
- -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08,
- 0x4E, 0xF1, 0x37, 0x07, 0x10, 0x03,
- 0x0E, 0x09, 0x00, -1, 0xE1, 0x00, 0x0E, 0x14,
- 0x03, 0x11, 0x07, 0x31, 0xC1, 0x48,
- 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, -1,
- 0x11, -2, 100, -1, 0x29, -2, 20, -3 };
-
-static int waveshare32b_init_sequence[] = {
- -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
- -1, 0xCF, 0x00, 0xC1, 0x30,
- -1, 0xE8, 0x85, 0x00, 0x78, -1, 0xEA, 0x00,
- 0x00, -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xF7, 0x20, -1, 0xC0, 0x23, -1, 0xC1,
- 0x10, -1, 0xC5, 0x3e, 0x28, -1, 0xC7, 0x86,
- -1, 0x36, 0x28, -1, 0x3A, 0x55, -1, 0xB1, 0x00,
- 0x18, -1, 0xB6, 0x08, 0x82, 0x27,
- -1, 0xF2, 0x00, -1, 0x26, 0x01,
- -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
- 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
- -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
- 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
- -1, 0x11, -2, 120, -1, 0x29, -1, 0x2c, -3 };
-
-/* Supported displays in alphabetical order */
-static struct fbtft_device_display displays[] = {
- {
- .name = "adafruit18",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- .gamma = ADAFRUIT18_GAMMA,
- }
- }
- }, {
- .name = "adafruit18_green",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 4000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .fbtftops.set_addr_win =
- adafruit18_green_tab_set_addr_win,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- .gamma = ADAFRUIT18_GAMMA,
- }
- }
- }, {
- .name = "adafruit22",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8340bn",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 23 },
- {},
- },
- }
- }
- }, {
- .name = "adafruit22a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "adafruit28",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "adafruit13m",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1306",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "agm1264k-fl",
- .pdev = &(struct platform_device) {
- .name = "fb_agm1264k-fl",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = FBTFT_ONBOARD_BACKLIGHT,
- },
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- },
- }
- }
- }, {
- .name = "dogs102",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1701",
- .max_speed_hz = 8000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 13 },
- { "dc", 6 },
- {},
- },
- }
- }
- }, {
- .name = "er_tftm050_2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ra8875",
- .max_speed_hz = 5000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .width = 480,
- .height = 272,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "er_tftm070_5",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ra8875",
- .max_speed_hz = 5000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .width = 800,
- .height = 480,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "ew24ha0",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1611",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "ew24ha0_9bit",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1611",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- },
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- }
- }
- }, {
- .name = "flexfb",
- .spi = &(struct spi_board_info) {
- .modalias = "flexfb",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "flexpfb",
- .pdev = &(struct platform_device) {
- .name = "flexpfb",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 17 },
- { "dc", 1 },
- { "wr", 0 },
- { "cs", 21 },
- { "db00", 9 },
- { "db01", 11 },
- { "db02", 18 },
- { "db03", 23 },
- { "db04", 24 },
- { "db05", 25 },
- { "db06", 8 },
- { "db07", 7 },
- { "led", 4 },
- {},
- },
- },
- }
- }
- }, {
- .name = "freetronicsoled128",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1351",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = FBTFT_ONBOARD_BACKLIGHT,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
- }
- }
- }, {
- .name = "hx8353d",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8353d",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
- }
- }
- }, {
- .name = "hy28a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9320",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .startbyte = 0x70,
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "hy28b",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9325",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = hy28b_init_sequence,
- },
- .startbyte = 0x70,
- .bgr = true,
- .fps = 50,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 18 },
- {},
- },
- .gamma = HY28B_GAMMA,
- }
- }
- }, {
- .name = "ili9481",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9481",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .regwidth = 16,
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 22 },
- {},
- },
- }
- }
- }, {
- .name = "itdb24",
- .pdev = &(struct platform_device) {
- .name = "fb_s6d1121",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = false,
- .gpios = (const struct fbtft_gpio []) {
- /* Wiring for LCD adapter kit */
- { "reset", 7 },
- { "dc", 0 }, /* rev 2: 2 */
- { "wr", 1 }, /* rev 2: 3 */
- { "cs", 8 },
- { "db00", 17 },
- { "db01", 18 },
- { "db02", 21 }, /* rev 2: 27 */
- { "db03", 22 },
- { "db04", 23 },
- { "db05", 24 },
- { "db06", 25 },
- { "db07", 4 },
- {}
- },
- },
- }
- }
- }, {
- .name = "itdb28",
- .pdev = &(struct platform_device) {
- .name = "fb_ili9325",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- },
- }
- }
- }, {
- .name = "itdb28_spi",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9325",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "mi0283qt-2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8347d",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .startbyte = 0x70,
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "mi0283qt-9a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "mi0283qt-v2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_watterott",
- .max_speed_hz = 4000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- {},
- },
- }
- }
- }, {
- .name = "nokia3310",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_pcd8544",
- .max_speed_hz = 400000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
- }
- }
- }, {
- .name = "nokia3310a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_tls8204",
- .max_speed_hz = 1000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
- }
- }
- }, {
- .name = "nokia5110",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9163",
- .max_speed_hz = 12000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- }
- }
- }, {
-
- .name = "piscreen",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9486",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .regwidth = 16,
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 22 },
- {},
- },
- }
- }
- }, {
- .name = "pitft",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .chip_select = 0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = pitft_init_sequence,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "dc", 25 },
- {},
- },
- }
- }
- }, {
- .name = "pioled",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1351",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
- .gamma = "0 2 2 2 2 2 2 2 "
- "2 2 2 2 2 2 2 2 "
- "2 2 2 2 2 2 2 2 "
- "2 2 2 2 2 2 2 3 "
- "3 3 3 3 3 3 3 3 "
- "3 3 3 3 3 3 3 3 "
- "3 3 3 4 4 4 4 4 "
- "4 4 4 4 4 4 4"
- }
- }
- }, {
- .name = "rpi-display",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 23 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "s6d02a1",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_s6d02a1",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
- }
- }
- }, {
- .name = "sainsmart18",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "sainsmart32",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- .fbtftops.write = write_gpio16_wr_slow,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- },
- },
- }
- }, {
- .name = "sainsmart32_fast",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- },
- },
- }
- }, {
- .name = "sainsmart32_latched",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- .fbtftops.write =
- fbtft_write_gpio16_wr_latched,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- },
- },
- }
- }, {
- .name = "sainsmart32_spi",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1289",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
- }
- }, {
- .name = "spidev",
- .spi = &(struct spi_board_info) {
- .modalias = "spidev",
- .max_speed_hz = 500000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- }
- }
- }, {
- .name = "ssd1331",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1331",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
- }
- }
- }, {
- .name = "tinylcd35",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_tinylcd",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "tm022hdh26",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
- }
- }
- }, {
- .name = "tontec35_9481", /* boards before 02 July 2014 */
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9481",
- .max_speed_hz = 128000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 15 },
- { "dc", 25 },
- { "led_", 18 },
- {},
- },
- }
- }
- }, {
- .name = "tontec35_9486", /* boards after 02 July 2014 */
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9486",
- .max_speed_hz = 128000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 15 },
- { "dc", 25 },
- { "led_", 18 },
- {},
- },
- }
- }
- }, {
- .name = "upd161704",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_upd161704",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
- }
- }
- }, {
- .name = "waveshare32b",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence =
- waveshare32b_init_sequence,
- },
- .bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 27 },
- { "dc", 22 },
- {},
- },
- }
- }
- }, {
- .name = "waveshare22",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_bd663474",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
- }
- }
- }, {
- /* This should be the last item.
- Used with the custom argument */
- .name = "",
- .spi = &(struct spi_board_info) {
- .modalias = "",
- .max_speed_hz = 0,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- }
- },
- .pdev = &(struct platform_device) {
- .name = "",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
- },
- },
- },
- }
-};
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
-{
- u16 data;
- int i;
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- static u16 prev_data;
-#endif
-
- fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
-
- while (len) {
- data = *(u16 *) buf;
-
- /* Start writing by pulling down /WR */
- gpio_set_value(par->gpio.wr, 0);
-
- /* Set data */
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- if (data == prev_data) {
- gpio_set_value(par->gpio.wr, 0); /* used as delay */
- } else {
- for (i = 0; i < 16; i++) {
- if ((data & 1) != (prev_data & 1))
- gpio_set_value(par->gpio.db[i],
- data & 1);
- data >>= 1;
- prev_data >>= 1;
- }
- }
-#else
- for (i = 0; i < 16; i++) {
- gpio_set_value(par->gpio.db[i], data & 1);
- data >>= 1;
- }
-#endif
-
- /* Pullup /WR */
- gpio_set_value(par->gpio.wr, 1);
-
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
-#endif
- buf += 2;
- len -= 2;
- }
-
- return 0;
-}
-
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2);
- write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1);
- write_reg(par, 0x2C);
-}
-
-/* used if gpios parameter is present */
-static struct fbtft_gpio fbtft_device_param_gpios[MAX_GPIOS + 1] = { };
-
-static void fbtft_device_pdev_release(struct device *dev)
-{
-/* Needed to silence this message:
-Device 'xxx' does not have a release() function, it is broken and must be fixed
-*/
-}
-
-static int spi_device_found(struct device *dev, void *data)
-{
- struct spi_device *spi = container_of(dev, struct spi_device, dev);
-
- dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias,
- dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word,
- spi->mode);
-
- return 0;
-}
-
-static void pr_spi_devices(void)
-{
- pr_debug("SPI devices registered:\n");
- bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found);
-}
-
-static int p_device_found(struct device *dev, void *data)
-{
- struct platform_device
- *pdev = container_of(dev, struct platform_device, dev);
-
- if (strstr(pdev->name, "fb"))
- dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id,
- pdev->dev.platform_data ? "yes" : "no");
-
- return 0;
-}
-
-static void pr_p_devices(void)
-{
- pr_debug("'fb' Platform devices registered:\n");
- bus_for_each_dev(&platform_bus_type, NULL, NULL, p_device_found);
-}
-
-#ifdef MODULE
-static void fbtft_device_spi_delete(struct spi_master *master, unsigned cs)
-{
- struct device *dev;
- char str[32];
-
- snprintf(str, sizeof(str), "%s.%u", dev_name(&master->dev), cs);
-
- dev = bus_find_device_by_name(&spi_bus_type, NULL, str);
- if (dev) {
- if (verbose)
- dev_info(dev, "Deleting %s\n", str);
- device_del(dev);
- }
-}
-
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
- struct spi_master *master;
-
- master = spi_busnum_to_master(spi->bus_num);
- if (!master) {
- pr_err("spi_busnum_to_master(%d) returned NULL\n",
- spi->bus_num);
- return -EINVAL;
- }
- /* make sure it's available */
- fbtft_device_spi_delete(master, spi->chip_select);
- spi_device = spi_new_device(master, spi);
- put_device(&master->dev);
- if (!spi_device) {
- dev_err(&master->dev, "spi_new_device() returned NULL\n");
- return -EPERM;
- }
- return 0;
-}
-#else
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
- return spi_register_board_info(spi, 1);
-}
-#endif
-
-static int __init fbtft_device_init(void)
-{
- struct spi_board_info *spi = NULL;
- struct fbtft_platform_data *pdata;
- const struct fbtft_gpio *gpio = NULL;
- char *p_gpio, *p_name, *p_num;
- bool found = false;
- int i = 0;
- long val;
- int ret = 0;
-
- if (name == NULL) {
-#ifdef MODULE
- pr_err("missing module parameter: 'name'\n");
- return -EINVAL;
-#else
- return 0;
-#endif
- }
-
- if (init_num > FBTFT_MAX_INIT_SEQUENCE) {
- pr_err("init parameter: exceeded max array size: %d\n",
- FBTFT_MAX_INIT_SEQUENCE);
- return -EINVAL;
- }
-
- /* parse module parameter: gpios */
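	/*
	 * Editor's note: the loop below accepts a comma-separated list of
	 * "name:number" pairs, e.g. gpios=reset:25,dc:24,led:18 (the names
	 * and numbers here are purely illustrative), and copies each pair
	 * into fbtft_device_param_gpios[].
	 */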
- while ((p_gpio = strsep(&gpios, ","))) {
- if (strchr(p_gpio, ':') == NULL) {
- pr_err("error: missing ':' in gpios parameter: %s\n",
- p_gpio);
- return -EINVAL;
- }
- p_num = p_gpio;
- p_name = strsep(&p_num, ":");
- if (p_name == NULL || p_num == NULL) {
- pr_err("something bad happened parsing gpios parameter: %s\n",
- p_gpio);
- return -EINVAL;
- }
- ret = kstrtol(p_num, 10, &val);
- if (ret) {
- pr_err("could not parse number in gpios parameter: %s:%s\n",
- p_name, p_num);
- return -EINVAL;
- }
- strncpy(fbtft_device_param_gpios[i].name, p_name,
- FBTFT_GPIO_NAME_SIZE - 1);
- fbtft_device_param_gpios[i++].gpio = (int) val;
- if (i == MAX_GPIOS) {
- pr_err("gpios parameter: exceeded max array size: %d\n",
- MAX_GPIOS);
- return -EINVAL;
- }
- }
- if (fbtft_device_param_gpios[0].name[0])
- gpio = fbtft_device_param_gpios;
-
- if (verbose > 2)
- pr_spi_devices(); /* print list of registered SPI devices */
-
- if (verbose > 2)
- pr_p_devices(); /* print list of 'fb' platform devices */
-
- pr_debug("name='%s', busnum=%d, cs=%d\n", name, busnum, cs);
-
- if (rotate > 0 && rotate < 4) {
- rotate = (4 - rotate) * 90;
-		pr_warn("argument 'rotate' should be an angle. Values 1-3 are deprecated. Setting it to %d.\n",
- rotate);
- }
- if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270) {
- pr_warn("argument 'rotate' illegal value: %d. Setting it to 0.\n",
- rotate);
- rotate = 0;
- }
-
- /* name=list lists all supported displays */
- if (strncmp(name, "list", FBTFT_GPIO_NAME_SIZE) == 0) {
- pr_info("Supported displays:\n");
-
- for (i = 0; i < ARRAY_SIZE(displays); i++)
- pr_info("%s\n", displays[i].name);
- return -ECANCELED;
- }
-
- if (custom) {
- i = ARRAY_SIZE(displays) - 1;
- displays[i].name = name;
- if (speed == 0) {
- displays[i].pdev->name = name;
- displays[i].spi = NULL;
- } else {
- strncpy(displays[i].spi->modalias, name, SPI_NAME_SIZE);
- displays[i].pdev = NULL;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(displays); i++) {
- if (strncmp(name, displays[i].name, 32) == 0) {
- if (displays[i].spi) {
- spi = displays[i].spi;
- spi->chip_select = cs;
- spi->bus_num = busnum;
- if (speed)
- spi->max_speed_hz = speed;
- if (mode != -1)
- spi->mode = mode;
- pdata = (void *)spi->platform_data;
- } else if (displays[i].pdev) {
- p_device = displays[i].pdev;
- pdata = p_device->dev.platform_data;
- } else {
- pr_err("broken displays array\n");
- return -EINVAL;
- }
-
- pdata->rotate = rotate;
- if (bgr == 0)
- pdata->bgr = false;
- else if (bgr == 1)
- pdata->bgr = true;
- if (startbyte)
- pdata->startbyte = startbyte;
- if (gamma)
- pdata->gamma = gamma;
- pdata->display.debug = debug;
- if (fps)
- pdata->fps = fps;
- if (txbuflen)
- pdata->txbuflen = txbuflen;
- if (init_num)
- pdata->display.init_sequence = init;
- if (gpio)
- pdata->gpios = gpio;
- if (custom) {
- pdata->display.width = width;
- pdata->display.height = height;
- pdata->display.buswidth = buswidth;
- pdata->display.backlight = 1;
- }
-
- if (displays[i].spi) {
- ret = fbtft_device_spi_device_register(spi);
- if (ret) {
- pr_err("failed to register SPI device\n");
- return ret;
- }
- } else {
- ret = platform_device_register(p_device);
- if (ret < 0) {
- pr_err("platform_device_register() returned %d\n",
- ret);
- return ret;
- }
- }
- found = true;
- break;
- }
- }
-
- if (!found) {
- pr_err("display not supported: '%s'\n", name);
- return -EINVAL;
- }
-
- if (verbose && pdata && pdata->gpios) {
- gpio = pdata->gpios;
- pr_info("GPIOS used by '%s':\n", name);
- found = false;
- while (verbose && gpio->name[0]) {
- pr_info("'%s' = GPIO%d\n", gpio->name, gpio->gpio);
- gpio++;
- found = true;
- }
- if (!found)
- pr_info("(none)\n");
- }
-
- if (spi_device && (verbose > 1))
- pr_spi_devices();
- if (p_device && (verbose > 1))
- pr_p_devices();
-
- return 0;
-}
-
-static void __exit fbtft_device_exit(void)
-{
- if (spi_device) {
- device_del(&spi_device->dev);
- kfree(spi_device);
- }
-
- if (p_device)
- platform_device_unregister(p_device);
-
-}
-
-arch_initcall(fbtft_device_init);
-module_exit(fbtft_device_exit);
-
-MODULE_DESCRIPTION("Add a FBTFT device.");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
deleted file mode 100644
index 704b78c78f13..000000000000
--- a/drivers/staging/fbtft/flexfb.c
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * Generic FB driver for TFT LCD displays
- *
- * Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME "flexfb"
-
-static char *chip;
-module_param(chip, charp, 0);
-MODULE_PARM_DESC(chip, "LCD controller");
-
-static unsigned int width;
-module_param(width, uint, 0);
-MODULE_PARM_DESC(width, "Display width");
-
-static unsigned int height;
-module_param(height, uint, 0);
-MODULE_PARM_DESC(height, "Display height");
-
-static int init[512];
-static int init_num;
-module_param_array(init, int, &init_num, 0);
-MODULE_PARM_DESC(init, "Init sequence");
-
-static unsigned int setaddrwin;
-module_param(setaddrwin, uint, 0);
-MODULE_PARM_DESC(setaddrwin, "Which set_addr_win() implementation to use");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0);
-MODULE_PARM_DESC(buswidth, "Width of databus (default: 8)");
-
-static unsigned int regwidth = 8;
-module_param(regwidth, uint, 0);
-MODULE_PARM_DESC(regwidth, "Width of controller register (default: 8)");
-
-static bool nobacklight;
-module_param(nobacklight, bool, 0);
-MODULE_PARM_DESC(nobacklight, "Turn off backlight functionality.");
-
-static bool latched;
-module_param(latched, bool, 0);
-MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-
-static int *initp;
-static int initp_num;
-
-/* default init sequences */
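/*
 * Editor's note on the encoding used below: a value of -1 starts a new
 * command (the values that follow are the command and its parameters),
 * -2 is followed by a delay in milliseconds, and -3 terminates the
 * sequence.
 */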
-static int st7735r_init[] = {
--1, 0x01, -2, 150, -1, 0x11, -2, 500, -1, 0xB1, 0x01, 0x2C, 0x2D, -1, 0xB2, 0x01, 0x2C, 0x2D, -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
--1, 0xB4, 0x07, -1, 0xC0, 0xA2, 0x02, 0x84, -1, 0xC1, 0xC5, -1, 0xC2, 0x0A, 0x00, -1, 0xC3, 0x8A, 0x2A, -1, 0xC4, 0x8A, 0xEE, -1, 0xC5, 0x0E,
--1, 0x20, -1, 0x36, 0xC0, -1, 0x3A, 0x05, -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
--1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, -1, 0x29, -2, 100, -1, 0x13, -2, 10, -3 };
-
-static int ssd1289_init[] = {
--1, 0x00, 0x0001, -1, 0x03, 0xA8A4, -1, 0x0C, 0x0000, -1, 0x0D, 0x080C, -1, 0x0E, 0x2B00, -1, 0x1E, 0x00B7, -1, 0x01, 0x2B3F, -1, 0x02, 0x0600,
--1, 0x10, 0x0000, -1, 0x11, 0x6070, -1, 0x05, 0x0000, -1, 0x06, 0x0000, -1, 0x16, 0xEF1C, -1, 0x17, 0x0003, -1, 0x07, 0x0233, -1, 0x0B, 0x0000,
--1, 0x0F, 0x0000, -1, 0x41, 0x0000, -1, 0x42, 0x0000, -1, 0x48, 0x0000, -1, 0x49, 0x013F, -1, 0x4A, 0x0000, -1, 0x4B, 0x0000, -1, 0x44, 0xEF00,
--1, 0x45, 0x0000, -1, 0x46, 0x013F, -1, 0x30, 0x0707, -1, 0x31, 0x0204, -1, 0x32, 0x0204, -1, 0x33, 0x0502, -1, 0x34, 0x0507, -1, 0x35, 0x0204,
--1, 0x36, 0x0204, -1, 0x37, 0x0502, -1, 0x3A, 0x0302, -1, 0x3B, 0x0302, -1, 0x23, 0x0000, -1, 0x24, 0x0000, -1, 0x25, 0x8000, -1, 0x4f, 0x0000,
--1, 0x4e, 0x0000, -1, 0x22, -3 };
-
-static int hx8340bn_init[] = {
--1, 0xC1, 0xFF, 0x83, 0x40, -1, 0x11, -2, 150, -1, 0xCA, 0x70, 0x00, 0xD9, -1, 0xB0, 0x01, 0x11,
--1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, -2, 20, -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
--1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, -2, 10, -1, 0xB5, 0x35, 0x20, 0x45, -1, 0xB4, 0x33, 0x25, 0x4C, -2, 10,
--1, 0x3A, 0x05, -1, 0x29, -2, 10, -3 };
-
-static int ili9225_init[] = {
--1, 0x0001, 0x011C, -1, 0x0002, 0x0100, -1, 0x0003, 0x1030, -1, 0x0008, 0x0808, -1, 0x000C, 0x0000, -1, 0x000F, 0x0A01, -1, 0x0020, 0x0000,
--1, 0x0021, 0x0000, -2, 50, -1, 0x0010, 0x0A00, -1, 0x0011, 0x1038, -2, 50, -1, 0x0012, 0x1121, -1, 0x0013, 0x004E, -1, 0x0014, 0x676F,
--1, 0x0030, 0x0000, -1, 0x0031, 0x00DB, -1, 0x0032, 0x0000, -1, 0x0033, 0x0000, -1, 0x0034, 0x00DB, -1, 0x0035, 0x0000, -1, 0x0036, 0x00AF,
--1, 0x0037, 0x0000, -1, 0x0038, 0x00DB, -1, 0x0039, 0x0000, -1, 0x0050, 0x0000, -1, 0x0051, 0x060A, -1, 0x0052, 0x0D0A, -1, 0x0053, 0x0303,
--1, 0x0054, 0x0A0D, -1, 0x0055, 0x0A06, -1, 0x0056, 0x0000, -1, 0x0057, 0x0303, -1, 0x0058, 0x0000, -1, 0x0059, 0x0000, -2, 50,
--1, 0x0007, 0x1017, -2, 50, -3 };
-
-static int ili9320_init[] = {
--1, 0x00E5, 0x8000, -1, 0x0000, 0x0001, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, -1, 0x0008, 0x0202,
--1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
--1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x17B0, -1, 0x0011, 0x0031, -2, 50, -1, 0x0012, 0x0138, -2, 50, -1, 0x0013, 0x1800,
--1, 0x0029, 0x0008, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, -1, 0x0031, 0x0505, -1, 0x0032, 0x0004,
--1, 0x0035, 0x0006, -1, 0x0036, 0x0707, -1, 0x0037, 0x0105, -1, 0x0038, 0x0002, -1, 0x0039, 0x0707, -1, 0x003C, 0x0704, -1, 0x003D, 0x0807,
--1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0x2700, -1, 0x0061, 0x0001, -1, 0x006A, 0x0000,
--1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, -1, 0x0085, 0x0000, -1, 0x0090, 0x0010,
--1, 0x0092, 0x0000, -1, 0x0093, 0x0003, -1, 0x0095, 0x0110, -1, 0x0097, 0x0000, -1, 0x0098, 0x0000, -1, 0x0007, 0x0173, -3 };
-
-static int ili9325_init[] = {
--1, 0x00E3, 0x3008, -1, 0x00E7, 0x0012, -1, 0x00EF, 0x1231, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
--1, 0x0008, 0x0207, -1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000,
--1, 0x0011, 0x0007, -1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x1690, -1, 0x0011, 0x0223, -2, 50, -1, 0x0012, 0x000D, -2, 50,
--1, 0x0013, 0x1200, -1, 0x0029, 0x000A, -1, 0x002B, 0x000C, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000,
--1, 0x0031, 0x0506, -1, 0x0032, 0x0104, -1, 0x0035, 0x0207, -1, 0x0036, 0x000F, -1, 0x0037, 0x0306, -1, 0x0038, 0x0102, -1, 0x0039, 0x0707,
--1, 0x003C, 0x0702, -1, 0x003D, 0x1604, -1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0xA700,
--1, 0x0061, 0x0001, -1, 0x006A, 0x0000, -1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000,
--1, 0x0085, 0x0000, -1, 0x0090, 0x0010, -1, 0x0092, 0x0600, -1, 0x0007, 0x0133, -3 };
-
-static int ili9341_init[] = {
--1, 0x28, -2, 20, -1, 0xCF, 0x00, 0x83, 0x30, -1, 0xED, 0x64, 0x03, 0x12, 0x81, -1, 0xE8, 0x85, 0x01, 0x79,
--1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00, -1, 0xC0, 0x26, -1, 0xC1, 0x11,
--1, 0xC5, 0x35, 0x3E, -1, 0xC7, 0xBE, -1, 0xB1, 0x00, 0x1B, -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, -1, 0xB7, 0x07,
--1, 0x3A, 0x55, -1, 0x36, 0x48, -1, 0x11, -2, 120, -1, 0x29, -2, 20, -3 };
-
-static int ssd1351_init[] = { -1, 0xfd, 0x12, -1, 0xfd, 0xb1, -1, 0xae, -1, 0xb3, 0xf1, -1, 0xca, 0x7f, -1, 0xa0, 0x74,
- -1, 0x15, 0x00, 0x7f, -1, 0x75, 0x00, 0x7f, -1, 0xa1, 0x00, -1, 0xa2, 0x00, -1, 0xb5, 0x00,
- -1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05,
- -1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 };
-
-/**
- * struct flexfb_lcd_controller - Describes the LCD controller properties
- * @name: Model name of the chip
- * @width: Width of display in pixels
- * @height: Height of display in pixels
- * @setaddrwin: Which set_addr_win() implementation to use
- * @regwidth: LCD Controller Register width in bits
- * @init_seq: LCD initialization sequence
- * @init_seq_sz: Size of LCD initialization sequence
- */
-struct flexfb_lcd_controller {
- const char *name;
- unsigned int width;
- unsigned int height;
- unsigned int setaddrwin;
- unsigned int regwidth;
- int *init_seq;
- int init_seq_sz;
-};
-
-static const struct flexfb_lcd_controller flexfb_chip_table[] = {
- {
- .name = "st7735r",
-		.width = 128,
- .height = 160,
- .init_seq = st7735r_init,
- .init_seq_sz = ARRAY_SIZE(st7735r_init),
- },
- {
- .name = "hx8340bn",
- .width = 176,
- .height = 220,
- .init_seq = hx8340bn_init,
- .init_seq_sz = ARRAY_SIZE(hx8340bn_init),
- },
- {
- .name = "ili9225",
- .width = 176,
- .height = 220,
- .regwidth = 16,
- .init_seq = ili9225_init,
- .init_seq_sz = ARRAY_SIZE(ili9225_init),
- },
- {
- .name = "ili9320",
- .width = 240,
- .height = 320,
- .setaddrwin = 1,
- .regwidth = 16,
- .init_seq = ili9320_init,
- .init_seq_sz = ARRAY_SIZE(ili9320_init),
- },
- {
- .name = "ili9325",
- .width = 240,
- .height = 320,
- .setaddrwin = 1,
- .regwidth = 16,
- .init_seq = ili9325_init,
- .init_seq_sz = ARRAY_SIZE(ili9325_init),
- },
- {
- .name = "ili9341",
- .width = 240,
- .height = 320,
- .init_seq = ili9341_init,
- .init_seq_sz = ARRAY_SIZE(ili9341_init),
- },
- {
- .name = "ssd1289",
- .width = 240,
- .height = 320,
- .setaddrwin = 2,
- .regwidth = 16,
- .init_seq = ssd1289_init,
- .init_seq_sz = ARRAY_SIZE(ssd1289_init),
- },
- {
- .name = "ssd1351",
- .width = 128,
- .height = 128,
- .setaddrwin = 3,
- .init_seq = ssd1351_init,
- .init_seq_sz = ARRAY_SIZE(ssd1351_init),
- },
-};
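/*
 * Editor's note (illustrative): the chip= module parameter selects one of
 * the entries above; flexfb_chip_init()/flexfb_chip_load_param() further
 * down then copy that entry's width, height, regwidth and init sequence
 * into the module defaults, e.g. chip=ili9341 yields a 240x320 panel using
 * ili9341_init. The example parameter value is an assumption used only for
 * illustration.
 */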
-
-/* ili9320, ili9325 */
-static void flexfb_set_addr_win_1(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- switch (par->info->var.rotate) {
- /* R20h = Horizontal GRAM Start Address */
- /* R21h = Vertical GRAM Start Address */
- case 0:
- write_reg(par, 0x0020, xs);
- write_reg(par, 0x0021, ys);
- break;
- case 180:
- write_reg(par, 0x0020, width - 1 - xs);
- write_reg(par, 0x0021, height - 1 - ys);
- break;
- case 270:
- write_reg(par, 0x0020, width - 1 - ys);
- write_reg(par, 0x0021, xs);
- break;
- case 90:
- write_reg(par, 0x0020, ys);
- write_reg(par, 0x0021, height - 1 - xs);
- break;
- }
- write_reg(par, 0x0022); /* Write Data to GRAM */
-}
-
-/* ssd1289 */
-static void flexfb_set_addr_win_2(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- switch (par->info->var.rotate) {
- /* R4Eh - Set GDDRAM X address counter */
- /* R4Fh - Set GDDRAM Y address counter */
- case 0:
- write_reg(par, 0x4e, xs);
- write_reg(par, 0x4f, ys);
- break;
- case 180:
- write_reg(par, 0x4e, par->info->var.xres - 1 - xs);
- write_reg(par, 0x4f, par->info->var.yres - 1 - ys);
- break;
- case 270:
- write_reg(par, 0x4e, par->info->var.yres - 1 - ys);
- write_reg(par, 0x4f, xs);
- break;
- case 90:
- write_reg(par, 0x4e, ys);
- write_reg(par, 0x4f, par->info->var.xres - 1 - xs);
- break;
- }
-
- /* R22h - RAM data write */
- write_reg(par, 0x22, 0);
-}
-
-/* ssd1351 */
-static void set_addr_win_3(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- write_reg(par, 0x15, xs, xe);
- write_reg(par, 0x75, ys, ye);
- write_reg(par, 0x5C);
-}
-
-static int flexfb_verify_gpios_dc(struct fbtft_par *par)
-{
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
- if (par->gpio.dc < 0) {
- dev_err(par->info->device,
- "Missing info about 'dc' gpio. Aborting.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int flexfb_verify_gpios_db(struct fbtft_par *par)
-{
- int i;
- int num_db = buswidth;
-
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
- if (par->gpio.dc < 0) {
- dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (par->gpio.wr < 0) {
- dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (latched && (par->gpio.latch < 0)) {
- dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (latched)
- num_db = buswidth / 2;
- for (i = 0; i < num_db; i++) {
- if (par->gpio.db[i] < 0) {
- dev_err(par->info->device,
- "Missing info about 'db%02d' gpio. Aborting.\n",
- i);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip)
-{
- if (!width)
- width = chip->width;
- if (!height)
- height = chip->height;
- setaddrwin = chip->setaddrwin;
- if (chip->regwidth)
- regwidth = chip->regwidth;
- if (!init_num) {
- initp = chip->init_seq;
- initp_num = chip->init_seq_sz;
- }
-}
-
-static struct fbtft_display flex_display = { };
-
-static int flexfb_chip_init(const struct device *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++)
- if (!strcmp(chip, flexfb_chip_table[i].name)) {
- flexfb_chip_load_param(&flexfb_chip_table[i]);
- return 0;
- }
-
- dev_err(dev, "chip=%s is not supported\n", chip);
-
- return -EINVAL;
-}
-
-static int flexfb_probe_common(struct spi_device *sdev,
- struct platform_device *pdev)
-{
- struct device *dev;
- struct fb_info *info;
- struct fbtft_par *par;
- int ret;
-
- initp = init;
- initp_num = init_num;
-
- if (sdev)
- dev = &sdev->dev;
- else
- dev = &pdev->dev;
-
- fbtft_init_dbg(dev, "%s(%s)\n", __func__,
- sdev ? "'SPI device'" : "'Platform device'");
-
- if (chip) {
- ret = flexfb_chip_init(dev);
- if (ret)
- return ret;
- }
-
- if (width == 0 || height == 0) {
-		dev_err(dev, "argument(s) missing: width and height have to be set.\n");
- return -EINVAL;
- }
- flex_display.width = width;
- flex_display.height = height;
- fbtft_init_dbg(dev, "Display resolution: %dx%d\n", width, height);
- fbtft_init_dbg(dev, "chip = %s\n", chip ? chip : "not set");
- fbtft_init_dbg(dev, "setaddrwin = %d\n", setaddrwin);
- fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
- fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
-
- info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data);
- if (!info)
- return -ENOMEM;
-
- par = info->par;
- if (sdev)
- par->spi = sdev;
- else
- par->pdev = pdev;
- if (!par->init_sequence)
- par->init_sequence = initp;
- par->fbtftops.init_display = fbtft_init_display;
-
- /* registerwrite functions */
- switch (regwidth) {
- case 8:
- par->fbtftops.write_register = fbtft_write_reg8_bus8;
- break;
- case 16:
- par->fbtftops.write_register = fbtft_write_reg16_bus8;
- break;
- default:
- dev_err(dev,
- "argument 'regwidth': %d is not supported.\n",
- regwidth);
- return -EINVAL;
- }
-
- /* bus functions */
- if (sdev) {
- par->fbtftops.write = fbtft_write_spi;
- switch (buswidth) {
- case 8:
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
- if (!par->startbyte)
- par->fbtftops.verify_gpios = flexfb_verify_gpios_dc;
- break;
- case 9:
- if (regwidth == 16) {
- dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n", regwidth, buswidth);
- return -EINVAL;
- }
- par->fbtftops.write_register = fbtft_write_reg8_bus9;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
- if (par->spi->master->bits_per_word_mask
- & SPI_BPW_MASK(9)) {
- par->spi->bits_per_word = 9;
- } else {
- dev_warn(dev,
- "9-bit SPI not available, emulating using 8-bit.\n");
- /* allocate buffer with room for dc bits */
- par->extra = devm_kzalloc(par->info->device,
- par->txbuf.len + (par->txbuf.len / 8) + 8,
- GFP_KERNEL);
- if (!par->extra) {
- ret = -ENOMEM;
- goto out_release;
- }
- par->fbtftops.write = fbtft_write_spi_emulate_9;
- }
- break;
- default:
- dev_err(dev, "argument 'buswidth': %d is not supported with SPI.\n", buswidth);
- return -EINVAL;
- }
- } else {
- par->fbtftops.verify_gpios = flexfb_verify_gpios_db;
- switch (buswidth) {
- case 8:
- par->fbtftops.write = fbtft_write_gpio8_wr;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
- break;
- case 16:
- par->fbtftops.write_register = fbtft_write_reg16_bus16;
- if (latched)
- par->fbtftops.write = fbtft_write_gpio16_wr_latched;
- else
- par->fbtftops.write = fbtft_write_gpio16_wr;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
- break;
- default:
- dev_err(dev, "argument 'buswidth': %d is not supported with parallel.\n", buswidth);
- return -EINVAL;
- }
- }
-
- /* set_addr_win function */
- switch (setaddrwin) {
- case 0:
- /* use default */
- break;
- case 1:
- par->fbtftops.set_addr_win = flexfb_set_addr_win_1;
- break;
- case 2:
- par->fbtftops.set_addr_win = flexfb_set_addr_win_2;
- break;
- case 3:
- par->fbtftops.set_addr_win = set_addr_win_3;
- break;
- default:
- dev_err(dev, "argument 'setaddrwin': unknown value %d.\n",
- setaddrwin);
- return -EINVAL;
- }
-
- if (!nobacklight)
- par->fbtftops.register_backlight = fbtft_register_backlight;
-
- ret = fbtft_register_framebuffer(info);
- if (ret < 0)
- goto out_release;
-
- return 0;
-
-out_release:
- fbtft_framebuffer_release(info);
-
- return ret;
-}
-
-static int flexfb_remove_common(struct device *dev, struct fb_info *info)
-{
- struct fbtft_par *par;
-
- if (!info)
- return -EINVAL;
- par = info->par;
- if (par)
- fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n",
- __func__);
- fbtft_unregister_framebuffer(info);
- fbtft_framebuffer_release(info);
-
- return 0;
-}
-
-static int flexfb_probe_spi(struct spi_device *spi)
-{
- return flexfb_probe_common(spi, NULL);
-}
-
-static int flexfb_remove_spi(struct spi_device *spi)
-{
- struct fb_info *info = spi_get_drvdata(spi);
-
- return flexfb_remove_common(&spi->dev, info);
-}
-
-static int flexfb_probe_pdev(struct platform_device *pdev)
-{
- return flexfb_probe_common(NULL, pdev);
-}
-
-static int flexfb_remove_pdev(struct platform_device *pdev)
-{
- struct fb_info *info = platform_get_drvdata(pdev);
-
- return flexfb_remove_common(&pdev->dev, info);
-}
-
-static struct spi_driver flexfb_spi_driver = {
- .driver = {
- .name = DRVNAME,
- .owner = THIS_MODULE,
- },
- .probe = flexfb_probe_spi,
- .remove = flexfb_remove_spi,
-};
-
-static const struct platform_device_id flexfb_platform_ids[] = {
- { "flexpfb", 0 },
- { },
-};
-
-static struct platform_driver flexfb_platform_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .id_table = flexfb_platform_ids,
- .probe = flexfb_probe_pdev,
- .remove = flexfb_remove_pdev,
-};
-
-static int __init flexfb_init(void)
-{
- int ret, ret2;
-
- ret = spi_register_driver(&flexfb_spi_driver);
- ret2 = platform_driver_register(&flexfb_platform_driver);
- if (ret < 0)
- return ret;
- return ret2;
-}
-
-static void __exit flexfb_exit(void)
-{
- spi_unregister_driver(&flexfb_spi_driver);
- platform_driver_unregister(&flexfb_platform_driver);
-}
-
-/* ------------------------------------------------------------------------- */
-
-module_init(flexfb_init);
-module_exit(flexfb_exit);
-
-MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/internal.h b/drivers/staging/fbtft/internal.h
index eea0ec5ff4d3..ae2ff4a4a472 100644
--- a/drivers/staging/fbtft/internal.h
+++ b/drivers/staging/fbtft/internal.h
@@ -1,17 +1,5 @@
-/*
- * Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2013 Noralf Tronnes */
#ifndef __LINUX_FBTFT_INTERNAL_H
#define __LINUX_FBTFT_INTERNAL_H
@@ -19,7 +7,7 @@
void fbtft_sysfs_init(struct fbtft_par *par);
void fbtft_sysfs_exit(struct fbtft_par *par);
void fbtft_expand_debug_value(unsigned long *debug);
-int fbtft_gamma_parse_str(struct fbtft_par *par, unsigned long *curves,
+int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves,
const char *str, int size);
#endif /* __LINUX_FBTFT_INTERNAL_H */
diff --git a/drivers/staging/fsl-mc/Kconfig b/drivers/staging/fsl-mc/Kconfig
deleted file mode 100644
index 32df07b15e09..000000000000
--- a/drivers/staging/fsl-mc/Kconfig
+++ /dev/null
@@ -1 +0,0 @@
-source "drivers/staging/fsl-mc/bus/Kconfig"
diff --git a/drivers/staging/fsl-mc/Makefile b/drivers/staging/fsl-mc/Makefile
deleted file mode 100644
index 9c6a00128c65..000000000000
--- a/drivers/staging/fsl-mc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# Freescale Management Complex (MC) bus drivers
-obj-$(CONFIG_FSL_MC_BUS) += bus/
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
deleted file mode 100644
index 8214102f104b..000000000000
--- a/drivers/staging/fsl-mc/README.txt
+++ /dev/null
@@ -1,364 +0,0 @@
-Copyright (C) 2015 Freescale Semiconductor Inc.
-
-DPAA2 (Data Path Acceleration Architecture Gen2)
-------------------------------------------------
-
-This document provides an overview of the Freescale DPAA2 architecture
-and how it is integrated into the Linux kernel.
-
-Contents summary
- -DPAA2 overview
- -Overview of DPAA2 objects
- -DPAA2 Linux driver architecture overview
- -bus driver
- -dprc driver
- -allocator
- -dpio driver
- -Ethernet
- -mac
-
-DPAA2 Overview
---------------
-
-DPAA2 is a hardware architecture designed for high-speed network
-packet processing. DPAA2 consists of sophisticated mechanisms for
-processing Ethernet packets, queue management, buffer management,
-autonomous L2 switching, virtual Ethernet bridging, and accelerator
-(e.g. crypto) sharing.
-
-A DPAA2 hardware component called the Management Complex (or MC) manages the
-DPAA2 hardware resources. The MC provides an object-based abstraction for
-software drivers to use the DPAA2 hardware.
-
-The MC uses DPAA2 hardware resources such as queues, buffer pools, and
-network ports to create functional objects/devices such as network
-interfaces, an L2 switch, or accelerator instances.
-
-The MC provides memory-mapped I/O command interfaces (MC portals)
-which DPAA2 software drivers use to operate on DPAA2 objects:
-
- +--------------------------------------+
- | OS |
- | DPAA2 drivers |
- | | |
- +-----------------------------|--------+
- |
- | (create,discover,connect
- | config,use,destroy)
- |
- DPAA2 |
- +------------------------| mc portal |-+
- | | |
- | +- - - - - - - - - - - - -V- - -+ |
- | | | |
- | | Management Complex (MC) | |
- | | | |
- | +- - - - - - - - - - - - - - - -+ |
- | |
- | Hardware Hardware |
- | Resources Objects |
- | --------- ------- |
- | -queues -DPRC |
- | -buffer pools -DPMCP |
- | -Eth MACs/ports -DPIO |
- | -network interface -DPNI |
- | profiles -DPMAC |
- | -queue portals -DPBP |
- | -MC portals ... |
- | ... |
- | |
- +--------------------------------------+
-
-The MC mediates operations such as create, discover,
-connect, configuration, and destroy. Fast-path operations
-on data, such as packet transmit/receive, are not mediated by
-the MC and are done directly using memory mapped regions in
-DPIO objects.
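
As a concrete illustration (editor's sketch, not part of the original text):
the DPBP API removed later in this same patch shows what such mediated,
object-level operations look like from a driver. Assuming the driver already
holds a struct fsl_mc_io for an MC portal and knows the DPBP object id
(both are assumptions here, as are the zero cmd_flags), enabling a buffer
pool is roughly:

	int err;
	u16 token;

	err = dpbp_open(mc_io, 0, dpbp_id, &token);	/* start control session */
	if (err)
		return err;
	err = dpbp_enable(mc_io, 0, token);		/* object command via the portal */
	dpbp_close(mc_io, 0, token);			/* end control session */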
-
-Overview of DPAA2 Objects
--------------------------
-This section provides a brief overview of some key objects
-in the DPAA2 hardware. A simple scenario is described illustrating
-the objects involved in creating a network interface.
-
--DPRC (Datapath Resource Container)
-
- A DPRC is a container object that holds all the other
- types of DPAA2 objects. In the example diagram below there
- are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
- in the container.
-
- +---------------------------------------------------------+
- | DPRC |
- | |
- | +-------+ +-------+ +-------+ +-------+ +-------+ |
- | | DPMCP | | DPIO | | DPBP | | DPNI | | DPMAC | |
- | +-------+ +-------+ +-------+ +---+---+ +---+---+ |
- | | DPMCP | | DPIO | |
- | +-------+ +-------+ |
- | | DPMCP | |
- | +-------+ |
- | |
- +---------------------------------------------------------+
-
- From the point of view of an OS, a DPRC is bus-like. Like
- a plug-and-play bus, such as PCI, DPRC commands can be used to
- enumerate the contents of the DPRC and discover the hardware
- objects present (including their mappable regions and interrupts).
-
- dprc.1 (bus)
- |
- +--+--------+-------+-------+-------+
- | | | | |
- dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1
- dpmcp.2 dpio.2
- dpmcp.3
-
- Hardware objects can be created and destroyed dynamically, providing
- the ability to hot plug/unplug objects in and out of the DPRC.
-
- A DPRC has a mappable mmio region (an MC portal) that can be used
- to send MC commands. It has an interrupt for status events (like
- hotplug).
-
- All objects in a container share the same hardware "isolation context".
- This means that with respect to an IOMMU the isolation granularity
- is at the DPRC (container) level, not at the individual object
- level.
-
- DPRCs can be defined statically and populated with objects
- via a config file passed to the MC when firmware starts
- it. There is also a Linux user space tool called "restool"
- that can be used to create/destroy containers and objects
- dynamically.
-
--DPAA2 Objects for an Ethernet Network Interface
-
- A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
- queuing mechanisms, configuration mechanisms, buffer management,
- physical ports, and interrupts. DPAA2 uses a more granular approach
- utilizing multiple hardware objects. Each object has specialized
- functions, and the objects are used together by software to provide Ethernet network
- interface functionality. This approach provides efficient use of finite
- hardware resources, flexibility, and performance advantages.
-
- The diagram below shows the objects needed for a simple
- network interface configuration on a system with 2 CPUs.
-
- +---+---+ +---+---+
- CPU0 CPU1
- +---+---+ +---+---+
- | |
- +---+---+ +---+---+
- DPIO DPIO
- +---+---+ +---+---+
- \ /
- \ /
- \ /
- +---+---+
- DPNI --- DPBP,DPMCP
- +---+---+
- |
- |
- +---+---+
- DPMAC
- +---+---+
- |
- port/PHY
-
- The objects are described below. For each object a brief description
- is provided along with a summary of the kinds of operations the object
- supports and a summary of its key resources (mmio regions
- and irqs).
-
- -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
- hardware device that connects to an Ethernet PHY and allows
- physical transmission and reception of Ethernet frames.
- -mmio regions: none
- -irqs: dpni link change
- -commands: set link up/down, link config, get stats,
- irq config, enable, reset
-
- -DPNI (Datapath Network Interface): contains TX/RX queues,
- network interface configuration, and rx buffer pool configuration
- mechanisms.
- -mmio regions: none
- -irqs: link state
- -commands: port config, offload config, queue config,
- parse/classify config, irq config, enable, reset
-
- -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
- packets and do hardware buffer pool management operations. For
- optimum performance there is typically one DPIO per CPU. This allows
- each CPU to perform simultaneous enqueue/dequeue operations.
- -mmio regions: queue operations, buffer mgmt
- -irqs: data availability, congestion notification, buffer
- pool depletion
- -commands: irq config, enable, reset
-
- -DPBP (Datapath Buffer Pool): represents a hardware buffer
- pool.
- -mmio regions: none
- -irqs: none
- -commands: enable, reset
-
- -DPMCP (Datapath MC Portal): provides an MC command portal.
- Used by drivers to send commands to the MC to manage
- objects.
- -mmio regions: MC command portal
- -irqs: command completion
- -commands: irq config, enable, reset
-
- Object Connections
- ------------------
- Some objects have explicit relationships that must
- be configured:
-
- -DPNI <--> DPMAC
- -DPNI <--> DPNI
- -DPNI <--> L2-switch-port
- A DPNI must be connected to something such as a DPMAC,
- another DPNI, or L2 switch port. The DPNI connection
- is made via a DPRC command.
-
- +-------+ +-------+
- | DPNI | | DPMAC |
- +---+---+ +---+---+
- | |
- +==========+
-
- -DPNI <--> DPBP
- A network interface requires a 'buffer pool' (DPBP
- object) which provides a list of pointers to memory
- where received Ethernet data is to be copied. The
- Ethernet driver configures the DPBPs associated with
- the network interface.
-
- Interrupts
- ----------
- All interrupts generated by DPAA2 objects are message
- interrupts. At the hardware level message interrupts
- generated by devices will normally have 3 components--
- 1) a non-spoofable 'device-id' expressed on the hardware
- bus, 2) an address, 3) a data value.
-
- In the case of DPAA2 devices/objects, all objects in the
- same container/DPRC share the same 'device-id'.
- For ARM-based SoCs this is the same as the stream ID.
-
-
-DPAA2 Linux Driver Overview
----------------------------
-
-This section provides an overview of the Linux kernel drivers for
-DPAA2-- 1) the bus driver and associated "DPAA2 infrastructure"
-drivers and 2) functional object drivers (such as Ethernet).
-
-As described previously, a DPRC is a container that holds the other
-types of DPAA2 objects. It is functionally similar to a plug-and-play
-bus controller.
-
-Each object in the DPRC is a Linux "device" and is bound to a driver.
-The diagram below shows the Linux drivers involved in a networking
-scenario and the objects bound to each driver. A brief description
-of each driver follows.
-
- +------------+
- | OS Network |
- | Stack |
- +------------+ +------------+
- | Allocator |. . . . . . . | Ethernet |
- |(dpmcp,dpbp)| | (dpni) |
- +-.----------+ +---+---+----+
- . . ^ |
- . . <data avail, | |<enqueue,
- . . tx confirm> | | dequeue>
- +-------------+ . | |
- | DPRC driver | . +---+---V----+ +---------+
- | (dprc) | . . . . . .| DPIO driver| | MAC |
- +----------+--+ | (dpio) | | (dpmac) |
- | +------+-----+ +-----+---+
- |<dev add/remove> | |
- | | |
- +----+--------------+ | +--+---+
- | mc-bus driver | | | PHY |
- | | | |driver|
- | /fsl-mc@80c000000 | | +--+---+
- +-------------------+ | |
- | |
- ================================ HARDWARE =========|=================|======
- DPIO |
- | |
- DPNI---DPBP |
- | |
- DPMAC |
- | |
- PHY ---------------+
- ===================================================|========================
-
-A brief description of each driver is provided below.
-
- mc-bus driver
- -------------
- The mc-bus driver is a platform driver and is probed from an
- "/fsl-mc@xxxx" node in the device tree passed in by boot firmware.
- It is responsible for bootstrapping the DPAA2 kernel infrastructure.
- Key functions include:
- -registering a new bus type named "fsl-mc" with the kernel,
- and implementing bus call-backs (e.g. match/uevent/dev_groups)
-    -implementing APIs for DPAA2 driver registration and for device
- add/remove
-    -creating an MSI irq domain
-    -doing a device add of the 'root' DPRC device, which is needed
-     to bootstrap things
-
- DPRC driver
- -----------
- The DPRC driver is bound to DPRC objects and does runtime management
- of a bus instance. It performs the initial bus scan of the DPRC
- and handles interrupts for container events such as hot plug.
-
- Allocator
- ----------
- Certain objects such as DPMCP and DPBP are generic and fungible,
- and are intended to be used by other drivers. For example,
- the DPAA2 Ethernet driver needs:
- -DPMCPs to send MC commands, to configure network interfaces
- -DPBPs for network buffer pools
-
- The allocator driver registers for these allocatable object types
- and those objects are bound to the allocator when the bus is probed.
- The allocator maintains a pool of objects that are available for
- allocation by other DPAA2 drivers.
-
- DPIO driver
- -----------
- The DPIO driver is bound to DPIO objects and provides services that allow
- other drivers such as the Ethernet driver to receive and transmit data.
- Key services include:
- -data availability notifications
- -hardware queuing operations (enqueue and dequeue of data)
- -hardware buffer pool management
-
- There is typically one DPIO object per physical CPU for optimum
- performance, allowing each CPU to simultaneously enqueue
- and dequeue data.
-
- The DPIO driver operates on behalf of all DPAA2 drivers
- active in the kernel-- Ethernet, crypto, compression,
- etc.
-
- Ethernet
- --------
- The Ethernet driver is bound to a DPNI and implements the kernel
- interfaces needed to connect the DPAA2 network interface to
- the network stack.
-
- Each DPNI corresponds to a Linux network interface.
-
- MAC driver
- ----------
- An Ethernet PHY is an off-chip, board-specific component and is managed
- by the appropriate PHY driver via an MDIO bus. The MAC driver
- acts as a proxy between the PHY driver and the MC, communicating
- with the MC through commands sent to a DPMAC object.
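
Editor's sketch (not part of the original README): the object/driver binding
described in this overview means a functional driver declares which object
type it handles and registers itself with the fsl-mc bus. The outline below
is hypothetical; every identifier in it (the match-table fields, the vendor
macro, the probe/remove callbacks, and the registration helper) is an
assumption for illustration and is not confirmed by this patch.

	static const struct fsl_mc_device_id my_dpni_match[] = {
		{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
		{ }
	};

	static struct fsl_mc_driver my_dpni_driver = {
		.driver		= { .name = "my-dpni", .owner = THIS_MODULE },
		.match_id_table	= my_dpni_match,
		.probe		= my_dpni_probe,	/* called when a dpni object is added */
		.remove		= my_dpni_remove,
	};

	/* registration helper name assumed: fsl_mc_driver_register(&my_dpni_driver); */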
diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO
deleted file mode 100644
index 389436891b93..000000000000
--- a/drivers/staging/fsl-mc/TODO
+++ /dev/null
@@ -1,31 +0,0 @@
-* Decide if multiple root fsl-mc buses will be supported per Linux instance,
- and if so add support for this.
-
-* Add at least one device driver for a DPAA2 object (child device of the
- fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
- driver support, which depends on drivers for several objects: DPNI,
- DPIO, DPMAC. Other pre-requisites include:
-
-  * interrupt support. For meaningful driver support we need
- interrupts, and thus need message interrupt support by the bus
- driver.
- -Note: this has dependencies on generic MSI support work
- in process upstream, see [1] and [2].
-
-  * Management Complex (MC) command serialization. Locking mechanisms
- are needed by drivers to serialize commands sent to the MC, including
- from atomic context.
-
- * MC firmware uprev. The MC firmware upon which the fsl-mc
- bus driver and DPAA2 object drivers are based is continuing
- to evolve, so minor updates are needed to keep in sync with binary
- interface changes to the MC.
-
-* Cleanup
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
-german.rivera@freescale.com, devel@driverdev.osuosl.org,
-linux-kernel@vger.kernel.org
-
-[1] https://lkml.org/lkml/2015/7/9/93
-[2] https://lkml.org/lkml/2015/7/7/712
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
deleted file mode 100644
index 0d779d9ccbd8..000000000000
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Freescale Management Complex (MC) bus drivers
-#
-# Copyright (C) 2014 Freescale Semiconductor, Inc.
-#
-# This file is released under the GPLv2
-#
-
-config FSL_MC_BUS
- tristate "Freescale Management Complex (MC) bus driver"
- depends on OF && ARM64
- help
- Driver to enable the bus infrastructure for the Freescale
- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
-	  module of the QorIQ LS2 SoCs that does resource management
- for hardware building-blocks in the SoC that can be used
- to dynamically create networking hardware objects such as
- network interfaces (NICs), crypto accelerator instances,
- or L2 switches.
-
- Only enable this option when building the kernel for
-	  Freescale QorIQ LS2xxxx SoCs.
-
-
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
deleted file mode 100644
index 25433a998478..000000000000
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Freescale Management Complex (MC) bus drivers
-#
-# Copyright (C) 2014 Freescale Semiconductor, Inc.
-#
-# This file is released under the GPLv2
-#
-obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
-
-mc-bus-driver-objs := mc-bus.o \
- mc-sys.o \
- dprc.o \
- dpmng.o \
- dprc-driver.o \
- mc-allocator.o \
- dpmcp.o \
- dpbp.o
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
deleted file mode 100644
index 2d97173f8e91..000000000000
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ /dev/null
@@ -1,582 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include "../include/dpbp.h"
-#include "../include/dpbp-cmd.h"
-
-/**
- * dpbp_open() - Open a control session for the specified object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dpbp_id: DPBP unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpbp_create function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dpbp_id,
- u16 *token)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
- cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, dpbp_id);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-
- return err;
-}
-EXPORT_SYMBOL(dpbp_open);
-
-/**
- * dpbp_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
- token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-EXPORT_SYMBOL(dpbp_close);
-
-/**
- * dpbp_create() - Create the DPBP object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
- *
- * Create the DPBP object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpbp_open function to get an authentication
- * token first.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_create(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- const struct dpbp_cfg *cfg,
- u16 *token)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- (void)(cfg); /* unused */
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
- cmd_flags, 0);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-
- return 0;
-}
-
-/**
- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpbp_destroy(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpbp_enable() - Enable the DPBP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
- token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-EXPORT_SYMBOL(dpbp_enable);
-
-/**
- * dpbp_disable() - Disable the DPBP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-EXPORT_SYMBOL(dpbp_disable);
-
-/**
- * dpbp_is_enabled() - Check if the DPBP is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *en)
-{
- struct mc_command cmd = { 0 };
- int err;
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
- token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *en = (int)mc_dec(cmd.params[0], 0, 1);
-
- return 0;
-}
-
-/**
- * dpbp_reset() - Reset the DPBP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: Identifies the interrupt index to configure
- * @irq_cfg: IRQ configuration
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dpbp_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, irq_index);
- cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpbp_get_irq() - Get IRQ information from the DPBP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: The interrupt index to configure
- * @type: Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @irq_cfg: IRQ attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dpbp_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
- return 0;
-}
-
-/**
- * dpbp_set_irq_enable() - Set overall interrupt state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: The interrupt index to configure
- * @en: Interrupt state - enable = 1, disable = 0
- *
- * Allows GPP software to control when interrupts are generated.
- * Each interrupt can have up to 32 causes. The enable/disable controls the
- * overall interrupt state. If the interrupt is disabled, no cause will
- * trigger an interrupt.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpbp_get_irq_enable() - Get overall interrupt state
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: The interrupt index to configure
- * @en: Returned interrupt state - enable = 1, disable = 0
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *en = (u8)mc_dec(cmd.params[0], 0, 8);
- return 0;
-}
-
-/**
- * dpbp_set_irq_mask() - Set interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: The interrupt index to configure
- * @mask: Event mask to trigger interrupt;
- * each bit:
- * 0 = ignore event
- * 1 = consider event for asserting IRQ
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpbp_get_irq_mask() - Get interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: The interrupt index to configure
- * @mask: Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *mask = (u32)mc_dec(cmd.params[0], 0, 32);
- return 0;
-}
-
-/**
- * dpbp_get_irq_status() - Get the current status of any pending interrupts.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: The interrupt index to configure
- * @status: Returned interrupts status - one bit per cause:
- * 0 = no interrupt pending
- * 1 = interrupt pending
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *status = (u32)mc_dec(cmd.params[0], 0, 32);
- return 0;
-}
-
-/**
- * dpbp_clear_irq_status() - Clear a pending interrupt's status
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @irq_index: The interrupt index to configure
- * @status: Bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpbp_get_attributes - Retrieve DPBP attributes.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @attr: Returned object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_attr *attr)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
- cmd_flags, token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- attr->bpid = (u16)mc_dec(cmd.params[0], 16, 16);
- attr->id = (int)mc_dec(cmd.params[0], 32, 32);
- attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
- attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
- return 0;
-}
-EXPORT_SYMBOL(dpbp_get_attributes);
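All of the dpbp_*() wrappers deleted above share one shape: encode the command id, flags and token into cmd.header, pack arguments into cmd.params[] at fixed bit offsets with mc_enc(), send with mc_send_command(), and decode any response with mc_dec(). The following is a hypothetical caller, not part of the deleted sources, showing how the IRQ and attribute calls fit together; it assumes mc_io and token were obtained earlier (for example from a prior dpbp_open()), that IRQ index 0 is valid, and that the usual kernel headers are available.

/*
 * Minimal usage sketch (illustrative only): unmask one cause, enable the
 * interrupt, acknowledge pending causes (write-1-to-clear) and read back
 * the object attributes.
 */
static int dpbp_irq_example(struct fsl_mc_io *mc_io, u16 token)
{
        struct dpbp_attr attr;
        u32 status;
        int err;

        /* Let only cause bit 0 through, then enable the interrupt. */
        err = dpbp_set_irq_mask(mc_io, 0, token, 0, 0x1);
        if (err)
                return err;
        err = dpbp_set_irq_enable(mc_io, 0, token, 0, 1);
        if (err)
                return err;

        /* Read pending causes and clear them with the same bits (W1C). */
        err = dpbp_get_irq_status(mc_io, 0, token, 0, &status);
        if (err)
                return err;
        if (status) {
                err = dpbp_clear_irq_status(mc_io, 0, token, 0, status);
                if (err)
                        return err;
        }

        /* Buffer-pool id and object id come back in the attributes. */
        err = dpbp_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;
        pr_info("dpbp %d uses bpid %u\n", attr.id, attr.bpid);
        return 0;
}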
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
deleted file mode 100644
index 6cc0fedd802f..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _FSL_DPMCP_CMD_H
-#define _FSL_DPMCP_CMD_H
-
-/* DPMCP Version */
-#define DPMCP_VER_MAJOR 2
-#define DPMCP_VER_MINOR 1
-
-/* Command IDs */
-#define DPMCP_CMDID_CLOSE 0x800
-#define DPMCP_CMDID_OPEN 0x80b
-#define DPMCP_CMDID_CREATE 0x90b
-#define DPMCP_CMDID_DESTROY 0x900
-
-#define DPMCP_CMDID_GET_ATTR 0x004
-#define DPMCP_CMDID_RESET 0x005
-
-#define DPMCP_CMDID_SET_IRQ 0x010
-#define DPMCP_CMDID_GET_IRQ 0x011
-#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
-#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
-#define DPMCP_CMDID_SET_IRQ_MASK 0x014
-#define DPMCP_CMDID_GET_IRQ_MASK 0x015
-#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
-#define DPMCP_CMDID_CLEAR_IRQ_STATUS 0x017
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_CREATE(cmd, cfg) \
- MC_CMD_OP(cmd, 0, 0, 32, int, cfg->portal_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_addr, irq_val, user_irq_id) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
- MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_val);\
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \
- MC_CMD_OP(cmd, 2, 0, 32, int, user_irq_id); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_GET_IRQ(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_RSP_GET_IRQ(cmd, type, irq_addr, irq_val, user_irq_id) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_val); \
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \
- MC_RSP_OP(cmd, 2, 0, 32, int, user_irq_id); \
- MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_RSP_GET_IRQ_ENABLE(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_RSP_GET_IRQ_MASK(cmd, mask) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_RSP_GET_IRQ_STATUS(cmd, status) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMCP_RSP_GET_ATTRIBUTES(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
- MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
- MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-} while (0)
-
-#endif /* _FSL_DPMCP_CMD_H */
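The MC_CMD_OP()/MC_RSP_OP() macros above only record where each argument lives inside the 64-bit command parameters (parameter index, bit offset, width in bits). The helpers below are illustrative stand-ins, not the real mc_enc()/mc_dec() from the fsl-mc headers, but they show the packing those tables imply.

/*
 * Illustrative only: place 'val' into a 'width'-bit field starting at bit
 * 'lsoffset' of a 64-bit command parameter, and read it back the same way.
 */
static inline u64 example_enc(int lsoffset, int width, u64 val)
{
        u64 mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

        return (val & mask) << lsoffset;
}

static inline u64 example_dec(u64 param, int lsoffset, int width)
{
        u64 mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

        return (param >> lsoffset) & mask;
}

/*
 * With these helpers, DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) amounts to:
 *      cmd.params[0] |= example_enc(0, 32, mask);
 *      cmd.params[0] |= example_enc(32, 8, irq_index);
 */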
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
deleted file mode 100644
index b0248f574619..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include "dpmcp.h"
-#include "dpmcp-cmd.h"
-
-/**
- * dpmcp_open() - Open a control session for the specified object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dpmcp_id: DPMCP unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpmcp_create function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dpmcp_id,
- u16 *token)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
- cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, dpmcp_id);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-
- return err;
-}
-
-/**
- * dpmcp_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpmcp_create() - Create the DPMCP object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
- *
- * Create the DPMCP object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call the dpmcp_open() function to get an authentication
- * token first.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_create(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- const struct dpmcp_cfg *cfg,
- u16 *token)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
- cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, cfg->portal_id);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-
- return 0;
-}
-
-/**
- * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpmcp_destroy(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: Identifies the interrupt index to configure
- * @irq_cfg: IRQ configuration
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dpmcp_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, irq_index);
- cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpmcp_get_irq() - Get IRQ information from the DPMCP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @type: Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @irq_cfg: IRQ attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dpmcp_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
- return 0;
-}
-
-/**
- * dpmcp_set_irq_enable() - Set overall interrupt state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @en: Interrupt state - enable = 1, disable = 0
- *
- * Allows GPP software to control when interrupts are generated.
- * Each interrupt can have up to 32 causes. The enable/disable controls the
- * overall interrupt state. If the interrupt is disabled, no cause will
- * trigger an interrupt.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpmcp_get_irq_enable() - Get overall interrupt state
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @en: Returned interrupt state - enable = 1, disable = 0
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *en = (u8)mc_dec(cmd.params[0], 0, 8);
- return 0;
-}
-
-/**
- * dpmcp_set_irq_mask() - Set interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @mask: Event mask to trigger interrupt;
- * each bit:
- * 0 = ignore event
- * 1 = consider event for asserting IRQ
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpmcp_get_irq_mask() - Get interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @mask: Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *mask = (u32)mc_dec(cmd.params[0], 0, 32);
- return 0;
-}
-
-/**
- * dpmcp_get_irq_status() - Get the current status of any pending interrupts.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @status: Returned interrupts status - one bit per cause:
- * 0 = no interrupt pending
- * 1 = interrupt pending
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *status = (u32)mc_dec(cmd.params[0], 0, 32);
- return 0;
-}
-
-/**
- * dpmcp_clear_irq_status() - Clear a pending interrupt's status
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @status: Bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpmcp_get_attributes - Retrieve DPMCP attributes.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @attr: Returned object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpmcp_attr *attr)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR,
- cmd_flags, token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- attr->id = (int)mc_dec(cmd.params[0], 32, 32);
- attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
- attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
- return 0;
-}
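A hypothetical consumer of the deleted DPMCP API, shown only to illustrate the open/use/close lifecycle: the token returned by dpmcp_open() must be threaded through every later call, and the portal should be closed on all paths. It assumes the usual kernel headers and a valid mc_io.

/* Illustrative only; not part of the deleted sources. */
static int dpmcp_example(struct fsl_mc_io *mc_io, int dpmcp_id)
{
        struct dpmcp_attr attr;
        u16 token;
        int err;

        err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
        if (err)
                return err;

        err = dpmcp_reset(mc_io, 0, token);     /* back to initial state */
        if (err)
                goto out_close;

        err = dpmcp_get_attributes(mc_io, 0, token, &attr);
        if (!err)
                pr_info("dpmcp %d, version %u.%u\n", attr.id,
                        attr.version.major, attr.version.minor);

out_close:
        dpmcp_close(mc_io, 0, token);
        return err;
}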
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
deleted file mode 100644
index 6df351f0caa5..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_DPMCP_H
-#define __FSL_DPMCP_H
-
-/* Data Path Management Command Portal API
- * Contains initialization APIs and runtime control APIs for DPMCP
- */
-
-struct fsl_mc_io;
-
-int dpmcp_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpmcp_id,
- uint16_t *token);
-
-/* Get portal ID from pool */
-#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
-
-int dpmcp_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
-
-/**
- * struct dpmcp_cfg - Structure representing DPMCP configuration
- * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID
- * from pool
- */
-struct dpmcp_cfg {
- int portal_id;
-};
-
-int dpmcp_create(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- const struct dpmcp_cfg *cfg,
- uint16_t *token);
-
-int dpmcp_destroy(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
-
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
-
-/* IRQ */
-/* IRQ Index */
-#define DPMCP_IRQ_INDEX 0
-/* IRQ event - Indicates that an MC command has completed */
-#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001
-
-/**
- * struct dpmcp_irq_cfg - IRQ configuration
- * @paddr: Address that must be written to signal a message-based interrupt
- * @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
- */
-struct dpmcp_irq_cfg {
- uint64_t paddr;
- uint32_t val;
- int user_irq_id;
-};
-
-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- struct dpmcp_irq_cfg *irq_cfg);
-
-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- int *type,
- struct dpmcp_irq_cfg *irq_cfg);
-
-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t en);
-
-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t *en);
-
-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t mask);
-
-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *mask);
-
-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *status);
-
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t status);
-
-/**
- * struct dpmcp_attr - Structure representing DPMCP attributes
- * @id: DPMCP object ID
- * @version: DPMCP version
- */
-struct dpmcp_attr {
- int id;
- /**
- * struct version - Structure representing DPMCP version
- * @major: DPMCP major version
- * @minor: DPMCP minor version
- */
- struct {
- uint16_t major;
- uint16_t minor;
- } version;
-};
-
-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpmcp_attr *attr);
-
-#endif /* __FSL_DPMCP_H */
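The sketch below, which is not part of the deleted header, shows how struct dpmcp_irq_cfg and the DPMCP_IRQ_* constants above would typically be combined: program the message-based interrupt, unmask only the command-done cause, then enable the interrupt as a whole. The doorbell address and value are placeholders that would normally come from an MSI allocation.

/* Illustrative only. */
static int dpmcp_irq_example(struct fsl_mc_io *mc_io, uint16_t token,
                             uint64_t msi_paddr, uint32_t msi_val)
{
        struct dpmcp_irq_cfg irq_cfg = {
                .paddr = msi_paddr,     /* doorbell address */
                .val = msi_val,         /* value written on interrupt */
                .user_irq_id = 0,       /* caller-chosen cookie */
        };
        int err;

        err = dpmcp_set_irq(mc_io, 0, token, DPMCP_IRQ_INDEX, &irq_cfg);
        if (err)
                return err;

        /* Let only the "command done" cause through, then enable. */
        err = dpmcp_set_irq_mask(mc_io, 0, token, DPMCP_IRQ_INDEX,
                                 DPMCP_IRQ_EVENT_CMD_DONE);
        if (err)
                return err;

        return dpmcp_set_irq_enable(mc_io, 0, token, DPMCP_IRQ_INDEX, 1);
}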
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
deleted file mode 100644
index ba8cfa9635dd..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*************************************************************************//*
- dpmng-cmd.h
-
- defines portal commands
-
- *//**************************************************************************/
-
-#ifndef __FSL_DPMNG_CMD_H
-#define __FSL_DPMNG_CMD_H
-
-/* Command IDs */
-#define DPMNG_CMDID_GET_CONT_ID 0x830
-#define DPMNG_CMDID_GET_VERSION 0x831
-
-#endif /* __FSL_DPMNG_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
deleted file mode 100644
index f633fcd86e51..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include "../include/dpmng.h"
-#include "dpmng-cmd.h"
-
-/**
- * mc_get_version() - Retrieves the Management Complex firmware
- * version information
- * @mc_io: Pointer to opaque I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @mc_ver_info: Returned version information structure
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int mc_get_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- struct mc_version *mc_ver_info)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
- cmd_flags,
- 0);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32);
- mc_ver_info->major = mc_dec(cmd.params[0], 32, 32);
- mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32);
-
- return 0;
-}
-
-/**
- * dpmng_get_container_id() - Get container ID associated with a given portal.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @container_id: Returned container ID
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int *container_id)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
- cmd_flags,
- 0);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *container_id = mc_dec(cmd.params[0], 0, 32);
-
- return 0;
-}
-
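A hypothetical probe-time check, not from the deleted file, tying the two dpmng calls together: read the MC firmware version and then ask which container this portal belongs to. Field types of struct mc_version are as declared in the (not shown) dpmng.h header.

/* Illustrative only. */
static int dpmng_example(struct fsl_mc_io *mc_io)
{
        struct mc_version ver;
        int container_id;
        int err;

        err = mc_get_version(mc_io, 0, &ver);
        if (err)
                return err;

        err = dpmng_get_container_id(mc_io, 0, &container_id);
        if (err)
                return err;

        pr_info("MC firmware %u.%u.%u, container %d\n",
                ver.major, ver.minor, ver.revision, container_id);
        return 0;
}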
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
deleted file mode 100644
index 6552c2034947..000000000000
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*************************************************************************//*
- dprc-cmd.h
-
- defines dprc portal commands
-
- *//**************************************************************************/
-
-#ifndef _FSL_DPRC_CMD_H
-#define _FSL_DPRC_CMD_H
-
-/* DPRC Version */
-#define DPRC_VER_MAJOR 4
-#define DPRC_VER_MINOR 0
-
-/* Command IDs */
-#define DPRC_CMDID_CLOSE 0x800
-#define DPRC_CMDID_OPEN 0x805
-#define DPRC_CMDID_CREATE 0x905
-
-#define DPRC_CMDID_GET_ATTR 0x004
-#define DPRC_CMDID_RESET_CONT 0x005
-
-#define DPRC_CMDID_SET_IRQ 0x010
-#define DPRC_CMDID_GET_IRQ 0x011
-#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
-#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
-#define DPRC_CMDID_SET_IRQ_MASK 0x014
-#define DPRC_CMDID_GET_IRQ_MASK 0x015
-#define DPRC_CMDID_GET_IRQ_STATUS 0x016
-#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#define DPRC_CMDID_CREATE_CONT 0x151
-#define DPRC_CMDID_DESTROY_CONT 0x152
-#define DPRC_CMDID_SET_RES_QUOTA 0x155
-#define DPRC_CMDID_GET_RES_QUOTA 0x156
-#define DPRC_CMDID_ASSIGN 0x157
-#define DPRC_CMDID_UNASSIGN 0x158
-#define DPRC_CMDID_GET_OBJ_COUNT 0x159
-#define DPRC_CMDID_GET_OBJ 0x15A
-#define DPRC_CMDID_GET_RES_COUNT 0x15B
-#define DPRC_CMDID_GET_RES_IDS 0x15C
-#define DPRC_CMDID_GET_OBJ_REG 0x15E
-#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
-#define DPRC_CMDID_GET_OBJ_IRQ 0x160
-#define DPRC_CMDID_SET_OBJ_LABEL 0x161
-#define DPRC_CMDID_GET_OBJ_DESC 0x162
-
-#define DPRC_CMDID_CONNECT 0x167
-#define DPRC_CMDID_DISCONNECT 0x168
-#define DPRC_CMDID_GET_POOL 0x169
-#define DPRC_CMDID_GET_POOL_COUNT 0x16A
-
-#define DPRC_CMDID_GET_CONNECTION 0x16C
-
-#endif /* _FSL_DPRC_CMD_H */
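The command ids above are consumed by thin wrappers in dprc.c. As an illustration only, a plausible wrapper for DPRC_CMDID_RESET_CONT, mirroring the dpbp/dpmcp reset helpers, could look like the sketch below; the parameter layout is an assumption, not taken from the deleted sources.

/* Hypothetical wrapper, for illustration of how the command ids are used. */
static int example_dprc_reset_container(struct fsl_mc_io *mc_io,
                                        u32 cmd_flags,
                                        u16 token,
                                        int child_container_id)
{
        struct mc_command cmd = { 0 };

        /* fold command id, flags and token into the 64-bit header */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
                                          cmd_flags, token);
        /* assumed placement of the child container id */
        cmd.params[0] |= mc_enc(0, 32, child_container_id);

        return mc_send_command(mc_io, &cmd);
}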
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
deleted file mode 100644
index a9ead0d7f9c3..000000000000
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Freescale data path resource container (DPRC) driver
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include "../include/mc-private.h"
-#include "../include/mc-sys.h"
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "dprc-cmd.h"
-
-struct dprc_child_objs {
- int child_count;
- struct dprc_obj_desc *child_array;
-};
-
-static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
-{
- int i;
- struct dprc_child_objs *objs;
- struct fsl_mc_device *mc_dev;
-
- WARN_ON(!dev);
- WARN_ON(!data);
- mc_dev = to_fsl_mc_device(dev);
- objs = data;
-
- for (i = 0; i < objs->child_count; i++) {
- struct dprc_obj_desc *obj_desc = &objs->child_array[i];
-
- if (strlen(obj_desc->type) != 0 &&
- FSL_MC_DEVICE_MATCH(mc_dev, obj_desc))
- break;
- }
-
- if (i == objs->child_count)
- fsl_mc_device_remove(mc_dev);
-
- return 0;
-}
-
-static int __fsl_mc_device_remove(struct device *dev, void *data)
-{
- WARN_ON(!dev);
- WARN_ON(data);
- fsl_mc_device_remove(to_fsl_mc_device(dev));
- return 0;
-}
-
-/**
- * dprc_remove_devices - Removes devices for objects removed from a DPRC
- *
- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- * @obj_desc_array: array of object descriptors for child objects currently
- * present in the DPRC in the MC.
- * @num_child_objects_in_mc: number of entries in obj_desc_array
- *
- * Synchronizes the state of the Linux bus driver with the actual state of
- * the MC by removing devices that represent MC objects that have
- * been dynamically removed in the physical DPRC.
- */
-static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
- struct dprc_obj_desc *obj_desc_array,
- int num_child_objects_in_mc)
-{
- if (num_child_objects_in_mc != 0) {
- /*
- * Remove child objects that are in the DPRC in Linux,
- * but not in the MC:
- */
- struct dprc_child_objs objs;
-
- objs.child_count = num_child_objects_in_mc;
- objs.child_array = obj_desc_array;
- device_for_each_child(&mc_bus_dev->dev, &objs,
- __fsl_mc_device_remove_if_not_in_mc);
- } else {
- /*
- * There are no child objects for this DPRC in the MC.
- * So, remove all the child devices from Linux:
- */
- device_for_each_child(&mc_bus_dev->dev, NULL,
- __fsl_mc_device_remove);
- }
-}
-
-static int __fsl_mc_device_match(struct device *dev, void *data)
-{
- struct dprc_obj_desc *obj_desc = data;
- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-
- return FSL_MC_DEVICE_MATCH(mc_dev, obj_desc);
-}
-
-static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc
- *obj_desc,
- struct fsl_mc_device
- *mc_bus_dev)
-{
- struct device *dev;
-
- dev = device_find_child(&mc_bus_dev->dev, obj_desc,
- __fsl_mc_device_match);
-
- return dev ? to_fsl_mc_device(dev) : NULL;
-}
-
-/**
- * check_plugged_state_change - Check change in an MC object's plugged state
- *
- * @mc_dev: pointer to the fsl-mc device for a given MC object
- * @obj_desc: pointer to the MC object's descriptor in the MC
- *
- * If the plugged state has changed from unplugged to plugged, the fsl-mc
- * device is bound to the corresponding device driver.
- * If the plugged state has changed from plugged to unplugged, the fsl-mc
- * device is unbound from the corresponding device driver.
- */
-static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
- struct dprc_obj_desc *obj_desc)
-{
- int error;
- u32 plugged_flag_at_mc =
- (obj_desc->state & DPRC_OBJ_STATE_PLUGGED);
-
- if (plugged_flag_at_mc !=
- (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) {
- if (plugged_flag_at_mc) {
- mc_dev->obj_desc.state |= DPRC_OBJ_STATE_PLUGGED;
- error = device_attach(&mc_dev->dev);
- if (error < 0) {
- dev_err(&mc_dev->dev,
- "device_attach() failed: %d\n",
- error);
- }
- } else {
- mc_dev->obj_desc.state &= ~DPRC_OBJ_STATE_PLUGGED;
- device_release_driver(&mc_dev->dev);
- }
- }
-}
-
-/**
- * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
- *
- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- * @obj_desc_array: array of device descriptors for child devices currently
- * present in the physical DPRC.
- * @num_child_objects_in_mc: number of entries in obj_desc_array
- *
- * Synchronizes the state of the Linux bus driver with the actual
- * state of the MC by adding objects that have been newly discovered
- * in the physical DPRC.
- */
-static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
- struct dprc_obj_desc *obj_desc_array,
- int num_child_objects_in_mc)
-{
- int error;
- int i;
-
- for (i = 0; i < num_child_objects_in_mc; i++) {
- struct fsl_mc_device *child_dev;
- struct dprc_obj_desc *obj_desc = &obj_desc_array[i];
-
- if (strlen(obj_desc->type) == 0)
- continue;
-
- /*
- * Check if device is already known to Linux:
- */
- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
- if (child_dev) {
- check_plugged_state_change(child_dev, obj_desc);
- continue;
- }
-
- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
- &child_dev);
- if (error < 0)
- continue;
- }
-}
-
-static void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- res_pool->type = pool_type;
- res_pool->max_count = 0;
- res_pool->free_count = 0;
- res_pool->mc_bus = mc_bus;
- INIT_LIST_HEAD(&res_pool->free_list);
- mutex_init(&res_pool->mutex);
- }
-}
-
-static void dprc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
- int free_count = 0;
-
- WARN_ON(res_pool->type != pool_type);
- WARN_ON(res_pool->free_count != res_pool->max_count);
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
- free_count++;
- WARN_ON(resource->type != res_pool->type);
- WARN_ON(resource->parent_pool != res_pool);
- devm_kfree(&mc_bus_dev->dev, resource);
- }
-
- WARN_ON(free_count != res_pool->free_count);
-}
-
-static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- dprc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
-/**
- * dprc_scan_objects - Discover objects in a DPRC
- *
- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- *
- * Detects objects added and removed from a DPRC and synchronizes the
- * state of the Linux bus driver with the MC by adding and removing
- * devices accordingly.
- * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
- * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
- * All allocatable devices need to be probed before all non-allocatable
- * devices, to ensure that device drivers for non-allocatable
- * devices can allocate any type of allocatable devices.
- * That is, we need to ensure that the corresponding resource pools are
- * populated before they can get allocation requests from probe callbacks
- * of the device drivers for the non-allocatable devices.
- */
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev)
-{
- int num_child_objects;
- int dprc_get_obj_failures;
- int error;
- struct dprc_obj_desc *child_obj_desc_array = NULL;
-
- error = dprc_get_obj_count(mc_bus_dev->mc_io,
- 0,
- mc_bus_dev->mc_handle,
- &num_child_objects);
- if (error < 0) {
- dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
- error);
- return error;
- }
-
- if (num_child_objects != 0) {
- int i;
-
- child_obj_desc_array =
- devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
- sizeof(*child_obj_desc_array),
- GFP_KERNEL);
- if (!child_obj_desc_array)
- return -ENOMEM;
-
- /*
- * Discover objects currently present in the physical DPRC:
- */
- dprc_get_obj_failures = 0;
- for (i = 0; i < num_child_objects; i++) {
- struct dprc_obj_desc *obj_desc =
- &child_obj_desc_array[i];
-
- error = dprc_get_obj(mc_bus_dev->mc_io,
- 0,
- mc_bus_dev->mc_handle,
- i, obj_desc);
- if (error < 0) {
- dev_err(&mc_bus_dev->dev,
- "dprc_get_obj(i=%d) failed: %d\n",
- i, error);
- /*
- * Mark the obj entry as "invalid", by using the
- * empty string as obj type:
- */
- obj_desc->type[0] = '\0';
- obj_desc->id = error;
- dprc_get_obj_failures++;
- continue;
- }
-
- dev_dbg(&mc_bus_dev->dev,
- "Discovered object: type %s, id %d\n",
- obj_desc->type, obj_desc->id);
- }
-
- if (dprc_get_obj_failures != 0) {
- dev_err(&mc_bus_dev->dev,
- "%d out of %d devices could not be retrieved\n",
- dprc_get_obj_failures, num_child_objects);
- }
- }
-
- dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
- num_child_objects);
-
- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
- num_child_objects);
-
- if (child_obj_desc_array)
- devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(dprc_scan_objects);
-
-/**
- * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
- *
- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- *
- * Scans the physical DPRC and synchronizes the state of the Linux
- * bus driver with the actual state of the MC by adding and removing
- * devices as appropriate.
- */
-int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
-{
- int error;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- dprc_init_all_resource_pools(mc_bus_dev);
-
- /*
- * Discover objects in the DPRC:
- */
- mutex_lock(&mc_bus->scan_mutex);
- error = dprc_scan_objects(mc_bus_dev);
- mutex_unlock(&mc_bus->scan_mutex);
- if (error < 0)
- goto error;
-
- return 0;
-error:
- dprc_cleanup_all_resource_pools(mc_bus_dev);
- return error;
-}
-EXPORT_SYMBOL_GPL(dprc_scan_container);
-
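Later resynchronizations do not need the full dprc_scan_container() path; a rescan trigger would only take the bus's scan_mutex and call dprc_scan_objects(), exactly as the function above does for the initial scan. A minimal sketch, hypothetical and not part of the deleted file:

static int dprc_rescan_example(struct fsl_mc_device *mc_bus_dev)
{
        struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
        int err;

        /* serialize against other scans of the same DPRC */
        mutex_lock(&mc_bus->scan_mutex);
        err = dprc_scan_objects(mc_bus_dev);
        mutex_unlock(&mc_bus->scan_mutex);

        return err;
}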
-/**
- * dprc_probe - callback invoked when a DPRC is being bound to this driver
- *
- * @mc_dev: Pointer to fsl-mc device representing a DPRC
- *
- * It opens the physical DPRC in the MC.
- * It scans the DPRC to discover the MC objects contained in it.
- * It creates the interrupt pool for the MC bus associated with the DPRC.
- * It configures the interrupts for the DPRC device itself.
- */
-static int dprc_probe(struct fsl_mc_device *mc_dev)
-{
- int error;
- size_t region_size;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
-
- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
- return -EINVAL;
-
- if (!mc_dev->mc_io) {
- /*
- * This is a child DPRC:
- */
- if (WARN_ON(mc_dev->obj_desc.region_count == 0))
- return -EINVAL;
-
- region_size = mc_dev->regions[0].end -
- mc_dev->regions[0].start + 1;
-
- error = fsl_create_mc_io(&mc_dev->dev,
- mc_dev->regions[0].start,
- region_size,
- NULL, 0, &mc_dev->mc_io);
- if (error < 0)
- return error;
- }
-
- error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
- &mc_dev->mc_handle);
- if (error < 0) {
- dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
- goto error_cleanup_mc_io;
- }
-
- mutex_init(&mc_bus->scan_mutex);
-
- /*
- * Discover MC objects in DPRC object:
- */
- error = dprc_scan_container(mc_dev);
- if (error < 0)
- goto error_cleanup_open;
-
-	dev_info(&mc_dev->dev, "DPRC device bound to driver\n");
- return 0;
-
-error_cleanup_open:
- (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-
-error_cleanup_mc_io:
- fsl_destroy_mc_io(mc_dev->mc_io);
- return error;
-}
-
-/**
- * dprc_remove - callback invoked when a DPRC is being unbound from this driver
- *
- * @mc_dev: Pointer to fsl-mc device representing the DPRC
- *
- * It removes the DPRC's child objects from Linux (not from the MC) and
- * closes the DPRC device in the MC.
- * It tears down the interrupts that were configured for the DPRC device.
- * It destroys the interrupt pool associated with this MC bus.
- */
-static int dprc_remove(struct fsl_mc_device *mc_dev)
-{
- int error;
-
- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
- return -EINVAL;
- if (WARN_ON(!mc_dev->mc_io))
- return -EINVAL;
-
- device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
- dprc_cleanup_all_resource_pools(mc_dev);
- error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
- if (error < 0)
- dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
-
-	dev_info(&mc_dev->dev, "DPRC device unbound from driver\n");
- return 0;
-}
-
-static const struct fsl_mc_device_match_id match_id_table[] = {
- {
- .vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dprc",
- .ver_major = DPRC_VER_MAJOR,
- .ver_minor = DPRC_VER_MINOR},
- {.vendor = 0x0},
-};
-
-static struct fsl_mc_driver dprc_driver = {
- .driver = {
- .name = FSL_MC_DPRC_DRIVER_NAME,
- .owner = THIS_MODULE,
- .pm = NULL,
- },
- .match_id_table = match_id_table,
- .probe = dprc_probe,
- .remove = dprc_remove,
-};
-
-int __init dprc_driver_init(void)
-{
- return fsl_mc_driver_register(&dprc_driver);
-}
-
-void __exit dprc_driver_exit(void)
-{
- fsl_mc_driver_unregister(&dprc_driver);
-}
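dprc_driver_init() and dprc_driver_exit() are left unregistered in this file, presumably so the fsl-mc bus core can call them at the right point during its own initialization; a standalone build would wire them up roughly as below (hypothetical, not part of the deleted file).

/* Hypothetical module wiring for the DPRC driver above. */
module_init(dprc_driver_init);
module_exit(dprc_driver_exit);
MODULE_LICENSE("GPL");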
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
deleted file mode 100644
index 381b9a96a14b..000000000000
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ /dev/null
@@ -1,1622 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include "../include/dprc.h"
-#include "dprc-cmd.h"
-
-/**
- * dprc_open() - Open DPRC object for use
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @container_id: Container ID to open
- * @token: Returned token of DPRC object
- *
- * Return: '0' on Success; Error code otherwise.
- *
- * @warning Required before any operation on the object.
- */
-int dprc_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int container_id,
- u16 *token)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
- 0);
- cmd.params[0] |= mc_enc(0, 32, container_id);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_open);
-
-/**
- * dprc_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
- token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-EXPORT_SYMBOL(dprc_close);
-
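
The two calls above form the DPRC session pattern: dprc_open() yields a token that every later command must carry, and dprc_close() ends the session. A minimal sketch of that flow, assuming a valid MC portal in mc_io and a known container_id:

	u16 dprc_token;
	int err;

	err = dprc_open(mc_io, 0, container_id, &dprc_token);
	if (err)
		return err;

	/* ... issue other dprc_*() commands using dprc_token ... */

	err = dprc_close(mc_io, 0, dprc_token);
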
-/**
- * dprc_create_container() - Create child container
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @cfg: Child container configuration
- * @child_container_id: Returned child container ID
- * @child_portal_offset: Returned child portal offset from MC portal base
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_create_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_cfg *cfg,
- int *child_container_id,
- u64 *child_portal_offset)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.params[0] |= mc_enc(32, 16, cfg->icid);
- cmd.params[0] |= mc_enc(0, 32, cfg->options);
- cmd.params[1] |= mc_enc(32, 32, cfg->portal_id);
- cmd.params[2] |= mc_enc(0, 8, cfg->label[0]);
- cmd.params[2] |= mc_enc(8, 8, cfg->label[1]);
- cmd.params[2] |= mc_enc(16, 8, cfg->label[2]);
- cmd.params[2] |= mc_enc(24, 8, cfg->label[3]);
- cmd.params[2] |= mc_enc(32, 8, cfg->label[4]);
- cmd.params[2] |= mc_enc(40, 8, cfg->label[5]);
- cmd.params[2] |= mc_enc(48, 8, cfg->label[6]);
- cmd.params[2] |= mc_enc(56, 8, cfg->label[7]);
- cmd.params[3] |= mc_enc(0, 8, cfg->label[8]);
- cmd.params[3] |= mc_enc(8, 8, cfg->label[9]);
- cmd.params[3] |= mc_enc(16, 8, cfg->label[10]);
- cmd.params[3] |= mc_enc(24, 8, cfg->label[11]);
- cmd.params[3] |= mc_enc(32, 8, cfg->label[12]);
- cmd.params[3] |= mc_enc(40, 8, cfg->label[13]);
- cmd.params[3] |= mc_enc(48, 8, cfg->label[14]);
- cmd.params[3] |= mc_enc(56, 8, cfg->label[15]);
-
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
- cmd_flags, token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *child_container_id = mc_dec(cmd.params[1], 0, 32);
- *child_portal_offset = mc_dec(cmd.params[2], 0, 64);
-
- return 0;
-}
-
-/**
- * dprc_destroy_container() - Destroy child container.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @child_container_id: ID of the container to destroy
- *
- * This function terminates the child container, so following this call the
- * child container ID becomes invalid.
- *
- * Notes:
- * - All resources and objects of the destroyed container are returned to the
- * parent container, or destroyed if they were created by the destroyed container.
- * - This function destroys all the child containers of the specified
- * container prior to destroying the container itself.
- *
- * warning: Only the parent container is allowed to destroy a child container.
- * Container 0 can't be destroyed.
- *
- * Return: '0' on Success; Error code otherwise.
- *
- */
-int dprc_destroy_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_reset_container - Reset child container.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @child_container_id: ID of the container to reset
- *
- * In case a software context crashes or becomes non-responsive, the parent
- * may wish to reset its resources container before the software context is
- * restarted.
- *
- * This routine informs all objects assigned to the child container that the
- * container is being reset, so they may perform any cleanup operations that are
- * needed. All objects handles that were owned by the child container shall be
- * closed.
- *
- * Note that such a request may be submitted even if the child software context
- * has not crashed, but the resulting object cleanup operations will not be
- * aware of that.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_reset_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_get_irq() - Get IRQ information from the DPRC.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @type: Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @irq_cfg: IRQ attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = mc_dec(cmd.params[2], 0, 32);
- *type = mc_dec(cmd.params[2], 32, 32);
-
- return 0;
-}
-
-/**
- * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: Identifies the interrupt index to configure
- * @irq_cfg: IRQ configuration
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_get_irq_enable() - Get overall interrupt state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @en: Returned interrupt state - enable = 1, disable = 0
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *en = mc_dec(cmd.params[0], 0, 8);
-
- return 0;
-}
-
-/**
- * dprc_set_irq_enable() - Set overall interrupt state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @en: Interrupt state - enable = 1, disable = 0
- *
- * Allows GPP software to control when interrupts are generated.
- * Each interrupt can have up to 32 causes. The enable/disable controls the
- * overall interrupt state: if the interrupt is disabled, no cause will trigger
- * an interrupt.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_get_irq_mask() - Get interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @mask: Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *mask = mc_dec(cmd.params[0], 0, 32);
-
- return 0;
-}
-
-/**
- * dprc_set_irq_mask() - Set interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @mask: event mask to trigger interrupt;
- * each bit:
- * 0 = ignore event
- * 1 = consider event for asserting irq
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_get_irq_status() - Get the current status of any pending interrupts.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @status: Returned interrupts status - one bit per cause:
- * 0 = no interrupt pending
- * 1 = interrupt pending
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *status = mc_dec(cmd.params[0], 0, 32);
-
- return 0;
-}
-
-/**
- * dprc_clear_irq_status() - Clear a pending interrupt's status
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @status: bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_get_attributes() - Obtains container attributes
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @attr:	Returned container attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_attributes *attr)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
- cmd_flags,
- token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- attr->container_id = mc_dec(cmd.params[0], 0, 32);
- attr->icid = mc_dec(cmd.params[0], 32, 16);
- attr->options = mc_dec(cmd.params[1], 0, 32);
- attr->portal_id = mc_dec(cmd.params[1], 32, 32);
- attr->version.major = mc_dec(cmd.params[2], 0, 16);
- attr->version.minor = mc_dec(cmd.params[2], 16, 16);
-
- return 0;
-}
-
-/**
- * dprc_set_res_quota() - Set allocation policy for a specific resource/object
- * type in a child container
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @child_container_id: ID of the child container
- * @type: Resource/object type
- * @quota: Sets the maximum number of resources of the selected type
- * that the child container is allowed to allocate from its parent;
- * when quota is set to -1, the policy is the same as container's
- * general policy.
- *
- * Allocation policy determines whether or not a container may allocate
- * resources from its parent. Each container has a 'global' allocation policy
- * that is set when the container is created.
- *
- * This function sets allocation policy for a specific resource type.
- * The default policy for all resource types matches the container's 'global'
- * allocation policy.
- *
- * Return: '0' on Success; Error code otherwise.
- *
- * @warning Only the parent container is allowed to change a child policy.
- */
-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 quota)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[0] |= mc_enc(32, 16, quota);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_get_res_quota() - Gets the allocation policy of a specific
- * resource/object type in a child container
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @child_container_id:	ID of the child container
- * @type: resource/object type
- * @quota:	Returns the maximum number of resources of the selected type
- * that the child container is allowed to allocate from the parent;
- * when quota is set to -1, the policy is the same as container's
- * general policy.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 *quota)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *quota = mc_dec(cmd.params[0], 32, 16);
-
- return 0;
-}
-
-/**
- * dprc_assign() - Assigns objects or resource to a child container.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @container_id: ID of the child container
- * @res_req: Describes the type and amount of resources to
- * assign to the given container
- *
- * Assignment is usually done by a parent (this DPRC) to one of its child
- * containers.
- *
- * According to the DPRC allocation policy, the assigned resources may be taken
- * (allocated) from the container's ancestors, if not enough resources are
- * available in the container itself.
- *
- * The type of assignment depends on the dprc_res_req options, as follows:
- * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have
- * the explicit base ID specified at the id_base_align field of res_req.
- * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be
- * aligned to the value given at id_base_align field of res_req.
- * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment,
- * and indicates that the object must be set to the plugged state.
- *
- * A container may use this function with its own ID in order to change an
- * object's state to plugged or unplugged.
- *
- * If IRQ information has been set in the child DPRC, it will signal an
- * interrupt following every change in its object assignment.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_assign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int container_id,
- struct dprc_res_req *res_req)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, container_id);
- cmd.params[0] |= mc_enc(32, 32, res_req->options);
- cmd.params[1] |= mc_enc(0, 32, res_req->num);
- cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
- cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
- cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
- cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
- cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
- cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
- cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
- cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
- cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
- cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
- cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
- cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
- cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
- cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
- cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
- cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
- cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_unassign() - Un-assigns objects or resources from a child container
- * and moves them into this (parent) DPRC.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @child_container_id: ID of the child container
- * @res_req: Describes the type and amount of resources to un-assign from
- * the child container
- *
- * Un-assignment of objects can succeed only if the object is not in the
- * plugged or opened state.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_unassign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- struct dprc_res_req *res_req)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[0] |= mc_enc(32, 32, res_req->options);
- cmd.params[1] |= mc_enc(0, 32, res_req->num);
- cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
- cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
- cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
- cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
- cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
- cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
- cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
- cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
- cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
- cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
- cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
- cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
- cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
- cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
- cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
- cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
- cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_get_pool_count() - Get the number of the DPRC's resource pools
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @pool_count: Returned number of resource pools in the dprc
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *pool_count)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT,
- cmd_flags, token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *pool_count = mc_dec(cmd.params[0], 0, 32);
-
- return 0;
-}
-
-/**
- * dprc_get_pool() - Get the type (string) of a certain dprc's pool
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @pool_index:	Index of the pool to be queried (< pool_count)
- * @type: The type of the pool
- *
- * The pool types are retrieved one by one by incrementing
- * pool_index up to (not including) the value of pool_count returned
- * from dprc_get_pool_count(). dprc_get_pool_count() must
- * be called prior to dprc_get_pool().
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_pool(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int pool_index,
- char *type)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, pool_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- type[0] = mc_dec(cmd.params[1], 0, 8);
- type[1] = mc_dec(cmd.params[1], 8, 8);
- type[2] = mc_dec(cmd.params[1], 16, 8);
- type[3] = mc_dec(cmd.params[1], 24, 8);
- type[4] = mc_dec(cmd.params[1], 32, 8);
- type[5] = mc_dec(cmd.params[1], 40, 8);
- type[6] = mc_dec(cmd.params[1], 48, 8);
- type[7] = mc_dec(cmd.params[1], 56, 8);
- type[8] = mc_dec(cmd.params[2], 0, 8);
- type[9] = mc_dec(cmd.params[2], 8, 8);
- type[10] = mc_dec(cmd.params[2], 16, 8);
- type[11] = mc_dec(cmd.params[2], 24, 8);
- type[12] = mc_dec(cmd.params[2], 32, 8);
- type[13] = mc_dec(cmd.params[2], 40, 8);
- type[14] = mc_dec(cmd.params[2], 48, 8);
- type[15] = '\0';
-
- return 0;
-}
-
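
dprc_get_pool_count() and dprc_get_pool() together enumerate a container's resource pools. A sketch of that loop, reusing the dprc_token from the open/close sketch above:

	int i, pool_count;
	char pool_type[16];	/* dprc_get_pool() fills at most 15 chars plus '\0' */

	err = dprc_get_pool_count(mc_io, 0, dprc_token, &pool_count);
	if (err)
		return err;

	for (i = 0; i < pool_count; i++) {
		err = dprc_get_pool(mc_io, 0, dprc_token, i, pool_type);
		if (err)
			return err;
		/* pool_type now holds the NUL-terminated pool type string */
	}
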
-/**
- * dprc_get_obj_count() - Obtains the number of objects in the DPRC
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_count: Number of objects assigned to the DPRC
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_obj_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *obj_count)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
- cmd_flags, token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *obj_count = mc_dec(cmd.params[0], 32, 32);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_obj_count);
-
-/**
- * dprc_get_obj() - Get general information on an object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_index: Index of the object to be queried (< obj_count)
- * @obj_desc: Returns the requested object descriptor
- *
- * The object descriptors are retrieved one by one by incrementing
- * obj_index up to (not including) the value of obj_count returned
- * from dprc_get_obj_count(). dprc_get_obj_count() must
- * be called prior to dprc_get_obj().
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_obj(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int obj_index,
- struct dprc_obj_desc *obj_desc)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, obj_index);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- obj_desc->id = mc_dec(cmd.params[0], 32, 32);
- obj_desc->vendor = mc_dec(cmd.params[1], 0, 16);
- obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8);
- obj_desc->region_count = mc_dec(cmd.params[1], 24, 8);
- obj_desc->state = mc_dec(cmd.params[1], 32, 32);
- obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
- obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
- obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
- obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
- obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
- obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8);
- obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8);
- obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8);
- obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8);
- obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8);
- obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8);
- obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8);
- obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8);
- obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8);
- obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8);
- obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8);
- obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8);
- obj_desc->type[15] = '\0';
- obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8);
- obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8);
- obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8);
- obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8);
- obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8);
- obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8);
- obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8);
- obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8);
- obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8);
- obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8);
- obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8);
- obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8);
- obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8);
- obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8);
- obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8);
- obj_desc->label[15] = '\0';
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_obj);
-
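
Object discovery follows the same count-then-index pattern: dprc_get_obj_count() bounds the index and dprc_get_obj() fills one descriptor per index. A sketch, again assuming an open session token:

	int i, obj_count;
	struct dprc_obj_desc obj_desc;

	err = dprc_get_obj_count(mc_io, 0, dprc_token, &obj_count);
	if (err)
		return err;

	for (i = 0; i < obj_count; i++) {
		err = dprc_get_obj(mc_io, 0, dprc_token, i, &obj_desc);
		if (err)
			continue;
		/* obj_desc.type and obj_desc.id identify the child object */
	}
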
-/**
- * dprc_get_obj_desc() - Get object descriptor.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_type: The type of the object to get its descriptor.
- * @obj_id: The id of the object to get its descriptor
- * @obj_desc: The returned descriptor to fill and return to the user
- *
- * Return: '0' on Success; Error code otherwise.
- *
- */
-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- struct dprc_obj_desc *obj_desc)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32);
- obj_desc->vendor = (u16)mc_dec(cmd.params[1], 0, 16);
-	obj_desc->irq_count = (u8)mc_dec(cmd.params[1], 16, 8);
- obj_desc->region_count = (u8)mc_dec(cmd.params[1], 24, 8);
- obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
- obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
- obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
- obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
- obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
- obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
- obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8);
- obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8);
- obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8);
- obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8);
- obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8);
- obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8);
- obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8);
- obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8);
- obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8);
- obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8);
- obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8);
- obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8);
- obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8);
- obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8);
- obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8);
- obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8);
- obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8);
- obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8);
- obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8);
- obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8);
- obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8);
- obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8);
- obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8);
- obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8);
- obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8);
- obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8);
- obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8);
- obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8);
- obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_obj_desc);
-
-/**
- * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_type: Type of the object to set its IRQ
- * @obj_id: ID of the object to set its IRQ
- * @irq_index: The interrupt index to configure
- * @irq_cfg: IRQ configuration
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
- cmd.params[2] |= mc_enc(32, 32, obj_id);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-EXPORT_SYMBOL(dprc_set_obj_irq);
-
-/**
- * dprc_get_obj_irq() - Get IRQ information from object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_type:	Type of the object to get its IRQ
- * @obj_id: ID of the object to get its IRQ
- * @irq_index: The interrupt index to configure
- * @type: Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @irq_cfg: The returned IRQ attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_obj_irq);
-
-/**
- * dprc_get_res_count() - Obtains the number of free resources that are assigned
- * to this container, by pool type
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @type: pool type
- * @res_count: Returned number of free resources of the given
- * resource type that are assigned to this DPRC
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_res_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- int *res_count)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- *res_count = 0;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
- cmd_flags, token);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *res_count = mc_dec(cmd.params[0], 0, 32);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_res_count);
-
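
Combined with the pool enumeration shown earlier, dprc_get_res_count() reports how many free resources of a given pool type the container currently holds. A sketch, assuming pool_type came from dprc_get_pool():

	int free_count;

	err = dprc_get_res_count(mc_io, 0, dprc_token, pool_type, &free_count);
	if (err)
		return err;
	/* free_count: free resources of this pool type assigned to the DPRC */
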
-/**
- * dprc_get_res_ids() - Obtains IDs of free resources in the container
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @type: pool type
- * @range_desc: range descriptor
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- struct dprc_res_ids_range_desc *range_desc)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status);
- cmd.params[1] |= mc_enc(0, 32, range_desc->base_id);
- cmd.params[1] |= mc_enc(32, 32, range_desc->last_id);
- cmd.params[2] |= mc_enc(0, 8, type[0]);
- cmd.params[2] |= mc_enc(8, 8, type[1]);
- cmd.params[2] |= mc_enc(16, 8, type[2]);
- cmd.params[2] |= mc_enc(24, 8, type[3]);
- cmd.params[2] |= mc_enc(32, 8, type[4]);
- cmd.params[2] |= mc_enc(40, 8, type[5]);
- cmd.params[2] |= mc_enc(48, 8, type[6]);
- cmd.params[2] |= mc_enc(56, 8, type[7]);
- cmd.params[3] |= mc_enc(0, 8, type[8]);
- cmd.params[3] |= mc_enc(8, 8, type[9]);
- cmd.params[3] |= mc_enc(16, 8, type[10]);
- cmd.params[3] |= mc_enc(24, 8, type[11]);
- cmd.params[3] |= mc_enc(32, 8, type[12]);
- cmd.params[3] |= mc_enc(40, 8, type[13]);
- cmd.params[3] |= mc_enc(48, 8, type[14]);
- cmd.params[3] |= mc_enc(56, 8, '\0');
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- range_desc->iter_status = mc_dec(cmd.params[0], 42, 7);
- range_desc->base_id = mc_dec(cmd.params[1], 0, 32);
- range_desc->last_id = mc_dec(cmd.params[1], 32, 32);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_res_ids);
-
-/**
- * dprc_get_obj_region() - Get region information for a specified object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_type:	Object type as returned in dprc_get_obj()
- * @obj_id: Unique object instance as returned in dprc_get_obj()
- * @region_index: The specific region to query
- * @region_desc: Returns the requested region descriptor
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 region_index,
- struct dprc_region_desc *region_desc)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[0] |= mc_enc(48, 8, region_index);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, '\0');
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- region_desc->base_offset = mc_dec(cmd.params[1], 0, 64);
- region_desc->size = mc_dec(cmd.params[2], 0, 32);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_obj_region);
-
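
dprc_get_obj_region() returns the location of one MMIO region of a child object. A sketch that fetches region 0 for an object found via dprc_get_obj(); how base_offset is translated into a CPU-visible address is bus-specific and not shown here:

	struct dprc_region_desc region;

	err = dprc_get_obj_region(mc_io, 0, dprc_token, obj_desc.type,
				  obj_desc.id, 0, &region);
	if (err)
		return err;
	/* region.base_offset and region.size describe the object's region 0 */
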
-/**
- * dprc_set_obj_label() - Set object label.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_type: Object's type
- * @obj_id: Object's ID
- * @label: The required label. The maximum length is 16 chars.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- char *label)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
- cmd_flags,
- token);
-
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[1] |= mc_enc(0, 8, label[0]);
- cmd.params[1] |= mc_enc(8, 8, label[1]);
- cmd.params[1] |= mc_enc(16, 8, label[2]);
- cmd.params[1] |= mc_enc(24, 8, label[3]);
- cmd.params[1] |= mc_enc(32, 8, label[4]);
- cmd.params[1] |= mc_enc(40, 8, label[5]);
- cmd.params[1] |= mc_enc(48, 8, label[6]);
- cmd.params[1] |= mc_enc(56, 8, label[7]);
- cmd.params[2] |= mc_enc(0, 8, label[8]);
- cmd.params[2] |= mc_enc(8, 8, label[9]);
- cmd.params[2] |= mc_enc(16, 8, label[10]);
- cmd.params[2] |= mc_enc(24, 8, label[11]);
- cmd.params[2] |= mc_enc(32, 8, label[12]);
- cmd.params[2] |= mc_enc(40, 8, label[13]);
- cmd.params[2] |= mc_enc(48, 8, label[14]);
- cmd.params[2] |= mc_enc(56, 8, label[15]);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-EXPORT_SYMBOL(dprc_set_obj_label);
-
-/**
- * dprc_connect() - Connect two endpoints to create a network link between them
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @endpoint1: Endpoint 1 configuration parameters
- * @endpoint2: Endpoint 2 configuration parameters
- * @cfg: Connection configuration. The connection configuration is ignored for
- * connections made to DPMAC objects, where rate is set according to
- * MAC configuration.
- * The committed rate is the guaranteed rate for the connection.
- * The maximum rate is an upper limit allowed for the connection; it is
- * expected to be equal or higher than the committed rate.
- * When committed and maximum rates are both zero, the connection is set
- * to "best effort" mode, having lower priority compared to connections
- * with committed or maximum rates.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_connect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- const struct dprc_endpoint *endpoint2,
- const struct dprc_connection_cfg *cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
- cmd.params[1] |= mc_enc(0, 32, endpoint2->id);
- cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id);
- cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]);
- cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]);
- cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]);
- cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]);
- cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]);
- cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]);
- cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]);
- cmd.params[2] |= mc_enc(56, 8, endpoint1->type[7]);
- cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]);
- cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]);
- cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]);
- cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]);
- cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]);
- cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]);
- cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]);
- cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]);
- cmd.params[4] |= mc_enc(0, 32, cfg->max_rate);
- cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate);
- cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]);
- cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]);
- cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]);
- cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]);
- cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]);
- cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]);
- cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]);
- cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]);
- cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]);
- cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]);
- cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]);
- cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]);
- cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]);
- cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]);
- cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]);
- cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_disconnect() - Disconnect one endpoint to remove its network connection
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @endpoint: Endpoint configuration parameters
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_disconnect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, endpoint->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint->if_id);
- cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]);
- cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]);
- cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]);
- cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]);
- cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]);
- cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]);
- cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]);
- cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]);
- cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]);
- cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]);
- cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]);
- cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]);
- cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]);
- cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]);
- cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]);
- cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
-* dprc_get_connection() - Get connected endpoint and link status if connection
-* exists.
-* @mc_io: Pointer to MC portal's I/O object
-* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-* @token: Token of DPRC object
-* @endpoint1: Endpoint 1 configuration parameters
-* @endpoint2: Returned endpoint 2 configuration parameters
-* @state: Returned link state: 1 - link is up, 0 - link is down
-*
-* Return: '0' on Success; -ENAVAIL if connection does not exist.
-*/
-int dprc_get_connection(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- struct dprc_endpoint *endpoint2,
- int *state)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
- cmd_flags,
- token);
- cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
- cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]);
- cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]);
- cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]);
- cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]);
- cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]);
- cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]);
- cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]);
- cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]);
- cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]);
- cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]);
- cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]);
- cmd.params[2] |= mc_enc(24, 8, endpoint1->type[11]);
- cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]);
- cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]);
- cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]);
- cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- endpoint2->id = mc_dec(cmd.params[3], 0, 32);
- endpoint2->if_id = mc_dec(cmd.params[3], 32, 32);
- endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8);
- endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8);
- endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8);
- endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8);
- endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8);
- endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8);
- endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8);
- endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8);
- endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8);
- endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8);
- endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8);
- endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8);
- endpoint2->type[12] = mc_dec(cmd.params[5], 32, 8);
- endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8);
- endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8);
- endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8);
- *state = mc_dec(cmd.params[6], 0, 32);
-
- return 0;
-}
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
deleted file mode 100644
index d087b4c7537f..000000000000
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- * Freescale MC object device allocator driver
- *
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include "../include/mc-private.h"
-#include "../include/mc-sys.h"
-#include <linux/module.h>
-#include "../include/dpbp-cmd.h"
-#include "../include/dpcon-cmd.h"
-#include "dpmcp-cmd.h"
-#include "dpmcp.h"
-
-/**
- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
- * pool of a given MC bus
- *
- * @mc_bus: pointer to the MC bus
- * @pool_type: MC bus pool type
- * @mc_dev: Pointer to allocatable MC object device
- *
- * It adds an allocatable MC object device to a container's resource pool of
- * the given resource type
- */
-static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
- *mc_bus,
- enum fsl_mc_pool_type
- pool_type,
- struct fsl_mc_device
- *mc_dev)
-{
- struct fsl_mc_resource_pool *res_pool;
- struct fsl_mc_resource *resource;
- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
- int error = -EINVAL;
- bool mutex_locked = false;
-
- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
- goto out;
- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
- goto out;
- if (WARN_ON(mc_dev->resource))
- goto out;
-
- res_pool = &mc_bus->resource_pools[pool_type];
- if (WARN_ON(res_pool->type != pool_type))
- goto out;
- if (WARN_ON(res_pool->mc_bus != mc_bus))
- goto out;
-
- mutex_lock(&res_pool->mutex);
- mutex_locked = true;
-
- if (WARN_ON(res_pool->max_count < 0))
- goto out;
- if (WARN_ON(res_pool->free_count < 0 ||
- res_pool->free_count > res_pool->max_count))
- goto out;
-
- resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
- GFP_KERNEL);
- if (!resource) {
- error = -ENOMEM;
- dev_err(&mc_bus_dev->dev,
- "Failed to allocate memory for fsl_mc_resource\n");
- goto out;
- }
-
- resource->type = pool_type;
- resource->id = mc_dev->obj_desc.id;
- resource->data = mc_dev;
- resource->parent_pool = res_pool;
- INIT_LIST_HEAD(&resource->node);
- list_add_tail(&resource->node, &res_pool->free_list);
- mc_dev->resource = resource;
- res_pool->free_count++;
- res_pool->max_count++;
- error = 0;
-out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
- return error;
-}
-
-/**
- * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
- * resource pool
- *
- * @mc_dev: Pointer to allocatable MC object device
- *
- * It permanently removes an allocatable MC object device from the resource
- * pool the device is currently in, as long as it is in the pool's free list.
- */
-static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
- *mc_dev)
-{
- struct fsl_mc_device *mc_bus_dev;
- struct fsl_mc_bus *mc_bus;
- struct fsl_mc_resource_pool *res_pool;
- struct fsl_mc_resource *resource;
- int error = -EINVAL;
- bool mutex_locked = false;
-
- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
- goto out;
-
- resource = mc_dev->resource;
- if (WARN_ON(resource->data != mc_dev))
- goto out;
-
- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
- mc_bus = to_fsl_mc_bus(mc_bus_dev);
- res_pool = resource->parent_pool;
- if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type]))
- goto out;
-
- mutex_lock(&res_pool->mutex);
- mutex_locked = true;
-
- if (WARN_ON(res_pool->max_count <= 0))
- goto out;
- if (WARN_ON(res_pool->free_count <= 0 ||
- res_pool->free_count > res_pool->max_count))
- goto out;
-
- /*
- * If the device is currently allocated, its resource is not
- * in the free list and thus, the device cannot be removed.
- */
- if (list_empty(&resource->node)) {
- error = -EBUSY;
- dev_err(&mc_bus_dev->dev,
- "Device %s cannot be removed from resource pool\n",
- dev_name(&mc_dev->dev));
- goto out;
- }
-
- list_del(&resource->node);
- INIT_LIST_HEAD(&resource->node);
- res_pool->free_count--;
- res_pool->max_count--;
-
- devm_kfree(&mc_bus_dev->dev, resource);
- mc_dev->resource = NULL;
- error = 0;
-out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
- return error;
-}
-
-static const char *const fsl_mc_pool_type_strings[] = {
- [FSL_MC_POOL_DPMCP] = "dpmcp",
- [FSL_MC_POOL_DPBP] = "dpbp",
- [FSL_MC_POOL_DPCON] = "dpcon",
-};
-
-static int __must_check
-object_type_to_pool_type(const char *object_type,
-			 enum fsl_mc_pool_type *pool_type)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
- if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
- *pool_type = i;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
- enum fsl_mc_pool_type pool_type,
- struct fsl_mc_resource **new_resource)
-{
- struct fsl_mc_resource_pool *res_pool;
- struct fsl_mc_resource *resource;
- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
- int error = -EINVAL;
- bool mutex_locked = false;
-
- BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
- FSL_MC_NUM_POOL_TYPES);
-
- *new_resource = NULL;
- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
- goto error;
-
- res_pool = &mc_bus->resource_pools[pool_type];
- if (WARN_ON(res_pool->mc_bus != mc_bus))
- goto error;
-
- mutex_lock(&res_pool->mutex);
- mutex_locked = true;
- resource = list_first_entry_or_null(&res_pool->free_list,
- struct fsl_mc_resource, node);
-
- if (!resource) {
- WARN_ON(res_pool->free_count != 0);
- error = -ENXIO;
- dev_err(&mc_bus_dev->dev,
- "No more resources of type %s left\n",
- fsl_mc_pool_type_strings[pool_type]);
- goto error;
- }
-
- if (WARN_ON(resource->type != pool_type))
- goto error;
- if (WARN_ON(resource->parent_pool != res_pool))
- goto error;
- if (WARN_ON(res_pool->free_count <= 0 ||
- res_pool->free_count > res_pool->max_count))
- goto error;
-
- list_del(&resource->node);
- INIT_LIST_HEAD(&resource->node);
-
- res_pool->free_count--;
- mutex_unlock(&res_pool->mutex);
- *new_resource = resource;
- return 0;
-error:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
- return error;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
-
-void fsl_mc_resource_free(struct fsl_mc_resource *resource)
-{
- struct fsl_mc_resource_pool *res_pool;
- bool mutex_locked = false;
-
- res_pool = resource->parent_pool;
- if (WARN_ON(resource->type != res_pool->type))
- goto out;
-
- mutex_lock(&res_pool->mutex);
- mutex_locked = true;
- if (WARN_ON(res_pool->free_count < 0 ||
- res_pool->free_count >= res_pool->max_count))
- goto out;
-
- if (WARN_ON(!list_empty(&resource->node)))
- goto out;
-
- list_add_tail(&resource->node, &res_pool->free_list);
- res_pool->free_count++;
-out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-}
-EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
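
A minimal sketch of how the allocate/free pair above is meant to be used by an fsl-mc object driver; the caller is hypothetical, error handling is trimmed, and the fsl-mc staging headers are assumed to be in scope:

static int example_borrow_dpcon(struct fsl_mc_bus *mc_bus)
{
	struct fsl_mc_resource *res;
	int error;

	error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPCON, &res);
	if (error < 0)
		return error;

	/* res->data points to the backing allocatable fsl_mc_device */

	fsl_mc_resource_free(res);	/* return it to the pool's free list */
	return 0;
}
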
-
-/**
- * fsl_mc_portal_allocate - Allocates an MC portal
- *
- * @mc_dev: MC device for which the MC portal is to be allocated
- * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
- * MC portal.
- * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
- * that wraps the allocated MC portal is to be returned
- *
- * This function allocates an MC portal from the device's parent DPRC,
- * from the corresponding MC bus' pool of MC portals and wraps
- * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
- * portal is allocated from its own MC bus.
- */
-int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
- u16 mc_io_flags,
- struct fsl_mc_io **new_mc_io)
-{
- struct fsl_mc_device *mc_bus_dev;
- struct fsl_mc_bus *mc_bus;
- phys_addr_t mc_portal_phys_addr;
- size_t mc_portal_size;
- struct fsl_mc_device *mc_adev;
- int error = -EINVAL;
- struct fsl_mc_resource *resource = NULL;
- struct fsl_mc_io *mc_io = NULL;
-
- if (mc_dev->flags & FSL_MC_IS_DPRC) {
- mc_bus_dev = mc_dev;
- } else {
- if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type))
- return error;
-
- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
- }
-
- mc_bus = to_fsl_mc_bus(mc_bus_dev);
- *new_mc_io = NULL;
- error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
- if (error < 0)
- return error;
-
- mc_adev = resource->data;
- if (WARN_ON(!mc_adev))
- goto error_cleanup_resource;
-
- if (WARN_ON(mc_adev->obj_desc.region_count == 0))
- goto error_cleanup_resource;
-
- mc_portal_phys_addr = mc_adev->regions[0].start;
- mc_portal_size = mc_adev->regions[0].end -
- mc_adev->regions[0].start + 1;
-
- if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size))
- goto error_cleanup_resource;
-
- error = fsl_create_mc_io(&mc_bus_dev->dev,
- mc_portal_phys_addr,
- mc_portal_size, resource,
- mc_io_flags, &mc_io);
- if (error < 0)
- goto error_cleanup_resource;
-
- *new_mc_io = mc_io;
- return 0;
-
-error_cleanup_resource:
- fsl_mc_resource_free(resource);
- return error;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
-
-/**
- * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
- * of a given MC bus
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
-{
- struct fsl_mc_resource *resource;
-
- resource = mc_io->resource;
- if (WARN_ON(resource->type != FSL_MC_POOL_DPMCP))
- return;
- if (WARN_ON(!resource->data))
- return;
-
- fsl_destroy_mc_io(mc_io);
- fsl_mc_resource_free(resource);
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
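
A hedged sketch of the expected portal life cycle for a non-DPRC object driver, pairing fsl_mc_portal_allocate() with fsl_mc_portal_free(); the caller name is a placeholder:

static int example_use_private_portal(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_io *mc_io;
	int error;

	error = fsl_mc_portal_allocate(mc_dev, 0, &mc_io);
	if (error < 0)
		return error;

	/* ... issue MC commands for mc_dev through mc_io ... */

	fsl_mc_portal_free(mc_io);
	return 0;
}
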
-
-/**
- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to reset
- */
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-{
- int error;
- u16 token;
- struct fsl_mc_resource *resource = mc_io->resource;
- struct fsl_mc_device *mc_dev = resource->data;
-
- if (WARN_ON(resource->type != FSL_MC_POOL_DPMCP))
- return -EINVAL;
-
- if (WARN_ON(!mc_dev))
- return -EINVAL;
-
- error = dpmcp_open(mc_io, 0, mc_dev->obj_desc.id, &token);
- if (error < 0) {
- dev_err(&mc_dev->dev, "dpmcp_open() failed: %d\n", error);
- return error;
- }
-
- error = dpmcp_reset(mc_io, 0, token);
- if (error < 0) {
- dev_err(&mc_dev->dev, "dpmcp_reset() failed: %d\n", error);
- return error;
- }
-
- error = dpmcp_close(mc_io, 0, token);
- if (error < 0) {
- dev_err(&mc_dev->dev, "dpmcp_close() failed: %d\n", error);
- return error;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
-
-/**
- * fsl_mc_object_allocate - Allocates an MC object device of the given
- * pool type from a given MC bus
- *
- * @mc_dev: MC device for which the MC object device is to be allocated
- * @pool_type: MC bus resource pool type
- * @new_mc_dev: Pointer to area where the pointer to the allocated
- * MC object device is to be returned
- *
- * This function allocates an MC object device from the device's parent DPRC,
- * from the corresponding MC bus' pool of allocatable MC object devices of
- * the given resource type. mc_dev cannot be a DPRC itself.
- *
- * NOTE: pool_type must be different from FSL_MC_POOL_DPMCP, since MC
- * portals are allocated using fsl_mc_portal_allocate(), instead of
- * this function.
- */
-int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
- enum fsl_mc_pool_type pool_type,
- struct fsl_mc_device **new_mc_adev)
-{
- struct fsl_mc_device *mc_bus_dev;
- struct fsl_mc_bus *mc_bus;
- struct fsl_mc_device *mc_adev;
- int error = -EINVAL;
- struct fsl_mc_resource *resource = NULL;
-
- *new_mc_adev = NULL;
- if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC))
- goto error;
-
- if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type))
- goto error;
-
- if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP))
- goto error;
-
- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
- mc_bus = to_fsl_mc_bus(mc_bus_dev);
- error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
- if (error < 0)
- goto error;
-
- mc_adev = resource->data;
- if (WARN_ON(!mc_adev))
- goto error;
-
- *new_mc_adev = mc_adev;
- return 0;
-error:
- if (resource)
- fsl_mc_resource_free(resource);
-
- return error;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
-
-/**
- * fsl_mc_object_free - Returns an allocatable MC object device to the
- * corresponding resource pool of a given MC bus.
- *
- * @mc_adev: Pointer to the MC object device
- */
-void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
-{
- struct fsl_mc_resource *resource;
-
- resource = mc_adev->resource;
- if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP))
- return;
- if (WARN_ON(resource->data != mc_adev))
- return;
-
- fsl_mc_resource_free(resource);
-}
-EXPORT_SYMBOL_GPL(fsl_mc_object_free);
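
A short sketch, under the same assumptions, of borrowing and returning an allocatable object (here a DPBP) with the pair above; the caller is hypothetical:

static int example_borrow_dpbp(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_device *dpbp_dev;
	int error;

	error = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
	if (error < 0)
		return error;

	/* ... configure the buffer pool via the dpbp_*() commands ... */

	fsl_mc_object_free(dpbp_dev);
	return 0;
}
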
-
-/**
- * fsl_mc_allocator_probe - callback invoked when an allocatable device is
- * being added to the system
- */
-static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
-{
- enum fsl_mc_pool_type pool_type;
- struct fsl_mc_device *mc_bus_dev;
- struct fsl_mc_bus *mc_bus;
- int error = -EINVAL;
-
- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
- goto error;
-
- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
- if (WARN_ON(mc_bus_dev->dev.bus != &fsl_mc_bus_type))
- goto error;
-
- mc_bus = to_fsl_mc_bus(mc_bus_dev);
- error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
- if (error < 0)
- goto error;
-
- error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
- if (error < 0)
- goto error;
-
- dev_info(&mc_dev->dev,
-		 "Allocatable MC object device bound to fsl_mc_allocator driver\n");
- return 0;
-error:
-
- return error;
-}
-
-/**
- * fsl_mc_allocator_remove - callback invoked when an allocatable device is
- * being removed from the system
- */
-static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
-{
- int error = -EINVAL;
-
- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
- goto out;
-
- error = fsl_mc_resource_pool_remove_device(mc_dev);
- if (error < 0)
- goto out;
-
- dev_info(&mc_dev->dev,
-		 "Allocatable MC object device unbound from fsl_mc_allocator driver\n");
- error = 0;
-out:
- return error;
-}
-
-static const struct fsl_mc_device_match_id match_id_table[] = {
- {
- .vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dpbp",
- .ver_major = DPBP_VER_MAJOR,
- .ver_minor = DPBP_VER_MINOR
- },
- {
- .vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dpmcp",
- .ver_major = DPMCP_VER_MAJOR,
- .ver_minor = DPMCP_VER_MINOR
- },
- {
- .vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dpcon",
- .ver_major = DPCON_VER_MAJOR,
- .ver_minor = DPCON_VER_MINOR
- },
- {.vendor = 0x0},
-};
-
-static struct fsl_mc_driver fsl_mc_allocator_driver = {
- .driver = {
- .name = "fsl_mc_allocator",
- .owner = THIS_MODULE,
- .pm = NULL,
- },
- .match_id_table = match_id_table,
- .probe = fsl_mc_allocator_probe,
- .remove = fsl_mc_allocator_remove,
-};
-
-int __init fsl_mc_allocator_driver_init(void)
-{
- return fsl_mc_driver_register(&fsl_mc_allocator_driver);
-}
-
-void __exit fsl_mc_allocator_driver_exit(void)
-{
- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-}
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
deleted file mode 100644
index 92e070237aa9..000000000000
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ /dev/null
@@ -1,814 +0,0 @@
-/*
- * Freescale Management Complex (MC) bus driver
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include "../include/mc-private.h"
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/limits.h>
-#include "../include/dpmng.h"
-#include "../include/mc-sys.h"
-#include "dprc-cmd.h"
-
-static struct kmem_cache *mc_dev_cache;
-
-/**
- * fsl_mc_bus_match - device to driver matching callback
- * @dev: the MC object device structure to match against
- * @drv: the device driver to search for matching MC object device id
- * structures
- *
- * Returns 1 on success, 0 otherwise.
- */
-static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
-{
- const struct fsl_mc_device_match_id *id;
- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
- bool found = false;
- bool major_version_mismatch = false;
- bool minor_version_mismatch = false;
-
- if (WARN_ON(!fsl_mc_bus_type.dev_root))
- goto out;
-
- if (!mc_drv->match_id_table)
- goto out;
-
- /*
- * If the object is not 'plugged' don't match.
- * The only exception is the root DPRC, which is a special case.
- */
- if ((mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED) == 0 &&
- &mc_dev->dev != fsl_mc_bus_type.dev_root)
- goto out;
-
- /*
- * Traverse the match_id table of the given driver, trying to find
- * a match for the given MC object device.
- */
- for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
- if (id->vendor == mc_dev->obj_desc.vendor &&
- strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
- if (id->ver_major == mc_dev->obj_desc.ver_major) {
- found = true;
- if (id->ver_minor != mc_dev->obj_desc.ver_minor)
- minor_version_mismatch = true;
- } else {
- major_version_mismatch = true;
- }
-
- break;
- }
- }
-
- if (major_version_mismatch) {
- dev_warn(dev,
- "Major version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- } else if (minor_version_mismatch) {
- dev_warn(dev,
- "Minor version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- }
-
-out:
- dev_dbg(dev, "%smatched\n", found ? "" : "not ");
- return found;
-}
-
-/**
- * fsl_mc_bus_uevent - callback invoked when a device is added
- */
-static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- pr_debug("%s invoked\n", __func__);
- return 0;
-}
-
-struct bus_type fsl_mc_bus_type = {
- .name = "fsl-mc",
- .match = fsl_mc_bus_match,
- .uevent = fsl_mc_bus_uevent,
-};
-EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-
-static int fsl_mc_driver_probe(struct device *dev)
-{
- struct fsl_mc_driver *mc_drv;
- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- int error;
-
- if (WARN_ON(!dev->driver))
- return -EINVAL;
-
- mc_drv = to_fsl_mc_driver(dev->driver);
- if (WARN_ON(!mc_drv->probe))
- return -EINVAL;
-
- error = mc_drv->probe(mc_dev);
- if (error < 0) {
- dev_err(dev, "MC object device probe callback failed: %d\n",
- error);
- return error;
- }
-
- return 0;
-}
-
-static int fsl_mc_driver_remove(struct device *dev)
-{
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- int error;
-
- if (WARN_ON(!dev->driver))
- return -EINVAL;
-
- error = mc_drv->remove(mc_dev);
- if (error < 0) {
- dev_err(dev,
- "MC object device remove callback failed: %d\n",
- error);
- return error;
- }
-
- return 0;
-}
-
-static void fsl_mc_driver_shutdown(struct device *dev)
-{
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-
- mc_drv->shutdown(mc_dev);
-}
-
-/**
- * __fsl_mc_driver_register - registers a child device driver with the
- * MC bus
- *
- * This function is implicitly invoked from the registration function of
- * fsl_mc device drivers, which is generated by the
- * module_fsl_mc_driver() macro.
- */
-int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
- struct module *owner)
-{
- int error;
-
- mc_driver->driver.owner = owner;
- mc_driver->driver.bus = &fsl_mc_bus_type;
-
- if (mc_driver->probe)
- mc_driver->driver.probe = fsl_mc_driver_probe;
-
- if (mc_driver->remove)
- mc_driver->driver.remove = fsl_mc_driver_remove;
-
- if (mc_driver->shutdown)
- mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
-
- error = driver_register(&mc_driver->driver);
- if (error < 0) {
- pr_err("driver_register() failed for %s: %d\n",
- mc_driver->driver.name, error);
- return error;
- }
-
- pr_info("MC object device driver %s registered\n",
- mc_driver->driver.name);
- return 0;
-}
-EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
-
-/**
- * fsl_mc_driver_unregister - unregisters a device driver from the
- * MC bus
- */
-void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
-{
- driver_unregister(&mc_driver->driver);
-}
-EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
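
A sketch of the client-driver side of the registration path above, following the match-table layout used by the allocator driver earlier in this patch; the "dpni" object type, its version numbers, and the callbacks are placeholders:

static int example_dpni_probe(struct fsl_mc_device *mc_dev)
{
	dev_info(&mc_dev->dev, "example driver bound\n");
	return 0;
}

static int example_dpni_remove(struct fsl_mc_device *mc_dev)
{
	return 0;
}

static const struct fsl_mc_device_match_id example_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
		.ver_major = 6,		/* placeholder version numbers */
		.ver_minor = 0
	},
	{ .vendor = 0x0 },
};

static struct fsl_mc_driver example_dpni_driver = {
	.driver = {
		.name = "example_dpni",
		.owner = THIS_MODULE,
	},
	.match_id_table = example_match_id_table,
	.probe = example_dpni_probe,
	.remove = example_dpni_remove,
};

module_fsl_mc_driver(example_dpni_driver);
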
-
-static int get_dprc_icid(struct fsl_mc_io *mc_io,
- int container_id, u16 *icid)
-{
- u16 dprc_handle;
- struct dprc_attributes attr;
- int error;
-
- error = dprc_open(mc_io, 0, container_id, &dprc_handle);
- if (error < 0) {
- pr_err("dprc_open() failed: %d\n", error);
- return error;
- }
-
- memset(&attr, 0, sizeof(attr));
- error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
- if (error < 0) {
- pr_err("dprc_get_attributes() failed: %d\n", error);
- goto common_cleanup;
- }
-
- *icid = attr.icid;
- error = 0;
-
-common_cleanup:
- (void)dprc_close(mc_io, 0, dprc_handle);
- return error;
-}
-
-static int translate_mc_addr(enum dprc_region_type mc_region_type,
- u64 mc_offset, phys_addr_t *phys_addr)
-{
- int i;
- struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent);
-
- if (mc->num_translation_ranges == 0) {
- /*
- * Do identity mapping:
- */
- *phys_addr = mc_offset;
- return 0;
- }
-
- for (i = 0; i < mc->num_translation_ranges; i++) {
- struct fsl_mc_addr_translation_range *range =
- &mc->translation_ranges[i];
-
- if (mc_region_type == range->mc_region_type &&
- mc_offset >= range->start_mc_offset &&
- mc_offset < range->end_mc_offset) {
- *phys_addr = range->start_phys_addr +
- (mc_offset - range->start_mc_offset);
- return 0;
- }
- }
-
- return -EFAULT;
-}
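
A worked example of the lookup above, with hypothetical numbers:

/*
 * With a single range of type DPRC_REGION_TYPE_MC_PORTAL covering MC
 * offsets [0x0, 0x4000000) and start_phys_addr 0x80c000000, an MC
 * offset of 0x8000 translates to 0x80c000000 + (0x8000 - 0x0), i.e.
 * physical address 0x80c008000; an offset that falls outside every
 * range fails with -EFAULT.
 */
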
-
-static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
- struct fsl_mc_device *mc_bus_dev)
-{
- int i;
- int error;
- struct resource *regions;
- struct dprc_obj_desc *obj_desc = &mc_dev->obj_desc;
- struct device *parent_dev = mc_dev->dev.parent;
- enum dprc_region_type mc_region_type;
-
- if (strcmp(obj_desc->type, "dprc") == 0 ||
- strcmp(obj_desc->type, "dpmcp") == 0) {
- mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
- } else if (strcmp(obj_desc->type, "dpio") == 0) {
- mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
- } else {
- /*
- * This function should not have been called for this MC object
- * type, as this object type is not supposed to have MMIO
- * regions
- */
- WARN_ON(true);
- return -EINVAL;
- }
-
- regions = kmalloc_array(obj_desc->region_count,
- sizeof(regions[0]), GFP_KERNEL);
- if (!regions)
- return -ENOMEM;
-
- for (i = 0; i < obj_desc->region_count; i++) {
- struct dprc_region_desc region_desc;
-
- error = dprc_get_obj_region(mc_bus_dev->mc_io,
- 0,
- mc_bus_dev->mc_handle,
- obj_desc->type,
- obj_desc->id, i, &region_desc);
- if (error < 0) {
- dev_err(parent_dev,
- "dprc_get_obj_region() failed: %d\n", error);
- goto error_cleanup_regions;
- }
-
- WARN_ON(region_desc.size == 0);
- error = translate_mc_addr(mc_region_type,
- region_desc.base_offset,
- &regions[i].start);
- if (error < 0) {
- dev_err(parent_dev,
-				"Invalid MC offset: %#llx (for %s.%d's region %d)\n",
- region_desc.base_offset,
- obj_desc->type, obj_desc->id, i);
- goto error_cleanup_regions;
- }
-
- regions[i].end = regions[i].start + region_desc.size - 1;
- regions[i].name = "fsl-mc object MMIO region";
- regions[i].flags = IORESOURCE_IO;
- }
-
- mc_dev->regions = regions;
- return 0;
-
-error_cleanup_regions:
- kfree(regions);
- return error;
-}
-
-/**
- * Add a newly discovered MC object device to be visible in Linux
- */
-int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
- struct fsl_mc_io *mc_io,
- struct device *parent_dev,
- struct fsl_mc_device **new_mc_dev)
-{
- int error;
- struct fsl_mc_device *mc_dev = NULL;
- struct fsl_mc_bus *mc_bus = NULL;
- struct fsl_mc_device *parent_mc_dev;
-
- if (parent_dev->bus == &fsl_mc_bus_type)
- parent_mc_dev = to_fsl_mc_device(parent_dev);
- else
- parent_mc_dev = NULL;
-
- if (strcmp(obj_desc->type, "dprc") == 0) {
- /*
- * Allocate an MC bus device object:
- */
- mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL);
- if (!mc_bus)
- return -ENOMEM;
-
- mc_dev = &mc_bus->mc_dev;
- } else {
- /*
- * Allocate a regular fsl_mc_device object:
- */
- mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL);
- if (!mc_dev)
- return -ENOMEM;
- }
-
- mc_dev->obj_desc = *obj_desc;
- mc_dev->mc_io = mc_io;
- device_initialize(&mc_dev->dev);
- mc_dev->dev.parent = parent_dev;
- mc_dev->dev.bus = &fsl_mc_bus_type;
- dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
-
- if (strcmp(obj_desc->type, "dprc") == 0) {
- struct fsl_mc_io *mc_io2;
-
- mc_dev->flags |= FSL_MC_IS_DPRC;
-
- /*
- * To get the DPRC's ICID, we need to open the DPRC
- * in get_dprc_icid(). For child DPRCs, we do so using the
- * parent DPRC's MC portal instead of the child DPRC's MC
- * portal, in case the child DPRC is already opened with
- * its own portal (e.g., the DPRC used by AIOP).
- *
- * NOTE: There cannot be more than one active open for a
- * given MC object, using the same MC portal.
- */
- if (parent_mc_dev) {
- /*
- * device being added is a child DPRC device
- */
- mc_io2 = parent_mc_dev->mc_io;
- } else {
- /*
- * device being added is the root DPRC device
- */
- if (WARN_ON(!mc_io)) {
- error = -EINVAL;
- goto error_cleanup_dev;
- }
-
- mc_io2 = mc_io;
-
- if (!fsl_mc_bus_type.dev_root)
- fsl_mc_bus_type.dev_root = &mc_dev->dev;
- }
-
- error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
- if (error < 0)
- goto error_cleanup_dev;
- } else {
- /*
- * A non-DPRC MC object device has to be a child of another
- * MC object (specifically a DPRC object)
- */
- mc_dev->icid = parent_mc_dev->icid;
- mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
- mc_dev->dev.dma_mask = &mc_dev->dma_mask;
- }
-
- /*
- * Get MMIO regions for the device from the MC:
- *
- * NOTE: the root DPRC is a special case as its MMIO region is
- * obtained from the device tree
- */
- if (parent_mc_dev && obj_desc->region_count != 0) {
- error = fsl_mc_device_get_mmio_regions(mc_dev,
- parent_mc_dev);
- if (error < 0)
- goto error_cleanup_dev;
- }
-
- /*
- * The device-specific probe callback will get invoked by device_add()
- */
- error = device_add(&mc_dev->dev);
- if (error < 0) {
- dev_err(parent_dev,
- "device_add() failed for device %s: %d\n",
- dev_name(&mc_dev->dev), error);
- goto error_cleanup_dev;
- }
-
- (void)get_device(&mc_dev->dev);
- dev_dbg(parent_dev, "Added MC object device %s\n",
- dev_name(&mc_dev->dev));
-
- *new_mc_dev = mc_dev;
- return 0;
-
-error_cleanup_dev:
- kfree(mc_dev->regions);
- if (mc_bus)
- devm_kfree(parent_dev, mc_bus);
- else
- kmem_cache_free(mc_dev_cache, mc_dev);
-
- return error;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_device_add);
-
-/**
- * fsl_mc_device_remove - Remove a MC object device from being visible to
- * Linux
- *
- * @mc_dev: Pointer to a MC object device object
- */
-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
-{
- struct fsl_mc_bus *mc_bus = NULL;
-
- kfree(mc_dev->regions);
-
- /*
- * The device-specific remove callback will get invoked by device_del()
- */
- device_del(&mc_dev->dev);
- put_device(&mc_dev->dev);
-
- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
- mc_bus = to_fsl_mc_bus(mc_dev);
- if (mc_dev->mc_io) {
- fsl_destroy_mc_io(mc_dev->mc_io);
- mc_dev->mc_io = NULL;
- }
-
- if (&mc_dev->dev == fsl_mc_bus_type.dev_root)
- fsl_mc_bus_type.dev_root = NULL;
- }
-
- if (mc_bus)
- devm_kfree(mc_dev->dev.parent, mc_bus);
- else
- kmem_cache_free(mc_dev_cache, mc_dev);
-}
-EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
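
A hedged sketch of how DPRC scanning code is expected to use this pair when a child object appears in a container; the caller is hypothetical, and the NULL mc_io takes the non-DPRC branch above, which inherits the ICID and DMA mask from the parent DPRC:

static int example_add_child(struct fsl_mc_device *mc_bus_dev,
			     struct dprc_obj_desc *obj_desc)
{
	struct fsl_mc_device *child_dev;
	int error;

	error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
				  &child_dev);
	if (error < 0)
		return error;

	/* ... later, when the object disappears from the container ... */
	fsl_mc_device_remove(child_dev);
	return 0;
}
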
-
-static int parse_mc_ranges(struct device *dev,
- int *paddr_cells,
- int *mc_addr_cells,
- int *mc_size_cells,
- const __be32 **ranges_start,
- u8 *num_ranges)
-{
- const __be32 *prop;
- int range_tuple_cell_count;
- int ranges_len;
- int tuple_len;
- struct device_node *mc_node = dev->of_node;
-
- *ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
- if (!(*ranges_start) || !ranges_len) {
- dev_warn(dev,
- "missing or empty ranges property for device tree node '%s'\n",
- mc_node->name);
-
- *num_ranges = 0;
- return 0;
- }
-
- *paddr_cells = of_n_addr_cells(mc_node);
-
- prop = of_get_property(mc_node, "#address-cells", NULL);
- if (prop)
- *mc_addr_cells = be32_to_cpup(prop);
- else
- *mc_addr_cells = *paddr_cells;
-
- prop = of_get_property(mc_node, "#size-cells", NULL);
- if (prop)
- *mc_size_cells = be32_to_cpup(prop);
- else
- *mc_size_cells = of_n_size_cells(mc_node);
-
- range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
- *mc_size_cells;
-
- tuple_len = range_tuple_cell_count * sizeof(__be32);
- if (ranges_len % tuple_len != 0) {
- dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
- return -EINVAL;
- }
-
- *num_ranges = ranges_len / tuple_len;
- return 0;
-}
-
-static int
-get_mc_addr_translation_ranges(struct device *dev,
-			       struct fsl_mc_addr_translation_range **ranges,
-			       u8 *num_ranges)
-{
- int error;
- int paddr_cells;
- int mc_addr_cells;
- int mc_size_cells;
- int i;
- const __be32 *ranges_start;
- const __be32 *cell;
-
- error = parse_mc_ranges(dev,
- &paddr_cells,
- &mc_addr_cells,
- &mc_size_cells,
- &ranges_start,
- num_ranges);
- if (error < 0)
- return error;
-
- if (!(*num_ranges)) {
- /*
- * Missing or empty ranges property ("ranges;") for the
- * 'fsl,qoriq-mc' node. In this case, identity mapping
- * will be used.
- */
- *ranges = NULL;
- return 0;
- }
-
- *ranges = devm_kcalloc(dev, *num_ranges,
- sizeof(struct fsl_mc_addr_translation_range),
- GFP_KERNEL);
- if (!(*ranges))
- return -ENOMEM;
-
- cell = ranges_start;
- for (i = 0; i < *num_ranges; ++i) {
- struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
-
- range->mc_region_type = of_read_number(cell, 1);
- range->start_mc_offset = of_read_number(cell + 1,
- mc_addr_cells - 1);
- cell += mc_addr_cells;
- range->start_phys_addr = of_read_number(cell, paddr_cells);
- cell += paddr_cells;
- range->end_mc_offset = range->start_mc_offset +
- of_read_number(cell, mc_size_cells);
-
- cell += mc_size_cells;
- }
-
- return 0;
-}
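
A hypothetical device-tree fragment matching the tuple layout parsed above (one cell of MC region type plus a two-cell MC offset, a two-cell parent CPU address, and a one-cell size); the addresses and size are illustrative only:

/*
 * Region type 0 at MC offset 0x0 maps to CPU address 0x8_0c000000, 64 MiB:
 *
 *	fsl_mc: fsl-mc@80c000000 {
 *		compatible = "fsl,qoriq-mc";
 *		#address-cells = <3>;
 *		#size-cells = <1>;
 *		ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000>;
 *	};
 */
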
-
-/**
- * fsl_mc_bus_probe - callback invoked when the root MC bus is being
- * added
- */
-static int fsl_mc_bus_probe(struct platform_device *pdev)
-{
- struct dprc_obj_desc obj_desc;
- int error;
- struct fsl_mc *mc;
- struct fsl_mc_device *mc_bus_dev = NULL;
- struct fsl_mc_io *mc_io = NULL;
- int container_id;
- phys_addr_t mc_portal_phys_addr;
- u32 mc_portal_size;
- struct mc_version mc_version;
- struct resource res;
-
-	dev_info(&pdev->dev, "Root MC bus device probed\n");
-
- mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
- if (!mc)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, mc);
-
- /*
- * Get physical address of MC portal for the root DPRC:
- */
- error = of_address_to_resource(pdev->dev.of_node, 0, &res);
- if (error < 0) {
- dev_err(&pdev->dev,
- "of_address_to_resource() failed for %s\n",
- pdev->dev.of_node->full_name);
- return error;
- }
-
- mc_portal_phys_addr = res.start;
- mc_portal_size = resource_size(&res);
- error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
- mc_portal_size, NULL, 0, &mc_io);
- if (error < 0)
- return error;
-
- error = mc_get_version(mc_io, 0, &mc_version);
- if (error != 0) {
- dev_err(&pdev->dev,
- "mc_get_version() failed with error %d\n", error);
- goto error_cleanup_mc_io;
- }
-
- dev_info(&pdev->dev,
- "Freescale Management Complex Firmware version: %u.%u.%u\n",
- mc_version.major, mc_version.minor, mc_version.revision);
-
- if (mc_version.major < MC_VER_MAJOR) {
- dev_err(&pdev->dev,
- "ERROR: MC firmware version not supported by driver (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- error = -ENOTSUPP;
- goto error_cleanup_mc_io;
- }
-
- if (mc_version.major > MC_VER_MAJOR) {
- dev_warn(&pdev->dev,
- "WARNING: driver may not support newer MC firmware features (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- }
-
- error = get_mc_addr_translation_ranges(&pdev->dev,
- &mc->translation_ranges,
- &mc->num_translation_ranges);
- if (error < 0)
- goto error_cleanup_mc_io;
-
- error = dpmng_get_container_id(mc_io, 0, &container_id);
- if (error < 0) {
- dev_err(&pdev->dev,
- "dpmng_get_container_id() failed: %d\n", error);
- goto error_cleanup_mc_io;
- }
-
- obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
- strcpy(obj_desc.type, "dprc");
- obj_desc.id = container_id;
- obj_desc.ver_major = DPRC_VER_MAJOR;
- obj_desc.ver_minor = DPRC_VER_MINOR;
- obj_desc.region_count = 0;
-
- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
- if (error < 0)
- goto error_cleanup_mc_io;
-
- mc->root_mc_bus_dev = mc_bus_dev;
- return 0;
-
-error_cleanup_mc_io:
- fsl_destroy_mc_io(mc_io);
- return error;
-}
-
-/**
- * fsl_mc_bus_remove - callback invoked when the root MC bus is being
- * removed
- */
-static int fsl_mc_bus_remove(struct platform_device *pdev)
-{
- struct fsl_mc *mc = platform_get_drvdata(pdev);
-
- if (WARN_ON(&mc->root_mc_bus_dev->dev != fsl_mc_bus_type.dev_root))
- return -EINVAL;
-
- fsl_mc_device_remove(mc->root_mc_bus_dev);
-	dev_info(&pdev->dev, "Root MC bus device removed\n");
- return 0;
-}
-
-static const struct of_device_id fsl_mc_bus_match_table[] = {
- {.compatible = "fsl,qoriq-mc",},
- {},
-};
-
-MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
-
-static struct platform_driver fsl_mc_bus_driver = {
- .driver = {
- .name = "fsl_mc_bus",
- .pm = NULL,
- .of_match_table = fsl_mc_bus_match_table,
- },
- .probe = fsl_mc_bus_probe,
- .remove = fsl_mc_bus_remove,
-};
-
-static int __init fsl_mc_bus_driver_init(void)
-{
- int error;
-
- mc_dev_cache = kmem_cache_create("fsl_mc_device",
- sizeof(struct fsl_mc_device), 0, 0,
- NULL);
- if (!mc_dev_cache) {
- pr_err("Could not create fsl_mc_device cache\n");
- return -ENOMEM;
- }
-
- error = bus_register(&fsl_mc_bus_type);
- if (error < 0) {
- pr_err("fsl-mc bus type registration failed: %d\n", error);
- goto error_cleanup_cache;
- }
-
- pr_info("fsl-mc bus type registered\n");
-
- error = platform_driver_register(&fsl_mc_bus_driver);
- if (error < 0) {
- pr_err("platform_driver_register() failed: %d\n", error);
- goto error_cleanup_bus;
- }
-
- error = dprc_driver_init();
- if (error < 0)
- goto error_cleanup_driver;
-
- error = fsl_mc_allocator_driver_init();
- if (error < 0)
- goto error_cleanup_dprc_driver;
-
- return 0;
-
-error_cleanup_dprc_driver:
- dprc_driver_exit();
-
-error_cleanup_driver:
- platform_driver_unregister(&fsl_mc_bus_driver);
-
-error_cleanup_bus:
- bus_unregister(&fsl_mc_bus_type);
-
-error_cleanup_cache:
- kmem_cache_destroy(mc_dev_cache);
- return error;
-}
-
-postcore_initcall(fsl_mc_bus_driver_init);
-
-static void __exit fsl_mc_bus_driver_exit(void)
-{
- if (WARN_ON(!mc_dev_cache))
- return;
-
- fsl_mc_allocator_driver_exit();
- dprc_driver_exit();
- platform_driver_unregister(&fsl_mc_bus_driver);
- bus_unregister(&fsl_mc_bus_type);
- kmem_cache_destroy(mc_dev_cache);
- pr_info("MC bus unregistered\n");
-}
-
-module_exit(fsl_mc_bus_driver_exit);
-
-MODULE_AUTHOR("Freescale Semiconductor Inc.");
-MODULE_DESCRIPTION("Freescale Management Complex (MC) bus driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
deleted file mode 100644
index b58b53fe165c..000000000000
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
- *
- * I/O services to send MC commands to the MC hardware
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-
-/**
- * Timeout in jiffies to wait for the completion of an MC command
- */
-#define MC_CMD_COMPLETION_TIMEOUT_JIFFIES (HZ / 2) /* 500 ms */
-
-/*
- * usleep_range() min and max values used to throttle down polling
- * iterations while waiting for MC command completion
- */
-#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
-#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
-
-#define MC_CMD_HDR_READ_CMDID(_hdr) \
- ((u16)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S))
-
-/**
- * Creates an MC I/O object
- *
- * @dev: device to be associated with the MC I/O object
- * @mc_portal_phys_addr: physical address of the MC portal to use
- * @mc_portal_size: size in bytes of the MC portal
- * @resource: Pointer to MC bus object allocator resource associated
- * with this MC I/O object or NULL if none.
- * @flags: flags for the new MC I/O object
- * @new_mc_io: Area to return pointer to newly created MC I/O object
- *
- * Returns '0' on Success; Error code otherwise.
- */
-int __must_check fsl_create_mc_io(struct device *dev,
- phys_addr_t mc_portal_phys_addr,
- u32 mc_portal_size,
- struct fsl_mc_resource *resource,
- u32 flags, struct fsl_mc_io **new_mc_io)
-{
- struct fsl_mc_io *mc_io;
- void __iomem *mc_portal_virt_addr;
- struct resource *res;
-
- mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
- if (!mc_io)
- return -ENOMEM;
-
- mc_io->dev = dev;
- mc_io->flags = flags;
- mc_io->portal_phys_addr = mc_portal_phys_addr;
- mc_io->portal_size = mc_portal_size;
- mc_io->resource = resource;
- res = devm_request_mem_region(dev,
- mc_portal_phys_addr,
- mc_portal_size,
- "mc_portal");
- if (!res) {
- dev_err(dev,
- "devm_request_mem_region failed for MC portal %#llx\n",
- mc_portal_phys_addr);
- return -EBUSY;
- }
-
- mc_portal_virt_addr = devm_ioremap_nocache(dev,
- mc_portal_phys_addr,
- mc_portal_size);
- if (!mc_portal_virt_addr) {
- dev_err(dev,
- "devm_ioremap_nocache failed for MC portal %#llx\n",
- mc_portal_phys_addr);
- return -ENXIO;
- }
-
- mc_io->portal_virt_addr = mc_portal_virt_addr;
- *new_mc_io = mc_io;
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_create_mc_io);
-
-/**
- * Destroys an MC I/O object
- *
- * @mc_io: MC I/O object to destroy
- */
-void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
-{
- devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
- devm_release_mem_region(mc_io->dev,
- mc_io->portal_phys_addr,
- mc_io->portal_size);
-
- mc_io->portal_virt_addr = NULL;
- devm_kfree(mc_io->dev, mc_io);
-}
-EXPORT_SYMBOL_GPL(fsl_destroy_mc_io);
-
-static int mc_status_to_error(enum mc_cmd_status status)
-{
- static const int mc_status_to_error_map[] = {
- [MC_CMD_STATUS_OK] = 0,
- [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
- [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
- [MC_CMD_STATUS_DMA_ERR] = -EIO,
- [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
- [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
- [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
- [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
- [MC_CMD_STATUS_BUSY] = -EBUSY,
- [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
- [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
- };
-
- if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map)))
- return -EINVAL;
-
- return mc_status_to_error_map[status];
-}
-
-static const char *mc_status_to_string(enum mc_cmd_status status)
-{
- static const char *const status_strings[] = {
- [MC_CMD_STATUS_OK] = "Command completed successfully",
- [MC_CMD_STATUS_READY] = "Command ready to be processed",
- [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
- [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
- [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
- [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
- [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
- [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
- [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
- [MC_CMD_STATUS_BUSY] = "Device is busy",
- [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
- [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
- };
-
- if ((unsigned int)status >= ARRAY_SIZE(status_strings))
- return "Unknown MC error";
-
- return status_strings[status];
-}
-
-/**
- * mc_write_command - writes a command to a Management Complex (MC) portal
- *
- * @portal: pointer to an MC portal
- * @cmd: pointer to a filled command
- */
-static inline void mc_write_command(struct mc_command __iomem *portal,
- struct mc_command *cmd)
-{
- int i;
-
- /* copy command parameters into the portal */
- for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
- writeq(cmd->params[i], &portal->params[i]);
-
- /* submit the command by writing the header */
- writeq(cmd->header, &portal->header);
-}
-
-/**
- * mc_read_response - reads the response for the last MC command from a
- * Management Complex (MC) portal
- *
- * @portal: pointer to an MC portal
- * @resp: pointer to command response buffer
- *
- * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
- */
-static inline enum mc_cmd_status
-mc_read_response(struct mc_command __iomem *portal, struct mc_command *resp)
-{
- int i;
- enum mc_cmd_status status;
-
- /* Copy command response header from MC portal: */
- resp->header = readq(&portal->header);
- status = MC_CMD_HDR_READ_STATUS(resp->header);
- if (status != MC_CMD_STATUS_OK)
- return status;
-
- /* Copy command response data from MC portal: */
- for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
- resp->params[i] = readq(&portal->params[i]);
-
- return status;
-}
-
-/**
- * Sends a command to the MC device using the given MC I/O object
- *
- * @mc_io: MC I/O object to be used
- * @cmd: command to be sent
- *
- * Returns '0' on Success; Error code otherwise.
- *
- * NOTE: This function cannot be invoked from atomic contexts.
- */
-int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
-{
- enum mc_cmd_status status;
- unsigned long jiffies_until_timeout =
- jiffies + MC_CMD_COMPLETION_TIMEOUT_JIFFIES;
-
- /*
- * Send command to the MC hardware:
- */
- mc_write_command(mc_io->portal_virt_addr, cmd);
-
- /*
- * Wait for response from the MC hardware:
- */
- for (;;) {
- status = mc_read_response(mc_io->portal_virt_addr, cmd);
- if (status != MC_CMD_STATUS_READY)
- break;
-
- /*
- * TODO: When MC command completion interrupts are supported
- * call wait function here instead of usleep_range()
- */
- usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
- MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
-
- if (time_after_eq(jiffies, jiffies_until_timeout)) {
- pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
- mc_io->portal_phys_addr,
- (unsigned int)
- MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)
- MC_CMD_HDR_READ_CMDID(cmd->header));
-
- return -ETIMEDOUT;
- }
- }
-
- if (status != MC_CMD_STATUS_OK) {
- pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
- mc_io->portal_phys_addr,
- (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header),
- mc_status_to_string(status),
- (unsigned int)status);
-
- return mc_status_to_error(status);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mc_send_command);
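
A hedged sketch of the command flow that the dp*() wrappers in this tree build on top of mc_send_command(); it assumes the mc_encode_cmd_header() and mc_enc() helpers and the struct mc_command layout from mc-cmd.h, and uses DPBP_CMDID_OPEN purely as an example:

static int example_dpbp_open(struct fsl_mc_io *mc_io, int dpbp_id, u16 *token)
{
	struct mc_command cmd = { 0 };
	int error;

	/* command header: command id, flags, no token yet */
	cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN, 0, 0);
	/* command body: the id of the DPBP object to open */
	cmd.params[0] |= mc_enc(0, 32, dpbp_id);

	error = mc_send_command(mc_io, &cmd);
	if (error < 0)
		return error;

	/* the MC returns the object handle in the response header */
	*token = MC_CMD_HDR_READ_TOKEN(cmd.header);
	return 0;
}
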
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
deleted file mode 100644
index efa9bf33c1a5..000000000000
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-#ifndef _FSL_DPBP_CMD_H
-#define _FSL_DPBP_CMD_H
-
-/* DPBP Version */
-#define DPBP_VER_MAJOR 2
-#define DPBP_VER_MINOR 1
-
-/* Command IDs */
-#define DPBP_CMDID_CLOSE 0x800
-#define DPBP_CMDID_OPEN 0x804
-#define DPBP_CMDID_CREATE 0x904
-#define DPBP_CMDID_DESTROY 0x900
-
-#define DPBP_CMDID_ENABLE 0x002
-#define DPBP_CMDID_DISABLE 0x003
-#define DPBP_CMDID_GET_ATTR 0x004
-#define DPBP_CMDID_RESET 0x005
-#define DPBP_CMDID_IS_ENABLED 0x006
-
-#define DPBP_CMDID_SET_IRQ 0x010
-#define DPBP_CMDID_GET_IRQ 0x011
-#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
-#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
-#define DPBP_CMDID_SET_IRQ_MASK 0x014
-#define DPBP_CMDID_GET_IRQ_MASK 0x015
-#define DPBP_CMDID_GET_IRQ_STATUS 0x016
-#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#endif /* _FSL_DPBP_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
deleted file mode 100644
index 37ed951436d5..000000000000
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_DPBP_H
-#define __FSL_DPBP_H
-
-/* Data Path Buffer Pool API
- * Contains initialization APIs and runtime control APIs for DPBP
- */
-
-struct fsl_mc_io;
-
-int dpbp_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dpbp_id,
- u16 *token);
-
-int dpbp_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-/**
- * struct dpbp_cfg - Structure representing DPBP configuration
- * @options: place holder
- */
-struct dpbp_cfg {
- u32 options;
-};
-
-int dpbp_create(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- const struct dpbp_cfg *cfg,
- u16 *token);
-
-int dpbp_destroy(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dpbp_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dpbp_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *en);
-
-int dpbp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-/**
- * struct dpbp_irq_cfg - IRQ configuration
- * @addr: Address that must be written to signal a message-based interrupt
- * @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
- */
-struct dpbp_irq_cfg {
- u64 addr;
- u32 val;
- int user_irq_id;
-};
-
-int dpbp_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dpbp_irq_cfg *irq_cfg);
-
-int dpbp_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dpbp_irq_cfg *irq_cfg);
-
-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
-
-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
-
-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
-
-/**
- * struct dpbp_attr - Structure representing DPBP attributes
- * @id: DPBP object ID
- * @version: DPBP version
- * @bpid: Hardware buffer pool ID; should be used as an argument in
- * acquire/release operations on buffers
- */
-struct dpbp_attr {
- int id;
- /**
- * struct version - Structure representing DPBP version
- * @major: DPBP major version
- * @minor: DPBP minor version
- */
- struct {
- u16 major;
- u16 minor;
- } version;
- u16 bpid;
-};
-
-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dpbp_attr *attr);
-
-/** @} */
-
-#endif /* __FSL_DPBP_H */
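
A short sketch of a typical DPBP bring-up sequence using the API declared above; the caller is hypothetical and the error handling is trimmed to the essentials:

static int example_setup_dpbp(struct fsl_mc_io *mc_io, int dpbp_id, u16 *bpid)
{
	struct dpbp_attr attr;
	u16 token;
	int error;

	error = dpbp_open(mc_io, 0, dpbp_id, &token);
	if (error)
		return error;

	error = dpbp_enable(mc_io, 0, token);
	if (error)
		goto out_close;

	error = dpbp_get_attributes(mc_io, 0, token, &attr);
	if (error)
		goto out_disable;

	*bpid = attr.bpid;	/* hardware pool id for acquire/release */
	goto out_close;

out_disable:
	dpbp_disable(mc_io, 0, token);
out_close:
	dpbp_close(mc_io, 0, token);
	return error;
}
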
diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/include/dpcon-cmd.h
deleted file mode 100644
index 261702496ca8..000000000000
--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
-#ifndef _FSL_DPCON_CMD_H
-#define _FSL_DPCON_CMD_H
-
-/* DPCON Version */
-#define DPCON_VER_MAJOR 2
-#define DPCON_VER_MINOR 1
-
-/* Command IDs */
-#define DPCON_CMDID_CLOSE 0x800
-#define DPCON_CMDID_OPEN 0x808
-#define DPCON_CMDID_CREATE 0x908
-#define DPCON_CMDID_DESTROY 0x900
-
-#define DPCON_CMDID_ENABLE 0x002
-#define DPCON_CMDID_DISABLE 0x003
-#define DPCON_CMDID_GET_ATTR 0x004
-#define DPCON_CMDID_RESET 0x005
-#define DPCON_CMDID_IS_ENABLED 0x006
-
-#define DPCON_CMDID_SET_IRQ 0x010
-#define DPCON_CMDID_GET_IRQ 0x011
-#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
-#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
-#define DPCON_CMDID_SET_IRQ_MASK 0x014
-#define DPCON_CMDID_GET_IRQ_MASK 0x015
-#define DPCON_CMDID_GET_IRQ_STATUS 0x016
-#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#define DPCON_CMDID_SET_NOTIFICATION 0x100
-
-#endif /* _FSL_DPCON_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
deleted file mode 100644
index e5cfd017f9a5..000000000000
--- a/drivers/staging/fsl-mc/include/dpmng.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_DPMNG_H
-#define __FSL_DPMNG_H
-
-/* Management Complex General API
- * Contains general API for the Management Complex firmware
- */
-
-struct fsl_mc_io;
-
-/**
- * Management Complex firmware version information
- */
-#define MC_VER_MAJOR 8
-#define MC_VER_MINOR 0
-
-/**
- * struct mc_version
- * @major: Major version number: incremented on API compatibility changes
- * @minor: Minor version number: incremented on API additions (that are
- * backward compatible); reset when major version is incremented
- * @revision: Internal revision number: incremented on implementation changes
- * and/or bug fixes that have no impact on API
- */
-struct mc_version {
- u32 major;
- u32 minor;
- u32 revision;
-};
-
-int mc_get_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- struct mc_version *mc_ver_info);
-
-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int *container_id);
-
-#endif /* __FSL_DPMNG_H */
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
deleted file mode 100644
index c3152f677ff1..000000000000
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ /dev/null
@@ -1,539 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _FSL_DPRC_H
-#define _FSL_DPRC_H
-
-#include "mc-cmd.h"
-
-/* Data Path Resource Container API
- * Contains DPRC API for managing and querying DPAA resources
- */
-
-struct fsl_mc_io;
-
-/**
- * Set this value as the icid value in dprc_cfg structure when creating a
- * container, in case the ICID is not selected by the user and should be
- * allocated by the DPRC from the pool of ICIDs.
- */
-#define DPRC_GET_ICID_FROM_POOL (u16)(~(0))
-
-/**
- * Set this value as the portal_id value in dprc_cfg structure when creating a
- * container, in case the portal ID is not specifically selected by the
- * user and should be allocated by the DPRC from the pool of portal ids.
- */
-#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
-
-int dprc_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int container_id,
- u16 *token);
-
-int dprc_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-/**
- * Container general options
- *
- * These options may be selected at container creation by the container creator
- * and can be retrieved using dprc_get_attributes()
- */
-
-/* Spawn Policy Option allowed - Indicates that the new container is allowed
- * to spawn and have its own child containers.
- */
-#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
-
-/* General Container allocation policy - Indicates that the new container is
- * allowed to allocate requested resources from its parent container; if not
- * set, the container is only allowed to use resources in its own pools; Note
- * that this is a container's global policy, but the parent container may
- * override it and set specific quota per resource type.
- */
-#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
-
-/* Object initialization allowed - software context associated with this
- * container is allowed to invoke object initialization operations.
- */
-#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
-
-/* Topology change allowed - software context associated with this
- * container is allowed to invoke topology operations, such as attach/detach
- * of network objects.
- */
-#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
-
-/* IOMMU bypass - indicates whether objects of this container are permitted
- * to bypass the IOMMU.
- */
-#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010
-
-/* AIOP - Indicates that the container belongs to the AIOP. */
-#define DPRC_CFG_OPT_AIOP 0x00000020
-
-/* IRQ Config - Indicates that the container is allowed to configure its IRQs. */
-#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
-
-/**
- * struct dprc_cfg - Container configuration options
- * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
- * ICID value is allocated by the DPRC
- * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
- * portal ID is allocated by the DPRC
- * @options: Combination of 'DPRC_CFG_OPT_<X>' options
- * @label: Object's label
- */
-struct dprc_cfg {
- u16 icid;
- int portal_id;
- u64 options;
- char label[16];
-};
-
-int dprc_create_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_cfg *cfg,
- int *child_container_id,
- u64 *child_portal_offset);
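
A hedged sketch of how struct dprc_cfg feeds dprc_create_container(): open the parent DPRC for a token, describe the child, create it, and close the token again. The label, option bits and cmd_flags of 0 below are illustrative placeholders, not required values:

static int example_create_child_dprc(struct fsl_mc_io *mc_io, int parent_id)
{
	struct dprc_cfg cfg = { 0 };
	u64 child_portal_offset;
	int child_id;
	u16 token;
	int error;

	/* Let the DPRC allocate the ICID and portal from its own pools */
	cfg.icid = DPRC_GET_ICID_FROM_POOL;
	cfg.portal_id = DPRC_GET_PORTAL_ID_FROM_POOL;
	cfg.options = DPRC_CFG_OPT_SPAWN_ALLOWED | DPRC_CFG_OPT_ALLOC_ALLOWED;
	strscpy(cfg.label, "child-dprc", sizeof(cfg.label));

	error = dprc_open(mc_io, 0, parent_id, &token);
	if (error < 0)
		return error;

	error = dprc_create_container(mc_io, 0, token, &cfg,
				      &child_id, &child_portal_offset);

	/* Close the parent's token whether or not creation succeeded */
	dprc_close(mc_io, 0, token);
	return error;
}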
-
-int dprc_destroy_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id);
-
-int dprc_reset_container(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id);
-
-/* IRQ */
-
-/* IRQ index */
-#define DPRC_IRQ_INDEX 0
-
-/* Number of dprc's IRQs */
-#define DPRC_NUM_OF_IRQS 1
-
-/* DPRC IRQ events */
-
-/* IRQ event - Indicates that a new object was added to the container */
-#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
-/* IRQ event - Indicates that an object was removed from the container */
-#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
-/* IRQ event - Indicates that resources were added to the container */
-#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
-/* IRQ event - Indicates that resources were removed from the container */
-#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
-/* IRQ event - Indicates that one of the descendant containers opened by
- * this container was destroyed
- */
-#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
-
-/* IRQ event - Indicates that one of the objects opened by this container
- * was destroyed
- */
-#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
-
-/* IRQ event - Indicates that an object was created in the container */
-#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
-
-/**
- * struct dprc_irq_cfg - IRQ configuration
- * @paddr: Address that must be written to signal a message-based interrupt
- * @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
- */
-struct dprc_irq_cfg {
- u64 paddr;
- u32 val;
- int user_irq_id;
-};
-
-int dprc_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
-
-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
-
-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
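
The IRQ calls above are meant to be used as a set: point the message-based interrupt at an MSI address/data pair, unmask the DPRC_IRQ_EVENT_* bits of interest, then enable the IRQ. A minimal sketch, under the assumptions that a set mask bit unmasks the corresponding event and that the MSI values and token were obtained elsewhere:

static int example_arm_dprc_irq(struct fsl_mc_io *mc_io, u16 token,
				u64 msi_paddr, u32 msi_data, int virq)
{
	struct dprc_irq_cfg irq_cfg;
	int error;

	/* The MC writes 'val' to 'paddr' to signal this interrupt */
	irq_cfg.paddr = msi_paddr;
	irq_cfg.val = msi_data;
	irq_cfg.user_irq_id = virq;

	error = dprc_set_irq(mc_io, 0, token, DPRC_IRQ_INDEX, &irq_cfg);
	if (error < 0)
		return error;

	/* Only object add/remove events are of interest in this sketch */
	error = dprc_set_irq_mask(mc_io, 0, token, DPRC_IRQ_INDEX,
				  DPRC_IRQ_EVENT_OBJ_ADDED |
				  DPRC_IRQ_EVENT_OBJ_REMOVED);
	if (error < 0)
		return error;

	return dprc_set_irq_enable(mc_io, 0, token, DPRC_IRQ_INDEX, 1);
}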
-
-/**
- * struct dprc_attributes - Container attributes
- * @container_id: Container's ID
- * @icid: Container's ICID
- * @portal_id: Container's portal ID
- * @options: Container's options as set at container's creation
- * @version: DPRC version
- */
-struct dprc_attributes {
- int container_id;
- u16 icid;
- int portal_id;
- u64 options;
- /**
- * struct version - DPRC version
- * @major: DPRC major version
- * @minor: DPRC minor version
- */
- struct {
- u16 major;
- u16 minor;
- } version;
-};
-
-int dprc_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_attributes *attributes);
-
-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 quota);
-
-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- char *type,
- u16 *quota);
-
-/* Resource request options */
-
-/* Explicit resource ID request - The requested objects/resources
- * are explicit and sequential (in the case of resources).
- * The base ID is given in the id_base_align field of struct dprc_res_req
- */
-#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
-
-/* Aligned resources request - Relevant only for resource
- * requests (not objects). Indicates that the resources' base ID should be
- * sequential and aligned to the value given in the id_base_align field of
- * struct dprc_res_req
- */
-#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
-
-/* Plugged Flag - Relevant only for object assignment requests.
- * Indicates that once all objects are assigned, an interrupt is raised at
- * the relevant GPP and the assigned objects are marked as plugged.
- * Plugged objects cannot be assigned from their container.
- */
-#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
-
-/**
- * struct dprc_res_req - Resource request descriptor, to be used in assignment
- * or un-assignment of resources and objects.
- * @type: Resource/object type, represented as a NULL terminated string.
- * This string may be obtained by using dprc_get_pool() to get a resource
- * type and dprc_get_obj() to get an object type;
- * Note: it is not possible to assign/un-assign DPRC objects
- * @num: Number of resources
- * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
- * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
- * is set at option), this field represents the required base ID
- * for resource allocation; In case of aligned assignment
- * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field
- * indicates the required alignment for the resource ID(s) -
- * use 0 if there is no alignment or explicit ID requirements
- */
-struct dprc_res_req {
- char type[16];
- u32 num;
- u32 options;
- int id_base_align;
-};
-
-int dprc_assign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int container_id,
- struct dprc_res_req *res_req);
-
-int dprc_unassign(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int child_container_id,
- struct dprc_res_req *res_req);
-
-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *pool_count);
-
-int dprc_get_pool(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int pool_index,
- char *type);
-
-int dprc_get_obj_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *obj_count);
-
-/* Objects Attributes Flags */
-
-/* Opened state - Indicates that an object is open by at least one owner */
-#define DPRC_OBJ_STATE_OPEN 0x00000001
-/* Plugged state - Indicates that the object is plugged */
-#define DPRC_OBJ_STATE_PLUGGED 0x00000002
-
-/**
- * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
- * @type: Type of object: NULL terminated string
- * @id: ID of logical object resource
- * @vendor: Object vendor identifier
- * @ver_major: Major version number
- * @ver_minor: Minor version number
- * @irq_count: Number of interrupts supported by the object
- * @region_count: Number of mappable regions supported by the object
- * @state: Object state: combination of DPRC_OBJ_STATE_ states
- * @label: Object label
- */
-struct dprc_obj_desc {
- char type[16];
- int id;
- u16 vendor;
- u16 ver_major;
- u16 ver_minor;
- u8 irq_count;
- u8 region_count;
- u32 state;
- char label[16];
-};
-
-int dprc_get_obj(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int obj_index,
- struct dprc_obj_desc *obj_desc);
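
Since dprc_get_obj() is index-based, enumerating a container reduces to a count-then-loop pattern. A sketch (the cmd_flags of 0 and the pr_info() reporting are placeholders):

static int example_dump_objects(struct fsl_mc_io *mc_io, u16 token)
{
	struct dprc_obj_desc obj_desc;
	int obj_count;
	int error;
	int i;

	error = dprc_get_obj_count(mc_io, 0, token, &obj_count);
	if (error < 0)
		return error;

	for (i = 0; i < obj_count; i++) {
		error = dprc_get_obj(mc_io, 0, token, i, &obj_desc);
		if (error < 0)
			continue;	/* skip objects we cannot read */

		pr_info("%s.%d v%u.%u: %u IRQs, %u regions, %s\n",
			obj_desc.type, obj_desc.id,
			obj_desc.ver_major, obj_desc.ver_minor,
			obj_desc.irq_count, obj_desc.region_count,
			(obj_desc.state & DPRC_OBJ_STATE_PLUGGED) ?
				"plugged" : "unplugged");
	}

	return 0;
}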
-
-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- struct dprc_obj_desc *obj_desc);
-
-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_res_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- int *res_count);
-
-/**
- * enum dprc_iter_status - Iteration status
- * @DPRC_ITER_STATUS_FIRST: Perform first iteration
- * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed
- * @DPRC_ITER_STATUS_LAST: Indicates last iteration
- */
-enum dprc_iter_status {
- DPRC_ITER_STATUS_FIRST = 0,
- DPRC_ITER_STATUS_MORE = 1,
- DPRC_ITER_STATUS_LAST = 2
-};
-
-/**
- * struct dprc_res_ids_range_desc - Resource ID range descriptor
- * @base_id: Base resource ID of this range
- * @last_id: Last resource ID of this range
- * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
- * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
- * additional iterations are needed, until the returned marker is
- * DPRC_ITER_STATUS_LAST
- */
-struct dprc_res_ids_range_desc {
- int base_id;
- int last_id;
- enum dprc_iter_status iter_status;
-};
-
-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- struct dprc_res_ids_range_desc *range_desc);
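
The iterator contract documented for dprc_res_ids_range_desc (start at DPRC_ITER_STATUS_FIRST, keep calling while the MC reports DPRC_ITER_STATUS_MORE, stop at DPRC_ITER_STATUS_LAST) looks like this in practice. A sketch; res_type would be a pool name such as one returned by dprc_get_pool():

static int example_walk_res_ids(struct fsl_mc_io *mc_io, u16 token,
				char *res_type)
{
	struct dprc_res_ids_range_desc range;
	int error;

	memset(&range, 0, sizeof(range));
	range.iter_status = DPRC_ITER_STATUS_FIRST;

	do {
		error = dprc_get_res_ids(mc_io, 0, token, res_type, &range);
		if (error < 0)
			return error;

		pr_info("%s: resource IDs %d..%d\n", res_type,
			range.base_id, range.last_id);
	} while (range.iter_status == DPRC_ITER_STATUS_MORE);

	return 0;
}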
-
-/* Region flags */
-/* Cacheable - Indicates that region should be mapped as cacheable */
-#define DPRC_REGION_CACHEABLE 0x00000001
-
-/**
- * enum dprc_region_type - Region type
- * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
- * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
- */
-enum dprc_region_type {
- DPRC_REGION_TYPE_MC_PORTAL,
- DPRC_REGION_TYPE_QBMAN_PORTAL
-};
-
-/**
- * struct dprc_region_desc - Mappable region descriptor
- * @base_offset: Region offset from region's base address.
- * For DPMCP and DPRC objects, region base is offset from SoC MC portals
- * base address; For DPIO, region base is offset from SoC QMan portals
- * base address
- * @size: Region size (in bytes)
- * @flags: Region attributes
- * @type: Portal region type
- */
-struct dprc_region_desc {
- u32 base_offset;
- u32 size;
- u32 flags;
- enum dprc_region_type type;
-};
-
-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 region_index,
- struct dprc_region_desc *region_desc);
-
-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- char *label);
-
-/**
- * struct dprc_endpoint - Endpoint description for link connect/disconnect
- * operations
- * @type: Endpoint object type: NULL terminated string
- * @id: Endpoint object ID
- * @if_id: Interface ID; should be set for endpoints with multiple
- * interfaces ("dpsw", "dpdmux"); for others, always set to 0
- */
-struct dprc_endpoint {
- char type[16];
- int id;
- int if_id;
-};
-
-/**
- * struct dprc_connection_cfg - Connection configuration.
- * Used for virtual connections only
- * @committed_rate: Committed rate (Mbits/s)
- * @max_rate: Maximum rate (Mbits/s)
- */
-struct dprc_connection_cfg {
- u32 committed_rate;
- u32 max_rate;
-};
-
-int dprc_connect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- const struct dprc_endpoint *endpoint2,
- const struct dprc_connection_cfg *cfg);
-
-int dprc_disconnect(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint);
-
-int dprc_get_connection(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- const struct dprc_endpoint *endpoint1,
- struct dprc_endpoint *endpoint2,
- int *state);
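
Link management is symmetric: fill two dprc_endpoint descriptors, pass a connection cfg (whose rates matter only for virtual links), and optionally read the connection back. A hypothetical sketch — the "dpni"/"dpsw" type strings are illustrative, and a zeroed cfg stands in for whatever shaping a real caller would want:

static int example_connect_to_switch(struct fsl_mc_io *mc_io, u16 token,
				     int dpni_id, int dpsw_id, int sw_if_id)
{
	struct dprc_endpoint ep1 = { { 0 } };
	struct dprc_endpoint ep2 = { { 0 } };
	struct dprc_connection_cfg cfg = { 0 };
	int state;
	int error;

	strscpy(ep1.type, "dpni", sizeof(ep1.type));
	ep1.id = dpni_id;
	ep1.if_id = 0;			/* single-interface endpoint */

	strscpy(ep2.type, "dpsw", sizeof(ep2.type));
	ep2.id = dpsw_id;
	ep2.if_id = sw_if_id;		/* multi-interface endpoint needs if_id */

	error = dprc_connect(mc_io, 0, token, &ep1, &ep2, &cfg);
	if (error < 0)
		return error;

	/* Read the link back to confirm it is established */
	return dprc_get_connection(mc_io, 0, token, &ep1, &ep2, &state);
}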
-
-#endif /* _FSL_DPRC_H */
-
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
deleted file mode 100644
index 65277e3de44d..000000000000
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_MC_CMD_H
-#define __FSL_MC_CMD_H
-
-#define MC_CMD_NUM_OF_PARAMS 7
-
-#define MAKE_UMASK64(_width) \
- ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : -1))
-
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (u64)(((u64)val & MAKE_UMASK64(width)) << lsoffset);
-}
-
-static inline u64 mc_dec(u64 val, int lsoffset, int width)
-{
- return (u64)((val >> lsoffset) & MAKE_UMASK64(width));
-}
-
-struct mc_command {
- u64 header;
- u64 params[MC_CMD_NUM_OF_PARAMS];
-};
-
-enum mc_cmd_status {
- MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
- MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
- MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
- MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
- MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
- MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
- MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
- MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
- MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
- MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
- MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
- MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
-};
-
-/*
- * MC command flags
- */
-
-/* High priority flag */
-#define MC_CMD_FLAG_PRI 0x00008000
-/* Command completion flag */
-#define MC_CMD_FLAG_INTR_DIS 0x01000000
-
-/*
- * TODO Remove following two defines after completion of flib 8.0.0
- * integration
- */
-#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */
-#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */
-
-#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */
-#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */
-#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */
-#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */
-#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */
-#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/
-#define MC_CMD_HDR_FLAGS_O 0 /* Flags field offset */
-#define MC_CMD_HDR_FLAGS_S 32 /* Flags field size*/
-#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 /* Command flags mask */
-
-#define MC_CMD_HDR_READ_STATUS(_hdr) \
- ((enum mc_cmd_status)mc_dec((_hdr), \
- MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
-
-#define MC_CMD_HDR_READ_TOKEN(_hdr) \
- ((u16)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
-
-#define MC_CMD_HDR_READ_FLAGS(_hdr) \
- ((u32)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S))
-
-#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
- ((_ext)[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
- ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
- (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
-
-static inline u64 mc_encode_cmd_header(u16 cmd_id,
- u32 cmd_flags,
- u16 token)
-{
- u64 hdr;
-
- hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
- hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
- (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
- hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
- hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
- MC_CMD_STATUS_READY);
-
- return hdr;
-}
-
-#endif /* __FSL_MC_CMD_H */
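
Everything in this header composes into one pattern: encode a 64-bit header, pack parameters with MC_CMD_OP(), send the command, then decode results out of the returned words. A sketch of a hypothetical "open"-style command — the command ID and the offset/width of the object-id field are invented for illustration and do not correspond to a real MC command:

/* Hypothetical command ID, for illustration only */
#define EXAMPLE_CMDID_OPEN	0x805

static int example_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
			int object_id, u16 *token)
{
	struct mc_command cmd = { 0 };
	int error;

	/* Header carries the command ID, the flags and the READY status */
	cmd.header = mc_encode_cmd_header(EXAMPLE_CMDID_OPEN, cmd_flags, 0);

	/* Pack the object ID into bits 0..31 of parameter word 0 */
	MC_CMD_OP(cmd, 0, 0, 32, int, object_id);

	error = mc_send_command(mc_io, &cmd);
	if (error < 0)
		return error;

	/* The MC writes the new token back into the header on completion */
	*token = MC_CMD_HDR_READ_TOKEN(cmd.header);

	return 0;
}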
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
deleted file mode 100644
index 2c4cc794be7a..000000000000
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Freescale Management Complex (MC) bus private declarations
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#ifndef _FSL_MC_PRIVATE_H_
-#define _FSL_MC_PRIVATE_H_
-
-#include "../include/mc.h"
-#include <linux/mutex.h>
-#include <linux/stringify.h>
-
-#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
-
-#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \
- (strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \
- (_mc_dev)->obj_desc.id == (_obj_desc)->id)
-
-#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
- (strcmp(_obj_type, "dpbp") == 0 || \
- strcmp(_obj_type, "dpmcp") == 0 || \
- strcmp(_obj_type, "dpcon") == 0)
-
-/**
- * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
- * @root_mc_bus_dev: MC object device representing the root DPRC
- * @num_translation_ranges: number of entries in addr_translation_ranges
- * @translation_ranges: array of bus to system address translation ranges
- */
-struct fsl_mc {
- struct fsl_mc_device *root_mc_bus_dev;
- u8 num_translation_ranges;
- struct fsl_mc_addr_translation_range *translation_ranges;
-};
-
-/**
- * struct fsl_mc_addr_translation_range - bus to system address translation
- * range
- * @mc_region_type: Type of MC region for the range being translated
- * @start_mc_offset: Start MC offset of the range being translated
- * @end_mc_offset: MC offset of the first byte after the range (last MC
- * offset of the range is end_mc_offset - 1)
- * @start_phys_addr: system physical address corresponding to start_mc_offset
- */
-struct fsl_mc_addr_translation_range {
- enum dprc_region_type mc_region_type;
- u64 start_mc_offset;
- u64 end_mc_offset;
- phys_addr_t start_phys_addr;
-};
-
-/**
- * struct fsl_mc_resource_pool - Pool of MC resources of a given
- * type
- * @type: type of resources in the pool
- * @max_count: maximum number of resources in the pool
- * @free_count: number of free resources in the pool
- * @mutex: mutex to serialize access to the pool's free list
- * @free_list: anchor node of list of free resources in the pool
- * @mc_bus: pointer to the MC bus that owns this resource pool
- */
-struct fsl_mc_resource_pool {
- enum fsl_mc_pool_type type;
- int16_t max_count;
- int16_t free_count;
- struct mutex mutex; /* serializes access to free_list */
- struct list_head free_list;
- struct fsl_mc_bus *mc_bus;
-};
-
-/**
- * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
- * @mc_dev: fsl-mc device for the bus device itself.
- * @resource_pools: array of resource pools (one pool per resource type)
- * for this MC bus. These resources represent allocatable entities
- * from the physical DPRC.
- * @scan_mutex: Serializes bus scanning
- */
-struct fsl_mc_bus {
- struct fsl_mc_device mc_dev;
- struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
- struct mutex scan_mutex; /* serializes bus scanning */
-};
-
-#define to_fsl_mc_bus(_mc_dev) \
- container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
-
-int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
- struct fsl_mc_io *mc_io,
- struct device *parent_dev,
- struct fsl_mc_device **new_mc_dev);
-
-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
-
-int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
-
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev);
-
-int __init dprc_driver_init(void);
-
-void __exit dprc_driver_exit(void);
-
-int __init fsl_mc_allocator_driver_init(void);
-
-void __exit fsl_mc_allocator_driver_exit(void);
-
-int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
- enum fsl_mc_pool_type pool_type,
- struct fsl_mc_resource
- **new_resource);
-
-void fsl_mc_resource_free(struct fsl_mc_resource *resource);
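
The allocator entry points at the end are used in borrow/return pairs against the per-bus pools declared above. A minimal sketch (what a caller does with resource->data in between is driver-specific and omitted):

static int example_borrow_dpmcp(struct fsl_mc_bus *mc_bus)
{
	struct fsl_mc_resource *resource;
	int error;

	/* Take a free DPMCP resource off the bus's pool, if one exists */
	error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
	if (error < 0)
		return error;

	/* ... use the object behind resource->data ... */

	/* Put the resource back on the pool's free list */
	fsl_mc_resource_free(resource);
	return 0;
}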
-
-#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
deleted file mode 100644
index 939b7d39b365..000000000000
--- a/drivers/staging/fsl-mc/include/mc-sys.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
- *
- * Interface of the I/O services to send MC commands to the MC hardware
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _FSL_MC_SYS_H
-#define _FSL_MC_SYS_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-
-struct fsl_mc_resource;
-struct mc_command;
-
-/**
- * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
- * @dev: device associated with this Mc I/O object
- * @flags: flags for mc_send_command()
- * @portal_size: MC command portal size in bytes
- * @portal_phys_addr: MC command portal physical address
- * @portal_virt_addr: MC command portal virtual address
- * @resource: generic resource associated with the MC portal if
- * the MC portal came from a resource pool, or NULL if the MC portal
- * is permanently bound to a device (e.g., a DPRC)
- */
-struct fsl_mc_io {
- struct device *dev;
- u32 flags;
- u32 portal_size;
- phys_addr_t portal_phys_addr;
- void __iomem *portal_virt_addr;
- struct fsl_mc_resource *resource;
-};
-
-int __must_check fsl_create_mc_io(struct device *dev,
- phys_addr_t mc_portal_phys_addr,
- u32 mc_portal_size,
- struct fsl_mc_resource *resource,
- u32 flags, struct fsl_mc_io **new_mc_io);
-
-void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
-
-int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
-
-#endif /* _FSL_MC_SYS_H */
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
deleted file mode 100644
index 860bf264404f..000000000000
--- a/drivers/staging/fsl-mc/include/mc.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Freescale Management Complex (MC) bus public interface
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#ifndef _FSL_MC_H_
-#define _FSL_MC_H_
-
-#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/list.h>
-#include "../include/dprc.h"
-
-#define FSL_MC_VENDOR_FREESCALE 0x1957
-
-struct fsl_mc_device;
-struct fsl_mc_io;
-
-/**
- * struct fsl_mc_driver - MC object device driver object
- * @driver: Generic device driver
- * @match_id_table: table of supported device matching Ids
- * @probe: Function called when a device is added
- * @remove: Function called when a device is removed
- * @shutdown: Function called at shutdown time to quiesce the device
- * @suspend: Function called when a device is stopped
- * @resume: Function called when a device is resumed
- *
- * Generic DPAA device driver object for device drivers that are registered
- * with a DPRC bus. This structure is to be embedded in each device-specific
- * driver structure.
- */
-struct fsl_mc_driver {
- struct device_driver driver;
- const struct fsl_mc_device_match_id *match_id_table;
- int (*probe)(struct fsl_mc_device *dev);
- int (*remove)(struct fsl_mc_device *dev);
- void (*shutdown)(struct fsl_mc_device *dev);
- int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
- int (*resume)(struct fsl_mc_device *dev);
-};
-
-#define to_fsl_mc_driver(_drv) \
- container_of(_drv, struct fsl_mc_driver, driver)
-
-/**
- * struct fsl_mc_device_match_id - MC object device Id entry for driver matching
- * @vendor: vendor ID
- * @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
- *
- * Type of entries in the "device Id" table for MC object devices supported by
- * an MC object device driver. The last entry of the table has vendor set to 0x0
- */
-struct fsl_mc_device_match_id {
- u16 vendor;
- const char obj_type[16];
- u32 ver_major;
- u32 ver_minor;
-};
-
-/**
- * enum fsl_mc_pool_type - Types of allocatable MC bus resources
- *
- * Entries in these enum are used as indices in the array of resource
- * pools of an fsl_mc_bus object.
- */
-enum fsl_mc_pool_type {
- FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
- FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
- FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
-
- /*
- * NOTE: New resource pool types must be added before this entry
- */
- FSL_MC_NUM_POOL_TYPES
-};
-
-/**
- * struct fsl_mc_resource - MC generic resource
- * @type: type of resource
- * @id: unique MC resource Id within the resources of the same type
- * @data: pointer to resource-specific data if the resource is currently
- * allocated, or NULL if the resource is not currently allocated.
- * @parent_pool: pointer to the parent resource pool from which this
- * resource is allocated.
- * @node: Node in the free list of the corresponding resource pool
- *
- * NOTE: This structure is to be embedded as a field of specific
- * MC resource structures.
- */
-struct fsl_mc_resource {
- enum fsl_mc_pool_type type;
- int32_t id;
- void *data;
- struct fsl_mc_resource_pool *parent_pool;
- struct list_head node;
-};
-
-/**
- * Bit masks for a MC object device (struct fsl_mc_device) flags
- */
-#define FSL_MC_IS_DPRC 0x0001
-
-/**
- * Default DMA mask for devices on a fsl-mc bus
- */
-#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
-
-/**
- * struct fsl_mc_device - MC object device object
- * @dev: Linux driver model device object
- * @dma_mask: Default DMA mask
- * @flags: MC object device flags
- * @icid: Isolation context ID for the device
- * @mc_handle: MC handle for the corresponding MC object opened
- * @mc_io: Pointer to MC IO object assigned to this device or
- * NULL if none.
- * @obj_desc: MC description of the DPAA device
- * @regions: pointer to array of MMIO region entries
- * @resource: generic resource associated with this MC object device, if any.
- *
- * Generic device object for MC object devices that are "attached" to a
- * MC bus.
- *
- * NOTES:
- * - For a non-DPRC object its icid is the same as its parent DPRC's icid.
- * - The SMMU notifier callback gets invoked after device_add() has been
- * called for an MC object device, but before the device-specific probe
- * callback gets called.
- * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC
- * portals. For all other MC objects, their device drivers are responsible for
- * allocating MC portals for them by calling fsl_mc_portal_allocate().
- * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are
- * treated as resources that can be allocated/deallocated from the
- * corresponding resource pool in the object's parent DPRC, using the
- * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects
- * are known as "allocatable" objects. For them, the corresponding
- * fsl_mc_device's 'resource' points to the associated resource object.
- * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI),
- * 'resource' is NULL.
- */
-struct fsl_mc_device {
- struct device dev;
- u64 dma_mask;
- u16 flags;
- u16 icid;
- u16 mc_handle;
- struct fsl_mc_io *mc_io;
- struct dprc_obj_desc obj_desc;
- struct resource *regions;
- struct fsl_mc_resource *resource;
-};
-
-#define to_fsl_mc_device(_dev) \
- container_of(_dev, struct fsl_mc_device, dev)
-
-/*
- * module_fsl_mc_driver() - Helper macro for drivers that don't do
- * anything special in module init/exit. This eliminates a lot of
- * boilerplate. Each module may only use this macro once, and
- * calling it replaces module_init() and module_exit()
- */
-#define module_fsl_mc_driver(__fsl_mc_driver) \
- module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
- fsl_mc_driver_unregister)
-
-/*
- * Macro to avoid include chaining to get THIS_MODULE
- */
-#define fsl_mc_driver_register(drv) \
- __fsl_mc_driver_register(drv, THIS_MODULE)
-
-int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
- struct module *owner);
-
-void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
-
-int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
- u16 mc_io_flags,
- struct fsl_mc_io **new_mc_io);
-
-void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
-
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
-
-int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
- enum fsl_mc_pool_type pool_type,
- struct fsl_mc_device **new_mc_adev);
-
-void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
-
-extern struct bus_type fsl_mc_bus_type;
-
-#endif /* _FSL_MC_H_ */
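
Putting the public interface together, an object driver fills a match table, implements probe/remove, and lets module_fsl_mc_driver() generate the module init/exit glue. A hypothetical skeleton — "dpexample" is an invented object type, and allocating a portal in probe follows the note above that only DPRCs have built-in portals:

static int example_probe(struct fsl_mc_device *mc_dev)
{
	int error;

	/* Non-DPRC objects must allocate their own MC portal */
	error = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
	if (error < 0)
		return error;

	dev_info(&mc_dev->dev, "probed %s.%d\n",
		 mc_dev->obj_desc.type, mc_dev->obj_desc.id);
	return 0;
}

static int example_remove(struct fsl_mc_device *mc_dev)
{
	fsl_mc_portal_free(mc_dev->mc_io);
	mc_dev->mc_io = NULL;
	return 0;
}

static const struct fsl_mc_device_match_id example_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpexample",	/* invented object type */
		.ver_major = 1,
		.ver_minor = 0,
	},
	{ .vendor = 0x0 }	/* table terminator */
};

static struct fsl_mc_driver example_driver = {
	.driver = {
		.name = "example_drv",
		.owner = THIS_MODULE,
	},
	.match_id_table = example_match_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

module_fsl_mc_driver(example_driver);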
diff --git a/drivers/staging/ft1000/Kconfig b/drivers/staging/ft1000/Kconfig
deleted file mode 100644
index c54b4e83d6e9..000000000000
--- a/drivers/staging/ft1000/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-config FT1000
- tristate "Drivers for Flarion ft1000 devices"
-
-if FT1000
-
-config FT1000_USB
- tristate "Driver for ft1000 usb devices."
- depends on USB
- depends on NET
- help
-	  Say Y if you want support for the Qleadtek FLASH-OFDM USB Modem [LR7F04],
-	  the Qleadtek Express Card or the Leadtek Multi-band HSDPA modem.
-
-config FT1000_PCMCIA
- tristate "Driver for ft1000 pcmcia device."
- depends on PCMCIA
- depends on NET
- help
-	  Say Y if you want support for the Flarion card, also called the
-	  Multimedia Net Card.
-
-endif
diff --git a/drivers/staging/ft1000/Makefile b/drivers/staging/ft1000/Makefile
deleted file mode 100644
index 3e987770a142..000000000000
--- a/drivers/staging/ft1000/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_FT1000_USB) += ft1000-usb/
-obj-$(CONFIG_FT1000_PCMCIA) += ft1000-pcmcia/
-
diff --git a/drivers/staging/ft1000/TODO b/drivers/staging/ft1000/TODO
deleted file mode 100644
index 1d346bc4f443..000000000000
--- a/drivers/staging/ft1000/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - coding style
- - sparse fixes
- - adapt to latest usb and pcmcia api changes
- - change firmware loading for usb driver to proper kernel method (request_firmware)
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Cc: Marek Belisko <marek.belisko@gmail.com>
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/Makefile b/drivers/staging/ft1000/ft1000-pcmcia/Makefile
deleted file mode 100644
index 715de3f00e33..000000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_FT1000_PCMCIA) = ft1000_pcmcia.o
-ft1000_pcmcia-y := ft1000_hw.o ft1000_dnld.o ft1000_cs.o
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/boot.h b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
deleted file mode 100644
index e4a698528520..000000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/boot.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
- ---------------------------------------------------------------------------
-
- File: boot.h
-
-  Description: bootloader
-
- History:
- 1/11/05 Whc Ported to Linux.
-
- ---------------------------------------------------------------------------*/
-#ifndef _BOOTH_
-#define _BOOTH_
-
-/* Official bootloader */
-static unsigned char bootimage[] = {
- 0x00, 0x00, 0x01, 0x5E, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x02, 0xD7,
- 0x00, 0x00, 0x01, 0x5E, 0x46, 0xB3,
- 0xE6, 0x02, 0x00, 0x98, 0xE6, 0x8C,
- 0x00, 0x98, 0xFB, 0x92, 0xFF, 0xFF,
- 0x98, 0xFB, 0x94, 0xFF, 0xFF, 0x98,
- 0xFB, 0x06, 0x08, 0x00, 0x98, 0xFB,
- 0x96, 0x84, 0x00, 0x98, 0xFB, 0x08,
- 0x1C, 0x00, 0x98, 0xFB, 0x51, 0x25,
- 0x10, 0x1C, 0x00, 0xE6, 0x51, 0x01,
- 0x07, 0xFD, 0x4C, 0xFF, 0x20, 0xF5,
- 0x51, 0x02, 0x20, 0x08, 0x00, 0x4C,
- 0xFF, 0x20, 0x3C, 0x00, 0xC0, 0x64,
- 0x98, 0xC0, 0x66, 0x98, 0xC0, 0x68,
- 0x98, 0xC0, 0x6A, 0x98, 0xC0, 0x6C,
- 0x98, 0x90, 0x08, 0x90, 0x09, 0x90,
- 0x0A, 0x90, 0x0B, 0x90, 0x0C, 0x90,
- 0x0D, 0x90, 0x0E, 0x90, 0x0F, 0x90,
- 0x04, 0x90, 0x06, 0xFB, 0x51, 0x22,
- 0x16, 0x08, 0x03, 0xFB, 0x51, 0x52,
- 0x16, 0x08, 0x04, 0xFB, 0x51, 0x24,
- 0x2B, 0x08, 0x06, 0xFB, 0x51, 0x54,
- 0x2B, 0x08, 0x07, 0xFB, 0x51, 0x24,
- 0x2B, 0x08, 0x09, 0xFB, 0x51, 0x54,
- 0x2B, 0x08, 0x0A, 0xFB, 0x51, 0x12,
- 0x16, 0x08, 0x0C, 0xFB, 0x51, 0x52,
- 0x16, 0x08, 0x0D, 0x78, 0x00, 0x00,
- 0x00, 0x16, 0x00, 0x00, 0xEC, 0x31,
- 0xAE, 0x00, 0x00, 0x81, 0x4C, 0x0F,
- 0xE6, 0x43, 0xFF, 0xEC, 0x31, 0x4E,
- 0x00, 0x00, 0x91, 0xEC, 0x31, 0xAE,
- 0x00, 0x00, 0x91, 0x4C, 0x0F, 0xE6,
- 0x43, 0xFF, 0xEC, 0x31, 0x5E, 0x00,
- 0x00, 0xA1, 0xEB, 0x31, 0x08, 0x00,
- 0x00, 0xA6, 0xEB, 0x31, 0x08, 0x00,
- 0x00, 0xAC, 0x3C, 0x00, 0xEB, 0x31,
- 0x08, 0x00, 0x00, 0xA8, 0x76, 0xFE,
- 0xFE, 0x08, 0xEB, 0x31, 0x08, 0x20,
- 0x00, 0x00, 0x76, 0xFF, 0xFF, 0x18,
- 0xED, 0x31, 0x08, 0x20, 0x00, 0x00,
- 0x26, 0x10, 0x04, 0x10, 0xF5, 0x3C,
- 0x01, 0x3C, 0x00, 0x08, 0x01, 0x12,
- 0x3C, 0x11, 0x3C, 0x00, 0x08, 0x01,
- 0x0B, 0x08, 0x00, 0x6D, 0xEC, 0x31,
- 0xAE, 0x20, 0x00, 0x06, 0xED, 0x4D,
- 0x08, 0x00, 0x00, 0x67, 0x80, 0x6F,
- 0x00, 0x01, 0x0B, 0x6F, 0x00, 0x02,
- 0x2E, 0x76, 0xEE, 0x01, 0x48, 0x06,
- 0x01, 0x39, 0xED, 0x4D, 0x18, 0x00,
- 0x02, 0xED, 0x4D, 0x08, 0x00, 0x04,
- 0x14, 0x06, 0xA4, 0xED, 0x31, 0x22,
- 0x00, 0x00, 0xAC, 0x76, 0xEE, 0x07,
- 0x48, 0x6D, 0x22, 0x01, 0x1E, 0x08,
- 0x01, 0x58, 0xEB, 0x31, 0x08, 0x00,
- 0x00, 0xAC, 0x06, 0xFF, 0xBA, 0x3C,
- 0x00, 0xEB, 0x31, 0x08, 0x20, 0x00,
- 0x04, 0x3C, 0x30, 0xEB, 0x31, 0x08,
- 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
- 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
- 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
- 0x10, 0xF7, 0xED, 0x31, 0x08, 0x00,
- 0x00, 0xA2, 0x91, 0x00, 0x9C, 0x3C,
- 0x80, 0xEB, 0x31, 0x08, 0x20, 0x00,
- 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08,
- 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
- 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
- 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
- 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20,
- 0x00, 0x04, 0x42, 0x10, 0x90, 0x08,
- 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06,
- 0xA4, 0x41, 0x08, 0x00, 0xB6, 0xED,
- 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22,
- 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB,
- 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05,
- 0xFF, 0xE6, 0xA0, 0x31, 0x20, 0x00,
- 0x06, 0xEB, 0x31, 0x08, 0x20, 0x00,
- 0x04, 0x3C, 0x20, 0xEB, 0x31, 0x08,
- 0x20, 0x00, 0x02, 0x3C, 0x10, 0xEB,
- 0x31, 0x08, 0x20, 0x00, 0x00, 0xED,
- 0x31, 0x08, 0x20, 0x00, 0x00, 0x04,
- 0x10, 0xF7, 0xED, 0x31, 0x08, 0x20,
- 0x00, 0x04, 0x42, 0x10, 0x90, 0x08,
- 0xEC, 0x31, 0xAE, 0x20, 0x00, 0x06,
- 0xA4, 0x41, 0x08, 0x00, 0x68, 0xED,
- 0x41, 0x28, 0x7D, 0xFF, 0xFF, 0x22,
- 0xB3, 0x40, 0x98, 0x2A, 0x32, 0xEB,
- 0x41, 0x28, 0xB4, 0x43, 0xFC, 0x05,
- 0xFF, 0xE6, 0x48, 0x04, 0xEB, 0x31,
- 0x08, 0x20, 0x00, 0x04, 0xEB, 0x31,
- 0x18, 0x20, 0x00, 0x02, 0x3C, 0x11,
- 0xEB, 0x31, 0x18, 0x20, 0x00, 0x00,
- 0xED, 0x31, 0x08, 0x20, 0x00, 0x00,
- 0x04, 0x10, 0xF7, 0xED, 0x31, 0x08,
- 0x20, 0x00, 0x02, 0x66, 0x00, 0x6F,
- 0x00, 0x01, 0x16, 0x76, 0xEE, 0x06,
- 0x48, 0x4A, 0x1E, 0x48, 0x04, 0xED,
- 0x31, 0x08, 0x20, 0x00, 0x04, 0xEB,
- 0x31, 0x08, 0x00, 0x00, 0xA4, 0x48,
- 0x04, 0xED, 0x31, 0x08, 0x20, 0x00,
- 0x04, 0xEB, 0x31, 0x08, 0x00, 0x00,
- 0xA2, 0x48, 0x04, 0x20, 0x20, 0x4A,
- 0x7C, 0x46, 0x82, 0x50, 0x05, 0x50,
- 0x15, 0xB5, 0x1E, 0x98, 0xED, 0x31,
- 0x08, 0x00, 0x00, 0xA8, 0x10, 0x47,
- 0x3B, 0x2C, 0x01, 0xDB, 0x40, 0x11,
- 0x98, 0xC1, 0x1E, 0x98, 0x10, 0x07,
- 0x30, 0xF9, 0x40, 0x07, 0x18, 0x98,
- 0x2A, 0x10, 0xEB, 0x31, 0x08, 0x00,
- 0x00, 0xA8, 0xA4, 0x1E, 0x98, 0xBB,
- 0x1E, 0x98, 0x50, 0x14, 0x50, 0x04,
- 0x46, 0x83, 0x48, 0x04, 0x02, 0x01,
- 0x00, 0x50, 0x05, 0x50, 0x15, 0x10,
- 0x87, 0x3F, 0x90, 0x2B, 0x18, 0x01,
- 0x00, 0xC0, 0x31, 0x00, 0x00, 0xAE,
- 0xDF, 0x41, 0x00, 0x08, 0x00, 0x1A,
- 0x42, 0x11, 0x67, 0x01, 0xDF, 0x41,
- 0x02, 0x08, 0x00, 0x10, 0x42, 0x11,
- 0x62, 0x01, 0xB4, 0x43, 0x4A, 0x68,
- 0x50, 0x14, 0x50, 0x04, 0x24, 0x10,
- 0x48, 0x04, 0xF2, 0x31, 0x00, 0x01,
- 0x00, 0x00, 0xAE, 0xF6, 0x31, 0x00,
- 0x01, 0x00, 0x00, 0xAE, 0x62, 0xE4,
- 0xE5, 0x61, 0x04, 0x48, 0x04, 0xE5,
- 0x63, 0x05, 0x48, 0x04, 0x20, 0x20,
- 0x00, 0x00, 0x00, 0x00
-};
-
-#endif
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
deleted file mode 100644
index e1861cf5de73..000000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
- ---------------------------------------------------------------------------
- Description: Common structures and defines
- ---------------------------------------------------------------------------*/
-#ifndef _FT1000H_
-#define _FT1000H_
-
-#include "../ft1000.h"
-
-#define FT1000_DRV_VER 0x01010300
-
-#define FT1000_DPRAM_BASE 0x0000 /* Dual Port RAM starting offset */
-
-/*
- * Maximum number of occurrence of pseudo header errors before resetting PC
- * Card.
- */
-#define MAX_PH_ERR 300
-
-#define SUCCESS 0x00
-#define FAILURE 0x01
-
-struct ft1000_pcmcia {
- int PktIntfErr;
- u16 packetseqnum;
- void *link;
-};
-
-struct pcmcia_device;
-struct net_device;
-struct net_device *init_ft1000_card(struct pcmcia_device *link,
- void *ft1000_reset);
-void stop_ft1000_card(struct net_device *dev);
-int card_download(struct net_device *dev, const u8 *pFileStart,
- size_t FileLength);
-
-u16 ft1000_read_dpram(struct net_device *dev, int offset);
-void card_bootload(struct net_device *dev);
-u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index);
-u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset);
-void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value);
-
-/* Read the value of a given ASIC register. */
-static inline u16 ft1000_read_reg(struct net_device *dev, u16 offset)
-{
- return inw(dev->base_addr + offset);
-}
-
-/* Set the value of a given ASIC register. */
-static inline void ft1000_write_reg(struct net_device *dev, u16 offset,
- u16 value)
-{
- outw(value, dev->base_addr + offset);
-}
-
-#endif
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.img b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.img
deleted file mode 100644
index aad3c80d07c8..000000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.img
+++ /dev/null
Binary files differ
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
deleted file mode 100644
index e5cc5bedf031..000000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
- Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
- Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds.
-
- This file was modified to support the Flarion Flash OFDM NIC Device
- by Wai Chan (w.chan@flarion.com).
-
- Port for kernel 2.6 created by Patrik Ostrihon (patrik.ostrihon@pwc.sk)
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
- -----------------------------------------------------------------------------*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
-/*====================================================================*/
-
-MODULE_AUTHOR("Wai Chan");
-MODULE_DESCRIPTION("FT1000 PCMCIA driver");
-MODULE_LICENSE("GPL");
-
-/*====================================================================*/
-
-static int ft1000_config(struct pcmcia_device *link);
-static void ft1000_detach(struct pcmcia_device *link);
-static int ft1000_attach(struct pcmcia_device *link);
-
-#include "ft1000.h"
-
-/*====================================================================*/
-
-static void ft1000_reset(struct pcmcia_device *link)
-{
- pcmcia_reset_card(link->socket);
-}
-
-static int ft1000_attach(struct pcmcia_device *link)
-{
- link->priv = NULL;
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- return ft1000_config(link);
-}
-
-static void ft1000_detach(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- if (dev)
- stop_ft1000_card(dev);
-
- pcmcia_disable_device(link);
- free_netdev(dev);
-}
-
-static int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
-{
- return pcmcia_request_io(link);
-}
-
-/*======================================================================
-
- ft1000_config() is scheduled to run after a CARD_INSERTION event
- is received, to configure the PCMCIA socket, and to make the
- device available to the system.
-
- ======================================================================*/
-
-static int ft1000_config(struct pcmcia_device *link)
-{
- int ret;
-
- dev_dbg(&link->dev, "ft1000_cs: ft1000_config(0x%p)\n", link);
-
- /* setup IO window */
- ret = pcmcia_loop_config(link, ft1000_confcheck, NULL);
- if (ret) {
- dev_err(&link->dev, "Could not configure pcmcia\n");
- return -ENODEV;
- }
-
- /* configure device */
- ret = pcmcia_enable_device(link);
- if (ret) {
- dev_err(&link->dev, "Could not enable pcmcia\n");
- goto failed;
- }
-
- link->priv = init_ft1000_card(link, &ft1000_reset);
- if (!link->priv) {
- dev_err(&link->dev, "Could not register as network device\n");
- goto failed;
- }
-
- /* Finally, report what we've done */
-
- return 0;
-failed:
- pcmcia_disable_device(link);
- return -ENODEV;
-}
-
-static int ft1000_suspend(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- if (link->open)
- netif_device_detach(dev);
- return 0;
-}
-
-static int ft1000_resume(struct pcmcia_device *link)
-{
- return 0;
-}
-
-/*====================================================================*/
-
-static const struct pcmcia_device_id ft1000_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x0100),
- PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1000),
- PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1300),
- PCMCIA_DEVICE_NULL,
-};
-
-MODULE_DEVICE_TABLE(pcmcia, ft1000_ids);
-
-static struct pcmcia_driver ft1000_cs_driver = {
- .owner = THIS_MODULE,
- .name = "ft1000_cs",
- .probe = ft1000_attach,
- .remove = ft1000_detach,
- .id_table = ft1000_ids,
- .suspend = ft1000_suspend,
- .resume = ft1000_resume,
-};
-
-module_pcmcia_driver(ft1000_cs_driver);
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
deleted file mode 100644
index 612ac0bd3756..000000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ /dev/null
@@ -1,762 +0,0 @@
-/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
- --------------------------------------------------------------------------
-
- Description: This module will handshake with the DSP bootloader to
- download the DSP runtime image.
-
- ---------------------------------------------------------------------------*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define __KERNEL_SYSCALLS__
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/unistd.h>
-#include <linux/netdevice.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include "ft1000.h"
-#include "boot.h"
-
-#define MAX_DSP_WAIT_LOOPS 100
-#define DSP_WAIT_SLEEP_TIME 1 /* 1 millisecond */
-
-#define MAX_LENGTH 0x7f0
-
-#define DWNLD_MAG_HANDSHAKE_LOC 0x00
-#define DWNLD_MAG_TYPE_LOC 0x01
-#define DWNLD_MAG_SIZE_LOC 0x02
-#define DWNLD_MAG_PS_HDR_LOC 0x03
-
-#define DWNLD_HANDSHAKE_LOC 0x02
-#define DWNLD_TYPE_LOC 0x04
-#define DWNLD_SIZE_MSW_LOC 0x06
-#define DWNLD_SIZE_LSW_LOC 0x08
-#define DWNLD_PS_HDR_LOC 0x0A
-
-#define HANDSHAKE_TIMEOUT_VALUE 0xF1F1
-#define HANDSHAKE_RESET_VALUE 0xFEFE /* When DSP requests startover */
-#define HANDSHAKE_DSP_BL_READY 0xFEFE /* At start DSP writes this when bootloader ready */
-#define HANDSHAKE_DRIVER_READY 0xFFFF /* Driver writes after receiving 0xFEFE */
-#define HANDSHAKE_SEND_DATA 0x0000 /* DSP writes this when ready for more data */
-
-#define HANDSHAKE_REQUEST 0x0001 /* Request from DSP */
-#define HANDSHAKE_RESPONSE 0x0000 /* Satisfied DSP request */
-
-#define REQUEST_CODE_LENGTH 0x0000
-#define REQUEST_RUN_ADDRESS 0x0001
-#define REQUEST_CODE_SEGMENT 0x0002 /* In WORD count */
-#define REQUEST_DONE_BL 0x0003
-#define REQUEST_DONE_CL 0x0004
-#define REQUEST_VERSION_INFO 0x0005
-#define REQUEST_CODE_BY_VERSION 0x0006
-#define REQUEST_MAILBOX_DATA 0x0007
-#define REQUEST_FILE_CHECKSUM 0x0008
-
-#define STATE_START_DWNLD 0x01
-#define STATE_BOOT_DWNLD 0x02
-#define STATE_CODE_DWNLD 0x03
-#define STATE_DONE_DWNLD 0x04
-#define STATE_SECTION_PROV 0x05
-#define STATE_DONE_PROV 0x06
-#define STATE_DONE_FILE 0x07
-
-struct dsp_file_hdr {
- u32 version_id; /* Version ID of this image format. */
- u32 package_id; /* Package ID of code release. */
- u32 build_date; /* Date/time stamp when file was built. */
- u32 commands_offset; /* Offset to attached commands in Pseudo Hdr format. */
- u32 loader_offset; /* Offset to bootloader code. */
- u32 loader_code_address; /* Start address of bootloader. */
- u32 loader_code_end; /* Where bootloader code ends. */
- u32 loader_code_size;
-	u32  version_data_offset;	/* Offset where scrambled version data begins. */
- u32 version_data_size; /* Size, in words, of scrambled version data. */
- u32 nDspImages; /* Number of DSP images in file. */
-} __packed;
-
-struct dsp_image_info {
- u32 coff_date; /* Date/time when DSP Coff image was built. */
- u32 begin_offset; /* Offset in file where image begins. */
-	u32  end_offset;	/* Offset in file where image ends. */
- u32 run_address; /* On chip Start address of DSP code. */
- u32 image_size; /* Size of image. */
- u32 version; /* Embedded version # of DSP code. */
- unsigned short checksum; /* Dsp File checksum */
- unsigned short pad1;
-} __packed;
-
-void card_bootload(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- unsigned long flags;
- u32 *pdata;
- u32 size;
- u32 i;
- u32 templong;
-
- netdev_dbg(dev, "card_bootload is called\n");
-
- pdata = (u32 *)bootimage;
- size = sizeof(bootimage);
-
- /* check for odd word */
- if (size & 0x0003)
- size += 4;
-
- /* Provide mutual exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock, flags);
-
- /* need to set i/o base address initially and hardware will autoincrement */
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_DPRAM_BASE);
- /* write bytes */
- for (i = 0; i < (size >> 2); i++) {
- templong = *pdata++;
- outl(templong, dev->base_addr + FT1000_REG_MAG_DPDATA);
- }
-
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-}
-
-static u16 get_handshake(struct net_device *dev, u16 expected_value)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u16 handshake;
- u32 tempx;
- int loopcnt;
-
- loopcnt = 0;
- while (loopcnt < MAX_DSP_WAIT_LOOPS) {
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- DWNLD_HANDSHAKE_LOC);
-
- handshake = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
- } else {
- tempx =
- ntohl(ft1000_read_dpram_mag_32
- (dev, DWNLD_MAG_HANDSHAKE_LOC));
- handshake = (u16)tempx;
- }
-
- if ((handshake == expected_value)
- || (handshake == HANDSHAKE_RESET_VALUE)) {
- return handshake;
- }
- loopcnt++;
- mdelay(DSP_WAIT_SLEEP_TIME);
-
- }
-
- return HANDSHAKE_TIMEOUT_VALUE;
-
-}
-
-static void put_handshake(struct net_device *dev, u16 handshake_value)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u32 tempx;
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- DWNLD_HANDSHAKE_LOC);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, handshake_value); /* Handshake */
- } else {
- tempx = (u32)handshake_value;
- tempx = ntohl(tempx);
- ft1000_write_dpram_mag_32(dev, DWNLD_MAG_HANDSHAKE_LOC, tempx); /* Handshake */
- }
-}
-
-static u16 get_request_type(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u16 request_type;
- u32 tempx;
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, DWNLD_TYPE_LOC);
- request_type = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
- } else {
- tempx = ft1000_read_dpram_mag_32(dev, DWNLD_MAG_TYPE_LOC);
- tempx = ntohl(tempx);
- request_type = (u16)tempx;
- }
-
- return request_type;
-
-}
-
-static long get_request_value(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- long value;
- u16 w_val;
-
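-	/*
-	 * On the Electrabuzz ASIC the 32-bit request value is split across
-	 * two 16-bit DPRAM words (MSW then LSW); the Magnemite ASIC exposes
-	 * it as a single big-endian 32-bit word.
-	 */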
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- DWNLD_SIZE_MSW_LOC);
-
- w_val = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
-
- value = (long)(w_val << 16);
-
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- DWNLD_SIZE_LSW_LOC);
-
- w_val = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
-
- value = (long)(value | w_val);
- } else {
- value = ft1000_read_dpram_mag_32(dev, DWNLD_MAG_SIZE_LOC);
- value = ntohl(value);
- }
-
- return value;
-
-}
-
-static void put_request_value(struct net_device *dev, long lvalue)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u16 size;
- u32 tempx;
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- size = (u16) (lvalue >> 16);
-
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- DWNLD_SIZE_MSW_LOC);
-
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
-
- size = (u16) (lvalue);
-
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- DWNLD_SIZE_LSW_LOC);
-
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
- } else {
- tempx = ntohl(lvalue);
-		ft1000_write_dpram_mag_32(dev, DWNLD_MAG_SIZE_LOC, tempx);	/* Request value */
- }
-
-}
-
-static u16 hdr_checksum(struct pseudo_hdr *pHdr)
-{
- u16 *usPtr = (u16 *)pHdr;
- u16 chksum;
-
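-	/*
-	 * The pseudo header checksum is the XOR of the first seven 16-bit
-	 * words of the header.
-	 */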
- chksum = (((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
- usPtr[4]) ^ usPtr[5]) ^ usPtr[6];
-
- return chksum;
-}
-
-int card_download(struct net_device *dev, const u8 *pFileStart,
- size_t FileLength)
-{
- struct ft1000_info *info = netdev_priv(dev);
- int Status = SUCCESS;
- u32 uiState;
- u16 handshake;
- struct pseudo_hdr *pHdr;
- u16 usHdrLength;
- long word_length;
- u16 request;
- u16 temp;
- struct prov_record *pprov_record;
- u8 *pbuffer;
- struct dsp_file_hdr *pFileHdr5;
- struct dsp_image_info *pDspImageInfoV6 = NULL;
- long requested_version;
- bool bGoodVersion = false;
- struct drv_msg *pMailBoxData;
- u16 *pUsData = NULL;
- u16 *pUsFile = NULL;
- u8 *pUcFile = NULL;
- u8 *pBootEnd = NULL;
- u8 *pCodeEnd = NULL;
- int imageN;
- long file_version;
- long loader_code_address = 0;
- long loader_code_size = 0;
- long run_address = 0;
- long run_size = 0;
- unsigned long flags;
- unsigned long templong;
- unsigned long image_chksum = 0;
-
- file_version = *(long *)pFileStart;
- if (file_version != 6) {
- pr_err("unsupported firmware version %ld\n", file_version);
- Status = FAILURE;
- }
-
- uiState = STATE_START_DWNLD;
-
- pFileHdr5 = (struct dsp_file_hdr *)pFileStart;
-
- pUsFile = (u16 *) ((long)pFileStart + pFileHdr5->loader_offset);
- pUcFile = (u8 *) ((long)pFileStart + pFileHdr5->loader_offset);
- pBootEnd = (u8 *) ((long)pFileStart + pFileHdr5->loader_code_end);
- loader_code_address = pFileHdr5->loader_code_address;
- loader_code_size = pFileHdr5->loader_code_size;
- bGoodVersion = false;
-
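-	/*
-	 * Download state machine: the DSP bootloader drives the protocol by
-	 * raising HANDSHAKE_REQUEST together with a request type; the driver
-	 * answers with addresses, lengths or code words and acknowledges each
-	 * request with HANDSHAKE_RESPONSE until the whole file is consumed.
-	 */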
- while ((Status == SUCCESS) && (uiState != STATE_DONE_FILE)) {
-
- switch (uiState) {
- case STATE_START_DWNLD:
-
- handshake = get_handshake(dev, HANDSHAKE_DSP_BL_READY);
-
- if (handshake == HANDSHAKE_DSP_BL_READY)
- put_handshake(dev, HANDSHAKE_DRIVER_READY);
- else
- Status = FAILURE;
-
- uiState = STATE_BOOT_DWNLD;
-
- break;
-
- case STATE_BOOT_DWNLD:
- handshake = get_handshake(dev, HANDSHAKE_REQUEST);
- if (handshake == HANDSHAKE_REQUEST) {
- /*
- * Get type associated with the request.
- */
- request = get_request_type(dev);
- switch (request) {
- case REQUEST_RUN_ADDRESS:
- put_request_value(dev,
- loader_code_address);
- break;
- case REQUEST_CODE_LENGTH:
- put_request_value(dev,
- loader_code_size);
- break;
- case REQUEST_DONE_BL:
- /* Reposition ptrs to beginning of code section */
- pUsFile = (u16 *) ((long)pBootEnd);
- pUcFile = (u8 *) ((long)pBootEnd);
- uiState = STATE_CODE_DWNLD;
- break;
- case REQUEST_CODE_SEGMENT:
- word_length = get_request_value(dev);
- if (word_length > MAX_LENGTH) {
- Status = FAILURE;
- break;
- }
- if ((word_length * 2 + (long)pUcFile) >
- (long)pBootEnd) {
- /*
- * Error, beyond boot code range.
- */
- Status = FAILURE;
- break;
- }
-					/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock,
- flags);
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01)
- word_length++;
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
- templong = *pUsFile++;
- templong |=
- (*pUsFile++ << 16);
- pUcFile += 4;
- outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- }
- spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
- break;
- default:
- Status = FAILURE;
- break;
- }
- put_handshake(dev, HANDSHAKE_RESPONSE);
- } else {
- Status = FAILURE;
- }
-
- break;
-
- case STATE_CODE_DWNLD:
- handshake = get_handshake(dev, HANDSHAKE_REQUEST);
- if (handshake == HANDSHAKE_REQUEST) {
- /*
- * Get type associated with the request.
- */
- request = get_request_type(dev);
- switch (request) {
- case REQUEST_FILE_CHECKSUM:
- netdev_dbg(dev,
- "ft1000_dnld: REQUEST_FOR_CHECKSUM\n");
- put_request_value(dev, image_chksum);
- break;
- case REQUEST_RUN_ADDRESS:
- if (bGoodVersion) {
- put_request_value(dev,
- run_address);
- } else {
- Status = FAILURE;
- break;
- }
- break;
- case REQUEST_CODE_LENGTH:
- if (bGoodVersion) {
- put_request_value(dev,
- run_size);
- } else {
- Status = FAILURE;
- break;
- }
- break;
- case REQUEST_DONE_CL:
- /* Reposition ptrs to beginning of provisioning section */
- pUsFile = (u16 *) ((long)pFileStart + pFileHdr5->commands_offset);
- pUcFile = (u8 *) ((long)pFileStart + pFileHdr5->commands_offset);
- uiState = STATE_DONE_DWNLD;
- break;
- case REQUEST_CODE_SEGMENT:
- if (!bGoodVersion) {
- Status = FAILURE;
- break;
- }
- word_length = get_request_value(dev);
- if (word_length > MAX_LENGTH) {
- Status = FAILURE;
- break;
- }
- if ((word_length * 2 + (long)pUcFile) >
- (long)pCodeEnd) {
-					/*
-					 * Error, beyond DSP image code range.
-					 */
- Status = FAILURE;
- break;
- }
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01)
- word_length++;
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
- templong = *pUsFile++;
- templong |=
- (*pUsFile++ << 16);
- pUcFile += 4;
- outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- }
- break;
-
- case REQUEST_MAILBOX_DATA:
- /* Convert length from byte count to word count. Make sure we round up. */
- word_length =
- (long)(info->DSPInfoBlklen + 1) / 2;
- put_request_value(dev, word_length);
- pMailBoxData =
- (struct drv_msg *)&info->DSPInfoBlk[0];
- pUsData =
- (u16 *)&pMailBoxData->data[0];
-				/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock,
- flags);
- if (file_version == 5) {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_ADDR,
- DWNLD_PS_HDR_LOC);
-
- for (; word_length > 0; word_length--) { /* In words */
- temp = ntohs(*pUsData);
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_DATA,
- temp);
- pUsData++;
- }
- } else {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01)
- word_length++;
-
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
- templong = *pUsData++;
- templong |=
- (*pUsData++ << 16);
- outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- }
- }
- spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
- break;
-
- case REQUEST_VERSION_INFO:
- word_length =
- pFileHdr5->version_data_size;
- put_request_value(dev, word_length);
- pUsFile =
- (u16 *) ((long)pFileStart +
- pFileHdr5->
- version_data_offset);
-				/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock,
- flags);
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01)
- word_length++;
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
- templong =
- ntohs(*pUsFile++);
- temp =
- ntohs(*pUsFile++);
- templong |=
- (temp << 16);
- outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- }
- spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
- break;
-
-			case REQUEST_CODE_BY_VERSION:
-				bGoodVersion = false;
-				requested_version = get_request_value(dev);
-				pDspImageInfoV6 = (struct dsp_image_info *)
-					((long)pFileStart +
-					 sizeof(struct dsp_file_hdr));
-				for (imageN = 0;
-				     imageN < pFileHdr5->nDspImages;
-				     imageN++) {
-					temp = (u16)(pDspImageInfoV6->version);
-					templong = temp;
-					temp = (u16)(pDspImageInfoV6->version >> 16);
-					templong |= (temp << 16);
-					if (templong == requested_version) {
-						bGoodVersion = true;
-						pUsFile = (u16 *)((long)pFileStart +
-							pDspImageInfoV6->begin_offset);
-						pUcFile = (u8 *)((long)pFileStart +
-							pDspImageInfoV6->begin_offset);
-						pCodeEnd = (u8 *)((long)pFileStart +
-							pDspImageInfoV6->end_offset);
-						run_address = pDspImageInfoV6->run_address;
-						run_size = pDspImageInfoV6->image_size;
-						image_chksum = (u32)pDspImageInfoV6->checksum;
-						netdev_dbg(dev,
-							   "ft1000_dnld: image_chksum = 0x%8x\n",
-							   (unsigned int)image_chksum);
-						break;
-					}
-					pDspImageInfoV6++;
-				}
- if (!bGoodVersion) {
-				/*
-				 * Error, no matching DSP image version
-				 * was found in the file.
-				 */
- Status = FAILURE;
- break;
- }
- break;
-
- default:
- Status = FAILURE;
- break;
- }
- put_handshake(dev, HANDSHAKE_RESPONSE);
- } else {
- Status = FAILURE;
- }
-
- break;
-
- case STATE_DONE_DWNLD:
- if (((unsigned long)(pUcFile) - (unsigned long) pFileStart) >=
- (unsigned long)FileLength) {
- uiState = STATE_DONE_FILE;
- break;
- }
-
- pHdr = (struct pseudo_hdr *)pUsFile;
-
- if (pHdr->portdest == 0x80 /* DspOAM */
- && (pHdr->portsrc == 0x00 /* Driver */
- || pHdr->portsrc == 0x10 /* FMM */)) {
- uiState = STATE_SECTION_PROV;
- } else {
- netdev_dbg(dev,
- "Download error: Bad Port IDs in Pseudo Record\n");
- netdev_dbg(dev, "\t Port Source = 0x%2.2x\n",
- pHdr->portsrc);
- netdev_dbg(dev, "\t Port Destination = 0x%2.2x\n",
- pHdr->portdest);
- Status = FAILURE;
- }
-
- break;
-
- case STATE_SECTION_PROV:
-
- pHdr = (struct pseudo_hdr *)pUcFile;
-
- if (pHdr->checksum == hdr_checksum(pHdr)) {
- if (pHdr->portdest != 0x80 /* Dsp OAM */) {
- uiState = STATE_DONE_PROV;
- break;
- }
- usHdrLength = ntohs(pHdr->length); /* Byte length for PROV records */
-
- /* Get buffer for provisioning data */
- pbuffer =
- kmalloc(usHdrLength + sizeof(struct pseudo_hdr),
- GFP_ATOMIC);
- if (pbuffer) {
- memcpy(pbuffer, pUcFile,
- (u32) (usHdrLength +
- sizeof(struct pseudo_hdr)));
- /* link provisioning data */
- pprov_record =
- kmalloc(sizeof(struct prov_record),
- GFP_ATOMIC);
- if (pprov_record) {
- pprov_record->pprov_data =
- pbuffer;
- list_add_tail(&pprov_record->
- list,
- &info->prov_list);
- /* Move to next entry if available */
- pUcFile =
- (u8 *)((unsigned long) pUcFile +
- (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
- if ((unsigned long) (pUcFile) -
- (unsigned long) (pFileStart) >=
- (unsigned long)FileLength) {
- uiState =
- STATE_DONE_FILE;
- }
- } else {
- kfree(pbuffer);
- Status = FAILURE;
- }
- } else {
- Status = FAILURE;
- }
- } else {
- /* Checksum did not compute */
- Status = FAILURE;
- }
-
- break;
-
- case STATE_DONE_PROV:
- uiState = STATE_DONE_FILE;
- break;
-
- default:
- Status = FAILURE;
- break;
- } /* End Switch */
-
- } /* End while */
-
- return Status;
-
-}
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
deleted file mode 100644
index eecfa377054d..000000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ /dev/null
@@ -1,2068 +0,0 @@
-/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
- Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
- Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
- -------------------------------------------------------------------------*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/wait.h>
-#include <linux/vmalloc.h>
-
-#include <linux/firmware.h>
-#include <linux/ethtool.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include <linux/delay.h>
-#include "ft1000.h"
-
-static const struct firmware *fw_entry;
-
-static void ft1000_hbchk(u_long data);
-static struct timer_list poll_timer = {
- .function = ft1000_hbchk
-};
-
-static u16 cmdbuffer[1024];
-static u8 tempbuffer[1600];
-static u8 ft1000_card_present;
-static u8 flarion_ft1000_cnt;
-
-static irqreturn_t ft1000_interrupt(int irq, void *dev_id);
-static void ft1000_enable_interrupts(struct net_device *dev);
-static void ft1000_disable_interrupts(struct net_device *dev);
-
-/* new kernel */
-MODULE_AUTHOR("");
-MODULE_DESCRIPTION("Support for Flarion Flash OFDM NIC Device. Support for PCMCIA when used with ft1000_cs.");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("FT1000");
-
-#define MAX_RCV_LOOP 100
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_read_fifo_len
- Description: This function will read the ASIC Uplink FIFO status register
- which will return the number of bytes remaining in the Uplink FIFO.
- Sixteen bytes are subtracted to make sure that the ASIC does not
- reach its threshold.
- Input:
- dev - network device structure
- Output:
- value - number of bytes available in the ASIC Uplink FIFO.
-
- -------------------------------------------------------------------------*/
-static inline u16 ft1000_read_fifo_len(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
-
- if (info->AsicID == ELECTRABUZZ_ID)
- return (ft1000_read_reg(dev, FT1000_REG_UFIFO_STAT) - 16);
- else
- return (ft1000_read_reg(dev, FT1000_REG_MAG_UFSR) - 16);
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_read_dpram
- Description: This function will read the specific area of dpram
- (Electrabuzz ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
-
- -------------------------------------------------------------------------*/
-u16 ft1000_read_dpram(struct net_device *dev, int offset)
-{
- struct ft1000_info *info = netdev_priv(dev);
- unsigned long flags;
- u16 data;
-
-	/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock, flags);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- data = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-
- return data;
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_write_dpram
- Description: This function will write to a specific area of dpram
- (Electrabuzz ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
-
- -------------------------------------------------------------------------*/
-static inline void ft1000_write_dpram(struct net_device *dev,
- int offset, u16 value)
-{
- struct ft1000_info *info = netdev_priv(dev);
- unsigned long flags;
-
-	/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock, flags);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, value);
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_read_dpram_mag_16
- Description: This function will read the specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
-
- -------------------------------------------------------------------------*/
-u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
-{
- struct ft1000_info *info = netdev_priv(dev);
- unsigned long flags;
- u16 data;
-
-	/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock, flags);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- /* check if we want to read upper or lower 32-bit word */
- if (Index)
- data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAL);
- else
- data = ft1000_read_reg(dev, FT1000_REG_MAG_DPDATAH);
-
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-
- return data;
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_write_dpram_mag_16
- Description: This function will write to a specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
-
- -------------------------------------------------------------------------*/
-static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
- int offset, u16 value, int Index)
-{
- struct ft1000_info *info = netdev_priv(dev);
- unsigned long flags;
-
-	/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock, flags);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- if (Index)
- ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAL, value);
- else
- ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, value);
-
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_read_dpram_mag_32
- Description: This function will read the specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
-
- -------------------------------------------------------------------------*/
-u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
-{
- struct ft1000_info *info = netdev_priv(dev);
- unsigned long flags;
- u32 data;
-
-	/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock, flags);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- data = inl(dev->base_addr + FT1000_REG_MAG_DPDATAL);
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-
- return data;
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_write_dpram_mag_32
- Description: This function will write to a specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
-
- -------------------------------------------------------------------------*/
-void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
-{
- struct ft1000_info *info = netdev_priv(dev);
- unsigned long flags;
-
-	/* Provide mutually exclusive access while reading ASIC registers. */
- spin_lock_irqsave(&info->dpram_lock, flags);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, offset);
- outl(value, dev->base_addr + FT1000_REG_MAG_DPDATAL);
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_enable_interrupts
-  Description: This function will enable interrupts based on the current
- interrupt mask.
- Input:
- dev - device structure
- Output:
- None.
-
- -------------------------------------------------------------------------*/
-static void ft1000_enable_interrupts(struct net_device *dev)
-{
- u16 tempword;
-
- ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_DEFAULT_MASK);
- tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
- pr_debug("current interrupt enable mask = 0x%x\n", tempword);
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_disable_interrupts
- Description: This function will disable all interrupts.
- Input:
- dev - device structure
- Output:
- None.
-
- -------------------------------------------------------------------------*/
-static void ft1000_disable_interrupts(struct net_device *dev)
-{
- u16 tempword;
-
- ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_MASK_ALL);
- tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
- pr_debug("current interrupt enable mask = 0x%x\n", tempword);
-}
-
-/*---------------------------------------------------------------------------
- Function: ft1000_read_dsp_timer
- Description: This function reads the DSP timer and stores its value in the
- DSP_TIME field of the ft1000_info struct passed as argument
- Input:
- dev - device structure
- info - ft1000_info structure
- Output:
- None.
-
- -------------------------------------------------------------------------*/
-static void ft1000_read_dsp_timer(struct net_device *dev,
- struct ft1000_info *info)
-{
- if (info->AsicID == ELECTRABUZZ_ID) {
- info->DSP_TIME[0] = ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
- info->DSP_TIME[1] = ft1000_read_dpram(dev, FT1000_DSP_TIMER1);
- info->DSP_TIME[2] = ft1000_read_dpram(dev, FT1000_DSP_TIMER2);
- info->DSP_TIME[3] = ft1000_read_dpram(dev, FT1000_DSP_TIMER3);
- } else {
- info->DSP_TIME[0] =
- ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER0,
- FT1000_MAG_DSP_TIMER0_INDX);
- info->DSP_TIME[1] =
- ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER1,
- FT1000_MAG_DSP_TIMER1_INDX);
- info->DSP_TIME[2] =
- ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER2,
- FT1000_MAG_DSP_TIMER2_INDX);
- info->DSP_TIME[3] =
- ft1000_read_dpram_mag_16(dev, FT1000_MAG_DSP_TIMER3,
- FT1000_MAG_DSP_TIMER3_INDX);
- }
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_reset_asic
- Description: This function will call the Card Service function to reset the
- ASIC.
- Input:
- dev - device structure
- Output:
- none
-
- -------------------------------------------------------------------------*/
-static void ft1000_reset_asic(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- struct ft1000_pcmcia *pcmcia = info->priv;
- u16 tempword;
-
- (*info->ft1000_reset) (pcmcia->link);
-
- /*
- * Let's use the register provided by the Magnemite ASIC to reset the
- * ASIC and DSP.
- */
- if (info->AsicID == MAGNEMITE_ID) {
- ft1000_write_reg(dev, FT1000_REG_RESET,
- DSP_RESET_BIT | ASIC_RESET_BIT);
- }
- mdelay(1);
- if (info->AsicID == ELECTRABUZZ_ID) {
- /* set watermark to -1 in order to not generate an interrupt */
- ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
- } else {
- /* set watermark to -1 in order to not generate an interrupt */
- ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
- }
- /* clear interrupts */
- tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- pr_debug("interrupt status register = 0x%x\n", tempword);
- ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
- tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- pr_debug("interrupt status register = 0x%x\n", tempword);
-
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_reset_card
- Description: This function will reset the card
- Input:
- dev - device structure
- Output:
- status - false (card reset fail)
- true (card reset successful)
-
- -------------------------------------------------------------------------*/
-static int ft1000_reset_card(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u16 tempword;
- int i;
- unsigned long flags;
- struct prov_record *ptr;
- struct prov_record *tmp;
-
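-	/*
-	 * Reset sequence: quiesce interrupts, drop any queued provisioning
-	 * records, save the DSP session record, reset the ASIC/DSP, reload
-	 * the bootloader and DSP firmware, then re-arm the hi/ho heartbeat
-	 * area before re-enabling interrupts.
-	 */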
- info->CardReady = 0;
- info->ProgConStat = 0;
- info->squeseqnum = 0;
- ft1000_disable_interrupts(dev);
-
- /* del_timer(&poll_timer); */
-
- /* Make sure we free any memory reserve for provisioning */
- list_for_each_entry_safe(ptr, tmp, &info->prov_list, list) {
- pr_debug("deleting provisioning record\n");
- list_del(&ptr->list);
- kfree(ptr->pprov_data);
- kfree(ptr);
- }
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- pr_debug("resetting DSP\n");
- ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
- } else {
- pr_debug("resetting ASIC and DSP\n");
- ft1000_write_reg(dev, FT1000_REG_RESET,
- DSP_RESET_BIT | ASIC_RESET_BIT);
- }
-
- /* Copy DSP session record into info block if this is not a coldstart */
- if (ft1000_card_present == 1) {
- spin_lock_irqsave(&info->dpram_lock, flags);
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_RX_BASE);
- for (i = 0; i < MAX_DSP_SESS_REC; i++) {
- info->DSPSess.Rec[i] =
- ft1000_read_reg(dev,
- FT1000_REG_DPRAM_DATA);
- }
- } else {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_MAG_RX_BASE);
- for (i = 0; i < MAX_DSP_SESS_REC / 2; i++) {
- info->DSPSess.MagRec[i] =
- inl(dev->base_addr
- + FT1000_REG_MAG_DPDATA);
- }
- }
- spin_unlock_irqrestore(&info->dpram_lock, flags);
- }
-
- pr_debug("resetting ASIC\n");
- mdelay(10);
- /* reset ASIC */
- ft1000_reset_asic(dev);
-
- pr_debug("downloading dsp image\n");
-
- if (info->AsicID == MAGNEMITE_ID) {
- /* Put dsp in reset and take ASIC out of reset */
- pr_debug("Put DSP in reset and take ASIC out of reset\n");
- ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
-
- /* Setting MAGNEMITE ASIC to big endian mode */
- ft1000_write_reg(dev, FT1000_REG_SUP_CTRL, HOST_INTF_BE);
- /* Download bootloader */
- card_bootload(dev);
-
- /* Take DSP out of reset */
- ft1000_write_reg(dev, FT1000_REG_RESET, 0);
- /* FLARION_DSP_ACTIVE; */
- mdelay(10);
- pr_debug("Take DSP out of reset\n");
-
- /*
- * Wait for 0xfefe indicating dsp ready before starting
- * download
- */
- for (i = 0; i < 50; i++) {
- tempword = ft1000_read_dpram_mag_16(dev,
- FT1000_MAG_DPRAM_FEFE,
- FT1000_MAG_DPRAM_FEFE_INDX);
- if (tempword == 0xfefe)
- break;
- mdelay(20);
- }
-
- if (i == 50) {
- pr_debug("No FEFE detected from DSP\n");
- return false;
- }
-
- } else {
- /* Take DSP out of reset */
- ft1000_write_reg(dev, FT1000_REG_RESET, ~DSP_RESET_BIT);
- mdelay(10);
- }
-
- if (card_download(dev, fw_entry->data, fw_entry->size)) {
- pr_debug("card download unsuccessful\n");
- return false;
- }
- pr_debug("card download successful\n");
-
- mdelay(10);
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- /*
- * Need to initialize the FIFO length counter to zero in order
- * to sync up with the DSP
- */
- info->fifo_cnt = 0;
- ft1000_write_dpram(dev, FT1000_FIFO_LEN, info->fifo_cnt);
- /* Initialize DSP heartbeat area to ho */
- ft1000_write_dpram(dev, FT1000_HI_HO, ho);
- tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- pr_debug("hi_ho value = 0x%x\n", tempword);
- } else {
- /* Initialize DSP heartbeat area to ho */
- ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, ho_mag,
- FT1000_MAG_HI_HO_INDX);
- tempword =
- ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX);
- pr_debug("hi_ho value = 0x%x\n", tempword);
- }
-
- info->CardReady = 1;
- ft1000_enable_interrupts(dev);
-
- /* Schedule heartbeat process to run every 2 seconds */
- /* poll_timer.expires = jiffies + (2*HZ); */
- /* poll_timer.data = (u_long)dev; */
- /* add_timer(&poll_timer); */
-
- return true;
-
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_chkcard
- Description: This function will check if the device is presently available on
- the system.
- Input:
- dev - device structure
- Output:
- status - false (device is not present)
- true (device is present)
-
- -------------------------------------------------------------------------*/
-static int ft1000_chkcard(struct net_device *dev)
-{
- u16 tempword;
-
- /*
- * Mask register is used to check for device presence since it is never
- * set to zero.
- */
- tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
- if (tempword == 0) {
- pr_debug("IMASK = 0 Card not detected\n");
- return false;
- }
- /*
- * The system will return the value of 0xffff for the version register
- * if the device is not present.
- */
- tempword = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
- if (tempword == 0xffff) {
- pr_debug("Version = 0xffff Card not detected\n");
- return false;
- }
- return true;
-}
-
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_hbchk
-  Description: This function will perform the heartbeat check of the DSP as
- well as the ASIC.
- Input:
- dev - device structure
- Output:
- none
-
- -------------------------------------------------------------------------*/
-static void ft1000_hbchk(u_long data)
-{
- struct net_device *dev = (struct net_device *)data;
-
- struct ft1000_info *info;
- u16 tempword;
-
- info = netdev_priv(dev);
-
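-	/*
-	 * Hi/ho heartbeat protocol: the DSP is expected to have written "ho"
-	 * into the dedicated DPRAM area; the driver writes "hi" back and
-	 * rings the heartbeat doorbell.  Each check is retried once, and any
-	 * persistent failure resets the card.
-	 */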
- if (info->CardReady == 1) {
- /* Perform dsp heartbeat check */
- if (info->AsicID == ELECTRABUZZ_ID) {
- tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- } else {
- tempword =
- ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
- }
- pr_debug("hi_ho value = 0x%x\n", tempword);
- /* Let's perform another check if ho is not detected */
- if (tempword != ho) {
- if (info->AsicID == ELECTRABUZZ_ID)
- tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- else
- tempword = ntohs(ft1000_read_dpram_mag_16(dev,
- FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
- }
- if (tempword != ho) {
- pr_info("heartbeat failed - no ho detected\n");
- ft1000_read_dsp_timer(dev, info);
- info->DrvErrNum = DSP_HB_INFO;
- if (ft1000_reset_card(dev) == 0) {
- pr_info("Hardware Failure Detected - PC Card disabled\n");
- info->ProgConStat = 0xff;
- return;
- }
- /* Schedule this module to run every 2 seconds */
- poll_timer.expires = jiffies + (2*HZ);
- poll_timer.data = (u_long)dev;
- add_timer(&poll_timer);
- return;
- }
-
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- /* Let's check doorbell again if fail */
- if (tempword & FT1000_DB_HB)
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
-
- if (tempword & FT1000_DB_HB) {
- pr_info("heartbeat doorbell not clear by firmware\n");
- ft1000_read_dsp_timer(dev, info);
- info->DrvErrNum = DSP_HB_INFO;
- if (ft1000_reset_card(dev) == 0) {
- pr_info("Hardware Failure Detected - PC Card disabled\n");
- info->ProgConStat = 0xff;
- return;
- }
- /* Schedule this module to run every 2 seconds */
- poll_timer.expires = jiffies + (2*HZ);
- poll_timer.data = (u_long)dev;
- add_timer(&poll_timer);
- return;
- }
- /*
- * Set dedicated area to hi and ring appropriate doorbell
- * according to hi/ho heartbeat protocol
- */
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_dpram(dev, FT1000_HI_HO, hi);
- } else {
- ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, hi_mag,
- FT1000_MAG_HI_HO_INDX);
- }
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- } else {
- tempword =
- ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
- }
- /* Let's write hi again if fail */
- if (tempword != hi) {
- if (info->AsicID == ELECTRABUZZ_ID)
- ft1000_write_dpram(dev, FT1000_HI_HO, hi);
- else
- ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO,
- hi_mag, FT1000_MAG_HI_HO_INDX);
-
- if (info->AsicID == ELECTRABUZZ_ID)
- tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- else
- tempword = ntohs(ft1000_read_dpram_mag_16(dev,
- FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
- }
-
- if (tempword != hi) {
- pr_info("heartbeat failed - cannot write hi into DPRAM\n");
- ft1000_read_dsp_timer(dev, info);
- info->DrvErrNum = DSP_HB_INFO;
- if (ft1000_reset_card(dev) == 0) {
- pr_info("Hardware Failure Detected - PC Card disabled\n");
- info->ProgConStat = 0xff;
- return;
- }
- /* Schedule this module to run every 2 seconds */
- poll_timer.expires = jiffies + (2*HZ);
- poll_timer.data = (u_long)dev;
- add_timer(&poll_timer);
- return;
- }
- ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_HB);
-
- }
-
- /* Schedule this module to run every 2 seconds */
- poll_timer.expires = jiffies + (2 * HZ);
- poll_timer.data = (u_long)dev;
- add_timer(&poll_timer);
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_send_cmd
-  Description: This function will copy a command message into the DPRAM
-      slow queue area and ring the doorbell to notify the DSP.
-  Input:
-      dev         - network device structure
-      ptempbuffer - pointer to the command message
-      size        - length of the message body in bytes
-      qtype       - queue type (unused)
-  Output:
-
- -------------------------------------------------------------------------*/
-static void ft1000_send_cmd(struct net_device *dev, u16 *ptempbuffer, int size,
- u16 qtype)
-{
- struct ft1000_info *info = netdev_priv(dev);
- int i;
- u16 tempword;
- unsigned long flags;
-
- size += sizeof(struct pseudo_hdr);
- /* check for odd byte and increment to 16-bit word align value */
- if ((size & 0x0001))
- size++;
- pr_debug("total length = %d\n", size);
- pr_debug("length = %d\n", ntohs(*ptempbuffer));
- /*
- * put message into slow queue area
- * All messages are in the form total_len + pseudo header + message body
- */
- spin_lock_irqsave(&info->dpram_lock, flags);
-
- /* Make sure SLOWQ doorbell is clear */
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- i = 0;
- while (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- i++;
- if (i == 10) {
- spin_unlock_irqrestore(&info->dpram_lock, flags);
- return;
- }
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- }
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_TX_BASE);
- /* Write total length to dpram */
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
-		/* Write pseudo header and message body */
- for (i = 0; i < (size >> 1); i++) {
- pr_debug("data %d = 0x%x\n", i, *ptempbuffer);
- tempword = htons(*ptempbuffer++);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, tempword);
- }
- } else {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_MAG_TX_BASE);
- /* Write total length to dpram */
- ft1000_write_reg(dev, FT1000_REG_MAG_DPDATAH, htons(size));
-		/* Write pseudo header and message body */
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_MAG_TX_BASE + 1);
- for (i = 0; i < (size >> 2); i++) {
- pr_debug("data = 0x%x\n", *ptempbuffer);
- outw(*ptempbuffer++,
- dev->base_addr + FT1000_REG_MAG_DPDATAL);
- pr_debug("data = 0x%x\n", *ptempbuffer);
- outw(*ptempbuffer++,
- dev->base_addr + FT1000_REG_MAG_DPDATAH);
- }
- pr_debug("data = 0x%x\n", *ptempbuffer);
- outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAL);
- pr_debug("data = 0x%x\n", *ptempbuffer);
- outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAH);
- }
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-
- /* ring doorbell to notify DSP that we have a message ready */
- ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_DPRAM_TX);
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_receive_cmd
- Description: This function will read a message from the dpram area.
- Input:
- dev - network device structure
-      pbuffer - caller-supplied buffer address
- pnxtph - pointer to next pseudo header
- Output:
- Status = 0 (unsuccessful)
- = 1 (successful)
-
- -------------------------------------------------------------------------*/
-static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
- int maxsz, u16 *pnxtph)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u16 size;
- u16 *ppseudohdr;
- int i;
- u16 tempword;
- unsigned long flags;
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- size = ft1000_read_dpram(dev, *pnxtph)
- + sizeof(struct pseudo_hdr);
- } else {
- size = ntohs(ft1000_read_dpram_mag_16(dev, FT1000_MAG_PH_LEN,
- FT1000_MAG_PH_LEN_INDX))
- + sizeof(struct pseudo_hdr);
- }
- if (size > maxsz) {
- pr_debug("Invalid command length = %d\n", size);
- return false;
- }
- ppseudohdr = (u16 *)pbuffer;
- spin_lock_irqsave(&info->dpram_lock, flags);
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_RX_BASE + 2);
- for (i = 0; i <= (size >> 1); i++) {
- tempword =
- ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
- *pbuffer++ = ntohs(tempword);
- }
- } else {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_MAG_RX_BASE);
- *pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAH);
- pr_debug("received data = 0x%x\n", *pbuffer);
- pbuffer++;
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_MAG_RX_BASE + 1);
- for (i = 0; i <= (size >> 2); i++) {
- *pbuffer =
- inw(dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- pbuffer++;
- *pbuffer =
- inw(dev->base_addr +
- FT1000_REG_MAG_DPDATAH);
- pbuffer++;
- }
- /* copy odd aligned word */
- *pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAL);
- pr_debug("received data = 0x%x\n", *pbuffer);
- pbuffer++;
- *pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAH);
- pr_debug("received data = 0x%x\n", *pbuffer);
- pbuffer++;
- }
- if (size & 0x0001) {
- /* copy odd byte from fifo */
- tempword = ft1000_read_reg(dev, FT1000_REG_DPRAM_DATA);
- *pbuffer = ntohs(tempword);
- }
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-
- /*
- * Check if pseudo header checksum is good
- * Calculate pseudo header checksum
- */
- tempword = *ppseudohdr++;
- for (i = 1; i < 7; i++)
- tempword ^= *ppseudohdr++;
- if (tempword != *ppseudohdr) {
- pr_debug("Pseudo header checksum mismatch\n");
- /* Drop this message */
- return false;
- }
- return true;
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_proc_drvmsg
- Description: This function will process the various driver messages.
- Input:
- dev - device structure
- Output:
- none
-
- -------------------------------------------------------------------------*/
-static void ft1000_proc_drvmsg(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u16 msgtype;
- u16 tempword;
- struct media_msg *pmediamsg;
- struct dsp_init_msg *pdspinitmsg;
- struct drv_msg *pdrvmsg;
- u16 len;
- u16 i;
- struct prov_record *ptr;
- struct pseudo_hdr *ppseudo_hdr;
- u16 *pmsg;
- struct timeval tv;
- union {
- u8 byte[2];
- u16 wrd;
- } convert;
-
- if (info->AsicID == ELECTRABUZZ_ID)
- tempword = FT1000_DPRAM_RX_BASE+2;
- else
- tempword = FT1000_DPRAM_MAG_RX_BASE;
-
- if (ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword)) {
-
- /*
- * Get the message type which is total_len + PSEUDO header
- * + msgtype + message body
- */
- pdrvmsg = (struct drv_msg *)&cmdbuffer[0];
- msgtype = ntohs(pdrvmsg->type);
- pr_debug("Command message type = 0x%x\n", msgtype);
- switch (msgtype) {
- case DSP_PROVISION:
- pr_debug("Got a provisioning request message from DSP\n");
- mdelay(25);
- while (list_empty(&info->prov_list) == 0) {
- pr_debug("Sending a provisioning message\n");
- /* Make sure SLOWQ doorbell is clear */
- tempword = ft1000_read_reg(dev,
- FT1000_REG_DOORBELL);
- i = 0;
- while (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(5);
- i++;
- if (i == 10)
- break;
- }
- ptr = list_entry(info->prov_list.next,
- struct prov_record, list);
- len = *(u16 *)ptr->pprov_data;
- len = htons(len);
-
- pmsg = (u16 *)ptr->pprov_data;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- pr_debug("checksum = 0x%x\n",
- ppseudo_hdr->checksum);
- for (i = 1; i < 7; i++) {
- ppseudo_hdr->checksum ^= *pmsg++;
- pr_debug("checksum = 0x%x\n",
- ppseudo_hdr->checksum);
- }
-
- ft1000_send_cmd(dev, (u16 *)ptr->pprov_data,
- len, SLOWQ_TYPE);
- list_del(&ptr->list);
- kfree(ptr->pprov_data);
- kfree(ptr);
- }
- /*
- * Indicate adapter is ready to take application
- * messages after all provisioning messages are sent
- */
- info->CardReady = 1;
- break;
- case MEDIA_STATE:
- pmediamsg = (struct media_msg *)&cmdbuffer[0];
- if (info->ProgConStat != 0xFF) {
- if (pmediamsg->state) {
- pr_debug("Media is up\n");
- if (info->mediastate == 0) {
- netif_carrier_on(dev);
- netif_wake_queue(dev);
- info->mediastate = 1;
- do_gettimeofday(&tv);
- info->ConTm = tv.tv_sec;
- }
- } else {
- pr_debug("Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- netif_carrier_off(dev);
- netif_stop_queue(dev);
- info->ConTm = 0;
- }
- }
- } else {
- pr_debug("Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- netif_carrier_off(dev);
- netif_stop_queue(dev);
- info->ConTm = 0;
- }
- }
- break;
- case DSP_INIT_MSG:
- pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[0];
- memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- pr_debug("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1],
- info->DspVer[2], info->DspVer[3]);
- memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
- HWSERNUMSZ);
- memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
- memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
- dev->dev_addr[0] = info->eui64[0];
- dev->dev_addr[1] = info->eui64[1];
- dev->dev_addr[2] = info->eui64[2];
- dev->dev_addr[3] = info->eui64[5];
- dev->dev_addr[4] = info->eui64[6];
- dev->dev_addr[5] = info->eui64[7];
-
- if (ntohs(pdspinitmsg->length) ==
- (sizeof(struct dsp_init_msg) - 20)) {
- memcpy(info->ProductMode,
- pdspinitmsg->ProductMode, MODESZ);
- memcpy(info->RfCalVer, pdspinitmsg->RfCalVer,
- CALVERSZ);
- memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
- CALDATESZ);
- pr_debug("RFCalVer = 0x%2x 0x%2x\n",
- info->RfCalVer[0], info->RfCalVer[1]);
- }
-
- break;
- case DSP_STORE_INFO:
- pr_debug("Got DSP_STORE_INFO\n");
- tempword = ntohs(pdrvmsg->length);
- info->DSPInfoBlklen = tempword;
- if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *)&pdrvmsg->data[0];
- for (i = 0; i < ((tempword + 1) / 2); i++) {
- pr_debug("dsp info data = 0x%x\n",
- *pmsg);
- info->DSPInfoBlk[i + 10] = *pmsg++;
- }
- }
- break;
- case DSP_GET_INFO:
- pr_debug("Got DSP_GET_INFO\n");
- /*
- * copy dsp info block to dsp
- * allow any outstanding ioctl to finish
- */
- mdelay(10);
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- tempword = ft1000_read_reg(dev,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- mdelay(10);
- }
-
- if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- /*
- * Put message into Slow Queue
- * Form Pseudo header
- */
- pmsg = (u16 *)info->DSPInfoBlk;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- ppseudo_hdr->length =
- htons(info->DSPInfoBlklen + 4);
- ppseudo_hdr->source = 0x10;
- ppseudo_hdr->destination = 0x20;
- ppseudo_hdr->portdest = 0;
- ppseudo_hdr->portsrc = 0;
- ppseudo_hdr->sh_str_id = 0;
- ppseudo_hdr->control = 0;
- ppseudo_hdr->rsvd1 = 0;
- ppseudo_hdr->rsvd2 = 0;
- ppseudo_hdr->qos_class = 0;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- /* Insert application id */
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++)
- ppseudo_hdr->checksum ^= *pmsg++;
-
- info->DSPInfoBlk[8] = 0x7200;
- info->DSPInfoBlk[9] =
- htons(info->DSPInfoBlklen);
- ft1000_send_cmd(dev, info->DSPInfoBlk,
- (u16)(info->DSPInfoBlklen+4),
- 0);
- }
-
- break;
- case GET_DRV_ERR_RPT_MSG:
- pr_debug("Got GET_DRV_ERR_RPT_MSG\n");
- /*
- * copy driver error message to dsp
- * allow any outstanding ioctl to finish
- */
- mdelay(10);
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- tempword = ft1000_read_reg(dev,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- mdelay(10);
- }
-
- if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
- /*
- * Put message into Slow Queue
- * Form Pseudo header
- */
- pmsg = (u16 *)&tempbuffer[0];
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- ppseudo_hdr->length = htons(0x0012);
- ppseudo_hdr->source = 0x10;
- ppseudo_hdr->destination = 0x20;
- ppseudo_hdr->portdest = 0;
- ppseudo_hdr->portsrc = 0;
- ppseudo_hdr->sh_str_id = 0;
- ppseudo_hdr->control = 0;
- ppseudo_hdr->rsvd1 = 0;
- ppseudo_hdr->rsvd2 = 0;
- ppseudo_hdr->qos_class = 0;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- /* Insert application id */
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++)
- ppseudo_hdr->checksum ^= *pmsg++;
-
- pmsg = (u16 *)&tempbuffer[16];
- *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
- *pmsg++ = htons(0x000e);
- *pmsg++ = htons(info->DSP_TIME[0]);
- *pmsg++ = htons(info->DSP_TIME[1]);
- *pmsg++ = htons(info->DSP_TIME[2]);
- *pmsg++ = htons(info->DSP_TIME[3]);
- convert.byte[0] = info->DspVer[0];
- convert.byte[1] = info->DspVer[1];
- *pmsg++ = convert.wrd;
- convert.byte[0] = info->DspVer[2];
- convert.byte[1] = info->DspVer[3];
- *pmsg++ = convert.wrd;
- *pmsg++ = htons(info->DrvErrNum);
-
- ft1000_send_cmd(dev, (u16 *)&tempbuffer[0],
- (u16)(0x0012), 0);
- info->DrvErrNum = 0;
- }
-
- break;
- default:
- break;
- }
- }
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_parse_dpram_msg
- Description: This function will parse the message received from the DSP
- via the DPRAM interface.
- Input:
- dev - device structure
- Output:
- status - FAILURE
- SUCCESS
-
- -------------------------------------------------------------------------*/
-static int ft1000_parse_dpram_msg(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u16 doorbell;
- u16 portid;
- u16 nxtph;
- u16 total_len;
- int i = 0;
- unsigned long flags;
-
- doorbell = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- pr_debug("Doorbell = 0x%x\n", doorbell);
-
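-	/*
-	 * Each doorbell bit encodes a different DSP request (ASIC reset,
-	 * DSP-initiated ASIC reset, slow-queue message, conditional reset);
-	 * every request handled below is acknowledged by writing the same
-	 * bit back to the doorbell register.
-	 */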
- if (doorbell & FT1000_ASIC_RESET_REQ) {
- /* Copy DSP session record from info block */
- spin_lock_irqsave(&info->dpram_lock, flags);
- if (info->AsicID == ELECTRABUZZ_ID) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_RX_BASE);
- for (i = 0; i < MAX_DSP_SESS_REC; i++) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA,
- info->DSPSess.Rec[i]);
- }
- } else {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_MAG_RX_BASE);
- for (i = 0; i < MAX_DSP_SESS_REC / 2; i++) {
- outl(info->DSPSess.MagRec[i],
- dev->base_addr + FT1000_REG_MAG_DPDATA);
- }
- }
- spin_unlock_irqrestore(&info->dpram_lock, flags);
-
- /* clear ASIC RESET request */
- ft1000_write_reg(dev, FT1000_REG_DOORBELL,
- FT1000_ASIC_RESET_REQ);
- pr_debug("Got an ASIC RESET Request\n");
- ft1000_write_reg(dev, FT1000_REG_DOORBELL,
- FT1000_ASIC_RESET_DSP);
-
- if (info->AsicID == MAGNEMITE_ID) {
- /* Setting MAGNEMITE ASIC to big endian mode */
- ft1000_write_reg(dev, FT1000_REG_SUP_CTRL,
- HOST_INTF_BE);
- }
- }
-
- if (doorbell & FT1000_DSP_ASIC_RESET) {
- pr_debug("Got a dsp ASIC reset message\n");
- ft1000_write_reg(dev, FT1000_REG_DOORBELL,
- FT1000_DSP_ASIC_RESET);
- udelay(200);
- return SUCCESS;
- }
-
- if (doorbell & FT1000_DB_DPRAM_RX) {
- pr_debug("Got a slow queue message\n");
- nxtph = FT1000_DPRAM_RX_BASE + 2;
- if (info->AsicID == ELECTRABUZZ_ID) {
- total_len =
- ft1000_read_dpram(dev, FT1000_DPRAM_RX_BASE);
- } else {
- total_len =
- ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_TOTAL_LEN,
- FT1000_MAG_TOTAL_LEN_INDX));
- }
- pr_debug("total length = %d\n", total_len);
- if ((total_len < MAX_CMD_SQSIZE)
- && (total_len > sizeof(struct pseudo_hdr))) {
- total_len += nxtph;
- /*
- * ft1000_read_reg will return a value that needs to be
- * byteswap in order to get DSP_QID_OFFSET.
- */
- if (info->AsicID == ELECTRABUZZ_ID) {
- portid = (ft1000_read_dpram(dev, DSP_QID_OFFSET
- + FT1000_DPRAM_RX_BASE + 2)
- >> 8) & 0xff;
- } else {
- portid =
- ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_PORT_ID,
- FT1000_MAG_PORT_ID_INDX) & 0xff;
- }
- pr_debug("DSP_QID = 0x%x\n", portid);
-
- if (portid == DRIVERID) {
- /*
-				 * We are assuming one driver message from the
- * DSP at a time.
- */
- ft1000_proc_drvmsg(dev);
- }
- }
- ft1000_write_reg(dev, FT1000_REG_DOORBELL, FT1000_DB_DPRAM_RX);
- }
-
- if (doorbell & FT1000_DB_COND_RESET) {
- /* Reset ASIC and DSP */
- ft1000_read_dsp_timer(dev, info);
- info->DrvErrNum = DSP_CONDRESET_INFO;
- pr_debug("DSP conditional reset requested\n");
- ft1000_reset_card(dev);
- ft1000_write_reg(dev, FT1000_REG_DOORBELL,
- FT1000_DB_COND_RESET);
- }
- /* let's clear any unexpected doorbells from DSP */
- doorbell =
- doorbell & ~(FT1000_DB_DPRAM_RX | FT1000_ASIC_RESET_REQ |
- FT1000_DB_COND_RESET | 0xff00);
- if (doorbell) {
- pr_debug("Clearing unexpected doorbell = 0x%x\n", doorbell);
- ft1000_write_reg(dev, FT1000_REG_DOORBELL, doorbell);
- }
-
- return SUCCESS;
-
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_flush_fifo
- Description: This function will flush one packet from the downlink
- FIFO.
- Input:
- dev - device structure
- drv_err - driver error causing the flush fifo
- Output:
- None.
-
- -------------------------------------------------------------------------*/
-static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
-{
- struct ft1000_info *info = netdev_priv(dev);
- struct ft1000_pcmcia *pcmcia = info->priv;
- u16 i;
- u32 templong;
- u16 tempword;
-
- if (pcmcia->PktIntfErr > MAX_PH_ERR) {
- ft1000_read_dsp_timer(dev, info);
- info->DrvErrNum = DrvErrNum;
- ft1000_reset_card(dev);
- return;
- }
- /* Flush corrupted pkt from FIFO */
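-	/*
-	 * Words are popped from the downlink FIFO until the supervisor status
-	 * register reports end of frame (both low status bits set), with a
-	 * 2048-read cap as a safety net against a wedged ASIC.
-	 */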
- i = 0;
- do {
- if (info->AsicID == ELECTRABUZZ_ID) {
- tempword =
- ft1000_read_reg(dev, FT1000_REG_DFIFO);
- tempword =
- ft1000_read_reg(dev, FT1000_REG_DFIFO_STAT);
- } else {
- templong =
- inl(dev->base_addr + FT1000_REG_MAG_DFR);
- tempword =
- inw(dev->base_addr + FT1000_REG_MAG_DFSR);
- }
- i++;
- /*
- * This should never happen unless the ASIC is broken.
- * We must reset to recover.
- */
- if ((i > 2048) || (tempword == 0)) {
- ft1000_read_dsp_timer(dev, info);
- if (tempword == 0) {
- /*
- * Let's check if ASIC reads are still ok by
- * reading the Mask register which is never zero
- * at this point of the code.
- */
- tempword =
- inw(dev->base_addr +
- FT1000_REG_SUP_IMASK);
- if (tempword == 0) {
- /*
- * This indicates that we can not
- * communicate with the ASIC
- */
- info->DrvErrNum = FIFO_FLUSH_BADCNT;
- } else {
- /*
- * Let's assume that we really flush
- * the FIFO
- */
- pcmcia->PktIntfErr++;
- return;
- }
- } else {
- info->DrvErrNum = FIFO_FLUSH_MAXLIMIT;
- }
- return;
- }
- tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
- } while ((tempword & 0x03) != 0x03);
- if (info->AsicID == ELECTRABUZZ_ID) {
- i++;
- pr_debug("Flushing FIFO complete = %x\n", tempword);
- /* Flush last word in FIFO. */
- tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- /* Update FIFO counter for DSP */
- i = i * 2;
- pr_debug("Flush Data byte count to dsp = %d\n", i);
- info->fifo_cnt += i;
- ft1000_write_dpram(dev, FT1000_FIFO_LEN,
- info->fifo_cnt);
- } else {
- pr_debug("Flushing FIFO complete = %x\n", tempword);
- /* Flush last word in FIFO */
- templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
- tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
- pr_debug("FT1000_REG_SUP_STAT = 0x%x\n", tempword);
- tempword = inw(dev->base_addr + FT1000_REG_MAG_DFSR);
- pr_debug("FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
- }
- if (DrvErrNum)
- pcmcia->PktIntfErr++;
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_copy_up_pkt
- Description: This function will pull Flarion packets out of the Downlink
-  FIFO and convert them to an ethernet packet.  The ethernet packet will
-  then be delivered to the TCP/IP stack.
- Input:
- dev - device structure
- Output:
- status - FAILURE
- SUCCESS
-
- -------------------------------------------------------------------------*/
-static int ft1000_copy_up_pkt(struct net_device *dev)
-{
- u16 tempword;
- struct ft1000_info *info = netdev_priv(dev);
- u16 len;
- struct sk_buff *skb;
- u16 i;
- u8 *pbuffer = NULL;
- u8 *ptemp = NULL;
- u16 chksum;
- u32 *ptemplong;
- u32 templong;
-
- /* Read length */
- if (info->AsicID == ELECTRABUZZ_ID) {
- tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- len = tempword;
- } else {
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- len = ntohs(tempword);
- }
- chksum = tempword;
- pr_debug("Number of Bytes in FIFO = %d\n", len);
-
- if (len > ENET_MAX_SIZE) {
- pr_debug("size of ethernet packet invalid\n");
- if (info->AsicID == MAGNEMITE_ID) {
- /* Read High word to complete 32 bit access */
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- }
- ft1000_flush_fifo(dev, DSP_PKTLEN_INFO);
- info->stats.rx_errors++;
- return FAILURE;
- }
-
- skb = dev_alloc_skb(len + 12 + 2);
-
- if (skb == NULL) {
- /* Read High word to complete 32 bit access */
- if (info->AsicID == MAGNEMITE_ID)
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
-
- ft1000_flush_fifo(dev, 0);
- info->stats.rx_errors++;
- return FAILURE;
- }
- pbuffer = (u8 *)skb_put(skb, len + 12);
-
- /* Pseudo header */
- if (info->AsicID == ELECTRABUZZ_ID) {
- for (i = 1; i < 7; i++) {
- tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- chksum ^= tempword;
- }
- /* read checksum value */
- tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- } else {
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- pr_debug("Pseudo = 0x%x\n", tempword);
- chksum ^= tempword;
-
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- pr_debug("Pseudo = 0x%x\n", tempword);
- chksum ^= tempword;
-
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- pr_debug("Pseudo = 0x%x\n", tempword);
- chksum ^= tempword;
-
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- pr_debug("Pseudo = 0x%x\n", tempword);
- chksum ^= tempword;
-
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- pr_debug("Pseudo = 0x%x\n", tempword);
- chksum ^= tempword;
-
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- pr_debug("Pseudo = 0x%x\n", tempword);
- chksum ^= tempword;
-
- /* read checksum value */
- tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- pr_debug("Pseudo = 0x%x\n", tempword);
- }
-
- if (chksum != tempword) {
- pr_debug("Packet checksum mismatch 0x%x 0x%x\n",
- chksum, tempword);
- ft1000_flush_fifo(dev, DSP_PKTPHCKSUM_INFO);
- info->stats.rx_errors++;
- kfree_skb(skb);
- return FAILURE;
- }
-	/* remember the start of the buffer for the debug dump below */
- ptemp = pbuffer;
-
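-	/*
-	 * The air interface carries no Ethernet framing, so a 12-byte
-	 * destination/source address pair is fabricated here before the
-	 * frame is handed to eth_type_trans().
-	 */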
- /* fake MAC address */
- *pbuffer++ = dev->dev_addr[0];
- *pbuffer++ = dev->dev_addr[1];
- *pbuffer++ = dev->dev_addr[2];
- *pbuffer++ = dev->dev_addr[3];
- *pbuffer++ = dev->dev_addr[4];
- *pbuffer++ = dev->dev_addr[5];
- *pbuffer++ = 0x00;
- *pbuffer++ = 0x07;
- *pbuffer++ = 0x35;
- *pbuffer++ = 0xff;
- *pbuffer++ = 0xff;
- *pbuffer++ = 0xfe;
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- for (i = 0; i < len / 2; i++) {
- tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- *pbuffer++ = (u8) (tempword >> 8);
- *pbuffer++ = (u8)tempword;
- if (ft1000_chkcard(dev) == false) {
- kfree_skb(skb);
- return FAILURE;
- }
- }
-
- /* Need to read one more word if odd byte */
- if (len & 0x0001) {
- tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
- *pbuffer++ = (u8) (tempword >> 8);
- }
- } else {
- ptemplong = (u32 *)pbuffer;
- for (i = 0; i < len / 4; i++) {
- templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
- pr_debug("Data = 0x%8x\n", templong);
- *ptemplong++ = templong;
- }
-
- /* Need to read one more word if odd align. */
- if (len & 0x0003) {
- templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
- pr_debug("Data = 0x%8x\n", templong);
- *ptemplong++ = templong;
- }
-
- }
-
- pr_debug("Data passed to Protocol layer:\n");
- for (i = 0; i < len + 12; i++)
- pr_debug("Protocol Data: 0x%x\n", *ptemp++);
-
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx(skb);
-
- info->stats.rx_packets++;
- /* Add on 12 bytes for MAC address which was removed */
- info->stats.rx_bytes += (len + 12);
-
- if (info->AsicID == ELECTRABUZZ_ID) {
- /* track how many bytes have been read from FIFO - round up to
- * 16 bit word */
- tempword = len + 16;
- if (tempword & 0x01)
- tempword++;
- info->fifo_cnt += tempword;
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_FIFO_LEN);
- ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, info->fifo_cnt);
- }
-
- return SUCCESS;
-}
-
-/*---------------------------------------------------------------------------
-
- Function: ft1000_copy_down_pkt
- Description: This function will take an ethernet packet and convert it to
- a Flarion packet prior to sending it to the ASIC Downlink
- FIFO.
- Input:
- dev - device structure
- packet - address of ethernet packet
- len - length of IP packet
- Output:
- status - FAILURE
- SUCCESS
-
- -------------------------------------------------------------------------*/
-static int ft1000_copy_down_pkt(struct net_device *dev, u16 *packet, u16 len)
-{
- struct ft1000_info *info = netdev_priv(dev);
- struct ft1000_pcmcia *pcmcia = info->priv;
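-	/*
-	 * Overlay so the pseudo header can be filled in field by field and
-	 * then checksummed and written out as 16-bit words.
-	 */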
- union {
- struct pseudo_hdr blk;
- u16 buff[sizeof(struct pseudo_hdr) >> 1];
- u8 buffc[sizeof(struct pseudo_hdr)];
- } pseudo;
- int i;
- u32 *plong;
-
- /* Check if there is room on the FIFO */
- if (len > ft1000_read_fifo_len(dev)) {
- udelay(10);
- if (len > ft1000_read_fifo_len(dev))
- udelay(20);
- if (len > ft1000_read_fifo_len(dev))
- udelay(20);
- if (len > ft1000_read_fifo_len(dev))
- udelay(20);
- if (len > ft1000_read_fifo_len(dev))
- udelay(20);
- if (len > ft1000_read_fifo_len(dev))
- udelay(20);
- if (len > ft1000_read_fifo_len(dev)) {
- pr_debug("Transmit FIFO is full - pkt drop\n");
- info->stats.tx_errors++;
- return SUCCESS;
- }
- }
- /* Create pseudo header and send pseudo/ip to hardware */
- if (info->AsicID == ELECTRABUZZ_ID)
- pseudo.blk.length = len;
- else
- pseudo.blk.length = ntohs(len);
-
- pseudo.blk.source = DSPID; /* Need to swap to get in correct
- order */
- pseudo.blk.destination = HOSTID;
- pseudo.blk.portdest = NETWORKID; /* Need to swap to get in
- correct order */
- pseudo.blk.portsrc = DSPAIRID;
- pseudo.blk.sh_str_id = 0;
- pseudo.blk.control = 0;
- pseudo.blk.rsvd1 = 0;
- pseudo.blk.seq_num = 0;
- pseudo.blk.rsvd2 = pcmcia->packetseqnum++;
- pseudo.blk.qos_class = 0;
- /* Calculate pseudo header checksum */
- pseudo.blk.checksum = pseudo.buff[0];
- for (i = 1; i < 7; i++)
- pseudo.blk.checksum ^= pseudo.buff[i];
-
- /* Production Mode */
- if (info->AsicID == ELECTRABUZZ_ID) {
- /* copy first word to UFIFO_BEG reg */
- ft1000_write_reg(dev, FT1000_REG_UFIFO_BEG, pseudo.buff[0]);
- pr_debug("data 0 BEG = 0x%04x\n", pseudo.buff[0]);
-
- /* copy subsequent words to UFIFO_MID reg */
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[1]);
- pr_debug("data 1 MID = 0x%04x\n", pseudo.buff[1]);
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[2]);
- pr_debug("data 2 MID = 0x%04x\n", pseudo.buff[2]);
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[3]);
- pr_debug("data 3 MID = 0x%04x\n", pseudo.buff[3]);
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[4]);
- pr_debug("data 4 MID = 0x%04x\n", pseudo.buff[4]);
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[5]);
- pr_debug("data 5 MID = 0x%04x\n", pseudo.buff[5]);
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[6]);
- pr_debug("data 6 MID = 0x%04x\n", pseudo.buff[6]);
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[7]);
- pr_debug("data 7 MID = 0x%04x\n", pseudo.buff[7]);
-
- /* Write PPP type + IP Packet into Downlink FIFO */
- for (i = 0; i < (len >> 1) - 1; i++) {
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
- htons(*packet));
- pr_debug("data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
- packet++;
- }
-
- /* Check for odd byte */
- if (len & 0x0001) {
- ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
- htons(*packet));
- pr_debug("data MID = 0x%04x\n", htons(*packet));
- packet++;
- ft1000_write_reg(dev, FT1000_REG_UFIFO_END,
- htons(*packet));
- pr_debug("data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
- } else {
- ft1000_write_reg(dev, FT1000_REG_UFIFO_END,
- htons(*packet));
- pr_debug("data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
- }
- } else {
- outl(*(u32 *)&pseudo.buff[0],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[0]);
- outl(*(u32 *)&pseudo.buff[2],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[2]);
- outl(*(u32 *)&pseudo.buff[4],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[4]);
- outl(*(u32 *)&pseudo.buff[6],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[6]);
-
- plong = (u32 *)packet;
- /* Write PPP type + IP Packet into Downlink FIFO */
- for (i = 0; i < (len >> 2); i++)
- outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
-
- /* Check for odd alignment */
- if (len & 0x0003) {
- pr_debug("data = 0x%8x\n", *plong);
- outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
- }
- outl(1, dev->base_addr + FT1000_REG_MAG_UFER);
- }
-
- info->stats.tx_packets++;
- /* Add 14 bytes for MAC address plus ethernet type */
- info->stats.tx_bytes += (len + 14);
- return SUCCESS;
-}
-
-static struct net_device_stats *ft1000_stats(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
-
- return &info->stats;
-}
-
-static int ft1000_open(struct net_device *dev)
-{
- ft1000_reset_card(dev);
-
- /* schedule ft1000_hbchk to perform periodic heartbeat checks on DSP
- * and ASIC */
- init_timer(&poll_timer);
- poll_timer.expires = jiffies + (2 * HZ);
- poll_timer.data = (u_long)dev;
- add_timer(&poll_timer);
-
- return 0;
-}
-
-static int ft1000_close(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
-
- info->CardReady = 0;
- del_timer(&poll_timer);
-
- if (ft1000_card_present == 1) {
- pr_debug("Media is down\n");
- netif_stop_queue(dev);
-
- ft1000_disable_interrupts(dev);
- ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
-
- /* reset ASIC */
- ft1000_reset_asic(dev);
- }
- return 0;
-}
-
-static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- u8 *pdata;
-
- if (skb == NULL) {
- pr_debug("skb == NULL!!!\n");
- return 0;
- }
-
- pr_debug("length of packet = %d\n", skb->len);
-
- pdata = (u8 *)skb->data;
-
-	if (info->mediastate == 0) {
-		/* Drop packet if mediastate is down */
-		pr_debug("mediastate is down\n");
-		dev_kfree_skb(skb);
-		return SUCCESS;
-	}
-
-	if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
-		/* Drop packet which has invalid size */
-		pr_debug("invalid ethernet length\n");
-		dev_kfree_skb(skb);
-		return SUCCESS;
-	}
- ft1000_copy_down_pkt(dev, (u16 *) (pdata + ENET_HEADER_SIZE - 2),
- skb->len - ENET_HEADER_SIZE + 2);
-
- dev_kfree_skb(skb);
-
- return 0;
-}
-
-static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct ft1000_info *info = netdev_priv(dev);
- u16 tempword;
- u16 inttype;
- int cnt;
-
- if (info->CardReady == 0) {
- ft1000_disable_interrupts(dev);
- return IRQ_HANDLED;
- }
-
- if (ft1000_chkcard(dev) == false) {
- ft1000_disable_interrupts(dev);
- return IRQ_HANDLED;
- }
-
- ft1000_disable_interrupts(dev);
-
- /* Read interrupt type */
- inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
-
-	/* Make sure we process all interrupts before leaving the ISR, since
-	 * the interrupt is edge-triggered */
- while (inttype) {
- if (inttype & ISR_DOORBELL_PEND)
- ft1000_parse_dpram_msg(dev);
-
- if (inttype & ISR_RCV) {
- pr_debug("Data in FIFO\n");
-
- cnt = 0;
- do {
- /* Check if we have packets in the Downlink
- * FIFO */
- if (info->AsicID == ELECTRABUZZ_ID) {
- tempword = ft1000_read_reg(dev,
- FT1000_REG_DFIFO_STAT);
- } else {
- tempword = ft1000_read_reg(dev,
- FT1000_REG_MAG_DFSR);
- }
- if (!(tempword & 0x1f))
- break;
- ft1000_copy_up_pkt(dev);
- cnt++;
- } while (cnt < MAX_RCV_LOOP);
-
- }
- /* clear interrupts */
- tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- pr_debug("interrupt status register = 0x%x\n", tempword);
- ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
-
- /* Read interrupt type */
- inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- pr_debug("interrupt status register after clear = 0x%x\n",
- inttype);
- }
- ft1000_enable_interrupts(dev);
- return IRQ_HANDLED;
-}
-
-void stop_ft1000_card(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- struct prov_record *ptr;
- struct prov_record *tmp;
- /* int cnt; */
-
- info->CardReady = 0;
- ft1000_card_present = 0;
- netif_stop_queue(dev);
- ft1000_disable_interrupts(dev);
-
-	/* Make sure we free any memory reserved for provisioning */
- list_for_each_entry_safe(ptr, tmp, &info->prov_list, list) {
- list_del(&ptr->list);
- kfree(ptr->pprov_data);
- kfree(ptr);
- }
-
- kfree(info->priv);
-
- if (info->registered) {
- unregister_netdev(dev);
- info->registered = 0;
- }
-
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, 256);
- release_firmware(fw_entry);
- flarion_ft1000_cnt--;
-
-}
-
-static void ft1000_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct ft1000_info *ft_info;
-
- ft_info = netdev_priv(dev);
-
- strlcpy(info->driver, "ft1000", sizeof(info->driver));
- snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
- dev->base_addr);
- snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d.%d.%d",
- ft_info->DspVer[0], ft_info->DspVer[1], ft_info->DspVer[2],
- ft_info->DspVer[3]);
-}
-
-static u32 ft1000_get_link(struct net_device *dev)
-{
- struct ft1000_info *info;
-
- info = netdev_priv(dev);
- return info->mediastate;
-}
-
-static const struct ethtool_ops ops = {
- .get_drvinfo = ft1000_get_drvinfo,
- .get_link = ft1000_get_link
-};
-
-struct net_device *init_ft1000_card(struct pcmcia_device *link,
- void *ft1000_reset)
-{
- struct ft1000_info *info;
- struct ft1000_pcmcia *pcmcia;
- struct net_device *dev;
-
- static const struct net_device_ops ft1000ops = {
- .ndo_open = &ft1000_open,
- .ndo_stop = &ft1000_close,
- .ndo_start_xmit = &ft1000_start_xmit,
- .ndo_get_stats = &ft1000_stats,
- };
-
- pr_debug("irq = %d, port = 0x%04llx\n",
- link->irq, (unsigned long long)link->resource[0]->start);
-
- flarion_ft1000_cnt++;
-
- if (flarion_ft1000_cnt > 1) {
- flarion_ft1000_cnt--;
-
- dev_info(&link->dev,
- "This driver can not support more than one instance\n");
- return NULL;
- }
-
- dev = alloc_etherdev(sizeof(struct ft1000_info));
- if (!dev) {
- dev_err(&link->dev, "Failed to allocate etherdev\n");
- return NULL;
- }
-
- SET_NETDEV_DEV(dev, &link->dev);
- info = netdev_priv(dev);
-
- memset(info, 0, sizeof(struct ft1000_info));
-
- pr_debug("address of dev = 0x%p\n", dev);
- pr_debug("address of dev info = 0x%p\n", info);
- pr_debug("device name = %s\n", dev->name);
-
- memset(&info->stats, 0, sizeof(struct net_device_stats));
-
- info->priv = kzalloc(sizeof(struct ft1000_pcmcia), GFP_KERNEL);
- pcmcia = info->priv;
- pcmcia->link = link;
-
- spin_lock_init(&info->dpram_lock);
- info->DrvErrNum = 0;
- info->registered = 1;
- info->ft1000_reset = ft1000_reset;
- info->mediastate = 0;
- info->fifo_cnt = 0;
- info->CardReady = 0;
- info->DSP_TIME[0] = 0;
- info->DSP_TIME[1] = 0;
- info->DSP_TIME[2] = 0;
- info->DSP_TIME[3] = 0;
- flarion_ft1000_cnt = 0;
-
- INIT_LIST_HEAD(&info->prov_list);
-
- info->squeseqnum = 0;
-
- /* dev->hard_start_xmit = &ft1000_start_xmit; */
- /* dev->get_stats = &ft1000_stats; */
- /* dev->open = &ft1000_open; */
- /* dev->stop = &ft1000_close; */
-
- dev->netdev_ops = &ft1000ops;
-
- pr_debug("device name = %s\n", dev->name);
-
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- if (pcmcia_get_mac_from_cis(link, dev)) {
- netdev_err(dev, "Could not read mac address\n");
- goto err_dev;
- }
-
- if (request_irq(dev->irq, ft1000_interrupt, IRQF_SHARED, dev->name,
- dev)) {
- netdev_err(dev, "Could not request_irq\n");
- goto err_dev;
- }
-
- if (request_region(dev->base_addr, 256, dev->name) == NULL) {
- netdev_err(dev, "Could not request_region\n");
- goto err_irq;
- }
-
- if (register_netdev(dev)) {
- pr_debug("Could not register netdev\n");
- goto err_reg;
- }
-
- info->AsicID = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
- if (info->AsicID == ELECTRABUZZ_ID) {
- pr_debug("ELECTRABUZZ ASIC\n");
- if (request_firmware(&fw_entry, "ft1000.img",
- &link->dev) != 0) {
- pr_info("Could not open ft1000.img\n");
- goto err_unreg;
- }
- } else {
- pr_debug("MAGNEMITE ASIC\n");
- if (request_firmware(&fw_entry, "ft2000.img",
- &link->dev) != 0) {
- pr_info("Could not open ft2000.img\n");
- goto err_unreg;
- }
- }
-
- ft1000_enable_interrupts(dev);
-
- ft1000_card_present = 1;
- dev->ethtool_ops = &ops;
- pr_info("%s: addr 0x%04lx irq %d, MAC addr %pM\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
- return dev;
-
-err_unreg:
- unregister_netdev(dev);
-err_reg:
- release_region(dev->base_addr, 256);
-err_irq:
- free_irq(dev->irq, dev);
-err_dev:
- free_netdev(dev);
- return NULL;
-}
diff --git a/drivers/staging/ft1000/ft1000-usb/Makefile b/drivers/staging/ft1000/ft1000-usb/Makefile
deleted file mode 100644
index 7c4b78456254..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_FT1000_USB) += ft1000.o
-
-ft1000-y := ft1000_debug.o ft1000_download.o ft1000_hw.o ft1000_usb.o
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
deleted file mode 100644
index f241a3a5a684..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- *---------------------------------------------------------------------------
- * FT1000 driver for Flarion Flash OFDM NIC Device
- *
- * Copyright (C) 2006 Flarion Technologies, All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) any
- * later version. This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details. You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place -
- * Suite 330, Boston, MA 02111-1307, USA.
- *---------------------------------------------------------------------------
- *
- * File: ft1000_chdev.c
- *
- * Description: Custom character device dispatch routines.
- *
- * History:
- * 8/29/02 Whc Ported to Linux.
- * 6/05/06 Whc Porting to Linux 2.6.9
- *
- *---------------------------------------------------------------------------
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/poll.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-
-#include <linux/ioctl.h>
-#include <linux/debugfs.h>
-#include "ft1000_usb.h"
-
-static int ft1000_flarion_cnt;
-
-static int ft1000_open(struct inode *inode, struct file *file);
-static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait);
-static long ft1000_ioctl(struct file *file, unsigned int command,
- unsigned long argument);
-static int ft1000_release(struct inode *inode, struct file *file);
-
-/* List to free receive command buffer pool */
-struct list_head freercvpool;
-
-/* lock to arbitrate free buffer list for receive command data */
-spinlock_t free_buff_lock;
-
-int numofmsgbuf;
-
-/*
- * Table of entry-point routines for char device
- */
-static const struct file_operations ft1000fops = {
- .unlocked_ioctl = ft1000_ioctl,
- .poll = ft1000_poll_dev,
- .open = ft1000_open,
- .release = ft1000_release,
- .llseek = no_llseek,
-};
-
-/*
- ---------------------------------------------------------------------------
- * Function: ft1000_get_buffer
- *
- * Parameters:
- *
- * Returns:
- *
- * Description:
- *
- * Notes:
- *
- *---------------------------------------------------------------------------
- */
-struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist)
-{
- unsigned long flags;
- struct dpram_blk *ptr;
-
- spin_lock_irqsave(&free_buff_lock, flags);
- /* Check if buffer is available */
- if (list_empty(bufflist)) {
- pr_debug("No more buffer - %d\n", numofmsgbuf);
- ptr = NULL;
- } else {
- numofmsgbuf--;
- ptr = list_entry(bufflist->next, struct dpram_blk, list);
- list_del(&ptr->list);
- /* pr_debug("number of free msg buffers = %d\n", numofmsgbuf); */
- }
- spin_unlock_irqrestore(&free_buff_lock, flags);
-
- return ptr;
-}
-
-
-
-
-/*
- *---------------------------------------------------------------------------
- * Function: ft1000_free_buffer
- *
- * Parameters:
- *
- * Returns:
- *
- * Description:
- *
- * Notes:
- *
- *---------------------------------------------------------------------------
- */
-void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&free_buff_lock, flags);
- /* Put memory back to list */
- list_add_tail(&pdpram_blk->list, plist);
- numofmsgbuf++;
- /*pr_debug("number of free msg buffers = %d\n", numofmsgbuf); */
- spin_unlock_irqrestore(&free_buff_lock, flags);
-}
-
-/*
- *---------------------------------------------------------------------------
- * Function: ft1000_CreateDevice
- *
- * Parameters: dev - pointer to adapter object
- *
- * Returns: 0 if successful
- *
- * Description: Creates a private char device.
- *
- * Notes: Only called by init_module().
- *
- *---------------------------------------------------------------------------
- */
-int ft1000_create_dev(struct ft1000_usb *dev)
-{
- int result;
- int i;
- struct dentry *dir, *file;
- struct ft1000_debug_dirs *tmp;
-
- /* make a new device name */
- sprintf(dev->DeviceName, "%s%d", "FT1000_", dev->CardNumber);
-
- pr_debug("number of instance = %d\n", ft1000_flarion_cnt);
- pr_debug("DeviceCreated = %x\n", dev->DeviceCreated);
-
- if (dev->DeviceCreated) {
- pr_debug("\"%s\" already registered\n", dev->DeviceName);
- return -EIO;
- }
-
-
- /* register the device */
- pr_debug("\"%s\" debugfs device registration\n", dev->DeviceName);
-
- tmp = kmalloc(sizeof(struct ft1000_debug_dirs), GFP_KERNEL);
- if (tmp == NULL) {
- result = -1;
- goto fail;
- }
-
- dir = debugfs_create_dir(dev->DeviceName, NULL);
- if (IS_ERR(dir)) {
- result = PTR_ERR(dir);
- goto debug_dir_fail;
- }
-
- file = debugfs_create_file("device", S_IRUGO | S_IWUSR, dir,
- dev, &ft1000fops);
- if (IS_ERR(file)) {
- result = PTR_ERR(file);
- goto debug_file_fail;
- }
-
- tmp->dent = dir;
- tmp->file = file;
- tmp->int_number = dev->CardNumber;
- list_add(&tmp->list, &dev->nodes.list);
-
- pr_debug("registered debugfs directory \"%s\"\n", dev->DeviceName);
-
- /* initialize application information */
- dev->appcnt = 0;
- for (i = 0; i < MAX_NUM_APP; i++) {
- dev->app_info[i].nTxMsg = 0;
- dev->app_info[i].nRxMsg = 0;
- dev->app_info[i].nTxMsgReject = 0;
- dev->app_info[i].nRxMsgMiss = 0;
- dev->app_info[i].fileobject = NULL;
- dev->app_info[i].app_id = i+1;
- dev->app_info[i].DspBCMsgFlag = 0;
- dev->app_info[i].NumOfMsg = 0;
- init_waitqueue_head(&dev->app_info[i].wait_dpram_msg);
- INIT_LIST_HEAD(&dev->app_info[i].app_sqlist);
- }
-
- dev->DeviceCreated = TRUE;
- ft1000_flarion_cnt++;
-
- return 0;
-
-debug_file_fail:
- debugfs_remove(dir);
-debug_dir_fail:
- kfree(tmp);
-fail:
- return result;
-}
-
-/*
- *---------------------------------------------------------------------------
- * Function: ft1000_DestroyDeviceDEBUG
- *
- * Parameters: dev - pointer to adapter object
- *
- * Description: Destroys a private char device.
- *
- * Notes: Only called by cleanup_module().
- *
- *---------------------------------------------------------------------------
- */
-void ft1000_destroy_dev(struct net_device *netdev)
-{
- struct ft1000_info *info = netdev_priv(netdev);
- struct ft1000_usb *dev = info->priv;
- int i;
- struct dpram_blk *pdpram_blk;
- struct dpram_blk *ptr;
- struct list_head *pos, *q;
- struct ft1000_debug_dirs *dir;
-
- if (dev->DeviceCreated) {
- ft1000_flarion_cnt--;
- list_for_each_safe(pos, q, &dev->nodes.list) {
- dir = list_entry(pos, struct ft1000_debug_dirs, list);
- if (dir->int_number == dev->CardNumber) {
- debugfs_remove(dir->file);
- debugfs_remove(dir->dent);
- list_del(pos);
- kfree(dir);
- }
- }
- pr_debug("unregistered device \"%s\"\n", dev->DeviceName);
-
-		/* Make sure we free any memory reserved for the slow queue */
- for (i = 0; i < MAX_NUM_APP; i++) {
- while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
- pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next,
- struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000_free_buffer(pdpram_blk, &freercvpool);
-
- }
- wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
- }
-
- /* Remove buffer allocated for receive command data */
- if (ft1000_flarion_cnt == 0) {
- while (list_empty(&freercvpool) == 0) {
- ptr = list_entry(freercvpool.next, struct dpram_blk, list);
- list_del(&ptr->list);
- kfree(ptr->pbuffer);
- kfree(ptr);
- }
- }
- dev->DeviceCreated = FALSE;
- }
-
-
-}
-
-/*
- *---------------------------------------------------------------------------
- * Function: ft1000_open
- *
- * Parameters:
- *
- * Description:
- *
- * Notes:
- *
- *---------------------------------------------------------------------------
- */
-static int ft1000_open(struct inode *inode, struct file *file)
-{
- struct ft1000_info *info;
- struct ft1000_usb *dev = (struct ft1000_usb *)inode->i_private;
- int i, num;
-
- num = MINOR(inode->i_rdev) & 0xf;
- pr_debug("minor number=%d\n", num);
-
- info = file->private_data = netdev_priv(dev->net);
-
- pr_debug("f_owner = %p number of application = %d\n",
- &file->f_owner, dev->appcnt);
-
-	/* Check if the maximum number of applications has been exceeded */
- if (dev->appcnt > MAX_NUM_APP) {
- pr_debug("Maximum number of application exceeded\n");
- return -EACCES;
- }
-
- /* Search for available application info block */
- for (i = 0; i < MAX_NUM_APP; i++) {
- if (dev->app_info[i].fileobject == NULL)
- break;
- }
-
- /* Fail due to lack of application info block */
- if (i == MAX_NUM_APP) {
- pr_debug("Could not find an application info block\n");
- return -EACCES;
- }
-
- dev->appcnt++;
- dev->app_info[i].fileobject = &file->f_owner;
- dev->app_info[i].nTxMsg = 0;
- dev->app_info[i].nRxMsg = 0;
- dev->app_info[i].nTxMsgReject = 0;
- dev->app_info[i].nRxMsgMiss = 0;
-
- nonseekable_open(inode, file);
- return 0;
-}
-
-
-/*
- *---------------------------------------------------------------------------
- * Function: ft1000_poll_dev
- *
- * Parameters:
- *
- * Description:
- *
- * Notes:
- *
- *---------------------------------------------------------------------------
- */
-
-static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
-{
- struct net_device *netdev = file->private_data;
- struct ft1000_info *info = netdev_priv(netdev);
- struct ft1000_usb *dev = info->priv;
- int i;
-
- if (ft1000_flarion_cnt == 0) {
- pr_debug("called with ft1000_flarion_cnt value zero\n");
- return -EBADF;
- }
-
- /* Search for matching file object */
- for (i = 0; i < MAX_NUM_APP; i++) {
- if (dev->app_info[i].fileobject == &file->f_owner) {
- /* pr_debug("Message is for AppId = %d\n", dev->app_info[i].app_id); */
- break;
- }
- }
-
- /* Could not find application info block */
- if (i == MAX_NUM_APP) {
- pr_debug("Could not find application info block\n");
- return -EACCES;
- }
-
- if (list_empty(&dev->app_info[i].app_sqlist) == 0) {
- pr_debug("Message detected in slow queue\n");
- return(POLLIN | POLLRDNORM | POLLPRI);
- }
-
- poll_wait(file, &dev->app_info[i].wait_dpram_msg, wait);
- /* pr_debug("Polling for data from DSP\n"); */
-
- return 0;
-}
-
-/*
- *---------------------------------------------------------------------------
- * Function: ft1000_ioctl
- *
- * Parameters:
- *
- * Description:
- *
- * Notes:
- *
- *---------------------------------------------------------------------------
- */
-static long ft1000_ioctl(struct file *file, unsigned int command,
- unsigned long argument)
-{
- void __user *argp = (void __user *)argument;
- struct ft1000_info *info;
- struct ft1000_usb *ft1000dev;
- int result = 0;
- int cmd;
- int i;
- u16 tempword;
- unsigned long flags;
- struct timeval tv;
- struct IOCTL_GET_VER get_ver_data;
- struct IOCTL_GET_DSP_STAT get_stat_data;
- u8 ConnectionMsg[] = {
- 0x00, 0x44, 0x10, 0x20, 0x80, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x93, 0x64,
- 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x37, 0x00, 0x00, 0x00, 0x08,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7f, 0x00,
- 0x00, 0x01, 0x00, 0x00
- };
-
- unsigned short ledStat = 0;
- unsigned short conStat = 0;
-
- if (ft1000_flarion_cnt == 0) {
- pr_debug("called with ft1000_flarion_cnt of zero\n");
- return -EBADF;
- }
-
- /* pr_debug("command = 0x%x argument = 0x%8x\n", command, (u32)argument); */
-
- info = file->private_data;
- ft1000dev = info->priv;
- cmd = _IOC_NR(command);
- /* pr_debug("cmd = 0x%x\n", cmd); */
-
- /* process the command */
- switch (cmd) {
- case IOCTL_REGISTER_CMD:
- pr_debug("IOCTL_FT1000_REGISTER called\n");
- result = get_user(tempword, (__u16 __user *)argp);
- if (result) {
- pr_debug("result = %d failed to get_user\n", result);
- break;
- }
- if (tempword == DSPBCMSGID) {
- /* Search for matching file object */
- for (i = 0; i < MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- ft1000dev->app_info[i].DspBCMsgFlag = 1;
- pr_debug("Registered for broadcast messages\n");
- break;
- }
- }
- }
- break;
-
- case IOCTL_GET_VER_CMD:
- pr_debug("IOCTL_FT1000_GET_VER called\n");
-
- get_ver_data.drv_ver = FT1000_DRV_VER;
-
- if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data))) {
- pr_debug("copy fault occurred\n");
- result = -EFAULT;
- break;
- }
-
- pr_debug("driver version = 0x%x\n",
- (unsigned int)get_ver_data.drv_ver);
-
- break;
- case IOCTL_CONNECT:
- /* Connect Message */
- pr_debug("IOCTL_FT1000_CONNECT\n");
- ConnectionMsg[79] = 0xfc;
- result = card_send_command(ft1000dev, ConnectionMsg, 0x4c);
-
- break;
- case IOCTL_DISCONNECT:
- /* Disconnect Message */
- pr_debug("IOCTL_FT1000_DISCONNECT\n");
- ConnectionMsg[79] = 0xfd;
- result = card_send_command(ft1000dev, ConnectionMsg, 0x4c);
- break;
- case IOCTL_GET_DSP_STAT_CMD:
- /* pr_debug("IOCTL_FT1000_GET_DSP_STAT\n"); */
- memset(&get_stat_data, 0, sizeof(get_stat_data));
- memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
- memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
- memcpy(get_stat_data.Sku, info->Sku, SKUSZ);
- memcpy(get_stat_data.eui64, info->eui64, EUISZ);
-
- if (info->ProgConStat != 0xFF) {
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED,
- (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
- get_stat_data.LedStat = ntohs(ledStat);
- pr_debug("LedStat = 0x%x\n", get_stat_data.LedStat);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE,
- (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
- get_stat_data.ConStat = ntohs(conStat);
- pr_debug("ConStat = 0x%x\n", get_stat_data.ConStat);
- } else {
- get_stat_data.ConStat = 0x0f;
- }
-
-
- get_stat_data.nTxPkts = info->stats.tx_packets;
- get_stat_data.nRxPkts = info->stats.rx_packets;
- get_stat_data.nTxBytes = info->stats.tx_bytes;
- get_stat_data.nRxBytes = info->stats.rx_bytes;
- do_gettimeofday(&tv);
- get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
- pr_debug("Connection Time = %d\n", (int)get_stat_data.ConTm);
- if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data))) {
- pr_debug("copy fault occurred\n");
- result = -EFAULT;
- break;
- }
- pr_debug("GET_DSP_STAT succeed\n");
- break;
- case IOCTL_SET_DPRAM_CMD:
- {
- struct IOCTL_DPRAM_BLK *dpram_data = NULL;
- /* struct IOCTL_DPRAM_COMMAND dpram_command; */
- u16 qtype;
- u16 msgsz;
- struct pseudo_hdr *ppseudo_hdr;
- u16 *pmsg;
- u16 total_len;
- u16 app_index;
- u16 status;
-
- /* pr_debug("IOCTL_FT1000_SET_DPRAM called\n");*/
-
-
- if (ft1000_flarion_cnt == 0)
- return -EBADF;
-
- if (ft1000dev->DrvMsgPend)
- return -ENOTTY;
-
- if (ft1000dev->fProvComplete == 0)
- return -EACCES;
-
- ft1000dev->fAppMsgPend = true;
-
- if (info->CardReady) {
-
- /* pr_debug("try to SET_DPRAM\n"); */
-
- /* Get the length field to see how many bytes to copy */
- result = get_user(msgsz, (__u16 __user *)argp);
- if (result)
- break;
- msgsz = ntohs(msgsz);
- /* pr_debug("length of message = %d\n", msgsz); */
-
- if (msgsz > MAX_CMD_SQSIZE) {
- pr_debug("bad message length = %d\n", msgsz);
- result = -EINVAL;
- break;
- }
-
- result = -ENOMEM;
- dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);
- if (!dpram_data)
- break;
-
- if (copy_from_user(dpram_data, argp, msgsz+2)) {
- pr_debug("copy fault occurred\n");
- result = -EFAULT;
- } else {
- /* Check if this message came from a registered application */
- for (i = 0; i < MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner)
- break;
- }
- if (i == MAX_NUM_APP) {
- pr_debug("No matching application fileobject\n");
- result = -EINVAL;
- kfree(dpram_data);
- break;
- }
- app_index = i;
-
-				/* Check the message qtype, which is the lower byte of qos_class */
- qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
- /* pr_debug("qtype = %d\n", qtype); */
- if (!qtype) {
- /* Put message into Slow Queue */
- /* Only put a message into the DPRAM if msg doorbell is available */
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- /* pr_debug("READ REGISTER tempword=%x\n", tempword); */
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 2ms and try again due to DSP doorbell busy */
- mdelay(2);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 1ms and try again due to DSP doorbell busy */
- mdelay(1);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 3ms and try again due to DSP doorbell busy */
- mdelay(3);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- pr_debug("Doorbell not available\n");
- result = -ENOTTY;
- kfree(dpram_data);
- break;
- }
- }
- }
- }
- }
-
- /*pr_debug("finished reading register\n"); */
-
-					/* Make sure the message fits within the slow queue size limits */
- if ((msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ)) {
- /* Need to put sequence number plus new checksum for message */
- pmsg = (u16 *)&dpram_data->pseudohdr;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- total_len = msgsz+2;
- if (total_len & 0x1)
- total_len++;
-
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- ppseudo_hdr->portsrc = ft1000dev->app_info[app_index].app_id;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- /* pr_debug("checksum = 0x%x\n", ppseudo_hdr->checksum); */
- for (i = 1; i < 7; i++) {
- ppseudo_hdr->checksum ^= *pmsg++;
- /* pr_debug("checksum = 0x%x\n", ppseudo_hdr->checksum); */
- }
- pmsg++;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- result = card_send_command(ft1000dev, dpram_data, total_len+2);
-
-
- ft1000dev->app_info[app_index].nTxMsg++;
- } else {
- result = -EINVAL;
- }
- }
- }
- } else {
-			pr_debug("Card not ready to take messages\n");
- result = -EACCES;
- }
- kfree(dpram_data);
-
- }
- break;
- case IOCTL_GET_DPRAM_CMD:
- {
- struct dpram_blk *pdpram_blk;
- struct IOCTL_DPRAM_BLK __user *pioctl_dpram;
- int msglen;
-
- /* pr_debug("IOCTL_FT1000_GET_DPRAM called\n"); */
-
- if (ft1000_flarion_cnt == 0)
- return -EBADF;
-
- /* Search for matching file object */
- for (i = 0; i < MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- /*pr_debug("Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
- break;
- }
- }
-
- /* Could not find application info block */
- if (i == MAX_NUM_APP) {
- pr_debug("Could not find application info block\n");
- result = -EBADF;
- break;
- }
-
- result = 0;
- pioctl_dpram = argp;
- if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
- /* pr_debug("Message detected in slow queue\n"); */
- spin_lock_irqsave(&free_buff_lock, flags);
- pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next,
- struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000dev->app_info[i].NumOfMsg--;
- /* pr_debug("NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
- spin_unlock_irqrestore(&free_buff_lock, flags);
- msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
- result = get_user(msglen, &pioctl_dpram->total_len);
- if (result)
- break;
- msglen = htons(msglen);
- /* pr_debug("msg length = %x\n", msglen); */
- if (copy_to_user(&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen)) {
- pr_debug("copy fault occurred\n");
- result = -EFAULT;
- break;
- }
-
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- result = msglen;
- }
- /* pr_debug("IOCTL_FT1000_GET_DPRAM no message\n"); */
- }
- break;
-
- default:
- pr_debug("unknown command: 0x%x\n", command);
- result = -ENOTTY;
- break;
- }
- ft1000dev->fAppMsgPend = false;
- return result;
-}
-
-/*
- *---------------------------------------------------------------------------
- * Function: ft1000_release
- *
- * Parameters:
- *
- * Description:
- *
- * Notes:
- *
- *---------------------------------------------------------------------------
- */
-static int ft1000_release(struct inode *inode, struct file *file)
-{
- struct ft1000_info *info;
- struct net_device *dev;
- struct ft1000_usb *ft1000dev;
- int i;
- struct dpram_blk *pdpram_blk;
- struct dpram_blk *tmp;
-
- dev = file->private_data;
- info = netdev_priv(dev);
- ft1000dev = info->priv;
-
- if (ft1000_flarion_cnt == 0) {
- ft1000dev->appcnt--;
- return -EBADF;
- }
-
- /* Search for matching file object */
- for (i = 0; i < MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- /* pr_debug("Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
- break;
- }
- }
-
- if (i == MAX_NUM_APP)
- return 0;
-
- list_for_each_entry_safe(pdpram_blk, tmp, &ft1000dev->app_info[i].app_sqlist, list) {
-		pr_debug("Remove and free memory queued up on slow queue\n");
- list_del(&pdpram_blk->list);
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
-
- /* initialize application information */
- ft1000dev->appcnt--;
- pr_debug("appcnt = %d\n", ft1000dev->appcnt);
- ft1000dev->app_info[i].fileobject = NULL;
-
- return 0;
-}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
deleted file mode 100644
index cf850212f4b6..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
- *
- * This file is part of Express Card USB Driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/usb.h>
-#include <linux/vmalloc.h>
-#include "ft1000_usb.h"
-
-
-#define DWNLD_HANDSHAKE_LOC 0x02
-#define DWNLD_TYPE_LOC 0x04
-#define DWNLD_SIZE_MSW_LOC 0x06
-#define DWNLD_SIZE_LSW_LOC 0x08
-#define DWNLD_PS_HDR_LOC 0x0A
-
-#define MAX_DSP_WAIT_LOOPS 40
-#define DSP_WAIT_SLEEP_TIME 1000 /* 1 millisecond */
-#define DSP_WAIT_DISPATCH_LVL 50 /* 50 usec */
-
-#define HANDSHAKE_TIMEOUT_VALUE 0xF1F1
-#define HANDSHAKE_RESET_VALUE 0xFEFE /* When DSP requests startover */
-#define HANDSHAKE_RESET_VALUE_USB 0xFE7E /* When DSP requests startover */
-#define HANDSHAKE_DSP_BL_READY 0xFEFE /* At start DSP writes this when bootloader ready */
-#define HANDSHAKE_DSP_BL_READY_USB 0xFE7E /* At start DSP writes this when bootloader ready */
-#define HANDSHAKE_DRIVER_READY 0xFFFF /* Driver writes after receiving 0xFEFE */
-#define HANDSHAKE_SEND_DATA 0x0000 /* DSP writes this when ready for more data */
-
-#define HANDSHAKE_REQUEST 0x0001 /* Request from DSP */
-#define HANDSHAKE_RESPONSE 0x0000 /* Satisfied DSP request */
-
-#define REQUEST_CODE_LENGTH 0x0000
-#define REQUEST_RUN_ADDRESS 0x0001
-#define REQUEST_CODE_SEGMENT 0x0002 /* In WORD count */
-#define REQUEST_DONE_BL 0x0003
-#define REQUEST_DONE_CL 0x0004
-#define REQUEST_VERSION_INFO 0x0005
-#define REQUEST_CODE_BY_VERSION 0x0006
-#define REQUEST_MAILBOX_DATA 0x0007
-#define REQUEST_FILE_CHECKSUM 0x0008
-
-#define STATE_START_DWNLD 0x01
-#define STATE_BOOT_DWNLD 0x02
-#define STATE_CODE_DWNLD 0x03
-#define STATE_DONE_DWNLD 0x04
-#define STATE_SECTION_PROV 0x05
-#define STATE_DONE_PROV 0x06
-#define STATE_DONE_FILE 0x07
-
-#define MAX_LENGTH 0x7f0
-
-/* Temporary download mechanism for Magnemite */
-#define DWNLD_MAG_TYPE_LOC 0x00
-#define DWNLD_MAG_LEN_LOC 0x01
-#define DWNLD_MAG_ADDR_LOC 0x02
-#define DWNLD_MAG_CHKSUM_LOC 0x03
-#define DWNLD_MAG_VAL_LOC 0x04
-
-#define HANDSHAKE_MAG_DSP_BL_READY 0xFEFE0000 /* At start DSP writes this when bootloader ready */
-#define HANDSHAKE_MAG_DSP_ENTRY	0x01000000	/* DSP writes this to request the entry address */
-#define HANDSHAKE_MAG_DSP_DATA		0x02000000	/* DSP writes this to request a data block */
-#define HANDSHAKE_MAG_DSP_DONE 0x03000000 /* Dsp writes this to indicate download done */
-
-#define HANDSHAKE_MAG_DRV_READY 0xFFFF0000 /* Driver writes this to indicate ready to download */
-#define HANDSHAKE_MAG_DRV_DATA 0x02FECDAB /* Driver writes this to indicate data available to DSP */
-#define HANDSHAKE_MAG_DRV_ENTRY 0x01FECDAB /* Driver writes this to indicate entry point to DSP */
-
-#define HANDSHAKE_MAG_TIMEOUT_VALUE 0xF1F1
-
-
-/* New Magnemite downloader */
-#define DWNLD_MAG1_HANDSHAKE_LOC 0x00
-#define DWNLD_MAG1_TYPE_LOC 0x01
-#define DWNLD_MAG1_SIZE_LOC 0x02
-#define DWNLD_MAG1_PS_HDR_LOC 0x03
-
-struct dsp_file_hdr {
- long version_id; /* Version ID of this image format. */
- long package_id; /* Package ID of code release. */
- long build_date; /* Date/time stamp when file was built. */
- long commands_offset; /* Offset to attached commands in Pseudo Hdr format. */
- long loader_offset; /* Offset to bootloader code. */
- long loader_code_address; /* Start address of bootloader. */
- long loader_code_end; /* Where bootloader code ends. */
- long loader_code_size;
-	long version_data_offset;	/* Offset where scrambled version data begins. */
- long version_data_size; /* Size, in words, of scrambled version data. */
- long nDspImages; /* Number of DSP images in file. */
-};
-
-struct dsp_image_info {
- long coff_date; /* Date/time when DSP Coff image was built. */
- long begin_offset; /* Offset in file where image begins. */
-	long end_offset;	/* Offset in file where image ends. */
- long run_address; /* On chip Start address of DSP code. */
- long image_size; /* Size of image. */
- long version; /* Embedded version # of DSP code. */
- unsigned short checksum; /* DSP File checksum */
- unsigned short pad1;
-} __packed;
-
-
-/* checks if the doorbell register is cleared */
-static int check_usb_db(struct ft1000_usb *ft1000dev)
-{
- int loopcnt;
- u16 temp;
- int status;
-
- loopcnt = 0;
-
- while (loopcnt < 10) {
- status = ft1000_read_register(ft1000dev, &temp,
- FT1000_REG_DOORBELL);
- pr_debug("read FT1000_REG_DOORBELL value is %x\n", temp);
- if (temp & 0x0080) {
- pr_debug("Got checkusb doorbell\n");
- status = ft1000_write_register(ft1000dev, 0x0080,
- FT1000_REG_DOORBELL);
- status = ft1000_write_register(ft1000dev, 0x0100,
- FT1000_REG_DOORBELL);
- status = ft1000_write_register(ft1000dev, 0x8000,
- FT1000_REG_DOORBELL);
- break;
- }
- loopcnt++;
- usleep_range(10000, 11000);
-
- }
-
- loopcnt = 0;
- while (loopcnt < 20) {
- status = ft1000_read_register(ft1000dev, &temp,
- FT1000_REG_DOORBELL);
- pr_debug("Doorbell = 0x%x\n", temp);
- if (temp & 0x8000) {
- loopcnt++;
- usleep_range(10000, 11000);
- } else {
- pr_debug("door bell is cleared, return 0\n");
- return 0;
- }
- }
-
- return -1;
-}
-
-/* gets the handshake and compares it with the expected value */
-static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
-{
- u16 handshake;
- int loopcnt;
- int status = 0;
-
- loopcnt = 0;
-
- while (loopcnt < 100) {
- /* Need to clear downloader doorbell if Hartley ASIC */
- status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX,
- FT1000_REG_DOORBELL);
- if (ft1000dev->fcodeldr) {
- pr_debug("fcodeldr is %d\n", ft1000dev->fcodeldr);
- ft1000dev->fcodeldr = 0;
- status = check_usb_db(ft1000dev);
- if (status != 0) {
- pr_debug("check_usb_db failed\n");
- break;
- }
- status = ft1000_write_register(ft1000dev,
- FT1000_DB_DNLD_RX,
- FT1000_REG_DOORBELL);
- }
-
- status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC,
- (u8 *)&handshake, 1);
- handshake = ntohs(handshake);
-
- if (status)
- return HANDSHAKE_TIMEOUT_VALUE;
-
- if ((handshake == expected_value) ||
- (handshake == HANDSHAKE_RESET_VALUE_USB)) {
- return handshake;
- }
- loopcnt++;
- usleep_range(10000, 11000);
- }
-
- return HANDSHAKE_TIMEOUT_VALUE;
-}
-
-/* write the handshake value to the handshake location */
-static void put_handshake(struct ft1000_usb *ft1000dev, u16 handshake_value)
-{
- u32 tempx;
- u16 tempword;
- int status;
-
- tempx = (u32)handshake_value;
- tempx = ntohl(tempx);
-
- tempword = (u16)(tempx & 0xffff);
- status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC,
- tempword, 0);
- tempword = (u16)(tempx >> 16);
- status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC,
- tempword, 1);
- status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX,
- FT1000_REG_DOORBELL);
-}
-
-static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
-{
- u16 handshake;
- int loopcnt;
- u16 temp;
- int status = 0;
-
- loopcnt = 0;
- handshake = 0;
-
- while (loopcnt < 100) {
- if (ft1000dev->usbboot == 2) {
- status = ft1000_read_dpram32(ft1000dev, 0,
- (u8 *)&ft1000dev->tempbuf[0], 64);
- for (temp = 0; temp < 16; temp++) {
- pr_debug("tempbuf %d = 0x%x\n",
- temp, ft1000dev->tempbuf[temp]);
- }
- status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC,
- (u8 *)&handshake, 1);
- pr_debug("handshake from read_dpram16 = 0x%x\n",
- handshake);
- if (ft1000dev->dspalive == ft1000dev->tempbuf[6]) {
- handshake = 0;
- } else {
- handshake = ft1000dev->tempbuf[1];
- ft1000dev->dspalive =
- ft1000dev->tempbuf[6];
- }
- } else {
- status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC,
- (u8 *)&handshake, 1);
- }
-
- loopcnt++;
- usleep_range(10000, 11000);
- handshake = ntohs(handshake);
- if ((handshake == expected_value) ||
- (handshake == HANDSHAKE_RESET_VALUE_USB))
- return handshake;
- }
-
- return HANDSHAKE_TIMEOUT_VALUE;
-}
-
-static void put_handshake_usb(struct ft1000_usb *ft1000dev, u16 handshake_value)
-{
- int i;
-
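-	/* No handshake value is written here; this is just a short delay loop. */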
- for (i = 0; i < 1000; i++)
- ;
-}
-
-static u16 get_request_type(struct ft1000_usb *ft1000dev)
-{
- u16 request_type;
- int status;
- u16 tempword;
- u32 tempx;
-
- if (ft1000dev->bootmode == 1) {
- status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC,
- (u8 *)&tempx);
- tempx = ntohl(tempx);
- } else {
- tempx = 0;
- status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_TYPE_LOC,
- (u8 *)&tempword, 1);
- tempx |= (tempword << 16);
- tempx = ntohl(tempx);
- }
- request_type = (u16)tempx;
-
- return request_type;
-}
-
-static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
-{
- u16 request_type;
- int status;
- u16 tempword;
- u32 tempx;
-
- if (ft1000dev->bootmode == 1) {
- status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC,
- (u8 *)&tempx);
- tempx = ntohl(tempx);
- } else {
- if (ft1000dev->usbboot == 2) {
- tempx = ft1000dev->tempbuf[2];
- tempword = ft1000dev->tempbuf[3];
- } else {
- tempx = 0;
- status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_TYPE_LOC,
- (u8 *)&tempword, 1);
- }
- tempx |= (tempword << 16);
- tempx = ntohl(tempx);
- }
- request_type = (u16)tempx;
-
- return request_type;
-}
-
-static long get_request_value(struct ft1000_usb *ft1000dev)
-{
- u32 value;
- u16 tempword;
- int status;
-
- if (ft1000dev->bootmode == 1) {
- status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_SIZE_LOC,
- (u8 *)&value);
- value = ntohl(value);
- } else {
- status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC,
- (u8 *)&tempword, 0);
- value = tempword;
- status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC,
- (u8 *)&tempword, 1);
- value |= (tempword << 16);
- value = ntohl(value);
- }
-
- return value;
-}
-
-
-/* writes a value to DWNLD_MAG1_SIZE_LOC */
-static void put_request_value(struct ft1000_usb *ft1000dev, long lvalue)
-{
- u32 tempx;
- int status;
-
- tempx = ntohl(lvalue);
- status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC,
- (u8 *)&tempx);
-}
-
-
-
-/* returns the checksum of the pseudo header */
-static u16 hdr_checksum(struct pseudo_hdr *pHdr)
-{
- u16 *usPtr = (u16 *)pHdr;
- u16 chksum;
-
-
- chksum = (((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
- usPtr[4]) ^ usPtr[5]) ^ usPtr[6];
-
- return chksum;
-}
-
-static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (buff_w[i] != buff_r[i + offset])
- return -EREMOTEIO;
- }
-
- return 0;
-}
-
-static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
- u16 tempbuffer[], u16 dpram)
-{
- int status;
- u16 resultbuffer[64];
- int i;
-
- for (i = 0; i < 10; i++) {
- status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 64);
- if (status == 0) {
- /* Work around for ASIC bit stuffing problem. */
- if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
- status = ft1000_write_dpram32(ft1000dev,
- dpram+12, (u8 *)&tempbuffer[24],
- 64);
- }
- /* Let's check the data written */
- status = ft1000_read_dpram32(ft1000dev, dpram,
- (u8 *)&resultbuffer[0], 64);
- if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
- if (check_buffers(tempbuffer, resultbuffer, 28,
- 0)) {
- pr_debug("DPRAM write failed 1 during bootloading\n");
- usleep_range(9000, 11000);
- break;
- }
- status = ft1000_read_dpram32(ft1000dev,
- dpram+12,
- (u8 *)&resultbuffer[0], 64);
-
- if (check_buffers(tempbuffer, resultbuffer, 16,
- 24)) {
- pr_debug("DPRAM write failed 2 during bootloading\n");
- usleep_range(9000, 11000);
- break;
- }
- } else {
- if (check_buffers(tempbuffer, resultbuffer, 32,
- 0)) {
- pr_debug("DPRAM write failed 3 during bootloading\n");
- usleep_range(9000, 11000);
- break;
- }
- }
- if (status == 0)
- break;
- }
- }
- return status;
-}
-
-/* writes a block of DSP image to DPRAM
- * Parameters: struct ft1000_usb - device structure
- * u16 **pUsFile - DSP image file pointer in u16
- * u8 **pUcFile - DSP image file pointer in u8
- * long word_length - length of the buffer to be written to DPRAM
- */
-static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
- long word_length)
-{
- int status = 0;
- u16 dpram;
- int loopcnt, i;
- u16 tempword;
- u16 tempbuffer[64];
-
- /*pr_debug("start word_length = %d\n",(int)word_length); */
- dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
- tempword = *(*pUsFile);
- (*pUsFile)++;
- status = ft1000_write_dpram16(ft1000dev, dpram, tempword, 0);
- tempword = *(*pUsFile);
- (*pUsFile)++;
- status = ft1000_write_dpram16(ft1000dev, dpram++, tempword, 1);
-
- *pUcFile = *pUcFile + 4;
- word_length--;
- tempword = (u16)word_length;
- word_length = (word_length / 16) + 1;
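-	/*
-	 * tempword counts the remaining 32-bit words; each pass of the loop
-	 * below stages one 64-byte (16-dword) block for DPRAM, zero-padded
-	 * at the end of the image.
-	 */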
- for (; word_length > 0; word_length--) { /* In words */
- loopcnt = 0;
- for (i = 0; i < 32; i++) {
- if (tempword != 0) {
- tempbuffer[i++] = *(*pUsFile);
- (*pUsFile)++;
- tempbuffer[i] = *(*pUsFile);
- (*pUsFile)++;
- *pUcFile = *pUcFile + 4;
- loopcnt++;
- tempword--;
- } else {
- tempbuffer[i++] = 0;
- tempbuffer[i] = 0;
- }
- }
-
- /*pr_debug("loopcnt is %d\n", loopcnt); */
- /*pr_debug("write_blk: bootmode = %d\n", bootmode); */
- /*pr_debug("write_blk: dpram = %x\n", dpram); */
- if (ft1000dev->bootmode == 0) {
- if (dpram >= 0x3F4)
- status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 8);
- else
- status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 64);
- } else {
- status = write_dpram32_and_check(ft1000dev, tempbuffer,
- dpram);
- if (status != 0) {
- pr_debug("Write failed tempbuffer[31] = 0x%x\n",
- tempbuffer[31]);
- break;
- }
- }
- dpram = dpram + loopcnt;
- }
- return status;
-}
-
-static void usb_dnld_complete(struct urb *urb)
-{
- /* pr_debug("****** usb_dnld_complete\n"); */
-}
-
-/* writes a block of DSP image to DPRAM
- * Parameters: struct ft1000_usb - device structure
- * u16 **pUsFile - DSP image file pointer in u16
- * u8 **pUcFile - DSP image file pointer in u8
- * long word_length - length of the buffer to be written to DPRAM
- */
-static int write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
- u8 **pUcFile, long word_length)
-{
- int byte_length;
-
- byte_length = word_length * 4;
-
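-	/* Never submit an exact multiple of 64 bytes, and never less than 68 bytes. */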
- if (byte_length && ((byte_length % 64) == 0))
- byte_length += 4;
-
- if (byte_length < 64)
- byte_length = 68;
-
- usb_init_urb(ft1000dev->tx_urb);
- memcpy(ft1000dev->tx_buf, *pUcFile, byte_length);
- usb_fill_bulk_urb(ft1000dev->tx_urb,
- ft1000dev->dev,
- usb_sndbulkpipe(ft1000dev->dev,
- ft1000dev->bulk_out_endpointAddr),
- ft1000dev->tx_buf, byte_length, usb_dnld_complete,
- ft1000dev);
-
- usb_submit_urb(ft1000dev->tx_urb, GFP_ATOMIC);
-
- *pUsFile = *pUsFile + (word_length << 1);
- *pUcFile = *pUcFile + (word_length << 2);
-
- return 0;
-}
-
-static int scram_start_dwnld(struct ft1000_usb *ft1000dev, u16 *hshake,
- u32 *state)
-{
- int status = 0;
-
- if (ft1000dev->usbboot)
- *hshake = get_handshake_usb(ft1000dev, HANDSHAKE_DSP_BL_READY);
- else
- *hshake = get_handshake(ft1000dev, HANDSHAKE_DSP_BL_READY);
- if (*hshake == HANDSHAKE_DSP_BL_READY) {
- pr_debug("handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
- put_handshake(ft1000dev, HANDSHAKE_DRIVER_READY);
- } else if (*hshake == HANDSHAKE_TIMEOUT_VALUE) {
- status = -ETIMEDOUT;
- } else {
- pr_debug("Download error: Handshake failed\n");
- status = -ENETRESET;
- }
- *state = STATE_BOOT_DWNLD;
- return status;
-}
-
-static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
- u8 **c_file, const u8 *endpoint, bool boot_case)
-{
- long word_length;
- int status = 0;
-
- word_length = get_request_value(ft1000dev);
- /*pr_debug("word_length = 0x%x\n", (int)word_length); */
- /*NdisMSleep (100); */
- if (word_length > MAX_LENGTH) {
- pr_debug("Download error: Max length exceeded\n");
- return -1;
- }
-	if ((word_length * 2 + (long)*c_file) > (long)endpoint) {
- /* Error, beyond boot code range.*/
- pr_debug("Download error: Requested len=%d exceeds BOOT code boundary\n",
- (int)word_length);
- return -1;
- }
- if (word_length & 0x1)
- word_length++;
- word_length = word_length / 2;
-
- if (boot_case) {
- status = write_blk(ft1000dev, s_file, c_file, word_length);
- /*pr_debug("write_blk returned %d\n", status); */
- } else {
- status = write_blk_fifo(ft1000dev, s_file, c_file, word_length);
- if (ft1000dev->usbboot == 0)
- ft1000dev->usbboot++;
- if (ft1000dev->usbboot == 1)
- status |= ft1000_write_dpram16(ft1000dev,
- DWNLD_MAG1_PS_HDR_LOC, 0, 0);
- }
- return status;
-}
-
-/* Scramble downloader for Harley based ASIC via USB interface */
-int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
- u32 FileLength)
-{
- int status = 0;
- u32 state;
- u16 handshake;
- struct pseudo_hdr *pseudo_header;
- u16 pseudo_header_len;
- long word_length;
- u16 request;
- u16 temp;
-
- struct dsp_file_hdr *file_hdr;
- struct dsp_image_info *dsp_img_info = NULL;
- long requested_version;
- bool correct_version;
- struct drv_msg *mailbox_data;
- u16 *data = NULL;
- u16 *s_file = NULL;
- u8 *c_file = NULL;
- u8 *boot_end = NULL, *code_end = NULL;
- int image;
- long loader_code_address, loader_code_size = 0;
- long run_address = 0, run_size = 0;
-
- u32 templong;
- u32 image_chksum = 0;
-
- u16 dpram = 0;
- u8 *pbuffer;
- struct prov_record *pprov_record;
- struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
-
- ft1000dev->fcodeldr = 0;
- ft1000dev->usbboot = 0;
- ft1000dev->dspalive = 0xffff;
-
- /*
- * Get version id of file, at first 4 bytes of file, for newer files.
- */
-
- state = STATE_START_DWNLD;
-
- file_hdr = pFileStart;
-
- ft1000_write_register(ft1000dev, 0x800, FT1000_REG_MAG_WATERMARK);
-
- s_file = (u16 *) (pFileStart + file_hdr->loader_offset);
- c_file = (u8 *) (pFileStart + file_hdr->loader_offset);
-
- boot_end = (u8 *) (pFileStart + file_hdr->loader_code_end);
-
- loader_code_address = file_hdr->loader_code_address;
- loader_code_size = file_hdr->loader_code_size;
- correct_version = false;
-
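-	/*
-	 * Drive the DSP download state machine: bootloader image first, then
-	 * the main code image selected by version, then the provisioning
-	 * section.
-	 */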
- while ((status == 0) && (state != STATE_DONE_FILE)) {
- switch (state) {
- case STATE_START_DWNLD:
- status = scram_start_dwnld(ft1000dev, &handshake,
- &state);
- break;
-
- case STATE_BOOT_DWNLD:
- pr_debug("STATE_BOOT_DWNLD\n");
- ft1000dev->bootmode = 1;
- handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST);
- if (handshake == HANDSHAKE_REQUEST) {
- /*
- * Get type associated with the request.
- */
- request = get_request_type(ft1000dev);
- switch (request) {
- case REQUEST_RUN_ADDRESS:
- pr_debug("REQUEST_RUN_ADDRESS\n");
- put_request_value(ft1000dev,
- loader_code_address);
- break;
- case REQUEST_CODE_LENGTH:
- pr_debug("REQUEST_CODE_LENGTH\n");
- put_request_value(ft1000dev,
- loader_code_size);
- break;
- case REQUEST_DONE_BL:
- pr_debug("REQUEST_DONE_BL\n");
- /* Reposition ptrs to beginning of code section */
- s_file = (u16 *) (boot_end);
- c_file = (u8 *) (boot_end);
- /* pr_debug("download:s_file = 0x%8x\n", (int)s_file); */
- /* pr_debug("FT1000:download:c_file = 0x%8x\n", (int)c_file); */
- state = STATE_CODE_DWNLD;
- ft1000dev->fcodeldr = 1;
- break;
- case REQUEST_CODE_SEGMENT:
- status = request_code_segment(ft1000dev,
- &s_file, &c_file,
- boot_end,
- true);
- break;
- default:
- pr_debug("Download error: Bad request type=%d in BOOT download state\n",
- request);
- status = -1;
- break;
- }
- if (ft1000dev->usbboot)
- put_handshake_usb(ft1000dev,
- HANDSHAKE_RESPONSE);
- else
- put_handshake(ft1000dev,
- HANDSHAKE_RESPONSE);
- } else {
- pr_debug("Download error: Handshake failed\n");
- status = -1;
- }
-
- break;
-
- case STATE_CODE_DWNLD:
- /* pr_debug("STATE_CODE_DWNLD\n"); */
- ft1000dev->bootmode = 0;
- if (ft1000dev->usbboot)
- handshake =
- get_handshake_usb(ft1000dev,
- HANDSHAKE_REQUEST);
- else
- handshake =
- get_handshake(ft1000dev, HANDSHAKE_REQUEST);
- if (handshake == HANDSHAKE_REQUEST) {
- /*
- * Get type associated with the request.
- */
- if (ft1000dev->usbboot)
- request =
- get_request_type_usb(ft1000dev);
- else
- request = get_request_type(ft1000dev);
- switch (request) {
- case REQUEST_FILE_CHECKSUM:
- pr_debug("image_chksum = 0x%8x\n",
- image_chksum);
- put_request_value(ft1000dev,
- image_chksum);
- break;
- case REQUEST_RUN_ADDRESS:
- pr_debug("REQUEST_RUN_ADDRESS\n");
- if (correct_version) {
- pr_debug("run_address = 0x%8x\n",
- (int)run_address);
- put_request_value(ft1000dev,
- run_address);
- } else {
- pr_debug("Download error: Got Run address request before image offset request\n");
- status = -1;
- break;
- }
- break;
- case REQUEST_CODE_LENGTH:
- pr_debug("REQUEST_CODE_LENGTH\n");
- if (correct_version) {
- pr_debug("run_size = 0x%8x\n",
- (int)run_size);
- put_request_value(ft1000dev,
- run_size);
- } else {
- pr_debug("Download error: Got Size request before image offset request\n");
- status = -1;
- break;
- }
- break;
- case REQUEST_DONE_CL:
- ft1000dev->usbboot = 3;
- /* Reposition ptrs to beginning of provisioning section */
-				s_file = (u16 *)(pFileStart +
-						 file_hdr->commands_offset);
-				c_file = (u8 *)(pFileStart +
-						file_hdr->commands_offset);
- state = STATE_DONE_DWNLD;
- break;
- case REQUEST_CODE_SEGMENT:
- /* pr_debug("REQUEST_CODE_SEGMENT - CODELOADER\n"); */
- if (!correct_version) {
- pr_debug("Download error: Got Code Segment request before image offset request\n");
- status = -1;
- break;
- }
-
- status = request_code_segment(ft1000dev,
- &s_file, &c_file,
- code_end,
- false);
-
- break;
-
- case REQUEST_MAILBOX_DATA:
- pr_debug("REQUEST_MAILBOX_DATA\n");
- /* Convert length from byte count to word count. Make sure we round up. */
- word_length =
- (long)(pft1000info->DSPInfoBlklen +
- 1) / 2;
- put_request_value(ft1000dev,
- word_length);
- mailbox_data =
- (struct drv_msg *)&(pft1000info->
- DSPInfoBlk[0]);
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
-
- data = (u16 *)&mailbox_data->data[0];
- dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
- if (word_length & 0x1)
- word_length++;
-
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
-
- templong = *data++;
- templong |= (*data++ << 16);
- status =
- fix_ft1000_write_dpram32
- (ft1000dev, dpram++,
- (u8 *)&templong);
-
- }
- break;
-
- case REQUEST_VERSION_INFO:
- pr_debug("REQUEST_VERSION_INFO\n");
- word_length =
- file_hdr->version_data_size;
- put_request_value(ft1000dev,
- word_length);
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
-
- s_file =
- (u16 *) (pFileStart +
- file_hdr->
- version_data_offset);
-
- dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
- if (word_length & 0x1)
- word_length++;
-
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
-
- templong = ntohs(*s_file++);
- temp = ntohs(*s_file++);
- templong |= (temp << 16);
- status =
- fix_ft1000_write_dpram32
- (ft1000dev, dpram++,
- (u8 *)&templong);
-
- }
- break;
-
- case REQUEST_CODE_BY_VERSION:
- pr_debug("REQUEST_CODE_BY_VERSION\n");
- correct_version = false;
- requested_version =
- get_request_value(ft1000dev);
-
- dsp_img_info =
- (struct dsp_image_info *)(pFileStart
- +
- sizeof
- (struct
- dsp_file_hdr));
-
- for (image = 0;
- image < file_hdr->nDspImages;
- image++) {
-
- if (dsp_img_info->version ==
- requested_version) {
- correct_version = true;
- pr_debug("correct_version is TRUE\n");
- s_file =
- (u16 *) (pFileStart
- +
- dsp_img_info->
- begin_offset);
- c_file =
- (u8 *) (pFileStart +
- dsp_img_info->
- begin_offset);
- code_end =
- (u8 *) (pFileStart +
- dsp_img_info->
- end_offset);
- run_address =
- dsp_img_info->
- run_address;
- run_size =
- dsp_img_info->
- image_size;
- image_chksum =
- (u32)dsp_img_info->
- checksum;
- break;
- }
- dsp_img_info++;
-
- } /* end of for */
-
- if (!correct_version) {
- /*
-						 * Error: requested version not found in the image.
- */
- pr_debug("Download error: Bad Version Request = 0x%x.\n",
- (int)requested_version);
- status = -1;
- break;
- }
- break;
-
- default:
- pr_debug("Download error: Bad request type=%d in CODE download state.\n",
- request);
- status = -1;
- break;
- }
- if (ft1000dev->usbboot)
- put_handshake_usb(ft1000dev,
- HANDSHAKE_RESPONSE);
- else
- put_handshake(ft1000dev,
- HANDSHAKE_RESPONSE);
- } else {
- pr_debug("Download error: Handshake failed\n");
- status = -1;
- }
-
- break;
-
- case STATE_DONE_DWNLD:
- pr_debug("Code loader is done...\n");
- state = STATE_SECTION_PROV;
- break;
-
- case STATE_SECTION_PROV:
- pr_debug("STATE_SECTION_PROV\n");
- pseudo_header = (struct pseudo_hdr *)c_file;
-
- if (pseudo_header->checksum ==
- hdr_checksum(pseudo_header)) {
- if (pseudo_header->portdest !=
- 0x80 /* Dsp OAM */) {
- state = STATE_DONE_PROV;
- break;
- }
- pseudo_header_len = ntohs(pseudo_header->length); /* Byte length for PROV records */
-
- /* Get buffer for provisioning data */
- pbuffer =
- kmalloc(pseudo_header_len +
- sizeof(struct pseudo_hdr),
- GFP_ATOMIC);
- if (pbuffer) {
- memcpy(pbuffer, c_file,
- (u32) (pseudo_header_len +
- sizeof(struct
- pseudo_hdr)));
- /* link provisioning data */
- pprov_record =
- kmalloc(sizeof(struct prov_record),
- GFP_ATOMIC);
- if (pprov_record) {
- pprov_record->pprov_data =
- pbuffer;
- list_add_tail(&pprov_record->
- list,
- &pft1000info->
- prov_list);
- /* Move to next entry if available */
- c_file =
- (u8 *) ((unsigned long)
- c_file +
- (u32) ((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
- if ((unsigned long)(c_file) -
- (unsigned long)(pFileStart)
- >=
- (unsigned long)FileLength) {
- state = STATE_DONE_FILE;
- }
- } else {
- kfree(pbuffer);
- status = -1;
- }
- } else {
- status = -1;
- }
- } else {
- /* Checksum did not compute */
- status = -1;
- }
- pr_debug("after STATE_SECTION_PROV, state = %d, status= %d\n",
- state, status);
- break;
-
- case STATE_DONE_PROV:
- pr_debug("STATE_DONE_PROV\n");
- state = STATE_DONE_FILE;
- break;
-
- default:
- status = -1;
- break;
- } /* End Switch */
-
- if (status != 0)
- break;
-
-/****
- // Check if Card is present
- status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
- if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
- break;
- }
-
- status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
- if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
- break;
- }
-****/
-
- } /* End while */
-
- pr_debug("Download exiting with status = 0x%8x\n", status);
- ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX,
- FT1000_REG_DOORBELL);
-
- return status;
-}
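
For orientation, scram_dnldr() above drives the firmware load through a handshake protocol: the host waits for HANDSHAKE_REQUEST, reads the request type, answers it, and acknowledges with HANDSHAKE_RESPONSE. Below is a minimal, hedged sketch of one such round trip using only the helpers visible above; the function name and signature are illustrative, and the USB-boot variants and most error handling are omitted.

/* Illustrative sketch only -- not part of the original driver. */
static int service_one_request(struct ft1000_usb *ft1000dev,
			       u32 run_address, u32 run_size)
{
	u16 handshake;
	u16 request;

	/* the DSP signals that it wants a value from the host */
	handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST);
	if (handshake != HANDSHAKE_REQUEST)
		return -1;

	request = get_request_type(ft1000dev);
	switch (request) {
	case REQUEST_RUN_ADDRESS:
		put_request_value(ft1000dev, run_address);
		break;
	case REQUEST_CODE_LENGTH:
		put_request_value(ft1000dev, run_size);
		break;
	default:
		return -1;	/* unexpected request type */
	}

	/* tell the DSP the answer is in place */
	put_handshake(ft1000dev, HANDSHAKE_RESPONSE);
	return 0;
}
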
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
deleted file mode 100644
index 9ea32cea2c03..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ /dev/null
@@ -1,1586 +0,0 @@
-/* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
- *
- *
- * This file is part of Express Card USB Driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/usb.h>
-#include "ft1000_usb.h"
-#include <linux/types.h>
-
-#define HARLEY_READ_REGISTER 0x0
-#define HARLEY_WRITE_REGISTER 0x01
-#define HARLEY_READ_DPRAM_32 0x02
-#define HARLEY_READ_DPRAM_LOW 0x03
-#define HARLEY_READ_DPRAM_HIGH 0x04
-#define HARLEY_WRITE_DPRAM_32 0x05
-#define HARLEY_WRITE_DPRAM_LOW 0x06
-#define HARLEY_WRITE_DPRAM_HIGH 0x07
-
-#define HARLEY_READ_OPERATION 0xc1
-#define HARLEY_WRITE_OPERATION 0x41
-
-#if 0
-#define JDEBUG
-#endif
-
-static int ft1000_submit_rx_urb(struct ft1000_info *info);
-
-static u8 tempbuffer[1600];
-
-#define MAX_RCV_LOOP 100
-
-/* send a control message via USB interface synchronously
- * Parameters: ft1000_usb - device structure
- * pipe - usb control message pipe
- * request - control request
- * requesttype - control message request type
- * value - value to be written or 0
- * index - register index
- * data - data buffer to hold the read/write values
- * size - data size
- * timeout - control message time out value
- */
-static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
- u8 request, u8 requesttype, u16 value, u16 index,
- void *data, u16 size, int timeout)
-{
- int ret;
-
- if ((ft1000dev == NULL) || (ft1000dev->dev == NULL)) {
- pr_debug("ft1000dev or ft1000dev->dev == NULL, failure\n");
- return -ENODEV;
- }
-
- ret = usb_control_msg(ft1000dev->dev, pipe, request, requesttype,
- value, index, data, size, timeout);
-
- if (ret > 0)
- ret = 0;
-
- return ret;
-}
-
-/* returns the value in a register */
-int ft1000_read_register(struct ft1000_usb *ft1000dev, u16 *Data,
- u16 nRegIndx)
-{
- int ret = 0;
-
- ret = ft1000_control(ft1000dev,
- usb_rcvctrlpipe(ft1000dev->dev, 0),
- HARLEY_READ_REGISTER,
- HARLEY_READ_OPERATION,
- 0,
- nRegIndx,
- Data,
- 2,
- USB_CTRL_GET_TIMEOUT);
-
- return ret;
-}
-
-/* writes the value in a register */
-int ft1000_write_register(struct ft1000_usb *ft1000dev, u16 value,
- u16 nRegIndx)
-{
- int ret = 0;
-
- ret = ft1000_control(ft1000dev,
- usb_sndctrlpipe(ft1000dev->dev, 0),
- HARLEY_WRITE_REGISTER,
- HARLEY_WRITE_OPERATION,
- value,
- nRegIndx,
- NULL,
- 0,
- USB_CTRL_SET_TIMEOUT);
-
- return ret;
-}
-
-/* read a number of bytes from DPRAM */
-int ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
- u16 cnt)
-{
- int ret = 0;
-
- ret = ft1000_control(ft1000dev,
- usb_rcvctrlpipe(ft1000dev->dev, 0),
- HARLEY_READ_DPRAM_32,
- HARLEY_READ_OPERATION,
- 0,
- indx,
- buffer,
- cnt,
- USB_CTRL_GET_TIMEOUT);
-
- return ret;
-}
-
-/* writes into DPRAM a number of bytes */
-int ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
- u16 cnt)
-{
- int ret = 0;
-
-	if (cnt % 4)
-		cnt += 4 - (cnt % 4);
-
- ret = ft1000_control(ft1000dev,
- usb_sndctrlpipe(ft1000dev->dev, 0),
- HARLEY_WRITE_DPRAM_32,
- HARLEY_WRITE_OPERATION,
- 0,
- indx,
- buffer,
- cnt,
- USB_CTRL_SET_TIMEOUT);
-
- return ret;
-}
-
-/* read 16 bits from DPRAM */
-int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
- u8 highlow)
-{
- int ret = 0;
- u8 request;
-
- if (highlow == 0)
- request = HARLEY_READ_DPRAM_LOW;
- else
- request = HARLEY_READ_DPRAM_HIGH;
-
- ret = ft1000_control(ft1000dev,
- usb_rcvctrlpipe(ft1000dev->dev, 0),
- request,
- HARLEY_READ_OPERATION,
- 0,
- indx,
- buffer,
- 2,
- USB_CTRL_GET_TIMEOUT);
-
- return ret;
-}
-
-/* write into DPRAM a number of bytes */
-int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value,
- u8 highlow)
-{
- int ret = 0;
- u8 request;
-
- if (highlow == 0)
- request = HARLEY_WRITE_DPRAM_LOW;
- else
- request = HARLEY_WRITE_DPRAM_HIGH;
-
- ret = ft1000_control(ft1000dev,
- usb_sndctrlpipe(ft1000dev->dev, 0),
- request,
- HARLEY_WRITE_OPERATION,
- value,
- indx,
- NULL,
- 0,
- USB_CTRL_SET_TIMEOUT);
-
- return ret;
-}
-
-/* read DPRAM 4 words at a time */
-int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
- u8 *buffer)
-{
- u8 buf[16];
- u16 pos;
- int ret = 0;
-
- pos = (indx / 4) * 4;
- ret = ft1000_read_dpram32(ft1000dev, pos, buf, 16);
-
- if (ret == 0) {
- pos = (indx % 4) * 4;
- *buffer++ = buf[pos++];
- *buffer++ = buf[pos++];
- *buffer++ = buf[pos++];
- *buffer++ = buf[pos++];
- } else {
- pr_debug("DPRAM32 Read failed\n");
- *buffer++ = 0;
- *buffer++ = 0;
- *buffer++ = 0;
- *buffer++ = 0;
- }
-
- return ret;
-}
-
-
-/* Description: This function writes to DPRAM 4 words at a time */
-int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
-{
- u16 pos1;
- u16 pos2;
- u16 i;
- u8 buf[32];
- u8 resultbuffer[32];
- u8 *pdata;
- int ret = 0;
-
- pos1 = (indx / 4) * 4;
- pdata = buffer;
- ret = ft1000_read_dpram32(ft1000dev, pos1, buf, 16);
-
- if (ret == 0) {
- pos2 = (indx % 4)*4;
- buf[pos2++] = *buffer++;
- buf[pos2++] = *buffer++;
- buf[pos2++] = *buffer++;
- buf[pos2++] = *buffer++;
- ret = ft1000_write_dpram32(ft1000dev, pos1, buf, 16);
- } else {
- pr_debug("DPRAM32 Read failed\n");
- return ret;
- }
-
- ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16);
-
- if (ret == 0) {
- buffer = pdata;
- for (i = 0; i < 16; i++) {
- if (buf[i] != resultbuffer[i])
- ret = -1;
- }
- }
-
- if (ret == -1) {
- ret = ft1000_write_dpram32(ft1000dev, pos1,
- (u8 *)&tempbuffer[0], 16);
- ret = ft1000_read_dpram32(ft1000dev, pos1,
- (u8 *)&resultbuffer[0], 16);
- if (ret == 0) {
- buffer = pdata;
- for (i = 0; i < 16; i++) {
- if (tempbuffer[i] != resultbuffer[i]) {
- ret = -1;
- pr_debug("Failed to write\n");
- }
- }
- }
- }
-
- return ret;
-}
-
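fix_ft1000_read_dpram32() and fix_ft1000_write_dpram32() above always transfer a 16-byte aligned block and then address the 4-byte slot the caller asked for. The helper below is an illustrative restatement of that index arithmetic; the function name is an assumption, everything else mirrors the code above.

/* Illustrative sketch only -- not part of the original driver. */
static void dpram32_slot(u16 indx, u16 *block_pos, u16 *byte_off)
{
	*block_pos = (indx / 4) * 4;	/* first word of the 16-byte block */
	*byte_off = (indx % 4) * 4;	/* byte offset of the 4-byte slot */
}
/* e.g. indx = 6 gives block_pos = 4 and byte_off = 8 */
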
-/* reset or activate the DSP */
-static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
-{
- int status = 0;
- u16 tempword;
-
- status = ft1000_write_register(ft1000dev, HOST_INTF_BE,
- FT1000_REG_SUP_CTRL);
- status = ft1000_read_register(ft1000dev, &tempword,
- FT1000_REG_SUP_CTRL);
-
- if (value) {
- pr_debug("Reset DSP\n");
- status = ft1000_read_register(ft1000dev, &tempword,
- FT1000_REG_RESET);
- tempword |= DSP_RESET_BIT;
- status = ft1000_write_register(ft1000dev, tempword,
- FT1000_REG_RESET);
- } else {
- pr_debug("Activate DSP\n");
- status = ft1000_read_register(ft1000dev, &tempword,
- FT1000_REG_RESET);
- tempword |= DSP_ENCRYPTED;
- tempword &= ~DSP_UNENCRYPTED;
- status = ft1000_write_register(ft1000dev, tempword,
- FT1000_REG_RESET);
- status = ft1000_read_register(ft1000dev, &tempword,
- FT1000_REG_RESET);
- tempword &= ~EFUSE_MEM_DISABLE;
- tempword &= ~DSP_RESET_BIT;
- status = ft1000_write_register(ft1000dev, tempword,
- FT1000_REG_RESET);
- status = ft1000_read_register(ft1000dev, &tempword,
- FT1000_REG_RESET);
- }
-}
-
-/* send a command to ASIC
- * Parameters: ft1000_usb - device structure
- * ptempbuffer - command buffer
- * size - command buffer size
- */
-int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
- int size)
-{
- int ret;
- unsigned short temp;
- unsigned char *commandbuf;
-
- pr_debug("enter card_send_command... size=%d\n", size);
-
- ret = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
- if (ret)
- return ret;
-
- commandbuf = kmalloc(size + 2, GFP_KERNEL);
- if (!commandbuf)
- return -ENOMEM;
- memcpy((void *)commandbuf + 2, ptempbuffer, size);
-
- if (temp & 0x0100)
- usleep_range(900, 1100);
-
-	/* account for the two bytes prepended to the command buffer */
-	size = size + 2;
-
- /* Must force to be 32 bit aligned */
- if (size % 4)
- size += 4 - (size % 4);
-
- ret = ft1000_write_dpram32(ft1000dev, 0, commandbuf, size);
- if (ret)
- return ret;
- usleep_range(900, 1100);
- ret = ft1000_write_register(ft1000dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
- if (ret)
- return ret;
- usleep_range(900, 1100);
-
- ret = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL);
-
-#if 0
- if ((temp & 0x0100) == 0)
- pr_debug("Message sent\n");
-#endif
- return ret;
-}
-
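card_send_command() above copies the caller's message in at offset 2 of its own buffer and rounds the transfer up to a 32-bit multiple itself, so a caller only supplies a pseudo-header framed message and its total length (the GET_DRV_ERR_RPT_MSG handler later in this file does exactly that). A hedged usage sketch follows; the wrapper name and signature are assumptions, while PSEUDOSZ and struct pseudo_hdr come from the driver headers.

/* Illustrative sketch only -- not part of the original driver. */
static int send_slowq_msg(struct ft1000_usb *ft1000dev,
			  struct pseudo_hdr *hdr, int payload_len)
{
	/* hdr must be followed in memory by payload_len bytes of payload */
	return card_send_command(ft1000dev, hdr, payload_len + PSEUDOSZ);
}
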
-/* load or reload the DSP */
-int dsp_reload(struct ft1000_usb *ft1000dev)
-{
- int status;
- u16 tempword;
- u32 templong;
-
- struct ft1000_info *pft1000info;
-
- pft1000info = netdev_priv(ft1000dev->net);
-
- pft1000info->CardReady = 0;
-
- /* Program Interrupt Mask register */
- status = ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_SUP_IMASK);
-
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET);
- tempword |= ASIC_RESET_BIT;
- status = ft1000_write_register(ft1000dev, tempword, FT1000_REG_RESET);
- msleep(1000);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET);
- pr_debug("Reset Register = 0x%x\n", tempword);
-
- /* Toggle DSP reset */
- card_reset_dsp(ft1000dev, 1);
- msleep(1000);
- card_reset_dsp(ft1000dev, 0);
- msleep(1000);
-
- status = ft1000_write_register(ft1000dev, HOST_INTF_BE,
- FT1000_REG_SUP_CTRL);
-
- /* Let's check for FEFE */
- status =
- ft1000_read_dpram32(ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX,
- (u8 *)&templong, 4);
- pr_debug("templong (fefe) = 0x%8x\n", templong);
-
- /* call codeloader */
- status = scram_dnldr(ft1000dev, pFileStart, FileLength);
-
- if (status != 0)
- return -EIO;
-
- msleep(1000);
-
- return 0;
-}
-
-/* call the Card Service function to reset the ASIC. */
-static void ft1000_reset_asic(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- struct ft1000_usb *ft1000dev = info->priv;
- u16 tempword;
-
- /* Let's use the register provided by the Magnemite ASIC to reset the
- * ASIC and DSP.
- */
- ft1000_write_register(ft1000dev, DSP_RESET_BIT | ASIC_RESET_BIT,
- FT1000_REG_RESET);
-
- mdelay(1);
-
- /* set watermark to -1 in order to not generate an interrupt */
- ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_MAG_WATERMARK);
-
- /* clear interrupts */
- ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR);
- pr_debug("interrupt status register = 0x%x\n", tempword);
- ft1000_write_register(ft1000dev, tempword, FT1000_REG_SUP_ISR);
- ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR);
- pr_debug("interrupt status register = 0x%x\n", tempword);
-}
-
-static int ft1000_reset_card(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
- struct ft1000_usb *ft1000dev = info->priv;
- u16 tempword;
- struct prov_record *ptr;
- struct prov_record *tmp;
-
- ft1000dev->fCondResetPend = true;
- info->CardReady = 0;
- ft1000dev->fProvComplete = false;
-
- /* Make sure we free any memory reserve for provisioning */
- list_for_each_entry_safe(ptr, tmp, &info->prov_list, list) {
- pr_debug("deleting provisioning record\n");
- list_del(&ptr->list);
- kfree(ptr->pprov_data);
- kfree(ptr);
- }
-
- pr_debug("reset asic\n");
- ft1000_reset_asic(dev);
-
- pr_debug("call dsp_reload\n");
- dsp_reload(ft1000dev);
-
- pr_debug("dsp reload successful\n");
-
- mdelay(10);
-
- /* Initialize DSP heartbeat area */
- ft1000_write_dpram16(ft1000dev, FT1000_MAG_HI_HO, ho_mag,
- FT1000_MAG_HI_HO_INDX);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *)&tempword,
- FT1000_MAG_HI_HO_INDX);
- pr_debug("hi_ho value = 0x%x\n", tempword);
-
- info->CardReady = 1;
-
- ft1000dev->fCondResetPend = false;
-
- return TRUE;
-}
-
-/* callback function when a urb is transmitted */
-static void ft1000_usb_transmit_complete(struct urb *urb)
-{
-
- struct ft1000_usb *ft1000dev = urb->context;
-
- if (urb->status)
- pr_err("%s: TX status %d\n", ft1000dev->net->name, urb->status);
-
- netif_wake_queue(ft1000dev->net);
-}
-
-/* take an ethernet packet and convert it to a Flarion
- * packet prior to sending it to the ASIC Downlink FIFO.
- */
-static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
-{
- struct ft1000_info *pInfo = netdev_priv(netdev);
- struct ft1000_usb *pFt1000Dev = pInfo->priv;
-
- int count, ret;
- u8 *t;
- struct pseudo_hdr hdr;
-
- if (!pInfo->CardReady) {
- pr_debug("Card Not Ready\n");
- return -ENODEV;
- }
-
- count = sizeof(struct pseudo_hdr) + len;
- if (count > MAX_BUF_SIZE) {
- pr_debug("Message Size Overflow! size = %d\n", count);
- return -EINVAL;
- }
-
- if (count % 4)
- count = count + (4 - (count % 4));
-
- memset(&hdr, 0, sizeof(struct pseudo_hdr));
-
- hdr.length = ntohs(count);
- hdr.source = 0x10;
- hdr.destination = 0x20;
- hdr.portdest = 0x20;
- hdr.portsrc = 0x10;
- hdr.sh_str_id = 0x91;
- hdr.control = 0x00;
-
- hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^
- hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control;
-
- memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr));
- memcpy(&pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)], packet, len);
-
- netif_stop_queue(netdev);
-
- usb_fill_bulk_urb(pFt1000Dev->tx_urb,
- pFt1000Dev->dev,
- usb_sndbulkpipe(pFt1000Dev->dev,
- pFt1000Dev->bulk_out_endpointAddr),
- pFt1000Dev->tx_buf, count,
- ft1000_usb_transmit_complete, pFt1000Dev);
-
- t = (u8 *)pFt1000Dev->tx_urb->transfer_buffer;
-
- ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
-
- if (ret) {
- pr_debug("failed tx_urb %d\n", ret);
- return ret;
- }
- pInfo->stats.tx_packets++;
- pInfo->stats.tx_bytes += (len + 14);
-
- return 0;
-}
-
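Both directions guard the 16-byte pseudo header with an XOR checksum: ft1000_copy_down_pkt() above builds it from the individual header fields, while the receive paths (ft1000_copy_up_pkt() and ft1000_receive_cmd() below) verify it by XOR-ing the first seven 16-bit words of the header and comparing the result with the eighth. The helper below restates that receive-side check; the function name is an assumption.

/* Illustrative sketch only -- not part of the original driver. */
static u16 pseudo_hdr_xor(const u16 *hdr_words)
{
	u16 sum = hdr_words[0];
	int i;

	for (i = 1; i < 7; i++)
		sum ^= hdr_words[i];

	return sum;	/* accept the frame when this equals hdr_words[7] */
}
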
-/* transmit an ethernet packet
- * Parameters: skb - socket buffer to be sent
- * dev - network device
- */
-static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ft1000_info *pInfo = netdev_priv(dev);
- struct ft1000_usb *pFt1000Dev = pInfo->priv;
- u8 *pdata;
- int maxlen, pipe;
-
- if (skb == NULL) {
- pr_debug("skb == NULL!!!\n");
- return NETDEV_TX_OK;
- }
-
- if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
- pr_debug("network driver is closed, return\n");
- goto err;
- }
-
- pipe = usb_sndbulkpipe(pFt1000Dev->dev,
- pFt1000Dev->bulk_out_endpointAddr);
- maxlen = usb_maxpacket(pFt1000Dev->dev, pipe, usb_pipeout(pipe));
-
- pdata = (u8 *)skb->data;
-
- if (pInfo->mediastate == 0) {
-		/* Drop packet if mediastate is down */
- pr_debug("mediastate is down\n");
- goto err;
- }
-
- if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
- /* Drop packet which has invalid size */
- pr_debug("invalid ethernet length\n");
- goto err;
- }
-
- ft1000_copy_down_pkt(dev, pdata + ENET_HEADER_SIZE - 2,
- skb->len - ENET_HEADER_SIZE + 2);
-
-err:
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-/* open the network driver */
-static int ft1000_open(struct net_device *dev)
-{
- struct ft1000_info *pInfo = netdev_priv(dev);
- struct ft1000_usb *pFt1000Dev = pInfo->priv;
- struct timeval tv;
-
- pr_debug("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
-
- pInfo->stats.rx_bytes = 0;
- pInfo->stats.tx_bytes = 0;
- pInfo->stats.rx_packets = 0;
- pInfo->stats.tx_packets = 0;
- do_gettimeofday(&tv);
- pInfo->ConTm = tv.tv_sec;
- pInfo->ProgConStat = 0;
-
- netif_start_queue(dev);
-
- netif_carrier_on(dev);
-
- return ft1000_submit_rx_urb(pInfo);
-}
-
-static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
-{
- struct ft1000_info *info = netdev_priv(dev);
-
- return &(info->stats);
-}
-
-static const struct net_device_ops ftnet_ops = {
- .ndo_open = &ft1000_open,
- .ndo_stop = &ft1000_close,
- .ndo_start_xmit = &ft1000_start_xmit,
- .ndo_get_stats = &ft1000_netdev_stats,
-};
-
-/* initialize the network device */
-static int ft1000_reset(void *dev)
-{
- ft1000_reset_card(dev);
- return 0;
-}
-
-int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
-{
- struct net_device *netdev;
- struct ft1000_info *pInfo = NULL;
- struct dpram_blk *pdpram_blk;
- int i, ret_val;
- struct list_head *cur, *tmp;
- char card_nr[2];
- u8 gCardIndex = 0;
-
- netdev = alloc_etherdev(sizeof(struct ft1000_info));
- if (!netdev) {
- pr_debug("can not allocate network device\n");
- return -ENOMEM;
- }
-
- pInfo = netdev_priv(netdev);
-
- memset(pInfo, 0, sizeof(struct ft1000_info));
-
- dev_alloc_name(netdev, netdev->name);
-
- pr_debug("network device name is %s\n", netdev->name);
-
- if (strncmp(netdev->name, "eth", 3) == 0) {
- card_nr[0] = netdev->name[3];
- card_nr[1] = '\0';
- ret_val = kstrtou8(card_nr, 10, &gCardIndex);
- if (ret_val) {
- netdev_err(ft1000dev->net, "Can't parse netdev\n");
- goto err_net;
- }
-
- ft1000dev->CardNumber = gCardIndex;
- pr_debug("card number = %d\n", ft1000dev->CardNumber);
- } else {
- netdev_err(ft1000dev->net, "ft1000: Invalid device name\n");
- ret_val = -ENXIO;
- goto err_net;
- }
-
- memset(&pInfo->stats, 0, sizeof(struct net_device_stats));
-
- spin_lock_init(&pInfo->dpram_lock);
- pInfo->priv = ft1000dev;
- pInfo->DrvErrNum = 0;
- pInfo->registered = 1;
- pInfo->ft1000_reset = ft1000_reset;
- pInfo->mediastate = 0;
- pInfo->fifo_cnt = 0;
- ft1000dev->DeviceCreated = FALSE;
- pInfo->CardReady = 0;
- pInfo->DSP_TIME[0] = 0;
- pInfo->DSP_TIME[1] = 0;
- pInfo->DSP_TIME[2] = 0;
- pInfo->DSP_TIME[3] = 0;
- ft1000dev->fAppMsgPend = false;
- ft1000dev->fCondResetPend = false;
- ft1000dev->usbboot = 0;
- ft1000dev->dspalive = 0;
- memset(&ft1000dev->tempbuf[0], 0, sizeof(ft1000dev->tempbuf));
-
- INIT_LIST_HEAD(&pInfo->prov_list);
-
- INIT_LIST_HEAD(&ft1000dev->nodes.list);
-
- netdev->netdev_ops = &ftnet_ops;
-
- ft1000dev->net = netdev;
-
- pr_debug("Initialize free_buff_lock and freercvpool\n");
- spin_lock_init(&free_buff_lock);
-
-	/* initialize a list of buffers to be used for queuing
- * up receive command data
- */
- INIT_LIST_HEAD(&freercvpool);
-
- /* create list of free buffers */
- for (i = 0; i < NUM_OF_FREE_BUFFERS; i++) {
- /* Get memory for DPRAM_DATA link list */
- pdpram_blk = kmalloc(sizeof(struct dpram_blk), GFP_KERNEL);
- if (pdpram_blk == NULL) {
- ret_val = -ENOMEM;
- goto err_free;
- }
- /* Get a block of memory to store command data */
- pdpram_blk->pbuffer = kmalloc(MAX_CMD_SQSIZE, GFP_KERNEL);
- if (pdpram_blk->pbuffer == NULL) {
- ret_val = -ENOMEM;
- kfree(pdpram_blk);
- goto err_free;
- }
- /* link provisioning data */
- list_add_tail(&pdpram_blk->list, &freercvpool);
- }
- numofmsgbuf = NUM_OF_FREE_BUFFERS;
-
- return 0;
-
-err_free:
- list_for_each_safe(cur, tmp, &freercvpool) {
- pdpram_blk = list_entry(cur, struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- kfree(pdpram_blk->pbuffer);
- kfree(pdpram_blk);
- }
-err_net:
- free_netdev(netdev);
- return ret_val;
-}
-
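The freercvpool created above is consumed later by dsp_broadcast_msg_id() and handle_misc_portid(): a block is taken with ft1000_get_buffer(), filled by ft1000_receive_cmd(), and either queued on an application's app_sqlist or returned with ft1000_free_buffer(). A hedged sketch of that round trip follows; the function name is an assumption and the application-queue step is elided.

/* Illustrative sketch only -- not part of the original driver. */
static int slowq_pool_roundtrip(struct ft1000_usb *dev)
{
	struct dpram_blk *blk = ft1000_get_buffer(&freercvpool);

	if (!blk)
		return -1;			/* pool exhausted */

	if (!ft1000_receive_cmd(dev, blk->pbuffer, MAX_CMD_SQSIZE)) {
		ft1000_free_buffer(blk, &freercvpool);
		return -1;			/* bad frame, recycle the block */
	}

	/* a real caller would queue blk on an app_info[].app_sqlist here */
	ft1000_free_buffer(blk, &freercvpool);
	return 0;
}
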
-/* register the network driver */
-int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
- struct usb_interface *intf)
-{
- struct net_device *netdev;
- struct ft1000_info *pInfo;
- int rc;
-
- netdev = ft1000dev->net;
- pInfo = netdev_priv(ft1000dev->net);
-
- ft1000_read_register(ft1000dev, &pInfo->AsicID, FT1000_REG_ASIC_ID);
-
- usb_set_intfdata(intf, pInfo);
- SET_NETDEV_DEV(netdev, &intf->dev);
-
- rc = register_netdev(netdev);
- if (rc) {
- pr_debug("could not register network device\n");
- free_netdev(netdev);
- return rc;
- }
-
- ft1000_create_dev(ft1000dev);
-
- pInfo->CardReady = 1;
-
- return 0;
-}
-
-/* take a packet from the uplink FIFO,
- * convert it into an ethernet packet and deliver it to the IP stack
- */
-static int ft1000_copy_up_pkt(struct urb *urb)
-{
- struct ft1000_info *info = urb->context;
- struct ft1000_usb *ft1000dev = info->priv;
- struct net_device *net = ft1000dev->net;
-
- u16 tempword;
- u16 len;
- u16 lena;
- struct sk_buff *skb;
- u16 i;
- u8 *pbuffer = NULL;
- u8 *ptemp = NULL;
- u16 *chksum;
-
- if (ft1000dev->status & FT1000_STATUS_CLOSING) {
- pr_debug("network driver is closed, return\n");
- return 0;
- }
- /* Read length */
- len = urb->transfer_buffer_length;
- lena = urb->actual_length;
-
- chksum = (u16 *)ft1000dev->rx_buf;
-
- tempword = *chksum++;
- for (i = 1; i < 7; i++)
- tempword ^= *chksum++;
-
- if (tempword != *chksum) {
- info->stats.rx_errors++;
- ft1000_submit_rx_urb(info);
- return -1;
- }
-
- skb = dev_alloc_skb(len + 12 + 2);
-
- if (skb == NULL) {
- info->stats.rx_errors++;
- ft1000_submit_rx_urb(info);
- return -1;
- }
-
- pbuffer = (u8 *)skb_put(skb, len + 12);
-
-	/* remember the start of the frame buffer */
-	ptemp = pbuffer;
-
- /* fake MAC address */
- *pbuffer++ = net->dev_addr[0];
- *pbuffer++ = net->dev_addr[1];
- *pbuffer++ = net->dev_addr[2];
- *pbuffer++ = net->dev_addr[3];
- *pbuffer++ = net->dev_addr[4];
- *pbuffer++ = net->dev_addr[5];
- *pbuffer++ = 0x00;
- *pbuffer++ = 0x07;
- *pbuffer++ = 0x35;
- *pbuffer++ = 0xff;
- *pbuffer++ = 0xff;
- *pbuffer++ = 0xfe;
-
- memcpy(pbuffer, ft1000dev->rx_buf + sizeof(struct pseudo_hdr),
- len - sizeof(struct pseudo_hdr));
-
- skb->dev = net;
-
- skb->protocol = eth_type_trans(skb, net);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx(skb);
-
- info->stats.rx_packets++;
- /* Add on 12 bytes for MAC address which was removed */
- info->stats.rx_bytes += (lena + 12);
-
- ft1000_submit_rx_urb(info);
-
- return 0;
-}
-
-
-/* the receiving function of the network driver */
-static int ft1000_submit_rx_urb(struct ft1000_info *info)
-{
- int result;
- struct ft1000_usb *pFt1000Dev = info->priv;
-
- if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
- pr_debug("network driver is closed, return\n");
- return -ENODEV;
- }
-
- usb_fill_bulk_urb(pFt1000Dev->rx_urb,
- pFt1000Dev->dev,
- usb_rcvbulkpipe(pFt1000Dev->dev,
- pFt1000Dev->bulk_in_endpointAddr),
- pFt1000Dev->rx_buf, MAX_BUF_SIZE,
- (usb_complete_t)ft1000_copy_up_pkt, info);
-
- result = usb_submit_urb(pFt1000Dev->rx_urb, GFP_ATOMIC);
-
- if (result) {
- pr_err("submitting rx_urb %d failed\n", result);
- return result;
- }
-
- return 0;
-}
-
-/* close the network driver */
-int ft1000_close(struct net_device *net)
-{
- struct ft1000_info *pInfo = netdev_priv(net);
- struct ft1000_usb *ft1000dev = pInfo->priv;
-
- ft1000dev->status |= FT1000_STATUS_CLOSING;
-
- pr_debug("pInfo=%p, ft1000dev=%p\n", pInfo, ft1000dev);
- netif_carrier_off(net);
- netif_stop_queue(net);
- ft1000dev->status &= ~FT1000_STATUS_CLOSING;
-
- pInfo->ProgConStat = 0xff;
-
- return 0;
-}
-
-/* check if the device is presently available on the system. */
-static int ft1000_chkcard(struct ft1000_usb *dev)
-{
- u16 tempword;
- int status;
-
- if (dev->fCondResetPend) {
- pr_debug("Card is being reset, return FALSE\n");
- return TRUE;
- }
- /* Mask register is used to check for device presence since it is never
- * set to zero.
- */
- status = ft1000_read_register(dev, &tempword, FT1000_REG_SUP_IMASK);
- if (tempword == 0) {
- pr_debug("IMASK = 0 Card not detected\n");
- return FALSE;
- }
- /* The system will return the value of 0xffff for the version register
- * if the device is not present.
- */
- status = ft1000_read_register(dev, &tempword, FT1000_REG_ASIC_ID);
- if (tempword != 0x1b01) {
- dev->status |= FT1000_STATUS_CLOSING;
- pr_debug("Version = 0xffff Card not detected\n");
- return FALSE;
- }
- return TRUE;
-}
-
-/* read a message from the dpram area.
- * Input:
- * dev - network device structure
- * pbuffer - caller supply address to buffer
- */
-static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
- int maxsz)
-{
- u16 size;
- int ret;
- u16 *ppseudohdr;
- int i;
- u16 tempword;
-
- ret =
- ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *)&size,
- FT1000_MAG_PH_LEN_INDX);
- size = ntohs(size) + PSEUDOSZ;
- if (size > maxsz) {
- pr_debug("Invalid command length = %d\n", size);
- return FALSE;
- }
- ppseudohdr = (u16 *)pbuffer;
- ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE,
- FT1000_REG_DPRAM_ADDR);
- ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
- pbuffer++;
- ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE + 1,
- FT1000_REG_DPRAM_ADDR);
- for (i = 0; i <= (size >> 2); i++) {
- ret =
- ft1000_read_register(dev, pbuffer,
- FT1000_REG_MAG_DPDATAL);
- pbuffer++;
- ret =
- ft1000_read_register(dev, pbuffer,
- FT1000_REG_MAG_DPDATAH);
- pbuffer++;
- }
- /* copy odd aligned word */
- ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAL);
-
- pbuffer++;
- ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
-
- pbuffer++;
- if (size & 0x0001) {
- /* copy odd byte from fifo */
- ret =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DPRAM_DATA);
- *pbuffer = ntohs(tempword);
- }
- /* Check if pseudo header checksum is good
- * Calculate pseudo header checksum
- */
- tempword = *ppseudohdr++;
- for (i = 1; i < 7; i++)
- tempword ^= *ppseudohdr++;
-
- if (tempword != *ppseudohdr)
- return FALSE;
-
- return TRUE;
-}
-
-static int ft1000_dsp_prov(void *arg)
-{
- struct ft1000_usb *dev = (struct ft1000_usb *)arg;
- struct ft1000_info *info = netdev_priv(dev->net);
- u16 tempword;
- u16 len;
- u16 i = 0;
- struct prov_record *ptr;
- struct pseudo_hdr *ppseudo_hdr;
- u16 *pmsg;
- int status;
- u16 TempShortBuf[256];
-
- while (list_empty(&info->prov_list) == 0) {
- pr_debug("DSP Provisioning List Entry\n");
-
- /* Check if doorbell is available */
- pr_debug("check if doorbell is cleared\n");
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (status) {
- pr_debug("ft1000_read_register error\n");
- break;
- }
-
- while (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- i++;
- if (i == 10) {
- pr_debug("message drop\n");
- return -1;
- }
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- }
-
- if (!(tempword & FT1000_DB_DPRAM_TX)) {
- pr_debug("*** Provision Data Sent to DSP\n");
-
- /* Send provisioning data */
- ptr = list_entry(info->prov_list.next,
- struct prov_record, list);
- len = *(u16 *)ptr->pprov_data;
- len = htons(len);
- len += PSEUDOSZ;
-
- pmsg = (u16 *)ptr->pprov_data;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++)
- ppseudo_hdr->checksum ^= *pmsg++;
-
- TempShortBuf[0] = 0;
- TempShortBuf[1] = htons(len);
- memcpy(&TempShortBuf[2], ppseudo_hdr, len);
-
- status =
- ft1000_write_dpram32(dev, 0,
- (u8 *)&TempShortBuf[0],
- (unsigned short)(len + 2));
- status =
- ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
-
- list_del(&ptr->list);
- kfree(ptr->pprov_data);
- kfree(ptr);
- }
- usleep_range(9000, 11000);
- }
-
- pr_debug("DSP Provisioning List Entry finished\n");
-
- msleep(100);
-
- dev->fProvComplete = true;
- info->CardReady = 1;
-
- return 0;
-}
-
-static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
-{
- struct ft1000_info *info = netdev_priv(dev->net);
- u16 msgtype;
- u16 tempword;
- struct media_msg *pmediamsg;
- struct dsp_init_msg *pdspinitmsg;
- struct drv_msg *pdrvmsg;
- u16 i;
- struct pseudo_hdr *ppseudo_hdr;
- u16 *pmsg;
- int status;
- union {
- u8 byte[2];
- u16 wrd;
- } convert;
-
- char *cmdbuffer = kmalloc(1600, GFP_KERNEL);
-
- if (!cmdbuffer)
- return -ENOMEM;
-
- status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size);
-
-#ifdef JDEBUG
- print_hex_dump_debug("cmdbuffer: ", HEX_DUMP_OFFSET, 16, 1,
- cmdbuffer, size, true);
-#endif
- pdrvmsg = (struct drv_msg *)&cmdbuffer[2];
- msgtype = ntohs(pdrvmsg->type);
- pr_debug("Command message type = 0x%x\n", msgtype);
- switch (msgtype) {
- case MEDIA_STATE:{
- pr_debug("Command message type = MEDIA_STATE\n");
- pmediamsg = (struct media_msg *)&cmdbuffer[0];
- if (info->ProgConStat != 0xFF) {
- if (pmediamsg->state) {
- pr_debug("Media is up\n");
- if (info->mediastate == 0) {
- if (dev->NetDevRegDone)
- netif_wake_queue(dev->net);
- info->mediastate = 1;
- }
- } else {
- pr_debug("Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- if (dev->NetDevRegDone)
- info->ConTm = 0;
- }
- }
- } else {
- pr_debug("Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- info->ConTm = 0;
- }
- }
- break;
- }
- case DSP_INIT_MSG:{
- pr_debug("Command message type = DSP_INIT_MSG\n");
- pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2];
- memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- pr_debug("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1], info->DspVer[2],
- info->DspVer[3]);
- memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
- HWSERNUMSZ);
- memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
- memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
- pr_debug("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
- info->eui64[0], info->eui64[1], info->eui64[2],
- info->eui64[3], info->eui64[4], info->eui64[5],
- info->eui64[6], info->eui64[7]);
- dev->net->dev_addr[0] = info->eui64[0];
- dev->net->dev_addr[1] = info->eui64[1];
- dev->net->dev_addr[2] = info->eui64[2];
- dev->net->dev_addr[3] = info->eui64[5];
- dev->net->dev_addr[4] = info->eui64[6];
- dev->net->dev_addr[5] = info->eui64[7];
-
- if (ntohs(pdspinitmsg->length) ==
- (sizeof(struct dsp_init_msg) - 20)) {
- memcpy(info->ProductMode, pdspinitmsg->ProductMode,
- MODESZ);
- memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, CALVERSZ);
- memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
- CALDATESZ);
- pr_debug("RFCalVer = 0x%2x 0x%2x\n",
- info->RfCalVer[0], info->RfCalVer[1]);
- }
- break;
- }
- case DSP_PROVISION:{
- pr_debug("Command message type = DSP_PROVISION\n");
-
- /* kick off dspprov routine to start provisioning
- * Send provisioning data to DSP
- */
- if (list_empty(&info->prov_list) == 0) {
- dev->fProvComplete = false;
- status = ft1000_dsp_prov(dev);
- if (status != 0)
- goto out;
- } else {
- dev->fProvComplete = true;
- status = ft1000_write_register(dev, FT1000_DB_HB,
- FT1000_REG_DOORBELL);
- pr_debug("No more DSP provisioning data in dsp image\n");
- }
- pr_debug("DSP PROVISION is done\n");
- break;
- }
- case DSP_STORE_INFO:{
-			pr_debug("Command message type = DSP_STORE_INFO\n");
- tempword = ntohs(pdrvmsg->length);
- info->DSPInfoBlklen = tempword;
- if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *)&pdrvmsg->data[0];
- for (i = 0; i < ((tempword + 1) / 2); i++) {
- pr_debug("dsp info data = 0x%x\n", *pmsg);
- info->DSPInfoBlk[i + 10] = *pmsg++;
- }
- } else {
- info->DSPInfoBlklen = 0;
- }
- break;
- }
- case DSP_GET_INFO:{
- pr_debug("Got DSP_GET_INFO\n");
- /* copy dsp info block to dsp */
- dev->DrvMsgPend = 1;
- /* allow any outstanding ioctl to finish */
- mdelay(10);
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- break;
- }
- }
-		/* Put message into Slow Queue; form the pseudo header */
- pmsg = (u16 *)info->DSPInfoBlk;
- *pmsg++ = 0;
- *pmsg++ = htons(info->DSPInfoBlklen + 20 + info->DSPInfoBlklen);
- ppseudo_hdr =
- (struct pseudo_hdr *)(u16 *)&info->DSPInfoBlk[2];
- ppseudo_hdr->length = htons(info->DSPInfoBlklen + 4
- + info->DSPInfoBlklen);
- ppseudo_hdr->source = 0x10;
- ppseudo_hdr->destination = 0x20;
- ppseudo_hdr->portdest = 0;
- ppseudo_hdr->portsrc = 0;
- ppseudo_hdr->sh_str_id = 0;
- ppseudo_hdr->control = 0;
- ppseudo_hdr->rsvd1 = 0;
- ppseudo_hdr->rsvd2 = 0;
- ppseudo_hdr->qos_class = 0;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- /* Insert application id */
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++)
- ppseudo_hdr->checksum ^= *pmsg++;
-
- info->DSPInfoBlk[10] = 0x7200;
- info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
- status = ft1000_write_dpram32(dev, 0,
- (u8 *)&info->DSPInfoBlk[0],
- (unsigned short)(info->DSPInfoBlklen + 22));
- status = ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
- dev->DrvMsgPend = 0;
- break;
- }
- case GET_DRV_ERR_RPT_MSG:{
- pr_debug("Got GET_DRV_ERR_RPT_MSG\n");
- /* copy driver error message to dsp */
- dev->DrvMsgPend = 1;
- /* allow any outstanding ioctl to finish */
- mdelay(10);
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX)
- mdelay(10);
- }
- if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
-			/* Put message into Slow Queue; form the pseudo header */
- pmsg = (u16 *)&tempbuffer[0];
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- ppseudo_hdr->length = htons(0x0012);
- ppseudo_hdr->source = 0x10;
- ppseudo_hdr->destination = 0x20;
- ppseudo_hdr->portdest = 0;
- ppseudo_hdr->portsrc = 0;
- ppseudo_hdr->sh_str_id = 0;
- ppseudo_hdr->control = 0;
- ppseudo_hdr->rsvd1 = 0;
- ppseudo_hdr->rsvd2 = 0;
- ppseudo_hdr->qos_class = 0;
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- /* Insert application id */
- ppseudo_hdr->portsrc = 0;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i = 1; i < 7; i++)
- ppseudo_hdr->checksum ^= *pmsg++;
-
- pmsg = (u16 *)&tempbuffer[16];
- *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
- *pmsg++ = htons(0x000e);
- *pmsg++ = htons(info->DSP_TIME[0]);
- *pmsg++ = htons(info->DSP_TIME[1]);
- *pmsg++ = htons(info->DSP_TIME[2]);
- *pmsg++ = htons(info->DSP_TIME[3]);
- convert.byte[0] = info->DspVer[0];
- convert.byte[1] = info->DspVer[1];
- *pmsg++ = convert.wrd;
- convert.byte[0] = info->DspVer[2];
- convert.byte[1] = info->DspVer[3];
- *pmsg++ = convert.wrd;
- *pmsg++ = htons(info->DrvErrNum);
-
- status = card_send_command(dev,
- (unsigned char *)&tempbuffer[0],
- (u16)(0x0012 + PSEUDOSZ));
- if (status)
- goto out;
- info->DrvErrNum = 0;
- }
- dev->DrvMsgPend = 0;
- break;
- }
- default:
- break;
- }
-
- status = 0;
-out:
- kfree(cmdbuffer);
- return status;
-}
-
-/* Check which application has registered for dsp broadcast messages */
-static int dsp_broadcast_msg_id(struct ft1000_usb *dev)
-{
- struct dpram_blk *pdpram_blk;
- unsigned long flags;
- int i;
-
- for (i = 0; i < MAX_NUM_APP; i++) {
- if ((dev->app_info[i].DspBCMsgFlag)
- && (dev->app_info[i].fileobject)
- && (dev->app_info[i].NumOfMsg
- < MAX_MSG_LIMIT)) {
- pdpram_blk = ft1000_get_buffer(&freercvpool);
- if (pdpram_blk == NULL) {
- pr_debug("Out of memory in free receive command pool\n");
- dev->app_info[i].nRxMsgMiss++;
- return -1;
- }
- if (ft1000_receive_cmd(dev, pdpram_blk->pbuffer,
- MAX_CMD_SQSIZE)) {
- /* Put message into the
- * appropriate application block
- */
- dev->app_info[i].nRxMsg++;
- spin_lock_irqsave(&free_buff_lock, flags);
- list_add_tail(&pdpram_blk->list,
- &dev->app_info[i] .app_sqlist);
- dev->app_info[i].NumOfMsg++;
- spin_unlock_irqrestore(&free_buff_lock, flags);
- wake_up_interruptible(&dev->app_info[i]
- .wait_dpram_msg);
- } else {
- dev->app_info[i].nRxMsgMiss++;
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- pr_debug("ft1000_get_buffer NULL\n");
- return -1;
- }
- }
- }
- return 0;
-}
-
-static int handle_misc_portid(struct ft1000_usb *dev)
-{
- struct dpram_blk *pdpram_blk;
- int i;
-
- pdpram_blk = ft1000_get_buffer(&freercvpool);
- if (pdpram_blk == NULL) {
- pr_debug("Out of memory in free receive command pool\n");
- return -1;
- }
- if (!ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE))
- goto exit_failure;
-
- /* Search for correct application block */
- for (i = 0; i < MAX_NUM_APP; i++) {
- if (dev->app_info[i].app_id == ((struct pseudo_hdr *)
- pdpram_blk->pbuffer)->portdest)
- break;
- }
- if (i == MAX_NUM_APP) {
- pr_debug("No application matching id = %d\n",
- ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest);
- goto exit_failure;
- } else if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
- goto exit_failure;
- } else {
- dev->app_info[i].nRxMsg++;
- /* Put message into the appropriate application block */
- list_add_tail(&pdpram_blk->list, &dev->app_info[i].app_sqlist);
- dev->app_info[i].NumOfMsg++;
- }
- return 0;
-
-exit_failure:
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- return -1;
-}
-
-int ft1000_poll(void *dev_id)
-{
- struct ft1000_usb *dev = (struct ft1000_usb *)dev_id;
- struct ft1000_info *info = netdev_priv(dev->net);
- u16 tempword;
- int status;
- u16 size;
- int i;
- u16 data;
- u16 modulo;
- u16 portid;
-
- if (ft1000_chkcard(dev) == FALSE) {
- pr_debug("failed\n");
- return -1;
- }
- status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
- if (!status) {
- if (tempword & FT1000_DB_DPRAM_RX) {
- status = ft1000_read_dpram16(dev,
- 0x200, (u8 *)&data, 0);
- size = ntohs(data) + 16 + 2;
- if (size % 4) {
- modulo = 4 - (size % 4);
- size = size + modulo;
- }
- status = ft1000_read_dpram16(dev, 0x201,
- (u8 *)&portid, 1);
- portid &= 0xff;
- if (size < MAX_CMD_SQSIZE) {
- switch (portid) {
- case DRIVERID:
- pr_debug("FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
- status = ft1000_proc_drvmsg(dev, size);
- if (status != 0)
- return status;
- break;
- case DSPBCMSGID:
- status = dsp_broadcast_msg_id(dev);
- break;
- default:
- status = handle_misc_portid(dev);
- break;
- }
- } else
- pr_debug("Invalid total length for SlowQ = %d\n",
- size);
- status = ft1000_write_register(dev,
- FT1000_DB_DPRAM_RX,
- FT1000_REG_DOORBELL);
- } else if (tempword & FT1000_DSP_ASIC_RESET) {
- /* Let's reset the ASIC from the Host side as well */
- status = ft1000_write_register(dev, ASIC_RESET_BIT,
- FT1000_REG_RESET);
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_RESET);
- i = 0;
- while (tempword & ASIC_RESET_BIT) {
- status = ft1000_read_register(dev, &tempword,
- FT1000_REG_RESET);
- usleep_range(9000, 11000);
- i++;
- if (i == 100)
- break;
- }
- if (i == 100) {
- pr_debug("Unable to reset ASIC\n");
- return 0;
- }
- usleep_range(9000, 11000);
- /* Program WMARK register */
- status = ft1000_write_register(dev, 0x600,
- FT1000_REG_MAG_WATERMARK);
- /* clear ASIC reset doorbell */
- status = ft1000_write_register(dev,
- FT1000_DSP_ASIC_RESET,
- FT1000_REG_DOORBELL);
- usleep_range(9000, 11000);
- } else if (tempword & FT1000_ASIC_RESET_REQ) {
- pr_debug("FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
- /* clear ASIC reset request from DSP */
- status = ft1000_write_register(dev,
- FT1000_ASIC_RESET_REQ,
- FT1000_REG_DOORBELL);
- status = ft1000_write_register(dev, HOST_INTF_BE,
- FT1000_REG_SUP_CTRL);
- /* copy dsp session record from Adapter block */
- status = ft1000_write_dpram32(dev, 0,
- (u8 *)&info->DSPSess.Rec[0], 1024);
- status = ft1000_write_register(dev, 0x600,
- FT1000_REG_MAG_WATERMARK);
- /* ring doorbell to tell DSP that
- * ASIC is out of reset
- */
- status = ft1000_write_register(dev,
- FT1000_ASIC_RESET_DSP,
- FT1000_REG_DOORBELL);
- } else if (tempword & FT1000_DB_COND_RESET) {
- pr_debug("FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
- if (!dev->fAppMsgPend) {
- /* Reset ASIC and DSP */
- status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER0,
- (u8 *)&info->DSP_TIME[0],
- FT1000_MAG_DSP_TIMER0_INDX);
- status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER1,
- (u8 *)&info->DSP_TIME[1],
- FT1000_MAG_DSP_TIMER1_INDX);
- status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER2,
- (u8 *)&info->DSP_TIME[2],
- FT1000_MAG_DSP_TIMER2_INDX);
- status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER3,
- (u8 *)&info->DSP_TIME[3],
- FT1000_MAG_DSP_TIMER3_INDX);
- info->CardReady = 0;
- info->DrvErrNum = DSP_CONDRESET_INFO;
- pr_debug("DSP conditional reset requested\n");
- info->ft1000_reset(dev->net);
- } else {
- dev->fProvComplete = false;
- dev->fCondResetPend = true;
- }
- ft1000_write_register(dev, FT1000_DB_COND_RESET,
- FT1000_REG_DOORBELL);
- }
- }
- return 0;
-}
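
Every doorbell event in ft1000_poll() above follows the same pattern: read FT1000_REG_DOORBELL, service the bit that is set, then acknowledge it by writing the same bit back. The sketch below shows that pattern for the slow-queue receive bit; the function name is an assumption and the message-routing step is elided.

/* Illustrative sketch only -- not part of the original driver. */
static int poll_slowq_once(struct ft1000_usb *dev)
{
	u16 doorbell;
	int status;

	status = ft1000_read_register(dev, &doorbell, FT1000_REG_DOORBELL);
	if (status || !(doorbell & FT1000_DB_DPRAM_RX))
		return status;

	/* ... read the message size and port id from DPRAM and route it,
	 * as ft1000_proc_drvmsg()/dsp_broadcast_msg_id() do above ...
	 */

	/* acknowledge the bit so the DSP can post the next message */
	return ft1000_write_register(dev, FT1000_DB_DPRAM_RX,
				     FT1000_REG_DOORBELL);
}
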
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
deleted file mode 100644
index e9472bebda0b..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *---------------------------------------------------------------------------
- * FT1000 driver for Flarion Flash OFDM NIC Device
- *
- * Copyright (C) 2002 Flarion Technologies, All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) any
- * later version. This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details. You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place -
- * Suite 330, Boston, MA 02111-1307, USA.
- *---------------------------------------------------------------------------
- *
- * File: ft1000_ioctl.h
- *
- * Description: Common structures and defines relating to IOCTL
- *
- * History:
- * 11/5/02 Whc Created.
- *
- *---------------------------------------------------------------------------
- */
-#ifndef _FT1000IOCTLH_
-#define _FT1000IOCTLH_
-
-struct IOCTL_GET_VER {
- unsigned long drv_ver;
-} __packed;
-
-/* Data structure for Dsp statistics */
-struct IOCTL_GET_DSP_STAT {
- unsigned char DspVer[DSPVERSZ]; /* DSP version number */
- unsigned char HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
- unsigned char Sku[SKUSZ]; /* SKU */
- unsigned char eui64[EUISZ]; /* EUI64 */
- unsigned short ConStat; /* Connection Status */
- /* Bits 0-3 = Connection Status Field */
- /* 0000=Idle (Disconnect) */
- /* 0001=Searching */
- /* 0010=Active (Connected) */
- /* 0011=Waiting for L2 down */
- /* 0100=Sleep */
- unsigned short LedStat; /* Led Status */
- /* Bits 0-3 = Signal Strength Field */
- /* 0000 = -105dBm to -92dBm */
- /* 0001 = -92dBm to -85dBm */
- /* 0011 = -85dBm to -75dBm */
- /* 0111 = -75dBm to -50dBm */
- /* 1111 = -50dBm to 0dBm */
- /* Bits 4-7 = Reserved */
- /* Bits 8-11 = SNR Field */
- /* 0000 = <2dB */
- /* 0001 = 2dB to 8dB */
- /* 0011 = 8dB to 15dB */
- /* 0111 = 15dB to 22dB */
- /* 1111 = >22dB */
- /* Bits 12-15 = Reserved */
- unsigned long nTxPkts; /* Number of packets transmitted
- * from host to dsp
- */
- unsigned long nRxPkts; /* Number of packets received from
- * dsp to host
- */
- unsigned long nTxBytes; /* Number of bytes transmitted
- * from host to dsp
- */
- unsigned long nRxBytes; /* Number of bytes received from
- * dsp to host
- */
- unsigned long ConTm; /* Current session connection time
- * in seconds
- */
- unsigned char CalVer[CALVERSZ]; /* Proprietary Calibration
- * Version
- */
- unsigned char CalDate[CALDATESZ]; /* Proprietary Calibration Date */
-} __packed;
-
-/* Data structure for Dual Ported RAM messaging between Host and Dsp */
-struct IOCTL_DPRAM_BLK {
- unsigned short total_len;
- struct pseudo_hdr pseudohdr;
- unsigned char buffer[1780];
-} __packed;
-
-struct IOCTL_DPRAM_COMMAND {
- unsigned short extra;
- struct IOCTL_DPRAM_BLK dpram_blk;
-} __packed;
-
-/*
- * Custom IOCTL command codes
- */
-#define FT1000_MAGIC_CODE 'F'
-
-#define IOCTL_REGISTER_CMD 0
-#define IOCTL_SET_DPRAM_CMD 3
-#define IOCTL_GET_DPRAM_CMD 4
-#define IOCTL_GET_DSP_STAT_CMD 6
-#define IOCTL_GET_VER_CMD 7
-#define IOCTL_CONNECT 10
-#define IOCTL_DISCONNECT 11
-
-#define IOCTL_FT1000_GET_DSP_STAT _IOR(FT1000_MAGIC_CODE, \
- IOCTL_GET_DSP_STAT_CMD, \
- struct IOCTL_GET_DSP_STAT)
-#define IOCTL_FT1000_GET_VER _IOR(FT1000_MAGIC_CODE, IOCTL_GET_VER_CMD, \
- struct IOCTL_GET_VER)
-#define IOCTL_FT1000_CONNECT _IO(FT1000_MAGIC_CODE, IOCTL_CONNECT)
-#define IOCTL_FT1000_DISCONNECT _IO(FT1000_MAGIC_CODE, IOCTL_DISCONNECT)
-#define IOCTL_FT1000_SET_DPRAM _IOW(FT1000_MAGIC_CODE, IOCTL_SET_DPRAM_CMD, \
- struct IOCTL_DPRAM_BLK)
-#define IOCTL_FT1000_GET_DPRAM _IOR(FT1000_MAGIC_CODE, IOCTL_GET_DPRAM_CMD, \
- struct IOCTL_DPRAM_BLK)
-#define IOCTL_FT1000_REGISTER _IOW(FT1000_MAGIC_CODE, IOCTL_REGISTER_CMD, \
- unsigned short *)
-
-#endif /* _FT1000IOCTLH_ */
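
The ioctl numbers above are built with the standard _IO/_IOR/_IOW macros on the 'F' magic code. A hedged userspace sketch of querying the driver version follows; the device node path is an assumption for illustration only, and the ft1000 headers (for struct IOCTL_GET_VER and the ioctl macros) are assumed to be reachable by the program.

/* Illustrative sketch only -- not part of the original driver. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ft1000_ioctl.h"	/* assumed to be available to userspace */

int main(void)
{
	struct IOCTL_GET_VER ver;
	int fd = open("/dev/FT1000", O_RDWR);	/* hypothetical node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_FT1000_GET_VER, &ver) == 0)
		printf("driver version 0x%lx\n", ver.drv_ver);
	close(fd);
	return 0;
}
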
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
deleted file mode 100644
index d1ba0b827a55..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*=====================================================
- * CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
- *
- *
- * This file is part of Express Card USB Driver
- *====================================================
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include "ft1000_usb.h"
-
-#include <linux/kthread.h>
-
-MODULE_DESCRIPTION("FT1000 EXPRESS CARD DRIVER");
-MODULE_LICENSE("Dual MPL/GPL");
-MODULE_SUPPORTED_DEVICE("QFT FT1000 Express Cards");
-
-void *pFileStart;
-size_t FileLength;
-
-#define VENDOR_ID 0x1291 /* Qualcomm vendor id */
-#define PRODUCT_ID 0x11 /* fake product id */
-
-/* table of devices that work with this driver */
-static struct usb_device_id id_table[] = {
- {USB_DEVICE(VENDOR_ID, PRODUCT_ID)},
- {},
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static bool gPollingfailed;
-static int ft1000_poll_thread(void *arg)
-{
- int ret;
-
- while (!kthread_should_stop()) {
- usleep_range(10000, 11000);
- if (!gPollingfailed) {
- ret = ft1000_poll(arg);
- if (ret != 0) {
- pr_debug("polling failed\n");
- gPollingfailed = true;
- }
- }
- }
- return 0;
-}
-
-static int ft1000_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- struct usb_device *dev;
- unsigned numaltsetting;
- int i, ret = 0, size;
-
- struct ft1000_usb *ft1000dev;
- struct ft1000_info *pft1000info = NULL;
- const struct firmware *dsp_fw;
-
- ft1000dev = kzalloc(sizeof(struct ft1000_usb), GFP_KERNEL);
- if (!ft1000dev)
- return -ENOMEM;
-
- dev = interface_to_usbdev(interface);
- pr_debug("usb device descriptor info - number of configuration is %d\n",
- dev->descriptor.bNumConfigurations);
-
- ft1000dev->dev = dev;
- ft1000dev->status = 0;
- ft1000dev->net = NULL;
- ft1000dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- ft1000dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!ft1000dev->tx_urb || !ft1000dev->rx_urb) {
- ret = -ENOMEM;
- goto err_fw;
- }
-
- numaltsetting = interface->num_altsetting;
- pr_debug("number of alt settings is: %d\n", numaltsetting);
- iface_desc = interface->cur_altsetting;
- pr_debug("number of endpoints is: %d\n",
- iface_desc->desc.bNumEndpoints);
- pr_debug("descriptor type is: %d\n", iface_desc->desc.bDescriptorType);
- pr_debug("interface number is: %d\n",
- iface_desc->desc.bInterfaceNumber);
- pr_debug("alternatesetting is: %d\n",
- iface_desc->desc.bAlternateSetting);
- pr_debug("interface class is: %d\n", iface_desc->desc.bInterfaceClass);
- pr_debug("control endpoint info:\n");
- pr_debug("descriptor0 type -- %d\n",
- iface_desc->endpoint[0].desc.bmAttributes);
- pr_debug("descriptor1 type -- %d\n",
- iface_desc->endpoint[1].desc.bmAttributes);
- pr_debug("descriptor2 type -- %d\n",
- iface_desc->endpoint[2].desc.bmAttributes);
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- endpoint =
- (struct usb_endpoint_descriptor *)&iface_desc->
- endpoint[i].desc;
- pr_debug("endpoint %d\n", i);
- pr_debug("bEndpointAddress=%x, bmAttributes=%x\n",
- endpoint->bEndpointAddress, endpoint->bmAttributes);
- if (usb_endpoint_is_bulk_in(endpoint)) {
- ft1000dev->bulk_in_endpointAddr =
- endpoint->bEndpointAddress;
- pr_debug("in: %d\n", endpoint->bEndpointAddress);
- }
-
-		if (usb_endpoint_is_bulk_out(endpoint)) {
- ft1000dev->bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- pr_debug("out: %d\n", endpoint->bEndpointAddress);
- }
- }
-
- pr_debug("bulk_in=%d, bulk_out=%d\n",
- ft1000dev->bulk_in_endpointAddr,
- ft1000dev->bulk_out_endpointAddr);
-
- ret = request_firmware(&dsp_fw, "ft3000.img", &dev->dev);
- if (ret < 0) {
- dev_err(interface->usb_dev, "Error request_firmware()\n");
- goto err_fw;
- }
-
- size = max_t(uint, dsp_fw->size, 4096);
- pFileStart = kmalloc(size, GFP_KERNEL);
-
- if (!pFileStart) {
- release_firmware(dsp_fw);
- ret = -ENOMEM;
- goto err_fw;
- }
-
- memcpy(pFileStart, dsp_fw->data, dsp_fw->size);
- FileLength = dsp_fw->size;
- release_firmware(dsp_fw);
-
- pr_debug("start downloading dsp image...\n");
-
- ret = init_ft1000_netdev(ft1000dev);
- if (ret)
- goto err_load;
-
- pft1000info = netdev_priv(ft1000dev->net);
-
- pr_debug("pft1000info=%p\n", pft1000info);
- ret = dsp_reload(ft1000dev);
- if (ret) {
- dev_err(interface->usb_dev,
- "Problem with DSP image loading\n");
- goto err_load;
- }
-
- gPollingfailed = false;
- ft1000dev->pPollThread =
- kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
-
- if (IS_ERR(ft1000dev->pPollThread)) {
- ret = PTR_ERR(ft1000dev->pPollThread);
- goto err_load;
- }
-
- msleep(500);
-
- while (!pft1000info->CardReady) {
- if (gPollingfailed) {
- ret = -EIO;
- goto err_thread;
- }
- msleep(100);
- pr_debug("Waiting for Card Ready\n");
- }
-
- pr_debug("Card Ready!!!! Registering network device\n");
-
- ret = reg_ft1000_netdev(ft1000dev, interface);
- if (ret)
- goto err_thread;
-
- ft1000dev->NetDevRegDone = 1;
-
- return 0;
-
-err_thread:
- kthread_stop(ft1000dev->pPollThread);
-err_load:
- kfree(pFileStart);
-err_fw:
- usb_free_urb(ft1000dev->rx_urb);
- usb_free_urb(ft1000dev->tx_urb);
- kfree(ft1000dev);
- return ret;
-}
-
-static void ft1000_disconnect(struct usb_interface *interface)
-{
- struct ft1000_info *pft1000info;
- struct ft1000_usb *ft1000dev;
-
- pft1000info = (struct ft1000_info *)usb_get_intfdata(interface);
- pr_debug("In disconnect pft1000info=%p\n", pft1000info);
-
- if (pft1000info) {
- ft1000dev = pft1000info->priv;
- if (ft1000dev->pPollThread)
- kthread_stop(ft1000dev->pPollThread);
-
- pr_debug("threads are terminated\n");
-
- if (ft1000dev->net) {
- pr_debug("destroy char driver\n");
- ft1000_destroy_dev(ft1000dev->net);
- unregister_netdev(ft1000dev->net);
- pr_debug("network device unregistered\n");
- free_netdev(ft1000dev->net);
-
- }
-
- usb_free_urb(ft1000dev->rx_urb);
- usb_free_urb(ft1000dev->tx_urb);
-
- pr_debug("urb freed\n");
-
- kfree(ft1000dev);
- }
- kfree(pFileStart);
-}
-
-static struct usb_driver ft1000_usb_driver = {
- .name = "ft1000usb",
- .probe = ft1000_probe,
- .disconnect = ft1000_disconnect,
- .id_table = id_table,
-};
-
-module_usb_driver(ft1000_usb_driver);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
deleted file mode 100644
index 9b5050fcbb66..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ /dev/null
@@ -1,150 +0,0 @@
-#ifndef _FT1000_USB_H_
-#define _FT1000_USB_H_
-
-#include "../ft1000.h"
-#include "ft1000_ioctl.h"
-#define FT1000_DRV_VER 0x01010403
-
-#define MAX_NUM_APP 6
-#define MAX_MSG_LIMIT 200
-#define NUM_OF_FREE_BUFFERS 1500
-
-#define PSEUDOSZ 16
-
-struct app_info_block {
- u32 nTxMsg; /* DPRAM msg sent to DSP with app_id */
- u32 nRxMsg; /* DPRAM msg rcv from dsp with app_id */
- u32 nTxMsgReject; /* DPRAM msg rejected due to DSP doorbell
- * set
- */
- u32 nRxMsgMiss; /* DPRAM msg dropped due to overflow */
- struct fown_struct *fileobject;/* Application's file object */
- u16 app_id; /* Application id */
- int DspBCMsgFlag;
- int NumOfMsg; /* number of messages queued up */
- wait_queue_head_t wait_dpram_msg;
- struct list_head app_sqlist; /* linked list of msgs for application on
- * slow queue
- */
-} __packed;
-
-#define FALSE 0
-#define TRUE 1
-
-#define FT1000_STATUS_CLOSING 0x01
-
-#define DSPBCMSGID 0x10
-
-/* Electrabuzz specific DPRAM mapping */
-/* this is used by ft1000_usb driver - isn't that a bug? */
-#undef FT1000_DPRAM_RX_BASE
-#define FT1000_DPRAM_RX_BASE 0x1800 /* RX AREA (SlowQ) */
-
-/* MEMORY MAP FOR MAGNEMITE */
-/* the indexes are swapped comparing to PCMCIA - is it OK or a bug? */
-#undef FT1000_MAG_DSP_LED_INDX
-#define FT1000_MAG_DSP_LED_INDX 0x1 /* dsp led status for PAD
- * device
- */
-#undef FT1000_MAG_DSP_CON_STATE_INDX
-#define FT1000_MAG_DSP_CON_STATE_INDX 0x0 /* DSP Connection Status Info */
-
-/* Maximum times trying to get ASIC out of reset */
-#define MAX_ASIC_RESET_CNT 20
-
-#define MAX_BUF_SIZE 4096
-
-struct ft1000_debug_dirs {
- struct list_head list;
- struct dentry *dent;
- struct dentry *file;
- int int_number;
-};
-
-struct ft1000_usb {
- struct usb_device *dev;
- struct net_device *net;
-
- u32 status;
-
- struct urb *rx_urb;
- struct urb *tx_urb;
-
- u8 tx_buf[MAX_BUF_SIZE];
- u8 rx_buf[MAX_BUF_SIZE];
-
- u8 bulk_in_endpointAddr;
- u8 bulk_out_endpointAddr;
-
- struct task_struct *pPollThread;
- unsigned char fcodeldr;
- unsigned char bootmode;
- unsigned char usbboot;
- unsigned short dspalive;
- bool fProvComplete;
- bool fCondResetPend;
- bool fAppMsgPend;
- int DeviceCreated;
- int NetDevRegDone;
- u8 CardNumber;
- u8 DeviceName[15];
- struct ft1000_debug_dirs nodes;
- spinlock_t fifo_lock;
- int appcnt;
- struct app_info_block app_info[MAX_NUM_APP];
- u16 DrvMsgPend;
- unsigned short tempbuf[32];
-} __packed;
-
-
-struct dpram_blk {
- struct list_head list;
- u16 *pbuffer;
-} __packed;
-
-int ft1000_read_register(struct ft1000_usb *ft1000dev,
- u16 *Data, u16 nRegIndx);
-int ft1000_write_register(struct ft1000_usb *ft1000dev,
- u16 value, u16 nRegIndx);
-int ft1000_read_dpram32(struct ft1000_usb *ft1000dev,
- u16 indx, u8 *buffer, u16 cnt);
-int ft1000_write_dpram32(struct ft1000_usb *ft1000dev,
- u16 indx, u8 *buffer, u16 cnt);
-int ft1000_read_dpram16(struct ft1000_usb *ft1000dev,
- u16 indx, u8 *buffer, u8 highlow);
-int ft1000_write_dpram16(struct ft1000_usb *ft1000dev,
- u16 indx, u16 value, u8 highlow);
-int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev,
- u16 indx, u8 *buffer);
-int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev,
- u16 indx, u8 *buffer);
-
-extern void *pFileStart;
-extern size_t FileLength;
-extern int numofmsgbuf;
-
-int ft1000_close(struct net_device *dev);
-int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
- u32 FileLength);
-
-extern struct list_head freercvpool;
-
-/* lock to arbitrate free buffer list for receive command data */
-extern spinlock_t free_buff_lock;
-
-int ft1000_create_dev(struct ft1000_usb *dev);
-void ft1000_destroy_dev(struct net_device *dev);
-int card_send_command(struct ft1000_usb *ft1000dev,
- void *ptempbuffer, int size);
-
-struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
-void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist);
-
-int dsp_reload(struct ft1000_usb *ft1000dev);
-int init_ft1000_netdev(struct ft1000_usb *ft1000dev);
-struct usb_interface;
-int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
- struct usb_interface *intf);
-int ft1000_poll(void *dev_id);
-
-#endif /* _FT1000_USB_H_ */
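
The accessors declared above take the destination buffer or value first and the
register index last. A short usage sketch (hypothetical helper; assumes this
header plus <linux/delay.h> and <linux/errno.h>) that polls the doorbell
register defined in ft1000.h:

/* Hypothetical helper: wait for the host->DSP DPRAM doorbell to clear. */
static int example_wait_tx_doorbell_clear(struct ft1000_usb *ft1000dev)
{
	u16 doorbell;
	int i, ret;

	for (i = 0; i < 10; i++) {
		ret = ft1000_read_register(ft1000dev, &doorbell,
					   FT1000_REG_DOORBELL);
		if (ret)
			return ret;
		if (!(doorbell & FT1000_DB_DPRAM_TX))
			return 0;
		msleep(10);
	}
	return -ETIMEDOUT;
}
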
diff --git a/drivers/staging/ft1000/ft1000-usb/ft3000.img b/drivers/staging/ft1000/ft1000-usb/ft3000.img
deleted file mode 100644
index 7bef6bd3680a..000000000000
--- a/drivers/staging/ft1000/ft1000-usb/ft3000.img
+++ /dev/null
Binary files differ
diff --git a/drivers/staging/ft1000/ft1000.h b/drivers/staging/ft1000/ft1000.h
deleted file mode 100644
index 8a2e4caa532d..000000000000
--- a/drivers/staging/ft1000/ft1000.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Common structures and definitions for FT1000 Flarion Flash OFDM PCMCIA and
- * USB devices.
- *
- * Originally copyright (c) 2002 Flarion Technologies
- *
- */
-
-#define DSPVERSZ 4
-#define HWSERNUMSZ 16
-#define SKUSZ 20
-#define EUISZ 8
-#define MODESZ 2
-#define CALVERSZ 2
-#define CALDATESZ 6
-
-#define ELECTRABUZZ_ID 0 /* ASIC ID for Electrabuzz */
-#define MAGNEMITE_ID 0x1a01 /* ASIC ID for Magnemite */
-
-/* MEMORY MAP common to both ELECTRABUZZ and MAGNEMITE */
-#define FT1000_REG_DPRAM_ADDR 0x000E /* DPADR - Dual Port Ram Indirect
- * Address Register
- */
-#define FT1000_REG_SUP_CTRL 0x0020 /* HCTR - Host Control Register */
-#define FT1000_REG_SUP_STAT 0x0022 /* HSTAT - Host Status Register */
-#define FT1000_REG_RESET 0x0024 /* HCTR - Host Control Register */
-#define FT1000_REG_SUP_ISR 0x0026 /* HISR - Host Interrupt Status
- * Register
- */
-#define FT1000_REG_SUP_IMASK 0x0028 /* HIMASK - Host Interrupt Mask */
-#define FT1000_REG_DOORBELL 0x002a /* DBELL - Door Bell Register */
-#define FT1000_REG_ASIC_ID 0x002e /* ASICID - ASIC Identification
- * Number
- */
-
-/* MEMORY MAP FOR ELECTRABUZZ ASIC */
-#define FT1000_REG_UFIFO_STAT 0x0000 /* UFSR - Uplink FIFO status register */
-#define FT1000_REG_UFIFO_BEG 0x0002 /* UFBR - Uplink FIFO beginning
- * register
- */
-#define FT1000_REG_UFIFO_MID 0x0004 /* UFMR - Uplink FIFO middle register */
-#define FT1000_REG_UFIFO_END 0x0006 /* UFER - Uplink FIFO end register */
-#define FT1000_REG_DFIFO_STAT 0x0008 /* DFSR - Downlink FIFO status
- * register
- */
-#define FT1000_REG_DFIFO 0x000A /* DFR - Downlink FIFO Register */
-#define FT1000_REG_DPRAM_DATA 0x000C /* DPRAM - Dual Port Indirect
- * Data Register
- */
-#define FT1000_REG_WATERMARK 0x0010 /* WMARK - Watermark Register */
-
-/* MEMORY MAP FOR MAGNEMITE */
-#define FT1000_REG_MAG_UFDR 0x0000 /* UFDR - Uplink FIFO Data
- * Register (32-bits)
- */
-#define FT1000_REG_MAG_UFDRL 0x0000 /* UFDRL - Uplink FIFO Data
- * Register low-word (16-bits)
- */
-#define FT1000_REG_MAG_UFDRH 0x0002 /* UFDRH - Uplink FIFO Data Register
- * high-word (16-bits)
- */
-#define FT1000_REG_MAG_UFER 0x0004 /* UFER - Uplink FIFO End Register */
-#define FT1000_REG_MAG_UFSR 0x0006 /* UFSR - Uplink FIFO Status Register */
-#define FT1000_REG_MAG_DFR 0x0008 /* DFR - Downlink FIFO Register
- * (32-bits)
- */
-#define FT1000_REG_MAG_DFRL 0x0008 /* DFRL - Downlink FIFO Register
- * low-word (16-bits)
- */
-#define FT1000_REG_MAG_DFRH 0x000a /* DFRH - Downlink FIFO Register
- * high-word (16-bits)
- */
-#define FT1000_REG_MAG_DFSR 0x000c /* DFSR - Downlink FIFO Status
- * Register
- */
-#define FT1000_REG_MAG_DPDATA 0x0010 /* DPDATA - Dual Port RAM Indirect
- * Data Register (32-bits)
- */
-#define FT1000_REG_MAG_DPDATAL 0x0010 /* DPDATAL - Dual Port RAM Indirect
- * Data Register low-word (16-bits)
- */
-#define FT1000_REG_MAG_DPDATAH 0x0012 /* DPDATAH - Dual Port RAM Indirect Data
- * Register high-word (16-bits)
- */
-#define FT1000_REG_MAG_WATERMARK 0x002c /* WMARK - Watermark Register */
-#define FT1000_REG_MAG_VERSION 0x0030 /* LLC Version */
-
-/* Reserved Dual Port RAM offsets for Electrabuzz */
-#define FT1000_DPRAM_TX_BASE 0x0002 /* Host to PC Card Messaging Area */
-#define FT1000_DPRAM_RX_BASE 0x0800 /* PC Card to Host Messaging Area */
-#define FT1000_FIFO_LEN 0x07FC /* total length for DSP FIFO tracking */
-#define FT1000_HI_HO 0x07FE /* heartbeat with HI/HO */
-#define FT1000_DSP_STATUS 0x0FFE /* dsp status - non-zero is a request
- * to reset dsp
- */
-#define FT1000_DSP_LED 0x0FFA /* dsp led status for PAD device */
-#define FT1000_DSP_CON_STATE 0x0FF8 /* DSP Connection Status Info */
-#define FT1000_DPRAM_FEFE 0x0002 /* location for dsp ready indicator */
-#define FT1000_DSP_TIMER0 0x1FF0 /* Timer Field from Basestation */
-#define FT1000_DSP_TIMER1 0x1FF2 /* Timer Field from Basestation */
-#define FT1000_DSP_TIMER2 0x1FF4 /* Timer Field from Basestation */
-#define FT1000_DSP_TIMER3 0x1FF6 /* Timer Field from Basestation */
-
-/* Reserved Dual Port RAM offsets for Magnemite */
-#define FT1000_DPRAM_MAG_TX_BASE 0x0000 /* Host to PC Card
- * Messaging Area
- */
-#define FT1000_DPRAM_MAG_RX_BASE 0x0200 /* PC Card to Host
- * Messaging Area
- */
-
-#define FT1000_MAG_FIFO_LEN 0x1FF /* total length for DSP
- * FIFO tracking
- */
-#define FT1000_MAG_FIFO_LEN_INDX 0x1 /* low-word index */
-#define FT1000_MAG_HI_HO 0x1FF /* heartbeat with HI/HO */
-#define FT1000_MAG_HI_HO_INDX 0x0 /* high-word index */
-#define FT1000_MAG_DSP_LED 0x3FE /* dsp led status for
- * PAD device
- */
-#define FT1000_MAG_DSP_LED_INDX 0x0 /* dsp led status for
- * PAD device
- */
-#define FT1000_MAG_DSP_CON_STATE 0x3FE /* DSP Connection Status Info */
-#define FT1000_MAG_DSP_CON_STATE_INDX 0x1 /* DSP Connection Status Info */
-#define FT1000_MAG_DPRAM_FEFE 0x000 /* location for dsp ready
- * indicator
- */
-#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 /* location for dsp ready
- * indicator
- */
-#define FT1000_MAG_DSP_TIMER0 0x3FC /* Timer Field from
- * Basestation
- */
-#define FT1000_MAG_DSP_TIMER0_INDX 0x1
-#define FT1000_MAG_DSP_TIMER1 0x3FC /* Timer Field from
- * Basestation
- */
-#define FT1000_MAG_DSP_TIMER1_INDX 0x0
-#define FT1000_MAG_DSP_TIMER2 0x3FD /* Timer Field from
- * Basestation
- */
-#define FT1000_MAG_DSP_TIMER2_INDX 0x1
-#define FT1000_MAG_DSP_TIMER3 0x3FD /* Timer Field from
- * Basestation
- */
-#define FT1000_MAG_DSP_TIMER3_INDX 0x0
-#define FT1000_MAG_TOTAL_LEN 0x200
-#define FT1000_MAG_TOTAL_LEN_INDX 0x1
-#define FT1000_MAG_PH_LEN 0x200
-#define FT1000_MAG_PH_LEN_INDX 0x0
-#define FT1000_MAG_PORT_ID 0x201
-#define FT1000_MAG_PORT_ID_INDX 0x0
-
-#define HOST_INTF_LE 0x0 /* Host interface little endian mode */
-#define HOST_INTF_BE 0x1 /* Host interface big endian mode */
-
-/* FT1000 to Host Doorbell assignments */
-#define FT1000_DB_DPRAM_RX 0x0001 /* this value indicates that DSP
- * has data for host in DPRAM
- */
-#define FT1000_DB_DNLD_RX 0x0002 /* Downloader handshake doorbell */
-#define FT1000_ASIC_RESET_REQ 0x0004 /* DSP requesting host to
- * reset the ASIC
- */
-#define FT1000_DSP_ASIC_RESET 0x0008 /* DSP indicating host that
- * it will reset the ASIC
- */
-#define FT1000_DB_COND_RESET 0x0010 /* DSP request for a card reset. */
-
-/* Host to FT1000 Doorbell assignments */
-#define FT1000_DB_DPRAM_TX 0x0100 /* this value indicates that host
- * has data for DSP in DPRAM.
- */
-#define FT1000_DB_DNLD_TX 0x0200 /* Downloader handshake doorbell */
-#define FT1000_ASIC_RESET_DSP 0x0400 /* Responds to FT1000_ASIC_RESET_REQ */
-#define FT1000_DB_HB 0x1000 /* Indicates that supervisor has a
- * heartbeat message for DSP.
- */
-
-#define hi 0x6869 /* PC Card heartbeat values */
-#define ho 0x686f /* PC Card heartbeat values */
-
-/* Magnemite specific defines */
-#define hi_mag 0x6968 /* Byte swap hi to avoid
- * additional system call
- */
-#define ho_mag 0x6f68 /* Byte swap ho to avoid
- * additional system call
- */
-
-/* Bit field definitions for Host Interrupt Status Register */
-/* Indicate the cause of an interrupt. */
-#define ISR_EMPTY 0x00 /* no bits set */
-#define ISR_DOORBELL_ACK 0x01 /* Doorbell acknowledge from DSP */
-#define ISR_DOORBELL_PEND 0x02 /* Doorbell pending from DSP */
-#define ISR_RCV 0x04 /* Packet available in Downlink FIFO */
-#define ISR_WATERMARK 0x08 /* Watermark requirements satisfied */
-
-/* Bit field definition for Host Interrupt Mask */
-#define ISR_MASK_NONE 0x0000 /* no bits set */
-#define ISR_MASK_DOORBELL_ACK 0x0001 /* Doorbell acknowledge mask */
-#define ISR_MASK_DOORBELL_PEND 0x0002 /* Doorbell pending mask */
-#define ISR_MASK_RCV 0x0004 /* Downlink Packet available mask */
-#define ISR_MASK_WATERMARK 0x0008 /* Watermark interrupt mask */
-#define ISR_MASK_ALL 0xffff /* Mask all interrupts */
-/* Default interrupt mask
- * (Enable Doorbell pending and Packet available interrupts)
- */
-#define ISR_DEFAULT_MASK 0x7ff9
-
-/* Bit field definition for Host Control Register */
-#define DSP_RESET_BIT 0x0001 /* Bit field to control
- * dsp reset state
- */
- /* (0 = out of reset 1 = reset) */
-#define ASIC_RESET_BIT 0x0002 /* Bit field to control
- * ASIC reset state
- */
- /* (0 = out of reset 1 = reset) */
-#define DSP_UNENCRYPTED 0x0004
-#define DSP_ENCRYPTED 0x0008
-#define EFUSE_MEM_DISABLE 0x0040
-
-/* Application specific IDs */
-#define DSPID 0x20
-#define HOSTID 0x10
-#define DSPAIRID 0x90
-#define DRIVERID 0x00
-#define NETWORKID 0x20
-
-/* Size of DPRAM Message */
-#define MAX_CMD_SQSIZE 1780
-
-#define ENET_MAX_SIZE 1514
-#define ENET_HEADER_SIZE 14
-
-#define SLOWQ_TYPE 0
-#define FASTQ_TYPE 1
-
-#define MAX_DSP_SESS_REC 1024
-
-#define DSP_QID_OFFSET 4
-
-/* Driver message types */
-#define MEDIA_STATE 0x0010
-#define TIME_UPDATE 0x0020
-#define DSP_PROVISION 0x0030
-#define DSP_INIT_MSG 0x0050
-#define DSP_HIBERNATE 0x0060
-#define DSP_STORE_INFO 0x0070
-#define DSP_GET_INFO 0x0071
-#define GET_DRV_ERR_RPT_MSG 0x0073
-#define RSP_DRV_ERR_RPT_MSG 0x0074
-
-/* Driver Error Messages for DSP */
-#define DSP_HB_INFO 0x7ef0
-#define DSP_FIFO_INFO 0x7ef1
-#define DSP_CONDRESET_INFO 0x7ef2
-#define DSP_CMDLEN_INFO 0x7ef3
-#define DSP_CMDPHCKSUM_INFO 0x7ef4
-#define DSP_PKTPHCKSUM_INFO 0x7ef5
-#define DSP_PKTLEN_INFO 0x7ef6
-#define DSP_USER_RESET 0x7ef7
-#define FIFO_FLUSH_MAXLIMIT 0x7ef8
-#define FIFO_FLUSH_BADCNT 0x7ef9
-#define FIFO_ZERO_LEN 0x7efa
-
-/* Pseudo Header structure */
-struct pseudo_hdr {
- unsigned short length; /* length of msg body */
- unsigned char source; /* hardware source id */
- /* Host = 0x10 */
- /* Dsp = 0x20 */
- unsigned char destination; /* hardware destination id
- * (refer to source)
- */
- unsigned char portdest; /* software destination port id */
- /* Host = 0x00 */
- /* Application Broadcast = 0x10 */
- /* Network Stack = 0x20 */
- /* Dsp OAM = 0x80 */
- /* Dsp Airlink = 0x90 */
- /* Dsp Loader = 0xa0 */
- /* Dsp MIP = 0xb0 */
- unsigned char portsrc; /* software source port id
- * (refer to portdest)
- */
- unsigned short sh_str_id; /* not used */
- unsigned char control; /* not used */
- unsigned char rsvd1;
- unsigned char seq_num; /* message sequence number */
- unsigned char rsvd2;
- unsigned short qos_class; /* not used */
- unsigned short checksum; /* pseudo header checksum */
-} __packed;
-
-struct drv_msg {
- struct pseudo_hdr pseudo;
- u16 type;
- u16 length;
- u8 data[0];
-} __packed;
-
-struct media_msg {
- struct pseudo_hdr pseudo;
- u16 type;
- u16 length;
- u16 state;
- u32 ip_addr;
- u32 net_mask;
- u32 gateway;
- u32 dns_1;
- u32 dns_2;
-} __packed;
-
-struct dsp_init_msg {
- struct pseudo_hdr pseudo;
- u16 type;
- u16 length;
- u8 DspVer[DSPVERSZ]; /* DSP version number */
- u8 HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
- u8 Sku[SKUSZ]; /* SKU */
- u8 eui64[EUISZ]; /* EUI64 */
- u8 ProductMode[MODESZ]; /* Product Mode (Market/Production) */
- u8 RfCalVer[CALVERSZ]; /* Rf Calibration version */
- u8 RfCalDate[CALDATESZ]; /* Rf Calibration date */
-} __packed;
-
-struct prov_record {
- struct list_head list;
- u8 *pprov_data;
-};
-
-struct ft1000_info {
- void *priv;
- struct net_device_stats stats;
- u16 DrvErrNum;
- u16 AsicID;
- int CardReady;
- int registered;
- int mediastate;
- u8 squeseqnum; /* sequence number on slow queue */
- spinlock_t dpram_lock;
- u16 fifo_cnt;
- u8 DspVer[DSPVERSZ]; /* DSP version number */
- u8 HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
- u8 Sku[SKUSZ]; /* SKU */
- u8 eui64[EUISZ]; /* EUI64 */
- time_t ConTm; /* Connection Time */
- u8 ProductMode[MODESZ];
- u8 RfCalVer[CALVERSZ];
- u8 RfCalDate[CALDATESZ];
- u16 DSP_TIME[4];
- u16 LedStat;
- u16 ConStat;
- u16 ProgConStat;
- struct list_head prov_list;
- u16 DSPInfoBlklen;
- int (*ft1000_reset)(void *);
- u16 DSPInfoBlk[MAX_DSP_SESS_REC];
- union {
- u16 Rec[MAX_DSP_SESS_REC];
- u32 MagRec[MAX_DSP_SESS_REC/2];
- } DSPSess;
-};
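
The message structures above (drv_msg, media_msg, dsp_init_msg) all begin with
the 16-byte struct pseudo_hdr, which ends in a 16-bit checksum. The sketch
below fills one in; the XOR-of-the-first-seven-words checksum is an
illustrative assumption, since this header declares the field but not the
algorithm.

/* Sketch: build a host-sourced pseudo header for a body of body_len bytes.
 * The checksum scheme here (XOR of the first seven 16-bit words) is an
 * assumption for illustration only.
 */
static void example_fill_pseudo_hdr(struct pseudo_hdr *hdr, u16 body_len,
				    u8 portdest, u8 seq)
{
	u16 *w = (u16 *)hdr;
	int i;

	memset(hdr, 0, sizeof(*hdr));
	hdr->length = body_len;
	hdr->source = HOSTID;		/* hardware ids documented above */
	hdr->destination = DSPID;
	hdr->portdest = portdest;	/* e.g. network stack or DSP OAM */
	hdr->portsrc = DRIVERID;
	hdr->seq_num = seq;

	hdr->checksum = w[0];
	for (i = 1; i < 7; i++)
		hdr->checksum ^= w[i];
}
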
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
deleted file mode 100644
index 9c7c9267d52c..000000000000
--- a/drivers/staging/fwserial/Kconfig
+++ /dev/null
@@ -1,31 +0,0 @@
-config FIREWIRE_SERIAL
- tristate "TTY over Firewire"
- depends on FIREWIRE && TTY
- help
- This enables TTY over IEEE 1394, providing high-speed serial
- connectivity to cabled peers. This driver implements an
- ad-hoc transport protocol and is currently limited to
- Linux-to-Linux communication.
-
- To compile this driver as a module, say M here: the module will
- be called firewire-serial.
-
-if FIREWIRE_SERIAL
-
-config FWTTY_MAX_TOTAL_PORTS
- int "Maximum number of serial ports supported"
- default "64"
- help
- Set this to the maximum number of serial ports you want the
- firewire-serial driver to support.
-
-config FWTTY_MAX_CARD_PORTS
- int "Maximum number of serial ports supported per adapter"
- range 0 FWTTY_MAX_TOTAL_PORTS
- default "32"
- help
- Set this to the maximum number of serial ports each firewire
- adapter supports. The actual number of serial ports registered
- is set with the module parameter "ttys".
-
-endif
diff --git a/drivers/staging/fwserial/Makefile b/drivers/staging/fwserial/Makefile
deleted file mode 100644
index 2170869a19b1..000000000000
--- a/drivers/staging/fwserial/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_FIREWIRE_SERIAL) += firewire-serial.o
-firewire-serial-objs := fwserial.o dma_fifo.o
diff --git a/drivers/staging/fwserial/TODO b/drivers/staging/fwserial/TODO
deleted file mode 100644
index 382a7959407c..000000000000
--- a/drivers/staging/fwserial/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
-TODOs prior to this driver moving out of staging
-------------------------------------------------
-1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
- - I/O is handled asynchronously which presents some issues when error
- conditions occur.
-2. Implement _robust_ console on top of this. The existing prototype console
- driver is not ready for the big leagues yet.
-3. Expose means of controlling attach/detach of peers via sysfs. Include
- GUID-to-port matching/whitelist/blacklist.
-
--- Issues with firewire stack --
-1. This driver uses the same unregistered vendor id that the firewire core does
- (0xd00d1e). Perhaps this could be exposed as a define in
- firewire.h?
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
deleted file mode 100644
index 7a3347c3d02b..000000000000
--- a/drivers/staging/fwserial/dma_fifo.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * DMA-able FIFO implementation
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/bug.h>
-
-#include "dma_fifo.h"
-
-#ifdef DEBUG_TRACING
-#define df_trace(s, args...) pr_debug(s, ##args)
-#else
-#define df_trace(s, args...)
-#endif
-
-#define FAIL(fifo, condition, format...) ({ \
- fifo->corrupt = !!(condition); \
- WARN(fifo->corrupt, format); \
-})
-
-/*
- * private helper fn to determine if check is in open interval (lo,hi)
- */
-static bool addr_check(unsigned check, unsigned lo, unsigned hi)
-{
- return check - (lo + 1) < (hi - 1) - lo;
-}
-
-/**
- * dma_fifo_init: initialize the fifo to a valid but inoperative state
- * @fifo: address of in-place "struct dma_fifo" object
- */
-void dma_fifo_init(struct dma_fifo *fifo)
-{
- memset(fifo, 0, sizeof(*fifo));
- INIT_LIST_HEAD(&fifo->pending);
-}
-
-/**
- * dma_fifo_alloc - initialize and allocate dma_fifo
- * @fifo: address of in-place "struct dma_fifo" object
- * @size: 'apparent' size, in bytes, of fifo
- * @align: dma alignment to maintain (should be at least cpu cache alignment),
- * must be power of 2
- * @tx_limit: maximum # of bytes transmissible per dma (rounded down to
- * multiple of alignment, but at least align size)
- * @open_limit: maximum # of outstanding dma transactions allowed
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- *
- * The 'apparent' size will be rounded up to next greater aligned size.
- * Returns 0 if no error, otherwise an error code
- */
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
- int tx_limit, int open_limit, gfp_t gfp_mask)
-{
- int capacity;
-
- if (!is_power_of_2(align) || size < 0)
- return -EINVAL;
-
- size = round_up(size, align);
- capacity = size + align * open_limit + align * DMA_FIFO_GUARD;
- fifo->data = kmalloc(capacity, gfp_mask);
- if (!fifo->data)
- return -ENOMEM;
-
- fifo->in = 0;
- fifo->out = 0;
- fifo->done = 0;
- fifo->size = size;
- fifo->avail = size;
- fifo->align = align;
- fifo->tx_limit = max_t(int, round_down(tx_limit, align), align);
- fifo->open = 0;
- fifo->open_limit = open_limit;
- fifo->guard = size + align * open_limit;
- fifo->capacity = capacity;
- fifo->corrupt = 0;
-
- return 0;
-}
-
-/**
- * dma_fifo_free - frees the fifo
- * @fifo: address of in-place "struct dma_fifo" to free
- *
- * Also reinits the fifo to a valid but inoperative state. This
- * allows the fifo to be reused with a different target requiring
- * different fifo parameters.
- */
-void dma_fifo_free(struct dma_fifo *fifo)
-{
- struct dma_pending *pending, *next;
-
- if (fifo->data == NULL)
- return;
-
- list_for_each_entry_safe(pending, next, &fifo->pending, link)
- list_del_init(&pending->link);
- kfree(fifo->data);
- fifo->data = NULL;
-}
-
-/**
- * dma_fifo_reset - dumps the fifo contents and reinits for reuse
- * @fifo: address of in-place "struct dma_fifo" to reset
- */
-void dma_fifo_reset(struct dma_fifo *fifo)
-{
- struct dma_pending *pending, *next;
-
- if (fifo->data == NULL)
- return;
-
- list_for_each_entry_safe(pending, next, &fifo->pending, link)
- list_del_init(&pending->link);
- fifo->in = 0;
- fifo->out = 0;
- fifo->done = 0;
- fifo->avail = fifo->size;
- fifo->open = 0;
- fifo->corrupt = 0;
-}
-
-/**
- * dma_fifo_in - copies data into the fifo
- * @fifo: address of in-place "struct dma_fifo" to write to
- * @src: buffer to copy from
- * @n: # of bytes to copy
- *
- * Returns the # of bytes actually copied, which can be less than requested if
- * the fifo becomes full. If < 0, return is error code.
- */
-int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
-{
- int ofs, l;
-
- if (fifo->data == NULL)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
-
- if (n > fifo->avail)
- n = fifo->avail;
- if (n <= 0)
- return 0;
-
- ofs = fifo->in % fifo->capacity;
- l = min(n, fifo->capacity - ofs);
- memcpy(fifo->data + ofs, src, l);
- memcpy(fifo->data, src + l, n - l);
-
- if (FAIL(fifo, addr_check(fifo->done, fifo->in, fifo->in + n) ||
- fifo->avail < n,
- "fifo corrupt: in:%u out:%u done:%u n:%d avail:%d",
- fifo->in, fifo->out, fifo->done, n, fifo->avail))
- return -ENXIO;
-
- fifo->in += n;
- fifo->avail -= n;
-
- df_trace("in:%u out:%u done:%u n:%d avail:%d", fifo->in, fifo->out,
- fifo->done, n, fifo->avail);
-
- return n;
-}
-
-/**
- * dma_fifo_out_pend - gets address/len of next avail read and marks as pended
- * @fifo: address of in-place "struct dma_fifo" to read from
- * @pended: address of structure to fill with read address/len
- * The data/len fields will be NULL/0 if no dma is pended.
- *
- * Returns the # of used bytes remaining in fifo (ie, if > 0, more data
- * remains in the fifo that was not pended). If < 0, return is error code.
- */
-int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
-{
- unsigned len, n, ofs, l, limit;
-
- if (fifo->data == NULL)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
-
- pended->len = 0;
- pended->data = NULL;
- pended->out = fifo->out;
-
- len = fifo->in - fifo->out;
- if (!len)
- return -ENODATA;
- if (fifo->open == fifo->open_limit)
- return -EAGAIN;
-
- n = len;
- ofs = fifo->out % fifo->capacity;
- l = fifo->capacity - ofs;
- limit = min_t(unsigned, l, fifo->tx_limit);
- if (n > limit) {
- n = limit;
- fifo->out += limit;
- } else if (ofs + n > fifo->guard) {
- fifo->out += l;
- fifo->in = fifo->out;
- } else {
- fifo->out += round_up(n, fifo->align);
- fifo->in = fifo->out;
- }
-
- df_trace("in: %u out: %u done: %u n: %d len: %u avail: %d", fifo->in,
- fifo->out, fifo->done, n, len, fifo->avail);
-
- pended->len = n;
- pended->data = fifo->data + ofs;
- pended->next = fifo->out;
- list_add_tail(&pended->link, &fifo->pending);
- ++fifo->open;
-
- if (FAIL(fifo, fifo->open > fifo->open_limit,
- "past open limit:%d (limit:%d)",
- fifo->open, fifo->open_limit))
- return -ENXIO;
- if (FAIL(fifo, fifo->out & (fifo->align - 1),
- "fifo out unaligned:%u (align:%u)",
- fifo->out, fifo->align))
- return -ENXIO;
-
- return len - n;
-}
-
-/**
- * dma_fifo_out_complete - marks pended dma as completed
- * @fifo: address of in-place "struct dma_fifo" which was read from
- * @complete: address of structure for previously pended dma to mark completed
- */
-int dma_fifo_out_complete(struct dma_fifo *fifo, struct dma_pending *complete)
-{
- struct dma_pending *pending, *next, *tmp;
-
- if (fifo->data == NULL)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
- if (list_empty(&fifo->pending) && fifo->open == 0)
- return -EINVAL;
-
- if (FAIL(fifo, list_empty(&fifo->pending) != (fifo->open == 0),
- "pending list disagrees with open count:%d",
- fifo->open))
- return -ENXIO;
-
- tmp = complete->data;
- *tmp = *complete;
- list_replace(&complete->link, &tmp->link);
- dp_mark_completed(tmp);
-
- /* Only update the fifo in the original pended order */
- list_for_each_entry_safe(pending, next, &fifo->pending, link) {
- if (!dp_is_completed(pending)) {
- df_trace("still pending: saved out: %u len: %d",
- pending->out, pending->len);
- break;
- }
-
- if (FAIL(fifo, pending->out != fifo->done ||
- addr_check(fifo->in, fifo->done, pending->next),
- "in:%u out:%u done:%u saved:%u next:%u",
- fifo->in, fifo->out, fifo->done, pending->out,
- pending->next))
- return -ENXIO;
-
- list_del_init(&pending->link);
- fifo->done = pending->next;
- fifo->avail += pending->len;
- --fifo->open;
-
- df_trace("in: %u out: %u done: %u len: %u avail: %d", fifo->in,
- fifo->out, fifo->done, pending->len, fifo->avail);
- }
-
- if (FAIL(fifo, fifo->open < 0, "open dma:%d < 0", fifo->open))
- return -ENXIO;
- if (FAIL(fifo, fifo->avail > fifo->size, "fifo avail:%d > size:%d",
- fifo->avail, fifo->size))
- return -ENXIO;
-
- return 0;
-}
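
Taken together, the functions above form a stage/pend/complete cycle: bytes are
staged with dma_fifo_in(), claimed for transmission with dma_fifo_out_pend(),
and retired in original order with dma_fifo_out_complete(). A condensed usage
sketch (hypothetical caller, error handling trimmed):

/* Sketch of the intended call sequence; a real caller keeps the struct
 * dma_pending alive until the DMA engine is finished with the region.
 */
static int example_fifo_roundtrip(const void *src, int n)
{
	struct dma_fifo fifo;
	struct dma_pending pended;
	int ret;

	dma_fifo_init(&fifo);
	ret = dma_fifo_alloc(&fifo, 4096, 64, 1024, 2, GFP_KERNEL);
	if (ret)
		return ret;

	dma_fifo_in(&fifo, src, n);		 /* stage bytes for output  */

	ret = dma_fifo_out_pend(&fifo, &pended); /* claim a DMA-able chunk  */
	if (ret >= 0) {
		/* ... hand pended.data / pended.len to the DMA engine ... */
		dma_fifo_out_complete(&fifo, &pended);	/* retire when done */
	}

	dma_fifo_free(&fifo);
	return 0;
}
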
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
deleted file mode 100644
index 410988224f89..000000000000
--- a/drivers/staging/fwserial/dma_fifo.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * DMA-able FIFO interface
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DMA_FIFO_H_
-#define _DMA_FIFO_H_
-
-/**
- * The design basis for the DMA FIFO is to provide an output side that
- * complies with the streaming DMA API design that can be DMA'd from directly
- * (without additional copying), coupled with an input side that maintains a
- * logically consistent 'apparent' size (ie, bytes in + bytes avail is static
- * for the lifetime of the FIFO).
- *
- * DMA output transactions originate on a cache line boundary and can be
- * variably-sized. DMA output transactions can be retired out-of-order but
- * the FIFO will only advance the output in the original input sequence.
- * This means the FIFO will eventually stall if a transaction is never retired.
- *
- * Chunking the output side into cache line multiples means that some FIFO
- * memory is unused. For example, if all the avail input has been pended out,
- * then the in and out markers are re-aligned to the next cache line.
- * The maximum possible waste is
- * (cache line alignment - 1) * (max outstanding dma transactions)
- * This potential waste requires additional hidden capacity within the FIFO
- * to be able to accept input while the 'apparent' size has not been reached.
- *
- * Additional cache lines (ie, guard area) are used to minimize DMA
- * fragmentation when wrapping at the end of the FIFO. Input is allowed into the
- * guard area, but the in and out FIFO markers are wrapped when DMA is pended.
- */
-
-#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
-
-struct dma_fifo {
- unsigned in;
- unsigned out; /* updated when dma is pended */
- unsigned done; /* updated upon dma completion */
- struct {
- unsigned corrupt:1;
- };
- int size; /* 'apparent' size of fifo */
- int guard; /* ofs of guard area */
- int capacity; /* size + reserved */
- int avail; /* # of unused bytes in fifo */
- unsigned align; /* must be power of 2 */
- int tx_limit; /* max # of bytes per dma transaction */
- int open_limit; /* max # of outstanding allowed */
- int open; /* # of outstanding dma transactions */
- struct list_head pending; /* fifo markers for outstanding dma */
- void *data;
-};
-
-struct dma_pending {
- struct list_head link;
- void *data;
- unsigned len;
- unsigned next;
- unsigned out;
-};
-
-static inline void dp_mark_completed(struct dma_pending *dp)
-{
- dp->data += 1;
-}
-
-static inline bool dp_is_completed(struct dma_pending *dp)
-{
- return (unsigned long)dp->data & 1UL;
-}
-
-void dma_fifo_init(struct dma_fifo *fifo);
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
- int tx_limit, int open_limit, gfp_t gfp_mask);
-void dma_fifo_free(struct dma_fifo *fifo);
-void dma_fifo_reset(struct dma_fifo *fifo);
-int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n);
-int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended);
-int dma_fifo_out_complete(struct dma_fifo *fifo,
- struct dma_pending *complete);
-
-/* returns the # of used bytes in the fifo */
-static inline int dma_fifo_level(struct dma_fifo *fifo)
-{
- return fifo->size - fifo->avail;
-}
-
-/* returns the # of bytes ready for output in the fifo */
-static inline int dma_fifo_out_level(struct dma_fifo *fifo)
-{
- return fifo->in - fifo->out;
-}
-
-/* returns the # of unused bytes in the fifo */
-static inline int dma_fifo_avail(struct dma_fifo *fifo)
-{
- return fifo->avail;
-}
-
-/* returns true if fifo has max # of outstanding dmas */
-static inline bool dma_fifo_busy(struct dma_fifo *fifo)
-{
- return fifo->open == fifo->open_limit;
-}
-
-/* changes the max size of dma returned from dma_fifo_out_pend() */
-static inline int dma_fifo_change_tx_limit(struct dma_fifo *fifo, int tx_limit)
-{
- tx_limit = round_down(tx_limit, fifo->align);
- fifo->tx_limit = max_t(int, tx_limit, fifo->align);
- return 0;
-}
-
-#endif /* _DMA_FIFO_H_ */
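
As a worked check of the sizing described in the header comment above (the
numbers are arbitrary): with an apparent size of 4096 bytes, 64-byte cache-line
alignment and open_limit = 2, dma_fifo_alloc() reserves
capacity = 4096 + 64 * 2 + 64 * 3 = 4416 bytes, while the worst-case
re-alignment waste is (64 - 1) * 2 = 126 bytes, comfortably covered by the
extra align * open_limit bytes.
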
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
deleted file mode 100644
index b3ea4bb54e2c..000000000000
--- a/drivers/staging/fwserial/fwserial.c
+++ /dev/null
@@ -1,2957 +0,0 @@
-/*
- * FireWire Serial driver
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/rculist.h>
-#include <linux/workqueue.h>
-#include <linux/ratelimit.h>
-#include <linux/bug.h>
-#include <linux/uaccess.h>
-
-#include "fwserial.h"
-
-#define be32_to_u64(hi, lo) ((u64)be32_to_cpu(hi) << 32 | be32_to_cpu(lo))
-
-#define LINUX_VENDOR_ID 0xd00d1eU /* same id used in card root directory */
-#define FWSERIAL_VERSION 0x00e81cU /* must be unique within LINUX_VENDOR_ID */
-
-/* configurable options */
-static int num_ttys = 4; /* # of std ttys to create per fw_card */
- /* - doubles as loopback port index */
-static bool auto_connect = true; /* try to VIRT_CABLE to every peer */
-static bool create_loop_dev = true; /* create a loopback device for each card */
-
-module_param_named(ttys, num_ttys, int, S_IRUGO | S_IWUSR);
-module_param_named(auto, auto_connect, bool, S_IRUGO | S_IWUSR);
-module_param_named(loop, create_loop_dev, bool, S_IRUGO | S_IWUSR);
-
-/*
- * Threshold below which the tty is woken for writing
- * - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because
- * even if the writer is woken, n_tty_poll() won't set POLLOUT until
- * our fifo is below this level
- */
-#define WAKEUP_CHARS 256
-
-/**
- * fwserial_list: list of every fw_serial created for each fw_card
- * See discussion in fwserial_probe.
- */
-static LIST_HEAD(fwserial_list);
-static DEFINE_MUTEX(fwserial_list_mutex);
-
-/**
- * port_table: array of tty ports allocated to each fw_card
- *
- * tty ports are allocated during probe when an fw_serial is first
- * created for a given fw_card. Ports are allocated in a contiguous block,
- * each block consisting of 'num_ports' ports.
- */
-static struct fwtty_port *port_table[MAX_TOTAL_PORTS];
-static DEFINE_MUTEX(port_table_lock);
-static bool port_table_corrupt;
-#define FWTTY_INVALID_INDEX MAX_TOTAL_PORTS
-
-#define loop_idx(port) (((port)->index) / num_ports)
-#define table_idx(loop) ((loop) * num_ports + num_ttys)
-
-/* total # of tty ports created per fw_card */
-static int num_ports;
-
-/* slab used as pool for struct fwtty_transactions */
-static struct kmem_cache *fwtty_txn_cache;
-
-struct tty_driver *fwtty_driver;
-static struct tty_driver *fwloop_driver;
-
-static struct dentry *fwserial_debugfs;
-
-struct fwtty_transaction;
-typedef void (*fwtty_transaction_cb)(struct fw_card *card, int rcode,
- void *data, size_t length,
- struct fwtty_transaction *txn);
-
-struct fwtty_transaction {
- struct fw_transaction fw_txn;
- fwtty_transaction_cb callback;
- struct fwtty_port *port;
- union {
- struct dma_pending dma_pended;
- };
-};
-
-#define to_device(a, b) (a->b)
-#define fwtty_err(p, fmt, ...) \
- dev_err(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_info(p, fmt, ...) \
- dev_info(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_notice(p, fmt, ...) \
- dev_notice(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_dbg(p, fmt, ...) \
- dev_dbg(to_device(p, device), "%s: " fmt, __func__, ##__VA_ARGS__)
-#define fwtty_err_ratelimited(p, fmt, ...) \
- dev_err_ratelimited(to_device(p, device), fmt, ##__VA_ARGS__)
-
-#ifdef DEBUG
-static inline void debug_short_write(struct fwtty_port *port, int c, int n)
-{
- int avail;
-
- if (n < c) {
- spin_lock_bh(&port->lock);
- avail = dma_fifo_avail(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "short write: avail:%d req:%d wrote:%d\n",
- avail, c, n);
- }
-}
-#else
-#define debug_short_write(port, c, n)
-#endif
-
-static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
- int generation, int id);
-
-#ifdef FWTTY_PROFILING
-
-static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat)
-{
- spin_lock_bh(&port->lock);
- fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
- spin_unlock_bh(&port->lock);
-}
-
-static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
-{
- /* for each stat, print sum of 0 to 2^k, then individually */
- int k = 4;
- unsigned sum;
- int j;
- char t[10];
-
- snprintf(t, 10, "< %d", 1 << k);
- seq_printf(m, "\n%14s %6s", " ", t);
- for (j = k + 1; j < DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", 1 << j);
-
- ++k;
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->reads[j];
- seq_printf(m, "\n%14s: %6d", "reads", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->reads[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->writes[j];
- seq_printf(m, "\n%14s: %6d", "writes", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->writes[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->txns[j];
- seq_printf(m, "\n%14s: %6d", "txns", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->txns[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->unthrottle[j];
- seq_printf(m, "\n%14s: %6d", "avail @ unthr", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->unthrottle[j]);
-}
-
-#else
-#define fwtty_profile_fifo(port, stat)
-#define fwtty_dump_profile(m, stats)
-#endif
-
-/*
- * Returns the max receive packet size for the given node
- * Devices which are OHCI v1.0/ v1.1/ v1.2-draft or RFC 2734 compliant
- * are required by specification to support max_rec of 8 (512 bytes) or more.
- */
-static inline int device_max_receive(struct fw_device *fw_device)
-{
- /* see IEEE 1394-2008 table 8-8 */
- return min(2 << fw_device->max_rec, 4096);
-}
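
For example, a peer advertising max_rec = 8 (the specification minimum noted
above) yields 2 << 8 = 512 bytes, while max_rec = 13 would compute 16384 and is
clamped to the 4096-byte cap.
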
-
-static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
-{
- switch (rcode) {
- case RCODE_SEND_ERROR:
- fwtty_err_ratelimited(port, "card busy\n");
- break;
- case RCODE_ADDRESS_ERROR:
- fwtty_err_ratelimited(port, "bad unit addr or write length\n");
- break;
- case RCODE_DATA_ERROR:
- fwtty_err_ratelimited(port, "failed rx\n");
- break;
- case RCODE_NO_ACK:
- fwtty_err_ratelimited(port, "missing ack\n");
- break;
- case RCODE_BUSY:
- fwtty_err_ratelimited(port, "remote busy\n");
- break;
- default:
- fwtty_err_ratelimited(port, "failed tx: %d\n", rcode);
- }
-}
-
-static void fwtty_txn_constructor(void *this)
-{
- struct fwtty_transaction *txn = this;
-
- init_timer(&txn->fw_txn.split_timeout_timer);
-}
-
-static void fwtty_common_callback(struct fw_card *card, int rcode,
- void *payload, size_t len, void *cb_data)
-{
- struct fwtty_transaction *txn = cb_data;
- struct fwtty_port *port = txn->port;
-
- if (port && rcode != RCODE_COMPLETE)
- fwtty_log_tx_error(port, rcode);
- if (txn->callback)
- txn->callback(card, rcode, payload, len, txn);
- kmem_cache_free(fwtty_txn_cache, txn);
-}
-
-static int fwtty_send_data_async(struct fwtty_peer *peer, int tcode,
- unsigned long long addr, void *payload,
- size_t len, fwtty_transaction_cb callback,
- struct fwtty_port *port)
-{
- struct fwtty_transaction *txn;
- int generation;
-
- txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
- if (!txn)
- return -ENOMEM;
-
- txn->callback = callback;
- txn->port = port;
-
- generation = peer->generation;
- smp_rmb();
- fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
- peer->node_id, generation, peer->speed, addr, payload,
- len, fwtty_common_callback, txn);
- return 0;
-}
-
-static void fwtty_send_txn_async(struct fwtty_peer *peer,
- struct fwtty_transaction *txn, int tcode,
- unsigned long long addr, void *payload,
- size_t len, fwtty_transaction_cb callback,
- struct fwtty_port *port)
-{
- int generation;
-
- txn->callback = callback;
- txn->port = port;
-
- generation = peer->generation;
- smp_rmb();
- fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
- peer->node_id, generation, peer->speed, addr, payload,
- len, fwtty_common_callback, txn);
-}
-
-static void __fwtty_restart_tx(struct fwtty_port *port)
-{
- int len, avail;
-
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len)
- schedule_delayed_work(&port->drain, 0);
- avail = dma_fifo_avail(&port->tx_fifo);
-
- fwtty_dbg(port, "fifo len: %d avail: %d\n", len, avail);
-}
-
-static void fwtty_restart_tx(struct fwtty_port *port)
-{
- spin_lock_bh(&port->lock);
- __fwtty_restart_tx(port);
- spin_unlock_bh(&port->lock);
-}
-
-/**
- * fwtty_update_port_status - decodes & dispatches line status changes
- *
- * Note: in loopback, the port->lock is being held. Only use functions that
- * don't attempt to reclaim the port->lock.
- */
-static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
-{
- unsigned delta;
- struct tty_struct *tty;
-
- /* simulated LSR/MSR status from remote */
- status &= ~MCTRL_MASK;
- delta = (port->mstatus ^ status) & ~MCTRL_MASK;
- delta &= ~(status & TIOCM_RNG);
- port->mstatus = status;
-
- if (delta & TIOCM_RNG)
- ++port->icount.rng;
- if (delta & TIOCM_DSR)
- ++port->icount.dsr;
- if (delta & TIOCM_CAR)
- ++port->icount.dcd;
- if (delta & TIOCM_CTS)
- ++port->icount.cts;
-
- fwtty_dbg(port, "status: %x delta: %x\n", status, delta);
-
- if (delta & TIOCM_CAR) {
- tty = tty_port_tty_get(&port->port);
- if (tty && !C_CLOCAL(tty)) {
- if (status & TIOCM_CAR)
- wake_up_interruptible(&port->port.open_wait);
- else
- schedule_work(&port->hangup);
- }
- tty_kref_put(tty);
- }
-
- if (delta & TIOCM_CTS) {
- tty = tty_port_tty_get(&port->port);
- if (tty && C_CRTSCTS(tty)) {
- if (tty->hw_stopped) {
- if (status & TIOCM_CTS) {
- tty->hw_stopped = 0;
- if (port->loopback)
- __fwtty_restart_tx(port);
- else
- fwtty_restart_tx(port);
- }
- } else {
- if (~status & TIOCM_CTS)
- tty->hw_stopped = 1;
- }
- }
- tty_kref_put(tty);
-
- } else if (delta & OOB_TX_THROTTLE) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (tty->hw_stopped) {
- if (~status & OOB_TX_THROTTLE) {
- tty->hw_stopped = 0;
- if (port->loopback)
- __fwtty_restart_tx(port);
- else
- fwtty_restart_tx(port);
- }
- } else {
- if (status & OOB_TX_THROTTLE)
- tty->hw_stopped = 1;
- }
- }
- tty_kref_put(tty);
- }
-
- if (delta & (UART_LSR_BI << 24)) {
- if (status & (UART_LSR_BI << 24)) {
- port->break_last = jiffies;
- schedule_delayed_work(&port->emit_breaks, 0);
- } else {
- /* run emit_breaks one last time (if pending) */
- mod_delayed_work(system_wq, &port->emit_breaks, 0);
- }
- }
-
- if (delta & (TIOCM_DSR | TIOCM_CAR | TIOCM_CTS | TIOCM_RNG))
- wake_up_interruptible(&port->port.delta_msr_wait);
-}
-
-/**
- * __fwtty_port_line_status - generate 'line status' for indicated port
- *
- * This function returns a remote 'MSR' state based on the local 'MCR' state,
- * as if a null modem cable was attached. The actual status is a mangling
- * of TIOCM_* bits suitable for sending to a peer's status_addr.
- *
- * Note: caller must be holding port lock
- */
-static unsigned __fwtty_port_line_status(struct fwtty_port *port)
-{
- unsigned status = 0;
-
- /* TODO: add module param to tie RNG to DTR as well */
-
- if (port->mctrl & TIOCM_DTR)
- status |= TIOCM_DSR | TIOCM_CAR;
- if (port->mctrl & TIOCM_RTS)
- status |= TIOCM_CTS;
- if (port->mctrl & OOB_RX_THROTTLE)
- status |= OOB_TX_THROTTLE;
- /* emulate BRK as add'l line status */
- if (port->break_ctl)
- status |= UART_LSR_BI << 24;
-
- return status;
-}
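
Concretely, a local mctrl of TIOCM_DTR | TIOCM_RTS is reported to the peer as
TIOCM_DSR | TIOCM_CAR | TIOCM_CTS, exactly what the far end of a null-modem
cable would see; an active break_ctl additionally sets UART_LSR_BI in the top
byte of the status word.
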
-
-/**
- * __fwtty_write_port_status - send the port line status to peer
- *
- * Note: caller must be holding the port lock.
- */
-static int __fwtty_write_port_status(struct fwtty_port *port)
-{
- struct fwtty_peer *peer;
- int err = -ENOENT;
- unsigned status = __fwtty_port_line_status(port);
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (peer) {
- err = fwtty_send_data_async(peer, TCODE_WRITE_QUADLET_REQUEST,
- peer->status_addr, &status,
- sizeof(status), NULL, port);
- }
- rcu_read_unlock();
-
- return err;
-}
-
-/**
- * fwtty_write_port_status - same as above but locked by port lock
- */
-static int fwtty_write_port_status(struct fwtty_port *port)
-{
- int err;
-
- spin_lock_bh(&port->lock);
- err = __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
- return err;
-}
-
-static void fwtty_throttle_port(struct fwtty_port *port)
-{
- struct tty_struct *tty;
- unsigned old;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
- spin_lock_bh(&port->lock);
-
- old = port->mctrl;
- port->mctrl |= OOB_RX_THROTTLE;
- if (C_CRTSCTS(tty))
- port->mctrl &= ~TIOCM_RTS;
- if (~old & OOB_RX_THROTTLE)
- __fwtty_write_port_status(port);
-
- spin_unlock_bh(&port->lock);
-
- tty_kref_put(tty);
-}
-
-/**
- * fwtty_do_hangup - wait for ldisc to deliver all pending rx; only then hangup
- *
- * When the remote has finished tx, and all in-flight rx has been received and
- * pushed to the flip buffer, the remote may close its device. This will
- * drop DTR on the remote which will drop carrier here. Typically, the tty is
- * hung up when carrier is dropped or lost.
- *
- * However, there is a race between the hang up and the line discipline
- * delivering its data to the reader. A hangup will cause the ldisc to flush
- * (ie., clear) the read buffer and flip buffer. Because of firewire's
- * relatively high throughput, the ldisc frequently lags well behind the driver,
- * resulting in lost data (which has already been received and written to
- * the flip buffer) when the remote closes its end.
- *
- * Unfortunately, since the flip buffer offers no direct method for determining
- * if it holds data, ensuring the ldisc has delivered all data is problematic.
- */
-
-/* FIXME: drop this workaround when __tty_hangup waits for ldisc completion */
-static void fwtty_do_hangup(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(work, hangup);
- struct tty_struct *tty;
-
- schedule_timeout_uninterruptible(msecs_to_jiffies(50));
-
- tty = tty_port_tty_get(&port->port);
- if (tty)
- tty_vhangup(tty);
- tty_kref_put(tty);
-}
-
-static void fwtty_emit_breaks(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
- static const char buf[16];
- unsigned long now = jiffies;
- unsigned long elapsed = now - port->break_last;
- int n, t, c, brk = 0;
-
- /* generate breaks at the line rate (but at least 1) */
- n = (elapsed * port->cps) / HZ + 1;
- port->break_last = now;
-
- fwtty_dbg(port, "sending %d brks\n", n);
-
- while (n) {
- t = min(n, 16);
- c = tty_insert_flip_string_fixed_flag(&port->port, buf,
- TTY_BREAK, t);
- n -= c;
- brk += c;
- if (c < t)
- break;
- }
- tty_flip_buffer_push(&port->port);
-
- if (port->mstatus & (UART_LSR_BI << 24))
- schedule_delayed_work(&port->emit_breaks, FREQ_BREAKS);
- port->icount.brk += brk;
-}
-
-static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
-{
- int c, n = len;
- unsigned lsr;
- int err = 0;
-
- fwtty_dbg(port, "%d\n", n);
- fwtty_profile_data(port->stats.reads, n);
-
- if (port->write_only) {
- n = 0;
- goto out;
- }
-
- /* disregard break status; breaks are generated by emit_breaks work */
- lsr = (port->mstatus >> 24) & ~UART_LSR_BI;
-
- if (port->overrun)
- lsr |= UART_LSR_OE;
-
- if (lsr & UART_LSR_OE)
- ++port->icount.overrun;
-
- lsr &= port->status_mask;
- if (lsr & ~port->ignore_mask & UART_LSR_OE) {
- if (!tty_insert_flip_char(&port->port, 0, TTY_OVERRUN)) {
- err = -EIO;
- goto out;
- }
- }
- port->overrun = false;
-
- if (lsr & port->ignore_mask & ~UART_LSR_OE) {
- /* TODO: don't drop SAK and Magic SysRq here */
- n = 0;
- goto out;
- }
-
- c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
- if (c > 0)
- tty_flip_buffer_push(&port->port);
- n -= c;
-
- if (n) {
- port->overrun = true;
- err = -EIO;
- fwtty_err_ratelimited(port, "flip buffer overrun\n");
-
- } else {
- /* throttle the sender if remaining flip buffer space has
- * reached high watermark to avoid losing data which may be
- * in-flight. Since the AR request context is 32k, that much
- * data may have _already_ been acked.
- */
- if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK)
- fwtty_throttle_port(port);
- }
-
-out:
- port->icount.rx += len;
- port->stats.lost += n;
- return err;
-}
-
-/**
- * fwtty_port_handler - bus address handler for port reads/writes
- * @parameters: fw_address_callback_t as specified by firewire core interface
- *
- * This handler is responsible for handling inbound read/write dma from remotes.
- */
-static void fwtty_port_handler(struct fw_card *card,
- struct fw_request *request,
- int tcode, int destination, int source,
- int generation,
- unsigned long long addr,
- void *data, size_t len,
- void *callback_data)
-{
- struct fwtty_port *port = callback_data;
- struct fwtty_peer *peer;
- int err;
- int rcode;
-
- /* Only accept rx from the peer virtual-cabled to this port */
- rcu_read_lock();
- peer = __fwserial_peer_by_node_id(card, generation, source);
- rcu_read_unlock();
- if (!peer || peer != rcu_access_pointer(port->peer)) {
- rcode = RCODE_ADDRESS_ERROR;
- fwtty_err_ratelimited(port, "ignoring unauthenticated data\n");
- goto respond;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- if (addr != port->rx_handler.offset || len != 4) {
- rcode = RCODE_ADDRESS_ERROR;
- } else {
- fwtty_update_port_status(port, *(unsigned *)data);
- rcode = RCODE_COMPLETE;
- }
- break;
-
- case TCODE_WRITE_BLOCK_REQUEST:
- if (addr != port->rx_handler.offset + 4 ||
- len > port->rx_handler.length - 4) {
- rcode = RCODE_ADDRESS_ERROR;
- } else {
- err = fwtty_rx(port, data, len);
- switch (err) {
- case 0:
- rcode = RCODE_COMPLETE;
- break;
- case -EIO:
- rcode = RCODE_DATA_ERROR;
- break;
- default:
- rcode = RCODE_CONFLICT_ERROR;
- break;
- }
- }
- break;
-
- default:
- rcode = RCODE_TYPE_ERROR;
- }
-
-respond:
- fw_send_response(card, request, rcode);
-}
-
-/**
- * fwtty_tx_complete - callback for tx dma
- * @data: ignored, has no meaning for write txns
- * @length: ignored, has no meaning for write txns
- *
- * The writer must be woken here if the fifo has been emptied because it
- * may have slept if chars_in_buffer was != 0
- */
-static void fwtty_tx_complete(struct fw_card *card, int rcode,
- void *data, size_t length,
- struct fwtty_transaction *txn)
-{
- struct fwtty_port *port = txn->port;
- int len;
-
- fwtty_dbg(port, "rcode: %d\n", rcode);
-
- switch (rcode) {
- case RCODE_COMPLETE:
- spin_lock_bh(&port->lock);
- dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- port->icount.tx += txn->dma_pended.len;
- break;
-
- default:
- /* TODO: implement retries */
- spin_lock_bh(&port->lock);
- dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- port->stats.dropped += txn->dma_pended.len;
- }
-
- if (len < WAKEUP_CHARS)
- tty_port_tty_wakeup(&port->port);
-}
-
-static int fwtty_tx(struct fwtty_port *port, bool drain)
-{
- struct fwtty_peer *peer;
- struct fwtty_transaction *txn;
- struct tty_struct *tty;
- int n, len;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return -ENOENT;
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (!peer) {
- n = -EIO;
- goto out;
- }
-
- if (test_and_set_bit(IN_TX, &port->flags)) {
- n = -EALREADY;
- goto out;
- }
-
- /* try to write as many dma transactions out as possible */
- n = -EAGAIN;
- while (!tty->stopped && !tty->hw_stopped &&
- !test_bit(STOP_TX, &port->flags)) {
- txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
- if (!txn) {
- n = -ENOMEM;
- break;
- }
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_out_pend(&port->tx_fifo, &txn->dma_pended);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "out: %u rem: %d\n", txn->dma_pended.len, n);
-
- if (n < 0) {
- kmem_cache_free(fwtty_txn_cache, txn);
- if (n == -EAGAIN) {
- ++port->stats.tx_stall;
- } else if (n == -ENODATA) {
- fwtty_profile_data(port->stats.txns, 0);
- } else {
- ++port->stats.fifo_errs;
- fwtty_err_ratelimited(port, "fifo err: %d\n",
- n);
- }
- break;
- }
-
- fwtty_profile_data(port->stats.txns, txn->dma_pended.len);
-
- fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST,
- peer->fifo_addr, txn->dma_pended.data,
- txn->dma_pended.len, fwtty_tx_complete,
- port);
- ++port->stats.sent;
-
- /*
- * Stop tx if the 'last view' of the fifo is empty or if
- * this is the writer and there's not enough data to bother
- */
- if (n == 0 || (!drain && n < WRITER_MINIMUM))
- break;
- }
-
- if (n >= 0 || n == -EAGAIN || n == -ENOMEM || n == -ENODATA) {
- spin_lock_bh(&port->lock);
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len) {
- unsigned long delay = (n == -ENOMEM) ? HZ : 1;
-
- schedule_delayed_work(&port->drain, delay);
- }
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- /* wakeup the writer */
- if (drain && len < WAKEUP_CHARS)
- tty_wakeup(tty);
- }
-
- clear_bit(IN_TX, &port->flags);
- wake_up_interruptible(&port->wait_tx);
-
-out:
- rcu_read_unlock();
- tty_kref_put(tty);
- return n;
-}
-
-static void fwtty_drain_tx(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(to_delayed_work(work), drain);
-
- fwtty_tx(port, true);
-}
-
-static void fwtty_write_xchar(struct fwtty_port *port, char ch)
-{
- struct fwtty_peer *peer;
-
- ++port->stats.xchars;
-
- fwtty_dbg(port, "%02x\n", ch);
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (peer) {
- fwtty_send_data_async(peer, TCODE_WRITE_BLOCK_REQUEST,
- peer->fifo_addr, &ch, sizeof(ch),
- NULL, port);
- }
- rcu_read_unlock();
-}
-
-struct fwtty_port *fwtty_port_get(unsigned index)
-{
- struct fwtty_port *port;
-
- if (index >= MAX_TOTAL_PORTS)
- return NULL;
-
- mutex_lock(&port_table_lock);
- port = port_table[index];
- if (port)
- kref_get(&port->serial->kref);
- mutex_unlock(&port_table_lock);
- return port;
-}
-EXPORT_SYMBOL(fwtty_port_get);
-
-static int fwtty_ports_add(struct fw_serial *serial)
-{
- int err = -EBUSY;
- int i, j;
-
- if (port_table_corrupt)
- return err;
-
- mutex_lock(&port_table_lock);
- for (i = 0; i + num_ports <= MAX_TOTAL_PORTS; i += num_ports) {
- if (!port_table[i]) {
- for (j = 0; j < num_ports; ++i, ++j) {
- serial->ports[j]->index = i;
- port_table[i] = serial->ports[j];
- }
- err = 0;
- break;
- }
- }
- mutex_unlock(&port_table_lock);
- return err;
-}
-
-static void fwserial_destroy(struct kref *kref)
-{
- struct fw_serial *serial = to_serial(kref, kref);
- struct fwtty_port **ports = serial->ports;
- int j, i = ports[0]->index;
-
- synchronize_rcu();
-
- mutex_lock(&port_table_lock);
- for (j = 0; j < num_ports; ++i, ++j) {
- port_table_corrupt |= port_table[i] != ports[j];
- WARN_ONCE(port_table_corrupt, "port_table[%d]: %p != ports[%d]: %p",
- i, port_table[i], j, ports[j]);
-
- port_table[i] = NULL;
- }
- mutex_unlock(&port_table_lock);
-
- for (j = 0; j < num_ports; ++j) {
- fw_core_remove_address_handler(&ports[j]->rx_handler);
- tty_port_destroy(&ports[j]->port);
- kfree(ports[j]);
- }
- kfree(serial);
-}
-
-void fwtty_port_put(struct fwtty_port *port)
-{
- kref_put(&port->serial->kref, fwserial_destroy);
-}
-EXPORT_SYMBOL(fwtty_port_put);
-
-static void fwtty_port_dtr_rts(struct tty_port *tty_port, int on)
-{
- struct fwtty_port *port = to_port(tty_port, port);
-
- fwtty_dbg(port, "on/off: %d\n", on);
-
- spin_lock_bh(&port->lock);
- /* Don't change carrier state if this is a console */
- if (!port->port.console) {
- if (on)
- port->mctrl |= TIOCM_DTR | TIOCM_RTS;
- else
- port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
- }
-
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-}
-
-/**
- * fwtty_port_carrier_raised: required tty_port operation
- *
- * This port operation is polled after a tty has been opened and is waiting for
- * carrier detect -- see drivers/tty/tty_port:tty_port_block_til_ready().
- */
-static int fwtty_port_carrier_raised(struct tty_port *tty_port)
-{
- struct fwtty_port *port = to_port(tty_port, port);
- int rc;
-
- rc = (port->mstatus & TIOCM_CAR);
-
- fwtty_dbg(port, "%d\n", rc);
-
- return rc;
-}
-
-static unsigned set_termios(struct fwtty_port *port, struct tty_struct *tty)
-{
- unsigned baud, frame;
-
- baud = tty_termios_baud_rate(&tty->termios);
- tty_termios_encode_baud_rate(&tty->termios, baud, baud);
-
- /* compute bit count of 2 frames */
- frame = 12 + ((C_CSTOPB(tty)) ? 4 : 2) + ((C_PARENB(tty)) ? 2 : 0);
-
- switch (C_CSIZE(tty)) {
- case CS5:
- frame -= (C_CSTOPB(tty)) ? 1 : 0;
- break;
- case CS6:
- frame += 2;
- break;
- case CS7:
- frame += 4;
- break;
- case CS8:
- frame += 6;
- break;
- }
-
- port->cps = (baud << 1) / frame;
-
- port->status_mask = UART_LSR_OE;
- if (_I_FLAG(tty, BRKINT | PARMRK))
- port->status_mask |= UART_LSR_BI;
-
- port->ignore_mask = 0;
- if (I_IGNBRK(tty)) {
- port->ignore_mask |= UART_LSR_BI;
- if (I_IGNPAR(tty))
- port->ignore_mask |= UART_LSR_OE;
- }
-
- port->write_only = !C_CREAD(tty);
-
- /* turn off echo and newline xlat if loopback */
- if (port->loopback) {
- tty->termios.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHOKE |
- ECHONL | ECHOPRT | ECHOCTL);
- tty->termios.c_oflag &= ~ONLCR;
- }
-
- return baud;
-}
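As a worked example of the frame arithmetic above: an 8N1 line (CS8, one stop
bit, no parity) gives frame = 12 + 2 + 6 = 20 bits for two frames, i.e. 10 bits
per character, so at 9600 baud port->cps = (9600 << 1) / 20 = 960 characters
per second.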
-
-static int fwtty_port_activate(struct tty_port *tty_port,
- struct tty_struct *tty)
-{
- struct fwtty_port *port = to_port(tty_port, port);
- unsigned baud;
- int err;
-
- set_bit(TTY_IO_ERROR, &tty->flags);
-
- err = dma_fifo_alloc(&port->tx_fifo, FWTTY_PORT_TXFIFO_LEN,
- cache_line_size(),
- port->max_payload,
- FWTTY_PORT_MAX_PEND_DMA,
- GFP_KERNEL);
- if (err)
- return err;
-
- spin_lock_bh(&port->lock);
-
- baud = set_termios(port, tty);
-
- /* if console, don't change carrier state */
- if (!port->port.console) {
- port->mctrl = 0;
- if (baud != 0)
- port->mctrl = TIOCM_DTR | TIOCM_RTS;
- }
-
- if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS)
- tty->hw_stopped = 1;
-
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
-
- return 0;
-}
-
-/**
- * fwtty_port_shutdown
- *
- * Note: the tty port core ensures this is not the console and
- * manages TTY_IO_ERROR properly
- */
-static void fwtty_port_shutdown(struct tty_port *tty_port)
-{
- struct fwtty_port *port = to_port(tty_port, port);
-
- /* TODO: cancel outstanding transactions */
-
- cancel_delayed_work_sync(&port->emit_breaks);
- cancel_delayed_work_sync(&port->drain);
-
- spin_lock_bh(&port->lock);
- port->flags = 0;
- port->break_ctl = 0;
- port->overrun = 0;
- __fwtty_write_port_status(port);
- dma_fifo_free(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-}
-
-static int fwtty_open(struct tty_struct *tty, struct file *fp)
-{
- struct fwtty_port *port = tty->driver_data;
-
- return tty_port_open(&port->port, tty, fp);
-}
-
-static void fwtty_close(struct tty_struct *tty, struct file *fp)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty_port_close(&port->port, tty, fp);
-}
-
-static void fwtty_hangup(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty_port_hangup(&port->port);
-}
-
-static void fwtty_cleanup(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty->driver_data = NULL;
- fwtty_port_put(port);
-}
-
-static int fwtty_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct fwtty_port *port = fwtty_port_get(tty->index);
- int err;
-
- err = tty_standard_install(driver, tty);
- if (!err)
- tty->driver_data = port;
- else
- fwtty_port_put(port);
- return err;
-}
-
-static int fwloop_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct fwtty_port *port = fwtty_port_get(table_idx(tty->index));
- int err;
-
- err = tty_standard_install(driver, tty);
- if (!err)
- tty->driver_data = port;
- else
- fwtty_port_put(port);
- return err;
-}
-
-static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
-{
- struct fwtty_port *port = tty->driver_data;
- int n, len;
-
- fwtty_dbg(port, "%d\n", c);
- fwtty_profile_data(port->stats.writes, c);
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_in(&port->tx_fifo, buf, c);
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len < DRAIN_THRESHOLD)
- schedule_delayed_work(&port->drain, 1);
- spin_unlock_bh(&port->lock);
-
- if (len >= DRAIN_THRESHOLD)
- fwtty_tx(port, false);
-
- debug_short_write(port, c, n);
-
- return (n < 0) ? 0 : n;
-}
-
-static int fwtty_write_room(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- int n;
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_avail(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%d\n", n);
-
- return n;
-}
-
-static int fwtty_chars_in_buffer(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- int n;
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%d\n", n);
-
- return n;
-}
-
-static void fwtty_send_xchar(struct tty_struct *tty, char ch)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "%02x\n", ch);
-
- fwtty_write_xchar(port, ch);
-}
-
-static void fwtty_throttle(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- /*
- * Ignore throttling (but not unthrottling).
- * It only makes sense to throttle when data will no longer be
- * accepted by the tty flip buffer. For example, it is
- * possible for received data to overflow the tty buffer long
- * before the line discipline ever has a chance to throttle the driver.
- * Additionally, the driver may have already completed the I/O
- * but the tty buffer is still emptying, so the line discipline is
- * throttling and unthrottling nothing.
- */
-
- ++port->stats.throttled;
-}
-
-static void fwtty_unthrottle(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "CRTSCTS: %d\n", C_CRTSCTS(tty) != 0);
-
- fwtty_profile_fifo(port, port->stats.unthrottle);
-
- spin_lock_bh(&port->lock);
- port->mctrl &= ~OOB_RX_THROTTLE;
- if (C_CRTSCTS(tty))
- port->mctrl |= TIOCM_RTS;
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-}
-
-static int check_msr_delta(struct fwtty_port *port, unsigned long mask,
- struct async_icount *prev)
-{
- struct async_icount now;
- int delta;
-
- now = port->icount;
-
- delta = ((mask & TIOCM_RNG && prev->rng != now.rng) ||
- (mask & TIOCM_DSR && prev->dsr != now.dsr) ||
- (mask & TIOCM_CAR && prev->dcd != now.dcd) ||
- (mask & TIOCM_CTS && prev->cts != now.cts));
-
- *prev = now;
-
- return delta;
-}
-
-static int wait_msr_change(struct fwtty_port *port, unsigned long mask)
-{
- struct async_icount prev;
-
- prev = port->icount;
-
- return wait_event_interruptible(port->port.delta_msr_wait,
- check_msr_delta(port, mask, &prev));
-}
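check_msr_delta()/wait_msr_change() back the TIOCMIWAIT case in fwtty_ioctl()
further down: the caller sleeps until one of the masked modem-status lines
changes. From user space that looks roughly like the following sketch (fd is
assumed to be an open tty; handle_modem_change() is hypothetical):

	#include <sys/ioctl.h>

	/* block until carrier detect or CTS changes state */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CAR | TIOCM_CTS) == 0)
		handle_modem_change();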
-
-static int get_serial_info(struct fwtty_port *port,
- struct serial_struct __user *info)
-{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_UNKNOWN;
- tmp.line = port->port.tty->index;
- tmp.flags = port->port.flags;
- tmp.xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
- tmp.baud_base = 400000000;
- tmp.close_delay = port->port.close_delay;
-
- return (copy_to_user(info, &tmp, sizeof(*info))) ? -EFAULT : 0;
-}
-
-static int set_serial_info(struct fwtty_port *port,
- struct serial_struct __user *info)
-{
- struct serial_struct tmp;
-
- if (copy_from_user(&tmp, info, sizeof(tmp)))
- return -EFAULT;
-
- if (tmp.irq != 0 || tmp.port != 0 || tmp.custom_divisor != 0 ||
- tmp.baud_base != 400000000)
- return -EPERM;
-
- if (!capable(CAP_SYS_ADMIN)) {
- if (((tmp.flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- } else {
- port->port.close_delay = tmp.close_delay * HZ / 100;
- }
-
- return 0;
-}
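get_serial_info()/set_serial_info() service the TIOCGSERIAL/TIOCSSERIAL cases
below. A rough user-space sketch of the read-modify-write pattern (reading
first keeps baud_base at the value the checks above require; close_delay is in
hundredths of a second, matching the HZ / 100 conversion):

	#include <linux/serial.h>
	#include <sys/ioctl.h>

	struct serial_struct ss;

	if (ioctl(fd, TIOCGSERIAL, &ss) == 0) {
		ss.close_delay = 100;	/* ~1 second */
		ioctl(fd, TIOCSSERIAL, &ss);
	}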
-
-static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
- unsigned long arg)
-{
- struct fwtty_port *port = tty->driver_data;
- int err;
-
- switch (cmd) {
- case TIOCGSERIAL:
- mutex_lock(&port->port.mutex);
- err = get_serial_info(port, (void __user *)arg);
- mutex_unlock(&port->port.mutex);
- break;
-
- case TIOCSSERIAL:
- mutex_lock(&port->port.mutex);
- err = set_serial_info(port, (void __user *)arg);
- mutex_unlock(&port->port.mutex);
- break;
-
- case TIOCMIWAIT:
- err = wait_msr_change(port, arg);
- break;
-
- default:
- err = -ENOIOCTLCMD;
- }
-
- return err;
-}
-
-static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned baud;
-
- spin_lock_bh(&port->lock);
- baud = set_termios(port, tty);
-
- if ((baud == 0) && (old->c_cflag & CBAUD)) {
- port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
- } else if ((baud != 0) && !(old->c_cflag & CBAUD)) {
- if (C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
- port->mctrl |= TIOCM_DTR | TIOCM_RTS;
- else
- port->mctrl |= TIOCM_DTR;
- }
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- if (old->c_cflag & CRTSCTS) {
- if (!C_CRTSCTS(tty)) {
- tty->hw_stopped = 0;
- fwtty_restart_tx(port);
- }
- } else if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS) {
- tty->hw_stopped = 1;
- }
-}
-
-/**
- * fwtty_break_ctl - start/stop sending breaks
- *
- * Signals the remote to start or stop generating simulated breaks.
- * First, stop dequeueing from the fifo and wait for writer/drain to leave tx
- * before signalling the break line status. This guarantees any pending rx will
- * be queued to the line discipline before break is simulated on the remote.
- * Conversely, turning off break_ctl requires signalling the line status change,
- * then enabling tx.
- */
-static int fwtty_break_ctl(struct tty_struct *tty, int state)
-{
- struct fwtty_port *port = tty->driver_data;
- long ret;
-
- fwtty_dbg(port, "%d\n", state);
-
- if (state == -1) {
- set_bit(STOP_TX, &port->flags);
- ret = wait_event_interruptible_timeout(port->wait_tx,
- !test_bit(IN_TX, &port->flags),
- 10);
- if (ret == 0 || ret == -ERESTARTSYS) {
- clear_bit(STOP_TX, &port->flags);
- fwtty_restart_tx(port);
- return -EINTR;
- }
- }
-
- spin_lock_bh(&port->lock);
- port->break_ctl = (state == -1);
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- if (state == 0) {
- spin_lock_bh(&port->lock);
- dma_fifo_reset(&port->tx_fifo);
- clear_bit(STOP_TX, &port->flags);
- spin_unlock_bh(&port->lock);
- }
- return 0;
-}
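For context, user space normally initiates a break with tcsendbreak() or the
TIOCSBRK/TIOCCBRK ioctls, which the tty core translates into the break_ctl(-1)
and break_ctl(0) calls handled above (rough sketch, assuming an open tty fd):

	#include <sys/ioctl.h>
	#include <unistd.h>

	ioctl(fd, TIOCSBRK, 0);		/* start break: break_ctl(tty, -1) */
	usleep(250 * 1000);
	ioctl(fd, TIOCCBRK, 0);		/* end break: break_ctl(tty, 0) */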
-
-static int fwtty_tiocmget(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned tiocm;
-
- spin_lock_bh(&port->lock);
- tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%x\n", tiocm);
-
- return tiocm;
-}
-
-static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "set: %x clear: %x\n", set, clear);
-
- /* TODO: simulate loopback if TIOCM_LOOP set */
-
- spin_lock_bh(&port->lock);
- port->mctrl &= ~(clear & MCTRL_MASK & 0xffff);
- port->mctrl |= set & MCTRL_MASK & 0xffff;
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
- return 0;
-}
-
-static int fwtty_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount)
-{
- struct fwtty_port *port = tty->driver_data;
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- icount->cts = port->icount.cts;
- icount->dsr = port->icount.dsr;
- icount->rng = port->icount.rng;
- icount->dcd = port->icount.dcd;
- icount->rx = port->icount.rx;
- icount->tx = port->icount.tx + stats.xchars;
- icount->frame = port->icount.frame;
- icount->overrun = port->icount.overrun;
- icount->parity = port->icount.parity;
- icount->brk = port->icount.brk;
- icount->buf_overrun = port->icount.overrun;
- return 0;
-}
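These counters reach user space via the TIOCGICOUNT ioctl, which the tty core
services with this get_icount() hook (sketch, fd assumed to be an open tty):

	#include <linux/serial.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	struct serial_icounter_struct ic;

	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("tx=%d rx=%d dcd=%d\n", ic.tx, ic.rx, ic.dcd);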
-
-static void fwtty_proc_show_port(struct seq_file *m, struct fwtty_port *port)
-{
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- seq_printf(m, " addr:%012llx tx:%d rx:%d", port->rx_handler.offset,
- port->icount.tx + stats.xchars, port->icount.rx);
- seq_printf(m, " cts:%d dsr:%d rng:%d dcd:%d", port->icount.cts,
- port->icount.dsr, port->icount.rng, port->icount.dcd);
- seq_printf(m, " fe:%d oe:%d pe:%d brk:%d", port->icount.frame,
- port->icount.overrun, port->icount.parity, port->icount.brk);
-}
-
-static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port)
-{
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped,
- stats.tx_stall, stats.fifo_errs, stats.lost);
- seq_printf(m, " pkts:%d thr:%d", stats.sent, stats.throttled);
-
- if (port->port.console) {
- seq_puts(m, "\n ");
- (*port->fwcon_ops->proc_show)(m, port->con_data);
- }
-
- fwtty_dump_profile(m, &port->stats);
-}
-
-static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
-{
- int generation = peer->generation;
-
- smp_rmb();
- seq_printf(m, " %s:", dev_name(&peer->unit->device));
- seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
- seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
- peer->max_payload, (unsigned long long) peer->guid);
- seq_printf(m, " mgmt:%012llx", (unsigned long long) peer->mgmt_addr);
- seq_printf(m, " addr:%012llx", (unsigned long long) peer->status_addr);
- seq_putc(m, '\n');
-}
-
-static int fwtty_proc_show(struct seq_file *m, void *v)
-{
- struct fwtty_port *port;
- int i;
-
- seq_puts(m, "fwserinfo: 1.0 driver: 1.0\n");
- for (i = 0; i < MAX_TOTAL_PORTS && (port = fwtty_port_get(i)); ++i) {
- seq_printf(m, "%2d:", i);
- if (capable(CAP_SYS_ADMIN))
- fwtty_proc_show_port(m, port);
- fwtty_port_put(port);
- seq_puts(m, "\n");
- }
- return 0;
-}
-
-static int fwtty_debugfs_stats_show(struct seq_file *m, void *v)
-{
- struct fw_serial *serial = m->private;
- struct fwtty_port *port;
- int i;
-
- for (i = 0; i < num_ports; ++i) {
- port = fwtty_port_get(serial->ports[i]->index);
- if (port) {
- seq_printf(m, "%2d:", port->index);
- fwtty_proc_show_port(m, port);
- fwtty_debugfs_show_port(m, port);
- fwtty_port_put(port);
- seq_puts(m, "\n");
- }
- }
- return 0;
-}
-
-static int fwtty_debugfs_peers_show(struct seq_file *m, void *v)
-{
- struct fw_serial *serial = m->private;
- struct fwtty_peer *peer;
-
- rcu_read_lock();
- seq_printf(m, "card: %s guid: %016llx\n",
- dev_name(serial->card->device),
- (unsigned long long) serial->card->guid);
- list_for_each_entry_rcu(peer, &serial->peer_list, list)
- fwtty_debugfs_show_peer(m, peer);
- rcu_read_unlock();
- return 0;
-}
-
-static int fwtty_proc_open(struct inode *inode, struct file *fp)
-{
- return single_open(fp, fwtty_proc_show, NULL);
-}
-
-static int fwtty_stats_open(struct inode *inode, struct file *fp)
-{
- return single_open(fp, fwtty_debugfs_stats_show, inode->i_private);
-}
-
-static int fwtty_peers_open(struct inode *inode, struct file *fp)
-{
- return single_open(fp, fwtty_debugfs_peers_show, inode->i_private);
-}
-
-static const struct file_operations fwtty_stats_fops = {
- .owner = THIS_MODULE,
- .open = fwtty_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations fwtty_peers_fops = {
- .owner = THIS_MODULE,
- .open = fwtty_peers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations fwtty_proc_fops = {
- .owner = THIS_MODULE,
- .open = fwtty_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct tty_port_operations fwtty_port_ops = {
- .dtr_rts = fwtty_port_dtr_rts,
- .carrier_raised = fwtty_port_carrier_raised,
- .shutdown = fwtty_port_shutdown,
- .activate = fwtty_port_activate,
-};
-
-static const struct tty_operations fwtty_ops = {
- .open = fwtty_open,
- .close = fwtty_close,
- .hangup = fwtty_hangup,
- .cleanup = fwtty_cleanup,
- .install = fwtty_install,
- .write = fwtty_write,
- .write_room = fwtty_write_room,
- .chars_in_buffer = fwtty_chars_in_buffer,
- .send_xchar = fwtty_send_xchar,
- .throttle = fwtty_throttle,
- .unthrottle = fwtty_unthrottle,
- .ioctl = fwtty_ioctl,
- .set_termios = fwtty_set_termios,
- .break_ctl = fwtty_break_ctl,
- .tiocmget = fwtty_tiocmget,
- .tiocmset = fwtty_tiocmset,
- .get_icount = fwtty_get_icount,
- .proc_fops = &fwtty_proc_fops,
-};
-
-static const struct tty_operations fwloop_ops = {
- .open = fwtty_open,
- .close = fwtty_close,
- .hangup = fwtty_hangup,
- .cleanup = fwtty_cleanup,
- .install = fwloop_install,
- .write = fwtty_write,
- .write_room = fwtty_write_room,
- .chars_in_buffer = fwtty_chars_in_buffer,
- .send_xchar = fwtty_send_xchar,
- .throttle = fwtty_throttle,
- .unthrottle = fwtty_unthrottle,
- .ioctl = fwtty_ioctl,
- .set_termios = fwtty_set_termios,
- .break_ctl = fwtty_break_ctl,
- .tiocmget = fwtty_tiocmget,
- .tiocmset = fwtty_tiocmset,
- .get_icount = fwtty_get_icount,
-};
-
-static inline int mgmt_pkt_expected_len(__be16 code)
-{
- static const struct fwserial_mgmt_pkt pkt;
-
- switch (be16_to_cpu(code)) {
- case FWSC_VIRT_CABLE_PLUG:
- return sizeof(pkt.hdr) + sizeof(pkt.plug_req);
-
- case FWSC_VIRT_CABLE_PLUG_RSP: /* | FWSC_RSP_OK */
- return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp);
-
- case FWSC_VIRT_CABLE_UNPLUG:
- case FWSC_VIRT_CABLE_UNPLUG_RSP:
- case FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK:
- case FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK:
- return sizeof(pkt.hdr);
-
- default:
- return -1;
- }
-}
-
-static inline void fill_plug_params(struct virt_plug_params *params,
- struct fwtty_port *port)
-{
- u64 status_addr = port->rx_handler.offset;
- u64 fifo_addr = port->rx_handler.offset + 4;
- size_t fifo_len = port->rx_handler.length - 4;
-
- params->status_hi = cpu_to_be32(status_addr >> 32);
- params->status_lo = cpu_to_be32(status_addr);
- params->fifo_hi = cpu_to_be32(fifo_addr >> 32);
- params->fifo_lo = cpu_to_be32(fifo_addr);
- params->fifo_len = cpu_to_be32(fifo_len);
-}
-
-static inline void fill_plug_req(struct fwserial_mgmt_pkt *pkt,
- struct fwtty_port *port)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
- fill_plug_params(&pkt->plug_req, port);
-}
-
-static inline void fill_plug_rsp_ok(struct fwserial_mgmt_pkt *pkt,
- struct fwtty_port *port)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
- fill_plug_params(&pkt->plug_rsp, port);
-}
-
-static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static inline void fill_unplug_rsp_ok(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
- struct virt_plug_params *params)
-{
- struct fwtty_port *port = peer->port;
-
- peer->status_addr = be32_to_u64(params->status_hi, params->status_lo);
- peer->fifo_addr = be32_to_u64(params->fifo_hi, params->fifo_lo);
- peer->fifo_len = be32_to_cpu(params->fifo_len);
- peer_set_state(peer, FWPS_ATTACHED);
-
- /* reconfigure tx_fifo optimally for this peer */
- spin_lock_bh(&port->lock);
- port->max_payload = min(peer->max_payload, peer->fifo_len);
- dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
-	spin_unlock_bh(&port->lock);
-
- if (port->port.console && port->fwcon_ops->notify != NULL)
- (*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
-
- fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
- (unsigned long long)peer->guid, dev_name(port->device));
-}
-
-static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer,
- struct fwserial_mgmt_pkt *pkt)
-{
- int generation;
- int rcode, tries = 5;
-
- do {
- generation = peer->generation;
- smp_rmb();
-
- rcode = fw_run_transaction(peer->serial->card,
- TCODE_WRITE_BLOCK_REQUEST,
- peer->node_id,
- generation, peer->speed,
- peer->mgmt_addr,
- pkt, be16_to_cpu(pkt->hdr.len));
- if (rcode == RCODE_BUSY || rcode == RCODE_SEND_ERROR ||
- rcode == RCODE_GENERATION) {
- fwtty_dbg(&peer->unit, "mgmt write error: %d\n", rcode);
- continue;
- } else {
- break;
- }
- } while (--tries > 0);
- return rcode;
-}
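Putting the fill_*() helpers and fwserial_send_mgmt_sync() together, a
management request is built and sent like this (condensed from
fwserial_connect_peer() below; sketch only, state handling and locking elided):

	struct fwserial_mgmt_pkt *pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	int rcode;

	if (pkt) {
		fill_plug_req(pkt, peer->port);
		rcode = fwserial_send_mgmt_sync(peer, pkt);
		if (rcode != RCODE_COMPLETE)
			fwtty_dbg(&peer->unit, "plug req failed: %d\n", rcode);
		kfree(pkt);
	}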
-
-/**
- * fwserial_claim_port - attempt to claim port @ index for peer
- *
- * Returns ptr to claimed port or error code (as ERR_PTR())
- * Can sleep - must be called from process context
- */
-static struct fwtty_port *fwserial_claim_port(struct fwtty_peer *peer,
- int index)
-{
- struct fwtty_port *port;
-
- if (index < 0 || index >= num_ports)
- return ERR_PTR(-EINVAL);
-
- /* must guarantee that previous port releases have completed */
- synchronize_rcu();
-
- port = peer->serial->ports[index];
- spin_lock_bh(&port->lock);
- if (!rcu_access_pointer(port->peer))
- rcu_assign_pointer(port->peer, peer);
- else
- port = ERR_PTR(-EBUSY);
- spin_unlock_bh(&port->lock);
-
- return port;
-}
-
-/**
- * fwserial_find_port - find avail port and claim for peer
- *
- * Returns ptr to claimed port or NULL if none avail
- * Can sleep - must be called from process context
- */
-static struct fwtty_port *fwserial_find_port(struct fwtty_peer *peer)
-{
- struct fwtty_port **ports = peer->serial->ports;
- int i;
-
- /* must guarantee that previous port releases have completed */
- synchronize_rcu();
-
- /* TODO: implement optional GUID-to-specific port # matching */
-
- /* find an unattached port (but not the loopback port, if present) */
- for (i = 0; i < num_ttys; ++i) {
- spin_lock_bh(&ports[i]->lock);
- if (!ports[i]->peer) {
- /* claim port */
- rcu_assign_pointer(ports[i]->peer, peer);
- spin_unlock_bh(&ports[i]->lock);
- return ports[i];
- }
- spin_unlock_bh(&ports[i]->lock);
- }
- return NULL;
-}
-
-static void fwserial_release_port(struct fwtty_port *port, bool reset)
-{
- /* drop carrier (and all other line status) */
- if (reset)
- fwtty_update_port_status(port, 0);
-
- spin_lock_bh(&port->lock);
-
- /* reset dma fifo max transmission size back to S100 */
- port->max_payload = link_speed_to_max_payload(SCODE_100);
- dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
-
- RCU_INIT_POINTER(port->peer, NULL);
- spin_unlock_bh(&port->lock);
-
- if (port->port.console && port->fwcon_ops->notify != NULL)
- (*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
-}
-
-static void fwserial_plug_timeout(unsigned long data)
-{
- struct fwtty_peer *peer = (struct fwtty_peer *)data;
- struct fwtty_port *port;
-
- spin_lock_bh(&peer->lock);
- if (peer->state != FWPS_PLUG_PENDING) {
- spin_unlock_bh(&peer->lock);
- return;
- }
-
- port = peer_revert_state(peer);
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, false);
-}
-
-/**
- * fwserial_connect_peer - initiate virtual cable with peer
- *
- * Returns 0 if VIRT_CABLE_PLUG request was successfully sent,
- * otherwise error code. Must be called from process context.
- */
-static int fwserial_connect_peer(struct fwtty_peer *peer)
-{
- struct fwtty_port *port;
- struct fwserial_mgmt_pkt *pkt;
- int err, rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return -ENOMEM;
-
- port = fwserial_find_port(peer);
- if (!port) {
- fwtty_err(&peer->unit, "avail ports in use\n");
- err = -EBUSY;
- goto free_pkt;
- }
-
- spin_lock_bh(&peer->lock);
-
- /* only initiate VIRT_CABLE_PLUG if peer is currently not attached */
- if (peer->state != FWPS_NOT_ATTACHED) {
- err = -EBUSY;
- goto release_port;
- }
-
- peer->port = port;
- peer_set_state(peer, FWPS_PLUG_PENDING);
-
- fill_plug_req(pkt, peer->port);
-
- setup_timer(&peer->timer, fwserial_plug_timeout, (unsigned long)peer);
- mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT);
- spin_unlock_bh(&peer->lock);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_PLUG_PENDING && rcode != RCODE_COMPLETE) {
- if (rcode == RCODE_CONFLICT_ERROR)
- err = -EAGAIN;
- else
- err = -EIO;
- goto cancel_timer;
- }
- spin_unlock_bh(&peer->lock);
-
- kfree(pkt);
- return 0;
-
-cancel_timer:
- del_timer(&peer->timer);
- peer_revert_state(peer);
-release_port:
- spin_unlock_bh(&peer->lock);
- fwserial_release_port(port, false);
-free_pkt:
- kfree(pkt);
- return err;
-}
-
-/**
- * fwserial_close_port -
- * HUP the tty (if the tty exists) and unregister the tty device.
- * Only used by the unit driver upon unit removal to disconnect and
- * cleanup all attached ports
- *
- * The port reference is put by fwtty_cleanup (if a reference was
- * ever taken).
- */
-static void fwserial_close_port(struct tty_driver *driver,
- struct fwtty_port *port)
-{
- struct tty_struct *tty;
-
- mutex_lock(&port->port.mutex);
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_vhangup(tty);
- tty_kref_put(tty);
- }
- mutex_unlock(&port->port.mutex);
-
- if (driver == fwloop_driver)
- tty_unregister_device(driver, loop_idx(port));
- else
- tty_unregister_device(driver, port->index);
-}
-
-/**
- * fwserial_lookup - finds first fw_serial associated with card
- * @card: fw_card to match
- *
- * NB: caller must be holding fwserial_list_mutex
- */
-static struct fw_serial *fwserial_lookup(struct fw_card *card)
-{
- struct fw_serial *serial;
-
- list_for_each_entry(serial, &fwserial_list, list) {
- if (card == serial->card)
- return serial;
- }
-
- return NULL;
-}
-
-/**
- * __fwserial_lookup_rcu - finds first fw_serial associated with card
- * @card: fw_card to match
- *
- * NB: caller must be inside rcu_read_lock() section
- */
-static struct fw_serial *__fwserial_lookup_rcu(struct fw_card *card)
-{
- struct fw_serial *serial;
-
- list_for_each_entry_rcu(serial, &fwserial_list, list) {
- if (card == serial->card)
- return serial;
- }
-
- return NULL;
-}
-
-/**
- * __fwserial_peer_by_node_id - finds a peer matching the given generation + id
- *
- * If a matching peer could not be found for the specified generation/node id,
- * this could be because:
- * a) the generation has changed and one of the nodes hasn't updated yet
- * b) the remote node has created its remote unit device before this
- * local node has created its corresponding remote unit device
- * In either case, the remote node should retry
- *
- * Note: caller must be in rcu_read_lock() section
- */
-static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
- int generation, int id)
-{
- struct fw_serial *serial;
- struct fwtty_peer *peer;
-
- serial = __fwserial_lookup_rcu(card);
- if (!serial) {
- /*
- * Something is very wrong - there should be a matching
- * fw_serial structure for every fw_card. Maybe the remote node
- * has created its remote unit device before this driver has
- * been probed for any unit devices...
- */
- fwtty_err(card, "unknown card (guid %016llx)\n",
- (unsigned long long) card->guid);
- return NULL;
- }
-
- list_for_each_entry_rcu(peer, &serial->peer_list, list) {
- int g = peer->generation;
-
- smp_rmb();
- if (generation == g && id == peer->node_id)
- return peer;
- }
-
- return NULL;
-}
-
-#ifdef DEBUG
-static void __dump_peer_list(struct fw_card *card)
-{
- struct fw_serial *serial;
- struct fwtty_peer *peer;
-
- serial = __fwserial_lookup_rcu(card);
- if (!serial)
- return;
-
- list_for_each_entry_rcu(peer, &serial->peer_list, list) {
- int g = peer->generation;
-
- smp_rmb();
- fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n",
- g, peer->node_id, (unsigned long long) peer->guid);
- }
-}
-#else
-#define __dump_peer_list(s)
-#endif
-
-static void fwserial_auto_connect(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(to_delayed_work(work), connect);
- int err;
-
- err = fwserial_connect_peer(peer);
- if (err == -EAGAIN && ++peer->connect_retries < MAX_CONNECT_RETRIES)
- schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
-}
-
-static void fwserial_peer_workfn(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
-
- peer->workfn(work);
-}
-
-/**
- * fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
- * @serial: aggregate representing the specific fw_card to add the peer to
- * @unit: 'peer' to create and add to peer_list of serial
- *
- * Adds a 'peer' (i.e., a local or remote 'serial' unit device) to the list of
- * peers for a specific fw_card. Optionally, auto-attach this peer to an
- * available tty port. This function is called either directly or indirectly
- * as a result of a 'serial' unit device being created & probed.
- *
- * Note: this function is serialized with fwserial_remove_peer() by the
- * fwserial_list_mutex held in fwserial_probe().
- *
- * A 1:1 correspondence between an fw_unit and an fwtty_peer is maintained
- * via the dev_set_drvdata() for the device of the fw_unit.
- */
-static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
-{
- struct device *dev = &unit->device;
- struct fw_device *parent = fw_parent_device(unit);
- struct fwtty_peer *peer;
- struct fw_csr_iterator ci;
- int key, val;
- int generation;
-
- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
- if (!peer)
- return -ENOMEM;
-
- peer_set_state(peer, FWPS_NOT_ATTACHED);
-
- dev_set_drvdata(dev, peer);
- peer->unit = unit;
- peer->guid = (u64)parent->config_rom[3] << 32 | parent->config_rom[4];
- peer->speed = parent->max_speed;
- peer->max_payload = min(device_max_receive(parent),
- link_speed_to_max_payload(peer->speed));
-
- generation = parent->generation;
- smp_rmb();
- peer->node_id = parent->node_id;
- smp_wmb();
- peer->generation = generation;
-
- /* retrieve the mgmt bus addr from the unit directory */
- fw_csr_iterator_init(&ci, unit->directory);
- while (fw_csr_iterator_next(&ci, &key, &val)) {
- if (key == (CSR_OFFSET | CSR_DEPENDENT_INFO)) {
- peer->mgmt_addr = CSR_REGISTER_BASE + 4 * val;
- break;
- }
- }
- if (peer->mgmt_addr == 0ULL) {
- /*
- * No mgmt address effectively disables VIRT_CABLE_PLUG -
- * this peer will not be able to attach to a remote
- */
- peer_set_state(peer, FWPS_NO_MGMT_ADDR);
- }
-
- spin_lock_init(&peer->lock);
- peer->port = NULL;
-
- init_timer(&peer->timer);
- INIT_WORK(&peer->work, fwserial_peer_workfn);
- INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
-
- /* associate peer with specific fw_card */
- peer->serial = serial;
- list_add_rcu(&peer->list, &serial->peer_list);
-
- fwtty_info(&peer->unit, "peer added (guid:%016llx)\n",
- (unsigned long long)peer->guid);
-
- /* identify the local unit & virt cable to loopback port */
- if (parent->is_local) {
- serial->self = peer;
- if (create_loop_dev) {
- struct fwtty_port *port;
-
- port = fwserial_claim_port(peer, num_ttys);
- if (!IS_ERR(port)) {
- struct virt_plug_params params;
-
- spin_lock_bh(&peer->lock);
- peer->port = port;
- fill_plug_params(&params, port);
- fwserial_virt_plug_complete(peer, &params);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(port);
- }
- }
-
- } else if (auto_connect) {
- /* auto-attach to remote units only (if policy allows) */
- schedule_delayed_work(&peer->connect, 1);
- }
-
- return 0;
-}
-
-/**
- * fwserial_remove_peer - remove a 'serial' unit device as a 'peer'
- *
- * Remove a 'peer' from its list of peers. This function is only
- * called by fwserial_remove() on bus removal of the unit device.
- *
- * Note: this function is serialized with fwserial_add_peer() by the
- * fwserial_list_mutex held in fwserial_remove().
- */
-static void fwserial_remove_peer(struct fwtty_peer *peer)
-{
- struct fwtty_port *port;
-
- spin_lock_bh(&peer->lock);
- peer_set_state(peer, FWPS_GONE);
- spin_unlock_bh(&peer->lock);
-
- cancel_delayed_work_sync(&peer->connect);
- cancel_work_sync(&peer->work);
-
- spin_lock_bh(&peer->lock);
- /* if this unit is the local unit, clear link */
- if (peer == peer->serial->self)
- peer->serial->self = NULL;
-
- /* cancel the request timeout timer (if running) */
- del_timer(&peer->timer);
-
- port = peer->port;
- peer->port = NULL;
-
- list_del_rcu(&peer->list);
-
- fwtty_info(&peer->unit, "peer removed (guid:%016llx)\n",
- (unsigned long long)peer->guid);
-
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, true);
-
- synchronize_rcu();
- kfree(peer);
-}
-
-/**
- * fwserial_create - init everything to create TTYs for a specific fw_card
- * @unit: fw_unit for first 'serial' unit device probed for this fw_card
- *
- * This function inits the aggregate structure (an fw_serial instance)
- * used to manage the TTY ports registered by a specific fw_card. Also, the
- * unit device is added as the first 'peer'.
- *
- * This unit device may represent a local unit device (as specified by the
- * config ROM unit directory) or it may represent a remote unit device
- * (as specified by the reading of the remote node's config ROM).
- *
- * Returns 0 to indicate "ownership" of the unit device, or a negative errno
- * value to indicate which error.
- */
-static int fwserial_create(struct fw_unit *unit)
-{
- struct fw_device *parent = fw_parent_device(unit);
- struct fw_card *card = parent->card;
- struct fw_serial *serial;
- struct fwtty_port *port;
- struct device *tty_dev;
- int i, j;
- int err;
-
- serial = kzalloc(sizeof(*serial), GFP_KERNEL);
- if (!serial)
- return -ENOMEM;
-
- kref_init(&serial->kref);
- serial->card = card;
- INIT_LIST_HEAD(&serial->peer_list);
-
- for (i = 0; i < num_ports; ++i) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto free_ports;
- }
- tty_port_init(&port->port);
- port->index = FWTTY_INVALID_INDEX;
- port->port.ops = &fwtty_port_ops;
- port->serial = serial;
- tty_buffer_set_limit(&port->port, 128 * 1024);
-
- spin_lock_init(&port->lock);
- INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx);
- INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks);
- INIT_WORK(&port->hangup, fwtty_do_hangup);
- init_waitqueue_head(&port->wait_tx);
- port->max_payload = link_speed_to_max_payload(SCODE_100);
- dma_fifo_init(&port->tx_fifo);
-
- RCU_INIT_POINTER(port->peer, NULL);
- serial->ports[i] = port;
-
- /* get unique bus addr region for port's status & recv fifo */
- port->rx_handler.length = FWTTY_PORT_RXFIFO_LEN + 4;
- port->rx_handler.address_callback = fwtty_port_handler;
- port->rx_handler.callback_data = port;
- /*
-		 * XXX: use a custom memory region above CPU physical memory addrs;
-		 * this will ease porting to 64-bit firewire adapters
- */
- err = fw_core_add_address_handler(&port->rx_handler,
- &fw_high_memory_region);
- if (err) {
- kfree(port);
- goto free_ports;
- }
- }
- /* preserve i for error cleanup */
-
- err = fwtty_ports_add(serial);
- if (err) {
- fwtty_err(&unit, "no space in port table\n");
- goto free_ports;
- }
-
- for (j = 0; j < num_ttys; ++j) {
- tty_dev = tty_port_register_device(&serial->ports[j]->port,
- fwtty_driver,
- serial->ports[j]->index,
- card->device);
- if (IS_ERR(tty_dev)) {
- err = PTR_ERR(tty_dev);
- fwtty_err(&unit, "register tty device error (%d)\n",
- err);
- goto unregister_ttys;
- }
-
- serial->ports[j]->device = tty_dev;
- }
- /* preserve j for error cleanup */
-
- if (create_loop_dev) {
- struct device *loop_dev;
-
- loop_dev = tty_port_register_device(&serial->ports[j]->port,
- fwloop_driver,
- loop_idx(serial->ports[j]),
- card->device);
- if (IS_ERR(loop_dev)) {
- err = PTR_ERR(loop_dev);
- fwtty_err(&unit, "create loop device failed (%d)\n",
- err);
- goto unregister_ttys;
- }
- serial->ports[j]->device = loop_dev;
- serial->ports[j]->loopback = true;
- }
-
- if (!IS_ERR_OR_NULL(fwserial_debugfs)) {
- serial->debugfs = debugfs_create_dir(dev_name(&unit->device),
- fwserial_debugfs);
- if (!IS_ERR_OR_NULL(serial->debugfs)) {
- debugfs_create_file("peers", 0444, serial->debugfs,
- serial, &fwtty_peers_fops);
- debugfs_create_file("stats", 0444, serial->debugfs,
- serial, &fwtty_stats_fops);
- }
- }
-
- list_add_rcu(&serial->list, &fwserial_list);
-
- fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n",
- dev_name(card->device), (unsigned long long) card->guid);
-
- err = fwserial_add_peer(serial, unit);
- if (!err)
- return 0;
-
- fwtty_err(&unit, "unable to add peer unit device (%d)\n", err);
-
- /* fall-through to error processing */
- debugfs_remove_recursive(serial->debugfs);
-
- list_del_rcu(&serial->list);
- if (create_loop_dev)
- tty_unregister_device(fwloop_driver,
- loop_idx(serial->ports[j]));
-unregister_ttys:
- for (--j; j >= 0; --j)
- tty_unregister_device(fwtty_driver, serial->ports[j]->index);
- kref_put(&serial->kref, fwserial_destroy);
- return err;
-
-free_ports:
- for (--i; i >= 0; --i) {
- tty_port_destroy(&serial->ports[i]->port);
- kfree(serial->ports[i]);
- }
- kfree(serial);
- return err;
-}
-
-/**
- * fwserial_probe: bus probe function for firewire 'serial' unit devices
- *
- * A 'serial' unit device is created and probed as a result of:
- *  - declaring an ieee1394 bus id table for 'devices' matching a fabricated
- * 'serial' unit specifier id
- * - adding a unit directory to the config ROM(s) for a 'serial' unit
- *
- * The firewire core registers unit devices by enumerating unit directories
- * of a node's config ROM after reading the config ROM when a new node is
- * added to the bus topology after a bus reset.
- *
- * The practical implications of this are:
- * - this probe is called for both local and remote nodes that have a 'serial'
- * unit directory in their config ROM (that matches the specifiers in
- * fwserial_id_table).
- * - no specific order is enforced for local vs. remote unit devices
- *
- * This unit driver copes with the lack of specific order in the same way the
- * firewire net driver does -- each probe, for either a local or remote unit
- * device, is treated as a 'peer' (has a struct fwtty_peer instance) and the
- * first peer created for a given fw_card (tracked by the global fwserial_list)
- * creates the underlying TTYs (aggregated in a fw_serial instance).
- *
- * NB: an early attempt to differentiate local & remote unit devices by creating
- * peers only for remote units and fw_serial instances (with their
- * associated TTY devices) only for local units was discarded. Managing
- * the peer lifetimes on device removal proved too complicated.
- *
- * fwserial_probe/fwserial_remove are effectively serialized by the
- * fwserial_list_mutex. This is necessary because the addition of the first peer
- * for a given fw_card will trigger the creation of the fw_serial for that
- * fw_card, which must not simultaneously contend with the removal of the
- * last peer for a given fw_card triggering the destruction of the same
- * fw_serial for the same fw_card.
- */
-static int fwserial_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *id)
-{
- struct fw_serial *serial;
- int err;
-
- mutex_lock(&fwserial_list_mutex);
- serial = fwserial_lookup(fw_parent_device(unit)->card);
- if (!serial)
- err = fwserial_create(unit);
- else
- err = fwserial_add_peer(serial, unit);
- mutex_unlock(&fwserial_list_mutex);
- return err;
-}
-
-/**
- * fwserial_remove: bus removal function for firewire 'serial' unit devices
- *
- * The corresponding 'peer' for this unit device is removed from the list of
- * peers for the associated fw_serial (which has a 1:1 correspondence with a
- * specific fw_card). If this is the last peer being removed, then trigger
- * the destruction of the underlying TTYs.
- */
-static void fwserial_remove(struct fw_unit *unit)
-{
- struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
- struct fw_serial *serial = peer->serial;
- int i;
-
- mutex_lock(&fwserial_list_mutex);
- fwserial_remove_peer(peer);
-
- if (list_empty(&serial->peer_list)) {
- /* unlink from the fwserial_list here */
- list_del_rcu(&serial->list);
-
- debugfs_remove_recursive(serial->debugfs);
-
- for (i = 0; i < num_ttys; ++i)
- fwserial_close_port(fwtty_driver, serial->ports[i]);
- if (create_loop_dev)
- fwserial_close_port(fwloop_driver, serial->ports[i]);
- kref_put(&serial->kref, fwserial_destroy);
- }
- mutex_unlock(&fwserial_list_mutex);
-}
-
-/**
- * fwserial_update: bus update function for 'firewire' serial unit devices
- *
- * Records the new node_id and bus generation for this peer. Note that locking
- * is unnecessary, but careful memory barrier usage is important to enforce the
- * load and store order of generation & node_id.
- *
- * The fw-core orders the write of node_id before generation in the parent
- * fw_device to ensure that a stale node_id cannot be used with a current
- * bus generation. So the generation value must be read before the node_id.
- *
- * In turn, this orders the write of node_id before generation in the peer to
- * also ensure a stale node_id cannot be used with a current bus generation.
- */
-static void fwserial_update(struct fw_unit *unit)
-{
- struct fw_device *parent = fw_parent_device(unit);
- struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
- int generation;
-
- generation = parent->generation;
- smp_rmb();
- peer->node_id = parent->node_id;
- smp_wmb();
- peer->generation = generation;
-}
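The ordering the comment above describes could equivalently be expressed with an
acquire/release pair on generation; the sketch below only restates what the
explicit smp_wmb()/smp_rmb() already guarantee (the deleted code keeps the
explicit-barrier form):

	/* writer: this function and fwserial_add_peer() */
	peer->node_id = parent->node_id;
	smp_store_release(&peer->generation, generation);

	/* reader: cf. __fwserial_peer_by_node_id() */
	g = smp_load_acquire(&peer->generation);
	if (g == generation && id == peer->node_id)
		return peer;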
-
-static const struct ieee1394_device_id fwserial_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .specifier_id = LINUX_VENDOR_ID,
- .version = FWSERIAL_VERSION,
- },
- { }
-};
-
-static struct fw_driver fwserial_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = KBUILD_MODNAME,
- .bus = &fw_bus_type,
- },
- .probe = fwserial_probe,
- .update = fwserial_update,
- .remove = fwserial_remove,
- .id_table = fwserial_id_table,
-};
-
-#define FW_UNIT_SPECIFIER(id) ((CSR_SPECIFIER_ID << 24) | (id))
-#define FW_UNIT_VERSION(ver) ((CSR_VERSION << 24) | (ver))
-#define FW_UNIT_ADDRESS(ofs) (((CSR_OFFSET | CSR_DEPENDENT_INFO) << 24) \
- | (((ofs) - CSR_REGISTER_BASE) >> 2))
-/* XXX: config ROM definitions could be improved with semi-automated offset
- * and length calculation
- */
-#define FW_ROM_LEN(quads) ((quads) << 16)
-#define FW_ROM_DESCRIPTOR(ofs) (((CSR_LEAF | CSR_DESCRIPTOR) << 24) | (ofs))
-
-struct fwserial_unit_directory_data {
- u32 len_crc;
- u32 unit_specifier;
- u32 unit_sw_version;
- u32 unit_addr_offset;
- u32 desc1_ofs;
- u32 desc1_len_crc;
- u32 desc1_data[5];
-} __packed;
-
-static struct fwserial_unit_directory_data fwserial_unit_directory_data = {
- .len_crc = FW_ROM_LEN(4),
- .unit_specifier = FW_UNIT_SPECIFIER(LINUX_VENDOR_ID),
- .unit_sw_version = FW_UNIT_VERSION(FWSERIAL_VERSION),
- .desc1_ofs = FW_ROM_DESCRIPTOR(1),
- .desc1_len_crc = FW_ROM_LEN(5),
- .desc1_data = {
- 0x00000000, /* type = text */
- 0x00000000, /* enc = ASCII, lang EN */
- 0x4c696e75, /* 'Linux TTY' */
- 0x78205454,
- 0x59000000,
- },
-};
-
-static struct fw_descriptor fwserial_unit_directory = {
- .length = sizeof(fwserial_unit_directory_data) / sizeof(u32),
- .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
- .data = (u32 *)&fwserial_unit_directory_data,
-};
-
-/*
- * The management address is in the unit space region but above other known
- * address users (to keep wild writes from causing havoc)
- */
-static const struct fw_address_region fwserial_mgmt_addr_region = {
- .start = CSR_REGISTER_BASE + 0x1e0000ULL,
- .end = 0x1000000000000ULL,
-};
-
-static struct fw_address_handler fwserial_mgmt_addr_handler;
-
-/**
- * fwserial_handle_plug_req - handle VIRT_CABLE_PLUG request work
- * @work: ptr to peer->work
- *
- * Attempts to complete the VIRT_CABLE_PLUG handshake sequence for this peer.
- *
- * This checks for a collided request -- i.e., that a VIRT_CABLE_PLUG request was
- * already sent to this peer. If so, the collision is resolved by comparing
- * guid values; the loser sends the plug response.
- *
- * Note: if an error prevents a response, don't do anything -- the
- * remote will time out its request.
- */
-static void fwserial_handle_plug_req(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
- struct virt_plug_params *plug_req = &peer->work_params.plug_req;
- struct fwtty_port *port;
- struct fwserial_mgmt_pkt *pkt;
- int rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return;
-
- port = fwserial_find_port(peer);
-
- spin_lock_bh(&peer->lock);
-
- switch (peer->state) {
- case FWPS_NOT_ATTACHED:
- if (!port) {
- fwtty_err(&peer->unit, "no more ports avail\n");
- fill_plug_rsp_nack(pkt);
- } else {
- peer->port = port;
- fill_plug_rsp_ok(pkt, peer->port);
- peer_set_state(peer, FWPS_PLUG_RESPONDING);
- /* don't release claimed port */
- port = NULL;
- }
- break;
-
- case FWPS_PLUG_PENDING:
- if (peer->serial->card->guid > peer->guid)
- goto cleanup;
-
- /* We lost - hijack the already-claimed port and send ok */
- del_timer(&peer->timer);
- fill_plug_rsp_ok(pkt, peer->port);
- peer_set_state(peer, FWPS_PLUG_RESPONDING);
- break;
-
- default:
- fill_plug_rsp_nack(pkt);
- }
-
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, false);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_PLUG_RESPONDING) {
- if (rcode == RCODE_COMPLETE) {
- struct fwtty_port *tmp = peer->port;
-
- fwserial_virt_plug_complete(peer, plug_req);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(tmp);
- spin_lock_bh(&peer->lock);
- } else {
- fwtty_err(&peer->unit, "PLUG_RSP error (%d)\n", rcode);
- port = peer_revert_state(peer);
- }
- }
-cleanup:
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, false);
- kfree(pkt);
-}
-
-static void fwserial_handle_unplug_req(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
- struct fwtty_port *port = NULL;
- struct fwserial_mgmt_pkt *pkt;
- int rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return;
-
- spin_lock_bh(&peer->lock);
-
- switch (peer->state) {
- case FWPS_ATTACHED:
- fill_unplug_rsp_ok(pkt);
- peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
- break;
-
- case FWPS_UNPLUG_PENDING:
- if (peer->serial->card->guid > peer->guid)
- goto cleanup;
-
- /* We lost - send unplug rsp */
- del_timer(&peer->timer);
- fill_unplug_rsp_ok(pkt);
- peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
- break;
-
- default:
- fill_unplug_rsp_nack(pkt);
- }
-
- spin_unlock_bh(&peer->lock);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_UNPLUG_RESPONDING) {
- if (rcode != RCODE_COMPLETE)
- fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)\n",
- rcode);
- port = peer_revert_state(peer);
- }
-cleanup:
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, true);
- kfree(pkt);
-}
-
-static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
- struct fwserial_mgmt_pkt *pkt,
- unsigned long long addr,
- size_t len)
-{
- struct fwtty_port *port = NULL;
- bool reset = false;
- int rcode;
-
- if (addr != fwserial_mgmt_addr_handler.offset || len < sizeof(pkt->hdr))
- return RCODE_ADDRESS_ERROR;
-
- if (len != be16_to_cpu(pkt->hdr.len) ||
- len != mgmt_pkt_expected_len(pkt->hdr.code))
- return RCODE_DATA_ERROR;
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_GONE) {
- /*
- * This should never happen - it would mean that the
- * remote unit that just wrote this transaction was
- * already removed from the bus -- and the removal was
-		 * processed before we received this transaction
- */
- fwtty_err(&peer->unit, "peer already removed\n");
- spin_unlock_bh(&peer->lock);
- return RCODE_ADDRESS_ERROR;
- }
-
- rcode = RCODE_COMPLETE;
-
- fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04hx\n", pkt->hdr.code);
-
- switch (be16_to_cpu(pkt->hdr.code) & FWSC_CODE_MASK) {
- case FWSC_VIRT_CABLE_PLUG:
- if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "plug req: busy\n");
- rcode = RCODE_CONFLICT_ERROR;
-
- } else {
- peer->work_params.plug_req = pkt->plug_req;
- peer->workfn = fwserial_handle_plug_req;
- queue_work(system_unbound_wq, &peer->work);
- }
- break;
-
- case FWSC_VIRT_CABLE_PLUG_RSP:
- if (peer->state != FWPS_PLUG_PENDING) {
- rcode = RCODE_CONFLICT_ERROR;
-
- } else if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) {
- fwtty_notice(&peer->unit, "NACK plug rsp\n");
- port = peer_revert_state(peer);
-
- } else {
- struct fwtty_port *tmp = peer->port;
-
- fwserial_virt_plug_complete(peer, &pkt->plug_rsp);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(tmp);
- spin_lock_bh(&peer->lock);
- }
- break;
-
- case FWSC_VIRT_CABLE_UNPLUG:
- if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "unplug req: busy\n");
- rcode = RCODE_CONFLICT_ERROR;
- } else {
- peer->workfn = fwserial_handle_unplug_req;
- queue_work(system_unbound_wq, &peer->work);
- }
- break;
-
- case FWSC_VIRT_CABLE_UNPLUG_RSP:
- if (peer->state != FWPS_UNPLUG_PENDING) {
- rcode = RCODE_CONFLICT_ERROR;
- } else {
- if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK)
- fwtty_notice(&peer->unit, "NACK unplug?\n");
- port = peer_revert_state(peer);
- reset = true;
- }
- break;
-
- default:
- fwtty_err(&peer->unit, "unknown mgmt code %d\n",
- be16_to_cpu(pkt->hdr.code));
- rcode = RCODE_DATA_ERROR;
- }
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, reset);
-
- return rcode;
-}
-
-/**
- * fwserial_mgmt_handler: bus address handler for mgmt requests
- * @parameters: fw_address_callback_t as specified by firewire core interface
- *
- * This handler is responsible for handling virtual cable requests from remotes
- * for all cards.
- */
-static void fwserial_mgmt_handler(struct fw_card *card,
- struct fw_request *request,
- int tcode, int destination, int source,
- int generation,
- unsigned long long addr,
- void *data, size_t len,
- void *callback_data)
-{
- struct fwserial_mgmt_pkt *pkt = data;
- struct fwtty_peer *peer;
- int rcode;
-
- rcu_read_lock();
- peer = __fwserial_peer_by_node_id(card, generation, source);
- if (!peer) {
- fwtty_dbg(card, "peer(%d:%x) not found\n", generation, source);
- __dump_peer_list(card);
- rcode = RCODE_CONFLICT_ERROR;
-
- } else {
- switch (tcode) {
- case TCODE_WRITE_BLOCK_REQUEST:
- rcode = fwserial_parse_mgmt_write(peer, pkt, addr, len);
- break;
-
- default:
- rcode = RCODE_TYPE_ERROR;
- }
- }
-
- rcu_read_unlock();
- fw_send_response(card, request, rcode);
-}
-
-static int __init fwserial_init(void)
-{
- int err, num_loops = !!(create_loop_dev);
-
- /* XXX: placeholder for a "firewire" debugfs node */
- fwserial_debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
- /* num_ttys/num_ports must not be set above the static alloc avail */
- if (num_ttys + num_loops > MAX_CARD_PORTS)
- num_ttys = MAX_CARD_PORTS - num_loops;
-
- num_ports = num_ttys + num_loops;
-
- fwtty_driver = tty_alloc_driver(MAX_TOTAL_PORTS, TTY_DRIVER_REAL_RAW
- | TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(fwtty_driver)) {
- err = PTR_ERR(fwtty_driver);
- goto remove_debugfs;
- }
-
- fwtty_driver->driver_name = KBUILD_MODNAME;
- fwtty_driver->name = tty_dev_name;
- fwtty_driver->major = 0;
- fwtty_driver->minor_start = 0;
- fwtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- fwtty_driver->subtype = SERIAL_TYPE_NORMAL;
- fwtty_driver->init_termios = tty_std_termios;
- fwtty_driver->init_termios.c_cflag |= CLOCAL;
- tty_set_operations(fwtty_driver, &fwtty_ops);
-
- err = tty_register_driver(fwtty_driver);
- if (err) {
- pr_err("register tty driver failed (%d)\n", err);
- goto put_tty;
- }
-
- if (create_loop_dev) {
- fwloop_driver = tty_alloc_driver(MAX_TOTAL_PORTS / num_ports,
- TTY_DRIVER_REAL_RAW
- | TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(fwloop_driver)) {
- err = PTR_ERR(fwloop_driver);
- goto unregister_driver;
- }
-
- fwloop_driver->driver_name = KBUILD_MODNAME "_loop";
- fwloop_driver->name = loop_dev_name;
- fwloop_driver->major = 0;
- fwloop_driver->minor_start = 0;
- fwloop_driver->type = TTY_DRIVER_TYPE_SERIAL;
- fwloop_driver->subtype = SERIAL_TYPE_NORMAL;
- fwloop_driver->init_termios = tty_std_termios;
- fwloop_driver->init_termios.c_cflag |= CLOCAL;
- tty_set_operations(fwloop_driver, &fwloop_ops);
-
- err = tty_register_driver(fwloop_driver);
- if (err) {
- pr_err("register loop driver failed (%d)\n", err);
- goto put_loop;
- }
- }
-
- fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache",
- sizeof(struct fwtty_transaction),
- 0, 0, fwtty_txn_constructor);
- if (!fwtty_txn_cache) {
- err = -ENOMEM;
- goto unregister_loop;
- }
-
- /*
- * Ideally, this address handler would be registered per local node
- * (rather than the same handler for all local nodes). However,
- * since the firewire core requires the config rom descriptor *before*
- * the local unit device(s) are created, a single management handler
- * must suffice for all local serial units.
- */
- fwserial_mgmt_addr_handler.length = sizeof(struct fwserial_mgmt_pkt);
- fwserial_mgmt_addr_handler.address_callback = fwserial_mgmt_handler;
-
- err = fw_core_add_address_handler(&fwserial_mgmt_addr_handler,
- &fwserial_mgmt_addr_region);
- if (err) {
- pr_err("add management handler failed (%d)\n", err);
- goto destroy_cache;
- }
-
- fwserial_unit_directory_data.unit_addr_offset =
- FW_UNIT_ADDRESS(fwserial_mgmt_addr_handler.offset);
- err = fw_core_add_descriptor(&fwserial_unit_directory);
- if (err) {
- pr_err("add unit descriptor failed (%d)\n", err);
- goto remove_handler;
- }
-
- err = driver_register(&fwserial_driver.driver);
- if (err) {
- pr_err("register fwserial driver failed (%d)\n", err);
- goto remove_descriptor;
- }
-
- return 0;
-
-remove_descriptor:
- fw_core_remove_descriptor(&fwserial_unit_directory);
-remove_handler:
- fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
-destroy_cache:
- kmem_cache_destroy(fwtty_txn_cache);
-unregister_loop:
- if (create_loop_dev)
- tty_unregister_driver(fwloop_driver);
-put_loop:
- if (create_loop_dev)
- put_tty_driver(fwloop_driver);
-unregister_driver:
- tty_unregister_driver(fwtty_driver);
-put_tty:
- put_tty_driver(fwtty_driver);
-remove_debugfs:
- debugfs_remove_recursive(fwserial_debugfs);
-
- return err;
-}
-
-static void __exit fwserial_exit(void)
-{
- driver_unregister(&fwserial_driver.driver);
- fw_core_remove_descriptor(&fwserial_unit_directory);
- fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
- kmem_cache_destroy(fwtty_txn_cache);
- if (create_loop_dev) {
- tty_unregister_driver(fwloop_driver);
- put_tty_driver(fwloop_driver);
- }
- tty_unregister_driver(fwtty_driver);
- put_tty_driver(fwtty_driver);
- debugfs_remove_recursive(fwserial_debugfs);
-}
-
-module_init(fwserial_init);
-module_exit(fwserial_exit);
-
-MODULE_AUTHOR("Peter Hurley (peter@hurleysoftware.com)");
-MODULE_DESCRIPTION("FireWire Serial TTY Driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(ieee1394, fwserial_id_table);
-MODULE_PARM_DESC(ttys, "Number of ttys to create for each local firewire node");
-MODULE_PARM_DESC(auto, "Auto-connect a tty to each firewire node discovered");
-MODULE_PARM_DESC(loop, "Create a loopback device, fwloop<n>, with ttys");
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
deleted file mode 100644
index 787aa4f3a41b..000000000000
--- a/drivers/staging/fwserial/fwserial.h
+++ /dev/null
@@ -1,369 +0,0 @@
-#ifndef _FIREWIRE_FWSERIAL_H
-#define _FIREWIRE_FWSERIAL_H
-
-#include <linux/kernel.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/list.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/spinlock.h>
-#include <linux/rcupdate.h>
-#include <linux/mutex.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-
-#include "dma_fifo.h"
-
-#ifdef FWTTY_PROFILING
-#define DISTRIBUTION_MAX_SIZE 8192
-#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void fwtty_profile_data(unsigned stat[], unsigned val)
-{
- int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
- ++stat[n];
-}
-#else
-#define DISTRIBUTION_MAX_INDEX 0
-#define fwtty_profile_data(st, n)
-#endif
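For example, a 100-byte write is counted in bucket ilog2(100) + 1 = 7 (values
64..127), a zero-length value in bucket 0, and with DISTRIBUTION_MAX_SIZE =
8192 the top bucket, DISTRIBUTION_MAX_INDEX = 14, collects everything of 8192
bytes and above.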
-
-/* Parameters for both VIRT_CABLE_PLUG & VIRT_CABLE_PLUG_RSP mgmt codes */
-struct virt_plug_params {
- __be32 status_hi;
- __be32 status_lo;
- __be32 fifo_hi;
- __be32 fifo_lo;
- __be32 fifo_len;
-};
-
-struct peer_work_params {
- union {
- struct virt_plug_params plug_req;
- };
-};
-
-/**
- * fwtty_peer: structure representing local & remote unit devices
- * @unit: unit child device of fw_device node
- * @serial: back pointer to associated fw_serial aggregate
- * @guid: unique 64-bit guid for this unit device
- * @generation: most recent bus generation
- * @node_id: most recent node_id
- * @speed: link speed of peer (0 = S100, 2 = S400, ... 5 = S3200)
- * @mgmt_addr: bus addr region to write mgmt packets to
- * @status_addr: bus addr register to write line status to
- * @fifo_addr: bus addr region to write serial output to
- * @fifo_len: max length for single write to fifo_addr
- * @list: link for insertion into fw_serial's peer_list
- * @rcu: for deferring peer reclamation
- * @lock: spinlock to synchronize changes to state & port fields
- * @work: only one work item can be queued at any one time
- * Note: pending work is canceled prior to removal, so this
- * peer is valid for at least the lifetime of the work function
- * @work_params: parameter block for work functions
- * @timer: timer for resetting peer state if remote request times out
- * @state: current state
- * @connect: work item for auto-connecting
- * @connect_retries: # of connections already attempted
- * @port: associated tty_port (usable if state == FWSC_ATTACHED)
- */
-struct fwtty_peer {
- struct fw_unit *unit;
- struct fw_serial *serial;
- u64 guid;
- int generation;
- int node_id;
- unsigned speed;
- int max_payload;
- u64 mgmt_addr;
-
- /* these are usable only if state == FWSC_ATTACHED */
- u64 status_addr;
- u64 fifo_addr;
- int fifo_len;
-
- struct list_head list;
- struct rcu_head rcu;
-
- spinlock_t lock;
- work_func_t workfn;
- struct work_struct work;
- struct peer_work_params work_params;
- struct timer_list timer;
- int state;
- struct delayed_work connect;
- int connect_retries;
-
- struct fwtty_port *port;
-};
-
-#define to_peer(ptr, field) (container_of(ptr, struct fwtty_peer, field))
-
-/* state values for fwtty_peer.state field */
-enum fwtty_peer_state {
- FWPS_GONE,
- FWPS_NOT_ATTACHED,
- FWPS_ATTACHED,
- FWPS_PLUG_PENDING,
- FWPS_PLUG_RESPONDING,
- FWPS_UNPLUG_PENDING,
- FWPS_UNPLUG_RESPONDING,
-
- FWPS_NO_MGMT_ADDR = -1,
-};
-
-#define CONNECT_RETRY_DELAY HZ
-#define MAX_CONNECT_RETRIES 10
-
-/* must be holding peer lock for these state funclets */
-static inline void peer_set_state(struct fwtty_peer *peer, int new)
-{
- peer->state = new;
-}
-
-static inline struct fwtty_port *peer_revert_state(struct fwtty_peer *peer)
-{
- struct fwtty_port *port = peer->port;
-
- peer->port = NULL;
- peer_set_state(peer, FWPS_NOT_ATTACHED);
- return port;
-}
-
-struct fwserial_mgmt_pkt {
- struct {
- __be16 len;
- __be16 code;
- } hdr;
- union {
- struct virt_plug_params plug_req;
- struct virt_plug_params plug_rsp;
- };
-} __packed;
-
-/* fwserial_mgmt_packet codes */
-#define FWSC_RSP_OK 0x0000
-#define FWSC_RSP_NACK 0x8000
-#define FWSC_CODE_MASK 0x0fff
-
-#define FWSC_VIRT_CABLE_PLUG 1
-#define FWSC_VIRT_CABLE_UNPLUG 2
-#define FWSC_VIRT_CABLE_PLUG_RSP 3
-#define FWSC_VIRT_CABLE_UNPLUG_RSP 4
-
-/* 1 min. plug timeout -- suitable for userland authorization */
-#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
-
-struct stats {
- unsigned xchars;
- unsigned dropped;
- unsigned tx_stall;
- unsigned fifo_errs;
- unsigned sent;
- unsigned lost;
- unsigned throttled;
- unsigned reads[DISTRIBUTION_MAX_INDEX + 1];
- unsigned writes[DISTRIBUTION_MAX_INDEX + 1];
- unsigned txns[DISTRIBUTION_MAX_INDEX + 1];
- unsigned unthrottle[DISTRIBUTION_MAX_INDEX + 1];
-};
-
-struct fwconsole_ops {
- void (*notify)(int code, void *data);
- void (*stats)(struct stats *stats, void *data);
- void (*proc_show)(struct seq_file *m, void *data);
-};
-
-/* codes for console ops notify */
-#define FWCON_NOTIFY_ATTACH 1
-#define FWCON_NOTIFY_DETACH 2
-
-/**
- * fwtty_port: structure used to track/represent underlying tty_port
- * @port: underlying tty_port
- * @device: tty device
- * @index: index into port_table for this particular port
- * note: minor = index + minor_start assigned by tty_alloc_driver()
- * @serial: back pointer to the containing fw_serial
- * @rx_handler: bus address handler for unique addr region used by remotes
- * to communicate with this port. Every port uses
- * fwtty_port_handler() for per port transactions.
- * @fwcon_ops: ops for attached fw_console (if any)
- * @con_data: private data for fw_console
- * @wait_tx: waitqueue for sleeping until writer/drain completes tx
- * @emit_breaks: delayed work responsible for generating breaks when the
- * break line status is active
- * @cps: characters per second computed from the termios settings
- * @break_last: timestamp in jiffies from last emit_breaks
- * @hangup: work responsible for HUPing when carrier is dropped/lost
- * @mstatus: loose virtualization of LSR/MSR
- * bits 15..0 correspond to TIOCM_* bits
- * bits 19..16 reserved for mctrl
- * bit 20 OOB_TX_THROTTLE
- * bits 23..21 reserved
- * bits 31..24 correspond to UART_LSR_* bits
- * @lock: spinlock for protecting concurrent access to fields below it
- * @mctrl: loose virtualization of MCR
- * bits 15..0 correspond to TIOCM_* bits
- * bit 16 OOB_RX_THROTTLE
- * bits 19..17 reserved
- * bits 31..20 reserved for mstatus
- * @drain: delayed work scheduled to ensure that writes are flushed.
- * The work can race with the writer but concurrent sending is
- * prevented with the IN_TX flag. Scheduled under lock to
- * limit scheduling when fifo has just been drained.
- * @tx_fifo: fifo used to store & block-up writes for dma to remote
- * @max_payload: max bytes transmissible per dma (based on peer's max_payload)
- * @status_mask: UART_LSR_* bitmask significant to rx (based on termios)
- * @ignore_mask: UART_LSR_* bitmask of states to ignore (also based on termios)
- * @break_ctl: if set, port is 'sending break' to remote
- * @write_only: self-explanatory
- * @overrun: previous rx was lost (partially or completely)
- * @loopback: if set, port is in loopback mode
- * @flags: atomic bit flags
- * bit 0: IN_TX - gate to allow only one cpu to send from the dma fifo
- * at a time.
- * bit 1: STOP_TX - force tx to exit while sending
- * @peer: rcu-pointer to associated fwtty_peer (if attached)
- * NULL if no peer attached
- * @icount: predefined statistics reported by the TIOCGICOUNT ioctl
- * @stats: additional statistics reported in /proc/tty/driver/firewire_serial
- */
-struct fwtty_port {
- struct tty_port port;
- struct device *device;
- unsigned index;
- struct fw_serial *serial;
- struct fw_address_handler rx_handler;
-
- struct fwconsole_ops *fwcon_ops;
- void *con_data;
-
- wait_queue_head_t wait_tx;
- struct delayed_work emit_breaks;
- unsigned cps;
- unsigned long break_last;
-
- struct work_struct hangup;
-
- unsigned mstatus;
-
- spinlock_t lock;
- unsigned mctrl;
- struct delayed_work drain;
- struct dma_fifo tx_fifo;
- int max_payload;
- unsigned status_mask;
- unsigned ignore_mask;
- unsigned break_ctl:1,
- write_only:1,
- overrun:1,
- loopback:1;
- unsigned long flags;
-
- struct fwtty_peer __rcu *peer;
-
- struct async_icount icount;
- struct stats stats;
-};
-
-#define to_port(ptr, field) (container_of(ptr, struct fwtty_port, field))
-
-/* bit #s for flags field */
-#define IN_TX 0
-#define STOP_TX 1
-
-/* bitmasks for special mctrl/mstatus bits */
-#define OOB_RX_THROTTLE 0x00010000
-#define MCTRL_RSRVD 0x000e0000
-#define OOB_TX_THROTTLE 0x00100000
-#define MSTATUS_RSRVD 0x00e00000
-
-#define MCTRL_MASK (TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | \
- TIOCM_LOOP | OOB_RX_THROTTLE | MCTRL_RSRVD)
-
-/* XXX even once every 1/50th of a second may be unnecessarily precise */
-/* delay in jiffies between brk emits */
-#define FREQ_BREAKS (HZ / 50)
-
-/* Ports are allocated in blocks of num_ports for each fw_card */
-#define MAX_CARD_PORTS CONFIG_FWTTY_MAX_CARD_PORTS
-#define MAX_TOTAL_PORTS CONFIG_FWTTY_MAX_TOTAL_PORTS
-
-/* tuning parameters */
-#define FWTTY_PORT_TXFIFO_LEN 4096
-#define FWTTY_PORT_MAX_PEND_DMA 8 /* costs a cache line per pend */
-#define DRAIN_THRESHOLD 1024
-#define MAX_ASYNC_PAYLOAD 4096 /* ohci-defined limit */
-#define WRITER_MINIMUM 128
-/* TODO: how to set watermark to AR context size? see fwtty_rx() */
-#define HIGH_WATERMARK 32768 /* AR context is 32K */
-
-/*
- * Size of bus addr region above 4GB used per port as the recv addr
- * - must be at least as big as the MAX_ASYNC_PAYLOAD
- */
-#define FWTTY_PORT_RXFIFO_LEN MAX_ASYNC_PAYLOAD
-
-/**
- * fw_serial: aggregate used to associate tty ports with specific fw_card
- * @card: fw_card associated with this fw_serial device (1:1 association)
- * @kref: reference-counted multi-port management allows delayed destroy
- * @self: local unit device as 'peer'. Not valid until local unit device
- * is enumerated.
- * @list: link for insertion into fwserial_list
- * @peer_list: list of local & remote unit devices attached to this card
- * @ports: fixed array of tty_ports provided by this serial device
- */
-struct fw_serial {
- struct fw_card *card;
- struct kref kref;
-
- struct dentry *debugfs;
- struct fwtty_peer *self;
-
- struct list_head list;
- struct list_head peer_list;
-
- struct fwtty_port *ports[MAX_CARD_PORTS];
-};
-
-#define to_serial(ptr, field) (container_of(ptr, struct fw_serial, field))
-
-#define TTY_DEV_NAME "fwtty" /* ttyFW was taken */
-static const char tty_dev_name[] = TTY_DEV_NAME;
-static const char loop_dev_name[] = "fwloop";
-
-extern struct tty_driver *fwtty_driver;
-
-struct fwtty_port *fwtty_port_get(unsigned index);
-void fwtty_port_put(struct fwtty_port *port);
-
-static inline void fwtty_bind_console(struct fwtty_port *port,
- struct fwconsole_ops *fwcon_ops,
- void *data)
-{
- port->con_data = data;
- port->fwcon_ops = fwcon_ops;
-}
-
-/*
- * Returns the max send async payload size in bytes based on the unit device
- * link speed. Self-limiting asynchronous bandwidth (via reducing the payload)
- * is not necessary and does not work, because
- * 1) asynchronous traffic will absorb all available bandwidth (less that
- * being used for isochronous traffic)
- * 2) isochronous arbitration always wins.
- */
-static inline int link_speed_to_max_payload(unsigned speed)
-{
- /* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
- return min(512 << speed, 4096);
-}
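/*
 * Editor's note (worked example, not part of the original header): with the
 * IEEE 1394 speed codes used elsewhere in this file (0 = S100, 1 = S200,
 * 2 = S400, 3 = S800, ...), the helper above yields
 *
 *   link_speed_to_max_payload(0) == 512     S100
 *   link_speed_to_max_payload(1) == 1024    S200
 *   link_speed_to_max_payload(2) == 2048    S400
 *   link_speed_to_max_payload(3) == 4096    S800 (capped)
 *   link_speed_to_max_payload(5) == 4096    S3200 (capped)
 */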
-
-#endif /* _FIREWIRE_FWSERIAL_H */
diff --git a/drivers/staging/gdm724x/Kconfig b/drivers/staging/gdm724x/Kconfig
deleted file mode 100644
index 0a1f090bbf38..000000000000
--- a/drivers/staging/gdm724x/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# GCT GDM724x LTE driver configuration
-#
-
-config LTE_GDM724X
- tristate "GCT GDM724x LTE support"
- depends on NET && USB && TTY && m
- help
-	  This driver supports GCT GDM724x LTE chip-based USB modem devices.
-	  It exposes 4 network devices (one per PDN) and 2 tty devices to be
-	  used for AT commands and DM monitoring applications.
-	  The modules will be called gdmulte.ko and gdmtty.ko.
-
- GCT-ATCx can be used for AT Commands
- GCT-DMx can be used for LTE protocol monitoring
diff --git a/drivers/staging/gdm724x/Makefile b/drivers/staging/gdm724x/Makefile
deleted file mode 100644
index ba7f11a6a097..000000000000
--- a/drivers/staging/gdm724x/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-obj-$(CONFIG_LTE_GDM724X) := gdmulte.o
-gdmulte-y += gdm_lte.o netlink_k.o
-gdmulte-y += gdm_usb.o gdm_endian.o
-
-obj-$(CONFIG_LTE_GDM724X) += gdmtty.o
-gdmtty-y := gdm_tty.o gdm_mux.o
-
diff --git a/drivers/staging/gdm724x/TODO b/drivers/staging/gdm724x/TODO
deleted file mode 100644
index b2b571ecb063..000000000000
--- a/drivers/staging/gdm724x/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-TODO:
-- Clean up coding style to meet kernel standard. (80 line limit, netdev_err)
-- Remove test for host endian
-- Remove confusing macros (endian, hci_send, sdu_send, rcv_with_cb)
-- Fix every instance of a function returning -1
-- Check for skb->len in gdm_lte_emulate_arp()
-- Use ALIGN() macro for dummy_cnt in up_to_host()
-- Error handling in init_usb()
-- Explain reason for multiples of 512 bytes in alloc_tx_struct()
-- Review use of atomic allocation for tx structs
-- Add error checking for alloc_tx_struct() in do_tx()
-- Fix up static tty port allocation to be dynamic
-
-Patches to:
- Jonathan Kim <jonathankim@gctsemi.com>
- Dean ahn <deanahn@gctsemi.com>
diff --git a/drivers/staging/gdm724x/gdm_endian.c b/drivers/staging/gdm724x/gdm_endian.c
deleted file mode 100644
index d7144e7afa32..000000000000
--- a/drivers/staging/gdm724x/gdm_endian.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include "gdm_endian.h"
-
-void gdm_set_endian(struct gdm_endian *ed, u8 dev_endian)
-{
- if (dev_endian == ENDIANNESS_BIG)
- ed->dev_ed = ENDIANNESS_BIG;
- else
- ed->dev_ed = ENDIANNESS_LITTLE;
-}
-
-u16 gdm_cpu_to_dev16(struct gdm_endian *ed, u16 x)
-{
- if (ed->dev_ed == ENDIANNESS_LITTLE)
- return cpu_to_le16(x);
- else
- return cpu_to_be16(x);
-}
-
-u16 gdm_dev16_to_cpu(struct gdm_endian *ed, u16 x)
-{
- if (ed->dev_ed == ENDIANNESS_LITTLE)
- return le16_to_cpu(x);
- else
- return be16_to_cpu(x);
-}
-
-u32 gdm_cpu_to_dev32(struct gdm_endian *ed, u32 x)
-{
- if (ed->dev_ed == ENDIANNESS_LITTLE)
- return cpu_to_le32(x);
- else
- return cpu_to_be32(x);
-}
-
-u32 gdm_dev32_to_cpu(struct gdm_endian *ed, u32 x)
-{
- if (ed->dev_ed == ENDIANNESS_LITTLE)
- return le32_to_cpu(x);
- else
- return be32_to_cpu(x);
-}
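/*
 * Editor's sketch (not from the original source): minimal use of the helpers
 * above when preparing a 16-bit field for the device.  The wrapper function
 * and the value 128 are hypothetical; only gdm_set_endian(),
 * gdm_cpu_to_dev16() and ENDIANNESS_LITTLE come from gdm_endian.[ch].
 */
static inline u16 example_dev_len(struct gdm_endian *ed)
{
	gdm_set_endian(ed, ENDIANNESS_LITTLE);	/* device reported little-endian */
	return gdm_cpu_to_dev16(ed, 128);	/* 128 encoded in device byte order */
}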
diff --git a/drivers/staging/gdm724x/gdm_endian.h b/drivers/staging/gdm724x/gdm_endian.h
deleted file mode 100644
index 6177870830e5..000000000000
--- a/drivers/staging/gdm724x/gdm_endian.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM_ENDIAN_H__
-#define __GDM_ENDIAN_H__
-
-#include <linux/types.h>
-
-enum {
- ENDIANNESS_MIN = 0,
- ENDIANNESS_UNKNOWN,
- ENDIANNESS_LITTLE,
- ENDIANNESS_BIG,
- ENDIANNESS_MIDDLE,
- ENDIANNESS_MAX
-};
-
-struct gdm_endian {
- u8 dev_ed;
-};
-
-void gdm_set_endian(struct gdm_endian *ed, u8 dev_endian);
-u16 gdm_cpu_to_dev16(struct gdm_endian *ed, u16 x);
-u16 gdm_dev16_to_cpu(struct gdm_endian *ed, u16 x);
-u32 gdm_cpu_to_dev32(struct gdm_endian *ed, u32 x);
-u32 gdm_dev32_to_cpu(struct gdm_endian *ed, u32 x);
-
-#endif /*__GDM_ENDIAN_H__*/
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
deleted file mode 100644
index a8d2cffb411c..000000000000
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/udp.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/in6.h>
-#include <linux/tcp.h>
-#include <linux/icmp.h>
-#include <linux/icmpv6.h>
-#include <linux/uaccess.h>
-#include <net/ndisc.h>
-
-#include "gdm_lte.h"
-#include "netlink_k.h"
-#include "hci.h"
-#include "hci_packet.h"
-#include "gdm_endian.h"
-
-/*
- * Netlink protocol number
- */
-#define NETLINK_LTE 30
-
-/*
- * Default MTU Size
- */
-#define DEFAULT_MTU_SIZE 1500
-
-#define IP_VERSION_4 4
-#define IP_VERSION_6 6
-
-static struct {
- int ref_cnt;
- struct sock *sock;
-} lte_event;
-
-static struct device_type wwan_type = {
- .name = "wwan",
-};
-
-static int gdm_lte_open(struct net_device *dev)
-{
- netif_start_queue(dev);
- return 0;
-}
-
-static int gdm_lte_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static int gdm_lte_set_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP)
- return -EBUSY;
- return 0;
-}
-
-static void tx_complete(void *arg)
-{
- struct nic *nic = arg;
-
- if (netif_queue_stopped(nic->netdev))
- netif_wake_queue(nic->netdev);
-}
-
-static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
-{
- int ret;
-
- ret = netif_rx_ni(skb);
- if (ret == NET_RX_DROP) {
- nic->stats.rx_dropped++;
- } else {
- nic->stats.rx_packets++;
- nic->stats.rx_bytes += skb->len + ETH_HLEN;
- }
-
- return 0;
-}
-
-static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
-{
- struct nic *nic = netdev_priv(skb_in->dev);
- struct sk_buff *skb_out;
- struct ethhdr eth;
- struct vlan_ethhdr vlan_eth;
- struct arphdr *arp_in;
- struct arphdr *arp_out;
- struct arpdata {
- u8 ar_sha[ETH_ALEN];
- u8 ar_sip[4];
- u8 ar_tha[ETH_ALEN];
- u8 ar_tip[4];
- };
- struct arpdata *arp_data_in;
- struct arpdata *arp_data_out;
- u8 arp_temp[60];
- void *mac_header_data;
- u32 mac_header_len;
-
- /* Format the mac header so that it can be put to skb */
- if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
- memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
- mac_header_data = &vlan_eth;
- mac_header_len = VLAN_ETH_HLEN;
- } else {
- memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
- mac_header_data = &eth;
- mac_header_len = ETH_HLEN;
- }
-
- /* Get the pointer of the original request */
- arp_in = (struct arphdr *)(skb_in->data + mac_header_len);
- arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len +
- sizeof(struct arphdr));
-
- /* Get the pointer of the outgoing response */
- arp_out = (struct arphdr *)arp_temp;
- arp_data_out = (struct arpdata *)(arp_temp + sizeof(struct arphdr));
-
- /* Copy the arp header */
- memcpy(arp_out, arp_in, sizeof(struct arphdr));
- arp_out->ar_op = htons(ARPOP_REPLY);
-
- /* Copy the arp payload: based on 2 bytes of mac and fill the IP */
- arp_data_out->ar_sha[0] = arp_data_in->ar_sha[0];
- arp_data_out->ar_sha[1] = arp_data_in->ar_sha[1];
- memcpy(&arp_data_out->ar_sha[2], &arp_data_in->ar_tip[0], 4);
- memcpy(&arp_data_out->ar_sip[0], &arp_data_in->ar_tip[0], 4);
- memcpy(&arp_data_out->ar_tha[0], &arp_data_in->ar_sha[0], 6);
- memcpy(&arp_data_out->ar_tip[0], &arp_data_in->ar_sip[0], 4);
-
- /* Fill the destination mac with source mac of the received packet */
- memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
- /* Fill the source mac with nic's source mac */
- memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
-
- /* Alloc skb and reserve align */
- skb_out = dev_alloc_skb(skb_in->len);
- if (!skb_out)
- return -ENOMEM;
- skb_reserve(skb_out, NET_IP_ALIGN);
-
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
- mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out,
- sizeof(struct arphdr));
- memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out,
- sizeof(struct arpdata));
-
- skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
- skb_out->dev = skb_in->dev;
- skb_reset_mac_header(skb_out);
- skb_pull(skb_out, ETH_HLEN);
-
- gdm_lte_rx(skb_out, nic, nic_type);
-
- return 0;
-}
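/*
 * Editor's note: the ARP "emulation" above never forwards the request over
 * the LTE link.  It copies the ARP header, flips the opcode to ARPOP_REPLY,
 * fabricates the sender hardware address from the first two bytes of the
 * requester's MAC plus the four bytes of the requested IP, swaps the
 * sender/target fields, and injects the reply locally via gdm_lte_rx(); the
 * caller (gdm_lte_tx) then frees the original skb.
 */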
-
-static int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
-{
- unsigned short *w = ptr;
- int sum = 0;
- int i;
-
- union {
- struct {
- u8 ph_src[16];
- u8 ph_dst[16];
- u32 ph_len;
- u8 ph_zero[3];
- u8 ph_nxt;
- } ph __packed;
- u16 pa[20];
- } pseudo_header;
-
- memset(&pseudo_header, 0, sizeof(pseudo_header));
- memcpy(&pseudo_header.ph.ph_src, &ipv6->saddr.in6_u.u6_addr8, 16);
- memcpy(&pseudo_header.ph.ph_dst, &ipv6->daddr.in6_u.u6_addr8, 16);
- pseudo_header.ph.ph_len = ipv6->payload_len;
- pseudo_header.ph.ph_nxt = ipv6->nexthdr;
-
- w = (u16 *)&pseudo_header;
- for (i = 0; i < ARRAY_SIZE(pseudo_header.pa); i++)
- sum += pseudo_header.pa[i];
-
- w = ptr;
- while (len > 1) {
- sum += *w++;
- len -= 2;
- }
-
- sum = (sum >> 16) + (sum & 0xFFFF);
- sum += (sum >> 16);
- sum = ~sum & 0xffff;
-
- return sum;
-}
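/*
 * Editor's note: icmp6_checksum() above is the standard 16-bit one's-
 * complement Internet checksum, computed over the IPv6 pseudo-header
 * (source and destination addresses, payload length, next header) followed
 * by the ICMPv6 message, with carries folded back in and the final sum
 * inverted before it is written into icmp6_cksum.
 */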
-
-static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
-{
- struct nic *nic = netdev_priv(skb_in->dev);
- struct sk_buff *skb_out;
- struct ethhdr eth;
- struct vlan_ethhdr vlan_eth;
- struct neighbour_advertisement {
- u8 target_address[16];
- u8 type;
- u8 length;
- u8 link_layer_address[6];
- };
- struct neighbour_advertisement na;
- struct neighbour_solicitation {
- u8 target_address[16];
- };
- struct neighbour_solicitation *ns;
- struct ipv6hdr *ipv6_in;
- struct ipv6hdr ipv6_out;
- struct icmp6hdr *icmp6_in;
- struct icmp6hdr icmp6_out;
-
- void *mac_header_data;
- u32 mac_header_len;
-
- /* Format the mac header so that it can be put to skb */
- if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
- memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
- if (ntohs(vlan_eth.h_vlan_encapsulated_proto) != ETH_P_IPV6)
- return -1;
- mac_header_data = &vlan_eth;
- mac_header_len = VLAN_ETH_HLEN;
- } else {
- memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
- if (ntohs(eth.h_proto) != ETH_P_IPV6)
- return -1;
- mac_header_data = &eth;
- mac_header_len = ETH_HLEN;
- }
-
- /* Check if this is IPv6 ICMP packet */
- ipv6_in = (struct ipv6hdr *)(skb_in->data + mac_header_len);
- if (ipv6_in->version != 6 || ipv6_in->nexthdr != IPPROTO_ICMPV6)
- return -1;
-
- /* Check if this is NDP packet */
- icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len +
- sizeof(struct ipv6hdr));
- if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
- return -1;
- } else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
- /* Check NS */
- u8 icmp_na[sizeof(struct icmp6hdr) +
- sizeof(struct neighbour_advertisement)];
- u8 zero_addr8[16] = {0,};
-
- if (memcmp(ipv6_in->saddr.in6_u.u6_addr8, zero_addr8, 16) == 0)
- /* Duplicate Address Detection: Source IP is all zero */
- return 0;
-
- icmp6_out.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
- icmp6_out.icmp6_code = 0;
- icmp6_out.icmp6_cksum = 0;
- /* R=0, S=1, O=1 */
- icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000);
-
- ns = (struct neighbour_solicitation *)
- (skb_in->data + mac_header_len +
- sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
- memcpy(&na.target_address, ns->target_address, 16);
- na.type = 0x02;
- na.length = 1;
- na.link_layer_address[0] = 0x00;
- na.link_layer_address[1] = 0x0a;
- na.link_layer_address[2] = 0x3b;
- na.link_layer_address[3] = 0xaf;
- na.link_layer_address[4] = 0x63;
- na.link_layer_address[5] = 0xc7;
-
- memcpy(&ipv6_out, ipv6_in, sizeof(struct ipv6hdr));
- memcpy(ipv6_out.saddr.in6_u.u6_addr8, &na.target_address, 16);
- memcpy(ipv6_out.daddr.in6_u.u6_addr8,
- ipv6_in->saddr.in6_u.u6_addr8, 16);
- ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) +
- sizeof(struct neighbour_advertisement));
-
- memcpy(icmp_na, &icmp6_out, sizeof(struct icmp6hdr));
- memcpy(icmp_na + sizeof(struct icmp6hdr), &na,
- sizeof(struct neighbour_advertisement));
-
- icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out,
- (u16 *)icmp_na, sizeof(icmp_na));
- } else {
- return -1;
- }
-
- /* Fill the destination mac with source mac of the received packet */
- memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
- /* Fill the source mac with nic's source mac */
- memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
-
- /* Alloc skb and reserve align */
- skb_out = dev_alloc_skb(skb_in->len);
- if (!skb_out)
- return -ENOMEM;
- skb_reserve(skb_out, NET_IP_ALIGN);
-
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
- mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out,
- sizeof(struct ipv6hdr));
- memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out,
- sizeof(struct icmp6hdr));
- memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na,
- sizeof(struct neighbour_advertisement));
-
- skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
- skb_out->dev = skb_in->dev;
- skb_reset_mac_header(skb_out);
- skb_pull(skb_out, ETH_HLEN);
-
- gdm_lte_rx(skb_out, nic, nic_type);
-
- return 0;
-}
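/*
 * Editor's note: neighbour solicitations are likewise answered locally with
 * a neighbour advertisement whose target link-layer address is the fixed
 * value 00:0a:3b:af:63:c7.  Router solicitations and non-NDP traffic return
 * -1 so they are transmitted normally, while DAD probes (unspecified source
 * address) return 0 and are silently consumed without a reply.
 */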
-
-static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
-{
- struct nic *nic = netdev_priv(dev);
- struct ethhdr *eth;
- struct vlan_ethhdr *vlan_eth;
- struct iphdr *ip;
- struct ipv6hdr *ipv6;
- int mac_proto;
- void *network_data;
- u32 nic_type = 0;
-
- /* NIC TYPE is based on the nic_id of this net_device */
- nic_type = 0x00000010 | nic->nic_id;
-
- /* Get ethernet protocol */
- eth = (struct ethhdr *)skb->data;
- if (ntohs(eth->h_proto) == ETH_P_8021Q) {
- vlan_eth = (struct vlan_ethhdr *)skb->data;
- mac_proto = ntohs(vlan_eth->h_vlan_encapsulated_proto);
- network_data = skb->data + VLAN_ETH_HLEN;
- nic_type |= NIC_TYPE_F_VLAN;
- } else {
- mac_proto = ntohs(eth->h_proto);
- network_data = skb->data + ETH_HLEN;
- }
-
- /* Process packet for nic type */
- switch (mac_proto) {
- case ETH_P_ARP:
- nic_type |= NIC_TYPE_ARP;
- break;
- case ETH_P_IP:
- nic_type |= NIC_TYPE_F_IPV4;
- ip = (struct iphdr *)network_data;
-
- /* Check DHCPv4 */
- if (ip->protocol == IPPROTO_UDP) {
- struct udphdr *udp = (struct udphdr *)
- (network_data + sizeof(struct iphdr));
- if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
- nic_type |= NIC_TYPE_F_DHCP;
- }
- break;
- case ETH_P_IPV6:
- nic_type |= NIC_TYPE_F_IPV6;
- ipv6 = (struct ipv6hdr *)network_data;
-
- if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
- struct icmp6hdr *icmp6 = (struct icmp6hdr *)
- (network_data + sizeof(struct ipv6hdr));
- if (icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
- nic_type |= NIC_TYPE_ICMPV6;
- } else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
- struct udphdr *udp = (struct udphdr *)
- (network_data + sizeof(struct ipv6hdr));
- if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
- nic_type |= NIC_TYPE_F_DHCP;
- }
- break;
- default:
- break;
- }
-
- return nic_type;
-}
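/*
 * Editor's note (worked example): the returned nic_type always carries
 * 0x00000010 | nic->nic_id in its low bits and is then tagged with the
 * NIC_TYPE_* flags (their numeric values live in hci_packet.h, which is not
 * part of this hunk).  For instance, an untagged DHCPv4 request sent on the
 * second PDN interface (nic_id 1) yields
 *
 *   0x00000011 | NIC_TYPE_F_IPV4 | NIC_TYPE_F_DHCP
 */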
-
-static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- u32 nic_type;
- void *data_buf;
- int data_len;
- int idx;
- int ret = 0;
-
- nic_type = gdm_lte_tx_nic_type(dev, skb);
- if (nic_type == 0) {
- netdev_err(dev, "tx - invalid nic_type\n");
- return -1;
- }
-
- if (nic_type & NIC_TYPE_ARP) {
- if (gdm_lte_emulate_arp(skb, nic_type) == 0) {
- dev_kfree_skb(skb);
- return 0;
- }
- }
-
- if (nic_type & NIC_TYPE_ICMPV6) {
- if (gdm_lte_emulate_ndp(skb, nic_type) == 0) {
- dev_kfree_skb(skb);
- return 0;
- }
- }
-
-	/*
-	 * Need a byte shift (that is, remove the VLAN tag) if there is one.
-	 * For the case of ARP, this breaks the offset as vlan_ethhdr+4 is
-	 * treated as ethhdr.  However, it shouldn't be a problem as the
-	 * response starts from arp_hdr and the ethhdr is created by this
-	 * driver based on the NIC mac.
-	 */
- if (nic_type & NIC_TYPE_F_VLAN) {
- struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data;
-
- nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK;
- data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN);
- data_len = skb->len - (VLAN_ETH_HLEN - ETH_HLEN);
- } else {
- nic->vlan_id = 0;
- data_buf = skb->data;
- data_len = skb->len;
- }
-
-	/* If it is an ICMPV6 packet, clear all the other bits:
-	 * for backward compatibility with the firmware
-	 */
- if (nic_type & NIC_TYPE_ICMPV6)
- nic_type = NIC_TYPE_ICMPV6;
-
-	/* If it is not a DHCP packet, clear all the flag bits and keep only
-	 * the original NIC type; otherwise keep the special flag (IPVX | DHCP)
-	 */
- if (!(nic_type & NIC_TYPE_F_DHCP))
- nic_type &= NIC_TYPE_MASK;
-
- ret = sscanf(dev->name, "lte%d", &idx);
- if (ret != 1) {
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-
- ret = nic->phy_dev->send_sdu_func(nic->phy_dev->priv_dev,
- data_buf, data_len,
- nic->pdn_table.dft_eps_id, 0,
- tx_complete, nic, idx,
- nic_type);
-
- if (ret == TX_NO_BUFFER || ret == TX_NO_SPC) {
- netif_stop_queue(dev);
- if (ret == TX_NO_BUFFER)
- ret = 0;
- else
- ret = -ENOSPC;
- } else if (ret == TX_NO_DEV) {
- ret = -ENODEV;
- }
-
- /* Updates tx stats */
- if (ret) {
- nic->stats.tx_dropped++;
- } else {
- nic->stats.tx_packets++;
- nic->stats.tx_bytes += data_len;
- }
- dev_kfree_skb(skb);
-
- return 0;
-}
-
-static struct net_device_stats *gdm_lte_stats(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
-
- return &nic->stats;
-}
-
-static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
-{
- struct nic *nic = netdev_priv(dev);
- struct hci_packet *hci = (struct hci_packet *)buf;
- int idx;
- int ret;
-
- ret = sscanf(dev->name, "lte%d", &idx);
- if (ret != 1)
- return -EINVAL;
-
- return netlink_send(lte_event.sock, idx, 0, buf,
- gdm_dev16_to_cpu(
- nic->phy_dev->get_endian(
- nic->phy_dev->priv_dev), hci->len)
- + HCI_HEADER_SIZE);
-}
-
-static void gdm_lte_event_rcv(struct net_device *dev, u16 type,
- void *msg, int len)
-{
- struct nic *nic = netdev_priv(dev);
-
- nic->phy_dev->send_hci_func(nic->phy_dev->priv_dev, msg, len, NULL,
- NULL);
-}
-
-int gdm_lte_event_init(void)
-{
- if (lte_event.ref_cnt == 0)
- lte_event.sock = netlink_init(NETLINK_LTE, gdm_lte_event_rcv);
-
- if (lte_event.sock) {
- lte_event.ref_cnt++;
- return 0;
- }
-
- pr_err("event init failed\n");
- return -1;
-}
-
-void gdm_lte_event_exit(void)
-{
- if (lte_event.sock && --lte_event.ref_cnt == 0) {
- netlink_exit(lte_event.sock);
- lte_event.sock = NULL;
- }
-}
-
-static u8 find_dev_index(u32 nic_type)
-{
- u8 index;
-
- index = (u8)(nic_type & 0x0000000f);
- if (index > MAX_NIC_TYPE)
- index = 0;
-
- return index;
-}
-
-static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
- int len, int flagged_nic_type)
-{
- u32 nic_type;
- struct nic *nic;
- struct sk_buff *skb;
- struct ethhdr eth;
- struct vlan_ethhdr vlan_eth;
- void *mac_header_data;
- u32 mac_header_len;
- char ip_version = 0;
-
- nic_type = flagged_nic_type & NIC_TYPE_MASK;
- nic = netdev_priv(dev);
-
- if (flagged_nic_type & NIC_TYPE_F_DHCP) {
-		/* Replace the destination mac address with the one
-		 * of the client that requested the IP
-		 */
- if (flagged_nic_type & NIC_TYPE_F_IPV4) {
- struct dhcp_packet {
- u8 op; /* BOOTREQUEST or BOOTREPLY */
- u8 htype; /* hardware address type.
- * 1 = 10mb ethernet
- */
- u8 hlen; /* hardware address length */
- u8 hops; /* used by relay agents only */
- u32 xid; /* unique id */
- u16 secs; /* elapsed since client began
- * acquisition/renewal
- */
- u16 flags; /* only one flag so far: */
- #define BROADCAST_FLAG 0x8000
- /* "I need broadcast replies" */
- u32 ciaddr; /* client IP (if client is in
- * BOUND, RENEW or REBINDING state)
- */
- u32 yiaddr; /* 'your' (client) IP address */
- /* IP address of next server to use in
- * bootstrap, returned in DHCPOFFER,
- * DHCPACK by server
- */
- u32 siaddr_nip;
- u32 gateway_nip; /* relay agent IP address */
- u8 chaddr[16]; /* link-layer client hardware
- * address (MAC)
- */
- u8 sname[64]; /* server host name (ASCIZ) */
- u8 file[128]; /* boot file name (ASCIZ) */
- u32 cookie; /* fixed first four option
- * bytes (99,130,83,99 dec)
- */
- } __packed;
- void *addr = buf + sizeof(struct iphdr) +
- sizeof(struct udphdr) +
- offsetof(struct dhcp_packet, chaddr);
- ether_addr_copy(nic->dest_mac_addr, addr);
- }
- }
-
- if (nic->vlan_id > 0) {
- mac_header_data = (void *)&vlan_eth;
- mac_header_len = VLAN_ETH_HLEN;
- } else {
- mac_header_data = (void *)&eth;
- mac_header_len = ETH_HLEN;
- }
-
- /* Format the data so that it can be put to skb */
- ether_addr_copy(mac_header_data, nic->dest_mac_addr);
- memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
-
- vlan_eth.h_vlan_TCI = htons(nic->vlan_id);
- vlan_eth.h_vlan_proto = htons(ETH_P_8021Q);
-
- if (nic_type == NIC_TYPE_ARP) {
-		/* This should be an ARP response: it only happens
-		 * because there was a request from the host
-		 */
- eth.h_proto = htons(ETH_P_ARP);
- vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_ARP);
- } else {
- ip_version = buf[0] >> 4;
- if (ip_version == IP_VERSION_4) {
- eth.h_proto = htons(ETH_P_IP);
- vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IP);
- } else if (ip_version == IP_VERSION_6) {
- eth.h_proto = htons(ETH_P_IPV6);
- vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
- } else {
- netdev_err(dev, "Unknown IP version %d\n", ip_version);
- return;
- }
- }
-
- /* Alloc skb and reserve align */
- skb = dev_alloc_skb(len + mac_header_len + NET_IP_ALIGN);
- if (!skb)
- return;
- skb_reserve(skb, NET_IP_ALIGN);
-
- memcpy(skb_put(skb, mac_header_len), mac_header_data, mac_header_len);
- memcpy(skb_put(skb, len), buf, len);
-
- skb->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb_pull(skb, ETH_HLEN);
-
- gdm_lte_rx(skb, nic, nic_type);
-}
-
-static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
-{
- struct net_device *dev;
- struct multi_sdu *multi_sdu = (struct multi_sdu *)buf;
- struct sdu *sdu = NULL;
- u8 *data = (u8 *)multi_sdu->data;
- u16 i = 0;
- u16 num_packet;
- u16 hci_len;
- u16 cmd_evt;
- u32 nic_type;
- u8 index;
-
- hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
- multi_sdu->len);
- num_packet = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
- multi_sdu->num_packet);
-
- for (i = 0; i < num_packet; i++) {
- sdu = (struct sdu *)data;
-
- cmd_evt = gdm_dev16_to_cpu(phy_dev->
- get_endian(phy_dev->priv_dev), sdu->cmd_evt);
- hci_len = gdm_dev16_to_cpu(phy_dev->
- get_endian(phy_dev->priv_dev), sdu->len);
- nic_type = gdm_dev32_to_cpu(phy_dev->
- get_endian(phy_dev->priv_dev), sdu->nic_type);
-
- if (cmd_evt != LTE_RX_SDU) {
- pr_err("rx sdu wrong hci %04x\n", cmd_evt);
- return;
- }
- if (hci_len < 12) {
- pr_err("rx sdu invalid len %d\n", hci_len);
- return;
- }
-
- index = find_dev_index(nic_type);
- if (index < MAX_NIC_TYPE) {
- dev = phy_dev->dev[index];
- gdm_lte_netif_rx(dev, (char *)sdu->data,
- (int)(hci_len-12), nic_type);
- } else {
- pr_err("rx sdu invalid nic_type :%x\n", nic_type);
- }
-
- data += ((hci_len+3) & 0xfffc) + HCI_HEADER_SIZE;
- }
-}
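/*
 * Editor's note: within a LTE_RX_MULTI_SDU batch each struct sdu record is
 * 4-byte aligned, so the parser advances by ((hci_len + 3) & 0xfffc) plus
 * HCI_HEADER_SIZE to reach the next record; a record whose command is not
 * LTE_RX_SDU or whose length is under 12 bytes aborts the whole batch.
 */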
-
-static void gdm_lte_pdn_table(struct net_device *dev, char *buf, int len)
-{
- struct nic *nic = netdev_priv(dev);
- struct hci_pdn_table_ind *pdn_table = (struct hci_pdn_table_ind *)buf;
-
- if (pdn_table->activate) {
- nic->pdn_table.activate = pdn_table->activate;
- nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(
- nic->phy_dev->get_endian(
- nic->phy_dev->priv_dev),
- pdn_table->dft_eps_id);
- nic->pdn_table.nic_type = gdm_dev32_to_cpu(
- nic->phy_dev->get_endian(
- nic->phy_dev->priv_dev),
- pdn_table->nic_type);
-
- netdev_info(dev, "pdn activated, nic_type=0x%x\n",
- nic->pdn_table.nic_type);
- } else {
- memset(&nic->pdn_table, 0x00, sizeof(struct pdn_table));
- netdev_info(dev, "pdn deactivated\n");
- }
-}
-
-static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
-{
- struct hci_packet *hci = (struct hci_packet *)buf;
- struct hci_pdn_table_ind *pdn_table = (struct hci_pdn_table_ind *)buf;
- struct sdu *sdu;
- struct net_device *dev;
- int ret = 0;
- u16 cmd_evt;
- u32 nic_type;
- u8 index;
-
- if (!len)
- return ret;
-
- cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
- hci->cmd_evt);
-
- dev = phy_dev->dev[0];
- if (dev == NULL)
- return 0;
-
- switch (cmd_evt) {
- case LTE_RX_SDU:
- sdu = (struct sdu *)hci->data;
- nic_type = gdm_dev32_to_cpu(phy_dev->
- get_endian(phy_dev->priv_dev), sdu->nic_type);
- index = find_dev_index(nic_type);
- dev = phy_dev->dev[index];
- gdm_lte_netif_rx(dev, hci->data, len, nic_type);
- break;
- case LTE_RX_MULTI_SDU:
- gdm_lte_multi_sdu_pkt(phy_dev, buf, len);
- break;
- case LTE_LINK_ON_OFF_INDICATION:
- netdev_info(dev, "link %s\n",
- ((struct hci_connect_ind *)buf)->connect
- ? "on" : "off");
- break;
- case LTE_PDN_TABLE_IND:
- pdn_table = (struct hci_pdn_table_ind *)buf;
- nic_type = gdm_dev32_to_cpu(phy_dev->
- get_endian(phy_dev->priv_dev),
- pdn_table->nic_type);
- index = find_dev_index(nic_type);
- dev = phy_dev->dev[index];
- gdm_lte_pdn_table(dev, buf, len);
- /* Fall through */
- default:
- ret = gdm_lte_event_send(dev, buf, len);
- break;
- }
-
- return ret;
-}
-
-static int rx_complete(void *arg, void *data, int len, int context)
-{
- struct phy_dev *phy_dev = (struct phy_dev *)arg;
-
- return gdm_lte_receive_pkt(phy_dev, (char *)data, len);
-}
-
-void start_rx_proc(struct phy_dev *phy_dev)
-{
- int i;
-
- for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
- phy_dev->rcv_func(phy_dev->priv_dev,
- rx_complete, phy_dev, USB_COMPLETE);
-}
-
-static struct net_device_ops gdm_netdev_ops = {
- .ndo_open = gdm_lte_open,
- .ndo_stop = gdm_lte_close,
- .ndo_set_config = gdm_lte_set_config,
- .ndo_start_xmit = gdm_lte_tx,
- .ndo_get_stats = gdm_lte_stats,
-};
-
-static u8 gdm_lte_macaddr[ETH_ALEN] = {0x00, 0x0a, 0x3b, 0x00, 0x00, 0x00};
-
-static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
- u8 *mac_address, u8 index)
-{
- /* Form the dev_addr */
- if (!mac_address)
- ether_addr_copy(dev_addr, gdm_lte_macaddr);
- else
- ether_addr_copy(dev_addr, mac_address);
-
- /* The last byte of the mac address
- * should be less than or equal to 0xFC
- */
- dev_addr[ETH_ALEN-1] += index;
-
- /* Create random nic src and copy the first
- * 3 bytes to be the same as dev_addr
- */
- random_ether_addr(nic_src);
- memcpy(nic_src, dev_addr, 3);
-
- /* Copy the nic_dest from dev_addr*/
- ether_addr_copy(nic_dest, dev_addr);
-}
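/*
 * Editor's note (worked example): with the default base address
 * 00:0a:3b:00:00:00 and index 2, the netdev gets dev_addr
 * 00:0a:3b:00:00:02, nic_src becomes a random address whose first three
 * bytes are overwritten with those of dev_addr, and nic_dest is a straight
 * copy of dev_addr.  The "last byte <= 0xFC" rule in the comment above keeps
 * the "+ index" additions (index < MAX_NIC_TYPE) from overflowing the byte.
 */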
-
-static void validate_mac_address(u8 *mac_address)
-{
- /* if zero address or multicast bit set, restore the default value */
- if (is_zero_ether_addr(mac_address) || (mac_address[0] & 0x01)) {
- pr_err("MAC invalid, restoring default\n");
- memcpy(mac_address, gdm_lte_macaddr, 6);
- }
-}
-
-int register_lte_device(struct phy_dev *phy_dev,
- struct device *dev, u8 *mac_address)
-{
- struct nic *nic;
- struct net_device *net;
- char pdn_dev_name[16];
- int ret = 0;
- u8 index;
-
- validate_mac_address(mac_address);
-
- for (index = 0; index < MAX_NIC_TYPE; index++) {
- /* Create device name lteXpdnX */
- sprintf(pdn_dev_name, "lte%%dpdn%d", index);
-
- /* Allocate netdev */
- net = alloc_netdev(sizeof(struct nic), pdn_dev_name,
- NET_NAME_UNKNOWN, ether_setup);
- if (!net) {
- pr_err("alloc_netdev failed\n");
- ret = -ENOMEM;
- goto err;
- }
- net->netdev_ops = &gdm_netdev_ops;
- net->flags &= ~IFF_MULTICAST;
- net->mtu = DEFAULT_MTU_SIZE;
-
- nic = netdev_priv(net);
- memset(nic, 0, sizeof(struct nic));
- nic->netdev = net;
- nic->phy_dev = phy_dev;
- nic->nic_id = index;
-
- form_mac_address(
- net->dev_addr,
- nic->src_mac_addr,
- nic->dest_mac_addr,
- mac_address,
- index);
-
- SET_NETDEV_DEV(net, dev);
- SET_NETDEV_DEVTYPE(net, &wwan_type);
-
- ret = register_netdev(net);
- if (ret)
- goto err;
-
- netif_carrier_on(net);
-
- phy_dev->dev[index] = net;
- }
-
- return 0;
-
-err:
- unregister_lte_device(phy_dev);
-
- return ret;
-}
-
-void unregister_lte_device(struct phy_dev *phy_dev)
-{
- struct net_device *net;
- int index;
-
- for (index = 0; index < MAX_NIC_TYPE; index++) {
- net = phy_dev->dev[index];
- if (net == NULL)
- continue;
-
- unregister_netdev(net);
- free_netdev(net);
- }
-}
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
deleted file mode 100644
index 88414e5a70cc..000000000000
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _GDM_LTE_H_
-#define _GDM_LTE_H_
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-
-#include "gdm_endian.h"
-
-#define MAX_NIC_TYPE 4
-#define MAX_RX_SUBMIT_COUNT 3
-#define DRIVER_VERSION "3.7.17.0"
-
-enum TX_ERROR_CODE {
- TX_NO_ERROR = 0,
- TX_NO_DEV,
- TX_NO_SPC,
- TX_NO_BUFFER,
-};
-
-enum CALLBACK_CONTEXT {
- KERNEL_THREAD = 0,
- USB_COMPLETE,
-};
-
-struct pdn_table {
- u8 activate;
- u32 dft_eps_id;
- u32 nic_type;
-} __packed;
-
-struct nic;
-
-struct phy_dev {
- void *priv_dev;
- struct net_device *dev[MAX_NIC_TYPE];
- int (*send_hci_func)(void *priv_dev, void *data, int len,
- void (*cb)(void *cb_data), void *cb_data);
- int (*send_sdu_func)(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
- void (*cb)(void *cb_data), void *cb_data,
- int dev_idx, int nic_type);
- int (*rcv_func)(void *priv_dev,
- int (*cb)(void *cb_data, void *data, int len,
- int context),
- void *cb_data, int context);
- struct gdm_endian * (*get_endian)(void *priv_dev);
-};
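/*
 * Editor's note: struct phy_dev is the seam between this network code and
 * the USB transport (gdm_usb.c, not shown in this hunk).  The transport
 * fills in the send/receive/endian callbacks, and gdm_lte.c only ever talks
 * to the hardware through these pointers and through the per-PDN
 * net_devices created by register_lte_device().
 */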
-
-struct nic {
- struct net_device *netdev;
- struct phy_dev *phy_dev;
- struct net_device_stats stats;
- struct pdn_table pdn_table;
- u8 dest_mac_addr[ETH_ALEN];
- u8 src_mac_addr[ETH_ALEN];
- u32 nic_id;
- u16 vlan_id;
-};
-
-int gdm_lte_event_init(void);
-void gdm_lte_event_exit(void);
-
-void start_rx_proc(struct phy_dev *phy_dev);
-int register_lte_device(struct phy_dev *phy_dev, struct device *dev,
- u8 *mac_address);
-void unregister_lte_device(struct phy_dev *phy_dev);
-
-#endif /* _GDM_LTE_H_ */
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
deleted file mode 100644
index 1cf24e4edf25..000000000000
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ /dev/null
@@ -1,691 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/usb.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/slab.h>
-#include <linux/usb/cdc.h>
-
-#include "gdm_mux.h"
-
-static struct workqueue_struct *mux_rx_wq;
-
-static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};
-
-#define USB_DEVICE_CDC_DATA(vid, pid) \
- .match_flags = \
- USB_DEVICE_ID_MATCH_DEVICE |\
- USB_DEVICE_ID_MATCH_INT_CLASS |\
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
- .idVendor = vid,\
- .idProduct = pid,\
- .bInterfaceClass = USB_CLASS_COMM,\
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM
-
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
- { USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
- { USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
- { USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
- {}
-};
-
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static int packet_type_to_index(u16 packetType)
-{
- int i;
-
- for (i = 0; i < TTY_MAX_COUNT; i++) {
- if (packet_type[i] == packetType)
- return i;
- }
-
- return -1;
-}
-
-static struct mux_tx *alloc_mux_tx(int len)
-{
- struct mux_tx *t = NULL;
-
- t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
- if (!t)
- return NULL;
-
- t->urb = usb_alloc_urb(0, GFP_ATOMIC);
- t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
- if (!t->urb || !t->buf) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- return NULL;
- }
-
- return t;
-}
-
-static void free_mux_tx(struct mux_tx *t)
-{
- if (t) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- }
-}
-
-static struct mux_rx *alloc_mux_rx(void)
-{
- struct mux_rx *r = NULL;
-
- r = kzalloc(sizeof(struct mux_rx), GFP_KERNEL);
- if (!r)
- return NULL;
-
- r->urb = usb_alloc_urb(0, GFP_KERNEL);
- r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
- if (!r->urb || !r->buf) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- return NULL;
- }
-
- return r;
-}
-
-static void free_mux_rx(struct mux_rx *r)
-{
- if (r) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- }
-}
-
-static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
-{
- struct mux_rx *r;
- unsigned long flags;
-
- spin_lock_irqsave(&rx->free_list_lock, flags);
-
- if (list_empty(&rx->rx_free_list)) {
- spin_unlock_irqrestore(&rx->free_list_lock, flags);
- return NULL;
- }
-
- r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
- list_del(&r->free_list);
-
- spin_unlock_irqrestore(&rx->free_list_lock, flags);
-
- return r;
-}
-
-static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rx->free_list_lock, flags);
- list_add_tail(&r->free_list, &rx->rx_free_list);
- spin_unlock_irqrestore(&rx->free_list_lock, flags);
-}
-
-
-static int up_to_host(struct mux_rx *r)
-{
- struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
- struct mux_pkt_header *mux_header;
- unsigned int start_flag;
- unsigned int payload_size;
- unsigned short packet_type;
- int total_len;
- u32 packet_size_sum = r->offset;
- int index;
- int ret = TO_HOST_INVALID_PACKET;
- int len = r->len;
-
- while (1) {
- mux_header = (struct mux_pkt_header *)(r->buf +
- packet_size_sum);
- start_flag = __le32_to_cpu(mux_header->start_flag);
- payload_size = __le32_to_cpu(mux_header->payload_size);
- packet_type = __le16_to_cpu(mux_header->packet_type);
-
- if (start_flag != START_FLAG) {
- pr_err("invalid START_FLAG %x\n", start_flag);
- break;
- }
-
- total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
-
- if (len - packet_size_sum <
- total_len) {
- pr_err("invalid payload : %d %d %04x\n",
- payload_size, len, packet_type);
- break;
- }
-
- index = packet_type_to_index(packet_type);
- if (index < 0) {
- pr_err("invalid index %d\n", index);
- break;
- }
-
- ret = r->callback(mux_header->data,
- payload_size,
- index,
- mux_dev->tty_dev,
- RECV_PACKET_PROCESS_CONTINUE
- );
- if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
- r->offset += packet_size_sum;
- break;
- }
-
- packet_size_sum += total_len;
- if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
- ret = r->callback(NULL,
- 0,
- index,
- mux_dev->tty_dev,
- RECV_PACKET_PROCESS_COMPLETE
- );
- break;
- }
- }
-
- return ret;
-}
-
-static void do_rx(struct work_struct *work)
-{
- struct mux_dev *mux_dev =
- container_of(work, struct mux_dev, work_rx.work);
- struct mux_rx *r;
- struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
- unsigned long flags;
- int ret = 0;
-
- while (1) {
- spin_lock_irqsave(&rx->to_host_lock, flags);
- if (list_empty(&rx->to_host_list)) {
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
- break;
- }
- r = list_entry(rx->to_host_list.next, struct mux_rx,
- to_host_list);
- list_del(&r->to_host_list);
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
-
- ret = up_to_host(r);
- if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
- pr_err("failed to send mux data to host\n");
- else
- put_rx_struct(rx, r);
- }
-}
-
-static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
-{
- unsigned long flags;
- struct mux_rx *r_remove, *r_remove_next;
-
- spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
- rx_submit_list) {
- if (r == r_remove)
- list_del(&r->rx_submit_list);
- }
- spin_unlock_irqrestore(&rx->submit_list_lock, flags);
-}
-
-static void gdm_mux_rcv_complete(struct urb *urb)
-{
- struct mux_rx *r = urb->context;
- struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
- struct rx_cxt *rx = &mux_dev->rx;
- unsigned long flags;
-
- remove_rx_submit_list(r, rx);
-
- if (urb->status) {
- if (mux_dev->usb_state == PM_NORMAL)
- dev_err(&urb->dev->dev, "%s: urb status error %d\n",
- __func__, urb->status);
- put_rx_struct(rx, r);
- } else {
- r->len = r->urb->actual_length;
- spin_lock_irqsave(&rx->to_host_lock, flags);
- list_add_tail(&r->to_host_list, &rx->to_host_list);
- queue_work(mux_rx_wq, &mux_dev->work_rx.work);
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
- }
-}
-
-static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
- int tty_index, struct tty_dev *tty_dev, int complete))
-{
- struct mux_dev *mux_dev = priv_dev;
- struct usb_device *usbdev = mux_dev->usbdev;
- struct mux_rx *r;
- struct rx_cxt *rx = &mux_dev->rx;
- unsigned long flags;
- int ret;
-
- if (!usbdev) {
- pr_err("device is disconnected\n");
- return -ENODEV;
- }
-
- r = get_rx_struct(rx);
- if (!r) {
- pr_err("get_rx_struct fail\n");
- return -ENOMEM;
- }
-
- r->offset = 0;
- r->mux_dev = (void *)mux_dev;
- r->callback = cb;
- mux_dev->rx_cb = cb;
-
- usb_fill_bulk_urb(r->urb,
- usbdev,
- usb_rcvbulkpipe(usbdev, 0x86),
- r->buf,
- MUX_RX_MAX_SIZE,
- gdm_mux_rcv_complete,
- r);
-
- spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
- spin_unlock_irqrestore(&rx->submit_list_lock, flags);
-
- ret = usb_submit_urb(r->urb, GFP_KERNEL);
-
- if (ret) {
- spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_del(&r->rx_submit_list);
- spin_unlock_irqrestore(&rx->submit_list_lock, flags);
-
- put_rx_struct(rx, r);
-
- pr_err("usb_submit_urb ret=%d\n", ret);
- }
-
- usb_mark_last_busy(usbdev);
-
- return ret;
-}
-
-static void gdm_mux_send_complete(struct urb *urb)
-{
- struct mux_tx *t = urb->context;
-
- if (urb->status == -ECONNRESET) {
- dev_info(&urb->dev->dev, "CONNRESET\n");
- free_mux_tx(t);
- return;
- }
-
- if (t->callback)
- t->callback(t->cb_data);
-
- free_mux_tx(t);
-}
-
-static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
- void (*cb)(void *data), void *cb_data)
-{
- struct mux_dev *mux_dev = priv_dev;
- struct usb_device *usbdev = mux_dev->usbdev;
- struct mux_pkt_header *mux_header;
- struct mux_tx *t = NULL;
- static u32 seq_num = 1;
- int total_len;
- int ret;
- unsigned long flags;
-
- if (mux_dev->usb_state == PM_SUSPEND) {
- ret = usb_autopm_get_interface(mux_dev->intf);
- if (!ret)
- usb_autopm_put_interface(mux_dev->intf);
- }
-
- spin_lock_irqsave(&mux_dev->write_lock, flags);
-
- total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
-
- t = alloc_mux_tx(total_len);
- if (!t) {
- pr_err("alloc_mux_tx fail\n");
- spin_unlock_irqrestore(&mux_dev->write_lock, flags);
- return -ENOMEM;
- }
-
- mux_header = (struct mux_pkt_header *)t->buf;
- mux_header->start_flag = __cpu_to_le32(START_FLAG);
- mux_header->seq_num = __cpu_to_le32(seq_num++);
- mux_header->payload_size = __cpu_to_le32((u32)len);
- mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
-
- memcpy(t->buf+MUX_HEADER_SIZE, data, len);
- memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE -
- len);
-
- t->len = total_len;
- t->callback = cb;
- t->cb_data = cb_data;
-
- usb_fill_bulk_urb(t->urb,
- usbdev,
- usb_sndbulkpipe(usbdev, 5),
- t->buf,
- total_len,
- gdm_mux_send_complete,
- t);
-
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
-
- spin_unlock_irqrestore(&mux_dev->write_lock, flags);
-
- if (ret)
- pr_err("usb_submit_urb Error: %d\n", ret);
-
- usb_mark_last_busy(usbdev);
-
- return ret;
-}
-
-static int gdm_mux_send_control(void *priv_dev, int request, int value,
- void *buf, int len)
-{
- struct mux_dev *mux_dev = priv_dev;
- struct usb_device *usbdev = mux_dev->usbdev;
- int ret;
-
- ret = usb_control_msg(usbdev,
- usb_sndctrlpipe(usbdev, 0),
- request,
- USB_RT_ACM,
- value,
- 2,
- buf,
- len,
- 5000
- );
-
- if (ret < 0)
- pr_err("usb_control_msg error: %d\n", ret);
-
- return ret < 0 ? ret : 0;
-}
-
-static void release_usb(struct mux_dev *mux_dev)
-{
- struct rx_cxt *rx = &mux_dev->rx;
- struct mux_rx *r, *r_next;
- unsigned long flags;
-
- cancel_delayed_work(&mux_dev->work_rx);
-
- spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
- rx_submit_list) {
- spin_unlock_irqrestore(&rx->submit_list_lock, flags);
- usb_kill_urb(r->urb);
- spin_lock_irqsave(&rx->submit_list_lock, flags);
- }
- spin_unlock_irqrestore(&rx->submit_list_lock, flags);
-
- spin_lock_irqsave(&rx->free_list_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
- list_del(&r->free_list);
- free_mux_rx(r);
- }
- spin_unlock_irqrestore(&rx->free_list_lock, flags);
-
- spin_lock_irqsave(&rx->to_host_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
- if (r->mux_dev == (void *)mux_dev) {
- list_del(&r->to_host_list);
- free_mux_rx(r);
- }
- }
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
-}
-
-
-static int init_usb(struct mux_dev *mux_dev)
-{
- struct mux_rx *r;
- struct rx_cxt *rx = &mux_dev->rx;
- int ret = 0;
- int i;
-
- spin_lock_init(&mux_dev->write_lock);
- INIT_LIST_HEAD(&rx->to_host_list);
- INIT_LIST_HEAD(&rx->rx_submit_list);
- INIT_LIST_HEAD(&rx->rx_free_list);
- spin_lock_init(&rx->to_host_lock);
- spin_lock_init(&rx->submit_list_lock);
- spin_lock_init(&rx->free_list_lock);
-
- for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
- r = alloc_mux_rx();
- if (r == NULL) {
- ret = -ENOMEM;
- break;
- }
-
- list_add(&r->free_list, &rx->rx_free_list);
- }
-
- INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);
-
- return ret;
-}
-
-static int gdm_mux_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct mux_dev *mux_dev;
- struct tty_dev *tty_dev;
- u16 idVendor, idProduct;
- int bInterfaceNumber;
- int ret;
- int i;
- struct usb_device *usbdev = interface_to_usbdev(intf);
-
- bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
-
- idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
- idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
-
- pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);
-
- if (bInterfaceNumber != 2)
- return -ENODEV;
-
- mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
- if (!mux_dev)
- return -ENOMEM;
-
- tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
- if (!tty_dev) {
- ret = -ENOMEM;
- goto err_free_mux;
- }
-
- mux_dev->usbdev = usbdev;
- mux_dev->control_intf = intf;
-
- ret = init_usb(mux_dev);
- if (ret)
- goto err_free_usb;
-
- tty_dev->priv_dev = (void *)mux_dev;
- tty_dev->send_func = gdm_mux_send;
- tty_dev->recv_func = gdm_mux_recv;
- tty_dev->send_control = gdm_mux_send_control;
-
- ret = register_lte_tty_device(tty_dev, &intf->dev);
- if (ret)
- goto err_unregister_tty;
-
- for (i = 0; i < TTY_MAX_COUNT; i++)
- mux_dev->tty_dev = tty_dev;
-
- mux_dev->intf = intf;
- mux_dev->usb_state = PM_NORMAL;
-
- usb_get_dev(usbdev);
- usb_set_intfdata(intf, tty_dev);
-
- return 0;
-
-err_unregister_tty:
- unregister_lte_tty_device(tty_dev);
-err_free_usb:
- release_usb(mux_dev);
- kfree(tty_dev);
-err_free_mux:
- kfree(mux_dev);
-
- return ret;
-}
-
-static void gdm_mux_disconnect(struct usb_interface *intf)
-{
- struct tty_dev *tty_dev;
- struct mux_dev *mux_dev;
- struct usb_device *usbdev = interface_to_usbdev(intf);
-
- tty_dev = usb_get_intfdata(intf);
-
- mux_dev = tty_dev->priv_dev;
-
- release_usb(mux_dev);
- unregister_lte_tty_device(tty_dev);
-
- kfree(mux_dev);
- kfree(tty_dev);
-
- usb_put_dev(usbdev);
-}
-
-static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
-{
- struct tty_dev *tty_dev;
- struct mux_dev *mux_dev;
- struct rx_cxt *rx;
- struct mux_rx *r, *r_next;
- unsigned long flags;
-
- tty_dev = usb_get_intfdata(intf);
- mux_dev = tty_dev->priv_dev;
- rx = &mux_dev->rx;
-
- if (mux_dev->usb_state != PM_NORMAL) {
- dev_err(intf->usb_dev, "usb suspend - invalid state\n");
- return -1;
- }
-
- mux_dev->usb_state = PM_SUSPEND;
-
-
- spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
- rx_submit_list) {
- spin_unlock_irqrestore(&rx->submit_list_lock, flags);
- usb_kill_urb(r->urb);
- spin_lock_irqsave(&rx->submit_list_lock, flags);
- }
- spin_unlock_irqrestore(&rx->submit_list_lock, flags);
-
- return 0;
-}
-
-static int gdm_mux_resume(struct usb_interface *intf)
-{
- struct tty_dev *tty_dev;
- struct mux_dev *mux_dev;
- u8 i;
-
- tty_dev = usb_get_intfdata(intf);
- mux_dev = tty_dev->priv_dev;
-
- if (mux_dev->usb_state != PM_SUSPEND) {
- dev_err(intf->usb_dev, "usb resume - invalid state\n");
- return -1;
- }
-
- mux_dev->usb_state = PM_NORMAL;
-
- for (i = 0; i < MAX_ISSUE_NUM; i++)
- gdm_mux_recv(mux_dev, mux_dev->rx_cb);
-
- return 0;
-}
-
-static struct usb_driver gdm_mux_driver = {
- .name = "gdm_mux",
- .probe = gdm_mux_probe,
- .disconnect = gdm_mux_disconnect,
- .id_table = id_table,
- .supports_autosuspend = 1,
- .suspend = gdm_mux_suspend,
- .resume = gdm_mux_resume,
- .reset_resume = gdm_mux_resume,
-};
-
-static int __init gdm_usb_mux_init(void)
-{
-
- mux_rx_wq = create_workqueue("mux_rx_wq");
- if (mux_rx_wq == NULL) {
- pr_err("work queue create fail\n");
- return -1;
- }
-
- register_lte_tty_driver();
-
- return usb_register(&gdm_mux_driver);
-}
-
-static void __exit gdm_usb_mux_exit(void)
-{
- unregister_lte_tty_driver();
-
- if (mux_rx_wq) {
- flush_workqueue(mux_rx_wq);
- destroy_workqueue(mux_rx_wq);
- }
-
- usb_deregister(&gdm_mux_driver);
-}
-
-module_init(gdm_usb_mux_init);
-module_exit(gdm_usb_mux_exit);
-
-MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
deleted file mode 100644
index 3d50383c6ced..000000000000
--- a/drivers/staging/gdm724x/gdm_mux.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _GDM_MUX_H_
-#define _GDM_MUX_H_
-
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/list.h>
-
-#include "gdm_tty.h"
-
-#define PM_NORMAL 0
-#define PM_SUSPEND 1
-
-#define USB_RT_ACM (USB_TYPE_CLASS | USB_RECIP_INTERFACE)
-
-#define START_FLAG 0xA512485A
-#define MUX_HEADER_SIZE 14
-#define MUX_TX_MAX_SIZE (1024*10)
-#define MUX_RX_MAX_SIZE (1024*30)
-#define AT_PKT_TYPE 0xF011
-#define DM_PKT_TYPE 0xF010
-
-#define RETRY_TIMER 30 /* msec */
-
-struct mux_pkt_header {
- __le32 start_flag;
- __le32 seq_num;
- __le32 payload_size;
- __le16 packet_type;
- unsigned char data[0];
-};
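As the __le types indicate, this header travels little-endian on the wire, with MUX_HEADER_SIZE (4 + 4 + 4 + 2 = 14) bytes in front of the payload. A hypothetical helper filling one frame, purely for illustration (build_mux_header() does not exist in the driver):

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void build_mux_header(struct mux_pkt_header *hdr, u32 seq,
			     u32 payload_size, u16 packet_type,
			     const void *payload)
{
	hdr->start_flag = cpu_to_le32(START_FLAG);
	hdr->seq_num = cpu_to_le32(seq);
	hdr->payload_size = cpu_to_le32(payload_size);
	hdr->packet_type = cpu_to_le16(packet_type);	/* AT_PKT_TYPE or DM_PKT_TYPE */
	memcpy(hdr->data, payload, payload_size);
}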
-
-struct mux_tx {
- struct urb *urb;
- u8 *buf;
- int len;
- void (*callback)(void *cb_data);
- void *cb_data;
-};
-
-struct mux_rx {
- struct list_head free_list;
- struct list_head rx_submit_list;
- struct list_head to_host_list;
- struct urb *urb;
- u8 *buf;
- void *mux_dev;
- u32 offset;
- u32 len;
- int (*callback)(void *data,
- int len,
- int tty_index,
- struct tty_dev *tty_dev,
- int complete);
-};
-
-struct rx_cxt {
- struct list_head to_host_list;
- struct list_head rx_submit_list;
- struct list_head rx_free_list;
- spinlock_t to_host_lock;
- spinlock_t submit_list_lock;
- spinlock_t free_list_lock;
-};
-
-struct mux_dev {
- struct usb_device *usbdev;
- struct usb_interface *control_intf;
- struct usb_interface *data_intf;
- struct rx_cxt rx;
- struct delayed_work work_rx;
- struct usb_interface *intf;
- int usb_state;
- int (*rx_cb)(void *data,
- int len,
- int tty_index,
- struct tty_dev *tty_dev,
- int complete);
- spinlock_t write_lock;
- struct tty_dev *tty_dev;
-};
-
-#endif /* _GDM_MUX_H_ */
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
deleted file mode 100644
index 001348ccacf9..000000000000
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/usb/cdc.h>
-#include <linux/serial.h>
-#include "gdm_tty.h"
-
-#define GDM_TTY_MAJOR 0
-#define GDM_TTY_MINOR 32
-
-#define ACM_CTRL_DTR 0x01
-#define ACM_CTRL_RTS 0x02
-#define ACM_CTRL_DSR 0x02
-#define ACM_CTRL_RI 0x08
-#define ACM_CTRL_DCD 0x01
-
-#define WRITE_SIZE 2048
-
-#define MUX_TX_MAX_SIZE 2048
-
-#define gdm_tty_send(n, d, l, i, c, b) (\
- n->tty_dev->send_func(n->tty_dev->priv_dev, d, l, i, c, b))
-#define gdm_tty_recv(n, c) (\
- n->tty_dev->recv_func(n->tty_dev->priv_dev, c))
-#define gdm_tty_send_control(n, r, v, d, l) (\
- n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
-
-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
-
-static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
-static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
-static DEFINE_MUTEX(gdm_table_lock);
-
-static char *DRIVER_STRING[TTY_MAX_COUNT] = {"GCTATC", "GCTDM"};
-static char *DEVICE_STRING[TTY_MAX_COUNT] = {"GCT-ATC", "GCT-DM"};
-
-static void gdm_port_destruct(struct tty_port *port)
-{
- struct gdm *gdm = container_of(port, struct gdm, port);
-
- mutex_lock(&gdm_table_lock);
- gdm_table[gdm->index][gdm->minor] = NULL;
- mutex_unlock(&gdm_table_lock);
-
- kfree(gdm);
-}
-
-static struct tty_port_operations gdm_port_ops = {
- .destruct = gdm_port_destruct,
-};
-
-static int gdm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct gdm *gdm = NULL;
- int ret;
- int i;
- int j;
-
- j = GDM_TTY_MINOR;
- for (i = 0; i < TTY_MAX_COUNT; i++) {
- if (!strcmp(tty->driver->driver_name, DRIVER_STRING[i])) {
- j = tty->index;
- break;
- }
- }
-
- if (j == GDM_TTY_MINOR)
- return -ENODEV;
-
- mutex_lock(&gdm_table_lock);
- gdm = gdm_table[i][j];
- if (gdm == NULL) {
- mutex_unlock(&gdm_table_lock);
- return -ENODEV;
- }
-
- tty_port_get(&gdm->port);
-
- ret = tty_standard_install(driver, tty);
- if (ret) {
- tty_port_put(&gdm->port);
- mutex_unlock(&gdm_table_lock);
- return ret;
- }
-
- tty->driver_data = gdm;
- mutex_unlock(&gdm_table_lock);
-
- return 0;
-}
-
-static int gdm_tty_open(struct tty_struct *tty, struct file *filp)
-{
- struct gdm *gdm = tty->driver_data;
-
- return tty_port_open(&gdm->port, tty, filp);
-}
-
-static void gdm_tty_cleanup(struct tty_struct *tty)
-{
- struct gdm *gdm = tty->driver_data;
-
- tty_port_put(&gdm->port);
-}
-
-static void gdm_tty_hangup(struct tty_struct *tty)
-{
- struct gdm *gdm = tty->driver_data;
-
- tty_port_hangup(&gdm->port);
-}
-
-static void gdm_tty_close(struct tty_struct *tty, struct file *filp)
-{
- struct gdm *gdm = tty->driver_data;
-
- tty_port_close(&gdm->port, tty, filp);
-}
-
-static int gdm_tty_recv_complete(void *data,
- int len,
- int index,
- struct tty_dev *tty_dev,
- int complete)
-{
- struct gdm *gdm = tty_dev->gdm[index];
-
- if (!GDM_TTY_READY(gdm)) {
- if (complete == RECV_PACKET_PROCESS_COMPLETE)
- gdm_tty_recv(gdm, gdm_tty_recv_complete);
- return TO_HOST_PORT_CLOSE;
- }
-
- if (data && len) {
- if (tty_buffer_request_room(&gdm->port, len) == len) {
- tty_insert_flip_string(&gdm->port, data, len);
- tty_flip_buffer_push(&gdm->port);
- } else {
- return TO_HOST_BUFFER_REQUEST_FAIL;
- }
- }
-
- if (complete == RECV_PACKET_PROCESS_COMPLETE)
- gdm_tty_recv(gdm, gdm_tty_recv_complete);
-
- return 0;
-}
-
-static void gdm_tty_send_complete(void *arg)
-{
- struct gdm *gdm = (struct gdm *)arg;
-
- if (!GDM_TTY_READY(gdm))
- return;
-
- tty_port_tty_wakeup(&gdm->port);
-}
-
-static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int len)
-{
- struct gdm *gdm = tty->driver_data;
- int remain = len;
- int sent_len = 0;
- int sending_len = 0;
-
- if (!GDM_TTY_READY(gdm))
- return -ENODEV;
-
- if (!len)
- return 0;
-
- while (1) {
- sending_len = remain > MUX_TX_MAX_SIZE ? MUX_TX_MAX_SIZE :
- remain;
- gdm_tty_send(gdm,
- (void *)(buf+sent_len),
- sending_len,
- gdm->index,
- gdm_tty_send_complete,
- gdm
- );
- sent_len += sending_len;
- remain -= sending_len;
- if (remain <= 0)
- break;
- }
-
- return len;
-}
-
-static int gdm_tty_write_room(struct tty_struct *tty)
-{
- struct gdm *gdm = tty->driver_data;
-
- if (!GDM_TTY_READY(gdm))
- return -ENODEV;
-
- return WRITE_SIZE;
-}
-
-int register_lte_tty_device(struct tty_dev *tty_dev, struct device *device)
-{
- struct gdm *gdm;
- int i;
- int j;
-
- for (i = 0; i < TTY_MAX_COUNT; i++) {
-
- gdm = kmalloc(sizeof(struct gdm), GFP_KERNEL);
- if (!gdm)
- return -ENOMEM;
-
- mutex_lock(&gdm_table_lock);
- for (j = 0; j < GDM_TTY_MINOR; j++) {
- if (!gdm_table[i][j])
- break;
- }
-
- if (j == GDM_TTY_MINOR) {
- kfree(gdm);
- mutex_unlock(&gdm_table_lock);
- return -EINVAL;
- }
-
- gdm_table[i][j] = gdm;
- mutex_unlock(&gdm_table_lock);
-
- tty_dev->gdm[i] = gdm;
- tty_port_init(&gdm->port);
-
- gdm->port.ops = &gdm_port_ops;
- gdm->index = i;
- gdm->minor = j;
- gdm->tty_dev = tty_dev;
-
- tty_port_register_device(&gdm->port, gdm_driver[i],
- gdm->minor, device);
- }
-
- for (i = 0; i < MAX_ISSUE_NUM; i++)
- gdm_tty_recv(gdm, gdm_tty_recv_complete);
-
- return 0;
-}
-
-void unregister_lte_tty_device(struct tty_dev *tty_dev)
-{
- struct gdm *gdm;
- struct tty_struct *tty;
- int i;
-
- for (i = 0; i < TTY_MAX_COUNT; i++) {
- gdm = tty_dev->gdm[i];
- if (!gdm)
- continue;
-
- mutex_lock(&gdm_table_lock);
- gdm_table[gdm->index][gdm->minor] = NULL;
- mutex_unlock(&gdm_table_lock);
-
- tty = tty_port_tty_get(&gdm->port);
- if (tty) {
- tty_vhangup(tty);
- tty_kref_put(tty);
- }
-
- tty_unregister_device(gdm_driver[i], gdm->minor);
- tty_port_put(&gdm->port);
- }
-}
-
-static const struct tty_operations gdm_tty_ops = {
- .install = gdm_tty_install,
- .open = gdm_tty_open,
- .close = gdm_tty_close,
- .cleanup = gdm_tty_cleanup,
- .hangup = gdm_tty_hangup,
- .write = gdm_tty_write,
- .write_room = gdm_tty_write_room,
-};
-
-int register_lte_tty_driver(void)
-{
- struct tty_driver *tty_driver;
- int i;
- int ret;
-
- for (i = 0; i < TTY_MAX_COUNT; i++) {
- tty_driver = alloc_tty_driver(GDM_TTY_MINOR);
- if (!tty_driver)
- return -ENOMEM;
-
- tty_driver->owner = THIS_MODULE;
- tty_driver->driver_name = DRIVER_STRING[i];
- tty_driver->name = DEVICE_STRING[i];
- tty_driver->major = GDM_TTY_MAJOR;
- tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- tty_driver->subtype = SERIAL_TYPE_NORMAL;
- tty_driver->flags = TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV;
- tty_driver->init_termios = tty_std_termios;
- tty_driver->init_termios.c_cflag = B9600 | CS8 | HUPCL | CLOCAL;
- tty_driver->init_termios.c_lflag = ISIG | ICANON | IEXTEN;
- tty_set_operations(tty_driver, &gdm_tty_ops);
-
- ret = tty_register_driver(tty_driver);
- if (ret) {
- put_tty_driver(tty_driver);
- return ret;
- }
-
- gdm_driver[i] = tty_driver;
- }
-
- return ret;
-}
-
-void unregister_lte_tty_driver(void)
-{
- struct tty_driver *tty_driver;
- int i;
-
- for (i = 0; i < TTY_MAX_COUNT; i++) {
- tty_driver = gdm_driver[i];
- if (tty_driver) {
- tty_unregister_driver(tty_driver);
- put_tty_driver(tty_driver);
- }
- }
-}
-
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
deleted file mode 100644
index 297438b4ddcb..000000000000
--- a/drivers/staging/gdm724x/gdm_tty.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _GDM_TTY_H_
-#define _GDM_TTY_H_
-
-#include <linux/types.h>
-#include <linux/tty.h>
-
-
-#define TTY_MAX_COUNT 2
-
-#define MAX_ISSUE_NUM 3
-
-enum TO_HOST_RESULT {
- TO_HOST_BUFFER_REQUEST_FAIL = 1,
- TO_HOST_PORT_CLOSE = 2,
- TO_HOST_INVALID_PACKET = 3,
-};
-
-enum RECV_PACKET_PROCESS {
- RECV_PACKET_PROCESS_COMPLETE = 0,
- RECV_PACKET_PROCESS_CONTINUE = 1,
-};
-
-struct gdm {
- struct tty_dev *tty_dev;
- struct tty_port port;
- unsigned int index;
- unsigned int minor;
-};
-
-struct tty_dev {
- void *priv_dev;
- int (*send_func)(void *priv_dev,
- void *data,
- int len,
- int tty_index,
- void (*cb)(void *cb_data),
- void *cb_data);
- int (*recv_func)(void *priv_dev,
- int (*cb)(void *data,
- int len,
- int tty_index,
- struct tty_dev *tty_dev,
- int complete));
- int (*send_control)(void *priv_dev,
- int request,
- int value,
- void *data,
- int len);
- struct gdm *gdm[2];
-};
-
-int register_lte_tty_driver(void);
-void unregister_lte_tty_driver(void);
-int register_lte_tty_device(struct tty_dev *tty_dev, struct device *dev);
-void unregister_lte_tty_device(struct tty_dev *tty_dev);
-
-#endif /* _GDM_TTY_H_ */
-
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
deleted file mode 100644
index ed1a12f504e2..000000000000
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ /dev/null
@@ -1,1041 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/usb.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/usb/cdc.h>
-#include <linux/wait.h>
-#include <linux/if_ether.h>
-#include <linux/pm_runtime.h>
-
-#include "gdm_usb.h"
-#include "gdm_lte.h"
-#include "hci.h"
-#include "hci_packet.h"
-#include "gdm_endian.h"
-
-#define USB_DEVICE_CDC_DATA(vid, pid) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
- USB_DEVICE_ID_MATCH_INT_CLASS | \
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
- .idVendor = vid,\
- .idProduct = pid,\
- .bInterfaceClass = USB_CLASS_COMM,\
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
-
-#define USB_DEVICE_MASS_DATA(vid, pid) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
- USB_DEVICE_ID_MATCH_INT_INFO,\
- .idVendor = vid,\
- .idProduct = pid,\
- .bInterfaceSubClass = USB_SC_SCSI, \
- .bInterfaceClass = USB_CLASS_MASS_STORAGE,\
- .bInterfaceProtocol = USB_PR_BULK
-
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
- { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
- { }
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct workqueue_struct *usb_tx_wq;
-static struct workqueue_struct *usb_rx_wq;
-
-static void do_tx(struct work_struct *work);
-static void do_rx(struct work_struct *work);
-
-static int gdm_usb_recv(void *priv_dev,
- int (*cb)(void *cb_data,
- void *data, int len, int context),
- void *cb_data,
- int context);
-
-static int request_mac_address(struct lte_udev *udev)
-{
- u8 buf[16] = {0,};
- struct hci_packet *hci = (struct hci_packet *)buf;
- struct usb_device *usbdev = udev->usbdev;
- int actual;
- int ret = -1;
-
- hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
- hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
- hci->data[0] = MAC_ADDRESS;
-
- ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
- &actual, 1000);
-
- udev->request_mac_addr = 1;
-
- return ret;
-}
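The five bytes transmitted above are a struct hci_packet: a 4-byte header (cmd_evt, len) followed by one payload byte selecting MAC_ADDRESS. A hypothetical helper that builds the same request, reusing the driver's endian converter (fill_get_info_request() is illustrative only):

static int fill_get_info_request(struct hci_packet *hci,
				 struct gdm_endian *ed, u8 info_type)
{
	hci->cmd_evt = gdm_cpu_to_dev16(ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(ed, 1);	/* one payload byte */
	hci->data[0] = info_type;		/* e.g. MAC_ADDRESS */

	return HCI_HEADER_SIZE + 1;		/* bytes to send: 5 */
}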
-
-static struct usb_tx *alloc_tx_struct(int len)
-{
- struct usb_tx *t = NULL;
- int ret = 0;
-
- t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
- if (!t) {
- ret = -ENOMEM;
- goto out;
- }
-
- t->urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!(len % 512))
- len++;
-
- t->buf = kmalloc(len, GFP_ATOMIC);
- if (!t->urb || !t->buf) {
- ret = -ENOMEM;
- goto out;
- }
-
-out:
- if (ret < 0) {
- if (t) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- }
- return NULL;
- }
-
- return t;
-}
-
-static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
-{
- struct usb_tx_sdu *t_sdu;
-
- t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_KERNEL);
- if (!t_sdu)
- return NULL;
-
- t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
- if (!t_sdu->buf) {
- kfree(t_sdu);
- return NULL;
- }
-
- return t_sdu;
-}
-
-static void free_tx_struct(struct usb_tx *t)
-{
- if (t) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- }
-}
-
-static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
-{
- if (t_sdu) {
- kfree(t_sdu->buf);
- kfree(t_sdu);
- }
-}
-
-static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
-{
- struct usb_tx_sdu *t_sdu;
-
- if (list_empty(&tx->free_list))
- return NULL;
-
- t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
- list_del(&t_sdu->list);
-
- tx->avail_count--;
-
- *no_spc = list_empty(&tx->free_list) ? 1 : 0;
-
- return t_sdu;
-}
-
-static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
-{
- list_add_tail(&t_sdu->list, &tx->free_list);
- tx->avail_count++;
-}
-
-static struct usb_rx *alloc_rx_struct(void)
-{
- struct usb_rx *r = NULL;
- int ret = 0;
-
- r = kmalloc(sizeof(struct usb_rx), GFP_KERNEL);
- if (!r) {
- ret = -ENOMEM;
- goto out;
- }
-
- r->urb = usb_alloc_urb(0, GFP_KERNEL);
- r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
- if (!r->urb || !r->buf) {
- ret = -ENOMEM;
- goto out;
- }
-out:
-
- if (ret < 0) {
- if (r) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- }
- return NULL;
- }
-
- return r;
-}
-
-static void free_rx_struct(struct usb_rx *r)
-{
- if (r) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- }
-}
-
-static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
-{
- struct usb_rx *r;
- unsigned long flags;
-
- spin_lock_irqsave(&rx->rx_lock, flags);
-
- if (list_empty(&rx->free_list)) {
- spin_unlock_irqrestore(&rx->rx_lock, flags);
- return NULL;
- }
-
- r = list_entry(rx->free_list.next, struct usb_rx, free_list);
- list_del(&r->free_list);
-
- rx->avail_count--;
-
- *no_spc = list_empty(&rx->free_list) ? 1 : 0;
-
- spin_unlock_irqrestore(&rx->rx_lock, flags);
-
- return r;
-}
-
-static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rx->rx_lock, flags);
-
- list_add_tail(&r->free_list, &rx->free_list);
- rx->avail_count++;
-
- spin_unlock_irqrestore(&rx->rx_lock, flags);
-}
-
-static void release_usb(struct lte_udev *udev)
-{
- struct rx_cxt *rx = &udev->rx;
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx *t, *t_next;
- struct usb_rx *r, *r_next;
- struct usb_tx_sdu *t_sdu, *t_sdu_next;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
- list_del(&t_sdu->list);
- free_tx_sdu_struct(t_sdu);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
- list_del(&t_sdu->list);
- free_tx_sdu_struct(t_sdu);
- }
- spin_unlock_irqrestore(&tx->lock, flags);
-
- spin_lock_irqsave(&rx->submit_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
- rx_submit_list) {
- spin_unlock_irqrestore(&rx->submit_lock, flags);
- usb_kill_urb(r->urb);
- spin_lock_irqsave(&rx->submit_lock, flags);
- }
- spin_unlock_irqrestore(&rx->submit_lock, flags);
-
- spin_lock_irqsave(&rx->rx_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
- list_del(&r->free_list);
- free_rx_struct(r);
- }
- spin_unlock_irqrestore(&rx->rx_lock, flags);
-
- spin_lock_irqsave(&rx->to_host_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
- if (r->index == (void *)udev) {
- list_del(&r->to_host_list);
- free_rx_struct(r);
- }
- }
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
-}
-
-static int init_usb(struct lte_udev *udev)
-{
- int ret = 0;
- int i;
- struct tx_cxt *tx = &udev->tx;
- struct rx_cxt *rx = &udev->rx;
- struct usb_tx_sdu *t_sdu = NULL;
- struct usb_rx *r = NULL;
-
- udev->send_complete = 1;
- udev->tx_stop = 0;
- udev->request_mac_addr = 0;
- udev->usb_state = PM_NORMAL;
-
- INIT_LIST_HEAD(&tx->sdu_list);
- INIT_LIST_HEAD(&tx->hci_list);
- INIT_LIST_HEAD(&tx->free_list);
- INIT_LIST_HEAD(&rx->rx_submit_list);
- INIT_LIST_HEAD(&rx->free_list);
- INIT_LIST_HEAD(&rx->to_host_list);
- spin_lock_init(&tx->lock);
- spin_lock_init(&rx->rx_lock);
- spin_lock_init(&rx->submit_lock);
- spin_lock_init(&rx->to_host_lock);
-
- tx->avail_count = 0;
- rx->avail_count = 0;
-
- udev->rx_cb = NULL;
-
- for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
- t_sdu = alloc_tx_sdu_struct();
- if (t_sdu == NULL) {
- ret = -ENOMEM;
- goto fail;
- }
-
- list_add(&t_sdu->list, &tx->free_list);
- tx->avail_count++;
- }
-
- for (i = 0; i < MAX_RX_SUBMIT_COUNT*2; i++) {
- r = alloc_rx_struct();
- if (r == NULL) {
- ret = -ENOMEM;
- goto fail;
- }
-
- list_add(&r->free_list, &rx->free_list);
- rx->avail_count++;
- }
- INIT_DELAYED_WORK(&udev->work_tx, do_tx);
- INIT_DELAYED_WORK(&udev->work_rx, do_rx);
- return 0;
-fail:
- release_usb(udev);
- return ret;
-}
-
-static int set_mac_address(u8 *data, void *arg)
-{
- struct phy_dev *phy_dev = (struct phy_dev *)arg;
- struct lte_udev *udev = phy_dev->priv_dev;
- struct tlv *tlv = (struct tlv *)data;
- u8 mac_address[ETH_ALEN] = {0, };
-
- if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
- memcpy(mac_address, tlv->data, tlv->len);
-
- if (register_lte_device(phy_dev,
- &udev->intf->dev, mac_address) < 0)
- pr_err("register lte device failed\n");
-
- udev->request_mac_addr = 0;
-
- return 1;
- }
-
- return 0;
-}
-
-static void do_rx(struct work_struct *work)
-{
- struct lte_udev *udev =
- container_of(work, struct lte_udev, work_rx.work);
- struct rx_cxt *rx = &udev->rx;
- struct usb_rx *r;
- struct hci_packet *hci;
- struct phy_dev *phy_dev;
- u16 cmd_evt;
- int ret;
- unsigned long flags;
-
- while (1) {
- spin_lock_irqsave(&rx->to_host_lock, flags);
- if (list_empty(&rx->to_host_list)) {
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
- break;
- }
- r = list_entry(rx->to_host_list.next,
- struct usb_rx, to_host_list);
- list_del(&r->to_host_list);
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
-
- phy_dev = (struct phy_dev *)r->cb_data;
- udev = (struct lte_udev *)phy_dev->priv_dev;
- hci = (struct hci_packet *)r->buf;
- cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);
-
- switch (cmd_evt) {
- case LTE_GET_INFORMATION_RESULT:
- if (set_mac_address(hci->data, r->cb_data) == 0) {
- ret = r->callback(r->cb_data,
- r->buf,
- r->urb->actual_length,
- KERNEL_THREAD);
- }
- break;
-
- default:
- if (r->callback) {
- ret = r->callback(r->cb_data,
- r->buf,
- r->urb->actual_length,
- KERNEL_THREAD);
-
- if (ret == -EAGAIN)
- pr_err("failed to send received data\n");
- }
- break;
- }
-
- put_rx_struct(rx, r);
-
- gdm_usb_recv(udev,
- r->callback,
- r->cb_data,
- USB_COMPLETE);
- }
-}
-
-static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
-{
- unsigned long flags;
- struct usb_rx *r_remove, *r_remove_next;
-
- spin_lock_irqsave(&rx->submit_lock, flags);
- list_for_each_entry_safe(r_remove, r_remove_next,
- &rx->rx_submit_list, rx_submit_list) {
- if (r == r_remove) {
- list_del(&r->rx_submit_list);
- break;
- }
- }
- spin_unlock_irqrestore(&rx->submit_lock, flags);
-}
-
-static void gdm_usb_rcv_complete(struct urb *urb)
-{
- struct usb_rx *r = urb->context;
- struct rx_cxt *rx = r->rx;
- unsigned long flags;
- struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
- struct usb_device *usbdev = udev->usbdev;
-
- remove_rx_submit_list(r, rx);
-
- if (!urb->status && r->callback) {
- spin_lock_irqsave(&rx->to_host_lock, flags);
- list_add_tail(&r->to_host_list, &rx->to_host_list);
- queue_work(usb_rx_wq, &udev->work_rx.work);
- spin_unlock_irqrestore(&rx->to_host_lock, flags);
- } else {
- if (urb->status && udev->usb_state == PM_NORMAL)
- dev_err(&urb->dev->dev, "%s: urb status error %d\n",
- __func__, urb->status);
-
- put_rx_struct(rx, r);
- }
-
- usb_mark_last_busy(usbdev);
-}
-
-static int gdm_usb_recv(void *priv_dev,
- int (*cb)(void *cb_data,
- void *data, int len, int context),
- void *cb_data,
- int context)
-{
- struct lte_udev *udev = priv_dev;
- struct usb_device *usbdev = udev->usbdev;
- struct rx_cxt *rx = &udev->rx;
- struct usb_rx *r;
- int no_spc;
- int ret;
- unsigned long flags;
-
- if (!udev->usbdev) {
- pr_err("invalid device\n");
- return -ENODEV;
- }
-
- r = get_rx_struct(rx, &no_spc);
- if (!r) {
- pr_err("Out of Memory\n");
- return -ENOMEM;
- }
-
- udev->rx_cb = cb;
- r->callback = cb;
- r->cb_data = cb_data;
- r->index = (void *)udev;
- r->rx = rx;
-
- usb_fill_bulk_urb(r->urb,
- usbdev,
- usb_rcvbulkpipe(usbdev, 0x83),
- r->buf,
- RX_BUF_SIZE,
- gdm_usb_rcv_complete,
- r);
-
- spin_lock_irqsave(&rx->submit_lock, flags);
- list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
- spin_unlock_irqrestore(&rx->submit_lock, flags);
-
- if (context == KERNEL_THREAD)
- ret = usb_submit_urb(r->urb, GFP_KERNEL);
- else
- ret = usb_submit_urb(r->urb, GFP_ATOMIC);
-
- if (ret) {
- spin_lock_irqsave(&rx->submit_lock, flags);
- list_del(&r->rx_submit_list);
- spin_unlock_irqrestore(&rx->submit_lock, flags);
-
- pr_err("usb_submit_urb failed (%p)\n", r);
- put_rx_struct(rx, r);
- }
-
- return ret;
-}
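usb_submit_urb() above is called with GFP_KERNEL when invoked from process context (KERNEL_THREAD) and with GFP_ATOMIC when re-armed from a URB completion (USB_COMPLETE), since completion handlers must not sleep. The same decision as a standalone helper (submit_gfp() is a hypothetical name):

#include <linux/gfp.h>
#include <linux/types.h>

static inline gfp_t submit_gfp(bool may_sleep)
{
	return may_sleep ? GFP_KERNEL : GFP_ATOMIC;
}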
-
-static void gdm_usb_send_complete(struct urb *urb)
-{
- struct usb_tx *t = urb->context;
- struct tx_cxt *tx = t->tx;
- struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
- unsigned long flags;
-
- if (urb->status == -ECONNRESET) {
- dev_info(&urb->dev->dev, "CONNRESET\n");
- return;
- }
-
- if (t->callback)
- t->callback(t->cb_data);
-
- free_tx_struct(t);
-
- spin_lock_irqsave(&tx->lock, flags);
- udev->send_complete = 1;
- queue_work(usb_tx_wq, &udev->work_tx.work);
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
-static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
-{
- int ret = 0;
-
- if (!(len%512))
- len++;
-
- usb_fill_bulk_urb(t->urb,
- usbdev,
- usb_sndbulkpipe(usbdev, 2),
- t->buf,
- len,
- gdm_usb_send_complete,
- t);
-
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
-
- if (ret)
- dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
- ret);
-
- usb_mark_last_busy(usbdev);
-
- return ret;
-}
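Both alloc_tx_struct() and send_tx_packet() bump a length that is an exact multiple of 512, presumably so a bulk transfer never ends exactly on the endpoint's max-packet boundary and therefore never needs a zero-length packet to terminate. The same check written out (avoid_zlp_len() is a hypothetical name):

#include <linux/types.h>

/* Pad lengths that fall on a 512-byte boundary by one byte. */
static inline u32 avoid_zlp_len(u32 len)
{
	return (len % 512) ? len : len + 1;
}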
-
-static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
-{
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx_sdu *t_sdu = NULL;
- struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
- u16 send_len = 0;
- u16 num_packet = 0;
- unsigned long flags;
-
- multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);
-
- while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
- spin_lock_irqsave(&tx->lock, flags);
- if (list_empty(&tx->sdu_list)) {
- spin_unlock_irqrestore(&tx->lock, flags);
- break;
- }
-
- t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
- if (send_len + t_sdu->len > MAX_SDU_SIZE) {
- spin_unlock_irqrestore(&tx->lock, flags);
- break;
- }
-
- list_del(&t_sdu->list);
- spin_unlock_irqrestore(&tx->lock, flags);
-
- memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);
-
- send_len += (t_sdu->len + 3) & 0xfffc;
- num_packet++;
-
- if (tx->avail_count > 10)
- t_sdu->callback(t_sdu->cb_data);
-
- spin_lock_irqsave(&tx->lock, flags);
- put_tx_struct(tx, t_sdu);
- spin_unlock_irqrestore(&tx->lock, flags);
- }
-
- multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
- multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);
-
- return send_len + offsetof(struct multi_sdu, data);
-}
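Each aggregated SDU is placed at a 4-byte-aligned offset inside the multi_sdu buffer: "(t_sdu->len + 3) & 0xfffc" rounds the length up to the next multiple of four (for example, 61 becomes 64 and 64 stays 64). A named form of the rounding, for illustration only:

#include <linux/types.h>

static inline u16 sdu_aligned_len(u16 len)
{
	return (len + 3) & 0xfffc;	/* round up to a multiple of 4 */
}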
-
-static void do_tx(struct work_struct *work)
-{
- struct lte_udev *udev =
- container_of(work, struct lte_udev, work_tx.work);
- struct usb_device *usbdev = udev->usbdev;
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx *t = NULL;
- int is_send = 0;
- u32 len = 0;
- unsigned long flags;
-
- if (!usb_autopm_get_interface(udev->intf))
- usb_autopm_put_interface(udev->intf);
-
- if (udev->usb_state == PM_SUSPEND)
- return;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (!udev->send_complete) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
- udev->send_complete = 0;
-
- if (!list_empty(&tx->hci_list)) {
- t = list_entry(tx->hci_list.next, struct usb_tx, list);
- list_del(&t->list);
- len = t->len;
- t->is_sdu = 0;
- is_send = 1;
- } else if (!list_empty(&tx->sdu_list)) {
- if (udev->tx_stop) {
- udev->send_complete = 1;
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
-
- t = alloc_tx_struct(TX_BUF_SIZE);
- if (t == NULL) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
- t->callback = NULL;
- t->tx = tx;
- t->is_sdu = 1;
- is_send = 1;
- }
-
- if (!is_send) {
- udev->send_complete = 1;
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (t->is_sdu)
- len = packet_aggregation(udev, t->buf);
-
- if (send_tx_packet(usbdev, t, len)) {
- pr_err("send_tx_packet failed\n");
- t->callback = NULL;
- gdm_usb_send_complete(t->urb);
- }
-}
-
-#define SDU_PARAM_LEN 12
-static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
- void (*cb)(void *data), void *cb_data,
- int dev_idx, int nic_type)
-{
- struct lte_udev *udev = priv_dev;
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx_sdu *t_sdu;
- struct sdu *sdu = NULL;
- unsigned long flags;
- int no_spc = 0;
- u16 send_len;
-
- if (!udev->usbdev) {
- pr_err("sdu send - invalid device\n");
- return TX_NO_DEV;
- }
-
- spin_lock_irqsave(&tx->lock, flags);
- t_sdu = get_tx_sdu_struct(tx, &no_spc);
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (t_sdu == NULL) {
- pr_err("sdu send - free list empty\n");
- return TX_NO_SPC;
- }
-
- sdu = (struct sdu *)t_sdu->buf;
- sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
- if (nic_type == NIC_TYPE_ARP) {
- send_len = len + SDU_PARAM_LEN;
- memcpy(sdu->data, data, len);
- } else {
- send_len = len - ETH_HLEN;
- send_len += SDU_PARAM_LEN;
- memcpy(sdu->data, data+ETH_HLEN, len-ETH_HLEN);
- }
-
- sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
- sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
- sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
- sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);
-
- t_sdu->len = send_len + HCI_HEADER_SIZE;
- t_sdu->callback = cb;
- t_sdu->cb_data = cb_data;
-
- spin_lock_irqsave(&tx->lock, flags);
- list_add_tail(&t_sdu->list, &tx->sdu_list);
- queue_work(usb_tx_wq, &udev->work_tx.work);
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (no_spc)
- return TX_NO_BUFFER;
-
- return 0;
-}
-
-static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
- void (*cb)(void *data), void *cb_data)
-{
- struct lte_udev *udev = priv_dev;
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx *t;
- unsigned long flags;
-
- if (!udev->usbdev) {
- pr_err("hci send - invalid device\n");
- return -ENODEV;
- }
-
- t = alloc_tx_struct(len);
- if (t == NULL) {
- pr_err("hci_send - out of memory\n");
- return -ENOMEM;
- }
-
- memcpy(t->buf, data, len);
- t->callback = cb;
- t->cb_data = cb_data;
- t->len = len;
- t->tx = tx;
- t->is_sdu = 0;
-
- spin_lock_irqsave(&tx->lock, flags);
- list_add_tail(&t->list, &tx->hci_list);
- queue_work(usb_tx_wq, &udev->work_tx.work);
- spin_unlock_irqrestore(&tx->lock, flags);
-
- return 0;
-}
-
-static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
-{
- struct lte_udev *udev = priv_dev;
-
- return &udev->gdm_ed;
-}
-
-static int gdm_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- int ret = 0;
- struct phy_dev *phy_dev = NULL;
- struct lte_udev *udev = NULL;
- u16 idVendor, idProduct;
- int bInterfaceNumber;
- struct usb_device *usbdev = interface_to_usbdev(intf);
-
- bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
- idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
- idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
-
- pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);
-
- if (bInterfaceNumber > NETWORK_INTERFACE) {
- pr_info("not a network device\n");
- return -ENODEV;
- }
-
- phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL);
- if (!phy_dev)
- return -ENOMEM;
-
- udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL);
- if (!udev) {
- ret = -ENOMEM;
- goto err_udev;
- }
-
- phy_dev->priv_dev = (void *)udev;
- phy_dev->send_hci_func = gdm_usb_hci_send;
- phy_dev->send_sdu_func = gdm_usb_sdu_send;
- phy_dev->rcv_func = gdm_usb_recv;
- phy_dev->get_endian = gdm_usb_get_endian;
-
- udev->usbdev = usbdev;
- ret = init_usb(udev);
- if (ret < 0) {
- dev_err(intf->usb_dev, "init_usb func failed\n");
- goto err_init_usb;
- }
- udev->intf = intf;
-
- intf->needs_remote_wakeup = 1;
- usb_enable_autosuspend(usbdev);
- pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);
-
- /* Devices listed here use big-endian byte order on the wire;
- * all others default to little endian.
- */
- if (idProduct == PID_GDM7243)
- gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
- else
- gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);
-
- ret = request_mac_address(udev);
- if (ret < 0) {
- dev_err(intf->usb_dev, "request Mac address failed\n");
- goto err_mac_address;
- }
-
- start_rx_proc(phy_dev);
- usb_get_dev(usbdev);
- usb_set_intfdata(intf, phy_dev);
-
- return 0;
-
-err_mac_address:
- release_usb(udev);
-err_init_usb:
- kfree(udev);
-err_udev:
- kfree(phy_dev);
-
- return ret;
-}
-
-static void gdm_usb_disconnect(struct usb_interface *intf)
-{
- struct phy_dev *phy_dev;
- struct lte_udev *udev;
- u16 idVendor, idProduct;
- struct usb_device *usbdev;
-
- usbdev = interface_to_usbdev(intf);
-
- idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
- idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
-
- phy_dev = usb_get_intfdata(intf);
-
- udev = phy_dev->priv_dev;
- unregister_lte_device(phy_dev);
-
- release_usb(udev);
-
- kfree(udev);
- udev = NULL;
-
- kfree(phy_dev);
- phy_dev = NULL;
-
- usb_put_dev(usbdev);
-}
-
-static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
-{
- struct phy_dev *phy_dev;
- struct lte_udev *udev;
- struct rx_cxt *rx;
- struct usb_rx *r;
- struct usb_rx *r_next;
- unsigned long flags;
-
- phy_dev = usb_get_intfdata(intf);
- udev = phy_dev->priv_dev;
- rx = &udev->rx;
- if (udev->usb_state != PM_NORMAL) {
- dev_err(intf->usb_dev, "usb suspend - invalid state\n");
- return -1;
- }
-
- udev->usb_state = PM_SUSPEND;
-
- spin_lock_irqsave(&rx->submit_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
- rx_submit_list) {
- spin_unlock_irqrestore(&rx->submit_lock, flags);
- usb_kill_urb(r->urb);
- spin_lock_irqsave(&rx->submit_lock, flags);
- }
- spin_unlock_irqrestore(&rx->submit_lock, flags);
-
- return 0;
-}
-
-static int gdm_usb_resume(struct usb_interface *intf)
-{
- struct phy_dev *phy_dev;
- struct lte_udev *udev;
- struct tx_cxt *tx;
- struct rx_cxt *rx;
- unsigned long flags;
- int issue_count;
- int i;
-
- phy_dev = usb_get_intfdata(intf);
- udev = phy_dev->priv_dev;
- rx = &udev->rx;
-
- if (udev->usb_state != PM_SUSPEND) {
- dev_err(intf->usb_dev, "usb resume - invalid state\n");
- return -1;
- }
- udev->usb_state = PM_NORMAL;
-
- spin_lock_irqsave(&rx->rx_lock, flags);
- issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
- spin_unlock_irqrestore(&rx->rx_lock, flags);
-
- if (issue_count >= 0) {
- for (i = 0; i < issue_count; i++)
- gdm_usb_recv(phy_dev->priv_dev,
- udev->rx_cb,
- phy_dev,
- USB_COMPLETE);
- }
-
- tx = &udev->tx;
- spin_lock_irqsave(&tx->lock, flags);
- queue_work(usb_tx_wq, &udev->work_tx.work);
- spin_unlock_irqrestore(&tx->lock, flags);
-
- return 0;
-}
-
-static struct usb_driver gdm_usb_lte_driver = {
- .name = "gdm_lte",
- .probe = gdm_usb_probe,
- .disconnect = gdm_usb_disconnect,
- .id_table = id_table,
- .supports_autosuspend = 1,
- .suspend = gdm_usb_suspend,
- .resume = gdm_usb_resume,
- .reset_resume = gdm_usb_resume,
-};
-
-static int __init gdm_usb_lte_init(void)
-{
- if (gdm_lte_event_init() < 0) {
- pr_err("error creating event\n");
- return -1;
- }
-
- usb_tx_wq = create_workqueue("usb_tx_wq");
- if (usb_tx_wq == NULL)
- return -1;
-
- usb_rx_wq = create_workqueue("usb_rx_wq");
- if (usb_rx_wq == NULL)
- return -1;
-
- return usb_register(&gdm_usb_lte_driver);
-}
-
-static void __exit gdm_usb_lte_exit(void)
-{
- gdm_lte_event_exit();
-
- usb_deregister(&gdm_usb_lte_driver);
-
- if (usb_tx_wq) {
- flush_workqueue(usb_tx_wq);
- destroy_workqueue(usb_tx_wq);
- }
-
- if (usb_rx_wq) {
- flush_workqueue(usb_rx_wq);
- destroy_workqueue(usb_rx_wq);
- }
-}
-
-module_init(gdm_usb_lte_init);
-module_exit(gdm_usb_lte_exit);
-
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_DESCRIPTION("GCT LTE USB Device Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm724x/gdm_usb.h b/drivers/staging/gdm724x/gdm_usb.h
deleted file mode 100644
index e6486e71a428..000000000000
--- a/drivers/staging/gdm724x/gdm_usb.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _GDM_USB_H_
-#define _GDM_USB_H_
-
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/list.h>
-#include <linux/time.h>
-
-#include "gdm_endian.h"
-#include "hci_packet.h"
-
-#define PM_NORMAL 0
-#define PM_SUSPEND 1
-#define AUTO_SUSPEND_TIMER 5000 /* ms */
-
-#define RX_BUF_SIZE (1024*32)
-#define TX_BUF_SIZE (1024*32)
-#define SDU_BUF_SIZE 2048
-#define MAX_SDU_SIZE (1024*30)
-#define MAX_PACKET_IN_MULTI_SDU 256
-
-#define VID_GCT 0x1076
-#define PID_GDM7240 0x8000
-#define PID_GDM7243 0x9000
-
-#define NETWORK_INTERFACE 1
-#define USB_SC_SCSI 0x06
-#define USB_PR_BULK 0x50
-
-#define MAX_NUM_SDU_BUF 64
-
-struct usb_tx {
- struct list_head list;
- struct urb *urb;
- u8 *buf;
- u32 len;
- void (*callback)(void *cb_data);
- void *cb_data;
- struct tx_cxt *tx;
- u8 is_sdu;
-};
-
-struct usb_tx_sdu {
- struct list_head list;
- u8 *buf;
- u32 len;
- void (*callback)(void *cb_data);
- void *cb_data;
-};
-
-struct usb_rx {
- struct list_head to_host_list;
- struct list_head free_list;
- struct list_head rx_submit_list;
- struct rx_cxt *rx;
- struct urb *urb;
- u8 *buf;
- int (*callback)(void *cb_data, void *data, int len, int context);
- void *cb_data;
- void *index;
-};
-
-struct tx_cxt {
- struct list_head sdu_list;
- struct list_head hci_list;
- struct list_head free_list;
- u32 avail_count;
- spinlock_t lock;
-};
-
-struct rx_cxt {
- struct list_head to_host_list;
- struct list_head rx_submit_list;
- struct list_head free_list;
- u32 avail_count;
- spinlock_t to_host_lock;
- spinlock_t rx_lock;
- spinlock_t submit_lock;
-};
-
-struct lte_udev {
- struct usb_device *usbdev;
- struct gdm_endian gdm_ed;
- struct tx_cxt tx;
- struct rx_cxt rx;
- struct delayed_work work_tx;
- struct delayed_work work_rx;
- u8 send_complete;
- u8 tx_stop;
- struct usb_interface *intf;
- int (*rx_cb)(void *cb_data, void *data, int len, int context);
- int usb_state;
- u8 request_mac_addr;
-};
-
-#endif /* _GDM_USB_H_ */
diff --git a/drivers/staging/gdm724x/hci.h b/drivers/staging/gdm724x/hci.h
deleted file mode 100644
index 9a591b0db516..000000000000
--- a/drivers/staging/gdm724x/hci.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _HCI_H_
-#define _HCI_H_
-
-#define LTE_GET_INFORMATION 0x3002
-#define LTE_GET_INFORMATION_RESULT 0xB003
- #define MAC_ADDRESS 0xA2
-
-#define LTE_LINK_ON_OFF_INDICATION 0xB133
-#define LTE_PDN_TABLE_IND 0xB143
-
-#define LTE_TX_SDU 0x3200
-#define LTE_RX_SDU 0xB201
-#define LTE_TX_MULTI_SDU 0x3202
-#define LTE_RX_MULTI_SDU 0xB203
-
-#define LTE_DL_SDU_FLOW_CONTROL 0x3305
-#define LTE_UL_SDU_FLOW_CONTROL 0xB306
-
-#define LTE_AT_CMD_TO_DEVICE 0x3307
-#define LTE_AT_CMD_FROM_DEVICE 0xB308
-
-#define LTE_SDIO_DM_SEND_PKT 0x3312
-#define LTE_SDIO_DM_RECV_PKT 0xB313
-
-#define LTE_NV_RESTORE_REQUEST 0xB30C
-#define LTE_NV_RESTORE_RESPONSE 0x330D
-#define LTE_NV_SAVE_REQUEST 0xB30E
- #define NV_TYPE_LTE_INFO 0x00
- #define NV_TYPE_BOARD_CONFIG 0x01
- #define NV_TYPE_RF_CAL 0x02
- #define NV_TYPE_TEMP 0x03
- #define NV_TYPE_NET_INFO 0x04
- #define NV_TYPE_SAFETY_INFO 0x05
- #define NV_TYPE_CDMA_CAL 0x06
- #define NV_TYPE_VENDOR 0x07
- #define NV_TYPE_ALL 0xff
-#define LTE_NV_SAVE_RESPONSE 0x330F
-
-#define LTE_AT_CMD_TO_DEVICE_EXT 0x3323
-#define LTE_AT_CMD_FROM_DEVICE_EXT 0xB324
-
-#endif /* _HCI_H_ */
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
deleted file mode 100644
index 7fba8a687faf..000000000000
--- a/drivers/staging/gdm724x/hci_packet.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _HCI_PACKET_H_
-#define _HCI_PACKET_H_
-
-#define HCI_HEADER_SIZE 4
-
-/*
- * The NIC type definition:
- * For backward compatibility, the lower 16 bits are used as before.
- * Lower 16 bits: NIC_TYPE values
- * Upper 16 bits: NIC_TYPE flags
- */
-#define NIC_TYPE_NIC0 0x00000010
-#define NIC_TYPE_NIC1 0x00000011
-#define NIC_TYPE_NIC2 0x00000012
-#define NIC_TYPE_NIC3 0x00000013
-#define NIC_TYPE_ARP 0x00000100
-#define NIC_TYPE_ICMPV6 0x00000200
-#define NIC_TYPE_MASK 0x0000FFFF
-#define NIC_TYPE_F_IPV4 0x00010000
-#define NIC_TYPE_F_IPV6 0x00020000
-#define NIC_TYPE_F_DHCP 0x00040000
-#define NIC_TYPE_F_NDP 0x00080000
-#define NIC_TYPE_F_VLAN 0x00100000
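The comment above splits the 32-bit value: NIC_TYPE_MASK selects the low 16 bits (the NIC index or a special type such as ARP), while the high 16 bits carry capability flags. Hypothetical accessors showing the intended use (not part of the driver):

#include <linux/types.h>

static inline u32 nic_type_base(u32 nic_type)
{
	return nic_type & NIC_TYPE_MASK;	/* e.g. NIC_TYPE_NIC0 or NIC_TYPE_ARP */
}

static inline bool nic_type_has_flag(u32 nic_type, u32 flag)
{
	return (nic_type & flag) != 0;		/* e.g. NIC_TYPE_F_IPV6 */
}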
-
-struct hci_packet {
- u16 cmd_evt;
- u16 len;
- u8 data[0];
-} __packed;
-
-struct tlv {
- u8 type;
- u8 len;
- u8 data[0];
-} __packed;
-
-struct sdu_header {
- u16 cmd_evt;
- u16 len;
- u32 dftEpsId;
- u32 bearer_ID;
- u32 nic_type;
-} __packed;
-
-struct sdu {
- u16 cmd_evt;
- u16 len;
- u32 dftEpsId;
- u32 bearer_ID;
- u32 nic_type;
- u8 data[0];
-} __packed;
-
-struct multi_sdu {
- u16 cmd_evt;
- u16 len;
- u16 num_packet;
- u16 reserved;
- u8 data[0];
-} __packed;
-
-struct hci_pdn_table_ind {
- u16 cmd_evt;
- u16 len;
- u8 activate;
- u32 dft_eps_id;
- u32 nic_type;
- u8 pdn_type;
- u8 ipv4_addr[4];
- u8 ipv6_intf_id[8];
-} __packed;
-
-struct hci_connect_ind {
- u16 cmd_evt;
- u16 len;
- u32 connect;
-} __packed;
-
-
-#endif /* _HCI_PACKET_H_ */
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
deleted file mode 100644
index 59a18304ef4a..000000000000
--- a/drivers/staging/gdm724x/netlink_k.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/export.h>
-#include <linux/etherdevice.h>
-#include <linux/netlink.h>
-#include <asm/byteorder.h>
-#include <net/sock.h>
-
-#include "netlink_k.h"
-
-#if defined(DEFINE_MUTEX)
-static DEFINE_MUTEX(netlink_mutex);
-#else
-static struct semaphore netlink_mutex;
-#define mutex_lock(x) down(x)
-#define mutex_unlock(x) up(x)
-#endif
-
-#define ND_MAX_GROUP 30
-#define ND_IFINDEX_LEN sizeof(int)
-#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
-#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + \
- ND_IFINDEX_LEN))
-#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
-#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
-#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
-#define ND_MAX_MSG_LEN (1024 * 32)
-
-static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
-
-static void netlink_rcv_cb(struct sk_buff *skb)
-{
- struct nlmsghdr *nlh;
- struct net_device *dev;
- u32 mlen;
- void *msg;
- int ifindex;
-
- if (!rcv_cb) {
- pr_err("nl cb - unregistered\n");
- return;
- }
-
- if (skb->len < NLMSG_HDRLEN) {
- pr_err("nl cb - invalid skb length\n");
- return;
- }
-
- nlh = (struct nlmsghdr *)skb->data;
-
- if (skb->len < nlh->nlmsg_len || nlh->nlmsg_len > ND_MAX_MSG_LEN) {
- pr_err("nl cb - invalid length (%d,%d)\n",
- skb->len, nlh->nlmsg_len);
- return;
- }
-
- memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN);
- msg = ND_NLMSG_DATA(nlh);
- mlen = ND_NLMSG_R_LEN(nlh);
-
- dev = dev_get_by_index(&init_net, ifindex);
- if (dev) {
- rcv_cb(dev, nlh->nlmsg_type, msg, mlen);
- dev_put(dev);
- } else {
- pr_err("nl cb - dev (%d) not found\n", ifindex);
- }
-}
-
-static void netlink_rcv(struct sk_buff *skb)
-{
- mutex_lock(&netlink_mutex);
- netlink_rcv_cb(skb);
- mutex_unlock(&netlink_mutex);
-}
-
-struct sock *netlink_init(int unit,
- void (*cb)(struct net_device *dev, u16 type, void *msg, int len))
-{
- struct sock *sock;
- struct netlink_kernel_cfg cfg = {
- .input = netlink_rcv,
- };
-
-#if !defined(DEFINE_MUTEX)
- init_MUTEX(&netlink_mutex);
-#endif
-
- sock = netlink_kernel_create(&init_net, unit, &cfg);
-
- if (sock)
- rcv_cb = cb;
-
- return sock;
-}
-
-void netlink_exit(struct sock *sock)
-{
- sock_release(sock->sk_socket);
-}
-
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
-{
- static u32 seq;
- struct sk_buff *skb = NULL;
- struct nlmsghdr *nlh;
- int ret = 0;
-
- if (group > ND_MAX_GROUP)
- return -EINVAL;
-
- if (!netlink_has_listeners(sock, group+1))
- return -ESRCH;
-
- skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- seq++;
-
- nlh = nlmsg_put(skb, 0, seq, type, len, 0);
- memcpy(NLMSG_DATA(nlh), msg, len);
- NETLINK_CB(skb).portid = 0;
- NETLINK_CB(skb).dst_group = 0;
-
- ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
- if (!ret)
- return len;
-
- if (ret != -ESRCH)
- pr_err("nl broadcast g=%d, t=%d, l=%d, r=%d\n",
- group, type, len, ret);
- else if (netlink_has_listeners(sock, group+1))
- return -EAGAIN;
-
- return ret;
-}
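A sketch of how a caller would use these helpers (the driver's real users live in gdm_lte.c, which is not part of this hunk): create the kernel-side socket once at init and hand in a receive callback. The unit number and all demo_* names below are illustrative assumptions, not the driver's actual choices.

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <net/sock.h>

static struct sock *demo_nl_sock;

static void demo_nl_rcv(struct net_device *dev, u16 type, void *msg, int len)
{
	pr_info("netlink msg type 0x%04x, len %d via %s\n", type, len, dev->name);
}

static int demo_nl_init(void)
{
	demo_nl_sock = netlink_init(NETLINK_USERSOCK, demo_nl_rcv);
	return demo_nl_sock ? 0 : -ENOMEM;
}

static void demo_nl_cleanup(void)
{
	netlink_exit(demo_nl_sock);
}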
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
deleted file mode 100644
index 589486d76714..000000000000
--- a/drivers/staging/gdm724x/netlink_k.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _NETLINK_K_H
-#define _NETLINK_K_H
-
-#include <linux/netdevice.h>
-#include <net/sock.h>
-
-struct sock *netlink_init(int unit,
- void (*cb)(struct net_device *dev, u16 type, void *msg, int len));
-void netlink_exit(struct sock *sock);
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
-
-#endif /* _NETLINK_K_H_ */
diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig
deleted file mode 100644
index bf11a7fbfc51..000000000000
--- a/drivers/staging/gdm72xx/Kconfig
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# GCT GDM72xx WiMAX driver configuration
-#
-
-menuconfig WIMAX_GDM72XX
- tristate "GCT GDM72xx WiMAX support"
- depends on NET && (USB || MMC)
- help
- Support a WiMAX module based on the GCT GDM72xx WiMAX chip.
-
-if WIMAX_GDM72XX
-
-config WIMAX_GDM72XX_QOS
- bool "Enable QoS support"
- default n
- help
- Enable Quality of Service support based on the data protocol of
- transmitting packets.
-
-config WIMAX_GDM72XX_K_MODE
- bool "Enable K mode"
- default n
- help
- Enable support for proprietary functions for KT (Korea Telecom).
-
-config WIMAX_GDM72XX_WIMAX2
- bool "Enable WiMAX2 support"
- default n
- help
- Enable support for transmitting multiple packets (packet
- aggregation) from the WiMAX module to the host processor.
-
-choice
- prompt "Select interface"
-
-config WIMAX_GDM72XX_USB
- bool "USB interface"
- depends on (USB = y || USB = WIMAX_GDM72XX)
- help
- Select this option if the WiMAX module interfaces with the host
- processor via USB.
-
-config WIMAX_GDM72XX_SDIO
- bool "SDIO interface"
- depends on (MMC = y || MMC = WIMAX_GDM72XX)
- help
- Select this option if the WiMAX module interfaces with the host
- processor via SDIO.
-
-endchoice
-
-if WIMAX_GDM72XX_USB
-
-config WIMAX_GDM72XX_USB_PM
- bool "Enable power management support"
- depends on PM
- help
- Enable USB power management in order to reduce power consumption
- while the interface is not in use.
-
-endif # WIMAX_GDM72XX_USB
-
-endif # WIMAX_GDM72XX
diff --git a/drivers/staging/gdm72xx/Makefile b/drivers/staging/gdm72xx/Makefile
deleted file mode 100644
index 35da7b90b19b..000000000000
--- a/drivers/staging/gdm72xx/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_WIMAX_GDM72XX) := gdmwm.o
-
-gdmwm-y += gdm_wimax.o netlink_k.o
-gdmwm-$(CONFIG_WIMAX_GDM72XX_QOS) += gdm_qos.o
-gdmwm-$(CONFIG_WIMAX_GDM72XX_SDIO) += gdm_sdio.o sdio_boot.o
-gdmwm-$(CONFIG_WIMAX_GDM72XX_USB) += gdm_usb.o usb_boot.o
diff --git a/drivers/staging/gdm72xx/TODO b/drivers/staging/gdm72xx/TODO
deleted file mode 100644
index 62d0cd6225c8..000000000000
--- a/drivers/staging/gdm72xx/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-TODO:
-- Clean up coding style to meet kernel standard.
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
deleted file mode 100644
index 72c0f7ef8f2b..000000000000
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/etherdevice.h>
-#include <asm/byteorder.h>
-
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if_ether.h>
-
-#include "gdm_wimax.h"
-#include "hci.h"
-#include "gdm_qos.h"
-
-#define MAX_FREE_LIST_CNT 32
-static struct {
- struct list_head head;
- int cnt;
- spinlock_t lock;
-} qos_free_list;
-
-static void init_qos_entry_list(void)
-{
- qos_free_list.cnt = 0;
- INIT_LIST_HEAD(&qos_free_list.head);
- spin_lock_init(&qos_free_list.lock);
-}
-
-static void *alloc_qos_entry(void)
-{
- struct qos_entry_s *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&qos_free_list.lock, flags);
- if (qos_free_list.cnt) {
- entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
- list);
- list_del(&entry->list);
- qos_free_list.cnt--;
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
- return entry;
- }
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
-
- return kmalloc(sizeof(*entry), GFP_ATOMIC);
-}
-
-static void free_qos_entry(void *entry)
-{
- struct qos_entry_s *qentry = (struct qos_entry_s *)entry;
- unsigned long flags;
-
- spin_lock_irqsave(&qos_free_list.lock, flags);
- if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
- list_add(&qentry->list, &qos_free_list.head);
- qos_free_list.cnt++;
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
- return;
- }
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
-
- kfree(entry);
-}
-
-static void free_qos_entry_list(struct list_head *free_list)
-{
- struct qos_entry_s *entry, *n;
- int total_free = 0;
-
- list_for_each_entry_safe(entry, n, free_list, list) {
- list_del(&entry->list);
- kfree(entry);
- total_free++;
- }
-
- pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
-}
-
-void gdm_qos_init(void *nic_ptr)
-{
- struct nic *nic = nic_ptr;
- struct qos_cb_s *qcb = &nic->qos;
- int i;
-
- for (i = 0; i < QOS_MAX; i++) {
- INIT_LIST_HEAD(&qcb->qos_list[i]);
- qcb->csr[i].qos_buf_count = 0;
- qcb->csr[i].enabled = false;
- }
-
- qcb->qos_list_cnt = 0;
- qcb->qos_null_idx = QOS_MAX-1;
- qcb->qos_limit_size = 255;
-
- spin_lock_init(&qcb->qos_lock);
-
- init_qos_entry_list();
-}
-
-void gdm_qos_release_list(void *nic_ptr)
-{
- struct nic *nic = nic_ptr;
- struct qos_cb_s *qcb = &nic->qos;
- unsigned long flags;
- struct qos_entry_s *entry, *n;
- struct list_head free_list;
- int i;
-
- INIT_LIST_HEAD(&free_list);
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
-
- for (i = 0; i < QOS_MAX; i++) {
- qcb->csr[i].qos_buf_count = 0;
- qcb->csr[i].enabled = false;
- }
-
- qcb->qos_list_cnt = 0;
- qcb->qos_null_idx = QOS_MAX-1;
-
- for (i = 0; i < QOS_MAX; i++) {
- list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
- list_move_tail(&entry->list, &free_list);
- }
- }
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- free_qos_entry_list(&free_list);
-}
-
-static int chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
-{
- int i;
-
- if (csr->classifier_rule_en&IPTYPEOFSERVICE) {
- if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
- ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
- return 1;
- }
-
- if (csr->classifier_rule_en&PROTOCOL) {
- if (stream[9] != csr->protocol)
- return 1;
- }
-
- if (csr->classifier_rule_en&IPMASKEDSRCADDRESS) {
- for (i = 0; i < 4; i++) {
- if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
- (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
- return 1;
- }
- }
-
- if (csr->classifier_rule_en&IPMASKEDDSTADDRESS) {
- for (i = 0; i < 4; i++) {
- if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
- (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
- return 1;
- }
- }
-
- if (csr->classifier_rule_en&PROTOCOLSRCPORTRANGE) {
- i = ((port[0]<<8)&0xff00)+port[1];
- if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
- return 1;
- }
-
- if (csr->classifier_rule_en&PROTOCOLDSTPORTRANGE) {
- i = ((port[2]<<8)&0xff00)+port[3];
- if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
- return 1;
- }
-
- return 0;
-}
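The indices above are raw offsets into the IPv4 and transport headers: stream[1] is the TOS byte, stream[9] the protocol, stream[12..15] and stream[16..19] the source and destination addresses, and port[0..3] the TCP/UDP source and destination ports. For comparison, a hypothetical equivalent of the protocol check using struct iphdr (ipv4_protocol_matches() is not part of the driver):

#include <linux/ip.h>
#include <linux/types.h>

static bool ipv4_protocol_matches(const u8 *stream, u8 wanted)
{
	const struct iphdr *iph = (const struct iphdr *)stream;

	return iph->protocol == wanted;		/* same byte as stream[9] */
}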
-
-static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
-{
- int ip_ver, i;
- struct qos_cb_s *qcb = &nic->qos;
-
- if (!iph || !tcpudph)
- return -1;
-
- ip_ver = (iph[0]>>4)&0xf;
-
- if (ip_ver != 4)
- return -1;
-
- for (i = 0; i < QOS_MAX; i++) {
- if (!qcb->csr[i].enabled)
- continue;
- if (!qcb->csr[i].classifier_rule_en)
- continue;
- if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
- return i;
- }
-
- return -1;
-}
-
-static void extract_qos_list(struct nic *nic, struct list_head *head)
-{
- struct qos_cb_s *qcb = &nic->qos;
- struct qos_entry_s *entry;
- int i;
-
- INIT_LIST_HEAD(head);
-
- for (i = 0; i < QOS_MAX; i++) {
- if (!qcb->csr[i].enabled)
- continue;
- if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size)
- continue;
- if (list_empty(&qcb->qos_list[i]))
- continue;
-
- entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
- list);
-
- list_move_tail(&entry->list, head);
- qcb->csr[i].qos_buf_count++;
-
- if (!list_empty(&qcb->qos_list[i]))
- netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i);
- }
-}
-
-static void send_qos_list(struct nic *nic, struct list_head *head)
-{
- struct qos_entry_s *entry, *n;
-
- list_for_each_entry_safe(entry, n, head, list) {
- list_del(&entry->list);
- gdm_wimax_send_tx(entry->skb, entry->dev);
- free_qos_entry(entry);
- }
-}
-
-int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- int index;
- struct qos_cb_s *qcb = &nic->qos;
- unsigned long flags;
- struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
- struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
- struct tcphdr *tcph;
- struct qos_entry_s *entry = NULL;
- struct list_head send_list;
- int ret = 0;
-
- /* The transport header starts ihl * 4 bytes past the start of the IP header. */
- tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);
-
- if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) {
- if (qcb->qos_list_cnt && !qos_free_list.cnt) {
- entry = alloc_qos_entry();
- entry->skb = skb;
- entry->dev = dev;
- netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
- qcb->qos_list_cnt);
- }
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
- if (qcb->qos_list_cnt) {
- index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
- if (index == -1)
- index = qcb->qos_null_idx;
-
- if (!entry) {
- entry = alloc_qos_entry();
- entry->skb = skb;
- entry->dev = dev;
- }
-
- list_add_tail(&entry->list, &qcb->qos_list[index]);
- extract_qos_list(nic, &send_list);
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- send_qos_list(nic, &send_list);
- goto out;
- }
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- if (entry)
- free_qos_entry(entry);
- }
-
- ret = gdm_wimax_send_tx(skb, dev);
-out:
- return ret;
-}
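
gdm_qos_send_hci_pkt() assumes the outgoing buffer is laid out as HCI header, Ethernet header, then IPv4, and locates the transport header iph->ihl * 4 bytes into the IP header before classifying the packet. A standalone sketch of those offsets; ETH_HLEN is 14, while the HCI header size is assumed to be 4 bytes here for illustration (the real value comes from hci.h, which is not part of this diff):

#include <stdint.h>

#define EX_HCI_HEADER_SIZE 4	/* assumed for this sketch */
#define EX_ETH_HLEN 14

static const uint8_t *ip_header(const uint8_t *pkt)
{
	return pkt + EX_HCI_HEADER_SIZE + EX_ETH_HLEN;
}

static const uint8_t *transport_header(const uint8_t *pkt)
{
	const uint8_t *iph = ip_header(pkt);
	unsigned int ihl = iph[0] & 0x0f;	/* IPv4 header length in 32-bit words */

	return iph + ihl * 4;	/* advance in bytes, not tcphdr-sized steps */
}
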
-
-static int get_csr(struct qos_cb_s *qcb, u32 sfid, int mode)
-{
- int i;
-
- for (i = 0; i < qcb->qos_list_cnt; i++) {
- if (qcb->csr[i].sfid == sfid)
- return i;
- }
-
- if (mode) {
- for (i = 0; i < QOS_MAX; i++) {
- if (!qcb->csr[i].enabled) {
- qcb->csr[i].enabled = true;
- qcb->qos_list_cnt++;
- return i;
- }
- }
- }
- return -1;
-}
-
-#define QOS_CHANGE_DEL 0xFC
-#define QOS_ADD 0xFD
-#define QOS_REPORT 0xFE
-
-void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
-{
- struct nic *nic = nic_ptr;
- int i, index, pos;
- u32 sfid;
- u8 sub_cmd_evt;
- struct qos_cb_s *qcb = &nic->qos;
- struct qos_entry_s *entry, *n;
- struct list_head send_list;
- struct list_head free_list;
- unsigned long flags;
-
- sub_cmd_evt = (u8)buf[4];
-
- if (sub_cmd_evt == QOS_REPORT) {
- spin_lock_irqsave(&qcb->qos_lock, flags);
- for (i = 0; i < qcb->qos_list_cnt; i++) {
- sfid = ((buf[(i*5)+6]<<24)&0xff000000);
- sfid += ((buf[(i*5)+7]<<16)&0xff0000);
- sfid += ((buf[(i*5)+8]<<8)&0xff00);
- sfid += (buf[(i*5)+9]);
- index = get_csr(qcb, sfid, 0);
- if (index == -1) {
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- netdev_err(nic->netdev, "QoS ERROR: No SF\n");
- return;
- }
- qcb->csr[index].qos_buf_count = buf[(i*5)+10];
- }
-
- extract_qos_list(nic, &send_list);
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- send_qos_list(nic, &send_list);
- return;
- }
-
- /* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANGE_DEL */
- pos = 6;
- sfid = ((buf[pos++]<<24)&0xff000000);
- sfid += ((buf[pos++]<<16)&0xff0000);
- sfid += ((buf[pos++]<<8)&0xff00);
- sfid += (buf[pos++]);
-
- index = get_csr(qcb, sfid, 1);
- if (index == -1) {
- netdev_err(nic->netdev,
- "QoS ERROR: csr Update Error / Wrong index (%d)\n",
- index);
- return;
- }
-
- if (sub_cmd_evt == QOS_ADD) {
- netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
- sfid, index);
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
- qcb->csr[index].sfid = sfid;
- qcb->csr[index].classifier_rule_en = ((buf[pos++]<<8)&0xff00);
- qcb->csr[index].classifier_rule_en += buf[pos++];
- if (qcb->csr[index].classifier_rule_en == 0)
- qcb->qos_null_idx = index;
- qcb->csr[index].ip2s_mask = buf[pos++];
- qcb->csr[index].ip2s_lo = buf[pos++];
- qcb->csr[index].ip2s_hi = buf[pos++];
- qcb->csr[index].protocol = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
- qcb->csr[index].ipsrc_addr[0] = buf[pos++];
- qcb->csr[index].ipsrc_addr[1] = buf[pos++];
- qcb->csr[index].ipsrc_addr[2] = buf[pos++];
- qcb->csr[index].ipsrc_addr[3] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
- qcb->csr[index].ipdst_addr[0] = buf[pos++];
- qcb->csr[index].ipdst_addr[1] = buf[pos++];
- qcb->csr[index].ipdst_addr[2] = buf[pos++];
- qcb->csr[index].ipdst_addr[3] = buf[pos++];
- qcb->csr[index].srcport_lo = ((buf[pos++]<<8)&0xff00);
- qcb->csr[index].srcport_lo += buf[pos++];
- qcb->csr[index].srcport_hi = ((buf[pos++]<<8)&0xff00);
- qcb->csr[index].srcport_hi += buf[pos++];
- qcb->csr[index].dstport_lo = ((buf[pos++]<<8)&0xff00);
- qcb->csr[index].dstport_lo += buf[pos++];
- qcb->csr[index].dstport_hi = ((buf[pos++]<<8)&0xff00);
- qcb->csr[index].dstport_hi += buf[pos++];
-
- qcb->qos_limit_size = 254/qcb->qos_list_cnt;
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- } else if (sub_cmd_evt == QOS_CHANGE_DEL) {
- netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
- sfid, index);
-
- INIT_LIST_HEAD(&free_list);
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
- qcb->csr[index].enabled = false;
- qcb->qos_list_cnt--;
- /* Avoid a division by zero when the last service flow is removed. */
- if (qcb->qos_list_cnt)
- qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
-
- list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
- list) {
- list_move_tail(&entry->list, &free_list);
- }
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- free_qos_entry_list(&free_list);
- }
-}
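
The QoS HCI events above carry the 32-bit SFID big-endian, and a QOS_REPORT packs one 5-byte record per active service flow (4 bytes of SFID followed by 1 byte of buffer count) starting at offset 6. A standalone sketch of that field extraction, with a hypothetical be32_at() helper that is not part of the driver:

#include <stdint.h>

/* Hypothetical helper matching the byte shifting used above. */
static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

struct qos_report_rec {
	uint32_t sfid;
	uint8_t buf_count;
};

/* QOS_REPORT layout: records start at offset 6 and are 5 bytes each. */
static struct qos_report_rec qos_report_record(const uint8_t *buf, int i)
{
	struct qos_report_rec rec = {
		.sfid = be32_at(&buf[i * 5 + 6]),
		.buf_count = buf[i * 5 + 10],
	};

	return rec;
}
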
diff --git a/drivers/staging/gdm72xx/gdm_qos.h b/drivers/staging/gdm72xx/gdm_qos.h
deleted file mode 100644
index bbc8aab338b5..000000000000
--- a/drivers/staging/gdm72xx/gdm_qos.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_QOS_H__
-#define __GDM72XX_GDM_QOS_H__
-
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/list.h>
-
-#define QOS_MAX 16
-#define IPTYPEOFSERVICE 0x8000
-#define PROTOCOL 0x4000
-#define IPMASKEDSRCADDRESS 0x2000
-#define IPMASKEDDSTADDRESS 0x1000
-#define PROTOCOLSRCPORTRANGE 0x800
-#define PROTOCOLDSTPORTRANGE 0x400
-#define DSTMACADDR 0x200
-#define SRCMACADDR 0x100
-#define ETHERTYPE 0x80
-#define IEEE802_1DUSERPRIORITY 0x40
-#define IEEE802_1QVLANID 0x10
-
-struct gdm_wimax_csr_s {
- bool enabled;
- u32 sfid;
- u8 qos_buf_count;
- u16 classifier_rule_en;
- u8 ip2s_lo;
- u8 ip2s_hi;
- u8 ip2s_mask;
- u8 protocol;
- u8 ipsrc_addr[16];
- u8 ipsrc_addrmask[16];
- u8 ipdst_addr[16];
- u8 ipdst_addrmask[16];
- u16 srcport_lo;
- u16 srcport_hi;
- u16 dstport_lo;
- u16 dstport_hi;
-};
-
-struct qos_entry_s {
- struct list_head list;
- struct sk_buff *skb;
- struct net_device *dev;
-
-};
-
-struct qos_cb_s {
- struct list_head qos_list[QOS_MAX];
- int qos_list_cnt;
- int qos_null_idx;
- struct gdm_wimax_csr_s csr[QOS_MAX];
- spinlock_t qos_lock;
- int qos_limit_size;
-};
-
-void gdm_qos_init(void *nic_ptr);
-void gdm_qos_release_list(void *nic_ptr);
-int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev);
-void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size);
-
-#endif /* __GDM72XX_GDM_QOS_H__ */
diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c
deleted file mode 100644
index b0521da3c793..000000000000
--- a/drivers/staging/gdm72xx/gdm_sdio.c
+++ /dev/null
@@ -1,701 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-
-#include "gdm_sdio.h"
-#include "gdm_wimax.h"
-#include "sdio_boot.h"
-#include "hci.h"
-
-#define TYPE_A_HEADER_SIZE 4
-#define TYPE_A_LOOKAHEAD_SIZE 16
-
-#define MAX_NR_RX_BUF 4
-
-#define SDU_TX_BUF_SIZE 2048
-#define TX_BUF_SIZE 2048
-#define TX_CHUNK_SIZE (2048 - TYPE_A_HEADER_SIZE)
-#define RX_BUF_SIZE (25*1024)
-
-#define TX_HZ 2000
-#define TX_INTERVAL (1000000/TX_HZ)
-
-static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx)
-{
- struct sdio_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
-
- if (!t)
- return NULL;
-
- t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
- if (!t->buf) {
- kfree(t);
- return NULL;
- }
-
- t->tx_cxt = tx;
-
- return t;
-}
-
-static void free_tx_struct(struct sdio_tx *t)
-{
- if (t) {
- kfree(t->buf);
- kfree(t);
- }
-}
-
-static struct sdio_rx *alloc_rx_struct(struct rx_cxt *rx)
-{
- struct sdio_rx *r = kzalloc(sizeof(*r), GFP_ATOMIC);
-
- if (r)
- r->rx_cxt = rx;
-
- return r;
-}
-
-static void free_rx_struct(struct sdio_rx *r)
-{
- kfree(r);
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct sdio_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc)
-{
- struct sdio_tx *t;
-
- if (list_empty(&tx->free_list))
- return NULL;
-
- t = list_entry(tx->free_list.prev, struct sdio_tx, list);
- list_del(&t->list);
-
- *no_spc = list_empty(&tx->free_list) ? 1 : 0;
-
- return t;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_tx_struct(struct tx_cxt *tx, struct sdio_tx *t)
-{
- list_add_tail(&t->list, &tx->free_list);
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct sdio_rx *get_rx_struct(struct rx_cxt *rx)
-{
- struct sdio_rx *r;
-
- if (list_empty(&rx->free_list))
- return NULL;
-
- r = list_entry(rx->free_list.prev, struct sdio_rx, list);
- list_del(&r->list);
-
- return r;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_rx_struct(struct rx_cxt *rx, struct sdio_rx *r)
-{
- list_add_tail(&r->list, &rx->free_list);
-}
-
-static void release_sdio(struct sdiowm_dev *sdev)
-{
- struct tx_cxt *tx = &sdev->tx;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_tx *t, *t_next;
- struct sdio_rx *r, *r_next;
-
- kfree(tx->sdu_buf);
-
- list_for_each_entry_safe(t, t_next, &tx->free_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- kfree(rx->rx_buf);
-
- list_for_each_entry_safe(r, r_next, &rx->free_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-
- list_for_each_entry_safe(r, r_next, &rx->req_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-}
-
-static int init_sdio(struct sdiowm_dev *sdev)
-{
- int ret = 0, i;
- struct tx_cxt *tx = &sdev->tx;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_tx *t;
- struct sdio_rx *r;
-
- INIT_LIST_HEAD(&tx->free_list);
- INIT_LIST_HEAD(&tx->sdu_list);
- INIT_LIST_HEAD(&tx->hci_list);
-
- spin_lock_init(&tx->lock);
-
- tx->sdu_buf = kmalloc(SDU_TX_BUF_SIZE, GFP_KERNEL);
- if (!tx->sdu_buf)
- goto fail;
-
- for (i = 0; i < MAX_NR_SDU_BUF; i++) {
- t = alloc_tx_struct(tx);
- if (!t) {
- ret = -ENOMEM;
- goto fail;
- }
- list_add(&t->list, &tx->free_list);
- }
-
- INIT_LIST_HEAD(&rx->free_list);
- INIT_LIST_HEAD(&rx->req_list);
-
- spin_lock_init(&rx->lock);
-
- for (i = 0; i < MAX_NR_RX_BUF; i++) {
- r = alloc_rx_struct(rx);
- if (!r) {
- ret = -ENOMEM;
- goto fail;
- }
- list_add(&r->list, &rx->free_list);
- }
-
- rx->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
- if (!rx->rx_buf)
- goto fail;
-
- return 0;
-
-fail:
- release_sdio(sdev);
- return ret;
-}
-
-static void send_sdio_pkt(struct sdio_func *func, u8 *data, int len)
-{
- int n, blocks, ret, remain;
-
- sdio_claim_host(func);
-
- blocks = len / func->cur_blksize;
- n = blocks * func->cur_blksize;
- if (blocks) {
- ret = sdio_memcpy_toio(func, 0, data, n);
- if (ret < 0) {
- if (ret != -ENOMEDIUM)
- dev_err(&func->dev,
- "gdmwms: error: ret = %d\n", ret);
- goto end_io;
- }
- }
-
- remain = len - n;
- remain = (remain + 3) & ~3;
-
- if (remain) {
- ret = sdio_memcpy_toio(func, 0, data + n, remain);
- if (ret < 0) {
- if (ret != -ENOMEDIUM)
- dev_err(&func->dev,
- "gdmwms: error: ret = %d\n", ret);
- goto end_io;
- }
- }
-
-end_io:
- sdio_release_host(func);
-}
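
send_sdio_pkt() writes as many whole SDIO blocks as fit in one transfer and then pushes the tail, rounded up to a 4-byte boundary, in a second transfer. A standalone sketch of that split; the block size is a parameter here, whereas the driver reads it from func->cur_blksize:

#include <stddef.h>

struct sdio_split {
	size_t aligned;	/* bytes covered by whole blocks */
	size_t tail;	/* remaining bytes, rounded up to a multiple of 4 */
};

/* Mirrors the block/remainder split performed by send_sdio_pkt(). */
static struct sdio_split split_for_blocks(size_t len, size_t blksize)
{
	struct sdio_split s;

	s.aligned = (len / blksize) * blksize;
	s.tail = ((len - s.aligned) + 3) & ~(size_t)3;

	return s;
}
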
-
-static void send_sdu(struct sdio_func *func, struct tx_cxt *tx)
-{
- struct list_head *l, *next;
- struct hci_s *hci;
- struct sdio_tx *t;
- int pos, len, i, estlen, aggr_num = 0, aggr_len;
- u8 *buf;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- pos = TYPE_A_HEADER_SIZE + HCI_HEADER_SIZE;
- list_for_each_entry(t, &tx->sdu_list, list) {
- estlen = ((t->len + 3) & ~3) + 4;
- if ((pos + estlen) > SDU_TX_BUF_SIZE)
- break;
-
- aggr_num++;
- memcpy(tx->sdu_buf + pos, t->buf, t->len);
- memset(tx->sdu_buf + pos + t->len, 0, estlen - t->len);
- pos += estlen;
- }
- aggr_len = pos;
-
- hci = (struct hci_s *)(tx->sdu_buf + TYPE_A_HEADER_SIZE);
- hci->cmd_evt = cpu_to_be16(WIMAX_TX_SDU_AGGR);
- hci->length = cpu_to_be16(aggr_len - TYPE_A_HEADER_SIZE -
- HCI_HEADER_SIZE);
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- dev_dbg(&func->dev, "sdio_send: %*ph\n", aggr_len - TYPE_A_HEADER_SIZE,
- tx->sdu_buf + TYPE_A_HEADER_SIZE);
-
- for (pos = TYPE_A_HEADER_SIZE; pos < aggr_len; pos += TX_CHUNK_SIZE) {
- len = aggr_len - pos;
- len = len > TX_CHUNK_SIZE ? TX_CHUNK_SIZE : len;
- buf = tx->sdu_buf + pos - TYPE_A_HEADER_SIZE;
-
- buf[0] = len & 0xff;
- buf[1] = (len >> 8) & 0xff;
- buf[2] = (len >> 16) & 0xff;
- buf[3] = (pos + len) >= aggr_len ? 0 : 1;
- send_sdio_pkt(func, buf, len + TYPE_A_HEADER_SIZE);
- }
-
- spin_lock_irqsave(&tx->lock, flags);
-
- for (l = tx->sdu_list.next, i = 0; i < aggr_num; i++, l = next) {
- next = l->next;
- t = list_entry(l, struct sdio_tx, list);
- if (t->callback)
- t->callback(t->cb_data);
-
- list_del(l);
- put_tx_struct(t->tx_cxt, t);
- }
-
- do_gettimeofday(&tx->sdu_stamp);
- spin_unlock_irqrestore(&tx->lock, flags);
-}
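
Every chunk handed to the device is prefixed with a 4-byte Type-A header: a 24-bit little-endian length followed by a flag byte, where 1 means more chunks of the same aggregate follow, 0 marks the last chunk, and the HCI path below uses 2. A standalone sketch of building that header (the function name is hypothetical):

#include <stdint.h>

/* Fill a 4-byte Type-A header the way send_sdu() and gdm_sdio_send() do. */
static void type_a_header(uint8_t hdr[4], uint32_t len, uint8_t flag)
{
	hdr[0] = len & 0xff;		/* 24-bit length, little-endian */
	hdr[1] = (len >> 8) & 0xff;
	hdr[2] = (len >> 16) & 0xff;
	hdr[3] = flag;			/* 0: last chunk, 1: more follow, 2: HCI packet */
}
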
-
-static void send_hci(struct sdio_func *func, struct tx_cxt *tx,
- struct sdio_tx *t)
-{
- unsigned long flags;
-
- dev_dbg(&func->dev, "sdio_send: %*ph\n", t->len - TYPE_A_HEADER_SIZE,
- t->buf + TYPE_A_HEADER_SIZE);
-
- send_sdio_pkt(func, t->buf, t->len);
-
- spin_lock_irqsave(&tx->lock, flags);
- if (t->callback)
- t->callback(t->cb_data);
- free_tx_struct(t);
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
-static void do_tx(struct work_struct *work)
-{
- struct sdiowm_dev *sdev = container_of(work, struct sdiowm_dev, ws);
- struct sdio_func *func = sdev->func;
- struct tx_cxt *tx = &sdev->tx;
- struct sdio_tx *t = NULL;
- struct timeval now, *before;
- int is_sdu = 0;
- long diff;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (!tx->can_send) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
-
- if (!list_empty(&tx->hci_list)) {
- t = list_entry(tx->hci_list.next, struct sdio_tx, list);
- list_del(&t->list);
- is_sdu = 0;
- } else if (!tx->stop_sdu_tx && !list_empty(&tx->sdu_list)) {
- do_gettimeofday(&now);
- before = &tx->sdu_stamp;
-
- diff = (now.tv_sec - before->tv_sec) * 1000000 +
- (now.tv_usec - before->tv_usec);
- if (diff >= 0 && diff < TX_INTERVAL) {
- schedule_work(&sdev->ws);
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
- is_sdu = 1;
- }
-
- if (!is_sdu && !t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
-
- tx->can_send = 0;
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (is_sdu)
- send_sdu(func, tx);
- else
- send_hci(func, tx, t);
-}
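
do_tx() paces SDU aggregates to TX_HZ (2000 per second), i.e. at least TX_INTERVAL = 500 microseconds apart, and reschedules itself when the previous aggregate went out more recently than that. A standalone sketch of the interval check, using plain microsecond values instead of the driver's timeval pair (the EX_ names are illustrative only):

#include <stdbool.h>

#define EX_TX_HZ 2000
#define EX_TX_INTERVAL_US (1000000 / EX_TX_HZ)	/* 500 microseconds */

/* True when enough time has passed since the last SDU to send another one. */
static bool tx_interval_elapsed(long now_us, long last_us)
{
	long diff = now_us - last_us;

	return diff < 0 || diff >= EX_TX_INTERVAL_US;
}
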
-
-static int gdm_sdio_send(void *priv_dev, void *data, int len,
- void (*cb)(void *data), void *cb_data)
-{
- struct sdiowm_dev *sdev = priv_dev;
- struct tx_cxt *tx = &sdev->tx;
- struct sdio_tx *t;
- u8 *pkt = data;
- int no_spc = 0;
- u16 cmd_evt;
- unsigned long flags;
-
- if (len > TX_BUF_SIZE - TYPE_A_HEADER_SIZE)
- return -EINVAL;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- cmd_evt = (pkt[0] << 8) | pkt[1];
- if (cmd_evt == WIMAX_TX_SDU) {
- t = get_tx_struct(tx, &no_spc);
- if (!t) {
- /* This case must not happen. */
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOSPC;
- }
- list_add_tail(&t->list, &tx->sdu_list);
-
- memcpy(t->buf, data, len);
-
- t->len = len;
- t->callback = cb;
- t->cb_data = cb_data;
- } else {
- t = alloc_tx_struct(tx);
- if (!t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOMEM;
- }
- list_add_tail(&t->list, &tx->hci_list);
-
- t->buf[0] = len & 0xff;
- t->buf[1] = (len >> 8) & 0xff;
- t->buf[2] = (len >> 16) & 0xff;
- t->buf[3] = 2;
- memcpy(t->buf + TYPE_A_HEADER_SIZE, data, len);
-
- t->len = len + TYPE_A_HEADER_SIZE;
- t->callback = cb;
- t->cb_data = cb_data;
- }
-
- if (tx->can_send)
- schedule_work(&sdev->ws);
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (no_spc)
- return -ENOSPC;
-
- return 0;
-}
-
-/* Handle the HCI, WIMAX_SDU_TX_FLOW. */
-static int control_sdu_tx_flow(struct sdiowm_dev *sdev, u8 *hci_data, int len)
-{
- struct tx_cxt *tx = &sdev->tx;
- u16 cmd_evt;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- cmd_evt = (hci_data[0] << 8) | (hci_data[1]);
- if (cmd_evt != WIMAX_SDU_TX_FLOW)
- goto out;
-
- if (hci_data[4] == 0) {
- dev_dbg(&sdev->func->dev, "WIMAX ==> STOP SDU TX\n");
- tx->stop_sdu_tx = 1;
- } else if (hci_data[4] == 1) {
- dev_dbg(&sdev->func->dev, "WIMAX ==> START SDU TX\n");
- tx->stop_sdu_tx = 0;
- if (tx->can_send)
- schedule_work(&sdev->ws);
- /* If no free buffer is left for SDU TX, the TX queue must not be
- * woken, so do not pass the START_SDU_TX command up.
- */
- if (list_empty(&tx->free_list))
- len = 0;
- }
-
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
- return len;
-}
-
-static void gdm_sdio_irq(struct sdio_func *func)
-{
- struct phy_dev *phy_dev = sdio_get_drvdata(func);
- struct sdiowm_dev *sdev = phy_dev->priv_dev;
- struct tx_cxt *tx = &sdev->tx;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_rx *r;
- unsigned long flags;
- u8 val, hdr[TYPE_A_LOOKAHEAD_SIZE], *buf;
- u32 len, blocks, n;
- int ret, remain;
-
- /* Check interrupt */
- val = sdio_readb(func, 0x13, &ret);
- if (val & 0x01)
- sdio_writeb(func, 0x01, 0x13, &ret); /* clear interrupt */
- else
- return;
-
- ret = sdio_memcpy_fromio(func, hdr, 0x0, TYPE_A_LOOKAHEAD_SIZE);
- if (ret) {
- dev_err(&func->dev,
- "Cannot read from function %d\n", func->num);
- goto done;
- }
-
- len = (hdr[2] << 16) | (hdr[1] << 8) | hdr[0];
- if (len > (RX_BUF_SIZE - TYPE_A_HEADER_SIZE)) {
- dev_err(&func->dev, "Too big Type-A size: %d\n", len);
- goto done;
- }
-
- if (hdr[3] == 1) { /* Ack */
- u32 *ack_seq = (u32 *)&hdr[4];
-
- spin_lock_irqsave(&tx->lock, flags);
- tx->can_send = 1;
-
- if (!list_empty(&tx->sdu_list) || !list_empty(&tx->hci_list))
- schedule_work(&sdev->ws);
- spin_unlock_irqrestore(&tx->lock, flags);
- dev_dbg(&func->dev, "Ack... %0x\n", ntohl(*ack_seq));
- goto done;
- }
-
- memcpy(rx->rx_buf, hdr + TYPE_A_HEADER_SIZE,
- TYPE_A_LOOKAHEAD_SIZE - TYPE_A_HEADER_SIZE);
-
- buf = rx->rx_buf + TYPE_A_LOOKAHEAD_SIZE - TYPE_A_HEADER_SIZE;
- remain = len - TYPE_A_LOOKAHEAD_SIZE + TYPE_A_HEADER_SIZE;
- if (remain <= 0)
- goto end_io;
-
- blocks = remain / func->cur_blksize;
-
- if (blocks) {
- n = blocks * func->cur_blksize;
- ret = sdio_memcpy_fromio(func, buf, 0x0, n);
- if (ret) {
- dev_err(&func->dev,
- "Cannot read from function %d\n", func->num);
- goto done;
- }
- buf += n;
- remain -= n;
- }
-
- if (remain) {
- ret = sdio_memcpy_fromio(func, buf, 0x0, remain);
- if (ret) {
- dev_err(&func->dev,
- "Cannot read from function %d\n", func->num);
- goto done;
- }
- }
-
-end_io:
- dev_dbg(&func->dev, "sdio_receive: %*ph\n", len, rx->rx_buf);
-
- len = control_sdu_tx_flow(sdev, rx->rx_buf, len);
-
- spin_lock_irqsave(&rx->lock, flags);
-
- if (!list_empty(&rx->req_list)) {
- r = list_entry(rx->req_list.next, struct sdio_rx, list);
- spin_unlock_irqrestore(&rx->lock, flags);
- if (r->callback)
- r->callback(r->cb_data, rx->rx_buf, len);
- spin_lock_irqsave(&rx->lock, flags);
- list_del(&r->list);
- put_rx_struct(rx, r);
- }
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
-done:
- sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */
- if (!phy_dev->netdev)
- register_wimax_device(phy_dev, &func->dev);
-}
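
gdm_sdio_irq() starts by reading a 16-byte lookahead: the 4-byte Type-A header (24-bit length plus a flag byte, where 1 marks an acknowledgement for a previous TX) followed by the first 12 payload bytes; any remainder is then read block-wise into rx_buf. A standalone sketch of parsing that lookahead (the struct and function are hypothetical, not driver code):

#include <stdint.h>

struct type_a_lookahead {
	uint32_t len;		/* payload length from the 24-bit LE field */
	uint8_t flag;		/* 1 == ack for a previously sent packet */
	const uint8_t *payload;	/* first 12 payload bytes in the lookahead */
};

/* Parse the 16-byte lookahead read at the top of gdm_sdio_irq(). */
static struct type_a_lookahead parse_lookahead(const uint8_t hdr[16])
{
	struct type_a_lookahead la = {
		.len = (uint32_t)hdr[0] | ((uint32_t)hdr[1] << 8) |
		       ((uint32_t)hdr[2] << 16),
		.flag = hdr[3],
		.payload = hdr + 4,
	};

	return la;
}
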
-
-static int gdm_sdio_receive(void *priv_dev,
- void (*cb)(void *cb_data, void *data, int len),
- void *cb_data)
-{
- struct sdiowm_dev *sdev = priv_dev;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_rx *r;
- unsigned long flags;
-
- spin_lock_irqsave(&rx->lock, flags);
- r = get_rx_struct(rx);
- if (!r) {
- spin_unlock_irqrestore(&rx->lock, flags);
- return -ENOMEM;
- }
-
- r->callback = cb;
- r->cb_data = cb_data;
-
- list_add_tail(&r->list, &rx->req_list);
- spin_unlock_irqrestore(&rx->lock, flags);
-
- return 0;
-}
-
-static int sdio_wimax_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int ret;
- struct phy_dev *phy_dev = NULL;
- struct sdiowm_dev *sdev = NULL;
-
- dev_info(&func->dev, "Found GDM SDIO VID = 0x%04x PID = 0x%04x...\n",
- func->vendor, func->device);
- dev_info(&func->dev, "GCT WiMax driver version %s\n", DRIVER_VERSION);
-
- sdio_claim_host(func);
- sdio_enable_func(func);
- sdio_claim_irq(func, gdm_sdio_irq);
-
- ret = sdio_boot(func);
- if (ret)
- return ret;
-
- phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
- if (!phy_dev) {
- ret = -ENOMEM;
- goto out;
- }
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev) {
- ret = -ENOMEM;
- goto out;
- }
-
- phy_dev->priv_dev = (void *)sdev;
- phy_dev->send_func = gdm_sdio_send;
- phy_dev->rcv_func = gdm_sdio_receive;
-
- ret = init_sdio(sdev);
- if (ret < 0)
- goto out;
-
- sdev->func = func;
-
- sdio_writeb(func, 1, 0x14, &ret); /* Enable interrupt */
- sdio_release_host(func);
-
- INIT_WORK(&sdev->ws, do_tx);
-
- sdio_set_drvdata(func, phy_dev);
-out:
- if (ret) {
- kfree(phy_dev);
- kfree(sdev);
- }
-
- return ret;
-}
-
-static void sdio_wimax_remove(struct sdio_func *func)
-{
- struct phy_dev *phy_dev = sdio_get_drvdata(func);
- struct sdiowm_dev *sdev = phy_dev->priv_dev;
-
- cancel_work_sync(&sdev->ws);
- if (phy_dev->netdev)
- unregister_wimax_device(phy_dev);
- sdio_claim_host(func);
- sdio_release_irq(func);
- sdio_disable_func(func);
- sdio_release_host(func);
- release_sdio(sdev);
-
- kfree(sdev);
- kfree(phy_dev);
-}
-
-static const struct sdio_device_id sdio_wimax_ids[] = {
- { SDIO_DEVICE(0x0296, 0x5347) },
- {0}
-};
-
-MODULE_DEVICE_TABLE(sdio, sdio_wimax_ids);
-
-static struct sdio_driver sdio_wimax_driver = {
- .probe = sdio_wimax_probe,
- .remove = sdio_wimax_remove,
- .name = "sdio_wimax",
- .id_table = sdio_wimax_ids,
-};
-
-static int __init sdio_gdm_wimax_init(void)
-{
- return sdio_register_driver(&sdio_wimax_driver);
-}
-
-static void __exit sdio_gdm_wimax_exit(void)
-{
- sdio_unregister_driver(&sdio_wimax_driver);
-}
-
-module_init(sdio_gdm_wimax_init);
-module_exit(sdio_gdm_wimax_exit);
-
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_DESCRIPTION("GCT WiMax SDIO Device Driver");
-MODULE_AUTHOR("Ethan Park");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm72xx/gdm_sdio.h b/drivers/staging/gdm72xx/gdm_sdio.h
deleted file mode 100644
index 77ad9d686f8e..000000000000
--- a/drivers/staging/gdm72xx/gdm_sdio.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_SDIO_H__
-#define __GDM72XX_GDM_SDIO_H__
-
-#include <linux/types.h>
-#include <linux/time.h>
-
-#define MAX_NR_SDU_BUF 64
-
-struct sdio_tx {
- struct list_head list;
- struct tx_cxt *tx_cxt;
- u8 *buf;
- int len;
- void (*callback)(void *cb_data);
- void *cb_data;
-};
-
-struct tx_cxt {
- struct list_head free_list;
- struct list_head sdu_list;
- struct list_head hci_list;
- struct timeval sdu_stamp;
- u8 *sdu_buf;
- spinlock_t lock;
- int can_send;
- int stop_sdu_tx;
-};
-
-struct sdio_rx {
- struct list_head list;
- struct rx_cxt *rx_cxt;
- void (*callback)(void *cb_data, void *data, int len);
- void *cb_data;
-};
-
-struct rx_cxt {
- struct list_head free_list;
- struct list_head req_list;
- u8 *rx_buf;
- spinlock_t lock;
-};
-
-struct sdiowm_dev {
- struct sdio_func *func;
- struct tx_cxt tx;
- struct rx_cxt rx;
- struct work_struct ws;
-};
-
-#endif /* __GDM72XX_GDM_SDIO_H__ */
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
deleted file mode 100644
index 16e497d9d0cf..000000000000
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/usb.h>
-#include <asm/byteorder.h>
-#include <linux/kthread.h>
-
-#include "gdm_usb.h"
-#include "gdm_wimax.h"
-#include "usb_boot.h"
-#include "hci.h"
-
-#include "usb_ids.h"
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-#define TX_BUF_SIZE 2048
-
-#if defined(CONFIG_WIMAX_GDM72XX_WIMAX2)
-#define RX_BUF_SIZE (128*1024) /* For packet aggregation */
-#else
-#define RX_BUF_SIZE 2048
-#endif
-
-#define GDM7205_PADDING 256
-
-#define DOWNLOAD_CONF_VALUE 0x21
-
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
-
-static DECLARE_WAIT_QUEUE_HEAD(k_wait);
-static LIST_HEAD(k_list);
-static DEFINE_SPINLOCK(k_lock);
-static int k_mode_stop;
-
-#define K_WAIT_TIME (2 * HZ / 100)
-
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
-static struct usb_tx *alloc_tx_struct(struct tx_cxt *tx)
-{
- struct usb_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
-
- if (!t)
- return NULL;
-
- t->urb = usb_alloc_urb(0, GFP_ATOMIC);
- t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
- if (!t->urb || !t->buf) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- return NULL;
- }
-
- t->tx_cxt = tx;
-
- return t;
-}
-
-static void free_tx_struct(struct usb_tx *t)
-{
- if (t) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- }
-}
-
-static struct usb_rx *alloc_rx_struct(struct rx_cxt *rx)
-{
- struct usb_rx *r = kzalloc(sizeof(*r), GFP_ATOMIC);
-
- if (!r)
- return NULL;
-
- r->urb = usb_alloc_urb(0, GFP_ATOMIC);
- r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);
- if (!r->urb || !r->buf) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- return NULL;
- }
-
- r->rx_cxt = rx;
- return r;
-}
-
-static void free_rx_struct(struct usb_rx *r)
-{
- if (r) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- }
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct usb_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc)
-{
- struct usb_tx *t;
-
- if (list_empty(&tx->free_list)) {
- *no_spc = 1;
- return NULL;
- }
-
- t = list_entry(tx->free_list.next, struct usb_tx, list);
- list_del(&t->list);
-
- *no_spc = list_empty(&tx->free_list) ? 1 : 0;
-
- return t;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_tx_struct(struct tx_cxt *tx, struct usb_tx *t)
-{
- list_add_tail(&t->list, &tx->free_list);
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct usb_rx *get_rx_struct(struct rx_cxt *rx)
-{
- struct usb_rx *r;
-
- if (list_empty(&rx->free_list)) {
- r = alloc_rx_struct(rx);
- if (!r)
- return NULL;
-
- list_add(&r->list, &rx->free_list);
- }
-
- r = list_entry(rx->free_list.next, struct usb_rx, list);
- list_move_tail(&r->list, &rx->used_list);
-
- return r;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
-{
- list_move(&r->list, &rx->free_list);
-}
-
-static void release_usb(struct usbwm_dev *udev)
-{
- struct tx_cxt *tx = &udev->tx;
- struct rx_cxt *rx = &udev->rx;
- struct usb_tx *t, *t_next;
- struct usb_rx *r, *r_next;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->free_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry_safe(r, r_next, &rx->free_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-
- list_for_each_entry_safe(r, r_next, &rx->used_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-
- spin_unlock_irqrestore(&rx->lock, flags);
-}
-
-static int init_usb(struct usbwm_dev *udev)
-{
- int ret = 0, i;
- struct tx_cxt *tx = &udev->tx;
- struct rx_cxt *rx = &udev->rx;
- struct usb_tx *t;
- struct usb_rx *r;
- unsigned long flags;
-
- INIT_LIST_HEAD(&tx->free_list);
- INIT_LIST_HEAD(&tx->sdu_list);
- INIT_LIST_HEAD(&tx->hci_list);
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
- INIT_LIST_HEAD(&tx->pending_list);
-#endif
-
- INIT_LIST_HEAD(&rx->free_list);
- INIT_LIST_HEAD(&rx->used_list);
-
- spin_lock_init(&tx->lock);
- spin_lock_init(&rx->lock);
-
- spin_lock_irqsave(&tx->lock, flags);
- for (i = 0; i < MAX_NR_SDU_BUF; i++) {
- t = alloc_tx_struct(tx);
- if (!t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- ret = -ENOMEM;
- goto fail;
- }
- list_add(&t->list, &tx->free_list);
- }
- spin_unlock_irqrestore(&tx->lock, flags);
-
- r = alloc_rx_struct(rx);
- if (!r) {
- ret = -ENOMEM;
- goto fail;
- }
-
- spin_lock_irqsave(&rx->lock, flags);
- list_add(&r->list, &rx->free_list);
- spin_unlock_irqrestore(&rx->lock, flags);
- return ret;
-
-fail:
- release_usb(udev);
- return ret;
-}
-
-static void __gdm_usb_send_complete(struct urb *urb)
-{
- struct usb_tx *t = urb->context;
- struct tx_cxt *tx = t->tx_cxt;
- u8 *pkt = t->buf;
- u16 cmd_evt;
-
- /* Completion by usb_unlink_urb */
- if (urb->status == -ECONNRESET)
- return;
-
- if (t->callback)
- t->callback(t->cb_data);
-
- /* Delete from sdu list or hci list. */
- list_del(&t->list);
-
- cmd_evt = (pkt[0] << 8) | pkt[1];
- if (cmd_evt == WIMAX_TX_SDU)
- put_tx_struct(tx, t);
- else
- free_tx_struct(t);
-}
-
-static void gdm_usb_send_complete(struct urb *urb)
-{
- struct usb_tx *t = urb->context;
- struct tx_cxt *tx = t->tx_cxt;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- __gdm_usb_send_complete(urb);
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
-static int gdm_usb_send(void *priv_dev, void *data, int len,
- void (*cb)(void *data), void *cb_data)
-{
- struct usbwm_dev *udev = priv_dev;
- struct usb_device *usbdev = udev->usbdev;
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx *t;
- int padding = udev->padding;
- int no_spc = 0, ret;
- u8 *pkt = data;
- u16 cmd_evt;
- unsigned long flags;
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- unsigned long flags2;
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
- if (!udev->usbdev) {
- dev_err(&usbdev->dev, "%s: No such device\n", __func__);
- return -ENODEV;
- }
-
- if (len > TX_BUF_SIZE - padding - 1)
- return -EINVAL;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- cmd_evt = (pkt[0] << 8) | pkt[1];
- if (cmd_evt == WIMAX_TX_SDU) {
- t = get_tx_struct(tx, &no_spc);
- if (!t) {
- /* This case must not happen. */
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOSPC;
- }
- list_add_tail(&t->list, &tx->sdu_list);
- } else {
- t = alloc_tx_struct(tx);
- if (!t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOMEM;
- }
- list_add_tail(&t->list, &tx->hci_list);
- }
-
- memcpy(t->buf + padding, data, len);
- t->callback = cb;
- t->cb_data = cb_data;
-
- /* In some cases the WiMax USB module stalls when the transfer size
- * is an exact multiple of 512 bytes, so increment the length by one
- * in that case.
- */
- if ((len % 512) == 0)
- len++;
-
- usb_fill_bulk_urb(t->urb, usbdev, usb_sndbulkpipe(usbdev, 1), t->buf,
- len + padding, gdm_usb_send_complete, t);
-
- dev_dbg(&usbdev->dev, "usb_send: %*ph\n", len + padding, t->buf);
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- if (usbdev->state & USB_STATE_SUSPENDED) {
- list_add_tail(&t->p_list, &tx->pending_list);
- schedule_work(&udev->pm_ws);
- goto out;
- }
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- if (udev->bw_switch) {
- list_add_tail(&t->p_list, &tx->pending_list);
- goto out;
- } else if (cmd_evt == WIMAX_SCAN) {
- struct rx_cxt *rx;
- struct usb_rx *r;
-
- rx = &udev->rx;
-
- spin_lock_irqsave(&rx->lock, flags2);
- list_for_each_entry(r, &rx->used_list, list)
- usb_unlink_urb(r->urb);
- spin_unlock_irqrestore(&rx->lock, flags2);
-
- udev->bw_switch = 1;
-
- spin_lock_irqsave(&k_lock, flags2);
- list_add_tail(&udev->list, &k_list);
- spin_unlock_irqrestore(&k_lock, flags2);
-
- wake_up(&k_wait);
- }
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
- if (ret)
- goto send_fail;
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- usb_mark_last_busy(usbdev);
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
-out:
-#endif
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (no_spc)
- return -ENOSPC;
-
- return 0;
-
-send_fail:
- t->callback = NULL;
- __gdm_usb_send_complete(t->urb);
- spin_unlock_irqrestore(&tx->lock, flags);
- return ret;
-}
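
The length adjustment in gdm_usb_send() works around firmware that stalls on bulk transfers whose size is an exact multiple of 512 bytes, most likely because such a transfer is a whole number of high-speed bulk packets and therefore ends without a short packet; adding one byte forces a terminating short packet. A minimal sketch of that adjustment as a helper (the name is hypothetical):

/*
 * Avoid transfer lengths that are exact multiples of the 512-byte
 * high-speed bulk packet size so every transfer ends with a short
 * packet the firmware can detect.
 */
static int usb_tx_len(int len)
{
	return (len % 512) == 0 ? len + 1 : len;
}
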
-
-static void gdm_usb_rcv_complete(struct urb *urb)
-{
- struct usb_rx *r = urb->context;
- struct rx_cxt *rx = r->rx_cxt;
- struct usbwm_dev *udev = container_of(r->rx_cxt, struct usbwm_dev, rx);
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx *t;
- u16 cmd_evt;
- unsigned long flags, flags2;
- struct usb_device *dev = urb->dev;
-
- /* Completion by usb_unlink_urb */
- if (urb->status == -ECONNRESET)
- return;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- if (!urb->status) {
- cmd_evt = (r->buf[0] << 8) | (r->buf[1]);
-
- dev_dbg(&dev->dev, "usb_receive: %*ph\n", urb->actual_length,
- r->buf);
-
- if (cmd_evt == WIMAX_SDU_TX_FLOW) {
- if (r->buf[4] == 0) {
- dev_dbg(&dev->dev, "WIMAX ==> STOP SDU TX\n");
- list_for_each_entry(t, &tx->sdu_list, list)
- usb_unlink_urb(t->urb);
- } else if (r->buf[4] == 1) {
- dev_dbg(&dev->dev, "WIMAX ==> START SDU TX\n");
- list_for_each_entry(t, &tx->sdu_list, list) {
- usb_submit_urb(t->urb, GFP_ATOMIC);
- }
- /* If no free buffer is left for SDU
- * TX, the TX queue must not be
- * woken, so do not pass the
- * START_SDU_TX command up.
- */
- if (list_empty(&tx->free_list))
- urb->actual_length = 0;
- }
- }
- }
-
- if (!urb->status && r->callback)
- r->callback(r->cb_data, r->buf, urb->actual_length);
-
- spin_lock_irqsave(&rx->lock, flags2);
- put_rx_struct(rx, r);
- spin_unlock_irqrestore(&rx->lock, flags2);
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- usb_mark_last_busy(dev);
-#endif
-}
-
-static int gdm_usb_receive(void *priv_dev,
- void (*cb)(void *cb_data, void *data, int len),
- void *cb_data)
-{
- struct usbwm_dev *udev = priv_dev;
- struct usb_device *usbdev = udev->usbdev;
- struct rx_cxt *rx = &udev->rx;
- struct usb_rx *r;
- unsigned long flags;
-
- if (!udev->usbdev) {
- dev_err(&usbdev->dev, "%s: No such device\n", __func__);
- return -ENODEV;
- }
-
- spin_lock_irqsave(&rx->lock, flags);
- r = get_rx_struct(rx);
- spin_unlock_irqrestore(&rx->lock, flags);
-
- if (!r)
- return -ENOMEM;
-
- r->callback = cb;
- r->cb_data = cb_data;
-
- usb_fill_bulk_urb(r->urb, usbdev, usb_rcvbulkpipe(usbdev, 0x82), r->buf,
- RX_BUF_SIZE, gdm_usb_rcv_complete, r);
-
- return usb_submit_urb(r->urb, GFP_ATOMIC);
-}
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
-static void do_pm_control(struct work_struct *work)
-{
- struct usbwm_dev *udev = container_of(work, struct usbwm_dev, pm_ws);
- struct tx_cxt *tx = &udev->tx;
- int ret;
- unsigned long flags;
-
- ret = usb_autopm_get_interface(udev->intf);
- if (!ret)
- usb_autopm_put_interface(udev->intf);
-
- spin_lock_irqsave(&tx->lock, flags);
- if (!(udev->usbdev->state & USB_STATE_SUSPENDED) &&
- (!list_empty(&tx->hci_list) || !list_empty(&tx->sdu_list))) {
- struct usb_tx *t, *temp;
-
- list_for_each_entry_safe(t, temp, &tx->pending_list, p_list) {
- list_del(&t->p_list);
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
-
- if (ret) {
- t->callback = NULL;
- __gdm_usb_send_complete(t->urb);
- }
- }
- }
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-static int gdm_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- int ret = 0;
- u8 bConfigurationValue;
- struct phy_dev *phy_dev = NULL;
- struct usbwm_dev *udev = NULL;
- u16 idVendor, idProduct, bcdDevice;
-
- struct usb_device *usbdev = interface_to_usbdev(intf);
-
- usb_get_dev(usbdev);
- bConfigurationValue = usbdev->actconfig->desc.bConfigurationValue;
-
- /* USB descriptor fields are little-endian. */
- idVendor = le16_to_cpu(usbdev->descriptor.idVendor);
- idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
- bcdDevice = le16_to_cpu(usbdev->descriptor.bcdDevice);
-
- dev_info(&intf->dev, "Found GDM USB VID = 0x%04x PID = 0x%04x...\n",
- idVendor, idProduct);
- dev_info(&intf->dev, "GCT WiMax driver version %s\n", DRIVER_VERSION);
-
-
- if (idProduct == EMERGENCY_PID) {
- ret = usb_emergency(usbdev);
- goto out;
- }
-
- /* Support for EEPROM bootloader */
- if (bConfigurationValue == DOWNLOAD_CONF_VALUE ||
- idProduct & B_DOWNLOAD) {
- ret = usb_boot(usbdev, bcdDevice);
- goto out;
- }
-
- phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
- if (!phy_dev) {
- ret = -ENOMEM;
- goto out;
- }
- udev = kzalloc(sizeof(*udev), GFP_KERNEL);
- if (!udev) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (idProduct == 0x7205 || idProduct == 0x7206)
- udev->padding = GDM7205_PADDING;
- else
- udev->padding = 0;
-
- phy_dev->priv_dev = (void *)udev;
- phy_dev->send_func = gdm_usb_send;
- phy_dev->rcv_func = gdm_usb_receive;
-
- ret = init_usb(udev);
- if (ret < 0)
- goto out;
-
- udev->usbdev = usbdev;
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- udev->intf = intf;
-
- intf->needs_remote_wakeup = 1;
- device_init_wakeup(&intf->dev, 1);
-
- pm_runtime_set_autosuspend_delay(&usbdev->dev, 10 * 1000); /* msec */
-
- INIT_WORK(&udev->pm_ws, do_pm_control);
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
- ret = register_wimax_device(phy_dev, &intf->dev);
- if (ret)
- release_usb(udev);
-
-out:
- if (ret) {
- kfree(phy_dev);
- kfree(udev);
- usb_put_dev(usbdev);
- } else {
- usb_set_intfdata(intf, phy_dev);
- }
- return ret;
-}
-
-static void gdm_usb_disconnect(struct usb_interface *intf)
-{
- u8 bConfigurationValue;
- struct phy_dev *phy_dev;
- struct usbwm_dev *udev;
- u16 idProduct;
- struct usb_device *usbdev = interface_to_usbdev(intf);
-
- bConfigurationValue = usbdev->actconfig->desc.bConfigurationValue;
- phy_dev = usb_get_intfdata(intf);
-
- /* USB descriptor fields are little-endian. */
- idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
-
- if (idProduct != EMERGENCY_PID &&
- bConfigurationValue != DOWNLOAD_CONF_VALUE &&
- (idProduct & B_DOWNLOAD) == 0) {
- udev = phy_dev->priv_dev;
- udev->usbdev = NULL;
-
- unregister_wimax_device(phy_dev);
- release_usb(udev);
- kfree(udev);
- kfree(phy_dev);
- }
-
- usb_put_dev(usbdev);
-}
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
-static int gdm_suspend(struct usb_interface *intf, pm_message_t pm_msg)
-{
- struct phy_dev *phy_dev;
- struct usbwm_dev *udev;
- struct rx_cxt *rx;
- struct usb_rx *r;
- unsigned long flags;
-
- phy_dev = usb_get_intfdata(intf);
- if (!phy_dev)
- return 0;
-
- udev = phy_dev->priv_dev;
- rx = &udev->rx;
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry(r, &rx->used_list, list)
- usb_unlink_urb(r->urb);
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
- return 0;
-}
-
-static int gdm_resume(struct usb_interface *intf)
-{
- struct phy_dev *phy_dev;
- struct usbwm_dev *udev;
- struct rx_cxt *rx;
- struct usb_rx *r;
- unsigned long flags;
-
- phy_dev = usb_get_intfdata(intf);
- if (!phy_dev)
- return 0;
-
- udev = phy_dev->priv_dev;
- rx = &udev->rx;
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry(r, &rx->used_list, list)
- usb_submit_urb(r->urb, GFP_ATOMIC);
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
- return 0;
-}
-
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
-static int k_mode_thread(void *arg)
-{
- struct usbwm_dev *udev;
- struct tx_cxt *tx;
- struct rx_cxt *rx;
- struct usb_tx *t, *temp;
- struct usb_rx *r;
- unsigned long flags, flags2, expire;
- int ret;
-
- while (!k_mode_stop) {
- spin_lock_irqsave(&k_lock, flags2);
- while (!list_empty(&k_list)) {
- udev = list_entry(k_list.next, struct usbwm_dev, list);
- tx = &udev->tx;
- rx = &udev->rx;
-
- list_del(&udev->list);
- spin_unlock_irqrestore(&k_lock, flags2);
-
- expire = jiffies + K_WAIT_TIME;
- while (time_before(jiffies, expire))
- schedule_timeout(K_WAIT_TIME);
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry(r, &rx->used_list, list)
- usb_submit_urb(r->urb, GFP_ATOMIC);
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
- spin_lock_irqsave(&tx->lock, flags);
-
- list_for_each_entry_safe(t, temp, &tx->pending_list,
- p_list) {
- list_del(&t->p_list);
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
-
- if (ret) {
- t->callback = NULL;
- __gdm_usb_send_complete(t->urb);
- }
- }
-
- udev->bw_switch = 0;
- spin_unlock_irqrestore(&tx->lock, flags);
-
- spin_lock_irqsave(&k_lock, flags2);
- }
- wait_event_interruptible_lock_irq(k_wait,
- !list_empty(&k_list) ||
- k_mode_stop, k_lock);
- spin_unlock_irqrestore(&k_lock, flags2);
- }
- return 0;
-}
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
-static struct usb_driver gdm_usb_driver = {
- .name = "gdm_wimax",
- .probe = gdm_usb_probe,
- .disconnect = gdm_usb_disconnect,
- .id_table = id_table,
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- .supports_autosuspend = 1,
- .suspend = gdm_suspend,
- .resume = gdm_resume,
- .reset_resume = gdm_resume,
-#endif
-};
-
-static int __init usb_gdm_wimax_init(void)
-{
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- kthread_run(k_mode_thread, NULL, "k_mode_wimax");
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
- return usb_register(&gdm_usb_driver);
-}
-
-static void __exit usb_gdm_wimax_exit(void)
-{
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- k_mode_stop = 1;
- wake_up(&k_wait);
-#endif
- usb_deregister(&gdm_usb_driver);
-}
-
-module_init(usb_gdm_wimax_init);
-module_exit(usb_gdm_wimax_exit);
-
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_DESCRIPTION("GCT WiMax Device Driver");
-MODULE_AUTHOR("Ethan Park");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm72xx/gdm_usb.h b/drivers/staging/gdm72xx/gdm_usb.h
deleted file mode 100644
index 8e58a25e7143..000000000000
--- a/drivers/staging/gdm72xx/gdm_usb.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_USB_H__
-#define __GDM72XX_GDM_USB_H__
-
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/list.h>
-
-#define B_DIFF_DL_DRV (1 << 4)
-#define B_DOWNLOAD (1 << 5)
-#define MAX_NR_SDU_BUF 64
-
-struct usb_tx {
- struct list_head list;
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
- struct list_head p_list;
-#endif
- struct tx_cxt *tx_cxt;
- struct urb *urb;
- u8 *buf;
- void (*callback)(void *cb_data);
- void *cb_data;
-};
-
-struct tx_cxt {
- struct list_head free_list;
- struct list_head sdu_list;
- struct list_head hci_list;
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
- struct list_head pending_list;
-#endif
- spinlock_t lock;
-};
-
-struct usb_rx {
- struct list_head list;
- struct rx_cxt *rx_cxt;
- struct urb *urb;
- u8 *buf;
- void (*callback)(void *cb_data, void *data, int len);
- void *cb_data;
-};
-
-struct rx_cxt {
- struct list_head free_list;
- struct list_head used_list;
- spinlock_t lock;
-};
-
-struct usbwm_dev {
- struct usb_device *usbdev;
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- struct work_struct pm_ws;
-
- struct usb_interface *intf;
-#endif
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- int bw_switch;
- struct list_head list;
-#endif
- struct tx_cxt tx;
- struct rx_cxt rx;
- int padding;
-};
-
-#endif /* __GDM72XX_GDM_USB_H__ */
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
deleted file mode 100644
index 6e8dbaf35445..000000000000
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ /dev/null
@@ -1,816 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/etherdevice.h>
-#include <asm/byteorder.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/udp.h>
-#include <linux/in.h>
-
-#include "gdm_wimax.h"
-#include "hci.h"
-#include "wm_ioctl.h"
-#include "netlink_k.h"
-
-#define gdm_wimax_send(n, d, l) \
- (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, NULL, NULL)
-#define gdm_wimax_send_with_cb(n, d, l, c, b) \
- (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, c, b)
-#define gdm_wimax_rcv_with_cb(n, c, b) \
- (n->phy_dev->rcv_func)(n->phy_dev->priv_dev, c, b)
-
-#define EVT_MAX_SIZE 2048
-
-struct evt_entry {
- struct list_head list;
- struct net_device *dev;
- char evt_data[EVT_MAX_SIZE];
- int size;
-};
-
-static struct {
- int ref_cnt;
- struct sock *sock;
- struct list_head evtq;
- spinlock_t evt_lock;
- struct list_head freeq;
- struct work_struct ws;
-} wm_event;
-
-static u8 gdm_wimax_macaddr[6] = {0x00, 0x0a, 0x3b, 0xf0, 0x01, 0x30};
-
-static inline int gdm_wimax_header(struct sk_buff **pskb)
-{
- u16 buf[HCI_HEADER_SIZE / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
- struct sk_buff *skb = *pskb;
-
- if (unlikely(skb_headroom(skb) < HCI_HEADER_SIZE)) {
- struct sk_buff *skb2;
-
- skb2 = skb_realloc_headroom(skb, HCI_HEADER_SIZE);
- if (!skb2)
- return -ENOMEM;
- if (skb->sk)
- skb_set_owner_w(skb2, skb->sk);
- kfree_skb(skb);
- skb = skb2;
- }
-
- skb_push(skb, HCI_HEADER_SIZE);
- hci->cmd_evt = cpu_to_be16(WIMAX_TX_SDU);
- hci->length = cpu_to_be16(skb->len - HCI_HEADER_SIZE);
- memcpy(skb->data, buf, HCI_HEADER_SIZE);
-
- *pskb = skb;
- return 0;
-}
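
gdm_wimax_header() pushes an HCI header in front of every outgoing Ethernet frame: a 16-bit big-endian command (WIMAX_TX_SDU) followed by a 16-bit big-endian payload length, reallocating headroom first if the skb has none. The real struct hci_s lives in hci.h, which is not part of this diff; the sketch below uses an assumed equivalent layout and plain POSIX htons():

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons() */

/* Assumed equivalent of the driver's HCI header: two big-endian u16 fields. */
struct ex_hci_hdr {
	uint16_t cmd_evt;
	uint16_t length;
};

/* Write the 4-byte HCI header at the start of buf; the frame follows it. */
static void push_hci_header(uint8_t *buf, uint16_t cmd, uint16_t payload_len)
{
	struct ex_hci_hdr h = {
		.cmd_evt = htons(cmd),
		.length = htons(payload_len),
	};

	memcpy(buf, &h, sizeof(h));
}
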
-
-static inline struct evt_entry *alloc_event_entry(void)
-{
- return kmalloc(sizeof(struct evt_entry), GFP_ATOMIC);
-}
-
-static inline void free_event_entry(struct evt_entry *e)
-{
- kfree(e);
-}
-
-static struct evt_entry *get_event_entry(void)
-{
- struct evt_entry *e;
-
- if (list_empty(&wm_event.freeq)) {
- e = alloc_event_entry();
- } else {
- e = list_entry(wm_event.freeq.next, struct evt_entry, list);
- list_del(&e->list);
- }
-
- return e;
-}
-
-static void put_event_entry(struct evt_entry *e)
-{
- BUG_ON(!e);
-
- list_add_tail(&e->list, &wm_event.freeq);
-}
-
-static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg,
- int len)
-{
- struct nic *nic = netdev_priv(dev);
-
- u8 *buf = msg;
- u16 hci_cmd = (buf[0]<<8) | buf[1];
- u16 hci_len = (buf[2]<<8) | buf[3];
-
- netdev_dbg(dev, "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len);
-
- gdm_wimax_send(nic, msg, len);
-}
-
-static void __gdm_wimax_event_send(struct work_struct *work)
-{
- int idx;
- unsigned long flags;
- struct evt_entry *e;
- struct evt_entry *tmp;
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
-
- list_for_each_entry_safe(e, tmp, &wm_event.evtq, list) {
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
-
- if (sscanf(e->dev->name, "wm%d", &idx) == 1)
- netlink_send(wm_event.sock, idx, 0, e->evt_data,
- e->size);
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
- list_del(&e->list);
- put_event_entry(e);
- }
-
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
-}
-
-static int gdm_wimax_event_init(void)
-{
- if (!wm_event.ref_cnt) {
- wm_event.sock = netlink_init(NETLINK_WIMAX,
- gdm_wimax_event_rcv);
- if (wm_event.sock) {
- INIT_LIST_HEAD(&wm_event.evtq);
- INIT_LIST_HEAD(&wm_event.freeq);
- INIT_WORK(&wm_event.ws, __gdm_wimax_event_send);
- spin_lock_init(&wm_event.evt_lock);
- }
- }
-
- if (wm_event.sock) {
- wm_event.ref_cnt++;
- return 0;
- }
-
- pr_err("Creating WiMax Event netlink is failed\n");
- return -1;
-}
-
-static void gdm_wimax_event_exit(void)
-{
- if (wm_event.sock && --wm_event.ref_cnt == 0) {
- struct evt_entry *e, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
-
- list_for_each_entry_safe(e, temp, &wm_event.evtq, list) {
- list_del(&e->list);
- free_event_entry(e);
- }
- list_for_each_entry_safe(e, temp, &wm_event.freeq, list) {
- list_del(&e->list);
- free_event_entry(e);
- }
-
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
- netlink_exit(wm_event.sock);
- wm_event.sock = NULL;
- }
-}
-
-static int gdm_wimax_event_send(struct net_device *dev, char *buf, int size)
-{
- struct evt_entry *e;
- unsigned long flags;
-
- u16 hci_cmd = ((u8)buf[0]<<8) | (u8)buf[1];
- u16 hci_len = ((u8)buf[2]<<8) | (u8)buf[3];
-
- netdev_dbg(dev, "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len);
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
-
- e = get_event_entry();
- if (!e) {
- netdev_err(dev, "%s: No memory for event\n", __func__);
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
- return -ENOMEM;
- }
-
- e->dev = dev;
- e->size = size;
- memcpy(e->evt_data, buf, size);
-
- list_add_tail(&e->list, &wm_event.evtq);
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
-
- schedule_work(&wm_event.ws);
-
- return 0;
-}
-
-static void tx_complete(void *arg)
-{
- struct nic *nic = arg;
-
- if (netif_queue_stopped(nic->netdev))
- netif_wake_queue(nic->netdev);
-}
-
-int gdm_wimax_send_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int ret = 0;
- struct nic *nic = netdev_priv(dev);
-
- ret = gdm_wimax_send_with_cb(nic, skb->data, skb->len, tx_complete,
- nic);
- if (ret == -ENOSPC) {
- netif_stop_queue(dev);
- ret = 0;
- }
-
- if (ret) {
- skb_pull(skb, HCI_HEADER_SIZE);
- return ret;
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len - HCI_HEADER_SIZE;
- kfree_skb(skb);
- return ret;
-}
-
-static int gdm_wimax_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int ret = 0;
-
- ret = gdm_wimax_header(&skb);
- if (ret < 0) {
- skb_pull(skb, HCI_HEADER_SIZE);
- return ret;
- }
-
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
- ret = gdm_qos_send_hci_pkt(skb, dev);
-#else
- ret = gdm_wimax_send_tx(skb, dev);
-#endif
- return ret;
-}
-
-static int gdm_wimax_set_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- return 0;
-}
-
-static void __gdm_wimax_set_mac_addr(struct net_device *dev, char *mac_addr)
-{
- u16 hci_pkt_buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)hci_pkt_buf;
- struct nic *nic = netdev_priv(dev);
-
- /* Since dev is registered as an Ethernet device,
- * ether_setup() has set dev->addr_len to ETH_ALEN.
- */
- memcpy(dev->dev_addr, mac_addr, dev->addr_len);
-
- /* Let lower layer know of this change by sending
- * SetInformation(MAC Address)
- */
- hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO);
- hci->length = cpu_to_be16(8);
- hci->data[0] = 0; /* T */
- hci->data[1] = 6; /* L */
- memcpy(&hci->data[2], mac_addr, dev->addr_len); /* V */
-
- gdm_wimax_send(nic, hci, HCI_HEADER_SIZE + 8);
-}
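
SetInformation messages carry their payload as T/L/V triplets appended after the HCI header; __gdm_wimax_set_mac_addr() above uses type 0 (MAC address), length 6, and the six address bytes, for an HCI payload length of 8. A standalone sketch of appending one such triplet (the helper name is hypothetical):

#include <stdint.h>
#include <string.h>

/* Append one type/length/value triplet and return the bytes written. */
static int put_tlv(uint8_t *out, uint8_t type, uint8_t len, const void *val)
{
	out[0] = type;
	out[1] = len;
	memcpy(&out[2], val, len);

	return 2 + len;
}
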
-
-/* A driver function */
-static int gdm_wimax_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- if (netif_running(dev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- __gdm_wimax_set_mac_addr(dev, addr->sa_data);
-
- return 0;
-}
-
-static void gdm_wimax_ind_if_updown(struct net_device *dev, int if_up)
-{
- u16 buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
- unsigned char up_down;
-
- up_down = if_up ? WIMAX_IF_UP : WIMAX_IF_DOWN;
-
- /* Indicate updating fsm */
- hci->cmd_evt = cpu_to_be16(WIMAX_IF_UPDOWN);
- hci->length = cpu_to_be16(sizeof(up_down));
- hci->data[0] = up_down;
-
- gdm_wimax_event_send(dev, (char *)hci, HCI_HEADER_SIZE+sizeof(up_down));
-}
-
-static int gdm_wimax_open(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- struct fsm_s *fsm = (struct fsm_s *)nic->sdk_data[SIOC_DATA_FSM].buf;
-
- netif_start_queue(dev);
-
- if (fsm && fsm->m_status != M_INIT)
- gdm_wimax_ind_if_updown(dev, 1);
- return 0;
-}
-
-static int gdm_wimax_close(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- struct fsm_s *fsm = (struct fsm_s *)nic->sdk_data[SIOC_DATA_FSM].buf;
-
- netif_stop_queue(dev);
-
- if (fsm && fsm->m_status != M_INIT)
- gdm_wimax_ind_if_updown(dev, 0);
- return 0;
-}
-
-static void kdelete(void **buf)
-{
- if (buf && *buf) {
- kfree(*buf);
- *buf = NULL;
- }
-}
-
-static int gdm_wimax_ioctl_get_data(struct data_s *dst, struct data_s *src)
-{
- int size;
-
- size = dst->size < src->size ? dst->size : src->size;
-
- dst->size = size;
- if (src->size) {
- if (!dst->buf)
- return -EINVAL;
- if (copy_to_user((void __user *)dst->buf, src->buf, size))
- return -EFAULT;
- }
- return 0;
-}
-
-static int gdm_wimax_ioctl_set_data(struct data_s *dst, struct data_s *src)
-{
- if (!src->size) {
- dst->size = 0;
- return 0;
- }
-
- if (!src->buf)
- return -EINVAL;
-
- if (!(dst->buf && dst->size == src->size)) {
- kdelete(&dst->buf);
- dst->buf = kmalloc(src->size, GFP_KERNEL);
- if (!dst->buf)
- return -ENOMEM;
- }
-
- if (copy_from_user(dst->buf, (void __user *)src->buf, src->size)) {
- kdelete(&dst->buf);
- return -EFAULT;
- }
- dst->size = src->size;
- return 0;
-}
-
-static void gdm_wimax_cleanup_ioctl(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- int i;
-
- for (i = 0; i < SIOC_DATA_MAX; i++)
- kdelete(&nic->sdk_data[i].buf);
-}
-
-static void gdm_wimax_ind_fsm_update(struct net_device *dev, struct fsm_s *fsm)
-{
- u16 buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
-
- /* Indicate updating fsm */
- hci->cmd_evt = cpu_to_be16(WIMAX_FSM_UPDATE);
- hci->length = cpu_to_be16(sizeof(struct fsm_s));
- memcpy(&hci->data[0], fsm, sizeof(struct fsm_s));
-
- gdm_wimax_event_send(dev, (char *)hci,
- HCI_HEADER_SIZE + sizeof(struct fsm_s));
-}
-
-static void gdm_update_fsm(struct net_device *dev, struct fsm_s *new_fsm)
-{
- struct nic *nic = netdev_priv(dev);
- struct fsm_s *cur_fsm = (struct fsm_s *)
- nic->sdk_data[SIOC_DATA_FSM].buf;
-
- if (!cur_fsm)
- return;
-
- if (cur_fsm->m_status != new_fsm->m_status ||
- cur_fsm->c_status != new_fsm->c_status) {
- if (new_fsm->m_status == M_CONNECTED) {
- netif_carrier_on(dev);
- } else if (cur_fsm->m_status == M_CONNECTED) {
- netif_carrier_off(dev);
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- gdm_qos_release_list(nic);
- #endif
- }
- gdm_wimax_ind_fsm_update(dev, new_fsm);
- }
-}
-
-static int gdm_wimax_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct wm_req_s *req = (struct wm_req_s *)ifr;
- struct nic *nic = netdev_priv(dev);
- int ret;
-
- if (cmd != SIOCWMIOCTL)
- return -EOPNOTSUPP;
-
- switch (req->cmd) {
- case SIOCG_DATA:
- case SIOCS_DATA:
- if (req->data_id >= SIOC_DATA_MAX) {
- netdev_err(dev, "%s error: data-index(%d) is invalid!!\n",
- __func__, req->data_id);
- return -EOPNOTSUPP;
- }
- if (req->cmd == SIOCG_DATA) {
- ret = gdm_wimax_ioctl_get_data(
- &req->data, &nic->sdk_data[req->data_id]);
- if (ret < 0)
- return ret;
- } else if (req->cmd == SIOCS_DATA) {
- if (req->data_id == SIOC_DATA_FSM) {
- /* NOTE: gdm_update_fsm should be called
- * before gdm_wimax_ioctl_set_data is called.
- */
- gdm_update_fsm(dev,
- (struct fsm_s *)req->data.buf);
- }
- ret = gdm_wimax_ioctl_set_data(
- &nic->sdk_data[req->data_id], &req->data);
- if (ret < 0)
- return ret;
- }
- break;
- default:
- netdev_err(dev, "%s: %x unknown ioctl\n", __func__, cmd);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static void gdm_wimax_prepare_device(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- u16 buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
- u16 len = 0;
- u32 val = 0;
- __be32 val_be32;
-
- /* GetInformation mac address */
- len = 0;
- hci->cmd_evt = cpu_to_be16(WIMAX_GET_INFO);
- hci->data[len++] = TLV_T(T_MAC_ADDRESS);
- hci->length = cpu_to_be16(len);
- gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len);
-
- val = T_CAPABILITY_WIMAX | T_CAPABILITY_MULTI_CS;
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- val |= T_CAPABILITY_QOS;
- #endif
- #if defined(CONFIG_WIMAX_GDM72XX_WIMAX2)
- val |= T_CAPABILITY_AGGREGATION;
- #endif
-
- /* Set capability */
- len = 0;
- hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO);
- hci->data[len++] = TLV_T(T_CAPABILITY);
- hci->data[len++] = TLV_L(T_CAPABILITY);
- val_be32 = cpu_to_be32(val);
- memcpy(&hci->data[len], &val_be32, TLV_L(T_CAPABILITY));
- len += TLV_L(T_CAPABILITY);
- hci->length = cpu_to_be16(len);
- gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len);
-
- netdev_info(dev, "GDM WiMax Set CAPABILITY: 0x%08X\n", val);
-}
-
-static int gdm_wimax_hci_get_tlv(u8 *buf, u8 *T, u16 *L, u8 **V)
-{
- #define __U82U16(b) ((u16)((u8 *)(b))[0] | ((u16)((u8 *)(b))[1] << 8))
- int next_pos;
-
- *T = buf[0];
- if (buf[1] == 0x82) {
- *L = be16_to_cpu(__U82U16(&buf[2]));
- next_pos = 1/*type*/+3/*len*/;
- } else {
- *L = buf[1];
- next_pos = 1/*type*/+1/*len*/;
- }
- *V = &buf[next_pos];
-
- next_pos += *L/*length of val*/;
- return next_pos;
-}
-
-static int gdm_wimax_get_prepared_info(struct net_device *dev, char *buf,
- int len)
-{
- u8 T, *V;
- u16 L;
- u16 cmd_evt, cmd_len;
- int pos = HCI_HEADER_SIZE;
-
- cmd_evt = be16_to_cpup((const __be16 *)&buf[0]);
- cmd_len = be16_to_cpup((const __be16 *)&buf[2]);
-
- if (len < cmd_len + HCI_HEADER_SIZE) {
- netdev_err(dev, "%s: invalid length [%d/%d]\n", __func__,
- cmd_len + HCI_HEADER_SIZE, len);
- return -1;
- }
-
- if (cmd_evt == WIMAX_GET_INFO_RESULT) {
- if (cmd_len < 2) {
- netdev_err(dev, "%s: len is too short [%x/%d]\n",
- __func__, cmd_evt, len);
- return -1;
- }
-
- pos += gdm_wimax_hci_get_tlv(&buf[pos], &T, &L, &V);
- if (T == TLV_T(T_MAC_ADDRESS)) {
- if (L != dev->addr_len) {
- netdev_err(dev,
- "%s Invalid information result T/L [%x/%d]\n",
- __func__, T, L);
- return -1;
- }
- netdev_info(dev, "MAC change [%pM]->[%pM]\n",
- dev->dev_addr, V);
- memcpy(dev->dev_addr, V, dev->addr_len);
- return 1;
- }
- }
-
- gdm_wimax_event_send(dev, buf, len);
- return 0;
-}
-
-static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len)
-{
- struct sk_buff *skb;
- int ret;
-
- skb = dev_alloc_skb(len + 2);
- if (!skb)
- return;
- skb_reserve(skb, 2);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-
- memcpy(skb_put(skb, len), buf, len);
-
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev); /* what will happen? */
-
- ret = in_interrupt() ? netif_rx(skb) : netif_rx_ni(skb);
- if (ret == NET_RX_DROP)
- netdev_err(dev, "%s skb dropped\n", __func__);
-}
-
-static void gdm_wimax_transmit_aggr_pkt(struct net_device *dev, char *buf,
- int len)
-{
- #define HCI_PADDING_BYTE 4
- #define HCI_RESERVED_BYTE 4
- struct hci_s *hci;
- int length;
-
- while (len > 0) {
- hci = (struct hci_s *)buf;
-
- if (hci->cmd_evt != cpu_to_be16(WIMAX_RX_SDU)) {
- netdev_err(dev, "Wrong cmd_evt(0x%04X)\n",
- be16_to_cpu(hci->cmd_evt));
- break;
- }
-
- length = be16_to_cpu(hci->length);
- gdm_wimax_netif_rx(dev, hci->data, length);
-
- if (length & 0x3) {
- /* Add padding size */
- length += HCI_PADDING_BYTE - (length & 0x3);
- }
-
- length += HCI_HEADER_SIZE + HCI_RESERVED_BYTE;
- len -= length;
- buf += length;
- }
-}
-
-static void gdm_wimax_transmit_pkt(struct net_device *dev, char *buf, int len)
-{
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- struct nic *nic = netdev_priv(dev);
- #endif
- u16 cmd_evt, cmd_len;
-
- /* This code is added for certain rx packet to be ignored. */
- if (len == 0)
- return;
-
- cmd_evt = be16_to_cpup((const __be16 *)&buf[0]);
- cmd_len = be16_to_cpup((const __be16 *)&buf[2]);
-
- if (len < cmd_len + HCI_HEADER_SIZE) {
- if (len)
- netdev_err(dev, "%s: invalid length [%d/%d]\n",
- __func__, cmd_len + HCI_HEADER_SIZE, len);
- return;
- }
-
- switch (cmd_evt) {
- case WIMAX_RX_SDU_AGGR:
- gdm_wimax_transmit_aggr_pkt(dev, &buf[HCI_HEADER_SIZE],
- cmd_len);
- break;
- case WIMAX_RX_SDU:
- gdm_wimax_netif_rx(dev, &buf[HCI_HEADER_SIZE], cmd_len);
- break;
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- case WIMAX_EVT_MODEM_REPORT:
- gdm_recv_qos_hci_packet(nic, buf, len);
- break;
- #endif
- case WIMAX_SDU_TX_FLOW:
- if (buf[4] == 0) {
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
- } else if (buf[4] == 1) {
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
- }
- break;
- default:
- gdm_wimax_event_send(dev, buf, len);
- break;
- }
-}
-
-static void rx_complete(void *arg, void *data, int len)
-{
- struct nic *nic = arg;
-
- gdm_wimax_transmit_pkt(nic->netdev, data, len);
- gdm_wimax_rcv_with_cb(nic, rx_complete, nic);
-}
-
-static void prepare_rx_complete(void *arg, void *data, int len)
-{
- struct nic *nic = arg;
- int ret;
-
- ret = gdm_wimax_get_prepared_info(nic->netdev, data, len);
- if (ret == 1) {
- gdm_wimax_rcv_with_cb(nic, rx_complete, nic);
- } else {
- if (ret < 0)
- netdev_err(nic->netdev,
- "get_prepared_info failed(%d)\n", ret);
- gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic);
- }
-}
-
-static void start_rx_proc(struct nic *nic)
-{
- gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic);
-}
-
-static struct net_device_ops gdm_netdev_ops = {
- .ndo_open = gdm_wimax_open,
- .ndo_stop = gdm_wimax_close,
- .ndo_set_config = gdm_wimax_set_config,
- .ndo_start_xmit = gdm_wimax_tx,
- .ndo_set_mac_address = gdm_wimax_set_mac_addr,
- .ndo_do_ioctl = gdm_wimax_ioctl,
-};
-
-int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev)
-{
- struct nic *nic = NULL;
- struct net_device *dev;
- int ret;
-
- dev = alloc_netdev(sizeof(*nic), "wm%d", NET_NAME_UNKNOWN,
- ether_setup);
-
- if (!dev) {
- pr_err("alloc_etherdev failed\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(dev, pdev);
- dev->mtu = 1400;
- dev->netdev_ops = &gdm_netdev_ops;
- dev->flags &= ~IFF_MULTICAST;
- memcpy(dev->dev_addr, gdm_wimax_macaddr, sizeof(gdm_wimax_macaddr));
-
- nic = netdev_priv(dev);
- nic->netdev = dev;
- nic->phy_dev = phy_dev;
- phy_dev->netdev = dev;
-
- /* event socket init */
- ret = gdm_wimax_event_init();
- if (ret < 0) {
- pr_err("Cannot create event.\n");
- goto cleanup;
- }
-
- ret = register_netdev(dev);
- if (ret)
- goto cleanup;
-
- netif_carrier_off(dev);
-
-#ifdef CONFIG_WIMAX_GDM72XX_QOS
- gdm_qos_init(nic);
-#endif
-
- start_rx_proc(nic);
-
- /* Prepare WiMax device */
- gdm_wimax_prepare_device(dev);
-
- return 0;
-
-cleanup:
- pr_err("register_netdev failed\n");
- free_netdev(dev);
- return ret;
-}
-
-void unregister_wimax_device(struct phy_dev *phy_dev)
-{
- struct nic *nic = netdev_priv(phy_dev->netdev);
- struct fsm_s *fsm = (struct fsm_s *)nic->sdk_data[SIOC_DATA_FSM].buf;
-
- if (fsm)
- fsm->m_status = M_INIT;
- unregister_netdev(nic->netdev);
-
- gdm_wimax_event_exit();
-
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
- gdm_qos_release_list(nic);
-#endif
-
- gdm_wimax_cleanup_ioctl(phy_dev->netdev);
-
- free_netdev(nic->netdev);
-}
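
For reference, the HCI framing used throughout the removed gdm_wimax.c (for example in gdm_wimax_ind_if_updown()) is a 4-byte header, a big-endian cmd/event code plus a big-endian payload length, followed by the payload bytes. Below is a minimal standalone sketch of that framing in plain userspace C; the constants mirror the removed hci.h and nothing here is kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HCI_HEADER_SIZE  4
#define WIMAX_IF_UPDOWN  0x8F02
#define WIMAX_IF_UP      1

/* Build |cmd_evt (be16)|length (be16)|payload...| as the driver did. */
static size_t hci_build(uint8_t *out, uint16_t cmd_evt,
                        const uint8_t *payload, uint16_t len)
{
        out[0] = cmd_evt >> 8;
        out[1] = cmd_evt & 0xff;
        out[2] = len >> 8;
        out[3] = len & 0xff;
        memcpy(out + HCI_HEADER_SIZE, payload, len);
        return HCI_HEADER_SIZE + len;
}

int main(void)
{
        uint8_t frame[32];
        uint8_t up_down = WIMAX_IF_UP;
        size_t i, n = hci_build(frame, WIMAX_IF_UPDOWN, &up_down,
                                sizeof(up_down));

        for (i = 0; i < n; i++)
                printf("%02x ", (unsigned)frame[i]);
        printf("\n");           /* 8f 02 00 01 01 */
        return 0;
}

The interface-up indication sent by gdm_wimax_ind_if_updown() is exactly this five-byte frame.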
diff --git a/drivers/staging/gdm72xx/gdm_wimax.h b/drivers/staging/gdm72xx/gdm_wimax.h
deleted file mode 100644
index 3330cd798c69..000000000000
--- a/drivers/staging/gdm72xx/gdm_wimax.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_WIMAX_H__
-#define __GDM72XX_GDM_WIMAX_H__
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include "wm_ioctl.h"
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
-#include "gdm_qos.h"
-#endif
-
-#define DRIVER_VERSION "3.2.3"
-
-struct phy_dev {
- void *priv_dev;
- struct net_device *netdev;
- int (*send_func)(void *priv_dev, void *data, int len,
- void (*cb)(void *cb_data), void *cb_data);
- int (*rcv_func)(void *priv_dev,
- void (*cb)(void *cb_data, void *data, int len),
- void *cb_data);
-};
-
-struct nic {
- struct net_device *netdev;
- struct phy_dev *phy_dev;
- struct data_s sdk_data[SIOC_DATA_MAX];
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
- struct qos_cb_s qos;
-#endif
-};
-
-int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev);
-int gdm_wimax_send_tx(struct sk_buff *skb, struct net_device *dev);
-void unregister_wimax_device(struct phy_dev *phy_dev);
-
-#endif /* __GDM72XX_GDM_WIMAX_H__ */
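
The struct phy_dev above was the seam between this network layer and the bus drivers: a transport supplies asynchronous send_func/rcv_func primitives and gets the net_device pointer back via register_wimax_device(). The sketch below is a self-contained userspace mock of that callback shape, with completion happening inline and the net_device field omitted; it mirrors the function-pointer signatures from the removed header but is not the kernel structure itself.

#include <stdio.h>

/* Mock of the removed struct phy_dev's callback shape (userspace only). */
struct mock_phy_dev {
        void *priv_dev;
        int (*send_func)(void *priv_dev, void *data, int len,
                         void (*cb)(void *cb_data), void *cb_data);
        int (*rcv_func)(void *priv_dev,
                        void (*cb)(void *cb_data, void *data, int len),
                        void *cb_data);
};

static void tx_done(void *cb_data)
{
        printf("tx complete (%s)\n", (const char *)cb_data);
}

static int dummy_send(void *priv, void *data, int len,
                      void (*cb)(void *cb_data), void *cb_data)
{
        printf("send %d bytes\n", len);
        if (cb)
                cb(cb_data);            /* complete immediately in the mock */
        return 0;
}

static void rx_cb(void *cb_data, void *data, int len)
{
        printf("rx %d bytes\n", len);
}

static int dummy_rcv(void *priv,
                     void (*cb)(void *cb_data, void *data, int len),
                     void *cb_data)
{
        static unsigned char pkt[4] = { 0x8f, 0x02, 0x00, 0x00 };

        cb(cb_data, pkt, sizeof(pkt));  /* hand a fake frame upward */
        return 0;
}

int main(void)
{
        char msg[] = "abc";
        struct mock_phy_dev phy = {
                .send_func = dummy_send,
                .rcv_func  = dummy_rcv,
        };

        phy.send_func(phy.priv_dev, msg, 3, tx_done, msg);
        phy.rcv_func(phy.priv_dev, rx_cb, NULL);
        return 0;
}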
diff --git a/drivers/staging/gdm72xx/hci.h b/drivers/staging/gdm72xx/hci.h
deleted file mode 100644
index 10a6bfa6e998..000000000000
--- a/drivers/staging/gdm72xx/hci.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_HCI_H__
-#define __GDM72XX_HCI_H__
-
-#define HCI_HEADER_SIZE 4
-#define HCI_VALUE_OFFS (HCI_HEADER_SIZE)
-#define HCI_MAX_PACKET 2048
-#define HCI_MAX_PARAM (HCI_MAX_PACKET-HCI_HEADER_SIZE)
-#define HCI_MAX_TLV 32
-
-/* CMD-EVT */
-
-/* Category 0 */
-#define WIMAX_RESET 0x0000
-#define WIMAX_SET_INFO 0x0001
-#define WIMAX_GET_INFO 0x0002
-#define WIMAX_GET_INFO_RESULT 0x8003
-#define WIMAX_RADIO_OFF 0x0004
-#define WIMAX_RADIO_ON 0x0006
-#define WIMAX_WIMAX_RESET 0x0007 /* Is this still here */
-
-/* Category 1 */
-#define WIMAX_NET_ENTRY 0x0100
-#define WIMAX_NET_DISCONN 0x0102
-#define WIMAX_ENTER_SLEEP 0x0103
-#define WIMAX_EXIT_SLEEP 0x0104
-#define WIMAX_ENTER_IDLE 0x0105
-#define WIMAX_EXIT_IDLE 0x0106
-#define WIMAX_MODE_CHANGE 0x8108
-#define WIMAX_HANDOVER 0x8109 /* obsolete */
-#define WIMAX_SCAN 0x010d
-#define WIMAX_SCAN_COMPLETE 0x810e
-#define WIMAX_SCAN_RESULT 0x810f
-#define WIMAX_CONNECT 0x0110
-#define WIMAX_CONNECT_START 0x8111
-#define WIMAX_CONNECT_COMPLETE 0x8112
-#define WIMAX_ASSOC_START 0x8113
-#define WIMAX_ASSOC_COMPLETE 0x8114
-#define WIMAX_DISCONN_IND 0x8115
-#define WIMAX_ENTRY_IND 0x8116
-#define WIMAX_HO_START 0x8117
-#define WIMAX_HO_COMPLETE 0x8118
-#define WIMAX_RADIO_STATE_IND 0x8119
-#define WIMAX_IP_RENEW_IND 0x811a
-#define WIMAX_DISCOVER_NSP 0x011d
-#define WIMAX_DISCOVER_NSP_RESULT 0x811e
-#define WIMAX_SDU_TX_FLOW 0x8125
-
-/* Category 2 */
-#define WIMAX_TX_EAP 0x0200
-#define WIMAX_RX_EAP 0x8201
-#define WIMAX_TX_SDU 0x0202
-#define WIMAX_RX_SDU 0x8203
-#define WIMAX_RX_SDU_AGGR 0x8204
-#define WIMAX_TX_SDU_AGGR 0x0205
-
-/* Category 3 */
-#define WIMAX_DM_CMD 0x030a
-#define WIMAX_DM_RSP 0x830b
-
-#define WIMAX_CLI_CMD 0x030c
-#define WIMAX_CLI_RSP 0x830d
-
-#define WIMAX_DL_IMAGE 0x0310
-#define WIMAX_DL_IMAGE_STATUS 0x8311
-#define WIMAX_UL_IMAGE 0x0312
-#define WIMAX_UL_IMAGE_RESULT 0x8313
-#define WIMAX_UL_IMAGE_STATUS 0x0314
-#define WIMAX_EVT_MODEM_REPORT 0x8325
-
-/* Category 0xF */
-#define WIMAX_FSM_UPDATE 0x8F01
-#define WIMAX_IF_UPDOWN 0x8F02
-#define WIMAX_IF_UP 1
-#define WIMAX_IF_DOWN 2
-
-/* WIMAX mode */
-#define W_NULL 0
-#define W_STANDBY 1
-#define W_OOZ 2
-#define W_AWAKE 3
-#define W_IDLE 4
-#define W_SLEEP 5
-#define W_WAIT 6
-
-#define W_NET_ENTRY_RNG 0x80
-#define W_NET_ENTRY_SBC 0x81
-#define W_NET_ENTRY_PKM 0x82
-#define W_NET_ENTRY_REG 0x83
-#define W_NET_ENTRY_DSX 0x84
-
-#define W_NET_ENTRY_RNG_FAIL 0x1100100
-#define W_NET_ENTRY_SBC_FAIL 0x1100200
-#define W_NET_ENTRY_PKM_FAIL 0x1102000
-#define W_NET_ENTRY_REG_FAIL 0x1103000
-#define W_NET_ENTRY_DSX_FAIL 0x1104000
-
-/* Scan Type */
-#define W_SCAN_ALL_CHANNEL 0
-#define W_SCAN_ALL_SUBSCRIPTION 1
-#define W_SCAN_SPECIFIED_SUBSCRIPTION 2
-
-/* TLV
- *
- * [31:31] indicates the type is composite.
- * [30:16] is the length of the type. 0 length means length is variable.
- * [15:0] is the actual type.
- */
-#define TLV_L(x) (((x) >> 16) & 0xff)
-#define TLV_T(x) ((x) & 0xff)
-#define TLV_COMPOSITE(x) ((x) >> 31)
-
-/* GENERAL */
-#define T_MAC_ADDRESS (0x00 | (6 << 16))
-#define T_BSID (0x01 | (6 << 16))
-#define T_MSK (0x02 | (64 << 16))
-#define T_RSSI_THRSHLD (0x03 | (1 << 16))
-#define T_FREQUENCY (0x04 | (4 << 16))
-#define T_CONN_CS_TYPE (0x05 | (1 << 16))
-#define T_HOST_IP_VER (0x06 | (1 << 16))
-#define T_STBY_SCAN_INTERVAL (0x07 | (4 << 16))
-#define T_OOZ_SCAN_INTERVAL (0x08 | (4 << 16))
-#define T_IMEI (0x09 | (8 << 16))
-#define T_PID (0x0a | (12 << 16))
-#define T_CAPABILITY (0x1a | (4 << 16))
-#define T_RELEASE_NUMBER (0x1b | (4 << 16))
-#define T_DRIVER_REVISION (0x1c | (4 << 16))
-#define T_FW_REVISION (0x1d | (4 << 16))
-#define T_MAC_HW_REVISION (0x1e | (4 << 16))
-#define T_PHY_HW_REVISION (0x1f | (4 << 16))
-
-/* HANDOVER */
-#define T_SCAN_INTERVAL (0x20 | (1 << 16))
-#define T_RSC_RETAIN_TIME (0x2f | (2 << 16))
-
-/* SLEEP */
-#define T_TYPE1_ISW (0x40 | (1 << 16))
-#define T_SLP_START_TO (0x4a | (2 << 16))
-
-/* IDLE */
-#define T_IDLE_MODE_TO (0x50 | (2 << 16))
-#define T_IDLE_START_TO (0x54 | (2 << 16))
-
-/* MONITOR */
-#define T_RSSI (0x60 | (1 << 16))
-#define T_CINR (0x61 | (1 << 16))
-#define T_TX_POWER (0x6a | (1 << 16))
-#define T_CUR_FREQ (0x7f | (4 << 16))
-
-
-/* WIMAX */
-#define T_MAX_SUBSCRIPTION (0xa1 | (1 << 16))
-#define T_MAX_SF (0xa2 | (1 << 16))
-#define T_PHY_TYPE (0xa3 | (1 << 16))
-#define T_PKM (0xa4 | (1 << 16))
-#define T_AUTH_POLICY (0xa5 | (1 << 16))
-#define T_CS_TYPE (0xa6 | (2 << 16))
-#define T_VENDOR_NAME (0xa7 | (0 << 16))
-#define T_MOD_NAME (0xa8 | (0 << 16))
-#define T_PACKET_FILTER (0xa9 | (1 << 16))
-#define T_NSP_CHANGE_COUNT (0xaa | (4 << 16))
-#define T_RADIO_STATE (0xab | (1 << 16))
-#define T_URI_CONTACT_TYPE (0xac | (1 << 16))
-#define T_URI_TEXT (0xad | (0 << 16))
-#define T_URI (0xae | (0 << 16))
-#define T_ENABLE_AUTH (0xaf | (1 << 16))
-#define T_TIMEOUT (0xb0 | (2 << 16))
-#define T_RUN_MODE (0xb1 | (1 << 16))
-#define T_OMADMT_VER (0xb2 | (4 << 16))
-/* This is measured in seconds from 00:00:00 GMT January 1, 1970. */
-#define T_RTC_TIME (0xb3 | (4 << 16))
-#define T_CERT_STATUS (0xb4 | (4 << 16))
-#define T_CERT_MASK (0xb5 | (4 << 16))
-#define T_EMSK (0xb6 | (64 << 16))
-
-/* Subscription TLV */
-#define T_SUBSCRIPTION_LIST (0xd1 | (0 << 16) | (1 << 31))
-#define T_H_NSPID (0xd2 | (3 << 16))
-#define T_NSP_NAME (0xd3 | (0 << 16))
-#define T_SUBSCRIPTION_NAME (0xd4 | (0 << 16))
-#define T_SUBSCRIPTION_FLAG (0xd5 | (2 << 16))
-#define T_V_NSPID (0xd6 | (3 << 16))
-#define T_NAP_ID (0xd7 | (3 << 16))
-#define T_PREAMBLES (0xd8 | (15 << 16))
-#define T_BW (0xd9 | (4 << 16))
-#define T_FFTSIZE (0xda | (4 << 16))
-#define T_DUPLEX_MODE (0xdb | (4 << 16))
-
-/* T_CAPABILITY */
-#define T_CAPABILITY_MULTI_CS (1 << 0)
-#define T_CAPABILITY_WIMAX (1 << 1)
-#define T_CAPABILITY_QOS (1 << 2)
-#define T_CAPABILITY_AGGREGATION (1 << 3)
-
-struct hci_s {
- __be16 cmd_evt;
- __be16 length;
- u8 data[0];
-} __packed;
-
-#endif /* __GDM72XX_HCI_H__ */
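
The TLV type constants above pack three fields into one u32: bit 31 flags a composite type, bits 30:16 carry the fixed length (0 meaning variable, and the TLV_L() accessor only reads the low byte of that field), and the low bits carry the type code. A small standalone decoder follows, with the accessor macros copied from the removed header (the composite bit is written with an unsigned literal here to keep the shift well-defined in standalone C).

#include <stdint.h>
#include <stdio.h>

/* Accessors and a few type constants from the removed hci.h. */
#define TLV_L(x)          (((x) >> 16) & 0xff)
#define TLV_T(x)          ((x) & 0xff)
#define TLV_COMPOSITE(x)  ((x) >> 31)

#define T_MAC_ADDRESS        (0x00 | (6 << 16))
#define T_CAPABILITY         (0x1a | (4 << 16))
#define T_SUBSCRIPTION_LIST  (0xd1 | (0 << 16) | (1U << 31))

static void show(const char *name, uint32_t t)
{
        printf("%-20s type=0x%02x len=%u composite=%u\n", name,
               (unsigned)TLV_T(t), (unsigned)TLV_L(t),
               (unsigned)TLV_COMPOSITE(t));
}

int main(void)
{
        show("T_MAC_ADDRESS", T_MAC_ADDRESS);             /* type 0x00, len 6 */
        show("T_CAPABILITY", T_CAPABILITY);               /* type 0x1a, len 4 */
        show("T_SUBSCRIPTION_LIST", T_SUBSCRIPTION_LIST); /* composite, len 0 */
        return 0;
}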
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
deleted file mode 100644
index f3cdaa6c468c..000000000000
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <net/netlink.h>
-#include <asm/byteorder.h>
-#include <net/sock.h>
-#include "netlink_k.h"
-
-#if !defined(NLMSG_HDRLEN)
-#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
-#endif
-
-#define ND_MAX_GROUP 30
-#define ND_IFINDEX_LEN sizeof(int)
-#define ND_NLMSG_SPACE(len) (nlmsg_total_size(len) + ND_IFINDEX_LEN)
-#define ND_NLMSG_DATA(nlh) \
- ((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN))
-#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
-#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
-#define ND_NLMSG_IFIDX(nlh) nlmsg_data(nlh)
-#define ND_MAX_MSG_LEN 8096
-
-#if defined(DEFINE_MUTEX)
-static DEFINE_MUTEX(netlink_mutex);
-#else
-static struct semaphore netlink_mutex;
-#define mutex_lock(x) down(x)
-#define mutex_unlock(x) up(x)
-#endif
-
-static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
-
-static void netlink_rcv_cb(struct sk_buff *skb)
-{
- struct nlmsghdr *nlh;
- struct net_device *dev;
- u32 mlen;
- void *msg;
- int ifindex;
-
- if (skb->len >= NLMSG_HDRLEN) {
- nlh = (struct nlmsghdr *)skb->data;
-
- if (skb->len < nlh->nlmsg_len ||
- nlh->nlmsg_len > ND_MAX_MSG_LEN) {
- netdev_err(skb->dev, "Invalid length (%d,%d)\n",
- skb->len, nlh->nlmsg_len);
- return;
- }
-
- memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN);
- msg = ND_NLMSG_DATA(nlh);
- mlen = ND_NLMSG_R_LEN(nlh);
-
- if (rcv_cb) {
- dev = dev_get_by_index(&init_net, ifindex);
- if (dev) {
- rcv_cb(dev, nlh->nlmsg_type, msg, mlen);
- dev_put(dev);
- } else
- netdev_err(skb->dev,
- "dev_get_by_index(%d) is not found.\n",
- ifindex);
- } else {
- netdev_err(skb->dev, "Unregistered Callback\n");
- }
- }
-}
-
-static void netlink_rcv(struct sk_buff *skb)
-{
- mutex_lock(&netlink_mutex);
- netlink_rcv_cb(skb);
- mutex_unlock(&netlink_mutex);
-}
-
-struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
- void *msg, int len))
-{
- struct sock *sock;
- struct netlink_kernel_cfg cfg = {
- .input = netlink_rcv,
- };
-
-#if !defined(DEFINE_MUTEX)
- init_MUTEX(&netlink_mutex);
-#endif
-
- sock = netlink_kernel_create(&init_net, unit, &cfg);
-
- if (sock)
- rcv_cb = cb;
-
- return sock;
-}
-
-void netlink_exit(struct sock *sock)
-{
- netlink_kernel_release(sock);
-}
-
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
-{
- static u32 seq;
- struct sk_buff *skb = NULL;
- struct nlmsghdr *nlh;
- int ret = 0;
-
- if (group > ND_MAX_GROUP) {
- pr_err("Group %d is invalid.\n", group);
- pr_err("Valid group is 0 ~ %d.\n", ND_MAX_GROUP);
- return -EINVAL;
- }
-
- skb = nlmsg_new(len, GFP_ATOMIC);
- if (!skb) {
- pr_err("netlink_broadcast ret=%d\n", ret);
- return -ENOMEM;
- }
-
- seq++;
- nlh = nlmsg_put(skb, 0, seq, type, len, 0);
- if (!nlh) {
- kfree_skb(skb);
- return -EMSGSIZE;
- }
- memcpy(nlmsg_data(nlh), msg, len);
-
- NETLINK_CB(skb).portid = 0;
- NETLINK_CB(skb).dst_group = 0;
-
- ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
-
- if (!ret)
- return len;
- if (ret != -ESRCH) {
- pr_err("netlink_broadcast g=%d, t=%d, l=%d, r=%d\n",
- group, type, len, ret);
- }
- ret = 0;
- return ret;
-}
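
On the userspace-to-kernel path, netlink_rcv_cb() above expected an ordinary struct nlmsghdr whose payload starts with the target interface index (a host-order int, ND_IFINDEX_LEN) followed by the actual message. The sketch below illustrates that layout only; NETLINK_WIMAX comes from the removed wm_ioctl.h, while the interface index, message type and payload bytes are made-up placeholders, and the send will simply fail if no such netlink family is registered.

#include <linux/netlink.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define NETLINK_WIMAX 31        /* from the removed wm_ioctl.h */

int main(void)
{
        union {
                struct nlmsghdr hdr;
                char buf[NLMSG_SPACE(64)];
        } req;
        struct nlmsghdr *nlh = &req.hdr;
        struct sockaddr_nl dst = { .nl_family = AF_NETLINK }; /* pid 0 = kernel */
        int ifindex = 3;                        /* hypothetical wm0 ifindex */
        unsigned char payload[] = { 0x00, 0x01 };
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_WIMAX);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&req, 0, sizeof(req));
        nlh->nlmsg_len  = NLMSG_LENGTH(sizeof(ifindex) + sizeof(payload));
        nlh->nlmsg_type = 0x0001;               /* hypothetical message type */

        /* Payload layout expected by netlink_rcv_cb(): ifindex, then data. */
        memcpy(NLMSG_DATA(nlh), &ifindex, sizeof(ifindex));
        memcpy((char *)NLMSG_DATA(nlh) + sizeof(ifindex),
               payload, sizeof(payload));

        if (sendto(fd, nlh, nlh->nlmsg_len, 0,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("sendto");

        close(fd);
        return 0;
}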
diff --git a/drivers/staging/gdm72xx/netlink_k.h b/drivers/staging/gdm72xx/netlink_k.h
deleted file mode 100644
index 1fe7198d539e..000000000000
--- a/drivers/staging/gdm72xx/netlink_k.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_NETLINK_K_H__
-#define __GDM72XX_NETLINK_K_H__
-
-#include <linux/netdevice.h>
-#include <net/sock.h>
-
-struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
- void *msg, int len));
-void netlink_exit(struct sock *sock);
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
-
-#endif /* __GDM72XX_NETLINK_K_H__ */
diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c
deleted file mode 100644
index ba94b5f13bb2..000000000000
--- a/drivers/staging/gdm72xx/sdio_boot.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-
-#include <linux/firmware.h>
-
-#include "gdm_sdio.h"
-#include "sdio_boot.h"
-
-#define TYPE_A_HEADER_SIZE 4
-#define TYPE_A_LOOKAHEAD_SIZE 16
-#define YMEM0_SIZE 0x8000 /* 32kbytes */
-#define DOWNLOAD_SIZE (YMEM0_SIZE - TYPE_A_HEADER_SIZE)
-
-#define FW_DIR "gdm72xx/"
-#define FW_KRN "gdmskrn.bin"
-#define FW_RFS "gdmsrfs.bin"
-
-static u8 *tx_buf;
-
-static int ack_ready(struct sdio_func *func)
-{
- unsigned long wait = jiffies + HZ;
- u8 val;
- int ret;
-
- while (time_before(jiffies, wait)) {
- val = sdio_readb(func, 0x13, &ret);
- if (val & 0x01)
- return 1;
- schedule();
- }
-
- return 0;
-}
-
-static int download_image(struct sdio_func *func, const char *img_name)
-{
- int ret = 0, len, pno;
- u8 *buf = tx_buf;
- loff_t pos = 0;
- int img_len;
- const struct firmware *firm;
-
- ret = request_firmware(&firm, img_name, &func->dev);
- if (ret < 0) {
- dev_err(&func->dev,
- "requesting firmware %s failed with error %d\n",
- img_name, ret);
- return ret;
- }
-
- buf = kmalloc(DOWNLOAD_SIZE + TYPE_A_HEADER_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- img_len = firm->size;
-
- if (img_len <= 0) {
- ret = -1;
- goto out;
- }
-
- pno = 0;
- while (img_len > 0) {
- if (img_len > DOWNLOAD_SIZE) {
- len = DOWNLOAD_SIZE;
- buf[3] = 0;
- } else {
- len = img_len; /* the last packet */
- buf[3] = 2;
- }
-
- buf[0] = len & 0xff;
- buf[1] = (len >> 8) & 0xff;
- buf[2] = (len >> 16) & 0xff;
-
- memcpy(buf+TYPE_A_HEADER_SIZE, firm->data + pos, len);
- ret = sdio_memcpy_toio(func, 0, buf, len + TYPE_A_HEADER_SIZE);
- if (ret < 0) {
- dev_err(&func->dev,
- "send image error: packet number = %d ret = %d\n",
- pno, ret);
- goto out;
- }
-
- if (buf[3] == 2) /* The last packet */
- break;
- if (!ack_ready(func)) {
- ret = -EIO;
- dev_err(&func->dev, "Ack is not ready.\n");
- goto out;
- }
- ret = sdio_memcpy_fromio(func, buf, 0, TYPE_A_LOOKAHEAD_SIZE);
- if (ret < 0) {
- dev_err(&func->dev,
- "receive ack error: packet number = %d ret = %d\n",
- pno, ret);
- goto out;
- }
- sdio_writeb(func, 0x01, 0x13, &ret);
- sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */
-
- img_len -= DOWNLOAD_SIZE;
- pos += DOWNLOAD_SIZE;
- pno++;
- }
-
-out:
- kfree(buf);
- return ret;
-}
-
-int sdio_boot(struct sdio_func *func)
-{
- int ret;
- const char *krn_name = FW_DIR FW_KRN;
- const char *rfs_name = FW_DIR FW_RFS;
-
- tx_buf = kmalloc(YMEM0_SIZE, GFP_KERNEL);
- if (!tx_buf)
- return -ENOMEM;
-
- ret = download_image(func, krn_name);
- if (ret)
- goto restore_fs;
- dev_info(&func->dev, "GCT: Kernel download success.\n");
-
- ret = download_image(func, rfs_name);
- if (ret)
- goto restore_fs;
- dev_info(&func->dev, "GCT: Filesystem download success.\n");
-
-restore_fs:
- kfree(tx_buf);
- return ret;
-}
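
The SDIO download path above frames each firmware block with a 4-byte "type A" header: a 24-bit little-endian payload length plus a flag byte that is 0 for intermediate chunks and 2 for the final one, with payloads capped at DOWNLOAD_SIZE. A standalone sketch of that chunking, using a fake in-memory image in place of the requested firmware:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TYPE_A_HEADER_SIZE 4
#define YMEM0_SIZE         0x8000
#define DOWNLOAD_SIZE      (YMEM0_SIZE - TYPE_A_HEADER_SIZE)

/* Frame one chunk starting at pos; returns the payload length used. */
static size_t frame_chunk(uint8_t *out, const uint8_t *img, size_t img_len,
                          size_t pos)
{
        size_t left = img_len - pos;
        size_t len = left > DOWNLOAD_SIZE ? DOWNLOAD_SIZE : left;

        out[0] = len & 0xff;            /* 24-bit little-endian length */
        out[1] = (len >> 8) & 0xff;
        out[2] = (len >> 16) & 0xff;
        out[3] = (left > DOWNLOAD_SIZE) ? 0 : 2;  /* 2 marks the last chunk */
        memcpy(out + TYPE_A_HEADER_SIZE, img + pos, len);
        return len;
}

int main(void)
{
        static uint8_t img[100000];     /* stand-in firmware image */
        static uint8_t chunk[DOWNLOAD_SIZE + TYPE_A_HEADER_SIZE];
        size_t pos = 0;
        int pno = 0;

        while (pos < sizeof(img)) {
                size_t len = frame_chunk(chunk, img, sizeof(img), pos);

                printf("packet %d: %zu payload bytes, last=%d\n",
                       pno++, len, chunk[3] == 2);
                pos += len;
        }
        return 0;
}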
diff --git a/drivers/staging/gdm72xx/sdio_boot.h b/drivers/staging/gdm72xx/sdio_boot.h
deleted file mode 100644
index e0800c6fe2fd..000000000000
--- a/drivers/staging/gdm72xx/sdio_boot.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_SDIO_BOOT_H__
-#define __GDM72XX_SDIO_BOOT_H__
-
-struct sdio_func;
-
-int sdio_boot(struct sdio_func *func);
-
-#endif /* __GDM72XX_SDIO_BOOT_H__ */
diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c
deleted file mode 100644
index 39ca34031a6b..000000000000
--- a/drivers/staging/gdm72xx/usb_boot.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/usb.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/firmware.h>
-
-#include <asm/byteorder.h>
-#include "gdm_usb.h"
-#include "usb_boot.h"
-
-#define DN_KERNEL_MAGIC_NUMBER 0x10760001
-#define DN_ROOTFS_MAGIC_NUMBER 0x10760002
-
-#define DOWNLOAD_SIZE 1024
-
-#define MAX_IMG_CNT 16
-#define FW_DIR "gdm72xx/"
-#define FW_UIMG "gdmuimg.bin"
-#define FW_KERN "zImage"
-#define FW_FS "ramdisk.jffs2"
-
-struct dn_header {
- __be32 magic_num;
- __be32 file_size;
-};
-
-struct img_header {
- u32 magic_code;
- u32 count;
- u32 len;
- u32 offset[MAX_IMG_CNT];
- char hostname[32];
- char date[32];
-};
-
-struct fw_info {
- u32 id;
- u32 len;
- u32 kernel_len;
- u32 rootfs_len;
- u32 kernel_offset;
- u32 rootfs_offset;
- u32 fw_ver;
- u32 mac_ver;
- char hostname[32];
- char userid[16];
- char date[32];
- char user_desc[128];
-};
-
-static void array_le32_to_cpu(u32 *arr, int num)
-{
- int i;
-
- for (i = 0; i < num; i++, arr++)
- le32_to_cpus(arr);
-}
-
-static u8 *tx_buf;
-
-static int gdm_wibro_send(struct usb_device *usbdev, void *data, int len)
-{
- int ret;
- int actual;
-
- ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), data, len,
- &actual, 1000);
-
- if (ret < 0) {
- dev_err(&usbdev->dev, "Error : usb_bulk_msg ( result = %d )\n",
- ret);
- return ret;
- }
- return 0;
-}
-
-static int gdm_wibro_recv(struct usb_device *usbdev, void *data, int len)
-{
- int ret;
- int actual;
-
- ret = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, 2), data, len,
- &actual, 5000);
-
- if (ret < 0) {
- dev_err(&usbdev->dev,
- "Error : usb_bulk_msg(recv) ( result = %d )\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int download_image(struct usb_device *usbdev,
- const struct firmware *firm,
- loff_t pos, u32 img_len, u32 magic_num)
-{
- struct dn_header h;
- int ret = 0;
- u32 size;
-
- size = ALIGN(img_len, DOWNLOAD_SIZE);
- h.magic_num = cpu_to_be32(magic_num);
- h.file_size = cpu_to_be32(size);
-
- ret = gdm_wibro_send(usbdev, &h, sizeof(h));
- if (ret < 0)
- return ret;
-
- while (img_len > 0) {
- if (img_len > DOWNLOAD_SIZE)
- size = DOWNLOAD_SIZE;
- else
- size = img_len; /* the last chunk of data */
-
- memcpy(tx_buf, firm->data + pos, size);
- ret = gdm_wibro_send(usbdev, tx_buf, size);
-
- if (ret < 0)
- return ret;
-
- img_len -= size;
- pos += size;
- }
-
- return ret;
-}
-
-int usb_boot(struct usb_device *usbdev, u16 pid)
-{
- int i, ret = 0;
- struct img_header hdr;
- struct fw_info fw_info;
- loff_t pos = 0;
- char *img_name = FW_DIR FW_UIMG;
- const struct firmware *firm;
-
- ret = request_firmware(&firm, img_name, &usbdev->dev);
- if (ret < 0) {
- dev_err(&usbdev->dev,
- "requesting firmware %s failed with error %d\n",
- img_name, ret);
- return ret;
- }
-
- tx_buf = kmalloc(DOWNLOAD_SIZE, GFP_KERNEL);
- if (!tx_buf) {
- release_firmware(firm);
- return -ENOMEM;
- }
-
- if (firm->size < sizeof(hdr)) {
- dev_err(&usbdev->dev, "Cannot read the image info.\n");
- ret = -EIO;
- goto out;
- }
- memcpy(&hdr, firm->data, sizeof(hdr));
-
- array_le32_to_cpu((u32 *)&hdr, 19);
-
- if (hdr.count > MAX_IMG_CNT) {
- dev_err(&usbdev->dev, "Too many images. %d\n", hdr.count);
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < hdr.count; i++) {
- if (hdr.offset[i] > hdr.len) {
- dev_err(&usbdev->dev,
- "Invalid offset. Entry = %d Offset = 0x%08x Image length = 0x%08x\n",
- i, hdr.offset[i], hdr.len);
- ret = -EINVAL;
- goto out;
- }
-
- pos = hdr.offset[i];
- if (firm->size < sizeof(fw_info) + pos) {
- dev_err(&usbdev->dev, "Cannot read the FW info.\n");
- ret = -EIO;
- goto out;
- }
- memcpy(&fw_info, firm->data + pos, sizeof(fw_info));
-
- array_le32_to_cpu((u32 *)&fw_info, 8);
-
- if ((fw_info.id & 0xffff) != pid)
- continue;
-
- pos = hdr.offset[i] + fw_info.kernel_offset;
- if (firm->size < fw_info.kernel_len + pos) {
- dev_err(&usbdev->dev, "Kernel FW is too small.\n");
- goto out;
- }
-
- ret = download_image(usbdev, firm, pos, fw_info.kernel_len,
- DN_KERNEL_MAGIC_NUMBER);
- if (ret < 0)
- goto out;
- dev_info(&usbdev->dev, "GCT: Kernel download success.\n");
-
- pos = hdr.offset[i] + fw_info.rootfs_offset;
- if (firm->size < fw_info.rootfs_len + pos) {
- dev_err(&usbdev->dev, "Filesystem FW is too small.\n");
- goto out;
- }
- ret = download_image(usbdev, firm, pos, fw_info.rootfs_len,
- DN_ROOTFS_MAGIC_NUMBER);
- if (ret < 0)
- goto out;
- dev_info(&usbdev->dev, "GCT: Filesystem download success.\n");
-
- break;
- }
-
- if (i == hdr.count) {
- dev_err(&usbdev->dev, "Firmware for gsk%x is not installed.\n",
- pid);
- ret = -EINVAL;
- }
-out:
- release_firmware(firm);
- kfree(tx_buf);
- return ret;
-}
-
-/*#define GDM7205_PADDING 256 */
-#define DOWNLOAD_CHUCK 2048
-#define KERNEL_TYPE_STRING "linux"
-#define FS_TYPE_STRING "rootfs"
-
-static int em_wait_ack(struct usb_device *usbdev, int send_zlp)
-{
- int ack;
- int ret = -1;
-
- if (send_zlp) {
- /*Send ZLP*/
- ret = gdm_wibro_send(usbdev, NULL, 0);
- if (ret < 0)
- goto out;
- }
-
- /*Wait for ACK*/
- ret = gdm_wibro_recv(usbdev, &ack, sizeof(ack));
- if (ret < 0)
- goto out;
-out:
- return ret;
-}
-
-static int em_download_image(struct usb_device *usbdev, const char *img_name,
- char *type_string)
-{
- char *buf = NULL;
- loff_t pos = 0;
- int ret = 0;
- int len;
- int img_len;
- const struct firmware *firm;
- #if defined(GDM7205_PADDING)
- const int pad_size = GDM7205_PADDING;
- #else
- const int pad_size = 0;
- #endif
-
- ret = request_firmware(&firm, img_name, &usbdev->dev);
- if (ret < 0) {
- dev_err(&usbdev->dev,
- "requesting firmware %s failed with error %d\n",
- img_name, ret);
- return ret;
- }
-
- buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL);
- if (!buf) {
- release_firmware(firm);
- return -ENOMEM;
- }
-
- strcpy(buf+pad_size, type_string);
- ret = gdm_wibro_send(usbdev, buf, strlen(type_string)+pad_size);
- if (ret < 0)
- goto out;
-
- img_len = firm->size;
-
- if (img_len <= 0) {
- ret = -1;
- goto out;
- }
-
- while (img_len > 0) {
- if (img_len > DOWNLOAD_CHUCK)
- len = DOWNLOAD_CHUCK;
- else
- len = img_len; /* the last chunk of data */
-
- memcpy(buf+pad_size, firm->data + pos, len);
- ret = gdm_wibro_send(usbdev, buf, len+pad_size);
-
- if (ret < 0)
- goto out;
-
- img_len -= DOWNLOAD_CHUCK;
- pos += DOWNLOAD_CHUCK;
-
- ret = em_wait_ack(usbdev, ((len+pad_size) % 512 == 0));
- if (ret < 0)
- goto out;
- }
-
- ret = em_wait_ack(usbdev, 1);
- if (ret < 0)
- goto out;
-
-out:
- release_firmware(firm);
- kfree(buf);
-
- return ret;
-}
-
-static int em_fw_reset(struct usb_device *usbdev)
-{
- /*Send ZLP*/
- return gdm_wibro_send(usbdev, NULL, 0);
-}
-
-int usb_emergency(struct usb_device *usbdev)
-{
- int ret;
- const char *kern_name = FW_DIR FW_KERN;
- const char *fs_name = FW_DIR FW_FS;
-
- ret = em_download_image(usbdev, kern_name, KERNEL_TYPE_STRING);
- if (ret < 0)
- return ret;
- dev_err(&usbdev->dev, "GCT Emergency: Kernel download success.\n");
-
- ret = em_download_image(usbdev, fs_name, FS_TYPE_STRING);
- if (ret < 0)
- return ret;
- dev_info(&usbdev->dev, "GCT Emergency: Filesystem download success.\n");
-
- ret = em_fw_reset(usbdev);
-
- return ret;
-}
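
Each image sent by usb_boot() above is preceded by a struct dn_header: a big-endian magic number selecting kernel vs. rootfs and the image size rounded up to the 1024-byte transfer unit. A minimal sketch of building that header; the magic values and DOWNLOAD_SIZE are copied from the removed file, the image length is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define DN_KERNEL_MAGIC_NUMBER 0x10760001
#define DN_ROOTFS_MAGIC_NUMBER 0x10760002
#define DOWNLOAD_SIZE          1024

struct dn_header {
        uint8_t magic_num[4];   /* __be32 on the wire */
        uint8_t file_size[4];   /* __be32, ALIGN(img_len, DOWNLOAD_SIZE) */
};

static void put_be32(uint8_t *p, uint32_t v)
{
        p[0] = v >> 24;
        p[1] = v >> 16;
        p[2] = v >> 8;
        p[3] = v;
}

static void dn_header_fill(struct dn_header *h, uint32_t magic,
                           uint32_t img_len)
{
        uint32_t size = (img_len + DOWNLOAD_SIZE - 1) &
                        ~(uint32_t)(DOWNLOAD_SIZE - 1);   /* like ALIGN() */

        put_be32(h->magic_num, magic);
        put_be32(h->file_size, size);
}

int main(void)
{
        struct dn_header h;

        dn_header_fill(&h, DN_KERNEL_MAGIC_NUMBER, 1800000); /* fake zImage */
        printf("padded size: %u\n",
               (unsigned)((h.file_size[0] << 24) | (h.file_size[1] << 16) |
                          (h.file_size[2] << 8) | h.file_size[3]));
        return 0;
}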
diff --git a/drivers/staging/gdm72xx/usb_boot.h b/drivers/staging/gdm72xx/usb_boot.h
deleted file mode 100644
index 5bf7190377e2..000000000000
--- a/drivers/staging/gdm72xx/usb_boot.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_USB_BOOT_H__
-#define __GDM72XX_USB_BOOT_H__
-
-struct usb_device;
-
-int usb_boot(struct usb_device *usbdev, u16 pid);
-int usb_emergency(struct usb_device *usbdev);
-
-#endif /* __GDM72XX_USB_BOOT_H__ */
diff --git a/drivers/staging/gdm72xx/usb_ids.h b/drivers/staging/gdm72xx/usb_ids.h
deleted file mode 100644
index 7afb9ba5fdba..000000000000
--- a/drivers/staging/gdm72xx/usb_ids.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_USB_IDS_H__
-#define __GDM72XX_USB_IDS_H__
-
-/*You can replace vendor-ID as yours.*/
-#define GCT_VID 0x1076
-
-/*You can replace product-ID as yours.*/
-#define GCT_PID1 0x7e00
-#define GCT_PID2 0x7f00
-
-#define USB_DEVICE_ID_MATCH_DEVICE_INTERFACE \
- (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS)
-
-#define USB_DEVICE_INTF(vend, prod, intf) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE_INTERFACE, \
- .idVendor = (vend), .idProduct = (prod), .bInterfaceClass = (intf)
-
-#define EMERGENCY_PID 0x720f
-#define BL_PID_MASK 0xffc0
-
-#define USB_DEVICE_BOOTLOADER(vid, pid) \
- {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)}
-
-#define USB_DEVICE_BOOTLOADER_DRV(vid, pid) \
- {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD|B_DIFF_DL_DRV)}
-
-#define USB_DEVICE_CDC_DATA(vid, pid) \
- {USB_DEVICE_INTF((vid), (pid), USB_CLASS_CDC_DATA)}
-
-static const struct usb_device_id id_table[] = {
- USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID1),
- USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x3),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x4),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x5),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x6),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x7),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x8),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x9),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xa),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xb),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xc),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xd),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xe),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xf),
-
- USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID2),
- USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x3),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x4),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x5),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x6),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x7),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x8),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x9),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xa),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xb),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xc),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xd),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xe),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xf),
-
- {USB_DEVICE(GCT_VID, EMERGENCY_PID)},
- { }
-};
-
-#endif /* __GDM72XX_USB_IDS_H__ */
diff --git a/drivers/staging/gdm72xx/wm_ioctl.h b/drivers/staging/gdm72xx/wm_ioctl.h
deleted file mode 100644
index ed8f649c0042..000000000000
--- a/drivers/staging/gdm72xx/wm_ioctl.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_WM_IOCTL_H__
-#define __GDM72XX_WM_IOCTL_H__
-
-#if !defined(__KERNEL__)
-#include <net/if.h>
-#endif
-
-#define NETLINK_WIMAX 31
-
-#define SIOCWMIOCTL SIOCDEVPRIVATE
-
-#define SIOCG_DATA 0x8D10
-#define SIOCS_DATA 0x8D11
-
-enum {
- SIOC_DATA_FSM,
- SIOC_DATA_NETLIST,
- SIOC_DATA_CONNNSP,
- SIOC_DATA_CONNCOMP,
- SIOC_DATA_PROFILEID,
-
- SIOC_DATA_END
-};
-
-#define SIOC_DATA_MAX 16
-
-/* FSM */
-enum {
- M_INIT = 0,
- M_OPEN_OFF,
- M_OPEN_ON,
- M_SCAN,
- M_CONNECTING,
- M_CONNECTED,
- M_FSM_END,
-
- C_INIT = 0,
- C_CONNSTART,
- C_ASSOCSTART,
- C_RNG,
- C_SBC,
- C_AUTH,
- C_REG,
- C_DSX,
- C_ASSOCCOMPLETE,
- C_CONNCOMPLETE,
- C_FSM_END,
-
- D_INIT = 0,
- D_READY,
- D_LISTEN,
- D_IPACQUISITION,
-
- END_FSM
-};
-
-struct fsm_s {
- int m_status; /*main status*/
- int c_status; /*connection status*/
- int d_status; /*oma-dm status*/
-};
-
-struct data_s {
- int size;
- void *buf;
-};
-
-struct wm_req_s {
- union {
- char ifrn_name[IFNAMSIZ];
- } ifr_ifrn;
- unsigned short cmd;
- unsigned short data_id;
- struct data_s data;
-
-/* NOTE: sizeof(struct wm_req_s) must be less than sizeof(struct ifreq). */
-};
-
-#ifndef ifr_name
-#define ifr_name ifr_ifrn.ifrn_name
-#endif
-
-#endif /* __GDM72XX_WM_IOCTL_H__ */
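
The header above defines the private ioctl interface: userspace lays a struct wm_req_s over struct ifreq (hence the size note at the end of the struct) and issues SIOCWMIOCTL, i.e. SIOCDEVPRIVATE, against the WiMAX interface. Below is a sketch of a SIOCG_DATA/SIOC_DATA_FSM query; the structures are copied from the removed header, while the interface name "wm0" and the use of an AF_INET socket as the ioctl handle are assumptions for illustration.

#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SIOCWMIOCTL   SIOCDEVPRIVATE
#define SIOCG_DATA    0x8D10
#define SIOC_DATA_FSM 0

struct fsm_s {
        int m_status;   /* main status */
        int c_status;   /* connection status */
        int d_status;   /* oma-dm status */
};

struct data_s {
        int size;
        void *buf;
};

struct wm_req_s {
        union {
                char ifrn_name[IFNAMSIZ];
        } ifr_ifrn;
        unsigned short cmd;
        unsigned short data_id;
        struct data_s data;
};

int main(void)
{
        struct fsm_s fsm;
        struct wm_req_s req;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&fsm, 0, sizeof(fsm));
        memset(&req, 0, sizeof(req));
        strncpy(req.ifr_ifrn.ifrn_name, "wm0", IFNAMSIZ - 1);
        req.cmd = SIOCG_DATA;
        req.data_id = SIOC_DATA_FSM;
        req.data.size = sizeof(fsm);
        req.data.buf = &fsm;

        if (ioctl(fd, SIOCWMIOCTL, &req) < 0)
                perror("SIOCWMIOCTL");
        else
                printf("m_status=%d c_status=%d\n", fsm.m_status, fsm.c_status);

        close(fd);
        return 0;
}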
diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig
deleted file mode 100644
index 4e094602437c..000000000000
--- a/drivers/staging/goldfish/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config GOLDFISH_AUDIO
- tristate "Goldfish AVD Audio Device"
- depends on GOLDFISH
- ---help---
- Emulated audio channel for the Goldfish Android Virtual Device
-
-config MTD_GOLDFISH_NAND
- tristate "Goldfish NAND device"
- depends on GOLDFISH
- depends on MTD
- help
- Drives the emulated NAND flash device on the Google Goldfish
- Android virtual device.
diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile
deleted file mode 100644
index dec34ad58162..000000000000
--- a/drivers/staging/goldfish/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for the Goldfish audio driver
-#
-
-obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o
-obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o
diff --git a/drivers/staging/goldfish/README b/drivers/staging/goldfish/README
deleted file mode 100644
index 183af0053234..000000000000
--- a/drivers/staging/goldfish/README
+++ /dev/null
@@ -1,11 +0,0 @@
-Audio
------
-- Move to using the ALSA framework not faking it
-- Fix the wrong user page DMA (moving to ALSA may fix that too)
-
-NAND
-----
-- Remove excess checking of parameters in calls
-- Use dma coherent memory not kmalloc/__pa for the memory (this is just
- a cleanliness issue not a correctness one)
-
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
deleted file mode 100644
index b0927e49d0a8..000000000000
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * drivers/misc/goldfish_audio.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (C) 2012 Intel, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include <linux/uaccess.h>
-#include <linux/goldfish.h>
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_DESCRIPTION("Android QEMU Audio Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0");
-
-struct goldfish_audio {
- char __iomem *reg_base;
- int irq;
- /* lock protects access to buffer_status and to device registers */
- spinlock_t lock;
- wait_queue_head_t wait;
-
- char *buffer_virt; /* combined buffer virtual address */
- unsigned long buffer_phys; /* combined buffer physical address */
-
- char *write_buffer1; /* write buffer 1 virtual address */
- char *write_buffer2; /* write buffer 2 virtual address */
- char *read_buffer; /* read buffer virtual address */
- int buffer_status;
- int read_supported; /* true if we have audio input support */
-};
-
-/*
- * We will allocate two read buffers and two write buffers.
- * Having two read buffers facilitate stereo -> mono conversion.
- * Having two write buffers facilitate interleaved IO.
- */
-#define READ_BUFFER_SIZE 16384
-#define WRITE_BUFFER_SIZE 16384
-#define COMBINED_BUFFER_SIZE ((2 * READ_BUFFER_SIZE) + \
- (2 * WRITE_BUFFER_SIZE))
-
-#define AUDIO_READ(data, addr) (readl(data->reg_base + addr))
-#define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr))
-#define AUDIO_WRITE64(data, addr, addr2, x) \
- (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base+addr2))
-
-/*
- * temporary variable used between goldfish_audio_probe() and
- * goldfish_audio_open()
- */
-static struct goldfish_audio *audio_data;
-
-enum {
- /* audio status register */
- AUDIO_INT_STATUS = 0x00,
- /* set this to enable IRQ */
- AUDIO_INT_ENABLE = 0x04,
- /* set these to specify buffer addresses */
- AUDIO_SET_WRITE_BUFFER_1 = 0x08,
- AUDIO_SET_WRITE_BUFFER_2 = 0x0C,
- /* set number of bytes in buffer to write */
- AUDIO_WRITE_BUFFER_1 = 0x10,
- AUDIO_WRITE_BUFFER_2 = 0x14,
- AUDIO_SET_WRITE_BUFFER_1_HIGH = 0x28,
- AUDIO_SET_WRITE_BUFFER_2_HIGH = 0x30,
-
- /* true if audio input is supported */
- AUDIO_READ_SUPPORTED = 0x18,
- /* buffer to use for audio input */
- AUDIO_SET_READ_BUFFER = 0x1C,
- AUDIO_SET_READ_BUFFER_HIGH = 0x34,
-
- /* driver writes number of bytes to read */
- AUDIO_START_READ = 0x20,
-
- /* number of bytes available in read buffer */
- AUDIO_READ_BUFFER_AVAILABLE = 0x24,
-
- /* AUDIO_INT_STATUS bits */
-
- /* this bit set when it is safe to write more bytes to the buffer */
- AUDIO_INT_WRITE_BUFFER_1_EMPTY = 1U << 0,
- AUDIO_INT_WRITE_BUFFER_2_EMPTY = 1U << 1,
- AUDIO_INT_READ_BUFFER_FULL = 1U << 2,
-
- AUDIO_INT_MASK = AUDIO_INT_WRITE_BUFFER_1_EMPTY |
- AUDIO_INT_WRITE_BUFFER_2_EMPTY |
- AUDIO_INT_READ_BUFFER_FULL,
-};
-
-static atomic_t open_count = ATOMIC_INIT(0);
-
-static ssize_t goldfish_audio_read(struct file *fp, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct goldfish_audio *data = fp->private_data;
- int length;
- int result = 0;
-
- if (!data->read_supported)
- return -ENODEV;
-
- while (count > 0) {
- length = (count > READ_BUFFER_SIZE ? READ_BUFFER_SIZE : count);
- AUDIO_WRITE(data, AUDIO_START_READ, length);
-
- wait_event_interruptible(data->wait, data->buffer_status &
- AUDIO_INT_READ_BUFFER_FULL);
-
- length = AUDIO_READ(data, AUDIO_READ_BUFFER_AVAILABLE);
-
- /* copy data to user space */
- if (copy_to_user(buf, data->read_buffer, length))
- return -EFAULT;
-
- result += length;
- buf += length;
- count -= length;
- }
- return result;
-}
-
-static ssize_t goldfish_audio_write(struct file *fp, const char __user *buf,
- size_t count, loff_t *pos)
-{
- struct goldfish_audio *data = fp->private_data;
- unsigned long irq_flags;
- ssize_t result = 0;
- char *kbuf;
-
- while (count > 0) {
- ssize_t copy = count;
-
- if (copy > WRITE_BUFFER_SIZE)
- copy = WRITE_BUFFER_SIZE;
- wait_event_interruptible(data->wait, data->buffer_status &
- (AUDIO_INT_WRITE_BUFFER_1_EMPTY |
- AUDIO_INT_WRITE_BUFFER_2_EMPTY));
-
- if ((data->buffer_status & AUDIO_INT_WRITE_BUFFER_1_EMPTY) != 0)
- kbuf = data->write_buffer1;
- else
- kbuf = data->write_buffer2;
-
- /* copy from user space to the appropriate buffer */
- if (copy_from_user(kbuf, buf, copy)) {
- result = -EFAULT;
- break;
- }
-
- spin_lock_irqsave(&data->lock, irq_flags);
- /*
- * clear the buffer empty flag, and signal the emulator
- * to start writing the buffer
- */
- if (kbuf == data->write_buffer1) {
- data->buffer_status &= ~AUDIO_INT_WRITE_BUFFER_1_EMPTY;
- AUDIO_WRITE(data, AUDIO_WRITE_BUFFER_1, copy);
- } else {
- data->buffer_status &= ~AUDIO_INT_WRITE_BUFFER_2_EMPTY;
- AUDIO_WRITE(data, AUDIO_WRITE_BUFFER_2, copy);
- }
- spin_unlock_irqrestore(&data->lock, irq_flags);
-
- buf += copy;
- result += copy;
- count -= copy;
- }
- return result;
-}
-
-static int goldfish_audio_open(struct inode *ip, struct file *fp)
-{
- if (!audio_data)
- return -ENODEV;
-
- if (atomic_inc_return(&open_count) == 1) {
- fp->private_data = audio_data;
- audio_data->buffer_status = (AUDIO_INT_WRITE_BUFFER_1_EMPTY |
- AUDIO_INT_WRITE_BUFFER_2_EMPTY);
- AUDIO_WRITE(audio_data, AUDIO_INT_ENABLE, AUDIO_INT_MASK);
- return 0;
- }
-
- atomic_dec(&open_count);
- return -EBUSY;
-}
-
-static int goldfish_audio_release(struct inode *ip, struct file *fp)
-{
- atomic_dec(&open_count);
- /* FIXME: surely this is wrong for the multi-opened case */
- AUDIO_WRITE(audio_data, AUDIO_INT_ENABLE, 0);
- return 0;
-}
-
-static long goldfish_audio_ioctl(struct file *fp, unsigned int cmd,
- unsigned long arg)
-{
- /* temporary workaround, until we switch to the ALSA API */
- if (cmd == 315)
- return -1;
-
- return 0;
-}
-
-static irqreturn_t goldfish_audio_interrupt(int irq, void *dev_id)
-{
- unsigned long irq_flags;
- struct goldfish_audio *data = dev_id;
- u32 status;
-
- spin_lock_irqsave(&data->lock, irq_flags);
-
- /* read buffer status flags */
- status = AUDIO_READ(data, AUDIO_INT_STATUS);
- status &= AUDIO_INT_MASK;
- /*
- * if buffers are newly empty, wake up blocked
- * goldfish_audio_write() call
- */
- if (status) {
- data->buffer_status = status;
- wake_up(&data->wait);
- }
-
- spin_unlock_irqrestore(&data->lock, irq_flags);
- return status ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/* file operations for /dev/eac */
-static const struct file_operations goldfish_audio_fops = {
- .owner = THIS_MODULE,
- .read = goldfish_audio_read,
- .write = goldfish_audio_write,
- .open = goldfish_audio_open,
- .release = goldfish_audio_release,
- .unlocked_ioctl = goldfish_audio_ioctl,
-};
-
-static struct miscdevice goldfish_audio_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "eac",
- .fops = &goldfish_audio_fops,
-};
-
-static int goldfish_audio_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *r;
- struct goldfish_audio *data;
- dma_addr_t buf_addr;
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- spin_lock_init(&data->lock);
- init_waitqueue_head(&data->wait);
- platform_set_drvdata(pdev, data);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "platform_get_resource failed\n");
- return -ENODEV;
- }
- data->reg_base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
- if (data->reg_base == NULL)
- return -ENOMEM;
-
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
- return -ENODEV;
- }
- data->buffer_virt = dmam_alloc_coherent(&pdev->dev,
- COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL);
- if (data->buffer_virt == NULL) {
- dev_err(&pdev->dev, "allocate buffer failed\n");
- return -ENOMEM;
- }
- data->buffer_phys = buf_addr;
- data->write_buffer1 = data->buffer_virt;
- data->write_buffer2 = data->buffer_virt + WRITE_BUFFER_SIZE;
- data->read_buffer = data->buffer_virt + 2 * WRITE_BUFFER_SIZE;
-
- ret = devm_request_irq(&pdev->dev, data->irq, goldfish_audio_interrupt,
- IRQF_SHARED, pdev->name, data);
- if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- return ret;
- }
-
- ret = misc_register(&goldfish_audio_device);
- if (ret) {
- dev_err(&pdev->dev,
- "misc_register returned %d in goldfish_audio_init\n",
- ret);
- return ret;
- }
-
- AUDIO_WRITE64(data, AUDIO_SET_WRITE_BUFFER_1,
- AUDIO_SET_WRITE_BUFFER_1_HIGH, buf_addr);
- buf_addr += WRITE_BUFFER_SIZE;
-
- AUDIO_WRITE64(data, AUDIO_SET_WRITE_BUFFER_2,
- AUDIO_SET_WRITE_BUFFER_2_HIGH, buf_addr);
-
- buf_addr += WRITE_BUFFER_SIZE;
-
- data->read_supported = AUDIO_READ(data, AUDIO_READ_SUPPORTED);
- if (data->read_supported)
- AUDIO_WRITE64(data, AUDIO_SET_READ_BUFFER,
- AUDIO_SET_READ_BUFFER_HIGH, buf_addr);
-
- audio_data = data;
- return 0;
-}
-
-static int goldfish_audio_remove(struct platform_device *pdev)
-{
- misc_deregister(&goldfish_audio_device);
- audio_data = NULL;
- return 0;
-}
-
-static struct platform_driver goldfish_audio_driver = {
- .probe = goldfish_audio_probe,
- .remove = goldfish_audio_remove,
- .driver = {
- .name = "goldfish_audio"
- }
-};
-
-module_platform_driver(goldfish_audio_driver);
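
The probe path above carves one coherent DMA allocation into two write buffers and one read buffer and programs each 64-bit address through a low/high register pair (AUDIO_WRITE64). The sketch below reproduces just that arithmetic, with plain variables standing in for the MMIO registers and a made-up base address; the buffer sizes are copied from the removed driver.

#include <stdint.h>
#include <stdio.h>

#define WRITE_BUFFER_SIZE 16384
#define READ_BUFFER_SIZE  16384

struct regpair {
        uint32_t low;   /* e.g. AUDIO_SET_WRITE_BUFFER_1      */
        uint32_t high;  /* e.g. AUDIO_SET_WRITE_BUFFER_1_HIGH */
};

static void write64_split(struct regpair *r, uint64_t addr)
{
        r->low  = (uint32_t)addr;
        r->high = (uint32_t)(addr >> 32);
}

int main(void)
{
        struct regpair wr1, wr2, rd;
        uint64_t buf_addr = 0x1234500000ULL;   /* fake coherent DMA address */

        write64_split(&wr1, buf_addr);                          /* write buffer 1 */
        write64_split(&wr2, buf_addr + WRITE_BUFFER_SIZE);      /* write buffer 2 */
        write64_split(&rd,  buf_addr + 2 * WRITE_BUFFER_SIZE);  /* read buffer    */

        printf("wr1 %08x:%08x wr2 %08x:%08x rd %08x:%08x\n",
               wr1.high, wr1.low, wr2.high, wr2.low, rd.high, rd.low);
        return 0;
}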
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
deleted file mode 100644
index 623353db5a08..000000000000
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * drivers/mtd/devices/goldfish_nand.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (C) 2012 Intel, Inc.
- * Copyright (C) 2013 Intel, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/goldfish.h>
-#include <asm/div64.h>
-
-#include "goldfish_nand_reg.h"
-
-struct goldfish_nand {
- /* lock protects access to the device registers */
- struct mutex lock;
- unsigned char __iomem *base;
- struct cmd_params *cmd_params;
- size_t mtd_count;
- struct mtd_info mtd[0];
-};
-
-static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
- enum nand_cmd cmd, u64 addr, u32 len,
- void *ptr, u32 *rv)
-{
- u32 cmdp;
- struct goldfish_nand *nand = mtd->priv;
- struct cmd_params *cps = nand->cmd_params;
- unsigned char __iomem *base = nand->base;
-
- if (!cps)
- return -1;
-
- switch (cmd) {
- case NAND_CMD_ERASE:
- cmdp = NAND_CMD_ERASE_WITH_PARAMS;
- break;
- case NAND_CMD_READ:
- cmdp = NAND_CMD_READ_WITH_PARAMS;
- break;
- case NAND_CMD_WRITE:
- cmdp = NAND_CMD_WRITE_WITH_PARAMS;
- break;
- default:
- return -1;
- }
- cps->dev = mtd - nand->mtd;
- cps->addr_high = (u32)(addr >> 32);
- cps->addr_low = (u32)addr;
- cps->transfer_size = len;
- cps->data = (unsigned long)ptr;
- writel(cmdp, base + NAND_COMMAND);
- *rv = cps->result;
- return 0;
-}
-
-static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
- u64 addr, u32 len, void *ptr)
-{
- struct goldfish_nand *nand = mtd->priv;
- u32 rv;
- unsigned char __iomem *base = nand->base;
-
- mutex_lock(&nand->lock);
- if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
- writel(mtd - nand->mtd, base + NAND_DEV);
- writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
- writel((u32)addr, base + NAND_ADDR_LOW);
- writel(len, base + NAND_TRANSFER_SIZE);
- gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
- writel(cmd, base + NAND_COMMAND);
- rv = readl(base + NAND_RESULT);
- }
- mutex_unlock(&nand->lock);
- return rv;
-}
-
-static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- loff_t ofs = instr->addr;
- u32 len = instr->len;
- u32 rem;
-
- if (ofs + len > mtd->size)
- goto invalid_arg;
- rem = do_div(ofs, mtd->writesize);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (len % mtd->writesize)
- goto invalid_arg;
- len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);
-
- if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
- pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
- ofs, len, mtd->size, mtd->erasesize);
- return -EIO;
- }
-
- instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
-
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
- ofs, len, mtd->size, mtd->erasesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- u32 rem;
-
- if (ofs + ops->len > mtd->size)
- goto invalid_arg;
- if (ops->datbuf && ops->len && ops->len != mtd->writesize)
- goto invalid_arg;
- if (ops->ooblen + ops->ooboffs > mtd->oobsize)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->writesize);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (ops->datbuf)
- ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
- ops->len, ops->datbuf);
- ofs += mtd->writesize + ops->ooboffs;
- if (ops->oobbuf)
- ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
- ops->ooblen, ops->oobbuf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
- ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- u32 rem;
-
- if (ofs + ops->len > mtd->size)
- goto invalid_arg;
- if (ops->len && ops->len != mtd->writesize)
- goto invalid_arg;
- if (ops->ooblen + ops->ooboffs > mtd->oobsize)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->writesize);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (ops->datbuf)
- ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
- ops->len, ops->datbuf);
- ofs += mtd->writesize + ops->ooboffs;
- if (ops->oobbuf)
- ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
- ops->ooblen, ops->oobbuf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
- ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- u32 rem;
-
- if (from + len > mtd->size)
- goto invalid_arg;
-
- rem = do_div(from, mtd->writesize);
- if (rem)
- goto invalid_arg;
- from *= (mtd->writesize + mtd->oobsize);
-
- *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
- from, len, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- u32 rem;
-
- if (to + len > mtd->size)
- goto invalid_arg;
-
- rem = do_div(to, mtd->writesize);
- if (rem)
- goto invalid_arg;
- to *= (mtd->writesize + mtd->oobsize);
-
- *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
- to, len, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- u32 rem;
-
- if (ofs >= mtd->size)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->erasesize);
- if (rem)
- goto invalid_arg;
- ofs *= mtd->erasesize / mtd->writesize;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);
-
-invalid_arg:
- pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- ofs, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- u32 rem;
-
- if (ofs >= mtd->size)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->erasesize);
- if (rem)
- goto invalid_arg;
- ofs *= mtd->erasesize / mtd->writesize;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
- return -EIO;
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- ofs, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int nand_setup_cmd_params(struct platform_device *pdev,
- struct goldfish_nand *nand)
-{
- u64 paddr;
- unsigned char __iomem *base = nand->base;
-
- nand->cmd_params = devm_kzalloc(&pdev->dev,
- sizeof(struct cmd_params), GFP_KERNEL);
- if (!nand->cmd_params)
- return -1;
-
- paddr = __pa(nand->cmd_params);
- writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
- writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
- return 0;
-}
-
-static int goldfish_nand_init_device(struct platform_device *pdev,
- struct goldfish_nand *nand, int id)
-{
- u32 name_len;
- u32 result;
- u32 flags;
- unsigned char __iomem *base = nand->base;
- struct mtd_info *mtd = &nand->mtd[id];
- char *name;
-
- mutex_lock(&nand->lock);
- writel(id, base + NAND_DEV);
- flags = readl(base + NAND_DEV_FLAGS);
- name_len = readl(base + NAND_DEV_NAME_LEN);
- mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
- mtd->size = readl(base + NAND_DEV_SIZE_LOW);
- mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
- mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
- mtd->oobavail = mtd->oobsize;
- mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
- (mtd->writesize + mtd->oobsize) * mtd->writesize;
- do_div(mtd->size, mtd->writesize + mtd->oobsize);
- mtd->size *= mtd->writesize;
- dev_dbg(&pdev->dev,
- "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
- id, mtd->size, mtd->writesize,
- mtd->oobsize, mtd->erasesize);
- mutex_unlock(&nand->lock);
-
- mtd->priv = nand;
-
- name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
- mtd->name = name;
-
- result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
- name);
- if (result != name_len) {
- dev_err(&pdev->dev,
- "goldfish_nand_init_device failed to get dev name %d != %d\n",
- result, name_len);
- return -ENODEV;
- }
- ((char *)mtd->name)[name_len] = '\0';
-
- /* Setup the MTD structure */
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- if (flags & NAND_DEV_FLAG_READ_ONLY)
- mtd->flags &= ~MTD_WRITEABLE;
- if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
- nand_setup_cmd_params(pdev, nand);
-
- mtd->owner = THIS_MODULE;
- mtd->_erase = goldfish_nand_erase;
- mtd->_read = goldfish_nand_read;
- mtd->_write = goldfish_nand_write;
- mtd->_read_oob = goldfish_nand_read_oob;
- mtd->_write_oob = goldfish_nand_write_oob;
- mtd->_block_isbad = goldfish_nand_block_isbad;
- mtd->_block_markbad = goldfish_nand_block_markbad;
-
- if (mtd_device_register(mtd, NULL, 0))
- return -EIO;
-
- return 0;
-}
-
-static int goldfish_nand_probe(struct platform_device *pdev)
-{
- u32 num_dev;
- int i;
- int err;
- u32 num_dev_working;
- u32 version;
- struct resource *r;
- struct goldfish_nand *nand;
- unsigned char __iomem *base;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
- if (!base)
- return -ENOMEM;
-
- version = readl(base + NAND_VERSION);
- if (version != NAND_VERSION_CURRENT) {
- dev_err(&pdev->dev,
- "goldfish_nand_init: version mismatch, got %d, expected %d\n",
- version, NAND_VERSION_CURRENT);
- return -ENODEV;
- }
- num_dev = readl(base + NAND_NUM_DEV);
- if (num_dev == 0)
- return -ENODEV;
-
- nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
- sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
- if (!nand)
- return -ENOMEM;
-
- mutex_init(&nand->lock);
- nand->base = base;
- nand->mtd_count = num_dev;
- platform_set_drvdata(pdev, nand);
-
- num_dev_working = 0;
- for (i = 0; i < num_dev; i++) {
- err = goldfish_nand_init_device(pdev, nand, i);
- if (err == 0)
- num_dev_working++;
- }
- if (num_dev_working == 0)
- return -ENODEV;
- return 0;
-}
-
-static int goldfish_nand_remove(struct platform_device *pdev)
-{
- struct goldfish_nand *nand = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < nand->mtd_count; i++) {
- if (nand->mtd[i].name)
- mtd_device_unregister(&nand->mtd[i]);
- }
- return 0;
-}
-
-static struct platform_driver goldfish_nand_driver = {
- .probe = goldfish_nand_probe,
- .remove = goldfish_nand_remove,
- .driver = {
- .name = "goldfish_nand"
- }
-};
-
-module_platform_driver(goldfish_nand_driver);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h
deleted file mode 100644
index 43aeba3a4c8f..000000000000
--- a/drivers/staging/goldfish/goldfish_nand_reg.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * drivers/mtd/devices/goldfish_nand_reg.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef GOLDFISH_NAND_REG_H
-#define GOLDFISH_NAND_REG_H
-
-enum nand_cmd {
- /* Write device name for NAND_DEV to NAND_DATA (vaddr) */
- NAND_CMD_GET_DEV_NAME,
- NAND_CMD_READ,
- NAND_CMD_WRITE,
- NAND_CMD_ERASE,
- /* NAND_RESULT is 1 if block is bad, 0 if it is not */
- NAND_CMD_BLOCK_BAD_GET,
- NAND_CMD_BLOCK_BAD_SET,
- NAND_CMD_READ_WITH_PARAMS,
- NAND_CMD_WRITE_WITH_PARAMS,
- NAND_CMD_ERASE_WITH_PARAMS
-};
-
-enum nand_dev_flags {
- NAND_DEV_FLAG_READ_ONLY = 0x00000001,
- NAND_DEV_FLAG_CMD_PARAMS_CAP = 0x00000002,
-};
-
-#define NAND_VERSION_CURRENT (1)
-
-enum nand_reg {
- /* Global */
- NAND_VERSION = 0x000,
- NAND_NUM_DEV = 0x004,
- NAND_DEV = 0x008,
-
- /* Dev info */
- NAND_DEV_FLAGS = 0x010,
- NAND_DEV_NAME_LEN = 0x014,
- NAND_DEV_PAGE_SIZE = 0x018,
- NAND_DEV_EXTRA_SIZE = 0x01c,
- NAND_DEV_ERASE_SIZE = 0x020,
- NAND_DEV_SIZE_LOW = 0x028,
- NAND_DEV_SIZE_HIGH = 0x02c,
-
- /* Command */
- NAND_RESULT = 0x040,
- NAND_COMMAND = 0x044,
- NAND_DATA = 0x048,
- NAND_DATA_HIGH = 0x100,
- NAND_TRANSFER_SIZE = 0x04c,
- NAND_ADDR_LOW = 0x050,
- NAND_ADDR_HIGH = 0x054,
- NAND_CMD_PARAMS_ADDR_LOW = 0x058,
- NAND_CMD_PARAMS_ADDR_HIGH = 0x05c,
-};
-
-struct cmd_params {
- u32 dev;
- u32 addr_low;
- u32 addr_high;
- u32 transfer_size;
- unsigned long data;
- u32 result;
-};
-#endif
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
new file mode 100644
index 000000000000..3d2c6f88a138
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Sample code to test CAP protocol
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "../../greybus_authentication.h"
+
+struct cap_ioc_get_endpoint_uid uid;
+struct cap_ioc_get_ims_certificate cert = {
+ .certificate_class = 0,
+ .certificate_id = 0,
+};
+
+struct cap_ioc_authenticate authenticate = {
+ .auth_type = 0,
+ .challenge = {0},
+};
+
+int main(int argc, char *argv[])
+{
+ unsigned int timeout = 10000;
+ char *capdev;
+ int fd, ret;
+
+ /* Make sure arguments are correct */
+ if (argc != 2) {
+		printf("\nUsage: ./authenticate <Path of the gb-cap-X dev>\n");
+ return 0;
+ }
+
+ capdev = argv[1];
+
+ printf("Opening %s authentication device\n", capdev);
+
+ fd = open(capdev, O_RDWR);
+ if (fd < 0) {
+ printf("Failed to open: %s\n", capdev);
+ return -1;
+ }
+
+ /* Get UID */
+ printf("Get UID\n");
+
+ ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);
+ if (ret < 0) {
+ printf("Failed to get UID: %s (%d)\n", capdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ printf("UID received: 0x%llx\n", *(unsigned long long int *)(uid.uid));
+
+ /* Get certificate */
+ printf("Get IMS certificate\n");
+
+ ret = ioctl(fd, CAP_IOC_GET_IMS_CERTIFICATE, &cert);
+ if (ret < 0) {
+ printf("Failed to get IMS certificate: %s (%d)\n", capdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ printf("IMS Certificate size: %d\n", cert.cert_size);
+
+ /* Authenticate */
+ printf("Authenticate module\n");
+
+ memcpy(authenticate.uid, uid.uid, 8);
+
+ ret = ioctl(fd, CAP_IOC_AUTHENTICATE, &authenticate);
+ if (ret < 0) {
+ printf("Failed to authenticate module: %s (%d)\n", capdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ printf("Authenticated, result (%02x), sig-size (%02x)\n",
+ authenticate.result_code, authenticate.signature_size);
+
+close_fd:
+ close(fd);
+
+ return ret;
+}
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware-management b/drivers/staging/greybus/Documentation/firmware/firmware-management
new file mode 100644
index 000000000000..7918257e5b3b
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/firmware-management
@@ -0,0 +1,333 @@
+
+Firmware Management
+-------------------
+ Copyright 2016 Google Inc.
+ Copyright 2016 Linaro Ltd.
+
+Interface-Manifest
+------------------
+
+All firmware packages on the Modules or Interfaces are managed by a special
+Firmware Management Protocol. To support Firmware Management by the AP, the
+Interface Manifest shall at least contain the Firmware Management Bundle and a
+Firmware Management Protocol CPort within it.
+
+The bundle may contain additional CPorts based on the extra functionality
+required to manage firmware packages.
+
+For example, this is how the Firmware Management part of the Interface Manifest
+may look:
+
+ ; Firmware Management Bundle (Bundle 1):
+ [bundle-descriptor 1]
+ class = 0x16
+
+ ; (Mandatory) Firmware Management Protocol on CPort 1
+ [cport-descriptor 2]
+ bundle = 1
+ protocol = 0x18
+
+ ; (Optional) Firmware Download Protocol on CPort 2
+ [cport-descriptor 1]
+ bundle = 1
+ protocol = 0x17
+
+ ; (Optional) SPI protocol on CPort 3
+ [cport-descriptor 3]
+ bundle = 1
+ protocol = 0x0b
+
+ ; (Optional) Component Authentication Protocol (CAP) on CPort 4
+ [cport-descriptor 4]
+ bundle = 1
+ protocol = 0x19
+
+
+Sysfs Interfaces - Firmware Management
+--------------------------------------
+
+The Firmware Management Protocol interacts with Userspace using the character
+device interface. The character device will be present in the /dev/ directory
+and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.
+
+Identifying the Character Device
+================================
+
+There can be multiple devices present in the /dev/ directory with the name
+gb-fw-mgmt-N, and the user first needs to identify the character device used
+for firmware management of a particular interface.
+
+The Firmware Management core creates a device of class 'gb_fw_mgmt', which shall
+be used by the user to identify the right character device for it. The class
+device is created within the Bundle directory for a particular Interface.
+
+For example, this is how the class device may appear:
+
+/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt/gb-fw-mgmt-0
+
+The last component of this path, gb-fw-mgmt-0, is precisely the name of the
+char device, and so the device node in this case will be:
+
+/dev/gb-fw-mgmt-0.
+
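+A userspace application can automate this lookup. The following is a minimal,
+hypothetical sketch (not part of the kernel tree): it globs for the gb_fw_mgmt
+class directory inside the bundle directories and derives the /dev node name
+from it. The three-level path depth is an assumption based on the example
+above.
+
+#include <glob.h>
+#include <libgen.h>
+#include <stdio.h>
+
+int main(void)
+{
+	glob_t g;
+	char devpath[64];
+
+	/* The gb_fw_mgmt class directory sits inside the bundle directory */
+	if (glob("/sys/bus/greybus/devices/*/*/*/gb_fw_mgmt/gb-fw-mgmt-*",
+		 0, NULL, &g) || g.gl_pathc == 0) {
+		fprintf(stderr, "no firmware-management device found\n");
+		return 1;
+	}
+
+	/* The directory name is also the char device name under /dev */
+	snprintf(devpath, sizeof(devpath), "/dev/%s", basename(g.gl_pathv[0]));
+	printf("%s\n", devpath);
+
+	globfree(&g);
+	return 0;
+}
+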
+Operations on the Char device
+=============================
+
+The character device (gb-fw-mgmt-0 in the example) can be opened by a userspace
+application, which can then perform various 'ioctl' operations on the device.
+The device doesn't support any read/write operations.
+
+The following IOCTLs and their data structures are available to the user:
+
+/* IOCTL support */
+#define GB_FW_U_LOAD_METHOD_UNIPRO 0x01
+#define GB_FW_U_LOAD_METHOD_INTERNAL 0x02
+
+#define GB_FW_U_LOAD_STATUS_FAILED 0x00
+#define GB_FW_U_LOAD_STATUS_UNVALIDATED 0x01
+#define GB_FW_U_LOAD_STATUS_VALIDATED 0x02
+#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED 0x03
+
+#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS 0x01
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND 0x02
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH 0x03
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE 0x04
+#define GB_FW_U_BACKEND_FW_STATUS_INT 0x05
+#define GB_FW_U_BACKEND_FW_STATUS_RETRY 0x06
+#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
+
+#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS 0x01
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
+#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY 0x04
+#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT 0x05
+
+
+struct fw_mgmt_ioc_get_intf_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_get_backend_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+ __u8 status;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_intf_load_and_validate {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u8 load_method;
+ __u8 status;
+ __u16 major;
+ __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_backend_fw_update {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u8 status;
+} __attribute__ ((__packed__));
+
+#define FW_MGMT_IOCTL_BASE 'S'
+#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
+#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
+#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
+#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
+#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
+#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
+
+1. FW_MGMT_IOC_GET_INTF_FW:
+
+ This ioctl shall be used by the user to get the version and firmware-tag of
+  the currently running Interface Firmware. All the fields of the 'struct
+  fw_mgmt_ioc_get_intf_version' are filled by the kernel.
+
+2. FW_MGMT_IOC_GET_BACKEND_FW:
+
+ This ioctl shall be used by the user to get the version of a currently
+ running Backend Interface Firmware identified by a firmware-tag. The user is
+  required to fill the 'firmware_tag' field of the 'struct
+  fw_mgmt_ioc_get_backend_version' in this case. The 'major', 'minor' and
+  'status' fields are set by the kernel in response.
+
+3. FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
+
+ This ioctl shall be used by the user to load an Interface Firmware package on
+ an Interface. The user needs to fill the 'firmware_tag' and 'load_method'
+ fields of the 'struct fw_mgmt_ioc_intf_load_and_validate'. The 'status',
+ 'major' and 'minor' fields are set by the kernel in response.
+
+4. FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
+
+ This ioctl shall be used by the user to request an Interface to update a
+ Backend Interface Firmware. The user is required to fill the 'firmware_tag'
+  field of the 'struct fw_mgmt_ioc_backend_fw_update' in this case. The
+  'status' field is set by the kernel in response.
+
+5. FW_MGMT_IOC_SET_TIMEOUT_MS:
+
+ This ioctl shall be used by the user to increase the timeout interval within
+ which the firmware must get loaded by the Module. The default timeout is 1
+ second. The user needs to pass the timeout in milliseconds.
+
+6. FW_MGMT_IOC_MODE_SWITCH:
+
+  This ioctl shall be used by the user to mode-switch the module to the
+  previously loaded interface firmware. If no interface firmware has been
+  loaded previously, or if an unsuccessful FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE
+  operation has taken place since the last successful load, the firmware core
+  won't allow the mode-switch.
+
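+As a quick illustration of the ioctls described above, a minimal, hypothetical
+userspace snippet could look as follows. It assumes the greybus_firmware.h
+UAPI header (the include path depends on where that header is installed); the
+firmware.c sample referenced at the end of this document shows the complete
+flow.
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include "greybus_firmware.h"
+
+int main(void)
+{
+	struct fw_mgmt_ioc_get_intf_version ver = { 0 };
+	unsigned int timeout_ms = 10000;
+	int ret = 1;
+	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);
+
+	if (fd < 0)
+		return 1;
+
+	/* Allow up to 10 seconds for firmware loading operations */
+	if (ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &timeout_ms) < 0)
+		goto out;
+
+	/* All fields of 'ver' are filled in by the kernel */
+	if (ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &ver) < 0)
+		goto out;
+
+	printf("tag %s, version %u.%u\n", ver.firmware_tag, ver.major, ver.minor);
+	ret = 0;
+out:
+	close(fd);
+	return ret;
+}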
+
+Sysfs Interfaces - Authentication
+---------------------------------
+
+The Component Authentication Protocol interacts with Userspace using the
+character device interface. The character device will be present in the /dev/
+directory and will be named gb-authenticate-<N>. The number <N> is assigned at
+runtime.
+
+Identifying the Character Device
+================================
+
+There can be multiple devices present in the /dev/ directory with the name
+gb-authenticate-N, and the user first needs to identify the character device
+used for authentication of a particular interface.
+
+The Authentication core creates a device of class 'gb_authenticate', which shall
+be used by the user to identify the right character device for it. The class
+device is created within the Bundle directory for a particular Interface.
+
+For example, this is how the class device may appear:
+
+/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_authenticate/gb-authenticate-0
+
+The last component of this path, gb-authenticate-0, is precisely the name of
+the char device, and so the device node in this case will be:
+
+/dev/gb-authenticate-0.
+
+Operations on the Char device
+=============================
+
+The character device (/dev/gb-authenticate-0 in the above example) can be
+opened by a userspace application, which can then perform various 'ioctl'
+operations on the device. The device doesn't support any read/write operations.
+
+The following IOCTLs and their data structures are available to the user:
+
+#define CAP_CERTIFICATE_MAX_SIZE 1600
+#define CAP_SIGNATURE_MAX_SIZE 320
+
+/* Certificate class types */
+#define CAP_CERT_IMS_EAPC 0x00000001
+#define CAP_CERT_IMS_EASC 0x00000002
+#define CAP_CERT_IMS_EARC 0x00000003
+#define CAP_CERT_IMS_IAPC 0x00000004
+#define CAP_CERT_IMS_IASC 0x00000005
+#define CAP_CERT_IMS_IARC 0x00000006
+
+/* IMS Certificate response result codes */
+#define CAP_IMS_RESULT_CERT_FOUND 0x00
+#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
+#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
+#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
+
+/* Authentication types */
+#define CAP_AUTH_IMS_PRI 0x00000001
+#define CAP_AUTH_IMS_SEC 0x00000002
+#define CAP_AUTH_IMS_RSA 0x00000003
+
+/* Authenticate response result codes */
+#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
+#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
+#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
+#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
+#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
+
+
+/* IOCTL support */
+struct cap_ioc_get_endpoint_uid {
+ __u8 uid[8];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_get_ims_certificate {
+ __u32 certificate_class;
+ __u32 certificate_id;
+
+ __u8 result_code;
+ __u32 cert_size;
+ __u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_authenticate {
+ __u32 auth_type;
+ __u8 uid[8];
+ __u8 challenge[32];
+
+ __u8 result_code;
+ __u8 response[64];
+ __u32 signature_size;
+ __u8 signature[CAP_SIGNATURE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+#define CAP_IOCTL_BASE 'C'
+#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
+#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
+#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
+
+
+1. CAP_IOC_GET_ENDPOINT_UID:
+
+ This ioctl shall be used by the user to get the endpoint UID associated with
+ the Interface. All the fields of the 'struct cap_ioc_get_endpoint_uid' are
+ filled by the kernel.
+
+2. CAP_IOC_GET_IMS_CERTIFICATE:
+
+ This ioctl shall be used by the user to retrieve one of the available
+ cryptographic certificates held by the Interface for use in Component
+ Authentication. The user is required to fill the 'certificate_class' and
+ 'certificate_id' field of the 'struct cap_ioc_get_ims_certificate' in this
+ case. The other fields will be set by the kernel in response. The first
+  'cert_size' bytes of the 'certificate' shall be read by the user and the
+  rest must be discarded.
+
+3. CAP_IOC_AUTHENTICATE:
+
+ This ioctl shall be used by the user to authenticate the Module attached to
+ an Interface. The user needs to fill the 'auth_type', 'uid', and 'challenge'
+ fields of the 'struct cap_ioc_authenticate'. The other fields will be set by
+ the kernel in response. The first 'signature_size' bytes of the 'signature'
+  shall be read by the user and the rest must be discarded.
+
+
+Sysfs Interfaces - Firmware Download
+------------------------------------
+
+The Firmware Download Protocol uses the Linux kernel's existing firmware class,
+and the interfaces provided to userspace are described in:
+Documentation/firmware_class/.
+
+
+Sysfs Interfaces - SPI Flash
+----------------------------
+
+The SPI flash is exposed to userspace as an MTD device and is created
+within the Bundle directory. For example, this is how the path may look:
+
+$ ls /sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/spi_master/spi32766/spi32766.0/mtd
+mtd0 mtd0ro
+
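+For a quick sanity check from userspace, the exposed partition can be queried
+through the standard MTD character device ioctls. This is a minimal,
+hypothetical sketch; the /dev/mtd0 node name is an assumption and should be
+matched against the mtdN entry found under the bundle directory as shown
+above.
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <mtd/mtd-user.h>
+
+int main(void)
+{
+	struct mtd_info_user info;
+	int fd = open("/dev/mtd0", O_RDONLY);
+
+	if (fd < 0)
+		return 1;
+
+	/* MEMGETINFO reports size, erase block and page (write) sizes */
+	if (ioctl(fd, MEMGETINFO, &info) == 0)
+		printf("size %u, erasesize %u, writesize %u\n",
+		       info.size, info.erasesize, info.writesize);
+
+	close(fd);
+	return 0;
+}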
+
+Sample Applications
+-------------------
+
+This directory also provides a firmware.c test application, which can be used
+as a reference while developing a userspace application that talks to the
+firmware-management protocol.
+
+It likewise provides an authenticate.c test application, which can be used as
+a reference while developing a userspace application that talks to the
+component authentication protocol.
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
new file mode 100644
index 000000000000..3b35ef6d4adb
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Sample code to test firmware-management protocol
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "../../greybus_firmware.h"
+
+#define FW_DEV_DEFAULT "/dev/gb-fw-mgmt-0"
+#define FW_TAG_INT_DEFAULT "s3f"
+#define FW_TAG_BCND_DEFAULT "bf_01"
+#define FW_UPDATE_TYPE_DEFAULT 0
+#define FW_TIMEOUT_DEFAULT 10000
+
+static const char *firmware_tag;
+static const char *fwdev = FW_DEV_DEFAULT;
+static unsigned int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
+static unsigned int fw_timeout = FW_TIMEOUT_DEFAULT;
+
+static struct fw_mgmt_ioc_get_intf_version intf_fw_info;
+static struct fw_mgmt_ioc_get_backend_version backend_fw_info;
+static struct fw_mgmt_ioc_intf_load_and_validate intf_load;
+static struct fw_mgmt_ioc_backend_fw_update backend_update;
+
+static void usage(void)
+{
+ printf("\nUsage: ./firmware <gb-fw-mgmt-X (default: gb-fw-mgmt-0)> <interface: 0, backend: 1 (default: 0)> <firmware-tag> (default: \"s3f\"/\"bf_01\") <timeout (default: 10000 ms)>\n");
+}
+
+static int update_intf_firmware(int fd)
+{
+ int ret;
+
+ /* Get Interface Firmware Version */
+ printf("Get Interface Firmware Version\n");
+
+ ret = ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &intf_fw_info);
+ if (ret < 0) {
+ printf("Failed to get interface firmware version: %s (%d)\n",
+ fwdev, ret);
+ return -1;
+ }
+
+ printf("Interface Firmware tag (%s), major (%d), minor (%d)\n",
+ intf_fw_info.firmware_tag, intf_fw_info.major,
+ intf_fw_info.minor);
+
+ /* Try Interface Firmware load over Unipro */
+ printf("Loading Interface Firmware\n");
+
+ intf_load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;
+ intf_load.status = 0;
+ intf_load.major = 0;
+ intf_load.minor = 0;
+
+ strncpy((char *)&intf_load.firmware_tag, firmware_tag,
+ GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+ ret = ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &intf_load);
+ if (ret < 0) {
+ printf("Failed to load interface firmware: %s (%d)\n", fwdev,
+ ret);
+ return -1;
+ }
+
+ if (intf_load.status != GB_FW_U_LOAD_STATUS_VALIDATED &&
+ intf_load.status != GB_FW_U_LOAD_STATUS_UNVALIDATED) {
+ printf("Load status says loading failed: %d\n",
+ intf_load.status);
+ return -1;
+ }
+
+ printf("Interface Firmware (%s) Load done: major: %d, minor: %d, status: %d\n",
+ firmware_tag, intf_load.major, intf_load.minor,
+ intf_load.status);
+
+ /* Initiate Mode-switch to the newly loaded firmware */
+ printf("Initiate Mode switch\n");
+
+ ret = ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
+ if (ret < 0)
+ printf("Failed to initiate mode-switch (%d)\n", ret);
+
+ return ret;
+}
+
+static int update_backend_firmware(int fd)
+{
+ int ret;
+
+ /* Get Backend Firmware Version */
+ printf("Getting Backend Firmware Version\n");
+
+ strncpy((char *)&backend_fw_info.firmware_tag, firmware_tag,
+ GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+retry_fw_version:
+ ret = ioctl(fd, FW_MGMT_IOC_GET_BACKEND_FW, &backend_fw_info);
+ if (ret < 0) {
+ printf("Failed to get backend firmware version: %s (%d)\n",
+ fwdev, ret);
+ return -1;
+ }
+
+ printf("Backend Firmware tag (%s), major (%d), minor (%d), status (%d)\n",
+ backend_fw_info.firmware_tag, backend_fw_info.major,
+ backend_fw_info.minor, backend_fw_info.status);
+
+ if (backend_fw_info.status == GB_FW_U_BACKEND_VERSION_STATUS_RETRY)
+ goto retry_fw_version;
+
+ if ((backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS) &&
+ (backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE)) {
+ printf("Failed to get backend firmware version: %s (%d)\n",
+ fwdev, backend_fw_info.status);
+ return -1;
+ }
+
+ /* Try Backend Firmware Update over Unipro */
+ printf("Updating Backend Firmware\n");
+
+ strncpy((char *)&backend_update.firmware_tag, firmware_tag,
+ GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+retry_fw_update:
+ backend_update.status = 0;
+
+ ret = ioctl(fd, FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE, &backend_update);
+ if (ret < 0) {
+ printf("Failed to load backend firmware: %s (%d)\n", fwdev, ret);
+ return -1;
+ }
+
+ if (backend_update.status == GB_FW_U_BACKEND_FW_STATUS_RETRY) {
+ printf("Retrying firmware update: %d\n", backend_update.status);
+ goto retry_fw_update;
+ }
+
+ if (backend_update.status != GB_FW_U_BACKEND_FW_STATUS_SUCCESS) {
+ printf("Load status says loading failed: %d\n",
+ backend_update.status);
+ } else {
+ printf("Backend Firmware (%s) Load done: status: %d\n",
+ firmware_tag, backend_update.status);
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int fd, ret;
+ char *endptr;
+
+ if (argc > 1 &&
+ (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))) {
+ usage();
+ return -1;
+ }
+
+ if (argc > 1)
+ fwdev = argv[1];
+
+ if (argc > 2)
+ fw_update_type = strtoul(argv[2], &endptr, 10);
+
+ if (argc > 3)
+ firmware_tag = argv[3];
+ else if (!fw_update_type)
+ firmware_tag = FW_TAG_INT_DEFAULT;
+ else
+ firmware_tag = FW_TAG_BCND_DEFAULT;
+
+ if (argc > 4)
+ fw_timeout = strtoul(argv[4], &endptr, 10);
+
+ printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %u\n",
+ fwdev, fw_update_type == 0 ? "interface" : "backend",
+ firmware_tag, fw_timeout);
+
+ printf("Opening %s firmware management device\n", fwdev);
+
+ fd = open(fwdev, O_RDWR);
+ if (fd < 0) {
+ printf("Failed to open: %s\n", fwdev);
+ return -1;
+ }
+
+ /* Set Timeout */
+ printf("Setting timeout to %u ms\n", fw_timeout);
+
+ ret = ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &fw_timeout);
+ if (ret < 0) {
+ printf("Failed to set timeout: %s (%d)\n", fwdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ if (!fw_update_type)
+ ret = update_intf_firmware(fd);
+ else
+ ret = update_backend_firmware(fd);
+
+close_fd:
+ close(fd);
+
+ return ret;
+}
diff --git a/drivers/staging/greybus/Documentation/sysfs-bus-greybus b/drivers/staging/greybus/Documentation/sysfs-bus-greybus
new file mode 100644
index 000000000000..2e998966cbe1
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/sysfs-bus-greybus
@@ -0,0 +1,275 @@
+What: /sys/bus/greybus/devices/greybusN
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The "root" greybus device for the Greybus device tree, or bus,
+ where N is a dynamically assigned 1-based id.
+
+What: /sys/bus/greybus/devices/greybusN/bus_id
+Date: April 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The ID of the "root" greybus device, or bus.
+
+What: /sys/bus/greybus/devices/N-M
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ A Module M on the bus N, where M is the 1-byte interface
+ ID of the module's primary interface.
+
+What: /sys/bus/greybus/devices/N-M/eject
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+		Writing a non-zero argument to this attribute disables the
+ module's interfaces before physically ejecting it.
+
+What: /sys/bus/greybus/devices/N-M/module_id
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The ID of a Greybus module, corresponding to the ID of its
+ primary interface.
+
+What: /sys/bus/greybus/devices/N-M/num_interfaces
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The number of interfaces of a module.
+
+What: /sys/bus/greybus/devices/N-M.I
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ An Interface I on the bus N and module N-M, where I is the
+ 1-byte interface ID.
+
+What: /sys/bus/greybus/devices/N-M.I/current_now
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Current measurement of the interface in microamps (uA)
+
+What: /sys/bus/greybus/devices/N-M.I/ddbl1_manufacturer_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Unipro Device Descriptor Block Level 1 manufacturer ID for the
+ greybus Interface.
+
+What: /sys/bus/greybus/devices/N-M.I/ddbl1_product_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Unipro Device Descriptor Block Level 1 product ID for the
+ greybus Interface.
+
+What: /sys/bus/greybus/devices/N-M.I/interface_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The ID of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I/interface_type
+Date: June 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The type of a Greybus interface; "dummy", "unipro", "greybus",
+ or "unknown".
+
+What: /sys/bus/greybus/devices/N-M.I/power_now
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Power measurement of the interface in microwatts (uW)
+
+What: /sys/bus/greybus/devices/N-M.I/power_state
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ This file reflects the power state of a Greybus interface. If
+ the value read from it is "on", then power is currently
+ supplied to the interface. Otherwise it will read "off" and
+ power is currently not supplied to the interface.
+
+ If the value read is "off", then writing "on" (or '1', 'y',
+ 'Y') to this file will enable power to the interface and an
+ attempt to boot and possibly enumerate it will be made. Note
+ that on errors, the interface will again be powered down.
+
+ If the value read is "on", then writing "off" (or '0', 'n',
+ 'N') to this file will power down the interface.
+
+What: /sys/bus/greybus/devices/N-M.I/product_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Product ID of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I/serial_number
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Serial Number of the Greybus interface, represented by a 64 bit
+ hexadecimal number.
+
+What: /sys/bus/greybus/devices/N-M.I/vendor_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Vendor ID of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I/voltage_now
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Voltage measurement of the interface in microvolts (uV)
+
+What: /sys/bus/greybus/devices/N-M.I.ctrl
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Abstract control device for interface I that represents the
+ current mode of an enumerated Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I.ctrl/product_string
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Product ID string of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I.ctrl/vendor_string
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Vendor ID string of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I.B
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+		A bundle B on the Interface I, where B is a 1-byte number
+		representing the bundle.
+
+What: /sys/bus/greybus/devices/N-M.I.B/bundle_class
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The greybus class of the bundle B.
+
+What: /sys/bus/greybus/devices/N-M.I.B/bundle_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The interface-unique id of the bundle B.
+
+What: /sys/bus/greybus/devices/N-M.I.B/gpbX
+Date: April 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The General Purpose Bridged PHY device of the bundle B,
+ where X is a dynamically assigned 0-based id.
+
+What: /sys/bus/greybus/devices/N-M.I.B/state
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ A bundle has a state that is managed by the userspace
+ Endo process. This file allows that Endo to signal
+ other Android HALs that the state of the bundle has
+ changed to a specific value. When written to, any
+ process watching the file will be woken up, and the new
+ value can be read. It's a "poor-man's IPC", yes, but
+ simplifies the Android userspace code immensely.
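+
+		For example, a watcher could be implemented roughly as in the
+		hypothetical sketch below; it assumes the attribute raises a
+		sysfs notification when written, which is how the wake-up
+		described above is normally delivered (poll() for POLLPRI and
+		re-read the attribute from offset 0):
+
+		  #include <fcntl.h>
+		  #include <poll.h>
+		  #include <stdio.h>
+		  #include <unistd.h>
+
+		  int main(int argc, char *argv[])
+		  {
+		          char val[16] = "";
+		          struct pollfd pfd;
+		          ssize_t n;
+
+		          if (argc < 2)
+		                  return 1;
+
+		          /* path to the bundle's 'state' attribute */
+		          pfd.fd = open(argv[1], O_RDONLY);
+		          if (pfd.fd < 0)
+		                  return 1;
+		          pfd.events = POLLPRI | POLLERR;
+
+		          pread(pfd.fd, val, sizeof(val) - 1, 0); /* arm */
+		          poll(&pfd, 1, -1);     /* sleep until written */
+		          n = pread(pfd.fd, val, sizeof(val) - 1, 0);
+		          if (n > 0)
+		                  val[n] = '\0';
+		          printf("state: %s\n", val);
+
+		          close(pfd.fd);
+		          return 0;
+		  }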
+
+What: /sys/bus/greybus/devices/N-svc
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The singleton SVC device of bus N.
+
+What: /sys/bus/greybus/devices/N-svc/ap_intf_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The AP interface ID, a 1-byte non-zero integer which
+ defines the position of the AP module on the frame.
+ The interface positions are defined in the GMP
+ Module Developer Kit.
+
+What: /sys/bus/greybus/devices/N-svc/endo_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The Endo ID, which is a 2-byte hexadecimal value
+ defined by the Endo layout scheme, documented in
+ the GMP Module Developer Kit.
+
+What: /sys/bus/greybus/devices/N-svc/intf_eject
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Write the number of the interface that you wish to
+ forcibly eject from the system.
+
+What: /sys/bus/greybus/devices/N-svc/version
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The version number of the firmware in the SVC device.
+
+What: /sys/bus/greybus/devices/N-svc/watchdog
+Date: October 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+		Whether the SVC watchdog is enabled or not. Writing 0 to this
+ If the SVC watchdog is enabled or not. Writing 0 to this
+ file will disable the watchdog, writing 1 will enable it.
+
+What: /sys/bus/greybus/devices/N-svc/watchdog_action
+Date: July 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ This attribute indicates the action to be performed upon SVC
+ watchdog bite.
+
+		The action can be either "reset" or "panic". Writing one of
+		these values will change the behavior on SVC watchdog bite.
+		The default value is "reset".
+
+ "reset" means the UniPro subsystem is to be reset.
+
+ "panic" means SVC watchdog bite will cause kernel to panic.
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
new file mode 100644
index 000000000000..1e745a8d439c
--- /dev/null
+++ b/drivers/staging/greybus/Kconfig
@@ -0,0 +1,216 @@
+# SPDX-License-Identifier: GPL-2.0
+if GREYBUS
+
+config GREYBUS_AUDIO
+ tristate "Greybus Audio Class driver"
+ depends on SOUND && SND_SOC
+ help
+ Select this option if you have a device that follows the
+ Greybus Audio Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-audio.ko
+
+config GREYBUS_AUDIO_APB_CODEC
+ tristate "Greybus APBridge Audio codec driver"
+ depends on SND_SOC && GREYBUS_AUDIO
+ help
+ Select this option if you have a Toshiba APB device that has I2S
+ ports and acts as a Greybus "Dummy codec". This device is a
+ bridge from an APB-I2S port to a Unipro network.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-audio-codec.ko
+
+
+config GREYBUS_BOOTROM
+ tristate "Greybus Bootrom Class driver"
+ help
+ Select this option if you have a device that follows the
+ Greybus Bootrom Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-bootrom.ko
+
+config GREYBUS_CAMERA
+ tristate "Greybus Camera Class driver"
+ depends on MEDIA_SUPPORT && LEDS_CLASS_FLASH && BROKEN
+ help
+ Select this option if you have a device that follows the
+ Greybus Camera Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-camera.ko
+
+config GREYBUS_FIRMWARE
+ tristate "Greybus Firmware Download Class driver"
+ depends on SPI
+ help
+ Select this option if you have a device that follows the
+ Greybus Firmware Download Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-firmware.ko
+
+config GREYBUS_HID
+ tristate "Greybus HID Class driver"
+ depends on HID && INPUT
+ help
+ Select this option if you have a device that follows the
+ Greybus HID Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-hid.ko
+
+config GREYBUS_LIGHT
+ tristate "Greybus LED Class driver"
+ depends on LEDS_CLASS_FLASH
+ help
+ Select this option if you have a device that follows the
+ Greybus LED Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-light.ko
+
+config GREYBUS_LOG
+ tristate "Greybus Debug Log Class driver"
+ help
+ Select this option if you have a device that follows the
+ Greybus Debug Log Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-log.ko
+
+config GREYBUS_LOOPBACK
+ tristate "Greybus Loopback Class driver"
+ help
+ Select this option if you have a device that follows the
+	  Greybus Loopback Class specification.
+
+	  To compile this code as a module, choose M here: the module
+	  will be called gb-loopback.ko
+
+config GREYBUS_POWER
+ tristate "Greybus Powersupply Class driver"
+ depends on POWER_SUPPLY
+ help
+ Select this option if you have a device that follows the
+ Greybus Powersupply Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-power-supply.ko
+
+config GREYBUS_RAW
+ tristate "Greybus Raw Class driver"
+ help
+ Select this option if you have a device that follows the
+ Greybus Raw Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-raw.ko
+
+config GREYBUS_VIBRATOR
+ tristate "Greybus Vibrator Motor Class driver"
+ help
+ Select this option if you have a device that follows the
+ Greybus Vibrator Motor Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-vibrator.ko
+
+menuconfig GREYBUS_BRIDGED_PHY
+ tristate "Greybus Bridged PHY Class drivers"
+ help
+ Select this option to pick from a variety of Greybus Bridged
+ PHY class drivers. These drivers emulate a number of
+ different "traditional" busses by tunneling them over Greybus.
+ Examples of this include serial, SPI, USB, and others.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-phy.ko
+
+if GREYBUS_BRIDGED_PHY
+
+config GREYBUS_GPIO
+ tristate "Greybus GPIO Bridged PHY driver"
+ depends on GPIOLIB
+ select GPIOLIB_IRQCHIP
+ help
+ Select this option if you have a device that follows the
+ Greybus GPIO Bridged PHY Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-gpio.ko
+
+config GREYBUS_I2C
+ tristate "Greybus I2C Bridged PHY driver"
+ depends on I2C
+ help
+ Select this option if you have a device that follows the
+ Greybus I2C Bridged PHY Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-i2c.ko
+
+config GREYBUS_PWM
+ tristate "Greybus PWM Bridged PHY driver"
+ depends on PWM
+ help
+ Select this option if you have a device that follows the
+ Greybus PWM Bridged PHY Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-pwm.ko
+
+config GREYBUS_SDIO
+ tristate "Greybus SDIO Bridged PHY driver"
+ depends on MMC
+ help
+ Select this option if you have a device that follows the
+ Greybus SDIO Bridged PHY Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-sdio.ko
+
+config GREYBUS_SPI
+ tristate "Greybus SPI Bridged PHY driver"
+ depends on SPI
+ help
+ Select this option if you have a device that follows the
+ Greybus SPI Bridged PHY Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-spi.ko
+
+config GREYBUS_UART
+ tristate "Greybus UART Bridged PHY driver"
+ depends on TTY
+ help
+ Select this option if you have a device that follows the
+ Greybus UART Bridged PHY Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-uart.ko
+
+config GREYBUS_USB
+ tristate "Greybus USB Host Bridged PHY driver"
+ depends on USB
+ help
+ Select this option if you have a device that follows the
+ Greybus USB Host Bridged PHY Class specification.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-usb.ko
+
+endif # GREYBUS_BRIDGED_PHY
+
+config GREYBUS_ARCHE
+ tristate "Greybus Arche Platform driver"
+ depends on USB_HSIC_USB3613 || COMPILE_TEST
+ help
+ Select this option if you have an Arche device.
+
+	  To compile this code as a module, choose M here: the module
+ will be called gb-arche.ko
+
+endif # GREYBUS
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
new file mode 100644
index 000000000000..7c5e89622334
--- /dev/null
+++ b/drivers/staging/greybus/Makefile
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0
+# needed for trace events
+ccflags-y += -I$(src)
+
+# Greybus class drivers
+gb-bootrom-y := bootrom.o
+gb-camera-y := camera.o
+gb-firmware-y := fw-core.o fw-download.o fw-management.o authentication.o
+gb-spilib-y := spilib.o
+gb-hid-y := hid.o
+gb-light-y := light.o
+gb-log-y := log.o
+gb-loopback-y := loopback.o
+gb-power-supply-y := power_supply.o
+gb-raw-y := raw.o
+gb-vibrator-y := vibrator.o
+
+obj-$(CONFIG_GREYBUS_BOOTROM) += gb-bootrom.o
+obj-$(CONFIG_GREYBUS_CAMERA) += gb-camera.o
+obj-$(CONFIG_GREYBUS_FIRMWARE) += gb-firmware.o gb-spilib.o
+obj-$(CONFIG_GREYBUS_HID) += gb-hid.o
+obj-$(CONFIG_GREYBUS_LIGHT) += gb-light.o
+obj-$(CONFIG_GREYBUS_LOG) += gb-log.o
+obj-$(CONFIG_GREYBUS_LOOPBACK) += gb-loopback.o
+obj-$(CONFIG_GREYBUS_POWER) += gb-power-supply.o
+obj-$(CONFIG_GREYBUS_RAW) += gb-raw.o
+obj-$(CONFIG_GREYBUS_VIBRATOR) += gb-vibrator.o
+
+# Greybus Audio is a bunch of modules
+gb-audio-module-y := audio_module.o audio_topology.o
+gb-audio-codec-y := audio_codec.o audio_helper.o
+gb-audio-gb-y := audio_gb.o
+gb-audio-apbridgea-y := audio_apbridgea.o
+gb-audio-manager-y := audio_manager.o audio_manager_module.o
+
+# Greybus Audio sysfs helpers can be useful when debugging
+#GB_AUDIO_MANAGER_SYSFS ?= true
+#ifeq ($(GB_AUDIO_MANAGER_SYSFS),true)
+#gb-audio-manager-y += audio_manager_sysfs.o
+#ccflags-y += -DGB_AUDIO_MANAGER_SYSFS
+#endif
+
+obj-$(CONFIG_GREYBUS_AUDIO_APB_CODEC) += gb-audio-codec.o
+obj-$(CONFIG_GREYBUS_AUDIO_APB_CODEC) += gb-audio-module.o
+obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-gb.o
+obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-apbridgea.o
+obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-manager.o
+
+
+# Greybus Bridged PHY drivers
+gb-gbphy-y := gbphy.o
+gb-gpio-y := gpio.o
+gb-i2c-y := i2c.o
+gb-pwm-y := pwm.o
+gb-sdio-y := sdio.o
+gb-spi-y := spi.o
+gb-uart-y := uart.o
+gb-usb-y := usb.o
+
+obj-$(CONFIG_GREYBUS_BRIDGED_PHY) += gb-gbphy.o
+obj-$(CONFIG_GREYBUS_GPIO) += gb-gpio.o
+obj-$(CONFIG_GREYBUS_I2C) += gb-i2c.o
+obj-$(CONFIG_GREYBUS_PWM) += gb-pwm.o
+obj-$(CONFIG_GREYBUS_SDIO) += gb-sdio.o
+obj-$(CONFIG_GREYBUS_SPI) += gb-spi.o gb-spilib.o
+obj-$(CONFIG_GREYBUS_UART) += gb-uart.o
+obj-$(CONFIG_GREYBUS_USB) += gb-usb.o
+
+
+# Greybus Platform driver
+gb-arche-y := arche-platform.o arche-apb-ctrl.o
+
+obj-$(CONFIG_GREYBUS_ARCHE) += gb-arche.o
diff --git a/drivers/staging/greybus/TODO b/drivers/staging/greybus/TODO
new file mode 100644
index 000000000000..6461e0132fe3
--- /dev/null
+++ b/drivers/staging/greybus/TODO
@@ -0,0 +1,5 @@
+* Convert all uses of the old GPIO API from <linux/gpio.h> to the
+ GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
+ lines from device tree or ACPI.
+* Make pwm.c use the struct pwm_ops::apply instead of ::config, ::set_polarity,
+ ::enable and ::disable.
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
new file mode 100644
index 000000000000..90ab32638d3f
--- /dev/null
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arche Platform driver to control APB.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include "arche_platform.h"
+
+static void apb_bootret_deassert(struct device *dev);
+
+struct arche_apb_ctrl_drvdata {
+ /* Control GPIO signals to and from AP <=> AP Bridges */
+ struct gpio_desc *resetn;
+ struct gpio_desc *boot_ret;
+ struct gpio_desc *pwroff;
+ struct gpio_desc *wake_in;
+ struct gpio_desc *wake_out;
+ struct gpio_desc *pwrdn;
+
+ enum arche_platform_state state;
+ bool init_disabled;
+
+ struct regulator *vcore;
+ struct regulator *vio;
+
+ struct gpio_desc *clk_en;
+ struct clk *clk;
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pin_default;
+
+ /* V2: SPI Bus control */
+ struct gpio_desc *spi_en;
+ bool spi_en_polarity_high;
+};
+
+/*
+ * Note that these low-level APIs are active high
+ */
+static inline void deassert_reset(struct gpio_desc *gpio)
+{
+ gpiod_set_raw_value(gpio, 1);
+}
+
+static inline void assert_reset(struct gpio_desc *gpio)
+{
+ gpiod_set_raw_value(gpio, 0);
+}
+
+/*
+ * Note: Please do not modify the below sequence, as it is as per the spec
+ */
+static int coldboot_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+ int ret;
+
+ if (apb->init_disabled ||
+ apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ return 0;
+
+ /* Hold APB in reset state */
+ assert_reset(apb->resetn);
+
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING && apb->spi_en)
+ devm_gpiod_put(dev, apb->spi_en);
+
+ /* Enable power to APB */
+ if (!IS_ERR(apb->vcore)) {
+ ret = regulator_enable(apb->vcore);
+ if (ret) {
+ dev_err(dev, "failed to enable core regulator\n");
+ return ret;
+ }
+ }
+
+ if (!IS_ERR(apb->vio)) {
+ ret = regulator_enable(apb->vio);
+ if (ret) {
+ dev_err(dev, "failed to enable IO regulator\n");
+ return ret;
+ }
+ }
+
+ apb_bootret_deassert(dev);
+
+ /* On DB3 clock was not mandatory */
+ if (apb->clk_en)
+ gpiod_set_value(apb->clk_en, 1);
+
+ usleep_range(100, 200);
+
+ /* deassert reset to APB : Active-low signal */
+ deassert_reset(apb->resetn);
+
+ apb->state = ARCHE_PLATFORM_STATE_ACTIVE;
+
+ return 0;
+}
+
+static int fw_flashing_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+ int ret;
+
+ if (apb->init_disabled ||
+ apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ return 0;
+
+ ret = regulator_enable(apb->vcore);
+ if (ret) {
+ dev_err(dev, "failed to enable core regulator\n");
+ return ret;
+ }
+
+ ret = regulator_enable(apb->vio);
+ if (ret) {
+ dev_err(dev, "failed to enable IO regulator\n");
+ return ret;
+ }
+
+ if (apb->spi_en) {
+ unsigned long flags;
+
+ if (apb->spi_en_polarity_high)
+ flags = GPIOD_OUT_HIGH;
+ else
+ flags = GPIOD_OUT_LOW;
+
+ apb->spi_en = devm_gpiod_get(dev, "spi-en", flags);
+ if (IS_ERR(apb->spi_en)) {
+ ret = PTR_ERR(apb->spi_en);
+ dev_err(dev, "Failed requesting SPI bus en GPIO: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* for flashing device should be in reset state */
+ assert_reset(apb->resetn);
+ apb->state = ARCHE_PLATFORM_STATE_FW_FLASHING;
+
+ return 0;
+}
+
+static int standby_boot_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+
+ if (apb->init_disabled)
+ return 0;
+
+ /*
+	 * Even if it is already in the OFF state,
+	 * we do not want to change the state
+ */
+ if (apb->state == ARCHE_PLATFORM_STATE_STANDBY ||
+ apb->state == ARCHE_PLATFORM_STATE_OFF)
+ return 0;
+
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING && apb->spi_en)
+ devm_gpiod_put(dev, apb->spi_en);
+
+ /*
+ * As per WDM spec, do nothing
+ *
+ * Pasted from WDM spec,
+ * - A falling edge on POWEROFF_L is detected (a)
+ * - WDM enters standby mode, but no output signals are changed
+ */
+
+ /* TODO: POWEROFF_L is input to WDM module */
+ apb->state = ARCHE_PLATFORM_STATE_STANDBY;
+ return 0;
+}
+
+static void poweroff_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+
+ if (apb->init_disabled || apb->state == ARCHE_PLATFORM_STATE_OFF)
+ return;
+
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING && apb->spi_en)
+ devm_gpiod_put(dev, apb->spi_en);
+
+ /* disable the clock */
+ if (apb->clk_en)
+ gpiod_set_value(apb->clk_en, 0);
+
+ if (!IS_ERR(apb->vcore) && regulator_is_enabled(apb->vcore) > 0)
+ regulator_disable(apb->vcore);
+
+ if (!IS_ERR(apb->vio) && regulator_is_enabled(apb->vio) > 0)
+ regulator_disable(apb->vio);
+
+ /* As part of exit, put APB back in reset state */
+ assert_reset(apb->resetn);
+ apb->state = ARCHE_PLATFORM_STATE_OFF;
+
+ /* TODO: May have to send an event to SVC about this exit */
+}
+
+static void apb_bootret_deassert(struct device *dev)
+{
+ struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+ gpiod_set_value(apb->boot_ret, 0);
+}
+
+int apb_ctrl_coldboot(struct device *dev)
+{
+ return coldboot_seq(to_platform_device(dev));
+}
+
+int apb_ctrl_fw_flashing(struct device *dev)
+{
+ return fw_flashing_seq(to_platform_device(dev));
+}
+
+int apb_ctrl_standby_boot(struct device *dev)
+{
+ return standby_boot_seq(to_platform_device(dev));
+}
+
+void apb_ctrl_poweroff(struct device *dev)
+{
+ poweroff_seq(to_platform_device(dev));
+}
+
+static ssize_t state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+ int ret = 0;
+ bool is_disabled;
+
+ if (sysfs_streq(buf, "off")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_OFF)
+ return count;
+
+ poweroff_seq(pdev);
+ } else if (sysfs_streq(buf, "active")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ return count;
+
+ poweroff_seq(pdev);
+ is_disabled = apb->init_disabled;
+ apb->init_disabled = false;
+ ret = coldboot_seq(pdev);
+ if (ret)
+ apb->init_disabled = is_disabled;
+ } else if (sysfs_streq(buf, "standby")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_STANDBY)
+ return count;
+
+ ret = standby_boot_seq(pdev);
+ } else if (sysfs_streq(buf, "fw_flashing")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ return count;
+
+ /*
+ * First we want to make sure we power off everything
+ * and then enter FW flashing state
+ */
+ poweroff_seq(pdev);
+ ret = fw_flashing_seq(pdev);
+ } else {
+ dev_err(dev, "unknown state\n");
+ ret = -EINVAL;
+ }
+
+ return ret ? ret : count;
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+ switch (apb->state) {
+ case ARCHE_PLATFORM_STATE_OFF:
+ return sprintf(buf, "off%s\n",
+ apb->init_disabled ? ",disabled" : "");
+ case ARCHE_PLATFORM_STATE_ACTIVE:
+ return sprintf(buf, "active\n");
+ case ARCHE_PLATFORM_STATE_STANDBY:
+ return sprintf(buf, "standby\n");
+ case ARCHE_PLATFORM_STATE_FW_FLASHING:
+ return sprintf(buf, "fw_flashing\n");
+ default:
+ return sprintf(buf, "unknown state\n");
+ }
+}
+
+static DEVICE_ATTR_RW(state);
+
+static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
+ struct arche_apb_ctrl_drvdata *apb)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ apb->resetn = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->resetn)) {
+ ret = PTR_ERR(apb->resetn);
+ dev_err(dev, "Failed requesting reset GPIO: %d\n", ret);
+ return ret;
+ }
+
+ apb->boot_ret = devm_gpiod_get(dev, "boot-ret", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->boot_ret)) {
+ ret = PTR_ERR(apb->boot_ret);
+ dev_err(dev, "Failed requesting bootret GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* It's not mandatory to support power management interface */
+ apb->pwroff = devm_gpiod_get_optional(dev, "pwr-off", GPIOD_IN);
+ if (IS_ERR(apb->pwroff)) {
+ ret = PTR_ERR(apb->pwroff);
+ dev_err(dev, "Failed requesting pwroff_n GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* Do not make clock mandatory as of now (for DB3) */
+ apb->clk_en = devm_gpiod_get_optional(dev, "clock-en", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->clk_en)) {
+ ret = PTR_ERR(apb->clk_en);
+ dev_err(dev, "Failed requesting APB clock en GPIO: %d\n", ret);
+ return ret;
+ }
+
+ apb->pwrdn = devm_gpiod_get(dev, "pwr-down", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->pwrdn)) {
+ ret = PTR_ERR(apb->pwrdn);
+ dev_warn(dev, "Failed requesting power down GPIO: %d\n", ret);
+ return ret;
+ }
+
+	/* Regulators are optional, as we may have a fixed supply coming in */
+ apb->vcore = devm_regulator_get(dev, "vcore");
+ if (IS_ERR(apb->vcore))
+ dev_warn(dev, "no core regulator found\n");
+
+ apb->vio = devm_regulator_get(dev, "vio");
+ if (IS_ERR(apb->vio))
+ dev_warn(dev, "no IO regulator found\n");
+
+ apb->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(apb->pinctrl)) {
+ dev_err(&pdev->dev, "could not get pinctrl handle\n");
+ return PTR_ERR(apb->pinctrl);
+ }
+ apb->pin_default = pinctrl_lookup_state(apb->pinctrl, "default");
+ if (IS_ERR(apb->pin_default)) {
+ dev_err(&pdev->dev, "could not get default pin state\n");
+ return PTR_ERR(apb->pin_default);
+ }
+
+ /* Only applicable for platform >= V2 */
+ if (of_property_read_bool(pdev->dev.of_node, "gb,spi-en-active-high"))
+ apb->spi_en_polarity_high = true;
+
+ return 0;
+}
+
+static int arche_apb_ctrl_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct arche_apb_ctrl_drvdata *apb;
+ struct device *dev = &pdev->dev;
+
+ apb = devm_kzalloc(&pdev->dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return -ENOMEM;
+
+ ret = apb_ctrl_get_devtree_data(pdev, apb);
+ if (ret) {
+ dev_err(dev, "failed to get apb devicetree data %d\n", ret);
+ return ret;
+ }
+
+ /* Initially set APB to OFF state */
+ apb->state = ARCHE_PLATFORM_STATE_OFF;
+	/* Check whether the device should be kept disabled at boot */
+ if (of_property_read_bool(pdev->dev.of_node, "arche,init-disable"))
+ apb->init_disabled = true;
+
+ platform_set_drvdata(pdev, apb);
+
+ /* Create sysfs interface to allow user to change state dynamically */
+ ret = device_create_file(dev, &dev_attr_state);
+ if (ret) {
+ dev_err(dev, "failed to create state file in sysfs\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Device registered successfully\n");
+ return 0;
+}
+
+static void arche_apb_ctrl_remove(struct platform_device *pdev)
+{
+ device_remove_file(&pdev->dev, &dev_attr_state);
+ poweroff_seq(pdev);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int __maybe_unused arche_apb_ctrl_suspend(struct device *dev)
+{
+ /*
+	 * If the timing profile permits, we may shut down the bridge
+ * completely
+ *
+ * TODO: sequence ??
+ *
+ * Also, need to make sure we meet precondition for unipro suspend
+ * Precondition: Definition ???
+ */
+ return 0;
+}
+
+static int __maybe_unused arche_apb_ctrl_resume(struct device *dev)
+{
+ /*
+ * At least for ES2 we have to meet the delay requirement between
+ * unipro switch and AP bridge init, depending on whether bridge is in
+ * OFF state or standby state.
+ *
+ * Based on whether bridge is in standby or OFF state we may have to
+	 * assert multiple signals. Please refer to the WDM spec for more info.
+ *
+ */
+ return 0;
+}
+
+static void arche_apb_ctrl_shutdown(struct platform_device *pdev)
+{
+ apb_ctrl_poweroff(&pdev->dev);
+}
+
+static SIMPLE_DEV_PM_OPS(arche_apb_ctrl_pm_ops, arche_apb_ctrl_suspend,
+ arche_apb_ctrl_resume);
+
+static const struct of_device_id arche_apb_ctrl_of_match[] = {
+ { .compatible = "usbffff,2", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, arche_apb_ctrl_of_match);
+
+static struct platform_driver arche_apb_ctrl_device_driver = {
+ .probe = arche_apb_ctrl_probe,
+ .remove = arche_apb_ctrl_remove,
+ .shutdown = arche_apb_ctrl_shutdown,
+ .driver = {
+ .name = "arche-apb-ctrl",
+ .pm = &arche_apb_ctrl_pm_ops,
+ .of_match_table = arche_apb_ctrl_of_match,
+ }
+};
+
+int __init arche_apb_init(void)
+{
+ return platform_driver_register(&arche_apb_ctrl_device_driver);
+}
+
+void __exit arche_apb_exit(void)
+{
+ platform_driver_unregister(&arche_apb_ctrl_device_driver);
+}
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
new file mode 100644
index 000000000000..d48464390f58
--- /dev/null
+++ b/drivers/staging/greybus/arche-platform.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arche Platform driver to enable Unipro link.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/suspend.h>
+#include <linux/time.h>
+#include <linux/greybus.h>
+#include <linux/of.h>
+#include "arche_platform.h"
+
+#if IS_ENABLED(CONFIG_USB_HSIC_USB3613)
+#include <linux/usb/usb3613.h>
+#else
+static inline int usb3613_hub_mode_ctrl(bool unused)
+{
+ return 0;
+}
+#endif
+
+#define WD_COLDBOOT_PULSE_WIDTH_MS 30
+
+enum svc_wakedetect_state {
+ WD_STATE_IDLE, /* Default state = pulled high/low */
+ WD_STATE_BOOT_INIT, /* WD = falling edge (low) */
+ WD_STATE_COLDBOOT_TRIG, /* WD = rising edge (high), > 30msec */
+ WD_STATE_STANDBYBOOT_TRIG, /* As of now not used ?? */
+ WD_STATE_COLDBOOT_START, /* Cold boot process started */
+ WD_STATE_STANDBYBOOT_START, /* Not used */
+};
+
+struct arche_platform_drvdata {
+ /* Control GPIO signals to and from AP <=> SVC */
+ struct gpio_desc *svc_reset;
+ bool is_reset_act_hi;
+ struct gpio_desc *svc_sysboot;
+	struct gpio_desc *wake_detect; /* bi-dir, maps to WAKE_MOD & WAKE_FRAME signals */
+
+ enum arche_platform_state state;
+
+ struct gpio_desc *svc_refclk_req;
+ struct clk *svc_ref_clk;
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pin_default;
+
+ int num_apbs;
+
+ enum svc_wakedetect_state wake_detect_state;
+ int wake_detect_irq;
+ spinlock_t wake_lock; /* Protect wake_detect_state */
+ struct mutex platform_state_mutex; /* Protect state */
+ unsigned long wake_detect_start;
+ struct notifier_block pm_notifier;
+
+ struct device *dev;
+};
+
+/* Requires calling context to hold arche_pdata->platform_state_mutex */
+static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
+ enum arche_platform_state state)
+{
+ arche_pdata->state = state;
+}
+
+/* Requires arche_pdata->wake_lock is held by calling context */
+static void arche_platform_set_wake_detect_state(struct arche_platform_drvdata *arche_pdata,
+ enum svc_wakedetect_state state)
+{
+ arche_pdata->wake_detect_state = state;
+}
+
+static inline void svc_reset_onoff(struct gpio_desc *gpio, bool onoff)
+{
+ gpiod_set_raw_value(gpio, onoff);
+}
+
+static int apb_cold_boot(struct device *dev, void *data)
+{
+ int ret;
+
+ ret = apb_ctrl_coldboot(dev);
+ if (ret)
+ dev_warn(dev, "failed to coldboot\n");
+
+	/* Child nodes are independent, so do not abort the coldboot operation */
+ return 0;
+}
+
+static int apb_poweroff(struct device *dev, void *data)
+{
+ apb_ctrl_poweroff(dev);
+
+	/* Take the HUB3613 out of HUB mode. */
+ if (usb3613_hub_mode_ctrl(false))
+ dev_warn(dev, "failed to control hub device\n");
+
+ return 0;
+}
+
+static void arche_platform_wd_irq_en(struct arche_platform_drvdata *arche_pdata)
+{
+ /* Enable interrupt here, to read event back from SVC */
+ enable_irq(arche_pdata->wake_detect_irq);
+}
+
+static irqreturn_t arche_platform_wd_irq_thread(int irq, void *devid)
+{
+ struct arche_platform_drvdata *arche_pdata = devid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+ if (arche_pdata->wake_detect_state != WD_STATE_COLDBOOT_TRIG) {
+ /* Something is wrong */
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_COLDBOOT_START);
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+	/* It should complete a power cycle, so first make sure it is powered off */
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+
+ /* Bring APB out of reset: cold boot sequence */
+ device_for_each_child(arche_pdata->dev, NULL, apb_cold_boot);
+
+ /* Enable HUB3613 into HUB mode. */
+ if (usb3613_hub_mode_ctrl(true))
+ dev_warn(arche_pdata->dev, "failed to control hub device\n");
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+ arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
+{
+ struct arche_platform_drvdata *arche_pdata = devid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+
+ if (gpiod_get_value(arche_pdata->wake_detect)) {
+ /* wake/detect rising */
+
+ /*
+		 * If the wake/detect line goes high again less than 30 msec
+		 * after going low, a standby boot sequence is being initiated,
+		 * which is not supported/implemented as of now, so ignore it.
+ */
+ if (arche_pdata->wake_detect_state == WD_STATE_BOOT_INIT) {
+ if (time_before(jiffies,
+ arche_pdata->wake_detect_start +
+ msecs_to_jiffies(WD_COLDBOOT_PULSE_WIDTH_MS))) {
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_IDLE);
+ } else {
+ /*
+ * Check we are not in middle of irq thread
+ * already
+ */
+ if (arche_pdata->wake_detect_state !=
+ WD_STATE_COLDBOOT_START) {
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_COLDBOOT_TRIG);
+ spin_unlock_irqrestore(&arche_pdata->wake_lock,
+ flags);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+ }
+ } else {
+ /* wake/detect falling */
+ if (arche_pdata->wake_detect_state == WD_STATE_IDLE) {
+ arche_pdata->wake_detect_start = jiffies;
+ /*
+			 * When wake/detect first goes low, we assume it is
+			 * meant for coldboot and set the flag. If the line
+			 * stays low beyond 30 msec it is a coldboot,
+			 * otherwise we fall back to standby boot.
+ */
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_BOOT_INIT);
+ }
+ }
+
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+ return IRQ_HANDLED;
+}
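
The handler above classifies a wake/detect pulse purely by how long the line stayed low. A minimal sketch of that rule in isolation, assuming only the WD_COLDBOOT_PULSE_WIDTH_MS constant and jiffies helpers already used in this file (the helper name is hypothetical and not part of this patch; it merely restates the time_before() check above):

/*
 * Hypothetical helper (not part of this patch), included only to
 * illustrate the rule implemented in arche_platform_wd_irq(): a
 * wake/detect line held low for at least WD_COLDBOOT_PULSE_WIDTH_MS
 * is treated as a cold-boot request; a shorter pulse would mean the
 * (currently unsupported) standby boot and is ignored.
 */
static bool wd_pulse_requests_coldboot(unsigned long pulse_start_jiffies)
{
	return !time_before(jiffies,
			    pulse_start_jiffies +
			    msecs_to_jiffies(WD_COLDBOOT_PULSE_WIDTH_MS));
}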
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static int
+arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
+{
+ int ret;
+
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ return 0;
+
+ dev_info(arche_pdata->dev, "Booting from cold boot state\n");
+
+ svc_reset_onoff(arche_pdata->svc_reset, arche_pdata->is_reset_act_hi);
+
+ gpiod_set_value(arche_pdata->svc_sysboot, 0);
+ usleep_range(100, 200);
+
+ ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
+ if (ret) {
+ dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* bring SVC out of reset */
+ svc_reset_onoff(arche_pdata->svc_reset, !arche_pdata->is_reset_act_hi);
+
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_ACTIVE);
+
+ return 0;
+}
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static int
+arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
+{
+ int ret;
+
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ return 0;
+
+ dev_info(arche_pdata->dev, "Switching to FW flashing state\n");
+
+ svc_reset_onoff(arche_pdata->svc_reset, arche_pdata->is_reset_act_hi);
+
+ gpiod_set_value(arche_pdata->svc_sysboot, 1);
+
+ usleep_range(100, 200);
+
+ ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
+ if (ret) {
+ dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
+ ret);
+ return ret;
+ }
+
+ svc_reset_onoff(arche_pdata->svc_reset, !arche_pdata->is_reset_act_hi);
+
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_FW_FLASHING);
+
+ return 0;
+}
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static void
+arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
+{
+ unsigned long flags;
+
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
+ return;
+
+	/* If in fw_flashing mode, there is no need to repeat this */
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_FW_FLASHING) {
+ disable_irq(arche_pdata->wake_detect_irq);
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_IDLE);
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+ }
+
+ clk_disable_unprepare(arche_pdata->svc_ref_clk);
+
+ /* As part of exit, put APB back in reset state */
+ svc_reset_onoff(arche_pdata->svc_reset, arche_pdata->is_reset_act_hi);
+
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
+}
+
+static ssize_t state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&arche_pdata->platform_state_mutex);
+
+ if (sysfs_streq(buf, "off")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
+ goto exit;
+
+		/* If the SVC goes down, bring down the APBs as well */
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+
+ arche_platform_poweroff_seq(arche_pdata);
+
+ } else if (sysfs_streq(buf, "active")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ goto exit;
+
+		/*
+		 * First we want to make sure we power off everything
+		 * and then activate it again
+		 */
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+ arche_platform_poweroff_seq(arche_pdata);
+
+ arche_platform_wd_irq_en(arche_pdata);
+ ret = arche_platform_coldboot_seq(arche_pdata);
+ if (ret)
+ goto exit;
+
+ } else if (sysfs_streq(buf, "standby")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_STANDBY)
+ goto exit;
+
+ dev_warn(arche_pdata->dev, "standby state not supported\n");
+ } else if (sysfs_streq(buf, "fw_flashing")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ goto exit;
+
+ /*
+ * Here we only control SVC.
+ *
+		 * In FW_FLASHING mode we do not want to control the APBs
+		 * because, in the case of V2, the SPI bus is shared between
+		 * both APBs. So let the user choose which APB to flash.
+ */
+ arche_platform_poweroff_seq(arche_pdata);
+
+ ret = arche_platform_fw_flashing_seq(arche_pdata);
+ if (ret)
+ goto exit;
+ } else {
+ dev_err(arche_pdata->dev, "unknown state\n");
+ ret = -EINVAL;
+ }
+
+exit:
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+ return ret ? ret : count;
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
+
+ switch (arche_pdata->state) {
+ case ARCHE_PLATFORM_STATE_OFF:
+ return sprintf(buf, "off\n");
+ case ARCHE_PLATFORM_STATE_ACTIVE:
+ return sprintf(buf, "active\n");
+ case ARCHE_PLATFORM_STATE_STANDBY:
+ return sprintf(buf, "standby\n");
+ case ARCHE_PLATFORM_STATE_FW_FLASHING:
+ return sprintf(buf, "fw_flashing\n");
+ default:
+ return sprintf(buf, "unknown state\n");
+ }
+}
+
+static DEVICE_ATTR_RW(state);
+
+static int arche_platform_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ struct arche_platform_drvdata *arche_pdata =
+ container_of(notifier, struct arche_platform_drvdata,
+ pm_notifier);
+ int ret = NOTIFY_DONE;
+
+ mutex_lock(&arche_pdata->platform_state_mutex);
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
+ ret = NOTIFY_STOP;
+ break;
+ }
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+ arche_platform_poweroff_seq(arche_pdata);
+ break;
+ case PM_POST_SUSPEND:
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_OFF)
+ break;
+
+ arche_platform_wd_irq_en(arche_pdata);
+ arche_platform_coldboot_seq(arche_pdata);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+
+ return ret;
+}
+
+static int arche_platform_probe(struct platform_device *pdev)
+{
+ struct arche_platform_drvdata *arche_pdata;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+ unsigned int flags;
+
+ arche_pdata = devm_kzalloc(&pdev->dev, sizeof(*arche_pdata),
+ GFP_KERNEL);
+ if (!arche_pdata)
+ return -ENOMEM;
+
+ /* setup svc reset gpio */
+ arche_pdata->is_reset_act_hi = of_property_read_bool(np,
+ "svc,reset-active-high");
+ if (arche_pdata->is_reset_act_hi)
+ flags = GPIOD_OUT_HIGH;
+ else
+ flags = GPIOD_OUT_LOW;
+
+ arche_pdata->svc_reset = devm_gpiod_get(dev, "svc,reset", flags);
+ if (IS_ERR(arche_pdata->svc_reset)) {
+ ret = PTR_ERR(arche_pdata->svc_reset);
+ dev_err(dev, "failed to request svc-reset GPIO: %d\n", ret);
+ return ret;
+ }
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
+
+ arche_pdata->svc_sysboot = devm_gpiod_get(dev, "svc,sysboot",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(arche_pdata->svc_sysboot)) {
+ ret = PTR_ERR(arche_pdata->svc_sysboot);
+ dev_err(dev, "failed to request sysboot0 GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* setup the clock request gpio first */
+ arche_pdata->svc_refclk_req = devm_gpiod_get(dev, "svc,refclk-req",
+ GPIOD_IN);
+ if (IS_ERR(arche_pdata->svc_refclk_req)) {
+ ret = PTR_ERR(arche_pdata->svc_refclk_req);
+ dev_err(dev, "failed to request svc-clk-req GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* setup refclk2 to follow the pin */
+ arche_pdata->svc_ref_clk = devm_clk_get(dev, "svc_ref_clk");
+ if (IS_ERR(arche_pdata->svc_ref_clk)) {
+ ret = PTR_ERR(arche_pdata->svc_ref_clk);
+ dev_err(dev, "failed to get svc_ref_clk: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, arche_pdata);
+
+ arche_pdata->num_apbs = of_get_child_count(np);
+	dev_dbg(dev, "Number of APBs available - %d\n", arche_pdata->num_apbs);
+
+ arche_pdata->wake_detect = devm_gpiod_get(dev, "svc,wake-detect",
+ GPIOD_IN);
+ if (IS_ERR(arche_pdata->wake_detect)) {
+ ret = PTR_ERR(arche_pdata->wake_detect);
+ dev_err(dev, "Failed requesting wake_detect GPIO: %d\n", ret);
+ return ret;
+ }
+
+ arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
+
+ arche_pdata->dev = &pdev->dev;
+
+ spin_lock_init(&arche_pdata->wake_lock);
+ mutex_init(&arche_pdata->platform_state_mutex);
+ arche_pdata->wake_detect_irq =
+ gpiod_to_irq(arche_pdata->wake_detect);
+
+ ret = devm_request_threaded_irq(dev, arche_pdata->wake_detect_irq,
+ arche_platform_wd_irq,
+ arche_platform_wd_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(dev), arche_pdata);
+ if (ret) {
+ dev_err(dev, "failed to request wake detect IRQ %d\n", ret);
+ return ret;
+ }
+ disable_irq(arche_pdata->wake_detect_irq);
+
+ ret = device_create_file(dev, &dev_attr_state);
+ if (ret) {
+ dev_err(dev, "failed to create state file in sysfs\n");
+ return ret;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child nodes %d\n", ret);
+ goto err_device_remove;
+ }
+
+ arche_pdata->pm_notifier.notifier_call = arche_platform_pm_notifier;
+ ret = register_pm_notifier(&arche_pdata->pm_notifier);
+
+ if (ret) {
+ dev_err(dev, "failed to register pm notifier %d\n", ret);
+ goto err_device_remove;
+ }
+
+	/* Cold boot the SVC unless it was explicitly requested to stay off */
+ if (!of_property_read_bool(pdev->dev.of_node, "arche,init-off")) {
+ mutex_lock(&arche_pdata->platform_state_mutex);
+ ret = arche_platform_coldboot_seq(arche_pdata);
+ if (ret) {
+ dev_err(dev, "Failed to cold boot svc %d\n", ret);
+ goto err_coldboot;
+ }
+ arche_platform_wd_irq_en(arche_pdata);
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+ }
+
+ dev_info(dev, "Device registered successfully\n");
+ return 0;
+
+err_coldboot:
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+err_device_remove:
+ device_remove_file(&pdev->dev, &dev_attr_state);
+ return ret;
+}
+
+static int arche_remove_child(struct device *dev, void *unused)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static void arche_platform_remove(struct platform_device *pdev)
+{
+ struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+
+ unregister_pm_notifier(&arche_pdata->pm_notifier);
+ device_remove_file(&pdev->dev, &dev_attr_state);
+ device_for_each_child(&pdev->dev, NULL, arche_remove_child);
+ arche_platform_poweroff_seq(arche_pdata);
+
+ if (usb3613_hub_mode_ctrl(false))
+ dev_warn(arche_pdata->dev, "failed to control hub device\n");
+}
+
+static __maybe_unused int arche_platform_suspend(struct device *dev)
+{
+ /*
+	 * If the timing profile permits, we may shut down the bridge
+ * completely
+ *
+ * TODO: sequence ??
+ *
+ * Also, need to make sure we meet precondition for unipro suspend
+ * Precondition: Definition ???
+ */
+ return 0;
+}
+
+static __maybe_unused int arche_platform_resume(struct device *dev)
+{
+ /*
+ * At least for ES2 we have to meet the delay requirement between
+ * unipro switch and AP bridge init, depending on whether bridge is in
+ * OFF state or standby state.
+ *
+ * Based on whether bridge is in standby or OFF state we may have to
+	 * assert multiple signals. Please refer to the WDM spec for more info.
+ *
+ */
+ return 0;
+}
+
+static void arche_platform_shutdown(struct platform_device *pdev)
+{
+ struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+
+ arche_platform_poweroff_seq(arche_pdata);
+
+ usb3613_hub_mode_ctrl(false);
+}
+
+static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops,
+ arche_platform_suspend,
+ arche_platform_resume);
+
+static const struct of_device_id arche_platform_of_match[] = {
+ /* Use PID/VID of SVC device */
+ { .compatible = "google,arche-platform", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, arche_platform_of_match);
+
+static struct platform_driver arche_platform_device_driver = {
+ .probe = arche_platform_probe,
+ .remove = arche_platform_remove,
+ .shutdown = arche_platform_shutdown,
+ .driver = {
+ .name = "arche-platform-ctrl",
+ .pm = &arche_platform_pm_ops,
+ .of_match_table = arche_platform_of_match,
+ }
+};
+
+static int __init arche_init(void)
+{
+ int retval;
+
+ retval = platform_driver_register(&arche_platform_device_driver);
+ if (retval)
+ return retval;
+
+ retval = arche_apb_init();
+ if (retval)
+ platform_driver_unregister(&arche_platform_device_driver);
+
+ return retval;
+}
+module_init(arche_init);
+
+static void __exit arche_exit(void)
+{
+ arche_apb_exit();
+ platform_driver_unregister(&arche_platform_device_driver);
+}
+module_exit(arche_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vaibhav Hiremath <vaibhav.hiremath@linaro.org>");
+MODULE_DESCRIPTION("Arche Platform Driver");
diff --git a/drivers/staging/greybus/arche_platform.h b/drivers/staging/greybus/arche_platform.h
new file mode 100644
index 000000000000..9d997f2d6517
--- /dev/null
+++ b/drivers/staging/greybus/arche_platform.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arche Platform driver to enable Unipro link.
+ *
+ * Copyright 2015-2016 Google Inc.
+ * Copyright 2015-2016 Linaro Ltd.
+ */
+
+#ifndef __ARCHE_PLATFORM_H
+#define __ARCHE_PLATFORM_H
+
+enum arche_platform_state {
+ ARCHE_PLATFORM_STATE_OFF,
+ ARCHE_PLATFORM_STATE_ACTIVE,
+ ARCHE_PLATFORM_STATE_STANDBY,
+ ARCHE_PLATFORM_STATE_FW_FLASHING,
+};
+
+int __init arche_apb_init(void);
+void __exit arche_apb_exit(void);
+
+/* Operational states for the APB device */
+int apb_ctrl_coldboot(struct device *dev);
+int apb_ctrl_fw_flashing(struct device *dev);
+int apb_ctrl_standby_boot(struct device *dev);
+void apb_ctrl_poweroff(struct device *dev);
+
+#endif /* __ARCHE_PLATFORM_H */
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
new file mode 100644
index 000000000000..26117e390deb
--- /dev/null
+++ b/drivers/staging/greybus/audio_apbridgea.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Audio Device Class Protocol helpers
+ *
+ * Copyright 2015-2016 Google Inc.
+ */
+
+#include <linux/greybus.h>
+#include "audio_apbridgea.h"
+#include "audio_codec.h"
+
+int gb_audio_apbridgea_set_config(struct gb_connection *connection,
+ __u16 i2s_port, __u32 format, __u32 rate,
+ __u32 mclk_freq)
+{
+ struct audio_apbridgea_set_config_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.format = cpu_to_le32(format);
+ req.rate = cpu_to_le32(rate);
+ req.mclk_freq = cpu_to_le32(mclk_freq);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_config);
+
+int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction)
+{
+ struct audio_apbridgea_register_cport_request req;
+ int ret;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.cport = cpu_to_le16(cportid);
+ req.direction = direction;
+
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ return ret;
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_register_cport);
+
+int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction)
+{
+ struct audio_apbridgea_unregister_cport_request req;
+ int ret;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.cport = cpu_to_le16(cportid);
+ req.direction = direction;
+
+ ret = gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+
+ gb_pm_runtime_put_autosuspend(connection->bundle);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_unregister_cport);
+
+int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size)
+{
+ struct audio_apbridgea_set_tx_data_size_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.size = cpu_to_le16(size);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_tx_data_size);
+
+int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_prepare_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_tx);
+
+int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
+ __u16 i2s_port, __u64 timestamp)
+{
+ struct audio_apbridgea_start_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.timestamp = cpu_to_le64(timestamp);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_tx);
+
+int gb_audio_apbridgea_stop_tx(struct gb_connection *connection, __u16 i2s_port)
+{
+ struct audio_apbridgea_stop_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_tx);
+
+int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_shutdown_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_tx);
+
+int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size)
+{
+ struct audio_apbridgea_set_rx_data_size_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.size = cpu_to_le16(size);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_rx_data_size);
+
+int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_prepare_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_rx);
+
+int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_start_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_rx);
+
+int gb_audio_apbridgea_stop_rx(struct gb_connection *connection, __u16 i2s_port)
+{
+ struct audio_apbridgea_stop_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_rx);
+
+int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_shutdown_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_rx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("greybus:audio-apbridgea");
+MODULE_DESCRIPTION("Greybus Special APBridgeA Audio Protocol library");
+MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
new file mode 100644
index 000000000000..ab707d310129
--- /dev/null
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2015-2016 Google Inc.
+ */
+/*
+ * This is a special protocol for configuring communication over the
+ * I2S bus between the DSP on the MSM8994 and APBridgeA. Therefore,
+ * we can predefine several low-level attributes of the communication
+ * because we know that they are supported. In particular, the following
+ * assumptions are made:
+ * - there are two channels (i.e., stereo)
+ * - the low-level protocol is I2S as defined by Philips/NXP
+ * - the DSP on the MSM8994 is the clock master for MCLK, BCLK, and WCLK
+ * - WCLK changes on the falling edge of BCLK
+ * - WCLK low for left channel; high for right channel
+ * - TX data is sent on the falling edge of BCLK
+ * - RX data is received/latched on the rising edge of BCLK
+ */
+
+#ifndef __AUDIO_APBRIDGEA_H
+#define __AUDIO_APBRIDGEA_H
+
+#define AUDIO_APBRIDGEA_TYPE_SET_CONFIG 0x01
+#define AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT 0x02
+#define AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT 0x03
+#define AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE 0x04
+ /* 0x05 unused */
+#define AUDIO_APBRIDGEA_TYPE_PREPARE_TX 0x06
+#define AUDIO_APBRIDGEA_TYPE_START_TX 0x07
+#define AUDIO_APBRIDGEA_TYPE_STOP_TX 0x08
+#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX 0x09
+#define AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE 0x0a
+ /* 0x0b unused */
+#define AUDIO_APBRIDGEA_TYPE_PREPARE_RX 0x0c
+#define AUDIO_APBRIDGEA_TYPE_START_RX 0x0d
+#define AUDIO_APBRIDGEA_TYPE_STOP_RX 0x0e
+#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX 0x0f
+
+#define AUDIO_APBRIDGEA_PCM_FMT_8 BIT(0)
+#define AUDIO_APBRIDGEA_PCM_FMT_16 BIT(1)
+#define AUDIO_APBRIDGEA_PCM_FMT_24 BIT(2)
+#define AUDIO_APBRIDGEA_PCM_FMT_32 BIT(3)
+#define AUDIO_APBRIDGEA_PCM_FMT_64 BIT(4)
+
+#define AUDIO_APBRIDGEA_PCM_RATE_5512 BIT(0)
+#define AUDIO_APBRIDGEA_PCM_RATE_8000 BIT(1)
+#define AUDIO_APBRIDGEA_PCM_RATE_11025 BIT(2)
+#define AUDIO_APBRIDGEA_PCM_RATE_16000 BIT(3)
+#define AUDIO_APBRIDGEA_PCM_RATE_22050 BIT(4)
+#define AUDIO_APBRIDGEA_PCM_RATE_32000 BIT(5)
+#define AUDIO_APBRIDGEA_PCM_RATE_44100 BIT(6)
+#define AUDIO_APBRIDGEA_PCM_RATE_48000 BIT(7)
+#define AUDIO_APBRIDGEA_PCM_RATE_64000 BIT(8)
+#define AUDIO_APBRIDGEA_PCM_RATE_88200 BIT(9)
+#define AUDIO_APBRIDGEA_PCM_RATE_96000 BIT(10)
+#define AUDIO_APBRIDGEA_PCM_RATE_176400 BIT(11)
+#define AUDIO_APBRIDGEA_PCM_RATE_192000 BIT(12)
+
+#define AUDIO_APBRIDGEA_DIRECTION_TX BIT(0)
+#define AUDIO_APBRIDGEA_DIRECTION_RX BIT(1)
+
+/* The I2S port is passed in the 'index' parameter of the USB request */
+/* The CPort is passed in the 'value' parameter of the USB request */
+
+struct audio_apbridgea_hdr {
+ __u8 type;
+ __le16 i2s_port;
+} __packed;
+
+struct audio_apbridgea_set_config_request {
+ struct audio_apbridgea_hdr hdr;
+ __le32 format; /* AUDIO_APBRIDGEA_PCM_FMT_* */
+ __le32 rate; /* AUDIO_APBRIDGEA_PCM_RATE_* */
+ __le32 mclk_freq; /* XXX Remove? */
+} __packed;
+
+struct audio_apbridgea_register_cport_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 cport;
+ __u8 direction;
+} __packed;
+
+struct audio_apbridgea_unregister_cport_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 cport;
+ __u8 direction;
+} __packed;
+
+struct audio_apbridgea_set_tx_data_size_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 size;
+} __packed;
+
+struct audio_apbridgea_prepare_tx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_start_tx_request {
+ struct audio_apbridgea_hdr hdr;
+ __le64 timestamp;
+} __packed;
+
+struct audio_apbridgea_stop_tx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_shutdown_tx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_set_rx_data_size_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 size;
+} __packed;
+
+struct audio_apbridgea_prepare_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_start_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_stop_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_shutdown_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+#endif /*__AUDIO_APBRIDGEA_H */
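
For reference, a hypothetical caller sketch of the TX half of this protocol, loosely following the call order used by audio_codec.c later in this patch. The function name is illustrative; the connection handle and CPort id are placeholders, while port 0, the 192-byte message size and the 6.144 MHz MCLK are the values used elsewhere in this patch. Error handling is trimmed to the minimum.

/*
 * Hypothetical bring-up sketch for one I2S TX path (not part of this
 * patch), mirroring the gb_audio_apbridgea_* calls made by the codec
 * driver: configure, register the CPort, size the messages, then
 * prepare and start transmission.
 */
static int example_apbridgea_start_playback(struct gb_connection *connection,
					    u16 cportid)
{
	int ret;

	ret = gb_audio_apbridgea_set_config(connection, 0,
					    AUDIO_APBRIDGEA_PCM_FMT_16,
					    AUDIO_APBRIDGEA_PCM_RATE_48000,
					    6144000);
	if (ret)
		return ret;

	ret = gb_audio_apbridgea_register_cport(connection, 0, cportid,
						AUDIO_APBRIDGEA_DIRECTION_TX);
	if (ret)
		return ret;

	ret = gb_audio_apbridgea_set_tx_data_size(connection, 0, 192);
	if (ret)
		return ret;

	ret = gb_audio_apbridgea_prepare_tx(connection, 0);
	if (ret)
		return ret;

	/* a timestamp of 0 is what the codec driver below passes as well */
	return gb_audio_apbridgea_start_tx(connection, 0, 0);
}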
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
new file mode 100644
index 000000000000..444c53b4e08d
--- /dev/null
+++ b/drivers/staging/greybus/audio_codec.c
@@ -0,0 +1,1100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * APBridge ALSA SoC dummy codec driver
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <uapi/linux/input.h>
+
+#include "audio_codec.h"
+#include "audio_apbridgea.h"
+#include "audio_manager.h"
+#include "audio_helper.h"
+
+static struct gbaudio_codec_info *gbcodec;
+
+static struct gbaudio_data_connection *
+find_data(struct gbaudio_module_info *module, int id)
+{
+ struct gbaudio_data_connection *data;
+
+ list_for_each_entry(data, &module->data_list, list) {
+ if (id == data->id)
+ return data;
+ }
+ return NULL;
+}
+
+static struct gbaudio_stream_params *
+find_dai_stream_params(struct gbaudio_codec_info *codec, int id, int stream)
+{
+ struct gbaudio_codec_dai *dai;
+
+ list_for_each_entry(dai, &codec->dai_list, list) {
+ if (dai->id == id)
+ return &dai->params[stream];
+ }
+ return NULL;
+}
+
+static int gbaudio_module_enable_tx(struct gbaudio_codec_info *codec,
+ struct gbaudio_module_info *module, int id)
+{
+ int module_state, ret = 0;
+ u16 data_cport, i2s_port, cportid;
+ u8 sig_bits, channels;
+ u32 format, rate;
+ struct gbaudio_data_connection *data;
+ struct gbaudio_stream_params *params;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];
+
+ params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_PLAYBACK);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ return -EINVAL;
+ }
+
+ /* register cport */
+ if (module_state < GBAUDIO_CODEC_STARTUP) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_register_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_TX);
+ if (ret) {
+ dev_err_ratelimited(module->dev, "reg_cport failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] = GBAUDIO_CODEC_STARTUP;
+ dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
+ }
+
+ /* hw_params */
+ if (module_state < GBAUDIO_CODEC_HWPARAMS) {
+ format = params->format;
+ channels = params->channels;
+ rate = params->rate;
+ sig_bits = params->sig_bits;
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
+ format, rate, channels, sig_bits);
+ if (ret) {
+ dev_err_ratelimited(module->dev, "set_pcm failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] = GBAUDIO_CODEC_HWPARAMS;
+ dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
+ }
+
+ /* prepare */
+ if (module_state < GBAUDIO_CODEC_PREPARE) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_tx_data_size(module->mgmt_connection,
+ data_cport, 192);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "set_tx_data_size failed:%d\n",
+ ret);
+ return ret;
+ }
+ ret = gb_audio_gb_activate_tx(module->mgmt_connection, data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "activate_tx failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] = GBAUDIO_CODEC_PREPARE;
+ dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
+ }
+
+ return 0;
+}
+
+static int gbaudio_module_disable_tx(struct gbaudio_module_info *module, int id)
+{
+ int ret;
+ u16 data_cport, cportid, i2s_port;
+ int module_state;
+ struct gbaudio_data_connection *data;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];
+
+ if (module_state > GBAUDIO_CODEC_HWPARAMS) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_deactivate_tx(module->mgmt_connection,
+ data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "deactivate_tx failed:%d\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] = GBAUDIO_CODEC_HWPARAMS;
+ }
+
+ if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_TX);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "unregister_cport failed:%d\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] = GBAUDIO_CODEC_SHUTDOWN;
+ }
+
+ return 0;
+}
+
+static int gbaudio_module_enable_rx(struct gbaudio_codec_info *codec,
+ struct gbaudio_module_info *module, int id)
+{
+ int module_state, ret = 0;
+ u16 data_cport, i2s_port, cportid;
+ u8 sig_bits, channels;
+ u32 format, rate;
+ struct gbaudio_data_connection *data;
+ struct gbaudio_stream_params *params;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];
+
+ params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_CAPTURE);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ return -EINVAL;
+ }
+
+ /* register cport */
+ if (module_state < GBAUDIO_CODEC_STARTUP) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_register_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_RX);
+ if (ret) {
+ dev_err_ratelimited(module->dev, "reg_cport failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_CAPTURE] = GBAUDIO_CODEC_STARTUP;
+ dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
+ }
+
+ /* hw_params */
+ if (module_state < GBAUDIO_CODEC_HWPARAMS) {
+ format = params->format;
+ channels = params->channels;
+ rate = params->rate;
+ sig_bits = params->sig_bits;
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
+ format, rate, channels, sig_bits);
+ if (ret) {
+ dev_err_ratelimited(module->dev, "set_pcm failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_CAPTURE] = GBAUDIO_CODEC_HWPARAMS;
+ dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
+ }
+
+ /* prepare */
+ if (module_state < GBAUDIO_CODEC_PREPARE) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_rx_data_size(module->mgmt_connection,
+ data_cport, 192);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "set_rx_data_size failed:%d\n",
+ ret);
+ return ret;
+ }
+ ret = gb_audio_gb_activate_rx(module->mgmt_connection,
+ data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "activate_rx failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_CAPTURE] = GBAUDIO_CODEC_PREPARE;
+ dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
+ }
+
+ return 0;
+}
+
+static int gbaudio_module_disable_rx(struct gbaudio_module_info *module, int id)
+{
+ int ret;
+ u16 data_cport, cportid, i2s_port;
+ int module_state;
+ struct gbaudio_data_connection *data;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];
+
+ if (module_state > GBAUDIO_CODEC_HWPARAMS) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_deactivate_rx(module->mgmt_connection,
+ data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "deactivate_rx failed:%d\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
+ data->state[SNDRV_PCM_STREAM_CAPTURE] = GBAUDIO_CODEC_HWPARAMS;
+ }
+
+ if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_RX);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "unregister_cport failed:%d\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
+ data->state[SNDRV_PCM_STREAM_CAPTURE] = GBAUDIO_CODEC_SHUTDOWN;
+ }
+
+ return 0;
+}
+
+int gbaudio_module_update(struct gbaudio_codec_info *codec,
+ struct snd_soc_dapm_widget *w,
+ struct gbaudio_module_info *module, int enable)
+{
+ int dai_id, ret;
+ char intf_name[NAME_SIZE], dir[NAME_SIZE];
+
+ dev_dbg(module->dev, "%s:Module update %s sequence\n", w->name,
+ enable ? "Enable" : "Disable");
+
+ if ((w->id != snd_soc_dapm_aif_in) && (w->id != snd_soc_dapm_aif_out)) {
+ dev_dbg(codec->dev, "No action required for %s\n", w->name);
+ return 0;
+ }
+
+	/*
+	 * Parse dai_id from the AIF widget's stream_name,
+	 * e.g. "I2S 0 Playback" -> intf "I2S", dai_id 0, dir "Playback".
+	 */
+ ret = sscanf(w->sname, "%s %d %s", intf_name, &dai_id, dir);
+ if (ret < 3) {
+ dev_err(codec->dev, "Error while parsing dai_id for %s\n", w->name);
+ return -EINVAL;
+ }
+
+ mutex_lock(&codec->lock);
+ if (w->id == snd_soc_dapm_aif_in) {
+ if (enable)
+ ret = gbaudio_module_enable_tx(codec, module, dai_id);
+ else
+ ret = gbaudio_module_disable_tx(module, dai_id);
+ } else if (w->id == snd_soc_dapm_aif_out) {
+ if (enable)
+ ret = gbaudio_module_enable_rx(codec, module, dai_id);
+ else
+ ret = gbaudio_module_disable_rx(module, dai_id);
+ }
+
+ mutex_unlock(&codec->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(gbaudio_module_update);
+
+/*
+ * codec DAI ops
+ */
+static int gbcodec_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ params->state = GBAUDIO_CODEC_STARTUP;
+ mutex_unlock(&codec->lock);
+ /* to prevent suspend in case of active audio */
+ pm_stay_awake(dai->dev);
+
+ return 0;
+}
+
+static void gbcodec_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list))
+ dev_info(codec->dev, "No codec module available during shutdown\n");
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return;
+ }
+ params->state = GBAUDIO_CODEC_SHUTDOWN;
+ mutex_unlock(&codec->lock);
+ pm_relax(dai->dev);
+}
+
+static int gbcodec_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hwparams,
+ struct snd_soc_dai *dai)
+{
+ int ret;
+ u8 sig_bits, channels;
+ u32 format, rate;
+ struct gbaudio_module_info *module;
+ struct gbaudio_data_connection *data;
+ struct gb_bundle *bundle;
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ /*
+	 * Currently only 48000 Hz, S16_LE, stereo is assumed to be
+	 * supported; validate params before configuring the codec.
+ */
+ if (params_channels(hwparams) != 2) {
+ dev_err(dai->dev, "Invalid channel count:%d\n",
+ params_channels(hwparams));
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ channels = params_channels(hwparams);
+
+ if (params_rate(hwparams) != 48000) {
+ dev_err(dai->dev, "Invalid sampling rate:%d\n",
+ params_rate(hwparams));
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ rate = GB_AUDIO_PCM_RATE_48000;
+
+ if (params_format(hwparams) != SNDRV_PCM_FORMAT_S16_LE) {
+ dev_err(dai->dev, "Invalid format:%d\n", params_format(hwparams));
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ format = GB_AUDIO_PCM_FMT_S16_LE;
+
+ /* find the data connection */
+ list_for_each_entry(module, &codec->module_list, list) {
+ data = find_data(module, dai->id);
+ if (data)
+ break;
+ }
+
+ if (!data) {
+ dev_err(dai->dev, "DATA connection missing\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret) {
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ ret = gb_audio_apbridgea_set_config(data->connection, 0,
+ AUDIO_APBRIDGEA_PCM_FMT_16,
+ AUDIO_APBRIDGEA_PCM_RATE_48000,
+ 6144000);
+ if (ret) {
+ dev_err_ratelimited(dai->dev, "%d: Error during set_config\n",
+ ret);
+ gb_pm_runtime_put_noidle(bundle);
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ gb_pm_runtime_put_noidle(bundle);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sig_bits = dai->driver->playback.sig_bits;
+ else
+ sig_bits = dai->driver->capture.sig_bits;
+
+ params->state = GBAUDIO_CODEC_HWPARAMS;
+ params->format = format;
+ params->rate = rate;
+ params->channels = channels;
+ params->sig_bits = sig_bits;
+
+ mutex_unlock(&codec->lock);
+ return 0;
+}
+
+static int gbcodec_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int ret;
+ struct gbaudio_module_info *module = NULL, *iter;
+ struct gbaudio_data_connection *data;
+ struct gb_bundle *bundle;
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ list_for_each_entry(iter, &codec->module_list, list) {
+ /* find the dai */
+ data = find_data(iter, dai->id);
+ if (data) {
+ module = iter;
+ break;
+ }
+ }
+ if (!data) {
+ dev_err(dai->dev, "DATA connection missing\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret) {
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ ret = gb_audio_apbridgea_set_tx_data_size(data->connection, 0, 192);
+ break;
+ case SNDRV_PCM_STREAM_CAPTURE:
+ ret = gb_audio_apbridgea_set_rx_data_size(data->connection, 0, 192);
+ break;
+ }
+ if (ret) {
+ gb_pm_runtime_put_noidle(bundle);
+ mutex_unlock(&codec->lock);
+ dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n", ret);
+ return ret;
+ }
+
+ gb_pm_runtime_put_noidle(bundle);
+
+ params->state = GBAUDIO_CODEC_PREPARE;
+ mutex_unlock(&codec->lock);
+ return 0;
+}
+
+static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+ int ret;
+ struct gbaudio_data_connection *data;
+ struct gbaudio_module_info *module = NULL, *iter;
+ struct gb_bundle *bundle;
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ dev_dbg(dai->dev, "Mute:%d, Direction:%s\n", mute,
+ stream ? "CAPTURE" : "PLAYBACK");
+
+ mutex_lock(&codec->lock);
+
+ params = find_dai_stream_params(codec, dai->id, stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ if (mute) {
+ params->state = GBAUDIO_CODEC_STOP;
+ ret = 0;
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ list_for_each_entry(iter, &codec->module_list, list) {
+ /* find the dai */
+ data = find_data(iter, dai->id);
+ if (data) {
+ module = iter;
+ break;
+ }
+ }
+ if (!data) {
+ dev_err(dai->dev, "%s DATA connection missing\n",
+ dai->name);
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret) {
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ if (!mute && !stream) {/* start playback */
+ ret = gb_audio_apbridgea_prepare_tx(data->connection, 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_start_tx(data->connection, 0, 0);
+ params->state = GBAUDIO_CODEC_START;
+ } else if (!mute && stream) {/* start capture */
+ ret = gb_audio_apbridgea_prepare_rx(data->connection, 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_start_rx(data->connection, 0);
+ params->state = GBAUDIO_CODEC_START;
+ } else if (mute && !stream) {/* stop playback */
+ ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_shutdown_tx(data->connection, 0);
+ params->state = GBAUDIO_CODEC_STOP;
+ } else if (mute && stream) {/* stop capture */
+ ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_shutdown_rx(data->connection, 0);
+ params->state = GBAUDIO_CODEC_STOP;
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ dev_err_ratelimited(dai->dev,
+ "%s:Error during %s %s stream:%d\n",
+ module->name, mute ? "Mute" : "Unmute",
+ stream ? "Capture" : "Playback", ret);
+
+ gb_pm_runtime_put_noidle(bundle);
+ mutex_unlock(&codec->lock);
+ return ret;
+}
+
+static const struct snd_soc_dai_ops gbcodec_dai_ops = {
+ .startup = gbcodec_startup,
+ .shutdown = gbcodec_shutdown,
+ .hw_params = gbcodec_hw_params,
+ .prepare = gbcodec_prepare,
+ .mute_stream = gbcodec_mute_stream,
+};
+
+static struct snd_soc_dai_driver gbaudio_dai[] = {
+ {
+ .name = "apb-i2s0",
+ .id = 0,
+ .playback = {
+ .stream_name = "I2S 0 Playback",
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_max = 48000,
+ .rate_min = 48000,
+ .channels_min = 1,
+ .channels_max = 2,
+ .sig_bits = 16,
+ },
+ .capture = {
+ .stream_name = "I2S 0 Capture",
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_max = 48000,
+ .rate_min = 48000,
+ .channels_min = 1,
+ .channels_max = 2,
+ .sig_bits = 16,
+ },
+ .ops = &gbcodec_dai_ops,
+ },
+};
+
+static int gbaudio_init_jack(struct gbaudio_module_info *module,
+ struct snd_soc_card *card)
+{
+ int ret;
+ struct gbaudio_jack *jack, *n;
+ struct snd_soc_jack_pin *headset, *button;
+
+ if (!module->jack_mask)
+ return 0;
+
+ snprintf(module->jack_name, NAME_SIZE, "GB %d Headset Jack",
+ module->dev_id);
+
+ headset = devm_kzalloc(module->dev, sizeof(*headset), GFP_KERNEL);
+ if (!headset)
+ return -ENOMEM;
+
+ headset->pin = module->jack_name;
+ headset->mask = module->jack_mask;
+ ret = snd_soc_card_jack_new_pins(card, module->jack_name,
+ module->jack_mask,
+ &module->headset.jack, headset, 1);
+ if (ret) {
+ dev_err(module->dev, "Failed to create new jack\n");
+ return ret;
+ }
+
+ /* Add to module's jack list */
+ list_add(&module->headset.list, &module->jack_list);
+
+ if (!module->button_mask)
+ return 0;
+
+ snprintf(module->button_name, NAME_SIZE, "GB %d Button Jack",
+ module->dev_id);
+ button = devm_kzalloc(module->dev, sizeof(*button), GFP_KERNEL);
+ if (!button) {
+ ret = -ENOMEM;
+ goto free_jacks;
+ }
+
+ button->pin = module->button_name;
+ button->mask = module->button_mask;
+ ret = snd_soc_card_jack_new_pins(card, module->button_name,
+ module->button_mask,
+ &module->button.jack,
+ button, 1);
+ if (ret) {
+ dev_err(module->dev, "Failed to create button jack\n");
+ goto free_jacks;
+ }
+
+ /* Add to module's jack list */
+ list_add(&module->button.list, &module->jack_list);
+
+ /*
+	 * Currently, a maximum of 4 buttons is supported with the following key mapping:
+ * BTN_0 = KEY_MEDIA
+ * BTN_1 = KEY_VOICECOMMAND
+ * BTN_2 = KEY_VOLUMEUP
+ * BTN_3 = KEY_VOLUMEDOWN
+ */
+
+ if (module->button_mask & SND_JACK_BTN_0) {
+ ret = snd_jack_set_key(module->button.jack.jack, SND_JACK_BTN_0,
+ KEY_MEDIA);
+ if (ret) {
+ dev_err(module->dev, "Failed to set BTN_0\n");
+ goto free_jacks;
+ }
+ }
+
+ if (module->button_mask & SND_JACK_BTN_1) {
+ ret = snd_jack_set_key(module->button.jack.jack, SND_JACK_BTN_1,
+ KEY_VOICECOMMAND);
+ if (ret) {
+ dev_err(module->dev, "Failed to set BTN_1\n");
+ goto free_jacks;
+ }
+ }
+
+ if (module->button_mask & SND_JACK_BTN_2) {
+ ret = snd_jack_set_key(module->button.jack.jack, SND_JACK_BTN_2,
+ KEY_VOLUMEUP);
+ if (ret) {
+ dev_err(module->dev, "Failed to set BTN_2\n");
+ goto free_jacks;
+ }
+ }
+
+ if (module->button_mask & SND_JACK_BTN_3) {
+ ret = snd_jack_set_key(module->button.jack.jack, SND_JACK_BTN_3,
+ KEY_VOLUMEDOWN);
+ if (ret) {
+			dev_err(module->dev, "Failed to set BTN_3\n");
+ goto free_jacks;
+ }
+ }
+
+ /* FIXME
+ * verify if this is really required
+ set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
+ module->button.jack.jack->input_dev->propbit);
+ */
+
+ return 0;
+
+free_jacks:
+ list_for_each_entry_safe(jack, n, &module->jack_list, list) {
+ snd_device_free(card->snd_card, jack->jack.jack);
+ list_del(&jack->list);
+ }
+
+ return ret;
+}
+
+int gbaudio_register_module(struct gbaudio_module_info *module)
+{
+ int ret;
+ struct snd_soc_component *comp;
+ struct snd_soc_dapm_context *dapm;
+ struct gbaudio_jack *jack = NULL;
+
+ if (!gbcodec) {
+ dev_err(module->dev, "GB Codec not yet probed\n");
+ return -EAGAIN;
+ }
+
+ comp = gbcodec->component;
+ dapm = snd_soc_component_to_dapm(comp);
+
+ mutex_lock(&gbcodec->register_mutex);
+
+ if (module->num_dais) {
+ dev_err(gbcodec->dev,
+ "%d:DAIs not supported via gbcodec driver\n",
+ module->num_dais);
+ mutex_unlock(&gbcodec->register_mutex);
+ return -EINVAL;
+ }
+
+ ret = gbaudio_init_jack(module, comp->card);
+ if (ret) {
+ mutex_unlock(&gbcodec->register_mutex);
+ return ret;
+ }
+
+ if (module->dapm_widgets)
+ snd_soc_dapm_new_controls(dapm, module->dapm_widgets,
+ module->num_dapm_widgets);
+ if (module->controls)
+ snd_soc_add_component_controls(comp, module->controls,
+ module->num_controls);
+ if (module->dapm_routes)
+ snd_soc_dapm_add_routes(dapm, module->dapm_routes,
+ module->num_dapm_routes);
+
+ /* card already instantiated, create widgets here only */
+ if (comp->card->instantiated) {
+ gbaudio_dapm_link_component_dai_widgets(comp->card, dapm);
+#ifdef CONFIG_SND_JACK
+ /*
+ * register jack devices for this module
+ * from codec->jack_list
+ */
+ list_for_each_entry(jack, &module->jack_list, list) {
+ snd_device_register(comp->card->snd_card,
+ jack->jack.jack);
+ }
+#endif
+ }
+
+ mutex_lock(&gbcodec->lock);
+ list_add(&module->list, &gbcodec->module_list);
+ mutex_unlock(&gbcodec->lock);
+
+ if (comp->card->instantiated)
+ ret = snd_soc_dapm_new_widgets(comp->card);
+ dev_dbg(comp->dev, "Registered %s module\n", module->name);
+
+ mutex_unlock(&gbcodec->register_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(gbaudio_register_module);
+
+static void gbaudio_codec_clean_data_tx(struct gbaudio_data_connection *data)
+{
+ u16 i2s_port, cportid;
+ int ret;
+
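+	/*
+	 * The APBridge I2S port is shared by all registered modules, so only
+	 * stop and shut down the TX path when this is the last module left.
+	 */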
+ if (list_is_singular(&gbcodec->module_list)) {
+ ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
+ if (ret)
+ return;
+ ret = gb_audio_apbridgea_shutdown_tx(data->connection, 0);
+ if (ret)
+ return;
+ }
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_TX);
+ data->state[0] = GBAUDIO_CODEC_SHUTDOWN;
+}
+
+static void gbaudio_codec_clean_data_rx(struct gbaudio_data_connection *data)
+{
+ u16 i2s_port, cportid;
+ int ret;
+
+ if (list_is_singular(&gbcodec->module_list)) {
+ ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
+ if (ret)
+ return;
+ ret = gb_audio_apbridgea_shutdown_rx(data->connection, 0);
+ if (ret)
+ return;
+ }
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_RX);
+ data->state[1] = GBAUDIO_CODEC_SHUTDOWN;
+}
+
+static void gbaudio_codec_cleanup(struct gbaudio_module_info *module)
+{
+ struct gbaudio_data_connection *data;
+ int pb_state, cap_state;
+
+ dev_dbg(gbcodec->dev, "%s: removed, cleanup APBridge\n", module->name);
+ list_for_each_entry(data, &module->data_list, list) {
+ pb_state = data->state[0];
+ cap_state = data->state[1];
+
+ if (pb_state > GBAUDIO_CODEC_SHUTDOWN)
+ gbaudio_codec_clean_data_tx(data);
+
+ if (cap_state > GBAUDIO_CODEC_SHUTDOWN)
+ gbaudio_codec_clean_data_rx(data);
+ }
+}
+
+void gbaudio_unregister_module(struct gbaudio_module_info *module)
+{
+ struct snd_soc_component *comp = gbcodec->component;
+ struct gbaudio_jack *jack, *n;
+ int mask;
+
+ dev_dbg(comp->dev, "Unregister %s module\n", module->name);
+
+ mutex_lock(&gbcodec->register_mutex);
+ mutex_lock(&gbcodec->lock);
+ gbaudio_codec_cleanup(module);
+ list_del(&module->list);
+ dev_dbg(comp->dev, "Process Unregister %s module\n", module->name);
+ mutex_unlock(&gbcodec->lock);
+
+#ifdef CONFIG_SND_JACK
+ /* free jack devices for this module jack_list */
+ list_for_each_entry_safe(jack, n, &module->jack_list, list) {
+ if (jack == &module->headset)
+ mask = GBCODEC_JACK_MASK;
+ else if (jack == &module->button)
+ mask = GBCODEC_JACK_BUTTON_MASK;
+ else
+ mask = 0;
+ if (mask) {
+ dev_dbg(module->dev, "Report %s removal\n",
+ jack->jack.jack->id);
+ snd_soc_jack_report(&jack->jack, 0, mask);
+ snd_device_free(comp->card->snd_card,
+ jack->jack.jack);
+ list_del(&jack->list);
+ }
+ }
+#endif
+
+ if (module->dapm_routes) {
+		struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(comp);
+
+ dev_dbg(comp->dev, "Removing %d routes\n",
+ module->num_dapm_routes);
+ snd_soc_dapm_del_routes(dapm, module->dapm_routes,
+ module->num_dapm_routes);
+ }
+ if (module->controls) {
+ dev_dbg(comp->dev, "Removing %d controls\n",
+ module->num_controls);
+ /* release control semaphore */
+ gbaudio_remove_component_controls(comp, module->controls,
+ module->num_controls);
+ }
+ if (module->dapm_widgets) {
+		struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(comp);
+
+ dev_dbg(comp->dev, "Removing %d widgets\n",
+ module->num_dapm_widgets);
+ gbaudio_dapm_free_controls(dapm, module->dapm_widgets,
+ module->num_dapm_widgets);
+ }
+
+ dev_dbg(comp->dev, "Unregistered %s module\n", module->name);
+
+ mutex_unlock(&gbcodec->register_mutex);
+}
+EXPORT_SYMBOL(gbaudio_unregister_module);
+
+/*
+ * component driver ops
+ */
+static int gbcodec_probe(struct snd_soc_component *comp)
+{
+ int i;
+ struct gbaudio_codec_info *info;
+ struct gbaudio_codec_dai *dai;
+
+ info = devm_kzalloc(comp->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = comp->dev;
+ INIT_LIST_HEAD(&info->module_list);
+ mutex_init(&info->lock);
+ mutex_init(&info->register_mutex);
+ INIT_LIST_HEAD(&info->dai_list);
+
+ /* init dai_list used to maintain runtime stream info */
+ for (i = 0; i < ARRAY_SIZE(gbaudio_dai); i++) {
+ dai = devm_kzalloc(comp->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+ dai->id = gbaudio_dai[i].id;
+ list_add(&dai->list, &info->dai_list);
+ }
+
+ info->component = comp;
+ snd_soc_component_set_drvdata(comp, info);
+ gbcodec = info;
+
+ device_init_wakeup(comp->dev, 1);
+ return 0;
+}
+
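+/*
+ * The APB codec exposes no real register map; all control traffic goes over
+ * Greybus, so component register I/O is stubbed out.
+ */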
+static int gbcodec_write(struct snd_soc_component *comp, unsigned int reg,
+ unsigned int value)
+{
+ return 0;
+}
+
+static unsigned int gbcodec_read(struct snd_soc_component *comp,
+ unsigned int reg)
+{
+ return 0;
+}
+
+static const struct snd_soc_component_driver soc_codec_dev_gbaudio = {
+ .probe = gbcodec_probe,
+ .read = gbcodec_read,
+ .write = gbcodec_write,
+};
+
+#ifdef CONFIG_PM
+static int gbaudio_codec_suspend(struct device *dev)
+{
+ dev_dbg(dev, "%s: suspend\n", __func__);
+ return 0;
+}
+
+static int gbaudio_codec_resume(struct device *dev)
+{
+ dev_dbg(dev, "%s: resume\n", __func__);
+ return 0;
+}
+
+static const struct dev_pm_ops gbaudio_codec_pm_ops = {
+ .suspend = gbaudio_codec_suspend,
+ .resume = gbaudio_codec_resume,
+};
+#endif
+
+static int gbaudio_codec_probe(struct platform_device *pdev)
+{
+ return devm_snd_soc_register_component(&pdev->dev,
+ &soc_codec_dev_gbaudio,
+ gbaudio_dai, ARRAY_SIZE(gbaudio_dai));
+}
+
+static const struct of_device_id greybus_asoc_machine_of_match[] = {
+ { .compatible = "toshiba,apb-dummy-codec", },
+ {},
+};
+
+static struct platform_driver gbaudio_codec_driver = {
+ .driver = {
+ .name = "apb-dummy-codec",
+#ifdef CONFIG_PM
+ .pm = &gbaudio_codec_pm_ops,
+#endif
+ .of_match_table = greybus_asoc_machine_of_match,
+ },
+ .probe = gbaudio_codec_probe,
+};
+module_platform_driver(gbaudio_codec_driver);
+
+MODULE_DESCRIPTION("APBridge ALSA SoC dummy codec driver");
+MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:apb-dummy-codec");
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
new file mode 100644
index 000000000000..f3f7a7ec6be4
--- /dev/null
+++ b/drivers/staging/greybus/audio_codec.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus audio driver
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#ifndef __LINUX_GBAUDIO_CODEC_H
+#define __LINUX_GBAUDIO_CODEC_H
+
+#include <linux/greybus.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#define NAME_SIZE 32
+#define MAX_DAIS 2 /* APB1, APB2 */
+
+enum {
+ APB1_PCM = 0,
+ APB2_PCM,
+ NUM_CODEC_DAIS,
+};
+
+/*
+ * device_type should be same as defined in audio.h
+ * (Android media layer)
+ */
+enum {
+ GBAUDIO_DEVICE_NONE = 0x0,
+ /* reserved bits */
+ GBAUDIO_DEVICE_BIT_IN = 0x80000000,
+ GBAUDIO_DEVICE_BIT_DEFAULT = 0x40000000,
+ /* output devices */
+ GBAUDIO_DEVICE_OUT_SPEAKER = 0x2,
+ GBAUDIO_DEVICE_OUT_WIRED_HEADSET = 0x4,
+ GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE = 0x8,
+ /* input devices */
+ GBAUDIO_DEVICE_IN_BUILTIN_MIC = GBAUDIO_DEVICE_BIT_IN | 0x4,
+ GBAUDIO_DEVICE_IN_WIRED_HEADSET = GBAUDIO_DEVICE_BIT_IN | 0x10,
+};
+
+#define GBCODEC_JACK_MASK 0x0000FFFF
+#define GBCODEC_JACK_BUTTON_MASK 0xFFFF0000
+
+enum gbaudio_codec_state {
+ GBAUDIO_CODEC_SHUTDOWN = 0,
+ GBAUDIO_CODEC_STARTUP,
+ GBAUDIO_CODEC_HWPARAMS,
+ GBAUDIO_CODEC_PREPARE,
+ GBAUDIO_CODEC_START,
+ GBAUDIO_CODEC_STOP,
+};
+
+struct gbaudio_stream_params {
+ int state;
+ u8 sig_bits, channels;
+ u32 format, rate;
+};
+
+struct gbaudio_codec_dai {
+ int id;
+ /* runtime params for playback/capture streams */
+ struct gbaudio_stream_params params[2];
+ struct list_head list;
+};
+
+struct gbaudio_codec_info {
+ struct device *dev;
+ struct snd_soc_component *component;
+ struct list_head module_list;
+ /* to maintain runtime stream params for each DAI */
+ struct list_head dai_list;
+ struct mutex lock;
+ struct mutex register_mutex;
+};
+
+struct gbaudio_widget {
+ __u8 id;
+ const char *name;
+ struct list_head list;
+};
+
+struct gbaudio_control {
+ __u8 id;
+ char *name;
+ char *wname;
+ const char * const *texts;
+ int items;
+ struct list_head list;
+};
+
+struct gbaudio_data_connection {
+ int id;
+ __le16 data_cport;
+ struct gb_connection *connection;
+ struct list_head list;
+ /* maintain runtime state for playback/capture stream */
+ int state[2];
+};
+
+/* stream direction */
+#define GB_PLAYBACK BIT(0)
+#define GB_CAPTURE BIT(1)
+
+enum gbaudio_module_state {
+ GBAUDIO_MODULE_OFF = 0,
+ GBAUDIO_MODULE_ON,
+};
+
+struct gbaudio_jack {
+ struct snd_soc_jack jack;
+ struct list_head list;
+};
+
+struct gbaudio_module_info {
+ /* module info */
+ struct device *dev;
+ int dev_id; /* check if it should be bundle_id/hd_cport_id */
+ int vid;
+ int pid;
+ int type;
+ int set_uevent;
+ char vstr[NAME_SIZE];
+ char pstr[NAME_SIZE];
+ struct list_head list;
+ /* need to share this info to above user space */
+ int manager_id;
+ char name[NAME_SIZE];
+ unsigned int ip_devices;
+ unsigned int op_devices;
+
+ /* jack related */
+ char jack_name[NAME_SIZE];
+ char button_name[NAME_SIZE];
+ int jack_type;
+ int jack_mask;
+ int button_mask;
+ int button_status;
+ struct gbaudio_jack headset;
+ struct gbaudio_jack button;
+
+ /* connection info */
+ struct gb_connection *mgmt_connection;
+ size_t num_data_connections;
+ struct list_head data_list;
+
+ /* topology related */
+ int num_dais;
+ int num_controls;
+ int num_dapm_widgets;
+ int num_dapm_routes;
+ unsigned long dai_offset;
+ unsigned long widget_offset;
+ unsigned long control_offset;
+ unsigned long route_offset;
+ struct snd_kcontrol_new *controls;
+ struct snd_soc_dapm_widget *dapm_widgets;
+ struct snd_soc_dapm_route *dapm_routes;
+ struct snd_soc_dai_driver *dais;
+
+ struct list_head widget_list;
+ struct list_head ctl_list;
+ struct list_head widget_ctl_list;
+ struct list_head jack_list;
+
+ struct gb_audio_topology *topology;
+};
+
+int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
+ struct gb_audio_topology *tplg_data);
+void gbaudio_tplg_release(struct gbaudio_module_info *module);
+
+int gbaudio_module_update(struct gbaudio_codec_info *codec,
+ struct snd_soc_dapm_widget *w,
+ struct gbaudio_module_info *module,
+ int enable);
+int gbaudio_register_module(struct gbaudio_module_info *module);
+void gbaudio_unregister_module(struct gbaudio_module_info *module);
+
+/* protocol related */
+int gb_audio_gb_get_topology(struct gb_connection *connection,
+ struct gb_audio_topology **topology);
+int gb_audio_gb_get_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value);
+int gb_audio_gb_set_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value);
+int gb_audio_gb_enable_widget(struct gb_connection *connection,
+ u8 widget_id);
+int gb_audio_gb_disable_widget(struct gb_connection *connection,
+ u8 widget_id);
+int gb_audio_gb_get_pcm(struct gb_connection *connection,
+ u16 data_cport, u32 *format,
+ u32 *rate, u8 *channels,
+ u8 *sig_bits);
+int gb_audio_gb_set_pcm(struct gb_connection *connection,
+ u16 data_cport, u32 format,
+ u32 rate, u8 channels,
+ u8 sig_bits);
+int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size);
+int gb_audio_gb_activate_tx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size);
+int gb_audio_gb_activate_rx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
+ u16 data_cport);
+int gb_audio_apbridgea_set_config(struct gb_connection *connection,
+ __u16 i2s_port, __u32 format,
+ __u32 rate, __u32 mclk_freq);
+int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction);
+int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction);
+int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size);
+int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
+ __u16 i2s_port, __u64 timestamp);
+int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size);
+int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+
+#endif /* __LINUX_GBAUDIO_CODEC_H */
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
new file mode 100644
index 000000000000..9d8994fdb41a
--- /dev/null
+++ b/drivers/staging/greybus/audio_gb.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Audio Device Class Protocol helpers
+ *
+ * Copyright 2015-2016 Google Inc.
+ */
+
+#include <linux/greybus.h>
+#include "audio_codec.h"
+
+/* TODO: Split into separate calls */
+int gb_audio_gb_get_topology(struct gb_connection *connection,
+ struct gb_audio_topology **topology)
+{
+ struct gb_audio_get_topology_size_response size_resp;
+ struct gb_audio_topology *topo;
+ u16 size;
+ int ret;
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE,
+ NULL, 0, &size_resp, sizeof(size_resp));
+ if (ret)
+ return ret;
+
+ size = le16_to_cpu(size_resp.size);
+ if (size < sizeof(*topo))
+ return -ENODATA;
+
+ topo = kzalloc(size, GFP_KERNEL);
+ if (!topo)
+ return -ENOMEM;
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY, NULL, 0,
+ topo, size);
+ if (ret) {
+ kfree(topo);
+ return ret;
+ }
+
+ *topology = topo;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_topology);
+
+int gb_audio_gb_get_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value)
+{
+ struct gb_audio_get_control_request req;
+ struct gb_audio_get_control_response resp;
+ int ret;
+
+ req.control_id = control_id;
+ req.index = index;
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_CONTROL,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret)
+ return ret;
+
+ memcpy(value, &resp.value, sizeof(*value));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_control);
+
+int gb_audio_gb_set_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value)
+{
+ struct gb_audio_set_control_request req;
+
+ req.control_id = control_id;
+ req.index = index;
+ memcpy(&req.value, value, sizeof(req.value));
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_CONTROL,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_control);
+
+int gb_audio_gb_enable_widget(struct gb_connection *connection,
+ u8 widget_id)
+{
+ struct gb_audio_enable_widget_request req;
+
+ req.widget_id = widget_id;
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_ENABLE_WIDGET,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_enable_widget);
+
+int gb_audio_gb_disable_widget(struct gb_connection *connection,
+ u8 widget_id)
+{
+ struct gb_audio_disable_widget_request req;
+
+ req.widget_id = widget_id;
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_DISABLE_WIDGET,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_disable_widget);
+
+int gb_audio_gb_get_pcm(struct gb_connection *connection, u16 data_cport,
+ u32 *format, u32 *rate, u8 *channels,
+ u8 *sig_bits)
+{
+ struct gb_audio_get_pcm_request req;
+ struct gb_audio_get_pcm_response resp;
+ int ret;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_PCM,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret)
+ return ret;
+
+ *format = le32_to_cpu(resp.format);
+ *rate = le32_to_cpu(resp.rate);
+ *channels = resp.channels;
+ *sig_bits = resp.sig_bits;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_pcm);
+
+int gb_audio_gb_set_pcm(struct gb_connection *connection, u16 data_cport,
+ u32 format, u32 rate, u8 channels,
+ u8 sig_bits)
+{
+ struct gb_audio_set_pcm_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+ req.format = cpu_to_le32(format);
+ req.rate = cpu_to_le32(rate);
+ req.channels = channels;
+ req.sig_bits = sig_bits;
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_PCM,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_pcm);
+
+int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size)
+{
+ struct gb_audio_set_tx_data_size_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+ req.size = cpu_to_le16(size);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_TX_DATA_SIZE,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_tx_data_size);
+
+int gb_audio_gb_activate_tx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_activate_tx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_TX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_activate_tx);
+
+int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_deactivate_tx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_TX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_tx);
+
+int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size)
+{
+ struct gb_audio_set_rx_data_size_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+ req.size = cpu_to_le16(size);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_RX_DATA_SIZE,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_rx_data_size);
+
+int gb_audio_gb_activate_rx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_activate_rx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_RX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_activate_rx);
+
+int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_deactivate_rx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_RX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_rx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("greybus:audio-gb");
+MODULE_DESCRIPTION("Greybus Audio Device Class Protocol library");
+MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
new file mode 100644
index 000000000000..b4873c6d6bed
--- /dev/null
+++ b/drivers/staging/greybus/audio_helper.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Audio Sound SoC helper APIs
+ */
+
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include "audio_helper.h"
+
+#define gbaudio_dapm_for_each_direction(dir) \
+ for ((dir) = SND_SOC_DAPM_DIR_IN; (dir) <= SND_SOC_DAPM_DIR_OUT; \
+ (dir)++)
+
+static void gbaudio_dapm_link_dai_widget(struct snd_soc_dapm_widget *dai_w,
+ struct snd_soc_card *card)
+{
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_dapm_widget *src, *sink;
+ struct snd_soc_dai *dai = dai_w->priv;
+
+ /* ...find all widgets with the same stream and link them */
+ list_for_each_entry(w, &card->widgets, list) {
+ if (w->dapm != dai_w->dapm)
+ continue;
+
+ switch (w->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ continue;
+ default:
+ break;
+ }
+
+ if (!w->sname || !strstr(w->sname, dai_w->sname))
+ continue;
+
+ /*
+ * check if widget is already linked,
+ * if (w->linked)
+ * return;
+ */
+
+ if (dai_w->id == snd_soc_dapm_dai_in) {
+ src = dai_w;
+ sink = w;
+ } else {
+ src = w;
+ sink = dai_w;
+ }
+ dev_dbg(dai->dev, "%s -> %s\n", src->name, sink->name);
+ /* Add the DAPM path and set widget's linked status
+ * snd_soc_dapm_add_path(w->dapm, src, sink, NULL, NULL);
+ * w->linked = 1;
+ */
+ }
+}
+
+int gbaudio_dapm_link_component_dai_widgets(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm)
+{
+ struct snd_soc_dapm_widget *dai_w;
+
+ /* For each DAI widget... */
+ list_for_each_entry(dai_w, &card->widgets, list) {
+ if (dai_w->dapm != dapm)
+ continue;
+ switch (dai_w->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ break;
+ default:
+ continue;
+ }
+ gbaudio_dapm_link_dai_widget(dai_w, card);
+ }
+
+ return 0;
+}
+
+static void gbaudio_dapm_free_path(struct snd_soc_dapm_path *path)
+{
+ list_del(&path->list_node[SND_SOC_DAPM_DIR_IN]);
+ list_del(&path->list_node[SND_SOC_DAPM_DIR_OUT]);
+ list_del(&path->list_kcontrol);
+ list_del(&path->list);
+ kfree(path);
+}
+
+static void gbaudio_dapm_free_widget(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_path *p, *next_p;
+ enum snd_soc_dapm_direction dir;
+
+ list_del(&w->list);
+ /*
+ * remove source and sink paths associated to this widget.
+ * While removing the path, remove reference to it from both
+ * source and sink widgets so that path is removed only once.
+ */
+ gbaudio_dapm_for_each_direction(dir) {
+ snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p)
+ gbaudio_dapm_free_path(p);
+ }
+
+ kfree(w->kcontrols);
+ kfree_const(w->name);
+ kfree_const(w->sname);
+ kfree(w);
+}
+
+int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget,
+ int num)
+{
+ int i;
+ struct snd_soc_dapm_widget *w, *tmp_w;
+ struct snd_soc_card *card = snd_soc_dapm_to_card(dapm);
+
+ mutex_lock(&card->dapm_mutex);
+ for (i = 0; i < num; i++) {
+ /* below logic can be optimized to identify widget pointer */
+ w = NULL;
+ list_for_each_entry(tmp_w, &card->widgets, list) {
+ if (tmp_w->dapm == dapm &&
+ !strcmp(tmp_w->name, widget->name)) {
+ w = tmp_w;
+ break;
+ }
+ }
+ if (!w) {
+ dev_err(card->dev, "%s: widget not found\n",
+ widget->name);
+ widget++;
+ continue;
+ }
+ widget++;
+ gbaudio_dapm_free_widget(w);
+ }
+ mutex_unlock(&card->dapm_mutex);
+ return 0;
+}
+
+static int gbaudio_remove_controls(struct snd_card *card, struct device *dev,
+ const struct snd_kcontrol_new *controls,
+ int num_controls, const char *prefix)
+{
+ int i, err;
+
+ for (i = 0; i < num_controls; i++) {
+ const struct snd_kcontrol_new *control = &controls[i];
+ struct snd_ctl_elem_id id;
+
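+		/*
+		 * Rebuild the control ID exactly as ASoC named it (including
+		 * any component name prefix) so snd_ctl_remove_id() can find
+		 * and remove the matching kcontrol.
+		 */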
+ if (prefix)
+ snprintf(id.name, sizeof(id.name), "%s %s", prefix,
+ control->name);
+ else
+ strscpy(id.name, control->name, sizeof(id.name));
+ id.numid = 0;
+ id.iface = control->iface;
+ id.device = control->device;
+ id.subdevice = control->subdevice;
+ id.index = control->index;
+ err = snd_ctl_remove_id(card, &id);
+ if (err < 0)
+ dev_err(dev, "%d: Failed to remove %s\n", err,
+ control->name);
+ }
+ return 0;
+}
+
+int gbaudio_remove_component_controls(struct snd_soc_component *component,
+ const struct snd_kcontrol_new *controls,
+ unsigned int num_controls)
+{
+ struct snd_card *card = component->card->snd_card;
+
+ return gbaudio_remove_controls(card, component->dev, controls,
+ num_controls, component->name_prefix);
+}
diff --git a/drivers/staging/greybus/audio_helper.h b/drivers/staging/greybus/audio_helper.h
new file mode 100644
index 000000000000..5cf1c6d7d3ea
--- /dev/null
+++ b/drivers/staging/greybus/audio_helper.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Audio Sound SoC helper APIs
+ */
+
+#ifndef __LINUX_GBAUDIO_HELPER_H
+#define __LINUX_GBAUDIO_HELPER_H
+
+int gbaudio_dapm_link_component_dai_widgets(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm);
+int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget,
+ int num);
+int gbaudio_remove_component_controls(struct snd_soc_component *component,
+ const struct snd_kcontrol_new *controls,
+ unsigned int num_controls);
+#endif
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
new file mode 100644
index 000000000000..27ca5f796c5f
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ */
+
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+static struct kset *manager_kset;
+
+static LIST_HEAD(modules_list);
+static DECLARE_RWSEM(modules_rwsem);
+static DEFINE_IDA(module_id);
+
+/* helpers */
+static struct gb_audio_manager_module *gb_audio_manager_get_locked(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ if (id < 0)
+ return NULL;
+
+ list_for_each_entry(module, &modules_list, list) {
+ if (module->id == id)
+ return module;
+ }
+
+ return NULL;
+}
+
+/* public API */
+int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
+{
+ struct gb_audio_manager_module *module;
+ int id;
+ int err;
+
+ id = ida_alloc(&module_id, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ err = gb_audio_manager_module_create(&module, manager_kset,
+ id, desc);
+ if (err) {
+ ida_free(&module_id, id);
+ return err;
+ }
+
+ /* Add it to the list */
+ down_write(&modules_rwsem);
+ list_add_tail(&module->list, &modules_list);
+ up_write(&modules_rwsem);
+
+ return module->id;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_add);
+
+int gb_audio_manager_remove(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ down_write(&modules_rwsem);
+
+ module = gb_audio_manager_get_locked(id);
+ if (!module) {
+ up_write(&modules_rwsem);
+ return -EINVAL;
+ }
+ list_del(&module->list);
+ kobject_put(&module->kobj);
+ up_write(&modules_rwsem);
+ ida_free(&module_id, id);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
+
+void gb_audio_manager_remove_all(void)
+{
+ struct gb_audio_manager_module *module, *next;
+ int is_empty;
+
+ down_write(&modules_rwsem);
+
+ list_for_each_entry_safe(module, next, &modules_list, list) {
+ list_del(&module->list);
+ ida_free(&module_id, module->id);
+ kobject_put(&module->kobj);
+ }
+
+ is_empty = list_empty(&modules_list);
+
+ up_write(&modules_rwsem);
+
+ if (!is_empty)
+ pr_warn("Not all nodes were deleted\n");
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_remove_all);
+
+struct gb_audio_manager_module *gb_audio_manager_get_module(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ down_read(&modules_rwsem);
+ module = gb_audio_manager_get_locked(id);
+	if (module)
+		kobject_get(&module->kobj);
+ up_read(&modules_rwsem);
+ return module;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_get_module);
+
+void gb_audio_manager_put_module(struct gb_audio_manager_module *module)
+{
+ kobject_put(&module->kobj);
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_put_module);
+
+int gb_audio_manager_dump_module(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ down_read(&modules_rwsem);
+ module = gb_audio_manager_get_locked(id);
+ up_read(&modules_rwsem);
+
+ if (!module)
+ return -EINVAL;
+
+ gb_audio_manager_module_dump(module);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_dump_module);
+
+void gb_audio_manager_dump_all(void)
+{
+ struct gb_audio_manager_module *module;
+ int count = 0;
+
+ down_read(&modules_rwsem);
+ list_for_each_entry(module, &modules_list, list) {
+ gb_audio_manager_module_dump(module);
+ count++;
+ }
+ up_read(&modules_rwsem);
+
+ pr_info("Number of connected modules: %d\n", count);
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_dump_all);
+
+/*
+ * module init/deinit
+ */
+static int __init manager_init(void)
+{
+ manager_kset = kset_create_and_add(GB_AUDIO_MANAGER_NAME, NULL,
+ kernel_kobj);
+ if (!manager_kset)
+ return -ENOMEM;
+
+#ifdef GB_AUDIO_MANAGER_SYSFS
+ gb_audio_manager_sysfs_init(&manager_kset->kobj);
+#endif
+
+ return 0;
+}
+
+static void __exit manager_exit(void)
+{
+ gb_audio_manager_remove_all();
+ kset_unregister(manager_kset);
+ ida_destroy(&module_id);
+}
+
+module_init(manager_init);
+module_exit(manager_exit);
+
+MODULE_DESCRIPTION("Greybus audio operations manager");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Svetlin Ankov <ankov_svetlin@projectara.com>");
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
new file mode 100644
index 000000000000..be605485a8ce
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ */
+
+#ifndef _GB_AUDIO_MANAGER_H_
+#define _GB_AUDIO_MANAGER_H_
+
+#include <linux/kobject.h>
+#include <linux/list.h>
+
+#define GB_AUDIO_MANAGER_NAME "gb_audio_manager"
+#define GB_AUDIO_MANAGER_MODULE_NAME_LEN 64
+#define GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "63"
+
+struct gb_audio_manager_module_descriptor {
+ char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
+ int vid;
+ int pid;
+ int intf_id;
+ unsigned int ip_devices;
+ unsigned int op_devices;
+};
+
+struct gb_audio_manager_module {
+ struct kobject kobj;
+ struct list_head list;
+ int id;
+ struct gb_audio_manager_module_descriptor desc;
+};
+
+/*
+ * Creates a new gb_audio_manager_module from the specified descriptor and
+ * registers it with the manager.
+ *
+ * Returns the id of the newly created module, or a negative value on error.
+ */
+int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc);
+
+/*
+ * Removes the connected gb_audio_manager_module with the specified id.
+ *
+ * Returns zero on success, or a negative value on error.
+ */
+int gb_audio_manager_remove(int id);
+
+/*
+ * Removes all connected gb_audio_manager_modules.
+ */
+void gb_audio_manager_remove_all(void);
+
+/*
+ * Retrieves the gb_audio_manager_module with the specified id and takes a
+ * reference on it. Returns NULL if no module with that id exists; drop the
+ * reference with gb_audio_manager_put_module().
+ */
+struct gb_audio_manager_module *gb_audio_manager_get_module(int id);
+
+/*
+ * Decreases the refcount of the module, obtained by the get function.
+ * Modules are removed via gb_audio_manager_remove
+ */
+void gb_audio_manager_put_module(struct gb_audio_manager_module *module);
+
+/*
+ * Dumps the module with the specified id.
+ * Returns zero on success, or a negative value on error.
+ */
+int gb_audio_manager_dump_module(int id);
+
+/*
+ * Dumps all connected modules
+ */
+void gb_audio_manager_dump_all(void);
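+
+/*
+ * Typical usage sketch (illustrative values only):
+ *
+ *	struct gb_audio_manager_module_descriptor desc = {
+ *		.name = "gbmodule", .vid = 1, .pid = 2, .intf_id = 0,
+ *		.ip_devices = 0x10, .op_devices = 0x6,
+ *	};
+ *	int id = gb_audio_manager_add(&desc);
+ *
+ *	if (id >= 0)
+ *		gb_audio_manager_remove(id);
+ */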
+
+#endif /* _GB_AUDIO_MANAGER_H_ */
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
new file mode 100644
index 000000000000..4a4dfb42f50f
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ */
+
+#include <linux/slab.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+#define to_gb_audio_module_attr(x) \
+ container_of(x, struct gb_audio_manager_module_attribute, attr)
+
+static inline struct gb_audio_manager_module *to_gb_audio_module(struct kobject *kobj)
+{
+ return container_of(kobj, struct gb_audio_manager_module, kobj);
+}
+
+struct gb_audio_manager_module_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr,
+ const char *buf, size_t count);
+};
+
+static ssize_t gb_audio_module_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct gb_audio_manager_module_attribute *attribute;
+ struct gb_audio_manager_module *module;
+
+ attribute = to_gb_audio_module_attr(attr);
+ module = to_gb_audio_module(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(module, attribute, buf);
+}
+
+static ssize_t gb_audio_module_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct gb_audio_manager_module_attribute *attribute;
+ struct gb_audio_manager_module *module;
+
+ attribute = to_gb_audio_module_attr(attr);
+ module = to_gb_audio_module(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(module, attribute, buf, len);
+}
+
+static const struct sysfs_ops gb_audio_module_sysfs_ops = {
+ .show = gb_audio_module_attr_show,
+ .store = gb_audio_module_attr_store,
+};
+
+static void gb_audio_module_release(struct kobject *kobj)
+{
+ struct gb_audio_manager_module *module = to_gb_audio_module(kobj);
+
+ pr_info("Destroying audio module #%d\n", module->id);
+ /* TODO -> delete from list */
+ kfree(module);
+}
+
+static ssize_t gb_audio_module_name_show(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", module->desc.name);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
+ __ATTR(name, 0664, gb_audio_module_name_show, NULL);
+
+static ssize_t gb_audio_module_vid_show(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", module->desc.vid);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_vid_attribute =
+ __ATTR(vid, 0664, gb_audio_module_vid_show, NULL);
+
+static ssize_t gb_audio_module_pid_show(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", module->desc.pid);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
+ __ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
+
+static ssize_t gb_audio_module_intf_id_show(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d", module->desc.intf_id);
+}
+
+static struct gb_audio_manager_module_attribute
+ gb_audio_module_intf_id_attribute =
+ __ATTR(intf_id, 0664, gb_audio_module_intf_id_show, NULL);
+
+static ssize_t gb_audio_module_ip_devices_show(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x%X", module->desc.ip_devices);
+}
+
+static struct gb_audio_manager_module_attribute
+ gb_audio_module_ip_devices_attribute =
+ __ATTR(ip_devices, 0664, gb_audio_module_ip_devices_show, NULL);
+
+static ssize_t gb_audio_module_op_devices_show(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x%X", module->desc.op_devices);
+}
+
+static struct gb_audio_manager_module_attribute
+ gb_audio_module_op_devices_attribute =
+ __ATTR(op_devices, 0664, gb_audio_module_op_devices_show, NULL);
+
+static struct attribute *gb_audio_module_default_attrs[] = {
+ &gb_audio_module_name_attribute.attr,
+ &gb_audio_module_vid_attribute.attr,
+ &gb_audio_module_pid_attribute.attr,
+ &gb_audio_module_intf_id_attribute.attr,
+ &gb_audio_module_ip_devices_attribute.attr,
+ &gb_audio_module_op_devices_attribute.attr,
+ NULL, /* need to NULL terminate the list of attributes */
+};
+ATTRIBUTE_GROUPS(gb_audio_module_default);
+
+static const struct kobj_type gb_audio_module_type = {
+ .sysfs_ops = &gb_audio_module_sysfs_ops,
+ .release = gb_audio_module_release,
+ .default_groups = gb_audio_module_default_groups,
+};
+
+static void send_add_uevent(struct gb_audio_manager_module *module)
+{
+ char name_string[128];
+ char vid_string[64];
+ char pid_string[64];
+ char intf_id_string[64];
+ char ip_devices_string[64];
+ char op_devices_string[64];
+
+ char *envp[] = {
+ name_string,
+ vid_string,
+ pid_string,
+ intf_id_string,
+ ip_devices_string,
+ op_devices_string,
+ NULL
+ };
+
+ snprintf(name_string, 128, "NAME=%s", module->desc.name);
+ snprintf(vid_string, 64, "VID=%d", module->desc.vid);
+ snprintf(pid_string, 64, "PID=%d", module->desc.pid);
+ snprintf(intf_id_string, 64, "INTF_ID=%d", module->desc.intf_id);
+ snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
+ module->desc.ip_devices);
+ snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
+ module->desc.op_devices);
+
+ kobject_uevent_env(&module->kobj, KOBJ_ADD, envp);
+}
+
+int gb_audio_manager_module_create(struct gb_audio_manager_module **module,
+ struct kset *manager_kset,
+ int id, struct gb_audio_manager_module_descriptor *desc)
+{
+ int err;
+ struct gb_audio_manager_module *m;
+
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ return -ENOMEM;
+
+ /* Initialize the node */
+ INIT_LIST_HEAD(&m->list);
+
+ /* Set the module id */
+ m->id = id;
+
+ /* Copy the provided descriptor */
+ memcpy(&m->desc, desc, sizeof(*desc));
+
+ /* set the kset */
+ m->kobj.kset = manager_kset;
+
+ /*
+ * Initialize and add the kobject to the kernel. All the default files
+ * will be created here. As we have already specified a kset for this
+ * kobject, we don't have to set a parent for the kobject, the kobject
+ * will be placed beneath that kset automatically.
+ */
+ err = kobject_init_and_add(&m->kobj, &gb_audio_module_type, NULL, "%d",
+ id);
+ if (err) {
+ pr_err("failed initializing kobject for audio module #%d\n", id);
+ kobject_put(&m->kobj);
+ return err;
+ }
+
+ /*
+ * Notify the object was created
+ */
+ send_add_uevent(m);
+
+ *module = m;
+ pr_info("Created audio module #%d\n", id);
+ return 0;
+}
+
+void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
+{
+ pr_info("audio module #%d name=%s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X\n",
+ module->id,
+ module->desc.name,
+ module->desc.vid,
+ module->desc.pid,
+ module->desc.intf_id,
+ module->desc.ip_devices,
+ module->desc.op_devices);
+}
diff --git a/drivers/staging/greybus/audio_manager_private.h b/drivers/staging/greybus/audio_manager_private.h
new file mode 100644
index 000000000000..daca5b48b986
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_private.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ */
+
+#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
+#define _GB_AUDIO_MANAGER_PRIVATE_H_
+
+#include <linux/kobject.h>
+
+#include "audio_manager.h"
+
+int gb_audio_manager_module_create(struct gb_audio_manager_module **module,
+ struct kset *manager_kset, int id,
+ struct gb_audio_manager_module_descriptor *desc);
+
+/* module destroyed via kobject_put */
+
+void gb_audio_manager_module_dump(struct gb_audio_manager_module *module);
+
+/* sysfs control */
+void gb_audio_manager_sysfs_init(struct kobject *kobj);
+
+#endif /* _GB_AUDIO_MANAGER_PRIVATE_H_ */
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
new file mode 100644
index 000000000000..fcd518f9540c
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ */
+
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+static ssize_t manager_sysfs_add_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gb_audio_manager_module_descriptor desc = { {0} };
+
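+	/*
+	 * Expected input matches the sscanf() format below, e.g. (values are
+	 * illustrative only):
+	 *   name=gbmodule vid=1 pid=2 intf_id=0 i/p devices=0x10 o/p devices=0x6
+	 */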
+ int num = sscanf(buf,
+ "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF
+ "s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X",
+ desc.name, &desc.vid, &desc.pid, &desc.intf_id,
+ &desc.ip_devices, &desc.op_devices);
+
+	if (num != 6)
+ return -EINVAL;
+
+ num = gb_audio_manager_add(&desc);
+ if (num < 0)
+ return -EINVAL;
+
+ return count;
+}
+
+static struct kobj_attribute manager_add_attribute =
+ __ATTR(add, 0664, NULL, manager_sysfs_add_store);
+
+static ssize_t manager_sysfs_remove_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int id;
+
+ int num = kstrtoint(buf, 10, &id);
+
+	if (num)
+ return -EINVAL;
+
+ num = gb_audio_manager_remove(id);
+ if (num)
+ return num;
+
+ return count;
+}
+
+static struct kobj_attribute manager_remove_attribute =
+ __ATTR(remove, 0664, NULL, manager_sysfs_remove_store);
+
+static ssize_t manager_sysfs_dump_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int id;
+
+ int num = kstrtoint(buf, 10, &id);
+
+	if (!num) {
+ num = gb_audio_manager_dump_module(id);
+ if (num)
+ return num;
+ } else if (!strncmp("all", buf, 3)) {
+ gb_audio_manager_dump_all();
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static struct kobj_attribute manager_dump_attribute =
+ __ATTR(dump, 0664, NULL, manager_sysfs_dump_store);
+
+static void manager_sysfs_init_attribute(struct kobject *kobj,
+ struct kobj_attribute *kattr)
+{
+ int err;
+
+ err = sysfs_create_file(kobj, &kattr->attr);
+ if (err) {
+ pr_warn("creating the sysfs entry for %s failed: %d\n",
+ kattr->attr.name, err);
+ }
+}
+
+void gb_audio_manager_sysfs_init(struct kobject *kobj)
+{
+ manager_sysfs_init_attribute(kobj, &manager_add_attribute);
+ manager_sysfs_init_attribute(kobj, &manager_remove_attribute);
+ manager_sysfs_init_attribute(kobj, &manager_dump_attribute);
+}
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
new file mode 100644
index 000000000000..12c376c477b3
--- /dev/null
+++ b/drivers/staging/greybus/audio_module.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus audio driver
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#include "audio_codec.h"
+#include "audio_apbridgea.h"
+#include "audio_manager.h"
+
+/*
+ * gb_snd management functions
+ */
+
+static int gbaudio_request_jack(struct gbaudio_module_info *module,
+ struct gb_audio_jack_event_request *req)
+{
+ int report;
+ struct snd_jack *jack = module->headset.jack.jack;
+ struct snd_jack *btn_jack = module->button.jack.jack;
+
+ if (!jack) {
+ dev_err_ratelimited(module->dev,
+ "Invalid jack event received:type: %u, event: %u\n",
+ req->jack_attribute, req->event);
+ return -EINVAL;
+ }
+
+ dev_warn_ratelimited(module->dev,
+ "Jack Event received: type: %u, event: %u\n",
+ req->jack_attribute, req->event);
+
+ if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) {
+ module->jack_type = 0;
+ if (btn_jack && module->button_status) {
+ snd_soc_jack_report(&module->button.jack, 0,
+ module->button_mask);
+ module->button_status = 0;
+ }
+ snd_soc_jack_report(&module->headset.jack, 0,
+ module->jack_mask);
+ return 0;
+ }
+
+ report = req->jack_attribute & module->jack_mask;
+ if (!report) {
+ dev_err_ratelimited(module->dev,
+ "Invalid jack event received:type: %u, event: %u\n",
+ req->jack_attribute, req->event);
+ return -EINVAL;
+ }
+
+ if (module->jack_type)
+ dev_warn_ratelimited(module->dev,
+ "Modifying jack from %d to %d\n",
+ module->jack_type, report);
+
+ module->jack_type = report;
+ snd_soc_jack_report(&module->headset.jack, report, module->jack_mask);
+
+ return 0;
+}
+
+static int gbaudio_request_button(struct gbaudio_module_info *module,
+ struct gb_audio_button_event_request *req)
+{
+ int soc_button_id, report;
+ struct snd_jack *btn_jack = module->button.jack.jack;
+
+ if (!btn_jack) {
+ dev_err_ratelimited(module->dev,
+ "Invalid button event received:type: %u, event: %u\n",
+ req->button_id, req->event);
+ return -EINVAL;
+ }
+
+ dev_warn_ratelimited(module->dev,
+ "Button Event received: id: %u, event: %u\n",
+ req->button_id, req->event);
+
+ /* currently supports 4 buttons only */
+ if (!module->jack_type) {
+ dev_err_ratelimited(module->dev,
+ "Jack not present. Bogus event!!\n");
+ return -EINVAL;
+ }
+
+ report = module->button_status & module->button_mask;
+ soc_button_id = 0;
+
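+	/*
+	 * Greybus reports 1-based button ids; map them to the matching
+	 * SND_JACK_BTN_* bit, keeping only buttons advertised in button_mask.
+	 */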
+ switch (req->button_id) {
+ case 1:
+ soc_button_id = SND_JACK_BTN_0 & module->button_mask;
+ break;
+
+ case 2:
+ soc_button_id = SND_JACK_BTN_1 & module->button_mask;
+ break;
+
+ case 3:
+ soc_button_id = SND_JACK_BTN_2 & module->button_mask;
+ break;
+
+ case 4:
+ soc_button_id = SND_JACK_BTN_3 & module->button_mask;
+ break;
+ }
+
+ if (!soc_button_id) {
+ dev_err_ratelimited(module->dev,
+ "Invalid button request received\n");
+ return -EINVAL;
+ }
+
+ if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS)
+ report = report | soc_button_id;
+ else
+ report = report & ~soc_button_id;
+
+ module->button_status = report;
+
+ snd_soc_jack_report(&module->button.jack, report, module->button_mask);
+
+ return 0;
+}
+
+static int gbaudio_request_stream(struct gbaudio_module_info *module,
+ struct gb_audio_streaming_event_request *req)
+{
+ dev_warn(module->dev, "Audio Event received: cport: %u, event: %u\n",
+ le16_to_cpu(req->data_cport), req->event);
+
+ return 0;
+}
+
+static int gbaudio_codec_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gbaudio_module_info *module =
+ greybus_get_drvdata(connection->bundle);
+ struct gb_operation_msg_hdr *header = op->request->header;
+ struct gb_audio_streaming_event_request *stream_req;
+ struct gb_audio_jack_event_request *jack_req;
+ struct gb_audio_button_event_request *button_req;
+ int ret;
+
+ switch (header->type) {
+ case GB_AUDIO_TYPE_STREAMING_EVENT:
+ stream_req = op->request->payload;
+ ret = gbaudio_request_stream(module, stream_req);
+ break;
+
+ case GB_AUDIO_TYPE_JACK_EVENT:
+ jack_req = op->request->payload;
+ ret = gbaudio_request_jack(module, jack_req);
+ break;
+
+ case GB_AUDIO_TYPE_BUTTON_EVENT:
+ button_req = op->request->payload;
+ ret = gbaudio_request_button(module, button_req);
+ break;
+
+ default:
+ dev_err_ratelimited(&connection->bundle->dev,
+ "Invalid Audio Event received\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int gb_audio_add_mgmt_connection(struct gbaudio_module_info *gbmodule,
+ struct greybus_descriptor_cport *cport_desc,
+ struct gb_bundle *bundle)
+{
+ struct gb_connection *connection;
+
+ /* Management Cport */
+ if (gbmodule->mgmt_connection) {
+ dev_err(&bundle->dev,
+ "Can't have multiple Management connections\n");
+ return -ENODEV;
+ }
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gbaudio_codec_request_handler);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
+
+ greybus_set_drvdata(bundle, gbmodule);
+ gbmodule->mgmt_connection = connection;
+
+ return 0;
+}
+
+static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
+ struct greybus_descriptor_cport *cport_desc,
+ struct gb_bundle *bundle)
+{
+ struct gb_connection *connection;
+ struct gbaudio_data_connection *dai;
+
+ dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ connection = gb_connection_create_offloaded(bundle,
+ le16_to_cpu(cport_desc->id),
+ GB_CONNECTION_FLAG_CSD);
+ if (IS_ERR(connection)) {
+ devm_kfree(gbmodule->dev, dai);
+ return PTR_ERR(connection);
+ }
+
+ greybus_set_drvdata(bundle, gbmodule);
+ dai->id = 0;
+ dai->data_cport = cpu_to_le16(connection->intf_cport_id);
+ dai->connection = connection;
+ list_add(&dai->list, &gbmodule->data_list);
+
+ return 0;
+}
+
+/*
+ * This is the basic hook to get things initialized and registered with Greybus.
+ */
+
+static int gb_audio_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct device *dev = &bundle->dev;
+ struct gbaudio_module_info *gbmodule;
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_audio_manager_module_descriptor desc;
+ struct gbaudio_data_connection *dai, *_dai;
+ int ret, i;
+ struct gb_audio_topology *topology;
+
+ /* There should be at least one Management and one Data cport */
+ if (bundle->num_cports < 2)
+ return -ENODEV;
+
+ /*
+ * There can be only one Management connection and any number of data
+ * connections.
+ */
+ gbmodule = devm_kzalloc(dev, sizeof(*gbmodule), GFP_KERNEL);
+ if (!gbmodule)
+ return -ENOMEM;
+
+ gbmodule->num_data_connections = bundle->num_cports - 1;
+ INIT_LIST_HEAD(&gbmodule->data_list);
+ INIT_LIST_HEAD(&gbmodule->widget_list);
+ INIT_LIST_HEAD(&gbmodule->ctl_list);
+ INIT_LIST_HEAD(&gbmodule->widget_ctl_list);
+ INIT_LIST_HEAD(&gbmodule->jack_list);
+ gbmodule->dev = dev;
+ snprintf(gbmodule->name, sizeof(gbmodule->name), "%s.%s", dev->driver->name,
+ dev_name(dev));
+ greybus_set_drvdata(bundle, gbmodule);
+
+ /* Create all connections */
+ for (i = 0; i < bundle->num_cports; i++) {
+ cport_desc = &bundle->cport_desc[i];
+
+ switch (cport_desc->protocol_id) {
+ case GREYBUS_PROTOCOL_AUDIO_MGMT:
+ ret = gb_audio_add_mgmt_connection(gbmodule, cport_desc,
+ bundle);
+ if (ret)
+ goto destroy_connections;
+ break;
+ case GREYBUS_PROTOCOL_AUDIO_DATA:
+ ret = gb_audio_add_data_connection(gbmodule, cport_desc,
+ bundle);
+ if (ret)
+ goto destroy_connections;
+ break;
+ default:
+ dev_err(dev, "Unsupported protocol: 0x%02x\n",
+ cport_desc->protocol_id);
+ ret = -ENODEV;
+ goto destroy_connections;
+ }
+ }
+
+ /* There must be a management cport */
+ if (!gbmodule->mgmt_connection) {
+ ret = -EINVAL;
+ dev_err(dev, "Missing management connection\n");
+ goto destroy_connections;
+ }
+
+ /* Initialize management connection */
+ ret = gb_connection_enable(gbmodule->mgmt_connection);
+ if (ret) {
+ dev_err(dev, "%d: Error while enabling mgmt connection\n", ret);
+ goto destroy_connections;
+ }
+ gbmodule->dev_id = gbmodule->mgmt_connection->intf->interface_id;
+
+ /*
+ * FIXME: malloc for topology happens via audio_gb driver
+ * should be done within codec driver itself
+ */
+ ret = gb_audio_gb_get_topology(gbmodule->mgmt_connection, &topology);
+ if (ret) {
+ dev_err(dev, "%d:Error while fetching topology\n", ret);
+ goto disable_connection;
+ }
+
+ /* process topology data */
+ ret = gbaudio_tplg_parse_data(gbmodule, topology);
+ if (ret) {
+ dev_err(dev, "%d:Error while parsing topology data\n",
+ ret);
+ goto free_topology;
+ }
+ gbmodule->topology = topology;
+
+ /* Initialize data connections */
+ list_for_each_entry(dai, &gbmodule->data_list, list) {
+ ret = gb_connection_enable(dai->connection);
+ if (ret) {
+ dev_err(dev,
+ "%d:Error while enabling %d:data connection\n",
+ ret, le16_to_cpu(dai->data_cport));
+ goto disable_data_connection;
+ }
+ }
+
+ /* register module with gbcodec */
+ ret = gbaudio_register_module(gbmodule);
+ if (ret)
+ goto disable_data_connection;
+
+ /* inform above layer for uevent */
+ dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
+ /* prepare for the audio manager */
+ strscpy(desc.name, gbmodule->name, sizeof(desc.name));
+ desc.vid = 2; /* todo */
+ desc.pid = 3; /* todo */
+ desc.intf_id = gbmodule->dev_id;
+ desc.op_devices = gbmodule->op_devices;
+ desc.ip_devices = gbmodule->ip_devices;
+ gbmodule->manager_id = gb_audio_manager_add(&desc);
+
+ dev_dbg(dev, "Add GB Audio device:%s\n", gbmodule->name);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+disable_data_connection:
+ list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list)
+ gb_connection_disable(dai->connection);
+ gbaudio_tplg_release(gbmodule);
+ gbmodule->topology = NULL;
+
+free_topology:
+ kfree(topology);
+
+disable_connection:
+ gb_connection_disable(gbmodule->mgmt_connection);
+
+destroy_connections:
+ list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
+ gb_connection_destroy(dai->connection);
+ list_del(&dai->list);
+ devm_kfree(dev, dai);
+ }
+
+ if (gbmodule->mgmt_connection)
+ gb_connection_destroy(gbmodule->mgmt_connection);
+
+ devm_kfree(dev, gbmodule);
+
+ return ret;
+}
+
+static void gb_audio_disconnect(struct gb_bundle *bundle)
+{
+ struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+ struct gbaudio_data_connection *dai, *_dai;
+
+ gb_pm_runtime_get_sync(bundle);
+
+ /* cleanup module related resources first */
+ gbaudio_unregister_module(gbmodule);
+
+ /* inform uevent to above layers */
+ gb_audio_manager_remove(gbmodule->manager_id);
+
+ gbaudio_tplg_release(gbmodule);
+ kfree(gbmodule->topology);
+ gbmodule->topology = NULL;
+ gb_connection_disable(gbmodule->mgmt_connection);
+ list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
+ gb_connection_disable(dai->connection);
+ gb_connection_destroy(dai->connection);
+ list_del(&dai->list);
+ devm_kfree(gbmodule->dev, dai);
+ }
+ gb_connection_destroy(gbmodule->mgmt_connection);
+ gbmodule->mgmt_connection = NULL;
+
+ devm_kfree(&bundle->dev, gbmodule);
+}
+
+static const struct greybus_bundle_id gb_audio_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_AUDIO) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_audio_id_table);
+
+#ifdef CONFIG_PM
+static int gb_audio_suspend(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+ struct gbaudio_data_connection *dai;
+
+ list_for_each_entry(dai, &gbmodule->data_list, list)
+ gb_connection_disable(dai->connection);
+
+ gb_connection_disable(gbmodule->mgmt_connection);
+
+ return 0;
+}
+
+static int gb_audio_resume(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+ struct gbaudio_data_connection *dai;
+ int ret;
+
+ ret = gb_connection_enable(gbmodule->mgmt_connection);
+ if (ret) {
+ dev_err(dev, "%d:Error while enabling mgmt connection\n", ret);
+ return ret;
+ }
+
+ list_for_each_entry(dai, &gbmodule->data_list, list) {
+ ret = gb_connection_enable(dai->connection);
+ if (ret) {
+ dev_err(dev,
+ "%d:Error while enabling %d:data connection\n",
+ ret, le16_to_cpu(dai->data_cport));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_audio_pm_ops = {
+ SET_RUNTIME_PM_OPS(gb_audio_suspend, gb_audio_resume, NULL)
+};
+
+static struct greybus_driver gb_audio_driver = {
+ .name = "gb-audio",
+ .probe = gb_audio_probe,
+ .disconnect = gb_audio_disconnect,
+ .id_table = gb_audio_id_table,
+ .driver.pm = &gb_audio_pm_ops,
+};
+module_greybus_driver(gb_audio_driver);
+
+MODULE_DESCRIPTION("Greybus Audio module driver");
+MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gbaudio-module");
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
new file mode 100644
index 000000000000..76146f91cddc
--- /dev/null
+++ b/drivers/staging/greybus/audio_topology.c
@@ -0,0 +1,1443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus audio driver
+ * Copyright 2015-2016 Google Inc.
+ * Copyright 2015-2016 Linaro Ltd.
+ */
+
+#include <linux/greybus.h>
+#include "audio_codec.h"
+
+#define GBAUDIO_INVALID_ID 0xFF
+
+struct gbaudio_ctl_pvt {
+ unsigned int ctl_id;
+ unsigned int data_cport;
+ unsigned int access;
+ unsigned int vcount;
+ struct gb_audio_ctl_elem_info *info;
+};
+
+static struct gbaudio_module_info *find_gb_module(struct gbaudio_codec_info *codec,
+ char const *name)
+{
+ int dev_id;
+ char begin[NAME_SIZE];
+ struct gbaudio_module_info *module;
+
+ if (!name)
+ return NULL;
+
+ if (sscanf(name, "%s %d", begin, &dev_id) != 2)
+ return NULL;
+
+ dev_dbg(codec->dev, "%s:Find module#%d\n", __func__, dev_id);
+
+ mutex_lock(&codec->lock);
+ list_for_each_entry(module, &codec->module_list, list) {
+ if (module->dev_id == dev_id) {
+ mutex_unlock(&codec->lock);
+ return module;
+ }
+ }
+ mutex_unlock(&codec->lock);
+ dev_warn(codec->dev, "%s: module#%d missing in codec list\n", name,
+ dev_id);
+ return NULL;
+}
+
+static const char *gbaudio_map_controlid(struct gbaudio_module_info *module,
+ __u8 control_id, __u8 index)
+{
+ struct gbaudio_control *control;
+
+ if (control_id == GBAUDIO_INVALID_ID)
+ return NULL;
+
+ list_for_each_entry(control, &module->ctl_list, list) {
+ if (control->id == control_id) {
+ if (index == GBAUDIO_INVALID_ID)
+ return control->name;
+ if (index >= control->items)
+ return NULL;
+ return control->texts[index];
+ }
+ }
+ list_for_each_entry(control, &module->widget_ctl_list, list) {
+ if (control->id == control_id) {
+ if (index == GBAUDIO_INVALID_ID)
+ return control->name;
+ if (index >= control->items)
+ return NULL;
+ return control->texts[index];
+ }
+ }
+ return NULL;
+}
+
+static int gbaudio_map_controlname(struct gbaudio_module_info *module,
+ const char *name)
+{
+ struct gbaudio_control *control;
+
+ list_for_each_entry(control, &module->ctl_list, list) {
+ if (!strncmp(control->name, name, NAME_SIZE))
+ return control->id;
+ }
+
+	dev_warn(module->dev, "%s: missing in module's controls list\n", name);
+
+ return -EINVAL;
+}
+
+static int gbaudio_map_wcontrolname(struct gbaudio_module_info *module,
+ const char *name)
+{
+ struct gbaudio_control *control;
+
+ list_for_each_entry(control, &module->widget_ctl_list, list) {
+ if (!strncmp(control->wname, name, NAME_SIZE))
+ return control->id;
+ }
+	dev_warn(module->dev, "%s: missing in module's controls list\n", name);
+
+ return -EINVAL;
+}
+
+static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
+ const char *name)
+{
+ struct gbaudio_widget *widget;
+
+ list_for_each_entry(widget, &module->widget_list, list) {
+ if (!strncmp(widget->name, name, NAME_SIZE))
+ return widget->id;
+ }
+	dev_warn(module->dev, "%s: missing in module's widgets list\n", name);
+
+ return -EINVAL;
+}
+
+static const char *gbaudio_map_widgetid(struct gbaudio_module_info *module,
+ __u8 widget_id)
+{
+ struct gbaudio_widget *widget;
+
+ list_for_each_entry(widget, &module->widget_list, list) {
+ if (widget->id == widget_id)
+ return widget->name;
+ }
+ return NULL;
+}
+
+static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb,
+ struct gb_audio_enumerated *gbenum)
+{
+ const char **strings;
+ int i;
+ unsigned int items;
+ __u8 *data;
+
+ items = le32_to_cpu(gbenum->items);
+ strings = devm_kcalloc(gb->dev, items, sizeof(char *), GFP_KERNEL);
+ if (!strings)
+ return NULL;
+
+ data = gbenum->names;
+
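+	/* names is a packed sequence of NUL-terminated strings; collect a pointer to each */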
+ for (i = 0; i < items; i++) {
+ strings[i] = (const char *)data;
+ while (*data != '\0')
+ data++;
+ data++;
+ }
+
+ return strings;
+}
+
+static int gbcodec_mixer_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ unsigned int max;
+ const char *name;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_module_info *module;
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gbcodec = snd_soc_component_get_drvdata(comp);
+
+ dev_dbg(comp->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+
+ if (!info) {
+ dev_err(comp->dev, "NULL info for %s\n", uinfo->id.name);
+ return -EINVAL;
+ }
+
+ /* update uinfo */
+ uinfo->access = data->access;
+ uinfo->count = data->vcount;
+ uinfo->type = (__force snd_ctl_elem_type_t)info->type;
+
+ switch (info->type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+ case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+ uinfo->value.integer.min = le32_to_cpu(info->value.integer.min);
+ uinfo->value.integer.max = le32_to_cpu(info->value.integer.max);
+ break;
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ max = le32_to_cpu(info->value.enumerated.items);
+ uinfo->value.enumerated.items = max;
+ if (uinfo->value.enumerated.item > max - 1)
+ uinfo->value.enumerated.item = max - 1;
+ module = find_gb_module(gbcodec, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+ name = gbaudio_map_controlid(module, data->ctl_id,
+ uinfo->value.enumerated.item);
+ strscpy(uinfo->value.enumerated.name, name, sizeof(uinfo->value.enumerated.name));
+ break;
+ default:
+ dev_err(comp->dev, "Invalid type: %d for %s:kcontrol\n",
+ info->type, kcontrol->id.name);
+ break;
+ }
+ return 0;
+}
+
+static int gbcodec_mixer_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gb = snd_soc_component_get_drvdata(comp);
+ struct gb_bundle *bundle;
+
+ dev_dbg(comp->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+ bundle = to_gb_bundle(module->dev);
+
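+	/* wake the bundle before issuing the greybus control operation */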
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(comp->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
+ /* update ucontrol */
+ switch (info->type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+ case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+ ucontrol->value.integer.value[0] =
+ le32_to_cpu(gbvalue.value.integer_value[0]);
+ if (data->vcount == 2)
+ ucontrol->value.integer.value[1] =
+ le32_to_cpu(gbvalue.value.integer_value[1]);
+ break;
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ ucontrol->value.enumerated.item[0] =
+ le32_to_cpu(gbvalue.value.enumerated_item[0]);
+ if (data->vcount == 2)
+ ucontrol->value.enumerated.item[1] =
+ le32_to_cpu(gbvalue.value.enumerated_item[1]);
+ break;
+ default:
+ dev_err(comp->dev, "Invalid type: %d for %s:kcontrol\n",
+ info->type, kcontrol->id.name);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int gbcodec_mixer_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gb = snd_soc_component_get_drvdata(comp);
+ struct gb_bundle *bundle;
+
+ dev_dbg(comp->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+ bundle = to_gb_bundle(module->dev);
+
+	/* fill gbvalue from ucontrol */
+ switch (info->type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+ case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+ gbvalue.value.integer_value[0] =
+ cpu_to_le32(ucontrol->value.integer.value[0]);
+ if (data->vcount == 2)
+ gbvalue.value.integer_value[1] =
+ cpu_to_le32(ucontrol->value.integer.value[1]);
+ break;
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ gbvalue.value.enumerated_item[0] =
+ cpu_to_le32(ucontrol->value.enumerated.item[0]);
+ if (data->vcount == 2)
+ gbvalue.value.enumerated_item[1] =
+ cpu_to_le32(ucontrol->value.enumerated.item[1]);
+ break;
+ default:
+ dev_err(comp->dev, "Invalid type: %d for %s:kcontrol\n",
+ info->type, kcontrol->id.name);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(comp->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ }
+
+ return ret;
+}
+
+#define SOC_MIXER_GB(xname, kcount, data) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .count = kcount, .info = gbcodec_mixer_ctl_info, \
+ .get = gbcodec_mixer_ctl_get, .put = gbcodec_mixer_ctl_put, \
+ .private_value = (unsigned long)data }
+
+/*
+ * Although the callback functions below seem redundant to the ones above,
+ * they are kept separate to allow for different handling where needed,
+ * e.g. for DAPM-related sequencing.
+ */
+static int gbcodec_mixer_dapm_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ int platform_max, platform_min;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_info *info;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+
+ /* update uinfo */
+ platform_max = le32_to_cpu(info->value.integer.max);
+ platform_min = le32_to_cpu(info->value.integer.min);
+
+ if (platform_max == 1 &&
+ !strnstr(kcontrol->id.name, " Volume", sizeof(kcontrol->id.name)))
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+ uinfo->count = data->vcount;
+ uinfo->value.integer.min = platform_min;
+ uinfo->value.integer.max = platform_max;
+
+ return 0;
+}
+
+static int gbcodec_mixer_dapm_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct device *codec_dev = snd_soc_dapm_to_dev(widget->dapm);
+ struct gbaudio_codec_info *gb = dev_get_drvdata(codec_dev);
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec_dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ bundle = to_gb_bundle(module->dev);
+
+ if (data->vcount == 2)
+ dev_warn(codec_dev,
+ "GB: Control '%s' is stereo, which is not supported\n",
+ kcontrol->id.name);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+ /* update ucontrol */
+ ucontrol->value.integer.value[0] =
+ le32_to_cpu(gbvalue.value.integer_value[0]);
+
+ return ret;
+}
+
+static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, wi, max, connect;
+ unsigned int mask, val;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct device *codec_dev = snd_soc_dapm_to_dev(widget->dapm);
+ struct gbaudio_codec_info *gb = dev_get_drvdata(codec_dev);
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec_dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+ bundle = to_gb_bundle(module->dev);
+
+ if (data->vcount == 2)
+ dev_warn(codec_dev,
+ "GB: Control '%s' is stereo, which is not supported\n",
+ kcontrol->id.name);
+
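+	/* mask the written value to the control's bit width; any non-zero value means connect */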
+ max = le32_to_cpu(info->value.integer.max);
+ mask = (1 << fls(max)) - 1;
+ val = ucontrol->value.integer.value[0] & mask;
+ connect = !!val;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+ if (ret)
+ goto exit;
+
+	/* update DAPM power and push the new value to the module only if it changed */
+ if (le32_to_cpu(gbvalue.value.integer_value[0]) != val) {
+ for (wi = 0; wi < wlist->num_widgets; wi++) {
+ widget = wlist->widgets[wi];
+ snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
+ connect, NULL);
+ }
+ gbvalue.value.integer_value[0] =
+ cpu_to_le32(ucontrol->value.integer.value[0]);
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection,
+ data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+ }
+
+exit:
+ gb_pm_runtime_put_autosuspend(bundle);
+ if (ret)
+ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+}
+
+#define SOC_DAPM_MIXER_GB(xname, kcount, data) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .count = kcount, .info = gbcodec_mixer_dapm_ctl_info, \
+ .get = gbcodec_mixer_dapm_ctl_get, .put = gbcodec_mixer_dapm_ctl_put, \
+ .private_value = (unsigned long)data}
+
+static int gbcodec_event_spk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ /* Ensure GB speaker is connected */
+
+ return 0;
+}
+
+static int gbcodec_event_hp(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ /* Ensure GB module supports jack slot */
+
+ return 0;
+}
+
+static int gbcodec_event_int_mic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ /* Ensure GB module supports jack slot */
+
+ return 0;
+}
+
+static int gbaudio_validate_kcontrol_count(struct gb_audio_widget *w)
+{
+ int ret = 0;
+
+ switch (w->type) {
+ case snd_soc_dapm_spk:
+ case snd_soc_dapm_hp:
+ case snd_soc_dapm_mic:
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_input:
+ if (w->ncontrols)
+ ret = -EINVAL;
+ break;
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_mux:
+ if (w->ncontrols != 1)
+ ret = -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int gbcodec_enum_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, ctl_id;
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gb = snd_soc_component_get_drvdata(comp);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct gb_bundle *bundle;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(comp->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
+ ucontrol->value.enumerated.item[0] =
+ le32_to_cpu(gbvalue.value.enumerated_item[0]);
+ if (e->shift_l != e->shift_r)
+ ucontrol->value.enumerated.item[1] =
+ le32_to_cpu(gbvalue.value.enumerated_item[1]);
+
+ return 0;
+}
+
+static int gbcodec_enum_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, ctl_id;
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gb = snd_soc_component_get_drvdata(comp);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct gb_bundle *bundle;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ if (ucontrol->value.enumerated.item[0] > e->items - 1)
+ return -EINVAL;
+ gbvalue.value.enumerated_item[0] =
+ cpu_to_le32(ucontrol->value.enumerated.item[0]);
+
+ if (e->shift_l != e->shift_r) {
+ if (ucontrol->value.enumerated.item[1] > e->items - 1)
+ return -EINVAL;
+ gbvalue.value.enumerated_item[1] =
+ cpu_to_le32(ucontrol->value.enumerated.item[1]);
+ }
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(comp->dev, "%d:Error in %s for %s\n",
+ ret, __func__, kcontrol->id.name);
+ }
+
+ return ret;
+}
+
+static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ struct soc_enum *gbe;
+ struct gb_audio_enumerated *gb_enum;
+ int i;
+
+ gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
+ if (!gbe)
+ return -ENOMEM;
+
+ gb_enum = &ctl->info.value.enumerated;
+
+	/* since count = 1, reg is unused (dummy) */
+ gbe->items = le32_to_cpu(gb_enum->items);
+ gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
+
+ /* debug enum info */
+ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items,
+ le16_to_cpu(gb_enum->names_length));
+ for (i = 0; i < gbe->items; i++)
+ dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);
+
+ *kctl = (struct snd_kcontrol_new)
+ SOC_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_ctl_get,
+ gbcodec_enum_ctl_put);
+ return 0;
+}
+
+static int gbaudio_tplg_create_kcontrol(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ int ret = 0;
+ struct gbaudio_ctl_pvt *ctldata;
+
+ switch (ctl->iface) {
+ case (__force int)SNDRV_CTL_ELEM_IFACE_MIXER:
+ switch (ctl->info.type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ ret = gbaudio_tplg_create_enum_kctl(gb, kctl, ctl);
+ break;
+ default:
+ ctldata = devm_kzalloc(gb->dev,
+ sizeof(struct gbaudio_ctl_pvt),
+ GFP_KERNEL);
+ if (!ctldata)
+ return -ENOMEM;
+ ctldata->ctl_id = ctl->id;
+ ctldata->data_cport = le16_to_cpu(ctl->data_cport);
+ ctldata->access = le32_to_cpu(ctl->access);
+ ctldata->vcount = ctl->count_values;
+ ctldata->info = &ctl->info;
+ *kctl = (struct snd_kcontrol_new)
+ SOC_MIXER_GB(ctl->name, ctl->count, ctldata);
+ ctldata = NULL;
+ break;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(gb->dev, "%s:%d control created\n", ctl->name, ctl->id);
+ return ret;
+}
+
+static int gbcodec_enum_dapm_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, ctl_id;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct gbaudio_module_info *module;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct device *codec_dev = snd_soc_dapm_to_dev(widget->dapm);
+ struct gbaudio_codec_info *gb = dev_get_drvdata(codec_dev);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_bundle *bundle;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
+ ucontrol->value.enumerated.item[0] = le32_to_cpu(gbvalue.value.enumerated_item[0]);
+ if (e->shift_l != e->shift_r)
+ ucontrol->value.enumerated.item[1] =
+ le32_to_cpu(gbvalue.value.enumerated_item[1]);
+
+ return 0;
+}
+
+static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, wi, ctl_id;
+ unsigned int val, mux, change;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct device *codec_dev = snd_soc_dapm_to_dev(widget->dapm);
+ struct gbaudio_codec_info *gb = dev_get_drvdata(codec_dev);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_bundle *bundle;
+
+ if (ucontrol->value.enumerated.item[0] > e->items - 1)
+ return -EINVAL;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ change = 0;
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
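+	/* compose the shifted value passed to the DAPM mux power update */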
+ mux = ucontrol->value.enumerated.item[0];
+ val = mux << e->shift_l;
+
+ if (le32_to_cpu(gbvalue.value.enumerated_item[0]) !=
+ ucontrol->value.enumerated.item[0]) {
+ change = 1;
+ gbvalue.value.enumerated_item[0] =
+ cpu_to_le32(ucontrol->value.enumerated.item[0]);
+ }
+
+ if (e->shift_l != e->shift_r) {
+ if (ucontrol->value.enumerated.item[1] > e->items - 1)
+ return -EINVAL;
+ val |= ucontrol->value.enumerated.item[1] << e->shift_r;
+ if (le32_to_cpu(gbvalue.value.enumerated_item[1]) !=
+ ucontrol->value.enumerated.item[1]) {
+ change = 1;
+ gbvalue.value.enumerated_item[1] =
+ cpu_to_le32(ucontrol->value.enumerated.item[1]);
+ }
+ }
+
+ if (change) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec_dev,
+ "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ }
+ for (wi = 0; wi < wlist->num_widgets; wi++) {
+ widget = wlist->widgets[wi];
+ snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
+ val, e, NULL);
+ }
+ }
+
+ return change;
+}
+
+static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ struct soc_enum *gbe;
+ struct gb_audio_enumerated *gb_enum;
+ int i;
+
+ gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
+ if (!gbe)
+ return -ENOMEM;
+
+ gb_enum = &ctl->info.value.enumerated;
+
+	/* since count = 1, reg is unused (dummy) */
+ gbe->items = le32_to_cpu(gb_enum->items);
+ gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
+
+ /* debug enum info */
+ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items,
+ le16_to_cpu(gb_enum->names_length));
+ for (i = 0; i < gbe->items; i++)
+ dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);
+
+ *kctl = (struct snd_kcontrol_new)
+ SOC_DAPM_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_dapm_ctl_get,
+ gbcodec_enum_dapm_ctl_put);
+ return 0;
+}
+
+static int gbaudio_tplg_create_mixer_ctl(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ struct gbaudio_ctl_pvt *ctldata;
+
+ ctldata = devm_kzalloc(gb->dev, sizeof(struct gbaudio_ctl_pvt),
+ GFP_KERNEL);
+ if (!ctldata)
+ return -ENOMEM;
+ ctldata->ctl_id = ctl->id;
+ ctldata->data_cport = le16_to_cpu(ctl->data_cport);
+ ctldata->access = le32_to_cpu(ctl->access);
+ ctldata->vcount = ctl->count_values;
+ ctldata->info = &ctl->info;
+ *kctl = (struct snd_kcontrol_new)
+ SOC_DAPM_MIXER_GB(ctl->name, ctl->count, ctldata);
+
+ return 0;
+}
+
+static int gbaudio_tplg_create_wcontrol(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ int ret;
+
+ switch (ctl->iface) {
+ case (__force int)SNDRV_CTL_ELEM_IFACE_MIXER:
+ switch (ctl->info.type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ ret = gbaudio_tplg_create_enum_ctl(gb, kctl, ctl);
+ break;
+ default:
+ ret = gbaudio_tplg_create_mixer_ctl(gb, kctl, ctl);
+ break;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(gb->dev, "%s:%d DAPM control created, ret:%d\n", ctl->name,
+ ctl->id, ret);
+ return ret;
+}
+
+static int gbaudio_widget_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ int wid;
+ int ret;
+ struct device *codec_dev = snd_soc_dapm_to_dev(w->dapm);
+ struct gbaudio_codec_info *gbcodec = dev_get_drvdata(codec_dev);
+ struct gbaudio_module_info *module;
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec_dev, "%s %s %d\n", __func__, w->name, event);
+
+ /* Find relevant module */
+ module = find_gb_module(gbcodec, w->name);
+ if (!module)
+ return -EINVAL;
+
+ /* map name to widget id */
+ wid = gbaudio_map_widgetname(module, w->name);
+ if (wid < 0) {
+ dev_err(codec_dev, "Invalid widget name:%s\n", w->name);
+ return -EINVAL;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
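+	/* mirror the DAPM power event on the remote module's widget */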
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = gb_audio_gb_enable_widget(module->mgmt_connection, wid);
+ if (!ret)
+ ret = gbaudio_module_update(gbcodec, w, module, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = gb_audio_gb_disable_widget(module->mgmt_connection, wid);
+ if (!ret)
+ ret = gbaudio_module_update(gbcodec, w, module, 0);
+ break;
+ }
+ if (ret)
+ dev_err_ratelimited(codec_dev,
+ "%d: widget, event:%d failed:%d\n", wid,
+ event, ret);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
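+/* template widgets, indexed by DAPM widget type; per-module names are filled in later */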
+static const struct snd_soc_dapm_widget gbaudio_widgets[] = {
+ [snd_soc_dapm_spk] = SND_SOC_DAPM_SPK(NULL, gbcodec_event_spk),
+ [snd_soc_dapm_hp] = SND_SOC_DAPM_HP(NULL, gbcodec_event_hp),
+ [snd_soc_dapm_mic] = SND_SOC_DAPM_MIC(NULL, gbcodec_event_int_mic),
+ [snd_soc_dapm_output] = SND_SOC_DAPM_OUTPUT(NULL),
+ [snd_soc_dapm_input] = SND_SOC_DAPM_INPUT(NULL),
+ [snd_soc_dapm_switch] = SND_SOC_DAPM_SWITCH_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_pga] = SND_SOC_DAPM_PGA_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_mixer] = SND_SOC_DAPM_MIXER_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_mux] = SND_SOC_DAPM_MUX_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_aif_in] = SND_SOC_DAPM_AIF_IN_E(NULL, NULL, 0,
+ SND_SOC_NOPM, 0, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_aif_out] = SND_SOC_DAPM_AIF_OUT_E(NULL, NULL, 0,
+ SND_SOC_NOPM, 0, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
+static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
+ struct snd_soc_dapm_widget *dw,
+ struct gb_audio_widget *w, int *w_size)
+{
+ int i, ret, csize;
+ struct snd_kcontrol_new *widget_kctls;
+ struct gb_audio_control *curr;
+ struct gbaudio_control *control, *_control;
+ size_t size;
+ char temp_name[NAME_SIZE];
+
+ ret = gbaudio_validate_kcontrol_count(w);
+ if (ret) {
+ dev_err(module->dev, "Invalid kcontrol count=%d for %s\n",
+ w->ncontrols, w->name);
+ return ret;
+ }
+
+ /* allocate memory for kcontrol */
+ if (w->ncontrols) {
+ size = sizeof(struct snd_kcontrol_new) * w->ncontrols;
+ widget_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!widget_kctls)
+ return -ENOMEM;
+ }
+
+ *w_size = sizeof(struct gb_audio_widget);
+
+ /* create relevant kcontrols */
+ curr = w->ctl;
+ for (i = 0; i < w->ncontrols; i++) {
+ ret = gbaudio_tplg_create_wcontrol(module, &widget_kctls[i],
+ curr);
+ if (ret) {
+ dev_err(module->dev,
+ "%s:%d type widget_ctl not supported\n",
+ curr->name, curr->iface);
+ goto error;
+ }
+ control = devm_kzalloc(module->dev,
+ sizeof(struct gbaudio_control),
+ GFP_KERNEL);
+ if (!control) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ control->id = curr->id;
+ control->name = curr->name;
+ control->wname = w->name;
+
+ if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
+ struct gb_audio_enumerated *gbenum =
+ &curr->info.value.enumerated;
+
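+			/* enumerated controls carry a variable-length names blob; compute the real size */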
+ csize = offsetof(struct gb_audio_control, info);
+ csize += offsetof(struct gb_audio_ctl_elem_info, value);
+ csize += offsetof(struct gb_audio_enumerated, names);
+ csize += le16_to_cpu(gbenum->names_length);
+ control->texts = (const char * const *)
+ gb_generate_enum_strings(module, gbenum);
+ if (!control->texts) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ control->items = le32_to_cpu(gbenum->items);
+ } else {
+ csize = sizeof(struct gb_audio_control);
+ }
+
+ *w_size += csize;
+ curr = (void *)curr + csize;
+ list_add(&control->list, &module->widget_ctl_list);
+ dev_dbg(module->dev, "%s: control of type %d created\n",
+ widget_kctls[i].name, widget_kctls[i].iface);
+ }
+
+	/* Prefix dev_id to the widget name */
+ strscpy(temp_name, w->name, sizeof(temp_name));
+ snprintf(w->name, sizeof(w->name), "GB %d %s", module->dev_id, temp_name);
+
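+	/* clone the template widget for this type and record the supported devices */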
+ switch (w->type) {
+ case snd_soc_dapm_spk:
+ *dw = gbaudio_widgets[w->type];
+ module->op_devices |= GBAUDIO_DEVICE_OUT_SPEAKER;
+ break;
+ case snd_soc_dapm_hp:
+ *dw = gbaudio_widgets[w->type];
+ module->op_devices |= (GBAUDIO_DEVICE_OUT_WIRED_HEADSET
+ | GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE);
+ module->ip_devices |= GBAUDIO_DEVICE_IN_WIRED_HEADSET;
+ break;
+ case snd_soc_dapm_mic:
+ *dw = gbaudio_widgets[w->type];
+ module->ip_devices |= GBAUDIO_DEVICE_IN_BUILTIN_MIC;
+ break;
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_input:
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_pga:
+ case snd_soc_dapm_mixer:
+ case snd_soc_dapm_mux:
+ *dw = gbaudio_widgets[w->type];
+ break;
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
+ *dw = gbaudio_widgets[w->type];
+ dw->sname = w->sname;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+ dw->name = w->name;
+
+ dev_dbg(module->dev, "%s: widget of type %d created\n", dw->name,
+ dw->id);
+ return 0;
+error:
+ list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+ return ret;
+}
+
+static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
+ struct gb_audio_control *controls)
+{
+ int i, csize, ret;
+ struct snd_kcontrol_new *dapm_kctls;
+ struct gb_audio_control *curr;
+ struct gbaudio_control *control, *_control;
+ size_t size;
+ char temp_name[NAME_SIZE];
+
+ size = sizeof(struct snd_kcontrol_new) * module->num_controls;
+ dapm_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!dapm_kctls)
+ return -ENOMEM;
+
+ curr = controls;
+ for (i = 0; i < module->num_controls; i++) {
+ ret = gbaudio_tplg_create_kcontrol(module, &dapm_kctls[i],
+ curr);
+ if (ret) {
+ dev_err(module->dev, "%s:%d type not supported\n",
+ curr->name, curr->iface);
+ goto error;
+ }
+		control = devm_kzalloc(module->dev,
+				       sizeof(struct gbaudio_control),
+				       GFP_KERNEL);
+ if (!control) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ control->id = curr->id;
+		/* Prefix dev_id to the control name */
+ strscpy(temp_name, curr->name, sizeof(temp_name));
+ snprintf(curr->name, sizeof(curr->name), "GB %d %s", module->dev_id,
+ temp_name);
+ control->name = curr->name;
+ if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
+ struct gb_audio_enumerated *gbenum =
+ &curr->info.value.enumerated;
+
+ csize = offsetof(struct gb_audio_control, info);
+ csize += offsetof(struct gb_audio_ctl_elem_info, value);
+ csize += offsetof(struct gb_audio_enumerated, names);
+ csize += le16_to_cpu(gbenum->names_length);
+ control->texts = (const char * const *)
+ gb_generate_enum_strings(module, gbenum);
+ if (!control->texts) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ control->items = le32_to_cpu(gbenum->items);
+ } else {
+ csize = sizeof(struct gb_audio_control);
+ }
+
+ list_add(&control->list, &module->ctl_list);
+ dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
+ curr->name, curr->info.type);
+ curr = (void *)curr + csize;
+ }
+ module->controls = dapm_kctls;
+
+ return 0;
+error:
+ list_for_each_entry_safe(control, _control, &module->ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+ devm_kfree(module->dev, dapm_kctls);
+ return ret;
+}
+
+static int gbaudio_tplg_process_widgets(struct gbaudio_module_info *module,
+ struct gb_audio_widget *widgets)
+{
+ int i, ret, w_size;
+ struct snd_soc_dapm_widget *dapm_widgets;
+ struct gb_audio_widget *curr;
+ struct gbaudio_widget *widget, *_widget;
+ size_t size;
+
+ size = sizeof(struct snd_soc_dapm_widget) * module->num_dapm_widgets;
+ dapm_widgets = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!dapm_widgets)
+ return -ENOMEM;
+
+ curr = widgets;
+ for (i = 0; i < module->num_dapm_widgets; i++) {
+ ret = gbaudio_tplg_create_widget(module, &dapm_widgets[i],
+ curr, &w_size);
+ if (ret) {
+ dev_err(module->dev, "%s:%d type not supported\n",
+ curr->name, curr->type);
+ goto error;
+ }
+		widget = devm_kzalloc(module->dev,
+				      sizeof(struct gbaudio_widget),
+				      GFP_KERNEL);
+ if (!widget) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ widget->id = curr->id;
+ widget->name = curr->name;
+ list_add(&widget->list, &module->widget_list);
+ curr = (void *)curr + w_size;
+ }
+ module->dapm_widgets = dapm_widgets;
+
+ return 0;
+
+error:
+ list_for_each_entry_safe(widget, _widget, &module->widget_list,
+ list) {
+ list_del(&widget->list);
+ devm_kfree(module->dev, widget);
+ }
+ devm_kfree(module->dev, dapm_widgets);
+ return ret;
+}
+
+static int gbaudio_tplg_process_routes(struct gbaudio_module_info *module,
+ struct gb_audio_route *routes)
+{
+ int i, ret;
+ struct snd_soc_dapm_route *dapm_routes;
+ struct gb_audio_route *curr;
+ size_t size;
+
+ size = sizeof(struct snd_soc_dapm_route) * module->num_dapm_routes;
+ dapm_routes = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!dapm_routes)
+ return -ENOMEM;
+
+ module->dapm_routes = dapm_routes;
+ curr = routes;
+
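+	/* resolve widget/control ids from the module lists into the names DAPM expects */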
+ for (i = 0; i < module->num_dapm_routes; i++) {
+ dapm_routes->sink =
+ gbaudio_map_widgetid(module, curr->destination_id);
+ if (!dapm_routes->sink) {
+ dev_err(module->dev, "%d:%d:%d:%d - Invalid sink\n",
+ curr->source_id, curr->destination_id,
+ curr->control_id, curr->index);
+ ret = -EINVAL;
+ goto error;
+ }
+ dapm_routes->source =
+ gbaudio_map_widgetid(module, curr->source_id);
+ if (!dapm_routes->source) {
+ dev_err(module->dev, "%d:%d:%d:%d - Invalid source\n",
+ curr->source_id, curr->destination_id,
+ curr->control_id, curr->index);
+ ret = -EINVAL;
+ goto error;
+ }
+ dapm_routes->control =
+ gbaudio_map_controlid(module,
+ curr->control_id,
+ curr->index);
+ if ((curr->control_id != GBAUDIO_INVALID_ID) &&
+ !dapm_routes->control) {
+ dev_err(module->dev, "%d:%d:%d:%d - Invalid control\n",
+ curr->source_id, curr->destination_id,
+ curr->control_id, curr->index);
+ ret = -EINVAL;
+ goto error;
+ }
+ dev_dbg(module->dev, "Route {%s, %s, %s}\n", dapm_routes->sink,
+ (dapm_routes->control) ? dapm_routes->control : "NULL",
+ dapm_routes->source);
+ dapm_routes++;
+ curr++;
+ }
+
+ return 0;
+
+error:
+ devm_kfree(module->dev, module->dapm_routes);
+ return ret;
+}
+
+static int gbaudio_tplg_process_header(struct gbaudio_module_info *module,
+ struct gb_audio_topology *tplg_data)
+{
+ /* fetch no. of kcontrols, widgets & routes */
+ module->num_controls = tplg_data->num_controls;
+ module->num_dapm_widgets = tplg_data->num_widgets;
+ module->num_dapm_routes = tplg_data->num_routes;
+
+	/* compute block offsets; blocks are laid out back-to-back: DAIs, controls, widgets, routes */
+ module->dai_offset = (unsigned long)&tplg_data->data;
+ module->control_offset = module->dai_offset +
+ le32_to_cpu(tplg_data->size_dais);
+ module->widget_offset = module->control_offset +
+ le32_to_cpu(tplg_data->size_controls);
+ module->route_offset = module->widget_offset +
+ le32_to_cpu(tplg_data->size_widgets);
+
+ dev_dbg(module->dev, "DAI offset is 0x%lx\n", module->dai_offset);
+ dev_dbg(module->dev, "control offset is %lx\n",
+ module->control_offset);
+ dev_dbg(module->dev, "widget offset is %lx\n", module->widget_offset);
+ dev_dbg(module->dev, "route offset is %lx\n", module->route_offset);
+
+ return 0;
+}
+
+int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
+ struct gb_audio_topology *tplg_data)
+{
+ int ret;
+ struct gb_audio_control *controls;
+ struct gb_audio_widget *widgets;
+ struct gb_audio_route *routes;
+ unsigned int jack_type;
+
+ if (!tplg_data)
+ return -EINVAL;
+
+ ret = gbaudio_tplg_process_header(module, tplg_data);
+ if (ret) {
+ dev_err(module->dev, "%d: Error in parsing topology header\n",
+ ret);
+ return ret;
+ }
+
+ /* process control */
+ controls = (struct gb_audio_control *)module->control_offset;
+ ret = gbaudio_tplg_process_kcontrols(module, controls);
+ if (ret) {
+ dev_err(module->dev,
+ "%d: Error in parsing controls data\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Control parsing finished\n");
+
+ /* process widgets */
+ widgets = (struct gb_audio_widget *)module->widget_offset;
+ ret = gbaudio_tplg_process_widgets(module, widgets);
+ if (ret) {
+ dev_err(module->dev,
+ "%d: Error in parsing widgets data\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Widget parsing finished\n");
+
+ /* process route */
+ routes = (struct gb_audio_route *)module->route_offset;
+ ret = gbaudio_tplg_process_routes(module, routes);
+ if (ret) {
+ dev_err(module->dev,
+ "%d: Error in parsing routes data\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Route parsing finished\n");
+
+ /* parse jack capabilities */
+ jack_type = le32_to_cpu(tplg_data->jack_type);
+ if (jack_type) {
+ module->jack_mask = jack_type & GBCODEC_JACK_MASK;
+ module->button_mask = jack_type & GBCODEC_JACK_BUTTON_MASK;
+ }
+
+ return ret;
+}
+
+void gbaudio_tplg_release(struct gbaudio_module_info *module)
+{
+ struct gbaudio_control *control, *_control;
+ struct gbaudio_widget *widget, *_widget;
+
+ if (!module->topology)
+ return;
+
+ /* release kcontrols */
+ list_for_each_entry_safe(control, _control, &module->ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+ if (module->controls)
+ devm_kfree(module->dev, module->controls);
+
+ /* release widget controls */
+ list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+
+ /* release widgets */
+ list_for_each_entry_safe(widget, _widget, &module->widget_list,
+ list) {
+ list_del(&widget->list);
+ devm_kfree(module->dev, widget);
+ }
+ if (module->dapm_widgets)
+ devm_kfree(module->dev, module->dapm_widgets);
+
+ /* release routes */
+ if (module->dapm_routes)
+ devm_kfree(module->dev, module->dapm_routes);
+}
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
new file mode 100644
index 000000000000..d53e58f92e81
--- /dev/null
+++ b/drivers/staging/greybus/authentication.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Component Authentication Protocol (CAP) Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#include <linux/greybus.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+
+#include "greybus_authentication.h"
+#include "firmware.h"
+
+#define CAP_TIMEOUT_MS 1000
+
+/*
+ * Number of minor devices this driver supports.
+ * Exactly one minor is required per Interface.
+ */
+#define NUM_MINORS U8_MAX
+
+struct gb_cap {
+ struct device *parent;
+ struct gb_connection *connection;
+ struct kref kref;
+ struct list_head node;
+ bool disabled; /* connection getting disabled */
+
+ struct mutex mutex;
+ struct cdev cdev;
+ struct device *class_device;
+ dev_t dev_num;
+};
+
+static const struct class cap_class = {
+ .name = "gb_authenticate",
+};
+
+static dev_t cap_dev_num;
+static DEFINE_IDA(cap_minors_map);
+static LIST_HEAD(cap_list);
+static DEFINE_MUTEX(list_mutex);
+
+static void cap_kref_release(struct kref *kref)
+{
+ struct gb_cap *cap = container_of(kref, struct gb_cap, kref);
+
+ kfree(cap);
+}
+
+/*
+ * All users of cap take a reference (while holding list_mutex) before they
+ * get a pointer to work with. The structure is freed only after the last
+ * user has dropped its reference.
+ */
+static void put_cap(struct gb_cap *cap)
+{
+ kref_put(&cap->kref, cap_kref_release);
+}
+
+/* Caller must call put_cap() after using struct gb_cap */
+static struct gb_cap *get_cap(struct cdev *cdev)
+{
+ struct gb_cap *cap;
+
+ mutex_lock(&list_mutex);
+
+ list_for_each_entry(cap, &cap_list, node) {
+ if (&cap->cdev == cdev) {
+ kref_get(&cap->kref);
+ goto unlock;
+ }
+ }
+
+ cap = NULL;
+
+unlock:
+ mutex_unlock(&list_mutex);
+
+ return cap;
+}
+
+static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
+{
+ struct gb_connection *connection = cap->connection;
+ struct gb_cap_get_endpoint_uid_response response;
+ int ret;
+
+ ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
+ 0, &response, sizeof(response));
+ if (ret) {
+ dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(euid, response.uid, sizeof(response.uid));
+
+ return 0;
+}
+
+static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
+ u8 *certificate, u32 *size, u8 *result)
+{
+ struct gb_connection *connection = cap->connection;
+ struct gb_cap_get_ims_certificate_request *request;
+ struct gb_cap_get_ims_certificate_response *response;
+ size_t max_size = gb_operation_get_payload_size_max(connection);
+ struct gb_operation *op;
+ int ret;
+
+ op = gb_operation_create_flags(connection,
+ GB_CAP_TYPE_GET_IMS_CERTIFICATE,
+ sizeof(*request), max_size,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ request = op->request->payload;
+ request->certificate_class = cpu_to_le32(class);
+ request->certificate_id = cpu_to_le32(id);
+
+ ret = gb_operation_request_send_sync(op);
+ if (ret) {
+ dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
+ goto done;
+ }
+
+ response = op->response->payload;
+ *result = response->result_code;
+ *size = op->response->payload_size - sizeof(*response);
+ memcpy(certificate, response->certificate, *size);
+
+done:
+ gb_operation_put(op);
+ return ret;
+}
+
+static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
+ u8 *challenge, u8 *result, u8 *auth_response,
+ u32 *signature_size, u8 *signature)
+{
+ struct gb_connection *connection = cap->connection;
+ struct gb_cap_authenticate_request *request;
+ struct gb_cap_authenticate_response *response;
+ size_t max_size = gb_operation_get_payload_size_max(connection);
+ struct gb_operation *op;
+ int ret;
+
+ op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
+ sizeof(*request), max_size,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ request = op->request->payload;
+ request->auth_type = cpu_to_le32(auth_type);
+ memcpy(request->uid, uid, sizeof(request->uid));
+ memcpy(request->challenge, challenge, sizeof(request->challenge));
+
+ ret = gb_operation_request_send_sync(op);
+ if (ret) {
+ dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
+ goto done;
+ }
+
+ response = op->response->payload;
+ *result = response->result_code;
+ *signature_size = op->response->payload_size - sizeof(*response);
+ memcpy(auth_response, response->response, sizeof(response->response));
+ memcpy(signature, response->signature, *signature_size);
+
+done:
+ gb_operation_put(op);
+ return ret;
+}
+
+/* Char device fops */
+
+static int cap_open(struct inode *inode, struct file *file)
+{
+ struct gb_cap *cap = get_cap(inode->i_cdev);
+
+ /* cap structure can't get freed until file descriptor is closed */
+ if (cap) {
+ file->private_data = cap;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int cap_release(struct inode *inode, struct file *file)
+{
+ struct gb_cap *cap = file->private_data;
+
+ put_cap(cap);
+ return 0;
+}
+
+static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
+ void __user *buf)
+{
+ struct cap_ioc_get_endpoint_uid endpoint_uid;
+ struct cap_ioc_get_ims_certificate *ims_cert;
+ struct cap_ioc_authenticate *authenticate;
+ size_t size;
+ int ret;
+
+ switch (cmd) {
+ case CAP_IOC_GET_ENDPOINT_UID:
+ ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
+ return -EFAULT;
+
+ return 0;
+ case CAP_IOC_GET_IMS_CERTIFICATE:
+ size = sizeof(*ims_cert);
+ ims_cert = memdup_user(buf, size);
+ if (IS_ERR(ims_cert))
+ return PTR_ERR(ims_cert);
+
+ ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
+ ims_cert->certificate_id,
+ ims_cert->certificate,
+ &ims_cert->cert_size,
+ &ims_cert->result_code);
+ if (!ret && copy_to_user(buf, ims_cert, size))
+ ret = -EFAULT;
+ kfree(ims_cert);
+
+ return ret;
+ case CAP_IOC_AUTHENTICATE:
+ size = sizeof(*authenticate);
+ authenticate = memdup_user(buf, size);
+ if (IS_ERR(authenticate))
+ return PTR_ERR(authenticate);
+
+ ret = cap_authenticate(cap, authenticate->auth_type,
+ authenticate->uid,
+ authenticate->challenge,
+ &authenticate->result_code,
+ authenticate->response,
+ &authenticate->signature_size,
+ authenticate->signature);
+ if (!ret && copy_to_user(buf, authenticate, size))
+ ret = -EFAULT;
+ kfree(authenticate);
+
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gb_cap *cap = file->private_data;
+ struct gb_bundle *bundle = cap->connection->bundle;
+ int ret = -ENODEV;
+
+ /*
+ * Serialize ioctls.
+ *
+ * We don't want the user to do multiple authentication operations in
+ * parallel.
+ *
+ * This is also used to protect ->disabled, which is used to check if
+ * the connection is getting disconnected, so that we don't start any
+ * new operations.
+ */
+ mutex_lock(&cap->mutex);
+ if (!cap->disabled) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (!ret) {
+ ret = cap_ioctl(cap, cmd, (void __user *)arg);
+ gb_pm_runtime_put_autosuspend(bundle);
+ }
+ }
+ mutex_unlock(&cap->mutex);
+
+ return ret;
+}
+
+static const struct file_operations cap_fops = {
+ .owner = THIS_MODULE,
+ .open = cap_open,
+ .release = cap_release,
+ .unlocked_ioctl = cap_ioctl_unlocked,
+};
+
+int gb_cap_connection_init(struct gb_connection *connection)
+{
+ struct gb_cap *cap;
+ int ret, minor;
+
+ if (!connection)
+ return 0;
+
+ cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
+
+ cap->parent = &connection->bundle->dev;
+ cap->connection = connection;
+ mutex_init(&cap->mutex);
+ gb_connection_set_data(connection, cap);
+ kref_init(&cap->kref);
+
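+	/* make the cap reachable for get_cap() lookups */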
+ mutex_lock(&list_mutex);
+ list_add(&cap->node, &cap_list);
+ mutex_unlock(&list_mutex);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_list_del;
+
+ minor = ida_alloc_max(&cap_minors_map, NUM_MINORS - 1, GFP_KERNEL);
+ if (minor < 0) {
+ ret = minor;
+ goto err_connection_disable;
+ }
+
+ /* Add a char device to allow userspace to interact with cap */
+ cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
+ cdev_init(&cap->cdev, &cap_fops);
+
+ ret = cdev_add(&cap->cdev, cap->dev_num, 1);
+ if (ret)
+ goto err_remove_ida;
+
+ /* Add a soft link to the previously added char-dev within the bundle */
+ cap->class_device = device_create(&cap_class, cap->parent, cap->dev_num,
+ NULL, "gb-authenticate-%d", minor);
+ if (IS_ERR(cap->class_device)) {
+ ret = PTR_ERR(cap->class_device);
+ goto err_del_cdev;
+ }
+
+ return 0;
+
+err_del_cdev:
+ cdev_del(&cap->cdev);
+err_remove_ida:
+ ida_free(&cap_minors_map, minor);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_list_del:
+ mutex_lock(&list_mutex);
+ list_del(&cap->node);
+ mutex_unlock(&list_mutex);
+
+ put_cap(cap);
+
+ return ret;
+}
+
+void gb_cap_connection_exit(struct gb_connection *connection)
+{
+ struct gb_cap *cap;
+
+ if (!connection)
+ return;
+
+ cap = gb_connection_get_data(connection);
+
+ device_destroy(&cap_class, cap->dev_num);
+ cdev_del(&cap->cdev);
+ ida_free(&cap_minors_map, MINOR(cap->dev_num));
+
+ /*
+ * Disallow any new ioctl operations on the char device and wait for
+ * existing ones to finish.
+ */
+ mutex_lock(&cap->mutex);
+ cap->disabled = true;
+ mutex_unlock(&cap->mutex);
+
+ /* All pending greybus operations should have finished by now */
+ gb_connection_disable(cap->connection);
+
+ /* Disallow new users to get access to the cap structure */
+ mutex_lock(&list_mutex);
+ list_del(&cap->node);
+ mutex_unlock(&list_mutex);
+
+ /*
+	 * All current users of cap have taken a reference to it by now, so we
+	 * can drop our reference; the structure will be freed once the last
+	 * user puts its reference.
+ */
+ put_cap(cap);
+}
+
+int cap_init(void)
+{
+ int ret;
+
+ ret = class_register(&cap_class);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
+ "gb_authenticate");
+ if (ret)
+ goto err_remove_class;
+
+ return 0;
+
+err_remove_class:
+ class_unregister(&cap_class);
+ return ret;
+}
+
+void cap_exit(void)
+{
+ unregister_chrdev_region(cap_dev_num, NUM_MINORS);
+ class_unregister(&cap_class);
+ ida_destroy(&cap_minors_map);
+}
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
new file mode 100644
index 000000000000..d4d86b3898de
--- /dev/null
+++ b/drivers/staging/greybus/bootrom.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BOOTROM Greybus driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
+#include "firmware.h"
+
+/* Timeout, in milliseconds, within which the next request must be received */
+#define NEXT_REQ_TIMEOUT_MS 1000
+
+/*
+ * FIXME: Reduce this timeout once svc core handles parallel processing of
+ * events from the SVC, which are handled sequentially today.
+ */
+#define MODE_SWITCH_TIMEOUT_MS 10000
+
+enum next_request_type {
+ NEXT_REQ_FIRMWARE_SIZE,
+ NEXT_REQ_GET_FIRMWARE,
+ NEXT_REQ_READY_TO_BOOT,
+ NEXT_REQ_MODE_SWITCH,
+};
+
+struct gb_bootrom {
+ struct gb_connection *connection;
+ const struct firmware *fw;
+ u8 protocol_major;
+ u8 protocol_minor;
+ enum next_request_type next_request;
+ struct delayed_work dwork;
+ struct mutex mutex; /* Protects bootrom->fw */
+};
+
+static void free_firmware(struct gb_bootrom *bootrom)
+{
+ if (!bootrom->fw)
+ return;
+
+ release_firmware(bootrom->fw);
+ bootrom->fw = NULL;
+}
+
+static void gb_bootrom_timedout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct gb_bootrom *bootrom = container_of(dwork,
+ struct gb_bootrom, dwork);
+ struct device *dev = &bootrom->connection->bundle->dev;
+ const char *reason;
+
+ switch (bootrom->next_request) {
+ case NEXT_REQ_FIRMWARE_SIZE:
+ reason = "Firmware Size Request";
+ break;
+ case NEXT_REQ_GET_FIRMWARE:
+ reason = "Get Firmware Request";
+ break;
+ case NEXT_REQ_READY_TO_BOOT:
+ reason = "Ready to Boot Request";
+ break;
+ case NEXT_REQ_MODE_SWITCH:
+ reason = "Interface Mode Switch";
+ break;
+ default:
+ reason = NULL;
+ dev_err(dev, "Invalid next-request: %u", bootrom->next_request);
+ break;
+ }
+
+ dev_err(dev, "Timed out waiting for %s from the Module\n", reason);
+
+ mutex_lock(&bootrom->mutex);
+ free_firmware(bootrom);
+ mutex_unlock(&bootrom->mutex);
+
+ /* TODO: Power-off Module ? */
+}
+
+static void gb_bootrom_set_timeout(struct gb_bootrom *bootrom,
+ enum next_request_type next,
+ unsigned long timeout)
+{
+ bootrom->next_request = next;
+ schedule_delayed_work(&bootrom->dwork, msecs_to_jiffies(timeout));
+}
+
+static void gb_bootrom_cancel_timeout(struct gb_bootrom *bootrom)
+{
+ cancel_delayed_work_sync(&bootrom->dwork);
+}
+
+/*
+ * The es2 chip doesn't have VID/PID programmed into the hardware and we need to
+ * hack that up to distinguish different modules and their firmware blobs.
+ *
+ * This fetches the VID/PID (over the bootrom protocol) for the es2 chip only,
+ * and only when the VID/PID already sent during hotplug are 0.
+ *
+ * Otherwise, intf->vendor_id/product_id are kept the same as what was passed
+ * during hotplug.
+ */
+static void bootrom_es2_fixup_vid_pid(struct gb_bootrom *bootrom)
+{
+ struct gb_bootrom_get_vid_pid_response response;
+ struct gb_connection *connection = bootrom->connection;
+ struct gb_interface *intf = connection->bundle->intf;
+ int ret;
+
+ if (!(intf->quirks & GB_INTERFACE_QUIRK_NO_GMP_IDS))
+ return;
+
+ ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_GET_VID_PID,
+ NULL, 0, &response, sizeof(response));
+ if (ret) {
+ dev_err(&connection->bundle->dev,
+ "Bootrom get vid/pid operation failed (%d)\n", ret);
+ return;
+ }
+
+ /*
+	 * NOTE: This is a hack so that the same VID/PID values can be reused
+	 * by the next firmware stage as well. The bootrom uevent will still
+	 * report VID/PID as 0, but from this point on the sysfs files show
+	 * the updated values. That is a bit racy, as the same sysfs files
+	 * showed 0 before this point.
+ */
+ intf->vendor_id = le32_to_cpu(response.vendor_id);
+ intf->product_id = le32_to_cpu(response.product_id);
+
+ dev_dbg(&connection->bundle->dev, "Bootrom got vid (0x%x)/pid (0x%x)\n",
+ intf->vendor_id, intf->product_id);
+}
+
+/* This returns path of the firmware blob on the disk */
+static int find_firmware(struct gb_bootrom *bootrom, u8 stage)
+{
+ struct gb_connection *connection = bootrom->connection;
+ struct gb_interface *intf = connection->bundle->intf;
+ char firmware_name[49];
+ int rc;
+
+ /* Already have a firmware, free it */
+ free_firmware(bootrom);
+
+ /* Bootrom protocol is only supported for loading Stage 2 firmware */
+ if (stage != 2) {
+ dev_err(&connection->bundle->dev, "Invalid boot stage: %u\n",
+ stage);
+ return -EINVAL;
+ }
+
+ /*
+ * Create firmware name
+ *
+ * XXX Name it properly..
+ */
+ snprintf(firmware_name, sizeof(firmware_name),
+ FW_NAME_PREFIX "%08x_%08x_%08x_%08x_s2l.tftf",
+ intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
+ intf->vendor_id, intf->product_id);
+
+ // FIXME:
+ // Turn to dev_dbg later after everyone has valid bootloaders with good
+ // ids, but leave this as dev_info for now to make it easier to track
+ // down "empty" vid/pid modules.
+ dev_info(&connection->bundle->dev, "Firmware file '%s' requested\n",
+ firmware_name);
+
+ rc = request_firmware(&bootrom->fw, firmware_name,
+ &connection->bundle->dev);
+ if (rc) {
+ dev_err(&connection->bundle->dev,
+ "failed to find %s firmware (%d)\n", firmware_name, rc);
+ }
+
+ return rc;
+}
+
+static int gb_bootrom_firmware_size_request(struct gb_operation *op)
+{
+ struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
+ struct gb_bootrom_firmware_size_request *size_request =
+ op->request->payload;
+ struct gb_bootrom_firmware_size_response *size_response;
+ struct device *dev = &op->connection->bundle->dev;
+ int ret;
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ if (op->request->payload_size != sizeof(*size_request)) {
+ dev_err(dev, "%s: illegal size of firmware size request (%zu != %zu)\n",
+ __func__, op->request->payload_size,
+ sizeof(*size_request));
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ mutex_lock(&bootrom->mutex);
+
+ ret = find_firmware(bootrom, size_request->stage);
+ if (ret)
+ goto unlock;
+
+ if (!gb_operation_response_alloc(op, sizeof(*size_response),
+ GFP_KERNEL)) {
+ dev_err(dev, "%s: error allocating response\n", __func__);
+ free_firmware(bootrom);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ size_response = op->response->payload;
+ size_response->size = cpu_to_le32(bootrom->fw->size);
+
+ dev_dbg(dev, "%s: firmware size %d bytes\n",
+ __func__, size_response->size);
+
+unlock:
+ mutex_unlock(&bootrom->mutex);
+
+queue_work:
+ if (!ret) {
+ /* Refresh timeout */
+ gb_bootrom_set_timeout(bootrom, NEXT_REQ_GET_FIRMWARE,
+ NEXT_REQ_TIMEOUT_MS);
+ }
+
+ return ret;
+}
+
+static int gb_bootrom_get_firmware(struct gb_operation *op)
+{
+ struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
+ const struct firmware *fw;
+ struct gb_bootrom_get_firmware_request *firmware_request;
+ struct device *dev = &op->connection->bundle->dev;
+ unsigned int offset, size;
+ enum next_request_type next_request;
+ u8 *firmware_response;
+ int ret = 0;
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ if (op->request->payload_size != sizeof(*firmware_request)) {
+ dev_err(dev, "%s: Illegal size of get firmware request (%zu %zu)\n",
+ __func__, op->request->payload_size,
+ sizeof(*firmware_request));
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ mutex_lock(&bootrom->mutex);
+
+ fw = bootrom->fw;
+ if (!fw) {
+ dev_err(dev, "%s: firmware not available\n", __func__);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ firmware_request = op->request->payload;
+ offset = le32_to_cpu(firmware_request->offset);
+ size = le32_to_cpu(firmware_request->size);
+
+ if (offset >= fw->size || size > fw->size - offset) {
+ dev_warn(dev, "bad firmware request (offs = %u, size = %u)\n",
+ offset, size);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* gb_bootrom_get_firmware_response contains only a byte array */
+ if (!gb_operation_response_alloc(op, size, GFP_KERNEL)) {
+ dev_err(dev, "%s: error allocating response\n", __func__);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ firmware_response = op->response->payload;
+ memcpy(firmware_response, fw->data + offset, size);
+
+ dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n",
+ offset, size);
+
+unlock:
+ mutex_unlock(&bootrom->mutex);
+
+queue_work:
+ /* Refresh timeout */
+ if (!ret && (offset + size == fw->size))
+ next_request = NEXT_REQ_READY_TO_BOOT;
+ else
+ next_request = NEXT_REQ_GET_FIRMWARE;
+
+ gb_bootrom_set_timeout(bootrom, next_request, NEXT_REQ_TIMEOUT_MS);
+
+ return ret;
+}
+
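+/*
+ * Handler for the ready-to-boot request: the bootrom reports the status of
+ * the downloaded firmware image before switching to it.
+ */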
+static int gb_bootrom_ready_to_boot(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_bootrom *bootrom = gb_connection_get_data(connection);
+ struct gb_bootrom_ready_to_boot_request *rtb_request;
+ struct device *dev = &connection->bundle->dev;
+ u8 status;
+ int ret = 0;
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ if (op->request->payload_size != sizeof(*rtb_request)) {
+ dev_err(dev, "%s: Illegal size of ready to boot request (%zu %zu)\n",
+ __func__, op->request->payload_size,
+ sizeof(*rtb_request));
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ rtb_request = op->request->payload;
+ status = rtb_request->status;
+
+ /* Return error if the blob was invalid */
+ if (status == GB_BOOTROM_BOOT_STATUS_INVALID) {
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ /*
+ * XXX Should we return error for insecure firmware?
+ */
+	dev_dbg(dev, "ready to boot: 0x%x\n", status);
+
+queue_work:
+ /*
+ * Refresh timeout, the Interface shall load the new personality and
+ * send a new hotplug request, which shall get rid of the bootrom
+ * connection. As that can take some time, increase the timeout a bit.
+ */
+ gb_bootrom_set_timeout(bootrom, NEXT_REQ_MODE_SWITCH,
+ MODE_SWITCH_TIMEOUT_MS);
+
+ return ret;
+}
+
+static int gb_bootrom_request_handler(struct gb_operation *op)
+{
+ u8 type = op->type;
+
+ switch (type) {
+ case GB_BOOTROM_TYPE_FIRMWARE_SIZE:
+ return gb_bootrom_firmware_size_request(op);
+ case GB_BOOTROM_TYPE_GET_FIRMWARE:
+ return gb_bootrom_get_firmware(op);
+ case GB_BOOTROM_TYPE_READY_TO_BOOT:
+ return gb_bootrom_ready_to_boot(op);
+ default:
+ dev_err(&op->connection->bundle->dev,
+ "unsupported request: %u\n", type);
+ return -EINVAL;
+ }
+}
+
+static int gb_bootrom_get_version(struct gb_bootrom *bootrom)
+{
+ struct gb_bundle *bundle = bootrom->connection->bundle;
+ struct gb_bootrom_version_request request;
+ struct gb_bootrom_version_response response;
+ int ret;
+
+ request.major = GB_BOOTROM_VERSION_MAJOR;
+ request.minor = GB_BOOTROM_VERSION_MINOR;
+
+ ret = gb_operation_sync(bootrom->connection,
+ GB_BOOTROM_TYPE_VERSION,
+ &request, sizeof(request), &response,
+ sizeof(response));
+ if (ret) {
+ dev_err(&bundle->dev,
+ "failed to get protocol version: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (response.major > request.major) {
+ dev_err(&bundle->dev,
+ "unsupported major protocol version (%u > %u)\n",
+ response.major, request.major);
+ return -ENOTSUPP;
+ }
+
+ bootrom->protocol_major = response.major;
+ bootrom->protocol_minor = response.minor;
+
+ dev_dbg(&bundle->dev, "%s - %u.%u\n", __func__, response.major,
+ response.minor);
+
+ return 0;
+}
+
+static int gb_bootrom_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_bootrom *bootrom;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_BOOTROM)
+ return -ENODEV;
+
+ bootrom = kzalloc(sizeof(*bootrom), GFP_KERNEL);
+ if (!bootrom)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle,
+ le16_to_cpu(cport_desc->id),
+ gb_bootrom_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto err_free_bootrom;
+ }
+
+ gb_connection_set_data(connection, bootrom);
+
+ bootrom->connection = connection;
+
+ mutex_init(&bootrom->mutex);
+ INIT_DELAYED_WORK(&bootrom->dwork, gb_bootrom_timedout);
+ greybus_set_drvdata(bundle, bootrom);
+
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto err_connection_destroy;
+
+ ret = gb_bootrom_get_version(bootrom);
+ if (ret)
+ goto err_connection_disable;
+
+ bootrom_es2_fixup_vid_pid(bootrom);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_connection_disable;
+
+ /* Refresh timeout */
+ gb_bootrom_set_timeout(bootrom, NEXT_REQ_FIRMWARE_SIZE,
+ NEXT_REQ_TIMEOUT_MS);
+
+ /* Tell bootrom we're ready. */
+ ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_AP_READY, NULL, 0,
+ NULL, 0);
+ if (ret) {
+ dev_err(&connection->bundle->dev,
+ "failed to send AP READY: %d\n", ret);
+ goto err_cancel_timeout;
+ }
+
+ dev_dbg(&bundle->dev, "AP_READY sent\n");
+
+ return 0;
+
+err_cancel_timeout:
+ gb_bootrom_cancel_timeout(bootrom);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_connection_destroy:
+ gb_connection_destroy(connection);
+err_free_bootrom:
+ kfree(bootrom);
+
+ return ret;
+}
+
+static void gb_bootrom_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_bootrom *bootrom = greybus_get_drvdata(bundle);
+
+ gb_connection_disable(bootrom->connection);
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ /*
+ * Release firmware:
+ *
+ * As the connection and the delayed work are already disabled, we don't
+ * need to lock access to bootrom->fw here.
+ */
+ free_firmware(bootrom);
+
+ gb_connection_destroy(bootrom->connection);
+ kfree(bootrom);
+}
+
+static const struct greybus_bundle_id gb_bootrom_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BOOTROM) },
+ { }
+};
+
+static struct greybus_driver gb_bootrom_driver = {
+ .name = "bootrom",
+ .probe = gb_bootrom_probe,
+ .disconnect = gb_bootrom_disconnect,
+ .id_table = gb_bootrom_id_table,
+};
+
+module_greybus_driver(gb_bootrom_driver);
+
+MODULE_DESCRIPTION("BOOTROM Greybus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
new file mode 100644
index 000000000000..5ac19c0055d9
--- /dev/null
+++ b/drivers/staging/greybus/camera.c
@@ -0,0 +1,1367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Camera protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/greybus.h>
+
+#include "gb-camera.h"
+#include "greybus_protocols.h"
+
+enum gb_camera_debugs_buffer_id {
+ GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
+ GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
+ GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
+ GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
+ GB_CAMERA_DEBUGFS_BUFFER_MAX,
+};
+
+struct gb_camera_debugfs_buffer {
+ char data[PAGE_SIZE];
+ size_t length;
+};
+
+enum gb_camera_state {
+ GB_CAMERA_STATE_UNCONFIGURED,
+ GB_CAMERA_STATE_CONFIGURED,
+};
+
+/**
+ * struct gb_camera - A Greybus Camera Device
+ * @bundle: the greybus bundle this camera is part of
+ * @connection: the greybus connection for camera management
+ * @data_connection: the greybus connection for camera data
+ * @data_cport_id: the data CPort ID on the module side
+ * @mutex: protects the connection and state fields
+ * @state: the current module state
+ * @debugfs: debugfs entries for camera protocol operations testing
+ * @module: Greybus camera module registered to HOST processor.
+ */
+struct gb_camera {
+ struct gb_bundle *bundle;
+ struct gb_connection *connection;
+ struct gb_connection *data_connection;
+ u16 data_cport_id;
+
+ struct mutex mutex;
+ enum gb_camera_state state;
+
+ struct {
+ struct dentry *root;
+ struct gb_camera_debugfs_buffer *buffers;
+ } debugfs;
+
+ struct gb_camera_module module;
+};
+
+struct gb_camera_stream_config {
+ unsigned int width;
+ unsigned int height;
+ unsigned int format;
+ unsigned int vc;
+ unsigned int dt[2];
+ unsigned int max_size;
+};
+
+struct gb_camera_fmt_info {
+ enum v4l2_mbus_pixelcode mbus_code;
+ unsigned int gb_format;
+ unsigned int bpp;
+};
+
+/* GB format to media code map */
+static const struct gb_camera_fmt_info gb_fmt_info[] = {
+ {
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .gb_format = 0x01,
+ .bpp = 16,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_NV12_1x8,
+ .gb_format = 0x12,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_NV21_1x8,
+ .gb_format = 0x13,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_YU12_1x8,
+ .gb_format = 0x16,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_YV12_1x8,
+ .gb_format = 0x17,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
+ .gb_format = 0x40,
+ .bpp = 0,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_GB_CAM_METADATA_1X8,
+ .gb_format = 0x41,
+ .bpp = 0,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_GB_CAM_DEBUG_DATA_1X8,
+ .gb_format = 0x42,
+ .bpp = 0,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .gb_format = 0x80,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGBRG10_1X10,
+ .gb_format = 0x81,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .gb_format = 0x82,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SRGGB10_1X10,
+ .gb_format = 0x83,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SBGGR12_1X12,
+ .gb_format = 0x84,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGBRG12_1X12,
+ .gb_format = 0x85,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .gb_format = 0x86,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SRGGB12_1X12,
+ .gb_format = 0x87,
+ .bpp = 12,
+ },
+};
+
+static const struct gb_camera_fmt_info *gb_camera_get_format_info(u16 gb_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+ if (gb_fmt_info[i].gb_format == gb_fmt)
+ return &gb_fmt_info[i];
+ }
+
+ return NULL;
+}
+
+#define ES2_APB_CDSI0_CPORT 16
+#define ES2_APB_CDSI1_CPORT 17
+
+#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
+
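+/*
+ * Like gb_operation_sync(), but takes operation flags and returns the actual
+ * size of the received response payload through *response_size.
+ */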
+static int gb_camera_operation_sync_flags(struct gb_connection *connection,
+ int type, unsigned int flags,
+ void *request, size_t request_size,
+ void *response, size_t *response_size)
+{
+ struct gb_operation *operation;
+ int ret;
+
+ operation = gb_operation_create_flags(connection, type, request_size,
+ *response_size, flags,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: synchronous operation of type 0x%02x failed: %d\n",
+ connection->name, type, ret);
+ } else {
+ *response_size = operation->response->payload_size;
+
+ if (operation->response->payload_size)
+ memcpy(response, operation->response->payload,
+ operation->response->payload_size);
+ }
+
+ gb_operation_put(operation);
+
+ return ret;
+}
+
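+/*
+ * Compute the largest CSI-2 packet size across all configured streams, or
+ * return a negative error code if a stream configuration is invalid.
+ */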
+static int gb_camera_get_max_pkt_size(struct gb_camera *gcam,
+ struct gb_camera_configure_streams_response *resp)
+{
+ unsigned int max_pkt_size = 0;
+ unsigned int i;
+
+ for (i = 0; i < resp->num_streams; i++) {
+ struct gb_camera_stream_config_response *cfg = &resp->config[i];
+ const struct gb_camera_fmt_info *fmt_info;
+ unsigned int pkt_size;
+
+		fmt_info = gb_camera_get_format_info(le16_to_cpu(cfg->format));
+		if (!fmt_info) {
+			dev_err(&gcam->bundle->dev, "unsupported greybus image format: %u\n",
+				le16_to_cpu(cfg->format));
+ return -EIO;
+ }
+
+ if (fmt_info->bpp == 0) {
+ pkt_size = le32_to_cpu(cfg->max_pkt_size);
+
+ if (pkt_size == 0) {
+ dev_err(&gcam->bundle->dev,
+ "Stream %u: invalid zero maximum packet size\n",
+ i);
+ return -EIO;
+ }
+ } else {
+ pkt_size = le16_to_cpu(cfg->width) * fmt_info->bpp / 8;
+
+ if (pkt_size != le32_to_cpu(cfg->max_pkt_size)) {
+				dev_err(&gcam->bundle->dev,
+					"Stream %u: maximum packet size mismatch (%u/%u)\n",
+					i, pkt_size,
+					le32_to_cpu(cfg->max_pkt_size));
+ return -EIO;
+ }
+ }
+
+ max_pkt_size = max(pkt_size, max_pkt_size);
+ }
+
+ return max_pkt_size;
+}
+
+/*
+ * Validate the stream configuration response, verifying that the padding is
+ * correctly set and that the returned number of streams does not exceed the
+ * requested one.
+ */
+static int gb_camera_configure_streams_validate_response(struct gb_camera *gcam,
+		struct gb_camera_configure_streams_response *resp,
+		unsigned int nstreams)
+{
+ unsigned int i;
+
+ /* Validate the returned response structure */
+ if (resp->padding[0] || resp->padding[1]) {
+ dev_err(&gcam->bundle->dev, "response padding != 0\n");
+ return -EIO;
+ }
+
+ if (resp->num_streams > nstreams) {
+ dev_err(&gcam->bundle->dev, "got #streams %u > request %u\n",
+ resp->num_streams, nstreams);
+ return -EIO;
+ }
+
+ for (i = 0; i < resp->num_streams; i++) {
+ struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
+ if (cfg->padding) {
+ dev_err(&gcam->bundle->dev, "stream #%u padding != 0\n", i);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware Configuration
+ */
+
+static int gb_camera_set_intf_power_mode(struct gb_camera *gcam, u8 intf_id,
+ bool hs)
+{
+ struct gb_svc *svc = gcam->connection->hd->svc;
+ int ret;
+
+ if (hs)
+ ret = gb_svc_intf_set_power_mode(svc, intf_id,
+ GB_SVC_UNIPRO_HS_SERIES_A,
+ GB_SVC_UNIPRO_FAST_MODE, 2, 2,
+ GB_SVC_SMALL_AMPLITUDE,
+ GB_SVC_NO_DE_EMPHASIS,
+ GB_SVC_UNIPRO_FAST_MODE, 2, 2,
+ GB_SVC_PWRM_RXTERMINATION |
+ GB_SVC_PWRM_TXTERMINATION, 0,
+ NULL, NULL);
+ else
+ ret = gb_svc_intf_set_power_mode(svc, intf_id,
+ GB_SVC_UNIPRO_HS_SERIES_A,
+ GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+ 2, 1,
+ GB_SVC_SMALL_AMPLITUDE,
+ GB_SVC_NO_DE_EMPHASIS,
+ GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+ 2, 1,
+ 0, 0,
+ NULL, NULL);
+
+ return ret;
+}
+
+static int gb_camera_set_power_mode(struct gb_camera *gcam, bool hs)
+{
+ struct gb_interface *intf = gcam->connection->intf;
+ struct gb_svc *svc = gcam->connection->hd->svc;
+ int ret;
+
+ ret = gb_camera_set_intf_power_mode(gcam, intf->interface_id, hs);
+ if (ret < 0) {
+ dev_err(&gcam->bundle->dev, "failed to set module interface to %s (%d)\n",
+ hs ? "HS" : "PWM", ret);
+ return ret;
+ }
+
+ ret = gb_camera_set_intf_power_mode(gcam, svc->ap_intf_id, hs);
+ if (ret < 0) {
+ gb_camera_set_intf_power_mode(gcam, intf->interface_id, !hs);
+ dev_err(&gcam->bundle->dev, "failed to set AP interface to %s (%d)\n",
+ hs ? "HS" : "PWM", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
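+/* CSI-2 transmitter configuration request, sent to APB-A via gb_hd_output() */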
+struct ap_csi_config_request {
+ __u8 csi_id;
+ __u8 flags;
+#define GB_CAMERA_CSI_FLAG_CLOCK_CONTINUOUS 0x01
+ __u8 num_lanes;
+ __u8 padding;
+ __le32 csi_clk_freq;
+ __le32 max_pkt_size;
+} __packed;
+
+/*
+ * TODO: Compute the number of lanes dynamically based on bandwidth
+ * requirements.
+ */
+#define GB_CAMERA_CSI_NUM_DATA_LANES 4
+
+#define GB_CAMERA_CSI_CLK_FREQ_MAX 999000000U
+#define GB_CAMERA_CSI_CLK_FREQ_MIN 100000000U
+#define GB_CAMERA_CSI_CLK_FREQ_MARGIN 150000000U
+
+static int gb_camera_setup_data_connection(struct gb_camera *gcam,
+ struct gb_camera_configure_streams_response *resp,
+ struct gb_camera_csi_params *csi_params)
+{
+ struct ap_csi_config_request csi_cfg;
+ struct gb_connection *conn;
+ unsigned int clk_freq;
+ int ret;
+
+ /*
+ * Create the data connection between the camera module data CPort and
+ * APB CDSI1. The CDSI1 CPort ID is hardcoded by the ES2 bridge.
+ */
+ conn = gb_connection_create_offloaded(gcam->bundle, gcam->data_cport_id,
+ GB_CONNECTION_FLAG_NO_FLOWCTRL |
+ GB_CONNECTION_FLAG_CDSI1);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
+
+ gcam->data_connection = conn;
+ gb_connection_set_data(conn, gcam);
+
+ ret = gb_connection_enable(conn);
+ if (ret)
+ goto error_conn_destroy;
+
+ /* Set the UniPro link to high speed mode. */
+ ret = gb_camera_set_power_mode(gcam, true);
+ if (ret < 0)
+ goto error_conn_disable;
+
+ /*
+ * Configure the APB-A CSI-2 transmitter.
+ *
+ * Hardcode the number of lanes to 4 and compute the bus clock frequency
+ * based on the module bandwidth requirements with a safety margin.
+ */
+ memset(&csi_cfg, 0, sizeof(csi_cfg));
+ csi_cfg.csi_id = 1;
+ csi_cfg.flags = 0;
+ csi_cfg.num_lanes = GB_CAMERA_CSI_NUM_DATA_LANES;
+
+	clk_freq = le32_to_cpu(resp->data_rate) / 2 /
+		   GB_CAMERA_CSI_NUM_DATA_LANES;
+	clk_freq = clamp(clk_freq + GB_CAMERA_CSI_CLK_FREQ_MARGIN,
+			 GB_CAMERA_CSI_CLK_FREQ_MIN,
+			 GB_CAMERA_CSI_CLK_FREQ_MAX);
+	csi_cfg.csi_clk_freq = cpu_to_le32(clk_freq);
+
+	ret = gb_camera_get_max_pkt_size(gcam, resp);
+	if (ret < 0) {
+		ret = -EIO;
+		goto error_power;
+	}
+	csi_cfg.max_pkt_size = cpu_to_le32(ret);
+
+ ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
+ sizeof(csi_cfg),
+ GB_APB_REQUEST_CSI_TX_CONTROL, false);
+ if (ret < 0) {
+ dev_err(&gcam->bundle->dev, "failed to start the CSI transmitter\n");
+ goto error_power;
+ }
+
+	if (csi_params) {
+		csi_params->clk_freq = clk_freq;
+		csi_params->num_lanes = csi_cfg.num_lanes;
+	}
+
+ return 0;
+
+error_power:
+ gb_camera_set_power_mode(gcam, false);
+error_conn_disable:
+ gb_connection_disable(gcam->data_connection);
+error_conn_destroy:
+ gb_connection_destroy(gcam->data_connection);
+ gcam->data_connection = NULL;
+ return ret;
+}
+
+static void gb_camera_teardown_data_connection(struct gb_camera *gcam)
+{
+ struct ap_csi_config_request csi_cfg;
+ int ret;
+
+ /* Stop the APB1 CSI transmitter. */
+ memset(&csi_cfg, 0, sizeof(csi_cfg));
+ csi_cfg.csi_id = 1;
+
+ ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
+ sizeof(csi_cfg),
+ GB_APB_REQUEST_CSI_TX_CONTROL, false);
+
+ if (ret < 0)
+ dev_err(&gcam->bundle->dev, "failed to stop the CSI transmitter\n");
+
+ /* Set the UniPro link to low speed mode. */
+ gb_camera_set_power_mode(gcam, false);
+
+ /* Destroy the data connection. */
+ gb_connection_disable(gcam->data_connection);
+ gb_connection_destroy(gcam->data_connection);
+ gcam->data_connection = NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Protocol Operations
+ */
+
+static int gb_camera_capabilities(struct gb_camera *gcam,
+ u8 *capabilities, size_t *size)
+{
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(gcam->bundle);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gcam->mutex);
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_camera_operation_sync_flags(gcam->connection,
+ GB_CAMERA_TYPE_CAPABILITIES,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ NULL, 0,
+ (void *)capabilities, size);
+ if (ret)
+ dev_err(&gcam->bundle->dev, "failed to retrieve capabilities: %d\n", ret);
+
+done:
+ mutex_unlock(&gcam->mutex);
+
+ gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+ return ret;
+}
+
+static int gb_camera_configure_streams(struct gb_camera *gcam,
+ unsigned int *num_streams,
+ unsigned int *flags,
+ struct gb_camera_stream_config *streams,
+ struct gb_camera_csi_params *csi_params)
+{
+ struct gb_camera_configure_streams_request *req;
+ struct gb_camera_configure_streams_response *resp;
+ unsigned int nstreams = *num_streams;
+ unsigned int i;
+ size_t req_size;
+ size_t resp_size;
+ int ret;
+
+ if (nstreams > GB_CAMERA_MAX_STREAMS)
+ return -EINVAL;
+
+ req_size = sizeof(*req) + nstreams * sizeof(req->config[0]);
+ resp_size = sizeof(*resp) + nstreams * sizeof(resp->config[0]);
+
+ req = kmalloc(req_size, GFP_KERNEL);
+ resp = kmalloc(resp_size, GFP_KERNEL);
+ if (!req || !resp) {
+ kfree(req);
+ kfree(resp);
+ return -ENOMEM;
+ }
+
+ req->num_streams = nstreams;
+ req->flags = *flags;
+ req->padding = 0;
+
+ for (i = 0; i < nstreams; ++i) {
+ struct gb_camera_stream_config_request *cfg = &req->config[i];
+
+ cfg->width = cpu_to_le16(streams[i].width);
+ cfg->height = cpu_to_le16(streams[i].height);
+ cfg->format = cpu_to_le16(streams[i].format);
+ cfg->padding = 0;
+ }
+
+ mutex_lock(&gcam->mutex);
+
+ ret = gb_pm_runtime_get_sync(gcam->bundle);
+ if (ret)
+ goto done_skip_pm_put;
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_camera_operation_sync_flags(gcam->connection,
+ GB_CAMERA_TYPE_CONFIGURE_STREAMS,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ req, req_size,
+ resp, &resp_size);
+ if (ret < 0)
+ goto done;
+
+ ret = gb_camera_configure_streams_validate_response(gcam, resp,
+ nstreams);
+ if (ret < 0)
+ goto done;
+
+ *flags = resp->flags;
+ *num_streams = resp->num_streams;
+
+ for (i = 0; i < resp->num_streams; ++i) {
+ struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
+ streams[i].width = le16_to_cpu(cfg->width);
+ streams[i].height = le16_to_cpu(cfg->height);
+ streams[i].format = le16_to_cpu(cfg->format);
+ streams[i].vc = cfg->virtual_channel;
+ streams[i].dt[0] = cfg->data_type[0];
+ streams[i].dt[1] = cfg->data_type[1];
+ streams[i].max_size = le32_to_cpu(cfg->max_size);
+ }
+
+ if ((resp->flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED) ||
+ (req->flags & GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY))
+ goto done;
+
+ if (gcam->state == GB_CAMERA_STATE_CONFIGURED) {
+ gb_camera_teardown_data_connection(gcam);
+ gcam->state = GB_CAMERA_STATE_UNCONFIGURED;
+
+ /*
+ * When unconfiguring streams release the PM runtime reference
+ * that was acquired when streams were configured. The bundle
+ * won't be suspended until the PM runtime reference acquired at
+ * the beginning of this function gets released right before
+ * returning.
+ */
+ gb_pm_runtime_put_noidle(gcam->bundle);
+ }
+
+ if (resp->num_streams == 0)
+ goto done;
+
+ /*
+ * Make sure the bundle won't be suspended until streams get
+ * unconfigured after the stream is configured successfully
+ */
+ gb_pm_runtime_get_noresume(gcam->bundle);
+
+ /* Setup CSI-2 connection from APB-A to AP */
+ ret = gb_camera_setup_data_connection(gcam, resp, csi_params);
+ if (ret < 0) {
+ memset(req, 0, sizeof(*req));
+ gb_operation_sync(gcam->connection,
+ GB_CAMERA_TYPE_CONFIGURE_STREAMS,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ *flags = 0;
+ *num_streams = 0;
+ gb_pm_runtime_put_noidle(gcam->bundle);
+ goto done;
+ }
+
+ gcam->state = GB_CAMERA_STATE_CONFIGURED;
+
+done:
+ gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+done_skip_pm_put:
+ mutex_unlock(&gcam->mutex);
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+static int gb_camera_capture(struct gb_camera *gcam, u32 request_id,
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings)
+{
+ struct gb_camera_capture_request *req;
+ size_t req_size;
+ int ret;
+
+ if (settings_size > GB_CAMERA_MAX_SETTINGS_SIZE)
+ return -EINVAL;
+
+ req_size = sizeof(*req) + settings_size;
+ req = kmalloc(req_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->request_id = cpu_to_le32(request_id);
+ req->streams = streams;
+ req->padding = 0;
+ req->num_frames = cpu_to_le16(num_frames);
+ memcpy(req->settings, settings, settings_size);
+
+ mutex_lock(&gcam->mutex);
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_CAPTURE,
+ req, req_size, NULL, 0);
+done:
+ mutex_unlock(&gcam->mutex);
+
+ kfree(req);
+
+ return ret;
+}
+
+static int gb_camera_flush(struct gb_camera *gcam, u32 *request_id)
+{
+ struct gb_camera_flush_response resp;
+ int ret;
+
+ mutex_lock(&gcam->mutex);
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_FLUSH, NULL, 0,
+ &resp, sizeof(resp));
+
+ if (ret < 0)
+ goto done;
+
+ if (request_id)
+ *request_id = le32_to_cpu(resp.request_id);
+
+done:
+ mutex_unlock(&gcam->mutex);
+
+ return ret;
+}
+
+static int gb_camera_request_handler(struct gb_operation *op)
+{
+ struct gb_camera *gcam = gb_connection_get_data(op->connection);
+ struct gb_camera_metadata_request *payload;
+ struct gb_message *request;
+
+ if (op->type != GB_CAMERA_TYPE_METADATA) {
+ dev_err(&gcam->bundle->dev, "Unsupported unsolicited event: %u\n", op->type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ dev_err(&gcam->bundle->dev, "Wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+
+	dev_dbg(&gcam->bundle->dev, "received metadata for request %u, frame %u, stream %u\n",
+		le32_to_cpu(payload->request_id),
+		le16_to_cpu(payload->frame_number), payload->stream);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interface with HOST gmp camera.
+ */
+static unsigned int gb_camera_mbus_to_gb(enum v4l2_mbus_pixelcode mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+ if (gb_fmt_info[i].mbus_code == mbus_code)
+ return gb_fmt_info[i].gb_format;
+ }
+ return gb_fmt_info[0].gb_format;
+}
+
+static enum v4l2_mbus_pixelcode gb_camera_gb_to_mbus(u16 gb_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+ if (gb_fmt_info[i].gb_format == gb_fmt)
+ return gb_fmt_info[i].mbus_code;
+ }
+ return gb_fmt_info[0].mbus_code;
+}
+
+static ssize_t gb_camera_op_capabilities(void *priv, char *data, size_t len)
+{
+ struct gb_camera *gcam = priv;
+ size_t capabilities_len = len;
+ int ret;
+
+ ret = gb_camera_capabilities(gcam, data, &capabilities_len);
+ if (ret)
+ return ret;
+
+ return capabilities_len;
+}
+
+static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
+ unsigned int *flags, struct gb_camera_stream *streams,
+ struct gb_camera_csi_params *csi_params)
+{
+ struct gb_camera *gcam = priv;
+ struct gb_camera_stream_config *gb_streams;
+ unsigned int gb_flags = 0;
+ unsigned int gb_nstreams = *nstreams;
+ unsigned int i;
+ int ret;
+
+ if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
+ return -EINVAL;
+
+ gb_streams = kcalloc(gb_nstreams, sizeof(*gb_streams), GFP_KERNEL);
+ if (!gb_streams)
+ return -ENOMEM;
+
+ for (i = 0; i < gb_nstreams; i++) {
+ gb_streams[i].width = streams[i].width;
+ gb_streams[i].height = streams[i].height;
+ gb_streams[i].format =
+ gb_camera_mbus_to_gb(streams[i].pixel_code);
+ }
+
+ if (*flags & GB_CAMERA_IN_FLAG_TEST)
+ gb_flags |= GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY;
+
+ ret = gb_camera_configure_streams(gcam, &gb_nstreams,
+ &gb_flags, gb_streams, csi_params);
+ if (ret < 0)
+ goto done;
+ if (gb_nstreams > *nstreams) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ *flags = 0;
+ if (gb_flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED)
+ *flags |= GB_CAMERA_OUT_FLAG_ADJUSTED;
+
+ for (i = 0; i < gb_nstreams; i++) {
+ streams[i].width = gb_streams[i].width;
+ streams[i].height = gb_streams[i].height;
+ streams[i].vc = gb_streams[i].vc;
+ streams[i].dt[0] = gb_streams[i].dt[0];
+ streams[i].dt[1] = gb_streams[i].dt[1];
+ streams[i].max_size = gb_streams[i].max_size;
+ streams[i].pixel_code =
+ gb_camera_gb_to_mbus(gb_streams[i].format);
+ }
+ *nstreams = gb_nstreams;
+
+done:
+ kfree(gb_streams);
+ return ret;
+}
+
+static int gb_camera_op_capture(void *priv, u32 request_id,
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings)
+{
+ struct gb_camera *gcam = priv;
+
+ return gb_camera_capture(gcam, request_id, streams, num_frames,
+ settings_size, settings);
+}
+
+static int gb_camera_op_flush(void *priv, u32 *request_id)
+{
+ struct gb_camera *gcam = priv;
+
+ return gb_camera_flush(gcam, request_id);
+}
+
+static const struct gb_camera_ops gb_cam_ops = {
+ .capabilities = gb_camera_op_capabilities,
+ .configure_streams = gb_camera_op_configure_streams,
+ .capture = gb_camera_op_capture,
+ .flush = gb_camera_op_flush,
+};
+
+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
+static ssize_t gb_camera_debugfs_capabilities(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ struct gb_camera_debugfs_buffer *buffer =
+ &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES];
+ size_t size = 1024;
+ unsigned int i;
+ u8 *caps;
+ int ret;
+
+ caps = kmalloc(size, GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ ret = gb_camera_capabilities(gcam, caps, &size);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * hex_dump_to_buffer() doesn't return the number of bytes dumped prior
+ * to v4.0, we need our own implementation :-(
+ */
+ buffer->length = 0;
+
+ for (i = 0; i < size; i += 16) {
+ unsigned int nbytes = min_t(unsigned int, size - i, 16);
+
+ buffer->length += sprintf(buffer->data + buffer->length,
+ "%*ph\n", nbytes, caps + i);
+ }
+
+done:
+ kfree(caps);
+ return ret;
+}
+
+static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ struct gb_camera_debugfs_buffer *buffer =
+ &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_STREAMS];
+ struct gb_camera_stream_config *streams;
+ unsigned int nstreams;
+ unsigned int flags;
+ unsigned int i;
+ char *token;
+ int ret;
+
+ /* Retrieve number of streams to configure */
+ token = strsep(&buf, ";");
+ if (!token)
+ return -EINVAL;
+
+ ret = kstrtouint(token, 10, &nstreams);
+ if (ret < 0)
+ return ret;
+
+ if (nstreams > GB_CAMERA_MAX_STREAMS)
+ return -EINVAL;
+
+ token = strsep(&buf, ";");
+ if (!token)
+ return -EINVAL;
+
+ ret = kstrtouint(token, 10, &flags);
+ if (ret < 0)
+ return ret;
+
+ /* For each stream to configure parse width, height and format */
+ streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
+ if (!streams)
+ return -ENOMEM;
+
+ for (i = 0; i < nstreams; ++i) {
+ struct gb_camera_stream_config *stream = &streams[i];
+
+ /* width */
+ token = strsep(&buf, ";");
+ if (!token) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = kstrtouint(token, 10, &stream->width);
+ if (ret < 0)
+ goto done;
+
+		/* height */
+		token = strsep(&buf, ";");
+		if (!token) {
+			ret = -EINVAL;
+			goto done;
+		}
+
+		ret = kstrtouint(token, 10, &stream->height);
+		if (ret < 0)
+			goto done;
+
+		/* Image format code */
+		token = strsep(&buf, ";");
+		if (!token) {
+			ret = -EINVAL;
+			goto done;
+		}
+
+		ret = kstrtouint(token, 16, &stream->format);
+		if (ret < 0)
+			goto done;
+ }
+
+ ret = gb_camera_configure_streams(gcam, &nstreams, &flags, streams,
+ NULL);
+ if (ret < 0)
+ goto done;
+
+ buffer->length = sprintf(buffer->data, "%u;%u;", nstreams, flags);
+
+ for (i = 0; i < nstreams; ++i) {
+ struct gb_camera_stream_config *stream = &streams[i];
+
+ buffer->length += sprintf(buffer->data + buffer->length,
+ "%u;%u;%u;%u;%u;%u;%u;",
+ stream->width, stream->height,
+ stream->format, stream->vc,
+ stream->dt[0], stream->dt[1],
+ stream->max_size);
+ }
+
+ ret = len;
+
+done:
+ kfree(streams);
+ return ret;
+};
+
+static ssize_t gb_camera_debugfs_capture(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ unsigned int request_id;
+ unsigned int streams_mask;
+ unsigned int num_frames;
+ char *token;
+ int ret;
+
+ /* Request id */
+ token = strsep(&buf, ";");
+ if (!token)
+ return -EINVAL;
+ ret = kstrtouint(token, 10, &request_id);
+ if (ret < 0)
+ return ret;
+
+ /* Stream mask */
+ token = strsep(&buf, ";");
+ if (!token)
+ return -EINVAL;
+ ret = kstrtouint(token, 16, &streams_mask);
+ if (ret < 0)
+ return ret;
+
+ /* number of frames */
+ token = strsep(&buf, ";");
+ if (!token)
+ return -EINVAL;
+ ret = kstrtouint(token, 10, &num_frames);
+ if (ret < 0)
+ return ret;
+
+ ret = gb_camera_capture(gcam, request_id, streams_mask, num_frames, 0,
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t gb_camera_debugfs_flush(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ struct gb_camera_debugfs_buffer *buffer =
+ &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_FLUSH];
+ unsigned int req_id;
+ int ret;
+
+ ret = gb_camera_flush(gcam, &req_id);
+ if (ret < 0)
+ return ret;
+
+ buffer->length = sprintf(buffer->data, "%u", req_id);
+
+ return len;
+}
+
+struct gb_camera_debugfs_entry {
+ const char *name;
+ unsigned int mask;
+ unsigned int buffer;
+ ssize_t (*execute)(struct gb_camera *gcam, char *buf, size_t len);
+};
+
+static const struct gb_camera_debugfs_entry gb_camera_debugfs_entries[] = {
+ {
+ .name = "capabilities",
+ .mask = S_IFREG | 0444,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
+ .execute = gb_camera_debugfs_capabilities,
+ }, {
+ .name = "configure_streams",
+ .mask = S_IFREG | 0666,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
+ .execute = gb_camera_debugfs_configure_streams,
+ }, {
+ .name = "capture",
+ .mask = S_IFREG | 0666,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
+ .execute = gb_camera_debugfs_capture,
+ }, {
+ .name = "flush",
+ .mask = S_IFREG | 0666,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
+ .execute = gb_camera_debugfs_flush,
+ },
+};
+
+static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ const struct gb_camera_debugfs_entry *op = file->private_data;
+ struct gb_camera *gcam = file_inode(file)->i_private;
+ struct gb_camera_debugfs_buffer *buffer;
+ ssize_t ret;
+
+ /* For read-only entries the operation is triggered by a read. */
+ if (!(op->mask & 0222)) {
+ ret = op->execute(gcam, NULL, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ buffer = &gcam->debugfs.buffers[op->buffer];
+
+ return simple_read_from_buffer(buf, len, offset, buffer->data,
+ buffer->length);
+}
+
+static ssize_t gb_camera_debugfs_write(struct file *file,
+ const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ const struct gb_camera_debugfs_entry *op = file->private_data;
+ struct gb_camera *gcam = file_inode(file)->i_private;
+ ssize_t ret;
+ char *kbuf;
+
+ if (len > 1024)
+ return -EINVAL;
+
+ kbuf = memdup_user_nul(buf, len);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+	ret = op->execute(gcam, kbuf, len);
+
+	kfree(kbuf);
+	return ret;
+}
+
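+/*
+ * The debugfs entry descriptor is attached to the file as auxiliary data,
+ * while the gb_camera instance is stored as the inode's private data.
+ */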
+static int gb_camera_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = debugfs_get_aux(file);
+ return 0;
+}
+
+static const struct file_operations gb_camera_debugfs_ops = {
+ .open = gb_camera_debugfs_open,
+ .read = gb_camera_debugfs_read,
+ .write = gb_camera_debugfs_write,
+};
+
+static int gb_camera_debugfs_init(struct gb_camera *gcam)
+{
+ struct gb_connection *connection = gcam->connection;
+ char dirname[27];
+ unsigned int i;
+
+ /*
+ * Create root debugfs entry and a file entry for each camera operation.
+ */
+ snprintf(dirname, 27, "camera-%u.%u", connection->intf->interface_id,
+ gcam->bundle->id);
+
+ gcam->debugfs.root = debugfs_create_dir(dirname, gb_debugfs_get());
+
+ gcam->debugfs.buffers =
+ vmalloc(array_size(GB_CAMERA_DEBUGFS_BUFFER_MAX,
+ sizeof(*gcam->debugfs.buffers)));
+ if (!gcam->debugfs.buffers)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
+ const struct gb_camera_debugfs_entry *entry =
+ &gb_camera_debugfs_entries[i];
+
+ gcam->debugfs.buffers[i].length = 0;
+
+ debugfs_create_file_aux(entry->name, entry->mask,
+ gcam->debugfs.root, gcam, entry,
+ &gb_camera_debugfs_ops);
+ }
+
+ return 0;
+}
+
+static void gb_camera_debugfs_cleanup(struct gb_camera *gcam)
+{
+ debugfs_remove_recursive(gcam->debugfs.root);
+
+ vfree(gcam->debugfs.buffers);
+}
+
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+
+static void gb_camera_cleanup(struct gb_camera *gcam)
+{
+ gb_camera_debugfs_cleanup(gcam);
+
+ mutex_lock(&gcam->mutex);
+ if (gcam->data_connection) {
+ gb_connection_disable(gcam->data_connection);
+ gb_connection_destroy(gcam->data_connection);
+ gcam->data_connection = NULL;
+ }
+
+ if (gcam->connection) {
+ gb_connection_disable(gcam->connection);
+ gb_connection_destroy(gcam->connection);
+ gcam->connection = NULL;
+ }
+ mutex_unlock(&gcam->mutex);
+}
+
+static void gb_camera_release_module(struct kref *ref)
+{
+ struct gb_camera_module *cam_mod =
+ container_of(ref, struct gb_camera_module, refcount);
+ kfree(cam_mod->priv);
+}
+
+static int gb_camera_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct gb_connection *conn;
+ struct gb_camera *gcam;
+ u16 mgmt_cport_id = 0;
+ u16 data_cport_id = 0;
+ unsigned int i;
+ int ret;
+
+ /*
+ * The camera bundle must contain exactly two CPorts, one for the
+ * camera management protocol and one for the camera data protocol.
+ */
+ if (bundle->num_cports != 2)
+ return -ENODEV;
+
+ for (i = 0; i < bundle->num_cports; ++i) {
+ struct greybus_descriptor_cport *desc = &bundle->cport_desc[i];
+
+ switch (desc->protocol_id) {
+ case GREYBUS_PROTOCOL_CAMERA_MGMT:
+ mgmt_cport_id = le16_to_cpu(desc->id);
+ break;
+ case GREYBUS_PROTOCOL_CAMERA_DATA:
+ data_cport_id = le16_to_cpu(desc->id);
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ if (!mgmt_cport_id || !data_cport_id)
+ return -ENODEV;
+
+ gcam = kzalloc(sizeof(*gcam), GFP_KERNEL);
+ if (!gcam)
+ return -ENOMEM;
+
+ mutex_init(&gcam->mutex);
+
+ gcam->bundle = bundle;
+ gcam->state = GB_CAMERA_STATE_UNCONFIGURED;
+ gcam->data_cport_id = data_cport_id;
+
+ conn = gb_connection_create(bundle, mgmt_cport_id,
+ gb_camera_request_handler);
+ if (IS_ERR(conn)) {
+ ret = PTR_ERR(conn);
+ goto error;
+ }
+
+ gcam->connection = conn;
+ gb_connection_set_data(conn, gcam);
+
+ ret = gb_connection_enable(conn);
+ if (ret)
+ goto error;
+
+ ret = gb_camera_debugfs_init(gcam);
+ if (ret < 0)
+ goto error;
+
+ gcam->module.priv = gcam;
+ gcam->module.ops = &gb_cam_ops;
+ gcam->module.interface_id = gcam->connection->intf->interface_id;
+ gcam->module.release = gb_camera_release_module;
+ ret = gb_camera_register(&gcam->module);
+ if (ret < 0)
+ goto error;
+
+ greybus_set_drvdata(bundle, gcam);
+
+ gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+ return 0;
+
+error:
+ gb_camera_cleanup(gcam);
+ kfree(gcam);
+ return ret;
+}
+
+static void gb_camera_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_camera *gcam = greybus_get_drvdata(bundle);
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+
+ gb_camera_cleanup(gcam);
+ gb_camera_unregister(&gcam->module);
+}
+
+static const struct greybus_bundle_id gb_camera_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_CAMERA) },
+ { },
+};
+
+#ifdef CONFIG_PM
+static int gb_camera_suspend(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gb_camera *gcam = greybus_get_drvdata(bundle);
+
+ if (gcam->data_connection)
+ gb_connection_disable(gcam->data_connection);
+
+ gb_connection_disable(gcam->connection);
+
+ return 0;
+}
+
+static int gb_camera_resume(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gb_camera *gcam = greybus_get_drvdata(bundle);
+ int ret;
+
+ ret = gb_connection_enable(gcam->connection);
+ if (ret) {
+ dev_err(&gcam->bundle->dev, "failed to enable connection: %d\n", ret);
+ return ret;
+ }
+
+ if (gcam->data_connection) {
+ ret = gb_connection_enable(gcam->data_connection);
+ if (ret) {
+ dev_err(&gcam->bundle->dev,
+ "failed to enable data connection: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_camera_pm_ops = {
+ SET_RUNTIME_PM_OPS(gb_camera_suspend, gb_camera_resume, NULL)
+};
+
+static struct greybus_driver gb_camera_driver = {
+ .name = "camera",
+ .probe = gb_camera_probe,
+ .disconnect = gb_camera_disconnect,
+ .id_table = gb_camera_id_table,
+ .driver.pm = &gb_camera_pm_ops,
+};
+
+module_greybus_driver(gb_camera_driver);
+
+MODULE_DESCRIPTION("Greybus Camera protocol driver.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
new file mode 100644
index 000000000000..5d2564462ffc
--- /dev/null
+++ b/drivers/staging/greybus/firmware.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Firmware Management Header
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#ifndef __FIRMWARE_H
+#define __FIRMWARE_H
+
+#include <linux/greybus.h>
+
+#define FW_NAME_PREFIX "gmp_"
+
+/*
+ * Length of the string in format: "FW_NAME_PREFIX""%08x_%08x_%08x_%08x_%s.tftf"
+ * (3 + 1 + 4 * (8 + 1) + 10 + 1 + 4 + 1)
+ */
+#define FW_NAME_SIZE 56
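+/*
+ * e.g. "gmp_00000126_00001000_00000010_00000001_s3firmware.tftf"
+ * (arbitrary example ids, 55 characters plus the terminating NUL)
+ */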
+
+/* Firmware Management Protocol specific functions */
+int fw_mgmt_init(void);
+void fw_mgmt_exit(void);
+struct gb_connection *to_fw_mgmt_connection(struct device *dev);
+int gb_fw_mgmt_request_handler(struct gb_operation *op);
+int gb_fw_mgmt_connection_init(struct gb_connection *connection);
+void gb_fw_mgmt_connection_exit(struct gb_connection *connection);
+
+/* Firmware Download Protocol specific functions */
+int gb_fw_download_request_handler(struct gb_operation *op);
+int gb_fw_download_connection_init(struct gb_connection *connection);
+void gb_fw_download_connection_exit(struct gb_connection *connection);
+
+/* CAP Protocol specific functions */
+int cap_init(void);
+void cap_exit(void);
+int gb_cap_connection_init(struct gb_connection *connection);
+void gb_cap_connection_exit(struct gb_connection *connection);
+
+#endif /* __FIRMWARE_H */
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
new file mode 100644
index 000000000000..0fb15a60412f
--- /dev/null
+++ b/drivers/staging/greybus/fw-core.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Firmware Core Bundle Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware.h>
+#include <linux/greybus.h>
+#include "firmware.h"
+#include "spilib.h"
+
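+/* Per-bundle state tracking the connections used by the firmware bundle */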
+struct gb_fw_core {
+ struct gb_connection *download_connection;
+ struct gb_connection *mgmt_connection;
+ struct gb_connection *spi_connection;
+ struct gb_connection *cap_connection;
+};
+
+static struct spilib_ops *spilib_ops;
+
+struct gb_connection *to_fw_mgmt_connection(struct device *dev)
+{
+ struct gb_fw_core *fw_core = dev_get_drvdata(dev);
+
+ return fw_core->mgmt_connection;
+}
+
+static int gb_fw_spi_connection_init(struct gb_connection *connection)
+{
+ int ret;
+
+ if (!connection)
+ return 0;
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ return ret;
+
+ ret = gb_spilib_master_init(connection, &connection->bundle->dev,
+ spilib_ops);
+ if (ret) {
+ gb_connection_disable(connection);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gb_fw_spi_connection_exit(struct gb_connection *connection)
+{
+ if (!connection)
+ return;
+
+ gb_spilib_master_exit(connection);
+ gb_connection_disable(connection);
+}
+
+static int gb_fw_core_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_fw_core *fw_core;
+ int ret, i;
+ u16 cport_id;
+ u8 protocol_id;
+
+ fw_core = kzalloc(sizeof(*fw_core), GFP_KERNEL);
+ if (!fw_core)
+ return -ENOMEM;
+
+ /* Parse CPorts and create connections */
+ for (i = 0; i < bundle->num_cports; i++) {
+ cport_desc = &bundle->cport_desc[i];
+ cport_id = le16_to_cpu(cport_desc->id);
+ protocol_id = cport_desc->protocol_id;
+
+ switch (protocol_id) {
+ case GREYBUS_PROTOCOL_FW_MANAGEMENT:
+ /* Disallow multiple Firmware Management CPorts */
+ if (fw_core->mgmt_connection) {
+ dev_err(&bundle->dev,
+ "multiple management CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ gb_fw_mgmt_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ dev_err(&bundle->dev,
+ "failed to create management connection (%d)\n",
+ ret);
+ goto err_destroy_connections;
+ }
+
+ fw_core->mgmt_connection = connection;
+ break;
+ case GREYBUS_PROTOCOL_FW_DOWNLOAD:
+ /* Disallow multiple Firmware Download CPorts */
+ if (fw_core->download_connection) {
+ dev_err(&bundle->dev,
+ "multiple download CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ gb_fw_download_request_handler);
+ if (IS_ERR(connection)) {
+ dev_err(&bundle->dev, "failed to create download connection (%ld)\n",
+ PTR_ERR(connection));
+ } else {
+ fw_core->download_connection = connection;
+ }
+
+ break;
+ case GREYBUS_PROTOCOL_SPI:
+ /* Disallow multiple SPI CPorts */
+ if (fw_core->spi_connection) {
+ dev_err(&bundle->dev,
+ "multiple SPI CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ NULL);
+ if (IS_ERR(connection)) {
+ dev_err(&bundle->dev, "failed to create SPI connection (%ld)\n",
+ PTR_ERR(connection));
+ } else {
+ fw_core->spi_connection = connection;
+ }
+
+ break;
+ case GREYBUS_PROTOCOL_AUTHENTICATION:
+ /* Disallow multiple CAP CPorts */
+ if (fw_core->cap_connection) {
+ dev_err(&bundle->dev, "multiple Authentication CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ NULL);
+ if (IS_ERR(connection)) {
+ dev_err(&bundle->dev, "failed to create Authentication connection (%ld)\n",
+ PTR_ERR(connection));
+ } else {
+ fw_core->cap_connection = connection;
+ }
+
+ break;
+ default:
+ dev_err(&bundle->dev, "invalid protocol id (0x%02x)\n",
+ protocol_id);
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+ }
+
+ /* Firmware Management connection is mandatory */
+ if (!fw_core->mgmt_connection) {
+ dev_err(&bundle->dev, "missing management connection\n");
+ ret = -ENODEV;
+ goto err_destroy_connections;
+ }
+
+ ret = gb_fw_download_connection_init(fw_core->download_connection);
+ if (ret) {
+ /* We may still be able to work with the Interface */
+ dev_err(&bundle->dev, "failed to initialize firmware download connection, disable it (%d)\n",
+ ret);
+ gb_connection_destroy(fw_core->download_connection);
+ fw_core->download_connection = NULL;
+ }
+
+ ret = gb_fw_spi_connection_init(fw_core->spi_connection);
+ if (ret) {
+ /* We may still be able to work with the Interface */
+ dev_err(&bundle->dev, "failed to initialize SPI connection, disable it (%d)\n",
+ ret);
+ gb_connection_destroy(fw_core->spi_connection);
+ fw_core->spi_connection = NULL;
+ }
+
+ ret = gb_cap_connection_init(fw_core->cap_connection);
+ if (ret) {
+ /* We may still be able to work with the Interface */
+ dev_err(&bundle->dev, "failed to initialize CAP connection, disable it (%d)\n",
+ ret);
+ gb_connection_destroy(fw_core->cap_connection);
+ fw_core->cap_connection = NULL;
+ }
+
+ ret = gb_fw_mgmt_connection_init(fw_core->mgmt_connection);
+ if (ret) {
+ /* We may still be able to work with the Interface */
+ dev_err(&bundle->dev, "failed to initialize firmware management connection, disable it (%d)\n",
+ ret);
+ goto err_exit_connections;
+ }
+
+ greybus_set_drvdata(bundle, fw_core);
+
+ /* FIXME: Remove this after S2 Loader gets runtime PM support */
+ if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM))
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+err_exit_connections:
+ gb_cap_connection_exit(fw_core->cap_connection);
+ gb_fw_spi_connection_exit(fw_core->spi_connection);
+ gb_fw_download_connection_exit(fw_core->download_connection);
+err_destroy_connections:
+ gb_connection_destroy(fw_core->mgmt_connection);
+ gb_connection_destroy(fw_core->cap_connection);
+ gb_connection_destroy(fw_core->spi_connection);
+ gb_connection_destroy(fw_core->download_connection);
+ kfree(fw_core);
+
+ return ret;
+}
+
+static void gb_fw_core_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_fw_core *fw_core = greybus_get_drvdata(bundle);
+ int ret;
+
+ /* FIXME: Remove this after S2 Loader gets runtime PM support */
+ if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM)) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+ }
+
+ gb_fw_mgmt_connection_exit(fw_core->mgmt_connection);
+ gb_cap_connection_exit(fw_core->cap_connection);
+ gb_fw_spi_connection_exit(fw_core->spi_connection);
+ gb_fw_download_connection_exit(fw_core->download_connection);
+
+ gb_connection_destroy(fw_core->mgmt_connection);
+ gb_connection_destroy(fw_core->cap_connection);
+ gb_connection_destroy(fw_core->spi_connection);
+ gb_connection_destroy(fw_core->download_connection);
+
+ kfree(fw_core);
+}
+
+static const struct greybus_bundle_id gb_fw_core_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_FW_MANAGEMENT) },
+ { }
+};
+
+static struct greybus_driver gb_fw_core_driver = {
+ .name = "gb-firmware",
+ .probe = gb_fw_core_probe,
+ .disconnect = gb_fw_core_disconnect,
+ .id_table = gb_fw_core_id_table,
+};
+
+static int fw_core_init(void)
+{
+ int ret;
+
+ ret = fw_mgmt_init();
+ if (ret) {
+ pr_err("Failed to initialize fw-mgmt core (%d)\n", ret);
+ return ret;
+ }
+
+ ret = cap_init();
+ if (ret) {
+ pr_err("Failed to initialize component authentication core (%d)\n",
+ ret);
+ goto fw_mgmt_exit;
+ }
+
+ ret = greybus_register(&gb_fw_core_driver);
+ if (ret)
+ goto cap_exit;
+
+ return 0;
+
+cap_exit:
+ cap_exit();
+fw_mgmt_exit:
+ fw_mgmt_exit();
+
+ return ret;
+}
+module_init(fw_core_init);
+
+static void __exit fw_core_exit(void)
+{
+ greybus_deregister(&gb_fw_core_driver);
+ cap_exit();
+ fw_mgmt_exit();
+}
+module_exit(fw_core_exit);
+
+MODULE_ALIAS("greybus:firmware");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Greybus Firmware Bundle Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
new file mode 100644
index 000000000000..9a09bd3af79b
--- /dev/null
+++ b/drivers/staging/greybus/fw-download.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Firmware Download Protocol Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+#include "firmware.h"
+
+/* Estimated minimum buffer size; the actual size can be smaller than this */
+#define MIN_FETCH_SIZE 512
+/* Timeout, in jiffies, within which fetch or release firmware must be called */
+#define NEXT_REQ_TIMEOUT_J msecs_to_jiffies(1000)
+
+struct fw_request {
+ u8 firmware_id;
+ bool disabled;
+ bool timedout;
+ char name[FW_NAME_SIZE];
+ const struct firmware *fw;
+ struct list_head node;
+
+ struct delayed_work dwork;
+ /* Timeout, in jiffies, within which the firmware shall download */
+ unsigned long release_timeout_j;
+ struct kref kref;
+ struct fw_download *fw_download;
+};
+
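+/* Per-connection download state: outstanding firmware requests and id map */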
+struct fw_download {
+ struct device *parent;
+ struct gb_connection *connection;
+ struct list_head fw_requests;
+ struct ida id_map;
+ struct mutex mutex;
+};
+
+static void fw_req_release(struct kref *kref)
+{
+ struct fw_request *fw_req = container_of(kref, struct fw_request, kref);
+
+ dev_dbg(fw_req->fw_download->parent, "firmware %s released\n",
+ fw_req->name);
+
+ release_firmware(fw_req->fw);
+
+	/*
+	 * If the request timed out, the module may still send a fetch-fw or
+	 * release-fw request later. Keep the id allocated for this request
+	 * blocked, so that the AP doesn't mistake a later fw-request (with the
+	 * same firmware_id) for the old, timed-out one.
+	 *
+	 * NOTE: This also means that after 255 timeouts we will fail to
+	 * service new firmware downloads. There is little else we can do in
+	 * that case, so let's just hope it never happens.
+	 */
+ if (!fw_req->timedout)
+ ida_free(&fw_req->fw_download->id_map, fw_req->firmware_id);
+
+ kfree(fw_req);
+}
+
+/*
+ * Incoming requests are serialized for a connection, and the only race possible
+ * is between the timeout handler freeing this and an incoming request.
+ *
+ * The operations on the fw-request list are protected by the mutex and
+ * get_fw_req() increments the reference count before returning a fw_req pointer
+ * to the users.
+ *
+ * free_firmware() also takes the mutex while removing an entry from the list,
+ * which guarantees that every existing user of fw_req has already taken a
+ * kref reference and that no new users can appear.
+ *
+ * Once the last user drops the reference, the fw_req structure is freed.
+ */
+static void put_fw_req(struct fw_request *fw_req)
+{
+ kref_put(&fw_req->kref, fw_req_release);
+}
+
+/* Caller must call put_fw_req() after using struct fw_request */
+static struct fw_request *get_fw_req(struct fw_download *fw_download,
+ u8 firmware_id)
+{
+ struct fw_request *fw_req;
+
+ mutex_lock(&fw_download->mutex);
+
+ list_for_each_entry(fw_req, &fw_download->fw_requests, node) {
+ if (fw_req->firmware_id == firmware_id) {
+ kref_get(&fw_req->kref);
+ goto unlock;
+ }
+ }
+
+ fw_req = NULL;
+
+unlock:
+ mutex_unlock(&fw_download->mutex);
+
+ return fw_req;
+}
+
+static void free_firmware(struct fw_download *fw_download,
+ struct fw_request *fw_req)
+{
+ /* Already disabled from timeout handlers */
+ if (fw_req->disabled)
+ return;
+
+ mutex_lock(&fw_download->mutex);
+ list_del(&fw_req->node);
+ mutex_unlock(&fw_download->mutex);
+
+ fw_req->disabled = true;
+ put_fw_req(fw_req);
+}
+
+static void fw_request_timedout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fw_request *fw_req = container_of(dwork,
+ struct fw_request, dwork);
+ struct fw_download *fw_download = fw_req->fw_download;
+
+ dev_err(fw_download->parent,
+ "Timed out waiting for fetch / release firmware requests: %u\n",
+ fw_req->firmware_id);
+
+ fw_req->timedout = true;
+ free_firmware(fw_download, fw_req);
+}
+
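+/* Abort the download if the overall release timeout has already expired */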
+static int exceeds_release_timeout(struct fw_request *fw_req)
+{
+ struct fw_download *fw_download = fw_req->fw_download;
+
+ if (time_before(jiffies, fw_req->release_timeout_j))
+ return 0;
+
+ dev_err(fw_download->parent,
+ "Firmware download didn't finish in time, abort: %d\n",
+ fw_req->firmware_id);
+
+ fw_req->timedout = true;
+ free_firmware(fw_download, fw_req);
+
+ return -ETIMEDOUT;
+}
+
+/* Look up the firmware blob for the given tag and start tracking the request */
+static struct fw_request *find_firmware(struct fw_download *fw_download,
+ const char *tag)
+{
+ struct gb_interface *intf = fw_download->connection->bundle->intf;
+ struct fw_request *fw_req;
+ int ret, req_count;
+
+ fw_req = kzalloc(sizeof(*fw_req), GFP_KERNEL);
+ if (!fw_req)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+ ret = ida_alloc_range(&fw_download->id_map, 1, 255, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(fw_download->parent,
+ "failed to allocate firmware id (%d)\n", ret);
+ goto err_free_req;
+ }
+ fw_req->firmware_id = ret;
+
+ snprintf(fw_req->name, sizeof(fw_req->name),
+ FW_NAME_PREFIX "%08x_%08x_%08x_%08x_%s.tftf",
+ intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
+ intf->vendor_id, intf->product_id, tag);
+
+ dev_info(fw_download->parent, "Requested firmware package '%s'\n",
+ fw_req->name);
+
+ ret = request_firmware(&fw_req->fw, fw_req->name, fw_download->parent);
+ if (ret) {
+ dev_err(fw_download->parent,
+ "firmware request failed for %s (%d)\n", fw_req->name,
+ ret);
+ goto err_free_id;
+ }
+
+ fw_req->fw_download = fw_download;
+ kref_init(&fw_req->kref);
+
+ mutex_lock(&fw_download->mutex);
+ list_add(&fw_req->node, &fw_download->fw_requests);
+ mutex_unlock(&fw_download->mutex);
+
+ /* Timeout, in jiffies, within which firmware should get loaded */
+ req_count = DIV_ROUND_UP(fw_req->fw->size, MIN_FETCH_SIZE);
+ fw_req->release_timeout_j = jiffies + req_count * NEXT_REQ_TIMEOUT_J;
+
+ INIT_DELAYED_WORK(&fw_req->dwork, fw_request_timedout);
+ schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
+
+ return fw_req;
+
+err_free_id:
+ ida_free(&fw_download->id_map, fw_req->firmware_id);
+err_free_req:
+ kfree(fw_req);
+
+ return ERR_PTR(ret);
+}
+
+static int fw_download_find_firmware(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_download *fw_download = gb_connection_get_data(connection);
+ struct gb_fw_download_find_firmware_request *request;
+ struct gb_fw_download_find_firmware_response *response;
+ struct fw_request *fw_req;
+ const char *tag;
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_download->parent,
+ "illegal size of find firmware request (%zu != %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+ tag = (const char *)request->firmware_tag;
+
+ /* firmware_tag must be null-terminated */
+ if (strnlen(tag, GB_FIRMWARE_TAG_MAX_SIZE) ==
+ GB_FIRMWARE_TAG_MAX_SIZE) {
+ dev_err(fw_download->parent,
+ "firmware-tag is not null-terminated\n");
+ return -EINVAL;
+ }
+
+ fw_req = find_firmware(fw_download, tag);
+ if (IS_ERR(fw_req))
+ return PTR_ERR(fw_req);
+
+ if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) {
+ dev_err(fw_download->parent, "error allocating response\n");
+ free_firmware(fw_download, fw_req);
+ return -ENOMEM;
+ }
+
+ response = op->response->payload;
+ response->firmware_id = fw_req->firmware_id;
+ response->size = cpu_to_le32(fw_req->fw->size);
+
+ dev_dbg(fw_download->parent,
+ "firmware size is %zu bytes\n", fw_req->fw->size);
+
+ return 0;
+}
+
+static int fw_download_fetch_firmware(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_download *fw_download = gb_connection_get_data(connection);
+ struct gb_fw_download_fetch_firmware_request *request;
+ struct fw_request *fw_req;
+ const struct firmware *fw;
+ unsigned int offset, size;
+ u8 firmware_id;
+ u8 *response;
+ int ret = 0;
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_download->parent,
+ "Illegal size of fetch firmware request (%zu %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+ offset = le32_to_cpu(request->offset);
+ size = le32_to_cpu(request->size);
+ firmware_id = request->firmware_id;
+
+ fw_req = get_fw_req(fw_download, firmware_id);
+ if (!fw_req) {
+ dev_err(fw_download->parent,
+ "firmware not available for id: %02u\n", firmware_id);
+ return -EINVAL;
+ }
+
+ /* Make sure work handler isn't running in parallel */
+ cancel_delayed_work_sync(&fw_req->dwork);
+
+	/* Did we time out before reaching here? */
+ if (fw_req->disabled) {
+ ret = -ETIMEDOUT;
+ goto put_fw;
+ }
+
+ /*
+ * Firmware download must finish within a limited time interval. If it
+ * doesn't, then we might have a buggy Module on the other side. Abort
+ * download.
+ */
+ ret = exceeds_release_timeout(fw_req);
+ if (ret)
+ goto put_fw;
+
+ fw = fw_req->fw;
+
+ if (offset >= fw->size || size > fw->size - offset) {
+ dev_err(fw_download->parent,
+ "bad fetch firmware request (offs = %u, size = %u)\n",
+ offset, size);
+ ret = -EINVAL;
+ goto put_fw;
+ }
+
+ /* gb_fw_download_fetch_firmware_response contains only a byte array */
+ if (!gb_operation_response_alloc(op, size, GFP_KERNEL)) {
+ dev_err(fw_download->parent,
+ "error allocating fetch firmware response\n");
+ ret = -ENOMEM;
+ goto put_fw;
+ }
+
+ response = op->response->payload;
+ memcpy(response, fw->data + offset, size);
+
+ dev_dbg(fw_download->parent,
+ "responding with firmware (offs = %u, size = %u)\n", offset,
+ size);
+
+ /* Refresh timeout */
+ schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
+
+put_fw:
+ put_fw_req(fw_req);
+
+ return ret;
+}
+
+static int fw_download_release_firmware(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_download *fw_download = gb_connection_get_data(connection);
+ struct gb_fw_download_release_firmware_request *request;
+ struct fw_request *fw_req;
+ u8 firmware_id;
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_download->parent,
+ "Illegal size of release firmware request (%zu %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+ firmware_id = request->firmware_id;
+
+ fw_req = get_fw_req(fw_download, firmware_id);
+ if (!fw_req) {
+ dev_err(fw_download->parent,
+ "firmware not available for id: %02u\n", firmware_id);
+ return -EINVAL;
+ }
+
+ cancel_delayed_work_sync(&fw_req->dwork);
+
+ free_firmware(fw_download, fw_req);
+ put_fw_req(fw_req);
+
+ dev_dbg(fw_download->parent, "release firmware\n");
+
+ return 0;
+}
+
+int gb_fw_download_request_handler(struct gb_operation *op)
+{
+ u8 type = op->type;
+
+ switch (type) {
+ case GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE:
+ return fw_download_find_firmware(op);
+ case GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE:
+ return fw_download_fetch_firmware(op);
+ case GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE:
+ return fw_download_release_firmware(op);
+ default:
+ dev_err(&op->connection->bundle->dev,
+ "unsupported request: %u\n", type);
+ return -EINVAL;
+ }
+}
+
+int gb_fw_download_connection_init(struct gb_connection *connection)
+{
+ struct fw_download *fw_download;
+ int ret;
+
+ if (!connection)
+ return 0;
+
+ fw_download = kzalloc(sizeof(*fw_download), GFP_KERNEL);
+ if (!fw_download)
+ return -ENOMEM;
+
+ fw_download->parent = &connection->bundle->dev;
+ INIT_LIST_HEAD(&fw_download->fw_requests);
+ ida_init(&fw_download->id_map);
+ gb_connection_set_data(connection, fw_download);
+ fw_download->connection = connection;
+ mutex_init(&fw_download->mutex);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_destroy_id_map;
+
+ return 0;
+
+err_destroy_id_map:
+ ida_destroy(&fw_download->id_map);
+ kfree(fw_download);
+
+ return ret;
+}
+
+void gb_fw_download_connection_exit(struct gb_connection *connection)
+{
+ struct fw_download *fw_download;
+ struct fw_request *fw_req, *tmp;
+
+ if (!connection)
+ return;
+
+ fw_download = gb_connection_get_data(connection);
+ gb_connection_disable(fw_download->connection);
+
+ /*
+	 * Make sure we hold a reference to the pending requests before they
+	 * can be freed by the timeout handler.
+ */
+ mutex_lock(&fw_download->mutex);
+ list_for_each_entry(fw_req, &fw_download->fw_requests, node)
+ kref_get(&fw_req->kref);
+ mutex_unlock(&fw_download->mutex);
+
+ /* Release pending firmware packages */
+ list_for_each_entry_safe(fw_req, tmp, &fw_download->fw_requests, node) {
+ cancel_delayed_work_sync(&fw_req->dwork);
+ free_firmware(fw_download, fw_req);
+ put_fw_req(fw_req);
+ }
+
+ ida_destroy(&fw_download->id_map);
+ kfree(fw_download);
+}
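
Editor's note: the per-download budget computed in find_firmware() above scales
with the blob size: one NEXT_REQ_TIMEOUT_J allowance per minimum-size fetch,
with the delayed work rearmed after every fetch. A standalone sketch of that
arithmetic follows; the constant values are placeholders, not the ones defined
earlier in fw-download.c.

#include <stdio.h>

#define MIN_FETCH_SIZE		512	/* assumed minimum fetch chunk, bytes */
#define NEXT_REQ_TIMEOUT_MS	1000	/* assumed per-request timeout */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long fw_size = 64 * 1024;	/* hypothetical firmware blob size */
	unsigned long req_count = DIV_ROUND_UP(fw_size, MIN_FETCH_SIZE);

	/*
	 * release_timeout_j caps the whole download at req_count per-request
	 * budgets counted from the initial find-firmware request.
	 */
	printf("worst case: %lu fetches x %d ms = %lu ms total budget\n",
	       req_count, NEXT_REQ_TIMEOUT_MS,
	       req_count * NEXT_REQ_TIMEOUT_MS);

	return 0;
}
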
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
new file mode 100644
index 000000000000..152949c23d65
--- /dev/null
+++ b/drivers/staging/greybus/fw-management.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Firmware Management Protocol Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/greybus.h>
+
+#include "firmware.h"
+#include "greybus_firmware.h"
+
+#define FW_MGMT_TIMEOUT_MS 1000
+
+struct fw_mgmt {
+ struct device *parent;
+ struct gb_connection *connection;
+ struct kref kref;
+ struct list_head node;
+
+ /* Common id-map for interface and backend firmware requests */
+ struct ida id_map;
+ struct mutex mutex;
+ struct completion completion;
+ struct cdev cdev;
+ struct device *class_device;
+ dev_t dev_num;
+ unsigned int timeout_jiffies;
+ bool disabled; /* connection getting disabled */
+
+ /* Interface Firmware specific fields */
+ bool mode_switch_started;
+ bool intf_fw_loaded;
+ u8 intf_fw_request_id;
+ u8 intf_fw_status;
+ u16 intf_fw_major;
+ u16 intf_fw_minor;
+
+ /* Backend Firmware specific fields */
+ u8 backend_fw_request_id;
+ u8 backend_fw_status;
+};
+
+/*
+ * Number of minor devices this driver supports.
+ * Exactly one minor device is required per Interface.
+ */
+#define NUM_MINORS U8_MAX
+
+static const struct class fw_mgmt_class = {
+ .name = "gb_fw_mgmt",
+};
+
+static dev_t fw_mgmt_dev_num;
+static DEFINE_IDA(fw_mgmt_minors_map);
+static LIST_HEAD(fw_mgmt_list);
+static DEFINE_MUTEX(list_mutex);
+
+static void fw_mgmt_kref_release(struct kref *kref)
+{
+ struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);
+
+ ida_destroy(&fw_mgmt->id_map);
+ kfree(fw_mgmt);
+}
+
+/*
+ * All users of fw_mgmt take a reference (while holding list_mutex) before
+ * they get a pointer to play with, and the structure is freed only after
+ * the last user has dropped its reference.
+ */
+static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
+{
+ kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
+}
+
+/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
+static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
+{
+ struct fw_mgmt *fw_mgmt;
+
+ mutex_lock(&list_mutex);
+
+ list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
+ if (&fw_mgmt->cdev == cdev) {
+ kref_get(&fw_mgmt->kref);
+ goto unlock;
+ }
+ }
+
+ fw_mgmt = NULL;
+
+unlock:
+ mutex_unlock(&list_mutex);
+
+ return fw_mgmt;
+}
+
+static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
+ struct fw_mgmt_ioc_get_intf_version *fw_info)
+{
+ struct gb_connection *connection = fw_mgmt->connection;
+ struct gb_fw_mgmt_interface_fw_version_response response;
+ int ret;
+
+ ret = gb_operation_sync(connection,
+ GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(fw_mgmt->parent,
+ "failed to get interface firmware version (%d)\n", ret);
+ return ret;
+ }
+
+ fw_info->major = le16_to_cpu(response.major);
+ fw_info->minor = le16_to_cpu(response.minor);
+
+ ret = strscpy_pad(fw_info->firmware_tag, response.firmware_tag);
+ if (ret == -E2BIG)
+ dev_err(fw_mgmt->parent,
+ "fw-version: truncated firmware tag: %s\n",
+ fw_info->firmware_tag);
+
+ return 0;
+}
+
+static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
+ u8 load_method, const char *tag)
+{
+ struct gb_fw_mgmt_load_and_validate_fw_request request;
+ int ret;
+
+ if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
+ load_method != GB_FW_LOAD_METHOD_INTERNAL) {
+ dev_err(fw_mgmt->parent,
+ "invalid load-method (%d)\n", load_method);
+ return -EINVAL;
+ }
+
+ request.load_method = load_method;
+
+ ret = strscpy_pad(request.firmware_tag, tag);
+ if (ret == -E2BIG) {
+ dev_err(fw_mgmt->parent,
+ "load-and-validate: truncated firmware tag: %s\n",
+ request.firmware_tag);
+ return -EINVAL;
+ }
+
+ /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+ ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
+ ret);
+ return ret;
+ }
+
+ fw_mgmt->intf_fw_request_id = ret;
+ fw_mgmt->intf_fw_loaded = false;
+ request.request_id = ret;
+
+ ret = gb_operation_sync(fw_mgmt->connection,
+ GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
+ sizeof(request), NULL, 0);
+ if (ret) {
+ ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
+ fw_mgmt->intf_fw_request_id = 0;
+ dev_err(fw_mgmt->parent,
+ "load and validate firmware request failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
+ struct gb_fw_mgmt_loaded_fw_request *request;
+
+	/* No pending load-and-validate request? */
+ if (!fw_mgmt->intf_fw_request_id) {
+ dev_err(fw_mgmt->parent,
+ "unexpected firmware loaded request received\n");
+ return -ENODEV;
+ }
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+	/* Invalid request id? */
+ if (request->request_id != fw_mgmt->intf_fw_request_id) {
+ dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
+ fw_mgmt->intf_fw_request_id, request->request_id);
+ return -ENODEV;
+ }
+
+ ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
+ fw_mgmt->intf_fw_request_id = 0;
+ fw_mgmt->intf_fw_status = request->status;
+ fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
+ fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);
+
+ if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
+ dev_err(fw_mgmt->parent,
+ "failed to load interface firmware, status:%02x\n",
+ fw_mgmt->intf_fw_status);
+ else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
+ dev_err(fw_mgmt->parent,
+ "failed to validate interface firmware, status:%02x\n",
+ fw_mgmt->intf_fw_status);
+ else
+ fw_mgmt->intf_fw_loaded = true;
+
+ complete(&fw_mgmt->completion);
+
+ return 0;
+}
+
+static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
+ struct fw_mgmt_ioc_get_backend_version *fw_info)
+{
+ struct gb_connection *connection = fw_mgmt->connection;
+ struct gb_fw_mgmt_backend_fw_version_request request;
+ struct gb_fw_mgmt_backend_fw_version_response response;
+ int ret;
+
+ ret = strscpy_pad(request.firmware_tag, fw_info->firmware_tag);
+ if (ret == -E2BIG) {
+ dev_err(fw_mgmt->parent,
+ "backend-fw-version: truncated firmware tag: %s\n",
+ request.firmware_tag);
+ return -EINVAL;
+ }
+
+ ret = gb_operation_sync(connection,
+ GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
+ sizeof(request), &response, sizeof(response));
+ if (ret) {
+ dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
+ fw_info->firmware_tag, ret);
+ return ret;
+ }
+
+ fw_info->status = response.status;
+
+	/* Reset the version as it should be non-zero only in the success case */
+ fw_info->major = 0;
+ fw_info->minor = 0;
+
+ switch (fw_info->status) {
+ case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
+ fw_info->major = le16_to_cpu(response.major);
+ fw_info->minor = le16_to_cpu(response.minor);
+ break;
+ case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
+ case GB_FW_BACKEND_VERSION_STATUS_RETRY:
+ break;
+ case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
+ dev_err(fw_mgmt->parent,
+ "Firmware with tag %s is not supported by Interface\n",
+ fw_info->firmware_tag);
+ break;
+ default:
+ dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
+ fw_info->status);
+ }
+
+ return 0;
+}
+
+static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
+ char *tag)
+{
+ struct gb_fw_mgmt_backend_fw_update_request request;
+ int ret;
+
+ ret = strscpy_pad(request.firmware_tag, tag);
+ if (ret == -E2BIG) {
+ dev_err(fw_mgmt->parent,
+ "backend-fw-update: truncated firmware tag: %s\n",
+ request.firmware_tag);
+ return -EINVAL;
+ }
+
+ /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+ ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
+ ret);
+ return ret;
+ }
+
+ fw_mgmt->backend_fw_request_id = ret;
+ request.request_id = ret;
+
+ ret = gb_operation_sync(fw_mgmt->connection,
+ GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
+ sizeof(request), NULL, 0);
+ if (ret) {
+ ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
+ fw_mgmt->backend_fw_request_id = 0;
+ dev_err(fw_mgmt->parent,
+ "backend %s firmware update request failed (%d)\n", tag,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
+ struct gb_fw_mgmt_backend_fw_updated_request *request;
+
+	/* No pending backend firmware update request? */
+ if (!fw_mgmt->backend_fw_request_id) {
+ dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
+ return -ENODEV;
+ }
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+	/* Invalid request id? */
+ if (request->request_id != fw_mgmt->backend_fw_request_id) {
+ dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
+ fw_mgmt->backend_fw_request_id, request->request_id);
+ return -ENODEV;
+ }
+
+ ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
+ fw_mgmt->backend_fw_request_id = 0;
+ fw_mgmt->backend_fw_status = request->status;
+
+ if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
+ (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
+ dev_err(fw_mgmt->parent,
+ "failed to load backend firmware: %02x\n",
+ fw_mgmt->backend_fw_status);
+
+ complete(&fw_mgmt->completion);
+
+ return 0;
+}
+
+/* Char device fops */
+
+static int fw_mgmt_open(struct inode *inode, struct file *file)
+{
+ struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);
+
+	/* fw_mgmt can't be freed until the file descriptor is closed */
+ if (fw_mgmt) {
+ file->private_data = fw_mgmt;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int fw_mgmt_release(struct inode *inode, struct file *file)
+{
+ struct fw_mgmt *fw_mgmt = file->private_data;
+
+ put_fw_mgmt(fw_mgmt);
+ return 0;
+}
+
+static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
+ void __user *buf)
+{
+ struct fw_mgmt_ioc_get_intf_version intf_fw_info;
+ struct fw_mgmt_ioc_get_backend_version backend_fw_info;
+ struct fw_mgmt_ioc_intf_load_and_validate intf_load;
+ struct fw_mgmt_ioc_backend_fw_update backend_update;
+ unsigned int timeout;
+ int ret;
+
+ /* Reject any operations after mode-switch has started */
+ if (fw_mgmt->mode_switch_started)
+ return -EBUSY;
+
+ switch (cmd) {
+ case FW_MGMT_IOC_GET_INTF_FW:
+ ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
+ &intf_fw_info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_GET_BACKEND_FW:
+ if (copy_from_user(&backend_fw_info, buf,
+ sizeof(backend_fw_info)))
+ return -EFAULT;
+
+ ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
+ &backend_fw_info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &backend_fw_info,
+ sizeof(backend_fw_info)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
+ if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
+ return -EFAULT;
+
+ ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
+ intf_load.load_method, intf_load.firmware_tag);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&fw_mgmt->completion,
+ fw_mgmt->timeout_jiffies)) {
+ dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
+ return -ETIMEDOUT;
+ }
+
+ intf_load.status = fw_mgmt->intf_fw_status;
+ intf_load.major = fw_mgmt->intf_fw_major;
+ intf_load.minor = fw_mgmt->intf_fw_minor;
+
+ if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
+ if (copy_from_user(&backend_update, buf,
+ sizeof(backend_update)))
+ return -EFAULT;
+
+ ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
+ backend_update.firmware_tag);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&fw_mgmt->completion,
+ fw_mgmt->timeout_jiffies)) {
+ dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
+ return -ETIMEDOUT;
+ }
+
+ backend_update.status = fw_mgmt->backend_fw_status;
+
+ if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_SET_TIMEOUT_MS:
+ if (get_user(timeout, (unsigned int __user *)buf))
+ return -EFAULT;
+
+ if (!timeout) {
+ dev_err(fw_mgmt->parent, "timeout can't be zero\n");
+ return -EINVAL;
+ }
+
+ fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);
+
+ return 0;
+ case FW_MGMT_IOC_MODE_SWITCH:
+ if (!fw_mgmt->intf_fw_loaded) {
+ dev_err(fw_mgmt->parent,
+ "Firmware not loaded for mode-switch\n");
+ return -EPERM;
+ }
+
+ /*
+ * Disallow new ioctls as the fw-core bundle driver is going to
+ * get disconnected soon and the character device will get
+ * removed.
+ */
+ fw_mgmt->mode_switch_started = true;
+
+ ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
+ if (ret) {
+ dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
+ ret);
+ fw_mgmt->mode_switch_started = false;
+ return ret;
+ }
+
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fw_mgmt *fw_mgmt = file->private_data;
+ struct gb_bundle *bundle = fw_mgmt->connection->bundle;
+ int ret = -ENODEV;
+
+ /*
+ * Serialize ioctls.
+ *
+	 * We don't want the user to run multiple operations in parallel, for
+	 * example updating the Interface firmware of the same Interface from
+	 * two contexts at once. There is no need to parallelize these
+	 * operations for speed, and serializing them keeps the code simple.
+ *
+ * This is also used to protect ->disabled, which is used to check if
+ * the connection is getting disconnected, so that we don't start any
+ * new operations.
+ */
+ mutex_lock(&fw_mgmt->mutex);
+ if (!fw_mgmt->disabled) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (!ret) {
+ ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
+ gb_pm_runtime_put_autosuspend(bundle);
+ }
+ }
+ mutex_unlock(&fw_mgmt->mutex);
+
+ return ret;
+}
+
+static const struct file_operations fw_mgmt_fops = {
+ .owner = THIS_MODULE,
+ .open = fw_mgmt_open,
+ .release = fw_mgmt_release,
+ .unlocked_ioctl = fw_mgmt_ioctl_unlocked,
+};
+
+int gb_fw_mgmt_request_handler(struct gb_operation *op)
+{
+ u8 type = op->type;
+
+ switch (type) {
+ case GB_FW_MGMT_TYPE_LOADED_FW:
+ return fw_mgmt_interface_fw_loaded_operation(op);
+ case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
+ return fw_mgmt_backend_fw_updated_operation(op);
+ default:
+ dev_err(&op->connection->bundle->dev,
+ "unsupported request: %u\n", type);
+ return -EINVAL;
+ }
+}
+
+int gb_fw_mgmt_connection_init(struct gb_connection *connection)
+{
+ struct fw_mgmt *fw_mgmt;
+ int ret, minor;
+
+ if (!connection)
+ return 0;
+
+ fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
+ if (!fw_mgmt)
+ return -ENOMEM;
+
+ fw_mgmt->parent = &connection->bundle->dev;
+ fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
+ fw_mgmt->connection = connection;
+
+ gb_connection_set_data(connection, fw_mgmt);
+ init_completion(&fw_mgmt->completion);
+ ida_init(&fw_mgmt->id_map);
+ mutex_init(&fw_mgmt->mutex);
+ kref_init(&fw_mgmt->kref);
+
+ mutex_lock(&list_mutex);
+ list_add(&fw_mgmt->node, &fw_mgmt_list);
+ mutex_unlock(&list_mutex);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_list_del;
+
+ minor = ida_alloc_max(&fw_mgmt_minors_map, NUM_MINORS - 1, GFP_KERNEL);
+ if (minor < 0) {
+ ret = minor;
+ goto err_connection_disable;
+ }
+
+ /* Add a char device to allow userspace to interact with fw-mgmt */
+ fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
+ cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);
+
+ ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
+ if (ret)
+ goto err_remove_ida;
+
+ /* Add a soft link to the previously added char-dev within the bundle */
+ fw_mgmt->class_device = device_create(&fw_mgmt_class, fw_mgmt->parent,
+ fw_mgmt->dev_num, NULL,
+ "gb-fw-mgmt-%d", minor);
+ if (IS_ERR(fw_mgmt->class_device)) {
+ ret = PTR_ERR(fw_mgmt->class_device);
+ goto err_del_cdev;
+ }
+
+ return 0;
+
+err_del_cdev:
+ cdev_del(&fw_mgmt->cdev);
+err_remove_ida:
+ ida_free(&fw_mgmt_minors_map, minor);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_list_del:
+ mutex_lock(&list_mutex);
+ list_del(&fw_mgmt->node);
+ mutex_unlock(&list_mutex);
+
+ put_fw_mgmt(fw_mgmt);
+
+ return ret;
+}
+
+void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
+{
+ struct fw_mgmt *fw_mgmt;
+
+ if (!connection)
+ return;
+
+ fw_mgmt = gb_connection_get_data(connection);
+
+ device_destroy(&fw_mgmt_class, fw_mgmt->dev_num);
+ cdev_del(&fw_mgmt->cdev);
+ ida_free(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
+
+ /*
+ * Disallow any new ioctl operations on the char device and wait for
+ * existing ones to finish.
+ */
+ mutex_lock(&fw_mgmt->mutex);
+ fw_mgmt->disabled = true;
+ mutex_unlock(&fw_mgmt->mutex);
+
+ /* All pending greybus operations should have finished by now */
+ gb_connection_disable(fw_mgmt->connection);
+
+	/* Prevent new users from getting access to the fw_mgmt structure */
+ mutex_lock(&list_mutex);
+ list_del(&fw_mgmt->node);
+ mutex_unlock(&list_mutex);
+
+ /*
+	 * All current users of fw_mgmt have taken a reference to it by now,
+	 * so we can drop our reference and let the last user free fw_mgmt
+	 * when it puts its reference.
+ */
+ put_fw_mgmt(fw_mgmt);
+}
+
+int fw_mgmt_init(void)
+{
+ int ret;
+
+ ret = class_register(&fw_mgmt_class);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
+ "gb_fw_mgmt");
+ if (ret)
+ goto err_remove_class;
+
+ return 0;
+
+err_remove_class:
+ class_unregister(&fw_mgmt_class);
+ return ret;
+}
+
+void fw_mgmt_exit(void)
+{
+ unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
+ class_unregister(&fw_mgmt_class);
+ ida_destroy(&fw_mgmt_minors_map);
+}
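
Editor's note: a hypothetical userspace sketch of the ioctl flow implemented
above, using the FW_MGMT_IOC_* definitions from greybus_firmware.h. The device
node index and the firmware tag are made up, and error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "greybus_firmware.h"	/* FW_MGMT_IOC_* and GB_FW_LOAD_METHOD_* */

int main(void)
{
	struct fw_mgmt_ioc_get_intf_version ver;
	struct fw_mgmt_ioc_intf_load_and_validate load;
	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Query the version of the currently running interface firmware */
	if (ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &ver) == 0)
		printf("running %s %u.%u\n", (char *)ver.firmware_tag,
		       ver.major, ver.minor);

	/* Ask the interface to load and validate a new firmware package */
	memset(&load, 0, sizeof(load));
	load.load_method = GB_FW_LOAD_METHOD_UNIPRO;
	strncpy((char *)load.firmware_tag, "s3fwupd",
		sizeof(load.firmware_tag) - 1);
	if (ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load) == 0)
		printf("load status %u, version %u.%u\n",
		       load.status, load.major, load.minor);

	/* Reboot the interface into the new firmware once it is validated */
	ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);

	close(fd);
	return 0;
}
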
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
new file mode 100644
index 000000000000..3e09147435a5
--- /dev/null
+++ b/drivers/staging/greybus/gb-camera.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Camera protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ */
+#ifndef __GB_CAMERA_H
+#define __GB_CAMERA_H
+
+#include <linux/v4l2-mediabus.h>
+
+/* Input flags need to be set from the caller */
+#define GB_CAMERA_IN_FLAG_TEST (1 << 0)
+/* Output flags returned */
+#define GB_CAMERA_OUT_FLAG_ADJUSTED (1 << 0)
+
+/**
+ * struct gb_camera_stream - Represents greybus camera stream.
+ * @width: Stream width in pixels.
+ * @height: Stream height in pixels.
+ * @pixel_code: Media bus pixel code.
+ * @vc: MIPI CSI virtual channel.
+ * @dt: MIPI CSI data types. Most formats use a single data type, in which case
+ * the second element will be ignored.
+ * @max_size: Maximum size of a frame in bytes. The camera module guarantees
+ * that all data between the Frame Start and Frame End packet for
+ * the associated virtual channel and data type(s) will not exceed
+ * this size.
+ */
+struct gb_camera_stream {
+ unsigned int width;
+ unsigned int height;
+ enum v4l2_mbus_pixelcode pixel_code;
+ unsigned int vc;
+ unsigned int dt[2];
+ unsigned int max_size;
+};
+
+/**
+ * struct gb_camera_csi_params - CSI configuration parameters
+ * @num_lanes: number of CSI data lanes
+ * @clk_freq: CSI clock frequency in Hz
+ */
+struct gb_camera_csi_params {
+ unsigned int num_lanes;
+ unsigned int clk_freq;
+};
+
+/**
+ * struct gb_camera_ops - Greybus camera operations, used by the Greybus camera
+ * driver to expose operations to the host camera driver.
+ * @capabilities: Retrieve the camera capabilities and store them in the
+ *                'buf' buffer. The maximum buffer size is specified by the
+ *                caller in the 'len' parameter, and the effective size of
+ *                the capabilities is returned by the function. If the buffer
+ * size is too small to hold the capabilities an error is
+ * returned and the buffer is left untouched.
+ *
+ * @configure_streams: Negotiate configuration and prepare the module for video
+ * capture. The caller specifies the number of streams it
+ * requests in the 'nstreams' argument and the associated
+ * streams configurations in the 'streams' argument. The
+ * GB_CAMERA_IN_FLAG_TEST 'flag' can be set to test a
+ * configuration without applying it, otherwise the
+ * configuration is applied by the module. The module can
+ * decide to modify the requested configuration, including
+ * using a different number of streams. In that case the
+ * modified configuration won't be applied, the
+ * GB_CAMERA_OUT_FLAG_ADJUSTED 'flag' will be set upon
+ * return, and the modified configuration and number of
+ *                      streams are stored in 'streams' and 'nstreams'. The module
+ * returns its CSI-2 bus parameters in the 'csi_params'
+ * structure in all cases.
+ *
+ * @capture: Submit a capture request. The supplied 'request_id' must be unique
+ * and higher than the IDs of all the previously submitted requests.
+ * The 'streams' argument specifies which streams are affected by the
+ * request in the form of a bitmask, with bits corresponding to the
+ * configured streams indexes. If the request contains settings, the
+ * 'settings' argument points to the settings buffer and its size is
+ * specified by the 'settings_size' argument. Otherwise the 'settings'
+ * argument should be set to NULL and 'settings_size' to 0.
+ *
+ * @flush: Flush the capture requests queue. Return the ID of the last request
+ *         that will be processed by the device before it stops transmitting video
+ * frames. All queued capture requests with IDs higher than the returned
+ * ID will be dropped without being processed.
+ */
+struct gb_camera_ops {
+ ssize_t (*capabilities)(void *priv, char *buf, size_t len);
+ int (*configure_streams)(void *priv, unsigned int *nstreams,
+ unsigned int *flags, struct gb_camera_stream *streams,
+ struct gb_camera_csi_params *csi_params);
+ int (*capture)(void *priv, u32 request_id,
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings);
+ int (*flush)(void *priv, u32 *request_id);
+};
+
+/**
+ * struct gb_camera_module - Represents greybus camera module.
+ * @priv: Module private data, passed to all camera operations.
+ * @ops: Greybus camera operation callbacks.
+ * @interface_id: Interface id of the module.
+ * @refcount: Reference counting object.
+ * @release: Module release function.
+ * @list: List entry in the camera modules list.
+ */
+struct gb_camera_module {
+ void *priv;
+ const struct gb_camera_ops *ops;
+
+ unsigned int interface_id;
+ struct kref refcount;
+ void (*release)(struct kref *kref);
+ struct list_head list; /* Global list */
+};
+
+#define gb_camera_call(f, op, args...) \
+ (!(f) ? -ENODEV : (((f)->ops->op) ? \
+ (f)->ops->op((f)->priv, ##args) : -ENOIOCTLCMD))
+
+int gb_camera_register(struct gb_camera_module *module);
+int gb_camera_unregister(struct gb_camera_module *module);
+
+#endif /* __GB_CAMERA_H */
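
Editor's note: a sketch of how a host-side camera driver might drive a
registered module through the gb_camera_call() wrapper defined above. The
stream geometry, media-bus code and request parameters are illustrative only.

#include <linux/bits.h>
#include <linux/errno.h>

#include "gb-camera.h"

static int example_start_streaming(struct gb_camera_module *mod)
{
	struct gb_camera_stream stream = {
		.width = 1920,
		.height = 1080,
		.pixel_code = V4L2_MBUS_FMT_UYVY8_1X16, /* assumed bus code */
	};
	struct gb_camera_csi_params csi;
	unsigned int nstreams = 1;
	unsigned int flags = 0;
	int ret;

	/* Negotiate a single stream; the module may adjust the setup */
	ret = gb_camera_call(mod, configure_streams, &nstreams, &flags,
			     &stream, &csi);
	if (ret < 0)
		return ret;

	/* An adjusted configuration was not applied; the caller must retry */
	if (flags & GB_CAMERA_OUT_FLAG_ADJUSTED)
		return -EAGAIN;

	/* Queue a capture of four frames on stream 0, with no settings blob */
	return gb_camera_call(mod, capture, 1, BIT(0), 4, 0, NULL);
}
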
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
new file mode 100644
index 000000000000..60cf09a302a7
--- /dev/null
+++ b/drivers/staging/greybus/gbphy.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Bridged-Phy Bus driver
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+
+#define GB_GBPHY_AUTOSUSPEND_MS 3000
+
+struct gbphy_host {
+ struct gb_bundle *bundle;
+ struct list_head devices;
+};
+
+static DEFINE_IDA(gbphy_id);
+
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", gbphy_dev->cport_desc->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static struct attribute *gbphy_dev_attrs[] = {
+ &dev_attr_protocol_id.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(gbphy_dev);
+
+static void gbphy_dev_release(struct device *dev)
+{
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+ ida_free(&gbphy_id, gbphy_dev->id);
+ kfree(gbphy_dev);
+}
+
+#ifdef CONFIG_PM
+static int gb_gbphy_idle(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_gbphy_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ gb_gbphy_idle)
+};
+
+static const struct device_type greybus_gbphy_dev_type = {
+ .name = "gbphy_device",
+ .release = gbphy_dev_release,
+ .pm = &gb_gbphy_pm_ops,
+};
+
+static int gbphy_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ const struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc;
+ const struct gb_bundle *bundle = gbphy_dev->bundle;
+ const struct gb_interface *intf = bundle->intf;
+ const struct gb_module *module = intf->module;
+ const struct gb_host_device *hd = intf->hd;
+
+ if (add_uevent_var(env, "BUS=%u", hd->bus_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "MODULE=%u", module->module_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
+ intf->vendor_id, intf->product_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "BUNDLE=%u", gbphy_dev->bundle->id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
+ return -ENOMEM;
+ if (add_uevent_var(env, "GBPHY=%u", gbphy_dev->id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "PROTOCOL_ID=%02x", cport_desc->protocol_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct gbphy_device_id *
+gbphy_dev_match_id(const struct gbphy_device *gbphy_dev,
+ const struct gbphy_driver *gbphy_drv)
+{
+ const struct gbphy_device_id *id = gbphy_drv->id_table;
+
+ if (!id)
+ return NULL;
+
+ for (; id->protocol_id; id++)
+ if (id->protocol_id == gbphy_dev->cport_desc->protocol_id)
+ return id;
+
+ return NULL;
+}
+
+static int gbphy_dev_match(struct device *dev, const struct device_driver *drv)
+{
+ const struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ const struct gbphy_device_id *id;
+
+ id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
+ if (id)
+ return 1;
+
+ return 0;
+}
+
+static int gbphy_dev_probe(struct device *dev)
+{
+ struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ const struct gbphy_device_id *id;
+ int ret;
+
+ id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
+ if (!id)
+ return -ENODEV;
+
+ /* for old kernels we need get_sync to resume parent devices */
+ ret = gb_pm_runtime_get_sync(gbphy_dev->bundle);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(dev, GB_GBPHY_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /*
+ * Drivers should call put on the gbphy dev before returning
+ * from probe if they support runtime pm.
+ */
+ ret = gbphy_drv->probe(gbphy_dev, id);
+ if (ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ }
+
+ gb_pm_runtime_put_autosuspend(gbphy_dev->bundle);
+
+ return ret;
+}
+
+static void gbphy_dev_remove(struct device *dev)
+{
+ struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+ gbphy_drv->remove(gbphy_dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+}
+
+static const struct bus_type gbphy_bus_type = {
+ .name = "gbphy",
+ .match = gbphy_dev_match,
+ .probe = gbphy_dev_probe,
+ .remove = gbphy_dev_remove,
+ .uevent = gbphy_dev_uevent,
+};
+
+int gb_gbphy_register_driver(struct gbphy_driver *driver,
+ struct module *owner, const char *mod_name)
+{
+ int retval;
+
+ if (greybus_disabled())
+ return -ENODEV;
+
+ driver->driver.bus = &gbphy_bus_type;
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ retval = driver_register(&driver->driver);
+ if (retval)
+ return retval;
+
+ pr_info("registered new driver %s\n", driver->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_gbphy_register_driver);
+
+void gb_gbphy_deregister_driver(struct gbphy_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(gb_gbphy_deregister_driver);
+
+static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
+ struct greybus_descriptor_cport *cport_desc)
+{
+ struct gbphy_device *gbphy_dev;
+ int retval;
+ int id;
+
+ id = ida_alloc_min(&gbphy_id, 1, GFP_KERNEL);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
+ if (!gbphy_dev) {
+ ida_free(&gbphy_id, id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gbphy_dev->id = id;
+ gbphy_dev->bundle = bundle;
+ gbphy_dev->cport_desc = cport_desc;
+ gbphy_dev->dev.parent = &bundle->dev;
+ gbphy_dev->dev.bus = &gbphy_bus_type;
+ gbphy_dev->dev.type = &greybus_gbphy_dev_type;
+ gbphy_dev->dev.groups = gbphy_dev_groups;
+ gbphy_dev->dev.dma_mask = bundle->dev.dma_mask;
+ dev_set_name(&gbphy_dev->dev, "gbphy%d", id);
+
+ retval = device_register(&gbphy_dev->dev);
+ if (retval) {
+ put_device(&gbphy_dev->dev);
+ return ERR_PTR(retval);
+ }
+
+ return gbphy_dev;
+}
+
+static void gb_gbphy_disconnect(struct gb_bundle *bundle)
+{
+ struct gbphy_host *gbphy_host = greybus_get_drvdata(bundle);
+ struct gbphy_device *gbphy_dev, *temp;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ gb_pm_runtime_get_noresume(bundle);
+
+ list_for_each_entry_safe(gbphy_dev, temp, &gbphy_host->devices, list) {
+ list_del(&gbphy_dev->list);
+ device_unregister(&gbphy_dev->dev);
+ }
+
+ kfree(gbphy_host);
+}
+
+static int gb_gbphy_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct gbphy_host *gbphy_host;
+ struct gbphy_device *gbphy_dev;
+ int i;
+
+ if (bundle->num_cports == 0)
+ return -ENODEV;
+
+ gbphy_host = kzalloc(sizeof(*gbphy_host), GFP_KERNEL);
+ if (!gbphy_host)
+ return -ENOMEM;
+
+ gbphy_host->bundle = bundle;
+ INIT_LIST_HEAD(&gbphy_host->devices);
+ greybus_set_drvdata(bundle, gbphy_host);
+
+ /*
+	 * Create one child device per cport and bind the bridged-phy
+	 * drivers to them.
+ */
+ for (i = 0; i < bundle->num_cports; ++i) {
+ gbphy_dev = gb_gbphy_create_dev(bundle, &bundle->cport_desc[i]);
+ if (IS_ERR(gbphy_dev)) {
+ gb_gbphy_disconnect(bundle);
+ return PTR_ERR(gbphy_dev);
+ }
+ list_add(&gbphy_dev->list, &gbphy_host->devices);
+ }
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+}
+
+static const struct greybus_bundle_id gb_gbphy_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BRIDGED_PHY) },
+ { },
+};
+MODULE_DEVICE_TABLE(greybus, gb_gbphy_id_table);
+
+static struct greybus_driver gb_gbphy_driver = {
+ .name = "gbphy",
+ .probe = gb_gbphy_probe,
+ .disconnect = gb_gbphy_disconnect,
+ .id_table = gb_gbphy_id_table,
+};
+
+static int __init gbphy_init(void)
+{
+ int retval;
+
+ retval = bus_register(&gbphy_bus_type);
+ if (retval) {
+ pr_err("gbphy bus register failed (%d)\n", retval);
+ return retval;
+ }
+
+ retval = greybus_register(&gb_gbphy_driver);
+ if (retval) {
+ pr_err("error registering greybus driver\n");
+ goto error_gbphy;
+ }
+
+ return 0;
+
+error_gbphy:
+ bus_unregister(&gbphy_bus_type);
+ ida_destroy(&gbphy_id);
+ return retval;
+}
+module_init(gbphy_init);
+
+static void __exit gbphy_exit(void)
+{
+ greybus_deregister(&gb_gbphy_driver);
+ bus_unregister(&gbphy_bus_type);
+ ida_destroy(&gbphy_id);
+}
+module_exit(gbphy_exit);
+
+MODULE_DESCRIPTION("Greybus Bridged-Phy Bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
new file mode 100644
index 000000000000..d4a225b76338
--- /dev/null
+++ b/drivers/staging/greybus/gbphy.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Bridged-Phy Bus driver
+ *
+ * Copyright 2016 Google Inc.
+ */
+
+#ifndef __GBPHY_H
+#define __GBPHY_H
+
+struct gbphy_device {
+ u32 id;
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_bundle *bundle;
+ struct list_head list;
+ struct device dev;
+};
+#define to_gbphy_dev(d) container_of(d, struct gbphy_device, dev)
+
+static inline void *gb_gbphy_get_data(struct gbphy_device *gdev)
+{
+ return dev_get_drvdata(&gdev->dev);
+}
+
+static inline void gb_gbphy_set_data(struct gbphy_device *gdev, void *data)
+{
+ dev_set_drvdata(&gdev->dev, data);
+}
+
+struct gbphy_device_id {
+ __u8 protocol_id;
+};
+
+#define GBPHY_PROTOCOL(p) \
+ .protocol_id = (p),
+
+struct gbphy_driver {
+ const char *name;
+ int (*probe)(struct gbphy_device *device,
+ const struct gbphy_device_id *id);
+ void (*remove)(struct gbphy_device *device);
+ const struct gbphy_device_id *id_table;
+
+ struct device_driver driver;
+};
+#define to_gbphy_driver(d) container_of(d, struct gbphy_driver, driver)
+
+int gb_gbphy_register_driver(struct gbphy_driver *driver,
+ struct module *owner, const char *mod_name);
+void gb_gbphy_deregister_driver(struct gbphy_driver *driver);
+
+#define gb_gbphy_register(driver) \
+ gb_gbphy_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define gb_gbphy_deregister(driver) \
+ gb_gbphy_deregister_driver(driver)
+
+/**
+ * module_gbphy_driver() - Helper macro for registering a gbphy driver
+ * @__gbphy_driver: gbphy_driver structure
+ *
+ * Helper macro for gbphy drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_gbphy_driver(__gbphy_driver) \
+ module_driver(__gbphy_driver, gb_gbphy_register, gb_gbphy_deregister)
+
+#ifdef CONFIG_PM
+static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev)
+{
+ struct device *dev = &gbphy_dev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev)
+{
+ struct device *dev = &gbphy_dev->dev;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev)
+{
+ pm_runtime_get_noresume(&gbphy_dev->dev);
+}
+
+static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev)
+{
+ pm_runtime_put_noidle(&gbphy_dev->dev);
+}
+#else
+static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev) { return 0; }
+static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev) {}
+static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev) {}
+static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev) {}
+#endif
+
+#endif /* __GBPHY_H */
+
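
Editor's note: for reference, a minimal (hypothetical) bridged-phy driver
skeleton built on this header; gpio.c below is a complete in-tree example of
the same pattern. The protocol id and driver body are placeholders.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/greybus.h>

#include "gbphy.h"

struct example_priv {
	int dummy;		/* per-device state would live here */
};

static int example_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct example_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	gb_gbphy_set_data(gbphy_dev, priv);
	return 0;
}

static void example_remove(struct gbphy_device *gbphy_dev)
{
	kfree(gb_gbphy_get_data(gbphy_dev));
}

static const struct gbphy_device_id example_id_table[] = {
	{ GBPHY_PROTOCOL(0x42) },	/* hypothetical protocol id */
	{ },
};
MODULE_DEVICE_TABLE(gbphy, example_id_table);

static struct gbphy_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_id_table,
};
module_gbphy_driver(example_driver);

MODULE_DESCRIPTION("Hypothetical Greybus bridged-phy driver skeleton");
MODULE_LICENSE("GPL v2");
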
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
new file mode 100644
index 000000000000..ac62b932e6a4
--- /dev/null
+++ b/drivers/staging/greybus/gpio.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPIO Greybus driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/gpio/driver.h>
+#include <linux/mutex.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+
+struct gb_gpio_line {
+ /* The following has to be an array of line_max entries */
+ /* --> make them just a flags field */
+ u8 active: 1,
+ direction: 1, /* 0 = output, 1 = input */
+ value: 1; /* 0 = low, 1 = high */
+ u16 debounce_usec;
+
+ u8 irq_type;
+ bool irq_type_pending;
+ bool masked;
+ bool masked_pending;
+};
+
+struct gb_gpio_controller {
+ struct gbphy_device *gbphy_dev;
+ struct gb_connection *connection;
+ u8 line_max; /* max line number */
+ struct gb_gpio_line *lines;
+
+ struct gpio_chip chip;
+ struct irq_chip irqc;
+ struct mutex irq_lock;
+};
+
+static struct gpio_chip *irq_data_to_gpio_chip(struct irq_data *d)
+{
+ return d->domain->host_data;
+}
+
+static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
+{
+ struct gb_gpio_line_count_response response;
+ int ret;
+
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
+ NULL, 0, &response, sizeof(response));
+ if (!ret)
+ ggc->line_max = response.count;
+ return ret;
+}
+
+static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
+{
+ struct gb_gpio_activate_request request;
+ struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return ret;
+ }
+
+ ggc->lines[which].active = true;
+
+ return 0;
+}
+
+static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
+ struct device *dev = &gbphy_dev->dev;
+ struct gb_gpio_deactivate_request request;
+ int ret;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to deactivate gpio %u\n", which);
+ goto out_pm_put;
+ }
+
+ ggc->lines[which].active = false;
+
+out_pm_put:
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+}
+
+static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_get_direction_request request;
+ struct gb_gpio_get_direction_response response;
+ int ret;
+ u8 direction;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret)
+ return ret;
+
+ direction = response.direction;
+ if (direction && direction != 1) {
+ dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
+ which, direction);
+ }
+ ggc->lines[which].direction = direction ? 1 : 0;
+ return 0;
+}
+
+static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct gb_gpio_direction_in_request request;
+ int ret;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
+ &request, sizeof(request), NULL, 0);
+ if (!ret)
+ ggc->lines[which].direction = 1;
+ return ret;
+}
+
+static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
+ u8 which, bool value_high)
+{
+ struct gb_gpio_direction_out_request request;
+ int ret;
+
+ request.which = which;
+ request.value = value_high ? 1 : 0;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
+ &request, sizeof(request), NULL, 0);
+ if (!ret)
+ ggc->lines[which].direction = 0;
+ return ret;
+}
+
+static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_get_value_request request;
+ struct gb_gpio_get_value_response response;
+ int ret;
+ u8 value;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(dev, "failed to get value of gpio %u\n", which);
+ return ret;
+ }
+
+ value = response.value;
+ if (value && value != 1) {
+ dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
+ which, value);
+ }
+ ggc->lines[which].value = value ? 1 : 0;
+ return 0;
+}
+
+static int gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
+ u8 which, bool value_high)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_set_value_request request;
+ int ret;
+
+ request.which = which;
+ request.value = value_high ? 1 : 0;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to set value of gpio %u\n", which);
+ return ret;
+ }
+
+ ggc->lines[which].value = request.value;
+
+ return 0;
+}
+
+static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
+ u8 which, u16 debounce_usec)
+{
+ struct gb_gpio_set_debounce_request request;
+ int ret;
+
+ request.which = which;
+ request.usec = cpu_to_le16(debounce_usec);
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
+ &request, sizeof(request), NULL, 0);
+ if (!ret)
+ ggc->lines[which].debounce_usec = debounce_usec;
+ return ret;
+}
+
+static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_irq_mask_request request;
+ int ret;
+
+ request.which = hwirq;
+ ret = gb_operation_sync(ggc->connection,
+ GB_GPIO_TYPE_IRQ_MASK,
+ &request, sizeof(request), NULL, 0);
+ if (ret)
+ dev_err(dev, "failed to mask irq: %d\n", ret);
+}
+
+static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_irq_unmask_request request;
+ int ret;
+
+ request.which = hwirq;
+ ret = gb_operation_sync(ggc->connection,
+ GB_GPIO_TYPE_IRQ_UNMASK,
+ &request, sizeof(request), NULL, 0);
+ if (ret)
+ dev_err(dev, "failed to unmask irq: %d\n", ret);
+}
+
+static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
+ u8 hwirq, u8 type)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_irq_type_request request;
+ int ret;
+
+ request.which = hwirq;
+ request.type = type;
+
+ ret = gb_operation_sync(ggc->connection,
+ GB_GPIO_TYPE_IRQ_TYPE,
+ &request, sizeof(request), NULL, 0);
+ if (ret)
+ dev_err(dev, "failed to set irq type: %d\n", ret);
+}
+
+static void gb_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+ line->masked = true;
+ line->masked_pending = true;
+}
+
+static void gb_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+ line->masked = false;
+ line->masked_pending = true;
+}
+
+static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+ struct device *dev = &ggc->gbphy_dev->dev;
+ u8 irq_type;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ irq_type = GB_GPIO_IRQ_TYPE_NONE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ dev_err(dev, "unsupported irq type: %u\n", type);
+ return -EINVAL;
+ }
+
+ line->irq_type = irq_type;
+ line->irq_type_pending = true;
+
+ return 0;
+}
+
+static void gb_gpio_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+
+ mutex_lock(&ggc->irq_lock);
+}
+
+static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+ if (line->irq_type_pending) {
+ _gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
+ line->irq_type_pending = false;
+ }
+
+ if (line->masked_pending) {
+ if (line->masked)
+ _gb_gpio_irq_mask(ggc, d->hwirq);
+ else
+ _gb_gpio_irq_unmask(ggc, d->hwirq);
+ line->masked_pending = false;
+ }
+
+ mutex_unlock(&ggc->irq_lock);
+}
+
+static int gb_gpio_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_message *request;
+ struct gb_gpio_irq_event_request *event;
+ u8 type = op->type;
+ int irq, ret;
+
+ if (type != GB_GPIO_TYPE_IRQ_EVENT) {
+ dev_err(dev, "unsupported unsolicited request: %u\n", type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*event)) {
+ dev_err(dev, "short event received (%zu < %zu)\n",
+ request->payload_size, sizeof(*event));
+ return -EINVAL;
+ }
+
+ event = request->payload;
+ if (event->which > ggc->line_max) {
+ dev_err(dev, "invalid hw irq: %d\n", event->which);
+ return -EINVAL;
+ }
+
+ irq = irq_find_mapping(ggc->chip.irq.domain, event->which);
+ if (!irq) {
+ dev_err(dev, "failed to find IRQ\n");
+ return -EINVAL;
+ }
+
+ ret = generic_handle_irq_safe(irq);
+ if (ret)
+ dev_err(dev, "failed to invoke irq handler\n");
+
+ return ret;
+}
+
+static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+
+ return gb_gpio_activate_operation(ggc, (u8)offset);
+}
+
+static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+
+ gb_gpio_deactivate_operation(ggc, (u8)offset);
+}
+
+static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+ u8 which;
+ int ret;
+
+ which = (u8)offset;
+ ret = gb_gpio_get_direction_operation(ggc, which);
+ if (ret)
+ return ret;
+
+ return ggc->lines[which].direction ? 1 : 0;
+}
+
+static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+
+ return gb_gpio_direction_in_operation(ggc, (u8)offset);
+}
+
+static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+
+ return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
+}
+
+static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+ u8 which;
+ int ret;
+
+ which = (u8)offset;
+ ret = gb_gpio_get_value_operation(ggc, which);
+ if (ret)
+ return ret;
+
+ return ggc->lines[which].value;
+}
+
+static int gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+
+ return gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
+}
+
+static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ if (debounce > U16_MAX)
+ return -EINVAL;
+
+ return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
+}
+
+static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
+{
+ int ret;
+
+ /* Now find out how many lines there are */
+ ret = gb_gpio_line_count_operation(ggc);
+ if (ret)
+ return ret;
+
+ ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
+ GFP_KERNEL);
+ if (!ggc->lines)
+ return -ENOMEM;
+
+ return ret;
+}
+
+static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct gb_gpio_controller *ggc;
+ struct gpio_chip *gpio;
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqc;
+ int ret;
+
+ ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
+ if (!ggc)
+ return -ENOMEM;
+
+ connection =
+ gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ gb_gpio_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto exit_ggc_free;
+ }
+
+ ggc->connection = connection;
+ gb_connection_set_data(connection, ggc);
+ ggc->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, ggc);
+
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_gpio_controller_setup(ggc);
+ if (ret)
+ goto exit_connection_disable;
+
+ irqc = &ggc->irqc;
+ irqc->irq_mask = gb_gpio_irq_mask;
+ irqc->irq_unmask = gb_gpio_irq_unmask;
+ irqc->irq_set_type = gb_gpio_irq_set_type;
+ irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
+ irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
+ irqc->name = "greybus_gpio";
+
+ mutex_init(&ggc->irq_lock);
+
+ gpio = &ggc->chip;
+
+ gpio->label = "greybus_gpio";
+ gpio->parent = &gbphy_dev->dev;
+ gpio->owner = THIS_MODULE;
+
+ gpio->request = gb_gpio_request;
+ gpio->free = gb_gpio_free;
+ gpio->get_direction = gb_gpio_get_direction;
+ gpio->direction_input = gb_gpio_direction_input;
+ gpio->direction_output = gb_gpio_direction_output;
+ gpio->get = gb_gpio_get;
+ gpio->set = gb_gpio_set;
+ gpio->set_config = gb_gpio_set_config;
+ gpio->base = -1; /* Allocate base dynamically */
+ gpio->ngpio = ggc->line_max + 1;
+ gpio->can_sleep = true;
+
+ girq = &gpio->irq;
+ girq->chip = irqc;
+ /* The event comes from the outside so no parent handler */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_line_free;
+
+ ret = gpiochip_add_data(gpio, ggc);
+ if (ret) {
+ dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
+ goto exit_line_free;
+ }
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_line_free:
+ kfree(ggc->lines);
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_ggc_free:
+ kfree(ggc);
+ return ret;
+}
+
+static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = ggc->connection;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ gb_connection_disable_rx(connection);
+ gpiochip_remove(&ggc->chip);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ kfree(ggc->lines);
+ kfree(ggc);
+}
+
+static const struct gbphy_device_id gb_gpio_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
+
+static struct gbphy_driver gpio_driver = {
+ .name = "gpio",
+ .probe = gb_gpio_probe,
+ .remove = gb_gpio_remove,
+ .id_table = gb_gpio_id_table,
+};
+
+module_gbphy_driver(gpio_driver);
+MODULE_DESCRIPTION("GPIO Greybus driver");
+MODULE_LICENSE("GPL v2");
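The .set_config handler above only accepts debounce requests packed as PIN_CONFIG_INPUT_DEBOUNCE by the generic pinconf helpers, with the value capped at U16_MAX microseconds. A minimal consumer-side sketch of how such a request reaches gb_gpio_set_config(); the device pointer, the "button" lookup name and the 5 ms value are illustrative assumptions, not part of this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/*
 * Hypothetical consumer: gpiod_set_debounce() packs the value as
 * PIN_CONFIG_INPUT_DEBOUNCE and gpiolib hands it to the chip's
 * .set_config callback, i.e. gb_gpio_set_config() above.
 */
static int example_debounce(struct device *dev)
{
	struct gpio_desc *desc;
	int ret;

	desc = devm_gpiod_get(dev, "button", GPIOD_IN);	/* assumed lookup name */
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = gpiod_set_debounce(desc, 5000);	/* 5 ms, in microseconds */
	if (ret)
		dev_warn(dev, "debounce not supported: %d\n", ret);

	return ret;
}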
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
new file mode 100644
index 000000000000..ee88f880cfe3
--- /dev/null
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Greybus Component Authentication User Header
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __GREYBUS_AUTHENTICATION_USER_H
+#define __GREYBUS_AUTHENTICATION_USER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define CAP_CERTIFICATE_MAX_SIZE 1600
+#define CAP_SIGNATURE_MAX_SIZE 320
+
+/* Certificate class types */
+#define CAP_CERT_IMS_EAPC 0x00000001
+#define CAP_CERT_IMS_EASC 0x00000002
+#define CAP_CERT_IMS_EARC 0x00000003
+#define CAP_CERT_IMS_IAPC 0x00000004
+#define CAP_CERT_IMS_IASC 0x00000005
+#define CAP_CERT_IMS_IARC 0x00000006
+
+/* IMS Certificate response result codes */
+#define CAP_IMS_RESULT_CERT_FOUND 0x00
+#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
+#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
+#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
+
+/* Authentication types */
+#define CAP_AUTH_IMS_PRI 0x00000001
+#define CAP_AUTH_IMS_SEC 0x00000002
+#define CAP_AUTH_IMS_RSA 0x00000003
+
+/* Authenticate response result codes */
+#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
+#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
+#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
+#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
+#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
+
+/* IOCTL support */
+struct cap_ioc_get_endpoint_uid {
+ __u8 uid[8];
+} __packed;
+
+struct cap_ioc_get_ims_certificate {
+ __u32 certificate_class;
+ __u32 certificate_id;
+
+ __u8 result_code;
+ __u32 cert_size;
+ __u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
+} __packed;
+
+struct cap_ioc_authenticate {
+ __u32 auth_type;
+ __u8 uid[8];
+ __u8 challenge[32];
+
+ __u8 result_code;
+ __u8 response[64];
+ __u32 signature_size;
+ __u8 signature[CAP_SIGNATURE_MAX_SIZE];
+} __packed;
+
+#define CAP_IOCTL_BASE 'C'
+#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
+#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
+#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
+
+#endif /* __GREYBUS_AUTHENTICATION_USER_H */
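The ioctls above are the userspace interface of the component authentication (CAP) driver. Below is a minimal userspace sketch of how they might be exercised; the /dev/gb-authenticate-0 node name, the CAP_AUTH_IMS_PRI choice and the empty challenge are assumptions for illustration (note also that the header's __packed annotation needs a definition when built outside the kernel tree).

/* Userspace sketch; device node and authentication flow are assumed. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "greybus_authentication.h"

int main(void)
{
	struct cap_ioc_get_endpoint_uid uid = { 0 };
	struct cap_ioc_authenticate auth = { 0 };
	int fd = open("/dev/gb-authenticate-0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid) < 0) {
		perror("CAP_IOC_GET_ENDPOINT_UID");
		close(fd);
		return 1;
	}

	auth.auth_type = CAP_AUTH_IMS_PRI;
	memcpy(auth.uid, uid.uid, sizeof(auth.uid));
	/* auth.challenge would be filled with caller-chosen random bytes */

	if (ioctl(fd, CAP_IOC_AUTHENTICATE, &auth) < 0)
		perror("CAP_IOC_AUTHENTICATE");
	else
		printf("authenticate result code: %u\n", auth.result_code);

	close(fd);
	return 0;
}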
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
new file mode 100644
index 000000000000..b6042a82ada4
--- /dev/null
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Greybus Firmware Management User Header
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __GREYBUS_FIRMWARE_USER_H
+#define __GREYBUS_FIRMWARE_USER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define GB_FIRMWARE_U_TAG_MAX_SIZE 10
+
+#define GB_FW_U_LOAD_METHOD_UNIPRO 0x01
+#define GB_FW_U_LOAD_METHOD_INTERNAL 0x02
+
+#define GB_FW_U_LOAD_STATUS_FAILED 0x00
+#define GB_FW_U_LOAD_STATUS_UNVALIDATED 0x01
+#define GB_FW_U_LOAD_STATUS_VALIDATED 0x02
+#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED 0x03
+
+#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS 0x01
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND 0x02
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH 0x03
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE 0x04
+#define GB_FW_U_BACKEND_FW_STATUS_INT 0x05
+#define GB_FW_U_BACKEND_FW_STATUS_RETRY 0x06
+#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
+
+#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS 0x01
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
+#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY 0x04
+#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT 0x05
+
+/* IOCTL support */
+struct fw_mgmt_ioc_get_intf_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct fw_mgmt_ioc_get_backend_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+ __u8 status;
+} __packed;
+
+struct fw_mgmt_ioc_intf_load_and_validate {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u8 load_method;
+ __u8 status;
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct fw_mgmt_ioc_backend_fw_update {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u8 status;
+} __packed;
+
+#define FW_MGMT_IOCTL_BASE 'F'
+#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
+#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
+#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
+#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
+#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
+#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
+
+#endif /* __GREYBUS_FIRMWARE_USER_H */
+
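As with the authentication header, a hedged userspace sketch of the firmware-management ioctls; the /dev/gb-fw-mgmt-0 node name and the "s3fw" firmware tag are platform-dependent placeholders, not requirements of this header.

/* Userspace sketch; device node and firmware tag are assumed placeholders. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "greybus_firmware.h"

int main(void)
{
	struct fw_mgmt_ioc_intf_load_and_validate load = { 0 };
	unsigned int timeout_ms = 10000;
	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Give slow interfaces more time before the operation is aborted */
	if (ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &timeout_ms) < 0)
		perror("FW_MGMT_IOC_SET_TIMEOUT_MS");

	strncpy((char *)load.firmware_tag, "s3fw",
		sizeof(load.firmware_tag) - 1);
	load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;

	if (ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load) < 0)
		perror("FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE");
	else
		printf("status %u, interface firmware %u.%u\n",
		       load.status, load.major, load.minor);

	close(fd);
	return 0;
}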
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
new file mode 100644
index 000000000000..63c77a3df591
--- /dev/null
+++ b/drivers/staging/greybus/hid.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HID class driver for the Greybus.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/greybus.h>
+
+/* Greybus HID device's structure */
+struct gb_hid {
+ struct gb_bundle *bundle;
+ struct gb_connection *connection;
+
+ struct hid_device *hid;
+ struct gb_hid_desc_response hdesc;
+
+ unsigned long flags;
+#define GB_HID_STARTED 0x01
+#define GB_HID_READ_PENDING 0x04
+
+ unsigned int bufsize;
+ char *inbuf;
+};
+
+/* Routines to get controller's information over greybus */
+
+/* Operations performed on greybus */
+static int gb_hid_get_desc(struct gb_hid *ghid)
+{
+ return gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_DESC, NULL,
+ 0, &ghid->hdesc, sizeof(ghid->hdesc));
+}
+
+static int gb_hid_get_report_desc(struct gb_hid *ghid, char *rdesc)
+{
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT_DESC,
+ NULL, 0, rdesc,
+ le16_to_cpu(ghid->hdesc.wReportDescLength));
+
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_set_power(struct gb_hid *ghid, int type)
+{
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(ghid->connection, type, NULL, 0, NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_get_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
+ unsigned char *buf, int len)
+{
+ struct gb_hid_get_report_request request;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ request.report_type = report_type;
+ request.report_id = report_id;
+
+ ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT,
+ &request, sizeof(request), buf, len);
+
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_set_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
+ unsigned char *buf, int len)
+{
+ struct gb_hid_set_report_request *request;
+ struct gb_operation *operation;
+ int ret, size = sizeof(*request) + len - 1;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ operation = gb_operation_create(ghid->connection,
+ GB_HID_TYPE_SET_REPORT, size, 0,
+ GFP_KERNEL);
+ if (!operation) {
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+ return -ENOMEM;
+ }
+
+ request = operation->request->payload;
+ request->report_type = report_type;
+ request->report_id = report_id;
+ memcpy(request->report, buf, len);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret) {
+ dev_err(&operation->connection->bundle->dev,
+ "failed to set report: %d\n", ret);
+ } else {
+ ret = len;
+ }
+
+ gb_operation_put(operation);
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_hid *ghid = gb_connection_get_data(connection);
+ struct gb_hid_input_report_request *request = op->request->payload;
+
+ if (op->type != GB_HID_TYPE_IRQ_EVENT) {
+ dev_err(&connection->bundle->dev,
+ "unsupported unsolicited request\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(GB_HID_STARTED, &ghid->flags))
+ hid_input_report(ghid->hid, HID_INPUT_REPORT,
+ request->report, op->request->payload_size, 1);
+
+ return 0;
+}
+
+static int gb_hid_report_len(struct hid_report *report)
+{
+ return ((report->size - 1) >> 3) + 1 +
+ report->device->report_enum[report->type].numbered;
+}
+
+static void gb_hid_find_max_report(struct hid_device *hid, unsigned int type,
+ unsigned int *max)
+{
+ struct hid_report *report;
+ unsigned int size;
+
+ list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+ size = gb_hid_report_len(report);
+ if (*max < size)
+ *max = size;
+ }
+}
+
+static void gb_hid_free_buffers(struct gb_hid *ghid)
+{
+ kfree(ghid->inbuf);
+ ghid->inbuf = NULL;
+ ghid->bufsize = 0;
+}
+
+static int gb_hid_alloc_buffers(struct gb_hid *ghid, size_t bufsize)
+{
+ ghid->inbuf = kzalloc(bufsize, GFP_KERNEL);
+ if (!ghid->inbuf)
+ return -ENOMEM;
+
+ ghid->bufsize = bufsize;
+
+ return 0;
+}
+
+/* Routines dealing with reports */
+static void gb_hid_init_report(struct gb_hid *ghid, struct hid_report *report)
+{
+ unsigned int size;
+
+ size = gb_hid_report_len(report);
+ if (gb_hid_get_report(ghid, report->type, report->id, ghid->inbuf,
+ size))
+ return;
+
+ /*
+	 * hid->driver_lock is held as we are in the probe path; we just
+	 * need to set up the input fields, so using hid_report_raw_event()
+	 * is safe.
+ */
+ hid_report_raw_event(ghid->hid, report->type, ghid->inbuf, size, 1);
+}
+
+static void gb_hid_init_reports(struct gb_hid *ghid)
+{
+ struct hid_device *hid = ghid->hid;
+ struct hid_report *report;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_INPUT_REPORT].report_list,
+ list)
+ gb_hid_init_report(ghid, report);
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_FEATURE_REPORT].report_list,
+ list)
+ gb_hid_init_report(ghid, report);
+}
+
+static int __gb_hid_get_raw_report(struct hid_device *hid,
+ unsigned char report_number, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ ret = gb_hid_get_report(ghid, report_type, report_number, buf, count);
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+
+static int __gb_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+ size_t len, unsigned char report_type)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int report_id = buf[0];
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT)
+ return -EINVAL;
+
+ if (report_id) {
+ buf++;
+ len--;
+ }
+
+ ret = gb_hid_set_report(ghid, report_type, report_id, buf, len);
+ if (report_id && ret >= 0)
+ ret++; /* add report_id to the number of transferred bytes */
+
+	return ret;
+}
+
+static int gb_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ return __gb_hid_get_raw_report(hid, reportnum, buf, len, rtype);
+ case HID_REQ_SET_REPORT:
+ if (buf[0] != reportnum)
+ return -EINVAL;
+ return __gb_hid_output_raw_report(hid, buf, len, rtype);
+ default:
+ return -EIO;
+ }
+}
+
+/* HID Callbacks */
+static int gb_hid_parse(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ unsigned int rsize;
+ char *rdesc;
+ int ret;
+
+ rsize = le16_to_cpu(ghid->hdesc.wReportDescLength);
+ if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+ dbg_hid("weird size of report descriptor (%u)\n", rsize);
+ return -EINVAL;
+ }
+
+ rdesc = kzalloc(rsize, GFP_KERNEL);
+ if (!rdesc)
+ return -ENOMEM;
+
+ ret = gb_hid_get_report_desc(ghid, rdesc);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ goto free_rdesc;
+ }
+
+ ret = hid_parse_report(hid, rdesc, rsize);
+ if (ret)
+ dbg_hid("parsing report descriptor failed\n");
+
+free_rdesc:
+ kfree(rdesc);
+
+ return ret;
+}
+
+static int gb_hid_start(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ unsigned int bufsize = HID_MIN_BUFFER_SIZE;
+ int ret;
+
+ gb_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+ gb_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+ gb_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+
+ if (bufsize > HID_MAX_BUFFER_SIZE)
+ bufsize = HID_MAX_BUFFER_SIZE;
+
+ ret = gb_hid_alloc_buffers(ghid, bufsize);
+ if (ret)
+ return ret;
+
+ if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
+ gb_hid_init_reports(ghid);
+
+ return 0;
+}
+
+static void gb_hid_stop(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+
+ gb_hid_free_buffers(ghid);
+}
+
+static int gb_hid_open(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int ret;
+
+ ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
+ if (ret < 0)
+ return ret;
+
+ set_bit(GB_HID_STARTED, &ghid->flags);
+ return 0;
+}
+
+static void gb_hid_close(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int ret;
+
+ clear_bit(GB_HID_STARTED, &ghid->flags);
+
+ /* Save some power */
+ ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
+ if (ret)
+ dev_err(&ghid->connection->bundle->dev,
+ "failed to power off (%d)\n", ret);
+}
+
+static int gb_hid_power(struct hid_device *hid, int lvl)
+{
+ struct gb_hid *ghid = hid->driver_data;
+
+ switch (lvl) {
+ case PM_HINT_FULLON:
+ return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
+ case PM_HINT_NORMAL:
+ return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
+ }
+
+ return 0;
+}
+
+/* HID structure to pass callbacks */
+static const struct hid_ll_driver gb_hid_ll_driver = {
+ .parse = gb_hid_parse,
+ .start = gb_hid_start,
+ .stop = gb_hid_stop,
+ .open = gb_hid_open,
+ .close = gb_hid_close,
+ .power = gb_hid_power,
+ .raw_request = gb_hid_raw_request,
+};
+
+static int gb_hid_init(struct gb_hid *ghid)
+{
+ struct hid_device *hid = ghid->hid;
+ int ret;
+
+ ret = gb_hid_get_desc(ghid);
+ if (ret)
+ return ret;
+
+ hid->version = le16_to_cpu(ghid->hdesc.bcdHID);
+ hid->vendor = le16_to_cpu(ghid->hdesc.wVendorID);
+ hid->product = le16_to_cpu(ghid->hdesc.wProductID);
+ hid->country = ghid->hdesc.bCountryCode;
+
+ hid->driver_data = ghid;
+ hid->ll_driver = &gb_hid_ll_driver;
+ hid->dev.parent = &ghid->connection->bundle->dev;
+// hid->bus = BUS_GREYBUS; /* Need a bustype for GREYBUS in <linux/input.h> */
+
+ /* Set HID device's name */
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+ dev_name(&ghid->connection->bundle->dev),
+ hid->vendor, hid->product);
+
+ return 0;
+}
+
+static int gb_hid_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct hid_device *hid;
+ struct gb_hid *ghid;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_HID)
+ return -ENODEV;
+
+ ghid = kzalloc(sizeof(*ghid), GFP_KERNEL);
+ if (!ghid)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_hid_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto err_free_ghid;
+ }
+
+ gb_connection_set_data(connection, ghid);
+ ghid->connection = connection;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ ret = PTR_ERR(hid);
+ goto err_connection_destroy;
+ }
+
+ ghid->hid = hid;
+ ghid->bundle = bundle;
+
+ greybus_set_drvdata(bundle, ghid);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_destroy_hid;
+
+ ret = gb_hid_init(ghid);
+ if (ret)
+ goto err_connection_disable;
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_err(hid, "can't add hid device: %d\n", ret);
+ goto err_connection_disable;
+ }
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+err_connection_disable:
+ gb_connection_disable(connection);
+err_destroy_hid:
+ hid_destroy_device(hid);
+err_connection_destroy:
+ gb_connection_destroy(connection);
+err_free_ghid:
+ kfree(ghid);
+
+ return ret;
+}
+
+static void gb_hid_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_hid *ghid = greybus_get_drvdata(bundle);
+
+ if (gb_pm_runtime_get_sync(bundle))
+ gb_pm_runtime_get_noresume(bundle);
+
+ hid_destroy_device(ghid->hid);
+ gb_connection_disable(ghid->connection);
+ gb_connection_destroy(ghid->connection);
+ kfree(ghid);
+}
+
+static const struct greybus_bundle_id gb_hid_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_HID) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_hid_id_table);
+
+static struct greybus_driver gb_hid_driver = {
+ .name = "hid",
+ .probe = gb_hid_probe,
+ .disconnect = gb_hid_disconnect,
+ .id_table = gb_hid_id_table,
+};
+module_greybus_driver(gb_hid_driver);
+
+MODULE_DESCRIPTION("HID class driver for the Greybus");
+MODULE_LICENSE("GPL v2");
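gb_hid_report_len() converts a report size in bits into the number of bytes to transfer; a short worked example (with illustrative numbers only) of how gb_hid_start() sizes the receive buffer from it:

/*
 * Example: a 12-bit input report on a device that uses numbered reports:
 *
 *     ((12 - 1) >> 3) + 1 + 1 = 1 + 1 + 1 = 3 bytes
 *
 * (payload rounded up to whole bytes, plus one byte for the report ID).
 * gb_hid_start() takes the maximum of this value over all input, output
 * and feature reports, clamps it to at most HID_MAX_BUFFER_SIZE (starting
 * from HID_MIN_BUFFER_SIZE), and allocates ghid->inbuf of that size.
 */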
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
new file mode 100644
index 000000000000..14f1ff6d448c
--- /dev/null
+++ b/drivers/staging/greybus/i2c.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I2C bridge driver for the Greybus "generic" I2C module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+
+struct gb_i2c_device {
+ struct gb_connection *connection;
+ struct gbphy_device *gbphy_dev;
+
+ u32 functionality;
+
+ struct i2c_adapter adapter;
+};
+
+/*
+ * Map Greybus i2c functionality bits into Linux ones
+ */
+static u32 gb_i2c_functionality_map(u32 gb_i2c_functionality)
+{
+ return gb_i2c_functionality; /* All bits the same for now */
+}
+
+/*
+ * Do initial setup of the i2c device. This includes verifying we
+ * can support it (based on the protocol version it advertises).
+ * If that's OK, we get and cache its functionality bits.
+ *
+ * Note: gb_i2c_dev->connection is assumed to be valid.
+ */
+static int gb_i2c_device_setup(struct gb_i2c_device *gb_i2c_dev)
+{
+ struct gb_i2c_functionality_response response;
+ u32 functionality;
+ int ret;
+
+ ret = gb_operation_sync(gb_i2c_dev->connection,
+ GB_I2C_TYPE_FUNCTIONALITY,
+ NULL, 0, &response, sizeof(response));
+ if (ret)
+ return ret;
+
+ functionality = le32_to_cpu(response.functionality);
+ gb_i2c_dev->functionality = gb_i2c_functionality_map(functionality);
+
+ return 0;
+}
+
+/*
+ * Map Linux i2c_msg flags into Greybus i2c transfer op flags.
+ */
+static u16 gb_i2c_transfer_op_flags_map(u16 flags)
+{
+ return flags; /* All flags the same for now */
+}
+
+static void
+gb_i2c_fill_transfer_op(struct gb_i2c_transfer_op *op, struct i2c_msg *msg)
+{
+ u16 flags = gb_i2c_transfer_op_flags_map(msg->flags);
+
+ op->addr = cpu_to_le16(msg->addr);
+ op->flags = cpu_to_le16(flags);
+ op->size = cpu_to_le16(msg->len);
+}
+
+static struct gb_operation *
+gb_i2c_operation_create(struct gb_connection *connection,
+ struct i2c_msg *msgs, u32 msg_count)
+{
+ struct gb_i2c_device *gb_i2c_dev = gb_connection_get_data(connection);
+ struct gb_i2c_transfer_request *request;
+ struct gb_operation *operation;
+ struct gb_i2c_transfer_op *op;
+ struct i2c_msg *msg;
+ u32 data_out_size = 0;
+ u32 data_in_size = 0;
+ size_t request_size;
+ void *data;
+ u16 op_count;
+ u32 i;
+
+ if (msg_count > (u32)U16_MAX) {
+ dev_err(&gb_i2c_dev->gbphy_dev->dev, "msg_count (%u) too big\n",
+ msg_count);
+ return NULL;
+ }
+ op_count = (u16)msg_count;
+
+ /*
+ * In addition to space for all message descriptors we need
+ * to have enough to hold all outbound message data.
+ */
+ msg = msgs;
+ for (i = 0; i < msg_count; i++, msg++)
+ if (msg->flags & I2C_M_RD)
+ data_in_size += (u32)msg->len;
+ else
+ data_out_size += (u32)msg->len;
+
+ request_size = sizeof(*request);
+ request_size += msg_count * sizeof(*op);
+ request_size += data_out_size;
+
+ /* Response consists only of incoming data */
+ operation = gb_operation_create(connection, GB_I2C_TYPE_TRANSFER,
+ request_size, data_in_size, GFP_KERNEL);
+ if (!operation)
+ return NULL;
+
+ request = operation->request->payload;
+ request->op_count = cpu_to_le16(op_count);
+ /* Fill in the ops array */
+ op = &request->ops[0];
+ msg = msgs;
+ for (i = 0; i < msg_count; i++)
+ gb_i2c_fill_transfer_op(op++, msg++);
+
+ if (!data_out_size)
+ return operation;
+
+ /* Copy over the outgoing data; it starts after the last op */
+ data = op;
+ msg = msgs;
+ for (i = 0; i < msg_count; i++) {
+ if (!(msg->flags & I2C_M_RD)) {
+ memcpy(data, msg->buf, msg->len);
+ data += msg->len;
+ }
+ msg++;
+ }
+
+ return operation;
+}
+
+static void gb_i2c_decode_response(struct i2c_msg *msgs, u32 msg_count,
+ struct gb_i2c_transfer_response *response)
+{
+ struct i2c_msg *msg = msgs;
+ u8 *data;
+ u32 i;
+
+ if (!response)
+ return;
+ data = response->data;
+ for (i = 0; i < msg_count; i++) {
+ if (msg->flags & I2C_M_RD) {
+ memcpy(msg->buf, data, msg->len);
+ data += msg->len;
+ }
+ msg++;
+ }
+}
+
+/*
+ * Some i2c transfer errors are expected (e.g. -EAGAIN, -ENODEV) and are
+ * not logged as failures.
+ */
+static bool gb_i2c_expected_transfer_error(int errno)
+{
+ return errno == -EAGAIN || errno == -ENODEV;
+}
+
+static int gb_i2c_transfer_operation(struct gb_i2c_device *gb_i2c_dev,
+ struct i2c_msg *msgs, u32 msg_count)
+{
+ struct gb_connection *connection = gb_i2c_dev->connection;
+ struct device *dev = &gb_i2c_dev->gbphy_dev->dev;
+ struct gb_operation *operation;
+ int ret;
+
+ operation = gb_i2c_operation_create(connection, msgs, msg_count);
+ if (!operation)
+ return -ENOMEM;
+
+ ret = gbphy_runtime_get_sync(gb_i2c_dev->gbphy_dev);
+ if (ret)
+ goto exit_operation_put;
+
+ ret = gb_operation_request_send_sync(operation);
+ if (!ret) {
+ struct gb_i2c_transfer_response *response;
+
+ response = operation->response->payload;
+ gb_i2c_decode_response(msgs, msg_count, response);
+ ret = msg_count;
+ } else if (!gb_i2c_expected_transfer_error(ret)) {
+ dev_err(dev, "transfer operation failed (%d)\n", ret);
+ }
+
+ gbphy_runtime_put_autosuspend(gb_i2c_dev->gbphy_dev);
+
+exit_operation_put:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static int gb_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int msg_count)
+{
+ struct gb_i2c_device *gb_i2c_dev;
+
+ gb_i2c_dev = i2c_get_adapdata(adap);
+
+ return gb_i2c_transfer_operation(gb_i2c_dev, msgs, msg_count);
+}
+
+static u32 gb_i2c_functionality(struct i2c_adapter *adap)
+{
+ struct gb_i2c_device *gb_i2c_dev = i2c_get_adapdata(adap);
+
+ return gb_i2c_dev->functionality;
+}
+
+static const struct i2c_algorithm gb_i2c_algorithm = {
+ .master_xfer = gb_i2c_master_xfer,
+ .functionality = gb_i2c_functionality,
+};
+
+static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct gb_i2c_device *gb_i2c_dev;
+ struct i2c_adapter *adapter;
+ int ret;
+
+ gb_i2c_dev = kzalloc(sizeof(*gb_i2c_dev), GFP_KERNEL);
+ if (!gb_i2c_dev)
+ return -ENOMEM;
+
+ connection =
+ gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto exit_i2cdev_free;
+ }
+
+ gb_i2c_dev->connection = connection;
+ gb_connection_set_data(connection, gb_i2c_dev);
+ gb_i2c_dev->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, gb_i2c_dev);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_i2c_device_setup(gb_i2c_dev);
+ if (ret)
+ goto exit_connection_disable;
+
+	/* Looks good; set up our i2c adapter */
+ adapter = &gb_i2c_dev->adapter;
+ adapter->owner = THIS_MODULE;
+ adapter->class = I2C_CLASS_HWMON;
+ adapter->algo = &gb_i2c_algorithm;
+
+ adapter->dev.parent = &gbphy_dev->dev;
+ snprintf(adapter->name, sizeof(adapter->name), "Greybus i2c adapter");
+ i2c_set_adapdata(adapter, gb_i2c_dev);
+
+ ret = i2c_add_adapter(adapter);
+ if (ret)
+ goto exit_connection_disable;
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_i2cdev_free:
+ kfree(gb_i2c_dev);
+
+ return ret;
+}
+
+static void gb_i2c_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_i2c_device *gb_i2c_dev = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = gb_i2c_dev->connection;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ i2c_del_adapter(&gb_i2c_dev->adapter);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ kfree(gb_i2c_dev);
+}
+
+static const struct gbphy_device_id gb_i2c_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_I2C) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_i2c_id_table);
+
+static struct gbphy_driver i2c_driver = {
+ .name = "i2c",
+ .probe = gb_i2c_probe,
+ .remove = gb_i2c_remove,
+ .id_table = gb_i2c_id_table,
+};
+
+module_gbphy_driver(i2c_driver);
+MODULE_DESCRIPTION("I2C bridge driver for the Greybus 'generic' I2C module");
+MODULE_LICENSE("GPL v2");
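To make the request sizing in gb_i2c_operation_create() concrete, here is a worked example for a common register read (one write message followed by one read message); the struct sizes are left symbolic since they come from the Greybus i2c protocol definitions:

/*
 * Example: a 1-byte register-address write followed by a 2-byte read
 * arrives as two i2c_msgs:
 *
 *   msgs[0]: flags = 0        (write), len = 1  ->  data_out_size = 1
 *   msgs[1]: flags = I2C_M_RD (read),  len = 2  ->  data_in_size  = 2
 *
 * so the request payload is laid out as
 *
 *   sizeof(struct gb_i2c_transfer_request)    header, op_count = 2
 *   + 2 * sizeof(struct gb_i2c_transfer_op)   one descriptor per message
 *   + 1 byte                                  the outbound write data
 *
 * and the response payload is sized for the 2 inbound bytes, which
 * gb_i2c_decode_response() copies back into msgs[1].buf on completion.
 */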
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
new file mode 100644
index 000000000000..e509fdc715db
--- /dev/null
+++ b/drivers/staging/greybus/light.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Lights protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/greybus.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define NAMES_MAX 32
+
+struct gb_channel {
+ u8 id;
+ u32 flags;
+ u32 color;
+ char *color_name;
+ u8 fade_in;
+ u8 fade_out;
+ u32 mode;
+ char *mode_name;
+ struct attribute **attrs;
+ struct attribute_group *attr_group;
+ const struct attribute_group **attr_groups;
+ struct led_classdev *led;
+ struct led_classdev_flash fled;
+ struct led_flash_setting intensity_uA;
+ struct led_flash_setting timeout_us;
+ struct gb_light *light;
+ bool is_registered;
+ bool releasing;
+ bool strobe_state;
+ bool active;
+ struct mutex lock;
+};
+
+struct gb_light {
+ u8 id;
+ char *name;
+ struct gb_lights *glights;
+ u32 flags;
+ u8 channels_count;
+ struct gb_channel *channels;
+ bool has_flash;
+ bool ready;
+#if IS_REACHABLE(CONFIG_V4L2_FLASH_LED_CLASS)
+ struct v4l2_flash *v4l2_flash;
+ struct v4l2_flash *v4l2_flash_ind;
+#endif
+};
+
+struct gb_lights {
+ struct gb_connection *connection;
+ u8 lights_count;
+ struct gb_light *lights;
+ struct mutex lights_lock;
+};
+
+static void gb_lights_channel_free(struct gb_channel *channel);
+
+static struct gb_connection *get_conn_from_channel(struct gb_channel *channel)
+{
+ return channel->light->glights->connection;
+}
+
+static struct gb_connection *get_conn_from_light(struct gb_light *light)
+{
+ return light->glights->connection;
+}
+
+static bool is_channel_flash(struct gb_channel *channel)
+{
+ return !!(channel->mode & (GB_CHANNEL_MODE_FLASH | GB_CHANNEL_MODE_TORCH
+ | GB_CHANNEL_MODE_INDICATOR));
+}
+
+static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
+
+ return container_of(fled_cdev, struct gb_channel, fled);
+}
+
+static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
+{
+ return &channel->fled.led_cdev;
+}
+
+static struct gb_channel *get_channel_from_mode(struct gb_light *light,
+ u32 mode)
+{
+ struct gb_channel *channel;
+ int i;
+
+ for (i = 0; i < light->channels_count; i++) {
+ channel = &light->channels[i];
+ if (channel->mode == mode)
+ return channel;
+ }
+ return NULL;
+}
+
+static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
+ u32 intensity)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_flash_intensity_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.intensity_uA = cpu_to_le32(intensity);
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_INTENSITY,
+ &req, sizeof(req), NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+{
+ u32 intensity;
+
+ /* If the channel is flash we need to get the attached torch channel */
+ if (channel->mode & GB_CHANNEL_MODE_FLASH)
+ channel = get_channel_from_mode(channel->light,
+ GB_CHANNEL_MODE_TORCH);
+
+ if (!channel)
+ return -EINVAL;
+
+	/* For the non-flash (torch) channel, convert brightness to intensity */
+ intensity = channel->intensity_uA.min +
+ (channel->intensity_uA.step * channel->led->brightness);
+
+ return __gb_lights_flash_intensity_set(channel, intensity);
+}
+
+static int gb_lights_color_set(struct gb_channel *channel, u32 color);
+static int gb_lights_fade_set(struct gb_channel *channel);
+
+static void led_lock(struct led_classdev *cdev)
+{
+ mutex_lock(&cdev->led_access);
+}
+
+static void led_unlock(struct led_classdev *cdev)
+{
+ mutex_unlock(&cdev->led_access);
+}
+
+#define gb_lights_fade_attr(__dir) \
+static ssize_t fade_##__dir##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct led_classdev *cdev = dev_get_drvdata(dev); \
+ struct gb_channel *channel = get_channel_from_cdev(cdev); \
+ \
+ return sprintf(buf, "%u\n", channel->fade_##__dir); \
+} \
+ \
+static ssize_t fade_##__dir##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct led_classdev *cdev = dev_get_drvdata(dev); \
+ struct gb_channel *channel = get_channel_from_cdev(cdev); \
+ u8 fade; \
+ int ret; \
+ \
+ led_lock(cdev); \
+ if (led_sysfs_is_disabled(cdev)) { \
+ ret = -EBUSY; \
+ goto unlock; \
+ } \
+ \
+ ret = kstrtou8(buf, 0, &fade); \
+ if (ret < 0) { \
+ dev_err(dev, "could not parse fade value %d\n", ret); \
+ goto unlock; \
+ } \
+ if (channel->fade_##__dir == fade) \
+ goto unlock; \
+ channel->fade_##__dir = fade; \
+ \
+ ret = gb_lights_fade_set(channel); \
+ if (ret < 0) \
+ goto unlock; \
+ \
+ ret = size; \
+unlock: \
+ led_unlock(cdev); \
+ return ret; \
+} \
+static DEVICE_ATTR_RW(fade_##__dir)
+
+gb_lights_fade_attr(in);
+gb_lights_fade_attr(out);
+
+static ssize_t color_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+ return sprintf(buf, "0x%08x\n", channel->color);
+}
+
+static ssize_t color_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+ u32 color;
+ int ret;
+
+ led_lock(cdev);
+ if (led_sysfs_is_disabled(cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ ret = kstrtou32(buf, 0, &color);
+ if (ret < 0) {
+ dev_err(dev, "could not parse color value %d\n", ret);
+ goto unlock;
+ }
+
+ ret = gb_lights_color_set(channel, color);
+ if (ret < 0)
+ goto unlock;
+
+ channel->color = color;
+ ret = size;
+unlock:
+ led_unlock(cdev);
+ return ret;
+}
+static DEVICE_ATTR_RW(color);
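+/*
+ * Illustrative sysfs usage (hypothetical light and channel names, following
+ * the "%s:%s:%s" classdev naming set up in gb_lights_channel_config(), and
+ * only present when the channel advertises the matching flag):
+ *
+ *   echo 0x00ff0000 > /sys/class/leds/front:red:indicator/color
+ *   echo 10         > /sys/class/leds/front:red:indicator/fade_in
+ *
+ * Each write lands in gb_lights_color_set()/gb_lights_fade_set() below and
+ * is forwarded to the module as a Greybus lights operation.
+ */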
+
+static int channel_attr_groups_set(struct gb_channel *channel,
+ struct led_classdev *cdev)
+{
+ int attr = 0;
+ int size = 0;
+
+ if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
+ size++;
+ if (channel->flags & GB_LIGHT_CHANNEL_FADER)
+ size += 2;
+
+ if (!size)
+ return 0;
+
+	/* Set attributes based on the channel flags */
+ channel->attrs = kcalloc(size + 1, sizeof(*channel->attrs), GFP_KERNEL);
+ if (!channel->attrs)
+ return -ENOMEM;
+ channel->attr_group = kzalloc(sizeof(*channel->attr_group), GFP_KERNEL);
+ if (!channel->attr_group)
+ return -ENOMEM;
+ channel->attr_groups = kcalloc(2, sizeof(*channel->attr_groups),
+ GFP_KERNEL);
+ if (!channel->attr_groups)
+ return -ENOMEM;
+
+ if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
+ channel->attrs[attr++] = &dev_attr_color.attr;
+ if (channel->flags & GB_LIGHT_CHANNEL_FADER) {
+ channel->attrs[attr++] = &dev_attr_fade_in.attr;
+ channel->attrs[attr++] = &dev_attr_fade_out.attr;
+ }
+
+ channel->attr_group->attrs = channel->attrs;
+
+ channel->attr_groups[0] = channel->attr_group;
+
+ cdev->groups = channel->attr_groups;
+
+ return 0;
+}
+
+static int gb_lights_fade_set(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_fade_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.fade_in = channel->fade_in;
+ req.fade_out = channel->fade_out;
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FADE,
+ &req, sizeof(req), NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int gb_lights_color_set(struct gb_channel *channel, u32 color)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_color_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.color = cpu_to_le32(color);
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_COLOR,
+ &req, sizeof(req), NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int __gb_lights_led_brightness_set(struct gb_channel *channel)
+{
+ struct gb_lights_set_brightness_request req;
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ bool old_active;
+ int ret;
+
+ mutex_lock(&channel->lock);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ goto out_unlock;
+
+ old_active = channel->active;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.brightness = (u8)channel->led->brightness;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BRIGHTNESS,
+ &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ goto out_pm_put;
+
+ if (channel->led->brightness)
+ channel->active = true;
+ else
+ channel->active = false;
+
+	/* we need to keep the module alive when turning to the active state */
+ if (!old_active && channel->active)
+ goto out_unlock;
+
+ /*
+	 * On the other hand, when going inactive we still hold a reference and
+	 * need to put it so that we can go to suspend.
+ */
+ if (old_active && !channel->active)
+ gb_pm_runtime_put_autosuspend(bundle);
+
+out_pm_put:
+ gb_pm_runtime_put_autosuspend(bundle);
+out_unlock:
+ mutex_unlock(&channel->lock);
+
+ return ret;
+}
+
+static int __gb_lights_brightness_set(struct gb_channel *channel)
+{
+ int ret;
+
+ if (channel->releasing)
+ return 0;
+
+ if (is_channel_flash(channel))
+ ret = __gb_lights_flash_brightness_set(channel);
+ else
+ ret = __gb_lights_led_brightness_set(channel);
+
+ return ret;
+}
+
+static int gb_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+ channel->led->brightness = value;
+
+ return __gb_lights_brightness_set(channel);
+}
+
+static enum led_brightness gb_brightness_get(struct led_classdev *cdev)
+{
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+ return channel->led->brightness;
+}
+
+static int gb_blink_set(struct led_classdev *cdev, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_blink_request req;
+ bool old_active;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ if (!delay_on || !delay_off)
+ return -EINVAL;
+
+ mutex_lock(&channel->lock);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ goto out_unlock;
+
+ old_active = channel->active;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.time_on_ms = cpu_to_le16(*delay_on);
+ req.time_off_ms = cpu_to_le16(*delay_off);
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BLINK, &req,
+ sizeof(req), NULL, 0);
+ if (ret < 0)
+ goto out_pm_put;
+
+ if (*delay_on)
+ channel->active = true;
+ else
+ channel->active = false;
+
+	/* we need to keep the module alive when turning to the active state */
+ if (!old_active && channel->active)
+ goto out_unlock;
+
+ /*
+	 * On the other hand, when going inactive we still hold a reference and
+	 * need to put it so that we can go to suspend.
+ */
+ if (old_active && !channel->active)
+ gb_pm_runtime_put_autosuspend(bundle);
+
+out_pm_put:
+ gb_pm_runtime_put_autosuspend(bundle);
+out_unlock:
+ mutex_unlock(&channel->lock);
+
+ return ret;
+}
+
+static void gb_lights_led_operations_set(struct gb_channel *channel,
+ struct led_classdev *cdev)
+{
+ cdev->brightness_get = gb_brightness_get;
+ cdev->brightness_set_blocking = gb_brightness_set;
+
+ if (channel->flags & GB_LIGHT_CHANNEL_BLINK)
+ cdev->blink_set = gb_blink_set;
+}
+
+#if IS_REACHABLE(CONFIG_V4L2_FLASH_LED_CLASS)
+/* V4L2 specific helpers */
+static const struct v4l2_flash_ops v4l2_flash_ops;
+
+static void __gb_lights_channel_v4l2_config(struct led_flash_setting *channel_s,
+ struct led_flash_setting *v4l2_s)
+{
+ v4l2_s->min = channel_s->min;
+ v4l2_s->max = channel_s->max;
+ v4l2_s->step = channel_s->step;
+	/* For v4l2, val is the default value */
+ v4l2_s->val = channel_s->max;
+}
+
+static int gb_lights_light_v4l2_register(struct gb_light *light)
+{
+ struct gb_connection *connection = get_conn_from_light(light);
+ struct device *dev = &connection->bundle->dev;
+ struct v4l2_flash_config sd_cfg = { {0} }, sd_cfg_ind = { {0} };
+ struct led_classdev_flash *fled;
+ struct led_classdev *iled = NULL;
+ struct gb_channel *channel_torch, *channel_ind, *channel_flash;
+
+ channel_torch = get_channel_from_mode(light, GB_CHANNEL_MODE_TORCH);
+ if (channel_torch)
+ __gb_lights_channel_v4l2_config(&channel_torch->intensity_uA,
+ &sd_cfg.intensity);
+
+ channel_ind = get_channel_from_mode(light, GB_CHANNEL_MODE_INDICATOR);
+ if (channel_ind) {
+ __gb_lights_channel_v4l2_config(&channel_ind->intensity_uA,
+ &sd_cfg_ind.intensity);
+ iled = &channel_ind->fled.led_cdev;
+ }
+
+ channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
+ if (!channel_flash) {
+ dev_err(dev, "failed to get flash channel from mode\n");
+ return -EINVAL;
+ }
+
+ fled = &channel_flash->fled;
+
+ snprintf(sd_cfg.dev_name, sizeof(sd_cfg.dev_name), "%s", light->name);
+ snprintf(sd_cfg_ind.dev_name, sizeof(sd_cfg_ind.dev_name),
+ "%s indicator", light->name);
+
+	/* Set the possible fault values; in our case, all faults are reported */
+ sd_cfg.flash_faults = LED_FAULT_OVER_VOLTAGE | LED_FAULT_TIMEOUT |
+ LED_FAULT_OVER_TEMPERATURE | LED_FAULT_SHORT_CIRCUIT |
+ LED_FAULT_OVER_CURRENT | LED_FAULT_INDICATOR |
+ LED_FAULT_UNDER_VOLTAGE | LED_FAULT_INPUT_VOLTAGE |
+ LED_FAULT_LED_OVER_TEMPERATURE;
+
+ light->v4l2_flash = v4l2_flash_init(dev, NULL, fled, &v4l2_flash_ops,
+ &sd_cfg);
+ if (IS_ERR(light->v4l2_flash))
+ return PTR_ERR(light->v4l2_flash);
+
+ if (channel_ind) {
+ light->v4l2_flash_ind =
+ v4l2_flash_indicator_init(dev, NULL, iled, &sd_cfg_ind);
+ if (IS_ERR(light->v4l2_flash_ind)) {
+ v4l2_flash_release(light->v4l2_flash);
+ return PTR_ERR(light->v4l2_flash_ind);
+ }
+ }
+
+ return 0;
+}
+
+static void gb_lights_light_v4l2_unregister(struct gb_light *light)
+{
+ v4l2_flash_release(light->v4l2_flash_ind);
+ v4l2_flash_release(light->v4l2_flash);
+}
+#else
+static int gb_lights_light_v4l2_register(struct gb_light *light)
+{
+ struct gb_connection *connection = get_conn_from_light(light);
+
+ dev_err(&connection->bundle->dev, "no support for v4l2 subdevices\n");
+ return 0;
+}
+
+static void gb_lights_light_v4l2_unregister(struct gb_light *light)
+{
+}
+#endif
+
+#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
+/* Flash specific operations */
+static int gb_lights_flash_intensity_set(struct led_classdev_flash *fcdev,
+ u32 brightness)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ int ret;
+
+ ret = __gb_lights_flash_intensity_set(channel, brightness);
+ if (ret < 0)
+ return ret;
+
+ fcdev->brightness.val = brightness;
+
+ return 0;
+}
+
+static int gb_lights_flash_intensity_get(struct led_classdev_flash *fcdev,
+ u32 *brightness)
+{
+ *brightness = fcdev->brightness.val;
+
+ return 0;
+}
+
+static int gb_lights_flash_strobe_set(struct led_classdev_flash *fcdev,
+ bool state)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_flash_strobe_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.state = state ? 1 : 0;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_STROBE,
+ &req, sizeof(req), NULL, 0);
+ if (!ret)
+ channel->strobe_state = state;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int gb_lights_flash_strobe_get(struct led_classdev_flash *fcdev,
+ bool *state)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+
+ *state = channel->strobe_state;
+ return 0;
+}
+
+static int gb_lights_flash_timeout_set(struct led_classdev_flash *fcdev,
+ u32 timeout)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_flash_timeout_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.timeout_us = cpu_to_le32(timeout);
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT,
+ &req, sizeof(req), NULL, 0);
+ if (!ret)
+ fcdev->timeout.val = timeout;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int gb_lights_flash_fault_get(struct led_classdev_flash *fcdev,
+ u32 *fault)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_get_flash_fault_request req;
+ struct gb_lights_get_flash_fault_response resp;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_FLASH_FAULT,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (!ret)
+ *fault = le32_to_cpu(resp.fault);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static const struct led_flash_ops gb_lights_flash_ops = {
+ .flash_brightness_set = gb_lights_flash_intensity_set,
+ .flash_brightness_get = gb_lights_flash_intensity_get,
+ .strobe_set = gb_lights_flash_strobe_set,
+ .strobe_get = gb_lights_flash_strobe_get,
+ .timeout_set = gb_lights_flash_timeout_set,
+ .fault_get = gb_lights_flash_fault_get,
+};
+
+static int __gb_lights_channel_torch_attach(struct gb_channel *channel,
+ struct gb_channel *channel_torch)
+{
+ char *name;
+
+ /* we can only attach torch to a flash channel */
+ if (!(channel->mode & GB_CHANNEL_MODE_FLASH))
+ return 0;
+
+	/* Propagate the torch max brightness to the flash classdev */
+ channel->led->max_brightness = channel_torch->led->max_brightness;
+
+ /* append mode name to flash name */
+ name = kasprintf(GFP_KERNEL, "%s_%s", channel->led->name,
+ channel_torch->mode_name);
+ if (!name)
+ return -ENOMEM;
+ kfree(channel->led->name);
+ channel->led->name = name;
+
+ channel_torch->led = channel->led;
+
+ return 0;
+}
+
+static int __gb_lights_flash_led_register(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct led_classdev_flash *fled = &channel->fled;
+ struct led_flash_setting *fset;
+ struct gb_channel *channel_torch;
+ int ret;
+
+ fled->ops = &gb_lights_flash_ops;
+
+ fled->led_cdev.flags |= LED_DEV_CAP_FLASH;
+
+ fset = &fled->brightness;
+ fset->min = channel->intensity_uA.min;
+ fset->max = channel->intensity_uA.max;
+ fset->step = channel->intensity_uA.step;
+ fset->val = channel->intensity_uA.max;
+
+	/* Only the flash mode has the timeout constraint settings */
+ if (channel->mode & GB_CHANNEL_MODE_FLASH) {
+ fset = &fled->timeout;
+ fset->min = channel->timeout_us.min;
+ fset->max = channel->timeout_us.max;
+ fset->step = channel->timeout_us.step;
+ fset->val = channel->timeout_us.max;
+ }
+
+ /*
+	 * If the light has a torch mode channel, that channel becomes the led
+	 * classdev of the flash classdev registered above.
+ */
+ channel_torch = get_channel_from_mode(channel->light,
+ GB_CHANNEL_MODE_TORCH);
+ if (channel_torch) {
+ ret = __gb_lights_channel_torch_attach(channel, channel_torch);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = led_classdev_flash_register(&connection->bundle->dev, fled);
+ if (ret < 0)
+ goto fail;
+
+ channel->is_registered = true;
+ return 0;
+fail:
+ channel->led = NULL;
+ return ret;
+}
+
+static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
+{
+ if (!channel->is_registered)
+ return;
+
+ led_classdev_flash_unregister(&channel->fled);
+}
+
+static int gb_lights_channel_flash_config(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_lights_get_channel_flash_config_request req;
+ struct gb_lights_get_channel_flash_config_response conf;
+ struct led_flash_setting *fset;
+ int ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+
+ ret = gb_operation_sync(connection,
+ GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG,
+ &req, sizeof(req), &conf, sizeof(conf));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Intensity constraints for flash related modes: flash, torch,
+ * indicator. They will be needed for v4l2 registration.
+ */
+ fset = &channel->intensity_uA;
+ fset->min = le32_to_cpu(conf.intensity_min_uA);
+ fset->max = le32_to_cpu(conf.intensity_max_uA);
+ fset->step = le32_to_cpu(conf.intensity_step_uA);
+
+ /*
+ * On flash type, max brightness is set as the number of intensity steps
+ * available.
+ */
+ channel->led->max_brightness = (fset->max - fset->min) / fset->step;
+
+	/* Only the flash mode has the timeout constraint settings */
+ if (channel->mode & GB_CHANNEL_MODE_FLASH) {
+ fset = &channel->timeout_us;
+ fset->min = le32_to_cpu(conf.timeout_min_us);
+ fset->max = le32_to_cpu(conf.timeout_max_us);
+ fset->step = le32_to_cpu(conf.timeout_step_us);
+ }
+
+ return 0;
+}
+#else
+static int gb_lights_channel_flash_config(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+
+ dev_err(&connection->bundle->dev, "no support for flash devices\n");
+ return 0;
+}
+
+static int __gb_lights_flash_led_register(struct gb_channel *channel)
+{
+ return 0;
+}
+
+static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
+{
+}
+
+#endif
+
+static int __gb_lights_led_register(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct led_classdev *cdev = get_channel_cdev(channel);
+ int ret;
+
+ ret = led_classdev_register(&connection->bundle->dev, cdev);
+ if (ret < 0)
+ channel->led = NULL;
+ else
+ channel->is_registered = true;
+ return ret;
+}
+
+static int gb_lights_channel_register(struct gb_channel *channel)
+{
+	/* Normal LED channel: just register the led classdev and we are done */
+ if (!is_channel_flash(channel))
+ return __gb_lights_led_register(channel);
+
+ /*
+	 * Flash type channels need more work: register a flash classdev (the
+	 * indicator is registered as a flash classdev as well), while the torch
+	 * channel is handled as the led classdev of that flash classdev.
+ */
+ if (!(channel->mode & GB_CHANNEL_MODE_TORCH))
+ return __gb_lights_flash_led_register(channel);
+
+ return 0;
+}
+
+static void __gb_lights_led_unregister(struct gb_channel *channel)
+{
+ struct led_classdev *cdev = get_channel_cdev(channel);
+
+ if (!channel->is_registered)
+ return;
+
+ led_classdev_unregister(cdev);
+ kfree(cdev->name);
+ cdev->name = NULL;
+ channel->led = NULL;
+}
+
+static void gb_lights_channel_unregister(struct gb_channel *channel)
+{
+	/* As with registration, handle flash and non-flash channels differently */
+ if (!is_channel_flash(channel)) {
+ __gb_lights_led_unregister(channel);
+ return;
+ }
+
+ if (channel->mode & GB_CHANNEL_MODE_TORCH)
+ __gb_lights_led_unregister(channel);
+ else
+ __gb_lights_flash_led_unregister(channel);
+}
+
+static int gb_lights_channel_config(struct gb_light *light,
+ struct gb_channel *channel)
+{
+ struct gb_lights_get_channel_config_response conf;
+ struct gb_lights_get_channel_config_request req;
+ struct gb_connection *connection = get_conn_from_light(light);
+ struct led_classdev *cdev = get_channel_cdev(channel);
+ char *name;
+ int ret;
+
+ req.light_id = light->id;
+ req.channel_id = channel->id;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG,
+ &req, sizeof(req), &conf, sizeof(conf));
+ if (ret < 0)
+ return ret;
+
+ channel->light = light;
+ channel->mode = le32_to_cpu(conf.mode);
+ channel->flags = le32_to_cpu(conf.flags);
+ channel->color = le32_to_cpu(conf.color);
+ channel->color_name = kstrndup(conf.color_name, NAMES_MAX, GFP_KERNEL);
+ if (!channel->color_name)
+ return -ENOMEM;
+ channel->mode_name = kstrndup(conf.mode_name, NAMES_MAX, GFP_KERNEL);
+ if (!channel->mode_name)
+ return -ENOMEM;
+
+ channel->led = cdev;
+
+ name = kasprintf(GFP_KERNEL, "%s:%s:%s", light->name,
+ channel->color_name, channel->mode_name);
+ if (!name)
+ return -ENOMEM;
+
+ cdev->name = name;
+
+ cdev->max_brightness = conf.max_brightness;
+
+ ret = channel_attr_groups_set(channel, cdev);
+ if (ret < 0)
+ return ret;
+
+ gb_lights_led_operations_set(channel, cdev);
+
+ /*
+	 * If it is not a flash related channel (flash, torch or indicator) we
+	 * are done here. Otherwise, continue and fetch the flash related
+	 * configuration.
+ */
+ if (!is_channel_flash(channel))
+ return ret;
+
+ light->has_flash = true;
+
+ return gb_lights_channel_flash_config(channel);
+}
+
+static int gb_lights_light_config(struct gb_lights *glights, u8 id)
+{
+ struct gb_light *light = &glights->lights[id];
+ struct gb_lights_get_light_config_request req;
+ struct gb_lights_get_light_config_response conf;
+ int ret;
+ int i;
+
+ light->glights = glights;
+ light->id = id;
+
+ req.id = id;
+
+ ret = gb_operation_sync(glights->connection,
+ GB_LIGHTS_TYPE_GET_LIGHT_CONFIG,
+ &req, sizeof(req), &conf, sizeof(conf));
+ if (ret < 0)
+ return ret;
+
+ if (!conf.channel_count)
+ return -EINVAL;
+ if (!strlen(conf.name))
+ return -EINVAL;
+
+ light->channels_count = conf.channel_count;
+ light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
+ if (!light->name)
+ return -ENOMEM;
+ light->channels = kcalloc(light->channels_count,
+ sizeof(struct gb_channel), GFP_KERNEL);
+ if (!light->channels)
+ return -ENOMEM;
+
+ /* First we collect all the configurations for all channels */
+ for (i = 0; i < light->channels_count; i++) {
+ light->channels[i].id = i;
+ ret = gb_lights_channel_config(light, &light->channels[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_lights_light_register(struct gb_light *light)
+{
+ int ret;
+ int i;
+
+ /*
+	 * Now that all channel configurations have been fetched, register the
+	 * led classdev, the flash classdev and, if a flash device was found,
+	 * the v4l2 flash subdevices.
+ */
+ for (i = 0; i < light->channels_count; i++) {
+ ret = gb_lights_channel_register(&light->channels[i]);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&light->channels[i].lock);
+ }
+
+ light->ready = true;
+
+ if (light->has_flash) {
+ ret = gb_lights_light_v4l2_register(light);
+ if (ret < 0) {
+ light->has_flash = false;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void gb_lights_channel_free(struct gb_channel *channel)
+{
+ kfree(channel->attrs);
+ kfree(channel->attr_group);
+ kfree(channel->attr_groups);
+ kfree(channel->color_name);
+ kfree(channel->mode_name);
+ mutex_destroy(&channel->lock);
+}
+
+static void gb_lights_channel_release(struct gb_channel *channel)
+{
+ channel->releasing = true;
+
+ gb_lights_channel_unregister(channel);
+
+ gb_lights_channel_free(channel);
+}
+
+static void gb_lights_light_release(struct gb_light *light)
+{
+ int i;
+
+ light->ready = false;
+
+ if (light->has_flash)
+ gb_lights_light_v4l2_unregister(light);
+ light->has_flash = false;
+
+ for (i = 0; i < light->channels_count; i++)
+ gb_lights_channel_release(&light->channels[i]);
+ light->channels_count = 0;
+
+ kfree(light->channels);
+ light->channels = NULL;
+ kfree(light->name);
+ light->name = NULL;
+}
+
+static void gb_lights_release(struct gb_lights *glights)
+{
+ int i;
+
+ if (!glights)
+ return;
+
+ mutex_lock(&glights->lights_lock);
+ if (!glights->lights)
+ goto free_glights;
+
+ for (i = 0; i < glights->lights_count; i++)
+ gb_lights_light_release(&glights->lights[i]);
+
+ kfree(glights->lights);
+
+free_glights:
+ mutex_unlock(&glights->lights_lock);
+ mutex_destroy(&glights->lights_lock);
+ kfree(glights);
+}
+
+static int gb_lights_get_count(struct gb_lights *glights)
+{
+ struct gb_lights_get_lights_response resp;
+ int ret;
+
+ ret = gb_operation_sync(glights->connection, GB_LIGHTS_TYPE_GET_LIGHTS,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (!resp.lights_count)
+ return -EINVAL;
+
+ glights->lights_count = resp.lights_count;
+
+ return 0;
+}
+
+static int gb_lights_create_all(struct gb_lights *glights)
+{
+ struct gb_connection *connection = glights->connection;
+ int ret;
+ int i;
+
+ mutex_lock(&glights->lights_lock);
+ ret = gb_lights_get_count(glights);
+ if (ret < 0)
+ goto out;
+
+ glights->lights = kcalloc(glights->lights_count,
+ sizeof(struct gb_light), GFP_KERNEL);
+ if (!glights->lights) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < glights->lights_count; i++) {
+ ret = gb_lights_light_config(glights, i);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+				"Failed to configure lights device\n");
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&glights->lights_lock);
+ return ret;
+}
+
+static int gb_lights_register_all(struct gb_lights *glights)
+{
+ struct gb_connection *connection = glights->connection;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&glights->lights_lock);
+ for (i = 0; i < glights->lights_count; i++) {
+ ret = gb_lights_light_register(&glights->lights[i]);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+				"Failed to enable lights device\n");
+ break;
+ }
+ }
+
+ mutex_unlock(&glights->lights_lock);
+ return ret;
+}
+
+static int gb_lights_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct device *dev = &connection->bundle->dev;
+ struct gb_lights *glights = gb_connection_get_data(connection);
+ struct gb_light *light;
+ struct gb_message *request;
+ struct gb_lights_event_request *payload;
+ int ret = 0;
+ u8 light_id;
+ u8 event;
+
+ if (op->type != GB_LIGHTS_TYPE_EVENT) {
+ dev_err(dev, "Unsupported unsolicited event: %u\n", op->type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ dev_err(dev, "Wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+ light_id = payload->light_id;
+
+ if (light_id >= glights->lights_count ||
+ !glights->lights[light_id].ready) {
+ dev_err(dev, "Event received for unconfigured light id: %d\n",
+ light_id);
+ return -EINVAL;
+ }
+
+ event = payload->event;
+
+ if (event & GB_LIGHTS_LIGHT_CONFIG) {
+ light = &glights->lights[light_id];
+
+ mutex_lock(&glights->lights_lock);
+ gb_lights_light_release(light);
+ ret = gb_lights_light_config(glights, light_id);
+ if (!ret)
+ ret = gb_lights_light_register(light);
+ if (ret < 0)
+ gb_lights_light_release(light);
+ mutex_unlock(&glights->lights_lock);
+ }
+
+ return ret;
+}
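+
+/*
+ * Note on the event flow above: a GB_LIGHTS_LIGHT_CONFIG event drops the
+ * current representation of the light, fetches its configuration again and
+ * re-registers it; lights_lock serializes this against the create, register
+ * and release paths.
+ */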
+
+static int gb_lights_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_lights *glights;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LIGHTS)
+ return -ENODEV;
+
+ glights = kzalloc(sizeof(*glights), GFP_KERNEL);
+ if (!glights)
+ return -ENOMEM;
+
+ mutex_init(&glights->lights_lock);
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_lights_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto out;
+ }
+
+ glights->connection = connection;
+ gb_connection_set_data(connection, glights);
+
+ greybus_set_drvdata(bundle, glights);
+
+ /* We aren't ready to receive an incoming request yet */
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto error_connection_destroy;
+
+ /*
+ * Setup all the lights devices over this connection, if anything goes
+ * wrong tear down all lights
+ */
+ ret = gb_lights_create_all(glights);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ /* We are ready to receive an incoming request now, enable RX as well */
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto error_connection_disable;
+
+ /* Enable & register lights */
+ ret = gb_lights_register_all(glights);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+error_connection_disable:
+ gb_connection_disable(connection);
+error_connection_destroy:
+ gb_connection_destroy(connection);
+out:
+ gb_lights_release(glights);
+ return ret;
+}
+
+static void gb_lights_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_lights *glights = greybus_get_drvdata(bundle);
+
+ if (gb_pm_runtime_get_sync(bundle))
+ gb_pm_runtime_get_noresume(bundle);
+
+ gb_connection_disable(glights->connection);
+ gb_connection_destroy(glights->connection);
+
+ gb_lights_release(glights);
+}
+
+static const struct greybus_bundle_id gb_lights_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LIGHTS) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_lights_id_table);
+
+static struct greybus_driver gb_lights_driver = {
+ .name = "lights",
+ .probe = gb_lights_probe,
+ .disconnect = gb_lights_disconnect,
+ .id_table = gb_lights_id_table,
+};
+module_greybus_driver(gb_lights_driver);
+
+MODULE_DESCRIPTION("Greybus Lights protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
new file mode 100644
index 000000000000..57dcf9453bf1
--- /dev/null
+++ b/drivers/staging/greybus/log.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus driver for the log protocol
+ *
+ * Copyright 2016 Google Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+#include <linux/greybus.h>
+
+struct gb_log {
+ struct gb_connection *connection;
+};
+
+static int gb_log_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct device *dev = &connection->bundle->dev;
+ struct gb_log_send_log_request *receive;
+ u16 len;
+
+ if (op->type != GB_LOG_TYPE_SEND_LOG) {
+ dev_err(dev, "unknown request type 0x%02x\n", op->type);
+ return -EINVAL;
+ }
+
+ /* Verify size of payload */
+ if (op->request->payload_size < sizeof(*receive)) {
+ dev_err(dev, "log request too small (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*receive));
+ return -EINVAL;
+ }
+ receive = op->request->payload;
+ len = le16_to_cpu(receive->len);
+ if (len != (op->request->payload_size - sizeof(*receive))) {
+ dev_err(dev, "log request wrong size %d vs %zu\n", len,
+ (op->request->payload_size - sizeof(*receive)));
+ return -EINVAL;
+ }
+ if (len == 0) {
+ dev_err(dev, "log request of 0 bytes?\n");
+ return -EINVAL;
+ }
+
+ if (len > GB_LOG_MAX_LEN) {
+ dev_err(dev, "log request too big: %d\n", len);
+ return -EINVAL;
+ }
+
+ /* Ensure the buffer is 0 terminated */
+ receive->msg[len - 1] = '\0';
+
+ /*
+ * Print with dev_dbg() so that it can be easily turned off using
+ * dynamic debugging (and prevent any DoS)
+ */
+ dev_dbg(dev, "%s", receive->msg);
+
+ return 0;
+}
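+
+/*
+ * Illustrative example of the checks above: for a 10-byte log message the
+ * sender sets receive->len to 10 and the payload_size is sizeof(*receive)
+ * plus 10, so all three length tests pass and the last byte of msg is
+ * forced to '\0' before the message is printed.
+ */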
+
+static int gb_log_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_log *log;
+ int retval;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOG)
+ return -ENODEV;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_log_request_handler);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto error_free;
+ }
+
+ log->connection = connection;
+ greybus_set_drvdata(bundle, log);
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto error_connection_destroy;
+
+ return 0;
+
+error_connection_destroy:
+ gb_connection_destroy(connection);
+error_free:
+ kfree(log);
+ return retval;
+}
+
+static void gb_log_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_log *log = greybus_get_drvdata(bundle);
+ struct gb_connection *connection = log->connection;
+
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+
+ kfree(log);
+}
+
+static const struct greybus_bundle_id gb_log_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOG) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_log_id_table);
+
+static struct greybus_driver gb_log_driver = {
+ .name = "log",
+ .probe = gb_log_probe,
+ .disconnect = gb_log_disconnect,
+ .id_table = gb_log_id_table,
+};
+module_greybus_driver(gb_log_driver);
+
+MODULE_DESCRIPTION("Greybus driver for the log protocol");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
new file mode 100644
index 000000000000..1f19323b0e1a
--- /dev/null
+++ b/drivers/staging/greybus/loopback.c
@@ -0,0 +1,1179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loopback bridge driver for the Greybus loopback module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/sizes.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/list_sort.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/greybus.h>
+#include <asm/div64.h>
+
+#define NSEC_PER_DAY 86400000000000ULL
+
+struct gb_loopback_stats {
+ u32 min;
+ u32 max;
+ u64 sum;
+ u32 count;
+};
+
+struct gb_loopback_device {
+ struct dentry *root;
+ u32 count;
+ size_t size_max;
+
+ /* We need to take a lock in atomic context */
+ spinlock_t lock;
+ wait_queue_head_t wq;
+};
+
+static struct gb_loopback_device gb_dev;
+
+struct gb_loopback_async_operation {
+ struct gb_loopback *gb;
+ struct gb_operation *operation;
+ ktime_t ts;
+ int (*completion)(struct gb_loopback_async_operation *op_async);
+};
+
+struct gb_loopback {
+ struct gb_connection *connection;
+
+ struct dentry *file;
+ struct kfifo kfifo_lat;
+ struct mutex mutex;
+ struct task_struct *task;
+ struct device *dev;
+ wait_queue_head_t wq;
+ wait_queue_head_t wq_completion;
+ atomic_t outstanding_operations;
+
+ /* Per connection stats */
+ ktime_t ts;
+ struct gb_loopback_stats latency;
+ struct gb_loopback_stats throughput;
+ struct gb_loopback_stats requests_per_second;
+ struct gb_loopback_stats apbridge_unipro_latency;
+ struct gb_loopback_stats gbphy_firmware_latency;
+
+ int type;
+ int async;
+ int id;
+ u32 size;
+ u32 iteration_max;
+ u32 iteration_count;
+ int us_wait;
+ u32 error;
+ u32 requests_completed;
+ u32 requests_timedout;
+ u32 timeout;
+ u32 jiffy_timeout;
+ u32 timeout_min;
+ u32 timeout_max;
+ u32 outstanding_operations_max;
+ u64 elapsed_nsecs;
+ u32 apbridge_latency_ts;
+ u32 gbphy_latency_ts;
+
+ u32 send_count;
+};
+
+static struct class loopback_class = {
+ .name = "gb_loopback",
+};
+
+static DEFINE_IDA(loopback_ida);
+
+/* Min/max values in jiffies */
+#define GB_LOOPBACK_TIMEOUT_MIN 1
+#define GB_LOOPBACK_TIMEOUT_MAX 10000
+
+#define GB_LOOPBACK_FIFO_DEFAULT 8192
+
+static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
+module_param(kfifo_depth, uint, 0444);
+
+/* Maximum size of any one send data buffer we support */
+#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
+
+#define GB_LOOPBACK_US_WAIT_MAX 1000000
+
+/* interface sysfs attributes */
+#define gb_loopback_ro_attr(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%u\n", gb->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+#define gb_loopback_ro_stats_attr(name, field, type) \
+static ssize_t name##_##field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ /* Report 0 for min and max if no transfer succeeded */ \
+ if (!gb->requests_completed) \
+ return sprintf(buf, "0\n"); \
+ return sprintf(buf, "%" #type "\n", gb->name.field); \
+} \
+static DEVICE_ATTR_RO(name##_##field)
+
+#define gb_loopback_ro_avg_attr(name) \
+static ssize_t name##_avg_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback_stats *stats; \
+ struct gb_loopback *gb; \
+ u64 avg, rem; \
+ u32 count; \
+ gb = dev_get_drvdata(dev); \
+ stats = &gb->name; \
+ count = stats->count ? stats->count : 1; \
+ avg = stats->sum + count / 2000000; /* round closest */ \
+ rem = do_div(avg, count); \
+ rem *= 1000000; \
+ do_div(rem, count); \
+ return sprintf(buf, "%llu.%06u\n", avg, (u32)rem); \
+} \
+static DEVICE_ATTR_RO(name##_avg)
+
+#define gb_loopback_stats_attrs(field) \
+ gb_loopback_ro_stats_attr(field, min, u); \
+ gb_loopback_ro_stats_attr(field, max, u); \
+ gb_loopback_ro_avg_attr(field)
+
+#define gb_loopback_attr(field, type) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%" #type "\n", gb->field); \
+} \
+static ssize_t field##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t len) \
+{ \
+ int ret; \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ mutex_lock(&gb->mutex); \
+ ret = sscanf(buf, "%"#type, &gb->field); \
+ if (ret != 1) \
+ len = -EINVAL; \
+ else \
+		gb_loopback_check_attr(gb);			\
+ mutex_unlock(&gb->mutex); \
+ return len; \
+} \
+static DEVICE_ATTR_RW(field)
+
+#define gb_dev_loopback_ro_attr(field, conn) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%u\n", gb->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+#define gb_dev_loopback_rw_attr(field, type) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%" #type "\n", gb->field); \
+} \
+static ssize_t field##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t len) \
+{ \
+ int ret; \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ mutex_lock(&gb->mutex); \
+ ret = sscanf(buf, "%"#type, &gb->field); \
+ if (ret != 1) \
+ len = -EINVAL; \
+ else \
+ gb_loopback_check_attr(gb); \
+ mutex_unlock(&gb->mutex); \
+ return len; \
+} \
+static DEVICE_ATTR_RW(field)
+
+static void gb_loopback_reset_stats(struct gb_loopback *gb);
+static void gb_loopback_check_attr(struct gb_loopback *gb)
+{
+ if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
+ gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
+ if (gb->size > gb_dev.size_max)
+ gb->size = gb_dev.size_max;
+ gb->requests_timedout = 0;
+ gb->requests_completed = 0;
+ gb->iteration_count = 0;
+ gb->send_count = 0;
+ gb->error = 0;
+
+ if (kfifo_depth < gb->iteration_max) {
+ dev_warn(gb->dev,
+ "cannot log bytes %u kfifo_depth %u\n",
+ gb->iteration_max, kfifo_depth);
+ }
+ kfifo_reset_out(&gb->kfifo_lat);
+
+ switch (gb->type) {
+ case GB_LOOPBACK_TYPE_PING:
+ case GB_LOOPBACK_TYPE_TRANSFER:
+ case GB_LOOPBACK_TYPE_SINK:
+ gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
+ if (!gb->jiffy_timeout)
+ gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
+ else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
+ gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
+ gb_loopback_reset_stats(gb);
+ wake_up(&gb->wq);
+ break;
+ default:
+ gb->type = 0;
+ break;
+ }
+}
+
+/* Time to send and receive one message */
+gb_loopback_stats_attrs(latency);
+/* Number of requests sent per second on this cport */
+gb_loopback_stats_attrs(requests_per_second);
+/* Quantity of data sent and received on this cport */
+gb_loopback_stats_attrs(throughput);
+/* Latency across the UniPro link from APBridge's perspective */
+gb_loopback_stats_attrs(apbridge_unipro_latency);
+/* Firmware induced overhead in the GPBridge */
+gb_loopback_stats_attrs(gbphy_firmware_latency);
+
+/* Number of errors encountered during loop */
+gb_loopback_ro_attr(error);
+/* Number of requests successfully completed async */
+gb_loopback_ro_attr(requests_completed);
+/* Number of requests timed out async */
+gb_loopback_ro_attr(requests_timedout);
+/* Timeout minimum in microseconds */
+gb_loopback_ro_attr(timeout_min);
+/* Timeout maximum in microseconds */
+gb_loopback_ro_attr(timeout_max);
+
+/*
+ * Type of loopback message to send based on protocol type definitions
+ * 0 => Don't send message
+ * 2 => Send ping message continuously (message without payload)
+ * 3 => Send transfer message continuously (message with payload,
+ * payload returned in response)
+ * 4 => Send a sink message (message with payload, no payload in response)
+ */
+gb_dev_loopback_rw_attr(type, d);
+/* Size of transfer message payload: 0-4096 bytes */
+gb_dev_loopback_rw_attr(size, u);
+/* Time to wait between two messages: 0-1000 ms */
+gb_dev_loopback_rw_attr(us_wait, d);
+/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
+gb_dev_loopback_rw_attr(iteration_max, u);
+/* The current index of the for (i = 0; i < iteration_max; i++) loop */
+gb_dev_loopback_ro_attr(iteration_count, false);
+/* A flag to indicate synchronous or asynchronous operations */
+gb_dev_loopback_rw_attr(async, u);
+/* Timeout of an individual asynchronous request */
+gb_dev_loopback_rw_attr(timeout, u);
+/* Maximum number of in-flight operations before back-off */
+gb_dev_loopback_rw_attr(outstanding_operations_max, u);
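+
+/*
+ * Typical user-space usage (illustrative; the exact sysfs path depends on
+ * the gb_loopback%d name assigned at probe time): write size, iteration_max
+ * and, optionally, async and timeout first, then write a non-zero type to
+ * wake the worker thread; progress can be followed through iteration_count
+ * and the results through the *_min/*_max/*_avg attributes above.
+ */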
+
+static struct attribute *loopback_attrs[] = {
+ &dev_attr_latency_min.attr,
+ &dev_attr_latency_max.attr,
+ &dev_attr_latency_avg.attr,
+ &dev_attr_requests_per_second_min.attr,
+ &dev_attr_requests_per_second_max.attr,
+ &dev_attr_requests_per_second_avg.attr,
+ &dev_attr_throughput_min.attr,
+ &dev_attr_throughput_max.attr,
+ &dev_attr_throughput_avg.attr,
+ &dev_attr_apbridge_unipro_latency_min.attr,
+ &dev_attr_apbridge_unipro_latency_max.attr,
+ &dev_attr_apbridge_unipro_latency_avg.attr,
+ &dev_attr_gbphy_firmware_latency_min.attr,
+ &dev_attr_gbphy_firmware_latency_max.attr,
+ &dev_attr_gbphy_firmware_latency_avg.attr,
+ &dev_attr_type.attr,
+ &dev_attr_size.attr,
+ &dev_attr_us_wait.attr,
+ &dev_attr_iteration_count.attr,
+ &dev_attr_iteration_max.attr,
+ &dev_attr_async.attr,
+ &dev_attr_error.attr,
+ &dev_attr_requests_completed.attr,
+ &dev_attr_requests_timedout.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_outstanding_operations_max.attr,
+ &dev_attr_timeout_min.attr,
+ &dev_attr_timeout_max.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(loopback);
+
+static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
+
+static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
+{
+ do_div(elapsed_nsecs, NSEC_PER_USEC);
+ return elapsed_nsecs;
+}
+
+static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
+{
+ if (t2 > t1)
+ return t2 - t1;
+ else
+ return NSEC_PER_DAY - t2 + t1;
+}
+
+static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
+{
+ return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
+}
+
+static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
+ void *request, int request_size,
+ void *response, int response_size)
+{
+ struct gb_operation *operation;
+ ktime_t ts, te;
+ int ret;
+
+ ts = ktime_get();
+ operation = gb_operation_create(gb->connection, type, request_size,
+ response_size, GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret) {
+ dev_err(&gb->connection->bundle->dev,
+ "synchronous operation failed: %d\n", ret);
+ goto out_put_operation;
+ } else {
+ if (response_size == operation->response->payload_size) {
+ memcpy(response, operation->response->payload,
+ response_size);
+ } else {
+ dev_err(&gb->connection->bundle->dev,
+ "response size %zu expected %d\n",
+ operation->response->payload_size,
+ response_size);
+ ret = -EINVAL;
+ goto out_put_operation;
+ }
+ }
+
+ te = ktime_get();
+
+ /* Calculate the total time the message took */
+ gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);
+
+out_put_operation:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static void gb_loopback_async_wait_all(struct gb_loopback *gb)
+{
+ wait_event(gb->wq_completion,
+ !atomic_read(&gb->outstanding_operations));
+}
+
+static void gb_loopback_async_operation_callback(struct gb_operation *operation)
+{
+ struct gb_loopback_async_operation *op_async;
+ struct gb_loopback *gb;
+ ktime_t te;
+ int result;
+
+ te = ktime_get();
+ result = gb_operation_result(operation);
+ op_async = gb_operation_get_data(operation);
+ gb = op_async->gb;
+
+ mutex_lock(&gb->mutex);
+
+ if (!result && op_async->completion)
+ result = op_async->completion(op_async);
+
+ if (!result) {
+ gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
+ } else {
+ gb->error++;
+ if (result == -ETIMEDOUT)
+ gb->requests_timedout++;
+ }
+
+ gb->iteration_count++;
+ gb_loopback_calculate_stats(gb, result);
+
+ mutex_unlock(&gb->mutex);
+
+ dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
+ operation->id);
+
+ /* Wake up waiters */
+ atomic_dec(&op_async->gb->outstanding_operations);
+ wake_up(&gb->wq_completion);
+
+ /* Release resources */
+ gb_operation_put(operation);
+ kfree(op_async);
+}
+
+static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
+ void *request, int request_size,
+ int response_size,
+ void *completion)
+{
+ struct gb_loopback_async_operation *op_async;
+ struct gb_operation *operation;
+ int ret;
+
+ op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
+ if (!op_async)
+ return -ENOMEM;
+
+ operation = gb_operation_create(gb->connection, type, request_size,
+ response_size, GFP_KERNEL);
+ if (!operation) {
+ kfree(op_async);
+ return -ENOMEM;
+ }
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ gb_operation_set_data(operation, op_async);
+
+ op_async->gb = gb;
+ op_async->operation = operation;
+ op_async->completion = completion;
+
+ op_async->ts = ktime_get();
+
+ atomic_inc(&gb->outstanding_operations);
+ ret = gb_operation_request_send(operation,
+ gb_loopback_async_operation_callback,
+ jiffies_to_msecs(gb->jiffy_timeout),
+ GFP_KERNEL);
+ if (ret) {
+ atomic_dec(&gb->outstanding_operations);
+ gb_operation_put(operation);
+ kfree(op_async);
+ }
+ return ret;
+}
+
+static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ int retval;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->len = cpu_to_le32(len);
+ retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
+ request, len + sizeof(*request),
+ NULL, 0);
+ kfree(request);
+ return retval;
+}
+
+static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ struct gb_loopback_transfer_response *response;
+ int retval;
+
+ gb->apbridge_latency_ts = 0;
+ gb->gbphy_latency_ts = 0;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+ response = kmalloc(len + sizeof(*response), GFP_KERNEL);
+ if (!response) {
+ kfree(request);
+ return -ENOMEM;
+ }
+
+ memset(request->data, 0x5A, len);
+
+ request->len = cpu_to_le32(len);
+ retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
+ request, len + sizeof(*request),
+ response, len + sizeof(*response));
+ if (retval)
+ goto gb_error;
+
+ if (memcmp(request->data, response->data, len)) {
+ dev_err(&gb->connection->bundle->dev,
+ "Loopback Data doesn't match\n");
+ retval = -EREMOTEIO;
+ }
+ gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
+ gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);
+
+gb_error:
+ kfree(request);
+ kfree(response);
+
+ return retval;
+}
+
+static int gb_loopback_sync_ping(struct gb_loopback *gb)
+{
+ return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
+ NULL, 0, NULL, 0);
+}
+
+static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ int retval;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->len = cpu_to_le32(len);
+ retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
+ request, len + sizeof(*request),
+ 0, NULL);
+ kfree(request);
+ return retval;
+}
+
+static int gb_loopback_async_transfer_complete(
+ struct gb_loopback_async_operation *op_async)
+{
+ struct gb_loopback *gb;
+ struct gb_operation *operation;
+ struct gb_loopback_transfer_request *request;
+ struct gb_loopback_transfer_response *response;
+ size_t len;
+ int retval = 0;
+
+ gb = op_async->gb;
+ operation = op_async->operation;
+ request = operation->request->payload;
+ response = operation->response->payload;
+ len = le32_to_cpu(request->len);
+
+ if (memcmp(request->data, response->data, len)) {
+ dev_err(&gb->connection->bundle->dev,
+ "Loopback Data doesn't match operation id %d\n",
+ operation->id);
+ retval = -EREMOTEIO;
+ } else {
+ gb->apbridge_latency_ts =
+ (u32)__le32_to_cpu(response->reserved0);
+ gb->gbphy_latency_ts =
+ (u32)__le32_to_cpu(response->reserved1);
+ }
+
+ return retval;
+}
+
+static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ int retval, response_len;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ memset(request->data, 0x5A, len);
+
+ request->len = cpu_to_le32(len);
+ response_len = sizeof(struct gb_loopback_transfer_response);
+ retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
+ request, len + sizeof(*request),
+ len + response_len,
+ gb_loopback_async_transfer_complete);
+ if (retval)
+ goto gb_error;
+
+gb_error:
+ kfree(request);
+ return retval;
+}
+
+static int gb_loopback_async_ping(struct gb_loopback *gb)
+{
+ return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
+ NULL, 0, 0, NULL);
+}
+
+static int gb_loopback_request_handler(struct gb_operation *operation)
+{
+ struct gb_connection *connection = operation->connection;
+ struct gb_loopback_transfer_request *request;
+ struct gb_loopback_transfer_response *response;
+ struct device *dev = &connection->bundle->dev;
+ size_t len;
+
+ /* By convention, the AP initiates the version operation */
+ switch (operation->type) {
+ case GB_LOOPBACK_TYPE_PING:
+ case GB_LOOPBACK_TYPE_SINK:
+ return 0;
+ case GB_LOOPBACK_TYPE_TRANSFER:
+ if (operation->request->payload_size < sizeof(*request)) {
+ dev_err(dev, "transfer request too small (%zu < %zu)\n",
+ operation->request->payload_size,
+ sizeof(*request));
+ return -EINVAL; /* -EMSGSIZE */
+ }
+ request = operation->request->payload;
+ len = le32_to_cpu(request->len);
+ if (len > gb_dev.size_max) {
+ dev_err(dev, "transfer request too large (%zu > %zu)\n",
+ len, gb_dev.size_max);
+ return -EINVAL;
+ }
+
+ if (!gb_operation_response_alloc(operation,
+ len + sizeof(*response), GFP_KERNEL)) {
+ dev_err(dev, "error allocating response\n");
+ return -ENOMEM;
+ }
+ response = operation->response->payload;
+ response->len = cpu_to_le32(len);
+ if (len)
+ memcpy(response->data, request->data, len);
+
+ return 0;
+ default:
+ dev_err(dev, "unsupported request: %u\n", operation->type);
+ return -EINVAL;
+ }
+}
+
+static void gb_loopback_reset_stats(struct gb_loopback *gb)
+{
+ struct gb_loopback_stats reset = {
+ .min = U32_MAX,
+ };
+
+ /* Reset per-connection stats */
+ memcpy(&gb->latency, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->throughput, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->requests_per_second, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->apbridge_unipro_latency, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->gbphy_firmware_latency, &reset,
+ sizeof(struct gb_loopback_stats));
+
+ /* Should be initialized at least once per transaction set */
+ gb->apbridge_latency_ts = 0;
+ gb->gbphy_latency_ts = 0;
+ gb->ts = ktime_set(0, 0);
+}
+
+static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
+{
+ if (stats->min > val)
+ stats->min = val;
+ if (stats->max < val)
+ stats->max = val;
+ stats->sum += val;
+ stats->count++;
+}
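+
+/*
+ * Illustrative example: starting from a freshly reset structure (min set to
+ * U32_MAX, everything else zero), updates with the values 40 and 60 leave
+ * min = 40, max = 60, sum = 100 and count = 2; the *_avg attributes are then
+ * derived from sum / count.
+ */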
+
+static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
+ u64 val, u32 count)
+{
+ stats->sum += val;
+ stats->count += count;
+
+ do_div(val, count);
+ if (stats->min > val)
+ stats->min = val;
+ if (stats->max < val)
+ stats->max = val;
+}
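+
+/*
+ * Illustrative example for the windowed variant: a call with val = 3000 and
+ * count = 3 adds 3000 to sum and 3 to count, but compares min/max against
+ * the per-window average of 1000 (val / count) rather than the raw total.
+ */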
+
+static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
+{
+ u64 req = gb->requests_completed * USEC_PER_SEC;
+
+ gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
+}
+
+static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
+{
+ u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
+
+ switch (gb->type) {
+ case GB_LOOPBACK_TYPE_PING:
+ break;
+ case GB_LOOPBACK_TYPE_SINK:
+ aggregate_size += sizeof(struct gb_loopback_transfer_request) +
+ gb->size;
+ break;
+ case GB_LOOPBACK_TYPE_TRANSFER:
+ aggregate_size += sizeof(struct gb_loopback_transfer_request) +
+ sizeof(struct gb_loopback_transfer_response) +
+ gb->size * 2;
+ break;
+ default:
+ return;
+ }
+
+ aggregate_size *= gb->requests_completed;
+ aggregate_size *= USEC_PER_SEC;
+ gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
+ latency);
+}
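+
+/*
+ * Illustrative throughput example: for a TRANSFER with size = 1024, each
+ * operation accounts for two operation message headers, the fixed parts of
+ * the loopback transfer request and response, and 2 * 1024 payload bytes;
+ * that per-operation size is multiplied by the completed requests in the
+ * window and scaled by USEC_PER_SEC / latency to yield bytes per second.
+ */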
+
+static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
+{
+ u32 lat;
+
+ /* Express latency in terms of microseconds */
+ lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
+
+	/* Log latency statistic */
+ gb_loopback_update_stats(&gb->latency, lat);
+
+ /* Raw latency log on a per thread basis */
+ kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
+
+ /* Log the firmware supplied latency values */
+ gb_loopback_update_stats(&gb->apbridge_unipro_latency,
+ gb->apbridge_latency_ts);
+ gb_loopback_update_stats(&gb->gbphy_firmware_latency,
+ gb->gbphy_latency_ts);
+}
+
+static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
+{
+ u64 nlat;
+ u32 lat;
+ ktime_t te;
+
+ if (!error) {
+ gb->requests_completed++;
+ gb_loopback_calculate_latency_stats(gb);
+ }
+
+ te = ktime_get();
+ nlat = gb_loopback_calc_latency(gb->ts, te);
+ if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
+ lat = gb_loopback_nsec_to_usec_latency(nlat);
+
+ gb_loopback_throughput_update(gb, lat);
+ gb_loopback_requests_update(gb, lat);
+
+ if (gb->iteration_count != gb->iteration_max) {
+ gb->ts = te;
+ gb->requests_completed = 0;
+ }
+ }
+}
+
+static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
+{
+ if (!(gb->async && gb->outstanding_operations_max))
+ return;
+ wait_event_interruptible(gb->wq_completion,
+ (atomic_read(&gb->outstanding_operations) <
+ gb->outstanding_operations_max) ||
+ kthread_should_stop());
+}
+
+static int gb_loopback_fn(void *data)
+{
+ int error = 0;
+ int us_wait = 0;
+ int type;
+ int ret;
+ u32 size;
+
+ struct gb_loopback *gb = data;
+ struct gb_bundle *bundle = gb->connection->bundle;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ while (1) {
+ if (!gb->type) {
+ gb_pm_runtime_put_autosuspend(bundle);
+ wait_event_interruptible(gb->wq, gb->type ||
+ kthread_should_stop());
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+ }
+
+ if (kthread_should_stop())
+ break;
+
+ /* Limit the maximum number of in-flight async operations */
+ gb_loopback_async_wait_to_send(gb);
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&gb->mutex);
+
+ /* Optionally terminate */
+ if (gb->send_count == gb->iteration_max) {
+ mutex_unlock(&gb->mutex);
+
+ /* Wait for synchronous and asynchronous completion */
+ gb_loopback_async_wait_all(gb);
+
+ /* Mark complete unless user-space has poked us */
+ mutex_lock(&gb->mutex);
+ if (gb->iteration_count == gb->iteration_max) {
+ gb->type = 0;
+ gb->send_count = 0;
+ sysfs_notify(&gb->dev->kobj, NULL,
+ "iteration_count");
+ dev_dbg(&bundle->dev, "load test complete\n");
+ } else {
+ dev_dbg(&bundle->dev,
+ "continuing on with new test set\n");
+ }
+ mutex_unlock(&gb->mutex);
+ continue;
+ }
+ size = gb->size;
+ us_wait = gb->us_wait;
+ type = gb->type;
+ if (ktime_to_ns(gb->ts) == 0)
+ gb->ts = ktime_get();
+
+ /* Else operations to perform */
+ if (gb->async) {
+ if (type == GB_LOOPBACK_TYPE_PING)
+ error = gb_loopback_async_ping(gb);
+ else if (type == GB_LOOPBACK_TYPE_TRANSFER)
+ error = gb_loopback_async_transfer(gb, size);
+ else if (type == GB_LOOPBACK_TYPE_SINK)
+ error = gb_loopback_async_sink(gb, size);
+
+ if (error) {
+ gb->error++;
+ gb->iteration_count++;
+ }
+ } else {
+ /* We are effectively single threaded here */
+ if (type == GB_LOOPBACK_TYPE_PING)
+ error = gb_loopback_sync_ping(gb);
+ else if (type == GB_LOOPBACK_TYPE_TRANSFER)
+ error = gb_loopback_sync_transfer(gb, size);
+ else if (type == GB_LOOPBACK_TYPE_SINK)
+ error = gb_loopback_sync_sink(gb, size);
+
+ if (error)
+ gb->error++;
+ gb->iteration_count++;
+ gb_loopback_calculate_stats(gb, !!error);
+ }
+ gb->send_count++;
+ mutex_unlock(&gb->mutex);
+
+ if (us_wait) {
+ if (us_wait < 20000)
+ usleep_range(us_wait, us_wait + 100);
+ else
+ msleep(us_wait / 1000);
+ }
+ }
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+}
+
+static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
+ struct kfifo *kfifo,
+ struct mutex *mutex)
+{
+ u32 latency;
+ int retval;
+
+ if (kfifo_len(kfifo) == 0) {
+ retval = -EAGAIN;
+ goto done;
+ }
+
+ mutex_lock(mutex);
+ retval = kfifo_out(kfifo, &latency, sizeof(latency));
+ if (retval > 0) {
+ seq_printf(s, "%u", latency);
+ retval = 0;
+ }
+ mutex_unlock(mutex);
+done:
+ return retval;
+}
+
+static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
+{
+ struct gb_loopback *gb = s->private;
+
+ return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
+ &gb->mutex);
+}
+DEFINE_SHOW_ATTRIBUTE(gb_loopback_dbgfs_latency);
+
+#define DEBUGFS_NAMELEN 32
+
+static int gb_loopback_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_loopback *gb;
+ struct device *dev;
+ int retval;
+ char name[DEBUGFS_NAMELEN];
+ unsigned long flags;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
+ return -ENODEV;
+
+ gb = kzalloc(sizeof(*gb), GFP_KERNEL);
+ if (!gb)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_loopback_request_handler);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto out_kzalloc;
+ }
+
+ gb->connection = connection;
+ greybus_set_drvdata(bundle, gb);
+
+ init_waitqueue_head(&gb->wq);
+ init_waitqueue_head(&gb->wq_completion);
+ atomic_set(&gb->outstanding_operations, 0);
+ gb_loopback_reset_stats(gb);
+
+ /* Reported values to user-space for min/max timeouts */
+ gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
+ gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);
+
+ if (!gb_dev.count) {
+ /* Calculate maximum payload */
+ gb_dev.size_max = gb_operation_get_payload_size_max(connection);
+ if (gb_dev.size_max <=
+ sizeof(struct gb_loopback_transfer_request)) {
+ retval = -EINVAL;
+ goto out_connection_destroy;
+ }
+ gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
+ }
+
+ /* Create per-connection sysfs and debugfs data-points */
+ snprintf(name, sizeof(name), "raw_latency_%s",
+ dev_name(&connection->bundle->dev));
+ gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
+ &gb_loopback_dbgfs_latency_fops);
+
+ gb->id = ida_alloc(&loopback_ida, GFP_KERNEL);
+ if (gb->id < 0) {
+ retval = gb->id;
+ goto out_debugfs_remove;
+ }
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto out_ida_remove;
+
+ dev = device_create_with_groups(&loopback_class,
+ &connection->bundle->dev,
+ MKDEV(0, 0), gb, loopback_groups,
+ "gb_loopback%d", gb->id);
+ if (IS_ERR(dev)) {
+ retval = PTR_ERR(dev);
+ goto out_connection_disable;
+ }
+ gb->dev = dev;
+
+ /* Allocate kfifo */
+ if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
+ GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_conn;
+ }
+ /* Fork worker thread */
+ mutex_init(&gb->mutex);
+ gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
+ if (IS_ERR(gb->task)) {
+ retval = PTR_ERR(gb->task);
+ goto out_kfifo;
+ }
+
+ spin_lock_irqsave(&gb_dev.lock, flags);
+ gb_dev.count++;
+ spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+ gb_connection_latency_tag_enable(connection);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+out_kfifo:
+ kfifo_free(&gb->kfifo_lat);
+out_conn:
+ device_unregister(dev);
+out_connection_disable:
+ gb_connection_disable(connection);
+out_ida_remove:
+ ida_free(&loopback_ida, gb->id);
+out_debugfs_remove:
+ debugfs_remove(gb->file);
+out_connection_destroy:
+ gb_connection_destroy(connection);
+out_kzalloc:
+ kfree(gb);
+
+ return retval;
+}
+
+static void gb_loopback_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_loopback *gb = greybus_get_drvdata(bundle);
+ unsigned long flags;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+
+ gb_connection_disable(gb->connection);
+
+ if (!IS_ERR_OR_NULL(gb->task))
+ kthread_stop(gb->task);
+
+ kfifo_free(&gb->kfifo_lat);
+ gb_connection_latency_tag_disable(gb->connection);
+ debugfs_remove(gb->file);
+
+ /*
+ * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
+ * is disabled at the beginning and so we can't have any more
+ * incoming/outgoing requests.
+ */
+ gb_loopback_async_wait_all(gb);
+
+ spin_lock_irqsave(&gb_dev.lock, flags);
+ gb_dev.count--;
+ spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+ device_unregister(gb->dev);
+ ida_free(&loopback_ida, gb->id);
+
+ gb_connection_destroy(gb->connection);
+ kfree(gb);
+}
+
+static const struct greybus_bundle_id gb_loopback_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);
+
+static struct greybus_driver gb_loopback_driver = {
+ .name = "loopback",
+ .probe = gb_loopback_probe,
+ .disconnect = gb_loopback_disconnect,
+ .id_table = gb_loopback_id_table,
+};
+
+static int loopback_init(void)
+{
+ int retval;
+
+ spin_lock_init(&gb_dev.lock);
+ gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
+
+ retval = class_register(&loopback_class);
+ if (retval)
+ goto err;
+
+ retval = greybus_register(&gb_loopback_driver);
+ if (retval)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ class_unregister(&loopback_class);
+err:
+ debugfs_remove_recursive(gb_dev.root);
+ return retval;
+}
+module_init(loopback_init);
+
+static void __exit loopback_exit(void)
+{
+ debugfs_remove_recursive(gb_dev.root);
+ greybus_deregister(&gb_loopback_driver);
+ class_unregister(&loopback_class);
+ ida_destroy(&loopback_ida);
+}
+module_exit(loopback_exit);
+
+MODULE_DESCRIPTION("Loopback bridge driver for the Greybus loopback module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
new file mode 100644
index 000000000000..a484c0ca058d
--- /dev/null
+++ b/drivers/staging/greybus/power_supply.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power Supply driver for a Greybus module.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/greybus.h>
+
+#define PROP_MAX 32
+
+struct gb_power_supply_prop {
+ enum power_supply_property prop;
+ u8 gb_prop;
+ int val;
+ int previous_val;
+ bool is_writeable;
+};
+
+struct gb_power_supply {
+ u8 id;
+ bool registered;
+ struct power_supply *psy;
+ struct power_supply_desc desc;
+ char name[64];
+ struct gb_power_supplies *supplies;
+ struct delayed_work work;
+ char *manufacturer;
+ char *model_name;
+ char *serial_number;
+ u8 type;
+ u8 properties_count;
+ u8 properties_count_str;
+ unsigned long last_update;
+ u8 cache_invalid;
+ unsigned int update_interval;
+ bool changed;
+ struct gb_power_supply_prop *props;
+ enum power_supply_property *props_raw;
+ bool pm_acquired;
+ struct mutex supply_lock;
+};
+
+struct gb_power_supplies {
+ struct gb_connection *connection;
+ u8 supplies_count;
+ struct gb_power_supply *supply;
+ struct mutex supplies_lock;
+};
+
+#define to_gb_power_supply(x) power_supply_get_drvdata(x)
+
+/*
+ * General power supply properties that could be absent for various reasons,
+ * like the kernel version or vendor-specific versions
+ */
+#ifndef POWER_SUPPLY_PROP_VOLTAGE_BOOT
+ #define POWER_SUPPLY_PROP_VOLTAGE_BOOT -1
+#endif
+#ifndef POWER_SUPPLY_PROP_CURRENT_BOOT
+ #define POWER_SUPPLY_PROP_CURRENT_BOOT -1
+#endif
+#ifndef POWER_SUPPLY_PROP_CALIBRATE
+ #define POWER_SUPPLY_PROP_CALIBRATE -1
+#endif
+
+/* cache time in milliseconds; if cache_time is set to 0 the cache is disabled */
+static unsigned int cache_time = 1000;
+/*
+ * initial and maximum values of the update interval; polling backs off
+ * exponentially between the two
+ */
+static unsigned int update_interval_init = 1 * HZ;
+static unsigned int update_interval_max = 30 * HZ;
+
+struct gb_power_supply_changes {
+ enum power_supply_property prop;
+ u32 tolerance_change;
+ void (*prop_changed)(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop);
+};
+
+static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop);
+
+static const struct gb_power_supply_changes psy_props_changes[] = {
+ { .prop = GB_POWER_SUPPLY_PROP_STATUS,
+ .tolerance_change = 0,
+ .prop_changed = gb_power_supply_state_change,
+ },
+ { .prop = GB_POWER_SUPPLY_PROP_TEMP,
+ .tolerance_change = 500,
+ .prop_changed = NULL,
+ },
+ { .prop = GB_POWER_SUPPLY_PROP_ONLINE,
+ .tolerance_change = 0,
+ .prop_changed = NULL,
+ },
+};
+
+static int get_psp_from_gb_prop(int gb_prop, enum power_supply_property *psp)
+{
+ int prop;
+
+ switch (gb_prop) {
+ case GB_POWER_SUPPLY_PROP_STATUS:
+ prop = POWER_SUPPLY_PROP_STATUS;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_TYPE:
+ prop = POWER_SUPPLY_PROP_CHARGE_TYPE;
+ break;
+ case GB_POWER_SUPPLY_PROP_HEALTH:
+ prop = POWER_SUPPLY_PROP_HEALTH;
+ break;
+ case GB_POWER_SUPPLY_PROP_PRESENT:
+ prop = POWER_SUPPLY_PROP_PRESENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_ONLINE:
+ prop = POWER_SUPPLY_PROP_ONLINE;
+ break;
+ case GB_POWER_SUPPLY_PROP_AUTHENTIC:
+ prop = POWER_SUPPLY_PROP_AUTHENTIC;
+ break;
+ case GB_POWER_SUPPLY_PROP_TECHNOLOGY:
+ prop = POWER_SUPPLY_PROP_TECHNOLOGY;
+ break;
+ case GB_POWER_SUPPLY_PROP_CYCLE_COUNT:
+ prop = POWER_SUPPLY_PROP_CYCLE_COUNT;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_OCV;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_BOOT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_MAX:
+ prop = POWER_SUPPLY_PROP_CURRENT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_NOW:
+ prop = POWER_SUPPLY_PROP_CURRENT_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_AVG:
+ prop = POWER_SUPPLY_PROP_CURRENT_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_BOOT:
+ prop = POWER_SUPPLY_PROP_CURRENT_BOOT;
+ break;
+ case GB_POWER_SUPPLY_PROP_POWER_NOW:
+ prop = POWER_SUPPLY_PROP_POWER_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_POWER_AVG:
+ prop = POWER_SUPPLY_PROP_POWER_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+ prop = POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_FULL:
+ prop = POWER_SUPPLY_PROP_CHARGE_FULL;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY:
+ prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_NOW:
+ prop = POWER_SUPPLY_PROP_CHARGE_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_AVG:
+ prop = POWER_SUPPLY_PROP_CHARGE_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ prop = POWER_SUPPLY_PROP_CHARGE_COUNTER;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ prop = POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN:
+ prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_FULL:
+ prop = POWER_SUPPLY_PROP_ENERGY_FULL;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY:
+ prop = POWER_SUPPLY_PROP_ENERGY_EMPTY;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_NOW:
+ prop = POWER_SUPPLY_PROP_ENERGY_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_AVG:
+ prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY:
+ prop = POWER_SUPPLY_PROP_CAPACITY;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
+ prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX:
+ prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ prop = POWER_SUPPLY_PROP_CAPACITY_LEVEL;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP:
+ prop = POWER_SUPPLY_PROP_TEMP;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_MAX:
+ prop = POWER_SUPPLY_PROP_TEMP_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_MIN:
+ prop = POWER_SUPPLY_PROP_TEMP_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+ prop = POWER_SUPPLY_PROP_TEMP_ALERT_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ prop = POWER_SUPPLY_PROP_TEMP_ALERT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT:
+ prop = POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+ prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+ prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ prop = POWER_SUPPLY_PROP_TIME_TO_FULL_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ prop = POWER_SUPPLY_PROP_TIME_TO_FULL_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_TYPE:
+ prop = POWER_SUPPLY_PROP_TYPE;
+ break;
+ case GB_POWER_SUPPLY_PROP_SCOPE:
+ prop = POWER_SUPPLY_PROP_SCOPE;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ prop = POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CALIBRATE:
+ prop = POWER_SUPPLY_PROP_CALIBRATE;
+ break;
+ default:
+ prop = -1;
+ break;
+ }
+
+ if (prop < 0)
+ return prop;
+
+ *psp = (enum power_supply_property)prop;
+
+ return 0;
+}
+
+static struct gb_connection *get_conn_from_psy(struct gb_power_supply *gbpsy)
+{
+ return gbpsy->supplies->connection;
+}
+
+static struct gb_power_supply_prop *get_psy_prop(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp)
+{
+ int i;
+
+ for (i = 0; i < gbpsy->properties_count; i++)
+ if (gbpsy->props[i].prop == psp)
+ return &gbpsy->props[i];
+ return NULL;
+}
+
+static int is_psy_prop_writeable(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp)
+{
+ struct gb_power_supply_prop *prop;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop)
+ return -ENOENT;
+ return prop->is_writeable ? 1 : 0;
+}
+
+static int is_prop_valint(enum power_supply_property psp)
+{
+ return ((psp < POWER_SUPPLY_PROP_MODEL_NAME) ? 1 : 0);
+}
+
+static void next_interval(struct gb_power_supply *gbpsy)
+{
+ if (gbpsy->update_interval == update_interval_max)
+ return;
+
+ /* do some exponential back-off in the update interval */
+ gbpsy->update_interval *= 2;
+ if (gbpsy->update_interval > update_interval_max)
+ gbpsy->update_interval = update_interval_max;
+}
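+
+/*
+ * Illustrative example: the delayed work starts at update_interval_init
+ * (1 * HZ) and doubles after each poll, i.e. 1, 2, 4, 8, 16 seconds, until
+ * it is capped at update_interval_max (30 * HZ); a reported change resets
+ * it back to the initial value.
+ */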
+
+static void __gb_power_supply_changed(struct gb_power_supply *gbpsy)
+{
+ power_supply_changed(gbpsy->psy);
+}
+
+static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ int ret;
+
+ /*
+	 * Check gbpsy->pm_acquired to make sure that only one 'get_sync' and
+	 * 'put_autosuspend' runtime PM pair is issued per state property
+	 * change.
+ */
+ mutex_lock(&gbpsy->supply_lock);
+
+ if ((prop->val == GB_POWER_SUPPLY_STATUS_CHARGING) &&
+ !gbpsy->pm_acquired) {
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ dev_err(&connection->bundle->dev,
+				"Failed to set wake lock for charging state\n");
+ else
+ gbpsy->pm_acquired = true;
+ } else {
+ if (gbpsy->pm_acquired) {
+ ret = gb_pm_runtime_put_autosuspend(connection->bundle);
+ if (ret)
+ dev_err(&connection->bundle->dev,
+					"Failed to release wake lock for non-charging state\n");
+ else
+ gbpsy->pm_acquired = false;
+ }
+ }
+
+ mutex_unlock(&gbpsy->supply_lock);
+}
+
+static void check_changed(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop)
+{
+ const struct gb_power_supply_changes *psyc;
+ int val = prop->val;
+ int prev_val = prop->previous_val;
+ bool changed = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(psy_props_changes); i++) {
+ psyc = &psy_props_changes[i];
+ if (prop->prop == psyc->prop) {
+ if (!psyc->tolerance_change)
+ changed = true;
+ else if (val < prev_val &&
+ prev_val - val > psyc->tolerance_change)
+ changed = true;
+ else if (val > prev_val &&
+ val - prev_val > psyc->tolerance_change)
+ changed = true;
+
+ if (changed && psyc->prop_changed)
+ psyc->prop_changed(gbpsy, prop);
+
+ if (changed)
+ gbpsy->changed = true;
+ break;
+ }
+ }
+}
+
+static int total_props(struct gb_power_supply *gbpsy)
+{
+	/* this returns the number of intval plus strval properties */
+ return (gbpsy->properties_count + gbpsy->properties_count_str);
+}
+
+static void prop_append(struct gb_power_supply *gbpsy,
+ enum power_supply_property prop)
+{
+ enum power_supply_property *new_props_raw;
+
+ gbpsy->properties_count_str++;
+ new_props_raw = krealloc(gbpsy->props_raw, total_props(gbpsy) *
+ sizeof(enum power_supply_property),
+ GFP_KERNEL);
+ if (!new_props_raw)
+ return;
+ gbpsy->props_raw = new_props_raw;
+ gbpsy->props_raw[total_props(gbpsy) - 1] = prop;
+}
+
+static int __gb_power_supply_set_name(char *init_name, char *name, size_t len)
+{
+ unsigned int i = 0;
+ int ret = 0;
+ struct power_supply *psy;
+
+ if (!strlen(init_name))
+ init_name = "gb_power_supply";
+ strscpy(name, init_name, len);
+
+ while ((ret < len) && (psy = power_supply_get_by_name(name))) {
+ power_supply_put(psy);
+
+ ret = snprintf(name, len, "%s_%u", init_name, ++i);
+ }
+ if (ret >= len)
+ return -ENOMEM;
+ return i;
+}
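+
+/*
+ * Illustrative example: if a supply named "gb_power_supply" is already
+ * registered, the loop above retries with "gb_power_supply_1",
+ * "gb_power_supply_2", ... until power_supply_get_by_name() finds no match
+ * or the name no longer fits in the buffer (-ENOMEM).
+ */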
+
+static void _gb_power_supply_append_props(struct gb_power_supply *gbpsy)
+{
+ if (strlen(gbpsy->manufacturer))
+ prop_append(gbpsy, POWER_SUPPLY_PROP_MANUFACTURER);
+ if (strlen(gbpsy->model_name))
+ prop_append(gbpsy, POWER_SUPPLY_PROP_MODEL_NAME);
+ if (strlen(gbpsy->serial_number))
+ prop_append(gbpsy, POWER_SUPPLY_PROP_SERIAL_NUMBER);
+}
+
+static int gb_power_supply_description_get(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_get_description_request req;
+ struct gb_power_supply_get_description_response resp;
+ int ret;
+
+ req.psy_id = gbpsy->id;
+
+ ret = gb_operation_sync(connection,
+ GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ gbpsy->manufacturer = kstrndup(resp.manufacturer, PROP_MAX, GFP_KERNEL);
+ if (!gbpsy->manufacturer)
+ return -ENOMEM;
+ gbpsy->model_name = kstrndup(resp.model, PROP_MAX, GFP_KERNEL);
+ if (!gbpsy->model_name)
+ return -ENOMEM;
+ gbpsy->serial_number = kstrndup(resp.serial_number, PROP_MAX,
+ GFP_KERNEL);
+ if (!gbpsy->serial_number)
+ return -ENOMEM;
+
+ gbpsy->type = le16_to_cpu(resp.type);
+ gbpsy->properties_count = resp.properties_count;
+
+ return 0;
+}
+
+static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_get_property_descriptors_request *req;
+ struct gb_power_supply_get_property_descriptors_response *resp;
+ struct gb_operation *op;
+ u8 props_count = gbpsy->properties_count;
+ enum power_supply_property psp;
+ int ret;
+ int i, r = 0;
+
+ if (props_count == 0)
+ return 0;
+
+ op = gb_operation_create(connection,
+ GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
+ sizeof(*req),
+ struct_size(resp, props, props_count),
+ GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ req = op->request->payload;
+ req->psy_id = gbpsy->id;
+
+ ret = gb_operation_request_send_sync(op);
+ if (ret < 0)
+ goto out_put_operation;
+
+ resp = op->response->payload;
+
+ /* validate received properties */
+ for (i = 0; i < props_count; i++) {
+ ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
+ if (ret < 0) {
+ dev_warn(&connection->bundle->dev,
+				 "greybus property %u is not supported by this kernel, dropped\n",
+ resp->props[i].property);
+ gbpsy->properties_count--;
+ }
+ }
+
+ gbpsy->props = kcalloc(gbpsy->properties_count, sizeof(*gbpsy->props),
+ GFP_KERNEL);
+ if (!gbpsy->props) {
+ ret = -ENOMEM;
+ goto out_put_operation;
+ }
+
+ gbpsy->props_raw = kcalloc(gbpsy->properties_count,
+ sizeof(*gbpsy->props_raw), GFP_KERNEL);
+ if (!gbpsy->props_raw) {
+ ret = -ENOMEM;
+ goto out_put_operation;
+ }
+
+ /* Store available properties, skip the ones we do not support */
+ for (i = 0; i < props_count; i++) {
+ ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
+ if (ret < 0) {
+ r++;
+ continue;
+ }
+ gbpsy->props[i - r].prop = psp;
+ gbpsy->props[i - r].gb_prop = resp->props[i].property;
+ gbpsy->props_raw[i - r] = psp;
+ if (resp->props[i].is_writeable)
+ gbpsy->props[i - r].is_writeable = true;
+ }
+
+ /*
+	 * Now append the properties for which we already got the information
+	 * in the get_description operation (the char * ones).
+ */
+ _gb_power_supply_append_props(gbpsy);
+
+ ret = 0;
+out_put_operation:
+ gb_operation_put(op);
+
+ return ret;
+}
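+
+/*
+ * Illustrative example of the compaction above: if the module reports five
+ * properties and the second one is unknown to this kernel, r ends up at 1
+ * after skipping it, so the four supported descriptors are stored
+ * contiguously at indices 0..3 of the props/props_raw arrays.
+ */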
+
+static int __gb_power_supply_property_update(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_prop *prop;
+ struct gb_power_supply_get_property_request req;
+ struct gb_power_supply_get_property_response resp;
+ int val;
+ int ret;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop)
+ return -EINVAL;
+ req.psy_id = gbpsy->id;
+ req.property = prop->gb_prop;
+
+ ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_GET_PROPERTY,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ val = le32_to_cpu(resp.prop_val);
+ if (val == prop->val)
+ return 0;
+
+ prop->previous_val = prop->val;
+ prop->val = val;
+
+ check_changed(gbpsy, prop);
+
+ return 0;
+}
+
+static int __gb_power_supply_property_get(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gb_power_supply_prop *prop;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop)
+ return -EINVAL;
+
+ val->intval = prop->val;
+ return 0;
+}
+
+static int __gb_power_supply_property_strval_get(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = gbpsy->model_name;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = gbpsy->manufacturer;
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = gbpsy->serial_number;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int _gb_power_supply_property_get(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ int ret;
+
+ /*
+	 * Properties of type const char * were already fetched by the
+	 * get_description operation and are cached in gbpsy.
+ */
+ if (is_prop_valint(psp))
+ ret = __gb_power_supply_property_get(gbpsy, psp, val);
+ else
+ ret = __gb_power_supply_property_strval_get(gbpsy, psp, val);
+
+ if (ret < 0)
+		dev_err(&connection->bundle->dev, "failed to get property %u\n", psp);
+
+ return 0;
+}
+
+static int is_cache_valid(struct gb_power_supply *gbpsy)
+{
+ /* check if cache is good enough or it has expired */
+ if (gbpsy->cache_invalid) {
+ gbpsy->cache_invalid = 0;
+ return 0;
+ }
+
+ if (gbpsy->last_update &&
+ time_is_after_jiffies(gbpsy->last_update +
+ msecs_to_jiffies(cache_time)))
+ return 1;
+
+ return 0;
+}
+
+static int gb_power_supply_status_get(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ int ret = 0;
+ int i;
+
+ if (is_cache_valid(gbpsy))
+ return 0;
+
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ return ret;
+
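+	/* refresh every cached property, stop on the first failure */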
+ for (i = 0; i < gbpsy->properties_count; i++) {
+ ret = __gb_power_supply_property_update(gbpsy,
+ gbpsy->props[i].prop);
+ if (ret < 0)
+ break;
+ }
+
+ if (ret == 0)
+ gbpsy->last_update = jiffies;
+
+ gb_pm_runtime_put_autosuspend(connection->bundle);
+ return ret;
+}
+
+static void gb_power_supply_status_update(struct gb_power_supply *gbpsy)
+{
+	/* check if there is a change that needs to be reported */
+ gb_power_supply_status_get(gbpsy);
+
+ if (!gbpsy->changed)
+ return;
+
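+	/* something changed, so reset polling to the initial interval */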
+ gbpsy->update_interval = update_interval_init;
+ __gb_power_supply_changed(gbpsy);
+ gbpsy->changed = false;
+}
+
+static void gb_power_supply_work(struct work_struct *work)
+{
+ struct gb_power_supply *gbpsy = container_of(work,
+ struct gb_power_supply,
+ work.work);
+
+ /*
+	 * If the poll interval is not set, disable polling; this is
+	 * especially helpful at unregister time.
+ */
+ if (!gbpsy->update_interval)
+ return;
+
+ gb_power_supply_status_update(gbpsy);
+ next_interval(gbpsy);
+ schedule_delayed_work(&gbpsy->work, gbpsy->update_interval);
+}
+
+static int get_property(struct power_supply *b,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+ gb_power_supply_status_get(gbpsy);
+
+ return _gb_power_supply_property_get(gbpsy, psp, val);
+}
+
+static int gb_power_supply_property_set(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ int val)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_prop *prop;
+ struct gb_power_supply_set_property_request req;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ return ret;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ req.psy_id = gbpsy->id;
+ req.property = prop->gb_prop;
+ req.prop_val = cpu_to_le32((s32)val);
+
+ ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_SET_PROPERTY,
+ &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ goto out;
+
+	/* immediately cache the new value */
+ prop->val = val;
+
+out:
+ gb_pm_runtime_put_autosuspend(connection->bundle);
+ return ret;
+}
+
+static int set_property(struct power_supply *b,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+ return gb_power_supply_property_set(gbpsy, psp, val->intval);
+}
+
+static int property_is_writeable(struct power_supply *b,
+ enum power_supply_property psp)
+{
+ struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+ return is_psy_prop_writeable(gbpsy, psp);
+}
+
+static int gb_power_supply_register(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct power_supply_config cfg = {};
+
+ cfg.drv_data = gbpsy;
+
+ gbpsy->desc.name = gbpsy->name;
+ gbpsy->desc.type = gbpsy->type;
+ gbpsy->desc.properties = gbpsy->props_raw;
+ gbpsy->desc.num_properties = total_props(gbpsy);
+ gbpsy->desc.get_property = get_property;
+ gbpsy->desc.set_property = set_property;
+ gbpsy->desc.property_is_writeable = property_is_writeable;
+
+ gbpsy->psy = power_supply_register(&connection->bundle->dev,
+ &gbpsy->desc, &cfg);
+ return PTR_ERR_OR_ZERO(gbpsy->psy);
+}
+
+static void _gb_power_supply_free(struct gb_power_supply *gbpsy)
+{
+ kfree(gbpsy->serial_number);
+ kfree(gbpsy->model_name);
+ kfree(gbpsy->manufacturer);
+ kfree(gbpsy->props_raw);
+ kfree(gbpsy->props);
+}
+
+static void _gb_power_supply_release(struct gb_power_supply *gbpsy)
+{
+ gbpsy->update_interval = 0;
+
+ cancel_delayed_work_sync(&gbpsy->work);
+
+ if (gbpsy->registered)
+ power_supply_unregister(gbpsy->psy);
+
+ _gb_power_supply_free(gbpsy);
+}
+
+static void _gb_power_supplies_release(struct gb_power_supplies *supplies)
+{
+ int i;
+
+ if (!supplies->supply)
+ return;
+
+ mutex_lock(&supplies->supplies_lock);
+ for (i = 0; i < supplies->supplies_count; i++)
+ _gb_power_supply_release(&supplies->supply[i]);
+ kfree(supplies->supply);
+ mutex_unlock(&supplies->supplies_lock);
+ kfree(supplies);
+}
+
+static int gb_power_supplies_get_count(struct gb_power_supplies *supplies)
+{
+ struct gb_power_supply_get_supplies_response resp;
+ int ret;
+
+ ret = gb_operation_sync(supplies->connection,
+ GB_POWER_SUPPLY_TYPE_GET_SUPPLIES,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (!resp.supplies_count)
+ return -EINVAL;
+
+ supplies->supplies_count = resp.supplies_count;
+
+ return ret;
+}
+
+static int gb_power_supply_config(struct gb_power_supplies *supplies, int id)
+{
+ struct gb_power_supply *gbpsy = &supplies->supply[id];
+ int ret;
+
+ gbpsy->supplies = supplies;
+ gbpsy->id = id;
+
+ ret = gb_power_supply_description_get(gbpsy);
+ if (ret < 0)
+ return ret;
+
+ return gb_power_supply_prop_descriptors_get(gbpsy);
+}
+
+static int gb_power_supply_enable(struct gb_power_supply *gbpsy)
+{
+ int ret;
+
+	/* guarantee that we have a unique name before registering */
+ ret = __gb_power_supply_set_name(gbpsy->model_name, gbpsy->name,
+ sizeof(gbpsy->name));
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&gbpsy->supply_lock);
+
+ ret = gb_power_supply_register(gbpsy);
+ if (ret < 0)
+ return ret;
+
+ gbpsy->update_interval = update_interval_init;
+ INIT_DELAYED_WORK(&gbpsy->work, gb_power_supply_work);
+ schedule_delayed_work(&gbpsy->work, 0);
+
+	/* everything went fine; mark it so the release code knows */
+ gbpsy->registered = true;
+
+ return 0;
+}
+
+static int gb_power_supplies_setup(struct gb_power_supplies *supplies)
+{
+ struct gb_connection *connection = supplies->connection;
+ int ret;
+ int i;
+
+ mutex_lock(&supplies->supplies_lock);
+
+ ret = gb_power_supplies_get_count(supplies);
+ if (ret < 0)
+ goto out;
+
+ supplies->supply = kcalloc(supplies->supplies_count,
+ sizeof(struct gb_power_supply),
+ GFP_KERNEL);
+
+ if (!supplies->supply) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < supplies->supplies_count; i++) {
+ ret = gb_power_supply_config(supplies, i);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+				"Failed to configure supply devices\n");
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&supplies->supplies_lock);
+ return ret;
+}
+
+static int gb_power_supplies_register(struct gb_power_supplies *supplies)
+{
+ struct gb_connection *connection = supplies->connection;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&supplies->supplies_lock);
+
+ for (i = 0; i < supplies->supplies_count; i++) {
+ ret = gb_power_supply_enable(&supplies->supply[i]);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+				"Failed to enable supply devices\n");
+ break;
+ }
+ }
+
+ mutex_unlock(&supplies->supplies_lock);
+ return ret;
+}
+
+static int gb_supplies_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_power_supplies *supplies = gb_connection_get_data(connection);
+ struct gb_power_supply *gbpsy;
+ struct gb_message *request;
+ struct gb_power_supply_event_request *payload;
+ u8 psy_id;
+ u8 event;
+ int ret = 0;
+
+ if (op->type != GB_POWER_SUPPLY_TYPE_EVENT) {
+ dev_err(&connection->bundle->dev,
+ "Unsupported unsolicited event: %u\n", op->type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ dev_err(&connection->bundle->dev,
+ "Wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+ psy_id = payload->psy_id;
+ mutex_lock(&supplies->supplies_lock);
+ if (psy_id >= supplies->supplies_count ||
+ !supplies->supply[psy_id].registered) {
+ dev_err(&connection->bundle->dev,
+ "Event received for unconfigured power_supply id: %d\n",
+ psy_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ event = payload->event;
+ /*
+	 * Only handle events after setup is done and before release is
+	 * running. For that, just check update_interval.
+ */
+ gbpsy = &supplies->supply[psy_id];
+ if (!gbpsy->update_interval) {
+ ret = -ESHUTDOWN;
+ goto out_unlock;
+ }
+
+ if (event & GB_POWER_SUPPLY_UPDATE) {
+ /*
+		 * Make sure we invalidate the cache; otherwise no new values
+		 * for the properties will be fetched and the whole purpose of
+		 * this event is missed.
+ */
+ gbpsy->cache_invalid = 1;
+ gb_power_supply_status_update(gbpsy);
+ }
+
+out_unlock:
+ mutex_unlock(&supplies->supplies_lock);
+ return ret;
+}
+
+static int gb_power_supply_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_power_supplies *supplies;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_POWER_SUPPLY)
+ return -ENODEV;
+
+ supplies = kzalloc(sizeof(*supplies), GFP_KERNEL);
+ if (!supplies)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_supplies_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto out;
+ }
+
+ supplies->connection = connection;
+ gb_connection_set_data(connection, supplies);
+
+ mutex_init(&supplies->supplies_lock);
+
+ greybus_set_drvdata(bundle, supplies);
+
+ /* We aren't ready to receive an incoming request yet */
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto error_connection_destroy;
+
+ ret = gb_power_supplies_setup(supplies);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ /* We are ready to receive an incoming request now, enable RX as well */
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto error_connection_disable;
+
+ ret = gb_power_supplies_register(supplies);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+ return 0;
+
+error_connection_disable:
+ gb_connection_disable(connection);
+error_connection_destroy:
+ gb_connection_destroy(connection);
+out:
+ _gb_power_supplies_release(supplies);
+ return ret;
+}
+
+static void gb_power_supply_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_power_supplies *supplies = greybus_get_drvdata(bundle);
+
+ gb_connection_disable(supplies->connection);
+ gb_connection_destroy(supplies->connection);
+
+ _gb_power_supplies_release(supplies);
+}
+
+static const struct greybus_bundle_id gb_power_supply_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_POWER_SUPPLY) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_power_supply_id_table);
+
+static struct greybus_driver gb_power_supply_driver = {
+ .name = "power_supply",
+ .probe = gb_power_supply_probe,
+ .disconnect = gb_power_supply_disconnect,
+ .id_table = gb_power_supply_id_table,
+};
+module_greybus_driver(gb_power_supply_driver);
+
+MODULE_DESCRIPTION("Power Supply driver for a Greybus module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
new file mode 100644
index 000000000000..1cb7b9089ead
--- /dev/null
+++ b/drivers/staging/greybus/pwm.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PWM Greybus driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pwm.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+
+struct gb_pwm_chip {
+ struct gb_connection *connection;
+ struct pwm_chip chip;
+};
+
+static inline struct gb_pwm_chip *pwm_chip_to_gb_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct gb_pwm_chip, chip);
+}
+
+static int gb_pwm_get_npwm(struct gb_connection *connection)
+{
+ struct gb_pwm_count_response response;
+ int ret;
+
+ ret = gb_operation_sync(connection, GB_PWM_TYPE_PWM_COUNT,
+ NULL, 0, &response, sizeof(response));
+ if (ret)
+ return ret;
+
+ /*
+ * The request returns the highest allowed PWM id parameter. So add one
+ * to get the number of PWMs.
+ */
+ return response.count + 1;
+}
+
+static int gb_pwm_activate_operation(struct pwm_chip *chip, u8 which)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ struct gb_pwm_activate_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ request.which = which;
+
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ACTIVATE,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_deactivate_operation(struct pwm_chip *chip, u8 which)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ struct gb_pwm_deactivate_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ request.which = which;
+
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DEACTIVATE,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_config_operation(struct pwm_chip *chip,
+ u8 which, u32 duty, u32 period)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ struct gb_pwm_config_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ request.which = which;
+ request.duty = cpu_to_le32(duty);
+ request.period = cpu_to_le32(period);
+
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_CONFIG,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_set_polarity_operation(struct pwm_chip *chip,
+ u8 which, u8 polarity)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ struct gb_pwm_polarity_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ request.which = which;
+ request.polarity = polarity;
+
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_POLARITY,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_enable_operation(struct pwm_chip *chip, u8 which)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ struct gb_pwm_enable_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ request.which = which;
+
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ENABLE,
+ &request, sizeof(request), NULL, 0);
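+	/* on failure drop the PM reference; it is otherwise held until disable */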
+ if (ret)
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_disable_operation(struct pwm_chip *chip, u8 which)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ struct gb_pwm_disable_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ request.which = which;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DISABLE,
+ &request, sizeof(request), NULL, 0);
+
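+	/* release the runtime PM reference held since the enable operation */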
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ return gb_pwm_activate_operation(chip, pwm->hwpwm);
+};
+
+static void gb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ if (pwm_is_enabled(pwm))
+ dev_warn(pwmchip_parent(chip), "freeing PWM device without disabling\n");
+
+ gb_pwm_deactivate_operation(chip, pwm->hwpwm);
+}
+
+static int gb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ bool enabled = pwm->state.enabled;
+ u64 period = state->period;
+ u64 duty_cycle = state->duty_cycle;
+
+ /* Set polarity */
+ if (state->polarity != pwm->state.polarity) {
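+		/* disable first; the PWM is re-enabled at the end if requested */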
+ if (enabled) {
+ gb_pwm_disable_operation(chip, pwm->hwpwm);
+ enabled = false;
+ }
+ err = gb_pwm_set_polarity_operation(chip, pwm->hwpwm, state->polarity);
+ if (err)
+ return err;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ gb_pwm_disable_operation(chip, pwm->hwpwm);
+ return 0;
+ }
+
+ /*
+ * Set period and duty cycle
+ *
+	 * The PWM core provides 64-bit period and duty_cycle values, but
+	 * greybus only accepts 32-bit, so they have to be limited to U32_MAX.
+ */
+ if (period > U32_MAX)
+ period = U32_MAX;
+
+ if (duty_cycle > period)
+ duty_cycle = period;
+
+ err = gb_pwm_config_operation(chip, pwm->hwpwm, duty_cycle, period);
+ if (err)
+ return err;
+
+	/* enable the PWM if it is not already enabled */
+ if (!enabled)
+ return gb_pwm_enable_operation(chip, pwm->hwpwm);
+
+ return 0;
+}
+
+static const struct pwm_ops gb_pwm_ops = {
+ .request = gb_pwm_request,
+ .free = gb_pwm_free,
+ .apply = gb_pwm_apply,
+};
+
+static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct gb_pwm_chip *pwmc;
+ struct pwm_chip *chip;
+ int ret, npwm;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ /* Query number of pwms present */
+ ret = gb_pwm_get_npwm(connection);
+ if (ret < 0)
+ goto exit_connection_disable;
+ npwm = ret;
+
+ chip = pwmchip_alloc(&gbphy_dev->dev, npwm, sizeof(*pwmc));
+ if (IS_ERR(chip)) {
+ ret = PTR_ERR(chip);
+ goto exit_connection_disable;
+ }
+ gb_gbphy_set_data(gbphy_dev, chip);
+
+ pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ pwmc->connection = connection;
+
+ chip->ops = &gb_pwm_ops;
+
+ ret = pwmchip_add(chip);
+ if (ret) {
+ dev_err(&gbphy_dev->dev,
+ "failed to register PWM: %d\n", ret);
+ goto exit_pwmchip_put;
+ }
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_pwmchip_put:
+ pwmchip_put(chip);
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+ return ret;
+}
+
+static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
+{
+ struct pwm_chip *chip = gb_gbphy_get_data(gbphy_dev);
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ struct gb_connection *connection = pwmc->connection;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ pwmchip_remove(chip);
+ pwmchip_put(chip);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+}
+
+static const struct gbphy_device_id gb_pwm_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_PWM) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_pwm_id_table);
+
+static struct gbphy_driver pwm_driver = {
+ .name = "pwm",
+ .probe = gb_pwm_probe,
+ .remove = gb_pwm_remove,
+ .id_table = gb_pwm_id_table,
+};
+
+module_gbphy_driver(pwm_driver);
+MODULE_DESCRIPTION("PWM Greybus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
new file mode 100644
index 000000000000..71de6776739c
--- /dev/null
+++ b/drivers/staging/greybus/raw.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus driver for the Raw protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/uaccess.h>
+#include <linux/greybus.h>
+
+struct gb_raw {
+ struct gb_connection *connection;
+
+ struct list_head list;
+ int list_data;
+ struct mutex list_lock;
+ dev_t dev;
+ struct cdev cdev;
+ struct device *device;
+};
+
+struct raw_data {
+ struct list_head entry;
+ u32 len;
+ u8 data[] __counted_by(len);
+};
+
+static const struct class raw_class = {
+ .name = "gb_raw",
+};
+
+static int raw_major;
+static const struct file_operations raw_fops;
+static DEFINE_IDA(minors);
+
+/* Number of minor devices this driver supports */
+#define NUM_MINORS 256
+
+/* Maximum size of any one send data buffer we support */
+#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
+
+/*
+ * Maximum size of the data in the receive buffer we allow before we start to
+ * drop messages on the floor
+ */
+#define MAX_DATA_SIZE (MAX_PACKET_SIZE * 8)
+
+/*
+ * Add the raw data message to the list of received messages.
+ */
+static int receive_data(struct gb_raw *raw, u32 len, u8 *data)
+{
+ struct raw_data *raw_data;
+ struct device *dev = &raw->connection->bundle->dev;
+ int retval = 0;
+
+ if (len > MAX_PACKET_SIZE) {
+ dev_err(dev, "Too big of a data packet, rejected\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&raw->list_lock);
+ if ((raw->list_data + len) > MAX_DATA_SIZE) {
+ dev_err(dev, "Too much data in receive buffer, now dropping packets\n");
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ raw_data = kmalloc(struct_size(raw_data, data, len), GFP_KERNEL);
+ if (!raw_data) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ raw->list_data += len;
+ raw_data->len = len;
+ memcpy(&raw_data->data[0], data, len);
+
+ list_add_tail(&raw_data->entry, &raw->list);
+exit:
+ mutex_unlock(&raw->list_lock);
+ return retval;
+}
+
+static int gb_raw_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct device *dev = &connection->bundle->dev;
+ struct gb_raw *raw = greybus_get_drvdata(connection->bundle);
+ struct gb_raw_send_request *receive;
+ u32 len;
+
+ if (op->type != GB_RAW_TYPE_SEND) {
+ dev_err(dev, "unknown request type 0x%02x\n", op->type);
+ return -EINVAL;
+ }
+
+ /* Verify size of payload */
+ if (op->request->payload_size < sizeof(*receive)) {
+ dev_err(dev, "raw receive request too small (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*receive));
+ return -EINVAL;
+ }
+ receive = op->request->payload;
+ len = le32_to_cpu(receive->len);
+ if (len != (int)(op->request->payload_size - sizeof(__le32))) {
+ dev_err(dev, "raw receive request wrong size %d vs %d\n", len,
+ (int)(op->request->payload_size - sizeof(__le32)));
+ return -EINVAL;
+ }
+ if (len == 0) {
+ dev_err(dev, "raw receive request of 0 bytes?\n");
+ return -EINVAL;
+ }
+
+ return receive_data(raw, len, receive->data);
+}
+
+static int gb_raw_send(struct gb_raw *raw, u32 len, const char __user *data)
+{
+ struct gb_connection *connection = raw->connection;
+ struct gb_raw_send_request *request;
+ int retval;
+
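+	/* room for the request header plus the user data that follows it */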
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ if (copy_from_user(&request->data[0], data, len)) {
+ kfree(request);
+ return -EFAULT;
+ }
+
+ request->len = cpu_to_le32(len);
+
+ retval = gb_operation_sync(connection, GB_RAW_TYPE_SEND,
+ request, len + sizeof(*request),
+ NULL, 0);
+
+ kfree(request);
+ return retval;
+}
+
+static int gb_raw_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_raw *raw;
+ int retval;
+ int minor;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_RAW)
+ return -ENODEV;
+
+ raw = kzalloc(sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_raw_request_handler);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto error_free;
+ }
+
+ INIT_LIST_HEAD(&raw->list);
+ mutex_init(&raw->list_lock);
+
+ raw->connection = connection;
+ greybus_set_drvdata(bundle, raw);
+
+ minor = ida_alloc(&minors, GFP_KERNEL);
+ if (minor < 0) {
+ retval = minor;
+ goto error_connection_destroy;
+ }
+
+ raw->dev = MKDEV(raw_major, minor);
+ cdev_init(&raw->cdev, &raw_fops);
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto error_remove_ida;
+
+ retval = cdev_add(&raw->cdev, raw->dev, 1);
+ if (retval)
+ goto error_connection_disable;
+
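+	/* devtmpfs turns the '!' in the name into a '/', i.e. /dev/gb/raw<minor> */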
+ raw->device = device_create(&raw_class, &connection->bundle->dev,
+ raw->dev, raw, "gb!raw%d", minor);
+ if (IS_ERR(raw->device)) {
+ retval = PTR_ERR(raw->device);
+ goto error_del_cdev;
+ }
+
+ return 0;
+
+error_del_cdev:
+ cdev_del(&raw->cdev);
+
+error_connection_disable:
+ gb_connection_disable(connection);
+
+error_remove_ida:
+ ida_free(&minors, minor);
+
+error_connection_destroy:
+ gb_connection_destroy(connection);
+
+error_free:
+ kfree(raw);
+ return retval;
+}
+
+static void gb_raw_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_raw *raw = greybus_get_drvdata(bundle);
+ struct gb_connection *connection = raw->connection;
+ struct raw_data *raw_data;
+ struct raw_data *temp;
+
+ // FIXME - handle removing a connection when the char device node is open.
+ device_destroy(&raw_class, raw->dev);
+ cdev_del(&raw->cdev);
+ gb_connection_disable(connection);
+ ida_free(&minors, MINOR(raw->dev));
+ gb_connection_destroy(connection);
+
+ mutex_lock(&raw->list_lock);
+ list_for_each_entry_safe(raw_data, temp, &raw->list, entry) {
+ list_del(&raw_data->entry);
+ kfree(raw_data);
+ }
+ mutex_unlock(&raw->list_lock);
+
+ kfree(raw);
+}
+
+/*
+ * Character device node interfaces.
+ *
+ * Note, we are using read/write to only allow a single read/write per message.
+ * This means for read(), you have to provide a big enough buffer for the full
+ * message to be copied into. If the buffer isn't big enough, the read() will
+ * fail with -ENOSPC.
+ */
+
+static int raw_open(struct inode *inode, struct file *file)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct gb_raw *raw = container_of(cdev, struct gb_raw, cdev);
+
+ file->private_data = raw;
+ return 0;
+}
+
+static ssize_t raw_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gb_raw *raw = file->private_data;
+ int retval;
+
+ if (!count)
+ return 0;
+
+ if (count > MAX_PACKET_SIZE)
+ return -E2BIG;
+
+ retval = gb_raw_send(raw, count, buf);
+ if (retval)
+ return retval;
+
+ return count;
+}
+
+static ssize_t raw_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct gb_raw *raw = file->private_data;
+ int retval = 0;
+ struct raw_data *raw_data;
+
+ mutex_lock(&raw->list_lock);
+ if (list_empty(&raw->list))
+ goto exit;
+
+ raw_data = list_first_entry(&raw->list, struct raw_data, entry);
+ if (raw_data->len > count) {
+ retval = -ENOSPC;
+ goto exit;
+ }
+
+ if (copy_to_user(buf, &raw_data->data[0], raw_data->len)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ list_del(&raw_data->entry);
+ raw->list_data -= raw_data->len;
+ retval = raw_data->len;
+ kfree(raw_data);
+
+exit:
+ mutex_unlock(&raw->list_lock);
+ return retval;
+}
+
+static const struct file_operations raw_fops = {
+ .owner = THIS_MODULE,
+ .write = raw_write,
+ .read = raw_read,
+ .open = raw_open,
+ .llseek = noop_llseek,
+};
+
+static const struct greybus_bundle_id gb_raw_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_RAW) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_raw_id_table);
+
+static struct greybus_driver gb_raw_driver = {
+ .name = "raw",
+ .probe = gb_raw_probe,
+ .disconnect = gb_raw_disconnect,
+ .id_table = gb_raw_id_table,
+};
+
+static int raw_init(void)
+{
+ dev_t dev;
+ int retval;
+
+ retval = class_register(&raw_class);
+ if (retval)
+ goto error_class;
+
+ retval = alloc_chrdev_region(&dev, 0, NUM_MINORS, "gb_raw");
+ if (retval < 0)
+ goto error_chrdev;
+
+ raw_major = MAJOR(dev);
+
+ retval = greybus_register(&gb_raw_driver);
+ if (retval)
+ goto error_gb;
+
+ return 0;
+
+error_gb:
+ unregister_chrdev_region(dev, NUM_MINORS);
+error_chrdev:
+ class_unregister(&raw_class);
+error_class:
+ return retval;
+}
+module_init(raw_init);
+
+static void __exit raw_exit(void)
+{
+ greybus_deregister(&gb_raw_driver);
+ unregister_chrdev_region(MKDEV(raw_major, 0), NUM_MINORS);
+ class_unregister(&raw_class);
+ ida_destroy(&minors);
+}
+module_exit(raw_exit);
+
+MODULE_DESCRIPTION("Greybus driver for the Raw protocol");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
new file mode 100644
index 000000000000..5326ea372b24
--- /dev/null
+++ b/drivers/staging/greybus/sdio.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SD/MMC Greybus driver.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+
+struct gb_sdio_host {
+ struct gb_connection *connection;
+ struct gbphy_device *gbphy_dev;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mutex lock; /* lock for this host */
+ size_t data_max;
+ spinlock_t xfer; /* lock to cancel ongoing transfer */
+ bool xfer_stop;
+ struct workqueue_struct *mrq_workqueue;
+ struct work_struct mrqwork;
+ u8 queued_events;
+ bool removed;
+ bool card_present;
+ bool read_only;
+};
+
+#define GB_SDIO_RSP_R1_R5_R6_R7 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+ GB_SDIO_RSP_OPCODE)
+#define GB_SDIO_RSP_R3_R4 (GB_SDIO_RSP_PRESENT)
+#define GB_SDIO_RSP_R2 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+ GB_SDIO_RSP_136)
+#define GB_SDIO_RSP_R1B (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+ GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
+
+/* kernel vdd masks start at 0x80; greybus ones start at 0x01, hence the shift */
+#define GB_SDIO_VDD_SHIFT 8
+
+#ifndef MMC_CAP2_CORE_RUNTIME_PM
+#define MMC_CAP2_CORE_RUNTIME_PM 0
+#endif
+
+static inline bool single_op(struct mmc_command *cmd)
+{
+ u32 opcode = cmd->opcode;
+
+ return opcode == MMC_WRITE_BLOCK ||
+ opcode == MMC_READ_SINGLE_BLOCK;
+}
+
+static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
+{
+ u32 caps = 0;
+ u32 caps2 = 0;
+
+ caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
+ ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
+ ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
+ ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
+ ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
+ ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
+ ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
+ ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
+ ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
+ ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
+ ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
+ ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);
+
+ caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
+ ((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
+ ((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
+ ((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);
+
+ host->mmc->caps = caps;
+ host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;
+
+ if (caps & MMC_CAP_NONREMOVABLE)
+ host->card_present = true;
+}
+
+static u32 _gb_sdio_get_host_ocr(u32 ocr)
+{
+ return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
+ ((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
+ ((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
+ ((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
+ ((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
+ ((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
+ ((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
+ ((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
+ ((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
+ ((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
+ ((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
+ ((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
+ ((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
+ ((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
+ ((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
+ ((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
+ ((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
+ );
+}
+
+static int gb_sdio_get_caps(struct gb_sdio_host *host)
+{
+ struct gb_sdio_get_caps_response response;
+ struct mmc_host *mmc = host->mmc;
+ u16 data_max;
+ u32 blksz;
+ u32 ocr;
+ u32 r;
+ int ret;
+
+ ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
+ NULL, 0, &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+ r = le32_to_cpu(response.caps);
+
+ _gb_sdio_set_host_caps(host, r);
+
+ /* get the max block size that could fit our payload */
+ data_max = gb_operation_get_payload_size_max(host->connection);
+ data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
+ data_max - sizeof(struct gb_sdio_transfer_response));
+
+ blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
+ blksz = max_t(u32, 512, blksz);
+
+ mmc->max_blk_size = rounddown_pow_of_two(blksz);
+ mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
+ host->data_max = data_max;
+
+ /* get ocr supported values */
+ ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
+ mmc->ocr_avail = ocr;
+ mmc->ocr_avail_sdio = mmc->ocr_avail;
+ mmc->ocr_avail_sd = mmc->ocr_avail;
+ mmc->ocr_avail_mmc = mmc->ocr_avail;
+
+ /* get frequency range values */
+ mmc->f_min = le32_to_cpu(response.f_min);
+ mmc->f_max = le32_to_cpu(response.f_max);
+
+ return 0;
+}
+
+static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
+{
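+	/* the latest insert/remove event supersedes a queued opposite one */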
+ if (event & GB_SDIO_CARD_INSERTED)
+ host->queued_events &= ~GB_SDIO_CARD_REMOVED;
+ else if (event & GB_SDIO_CARD_REMOVED)
+ host->queued_events &= ~GB_SDIO_CARD_INSERTED;
+
+ host->queued_events |= event;
+}
+
+static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
+{
+ u8 state_changed = 0;
+
+ if (event & GB_SDIO_CARD_INSERTED) {
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ return 0;
+ if (host->card_present)
+ return 0;
+ host->card_present = true;
+ state_changed = 1;
+ }
+
+ if (event & GB_SDIO_CARD_REMOVED) {
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ return 0;
+ if (!(host->card_present))
+ return 0;
+ host->card_present = false;
+ state_changed = 1;
+ }
+
+ if (event & GB_SDIO_WP)
+ host->read_only = true;
+
+ if (state_changed) {
+		dev_info(mmc_dev(host->mmc), "card %s\n",
+ (host->card_present ? "inserted" : "removed"));
+ mmc_detect_change(host->mmc, 0);
+ }
+
+ return 0;
+}
+
+static int gb_sdio_request_handler(struct gb_operation *op)
+{
+ struct gb_sdio_host *host = gb_connection_get_data(op->connection);
+ struct gb_message *request;
+ struct gb_sdio_event_request *payload;
+ u8 type = op->type;
+ int ret = 0;
+ u8 event;
+
+ if (type != GB_SDIO_TYPE_EVENT) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported unsolicited event: %u\n", type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+ event = payload->event;
+
+ if (host->removed)
+ _gb_queue_event(host, event);
+ else
+ ret = _gb_sdio_process_events(host, event);
+
+ return ret;
+}
+
+static int gb_sdio_set_ios(struct gb_sdio_host *host,
+ struct gb_sdio_set_ios_request *request)
+{
+ int ret;
+
+ ret = gbphy_runtime_get_sync(host->gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
+ sizeof(*request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(host->gbphy_dev);
+
+ return ret;
+}
+
+static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
+ size_t len, u16 nblocks, off_t skip)
+{
+ struct gb_sdio_transfer_request *request;
+ struct gb_sdio_transfer_response *response;
+ struct gb_operation *operation;
+ struct scatterlist *sg = data->sg;
+ unsigned int sg_len = data->sg_len;
+ size_t copied;
+ u16 send_blksz;
+ u16 send_blocks;
+ int ret;
+
+ WARN_ON(len > host->data_max);
+
+ operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
+ len + sizeof(*request),
+ sizeof(*response), GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ request = operation->request->payload;
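+	/* shift the MMC_DATA_* flags down into the greybus data_flags byte */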
+ request->data_flags = data->flags >> 8;
+ request->data_blocks = cpu_to_le16(nblocks);
+ request->data_blksz = cpu_to_le16(data->blksz);
+
+ copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);
+
+ if (copied != len) {
+ ret = -EINVAL;
+ goto err_put_operation;
+ }
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret < 0)
+ goto err_put_operation;
+
+ response = operation->response->payload;
+
+ send_blocks = le16_to_cpu(response->data_blocks);
+ send_blksz = le16_to_cpu(response->data_blksz);
+
+ if (len != send_blksz * send_blocks) {
+ dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
+ len, send_blksz * send_blocks);
+ ret = -EINVAL;
+ }
+
+err_put_operation:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
+ size_t len, u16 nblocks, off_t skip)
+{
+ struct gb_sdio_transfer_request *request;
+ struct gb_sdio_transfer_response *response;
+ struct gb_operation *operation;
+ struct scatterlist *sg = data->sg;
+ unsigned int sg_len = data->sg_len;
+ size_t copied;
+ u16 recv_blksz;
+ u16 recv_blocks;
+ int ret;
+
+ WARN_ON(len > host->data_max);
+
+ operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
+ sizeof(*request),
+ len + sizeof(*response), GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ request = operation->request->payload;
+ request->data_flags = data->flags >> 8;
+ request->data_blocks = cpu_to_le16(nblocks);
+ request->data_blksz = cpu_to_le16(data->blksz);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret < 0)
+ goto err_put_operation;
+
+ response = operation->response->payload;
+ recv_blocks = le16_to_cpu(response->data_blocks);
+ recv_blksz = le16_to_cpu(response->data_blksz);
+
+ if (len != recv_blksz * recv_blocks) {
+ dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
+ recv_blksz * recv_blocks, len);
+ ret = -EINVAL;
+ goto err_put_operation;
+ }
+
+ copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
+ skip);
+ if (copied != len)
+ ret = -EINVAL;
+
+err_put_operation:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
+{
+ size_t left, len;
+ off_t skip = 0;
+ int ret = 0;
+ u16 nblocks;
+
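+	/* a single-block command must not be asked to move multiple blocks */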
+ if (single_op(data->mrq->cmd) && data->blocks > 1) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ left = data->blksz * data->blocks;
+
+ while (left) {
+		/* check if a stop transmission is pending */
+ spin_lock(&host->xfer);
+ if (host->xfer_stop) {
+ host->xfer_stop = false;
+ spin_unlock(&host->xfer);
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock(&host->xfer);
+ len = min(left, host->data_max);
+ nblocks = len / data->blksz;
+ len = nblocks * data->blksz;
+
+ if (data->flags & MMC_DATA_READ) {
+ ret = _gb_sdio_recv(host, data, len, nblocks, skip);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = _gb_sdio_send(host, data, len, nblocks, skip);
+ if (ret < 0)
+ goto out;
+ }
+ data->bytes_xfered += len;
+ left -= len;
+ skip += len;
+ }
+
+out:
+ data->error = ret;
+ return ret;
+}
+
+static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
+{
+ struct gb_sdio_command_request request = {0};
+ struct gb_sdio_command_response response;
+ struct mmc_data *data = host->mrq->data;
+ unsigned int timeout_ms;
+ u8 cmd_flags;
+ u8 cmd_type;
+ int i;
+ int ret;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ cmd_flags = GB_SDIO_RSP_NONE;
+ break;
+ case MMC_RSP_R1:
+ cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
+ break;
+ case MMC_RSP_R1B:
+ cmd_flags = GB_SDIO_RSP_R1B;
+ break;
+ case MMC_RSP_R2:
+ cmd_flags = GB_SDIO_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ cmd_flags = GB_SDIO_RSP_R3_R4;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
+ mmc_resp_type(cmd));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ cmd_type = GB_SDIO_CMD_BC;
+ break;
+ case MMC_CMD_BCR:
+ cmd_type = GB_SDIO_CMD_BCR;
+ break;
+ case MMC_CMD_AC:
+ cmd_type = GB_SDIO_CMD_AC;
+ break;
+ case MMC_CMD_ADTC:
+ cmd_type = GB_SDIO_CMD_ADTC;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
+ mmc_cmd_type(cmd));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ request.cmd = cmd->opcode;
+ request.cmd_flags = cmd_flags;
+ request.cmd_type = cmd_type;
+ request.cmd_arg = cpu_to_le32(cmd->arg);
+	/* some controllers need to know the data details at command time */
+ if (data) {
+ request.data_blocks = cpu_to_le16(data->blocks);
+ request.data_blksz = cpu_to_le16(data->blksz);
+ }
+
+ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
+ GB_OPERATION_TIMEOUT_DEFAULT;
+
+ ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
+ &request, sizeof(request), &response,
+ sizeof(response), timeout_ms);
+ if (ret < 0)
+ goto out;
+
+ /* no response expected */
+ if (cmd_flags == GB_SDIO_RSP_NONE)
+ goto out;
+
+ /* long response expected */
+ if (cmd_flags & GB_SDIO_RSP_R2)
+ for (i = 0; i < 4; i++)
+ cmd->resp[i] = le32_to_cpu(response.resp[i]);
+ else
+ cmd->resp[0] = le32_to_cpu(response.resp[0]);
+
+out:
+ cmd->error = ret;
+ return ret;
+}
+
+static void gb_sdio_mrq_work(struct work_struct *work)
+{
+ struct gb_sdio_host *host;
+ struct mmc_request *mrq;
+ int ret;
+
+ host = container_of(work, struct gb_sdio_host, mrqwork);
+
+ ret = gbphy_runtime_get_sync(host->gbphy_dev);
+ if (ret)
+ return;
+
+ mutex_lock(&host->lock);
+ mrq = host->mrq;
+ if (!mrq) {
+ mutex_unlock(&host->lock);
+ gbphy_runtime_put_autosuspend(host->gbphy_dev);
+		dev_err(mmc_dev(host->mmc), "mmc request is NULL\n");
+ return;
+ }
+
+ if (host->removed) {
+ mrq->cmd->error = -ESHUTDOWN;
+ goto done;
+ }
+
+ if (mrq->sbc) {
+ ret = gb_sdio_command(host, mrq->sbc);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = gb_sdio_command(host, mrq->cmd);
+ if (ret < 0)
+ goto done;
+
+ if (mrq->data) {
+ ret = gb_sdio_transfer(host, mrq->data);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (mrq->stop) {
+ ret = gb_sdio_command(host, mrq->stop);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ host->mrq = NULL;
+ mutex_unlock(&host->lock);
+ mmc_request_done(host->mmc, mrq);
+ gbphy_runtime_put_autosuspend(host->gbphy_dev);
+}
+
+static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+	/* Check if it is a cancel of an ongoing transfer */
+ if (cmd->opcode == MMC_STOP_TRANSMISSION) {
+ spin_lock(&host->xfer);
+ host->xfer_stop = true;
+ spin_unlock(&host->xfer);
+ }
+
+ mutex_lock(&host->lock);
+
+ WARN_ON(host->mrq);
+ host->mrq = mrq;
+
+ if (host->removed) {
+ mrq->cmd->error = -ESHUTDOWN;
+ goto out;
+ }
+ if (!host->card_present) {
+ mrq->cmd->error = -ENOMEDIUM;
+ goto out;
+ }
+
+ queue_work(host->mrq_workqueue, &host->mrqwork);
+
+ mutex_unlock(&host->lock);
+ return;
+
+out:
+ host->mrq = NULL;
+ mutex_unlock(&host->lock);
+ mmc_request_done(mmc, mrq);
+}
+
+static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+ struct gb_sdio_set_ios_request request;
+ int ret;
+ u8 power_mode;
+ u8 bus_width;
+ u8 timing;
+ u8 signal_voltage;
+ u8 drv_type;
+ u32 vdd = 0;
+
+ mutex_lock(&host->lock);
+ request.clock = cpu_to_le32(ios->clock);
+
+ if (ios->vdd)
+ vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
+ request.vdd = cpu_to_le32(vdd);
+
+ request.bus_mode = ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
+ GB_SDIO_BUSMODE_OPENDRAIN :
+ GB_SDIO_BUSMODE_PUSHPULL;
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ default:
+ power_mode = GB_SDIO_POWER_OFF;
+ break;
+ case MMC_POWER_UP:
+ power_mode = GB_SDIO_POWER_UP;
+ break;
+ case MMC_POWER_ON:
+ power_mode = GB_SDIO_POWER_ON;
+ break;
+ case MMC_POWER_UNDEFINED:
+ power_mode = GB_SDIO_POWER_UNDEFINED;
+ break;
+ }
+ request.power_mode = power_mode;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ bus_width = GB_SDIO_BUS_WIDTH_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ default:
+ bus_width = GB_SDIO_BUS_WIDTH_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ bus_width = GB_SDIO_BUS_WIDTH_8;
+ break;
+ }
+ request.bus_width = bus_width;
+
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ default:
+ timing = GB_SDIO_TIMING_LEGACY;
+ break;
+ case MMC_TIMING_MMC_HS:
+ timing = GB_SDIO_TIMING_MMC_HS;
+ break;
+ case MMC_TIMING_SD_HS:
+ timing = GB_SDIO_TIMING_SD_HS;
+ break;
+ case MMC_TIMING_UHS_SDR12:
+ timing = GB_SDIO_TIMING_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ timing = GB_SDIO_TIMING_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ timing = GB_SDIO_TIMING_UHS_SDR50;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ timing = GB_SDIO_TIMING_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ timing = GB_SDIO_TIMING_UHS_DDR50;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ timing = GB_SDIO_TIMING_MMC_DDR52;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ timing = GB_SDIO_TIMING_MMC_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ timing = GB_SDIO_TIMING_MMC_HS400;
+ break;
+ }
+ request.timing = timing;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ default:
+ signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
+ break;
+ case MMC_SIGNAL_VOLTAGE_120:
+ signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
+ break;
+ }
+ request.signal_voltage = signal_voltage;
+
+ switch (ios->drv_type) {
+ case MMC_SET_DRIVER_TYPE_A:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
+ break;
+ case MMC_SET_DRIVER_TYPE_C:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
+ break;
+ case MMC_SET_DRIVER_TYPE_D:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
+ break;
+ case MMC_SET_DRIVER_TYPE_B:
+ default:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
+ break;
+ }
+ request.drv_type = drv_type;
+
+ ret = gb_sdio_set_ios(host, &request);
+ if (ret < 0)
+ goto out;
+
+ memcpy(&mmc->ios, ios, sizeof(mmc->ios));
+
+out:
+ mutex_unlock(&host->lock);
+}
+
+static int gb_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+
+ mutex_lock(&host->lock);
+ if (host->removed) {
+ mutex_unlock(&host->lock);
+ return -ESHUTDOWN;
+ }
+ mutex_unlock(&host->lock);
+
+ return host->read_only;
+}
+
+static int gb_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+
+ mutex_lock(&host->lock);
+ if (host->removed) {
+ mutex_unlock(&host->lock);
+ return -ESHUTDOWN;
+ }
+ mutex_unlock(&host->lock);
+
+ return host->card_present;
+}
+
+static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ return 0;
+}
+
+static const struct mmc_host_ops gb_sdio_ops = {
+ .request = gb_mmc_request,
+ .set_ios = gb_mmc_set_ios,
+ .get_ro = gb_mmc_get_ro,
+ .get_cd = gb_mmc_get_cd,
+ .start_signal_voltage_switch = gb_mmc_switch_voltage,
+};
+
+static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct mmc_host *mmc;
+ struct gb_sdio_host *host;
+ int ret = 0;
+
+ mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ gb_sdio_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto exit_mmc_free;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
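+	/* treat the host as removed until setup completes, so events are only queued */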
+ host->removed = true;
+
+ host->connection = connection;
+ gb_connection_set_data(connection, host);
+ host->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, host);
+
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_sdio_get_caps(host);
+ if (ret < 0)
+ goto exit_connection_disable;
+
+ mmc->ops = &gb_sdio_ops;
+
+ mmc->max_segs = host->mmc->max_blk_count;
+
+	/* for now, map the max request size 1:1 to the segment size */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ mutex_init(&host->lock);
+ spin_lock_init(&host->xfer);
+ host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
+ dev_name(&gbphy_dev->dev));
+ if (!host->mrq_workqueue) {
+ ret = -ENOMEM;
+ goto exit_connection_disable;
+ }
+ INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_wq_destroy;
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto exit_wq_destroy;
+ host->removed = false;
+ ret = _gb_sdio_process_events(host, host->queued_events);
+ host->queued_events = 0;
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+
+exit_wq_destroy:
+ destroy_workqueue(host->mrq_workqueue);
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_mmc_free:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = host->connection;
+ struct mmc_host *mmc;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ mutex_lock(&host->lock);
+ host->removed = true;
+ mmc = host->mmc;
+ gb_connection_set_data(connection, NULL);
+ mutex_unlock(&host->lock);
+
+ destroy_workqueue(host->mrq_workqueue);
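+	/* stop unsolicited events before removing the mmc host */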
+ gb_connection_disable_rx(connection);
+ mmc_remove_host(mmc);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ mmc_free_host(mmc);
+}
+
+static const struct gbphy_device_id gb_sdio_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);
+
+static struct gbphy_driver sdio_driver = {
+ .name = "sdio",
+ .probe = gb_sdio_probe,
+ .remove = gb_sdio_remove,
+ .id_table = gb_sdio_id_table,
+};
+
+module_gbphy_driver(sdio_driver);
+MODULE_DESCRIPTION("SD/MMC Greybus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
new file mode 100644
index 000000000000..45ea8d1bdd51
--- /dev/null
+++ b/drivers/staging/greybus/spi.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI bridge PHY driver.
+ *
+ * Copyright 2014-2016 Google Inc.
+ * Copyright 2014-2016 Linaro Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+#include "spilib.h"
+
+static struct spilib_ops *spilib_ops;
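+/* this driver needs no private spilib callbacks, so this stays NULL */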
+
+static int gb_spi_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ int ret;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_spilib_master_init(connection, &gbphy_dev->dev, spilib_ops);
+ if (ret)
+ goto exit_connection_disable;
+
+ gb_gbphy_set_data(gbphy_dev, connection);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+
+ return ret;
+}
+
+static void gb_spi_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_connection *connection = gb_gbphy_get_data(gbphy_dev);
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ gb_spilib_master_exit(connection);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+}
+
+static const struct gbphy_device_id gb_spi_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SPI) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_spi_id_table);
+
+static struct gbphy_driver spi_driver = {
+ .name = "spi",
+ .probe = gb_spi_probe,
+ .remove = gb_spi_remove,
+ .id_table = gb_spi_id_table,
+};
+
+module_gbphy_driver(spi_driver);
+MODULE_DESCRIPTION("Greybus SPI bridge PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
new file mode 100644
index 000000000000..24e9c909fa02
--- /dev/null
+++ b/drivers/staging/greybus/spilib.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus SPI library
+ *
+ * Copyright 2014-2016 Google Inc.
+ * Copyright 2014-2016 Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/greybus.h>
+#include <linux/spi/spi.h>
+
+#include "spilib.h"
+
+struct gb_spilib {
+ struct gb_connection *connection;
+ struct device *parent;
+ struct spi_transfer *first_xfer;
+ struct spi_transfer *last_xfer;
+ struct spilib_ops *ops;
+ u32 rx_xfer_offset;
+ u32 tx_xfer_offset;
+ u32 last_xfer_size;
+ unsigned int op_timeout;
+ u16 mode;
+ u16 flags;
+ u32 bits_per_word_mask;
+ u8 num_chipselect;
+ u32 min_speed_hz;
+ u32 max_speed_hz;
+};
+
+#define GB_SPI_STATE_MSG_DONE ((void *)0)
+#define GB_SPI_STATE_MSG_IDLE ((void *)1)
+#define GB_SPI_STATE_MSG_RUNNING ((void *)2)
+#define GB_SPI_STATE_OP_READY ((void *)3)
+#define GB_SPI_STATE_OP_DONE ((void *)4)
+#define GB_SPI_STATE_MSG_ERROR ((void *)-1)
+
+#define XFER_TIMEOUT_TOLERANCE 200
+
+static struct spi_controller *get_controller_from_spi(struct gb_spilib *spi)
+{
+ return gb_connection_get_data(spi->connection);
+}
+
+static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
+{
+ size_t headers_size;
+
+ data_max -= sizeof(struct gb_spi_transfer_request);
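+	/* +1 accounts for the descriptor of the transfer being added */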
+ headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
+
+ return tx_size + headers_size > data_max ? 0 : 1;
+}
+
+static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
+ size_t data_max)
+{
+ size_t rx_xfer_size;
+
+ data_max -= sizeof(struct gb_spi_transfer_response);
+
+ if (rx_size + len > data_max)
+ rx_xfer_size = data_max - rx_size;
+ else
+ rx_xfer_size = len;
+
+	/* for a write_read transfer, read the same amount as we write, for symmetry */
+ if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
+ rx_xfer_size = *tx_xfer_size;
+ if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
+ *tx_xfer_size = rx_xfer_size;
+
+ return rx_xfer_size;
+}
+
+static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
+ size_t data_max)
+{
+ size_t headers_size;
+
+ data_max -= sizeof(struct gb_spi_transfer_request);
+ headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
+
+ if (tx_size + headers_size + len > data_max)
+ return data_max - (tx_size + sizeof(struct gb_spi_transfer));
+
+ return len;
+}
+
+static void clean_xfer_state(struct gb_spilib *spi)
+{
+ spi->first_xfer = NULL;
+ spi->last_xfer = NULL;
+ spi->rx_xfer_offset = 0;
+ spi->tx_xfer_offset = 0;
+ spi->last_xfer_size = 0;
+ spi->op_timeout = 0;
+}
+
+static bool is_last_xfer_done(struct gb_spilib *spi)
+{
+ struct spi_transfer *last_xfer = spi->last_xfer;
+
+ if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
+ (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
+ return true;
+
+ return false;
+}
+
+static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
+{
+ struct spi_transfer *last_xfer = spi->last_xfer;
+
+ if (msg->state != GB_SPI_STATE_OP_DONE)
+ return 0;
+
+ /*
+ * if we transferred all content of the last transfer, reset values and
+ * check if this was the last transfer in the message
+ */
+ if (is_last_xfer_done(spi)) {
+ spi->tx_xfer_offset = 0;
+ spi->rx_xfer_offset = 0;
+ spi->op_timeout = 0;
+ if (last_xfer == list_last_entry(&msg->transfers,
+ struct spi_transfer,
+ transfer_list))
+ msg->state = GB_SPI_STATE_MSG_DONE;
+ else
+ spi->first_xfer = list_next_entry(last_xfer,
+ transfer_list);
+ return 0;
+ }
+
+ spi->first_xfer = last_xfer;
+ if (last_xfer->tx_buf)
+ spi->tx_xfer_offset += spi->last_xfer_size;
+
+ if (last_xfer->rx_buf)
+ spi->rx_xfer_offset += spi->last_xfer_size;
+
+ return 0;
+}
+
+static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
+ struct spi_message *msg)
+{
+ if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
+ transfer_list))
+ return NULL;
+
+ return list_next_entry(xfer, transfer_list);
+}
+
+/* Routines to transfer data */
+static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
+ struct gb_connection *connection, struct spi_message *msg)
+{
+ struct gb_spi_transfer_request *request;
+ struct spi_device *dev = msg->spi;
+ struct spi_transfer *xfer;
+ struct gb_spi_transfer *gb_xfer;
+ struct gb_operation *operation;
+ u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
+ u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
+ u32 total_len = 0;
+ unsigned int xfer_timeout;
+ size_t data_max;
+ void *tx_data;
+
+ data_max = gb_operation_get_payload_size_max(connection);
+ xfer = spi->first_xfer;
+
+ /* Find number of transfers queued and tx/rx length in the message */
+
+ while (msg->state != GB_SPI_STATE_OP_READY) {
+ msg->state = GB_SPI_STATE_MSG_RUNNING;
+ spi->last_xfer = xfer;
+
+ if (!xfer->tx_buf && !xfer->rx_buf) {
+ dev_err(spi->parent,
+ "bufferless transfer, length %u\n", xfer->len);
+ msg->state = GB_SPI_STATE_MSG_ERROR;
+ return NULL;
+ }
+
+ tx_xfer_size = 0;
+ rx_xfer_size = 0;
+
+ if (xfer->tx_buf) {
+ len = xfer->len - spi->tx_xfer_offset;
+ if (!tx_header_fit_operation(tx_size, count, data_max))
+ break;
+ tx_xfer_size = calc_tx_xfer_size(tx_size, count,
+ len, data_max);
+ spi->last_xfer_size = tx_xfer_size;
+ }
+
+ if (xfer->rx_buf) {
+ len = xfer->len - spi->rx_xfer_offset;
+ rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
+ len, data_max);
+ spi->last_xfer_size = rx_xfer_size;
+ }
+
+ tx_size += tx_xfer_size;
+ rx_size += rx_xfer_size;
+
+ total_len += spi->last_xfer_size;
+ count++;
+
+ xfer = get_next_xfer(xfer, msg);
+ if (!xfer || total_len >= data_max)
+ msg->state = GB_SPI_STATE_OP_READY;
+ }
+
+ /*
+ * In addition to space for all message descriptors we need
+ * to have enough to hold all tx data.
+ */
+ request_size = sizeof(*request);
+ request_size += count * sizeof(*gb_xfer);
+ request_size += tx_size;
+
+ /* Response consists only of incoming data */
+ operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
+ request_size, rx_size, GFP_KERNEL);
+ if (!operation)
+ return NULL;
+
+ request = operation->request->payload;
+ request->count = cpu_to_le16(count);
+ request->mode = dev->mode;
+ request->chip_select = spi_get_chipselect(dev, 0);
+
+ gb_xfer = &request->transfers[0];
+ tx_data = gb_xfer + count; /* place tx data after last gb_xfer */
+
+ /* Fill in the transfers array */
+ xfer = spi->first_xfer;
+ while (msg->state != GB_SPI_STATE_OP_DONE) {
+ int xfer_delay;
+
+ if (xfer == spi->last_xfer)
+ xfer_len = spi->last_xfer_size;
+ else
+ xfer_len = xfer->len;
+
+ /* make sure we do not timeout in a slow transfer */
+ xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
+ xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;
+
+ if (xfer_timeout > spi->op_timeout)
+ spi->op_timeout = xfer_timeout;
+
+ gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
+ gb_xfer->len = cpu_to_le32(xfer_len);
+ xfer_delay = spi_delay_to_ns(&xfer->delay, xfer) / 1000;
+ xfer_delay = clamp_t(u16, xfer_delay, 0, U16_MAX);
+ gb_xfer->delay_usecs = cpu_to_le16(xfer_delay);
+ gb_xfer->cs_change = xfer->cs_change;
+ gb_xfer->bits_per_word = xfer->bits_per_word;
+
+ /* Copy tx data */
+ if (xfer->tx_buf) {
+ gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
+ memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
+ xfer_len);
+ tx_data += xfer_len;
+ }
+
+ if (xfer->rx_buf)
+ gb_xfer->xfer_flags |= GB_SPI_XFER_READ;
+
+ if (xfer == spi->last_xfer) {
+ if (!is_last_xfer_done(spi))
+ gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
+ msg->state = GB_SPI_STATE_OP_DONE;
+ continue;
+ }
+
+ gb_xfer++;
+ xfer = get_next_xfer(xfer, msg);
+ }
+
+ msg->actual_length += total_len;
+
+ return operation;
+}
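+
+/*
+ * The request built above is a gb_spi_transfer_request header followed by
+ * `count' gb_spi_transfer descriptors and the concatenated tx data; the
+ * response carries only the concatenated rx data, which is unpacked below.
+ */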
+
+static void gb_spi_decode_response(struct gb_spilib *spi,
+ struct spi_message *msg,
+ struct gb_spi_transfer_response *response)
+{
+ struct spi_transfer *xfer = spi->first_xfer;
+ void *rx_data = response->data;
+ u32 xfer_len;
+
+ while (xfer) {
+ /* Copy rx data */
+ if (xfer->rx_buf) {
+ if (xfer == spi->first_xfer)
+ xfer_len = xfer->len - spi->rx_xfer_offset;
+ else if (xfer == spi->last_xfer)
+ xfer_len = spi->last_xfer_size;
+ else
+ xfer_len = xfer->len;
+
+ memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
+ xfer_len);
+ rx_data += xfer_len;
+ }
+
+ if (xfer == spi->last_xfer)
+ break;
+
+ xfer = list_next_entry(xfer, transfer_list);
+ }
+}
+
+static int gb_spi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
+ struct gb_connection *connection = spi->connection;
+ struct gb_spi_transfer_response *response;
+ struct gb_operation *operation;
+ int ret = 0;
+
+ spi->first_xfer = list_first_entry_or_null(&msg->transfers,
+ struct spi_transfer,
+ transfer_list);
+ if (!spi->first_xfer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg->state = GB_SPI_STATE_MSG_IDLE;
+
+ while (msg->state != GB_SPI_STATE_MSG_DONE &&
+ msg->state != GB_SPI_STATE_MSG_ERROR) {
+ operation = gb_spi_operation_create(spi, connection, msg);
+ if (!operation) {
+ msg->state = GB_SPI_STATE_MSG_ERROR;
+ ret = -EINVAL;
+ continue;
+ }
+
+ ret = gb_operation_request_send_sync_timeout(operation,
+ spi->op_timeout);
+ if (!ret) {
+ response = operation->response->payload;
+ if (response)
+ gb_spi_decode_response(spi, msg, response);
+ } else {
+ dev_err(spi->parent,
+ "transfer operation failed: %d\n", ret);
+ msg->state = GB_SPI_STATE_MSG_ERROR;
+ }
+
+ gb_operation_put(operation);
+ setup_next_xfer(spi, msg);
+ }
+
+out:
+ msg->status = ret;
+ clean_xfer_state(spi);
+ spi_finalize_current_message(ctlr);
+
+ return ret;
+}
+
+static int gb_spi_prepare_transfer_hardware(struct spi_controller *ctlr)
+{
+ struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
+
+ return spi->ops->prepare_transfer_hardware(spi->parent);
+}
+
+static int gb_spi_unprepare_transfer_hardware(struct spi_controller *ctlr)
+{
+ struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
+
+ spi->ops->unprepare_transfer_hardware(spi->parent);
+
+ return 0;
+}
+
+static int gb_spi_setup(struct spi_device *spi)
+{
+ /* Nothing to do for now */
+ return 0;
+}
+
+static void gb_spi_cleanup(struct spi_device *spi)
+{
+ /* Nothing to do for now */
+}
+
+/* Routines to get controller information */
+
+/*
+ * Map Greybus spi mode bits/flags/bpw into Linux ones.
+ * All bits are the same for now, so these macros return the same values.
+ */
+#define gb_spi_mode_map(mode) mode
+#define gb_spi_flags_map(flags) flags
+
+static int gb_spi_get_master_config(struct gb_spilib *spi)
+{
+ struct gb_spi_master_config_response response;
+ u16 mode, flags;
+ int ret;
+
+ ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
+ NULL, 0, &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ mode = le16_to_cpu(response.mode);
+ spi->mode = gb_spi_mode_map(mode);
+
+ flags = le16_to_cpu(response.flags);
+ spi->flags = gb_spi_flags_map(flags);
+
+ spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
+ spi->num_chipselect = response.num_chipselect;
+
+ spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
+ spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
+
+ return 0;
+}
+
+static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
+{
+ struct spi_controller *ctlr = get_controller_from_spi(spi);
+ struct gb_spi_device_config_request request;
+ struct gb_spi_device_config_response response;
+ struct spi_board_info spi_board = { {0} };
+ struct spi_device *spidev;
+ int ret;
+ u8 dev_type;
+
+ request.chip_select = cs;
+
+ ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ dev_type = response.device_type;
+
+ if (dev_type == GB_SPI_SPI_DEV)
+ strscpy(spi_board.modalias, "spidev",
+ sizeof(spi_board.modalias));
+ else if (dev_type == GB_SPI_SPI_NOR)
+ strscpy(spi_board.modalias, "spi-nor",
+ sizeof(spi_board.modalias));
+ else if (dev_type == GB_SPI_SPI_MODALIAS)
+ memcpy(spi_board.modalias, response.name,
+ sizeof(spi_board.modalias));
+ else
+ return -EINVAL;
+
+ spi_board.mode = le16_to_cpu(response.mode);
+ spi_board.bus_num = ctlr->bus_num;
+ spi_board.chip_select = cs;
+ spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);
+
+ spidev = spi_new_device(ctlr, &spi_board);
+ if (!spidev)
+ return -EINVAL;
+
+ return 0;
+}
+
+int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
+ struct spilib_ops *ops)
+{
+ struct gb_spilib *spi;
+ struct spi_controller *ctlr;
+ int ret;
+ u8 i;
+
+ /* Allocate host with space for data */
+ ctlr = spi_alloc_host(dev, sizeof(*spi));
+ if (!ctlr) {
+ dev_err(dev, "cannot alloc SPI host\n");
+ return -ENOMEM;
+ }
+
+ spi = spi_controller_get_devdata(ctlr);
+ spi->connection = connection;
+ gb_connection_set_data(connection, ctlr);
+ spi->parent = dev;
+ spi->ops = ops;
+
+ /* get controller configuration */
+ ret = gb_spi_get_master_config(spi);
+ if (ret)
+ goto exit_spi_put;
+
+ ctlr->bus_num = -1; /* Allow spi-core to allocate it dynamically */
+ ctlr->num_chipselect = spi->num_chipselect;
+ ctlr->mode_bits = spi->mode;
+ ctlr->flags = spi->flags;
+ ctlr->bits_per_word_mask = spi->bits_per_word_mask;
+
+ /* Attach methods */
+ ctlr->cleanup = gb_spi_cleanup;
+ ctlr->setup = gb_spi_setup;
+ ctlr->transfer_one_message = gb_spi_transfer_one_message;
+
+ if (ops && ops->prepare_transfer_hardware) {
+ ctlr->prepare_transfer_hardware =
+ gb_spi_prepare_transfer_hardware;
+ }
+
+ if (ops && ops->unprepare_transfer_hardware) {
+ ctlr->unprepare_transfer_hardware =
+ gb_spi_unprepare_transfer_hardware;
+ }
+
+ ctlr->auto_runtime_pm = true;
+
+ ret = spi_register_controller(ctlr);
+ if (ret < 0)
+ goto exit_spi_put;
+
+ /* now, fetch the devices configuration */
+ for (i = 0; i < spi->num_chipselect; i++) {
+ ret = gb_spi_setup_device(spi, i);
+ if (ret < 0) {
+ dev_err(dev, "failed to allocate spi device %d: %d\n",
+ i, ret);
+ goto exit_spi_unregister;
+ }
+ }
+
+ return 0;
+
+exit_spi_put:
+ spi_controller_put(ctlr);
+
+ return ret;
+
+exit_spi_unregister:
+ spi_unregister_controller(ctlr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_spilib_master_init);
+
+void gb_spilib_master_exit(struct gb_connection *connection)
+{
+ struct spi_controller *ctlr = gb_connection_get_data(connection);
+
+ spi_unregister_controller(ctlr);
+}
+EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
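+
+/*
+ * Minimal usage sketch (hypothetical caller; the my_* names and the gbphy_dev
+ * device are illustrative only, and both callbacks are optional):
+ *
+ *	static int my_prepare(struct device *dev) { ... }
+ *	static void my_unprepare(struct device *dev) { ... }
+ *
+ *	static struct spilib_ops my_spilib_ops = {
+ *		.prepare_transfer_hardware	= my_prepare,
+ *		.unprepare_transfer_hardware	= my_unprepare,
+ *	};
+ *
+ *	err = gb_spilib_master_init(connection, &gbphy_dev->dev,
+ *				    &my_spilib_ops);
+ *	...
+ *	gb_spilib_master_exit(connection);
+ */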
+
+MODULE_DESCRIPTION("Greybus SPI library");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
new file mode 100644
index 000000000000..9d416839e3be
--- /dev/null
+++ b/drivers/staging/greybus/spilib.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus SPI library header
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __SPILIB_H
+#define __SPILIB_H
+
+struct device;
+struct gb_connection;
+
+struct spilib_ops {
+ int (*prepare_transfer_hardware)(struct device *dev);
+ void (*unprepare_transfer_hardware)(struct device *dev);
+};
+
+int gb_spilib_master_init(struct gb_connection *connection,
+ struct device *dev, struct spilib_ops *ops);
+void gb_spilib_master_exit(struct gb_connection *connection);
+
+#endif /* __SPILIB_H */
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
new file mode 100644
index 000000000000..5cece0a6606f
--- /dev/null
+++ b/drivers/staging/greybus/uart.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UART driver for the Greybus "generic" UART module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Heavily based on drivers/usb/class/cdc-acm.c and
+ * drivers/usb/serial/usb-serial.c.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+
+#define GB_NUM_MINORS 16 /* 16 is more than enough */
+#define GB_NAME "ttyGB"
+
+#define GB_UART_WRITE_FIFO_SIZE PAGE_SIZE
+#define GB_UART_WRITE_ROOM_MARGIN 1 /* leave some space in fifo */
+#define GB_UART_FIRMWARE_CREDITS 4096
+#define GB_UART_CREDIT_WAIT_TIMEOUT_MSEC 10000
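+
+/*
+ * Flow control is credit based: the driver starts with
+ * GB_UART_FIRMWARE_CREDITS, every byte handed to the firmware consumes one
+ * credit, and credits are returned via receive-credits requests (see
+ * gb_uart_receive_credits_handler() below).
+ */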
+
+struct gb_tty {
+ struct gbphy_device *gbphy_dev;
+ struct tty_port port;
+ void *buffer;
+ size_t buffer_payload_max;
+ struct gb_connection *connection;
+ u16 cport_id;
+ unsigned int minor;
+ unsigned char clocal;
+ bool disconnected;
+ spinlock_t read_lock;
+ spinlock_t write_lock;
+ struct async_icount iocount;
+ struct async_icount oldcount;
+ wait_queue_head_t wioctl;
+ struct mutex mutex;
+ u8 ctrlin; /* input control lines */
+ u8 ctrlout; /* output control lines */
+ struct gb_uart_set_line_coding_request line_coding;
+ struct work_struct tx_work;
+ struct kfifo write_fifo;
+ bool close_pending;
+ unsigned int credits;
+ struct completion credits_complete;
+};
+
+static struct tty_driver *gb_tty_driver;
+static DEFINE_IDR(tty_minors);
+static DEFINE_MUTEX(table_lock);
+
+static int gb_uart_receive_data_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ struct tty_port *port = &gb_tty->port;
+ struct gb_message *request = op->request;
+ struct gb_uart_recv_data_request *receive_data;
+ u16 recv_data_size;
+ int count;
+ unsigned long tty_flags = TTY_NORMAL;
+
+ if (request->payload_size < sizeof(*receive_data)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "short receive-data request received (%zu < %zu)\n",
+ request->payload_size, sizeof(*receive_data));
+ return -EINVAL;
+ }
+
+ receive_data = op->request->payload;
+ recv_data_size = le16_to_cpu(receive_data->size);
+
+ if (recv_data_size != request->payload_size - sizeof(*receive_data)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "malformed receive-data request received (%u != %zu)\n",
+ recv_data_size,
+ request->payload_size - sizeof(*receive_data));
+ return -EINVAL;
+ }
+
+ if (!recv_data_size)
+ return -EINVAL;
+
+ if (receive_data->flags) {
+ if (receive_data->flags & GB_UART_RECV_FLAG_BREAK)
+ tty_flags = TTY_BREAK;
+ else if (receive_data->flags & GB_UART_RECV_FLAG_PARITY)
+ tty_flags = TTY_PARITY;
+ else if (receive_data->flags & GB_UART_RECV_FLAG_FRAMING)
+ tty_flags = TTY_FRAME;
+
+ /* overrun is special, not associated with a char */
+ if (receive_data->flags & GB_UART_RECV_FLAG_OVERRUN)
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
+ }
+ count = tty_insert_flip_string_fixed_flag(port, receive_data->data,
+ tty_flags, recv_data_size);
+ if (count != recv_data_size) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "UART: RX 0x%08x bytes only wrote 0x%08x\n",
+ recv_data_size, count);
+ }
+ if (count)
+ tty_flip_buffer_push(port);
+ return 0;
+}
+
+static int gb_uart_serial_state_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ struct gb_message *request = op->request;
+ struct gb_uart_serial_state_request *serial_state;
+
+ if (request->payload_size < sizeof(*serial_state)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "short serial-state event received (%zu < %zu)\n",
+ request->payload_size, sizeof(*serial_state));
+ return -EINVAL;
+ }
+
+ serial_state = request->payload;
+ gb_tty->ctrlin = serial_state->control;
+
+ return 0;
+}
+
+static int gb_uart_receive_credits_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ struct gb_message *request = op->request;
+ struct gb_uart_receive_credits_request *credit_request;
+ unsigned long flags;
+ unsigned int incoming_credits;
+ int ret = 0;
+
+ if (request->payload_size < sizeof(*credit_request)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "short receive_credits event received (%zu < %zu)\n",
+ request->payload_size,
+ sizeof(*credit_request));
+ return -EINVAL;
+ }
+
+ credit_request = request->payload;
+ incoming_credits = le16_to_cpu(credit_request->count);
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ gb_tty->credits += incoming_credits;
+ if (gb_tty->credits > GB_UART_FIRMWARE_CREDITS) {
+ gb_tty->credits -= incoming_credits;
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ if (ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "invalid number of incoming credits: %d\n",
+ incoming_credits);
+ return ret;
+ }
+
+ if (!gb_tty->close_pending)
+ schedule_work(&gb_tty->tx_work);
+
+ /*
+	 * wake up the tty layer, which may be waiting for credits
+ */
+ tty_port_tty_wakeup(&gb_tty->port);
+
+ if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+ complete(&gb_tty->credits_complete);
+
+ return ret;
+}
+
+static int gb_uart_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ int type = op->type;
+ int ret;
+
+ switch (type) {
+ case GB_UART_TYPE_RECEIVE_DATA:
+ ret = gb_uart_receive_data_handler(op);
+ break;
+ case GB_UART_TYPE_SERIAL_STATE:
+ ret = gb_uart_serial_state_handler(op);
+ break;
+ case GB_UART_TYPE_RECEIVE_CREDITS:
+ ret = gb_uart_receive_credits_handler(op);
+ break;
+ default:
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "unsupported unsolicited request: 0x%02x\n", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void gb_uart_tx_write_work(struct work_struct *work)
+{
+ struct gb_uart_send_data_request *request;
+ struct gb_tty *gb_tty;
+ unsigned long flags;
+ unsigned int send_size;
+ int ret;
+
+ gb_tty = container_of(work, struct gb_tty, tx_work);
+ request = gb_tty->buffer;
+
+ while (1) {
+ if (gb_tty->close_pending)
+ break;
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ send_size = gb_tty->buffer_payload_max;
+ if (send_size > gb_tty->credits)
+ send_size = gb_tty->credits;
+
+ send_size = kfifo_out_peek(&gb_tty->write_fifo,
+ &request->data[0],
+ send_size);
+ if (!send_size) {
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+ break;
+ }
+
+ gb_tty->credits -= send_size;
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ request->size = cpu_to_le16(send_size);
+ ret = gb_operation_sync(gb_tty->connection,
+ GB_UART_TYPE_SEND_DATA,
+ request, sizeof(*request) + send_size,
+ NULL, 0);
+ if (ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "send data error: %d\n", ret);
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ gb_tty->credits += send_size;
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+ if (!gb_tty->close_pending)
+ schedule_work(work);
+ return;
+ }
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ ret = kfifo_out(&gb_tty->write_fifo, &request->data[0],
+ send_size);
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ tty_port_tty_wakeup(&gb_tty->port);
+ }
+}
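+
+/*
+ * Note the peek-then-commit pattern above: data is only removed from the
+ * write fifo (kfifo_out()) after the send-data operation succeeds, so a
+ * failed send leaves the data queued and the work item is rescheduled.
+ */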
+
+static int send_line_coding(struct gb_tty *tty)
+{
+ return gb_operation_sync(tty->connection, GB_UART_TYPE_SET_LINE_CODING,
+ &tty->line_coding, sizeof(tty->line_coding),
+ NULL, 0);
+}
+
+static int send_control(struct gb_tty *gb_tty, u8 control)
+{
+ struct gb_uart_set_control_line_state_request request;
+
+ request.control = control;
+ return gb_operation_sync(gb_tty->connection,
+ GB_UART_TYPE_SET_CONTROL_LINE_STATE,
+ &request, sizeof(request), NULL, 0);
+}
+
+static int send_break(struct gb_tty *gb_tty, u8 state)
+{
+ struct gb_uart_set_break_request request;
+
+ if ((state != 0) && (state != 1)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "invalid break state of %d\n", state);
+ return -EINVAL;
+ }
+
+ request.state = state;
+ return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_SEND_BREAK,
+ &request, sizeof(request), NULL, 0);
+}
+
+static int gb_uart_wait_for_all_credits(struct gb_tty *gb_tty)
+{
+ int ret;
+
+ if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+ return 0;
+
+ ret = wait_for_completion_timeout(&gb_tty->credits_complete,
+ msecs_to_jiffies(GB_UART_CREDIT_WAIT_TIMEOUT_MSEC));
+ if (!ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "time out waiting for credits\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int gb_uart_flush(struct gb_tty *gb_tty, u8 flags)
+{
+ struct gb_uart_serial_flush_request request;
+
+ request.flags = flags;
+ return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_FLUSH_FIFOS,
+ &request, sizeof(request), NULL, 0);
+}
+
+static struct gb_tty *get_gb_by_minor(unsigned int minor)
+{
+ struct gb_tty *gb_tty;
+
+ mutex_lock(&table_lock);
+ gb_tty = idr_find(&tty_minors, minor);
+ if (gb_tty) {
+ mutex_lock(&gb_tty->mutex);
+ if (gb_tty->disconnected) {
+ mutex_unlock(&gb_tty->mutex);
+ gb_tty = NULL;
+ } else {
+ tty_port_get(&gb_tty->port);
+ mutex_unlock(&gb_tty->mutex);
+ }
+ }
+ mutex_unlock(&table_lock);
+ return gb_tty;
+}
+
+static int alloc_minor(struct gb_tty *gb_tty)
+{
+ int minor;
+
+ mutex_lock(&table_lock);
+ minor = idr_alloc(&tty_minors, gb_tty, 0, GB_NUM_MINORS, GFP_KERNEL);
+ mutex_unlock(&table_lock);
+ if (minor >= 0)
+ gb_tty->minor = minor;
+ return minor;
+}
+
+static void release_minor(struct gb_tty *gb_tty)
+{
+ int minor = gb_tty->minor;
+
+ gb_tty->minor = 0; /* Maybe should use an invalid value instead */
+ mutex_lock(&table_lock);
+ idr_remove(&tty_minors, minor);
+ mutex_unlock(&table_lock);
+}
+
+static int gb_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty;
+ int retval;
+
+ gb_tty = get_gb_by_minor(tty->index);
+ if (!gb_tty)
+ return -ENODEV;
+
+ retval = tty_standard_install(driver, tty);
+ if (retval)
+ goto error;
+
+ tty->driver_data = gb_tty;
+ return 0;
+error:
+ tty_port_put(&gb_tty->port);
+ return retval;
+}
+
+static int gb_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ return tty_port_open(&gb_tty->port, tty, file);
+}
+
+static void gb_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ tty_port_close(&gb_tty->port, tty, file);
+}
+
+static void gb_tty_cleanup(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ tty_port_put(&gb_tty->port);
+}
+
+static void gb_tty_hangup(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ tty_port_hangup(&gb_tty->port);
+}
+
+static ssize_t gb_tty_write(struct tty_struct *tty, const u8 *buf, size_t count)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ count = kfifo_in_spinlocked(&gb_tty->write_fifo, buf, count,
+ &gb_tty->write_lock);
+ if (count && !gb_tty->close_pending)
+ schedule_work(&gb_tty->tx_work);
+
+ return count;
+}
+
+static unsigned int gb_tty_write_room(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned long flags;
+ int room;
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ room = kfifo_avail(&gb_tty->write_fifo);
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ room -= GB_UART_WRITE_ROOM_MARGIN;
+ if (room < 0)
+ return 0;
+
+ return room;
+}
+
+static unsigned int gb_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned long flags;
+ unsigned int chars;
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ chars = kfifo_len(&gb_tty->write_fifo);
+ if (gb_tty->credits < GB_UART_FIRMWARE_CREDITS)
+ chars += GB_UART_FIRMWARE_CREDITS - gb_tty->credits;
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ return chars;
+}
+
+static int gb_tty_break_ctl(struct tty_struct *tty, int state)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ return send_break(gb_tty, state ? 1 : 0);
+}
+
+static void gb_tty_set_termios(struct tty_struct *tty,
+ const struct ktermios *termios_old)
+{
+ struct gb_uart_set_line_coding_request newline;
+ struct gb_tty *gb_tty = tty->driver_data;
+ struct ktermios *termios = &tty->termios;
+ u8 newctrl = gb_tty->ctrlout;
+
+ newline.rate = cpu_to_le32(tty_get_baud_rate(tty));
+ newline.format = termios->c_cflag & CSTOPB ?
+ GB_SERIAL_2_STOP_BITS : GB_SERIAL_1_STOP_BITS;
+ newline.parity = termios->c_cflag & PARENB ?
+ (termios->c_cflag & PARODD ? 1 : 2) +
+ (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+
+ newline.data_bits = tty_get_char_size(termios->c_cflag);
+
+ /* FIXME: needs to clear unsupported bits in the termios */
+ gb_tty->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+ if (C_BAUD(tty) == B0) {
+ newline.rate = gb_tty->line_coding.rate;
+ newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
+ newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+ }
+
+ if (newctrl != gb_tty->ctrlout) {
+ gb_tty->ctrlout = newctrl;
+ send_control(gb_tty, newctrl);
+ }
+
+ if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
+ newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
+ else
+ newline.flow_control = 0;
+
+ if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
+ memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
+ send_line_coding(gb_tty);
+ }
+}
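+
+/*
+ * Note on the parity encoding in gb_tty_set_termios() above: the conditional
+ * arithmetic yields 0 (none), 1 (odd), 2 (even), 3 (mark) or 4 (space) for
+ * the line-coding request.
+ */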
+
+static int gb_tty_tiocmget(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ return (gb_tty->ctrlout & GB_UART_CTRL_DTR ? TIOCM_DTR : 0) |
+ (gb_tty->ctrlout & GB_UART_CTRL_RTS ? TIOCM_RTS : 0) |
+ (gb_tty->ctrlin & GB_UART_CTRL_DSR ? TIOCM_DSR : 0) |
+ (gb_tty->ctrlin & GB_UART_CTRL_RI ? TIOCM_RI : 0) |
+ (gb_tty->ctrlin & GB_UART_CTRL_DCD ? TIOCM_CD : 0) |
+ TIOCM_CTS;
+}
+
+static int gb_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ u8 newctrl = gb_tty->ctrlout;
+
+ set = (set & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
+ (set & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
+ clear = (clear & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
+ (clear & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
+
+ newctrl = (newctrl & ~clear) | set;
+ if (gb_tty->ctrlout == newctrl)
+ return 0;
+
+ gb_tty->ctrlout = newctrl;
+ return send_control(gb_tty, newctrl);
+}
+
+static void gb_tty_throttle(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned char stop_char;
+ int retval;
+
+ if (I_IXOFF(tty)) {
+ stop_char = STOP_CHAR(tty);
+ retval = gb_tty_write(tty, &stop_char, 1);
+ if (retval <= 0)
+ return;
+ }
+
+ if (tty->termios.c_cflag & CRTSCTS) {
+ gb_tty->ctrlout &= ~GB_UART_CTRL_RTS;
+ retval = send_control(gb_tty, gb_tty->ctrlout);
+ }
+}
+
+static void gb_tty_unthrottle(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned char start_char;
+ int retval;
+
+ if (I_IXOFF(tty)) {
+ start_char = START_CHAR(tty);
+ retval = gb_tty_write(tty, &start_char, 1);
+ if (retval <= 0)
+ return;
+ }
+
+ if (tty->termios.c_cflag & CRTSCTS) {
+ gb_tty->ctrlout |= GB_UART_CTRL_RTS;
+ retval = send_control(gb_tty, gb_tty->ctrlout);
+ }
+}
+
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ ss->line = gb_tty->minor;
+ mutex_lock(&gb_tty->port.mutex);
+ ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
+ ss->closing_wait =
+ gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE :
+ jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
+ mutex_unlock(&gb_tty->port.mutex);
+
+ return 0;
+}
+
+static int set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned int closing_wait;
+ unsigned int close_delay;
+ int retval = 0;
+
+ close_delay = msecs_to_jiffies(ss->close_delay * 10);
+ closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE :
+ msecs_to_jiffies(ss->closing_wait * 10);
+
+ mutex_lock(&gb_tty->port.mutex);
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((close_delay != gb_tty->port.close_delay) ||
+ (closing_wait != gb_tty->port.closing_wait))
+ retval = -EPERM;
+ } else {
+ gb_tty->port.close_delay = close_delay;
+ gb_tty->port.closing_wait = closing_wait;
+ }
+ mutex_unlock(&gb_tty->port.mutex);
+ return retval;
+}
+
+static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
+{
+ int retval = 0;
+ DECLARE_WAITQUEUE(wait, current);
+ struct async_icount old;
+ struct async_icount new;
+
+ if (!(arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)))
+ return -EINVAL;
+
+ do {
+ spin_lock_irq(&gb_tty->read_lock);
+ old = gb_tty->oldcount;
+ new = gb_tty->iocount;
+ gb_tty->oldcount = new;
+ spin_unlock_irq(&gb_tty->read_lock);
+
+ if ((arg & TIOCM_DSR) && (old.dsr != new.dsr))
+ break;
+ if ((arg & TIOCM_CD) && (old.dcd != new.dcd))
+ break;
+ if ((arg & TIOCM_RI) && (old.rng != new.rng))
+ break;
+
+ add_wait_queue(&gb_tty->wioctl, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ remove_wait_queue(&gb_tty->wioctl, &wait);
+ if (gb_tty->disconnected) {
+ if (arg & TIOCM_CD)
+ break;
+ retval = -ENODEV;
+ } else if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ }
+ } while (!retval);
+
+ return retval;
+}
+
+static int gb_tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ icount->dsr = gb_tty->iocount.dsr;
+ icount->rng = gb_tty->iocount.rng;
+ icount->dcd = gb_tty->iocount.dcd;
+ icount->frame = gb_tty->iocount.frame;
+ icount->overrun = gb_tty->iocount.overrun;
+ icount->parity = gb_tty->iocount.parity;
+ icount->brk = gb_tty->iocount.brk;
+
+ return 0;
+}
+
+static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ switch (cmd) {
+ case TIOCMIWAIT:
+ return wait_serial_change(gb_tty, arg);
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static void gb_tty_dtr_rts(struct tty_port *port, bool active)
+{
+ struct gb_tty *gb_tty;
+ u8 newctrl;
+
+ gb_tty = container_of(port, struct gb_tty, port);
+ newctrl = gb_tty->ctrlout;
+
+ if (active)
+ newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+ else
+ newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+
+ gb_tty->ctrlout = newctrl;
+ send_control(gb_tty, newctrl);
+}
+
+static int gb_tty_port_activate(struct tty_port *port,
+ struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty;
+
+ gb_tty = container_of(port, struct gb_tty, port);
+
+ return gbphy_runtime_get_sync(gb_tty->gbphy_dev);
+}
+
+static void gb_tty_port_shutdown(struct tty_port *port)
+{
+ struct gb_tty *gb_tty;
+ unsigned long flags;
+ int ret;
+
+ gb_tty = container_of(port, struct gb_tty, port);
+
+ gb_tty->close_pending = true;
+
+ cancel_work_sync(&gb_tty->tx_work);
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ kfifo_reset_out(&gb_tty->write_fifo);
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+ goto out;
+
+ ret = gb_uart_flush(gb_tty, GB_SERIAL_FLAG_FLUSH_TRANSMITTER);
+ if (ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "error flushing transmitter: %d\n", ret);
+ }
+
+ gb_uart_wait_for_all_credits(gb_tty);
+
+out:
+ gb_tty->close_pending = false;
+
+ gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
+}
+
+static void gb_tty_port_destruct(struct tty_port *port)
+{
+ struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
+
+ if (gb_tty->minor != GB_NUM_MINORS)
+ release_minor(gb_tty);
+ kfifo_free(&gb_tty->write_fifo);
+ kfree(gb_tty->buffer);
+ kfree(gb_tty);
+}
+
+static const struct tty_operations gb_ops = {
+ .install = gb_tty_install,
+ .open = gb_tty_open,
+ .close = gb_tty_close,
+ .cleanup = gb_tty_cleanup,
+ .hangup = gb_tty_hangup,
+ .write = gb_tty_write,
+ .write_room = gb_tty_write_room,
+ .ioctl = gb_tty_ioctl,
+ .throttle = gb_tty_throttle,
+ .unthrottle = gb_tty_unthrottle,
+ .chars_in_buffer = gb_tty_chars_in_buffer,
+ .break_ctl = gb_tty_break_ctl,
+ .set_termios = gb_tty_set_termios,
+ .tiocmget = gb_tty_tiocmget,
+ .tiocmset = gb_tty_tiocmset,
+ .get_icount = gb_tty_get_icount,
+ .set_serial = set_serial_info,
+ .get_serial = get_serial_info,
+};
+
+static const struct tty_port_operations gb_port_ops = {
+ .dtr_rts = gb_tty_dtr_rts,
+ .activate = gb_tty_port_activate,
+ .shutdown = gb_tty_port_shutdown,
+ .destruct = gb_tty_port_destruct,
+};
+
+static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ size_t max_payload;
+ struct gb_tty *gb_tty;
+ struct device *tty_dev;
+ int retval;
+ int minor;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ gb_uart_request_handler);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
+
+ max_payload = gb_operation_get_payload_size_max(connection);
+ if (max_payload < sizeof(struct gb_uart_send_data_request)) {
+ retval = -EINVAL;
+ goto exit_connection_destroy;
+ }
+
+ gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+ if (!gb_tty) {
+ retval = -ENOMEM;
+ goto exit_connection_destroy;
+ }
+
+ tty_port_init(&gb_tty->port);
+ gb_tty->port.ops = &gb_port_ops;
+ gb_tty->minor = GB_NUM_MINORS;
+
+ gb_tty->buffer_payload_max = max_payload -
+ sizeof(struct gb_uart_send_data_request);
+
+ gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
+ if (!gb_tty->buffer) {
+ retval = -ENOMEM;
+ goto exit_put_port;
+ }
+
+ INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
+
+ retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
+ GFP_KERNEL);
+ if (retval)
+ goto exit_put_port;
+
+ gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
+ init_completion(&gb_tty->credits_complete);
+
+ minor = alloc_minor(gb_tty);
+ if (minor < 0) {
+ if (minor == -ENOSPC) {
+ dev_err(&gbphy_dev->dev,
+ "no more free minor numbers\n");
+ retval = -ENODEV;
+ } else {
+ retval = minor;
+ }
+ goto exit_put_port;
+ }
+
+ gb_tty->minor = minor;
+ spin_lock_init(&gb_tty->write_lock);
+ spin_lock_init(&gb_tty->read_lock);
+ init_waitqueue_head(&gb_tty->wioctl);
+ mutex_init(&gb_tty->mutex);
+
+ gb_tty->connection = connection;
+ gb_tty->gbphy_dev = gbphy_dev;
+ gb_connection_set_data(connection, gb_tty);
+ gb_gbphy_set_data(gbphy_dev, gb_tty);
+
+ retval = gb_connection_enable_tx(connection);
+ if (retval)
+ goto exit_put_port;
+
+ retval = send_control(gb_tty, gb_tty->ctrlout);
+ if (retval)
+ goto exit_connection_disable;
+
+ /* initialize the uart to be 9600n81 */
+ gb_tty->line_coding.rate = cpu_to_le32(9600);
+ gb_tty->line_coding.format = GB_SERIAL_1_STOP_BITS;
+ gb_tty->line_coding.parity = GB_SERIAL_NO_PARITY;
+ gb_tty->line_coding.data_bits = 8;
+ retval = send_line_coding(gb_tty);
+ if (retval)
+ goto exit_connection_disable;
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto exit_connection_disable;
+
+ tty_dev = tty_port_register_device(&gb_tty->port, gb_tty_driver, minor,
+ &gbphy_dev->dev);
+ if (IS_ERR(tty_dev)) {
+ retval = PTR_ERR(tty_dev);
+ goto exit_connection_disable;
+ }
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_put_port:
+ tty_port_put(&gb_tty->port);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+
+ return retval;
+}
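+
+/*
+ * Note: once tty_port_init() has been called in the probe above, gb_tty is
+ * freed by the port destructor (gb_tty_port_destruct()), which is why the
+ * error paths drop a port reference instead of calling kfree() directly.
+ */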
+
+static void gb_uart_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_tty *gb_tty = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = gb_tty->connection;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ mutex_lock(&gb_tty->mutex);
+ gb_tty->disconnected = true;
+
+ wake_up_all(&gb_tty->wioctl);
+ mutex_unlock(&gb_tty->mutex);
+
+ tty_port_tty_vhangup(&gb_tty->port);
+
+ gb_connection_disable_rx(connection);
+ tty_unregister_device(gb_tty_driver, gb_tty->minor);
+
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+
+ tty_port_put(&gb_tty->port);
+}
+
+static int gb_tty_init(void)
+{
+ int retval = 0;
+
+ gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(gb_tty_driver)) {
+ pr_err("Can not allocate tty driver\n");
+ retval = -ENOMEM;
+ goto fail_unregister_dev;
+ }
+
+ gb_tty_driver->driver_name = "gb";
+ gb_tty_driver->name = GB_NAME;
+ gb_tty_driver->major = 0;
+ gb_tty_driver->minor_start = 0;
+ gb_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gb_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gb_tty_driver->init_termios = tty_std_termios;
+ gb_tty_driver->init_termios.c_cflag = B9600 | CS8 |
+ CREAD | HUPCL | CLOCAL;
+ tty_set_operations(gb_tty_driver, &gb_ops);
+
+ retval = tty_register_driver(gb_tty_driver);
+ if (retval) {
+ pr_err("Can not register tty driver: %d\n", retval);
+ goto fail_put_gb_tty;
+ }
+
+ return 0;
+
+fail_put_gb_tty:
+ tty_driver_kref_put(gb_tty_driver);
+fail_unregister_dev:
+ return retval;
+}
+
+static void gb_tty_exit(void)
+{
+ tty_unregister_driver(gb_tty_driver);
+ tty_driver_kref_put(gb_tty_driver);
+ idr_destroy(&tty_minors);
+}
+
+static const struct gbphy_device_id gb_uart_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_UART) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_uart_id_table);
+
+static struct gbphy_driver uart_driver = {
+ .name = "uart",
+ .probe = gb_uart_probe,
+ .remove = gb_uart_remove,
+ .id_table = gb_uart_id_table,
+};
+
+static int gb_uart_driver_init(void)
+{
+ int ret;
+
+ ret = gb_tty_init();
+ if (ret)
+ return ret;
+
+ ret = gb_gbphy_register(&uart_driver);
+ if (ret) {
+ gb_tty_exit();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(gb_uart_driver_init);
+
+static void gb_uart_driver_exit(void)
+{
+ gb_gbphy_deregister(&uart_driver);
+ gb_tty_exit();
+}
+
+module_exit(gb_uart_driver_exit);
+MODULE_DESCRIPTION("UART driver for the Greybus 'generic' UART module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
new file mode 100644
index 000000000000..475f24f20cd4
--- /dev/null
+++ b/drivers/staging/greybus/usb.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB host driver for the Greybus "generic" USB module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/greybus.h>
+
+#include "gbphy.h"
+
+/* Greybus USB request types */
+#define GB_USB_TYPE_HCD_START 0x02
+#define GB_USB_TYPE_HCD_STOP 0x03
+#define GB_USB_TYPE_HUB_CONTROL 0x04
+
+struct gb_usb_hub_control_request {
+ __le16 typeReq;
+ __le16 wValue;
+ __le16 wIndex;
+ __le16 wLength;
+};
+
+struct gb_usb_hub_control_response {
+ DECLARE_FLEX_ARRAY(u8, buf);
+};
+
+struct gb_usb_device {
+ struct gb_connection *connection;
+ struct gbphy_device *gbphy_dev;
+};
+
+static inline struct gb_usb_device *to_gb_usb_device(struct usb_hcd *hcd)
+{
+ return (struct gb_usb_device *)hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *gb_usb_device_to_hcd(struct gb_usb_device *dev)
+{
+ return container_of((void *)dev, struct usb_hcd, hcd_priv);
+}
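+
+/*
+ * The gb_usb_device is embedded in the hcd private area (hcd_priv_size
+ * below), so the two helpers above simply convert between the containing
+ * usb_hcd and the embedded gb_usb_device.
+ */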
+
+static void hcd_stop(struct usb_hcd *hcd)
+{
+ struct gb_usb_device *dev = to_gb_usb_device(hcd);
+ int ret;
+
+ ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_STOP,
+ NULL, 0, NULL, 0);
+ if (ret)
+ dev_err(&dev->gbphy_dev->dev, "HCD stop failed '%d'\n", ret);
+}
+
+static int hcd_start(struct usb_hcd *hcd)
+{
+ struct usb_bus *bus = hcd_to_bus(hcd);
+ struct gb_usb_device *dev = to_gb_usb_device(hcd);
+ int ret;
+
+ ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_START,
+ NULL, 0, NULL, 0);
+ if (ret) {
+ dev_err(&dev->gbphy_dev->dev, "HCD start failed '%d'\n", ret);
+ return ret;
+ }
+
+ hcd->state = HC_STATE_RUNNING;
+ if (bus->root_hub)
+ usb_hcd_resume_root_hub(hcd);
+ return 0;
+}
+
+static int urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ return -ENXIO;
+}
+
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ return -ENXIO;
+}
+
+static int get_frame_number(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
+static int hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ return 0;
+}
+
+static int hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength)
+{
+ struct gb_usb_device *dev = to_gb_usb_device(hcd);
+ struct gb_operation *operation;
+ struct gb_usb_hub_control_request *request;
+ struct gb_usb_hub_control_response *response;
+ size_t response_size;
+ int ret;
+
+ /* FIXME: handle unspecified lengths */
+ response_size = sizeof(*response) + wLength;
+
+ operation = gb_operation_create(dev->connection,
+ GB_USB_TYPE_HUB_CONTROL,
+ sizeof(*request),
+ response_size,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ request = operation->request->payload;
+ request->typeReq = cpu_to_le16(typeReq);
+ request->wValue = cpu_to_le16(wValue);
+ request->wIndex = cpu_to_le16(wIndex);
+ request->wLength = cpu_to_le16(wLength);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret)
+ goto out;
+
+ if (wLength) {
+ /* Greybus core has verified response size */
+ response = operation->response->payload;
+ memcpy(buf, response->buf, wLength);
+ }
+out:
+ gb_operation_put(operation);
+
+ return ret;
+}
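+
+/*
+ * hub_control() above tunnels the standard hub control request
+ * (typeReq/wValue/wIndex/wLength) over a Greybus operation and copies the
+ * wLength response bytes back into the caller's buffer.
+ */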
+
+static const struct hc_driver usb_gb_hc_driver = {
+ .description = "greybus-hcd",
+ .product_desc = "Greybus USB Host Controller",
+ .hcd_priv_size = sizeof(struct gb_usb_device),
+
+ .flags = HCD_USB2,
+
+ .start = hcd_start,
+ .stop = hcd_stop,
+
+ .urb_enqueue = urb_enqueue,
+ .urb_dequeue = urb_dequeue,
+
+ .get_frame_number = get_frame_number,
+ .hub_status_data = hub_status_data,
+ .hub_control = hub_control,
+};
+
+static int gb_usb_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct device *dev = &gbphy_dev->dev;
+ struct gb_usb_device *gb_usb_dev;
+ struct usb_hcd *hcd;
+ int retval;
+
+ hcd = usb_create_hcd(&usb_gb_hc_driver, dev, dev_name(dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto exit_usb_put;
+ }
+
+ gb_usb_dev = to_gb_usb_device(hcd);
+ gb_usb_dev->connection = connection;
+ gb_connection_set_data(connection, gb_usb_dev);
+ gb_usb_dev->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, gb_usb_dev);
+
+ hcd->has_tt = 1;
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto exit_connection_destroy;
+
+ /*
+ * FIXME: The USB bridged-PHY protocol driver depends on changes to
+ * USB core which are not yet upstream.
+ *
+ * Disable for now.
+ */
+ if (1) {
+ dev_warn(dev, "USB protocol disabled\n");
+ retval = -EPROTONOSUPPORT;
+ goto exit_connection_disable;
+ }
+
+ retval = usb_add_hcd(hcd, 0, 0);
+ if (retval)
+ goto exit_connection_disable;
+
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_usb_put:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+static void gb_usb_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_usb_device *gb_usb_dev = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = gb_usb_dev->connection;
+ struct usb_hcd *hcd = gb_usb_device_to_hcd(gb_usb_dev);
+
+ usb_remove_hcd(hcd);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ usb_put_hcd(hcd);
+}
+
+static const struct gbphy_device_id gb_usb_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_USB) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_usb_id_table);
+
+static struct gbphy_driver usb_driver = {
+ .name = "usb",
+ .probe = gb_usb_probe,
+ .remove = gb_usb_remove,
+ .id_table = gb_usb_id_table,
+};
+
+module_gbphy_driver(usb_driver);
+MODULE_DESCRIPTION("USB host driver for the Greybus 'generic' USB module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
new file mode 100644
index 000000000000..ee112aa13ff1
--- /dev/null
+++ b/drivers/staging/greybus/vibrator.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Vibrator protocol driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+#include <linux/greybus.h>
+
+struct gb_vibrator_device {
+ struct gb_connection *connection;
+ struct device *dev;
+ int minor; /* vibrator minor number */
+ struct delayed_work delayed_work;
+};
+
+/* Greybus Vibrator operation types */
+#define GB_VIBRATOR_TYPE_ON 0x02
+#define GB_VIBRATOR_TYPE_OFF 0x03
+
+static int turn_off(struct gb_vibrator_device *vib)
+{
+ struct gb_bundle *bundle = vib->connection->bundle;
+ int ret;
+
+ ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_OFF,
+ NULL, 0, NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int turn_on(struct gb_vibrator_device *vib, u16 timeout_ms)
+{
+ struct gb_bundle *bundle = vib->connection->bundle;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ /* Vibrator was switched ON earlier */
+ if (cancel_delayed_work_sync(&vib->delayed_work))
+ turn_off(vib);
+
+ ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_ON,
+ NULL, 0, NULL, 0);
+ if (ret) {
+ gb_pm_runtime_put_autosuspend(bundle);
+ return ret;
+ }
+
+ schedule_delayed_work(&vib->delayed_work, msecs_to_jiffies(timeout_ms));
+
+ return 0;
+}
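+
+/*
+ * turn_on() above arms delayed_work so the vibrator is switched off again
+ * after timeout_ms; a repeated turn_on() first cancels any pending auto-off
+ * and turns the vibrator off before re-enabling it.
+ */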
+
+static void gb_vibrator_worker(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gb_vibrator_device *vib =
+ container_of(delayed_work,
+ struct gb_vibrator_device,
+ delayed_work);
+
+ turn_off(vib);
+}
+
+static ssize_t timeout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gb_vibrator_device *vib = dev_get_drvdata(dev);
+ unsigned long val;
+ int retval;
+
+ retval = kstrtoul(buf, 10, &val);
+ if (retval < 0) {
+ dev_err(dev, "could not parse timeout value %d\n", retval);
+ return retval;
+ }
+
+ if (val)
+ retval = turn_on(vib, (u16)val);
+ else
+ retval = turn_off(vib);
+ if (retval)
+ return retval;
+
+ return count;
+}
+static DEVICE_ATTR_WO(timeout);
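+
+/*
+ * Writing a decimal value to the attribute turns the vibrator on for that
+ * many milliseconds, and writing 0 turns it off, e.g. (path assumes the
+ * default sysfs class layout and the first device):
+ *
+ *	echo 300 > /sys/class/vibrator/vibrator0/timeout
+ */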
+
+static struct attribute *vibrator_attrs[] = {
+ &dev_attr_timeout.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vibrator);
+
+static struct class vibrator_class = {
+ .name = "vibrator",
+ .dev_groups = vibrator_groups,
+};
+
+static DEFINE_IDA(minors);
+
+static int gb_vibrator_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_vibrator_device *vib;
+ struct device *dev;
+ int retval;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_VIBRATOR)
+ return -ENODEV;
+
+ vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+ if (!vib)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ NULL);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto err_free_vib;
+ }
+ gb_connection_set_data(connection, vib);
+
+ vib->connection = connection;
+
+ greybus_set_drvdata(bundle, vib);
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto err_connection_destroy;
+
+ /*
+ * For now we create a device in sysfs for the vibrator, but odds are
+ * there is a "real" device somewhere in the kernel for this, but I
+ * can't find it at the moment...
+ */
+ vib->minor = ida_alloc(&minors, GFP_KERNEL);
+ if (vib->minor < 0) {
+ retval = vib->minor;
+ goto err_connection_disable;
+ }
+ dev = device_create(&vibrator_class, &bundle->dev,
+ MKDEV(0, 0), vib, "vibrator%d", vib->minor);
+ if (IS_ERR(dev)) {
+ retval = -EINVAL;
+ goto err_ida_remove;
+ }
+ vib->dev = dev;
+
+ INIT_DELAYED_WORK(&vib->delayed_work, gb_vibrator_worker);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+err_ida_remove:
+ ida_free(&minors, vib->minor);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_connection_destroy:
+ gb_connection_destroy(connection);
+err_free_vib:
+ kfree(vib);
+
+ return retval;
+}
+
+static void gb_vibrator_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_vibrator_device *vib = greybus_get_drvdata(bundle);
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+
+ if (cancel_delayed_work_sync(&vib->delayed_work))
+ turn_off(vib);
+
+ device_unregister(vib->dev);
+ ida_free(&minors, vib->minor);
+ gb_connection_disable(vib->connection);
+ gb_connection_destroy(vib->connection);
+ kfree(vib);
+}
+
+static const struct greybus_bundle_id gb_vibrator_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_vibrator_id_table);
+
+static struct greybus_driver gb_vibrator_driver = {
+ .name = "vibrator",
+ .probe = gb_vibrator_probe,
+ .disconnect = gb_vibrator_disconnect,
+ .id_table = gb_vibrator_id_table,
+};
+
+static __init int gb_vibrator_init(void)
+{
+ int retval;
+
+ retval = class_register(&vibrator_class);
+ if (retval)
+ return retval;
+
+ retval = greybus_register(&gb_vibrator_driver);
+ if (retval)
+ goto err_class_unregister;
+
+ return 0;
+
+err_class_unregister:
+ class_unregister(&vibrator_class);
+
+ return retval;
+}
+module_init(gb_vibrator_init);
+
+static __exit void gb_vibrator_exit(void)
+{
+ greybus_deregister(&gb_vibrator_driver);
+ class_unregister(&vibrator_class);
+ ida_destroy(&minors);
+}
+module_exit(gb_vibrator_exit);
+
+MODULE_DESCRIPTION("Greybus Vibrator protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/gs_fpgaboot/Kconfig b/drivers/staging/gs_fpgaboot/Kconfig
deleted file mode 100644
index 550645291fab..000000000000
--- a/drivers/staging/gs_fpgaboot/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# "xilinx FPGA firmware download, fpgaboot"
-#
-config GS_FPGABOOT
- tristate "Xilinx FPGA firmware download module"
- default n
- help
- Xilinx FPGA firmware download module
diff --git a/drivers/staging/gs_fpgaboot/Makefile b/drivers/staging/gs_fpgaboot/Makefile
deleted file mode 100644
index d2f0211ba540..000000000000
--- a/drivers/staging/gs_fpgaboot/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-gs_fpga-y += gs_fpgaboot.o io.o
-obj-$(CONFIG_GS_FPGABOOT) += gs_fpga.o
diff --git a/drivers/staging/gs_fpgaboot/README b/drivers/staging/gs_fpgaboot/README
deleted file mode 100644
index 8d793c1769b0..000000000000
--- a/drivers/staging/gs_fpgaboot/README
+++ /dev/null
@@ -1,70 +0,0 @@
-==============================================================================
-Linux Driver Source for Xilinx FPGA firmware download
-==============================================================================
-
-
-TABLE OF CONTENTS.
-
-1. SUMMARY
-2. BACKGROUND
-3. DESIGN
-4. HOW TO USE
-5. REFERENCE
-
-1. SUMMARY
-
- - Download Xilinx FPGA firmware
- - This module downloads Xilinx FPGA firmware using gpio pins.
-
-2. BACKGROUND
-
- An FPGA (Field Programmable Gate Array) is a programmable hardware that is
- used in various applications. The hardware design needs to be programmed
- through a dedicated device or in a CPU-assisted way (serial or parallel).
- This driver provides a way to download FPGA firmware.
-
-3. DESIGN
-
- - load Xilinx FPGA bitstream format[1] firmware image file using
- kernel firmware framework, request_firmware()
- - program the Xilinx FPGA using SelectMAP (parallel) mode [2]
-   - FPGA programming is done by GPIO-based bit-banging, as an example
- - platform independent file: gs_fpgaboot.c
- - platform dependent file: io.c
-
-4. HOW TO USE
-
- $ insmod gs_fpga.ko file="xlinx_fpga_top_bitstream.bit"
- $ rmmod gs_fpga
-
-5. USE CASE (from a mailing list discussion with Greg)
-
- a. As a FPGA development support tool,
- During FPGA firmware development, you need to download a new FPGA
- image frequently.
-   You would do that with a dedicated JTAG, which is usually a limited
- resource in the lab.
- However, if you use my driver, you don't have to have a dedicated JTAG.
- This is a real gain :)
-
- b. For the FPGA that runs without config after the download, which
- doesn't talk to any of Linux interfaces (such as PCIE).
-
-  We download the FPGA firmware, triggered by the user or in some other way, and that's it.
- Since that FPGA runs on its own, it doesn't require a linux driver
- after the download.
-
-  c. For the FPGA that requires config after the download, which talks to
-     any of the Linux interfaces (such as PCIe)
-
-     Then, this type of FPGA config can be put into the device tree and have
-     a separate driver (PCIe or others); THAT driver then calls my driver to
-     download the FPGA firmware during the Linux boot and takes over the
-     device through the interface.
-
-6. REFERENCE
-
- 1. Xilinx APP NOTE XAPP583:
- http://www.xilinx.com/support/documentation/application_notes/xapp583-fpga-configuration.pdf
- 2. bitstream file info:
- http://home.earthlink.net/~davesullins/software/bitinfo.html
diff --git a/drivers/staging/gs_fpgaboot/TODO b/drivers/staging/gs_fpgaboot/TODO
deleted file mode 100644
index 2d9fb17d606d..000000000000
--- a/drivers/staging/gs_fpgaboot/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
- - get bus width input instead of hardcoded bus width
- - get it reviewed
-
-Please send any patches for this driver to Insop Song<insop.song@gainspeed.com>
-and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
-And please CC to "Staging subsystem" mail list <devel@driverdev.osuosl.org> too.
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
deleted file mode 100644
index a3a10f9a2a2b..000000000000
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/firmware.h>
-
-#include "gs_fpgaboot.h"
-#include "io.h"
-
-#define DEVICE_NAME "device"
-#define CLASS_NAME "fpgaboot"
-
-static uint8_t bits_magic[] = {
- 0x0, 0x9, 0xf, 0xf0, 0xf, 0xf0,
- 0xf, 0xf0, 0xf, 0xf0, 0x0, 0x0, 0x1};
-
-/* fake device for request_firmware */
-static struct platform_device *firmware_pdev;
-
-static char *file = "xlinx_fpga_firmware.bit";
-module_param(file, charp, S_IRUGO);
-MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
-
-static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
-{
- memcpy(buf, bitdata + *offset, rdsize);
- *offset += rdsize;
-}
-
-static void readinfo_bitstream(char *bitdata, char *buf, int *offset)
-{
- char tbuf[64];
- int32_t len;
-
- /* read section char */
- read_bitstream(bitdata, tbuf, offset, 1);
-
- /* read length */
- read_bitstream(bitdata, tbuf, offset, 2);
-
- len = tbuf[0] << 8 | tbuf[1];
-
- read_bitstream(bitdata, buf, offset, len);
- buf[len] = '\0';
-}
-
-/*
- * read bitdata length
- */
-static int readlength_bitstream(char *bitdata, int *lendata, int *offset)
-{
- char tbuf[64];
-
- /* read section char */
- read_bitstream(bitdata, tbuf, offset, 1);
-
- /* make sure it is section 'e' */
- if (tbuf[0] != 'e') {
- pr_err("error: length section is not 'e', but %c\n", tbuf[0]);
- return -1;
- }
-
- /* read 4bytes length */
- read_bitstream(bitdata, tbuf, offset, 4);
-
- *lendata = tbuf[0] << 24 | tbuf[1] << 16 |
- tbuf[2] << 8 | tbuf[3];
-
- return 0;
-}
-
-
-/*
- * read first 13 bytes to check bitstream magic number
- */
-static int readmagic_bitstream(char *bitdata, int *offset)
-{
- char buf[13];
- int r;
-
- read_bitstream(bitdata, buf, offset, 13);
- r = memcmp(buf, bits_magic, 13);
- if (r) {
- pr_err("error: corrupted header");
- return -1;
- }
- pr_info("bitstream file magic number Ok\n");
-
- *offset = 13; /* magic length */
-
- return 0;
-}
-
-/*
- * NOTE: supports only bitstream format
- */
-static enum fmt_image get_imageformat(struct fpgaimage *fimage)
-{
- return f_bit;
-}
-
-static void gs_print_header(struct fpgaimage *fimage)
-{
- pr_info("file: %s\n", fimage->filename);
- pr_info("part: %s\n", fimage->part);
- pr_info("date: %s\n", fimage->date);
- pr_info("time: %s\n", fimage->time);
- pr_info("lendata: %d\n", fimage->lendata);
-}
-
-static void gs_read_bitstream(struct fpgaimage *fimage)
-{
- char *bitdata;
- int offset;
-
- offset = 0;
- bitdata = (char *)fimage->fw_entry->data;
-
- readmagic_bitstream(bitdata, &offset);
- readinfo_bitstream(bitdata, fimage->filename, &offset);
- readinfo_bitstream(bitdata, fimage->part, &offset);
- readinfo_bitstream(bitdata, fimage->date, &offset);
- readinfo_bitstream(bitdata, fimage->time, &offset);
- readlength_bitstream(bitdata, &fimage->lendata, &offset);
-
- fimage->fpgadata = bitdata + offset;
-}
-
-static int gs_read_image(struct fpgaimage *fimage)
-{
- int img_fmt;
-
- img_fmt = get_imageformat(fimage);
-
- switch (img_fmt) {
- case f_bit:
- pr_info("image is bitstream format\n");
- gs_read_bitstream(fimage);
- break;
- default:
- pr_err("unsupported fpga image format\n");
- return -1;
- }
-
- gs_print_header(fimage);
-
- return 0;
-}
-
-static int gs_load_image(struct fpgaimage *fimage, char *fw_file)
-{
- int err;
-
- pr_info("load fpgaimage %s\n", fw_file);
-
- err = request_firmware(&fimage->fw_entry, fw_file, &firmware_pdev->dev);
- if (err != 0) {
- pr_err("firmware %s is missing, cannot continue.\n", fw_file);
- return err;
- }
-
- return 0;
-}
-
-static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
-{
- char *bitdata;
- int size, i, cnt;
-
- cnt = 0;
- bitdata = (char *)fimage->fpgadata;
- size = fimage->lendata;
-
-#ifdef DEBUG_FPGA
- print_hex_dump_bytes("bitfile sample: ", DUMP_PREFIX_OFFSET,
- bitdata, 0x100);
-#endif /* DEBUG_FPGA */
- if (!xl_supported_prog_bus_width(bus_bytes)) {
- pr_err("unsupported program bus width %d\n",
- bus_bytes);
- return -1;
- }
-
- /* Bring csi_b, rdwr_b Low and program_b High */
- xl_program_b(1);
- xl_rdwr_b(0);
- xl_csi_b(0);
-
- /* Configuration reset */
- xl_program_b(0);
- msleep(20);
- xl_program_b(1);
-
- /* Wait for Device Initialization */
- while (xl_get_init_b() == 0)
- ;
-
- pr_info("device init done\n");
-
- for (i = 0; i < size; i += bus_bytes)
- xl_shift_bytes_out(bus_bytes, bitdata+i);
-
- pr_info("program done\n");
-
- /* Check INIT_B */
- if (xl_get_init_b() == 0) {
- pr_err("init_b 0\n");
- return -1;
- }
-
- while (xl_get_done_b() == 0) {
- if (cnt++ > MAX_WAIT_DONE) {
- pr_err("init_B %d\n", xl_get_init_b());
- break;
- }
- }
-
- if (cnt > MAX_WAIT_DONE) {
- pr_err("fpga download fail\n");
- return -1;
- }
-
- pr_info("download fpgaimage\n");
-
- /* Compensate for Special Startup Conditions */
- xl_shift_cclk(8);
-
- return 0;
-}
-
-static int gs_release_image(struct fpgaimage *fimage)
-{
- release_firmware(fimage->fw_entry);
- pr_info("release fpgaimage\n");
-
- return 0;
-}
-
-/*
- * NOTE: supports systemmap parallel programming
- */
-static int gs_set_download_method(struct fpgaimage *fimage)
-{
- pr_info("set program method\n");
-
- fimage->dmethod = m_systemmap;
-
- pr_info("systemmap program method\n");
-
- return 0;
-}
-
-static int init_driver(void)
-{
- firmware_pdev = platform_device_register_simple("fpgaboot", -1,
- NULL, 0);
- return PTR_ERR_OR_ZERO(firmware_pdev);
-}
-
-static void finish_driver(void)
-{
- platform_device_unregister(firmware_pdev);
-}
-
-static int gs_fpgaboot(void)
-{
- int err;
- struct fpgaimage *fimage;
-
- fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL);
- if (!fimage)
- return -ENOMEM;
-
- err = gs_load_image(fimage, file);
- if (err) {
- pr_err("gs_load_image error\n");
- goto err_out1;
- }
-
- err = gs_read_image(fimage);
- if (err) {
- pr_err("gs_read_image error\n");
- goto err_out2;
- }
-
- err = gs_set_download_method(fimage);
- if (err) {
- pr_err("gs_set_download_method error\n");
- goto err_out2;
- }
-
- err = gs_download_image(fimage, bus_2byte);
- if (err) {
- pr_err("gs_download_image error\n");
- goto err_out2;
- }
-
- err = gs_release_image(fimage);
- if (err) {
- pr_err("gs_release_image error\n");
- goto err_out1;
- }
-
- kfree(fimage);
- return 0;
-
-err_out2:
- err = gs_release_image(fimage);
- if (err)
- pr_err("gs_release_image error\n");
-err_out1:
- kfree(fimage);
-
- return -1;
-
-}
-
-static int __init gs_fpgaboot_init(void)
-{
- int err;
-
- pr_info("FPGA DOWNLOAD --->\n");
-
- pr_info("FPGA image file name: %s\n", file);
-
- err = init_driver();
- if (err) {
- pr_err("FPGA DRIVER INIT FAIL!!\n");
- return err;
- }
-
- err = xl_init_io();
- if (err) {
- pr_err("GPIO INIT FAIL!!\n");
- goto errout;
- }
-
- err = gs_fpgaboot();
- if (err) {
- pr_err("FPGA DOWNLOAD FAIL!!\n");
- goto errout;
- }
-
- pr_info("FPGA DOWNLOAD DONE <---\n");
-
- return 0;
-
-errout:
- finish_driver();
-
- return err;
-}
-
-static void __exit gs_fpgaboot_exit(void)
-{
- finish_driver();
- pr_info("FPGA image download module removed\n");
-}
-
-module_init(gs_fpgaboot_init);
-module_exit(gs_fpgaboot_exit);
-
-MODULE_AUTHOR("Insop Song");
-MODULE_DESCRIPTION("Xlinix FPGA firmware download");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
deleted file mode 100644
index f41f4cc798cc..000000000000
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/firmware.h>
-
-#define MAX_STR 256
-
-enum fmt_image {
- f_bit, /* only bitstream is supported */
- f_rbt,
- f_bin,
- f_mcs,
- f_hex,
-};
-
-enum mdownload {
- m_systemmap, /* only system map is supported */
- m_serial,
- m_jtag,
-};
-
-/*
- * xilinx fpgaimage information
- * NOTE: use MAX_STR instead of dynamic alloc for simplicity
- */
-struct fpgaimage {
- enum fmt_image fmt_img;
- enum mdownload dmethod;
-
- const struct firmware *fw_entry;
-
- /*
-	 * the following fields can be read from the bitstream,
-	 * but other image formats should provide them as well
- */
- char filename[MAX_STR];
- char part[MAX_STR];
- char date[MAX_STR];
- char time[MAX_STR];
- int32_t lendata;
- char *fpgadata;
-};
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
deleted file mode 100644
index 819db53da64d..000000000000
--- a/drivers/staging/gs_fpgaboot/io.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/firmware.h>
-#include <linux/io.h>
-
-#include "io.h"
-
-static inline void byte0_out(unsigned char data);
-static inline void byte1_out(unsigned char data);
-static inline void xl_cclk_b(int32_t i);
-
-
-/* Assert and Deassert CCLK */
-void xl_shift_cclk(int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- xl_cclk_b(1);
- xl_cclk_b(0);
- }
-}
-
-int xl_supported_prog_bus_width(enum wbus bus_bytes)
-{
- switch (bus_bytes) {
- case bus_1byte:
- break;
- case bus_2byte:
- break;
- default:
- pr_err("unsupported program bus width %d\n",
- bus_bytes);
- return 0;
- }
-
- return 1;
-}
-
-/* Serialize byte and clock each bit on target's DIN and CCLK pins */
-void xl_shift_bytes_out(enum wbus bus_byte, unsigned char *pdata)
-{
- /*
- * supports 1 and 2 bytes programming mode
- */
- if (likely(bus_byte == bus_2byte))
- byte0_out(pdata[0]);
-
- byte1_out(pdata[1]);
- xl_shift_cclk(1);
-}
-
-/*
- * generic bit swap for xilinx SYSTEMMAP FPGA programming
- */
-void xl_program_b(int32_t i)
-{
-}
-
-void xl_rdwr_b(int32_t i)
-{
-}
-
-void xl_csi_b(int32_t i)
-{
-}
-
-int xl_get_init_b(void)
-{
- return -1;
-}
-
-int xl_get_done_b(void)
-{
- return -1;
-}
-
-static inline void byte0_out(unsigned char data)
-{
-}
-
-static inline void byte1_out(unsigned char data)
-{
-}
-
-static inline void xl_cclk_b(int32_t i)
-{
-}
-
-/*
- * configurable per device type for different I/O config
- */
-int xl_init_io(void)
-{
- return -1;
-}
diff --git a/drivers/staging/gs_fpgaboot/io.h b/drivers/staging/gs_fpgaboot/io.h
deleted file mode 100644
index 7b46ac24b74e..000000000000
--- a/drivers/staging/gs_fpgaboot/io.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define GPDIR 0
-#define GPCFG 4 /* open drain or not */
-#define GPDAT 8
-
-/*
- * gpio port and pin definitions
- * NOTE: port number starts from 0
- */
-#define XL_INITN_PORT 1
-#define XL_INITN_PIN 14
-#define XL_RDWRN_PORT 1
-#define XL_RDWRN_PIN 13
-#define XL_CCLK_PORT 1
-#define XL_CCLK_PIN 10
-#define XL_PROGN_PORT 1
-#define XL_PROGN_PIN 25
-#define XL_CSIN_PORT 1
-#define XL_CSIN_PIN 26
-#define XL_DONE_PORT 1
-#define XL_DONE_PIN 27
-
-/*
- * gpio mapping
- *
- XL_config_D0 – gpio1_31
- Xl_config_d1 – gpio1_30
- Xl_config_d2 – gpio1_29
- Xl_config_d3 – gpio1_28
- Xl_config_d4 – gpio1_27
- Xl_config_d5 – gpio1_26
- Xl_config_d6 – gpio1_25
- Xl_config_d7 – gpio1_24
- Xl_config_d8 – gpio1_23
- Xl_config_d9 – gpio1_22
- Xl_config_d10 – gpio1_21
- Xl_config_d11 – gpio1_20
- Xl_config_d12 – gpio1_19
- Xl_config_d13 – gpio1_18
- Xl_config_d14 – gpio1_16
- Xl_config_d15 – gpio1_14
-*
-*/
-
-/*
- * program bus width in bytes
- */
-enum wbus {
- bus_1byte = 1,
- bus_2byte = 2,
-};
-
-
-#define MAX_WAIT_DONE 10000
-
-
-struct gpiobus {
- int ngpio;
- void __iomem *r[4];
-};
-
-int xl_supported_prog_bus_width(enum wbus bus_bytes);
-
-void xl_program_b(int32_t i);
-void xl_rdwr_b(int32_t i);
-void xl_csi_b(int32_t i);
-
-int xl_get_init_b(void);
-int xl_get_done_b(void);
-
-void xl_shift_cclk(int count);
-void xl_shift_bytes_out(enum wbus bus_byte, unsigned char *pdata);
-
-int xl_init_io(void);
diff --git a/drivers/staging/iio/Documentation/dac/max517 b/drivers/staging/iio/Documentation/dac/max517
deleted file mode 100644
index e60ec2f91a7a..000000000000
--- a/drivers/staging/iio/Documentation/dac/max517
+++ /dev/null
@@ -1,41 +0,0 @@
-Kernel driver max517
-====================
-
-Supported chips:
- * Maxim MAX517, MAX518, MAX519
- Prefix: 'max517'
- Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/
-
-Author:
- Roland Stigge <stigge@antcom.de>
-
-Description
------------
-
-The Maxim MAX517/518/519 are 8-bit DACs on the I2C bus. The following table
-shows the different feature sets of the variants MAX517, MAX518 and MAX519:
-
-Feature MAX517 MAX518 MAX519
---------------------------------------------------------------------------
-One output channel X
-Two output channels X X
-Simultaneous output updates X X
-Supply voltage as reference X
-Separate reference input X
-Reference input for each DAC X
-
-Via the iio sysfs interface, there are three attributes available: out1_raw,
-out2_raw and out12_raw. With out1_raw and out2_raw, the current output values
-(0..255) of the DACs can be written to the device. out12_raw can be used to set
-both output channel values simultaneously.
-
-With MAX517, only out1_raw is available.
-
-Via out1_scale (and where appropriate, out2_scale), the current scaling factor
-in mV can be read.
-
-When the operating system goes to a power down state, the Power Down function
-of the chip is activated, reducing the supply current to 4uA.
-
-On power-up, the device is in 0V-output state.
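
The attribute writes described above are plain sysfs file I/O from user space. The sketch below is illustrative only: the device index (iio:device0) is an assumption and has to be looked up on the target system.

#include <stdio.h>

int main(void)
{
	/* Set channel 1 to mid-scale; iio:device0 is an assumed device index. */
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/out1_raw", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 128);	/* raw DAC code, 0..255 */
	fclose(f);
	return 0;
}
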
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
deleted file mode 100644
index 54ef0deed28f..000000000000
--- a/drivers/staging/iio/Documentation/device.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-IIO Device drivers
-
-This is not intended to provide a comprehensive guide to writing an
-IIO device driver. For further information see the drivers within the
-subsystem.
-
-The crucial structure for device drivers in iio is iio_dev.
-
-First allocate one using:
-
-struct iio_dev *indio_dev = iio_device_alloc(sizeof(struct chip_state));
-where chip_state is a structure of local state data for this instance of
-the chip.
-
-That data can be accessed using iio_priv(struct iio_dev *).
-
-Then fill in the following:
-
-- indio_dev->dev.parent
- Struct device associated with the underlying hardware.
-- indio_dev->name
- Name of the device being driven - made available as the name
- attribute in sysfs.
-
-- indio_dev->info
- pointer to a structure with elements that tend to be fixed for
- large sets of different parts supported by a given driver.
- This contains:
- * info->driver_module:
- Set to THIS_MODULE. Used to ensure correct ownership
-	of various resources allocated by the core.
- * info->event_attrs:
- Attributes used to enable / disable hardware events.
- * info->attrs:
- General device attributes. Typically used for the weird
- and the wonderful bits not covered by the channel specification.
- * info->read_raw:
- Raw data reading function. Used for both raw channel access
-	and for associated parameters such as offsets and scales.
- * info->write_raw:
- Raw value writing function. Used for writable device values such
- as DAC values and calibbias.
- * info->read_event_config:
- Typically only set if there are some interrupt lines. This
- is used to read if an on sensor event detector is enabled.
- * info->write_event_config:
- Enable / disable an on sensor event detector.
- * info->read_event_value:
- Read value associated with on sensor event detectors. Note that
- the meaning of the returned value is dependent on the event
- type.
- * info->write_event_value:
- Write the value associated with on sensor event detectors. E.g.
- a threshold above which an interrupt occurs. Note that the
- meaning of the value to be set is event type dependent.
-
-- indio_dev->modes:
- Specify whether direct access and / or ring buffer access is supported.
-- indio_dev->buffer:
- An optional associated buffer.
-- indio_dev->pollfunc:
- Poll function related elements. This controls what occurs when a trigger
- to which this device is attached sends an event.
-- indio_dev->channels:
- Specification of device channels. Most attributes etc. are built
- from this spec.
-- indio_dev->num_channels:
- How many channels are there?
-
-Once these are set up, a call to iio_device_register(indio_dev)
-will register the device with the iio core.
-
-Worth noting here is that, if a ring buffer is to be used, it can be
-allocated prior to registering the device with the iio-core, but must
-be registered afterwards (otherwise the whole parentage of devices
-gets confused).
-
-On remove, iio_device_unregister(indio_dev) will remove the device from
-the core, and iio_device_free(indio_dev) will clean up.
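
Pulled together, the fields listed above form the small skeleton below. This is a sketch against the staging-era interface documented in this file, not a complete driver: chip_state, example_read_raw() and the single voltage channel are placeholders.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/iio/iio.h>

struct chip_state {
	int last_raw;				/* per-instance driver data */
};

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	struct chip_state *st = iio_priv(indio_dev);

	*val = st->last_raw;			/* report a stored raw value */
	return IIO_VAL_INT;
}

static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
	},
};

static const struct iio_info example_info = {
	.driver_module = THIS_MODULE,
	.read_raw = example_read_raw,
};

static int example_register(struct device *parent)
{
	struct iio_dev *indio_dev;

	indio_dev = iio_device_alloc(sizeof(struct chip_state));
	if (!indio_dev)
		return -ENOMEM;

	indio_dev->dev.parent = parent;
	indio_dev->name = "example_adc";
	indio_dev->info = &example_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = example_channels;
	indio_dev->num_channels = ARRAY_SIZE(example_channels);

	return iio_device_register(indio_dev);
}

static void example_unregister(struct iio_dev *indio_dev)
{
	iio_device_unregister(indio_dev);
	iio_device_free(indio_dev);
}
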
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 470f7ad9c073..000000000000
--- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,6 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x
deleted file mode 100644
index b2798b258bf7..000000000000
--- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x
+++ /dev/null
@@ -1,13 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
-KernelVersion: 3.3-rc1
-Contact: linux-iio@vger.kernel.org
-Description:
- Causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/in_proximity0_calibrate
-KernelVersion: 3.3-rc1
-Contact: linux-iio@vger.kernel.org
-Description:
- Causes a recalculation and adjustment to the
- proximity_thresh_rising_value.
diff --git a/drivers/staging/iio/Documentation/overview.txt b/drivers/staging/iio/Documentation/overview.txt
deleted file mode 100644
index 43f92b06bc3e..000000000000
--- a/drivers/staging/iio/Documentation/overview.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Overview of IIO
-
-The Industrial I/O subsystem is intended to provide support for devices
-that in some sense are analog to digital converters (ADCs). As many
-actual devices combine some ADCs with digital to analog converters
-(DACs), that functionality is also supported.
-
-The aim is to fill the gap between the somewhat similar hwmon and
-input subsystems. Hwmon is very much directed at low sample rate
-sensors used in applications such as fan speed control and temperature
-measurement. Input is, as its name suggests, focused on input
-devices. In some cases there is considerable overlap between these and
-IIO.
-
-A typical device falling into this category would be connected via SPI
-or I2C.
-
-Functionality of IIO
-
-* Basic device registration and handling. This is very similar to
-hwmon with simple polled access to device channels via sysfs.
-
-* Event chrdevs. These are similar to input in that they provide a
-route to user space for hardware triggered events. Such events include
-threshold detectors, free-fall detectors and more complex action
-detection. The events themselves are currently very simple with
-merely an event code and a timestamp. Any data associated with the
-event must be accessed via polling.
-
-Note: A given device may have one or more event channels. These events are
-turned on or off (if possible) via sysfs interfaces.
-
-* Hardware buffer support. Some recent sensors have included
-fifo / ring buffers on the sensor chip. These greatly reduce the load
-on the host CPU by buffering relatively large numbers of data samples
-based on an internal sampling clock. Examples include VTI SCA3000
-series and Analog Devices ADXL345 accelerometers. Each buffer supports
-polling to establish when data is available.
-
-* Trigger and software buffer support. In many data analysis
-applications it is useful to be able to capture data based on some
-external signal (trigger). These triggers might be a data ready
-signal, a gpio line connected to some external system or an
-on-processor periodic interrupt. A single trigger may initiate
-data capture or reading from a number of sensors. These triggers are
-used in IIO to fill software buffers acting in a very similar
-fashion to the hardware buffers described above.
-
-Other documentation:
-
-device.txt - elements of a typical device driver.
-
-trigger.txt - elements of a typical trigger driver.
-
-ring.txt - additional elements required for buffer support.
-
-sysfs-bus-iio - abi documentation file.
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
deleted file mode 100644
index 18718fcaf259..000000000000
--- a/drivers/staging/iio/Documentation/ring.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Buffer support within IIO
-
-This document is intended as a general overview of the functionality
-a buffer may supply and how it is specified within IIO. For more
-specific information on a given buffer implementation, see the
-comments in the source code. Note that some drivers allow buffer
-implementation to be selected at compile time via Kconfig options.
-
-A given buffer implementation typically embeds a struct
-iio_ring_buffer and it is a pointer to this that is provided to the
-IIO core. Access to the embedding structure is typically done via
-container_of functions.
-
-struct iio_ring_buffer contains a struct iio_ring_setup_ops *setup_ops
-which in turn contains the 4 function pointers
-(preenable, postenable, predisable and postdisable).
-These are used to perform device specific steps on either side
-of the core changing its current mode to indicate that the buffer
-is enabled or disabled (along with enabling triggering etc. as appropriate).
-
-Also in struct iio_ring_buffer is a struct iio_ring_access_funcs.
-The function pointers within here are used to allow the core to handle
-as much buffer functionality as possible. Note almost all of these
-are optional.
-
-store_to
- If possible, push data to the buffer.
-
-read_last
- If possible, get the most recent scan from the buffer (without removal).
- This provides polling-like functionality whilst the ring buffering is in
- use without a separate read from the device.
-
-rip_first_n
- The primary buffer reading function. Note that it may well not return
- as much data as requested.
-
-request_update
- If parameters have changed that require reinitialization or configuration of
- the buffer, this will trigger it.
-
-set_bytes_per_datum
- Set the number of bytes for a complete scan. (All samples + timestamp)
-
-set_length
- Set the number of complete scans that may be held by the buffer.
-
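
As a concrete sketch of the setup_ops hooks described above: each hook is a plain callback bracketing the core's mode change. The callback signature and the staging-era header used below are assumptions; only the hook names come from this file.

#include <linux/iio/iio.h>
#include "ring_generic.h"	/* assumed staging-era home of iio_ring_setup_ops */

static int example_ring_preenable(struct iio_dev *indio_dev)
{
	/* device-specific preparation before the core enables the buffer */
	return 0;
}

static int example_ring_postenable(struct iio_dev *indio_dev)
{
	/* e.g. start the sampling source once the buffer is live */
	return 0;
}

static int example_ring_predisable(struct iio_dev *indio_dev)
{
	/* stop producing data before the core tears the buffer down */
	return 0;
}

static int example_ring_postdisable(struct iio_dev *indio_dev)
{
	/* device-specific cleanup after the buffer is disabled */
	return 0;
}

static const struct iio_ring_setup_ops example_ring_setup_ops = {
	.preenable	= example_ring_preenable,
	.postenable	= example_ring_postenable,
	.predisable	= example_ring_predisable,
	.postdisable	= example_ring_postdisable,
};
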
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192 b/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192
deleted file mode 100644
index 1c35c507cc05..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192
+++ /dev/null
@@ -1,20 +0,0 @@
-What: /sys/.../iio:deviceX/ac_excitation_en
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This attribute, if available, is used to enable the AC
- excitation mode found on some converters. In ac excitation mode,
- the polarity of the excitation voltage is reversed on
- alternate cycles, to eliminate DC errors.
-
-What: /sys/.../iio:deviceX/bridge_switch_en
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This attribute, if available, is used to close or open the
- bridge power down switch found on some converters.
- In bridge applications, such as strain gauges and load cells,
- the bridge itself consumes the majority of the current in the
- system. To minimize the current consumption of the system,
-	the bridge can be disconnected (when it is not being used)
-	using the bridge_switch_en attribute.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933 b/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933
deleted file mode 100644
index 79c7e88c64cd..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933
+++ /dev/null
@@ -1,30 +0,0 @@
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_start
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Frequency sweep start frequency in Hz.
-
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_increment
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Frequency increment in Hz (step size) between consecutive
- frequency points along the sweep.
-
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_points
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Number of frequency points (steps) in the frequency sweep.
- This value, in conjunction with the outY_freq_start and the
- outY_freq_increment, determines the frequency sweep range
- for the sweep operation.
-
-What: /sys/bus/iio/devices/iio:deviceX/outY_settling_cycles
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Number of output excitation cycles (settling time cycles)
- that are allowed to pass through the unknown impedance,
- after each frequency increment, and before the ADC is triggered
- to perform a conversion sequence of the response signal.
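
Taken together, the four attributes above fully describe one sweep. The user-space sketch below programs a 30 kHz to 40 kHz sweep in 100 Hz steps; the device index (iio:device0) and the channel number in the attribute names (out0_*) are assumptions.

#include <stdio.h>

static int ad5933_write_attr(const char *attr, long value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/iio/devices/iio:device0/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", value);
	fclose(f);
	return 0;
}

int main(void)
{
	ad5933_write_attr("out0_freq_start", 30000);	/* sweep starts at 30 kHz */
	ad5933_write_attr("out0_freq_increment", 100);	/* 100 Hz per step */
	ad5933_write_attr("out0_freq_points", 100);	/* 100 steps, ends near 40 kHz */
	ad5933_write_attr("out0_settling_cycles", 15);	/* settle before each conversion */
	return 0;
}
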
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
deleted file mode 100644
index 17e5c9c515d4..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light
+++ /dev/null
@@ -1,107 +0,0 @@
-
-What: /sys/bus/iio/devices/device[n]/range
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent ADC Full Scale Range used for some ambient
- light sensors in calculating lux.
-
-What: /sys/bus/iio/devices/device[n]/range_available
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
-	Hardware dependent supported values for ADC Full Scale Range.
-
-What: /sys/bus/iio/devices/device[n]/adc_resolution
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent ADC resolution of the ambient light sensor
- used in calculating the lux.
-
-What: /sys/bus/iio/devices/device[n]/adc_resolution_available
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent list of possible values supported for the
- adc_resolution of the given sensor.
-
-What: /sys/bus/iio/devices/device[n]/in_illuminance0[_input|_raw]
-KernelVersion: 2.6.35
-Contact: linux-iio@vger.kernel.org
-Description:
- This should return the calculated lux from the light sensor. If
- it comes back in SI units, it should also include _input else it
- should include _raw to signify it is not in SI units.
-
-What: /sys/.../device[n]/proximity_on_chip_ambient_infrared_suppression
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent mode for an ALS device to calculate the value
-	in proximity mode. When this is enabled, the device should
-	use an infrared sensor reading to remove infrared noise from the
- proximity reading. If this is not enabled, the driver can still
- do this calculation manually by reading the infrared sensor
-	value and doing the negation in software.
-
-What: /sys/bus/iio/devices/device[n]/in_proximity[_input|_raw]
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property is supported by proximity sensors and should be
- used to return the value of a reading by the sensor. If this
- value is returned in SI units, it should also include _input
- but if it is not, then it should include _raw.
-
-What: /sys/bus/iio/devices/device[n]/intensity_infrared[_input|_raw]
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property is supported by sensors that have an infrared
- sensing mode. This value should be the output from a reading
- and if expressed in SI units, should include _input. If this
- value is not in SI units, then it should include _raw.
-
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_target
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the last known external
- lux measurement used in/for calibration.
-
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_integration_time
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
-	This property gets/sets the sensor's ADC analog integration time.
-
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_lux_table
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the table of coefficients
- used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/in_intensity_clear[_input|_raw]
-What: /sys/bus/iio/devices/device[n]/in_intensity_red[_input|_raw]
-What: /sys/bus/iio/devices/device[n]/in_intensity_green[_input|_raw]
-What: /sys/bus/iio/devices/device[n]/in_intensity_blue[_input|_raw]
-KernelVersion: 3.6.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This property is supported by sensors that have a RGBC
- sensing mode. This value should be the output from a reading
- and if expressed in SI units, should include _input. If this
- value is not in SI units (irradiance, uW/mm^2), then it should
- include _raw.
-
-What: /sys/bus/iio/devices/device[n]/in_cct0[_input|_raw]
-KernelVersion: 3.6.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This should return the correlated color temperature from the
- light sensor. If it comes back in SI units, it should also
- include _input else it should include _raw to signify it is not
- in SI units.
-
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 660781df409f..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,20 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/lux_table
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the table of coefficients
- used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/illuminance0_calibrate
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/illuminance0_input_target
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
-	This property is the known external illuminance (in lux).
-	It is used in the process of calibrating the device's accuracy.
diff --git a/drivers/staging/iio/Documentation/trigger.txt b/drivers/staging/iio/Documentation/trigger.txt
deleted file mode 100644
index 7c0e505e4f04..000000000000
--- a/drivers/staging/iio/Documentation/trigger.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-IIO trigger drivers.
-
-Many triggers are provided by hardware that will also be registered as
-an IIO device. Whilst this can create device-specific complexities,
-such triggers are registered with the core in the same way as
-stand-alone triggers.
-
-struct iio_trigger *trig = iio_trigger_alloc("<trigger format string>", ...);
-
-allocates a trigger structure. The key elements to then fill in within
-a driver are:
-
-trig->owner
- Typically set to THIS_MODULE. Used to ensure correct
- ownership of core allocated resources.
-
-trig->set_trigger_state:
- Function that enables / disables the underlying source of the trigger.
-
-There is also a
-trig->alloc_list, which is useful for drivers that allocate multiple
-triggers to keep track of what they have created.
-
-When these have been set call:
-
-iio_trigger_register(trig);
-
-to register the trigger with the core, making it available to trigger
-consumers.
-
-Trigger Consumers
-
-Currently triggers are only used for the filling of software
-buffers and as such any device supporting INDIO_BUFFER_TRIGGERED has the
-consumer interface automatically created.
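
The allocation and registration steps above fit in a few lines. In the sketch below the set_trigger_state signature and the header path are assumptions based on staging drivers of the same era; "example-trig%d" is a placeholder format string.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/iio/trigger.h>	/* header location is an assumption for this era */

static int example_set_trigger_state(struct iio_trigger *trig, bool state)
{
	/* enable or disable the underlying interrupt / signal source */
	return 0;
}

static int example_register_trigger(int id)
{
	struct iio_trigger *trig;

	trig = iio_trigger_alloc("example-trig%d", id);
	if (!trig)
		return -ENOMEM;

	trig->owner = THIS_MODULE;
	trig->set_trigger_state = example_set_trigger_state;

	return iio_trigger_register(trig);
}
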
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 6d5b38d69578..a60631c1f449 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Industrial I/O subsystem configuration
#
@@ -7,42 +8,7 @@ menu "IIO staging drivers"
source "drivers/staging/iio/accel/Kconfig"
source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/addac/Kconfig"
-source "drivers/staging/iio/cdc/Kconfig"
source "drivers/staging/iio/frequency/Kconfig"
-source "drivers/staging/iio/gyro/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
-source "drivers/staging/iio/light/Kconfig"
-source "drivers/staging/iio/magnetometer/Kconfig"
-source "drivers/staging/iio/meter/Kconfig"
-source "drivers/staging/iio/resolver/Kconfig"
-source "drivers/staging/iio/trigger/Kconfig"
-
-config IIO_DUMMY_EVGEN
- tristate
-
-config IIO_SIMPLE_DUMMY
- tristate "An example driver with no hardware requirements"
- help
- Driver intended mainly as documentation for how to write
- a driver. May also be useful for testing userspace code
- without hardware.
-
-if IIO_SIMPLE_DUMMY
-
-config IIO_SIMPLE_DUMMY_EVENTS
- bool "Event generation support"
- select IIO_DUMMY_EVGEN
- help
- Add some dummy events to the simple dummy driver.
-
-config IIO_SIMPLE_DUMMY_BUFFER
- bool "Buffered capture support"
- select IIO_BUFFER
- select IIO_TRIGGER
- select IIO_KFIFO_BUF
- help
- Add buffered data capture to the simple dummy driver.
-
-endif # IIO_SIMPLE_DUMMY
endmenu
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index d87106135b27..628583535393 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -1,23 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the industrial I/O core.
#
-obj-$(CONFIG_IIO_SIMPLE_DUMMY) += iio_dummy.o
-iio_dummy-y := iio_simple_dummy.o
-iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_EVENTS) += iio_simple_dummy_events.o
-iio_dummy-$(CONFIG_IIO_SIMPLE_DUMMY_BUFFER) += iio_simple_dummy_buffer.o
-
-obj-$(CONFIG_IIO_DUMMY_EVGEN) += iio_dummy_evgen.o
-
obj-y += accel/
obj-y += adc/
obj-y += addac/
-obj-y += cdc/
obj-y += frequency/
-obj-y += gyro/
obj-y += impedance-analyzer/
-obj-y += light/
-obj-y += magnetometer/
-obj-y += meter/
-obj-y += resolver/
-obj-y += trigger/
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
deleted file mode 100644
index c22a0edd1528..000000000000
--- a/drivers/staging/iio/TODO
+++ /dev/null
@@ -1,84 +0,0 @@
-2009 8/18
-
-Core:
-1) Get reviews
-2) Additional testing
-3) Ensure all desirable features present by adding more devices.
- Major changes not expected except in response to comments
-
-Max1363 core:
-1) Possibly add sysfs exports of constants useful to userspace.
-Would be nice
-2) Support hardware generated interrupts
-3) Expand device set. Lots of other Maxim ADCs have very
- similar interfaces.
-
-MXS LRADC driver:
-This is a classic MFD device as it combines the following subdevices
- - touchscreen controller (input subsystem related device)
- - general purpose ADC channels
- - battery voltage monitor (power subsystem related device)
- - die temperature monitor (thermal management)
-
-At least the battery voltage and die temperature features are required in-kernel
-by a driver of the SoC's battery charging unit to avoid any damage to the
-silicon and the battery.
-
-TSL2561
-Would be nice
-1) Open question of userspace vs kernel space balance when
-converting to useful light measurements from device ones.
-2) Add sysfs elements necessary to allow device agnostic
-unit conversion.
-
-LIS3L02DQ core
-
-LIS3L02DQ ring
-
-KXSD9
-Currently minimal driver, would be nice to add:
-1) Support for all chip generated interrupts (events),
-basically get support up to level of lis3l02dq driver.
-
-Ring buffer core
-
-SCA3000
-Would be nice
-1) Testing on devices other than sca3000-e05
-
-Trigger core support
-1) Discussion of approach. Is it general enough?
-
-Ring Buffer:
-1) Discussion of approach.
-There are probably better ways of doing this. The
-intention is to allow for more than one software ring
-buffer implementation as different users will have
-different requirements. This one suits mid range
-frequencies (100Hz - 4kHz).
-2) Lots of testing
-
-Periodic Timer trigger
-1) Move to a more general hardware periodic timer request
-subsystem. The current approach abuses the purpose of the RTC.
-Initial discussions have taken place, but no actual code
-is in place as yet. This topic will be reopened on lkml
-shortly. I don't really envision this patch being merged
-in anything like its current form.
-
-GPIO trigger
-1) Add control over the type of interrupt etc. This will
-necessitate a header that is also visible from arch board
-files. (avoided at the moment to keep the driver set
-contained in staging).
-
-ADI Drivers:
-CC the device-drivers-devel@blackfin.uclinux.org mailing list when
-e-mailing the normal IIO list (see below).
-
-Documentation
-1) Lots of cleanup and expansion.
-2) Some device require individual docs.
-
-Contact: Jonathan Cameron <jic23@kernel.org>.
-Mailing list: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index fa67da9408b6..cee51f64bc4b 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -1,20 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Accelerometer drivers
#
menu "Accelerometers"
-config ADIS16201
- tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16201 dual-axis
- digital inclinometer and accelerometer.
-
- To compile this driver as a module, say M here: the module will
- be called adis16201.
-
config ADIS16203
tristate "Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"
depends on SPI
@@ -27,75 +16,4 @@ config ADIS16203
To compile this driver as a module, say M here: the module will be
called adis16203.
-config ADIS16204
- tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16204 Programmable
- High-g Digital Impact Sensor and Recorder.
-
- To compile this driver as a module, say M here: the module will be
- called adis16204.
-
-config ADIS16209
- tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
- and accelerometer.
-
- To compile this driver as a module, say M here: the module will be
- called adis16209.
-
-config ADIS16220
- tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
- depends on SPI
- select IIO_ADIS_LIB
- help
- Say Y here to build support for Analog Devices adis16220 programmable
- digital vibration sensor.
-
- To compile this driver as a module, say M here: the module will be
- called adis16220.
-
-config ADIS16240
- tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16240 programmable
- impact Sensor and recorder.
-
- To compile this driver as a module, say M here: the module will be
- called adis16240.
-
-config LIS3L02DQ
- tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
- depends on SPI
- select IIO_TRIGGER if IIO_BUFFER
- depends on !IIO_BUFFER || IIO_KFIFO_BUF
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y here to build SPI support for the ST microelectronics
- accelerometer. The driver supplies direct access via sysfs files
- and an event interface via a character device.
-
- To compile this driver as a module, say M here: the module will be
- called lis3l02dq.
-
-config SCA3000
- depends on IIO_BUFFER
- depends on SPI
- tristate "VTI SCA3000 series accelerometers"
- help
- Say Y here to build support for the VTI SCA3000 series of SPI
- accelerometers. These devices use a hardware ring buffer.
-
- To compile this driver as a module, say M here: the module will be
- called sca3000.
endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1ed137f1a506..acac7bc9b9c0 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -1,28 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O accelerometer drivers
#
-adis16201-y := adis16201_core.o
-obj-$(CONFIG_ADIS16201) += adis16201.o
-
-adis16203-y := adis16203_core.o
obj-$(CONFIG_ADIS16203) += adis16203.o
-
-adis16204-y := adis16204_core.o
-obj-$(CONFIG_ADIS16204) += adis16204.o
-
-adis16209-y := adis16209_core.o
-obj-$(CONFIG_ADIS16209) += adis16209.o
-
-adis16220-y := adis16220_core.o
-obj-$(CONFIG_ADIS16220) += adis16220.o
-
-adis16240-y := adis16240_core.o
-obj-$(CONFIG_ADIS16240) += adis16240.o
-
-lis3l02dq-y := lis3l02dq_core.o
-lis3l02dq-$(CONFIG_IIO_BUFFER) += lis3l02dq_ring.o
-obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
-
-sca3000-y := sca3000_core.o sca3000_ring.o
-obj-$(CONFIG_SCA3000) += sca3000.o
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
deleted file mode 100644
index e6b8c9af6e22..000000000000
--- a/drivers/staging/iio/accel/adis16201.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef SPI_ADIS16201_H_
-#define SPI_ADIS16201_H_
-
-#define ADIS16201_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16201_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16201_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16201_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16201_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16201_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16201_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16201_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16201_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16201_XACCL_OFFS 0x10 /* Calibration, x-axis acceleration offset */
-#define ADIS16201_YACCL_OFFS 0x12 /* Calibration, y-axis acceleration offset */
-#define ADIS16201_XACCL_SCALE 0x14 /* x-axis acceleration scale factor */
-#define ADIS16201_YACCL_SCALE 0x16 /* y-axis acceleration scale factor */
-#define ADIS16201_XINCL_OFFS 0x18 /* Calibration, x-axis inclination offset */
-#define ADIS16201_YINCL_OFFS 0x1A /* Calibration, y-axis inclination offset */
-#define ADIS16201_XINCL_SCALE 0x1C /* x-axis inclination scale factor */
-#define ADIS16201_YINCL_SCALE 0x1E /* y-axis inclination scale factor */
-#define ADIS16201_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16201_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16201_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16201_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16201_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16201_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16201_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16201_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16201_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16201_GLOB_CMD 0x3E /* Operation, system command register */
-
-/* MSC_CTRL */
-#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
-
-/* DIAG_STAT */
-#define ADIS16201_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
-
-/* GLOB_CMD */
-#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16201_ERROR_ACTIVE BIT(14)
-
-enum adis16201_scan {
- ADIS16201_SCAN_ACC_X,
- ADIS16201_SCAN_ACC_Y,
- ADIS16201_SCAN_INCLI_X,
- ADIS16201_SCAN_INCLI_Y,
- ADIS16201_SCAN_SUPPLY,
- ADIS16201_SCAN_AUX_ADC,
- ADIS16201_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16201_H_ */
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
deleted file mode 100644
index 10db685813c9..000000000000
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * ADIS16201 Programmable Digital Vibration Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16201.h"
-
-static const u8 adis16201_addresses[] = {
- [ADIS16201_SCAN_ACC_X] = ADIS16201_XACCL_OFFS,
- [ADIS16201_SCAN_ACC_Y] = ADIS16201_YACCL_OFFS,
- [ADIS16201_SCAN_INCLI_X] = ADIS16201_XINCL_OFFS,
- [ADIS16201_SCAN_INCLI_Y] = ADIS16201_YINCL_OFFS,
-};
-
-static int adis16201_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16201_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.610 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
-			*val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(462400); /* 0.4624 mg */
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_INCLI:
- *val = 0;
- *val2 = 100000; /* 0.1 degree */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- case IIO_INCLI:
- bits = 9;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16201_addresses[chan->scan_index];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16201_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- case IIO_INCLI:
- bits = 9;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16201_addresses[chan->scan_index];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16201_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16201_SUPPLY_OUT, ADIS16201_SCAN_SUPPLY, 0, 12),
- ADIS_TEMP_CHAN(ADIS16201_TEMP_OUT, ADIS16201_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16201_XACCL_OUT, ADIS16201_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16201_YACCL_OUT, ADIS16201_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC, ADIS16201_SCAN_AUX_ADC, 0, 12),
- ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT, ADIS16201_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT, ADIS16201_SCAN_INCLI_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(7)
-};
-
-static const struct iio_info adis16201_info = {
- .read_raw = &adis16201_read_raw,
- .write_raw = &adis16201_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16201_status_error_msgs[] = {
- [ADIS16201_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16201_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16201_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16201_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16201_MSC_CTRL,
- .glob_cmd_reg = ADIS16201_GLOB_CMD,
- .diag_stat_reg = ADIS16201_DIAG_STAT,
-
- .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16201_STARTUP_DELAY,
-
- .status_error_msgs = adis16201_status_error_msgs,
- .status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16201_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16201_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16201_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16201_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16201_info;
-
- indio_dev->channels = adis16201_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16201_data);
- if (ret)
- return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_cleanup_buffer_trigger;
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16201_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16201_driver = {
- .driver = {
- .name = "adis16201",
- .owner = THIS_MODULE,
- },
- .probe = adis16201_probe,
- .remove = adis16201_remove,
-};
-module_spi_driver(adis16201_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16201 Programmable Digital Vibration Sensor driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
new file mode 100644
index 000000000000..830ff38fda92
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADIS16203 Programmable 360 Degrees Inclinometer
+ *
+ * Copyright 2010 Analog Devices Inc.
+ */
+
+#include <linux/device.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#define ADIS16203_STARTUP_DELAY 220 /* ms */
+
+/* Flash memory write count */
+#define ADIS16203_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16203_SUPPLY_OUT 0x02
+
+/* Output, auxiliary ADC input */
+#define ADIS16203_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16203_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16203_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16203_YINCL_OUT 0x0E
+
+/* Incline null calibration */
+#define ADIS16203_INCL_NULL 0x18
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16203_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16203_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16203_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16203_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16203_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16203_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16203_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16203_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16203_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16203_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16203_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16203_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16203_GLOB_CMD 0x3E
+
+/* MSC_CTRL */
+
+/* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
+/* Reverses rotation of both inclination outputs */
+#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9)
+
+/* Self-test enable */
+#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
+
+/* DIAG_STAT */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM1 BIT(8)
+
+/* Self-test diagnostic error flag */
+#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
+/* SPI communications failure */
+#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 2.975 V */
+#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0
+
+/* GLOB_CMD */
+
+#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
+#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
+#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
+
+#define ADIS16203_ERROR_ACTIVE BIT(14)
+
+enum adis16203_scan {
+ ADIS16203_SCAN_INCLI_X,
+ ADIS16203_SCAN_INCLI_Y,
+ ADIS16203_SCAN_SUPPLY,
+ ADIS16203_SCAN_AUX_ADC,
+ ADIS16203_SCAN_TEMP,
+};
+
+#define DRIVER_NAME "adis16203"
+
+static const u8 adis16203_addresses[] = {
+ [ADIS16203_SCAN_INCLI_X] = ADIS16203_INCL_NULL,
+};
+
+static int adis16203_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ /* currently only one writable parameter which keeps this simple */
+ u8 addr = adis16203_addresses[chan->scan_index];
+
+ return adis_write_reg_16(st, addr, val & 0x3FFF);
+}
+
+static int adis16203_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int ret;
+ u8 addr;
+ s16 val16;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan,
+ ADIS16203_ERROR_ACTIVE, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 220000; /* 1.22 mV */
+ } else {
+ *val = 0;
+ *val2 = 610000; /* 0.61 mV */
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = -470; /* -0.47 C */
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_INCLI:
+ *val = 0;
+ *val2 = 25000; /* 0.025 degree */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 25000 / -470 - 1278; /* 25 C = 1278 */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ addr = adis16203_addresses[chan->scan_index];
+ ret = adis_read_reg_16(st, addr, &val16);
+ if (ret)
+ return ret;
+ *val = sign_extend32(val16, 13);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec adis16203_channels[] = {
+ ADIS_SUPPLY_CHAN(ADIS16203_SUPPLY_OUT, ADIS16203_SCAN_SUPPLY, 0, 12),
+ ADIS_AUX_ADC_CHAN(ADIS16203_AUX_ADC, ADIS16203_SCAN_AUX_ADC, 0, 12),
+ ADIS_INCLI_CHAN(X, ADIS16203_XINCL_OUT, ADIS16203_SCAN_INCLI_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ /* Fixme: Not what it appears to be - see data sheet */
+ ADIS_INCLI_CHAN(Y, ADIS16203_YINCL_OUT, ADIS16203_SCAN_INCLI_Y,
+ 0, 0, 14),
+ ADIS_TEMP_CHAN(ADIS16203_TEMP_OUT, ADIS16203_SCAN_TEMP, 0, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(5),
+};
+
+static const struct iio_info adis16203_info = {
+ .read_raw = adis16203_read_raw,
+ .write_raw = adis16203_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+};
+
+static const char * const adis16203_status_error_msgs[] = {
+ [ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
+ [ADIS16203_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
+ [ADIS16203_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
+ [ADIS16203_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
+ [ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
+};
+
+static const struct adis_timeout adis16203_timeouts = {
+ .reset_ms = ADIS16203_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16203_STARTUP_DELAY,
+ .self_test_ms = ADIS16203_STARTUP_DELAY
+};
+
+static const struct adis_data adis16203_data = {
+ .read_delay = 20,
+ .msc_ctrl_reg = ADIS16203_MSC_CTRL,
+ .glob_cmd_reg = ADIS16203_GLOB_CMD,
+ .diag_stat_reg = ADIS16203_DIAG_STAT,
+
+ .self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
+ .self_test_reg = ADIS16203_MSC_CTRL,
+ .self_test_no_autoclear = true,
+ .timeouts = &adis16203_timeouts,
+
+ .status_error_msgs = adis16203_status_error_msgs,
+ .status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
+ BIT(ADIS16203_DIAG_STAT_SPI_FAIL_BIT) |
+ BIT(ADIS16203_DIAG_STAT_FLASH_UPT_BIT) |
+ BIT(ADIS16203_DIAG_STAT_POWER_HIGH_BIT) |
+ BIT(ADIS16203_DIAG_STAT_POWER_LOW_BIT),
+};
+
+static int adis16203_probe(struct spi_device *spi)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct adis *st;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->channels = adis16203_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
+ indio_dev->info = &adis16203_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(st, indio_dev, spi, &adis16203_data);
+ if (ret)
+ return ret;
+
+ ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ /* Get the device into a sane initial state */
+ ret = __adis_initial_startup(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id adis16203_of_match[] = {
+ { .compatible = "adi,adis16203" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, adis16203_of_match);
+
+static struct spi_driver adis16203_driver = {
+ .driver = {
+ .name = "adis16203",
+ .of_match_table = adis16203_of_match,
+ },
+ .probe = adis16203_probe,
+};
+module_spi_driver(adis16203_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16203");
+MODULE_IMPORT_NS("IIO_ADISLIB");
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
deleted file mode 100644
index 6426e38bf006..000000000000
--- a/drivers/staging/iio/accel/adis16203.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef SPI_ADIS16203_H_
-#define SPI_ADIS16203_H_
-
-#define ADIS16203_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16203_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16203_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16203_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16203_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16203_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16203_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16203_INCL_NULL 0x18 /* Incline null calibration */
-#define ADIS16203_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16203_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16203_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16203_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16203_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16203_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16203_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16203_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16203_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16203_GLOB_CMD 0x3E /* Operation, system command register */
-
-/* MSC_CTRL */
-#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9) /* Reverses rotation of both inclination outputs */
-#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
-
-/* DIAG_STAT */
-#define ADIS16203_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag */
-#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
-
-/* GLOB_CMD */
-#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16203_ERROR_ACTIVE BIT(14)
-
-enum adis16203_scan {
- ADIS16203_SCAN_INCLI_X,
- ADIS16203_SCAN_INCLI_Y,
- ADIS16203_SCAN_SUPPLY,
- ADIS16203_SCAN_AUX_ADC,
- ADIS16203_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16203_H_ */
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
deleted file mode 100644
index fb593d23d5bc..000000000000
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * ADIS16203 Programmable Digital Vibration Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16203.h"
-
-#define DRIVER_NAME "adis16203"
-
-static const u8 adis16203_addresses[] = {
- [ADIS16203_SCAN_INCLI_X] = ADIS16203_INCL_NULL,
-};
-
-static int adis16203_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- /* currently only one writable parameter which keeps this simple */
- u8 addr = adis16203_addresses[chan->scan_index];
-
- return adis_write_reg_16(st, addr, val & 0x3FFF);
-}
-
-static int adis16203_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16203_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.61 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_INCLI:
- *val = 0;
- *val2 = 25000; /* 0.025 degree */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- bits = 14;
- mutex_lock(&indio_dev->mlock);
- addr = adis16203_addresses[chan->scan_index];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
-}
-
-static const struct iio_chan_spec adis16203_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16203_SUPPLY_OUT, ADIS16203_SCAN_SUPPLY, 0, 12),
- ADIS_AUX_ADC_CHAN(ADIS16203_AUX_ADC, ADIS16203_SCAN_AUX_ADC, 0, 12),
- ADIS_INCLI_CHAN(X, ADIS16203_XINCL_OUT, ADIS16203_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- /* Fixme: Not what it appears to be - see data sheet */
- ADIS_INCLI_CHAN(Y, ADIS16203_YINCL_OUT, ADIS16203_SCAN_INCLI_Y,
- 0, 0, 14),
- ADIS_TEMP_CHAN(ADIS16203_TEMP_OUT, ADIS16203_SCAN_TEMP, 0, 12),
- IIO_CHAN_SOFT_TIMESTAMP(5),
-};
-
-static const struct iio_info adis16203_info = {
- .read_raw = &adis16203_read_raw,
- .write_raw = &adis16203_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16203_status_error_msgs[] = {
- [ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16203_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16203_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16203_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16203_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16203_MSC_CTRL,
- .glob_cmd_reg = ADIS16203_GLOB_CMD,
- .diag_stat_reg = ADIS16203_DIAG_STAT,
-
- .self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16203_STARTUP_DELAY,
-
- .status_error_msgs = adis16203_status_error_msgs,
- .status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16203_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16203_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16203_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16203_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16203_probe(struct spi_device *spi)
-{
- int ret;
- struct iio_dev *indio_dev;
- struct adis *st;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->channels = adis16203_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
- indio_dev->info = &adis16203_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16203_data);
- if (ret)
- return ret;
-
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16203_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16203_driver = {
- .driver = {
- .name = "adis16203",
- .owner = THIS_MODULE,
- },
- .probe = adis16203_probe,
- .remove = adis16203_remove,
-};
-module_spi_driver(adis16203_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16203");
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
deleted file mode 100644
index 0b23f0b5c52f..000000000000
--- a/drivers/staging/iio/accel/adis16204.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef SPI_ADIS16204_H_
-#define SPI_ADIS16204_H_
-
-#define ADIS16204_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16204_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16204_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16204_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16204_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16204_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16204_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16204_X_PEAK_OUT 0x0C /* Twos complement */
-#define ADIS16204_Y_PEAK_OUT 0x0E /* Twos complement */
-#define ADIS16204_XACCL_NULL 0x10 /* Calibration, x-axis acceleration offset null */
-#define ADIS16204_YACCL_NULL 0x12 /* Calibration, y-axis acceleration offset null */
-#define ADIS16204_XACCL_SCALE 0x14 /* X-axis scale factor calibration register */
-#define ADIS16204_YACCL_SCALE 0x16 /* Y-axis scale factor calibration register */
-#define ADIS16204_XY_RSS_OUT 0x18 /* XY combined acceleration (RSS) */
-#define ADIS16204_XY_PEAK_OUT 0x1A /* Peak, XY combined output (RSS) */
-#define ADIS16204_CAP_BUF_1 0x1C /* Capture buffer output register 1 */
-#define ADIS16204_CAP_BUF_2 0x1E /* Capture buffer output register 2 */
-#define ADIS16204_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16204_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16204_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16204_CAPT_PNTR 0x2A /* Capture register address pointer */
-#define ADIS16204_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16204_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16204_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16204_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16204_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16204_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16204_GLOB_CMD 0x3E /* Operation, system command register */
-
-/* MSC_CTRL */
-#define ADIS16204_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16204_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16204_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16204_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16204_MSC_CTRL_DATA_RDY_DIO2 BIT(0) /* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-
-/* DIAG_STAT */
-#define ADIS16204_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag: 1 = error condition,
- 0 = normal operation */
-#define ADIS16204_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16204_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16204_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16204_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 2.975 V */
-
-/* GLOB_CMD */
-#define ADIS16204_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16204_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16204_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16204_ERROR_ACTIVE BIT(14)
-
-enum adis16204_scan {
- ADIS16204_SCAN_ACC_X,
- ADIS16204_SCAN_ACC_Y,
- ADIS16204_SCAN_ACC_XY,
- ADIS16204_SCAN_SUPPLY,
- ADIS16204_SCAN_AUX_ADC,
- ADIS16204_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
deleted file mode 100644
index ea0ac2467ac2..000000000000
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * ADIS16204 Programmable High-g Digital Impact Sensor and Recorder
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16204.h"
-
-/* Unique to this driver currently */
-
-static const u8 adis16204_addresses[][2] = {
- [ADIS16204_SCAN_ACC_X] = { ADIS16204_XACCL_NULL, ADIS16204_X_PEAK_OUT },
- [ADIS16204_SCAN_ACC_Y] = { ADIS16204_YACCL_NULL, ADIS16204_Y_PEAK_OUT },
- [ADIS16204_SCAN_ACC_XY] = { 0, ADIS16204_XY_PEAK_OUT },
-};
-
-static int adis16204_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
- int addrind;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16204_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.61 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* 0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- switch (chan->channel2) {
- case IIO_MOD_X:
- case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
- *val2 = IIO_G_TO_M_S_2(17125); /* 17.125 mg */
- break;
- case IIO_MOD_Y:
- case IIO_MOD_Z:
- *val2 = IIO_G_TO_M_S_2(8407); /* 8.407 mg */
- break;
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- case IIO_CHAN_INFO_PEAK:
- if (mask == IIO_CHAN_INFO_CALIBBIAS) {
- bits = 12;
- addrind = 0;
- } else { /* PEAK_SEPARATE */
- bits = 14;
- addrind = 1;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16204_addresses[chan->scan_index][addrind];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16204_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16204_addresses[chan->scan_index][1];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16204_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16204_SUPPLY_OUT, ADIS16204_SCAN_SUPPLY, 0, 12),
- ADIS_AUX_ADC_CHAN(ADIS16204_AUX_ADC, ADIS16204_SCAN_AUX_ADC, 0, 12),
- ADIS_TEMP_CHAN(ADIS16204_TEMP_OUT, ADIS16204_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16204_XACCL_OUT, ADIS16204_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16204_YACCL_OUT, ADIS16204_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(ROOT_SUM_SQUARED_X_Y, ADIS16204_XY_RSS_OUT,
- ADIS16204_SCAN_ACC_XY, BIT(IIO_CHAN_INFO_PEAK), 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(5),
-};
-
-static const struct iio_info adis16204_info = {
- .read_raw = &adis16204_read_raw,
- .write_raw = &adis16204_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16204_status_error_msgs[] = {
- [ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16204_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16204_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16204_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16204_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
-};
-
-static const struct adis_data adis16204_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16204_MSC_CTRL,
- .glob_cmd_reg = ADIS16204_GLOB_CMD,
- .diag_stat_reg = ADIS16204_DIAG_STAT,
-
- .self_test_mask = ADIS16204_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16204_STARTUP_DELAY,
-
- .status_error_msgs = adis16204_status_error_msgs,
- .status_error_mask = BIT(ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16204_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16204_info;
- indio_dev->channels = adis16204_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16204_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16204_data);
- if (ret)
- return ret;
-
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16204_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16204_driver = {
- .driver = {
- .name = "adis16204",
- .owner = THIS_MODULE,
- },
- .probe = adis16204_probe,
- .remove = adis16204_remove,
-};
-module_spi_driver(adis16204_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
deleted file mode 100644
index 813698d18ec8..000000000000
--- a/drivers/staging/iio/accel/adis16209.h
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef SPI_ADIS16209_H_
-#define SPI_ADIS16209_H_
-
-#define ADIS16209_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16209_FLASH_CNT 0x00
-/* Output, power supply */
-#define ADIS16209_SUPPLY_OUT 0x02
-/* Output, x-axis accelerometer */
-#define ADIS16209_XACCL_OUT 0x04
-/* Output, y-axis accelerometer */
-#define ADIS16209_YACCL_OUT 0x06
-/* Output, auxiliary ADC input */
-#define ADIS16209_AUX_ADC 0x08
-/* Output, temperature */
-#define ADIS16209_TEMP_OUT 0x0A
-/* Output, x-axis inclination */
-#define ADIS16209_XINCL_OUT 0x0C
-/* Output, y-axis inclination */
-#define ADIS16209_YINCL_OUT 0x0E
-/* Output, +/-180 vertical rotational position */
-#define ADIS16209_ROT_OUT 0x10
-/* Calibration, x-axis acceleration offset null */
-#define ADIS16209_XACCL_NULL 0x12
-/* Calibration, y-axis acceleration offset null */
-#define ADIS16209_YACCL_NULL 0x14
-/* Calibration, x-axis inclination offset null */
-#define ADIS16209_XINCL_NULL 0x16
-/* Calibration, y-axis inclination offset null */
-#define ADIS16209_YINCL_NULL 0x18
-/* Calibration, vertical rotation offset null */
-#define ADIS16209_ROT_NULL 0x1A
-/* Alarm 1 amplitude threshold */
-#define ADIS16209_ALM_MAG1 0x20
-/* Alarm 2 amplitude threshold */
-#define ADIS16209_ALM_MAG2 0x22
-/* Alarm 1, sample period */
-#define ADIS16209_ALM_SMPL1 0x24
-/* Alarm 2, sample period */
-#define ADIS16209_ALM_SMPL2 0x26
-/* Alarm control */
-#define ADIS16209_ALM_CTRL 0x28
-/* Auxiliary DAC data */
-#define ADIS16209_AUX_DAC 0x30
-/* General-purpose digital input/output control */
-#define ADIS16209_GPIO_CTRL 0x32
-/* Miscellaneous control */
-#define ADIS16209_MSC_CTRL 0x34
-/* Internal sample period (rate) control */
-#define ADIS16209_SMPL_PRD 0x36
-/* Operation, filter configuration */
-#define ADIS16209_AVG_CNT 0x38
-/* Operation, sleep mode control */
-#define ADIS16209_SLP_CNT 0x3A
-/* Diagnostics, system status register */
-#define ADIS16209_DIAG_STAT 0x3C
-/* Operation, system command register */
-#define ADIS16209_GLOB_CMD 0x3E
-
-/* MSC_CTRL */
-/* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
-/* Self-test enable */
-#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8)
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2)
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1)
-/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
-
-/* DIAG_STAT */
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16209_DIAG_STAT_ALARM2 BIT(9)
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16209_DIAG_STAT_ALARM1 BIT(8)
-/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
-#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5
-/* SPI communications failure */
-#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3
-/* Flash update failure */
-#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2
-/* Power supply above 3.625 V */
-#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1
-/* Power supply below 3.15 V */
-#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16209_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16209_ERROR_ACTIVE BIT(14)
-
-#define ADIS16209_SCAN_SUPPLY 0
-#define ADIS16209_SCAN_ACC_X 1
-#define ADIS16209_SCAN_ACC_Y 2
-#define ADIS16209_SCAN_AUX_ADC 3
-#define ADIS16209_SCAN_TEMP 4
-#define ADIS16209_SCAN_INCLI_X 5
-#define ADIS16209_SCAN_INCLI_Y 6
-#define ADIS16209_SCAN_ROT 7
-
-#endif /* SPI_ADIS16209_H_ */
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
deleted file mode 100644
index d1dc1a3cb3ce..000000000000
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * ADIS16209 Programmable Digital Vibration Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16209.h"
-
-static const u8 adis16209_addresses[8][1] = {
- [ADIS16209_SCAN_SUPPLY] = { },
- [ADIS16209_SCAN_AUX_ADC] = { },
- [ADIS16209_SCAN_ACC_X] = { ADIS16209_XACCL_NULL },
- [ADIS16209_SCAN_ACC_Y] = { ADIS16209_YACCL_NULL },
- [ADIS16209_SCAN_INCLI_X] = { ADIS16209_XINCL_NULL },
- [ADIS16209_SCAN_INCLI_Y] = { ADIS16209_YINCL_NULL },
- [ADIS16209_SCAN_ROT] = { },
- [ADIS16209_SCAN_TEMP] = { },
-};
-
-static int adis16209_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- case IIO_INCLI:
- bits = 14;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16209_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static int adis16209_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16209_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0)
- *val2 = 305180; /* 0.30518 mV */
- else
- *val2 = 610500; /* 0.6105 mV */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(244140); /* 0.244140 mg */
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_INCLI:
- case IIO_ROT:
- *val = 0;
- *val2 = 25000; /* 0.025 degree */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 0x4FE; /* 25 C = 0x4FE */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 14;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16209_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16209_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16209_SUPPLY_OUT, ADIS16209_SCAN_SUPPLY, 0, 14),
- ADIS_TEMP_CHAN(ADIS16209_TEMP_OUT, ADIS16209_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16209_XACCL_OUT, ADIS16209_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16209_YACCL_OUT, ADIS16209_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_AUX_ADC_CHAN(ADIS16209_AUX_ADC, ADIS16209_SCAN_AUX_ADC, 0, 12),
- ADIS_INCLI_CHAN(X, ADIS16209_XINCL_OUT, ADIS16209_SCAN_INCLI_X,
- 0, 0, 14),
- ADIS_INCLI_CHAN(Y, ADIS16209_YINCL_OUT, ADIS16209_SCAN_INCLI_Y,
- 0, 0, 14),
- ADIS_ROT_CHAN(X, ADIS16209_ROT_OUT, ADIS16209_SCAN_ROT, 0, 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(8)
-};
-
-static const struct iio_info adis16209_info = {
- .read_raw = &adis16209_read_raw,
- .write_raw = &adis16209_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16209_status_error_msgs[] = {
- [ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16209_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16209_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16209_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16209_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16209_data = {
- .read_delay = 30,
- .msc_ctrl_reg = ADIS16209_MSC_CTRL,
- .glob_cmd_reg = ADIS16209_GLOB_CMD,
- .diag_stat_reg = ADIS16209_DIAG_STAT,
-
- .self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16209_STARTUP_DELAY,
-
- .status_error_msgs = adis16209_status_error_msgs,
- .status_error_mask = BIT(ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16209_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16209_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16209_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16209_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16209_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16209_info;
- indio_dev->channels = adis16209_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16209_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16209_data);
- if (ret)
- return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16209_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16209_driver = {
- .driver = {
- .name = "adis16209",
- .owner = THIS_MODULE,
- },
- .probe = adis16209_probe,
- .remove = adis16209_remove,
-};
-module_spi_driver(adis16209_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16209");
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
deleted file mode 100644
index eab86331124f..000000000000
--- a/drivers/staging/iio/accel/adis16220.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#ifndef SPI_ADIS16220_H_
-#define SPI_ADIS16220_H_
-
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16220_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16220_FLASH_CNT 0x00
-/* Control, acceleration offset adjustment control */
-#define ADIS16220_ACCL_NULL 0x02
-/* Control, AIN1 offset adjustment control */
-#define ADIS16220_AIN1_NULL 0x04
-/* Control, AIN2 offset adjustment control */
-#define ADIS16220_AIN2_NULL 0x06
-/* Output, power supply during capture */
-#define ADIS16220_CAPT_SUPPLY 0x0A
-/* Output, temperature during capture */
-#define ADIS16220_CAPT_TEMP 0x0C
-/* Output, peak acceleration during capture */
-#define ADIS16220_CAPT_PEAKA 0x0E
-/* Output, peak AIN1 level during capture */
-#define ADIS16220_CAPT_PEAK1 0x10
-/* Output, peak AIN2 level during capture */
-#define ADIS16220_CAPT_PEAK2 0x12
-/* Output, capture buffer for acceleration */
-#define ADIS16220_CAPT_BUFA 0x14
-/* Output, capture buffer for AIN1 */
-#define ADIS16220_CAPT_BUF1 0x16
-/* Output, capture buffer for AIN2 */
-#define ADIS16220_CAPT_BUF2 0x18
-/* Control, capture buffer address pointer */
-#define ADIS16220_CAPT_PNTR 0x1A
-/* Control, capture control register */
-#define ADIS16220_CAPT_CTRL 0x1C
-/* Control, capture period (automatic mode) */
-#define ADIS16220_CAPT_PRD 0x1E
-/* Control, Alarm A, acceleration peak threshold */
-#define ADIS16220_ALM_MAGA 0x20
-/* Control, Alarm 1, AIN1 peak threshold */
-#define ADIS16220_ALM_MAG1 0x22
-/* Control, Alarm 2, AIN2 peak threshold */
-#define ADIS16220_ALM_MAG2 0x24
-/* Control, Alarm S, peak threshold */
-#define ADIS16220_ALM_MAGS 0x26
-/* Control, alarm configuration register */
-#define ADIS16220_ALM_CTRL 0x28
-/* Control, general I/O configuration */
-#define ADIS16220_GPIO_CTRL 0x32
-/* Control, self-test control, AIN configuration */
-#define ADIS16220_MSC_CTRL 0x34
-/* Control, digital I/O configuration */
-#define ADIS16220_DIO_CTRL 0x36
-/* Control, filter configuration */
-#define ADIS16220_AVG_CNT 0x38
-/* Status, system status */
-#define ADIS16220_DIAG_STAT 0x3C
-/* Control, system commands */
-#define ADIS16220_GLOB_CMD 0x3E
-/* Status, self-test response */
-#define ADIS16220_ST_DELTA 0x40
-/* Lot Identification Code 1 */
-#define ADIS16220_LOT_ID1 0x52
-/* Lot Identification Code 2 */
-#define ADIS16220_LOT_ID2 0x54
-/* Product identifier; convert to decimal = 16220 */
-#define ADIS16220_PROD_ID 0x56
-/* Serial number */
-#define ADIS16220_SERIAL_NUM 0x58
-
-#define ADIS16220_CAPTURE_SIZE 2048
-
-/* MSC_CTRL */
-#define ADIS16220_MSC_CTRL_SELF_TEST_EN BIT(8)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN1 BIT(1)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN2 BIT(0)
-
-/* DIO_CTRL */
-#define ADIS16220_MSC_CTRL_DIO2_BUSY_IND (BIT(5) | BIT(4))
-#define ADIS16220_MSC_CTRL_DIO1_BUSY_IND (BIT(3) | BIT(2))
-#define ADIS16220_MSC_CTRL_DIO2_ACT_HIGH BIT(1)
-#define ADIS16220_MSC_CTRL_DIO1_ACT_HIGH BIT(0)
-
-/* DIAG_STAT */
-/* AIN2 sample > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_ALM_MAG2 BIT(14)
-/* AIN1 sample > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_ALM_MAG1 BIT(13)
-/* Acceleration sample > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_ALM_MAGA BIT(12)
-/* Error condition programmed into ALM_MAGS[11:0] and ALM_CTRL[5:4] is true */
-#define ADIS16220_DIAG_STAT_ALM_MAGS BIT(11)
-/* |Peak value in AIN2 data capture| > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN2 BIT(10)
-/* |Peak value in AIN1 data capture| > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN1 BIT(9)
-/* |Peak value in acceleration data capture| > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_PEAK_ACCEL BIT(8)
-/* Data ready, capture complete */
-#define ADIS16220_DIAG_STAT_DATA_RDY BIT(7)
-#define ADIS16220_DIAG_STAT_FLASH_CHK BIT(6)
-#define ADIS16220_DIAG_STAT_SELF_TEST BIT(5)
-/* Capture period violation/interruption */
-#define ADIS16220_DIAG_STAT_VIOLATION_BIT 4
-/* SPI communications failure */
-#define ADIS16220_DIAG_STAT_SPI_FAIL_BIT 3
-/* Flash update failure */
-#define ADIS16220_DIAG_STAT_FLASH_UPT_BIT 2
-/* Power supply above 3.625 V */
-#define ADIS16220_DIAG_STAT_POWER_HIGH_BIT 1
-/* Power supply below 3.15 V */
-#define ADIS16220_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16220_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16220_GLOB_CMD_SELF_TEST BIT(2)
-#define ADIS16220_GLOB_CMD_PWR_DOWN BIT(1)
-
-#define ADIS16220_MAX_TX 2048
-#define ADIS16220_MAX_RX 2048
-
-#define ADIS16220_SPI_BURST (u32)(1000 * 1000)
-#define ADIS16220_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct adis16220_state - device instance specific data
- * @adis: adis device
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct adis16220_state {
- struct adis adis;
-
- struct mutex buf_lock;
- u8 tx[ADIS16220_MAX_TX] ____cacheline_aligned;
- u8 rx[ADIS16220_MAX_RX];
-};
-
-#endif /* SPI_ADIS16220_H_ */
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
deleted file mode 100644
index e46a91c69a31..000000000000
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * ADIS16220 Programmable Digital Vibration Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#include "adis16220.h"
-
-static ssize_t adis16220_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16220_state *st = iio_priv(indio_dev);
- ssize_t ret;
- u16 val;
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis_read_reg_16(&st->adis, this_attr->address, &val);
- mutex_unlock(&indio_dev->mlock);
- if (ret)
- return ret;
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t adis16220_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = adis_write_reg_16(&st->adis, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int adis16220_capture(struct iio_dev *indio_dev)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
-
- /* initiates a manual data capture */
- ret = adis_write_reg_16(&st->adis, ADIS16220_GLOB_CMD, 0xBF08);
- if (ret)
- dev_err(&indio_dev->dev, "problem beginning capture");
-
- usleep_range(10000, 11000); /* delay for capture to finish */
-
- return ret;
-}
-
-static ssize_t adis16220_write_capture(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool val;
- int ret;
-
- ret = strtobool(buf, &val);
- if (ret)
- return ret;
- if (!val)
- return -EINVAL;
- ret = adis16220_capture(indio_dev);
- if (ret)
- return ret;
-
- return len;
-}
-
-static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
- char *buf,
- loff_t off,
- size_t count,
- int addr)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .delay_usecs = 25,
- }, {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .cs_change = 1,
- .delay_usecs = 25,
- },
- };
- int ret;
- int i;
-
- if (unlikely(!count))
- return count;
-
- if ((off >= ADIS16220_CAPTURE_SIZE) || (count & 1) || (off & 1))
- return -EINVAL;
-
- if (off + count > ADIS16220_CAPTURE_SIZE)
- count = ADIS16220_CAPTURE_SIZE - off;
-
- /* write the begin position of capture buffer */
- ret = adis_write_reg_16(&st->adis,
- ADIS16220_CAPT_PNTR,
- off > 1);
- if (ret)
- return -EIO;
-
- /* read count/2 values from capture buffer */
- mutex_lock(&st->buf_lock);
-
- for (i = 0; i < count; i += 2) {
- st->tx[i] = ADIS_READ_REG(addr);
- st->tx[i + 1] = 0;
- }
- xfers[1].len = count;
-
- ret = spi_sync_transfer(st->adis.spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- mutex_unlock(&st->buf_lock);
- return -EIO;
- }
-
- memcpy(buf, st->rx, count);
-
- mutex_unlock(&st->buf_lock);
- return count;
-}
-
-static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf,
- loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUFA);
-}
-
-static struct bin_attribute accel_bin = {
- .attr = {
- .name = "accel_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_accel_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF1);
-}
-
-static struct bin_attribute adc1_bin = {
- .attr = {
- .name = "in0_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc1_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF2);
-}
-
-static struct bin_attribute adc2_bin = {
- .attr = {
- .name = "in1_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc2_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-#define IIO_DEV_ATTR_CAPTURE(_store) \
- IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
-
-static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
-
-#define IIO_DEV_ATTR_CAPTURE_COUNT(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(capture_count, _mode, _show, _store, _addr)
-
-static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
- adis16220_read_16bit,
- adis16220_write_16bit,
- ADIS16220_CAPT_PNTR);
-
-enum adis16220_channel {
- in_supply, in_1, in_2, accel, temp
-};
-
-struct adis16220_address_spec {
- u8 addr;
- u8 bits;
- bool sign;
-};
-
-/* Address / bits / signed */
-static const struct adis16220_address_spec adis16220_addresses[][3] = {
- [in_supply] = { { ADIS16220_CAPT_SUPPLY, 12, 0 }, },
- [in_1] = { { ADIS16220_CAPT_BUF1, 16, 1 },
- { ADIS16220_AIN1_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK1, 16, 1 }, },
- [in_2] = { { ADIS16220_CAPT_BUF2, 16, 1 },
- { ADIS16220_AIN2_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK2, 16, 1 }, },
- [accel] = { { ADIS16220_CAPT_BUFA, 16, 1 },
- { ADIS16220_ACCL_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAKA, 16, 1 }, },
- [temp] = { { ADIS16220_CAPT_TEMP, 12, 0 }, }
-};
-
-static int adis16220_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- const struct adis16220_address_spec *addr;
- int ret = -EINVAL;
- int addrind = 0;
- u16 uval;
- s16 sval;
- u8 bits;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- addrind = 0;
- break;
- case IIO_CHAN_INFO_OFFSET:
- if (chan->type == IIO_TEMP) {
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- }
- addrind = 1;
- break;
- case IIO_CHAN_INFO_PEAK:
- addrind = 2;
- break;
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val2 = IIO_G_TO_M_S_2(19073); /* 19.073 g */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220700; /* 1.2207 mV */
- } else {
- /* Should really be dependent on VDD */
- *val2 = 305180; /* 305.18 uV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- default:
- return -EINVAL;
- }
- addr = &adis16220_addresses[chan->address][addrind];
- if (addr->sign) {
- ret = adis_read_reg_16(&st->adis, addr->addr, &sval);
- if (ret)
- return ret;
- bits = addr->bits;
- sval &= (1 << bits) - 1;
- sval = (s16)(sval << (16 - bits)) >> (16 - bits);
- *val = sval;
- return IIO_VAL_INT;
- }
- ret = adis_read_reg_16(&st->adis, addr->addr, &uval);
- if (ret)
- return ret;
- bits = addr->bits;
- uval &= (1 << bits) - 1;
- *val = uval;
- return IIO_VAL_INT;
-}
-
-static const struct iio_chan_spec adis16220_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "supply",
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_supply,
- }, {
- .type = IIO_ACCEL,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_PEAK),
- .address = accel,
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = temp,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_1,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 2,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = in_2,
- }
-};
-
-static struct attribute *adis16220_attributes[] = {
- &iio_dev_attr_capture.dev_attr.attr,
- &iio_dev_attr_capture_count.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16220_attribute_group = {
- .attrs = adis16220_attributes,
-};
-
-static const struct iio_info adis16220_info = {
- .attrs = &adis16220_attribute_group,
- .driver_module = THIS_MODULE,
- .read_raw = &adis16220_read_raw,
-};
-
-static const char * const adis16220_status_error_msgs[] = {
- [ADIS16220_DIAG_STAT_VIOLATION_BIT] = "Capture period violation/interruption",
- [ADIS16220_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16220_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16220_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16220_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16220_data = {
- .read_delay = 35,
- .write_delay = 35,
- .msc_ctrl_reg = ADIS16220_MSC_CTRL,
- .glob_cmd_reg = ADIS16220_GLOB_CMD,
- .diag_stat_reg = ADIS16220_DIAG_STAT,
-
- .self_test_mask = ADIS16220_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16220_STARTUP_DELAY,
-
- .status_error_msgs = adis16220_status_error_msgs,
- .status_error_mask = BIT(ADIS16220_DIAG_STAT_VIOLATION_BIT) |
- BIT(ADIS16220_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16220_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16220_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16220_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16220_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adis16220_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- if (ret)
- goto error_rm_accel_bin;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- if (ret)
- goto error_rm_adc1_bin;
-
- ret = adis_init(&st->adis, indio_dev, spi, &adis16220_data);
- if (ret)
- goto error_rm_adc2_bin;
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(&st->adis);
- if (ret)
- goto error_rm_adc2_bin;
- return 0;
-
-error_rm_adc2_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
-error_rm_adc1_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
-error_rm_accel_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
- return ret;
-}
-
-static int adis16220_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
-
- return 0;
-}
-
-static struct spi_driver adis16220_driver = {
- .driver = {
- .name = "adis16220",
- .owner = THIS_MODULE,
- },
- .probe = adis16220_probe,
- .remove = adis16220_remove,
-};
-module_spi_driver(adis16220_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
deleted file mode 100644
index 66b5ad2f42c5..000000000000
--- a/drivers/staging/iio/accel/adis16240.h
+++ /dev/null
@@ -1,129 +0,0 @@
-#ifndef SPI_ADIS16240_H_
-#define SPI_ADIS16240_H_
-
-#define ADIS16240_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16240_FLASH_CNT 0x00
-/* Output, power supply */
-#define ADIS16240_SUPPLY_OUT 0x02
-/* Output, x-axis accelerometer */
-#define ADIS16240_XACCL_OUT 0x04
-/* Output, y-axis accelerometer */
-#define ADIS16240_YACCL_OUT 0x06
-/* Output, z-axis accelerometer */
-#define ADIS16240_ZACCL_OUT 0x08
-/* Output, auxiliary ADC input */
-#define ADIS16240_AUX_ADC 0x0A
-/* Output, temperature */
-#define ADIS16240_TEMP_OUT 0x0C
-/* Output, x-axis acceleration peak */
-#define ADIS16240_XPEAK_OUT 0x0E
-/* Output, y-axis acceleration peak */
-#define ADIS16240_YPEAK_OUT 0x10
-/* Output, z-axis acceleration peak */
-#define ADIS16240_ZPEAK_OUT 0x12
-/* Output, sum-of-squares acceleration peak */
-#define ADIS16240_XYZPEAK_OUT 0x14
-/* Output, Capture Buffer 1, X and Y acceleration */
-#define ADIS16240_CAPT_BUF1 0x16
-/* Output, Capture Buffer 2, Z acceleration */
-#define ADIS16240_CAPT_BUF2 0x18
-/* Diagnostic, error flags */
-#define ADIS16240_DIAG_STAT 0x1A
-/* Diagnostic, event counter */
-#define ADIS16240_EVNT_CNTR 0x1C
-/* Diagnostic, check sum value from firmware test */
-#define ADIS16240_CHK_SUM 0x1E
-/* Calibration, x-axis acceleration offset adjustment */
-#define ADIS16240_XACCL_OFF 0x20
-/* Calibration, y-axis acceleration offset adjustment */
-#define ADIS16240_YACCL_OFF 0x22
-/* Calibration, z-axis acceleration offset adjustment */
-#define ADIS16240_ZACCL_OFF 0x24
-/* Clock, hour and minute */
-#define ADIS16240_CLK_TIME 0x2E
-/* Clock, month and day */
-#define ADIS16240_CLK_DATE 0x30
-/* Clock, year */
-#define ADIS16240_CLK_YEAR 0x32
-/* Wake-up setting, hour and minute */
-#define ADIS16240_WAKE_TIME 0x34
-/* Wake-up setting, month and day */
-#define ADIS16240_WAKE_DATE 0x36
-/* Alarm 1 amplitude threshold */
-#define ADIS16240_ALM_MAG1 0x38
-/* Alarm 2 amplitude threshold */
-#define ADIS16240_ALM_MAG2 0x3A
-/* Alarm control */
-#define ADIS16240_ALM_CTRL 0x3C
-/* Capture, external trigger control */
-#define ADIS16240_XTRIG_CTRL 0x3E
-/* Capture, address pointer */
-#define ADIS16240_CAPT_PNTR 0x40
-/* Capture, configuration and control */
-#define ADIS16240_CAPT_CTRL 0x42
-/* General-purpose digital input/output control */
-#define ADIS16240_GPIO_CTRL 0x44
-/* Miscellaneous control */
-#define ADIS16240_MSC_CTRL 0x46
-/* Internal sample period (rate) control */
-#define ADIS16240_SMPL_PRD 0x48
-/* System command */
-#define ADIS16240_GLOB_CMD 0x4A
-
-/* MSC_CTRL */
-/* Enables sum-of-squares output (XYZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
-/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
-/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
-#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
-/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
-
-/* DIAG_STAT */
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
-/* Capture buffer full: 1 = capture buffer is full */
-#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
-/* Flash test, checksum flag: 1 = mismatch, 0 = match */
-#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
-/* Power-on, self-test flag: 1 = failure, 0 = pass */
-#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
-/* Power-on self-test: 1 = in-progress, 0 = complete */
-#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
-/* SPI communications failure */
-#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
-/* Flash update failure */
-#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
-/* Power supply above 3.625 V */
-#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
-/* Power supply below 3.15 V */
-#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16240_GLOB_CMD_RESUME BIT(8)
-#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
-
-#define ADIS16240_ERROR_ACTIVE BIT(14)
-
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-#define ADIS16240_SCAN_ACC_X 0
-#define ADIS16240_SCAN_ACC_Y 1
-#define ADIS16240_SCAN_ACC_Z 2
-#define ADIS16240_SCAN_SUPPLY 3
-#define ADIS16240_SCAN_AUX_ADC 4
-#define ADIS16240_SCAN_TEMP 5
-
-#endif /* SPI_ADIS16240_H_ */
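The register map removed above was not driven with raw SPI transfers; it was consumed through the shared adis helper library, as the core driver below shows. As a rough illustration (a minimal sketch, assuming the adis_read_reg_16() helper from <linux/iio/imu/adis.h>; the adis16240_any_alarm() name is hypothetical), a status poll against DIAG_STAT would look like:

#include <linux/iio/imu/adis.h>

/* Sketch only: read DIAG_STAT and report whether either alarm bit is set.
 * ADIS16240_DIAG_STAT and the ALARM bits are the definitions removed above.
 */
static int adis16240_any_alarm(struct adis *st)
{
	u16 stat;
	int ret;

	ret = adis_read_reg_16(st, ADIS16240_DIAG_STAT, &stat);
	if (ret)
		return ret;

	return !!(stat & (ADIS16240_DIAG_STAT_ALARM1 |
			  ADIS16240_DIAG_STAT_ALARM2));
}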
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
deleted file mode 100644
index cb074e864408..000000000000
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * ADIS16240 Programmable Impact Sensor and Recorder driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16240.h"
-
-static ssize_t adis16240_spi_read_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf,
- unsigned bits)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis *st = iio_priv(indio_dev);
- int ret;
- s16 val = 0;
- unsigned shift = 16 - bits;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = adis_read_reg_16(st,
- this_attr->address, (u16 *)&val);
- if (ret)
- return ret;
-
- if (val & ADIS16240_ERROR_ACTIVE)
- adis_check_status(st);
-
- val = (s16)(val << shift) >> shift;
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t adis16240_read_12bit_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis16240_spi_read_signed(dev, attr, buf, 12);
- mutex_unlock(&indio_dev->mlock);
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(in_accel_xyz_squared_peak_raw, S_IRUGO,
- adis16240_read_12bit_signed, NULL,
- ADIS16240_XYZPEAK_OUT);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("4096");
-
-static const u8 adis16240_addresses[][2] = {
- [ADIS16240_SCAN_ACC_X] = { ADIS16240_XACCL_OFF, ADIS16240_XPEAK_OUT },
- [ADIS16240_SCAN_ACC_Y] = { ADIS16240_YACCL_OFF, ADIS16240_YPEAK_OUT },
- [ADIS16240_SCAN_ACC_Z] = { ADIS16240_ZACCL_OFF, ADIS16240_ZPEAK_OUT },
-};
-
-static int adis16240_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16240_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 4;
- *val2 = 880000; /* 4.88 mV */
- return IIO_VAL_INT_PLUS_MICRO;
- }
- return -EINVAL;
- case IIO_TEMP:
- *val = 244; /* 0.244 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_PEAK_SCALE:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- bits = 10;
- mutex_lock(&indio_dev->mlock);
- addr = adis16240_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_PEAK:
- bits = 10;
- mutex_lock(&indio_dev->mlock);
- addr = adis16240_addresses[chan->scan_index][1];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16240_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits = 10;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- val16 = val & ((1 << bits) - 1);
- addr = adis16240_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16240_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16240_SUPPLY_OUT, ADIS16240_SCAN_SUPPLY, 0, 10),
- ADIS_AUX_ADC_CHAN(ADIS16240_AUX_ADC, ADIS16240_SCAN_AUX_ADC, 0, 10),
- ADIS_ACCEL_CHAN(X, ADIS16240_XACCL_OUT, ADIS16240_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Y, ADIS16240_YACCL_OUT, ADIS16240_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Z, ADIS16240_ZACCL_OUT, ADIS16240_SCAN_ACC_Z,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_TEMP_CHAN(ADIS16240_TEMP_OUT, ADIS16240_SCAN_TEMP, 0, 10),
- IIO_CHAN_SOFT_TIMESTAMP(6)
-};
-
-static struct attribute *adis16240_attributes[] = {
- &iio_dev_attr_in_accel_xyz_squared_peak_raw.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16240_attribute_group = {
- .attrs = adis16240_attributes,
-};
-
-static const struct iio_info adis16240_info = {
- .attrs = &adis16240_attribute_group,
- .read_raw = &adis16240_read_raw,
- .write_raw = &adis16240_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16240_status_error_msgs[] = {
- [ADIS16240_DIAG_STAT_PWRON_FAIL_BIT] = "Power on, self-test failed",
- [ADIS16240_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16240_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16240_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
-};
-
-static const struct adis_data adis16240_data = {
- .write_delay = 35,
- .read_delay = 35,
- .msc_ctrl_reg = ADIS16240_MSC_CTRL,
- .glob_cmd_reg = ADIS16240_GLOB_CMD,
- .diag_stat_reg = ADIS16240_DIAG_STAT,
-
- .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16240_STARTUP_DELAY,
-
- .status_error_msgs = adis16240_status_error_msgs,
- .status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16240_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16240_info;
- indio_dev->channels = adis16240_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16240_data);
- if (ret)
- return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16240_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16240_driver = {
- .driver = {
- .name = "adis16240",
- .owner = THIS_MODULE,
- },
- .probe = adis16240_probe,
- .remove = adis16240_remove,
-};
-module_spi_driver(adis16240_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16240");
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
deleted file mode 100644
index 3f24c629be6f..000000000000
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * lis3l02dq.h -- support STMicroelectronics LIS3L02DQ
- * 3d 2g Linear Accelerometers via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Loosely based upon tle62x0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SPI_LIS3L02DQ_H_
-#define SPI_LIS3L02DQ_H_
-#define LIS3L02DQ_READ_REG(a) ((a) | 0x80)
-#define LIS3L02DQ_WRITE_REG(a) a
-
-/* Calibration parameters */
-#define LIS3L02DQ_REG_OFFSET_X_ADDR 0x16
-#define LIS3L02DQ_REG_OFFSET_Y_ADDR 0x17
-#define LIS3L02DQ_REG_OFFSET_Z_ADDR 0x18
-
-#define LIS3L02DQ_REG_GAIN_X_ADDR 0x19
-#define LIS3L02DQ_REG_GAIN_Y_ADDR 0x1A
-#define LIS3L02DQ_REG_GAIN_Z_ADDR 0x1B
-
-/* Control Register (1 of 2) */
-#define LIS3L02DQ_REG_CTRL_1_ADDR 0x20
-/* Power ctrl - either bit set corresponds to on */
-#define LIS3L02DQ_REG_CTRL_1_PD_ON 0xC0
-
-/* Decimation Factor */
-#define LIS3L02DQ_DEC_MASK 0x30
-#define LIS3L02DQ_REG_CTRL_1_DF_128 0x00
-#define LIS3L02DQ_REG_CTRL_1_DF_64 0x10
-#define LIS3L02DQ_REG_CTRL_1_DF_32 0x20
-#define LIS3L02DQ_REG_CTRL_1_DF_8 (0x10 | 0x20)
-
-/* Self Test Enable */
-#define LIS3L02DQ_REG_CTRL_1_SELF_TEST_ON 0x08
-
-/* Axes enable ctrls */
-#define LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE 0x04
-#define LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE 0x02
-#define LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE 0x01
-
-/* Control Register (2 of 2) */
-#define LIS3L02DQ_REG_CTRL_2_ADDR 0x21
-
-/* Block Data Update only after MSB and LSB read */
-#define LIS3L02DQ_REG_CTRL_2_BLOCK_UPDATE 0x40
-
-/* Set to big endian output */
-#define LIS3L02DQ_REG_CTRL_2_BIG_ENDIAN 0x20
-
-/* Reboot memory content */
-#define LIS3L02DQ_REG_CTRL_2_REBOOT_MEMORY 0x10
-
-/* Interrupt Enable - applies data ready to the RDY pad */
-#define LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT 0x08
-
-/* Enable Data Ready Generation - relationship with previous unclear in docs */
-#define LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION 0x04
-
-/* SPI 3 wire mode */
-#define LIS3L02DQ_REG_CTRL_2_THREE_WIRE_SPI_MODE 0x02
-
-/* Data alignment, default is 12 bit right justified
- * - option for 16 bit left justified */
-#define LIS3L02DQ_REG_CTRL_2_DATA_ALIGNMENT_16_BIT_LEFT_JUSTIFIED 0x01
-
-/* Interrupt related stuff */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_ADDR 0x23
-
-/* Switch from or combination of conditions to and */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_BOOLEAN_AND 0x80
-
-/* Latch interrupt request,
- * if on ack must be given by reading the ack register */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC 0x40
-
-/* Z Interrupt on High (above threshold) */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_HIGH 0x20
-/* Z Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_LOW 0x10
-/* Y Interrupt on High */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_HIGH 0x08
-/* Y Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_LOW 0x04
-/* X Interrupt on High */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_HIGH 0x02
-/* X Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW 0x01
-
-/* Register that describes what caused an interrupt
- * - latched if set in the WAKE_UP_CFG register */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_ADDR 0x24
-/* top bit ignored */
-/* Interrupt Active */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_ACTIVATED 0x40
-/* Interrupts that have been triggered */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH 0x20
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW 0x10
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH 0x08
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW 0x04
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH 0x02
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW 0x01
-
-#define LIS3L02DQ_REG_WAKE_UP_ACK_ADDR 0x25
-
-/* Status register */
-#define LIS3L02DQ_REG_STATUS_ADDR 0x27
-/* XYZ axis data overrun - first is all overrun? */
-#define LIS3L02DQ_REG_STATUS_XYZ_OVERRUN 0x80
-#define LIS3L02DQ_REG_STATUS_Z_OVERRUN 0x40
-#define LIS3L02DQ_REG_STATUS_Y_OVERRUN 0x20
-#define LIS3L02DQ_REG_STATUS_X_OVERRUN 0x10
-/* XYZ new data available - first is all 3 available? */
-#define LIS3L02DQ_REG_STATUS_XYZ_NEW_DATA 0x08
-#define LIS3L02DQ_REG_STATUS_Z_NEW_DATA 0x04
-#define LIS3L02DQ_REG_STATUS_Y_NEW_DATA 0x02
-#define LIS3L02DQ_REG_STATUS_X_NEW_DATA 0x01
-
-/* The accelerometer readings - low and high bytes.
- * Form of high byte dependent on justification set in ctrl reg */
-#define LIS3L02DQ_REG_OUT_X_L_ADDR 0x28
-#define LIS3L02DQ_REG_OUT_X_H_ADDR 0x29
-#define LIS3L02DQ_REG_OUT_Y_L_ADDR 0x2A
-#define LIS3L02DQ_REG_OUT_Y_H_ADDR 0x2B
-#define LIS3L02DQ_REG_OUT_Z_L_ADDR 0x2C
-#define LIS3L02DQ_REG_OUT_Z_H_ADDR 0x2D
-
-/* Threshold values for all axes and both above and below thresholds
- * - i.e. there is only one value */
-#define LIS3L02DQ_REG_THS_L_ADDR 0x2E
-#define LIS3L02DQ_REG_THS_H_ADDR 0x2F
-
-#define LIS3L02DQ_DEFAULT_CTRL1 (LIS3L02DQ_REG_CTRL_1_PD_ON \
- | LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE \
- | LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE \
- | LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE \
- | LIS3L02DQ_REG_CTRL_1_DF_128)
-
-#define LIS3L02DQ_DEFAULT_CTRL2 0
-
-#define LIS3L02DQ_MAX_TX 12
-#define LIS3L02DQ_MAX_RX 12
-/**
- * struct lis3l02dq_state - device instance specific data
- * @us: actual spi_device
- * @trig: data ready trigger registered with iio
- * @buf_lock: mutex to protect tx and rx
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct lis3l02dq_state {
- struct spi_device *us;
- struct iio_trigger *trig;
- struct mutex buf_lock;
- int gpio;
- bool trigger_on;
-
- u8 tx[LIS3L02DQ_MAX_TX] ____cacheline_aligned;
- u8 rx[LIS3L02DQ_MAX_RX] ____cacheline_aligned;
-};
-
-int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
- u8 reg_address,
- u8 *val);
-
-int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
- u8 reg_address,
- u8 val);
-
-int lis3l02dq_disable_all_events(struct iio_dev *indio_dev);
-
-#ifdef CONFIG_IIO_BUFFER
-/* At the moment triggers are only used for buffer
- * filling. This may change!
- */
-void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
-int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
-
-int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
-void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
-
-irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
-#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
-
-#else /* CONFIG_IIO_BUFFER */
-#define lis3l02dq_th lis3l02dq_nobuffer
-
-static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
-{
-}
-
-static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
-{
-}
-#endif /* CONFIG_IIO_BUFFER */
-#endif /* SPI_LIS3L02DQ_H_ */
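The LIS3L02DQ_READ_REG()/LIS3L02DQ_WRITE_REG() macros above encode the transfer direction in bit 7 of the address byte: reads set it, writes leave it clear. Purely as an illustration (a sketch, not how the driver below does it; the lis3l02dq_read_one() name is hypothetical), a one-off register read with that encoding could be written as:

#include <linux/spi/spi.h>

/* Sketch only: single-register read using the bit-7 read flag defined above.
 * spi_write_then_read() bounces through its own DMA-safe buffer; the real
 * driver instead reuses its ____cacheline_aligned st->tx/st->rx buffers
 * under st->buf_lock.
 */
static int lis3l02dq_read_one(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = LIS3L02DQ_READ_REG(reg);

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}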
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
deleted file mode 100644
index ebcab56c81b9..000000000000
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ /dev/null
@@ -1,813 +0,0 @@
-/*
- * lis3l02dq_core.c -- support STMicroelectronics LIS3L02DQ
- * 3d 2g Linear Accelerometers via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Settings:
- * 16 bit left justified mode used.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-
-#include "lis3l02dq.h"
-
-/* At the moment the SPI framework doesn't allow global setting of cs_change.
- * It is mentioned as a likely addition in the comment at the top of spi.h.
- * This means that spi_write() and similar helpers cannot always be used.
- */
-/* direct copy of the irq_default_primary_handler */
-#ifndef CONFIG_IIO_BUFFER
-static irqreturn_t lis3l02dq_nobuffer(int irq, void *private)
-{
- return IRQ_WAKE_THREAD;
-}
-#endif
-
-/**
- * lis3l02dq_spi_read_reg_8() - read single byte from a single register
- * @indio_dev: iio_dev for this actual device
- * @reg_address: the address of the register to be read
- * @val: pass back the resulting value
- **/
-int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
- u8 reg_address, u8 *val)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_READ_REG(reg_address);
- st->tx[1] = 0;
-
- ret = spi_sync_transfer(st->us, &xfer, 1);
- *val = st->rx[1];
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-/**
- * lis3l02dq_spi_write_reg_8() - write single byte to a register
- * @indio_dev: iio_dev for this device
- * @reg_address: the address of the register to be written
- * @val: the value to write
- **/
-int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_WRITE_REG(reg_address);
- st->tx[1] = val;
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-/**
- * lis3l02dq_spi_write_reg_s16() - write 2 bytes to a pair of registers
- * @indio_dev: iio_dev for this device
- * @lower_reg_address: the address of the lower of the two registers.
- * Second register is assumed to have address one greater.
- * @value: value to be written
- **/
-static int lis3l02dq_spi_write_reg_s16(struct iio_dev *indio_dev,
- u8 lower_reg_address,
- s16 value)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = { {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- }, {
- .tx_buf = st->tx + 2,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_WRITE_REG(lower_reg_address);
- st->tx[1] = value & 0xFF;
- st->tx[2] = LIS3L02DQ_WRITE_REG(lower_reg_address + 1);
- st->tx[3] = (value >> 8) & 0xFF;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int lis3l02dq_read_reg_s16(struct iio_dev *indio_dev,
- u8 lower_reg_address,
- int *val)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int ret;
- s16 tempval;
- struct spi_transfer xfers[] = { {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- }, {
- .tx_buf = st->tx + 2,
- .rx_buf = st->rx + 2,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_READ_REG(lower_reg_address);
- st->tx[1] = 0;
- st->tx[2] = LIS3L02DQ_READ_REG(lower_reg_address + 1);
- st->tx[3] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 16 bit register");
- goto error_ret;
- }
- tempval = (s16)(st->rx[1]) | ((s16)(st->rx[3]) << 8);
-
- *val = tempval;
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-enum lis3l02dq_rm_ind {
- LIS3L02DQ_ACCEL,
- LIS3L02DQ_GAIN,
- LIS3L02DQ_BIAS,
-};
-
-static u8 lis3l02dq_axis_map[3][3] = {
- [LIS3L02DQ_ACCEL] = { LIS3L02DQ_REG_OUT_X_L_ADDR,
- LIS3L02DQ_REG_OUT_Y_L_ADDR,
- LIS3L02DQ_REG_OUT_Z_L_ADDR },
- [LIS3L02DQ_GAIN] = { LIS3L02DQ_REG_GAIN_X_ADDR,
- LIS3L02DQ_REG_GAIN_Y_ADDR,
- LIS3L02DQ_REG_GAIN_Z_ADDR },
- [LIS3L02DQ_BIAS] = { LIS3L02DQ_REG_OFFSET_X_ADDR,
- LIS3L02DQ_REG_OFFSET_Y_ADDR,
- LIS3L02DQ_REG_OFFSET_Z_ADDR }
-};
-
-static int lis3l02dq_read_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- int ret;
-
- ret = lis3l02dq_read_reg_s16(indio_dev, LIS3L02DQ_REG_THS_L_ADDR, val);
- if (ret)
- return ret;
- return IIO_VAL_INT;
-}
-
-static int lis3l02dq_write_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- u16 value = val;
-
- return lis3l02dq_spi_write_reg_s16(indio_dev,
- LIS3L02DQ_REG_THS_L_ADDR,
- value);
-}
-
-static int lis3l02dq_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- int ret = -EINVAL, reg;
- u8 uval;
- s8 sval;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- if (val > 255 || val < -256)
- return -EINVAL;
- sval = val;
- reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
- ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, sval);
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
- if (val & ~0xFF)
- return -EINVAL;
- uval = val;
- reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
- ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, uval);
- break;
- }
- return ret;
-}
-
-static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- u8 utemp;
- s8 stemp;
- ssize_t ret = 0;
- u8 reg;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
- ret = -EBUSY;
- } else {
- reg = lis3l02dq_axis_map
- [LIS3L02DQ_ACCEL][chan->address];
- ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
- }
- mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
- goto error_ret;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 9580;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_CALIBSCALE:
- reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
- ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, &utemp);
- if (ret)
- goto error_ret;
- /* to match with what previous code does */
- *val = utemp;
- return IIO_VAL_INT;
-
- case IIO_CHAN_INFO_CALIBBIAS:
- reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
- ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, (u8 *)&stemp);
- /* to match with what previous code does */
- *val = stemp;
- return IIO_VAL_INT;
- }
-error_ret:
- return ret;
-}
-
-static ssize_t lis3l02dq_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int ret, len = 0;
- s8 t;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- (u8 *)&t);
- if (ret)
- return ret;
- t &= LIS3L02DQ_DEC_MASK;
- switch (t) {
- case LIS3L02DQ_REG_CTRL_1_DF_128:
- len = sprintf(buf, "280\n");
- break;
- case LIS3L02DQ_REG_CTRL_1_DF_64:
- len = sprintf(buf, "560\n");
- break;
- case LIS3L02DQ_REG_CTRL_1_DF_32:
- len = sprintf(buf, "1120\n");
- break;
- case LIS3L02DQ_REG_CTRL_1_DF_8:
- len = sprintf(buf, "4480\n");
- break;
- }
- return len;
-}
-
-static ssize_t lis3l02dq_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- unsigned long val;
- int ret;
- u8 t;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&indio_dev->mlock);
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &t);
- if (ret)
- goto error_ret_mutex;
- /* Wipe the bits clean */
- t &= ~LIS3L02DQ_DEC_MASK;
- switch (val) {
- case 280:
- t |= LIS3L02DQ_REG_CTRL_1_DF_128;
- break;
- case 560:
- t |= LIS3L02DQ_REG_CTRL_1_DF_64;
- break;
- case 1120:
- t |= LIS3L02DQ_REG_CTRL_1_DF_32;
- break;
- case 4480:
- t |= LIS3L02DQ_REG_CTRL_1_DF_8;
- break;
- default:
- ret = -EINVAL;
- goto error_ret_mutex;
- }
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- t);
-
-error_ret_mutex:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static int lis3l02dq_initial_setup(struct iio_dev *indio_dev)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int ret;
- u8 val, valtest;
-
- st->us->mode = SPI_MODE_3;
-
- spi_setup(st->us);
-
- val = LIS3L02DQ_DEFAULT_CTRL1;
- /* Write suitable defaults to ctrl1 */
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with setup control register 1");
- goto err_ret;
- }
- /* Repeat as sometimes doesn't work first time? */
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with setup control register 1");
- goto err_ret;
- }
-
- /* Read back to check this has worked; it also acts as a loose test
- * that the correct chip is present */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &valtest);
- if (ret || (valtest != val)) {
- dev_err(&indio_dev->dev,
- "device not playing ball %d %d\n", valtest, val);
- ret = -EINVAL;
- goto err_ret;
- }
-
- val = LIS3L02DQ_DEFAULT_CTRL2;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with setup control register 2");
- goto err_ret;
- }
-
- val = LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- val);
- if (ret)
- dev_err(&st->us->dev, "problem with interrupt cfg register");
-err_ret:
-
- return ret;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- lis3l02dq_read_frequency,
- lis3l02dq_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("280 560 1120 4480");
-
-static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- u8 t;
-
- s64 timestamp = iio_get_time_ns();
-
- lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
- &t);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- /* Ack and allow for new interrupts */
- lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_ACK_ADDR,
- &t);
-
- return IRQ_HANDLED;
-}
-
-static const struct iio_event_spec lis3l02dq_event[] = {
- {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_ENABLE),
- .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
- }, {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_ENABLE),
- .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
- }
-};
-
-#define LIS3L02DQ_CHAN(index, mod) \
- { \
- .type = IIO_ACCEL, \
- .modified = 1, \
- .channel2 = mod, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_CALIBSCALE) | \
- BIT(IIO_CHAN_INFO_CALIBBIAS), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .address = index, \
- .scan_index = index, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 12, \
- .storagebits = 16, \
- }, \
- .event_spec = lis3l02dq_event, \
- .num_event_specs = ARRAY_SIZE(lis3l02dq_event), \
- }
-
-static const struct iio_chan_spec lis3l02dq_channels[] = {
- LIS3L02DQ_CHAN(0, IIO_MOD_X),
- LIS3L02DQ_CHAN(1, IIO_MOD_Y),
- LIS3L02DQ_CHAN(2, IIO_MOD_Z),
- IIO_CHAN_SOFT_TIMESTAMP(3)
-};
-
-static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- u8 val;
- int ret;
- u8 mask = (1 << (chan->channel2*2 + (dir == IIO_EV_DIR_RISING)));
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- &val);
- if (ret < 0)
- return ret;
-
- return !!(val & mask);
-}
-
-int lis3l02dq_disable_all_events(struct iio_dev *indio_dev)
-{
- int ret;
- u8 control, val;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- &control);
-
- control &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- control);
- if (ret)
- goto error_ret;
- /* Also for consistency clear the mask */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- &val);
- if (ret)
- goto error_ret;
- val &= ~0x3f;
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- val);
- if (ret)
- goto error_ret;
-
- ret = control;
-error_ret:
- return ret;
-}
-
-static int lis3l02dq_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int state)
-{
- int ret = 0;
- u8 val, control;
- u8 currentlyset;
- bool changed = false;
- u8 mask = (1 << (chan->channel2*2 + (dir == IIO_EV_DIR_RISING)));
-
- mutex_lock(&indio_dev->mlock);
- /* read current control */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- &control);
- if (ret)
- goto error_ret;
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- &val);
- if (ret < 0)
- goto error_ret;
- currentlyset = val & mask;
-
- if (!currentlyset && state) {
- changed = true;
- val |= mask;
- } else if (currentlyset && !state) {
- changed = true;
- val &= ~mask;
- }
-
- if (changed) {
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- val);
- if (ret)
- goto error_ret;
- control = val & 0x3f ?
- (control | LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT) :
- (control & ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT);
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- control);
- if (ret)
- goto error_ret;
- }
-
-error_ret:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
-
-static struct attribute *lis3l02dq_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group lis3l02dq_attribute_group = {
- .attrs = lis3l02dq_attributes,
-};
-
-static const struct iio_info lis3l02dq_info = {
- .read_raw = &lis3l02dq_read_raw,
- .write_raw = &lis3l02dq_write_raw,
- .read_event_value = &lis3l02dq_read_thresh,
- .write_event_value = &lis3l02dq_write_thresh,
- .write_event_config = &lis3l02dq_write_event_config,
- .read_event_config = &lis3l02dq_read_event_config,
- .driver_module = THIS_MODULE,
- .attrs = &lis3l02dq_attribute_group,
-};
-
-static int lis3l02dq_probe(struct spi_device *spi)
-{
- int ret;
- struct lis3l02dq_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st->us = spi;
- st->gpio = of_get_gpio(spi->dev.of_node, 0);
- mutex_init(&st->buf_lock);
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &lis3l02dq_info;
- indio_dev->channels = lis3l02dq_channels;
- indio_dev->num_channels = ARRAY_SIZE(lis3l02dq_channels);
-
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = lis3l02dq_configure_buffer(indio_dev);
- if (ret)
- return ret;
-
- if (spi->irq) {
- ret = request_threaded_irq(st->us->irq,
- &lis3l02dq_th,
- &lis3l02dq_event_handler,
- IRQF_TRIGGER_RISING,
- "lis3l02dq",
- indio_dev);
- if (ret)
- goto error_unreg_buffer_funcs;
-
- ret = lis3l02dq_probe_trigger(indio_dev);
- if (ret)
- goto error_free_interrupt;
- }
-
- /* Get the device into a sane initial state */
- ret = lis3l02dq_initial_setup(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- return 0;
-
-error_remove_trigger:
- if (spi->irq)
- lis3l02dq_remove_trigger(indio_dev);
-error_free_interrupt:
- if (spi->irq)
- free_irq(st->us->irq, indio_dev);
-error_unreg_buffer_funcs:
- lis3l02dq_unconfigure_buffer(indio_dev);
- return ret;
-}
-
-/* Power down the device */
-static int lis3l02dq_stop_device(struct iio_dev *indio_dev)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- u8 val = 0;
-
- mutex_lock(&indio_dev->mlock);
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with turning device off: ctrl1");
- goto err_ret;
- }
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- val);
- if (ret)
- dev_err(&st->us->dev, "problem with turning device off: ctrl2");
-err_ret:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
-
-/* fixme, confirm ordering in this function */
-static int lis3l02dq_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- lis3l02dq_disable_all_events(indio_dev);
- lis3l02dq_stop_device(indio_dev);
-
- if (spi->irq)
- free_irq(st->us->irq, indio_dev);
-
- lis3l02dq_remove_trigger(indio_dev);
- lis3l02dq_unconfigure_buffer(indio_dev);
-
- return 0;
-}
-
-static struct spi_driver lis3l02dq_driver = {
- .driver = {
- .name = "lis3l02dq",
- .owner = THIS_MODULE,
- },
- .probe = lis3l02dq_probe,
- .remove = lis3l02dq_remove,
-};
-module_spi_driver(lis3l02dq_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:lis3l02dq");
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
deleted file mode 100644
index b892f2cf5f9e..000000000000
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ /dev/null
@@ -1,426 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/kfifo_buf.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include "lis3l02dq.h"
-
-/**
- * combine_8_to_16() utility function to munge two u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
-
- return _lower | (_upper << 8);
-}
-
-/**
- * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
- **/
-irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- if (st->trigger_on) {
- iio_trigger_poll(st->trig);
- return IRQ_HANDLED;
- }
-
- return IRQ_WAKE_THREAD;
-}
-
-static const u8 read_all_tx_array[] = {
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
-};
-
-/**
- * lis3l02dq_read_all() Reads all channels currently selected
- * @indio_dev: IIO device state
- * @rx_array: (dma capable) receive array, must be at least
- * 4*number of channels
- **/
-static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- struct spi_transfer *xfers;
- struct spi_message msg;
- int ret, i, j = 0;
-
- xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength) * 2,
- sizeof(*xfers), GFP_KERNEL);
- if (!xfers)
- return -ENOMEM;
-
- mutex_lock(&st->buf_lock);
-
- for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
- if (test_bit(i, indio_dev->active_scan_mask)) {
- /* lower byte */
- xfers[j].tx_buf = st->tx + 2*j;
- st->tx[2*j] = read_all_tx_array[i*4];
- st->tx[2*j + 1] = 0;
- if (rx_array)
- xfers[j].rx_buf = rx_array + j*2;
- xfers[j].bits_per_word = 8;
- xfers[j].len = 2;
- xfers[j].cs_change = 1;
- j++;
-
- /* upper byte */
- xfers[j].tx_buf = st->tx + 2*j;
- st->tx[2*j] = read_all_tx_array[i*4 + 2];
- st->tx[2*j + 1] = 0;
- if (rx_array)
- xfers[j].rx_buf = rx_array + j*2;
- xfers[j].bits_per_word = 8;
- xfers[j].len = 2;
- xfers[j].cs_change = 1;
- j++;
- }
-
- /* After these are transmitted, the rx_buff should have
- * values in alternate bytes
- */
- spi_message_init(&msg);
- for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength) * 2; j++)
- spi_message_add_tail(&xfers[j], &msg);
-
- ret = spi_sync(st->us, &msg);
- mutex_unlock(&st->buf_lock);
- kfree(xfers);
-
- return ret;
-}
-
-static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
- u8 *buf)
-{
- int ret, i;
- u8 *rx_array;
- s16 *data = (s16 *)buf;
- int scan_count = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
-
- rx_array = kcalloc(4, scan_count, GFP_KERNEL);
- if (!rx_array)
- return -ENOMEM;
- ret = lis3l02dq_read_all(indio_dev, rx_array);
- if (ret < 0) {
- kfree(rx_array);
- return ret;
- }
- for (i = 0; i < scan_count; i++)
- data[i] = combine_8_to_16(rx_array[i*4+1],
- rx_array[i*4+3]);
- kfree(rx_array);
-
- return i*sizeof(data[0]);
-}
-
-static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- int len = 0;
- char *data;
-
- data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!data)
- goto done;
-
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
- len = lis3l02dq_get_buffer_element(indio_dev, data);
-
- iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
-
- kfree(data);
-done:
- iio_trigger_notify_done(indio_dev->trig);
- return IRQ_HANDLED;
-}
-
-/* Caller responsible for locking as necessary. */
-static int
-__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
-{
- int ret;
- u8 valold;
- bool currentlyset;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- /* Get the current event mask register */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- &valold);
- if (ret)
- goto error_ret;
- /* Find out if data ready is already on */
- currentlyset
- = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
- /* Disable requested */
- if (!state && currentlyset) {
- /* Disable the data ready signal */
- valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
- /* The double write is to overcome a hardware bug? */
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- valold);
- if (ret)
- goto error_ret;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- valold);
- if (ret)
- goto error_ret;
- st->trigger_on = false;
- /* Enable requested */
- } else if (state && !currentlyset) {
- /* Enable requested and not currently set:
- * first disable all events */
- ret = lis3l02dq_disable_all_events(indio_dev);
- if (ret < 0)
- goto error_ret;
-
- valold = ret |
- LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
- st->trigger_on = true;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- valold);
- if (ret)
- goto error_ret;
- }
-
- return 0;
-error_ret:
- return ret;
-}
-
-/**
- * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
- *
- * When disabling the interrupt, a final read is also done to ensure it is clear.
- * This is only important in some cases where the scan enable elements are
- * switched before the buffer is reenabled.
- **/
-static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
- bool state)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
- int ret = 0;
- u8 t;
-
- __lis3l02dq_write_data_ready_config(indio_dev, state);
- if (!state) {
- /*
- * A possible quirk with the handler is currently worked around
- * by ensuring outstanding read events are cleared.
- */
- ret = lis3l02dq_read_all(indio_dev, NULL);
- }
- lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
- &t);
- return ret;
-}
-
-/**
- * lis3l02dq_trig_try_reen() try reenabling irq for data rdy trigger
- * @trig: the datardy trigger
- */
-static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int i;
-
- /* If the gpio is still high (or high again);
- * in theory we may need to do this several times */
- for (i = 0; i < 5; i++)
- if (gpio_get_value(st->gpio))
- lis3l02dq_read_all(indio_dev, NULL);
- else
- break;
- if (i == 5)
- pr_info("Failed to clear the interrupt for lis3l02dq\n");
-
- /* irq reenabled so success! */
- return 0;
-}
-
-static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
- .try_reenable = &lis3l02dq_trig_try_reen,
-};
-
-int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
- if (!st->trig) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- st->trig->dev.parent = &st->us->dev;
- st->trig->ops = &lis3l02dq_trigger_ops;
- iio_trigger_set_drvdata(st->trig, indio_dev);
- ret = iio_trigger_register(st->trig);
- if (ret)
- goto error_free_trig;
-
- return 0;
-
-error_free_trig:
- iio_trigger_free(st->trig);
-error_ret:
- return ret;
-}
-
-void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- iio_trigger_unregister(st->trig);
- iio_trigger_free(st->trig);
-}
-
-void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
-{
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_kfifo_free(indio_dev->buffer);
-}
-
-static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
-{
- /* Disable unwanted channels otherwise the interrupt will not clear */
- u8 t;
- int ret;
- bool oneenabled = false;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &t);
- if (ret)
- goto error_ret;
-
- if (test_bit(0, indio_dev->active_scan_mask)) {
- t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- oneenabled = true;
- } else {
- t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- }
- if (test_bit(1, indio_dev->active_scan_mask)) {
- t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- oneenabled = true;
- } else {
- t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- }
- if (test_bit(2, indio_dev->active_scan_mask)) {
- t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
- oneenabled = true;
- } else {
- t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
- }
- if (!oneenabled) /* what happens in this case is unknown */
- return -EINVAL;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- t);
- if (ret)
- goto error_ret;
-
- return iio_triggered_buffer_postenable(indio_dev);
-error_ret:
- return ret;
-}
-
-/* Turn all channels on again */
-static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
-{
- u8 t;
- int ret;
-
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret)
- goto error_ret;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &t);
- if (ret)
- goto error_ret;
- t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
- LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
- LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- t);
-
-error_ret:
- return ret;
-}
-
-static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
- .postenable = &lis3l02dq_buffer_postenable,
- .predisable = &lis3l02dq_buffer_predisable,
-};
-
-int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
-{
- int ret;
- struct iio_buffer *buffer;
-
- buffer = iio_kfifo_allocate();
- if (!buffer)
- return -ENOMEM;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- buffer->scan_timestamp = true;
- indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
-
- /* Functions are NULL as we set handler below */
- indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
- &lis3l02dq_trigger_handler,
- 0,
- indio_dev,
- "lis3l02dq_consumer%d",
- indio_dev->id);
-
- if (!indio_dev->pollfunc) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;
- }
-
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
- return 0;
-
-error_iio_sw_rb_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
-}
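The ring code above follows the usual IIO triggered-buffer shape: the pollfunc handler assembles one scan, pushes it with the captured timestamp, then signals the trigger. A stripped-down sketch of that shape (assuming a fixed three-channel s16 scan; the example_trigger_handler name is hypothetical):

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

/* Sketch only: minimal triggered-buffer handler of the same shape as
 * lis3l02dq_trigger_handler() above, with a stack scan instead of kmalloc.
 */
static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		s16 chan[3];
		s64 ts __aligned(8);
	} scan = { };

	/* ... read the enabled channels into scan.chan[] here ... */

	iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}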
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
deleted file mode 100644
index 9c8a9587df7d..000000000000
--- a/drivers/staging/iio/accel/sca3000.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * sca3000.h -- support VTI sca3000 series accelerometers
- * via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Partly based upon tle62x0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Initial mode is direct measurement.
- *
- * Untested things
- *
- * Temperature reading (the e05 I'm testing with doesn't have a sensor)
- *
- * Free fall detection mode - supported but untested as I'm not dropping my
- * dubious wire rig far enough to test it.
- *
- * Unsupported as yet
- *
- * Time stamping of data from ring. Various ideas on how to do this but none
- * are remotely simple. Suggestions welcome.
- *
- * Individual enabling disabling of channels going into ring buffer
- *
- * Overflow handling (this is signaled for all but 8 bit ring buffer mode.)
- *
- * Motion detector using AND combinations of signals.
- *
- * Note: Be very careful not to touch any register bytes marked
- * as reserved on the data sheet. They really mean it, as changing the contents
- * of some will cause the device to lock up.
- *
- * Known issues - on rare occasions the interrupts lock up. Not sure why as yet.
- * Can probably alleviate this by reading the interrupt register on start, but
- * that is really just brushing the problem under the carpet.
- */
-#ifndef _SCA3000
-#define _SCA3000
-
-#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
-#define SCA3000_READ_REG(a) ((a) << 2)
-
-#define SCA3000_REG_ADDR_REVID 0x00
-#define SCA3000_REVID_MAJOR_MASK 0xf0
-#define SCA3000_REVID_MINOR_MASK 0x0f
-
-#define SCA3000_REG_ADDR_STATUS 0x02
-#define SCA3000_LOCKED 0x20
-#define SCA3000_EEPROM_CS_ERROR 0x02
-#define SCA3000_SPI_FRAME_ERROR 0x01
-
-/* All reads done using register decrement so no need to directly access LSBs */
-#define SCA3000_REG_ADDR_X_MSB 0x05
-#define SCA3000_REG_ADDR_Y_MSB 0x07
-#define SCA3000_REG_ADDR_Z_MSB 0x09
-
-#define SCA3000_REG_ADDR_RING_OUT 0x0f
-
-/* Temp read untested - the e05 doesn't have the sensor */
-#define SCA3000_REG_ADDR_TEMP_MSB 0x13
-
-#define SCA3000_REG_ADDR_MODE 0x14
-#define SCA3000_MODE_PROT_MASK 0x28
-
-#define SCA3000_RING_BUF_ENABLE 0x80
-#define SCA3000_RING_BUF_8BIT 0x40
-/*
- * Free fall detection triggers an interrupt if the acceleration
- * is below a threshold for the equivalent of a 25 cm drop
- */
-#define SCA3000_FREE_FALL_DETECT 0x10
-#define SCA3000_MEAS_MODE_NORMAL 0x00
-#define SCA3000_MEAS_MODE_OP_1 0x01
-#define SCA3000_MEAS_MODE_OP_2 0x02
-
-/*
- * In motion detection mode the accelerations are band-pass filtered
- * (approx 1 - 25 Hz) and then a programmable threshold is used to trigger
- * an interrupt.
- */
-#define SCA3000_MEAS_MODE_MOT_DET 0x03
-
-#define SCA3000_REG_ADDR_BUF_COUNT 0x15
-
-#define SCA3000_REG_ADDR_INT_STATUS 0x16
-
-#define SCA3000_INT_STATUS_THREE_QUARTERS 0x80
-#define SCA3000_INT_STATUS_HALF 0x40
-
-#define SCA3000_INT_STATUS_FREE_FALL 0x08
-#define SCA3000_INT_STATUS_Y_TRIGGER 0x04
-#define SCA3000_INT_STATUS_X_TRIGGER 0x02
-#define SCA3000_INT_STATUS_Z_TRIGGER 0x01
-
-/* Used to allow access to multiplexed registers */
-#define SCA3000_REG_ADDR_CTRL_SEL 0x18
-/* Only available for SCA3000-D03 and SCA3000-D01 */
-#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
-#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
-#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
-#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
-#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
-/*
- * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
- * will not function
- */
-#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
-#define SCA3000_OUT_CTRL_PROT_MASK 0xE0
-#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
-#define SCA3000_OUT_CTRL_BUF_Y_EN 0x08
-#define SCA3000_OUT_CTRL_BUF_Z_EN 0x04
-#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
-#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
-
-/*
- * Control which motion detector interrupts are on.
- * For now only OR combinations are supported.
- */
-#define SCA3000_MD_CTRL_PROT_MASK 0xC0
-#define SCA3000_MD_CTRL_OR_Y 0x01
-#define SCA3000_MD_CTRL_OR_X 0x02
-#define SCA3000_MD_CTRL_OR_Z 0x04
-/* Currently unsupported */
-#define SCA3000_MD_CTRL_AND_Y 0x08
-#define SCA3000_MD_CTRL_AND_X 0x10
-#define SAC3000_MD_CTRL_AND_Z 0x20
-
-/*
- * Some control registers have complex access methods that require this
- * register to be used to remove a lock.
- */
-#define SCA3000_REG_ADDR_UNLOCK 0x1e
-
-#define SCA3000_REG_ADDR_INT_MASK 0x21
-#define SCA3000_INT_MASK_PROT_MASK 0x1C
-
-#define SCA3000_INT_MASK_RING_THREE_QUARTER 0x80
-#define SCA3000_INT_MASK_RING_HALF 0x40
-
-#define SCA3000_INT_MASK_ALL_INTS 0x02
-#define SCA3000_INT_MASK_ACTIVE_HIGH 0x01
-#define SCA3000_INT_MASK_ACTIVE_LOW 0x00
-
-/* Values of multiplexed registers (write to ctrl_data after select) */
-#define SCA3000_REG_ADDR_CTRL_DATA 0x22
-
-/*
- * Measurement modes available on some sca3000 series chips. Code assumes others
- * may become available in the future.
- *
- * Bypass - Bypass the low-pass filter in the signal channel so as to increase
- * signal bandwidth.
- *
- * Narrow - Narrow low-pass filtering of the signal channel and half output
- * data rate by decimation.
- *
- * Wide - Widen low-pass filtering of signal channel to increase bandwidth
- */
-#define SCA3000_OP_MODE_BYPASS 0x01
-#define SCA3000_OP_MODE_NARROW 0x02
-#define SCA3000_OP_MODE_WIDE 0x04
-#define SCA3000_MAX_TX 6
-#define SCA3000_MAX_RX 2
-
-/**
- * struct sca3000_state - device instance state information
- * @us: the associated spi device
- * @info: chip variant information
- * @interrupt_handler_ws: event interrupt handler for all events
- * @last_timestamp: the timestamp of the last event
- * @mo_det_use_count: reference counter for the motion detection unit
- * @lock: lock used to protect elements of sca3000_state
- * and the underlying device state.
- * @bpse: number of bits per scan element
- * @tx: dma-able transmit buffer
- * @rx: dma-able receive buffer
- **/
-struct sca3000_state {
- struct spi_device *us;
- const struct sca3000_chip_info *info;
- struct work_struct interrupt_handler_ws;
- s64 last_timestamp;
- int mo_det_use_count;
- struct mutex lock;
- int bpse;
- /* Can these share a cacheline ? */
- u8 rx[2] ____cacheline_aligned;
- u8 tx[6] ____cacheline_aligned;
-};
-
-/**
- * struct sca3000_chip_info - model dependent parameters
- * @scale: scale * 10^-6
- * @temp_output: some devices have temperature sensors.
- * @measurement_mode_freq: normal mode sampling frequency
- * @option_mode_1: first optional mode. Not all models have one
- * @option_mode_1_freq: option mode 1 sampling frequency
- * @option_mode_2: second optional mode. Not all chips have one
- * @option_mode_2_freq: option mode 2 sampling frequency
- *
- * This structure is used to hold information about the functionality of a given
- * sca3000 variant.
- **/
-struct sca3000_chip_info {
- unsigned int scale;
- bool temp_output;
- int measurement_mode_freq;
- int option_mode_1;
- int option_mode_1_freq;
- int option_mode_2;
- int option_mode_2_freq;
- int mot_det_mult_xz[6];
- int mot_det_mult_y[7];
-};
-
-int sca3000_read_data_short(struct sca3000_state *st,
- u8 reg_address_high,
- int len);
-
-/**
- * sca3000_write_reg() write a single register
- * @address: address of register on chip
- * @val: value to be written to register
- *
- * The main lock must be held.
- **/
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val);
-
-#ifdef CONFIG_IIO_BUFFER
-/**
- * sca3000_register_ring_funcs() setup the ring state change functions
- **/
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev);
-
-/**
- * sca3000_configure_ring() - allocate and configure ring buffer
- * @indio_dev: iio-core device whose ring is to be configured
- *
- * The hardware ring buffer needs far fewer ring buffer functions than
- * a software one as a lot of things are handled automatically.
- * This function also tells the iio core that our device supports a
- * hardware ring buffer mode.
- **/
-int sca3000_configure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_unconfigure_ring() - deallocate the ring buffer
- * @indio_dev: iio-core device whose ring we are freeing
- **/
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_ring_int_process() handles ring related event pushing and escalation
- * @val: the event code
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring);
-
-#else
-static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
-}
-
-static inline
-int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void sca3000_ring_int_process(u8 val, void *ring)
-{
-}
-
-#endif
-#endif /* _SCA3000 */
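In the sca3000 register scheme above, the register address occupies the upper bits of the first SPI byte and bit 1 selects a write, which is what the SCA3000_WRITE_REG()/SCA3000_READ_REG() macros encode. As a small illustration (a sketch only, reusing sca3000_read_data_short() declared above and implemented in sca3000_core.c below; the sca3000_print_revid() name is hypothetical), the REVID register would be read and decoded like this:

#include <linux/kernel.h>

/* Sketch only: read REVID via sca3000_read_data_short() and split it with
 * the major/minor masks defined above. The caller is assumed to hold the
 * lock required by the low-level helpers.
 */
static int sca3000_print_revid(struct sca3000_state *st)
{
	int ret;

	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
	if (ret < 0)
		return ret;

	pr_info("sca3000 revision %u.%u\n",
		(st->rx[0] & SCA3000_REVID_MAJOR_MASK) >> 4,
		st->rx[0] & SCA3000_REVID_MINOR_MASK);

	return 0;
}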
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
deleted file mode 100644
index b614f272b5f4..000000000000
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ /dev/null
@@ -1,1209 +0,0 @@
-/*
- * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- * See industrialio/accels/sca3000.h for comments.
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-
-#include "sca3000.h"
-
-enum sca3000_variant {
- d01,
- e02,
- e04,
- e05,
-};
-
-/*
- * Note where option modes are not defined, the chip simply does not
- * support any.
- * Other chips in the sca3000 series use i2c and are not included here.
- *
- * Some of these devices are only listed in the family data sheet and
- * do not actually appear to be available.
- */
-static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
- [d01] = {
- .scale = 7357,
- .temp_output = true,
- .measurement_mode_freq = 250,
- .option_mode_1 = SCA3000_OP_MODE_BYPASS,
- .option_mode_1_freq = 250,
- .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
- .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
- },
- [e02] = {
- .scale = 9810,
- .measurement_mode_freq = 125,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 63,
- .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
- .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
- },
- [e04] = {
- .scale = 19620,
- .measurement_mode_freq = 100,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 50,
- .option_mode_2 = SCA3000_OP_MODE_WIDE,
- .option_mode_2_freq = 400,
- .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
- .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
- },
- [e05] = {
- .scale = 61313,
- .measurement_mode_freq = 200,
- .option_mode_1 = SCA3000_OP_MODE_NARROW,
- .option_mode_1_freq = 50,
- .option_mode_2 = SCA3000_OP_MODE_WIDE,
- .option_mode_2_freq = 400,
- .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
- .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
- },
-};
-
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
-{
- st->tx[0] = SCA3000_WRITE_REG(address);
- st->tx[1] = val;
- return spi_write(st->us, st->tx, 2);
-}
-
-int sca3000_read_data_short(struct sca3000_state *st,
- uint8_t reg_address_high,
- int len)
-{
- struct spi_transfer xfer[2] = {
- {
- .len = 1,
- .tx_buf = st->tx,
- }, {
- .len = len,
- .rx_buf = st->rx,
- }
- };
- st->tx[0] = SCA3000_READ_REG(reg_address_high);
-
- return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_reg_lock_on() test if the ctrl register lock is on
- *
- * Lock must be held.
- **/
-static int sca3000_reg_lock_on(struct sca3000_state *st)
-{
- int ret;
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1);
- if (ret < 0)
- return ret;
-
- return !(st->rx[0] & SCA3000_LOCKED);
-}
-
-/**
- * __sca3000_unlock_reg_lock() unlock the control registers
- *
- * Note the device does not appear to support doing this in a single transfer.
- * This should only ever be used as part of ctrl reg read.
- * Lock must be held before calling this
- **/
-static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
-{
- struct spi_transfer xfer[3] = {
- {
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx,
- }, {
- .len = 2,
- .cs_change = 1,
- .tx_buf = st->tx + 2,
- }, {
- .len = 2,
- .tx_buf = st->tx + 4,
- },
- };
- st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[1] = 0x00;
- st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[3] = 0x50;
- st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
- st->tx[5] = 0xA0;
-
- return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_write_ctrl_reg() write to a lock protect ctrl register
- * @sel: selects which registers we wish to write to
- * @val: the value to be written
- *
- * Certain control registers are protected against overwriting by the lock
- * register and use a shared write address. This function allows writing of
- * these registers.
- * Lock must be held.
- **/
-static int sca3000_write_ctrl_reg(struct sca3000_state *st,
- uint8_t sel,
- uint8_t val)
-{
-
- int ret;
-
- ret = sca3000_reg_lock_on(st);
- if (ret < 0)
- goto error_ret;
- if (ret) {
- ret = __sca3000_unlock_reg_lock(st);
- if (ret)
- goto error_ret;
- }
-
- /* Set the control select register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel);
- if (ret)
- goto error_ret;
-
- /* Write the actual value into the register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val);
-
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_ctrl_reg() read from lock protected control register.
- *
- * Lock must be held.
- **/
-static int sca3000_read_ctrl_reg(struct sca3000_state *st,
- u8 ctrl_reg)
-{
- int ret;
-
- ret = sca3000_reg_lock_on(st);
- if (ret < 0)
- goto error_ret;
- if (ret) {
- ret = __sca3000_unlock_reg_lock(st);
- if (ret)
- goto error_ret;
- }
- /* Set the control select register */
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg);
- if (ret)
- goto error_ret;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1);
- if (ret)
- goto error_ret;
- else
- return st->rx[0];
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_show_rev() - sysfs interface to read the chip revision number
- **/
-static ssize_t sca3000_show_rev(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0, ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
- if (ret < 0)
- goto error_ret;
- len += sprintf(buf + len,
- "major=%d, minor=%d\n",
- st->rx[0] & SCA3000_REVID_MAJOR_MASK,
- st->rx[0] & SCA3000_REVID_MINOR_MASK);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_show_available_measurement_modes() display available modes
- *
- * This is all read from chip specific data in the driver. Not all
- * of the sca3000 series support modes other than normal.
- **/
-static ssize_t
-sca3000_show_available_measurement_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0;
-
- len += sprintf(buf + len, "0 - normal mode");
- switch (st->info->option_mode_1) {
- case SCA3000_OP_MODE_NARROW:
- len += sprintf(buf + len, ", 1 - narrow mode");
- break;
- case SCA3000_OP_MODE_BYPASS:
- len += sprintf(buf + len, ", 1 - bypass mode");
- break;
- }
- switch (st->info->option_mode_2) {
- case SCA3000_OP_MODE_WIDE:
- len += sprintf(buf + len, ", 2 - wide mode");
- break;
- }
- /* always supported */
- len += sprintf(buf + len, " 3 - motion detection\n");
-
- return len;
-}
-
-/**
- * sca3000_show_measurement_mode() sysfs read of current mode
- **/
-static ssize_t
-sca3000_show_measurement_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0, ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- /* mask bottom 2 bits - only ones that are relevant */
- st->rx[0] &= 0x03;
- switch (st->rx[0]) {
- case SCA3000_MEAS_MODE_NORMAL:
- len += sprintf(buf + len, "0 - normal mode\n");
- break;
- case SCA3000_MEAS_MODE_MOT_DET:
- len += sprintf(buf + len, "3 - motion detection\n");
- break;
- case SCA3000_MEAS_MODE_OP_1:
- switch (st->info->option_mode_1) {
- case SCA3000_OP_MODE_NARROW:
- len += sprintf(buf + len, "1 - narrow mode\n");
- break;
- case SCA3000_OP_MODE_BYPASS:
- len += sprintf(buf + len, "1 - bypass mode\n");
- break;
- }
- break;
- case SCA3000_MEAS_MODE_OP_2:
- switch (st->info->option_mode_2) {
- case SCA3000_OP_MODE_WIDE:
- len += sprintf(buf + len, "2 - wide mode\n");
- break;
- }
- break;
- }
-
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_store_measurement_mode() set the current mode
- **/
-static ssize_t
-sca3000_store_measurement_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 mask = 0x03;
- u8 val;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- if (val > 3) {
- ret = -EINVAL;
- goto error_ret;
- }
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- st->rx[0] &= ~mask;
- st->rx[0] |= (val & mask);
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, st->rx[0]);
- if (ret)
- goto error_ret;
- mutex_unlock(&st->lock);
-
- return len;
-
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-
-/*
- * Not even vaguely standard attributes so defined here rather than
- * in the relevant IIO core headers
- */
-static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
- sca3000_show_available_measurement_modes,
- NULL, 0);
-
-static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
- sca3000_show_measurement_mode,
- sca3000_store_measurement_mode,
- 0);
-
-/* More standard attributes */
-
-static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
-
-static const struct iio_event_spec sca3000_event = {
- .type = IIO_EV_TYPE_MAG,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
-};
-
-#define SCA3000_CHAN(index, mod) \
- { \
- .type = IIO_ACCEL, \
- .modified = 1, \
- .channel2 = mod, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
- .address = index, \
- .scan_index = index, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 11, \
- .storagebits = 16, \
- .shift = 5, \
- }, \
- .event_spec = &sca3000_event, \
- .num_event_specs = 1, \
- }
-
-static const struct iio_chan_spec sca3000_channels[] = {
- SCA3000_CHAN(0, IIO_MOD_X),
- SCA3000_CHAN(1, IIO_MOD_Y),
- SCA3000_CHAN(2, IIO_MOD_Z),
-};
-
-static const struct iio_chan_spec sca3000_channels_with_temp[] = {
- SCA3000_CHAN(0, IIO_MOD_X),
- SCA3000_CHAN(1, IIO_MOD_Y),
- SCA3000_CHAN(2, IIO_MOD_Z),
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_OFFSET),
- /* No buffer support */
- .scan_index = -1,
- },
-};
-
-static u8 sca3000_addresses[3][3] = {
- [0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH,
- SCA3000_MD_CTRL_OR_X},
- [1] = {SCA3000_REG_ADDR_Y_MSB, SCA3000_REG_CTRL_SEL_MD_Y_TH,
- SCA3000_MD_CTRL_OR_Y},
- [2] = {SCA3000_REG_ADDR_Z_MSB, SCA3000_REG_CTRL_SEL_MD_Z_TH,
- SCA3000_MD_CTRL_OR_Z},
-};
-
-static int sca3000_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 address;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&st->lock);
- if (chan->type == IIO_ACCEL) {
- if (st->mo_det_use_count) {
- mutex_unlock(&st->lock);
- return -EBUSY;
- }
- address = sca3000_addresses[chan->address][0];
- ret = sca3000_read_data_short(st, address, 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
- *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = ((*val) << (sizeof(*val)*8 - 13)) >>
- (sizeof(*val)*8 - 13);
- } else {
- /* get the temperature when available */
- ret = sca3000_read_data_short(st,
- SCA3000_REG_ADDR_TEMP_MSB, 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
- *val = ((st->rx[0] & 0x3F) << 3) |
- ((st->rx[1] & 0xE0) >> 5);
- }
- mutex_unlock(&st->lock);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- if (chan->type == IIO_ACCEL)
- *val2 = st->info->scale;
- else /* temperature */
- *val2 = 555556;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_OFFSET:
- *val = -214;
- *val2 = 600000;
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * sca3000_read_av_freq() sysfs function to get available frequencies
- *
- * The later modes are only relevant to the ring buffer - and depend on current
- * mode. Note that data sheet gives rather wide tolerances for these so integer
- * division will give good enough answer and not all chips have them specified
- * at all.
- **/
-static ssize_t sca3000_read_av_freq(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int len = 0, ret, val;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- goto error_ret;
-
- switch (val & 0x03) {
- case SCA3000_MEAS_MODE_NORMAL:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->measurement_mode_freq,
- st->info->measurement_mode_freq/2,
- st->info->measurement_mode_freq/4);
- break;
- case SCA3000_MEAS_MODE_OP_1:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->option_mode_1_freq,
- st->info->option_mode_1_freq/2,
- st->info->option_mode_1_freq/4);
- break;
- case SCA3000_MEAS_MODE_OP_2:
- len += sprintf(buf + len, "%d %d %d\n",
- st->info->option_mode_2_freq,
- st->info->option_mode_2_freq/2,
- st->info->option_mode_2_freq/4);
- break;
- }
- return len;
-error_ret:
- return ret;
-}
-/**
- * __sca3000_get_base_freq() obtain mode specific base frequency
- *
- * lock must be held
- **/
-static inline int __sca3000_get_base_freq(struct sca3000_state *st,
- const struct sca3000_chip_info *info,
- int *base_freq)
-{
- int ret;
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- switch (0x03 & st->rx[0]) {
- case SCA3000_MEAS_MODE_NORMAL:
- *base_freq = info->measurement_mode_freq;
- break;
- case SCA3000_MEAS_MODE_OP_1:
- *base_freq = info->option_mode_1_freq;
- break;
- case SCA3000_MEAS_MODE_OP_2:
- *base_freq = info->option_mode_2_freq;
- break;
- }
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_frequency() sysfs interface to get the current frequency
- **/
-static ssize_t sca3000_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, len = 0, base_freq = 0, val;
-
- mutex_lock(&st->lock);
- ret = __sca3000_get_base_freq(st, st->info, &base_freq);
- if (ret)
- goto error_ret_mut;
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- mutex_unlock(&st->lock);
- if (ret)
- goto error_ret;
- val = ret;
- if (base_freq > 0)
- switch (val & 0x03) {
- case 0x00:
- case 0x03:
- len = sprintf(buf, "%d\n", base_freq);
- break;
- case 0x01:
- len = sprintf(buf, "%d\n", base_freq/2);
- break;
- case 0x02:
- len = sprintf(buf, "%d\n", base_freq/4);
- break;
- }
-
- return len;
-error_ret_mut:
- mutex_unlock(&st->lock);
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_set_frequency() sysfs interface to set the current frequency
- **/
-static ssize_t sca3000_set_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, base_freq = 0;
- int ctrlval;
- int val;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
- /* What mode are we in? */
- ret = __sca3000_get_base_freq(st, st->info, &base_freq);
- if (ret)
- goto error_free_lock;
-
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- goto error_free_lock;
- ctrlval = ret;
- /* clear the bits */
- ctrlval &= ~0x03;
-
- if (val == base_freq/2) {
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
- } else if (val == base_freq/4) {
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
- } else if (val != base_freq) {
- ret = -EINVAL;
- goto error_free_lock;
- }
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
- ctrlval);
-error_free_lock:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/*
- * Should only really be registered if ring buffer support is compiled in.
- * Does no harm however and doing it right would add a fair bit of complexity
- */
-static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- sca3000_read_frequency,
- sca3000_set_frequency);
-
-/**
- * sca3000_read_thresh() - query of a threshold
- **/
-static int sca3000_read_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- int ret, i;
- struct sca3000_state *st = iio_priv(indio_dev);
- int num = chan->channel2;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]);
- mutex_unlock(&st->lock);
- if (ret < 0)
- return ret;
- *val = 0;
- if (num == 1)
- for_each_set_bit(i, (unsigned long *)&ret,
- ARRAY_SIZE(st->info->mot_det_mult_y))
- *val += st->info->mot_det_mult_y[i];
- else
- for_each_set_bit(i, (unsigned long *)&ret,
- ARRAY_SIZE(st->info->mot_det_mult_xz))
- *val += st->info->mot_det_mult_xz[i];
-
- return IIO_VAL_INT;
-}
-
-/**
- * sca3000_write_thresh() control of threshold
- **/
-static int sca3000_write_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int num = chan->channel2;
- int ret;
- int i;
- u8 nonlinear = 0;
-
- if (num == 1) {
- i = ARRAY_SIZE(st->info->mot_det_mult_y);
- while (i > 0)
- if (val >= st->info->mot_det_mult_y[--i]) {
- nonlinear |= (1 << i);
- val -= st->info->mot_det_mult_y[i];
- }
- } else {
- i = ARRAY_SIZE(st->info->mot_det_mult_xz);
- while (i > 0)
- if (val >= st->info->mot_det_mult_xz[--i]) {
- nonlinear |= (1 << i);
- val -= st->info->mot_det_mult_xz[i];
- }
- }
-
- mutex_lock(&st->lock);
- ret = sca3000_write_ctrl_reg(st, sca3000_addresses[num][1], nonlinear);
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-static struct attribute *sca3000_attributes[] = {
- &iio_dev_attr_revision.dev_attr.attr,
- &iio_dev_attr_measurement_mode_available.dev_attr.attr,
- &iio_dev_attr_measurement_mode.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group sca3000_attribute_group = {
- .attrs = sca3000_attributes,
-};
-
-/**
- * sca3000_event_handler() - handling ring and non ring events
- *
- * Ring related interrupt handler. Depending on event, push to
- * the ring buffer event chrdev or the event one.
- *
- * This function is complicated by the fact that the devices can signify ring
- * and non ring events via the same interrupt line and they can only
- * be distinguished via a read of the relevant status register.
- **/
-static irqreturn_t sca3000_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, val;
- s64 last_timestamp = iio_get_time_ns();
-
- /*
- * Could lead if badly timed to an extra read of status reg,
- * but ensures no interrupt is missed.
- */
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- goto done;
-
- sca3000_ring_int_process(val, indio_dev->buffer);
-
- if (val & SCA3000_INT_STATUS_FREE_FALL)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_FALLING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_Y_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_X_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
- if (val & SCA3000_INT_STATUS_Z_TRIGGER)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_RISING),
- last_timestamp);
-
-done:
- return IRQ_HANDLED;
-}
-
-/**
- * sca3000_read_event_config() what events are enabled
- **/
-static int sca3000_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- u8 protect_mask = 0x03;
- int num = chan->channel2;
-
- /* read current value of mode register */
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
-
- if ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET)
- ret = 0;
- else {
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto error_ret;
- /* only supporting logical or's for now */
- ret = !!(ret & sca3000_addresses[num][2]);
- }
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-/**
- * sca3000_query_free_fall_mode() is free fall mode enabled
- **/
-static ssize_t sca3000_query_free_fall_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int val;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret < 0)
- return ret;
- return sprintf(buf, "%d\n", !!(val & SCA3000_FREE_FALL_DETECT));
-}
-
-/**
- * sca3000_set_free_fall_mode() simple on off control for free fall int
- *
- * In these chips the free fall detector should send an interrupt if
- * the device falls more than 25cm. This has not been tested due
- * to fragile wiring.
- **/
-static ssize_t sca3000_set_free_fall_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- u8 val;
- int ret;
- u8 protect_mask = SCA3000_FREE_FALL_DETECT;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- /* read current value of mode register */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
-
- /* if off and should be on */
- if (val && !(st->rx[0] & protect_mask))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] | SCA3000_FREE_FALL_DETECT));
- /* if on and should be off */
- else if (!val && (st->rx[0] & protect_mask))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask));
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-/**
- * sca3000_write_event_config() simple on off control for motion detector
- *
- * This is a per axis control, but enabling any will result in the
- * motion detector unit being enabled.
- * N.B. enabling motion detector stops normal data acquisition.
- * There is a complexity in knowing which mode to return to when
- * this mode is disabled. Currently normal mode is assumed.
- **/
-static int sca3000_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int state)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, ctrlval;
- u8 protect_mask = 0x03;
- int num = chan->channel2;
-
- mutex_lock(&st->lock);
- /*
- * First read the motion detector config to find out if
- * this axis is on
- */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto exit_point;
- ctrlval = ret;
- /* if off and should be on */
- if (state && !(ctrlval & sca3000_addresses[num][2])) {
- ret = sca3000_write_ctrl_reg(st,
- SCA3000_REG_CTRL_SEL_MD_CTRL,
- ctrlval |
- sca3000_addresses[num][2]);
- if (ret)
- goto exit_point;
- st->mo_det_use_count++;
- } else if (!state && (ctrlval & sca3000_addresses[num][2])) {
- ret = sca3000_write_ctrl_reg(st,
- SCA3000_REG_CTRL_SEL_MD_CTRL,
- ctrlval &
- ~(sca3000_addresses[num][2]));
- if (ret)
- goto exit_point;
- st->mo_det_use_count--;
- }
-
- /* read current value of mode register */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto exit_point;
- /* if off and should be on */
- if ((st->mo_det_use_count)
- && ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask)
- | SCA3000_MEAS_MODE_MOT_DET);
- /* if on and should be off */
- else if (!(st->mo_det_use_count)
- && ((st->rx[0] & protect_mask) == SCA3000_MEAS_MODE_MOT_DET))
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~protect_mask));
-exit_point:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-/* Free fall detector related event attribute */
-static IIO_DEVICE_ATTR_NAMED(accel_xayaz_mag_falling_en,
- in_accel_x&y&z_mag_falling_en,
- S_IRUGO | S_IWUSR,
- sca3000_query_free_fall_mode,
- sca3000_set_free_fall_mode,
- 0);
-
-static IIO_CONST_ATTR_NAMED(accel_xayaz_mag_falling_period,
- in_accel_x&y&z_mag_falling_period,
- "0.226");
-
-static struct attribute *sca3000_event_attributes[] = {
- &iio_dev_attr_accel_xayaz_mag_falling_en.dev_attr.attr,
- &iio_const_attr_accel_xayaz_mag_falling_period.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group sca3000_event_attribute_group = {
- .attrs = sca3000_event_attributes,
- .name = "events",
-};
-
-/**
- * sca3000_clean_setup() get the device into a predictable state
- *
- * Devices use flash memory to store many of the register values
- * and hence can come up in somewhat unpredictable states.
- * Hence reset everything on driver load.
- **/
-static int sca3000_clean_setup(struct sca3000_state *st)
-{
- int ret;
-
- mutex_lock(&st->lock);
- /* Ensure all interrupts have been acknowledged */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
- if (ret)
- goto error_ret;
-
- /* Turn off all motion detection channels */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
- if (ret < 0)
- goto error_ret;
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
- ret & SCA3000_MD_CTRL_PROT_MASK);
- if (ret)
- goto error_ret;
-
- /* Disable ring buffer */
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
- (ret & SCA3000_OUT_CTRL_PROT_MASK)
- | SCA3000_OUT_CTRL_BUF_X_EN
- | SCA3000_OUT_CTRL_BUF_Y_EN
- | SCA3000_OUT_CTRL_BUF_Z_EN
- | SCA3000_OUT_CTRL_BUF_DIV_4);
- if (ret)
- goto error_ret;
- /* Enable interrupts, relevant to mode and set up as active low */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- (ret & SCA3000_INT_MASK_PROT_MASK)
- | SCA3000_INT_MASK_ACTIVE_LOW);
- if (ret)
- goto error_ret;
- /*
- * Select normal measurement mode, free fall off, ring off
- * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
- * as that occurs in one of the example on the datasheet
- */
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
- (st->rx[0] & SCA3000_MODE_PROT_MASK));
- st->bpse = 11;
-
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static const struct iio_info sca3000_info = {
- .attrs = &sca3000_attribute_group,
- .read_raw = &sca3000_read_raw,
- .event_attrs = &sca3000_event_attribute_group,
- .read_event_value = &sca3000_read_thresh,
- .write_event_value = &sca3000_write_thresh,
- .read_event_config = &sca3000_read_event_config,
- .write_event_config = &sca3000_write_event_config,
- .driver_module = THIS_MODULE,
-};
-
-static int sca3000_probe(struct spi_device *spi)
-{
- int ret;
- struct sca3000_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
- st->us = spi;
- mutex_init(&st->lock);
- st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
- ->driver_data];
-
- indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->info = &sca3000_info;
- if (st->info->temp_output) {
- indio_dev->channels = sca3000_channels_with_temp;
- indio_dev->num_channels =
- ARRAY_SIZE(sca3000_channels_with_temp);
- } else {
- indio_dev->channels = sca3000_channels;
- indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
- }
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- sca3000_configure_ring(indio_dev);
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- return ret;
-
- if (spi->irq) {
- ret = request_threaded_irq(spi->irq,
- NULL,
- &sca3000_event_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "sca3000",
- indio_dev);
- if (ret)
- goto error_unregister_dev;
- }
- sca3000_register_ring_funcs(indio_dev);
- ret = sca3000_clean_setup(st);
- if (ret)
- goto error_free_irq;
- return 0;
-
-error_free_irq:
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
-error_unregister_dev:
- iio_device_unregister(indio_dev);
- return ret;
-}
-
-static int sca3000_stop_all_interrupts(struct sca3000_state *st)
-{
- int ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK,
- (st->rx[0] &
- ~(SCA3000_INT_MASK_RING_THREE_QUARTER |
- SCA3000_INT_MASK_RING_HALF |
- SCA3000_INT_MASK_ALL_INTS)));
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static int sca3000_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- /* Must ensure no interrupts can be generated after this! */
- sca3000_stop_all_interrupts(st);
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
- iio_device_unregister(indio_dev);
- sca3000_unconfigure_ring(indio_dev);
-
- return 0;
-}
-
-static const struct spi_device_id sca3000_id[] = {
- {"sca3000_d01", d01},
- {"sca3000_e02", e02},
- {"sca3000_e04", e04},
- {"sca3000_e05", e05},
- {}
-};
-MODULE_DEVICE_TABLE(spi, sca3000_id);
-
-static struct spi_driver sca3000_driver = {
- .driver = {
- .name = "sca3000",
- .owner = THIS_MODULE,
- },
- .probe = sca3000_probe,
- .remove = sca3000_remove,
- .id_table = sca3000_id,
-};
-module_spi_driver(sca3000_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
-MODULE_LICENSE("GPL v2");
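The raw accelerometer read in sca3000_read_raw() above assembles a 16-bit big-endian value from the MSB/LSB register pair, shifts it right by three, masks to 13 bits and then sign-extends the result with a shift-up/arithmetic-shift-down pair. A standalone sketch of the same arithmetic, written in the equivalent well-defined subtract form (the function name is illustrative; the kernel also provides sign_extend32() for this):

    #include <stdint.h>

    /* Sign-extend a right-aligned 13-bit two's complement sample, i.e. the
     * value the driver builds with (be16_to_cpup(rx) >> 3) & 0x1FFF.
     */
    static int sign_extend_13(uint16_t raw)
    {
    	int val = raw & 0x1FFF;		/* keep the low 13 bits */

    	if (val & 0x1000)		/* bit 12 is the sign bit */
    		val -= 0x2000;
    	return val;
    }

    /* Example: raw 0x1FFF -> -1, raw 0x0FFF -> 4095. */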
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
deleted file mode 100644
index 23685e74917e..000000000000
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include "../ring_hw.h"
-#include "sca3000.h"
-
-/* RFC / future work
- *
- * The internal ring buffer doesn't actually change what it holds depending
- * on which signals are enabled etc, merely whether you can read them.
- * As such the scan mode selection is somewhat different than for a software
- * ring buffer and changing it actually covers any data already in the buffer.
- * Currently scan elements aren't configured so it doesn't matter.
- */
-
-static int sca3000_read_data(struct sca3000_state *st,
- uint8_t reg_address_high,
- u8 **rx_p,
- int len)
-{
- int ret;
- struct spi_transfer xfer[2] = {
- {
- .len = 1,
- .tx_buf = st->tx,
- }, {
- .len = len,
- }
- };
- *rx_p = kmalloc(len, GFP_KERNEL);
- if (*rx_p == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- xfer[1].rx_buf = *rx_p;
- st->tx[0] = SCA3000_READ_REG(reg_address_high);
- ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
- if (ret) {
- dev_err(get_device(&st->us->dev), "problem reading register");
- goto error_free_rx;
- }
-
- return 0;
-error_free_rx:
- kfree(*rx_p);
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
- * @r: the ring
- * @count: number of samples to try and pull
- * @data: output the actual samples pulled from the hw ring
- *
- * Currently does not provide timestamps. As the hardware doesn't add them they
- * can only be inferred approximately from ring buffer events such as 50% full
- * and knowledge of when buffer was last emptied. This is left to userspace.
- **/
-static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
- size_t count, char __user *buf)
-{
- struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
- struct iio_dev *indio_dev = hw_ring->private;
- struct sca3000_state *st = iio_priv(indio_dev);
- u8 *rx;
- int ret, i, num_available, num_read = 0;
- int bytes_per_sample = 1;
-
- if (st->bpse == 11)
- bytes_per_sample = 2;
-
- mutex_lock(&st->lock);
- if (count % bytes_per_sample) {
- ret = -EINVAL;
- goto error_ret;
- }
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
- if (ret)
- goto error_ret;
- else
- num_available = st->rx[0];
- /*
- * num_available is the total number of samples available
- * i.e. number of time points * number of channels.
- */
- if (count > num_available * bytes_per_sample)
- num_read = num_available*bytes_per_sample;
- else
- num_read = count;
-
- ret = sca3000_read_data(st,
- SCA3000_REG_ADDR_RING_OUT,
- &rx, num_read);
- if (ret)
- goto error_ret;
-
- for (i = 0; i < num_read; i++)
- *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
-
- if (copy_to_user(buf, rx, num_read))
- ret = -EFAULT;
- kfree(rx);
- r->stufftoread = 0;
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : num_read;
-}
-
-static size_t sca3000_ring_buf_data_available(struct iio_buffer *r)
-{
- return r->stufftoread ? r->watermark : 0;
-}
-
-/**
- * sca3000_query_ring_int() is the hardware ring status interrupt enabled
- **/
-static ssize_t sca3000_query_ring_int(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret, val;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- val = st->rx[0];
- mutex_unlock(&st->lock);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", !!(val & this_attr->address));
-}
-
-/**
- * sca3000_set_ring_int() set state of ring status interrupt
- **/
-static ssize_t sca3000_set_ring_int(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u8 val;
- int ret;
-
- mutex_lock(&st->lock);
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
- if (ret)
- goto error_ret;
- if (val)
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- st->rx[0] | this_attr->address);
- else
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_INT_MASK,
- st->rx[0] & ~this_attr->address);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
- sca3000_query_ring_int,
- sca3000_set_ring_int,
- SCA3000_INT_MASK_RING_HALF);
-
-static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
- sca3000_query_ring_int,
- sca3000_set_ring_int,
- SCA3000_INT_MASK_RING_THREE_QUARTER);
-
-static ssize_t sca3000_show_buffer_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "0.%06d\n", 4*st->info->scale);
-}
-
-static IIO_DEVICE_ATTR(in_accel_scale,
- S_IRUGO,
- sca3000_show_buffer_scale,
- NULL,
- 0);
-
-/*
- * Ring buffer attributes
- * This device is a bit unusual in that the sampling frequency and bpse
- * only apply to the ring buffer. At all times full rate and accuracy
- * is available via direct reading from registers.
- */
-static const struct attribute *sca3000_ring_attributes[] = {
- &iio_dev_attr_50_percent.dev_attr.attr,
- &iio_dev_attr_75_percent.dev_attr.attr,
- &iio_dev_attr_in_accel_scale.dev_attr.attr,
- NULL,
-};
-
-static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buf;
- struct iio_hw_buffer *ring;
-
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return NULL;
-
- ring->private = indio_dev;
- buf = &ring->buf;
- buf->stufftoread = 0;
- buf->length = 64;
- buf->attrs = sca3000_ring_attributes;
- iio_buffer_init(buf);
-
- return buf;
-}
-
-static void sca3000_ring_release(struct iio_buffer *r)
-{
- kfree(iio_to_hw_buf(r));
-}
-
-static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
- .read_first_n = &sca3000_read_first_n_hw_rb,
- .data_available = sca3000_ring_buf_data_available,
- .release = sca3000_ring_release,
-
- .modes = INDIO_BUFFER_HARDWARE,
-};
-
-int sca3000_configure_ring(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer;
-
- buffer = sca3000_rb_allocate(indio_dev);
- if (buffer == NULL)
- return -ENOMEM;
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
- indio_dev->buffer->access = &sca3000_ring_access_funcs;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- return 0;
-}
-
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
-{
- iio_buffer_put(indio_dev->buffer);
-}
-
-static inline
-int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
-{
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->lock);
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- if (state) {
- dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_MODE,
- (st->rx[0] | SCA3000_RING_BUF_ENABLE));
- } else
- ret = sca3000_write_reg(st,
- SCA3000_REG_ADDR_MODE,
- (st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-/**
- * sca3000_hw_ring_preenable() hw ring buffer preenable function
- *
- * Very simple enable function as the chip will allows normal reads
- * during ring buffer operation so as long as it is indeed running
- * before we notify the core, the precise ordering does not matter.
- **/
-static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
-{
- return __sca3000_hw_ring_state_set(indio_dev, 1);
-}
-
-static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
-{
- return __sca3000_hw_ring_state_set(indio_dev, 0);
-}
-
-static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
- .preenable = &sca3000_hw_ring_preenable,
- .postdisable = &sca3000_hw_ring_postdisable,
-};
-
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
- indio_dev->setup_ops = &sca3000_ring_setup_ops;
-}
-
-/**
- * sca3000_ring_int_process() ring specific interrupt handling.
- *
- * This is only split from the main interrupt handler so as to
- * reduce the amount of code if the ring buffer is not enabled.
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
-{
- if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
- SCA3000_INT_STATUS_HALF)) {
- ring->stufftoread = true;
- wake_up_interruptible(&ring->pollq);
- }
-}
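sca3000_read_first_n_hw_rb() above clamps the requested byte count to num_available * bytes_per_sample reported by the BUF_COUNT register, then rewrites the big-endian 16-bit samples in host order before copying them to userspace. A small, endian-neutral sketch of that conversion step, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Convert big-endian 16-bit ring samples to host order, as the
     * hardware-ring read path does before copy_to_user().
     */
    static void ring_samples_to_cpu(const uint8_t *raw, uint16_t *out,
    				    size_t nsamples)
    {
    	size_t i;

    	for (i = 0; i < nsamples; i++)
    		out[i] = ((uint16_t)raw[2 * i] << 8) | raw[2 * i + 1];
    }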
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 94ae4232ee77..2f0d6cf048d2 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -1,54 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
#
# ADC drivers
#
menu "Analog to digital converters"
-config AD7606
- tristate "Analog Devices AD7606 ADC driver"
- depends on GPIOLIB || COMPILE_TEST
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Say yes here to build support for Analog Devices:
- ad7606, ad7606-6, ad7606-4 analog to digital converters (ADC).
-
- To compile this driver as a module, choose M here: the
- module will be called ad7606.
-
-config AD7606_IFACE_PARALLEL
- tristate "parallel interface support"
- depends on AD7606
- help
- Say yes here to include parallel interface support on the AD7606
- ADC driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7606_iface_parallel.
-
-config AD7606_IFACE_SPI
- tristate "spi interface support"
- depends on AD7606
- depends on SPI
- help
- Say yes here to include parallel interface support on the AD7606
- ADC driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7606_iface_spi.
-
-config AD7780
- tristate "Analog Devices AD7780 and similar ADCs driver"
- depends on SPI
- depends on GPIOLIB || COMPILE_TEST
- select AD_SIGMA_DELTA
- help
- Say yes here to build support for Analog Devices AD7170, AD7171,
- AD7780 and AD7781 SPI analog to digital converters (ADC).
- If unsure, say N (but it's safe to say "Y").
-
- To compile this driver as a module, choose M here: the
- module will be called ad7780.
-
config AD7816
tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
depends on SPI
@@ -57,62 +12,7 @@ config AD7816
Say yes here to build support for Analog Devices AD7816/7/8
temperature sensors and ADC.
-config AD7192
- tristate "Analog Devices AD7190 AD7192 AD7195 ADC driver"
- depends on SPI
- select AD_SIGMA_DELTA
- help
- Say yes here to build support for Analog Devices AD7190,
- AD7192 or AD7195 SPI analog to digital converters (ADC).
- If unsure, say N (but it's safe to say "Y").
-
- To compile this driver as a module, choose M here: the
- module will be called ad7192.
-
-config AD7280
- tristate "Analog Devices AD7280A Lithium Ion Battery Monitoring System"
- depends on SPI
- help
- Say yes here to build support for Analog Devices AD7280A
- Lithium Ion Battery Monitoring System.
-
To compile this driver as a module, choose M here: the
- module will be called ad7280a
+ module will be called ad7816.
-config LPC32XX_ADC
- tristate "NXP LPC32XX ADC"
- depends on ARCH_LPC32XX || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Say yes here to build support for the integrated ADC inside the
- LPC32XX SoC. Note that this feature uses the same hardware as the
- touchscreen driver, so you should either select only one of the two
- drivers (lpc32xx_adc or lpc32xx_ts) or, in the OpenFirmware case,
- activate only one via device tree selection. Provides direct access
- via sysfs.
-
-config MXS_LRADC
- tristate "Freescale i.MX23/i.MX28 LRADC"
- depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM
- depends on INPUT
- select STMP_DEVICE
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Say yes here to build support for i.MX23/i.MX28 LRADC convertor
- built into these chips.
-
- To compile this driver as a module, choose M here: the
- module will be called mxs-lradc.
-
-config SPEAR_ADC
- tristate "ST SPEAr ADC"
- depends on PLAT_SPEAR || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Say yes here to build support for the integrated ADC inside the
- ST SPEAr SoC. Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called spear_adc.
endmenu
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 1c4277dbd318..1e2a94c4db84 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -1,17 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O ADC drivers
#
-ad7606-y := ad7606_core.o
-ad7606-$(CONFIG_IIO_BUFFER) += ad7606_ring.o
-ad7606-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
-ad7606-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
-obj-$(CONFIG_AD7606) += ad7606.o
-
-obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7816) += ad7816.o
-obj-$(CONFIG_AD7192) += ad7192.o
-obj-$(CONFIG_AD7280) += ad7280a.o
-obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
-obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
-obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
deleted file mode 100644
index fe56fb6c7d30..000000000000
--- a/drivers/staging/iio/adc/ad7192.c
+++ /dev/null
@@ -1,720 +0,0 @@
-/*
- * AD7190 AD7192 AD7195 SPI ADC driver
- *
- * Copyright 2011-2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/spi/spi.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/adc/ad_sigma_delta.h>
-
-#include "ad7192.h"
-
-/* Registers */
-#define AD7192_REG_COMM 0 /* Communications Register (WO, 8-bit) */
-#define AD7192_REG_STAT 0 /* Status Register (RO, 8-bit) */
-#define AD7192_REG_MODE 1 /* Mode Register (RW, 24-bit */
-#define AD7192_REG_CONF 2 /* Configuration Register (RW, 24-bit) */
-#define AD7192_REG_DATA 3 /* Data Register (RO, 24/32-bit) */
-#define AD7192_REG_ID 4 /* ID Register (RO, 8-bit) */
-#define AD7192_REG_GPOCON 5 /* GPOCON Register (RO, 8-bit) */
-#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit
- * (AD7792)/24-bit (AD7192)) */
-#define AD7192_REG_FULLSALE 7 /* Full-Scale Register
- * (RW, 16-bit (AD7792)/24-bit (AD7192)) */
-
-/* Communications Register Bit Designations (AD7192_REG_COMM) */
-#define AD7192_COMM_WEN BIT(7) /* Write Enable */
-#define AD7192_COMM_WRITE 0 /* Write Operation */
-#define AD7192_COMM_READ BIT(6) /* Read Operation */
-#define AD7192_COMM_ADDR(x) (((x) & 0x7) << 3) /* Register Address */
-#define AD7192_COMM_CREAD BIT(2) /* Continuous Read of Data Register */
-
-/* Status Register Bit Designations (AD7192_REG_STAT) */
-#define AD7192_STAT_RDY BIT(7) /* Ready */
-#define AD7192_STAT_ERR BIT(6) /* Error (Overrange, Underrange) */
-#define AD7192_STAT_NOREF BIT(5) /* Error no external reference */
-#define AD7192_STAT_PARITY BIT(4) /* Parity */
-#define AD7192_STAT_CH3 BIT(2) /* Channel 3 */
-#define AD7192_STAT_CH2 BIT(1) /* Channel 2 */
-#define AD7192_STAT_CH1 BIT(0) /* Channel 1 */
-
-/* Mode Register Bit Designations (AD7192_REG_MODE) */
-#define AD7192_MODE_SEL(x) (((x) & 0x7) << 21) /* Operation Mode Select */
-#define AD7192_MODE_SEL_MASK (0x7 << 21) /* Operation Mode Select Mask */
-#define AD7192_MODE_DAT_STA BIT(20) /* Status Register transmission */
-#define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
-#define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */
-#define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/
-#define AD7192_MODE_ENPAR BIT(13) /* Parity Enable */
-#define AD7192_MODE_CLKDIV BIT(12) /* Clock divide by 2 (AD7190/2 only)*/
-#define AD7192_MODE_SCYCLE BIT(11) /* Single cycle conversion */
-#define AD7192_MODE_REJ60 BIT(10) /* 50/60Hz notch filter */
-#define AD7192_MODE_RATE(x) ((x) & 0x3FF) /* Filter Update Rate Select */
-
-/* Mode Register: AD7192_MODE_SEL options */
-#define AD7192_MODE_CONT 0 /* Continuous Conversion Mode */
-#define AD7192_MODE_SINGLE 1 /* Single Conversion Mode */
-#define AD7192_MODE_IDLE 2 /* Idle Mode */
-#define AD7192_MODE_PWRDN 3 /* Power-Down Mode */
-#define AD7192_MODE_CAL_INT_ZERO 4 /* Internal Zero-Scale Calibration */
-#define AD7192_MODE_CAL_INT_FULL 5 /* Internal Full-Scale Calibration */
-#define AD7192_MODE_CAL_SYS_ZERO 6 /* System Zero-Scale Calibration */
-#define AD7192_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
-
-/* Mode Register: AD7192_MODE_CLKSRC options */
-#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected
- * from MCLK1 to MCLK2 */
-#define AD7192_CLK_EXT_MCLK2 1 /* External Clock applied to MCLK2 */
-#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not
- * available at the MCLK2 pin */
-#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available
- * at the MCLK2 pin */
-
-
-/* Configuration Register Bit Designations (AD7192_REG_CONF) */
-
-#define AD7192_CONF_CHOP BIT(23) /* CHOP enable */
-#define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */
-#define AD7192_CONF_CHAN(x) (((1 << (x)) & 0xFF) << 8) /* Channel select */
-#define AD7192_CONF_CHAN_MASK (0xFF << 8) /* Channel select mask */
-#define AD7192_CONF_BURN BIT(7) /* Burnout current enable */
-#define AD7192_CONF_REFDET BIT(6) /* Reference detect enable */
-#define AD7192_CONF_BUF BIT(4) /* Buffered Mode Enable */
-#define AD7192_CONF_UNIPOLAR BIT(3) /* Unipolar/Bipolar Enable */
-#define AD7192_CONF_GAIN(x) ((x) & 0x7) /* Gain Select */
-
-#define AD7192_CH_AIN1P_AIN2M 0 /* AIN1(+) - AIN2(-) */
-#define AD7192_CH_AIN3P_AIN4M 1 /* AIN3(+) - AIN4(-) */
-#define AD7192_CH_TEMP 2 /* Temp Sensor */
-#define AD7192_CH_AIN2P_AIN2M 3 /* AIN2(+) - AIN2(-) */
-#define AD7192_CH_AIN1 4 /* AIN1 - AINCOM */
-#define AD7192_CH_AIN2 5 /* AIN2 - AINCOM */
-#define AD7192_CH_AIN3 6 /* AIN3 - AINCOM */
-#define AD7192_CH_AIN4 7 /* AIN4 - AINCOM */
-
-/* ID Register Bit Designations (AD7192_REG_ID) */
-#define ID_AD7190 0x4
-#define ID_AD7192 0x0
-#define ID_AD7195 0x6
-#define AD7192_ID_MASK 0x0F
-
-/* GPOCON Register Bit Designations (AD7192_REG_GPOCON) */
-#define AD7192_GPOCON_BPDSW BIT(6) /* Bridge power-down switch enable */
-#define AD7192_GPOCON_GP32EN BIT(5) /* Digital Output P3 and P2 enable */
-#define AD7192_GPOCON_GP10EN BIT(4) /* Digital Output P1 and P0 enable */
-#define AD7192_GPOCON_P3DAT BIT(3) /* P3 state */
-#define AD7192_GPOCON_P2DAT BIT(2) /* P2 state */
-#define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */
-#define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */
-
-#define AD7192_INT_FREQ_MHz 4915200
-
-/* NOTE:
- * The AD7190/2/5 features a dual use data out ready DOUT/RDY output.
- * In order to avoid contentions on the SPI bus, it's therefore necessary
- * to use spi bus locking.
- *
- * The DOUT/RDY output must also be wired to an interrupt capable GPIO.
- */
-
-struct ad7192_state {
- struct regulator *reg;
- u16 int_vref_mv;
- u32 mclk;
- u32 f_order;
- u32 mode;
- u32 conf;
- u32 scale_avail[8][2];
- u8 gpocon;
- u8 devid;
-
- struct ad_sigma_delta sd;
-};
-
-static struct ad7192_state *ad_sigma_delta_to_ad7192(struct ad_sigma_delta *sd)
-{
- return container_of(sd, struct ad7192_state, sd);
-}
-
-static int ad7192_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
-{
- struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
-
- st->conf &= ~AD7192_CONF_CHAN_MASK;
- st->conf |= AD7192_CONF_CHAN(channel);
-
- return ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
-}
-
-static int ad7192_set_mode(struct ad_sigma_delta *sd,
- enum ad_sigma_delta_mode mode)
-{
- struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
-
- st->mode &= ~AD7192_MODE_SEL_MASK;
- st->mode |= AD7192_MODE_SEL(mode);
-
- return ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
-}
-
-static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
- .set_channel = ad7192_set_channel,
- .set_mode = ad7192_set_mode,
- .has_registers = true,
- .addr_shift = 3,
- .read_mask = BIT(6),
-};
-
-static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
- {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN1},
- {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN1},
- {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN2},
- {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN2},
- {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN3},
- {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN3},
- {AD7192_MODE_CAL_INT_ZERO, AD7192_CH_AIN4},
- {AD7192_MODE_CAL_INT_FULL, AD7192_CH_AIN4}
-};
-
-static int ad7192_calibrate_all(struct ad7192_state *st)
-{
- return ad_sd_calibrate_all(&st->sd, ad7192_calib_arr,
- ARRAY_SIZE(ad7192_calib_arr));
-}
-
-static int ad7192_setup(struct ad7192_state *st,
- const struct ad7192_platform_data *pdata)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
- unsigned long long scale_uv;
- int i, ret, id;
- u8 ones[6];
-
- /* reset the serial interface */
- memset(&ones, 0xFF, 6);
- ret = spi_write(st->sd.spi, &ones, 6);
- if (ret < 0)
- goto out;
- usleep_range(500, 1000); /* Wait for at least 500us */
-
- /* write/read test for device presence */
- ret = ad_sd_read_reg(&st->sd, AD7192_REG_ID, 1, &id);
- if (ret)
- goto out;
-
- id &= AD7192_ID_MASK;
-
- if (id != st->devid)
- dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n",
- id);
-
- switch (pdata->clock_source_sel) {
- case AD7192_CLK_EXT_MCLK1_2:
- case AD7192_CLK_EXT_MCLK2:
- st->mclk = AD7192_INT_FREQ_MHz;
- break;
- case AD7192_CLK_INT:
- case AD7192_CLK_INT_CO:
- if (pdata->ext_clk_Hz)
- st->mclk = pdata->ext_clk_Hz;
- else
- st->mclk = AD7192_INT_FREQ_MHz;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- st->mode = AD7192_MODE_SEL(AD7192_MODE_IDLE) |
- AD7192_MODE_CLKSRC(pdata->clock_source_sel) |
- AD7192_MODE_RATE(480);
-
- st->conf = AD7192_CONF_GAIN(0);
-
- if (pdata->rej60_en)
- st->mode |= AD7192_MODE_REJ60;
-
- if (pdata->sinc3_en)
- st->mode |= AD7192_MODE_SINC3;
-
- if (pdata->refin2_en && (st->devid != ID_AD7195))
- st->conf |= AD7192_CONF_REFSEL;
-
- if (pdata->chop_en) {
- st->conf |= AD7192_CONF_CHOP;
- if (pdata->sinc3_en)
- st->f_order = 3; /* SINC 3rd order */
- else
- st->f_order = 4; /* SINC 4th order */
- } else {
- st->f_order = 1;
- }
-
- if (pdata->buf_en)
- st->conf |= AD7192_CONF_BUF;
-
- if (pdata->unipolar_en)
- st->conf |= AD7192_CONF_UNIPOLAR;
-
- if (pdata->burnout_curr_en)
- st->conf |= AD7192_CONF_BURN;
-
- ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
- if (ret)
- goto out;
-
- ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
- if (ret)
- goto out;
-
- ret = ad7192_calibrate_all(st);
- if (ret)
- goto out;
-
- /* Populate available ADC input ranges */
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
- scale_uv = ((u64)st->int_vref_mv * 100000000)
- >> (indio_dev->channels[0].scan_type.realbits -
- ((st->conf & AD7192_CONF_UNIPOLAR) ? 0 : 1));
- scale_uv >>= i;
-
- st->scale_avail[i][1] = do_div(scale_uv, 100000000) * 10;
- st->scale_avail[i][0] = scale_uv;
- }
-
- return 0;
-out:
- dev_err(&st->sd.spi->dev, "setup failed\n");
- return ret;
-}
-
-static ssize_t ad7192_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", st->mclk /
- (st->f_order * 1024 * AD7192_MODE_RATE(st->mode)));
-}
-
-static ssize_t ad7192_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
- unsigned long lval;
- int div, ret;
-
- ret = kstrtoul(buf, 10, &lval);
- if (ret)
- return ret;
- if (lval == 0)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
-
- div = st->mclk / (lval * st->f_order * 1024);
- if (div < 1 || div > 1023) {
- ret = -EINVAL;
- goto out;
- }
-
- st->mode &= ~AD7192_MODE_RATE(-1);
- st->mode |= AD7192_MODE_RATE(div);
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
-
-out:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ad7192_read_frequency,
- ad7192_write_frequency);
-
-static ssize_t ad7192_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
- int i, len = 0;
-
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
- st->scale_avail[i][1]);
-
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static IIO_DEVICE_ATTR_NAMED(in_v_m_v_scale_available,
- in_voltage-voltage_scale_available,
- S_IRUGO, ad7192_show_scale_available, NULL, 0);
-
-static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO,
- ad7192_show_scale_available, NULL, 0);
-
-static ssize_t ad7192_show_ac_excitation(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", !!(st->mode & AD7192_MODE_ACX));
-}
-
-static ssize_t ad7192_show_bridge_switch(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", !!(st->gpocon & AD7192_GPOCON_BPDSW));
-}
-
-static ssize_t ad7192_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7192_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- bool val;
-
- ret = strtobool(buf, &val);
- if (ret < 0)
- return ret;
-
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
-
- switch ((u32) this_attr->address) {
- case AD7192_REG_GPOCON:
- if (val)
- st->gpocon |= AD7192_GPOCON_BPDSW;
- else
- st->gpocon &= ~AD7192_GPOCON_BPDSW;
-
- ad_sd_write_reg(&st->sd, AD7192_REG_GPOCON, 1, st->gpocon);
- break;
- case AD7192_REG_MODE:
- if (val)
- st->mode |= AD7192_MODE_ACX;
- else
- st->mode &= ~AD7192_MODE_ACX;
-
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEVICE_ATTR(bridge_switch_en, S_IRUGO | S_IWUSR,
- ad7192_show_bridge_switch, ad7192_set,
- AD7192_REG_GPOCON);
-
-static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR,
- ad7192_show_ac_excitation, ad7192_set,
- AD7192_REG_MODE);
-
-static struct attribute *ad7192_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
- &iio_dev_attr_bridge_switch_en.dev_attr.attr,
- &iio_dev_attr_ac_excitation_en.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group ad7192_attribute_group = {
- .attrs = ad7192_attributes,
-};
-
-static struct attribute *ad7195_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
- &iio_dev_attr_bridge_switch_en.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group ad7195_attribute_group = {
- .attrs = ad7195_attributes,
-};
-
-static unsigned int ad7192_get_temp_scale(bool unipolar)
-{
- return unipolar ? 2815 * 2 : 2815;
-}
-
-static int ad7192_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- struct ad7192_state *st = iio_priv(indio_dev);
- bool unipolar = !!(st->conf & AD7192_CONF_UNIPOLAR);
-
- switch (m) {
- case IIO_CHAN_INFO_RAW:
- return ad_sigma_delta_single_conversion(indio_dev, chan, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- mutex_lock(&indio_dev->mlock);
- *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
- *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_TEMP:
- *val = 0;
- *val2 = 1000000000 / ad7192_get_temp_scale(unipolar);
- return IIO_VAL_INT_PLUS_NANO;
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_OFFSET:
- if (!unipolar)
- *val = -(1 << (chan->scan_type.realbits - 1));
- else
- *val = 0;
- /* Kelvin to Celsius */
- if (chan->type == IIO_TEMP)
- *val -= 273 * ad7192_get_temp_scale(unipolar);
- return IIO_VAL_INT;
- }
-
- return -EINVAL;
-}
-
-static int ad7192_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct ad7192_state *st = iio_priv(indio_dev);
- int ret, i;
- unsigned int tmp;
-
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
-
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- if (val2 == st->scale_avail[i][1]) {
- ret = 0;
- tmp = st->conf;
- st->conf &= ~AD7192_CONF_GAIN(-1);
- st->conf |= AD7192_CONF_GAIN(i);
- if (tmp == st->conf)
- break;
- ad_sd_write_reg(&st->sd, AD7192_REG_CONF,
- 3, st->conf);
- ad7192_calibrate_all(st);
- break;
- }
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&indio_dev->mlock);
-
- return ret;
-}
-
-static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- return IIO_VAL_INT_PLUS_NANO;
-}
-
-static const struct iio_info ad7192_info = {
- .read_raw = &ad7192_read_raw,
- .write_raw = &ad7192_write_raw,
- .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
- .attrs = &ad7192_attribute_group,
- .validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
-};
-
-static const struct iio_info ad7195_info = {
- .read_raw = &ad7192_read_raw,
- .write_raw = &ad7192_write_raw,
- .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
- .attrs = &ad7195_attribute_group,
- .validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
-};
-
-static const struct iio_chan_spec ad7192_channels[] = {
- AD_SD_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M, 24, 32, 0),
- AD_SD_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M, 24, 32, 0),
- AD_SD_TEMP_CHANNEL(2, AD7192_CH_TEMP, 24, 32, 0),
- AD_SD_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M, 24, 32, 0),
- AD_SD_CHANNEL(4, 1, AD7192_CH_AIN1, 24, 32, 0),
- AD_SD_CHANNEL(5, 2, AD7192_CH_AIN2, 24, 32, 0),
- AD_SD_CHANNEL(6, 3, AD7192_CH_AIN3, 24, 32, 0),
- AD_SD_CHANNEL(7, 4, AD7192_CH_AIN4, 24, 32, 0),
- IIO_CHAN_SOFT_TIMESTAMP(8),
-};
-
-static int ad7192_probe(struct spi_device *spi)
-{
- const struct ad7192_platform_data *pdata = spi->dev.platform_data;
- struct ad7192_state *st;
- struct iio_dev *indio_dev;
- int ret, voltage_uv = 0;
-
- if (!pdata) {
- dev_err(&spi->dev, "no platform data?\n");
- return -ENODEV;
- }
-
- if (!spi->irq) {
- dev_err(&spi->dev, "no IRQ?\n");
- return -ENODEV;
- }
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
-
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- voltage_uv = regulator_get_voltage(st->reg);
- }
-
- if (pdata && pdata->vref_mv)
- st->int_vref_mv = pdata->vref_mv;
- else if (voltage_uv)
- st->int_vref_mv = voltage_uv / 1000;
- else
- dev_warn(&spi->dev, "reference voltage undefined\n");
-
- spi_set_drvdata(spi, indio_dev);
- st->devid = spi_get_device_id(spi)->driver_data;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ad7192_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
- if (st->devid == ID_AD7195)
- indio_dev->info = &ad7195_info;
- else
- indio_dev->info = &ad7192_info;
-
- ad_sd_init(&st->sd, indio_dev, spi, &ad7192_sigma_delta_info);
-
- ret = ad_sd_setup_buffer_and_trigger(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- ret = ad7192_setup(st, pdata);
- if (ret)
- goto error_remove_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_remove_trigger;
- return 0;
-
-error_remove_trigger:
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad7192_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return 0;
-}
-
-static const struct spi_device_id ad7192_id[] = {
- {"ad7190", ID_AD7190},
- {"ad7192", ID_AD7192},
- {"ad7195", ID_AD7195},
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad7192_id);
-
-static struct spi_driver ad7192_driver = {
- .driver = {
- .name = "ad7192",
- .owner = THIS_MODULE,
- },
- .probe = ad7192_probe,
- .remove = ad7192_remove,
- .id_table = ad7192_id,
-};
-module_spi_driver(ad7192_driver);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7192.h b/drivers/staging/iio/adc/ad7192.h
deleted file mode 100644
index a0a5b61a41f1..000000000000
--- a/drivers/staging/iio/adc/ad7192.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * AD7190 AD7192 AD7195 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-#ifndef IIO_ADC_AD7192_H_
-#define IIO_ADC_AD7192_H_
-
-/*
- * TODO: struct ad7192_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad7192_platform_data - platform/board specific information
- * @vref_mv: the external reference voltage in millivolt
- * @clock_source_sel: [0..3]
- * 0 External 4.92 MHz clock connected from MCLK1 to MCLK2
- * 1 External Clock applied to MCLK2
- * 2 Internal 4.92 MHz Clock not available at the MCLK2 pin
- * 3 Internal 4.92 MHz Clock available at the MCLK2 pin
- * @ext_clk_Hz: the external clock frequency in Hz; if not set,
- * the driver uses the internal 4.92 MHz clock
- * @refin2_en: REFIN1/REFIN2 Reference Select (AD7190/2 only)
- * @rej60_en: 50/60Hz notch filter enable
- * @sinc3_en: SINC3 filter enable (default SINC4)
- * @chop_en: CHOP mode enable
- * @buf_en: buffered input mode enable
- * @unipolar_en: unipolar mode enable
- * @burnout_curr_en: constant current generators on AIN(+|-) enable
- */
-
-struct ad7192_platform_data {
- u16 vref_mv;
- u8 clock_source_sel;
- u32 ext_clk_Hz;
- bool refin2_en;
- bool rej60_en;
- bool sinc3_en;
- bool chop_en;
- bool buf_en;
- bool unipolar_en;
- bool burnout_curr_en;
-};
-
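-/*
- * Example (illustrative only; the board name and values below are made
- * up): a board using the internal 4.92 MHz clock with a 2.5 V reference
- * on REFIN1 and buffered, unipolar inputs could pass:
- *
- *	static const struct ad7192_platform_data board_ad7192_pdata = {
- *		.vref_mv		= 2500,
- *		.clock_source_sel	= 2,
- *		.buf_en			= true,
- *		.unipolar_en		= true,
- *	};
- */
-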
-#endif /* IIO_ADC_AD7192_H_ */
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
deleted file mode 100644
index d98e229c46bf..000000000000
--- a/drivers/staging/iio/adc/ad7280a.c
+++ /dev/null
@@ -1,985 +0,0 @@
-/*
- * AD7280A Lithium Ion Battery Monitoring System
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/spi/spi.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-
-#include "ad7280a.h"
-
-/* Registers */
-#define AD7280A_CELL_VOLTAGE_1 0x0 /* D11 to D0, Read only */
-#define AD7280A_CELL_VOLTAGE_2 0x1 /* D11 to D0, Read only */
-#define AD7280A_CELL_VOLTAGE_3 0x2 /* D11 to D0, Read only */
-#define AD7280A_CELL_VOLTAGE_4 0x3 /* D11 to D0, Read only */
-#define AD7280A_CELL_VOLTAGE_5 0x4 /* D11 to D0, Read only */
-#define AD7280A_CELL_VOLTAGE_6 0x5 /* D11 to D0, Read only */
-#define AD7280A_AUX_ADC_1 0x6 /* D11 to D0, Read only */
-#define AD7280A_AUX_ADC_2 0x7 /* D11 to D0, Read only */
-#define AD7280A_AUX_ADC_3 0x8 /* D11 to D0, Read only */
-#define AD7280A_AUX_ADC_4 0x9 /* D11 to D0, Read only */
-#define AD7280A_AUX_ADC_5 0xA /* D11 to D0, Read only */
-#define AD7280A_AUX_ADC_6 0xB /* D11 to D0, Read only */
-#define AD7280A_SELF_TEST 0xC /* D11 to D0, Read only */
-#define AD7280A_CONTROL_HB 0xD /* D15 to D8, Read/write */
-#define AD7280A_CONTROL_LB 0xE /* D7 to D0, Read/write */
-#define AD7280A_CELL_OVERVOLTAGE 0xF /* D7 to D0, Read/write */
-#define AD7280A_CELL_UNDERVOLTAGE 0x10 /* D7 to D0, Read/write */
-#define AD7280A_AUX_ADC_OVERVOLTAGE 0x11 /* D7 to D0, Read/write */
-#define AD7280A_AUX_ADC_UNDERVOLTAGE 0x12 /* D7 to D0, Read/write */
-#define AD7280A_ALERT 0x13 /* D7 to D0, Read/write */
-#define AD7280A_CELL_BALANCE 0x14 /* D7 to D0, Read/write */
-#define AD7280A_CB1_TIMER 0x15 /* D7 to D0, Read/write */
-#define AD7280A_CB2_TIMER 0x16 /* D7 to D0, Read/write */
-#define AD7280A_CB3_TIMER 0x17 /* D7 to D0, Read/write */
-#define AD7280A_CB4_TIMER 0x18 /* D7 to D0, Read/write */
-#define AD7280A_CB5_TIMER 0x19 /* D7 to D0, Read/write */
-#define AD7280A_CB6_TIMER 0x1A /* D7 to D0, Read/write */
-#define AD7280A_PD_TIMER 0x1B /* D7 to D0, Read/write */
-#define AD7280A_READ 0x1C /* D7 to D0, Read/write */
-#define AD7280A_CNVST_CONTROL 0x1D /* D7 to D0, Read/write */
-
-/* Bits and Masks */
-#define AD7280A_CTRL_HB_CONV_INPUT_ALL 0
-#define AD7280A_CTRL_HB_CONV_INPUT_6CELL_AUX1_3_4 BIT(6)
-#define AD7280A_CTRL_HB_CONV_INPUT_6CELL BIT(7)
-#define AD7280A_CTRL_HB_CONV_INPUT_SELF_TEST (BIT(7) | BIT(6))
-#define AD7280A_CTRL_HB_CONV_RES_READ_ALL 0
-#define AD7280A_CTRL_HB_CONV_RES_READ_6CELL_AUX1_3_4 BIT(4)
-#define AD7280A_CTRL_HB_CONV_RES_READ_6CELL BIT(5)
-#define AD7280A_CTRL_HB_CONV_RES_READ_NO (BIT(5) | BIT(4))
-#define AD7280A_CTRL_HB_CONV_START_CNVST 0
-#define AD7280A_CTRL_HB_CONV_START_CS BIT(3)
-#define AD7280A_CTRL_HB_CONV_AVG_DIS 0
-#define AD7280A_CTRL_HB_CONV_AVG_2 BIT(1)
-#define AD7280A_CTRL_HB_CONV_AVG_4 BIT(2)
-#define AD7280A_CTRL_HB_CONV_AVG_8 (BIT(2) | BIT(1))
-#define AD7280A_CTRL_HB_CONV_AVG(x) ((x) << 1)
-#define AD7280A_CTRL_HB_PWRDN_SW BIT(0)
-
-#define AD7280A_CTRL_LB_SWRST BIT(7)
-#define AD7280A_CTRL_LB_ACQ_TIME_400ns 0
-#define AD7280A_CTRL_LB_ACQ_TIME_800ns BIT(5)
-#define AD7280A_CTRL_LB_ACQ_TIME_1200ns BIT(6)
-#define AD7280A_CTRL_LB_ACQ_TIME_1600ns (BIT(6) | BIT(5))
-#define AD7280A_CTRL_LB_ACQ_TIME(x) ((x) << 5)
-#define AD7280A_CTRL_LB_MUST_SET BIT(4)
-#define AD7280A_CTRL_LB_THERMISTOR_EN BIT(3)
-#define AD7280A_CTRL_LB_LOCK_DEV_ADDR BIT(2)
-#define AD7280A_CTRL_LB_INC_DEV_ADDR BIT(1)
-#define AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN BIT(0)
-
-#define AD7280A_ALERT_GEN_STATIC_HIGH BIT(6)
-#define AD7280A_ALERT_RELAY_SIG_CHAIN_DOWN (BIT(7) | BIT(6))
-
-#define AD7280A_ALL_CELLS (0xAD << 16)
-
-#define AD7280A_MAX_SPI_CLK_Hz 700000 /* < 1MHz */
-#define AD7280A_MAX_CHAIN 8
-#define AD7280A_CELLS_PER_DEV 6
-#define AD7280A_BITS 12
-#define AD7280A_NUM_CH (AD7280A_AUX_ADC_6 - \
- AD7280A_CELL_VOLTAGE_1 + 1)
-
-#define AD7280A_DEVADDR_MASTER 0
-#define AD7280A_DEVADDR_ALL 0x1F
-/* 5-bit device address is sent LSB first */
-#define AD7280A_DEVADDR(addr) (((addr & 0x1) << 4) | ((addr & 0x2) << 2) | \
- (addr & 0x4) | ((addr & 0x8) >> 2) | \
- ((addr & 0x10) >> 4))
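-/*
- * For illustration (worked out from the 5-bit reversal above):
- * AD7280A_DEVADDR(0) == 0x00, AD7280A_DEVADDR(1) == 0x10,
- * AD7280A_DEVADDR(2) == 0x08 and AD7280A_DEVADDR(0x1F) == 0x1F.
- */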
-
-/* During a read a valid write is mandatory, so it is recommended to
- * write to the highest available address (0x1F) with the
- * "address all parts" bit cleared.
- * The TXVAL below is therefore AD7280A_DEVADDR_ALL plus the CRC.
- */
-#define AD7280A_READ_TXVAL 0xF800030A
-
-/*
- * AD7280 CRC
- *
- * P(x) = x^8 + x^5 + x^3 + x^2 + x^1 + x^0 = 0b100101111 => 0x2F
- */
-#define POLYNOM 0x2F
-#define POLYNOM_ORDER 8
-#define HIGHBIT (1 << (POLYNOM_ORDER - 1))
-
-struct ad7280_state {
- struct spi_device *spi;
- struct iio_chan_spec *channels;
- struct iio_dev_attr *iio_attr;
- int slave_num;
- int scan_cnt;
- int readback_delay_us;
- unsigned char crc_tab[256];
- unsigned char ctrl_hb;
- unsigned char ctrl_lb;
- unsigned char cell_threshhigh;
- unsigned char cell_threshlow;
- unsigned char aux_threshhigh;
- unsigned char aux_threshlow;
- unsigned char cb_mask[AD7280A_MAX_CHAIN];
-
- __be32 buf[2] ____cacheline_aligned;
-};
-
-static void ad7280_crc8_build_table(unsigned char *crc_tab)
-{
- unsigned char bit, crc;
- int cnt, i;
-
- for (cnt = 0; cnt < 256; cnt++) {
- crc = cnt;
- for (i = 0; i < 8; i++) {
- bit = crc & HIGHBIT;
- crc <<= 1;
- if (bit)
- crc ^= POLYNOM;
- }
- crc_tab[cnt] = crc;
- }
-}
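-
-/*
- * Sanity-check example (worked out by hand, for illustration only):
- * shifting 0x01 left eight times overflows exactly once, so
- * crc_tab[0x01] ends up being the polynomial itself, 0x2F.
- */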
-
-static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
-{
- unsigned char crc;
-
- crc = crc_tab[val >> 16 & 0xFF];
- crc = crc_tab[crc ^ (val >> 8 & 0xFF)];
-
- return crc ^ (val & 0xFF);
-}
-
-static int ad7280_check_crc(struct ad7280_state *st, unsigned val)
-{
- unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10);
-
- if (crc != ((val >> 2) & 0xFF))
- return -EIO;
-
- return 0;
-}
-
-/* After initiating a conversion sequence we need to wait until the
- * conversion is done. The delay is typically in the range of 15..30 us;
- * however, depending on the number of devices in the daisy chain, the
- * number of averages taken and the conversion delay and acquisition time
- * options, it may take up to 250 us, in which case it is better to sleep
- * instead of busy-waiting.
- */
-
-static void ad7280_delay(struct ad7280_state *st)
-{
- if (st->readback_delay_us < 50)
- udelay(st->readback_delay_us);
- else
- usleep_range(250, 500);
-}
-
-static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
-{
- int ret;
- struct spi_transfer t = {
- .tx_buf = &st->buf[0],
- .rx_buf = &st->buf[1],
- .len = 4,
- };
-
- st->buf[0] = cpu_to_be32(AD7280A_READ_TXVAL);
-
- ret = spi_sync_transfer(st->spi, &t, 1);
- if (ret)
- return ret;
-
- *val = be32_to_cpu(st->buf[1]);
-
- return 0;
-}
-
-static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
- unsigned addr, bool all, unsigned val)
-{
- unsigned reg = (devaddr << 27 | addr << 21 |
- (val & 0xFF) << 13 | all << 12);
-
- reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
- st->buf[0] = cpu_to_be32(reg);
-
- return spi_write(st->spi, &st->buf[0], 4);
-}
-
-static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
-{
- int ret;
- unsigned tmp;
-
- /* turns off the read operation on all parts */
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
- AD7280A_CTRL_HB_CONV_INPUT_ALL |
- AD7280A_CTRL_HB_CONV_RES_READ_NO |
- st->ctrl_hb);
- if (ret)
- return ret;
-
- /* turns on the read operation on the addressed part */
- ret = ad7280_write(st, devaddr, AD7280A_CONTROL_HB, 0,
- AD7280A_CTRL_HB_CONV_INPUT_ALL |
- AD7280A_CTRL_HB_CONV_RES_READ_ALL |
- st->ctrl_hb);
- if (ret)
- return ret;
-
- /* Set register address on the part to be read from */
- ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
- if (ret)
- return ret;
-
- __ad7280_read32(st, &tmp);
-
- if (ad7280_check_crc(st, tmp))
- return -EIO;
-
- if (((tmp >> 27) != devaddr) || (((tmp >> 21) & 0x3F) != addr))
- return -EFAULT;
-
- return (tmp >> 13) & 0xFF;
-}
-
-static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
-{
- int ret;
- unsigned tmp;
-
- ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
- if (ret)
- return ret;
-
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
- AD7280A_CTRL_HB_CONV_INPUT_ALL |
- AD7280A_CTRL_HB_CONV_RES_READ_NO |
- st->ctrl_hb);
- if (ret)
- return ret;
-
- ret = ad7280_write(st, devaddr, AD7280A_CONTROL_HB, 0,
- AD7280A_CTRL_HB_CONV_INPUT_ALL |
- AD7280A_CTRL_HB_CONV_RES_READ_ALL |
- AD7280A_CTRL_HB_CONV_START_CS |
- st->ctrl_hb);
- if (ret)
- return ret;
-
- ad7280_delay(st);
-
- __ad7280_read32(st, &tmp);
-
- if (ad7280_check_crc(st, tmp))
- return -EIO;
-
- if (((tmp >> 27) != devaddr) || (((tmp >> 23) & 0xF) != addr))
- return -EFAULT;
-
- return (tmp >> 11) & 0xFFF;
-}
-
-static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
- unsigned *array)
-{
- int i, ret;
- unsigned tmp, sum = 0;
-
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
- AD7280A_CELL_VOLTAGE_1 << 2);
- if (ret)
- return ret;
-
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
- AD7280A_CTRL_HB_CONV_INPUT_ALL |
- AD7280A_CTRL_HB_CONV_RES_READ_ALL |
- AD7280A_CTRL_HB_CONV_START_CS |
- st->ctrl_hb);
- if (ret)
- return ret;
-
- ad7280_delay(st);
-
- for (i = 0; i < cnt; i++) {
- __ad7280_read32(st, &tmp);
-
- if (ad7280_check_crc(st, tmp))
- return -EIO;
-
- if (array)
- array[i] = tmp;
- /* only sum cell voltages */
- if (((tmp >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6)
- sum += ((tmp >> 11) & 0xFFF);
- }
-
- return sum;
-}
-
-static int ad7280_chain_setup(struct ad7280_state *st)
-{
- unsigned val, n;
- int ret;
-
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
- AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN |
- AD7280A_CTRL_LB_LOCK_DEV_ADDR |
- AD7280A_CTRL_LB_MUST_SET |
- AD7280A_CTRL_LB_SWRST |
- st->ctrl_lb);
- if (ret)
- return ret;
-
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
- AD7280A_CTRL_LB_DAISY_CHAIN_RB_EN |
- AD7280A_CTRL_LB_LOCK_DEV_ADDR |
- AD7280A_CTRL_LB_MUST_SET |
- st->ctrl_lb);
- if (ret)
- return ret;
-
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
- AD7280A_CONTROL_LB << 2);
- if (ret)
- return ret;
-
- for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
- __ad7280_read32(st, &val);
- if (val == 0)
- return n - 1;
-
- if (ad7280_check_crc(st, val))
- return -EIO;
-
- if (n != AD7280A_DEVADDR(val >> 27))
- return -EIO;
- }
-
- return -EFAULT;
-}
-
-static ssize_t ad7280_show_balance_sw(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7280_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- return sprintf(buf, "%d\n",
- !!(st->cb_mask[this_attr->address >> 8] &
- (1 << ((this_attr->address & 0xFF) + 2))));
-}
-
-static ssize_t ad7280_store_balance_sw(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7280_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- bool readin;
- int ret;
- unsigned devaddr, ch;
-
- ret = strtobool(buf, &readin);
- if (ret)
- return ret;
-
- devaddr = this_attr->address >> 8;
- ch = this_attr->address & 0xFF;
-
- mutex_lock(&indio_dev->mlock);
- if (readin)
- st->cb_mask[devaddr] |= 1 << (ch + 2);
- else
- st->cb_mask[devaddr] &= ~(1 << (ch + 2));
-
- ret = ad7280_write(st, devaddr, AD7280A_CELL_BALANCE,
- 0, st->cb_mask[devaddr]);
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static ssize_t ad7280_show_balance_timer(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7280_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- unsigned msecs;
-
- mutex_lock(&indio_dev->mlock);
- ret = ad7280_read(st, this_attr->address >> 8,
- this_attr->address & 0xFF);
- mutex_unlock(&indio_dev->mlock);
-
- if (ret < 0)
- return ret;
-
- msecs = (ret >> 3) * 71500;
-
- return sprintf(buf, "%u\n", msecs);
-}
-
-static ssize_t ad7280_store_balance_timer(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7280_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- val /= 71500;
-
- if (val > 31)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
- ret = ad7280_write(st, this_attr->address >> 8,
- this_attr->address & 0xFF,
- 0, (val & 0x1F) << 3);
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static struct attribute *ad7280_attributes[AD7280A_MAX_CHAIN *
- AD7280A_CELLS_PER_DEV * 2 + 1];
-
-static struct attribute_group ad7280_attrs_group = {
- .attrs = ad7280_attributes,
-};
-
-static int ad7280_channel_init(struct ad7280_state *st)
-{
- int dev, ch, cnt;
-
- st->channels = kcalloc((st->slave_num + 1) * 12 + 2,
- sizeof(*st->channels), GFP_KERNEL);
- if (st->channels == NULL)
- return -ENOMEM;
-
- for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
- for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_AUX_ADC_6; ch++,
- cnt++) {
- if (ch < AD7280A_AUX_ADC_1) {
- st->channels[cnt].type = IIO_VOLTAGE;
- st->channels[cnt].differential = 1;
- st->channels[cnt].channel = (dev * 6) + ch;
- st->channels[cnt].channel2 =
- st->channels[cnt].channel + 1;
- } else {
- st->channels[cnt].type = IIO_TEMP;
- st->channels[cnt].channel = (dev * 6) + ch - 6;
- }
- st->channels[cnt].indexed = 1;
- st->channels[cnt].info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW);
- st->channels[cnt].info_mask_shared_by_type =
- BIT(IIO_CHAN_INFO_SCALE);
- st->channels[cnt].address =
- AD7280A_DEVADDR(dev) << 8 | ch;
- st->channels[cnt].scan_index = cnt;
- st->channels[cnt].scan_type.sign = 'u';
- st->channels[cnt].scan_type.realbits = 12;
- st->channels[cnt].scan_type.storagebits = 32;
- st->channels[cnt].scan_type.shift = 0;
- }
-
- st->channels[cnt].type = IIO_VOLTAGE;
- st->channels[cnt].differential = 1;
- st->channels[cnt].channel = 0;
- st->channels[cnt].channel2 = dev * 6;
- st->channels[cnt].address = AD7280A_ALL_CELLS;
- st->channels[cnt].indexed = 1;
- st->channels[cnt].info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
- st->channels[cnt].info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
- st->channels[cnt].scan_index = cnt;
- st->channels[cnt].scan_type.sign = 'u';
- st->channels[cnt].scan_type.realbits = 32;
- st->channels[cnt].scan_type.storagebits = 32;
- st->channels[cnt].scan_type.shift = 0;
- cnt++;
- st->channels[cnt].type = IIO_TIMESTAMP;
- st->channels[cnt].channel = -1;
- st->channels[cnt].scan_index = cnt;
- st->channels[cnt].scan_type.sign = 's';
- st->channels[cnt].scan_type.realbits = 64;
- st->channels[cnt].scan_type.storagebits = 64;
- st->channels[cnt].scan_type.shift = 0;
-
- return cnt + 1;
-}
-
-static int ad7280_attr_init(struct ad7280_state *st)
-{
- int dev, ch, cnt;
-
- st->iio_attr = kcalloc(2, sizeof(*st->iio_attr) *
- (st->slave_num + 1) * AD7280A_CELLS_PER_DEV,
- GFP_KERNEL);
- if (st->iio_attr == NULL)
- return -ENOMEM;
-
- for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
- for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_CELL_VOLTAGE_6;
- ch++, cnt++) {
- st->iio_attr[cnt].address =
- AD7280A_DEVADDR(dev) << 8 | ch;
- st->iio_attr[cnt].dev_attr.attr.mode =
- S_IWUSR | S_IRUGO;
- st->iio_attr[cnt].dev_attr.show =
- ad7280_show_balance_sw;
- st->iio_attr[cnt].dev_attr.store =
- ad7280_store_balance_sw;
- st->iio_attr[cnt].dev_attr.attr.name =
- kasprintf(GFP_KERNEL,
- "in%d-in%d_balance_switch_en",
- (dev * AD7280A_CELLS_PER_DEV) + ch,
- (dev * AD7280A_CELLS_PER_DEV) + ch + 1);
- ad7280_attributes[cnt] =
- &st->iio_attr[cnt].dev_attr.attr;
- cnt++;
- st->iio_attr[cnt].address =
- AD7280A_DEVADDR(dev) << 8 |
- (AD7280A_CB1_TIMER + ch);
- st->iio_attr[cnt].dev_attr.attr.mode =
- S_IWUSR | S_IRUGO;
- st->iio_attr[cnt].dev_attr.show =
- ad7280_show_balance_timer;
- st->iio_attr[cnt].dev_attr.store =
- ad7280_store_balance_timer;
- st->iio_attr[cnt].dev_attr.attr.name =
- kasprintf(GFP_KERNEL, "in%d-in%d_balance_timer",
- (dev * AD7280A_CELLS_PER_DEV) + ch,
- (dev * AD7280A_CELLS_PER_DEV) + ch + 1);
- ad7280_attributes[cnt] =
- &st->iio_attr[cnt].dev_attr.attr;
- }
-
- ad7280_attributes[cnt] = NULL;
-
- return 0;
-}
-
-static ssize_t ad7280_read_channel_config(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7280_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned val;
-
- switch ((u32) this_attr->address) {
- case AD7280A_CELL_OVERVOLTAGE:
- val = 1000 + (st->cell_threshhigh * 1568) / 100;
- break;
- case AD7280A_CELL_UNDERVOLTAGE:
- val = 1000 + (st->cell_threshlow * 1568) / 100;
- break;
- case AD7280A_AUX_ADC_OVERVOLTAGE:
- val = (st->aux_threshhigh * 196) / 10;
- break;
- case AD7280A_AUX_ADC_UNDERVOLTAGE:
- val = (st->aux_threshlow * 196) / 10;
- break;
- default:
- return -EINVAL;
- }
-
- return sprintf(buf, "%u\n", val);
-}
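-
-/*
- * Worked example (for illustration): the 0xFF reset value cached for
- * cell_threshhigh in probe reads back here as
- * 1000 + (255 * 1568) / 100 = 4998 mV, i.e. roughly the 5 V top of the
- * 1 V..5 V cell input range.
- */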
-
-static ssize_t ad7280_write_channel_config(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7280_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- long val;
- int ret;
-
- ret = kstrtol(buf, 10, &val);
- if (ret)
- return ret;
-
- switch ((u32) this_attr->address) {
- case AD7280A_CELL_OVERVOLTAGE:
- case AD7280A_CELL_UNDERVOLTAGE:
- val = ((val - 1000) * 100) / 1568; /* LSB 15.68mV */
- break;
- case AD7280A_AUX_ADC_OVERVOLTAGE:
- case AD7280A_AUX_ADC_UNDERVOLTAGE:
- val = (val * 10) / 196; /* LSB 19.6mV */
- break;
- default:
- return -EFAULT;
- }
-
- val = clamp(val, 0L, 0xFFL);
-
- mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
- case AD7280A_CELL_OVERVOLTAGE:
- st->cell_threshhigh = val;
- break;
- case AD7280A_CELL_UNDERVOLTAGE:
- st->cell_threshlow = val;
- break;
- case AD7280A_AUX_ADC_OVERVOLTAGE:
- st->aux_threshhigh = val;
- break;
- case AD7280A_AUX_ADC_UNDERVOLTAGE:
- st->aux_threshlow = val;
- break;
- }
-
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER,
- this_attr->address, 1, val);
-
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static irqreturn_t ad7280_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct ad7280_state *st = iio_priv(indio_dev);
- unsigned *channels;
- int i, ret;
-
- channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
- if (channels == NULL)
- return IRQ_HANDLED;
-
- ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
- if (ret < 0)
- goto out;
-
- for (i = 0; i < st->scan_cnt; i++) {
- if (((channels[i] >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6) {
- if (((channels[i] >> 11) & 0xFFF) >=
- st->cell_threshhigh)
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_VOLTAGE,
- 1,
- 0,
- IIO_EV_DIR_RISING,
- IIO_EV_TYPE_THRESH,
- 0, 0, 0),
- iio_get_time_ns());
- else if (((channels[i] >> 11) & 0xFFF) <=
- st->cell_threshlow)
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_VOLTAGE,
- 1,
- 0,
- IIO_EV_DIR_FALLING,
- IIO_EV_TYPE_THRESH,
- 0, 0, 0),
- iio_get_time_ns());
- } else {
- if (((channels[i] >> 11) & 0xFFF) >= st->aux_threshhigh)
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_TEMP,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- iio_get_time_ns());
- else if (((channels[i] >> 11) & 0xFFF) <=
- st->aux_threshlow)
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_TEMP,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- iio_get_time_ns());
- }
- }
-
-out:
- kfree(channels);
-
- return IRQ_HANDLED;
-}
-
-static IIO_DEVICE_ATTR_NAMED(in_thresh_low_value,
- in_voltage-voltage_thresh_low_value,
- S_IRUGO | S_IWUSR,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_CELL_UNDERVOLTAGE);
-
-static IIO_DEVICE_ATTR_NAMED(in_thresh_high_value,
- in_voltage-voltage_thresh_high_value,
- S_IRUGO | S_IWUSR,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_CELL_OVERVOLTAGE);
-
-static IIO_DEVICE_ATTR(in_temp_thresh_low_value,
- S_IRUGO | S_IWUSR,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_AUX_ADC_UNDERVOLTAGE);
-
-static IIO_DEVICE_ATTR(in_temp_thresh_high_value,
- S_IRUGO | S_IWUSR,
- ad7280_read_channel_config,
- ad7280_write_channel_config,
- AD7280A_AUX_ADC_OVERVOLTAGE);
-
-
-static struct attribute *ad7280_event_attributes[] = {
- &iio_dev_attr_in_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in_thresh_high_value.dev_attr.attr,
- &iio_dev_attr_in_temp_thresh_low_value.dev_attr.attr,
- &iio_dev_attr_in_temp_thresh_high_value.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group ad7280_event_attrs_group = {
- .attrs = ad7280_event_attributes,
-};
-
-static int ad7280_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- struct ad7280_state *st = iio_priv(indio_dev);
- int ret;
-
- switch (m) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (chan->address == AD7280A_ALL_CELLS)
- ret = ad7280_read_all_channels(st, st->scan_cnt, NULL);
- else
- ret = ad7280_read_channel(st, chan->address >> 8,
- chan->address & 0xFF);
- mutex_unlock(&indio_dev->mlock);
-
- if (ret < 0)
- return ret;
-
- *val = ret;
-
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- if ((chan->address & 0xFF) <= AD7280A_CELL_VOLTAGE_6)
- *val = 4000;
- else
- *val = 5000;
-
- *val2 = AD7280A_BITS;
- return IIO_VAL_FRACTIONAL_LOG2;
- }
- return -EINVAL;
-}
-
-static const struct iio_info ad7280_info = {
- .read_raw = &ad7280_read_raw,
- .event_attrs = &ad7280_event_attrs_group,
- .attrs = &ad7280_attrs_group,
- .driver_module = THIS_MODULE,
-};
-
-static const struct ad7280_platform_data ad7280a_default_pdata = {
- .acquisition_time = AD7280A_ACQ_TIME_400ns,
- .conversion_averaging = AD7280A_CONV_AVG_DIS,
- .thermistor_term_en = true,
-};
-
-static int ad7280_probe(struct spi_device *spi)
-{
- const struct ad7280_platform_data *pdata = spi->dev.platform_data;
- struct ad7280_state *st;
- int ret;
- const unsigned short tACQ_ns[4] = {465, 1010, 1460, 1890};
- const unsigned short nAVG[4] = {1, 2, 4, 8};
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
- st->spi = spi;
-
- if (!pdata)
- pdata = &ad7280a_default_pdata;
-
- ad7280_crc8_build_table(st->crc_tab);
-
- st->spi->max_speed_hz = AD7280A_MAX_SPI_CLK_Hz;
- st->spi->mode = SPI_MODE_1;
- spi_setup(st->spi);
-
- st->ctrl_lb = AD7280A_CTRL_LB_ACQ_TIME(pdata->acquisition_time & 0x3);
- st->ctrl_hb = AD7280A_CTRL_HB_CONV_AVG(pdata->conversion_averaging
- & 0x3) | (pdata->thermistor_term_en ?
- AD7280A_CTRL_LB_THERMISTOR_EN : 0);
-
- ret = ad7280_chain_setup(st);
- if (ret < 0)
- return ret;
-
- st->slave_num = ret;
- st->scan_cnt = (st->slave_num + 1) * AD7280A_NUM_CH;
- st->cell_threshhigh = 0xFF;
- st->aux_threshhigh = 0xFF;
-
- /*
- * Total Conversion Time = ((tACQ + tCONV) *
- * (Number of Conversions per Part)) −
- * tACQ + ((N - 1) * tDELAY)
- *
- * Readback Delay = Total Conversion Time + tWAIT
- */
-
- st->readback_delay_us =
- ((tACQ_ns[pdata->acquisition_time & 0x3] + 695) *
- (AD7280A_NUM_CH * nAVG[pdata->conversion_averaging & 0x3]))
- - tACQ_ns[pdata->acquisition_time & 0x3] +
- st->slave_num * 250;
-
- /* Convert to usecs */
- st->readback_delay_us = DIV_ROUND_UP(st->readback_delay_us, 1000);
- st->readback_delay_us += 5; /* Add tWAIT */
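-
- /*
- * For illustration: with the default platform data (400 ns acquisition
- * time, averaging disabled) and no slave parts in the chain this works
- * out to ((465 + 695) * 12) - 465 = 13455 ns, rounded up to 14 us,
- * plus 5 us for tWAIT, i.e. 19 us.
- */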
-
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = ad7280_channel_init(st);
- if (ret < 0)
- return ret;
-
- indio_dev->num_channels = ret;
- indio_dev->channels = st->channels;
- indio_dev->info = &ad7280_info;
-
- ret = ad7280_attr_init(st);
- if (ret < 0)
- goto error_free_channels;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_attr;
-
- if (spi->irq > 0) {
- ret = ad7280_write(st, AD7280A_DEVADDR_MASTER,
- AD7280A_ALERT, 1,
- AD7280A_ALERT_RELAY_SIG_CHAIN_DOWN);
- if (ret)
- goto error_unregister;
-
- ret = ad7280_write(st, AD7280A_DEVADDR(st->slave_num),
- AD7280A_ALERT, 0,
- AD7280A_ALERT_GEN_STATIC_HIGH |
- (pdata->chain_last_alert_ignore & 0xF));
- if (ret)
- goto error_unregister;
-
- ret = request_threaded_irq(spi->irq,
- NULL,
- ad7280_event_handler,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- indio_dev->name,
- indio_dev);
- if (ret)
- goto error_unregister;
- }
-
- return 0;
-error_unregister:
- iio_device_unregister(indio_dev);
-
-error_free_attr:
- kfree(st->iio_attr);
-
-error_free_channels:
- kfree(st->channels);
-
- return ret;
-}
-
-static int ad7280_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7280_state *st = iio_priv(indio_dev);
-
- if (spi->irq > 0)
- free_irq(spi->irq, indio_dev);
- iio_device_unregister(indio_dev);
-
- ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
- AD7280A_CTRL_HB_PWRDN_SW | st->ctrl_hb);
-
- kfree(st->channels);
- kfree(st->iio_attr);
-
- return 0;
-}
-
-static const struct spi_device_id ad7280_id[] = {
- {"ad7280a", 0},
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad7280_id);
-
-static struct spi_driver ad7280_driver = {
- .driver = {
- .name = "ad7280",
- .owner = THIS_MODULE,
- },
- .probe = ad7280_probe,
- .remove = ad7280_remove,
- .id_table = ad7280_id,
-};
-module_spi_driver(ad7280_driver);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7280A");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7280a.h b/drivers/staging/iio/adc/ad7280a.h
deleted file mode 100644
index 732347a9bce4..000000000000
--- a/drivers/staging/iio/adc/ad7280a.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * AD7280A Lithium Ion Battery Monitoring System
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef IIO_ADC_AD7280_H_
-#define IIO_ADC_AD7280_H_
-
-/*
- * TODO: struct ad7280_platform_data needs to go into include/linux/iio
- */
-
-#define AD7280A_ACQ_TIME_400ns 0
-#define AD7280A_ACQ_TIME_800ns 1
-#define AD7280A_ACQ_TIME_1200ns 2
-#define AD7280A_ACQ_TIME_1600ns 3
-
-#define AD7280A_CONV_AVG_DIS 0
-#define AD7280A_CONV_AVG_2 1
-#define AD7280A_CONV_AVG_4 2
-#define AD7280A_CONV_AVG_8 3
-
-#define AD7280A_ALERT_REMOVE_VIN5 BIT(2)
-#define AD7280A_ALERT_REMOVE_VIN4_VIN5 BIT(3)
-#define AD7280A_ALERT_REMOVE_AUX5 BIT(0)
-#define AD7280A_ALERT_REMOVE_AUX4_AUX5 BIT(1)
-
-struct ad7280_platform_data {
- unsigned acquisition_time;
- unsigned conversion_averaging;
- unsigned chain_last_alert_ignore;
- bool thermistor_term_en;
-};
-
-#endif /* IIO_ADC_AD7280_H_ */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
deleted file mode 100644
index ec89d055cf58..000000000000
--- a/drivers/staging/iio/adc/ad7606.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * AD7606 ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef IIO_ADC_AD7606_H_
-#define IIO_ADC_AD7606_H_
-
-/*
- * TODO: struct ad7606_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad7606_platform_data - platform/board specific information
- * @default_os: default oversampling value {0, 2, 4, 8, 16, 32, 64}
- * @default_range: default range +/-{5000, 10000} mVolt
- * @gpio_convst: number of gpio connected to the CONVST pin
- * @gpio_reset: gpio connected to the RESET pin, if not used set to -1
- * @gpio_range: gpio connected to the RANGE pin, if not used set to -1
- * @gpio_os0: gpio connected to the OS0 pin, if not used set to -1
- * @gpio_os1: gpio connected to the OS1 pin, if not used set to -1
- * @gpio_os2: gpio connected to the OS2 pin, if not used set to -1
- * @gpio_frstdata: gpio connected to the FRSTDAT pin, if not used set to -1
- * @gpio_stby: gpio connected to the STBY pin, if not used set to -1
- */
-
-struct ad7606_platform_data {
- unsigned default_os;
- unsigned default_range;
- unsigned gpio_convst;
- unsigned gpio_reset;
- unsigned gpio_range;
- unsigned gpio_os0;
- unsigned gpio_os1;
- unsigned gpio_os2;
- unsigned gpio_frstdata;
- unsigned gpio_stby;
-};
-
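-/*
- * Example (illustrative only; the GPIO numbers are made up): a board
- * with the RANGE, STBY and oversampling pins hard-wired and only
- * CONVST, RESET and FRSTDATA under software control could pass:
- *
- *	static struct ad7606_platform_data board_ad7606_pdata = {
- *		.default_os	= 0,
- *		.default_range	= 10000,
- *		.gpio_convst	= 91,
- *		.gpio_reset	= 92,
- *		.gpio_frstdata	= 93,
- *		.gpio_range	= -1,
- *		.gpio_os0	= -1,
- *		.gpio_os1	= -1,
- *		.gpio_os2	= -1,
- *		.gpio_stby	= -1,
- *	};
- */
-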
-/**
- * struct ad7606_chip_info - chip specific information
- * @name: identification string for chip
- * @int_vref_mv: the internal reference voltage
- * @channels: channel specification
- * @num_channels: number of channels
- */
-
-struct ad7606_chip_info {
- const char *name;
- u16 int_vref_mv;
- const struct iio_chan_spec *channels;
- unsigned num_channels;
-};
-
-/**
- * struct ad7606_state - driver instance specific data
- */
-
-struct ad7606_state {
- struct device *dev;
- const struct ad7606_chip_info *chip_info;
- struct ad7606_platform_data *pdata;
- struct regulator *reg;
- struct work_struct poll_work;
- wait_queue_head_t wq_data_avail;
- const struct ad7606_bus_ops *bops;
- unsigned range;
- unsigned oversampling;
- bool done;
- void __iomem *base_address;
-
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
-
- unsigned short data[8] ____cacheline_aligned;
-};
-
-struct ad7606_bus_ops {
- /* more methods added in future? */
- int (*read_block)(struct device *, int, void *);
-};
-
-void ad7606_suspend(struct iio_dev *indio_dev);
-void ad7606_resume(struct iio_dev *indio_dev);
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address, unsigned id,
- const struct ad7606_bus_ops *bops);
-int ad7606_remove(struct iio_dev *indio_dev, int irq);
-int ad7606_reset(struct ad7606_state *st);
-
-enum ad7606_supported_device_ids {
- ID_AD7606_8,
- ID_AD7606_6,
- ID_AD7606_4
-};
-
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
-void ad7606_ring_cleanup(struct iio_dev *indio_dev);
-#endif /* IIO_ADC_AD7606_H_ */
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
deleted file mode 100644
index bf2c8013134c..000000000000
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
- * AD7606 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-
-#include "ad7606.h"
-
-int ad7606_reset(struct ad7606_state *st)
-{
- if (gpio_is_valid(st->pdata->gpio_reset)) {
- gpio_set_value(st->pdata->gpio_reset, 1);
- ndelay(100); /* t_reset >= 100ns */
- gpio_set_value(st->pdata->gpio_reset, 0);
- return 0;
- }
-
- return -ENODEV;
-}
-
-static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
- int ret;
-
- st->done = false;
- gpio_set_value(st->pdata->gpio_convst, 1);
-
- ret = wait_event_interruptible(st->wq_data_avail, st->done);
- if (ret)
- goto error_ret;
-
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = st->bops->read_block(st->dev, 1, st->data);
- if (ret)
- goto error_ret;
- if (!gpio_get_value(st->pdata->gpio_frstdata)) {
- /* This should never happen */
- ad7606_reset(st);
- ret = -EIO;
- goto error_ret;
- }
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels - 1, &st->data[1]);
- if (ret)
- goto error_ret;
- } else {
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels, st->data);
- if (ret)
- goto error_ret;
- }
-
- ret = st->data[ch];
-
-error_ret:
- gpio_set_value(st->pdata->gpio_convst, 0);
-
- return ret;
-}
-
-static int ad7606_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- int ret;
- struct ad7606_state *st = iio_priv(indio_dev);
-
- switch (m) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7606_scan_direct(indio_dev, chan->address);
- mutex_unlock(&indio_dev->mlock);
-
- if (ret < 0)
- return ret;
- *val = (short) ret;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = st->range * 2;
- *val2 = st->chip_info->channels[0].scan_type.realbits;
- return IIO_VAL_FRACTIONAL_LOG2;
- }
- return -EINVAL;
-}
-
-static ssize_t ad7606_show_range(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%u\n", st->range);
-}
-
-static ssize_t ad7606_store_range(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
- unsigned long lval;
- int ret;
-
- ret = kstrtoul(buf, 10, &lval);
- if (ret)
- return ret;
-
- if (!(lval == 5000 || lval == 10000)) {
- dev_err(dev, "range is not supported\n");
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
- gpio_set_value(st->pdata->gpio_range, lval == 10000);
- st->range = lval;
- mutex_unlock(&indio_dev->mlock);
-
- return count;
-}
-
-static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR,
- ad7606_show_range, ad7606_store_range, 0);
-static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000");
-
-static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%u\n", st->oversampling);
-}
-
-static int ad7606_oversampling_get_index(unsigned val)
-{
- unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(supported); i++)
- if (val == supported[i])
- return i;
-
- return -EINVAL;
-}
-
-static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
- unsigned long lval;
- int ret;
-
- ret = kstrtoul(buf, 10, &lval);
- if (ret)
- return ret;
-
- ret = ad7606_oversampling_get_index(lval);
- if (ret < 0) {
- dev_err(dev, "oversampling %lu is not supported\n", lval);
- return ret;
- }
-
- mutex_lock(&indio_dev->mlock);
- gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
- gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
- gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1);
- st->oversampling = lval;
- mutex_unlock(&indio_dev->mlock);
-
- return count;
-}
-
-static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
- ad7606_show_oversampling_ratio,
- ad7606_store_oversampling_ratio, 0);
-static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
-
-static struct attribute *ad7606_attributes_os_and_range[] = {
- &iio_dev_attr_in_voltage_range.dev_attr.attr,
- &iio_const_attr_in_voltage_range_available.dev_attr.attr,
- &iio_dev_attr_oversampling_ratio.dev_attr.attr,
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7606_attribute_group_os_and_range = {
- .attrs = ad7606_attributes_os_and_range,
-};
-
-static struct attribute *ad7606_attributes_os[] = {
- &iio_dev_attr_oversampling_ratio.dev_attr.attr,
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7606_attribute_group_os = {
- .attrs = ad7606_attributes_os,
-};
-
-static struct attribute *ad7606_attributes_range[] = {
- &iio_dev_attr_in_voltage_range.dev_attr.attr,
- &iio_const_attr_in_voltage_range_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7606_attribute_group_range = {
- .attrs = ad7606_attributes_range,
-};
-
-#define AD7606_CHANNEL(num) \
- { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = num, \
- .address = num, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
- .scan_index = num, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
- .endianness = IIO_CPU, \
- }, \
- }
-
-static const struct iio_chan_spec ad7606_8_channels[] = {
- AD7606_CHANNEL(0),
- AD7606_CHANNEL(1),
- AD7606_CHANNEL(2),
- AD7606_CHANNEL(3),
- AD7606_CHANNEL(4),
- AD7606_CHANNEL(5),
- AD7606_CHANNEL(6),
- AD7606_CHANNEL(7),
- IIO_CHAN_SOFT_TIMESTAMP(8),
-};
-
-static const struct iio_chan_spec ad7606_6_channels[] = {
- AD7606_CHANNEL(0),
- AD7606_CHANNEL(1),
- AD7606_CHANNEL(2),
- AD7606_CHANNEL(3),
- AD7606_CHANNEL(4),
- AD7606_CHANNEL(5),
- IIO_CHAN_SOFT_TIMESTAMP(6),
-};
-
-static const struct iio_chan_spec ad7606_4_channels[] = {
- AD7606_CHANNEL(0),
- AD7606_CHANNEL(1),
- AD7606_CHANNEL(2),
- AD7606_CHANNEL(3),
- IIO_CHAN_SOFT_TIMESTAMP(4),
-};
-
-static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
- /*
- * More devices added in future
- */
- [ID_AD7606_8] = {
- .name = "ad7606",
- .int_vref_mv = 2500,
- .channels = ad7606_8_channels,
- .num_channels = 8,
- },
- [ID_AD7606_6] = {
- .name = "ad7606-6",
- .int_vref_mv = 2500,
- .channels = ad7606_6_channels,
- .num_channels = 6,
- },
- [ID_AD7606_4] = {
- .name = "ad7606-4",
- .int_vref_mv = 2500,
- .channels = ad7606_4_channels,
- .num_channels = 4,
- },
-};
-
-static int ad7606_request_gpios(struct ad7606_state *st)
-{
- struct gpio gpio_array[3] = {
- [0] = {
- .gpio = st->pdata->gpio_os0,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS0",
- },
- [1] = {
- .gpio = st->pdata->gpio_os1,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS1",
- },
- [2] = {
- .gpio = st->pdata->gpio_os2,
- .flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ?
- GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
- .label = "AD7606_OS2",
- },
- };
- int ret;
-
- if (gpio_is_valid(st->pdata->gpio_convst)) {
- ret = gpio_request_one(st->pdata->gpio_convst,
- GPIOF_OUT_INIT_LOW,
- "AD7606_CONVST");
- if (ret) {
- dev_err(st->dev, "failed to request GPIO CONVST\n");
- goto error_ret;
- }
- } else {
- ret = -EIO;
- goto error_ret;
- }
-
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array));
- if (ret < 0)
- goto error_free_convst;
- }
-
- if (gpio_is_valid(st->pdata->gpio_reset)) {
- ret = gpio_request_one(st->pdata->gpio_reset,
- GPIOF_OUT_INIT_LOW,
- "AD7606_RESET");
- if (ret < 0)
- goto error_free_os;
- }
-
- if (gpio_is_valid(st->pdata->gpio_range)) {
- ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
- ((st->range == 10000) ? GPIOF_INIT_HIGH :
- GPIOF_INIT_LOW), "AD7606_RANGE");
- if (ret < 0)
- goto error_free_reset;
- }
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- ret = gpio_request_one(st->pdata->gpio_stby,
- GPIOF_OUT_INIT_HIGH,
- "AD7606_STBY");
- if (ret < 0)
- goto error_free_range;
- }
-
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN,
- "AD7606_FRSTDATA");
- if (ret < 0)
- goto error_free_stby;
- }
-
- return 0;
-
-error_free_stby:
- if (gpio_is_valid(st->pdata->gpio_stby))
- gpio_free(st->pdata->gpio_stby);
-error_free_range:
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_free(st->pdata->gpio_range);
-error_free_reset:
- if (gpio_is_valid(st->pdata->gpio_reset))
- gpio_free(st->pdata->gpio_reset);
-error_free_os:
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2))
- gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array));
-error_free_convst:
- gpio_free(st->pdata->gpio_convst);
-error_ret:
- return ret;
-}
-
-static void ad7606_free_gpios(struct ad7606_state *st)
-{
- if (gpio_is_valid(st->pdata->gpio_frstdata))
- gpio_free(st->pdata->gpio_frstdata);
- if (gpio_is_valid(st->pdata->gpio_stby))
- gpio_free(st->pdata->gpio_stby);
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_free(st->pdata->gpio_range);
- if (gpio_is_valid(st->pdata->gpio_reset))
- gpio_free(st->pdata->gpio_reset);
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- gpio_free(st->pdata->gpio_os2);
- gpio_free(st->pdata->gpio_os1);
- gpio_free(st->pdata->gpio_os0);
- }
- gpio_free(st->pdata->gpio_convst);
-}
-
-/* Interrupt handler */
-static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
-{
- struct iio_dev *indio_dev = dev_id;
- struct ad7606_state *st = iio_priv(indio_dev);
-
- if (iio_buffer_enabled(indio_dev)) {
- schedule_work(&st->poll_work);
- } else {
- st->done = true;
- wake_up_interruptible(&st->wq_data_avail);
- }
-
- return IRQ_HANDLED;
-};
-
-static const struct iio_info ad7606_info_no_os_or_range = {
- .driver_module = THIS_MODULE,
- .read_raw = &ad7606_read_raw,
-};
-
-static const struct iio_info ad7606_info_os_and_range = {
- .driver_module = THIS_MODULE,
- .read_raw = &ad7606_read_raw,
- .attrs = &ad7606_attribute_group_os_and_range,
-};
-
-static const struct iio_info ad7606_info_os = {
- .driver_module = THIS_MODULE,
- .read_raw = &ad7606_read_raw,
- .attrs = &ad7606_attribute_group_os,
-};
-
-static const struct iio_info ad7606_info_range = {
- .driver_module = THIS_MODULE,
- .read_raw = &ad7606_read_raw,
- .attrs = &ad7606_attribute_group_range,
-};
-
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address,
- unsigned id,
- const struct ad7606_bus_ops *bops)
-{
- struct ad7606_platform_data *pdata = dev->platform_data;
- struct ad7606_state *st;
- int ret;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
- if (!indio_dev)
- return ERR_PTR(-ENOMEM);
-
- st = iio_priv(indio_dev);
-
- st->dev = dev;
- st->bops = bops;
- st->base_address = base_address;
- st->range = pdata->default_range == 10000 ? 10000 : 5000;
-
- ret = ad7606_oversampling_get_index(pdata->default_os);
- if (ret < 0) {
- dev_warn(dev, "oversampling %d is not supported\n",
- pdata->default_os);
- st->oversampling = 0;
- } else {
- st->oversampling = pdata->default_os;
- }
-
- st->reg = devm_regulator_get(dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ERR_PTR(ret);
- }
-
- st->pdata = pdata;
- st->chip_info = &ad7606_chip_info_tbl[id];
-
- indio_dev->dev.parent = dev;
- if (gpio_is_valid(st->pdata->gpio_os0) &&
- gpio_is_valid(st->pdata->gpio_os1) &&
- gpio_is_valid(st->pdata->gpio_os2)) {
- if (gpio_is_valid(st->pdata->gpio_range))
- indio_dev->info = &ad7606_info_os_and_range;
- else
- indio_dev->info = &ad7606_info_os;
- } else {
- if (gpio_is_valid(st->pdata->gpio_range))
- indio_dev->info = &ad7606_info_range;
- else
- indio_dev->info = &ad7606_info_no_os_or_range;
- }
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = st->chip_info->name;
- indio_dev->channels = st->chip_info->channels;
- indio_dev->num_channels = st->chip_info->num_channels;
-
- init_waitqueue_head(&st->wq_data_avail);
-
- ret = ad7606_request_gpios(st);
- if (ret)
- goto error_disable_reg;
-
- ret = ad7606_reset(st);
- if (ret)
- dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
-
- ret = request_irq(irq, ad7606_interrupt,
- IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev);
- if (ret)
- goto error_free_gpios;
-
- ret = ad7606_register_ring_funcs_and_init(indio_dev);
- if (ret)
- goto error_free_irq;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unregister_ring;
-
- return indio_dev;
-error_unregister_ring:
- ad7606_ring_cleanup(indio_dev);
-
-error_free_irq:
- free_irq(irq, indio_dev);
-
-error_free_gpios:
- ad7606_free_gpios(st);
-
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
- return ERR_PTR(ret);
-}
-
-int ad7606_remove(struct iio_dev *indio_dev, int irq)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ad7606_ring_cleanup(indio_dev);
-
- free_irq(irq, indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- ad7606_free_gpios(st);
-
- return 0;
-}
-
-void ad7606_suspend(struct iio_dev *indio_dev)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_set_value(st->pdata->gpio_range, 1);
- gpio_set_value(st->pdata->gpio_stby, 0);
- }
-}
-
-void ad7606_resume(struct iio_dev *indio_dev)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- if (gpio_is_valid(st->pdata->gpio_stby)) {
- if (gpio_is_valid(st->pdata->gpio_range))
- gpio_set_value(st->pdata->gpio_range,
- st->range == 10000);
-
- gpio_set_value(st->pdata->gpio_stby, 1);
- ad7606_reset(st);
- }
-}
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
deleted file mode 100644
index 1d48ae381d16..000000000000
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * AD7606 Parallel Interface ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <linux/iio/iio.h>
-#include "ad7606.h"
-
-static int ad7606_par16_read_block(struct device *dev,
- int count, void *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct ad7606_state *st = iio_priv(indio_dev);
-
- insw((unsigned long) st->base_address, buf, count);
-
- return 0;
-}
-
-static const struct ad7606_bus_ops ad7606_par16_bops = {
- .read_block = ad7606_par16_read_block,
-};
-
-static int ad7606_par8_read_block(struct device *dev,
- int count, void *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct ad7606_state *st = iio_priv(indio_dev);
-
- insb((unsigned long) st->base_address, buf, count * 2);
-
- return 0;
-}
-
-static const struct ad7606_bus_ops ad7606_par8_bops = {
- .read_block = ad7606_par8_read_block,
-};
-
-static int ad7606_par_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct iio_dev *indio_dev;
- void __iomem *addr;
- resource_size_t remap_size;
- int irq;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq\n");
- return -ENODEV;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
-
- remap_size = resource_size(res);
-
- indio_dev = ad7606_probe(&pdev->dev, irq, addr,
- platform_get_device_id(pdev)->driver_data,
- remap_size > 1 ? &ad7606_par16_bops :
- &ad7606_par8_bops);
-
- if (IS_ERR(indio_dev))
- return PTR_ERR(indio_dev);
-
- platform_set_drvdata(pdev, indio_dev);
-
- return 0;
-}
-
-static int ad7606_par_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- ad7606_remove(indio_dev, platform_get_irq(pdev, 0));
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ad7606_par_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_suspend(indio_dev);
-
- return 0;
-}
-
-static int ad7606_par_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_resume(indio_dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ad7606_pm_ops = {
- .suspend = ad7606_par_suspend,
- .resume = ad7606_par_resume,
-};
-#define AD7606_PAR_PM_OPS (&ad7606_pm_ops)
-
-#else
-#define AD7606_PAR_PM_OPS NULL
-#endif /* CONFIG_PM */
-
-static const struct platform_device_id ad7606_driver_ids[] = {
- {
- .name = "ad7606-8",
- .driver_data = ID_AD7606_8,
- }, {
- .name = "ad7606-6",
- .driver_data = ID_AD7606_6,
- }, {
- .name = "ad7606-4",
- .driver_data = ID_AD7606_4,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, ad7606_driver_ids);
-
-static struct platform_driver ad7606_driver = {
- .probe = ad7606_par_probe,
- .remove = ad7606_par_remove,
- .id_table = ad7606_driver_ids,
- .driver = {
- .name = "ad7606",
- .pm = AD7606_PAR_PM_OPS,
- },
-};
-
-module_platform_driver(ad7606_driver);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
deleted file mode 100644
index a6f8eb11242c..000000000000
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright 2011-2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-
-#include "ad7606.h"
-
-/**
- * ad7606_trigger_handler_th_bh() th/bh of trigger launched polling to ring buffer
- *
- **/
-static irqreturn_t ad7606_trigger_handler_th_bh(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct ad7606_state *st = iio_priv(pf->indio_dev);
-
- gpio_set_value(st->pdata->gpio_convst, 1);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer
- * @work_s: the work struct through which this was scheduled
- *
- * Currently there is no option in this driver to disable the saving of
- * timestamps within the ring.
- * I think the one copy of this at a time was to avoid problems if the
- * trigger was set far too high and the reads then locked up the computer.
- **/
-static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
-{
- struct ad7606_state *st = container_of(work_s, struct ad7606_state,
- poll_work);
- struct iio_dev *indio_dev = iio_priv_to_dev(st);
- __u8 *buf;
- int ret;
-
- buf = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!buf)
- return;
-
- if (gpio_is_valid(st->pdata->gpio_frstdata)) {
- ret = st->bops->read_block(st->dev, 1, buf);
- if (ret)
- goto done;
- if (!gpio_get_value(st->pdata->gpio_frstdata)) {
-			/* This should never happen. However, a
-			 * signal glitch caused by bad PCB design or
-			 * electrostatic discharge could cause an extra read
-			 * or clock. This allows recovery.
-			 */
- ad7606_reset(st);
- goto done;
- }
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels - 1, buf + 2);
- if (ret)
- goto done;
- } else {
- ret = st->bops->read_block(st->dev,
- st->chip_info->num_channels, buf);
- if (ret)
- goto done;
- }
-
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
-done:
- gpio_set_value(st->pdata->gpio_convst, 0);
- iio_trigger_notify_done(indio_dev->trig);
- kfree(buf);
-}
-
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
-
- return iio_triggered_buffer_setup(indio_dev,
- &ad7606_trigger_handler_th_bh, &ad7606_trigger_handler_th_bh,
- NULL);
-}
-
-void ad7606_ring_cleanup(struct iio_dev *indio_dev)
-{
- iio_triggered_buffer_cleanup(indio_dev);
-}
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
deleted file mode 100644
index 7303983e64a7..000000000000
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * AD7606 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/types.h>
-#include <linux/err.h>
-
-#include <linux/iio/iio.h>
-#include "ad7606.h"
-
-#define MAX_SPI_FREQ_HZ 23500000 /* VDRIVE above 4.75 V */
-
-static int ad7606_spi_read_block(struct device *dev,
- int count, void *buf)
-{
- struct spi_device *spi = to_spi_device(dev);
- int i, ret;
- unsigned short *data = buf;
-
- ret = spi_read(spi, buf, count * 2);
- if (ret < 0) {
- dev_err(&spi->dev, "SPI read error\n");
- return ret;
- }
-
- for (i = 0; i < count; i++)
- data[i] = be16_to_cpu(data[i]);
-
- return 0;
-}
-
-static const struct ad7606_bus_ops ad7606_spi_bops = {
- .read_block = ad7606_spi_read_block,
-};
-
-static int ad7606_spi_probe(struct spi_device *spi)
-{
- struct iio_dev *indio_dev;
-
- indio_dev = ad7606_probe(&spi->dev, spi->irq, NULL,
- spi_get_device_id(spi)->driver_data,
- &ad7606_spi_bops);
-
- if (IS_ERR(indio_dev))
- return PTR_ERR(indio_dev);
-
- spi_set_drvdata(spi, indio_dev);
-
- return 0;
-}
-
-static int ad7606_spi_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
-
- return ad7606_remove(indio_dev, spi->irq);
-}
-
-#ifdef CONFIG_PM
-static int ad7606_spi_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_suspend(indio_dev);
-
- return 0;
-}
-
-static int ad7606_spi_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_resume(indio_dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ad7606_pm_ops = {
- .suspend = ad7606_spi_suspend,
- .resume = ad7606_spi_resume,
-};
-#define AD7606_SPI_PM_OPS (&ad7606_pm_ops)
-
-#else
-#define AD7606_SPI_PM_OPS NULL
-#endif
-
-static const struct spi_device_id ad7606_id[] = {
- {"ad7606-8", ID_AD7606_8},
- {"ad7606-6", ID_AD7606_6},
- {"ad7606-4", ID_AD7606_4},
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad7606_id);
-
-static struct spi_driver ad7606_driver = {
- .driver = {
- .name = "ad7606",
- .owner = THIS_MODULE,
- .pm = AD7606_SPI_PM_OPS,
- },
- .probe = ad7606_spi_probe,
- .remove = ad7606_spi_remove,
- .id_table = ad7606_id,
-};
-module_spi_driver(ad7606_driver);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
-MODULE_LICENSE("GPL v2");
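The SPI read path removed above byte-swaps every 16-bit sample with be16_to_cpu(), since the AD7606 clocks data out MSB first. A small userspace analogue of that conversion, using made-up sample values (be16toh()/htobe16() come from <endian.h>):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical raw words as they would arrive over the wire (big endian) */
	uint16_t raw[4] = { htobe16(0x0123), htobe16(0x7fff),
			    htobe16(0x8000), htobe16(0xffff) };

	for (int i = 0; i < 4; i++)
		printf("sample %d = %d\n", i, (int16_t)be16toh(raw[i]));

	return 0;
}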
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
deleted file mode 100644
index 9f03fe3ee3d9..000000000000
--- a/drivers/staging/iio/adc/ad7780.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * AD7170/AD7171 and AD7780/AD7781 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/spi/spi.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/adc/ad_sigma_delta.h>
-
-#include "ad7780.h"
-
-#define AD7780_RDY BIT(7)
-#define AD7780_FILTER BIT(6)
-#define AD7780_ERR BIT(5)
-#define AD7780_ID1 BIT(4)
-#define AD7780_ID0 BIT(3)
-#define AD7780_GAIN BIT(2)
-#define AD7780_PAT1 BIT(1)
-#define AD7780_PAT0 BIT(0)
-
-struct ad7780_chip_info {
- struct iio_chan_spec channel;
- unsigned int pattern_mask;
- unsigned int pattern;
-};
-
-struct ad7780_state {
- const struct ad7780_chip_info *chip_info;
- struct regulator *reg;
- int powerdown_gpio;
- unsigned int gain;
- u16 int_vref_mv;
-
- struct ad_sigma_delta sd;
-};
-
-enum ad7780_supported_device_ids {
- ID_AD7170,
- ID_AD7171,
- ID_AD7780,
- ID_AD7781,
-};
-
-static struct ad7780_state *ad_sigma_delta_to_ad7780(struct ad_sigma_delta *sd)
-{
- return container_of(sd, struct ad7780_state, sd);
-}
-
-static int ad7780_set_mode(struct ad_sigma_delta *sigma_delta,
- enum ad_sigma_delta_mode mode)
-{
- struct ad7780_state *st = ad_sigma_delta_to_ad7780(sigma_delta);
- unsigned val;
-
- switch (mode) {
- case AD_SD_MODE_SINGLE:
- case AD_SD_MODE_CONTINUOUS:
- val = 1;
- break;
- default:
- val = 0;
- break;
- }
-
- if (gpio_is_valid(st->powerdown_gpio))
- gpio_set_value(st->powerdown_gpio, val);
-
- return 0;
-}
-
-static int ad7780_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- struct ad7780_state *st = iio_priv(indio_dev);
-
- switch (m) {
- case IIO_CHAN_INFO_RAW:
- return ad_sigma_delta_single_conversion(indio_dev, chan, val);
- case IIO_CHAN_INFO_SCALE:
- *val = st->int_vref_mv * st->gain;
- *val2 = chan->scan_type.realbits - 1;
- return IIO_VAL_FRACTIONAL_LOG2;
- case IIO_CHAN_INFO_OFFSET:
- *val -= (1 << (chan->scan_type.realbits - 1));
- return IIO_VAL_INT;
- }
-
- return -EINVAL;
-}
-
-static int ad7780_postprocess_sample(struct ad_sigma_delta *sigma_delta,
- unsigned int raw_sample)
-{
- struct ad7780_state *st = ad_sigma_delta_to_ad7780(sigma_delta);
- const struct ad7780_chip_info *chip_info = st->chip_info;
-
- if ((raw_sample & AD7780_ERR) ||
- ((raw_sample & chip_info->pattern_mask) != chip_info->pattern))
- return -EIO;
-
- if (raw_sample & AD7780_GAIN)
- st->gain = 1;
- else
- st->gain = 128;
-
- return 0;
-}
-
-static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
- .set_mode = ad7780_set_mode,
- .postprocess_sample = ad7780_postprocess_sample,
- .has_registers = false,
-};
-
-#define AD7780_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL(1, 0, 0, bits, 32, wordsize - bits)
-
-static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
- [ID_AD7170] = {
- .channel = AD7780_CHANNEL(12, 24),
- .pattern = 0x5,
- .pattern_mask = 0x7,
- },
- [ID_AD7171] = {
- .channel = AD7780_CHANNEL(16, 24),
- .pattern = 0x5,
- .pattern_mask = 0x7,
- },
- [ID_AD7780] = {
- .channel = AD7780_CHANNEL(24, 32),
- .pattern = 0x1,
- .pattern_mask = 0x3,
- },
- [ID_AD7781] = {
- .channel = AD7780_CHANNEL(20, 32),
- .pattern = 0x1,
- .pattern_mask = 0x3,
- },
-};
-
-static const struct iio_info ad7780_info = {
- .read_raw = &ad7780_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-static int ad7780_probe(struct spi_device *spi)
-{
- struct ad7780_platform_data *pdata = spi->dev.platform_data;
- struct ad7780_state *st;
- struct iio_dev *indio_dev;
- int ret, voltage_uv = 0;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- st->gain = 1;
-
- ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info);
-
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
-
- voltage_uv = regulator_get_voltage(st->reg);
- }
-
- st->chip_info =
- &ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data];
-
- if (pdata && pdata->vref_mv)
- st->int_vref_mv = pdata->vref_mv;
- else if (voltage_uv)
- st->int_vref_mv = voltage_uv / 1000;
- else
- dev_warn(&spi->dev, "reference voltage unspecified\n");
-
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = &st->chip_info->channel;
- indio_dev->num_channels = 1;
- indio_dev->info = &ad7780_info;
-
- if (pdata && gpio_is_valid(pdata->gpio_pdrst)) {
-
- ret = devm_gpio_request_one(&spi->dev, pdata->gpio_pdrst,
- GPIOF_OUT_INIT_LOW, "AD7780 /PDRST");
- if (ret) {
- dev_err(&spi->dev, "failed to request GPIO PDRST\n");
- goto error_disable_reg;
- }
- st->powerdown_gpio = pdata->gpio_pdrst;
- } else {
- st->powerdown_gpio = -1;
- }
-
- ret = ad_sd_setup_buffer_and_trigger(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_and_trigger;
-
- return 0;
-
-error_cleanup_buffer_and_trigger:
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad7780_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7780_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return 0;
-}
-
-static const struct spi_device_id ad7780_id[] = {
- {"ad7170", ID_AD7170},
- {"ad7171", ID_AD7171},
- {"ad7780", ID_AD7780},
- {"ad7781", ID_AD7781},
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad7780_id);
-
-static struct spi_driver ad7780_driver = {
- .driver = {
- .name = "ad7780",
- .owner = THIS_MODULE,
- },
- .probe = ad7780_probe,
- .remove = ad7780_remove,
- .id_table = ad7780_id,
-};
-module_spi_driver(ad7780_driver);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7780 and similar ADCs");
-MODULE_LICENSE("GPL v2");
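For readers wondering how the SCALE and OFFSET cases above are consumed: the IIO convention is processed_value = (raw + offset) * scale, and IIO_VAL_FRACTIONAL_LOG2 encodes scale as val / 2^val2, i.e. vref_mv * gain / 2^(realbits - 1) as returned by the removed read_raw(). A hedged userspace sketch with made-up numbers for a 24-bit channel and a 3.0 V reference:

#include <stdio.h>

int main(void)
{
	double raw    = 9175682.0;	    /* hypothetical in_voltage0_raw */
	double offset = -8388608.0;	    /* -(2^23) for a 24-bit channel */
	double scale  = 3000.0 / 8388608.0; /* vref_mv * gain / 2^(realbits - 1) */

	/* IIO convention: value = (raw + offset) * scale, here in mV */
	printf("input = %.3f mV\n", (raw + offset) * scale);
	return 0;
}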
diff --git a/drivers/staging/iio/adc/ad7780.h b/drivers/staging/iio/adc/ad7780.h
deleted file mode 100644
index 67e511c3d6f0..000000000000
--- a/drivers/staging/iio/adc/ad7780.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * AD7780/AD7781 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-#ifndef IIO_ADC_AD7780_H_
-#define IIO_ADC_AD7780_H_
-
-/*
- * TODO: struct ad7780_platform_data needs to go into include/linux/iio
- */
-
-/* NOTE:
- * The AD7780 doesn't feature a dedicated SPI chip select, in addition it
- * features a dual use data out ready DOUT/RDY output.
- * In order to avoid contentions on the SPI bus, it's therefore necessary
- * to use spi bus locking combined with a dedicated GPIO to control the
- * power down reset signal of the AD7780.
- *
- * The DOUT/RDY output must also be wired to an interrupt capable GPIO.
- */
-
-struct ad7780_platform_data {
- u16 vref_mv;
- int gpio_pdrst;
-};
-
-#endif /* IIO_ADC_AD7780_H_ */
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 48b1c3740030..172acf135f3b 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -1,13 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* AD7816 digital temperature sensor driver supporting AD7816/7/8
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -38,59 +37,67 @@
#define AD7816_TEMP_FLOAT_OFFSET 2
#define AD7816_TEMP_FLOAT_MASK 0x3
-
/*
* struct ad7816_chip_info - chip specific information
*/
struct ad7816_chip_info {
+ kernel_ulong_t id;
struct spi_device *spi_dev;
- u16 rdwr_pin;
- u16 convert_pin;
- u16 busy_pin;
- u8 oti_data[AD7816_CS_MAX+1];
+ struct gpio_desc *rdwr_pin;
+ struct gpio_desc *convert_pin;
+ struct gpio_desc *busy_pin;
+ u8 oti_data[AD7816_CS_MAX + 1];
u8 channel_id; /* 0 always be temperature */
u8 mode;
};
+enum ad7816_type {
+ ID_AD7816,
+ ID_AD7817,
+ ID_AD7818,
+};
+
/*
* ad7816 data access by SPI
*/
static int ad7816_spi_read(struct ad7816_chip_info *chip, u16 *data)
{
struct spi_device *spi_dev = chip->spi_dev;
- int ret = 0;
+ int ret;
+ __be16 buf;
- gpio_set_value(chip->rdwr_pin, 1);
- gpio_set_value(chip->rdwr_pin, 0);
+ gpiod_set_value(chip->rdwr_pin, 1);
+ gpiod_set_value(chip->rdwr_pin, 0);
ret = spi_write(spi_dev, &chip->channel_id, sizeof(chip->channel_id));
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI channel setting error\n");
return ret;
}
- gpio_set_value(chip->rdwr_pin, 1);
-
+ gpiod_set_value(chip->rdwr_pin, 1);
if (chip->mode == AD7816_PD) { /* operating mode 2 */
- gpio_set_value(chip->convert_pin, 1);
- gpio_set_value(chip->convert_pin, 0);
+ gpiod_set_value(chip->convert_pin, 1);
+ gpiod_set_value(chip->convert_pin, 0);
} else { /* operating mode 1 */
- gpio_set_value(chip->convert_pin, 0);
- gpio_set_value(chip->convert_pin, 1);
+ gpiod_set_value(chip->convert_pin, 0);
+ gpiod_set_value(chip->convert_pin, 1);
}
- while (gpio_get_value(chip->busy_pin))
- cpu_relax();
+ if (chip->id == ID_AD7816 || chip->id == ID_AD7817) {
+ while (gpiod_get_value(chip->busy_pin))
+ cpu_relax();
+ }
- gpio_set_value(chip->rdwr_pin, 0);
- gpio_set_value(chip->rdwr_pin, 1);
- ret = spi_read(spi_dev, (u8 *)data, sizeof(*data));
+ gpiod_set_value(chip->rdwr_pin, 0);
+ gpiod_set_value(chip->rdwr_pin, 1);
+ ret = spi_read(spi_dev, &buf, sizeof(*data));
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI data read error\n");
return ret;
}
- *data = be16_to_cpu(*data);
+ *data = be16_to_cpu(buf);
return ret;
}
@@ -98,10 +105,10 @@ static int ad7816_spi_read(struct ad7816_chip_info *chip, u16 *data)
static int ad7816_spi_write(struct ad7816_chip_info *chip, u8 data)
{
struct spi_device *spi_dev = chip->spi_dev;
- int ret = 0;
+ int ret;
- gpio_set_value(chip->rdwr_pin, 1);
- gpio_set_value(chip->rdwr_pin, 0);
+ gpiod_set_value(chip->rdwr_pin, 1);
+ gpiod_set_value(chip->rdwr_pin, 0);
ret = spi_write(spi_dev, &data, sizeof(data));
if (ret < 0)
dev_err(&spi_dev->dev, "SPI oti data write error\n");
@@ -110,8 +117,8 @@ static int ad7816_spi_write(struct ad7816_chip_info *chip, u8 data)
}
static ssize_t ad7816_show_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
@@ -122,42 +129,42 @@ static ssize_t ad7816_show_mode(struct device *dev,
}
static ssize_t ad7816_store_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
- if (strcmp(buf, "full")) {
- gpio_set_value(chip->rdwr_pin, 1);
+ if (strcmp(buf, "full") == 0) {
+ gpiod_set_value(chip->rdwr_pin, 1);
chip->mode = AD7816_FULL;
} else {
- gpio_set_value(chip->rdwr_pin, 0);
+ gpiod_set_value(chip->rdwr_pin, 0);
chip->mode = AD7816_PD;
}
return len;
}
-static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(mode, 0644,
ad7816_show_mode,
ad7816_store_mode,
0);
static ssize_t ad7816_show_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "full\npower-save\n");
}
-static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7816_show_available_modes,
+static IIO_DEVICE_ATTR(available_modes, 0444, ad7816_show_available_modes,
NULL, 0);
static ssize_t ad7816_show_channel(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
@@ -166,9 +173,9 @@ static ssize_t ad7816_show_channel(struct device *dev,
}
static ssize_t ad7816_store_channel(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
@@ -198,15 +205,14 @@ static ssize_t ad7816_store_channel(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(channel, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(channel, 0644,
ad7816_show_channel,
ad7816_store_channel,
0);
-
static ssize_t ad7816_show_value(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
@@ -224,13 +230,13 @@ static ssize_t ad7816_show_value(struct device *dev,
value = (s8)((data >> AD7816_TEMP_FLOAT_OFFSET) - 103);
data &= AD7816_TEMP_FLOAT_MASK;
if (value < 0)
- data = (1 << AD7816_TEMP_FLOAT_OFFSET) - data;
+ data = BIT(AD7816_TEMP_FLOAT_OFFSET) - data;
return sprintf(buf, "%d.%.2d\n", value, data * 25);
}
return sprintf(buf, "%u\n", data);
}
-static IIO_DEVICE_ATTR(value, S_IRUGO, ad7816_show_value, NULL, 0);
+static IIO_DEVICE_ATTR(value, 0444, ad7816_show_value, NULL, 0);
static struct attribute *ad7816_attributes[] = {
&iio_dev_attr_available_modes.dev_attr.attr,
@@ -255,13 +261,14 @@ static const struct attribute_group ad7816_attribute_group = {
static irqreturn_t ad7816_event_handler(int irq, void *private)
{
- iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI, iio_get_time_ns());
+ iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI,
+ iio_get_time_ns(private));
return IRQ_HANDLED;
}
static ssize_t ad7816_show_oti(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
@@ -280,9 +287,9 @@ static ssize_t ad7816_show_oti(struct device *dev,
}
static inline ssize_t ad7816_set_oti(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
@@ -298,14 +305,14 @@ static inline ssize_t ad7816_set_oti(struct device *dev,
dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
return -EINVAL;
} else if (chip->channel_id == 0) {
- if (ret || value < AD7816_BOUND_VALUE_MIN ||
- value > AD7816_BOUND_VALUE_MAX)
+ if (value < AD7816_BOUND_VALUE_MIN ||
+ value > AD7816_BOUND_VALUE_MAX)
return -EINVAL;
data = (u8)(value - AD7816_BOUND_VALUE_MIN +
AD7816_BOUND_VALUE_BASE);
} else {
- if (ret || value < AD7816_BOUND_VALUE_BASE || value > 255)
+ if (value < AD7816_BOUND_VALUE_BASE || value > 255)
return -EINVAL;
data = (u8)value;
@@ -320,7 +327,7 @@ static inline ssize_t ad7816_set_oti(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(oti, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(oti, 0644,
ad7816_show_oti, ad7816_set_oti, 0);
static struct attribute *ad7816_event_attributes[] = {
@@ -328,7 +335,7 @@ static struct attribute *ad7816_event_attributes[] = {
NULL,
};
-static struct attribute_group ad7816_event_attribute_group = {
+static const struct attribute_group ad7816_event_attribute_group = {
.attrs = ad7816_event_attributes,
.name = "events",
};
@@ -336,7 +343,6 @@ static struct attribute_group ad7816_event_attribute_group = {
static const struct iio_info ad7816_info = {
.attrs = &ad7816_attribute_group,
.event_attrs = &ad7816_event_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
@@ -347,56 +353,45 @@ static int ad7816_probe(struct spi_device *spi_dev)
{
struct ad7816_chip_info *chip;
struct iio_dev *indio_dev;
- unsigned short *pins = spi_dev->dev.platform_data;
- int ret = 0;
- int i;
-
- if (!pins) {
- dev_err(&spi_dev->dev, "No necessary GPIO platform data.\n");
- return -EINVAL;
- }
+ int i, ret;
indio_dev = devm_iio_device_alloc(&spi_dev->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- dev_set_drvdata(&spi_dev->dev, indio_dev);
chip->spi_dev = spi_dev;
for (i = 0; i <= AD7816_CS_MAX; i++)
chip->oti_data[i] = 203;
- chip->rdwr_pin = pins[0];
- chip->convert_pin = pins[1];
- chip->busy_pin = pins[2];
-
- ret = devm_gpio_request(&spi_dev->dev, chip->rdwr_pin,
- spi_get_device_id(spi_dev)->name);
- if (ret) {
- dev_err(&spi_dev->dev, "Fail to request rdwr gpio PIN %d.\n",
- chip->rdwr_pin);
+
+ chip->id = spi_get_device_id(spi_dev)->driver_data;
+ chip->rdwr_pin = devm_gpiod_get(&spi_dev->dev, "rdwr", GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->rdwr_pin)) {
+ ret = PTR_ERR(chip->rdwr_pin);
+ dev_err(&spi_dev->dev, "Failed to request rdwr GPIO: %d\n",
+ ret);
return ret;
}
- gpio_direction_input(chip->rdwr_pin);
- ret = devm_gpio_request(&spi_dev->dev, chip->convert_pin,
- spi_get_device_id(spi_dev)->name);
- if (ret) {
- dev_err(&spi_dev->dev, "Fail to request convert gpio PIN %d.\n",
- chip->convert_pin);
+ chip->convert_pin = devm_gpiod_get(&spi_dev->dev, "convert",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->convert_pin)) {
+ ret = PTR_ERR(chip->convert_pin);
+ dev_err(&spi_dev->dev, "Failed to request convert GPIO: %d\n",
+ ret);
return ret;
}
- gpio_direction_input(chip->convert_pin);
- ret = devm_gpio_request(&spi_dev->dev, chip->busy_pin,
- spi_get_device_id(spi_dev)->name);
- if (ret) {
- dev_err(&spi_dev->dev, "Fail to request busy gpio PIN %d.\n",
- chip->busy_pin);
- return ret;
+ if (chip->id == ID_AD7816 || chip->id == ID_AD7817) {
+ chip->busy_pin = devm_gpiod_get(&spi_dev->dev, "busy",
+ GPIOD_IN);
+ if (IS_ERR(chip->busy_pin)) {
+ ret = PTR_ERR(chip->busy_pin);
+ dev_err(&spi_dev->dev, "Failed to request busy GPIO: %d\n",
+ ret);
+ return ret;
+ }
}
- gpio_direction_input(chip->busy_pin);
indio_dev->name = spi_get_device_id(spi_dev)->name;
- indio_dev->dev.parent = &spi_dev->dev;
indio_dev->info = &ad7816_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -417,16 +412,24 @@ static int ad7816_probe(struct spi_device *spi_dev)
return ret;
dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
- indio_dev->name);
+ indio_dev->name);
return 0;
}
+static const struct of_device_id ad7816_of_match[] = {
+ { .compatible = "adi,ad7816", },
+ { .compatible = "adi,ad7817", },
+ { .compatible = "adi,ad7818", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7816_of_match);
+
static const struct spi_device_id ad7816_id[] = {
- { "ad7816", 0 },
- { "ad7817", 0 },
- { "ad7818", 0 },
- {}
+ { "ad7816", ID_AD7816 },
+ { "ad7817", ID_AD7817 },
+ { "ad7818", ID_AD7818 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7816_id);
@@ -434,7 +437,7 @@ MODULE_DEVICE_TABLE(spi, ad7816_id);
static struct spi_driver ad7816_driver = {
.driver = {
.name = "ad7816",
- .owner = THIS_MODULE,
+ .of_match_table = ad7816_of_match,
},
.probe = ad7816_probe,
.id_table = ad7816_id,
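The ad7816 hunk above converts the driver from the legacy integer GPIO calls to the descriptor-based consumer API. A minimal, hypothetical sketch of the new idiom in a probe function (the "convert" connection ID matches the lookup used above; error reporting trimmed):

#include <linux/gpio/consumer.h>

static int example_probe(struct device *dev)
{
	struct gpio_desc *convert;

	/* devm_gpiod_get() replaces gpio_request() + gpio_direction_output() */
	convert = devm_gpiod_get(dev, "convert", GPIOD_OUT_LOW);
	if (IS_ERR(convert))
		return PTR_ERR(convert);

	/* gpiod_set_value() works on logical levels; polarity comes from firmware */
	gpiod_set_value(convert, 1);
	gpiod_set_value(convert, 0);

	return 0;
}

Because gpiolib applies the polarity described in firmware, an active-low line no longer needs manual inversion in the driver.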
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
deleted file mode 100644
index 5331c442fcfc..000000000000
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * lpc32xx_adc.c - Support for ADC in LPC32XX
- *
- * 3-channel, 10-bit ADC
- *
- * Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/completion.h>
-#include <linux/of.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-/*
- * LPC32XX registers definitions
- */
-#define LPC32XX_ADC_SELECT(x) ((x) + 0x04)
-#define LPC32XX_ADC_CTRL(x) ((x) + 0x08)
-#define LPC32XX_ADC_VALUE(x) ((x) + 0x48)
-
-/* Bit definitions for LPC32XX_ADC_SELECT: */
-#define AD_REFm 0x00000200 /* constant, always write this value! */
-#define AD_REFp 0x00000080 /* constant, always write this value! */
-#define AD_IN 0x00000010 /* multiple of this is the */
- /* channel number: 0, 1, 2 */
-#define AD_INTERNAL 0x00000004 /* constant, always write this value! */
-
-/* Bit definitions for LPC32XX_ADC_CTRL: */
-#define AD_STROBE 0x00000002
-#define AD_PDN_CTRL 0x00000004
-
-/* Bit definitions for LPC32XX_ADC_VALUE: */
-#define ADC_VALUE_MASK 0x000003FF
-
-#define MOD_NAME "lpc32xx-adc"
-
-struct lpc32xx_adc_info {
- void __iomem *adc_base;
- struct clk *clk;
- struct completion completion;
-
- u32 value;
-};
-
-static int lpc32xx_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct lpc32xx_adc_info *info = iio_priv(indio_dev);
-
- if (mask == IIO_CHAN_INFO_RAW) {
- mutex_lock(&indio_dev->mlock);
- clk_enable(info->clk);
- /* Measurement setup */
- __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
- LPC32XX_ADC_SELECT(info->adc_base));
- /* Trigger conversion */
- __raw_writel(AD_PDN_CTRL | AD_STROBE,
- LPC32XX_ADC_CTRL(info->adc_base));
- wait_for_completion(&info->completion); /* set by ISR */
- clk_disable(info->clk);
- *val = info->value;
- mutex_unlock(&indio_dev->mlock);
-
- return IIO_VAL_INT;
- }
-
- return -EINVAL;
-}
-
-static const struct iio_info lpc32xx_adc_iio_info = {
- .read_raw = &lpc32xx_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-#define LPC32XX_ADC_CHANNEL(_index) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = _index, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .address = AD_IN * _index, \
- .scan_index = _index, \
-}
-
-static const struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
- LPC32XX_ADC_CHANNEL(0),
- LPC32XX_ADC_CHANNEL(1),
- LPC32XX_ADC_CHANNEL(2),
-};
-
-static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
-{
- struct lpc32xx_adc_info *info = dev_id;
-
- /* Read value and clear irq */
- info->value = __raw_readl(LPC32XX_ADC_VALUE(info->adc_base)) &
- ADC_VALUE_MASK;
- complete(&info->completion);
-
- return IRQ_HANDLED;
-}
-
-static int lpc32xx_adc_probe(struct platform_device *pdev)
-{
- struct lpc32xx_adc_info *info = NULL;
- struct resource *res;
- int retval = -ENODEV;
- struct iio_dev *iodev = NULL;
- int irq;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get platform I/O memory\n");
- return -EBUSY;
- }
-
- iodev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
- if (!iodev)
- return -ENOMEM;
-
- info = iio_priv(iodev);
-
- info->adc_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!info->adc_base) {
- dev_err(&pdev->dev, "failed mapping memory\n");
- return -EBUSY;
- }
-
- info->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed getting clock\n");
- return PTR_ERR(info->clk);
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "failed getting interrupt resource\n");
- return -EINVAL;
- }
-
- retval = devm_request_irq(&pdev->dev, irq, lpc32xx_adc_isr, 0,
- MOD_NAME, info);
- if (retval < 0) {
- dev_err(&pdev->dev, "failed requesting interrupt\n");
- return retval;
- }
-
- platform_set_drvdata(pdev, iodev);
-
- init_completion(&info->completion);
-
- iodev->name = MOD_NAME;
- iodev->dev.parent = &pdev->dev;
- iodev->info = &lpc32xx_adc_iio_info;
- iodev->modes = INDIO_DIRECT_MODE;
- iodev->channels = lpc32xx_adc_iio_channels;
- iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
-
- retval = devm_iio_device_register(&pdev->dev, iodev);
- if (retval)
- return retval;
-
- dev_info(&pdev->dev, "LPC32XX ADC driver loaded, IRQ %d\n", irq);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id lpc32xx_adc_match[] = {
- { .compatible = "nxp,lpc3220-adc" },
- {},
-};
-MODULE_DEVICE_TABLE(of, lpc32xx_adc_match);
-#endif
-
-static struct platform_driver lpc32xx_adc_driver = {
- .probe = lpc32xx_adc_probe,
- .driver = {
- .name = MOD_NAME,
- .of_match_table = of_match_ptr(lpc32xx_adc_match),
- },
-};
-
-module_platform_driver(lpc32xx_adc_driver);
-
-MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
-MODULE_DESCRIPTION("LPC32XX ADC driver");
-MODULE_LICENSE("GPL");
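Drivers such as the removed lpc32xx one expose their channels through the standard IIO sysfs interface, so a one-off reading needs no dedicated library. A small sketch, assuming the ADC registered as iio:device0 (the device index and channel name must be verified on the target):

#include <stdio.h>

int main(void)
{
	/* path is an assumption; enumerate /sys/bus/iio/devices/ to confirm */
	const char *path = "/sys/bus/iio/devices/iio:device0/in_voltage0_raw";
	FILE *f = fopen(path, "r");
	int raw;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &raw) != 1) {
		fprintf(stderr, "could not parse %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("raw ADC code: %d\n", raw);
	return 0;
}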
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
deleted file mode 100644
index 3f7715c9968b..000000000000
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ /dev/null
@@ -1,1752 +0,0 @@
-/*
- * Freescale MXS LRADC driver
- *
- * Copyright (c) 2012 DENX Software Engineering, GmbH.
- * Marek Vasut <marex@denx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/stmp_device.h>
-#include <linux/sysfs.h>
-
-#include <linux/iio/buffer.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/sysfs.h>
-
-#define DRIVER_NAME "mxs-lradc"
-
-#define LRADC_MAX_DELAY_CHANS 4
-#define LRADC_MAX_MAPPED_CHANS 8
-#define LRADC_MAX_TOTAL_CHANS 16
-
-#define LRADC_DELAY_TIMER_HZ 2000
-
-/*
- * Make this runtime configurable if necessary. Currently, if the buffered mode
- * is enabled, the LRADC takes LRADC_DELAY_TIMER_LOOP samples of data before
- * triggering IRQ. The sampling happens every (LRADC_DELAY_TIMER_PER / 2000)
- * seconds. The result is that the samples arrive every 500mS.
- */
-#define LRADC_DELAY_TIMER_PER 200
-#define LRADC_DELAY_TIMER_LOOP 5
-
-/*
- * Once the pen touches the touchscreen, the touchscreen switches from
- * IRQ-driven mode to polling mode to prevent interrupt storm. The polling
- * is realized by worker thread, which is called every 20 or so milliseconds.
- * This gives the touchscreen enough fluency and does not strain the system
- * too much.
- */
-#define LRADC_TS_SAMPLE_DELAY_MS 5
-
-/*
- * The LRADC reads the following amount of samples from each touchscreen
- * channel and the driver then computes average of these.
- */
-#define LRADC_TS_SAMPLE_AMOUNT 4
-
-enum mxs_lradc_id {
- IMX23_LRADC,
- IMX28_LRADC,
-};
-
-static const char * const mx23_lradc_irq_names[] = {
- "mxs-lradc-touchscreen",
- "mxs-lradc-channel0",
- "mxs-lradc-channel1",
- "mxs-lradc-channel2",
- "mxs-lradc-channel3",
- "mxs-lradc-channel4",
- "mxs-lradc-channel5",
- "mxs-lradc-channel6",
- "mxs-lradc-channel7",
-};
-
-static const char * const mx28_lradc_irq_names[] = {
- "mxs-lradc-touchscreen",
- "mxs-lradc-thresh0",
- "mxs-lradc-thresh1",
- "mxs-lradc-channel0",
- "mxs-lradc-channel1",
- "mxs-lradc-channel2",
- "mxs-lradc-channel3",
- "mxs-lradc-channel4",
- "mxs-lradc-channel5",
- "mxs-lradc-channel6",
- "mxs-lradc-channel7",
- "mxs-lradc-button0",
- "mxs-lradc-button1",
-};
-
-struct mxs_lradc_of_config {
- const int irq_count;
- const char * const *irq_name;
- const uint32_t *vref_mv;
-};
-
-#define VREF_MV_BASE 1850
-
-static const uint32_t mx23_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
- VREF_MV_BASE, /* CH0 */
- VREF_MV_BASE, /* CH1 */
- VREF_MV_BASE, /* CH2 */
- VREF_MV_BASE, /* CH3 */
- VREF_MV_BASE, /* CH4 */
- VREF_MV_BASE, /* CH5 */
- VREF_MV_BASE * 2, /* CH6 VDDIO */
- VREF_MV_BASE * 4, /* CH7 VBATT */
- VREF_MV_BASE, /* CH8 Temp sense 0 */
- VREF_MV_BASE, /* CH9 Temp sense 1 */
- VREF_MV_BASE, /* CH10 */
- VREF_MV_BASE, /* CH11 */
- VREF_MV_BASE, /* CH12 USB_DP */
- VREF_MV_BASE, /* CH13 USB_DN */
- VREF_MV_BASE, /* CH14 VBG */
- VREF_MV_BASE * 4, /* CH15 VDD5V */
-};
-
-static const uint32_t mx28_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
- VREF_MV_BASE, /* CH0 */
- VREF_MV_BASE, /* CH1 */
- VREF_MV_BASE, /* CH2 */
- VREF_MV_BASE, /* CH3 */
- VREF_MV_BASE, /* CH4 */
- VREF_MV_BASE, /* CH5 */
- VREF_MV_BASE, /* CH6 */
- VREF_MV_BASE * 4, /* CH7 VBATT */
- VREF_MV_BASE, /* CH8 Temp sense 0 */
- VREF_MV_BASE, /* CH9 Temp sense 1 */
- VREF_MV_BASE * 2, /* CH10 VDDIO */
- VREF_MV_BASE, /* CH11 VTH */
- VREF_MV_BASE * 2, /* CH12 VDDA */
- VREF_MV_BASE, /* CH13 VDDD */
- VREF_MV_BASE, /* CH14 VBG */
- VREF_MV_BASE * 4, /* CH15 VDD5V */
-};
-
-static const struct mxs_lradc_of_config mxs_lradc_of_config[] = {
- [IMX23_LRADC] = {
- .irq_count = ARRAY_SIZE(mx23_lradc_irq_names),
- .irq_name = mx23_lradc_irq_names,
- .vref_mv = mx23_vref_mv,
- },
- [IMX28_LRADC] = {
- .irq_count = ARRAY_SIZE(mx28_lradc_irq_names),
- .irq_name = mx28_lradc_irq_names,
- .vref_mv = mx28_vref_mv,
- },
-};
-
-enum mxs_lradc_ts {
- MXS_LRADC_TOUCHSCREEN_NONE = 0,
- MXS_LRADC_TOUCHSCREEN_4WIRE,
- MXS_LRADC_TOUCHSCREEN_5WIRE,
-};
-
-/*
- * Touchscreen handling
- */
-enum lradc_ts_plate {
- LRADC_TOUCH = 0,
- LRADC_SAMPLE_X,
- LRADC_SAMPLE_Y,
- LRADC_SAMPLE_PRESSURE,
- LRADC_SAMPLE_VALID,
-};
-
-enum mxs_lradc_divbytwo {
- MXS_LRADC_DIV_DISABLED = 0,
- MXS_LRADC_DIV_ENABLED,
-};
-
-struct mxs_lradc_scale {
- unsigned int integer;
- unsigned int nano;
-};
-
-struct mxs_lradc {
- struct device *dev;
- void __iomem *base;
- int irq[13];
-
- struct clk *clk;
-
- uint32_t *buffer;
- struct iio_trigger *trig;
-
- struct mutex lock;
-
- struct completion completion;
-
- const uint32_t *vref_mv;
- struct mxs_lradc_scale scale_avail[LRADC_MAX_TOTAL_CHANS][2];
- unsigned long is_divided;
-
- /*
- * When the touchscreen is enabled, we give it two private virtual
- * channels: #6 and #7. This means that only 6 virtual channels (instead
- * of 8) will be available for buffered capture.
- */
-#define TOUCHSCREEN_VCHANNEL1 7
-#define TOUCHSCREEN_VCHANNEL2 6
-#define BUFFER_VCHANS_LIMITED 0x3f
-#define BUFFER_VCHANS_ALL 0xff
- u8 buffer_vchans;
-
- /*
- * Furthermore, certain LRADC channels are shared between touchscreen
- * and/or touch-buttons and generic LRADC block. Therefore when using
- * either of these, these channels are not available for the regular
- * sampling. The shared channels are as follows:
- *
- * CH0 -- Touch button #0
- * CH1 -- Touch button #1
- * CH2 -- Touch screen XPUL
- * CH3 -- Touch screen YPLL
- * CH4 -- Touch screen XNUL
- * CH5 -- Touch screen YNLR
- * CH6 -- Touch screen WIPER (5-wire only)
- *
- * The bit fields below represents which parts of the LRADC block are
- * switched into special mode of operation. These channels can not
- * be sampled as regular LRADC channels. The driver will refuse any
- * attempt to sample these channels.
- */
-#define CHAN_MASK_TOUCHBUTTON (BIT(1) | BIT(0))
-#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2)
-#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2)
- enum mxs_lradc_ts use_touchscreen;
- bool use_touchbutton;
-
- struct input_dev *ts_input;
-
- enum mxs_lradc_id soc;
- enum lradc_ts_plate cur_plate; /* state machine */
- bool ts_valid;
- unsigned ts_x_pos;
- unsigned ts_y_pos;
- unsigned ts_pressure;
-
- /* handle touchscreen's physical behaviour */
- /* samples per coordinate */
- unsigned over_sample_cnt;
- /* time clocks between samples */
- unsigned over_sample_delay;
- /* time in clocks to wait after the plates where switched */
- unsigned settling_delay;
-};
-
-#define LRADC_CTRL0 0x00
-# define LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE BIT(23)
-# define LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE BIT(22)
-# define LRADC_CTRL0_MX28_YNNSW /* YM */ BIT(21)
-# define LRADC_CTRL0_MX28_YPNSW /* YP */ BIT(20)
-# define LRADC_CTRL0_MX28_YPPSW /* YP */ BIT(19)
-# define LRADC_CTRL0_MX28_XNNSW /* XM */ BIT(18)
-# define LRADC_CTRL0_MX28_XNPSW /* XM */ BIT(17)
-# define LRADC_CTRL0_MX28_XPPSW /* XP */ BIT(16)
-
-# define LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE BIT(20)
-# define LRADC_CTRL0_MX23_YM BIT(19)
-# define LRADC_CTRL0_MX23_XM BIT(18)
-# define LRADC_CTRL0_MX23_YP BIT(17)
-# define LRADC_CTRL0_MX23_XP BIT(16)
-
-# define LRADC_CTRL0_MX28_PLATE_MASK \
- (LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE | \
- LRADC_CTRL0_MX28_YNNSW | LRADC_CTRL0_MX28_YPNSW | \
- LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW | \
- LRADC_CTRL0_MX28_XNPSW | LRADC_CTRL0_MX28_XPPSW)
-
-# define LRADC_CTRL0_MX23_PLATE_MASK \
- (LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE | \
- LRADC_CTRL0_MX23_YM | LRADC_CTRL0_MX23_XM | \
- LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XP)
-
-#define LRADC_CTRL1 0x10
-#define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN BIT(24)
-#define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16))
-#define LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK (0x1fff << 16)
-#define LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK (0x01ff << 16)
-#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16
-#define LRADC_CTRL1_TOUCH_DETECT_IRQ BIT(8)
-#define LRADC_CTRL1_LRADC_IRQ(n) (1 << (n))
-#define LRADC_CTRL1_MX28_LRADC_IRQ_MASK 0x1fff
-#define LRADC_CTRL1_MX23_LRADC_IRQ_MASK 0x01ff
-#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
-
-#define LRADC_CTRL2 0x20
-#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24
-#define LRADC_CTRL2_TEMPSENSE_PWD BIT(15)
-
-#define LRADC_STATUS 0x40
-#define LRADC_STATUS_TOUCH_DETECT_RAW BIT(0)
-
-#define LRADC_CH(n) (0x50 + (0x10 * (n)))
-#define LRADC_CH_ACCUMULATE BIT(29)
-#define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24)
-#define LRADC_CH_NUM_SAMPLES_OFFSET 24
-#define LRADC_CH_NUM_SAMPLES(x) \
- ((x) << LRADC_CH_NUM_SAMPLES_OFFSET)
-#define LRADC_CH_VALUE_MASK 0x3ffff
-#define LRADC_CH_VALUE_OFFSET 0
-
-#define LRADC_DELAY(n) (0xd0 + (0x10 * (n)))
-#define LRADC_DELAY_TRIGGER_LRADCS_MASK (0xff << 24)
-#define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24
-#define LRADC_DELAY_TRIGGER(x) \
- (((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \
- LRADC_DELAY_TRIGGER_LRADCS_MASK)
-#define LRADC_DELAY_KICK (1 << 20)
-#define LRADC_DELAY_TRIGGER_DELAYS_MASK (0xf << 16)
-#define LRADC_DELAY_TRIGGER_DELAYS_OFFSET 16
-#define LRADC_DELAY_TRIGGER_DELAYS(x) \
- (((x) << LRADC_DELAY_TRIGGER_DELAYS_OFFSET) & \
- LRADC_DELAY_TRIGGER_DELAYS_MASK)
-#define LRADC_DELAY_LOOP_COUNT_MASK (0x1f << 11)
-#define LRADC_DELAY_LOOP_COUNT_OFFSET 11
-#define LRADC_DELAY_LOOP(x) \
- (((x) << LRADC_DELAY_LOOP_COUNT_OFFSET) & \
- LRADC_DELAY_LOOP_COUNT_MASK)
-#define LRADC_DELAY_DELAY_MASK 0x7ff
-#define LRADC_DELAY_DELAY_OFFSET 0
-#define LRADC_DELAY_DELAY(x) \
- (((x) << LRADC_DELAY_DELAY_OFFSET) & \
- LRADC_DELAY_DELAY_MASK)
-
-#define LRADC_CTRL4 0x140
-#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
-#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
-#define LRADC_CTRL4_LRADCSELECT(n, x) \
- (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
- LRADC_CTRL4_LRADCSELECT_MASK(n))
-
-#define LRADC_RESOLUTION 12
-#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
-
-static void mxs_lradc_reg_set(struct mxs_lradc *lradc, u32 val, u32 reg)
-{
- writel(val, lradc->base + reg + STMP_OFFSET_REG_SET);
-}
-
-static void mxs_lradc_reg_clear(struct mxs_lradc *lradc, u32 val, u32 reg)
-{
- writel(val, lradc->base + reg + STMP_OFFSET_REG_CLR);
-}
-
-static void mxs_lradc_reg_wrt(struct mxs_lradc *lradc, u32 val, u32 reg)
-{
- writel(val, lradc->base + reg);
-}
-
-static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL0_MX23_PLATE_MASK;
- return LRADC_CTRL0_MX28_PLATE_MASK;
-}
-
-static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK;
- return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK;
-}
-
-static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL1_MX23_LRADC_IRQ_MASK;
- return LRADC_CTRL1_MX28_LRADC_IRQ_MASK;
-}
-
-static u32 mxs_lradc_touch_detect_bit(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE;
- return LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE;
-}
-
-static u32 mxs_lradc_drive_x_plate(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL0_MX23_XP | LRADC_CTRL0_MX23_XM;
- return LRADC_CTRL0_MX28_XPPSW | LRADC_CTRL0_MX28_XNNSW;
-}
-
-static u32 mxs_lradc_drive_y_plate(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_YM;
- return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_YNNSW;
-}
-
-static u32 mxs_lradc_drive_pressure(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XM;
- return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW;
-}
-
-static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
-{
- return !!(readl(lradc->base + LRADC_STATUS) &
- LRADC_STATUS_TOUCH_DETECT_RAW);
-}
-
-static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
- unsigned ch)
-{
- mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
- LRADC_CTRL4);
- mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
-}
-
-static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
-{
- /*
- * prepare for oversampling conversion
- *
- * from the datasheet:
- * "The ACCUMULATE bit in the appropriate channel register
- * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater than 0;
- * otherwise, the IRQs will not fire."
- */
- mxs_lradc_reg_wrt(lradc, LRADC_CH_ACCUMULATE |
- LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1),
- LRADC_CH(ch));
-
- /* from the datasheet:
- * "Software must clear this register in preparation for a
- * multi-cycle accumulation.
- */
- mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch));
-
- /*
- * prepare the delay/loop unit according to the oversampling count
- *
- * from the datasheet:
- * "The DELAY fields in HW_LRADC_DELAY0, HW_LRADC_DELAY1,
- * HW_LRADC_DELAY2, and HW_LRADC_DELAY3 must be non-zero; otherwise,
- * the LRADC will not trigger the delay group."
- */
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch) |
- LRADC_DELAY_TRIGGER_DELAYS(0) |
- LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) |
- LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
- LRADC_DELAY(3));
-
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
-
- /*
- * after changing the touchscreen plates setting
- * the signals need some initial time to settle. Start the
- * SoC's delay unit and start the conversion later
- * and automatically.
- */
- mxs_lradc_reg_wrt(lradc,
- LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
- LRADC_DELAY_TRIGGER_DELAYS(BIT(3)) | /* trigger DELAY unit#3 */
- LRADC_DELAY_KICK |
- LRADC_DELAY_DELAY(lradc->settling_delay),
- LRADC_DELAY(2));
-}
-
-/*
- * Pressure detection is special:
- * We want to do both required measurements for the pressure detection in
- * one turn. Use the hardware features to chain both conversions and let the
- * hardware report one interrupt if both conversions are done
- */
-static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
- unsigned ch2)
-{
- u32 reg;
-
- /*
- * prepare for oversampling conversion
- *
- * from the datasheet:
- * "The ACCUMULATE bit in the appropriate channel register
- * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater than 0;
- * otherwise, the IRQs will not fire."
- */
- reg = LRADC_CH_ACCUMULATE |
- LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1);
- mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1));
- mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2));
-
- /* from the datasheet:
- * "Software must clear this register in preparation for a
- * multi-cycle accumulation.
- */
- mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch1));
- mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch2));
-
- /* prepare the delay/loop unit according to the oversampling count */
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch1) |
- LRADC_DELAY_TRIGGER(1 << ch2) | /* start both channels */
- LRADC_DELAY_TRIGGER_DELAYS(0) |
- LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) |
- LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
- LRADC_DELAY(3));
-
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
-
- /*
- * after changing the touchscreen plates setting
- * the signals need some initial time to settle. Start the
- * SoC's delay unit and start the conversion later
- * and automatically.
- */
- mxs_lradc_reg_wrt(lradc,
- LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
- LRADC_DELAY_TRIGGER_DELAYS(BIT(3)) | /* trigger DELAY unit#3 */
- LRADC_DELAY_KICK |
- LRADC_DELAY_DELAY(lradc->settling_delay), LRADC_DELAY(2));
-}
-
-static unsigned mxs_lradc_read_raw_channel(struct mxs_lradc *lradc,
- unsigned channel)
-{
- u32 reg;
- unsigned num_samples, val;
-
- reg = readl(lradc->base + LRADC_CH(channel));
- if (reg & LRADC_CH_ACCUMULATE)
- num_samples = lradc->over_sample_cnt;
- else
- num_samples = 1;
-
- val = (reg & LRADC_CH_VALUE_MASK) >> LRADC_CH_VALUE_OFFSET;
- return val / num_samples;
-}
-
-static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
- unsigned ch1, unsigned ch2)
-{
- u32 reg, mask;
- unsigned pressure, m1, m2;
-
- mask = LRADC_CTRL1_LRADC_IRQ(ch1) | LRADC_CTRL1_LRADC_IRQ(ch2);
- reg = readl(lradc->base + LRADC_CTRL1) & mask;
-
- while (reg != mask) {
- reg = readl(lradc->base + LRADC_CTRL1) & mask;
- dev_dbg(lradc->dev, "One channel is still busy: %X\n", reg);
- }
-
- m1 = mxs_lradc_read_raw_channel(lradc, ch1);
- m2 = mxs_lradc_read_raw_channel(lradc, ch2);
-
- if (m2 == 0) {
- dev_warn(lradc->dev, "Cannot calculate pressure\n");
- return 1 << (LRADC_RESOLUTION - 1);
- }
-
- /* simply scale the value from 0 ... max ADC resolution */
- pressure = m1;
- pressure *= (1 << LRADC_RESOLUTION);
- pressure /= m2;
-
- dev_dbg(lradc->dev, "Pressure = %u\n", pressure);
- return pressure;
-}
-
-#define TS_CH_XP 2
-#define TS_CH_YP 3
-#define TS_CH_XM 4
-#define TS_CH_YM 5
-
-/*
- * YP(open)--+-------------+
- * | |--+
- * | | |
- * YM(-)--+-------------+ |
- * +--------------+
- * | |
- * XP(weak+) XM(open)
- *
- * "weak+" means 200k Ohm VDDIO
- * (-) means GND
- */
-static void mxs_lradc_setup_touch_detection(struct mxs_lradc *lradc)
-{
- /*
- * In order to detect a touch event the 'touch detect enable' bit
- * enables:
- * - a weak pullup to the X+ connector
- * - a strong ground at the Y- connector
- */
- mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
- mxs_lradc_reg_set(lradc, mxs_lradc_touch_detect_bit(lradc),
- LRADC_CTRL0);
-}
-
-/*
- * YP(meas)--+-------------+
- * | |--+
- * | | |
- * YM(open)--+-------------+ |
- * +--------------+
- * | |
- * XP(+) XM(-)
- *
- * (+) means here 1.85 V
- * (-) means here GND
- */
-static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
-{
- mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
- mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
-
- lradc->cur_plate = LRADC_SAMPLE_X;
- mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
- mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
-}
-
-/*
- * YP(+)--+-------------+
- * | |--+
- * | | |
- * YM(-)--+-------------+ |
- * +--------------+
- * | |
- * XP(open) XM(meas)
- *
- * (+) means here 1.85 V
- * (-) means here GND
- */
-static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
-{
- mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
- mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
-
- lradc->cur_plate = LRADC_SAMPLE_Y;
- mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
- mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
-}
-
-/*
- * YP(+)--+-------------+
- * | |--+
- * | | |
- * YM(meas)--+-------------+ |
- * +--------------+
- * | |
- * XP(meas) XM(-)
- *
- * (+) means here 1.85 V
- * (-) means here GND
- */
-static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
-{
- mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
- mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
-
- lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
- mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
- mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
- mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
- TOUCHSCREEN_VCHANNEL1);
-}
-
-static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
-{
- mxs_lradc_setup_touch_detection(lradc);
-
- lradc->cur_plate = LRADC_TOUCH;
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
- LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
- mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
-}
-
-static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
-{
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
- LRADC_CTRL1);
- mxs_lradc_reg_set(lradc,
- LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
- /*
- * start with the Y-pos, because it uses nearly the same plate
- * settings like the touch detection
- */
- mxs_lradc_prepare_y_pos(lradc);
-}
-
-static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
-{
- input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
- input_report_abs(lradc->ts_input, ABS_Y, lradc->ts_y_pos);
- input_report_abs(lradc->ts_input, ABS_PRESSURE, lradc->ts_pressure);
- input_report_key(lradc->ts_input, BTN_TOUCH, 1);
- input_sync(lradc->ts_input);
-}
-
-static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
-{
- mxs_lradc_setup_touch_detection(lradc);
- lradc->cur_plate = LRADC_SAMPLE_VALID;
- /*
- * start a dummy conversion to burn time to settle the signals
- * note: we are not interested in the conversion's value
- */
- mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
- mxs_lradc_reg_clear(lradc,
- LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
- LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
- mxs_lradc_reg_wrt(lradc,
- LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
- LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
- LRADC_DELAY(2));
-}
-
-/*
- * in order to avoid false measurements, report only samples where
- * the surface is still touched after the position measurement
- */
-static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
-{
- /* if it is still touched, report the sample */
- if (valid && mxs_lradc_check_touch_event(lradc)) {
- lradc->ts_valid = true;
- mxs_lradc_report_ts_event(lradc);
- }
-
- /* if it is even still touched, continue with the next measurement */
- if (mxs_lradc_check_touch_event(lradc)) {
- mxs_lradc_prepare_y_pos(lradc);
- return;
- }
-
- if (lradc->ts_valid) {
- /* signal the release */
- lradc->ts_valid = false;
- input_report_key(lradc->ts_input, BTN_TOUCH, 0);
- input_sync(lradc->ts_input);
- }
-
- /* if it is released, wait for the next touch via IRQ */
- lradc->cur_plate = LRADC_TOUCH;
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
- LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
- LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
- mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
-}
-
-/* touchscreen's state machine */
-static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
-{
- switch (lradc->cur_plate) {
- case LRADC_TOUCH:
- if (mxs_lradc_check_touch_event(lradc))
- mxs_lradc_start_touch_event(lradc);
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
- LRADC_CTRL1);
- return;
-
- case LRADC_SAMPLE_Y:
- lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
- TOUCHSCREEN_VCHANNEL1);
- mxs_lradc_prepare_x_pos(lradc);
- return;
-
- case LRADC_SAMPLE_X:
- lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
- TOUCHSCREEN_VCHANNEL1);
- mxs_lradc_prepare_pressure(lradc);
- return;
-
- case LRADC_SAMPLE_PRESSURE:
- lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
- TOUCHSCREEN_VCHANNEL2,
- TOUCHSCREEN_VCHANNEL1);
- mxs_lradc_complete_touch_event(lradc);
- return;
-
- case LRADC_SAMPLE_VALID:
- mxs_lradc_finish_touch_event(lradc, 1);
- break;
- }
-}
-
-/*
- * Raw I/O operations
- */
-static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
-{
- struct mxs_lradc *lradc = iio_priv(iio_dev);
- int ret;
-
- /*
-	 * Check that no buffered operation is in progress; if one is, simply
-	 * bail out. This could be improved to support both buffered and raw IO
-	 * at the same time, but the code would become horribly complicated, so
-	 * the KISS principle is applied here.
- */
- ret = mutex_trylock(&lradc->lock);
- if (!ret)
- return -EBUSY;
-
- reinit_completion(&lradc->completion);
-
- /*
-	 * No buffered operation is in progress, so map the channel and
-	 * trigger it. Virtual channel 0 is always used here, as the other
-	 * virtual channels are never used for raw sampling.
- */
- if (lradc->soc == IMX28_LRADC)
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
- LRADC_CTRL1);
- mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
-
- /* Enable / disable the divider per requirement */
- if (test_bit(chan, &lradc->is_divided))
- mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
- LRADC_CTRL2);
- else
- mxs_lradc_reg_clear(lradc,
- 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);
-
- /* Clean the slot's previous content, then set new one. */
- mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
- LRADC_CTRL4);
- mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4);
-
- mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0));
-
- /* Enable the IRQ and start sampling the channel. */
- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1);
- mxs_lradc_reg_set(lradc, BIT(0), LRADC_CTRL0);
-
- /* Wait for completion on the channel, 1 second max. */
- ret = wait_for_completion_killable_timeout(&lradc->completion, HZ);
- if (!ret)
- ret = -ETIMEDOUT;
- if (ret < 0)
- goto err;
-
- /* Read the data. */
- *val = readl(lradc->base + LRADC_CH(0)) & LRADC_CH_VALUE_MASK;
- ret = IIO_VAL_INT;
-
-err:
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1);
-
- mutex_unlock(&lradc->lock);
-
- return ret;
-}
-
-static int mxs_lradc_read_temp(struct iio_dev *iio_dev, int *val)
-{
- int ret, min, max;
-
- ret = mxs_lradc_read_single(iio_dev, 8, &min);
- if (ret != IIO_VAL_INT)
- return ret;
-
- ret = mxs_lradc_read_single(iio_dev, 9, &max);
- if (ret != IIO_VAL_INT)
- return ret;
-
- *val = max - min;
-
- return IIO_VAL_INT;
-}
-
-static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
- int *val, int *val2, long m)
-{
- struct mxs_lradc *lradc = iio_priv(iio_dev);
-
- switch (m) {
- case IIO_CHAN_INFO_RAW:
- if (chan->type == IIO_TEMP)
- return mxs_lradc_read_temp(iio_dev, val);
-
- return mxs_lradc_read_single(iio_dev, chan->channel, val);
-
- case IIO_CHAN_INFO_SCALE:
- if (chan->type == IIO_TEMP) {
- /* From the datasheet, we have to multiply by 1.012 and
- * divide by 4
- */
- *val = 0;
- *val2 = 253000;
- return IIO_VAL_INT_PLUS_MICRO;
- }
-
- *val = lradc->vref_mv[chan->channel];
- *val2 = chan->scan_type.realbits -
- test_bit(chan->channel, &lradc->is_divided);
- return IIO_VAL_FRACTIONAL_LOG2;
-
- case IIO_CHAN_INFO_OFFSET:
- if (chan->type == IIO_TEMP) {
-			/* The calculated value from the ADC is in Kelvin; we
-			 * want Celsius for hwmon, so the offset is
- * -272.15 * scale
- */
- *val = -1075;
- *val2 = 691699;
-
- return IIO_VAL_INT_PLUS_MICRO;
- }
-
- return -EINVAL;
-
- default:
- break;
- }
-
- return -EINVAL;
-}
-
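
As a side note, the die-temperature conversion encoded above (scale 0.253, i.e. 1.012/4 from the datasheet note, and the offset reported as -1075.691699) can be checked in isolation. A minimal userspace sketch, assuming an example raw reading rather than data from real hardware:

#include <stdio.h>

/*
 * Standalone sketch, not driver code: apply the IIO offset and scale that
 * mxs_lradc_read_raw() reports for the TEMP channel to an assumed raw
 * reading (channel 9 minus channel 8).
 */
int main(void)
{
	int raw = 1174;				/* assumed example reading */
	double scale = 0.253;			/* 0 + 253000 * 1e-6 */
	double offset = -1075.691699;		/* -1075 and 691699 * 1e-6 */
	double celsius = (raw + offset) * scale;	/* IIO: (raw + offset) * scale */

	printf("raw=%d -> %.2f degC\n", raw, celsius);
	return 0;
}
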
-static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
- int val, int val2, long m)
-{
- struct mxs_lradc *lradc = iio_priv(iio_dev);
- struct mxs_lradc_scale *scale_avail =
- lradc->scale_avail[chan->channel];
- int ret;
-
- ret = mutex_trylock(&lradc->lock);
- if (!ret)
- return -EBUSY;
-
- switch (m) {
- case IIO_CHAN_INFO_SCALE:
- ret = -EINVAL;
- if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
- val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
- /* divider by two disabled */
- clear_bit(chan->channel, &lradc->is_divided);
- ret = 0;
- } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
- val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
- /* divider by two enabled */
- set_bit(chan->channel, &lradc->is_divided);
- ret = 0;
- }
-
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- mutex_unlock(&lradc->lock);
-
- return ret;
-}
-
-static int mxs_lradc_write_raw_get_fmt(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
- long m)
-{
- return IIO_VAL_INT_PLUS_NANO;
-}
-
-static ssize_t mxs_lradc_show_scale_available_ch(struct device *dev,
- struct device_attribute *attr,
- char *buf,
- int ch)
-{
- struct iio_dev *iio = dev_to_iio_dev(dev);
- struct mxs_lradc *lradc = iio_priv(iio);
- int i, len = 0;
-
- for (i = 0; i < ARRAY_SIZE(lradc->scale_avail[ch]); i++)
- len += sprintf(buf + len, "%u.%09u ",
- lradc->scale_avail[ch][i].integer,
- lradc->scale_avail[ch][i].nano);
-
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static ssize_t mxs_lradc_show_scale_available(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *iio_attr = to_iio_dev_attr(attr);
-
- return mxs_lradc_show_scale_available_ch(dev, attr, buf,
- iio_attr->address);
-}
-
-#define SHOW_SCALE_AVAILABLE_ATTR(ch) \
-static IIO_DEVICE_ATTR(in_voltage##ch##_scale_available, S_IRUGO, \
- mxs_lradc_show_scale_available, NULL, ch)
-
-SHOW_SCALE_AVAILABLE_ATTR(0);
-SHOW_SCALE_AVAILABLE_ATTR(1);
-SHOW_SCALE_AVAILABLE_ATTR(2);
-SHOW_SCALE_AVAILABLE_ATTR(3);
-SHOW_SCALE_AVAILABLE_ATTR(4);
-SHOW_SCALE_AVAILABLE_ATTR(5);
-SHOW_SCALE_AVAILABLE_ATTR(6);
-SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(10);
-SHOW_SCALE_AVAILABLE_ATTR(11);
-SHOW_SCALE_AVAILABLE_ATTR(12);
-SHOW_SCALE_AVAILABLE_ATTR(13);
-SHOW_SCALE_AVAILABLE_ATTR(14);
-SHOW_SCALE_AVAILABLE_ATTR(15);
-
-static struct attribute *mxs_lradc_attributes[] = {
- &iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage1_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage2_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage3_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage4_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage13_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage14_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage15_scale_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group mxs_lradc_attribute_group = {
- .attrs = mxs_lradc_attributes,
-};
-
-static const struct iio_info mxs_lradc_iio_info = {
- .driver_module = THIS_MODULE,
- .read_raw = mxs_lradc_read_raw,
- .write_raw = mxs_lradc_write_raw,
- .write_raw_get_fmt = mxs_lradc_write_raw_get_fmt,
- .attrs = &mxs_lradc_attribute_group,
-};
-
-static int mxs_lradc_ts_open(struct input_dev *dev)
-{
- struct mxs_lradc *lradc = input_get_drvdata(dev);
-
- /* Enable the touch-detect circuitry. */
- mxs_lradc_enable_touch_detection(lradc);
-
- return 0;
-}
-
-static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
-{
- /* stop all interrupts from firing */
- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
- LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
- LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
-
- /* Power-down touchscreen touch-detect circuitry. */
- mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
-}
-
-static void mxs_lradc_ts_close(struct input_dev *dev)
-{
- struct mxs_lradc *lradc = input_get_drvdata(dev);
-
- mxs_lradc_disable_ts(lradc);
-}
-
-static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
-{
- struct input_dev *input;
- struct device *dev = lradc->dev;
- int ret;
-
- if (!lradc->use_touchscreen)
- return 0;
-
- input = input_allocate_device();
- if (!input)
- return -ENOMEM;
-
- input->name = DRIVER_NAME;
- input->id.bustype = BUS_HOST;
- input->dev.parent = dev;
- input->open = mxs_lradc_ts_open;
- input->close = mxs_lradc_ts_close;
-
- __set_bit(EV_ABS, input->evbit);
- __set_bit(EV_KEY, input->evbit);
- __set_bit(BTN_TOUCH, input->keybit);
- input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
- input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK,
- 0, 0);
-
- lradc->ts_input = input;
- input_set_drvdata(input, lradc);
- ret = input_register_device(input);
- if (ret)
- input_free_device(lradc->ts_input);
-
- return ret;
-}
-
-static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc)
-{
- if (!lradc->use_touchscreen)
- return;
-
- mxs_lradc_disable_ts(lradc);
- input_unregister_device(lradc->ts_input);
-}
-
-/*
- * IRQ Handling
- */
-static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
-{
- struct iio_dev *iio = data;
- struct mxs_lradc *lradc = iio_priv(iio);
- unsigned long reg = readl(lradc->base + LRADC_CTRL1);
- uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
- const uint32_t ts_irq_mask =
- LRADC_CTRL1_TOUCH_DETECT_IRQ |
- LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
- LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
-
- if (!(reg & mxs_lradc_irq_mask(lradc)))
- return IRQ_NONE;
-
- if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
- mxs_lradc_handle_touch(lradc);
-
- /* Make sure we don't clear the next conversion's interrupt. */
- clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
- LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
- }
-
- if (iio_buffer_enabled(iio)) {
- if (reg & lradc->buffer_vchans)
- iio_trigger_poll(iio->trig);
- } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
- complete(&lradc->completion);
- }
-
- mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Trigger handling
- */
-static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *iio = pf->indio_dev;
- struct mxs_lradc *lradc = iio_priv(iio);
- const uint32_t chan_value = LRADC_CH_ACCUMULATE |
- ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
- unsigned int i, j = 0;
-
- for_each_set_bit(i, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
- lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
- mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(j));
- lradc->buffer[j] &= LRADC_CH_VALUE_MASK;
- lradc->buffer[j] /= LRADC_DELAY_TIMER_LOOP;
- j++;
- }
-
- iio_push_to_buffers_with_timestamp(iio, lradc->buffer, pf->timestamp);
-
- iio_trigger_notify_done(iio->trig);
-
- return IRQ_HANDLED;
-}
-
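
To make the averaging explicit: each mapped slot is programmed with LRADC_CH_ACCUMULATE and a sample count of LRADC_DELAY_TIMER_LOOP, so the hardware sums that many conversions into one channel register and the handler divides the sum back down. A minimal sketch of the same accumulate-then-divide scheme, with an assumed loop count and made-up sample values:

#include <stdio.h>

/*
 * Standalone sketch of the accumulate-then-divide averaging done by the
 * trigger handler above. The loop count and the sample values are assumed
 * examples; in the driver the summing is done by the LRADC hardware.
 */
#define DELAY_TIMER_LOOP	5	/* assumed stand-in for LRADC_DELAY_TIMER_LOOP */

int main(void)
{
	unsigned int samples[DELAY_TIMER_LOOP] = { 2040, 2052, 2047, 2049, 2045 };
	unsigned int accumulated = 0;
	int i;

	for (i = 0; i < DELAY_TIMER_LOOP; i++)
		accumulated += samples[i];	/* done in hardware by the LRADC */

	printf("accumulated=%u average=%u\n",
	       accumulated, accumulated / DELAY_TIMER_LOOP);
	return 0;
}
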
-static int mxs_lradc_configure_trigger(struct iio_trigger *trig, bool state)
-{
- struct iio_dev *iio = iio_trigger_get_drvdata(trig);
- struct mxs_lradc *lradc = iio_priv(iio);
- const uint32_t st = state ? STMP_OFFSET_REG_SET : STMP_OFFSET_REG_CLR;
-
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_KICK, LRADC_DELAY(0) + st);
-
- return 0;
-}
-
-static const struct iio_trigger_ops mxs_lradc_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = &mxs_lradc_configure_trigger,
-};
-
-static int mxs_lradc_trigger_init(struct iio_dev *iio)
-{
- int ret;
- struct iio_trigger *trig;
- struct mxs_lradc *lradc = iio_priv(iio);
-
- trig = iio_trigger_alloc("%s-dev%i", iio->name, iio->id);
- if (trig == NULL)
- return -ENOMEM;
-
- trig->dev.parent = lradc->dev;
- iio_trigger_set_drvdata(trig, iio);
- trig->ops = &mxs_lradc_trigger_ops;
-
- ret = iio_trigger_register(trig);
- if (ret) {
- iio_trigger_free(trig);
- return ret;
- }
-
- lradc->trig = trig;
-
- return 0;
-}
-
-static void mxs_lradc_trigger_remove(struct iio_dev *iio)
-{
- struct mxs_lradc *lradc = iio_priv(iio);
-
- iio_trigger_unregister(lradc->trig);
- iio_trigger_free(lradc->trig);
-}
-
-static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
-{
- struct mxs_lradc *lradc = iio_priv(iio);
- int ret = 0, chan, ofs = 0;
- unsigned long enable = 0;
- uint32_t ctrl4_set = 0;
- uint32_t ctrl4_clr = 0;
- uint32_t ctrl1_irq = 0;
- const uint32_t chan_value = LRADC_CH_ACCUMULATE |
- ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
- const int len = bitmap_weight(iio->active_scan_mask,
- LRADC_MAX_TOTAL_CHANS);
-
- if (!len)
- return -EINVAL;
-
- /*
-	 * Lock the driver so raw access cannot be done during buffered
- * operation. This simplifies the code a lot.
- */
- ret = mutex_trylock(&lradc->lock);
- if (!ret)
- return -EBUSY;
-
- lradc->buffer = kmalloc_array(len, sizeof(*lradc->buffer), GFP_KERNEL);
- if (!lradc->buffer) {
- ret = -ENOMEM;
- goto err_mem;
- }
-
- if (lradc->soc == IMX28_LRADC)
- mxs_lradc_reg_clear(lradc,
- lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
- LRADC_CTRL1);
- mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
-
- for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
- ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
- ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
- ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
- mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(ofs));
- bitmap_set(&enable, ofs, 1);
- ofs++;
- }
-
- mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
- LRADC_DELAY_KICK, LRADC_DELAY(0));
- mxs_lradc_reg_clear(lradc, ctrl4_clr, LRADC_CTRL4);
- mxs_lradc_reg_set(lradc, ctrl4_set, LRADC_CTRL4);
- mxs_lradc_reg_set(lradc, ctrl1_irq, LRADC_CTRL1);
- mxs_lradc_reg_set(lradc, enable << LRADC_DELAY_TRIGGER_LRADCS_OFFSET,
- LRADC_DELAY(0));
-
- return 0;
-
-err_mem:
- mutex_unlock(&lradc->lock);
- return ret;
-}
-
-static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
-{
- struct mxs_lradc *lradc = iio_priv(iio);
-
- mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
- LRADC_DELAY_KICK, LRADC_DELAY(0));
-
- mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
- if (lradc->soc == IMX28_LRADC)
- mxs_lradc_reg_clear(lradc,
- lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
- LRADC_CTRL1);
-
- kfree(lradc->buffer);
- mutex_unlock(&lradc->lock);
-
- return 0;
-}
-
-static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
- const unsigned long *mask)
-{
- struct mxs_lradc *lradc = iio_priv(iio);
- const int map_chans = bitmap_weight(mask, LRADC_MAX_TOTAL_CHANS);
- int rsvd_chans = 0;
- unsigned long rsvd_mask = 0;
-
- if (lradc->use_touchbutton)
- rsvd_mask |= CHAN_MASK_TOUCHBUTTON;
- if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_4WIRE)
- rsvd_mask |= CHAN_MASK_TOUCHSCREEN_4WIRE;
- if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
- rsvd_mask |= CHAN_MASK_TOUCHSCREEN_5WIRE;
-
- if (lradc->use_touchbutton)
- rsvd_chans++;
- if (lradc->use_touchscreen)
- rsvd_chans += 2;
-
-	/* Test for attempts to map channels with a special mode of operation. */
- if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
- return false;
-
-	/* Test for attempts to map more channels than available slots. */
- if (map_chans + rsvd_chans > LRADC_MAX_MAPPED_CHANS)
- return false;
-
- return true;
-}
-
-static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
- .preenable = &mxs_lradc_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
- .predisable = &iio_triggered_buffer_predisable,
- .postdisable = &mxs_lradc_buffer_postdisable,
- .validate_scan_mask = &mxs_lradc_validate_scan_mask,
-};
-
-/*
- * Driver initialization
- */
-
-#define MXS_ADC_CHAN(idx, chan_type, name) { \
- .type = (chan_type), \
- .indexed = 1, \
- .scan_index = (idx), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
- .channel = (idx), \
- .address = (idx), \
- .scan_type = { \
- .sign = 'u', \
- .realbits = LRADC_RESOLUTION, \
- .storagebits = 32, \
- }, \
- .datasheet_name = (name), \
-}
-
-static const struct iio_chan_spec mx23_lradc_chan_spec[] = {
- MXS_ADC_CHAN(0, IIO_VOLTAGE, "LRADC0"),
- MXS_ADC_CHAN(1, IIO_VOLTAGE, "LRADC1"),
- MXS_ADC_CHAN(2, IIO_VOLTAGE, "LRADC2"),
- MXS_ADC_CHAN(3, IIO_VOLTAGE, "LRADC3"),
- MXS_ADC_CHAN(4, IIO_VOLTAGE, "LRADC4"),
- MXS_ADC_CHAN(5, IIO_VOLTAGE, "LRADC5"),
- MXS_ADC_CHAN(6, IIO_VOLTAGE, "VDDIO"),
- MXS_ADC_CHAN(7, IIO_VOLTAGE, "VBATT"),
- /* Combined Temperature sensors */
- {
- .type = IIO_TEMP,
- .indexed = 1,
- .scan_index = 8,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .channel = 8,
- .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
- .datasheet_name = "TEMP_DIE",
- },
- /* Hidden channel to keep indexes */
- {
- .type = IIO_TEMP,
- .indexed = 1,
- .scan_index = -1,
- .channel = 9,
- },
- MXS_ADC_CHAN(10, IIO_VOLTAGE, NULL),
- MXS_ADC_CHAN(11, IIO_VOLTAGE, NULL),
- MXS_ADC_CHAN(12, IIO_VOLTAGE, "USB_DP"),
- MXS_ADC_CHAN(13, IIO_VOLTAGE, "USB_DN"),
- MXS_ADC_CHAN(14, IIO_VOLTAGE, "VBG"),
- MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
-};
-
-static const struct iio_chan_spec mx28_lradc_chan_spec[] = {
- MXS_ADC_CHAN(0, IIO_VOLTAGE, "LRADC0"),
- MXS_ADC_CHAN(1, IIO_VOLTAGE, "LRADC1"),
- MXS_ADC_CHAN(2, IIO_VOLTAGE, "LRADC2"),
- MXS_ADC_CHAN(3, IIO_VOLTAGE, "LRADC3"),
- MXS_ADC_CHAN(4, IIO_VOLTAGE, "LRADC4"),
- MXS_ADC_CHAN(5, IIO_VOLTAGE, "LRADC5"),
- MXS_ADC_CHAN(6, IIO_VOLTAGE, "LRADC6"),
- MXS_ADC_CHAN(7, IIO_VOLTAGE, "VBATT"),
- /* Combined Temperature sensors */
- {
- .type = IIO_TEMP,
- .indexed = 1,
- .scan_index = 8,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .channel = 8,
- .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
- .datasheet_name = "TEMP_DIE",
- },
- /* Hidden channel to keep indexes */
- {
- .type = IIO_TEMP,
- .indexed = 1,
- .scan_index = -1,
- .channel = 9,
- },
- MXS_ADC_CHAN(10, IIO_VOLTAGE, "VDDIO"),
- MXS_ADC_CHAN(11, IIO_VOLTAGE, "VTH"),
- MXS_ADC_CHAN(12, IIO_VOLTAGE, "VDDA"),
- MXS_ADC_CHAN(13, IIO_VOLTAGE, "VDDD"),
- MXS_ADC_CHAN(14, IIO_VOLTAGE, "VBG"),
- MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
-};
-
-static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
-{
- /* The ADC always uses DELAY CHANNEL 0. */
- const uint32_t adc_cfg =
- (1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
- (LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);
-
- int ret = stmp_reset_block(lradc->base);
-
- if (ret)
- return ret;
-
- /* Configure DELAY CHANNEL 0 for generic ADC sampling. */
- mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0));
-
- /* Disable remaining DELAY CHANNELs */
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(1));
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
-
- /* Configure the touchscreen type */
- if (lradc->soc == IMX28_LRADC) {
- mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
- LRADC_CTRL0);
-
- if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
- mxs_lradc_reg_set(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
- LRADC_CTRL0);
- }
-
- /* Start internal temperature sensing. */
- mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2);
-
- return 0;
-}
-
-static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
-{
- int i;
-
- mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1);
-
- for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++)
- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i));
-}
-
-static const struct of_device_id mxs_lradc_dt_ids[] = {
- { .compatible = "fsl,imx23-lradc", .data = (void *)IMX23_LRADC, },
- { .compatible = "fsl,imx28-lradc", .data = (void *)IMX28_LRADC, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_lradc_dt_ids);
-
-static int mxs_lradc_probe_touchscreen(struct mxs_lradc *lradc,
- struct device_node *lradc_node)
-{
- int ret;
- u32 ts_wires = 0, adapt;
-
- ret = of_property_read_u32(lradc_node, "fsl,lradc-touchscreen-wires",
- &ts_wires);
- if (ret)
- return -ENODEV; /* touchscreen feature disabled */
-
- switch (ts_wires) {
- case 4:
- lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_4WIRE;
- break;
- case 5:
- if (lradc->soc == IMX28_LRADC) {
- lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_5WIRE;
- break;
- }
-		/* fall through to the error message for i.MX23 */
- default:
- dev_err(lradc->dev,
- "Unsupported number of touchscreen wires (%d)\n",
- ts_wires);
- return -EINVAL;
- }
-
- if (of_property_read_u32(lradc_node, "fsl,ave-ctrl", &adapt)) {
- lradc->over_sample_cnt = 4;
- } else {
- if (adapt < 1 || adapt > 32) {
- dev_err(lradc->dev, "Invalid sample count (%u)\n",
- adapt);
- return -EINVAL;
- }
- lradc->over_sample_cnt = adapt;
- }
-
- if (of_property_read_u32(lradc_node, "fsl,ave-delay", &adapt)) {
- lradc->over_sample_delay = 2;
- } else {
- if (adapt < 2 || adapt > LRADC_DELAY_DELAY_MASK + 1) {
- dev_err(lradc->dev, "Invalid sample delay (%u)\n",
- adapt);
- return -EINVAL;
- }
- lradc->over_sample_delay = adapt;
- }
-
- if (of_property_read_u32(lradc_node, "fsl,settling", &adapt)) {
- lradc->settling_delay = 10;
- } else {
- if (adapt < 1 || adapt > LRADC_DELAY_DELAY_MASK) {
- dev_err(lradc->dev, "Invalid settling delay (%u)\n",
- adapt);
- return -EINVAL;
- }
- lradc->settling_delay = adapt;
- }
-
- return 0;
-}
-
-static int mxs_lradc_probe(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(mxs_lradc_dt_ids, &pdev->dev);
- const struct mxs_lradc_of_config *of_cfg =
- &mxs_lradc_of_config[(enum mxs_lradc_id)of_id->data];
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- struct mxs_lradc *lradc;
- struct iio_dev *iio;
- struct resource *iores;
- int ret = 0, touch_ret;
- int i, s;
- uint64_t scale_uv;
-
- /* Allocate the IIO device. */
- iio = devm_iio_device_alloc(dev, sizeof(*lradc));
- if (!iio) {
- dev_err(dev, "Failed to allocate IIO device\n");
- return -ENOMEM;
- }
-
- lradc = iio_priv(iio);
- lradc->soc = (enum mxs_lradc_id)of_id->data;
-
- /* Grab the memory area */
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lradc->dev = &pdev->dev;
- lradc->base = devm_ioremap_resource(dev, iores);
- if (IS_ERR(lradc->base))
- return PTR_ERR(lradc->base);
-
- lradc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(lradc->clk)) {
- dev_err(dev, "Failed to get the delay unit clock\n");
- return PTR_ERR(lradc->clk);
- }
- ret = clk_prepare_enable(lradc->clk);
- if (ret != 0) {
- dev_err(dev, "Failed to enable the delay unit clock\n");
- return ret;
- }
-
- touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
-
- if (touch_ret == 0)
- lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
- else
- lradc->buffer_vchans = BUFFER_VCHANS_ALL;
-
- /* Grab all IRQ sources */
- for (i = 0; i < of_cfg->irq_count; i++) {
- lradc->irq[i] = platform_get_irq(pdev, i);
- if (lradc->irq[i] < 0) {
- ret = lradc->irq[i];
- goto err_clk;
- }
-
- ret = devm_request_irq(dev, lradc->irq[i],
- mxs_lradc_handle_irq, 0,
- of_cfg->irq_name[i], iio);
- if (ret)
- goto err_clk;
- }
-
- lradc->vref_mv = of_cfg->vref_mv;
-
- platform_set_drvdata(pdev, iio);
-
- init_completion(&lradc->completion);
- mutex_init(&lradc->lock);
-
- iio->name = pdev->name;
- iio->dev.parent = &pdev->dev;
- iio->info = &mxs_lradc_iio_info;
- iio->modes = INDIO_DIRECT_MODE;
- iio->masklength = LRADC_MAX_TOTAL_CHANS;
-
- if (lradc->soc == IMX23_LRADC) {
- iio->channels = mx23_lradc_chan_spec;
- iio->num_channels = ARRAY_SIZE(mx23_lradc_chan_spec);
- } else {
- iio->channels = mx28_lradc_chan_spec;
- iio->num_channels = ARRAY_SIZE(mx28_lradc_chan_spec);
- }
-
- ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
- &mxs_lradc_trigger_handler,
- &mxs_lradc_buffer_ops);
- if (ret)
- goto err_clk;
-
- ret = mxs_lradc_trigger_init(iio);
- if (ret)
- goto err_trig;
-
- /* Populate available ADC input ranges */
- for (i = 0; i < LRADC_MAX_TOTAL_CHANS; i++) {
- for (s = 0; s < ARRAY_SIZE(lradc->scale_avail[i]); s++) {
- /*
- * [s=0] = optional divider by two disabled (default)
- * [s=1] = optional divider by two enabled
- *
- * The scale is calculated by doing:
- * Vref >> (realbits - s)
-			 * which doubles the scale for the second entry
-			 * of the array.
- */
- scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
- (LRADC_RESOLUTION - s);
- lradc->scale_avail[i][s].nano =
- do_div(scale_uv, 100000000) * 10;
- lradc->scale_avail[i][s].integer = scale_uv;
- }
- }
-
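
For clarity, the integer/nano split computed by the loop above can be reproduced in a few lines of plain C. The reference voltage and resolution below are assumed example values; the driver takes vref_mv per channel from its SoC configuration:

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone sketch of the scale_avail computation above: split
 * vref_mv >> (resolution - s) into integer and nano parts the same way the
 * driver does with do_div(). 1850 mV and 12 bits are assumptions for
 * illustration only.
 */
int main(void)
{
	const unsigned int vref_mv = 1850;	/* assumed reference, in mV */
	const unsigned int resolution = 12;	/* assumed LRADC resolution */
	int s;

	for (s = 0; s < 2; s++) {
		uint64_t scale_uv = ((uint64_t)vref_mv * 100000000ULL) >>
				    (resolution - s);
		unsigned int nano = (unsigned int)(scale_uv % 100000000ULL) * 10;
		unsigned int integer = (unsigned int)(scale_uv / 100000000ULL);

		printf("divider %s: %u.%09u\n",
		       s ? "enabled " : "disabled", integer, nano);
	}
	return 0;
}
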
- /* Configure the hardware. */
- ret = mxs_lradc_hw_init(lradc);
- if (ret)
- goto err_dev;
-
- /* Register the touchscreen input device. */
- if (touch_ret == 0) {
- ret = mxs_lradc_ts_register(lradc);
- if (ret)
- goto err_ts_register;
- }
-
- /* Register IIO device. */
- ret = iio_device_register(iio);
- if (ret) {
- dev_err(dev, "Failed to register IIO device\n");
- goto err_ts;
- }
-
- return 0;
-
-err_ts:
- mxs_lradc_ts_unregister(lradc);
-err_ts_register:
- mxs_lradc_hw_stop(lradc);
-err_dev:
- mxs_lradc_trigger_remove(iio);
-err_trig:
- iio_triggered_buffer_cleanup(iio);
-err_clk:
- clk_disable_unprepare(lradc->clk);
- return ret;
-}
-
-static int mxs_lradc_remove(struct platform_device *pdev)
-{
- struct iio_dev *iio = platform_get_drvdata(pdev);
- struct mxs_lradc *lradc = iio_priv(iio);
-
- iio_device_unregister(iio);
- mxs_lradc_ts_unregister(lradc);
- mxs_lradc_hw_stop(lradc);
- mxs_lradc_trigger_remove(iio);
- iio_triggered_buffer_cleanup(iio);
-
- clk_disable_unprepare(lradc->clk);
- return 0;
-}
-
-static struct platform_driver mxs_lradc_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = mxs_lradc_dt_ids,
- },
- .probe = mxs_lradc_probe,
- .remove = mxs_lradc_remove,
-};
-
-module_platform_driver(mxs_lradc_driver);
-
-MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
-MODULE_DESCRIPTION("Freescale MXS LRADC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
deleted file mode 100644
index c5382374ddf6..000000000000
--- a/drivers/staging/iio/adc/spear_adc.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * ST SPEAr ADC driver
- *
- * Copyright 2012 Stefan Roese <sr@denx.de>
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/completion.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-/* SPEAR registers definitions */
-#define SPEAR600_ADC_SCAN_RATE_LO(x) ((x) & 0xFFFF)
-#define SPEAR600_ADC_SCAN_RATE_HI(x) (((x) >> 0x10) & 0xFFFF)
-#define SPEAR_ADC_CLK_LOW(x) (((x) & 0xf) << 0)
-#define SPEAR_ADC_CLK_HIGH(x) (((x) & 0xf) << 4)
-
-/* Bit definitions for SPEAR_ADC_STATUS */
-#define SPEAR_ADC_STATUS_START_CONVERSION BIT(0)
-#define SPEAR_ADC_STATUS_CHANNEL_NUM(x) ((x) << 1)
-#define SPEAR_ADC_STATUS_ADC_ENABLE BIT(4)
-#define SPEAR_ADC_STATUS_AVG_SAMPLE(x) ((x) << 5)
-#define SPEAR_ADC_STATUS_VREF_INTERNAL BIT(9)
-
-#define SPEAR_ADC_DATA_MASK 0x03ff
-#define SPEAR_ADC_DATA_BITS 10
-
-#define SPEAR_ADC_MOD_NAME "spear-adc"
-
-#define SPEAR_ADC_CHANNEL_NUM 8
-
-#define SPEAR_ADC_CLK_MIN 2500000
-#define SPEAR_ADC_CLK_MAX 20000000
-
-struct adc_regs_spear3xx {
- u32 status;
- u32 average;
- u32 scan_rate;
- u32 clk; /* Not avail for 1340 & 1310 */
- u32 ch_ctrl[SPEAR_ADC_CHANNEL_NUM];
- u32 ch_data[SPEAR_ADC_CHANNEL_NUM];
-};
-
-struct chan_data {
- u32 lsb;
- u32 msb;
-};
-
-struct adc_regs_spear6xx {
- u32 status;
- u32 pad[2];
- u32 clk;
- u32 ch_ctrl[SPEAR_ADC_CHANNEL_NUM];
- struct chan_data ch_data[SPEAR_ADC_CHANNEL_NUM];
- u32 scan_rate_lo;
- u32 scan_rate_hi;
- struct chan_data average;
-};
-
-struct spear_adc_state {
- struct device_node *np;
- struct adc_regs_spear3xx __iomem *adc_base_spear3xx;
- struct adc_regs_spear6xx __iomem *adc_base_spear6xx;
- struct clk *clk;
- struct completion completion;
- u32 current_clk;
- u32 sampling_freq;
- u32 avg_samples;
- u32 vref_external;
- u32 value;
-};
-
-/*
- * Functions to access the SPEAr ADC registers. Abstracted into
- * small helper functions because of the different register offsets
- * on different SoC variants (SPEAr300 vs SPEAr600 etc.).
- */
-static void spear_adc_set_status(struct spear_adc_state *st, u32 val)
-{
- __raw_writel(val, &st->adc_base_spear6xx->status);
-}
-
-static void spear_adc_set_clk(struct spear_adc_state *st, u32 val)
-{
- u32 clk_high, clk_low, count;
- u32 apb_clk = clk_get_rate(st->clk);
-
- count = DIV_ROUND_UP(apb_clk, val);
- clk_low = count / 2;
- clk_high = count - clk_low;
- st->current_clk = apb_clk / count;
-
- __raw_writel(SPEAR_ADC_CLK_LOW(clk_low) | SPEAR_ADC_CLK_HIGH(clk_high),
- &st->adc_base_spear6xx->clk);
-}
-
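
The divider arithmetic above is easy to sanity-check on its own. A minimal sketch, assuming an example APB clock of 166 MHz and the minimum supported ADC clock of 2.5 MHz (the APB rate is an assumption; on real hardware it comes from clk_get_rate()):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Standalone sketch of spear_adc_set_clk()'s divider computation. The APB
 * clock rate and requested ADC clock below are assumed example values.
 */
int main(void)
{
	unsigned int apb_clk = 166000000;	/* assumed APB clock, Hz */
	unsigned int requested = 2500000;	/* e.g. SPEAR_ADC_CLK_MIN */

	unsigned int count = DIV_ROUND_UP(apb_clk, requested);
	unsigned int clk_low = count / 2;
	unsigned int clk_high = count - clk_low;
	unsigned int current_clk = apb_clk / count;

	printf("count=%u low=%u high=%u -> effective %u Hz\n",
	       count, clk_low, clk_high, current_clk);
	return 0;
}
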
-static void spear_adc_set_ctrl(struct spear_adc_state *st, int n,
- u32 val)
-{
- __raw_writel(val, &st->adc_base_spear6xx->ch_ctrl[n]);
-}
-
-static u32 spear_adc_get_average(struct spear_adc_state *st)
-{
- if (of_device_is_compatible(st->np, "st,spear600-adc")) {
- return __raw_readl(&st->adc_base_spear6xx->average.msb) &
- SPEAR_ADC_DATA_MASK;
- } else {
- return __raw_readl(&st->adc_base_spear3xx->average) &
- SPEAR_ADC_DATA_MASK;
- }
-}
-
-static void spear_adc_set_scanrate(struct spear_adc_state *st, u32 rate)
-{
- if (of_device_is_compatible(st->np, "st,spear600-adc")) {
- __raw_writel(SPEAR600_ADC_SCAN_RATE_LO(rate),
- &st->adc_base_spear6xx->scan_rate_lo);
- __raw_writel(SPEAR600_ADC_SCAN_RATE_HI(rate),
- &st->adc_base_spear6xx->scan_rate_hi);
- } else {
- __raw_writel(rate, &st->adc_base_spear3xx->scan_rate);
- }
-}
-
-static int spear_adc_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct spear_adc_state *st = iio_priv(indio_dev);
- u32 status;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
-
- status = SPEAR_ADC_STATUS_CHANNEL_NUM(chan->channel) |
- SPEAR_ADC_STATUS_AVG_SAMPLE(st->avg_samples) |
- SPEAR_ADC_STATUS_START_CONVERSION |
- SPEAR_ADC_STATUS_ADC_ENABLE;
- if (st->vref_external == 0)
- status |= SPEAR_ADC_STATUS_VREF_INTERNAL;
-
- spear_adc_set_status(st, status);
- wait_for_completion(&st->completion); /* set by ISR */
- *val = st->value;
-
- mutex_unlock(&indio_dev->mlock);
-
- return IIO_VAL_INT;
-
- case IIO_CHAN_INFO_SCALE:
- *val = st->vref_external;
- *val2 = SPEAR_ADC_DATA_BITS;
- return IIO_VAL_FRACTIONAL_LOG2;
- case IIO_CHAN_INFO_SAMP_FREQ:
- *val = st->current_clk;
- return IIO_VAL_INT;
- }
-
- return -EINVAL;
-}
-
-static int spear_adc_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct spear_adc_state *st = iio_priv(indio_dev);
- int ret = 0;
-
- if (mask != IIO_CHAN_INFO_SAMP_FREQ)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
-
- if ((val < SPEAR_ADC_CLK_MIN) ||
- (val > SPEAR_ADC_CLK_MAX) ||
- (val2 != 0)) {
- ret = -EINVAL;
- goto out;
- }
-
- spear_adc_set_clk(st, val);
-
-out:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
-
-#define SPEAR_ADC_CHAN(idx) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
- .channel = idx, \
-}
-
-static const struct iio_chan_spec spear_adc_iio_channels[] = {
- SPEAR_ADC_CHAN(0),
- SPEAR_ADC_CHAN(1),
- SPEAR_ADC_CHAN(2),
- SPEAR_ADC_CHAN(3),
- SPEAR_ADC_CHAN(4),
- SPEAR_ADC_CHAN(5),
- SPEAR_ADC_CHAN(6),
- SPEAR_ADC_CHAN(7),
-};
-
-static irqreturn_t spear_adc_isr(int irq, void *dev_id)
-{
- struct spear_adc_state *st = dev_id;
-
- /* Read value to clear IRQ */
- st->value = spear_adc_get_average(st);
- complete(&st->completion);
-
- return IRQ_HANDLED;
-}
-
-static int spear_adc_configure(struct spear_adc_state *st)
-{
- int i;
-
- /* Reset ADC core */
- spear_adc_set_status(st, 0);
- __raw_writel(0, &st->adc_base_spear6xx->clk);
- for (i = 0; i < 8; i++)
- spear_adc_set_ctrl(st, i, 0);
- spear_adc_set_scanrate(st, 0);
-
- spear_adc_set_clk(st, st->sampling_freq);
-
- return 0;
-}
-
-static const struct iio_info spear_adc_info = {
- .read_raw = &spear_adc_read_raw,
- .write_raw = &spear_adc_write_raw,
- .driver_module = THIS_MODULE,
-};
-
-static int spear_adc_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct spear_adc_state *st;
- struct iio_dev *indio_dev = NULL;
- int ret = -ENODEV;
- int irq;
-
- indio_dev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_state));
- if (!indio_dev) {
- dev_err(dev, "failed allocating iio device\n");
- return -ENOMEM;
- }
-
- st = iio_priv(indio_dev);
- st->np = np;
-
- /*
-	 * SPEAr600 has a different register layout than other SPEAr SoCs
- * (e.g. SPEAr3xx). Let's provide two register base addresses
- * to support multi-arch kernels.
- */
- st->adc_base_spear6xx = of_iomap(np, 0);
- if (!st->adc_base_spear6xx) {
- dev_err(dev, "failed mapping memory\n");
- return -ENOMEM;
- }
- st->adc_base_spear3xx =
- (struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx;
-
- st->clk = clk_get(dev, NULL);
- if (IS_ERR(st->clk)) {
- dev_err(dev, "failed getting clock\n");
- goto errout1;
- }
-
- ret = clk_prepare_enable(st->clk);
- if (ret) {
- dev_err(dev, "failed enabling clock\n");
- goto errout2;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "failed getting interrupt resource\n");
- ret = -EINVAL;
- goto errout3;
- }
-
- ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME,
- st);
- if (ret < 0) {
- dev_err(dev, "failed requesting interrupt\n");
- goto errout3;
- }
-
- if (of_property_read_u32(np, "sampling-frequency",
- &st->sampling_freq)) {
- dev_err(dev, "sampling-frequency missing in DT\n");
- ret = -EINVAL;
- goto errout3;
- }
-
- /*
- * Optional avg_samples defaults to 0, resulting in single data
- * conversion
- */
- of_property_read_u32(np, "average-samples", &st->avg_samples);
-
- /*
- * Optional vref_external defaults to 0, resulting in internal vref
- * selection
- */
- of_property_read_u32(np, "vref-external", &st->vref_external);
-
- spear_adc_configure(st);
-
- platform_set_drvdata(pdev, indio_dev);
-
- init_completion(&st->completion);
-
- indio_dev->name = SPEAR_ADC_MOD_NAME;
- indio_dev->dev.parent = dev;
- indio_dev->info = &spear_adc_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = spear_adc_iio_channels;
- indio_dev->num_channels = ARRAY_SIZE(spear_adc_iio_channels);
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto errout3;
-
- dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);
-
- return 0;
-
-errout3:
- clk_disable_unprepare(st->clk);
-errout2:
- clk_put(st->clk);
-errout1:
- iounmap(st->adc_base_spear6xx);
- return ret;
-}
-
-static int spear_adc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct spear_adc_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- clk_disable_unprepare(st->clk);
- clk_put(st->clk);
- iounmap(st->adc_base_spear6xx);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id spear_adc_dt_ids[] = {
- { .compatible = "st,spear600-adc", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, spear_adc_dt_ids);
-#endif
-
-static struct platform_driver spear_adc_driver = {
- .probe = spear_adc_probe,
- .remove = spear_adc_remove,
- .driver = {
- .name = SPEAR_ADC_MOD_NAME,
- .of_match_table = of_match_ptr(spear_adc_dt_ids),
- },
-};
-
-module_platform_driver(spear_adc_driver);
-
-MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
-MODULE_DESCRIPTION("SPEAr ADC driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
index ba18b8432d9c..b7c3c4a7dfe4 100644
--- a/drivers/staging/iio/addac/Kconfig
+++ b/drivers/staging/iio/addac/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# ADDAC drivers
#
diff --git a/drivers/staging/iio/addac/Makefile b/drivers/staging/iio/addac/Makefile
index 4c7686133692..8fdbd8cab21f 100644
--- a/drivers/staging/iio/addac/Makefile
+++ b/drivers/staging/iio/addac/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O ADDAC drivers
#
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 78fe0b557280..f45968ef94ea 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* I2C bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
* sensor, ADC and DAC
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
@@ -21,7 +20,7 @@
static int adt7316_i2c_read(void *client, u8 reg, u8 *data)
{
struct i2c_client *cl = client;
- int ret = 0;
+ int ret;
ret = i2c_smbus_write_byte(cl, reg);
if (ret < 0) {
@@ -35,13 +34,15 @@ static int adt7316_i2c_read(void *client, u8 reg, u8 *data)
return ret;
}
+ *data = ret;
+
return 0;
}
static int adt7316_i2c_write(void *client, u8 reg, u8 data)
{
struct i2c_client *cl = client;
- int ret = 0;
+ int ret;
ret = i2c_smbus_write_byte_data(cl, reg, data);
if (ret < 0)
@@ -53,7 +54,7 @@ static int adt7316_i2c_write(void *client, u8 reg, u8 data)
static int adt7316_i2c_multi_read(void *client, u8 reg, u8 count, u8 *data)
{
struct i2c_client *cl = client;
- int i, ret = 0;
+ int i, ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
@@ -72,7 +73,7 @@ static int adt7316_i2c_multi_read(void *client, u8 reg, u8 count, u8 *data)
static int adt7316_i2c_multi_write(void *client, u8 reg, u8 count, u8 *data)
{
struct i2c_client *cl = client;
- int i, ret = 0;
+ int i, ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
@@ -92,13 +93,12 @@ static int adt7316_i2c_multi_write(void *client, u8 reg, u8 count, u8 *data)
* device probe and remove
*/
-static int adt7316_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adt7316_i2c_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct adt7316_bus bus = {
.client = client,
.irq = client->irq,
- .irq_flags = IRQF_TRIGGER_LOW,
.read = adt7316_i2c_read,
.write = adt7316_i2c_write,
.multi_read = adt7316_i2c_multi_read,
@@ -109,20 +109,33 @@ static int adt7316_i2c_probe(struct i2c_client *client,
}
static const struct i2c_device_id adt7316_i2c_id[] = {
- { "adt7316", 0 },
- { "adt7317", 0 },
- { "adt7318", 0 },
- { "adt7516", 0 },
- { "adt7517", 0 },
- { "adt7519", 0 },
+ { "adt7316" },
+ { "adt7317" },
+ { "adt7318" },
+ { "adt7516" },
+ { "adt7517" },
+ { "adt7519" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7316_i2c_id);
+static const struct of_device_id adt7316_of_match[] = {
+ { .compatible = "adi,adt7316" },
+ { .compatible = "adi,adt7317" },
+ { .compatible = "adi,adt7318" },
+ { .compatible = "adi,adt7516" },
+ { .compatible = "adi,adt7517" },
+ { .compatible = "adi,adt7519" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, adt7316_of_match);
+
static struct i2c_driver adt7316_driver = {
.driver = {
.name = "adt7316",
+ .of_match_table = adt7316_of_match,
.pm = ADT7316_PM_OPS,
},
.probe = adt7316_i2c_probe,
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index e480abb72e4a..af513e003da7 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
 * SPI bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
* sensor, ADC and DAC
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
@@ -27,7 +26,7 @@ static int adt7316_spi_multi_read(void *client, u8 reg, u8 count, u8 *data)
{
struct spi_device *spi_dev = client;
u8 cmd[2];
- int ret = 0;
+ int ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
@@ -56,7 +55,7 @@ static int adt7316_spi_multi_write(void *client, u8 reg, u8 count, u8 *data)
{
struct spi_device *spi_dev = client;
u8 buf[ADT7316_REG_MAX_ADDR + 2];
- int i, ret = 0;
+ int i, ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
@@ -94,7 +93,6 @@ static int adt7316_spi_probe(struct spi_device *spi_dev)
struct adt7316_bus bus = {
.client = spi_dev,
.irq = spi_dev->irq,
- .irq_flags = IRQF_TRIGGER_LOW,
.read = adt7316_spi_read,
.write = adt7316_spi_write,
.multi_read = adt7316_spi_multi_read,
@@ -128,11 +126,23 @@ static const struct spi_device_id adt7316_spi_id[] = {
MODULE_DEVICE_TABLE(spi, adt7316_spi_id);
+static const struct of_device_id adt7316_of_spi_match[] = {
+ { .compatible = "adi,adt7316" },
+ { .compatible = "adi,adt7317" },
+ { .compatible = "adi,adt7318" },
+ { .compatible = "adi,adt7516" },
+ { .compatible = "adi,adt7517" },
+ { .compatible = "adi,adt7519" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, adt7316_of_spi_match);
+
static struct spi_driver adt7316_driver = {
.driver = {
.name = "adt7316",
+ .of_match_table = adt7316_of_spi_match,
.pm = ADT7316_PM_OPS,
- .owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
.id_table = adt7316_spi_id,
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index a1dd74525c10..8a9a8262c2be 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -1,14 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9
*
- *
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irq.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -47,6 +46,8 @@
#define ADT7516_MSB_AIN3 0xA
#define ADT7516_MSB_AIN4 0xB
#define ADT7316_DA_DATA_BASE 0x10
+#define ADT7316_DA_10_BIT_LSB_SHIFT 6
+#define ADT7316_DA_12_BIT_LSB_SHIFT 4
#define ADT7316_DA_MSB_DATA_REGS 4
#define ADT7316_LSB_DAC_A 0x10
#define ADT7316_MSB_DAC_A 0x11
@@ -59,8 +60,8 @@
#define ADT7316_CONFIG1 0x18
#define ADT7316_CONFIG2 0x19
#define ADT7316_CONFIG3 0x1A
-#define ADT7316_LDAC_CONFIG 0x1B
-#define ADT7316_DAC_CONFIG 0x1C
+#define ADT7316_DAC_CONFIG 0x1B
+#define ADT7316_LDAC_CONFIG 0x1C
#define ADT7316_INT_MASK1 0x1D
#define ADT7316_INT_MASK2 0x1E
#define ADT7316_IN_TEMP_OFFSET 0x1F
@@ -117,7 +118,7 @@
*/
#define ADT7316_ADCLK_22_5 0x1
#define ADT7316_DA_HIGH_RESOLUTION 0x2
-#define ADT7316_DA_EN_VIA_DAC_LDCA 0x4
+#define ADT7316_DA_EN_VIA_DAC_LDAC 0x8
#define ADT7516_AIN_IN_VREF 0x10
#define ADT7316_EN_IN_TEMP_PROP_DACA 0x20
#define ADT7316_EN_EX_TEMP_PROP_DACB 0x40
@@ -127,6 +128,7 @@
*/
#define ADT7316_DA_2VREF_CH_MASK 0xF
#define ADT7316_DA_EN_MODE_MASK 0x30
+#define ADT7316_DA_EN_MODE_SHIFT 4
#define ADT7316_DA_EN_MODE_SINGLE 0x00
#define ADT7316_DA_EN_MODE_AB_CD 0x10
#define ADT7316_DA_EN_MODE_ABCD 0x20
@@ -177,7 +179,7 @@
struct adt7316_chip_info {
struct adt7316_bus bus;
- u16 ldac_pin;
+ struct gpio_desc *ldac_pin;
u16 int_mask; /* 0x2f */
u8 config1;
u8 config2;
@@ -207,27 +209,18 @@ struct adt7316_chip_info {
#define ADT7316_TEMP_AIN_INT_MASK \
(ADT7316_TEMP_INT_MASK)
-/*
- * struct adt7316_chip_info - chip specific information
- */
-
-struct adt7316_limit_regs {
- u16 data_high;
- u16 data_low;
-};
-
static ssize_t adt7316_show_enabled(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_EN));
+ return sysfs_emit(buf, "%d\n", !!(chip->config1 & ADT7316_EN));
}
static ssize_t _adt7316_store_enabled(struct adt7316_chip_info *chip,
- int enable)
+ int enable)
{
u8 config1;
int ret;
@@ -244,13 +237,12 @@ static ssize_t _adt7316_store_enabled(struct adt7316_chip_info *chip,
chip->config1 = config1;
return ret;
-
}
static ssize_t adt7316_store_enabled(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -267,14 +259,14 @@ static ssize_t adt7316_store_enabled(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(enabled, 0644,
adt7316_show_enabled,
adt7316_store_enabled,
0);
static ssize_t adt7316_show_select_ex_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -282,13 +274,13 @@ static ssize_t adt7316_show_select_ex_temp(struct device *dev,
if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
return -EPERM;
- return sprintf(buf, "%d\n", !!(chip->config1 & ADT7516_SEL_EX_TEMP));
+ return sysfs_emit(buf, "%d\n", !!(chip->config1 & ADT7516_SEL_EX_TEMP));
}
static ssize_t adt7316_store_select_ex_temp(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -311,28 +303,28 @@ static ssize_t adt7316_store_select_ex_temp(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(select_ex_temp, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(select_ex_temp, 0644,
adt7316_show_select_ex_temp,
adt7316_store_select_ex_temp,
0);
static ssize_t adt7316_show_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
if (chip->config2 & ADT7316_AD_SINGLE_CH_MODE)
- return sprintf(buf, "single_channel\n");
+ return sysfs_emit(buf, "single_channel\n");
- return sprintf(buf, "round_robin\n");
+ return sysfs_emit(buf, "round_robin\n");
}
static ssize_t adt7316_store_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -352,23 +344,23 @@ static ssize_t adt7316_store_mode(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(mode, 0644,
adt7316_show_mode,
adt7316_store_mode,
0);
static ssize_t adt7316_show_all_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf(buf, "single_channel\nround_robin\n");
+ return sysfs_emit(buf, "single_channel\nround_robin\n");
}
-static IIO_DEVICE_ATTR(all_modes, S_IRUGO, adt7316_show_all_modes, NULL, 0);
+static IIO_DEVICE_ATTR(all_modes, 0444, adt7316_show_all_modes, NULL, 0);
static ssize_t adt7316_show_ad_channel(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -378,36 +370,36 @@ static ssize_t adt7316_show_ad_channel(struct device *dev,
switch (chip->config2 & ADT7516_AD_SINGLE_CH_MASK) {
case ADT7316_AD_SINGLE_CH_VDD:
- return sprintf(buf, "0 - VDD\n");
+ return sysfs_emit(buf, "0 - VDD\n");
case ADT7316_AD_SINGLE_CH_IN:
- return sprintf(buf, "1 - Internal Temperature\n");
+ return sysfs_emit(buf, "1 - Internal Temperature\n");
case ADT7316_AD_SINGLE_CH_EX:
if (((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) &&
- (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)
- return sprintf(buf, "2 - AIN1\n");
+ (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)
+ return sysfs_emit(buf, "2 - AIN1\n");
- return sprintf(buf, "2 - External Temperature\n");
+ return sysfs_emit(buf, "2 - External Temperature\n");
case ADT7516_AD_SINGLE_CH_AIN2:
if ((chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)
- return sprintf(buf, "3 - AIN2\n");
+ return sysfs_emit(buf, "3 - AIN2\n");
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
case ADT7516_AD_SINGLE_CH_AIN3:
if (chip->config1 & ADT7516_SEL_AIN3)
- return sprintf(buf, "4 - AIN3\n");
+ return sysfs_emit(buf, "4 - AIN3\n");
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
case ADT7516_AD_SINGLE_CH_AIN4:
- return sprintf(buf, "5 - AIN4\n");
+ return sysfs_emit(buf, "5 - AIN4\n");
default:
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
}
static ssize_t adt7316_store_ad_channel(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -434,7 +426,6 @@ static ssize_t adt7316_store_ad_channel(struct device *dev,
config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MASK);
}
-
config2 |= data;
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
@@ -446,14 +437,14 @@ static ssize_t adt7316_store_ad_channel(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(ad_channel, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(ad_channel, 0644,
adt7316_show_ad_channel,
adt7316_store_ad_channel,
0);
static ssize_t adt7316_show_all_ad_channels(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -462,32 +453,31 @@ static ssize_t adt7316_show_all_ad_channels(struct device *dev,
return -EPERM;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
+ return sysfs_emit(buf, "0 - VDD\n1 - Internal Temperature\n"
"2 - External Temperature or AIN1\n"
"3 - AIN2\n4 - AIN3\n5 - AIN4\n");
- else
- return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
- "2 - External Temperature\n");
+ return sysfs_emit(buf, "0 - VDD\n1 - Internal Temperature\n"
+ "2 - External Temperature\n");
}
-static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO,
+static IIO_DEVICE_ATTR(all_ad_channels, 0444,
adt7316_show_all_ad_channels, NULL, 0);
static ssize_t adt7316_show_disable_averaging(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(chip->config2 & ADT7316_DISABLE_AVERAGING));
}
static ssize_t adt7316_store_disable_averaging(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -507,26 +497,26 @@ static ssize_t adt7316_store_disable_averaging(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(disable_averaging, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(disable_averaging, 0644,
adt7316_show_disable_averaging,
adt7316_store_disable_averaging,
0);
static ssize_t adt7316_show_enable_smbus_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(chip->config2 & ADT7316_EN_SMBUS_TIMEOUT));
}
static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -546,25 +536,25 @@ static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(enable_smbus_timeout, 0644,
adt7316_show_enable_smbus_timeout,
adt7316_store_enable_smbus_timeout,
0);
static ssize_t adt7316_show_powerdown(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_PD));
+ return sysfs_emit(buf, "%d\n", !!(chip->config1 & ADT7316_PD));
}
static ssize_t adt7316_store_powerdown(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -584,25 +574,25 @@ static ssize_t adt7316_store_powerdown(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(powerdown, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(powerdown, 0644,
adt7316_show_powerdown,
adt7316_store_powerdown,
0);
static ssize_t adt7316_show_fast_ad_clock(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_ADCLK_22_5));
+ return sysfs_emit(buf, "%d\n", !!(chip->config3 & ADT7316_ADCLK_22_5));
}
static ssize_t adt7316_store_fast_ad_clock(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -622,48 +612,42 @@ static ssize_t adt7316_store_fast_ad_clock(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(fast_ad_clock, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(fast_ad_clock, 0644,
adt7316_show_fast_ad_clock,
adt7316_store_fast_ad_clock,
0);
static ssize_t adt7316_show_da_high_resolution(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) {
- if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
- return sprintf(buf, "1 (12 bits)\n");
- else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
- return sprintf(buf, "1 (10 bits)\n");
+ if (chip->id != ID_ADT7318 && chip->id != ID_ADT7519)
+ return sysfs_emit(buf, "1 (10 bits)\n");
}
- return sprintf(buf, "0 (8 bits)\n");
+ return sysfs_emit(buf, "0 (8 bits)\n");
}
static ssize_t adt7316_store_da_high_resolution(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config3;
int ret;
- chip->dac_bits = 8;
+ if (chip->id == ID_ADT7318 || chip->id == ID_ADT7519)
+ return -EPERM;
- if (buf[0] == '1') {
- config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION;
- if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
- chip->dac_bits = 12;
- else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
- chip->dac_bits = 10;
- } else
- config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
+ config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
+ if (buf[0] == '1')
+ config3 |= ADT7316_DA_HIGH_RESOLUTION;
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
if (ret)
@@ -674,14 +658,14 @@ static ssize_t adt7316_store_da_high_resolution(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(da_high_resolution, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(da_high_resolution, 0644,
adt7316_show_da_high_resolution,
adt7316_store_da_high_resolution,
0);
static ssize_t adt7316_show_AIN_internal_Vref(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -689,14 +673,14 @@ static ssize_t adt7316_show_AIN_internal_Vref(struct device *dev,
if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
return -EPERM;
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(chip->config3 & ADT7516_AIN_IN_VREF));
}
static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -720,27 +704,26 @@ static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(AIN_internal_Vref, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(AIN_internal_Vref, 0644,
adt7316_show_AIN_internal_Vref,
adt7316_store_AIN_internal_Vref,
0);
-
static ssize_t adt7316_show_enable_prop_DACA(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA));
}
static ssize_t adt7316_store_enable_prop_DACA(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -760,26 +743,26 @@ static ssize_t adt7316_store_enable_prop_DACA(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enable_proportion_DACA, S_IRUGO | S_IWUSR,
- adt7316_show_enable_prop_DACA,
- adt7316_store_enable_prop_DACA,
- 0);
+static IIO_DEVICE_ATTR(enable_proportion_DACA, 0644,
+ adt7316_show_enable_prop_DACA,
+ adt7316_store_enable_prop_DACA,
+ 0);
static ssize_t adt7316_show_enable_prop_DACB(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB));
}
static ssize_t adt7316_store_enable_prop_DACB(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -799,26 +782,26 @@ static ssize_t adt7316_store_enable_prop_DACB(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enable_proportion_DACB, S_IRUGO | S_IWUSR,
- adt7316_show_enable_prop_DACB,
- adt7316_store_enable_prop_DACB,
- 0);
+static IIO_DEVICE_ATTR(enable_proportion_DACB, 0644,
+ adt7316_show_enable_prop_DACB,
+ adt7316_store_enable_prop_DACB,
+ 0);
static ssize_t adt7316_show_DAC_2Vref_ch_mask(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "0x%x\n",
+ return sysfs_emit(buf, "0x%x\n",
chip->dac_config & ADT7316_DA_2VREF_CH_MASK);
}
static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -842,40 +825,40 @@ static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, S_IRUGO | S_IWUSR,
- adt7316_show_DAC_2Vref_ch_mask,
- adt7316_store_DAC_2Vref_ch_mask,
- 0);
+static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, 0644,
+ adt7316_show_DAC_2Vref_ch_mask,
+ adt7316_store_DAC_2Vref_ch_mask,
+ 0);
static ssize_t adt7316_show_DAC_update_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
- return sprintf(buf, "manual\n");
+ if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC))
+ return sysfs_emit(buf, "manual\n");
switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) {
case ADT7316_DA_EN_MODE_SINGLE:
- return sprintf(buf,
+ return sysfs_emit(buf,
"0 - auto at any MSB DAC writing\n");
case ADT7316_DA_EN_MODE_AB_CD:
- return sprintf(buf,
+ return sysfs_emit(buf,
"1 - auto at MSB DAC AB and CD writing\n");
case ADT7316_DA_EN_MODE_ABCD:
- return sprintf(buf,
+ return sysfs_emit(buf,
"2 - auto at MSB DAC ABCD writing\n");
default: /* ADT7316_DA_EN_MODE_LDAC */
- return sprintf(buf, "3 - manual\n");
+ return sysfs_emit(buf, "3 - manual\n");
}
}
static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -883,15 +866,15 @@ static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
u8 data;
int ret;
- if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
+ if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC))
return -EPERM;
ret = kstrtou8(buf, 10, &data);
- if (ret || data > ADT7316_DA_EN_MODE_MASK)
+ if (ret || data > (ADT7316_DA_EN_MODE_MASK >> ADT7316_DA_EN_MODE_SHIFT))
return -EINVAL;
dac_config = chip->dac_config & (~ADT7316_DA_EN_MODE_MASK);
- dac_config |= data;
+ dac_config |= data << ADT7316_DA_EN_MODE_SHIFT;
ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
if (ret)
@@ -902,35 +885,33 @@ static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DAC_update_mode, S_IRUGO | S_IWUSR,
- adt7316_show_DAC_update_mode,
- adt7316_store_DAC_update_mode,
- 0);
+static IIO_DEVICE_ATTR(DAC_update_mode, 0644,
+ adt7316_show_DAC_update_mode,
+ adt7316_store_DAC_update_mode,
+ 0);
static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)
- return sprintf(buf, "0 - auto at any MSB DAC writing\n"
+ if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC)
+ return sysfs_emit(buf, "0 - auto at any MSB DAC writing\n"
"1 - auto at MSB DAC AB and CD writing\n"
"2 - auto at MSB DAC ABCD writing\n"
"3 - manual\n");
- else
- return sprintf(buf, "manual\n");
+ return sysfs_emit(buf, "manual\n");
}
-static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO,
- adt7316_show_all_DAC_update_modes, NULL, 0);
-
+static IIO_DEVICE_ATTR(all_DAC_update_modes, 0444,
+ adt7316_show_all_DAC_update_modes, NULL, 0);
static ssize_t adt7316_store_update_DAC(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -938,7 +919,7 @@ static ssize_t adt7316_store_update_DAC(struct device *dev,
u8 data;
int ret;
- if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) {
+ if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC) {
if ((chip->dac_config & ADT7316_DA_EN_MODE_MASK) !=
ADT7316_DA_EN_MODE_LDAC)
return -EPERM;
@@ -955,45 +936,39 @@ static ssize_t adt7316_store_update_DAC(struct device *dev,
if (ret)
return -EIO;
} else {
- gpio_set_value(chip->ldac_pin, 0);
- gpio_set_value(chip->ldac_pin, 1);
+ gpiod_set_value(chip->ldac_pin, 0);
+ gpiod_set_value(chip->ldac_pin, 1);
}
return len;
}
-static IIO_DEVICE_ATTR(update_DAC, S_IRUGO | S_IWUSR,
- NULL,
- adt7316_store_update_DAC,
- 0);
+static IIO_DEVICE_ATTR(update_DAC, 0644,
+ NULL,
+ adt7316_store_update_DAC,
+ 0);
static ssize_t adt7316_show_DA_AB_Vref_bypass(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_AB));
}
static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
int ret;
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_AB);
if (buf[0] == '1')
dac_config |= ADT7316_VREF_BYPASS_DAC_AB;
@@ -1007,38 +982,32 @@ static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, S_IRUGO | S_IWUSR,
- adt7316_show_DA_AB_Vref_bypass,
- adt7316_store_DA_AB_Vref_bypass,
- 0);
+static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, 0644,
+ adt7316_show_DA_AB_Vref_bypass,
+ adt7316_store_DA_AB_Vref_bypass,
+ 0);
static ssize_t adt7316_show_DA_CD_Vref_bypass(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_CD));
}
static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
int ret;
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_CD);
if (buf[0] == '1')
dac_config |= ADT7316_VREF_BYPASS_DAC_CD;
@@ -1052,31 +1021,30 @@ static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, S_IRUGO | S_IWUSR,
- adt7316_show_DA_CD_Vref_bypass,
- adt7316_store_DA_CD_Vref_bypass,
- 0);
+static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, 0644,
+ adt7316_show_DA_CD_Vref_bypass,
+ adt7316_store_DA_CD_Vref_bypass,
+ 0);
static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return sprintf(buf, "0x%x\n",
- (chip->dac_config & ADT7516_DAC_IN_VREF_MASK) >>
+ return sysfs_emit(buf, "0x%x\n",
+ (chip->ldac_config & ADT7516_DAC_IN_VREF_MASK) >>
ADT7516_DAC_IN_VREF_OFFSET);
- else
- return sprintf(buf, "%d\n",
- !!(chip->dac_config & ADT7316_DAC_IN_VREF));
+ return sysfs_emit(buf, "%d\n",
+ !!(chip->ldac_config & ADT7316_DAC_IN_VREF));
}
static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1092,7 +1060,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
if (data & 0x1)
ldac_config |= ADT7516_DAC_AB_IN_VREF;
- else if (data & 0x2)
+ if (data & 0x2)
ldac_config |= ADT7516_DAC_CD_IN_VREF;
} else {
ret = kstrtou8(buf, 16, &data);
@@ -1114,13 +1082,13 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DAC_internal_Vref, S_IRUGO | S_IWUSR,
- adt7316_show_DAC_internal_Vref,
- adt7316_store_DAC_internal_Vref,
- 0);
+static IIO_DEVICE_ATTR(DAC_internal_Vref, 0644,
+ adt7316_show_DAC_internal_Vref,
+ adt7316_store_DAC_internal_Vref,
+ 0);
static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip,
- int channel, char *buf)
+ int channel, char *buf)
{
u16 data;
u8 msb, lsb;
@@ -1128,7 +1096,7 @@ static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip,
int ret;
if ((chip->config2 & ADT7316_AD_SINGLE_CH_MODE) &&
- channel != (chip->config2 & ADT7516_AD_SINGLE_CH_MASK))
+ channel != (chip->config2 & ADT7516_AD_SINGLE_CH_MASK))
return -EPERM;
switch (channel) {
@@ -1160,7 +1128,7 @@ static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip,
data = msb << ADT7316_T_VALUE_FLOAT_OFFSET;
data |= (lsb & ADT7316_LSB_VDD_MASK) >> ADT7316_LSB_VDD_OFFSET;
- return sprintf(buf, "%d\n", data);
+ return sysfs_emit(buf, "%d\n", data);
default: /* ex_temp and ain */
ret = chip->bus.read(chip->bus.client,
ADT7316_LSB_EX_TEMP_AIN, &lsb);
@@ -1178,7 +1146,7 @@ static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip,
(ADT7316_MSB_EX_TEMP - ADT7316_AD_MSB_DATA_BASE))));
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return sprintf(buf, "%d\n", data);
+ return sysfs_emit(buf, "%d\n", data);
break;
}
@@ -1189,25 +1157,25 @@ static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip,
sign = '-';
}
- return sprintf(buf, "%c%d.%.2d\n", sign,
+ return sysfs_emit(buf, "%c%d.%.2d\n", sign,
(data >> ADT7316_T_VALUE_FLOAT_OFFSET),
(data & ADT7316_T_VALUE_FLOAT_MASK) * 25);
}
static ssize_t adt7316_show_VDD(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_VDD, buf);
}
-static IIO_DEVICE_ATTR(VDD, S_IRUGO, adt7316_show_VDD, NULL, 0);
+static IIO_DEVICE_ATTR(VDD, 0444, adt7316_show_VDD, NULL, 0);
static ssize_t adt7316_show_in_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1215,11 +1183,11 @@ static ssize_t adt7316_show_in_temp(struct device *dev,
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_IN, buf);
}
-static IIO_DEVICE_ATTR(in_temp, S_IRUGO, adt7316_show_in_temp, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp, 0444, adt7316_show_in_temp, NULL, 0);
static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1227,45 +1195,45 @@ static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf);
}
-static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1,
- NULL, 0);
-static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+static IIO_DEVICE_ATTR(ex_temp_AIN1, 0444, adt7316_show_ex_temp_AIN1,
+ NULL, 0);
+static IIO_DEVICE_ATTR(ex_temp, 0444, adt7316_show_ex_temp_AIN1, NULL, 0);
static ssize_t adt7316_show_AIN2(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN2, buf);
}
-static IIO_DEVICE_ATTR(AIN2, S_IRUGO, adt7316_show_AIN2, NULL, 0);
+static IIO_DEVICE_ATTR(AIN2, 0444, adt7316_show_AIN2, NULL, 0);
static ssize_t adt7316_show_AIN3(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN3, buf);
}
-static IIO_DEVICE_ATTR(AIN3, S_IRUGO, adt7316_show_AIN3, NULL, 0);
+static IIO_DEVICE_ATTR(AIN3, 0444, adt7316_show_AIN3, NULL, 0);
static ssize_t adt7316_show_AIN4(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN4, buf);
}
-static IIO_DEVICE_ATTR(AIN4, S_IRUGO, adt7316_show_AIN4, NULL, 0);
+static IIO_DEVICE_ATTR(AIN4, 0444, adt7316_show_AIN4, NULL, 0);
static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip,
- int offset_addr, char *buf)
+ int offset_addr, char *buf)
{
int data;
u8 val;
@@ -1279,11 +1247,13 @@ static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip,
if (val & 0x80)
data -= 256;
- return sprintf(buf, "%d\n", data);
+ return sysfs_emit(buf, "%d\n", data);
}
static ssize_t adt7316_store_temp_offset(struct adt7316_chip_info *chip,
- int offset_addr, const char *buf, size_t len)
+ int offset_addr,
+ const char *buf,
+ size_t len)
{
int data;
u8 val;
@@ -1306,8 +1276,8 @@ static ssize_t adt7316_store_temp_offset(struct adt7316_chip_info *chip,
}
static ssize_t adt7316_show_in_temp_offset(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1316,9 +1286,9 @@ static ssize_t adt7316_show_in_temp_offset(struct device *dev,
}
static ssize_t adt7316_store_in_temp_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1327,13 +1297,13 @@ static ssize_t adt7316_store_in_temp_offset(struct device *dev,
len);
}
-static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR,
- adt7316_show_in_temp_offset,
- adt7316_store_in_temp_offset, 0);
+static IIO_DEVICE_ATTR(in_temp_offset, 0644,
+ adt7316_show_in_temp_offset,
+ adt7316_store_in_temp_offset, 0);
static ssize_t adt7316_show_ex_temp_offset(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1342,9 +1312,9 @@ static ssize_t adt7316_show_ex_temp_offset(struct device *dev,
}
static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1353,13 +1323,13 @@ static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
len);
}
-static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR,
- adt7316_show_ex_temp_offset,
- adt7316_store_ex_temp_offset, 0);
+static IIO_DEVICE_ATTR(ex_temp_offset, 0644,
+ adt7316_show_ex_temp_offset,
+ adt7316_store_ex_temp_offset, 0);
static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1369,9 +1339,9 @@ static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev,
}
static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1380,13 +1350,13 @@ static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev,
ADT7316_IN_ANALOG_TEMP_OFFSET, buf, len);
}
-static IIO_DEVICE_ATTR(in_analog_temp_offset, S_IRUGO | S_IWUSR,
- adt7316_show_in_analog_temp_offset,
- adt7316_store_in_analog_temp_offset, 0);
+static IIO_DEVICE_ATTR(in_analog_temp_offset, 0644,
+ adt7316_show_in_analog_temp_offset,
+ adt7316_store_in_analog_temp_offset, 0);
static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1396,9 +1366,9 @@ static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev,
}
static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1407,22 +1377,22 @@ static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev,
ADT7316_EX_ANALOG_TEMP_OFFSET, buf, len);
}
-static IIO_DEVICE_ATTR(ex_analog_temp_offset, S_IRUGO | S_IWUSR,
- adt7316_show_ex_analog_temp_offset,
- adt7316_store_ex_analog_temp_offset, 0);
+static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644,
+ adt7316_show_ex_analog_temp_offset,
+ adt7316_store_ex_analog_temp_offset, 0);
static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
- int channel, char *buf)
+ int channel, char *buf)
{
- u16 data;
+ u16 data = 0;
u8 msb, lsb, offset;
int ret;
if (channel >= ADT7316_DA_MSB_DATA_REGS ||
- (channel == 0 &&
- (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
- (channel == 1 &&
- (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
+ (channel == 0 &&
+ (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
+ (channel == 1 &&
+ (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
return -EPERM;
offset = chip->dac_bits - 8;
@@ -1439,23 +1409,27 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
if (ret)
return -EIO;
- data = (msb << offset) + (lsb & ((1 << offset) - 1));
+ if (chip->dac_bits == 12)
+ data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
+ else if (chip->dac_bits == 10)
+ data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
+ data |= msb << offset;
- return sprintf(buf, "%d\n", data);
+ return sysfs_emit(buf, "%d\n", data);
}
static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
- int channel, const char *buf, size_t len)
+ int channel, const char *buf, size_t len)
{
- u8 msb, lsb, offset;
+ u8 msb, lsb, lsb_reg, offset;
u16 data;
int ret;
if (channel >= ADT7316_DA_MSB_DATA_REGS ||
- (channel == 0 &&
- (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
- (channel == 1 &&
- (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
+ (channel == 0 &&
+ (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
+ (channel == 1 &&
+ (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
return -EPERM;
offset = chip->dac_bits - 8;
@@ -1465,9 +1439,13 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
return -EINVAL;
if (chip->dac_bits > 8) {
- lsb = data & (1 << offset);
+ lsb = data & ((1 << offset) - 1);
+ if (chip->dac_bits == 12)
+ lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
+ else
+ lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
ret = chip->bus.write(chip->bus.client,
- ADT7316_DA_DATA_BASE + channel * 2, lsb);
+ ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
if (ret)
return -EIO;
}
@@ -1482,8 +1460,8 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
}
static ssize_t adt7316_show_DAC_A(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1492,9 +1470,9 @@ static ssize_t adt7316_show_DAC_A(struct device *dev,
}
static ssize_t adt7316_store_DAC_A(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1502,12 +1480,12 @@ static ssize_t adt7316_store_DAC_A(struct device *dev,
return adt7316_store_DAC(chip, 0, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_A, S_IRUGO | S_IWUSR, adt7316_show_DAC_A,
- adt7316_store_DAC_A, 0);
+static IIO_DEVICE_ATTR(DAC_A, 0644, adt7316_show_DAC_A,
+ adt7316_store_DAC_A, 0);
static ssize_t adt7316_show_DAC_B(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1516,9 +1494,9 @@ static ssize_t adt7316_show_DAC_B(struct device *dev,
}
static ssize_t adt7316_store_DAC_B(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1526,12 +1504,12 @@ static ssize_t adt7316_store_DAC_B(struct device *dev,
return adt7316_store_DAC(chip, 1, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_B, S_IRUGO | S_IWUSR, adt7316_show_DAC_B,
- adt7316_store_DAC_B, 0);
+static IIO_DEVICE_ATTR(DAC_B, 0644, adt7316_show_DAC_B,
+ adt7316_store_DAC_B, 0);
static ssize_t adt7316_show_DAC_C(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1540,9 +1518,9 @@ static ssize_t adt7316_show_DAC_C(struct device *dev,
}
static ssize_t adt7316_store_DAC_C(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1550,12 +1528,12 @@ static ssize_t adt7316_store_DAC_C(struct device *dev,
return adt7316_store_DAC(chip, 2, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_C, S_IRUGO | S_IWUSR, adt7316_show_DAC_C,
- adt7316_store_DAC_C, 0);
+static IIO_DEVICE_ATTR(DAC_C, 0644, adt7316_show_DAC_C,
+ adt7316_store_DAC_C, 0);
static ssize_t adt7316_show_DAC_D(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1564,9 +1542,9 @@ static ssize_t adt7316_show_DAC_D(struct device *dev,
}
static ssize_t adt7316_store_DAC_D(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1574,12 +1552,12 @@ static ssize_t adt7316_store_DAC_D(struct device *dev,
return adt7316_store_DAC(chip, 3, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_D, S_IRUGO | S_IWUSR, adt7316_show_DAC_D,
- adt7316_store_DAC_D, 0);
+static IIO_DEVICE_ATTR(DAC_D, 0644, adt7316_show_DAC_D,
+ adt7316_store_DAC_D, 0);
static ssize_t adt7316_show_device_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1590,14 +1568,14 @@ static ssize_t adt7316_show_device_id(struct device *dev,
if (ret)
return -EIO;
- return sprintf(buf, "%d\n", id);
+ return sysfs_emit(buf, "%d\n", id);
}
-static IIO_DEVICE_ATTR(device_id, S_IRUGO, adt7316_show_device_id, NULL, 0);
+static IIO_DEVICE_ATTR(device_id, 0444, adt7316_show_device_id, NULL, 0);
static ssize_t adt7316_show_manufactorer_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1608,15 +1586,15 @@ static ssize_t adt7316_show_manufactorer_id(struct device *dev,
if (ret)
return -EIO;
- return sprintf(buf, "%d\n", id);
+ return sysfs_emit(buf, "%d\n", id);
}
-static IIO_DEVICE_ATTR(manufactorer_id, S_IRUGO,
- adt7316_show_manufactorer_id, NULL, 0);
+static IIO_DEVICE_ATTR(manufactorer_id, 0444,
+ adt7316_show_manufactorer_id, NULL, 0);
static ssize_t adt7316_show_device_rev(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1627,14 +1605,14 @@ static ssize_t adt7316_show_device_rev(struct device *dev,
if (ret)
return -EIO;
- return sprintf(buf, "%d\n", rev);
+ return sysfs_emit(buf, "%d\n", rev);
}
-static IIO_DEVICE_ATTR(device_rev, S_IRUGO, adt7316_show_device_rev, NULL, 0);
+static IIO_DEVICE_ATTR(device_rev, 0444, adt7316_show_device_rev, NULL, 0);
static ssize_t adt7316_show_bus_type(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1646,12 +1624,12 @@ static ssize_t adt7316_show_bus_type(struct device *dev,
return -EIO;
if (stat)
- return sprintf(buf, "spi\n");
+ return sysfs_emit(buf, "spi\n");
- return sprintf(buf, "i2c\n");
+ return sysfs_emit(buf, "i2c\n");
}
-static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0);
+static IIO_DEVICE_ATTR(bus_type, 0444, adt7316_show_bus_type, NULL, 0);
static struct attribute *adt7316_attributes[] = {
&iio_dev_attr_all_modes.dev_attr.attr,
@@ -1714,8 +1692,6 @@ static struct attribute *adt7516_attributes[] = {
&iio_dev_attr_DAC_update_mode.dev_attr.attr,
&iio_dev_attr_all_DAC_update_modes.dev_attr.attr,
&iio_dev_attr_update_DAC.dev_attr.attr,
- &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr,
- &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr,
&iio_dev_attr_DAC_internal_Vref.dev_attr.attr,
&iio_dev_attr_VDD.dev_attr.attr,
&iio_dev_attr_in_temp.dev_attr.attr,
@@ -1755,7 +1731,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
stat1 &= 0x1F;
- time = iio_get_time_ns();
+ time = iio_get_time_ns(indio_dev);
if (stat1 & BIT(0))
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
@@ -1807,32 +1783,69 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
return IRQ_HANDLED;
}
+static int adt7316_setup_irq(struct iio_dev *indio_dev)
+{
+ struct adt7316_chip_info *chip = iio_priv(indio_dev);
+ int irq_type, ret;
+
+ irq_type = irq_get_trigger_type(chip->bus.irq);
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ break;
+ default:
+ dev_info(&indio_dev->dev, "mode %d unsupported, using IRQF_TRIGGER_LOW\n",
+ irq_type);
+ irq_type = IRQF_TRIGGER_LOW;
+ break;
+ }
+
+ ret = devm_request_threaded_irq(&indio_dev->dev, chip->bus.irq,
+ NULL, adt7316_event_handler,
+ irq_type | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "failed to request irq %d\n",
+ chip->bus.irq);
+ return ret;
+ }
+
+ if (irq_type & IRQF_TRIGGER_HIGH)
+ chip->config1 |= ADT7316_INT_POLARITY;
+
+ return 0;
+}
+
/*
* Show mask of enabled interrupts in Hex.
*/
static ssize_t adt7316_show_int_mask(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "0x%x\n", chip->int_mask);
+ return sysfs_emit(buf, "0x%x\n", chip->int_mask);
}
/*
 * Set 1 to the mask in Hex to enable interrupts.
*/
static ssize_t adt7316_set_int_mask(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1869,9 +1882,10 @@ static ssize_t adt7316_set_int_mask(struct device *dev,
return len;
}
+
static inline ssize_t adt7316_show_ad_bound(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *dev_info = dev_to_iio_dev(dev);
@@ -1881,7 +1895,7 @@ static inline ssize_t adt7316_show_ad_bound(struct device *dev,
int ret;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX &&
- this_attr->address > ADT7316_EX_TEMP_LOW)
+ this_attr->address > ADT7316_EX_TEMP_LOW)
return -EPERM;
ret = chip->bus.read(chip->bus.client, this_attr->address, &val);
@@ -1891,18 +1905,18 @@ static inline ssize_t adt7316_show_ad_bound(struct device *dev,
data = (int)val;
if (!((chip->id & ID_FAMILY_MASK) == ID_ADT75XX &&
- (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)) {
+ (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)) {
if (data & 0x80)
data -= 256;
}
- return sprintf(buf, "%d\n", data);
+ return sysfs_emit(buf, "%d\n", data);
}
static inline ssize_t adt7316_set_ad_bound(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *dev_info = dev_to_iio_dev(dev);
@@ -1912,7 +1926,7 @@ static inline ssize_t adt7316_set_ad_bound(struct device *dev,
int ret;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX &&
- this_attr->address > ADT7316_EX_TEMP_LOW)
+ this_attr->address > ADT7316_EX_TEMP_LOW)
return -EPERM;
ret = kstrtoint(buf, 10, &data);
@@ -1920,7 +1934,7 @@ static inline ssize_t adt7316_set_ad_bound(struct device *dev,
return -EINVAL;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX &&
- (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) {
+ (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) {
if (data > 255 || data < 0)
return -EINVAL;
} else {
@@ -1941,19 +1955,19 @@ static inline ssize_t adt7316_set_ad_bound(struct device *dev,
}
static ssize_t adt7316_show_int_enabled(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_INT_EN));
+ return sysfs_emit(buf, "%d\n", !!(chip->config1 & ADT7316_INT_EN));
}
static ssize_t adt7316_set_int_enabled(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1974,61 +1988,61 @@ static ssize_t adt7316_set_int_enabled(struct device *dev,
}
static IIO_DEVICE_ATTR(int_mask,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_int_mask, adt7316_set_int_mask,
0);
static IIO_DEVICE_ATTR(in_temp_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_IN_TEMP_HIGH);
static IIO_DEVICE_ATTR(in_temp_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_IN_TEMP_LOW);
static IIO_DEVICE_ATTR(ex_temp_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_HIGH);
static IIO_DEVICE_ATTR(ex_temp_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_LOW);
/* NASTY duplication to be fixed */
static IIO_DEVICE_ATTR(ex_temp_ain1_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_HIGH);
static IIO_DEVICE_ATTR(ex_temp_ain1_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_LOW);
static IIO_DEVICE_ATTR(ain2_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN2_HIGH);
static IIO_DEVICE_ATTR(ain2_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN2_LOW);
static IIO_DEVICE_ATTR(ain3_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN3_HIGH);
static IIO_DEVICE_ATTR(ain3_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN3_LOW);
static IIO_DEVICE_ATTR(ain4_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN4_HIGH);
static IIO_DEVICE_ATTR(ain4_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN4_LOW);
static IIO_DEVICE_ATTR(int_enabled,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_int_enabled,
adt7316_set_int_enabled, 0);
@@ -2042,7 +2056,7 @@ static struct attribute *adt7316_event_attributes[] = {
NULL,
};
-static struct attribute_group adt7316_event_attribute_group = {
+static const struct attribute_group adt7316_event_attribute_group = {
.attrs = adt7316_event_attributes,
.name = "events",
};
@@ -2063,7 +2077,7 @@ static struct attribute *adt7516_event_attributes[] = {
NULL,
};
-static struct attribute_group adt7516_event_attribute_group = {
+static const struct attribute_group adt7516_event_attribute_group = {
.attrs = adt7516_event_attributes,
.name = "events",
};
@@ -2084,33 +2098,29 @@ static int adt7316_enable(struct device *dev)
return _adt7316_store_enabled(chip, 1);
}
-
-SIMPLE_DEV_PM_OPS(adt7316_pm_ops, adt7316_disable, adt7316_enable);
EXPORT_SYMBOL_GPL(adt7316_pm_ops);
+SIMPLE_DEV_PM_OPS(adt7316_pm_ops, adt7316_disable, adt7316_enable);
#endif
static const struct iio_info adt7316_info = {
.attrs = &adt7316_attribute_group,
.event_attrs = &adt7316_event_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info adt7516_info = {
.attrs = &adt7516_attribute_group,
.event_attrs = &adt7516_event_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
* device probe and remove
*/
int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
- const char *name)
+ const char *name)
{
struct adt7316_chip_info *chip;
struct iio_dev *indio_dev;
- unsigned short *adt7316_platform_data = dev->platform_data;
- int ret = 0;
+ int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
if (!indio_dev)
@@ -2128,9 +2138,23 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
else
return -ENODEV;
- chip->ldac_pin = adt7316_platform_data[1];
- if (chip->ldac_pin) {
- chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDCA;
+ if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
+ chip->dac_bits = 12;
+ else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+ chip->dac_bits = 10;
+ else
+ chip->dac_bits = 8;
+
+ chip->ldac_pin = devm_gpiod_get_optional(dev, "adi,ldac",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(chip->ldac_pin)) {
+ ret = PTR_ERR(chip->ldac_pin);
+ dev_err(dev, "Failed to request ldac GPIO: %d\n", ret);
+ return ret;
+ }
+
+ if (!chip->ldac_pin) {
+ chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDAC;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
chip->config1 |= ADT7516_SEL_AIN3;
}
@@ -2138,7 +2162,6 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
chip->int_mask |= ADT7516_AIN_INT_MASK;
- indio_dev->dev.parent = dev;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
indio_dev->info = &adt7516_info;
else
@@ -2147,21 +2170,9 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
indio_dev->modes = INDIO_DIRECT_MODE;
if (chip->bus.irq > 0) {
- if (adt7316_platform_data[0])
- chip->bus.irq_flags = adt7316_platform_data[0];
-
- ret = devm_request_threaded_irq(dev, chip->bus.irq,
- NULL,
- &adt7316_event_handler,
- chip->bus.irq_flags |
- IRQF_ONESHOT,
- indio_dev->name,
- indio_dev);
+ ret = adt7316_setup_irq(indio_dev);
if (ret)
return ret;
-
- if (chip->bus.irq_flags & IRQF_TRIGGER_HIGH)
- chip->config1 |= ADT7316_INT_POLARITY;
}
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, chip->config1);
@@ -2177,7 +2188,7 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
return ret;
dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n",
- indio_dev->name);
+ indio_dev->name);
return 0;
}
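
Illustrative sketch (not part of the patch above): the adt7316.c hunks replace the platform-data GPIO number and hand-rolled irq_flags with a GPIO descriptor obtained via devm_gpiod_get_optional() and a trigger type read back from the firmware with irq_get_trigger_type(). The snippet below shows that general pattern in isolation; my_chip, my_probe() and my_event_handler() are hypothetical names, and only the GPIO/IRQ plumbing is sketched.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

struct my_chip {
	struct gpio_desc *ldac_pin;	/* optional line: may be NULL */
};

static irqreturn_t my_event_handler(int irq, void *private)
{
	/* event handling elided in this sketch */
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev, struct my_chip *chip, int irq)
{
	int irq_type, ret;

	/* Returns NULL (not an error) when the firmware omits the line */
	chip->ldac_pin = devm_gpiod_get_optional(dev, "adi,ldac",
						 GPIOD_OUT_LOW);
	if (IS_ERR(chip->ldac_pin))
		return PTR_ERR(chip->ldac_pin);

	if (irq > 0) {
		/* Reuse whatever trigger type the firmware declared */
		irq_type = irq_get_trigger_type(irq);
		ret = devm_request_threaded_irq(dev, irq, NULL,
						my_event_handler,
						irq_type | IRQF_ONESHOT,
						dev_name(dev), chip);
		if (ret)
			return ret;
	}

	if (chip->ldac_pin)
		gpiod_set_value(chip->ldac_pin, 1);	/* e.g. release LDAC */

	return 0;
}
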
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index ec40fbb698a6..8c2a92ae7157 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef _ADT7316_H_
@@ -17,7 +16,6 @@
struct adt7316_bus {
void *client;
int irq;
- int irq_flags;
int (*read)(void *client, u8 reg, u8 *data);
int (*write)(void *client, u8 reg, u8 val);
int (*multi_read)(void *client, u8 first_reg, u8 count, u8 *data);
@@ -31,6 +29,6 @@ extern const struct dev_pm_ops adt7316_pm_ops;
#define ADT7316_PM_OPS NULL
#endif
int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
- const char *name);
+ const char *name);
#endif
diff --git a/drivers/staging/iio/cdc/Kconfig b/drivers/staging/iio/cdc/Kconfig
deleted file mode 100644
index 80211df8c577..000000000000
--- a/drivers/staging/iio/cdc/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# CDC drivers
-#
-menu "Capacitance to digital converters"
-
-config AD7150
- tristate "Analog Devices ad7150/1/6 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (ad7150, ad7151, ad7156) Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7150.
-
-config AD7152
- tristate "Analog Devices ad7152/3 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (ad7152, ad7153) Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7152.
-
-config AD7746
- tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (AD7745, AD7746, AD7747) Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7746.
-
-endmenu
diff --git a/drivers/staging/iio/cdc/Makefile b/drivers/staging/iio/cdc/Makefile
deleted file mode 100644
index a5fbabf5c8bf..000000000000
--- a/drivers/staging/iio/cdc/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for industrial I/O DAC drivers
-#
-
-obj-$(CONFIG_AD7150) += ad7150.o
-obj-$(CONFIG_AD7152) += ad7152.o
-obj-$(CONFIG_AD7746) += ad7746.o
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
deleted file mode 100644
index a2b7ae3329c0..000000000000
--- a/drivers/staging/iio/cdc/ad7150.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * AD7150 capacitive sensor driver supporting AD7150/1/6
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-/*
- * AD7150 registers definition
- */
-
-#define AD7150_STATUS 0
-#define AD7150_STATUS_OUT1 (1 << 3)
-#define AD7150_STATUS_OUT2 (1 << 5)
-#define AD7150_CH1_DATA_HIGH 1
-#define AD7150_CH2_DATA_HIGH 3
-#define AD7150_CH1_AVG_HIGH 5
-#define AD7150_CH2_AVG_HIGH 7
-#define AD7150_CH1_SENSITIVITY 9
-#define AD7150_CH1_THR_HOLD_H 9
-#define AD7150_CH1_TIMEOUT 10
-#define AD7150_CH1_SETUP 11
-#define AD7150_CH2_SENSITIVITY 12
-#define AD7150_CH2_THR_HOLD_H 12
-#define AD7150_CH2_TIMEOUT 13
-#define AD7150_CH2_SETUP 14
-#define AD7150_CFG 15
-#define AD7150_CFG_FIX (1 << 7)
-#define AD7150_PD_TIMER 16
-#define AD7150_CH1_CAPDAC 17
-#define AD7150_CH2_CAPDAC 18
-#define AD7150_SN3 19
-#define AD7150_SN2 20
-#define AD7150_SN1 21
-#define AD7150_SN0 22
-#define AD7150_ID 23
-
-/**
- * struct ad7150_chip_info - instance specific chip data
- * @client: i2c client for this device
- * @current_event: device always has one type of event enabled.
- * This element stores the event code of the current one.
- * @threshold: thresholds for simple capacitance value events
- * @thresh_sensitivity: threshold for simple capacitance offset
- * from 'average' value.
- * @mag_sensitity: threshold for magnitude of capacitance offset from
- * from 'average' value.
- * @thresh_timeout: a timeout, in samples from the moment an
- * adaptive threshold event occurs to when the average
- * value jumps to current value.
- * @mag_timeout: a timeout, in sample from the moment an
- * adaptive magnitude event occurs to when the average
- * value jumps to the current value.
- * @old_state: store state from previous event, allowing confirmation
- * of new condition.
- * @conversion_mode: the current conversion mode.
- * @state_lock: ensure consistent state of this structure wrt the
- * hardware.
- */
-struct ad7150_chip_info {
- struct i2c_client *client;
- u64 current_event;
- u16 threshold[2][2];
- u8 thresh_sensitivity[2][2];
- u8 mag_sensitivity[2][2];
- u8 thresh_timeout[2][2];
- u8 mag_timeout[2][2];
- int old_state;
- char *conversion_mode;
- struct mutex state_lock;
-};
-
-/*
- * sysfs nodes
- */
-
-static const u8 ad7150_addresses[][6] = {
- { AD7150_CH1_DATA_HIGH, AD7150_CH1_AVG_HIGH,
- AD7150_CH1_SETUP, AD7150_CH1_THR_HOLD_H,
- AD7150_CH1_SENSITIVITY, AD7150_CH1_TIMEOUT },
- { AD7150_CH2_DATA_HIGH, AD7150_CH2_AVG_HIGH,
- AD7150_CH2_SETUP, AD7150_CH2_THR_HOLD_H,
- AD7150_CH2_SENSITIVITY, AD7150_CH2_TIMEOUT },
-};
-
-static int ad7150_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- int ret;
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- ret = i2c_smbus_read_word_data(chip->client,
- ad7150_addresses[chan->channel][0]);
- if (ret < 0)
- return ret;
- *val = swab16(ret);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_AVERAGE_RAW:
- ret = i2c_smbus_read_word_data(chip->client,
- ad7150_addresses[chan->channel][1]);
- if (ret < 0)
- return ret;
- *val = swab16(ret);
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
-}
-
-static int ad7150_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, enum iio_event_type type,
- enum iio_event_direction dir)
-{
- int ret;
- u8 threshtype;
- bool adaptive;
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
-
- ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
- if (ret < 0)
- return ret;
-
- threshtype = (ret >> 5) & 0x03;
- adaptive = !!(ret & 0x80);
-
- switch (type) {
- case IIO_EV_TYPE_MAG_ADAPTIVE:
- if (dir == IIO_EV_DIR_RISING)
- return adaptive && (threshtype == 0x1);
- return adaptive && (threshtype == 0x0);
- case IIO_EV_TYPE_THRESH_ADAPTIVE:
- if (dir == IIO_EV_DIR_RISING)
- return adaptive && (threshtype == 0x3);
- return adaptive && (threshtype == 0x2);
- case IIO_EV_TYPE_THRESH:
- if (dir == IIO_EV_DIR_RISING)
- return !adaptive && (threshtype == 0x1);
- return !adaptive && (threshtype == 0x0);
- default:
- break;
- }
- return -EINVAL;
-}
-
-/* lock should be held */
-static int ad7150_write_event_params(struct iio_dev *indio_dev,
- unsigned int chan, enum iio_event_type type,
- enum iio_event_direction dir)
-{
- int ret;
- u16 value;
- u8 sens, timeout;
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int rising = (dir == IIO_EV_DIR_RISING);
- u64 event_code;
-
- event_code = IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE, chan, type, dir);
-
- if (event_code != chip->current_event)
- return 0;
-
- switch (type) {
- /* Note completely different from the adaptive versions */
- case IIO_EV_TYPE_THRESH:
- value = chip->threshold[rising][chan];
- ret = i2c_smbus_write_word_data(chip->client,
- ad7150_addresses[chan][3],
- swab16(value));
- if (ret < 0)
- return ret;
- return 0;
- case IIO_EV_TYPE_MAG_ADAPTIVE:
- sens = chip->mag_sensitivity[rising][chan];
- timeout = chip->mag_timeout[rising][chan];
- break;
- case IIO_EV_TYPE_THRESH_ADAPTIVE:
- sens = chip->thresh_sensitivity[rising][chan];
- timeout = chip->thresh_timeout[rising][chan];
- break;
- default:
- return -EINVAL;
- }
- ret = i2c_smbus_write_byte_data(chip->client,
- ad7150_addresses[chan][4],
- sens);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(chip->client,
- ad7150_addresses[chan][5],
- timeout);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int ad7150_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, enum iio_event_type type,
- enum iio_event_direction dir, int state)
-{
- u8 thresh_type, cfg, adaptive;
- int ret;
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int rising = (dir == IIO_EV_DIR_RISING);
- u64 event_code;
-
- /* Something must always be turned on */
- if (state == 0)
- return -EINVAL;
-
- event_code = IIO_UNMOD_EVENT_CODE(chan->type, chan->channel, type, dir);
- if (event_code == chip->current_event)
- return 0;
- mutex_lock(&chip->state_lock);
- ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
- if (ret < 0)
- goto error_ret;
-
- cfg = ret & ~((0x03 << 5) | (0x1 << 7));
-
- switch (type) {
- case IIO_EV_TYPE_MAG_ADAPTIVE:
- adaptive = 1;
- if (rising)
- thresh_type = 0x1;
- else
- thresh_type = 0x0;
- break;
- case IIO_EV_TYPE_THRESH_ADAPTIVE:
- adaptive = 1;
- if (rising)
- thresh_type = 0x3;
- else
- thresh_type = 0x2;
- break;
- case IIO_EV_TYPE_THRESH:
- adaptive = 0;
- if (rising)
- thresh_type = 0x1;
- else
- thresh_type = 0x0;
- break;
- default:
- ret = -EINVAL;
- goto error_ret;
- }
-
- cfg |= (!adaptive << 7) | (thresh_type << 5);
-
- ret = i2c_smbus_write_byte_data(chip->client, AD7150_CFG, cfg);
- if (ret < 0)
- goto error_ret;
-
- chip->current_event = event_code;
-
- /* update control attributes */
- ret = ad7150_write_event_params(indio_dev, chan->channel, type, dir);
-error_ret:
- mutex_unlock(&chip->state_lock);
-
- return 0;
-}
-
-static int ad7150_read_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int rising = (dir == IIO_EV_DIR_RISING);
-
- /* Complex register sharing going on here */
- switch (type) {
- case IIO_EV_TYPE_MAG_ADAPTIVE:
- *val = chip->mag_sensitivity[rising][chan->channel];
- return IIO_VAL_INT;
- case IIO_EV_TYPE_THRESH_ADAPTIVE:
- *val = chip->thresh_sensitivity[rising][chan->channel];
- return IIO_VAL_INT;
- case IIO_EV_TYPE_THRESH:
- *val = chip->threshold[rising][chan->channel];
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
-}
-
-static int ad7150_write_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- int ret;
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- int rising = (dir == IIO_EV_DIR_RISING);
-
- mutex_lock(&chip->state_lock);
- switch (type) {
- case IIO_EV_TYPE_MAG_ADAPTIVE:
- chip->mag_sensitivity[rising][chan->channel] = val;
- break;
- case IIO_EV_TYPE_THRESH_ADAPTIVE:
- chip->thresh_sensitivity[rising][chan->channel] = val;
- break;
- case IIO_EV_TYPE_THRESH:
- chip->threshold[rising][chan->channel] = val;
- break;
- default:
- ret = -EINVAL;
- goto error_ret;
- }
-
- /* write back if active */
- ret = ad7150_write_event_params(indio_dev, chan->channel, type, dir);
-
-error_ret:
- mutex_unlock(&chip->state_lock);
- return ret;
-}
-
-static ssize_t ad7150_show_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- u8 value;
-
- /* use the event code for consistency reasons */
- int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
- int rising = !!(IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address)
- == IIO_EV_DIR_RISING);
-
- switch (IIO_EVENT_CODE_EXTRACT_TYPE(this_attr->address)) {
- case IIO_EV_TYPE_MAG_ADAPTIVE:
- value = chip->mag_timeout[rising][chan];
- break;
- case IIO_EV_TYPE_THRESH_ADAPTIVE:
- value = chip->thresh_timeout[rising][chan];
- break;
- default:
- return -EINVAL;
- }
-
- return sprintf(buf, "%d\n", value);
-}
-
-static ssize_t ad7150_store_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int chan = IIO_EVENT_CODE_EXTRACT_CHAN(this_attr->address);
- enum iio_event_direction dir;
- enum iio_event_type type;
- int rising;
- u8 data;
- int ret;
-
- type = IIO_EVENT_CODE_EXTRACT_TYPE(this_attr->address);
- dir = IIO_EVENT_CODE_EXTRACT_DIR(this_attr->address);
- rising = (dir == IIO_EV_DIR_RISING);
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
-
- mutex_lock(&chip->state_lock);
- switch (type) {
- case IIO_EV_TYPE_MAG_ADAPTIVE:
- chip->mag_timeout[rising][chan] = data;
- break;
- case IIO_EV_TYPE_THRESH_ADAPTIVE:
- chip->thresh_timeout[rising][chan] = data;
- break;
- default:
- ret = -EINVAL;
- goto error_ret;
- }
-
- ret = ad7150_write_event_params(indio_dev, chan, type, dir);
-error_ret:
- mutex_unlock(&chip->state_lock);
-
- if (ret < 0)
- return ret;
-
- return len;
-}
-
-#define AD7150_TIMEOUT(chan, type, dir, ev_type, ev_dir) \
- IIO_DEVICE_ATTR(in_capacitance##chan##_##type##_##dir##_timeout, \
- S_IRUGO | S_IWUSR, \
- &ad7150_show_timeout, \
- &ad7150_store_timeout, \
- IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE, \
- chan, \
- IIO_EV_TYPE_##ev_type, \
- IIO_EV_DIR_##ev_dir))
-static AD7150_TIMEOUT(0, mag_adaptive, rising, MAG_ADAPTIVE, RISING);
-static AD7150_TIMEOUT(0, mag_adaptive, falling, MAG_ADAPTIVE, FALLING);
-static AD7150_TIMEOUT(1, mag_adaptive, rising, MAG_ADAPTIVE, RISING);
-static AD7150_TIMEOUT(1, mag_adaptive, falling, MAG_ADAPTIVE, FALLING);
-static AD7150_TIMEOUT(0, thresh_adaptive, rising, THRESH_ADAPTIVE, RISING);
-static AD7150_TIMEOUT(0, thresh_adaptive, falling, THRESH_ADAPTIVE, FALLING);
-static AD7150_TIMEOUT(1, thresh_adaptive, rising, THRESH_ADAPTIVE, RISING);
-static AD7150_TIMEOUT(1, thresh_adaptive, falling, THRESH_ADAPTIVE, FALLING);
-
-static const struct iio_event_spec ad7150_events[] = {
- {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_THRESH_ADAPTIVE,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_THRESH_ADAPTIVE,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_MAG_ADAPTIVE,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_MAG_ADAPTIVE,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- },
-};
-
-static const struct iio_chan_spec ad7150_channels[] = {
- {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_AVERAGE_RAW),
- .event_spec = ad7150_events,
- .num_event_specs = ARRAY_SIZE(ad7150_events),
- }, {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_AVERAGE_RAW),
- .event_spec = ad7150_events,
- .num_event_specs = ARRAY_SIZE(ad7150_events),
- },
-};
-
-/*
- * threshold events
- */
-
-static irqreturn_t ad7150_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct ad7150_chip_info *chip = iio_priv(indio_dev);
- u8 int_status;
- s64 timestamp = iio_get_time_ns();
- int ret;
-
- ret = i2c_smbus_read_byte_data(chip->client, AD7150_STATUS);
- if (ret < 0)
- return IRQ_HANDLED;
-
- int_status = ret;
-
- if ((int_status & AD7150_STATUS_OUT1) &&
- !(chip->old_state & AD7150_STATUS_OUT1))
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
- else if ((!(int_status & AD7150_STATUS_OUT1)) &&
- (chip->old_state & AD7150_STATUS_OUT1))
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- if ((int_status & AD7150_STATUS_OUT2) &&
- !(chip->old_state & AD7150_STATUS_OUT2))
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
- 1,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
- else if ((!(int_status & AD7150_STATUS_OUT2)) &&
- (chip->old_state & AD7150_STATUS_OUT2))
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE,
- 1,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
- /* store the status to avoid repushing same events */
- chip->old_state = int_status;
-
- return IRQ_HANDLED;
-}
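
The handler above only pushes an event when a status bit changes relative to chip->old_state, so a level that stays asserted across successive interrupts is reported once. A standalone C sketch of that edge-detection pattern; the OUT1/OUT2 bit positions below are placeholders for illustration, not the driver's actual AD7150_STATUS_* values.

#include <stdint.h>
#include <stdio.h>

#define STATUS_OUT1 (1 << 3)	/* placeholder bit position, for illustration only */
#define STATUS_OUT2 (1 << 5)	/* placeholder bit position, for illustration only */

/* Report rising/falling transitions of one status bit between two samples. */
static void report_edges(uint8_t old, uint8_t new, uint8_t bit, const char *name)
{
	if ((new & bit) && !(old & bit))
		printf("%s: rising\n", name);
	else if (!(new & bit) && (old & bit))
		printf("%s: falling\n", name);
}

int main(void)
{
	/* hypothetical sequence of status reads */
	uint8_t samples[] = { 0x00, STATUS_OUT1, STATUS_OUT1,
			      STATUS_OUT1 | STATUS_OUT2, STATUS_OUT2, 0x00 };
	uint8_t old_state = 0;

	for (unsigned i = 0; i < sizeof(samples); i++) {
		report_edges(old_state, samples[i], STATUS_OUT1, "OUT1");
		report_edges(old_state, samples[i], STATUS_OUT2, "OUT2");
		old_state = samples[i];	/* remember last state, as the driver does */
	}
	return 0;
}
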
-
-/* Timeouts not currently handled by core */
-static struct attribute *ad7150_event_attributes[] = {
- &iio_dev_attr_in_capacitance0_mag_adaptive_rising_timeout
- .dev_attr.attr,
- &iio_dev_attr_in_capacitance0_mag_adaptive_falling_timeout
- .dev_attr.attr,
- &iio_dev_attr_in_capacitance1_mag_adaptive_rising_timeout
- .dev_attr.attr,
- &iio_dev_attr_in_capacitance1_mag_adaptive_falling_timeout
- .dev_attr.attr,
- &iio_dev_attr_in_capacitance0_thresh_adaptive_rising_timeout
- .dev_attr.attr,
- &iio_dev_attr_in_capacitance0_thresh_adaptive_falling_timeout
- .dev_attr.attr,
- &iio_dev_attr_in_capacitance1_thresh_adaptive_rising_timeout
- .dev_attr.attr,
- &iio_dev_attr_in_capacitance1_thresh_adaptive_falling_timeout
- .dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group ad7150_event_attribute_group = {
- .attrs = ad7150_event_attributes,
- .name = "events",
-};
-
-static const struct iio_info ad7150_info = {
- .event_attrs = &ad7150_event_attribute_group,
- .driver_module = THIS_MODULE,
- .read_raw = &ad7150_read_raw,
- .read_event_config = &ad7150_read_event_config,
- .write_event_config = &ad7150_write_event_config,
- .read_event_value = &ad7150_read_event_value,
- .write_event_value = &ad7150_write_event_value,
-};
-
-/*
- * device probe and remove
- */
-
-static int ad7150_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret;
- struct ad7150_chip_info *chip;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
- chip = iio_priv(indio_dev);
- mutex_init(&chip->state_lock);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
-
- indio_dev->name = id->name;
- indio_dev->channels = ad7150_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7150_channels);
- /* Establish that the iio_dev is a child of the i2c device */
- indio_dev->dev.parent = &client->dev;
-
- indio_dev->info = &ad7150_info;
-
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- if (client->irq) {
- ret = devm_request_threaded_irq(&client->dev, client->irq,
- NULL,
- &ad7150_event_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- "ad7150_irq1",
- indio_dev);
- if (ret)
- return ret;
- }
-
- if (client->dev.platform_data) {
- ret = devm_request_threaded_irq(&client->dev, *(unsigned int *)
- client->dev.platform_data,
- NULL,
- &ad7150_event_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- "ad7150_irq2",
- indio_dev);
- if (ret)
- return ret;
- }
-
- ret = iio_device_register(indio_dev);
- if (ret)
- return ret;
-
-	dev_info(&client->dev, "%s capacitive sensor registered, irq: %d\n",
- id->name, client->irq);
-
- return 0;
-}
-
-static int ad7150_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id ad7150_id[] = {
- { "ad7150", 0 },
- { "ad7151", 0 },
- { "ad7156", 0 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad7150_id);
-
-static struct i2c_driver ad7150_driver = {
- .driver = {
- .name = "ad7150",
- },
- .probe = ad7150_probe,
- .remove = ad7150_remove,
- .id_table = ad7150_id,
-};
-module_i2c_driver(ad7150_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD7150/1/6 capacitive sensor driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
deleted file mode 100644
index 87110d940e9a..000000000000
--- a/drivers/staging/iio/cdc/ad7152.c
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * AD7152 capacitive sensor driver supporting AD7152/3
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-/*
- * TODO: Check compliance of calibbias with abi (units)
- */
-/*
- * AD7152 registers definition
- */
-
-#define AD7152_REG_STATUS 0
-#define AD7152_REG_CH1_DATA_HIGH 1
-#define AD7152_REG_CH2_DATA_HIGH 3
-#define AD7152_REG_CH1_OFFS_HIGH 5
-#define AD7152_REG_CH2_OFFS_HIGH 7
-#define AD7152_REG_CH1_GAIN_HIGH 9
-#define AD7152_REG_CH1_SETUP 11
-#define AD7152_REG_CH2_GAIN_HIGH 12
-#define AD7152_REG_CH2_SETUP 14
-#define AD7152_REG_CFG 15
-#define AD7152_REG_RESEVERD 16
-#define AD7152_REG_CAPDAC_POS 17
-#define AD7152_REG_CAPDAC_NEG 18
-#define AD7152_REG_CFG2 26
-
-/* Status Register Bit Designations (AD7152_REG_STATUS) */
-#define AD7152_STATUS_RDY1 (1 << 0)
-#define AD7152_STATUS_RDY2 (1 << 1)
-#define AD7152_STATUS_C1C2 (1 << 2)
-#define AD7152_STATUS_PWDN (1 << 7)
-
-/* Setup Register Bit Designations (AD7152_REG_CHx_SETUP) */
-#define AD7152_SETUP_CAPDIFF (1 << 5)
-#define AD7152_SETUP_RANGE_2pF (0 << 6)
-#define AD7152_SETUP_RANGE_0_5pF (1 << 6)
-#define AD7152_SETUP_RANGE_1pF (2 << 6)
-#define AD7152_SETUP_RANGE_4pF (3 << 6)
-#define AD7152_SETUP_RANGE(x) ((x) << 6)
-
-/* Config Register Bit Designations (AD7152_REG_CFG) */
-#define AD7152_CONF_CH2EN (1 << 3)
-#define AD7152_CONF_CH1EN (1 << 4)
-#define AD7152_CONF_MODE_IDLE (0 << 0)
-#define AD7152_CONF_MODE_CONT_CONV (1 << 0)
-#define AD7152_CONF_MODE_SINGLE_CONV (2 << 0)
-#define AD7152_CONF_MODE_OFFS_CAL (5 << 0)
-#define AD7152_CONF_MODE_GAIN_CAL (6 << 0)
-
-/* Capdac Register Bit Designations (AD7152_REG_CAPDAC_XXX) */
-#define AD7152_CAPDAC_DACEN (1 << 7)
-#define AD7152_CAPDAC_DACP(x) ((x) & 0x1F)
-
-/* CFG2 Register Bit Designations (AD7152_REG_CFG2) */
-#define AD7152_CFG2_OSR(x) (((x) & 0x3) << 4)
-
-enum {
- AD7152_DATA,
- AD7152_OFFS,
- AD7152_GAIN,
- AD7152_SETUP
-};
-
-/*
- * struct ad7152_chip_info - chip specific information
- */
-
-struct ad7152_chip_info {
- struct i2c_client *client;
- /*
- * Capacitive channel digital filter setup;
- * conversion time/update rate setup per channel
- */
- u8 filter_rate_setup;
- u8 setup[2];
-};
-
-static inline ssize_t ad7152_start_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len,
- u8 regval)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- bool doit;
- int ret, timeout = 10;
-
- ret = strtobool(buf, &doit);
- if (ret < 0)
- return ret;
-
- if (!doit)
- return 0;
-
- if (this_attr->address == 0)
- regval |= AD7152_CONF_CH1EN;
- else
- regval |= AD7152_CONF_CH2EN;
-
- mutex_lock(&indio_dev->mlock);
- ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
-
- do {
- mdelay(20);
- ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- } while ((ret == regval) && timeout--);
-
- mutex_unlock(&indio_dev->mlock);
- return len;
-}
-static ssize_t ad7152_start_offset_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- return ad7152_start_calib(dev, attr, buf, len,
- AD7152_CONF_MODE_OFFS_CAL);
-}
-static ssize_t ad7152_start_gain_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- return ad7152_start_calib(dev, attr, buf, len,
- AD7152_CONF_MODE_GAIN_CAL);
-}
-
-static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
- S_IWUSR, NULL, ad7152_start_offset_calib, 0);
-static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
- S_IWUSR, NULL, ad7152_start_offset_calib, 1);
-static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
- S_IWUSR, NULL, ad7152_start_gain_calib, 0);
-static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
- S_IWUSR, NULL, ad7152_start_gain_calib, 1);
-
-/* Values are Update Rate (Hz), Conversion Time (ms) + 1 */
-static const unsigned char ad7152_filter_rate_table[][2] = {
- {200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
-};
-
-static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n",
- ad7152_filter_rate_table[chip->filter_rate_setup][0]);
-}
-
-static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
- if (data >= ad7152_filter_rate_table[i][0])
- break;
-
- if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
- i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
-
- mutex_lock(&indio_dev->mlock);
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
-
- chip->filter_rate_setup = i;
- mutex_unlock(&indio_dev->mlock);
-
- return len;
-}
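
The store path above walks ad7152_filter_rate_table, takes the first entry whose update rate does not exceed the request, clamps to the slowest entry, and encodes the resulting index with AD7152_CFG2_OSR(). A standalone sketch of the same selection; the helper name is illustrative, the table values and OSR encoding are taken from the driver.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
#define CFG2_OSR(x)	(((x) & 0x3) << 4)	/* same encoding as AD7152_CFG2_OSR() */

/* { update rate (Hz), conversion time (ms) + 1 }, values from the driver table */
static const unsigned char filter_rate_table[][2] = {
	{200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
};

static unsigned pick_rate_index(unsigned requested_hz)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(filter_rate_table); i++)
		if (requested_hz >= filter_rate_table[i][0])
			break;

	if (i >= ARRAY_SIZE(filter_rate_table))
		i = ARRAY_SIZE(filter_rate_table) - 1;	/* clamp to slowest rate */

	return i;
}

int main(void)
{
	unsigned requests[] = { 500, 200, 60, 20, 1 };

	for (unsigned k = 0; k < ARRAY_SIZE(requests); k++) {
		unsigned i = pick_rate_index(requests[k]);

		printf("request %3u Hz -> %3u Hz, CFG2 OSR field 0x%02x\n",
		       requests[k], (unsigned)filter_rate_table[i][0], CFG2_OSR(i));
	}
	return 0;
}
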
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
- ad7152_show_filter_rate_setup,
- ad7152_store_filter_rate_setup);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17");
-
-static IIO_CONST_ATTR(in_capacitance_scale_available,
- "0.000061050 0.000030525 0.000015263 0.000007631");
-
-static struct attribute *ad7152_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
- &iio_const_attr_in_capacitance_scale_available.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7152_attribute_group = {
- .attrs = ad7152_attributes,
-};
-
-static const u8 ad7152_addresses[][4] = {
- { AD7152_REG_CH1_DATA_HIGH, AD7152_REG_CH1_OFFS_HIGH,
- AD7152_REG_CH1_GAIN_HIGH, AD7152_REG_CH1_SETUP },
- { AD7152_REG_CH2_DATA_HIGH, AD7152_REG_CH2_OFFS_HIGH,
- AD7152_REG_CH2_GAIN_HIGH, AD7152_REG_CH2_SETUP },
-};
-
-/* Values are nano relative to pF base. */
-static const int ad7152_scale_table[] = {
- 30525, 7631, 15263, 61050
-};
-
-static int ad7152_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- int ret, i;
-
- mutex_lock(&indio_dev->mlock);
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- if (val != 1) {
- ret = -EINVAL;
- goto out;
- }
-
- val = (val2 * 1024) / 15625;
-
- ret = i2c_smbus_write_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_GAIN],
- swab16(val));
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
-
- case IIO_CHAN_INFO_CALIBBIAS:
- if ((val < 0) | (val > 0xFFFF)) {
- ret = -EINVAL;
- goto out;
- }
- ret = i2c_smbus_write_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_OFFS],
- swab16(val));
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
- case IIO_CHAN_INFO_SCALE:
- if (val != 0) {
- ret = -EINVAL;
- goto out;
- }
- for (i = 0; i < ARRAY_SIZE(ad7152_scale_table); i++)
- if (val2 == ad7152_scale_table[i])
- break;
-
- chip->setup[chan->channel] &= ~AD7152_SETUP_RANGE_4pF;
- chip->setup[chan->channel] |= AD7152_SETUP_RANGE(i);
-
- ret = i2c_smbus_write_byte_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_SETUP],
- chip->setup[chan->channel]);
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
-
-out:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
-static int ad7152_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- int ret;
- u8 regval = 0;
-
- mutex_lock(&indio_dev->mlock);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- /* First set whether in differential mode */
-
- regval = chip->setup[chan->channel];
-
- if (chan->differential)
- chip->setup[chan->channel] |= AD7152_SETUP_CAPDIFF;
- else
- chip->setup[chan->channel] &= ~AD7152_SETUP_CAPDIFF;
-
- if (regval != chip->setup[chan->channel]) {
- ret = i2c_smbus_write_byte_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_SETUP],
- chip->setup[chan->channel]);
- if (ret < 0)
- goto out;
- }
- /* Make sure the channel is enabled */
- if (chan->channel == 0)
- regval = AD7152_CONF_CH1EN;
- else
- regval = AD7152_CONF_CH2EN;
-
- /* Trigger a single read */
- regval |= AD7152_CONF_MODE_SINGLE_CONV;
- ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG,
- regval);
- if (ret < 0)
- goto out;
-
- msleep(ad7152_filter_rate_table[chip->filter_rate_setup][1]);
- /* Now read the actual register */
- ret = i2c_smbus_read_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_DATA]);
- if (ret < 0)
- goto out;
- *val = swab16(ret);
-
- if (chan->differential)
- *val -= 0x8000;
-
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
-
- ret = i2c_smbus_read_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_GAIN]);
- if (ret < 0)
- goto out;
- /* 1 + gain_val / 2^16 */
- *val = 1;
- *val2 = (15625 * swab16(ret)) / 1024;
-
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- ret = i2c_smbus_read_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_OFFS]);
- if (ret < 0)
- goto out;
- *val = swab16(ret);
-
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_SCALE:
- ret = i2c_smbus_read_byte_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_SETUP]);
- if (ret < 0)
- goto out;
- *val = 0;
- *val2 = ad7152_scale_table[ret >> 6];
-
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
- default:
- ret = -EINVAL;
- }
-out:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
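
The CALIBSCALE readback above reports 1 + gain/2^16, and the micro part is computed as gain * 15625 / 1024, which is gain * 10^6 / 65536 with the common factor of 64 cancelled; the write path uses the same pair of constants in reverse. A quick standalone check of that identity:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* micro part of (1 + gain / 2^16): gain * 1000000 / 65536 == gain * 15625 / 1024 */
	for (uint32_t gain = 0; gain <= 0xFFFF; gain++) {
		uint64_t exact   = (uint64_t)gain * 1000000 / 65536;
		uint64_t reduced = (uint64_t)gain * 15625 / 1024;

		assert(exact == reduced);
	}

	printf("gain 0x8000 -> 1.%06llu\n",
	       (unsigned long long)(0x8000ULL * 15625 / 1024));
	return 0;
}
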
-
-static int ad7152_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- return IIO_VAL_INT_PLUS_NANO;
- default:
- return IIO_VAL_INT_PLUS_MICRO;
- }
-}
-
-static const struct iio_info ad7152_info = {
- .attrs = &ad7152_attribute_group,
- .read_raw = &ad7152_read_raw,
- .write_raw = &ad7152_write_raw,
- .write_raw_get_fmt = &ad7152_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
-};
-
-static const struct iio_chan_spec ad7152_channels[] = {
- {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- }, {
- .type = IIO_CAPACITANCE,
- .differential = 1,
- .indexed = 1,
- .channel = 0,
- .channel2 = 2,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- }, {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- }, {
- .type = IIO_CAPACITANCE,
- .differential = 1,
- .indexed = 1,
- .channel = 1,
- .channel2 = 3,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- }
-};
-/*
- * device probe and remove
- */
-
-static int ad7152_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret = 0;
- struct ad7152_chip_info *chip;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
- chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
-
- /* Establish that the iio_dev is a child of the i2c device */
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ad7152_info;
- indio_dev->channels = ad7152_channels;
- if (id->driver_data == 0)
- indio_dev->num_channels = ARRAY_SIZE(ad7152_channels);
- else
- indio_dev->num_channels = 2;
- indio_dev->num_channels = ARRAY_SIZE(ad7152_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- return ret;
-
- dev_err(&client->dev, "%s capacitive sensor registered\n", id->name);
-
- return 0;
-}
-
-static int ad7152_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id ad7152_id[] = {
- { "ad7152", 0 },
- { "ad7153", 1 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad7152_id);
-
-static struct i2c_driver ad7152_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- },
- .probe = ad7152_probe,
- .remove = ad7152_remove,
- .id_table = ad7152_id,
-};
-module_i2c_driver(ad7152_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD7152/3 capacitive sensor driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
deleted file mode 100644
index 10fa372a4182..000000000000
--- a/drivers/staging/iio/cdc/ad7746.c
+++ /dev/null
@@ -1,791 +0,0 @@
-/*
- * AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/stat.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#include "ad7746.h"
-
-/*
- * AD7746 Register Definition
- */
-
-#define AD7746_REG_STATUS 0
-#define AD7746_REG_CAP_DATA_HIGH 1
-#define AD7746_REG_CAP_DATA_MID 2
-#define AD7746_REG_CAP_DATA_LOW 3
-#define AD7746_REG_VT_DATA_HIGH 4
-#define AD7746_REG_VT_DATA_MID 5
-#define AD7746_REG_VT_DATA_LOW 6
-#define AD7746_REG_CAP_SETUP 7
-#define AD7746_REG_VT_SETUP 8
-#define AD7746_REG_EXC_SETUP 9
-#define AD7746_REG_CFG 10
-#define AD7746_REG_CAPDACA 11
-#define AD7746_REG_CAPDACB 12
-#define AD7746_REG_CAP_OFFH 13
-#define AD7746_REG_CAP_OFFL 14
-#define AD7746_REG_CAP_GAINH 15
-#define AD7746_REG_CAP_GAINL 16
-#define AD7746_REG_VOLT_GAINH 17
-#define AD7746_REG_VOLT_GAINL 18
-
-/* Status Register Bit Designations (AD7746_REG_STATUS) */
-#define AD7746_STATUS_EXCERR BIT(3)
-#define AD7746_STATUS_RDY BIT(2)
-#define AD7746_STATUS_RDYVT BIT(1)
-#define AD7746_STATUS_RDYCAP BIT(0)
-
-/* Capacitive Channel Setup Register Bit Designations (AD7746_REG_CAP_SETUP) */
-#define AD7746_CAPSETUP_CAPEN (1 << 7)
-#define AD7746_CAPSETUP_CIN2 (1 << 6) /* AD7746 only */
-#define AD7746_CAPSETUP_CAPDIFF (1 << 5)
-#define AD7746_CAPSETUP_CACHOP (1 << 0)
-
-/* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */
-#define AD7746_VTSETUP_VTEN (1 << 7)
-#define AD7746_VTSETUP_VTMD_INT_TEMP (0 << 5)
-#define AD7746_VTSETUP_VTMD_EXT_TEMP (1 << 5)
-#define AD7746_VTSETUP_VTMD_VDD_MON (2 << 5)
-#define AD7746_VTSETUP_VTMD_EXT_VIN (3 << 5)
-#define AD7746_VTSETUP_EXTREF (1 << 4)
-#define AD7746_VTSETUP_VTSHORT (1 << 1)
-#define AD7746_VTSETUP_VTCHOP (1 << 0)
-
-/* Excitation Setup Register Bit Designations (AD7746_REG_EXC_SETUP) */
-#define AD7746_EXCSETUP_CLKCTRL (1 << 7)
-#define AD7746_EXCSETUP_EXCON (1 << 6)
-#define AD7746_EXCSETUP_EXCB (1 << 5)
-#define AD7746_EXCSETUP_NEXCB (1 << 4)
-#define AD7746_EXCSETUP_EXCA (1 << 3)
-#define AD7746_EXCSETUP_NEXCA (1 << 2)
-#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
-
-/* Config Register Bit Designations (AD7746_REG_CFG) */
-#define AD7746_CONF_VTFS(x) ((x) << 6)
-#define AD7746_CONF_CAPFS(x) ((x) << 3)
-#define AD7746_CONF_MODE_IDLE (0 << 0)
-#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
-#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
-#define AD7746_CONF_MODE_PWRDN (3 << 0)
-#define AD7746_CONF_MODE_OFFS_CAL (5 << 0)
-#define AD7746_CONF_MODE_GAIN_CAL (6 << 0)
-
-/* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */
-#define AD7746_CAPDAC_DACEN (1 << 7)
-#define AD7746_CAPDAC_DACP(x) ((x) & 0x7F)
-
-/*
- * struct ad7746_chip_info - chip specific information
- */
-
-struct ad7746_chip_info {
- struct i2c_client *client;
- /*
- * Capacitive channel digital filter setup;
- * conversion time/update rate setup per channel
- */
- u8 config;
- u8 cap_setup;
- u8 vt_setup;
- u8 capdac[2][2];
- s8 capdac_set;
-
- union {
- __be32 d32;
- u8 d8[4];
- } data ____cacheline_aligned;
-};
-
-enum ad7746_chan {
- VIN,
- VIN_VDD,
- TEMP_INT,
- TEMP_EXT,
- CIN1,
- CIN1_DIFF,
- CIN2,
- CIN2_DIFF,
-};
-
-static const struct iio_chan_spec ad7746_channels[] = {
- [VIN] = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_EXT_VIN,
- },
- [VIN_VDD] = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .extend_name = "supply",
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_VDD_MON,
- },
- [TEMP_INT] = {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_INT_TEMP,
- },
- [TEMP_EXT] = {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_EXT_TEMP,
- },
- [CIN1] = {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = AD7746_REG_CAP_DATA_HIGH << 8,
- },
- [CIN1_DIFF] = {
- .type = IIO_CAPACITANCE,
- .differential = 1,
- .indexed = 1,
- .channel = 0,
- .channel2 = 2,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CAPDIFF
- },
- [CIN2] = {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CIN2,
- },
- [CIN2_DIFF] = {
- .type = IIO_CAPACITANCE,
- .differential = 1,
- .indexed = 1,
- .channel = 1,
- .channel2 = 3,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
- }
-};
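
Each channel above packs the data register into the high byte of .address and the matching CAP/VT setup bits into the low byte; ad7746_select_channel() recovers the setup with address & 0xFF, and read_raw() uses address >> 8 as the block-read register. A small standalone sketch of that encode/decode; the register and bit values are copied from the definitions above, the helper name is illustrative.

#include <stdint.h>
#include <stdio.h>

/* values copied from the driver's register/bit definitions */
#define REG_CAP_DATA_HIGH	1
#define REG_VT_DATA_HIGH	4
#define CAPSETUP_CAPDIFF	(1 << 5)
#define CAPSETUP_CIN2		(1 << 6)
#define VTSETUP_VTMD_VDD_MON	(2 << 5)

static uint16_t pack_address(uint8_t data_reg, uint8_t setup_bits)
{
	return (uint16_t)data_reg << 8 | setup_bits;
}

int main(void)
{
	uint16_t cin2_diff = pack_address(REG_CAP_DATA_HIGH,
					  CAPSETUP_CAPDIFF | CAPSETUP_CIN2);
	uint16_t vdd_mon   = pack_address(REG_VT_DATA_HIGH, VTSETUP_VTMD_VDD_MON);

	/* what select_channel() and read_raw() would recover from each address */
	printf("CIN2 diff: setup bits 0x%02x, data register %u\n",
	       (unsigned)(cin2_diff & 0xFF), (unsigned)(cin2_diff >> 8));
	printf("VDD mon  : setup bits 0x%02x, data register %u\n",
	       (unsigned)(vdd_mon & 0xFF), (unsigned)(vdd_mon >> 8));
	return 0;
}
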
-
-/* Values are Update Rate (Hz), Conversion Time (ms) + 1 */
-static const unsigned char ad7746_vt_filter_rate_table[][2] = {
- {50, 20 + 1}, {31, 32 + 1}, {16, 62 + 1}, {8, 122 + 1},
-};
-
-static const unsigned char ad7746_cap_filter_rate_table[][2] = {
- {91, 11 + 1}, {84, 12 + 1}, {50, 20 + 1}, {26, 38 + 1},
- {16, 62 + 1}, {13, 77 + 1}, {11, 92 + 1}, {9, 110 + 1},
-};
-
-static int ad7746_select_channel(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan)
-{
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay;
- u8 vt_setup, cap_setup;
-
- switch (chan->type) {
- case IIO_CAPACITANCE:
- cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
- vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
- delay = ad7746_cap_filter_rate_table[(chip->config >> 3) &
- 0x7][1];
-
- if (chip->capdac_set != chan->channel) {
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACA,
- chip->capdac[chan->channel][0]);
- if (ret < 0)
- return ret;
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACB,
- chip->capdac[chan->channel][1]);
- if (ret < 0)
- return ret;
-
- chip->capdac_set = chan->channel;
- }
- break;
- case IIO_VOLTAGE:
- case IIO_TEMP:
- vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
- cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
- delay = ad7746_cap_filter_rate_table[(chip->config >> 6) &
- 0x3][1];
- break;
- default:
- return -EINVAL;
- }
-
- if (chip->cap_setup != cap_setup) {
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAP_SETUP,
- cap_setup);
- if (ret < 0)
- return ret;
-
- chip->cap_setup = cap_setup;
- }
-
- if (chip->vt_setup != vt_setup) {
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_VT_SETUP,
- vt_setup);
- if (ret < 0)
- return ret;
-
- chip->vt_setup = vt_setup;
- }
-
- return delay;
-}
-
-static inline ssize_t ad7746_start_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len,
- u8 regval)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- bool doit;
- int ret, timeout = 10;
-
- ret = strtobool(buf, &doit);
- if (ret < 0)
- return ret;
-
- if (!doit)
- return 0;
-
- mutex_lock(&indio_dev->mlock);
- regval |= chip->config;
- ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
-
- do {
- msleep(20);
- ret = i2c_smbus_read_byte_data(chip->client, AD7746_REG_CFG);
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- } while ((ret == regval) && timeout--);
-
- mutex_unlock(&indio_dev->mlock);
-
- return len;
-}
-
-static ssize_t ad7746_start_offset_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int ret = ad7746_select_channel(indio_dev,
- &ad7746_channels[to_iio_dev_attr(attr)->address]);
- if (ret < 0)
- return ret;
-
- return ad7746_start_calib(dev, attr, buf, len,
- AD7746_CONF_MODE_OFFS_CAL);
-}
-
-static ssize_t ad7746_start_gain_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int ret = ad7746_select_channel(indio_dev,
- &ad7746_channels[to_iio_dev_attr(attr)->address]);
- if (ret < 0)
- return ret;
-
- return ad7746_start_calib(dev, attr, buf, len,
- AD7746_CONF_MODE_GAIN_CAL);
-}
-
-static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
- S_IWUSR, NULL, ad7746_start_offset_calib, CIN1);
-static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
- S_IWUSR, NULL, ad7746_start_offset_calib, CIN2);
-static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
- S_IWUSR, NULL, ad7746_start_gain_calib, CIN1);
-static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
- S_IWUSR, NULL, ad7746_start_gain_calib, CIN2);
-static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
- S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
-
-static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[
- (chip->config >> 3) & 0x7][0]);
-}
-
-static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
- if (data >= ad7746_cap_filter_rate_table[i][0])
- break;
-
- if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
- i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
-
- mutex_lock(&indio_dev->mlock);
- chip->config &= ~AD7746_CONF_CAPFS(0x7);
- chip->config |= AD7746_CONF_CAPFS(i);
- mutex_unlock(&indio_dev->mlock);
-
- return len;
-}
-
-static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[
- (chip->config >> 6) & 0x3][0]);
-}
-
-static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- u8 data;
- int ret, i;
-
- ret = kstrtou8(buf, 10, &data);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
- if (data >= ad7746_vt_filter_rate_table[i][0])
- break;
-
- if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
- i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
-
- mutex_lock(&indio_dev->mlock);
- chip->config &= ~AD7746_CONF_VTFS(0x3);
- chip->config |= AD7746_CONF_VTFS(i);
- mutex_unlock(&indio_dev->mlock);
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency,
- S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup,
- ad7746_store_cap_filter_rate_setup, 0);
-
-static IIO_DEVICE_ATTR(in_voltage_sampling_frequency,
- S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup,
- ad7746_store_vt_filter_rate_setup, 0);
-
-static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
-static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
- "91 84 50 26 16 13 11 9");
-
-static struct attribute *ad7746_attributes[] = {
- &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
- &iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr,
- &iio_const_attr_in_voltage_sampling_frequency_available.dev_attr.attr,
- &iio_const_attr_in_capacitance_sampling_frequency_available.
- dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7746_attribute_group = {
- .attrs = ad7746_attributes,
-};
-
-static int ad7746_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, reg;
-
- mutex_lock(&indio_dev->mlock);
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- if (val != 1) {
- ret = -EINVAL;
- goto out;
- }
-
- val = (val2 * 1024) / 15625;
-
- switch (chan->type) {
- case IIO_CAPACITANCE:
- reg = AD7746_REG_CAP_GAINH;
- break;
- case IIO_VOLTAGE:
- reg = AD7746_REG_VOLT_GAINH;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- ret = i2c_smbus_write_word_data(chip->client, reg, swab16(val));
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- if ((val < 0) | (val > 0xFFFF)) {
- ret = -EINVAL;
- goto out;
- }
- ret = i2c_smbus_write_word_data(chip->client,
- AD7746_REG_CAP_OFFH, swab16(val));
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
- case IIO_CHAN_INFO_OFFSET:
- if ((val < 0) | (val > 43008000)) { /* 21pF */
- ret = -EINVAL;
- goto out;
- }
-
-		/*
-		 * CAPDAC Scale = 21pF_typ / 127
-		 * CIN Scale = 8.192pF / 2^24
-		 * Offset Scale = CAPDAC Scale / CIN Scale = 338646
-		 */

-
- val /= 338646;
-
- chip->capdac[chan->channel][chan->differential] = (val > 0 ?
- AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0);
-
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACA,
- chip->capdac[chan->channel][0]);
- if (ret < 0)
- goto out;
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACB,
- chip->capdac[chan->channel][1]);
- if (ret < 0)
- goto out;
-
- chip->capdac_set = chan->channel;
-
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
-
-out:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
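
The 338646 divisor in the OFFSET branch follows from the two step sizes quoted in the comment: one CAPDAC code is 21 pF / 127 and one raw CIN count is 8.192 pF / 2^24, so a single CAPDAC step spans roughly 338646 raw counts. A quick numeric check:

#include <stdio.h>

int main(void)
{
	double capdac_step = 21e-12 / 127;		/* F per CAPDAC code (21 pF typ / 127) */
	double cin_step    = 8.192e-12 / (1 << 24);	/* F per raw CIN count (8.192 pF / 2^24) */
	double offset_scale = capdac_step / cin_step;

	printf("CAPDAC step : %.3f fF\n", capdac_step * 1e15);
	printf("CIN step    : %.6f fF\n", cin_step * 1e15);
	printf("offset scale: %.0f raw counts per CAPDAC code (driver uses 338646)\n",
	       offset_scale);
	return 0;
}
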
-
-static int ad7746_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay;
- u8 regval, reg;
-
- mutex_lock(&indio_dev->mlock);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- case IIO_CHAN_INFO_PROCESSED:
- ret = ad7746_select_channel(indio_dev, chan);
- if (ret < 0)
- goto out;
- delay = ret;
-
- regval = chip->config | AD7746_CONF_MODE_SINGLE_CONV;
- ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG,
- regval);
- if (ret < 0)
- goto out;
-
- msleep(delay);
- /* Now read the actual register */
-
- ret = i2c_smbus_read_i2c_block_data(chip->client,
- chan->address >> 8, 3, &chip->data.d8[1]);
-
- if (ret < 0)
- goto out;
-
- *val = (be32_to_cpu(chip->data.d32) & 0xFFFFFF) - 0x800000;
-
- switch (chan->type) {
- case IIO_TEMP:
- /* temperature in milli degrees Celsius
- * T = ((*val / 2048) - 4096) * 1000
- */
- *val = (*val * 125) / 256;
- break;
- case IIO_VOLTAGE:
-			if (chan->channel == 1) /* supply_raw */
- *val = *val * 6;
- break;
- default:
- break;
- }
-
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
- switch (chan->type) {
- case IIO_CAPACITANCE:
- reg = AD7746_REG_CAP_GAINH;
- break;
- case IIO_VOLTAGE:
- reg = AD7746_REG_VOLT_GAINH;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- ret = i2c_smbus_read_word_data(chip->client, reg);
- if (ret < 0)
- goto out;
- /* 1 + gain_val / 2^16 */
- *val = 1;
- *val2 = (15625 * swab16(ret)) / 1024;
-
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- ret = i2c_smbus_read_word_data(chip->client,
- AD7746_REG_CAP_OFFH);
- if (ret < 0)
- goto out;
- *val = swab16(ret);
-
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
- [chan->differential]) * 338646;
-
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_CAPACITANCE:
- /* 8.192pf / 2^24 */
- *val = 0;
- *val2 = 488;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
- case IIO_VOLTAGE:
- /* 1170mV / 2^23 */
- *val = 1170;
- *val2 = 23;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- break;
- default:
- ret = -EINVAL;
- }
-out:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
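
In the temperature branch, the 24-bit word has already had 0x800000 subtracted, and 0x800000 / 2048 is exactly the 4096 in the commented formula; what remains is val * 1000 / 2048 millidegrees, which the code reduces to val * 125 / 256. A standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* the folded offset: 0x800000 raw counts correspond to exactly 4096 degrees C */
	assert(0x800000 / 2048 == 4096);

	/* val * 1000 / 2048 and val * 125 / 256 give the same integer for any offset value */
	for (int32_t val = -0x800000; val < 0x800000; val += 257) {
		int64_t a = (int64_t)val * 1000 / 2048;
		int64_t b = (int64_t)val * 125 / 256;

		assert(a == b);
	}

	/* raw 0x80C800: offset value 0xC800 maps to 25000 millidegrees C */
	printf("raw 0x80C800 -> %lld millidegrees C\n",
	       (long long)(((int64_t)0x80C800 - 0x800000) * 125 / 256));
	return 0;
}
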
-
-static const struct iio_info ad7746_info = {
- .attrs = &ad7746_attribute_group,
- .read_raw = &ad7746_read_raw,
- .write_raw = &ad7746_write_raw,
- .driver_module = THIS_MODULE,
-};
-
-/*
- * device probe and remove
- */
-
-static int ad7746_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ad7746_platform_data *pdata = client->dev.platform_data;
- struct ad7746_chip_info *chip;
- struct iio_dev *indio_dev;
- int ret = 0;
- unsigned char regval = 0;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
- chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
- chip->capdac_set = -1;
-
- /* Establish that the iio_dev is a child of the i2c device */
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ad7746_info;
- indio_dev->channels = ad7746_channels;
- if (id->driver_data == 7746)
- indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
- else
- indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
- indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- if (pdata) {
- if (pdata->exca_en) {
- if (pdata->exca_inv_en)
- regval |= AD7746_EXCSETUP_NEXCA;
- else
- regval |= AD7746_EXCSETUP_EXCA;
- }
-
- if (pdata->excb_en) {
- if (pdata->excb_inv_en)
- regval |= AD7746_EXCSETUP_NEXCB;
- else
- regval |= AD7746_EXCSETUP_EXCB;
- }
-
- regval |= AD7746_EXCSETUP_EXCLVL(pdata->exclvl);
- } else {
- dev_warn(&client->dev, "No platform data? using default\n");
- regval = AD7746_EXCSETUP_EXCA | AD7746_EXCSETUP_EXCB |
- AD7746_EXCSETUP_EXCLVL(3);
- }
-
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_EXC_SETUP, regval);
- if (ret < 0)
- return ret;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- return ret;
-
- dev_info(&client->dev, "%s capacitive sensor registered\n", id->name);
-
- return 0;
-}
-
-static int ad7746_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static const struct i2c_device_id ad7746_id[] = {
- { "ad7745", 7745 },
- { "ad7746", 7746 },
- { "ad7747", 7747 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad7746_id);
-
-static struct i2c_driver ad7746_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- },
- .probe = ad7746_probe,
- .remove = ad7746_remove,
- .id_table = ad7746_id,
-};
-module_i2c_driver(ad7746_driver);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/cdc/ad7746.h b/drivers/staging/iio/cdc/ad7746.h
deleted file mode 100644
index ea8572d1df02..000000000000
--- a/drivers/staging/iio/cdc/ad7746.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef IIO_CDC_AD7746_H_
-#define IIO_CDC_AD7746_H_
-
-/*
- * TODO: struct ad7746_platform_data needs to go into include/linux/iio
- */
-
-#define AD7466_EXCLVL_0 0 /* +-VDD/8 */
-#define AD7466_EXCLVL_1 1 /* +-VDD/4 */
-#define AD7466_EXCLVL_2 2 /* +-VDD * 3/8 */
-#define AD7466_EXCLVL_3 3 /* +-VDD/2 */
-
-struct ad7746_platform_data {
- unsigned char exclvl; /*Excitation Voltage Level */
- bool exca_en; /* enables EXCA pin as the excitation output */
- bool exca_inv_en; /* enables /EXCA pin as the excitation output */
- bool excb_en; /* enables EXCB pin as the excitation output */
- bool excb_inv_en; /* enables /EXCB pin as the excitation output */
-};
-
-#endif /* IIO_CDC_AD7746_H_ */
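
The probe path shown earlier ORs the EXCA/EXCB flags from this structure into the excitation setup register and falls back to both pins enabled at level 3 when no platform data is supplied. A hypothetical, self-contained snippet showing how a board might fill the structure and what register value results; the struct layout and bit values are copied from above, and the helper only mirrors the probe logic, it is not part of the driver.

#include <stdbool.h>
#include <stdio.h>

/* excitation setup bits, copied from the driver's AD7746_EXCSETUP_* definitions */
#define EXCSETUP_EXCB		(1 << 5)
#define EXCSETUP_NEXCB		(1 << 4)
#define EXCSETUP_EXCA		(1 << 3)
#define EXCSETUP_NEXCA		(1 << 2)
#define EXCSETUP_EXCLVL(x)	(((x) & 0x3) << 0)

/* platform data layout copied from the header above */
struct ad7746_platform_data {
	unsigned char exclvl;
	bool exca_en;
	bool exca_inv_en;
	bool excb_en;
	bool excb_inv_en;
};

/* mirror of the regval computation in ad7746_probe() */
static unsigned char exc_setup_from_pdata(const struct ad7746_platform_data *pdata)
{
	unsigned char regval = 0;

	if (pdata->exca_en)
		regval |= pdata->exca_inv_en ? EXCSETUP_NEXCA : EXCSETUP_EXCA;
	if (pdata->excb_en)
		regval |= pdata->excb_inv_en ? EXCSETUP_NEXCB : EXCSETUP_EXCB;

	return regval | EXCSETUP_EXCLVL(pdata->exclvl);
}

int main(void)
{
	/* hypothetical board configuration: both excitation pins, +-VDD/2 level */
	struct ad7746_platform_data pdata = {
		.exclvl = 3,	/* AD7466_EXCLVL_3 in the header above */
		.exca_en = true,
		.excb_en = true,
	};

	printf("EXC setup register: 0x%02x\n", exc_setup_from_pdata(&pdata));
	return 0;
}
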
diff --git a/drivers/staging/iio/frequency/Kconfig b/drivers/staging/iio/frequency/Kconfig
index fc726d3c64a6..72d899cbef8e 100644
--- a/drivers/staging/iio/frequency/Kconfig
+++ b/drivers/staging/iio/frequency/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Direct Digital Synthesis drivers
#
diff --git a/drivers/staging/iio/frequency/Makefile b/drivers/staging/iio/frequency/Makefile
index e5dbcfce44f9..b8c5cf98aa5e 100644
--- a/drivers/staging/iio/frequency/Makefile
+++ b/drivers/staging/iio/frequency/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Direct Digital Synthesis drivers
#
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index a861fe0149b1..49388da5a684 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -1,27 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD9832 SPI DDS driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
+#include <asm/div64.h>
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/sysfs.h>
+#include <linux/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include "dds.h"
#include "ad9832.h"
+#include "dds.h"
+
+/* Registers */
+
+#define AD9832_FREQ0LL 0x0
+#define AD9832_FREQ0HL 0x1
+#define AD9832_FREQ0LM 0x2
+#define AD9832_FREQ0HM 0x3
+#define AD9832_FREQ1LL 0x4
+#define AD9832_FREQ1HL 0x5
+#define AD9832_FREQ1LM 0x6
+#define AD9832_FREQ1HM 0x7
+#define AD9832_PHASE0L 0x8
+#define AD9832_PHASE0H 0x9
+#define AD9832_PHASE1L 0xA
+#define AD9832_PHASE1H 0xB
+#define AD9832_PHASE2L 0xC
+#define AD9832_PHASE2H 0xD
+#define AD9832_PHASE3L 0xE
+#define AD9832_PHASE3H 0xF
+
+#define AD9832_PHASE_SYM 0x10
+#define AD9832_FREQ_SYM 0x11
+#define AD9832_PINCTRL_EN 0x12
+#define AD9832_OUTPUT_EN 0x13
+
+/* Command Control Bits */
+
+#define AD9832_CMD_PHA8BITSW 0x1
+#define AD9832_CMD_PHA16BITSW 0x0
+#define AD9832_CMD_FRE8BITSW 0x3
+#define AD9832_CMD_FRE16BITSW 0x2
+#define AD9832_CMD_FPSELECT 0x6
+#define AD9832_CMD_SYNCSELSRC 0x8
+#define AD9832_CMD_SLEEPRESCLR 0xC
+
+#define AD9832_FREQ BIT(11)
+#define AD9832_PHASE_MASK GENMASK(10, 9)
+#define AD9832_SYNC BIT(13)
+#define AD9832_SELSRC BIT(12)
+#define AD9832_SLEEP BIT(13)
+#define AD9832_RESET BIT(12)
+#define AD9832_CLR BIT(11)
+#define AD9832_FREQ_BITS 32
+#define AD9832_PHASE_BITS 12
+#define AD9832_CMD_MSK GENMASK(15, 12)
+#define AD9832_ADD_MSK GENMASK(11, 8)
+#define AD9832_DAT_MSK GENMASK(7, 0)
+
+/**
+ * struct ad9832_state - driver instance specific data
+ * @spi: spi_device
+ * @mclk: external master clock
+ * @ctrl_fp: cached frequency/phase control word
+ * @ctrl_ss: cached sync/selsrc control word
+ * @ctrl_src: cached sleep/reset/clr word
+ * @xfer: default spi transfer
+ * @msg: default spi message
+ * @freq_xfer: tuning word spi transfer
+ * @freq_msg: tuning word spi message
+ * @phase_xfer: tuning word spi transfer
+ * @phase_msg: tuning word spi message
+ * @lock: protect sensor state
+ * @data: spi transmit buffer
+ * @phase_data: tuning word spi transmit buffer
+ * @freq_data: tuning word spi transmit buffer
+ */
+
+struct ad9832_state {
+ struct spi_device *spi;
+ struct clk *mclk;
+ unsigned short ctrl_fp;
+ unsigned short ctrl_ss;
+ unsigned short ctrl_src;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ struct spi_transfer freq_xfer[4];
+ struct spi_message freq_msg;
+ struct spi_transfer phase_xfer[2];
+ struct spi_message phase_msg;
+ struct mutex lock; /* protect sensor state */
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be16 freq_data[4];
+ __be16 phase_data[2];
+ __be16 data;
+ } __aligned(IIO_DMA_MINALIGN);
+};
+
static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
{
unsigned long long freqreg = (u64)fout *
@@ -31,27 +126,28 @@ static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
}
static int ad9832_write_frequency(struct ad9832_state *st,
- unsigned addr, unsigned long fout)
+ unsigned int addr, unsigned long fout)
{
+ unsigned long clk_freq;
unsigned long regval;
+ u8 regval_bytes[4];
+ u16 freq_cmd;
- if (fout > (st->mclk / 2))
+ clk_freq = clk_get_rate(st->mclk);
+
+ if (!clk_freq || fout > (clk_freq / 2))
return -EINVAL;
- regval = ad9832_calc_freqreg(st->mclk, fout);
-
- st->freq_data[0] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
- (addr << ADD_SHIFT) |
- ((regval >> 24) & 0xFF));
- st->freq_data[1] = cpu_to_be16((AD9832_CMD_FRE16BITSW << CMD_SHIFT) |
- ((addr - 1) << ADD_SHIFT) |
- ((regval >> 16) & 0xFF));
- st->freq_data[2] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
- ((addr - 2) << ADD_SHIFT) |
- ((regval >> 8) & 0xFF));
- st->freq_data[3] = cpu_to_be16((AD9832_CMD_FRE16BITSW << CMD_SHIFT) |
- ((addr - 3) << ADD_SHIFT) |
- ((regval >> 0) & 0xFF));
+ regval = ad9832_calc_freqreg(clk_freq, fout);
+ put_unaligned_be32(regval, regval_bytes);
+
+ for (int i = 0; i < ARRAY_SIZE(regval_bytes); i++) {
+ freq_cmd = (i % 2 == 0) ? AD9832_CMD_FRE8BITSW : AD9832_CMD_FRE16BITSW;
+
+ st->freq_data[i] = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, freq_cmd) |
+ FIELD_PREP(AD9832_ADD_MSK, addr - i) |
+ FIELD_PREP(AD9832_DAT_MSK, regval_bytes[i]));
+ }
return spi_sync(st->spi, &st->freq_msg);
}
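
The rewritten loop splits the 32-bit tuning word into four bytes and wraps each in a 16-bit command word: bits 15:12 carry the command, bits 11:8 a descending register address, bits 7:0 the data byte, which is what the FIELD_PREP() calls with AD9832_CMD_MSK/AD9832_ADD_MSK/AD9832_DAT_MSK express. A standalone sketch with plain shifts; it assumes the usual DDS relation freqreg = fout * 2^32 / mclk for the part of ad9832_calc_freqreg() that falls outside this hunk.

#include <stdint.h>
#include <stdio.h>

#define CMD_FRE8BITSW	0x3
#define CMD_FRE16BITSW	0x2
#define FREQ0HM		0x3	/* highest FREQ0 byte address, as used by the driver */

/* fout * 2^32 / mclk: assumed tuning-word relation for a 32-bit phase accumulator */
static uint32_t calc_freqreg(uint32_t mclk, uint32_t fout)
{
	return (uint32_t)(((uint64_t)fout << 32) / mclk);
}

int main(void)
{
	uint32_t regval = calc_freqreg(25000000, 440);	/* 440 Hz with a 25 MHz MCLK */
	uint8_t bytes[4] = {
		regval >> 24, regval >> 16, regval >> 8, regval,
	};

	for (int i = 0; i < 4; i++) {
		unsigned cmd  = (i % 2 == 0) ? CMD_FRE8BITSW : CMD_FRE16BITSW;
		unsigned word = cmd << 12 | (FREQ0HM - i) << 8 | bytes[i];

		printf("word %d: 0x%04x (cmd 0x%x, addr 0x%x, data 0x%02x)\n",
		       i, word, cmd, FREQ0HM - i, (unsigned)bytes[i]);
	}
	return 0;
}
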
@@ -59,15 +155,21 @@ static int ad9832_write_frequency(struct ad9832_state *st,
static int ad9832_write_phase(struct ad9832_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9832_PHASE_BITS))
+ u8 phase_bytes[2];
+ u16 phase_cmd;
+
+ if (phase >= BIT(AD9832_PHASE_BITS))
return -EINVAL;
- st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << CMD_SHIFT) |
- (addr << ADD_SHIFT) |
- ((phase >> 8) & 0xFF));
- st->phase_data[1] = cpu_to_be16((AD9832_CMD_PHA16BITSW << CMD_SHIFT) |
- ((addr - 1) << ADD_SHIFT) |
- (phase & 0xFF));
+ put_unaligned_be16(phase, phase_bytes);
+
+ for (int i = 0; i < ARRAY_SIZE(phase_bytes); i++) {
+ phase_cmd = (i % 2 == 0) ? AD9832_CMD_PHA8BITSW : AD9832_CMD_PHA16BITSW;
+
+ st->phase_data[i] = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, phase_cmd) |
+ FIELD_PREP(AD9832_ADD_MSK, addr - i) |
+ FIELD_PREP(AD9832_DAT_MSK, phase_bytes[i]));
+ }
return spi_sync(st->spi, &st->phase_msg);
}
@@ -85,7 +187,7 @@ static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
if (ret)
goto error_ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD9832_FREQ0HM:
case AD9832_FREQ1HM:
@@ -98,25 +200,23 @@ static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
ret = ad9832_write_phase(st, this_attr->address, val);
break;
case AD9832_PINCTRL_EN:
- if (val)
- st->ctrl_ss &= ~AD9832_SELSRC;
- else
- st->ctrl_ss |= AD9832_SELSRC;
- st->data = cpu_to_be16((AD9832_CMD_SYNCSELSRC << CMD_SHIFT) |
- st->ctrl_ss);
+ st->ctrl_ss &= ~AD9832_SELSRC;
+ st->ctrl_ss |= FIELD_PREP(AD9832_SELSRC, val ? 0 : 1);
+
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_SYNCSELSRC) |
+ st->ctrl_ss);
ret = spi_sync(st->spi, &st->msg);
break;
case AD9832_FREQ_SYM:
- if (val == 1) {
- st->ctrl_fp |= AD9832_FREQ;
- } else if (val == 0) {
+ if (val == 1 || val == 0) {
st->ctrl_fp &= ~AD9832_FREQ;
+ st->ctrl_fp |= FIELD_PREP(AD9832_FREQ, val ? 1 : 0);
} else {
ret = -EINVAL;
break;
}
- st->data = cpu_to_be16((AD9832_CMD_FPSELECT << CMD_SHIFT) |
- st->ctrl_fp);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_FPSELECT) |
+ st->ctrl_fp);
ret = spi_sync(st->spi, &st->msg);
break;
case AD9832_PHASE_SYM:
@@ -125,53 +225,52 @@ static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
break;
}
- st->ctrl_fp &= ~AD9832_PHASE(3);
- st->ctrl_fp |= AD9832_PHASE(val);
+ st->ctrl_fp &= ~AD9832_PHASE_MASK;
+ st->ctrl_fp |= FIELD_PREP(AD9832_PHASE_MASK, val);
- st->data = cpu_to_be16((AD9832_CMD_FPSELECT << CMD_SHIFT) |
- st->ctrl_fp);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_FPSELECT) |
+ st->ctrl_fp);
ret = spi_sync(st->spi, &st->msg);
break;
case AD9832_OUTPUT_EN:
if (val)
- st->ctrl_src &= ~(AD9832_RESET | AD9832_SLEEP |
- AD9832_CLR);
+ st->ctrl_src &= ~(AD9832_RESET | AD9832_SLEEP | AD9832_CLR);
else
- st->ctrl_src |= AD9832_RESET;
+ st->ctrl_src |= FIELD_PREP(AD9832_RESET, 1);
- st->data = cpu_to_be16((AD9832_CMD_SLEEPRESCLR << CMD_SHIFT) |
- st->ctrl_src);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_SLEEPRESCLR) |
+ st->ctrl_src);
ret = spi_sync(st->spi, &st->msg);
break;
default:
ret = -ENODEV;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
error_ret:
return ret ? ret : len;
}
-/**
+/*
* see dds.h for further information
*/
-static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9832_write, AD9832_FREQ0HM);
-static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9832_write, AD9832_FREQ1HM);
-static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9832_write, AD9832_FREQ_SYM);
+static IIO_DEV_ATTR_FREQ(0, 0, 0200, NULL, ad9832_write, AD9832_FREQ0HM);
+static IIO_DEV_ATTR_FREQ(0, 1, 0200, NULL, ad9832_write, AD9832_FREQ1HM);
+static IIO_DEV_ATTR_FREQSYMBOL(0, 0200, NULL, ad9832_write, AD9832_FREQ_SYM);
static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
-static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9832_write, AD9832_PHASE0H);
-static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9832_write, AD9832_PHASE1H);
-static IIO_DEV_ATTR_PHASE(0, 2, S_IWUSR, NULL, ad9832_write, AD9832_PHASE2H);
-static IIO_DEV_ATTR_PHASE(0, 3, S_IWUSR, NULL, ad9832_write, AD9832_PHASE3H);
-static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL,
+static IIO_DEV_ATTR_PHASE(0, 0, 0200, NULL, ad9832_write, AD9832_PHASE0H);
+static IIO_DEV_ATTR_PHASE(0, 1, 0200, NULL, ad9832_write, AD9832_PHASE1H);
+static IIO_DEV_ATTR_PHASE(0, 2, 0200, NULL, ad9832_write, AD9832_PHASE2H);
+static IIO_DEV_ATTR_PHASE(0, 3, 0200, NULL, ad9832_write, AD9832_PHASE3H);
+static IIO_DEV_ATTR_PHASESYMBOL(0, 0200, NULL,
ad9832_write, AD9832_PHASE_SYM);
static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
-static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
+static IIO_DEV_ATTR_PINCONTROL_EN(0, 0200, NULL,
ad9832_write, AD9832_PINCTRL_EN);
-static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL,
+static IIO_DEV_ATTR_OUT_ENABLE(0, 0200, NULL,
ad9832_write, AD9832_OUTPUT_EN);
static struct attribute *ad9832_attributes[] = {
@@ -196,15 +295,13 @@ static const struct attribute_group ad9832_attribute_group = {
static const struct iio_info ad9832_info = {
.attrs = &ad9832_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9832_probe(struct spi_device *spi)
{
- struct ad9832_platform_data *pdata = spi->dev.platform_data;
+ struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
struct iio_dev *indio_dev;
struct ad9832_state *st;
- struct regulator *reg;
int ret;
if (!pdata) {
@@ -212,25 +309,27 @@ static int ad9832_probe(struct spi_device *spi)
return -ENODEV;
}
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
- }
-
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
- spi_set_drvdata(spi, indio_dev);
+ if (!indio_dev)
+ return -ENOMEM;
+
st = iio_priv(indio_dev);
- st->reg = reg;
- st->mclk = pdata->mclk;
+
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "failed to enable specified AVDD voltage\n");
+
+ ret = devm_regulator_get_enable(&spi->dev, "dvdd");
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVDD supply\n");
+
+ st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
+ if (IS_ERR(st->mclk))
+ return PTR_ERR(st->mclk);
+
st->spi = spi;
+ mutex_init(&st->lock);
- indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad9832_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -272,81 +371,65 @@ static int ad9832_probe(struct spi_device *spi)
spi_message_add_tail(&st->phase_xfer[1], &st->phase_msg);
st->ctrl_src = AD9832_SLEEP | AD9832_RESET | AD9832_CLR;
- st->data = cpu_to_be16((AD9832_CMD_SLEEPRESCLR << CMD_SHIFT) |
- st->ctrl_src);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_SLEEPRESCLR) |
+ st->ctrl_src);
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_disable_reg;
+ return ret;
}
ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
if (ret)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- return 0;
-
-error_disable_reg:
- if (!IS_ERR(reg))
- regulator_disable(reg);
+ return ret;
- return ret;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
-static int ad9832_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad9832_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return 0;
-}
+static const struct of_device_id ad9832_of_match[] = {
+ { .compatible = "adi,ad9832" },
+ { .compatible = "adi,ad9835" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad9832_of_match);
static const struct spi_device_id ad9832_id[] = {
{"ad9832", 0},
{"ad9835", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9832_id);
static struct spi_driver ad9832_driver = {
.driver = {
.name = "ad9832",
- .owner = THIS_MODULE,
+ .of_match_table = ad9832_of_match,
},
.probe = ad9832_probe,
- .remove = ad9832_remove,
.id_table = ad9832_id,
};
module_spi_driver(ad9832_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD9832/AD9835 DDS");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index d32323b46be6..d0d840edb8d2 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -1,110 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* AD9832 SPI DDS driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DDS_AD9832_H_
#define IIO_DDS_AD9832_H_
-/* Registers */
-
-#define AD9832_FREQ0LL 0x0
-#define AD9832_FREQ0HL 0x1
-#define AD9832_FREQ0LM 0x2
-#define AD9832_FREQ0HM 0x3
-#define AD9832_FREQ1LL 0x4
-#define AD9832_FREQ1HL 0x5
-#define AD9832_FREQ1LM 0x6
-#define AD9832_FREQ1HM 0x7
-#define AD9832_PHASE0L 0x8
-#define AD9832_PHASE0H 0x9
-#define AD9832_PHASE1L 0xA
-#define AD9832_PHASE1H 0xB
-#define AD9832_PHASE2L 0xC
-#define AD9832_PHASE2H 0xD
-#define AD9832_PHASE3L 0xE
-#define AD9832_PHASE3H 0xF
-
-#define AD9832_PHASE_SYM 0x10
-#define AD9832_FREQ_SYM 0x11
-#define AD9832_PINCTRL_EN 0x12
-#define AD9832_OUTPUT_EN 0x13
-
-/* Command Control Bits */
-
-#define AD9832_CMD_PHA8BITSW 0x1
-#define AD9832_CMD_PHA16BITSW 0x0
-#define AD9832_CMD_FRE8BITSW 0x3
-#define AD9832_CMD_FRE16BITSW 0x2
-#define AD9832_CMD_FPSELECT 0x6
-#define AD9832_CMD_SYNCSELSRC 0x8
-#define AD9832_CMD_SLEEPRESCLR 0xC
-
-#define AD9832_FREQ BIT(11)
-#define AD9832_PHASE(x) (((x) & 3) << 9)
-#define AD9832_SYNC BIT(13)
-#define AD9832_SELSRC BIT(12)
-#define AD9832_SLEEP BIT(13)
-#define AD9832_RESET BIT(12)
-#define AD9832_CLR BIT(11)
-#define CMD_SHIFT 12
-#define ADD_SHIFT 8
-#define AD9832_FREQ_BITS 32
-#define AD9832_PHASE_BITS 12
-#define RES_MASK(bits) ((1 << (bits)) - 1)
-
-/**
- * struct ad9832_state - driver instance specific data
- * @spi: spi_device
- * @reg: supply regulator
- * @mclk: external master clock
- * @ctrl_fp: cached frequency/phase control word
- * @ctrl_ss: cached sync/selsrc control word
- * @ctrl_src: cached sleep/reset/clr word
- * @xfer: default spi transfer
- * @msg: default spi message
- * @freq_xfer: tuning word spi transfer
- * @freq_msg: tuning word spi message
- * @phase_xfer: tuning word spi transfer
- * @phase_msg: tuning word spi message
- * @data: spi transmit buffer
- * @phase_data: tuning word spi transmit buffer
- * @freq_data: tuning word spi transmit buffer
- */
-
-struct ad9832_state {
- struct spi_device *spi;
- struct regulator *reg;
- unsigned long mclk;
- unsigned short ctrl_fp;
- unsigned short ctrl_ss;
- unsigned short ctrl_src;
- struct spi_transfer xfer;
- struct spi_message msg;
- struct spi_transfer freq_xfer[4];
- struct spi_message freq_msg;
- struct spi_transfer phase_xfer[2];
- struct spi_message phase_msg;
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
- union {
- __be16 freq_data[4]____cacheline_aligned;
- __be16 phase_data[2];
- __be16 data;
- };
-};
-
/*
* TODO: struct ad9832_platform_data needs to go into include/linux/iio
*/
/**
* struct ad9832_platform_data - platform specific information
- * @mclk: master clock in Hz
* @freq0: power up freq0 tuning word in Hz
* @freq1: power up freq1 tuning word in Hz
* @phase0: power up phase0 value [0..4095] correlates with 0..2PI
@@ -114,7 +22,6 @@ struct ad9832_state {
*/
struct ad9832_platform_data {
- unsigned long mclk;
unsigned long freq0;
unsigned long freq1;
unsigned short phase0;
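The removed ad9832_state kept its SPI transmit buffers cache-line aligned for the reason the deleted comment gives: buffers handed to DMA must not share a cache line with other fields. A short sketch of the same layout rule, using the newer IIO_DMA_MINALIGN annotation seen elsewhere in this series; struct example_dds_state is made up for illustration.

#include <linux/iio/iio.h>	/* IIO_DMA_MINALIGN */
#include <linux/spi/spi.h>
#include <linux/types.h>

/* Hypothetical struct; shows the layout rule only, not the driver's real state. */
struct example_dds_state {
	struct spi_device *spi;
	unsigned short ctrl_word;	/* CPU-only bookkeeping */
	/*
	 * Fields below may be mapped for DMA by the SPI core, so they are
	 * kept last and start on their own cache line.
	 */
	__be16 data __aligned(IIO_DMA_MINALIGN);
	__be16 freq_data[4];
};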
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index d02bb44fb8fc..d339d5e8e043 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -1,11 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD9833/AD9834/AD9837/AD9838 SPI DDS driver
*
* Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
@@ -21,9 +21,81 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+
#include "dds.h"
-#include "ad9834.h"
+/* Registers */
+
+#define AD9834_REG_CMD 0
+#define AD9834_REG_FREQ0 BIT(14)
+#define AD9834_REG_FREQ1 BIT(15)
+#define AD9834_REG_PHASE0 (BIT(15) | BIT(14))
+#define AD9834_REG_PHASE1 (BIT(15) | BIT(14) | BIT(13))
+
+/* Command Control Bits */
+
+#define AD9834_B28 BIT(13)
+#define AD9834_HLB BIT(12)
+#define AD9834_FSEL BIT(11)
+#define AD9834_PSEL BIT(10)
+#define AD9834_PIN_SW BIT(9)
+#define AD9834_RESET BIT(8)
+#define AD9834_SLEEP1 BIT(7)
+#define AD9834_SLEEP12 BIT(6)
+#define AD9834_OPBITEN BIT(5)
+#define AD9834_SIGN_PIB BIT(4)
+#define AD9834_DIV2 BIT(3)
+#define AD9834_MODE BIT(1)
+
+#define AD9834_FREQ_BITS 28
+#define AD9834_PHASE_BITS 12
+
+#define RES_MASK(bits) (BIT(bits) - 1)
+
+/**
+ * struct ad9834_state - driver instance specific data
+ * @spi: spi_device
+ * @mclk: external master clock
+ * @control: cached control word
+ * @devid: device id
+ * @xfer: default spi transfer
+ * @msg: default spi message
+ * @freq_xfer: tuning word spi transfer
+ * @freq_msg: tuning word spi message
+ * @lock: protect sensor state
+ * @data: spi transmit buffer
+ * @freq_data: tuning word spi transmit buffer
+ */
+
+struct ad9834_state {
+ struct spi_device *spi;
+ struct clk *mclk;
+ unsigned short control;
+ unsigned short devid;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ struct spi_transfer freq_xfer[2];
+ struct spi_message freq_msg;
+ struct mutex lock; /* protect sensor state */
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 data __aligned(IIO_DMA_MINALIGN);
+ __be16 freq_data[2];
+};
+
+/*
+ * ad9834_supported_device_ids:
+ */
+
+enum ad9834_supported_device_ids {
+ ID_AD9833,
+ ID_AD9834,
+ ID_AD9837,
+ ID_AD9838,
+};
static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
{
@@ -36,12 +108,15 @@ static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
static int ad9834_write_frequency(struct ad9834_state *st,
unsigned long addr, unsigned long fout)
{
+ unsigned long clk_freq;
unsigned long regval;
- if (fout > (st->mclk / 2))
+ clk_freq = clk_get_rate(st->mclk);
+
+ if (!clk_freq || fout > (clk_freq / 2))
return -EINVAL;
- regval = ad9834_calc_freqreg(st->mclk, fout);
+ regval = ad9834_calc_freqreg(clk_freq, fout);
st->freq_data[0] = cpu_to_be16(addr | (regval &
RES_MASK(AD9834_FREQ_BITS / 2)));
@@ -55,7 +130,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
static int ad9834_write_phase(struct ad9834_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9834_PHASE_BITS))
+ if (phase >= BIT(AD9834_PHASE_BITS))
return -EINVAL;
st->data = cpu_to_be16(addr | phase);
@@ -75,9 +150,9 @@ static ssize_t ad9834_write(struct device *dev,
ret = kstrtoul(buf, 10, &val);
if (ret)
- goto error_ret;
+ return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD9834_REG_FREQ0:
case AD9834_REG_FREQ1:
@@ -111,7 +186,7 @@ static ssize_t ad9834_write(struct device *dev,
break;
case AD9834_FSEL:
case AD9834_PSEL:
- if (val == 0) {
+ if (!val) {
st->control &= ~(this_attr->address | AD9834_PIN_SW);
} else if (val == 1) {
st->control |= this_attr->address;
@@ -135,9 +210,8 @@ static ssize_t ad9834_write(struct device *dev,
default:
ret = -ENODEV;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
-error_ret:
return ret ? ret : len;
}
@@ -152,7 +226,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
int ret = 0;
bool is_ad9833_7 = (st->devid == ID_AD9833) || (st->devid == ID_AD9837);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case 0:
@@ -195,7 +269,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
ret = spi_sync(st->spi, &st->msg);
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -209,7 +283,7 @@ ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
struct ad9834_state *st = iio_priv(indio_dev);
char *str;
- if ((st->devid == ID_AD9833) || (st->devid == ID_AD9837))
+ if (st->devid == ID_AD9833 || st->devid == ID_AD9837)
str = "sine triangle square";
else if (st->control & AD9834_OPBITEN)
str = "sine";
@@ -219,7 +293,7 @@ ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
return sprintf(buf, "%s\n", str);
}
-static IIO_DEVICE_ATTR(out_altvoltage0_out0_wavetype_available, S_IRUGO,
+static IIO_DEVICE_ATTR(out_altvoltage0_out0_wavetype_available, 0444,
ad9834_show_out0_wavetype_available, NULL, 0);
static
@@ -239,28 +313,26 @@ ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
return sprintf(buf, "%s\n", str);
}
-static IIO_DEVICE_ATTR(out_altvoltage0_out1_wavetype_available, S_IRUGO,
+static IIO_DEVICE_ATTR(out_altvoltage0_out1_wavetype_available, 0444,
ad9834_show_out1_wavetype_available, NULL, 0);
-/**
+/*
* see dds.h for further information
*/
-static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ0);
-static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ1);
-static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_FSEL);
+static IIO_DEV_ATTR_FREQ(0, 0, 0200, NULL, ad9834_write, AD9834_REG_FREQ0);
+static IIO_DEV_ATTR_FREQ(0, 1, 0200, NULL, ad9834_write, AD9834_REG_FREQ1);
+static IIO_DEV_ATTR_FREQSYMBOL(0, 0200, NULL, ad9834_write, AD9834_FSEL);
static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
-static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE0);
-static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE1);
-static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_PSEL);
+static IIO_DEV_ATTR_PHASE(0, 0, 0200, NULL, ad9834_write, AD9834_REG_PHASE0);
+static IIO_DEV_ATTR_PHASE(0, 1, 0200, NULL, ad9834_write, AD9834_REG_PHASE1);
+static IIO_DEV_ATTR_PHASESYMBOL(0, 0200, NULL, ad9834_write, AD9834_PSEL);
static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
-static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
- ad9834_write, AD9834_PIN_SW);
-static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL, ad9834_write, AD9834_RESET);
-static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, S_IWUSR, NULL,
- ad9834_write, AD9834_OPBITEN);
+static IIO_DEV_ATTR_PINCONTROL_EN(0, 0200, NULL, ad9834_write, AD9834_PIN_SW);
+static IIO_DEV_ATTR_OUT_ENABLE(0, 0200, NULL, ad9834_write, AD9834_RESET);
+static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, 0200, NULL, ad9834_write, AD9834_OPBITEN);
static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0);
static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1);
@@ -308,46 +380,37 @@ static const struct attribute_group ad9833_attribute_group = {
static const struct iio_info ad9834_info = {
.attrs = &ad9834_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad9833_info = {
.attrs = &ad9833_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9834_probe(struct spi_device *spi)
{
- struct ad9834_platform_data *pdata = spi->dev.platform_data;
struct ad9834_state *st;
struct iio_dev *indio_dev;
- struct regulator *reg;
int ret;
- if (!pdata) {
- dev_dbg(&spi->dev, "no platform data?\n");
- return -ENODEV;
- }
-
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
- }
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to enable specified AVDD supply\n");
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev) {
ret = -ENOMEM;
- goto error_disable_reg;
+ return ret;
}
- spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
- st->mclk = pdata->mclk;
+ mutex_init(&st->lock);
+ st->mclk = devm_clk_get_enabled(&spi->dev, NULL);
+ if (IS_ERR(st->mclk)) {
+ dev_err(&spi->dev, "Failed to enable master clock\n");
+ return PTR_ERR(st->mclk);
+ }
+
st->spi = spi;
st->devid = spi_get_device_id(spi)->driver_data;
- st->reg = reg;
- indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
switch (st->devid) {
case ID_AD9833:
@@ -379,59 +442,35 @@ static int ad9834_probe(struct spi_device *spi)
spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);
st->control = AD9834_B28 | AD9834_RESET;
+ st->control |= AD9834_DIV2;
- if (!pdata->en_div2)
- st->control |= AD9834_DIV2;
-
- if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
+ if (st->devid == ID_AD9834)
st->control |= AD9834_SIGN_PIB;
st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_disable_reg;
+ return ret;
}
- ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, 1000000);
if (ret)
- goto error_disable_reg;
+ return ret;
- ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, 5000000);
if (ret)
- goto error_disable_reg;
+ return ret;
- ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE0, 512);
if (ret)
- goto error_disable_reg;
+ return ret;
- ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE1, 1024);
if (ret)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- return 0;
+ return ret;
-error_disable_reg:
- if (!IS_ERR(reg))
- regulator_disable(reg);
-
- return ret;
-}
-
-static int ad9834_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad9834_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad9834_id[] = {
@@ -439,21 +478,30 @@ static const struct spi_device_id ad9834_id[] = {
{"ad9834", ID_AD9834},
{"ad9837", ID_AD9837},
{"ad9838", ID_AD9838},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9834_id);
+static const struct of_device_id ad9834_of_match[] = {
+ {.compatible = "adi,ad9833"},
+ {.compatible = "adi,ad9834"},
+ {.compatible = "adi,ad9837"},
+ {.compatible = "adi,ad9838"},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ad9834_of_match);
+
static struct spi_driver ad9834_driver = {
.driver = {
.name = "ad9834",
- .owner = THIS_MODULE,
+ .of_match_table = ad9834_of_match
},
.probe = ad9834_probe,
- .remove = ad9834_remove,
.id_table = ad9834_id,
};
module_spi_driver(ad9834_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h
deleted file mode 100644
index 40fdd5da7bd0..000000000000
--- a/drivers/staging/iio/frequency/ad9834.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * AD9833/AD9834/AD9837/AD9838 SPI DDS driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-#ifndef IIO_DDS_AD9834_H_
-#define IIO_DDS_AD9834_H_
-
-/* Registers */
-
-#define AD9834_REG_CMD 0
-#define AD9834_REG_FREQ0 BIT(14)
-#define AD9834_REG_FREQ1 BIT(15)
-#define AD9834_REG_PHASE0 (BIT(15) | BIT(14))
-#define AD9834_REG_PHASE1 (BIT(15) | BIT(14) | BIT(13))
-
-/* Command Control Bits */
-
-#define AD9834_B28 BIT(13)
-#define AD9834_HLB BIT(12)
-#define AD9834_FSEL BIT(11)
-#define AD9834_PSEL BIT(10)
-#define AD9834_PIN_SW BIT(9)
-#define AD9834_RESET BIT(8)
-#define AD9834_SLEEP1 BIT(7)
-#define AD9834_SLEEP12 BIT(6)
-#define AD9834_OPBITEN BIT(5)
-#define AD9834_SIGN_PIB BIT(4)
-#define AD9834_DIV2 BIT(3)
-#define AD9834_MODE BIT(1)
-
-#define AD9834_FREQ_BITS 28
-#define AD9834_PHASE_BITS 12
-
-#define RES_MASK(bits) (BIT(bits) - 1)
-
-/**
- * struct ad9834_state - driver instance specific data
- * @spi: spi_device
- * @reg: supply regulator
- * @mclk: external master clock
- * @control: cached control word
- * @xfer: default spi transfer
- * @msg: default spi message
- * @freq_xfer: tuning word spi transfer
- * @freq_msg: tuning word spi message
- * @data: spi transmit buffer
- * @freq_data: tuning word spi transmit buffer
- */
-
-struct ad9834_state {
- struct spi_device *spi;
- struct regulator *reg;
- unsigned int mclk;
- unsigned short control;
- unsigned short devid;
- struct spi_transfer xfer;
- struct spi_message msg;
- struct spi_transfer freq_xfer[2];
- struct spi_message freq_msg;
-
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
- __be16 data ____cacheline_aligned;
- __be16 freq_data[2];
-};
-
-/*
- * TODO: struct ad7887_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad9834_platform_data - platform specific information
- * @mclk: master clock in Hz
- * @freq0: power up freq0 tuning word in Hz
- * @freq1: power up freq1 tuning word in Hz
- * @phase0: power up phase0 value [0..4095] correlates with 0..2PI
- * @phase1: power up phase1 value [0..4095] correlates with 0..2PI
- * @en_div2: digital output/2 is passed to the SIGN BIT OUT pin
- * @en_signbit_msb_out: the MSB (or MSB/2) of the DAC data is connected to the
- * SIGN BIT OUT pin. en_div2 controls whether it is the MSB
- * or MSB/2 that is output. if en_signbit_msb_out=false,
- * the on-board comparator is connected to SIGN BIT OUT
- */
-
-struct ad9834_platform_data {
- unsigned int mclk;
- unsigned int freq0;
- unsigned int freq1;
- unsigned short phase0;
- unsigned short phase1;
- bool en_div2;
- bool en_signbit_msb_out;
-};
-
-/**
- * ad9834_supported_device_ids:
- */
-
-enum ad9834_supported_device_ids {
- ID_AD9833,
- ID_AD9834,
- ID_AD9837,
- ID_AD9838,
-};
-
-#endif /* IIO_DDS_AD9834_H_ */
diff --git a/drivers/staging/iio/frequency/dds.h b/drivers/staging/iio/frequency/dds.h
index fe53e7324c94..2ebe68eb7398 100644
--- a/drivers/staging/iio/frequency/dds.h
+++ b/drivers/staging/iio/frequency/dds.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* dds.h - sysfs attributes associated with DDS devices
*
* Copyright (c) 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DDS_H_
#define IIO_DDS_H_
@@ -101,7 +100,7 @@
#define IIO_DEV_ATTR_OUT_WAVETYPE(_channel, _output, _store, _addr) \
IIO_DEVICE_ATTR(out_altvoltage##_channel##_out##_output##_wavetype,\
- S_IWUSR, NULL, _store, _addr)
+ 0200, NULL, _store, _addr)
/**
* /sys/bus/iio/devices/.../out_altvoltageX_outY_wavetype_available
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
deleted file mode 100644
index f62f68fd6f3f..000000000000
--- a/drivers/staging/iio/gyro/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# IIO Digital Gyroscope Sensor drivers configuration
-#
-menu "Digital gyroscope sensors"
-
-config ADIS16060
- tristate "Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver"
- depends on SPI
- help
- Say Y (yes) here to build support for Analog Devices adis16060 wide bandwidth
- yaw rate gyroscope with SPI.
-
- To compile this driver as a module, say M here: the module will be
- called adis16060. If unsure, say N.
-
-endmenu
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
deleted file mode 100644
index cf22d6d55e27..000000000000
--- a/drivers/staging/iio/gyro/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for digital gyroscope sensor drivers
-#
-
-adis16060-y := adis16060_core.o
-obj-$(CONFIG_ADIS16060) += adis16060.o
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
deleted file mode 100644
index 4c5869dd8223..000000000000
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * ADIS16060 Wide Bandwidth Yaw Rate Gyroscope with SPI driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#define ADIS16060_GYRO 0x20 /* Measure Angular Rate (Gyro) */
-#define ADIS16060_TEMP_OUT 0x10 /* Measure Temperature */
-#define ADIS16060_AIN2 0x80 /* Measure AIN2 */
-#define ADIS16060_AIN1 0x40 /* Measure AIN1 */
-
-/**
- * struct adis16060_state - device instance specific data
- * @us_w: actual spi_device to write config
- * @us_r: actual spi_device to read back data
- * @buf: transmit or receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct adis16060_state {
- struct spi_device *us_w;
- struct spi_device *us_r;
- struct mutex buf_lock;
-
- u8 buf[3] ____cacheline_aligned;
-};
-
-static struct iio_dev *adis16060_iio_dev;
-
-static int adis16060_spi_write(struct iio_dev *indio_dev, u8 val)
-{
- int ret;
- struct adis16060_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->buf[2] = val; /* The last 8 bits clocked in are latched */
- ret = spi_write(st->us_w, st->buf, 3);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16060_spi_read(struct iio_dev *indio_dev, u16 *val)
-{
- int ret;
- struct adis16060_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
-
- ret = spi_read(st->us_r, st->buf, 3);
-
- /* The internal successive approximation ADC begins the
- * conversion process on the falling edge of MSEL1 and
- * starts to place data MSB first on the DOUT line at
- * the 6th falling edge of SCLK
- */
- if (ret == 0)
- *val = ((st->buf[0] & 0x3) << 12) |
- (st->buf[1] << 4) |
- ((st->buf[2] >> 4) & 0xF);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16060_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- u16 tval = 0;
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis16060_spi_write(indio_dev, chan->address);
- if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- ret = adis16060_spi_read(indio_dev, &tval);
- mutex_unlock(&indio_dev->mlock);
- *val = tval;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_OFFSET:
- *val = -7;
- *val2 = 461117;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 34000;
- return IIO_VAL_INT_PLUS_MICRO;
- }
-
- return -EINVAL;
-}
-
-static const struct iio_info adis16060_info = {
- .read_raw = &adis16060_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-static const struct iio_chan_spec adis16060_channels[] = {
- {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16060_GYRO,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16060_AIN1,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16060_AIN2,
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
- .address = ADIS16060_TEMP_OUT,
- }
-};
-
-static int adis16060_r_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16060_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
- st = iio_priv(indio_dev);
- st->us_r = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16060_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adis16060_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16060_channels);
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- adis16060_iio_dev = indio_dev;
- return 0;
-}
-
-static int adis16060_w_probe(struct spi_device *spi)
-{
- int ret;
- struct iio_dev *indio_dev = adis16060_iio_dev;
- struct adis16060_state *st;
-
- if (!indio_dev) {
- ret = -ENODEV;
- goto error_ret;
- }
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
- st->us_w = spi;
- return 0;
-
-error_ret:
- return ret;
-}
-
-static int adis16060_w_remove(struct spi_device *spi)
-{
- return 0;
-}
-
-static struct spi_driver adis16060_r_driver = {
- .driver = {
- .name = "adis16060_r",
- .owner = THIS_MODULE,
- },
- .probe = adis16060_r_probe,
-};
-
-static struct spi_driver adis16060_w_driver = {
- .driver = {
- .name = "adis16060_w",
- .owner = THIS_MODULE,
- },
- .probe = adis16060_w_probe,
- .remove = adis16060_w_remove,
-};
-
-static __init int adis16060_init(void)
-{
- int ret;
-
- ret = spi_register_driver(&adis16060_r_driver);
- if (ret < 0)
- return ret;
-
- ret = spi_register_driver(&adis16060_w_driver);
- if (ret < 0) {
- spi_unregister_driver(&adis16060_r_driver);
- return ret;
- }
-
- return 0;
-}
-module_init(adis16060_init);
-
-static __exit void adis16060_exit(void)
-{
- spi_unregister_driver(&adis16060_w_driver);
- spi_unregister_driver(&adis16060_r_driver);
-}
-module_exit(adis16060_exit);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16060 Yaw Rate Gyroscope Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
deleted file mode 100644
index 6d38854c38c8..000000000000
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/**
- * Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Companion module to the iio simple dummy example driver.
- * The purpose of this is to generate 'fake' event interrupts thus
- * allowing that driver's code to be as close as possible to that of
- * a normal driver talking to hardware. The approach used here
- * is not intended to be general and just happens to work for this
- * particular use case.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/sysfs.h>
-
-#include "iio_dummy_evgen.h"
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-/* Fiddly bit of faking an irq without hardware */
-#define IIO_EVENTGEN_NO 10
-/**
- * struct iio_dummy_evgen - evgen state
- * @chip: irq chip we are faking
- * @base: base of irq range
- * @enabled: mask of which irqs are enabled
- * @inuse: mask of which irqs are connected
- * @regs: irq regs we are faking
- * @lock: protect the evgen state
- */
-struct iio_dummy_eventgen {
- struct irq_chip chip;
- int base;
- bool enabled[IIO_EVENTGEN_NO];
- bool inuse[IIO_EVENTGEN_NO];
- struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
- struct mutex lock;
-};
-
-/* We can only ever have one instance of this 'device' */
-static struct iio_dummy_eventgen *iio_evgen;
-static const char *iio_evgen_name = "iio_dummy_evgen";
-
-static void iio_dummy_event_irqmask(struct irq_data *d)
-{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
- struct iio_dummy_eventgen *evgen =
- container_of(chip, struct iio_dummy_eventgen, chip);
-
- evgen->enabled[d->irq - evgen->base] = false;
-}
-
-static void iio_dummy_event_irqunmask(struct irq_data *d)
-{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
- struct iio_dummy_eventgen *evgen =
- container_of(chip, struct iio_dummy_eventgen, chip);
-
- evgen->enabled[d->irq - evgen->base] = true;
-}
-
-static int iio_dummy_evgen_create(void)
-{
- int ret, i;
-
- iio_evgen = kzalloc(sizeof(*iio_evgen), GFP_KERNEL);
- if (!iio_evgen)
- return -ENOMEM;
-
- iio_evgen->base = irq_alloc_descs(-1, 0, IIO_EVENTGEN_NO, 0);
- if (iio_evgen->base < 0) {
- ret = iio_evgen->base;
- kfree(iio_evgen);
- return ret;
- }
- iio_evgen->chip.name = iio_evgen_name;
- iio_evgen->chip.irq_mask = &iio_dummy_event_irqmask;
- iio_evgen->chip.irq_unmask = &iio_dummy_event_irqunmask;
- for (i = 0; i < IIO_EVENTGEN_NO; i++) {
- irq_set_chip(iio_evgen->base + i, &iio_evgen->chip);
- irq_set_handler(iio_evgen->base + i, &handle_simple_irq);
- irq_modify_status(iio_evgen->base + i,
- IRQ_NOREQUEST | IRQ_NOAUTOEN,
- IRQ_NOPROBE);
- }
- mutex_init(&iio_evgen->lock);
- return 0;
-}
-
-/**
- * iio_dummy_evgen_get_irq() - get an evgen provided irq for a device
- *
- * This function will give a free allocated irq to a client device.
- * That irq can then be caused to 'fire' by using the associated sysfs file.
- */
-int iio_dummy_evgen_get_irq(void)
-{
- int i, ret = 0;
-
- if (!iio_evgen)
- return -ENODEV;
-
- mutex_lock(&iio_evgen->lock);
- for (i = 0; i < IIO_EVENTGEN_NO; i++)
- if (!iio_evgen->inuse[i]) {
- ret = iio_evgen->base + i;
- iio_evgen->inuse[i] = true;
- break;
- }
- mutex_unlock(&iio_evgen->lock);
- if (i == IIO_EVENTGEN_NO)
- return -ENOMEM;
- return ret;
-}
-EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);
-
-/**
- * iio_dummy_evgen_release_irq() - give the irq back.
- * @irq: irq being returned to the pool
- *
- * Used by client driver instances to give the irqs back when they disconnect
- */
-void iio_dummy_evgen_release_irq(int irq)
-{
- mutex_lock(&iio_evgen->lock);
- iio_evgen->inuse[irq - iio_evgen->base] = false;
- mutex_unlock(&iio_evgen->lock);
-}
-EXPORT_SYMBOL_GPL(iio_dummy_evgen_release_irq);
-
-struct iio_dummy_regs *iio_dummy_evgen_get_regs(int irq)
-{
- return &iio_evgen->regs[irq - iio_evgen->base];
-}
-EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_regs);
-
-static void iio_dummy_evgen_free(void)
-{
- irq_free_descs(iio_evgen->base, IIO_EVENTGEN_NO);
- kfree(iio_evgen);
-}
-
-static void iio_evgen_release(struct device *dev)
-{
- iio_dummy_evgen_free();
-}
-
-static ssize_t iio_evgen_poke(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long event;
- int ret;
-
- ret = kstrtoul(buf, 10, &event);
- if (ret)
- return ret;
-
- iio_evgen->regs[this_attr->address].reg_id = this_attr->address;
- iio_evgen->regs[this_attr->address].reg_data = event;
-
- if (iio_evgen->enabled[this_attr->address])
- handle_nested_irq(iio_evgen->base + this_attr->address);
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(poke_ev0, S_IWUSR, NULL, &iio_evgen_poke, 0);
-static IIO_DEVICE_ATTR(poke_ev1, S_IWUSR, NULL, &iio_evgen_poke, 1);
-static IIO_DEVICE_ATTR(poke_ev2, S_IWUSR, NULL, &iio_evgen_poke, 2);
-static IIO_DEVICE_ATTR(poke_ev3, S_IWUSR, NULL, &iio_evgen_poke, 3);
-static IIO_DEVICE_ATTR(poke_ev4, S_IWUSR, NULL, &iio_evgen_poke, 4);
-static IIO_DEVICE_ATTR(poke_ev5, S_IWUSR, NULL, &iio_evgen_poke, 5);
-static IIO_DEVICE_ATTR(poke_ev6, S_IWUSR, NULL, &iio_evgen_poke, 6);
-static IIO_DEVICE_ATTR(poke_ev7, S_IWUSR, NULL, &iio_evgen_poke, 7);
-static IIO_DEVICE_ATTR(poke_ev8, S_IWUSR, NULL, &iio_evgen_poke, 8);
-static IIO_DEVICE_ATTR(poke_ev9, S_IWUSR, NULL, &iio_evgen_poke, 9);
-
-static struct attribute *iio_evgen_attrs[] = {
- &iio_dev_attr_poke_ev0.dev_attr.attr,
- &iio_dev_attr_poke_ev1.dev_attr.attr,
- &iio_dev_attr_poke_ev2.dev_attr.attr,
- &iio_dev_attr_poke_ev3.dev_attr.attr,
- &iio_dev_attr_poke_ev4.dev_attr.attr,
- &iio_dev_attr_poke_ev5.dev_attr.attr,
- &iio_dev_attr_poke_ev6.dev_attr.attr,
- &iio_dev_attr_poke_ev7.dev_attr.attr,
- &iio_dev_attr_poke_ev8.dev_attr.attr,
- &iio_dev_attr_poke_ev9.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group iio_evgen_group = {
- .attrs = iio_evgen_attrs,
-};
-
-static const struct attribute_group *iio_evgen_groups[] = {
- &iio_evgen_group,
- NULL
-};
-
-static struct device iio_evgen_dev = {
- .bus = &iio_bus_type,
- .groups = iio_evgen_groups,
- .release = &iio_evgen_release,
-};
-
-static __init int iio_dummy_evgen_init(void)
-{
- int ret = iio_dummy_evgen_create();
-
- if (ret < 0)
- return ret;
- device_initialize(&iio_evgen_dev);
- dev_set_name(&iio_evgen_dev, "iio_evgen");
- return device_add(&iio_evgen_dev);
-}
-module_init(iio_dummy_evgen_init);
-
-static __exit void iio_dummy_evgen_exit(void)
-{
- device_unregister(&iio_evgen_dev);
-}
-module_exit(iio_dummy_evgen_exit);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("IIO dummy driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio_dummy_evgen.h b/drivers/staging/iio/iio_dummy_evgen.h
deleted file mode 100644
index d044b946e74a..000000000000
--- a/drivers/staging/iio/iio_dummy_evgen.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _IIO_DUMMY_EVGEN_H_
-#define _IIO_DUMMY_EVGEN_H_
-
-struct iio_dummy_regs {
- u32 reg_id;
- u32 reg_data;
-};
-
-struct iio_dummy_regs *iio_dummy_evgen_get_regs(int irq);
-int iio_dummy_evgen_get_irq(void);
-void iio_dummy_evgen_release_irq(int irq);
-
-#endif /* _IIO_DUMMY_EVGEN_H_ */
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
deleted file mode 100644
index 381f90ff468a..000000000000
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ /dev/null
@@ -1,748 +0,0 @@
-/**
- * Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * A reference industrial I/O driver to illustrate the functionality available.
- *
- * There are numerous real drivers to illustrate the finer points.
- * The purpose of this driver is to provide a driver with far more comments
- * and explanatory notes than any 'real' driver would have.
- * Anyone starting out writing an IIO driver should first make sure they
- * understand all of this driver except those bits specifically marked
- * as being present to allow us to 'fake' the presence of hardware.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-#include "iio_simple_dummy.h"
-
-/*
- * A few elements needed to fake a bus for this driver
- * Note instances parameter controls how many of these
- * dummy devices are registered.
- */
-static unsigned instances = 1;
-module_param(instances, uint, 0);
-
-/* Pointer array used to fake bus elements */
-static struct iio_dev **iio_dummy_devs;
-
-/* Fake a name for the part number, usually obtained from the id table */
-static const char *iio_dummy_part_number = "iio_dummy_part_no";
-
-/**
- * struct iio_dummy_accel_calibscale - realworld to register mapping
- * @val: first value in read_raw - here integer part.
- * @val2: second value in read_raw etc - here micro part.
- * @regval: register value - magic device specific numbers.
- */
-struct iio_dummy_accel_calibscale {
- int val;
- int val2;
- int regval; /* what would be written to hardware */
-};
-
-static const struct iio_dummy_accel_calibscale dummy_scales[] = {
- { 0, 100, 0x8 }, /* 0.000100 */
- { 0, 133, 0x7 }, /* 0.000133 */
- { 733, 13, 0x9 }, /* 733.000013 */
-};
-
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
-
-/*
- * simple event - triggered when value rises above
- * a threshold
- */
-static const struct iio_event_spec iio_dummy_event = {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
-};
-
-/*
- * simple step detect event - triggered when a step is detected
- */
-static const struct iio_event_spec step_detect_event = {
- .type = IIO_EV_TYPE_CHANGE,
- .dir = IIO_EV_DIR_NONE,
- .mask_separate = BIT(IIO_EV_INFO_ENABLE),
-};
-
-/*
- * simple transition event - triggered when the reported running confidence
- * value rises above a threshold value
- */
-static const struct iio_event_spec iio_running_event = {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
-};
-
-/*
- * simple transition event - triggered when the reported walking confidence
- * value falls under a threshold value
- */
-static const struct iio_event_spec iio_walking_event = {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
-};
-#endif
-
-/*
- * iio_dummy_channels - Description of available channels
- *
- * This array of structures tells the IIO core about what the device
- * actually provides for a given channel.
- */
-static const struct iio_chan_spec iio_dummy_channels[] = {
- /* indexed ADC channel in_voltage0_raw etc */
- {
- .type = IIO_VOLTAGE,
- /* Channel has a numeric index of 0 */
- .indexed = 1,
- .channel = 0,
- /* What other information is available? */
- .info_mask_separate =
- /*
- * in_voltage0_raw
- * Raw (unscaled no bias removal etc) measurement
- * from the device.
- */
- BIT(IIO_CHAN_INFO_RAW) |
- /*
- * in_voltage0_offset
- * Offset for userspace to apply prior to scale
- * when converting to standard units (microvolts)
- */
- BIT(IIO_CHAN_INFO_OFFSET) |
- /*
- * in_voltage0_scale
- * Multiplier for userspace to apply post offset
- * when converting to standard units (microvolts)
- */
- BIT(IIO_CHAN_INFO_SCALE),
- /*
- * sampling_frequency
- * The frequency in Hz at which the channels are sampled
- */
- .info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- /* The ordering of elements in the buffer via an enum */
- .scan_index = voltage0,
- .scan_type = { /* Description of storage in buffer */
- .sign = 'u', /* unsigned */
- .realbits = 13, /* 13 bits */
- .storagebits = 16, /* 16 bits used for storage */
- .shift = 0, /* zero shift */
- },
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .event_spec = &iio_dummy_event,
- .num_event_specs = 1,
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
- },
- /* Differential ADC channel in_voltage1-voltage2_raw etc*/
- {
- .type = IIO_VOLTAGE,
- .differential = 1,
- /*
- * Indexing for differential channels uses channel
- * for the positive part, channel2 for the negative.
- */
- .indexed = 1,
- .channel = 1,
- .channel2 = 2,
- /*
- * in_voltage1-voltage2_raw
- * Raw (unscaled no bias removal etc) measurement
- * from the device.
- */
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- /*
- * in_voltage-voltage_scale
- * Shared version of scale - shared by differential
- * input channels of type IIO_VOLTAGE.
- */
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- /*
- * sampling_frequency
- * The frequency in Hz at which the channels are sampled
- */
- .scan_index = diffvoltage1m2,
- .scan_type = { /* Description of storage in buffer */
- .sign = 's', /* signed */
- .realbits = 12, /* 12 bits */
- .storagebits = 16, /* 16 bits used for storage */
- .shift = 0, /* zero shift */
- },
- },
- /* Differential ADC channel in_voltage3-voltage4_raw etc*/
- {
- .type = IIO_VOLTAGE,
- .differential = 1,
- .indexed = 1,
- .channel = 3,
- .channel2 = 4,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .scan_index = diffvoltage3m4,
- .scan_type = {
- .sign = 's',
- .realbits = 11,
- .storagebits = 16,
- .shift = 0,
- },
- },
- /*
- * 'modified' (i.e. axis specified) acceleration channel
- * in_accel_z_raw
- */
- {
- .type = IIO_ACCEL,
- .modified = 1,
- /* Channel 2 is used for modifiers */
- .channel2 = IIO_MOD_X,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- /*
- * Internal bias and gain correction values. Applied
- * by the hardware or driver prior to userspace
- * seeing the readings. Typically part of hardware
- * calibration.
- */
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS),
- .info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .scan_index = accelx,
- .scan_type = { /* Description of storage in buffer */
- .sign = 's', /* signed */
- .realbits = 16, /* 16 bits */
- .storagebits = 16, /* 16 bits used for storage */
- .shift = 0, /* zero shift */
- },
- },
- /*
- * Convenience macro for timestamps. 4 is the index in
- * the buffer.
- */
- IIO_CHAN_SOFT_TIMESTAMP(4),
- /* DAC channel out_voltage0_raw */
- {
- .type = IIO_VOLTAGE,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .scan_index = -1, /* No buffer support */
- .output = 1,
- .indexed = 1,
- .channel = 0,
- },
- {
- .type = IIO_STEPS,
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_ENABLE) |
- BIT(IIO_CHAN_INFO_CALIBHEIGHT),
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .scan_index = -1, /* No buffer support */
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .event_spec = &step_detect_event,
- .num_event_specs = 1,
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
- },
- {
- .type = IIO_ACTIVITY,
- .modified = 1,
- .channel2 = IIO_MOD_RUNNING,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .scan_index = -1, /* No buffer support */
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .event_spec = &iio_running_event,
- .num_event_specs = 1,
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
- },
- {
- .type = IIO_ACTIVITY,
- .modified = 1,
- .channel2 = IIO_MOD_WALKING,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .scan_index = -1, /* No buffer support */
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .event_spec = &iio_walking_event,
- .num_event_specs = 1,
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
- },
-};
-
-/**
- * iio_dummy_read_raw() - data read function.
- * @indio_dev: the struct iio_dev associated with this device instance
- * @chan: the channel whose data is to be read
- * @val: first element of returned value (typically INT)
- * @val2: second element of returned value (typically MICRO)
- * @mask: what we actually want to read as per the info_mask_*
- * in iio_chan_spec.
- */
-static int iio_dummy_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
- int ret = -EINVAL;
-
- mutex_lock(&st->lock);
- switch (mask) {
- case IIO_CHAN_INFO_RAW: /* magic value - channel value read */
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->output) {
- /* Set integer part to cached value */
- *val = st->dac_val;
- ret = IIO_VAL_INT;
- } else if (chan->differential) {
- if (chan->channel == 1)
- *val = st->differential_adc_val[0];
- else
- *val = st->differential_adc_val[1];
- ret = IIO_VAL_INT;
- } else {
- *val = st->single_ended_adc_val;
- ret = IIO_VAL_INT;
- }
- break;
- case IIO_ACCEL:
- *val = st->accel_val;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
- }
- break;
- case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_STEPS:
- *val = st->steps;
- ret = IIO_VAL_INT;
- break;
- case IIO_ACTIVITY:
- switch (chan->channel2) {
- case IIO_MOD_RUNNING:
- *val = st->activity_running;
- ret = IIO_VAL_INT;
- break;
- case IIO_MOD_WALKING:
- *val = st->activity_walking;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- /* only single ended adc -> 7 */
- *val = 7;
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- switch (chan->differential) {
- case 0:
- /* only single ended adc -> 0.001333 */
- *val = 0;
- *val2 = 1333;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case 1:
- /* all differential adc channels ->
- * 0.000001344 */
- *val = 0;
- *val2 = 1344;
- ret = IIO_VAL_INT_PLUS_NANO;
- }
- break;
- default:
- break;
- }
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- /* only the acceleration axis - read from cache */
- *val = st->accel_calibbias;
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
- *val = st->accel_calibscale->val;
- *val2 = st->accel_calibscale->val2;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- *val = 3;
- *val2 = 33;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
- case IIO_CHAN_INFO_ENABLE:
- switch (chan->type) {
- case IIO_STEPS:
- *val = st->steps_enabled;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
- }
- break;
- case IIO_CHAN_INFO_CALIBHEIGHT:
- switch (chan->type) {
- case IIO_STEPS:
- *val = st->height;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
- }
- break;
-
- default:
- break;
- }
- mutex_unlock(&st->lock);
- return ret;
-}
-
-/**
- * iio_dummy_write_raw() - data write function.
- * @indio_dev: the struct iio_dev associated with this device instance
- * @chan: the channel whose data is to be written
- * @val: first element of value to set (typically INT)
- * @val2: second element of value to set (typically MICRO)
- * @mask: what we actually want to write as per the info_mask_*
- * in iio_chan_spec.
- *
- * Note that all raw writes are assumed IIO_VAL_INT and info mask elements
- * are assumed to be IIO_INT_PLUS_MICRO unless the callback write_raw_get_fmt
- * in struct iio_info is provided by the driver.
- */
-static int iio_dummy_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- int i;
- int ret = 0;
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->output == 0)
- return -EINVAL;
-
- /* Locking not required as writing single value */
- mutex_lock(&st->lock);
- st->dac_val = val;
- mutex_unlock(&st->lock);
- return 0;
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_STEPS:
- mutex_lock(&st->lock);
- st->steps = val;
- mutex_unlock(&st->lock);
- return 0;
- case IIO_ACTIVITY:
- if (val < 0)
- val = 0;
- if (val > 100)
- val = 100;
- switch (chan->channel2) {
- case IIO_MOD_RUNNING:
- st->activity_running = val;
- return 0;
- case IIO_MOD_WALKING:
- st->activity_walking = val;
- return 0;
- default:
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_CALIBSCALE:
- mutex_lock(&st->lock);
- /* Compare against table - hard matching here */
- for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
- if (val == dummy_scales[i].val &&
- val2 == dummy_scales[i].val2)
- break;
- if (i == ARRAY_SIZE(dummy_scales))
- ret = -EINVAL;
- else
- st->accel_calibscale = &dummy_scales[i];
- mutex_unlock(&st->lock);
- return ret;
- case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&st->lock);
- st->accel_calibbias = val;
- mutex_unlock(&st->lock);
- return 0;
- case IIO_CHAN_INFO_ENABLE:
- switch (chan->type) {
- case IIO_STEPS:
- mutex_lock(&st->lock);
- st->steps_enabled = val;
- mutex_unlock(&st->lock);
- return 0;
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_CALIBHEIGHT:
- switch (chan->type) {
- case IIO_STEPS:
- st->height = val;
- return 0;
- default:
- return -EINVAL;
- }
-
- default:
- return -EINVAL;
- }
-}
-
-/*
- * Device type specific information.
- */
-static const struct iio_info iio_dummy_info = {
- .driver_module = THIS_MODULE,
- .read_raw = &iio_dummy_read_raw,
- .write_raw = &iio_dummy_write_raw,
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- .read_event_config = &iio_simple_dummy_read_event_config,
- .write_event_config = &iio_simple_dummy_write_event_config,
- .read_event_value = &iio_simple_dummy_read_event_value,
- .write_event_value = &iio_simple_dummy_write_event_value,
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
-};
-
-/**
- * iio_dummy_init_device() - device instance specific init
- * @indio_dev: the iio device structure
- *
- * Most drivers have one of these to set up default values,
- * reset the device to known state etc.
- */
-static int iio_dummy_init_device(struct iio_dev *indio_dev)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- st->dac_val = 0;
- st->single_ended_adc_val = 73;
- st->differential_adc_val[0] = 33;
- st->differential_adc_val[1] = -34;
- st->accel_val = 34;
- st->accel_calibbias = -7;
- st->accel_calibscale = &dummy_scales[0];
- st->steps = 47;
- st->activity_running = 98;
- st->activity_walking = 4;
-
- return 0;
-}
-
-/**
- * iio_dummy_probe() - device instance probe
- * @index: an id number for this instance.
- *
- * Arguments are bus type specific.
- * I2C: iio_dummy_probe(struct i2c_client *client,
- * const struct i2c_device_id *id)
- * SPI: iio_dummy_probe(struct spi_device *spi)
- */
-static int iio_dummy_probe(int index)
-{
- int ret;
- struct iio_dev *indio_dev;
- struct iio_dummy_state *st;
-
- /*
- * Allocate an IIO device.
- *
- * This structure contains all generic state
- * information about the device instance.
- * It also has a region (accessed by iio_priv()
- * for chip specific state information.
- */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (!indio_dev) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- st = iio_priv(indio_dev);
- mutex_init(&st->lock);
-
- iio_dummy_init_device(indio_dev);
- /*
- * With hardware: Set the parent device.
- * indio_dev->dev.parent = &spi->dev;
- * indio_dev->dev.parent = &client->dev;
- */
-
- /*
- * Make the iio_dev struct available to remove function.
- * Bus equivalents
- * i2c_set_clientdata(client, indio_dev);
- * spi_set_drvdata(spi, indio_dev);
- */
- iio_dummy_devs[index] = indio_dev;
-
- /*
- * Set the device name.
- *
- * This is typically a part number and obtained from the module
- * id table.
- * e.g. for i2c and spi:
- * indio_dev->name = id->name;
- * indio_dev->name = spi_get_device_id(spi)->name;
- */
- indio_dev->name = iio_dummy_part_number;
-
- /* Provide description of available channels */
- indio_dev->channels = iio_dummy_channels;
- indio_dev->num_channels = ARRAY_SIZE(iio_dummy_channels);
-
- /*
- * Provide device type specific interface functions and
- * constant data.
- */
- indio_dev->info = &iio_dummy_info;
-
- /* Specify that device provides sysfs type interfaces */
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_simple_dummy_events_register(indio_dev);
- if (ret < 0)
- goto error_free_device;
-
- ret = iio_simple_dummy_configure_buffer(indio_dev);
- if (ret < 0)
- goto error_unregister_events;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_unconfigure_buffer;
-
- return 0;
-error_unconfigure_buffer:
- iio_simple_dummy_unconfigure_buffer(indio_dev);
-error_unregister_events:
- iio_simple_dummy_events_unregister(indio_dev);
-error_free_device:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
-}
-
-/**
- * iio_dummy_remove() - device instance removal function
- * @index: device index.
- *
- * Parameters follow those of iio_dummy_probe for buses.
- */
-static void iio_dummy_remove(int index)
-{
- /*
- * Get a pointer to the device instance iio_dev structure
- * from the bus subsystem. E.g.
- * struct iio_dev *indio_dev = i2c_get_clientdata(client);
- * struct iio_dev *indio_dev = spi_get_drvdata(spi);
- */
- struct iio_dev *indio_dev = iio_dummy_devs[index];
-
- /* Unregister the device */
- iio_device_unregister(indio_dev);
-
- /* Device specific code to power down etc */
-
- /* Buffered capture related cleanup */
- iio_simple_dummy_unconfigure_buffer(indio_dev);
-
- iio_simple_dummy_events_unregister(indio_dev);
-
- /* Free all structures */
- iio_device_free(indio_dev);
-}
-
-/**
- * iio_dummy_init() - device driver registration
- *
- * Varies depending on bus type of the device. As there is no device
- * here, call probe directly. For information on device registration
- * i2c:
- * Documentation/i2c/writing-clients
- * spi:
- * Documentation/spi/spi-summary
- */
-static __init int iio_dummy_init(void)
-{
- int i, ret;
-
- if (instances > 10) {
- instances = 1;
- return -EINVAL;
- }
-
- /* Fake a bus */
- iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs),
- GFP_KERNEL);
- /* Here we have no actual device so call probe */
- for (i = 0; i < instances; i++) {
- ret = iio_dummy_probe(i);
- if (ret < 0)
- goto error_remove_devs;
- }
- return 0;
-
-error_remove_devs:
- while (i--)
- iio_dummy_remove(i);
-
- kfree(iio_dummy_devs);
- return ret;
-}
-module_init(iio_dummy_init);
-
-/**
- * iio_dummy_exit() - device driver removal
- *
- * Varies depending on bus type of the device.
- * As there is no device here, call remove directly.
- */
-static __exit void iio_dummy_exit(void)
-{
- int i;
-
- for (i = 0; i < instances; i++)
- iio_dummy_remove(i);
- kfree(iio_dummy_devs);
-}
-module_exit(iio_dummy_exit);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("IIO dummy driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio_simple_dummy.h b/drivers/staging/iio/iio_simple_dummy.h
deleted file mode 100644
index 8d00224e6fad..000000000000
--- a/drivers/staging/iio/iio_simple_dummy.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Join together the various functionality of iio_simple_dummy driver
- */
-
-#ifndef _IIO_SIMPLE_DUMMY_H_
-#define _IIO_SIMPLE_DUMMY_H_
-#include <linux/kernel.h>
-
-struct iio_dummy_accel_calibscale;
-struct iio_dummy_regs;
-
-/**
- * struct iio_dummy_state - device instance specific state.
- * @dac_val: cache for dac value
- * @single_ended_adc_val: cache for single ended adc value
- * @differential_adc_val: cache for differential adc value
- * @accel_val: cache for acceleration value
- * @accel_calibbias: cache for acceleration calibbias
- * @accel_calibscale: cache for acceleration calibscale
- * @lock: lock to ensure state is consistent
- * @event_irq: irq number for event line (faked)
- * @event_val: cache for event threshold value
- * @event_en: cache of whether event is enabled
- */
-struct iio_dummy_state {
- int dac_val;
- int single_ended_adc_val;
- int differential_adc_val[2];
- int accel_val;
- int accel_calibbias;
- int activity_running;
- int activity_walking;
- const struct iio_dummy_accel_calibscale *accel_calibscale;
- struct mutex lock;
- struct iio_dummy_regs *regs;
- int steps_enabled;
- int steps;
- int height;
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
- int event_irq;
- int event_val;
- bool event_en;
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
-};
-
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
-
-struct iio_dev;
-
-int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir);
-
-int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int state);
-
-int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info, int *val,
- int *val2);
-
-int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info, int val,
- int val2);
-
-int iio_simple_dummy_events_register(struct iio_dev *indio_dev);
-void iio_simple_dummy_events_unregister(struct iio_dev *indio_dev);
-
-#else /* Stubs for when events are disabled at compile time */
-
-static inline int
-iio_simple_dummy_events_register(struct iio_dev *indio_dev)
-{
- return 0;
-};
-
-static inline void
-iio_simple_dummy_events_unregister(struct iio_dev *indio_dev)
-{ };
-
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS*/
-
-/**
- * enum iio_simple_dummy_scan_elements - scan index enum
- * @voltage0: the single ended voltage channel
- * @diffvoltage1m2: first differential channel
- * @diffvoltage3m4: second differential channel
- * @accelx: acceleration channel
- *
- * Enum provides convenient numbering for the scan index.
- */
-enum iio_simple_dummy_scan_elements {
- voltage0,
- diffvoltage1m2,
- diffvoltage3m4,
- accelx,
-};
-
-#ifdef CONFIG_IIO_SIMPLE_DUMMY_BUFFER
-int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev);
-void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev);
-#else
-static inline int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
-{
- return 0;
-};
-
-static inline
-void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
-{};
-
-#endif /* CONFIG_IIO_SIMPLE_DUMMY_BUFFER */
-#endif /* _IIO_SIMPLE_DUMMY_H_ */
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
deleted file mode 100644
index 00ed7745f3c5..000000000000
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/**
- * Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Buffer handling elements of industrial I/O reference driver.
- * Uses the kfifo buffer.
- *
- * To test without hardware use the sysfs trigger.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/bitmap.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/kfifo_buf.h>
-
-#include "iio_simple_dummy.h"
-
-/* Some fake data */
-
-static const s16 fakedata[] = {
- [voltage0] = 7,
- [diffvoltage1m2] = -33,
- [diffvoltage3m4] = -2,
- [accelx] = 344,
-};
-
-/**
- * iio_simple_dummy_trigger_h() - the trigger handler function
- * @irq: the interrupt number
- * @p: private data - always a pointer to the poll func.
- *
- * This is the guts of buffered capture. On a trigger event occurring,
- * if the pollfunc is attached then this handler is called as a threaded
- * interrupt (and hence may sleep). It is responsible for grabbing data
- * from the device and pushing it into the associated buffer.
- */
-static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- int len = 0;
- u16 *data;
-
- data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!data)
- goto done;
-
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
- /*
- * Three common options here:
- * hardware scans: certain combinations of channels make
- * up a fast read. The capture will consist of all of them.
- * Hence we just call the grab data function and fill the
- * buffer without processing.
- * software scans: can be considered to be random access
- * so efficient reading is just a case of minimal bus
- * transactions.
- * software culled hardware scans:
- * occasionally a driver may process the nearest hardware
- * scan to avoid storing elements that are not desired. This
- * is the fiddliest option by far.
- * Here let's pretend we have random access. And the values are
- * in the constant table fakedata.
- */
- int i, j;
-
- for (i = 0, j = 0;
- i < bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
- i++, j++) {
- j = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength, j);
- /* random access read from the 'device' */
- data[i] = fakedata[j];
- len += 2;
- }
- }
-
- iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());
-
- kfree(data);
-
-done:
- /*
- * Tell the core we are done with this trigger and ready for the
- * next one.
- */
- iio_trigger_notify_done(indio_dev->trig);
-
- return IRQ_HANDLED;
-}
-
-static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = {
- /*
- * iio_triggered_buffer_postenable:
- * Generic function that simply attaches the pollfunc to the trigger.
- * Replace this to mess with hardware state before we attach the
- * trigger.
- */
- .postenable = &iio_triggered_buffer_postenable,
- /*
- * iio_triggered_buffer_predisable:
- * Generic function that simply detaches the pollfunc from the trigger.
- * Replace this to put hardware state back again after the trigger is
- * detached but before userspace knows we have disabled the ring.
- */
- .predisable = &iio_triggered_buffer_predisable,
-};
-
-int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
-{
- int ret;
- struct iio_buffer *buffer;
-
- /* Allocate a buffer to use - here a kfifo */
- buffer = iio_kfifo_allocate();
- if (!buffer) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- /* Enable timestamps by default */
- buffer->scan_timestamp = true;
-
- /*
- * Tell the core what device type specific functions should
- * be run on either side of buffer capture enable / disable.
- */
- indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;
-
- /*
- * Configure a polling function.
- * When a trigger event with this polling function connected
- * occurs, this function is run. Typically this grabs data
- * from the device.
- *
- * NULL for the bottom half. This is normally implemented only if we
- * either want to ping a capture now pin (no sleeping) or grab
- * a timestamp as close as possible to a data ready trigger firing.
- *
- * IRQF_ONESHOT ensures irqs are masked such that only one instance
- * of the handler can run at a time.
- *
- * "iio_simple_dummy_consumer%d" formatting string for the irq 'name'
- * as seen under /proc/interrupts. Remaining parameters as per printk.
- */
- indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
- &iio_simple_dummy_trigger_h,
- IRQF_ONESHOT,
- indio_dev,
- "iio_simple_dummy_consumer%d",
- indio_dev->id);
-
- if (!indio_dev->pollfunc) {
- ret = -ENOMEM;
- goto error_free_buffer;
- }
-
- /*
- * Notify the core that this device is capable of buffered capture
- * driven by a trigger.
- */
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
-
- return 0;
-
-error_free_buffer:
- iio_kfifo_free(indio_dev->buffer);
-error_ret:
- return ret;
-}
-
-/**
- * iio_simple_dummy_unconfigure_buffer() - release buffer resources
- * @indio_dev: device instance state
- */
-void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
-{
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_kfifo_free(indio_dev->buffer);
-}
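For reference while reading the removal above, the core of the deleted buffer code is small enough to sketch in isolation: walk the active scan mask, pack one datum per enabled channel, and push the result with a timestamp. The sketch below only mirrors the IIO calls already used in the deleted file; fake_read() is a hypothetical stand-in for a real bus transaction, not an existing helper.

/*
 * Illustrative sketch of the scan-mask walk done by
 * iio_simple_dummy_trigger_h() above (relies on <linux/iio/iio.h>,
 * <linux/iio/trigger_consumer.h>, <linux/bitops.h> and <linux/slab.h>).
 * fake_read() is hypothetical; the IIO calls mirror the deleted code.
 */
static irqreturn_t sketch_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	u16 *data;
	int bit, i = 0;

	data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (!data)
		goto done;

	/* one read per enabled scan element, packed in scan order */
	for_each_set_bit(bit, indio_dev->active_scan_mask,
			 indio_dev->masklength)
		data[i++] = fake_read(bit);

	iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());
	kfree(data);
done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}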
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
deleted file mode 100644
index 73108baf80ad..000000000000
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/**
- * Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Event handling elements of industrial I/O reference driver.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include "iio_simple_dummy.h"
-
-/* Evgen 'fakes' interrupt events for this example */
-#include "iio_dummy_evgen.h"
-
-/**
- * iio_simple_dummy_read_event_config() - is event enabled?
- * @indio_dev: the device instance data
- * @chan: channel for the event whose state is being queried
- * @type: type of the event whose state is being queried
- * @dir: direction of the event whose state is being queried
- *
- * This function would normally query the relevant registers or a cache to
- * discover if the event generation is enabled on the device.
- */
-int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- return st->event_en;
-}
-
-/**
- * iio_simple_dummy_write_event_config() - set whether event is enabled
- * @indio_dev: the device instance data
- * @chan: channel for the event whose state is being set
- * @type: type of the event whose state is being set
- * @dir: direction of the event whose state is being set
- * @state: whether to enable or disable the device.
- *
- * This function would normally set the relevant registers on the devices
- * so that it generates the specified event. Here it just sets up a cached
- * value.
- */
-int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int state)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- /*
- * Deliberately over the top code splitting to illustrate
- * how this is done when multiple events exist.
- */
- switch (chan->type) {
- case IIO_VOLTAGE:
- switch (type) {
- case IIO_EV_TYPE_THRESH:
- if (dir == IIO_EV_DIR_RISING)
- st->event_en = state;
- else
- return -EINVAL;
- default:
- return -EINVAL;
- }
- break;
- case IIO_ACTIVITY:
- switch (type) {
- case IIO_EV_TYPE_THRESH:
- st->event_en = state;
- break;
- default:
- return -EINVAL;
- }
- break;
- case IIO_STEPS:
- switch (type) {
- case IIO_EV_TYPE_CHANGE:
- st->event_en = state;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * iio_simple_dummy_read_event_value() - get value associated with event
- * @indio_dev: device instance specific data
- * @chan: channel for the event whose value is being read
- * @type: type of the event whose value is being read
- * @dir: direction of the event whose value is being read
- * @info: info type of the event whose value is being read
- * @val: value for the event code.
- *
- * Many devices provide a large set of events of which only a subset may
- * be enabled at a time, with value registers whose meaning changes depending
- * on the event enabled. This often means that the driver must cache the values
- * associated with each possible event so that the right value is in place when
- * the enabled event is changed.
- */
-int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- *val = st->event_val;
-
- return IIO_VAL_INT;
-}
-
-/**
- * iio_simple_dummy_write_event_value() - set value associate with event
- * @indio_dev: device instance specific data
- * @chan: channel for the event whose value is being set
- * @type: type of the event whose value is being set
- * @dir: direction of the event whose value is being set
- * @info: info type of the event whose value is being set
- * @val: the value to be set.
- */
-int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- st->event_val = val;
-
- return 0;
-}
-
-/**
- * iio_simple_dummy_event_handler() - identify and pass on event
- * @irq: irq of event line
- * @private: pointer to device instance state.
- *
- * This handler is responsible for querying the device to find out what
- * event occurred and for then pushing that event towards userspace.
- * Here only one event occurs so we push that directly on with locally
- * grabbed timestamp.
- */
-static irqreturn_t iio_simple_dummy_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- dev_dbg(&indio_dev->dev, "id %x event %x\n",
- st->regs->reg_id, st->regs->reg_data);
-
- switch (st->regs->reg_data) {
- case 0:
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_VOLTAGE, 0, 0,
- IIO_EV_DIR_RISING,
- IIO_EV_TYPE_THRESH, 0, 0, 0),
- iio_get_time_ns());
- break;
- case 1:
- if (st->activity_running > st->event_val)
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_ACTIVITY, 0,
- IIO_MOD_RUNNING,
- IIO_EV_DIR_RISING,
- IIO_EV_TYPE_THRESH,
- 0, 0, 0),
- iio_get_time_ns());
- break;
- case 2:
- if (st->activity_walking < st->event_val)
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_ACTIVITY, 0,
- IIO_MOD_WALKING,
- IIO_EV_DIR_FALLING,
- IIO_EV_TYPE_THRESH,
- 0, 0, 0),
- iio_get_time_ns());
- break;
- case 3:
- iio_push_event(indio_dev,
- IIO_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
- IIO_EV_DIR_NONE,
- IIO_EV_TYPE_CHANGE, 0, 0, 0),
- iio_get_time_ns());
- break;
- default:
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * iio_simple_dummy_events_register() - setup interrupt handling for events
- * @indio_dev: device instance data
- *
- * This function requests the threaded interrupt to handle the events.
- * Normally the irq is a hardware interrupt and the number comes
- * from board configuration files. Here we get it from a companion
- * module that fakes the interrupt for us. Note that the module in
- * no way forms part of this example. Just assume that events magically
- * appear via the provided interrupt.
- */
-int iio_simple_dummy_events_register(struct iio_dev *indio_dev)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
- int ret;
-
- /* Fire up event source - normally not present */
- st->event_irq = iio_dummy_evgen_get_irq();
- if (st->event_irq < 0) {
- ret = st->event_irq;
- goto error_ret;
- }
- st->regs = iio_dummy_evgen_get_regs(st->event_irq);
-
- ret = request_threaded_irq(st->event_irq,
- NULL,
- &iio_simple_dummy_event_handler,
- IRQF_ONESHOT,
- "iio_simple_event",
- indio_dev);
- if (ret < 0)
- goto error_free_evgen;
- return 0;
-
-error_free_evgen:
- iio_dummy_evgen_release_irq(st->event_irq);
-error_ret:
- return ret;
-}
-
-/**
- * iio_simple_dummy_events_unregister() - tidy up interrupt handling on remove
- * @indio_dev: device instance data
- */
-void iio_simple_dummy_events_unregister(struct iio_dev *indio_dev)
-{
- struct iio_dummy_state *st = iio_priv(indio_dev);
-
- free_irq(st->event_irq, indio_dev);
- /* Not part of normal driver */
- iio_dummy_evgen_release_irq(st->event_irq);
-}
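The event file removed above reduces to two steps: request a threaded IRQ for the (faked) event line, then translate the event source's register value into an IIO event code pushed to userspace. Below is a minimal sketch of that push path, reusing only the IIO_EVENT_CODE/iio_push_event/request_threaded_irq calls already shown in the deleted code; names prefixed sketch_ are hypothetical.

/*
 * Sketch of the event path in the deleted iio_simple_dummy_events.c:
 * push a rising-threshold event on the voltage channel with a locally
 * grabbed timestamp. sketch_* names are hypothetical.
 */
static irqreturn_t sketch_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	iio_push_event(indio_dev,
		       IIO_EVENT_CODE(IIO_VOLTAGE, 0, 0,
				      IIO_EV_DIR_RISING,
				      IIO_EV_TYPE_THRESH, 0, 0, 0),
		       iio_get_time_ns());
	return IRQ_HANDLED;
}

static int sketch_events_register(struct iio_dev *indio_dev, int irq)
{
	/* threaded, one-shot IRQ, as in the deleted register function */
	return request_threaded_irq(irq, NULL, sketch_event_handler,
				    IRQF_ONESHOT, "sketch_event", indio_dev);
}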
diff --git a/drivers/staging/iio/impedance-analyzer/Kconfig b/drivers/staging/iio/impedance-analyzer/Kconfig
index dd97b6bb3fd0..841648847edf 100644
--- a/drivers/staging/iio/impedance-analyzer/Kconfig
+++ b/drivers/staging/iio/impedance-analyzer/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Impedance Converter, Network Analyzer drivers
#
@@ -10,7 +11,7 @@ config AD5933
select IIO_KFIFO_BUF
help
Say yes here to build support for Analog Devices Impedance Converter,
- Network Analyzer, AD5933/4, provides direct access via sysfs.
+ Network Analyzer, AD5933/4.
To compile this driver as a module, choose M here: the
module will be called ad5933.
diff --git a/drivers/staging/iio/impedance-analyzer/Makefile b/drivers/staging/iio/impedance-analyzer/Makefile
index 7604d786583e..b4e657a1ac18 100644
--- a/drivers/staging/iio/impedance-analyzer/Makefile
+++ b/drivers/staging/iio/impedance-analyzer/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Impedance Converter, Network Analyzer drivers
#
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index c18109c55497..85a4223295cd 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -1,34 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD5933 AD5934 Impedance Converter, Network Analyzer
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
-#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/sysfs.h>
+#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <asm/div64.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
-
-#include "ad5933.h"
+#include <linux/iio/sysfs.h>
/* AD5933/AD5934 Registers */
-#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */
-#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */
+#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 1 byte */
+#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 1 byte */
#define AD5933_REG_FREQ_START 0x82 /* R/W, 3 bytes */
#define AD5933_REG_FREQ_INC 0x85 /* R/W, 3 bytes */
#define AD5933_REG_INC_NUM 0x88 /* R/W, 2 bytes, 9 bit */
@@ -88,68 +84,45 @@
struct ad5933_state {
struct i2c_client *client;
- struct regulator *reg;
+ struct clk *mclk;
struct delayed_work work;
+ struct mutex lock; /* Protect sensor state */
unsigned long mclk_hz;
unsigned char ctrl_hb;
unsigned char ctrl_lb;
- unsigned range_avail[4];
+ unsigned int range_avail[4];
unsigned short vref_mv;
unsigned short settling_cycles;
unsigned short freq_points;
- unsigned freq_start;
- unsigned freq_inc;
- unsigned state;
- unsigned poll_time_jiffies;
+ unsigned int freq_start;
+ unsigned int freq_inc;
+ unsigned int state;
+ unsigned int poll_time_jiffies;
};
-static struct ad5933_platform_data ad5933_default_pdata = {
- .vref_mv = 3300,
-};
+#define AD5933_CHANNEL(_type, _extend_name, _info_mask_separate, _address, \
+ _scan_index, _realbits) { \
+ .type = (_type), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = (_info_mask_separate), \
+ .address = (_address), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ }, \
+}
static const struct iio_chan_spec ad5933_channels[] = {
- {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = AD5933_REG_TEMP_DATA,
- .scan_index = -1,
- .scan_type = {
- .sign = 's',
- .realbits = 14,
- .storagebits = 16,
- },
- }, { /* Ring Channels */
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "real",
- .address = AD5933_REG_REAL_DATA,
- .scan_index = 0,
- .scan_type = {
- .sign = 's',
- .realbits = 16,
- .storagebits = 16,
- },
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "imag",
- .address = AD5933_REG_IMAG_DATA,
- .scan_index = 1,
- .scan_type = {
- .sign = 's',
- .realbits = 16,
- .storagebits = 16,
- },
- },
+ AD5933_CHANNEL(IIO_TEMP, NULL, BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE), AD5933_REG_TEMP_DATA, -1, 14),
+ /* Ring Channels */
+ AD5933_CHANNEL(IIO_VOLTAGE, "real", 0, AD5933_REG_REAL_DATA, 0, 16),
+ AD5933_CHANNEL(IIO_VOLTAGE, "imag", 0, AD5933_REG_IMAG_DATA, 1, 16),
};
-static int ad5933_i2c_write(struct i2c_client *client,
- u8 reg, u8 len, u8 *data)
+static int ad5933_i2c_write(struct i2c_client *client, u8 reg, u8 len, u8 *data)
{
int ret;
@@ -163,8 +136,7 @@ static int ad5933_i2c_write(struct i2c_client *client,
return 0;
}
-static int ad5933_i2c_read(struct i2c_client *client,
- u8 reg, u8 len, u8 *data)
+static int ad5933_i2c_read(struct i2c_client *client, u8 reg, u8 len, u8 *data)
{
int ret;
@@ -214,7 +186,7 @@ static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
}
static int ad5933_set_freq(struct ad5933_state *st,
- unsigned reg, unsigned long freq)
+ unsigned int reg, unsigned long freq)
{
unsigned long long freqreg;
union {
@@ -222,7 +194,7 @@ static int ad5933_set_freq(struct ad5933_state *st,
u8 d8[4];
} dat;
- freqreg = (u64) freq * (u64) (1 << 27);
+ freqreg = (u64)freq * (u64)(1 << 27);
do_div(freqreg, st->mclk_hz / 4);
switch (reg) {
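The line reformatted just above encodes the AD5933 frequency-code relation code = f_out * 2^27 / (MCLK / 4), with do_div() performing the 64-bit division. A throwaway user-space check of that arithmetic (hypothetical, not driver code) could look like this:

/* Hypothetical user-space check of the frequency-code formula above:
 * code = f_out * 2^27 / (MCLK / 4). 16.776 MHz is the internal
 * oscillator frequency documented in the (removed) ad5933.h below.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ad5933_freq_code(uint64_t freq_hz, uint64_t mclk_hz)
{
	return (uint32_t)((freq_hz << 27) / (mclk_hz / 4));
}

int main(void)
{
	printf("30 kHz @ 16.776 MHz MCLK -> 0x%06x\n",
	       (unsigned int)ad5933_freq_code(30000, 16776000));
	return 0;
}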
@@ -261,7 +233,8 @@ static int ad5933_setup(struct ad5933_state *st)
dat = cpu_to_be16(st->settling_cycles);
ret = ad5933_i2c_write(st->client,
- AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
+ AD5933_REG_SETTLING_CYCLES,
+ 2, (u8 *)&dat);
if (ret < 0)
return ret;
@@ -274,11 +247,10 @@ static int ad5933_setup(struct ad5933_state *st)
static void ad5933_calc_out_ranges(struct ad5933_state *st)
{
int i;
- unsigned normalized_3v3[4] = {1980, 198, 383, 970};
+ unsigned int normalized_3v3[4] = {1980, 198, 383, 970};
for (i = 0; i < 4; i++)
st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
-
}
/*
@@ -286,8 +258,8 @@ static void ad5933_calc_out_ranges(struct ad5933_state *st)
*/
static ssize_t ad5933_show_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
@@ -299,24 +271,27 @@ static ssize_t ad5933_show_frequency(struct device *dev,
u8 d8[4];
} dat;
- mutex_lock(&indio_dev->mlock);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = ad5933_i2c_read(st->client, this_attr->address, 3, &dat.d8[1]);
- mutex_unlock(&indio_dev->mlock);
+
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
- freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4);
- do_div(freqreg, 1 << 27);
+ freqreg = (u64)freqreg * (u64)(st->mclk_hz / 4);
+ do_div(freqreg, BIT(27));
- return sprintf(buf, "%d\n", (int) freqreg);
+ return sprintf(buf, "%d\n", (int)freqreg);
}
static ssize_t ad5933_store_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
@@ -331,34 +306,37 @@ static ssize_t ad5933_store_frequency(struct device *dev,
if (val > AD5933_MAX_OUTPUT_FREQ_Hz)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = ad5933_set_freq(st, this_attr->address, val);
- mutex_unlock(&indio_dev->mlock);
+
+ iio_device_release_direct(indio_dev);
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(out_voltage0_freq_start, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_altvoltage0_frequency_start, 0644,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_START);
-static IIO_DEVICE_ATTR(out_voltage0_freq_increment, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_altvoltage0_frequency_increment, 0644,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_INC);
static ssize_t ad5933_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret = 0, len = 0;
- mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ mutex_lock(&st->lock);
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%u\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -386,14 +364,14 @@ static ssize_t ad5933_show(struct device *dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
static ssize_t ad5933_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
@@ -408,9 +386,13 @@ static ssize_t ad5933_store(struct device *dev,
return ret;
}
- mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ mutex_lock(&st->lock);
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
+ ret = -EINVAL;
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
@@ -418,7 +400,6 @@ static ssize_t ad5933_store(struct device *dev,
ret = ad5933_cmd(st, 0);
break;
}
- ret = -EINVAL;
break;
case AD5933_IN_PGA_GAIN:
if (sysfs_streq(buf, "1")) {
@@ -432,18 +413,19 @@ static ssize_t ad5933_store(struct device *dev,
ret = ad5933_cmd(st, 0);
break;
case AD5933_OUT_SETTLING_CYCLES:
- val = clamp(val, (u16)0, (u16)0x7FF);
+ val = clamp(val, (u16)0, (u16)0x7FC);
st->settling_cycles = val;
/* 2x, 4x handling, see datasheet */
- if (val > 511)
- val = (val >> 1) | (1 << 9);
- else if (val > 1022)
+ if (val > 1022)
val = (val >> 2) | (3 << 9);
+ else if (val > 511)
+ val = (val >> 1) | BIT(9);
dat = cpu_to_be16(val);
ret = ad5933_i2c_write(st->client,
- AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
+ AD5933_REG_SETTLING_CYCLES,
+ 2, (u8 *)&dat);
break;
case AD5933_FREQ_POINTS:
val = clamp(val, (u16)0, (u16)511);
@@ -457,52 +439,55 @@ static ssize_t ad5933_store(struct device *dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+
+ iio_device_release_direct(indio_dev);
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(out_voltage0_scale, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_altvoltage0_raw, 0644,
ad5933_show,
ad5933_store,
AD5933_OUT_RANGE);
-static IIO_DEVICE_ATTR(out_voltage0_scale_available, S_IRUGO,
+static IIO_DEVICE_ATTR(out_altvoltage0_scale_available, 0444,
ad5933_show,
NULL,
AD5933_OUT_RANGE_AVAIL);
-static IIO_DEVICE_ATTR(in_voltage0_scale, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(in_voltage0_scale, 0644,
ad5933_show,
ad5933_store,
AD5933_IN_PGA_GAIN);
-static IIO_DEVICE_ATTR(in_voltage0_scale_available, S_IRUGO,
+static IIO_DEVICE_ATTR(in_voltage0_scale_available, 0444,
ad5933_show,
NULL,
AD5933_IN_PGA_GAIN_AVAIL);
-static IIO_DEVICE_ATTR(out_voltage0_freq_points, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_altvoltage0_frequency_points, 0644,
ad5933_show,
ad5933_store,
AD5933_FREQ_POINTS);
-static IIO_DEVICE_ATTR(out_voltage0_settling_cycles, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_altvoltage0_settling_cycles, 0644,
ad5933_show,
ad5933_store,
AD5933_OUT_SETTLING_CYCLES);
-/* note:
+/*
+ * note:
* ideally we would handle the scale attributes via the iio_info
 * (read|write)_raw methods; however, this part is atypical since we
* don't create dedicated sysfs channel attributes for out0 and in0.
*/
static struct attribute *ad5933_attributes[] = {
- &iio_dev_attr_out_voltage0_scale.dev_attr.attr,
- &iio_dev_attr_out_voltage0_scale_available.dev_attr.attr,
- &iio_dev_attr_out_voltage0_freq_start.dev_attr.attr,
- &iio_dev_attr_out_voltage0_freq_increment.dev_attr.attr,
- &iio_dev_attr_out_voltage0_freq_points.dev_attr.attr,
- &iio_dev_attr_out_voltage0_settling_cycles.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_raw.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_scale_available.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency_start.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency_increment.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency_points.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_settling_cycles.dev_attr.attr,
&iio_dev_attr_in_voltage0_scale.dev_attr.attr,
&iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
NULL
@@ -524,11 +509,9 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- ret = -EBUSY;
- goto out;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = ad5933_cmd(st, AD5933_CTRL_MEASURE_TEMP);
if (ret < 0)
goto out;
@@ -537,11 +520,12 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
goto out;
ret = ad5933_i2c_read(st->client,
- AD5933_REG_TEMP_DATA, 2,
- (u8 *)&dat);
+ AD5933_REG_TEMP_DATA,
+ 2, (u8 *)&dat);
if (ret < 0)
goto out;
- mutex_unlock(&indio_dev->mlock);
+
+ iio_device_release_direct(indio_dev);
*val = sign_extend32(be16_to_cpu(dat), 13);
return IIO_VAL_INT;
@@ -553,14 +537,13 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
out:
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct(indio_dev);
return ret;
}
static const struct iio_info ad5933_info = {
- .read_raw = &ad5933_read_raw,
+ .read_raw = ad5933_read_raw,
.attrs = &ad5933_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad5933_ring_preenable(struct iio_dev *indio_dev)
@@ -568,7 +551,8 @@ static int ad5933_ring_preenable(struct iio_dev *indio_dev)
struct ad5933_state *st = iio_priv(indio_dev);
int ret;
- if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ if (bitmap_empty(indio_dev->active_scan_mask,
+ iio_get_masklength(indio_dev)))
return -EINVAL;
ret = ad5933_reset(st);
@@ -592,7 +576,8 @@ static int ad5933_ring_postenable(struct iio_dev *indio_dev)
{
struct ad5933_state *st = iio_priv(indio_dev);
- /* AD5933_CTRL_INIT_START_FREQ:
+ /*
+ * AD5933_CTRL_INIT_START_FREQ:
* High Q complex circuits require a long time to reach steady state.
* To facilitate the measurement of such impedances, this mode allows
* the user full control of the settling time requirement before
@@ -616,91 +601,76 @@ static int ad5933_ring_postdisable(struct iio_dev *indio_dev)
}
static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
- .preenable = &ad5933_ring_preenable,
- .postenable = &ad5933_ring_postenable,
- .postdisable = &ad5933_ring_postdisable,
+ .preenable = ad5933_ring_preenable,
+ .postenable = ad5933_ring_postenable,
+ .postdisable = ad5933_ring_postdisable,
};
-static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer;
-
- buffer = iio_kfifo_allocate();
- if (!buffer)
- return -ENOMEM;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- /* Ring buffer functions - here trigger setup related */
- indio_dev->setup_ops = &ad5933_ring_setup_ops;
-
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
- return 0;
-}
-
static void ad5933_work(struct work_struct *work)
{
struct ad5933_state *st = container_of(work,
struct ad5933_state, work.work);
struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
- signed short buf[2];
+ __be16 buf[2];
+ u16 val[2];
unsigned char status;
+ int ret;
- mutex_lock(&indio_dev->mlock);
if (st->state == AD5933_CTRL_INIT_START_FREQ) {
/* start sweep */
ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
st->state = AD5933_CTRL_START_SWEEP;
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
return;
}
- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ if (ret)
+ return;
if (status & AD5933_STAT_DATA_VALID) {
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
- ad5933_i2c_read(st->client,
- test_bit(1, indio_dev->active_scan_mask) ?
- AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
- scan_count * 2, (u8 *)buf);
+ iio_get_masklength(indio_dev));
+ ret = ad5933_i2c_read(st->client,
+ test_bit(1, indio_dev->active_scan_mask) ?
+ AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
+ scan_count * 2, (u8 *)buf);
+ if (ret)
+ return;
if (scan_count == 2) {
- buf[0] = be16_to_cpu(buf[0]);
- buf[1] = be16_to_cpu(buf[1]);
+ val[0] = be16_to_cpu(buf[0]);
+ val[1] = be16_to_cpu(buf[1]);
} else {
- buf[0] = be16_to_cpu(buf[0]);
+ val[0] = be16_to_cpu(buf[0]);
}
- iio_push_to_buffers(indio_dev, buf);
+ iio_push_to_buffers(indio_dev, val);
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
return;
}
if (status & AD5933_STAT_SWEEP_DONE) {
- /* last sample received - power down do nothing until
- * the ring enable is toggled */
+ /*
+		 * last sample received - power down; do
+ * nothing until the ring enable is toggled
+ */
ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
} else {
/* we just received a valid datum, move on to the next */
ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
schedule_delayed_work(&st->work, st->poll_time_jiffies);
}
-
- mutex_unlock(&indio_dev->mlock);
}
-static int ad5933_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ad5933_probe(struct i2c_client *client)
{
- int ret, voltage_uv = 0;
- struct ad5933_platform_data *pdata = client->dev.platform_data;
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ int ret;
struct ad5933_state *st;
struct iio_dev *indio_dev;
+ unsigned long ext_clk_hz = 0;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (!indio_dev)
@@ -710,24 +680,23 @@ static int ad5933_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
st->client = client;
- if (!pdata)
- pdata = &ad5933_default_pdata;
+ mutex_init(&st->lock);
- st->reg = devm_regulator_get(&client->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
- voltage_uv = regulator_get_voltage(st->reg);
- }
+ ret = devm_regulator_get_enable_read_voltage(&client->dev, "vdd");
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret, "failed to get vdd voltage\n");
+
+ st->vref_mv = ret / 1000;
- if (voltage_uv)
- st->vref_mv = voltage_uv / 1000;
- else
- st->vref_mv = pdata->vref_mv;
+ st->mclk = devm_clk_get_enabled(&client->dev, "mclk");
+ if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT)
+ return PTR_ERR(st->mclk);
- if (pdata->ext_clk_Hz) {
- st->mclk_hz = pdata->ext_clk_Hz;
+ if (!IS_ERR(st->mclk))
+ ext_clk_hz = clk_get_rate(st->mclk);
+
+ if (ext_clk_hz) {
+ st->mclk_hz = ext_clk_hz;
st->ctrl_lb = AD5933_CTRL_EXT_SYSCLK;
} else {
st->mclk_hz = AD5933_INT_OSC_FREQ_Hz;
@@ -738,67 +707,50 @@ static int ad5933_probe(struct i2c_client *client,
INIT_DELAYED_WORK(&st->work, ad5933_work);
st->poll_time_jiffies = msecs_to_jiffies(AD5933_POLL_TIME_ms);
- indio_dev->dev.parent = &client->dev;
indio_dev->info = &ad5933_info;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ad5933_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
- ret = ad5933_register_ring_funcs_and_init(indio_dev);
+ ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
+ &ad5933_ring_setup_ops);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad5933_setup(st);
if (ret)
- goto error_unreg_ring;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring;
-
- return 0;
-
-error_unreg_ring:
- iio_kfifo_free(indio_dev->buffer);
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad5933_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ad5933_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_kfifo_free(indio_dev->buffer);
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id ad5933_id[] = {
- { "ad5933", 0 },
- { "ad5934", 0 },
- {}
+ { "ad5933" },
+ { "ad5934" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad5933_id);
+static const struct of_device_id ad5933_of_match[] = {
+ { .compatible = "adi,ad5933" },
+ { .compatible = "adi,ad5934" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ad5933_of_match);
+
static struct i2c_driver ad5933_driver = {
.driver = {
.name = "ad5933",
+ .of_match_table = ad5933_of_match,
},
.probe = ad5933_probe,
- .remove = ad5933_remove,
.id_table = ad5933_id,
};
module_i2c_driver(ad5933_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD5933 Impedance Conv. Network Analyzer");
MODULE_LICENSE("GPL v2");
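One subtlety earlier in this file's diff is the settling-cycles encoding: counts above 511 are stored halved with the 2x multiplier flag (bit 9), and counts above 1022 quartered with the 4x flag (bits 10:9 = 3), which is why the patched code tests the larger bound first. A standalone sketch of that encoding, derived only from the logic visible in the hunk (register layout not checked against the datasheet):

/*
 * Sketch of the 2x/4x settling-cycles encoding as ordered by the
 * patched code above: check the 4x range before the 2x range so both
 * branches are reachable. Derived from the diff, not the datasheet.
 */
static unsigned short ad5933_encode_settling(unsigned short val)
{
	if (val > 1022)
		return (val >> 2) | (3 << 9);	/* 4x multiplier */
	if (val > 511)
		return (val >> 1) | (1 << 9);	/* 2x multiplier */
	return val;				/* 1x multiplier */
}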
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.h b/drivers/staging/iio/impedance-analyzer/ad5933.h
deleted file mode 100644
index b140e42d67cf..000000000000
--- a/drivers/staging/iio/impedance-analyzer/ad5933.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * AD5933 AD5934 Impedance Converter, Network Analyzer
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef IIO_ADC_AD5933_H_
-#define IIO_ADC_AD5933_H_
-
-/*
- * TODO: struct ad5933_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad5933_platform_data - platform specific data
- * @ext_clk_Hz: the external clock frequency in Hz, if not set
- * the driver uses the internal clock (16.776 MHz)
- * @vref_mv: the external reference voltage in millivolt
- */
-
-struct ad5933_platform_data {
- unsigned long ext_clk_Hz;
- unsigned short vref_mv;
-};
-
-#endif /* IIO_ADC_AD5933_H_ */
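With ad5933.h gone, the ext_clk_Hz and vref_mv platform-data fields it documented are instead derived at probe time from an optional "mclk" clock and the "vdd" supply, as the ad5933.c changes above show. A hedged sketch of that lookup in isolation (the helper name is hypothetical; the calls mirror the diff):

/*
 * Sketch of the probe-time replacement for the removed platform data:
 * reference voltage from the "vdd" supply, external clock rate from an
 * optional "mclk" clock. The calls mirror the ad5933.c hunk above;
 * sketch_get_refs() itself is hypothetical.
 */
static int sketch_get_refs(struct device *dev, unsigned short *vref_mv,
			   unsigned long *ext_clk_hz)
{
	struct clk *mclk;
	int ret;

	ret = devm_regulator_get_enable_read_voltage(dev, "vdd");
	if (ret < 0)
		return ret;
	*vref_mv = ret / 1000;

	mclk = devm_clk_get_enabled(dev, "mclk");
	if (IS_ERR(mclk) && PTR_ERR(mclk) != -ENOENT)
		return PTR_ERR(mclk);

	*ext_clk_hz = IS_ERR(mclk) ? 0 : clk_get_rate(mclk);
	return 0;
}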
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
deleted file mode 100644
index ca8d6e66c899..000000000000
--- a/drivers/staging/iio/light/Kconfig
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Light sensors
-#
-menu "Light sensors"
-
-config SENSORS_ISL29018
- tristate "ISL 29018 light and proximity sensor"
- depends on I2C
- select REGMAP_I2C
- default n
- help
- If you say yes here you get support for ambient light sensing and
- proximity infrared sensing from Intersil ISL29018.
- This driver will provide the measurements of ambient light intensity
- in lux, proximity infrared sensing and normal infrared sensing.
- Data from sensor is accessible via sysfs.
-
-config SENSORS_ISL29028
- tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
- depends on I2C
- select REGMAP_I2C
- help
-	  Provides a driver for Intersil's ISL29028 device.
- This driver supports the sysfs interface to get the ALS, IR intensity,
- Proximity value via iio. The ISL29028 provides the concurrent sensing
- of ambient light and proximity.
-
-config TSL2583
- tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
- depends on I2C
- help
- Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
- Access ALS data via iio, sysfs.
-
-config TSL2x7x
- tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
- depends on I2C
- help
- Support for: tsl2571, tsl2671, tmd2671, tsl2771, tmd2771, tsl2572, tsl2672,
- tmd2672, tsl2772, tmd2772 devices.
- Provides iio_events and direct access via sysfs.
-
-endmenu
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
deleted file mode 100644
index 9960fdf7c15b..000000000000
--- a/drivers/staging/iio/light/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for industrial I/O Light sensors
-#
-
-obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
-obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
-obj-$(CONFIG_TSL2583) += tsl2583.o
-obj-$(CONFIG_TSL2x7x) += tsl2x7x_core.o
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
deleted file mode 100644
index 019ba5245c23..000000000000
--- a/drivers/staging/iio/light/isl29018.c
+++ /dev/null
@@ -1,849 +0,0 @@
-/*
- * An IIO driver for the light sensor ISL 29018/29023/29035.
- *
- * IIO driver for monitoring ambient light intensity in lux, proximity
- * sensing and infrared sensing.
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/acpi.h>
-
-#define CONVERSION_TIME_MS 100
-
-#define ISL29018_REG_ADD_COMMAND1 0x00
-#define COMMMAND1_OPMODE_SHIFT 5
-#define COMMMAND1_OPMODE_MASK (7 << COMMMAND1_OPMODE_SHIFT)
-#define COMMMAND1_OPMODE_POWER_DOWN 0
-#define COMMMAND1_OPMODE_ALS_ONCE 1
-#define COMMMAND1_OPMODE_IR_ONCE 2
-#define COMMMAND1_OPMODE_PROX_ONCE 3
-
-#define ISL29018_REG_ADD_COMMANDII 0x01
-#define COMMANDII_RESOLUTION_SHIFT 2
-#define COMMANDII_RESOLUTION_MASK (0x3 << COMMANDII_RESOLUTION_SHIFT)
-
-#define COMMANDII_RANGE_SHIFT 0
-#define COMMANDII_RANGE_MASK (0x3 << COMMANDII_RANGE_SHIFT)
-
-#define COMMANDII_SCHEME_SHIFT 7
-#define COMMANDII_SCHEME_MASK (0x1 << COMMANDII_SCHEME_SHIFT)
-
-#define ISL29018_REG_ADD_DATA_LSB 0x02
-#define ISL29018_REG_ADD_DATA_MSB 0x03
-
-#define ISL29018_REG_TEST 0x08
-#define ISL29018_TEST_SHIFT 0
-#define ISL29018_TEST_MASK (0xFF << ISL29018_TEST_SHIFT)
-
-#define ISL29035_REG_DEVICE_ID 0x0F
-#define ISL29035_DEVICE_ID_SHIFT 0x03
-#define ISL29035_DEVICE_ID_MASK (0x7 << ISL29035_DEVICE_ID_SHIFT)
-#define ISL29035_DEVICE_ID 0x5
-#define ISL29035_BOUT_SHIFT 0x07
-#define ISL29035_BOUT_MASK (0x01 << ISL29035_BOUT_SHIFT)
-
-#define ISL29018_INT_TIME_AVAIL "0.090000 0.005630 0.000351 0.000021"
-#define ISL29023_INT_TIME_AVAIL "0.090000 0.005600 0.000352 0.000022"
-#define ISL29035_INT_TIME_AVAIL "0.105000 0.006500 0.000410 0.000025"
-
-static const char * const int_time_avail[] = {
- ISL29018_INT_TIME_AVAIL,
- ISL29023_INT_TIME_AVAIL,
- ISL29035_INT_TIME_AVAIL,
-};
-
-enum isl29018_int_time {
- ISL29018_INT_TIME_16,
- ISL29018_INT_TIME_12,
- ISL29018_INT_TIME_8,
- ISL29018_INT_TIME_4,
-};
-
-static const unsigned int isl29018_int_utimes[3][4] = {
- {90000, 5630, 351, 21},
- {90000, 5600, 352, 22},
- {105000, 6500, 410, 25},
-};
-
-static const struct isl29018_scale {
- unsigned int scale;
- unsigned int uscale;
-} isl29018_scales[4][4] = {
- { {0, 15258}, {0, 61035}, {0, 244140}, {0, 976562} },
- { {0, 244140}, {0, 976562}, {3, 906250}, {15, 625000} },
- { {3, 906250}, {15, 625000}, {62, 500000}, {250, 0} },
- { {62, 500000}, {250, 0}, {1000, 0}, {4000, 0} }
-};
-
-struct isl29018_chip {
- struct device *dev;
- struct regmap *regmap;
- struct mutex lock;
- int type;
- unsigned int calibscale;
- unsigned int ucalibscale;
- unsigned int int_time;
- struct isl29018_scale scale;
- int prox_scheme;
- bool suspended;
-};
-
-static int isl29018_set_integration_time(struct isl29018_chip *chip,
- unsigned int utime)
-{
- int i, ret;
- unsigned int int_time, new_int_time;
- struct isl29018_scale new_scale;
-
- for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) {
- if (utime == isl29018_int_utimes[chip->type][i]) {
- new_int_time = i;
- new_scale = isl29018_scales[new_int_time][0];
- break;
- }
- }
-
- if (i >= ARRAY_SIZE(isl29018_int_utimes[chip->type]))
- return -EINVAL;
-
- ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
- COMMANDII_RESOLUTION_MASK,
- i << COMMANDII_RESOLUTION_SHIFT);
- if (ret < 0)
- return ret;
-
- /* keep the same range when integration time changes */
- int_time = chip->int_time;
- for (i = 0; i < ARRAY_SIZE(isl29018_scales[int_time]); ++i) {
- if (chip->scale.scale == isl29018_scales[int_time][i].scale &&
- chip->scale.uscale == isl29018_scales[int_time][i].uscale) {
- chip->scale = isl29018_scales[new_int_time][i];
- break;
- }
- }
- chip->int_time = new_int_time;
-
- return 0;
-}
-
-static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
-{
- int i, ret;
- struct isl29018_scale new_scale;
-
- for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) {
- if (scale == isl29018_scales[chip->int_time][i].scale &&
- uscale == isl29018_scales[chip->int_time][i].uscale) {
- new_scale = isl29018_scales[chip->int_time][i];
- break;
- }
- }
-
- if (i >= ARRAY_SIZE(isl29018_scales[chip->int_time]))
- return -EINVAL;
-
- ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
- COMMANDII_RANGE_MASK,
- i << COMMANDII_RANGE_SHIFT);
- if (ret < 0)
- return ret;
-
- chip->scale = new_scale;
-
- return 0;
-}
-
-static int isl29018_read_sensor_input(struct isl29018_chip *chip, int mode)
-{
- int status;
- unsigned int lsb;
- unsigned int msb;
-
- /* Set mode */
- status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1,
- mode << COMMMAND1_OPMODE_SHIFT);
- if (status) {
- dev_err(chip->dev,
- "Error in setting operating mode err %d\n", status);
- return status;
- }
- msleep(CONVERSION_TIME_MS);
- status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb);
- if (status < 0) {
- dev_err(chip->dev,
- "Error in reading LSB DATA with err %d\n", status);
- return status;
- }
-
- status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_MSB, &msb);
- if (status < 0) {
- dev_err(chip->dev,
- "Error in reading MSB DATA with error %d\n", status);
- return status;
- }
- dev_vdbg(chip->dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb);
-
- return (msb << 8) | lsb;
-}
-
-static int isl29018_read_lux(struct isl29018_chip *chip, int *lux)
-{
- int lux_data;
- unsigned int data_x_range;
-
- lux_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_ALS_ONCE);
-
- if (lux_data < 0)
- return lux_data;
-
- data_x_range = lux_data * chip->scale.scale +
- lux_data * chip->scale.uscale / 1000000;
- *lux = data_x_range * chip->calibscale +
- data_x_range * chip->ucalibscale / 1000000;
-
- return 0;
-}
-
-static int isl29018_read_ir(struct isl29018_chip *chip, int *ir)
-{
- int ir_data;
-
- ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE);
-
- if (ir_data < 0)
- return ir_data;
-
- *ir = ir_data;
-
- return 0;
-}
-
-static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
- int *near_ir)
-{
- int status;
- int prox_data = -1;
- int ir_data = -1;
-
- /* Do proximity sensing with required scheme */
- status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
- COMMANDII_SCHEME_MASK,
- scheme << COMMANDII_SCHEME_SHIFT);
- if (status) {
- dev_err(chip->dev, "Error in setting operating mode\n");
- return status;
- }
-
- prox_data = isl29018_read_sensor_input(chip,
- COMMMAND1_OPMODE_PROX_ONCE);
- if (prox_data < 0)
- return prox_data;
-
- if (scheme == 1) {
- *near_ir = prox_data;
- return 0;
- }
-
- ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE);
-
- if (ir_data < 0)
- return ir_data;
-
- if (prox_data >= ir_data)
- *near_ir = prox_data - ir_data;
- else
- *near_ir = 0;
-
- return 0;
-}
-
-static ssize_t show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
-
- for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i)
- len += sprintf(buf + len, "%d.%06d ",
- isl29018_scales[chip->int_time][i].scale,
- isl29018_scales[chip->int_time][i].uscale);
-
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static ssize_t show_int_time_available(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
-
- for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i)
- len += sprintf(buf + len, "0.%06d ",
- isl29018_int_utimes[chip->type][i]);
-
- buf[len - 1] = '\n';
-
- return len;
-}
-
-/* proximity scheme */
-static ssize_t show_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct isl29018_chip *chip = iio_priv(indio_dev);
-
- /* return the "proximity scheme" i.e. if the chip does on chip
- infrared suppression (1 means perform on chip suppression) */
- return sprintf(buf, "%d\n", chip->prox_scheme);
-}
-
-static ssize_t store_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct isl29018_chip *chip = iio_priv(indio_dev);
- int val;
-
- if (kstrtoint(buf, 10, &val))
- return -EINVAL;
- if (!(val == 0 || val == 1)) {
- dev_err(dev, "The mode is not supported\n");
- return -EINVAL;
- }
-
- /* get the "proximity scheme" i.e. if the chip does on chip
- infrared suppression (1 means perform on chip suppression) */
- mutex_lock(&chip->lock);
- chip->prox_scheme = val;
- mutex_unlock(&chip->lock);
-
- return count;
-}
-
-/* Channel IO */
-static int isl29018_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct isl29018_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL;
-
- mutex_lock(&chip->lock);
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->type == IIO_LIGHT) {
- chip->calibscale = val;
- chip->ucalibscale = val2;
- ret = 0;
- }
- break;
- case IIO_CHAN_INFO_INT_TIME:
- if (chan->type == IIO_LIGHT) {
- if (val != 0) {
- mutex_unlock(&chip->lock);
- return -EINVAL;
- }
- ret = isl29018_set_integration_time(chip, val2);
- }
- break;
- case IIO_CHAN_INFO_SCALE:
- if (chan->type == IIO_LIGHT)
- ret = isl29018_set_scale(chip, val, val2);
- break;
- default:
- break;
- }
- mutex_unlock(&chip->lock);
-
- return ret;
-}
-
-static int isl29018_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- int ret = -EINVAL;
- struct isl29018_chip *chip = iio_priv(indio_dev);
-
- mutex_lock(&chip->lock);
- if (chip->suspended) {
- mutex_unlock(&chip->lock);
- return -EBUSY;
- }
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_LIGHT:
- ret = isl29018_read_lux(chip, val);
- break;
- case IIO_INTENSITY:
- ret = isl29018_read_ir(chip, val);
- break;
- case IIO_PROXIMITY:
- ret = isl29018_read_proximity_ir(chip,
- chip->prox_scheme, val);
- break;
- default:
- break;
- }
- if (!ret)
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_INT_TIME:
- if (chan->type == IIO_LIGHT) {
- *val = 0;
- *val2 = isl29018_int_utimes[chip->type][chip->int_time];
- ret = IIO_VAL_INT_PLUS_MICRO;
- }
- break;
- case IIO_CHAN_INFO_SCALE:
- if (chan->type == IIO_LIGHT) {
- *val = chip->scale.scale;
- *val2 = chip->scale.uscale;
- ret = IIO_VAL_INT_PLUS_MICRO;
- }
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->type == IIO_LIGHT) {
- *val = chip->calibscale;
- *val2 = chip->ucalibscale;
- ret = IIO_VAL_INT_PLUS_MICRO;
- }
- break;
- default:
- break;
- }
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-#define ISL29018_LIGHT_CHANNEL { \
- .type = IIO_LIGHT, \
- .indexed = 1, \
- .channel = 0, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | \
- BIT(IIO_CHAN_INFO_CALIBSCALE) | \
- BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_INT_TIME), \
-}
-
-#define ISL29018_IR_CHANNEL { \
- .type = IIO_INTENSITY, \
- .modified = 1, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .channel2 = IIO_MOD_LIGHT_IR, \
-}
-
-#define ISL29018_PROXIMITY_CHANNEL { \
- .type = IIO_PROXIMITY, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
-}
-
-static const struct iio_chan_spec isl29018_channels[] = {
- ISL29018_LIGHT_CHANNEL,
- ISL29018_IR_CHANNEL,
- ISL29018_PROXIMITY_CHANNEL,
-};
-
-static const struct iio_chan_spec isl29023_channels[] = {
- ISL29018_LIGHT_CHANNEL,
- ISL29018_IR_CHANNEL,
-};
-
-static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, S_IRUGO,
- show_int_time_available, NULL, 0);
-static IIO_DEVICE_ATTR(in_illuminance_scale_available, S_IRUGO,
- show_scale_available, NULL, 0);
-static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression,
- S_IRUGO | S_IWUSR,
- show_prox_infrared_suppression,
- store_prox_infrared_suppression, 0);
-
-#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
-
-static struct attribute *isl29018_attributes[] = {
- ISL29018_DEV_ATTR(in_illuminance_scale_available),
- ISL29018_DEV_ATTR(in_illuminance_integration_time_available),
- ISL29018_DEV_ATTR(proximity_on_chip_ambient_infrared_suppression),
- NULL
-};
-
-static struct attribute *isl29023_attributes[] = {
- ISL29018_DEV_ATTR(in_illuminance_scale_available),
- ISL29018_DEV_ATTR(in_illuminance_integration_time_available),
- NULL
-};
-
-static const struct attribute_group isl29018_group = {
- .attrs = isl29018_attributes,
-};
-
-static const struct attribute_group isl29023_group = {
- .attrs = isl29023_attributes,
-};
-
-static int isl29035_detect(struct isl29018_chip *chip)
-{
- int status;
- unsigned int id;
-
- status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
- if (status < 0) {
- dev_err(chip->dev,
- "Error reading ID register with error %d\n",
- status);
- return status;
- }
-
- id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
-
- if (id != ISL29035_DEVICE_ID)
- return -ENODEV;
-
- /* clear out brownout bit */
- return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID,
- ISL29035_BOUT_MASK, 0);
-}
-
-enum {
- isl29018,
- isl29023,
- isl29035,
-};
-
-static int isl29018_chip_init(struct isl29018_chip *chip)
-{
- int status;
-
- if (chip->type == isl29035) {
- status = isl29035_detect(chip);
- if (status < 0)
- return status;
- }
-
- /* Code added per Intersil Application Note 1534:
- * When VDD sinks to approximately 1.8V or below, some of
- * the part's registers may change their state. When VDD
- * recovers to 2.25V (or greater), the part may thus be in an
- * unknown mode of operation. The user can return the part to
- * a known mode of operation either by (a) setting VDD = 0V for
- * 1 second or more and then powering back up with a slew rate
- * of 0.5V/ms or greater, or (b) via I2C disable all ALS/PROX
- * conversions, clear the test registers, and then rewrite all
- * registers to the desired values.
- * ...
- * FOR ISL29011, ISL29018, ISL29021, ISL29023
- * 1. Write 0x00 to register 0x08 (TEST)
- * 2. Write 0x00 to register 0x00 (CMD1)
- * 3. Rewrite all registers to the desired values
- *
- * ISL29018 Data Sheet (FN6619.1, Feb 11, 2010) essentially says
- * the same thing EXCEPT the data sheet asks for a 1ms delay after
- * writing the CMD1 register.
- */
- status = regmap_write(chip->regmap, ISL29018_REG_TEST, 0x0);
- if (status < 0) {
- dev_err(chip->dev, "Failed to clear isl29018 TEST reg.(%d)\n",
- status);
- return status;
- }
-
- /* See Intersil AN1534 comments above.
- * "Operating Mode" (COMMAND1) register is reprogrammed when
- * data is read from the device.
- */
- status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, 0);
- if (status < 0) {
- dev_err(chip->dev, "Failed to clear isl29018 CMD1 reg.(%d)\n",
- status);
- return status;
- }
-
- usleep_range(1000, 2000); /* per data sheet, page 10 */
-
- /* set defaults */
- status = isl29018_set_scale(chip, chip->scale.scale,
- chip->scale.uscale);
- if (status < 0) {
- dev_err(chip->dev, "Init of isl29018 fails\n");
- return status;
- }
-
- status = isl29018_set_integration_time(chip,
- isl29018_int_utimes[chip->type][chip->int_time]);
- if (status < 0) {
- dev_err(chip->dev, "Init of isl29018 fails\n");
- return status;
- }
-
- return 0;
-}
-
-static const struct iio_info isl29018_info = {
- .attrs = &isl29018_group,
- .driver_module = THIS_MODULE,
- .read_raw = &isl29018_read_raw,
- .write_raw = &isl29018_write_raw,
-};
-
-static const struct iio_info isl29023_info = {
- .attrs = &isl29023_group,
- .driver_module = THIS_MODULE,
- .read_raw = &isl29018_read_raw,
- .write_raw = &isl29018_write_raw,
-};
-
-static bool is_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case ISL29018_REG_ADD_DATA_LSB:
- case ISL29018_REG_ADD_DATA_MSB:
- case ISL29018_REG_ADD_COMMAND1:
- case ISL29018_REG_TEST:
- case ISL29035_REG_DEVICE_ID:
- return true;
- default:
- return false;
- }
-}
-
-/*
- * isl29018_regmap_config: regmap configuration.
- * Use RBTREE mechanism for caching.
- */
-static const struct regmap_config isl29018_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .volatile_reg = is_volatile_reg,
- .max_register = ISL29018_REG_TEST,
- .num_reg_defaults_raw = ISL29018_REG_TEST + 1,
- .cache_type = REGCACHE_RBTREE,
-};
-
-/* isl29035_regmap_config: regmap configuration for ISL29035 */
-static const struct regmap_config isl29035_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .volatile_reg = is_volatile_reg,
- .max_register = ISL29035_REG_DEVICE_ID,
- .num_reg_defaults_raw = ISL29035_REG_DEVICE_ID + 1,
- .cache_type = REGCACHE_RBTREE,
-};
-
-struct chip_info {
- const struct iio_chan_spec *channels;
- int num_channels;
- const struct iio_info *indio_info;
- const struct regmap_config *regmap_cfg;
-};
-
-static const struct chip_info chip_info_tbl[] = {
- [isl29018] = {
- .channels = isl29018_channels,
- .num_channels = ARRAY_SIZE(isl29018_channels),
- .indio_info = &isl29018_info,
- .regmap_cfg = &isl29018_regmap_config,
- },
- [isl29023] = {
- .channels = isl29023_channels,
- .num_channels = ARRAY_SIZE(isl29023_channels),
- .indio_info = &isl29023_info,
- .regmap_cfg = &isl29018_regmap_config,
- },
- [isl29035] = {
- .channels = isl29023_channels,
- .num_channels = ARRAY_SIZE(isl29023_channels),
- .indio_info = &isl29023_info,
- .regmap_cfg = &isl29035_regmap_config,
- },
-};
-
-static const char *isl29018_match_acpi_device(struct device *dev, int *data)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
-
- if (!id)
- return NULL;
-
- *data = (int) id->driver_data;
-
- return dev_name(dev);
-}
-
-static int isl29018_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct isl29018_chip *chip;
- struct iio_dev *indio_dev;
- int err;
- const char *name = NULL;
- int dev_id = 0;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
- if (indio_dev == NULL) {
- dev_err(&client->dev, "iio allocation fails\n");
- return -ENOMEM;
- }
- chip = iio_priv(indio_dev);
-
- i2c_set_clientdata(client, indio_dev);
- chip->dev = &client->dev;
-
- if (id) {
- name = id->name;
- dev_id = id->driver_data;
- }
-
- if (ACPI_HANDLE(&client->dev))
- name = isl29018_match_acpi_device(&client->dev, &dev_id);
-
- mutex_init(&chip->lock);
-
- chip->type = dev_id;
- chip->calibscale = 1;
- chip->ucalibscale = 0;
- chip->int_time = ISL29018_INT_TIME_16;
- chip->scale = isl29018_scales[chip->int_time][0];
- chip->suspended = false;
-
- chip->regmap = devm_regmap_init_i2c(client,
- chip_info_tbl[dev_id].regmap_cfg);
- if (IS_ERR(chip->regmap)) {
- err = PTR_ERR(chip->regmap);
- dev_err(chip->dev, "regmap initialization failed: %d\n", err);
- return err;
- }
-
- err = isl29018_chip_init(chip);
- if (err)
- return err;
-
- indio_dev->info = chip_info_tbl[dev_id].indio_info;
- indio_dev->channels = chip_info_tbl[dev_id].channels;
- indio_dev->num_channels = chip_info_tbl[dev_id].num_channels;
- indio_dev->name = name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
- err = devm_iio_device_register(&client->dev, indio_dev);
- if (err) {
- dev_err(&client->dev, "iio registration fails\n");
- return err;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int isl29018_suspend(struct device *dev)
-{
- struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev));
-
- mutex_lock(&chip->lock);
-
- /* Since this driver uses only polling commands, we are by default in
- * auto shutdown (ie, power-down) mode.
- * So we do not have much to do here.
- */
- chip->suspended = true;
-
- mutex_unlock(&chip->lock);
- return 0;
-}
-
-static int isl29018_resume(struct device *dev)
-{
- struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev));
- int err;
-
- mutex_lock(&chip->lock);
-
- err = isl29018_chip_init(chip);
- if (!err)
- chip->suspended = false;
-
- mutex_unlock(&chip->lock);
- return err;
-}
-
-static SIMPLE_DEV_PM_OPS(isl29018_pm_ops, isl29018_suspend, isl29018_resume);
-#define ISL29018_PM_OPS (&isl29018_pm_ops)
-#else
-#define ISL29018_PM_OPS NULL
-#endif
-
-static const struct acpi_device_id isl29018_acpi_match[] = {
- {"ISL29018", isl29018},
- {"ISL29023", isl29023},
- {"ISL29035", isl29035},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, isl29018_acpi_match);
-
-static const struct i2c_device_id isl29018_id[] = {
- {"isl29018", isl29018},
- {"isl29023", isl29023},
- {"isl29035", isl29035},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, isl29018_id);
-
-static const struct of_device_id isl29018_of_match[] = {
- { .compatible = "isil,isl29018", },
- { .compatible = "isil,isl29023", },
- { .compatible = "isil,isl29035", },
- { },
-};
-MODULE_DEVICE_TABLE(of, isl29018_of_match);
-
-static struct i2c_driver isl29018_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "isl29018",
- .acpi_match_table = ACPI_PTR(isl29018_acpi_match),
- .pm = ISL29018_PM_OPS,
- .of_match_table = isl29018_of_match,
- },
- .probe = isl29018_probe,
- .id_table = isl29018_id,
-};
-module_i2c_driver(isl29018_driver);
-
-MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
deleted file mode 100644
index cd6f2727aa58..000000000000
--- a/drivers/staging/iio/light/isl29028.c
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- * IIO driver for the light sensor ISL29028.
- * ISL29028 is Concurrent Ambient Light and Proximity Sensor
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#define CONVERSION_TIME_MS 100
-
-#define ISL29028_REG_CONFIGURE 0x01
-
-#define CONFIGURE_ALS_IR_MODE_ALS 0
-#define CONFIGURE_ALS_IR_MODE_IR BIT(0)
-#define CONFIGURE_ALS_IR_MODE_MASK BIT(0)
-
-#define CONFIGURE_ALS_RANGE_LOW_LUX 0
-#define CONFIGURE_ALS_RANGE_HIGH_LUX BIT(1)
-#define CONFIGURE_ALS_RANGE_MASK BIT(1)
-
-#define CONFIGURE_ALS_DIS 0
-#define CONFIGURE_ALS_EN BIT(2)
-#define CONFIGURE_ALS_EN_MASK BIT(2)
-
-#define CONFIGURE_PROX_DRIVE BIT(3)
-
-#define CONFIGURE_PROX_SLP_SH 4
-#define CONFIGURE_PROX_SLP_MASK (7 << CONFIGURE_PROX_SLP_SH)
-
-#define CONFIGURE_PROX_EN BIT(7)
-#define CONFIGURE_PROX_EN_MASK BIT(7)
-
-#define ISL29028_REG_INTERRUPT 0x02
-
-#define ISL29028_REG_PROX_DATA 0x08
-#define ISL29028_REG_ALSIR_L 0x09
-#define ISL29028_REG_ALSIR_U 0x0A
-
-#define ISL29028_REG_TEST1_MODE 0x0E
-#define ISL29028_REG_TEST2_MODE 0x0F
-
-#define ISL29028_NUM_REGS (ISL29028_REG_TEST2_MODE + 1)
-
-enum als_ir_mode {
- MODE_NONE = 0,
- MODE_ALS,
- MODE_IR
-};
-
-struct isl29028_chip {
- struct device *dev;
- struct mutex lock;
- struct regmap *regmap;
-
- unsigned int prox_sampling;
- bool enable_prox;
-
- int lux_scale;
- int als_ir_mode;
-};
-
-static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
- unsigned int sampling)
-{
- static unsigned int prox_period[] = {800, 400, 200, 100, 75, 50, 12, 0};
- int sel;
- unsigned int period = DIV_ROUND_UP(1000, sampling);
-
- for (sel = 0; sel < ARRAY_SIZE(prox_period); ++sel) {
- if (period >= prox_period[sel])
- break;
- }
- return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_PROX_SLP_MASK, sel << CONFIGURE_PROX_SLP_SH);
-}
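For illustration, the selection loop in isl29028_set_proxim_sampling() above can be reproduced in a small user-space sketch (the helper name is hypothetical; the period table is the same one the driver uses). With a requested rate of 20 Hz the period is 50 ms, which resolves to index 5, the 50 ms slot:

#include <stdio.h>

/* Mirrors the driver's loop: pick the first table slot whose period (ms)
 * the requested period still covers.
 */
static int prox_sel_for_sampling(unsigned int sampling_hz)
{
	static const unsigned int prox_period[] = {800, 400, 200, 100, 75, 50, 12, 0};
	unsigned int period = (1000 + sampling_hz - 1) / sampling_hz; /* DIV_ROUND_UP */
	unsigned int sel;

	for (sel = 0; sel < sizeof(prox_period) / sizeof(prox_period[0]); sel++)
		if (period >= prox_period[sel])
			break;
	return sel;
}

int main(void)
{
	printf("20 Hz -> sel %d\n", prox_sel_for_sampling(20)); /* prints 5 */
	return 0;
}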
-
-static int isl29028_enable_proximity(struct isl29028_chip *chip, bool enable)
-{
- int ret;
- int val = 0;
-
- if (enable)
- val = CONFIGURE_PROX_EN;
- ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_PROX_EN_MASK, val);
- if (ret < 0)
- return ret;
-
- /* Wait for conversion to be complete for first sample */
- mdelay(DIV_ROUND_UP(1000, chip->prox_sampling));
- return 0;
-}
-
-static int isl29028_set_als_scale(struct isl29028_chip *chip, int lux_scale)
-{
- int val = (lux_scale == 2000) ? CONFIGURE_ALS_RANGE_HIGH_LUX :
- CONFIGURE_ALS_RANGE_LOW_LUX;
-
- return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_RANGE_MASK, val);
-}
-
-static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
- enum als_ir_mode mode)
-{
- int ret = 0;
-
- switch (mode) {
- case MODE_ALS:
- ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_ALS);
- if (ret < 0)
- return ret;
-
- ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_RANGE_MASK, CONFIGURE_ALS_RANGE_HIGH_LUX);
- break;
-
- case MODE_IR:
- ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_IR);
- break;
-
- case MODE_NONE:
- return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_DIS);
- }
-
- if (ret < 0)
- return ret;
-
- /* Enable the ALS/IR */
- ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN);
- if (ret < 0)
- return ret;
-
- /* Need to wait for conversion time if ALS/IR mode enabled */
- mdelay(CONVERSION_TIME_MS);
- return 0;
-}
-
-static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
-{
- unsigned int lsb;
- unsigned int msb;
- int ret;
-
- ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &lsb);
- if (ret < 0) {
- dev_err(chip->dev,
- "Error in reading register ALSIR_L err %d\n", ret);
- return ret;
- }
-
- ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_U, &msb);
- if (ret < 0) {
- dev_err(chip->dev,
- "Error in reading register ALSIR_U err %d\n", ret);
- return ret;
- }
-
- *als_ir = ((msb & 0xF) << 8) | (lsb & 0xFF);
- return 0;
-}
-
-static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox)
-{
- unsigned int data;
- int ret;
-
- ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
- if (ret < 0) {
- dev_err(chip->dev, "Error in reading register %d, error %d\n",
- ISL29028_REG_PROX_DATA, ret);
- return ret;
- }
- *prox = data;
- return 0;
-}
-
-static int isl29028_proxim_get(struct isl29028_chip *chip, int *prox_data)
-{
- int ret;
-
- if (!chip->enable_prox) {
- ret = isl29028_enable_proximity(chip, true);
- if (ret < 0)
- return ret;
- chip->enable_prox = true;
- }
- return isl29028_read_proxim(chip, prox_data);
-}
-
-static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
-{
- int ret;
- int als_ir_data;
-
- if (chip->als_ir_mode != MODE_ALS) {
- ret = isl29028_set_als_ir_mode(chip, MODE_ALS);
- if (ret < 0) {
- dev_err(chip->dev,
- "Error in enabling ALS mode err %d\n", ret);
- return ret;
- }
- chip->als_ir_mode = MODE_ALS;
- }
-
- ret = isl29028_read_als_ir(chip, &als_ir_data);
- if (ret < 0)
- return ret;
-
- /*
- * convert als data count to lux.
- * if lux_scale = 125, lux = count * 0.031
- * if lux_scale = 2000, lux = count * 0.49
- */
- if (chip->lux_scale == 125)
- als_ir_data = (als_ir_data * 31) / 1000;
- else
- als_ir_data = (als_ir_data * 49) / 100;
-
- *als_data = als_ir_data;
- return 0;
-}
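The comment in isl29028_als_get() above gives the two scale factors; the integer form used by the driver can be checked with a tiny standalone helper (the function name here is illustrative, not part of the driver):

/* 125-lux range:  lux ~= count * 0.031  ->  count * 31 / 1000
 * 2000-lux range: lux ~= count * 0.49   ->  count * 49 / 100
 */
static int isl29028_counts_to_lux(int count, int lux_scale)
{
	return (lux_scale == 125) ? count * 31 / 1000 : count * 49 / 100;
}
/* Example: a raw count of 2048 on the 2000-lux range gives 2048 * 49 / 100 = 1003 lux. */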
-
-static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
-{
- int ret;
-
- if (chip->als_ir_mode != MODE_IR) {
- ret = isl29028_set_als_ir_mode(chip, MODE_IR);
- if (ret < 0) {
- dev_err(chip->dev,
- "Error in enabling IR mode err %d\n", ret);
- return ret;
- }
- chip->als_ir_mode = MODE_IR;
- }
- return isl29028_read_als_ir(chip, ir_data);
-}
-
-/* Channel IO */
-static int isl29028_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2, long mask)
-{
- struct isl29028_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL;
-
- mutex_lock(&chip->lock);
- switch (chan->type) {
- case IIO_PROXIMITY:
- if (mask != IIO_CHAN_INFO_SAMP_FREQ) {
- dev_err(chip->dev,
- "proximity: mask value 0x%08lx not supported\n",
- mask);
- break;
- }
- if (val < 1 || val > 100) {
- dev_err(chip->dev,
- "Samp_freq %d is not in range[1:100]\n", val);
- break;
- }
- ret = isl29028_set_proxim_sampling(chip, val);
- if (ret < 0) {
- dev_err(chip->dev,
-				"Setting proximity samp_freq failed, err %d\n",
- ret);
- break;
- }
- chip->prox_sampling = val;
- break;
-
- case IIO_LIGHT:
- if (mask != IIO_CHAN_INFO_SCALE) {
- dev_err(chip->dev,
- "light: mask value 0x%08lx not supported\n",
- mask);
- break;
- }
- if ((val != 125) && (val != 2000)) {
- dev_err(chip->dev,
-				"lux scale %d is invalid; must be 125 or 2000\n", val);
- break;
- }
- ret = isl29028_set_als_scale(chip, val);
- if (ret < 0) {
- dev_err(chip->dev,
-				"Setting lux scale failed with error %d\n", ret);
- break;
- }
- chip->lux_scale = val;
- break;
-
- default:
- dev_err(chip->dev, "Unsupported channel type\n");
- break;
- }
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static int isl29028_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int *val, int *val2, long mask)
-{
- struct isl29028_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL;
-
- mutex_lock(&chip->lock);
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_LIGHT:
- ret = isl29028_als_get(chip, val);
- break;
- case IIO_INTENSITY:
- ret = isl29028_ir_get(chip, val);
- break;
- case IIO_PROXIMITY:
- ret = isl29028_proxim_get(chip, val);
- break;
- default:
- break;
- }
- if (ret < 0)
- break;
- ret = IIO_VAL_INT;
- break;
-
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (chan->type != IIO_PROXIMITY)
- break;
- *val = chip->prox_sampling;
- ret = IIO_VAL_INT;
- break;
-
- case IIO_CHAN_INFO_SCALE:
- if (chan->type != IIO_LIGHT)
- break;
- *val = chip->lux_scale;
- ret = IIO_VAL_INT;
- break;
-
- default:
- dev_err(chip->dev, "mask value 0x%08lx not supported\n", mask);
- break;
- }
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static IIO_CONST_ATTR(in_proximity_sampling_frequency_available,
- "1, 3, 5, 10, 13, 20, 83, 100");
-static IIO_CONST_ATTR(in_illuminance_scale_available, "125, 2000");
-
-#define ISL29028_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
-#define ISL29028_CONST_ATTR(name) (&iio_const_attr_##name.dev_attr.attr)
-static struct attribute *isl29028_attributes[] = {
- ISL29028_CONST_ATTR(in_proximity_sampling_frequency_available),
- ISL29028_CONST_ATTR(in_illuminance_scale_available),
- NULL,
-};
-
-static const struct attribute_group isl29028_group = {
- .attrs = isl29028_attributes,
-};
-
-static const struct iio_chan_spec isl29028_channels[] = {
- {
- .type = IIO_LIGHT,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE),
- }, {
- .type = IIO_INTENSITY,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_PROXIMITY,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ),
- }
-};
-
-static const struct iio_info isl29028_info = {
-	.attrs = &isl29028_group,
- .driver_module = THIS_MODULE,
- .read_raw = &isl29028_read_raw,
- .write_raw = &isl29028_write_raw,
-};
-
-static int isl29028_chip_init(struct isl29028_chip *chip)
-{
- int ret;
-
- chip->enable_prox = false;
- chip->prox_sampling = 20;
- chip->lux_scale = 2000;
- chip->als_ir_mode = MODE_NONE;
-
- ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
- if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
- __func__, ISL29028_REG_TEST1_MODE, ret);
- return ret;
- }
- ret = regmap_write(chip->regmap, ISL29028_REG_TEST2_MODE, 0x0);
- if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
- __func__, ISL29028_REG_TEST2_MODE, ret);
- return ret;
- }
-
- ret = regmap_write(chip->regmap, ISL29028_REG_CONFIGURE, 0x0);
- if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
- __func__, ISL29028_REG_CONFIGURE, ret);
- return ret;
- }
-
- ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
- if (ret < 0) {
-		dev_err(chip->dev,
-			"setting proximity sampling failed, err = %d\n", ret);
- return ret;
- }
-
- ret = isl29028_set_als_scale(chip, chip->lux_scale);
- if (ret < 0)
- dev_err(chip->dev,
- "setting als scale failed, err = %d\n", ret);
- return ret;
-}
-
-static bool is_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case ISL29028_REG_INTERRUPT:
- case ISL29028_REG_PROX_DATA:
- case ISL29028_REG_ALSIR_L:
- case ISL29028_REG_ALSIR_U:
- return true;
- default:
- return false;
- }
-}
-
-static const struct regmap_config isl29028_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .volatile_reg = is_volatile_reg,
- .max_register = ISL29028_NUM_REGS - 1,
- .num_reg_defaults_raw = ISL29028_NUM_REGS,
- .cache_type = REGCACHE_RBTREE,
-};
-
-static int isl29028_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct isl29028_chip *chip;
- struct iio_dev *indio_dev;
- int ret;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation fails\n");
- return -ENOMEM;
- }
-
- chip = iio_priv(indio_dev);
-
- i2c_set_clientdata(client, indio_dev);
- chip->dev = &client->dev;
- mutex_init(&chip->lock);
-
- chip->regmap = devm_regmap_init_i2c(client, &isl29028_regmap_config);
- if (IS_ERR(chip->regmap)) {
- ret = PTR_ERR(chip->regmap);
- dev_err(chip->dev, "regmap initialization failed: %d\n", ret);
- return ret;
- }
-
- ret = isl29028_chip_init(chip);
- if (ret < 0) {
- dev_err(chip->dev, "chip initialization failed: %d\n", ret);
- return ret;
- }
-
- indio_dev->info = &isl29028_info;
- indio_dev->channels = isl29028_channels;
- indio_dev->num_channels = ARRAY_SIZE(isl29028_channels);
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(chip->dev, "iio registration fails with error %d\n",
- ret);
- return ret;
- }
- return 0;
-}
-
-static int isl29028_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- return 0;
-}
-
-static const struct i2c_device_id isl29028_id[] = {
- {"isl29028", 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, isl29028_id);
-
-static const struct of_device_id isl29028_of_match[] = {
- { .compatible = "isl,isl29028", }, /* for backward compat., don't use */
- { .compatible = "isil,isl29028", },
- { },
-};
-MODULE_DEVICE_TABLE(of, isl29028_of_match);
-
-static struct i2c_driver isl29028_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "isl29028",
- .of_match_table = isl29028_of_match,
- },
- .probe = isl29028_probe,
- .remove = isl29028_remove,
- .id_table = isl29028_id,
-};
-
-module_i2c_driver(isl29028_driver);
-
-MODULE_DESCRIPTION("ISL29028 Ambient Light and Proximity Sensor driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
deleted file mode 100644
index b5e1b8b0a6f0..000000000000
--- a/drivers/staging/iio/light/tsl2583.c
+++ /dev/null
@@ -1,949 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity (lux)
- * within the TAOS tsl258x family of devices (tsl2580, tsl2581).
- *
- * Copyright (c) 2011, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-
-#define TSL258X_MAX_DEVICE_REGS 32
-
-/* Triton register offsets */
-#define TSL258X_REG_MAX 8
-
-/* Device Registers and Masks */
-#define TSL258X_CNTRL 0x00
-#define TSL258X_ALS_TIME 0X01
-#define TSL258X_INTERRUPT 0x02
-#define TSL258X_GAIN 0x07
-#define TSL258X_REVID 0x11
-#define TSL258X_CHIPID 0x12
-#define TSL258X_ALS_CHAN0LO 0x14
-#define TSL258X_ALS_CHAN0HI 0x15
-#define TSL258X_ALS_CHAN1LO 0x16
-#define TSL258X_ALS_CHAN1HI 0x17
-#define TSL258X_TMR_LO 0x18
-#define TSL258X_TMR_HI 0x19
-
-/* tsl2583 cmd reg masks */
-#define TSL258X_CMD_REG 0x80
-#define TSL258X_CMD_SPL_FN 0x60
-#define TSL258X_CMD_ALS_INT_CLR 0X01
-
-/* tsl2583 cntrl reg masks */
-#define TSL258X_CNTL_ADC_ENBL 0x02
-#define TSL258X_CNTL_PWR_ON 0x01
-
-/* tsl2583 status reg masks */
-#define TSL258X_STA_ADC_VALID 0x01
-#define TSL258X_STA_ADC_INTR 0x10
-
-/* Lux calculation constants */
-#define TSL258X_LUX_CALC_OVER_FLOW 65535
-
-enum {
- TSL258X_CHIP_UNKNOWN = 0,
- TSL258X_CHIP_WORKING = 1,
- TSL258X_CHIP_SUSPENDED = 2
-};
-
-/* Per-device data */
-struct taos_als_info {
- u16 als_ch0;
- u16 als_ch1;
- u16 lux;
-};
-
-struct taos_settings {
- int als_time;
- int als_gain;
- int als_gain_trim;
- int als_cal_target;
-};
-
-struct tsl2583_chip {
- struct mutex als_mutex;
- struct i2c_client *client;
- struct taos_als_info als_cur_info;
- struct taos_settings taos_settings;
- int als_time_scale;
- int als_saturation;
- int taos_chip_status;
- u8 taos_config[8];
-};
-
-/*
- * Initial values for the device - these values can/will be changed by the
- * driver and by applications as needed.
- * These values are dynamic.
- */
-static const u8 taos_config[8] = {
- 0x00, 0xee, 0x00, 0x03, 0x00, 0xFF, 0xFF, 0x00
-}; /* cntrl atime intC Athl0 Athl1 Athh0 Athh1 gain */
-
-struct taos_lux {
- unsigned int ratio;
- unsigned int ch0;
- unsigned int ch1;
-};
-
-/* This structure is intentionally large to accommodate updates via sysfs. */
-/* Sized to 11 = max 10 segments + 1 termination segment */
-/* Assumption is one and only one type of glass used */
-static struct taos_lux taos_device_lux[11] = {
- { 9830, 8520, 15729 },
- { 12452, 10807, 23344 },
- { 14746, 6383, 11705 },
- { 17695, 4063, 6554 },
-};
-
-struct gainadj {
- s16 ch0;
- s16 ch1;
-};
-
-/* Index = (0 - 3) Used to validate the gain selection index */
-static const struct gainadj gainadj[] = {
- { 1, 1 },
- { 8, 8 },
- { 16, 16 },
- { 107, 115 }
-};
-
-/*
- * Provides initial operational parameter defaults.
- * These defaults may be changed through the device's sysfs files.
- */
-static void taos_defaults(struct tsl2583_chip *chip)
-{
- /* Operational parameters */
- chip->taos_settings.als_time = 100;
- /* must be a multiple of 50mS */
- chip->taos_settings.als_gain = 0;
- /* this is actually an index into the gain table */
- /* assume clear glass as default */
- chip->taos_settings.als_gain_trim = 1000;
- /* default gain trim to account for aperture effects */
- chip->taos_settings.als_cal_target = 130;
- /* Known external ALS reading used for calibration */
-}
-
-/*
- * Read a number of bytes starting at register (reg) location.
- * Return 0, or i2c_smbus_write_byte ERROR code.
- */
-static int
-taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len)
-{
- int i, ret;
-
- for (i = 0; i < len; i++) {
- /* select register to write */
- ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | reg));
- if (ret < 0) {
- dev_err(&client->dev,
- "taos_i2c_read failed to write register %x\n",
- reg);
- return ret;
- }
- /* read the data */
- *val = i2c_smbus_read_byte(client);
- val++;
- reg++;
- }
- return 0;
-}
-
-/*
- * Reads and calculates current lux value.
- * The raw ch0 and ch1 values of the ambient light sensed in the last
- * integration cycle are read from the device.
- * Time scale factor array values are adjusted based on the integration time.
- * The raw values are multiplied by a scale factor, and device gain is obtained
- * using gain index. Limit checks are done next, then the ratio of a multiple
- * of ch1 value, to the ch0 value, is calculated. The array taos_device_lux[]
- * declared above is then scanned to find the first ratio value that is just
- * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
- * the array are then used along with the time scale factor array values, to
- * calculate the lux.
- */
-static int taos_get_lux(struct iio_dev *indio_dev)
-{
- u16 ch0, ch1; /* separated ch0/ch1 data from device */
- u32 lux; /* raw lux calculated from device data */
- u64 lux64;
- u32 ratio;
- u8 buf[5];
- struct taos_lux *p;
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int i, ret;
- u32 ch0lux = 0;
- u32 ch1lux = 0;
-
- if (mutex_trylock(&chip->als_mutex) == 0) {
- dev_info(&chip->client->dev, "taos_get_lux device is busy\n");
- return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
- }
-
- if (chip->taos_chip_status != TSL258X_CHIP_WORKING) {
- /* device is not enabled */
- dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n");
- ret = -EBUSY;
- goto out_unlock;
- }
-
- ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n");
- goto out_unlock;
- }
- /* is data new & valid */
- if (!(buf[0] & TSL258X_STA_ADC_INTR)) {
- dev_err(&chip->client->dev, "taos_get_lux data not valid\n");
- ret = chip->als_cur_info.lux; /* return LAST VALUE */
- goto out_unlock;
- }
-
- for (i = 0; i < 4; i++) {
- int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i);
-
- ret = taos_i2c_read(chip->client, reg, &buf[i], 1);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_get_lux failed to read register %x\n",
- reg);
- goto out_unlock;
- }
- }
-
-	/* Clear status (really the interrupt status; interrupts are off, but
-	 * we use the bit anyway). Don't forget 0x80 - this is a command.
-	 */
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
- TSL258X_CMD_ALS_INT_CLR));
-
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_i2c_write_command failed in taos_get_lux, err = %d\n",
- ret);
- goto out_unlock; /* have no data, so return failure */
- }
-
- /* extract ALS/lux data */
- ch0 = le16_to_cpup((const __le16 *)&buf[0]);
- ch1 = le16_to_cpup((const __le16 *)&buf[2]);
-
- chip->als_cur_info.als_ch0 = ch0;
- chip->als_cur_info.als_ch1 = ch1;
-
- if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
- goto return_max;
-
- if (ch0 == 0) {
- /* have no data, so return LAST VALUE */
- ret = chip->als_cur_info.lux = 0;
- goto out_unlock;
- }
- /* calculate ratio */
- ratio = (ch1 << 15) / ch0;
- /* convert to unscaled lux using the pointer to the table */
- for (p = (struct taos_lux *) taos_device_lux;
- p->ratio != 0 && p->ratio < ratio; p++)
- ;
-
- if (p->ratio == 0) {
- lux = 0;
- } else {
- ch0lux = ((ch0 * p->ch0) +
- (gainadj[chip->taos_settings.als_gain].ch0 >> 1))
- / gainadj[chip->taos_settings.als_gain].ch0;
- ch1lux = ((ch1 * p->ch1) +
- (gainadj[chip->taos_settings.als_gain].ch1 >> 1))
- / gainadj[chip->taos_settings.als_gain].ch1;
- lux = ch0lux - ch1lux;
- }
-
- /* note: lux is 31 bit max at this point */
- if (ch1lux > ch0lux) {
- dev_dbg(&chip->client->dev, "No Data - Return last value\n");
- ret = chip->als_cur_info.lux = 0;
- goto out_unlock;
- }
-
- /* adjust for active time scale */
- if (chip->als_time_scale == 0)
- lux = 0;
- else
- lux = (lux + (chip->als_time_scale >> 1)) /
- chip->als_time_scale;
-
- /* Adjust for active gain scale.
- * The taos_device_lux tables above have a factor of 8192 built in,
- * so we need to shift right.
- * User-specified gain provides a multiplier.
- * Apply user-specified gain before shifting right to retain precision.
- * Use 64 bits to avoid overflow on multiplication.
- * Then go back to 32 bits before division to avoid using div_u64().
- */
- lux64 = lux;
- lux64 = lux64 * chip->taos_settings.als_gain_trim;
- lux64 >>= 13;
- lux = lux64;
- lux = (lux + 500) / 1000;
- if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
-return_max:
- lux = TSL258X_LUX_CALC_OVER_FLOW;
- }
-
- /* Update the structure with the latest VALID lux. */
- chip->als_cur_info.lux = lux;
- ret = lux;
-
-out_unlock:
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
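To make the table walk and scaling in taos_get_lux() above concrete, here is a stripped-down user-space sketch. It is illustrative only: the time-scale division performed by the driver is omitted, gain index 0 is assumed (so the gainadj divisor is 1), and the default gain trim of 1000 is used.

#include <stdio.h>

struct seg { unsigned int ratio, ch0, ch1; };

/* First segments of taos_device_lux[] plus the all-zero terminator. */
static const struct seg lux_table[] = {
	{  9830,  8520, 15729 },
	{ 12452, 10807, 23344 },
	{ 14746,  6383, 11705 },
	{ 17695,  4063,  6554 },
	{ 0, 0, 0 },
};

int main(void)
{
	unsigned int ch0 = 1000, ch1 = 300;		/* example raw counts */
	unsigned int ratio = (ch1 << 15) / ch0;		/* 9830 */
	const struct seg *p = lux_table;

	while (p->ratio != 0 && p->ratio < ratio)	/* stops at the 9830 segment */
		p++;

	/* ch0lux - ch1lux; the driver bails out if ch1lux were larger. */
	unsigned long long lux64 = (unsigned long long)ch0 * p->ch0 -
				   (unsigned long long)ch1 * p->ch1;	/* 3,801,300 */

	/* The table has a factor of 8192 built in; apply the trim, then shift. */
	lux64 = (lux64 * 1000) >> 13;			/* 464,025 */
	printf("lux ~= %llu\n", (lux64 + 500) / 1000);	/* ~464 */
	return 0;
}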
-
-/*
- * Obtain single reading and calculate the als_gain_trim (later used
- * to derive actual lux).
- * Return updated gain_trim value.
- */
-static int taos_als_calibrate(struct iio_dev *indio_dev)
-{
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- u8 reg_val;
- unsigned int gain_trim_val;
- int ret;
- int lux_val;
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed to reach the CNTRL register, ret=%d\n",
- ret);
- return ret;
- }
-
- reg_val = i2c_smbus_read_byte(chip->client);
- if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON))
- != (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: device not powered on with ADC enabled\n");
-		return -EINVAL;
- }
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed to reach the STATUS register, ret=%d\n",
- ret);
- return ret;
- }
- reg_val = i2c_smbus_read_byte(chip->client);
-
- if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: STATUS - ADC not valid.\n");
- return -ENODATA;
- }
- lux_val = taos_get_lux(indio_dev);
- if (lux_val < 0) {
- dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
- return lux_val;
- }
- gain_trim_val = (unsigned int) (((chip->taos_settings.als_cal_target)
- * chip->taos_settings.als_gain_trim) / lux_val);
-
- if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
- dev_err(&chip->client->dev,
- "taos_als_calibrate failed: trim_val of %d is out of range\n",
- gain_trim_val);
- return -ENODATA;
- }
- chip->taos_settings.als_gain_trim = (int) gain_trim_val;
-
- return (int) gain_trim_val;
-}
-
-/*
- * Turn the device on.
- * Configuration must be set before calling this function.
- */
-static int taos_chip_on(struct iio_dev *indio_dev)
-{
- int i;
- int ret;
- u8 *uP;
- u8 utmp;
- int als_count;
- int als_time;
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- /* and make sure we're not already on */
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
- /* if forcing a register update - turn off, then on */
- dev_info(&chip->client->dev, "device is already enabled\n");
- return -EINVAL;
- }
-
- /* determine als integration register */
- als_count = (chip->taos_settings.als_time * 100 + 135) / 270;
- if (als_count == 0)
- als_count = 1; /* ensure at least one cycle */
-
- /* convert back to time (encompasses overrides) */
- als_time = (als_count * 27 + 5) / 10;
- chip->taos_config[TSL258X_ALS_TIME] = 256 - als_count;
-
- /* Set the gain based on taos_settings struct */
- chip->taos_config[TSL258X_GAIN] = chip->taos_settings.als_gain;
-
- /* set chip struct re scaling and saturation */
- chip->als_saturation = als_count * 922; /* 90% of full scale */
- chip->als_time_scale = (als_time + 25) / 50;
-
-	/* TSL258x-specific power-on / ADC-enable sequence.
-	 * Power on the device first. */
- utmp = TSL258X_CNTL_PWR_ON;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
- return ret;
- }
-
- /* Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers. */
- for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG + i,
- *uP++);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "taos_chip_on failed on reg %d.\n", i);
- return ret;
- }
- }
-
- usleep_range(3000, 3500);
-	/* Now enable the ADC and
-	 * initialize the desired mode of operation. */
- utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL,
- utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
- return ret;
- }
- chip->taos_chip_status = TSL258X_CHIP_WORKING;
-
- return ret;
-}
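The integration-time arithmetic in taos_chip_on() above is easier to follow with the default als_time of 100 ms plugged in (worked numbers only, no driver changes implied):

/* als_count      = (100 * 100 + 135) / 270 = 37 integration cycles
 * als_time       = (37 * 27 + 5) / 10      = 100 ms (rounded back)
 * ALS_TIME reg   = 256 - 37                = 219
 * als_saturation = 37 * 922                = 34114 (~90% of 37 * 1024 counts)
 * als_time_scale = (100 + 25) / 50         = 2
 */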
-
-static int taos_chip_off(struct iio_dev *indio_dev)
-{
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- /* turn device off */
- chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
- return i2c_smbus_write_byte_data(chip->client,
- TSL258X_CMD_REG | TSL258X_CNTRL,
- 0x00);
-}
-
-/* Sysfs Interface Functions */
-
-static ssize_t taos_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_chip_status);
-}
-
-static ssize_t taos_power_state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value == 0)
- taos_chip_off(indio_dev);
- else
- taos_chip_on(indio_dev);
-
- return len;
-}
-
-static ssize_t taos_gain_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- char gain[4] = {0};
-
- switch (chip->taos_settings.als_gain) {
- case 0:
- strcpy(gain, "001");
- break;
- case 1:
- strcpy(gain, "008");
- break;
- case 2:
- strcpy(gain, "016");
- break;
- case 3:
- strcpy(gain, "111");
- break;
- }
-
- return sprintf(buf, "%s\n", gain);
-}
-
-static ssize_t taos_gain_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- switch (value) {
- case 1:
- chip->taos_settings.als_gain = 0;
- break;
- case 8:
- chip->taos_settings.als_gain = 1;
- break;
- case 16:
- chip->taos_settings.als_gain = 2;
- break;
- case 111:
- chip->taos_settings.als_gain = 3;
- break;
- default:
- dev_err(dev, "Invalid Gain Index (must be 1,8,16,111)\n");
-		return -EINVAL;
- }
-
- return len;
-}
-
-static ssize_t taos_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", "1 8 16 111");
-}
-
-static ssize_t taos_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_time);
-}
-
-static ssize_t taos_als_time_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if ((value < 50) || (value > 650))
- return -EINVAL;
-
- if (value % 50)
- return -EINVAL;
-
- chip->taos_settings.als_time = value;
-
- return len;
-}
-
-static ssize_t taos_als_time_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n",
- "50 100 150 200 250 300 350 400 450 500 550 600 650");
-}
-
-static ssize_t taos_als_trim_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim);
-}
-
-static ssize_t taos_als_trim_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->taos_settings.als_gain_trim = value;
-
- return len;
-}
-
-static ssize_t taos_als_cal_target_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target);
-}
-
-static ssize_t taos_als_cal_target_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->taos_settings.als_cal_target = value;
-
- return len;
-}
-
-static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- int ret;
-
- ret = taos_get_lux(dev_to_iio_dev(dev));
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", ret);
-}
-
-static ssize_t taos_do_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int value;
-
- if (kstrtoint(buf, 0, &value))
- return -EINVAL;
-
- if (value == 1)
- taos_als_calibrate(indio_dev);
-
- return len;
-}
-
-static ssize_t taos_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int i;
- int offset = 0;
-
- for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) {
- offset += sprintf(buf + offset, "%u,%u,%u,",
- taos_device_lux[i].ratio,
- taos_device_lux[i].ch0,
- taos_device_lux[i].ch1);
- if (taos_device_lux[i].ratio == 0) {
- /* We just printed the first "0" entry.
- * Now get rid of the extra "," and break. */
- offset--;
- break;
- }
- }
-
- offset += sprintf(buf + offset, "\n");
- return offset;
-}
-
-static ssize_t taos_luxtable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(taos_device_lux)*3 + 1];
- int n;
-
- get_options(buf, ARRAY_SIZE(value), value);
-
- /* We now have an array of ints starting at value[1], and
- * enumerated by value[0].
- * We expect each group of three ints is one table entry,
- * and the last table entry is all 0.
- */
- n = value[0];
- if ((n % 3) || n < 6 || n > ((ARRAY_SIZE(taos_device_lux) - 1) * 3)) {
- dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
- return -EINVAL;
- }
- if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
- dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
- return -EINVAL;
- }
-
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING)
- taos_chip_off(indio_dev);
-
- /* Zero out the table */
- memset(taos_device_lux, 0, sizeof(taos_device_lux));
-	memcpy(taos_device_lux, &value[1], value[0] * sizeof(value[0]));
-
- taos_chip_on(indio_dev);
-
- return len;
-}
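For reference, taos_luxtable_store() above expects a comma-separated list of integers as parsed by get_options(): groups of three (ratio, ch0, ch1) ending with an all-zero terminator entry. An illustrative input, with values borrowed from the default table:

/* Writing the string
 *   "12452,10807,23344,14746,6383,11705,0,0,0"
 * to illuminance0_lux_table passes the n % 3 and terminator checks above and
 * installs two segments plus the terminating { 0, 0, 0 } entry.
 */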
-
-static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
- taos_power_state_show, taos_power_state_store);
-
-static DEVICE_ATTR(illuminance0_calibscale, S_IRUGO | S_IWUSR,
- taos_gain_show, taos_gain_store);
-static DEVICE_ATTR(illuminance0_calibscale_available, S_IRUGO,
- taos_gain_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR,
- taos_als_time_show, taos_als_time_store);
-static DEVICE_ATTR(illuminance0_integration_time_available, S_IRUGO,
- taos_als_time_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_calibbias, S_IRUGO | S_IWUSR,
- taos_als_trim_show, taos_als_trim_store);
-
-static DEVICE_ATTR(illuminance0_input_target, S_IRUGO | S_IWUSR,
- taos_als_cal_target_show, taos_als_cal_target_store);
-
-static DEVICE_ATTR(illuminance0_input, S_IRUGO, taos_lux_show, NULL);
-static DEVICE_ATTR(illuminance0_calibrate, S_IWUSR, NULL, taos_do_calibrate);
-static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR,
- taos_luxtable_show, taos_luxtable_store);
-
-static struct attribute *sysfs_attrs_ctrl[] = {
- &dev_attr_power_state.attr,
- &dev_attr_illuminance0_calibscale.attr, /* Gain */
- &dev_attr_illuminance0_calibscale_available.attr,
- &dev_attr_illuminance0_integration_time.attr, /* I time*/
- &dev_attr_illuminance0_integration_time_available.attr,
- &dev_attr_illuminance0_calibbias.attr, /* trim */
- &dev_attr_illuminance0_input_target.attr,
- &dev_attr_illuminance0_input.attr,
- &dev_attr_illuminance0_calibrate.attr,
- &dev_attr_illuminance0_lux_table.attr,
- NULL
-};
-
-static struct attribute_group tsl2583_attribute_group = {
- .attrs = sysfs_attrs_ctrl,
-};
-
-/* Use the default register values to identify the Taos device */
-static int taos_tsl258x_device(unsigned char *bufp)
-{
- return ((bufp[TSL258X_CHIPID] & 0xf0) == 0x90);
-}
-
-static const struct iio_info tsl2583_info = {
- .attrs = &tsl2583_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-/*
- * Client probe function - When a valid device is found, the driver's device
- * data structure is updated, and initialization completes successfully.
- */
-static int taos_probe(struct i2c_client *clientp,
- const struct i2c_device_id *idp)
-{
- int i, ret;
- unsigned char buf[TSL258X_MAX_DEVICE_REGS];
- struct tsl2583_chip *chip;
- struct iio_dev *indio_dev;
-
- if (!i2c_check_functionality(clientp->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n");
- return -EOPNOTSUPP;
- }
-
- indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
- chip = iio_priv(indio_dev);
- chip->client = clientp;
- i2c_set_clientdata(clientp, indio_dev);
-
- mutex_init(&chip->als_mutex);
- chip->taos_chip_status = TSL258X_CHIP_UNKNOWN;
- memcpy(chip->taos_config, taos_config, sizeof(chip->taos_config));
-
- for (i = 0; i < TSL258X_MAX_DEVICE_REGS; i++) {
- ret = i2c_smbus_write_byte(clientp,
- (TSL258X_CMD_REG | (TSL258X_CNTRL + i)));
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_write_byte to cmd reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
- ret = i2c_smbus_read_byte(clientp);
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_read_byte from reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
- buf[i] = ret;
- }
-
- if (!taos_tsl258x_device(buf)) {
- dev_info(&clientp->dev,
- "i2c device found but does not match expected id in taos_probe()\n");
- return -EINVAL;
- }
-
- ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL));
- if (ret < 0) {
- dev_err(&clientp->dev,
- "i2c_smbus_write_byte() to cmd reg failed in taos_probe(), err = %d\n",
- ret);
- return ret;
- }
-
- indio_dev->info = &tsl2583_info;
- indio_dev->dev.parent = &clientp->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = chip->client->name;
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&clientp->dev, "iio registration failed\n");
- return ret;
- }
-
- /* Load up the V2 defaults (these are hard coded defaults for now) */
- taos_defaults(chip);
-
- /* Make sure the chip is on */
- taos_chip_on(indio_dev);
-
- dev_info(&clientp->dev, "Light sensor found.\n");
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int taos_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- mutex_lock(&chip->als_mutex);
-
- if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
- ret = taos_chip_off(indio_dev);
- chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
- }
-
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-static int taos_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
- struct tsl2583_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- mutex_lock(&chip->als_mutex);
-
- if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED)
- ret = taos_chip_on(indio_dev);
-
- mutex_unlock(&chip->als_mutex);
- return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
-#define TAOS_PM_OPS (&taos_pm_ops)
-#else
-#define TAOS_PM_OPS NULL
-#endif
-
-static int taos_remove(struct i2c_client *client)
-{
- iio_device_unregister(i2c_get_clientdata(client));
-
- return 0;
-}
-
-static struct i2c_device_id taos_idtable[] = {
- { "tsl2580", 0 },
- { "tsl2581", 1 },
- { "tsl2583", 2 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, taos_idtable);
-
-/* Driver definition */
-static struct i2c_driver taos_driver = {
- .driver = {
- .name = "tsl2583",
- .pm = TAOS_PM_OPS,
- },
- .id_table = taos_idtable,
- .probe = taos_probe,
- .remove = taos_remove,
-};
-module_i2c_driver(taos_driver);
-
-MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
-MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/light/tsl2x7x.h b/drivers/staging/iio/light/tsl2x7x.h
deleted file mode 100644
index ecae92211216..000000000000
--- a/drivers/staging/iio/light/tsl2x7x.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity (lux)
- * and proximity (prox) within the TAOS TSL2X7X family of devices.
- *
- * Copyright (c) 2012, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __TSL2X7X_H
-#define __TSL2X7X_H
-#include <linux/pm.h>
-
-/* Max number of segments allowable in LUX table */
-#define TSL2X7X_MAX_LUX_TABLE_SIZE 9
-#define MAX_DEFAULT_TABLE_BYTES (sizeof(int) * TSL2X7X_MAX_LUX_TABLE_SIZE)
-
-struct iio_dev;
-
-struct tsl2x7x_lux {
- unsigned int ratio;
- unsigned int ch0;
- unsigned int ch1;
-};
-
-/**
- * struct tsl2x7x_settings - power on defaults unless
- * overridden by platform data.
- * @als_time:              ALS integration time - multiple of 50 ms
- * @als_gain: Index into the ALS gain table.
- * @als_gain_trim: default gain trim to account for
- * aperture effects.
- * @wait_time: Time between PRX and ALS cycles
- * in 2.7 periods
- * @prx_time: 5.2ms prox integration time -
- * decrease in 2.7ms periods
- * @prx_gain: Proximity gain index
- * @prox_config: Prox configuration filters.
- * @als_cal_target: Known external ALS reading for
- * calibration.
- * @interrupts_en: Enable/Disable - 0x00 = none, 0x10 = als,
- * 0x20 = prx, 0x30 = bth
- * @persistence: H/W Filters, Number of 'out of limits'
- * ADC readings PRX/ALS.
- * @als_thresh_low: CH0 'low' count to trigger interrupt.
- * @als_thresh_high: CH0 'high' count to trigger interrupt.
- * @prox_thres_low: Low threshold proximity detection.
- * @prox_thres_high: High threshold proximity detection
- * @prox_pulse_count:      Number of proximity emitter pulses
- * @prox_max_samples_cal: Used for prox cal.
- */
-struct tsl2x7x_settings {
- int als_time;
- int als_gain;
- int als_gain_trim;
- int wait_time;
- int prx_time;
- int prox_gain;
- int prox_config;
- int als_cal_target;
- u8 interrupts_en;
- u8 persistence;
- int als_thresh_low;
- int als_thresh_high;
- int prox_thres_low;
- int prox_thres_high;
- int prox_pulse_count;
- int prox_max_samples_cal;
-};
-
-/**
- * struct tsl2X7X_platform_data - Platform callback, glass and defaults
- * @platform_power: Suspend/resume platform callback
- * @power_on: Power on callback
- * @power_off: Power off callback
- * @platform_lux_table:           Device-specific glass coefficients
- * @platform_default_settings: Device specific power on defaults
- *
- */
-struct tsl2X7X_platform_data {
- int (*platform_power)(struct device *dev, pm_message_t);
- int (*power_on)(struct iio_dev *indio_dev);
- int (*power_off)(struct i2c_client *dev);
- struct tsl2x7x_lux platform_lux_table[TSL2X7X_MAX_LUX_TABLE_SIZE];
- struct tsl2x7x_settings *platform_default_settings;
-};
-
-#endif /* __TSL2X7X_H */
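As a sketch of how these hooks were meant to be used, a hypothetical board file could supply platform data like the following (names and values are made up for illustration; note the driver copies the whole settings struct, so a real board file would fill in every field rather than rely on zeroes):

#include "tsl2x7x.h"

static struct tsl2x7x_settings board_tsl2772_settings = {
	.als_time        = 219,		/* ~101 ms */
	.als_gain        = 0,
	.als_gain_trim   = 1000,
	.als_cal_target  = 150,
	.prox_thres_high = 512,
	/* remaining fields left at 0 here only for brevity */
};

static struct tsl2X7X_platform_data board_tsl2772_pdata = {
	.platform_lux_table = {
		{ 14013, 466, 917 },
		{ 18222, 310, 552 },
		{ 0, 0, 0 },
	},
	.platform_default_settings = &board_tsl2772_settings,
};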
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
deleted file mode 100644
index 010e607dd880..000000000000
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ /dev/null
@@ -1,2030 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity in (lux)
- * and proximity detection (prox) within the TAOS TSL2X7X family of devices.
- *
- * Copyright (c) 2012, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/iio/events.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "tsl2x7x.h"
-
-/* Cal defs*/
-#define PROX_STAT_CAL 0
-#define PROX_STAT_SAMP 1
-#define MAX_SAMPLES_CAL 200
-
-/* TSL2X7X Device ID */
-#define TRITON_ID 0x00
-#define SWORDFISH_ID 0x30
-#define HALIBUT_ID 0x20
-
-/* Lux calculation constants */
-#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
-
-/* TAOS Register definitions - note:
- * depending on device, some of these register are not used and the
- * register address is benign.
- */
-/* 2X7X register offsets */
-#define TSL2X7X_MAX_CONFIG_REG 16
-
-/* Device Registers and Masks */
-#define TSL2X7X_CNTRL 0x00
-#define TSL2X7X_ALS_TIME 0X01
-#define TSL2X7X_PRX_TIME 0x02
-#define TSL2X7X_WAIT_TIME 0x03
-#define TSL2X7X_ALS_MINTHRESHLO 0X04
-#define TSL2X7X_ALS_MINTHRESHHI 0X05
-#define TSL2X7X_ALS_MAXTHRESHLO 0X06
-#define TSL2X7X_ALS_MAXTHRESHHI 0X07
-#define TSL2X7X_PRX_MINTHRESHLO 0X08
-#define TSL2X7X_PRX_MINTHRESHHI 0X09
-#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
-#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
-#define TSL2X7X_PERSISTENCE 0x0C
-#define TSL2X7X_PRX_CONFIG 0x0D
-#define TSL2X7X_PRX_COUNT 0x0E
-#define TSL2X7X_GAIN 0x0F
-#define TSL2X7X_NOTUSED 0x10
-#define TSL2X7X_REVID 0x11
-#define TSL2X7X_CHIPID 0x12
-#define TSL2X7X_STATUS 0x13
-#define TSL2X7X_ALS_CHAN0LO 0x14
-#define TSL2X7X_ALS_CHAN0HI 0x15
-#define TSL2X7X_ALS_CHAN1LO 0x16
-#define TSL2X7X_ALS_CHAN1HI 0x17
-#define TSL2X7X_PRX_LO 0x18
-#define TSL2X7X_PRX_HI 0x19
-
-/* tsl2X7X cmd reg masks */
-#define TSL2X7X_CMD_REG 0x80
-#define TSL2X7X_CMD_SPL_FN 0x60
-
-#define TSL2X7X_CMD_PROX_INT_CLR 0X05
-#define TSL2X7X_CMD_ALS_INT_CLR 0x06
-#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
-
-/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_ADC_ENBL 0x02
-#define TSL2X7X_CNTL_PWR_ON 0x01
-
-/* tsl2X7X status reg masks */
-#define TSL2X7X_STA_ADC_VALID 0x01
-#define TSL2X7X_STA_PRX_VALID 0x02
-#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID |\
- TSL2X7X_STA_PRX_VALID)
-#define TSL2X7X_STA_ALS_INTR 0x10
-#define TSL2X7X_STA_PRX_INTR 0x20
-
-/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_REG_CLEAR 0x00
-#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
-#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
-#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
-#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
-#define TSL2X7X_CNTL_PWRON 0x01
-#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
-#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
-#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
-#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
-
-/* Prox diode to use */
-#define TSL2X7X_DIODE0 0x10
-#define TSL2X7X_DIODE1 0x20
-#define TSL2X7X_DIODE_BOTH 0x30
-
-/* LED Power */
-#define TSL2X7X_mA100 0x00
-#define TSL2X7X_mA50 0x40
-#define TSL2X7X_mA25 0x80
-#define TSL2X7X_mA13 0xD0
-#define TSL2X7X_MAX_TIMER_CNT (0xFF)
-
-#define TSL2X7X_MIN_ITIME 3
-
-/* TAOS txx2x7x Device family members */
-enum {
- tsl2571,
- tsl2671,
- tmd2671,
- tsl2771,
- tmd2771,
- tsl2572,
- tsl2672,
- tmd2672,
- tsl2772,
- tmd2772
-};
-
-enum {
- TSL2X7X_CHIP_UNKNOWN = 0,
- TSL2X7X_CHIP_WORKING = 1,
- TSL2X7X_CHIP_SUSPENDED = 2
-};
-
-struct tsl2x7x_parse_result {
- int integer;
- int fract;
-};
-
-/* Per-device data */
-struct tsl2x7x_als_info {
- u16 als_ch0;
- u16 als_ch1;
- u16 lux;
-};
-
-struct tsl2x7x_prox_stat {
- int min;
- int max;
- int mean;
- unsigned long stddev;
-};
-
-struct tsl2x7x_chip_info {
- int chan_table_elements;
- struct iio_chan_spec channel[4];
- const struct iio_info *info;
-};
-
-struct tsl2X7X_chip {
- kernel_ulong_t id;
- struct mutex prox_mutex;
- struct mutex als_mutex;
- struct i2c_client *client;
- u16 prox_data;
- struct tsl2x7x_als_info als_cur_info;
- struct tsl2x7x_settings tsl2x7x_settings;
- struct tsl2X7X_platform_data *pdata;
- int als_time_scale;
- int als_saturation;
- int tsl2x7x_chip_status;
- u8 tsl2x7x_config[TSL2X7X_MAX_CONFIG_REG];
- const struct tsl2x7x_chip_info *chip_info;
- const struct iio_info *info;
- s64 event_timestamp;
- /* This structure is intentionally large to accommodate
- * updates via sysfs. */
- /* Sized to 9 = max 8 segments + 1 termination segment */
- struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
-};
-
-/* Different devices require different coefficients */
-static const struct tsl2x7x_lux tsl2x71_lux_table[] = {
- { 14461, 611, 1211 },
- { 18540, 352, 623 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux tmd2x71_lux_table[] = {
- { 11635, 115, 256 },
- { 15536, 87, 179 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux tsl2x72_lux_table[] = {
- { 14013, 466, 917 },
- { 18222, 310, 552 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux tmd2x72_lux_table[] = {
- { 13218, 130, 262 },
- { 17592, 92, 169 },
- { 0, 0, 0 },
-};
-
-static const struct tsl2x7x_lux *tsl2x7x_default_lux_table_group[] = {
- [tsl2571] = tsl2x71_lux_table,
- [tsl2671] = tsl2x71_lux_table,
- [tmd2671] = tmd2x71_lux_table,
- [tsl2771] = tsl2x71_lux_table,
- [tmd2771] = tmd2x71_lux_table,
- [tsl2572] = tsl2x72_lux_table,
- [tsl2672] = tsl2x72_lux_table,
- [tmd2672] = tmd2x72_lux_table,
- [tsl2772] = tsl2x72_lux_table,
- [tmd2772] = tmd2x72_lux_table,
-};
-
-static const struct tsl2x7x_settings tsl2x7x_default_settings = {
- .als_time = 219, /* 101 ms */
- .als_gain = 0,
- .prx_time = 254, /* 5.4 ms */
- .prox_gain = 1,
- .wait_time = 245,
- .prox_config = 0,
- .als_gain_trim = 1000,
- .als_cal_target = 150,
- .als_thresh_low = 200,
- .als_thresh_high = 256,
- .persistence = 255,
- .interrupts_en = 0,
- .prox_thres_low = 0,
- .prox_thres_high = 512,
- .prox_max_samples_cal = 30,
- .prox_pulse_count = 8
-};
-
-static const s16 tsl2X7X_als_gainadj[] = {
- 1,
- 8,
- 16,
- 120
-};
-
-static const s16 tsl2X7X_prx_gainadj[] = {
- 1,
- 2,
- 4,
- 8
-};
-
-/* Channel variations */
-enum {
- ALS,
- PRX,
- ALSPRX,
- PRX2,
- ALSPRX2,
-};
-
-static const u8 device_channel_config[] = {
- ALS,
- PRX,
- PRX,
- ALSPRX,
- ALSPRX,
- ALS,
- PRX2,
- PRX2,
- ALSPRX2,
- ALSPRX2
-};
-
-/**
- * tsl2x7x_i2c_read() - Read a byte from a register.
- * @client: i2c client
- * @reg: device register to read from
- * @val: pointer to location to store register contents.
- *
- */
-static int
-tsl2x7x_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
-{
- int ret = 0;
-
- /* select register to write */
- ret = i2c_smbus_write_byte(client, (TSL2X7X_CMD_REG | reg));
- if (ret < 0) {
- dev_err(&client->dev, "failed to write register %x\n", reg);
- return ret;
- }
-
- /* read the data */
- ret = i2c_smbus_read_byte(client);
- if (ret >= 0)
- *val = (u8)ret;
- else
- dev_err(&client->dev, "failed to read register %x\n", reg);
-
- return ret;
-}
-
-/**
- * tsl2x7x_get_lux() - Reads and calculates current lux value.
- * @indio_dev: pointer to IIO device
- *
- * The raw ch0 and ch1 values of the ambient light sensed in the last
- * integration cycle are read from the device.
- * Time scale factor array values are adjusted based on the integration time.
- * The raw values are multiplied by a scale factor, and device gain is obtained
- * using gain index. Limit checks are done next, then the ratio of a multiple
- * of ch1 value, to the ch0 value, is calculated. Array tsl2x7x_device_lux[]
- * is then scanned to find the first ratio value that is just above the ratio
- * we just calculated. The ch0 and ch1 multiplier constants in the array are
- * then used along with the time scale factor array values, to calculate the
- * lux.
- */
-static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
-{
- u16 ch0, ch1; /* separated ch0/ch1 data from device */
- u32 lux; /* raw lux calculated from device data */
- u64 lux64;
- u32 ratio;
- u8 buf[4];
- struct tsl2x7x_lux *p;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int i, ret;
- u32 ch0lux = 0;
- u32 ch1lux = 0;
-
- if (mutex_trylock(&chip->als_mutex) == 0)
- return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
-
- if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
- /* device is not enabled */
- dev_err(&chip->client->dev, "%s: device is not enabled\n",
- __func__);
- ret = -EBUSY;
- goto out_unlock;
- }
-
- ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "%s: Failed to read STATUS Reg\n", __func__);
- goto out_unlock;
- }
- /* is data new & valid */
- if (!(buf[0] & TSL2X7X_STA_ADC_VALID)) {
- dev_err(&chip->client->dev,
- "%s: data not valid yet\n", __func__);
- ret = chip->als_cur_info.lux; /* return LAST VALUE */
- goto out_unlock;
- }
-
- for (i = 0; i < 4; i++) {
- ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i)),
- &buf[i]);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "failed to read. err=%x\n", ret);
- goto out_unlock;
- }
- }
-
- /* clear any existing interrupt status */
- ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG |
- TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_ALS_INT_CLR));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "i2c_write_command failed - err = %d\n", ret);
- goto out_unlock; /* have no data, so return failure */
- }
-
- /* extract ALS/lux data */
- ch0 = le16_to_cpup((const __le16 *)&buf[0]);
- ch1 = le16_to_cpup((const __le16 *)&buf[2]);
-
- chip->als_cur_info.als_ch0 = ch0;
- chip->als_cur_info.als_ch1 = ch1;
-
- if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation)) {
- lux = TSL2X7X_LUX_CALC_OVER_FLOW;
- goto return_max;
- }
-
- if (ch0 == 0) {
- /* have no data, so return LAST VALUE */
- ret = chip->als_cur_info.lux;
- goto out_unlock;
- }
- /* calculate ratio */
- ratio = (ch1 << 15) / ch0;
- /* convert to unscaled lux using the pointer to the table */
- p = (struct tsl2x7x_lux *) chip->tsl2x7x_device_lux;
- while (p->ratio != 0 && p->ratio < ratio)
- p++;
-
- if (p->ratio == 0) {
- lux = 0;
- } else {
- ch0lux = DIV_ROUND_UP((ch0 * p->ch0),
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
- ch1lux = DIV_ROUND_UP((ch1 * p->ch1),
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
- lux = ch0lux - ch1lux;
- }
-
- /* note: lux is 31 bit max at this point */
- if (ch1lux > ch0lux) {
- dev_dbg(&chip->client->dev, "ch1lux > ch0lux-return last value\n");
- ret = chip->als_cur_info.lux;
- goto out_unlock;
- }
-
- /* adjust for active time scale */
- if (chip->als_time_scale == 0)
- lux = 0;
- else
- lux = (lux + (chip->als_time_scale >> 1)) /
- chip->als_time_scale;
-
- /* adjust for active gain scale
- * The tsl2x7x_device_lux tables have a factor of 256 built-in.
- * User-specified gain provides a multiplier.
- * Apply user-specified gain before shifting right to retain precision.
- * Use 64 bits to avoid overflow on multiplication.
- * Then go back to 32 bits before division to avoid using div_u64().
- */
-
- lux64 = lux;
- lux64 = lux64 * chip->tsl2x7x_settings.als_gain_trim;
- lux64 >>= 8;
- lux = lux64;
- lux = (lux + 500) / 1000;
-
- if (lux > TSL2X7X_LUX_CALC_OVER_FLOW) /* check for overflow */
- lux = TSL2X7X_LUX_CALC_OVER_FLOW;
-
- /* Update the structure with the latest lux. */
-return_max:
- chip->als_cur_info.lux = lux;
- ret = lux;
-
-out_unlock:
- mutex_unlock(&chip->als_mutex);
-
- return ret;
-}
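/*
 * Illustrative worked example of the lux pipeline implemented above.
 * The register readings and the table entry are hypothetical values,
 * not taken from an actual tsl2x7x_device_lux table:
 *
 *   ch0 = 1000, ch1 = 300, als gain index 0 (gain adjust assumed to be 1)
 *   ratio  = (ch1 << 15) / ch0 = (300 * 32768) / 1000 = 9830
 *   the table scan stops at the first entry with p->ratio >= 9830,
 *   say a hypothetical { .ratio = 12000, .ch0 = 425, .ch1 = 212 }
 *   ch0lux = DIV_ROUND_UP(1000 * 425, 1) = 425000
 *   ch1lux = DIV_ROUND_UP(300 * 212, 1)  = 63600
 *   lux    = ch0lux - ch1lux             = 361400
 *   lux is then divided (with rounding) by als_time_scale, multiplied
 *   by als_gain_trim and shifted right by 8 (the tables carry a
 *   built-in factor of 256), and finally scaled by (lux + 500) / 1000
 *   to produce the reported value.
 */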
-
-/**
- * tsl2x7x_get_prox() - Reads proximity data registers and updates
- * chip->prox_data.
- *
- * @indio_dev: pointer to IIO device
- */
-static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
-{
- int i;
- int ret;
- u8 status;
- u8 chdata[2];
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- if (mutex_trylock(&chip->prox_mutex) == 0) {
- dev_err(&chip->client->dev,
- "%s: Can't get prox mutex\n", __func__);
- return -EBUSY;
- }
-
- ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
- if (ret < 0) {
- dev_err(&chip->client->dev, "i2c err=%d\n", ret);
- goto prox_poll_err;
- }
-
- switch (chip->id) {
- case tsl2571:
- case tsl2671:
- case tmd2671:
- case tsl2771:
- case tmd2771:
- if (!(status & TSL2X7X_STA_ADC_VALID))
- goto prox_poll_err;
- break;
- case tsl2572:
- case tsl2672:
- case tmd2672:
- case tsl2772:
- case tmd2772:
- if (!(status & TSL2X7X_STA_PRX_VALID))
- goto prox_poll_err;
- break;
- }
-
- for (i = 0; i < 2; i++) {
- ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG |
- (TSL2X7X_PRX_LO + i)), &chdata[i]);
- if (ret < 0)
- goto prox_poll_err;
- }
-
- chip->prox_data =
- le16_to_cpup((const __le16 *)&chdata[0]);
-
-prox_poll_err:
-
- mutex_unlock(&chip->prox_mutex);
-
- return chip->prox_data;
-}
-
-/**
- * tsl2x7x_defaults() - Populates the device nominal operating parameters
- * with those provided by a 'platform' data struct or
- * with predefined defaults.
- *
- * @chip: pointer to device structure.
- */
-static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
-{
-	/* If operational settings are defined elsewhere, use them. */
- if (chip->pdata && chip->pdata->platform_default_settings)
- memcpy(&(chip->tsl2x7x_settings),
- chip->pdata->platform_default_settings,
- sizeof(tsl2x7x_default_settings));
- else
- memcpy(&(chip->tsl2x7x_settings),
- &tsl2x7x_default_settings,
- sizeof(tsl2x7x_default_settings));
-
- /* Load up the proper lux table. */
- if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
- memcpy(chip->tsl2x7x_device_lux,
- chip->pdata->platform_lux_table,
- sizeof(chip->pdata->platform_lux_table));
- else
- memcpy(chip->tsl2x7x_device_lux,
- (struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
- MAX_DEFAULT_TABLE_BYTES);
-}
-
-/**
- * tsl2x7x_als_calibrate() - Obtain single reading and calculate
- * the als_gain_trim.
- *
- * @indio_dev: pointer to IIO device
- */
-static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- u8 reg_val;
- int gain_trim_val;
- int ret;
- int lux_val;
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "failed to write CNTRL register, ret=%d\n", ret);
- return ret;
- }
-
- reg_val = i2c_smbus_read_byte(chip->client);
- if ((reg_val & (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON))
- != (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) {
- dev_err(&chip->client->dev,
- "%s: failed: ADC not enabled\n", __func__);
- return -1;
- }
-
- ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "failed to write ctrl reg: ret=%d\n", ret);
- return ret;
- }
-
- reg_val = i2c_smbus_read_byte(chip->client);
- if ((reg_val & TSL2X7X_STA_ADC_VALID) != TSL2X7X_STA_ADC_VALID) {
- dev_err(&chip->client->dev,
- "%s: failed: STATUS - ADC not valid.\n", __func__);
- return -ENODATA;
- }
-
- lux_val = tsl2x7x_get_lux(indio_dev);
- if (lux_val < 0) {
- dev_err(&chip->client->dev,
- "%s: failed to get lux\n", __func__);
- return lux_val;
- }
-
- gain_trim_val = ((chip->tsl2x7x_settings.als_cal_target)
- * chip->tsl2x7x_settings.als_gain_trim) / lux_val;
- if ((gain_trim_val < 250) || (gain_trim_val > 4000))
- return -ERANGE;
-
- chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
- dev_info(&chip->client->dev,
- "%s als_calibrate completed\n", chip->client->name);
-
- return (int) gain_trim_val;
-}
-
-static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
-{
- int i;
- int ret = 0;
- u8 *dev_reg;
- u8 utmp;
- int als_count;
- int als_time;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- u8 reg_val = 0;
-
- if (chip->pdata && chip->pdata->power_on)
- chip->pdata->power_on(indio_dev);
-
- /* Non calculated parameters */
- chip->tsl2x7x_config[TSL2X7X_PRX_TIME] =
- chip->tsl2x7x_settings.prx_time;
- chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] =
- chip->tsl2x7x_settings.wait_time;
- chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] =
- chip->tsl2x7x_settings.prox_config;
-
- chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHLO] =
- (chip->tsl2x7x_settings.als_thresh_low) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHHI] =
- (chip->tsl2x7x_settings.als_thresh_low >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHLO] =
- (chip->tsl2x7x_settings.als_thresh_high) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHHI] =
- (chip->tsl2x7x_settings.als_thresh_high >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] =
- chip->tsl2x7x_settings.persistence;
-
- chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
- chip->tsl2x7x_settings.prox_pulse_count;
- chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
- (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
- (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
- (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
- (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
-
- /* and make sure we're not already on */
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
- /* if forcing a register update - turn off, then on */
- dev_info(&chip->client->dev, "device is already enabled\n");
- return -EINVAL;
- }
-
- /* determine als integration register */
- als_count = (chip->tsl2x7x_settings.als_time * 100 + 135) / 270;
- if (als_count == 0)
- als_count = 1; /* ensure at least one cycle */
-
- /* convert back to time (encompasses overrides) */
- als_time = (als_count * 27 + 5) / 10;
- chip->tsl2x7x_config[TSL2X7X_ALS_TIME] = 256 - als_count;
-
- /* Set the gain based on tsl2x7x_settings struct */
- chip->tsl2x7x_config[TSL2X7X_GAIN] =
- (chip->tsl2x7x_settings.als_gain |
- (TSL2X7X_mA100 | TSL2X7X_DIODE1)
- | ((chip->tsl2x7x_settings.prox_gain) << 2));
-
- /* set chip struct re scaling and saturation */
- chip->als_saturation = als_count * 922; /* 90% of full scale */
- chip->als_time_scale = (als_time + 25) / 50;
-
-	/* TSL2X7X-specific power-on / ADC-enable sequence:
-	 * power on the device first. */
- utmp = TSL2X7X_CNTL_PWR_ON;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "%s: failed on CNTRL reg.\n", __func__);
- return ret;
- }
-
- /* Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers. */
- for (i = 0, dev_reg = chip->tsl2x7x_config;
- i < TSL2X7X_MAX_CONFIG_REG; i++) {
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG + i, *dev_reg++);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "failed on write to reg %d.\n", i);
- return ret;
- }
- }
-
- mdelay(3); /* Power-on settling time */
-
-	/* Now enable the ADC and
-	 * initialize the desired mode of operation */
- utmp = TSL2X7X_CNTL_PWR_ON |
- TSL2X7X_CNTL_ADC_ENBL |
- TSL2X7X_CNTL_PROX_DET_ENBL;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "%s: failed on 2nd CTRL reg.\n", __func__);
- return ret;
- }
-
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_WORKING;
-
- if (chip->tsl2x7x_settings.interrupts_en != 0) {
- dev_info(&chip->client->dev, "Setting Up Interrupt(s)\n");
-
- reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
- if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
- (chip->tsl2x7x_settings.interrupts_en == 0x30))
- reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
-
- reg_val |= chip->tsl2x7x_settings.interrupts_en;
- ret = i2c_smbus_write_byte_data(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL), reg_val);
- if (ret < 0)
- dev_err(&chip->client->dev,
- "%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
- __func__);
-
- /* Clear out any initial interrupts */
- ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
- if (ret < 0) {
- dev_err(&chip->client->dev,
- "%s: Failed to clear Int status\n",
- __func__);
- return ret;
- }
- }
-
- return ret;
-}
-
-static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
-{
- int ret;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- /* turn device off */
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
-
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
-
- if (chip->pdata && chip->pdata->power_off)
- chip->pdata->power_off(chip->client);
-
- return ret;
-}
-
-/**
- * tsl2x7x_invoke_change
- * @indio_dev: pointer to IIO device
- *
- * Obtain and lock both ALS and PROX resources,
- * determine and save device state (On/Off),
- * cycle device to implement updated parameter,
- * put device back into proper state, and unlock
- * resource.
- */
-static
-int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int device_status = chip->tsl2x7x_chip_status;
-
- mutex_lock(&chip->als_mutex);
- mutex_lock(&chip->prox_mutex);
-
- if (device_status == TSL2X7X_CHIP_WORKING)
- tsl2x7x_chip_off(indio_dev);
-
- tsl2x7x_chip_on(indio_dev);
-
- if (device_status != TSL2X7X_CHIP_WORKING)
- tsl2x7x_chip_off(indio_dev);
-
- mutex_unlock(&chip->prox_mutex);
- mutex_unlock(&chip->als_mutex);
-
- return 0;
-}
-
-static
-void tsl2x7x_prox_calculate(int *data, int length,
- struct tsl2x7x_prox_stat *statP)
-{
- int i;
- int sample_sum;
- int tmp;
-
- if (length == 0)
- length = 1;
-
- sample_sum = 0;
- statP->min = INT_MAX;
- statP->max = INT_MIN;
- for (i = 0; i < length; i++) {
- sample_sum += data[i];
- statP->min = min(statP->min, data[i]);
- statP->max = max(statP->max, data[i]);
- }
-
- statP->mean = sample_sum / length;
- sample_sum = 0;
- for (i = 0; i < length; i++) {
- tmp = data[i] - statP->mean;
- sample_sum += tmp * tmp;
- }
- statP->stddev = int_sqrt((long)sample_sum)/length;
-}
-
-/**
- * tsl2x7x_prox_cal() - Calculates std. and sets thresholds.
- * @indio_dev: pointer to IIO device
- *
- * Calculates a standard deviation based on the samples,
- * and sets the threshold accordingly.
- */
-static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
-{
- int prox_history[MAX_SAMPLES_CAL + 1];
- int i;
- struct tsl2x7x_prox_stat prox_stat_data[2];
- struct tsl2x7x_prox_stat *calP;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- u8 tmp_irq_settings;
- u8 current_state = chip->tsl2x7x_chip_status;
-
- if (chip->tsl2x7x_settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
- dev_err(&chip->client->dev,
- "max prox samples cal is too big: %d\n",
- chip->tsl2x7x_settings.prox_max_samples_cal);
- chip->tsl2x7x_settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
- }
-
- /* have to stop to change settings */
- tsl2x7x_chip_off(indio_dev);
-
-	/* Save interrupt settings; enable the proximity interrupt
-	 * for the calibration run */
- tmp_irq_settings = chip->tsl2x7x_settings.interrupts_en;
- chip->tsl2x7x_settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;
-
-	/* turn on device if not already on */
- tsl2x7x_chip_on(indio_dev);
-
-	/* gather the samples */
- for (i = 0; i < chip->tsl2x7x_settings.prox_max_samples_cal; i++) {
- mdelay(15);
- tsl2x7x_get_prox(indio_dev);
- prox_history[i] = chip->prox_data;
- dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
- i, chip->prox_data);
- }
-
- tsl2x7x_chip_off(indio_dev);
- calP = &prox_stat_data[PROX_STAT_CAL];
- tsl2x7x_prox_calculate(prox_history,
- chip->tsl2x7x_settings.prox_max_samples_cal, calP);
- chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
-
- dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
- calP->min, calP->mean, calP->max);
- dev_info(&chip->client->dev,
- "%s proximity threshold set to %d\n",
- chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
-
- /* back to the way they were */
- chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
- if (current_state == TSL2X7X_CHIP_WORKING)
- tsl2x7x_chip_on(indio_dev);
-}
-
-static ssize_t tsl2x7x_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
-
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->tsl2x7x_chip_status);
-}
-
-static ssize_t tsl2x7x_power_state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool value;
-
- if (strtobool(buf, &value))
- return -EINVAL;
-
- if (value)
- tsl2x7x_chip_on(indio_dev);
- else
- tsl2x7x_chip_off(indio_dev);
-
- return len;
-}
-
-static ssize_t tsl2x7x_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
-
- switch (chip->id) {
- case tsl2571:
- case tsl2671:
- case tmd2671:
- case tsl2771:
- case tmd2771:
- return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 128");
- }
-
- return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 120");
-}
-
-static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
-}
-
-static ssize_t tsl2x7x_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-	y = z / 1000;
-	z %= 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t tsl2x7x_als_time_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- result.fract /= 3;
- chip->tsl2x7x_settings.als_time =
- (TSL2X7X_MAX_TIMER_CNT - (u8)result.fract);
-
- dev_info(&chip->client->dev, "%s: als time = %d",
- __func__, chip->tsl2x7x_settings.als_time);
-
- tsl2x7x_invoke_change(indio_dev);
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
-static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
- ".00272 - .696");
-
-static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- chip->tsl2x7x_settings.als_cal_target);
-}
-
-static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- unsigned long value;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- if (value)
- chip->tsl2x7x_settings.als_cal_target = value;
-
- tsl2x7x_invoke_change(indio_dev);
-
- return len;
-}
-
-/* persistence settings */
-static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z, filter_delay;
-
- /* Determine integration time */
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- filter_delay = z * (chip->tsl2x7x_settings.persistence & 0x0F);
- y = filter_delay / 1000;
- z = filter_delay % 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int y, z, filter_delay;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-
- filter_delay =
- DIV_ROUND_UP(((result.integer * 1000) + result.fract), z);
-
- chip->tsl2x7x_settings.persistence &= 0xF0;
- chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
-
- dev_info(&chip->client->dev, "%s: als persistence = %d",
- __func__, filter_delay);
-
- tsl2x7x_invoke_change(indio_dev);
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
-static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z, filter_delay;
-
- /* Determine integration time */
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- filter_delay = z * ((chip->tsl2x7x_settings.persistence & 0xF0) >> 4);
- y = filter_delay / 1000;
- z = filter_delay % 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int y, z, filter_delay;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-
- filter_delay =
- DIV_ROUND_UP(((result.integer * 1000) + result.fract), z);
-
- chip->tsl2x7x_settings.persistence &= 0x0F;
- chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
-
- dev_info(&chip->client->dev, "%s: prox persistence = %d",
- __func__, filter_delay);
-
- tsl2x7x_invoke_change(indio_dev);
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
-static ssize_t tsl2x7x_do_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool value;
-
- if (strtobool(buf, &value))
- return -EINVAL;
-
- if (value)
- tsl2x7x_als_calibrate(indio_dev);
-
- tsl2x7x_invoke_change(indio_dev);
-
- return len;
-}
-
-static ssize_t tsl2x7x_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int i = 0;
- int offset = 0;
-
- while (i < (TSL2X7X_MAX_LUX_TABLE_SIZE * 3)) {
-		offset += snprintf(buf + offset, PAGE_SIZE - offset, "%u,%u,%u,",
- chip->tsl2x7x_device_lux[i].ratio,
- chip->tsl2x7x_device_lux[i].ch0,
- chip->tsl2x7x_device_lux[i].ch1);
- if (chip->tsl2x7x_device_lux[i].ratio == 0) {
- /* We just printed the first "0" entry.
- * Now get rid of the extra "," and break. */
- offset--;
- break;
- }
- i++;
- }
-
-	offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n");
- return offset;
-}
-
-static ssize_t tsl2x7x_luxtable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(chip->tsl2x7x_device_lux)*3 + 1];
- int n;
-
- get_options(buf, ARRAY_SIZE(value), value);
-
- /* We now have an array of ints starting at value[1], and
- * enumerated by value[0].
- * We expect each group of three ints is one table entry,
- * and the last table entry is all 0.
- */
- n = value[0];
- if ((n % 3) || n < 6 ||
- n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
- dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
- return -EINVAL;
- }
-
- if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
- dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
- return -EINVAL;
- }
-
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING)
- tsl2x7x_chip_off(indio_dev);
-
- /* Zero out the table */
- memset(chip->tsl2x7x_device_lux, 0, sizeof(chip->tsl2x7x_device_lux));
- memcpy(chip->tsl2x7x_device_lux, &value[1], (value[0] * 4));
-
- tsl2x7x_invoke_change(indio_dev);
-
- return len;
-}
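/*
 * Illustrative example of the input format expected by the store
 * function above (the numeric values are hypothetical):
 *
 *   echo "60000,425,512,30000,387,213,0,0,0" > in_illuminance0_lux_table
 *
 * get_options() returns value[0] = 9 with the nine integers in
 * value[1..9]; each triple is one { ratio, ch0, ch1 } table entry and
 * the trailing all-zero triple terminates the table, so the
 * n % 3 == 0, n >= 6 and trailing-zero checks pass before the values
 * are copied into chip->tsl2x7x_device_lux.
 */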
-
-static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool value;
-
- if (strtobool(buf, &value))
- return -EINVAL;
-
- if (value)
- tsl2x7x_prox_cal(indio_dev);
-
- tsl2x7x_invoke_change(indio_dev);
-
- return len;
-}
-
-static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret;
-
- if (chan->type == IIO_INTENSITY)
- ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x10);
- else
- ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x20);
-
- return ret;
-}
-
-static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int val)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- if (chan->type == IIO_INTENSITY) {
- if (val)
- chip->tsl2x7x_settings.interrupts_en |= 0x10;
- else
- chip->tsl2x7x_settings.interrupts_en &= 0x20;
- } else {
- if (val)
- chip->tsl2x7x_settings.interrupts_en |= 0x20;
- else
- chip->tsl2x7x_settings.interrupts_en &= 0x10;
- }
-
- tsl2x7x_invoke_change(indio_dev);
-
- return 0;
-}
-
-static int tsl2x7x_write_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- if (chan->type == IIO_INTENSITY) {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- chip->tsl2x7x_settings.als_thresh_high = val;
- break;
- case IIO_EV_DIR_FALLING:
- chip->tsl2x7x_settings.als_thresh_low = val;
- break;
- default:
- return -EINVAL;
- }
- } else {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- chip->tsl2x7x_settings.prox_thres_high = val;
- break;
- case IIO_EV_DIR_FALLING:
- chip->tsl2x7x_settings.prox_thres_low = val;
- break;
- default:
- return -EINVAL;
- }
- }
-
- tsl2x7x_invoke_change(indio_dev);
-
- return 0;
-}
-
-static int tsl2x7x_read_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- if (chan->type == IIO_INTENSITY) {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- *val = chip->tsl2x7x_settings.als_thresh_high;
- break;
- case IIO_EV_DIR_FALLING:
- *val = chip->tsl2x7x_settings.als_thresh_low;
- break;
- default:
- return -EINVAL;
- }
- } else {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- *val = chip->tsl2x7x_settings.prox_thres_high;
- break;
- case IIO_EV_DIR_FALLING:
- *val = chip->tsl2x7x_settings.prox_thres_low;
- break;
- default:
- return -EINVAL;
- }
- }
-
- return IIO_VAL_INT;
-}
-
-static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- int ret = -EINVAL;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_LIGHT:
- tsl2x7x_get_lux(indio_dev);
- *val = chip->als_cur_info.lux;
- ret = IIO_VAL_INT;
- break;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_RAW:
- switch (chan->type) {
- case IIO_INTENSITY:
- tsl2x7x_get_lux(indio_dev);
- if (chan->channel == 0)
- *val = chip->als_cur_info.als_ch0;
- else
- *val = chip->als_cur_info.als_ch1;
- ret = IIO_VAL_INT;
- break;
- case IIO_PROXIMITY:
- tsl2x7x_get_prox(indio_dev);
- *val = chip->prox_data;
- ret = IIO_VAL_INT;
- break;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->type == IIO_LIGHT)
- *val =
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain];
- else
- *val =
- tsl2X7X_prx_gainadj[chip->tsl2x7x_settings.prox_gain];
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- *val = chip->tsl2x7x_settings.als_gain_trim;
- ret = IIO_VAL_INT;
- break;
-
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->type == IIO_INTENSITY) {
- switch (val) {
- case 1:
- chip->tsl2x7x_settings.als_gain = 0;
- break;
- case 8:
- chip->tsl2x7x_settings.als_gain = 1;
- break;
- case 16:
- chip->tsl2x7x_settings.als_gain = 2;
- break;
- case 120:
- switch (chip->id) {
- case tsl2572:
- case tsl2672:
- case tmd2672:
- case tsl2772:
- case tmd2772:
- return -EINVAL;
- }
- chip->tsl2x7x_settings.als_gain = 3;
- break;
- case 128:
- switch (chip->id) {
- case tsl2571:
- case tsl2671:
- case tmd2671:
- case tsl2771:
- case tmd2771:
- return -EINVAL;
- }
- chip->tsl2x7x_settings.als_gain = 3;
- break;
- default:
- return -EINVAL;
- }
- } else {
- switch (val) {
- case 1:
- chip->tsl2x7x_settings.prox_gain = 0;
- break;
- case 2:
- chip->tsl2x7x_settings.prox_gain = 1;
- break;
- case 4:
- chip->tsl2x7x_settings.prox_gain = 2;
- break;
- case 8:
- chip->tsl2x7x_settings.prox_gain = 3;
- break;
- default:
- return -EINVAL;
- }
- }
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- chip->tsl2x7x_settings.als_gain_trim = val;
- break;
-
- default:
- return -EINVAL;
- }
-
- tsl2x7x_invoke_change(indio_dev);
-
- return 0;
-}
-
-static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
- tsl2x7x_power_state_show, tsl2x7x_power_state_store);
-
-static DEVICE_ATTR(in_proximity0_calibscale_available, S_IRUGO,
- tsl2x7x_prox_gain_available_show, NULL);
-
-static DEVICE_ATTR(in_illuminance0_calibscale_available, S_IRUGO,
- tsl2x7x_gain_available_show, NULL);
-
-static DEVICE_ATTR(in_illuminance0_integration_time, S_IRUGO | S_IWUSR,
- tsl2x7x_als_time_show, tsl2x7x_als_time_store);
-
-static DEVICE_ATTR(in_illuminance0_target_input, S_IRUGO | S_IWUSR,
- tsl2x7x_als_cal_target_show, tsl2x7x_als_cal_target_store);
-
-static DEVICE_ATTR(in_illuminance0_calibrate, S_IWUSR, NULL,
- tsl2x7x_do_calibrate);
-
-static DEVICE_ATTR(in_proximity0_calibrate, S_IWUSR, NULL,
- tsl2x7x_do_prox_calibrate);
-
-static DEVICE_ATTR(in_illuminance0_lux_table, S_IRUGO | S_IWUSR,
- tsl2x7x_luxtable_show, tsl2x7x_luxtable_store);
-
-static DEVICE_ATTR(in_intensity0_thresh_period, S_IRUGO | S_IWUSR,
- tsl2x7x_als_persistence_show, tsl2x7x_als_persistence_store);
-
-static DEVICE_ATTR(in_proximity0_thresh_period, S_IRUGO | S_IWUSR,
- tsl2x7x_prox_persistence_show, tsl2x7x_prox_persistence_store);
-
-/* Use the default register values to identify the Taos device */
-static int tsl2x7x_device_id(unsigned char *id, int target)
-{
- switch (target) {
- case tsl2571:
- case tsl2671:
- case tsl2771:
- return (*id & 0xf0) == TRITON_ID;
- case tmd2671:
- case tmd2771:
- return (*id & 0xf0) == HALIBUT_ID;
- case tsl2572:
- case tsl2672:
- case tmd2672:
- case tsl2772:
- case tmd2772:
- return (*id & 0xf0) == SWORDFISH_ID;
- }
-
- return -EINVAL;
-}
-
-static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- s64 timestamp = iio_get_time_ns();
- int ret;
- u8 value;
-
- value = i2c_smbus_read_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_STATUS);
-
- /* What type of interrupt do we need to process */
- if (value & TSL2X7X_STA_PRX_INTR) {
- tsl2x7x_get_prox(indio_dev); /* freshen data for ABI */
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_EITHER),
- timestamp);
- }
-
- if (value & TSL2X7X_STA_ALS_INTR) {
- tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_EITHER),
- timestamp);
- }
- /* Clear interrupt now that we have handled it. */
- ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
- if (ret < 0)
- dev_err(&chip->client->dev,
- "Failed to clear irq from event handler. err = %d\n",
- ret);
-
- return IRQ_HANDLED;
-}
-
-static struct attribute *tsl2x7x_ALS_device_attrs[] = {
- &dev_attr_power_state.attr,
- &dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
- &dev_attr_in_illuminance0_target_input.attr,
- &dev_attr_in_illuminance0_calibrate.attr,
- &dev_attr_in_illuminance0_lux_table.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_PRX_device_attrs[] = {
- &dev_attr_power_state.attr,
- &dev_attr_in_proximity0_calibrate.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
- &dev_attr_power_state.attr,
- &dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
- &dev_attr_in_illuminance0_target_input.attr,
- &dev_attr_in_illuminance0_calibrate.attr,
- &dev_attr_in_illuminance0_lux_table.attr,
- &dev_attr_in_proximity0_calibrate.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_PRX2_device_attrs[] = {
- &dev_attr_power_state.attr,
- &dev_attr_in_proximity0_calibrate.attr,
- &dev_attr_in_proximity0_calibscale_available.attr,
- NULL
-};
-
-static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
- &dev_attr_power_state.attr,
- &dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
- &dev_attr_in_illuminance0_target_input.attr,
- &dev_attr_in_illuminance0_calibrate.attr,
- &dev_attr_in_illuminance0_lux_table.attr,
- &dev_attr_in_proximity0_calibrate.attr,
- &dev_attr_in_proximity0_calibscale_available.attr,
- NULL
-};
-
-static struct attribute *tsl2X7X_ALS_event_attrs[] = {
- &dev_attr_in_intensity0_thresh_period.attr,
- NULL,
-};
-static struct attribute *tsl2X7X_PRX_event_attrs[] = {
- &dev_attr_in_proximity0_thresh_period.attr,
- NULL,
-};
-
-static struct attribute *tsl2X7X_ALSPRX_event_attrs[] = {
- &dev_attr_in_intensity0_thresh_period.attr,
- &dev_attr_in_proximity0_thresh_period.attr,
- NULL,
-};
-
-static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
- [ALS] = {
- .attrs = tsl2x7x_ALS_device_attrs,
- },
- [PRX] = {
- .attrs = tsl2x7x_PRX_device_attrs,
- },
- [ALSPRX] = {
- .attrs = tsl2x7x_ALSPRX_device_attrs,
- },
- [PRX2] = {
- .attrs = tsl2x7x_PRX2_device_attrs,
- },
- [ALSPRX2] = {
- .attrs = tsl2x7x_ALSPRX2_device_attrs,
- },
-};
-
-static struct attribute_group tsl2X7X_event_attr_group_tbl[] = {
- [ALS] = {
- .attrs = tsl2X7X_ALS_event_attrs,
- .name = "events",
- },
- [PRX] = {
- .attrs = tsl2X7X_PRX_event_attrs,
- .name = "events",
- },
- [ALSPRX] = {
- .attrs = tsl2X7X_ALSPRX_event_attrs,
- .name = "events",
- },
-};
-
-static const struct iio_info tsl2X7X_device_info[] = {
- [ALS] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[ALS],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALS],
- .driver_module = THIS_MODULE,
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [PRX] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[PRX],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
- .driver_module = THIS_MODULE,
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [ALSPRX] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
- .driver_module = THIS_MODULE,
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [PRX2] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
- .driver_module = THIS_MODULE,
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
- [ALSPRX2] = {
- .attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
- .driver_module = THIS_MODULE,
- .read_raw = &tsl2x7x_read_raw,
- .write_raw = &tsl2x7x_write_raw,
- .read_event_value = &tsl2x7x_read_thresh,
- .write_event_value = &tsl2x7x_write_thresh,
- .read_event_config = &tsl2x7x_read_interrupt_config,
- .write_event_config = &tsl2x7x_write_interrupt_config,
- },
-};
-
-static const struct iio_event_spec tsl2x7x_events[] = {
- {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- }, {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
- BIT(IIO_EV_INFO_ENABLE),
- },
-};
-
-static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
- [ALS] = {
- .channel = {
- {
- .type = IIO_LIGHT,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 1,
- },
- },
- .chan_table_elements = 3,
- .info = &tsl2X7X_device_info[ALS],
- },
- [PRX] = {
- .channel = {
- {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 1,
- .info = &tsl2X7X_device_info[PRX],
- },
- [ALSPRX] = {
- .channel = {
- {
- .type = IIO_LIGHT,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED)
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 4,
- .info = &tsl2X7X_device_info[ALSPRX],
- },
- [PRX2] = {
- .channel = {
- {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 1,
- .info = &tsl2X7X_device_info[PRX2],
- },
- [ALSPRX2] = {
- .channel = {
- {
- .type = IIO_LIGHT,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- }, {
- .type = IIO_INTENSITY,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_PROXIMITY,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE),
- .event_spec = tsl2x7x_events,
- .num_event_specs = ARRAY_SIZE(tsl2x7x_events),
- },
- },
- .chan_table_elements = 4,
- .info = &tsl2X7X_device_info[ALSPRX2],
- },
-};
-
-static int tsl2x7x_probe(struct i2c_client *clientp,
- const struct i2c_device_id *id)
-{
- int ret;
- unsigned char device_id;
- struct iio_dev *indio_dev;
- struct tsl2X7X_chip *chip;
-
- indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
-
- chip = iio_priv(indio_dev);
- chip->client = clientp;
- i2c_set_clientdata(clientp, indio_dev);
-
- ret = tsl2x7x_i2c_read(chip->client,
- TSL2X7X_CHIPID, &device_id);
- if (ret < 0)
- return ret;
-
- if ((!tsl2x7x_device_id(&device_id, id->driver_data)) ||
- (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
- dev_info(&chip->client->dev,
- "%s: i2c device found does not match expected id\n",
- __func__);
- return -EINVAL;
- }
-
- ret = i2c_smbus_write_byte(clientp, (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
- if (ret < 0) {
- dev_err(&clientp->dev, "write to cmd reg failed. err = %d\n",
- ret);
- return ret;
- }
-
- /* ALS and PROX functions can be invoked via user space poll
- * or H/W interrupt. If busy return last sample. */
- mutex_init(&chip->als_mutex);
- mutex_init(&chip->prox_mutex);
-
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_UNKNOWN;
- chip->pdata = clientp->dev.platform_data;
- chip->id = id->driver_data;
- chip->chip_info =
- &tsl2x7x_chip_info_tbl[device_channel_config[id->driver_data]];
-
- indio_dev->info = chip->chip_info->info;
- indio_dev->dev.parent = &clientp->dev;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = chip->client->name;
- indio_dev->channels = chip->chip_info->channel;
- indio_dev->num_channels = chip->chip_info->chan_table_elements;
-
- if (clientp->irq) {
- ret = devm_request_threaded_irq(&clientp->dev, clientp->irq,
- NULL,
- &tsl2x7x_event_handler,
- IRQF_TRIGGER_RISING |
- IRQF_ONESHOT,
- "TSL2X7X_event",
- indio_dev);
- if (ret) {
- dev_err(&clientp->dev,
- "%s: irq request failed", __func__);
- return ret;
- }
- }
-
- /* Load up the defaults */
- tsl2x7x_defaults(chip);
- /* Make sure the chip is on */
- tsl2x7x_chip_on(indio_dev);
-
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&clientp->dev,
- "%s: iio registration failed\n", __func__);
- return ret;
- }
-
- dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
-
- return 0;
-}
-
-static int tsl2x7x_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
- ret = tsl2x7x_chip_off(indio_dev);
- chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
- }
-
- if (chip->pdata && chip->pdata->platform_power) {
- pm_message_t pmm = {PM_EVENT_SUSPEND};
-
- chip->pdata->platform_power(dev, pmm);
- }
-
- return ret;
-}
-
-static int tsl2x7x_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = 0;
-
- if (chip->pdata && chip->pdata->platform_power) {
- pm_message_t pmm = {PM_EVENT_RESUME};
-
- chip->pdata->platform_power(dev, pmm);
- }
-
- if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_SUSPENDED)
- ret = tsl2x7x_chip_on(indio_dev);
-
- return ret;
-}
-
-static int tsl2x7x_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- tsl2x7x_chip_off(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
-static struct i2c_device_id tsl2x7x_idtable[] = {
- { "tsl2571", tsl2571 },
- { "tsl2671", tsl2671 },
- { "tmd2671", tmd2671 },
- { "tsl2771", tsl2771 },
- { "tmd2771", tmd2771 },
- { "tsl2572", tsl2572 },
- { "tsl2672", tsl2672 },
- { "tmd2672", tmd2672 },
- { "tsl2772", tsl2772 },
- { "tmd2772", tmd2772 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, tsl2x7x_idtable);
-
-static const struct dev_pm_ops tsl2x7x_pm_ops = {
- .suspend = tsl2x7x_suspend,
- .resume = tsl2x7x_resume,
-};
-
-/* Driver definition */
-static struct i2c_driver tsl2x7x_driver = {
- .driver = {
- .name = "tsl2x7x",
- .pm = &tsl2x7x_pm_ops,
- },
- .id_table = tsl2x7x_idtable,
- .probe = tsl2x7x_probe,
- .remove = tsl2x7x_remove,
-};
-
-module_i2c_driver(tsl2x7x_driver);
-
-MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
-MODULE_DESCRIPTION("TAOS tsl2x7x ambient and proximity light sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig
deleted file mode 100644
index dec814a7a073..000000000000
--- a/drivers/staging/iio/magnetometer/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Magnetometer sensors
-#
-menu "Magnetometer sensors"
-
-config SENSORS_HMC5843
- tristate
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
-
-config SENSORS_HMC5843_I2C
- tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer (I2C)"
- depends on I2C
- select SENSORS_HMC5843
- select REGMAP_I2C
- help
- Say Y here to add support for the Honeywell HMC5843, HMC5883 and
- HMC5883L 3-Axis Magnetometer (digital compass).
-
- This driver can also be compiled as a set of modules.
- If so, these modules will be created:
- - hmc5843_core (core functions)
- - hmc5843_i2c (support for HMC5843, HMC5883, HMC5883L and HMC5983)
-
-config SENSORS_HMC5843_SPI
- tristate "Honeywell HMC5983 3-Axis Magnetometer (SPI)"
- depends on SPI_MASTER
- select SENSORS_HMC5843
- select REGMAP_SPI
- help
- Say Y here to add support for the Honeywell HMC5983 3-Axis Magnetometer
- (digital compass).
-
- This driver can also be compiled as a set of modules.
- If so, these modules will be created:
- - hmc5843_core (core functions)
- - hmc5843_spi (support for HMC5983)
-
-
-endmenu
diff --git a/drivers/staging/iio/magnetometer/Makefile b/drivers/staging/iio/magnetometer/Makefile
deleted file mode 100644
index 33761a19a956..000000000000
--- a/drivers/staging/iio/magnetometer/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for industrial I/O Magnetometer sensors
-#
-
-obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o
-obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o
-obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
diff --git a/drivers/staging/iio/magnetometer/hmc5843.h b/drivers/staging/iio/magnetometer/hmc5843.h
deleted file mode 100644
index f3d0da2fe458..000000000000
--- a/drivers/staging/iio/magnetometer/hmc5843.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Header file for hmc5843 driver
- *
- * Split from hmc5843.c
- * Copyright (C) Josef Gajdusek <atx@atx.name>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef HMC5843_CORE_H
-#define HMC5843_CORE_H
-
-#include <linux/regmap.h>
-#include <linux/iio/iio.h>
-
-#define HMC5843_CONFIG_REG_A 0x00
-#define HMC5843_CONFIG_REG_B 0x01
-#define HMC5843_MODE_REG 0x02
-#define HMC5843_DATA_OUT_MSB_REGS 0x03
-#define HMC5843_STATUS_REG 0x09
-#define HMC5843_ID_REG 0x0a
-#define HMC5843_ID_END 0x0c
-
-enum hmc5843_ids {
- HMC5843_ID,
- HMC5883_ID,
- HMC5883L_ID,
- HMC5983_ID,
-};
-
-/**
- * struct hmc5843_data - device specific data
- * @dev: actual device
- * @lock:		serializes regmap update and read access
- * @regmap: hardware access register maps
- * @variant: describe chip variants
- * @buffer: 3x 16-bit channels + padding + 64-bit timestamp
- **/
-struct hmc5843_data {
- struct device *dev;
- struct mutex lock;
- struct regmap *regmap;
- const struct hmc5843_chip_info *variant;
- __be16 buffer[8];
-};
-
-int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
- enum hmc5843_ids id);
-int hmc5843_common_remove(struct device *dev);
-
-int hmc5843_common_suspend(struct device *dev);
-int hmc5843_common_resume(struct device *dev);
-
-#ifdef CONFIG_PM_SLEEP
-static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops,
- hmc5843_common_suspend,
- hmc5843_common_resume);
-#define HMC5843_PM_OPS (&hmc5843_pm_ops)
-#else
-#define HMC5843_PM_OPS NULL
-#endif
-
-#endif /* HMC5843_CORE_H */
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/staging/iio/magnetometer/hmc5843_core.c
deleted file mode 100644
index fffca3a9f637..000000000000
--- a/drivers/staging/iio/magnetometer/hmc5843_core.c
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * Device driver for the HMC5843 multi-chip module designed
- * for low field magnetic sensing.
- *
- * Copyright (C) 2010 Texas Instruments
- *
- * Author: Shubhrajyoti Datta <shubhrajyoti@ti.com>
- * Acknowledgment: Jonathan Cameron <jic23@kernel.org> for valuable inputs.
- * Support for HMC5883 and HMC5883L by Peter Meerwald <pmeerw@pmeerw.net>.
- * Split to multiple files by Josef Gajdusek <atx@atx.name> - 2014
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/delay.h>
-
-#include "hmc5843.h"
-
-/*
- * Range gain settings in (+-)Ga
- * Beware: HMC5843 and HMC5883 have different recommended sensor field
- * ranges; default corresponds to +-1.0 Ga and +-1.3 Ga, respectively
- */
-#define HMC5843_RANGE_GAIN_OFFSET 0x05
-#define HMC5843_RANGE_GAIN_DEFAULT 0x01
-#define HMC5843_RANGE_GAIN_MASK 0xe0
-
-/* Device status */
-#define HMC5843_DATA_READY 0x01
-#define HMC5843_DATA_OUTPUT_LOCK 0x02
-
-/* Mode register configuration */
-#define HMC5843_MODE_CONVERSION_CONTINUOUS 0x00
-#define HMC5843_MODE_CONVERSION_SINGLE 0x01
-#define HMC5843_MODE_IDLE 0x02
-#define HMC5843_MODE_SLEEP 0x03
-#define HMC5843_MODE_MASK 0x03
-
-/*
- * HMC5843: Minimum data output rate
- * HMC5883: Typical data output rate
- */
-#define HMC5843_RATE_OFFSET 0x02
-#define HMC5843_RATE_DEFAULT 0x04
-#define HMC5843_RATE_MASK 0x1c
-
-/* Device measurement configuration */
-#define HMC5843_MEAS_CONF_NORMAL 0x00
-#define HMC5843_MEAS_CONF_POSITIVE_BIAS 0x01
-#define HMC5843_MEAS_CONF_NEGATIVE_BIAS 0x02
-#define HMC5843_MEAS_CONF_MASK 0x03
-
-/* Scaling factors: 10000000/Gain */
-static const int hmc5843_regval_to_nanoscale[] = {
- 6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714
-};
-
-static const int hmc5883_regval_to_nanoscale[] = {
- 7812, 9766, 13021, 16287, 24096, 27701, 32573, 45662
-};
-
-static const int hmc5883l_regval_to_nanoscale[] = {
- 7299, 9174, 12195, 15152, 22727, 25641, 30303, 43478
-};
-
-/*
- * From the datasheet:
- * Value | HMC5843 | HMC5883/HMC5883L
- * | Data output rate (Hz) | Data output rate (Hz)
- * 0 | 0.5 | 0.75
- * 1 | 1 | 1.5
- * 2 | 2 | 3
- * 3 | 5 | 7.5
- * 4 | 10 (default) | 15
- * 5 | 20 | 30
- * 6 | 50 | 75
- * 7 | Not used | Not used
- */
-static const int hmc5843_regval_to_samp_freq[][2] = {
- {0, 500000}, {1, 0}, {2, 0}, {5, 0}, {10, 0}, {20, 0}, {50, 0}
-};
-
-static const int hmc5883_regval_to_samp_freq[][2] = {
- {0, 750000}, {1, 500000}, {3, 0}, {7, 500000}, {15, 0}, {30, 0},
- {75, 0}
-};
-
-static const int hmc5983_regval_to_samp_freq[][2] = {
- {0, 750000}, {1, 500000}, {3, 0}, {7, 500000}, {15, 0}, {30, 0},
- {75, 0}, {220, 0}
-};
-
-/* Describe chip variants */
-struct hmc5843_chip_info {
- const struct iio_chan_spec *channels;
- const int (*regval_to_samp_freq)[2];
- const int n_regval_to_samp_freq;
- const int *regval_to_nanoscale;
- const int n_regval_to_nanoscale;
-};
-
-/* The lower two bits contain the current conversion mode */
-static s32 hmc5843_set_mode(struct hmc5843_data *data, u8 operating_mode)
-{
- int ret;
-
- mutex_lock(&data->lock);
- ret = regmap_update_bits(data->regmap, HMC5843_MODE_REG,
- HMC5843_MODE_MASK, operating_mode);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int hmc5843_wait_measurement(struct hmc5843_data *data)
-{
- int tries = 150;
- unsigned int val;
- int ret;
-
- while (tries-- > 0) {
- ret = regmap_read(data->regmap, HMC5843_STATUS_REG, &val);
- if (ret < 0)
- return ret;
- if (val & HMC5843_DATA_READY)
- break;
- msleep(20);
- }
-
- if (tries < 0) {
- dev_err(data->dev, "data not ready\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/* Return the measurement value from the specified channel */
-static int hmc5843_read_measurement(struct hmc5843_data *data,
- int idx, int *val)
-{
- __be16 values[3];
- int ret;
-
- mutex_lock(&data->lock);
- ret = hmc5843_wait_measurement(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- return ret;
- }
- ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
- values, sizeof(values));
- mutex_unlock(&data->lock);
- if (ret < 0)
- return ret;
-
- *val = sign_extend32(be16_to_cpu(values[idx]), 15);
- return IIO_VAL_INT;
-}
-
-/*
- * API for setting the measurement configuration to
- * Normal, Positive bias and Negative bias
- *
- * From the datasheet:
- * 0 - Normal measurement configuration (default): In normal measurement
- * configuration the device follows normal measurement flow. Pins BP
- * and BN are left floating and high impedance.
- *
- * 1 - Positive bias configuration: In positive bias configuration, a
- * positive current is forced across the resistive load on pins BP
- * and BN.
- *
- * 2 - Negative bias configuration. In negative bias configuration, a
- * negative current is forced across the resistive load on pins BP
- * and BN.
- *
- */
-static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf)
-{
- int ret;
-
- mutex_lock(&data->lock);
- ret = regmap_update_bits(data->regmap, HMC5843_CONFIG_REG_A,
- HMC5843_MEAS_CONF_MASK, meas_conf);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static
-ssize_t hmc5843_show_measurement_configuration(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
- unsigned int val;
- int ret;
-
- ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_A, &val);
- if (ret)
- return ret;
- val &= HMC5843_MEAS_CONF_MASK;
-
- return sprintf(buf, "%d\n", val);
-}
-
-static
-ssize_t hmc5843_set_measurement_configuration(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
- unsigned long meas_conf = 0;
- int ret;
-
- ret = kstrtoul(buf, 10, &meas_conf);
- if (ret)
- return ret;
- if (meas_conf >= HMC5843_MEAS_CONF_MASK)
- return -EINVAL;
-
- ret = hmc5843_set_meas_conf(data, meas_conf);
-
- return (ret < 0) ? ret : count;
-}
-
-static IIO_DEVICE_ATTR(meas_conf,
- S_IWUSR | S_IRUGO,
- hmc5843_show_measurement_configuration,
- hmc5843_set_measurement_configuration,
- 0);
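/*
 * Usage sketch for the meas_conf attribute defined above; the sysfs
 * path shown is the usual IIO location and the device index is
 * arbitrary:
 *
 *   echo 1 > /sys/bus/iio/devices/iio:device0/meas_conf
 *   cat /sys/bus/iio/devices/iio:device0/meas_conf
 *
 * Accepted values are 0 (normal), 1 (positive bias) and 2 (negative
 * bias); anything >= HMC5843_MEAS_CONF_MASK is rejected with -EINVAL
 * by hmc5843_set_measurement_configuration().
 */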
-
-static
-ssize_t hmc5843_show_samp_freq_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
- size_t len = 0;
- int i;
-
- for (i = 0; i < data->variant->n_regval_to_samp_freq; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", data->variant->regval_to_samp_freq[i][0],
- data->variant->regval_to_samp_freq[i][1]);
-
- /* replace trailing space by newline */
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hmc5843_show_samp_freq_avail);
-
-static int hmc5843_set_samp_freq(struct hmc5843_data *data, u8 rate)
-{
- int ret;
-
- mutex_lock(&data->lock);
- ret = regmap_update_bits(data->regmap, HMC5843_CONFIG_REG_A,
- HMC5843_RATE_MASK,
- rate << HMC5843_RATE_OFFSET);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int hmc5843_get_samp_freq_index(struct hmc5843_data *data,
- int val, int val2)
-{
- int i;
-
- for (i = 0; i < data->variant->n_regval_to_samp_freq; i++)
- if (val == data->variant->regval_to_samp_freq[i][0] &&
- val2 == data->variant->regval_to_samp_freq[i][1])
- return i;
-
- return -EINVAL;
-}
-
-static int hmc5843_set_range_gain(struct hmc5843_data *data, u8 range)
-{
- int ret;
-
- mutex_lock(&data->lock);
- ret = regmap_update_bits(data->regmap, HMC5843_CONFIG_REG_B,
- HMC5843_RANGE_GAIN_MASK,
- range << HMC5843_RANGE_GAIN_OFFSET);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static ssize_t hmc5843_show_scale_avail(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
-
- size_t len = 0;
- int i;
-
- for (i = 0; i < data->variant->n_regval_to_nanoscale; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "0.%09d ", data->variant->regval_to_nanoscale[i]);
-
- /* replace trailing space by newline */
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(scale_available, S_IRUGO,
- hmc5843_show_scale_avail, NULL, 0);
-
-static int hmc5843_get_scale_index(struct hmc5843_data *data, int val, int val2)
-{
- int i;
-
- if (val != 0)
- return -EINVAL;
-
- for (i = 0; i < data->variant->n_regval_to_nanoscale; i++)
- if (val2 == data->variant->regval_to_nanoscale[i])
- return i;
-
- return -EINVAL;
-}
-
-static int hmc5843_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct hmc5843_data *data = iio_priv(indio_dev);
- unsigned int rval;
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return hmc5843_read_measurement(data, chan->scan_index, val);
- case IIO_CHAN_INFO_SCALE:
- ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_B, &rval);
- if (ret < 0)
- return ret;
- rval >>= HMC5843_RANGE_GAIN_OFFSET;
- *val = 0;
- *val2 = data->variant->regval_to_nanoscale[rval];
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_CHAN_INFO_SAMP_FREQ:
- ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_A, &rval);
- if (ret < 0)
- return ret;
- rval >>= HMC5843_RATE_OFFSET;
- *val = data->variant->regval_to_samp_freq[rval][0];
- *val2 = data->variant->regval_to_samp_freq[rval][1];
- return IIO_VAL_INT_PLUS_MICRO;
- }
- return -EINVAL;
-}
-
-static int hmc5843_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct hmc5843_data *data = iio_priv(indio_dev);
- int rate, range;
-
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- rate = hmc5843_get_samp_freq_index(data, val, val2);
- if (rate < 0)
- return -EINVAL;
-
- return hmc5843_set_samp_freq(data, rate);
- case IIO_CHAN_INFO_SCALE:
- range = hmc5843_get_scale_index(data, val, val2);
- if (range < 0)
- return -EINVAL;
-
- return hmc5843_set_range_gain(data, range);
- default:
- return -EINVAL;
- }
-}
-
-static int hmc5843_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_SCALE:
- return IIO_VAL_INT_PLUS_NANO;
- default:
- return -EINVAL;
- }
-}
-
-static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct hmc5843_data *data = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&data->lock);
- ret = hmc5843_wait_measurement(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- goto done;
- }
-
- ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
- data->buffer, 3 * sizeof(__be16));
-
- mutex_unlock(&data->lock);
- if (ret < 0)
- goto done;
-
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
-
-done:
- iio_trigger_notify_done(indio_dev->trig);
-
- return IRQ_HANDLED;
-}
-
-#define HMC5843_CHANNEL(axis, idx) \
- { \
- .type = IIO_MAGN, \
- .modified = 1, \
- .channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .scan_index = idx, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
- .endianness = IIO_BE, \
- }, \
- }
-
-static const struct iio_chan_spec hmc5843_channels[] = {
- HMC5843_CHANNEL(X, 0),
- HMC5843_CHANNEL(Y, 1),
- HMC5843_CHANNEL(Z, 2),
- IIO_CHAN_SOFT_TIMESTAMP(3),
-};
-
-/* Beware: Y and Z are exchanged on HMC5883 and 5983 */
-static const struct iio_chan_spec hmc5883_channels[] = {
- HMC5843_CHANNEL(X, 0),
- HMC5843_CHANNEL(Z, 1),
- HMC5843_CHANNEL(Y, 2),
- IIO_CHAN_SOFT_TIMESTAMP(3),
-};
-
-static struct attribute *hmc5843_attributes[] = {
- &iio_dev_attr_meas_conf.dev_attr.attr,
- &iio_dev_attr_scale_available.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group hmc5843_group = {
- .attrs = hmc5843_attributes,
-};
-
-static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
- [HMC5843_ID] = {
- .channels = hmc5843_channels,
- .regval_to_samp_freq = hmc5843_regval_to_samp_freq,
- .n_regval_to_samp_freq =
- ARRAY_SIZE(hmc5843_regval_to_samp_freq),
- .regval_to_nanoscale = hmc5843_regval_to_nanoscale,
- .n_regval_to_nanoscale =
- ARRAY_SIZE(hmc5843_regval_to_nanoscale),
- },
- [HMC5883_ID] = {
- .channels = hmc5883_channels,
- .regval_to_samp_freq = hmc5883_regval_to_samp_freq,
- .n_regval_to_samp_freq =
- ARRAY_SIZE(hmc5883_regval_to_samp_freq),
- .regval_to_nanoscale = hmc5883_regval_to_nanoscale,
- .n_regval_to_nanoscale =
- ARRAY_SIZE(hmc5883_regval_to_nanoscale),
- },
- [HMC5883L_ID] = {
- .channels = hmc5883_channels,
- .regval_to_samp_freq = hmc5883_regval_to_samp_freq,
- .n_regval_to_samp_freq =
- ARRAY_SIZE(hmc5883_regval_to_samp_freq),
- .regval_to_nanoscale = hmc5883l_regval_to_nanoscale,
- .n_regval_to_nanoscale =
- ARRAY_SIZE(hmc5883l_regval_to_nanoscale),
- },
- [HMC5983_ID] = {
- .channels = hmc5883_channels,
- .regval_to_samp_freq = hmc5983_regval_to_samp_freq,
- .n_regval_to_samp_freq =
- ARRAY_SIZE(hmc5983_regval_to_samp_freq),
- .regval_to_nanoscale = hmc5883l_regval_to_nanoscale,
- .n_regval_to_nanoscale =
- ARRAY_SIZE(hmc5883l_regval_to_nanoscale),
- }
-};
-
-static int hmc5843_init(struct hmc5843_data *data)
-{
- int ret;
- u8 id[3];
-
- ret = regmap_bulk_read(data->regmap, HMC5843_ID_REG,
- id, ARRAY_SIZE(id));
- if (ret < 0)
- return ret;
- if (id[0] != 'H' || id[1] != '4' || id[2] != '3') {
- dev_err(data->dev, "no HMC5843/5883/5883L/5983 sensor\n");
- return -ENODEV;
- }
-
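-	/* Default config, rate and gain, then start continuous conversion */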
- ret = hmc5843_set_meas_conf(data, HMC5843_MEAS_CONF_NORMAL);
- if (ret < 0)
- return ret;
- ret = hmc5843_set_samp_freq(data, HMC5843_RATE_DEFAULT);
- if (ret < 0)
- return ret;
- ret = hmc5843_set_range_gain(data, HMC5843_RANGE_GAIN_DEFAULT);
- if (ret < 0)
- return ret;
- return hmc5843_set_mode(data, HMC5843_MODE_CONVERSION_CONTINUOUS);
-}
-
-static const struct iio_info hmc5843_info = {
- .attrs = &hmc5843_group,
- .read_raw = &hmc5843_read_raw,
- .write_raw = &hmc5843_write_raw,
- .write_raw_get_fmt = &hmc5843_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
-};
-
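-/* Buffered capture always reads all three axes together (mask 0x7) */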
-static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
-
-int hmc5843_common_suspend(struct device *dev)
-{
-	return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
-			HMC5843_MODE_SLEEP);
-}
-EXPORT_SYMBOL(hmc5843_common_suspend);
-
-int hmc5843_common_resume(struct device *dev)
-{
-	return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
-			HMC5843_MODE_CONVERSION_CONTINUOUS);
-}
-EXPORT_SYMBOL(hmc5843_common_resume);
-
-int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
- enum hmc5843_ids id)
-{
- struct hmc5843_data *data;
- struct iio_dev *indio_dev;
- int ret;
-
- indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
- if (!indio_dev)
- return -ENOMEM;
-
- dev_set_drvdata(dev, indio_dev);
-
- /* default settings at probe */
- data = iio_priv(indio_dev);
- data->dev = dev;
- data->regmap = regmap;
- data->variant = &hmc5843_chip_info_tbl[id];
- mutex_init(&data->lock);
-
- indio_dev->dev.parent = dev;
- indio_dev->name = dev->driver->name;
- indio_dev->info = &hmc5843_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = data->variant->channels;
- indio_dev->num_channels = 4;
- indio_dev->available_scan_masks = hmc5843_scan_masks;
-
- ret = hmc5843_init(data);
- if (ret < 0)
- return ret;
-
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- hmc5843_trigger_handler, NULL);
- if (ret < 0)
- goto buffer_setup_err;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto buffer_cleanup;
-
- return 0;
-
-buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-buffer_setup_err:
- hmc5843_set_mode(iio_priv(indio_dev), HMC5843_MODE_SLEEP);
- return ret;
-}
-EXPORT_SYMBOL(hmc5843_common_probe);
-
-int hmc5843_common_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- /* sleep mode to save power */
- hmc5843_set_mode(iio_priv(indio_dev), HMC5843_MODE_SLEEP);
-
- return 0;
-}
-EXPORT_SYMBOL(hmc5843_common_remove);
-
-MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com>");
-MODULE_DESCRIPTION("HMC5843/5883/5883L/5983 core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/magnetometer/hmc5843_i2c.c b/drivers/staging/iio/magnetometer/hmc5843_i2c.c
deleted file mode 100644
index ff08667fa2f6..000000000000
--- a/drivers/staging/iio/magnetometer/hmc5843_i2c.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * i2c driver for hmc5843/5883/5883l/5983
- *
- * Split from hmc5843.c
- * Copyright (C) Josef Gajdusek <atx@atx.name>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/regmap.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/triggered_buffer.h>
-
-#include "hmc5843.h"
-
-static const struct regmap_range hmc5843_readable_ranges[] = {
- regmap_reg_range(0, HMC5843_ID_END),
-};
-
-static const struct regmap_access_table hmc5843_readable_table = {
- .yes_ranges = hmc5843_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(hmc5843_readable_ranges),
-};
-
-static const struct regmap_range hmc5843_writable_ranges[] = {
- regmap_reg_range(0, HMC5843_MODE_REG),
-};
-
-static const struct regmap_access_table hmc5843_writable_table = {
- .yes_ranges = hmc5843_writable_ranges,
- .n_yes_ranges = ARRAY_SIZE(hmc5843_writable_ranges),
-};
-
-static const struct regmap_range hmc5843_volatile_ranges[] = {
- regmap_reg_range(HMC5843_DATA_OUT_MSB_REGS, HMC5843_STATUS_REG),
-};
-
-static const struct regmap_access_table hmc5843_volatile_table = {
- .yes_ranges = hmc5843_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(hmc5843_volatile_ranges),
-};
-
-static const struct regmap_config hmc5843_i2c_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .rd_table = &hmc5843_readable_table,
- .wr_table = &hmc5843_writable_table,
- .volatile_table = &hmc5843_volatile_table,
-
- .cache_type = REGCACHE_RBTREE,
-};
-
-static int hmc5843_i2c_probe(struct i2c_client *cli,
- const struct i2c_device_id *id)
-{
- return hmc5843_common_probe(&cli->dev,
- devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config),
- id->driver_data);
-}
-
-static int hmc5843_i2c_remove(struct i2c_client *client)
-{
- return hmc5843_common_remove(&client->dev);
-}
-
-static const struct i2c_device_id hmc5843_id[] = {
- { "hmc5843", HMC5843_ID },
- { "hmc5883", HMC5883_ID },
- { "hmc5883l", HMC5883L_ID },
- { "hmc5983", HMC5983_ID },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, hmc5843_id);
-
-static const struct of_device_id hmc5843_of_match[] = {
- { .compatible = "honeywell,hmc5843", .data = (void *)HMC5843_ID },
- { .compatible = "honeywell,hmc5883", .data = (void *)HMC5883_ID },
- { .compatible = "honeywell,hmc5883l", .data = (void *)HMC5883L_ID },
- { .compatible = "honeywell,hmc5983", .data = (void *)HMC5983_ID },
- {}
-};
-MODULE_DEVICE_TABLE(of, hmc5843_of_match);
-
-static struct i2c_driver hmc5843_driver = {
- .driver = {
- .name = "hmc5843",
- .pm = HMC5843_PM_OPS,
- .of_match_table = hmc5843_of_match,
- },
- .id_table = hmc5843_id,
- .probe = hmc5843_i2c_probe,
- .remove = hmc5843_i2c_remove,
-};
-module_i2c_driver(hmc5843_driver);
-
-MODULE_AUTHOR("Josef Gajdusek <atx@atx.name>");
-MODULE_DESCRIPTION("HMC5843/5883/5883L/5983 i2c driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/magnetometer/hmc5843_spi.c b/drivers/staging/iio/magnetometer/hmc5843_spi.c
deleted file mode 100644
index 8e658f736e1f..000000000000
--- a/drivers/staging/iio/magnetometer/hmc5843_spi.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * SPI driver for hmc5983
- *
- * Copyright (C) Josef Gajdusek <atx@atx.name>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/iio/iio.h>
-
-#include "hmc5843.h"
-
-static const struct regmap_range hmc5843_readable_ranges[] = {
- regmap_reg_range(0, HMC5843_ID_END),
-};
-
-static const struct regmap_access_table hmc5843_readable_table = {
- .yes_ranges = hmc5843_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(hmc5843_readable_ranges),
-};
-
-static const struct regmap_range hmc5843_writable_ranges[] = {
- regmap_reg_range(0, HMC5843_MODE_REG),
-};
-
-static const struct regmap_access_table hmc5843_writable_table = {
- .yes_ranges = hmc5843_writable_ranges,
- .n_yes_ranges = ARRAY_SIZE(hmc5843_writable_ranges),
-};
-
-static const struct regmap_range hmc5843_volatile_ranges[] = {
- regmap_reg_range(HMC5843_DATA_OUT_MSB_REGS, HMC5843_STATUS_REG),
-};
-
-static const struct regmap_access_table hmc5843_volatile_table = {
- .yes_ranges = hmc5843_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(hmc5843_volatile_ranges),
-};
-
-static const struct regmap_config hmc5843_spi_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .rd_table = &hmc5843_readable_table,
- .wr_table = &hmc5843_writable_table,
- .volatile_table = &hmc5843_volatile_table,
-
- /* Autoincrement address pointer */
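-	/* 0xc0: the read bit plus the auto-increment bit, applied on every read */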
- .read_flag_mask = 0xc0,
-
- .cache_type = REGCACHE_RBTREE,
-};
-
-static int hmc5843_spi_probe(struct spi_device *spi)
-{
- int ret;
-
- spi->mode = SPI_MODE_3;
- spi->max_speed_hz = 8000000;
- spi->bits_per_word = 8;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- return hmc5843_common_probe(&spi->dev,
- devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config),
- HMC5983_ID);
-}
-
-static int hmc5843_spi_remove(struct spi_device *spi)
-{
- return hmc5843_common_remove(&spi->dev);
-}
-
-static const struct spi_device_id hmc5843_id[] = {
- { "hmc5983", HMC5983_ID },
- { }
-};
-
-static struct spi_driver hmc5843_driver = {
- .driver = {
- .name = "hmc5843",
- .pm = HMC5843_PM_OPS,
- .owner = THIS_MODULE,
- },
- .id_table = hmc5843_id,
- .probe = hmc5843_spi_probe,
- .remove = hmc5843_spi_remove,
-};
-
-module_spi_driver(hmc5843_driver);
-
-MODULE_AUTHOR("Josef Gajdusek <atx@atx.name>");
-MODULE_DESCRIPTION("HMC5983 SPI driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
deleted file mode 100644
index 64cd3704ec6e..000000000000
--- a/drivers/staging/iio/meter/Kconfig
+++ /dev/null
@@ -1,78 +0,0 @@
-#
-# IIO meter drivers configuration
-#
-menu "Active energy metering IC"
-
-config ADE7753
- tristate "Analog Devices ADE7753/6 Single-Phase Multifunction Metering IC Driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices ADE7753 Single-Phase Multifunction
- Metering IC with di/dt Sensor Interface.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7753.
-
-config ADE7754
- tristate "Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices ADE7754 Polyphase
- Multifunction Energy Metering IC Driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7754.
-
-config ADE7758
- tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
- depends on SPI
- select IIO_TRIGGER if IIO_BUFFER
- select IIO_KFIFO_BUF if IIO_BUFFER
- help
- Say yes here to build support for Analog Devices ADE7758 Polyphase
- Multifunction Energy Metering IC with Per Phase Information Driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7758.
-
-config ADE7759
- tristate "Analog Devices ADE7759 Active Energy Metering IC Driver"
- depends on SPI
- help
-	  Say yes here to build support for Analog Devices ADE7759 Active Energy
- Metering IC with di/dt Sensor Interface.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7759.
-
-config ADE7854
- tristate "Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver"
- depends on SPI || I2C
- help
- Say yes here to build support for Analog Devices ADE7854/58/68/78 Polyphase
- Multifunction Energy Metering IC Driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7854.
-
-config ADE7854_I2C
- tristate "support I2C bus connection"
- depends on ADE7854 && I2C
- default y
- help
- Say Y here if you have ADE7854/58/68/78 hooked to an I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7854-i2c.
-
-config ADE7854_SPI
- tristate "support SPI bus connection"
- depends on ADE7854 && SPI
- default y
- help
-	  Say Y here if you have ADE7854/58/68/78 hooked to an SPI bus.
-
- To compile this driver as a module, choose M here: the
- module will be called ade7854-spi.
-
-endmenu
diff --git a/drivers/staging/iio/meter/Makefile b/drivers/staging/iio/meter/Makefile
deleted file mode 100644
index de3863d6b078..000000000000
--- a/drivers/staging/iio/meter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Makefile for metering ic drivers
-#
-
-obj-$(CONFIG_ADE7753) += ade7753.o
-obj-$(CONFIG_ADE7754) += ade7754.o
-
-ade7758-y := ade7758_core.o
-ade7758-$(CONFIG_IIO_BUFFER) += ade7758_ring.o ade7758_trigger.o
-obj-$(CONFIG_ADE7758) += ade7758.o
-
-obj-$(CONFIG_ADE7759) += ade7759.o
-obj-$(CONFIG_ADE7854) += ade7854.o
-obj-$(CONFIG_ADE7854_I2C) += ade7854-i2c.o
-obj-$(CONFIG_ADE7854_SPI) += ade7854-spi.o
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
deleted file mode 100644
index 3d2e50cfe9a3..000000000000
--- a/drivers/staging/iio/meter/ade7753.c
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * ADE7753 Single-Phase Multifunction Metering IC with di/dt Sensor Interface
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "meter.h"
-#include "ade7753.h"
-
-static int ade7753_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7753_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7753_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7753_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7753_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- ssize_t ret;
-
- ret = spi_w8r8(st->us, ADE7753_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- return ret;
- }
- *val = ret;
-
- return 0;
-}
-
-static int ade7753_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- ssize_t ret;
-
- ret = spi_w8r16be(st->us, ADE7753_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- return ret;
- }
-
- *val = ret;
-
- return 0;
-}
-
-static int ade7753_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- int ret;
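-	/* First transfer sends the read command, second clocks in 3 data bytes */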
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- }, {
-			.rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 3,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7753_READ_REG(reg_address);
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7753_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7753_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7753_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7753_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7753_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u32 val;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7753_spi_read_reg_24(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7753_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7753_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7753_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7753_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7753_reset(struct device *dev)
-{
- u16 val;
-
- ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
- val |= BIT(6); /* Software Chip Reset */
-
- return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
-}
-
-static IIO_DEV_ATTR_AENERGY(ade7753_read_24bit, ADE7753_AENERGY);
-static IIO_DEV_ATTR_LAENERGY(ade7753_read_24bit, ADE7753_LAENERGY);
-static IIO_DEV_ATTR_VAENERGY(ade7753_read_24bit, ADE7753_VAENERGY);
-static IIO_DEV_ATTR_LVAENERGY(ade7753_read_24bit, ADE7753_LVAENERGY);
-static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_CFDEN);
-static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_CFNUM);
-static IIO_DEV_ATTR_CHKSUM(ade7753_read_8bit, ADE7753_CHKSUM);
-static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_PHCAL);
-static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_APOS);
-static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_SAGCYC);
-static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_SAGLVL);
-static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_LINECYC);
-static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_WDIV);
-static IIO_DEV_ATTR_IRMS(S_IRUGO,
- ade7753_read_24bit,
- NULL,
- ADE7753_IRMS);
-static IIO_DEV_ATTR_VRMS(S_IRUGO,
- ade7753_read_24bit,
- NULL,
- ADE7753_VRMS);
-static IIO_DEV_ATTR_IRMSOS(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_IRMSOS);
-static IIO_DEV_ATTR_VRMSOS(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_VRMSOS);
-static IIO_DEV_ATTR_WGAIN(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_WGAIN);
-static IIO_DEV_ATTR_VAGAIN(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_VAGAIN);
-static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
- ade7753_read_16bit,
- ade7753_write_16bit,
- ADE7753_GAIN);
-static IIO_DEV_ATTR_IPKLVL(S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_IPKLVL);
-static IIO_DEV_ATTR_VPKLVL(S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_VPKLVL);
-static IIO_DEV_ATTR_IPEAK(S_IRUGO,
- ade7753_read_24bit,
- NULL,
- ADE7753_IPEAK);
-static IIO_DEV_ATTR_VPEAK(S_IRUGO,
- ade7753_read_24bit,
- NULL,
- ADE7753_VPEAK);
-static IIO_DEV_ATTR_VPERIOD(S_IRUGO,
- ade7753_read_16bit,
- NULL,
- ADE7753_PERIOD);
-static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_CH1OS);
-static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
- ade7753_read_8bit,
- ade7753_write_8bit,
- ADE7753_CH2OS);
-
-static int ade7753_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u8 irqen;
-
- ret = ade7753_spi_read_reg_8(dev, ADE7753_IRQEN, &irqen);
- if (ret)
- goto error_ret;
-
- if (enable)
-		irqen |= BIT(3); /* Enable an interrupt when data is
- present in the waveform register */
- else
- irqen &= ~BIT(3);
-
- ret = ade7753_spi_write_reg_8(dev, ADE7753_IRQEN, irqen);
-
-error_ret:
- return ret;
-}
-
-/* Power down the device */
-static int ade7753_stop_device(struct device *dev)
-{
- u16 val;
-
- ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
- val |= BIT(4); /* AD converters can be turned off */
-
- return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
-}
-
-static int ade7753_initial_setup(struct iio_dev *indio_dev)
-{
- int ret;
- struct device *dev = &indio_dev->dev;
- struct ade7753_state *st = iio_priv(indio_dev);
-
-	/* set SPI mode 3 for this device */
- st->us->mode = SPI_MODE_3;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7753_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7753_reset(dev);
- msleep(ADE7753_STARTUP_DELAY);
-
-err_ret:
- return ret;
-}
-
-static ssize_t ade7753_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 t;
- int sps;
-
- ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &t);
- if (ret)
- return ret;
-
- t = (t >> 11) & 0x3;
- sps = 27900 / (1 + t);
-
- return sprintf(buf, "%d\n", sps);
-}
-
-static ssize_t ade7753_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7753_state *st = iio_priv(indio_dev);
- u16 val;
- int ret;
- u16 reg, t;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
- if (val == 0)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
-
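-	/* Map the requested rate to the 2-bit divider: rate = 27900 / (t + 1) */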
- t = 27900 / val;
- if (t > 0)
- t--;
-
- if (t > 1)
- st->us->max_speed_hz = ADE7753_SPI_SLOW;
- else
- st->us->max_speed_hz = ADE7753_SPI_FAST;
-
- ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~(3 << 11);
- reg |= t << 11;
-
- ret = ade7753_spi_write_reg_16(dev, ADE7753_MODE, reg);
-
-out:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_TEMP_RAW(ade7753_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "-25 C");
-static IIO_CONST_ATTR(in_temp_scale, "0.67 C");
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ade7753_read_frequency,
- ade7753_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
-
-static struct attribute *ade7753_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_phcal.dev_attr.attr,
- &iio_dev_attr_cfden.dev_attr.attr,
- &iio_dev_attr_aenergy.dev_attr.attr,
- &iio_dev_attr_laenergy.dev_attr.attr,
- &iio_dev_attr_vaenergy.dev_attr.attr,
- &iio_dev_attr_lvaenergy.dev_attr.attr,
- &iio_dev_attr_cfnum.dev_attr.attr,
- &iio_dev_attr_apos.dev_attr.attr,
- &iio_dev_attr_sagcyc.dev_attr.attr,
- &iio_dev_attr_saglvl.dev_attr.attr,
- &iio_dev_attr_linecyc.dev_attr.attr,
- &iio_dev_attr_chksum.dev_attr.attr,
- &iio_dev_attr_pga_gain.dev_attr.attr,
- &iio_dev_attr_wgain.dev_attr.attr,
- &iio_dev_attr_choff_1.dev_attr.attr,
- &iio_dev_attr_choff_2.dev_attr.attr,
- &iio_dev_attr_wdiv.dev_attr.attr,
- &iio_dev_attr_irms.dev_attr.attr,
- &iio_dev_attr_vrms.dev_attr.attr,
- &iio_dev_attr_irmsos.dev_attr.attr,
- &iio_dev_attr_vrmsos.dev_attr.attr,
- &iio_dev_attr_vagain.dev_attr.attr,
- &iio_dev_attr_ipklvl.dev_attr.attr,
- &iio_dev_attr_vpklvl.dev_attr.attr,
- &iio_dev_attr_ipeak.dev_attr.attr,
- &iio_dev_attr_vpeak.dev_attr.attr,
- &iio_dev_attr_vperiod.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7753_attribute_group = {
- .attrs = ade7753_attributes,
-};
-
-static const struct iio_info ade7753_info = {
- .attrs = &ade7753_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-static int ade7753_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7753_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st = iio_priv(indio_dev);
- st->us = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7753_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- /* Get the device into a sane initial state */
- ret = ade7753_initial_setup(indio_dev);
- if (ret)
- return ret;
-
- return iio_device_register(indio_dev);
-}
-
-/* fixme, confirm ordering in this function */
-static int ade7753_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- ade7753_stop_device(&indio_dev->dev);
-
- return 0;
-}
-
-static struct spi_driver ade7753_driver = {
- .driver = {
- .name = "ade7753",
- .owner = THIS_MODULE,
- },
- .probe = ade7753_probe,
- .remove = ade7753_remove,
-};
-module_spi_driver(ade7753_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Meter");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ade7753");
diff --git a/drivers/staging/iio/meter/ade7753.h b/drivers/staging/iio/meter/ade7753.h
deleted file mode 100644
index a9d93cc1c414..000000000000
--- a/drivers/staging/iio/meter/ade7753.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef _ADE7753_H
-#define _ADE7753_H
-
-#define ADE7753_WAVEFORM 0x01
-#define ADE7753_AENERGY 0x02
-#define ADE7753_RAENERGY 0x03
-#define ADE7753_LAENERGY 0x04
-#define ADE7753_VAENERGY 0x05
-#define ADE7753_RVAENERGY 0x06
-#define ADE7753_LVAENERGY 0x07
-#define ADE7753_LVARENERGY 0x08
-#define ADE7753_MODE 0x09
-#define ADE7753_IRQEN 0x0A
-#define ADE7753_STATUS 0x0B
-#define ADE7753_RSTSTATUS 0x0C
-#define ADE7753_CH1OS 0x0D
-#define ADE7753_CH2OS 0x0E
-#define ADE7753_GAIN 0x0F
-#define ADE7753_PHCAL 0x10
-#define ADE7753_APOS 0x11
-#define ADE7753_WGAIN 0x12
-#define ADE7753_WDIV 0x13
-#define ADE7753_CFNUM 0x14
-#define ADE7753_CFDEN 0x15
-#define ADE7753_IRMS 0x16
-#define ADE7753_VRMS 0x17
-#define ADE7753_IRMSOS 0x18
-#define ADE7753_VRMSOS 0x19
-#define ADE7753_VAGAIN 0x1A
-#define ADE7753_VADIV 0x1B
-#define ADE7753_LINECYC 0x1C
-#define ADE7753_ZXTOUT 0x1D
-#define ADE7753_SAGCYC 0x1E
-#define ADE7753_SAGLVL 0x1F
-#define ADE7753_IPKLVL 0x20
-#define ADE7753_VPKLVL 0x21
-#define ADE7753_IPEAK 0x22
-#define ADE7753_RSTIPEAK 0x23
-#define ADE7753_VPEAK 0x24
-#define ADE7753_RSTVPEAK 0x25
-#define ADE7753_TEMP 0x26
-#define ADE7753_PERIOD 0x27
-#define ADE7753_TMODE 0x3D
-#define ADE7753_CHKSUM 0x3E
-#define ADE7753_DIEREV 0x3F
-
-#define ADE7753_READ_REG(a) a
-#define ADE7753_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7753_MAX_TX 4
-#define ADE7753_MAX_RX 4
-#define ADE7753_STARTUP_DELAY 1
-
-#define ADE7753_SPI_SLOW (u32)(300 * 1000)
-#define ADE7753_SPI_BURST (u32)(1000 * 1000)
-#define ADE7753_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7753_state - device instance specific data
- * @us: actual spi_device
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct ade7753_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7753_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7753_MAX_RX];
-};
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
deleted file mode 100644
index 8552c76cbbe7..000000000000
--- a/drivers/staging/iio/meter/ade7754.c
+++ /dev/null
@@ -1,588 +0,0 @@
-/*
- * ADE7754 Polyphase Multifunction Energy Metering IC Driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "meter.h"
-#include "ade7754.h"
-
-static int ade7754_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7754_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7754_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7754_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7754_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r8(st->us, ADE7754_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- return ret;
- }
- *val = ret;
-
- return 0;
-}
-
-static int ade7754_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r16be(st->us, ADE7754_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- return ret;
- }
-
- *val = ret;
-
- return 0;
-}
-
-static int ade7754_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 4,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7754_READ_REG(reg_address);
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7754_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7754_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7754_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7754_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7754_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u32 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7754_spi_read_reg_24(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val & 0xFFFFFF);
-}
-
-static ssize_t ade7754_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7754_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7754_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7754_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7754_reset(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7754_spi_read_reg_8(dev, ADE7754_OPMODE, &val);
- if (ret < 0)
- return ret;
-
- val |= BIT(6); /* Software Chip Reset */
- return ade7754_spi_write_reg_8(dev, ADE7754_OPMODE, val);
-}
-
-static IIO_DEV_ATTR_AENERGY(ade7754_read_24bit, ADE7754_AENERGY);
-static IIO_DEV_ATTR_LAENERGY(ade7754_read_24bit, ADE7754_LAENERGY);
-static IIO_DEV_ATTR_VAENERGY(ade7754_read_24bit, ADE7754_VAENERGY);
-static IIO_DEV_ATTR_LVAENERGY(ade7754_read_24bit, ADE7754_LVAENERGY);
-static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_VPEAK);
-static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
- ade7754_read_8bit,
- ade7754_write_8bit,
-		ADE7754_IPEAK);
-static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_CPHCAL);
-static IIO_DEV_ATTR_AAPOS(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_AAPOS);
-static IIO_DEV_ATTR_BAPOS(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_BAPOS);
-static IIO_DEV_ATTR_CAPOS(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CAPOS);
-static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_WDIV);
-static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
- ade7754_read_8bit,
- ade7754_write_8bit,
- ADE7754_VADIV);
-static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CFNUM);
-static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CFDEN);
-static IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_AAPGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_BAPGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
- ade7754_read_16bit,
- ade7754_write_16bit,
- ADE7754_CAPGAIN);
-static IIO_DEV_ATTR_AIRMS(S_IRUGO,
- ade7754_read_24bit,
- NULL,
- ADE7754_AIRMS);
-static IIO_DEV_ATTR_BIRMS(S_IRUGO,
- ade7754_read_24bit,
- NULL,
- ADE7754_BIRMS);
-static IIO_DEV_ATTR_CIRMS(S_IRUGO,
- ade7754_read_24bit,
- NULL,
- ADE7754_CIRMS);
-static IIO_DEV_ATTR_AVRMS(S_IRUGO,
- ade7754_read_24bit,
- NULL,
- ADE7754_AVRMS);
-static IIO_DEV_ATTR_BVRMS(S_IRUGO,
- ade7754_read_24bit,
- NULL,
- ADE7754_BVRMS);
-static IIO_DEV_ATTR_CVRMS(S_IRUGO,
- ade7754_read_24bit,
- NULL,
- ADE7754_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
-		ade7754_read_16bit,
-		ade7754_write_16bit,
-		ADE7754_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
-		ade7754_read_16bit,
-		ade7754_write_16bit,
-		ADE7754_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
-		ade7754_read_16bit,
-		ade7754_write_16bit,
-		ADE7754_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
-		ade7754_read_16bit,
-		ade7754_write_16bit,
-		ADE7754_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
-		ade7754_read_16bit,
-		ade7754_write_16bit,
-		ADE7754_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
-		ade7754_read_16bit,
-		ade7754_write_16bit,
-		ADE7754_CVRMSOS);
-
-static int ade7754_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u16 irqen;
-
- ret = ade7754_spi_read_reg_16(dev, ADE7754_IRQEN, &irqen);
- if (ret)
- goto error_ret;
-
- if (enable)
-		irqen |= BIT(14); /* Enable an interrupt when data is
- present in the waveform register */
- else
- irqen &= ~BIT(14);
-
- ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
- if (ret)
- goto error_ret;
-
-error_ret:
- return ret;
-}
-
-/* Power down the device */
-static int ade7754_stop_device(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7754_spi_read_reg_8(dev, ADE7754_OPMODE, &val);
- if (ret < 0) {
- dev_err(dev, "unable to power down the device, error: %d",
- ret);
- return ret;
- }
-
- val |= 7 << 3; /* ADE7754 powered down */
- return ade7754_spi_write_reg_8(dev, ADE7754_OPMODE, val);
-}
-
-static int ade7754_initial_setup(struct iio_dev *indio_dev)
-{
- int ret;
- struct ade7754_state *st = iio_priv(indio_dev);
- struct device *dev = &indio_dev->dev;
-
-	/* set SPI mode 3 for this device */
- st->us->mode = SPI_MODE_3;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7754_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7754_reset(dev);
- msleep(ADE7754_STARTUP_DELAY);
-
-err_ret:
- return ret;
-}
-
-static ssize_t ade7754_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 t;
- int sps;
-
- ret = ade7754_spi_read_reg_8(dev,
- ADE7754_WAVMODE,
- &t);
- if (ret)
- return ret;
-
- t = (t >> 3) & 0x3;
- sps = 26000 / (1 + t);
-
- return sprintf(buf, "%d\n", sps);
-}
-
-static ssize_t ade7754_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7754_state *st = iio_priv(indio_dev);
- u16 val;
- int ret;
- u8 reg, t;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
- if (val == 0)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
-
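-	/* Map the requested rate to bits 4:3 of WAVMODE: rate = 26000 / (t + 1) */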
- t = 26000 / val;
- if (t > 0)
- t--;
-
- if (t > 1)
- st->us->max_speed_hz = ADE7754_SPI_SLOW;
- else
- st->us->max_speed_hz = ADE7754_SPI_FAST;
-
- ret = ade7754_spi_read_reg_8(dev, ADE7754_WAVMODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~(3 << 3);
- reg |= t << 3;
-
- ret = ade7754_spi_write_reg_8(dev, ADE7754_WAVMODE, reg);
-
-out:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-static IIO_DEV_ATTR_TEMP_RAW(ade7754_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "129 C");
-static IIO_CONST_ATTR(in_temp_scale, "4 C");
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ade7754_read_frequency,
- ade7754_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 65000 33000");
-
-static struct attribute *ade7754_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_aenergy.dev_attr.attr,
- &iio_dev_attr_laenergy.dev_attr.attr,
- &iio_dev_attr_vaenergy.dev_attr.attr,
- &iio_dev_attr_lvaenergy.dev_attr.attr,
- &iio_dev_attr_vpeak.dev_attr.attr,
- &iio_dev_attr_ipeak.dev_attr.attr,
- &iio_dev_attr_aphcal.dev_attr.attr,
- &iio_dev_attr_bphcal.dev_attr.attr,
- &iio_dev_attr_cphcal.dev_attr.attr,
- &iio_dev_attr_aapos.dev_attr.attr,
- &iio_dev_attr_bapos.dev_attr.attr,
- &iio_dev_attr_capos.dev_attr.attr,
- &iio_dev_attr_wdiv.dev_attr.attr,
- &iio_dev_attr_vadiv.dev_attr.attr,
- &iio_dev_attr_cfnum.dev_attr.attr,
- &iio_dev_attr_cfden.dev_attr.attr,
- &iio_dev_attr_active_power_a_gain.dev_attr.attr,
- &iio_dev_attr_active_power_b_gain.dev_attr.attr,
- &iio_dev_attr_active_power_c_gain.dev_attr.attr,
- &iio_dev_attr_airms.dev_attr.attr,
- &iio_dev_attr_birms.dev_attr.attr,
- &iio_dev_attr_cirms.dev_attr.attr,
- &iio_dev_attr_avrms.dev_attr.attr,
- &iio_dev_attr_bvrms.dev_attr.attr,
- &iio_dev_attr_cvrms.dev_attr.attr,
- &iio_dev_attr_airmsos.dev_attr.attr,
- &iio_dev_attr_birmsos.dev_attr.attr,
- &iio_dev_attr_cirmsos.dev_attr.attr,
- &iio_dev_attr_avrmsos.dev_attr.attr,
- &iio_dev_attr_bvrmsos.dev_attr.attr,
- &iio_dev_attr_cvrmsos.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7754_attribute_group = {
- .attrs = ade7754_attributes,
-};
-
-static const struct iio_info ade7754_info = {
- .attrs = &ade7754_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-static int ade7754_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7754_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st = iio_priv(indio_dev);
- st->us = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7754_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- /* Get the device into a sane initial state */
- ret = ade7754_initial_setup(indio_dev);
- if (ret)
- goto powerdown_on_error;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto powerdown_on_error;
- return ret;
-
-powerdown_on_error:
- ade7754_stop_device(&indio_dev->dev);
- return ret;
-}
-
-/* fixme, confirm ordering in this function */
-static int ade7754_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- ade7754_stop_device(&indio_dev->dev);
-
- return 0;
-}
-
-static struct spi_driver ade7754_driver = {
- .driver = {
- .name = "ade7754",
- .owner = THIS_MODULE,
- },
- .probe = ade7754_probe,
- .remove = ade7754_remove,
-};
-module_spi_driver(ade7754_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7754");
diff --git a/drivers/staging/iio/meter/ade7754.h b/drivers/staging/iio/meter/ade7754.h
deleted file mode 100644
index e42ffc387a14..000000000000
--- a/drivers/staging/iio/meter/ade7754.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef _ADE7754_H
-#define _ADE7754_H
-
-#define ADE7754_AENERGY 0x01
-#define ADE7754_RAENERGY 0x02
-#define ADE7754_LAENERGY 0x03
-#define ADE7754_VAENERGY 0x04
-#define ADE7754_RVAENERGY 0x05
-#define ADE7754_LVAENERGY 0x06
-#define ADE7754_PERIOD 0x07
-#define ADE7754_TEMP 0x08
-#define ADE7754_WFORM 0x09
-#define ADE7754_OPMODE 0x0A
-#define ADE7754_MMODE 0x0B
-#define ADE7754_WAVMODE 0x0C
-#define ADE7754_WATMODE 0x0D
-#define ADE7754_VAMODE 0x0E
-#define ADE7754_IRQEN 0x0F
-#define ADE7754_STATUS 0x10
-#define ADE7754_RSTATUS 0x11
-#define ADE7754_ZXTOUT 0x12
-#define ADE7754_LINCYC 0x13
-#define ADE7754_SAGCYC 0x14
-#define ADE7754_SAGLVL 0x15
-#define ADE7754_VPEAK 0x16
-#define ADE7754_IPEAK 0x17
-#define ADE7754_GAIN 0x18
-#define ADE7754_AWG 0x19
-#define ADE7754_BWG 0x1A
-#define ADE7754_CWG 0x1B
-#define ADE7754_AVAG 0x1C
-#define ADE7754_BVAG 0x1D
-#define ADE7754_CVAG 0x1E
-#define ADE7754_APHCAL 0x1F
-#define ADE7754_BPHCAL 0x20
-#define ADE7754_CPHCAL 0x21
-#define ADE7754_AAPOS 0x22
-#define ADE7754_BAPOS 0x23
-#define ADE7754_CAPOS 0x24
-#define ADE7754_CFNUM 0x25
-#define ADE7754_CFDEN 0x26
-#define ADE7754_WDIV 0x27
-#define ADE7754_VADIV 0x28
-#define ADE7754_AIRMS 0x29
-#define ADE7754_BIRMS 0x2A
-#define ADE7754_CIRMS 0x2B
-#define ADE7754_AVRMS 0x2C
-#define ADE7754_BVRMS 0x2D
-#define ADE7754_CVRMS 0x2E
-#define ADE7754_AIRMSOS 0x2F
-#define ADE7754_BIRMSOS 0x30
-#define ADE7754_CIRMSOS 0x31
-#define ADE7754_AVRMSOS 0x32
-#define ADE7754_BVRMSOS 0x33
-#define ADE7754_CVRMSOS 0x34
-#define ADE7754_AAPGAIN 0x35
-#define ADE7754_BAPGAIN 0x36
-#define ADE7754_CAPGAIN 0x37
-#define ADE7754_AVGAIN 0x38
-#define ADE7754_BVGAIN 0x39
-#define ADE7754_CVGAIN 0x3A
-#define ADE7754_CHKSUM 0x3E
-#define ADE7754_VERSION 0x3F
-
-#define ADE7754_READ_REG(a) a
-#define ADE7754_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7754_MAX_TX 4
-#define ADE7754_MAX_RX 4
-#define ADE7754_STARTUP_DELAY 1
-
-#define ADE7754_SPI_SLOW (u32)(300 * 1000)
-#define ADE7754_SPI_BURST (u32)(1000 * 1000)
-#define ADE7754_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7754_state - device instance specific data
- * @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct ade7754_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7754_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7754_MAX_RX];
-};
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
deleted file mode 100644
index f6739e2c24b1..000000000000
--- a/drivers/staging/iio/meter/ade7758.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef _ADE7758_H
-#define _ADE7758_H
-
-#define ADE7758_AWATTHR 0x01
-#define ADE7758_BWATTHR 0x02
-#define ADE7758_CWATTHR 0x03
-#define ADE7758_AVARHR 0x04
-#define ADE7758_BVARHR 0x05
-#define ADE7758_CVARHR 0x06
-#define ADE7758_AVAHR 0x07
-#define ADE7758_BVAHR 0x08
-#define ADE7758_CVAHR 0x09
-#define ADE7758_AIRMS 0x0A
-#define ADE7758_BIRMS 0x0B
-#define ADE7758_CIRMS 0x0C
-#define ADE7758_AVRMS 0x0D
-#define ADE7758_BVRMS 0x0E
-#define ADE7758_CVRMS 0x0F
-#define ADE7758_FREQ 0x10
-#define ADE7758_TEMP 0x11
-#define ADE7758_WFORM 0x12
-#define ADE7758_OPMODE 0x13
-#define ADE7758_MMODE 0x14
-#define ADE7758_WAVMODE 0x15
-#define ADE7758_COMPMODE 0x16
-#define ADE7758_LCYCMODE 0x17
-#define ADE7758_MASK 0x18
-#define ADE7758_STATUS 0x19
-#define ADE7758_RSTATUS 0x1A
-#define ADE7758_ZXTOUT 0x1B
-#define ADE7758_LINECYC 0x1C
-#define ADE7758_SAGCYC 0x1D
-#define ADE7758_SAGLVL 0x1E
-#define ADE7758_VPINTLVL 0x1F
-#define ADE7758_IPINTLVL 0x20
-#define ADE7758_VPEAK 0x21
-#define ADE7758_IPEAK 0x22
-#define ADE7758_GAIN 0x23
-#define ADE7758_AVRMSGAIN 0x24
-#define ADE7758_BVRMSGAIN 0x25
-#define ADE7758_CVRMSGAIN 0x26
-#define ADE7758_AIGAIN 0x27
-#define ADE7758_BIGAIN 0x28
-#define ADE7758_CIGAIN 0x29
-#define ADE7758_AWG 0x2A
-#define ADE7758_BWG 0x2B
-#define ADE7758_CWG 0x2C
-#define ADE7758_AVARG 0x2D
-#define ADE7758_BVARG 0x2E
-#define ADE7758_CVARG 0x2F
-#define ADE7758_AVAG 0x30
-#define ADE7758_BVAG 0x31
-#define ADE7758_CVAG 0x32
-#define ADE7758_AVRMSOS 0x33
-#define ADE7758_BVRMSOS 0x34
-#define ADE7758_CVRMSOS 0x35
-#define ADE7758_AIRMSOS 0x36
-#define ADE7758_BIRMSOS 0x37
-#define ADE7758_CIRMSOS 0x38
-#define ADE7758_AWAITOS 0x39
-#define ADE7758_BWAITOS 0x3A
-#define ADE7758_CWAITOS 0x3B
-#define ADE7758_AVAROS 0x3C
-#define ADE7758_BVAROS 0x3D
-#define ADE7758_CVAROS 0x3E
-#define ADE7758_APHCAL 0x3F
-#define ADE7758_BPHCAL 0x40
-#define ADE7758_CPHCAL 0x41
-#define ADE7758_WDIV 0x42
-#define ADE7758_VADIV 0x44
-#define ADE7758_VARDIV 0x43
-#define ADE7758_APCFNUM 0x45
-#define ADE7758_APCFDEN 0x46
-#define ADE7758_VARCFNUM 0x47
-#define ADE7758_VARCFDEN 0x48
-#define ADE7758_CHKSUM 0x7E
-#define ADE7758_VERSION 0x7F
-
-#define ADE7758_READ_REG(a) a
-#define ADE7758_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7758_MAX_TX 8
-#define ADE7758_MAX_RX 4
-#define ADE7758_STARTUP_DELAY 1
-
-#define AD7758_NUM_WAVSEL 5
-#define AD7758_NUM_PHSEL 3
-#define AD7758_NUM_WAVESRC (AD7758_NUM_WAVSEL * AD7758_NUM_PHSEL)
-
-#define AD7758_PHASE_A 0
-#define AD7758_PHASE_B 1
-#define AD7758_PHASE_C 2
-#define AD7758_CURRENT 0
-#define AD7758_VOLTAGE 1
-#define AD7758_ACT_PWR 2
-#define AD7758_REACT_PWR 3
-#define AD7758_APP_PWR 4
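-/* Pack phase (bits 1:0) and measurement type (bits 3:2) into a wave source index */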
-#define AD7758_WT(p, w) (((w) << 2) | (p))
-
-/**
- * struct ade7758_state - device instance specific data
- * @us: actual spi_device
- * @trig: data ready trigger registered with iio
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct ade7758_state {
- struct spi_device *us;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
- struct spi_transfer ring_xfer[4];
- struct spi_message ring_msg;
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
- unsigned char rx_buf[8] ____cacheline_aligned;
- unsigned char tx_buf[8];
-
-};
-#ifdef CONFIG_IIO_BUFFER
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-void ade7758_remove_trigger(struct iio_dev *indio_dev);
-int ade7758_probe_trigger(struct iio_dev *indio_dev);
-
-ssize_t ade7758_read_data_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-
-
-int ade7758_configure_ring(struct iio_dev *indio_dev);
-void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
-
-int ade7758_set_irq(struct device *dev, bool enable);
-
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address, u8 val);
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address, u8 *val);
-
-#else /* CONFIG_IIO_BUFFER */
-
-static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
-{
-}
-static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline int ade7758_configure_ring(struct iio_dev *indio_dev)
-{
- return 0;
-}
-static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
-{
-}
-static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
-{
- return 0;
-}
-static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
-{
-}
-#endif /* CONFIG_IIO_BUFFER */
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
deleted file mode 100644
index 38838085824f..000000000000
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ /dev/null
@@ -1,917 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include "meter.h"
-#include "ade7758.h"
-
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7758_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7758_spi_write_reg_24(struct device *dev,
- u8 reg_address,
- u32 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 4,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_WRITE_REG(reg_address);
- st->tx[1] = (value >> 16) & 0xFF;
- st->tx[2] = (value >> 8) & 0xFF;
- st->tx[3] = value & 0xFF;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
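-	/* Split into two transfers so a 4 us delay separates address and data */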
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- .delay_usecs = 4,
- },
- {
- .tx_buf = &st->tx[1],
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 1,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_READ_REG(reg_address);
- st->tx[1] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = st->rx[0];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7758_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- .delay_usecs = 4,
- },
- {
- .tx_buf = &st->tx[1],
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_READ_REG(reg_address);
- st->tx[1] = 0;
- st->tx[2] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
-
- *val = (st->rx[0] << 8) | st->rx[1];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7758_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 1,
- .delay_usecs = 4,
- },
- {
- .tx_buf = &st->tx[1],
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 3,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7758_READ_REG(reg_address);
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7758_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7758_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7758_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7758_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7758_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u32 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7758_spi_read_reg_24(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val & 0xFFFFFF);
-}
-
-static ssize_t ade7758_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7758_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7758_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7758_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7758_reset(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7758_spi_read_reg_8(dev, ADE7758_OPMODE, &val);
- if (ret < 0) {
- dev_err(dev, "Failed to read opmode reg\n");
- return ret;
- }
- val |= BIT(6); /* Software Chip Reset */
- ret = ade7758_spi_write_reg_8(dev, ADE7758_OPMODE, val);
- if (ret < 0)
- dev_err(dev, "Failed to write opmode reg\n");
- return ret;
-}
-
-static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_VPEAK);
-static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
- ade7758_read_8bit,
- ade7758_write_8bit,
-		ADE7758_IPEAK);
-static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_CPHCAL);
-static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_WDIV);
-static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
- ade7758_read_8bit,
- ade7758_write_8bit,
- ADE7758_VADIV);
-static IIO_DEV_ATTR_AIRMS(S_IRUGO,
- ade7758_read_24bit,
- NULL,
- ADE7758_AIRMS);
-static IIO_DEV_ATTR_BIRMS(S_IRUGO,
- ade7758_read_24bit,
- NULL,
- ADE7758_BIRMS);
-static IIO_DEV_ATTR_CIRMS(S_IRUGO,
- ade7758_read_24bit,
- NULL,
- ADE7758_CIRMS);
-static IIO_DEV_ATTR_AVRMS(S_IRUGO,
- ade7758_read_24bit,
- NULL,
- ADE7758_AVRMS);
-static IIO_DEV_ATTR_BVRMS(S_IRUGO,
- ade7758_read_24bit,
- NULL,
- ADE7758_BVRMS);
-static IIO_DEV_ATTR_CVRMS(S_IRUGO,
- ade7758_read_24bit,
- NULL,
- ADE7758_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CVRMSOS);
-static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AIGAIN);
-static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BIGAIN);
-static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CIGAIN);
-static IIO_DEV_ATTR_AVRMSGAIN(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_AVRMSGAIN);
-static IIO_DEV_ATTR_BVRMSGAIN(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_BVRMSGAIN);
-static IIO_DEV_ATTR_CVRMSGAIN(S_IWUSR | S_IRUGO,
- ade7758_read_16bit,
- ade7758_write_16bit,
- ADE7758_CVRMSGAIN);
-
-int ade7758_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u32 irqen;
-
- ret = ade7758_spi_read_reg_24(dev, ADE7758_MASK, &irqen);
- if (ret)
- goto error_ret;
-
- if (enable)
-		/* Enable the interrupt raised when data is present in the waveform register */
-		irqen |= BIT(16);
- else
- irqen &= ~BIT(16);
-
- ret = ade7758_spi_write_reg_24(dev, ADE7758_MASK, irqen);
- if (ret)
- goto error_ret;
-
-error_ret:
- return ret;
-}
-
-/* Power down the device */
-static int ade7758_stop_device(struct device *dev)
-{
- int ret;
- u8 val;
-
- ret = ade7758_spi_read_reg_8(dev, ADE7758_OPMODE, &val);
- if (ret < 0) {
- dev_err(dev, "Failed to read opmode reg\n");
- return ret;
- }
- val |= 7 << 3; /* ADE7758 powered down */
- ret = ade7758_spi_write_reg_8(dev, ADE7758_OPMODE, val);
- if (ret < 0)
- dev_err(dev, "Failed to write opmode reg\n");
- return ret;
-}
-
-static int ade7758_initial_setup(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- struct device *dev = &indio_dev->dev;
- int ret;
-
- /* use low spi speed for init */
- st->us->mode = SPI_MODE_1;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7758_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7758_reset(dev);
- msleep(ADE7758_STARTUP_DELAY);
-
-err_ret:
- return ret;
-}
-
-static ssize_t ade7758_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 t;
- int sps;
-
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &t);
- if (ret)
- return ret;
-
- t = (t >> 5) & 0x3;
- sps = 26040 / (1 << t);
-
-	return sprintf(buf, "%d\n", sps);
-}
-
-static ssize_t ade7758_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- u16 val;
- int ret;
- u8 reg, t;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&indio_dev->mlock);
-
- switch (val) {
- case 26040:
- t = 0;
- break;
- case 13020:
- t = 1;
- break;
- case 6510:
- t = 2;
- break;
- case 3255:
- t = 3;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &reg);
- if (ret)
- goto out;
-
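-	/* the sample-rate select field lives in WAVMODE bits 6:5 (cf. ade7758_read_frequency()) */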
-	reg &= ~(3 << 5);
- reg |= t << 5;
-
- ret = ade7758_spi_write_reg_8(dev,
- ADE7758_WAVMODE,
- reg);
-
-out:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "129 C");
-static IIO_CONST_ATTR(in_temp_scale, "4 C");
-
-static IIO_DEV_ATTR_AWATTHR(ade7758_read_16bit,
- ADE7758_AWATTHR);
-static IIO_DEV_ATTR_BWATTHR(ade7758_read_16bit,
- ADE7758_BWATTHR);
-static IIO_DEV_ATTR_CWATTHR(ade7758_read_16bit,
- ADE7758_CWATTHR);
-static IIO_DEV_ATTR_AVARHR(ade7758_read_16bit,
- ADE7758_AVARHR);
-static IIO_DEV_ATTR_BVARHR(ade7758_read_16bit,
- ADE7758_BVARHR);
-static IIO_DEV_ATTR_CVARHR(ade7758_read_16bit,
- ADE7758_CVARHR);
-static IIO_DEV_ATTR_AVAHR(ade7758_read_16bit,
- ADE7758_AVAHR);
-static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
- ADE7758_BVAHR);
-static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
- ADE7758_CVAHR);
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ade7758_read_frequency,
- ade7758_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
-
-static struct attribute *ade7758_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_awatthr.dev_attr.attr,
- &iio_dev_attr_bwatthr.dev_attr.attr,
- &iio_dev_attr_cwatthr.dev_attr.attr,
- &iio_dev_attr_avarhr.dev_attr.attr,
- &iio_dev_attr_bvarhr.dev_attr.attr,
- &iio_dev_attr_cvarhr.dev_attr.attr,
- &iio_dev_attr_avahr.dev_attr.attr,
- &iio_dev_attr_bvahr.dev_attr.attr,
- &iio_dev_attr_cvahr.dev_attr.attr,
- &iio_dev_attr_vpeak.dev_attr.attr,
- &iio_dev_attr_ipeak.dev_attr.attr,
- &iio_dev_attr_aphcal.dev_attr.attr,
- &iio_dev_attr_bphcal.dev_attr.attr,
- &iio_dev_attr_cphcal.dev_attr.attr,
- &iio_dev_attr_wdiv.dev_attr.attr,
- &iio_dev_attr_vadiv.dev_attr.attr,
- &iio_dev_attr_airms.dev_attr.attr,
- &iio_dev_attr_birms.dev_attr.attr,
- &iio_dev_attr_cirms.dev_attr.attr,
- &iio_dev_attr_avrms.dev_attr.attr,
- &iio_dev_attr_bvrms.dev_attr.attr,
- &iio_dev_attr_cvrms.dev_attr.attr,
- &iio_dev_attr_aigain.dev_attr.attr,
- &iio_dev_attr_bigain.dev_attr.attr,
- &iio_dev_attr_cigain.dev_attr.attr,
- &iio_dev_attr_avrmsgain.dev_attr.attr,
- &iio_dev_attr_bvrmsgain.dev_attr.attr,
- &iio_dev_attr_cvrmsgain.dev_attr.attr,
- &iio_dev_attr_airmsos.dev_attr.attr,
- &iio_dev_attr_birmsos.dev_attr.attr,
- &iio_dev_attr_cirmsos.dev_attr.attr,
- &iio_dev_attr_avrmsos.dev_attr.attr,
- &iio_dev_attr_bvrmsos.dev_attr.attr,
- &iio_dev_attr_cvrmsos.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7758_attribute_group = {
- .attrs = ade7758_attributes,
-};
-
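-/*
- * One scan element per phase/measurement pair; the address field carries the
- * WAVMODE waveform selection that ade7758_ring_preenable() programs for the
- * active channel.
- */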
-static const struct iio_chan_spec ade7758_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
- .scan_index = 0,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_CURRENT,
- .indexed = 1,
- .channel = 0,
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
- .scan_index = 1,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 0,
- .extend_name = "apparent",
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
- .scan_index = 2,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 0,
- .extend_name = "active",
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
- .scan_index = 3,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 0,
- .extend_name = "reactive",
- .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
- .scan_index = 4,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
- .scan_index = 5,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_CURRENT,
- .indexed = 1,
- .channel = 1,
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
- .scan_index = 6,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 1,
- .extend_name = "apparent",
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
- .scan_index = 7,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 1,
- .extend_name = "active",
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
- .scan_index = 8,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 1,
- .extend_name = "reactive",
- .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
- .scan_index = 9,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 2,
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
- .scan_index = 10,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_CURRENT,
- .indexed = 1,
- .channel = 2,
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
- .scan_index = 11,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 2,
- .extend_name = "apparent",
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
- .scan_index = 12,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 2,
- .extend_name = "active",
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
- .scan_index = 13,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- }, {
- .type = IIO_POWER,
- .indexed = 1,
- .channel = 2,
- .extend_name = "reactive",
- .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
- .scan_index = 14,
- .scan_type = {
- .sign = 's',
- .realbits = 24,
- .storagebits = 32,
- },
- },
- IIO_CHAN_SOFT_TIMESTAMP(15),
-};
-
-static const struct iio_info ade7758_info = {
- .attrs = &ade7758_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-static int ade7758_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7758_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- /* Allocate the comms buffers */
- st->rx = kcalloc(ADE7758_MAX_RX, sizeof(*st->rx), GFP_KERNEL);
- if (!st->rx)
- return -ENOMEM;
- st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL);
- if (!st->tx) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
- st->us = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7758_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ade7758_channels;
- indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
-
- ret = ade7758_configure_ring(indio_dev);
- if (ret)
- goto error_free_tx;
-
- /* Get the device into a sane initial state */
- ret = ade7758_initial_setup(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
-
- if (spi->irq) {
- ret = ade7758_probe_trigger(indio_dev);
- if (ret)
- goto error_unreg_ring_funcs;
- }
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- return 0;
-
-error_remove_trigger:
- if (spi->irq)
- ade7758_remove_trigger(indio_dev);
-error_unreg_ring_funcs:
- ade7758_unconfigure_ring(indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
- return ret;
-}
-
-static int ade7758_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ade7758_stop_device(&indio_dev->dev);
- ade7758_remove_trigger(indio_dev);
- ade7758_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
-
- return 0;
-}
-
-static const struct spi_device_id ade7758_id[] = {
- {"ade7758", 0},
- {}
-};
-MODULE_DEVICE_TABLE(spi, ade7758_id);
-
-static struct spi_driver ade7758_driver = {
- .driver = {
- .name = "ade7758",
- .owner = THIS_MODULE,
- },
- .probe = ade7758_probe,
- .remove = ade7758_remove,
- .id_table = ade7758_id,
-};
-module_spi_driver(ade7758_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
deleted file mode 100644
index 9a24e0226f8b..000000000000
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <asm/unaligned.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/kfifo_buf.h>
-#include <linux/iio/trigger_consumer.h>
-#include "ade7758.h"
-
-/**
- * ade7758_spi_read_burst() - read data registers
- * @indio_dev: the IIO device
- **/
-static int ade7758_spi_read_burst(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_sync(st->us, &st->ring_msg);
- if (ret)
- dev_err(&st->us->dev, "problem when reading WFORM value\n");
-
- return ret;
-}
-
-static int ade7758_write_waveform_type(struct device *dev, unsigned type)
-{
- int ret;
- u8 reg;
-
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &reg);
- if (ret)
- goto out;
-
- reg &= ~0x1F;
- reg |= type & 0x1F;
-
- ret = ade7758_spi_write_reg_8(dev,
- ADE7758_WAVMODE,
- reg);
-out:
- return ret;
-}
-
-/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
- * specific to be rolled into the core.
- */
-static irqreturn_t ade7758_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct ade7758_state *st = iio_priv(indio_dev);
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
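-	/*
-	 * dat64 gives 16 aligned bytes: the 24-bit sample lands in the first
-	 * word and iio_push_to_buffers_with_timestamp() appends the timestamp
-	 * on the 8-byte boundary.
-	 */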
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
- if (ade7758_spi_read_burst(indio_dev) >= 0)
- *dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;
-
- iio_push_to_buffers_with_timestamp(indio_dev, dat64, pf->timestamp);
-
- iio_trigger_notify_done(indio_dev->trig);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ade7758_ring_preenable() setup the parameters of the ring before enabling
- *
- * The complex nature of the setting of the number of bytes per datum is due
- * to this driver currently ensuring that the timestamp is stored at an 8
- * byte boundary.
- **/
-static int ade7758_ring_preenable(struct iio_dev *indio_dev)
-{
- unsigned channel;
-
- if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
- return -EINVAL;
-
- channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
-
- ade7758_write_waveform_type(&indio_dev->dev,
- indio_dev->channels[channel].address);
-
- return 0;
-}
-
-static const struct iio_buffer_setup_ops ade7758_ring_setup_ops = {
- .preenable = &ade7758_ring_preenable,
- .postenable = &iio_triggered_buffer_postenable,
- .predisable = &iio_triggered_buffer_predisable,
- .validate_scan_mask = &iio_validate_scan_mask_onehot,
-};
-
-void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
-{
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_kfifo_free(indio_dev->buffer);
-}
-
-int ade7758_configure_ring(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- struct iio_buffer *buffer;
- int ret = 0;
-
- buffer = iio_kfifo_allocate();
- if (!buffer)
- return -ENOMEM;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- indio_dev->setup_ops = &ade7758_ring_setup_ops;
-
- indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
- &ade7758_trigger_handler,
- 0,
- indio_dev,
-						 "ade7758_consumer%d",
- indio_dev->id);
- if (!indio_dev->pollfunc) {
- ret = -ENOMEM;
- goto error_iio_kfifo_free;
- }
-
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
-
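-	/*
-	 * Burst layout: command byte 0 reads the reset-on-read status
-	 * register (RSTATUS), command byte 4 reads the 24-bit waveform
-	 * sample that the trigger handler picks up from rx_buf[5].
-	 */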
- st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
- st->tx_buf[1] = 0;
- st->tx_buf[2] = 0;
- st->tx_buf[3] = 0;
- st->tx_buf[4] = ADE7758_READ_REG(ADE7758_WFORM);
- st->tx_buf[5] = 0;
- st->tx_buf[6] = 0;
- st->tx_buf[7] = 0;
-
- /* build spi ring message */
- st->ring_xfer[0].tx_buf = &st->tx_buf[0];
- st->ring_xfer[0].len = 1;
- st->ring_xfer[0].bits_per_word = 8;
- st->ring_xfer[0].delay_usecs = 4;
- st->ring_xfer[1].rx_buf = &st->rx_buf[1];
- st->ring_xfer[1].len = 3;
- st->ring_xfer[1].bits_per_word = 8;
- st->ring_xfer[1].cs_change = 1;
-
- st->ring_xfer[2].tx_buf = &st->tx_buf[4];
- st->ring_xfer[2].len = 1;
- st->ring_xfer[2].bits_per_word = 8;
- st->ring_xfer[2].delay_usecs = 1;
- st->ring_xfer[3].rx_buf = &st->rx_buf[5];
- st->ring_xfer[3].len = 3;
- st->ring_xfer[3].bits_per_word = 8;
-
- spi_message_init(&st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[2], &st->ring_msg);
- spi_message_add_tail(&st->ring_xfer[3], &st->ring_msg);
-
- return 0;
-
-error_iio_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
-}
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
deleted file mode 100644
index 5b35a7f08f4f..000000000000
--- a/drivers/staging/iio/meter/ade7758_trigger.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * ADE7758 Poly Phase Multifunction Energy Metering IC driver
- *
- * Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-#include "ade7758.h"
-
-/**
- * ade7758_data_rdy_trig_poll() the event handler for the data rdy trig
- **/
-static irqreturn_t ade7758_data_rdy_trig_poll(int irq, void *private)
-{
- disable_irq_nosync(irq);
- iio_trigger_poll(private);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ade7758_data_rdy_trigger_set_state() set datardy interrupt state
- **/
-static int ade7758_data_rdy_trigger_set_state(struct iio_trigger *trig,
- bool state)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
-
- dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
- return ade7758_set_irq(&indio_dev->dev, state);
-}
-
-/**
- * ade7758_trig_try_reen() try renabling irq for data rdy trigger
- * @trig: the datardy trigger
- **/
-static int ade7758_trig_try_reen(struct iio_trigger *trig)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
- struct ade7758_state *st = iio_priv(indio_dev);
-
- enable_irq(st->us->irq);
- /* irq reenabled so success! */
- return 0;
-}
-
-static const struct iio_trigger_ops ade7758_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = &ade7758_data_rdy_trigger_set_state,
- .try_reenable = &ade7758_trig_try_reen,
-};
-
-int ade7758_probe_trigger(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
- int ret;
-
- st->trig = iio_trigger_alloc("%s-dev%d",
- spi_get_device_id(st->us)->name,
- indio_dev->id);
- if (!st->trig) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- ret = request_irq(st->us->irq,
- ade7758_data_rdy_trig_poll,
- IRQF_TRIGGER_LOW,
- spi_get_device_id(st->us)->name,
- st->trig);
- if (ret)
- goto error_free_trig;
-
- st->trig->dev.parent = &st->us->dev;
- st->trig->ops = &ade7758_trigger_ops;
- iio_trigger_set_drvdata(st->trig, indio_dev);
-	ret = iio_trigger_register(st->trig);
-	if (ret)
-		goto error_free_irq;
-
-	/* select default trigger */
-	indio_dev->trig = iio_trigger_get(st->trig);
-
- return 0;
-
-error_free_irq:
- free_irq(st->us->irq, st->trig);
-error_free_trig:
- iio_trigger_free(st->trig);
-error_ret:
- return ret;
-}
-
-void ade7758_remove_trigger(struct iio_dev *indio_dev)
-{
- struct ade7758_state *st = iio_priv(indio_dev);
-
- iio_trigger_unregister(st->trig);
- free_irq(st->us->irq, st->trig);
- iio_trigger_free(st->trig);
-}
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
deleted file mode 100644
index 23e739207027..000000000000
--- a/drivers/staging/iio/meter/ade7759.c
+++ /dev/null
@@ -1,503 +0,0 @@
-/*
- * ADE7759 Active Energy Metering IC with di/dt Sensor Interface Driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "meter.h"
-#include "ade7759.h"
-
-static int ade7759_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7759_WRITE_REG(reg_address);
- st->tx[1] = val;
-
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7759_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7759_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7759_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r8(st->us, ADE7759_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- return ret;
- }
- *val = ret;
-
- return 0;
-}
-
-static int ade7759_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = spi_w8r16be(st->us, ADE7759_READ_REG(reg_address));
- if (ret < 0) {
- dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- return ret;
- }
-
- *val = ret;
-
- return 0;
-}
-
-static int ade7759_spi_read_reg_40(struct device *dev,
- u8 reg_address,
- u64 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 6,
- },
- };
-
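-	/* one 6-byte transfer: read command followed by the 40-bit energy value */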
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7759_READ_REG(reg_address);
- memset(&st->tx[1], 0, 5);
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 40 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
-	*val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
-		(st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static ssize_t ade7759_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7759_spi_read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7759_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7759_spi_read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7759_read_40bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u64 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = ade7759_spi_read_reg_40(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static ssize_t ade7759_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7759_spi_write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7759_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = ade7759_spi_write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7759_reset(struct device *dev)
-{
- int ret;
- u16 val;
-
- ret = ade7759_spi_read_reg_16(dev,
- ADE7759_MODE,
- &val);
- if (ret < 0)
- return ret;
-
- val |= BIT(6); /* Software Chip Reset */
- return ade7759_spi_write_reg_16(dev,
- ADE7759_MODE,
- val);
-}
-
-static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY);
-static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_CFDEN);
-static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_CFNUM);
-static IIO_DEV_ATTR_CHKSUM(ade7759_read_8bit, ADE7759_CHKSUM);
-static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_PHCAL);
-static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_APOS);
-static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_SAGCYC);
-static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_SAGLVL);
-static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_LINECYC);
-static IIO_DEV_ATTR_LENERGY(ade7759_read_40bit, ADE7759_LENERGY);
-static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_GAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_GAIN(S_IWUSR | S_IRUGO,
- ade7759_read_16bit,
- ade7759_write_16bit,
- ADE7759_APGAIN);
-static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_CH1OS);
-static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
- ade7759_read_8bit,
- ade7759_write_8bit,
- ADE7759_CH2OS);
-
-static int ade7759_set_irq(struct device *dev, bool enable)
-{
- int ret;
- u8 irqen;
-
- ret = ade7759_spi_read_reg_8(dev, ADE7759_IRQEN, &irqen);
- if (ret)
- goto error_ret;
-
- if (enable)
-		/* Enable the interrupt raised when data is present in the waveform register */
-		irqen |= BIT(3);
- else
- irqen &= ~BIT(3);
-
- ret = ade7759_spi_write_reg_8(dev, ADE7759_IRQEN, irqen);
-
-error_ret:
- return ret;
-}
-
-/* Power down the device */
-static int ade7759_stop_device(struct device *dev)
-{
- int ret;
- u16 val;
-
- ret = ade7759_spi_read_reg_16(dev,
- ADE7759_MODE,
- &val);
- if (ret < 0) {
- dev_err(dev, "unable to power down the device, error: %d\n",
- ret);
- return ret;
- }
-
- val |= BIT(4); /* AD converters can be turned off */
-
- return ade7759_spi_write_reg_16(dev, ADE7759_MODE, val);
-}
-
-static int ade7759_initial_setup(struct iio_dev *indio_dev)
-{
- int ret;
- struct ade7759_state *st = iio_priv(indio_dev);
- struct device *dev = &indio_dev->dev;
-
- /* use low spi speed for init */
- st->us->mode = SPI_MODE_3;
- spi_setup(st->us);
-
- /* Disable IRQ */
- ret = ade7759_set_irq(dev, false);
- if (ret) {
- dev_err(dev, "disable irq failed");
- goto err_ret;
- }
-
- ade7759_reset(dev);
- msleep(ADE7759_STARTUP_DELAY);
-
-err_ret:
- return ret;
-}
-
-static ssize_t ade7759_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 t;
- int sps;
-
- ret = ade7759_spi_read_reg_16(dev,
- ADE7759_MODE,
- &t);
- if (ret)
- return ret;
-
- t = (t >> 3) & 0x3;
- sps = 27900 / (1 + t);
-
- return sprintf(buf, "%d\n", sps);
-}
-
-static ssize_t ade7759_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7759_state *st = iio_priv(indio_dev);
- u16 val;
- int ret;
- u16 reg, t;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- return ret;
- if (val == 0)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
-
- t = 27900 / val;
- if (t > 0)
- t--;
-
- if (t > 1)
- st->us->max_speed_hz = ADE7759_SPI_SLOW;
- else
- st->us->max_speed_hz = ADE7759_SPI_FAST;
-
- ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &reg);
- if (ret)
- goto out;
-
- reg &= ~(3 << 13);
- reg |= t << 13;
-
- ret = ade7759_spi_write_reg_16(dev, ADE7759_MODE, reg);
-
-out:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-static IIO_DEV_ATTR_TEMP_RAW(ade7759_read_8bit);
-static IIO_CONST_ATTR(in_temp_offset, "70 C");
-static IIO_CONST_ATTR(in_temp_scale, "1 C");
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ade7759_read_frequency,
- ade7759_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
-
-static struct attribute *ade7759_attributes[] = {
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_phcal.dev_attr.attr,
- &iio_dev_attr_cfden.dev_attr.attr,
- &iio_dev_attr_aenergy.dev_attr.attr,
- &iio_dev_attr_cfnum.dev_attr.attr,
- &iio_dev_attr_apos.dev_attr.attr,
- &iio_dev_attr_sagcyc.dev_attr.attr,
- &iio_dev_attr_saglvl.dev_attr.attr,
- &iio_dev_attr_linecyc.dev_attr.attr,
- &iio_dev_attr_lenergy.dev_attr.attr,
- &iio_dev_attr_chksum.dev_attr.attr,
- &iio_dev_attr_pga_gain.dev_attr.attr,
- &iio_dev_attr_active_power_gain.dev_attr.attr,
- &iio_dev_attr_choff_1.dev_attr.attr,
- &iio_dev_attr_choff_2.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7759_attribute_group = {
- .attrs = ade7759_attributes,
-};
-
-static const struct iio_info ade7759_info = {
- .attrs = &ade7759_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-static int ade7759_probe(struct spi_device *spi)
-{
- int ret;
- struct ade7759_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st = iio_priv(indio_dev);
- st->us = spi;
- mutex_init(&st->buf_lock);
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ade7759_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- /* Get the device into a sane initial state */
- ret = ade7759_initial_setup(indio_dev);
- if (ret)
- return ret;
-
- return iio_device_register(indio_dev);
-}
-
-/* fixme, confirm ordering in this function */
-static int ade7759_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- ade7759_stop_device(&indio_dev->dev);
-
- return 0;
-}
-
-static struct spi_driver ade7759_driver = {
- .driver = {
- .name = "ade7759",
- .owner = THIS_MODULE,
- },
- .probe = ade7759_probe,
- .remove = ade7759_remove,
-};
-module_spi_driver(ade7759_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7759");
diff --git a/drivers/staging/iio/meter/ade7759.h b/drivers/staging/iio/meter/ade7759.h
deleted file mode 100644
index f9ff1f8e7372..000000000000
--- a/drivers/staging/iio/meter/ade7759.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _ADE7759_H
-#define _ADE7759_H
-
-#define ADE7759_WAVEFORM 0x01
-#define ADE7759_AENERGY 0x02
-#define ADE7759_RSTENERGY 0x03
-#define ADE7759_STATUS 0x04
-#define ADE7759_RSTSTATUS 0x05
-#define ADE7759_MODE 0x06
-#define ADE7759_CFDEN 0x07
-#define ADE7759_CH1OS 0x08
-#define ADE7759_CH2OS 0x09
-#define ADE7759_GAIN 0x0A
-#define ADE7759_APGAIN 0x0B
-#define ADE7759_PHCAL 0x0C
-#define ADE7759_APOS 0x0D
-#define ADE7759_ZXTOUT 0x0E
-#define ADE7759_SAGCYC 0x0F
-#define ADE7759_IRQEN 0x10
-#define ADE7759_SAGLVL 0x11
-#define ADE7759_TEMP 0x12
-#define ADE7759_LINECYC 0x13
-#define ADE7759_LENERGY 0x14
-#define ADE7759_CFNUM 0x15
-#define ADE7759_CHKSUM 0x1E
-#define ADE7759_DIEREV 0x1F
-
-#define ADE7759_READ_REG(a) a
-#define ADE7759_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7759_MAX_TX 6
-#define ADE7759_MAX_RX 6
-#define ADE7759_STARTUP_DELAY 1
-
-#define ADE7759_SPI_SLOW (u32)(300 * 1000)
-#define ADE7759_SPI_BURST (u32)(1000 * 1000)
-#define ADE7759_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7759_state - device instance specific data
- * @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct ade7759_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7759_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7759_MAX_RX];
-};
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
deleted file mode 100644
index 07cfe28b24e2..000000000000
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (I2C Bus)
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include "ade7854.h"
-
-static int ade7854_i2c_write_reg_8(struct device *dev,
- u16 reg_address,
- u8 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
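-	/* frame: 16-bit register address (MSB first) followed by the payload */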
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
- st->tx[2] = value;
-
- ret = i2c_master_send(st->i2c, st->tx, 3);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_i2c_write_reg_16(struct device *dev,
- u16 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
- st->tx[2] = (value >> 8) & 0xFF;
- st->tx[3] = value & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 4);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_i2c_write_reg_24(struct device *dev,
- u16 reg_address,
- u32 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
- st->tx[2] = (value >> 16) & 0xFF;
- st->tx[3] = (value >> 8) & 0xFF;
- st->tx[4] = value & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 5);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_i2c_write_reg_32(struct device *dev,
- u16 reg_address,
- u32 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
- st->tx[2] = (value >> 24) & 0xFF;
- st->tx[3] = (value >> 16) & 0xFF;
- st->tx[4] = (value >> 8) & 0xFF;
- st->tx[5] = value & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 6);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_i2c_read_reg_8(struct device *dev,
- u16 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
- ret = i2c_master_recv(st->i2c, st->rx, 1);
- if (ret)
- goto out;
-
- *val = st->rx[0];
-out:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_i2c_read_reg_16(struct device *dev,
- u16 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
- ret = i2c_master_recv(st->i2c, st->rx, 2);
- if (ret)
- goto out;
-
- *val = (st->rx[0] << 8) | st->rx[1];
-out:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_i2c_read_reg_24(struct device *dev,
- u16 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
- ret = i2c_master_recv(st->i2c, st->rx, 3);
- if (ret)
- goto out;
-
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-out:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_i2c_read_reg_32(struct device *dev,
- u16 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = (reg_address >> 8) & 0xFF;
- st->tx[1] = reg_address & 0xFF;
-
- ret = i2c_master_send(st->i2c, st->tx, 2);
- if (ret)
- goto out;
-
-	ret = i2c_master_recv(st->i2c, st->rx, 4);
- if (ret)
- goto out;
-
- *val = (st->rx[0] << 24) | (st->rx[1] << 16) |
- (st->rx[2] << 8) | st->rx[3];
-out:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ade7854_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
- st->read_reg_8 = ade7854_i2c_read_reg_8;
- st->read_reg_16 = ade7854_i2c_read_reg_16;
- st->read_reg_24 = ade7854_i2c_read_reg_24;
- st->read_reg_32 = ade7854_i2c_read_reg_32;
- st->write_reg_8 = ade7854_i2c_write_reg_8;
- st->write_reg_16 = ade7854_i2c_write_reg_16;
- st->write_reg_24 = ade7854_i2c_write_reg_24;
- st->write_reg_32 = ade7854_i2c_write_reg_32;
- st->i2c = client;
- st->irq = client->irq;
-
- return ade7854_probe(indio_dev, &client->dev);
-}
-
-static int ade7854_i2c_remove(struct i2c_client *client)
-{
- return ade7854_remove(i2c_get_clientdata(client));
-}
-
-static const struct i2c_device_id ade7854_id[] = {
- { "ade7854", 0 },
- { "ade7858", 0 },
- { "ade7868", 0 },
- { "ade7878", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ade7854_id);
-
-static struct i2c_driver ade7854_i2c_driver = {
- .driver = {
- .name = "ade7854",
- },
- .probe = ade7854_i2c_probe,
- .remove = ade7854_i2c_remove,
- .id_table = ade7854_id,
-};
-module_i2c_driver(ade7854_i2c_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC I2C Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
deleted file mode 100644
index 9b255a5f62c3..000000000000
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (SPI Bus)
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include "ade7854.h"
-
-static int ade7854_spi_write_reg_8(struct device *dev,
- u16 reg_address,
- u8 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 4,
- };
-
- mutex_lock(&st->buf_lock);
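-	/* frame: write command byte, 16-bit register address (MSB first), then the value */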
- st->tx[0] = ADE7854_WRITE_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
- st->tx[3] = value & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_write_reg_16(struct device *dev,
- u16 reg_address,
- u16 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 5,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_WRITE_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
- st->tx[3] = (value >> 8) & 0xFF;
- st->tx[4] = value & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_write_reg_24(struct device *dev,
- u16 reg_address,
- u32 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 6,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_WRITE_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
- st->tx[3] = (value >> 16) & 0xFF;
- st->tx[4] = (value >> 8) & 0xFF;
- st->tx[5] = value & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_write_reg_32(struct device *dev,
- u16 reg_address,
- u32 value)
-{
- int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 7,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_WRITE_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
- st->tx[3] = (value >> 24) & 0xFF;
- st->tx[4] = (value >> 16) & 0xFF;
- st->tx[5] = (value >> 8) & 0xFF;
- st->tx[6] = value & 0xFF;
-
- ret = spi_sync_transfer(st->spi, &xfer, 1);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int ade7854_spi_read_reg_8(struct device *dev,
- u16 reg_address,
- u8 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 1,
- }
- };
-
- mutex_lock(&st->buf_lock);
-
- st->tx[0] = ADE7854_READ_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
-
- ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = st->rx[0];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_spi_read_reg_16(struct device *dev,
- u16 reg_address,
- u16 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- }
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7854_READ_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
-
- ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = be16_to_cpup((const __be16 *)st->rx);
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_spi_read_reg_24(struct device *dev,
- u16 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 3,
- }
- };
-
- mutex_lock(&st->buf_lock);
-
- st->tx[0] = ADE7854_READ_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
-
- ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_spi_read_reg_32(struct device *dev,
- u16 reg_address,
- u32 *val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 3,
- }, {
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 4,
- }
- };
-
- mutex_lock(&st->buf_lock);
-
- st->tx[0] = ADE7854_READ_REG;
- st->tx[1] = (reg_address >> 8) & 0xFF;
- st->tx[2] = reg_address & 0xFF;
-
- ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->spi->dev, "problem when reading 32 bit register 0x%02X",
- reg_address);
- goto error_ret;
- }
- *val = be32_to_cpup((const __be32 *)st->rx);
-
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-static int ade7854_spi_probe(struct spi_device *spi)
-{
- struct ade7854_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
- st->read_reg_8 = ade7854_spi_read_reg_8;
- st->read_reg_16 = ade7854_spi_read_reg_16;
- st->read_reg_24 = ade7854_spi_read_reg_24;
- st->read_reg_32 = ade7854_spi_read_reg_32;
- st->write_reg_8 = ade7854_spi_write_reg_8;
- st->write_reg_16 = ade7854_spi_write_reg_16;
- st->write_reg_24 = ade7854_spi_write_reg_24;
- st->write_reg_32 = ade7854_spi_write_reg_32;
- st->irq = spi->irq;
- st->spi = spi;
-
- return ade7854_probe(indio_dev, &spi->dev);
-}
-
-static int ade7854_spi_remove(struct spi_device *spi)
-{
- ade7854_remove(spi_get_drvdata(spi));
-
- return 0;
-}
-
-static const struct spi_device_id ade7854_id[] = {
- { "ade7854", 0 },
- { "ade7858", 0 },
- { "ade7868", 0 },
- { "ade7878", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(spi, ade7854_id);
-
-static struct spi_driver ade7854_driver = {
- .driver = {
- .name = "ade7854",
- .owner = THIS_MODULE,
- },
- .probe = ade7854_spi_probe,
- .remove = ade7854_spi_remove,
- .id_table = ade7854_id,
-};
-module_spi_driver(ade7854_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 SPI Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
deleted file mode 100644
index a83883596dbc..000000000000
--- a/drivers/staging/iio/meter/ade7854.c
+++ /dev/null
@@ -1,578 +0,0 @@
-/*
- * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "meter.h"
-#include "ade7854.h"
-
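-/*
- * Bus-agnostic core: every register access goes through the read_reg_* and
- * write_reg_* callbacks that the I2C and SPI front ends install at probe time.
- */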
-static ssize_t ade7854_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u8 val = 0;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = st->read_reg_8(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7854_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u16 val = 0;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = st->read_reg_16(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7854_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u32 val;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = st->read_reg_24(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7854_read_32bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- u32 val = 0;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- ret = st->read_reg_32(dev, this_attr->address, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t ade7854_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- int ret;
- u8 val;
-
- ret = kstrtou8(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = st->write_reg_8(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7854_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = st->write_reg_16(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7854_write_24bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- int ret;
- u32 val;
-
- ret = kstrtou32(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = st->write_reg_24(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static ssize_t ade7854_write_32bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-
- int ret;
- u32 val;
-
- ret = kstrtou32(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = st->write_reg_32(dev, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int ade7854_reset(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ade7854_state *st = iio_priv(indio_dev);
-	u16 val;
-	int ret;
-
-	ret = st->read_reg_16(dev, ADE7854_CONFIG, &val);
-	if (ret < 0)
-		return ret;
-
- val |= BIT(7); /* Software Chip Reset */
-
- return st->write_reg_16(dev, ADE7854_CONFIG, val);
-}
-
-static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_AIGAIN);
-static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_BIGAIN);
-static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_CIGAIN);
-static IIO_DEV_ATTR_NIGAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_NIGAIN);
-static IIO_DEV_ATTR_AVGAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_AVGAIN);
-static IIO_DEV_ATTR_BVGAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_BVGAIN);
-static IIO_DEV_ATTR_CVGAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_CVGAIN);
-static IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_AVAGAIN);
-static IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_BVAGAIN);
-static IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_CVAGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_AWATTOS);
-static IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_BWATTOS);
-static IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_CWATTOS);
-static IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_AVARGAIN);
-static IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_BVARGAIN);
-static IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_CVARGAIN);
-static IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_AVAROS);
-static IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_BVAROS);
-static IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
- ade7854_read_24bit,
- ade7854_write_24bit,
- ADE7854_CVAROS);
-static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
- ade7854_read_32bit,
- ade7854_write_32bit,
- ADE7854_VPEAK);
-static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
-		ade7854_read_32bit,
-		ade7854_write_32bit,
-		ADE7854_IPEAK);
-static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
- ade7854_read_16bit,
- ade7854_write_16bit,
- ADE7854_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
- ade7854_read_16bit,
- ade7854_write_16bit,
- ADE7854_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
- ade7854_read_16bit,
- ade7854_write_16bit,
- ADE7854_CPHCAL);
-static IIO_DEV_ATTR_CF1DEN(S_IWUSR | S_IRUGO,
- ade7854_read_16bit,
- ade7854_write_16bit,
- ADE7854_CF1DEN);
-static IIO_DEV_ATTR_CF2DEN(S_IWUSR | S_IRUGO,
- ade7854_read_16bit,
- ade7854_write_16bit,
- ADE7854_CF2DEN);
-static IIO_DEV_ATTR_CF3DEN(S_IWUSR | S_IRUGO,
- ade7854_read_16bit,
- ade7854_write_16bit,
- ADE7854_CF3DEN);
-static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
- ade7854_read_16bit,
- ade7854_write_16bit,
- ADE7854_LINECYC);
-static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
- ade7854_read_8bit,
- ade7854_write_8bit,
- ADE7854_SAGCYC);
-static IIO_DEV_ATTR_CFCYC(S_IWUSR | S_IRUGO,
- ade7854_read_8bit,
- ade7854_write_8bit,
- ADE7854_CFCYC);
-static IIO_DEV_ATTR_PEAKCYC(S_IWUSR | S_IRUGO,
- ade7854_read_8bit,
- ade7854_write_8bit,
- ADE7854_PEAKCYC);
-static IIO_DEV_ATTR_CHKSUM(ade7854_read_24bit,
- ADE7854_CHECKSUM);
-static IIO_DEV_ATTR_ANGLE0(ade7854_read_24bit,
- ADE7854_ANGLE0);
-static IIO_DEV_ATTR_ANGLE1(ade7854_read_24bit,
- ADE7854_ANGLE1);
-static IIO_DEV_ATTR_ANGLE2(ade7854_read_24bit,
- ADE7854_ANGLE2);
-static IIO_DEV_ATTR_AIRMS(S_IRUGO,
- ade7854_read_24bit,
- NULL,
- ADE7854_AIRMS);
-static IIO_DEV_ATTR_BIRMS(S_IRUGO,
- ade7854_read_24bit,
- NULL,
- ADE7854_BIRMS);
-static IIO_DEV_ATTR_CIRMS(S_IRUGO,
- ade7854_read_24bit,
- NULL,
- ADE7854_CIRMS);
-static IIO_DEV_ATTR_NIRMS(S_IRUGO,
- ade7854_read_24bit,
- NULL,
- ADE7854_NIRMS);
-static IIO_DEV_ATTR_AVRMS(S_IRUGO,
- ade7854_read_24bit,
- NULL,
- ADE7854_AVRMS);
-static IIO_DEV_ATTR_BVRMS(S_IRUGO,
- ade7854_read_24bit,
- NULL,
- ADE7854_BVRMS);
-static IIO_DEV_ATTR_CVRMS(S_IRUGO,
- ade7854_read_24bit,
- NULL,
- ADE7854_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
-		ade7854_read_16bit,
-		ade7854_write_16bit,
-		ADE7854_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
-		ade7854_read_16bit,
-		ade7854_write_16bit,
-		ADE7854_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
-		ade7854_read_16bit,
-		ade7854_write_16bit,
-		ADE7854_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
-		ade7854_read_16bit,
-		ade7854_write_16bit,
-		ADE7854_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
-		ade7854_read_16bit,
-		ade7854_write_16bit,
-		ADE7854_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
-		ade7854_read_16bit,
-		ade7854_write_16bit,
-		ADE7854_CVRMSOS);
-static IIO_DEV_ATTR_VOLT_A(ade7854_read_24bit,
- ADE7854_VAWV);
-static IIO_DEV_ATTR_VOLT_B(ade7854_read_24bit,
- ADE7854_VBWV);
-static IIO_DEV_ATTR_VOLT_C(ade7854_read_24bit,
- ADE7854_VCWV);
-static IIO_DEV_ATTR_CURRENT_A(ade7854_read_24bit,
- ADE7854_IAWV);
-static IIO_DEV_ATTR_CURRENT_B(ade7854_read_24bit,
- ADE7854_IBWV);
-static IIO_DEV_ATTR_CURRENT_C(ade7854_read_24bit,
- ADE7854_ICWV);
-static IIO_DEV_ATTR_AWATTHR(ade7854_read_32bit,
- ADE7854_AWATTHR);
-static IIO_DEV_ATTR_BWATTHR(ade7854_read_32bit,
- ADE7854_BWATTHR);
-static IIO_DEV_ATTR_CWATTHR(ade7854_read_32bit,
- ADE7854_CWATTHR);
-static IIO_DEV_ATTR_AFWATTHR(ade7854_read_32bit,
- ADE7854_AFWATTHR);
-static IIO_DEV_ATTR_BFWATTHR(ade7854_read_32bit,
- ADE7854_BFWATTHR);
-static IIO_DEV_ATTR_CFWATTHR(ade7854_read_32bit,
- ADE7854_CFWATTHR);
-static IIO_DEV_ATTR_AVARHR(ade7854_read_32bit,
- ADE7854_AVARHR);
-static IIO_DEV_ATTR_BVARHR(ade7854_read_32bit,
- ADE7854_BVARHR);
-static IIO_DEV_ATTR_CVARHR(ade7854_read_32bit,
- ADE7854_CVARHR);
-static IIO_DEV_ATTR_AVAHR(ade7854_read_32bit,
- ADE7854_AVAHR);
-static IIO_DEV_ATTR_BVAHR(ade7854_read_32bit,
- ADE7854_BVAHR);
-static IIO_DEV_ATTR_CVAHR(ade7854_read_32bit,
- ADE7854_CVAHR);
-
-static int ade7854_set_irq(struct device *dev, bool enable)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ade7854_state *st = iio_priv(indio_dev);
-	int ret;
-	u32 irqen;
-
-	ret = st->read_reg_32(dev, ADE7854_MASK0, &irqen);
-	if (ret)
-		return ret;
-
-	if (enable)
-		/*
-		 * 1: interrupt enabled when all periodical
-		 * (at 8 kHz rate) DSP computations finish.
-		 */
-		irqen |= BIT(17);
-	else
-		irqen &= ~BIT(17);
-
-	return st->write_reg_32(dev, ADE7854_MASK0, irqen);
-}
-
-static int ade7854_initial_setup(struct iio_dev *indio_dev)
-{
-	int ret;
-	struct device *dev = &indio_dev->dev;
-
-	/* Disable IRQ */
-	ret = ade7854_set_irq(dev, false);
-	if (ret) {
-		dev_err(dev, "disable irq failed\n");
-		goto err_ret;
-	}
-
-	ret = ade7854_reset(dev);
-	if (ret) {
-		dev_err(dev, "reset failed\n");
-		goto err_ret;
-	}
-
-	msleep(ADE7854_STARTUP_DELAY);
-
-err_ret:
-	return ret;
-}
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("8000");
-
-static IIO_CONST_ATTR(name, "ade7854");
-
-static struct attribute *ade7854_attributes[] = {
- &iio_dev_attr_aigain.dev_attr.attr,
- &iio_dev_attr_bigain.dev_attr.attr,
- &iio_dev_attr_cigain.dev_attr.attr,
- &iio_dev_attr_nigain.dev_attr.attr,
- &iio_dev_attr_avgain.dev_attr.attr,
- &iio_dev_attr_bvgain.dev_attr.attr,
- &iio_dev_attr_cvgain.dev_attr.attr,
- &iio_dev_attr_linecyc.dev_attr.attr,
- &iio_dev_attr_sagcyc.dev_attr.attr,
- &iio_dev_attr_cfcyc.dev_attr.attr,
- &iio_dev_attr_peakcyc.dev_attr.attr,
- &iio_dev_attr_chksum.dev_attr.attr,
- &iio_dev_attr_apparent_power_a_gain.dev_attr.attr,
- &iio_dev_attr_apparent_power_b_gain.dev_attr.attr,
- &iio_dev_attr_apparent_power_c_gain.dev_attr.attr,
- &iio_dev_attr_active_power_a_offset.dev_attr.attr,
- &iio_dev_attr_active_power_b_offset.dev_attr.attr,
- &iio_dev_attr_active_power_c_offset.dev_attr.attr,
- &iio_dev_attr_reactive_power_a_gain.dev_attr.attr,
- &iio_dev_attr_reactive_power_b_gain.dev_attr.attr,
- &iio_dev_attr_reactive_power_c_gain.dev_attr.attr,
- &iio_dev_attr_reactive_power_a_offset.dev_attr.attr,
- &iio_dev_attr_reactive_power_b_offset.dev_attr.attr,
- &iio_dev_attr_reactive_power_c_offset.dev_attr.attr,
- &iio_dev_attr_awatthr.dev_attr.attr,
- &iio_dev_attr_bwatthr.dev_attr.attr,
- &iio_dev_attr_cwatthr.dev_attr.attr,
- &iio_dev_attr_afwatthr.dev_attr.attr,
- &iio_dev_attr_bfwatthr.dev_attr.attr,
- &iio_dev_attr_cfwatthr.dev_attr.attr,
- &iio_dev_attr_avarhr.dev_attr.attr,
- &iio_dev_attr_bvarhr.dev_attr.attr,
- &iio_dev_attr_cvarhr.dev_attr.attr,
- &iio_dev_attr_angle0.dev_attr.attr,
- &iio_dev_attr_angle1.dev_attr.attr,
- &iio_dev_attr_angle2.dev_attr.attr,
- &iio_dev_attr_avahr.dev_attr.attr,
- &iio_dev_attr_bvahr.dev_attr.attr,
- &iio_dev_attr_cvahr.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_const_attr_name.dev_attr.attr,
- &iio_dev_attr_vpeak.dev_attr.attr,
- &iio_dev_attr_ipeak.dev_attr.attr,
- &iio_dev_attr_aphcal.dev_attr.attr,
- &iio_dev_attr_bphcal.dev_attr.attr,
- &iio_dev_attr_cphcal.dev_attr.attr,
- &iio_dev_attr_cf1den.dev_attr.attr,
- &iio_dev_attr_cf2den.dev_attr.attr,
- &iio_dev_attr_cf3den.dev_attr.attr,
- &iio_dev_attr_airms.dev_attr.attr,
- &iio_dev_attr_birms.dev_attr.attr,
- &iio_dev_attr_cirms.dev_attr.attr,
- &iio_dev_attr_nirms.dev_attr.attr,
- &iio_dev_attr_avrms.dev_attr.attr,
- &iio_dev_attr_bvrms.dev_attr.attr,
- &iio_dev_attr_cvrms.dev_attr.attr,
- &iio_dev_attr_airmsos.dev_attr.attr,
- &iio_dev_attr_birmsos.dev_attr.attr,
- &iio_dev_attr_cirmsos.dev_attr.attr,
- &iio_dev_attr_avrmsos.dev_attr.attr,
- &iio_dev_attr_bvrmsos.dev_attr.attr,
- &iio_dev_attr_cvrmsos.dev_attr.attr,
- &iio_dev_attr_volt_a.dev_attr.attr,
- &iio_dev_attr_volt_b.dev_attr.attr,
- &iio_dev_attr_volt_c.dev_attr.attr,
- &iio_dev_attr_current_a.dev_attr.attr,
- &iio_dev_attr_current_b.dev_attr.attr,
- &iio_dev_attr_current_c.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ade7854_attribute_group = {
- .attrs = ade7854_attributes,
-};
-
-static const struct iio_info ade7854_info = {
- .attrs = &ade7854_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-int ade7854_probe(struct iio_dev *indio_dev, struct device *dev)
-{
- int ret;
- struct ade7854_state *st = iio_priv(indio_dev);
- /* setup the industrialio driver allocated elements */
- mutex_init(&st->buf_lock);
-
- indio_dev->dev.parent = dev;
- indio_dev->info = &ade7854_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = ade7854_initial_setup(indio_dev);
- if (ret)
- goto error_unreg_dev;
-
- return 0;
-
-error_unreg_dev:
- iio_device_unregister(indio_dev);
- return ret;
-}
-EXPORT_SYMBOL(ade7854_probe);
-
-int ade7854_remove(struct iio_dev *indio_dev)
-{
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-EXPORT_SYMBOL(ade7854_remove);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Energy Meter");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
deleted file mode 100644
index 52f4195cf6f4..000000000000
--- a/drivers/staging/iio/meter/ade7854.h
+++ /dev/null
@@ -1,174 +0,0 @@
-#ifndef _ADE7854_H
-#define _ADE7854_H
-
-#define ADE7854_AIGAIN 0x4380
-#define ADE7854_AVGAIN 0x4381
-#define ADE7854_BIGAIN 0x4382
-#define ADE7854_BVGAIN 0x4383
-#define ADE7854_CIGAIN 0x4384
-#define ADE7854_CVGAIN 0x4385
-#define ADE7854_NIGAIN 0x4386
-#define ADE7854_AIRMSOS 0x4387
-#define ADE7854_AVRMSOS 0x4388
-#define ADE7854_BIRMSOS 0x4389
-#define ADE7854_BVRMSOS 0x438A
-#define ADE7854_CIRMSOS 0x438B
-#define ADE7854_CVRMSOS 0x438C
-#define ADE7854_NIRMSOS 0x438D
-#define ADE7854_AVAGAIN 0x438E
-#define ADE7854_BVAGAIN 0x438F
-#define ADE7854_CVAGAIN 0x4390
-#define ADE7854_AWGAIN 0x4391
-#define ADE7854_AWATTOS 0x4392
-#define ADE7854_BWGAIN 0x4393
-#define ADE7854_BWATTOS 0x4394
-#define ADE7854_CWGAIN 0x4395
-#define ADE7854_CWATTOS 0x4396
-#define ADE7854_AVARGAIN 0x4397
-#define ADE7854_AVAROS 0x4398
-#define ADE7854_BVARGAIN 0x4399
-#define ADE7854_BVAROS 0x439A
-#define ADE7854_CVARGAIN 0x439B
-#define ADE7854_CVAROS 0x439C
-#define ADE7854_AFWGAIN 0x439D
-#define ADE7854_AFWATTOS 0x439E
-#define ADE7854_BFWGAIN 0x439F
-#define ADE7854_BFWATTOS 0x43A0
-#define ADE7854_CFWGAIN 0x43A1
-#define ADE7854_CFWATTOS 0x43A2
-#define ADE7854_AFVARGAIN 0x43A3
-#define ADE7854_AFVAROS 0x43A4
-#define ADE7854_BFVARGAIN 0x43A5
-#define ADE7854_BFVAROS 0x43A6
-#define ADE7854_CFVARGAIN 0x43A7
-#define ADE7854_CFVAROS 0x43A8
-#define ADE7854_VATHR1 0x43A9
-#define ADE7854_VATHR0 0x43AA
-#define ADE7854_WTHR1 0x43AB
-#define ADE7854_WTHR0 0x43AC
-#define ADE7854_VARTHR1 0x43AD
-#define ADE7854_VARTHR0 0x43AE
-#define ADE7854_RSV 0x43AF
-#define ADE7854_VANOLOAD 0x43B0
-#define ADE7854_APNOLOAD 0x43B1
-#define ADE7854_VARNOLOAD 0x43B2
-#define ADE7854_VLEVEL 0x43B3
-#define ADE7854_DICOEFF 0x43B5
-#define ADE7854_HPFDIS 0x43B6
-#define ADE7854_ISUMLVL 0x43B8
-#define ADE7854_ISUM 0x43BF
-#define ADE7854_AIRMS 0x43C0
-#define ADE7854_AVRMS 0x43C1
-#define ADE7854_BIRMS 0x43C2
-#define ADE7854_BVRMS 0x43C3
-#define ADE7854_CIRMS 0x43C4
-#define ADE7854_CVRMS 0x43C5
-#define ADE7854_NIRMS 0x43C6
-#define ADE7854_RUN 0xE228
-#define ADE7854_AWATTHR 0xE400
-#define ADE7854_BWATTHR 0xE401
-#define ADE7854_CWATTHR 0xE402
-#define ADE7854_AFWATTHR 0xE403
-#define ADE7854_BFWATTHR 0xE404
-#define ADE7854_CFWATTHR 0xE405
-#define ADE7854_AVARHR 0xE406
-#define ADE7854_BVARHR 0xE407
-#define ADE7854_CVARHR 0xE408
-#define ADE7854_AFVARHR 0xE409
-#define ADE7854_BFVARHR 0xE40A
-#define ADE7854_CFVARHR 0xE40B
-#define ADE7854_AVAHR 0xE40C
-#define ADE7854_BVAHR 0xE40D
-#define ADE7854_CVAHR 0xE40E
-#define ADE7854_IPEAK 0xE500
-#define ADE7854_VPEAK 0xE501
-#define ADE7854_STATUS0 0xE502
-#define ADE7854_STATUS1 0xE503
-#define ADE7854_OILVL 0xE507
-#define ADE7854_OVLVL 0xE508
-#define ADE7854_SAGLVL 0xE509
-#define ADE7854_MASK0 0xE50A
-#define ADE7854_MASK1 0xE50B
-#define ADE7854_IAWV 0xE50C
-#define ADE7854_IBWV 0xE50D
-#define ADE7854_ICWV 0xE50E
-#define ADE7854_VAWV 0xE510
-#define ADE7854_VBWV 0xE511
-#define ADE7854_VCWV 0xE512
-#define ADE7854_AWATT 0xE513
-#define ADE7854_BWATT 0xE514
-#define ADE7854_CWATT 0xE515
-#define ADE7854_AVA 0xE519
-#define ADE7854_BVA 0xE51A
-#define ADE7854_CVA 0xE51B
-#define ADE7854_CHECKSUM 0xE51F
-#define ADE7854_VNOM 0xE520
-#define ADE7854_PHSTATUS 0xE600
-#define ADE7854_ANGLE0 0xE601
-#define ADE7854_ANGLE1 0xE602
-#define ADE7854_ANGLE2 0xE603
-#define ADE7854_PERIOD 0xE607
-#define ADE7854_PHNOLOAD 0xE608
-#define ADE7854_LINECYC 0xE60C
-#define ADE7854_ZXTOUT 0xE60D
-#define ADE7854_COMPMODE 0xE60E
-#define ADE7854_GAIN 0xE60F
-#define ADE7854_CFMODE 0xE610
-#define ADE7854_CF1DEN 0xE611
-#define ADE7854_CF2DEN 0xE612
-#define ADE7854_CF3DEN 0xE613
-#define ADE7854_APHCAL 0xE614
-#define ADE7854_BPHCAL 0xE615
-#define ADE7854_CPHCAL 0xE616
-#define ADE7854_PHSIGN 0xE617
-#define ADE7854_CONFIG 0xE618
-#define ADE7854_MMODE 0xE700
-#define ADE7854_ACCMODE 0xE701
-#define ADE7854_LCYCMODE 0xE702
-#define ADE7854_PEAKCYC 0xE703
-#define ADE7854_SAGCYC 0xE704
-#define ADE7854_CFCYC 0xE705
-#define ADE7854_HSDC_CFG 0xE706
-#define ADE7854_CONFIG2 0xEC01
-
-#define ADE7854_READ_REG 0x1
-#define ADE7854_WRITE_REG 0x0
-
-#define ADE7854_MAX_TX 7
-#define ADE7854_MAX_RX 7
-#define ADE7854_STARTUP_DELAY 1
-
-#define ADE7854_SPI_SLOW (u32)(300 * 1000)
-#define ADE7854_SPI_BURST (u32)(1000 * 1000)
-#define ADE7854_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7854_state - device instance specific data
- * @spi:		actual spi_device (used by the SPI bus front end)
- * @i2c:		actual i2c_client (used by the I2C bus front end)
- * @read_reg_8:		bus-specific callback to read an 8-bit register
- * @read_reg_16:	bus-specific callback to read a 16-bit register
- * @read_reg_24:	bus-specific callback to read a 24-bit register
- * @read_reg_32:	bus-specific callback to read a 32-bit register
- * @write_reg_8:	bus-specific callback to write an 8-bit register
- * @write_reg_16:	bus-specific callback to write a 16-bit register
- * @write_reg_24:	bus-specific callback to write a 24-bit register
- * @write_reg_32:	bus-specific callback to write a 32-bit register
- * @irq:		irq number of the device
- * @buf_lock:		mutex to protect tx and rx
- * @tx:			transmit buffer
- * @rx:			receive buffer
- */
-struct ade7854_state {
- struct spi_device *spi;
- struct i2c_client *i2c;
- int (*read_reg_8)(struct device *, u16, u8 *);
- int (*read_reg_16)(struct device *, u16, u16 *);
- int (*read_reg_24)(struct device *, u16, u32 *);
- int (*read_reg_32)(struct device *, u16, u32 *);
- int (*write_reg_8)(struct device *, u16, u8);
- int (*write_reg_16)(struct device *, u16, u16);
- int (*write_reg_24)(struct device *, u16, u32);
- int (*write_reg_32)(struct device *, u16, u32);
- int irq;
- struct mutex buf_lock;
- u8 tx[ADE7854_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7854_MAX_RX];
-};
-
-int ade7854_probe(struct iio_dev *indio_dev, struct device *dev);
-int ade7854_remove(struct iio_dev *indio_dev);
-
-#endif
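For context: the register accessors above are function pointers precisely so that the SPI and I2C front ends can share this core module. A minimal sketch of how a bus front end would wire them up before handing control to ade7854_probe() is shown below; the ade7854_spi_read_reg_*/write_reg_* helpers and the probe function name are hypothetical stand-ins, not the actual staging front-end sources.

static int ade7854_spi_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct ade7854_state *st;

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->spi = spi;
	st->irq = spi->irq;
	/* hypothetical bus helpers that frame the SPI transfers */
	st->read_reg_8 = ade7854_spi_read_reg_8;
	st->read_reg_16 = ade7854_spi_read_reg_16;
	st->read_reg_24 = ade7854_spi_read_reg_24;
	st->read_reg_32 = ade7854_spi_read_reg_32;
	st->write_reg_8 = ade7854_spi_write_reg_8;
	st->write_reg_16 = ade7854_spi_write_reg_16;
	st->write_reg_24 = ade7854_spi_write_reg_24;
	st->write_reg_32 = ade7854_spi_write_reg_32;

	return ade7854_probe(indio_dev, &spi->dev);
}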
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
deleted file mode 100644
index dfba510f29be..000000000000
--- a/drivers/staging/iio/meter/meter.h
+++ /dev/null
@@ -1,400 +0,0 @@
-#ifndef _METER_H
-#define _METER_H
-
-#include <linux/iio/sysfs.h>
-
-/* sysfs attribute macros shared by the metering IC drivers */
-
-#define IIO_DEV_ATTR_CURRENT_A_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(current_a_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_B_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(current_b_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_C_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(current_c_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VOLT_A_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(volt_a_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VOLT_B_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(volt_b_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VOLT_C_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(volt_c_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(reactive_power_a_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(reactive_power_b_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(reactive_power_c_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(active_power_a_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(active_power_b_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(active_power_c_offset, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_A_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(current_a_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_B_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(current_b_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_C_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(current_c_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(apparent_power_a_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(apparent_power_b_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(apparent_power_c_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACTIVE_POWER_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(active_power_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(active_power_a_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(active_power_b_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(active_power_c_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(reactive_power_a_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(reactive_power_b_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(reactive_power_c_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_A(_show, _addr) \
- IIO_DEVICE_ATTR(current_a, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_B(_show, _addr) \
- IIO_DEVICE_ATTR(current_b, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_CURRENT_C(_show, _addr) \
- IIO_DEVICE_ATTR(current_c, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_VOLT_A(_show, _addr) \
- IIO_DEVICE_ATTR(volt_a, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_VOLT_B(_show, _addr) \
- IIO_DEVICE_ATTR(volt_b, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_VOLT_C(_show, _addr) \
- IIO_DEVICE_ATTR(volt_c, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_AENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(aenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_LENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(lenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_RAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(raenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_LAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(laenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_VAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(vaenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_LVAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(lvaenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_RVAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(rvaenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_LVARENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(lvarenergy, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_CHKSUM(_show, _addr) \
- IIO_DEVICE_ATTR(chksum, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ANGLE0(_show, _addr) \
- IIO_DEVICE_ATTR(angle0, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ANGLE1(_show, _addr) \
- IIO_DEVICE_ATTR(angle1, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_ANGLE2(_show, _addr) \
- IIO_DEVICE_ATTR(angle2, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_AWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(awatthr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_BWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(bwatthr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_CWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(cwatthr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_AFWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(afwatthr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_BFWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(bfwatthr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_CFWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(cfwatthr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_AVARHR(_show, _addr) \
- IIO_DEVICE_ATTR(avarhr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_BVARHR(_show, _addr) \
- IIO_DEVICE_ATTR(bvarhr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_CVARHR(_show, _addr) \
- IIO_DEVICE_ATTR(cvarhr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_AVAHR(_show, _addr) \
- IIO_DEVICE_ATTR(avahr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_BVAHR(_show, _addr) \
- IIO_DEVICE_ATTR(bvahr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_CVAHR(_show, _addr) \
- IIO_DEVICE_ATTR(cvahr, S_IRUGO, _show, NULL, _addr)
-
-#define IIO_DEV_ATTR_IOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(ios, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_PHCAL(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(phcal, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_APHCAL(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(aphcal, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BPHCAL(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(bphcal, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CPHCAL(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cphcal, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_APOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(apos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AAPOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(aapos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BAPOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(bapos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CAPOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(capos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AVRMSGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(avrmsgain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BVRMSGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(bvrmsgain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CVRMSGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cvrmsgain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AIGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(aigain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BIGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(bigain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CIGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cigain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_NIGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(nigain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AVGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(avgain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BVGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(bvgain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CVGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cvgain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_WGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(wgain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_WDIV(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(wdiv, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CFNUM(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cfnum, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CFDEN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cfden, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CF1DEN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cf1den, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CF2DEN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cf2den, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CF3DEN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cf3den, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_IRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(irms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vrms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AIRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(airms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BIRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(birms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CIRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cirms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_NIRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(nirms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AVRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(avrms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BVRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(bvrms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CVRMS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cvrms, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_IRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(irmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vrmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AIRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(airmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BIRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(birmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CIRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cirmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_AVRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(avrmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_BVRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(bvrmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CVRMSOS(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cvrmsos, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VAGAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vagain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_PGA_GAIN(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(pga_gain, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VADIV(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vadiv, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_LINECYC(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(linecyc, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_SAGCYC(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(sagcyc, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CFCYC(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(cfcyc, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_PEAKCYC(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(peakcyc, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_SAGLVL(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(saglvl, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_IPKLVL(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(ipklvl, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VPKLVL(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vpklvl, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_IPEAK(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(ipeak, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_RIPEAK(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(ripeak, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VPEAK(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vpeak, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_RVPEAK(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(rvpeak, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_VPERIOD(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(vperiod, _mode, _show, _store, _addr)
-
-#define IIO_DEV_ATTR_CH_OFF(_num, _mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(choff_##_num, _mode, _show, _store, _addr)
-
-/* active energy register, AENERGY, is more than half full */
-#define IIO_EVENT_ATTR_AENERGY_HALF_FULL(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(aenergy_half_full, _evlist, _show, _store, _mask)
-
-/* a SAG on the line voltage */
-#define IIO_EVENT_ATTR_LINE_VOLT_SAG(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(line_volt_sag, _evlist, _show, _store, _mask)
-
-/*
- * Indicates the end of energy accumulation over an integer number
- * of half line cycles
- */
-#define IIO_EVENT_ATTR_CYCEND(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(cycend, _evlist, _show, _store, _mask)
-
-/* on the rising and falling edge of the voltage waveform */
-#define IIO_EVENT_ATTR_ZERO_CROSS(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(zero_cross, _evlist, _show, _store, _mask)
-
-/* the active energy register has overflowed */
-#define IIO_EVENT_ATTR_AENERGY_OVERFLOW(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(aenergy_overflow, _evlist, _show, _store, _mask)
-
-/* the apparent energy register has overflowed */
-#define IIO_EVENT_ATTR_VAENERGY_OVERFLOW(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(vaenergy_overflow, _evlist, _show, _store, _mask)
-
-/* the apparent energy register, VAENERGY, is more than half full */
-#define IIO_EVENT_ATTR_VAENERGY_HALF_FULL(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(vaenergy_half_full, _evlist, _show, _store, _mask)
-
-/* the power has gone from negative to positive */
-#define IIO_EVENT_ATTR_PPOS(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(ppos, _evlist, _show, _store, _mask)
-
-/* the power has gone from positive to negative */
-#define IIO_EVENT_ATTR_PNEG(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(pneg, _evlist, _show, _store, _mask)
-
-/* waveform sample from Channel 1 has exceeded the IPKLVL value */
-#define IIO_EVENT_ATTR_IPKLVL_EXC(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(ipklvl_exc, _evlist, _show, _store, _mask)
-
-/* waveform sample from Channel 2 has exceeded the VPKLVL value */
-#define IIO_EVENT_ATTR_VPKLVL_EXC(_evlist, _show, _store, _mask) \
- IIO_EVENT_ATTR_SH(vpklvl_exc, _evlist, _show, _store, _mask)
-
-#endif /* _METER_H */
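These wrappers only fix the sysfs file name; everything else is forwarded to IIO_DEVICE_ATTR(). As an illustration (not new driver code), the AIGAIN declaration used in ade7854.c above expands to:

static IIO_DEVICE_ATTR(aigain, S_IWUSR | S_IRUGO,
		       ade7854_read_24bit, ade7854_write_24bit,
		       ADE7854_AIGAIN);

which defines iio_dev_attr_aigain: its .dev_attr.attr is what the driver lists in ade7854_attributes[], and its .address field carries the register address that the shared show/store callbacks hand to the bus accessors.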
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
deleted file mode 100644
index 1c7e2860d6b7..000000000000
--- a/drivers/staging/iio/resolver/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Resolver/Synchro drivers
-#
-menu "Resolver to digital converters"
-
-config AD2S90
- tristate "Analog Devices ad2s90 driver"
- depends on SPI
- help
-	  Say yes here to build support for the Analog Devices AD2S90 SPI
-	  resolver to digital converter. The driver provides direct access
-	  via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad2s90.
-
-config AD2S1200
- tristate "Analog Devices ad2s1200/ad2s1205 driver"
- depends on SPI
- depends on GPIOLIB || COMPILE_TEST
- help
-	  Say yes here to build support for the Analog Devices AD2S1200 and
-	  AD2S1205 SPI resolver to digital converters. The driver provides
-	  direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad2s1200.
-
-config AD2S1210
- tristate "Analog Devices ad2s1210 driver"
- depends on SPI
- depends on GPIOLIB || COMPILE_TEST
- help
-	  Say yes here to build support for the Analog Devices AD2S1210 SPI
-	  resolver to digital converter. The driver provides direct access
-	  via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad2s1210.
-
-endmenu
diff --git a/drivers/staging/iio/resolver/Makefile b/drivers/staging/iio/resolver/Makefile
deleted file mode 100644
index 14375e444ebf..000000000000
--- a/drivers/staging/iio/resolver/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Resolver/Synchro drivers
-#
-
-obj-$(CONFIG_AD2S90) += ad2s90.o
-obj-$(CONFIG_AD2S1200) += ad2s1200.o
-obj-$(CONFIG_AD2S1210) += ad2s1210.o
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
deleted file mode 100644
index c17893b4918c..000000000000
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * ad2s1200.c simple support for the ADI Resolver to Digital Converters:
- * AD2S1200/1205
- *
- * Copyright (c) 2010-2010 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#define DRV_NAME "ad2s1200"
-
-/* input pins SAMPLE and RDVEL are controlled by the driver */
-#define AD2S1200_PN 2
-
-/* input clock on serial interface */
-#define AD2S1200_HZ 8192000
-/* clock period in nanoseconds */
-#define AD2S1200_TSCLK (1000000000/AD2S1200_HZ)
-
-struct ad2s1200_state {
- struct mutex lock;
- struct spi_device *sdev;
- int sample;
- int rdvel;
- u8 rx[2] ____cacheline_aligned;
-};
-
-static int ad2s1200_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- int ret = 0;
- s16 vel;
- struct ad2s1200_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
- gpio_set_value(st->sample, 0);
-	/* delay (6 * AD2S1200_TSCLK + 20) nanoseconds */
- udelay(1);
- gpio_set_value(st->sample, 1);
- gpio_set_value(st->rdvel, !!(chan->type == IIO_ANGL));
- ret = spi_read(st->sdev, st->rx, 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
-
- switch (chan->type) {
- case IIO_ANGL:
- *val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- break;
- case IIO_ANGL_VEL:
- vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- vel = sign_extend32(vel, 11);
- *val = vel;
- break;
- default:
- mutex_unlock(&st->lock);
- return -EINVAL;
- }
- /* delay (2 * AD2S1200_TSCLK + 20) ns for sample pulse */
- udelay(1);
- mutex_unlock(&st->lock);
- return IIO_VAL_INT;
-}
-
-static const struct iio_chan_spec ad2s1200_channels[] = {
- {
- .type = IIO_ANGL,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_ANGL_VEL,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }
-};
-
-static const struct iio_info ad2s1200_info = {
- .read_raw = &ad2s1200_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-static int ad2s1200_probe(struct spi_device *spi)
-{
- struct ad2s1200_state *st;
- struct iio_dev *indio_dev;
- int pn, ret = 0;
-	unsigned short *pins = spi->dev.platform_data;
-
-	if (!pins)
-		return -EINVAL;
-
- for (pn = 0; pn < AD2S1200_PN; pn++) {
- ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
- DRV_NAME);
- if (ret) {
- dev_err(&spi->dev, "request gpio pin %d failed\n",
- pins[pn]);
- return ret;
- }
- }
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- spi_set_drvdata(spi, indio_dev);
- st = iio_priv(indio_dev);
- mutex_init(&st->lock);
- st->sdev = spi;
- st->sample = pins[0];
- st->rdvel = pins[1];
-
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ad2s1200_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ad2s1200_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad2s1200_channels);
- indio_dev->name = spi_get_device_id(spi)->name;
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- spi->max_speed_hz = AD2S1200_HZ;
- spi->mode = SPI_MODE_3;
- spi_setup(spi);
-
- return 0;
-}
-
-static const struct spi_device_id ad2s1200_id[] = {
- { "ad2s1200" },
- { "ad2s1205" },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad2s1200_id);
-
-static struct spi_driver ad2s1200_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = ad2s1200_probe,
- .id_table = ad2s1200_id,
-};
-module_spi_driver(ad2s1200_driver);
-
-MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
-MODULE_LICENSE("GPL v2");
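The AD2S1200 returns a 16-bit frame whose upper 12 bits carry the angle or velocity sample, which is why ad2s1200_read_raw() combines rx[0] with the top nibble of rx[1]. A small userspace sketch of the same unpacking follows; the example bytes are made up and plain C stands in for the kernel helpers.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rx[2] = { 0x80, 0x10 };	/* hypothetical SPI bytes, MSB first */

	/* angle: unsigned 12-bit value, 0..4095 across one revolution */
	uint16_t angle = ((uint16_t)rx[0] << 4) | ((rx[1] & 0xF0) >> 4);

	/* velocity: same packing, interpreted as a signed 12-bit value */
	int16_t vel = (angle & 0x800) ? (int16_t)angle - 0x1000 : (int16_t)angle;

	printf("raw angle = %u, signed velocity = %d\n", angle, vel);
	return 0;
}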
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
deleted file mode 100644
index 7bc3e4a73834..000000000000
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ /dev/null
@@ -1,748 +0,0 @@
-/*
- * ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210
- *
- * Copyright (c) 2010-2010 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include "ad2s1210.h"
-
-#define DRV_NAME "ad2s1210"
-
-#define AD2S1210_DEF_CONTROL 0x7E
-
-#define AD2S1210_MSB_IS_HIGH 0x80
-#define AD2S1210_MSB_IS_LOW 0x7F
-#define AD2S1210_PHASE_LOCK_RANGE_44 0x20
-#define AD2S1210_ENABLE_HYSTERESIS 0x10
-#define AD2S1210_SET_ENRES1 0x08
-#define AD2S1210_SET_ENRES0 0x04
-#define AD2S1210_SET_RES1 0x02
-#define AD2S1210_SET_RES0 0x01
-
-#define AD2S1210_SET_ENRESOLUTION (AD2S1210_SET_ENRES1 | \
- AD2S1210_SET_ENRES0)
-#define AD2S1210_SET_RESOLUTION (AD2S1210_SET_RES1 | AD2S1210_SET_RES0)
-
-#define AD2S1210_REG_POSITION 0x80
-#define AD2S1210_REG_VELOCITY 0x82
-#define AD2S1210_REG_LOS_THRD 0x88
-#define AD2S1210_REG_DOS_OVR_THRD 0x89
-#define AD2S1210_REG_DOS_MIS_THRD 0x8A
-#define AD2S1210_REG_DOS_RST_MAX_THRD 0x8B
-#define AD2S1210_REG_DOS_RST_MIN_THRD 0x8C
-#define AD2S1210_REG_LOT_HIGH_THRD 0x8D
-#define AD2S1210_REG_LOT_LOW_THRD 0x8E
-#define AD2S1210_REG_EXCIT_FREQ 0x91
-#define AD2S1210_REG_CONTROL 0x92
-#define AD2S1210_REG_SOFT_RESET 0xF0
-#define AD2S1210_REG_FAULT 0xFF
-
-/* pins SAMPLE, A0, A1, RES0 and RES1 are controlled by the driver */
-#define AD2S1210_SAA 3
-#define AD2S1210_PN (AD2S1210_SAA + AD2S1210_RES)
-
-#define AD2S1210_MIN_CLKIN 6144000
-#define AD2S1210_MAX_CLKIN 10240000
-#define AD2S1210_MIN_EXCIT 2000
-#define AD2S1210_MAX_EXCIT 20000
-#define AD2S1210_MIN_FCW 0x4
-#define AD2S1210_MAX_FCW 0x50
-
-/* default input clock on serial interface */
-#define AD2S1210_DEF_CLKIN 8192000
-/* clock period in nanoseconds */
-#define AD2S1210_DEF_TCK (1000000000/AD2S1210_DEF_CLKIN)
-#define AD2S1210_DEF_EXCIT 10000
-
-enum ad2s1210_mode {
- MOD_POS = 0,
- MOD_VEL,
- MOD_CONFIG,
- MOD_RESERVED,
-};
-
-static const unsigned int ad2s1210_resolution_value[] = { 10, 12, 14, 16 };
-
-struct ad2s1210_state {
- const struct ad2s1210_platform_data *pdata;
- struct mutex lock;
- struct spi_device *sdev;
- unsigned int fclkin;
- unsigned int fexcit;
- bool hysteresis;
- bool old_data;
- u8 resolution;
- enum ad2s1210_mode mode;
- u8 rx[2] ____cacheline_aligned;
- u8 tx[2] ____cacheline_aligned;
-};
-
-static const int ad2s1210_mode_vals[4][2] = {
- [MOD_POS] = { 0, 0 },
- [MOD_VEL] = { 0, 1 },
- [MOD_CONFIG] = { 1, 0 },
-};
-static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
- struct ad2s1210_state *st)
-{
- gpio_set_value(st->pdata->a[0], ad2s1210_mode_vals[mode][0]);
- gpio_set_value(st->pdata->a[1], ad2s1210_mode_vals[mode][1]);
- st->mode = mode;
-}
-
-/* write one byte (address or data) to the chip */
-static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
-{
- int ret;
-
- ad2s1210_set_mode(MOD_CONFIG, st);
- st->tx[0] = data;
- ret = spi_write(st->sdev, st->tx, 1);
- if (ret < 0)
- return ret;
- st->old_data = true;
-
- return 0;
-}
-
-/* read value from one of the registers */
-static int ad2s1210_config_read(struct ad2s1210_state *st,
- unsigned char address)
-{
- struct spi_transfer xfer = {
- .len = 2,
- .rx_buf = st->rx,
- .tx_buf = st->tx,
- };
- int ret = 0;
-
- ad2s1210_set_mode(MOD_CONFIG, st);
- st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
- st->tx[1] = AD2S1210_REG_FAULT;
- ret = spi_sync_transfer(st->sdev, &xfer, 1);
- if (ret < 0)
- return ret;
- st->old_data = true;
-
- return st->rx[1];
-}
-
-static inline
-int ad2s1210_update_frequency_control_word(struct ad2s1210_state *st)
-{
- int ret;
- unsigned char fcw;
-
- fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin);
- if (fcw < AD2S1210_MIN_FCW || fcw > AD2S1210_MAX_FCW) {
- dev_err(&st->sdev->dev, "ad2s1210: FCW out of range\n");
- return -ERANGE;
- }
-
- ret = ad2s1210_config_write(st, AD2S1210_REG_EXCIT_FREQ);
- if (ret < 0)
- return ret;
-
- return ad2s1210_config_write(st, fcw);
-}
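For reference, with the driver defaults (AD2S1210_DEF_CLKIN = 8192000, AD2S1210_DEF_EXCIT = 10000) the frequency control word computed above works out to:

	fcw = fexcit * 2^15 / fclkin
	    = 10000 * 32768 / 8192000
	    = 40 (0x28)

which sits inside the accepted window of AD2S1210_MIN_FCW (0x4) to AD2S1210_MAX_FCW (0x50), so the default excitation frequency passes the range check.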
-
-static unsigned char ad2s1210_read_resolution_pin(struct ad2s1210_state *st)
-{
- return ad2s1210_resolution_value[
- (gpio_get_value(st->pdata->res[0]) << 1) |
- gpio_get_value(st->pdata->res[1])];
-}
-
-static const int ad2s1210_res_pins[4][2] = {
-	{ 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 }
-};
-
-static inline void ad2s1210_set_resolution_pin(struct ad2s1210_state *st)
-{
-	gpio_set_value(st->pdata->res[0],
-		       ad2s1210_res_pins[(st->resolution - 10) / 2][0]);
-	gpio_set_value(st->pdata->res[1],
-		       ad2s1210_res_pins[(st->resolution - 10) / 2][1]);
-}
-
-static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
-{
- int ret;
-
- ret = ad2s1210_config_write(st, AD2S1210_REG_SOFT_RESET);
- if (ret < 0)
- return ret;
-
- return ad2s1210_config_write(st, 0x0);
-}
-
-static ssize_t ad2s1210_show_fclkin(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
-
- return sprintf(buf, "%u\n", st->fclkin);
-}
-
-static ssize_t ad2s1210_store_fclkin(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned int fclkin;
- int ret;
-
- ret = kstrtouint(buf, 10, &fclkin);
- if (ret)
- return ret;
- if (fclkin < AD2S1210_MIN_CLKIN || fclkin > AD2S1210_MAX_CLKIN) {
- dev_err(dev, "ad2s1210: fclkin out of range\n");
- return -EINVAL;
- }
-
- mutex_lock(&st->lock);
- st->fclkin = fclkin;
-
- ret = ad2s1210_update_frequency_control_word(st);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_soft_reset(st);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
-static ssize_t ad2s1210_show_fexcit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
-
- return sprintf(buf, "%u\n", st->fexcit);
-}
-
-static ssize_t ad2s1210_store_fexcit(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned int fexcit;
- int ret;
-
- ret = kstrtouint(buf, 10, &fexcit);
- if (ret < 0)
- return ret;
- if (fexcit < AD2S1210_MIN_EXCIT || fexcit > AD2S1210_MAX_EXCIT) {
- dev_err(dev,
- "ad2s1210: excitation frequency out of range\n");
- return -EINVAL;
- }
- mutex_lock(&st->lock);
- st->fexcit = fexcit;
- ret = ad2s1210_update_frequency_control_word(st);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_soft_reset(st);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
-static ssize_t ad2s1210_show_control(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- mutex_unlock(&st->lock);
- return ret < 0 ? ret : sprintf(buf, "0x%x\n", ret);
-}
-
-static ssize_t ad2s1210_store_control(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned char udata;
- unsigned char data;
- int ret;
-
- ret = kstrtou8(buf, 16, &udata);
- if (ret)
- return -EINVAL;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = udata & AD2S1210_MSB_IS_LOW;
- ret = ad2s1210_config_write(st, data);
- if (ret < 0)
- goto error_ret;
-
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- if (ret & AD2S1210_MSB_IS_HIGH) {
- ret = -EIO;
- dev_err(dev,
- "ad2s1210: write control register fail\n");
- goto error_ret;
- }
- st->resolution
- = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION];
-	st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS);
-
-	if (st->pdata->gpioin) {
-		data = ad2s1210_read_resolution_pin(st);
-		if (data != st->resolution)
-			dev_warn(dev, "ad2s1210: resolution settings do not match\n");
-	} else {
-		ad2s1210_set_resolution_pin(st);
-	}
-
-	ret = len;
-
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static ssize_t ad2s1210_show_resolution(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
-
- return sprintf(buf, "%d\n", st->resolution);
-}
-
-static ssize_t ad2s1210_store_resolution(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned char data;
- unsigned char udata;
- int ret;
-
- ret = kstrtou8(buf, 10, &udata);
- if (ret || udata < 10 || udata > 16) {
- dev_err(dev, "ad2s1210: resolution out of range\n");
- return -EINVAL;
- }
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = ret;
- data &= ~AD2S1210_SET_RESOLUTION;
- data |= (udata - 10) >> 1;
- ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = ret;
- if (data & AD2S1210_MSB_IS_HIGH) {
- ret = -EIO;
- dev_err(dev, "ad2s1210: setting resolution fail\n");
- goto error_ret;
- }
- st->resolution
- = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION];
- if (st->pdata->gpioin) {
- data = ad2s1210_read_resolution_pin(st);
- if (data != st->resolution)
-			dev_warn(dev, "ad2s1210: resolution settings do not match\n");
-	} else {
-		ad2s1210_set_resolution_pin(st);
-	}
- ret = len;
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-/* read the fault register since last sample */
-static ssize_t ad2s1210_show_fault(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT);
- mutex_unlock(&st->lock);
-
-	return ret < 0 ? ret : sprintf(buf, "0x%x\n", ret);
-}
-
-static ssize_t ad2s1210_clear_fault(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- gpio_set_value(st->pdata->sample, 0);
-	/* delay (2 * tck + 20) nanoseconds */
- udelay(1);
- gpio_set_value(st->pdata->sample, 1);
- ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT);
- if (ret < 0)
- goto error_ret;
- gpio_set_value(st->pdata->sample, 0);
- gpio_set_value(st->pdata->sample, 1);
-error_ret:
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
-static ssize_t ad2s1210_show_reg(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_config_read(st, iattr->address);
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : sprintf(buf, "%d\n", ret);
-}
-
-static ssize_t ad2s1210_store_reg(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- unsigned char data;
- int ret;
- struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
-
- ret = kstrtou8(buf, 10, &data);
- if (ret)
- return -EINVAL;
- mutex_lock(&st->lock);
- ret = ad2s1210_config_write(st, iattr->address);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW);
-error_ret:
- mutex_unlock(&st->lock);
- return ret < 0 ? ret : len;
-}
-
-static int ad2s1210_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- struct ad2s1210_state *st = iio_priv(indio_dev);
- int ret = 0;
- u16 pos;
- s16 vel;
-
- mutex_lock(&st->lock);
- gpio_set_value(st->pdata->sample, 0);
-	/* delay (6 * tck + 20) nanoseconds */
- udelay(1);
-
- switch (chan->type) {
- case IIO_ANGL:
- ad2s1210_set_mode(MOD_POS, st);
- break;
- case IIO_ANGL_VEL:
- ad2s1210_set_mode(MOD_VEL, st);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret < 0)
- goto error_ret;
- ret = spi_read(st->sdev, st->rx, 2);
- if (ret < 0)
- goto error_ret;
-
- switch (chan->type) {
- case IIO_ANGL:
- pos = be16_to_cpup((__be16 *) st->rx);
- if (st->hysteresis)
- pos >>= 16 - st->resolution;
- *val = pos;
- ret = IIO_VAL_INT;
- break;
- case IIO_ANGL_VEL:
-		vel = be16_to_cpup((__be16 *) st->rx);
-		vel >>= 16 - st->resolution;
-		vel = sign_extend32(vel, st->resolution - 1);
- *val = vel;
- ret = IIO_VAL_INT;
- break;
- default:
- mutex_unlock(&st->lock);
- return -EINVAL;
- }
-
-error_ret:
- gpio_set_value(st->pdata->sample, 1);
-	/* delay (2 * tck + 20) nanoseconds */
- udelay(1);
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
- ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
-static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
- ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0);
-static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR,
- ad2s1210_show_control, ad2s1210_store_control, 0);
-static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR,
- ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
-static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR,
- ad2s1210_show_fault, ad2s1210_clear_fault, 0);
-
-static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_LOS_THRD);
-static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_OVR_THRD);
-static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_MIS_THRD);
-static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_RST_MAX_THRD);
-static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_DOS_RST_MIN_THRD);
-static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_LOT_HIGH_THRD);
-static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg,
- AD2S1210_REG_LOT_LOW_THRD);
-
-static const struct iio_chan_spec ad2s1210_channels[] = {
- {
- .type = IIO_ANGL,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }, {
- .type = IIO_ANGL_VEL,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }
-};
-
-static struct attribute *ad2s1210_attributes[] = {
- &iio_dev_attr_fclkin.dev_attr.attr,
- &iio_dev_attr_fexcit.dev_attr.attr,
- &iio_dev_attr_control.dev_attr.attr,
- &iio_dev_attr_bits.dev_attr.attr,
- &iio_dev_attr_fault.dev_attr.attr,
- &iio_dev_attr_los_thrd.dev_attr.attr,
- &iio_dev_attr_dos_ovr_thrd.dev_attr.attr,
- &iio_dev_attr_dos_mis_thrd.dev_attr.attr,
- &iio_dev_attr_dos_rst_max_thrd.dev_attr.attr,
- &iio_dev_attr_dos_rst_min_thrd.dev_attr.attr,
- &iio_dev_attr_lot_high_thrd.dev_attr.attr,
- &iio_dev_attr_lot_low_thrd.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad2s1210_attribute_group = {
- .attrs = ad2s1210_attributes,
-};
-
-static int ad2s1210_initial(struct ad2s1210_state *st)
-{
- unsigned char data;
- int ret;
-
- mutex_lock(&st->lock);
- if (st->pdata->gpioin)
- st->resolution = ad2s1210_read_resolution_pin(st);
- else
- ad2s1210_set_resolution_pin(st);
-
- ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
- data = AD2S1210_DEF_CONTROL & ~(AD2S1210_SET_RESOLUTION);
- data |= (st->resolution - 10) >> 1;
- ret = ad2s1210_config_write(st, data);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
- if (ret < 0)
- goto error_ret;
-
- if (ret & AD2S1210_MSB_IS_HIGH) {
- ret = -EIO;
- goto error_ret;
- }
-
- ret = ad2s1210_update_frequency_control_word(st);
- if (ret < 0)
- goto error_ret;
- ret = ad2s1210_soft_reset(st);
-error_ret:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static const struct iio_info ad2s1210_info = {
- .read_raw = &ad2s1210_read_raw,
- .attrs = &ad2s1210_attribute_group,
- .driver_module = THIS_MODULE,
-};
-
-static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
-{
- unsigned long flags = st->pdata->gpioin ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
- struct gpio ad2s1210_gpios[] = {
- { st->pdata->sample, GPIOF_DIR_IN, "sample" },
- { st->pdata->a[0], flags, "a0" },
- { st->pdata->a[1], flags, "a1" },
- { st->pdata->res[0], flags, "res0" },
-		{ st->pdata->res[1], flags, "res1" },
- };
-
- return gpio_request_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios));
-}
-
-static void ad2s1210_free_gpios(struct ad2s1210_state *st)
-{
- unsigned long flags = st->pdata->gpioin ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
- struct gpio ad2s1210_gpios[] = {
- { st->pdata->sample, GPIOF_DIR_IN, "sample" },
- { st->pdata->a[0], flags, "a0" },
- { st->pdata->a[1], flags, "a1" },
- { st->pdata->res[0], flags, "res0" },
-		{ st->pdata->res[1], flags, "res1" },
- };
-
- gpio_free_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios));
-}
-
-static int ad2s1210_probe(struct spi_device *spi)
-{
- struct iio_dev *indio_dev;
- struct ad2s1210_state *st;
- int ret;
-
- if (spi->dev.platform_data == NULL)
- return -EINVAL;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- st->pdata = spi->dev.platform_data;
- ret = ad2s1210_setup_gpios(st);
- if (ret < 0)
- return ret;
-
- spi_set_drvdata(spi, indio_dev);
-
- mutex_init(&st->lock);
- st->sdev = spi;
- st->hysteresis = true;
- st->mode = MOD_CONFIG;
- st->resolution = 12;
- st->fexcit = AD2S1210_DEF_EXCIT;
-
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ad2s1210_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ad2s1210_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad2s1210_channels);
- indio_dev->name = spi_get_device_id(spi)->name;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_gpios;
-
- st->fclkin = spi->max_speed_hz;
- spi->mode = SPI_MODE_3;
- spi_setup(spi);
- ad2s1210_initial(st);
-
- return 0;
-
-error_free_gpios:
- ad2s1210_free_gpios(st);
- return ret;
-}
-
-static int ad2s1210_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- ad2s1210_free_gpios(iio_priv(indio_dev));
-
- return 0;
-}
-
-static const struct spi_device_id ad2s1210_id[] = {
- { "ad2s1210" },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad2s1210_id);
-
-static struct spi_driver ad2s1210_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = ad2s1210_probe,
- .remove = ad2s1210_remove,
- .id_table = ad2s1210_id,
-};
-module_spi_driver(ad2s1210_driver);
-
-MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/resolver/ad2s1210.h b/drivers/staging/iio/resolver/ad2s1210.h
deleted file mode 100644
index c7158f6e61c2..000000000000
--- a/drivers/staging/iio/resolver/ad2s1210.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * ad2s1210.h platform data for the ADI Resolver to Digital Converters:
- * AD2S1210
- *
- * Copyright (c) 2010-2010 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _AD2S1210_H
-#define _AD2S1210_H
-
-struct ad2s1210_platform_data {
- unsigned sample;
- unsigned a[2];
- unsigned res[2];
- bool gpioin;
-};
-#endif /* _AD2S1210_H */
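
A minimal sketch of how a board file could have supplied this platform data; the GPIO numbers, SPI bus/chip-select and clock rate below are illustrative placeholders, not values taken from this patch:

#include <linux/init.h>
#include <linux/spi/spi.h>

#include "ad2s1210.h"

/* Hypothetical board wiring for the AD2S1210; GPIO numbers are examples only. */
static struct ad2s1210_platform_data board_ad2s1210_pdata = {
	.sample	= 10,		/* GPIO connected to the /SAMPLE line */
	.a	= { 11, 12 },	/* A0/A1 mode-control GPIOs */
	.res	= { 13, 14 },	/* RES0/RES1 resolution GPIOs */
	.gpioin	= false,	/* host drives the resolution pins */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "ad2s1210",	/* matches ad2s1210_id */
		.max_speed_hz	= 8192000,	/* probe() also uses this as fclkin */
		.bus_num	= 0,
		.chip_select	= 1,
		.platform_data	= &board_ad2s1210_pdata,
	},
};

/* Registered from early board init code, e.g.:
 *	spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */
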
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
deleted file mode 100644
index e24c5890652f..000000000000
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * ad2s90.c simple support for the ADI Resolver to Digital Converters: AD2S90
- *
- * Copyright (c) 2010-2010 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-struct ad2s90_state {
- struct mutex lock;
- struct spi_device *sdev;
- u8 rx[2] ____cacheline_aligned;
-};
-
-static int ad2s90_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
-{
- int ret;
- struct ad2s90_state *st = iio_priv(indio_dev);
-
-	mutex_lock(&st->lock);
-	ret = spi_read(st->sdev, st->rx, 2);
-	if (ret)
-		goto error_ret;
-	*val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
-	ret = IIO_VAL_INT;
-
-error_ret:
-	mutex_unlock(&st->lock);
-	return ret;
-}
-
-static const struct iio_info ad2s90_info = {
- .read_raw = &ad2s90_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-static const struct iio_chan_spec ad2s90_chan = {
- .type = IIO_ANGL,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-};
-
-static int ad2s90_probe(struct spi_device *spi)
-{
- struct iio_dev *indio_dev;
- struct ad2s90_state *st;
- int ret = 0;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
-
- mutex_init(&st->lock);
- st->sdev = spi;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &ad2s90_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = &ad2s90_chan;
- indio_dev->num_channels = 1;
- indio_dev->name = spi_get_device_id(spi)->name;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- return ret;
-
- /* need 600ns between CS and the first falling edge of SCLK */
- spi->max_speed_hz = 830000;
- spi->mode = SPI_MODE_3;
- spi_setup(spi);
-
- return 0;
-}
-
-static int ad2s90_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
-
- return 0;
-}
-
-static const struct spi_device_id ad2s90_id[] = {
- { "ad2s90" },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ad2s90_id);
-
-static struct spi_driver ad2s90_driver = {
- .driver = {
- .name = "ad2s90",
- .owner = THIS_MODULE,
- },
- .probe = ad2s90_probe,
- .remove = ad2s90_remove,
- .id_table = ad2s90_id,
-};
-module_spi_driver(ad2s90_driver);
-
-MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h
deleted file mode 100644
index 75bf47bfee78..000000000000
--- a/drivers/staging/iio/ring_hw.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * ring_hw.h - common functionality for iio hardware ring buffers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#ifndef _RING_HW_H_
-#define _RING_HW_H_
-
-/**
- * struct iio_hw_buffer - hardware ring buffer
- * @buf: generic ring buffer elements
- * @private: device specific data
- */
-struct iio_hw_buffer {
- struct iio_buffer buf;
- void *private;
-};
-
-#define iio_to_hw_buf(r) container_of(r, struct iio_hw_buffer, buf)
-
-#endif /* _RING_HW_H_ */
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
deleted file mode 100644
index 710a2f3e787e..000000000000
--- a/drivers/staging/iio/trigger/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Industrial I/O standalone triggers
-#
-comment "Triggers - standalone"
-
-if IIO_TRIGGER
-
-config IIO_PERIODIC_RTC_TRIGGER
- tristate "Periodic RTC triggers"
- depends on RTC_CLASS
- help
- Provides support for using periodic capable real time
- clocks as IIO triggers.
-
- To compile this driver as a module, choose M here: the
- module will be called iio-trig-periodic-rtc.
-
-config IIO_BFIN_TMR_TRIGGER
- tristate "Blackfin TIMER trigger"
- depends on BLACKFIN
- select BFIN_GPTIMERS
- help
-	  Provides support for using a Blackfin timer as an IIO trigger.
- If unsure, say N (but it's safe to say "Y").
-
- To compile this driver as a module, choose M here: the
- module will be called iio-trig-bfin-timer.
-
-endif # IIO_TRIGGER
diff --git a/drivers/staging/iio/trigger/Makefile b/drivers/staging/iio/trigger/Makefile
deleted file mode 100644
index 238481b78e72..000000000000
--- a/drivers/staging/iio/trigger/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for triggers not associated with iio-devices
-#
-
-obj-$(CONFIG_IIO_PERIODIC_RTC_TRIGGER) += iio-trig-periodic-rtc.o
-obj-$(CONFIG_IIO_BFIN_TMR_TRIGGER) += iio-trig-bfin-timer.o
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
deleted file mode 100644
index 9fe48ef11473..000000000000
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-
-#include <asm/gptimers.h>
-#include <asm/portmux.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-
-#include "iio-trig-bfin-timer.h"
-
-struct bfin_timer {
- unsigned short id, bit;
- unsigned long irqbit;
- int irq;
- int pin;
-};
-
-/*
- * this covers all hardware timer configurations on
- * all Blackfin derivatives out there today
- */
-
-static struct bfin_timer iio_bfin_timer_code[MAX_BLACKFIN_GPTIMERS] = {
- {TIMER0_id, TIMER0bit, TIMER_STATUS_TIMIL0, IRQ_TIMER0, P_TMR0},
- {TIMER1_id, TIMER1bit, TIMER_STATUS_TIMIL1, IRQ_TIMER1, P_TMR1},
- {TIMER2_id, TIMER2bit, TIMER_STATUS_TIMIL2, IRQ_TIMER2, P_TMR2},
-#if (MAX_BLACKFIN_GPTIMERS > 3)
- {TIMER3_id, TIMER3bit, TIMER_STATUS_TIMIL3, IRQ_TIMER3, P_TMR3},
- {TIMER4_id, TIMER4bit, TIMER_STATUS_TIMIL4, IRQ_TIMER4, P_TMR4},
- {TIMER5_id, TIMER5bit, TIMER_STATUS_TIMIL5, IRQ_TIMER5, P_TMR5},
- {TIMER6_id, TIMER6bit, TIMER_STATUS_TIMIL6, IRQ_TIMER6, P_TMR6},
- {TIMER7_id, TIMER7bit, TIMER_STATUS_TIMIL7, IRQ_TIMER7, P_TMR7},
-#endif
-#if (MAX_BLACKFIN_GPTIMERS > 8)
- {TIMER8_id, TIMER8bit, TIMER_STATUS_TIMIL8, IRQ_TIMER8, P_TMR8},
- {TIMER9_id, TIMER9bit, TIMER_STATUS_TIMIL9, IRQ_TIMER9, P_TMR9},
- {TIMER10_id, TIMER10bit, TIMER_STATUS_TIMIL10, IRQ_TIMER10, P_TMR10},
-#if (MAX_BLACKFIN_GPTIMERS > 11)
- {TIMER11_id, TIMER11bit, TIMER_STATUS_TIMIL11, IRQ_TIMER11, P_TMR11},
-#endif
-#endif
-};
-
-struct bfin_tmr_state {
- struct iio_trigger *trig;
- struct bfin_timer *t;
- unsigned timer_num;
- bool output_enable;
- unsigned int duty;
- int irq;
-};
-
-static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state)
-{
- struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
-
- if (get_gptimer_period(st->t->id) == 0)
- return -EINVAL;
-
- if (state)
- enable_gptimers(st->t->bit);
- else
- disable_gptimers(st->t->bit);
-
- return 0;
-}
-
-static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iio_trigger *trig = to_iio_trigger(dev);
- struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
- unsigned int val;
- bool enabled;
- int ret;
-
- ret = kstrtouint(buf, 10, &val);
- if (ret)
- return ret;
-
- if (val > 100000)
- return -EINVAL;
-
- enabled = get_enabled_gptimers() & st->t->bit;
-
- if (enabled)
- disable_gptimers(st->t->bit);
-
- if (val == 0)
- return count;
-
- val = get_sclk() / val;
- if (val <= 4 || val <= st->duty)
- return -EINVAL;
-
- set_gptimer_period(st->t->id, val);
- set_gptimer_pwidth(st->t->id, val - st->duty);
-
- if (enabled)
- enable_gptimers(st->t->bit);
-
- return count;
-}
-
-static ssize_t iio_bfin_tmr_frequency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_trigger *trig = to_iio_trigger(dev);
- struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
- unsigned int period = get_gptimer_period(st->t->id);
- unsigned long val;
-
- if (period == 0)
- val = 0;
- else
- val = get_sclk() / get_gptimer_period(st->t->id);
-
- return sprintf(buf, "%lu\n", val);
-}
-
-static DEVICE_ATTR(frequency, S_IRUGO | S_IWUSR, iio_bfin_tmr_frequency_show,
- iio_bfin_tmr_frequency_store);
-
-static struct attribute *iio_bfin_tmr_trigger_attrs[] = {
- &dev_attr_frequency.attr,
- NULL,
-};
-
-static const struct attribute_group iio_bfin_tmr_trigger_attr_group = {
- .attrs = iio_bfin_tmr_trigger_attrs,
-};
-
-static const struct attribute_group *iio_bfin_tmr_trigger_attr_groups[] = {
- &iio_bfin_tmr_trigger_attr_group,
- NULL
-};
-
-static irqreturn_t iio_bfin_tmr_trigger_isr(int irq, void *devid)
-{
- struct bfin_tmr_state *st = devid;
-
- clear_gptimer_intr(st->t->id);
- iio_trigger_poll(st->trig);
-
- return IRQ_HANDLED;
-}
-
-static int iio_bfin_tmr_get_number(int irq)
-{
- int i;
-
- for (i = 0; i < MAX_BLACKFIN_GPTIMERS; i++)
- if (iio_bfin_timer_code[i].irq == irq)
- return i;
-
- return -ENODEV;
-}
-
-static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = iio_bfin_tmr_set_state,
-};
-
-static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
-{
- struct iio_bfin_timer_trigger_pdata *pdata = pdev->dev.platform_data;
- struct bfin_tmr_state *st;
- unsigned int config;
- int ret;
-
- st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
- if (!st)
- return -ENOMEM;
-
- st->irq = platform_get_irq(pdev, 0);
- if (!st->irq) {
- dev_err(&pdev->dev, "No IRQs specified");
- return -ENODEV;
- }
-
- ret = iio_bfin_tmr_get_number(st->irq);
- if (ret < 0)
- return ret;
-
- st->timer_num = ret;
- st->t = &iio_bfin_timer_code[st->timer_num];
-
- st->trig = iio_trigger_alloc("bfintmr%d", st->timer_num);
- if (!st->trig)
- return -ENOMEM;
-
- st->trig->ops = &iio_bfin_tmr_trigger_ops;
- st->trig->dev.groups = iio_bfin_tmr_trigger_attr_groups;
- iio_trigger_set_drvdata(st->trig, st);
- ret = iio_trigger_register(st->trig);
- if (ret)
- goto out;
-
- ret = request_irq(st->irq, iio_bfin_tmr_trigger_isr,
- 0, st->trig->name, st);
- if (ret) {
- dev_err(&pdev->dev,
- "request IRQ-%d failed", st->irq);
- goto out1;
- }
-
- config = PWM_OUT | PERIOD_CNT | IRQ_ENA;
-
- if (pdata && pdata->output_enable) {
- unsigned long long val;
-
- st->output_enable = true;
-
- ret = peripheral_request(st->t->pin, st->trig->name);
- if (ret)
- goto out_free_irq;
-
- val = (unsigned long long)get_sclk() * pdata->duty_ns;
- do_div(val, NSEC_PER_SEC);
- st->duty = val;
-
-		/*
-		 * The interrupt is generated at the end of the period.
-		 * Since we want it at the end of the pulse, we invert both
-		 * polarity and duty cycle, so that the pulse is generated
-		 * directly before the interrupt.
-		 */
- if (pdata->active_low)
- config |= PULSE_HI;
- } else {
- st->duty = 1;
- config |= OUT_DIS;
- }
-
- set_gptimer_config(st->t->id, config);
-
- dev_info(&pdev->dev, "iio trigger Blackfin TMR%d, IRQ-%d",
- st->timer_num, st->irq);
- platform_set_drvdata(pdev, st);
-
- return 0;
-out_free_irq:
- free_irq(st->irq, st);
-out1:
- iio_trigger_unregister(st->trig);
-out:
- iio_trigger_put(st->trig);
- return ret;
-}
-
-static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev)
-{
- struct bfin_tmr_state *st = platform_get_drvdata(pdev);
-
- disable_gptimers(st->t->bit);
- if (st->output_enable)
- peripheral_free(st->t->pin);
- free_irq(st->irq, st);
- iio_trigger_unregister(st->trig);
- iio_trigger_put(st->trig);
-
- return 0;
-}
-
-static struct platform_driver iio_bfin_tmr_trigger_driver = {
- .driver = {
- .name = "iio_bfin_tmr_trigger",
- },
- .probe = iio_bfin_tmr_trigger_probe,
- .remove = iio_bfin_tmr_trigger_remove,
-};
-
-module_platform_driver(iio_bfin_tmr_trigger_driver);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Blackfin system timer based trigger for the iio subsystem");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:iio-trig-bfin-timer");
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.h b/drivers/staging/iio/trigger/iio-trig-bfin-timer.h
deleted file mode 100644
index c07321f8d94c..000000000000
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __IIO_BFIN_TIMER_TRIGGER_H__
-#define __IIO_BFIN_TIMER_TRIGGER_H__
-
-/**
- * struct iio_bfin_timer_trigger_pdata - timer trigger platform data
- * @output_enable: Enable external trigger pulse generation.
- * @active_low: Whether the trigger pulse is active low.
- * @duty_ns: Length of the trigger pulse in nanoseconds.
- *
- * This struct is used to configure the output pulse generation of the Blackfin
- * timer trigger. If output_enable is set to true an external trigger signal
- * will be generated on the pin corresponding to the timer. This is useful for
- * converters which need an external signal to start conversion. active_low and
- * duty_ns are used to configure the type of the trigger pulse. If output_enable
- * is set to false no external trigger pulse will be generated and active_low
- * and duty_ns are ignored.
- */
-struct iio_bfin_timer_trigger_pdata {
- bool output_enable;
- bool active_low;
- unsigned int duty_ns;
-};
-
-#endif
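
A minimal sketch of how a board file could have instantiated this trigger; the IRQ, device id and pulse width are illustrative placeholders, not values from this patch:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#include "iio-trig-bfin-timer.h"

static struct iio_bfin_timer_trigger_pdata board_tmr_trig_pdata = {
	.output_enable	= true,		/* drive the trigger pulse on the timer pin */
	.active_low	= false,
	.duty_ns	= 500,		/* 500 ns pulse, converted to sclk cycles by probe() */
};

static struct resource board_tmr_trig_resources[] = {
	{
		/* IRQ of the general-purpose timer to use (arch-specific, e.g. IRQ_TIMER0) */
		.start	= IRQ_TIMER0,
		.end	= IRQ_TIMER0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_tmr_trig_device = {
	.name		= "iio_bfin_tmr_trigger",	/* matches the driver name above */
	.id		= 0,
	.resource	= board_tmr_trig_resources,
	.num_resources	= ARRAY_SIZE(board_tmr_trig_resources),
	.dev = {
		.platform_data	= &board_tmr_trig_pdata,
	},
};
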
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
deleted file mode 100644
index 2db885750fb8..000000000000
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/* The industrial I/O periodic RTC trigger driver
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This is a heavily rewritten version of the periodic timer system in
- * earlier versions of industrialio. It supplies the same functionality
- * but via a trigger rather than a specific periodic timer system.
- */
-
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/rtc.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-
-static LIST_HEAD(iio_prtc_trigger_list);
-static DEFINE_MUTEX(iio_prtc_trigger_list_lock);
-
-struct iio_prtc_trigger_info {
- struct rtc_device *rtc;
- unsigned int frequency;
- struct rtc_task task;
- bool state;
-};
-
-static int iio_trig_periodic_rtc_set_state(struct iio_trigger *trig, bool state)
-{
- struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig);
- int ret;
-
- if (trig_info->frequency == 0 && state)
- return -EINVAL;
- dev_dbg(&trig_info->rtc->dev, "trigger frequency is %u\n",
- trig_info->frequency);
- ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, state);
- if (ret == 0)
- trig_info->state = state;
-
- return ret;
-}
-
-static ssize_t iio_trig_periodic_read_freq(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_trigger *trig = to_iio_trigger(dev);
- struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig);
-
- return sprintf(buf, "%u\n", trig_info->frequency);
-}
-
-static ssize_t iio_trig_periodic_write_freq(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_trigger *trig = to_iio_trigger(dev);
- struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig);
- unsigned int val;
- int ret;
-
- ret = kstrtouint(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- if (val > 0) {
- ret = rtc_irq_set_freq(trig_info->rtc, &trig_info->task, val);
- if (ret == 0 && trig_info->state && trig_info->frequency == 0)
- ret = rtc_irq_set_state(trig_info->rtc,
- &trig_info->task, 1);
- } else {
- ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0);
- }
- if (ret)
- goto error_ret;
-
- trig_info->frequency = val;
-
- return len;
-
-error_ret:
- return ret;
-}
-
-static DEVICE_ATTR(frequency, S_IRUGO | S_IWUSR,
- iio_trig_periodic_read_freq,
- iio_trig_periodic_write_freq);
-
-static struct attribute *iio_trig_prtc_attrs[] = {
- &dev_attr_frequency.attr,
- NULL,
-};
-
-static const struct attribute_group iio_trig_prtc_attr_group = {
- .attrs = iio_trig_prtc_attrs,
-};
-
-static const struct attribute_group *iio_trig_prtc_attr_groups[] = {
- &iio_trig_prtc_attr_group,
- NULL
-};
-
-static void iio_prtc_trigger_poll(void *private_data)
-{
- iio_trigger_poll(private_data);
-}
-
-static const struct iio_trigger_ops iio_prtc_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = &iio_trig_periodic_rtc_set_state,
-};
-
-static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
-{
- char **pdata = dev->dev.platform_data;
- struct iio_prtc_trigger_info *trig_info;
- struct iio_trigger *trig, *trig2;
-
- int i, ret;
-
- for (i = 0;; i++) {
- if (!pdata[i])
- break;
- trig = iio_trigger_alloc("periodic%s", pdata[i]);
- if (!trig) {
- ret = -ENOMEM;
- goto error_free_completed_registrations;
- }
- list_add(&trig->alloc_list, &iio_prtc_trigger_list);
-
- trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
- if (!trig_info) {
- ret = -ENOMEM;
- goto error_put_trigger_and_remove_from_list;
- }
- iio_trigger_set_drvdata(trig, trig_info);
- trig->ops = &iio_prtc_trigger_ops;
- /* RTC access */
- trig_info->rtc = rtc_class_open(pdata[i]);
- if (!trig_info->rtc) {
- ret = -EINVAL;
- goto error_free_trig_info;
- }
- trig_info->task.func = iio_prtc_trigger_poll;
- trig_info->task.private_data = trig;
- ret = rtc_irq_register(trig_info->rtc, &trig_info->task);
- if (ret)
- goto error_close_rtc;
- trig->dev.groups = iio_trig_prtc_attr_groups;
- ret = iio_trigger_register(trig);
- if (ret)
- goto error_unregister_rtc_irq;
- }
- return 0;
-error_unregister_rtc_irq:
- rtc_irq_unregister(trig_info->rtc, &trig_info->task);
-error_close_rtc:
- rtc_class_close(trig_info->rtc);
-error_free_trig_info:
- kfree(trig_info);
-error_put_trigger_and_remove_from_list:
- list_del(&trig->alloc_list);
- iio_trigger_put(trig);
-error_free_completed_registrations:
- list_for_each_entry_safe(trig,
- trig2,
- &iio_prtc_trigger_list,
- alloc_list) {
- trig_info = iio_trigger_get_drvdata(trig);
- rtc_irq_unregister(trig_info->rtc, &trig_info->task);
- rtc_class_close(trig_info->rtc);
- kfree(trig_info);
- iio_trigger_unregister(trig);
- }
- return ret;
-}
-
-static int iio_trig_periodic_rtc_remove(struct platform_device *dev)
-{
- struct iio_trigger *trig, *trig2;
- struct iio_prtc_trigger_info *trig_info;
-
- mutex_lock(&iio_prtc_trigger_list_lock);
- list_for_each_entry_safe(trig,
- trig2,
- &iio_prtc_trigger_list,
- alloc_list) {
- trig_info = iio_trigger_get_drvdata(trig);
- rtc_irq_unregister(trig_info->rtc, &trig_info->task);
- rtc_class_close(trig_info->rtc);
- kfree(trig_info);
- iio_trigger_unregister(trig);
- }
- mutex_unlock(&iio_prtc_trigger_list_lock);
- return 0;
-}
-
-static struct platform_driver iio_trig_periodic_rtc_driver = {
- .probe = iio_trig_periodic_rtc_probe,
- .remove = iio_trig_periodic_rtc_remove,
- .driver = {
- .name = "iio_prtc_trigger",
- },
-};
-
-module_platform_driver(iio_trig_periodic_rtc_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/lustre/Kconfig b/drivers/staging/lustre/Kconfig
deleted file mode 100644
index a224d88bf43d..000000000000
--- a/drivers/staging/lustre/Kconfig
+++ /dev/null
@@ -1,3 +0,0 @@
-source "drivers/staging/lustre/lustre/Kconfig"
-
-source "drivers/staging/lustre/lnet/Kconfig"
diff --git a/drivers/staging/lustre/Makefile b/drivers/staging/lustre/Makefile
deleted file mode 100644
index 95ffe337a80a..000000000000
--- a/drivers/staging/lustre/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LNET) += lnet/
-obj-$(CONFIG_LUSTRE_FS) += lustre/
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt
deleted file mode 100644
index 0676243eea9e..000000000000
--- a/drivers/staging/lustre/README.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Lustre Parallel Filesystem Client
-=================================
-
-The Lustre file system is an open-source, parallel file system
-that supports many requirements of leadership class HPC simulation
-environments.
-Born from a research project at Carnegie Mellon University,
-the Lustre file system is a widely-used option in HPC.
-The Lustre file system provides a POSIX-compliant file system interface and
-can scale to thousands of clients, petabytes of storage, and
-hundreds of gigabytes per second of I/O bandwidth.
-
-Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS),
-Lustre has independent Metadata and Data servers that clients can access
-in parallel to maximize performance.
-
-In order to use the Lustre client you will need to download the "lustre-client"
-package that contains the userspace tools from http://lustre.org/download/
-
-You will need to install and configure your Lustre servers separately.
-
-Mount Syntax
-============
-After you have installed the lustre-client tools, including the mount.lustre
-binary, you can mount your Lustre filesystem with:
-
-mount -t lustre mgs:/fsname mnt
-
-where mgs is the host name or IP address of your Lustre MGS (management service)
-and fsname is the name of the filesystem you would like to mount.
-
-
-Mount Options
-=============
-
- noflock
- Disable posix file locking (Applications trying to use
- the functionality will get ENOSYS)
-
- localflock
- Enable local flock support, using only client-local flock
- (faster, for applications that require flock but do not run
- on multiple nodes).
-
- flock
- Enable cluster-global posix file locking coherent across all
- client nodes.
-
- user_xattr, nouser_xattr
- Support "user." extended attributes (or not)
-
- user_fid2path, nouser_fid2path
- Enable FID to path translation by regular users (or not)
-
- checksum, nochecksum
- Verify data consistency on the wire and in memory as it passes
- between the layers (or not).
-
- lruresize, nolruresize
-	Allow lock LRU to be controlled by memory pressure on the server
-	(otherwise only 100 locks per CPU per server are kept on this client;
-	this default is controlled by the lru_size proc parameter).
-
- lazystatfs, nolazystatfs
- Do not block in statfs() if some of the servers are down.
-
- 32bitapi
-	Shrink inode numbers to fit into 32 bits. This is necessary
-	if you plan to re-export the Lustre filesystem from this client
-	via NFSv4.
-
- verbose, noverbose
- Enable mount/umount console messages (or not)
-
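
For example, mounting with cluster-wide flock and user extended attributes
enabled would look like this (server name and mount point are illustrative):

mount -t lustre -o flock,user_xattr mgs:/fsname /mnt/lustre
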
-More Information
-================
-You can get more information at the Lustre website: http://wiki.lustre.org/
-
-Source for the userspace tools and out-of-tree client and server code
-is available at: http://git.hpdd.intel.com/fs/lustre-release.git
-
-Latest binary packages:
-http://lustre.org/download/
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
deleted file mode 100644
index f194417d0af7..000000000000
--- a/drivers/staging/lustre/TODO
+++ /dev/null
@@ -1,12 +0,0 @@
-* Possible remaining coding style fixes.
-* Remove dead code.
-* Separate client/server functionality. Functions only used by the server can
-  be removed from the client.
-* Clean up libcfs layer. Ideally we can remove include/linux/libcfs entirely.
-* Clean up CLIO layer. Lustre client readahead/writeback control needs to
-  better fit what the kernel provides.
-* Add documentation in Documentation/.
-* Other minor misc cleanups...
-
-Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com>, and Oleg Drokin <oleg.drokin@intel.com>.
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
deleted file mode 100644
index 1edfca58c1c6..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/curproc.h
- *
- * Lustre curproc API declaration
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#ifndef __LIBCFS_CURPROC_H__
-#define __LIBCFS_CURPROC_H__
-
-/*
- * Plus, platform-specific constant
- *
- * CFS_CURPROC_COMM_MAX,
- *
- * and opaque scalar type
- *
- * kernel_cap_t
- */
-
-/* helpers for the current task's pid and command name */
-#define current_pid() (current->pid)
-#define current_comm() (current->comm)
-
-typedef __u32 cfs_cap_t;
-
-#define CFS_CAP_CHOWN 0
-#define CFS_CAP_DAC_OVERRIDE 1
-#define CFS_CAP_DAC_READ_SEARCH 2
-#define CFS_CAP_FOWNER 3
-#define CFS_CAP_FSETID 4
-#define CFS_CAP_LINUX_IMMUTABLE 9
-#define CFS_CAP_SYS_ADMIN 21
-#define CFS_CAP_SYS_BOOT 23
-#define CFS_CAP_SYS_RESOURCE 24
-
-#define CFS_CAP_FS_MASK ((1 << CFS_CAP_CHOWN) | \
- (1 << CFS_CAP_DAC_OVERRIDE) | \
- (1 << CFS_CAP_DAC_READ_SEARCH) | \
- (1 << CFS_CAP_FOWNER) | \
- (1 << CFS_CAP_FSETID) | \
- (1 << CFS_CAP_LINUX_IMMUTABLE) | \
- (1 << CFS_CAP_SYS_ADMIN) | \
- (1 << CFS_CAP_SYS_BOOT) | \
- (1 << CFS_CAP_SYS_RESOURCE))
-
-void cfs_cap_raise(cfs_cap_t cap);
-void cfs_cap_lower(cfs_cap_t cap);
-int cfs_cap_raised(cfs_cap_t cap);
-cfs_cap_t cfs_curproc_cap_pack(void);
-
-/* __LIBCFS_CURPROC_H__ */
-#endif
-/*
- * Local variables:
- * c-indentation-style: "K&R"
- * c-basic-offset: 8
- * tab-width: 8
- * fill-column: 80
- * scroll-step: 1
- * End:
- */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
deleted file mode 100644
index 385ced15c742..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LIBCFS_LIBCFS_H__
-#define __LIBCFS_LIBCFS_H__
-
-#include "linux/libcfs.h"
-#include <linux/gfp.h>
-
-#include "curproc.h"
-
-static inline int __is_po2(unsigned long long val)
-{
- return !(val & (val - 1));
-}
-
-#define IS_PO2(val) __is_po2((unsigned long long)(val))
-
-#define LOWEST_BIT_SET(x) ((x) & ~((x) - 1))
-
-/*
- * Lustre Error Checksum: calculates checksum
- * of Hex number by XORing each bit.
- */
-#define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
- ((hexnum) >> 8 & 0xf))
-
-#define LUSTRE_SRV_LNET_PID LUSTRE_LNET_PID
-
-#include <linux/list.h>
-
-/* need both kernel and user-land acceptor */
-#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512
-#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
-
-/*
- * libcfs pseudo device operations
- *
- * It's just a draft for now.
- */
-
-struct cfs_psdev_file {
- unsigned long off;
- void *private_data;
- unsigned long reserved1;
- unsigned long reserved2;
-};
-
-struct cfs_psdev_ops {
- int (*p_open)(unsigned long, void *);
- int (*p_close)(unsigned long, void *);
- int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void *);
-};
-
-/*
- * Drop into debugger, if possible. Implementation is provided by platform.
- */
-
-void cfs_enter_debugger(void);
-
-/*
- * Defined by platform
- */
-int unshare_fs_struct(void);
-sigset_t cfs_get_blocked_sigs(void);
-sigset_t cfs_block_allsigs(void);
-sigset_t cfs_block_sigs(unsigned long sigs);
-sigset_t cfs_block_sigsinv(unsigned long sigs);
-void cfs_restore_sigs(sigset_t);
-int cfs_signal_pending(void);
-void cfs_clear_sigpending(void);
-
-/*
- * Random number handling
- */
-
-/* returns a random 32-bit integer */
-unsigned int cfs_rand(void);
-/* seed the generator */
-void cfs_srand(unsigned int, unsigned int);
-void cfs_get_random_bytes(void *buf, int size);
-
-#include "libcfs_debug.h"
-#include "libcfs_cpu.h"
-#include "libcfs_private.h"
-#include "libcfs_ioctl.h"
-#include "libcfs_prim.h"
-#include "libcfs_time.h"
-#include "libcfs_string.h"
-#include "libcfs_kernelcomm.h"
-#include "libcfs_workitem.h"
-#include "libcfs_hash.h"
-#include "libcfs_fail.h"
-#include "libcfs_crypto.h"
-
-/* container_of depends on "likely" which is defined in libcfs_private.h */
-static inline void *__container_of(void *ptr, unsigned long shift)
-{
- if (IS_ERR_OR_NULL(ptr))
- return ptr;
- return (char *)ptr - shift;
-}
-
-#define container_of0(ptr, type, member) \
- ((type *)__container_of((void *)(ptr), offsetof(type, member)))
-
-#define _LIBCFS_H
-
-void *libcfs_kvzalloc(size_t size, gfp_t flags);
-void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
- gfp_t flags);
-
-extern struct miscdevice libcfs_dev;
-/**
- * The path of debug log dump upcall script.
- */
-extern char lnet_upcall[1024];
-extern char lnet_debug_log_upcall[1024];
-
-extern void libcfs_init_nidstrings(void);
-
-extern struct cfs_psdev_ops libcfs_psdev_ops;
-
-extern struct cfs_wi_sched *cfs_sched_rehash;
-
-struct lnet_debugfs_symlink_def {
- char *name;
- char *target;
-};
-
-void lustre_insert_debugfs(struct ctl_table *table,
- const struct lnet_debugfs_symlink_def *symlinks);
-
-#endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
deleted file mode 100644
index 787867847483..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_cpu.h
- *
- * CPU partition
- * . A CPU partition is a virtual processing unit.
- *
- * . A CPU partition can contain 1-N cores or 1-N NUMA nodes;
- *   in other words, a CPU partition is a pool of processors.
- *
- * CPU Partition Table (CPT)
- * . A CPT is a set of CPU partitions.
- *
- * . There are two modes for a CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP.
- *
- * . The user can specify the total number of CPU partitions while creating a
- *   CPT; CPU partition IDs always start from 0.
- *
- *   Example: if there are 8 cores on the system, creating a CPT with
- *   cpu_npartitions=4 gives:
- *		core[0, 1] = partition[0], core[2, 3] = partition[1]
- *		core[4, 5] = partition[2], core[6, 7] = partition[3]
- *
- *   cpu_npartitions=1:
- *		core[0, 1, ... 7] = partition[0]
- *
- * . The user can also specify CPU partitions with a string pattern.
- *
- *   Examples: cpu_partitions="0[0,1], 1[2,3]"
- *             cpu_partitions="N 0[0-3], 1[4-8]"
- *
- *   The leading character "N" means the following numbers are NUMA node IDs.
- *
- * . NUMA allocators and CPU-affinity threads are built over CPU partitions,
- *   instead of over HW CPUs or HW nodes.
- *
- * . By default, Lustre modules should refer to the global cfs_cpt_table
- *   instead of accessing HW CPUs directly, so the concurrency of Lustre can
- *   be configured by the cpu_npartitions of the global cfs_cpt_table.
- *
- * . If cpu_npartitions=1 (all CPUs in one pool), Lustre should work the same
- *   way as version 2.2 or earlier.
- *
- * Author: liang@whamcloud.com
- */
-
-#ifndef __LIBCFS_CPU_H__
-#define __LIBCFS_CPU_H__
-
-/* any CPU partition */
-#define CFS_CPT_ANY (-1)
-
-#ifdef CONFIG_SMP
-/**
- * return cpumask of CPU partition \a cpt
- */
-cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
-/**
- * print string information of cpt-table
- */
-int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
-#else /* !CONFIG_SMP */
-struct cfs_cpt_table {
- /* # of CPU partitions */
- int ctb_nparts;
- /* cpu mask */
- cpumask_t ctb_mask;
- /* node mask */
- nodemask_t ctb_nodemask;
- /* version */
- __u64 ctb_version;
-};
-
-static inline cpumask_t *
-cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
-{
- return NULL;
-}
-
-static inline int
-cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
-{
- return 0;
-}
-#endif /* CONFIG_SMP */
-
-extern struct cfs_cpt_table *cfs_cpt_table;
-
-/**
- * destroy a CPU partition table
- */
-void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
-/**
- * create a cfs_cpt_table with \a ncpt number of partitions
- */
-struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);
-/**
- * return total number of CPU partitions in \a cptab
- */
-int
-cfs_cpt_number(struct cfs_cpt_table *cptab);
-/**
- * return number of HW cores or hyper-threadings in a CPU partition \a cpt
- */
-int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
-/**
- * is there any online CPU in CPU partition \a cpt
- */
-int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
-/**
- * return nodemask of CPU partition \a cpt
- */
-nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
-/**
- * shadow current HW processor ID to CPU-partition ID of \a cptab
- */
-int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);
-/**
- * shadow HW processor ID \a CPU to CPU-partition ID by \a cptab
- */
-int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);
-/**
- * bind current thread on a CPU-partition \a cpt of \a cptab
- */
-int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
-/**
- * add \a cpu to CPU partition \a cpt of \a cptab, return 1 for success,
- * otherwise 0 is returned
- */
-int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
-/**
- * remove \a cpu from CPU partition \a cpt of \a cptab
- */
-void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
-/**
- * add all cpus in \a mask to CPU partition \a cpt
- * return 1 if successfully set all CPUs, otherwise return 0
- */
-int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab,
- int cpt, cpumask_t *mask);
-/**
- * remove all cpus in \a mask from CPU partition \a cpt
- */
-void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab,
- int cpt, cpumask_t *mask);
-/**
- * add all cpus in NUMA node \a node to CPU partition \a cpt
- * return 1 if successfully set all CPUs, otherwise return 0
- */
-int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);
-/**
- * remove all cpus in NUMA node \a node from CPU partition \a cpt
- */
-void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);
-
-/**
- * add all cpus in node mask \a mask to CPU partition \a cpt
- * return 1 if successfully set all CPUs, otherwise return 0
- */
-int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab,
- int cpt, nodemask_t *mask);
-/**
- * remove all cpus in node mask \a mask from CPU partition \a cpt
- */
-void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab,
- int cpt, nodemask_t *mask);
-/**
- * unset all cpus for CPU partition \a cpt
- */
-void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt);
-/**
- * convert partition id \a cpt to numa node id, if there are more than one
- * nodes in this partition, it might return a different node id each time.
- */
-int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
-
-/**
- * return number of HTs in the same core of \a cpu
- */
-int cfs_cpu_ht_nsiblings(int cpu);
-
-/**
- * iterate over all CPU partitions in \a cptab
- */
-#define cfs_cpt_for_each(i, cptab) \
- for (i = 0; i < cfs_cpt_number(cptab); i++)
-
-int cfs_cpu_init(void);
-void cfs_cpu_fini(void);
-
-#endif /* __LIBCFS_CPU_H__ */
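
A minimal sketch of how the CPT API declared above is typically consumed; the include path and printed message are illustrative, and error handling is omitted:

#include <linux/kernel.h>

#include "libcfs_cpu.h"		/* include path is illustrative */

/* Walk the global partition table and bind the caller to partition 0. */
static void example_cpt_usage(void)
{
	int cpt;

	cfs_cpt_for_each(cpt, cfs_cpt_table)
		pr_info("cpt %d: %d cores, spread node %d\n",
			cpt, cfs_cpt_weight(cfs_cpt_table, cpt),
			cfs_cpt_spread_node(cfs_cpt_table, cpt));

	/* bind the current thread to CPU partition 0 */
	cfs_cpt_bind(cfs_cpt_table, 0);
}
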
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
deleted file mode 100644
index e8663697e7a6..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- */
-
-#ifndef _LIBCFS_CRYPTO_H
-#define _LIBCFS_CRYPTO_H
-
-struct cfs_crypto_hash_type {
- char *cht_name; /**< hash algorithm name, equal to
- * format name for crypto api */
-	unsigned int cht_key;	/**< default initial key (valid for
-				 * 4-byte contexts like crc32, adler32) */
- unsigned int cht_size; /**< hash digest size */
-};
-
-enum cfs_crypto_hash_alg {
- CFS_HASH_ALG_NULL = 0,
- CFS_HASH_ALG_ADLER32,
- CFS_HASH_ALG_CRC32,
- CFS_HASH_ALG_MD5,
- CFS_HASH_ALG_SHA1,
- CFS_HASH_ALG_SHA256,
- CFS_HASH_ALG_SHA384,
- CFS_HASH_ALG_SHA512,
- CFS_HASH_ALG_CRC32C,
- CFS_HASH_ALG_MAX
-};
-
-static struct cfs_crypto_hash_type hash_types[] = {
- [CFS_HASH_ALG_NULL] = { "null", 0, 0 },
- [CFS_HASH_ALG_ADLER32] = { "adler32", 1, 4 },
- [CFS_HASH_ALG_CRC32] = { "crc32", ~0, 4 },
- [CFS_HASH_ALG_CRC32C] = { "crc32c", ~0, 4 },
- [CFS_HASH_ALG_MD5] = { "md5", 0, 16 },
- [CFS_HASH_ALG_SHA1] = { "sha1", 0, 20 },
- [CFS_HASH_ALG_SHA256] = { "sha256", 0, 32 },
- [CFS_HASH_ALG_SHA384] = { "sha384", 0, 48 },
- [CFS_HASH_ALG_SHA512] = { "sha512", 0, 64 },
-};
-
-/** Return pointer to type of hash for valid hash algorithm identifier */
-static inline const struct cfs_crypto_hash_type *
- cfs_crypto_hash_type(unsigned char hash_alg)
-{
- struct cfs_crypto_hash_type *ht;
-
- if (hash_alg < CFS_HASH_ALG_MAX) {
- ht = &hash_types[hash_alg];
- if (ht->cht_name)
- return ht;
- }
- return NULL;
-}
-
-/** Return hash name for valid hash algorithm identifier or "unknown" */
-static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
-{
- const struct cfs_crypto_hash_type *ht;
-
- ht = cfs_crypto_hash_type(hash_alg);
- if (ht)
- return ht->cht_name;
- return "unknown";
-}
-
-/** Return digest size for valid algorithm identifier or 0 */
-static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
-{
- const struct cfs_crypto_hash_type *ht;
-
- ht = cfs_crypto_hash_type(hash_alg);
- if (ht)
- return ht->cht_size;
- return 0;
-}
-
-/** Return hash identifier for valid hash algorithm name or 0xFF */
-static inline unsigned char cfs_crypto_hash_alg(const char *algname)
-{
- unsigned char i;
-
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- if (!strcmp(hash_types[i].cht_name, algname))
- break;
- return (i == CFS_HASH_ALG_MAX ? 0xFF : i);
-}
-
-/** Calculate hash digest for buffer.
- * @param alg id of hash algorithm
- * @param buf buffer of data
- * @param buf_len buffer len
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @param hash [out] pointer to hash, if it is NULL, hash_len is
- * set to valid digest size in bytes, retval -ENOSPC.
- * @param hash_len [in,out] size of hash buffer
- * @returns status of operation
- * @retval -EINVAL if buf, buf_len, hash_len or alg_id is invalid
- * @retval -ENODEV if this algorithm is unsupported
- * @retval -ENOSPC if pointer to hash is NULL, or hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
-int cfs_crypto_hash_digest(unsigned char alg,
- const void *buf, unsigned int buf_len,
- unsigned char *key, unsigned int key_len,
- unsigned char *hash, unsigned int *hash_len);
-
-/* cfs crypto hash descriptor */
-struct cfs_crypto_hash_desc;
-
-/** Allocate and initialize descriptor for hash algorithm.
- * @param alg algorithm id
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @returns pointer to descriptor of hash instance
- * @retval ERR_PTR(error) when errors occurred.
- */
-struct cfs_crypto_hash_desc*
- cfs_crypto_hash_init(unsigned char alg,
- unsigned char *key, unsigned int key_len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param page data page
- * @param offset data offset
- * @param len data len
- * @returns status of operation
- * @retval 0 for success.
- */
-int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
- struct page *page, unsigned int offset,
- unsigned int len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param buf pointer to data buffer
- * @param buf_len size of data at buffer
- * @returns status of operation
- * @retval 0 for success.
- */
-int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
- unsigned int buf_len);
-
-/** Finalize hash calculation, copy hash digest to buffer, destroy hash
- * descriptor.
- * @param desc hash descriptor
- * @param hash buffer pointer to store hash digest
- * @param hash_len pointer to hash buffer size, if NULL
- * destroy hash descriptor
- * @returns status of operation
- * @retval -ENOSPC if hash is NULL, or *hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
-int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
- unsigned char *hash, unsigned int *hash_len);
-/**
- * Register crypto hash algorithms
- */
-int cfs_crypto_register(void);
-
-/**
- * Unregister
- */
-void cfs_crypto_unregister(void);
-
-/** Return hash speed in Mbytes per second for valid hash algorithm
- * identifier. If the test was unsuccessful, -1 is returned.
- */
-int cfs_crypto_hash_speed(unsigned char hash_alg);
-#endif
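
A minimal sketch of the one-shot digest interface described above; the algorithm name and include path are illustrative, and the default initial value is assumed:

#include <linux/errno.h>

#include "libcfs_crypto.h"	/* include path is illustrative */

/* Checksum a flat buffer with crc32c, using the algorithm's default seed. */
static int example_checksum(const void *buf, unsigned int len,
			    unsigned char *hash, unsigned int *hash_len)
{
	unsigned char alg = cfs_crypto_hash_alg("crc32c");

	if (alg == 0xFF)	/* name not found in hash_types[] */
		return -ENODEV;

	/* key == NULL, key_len == 0: use the algorithm's default initial value */
	return cfs_crypto_hash_digest(alg, buf, len, NULL, 0, hash, hash_len);
}
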
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
deleted file mode 100644
index a1787bb43483..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_debug.h
- *
- * Debug messages and assertions
- *
- */
-
-#ifndef __LIBCFS_DEBUG_H__
-#define __LIBCFS_DEBUG_H__
-
-/*
- * Debugging
- */
-extern unsigned int libcfs_subsystem_debug;
-extern unsigned int libcfs_stack;
-extern unsigned int libcfs_debug;
-extern unsigned int libcfs_printk;
-extern unsigned int libcfs_console_ratelimit;
-extern unsigned int libcfs_console_max_delay;
-extern unsigned int libcfs_console_min_delay;
-extern unsigned int libcfs_console_backoff;
-extern unsigned int libcfs_debug_binary;
-extern char libcfs_debug_file_path_arr[PATH_MAX];
-
-int libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys);
-int libcfs_debug_str2mask(int *mask, const char *str, int is_subsys);
-
-/* Has there been an LBUG? */
-extern unsigned int libcfs_catastrophe;
-extern unsigned int libcfs_panic_on_lbug;
-
-/**
- * Format for debug message headers
- */
-struct ptldebug_header {
- __u32 ph_len;
- __u32 ph_flags;
- __u32 ph_subsys;
- __u32 ph_mask;
- __u16 ph_cpu_id;
- __u16 ph_type;
- /* time_t overflow in 2106 */
- __u32 ph_sec;
- __u64 ph_usec;
- __u32 ph_stack;
- __u32 ph_pid;
- __u32 ph_extern_pid;
- __u32 ph_line_num;
-} __packed;
-
-#define PH_FLAG_FIRST_RECORD 1
-
-/* Debugging subsystems (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
-#define S_UNDEFINED 0x00000001
-#define S_MDC 0x00000002
-#define S_MDS 0x00000004
-#define S_OSC 0x00000008
-#define S_OST 0x00000010
-#define S_CLASS 0x00000020
-#define S_LOG 0x00000040
-#define S_LLITE 0x00000080
-#define S_RPC 0x00000100
-#define S_MGMT 0x00000200
-#define S_LNET 0x00000400
-#define S_LND 0x00000800 /* ALL LNDs */
-#define S_PINGER 0x00001000
-#define S_FILTER 0x00002000
-/* unused */
-#define S_ECHO 0x00008000
-#define S_LDLM 0x00010000
-#define S_LOV 0x00020000
-#define S_LQUOTA 0x00040000
-#define S_OSD 0x00080000
-/* unused */
-/* unused */
-/* unused */
-#define S_LMV 0x00800000 /* b_new_cmd */
-/* unused */
-#define S_SEC 0x02000000 /* upcall cache */
-#define S_GSS 0x04000000 /* b_new_cmd */
-/* unused */
-#define S_MGC 0x10000000
-#define S_MGS 0x20000000
-#define S_FID 0x40000000 /* b_new_cmd */
-#define S_FLD 0x80000000 /* b_new_cmd */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
-
-/* Debugging masks (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
-#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
-#define D_INODE 0x00000002
-#define D_SUPER 0x00000004
-#define D_EXT2 0x00000008 /* anything from ext2_debug */
-#define D_MALLOC 0x00000010 /* print malloc, free information */
-#define D_CACHE 0x00000020 /* cache-related items */
-#define D_INFO 0x00000040 /* general information */
-#define D_IOCTL 0x00000080 /* ioctl related information */
-#define D_NETERROR 0x00000100 /* network errors */
-#define D_NET 0x00000200 /* network communications */
-#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */
-#define D_BUFFS 0x00000800
-#define D_OTHER 0x00001000
-#define D_DENTRY 0x00002000
-#define D_NETTRACE 0x00004000
-#define D_PAGE 0x00008000 /* bulk page handling */
-#define D_DLMTRACE 0x00010000
-#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */
-#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */
-#define D_HA 0x00080000 /* recovery and failover */
-#define D_RPCTRACE 0x00100000 /* for distributed debugging */
-#define D_VFSTRACE 0x00200000
-#define D_READA 0x00400000 /* read-ahead */
-#define D_MMAP 0x00800000
-#define D_CONFIG 0x01000000
-#define D_CONSOLE 0x02000000
-#define D_QUOTA 0x04000000
-#define D_SEC 0x08000000
-#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
-/* keep these in sync with lnet/{utils,libcfs}/debug.c */
-
-#define D_HSM D_TRACE
-
-#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
-
-#ifndef DEBUG_SUBSYSTEM
-# define DEBUG_SUBSYSTEM S_UNDEFINED
-#endif
-
-#define CDEBUG_DEFAULT_MAX_DELAY (cfs_time_seconds(600)) /* jiffies */
-#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
-#define CDEBUG_DEFAULT_BACKOFF 2
-struct cfs_debug_limit_state {
- unsigned long cdls_next;
- unsigned int cdls_delay;
- int cdls_count;
-};
-
-struct libcfs_debug_msg_data {
- const char *msg_file;
- const char *msg_fn;
- int msg_subsys;
- int msg_line;
- int msg_mask;
- struct cfs_debug_limit_state *msg_cdls;
-};
-
-#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
-do { \
- (data)->msg_subsys = DEBUG_SUBSYSTEM; \
- (data)->msg_file = __FILE__; \
- (data)->msg_fn = __func__; \
- (data)->msg_line = __LINE__; \
- (data)->msg_cdls = (cdls); \
- (data)->msg_mask = (mask); \
-} while (0)
-
-#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \
- static struct libcfs_debug_msg_data dataname = { \
- .msg_subsys = DEBUG_SUBSYSTEM, \
- .msg_file = __FILE__, \
- .msg_fn = __func__, \
- .msg_line = __LINE__, \
- .msg_cdls = (cdls) }; \
- dataname.msg_mask = (mask)
-
-/**
- * Filters out logging messages based on mask and subsystem.
- */
-static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
-{
- return mask & D_CANTMASK ||
- ((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem));
-}
-
-#define __CDEBUG(cdls, mask, format, ...) \
-do { \
- static struct libcfs_debug_msg_data msgdata; \
- \
- CFS_CHECK_STACK(&msgdata, mask, cdls); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \
- libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-#define CDEBUG(mask, format, ...) __CDEBUG(NULL, mask, format, ## __VA_ARGS__)
-
-#define CDEBUG_LIMIT(mask, format, ...) \
-do { \
- static struct cfs_debug_limit_state cdls; \
- \
- __CDEBUG(&cdls, mask, format, ## __VA_ARGS__); \
-} while (0)
-
-#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__)
-#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
-#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a)
-#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__)
-
-#define LCONSOLE(mask, format, ...) CDEBUG(D_CONSOLE | (mask), format, ## __VA_ARGS__)
-#define LCONSOLE_INFO(format, ...) CDEBUG_LIMIT(D_CONSOLE, format, ## __VA_ARGS__)
-#define LCONSOLE_WARN(format, ...) CDEBUG_LIMIT(D_CONSOLE | D_WARNING, format, ## __VA_ARGS__)
-#define LCONSOLE_ERROR_MSG(errnum, format, ...) CDEBUG_LIMIT(D_CONSOLE | D_ERROR, \
- "%x-%x: " format, errnum, LERRCHKSUM(errnum), ## __VA_ARGS__)
-#define LCONSOLE_ERROR(format, ...) LCONSOLE_ERROR_MSG(0x00, format, ## __VA_ARGS__)
-
-#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
-
-int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
- const char *format1, ...)
- __printf(2, 3);
-
-int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
- const char *format1,
- va_list args, const char *format2, ...)
- __printf(4, 5);
-
-/* other external symbols that tracefile provides: */
-int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char __user *usr_buffer, int usr_buffer_nob);
-int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
- const char *knl_buffer, char *append);
-
-#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
-
-#endif /* __LIBCFS_DEBUG_H__ */
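
A minimal sketch of how these macros are used by a Lustre module; the subsystem choice, include path and message text are illustrative:

/* DEBUG_SUBSYSTEM must be defined before the libcfs headers are included. */
#define DEBUG_SUBSYSTEM S_LNET

#include "libcfs_debug.h"	/* include path is illustrative */

static void example_debug(int rc)
{
	/* only emitted when D_INFO is set in libcfs_debug for this subsystem */
	CDEBUG(D_INFO, "operation starting, rc = %d\n", rc);

	if (rc < 0)
		CERROR("operation failed: rc = %d\n", rc);	/* console rate-limited */
}
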
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
deleted file mode 100644
index aa69c6a33d19..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Oracle Corporation, Inc.
- */
-
-#ifndef _LIBCFS_FAIL_H
-#define _LIBCFS_FAIL_H
-
-extern unsigned long cfs_fail_loc;
-extern unsigned int cfs_fail_val;
-
-extern wait_queue_head_t cfs_race_waitq;
-extern int cfs_race_state;
-
-int __cfs_fail_check_set(__u32 id, __u32 value, int set);
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set);
-
-enum {
- CFS_FAIL_LOC_NOSET = 0,
- CFS_FAIL_LOC_ORSET = 1,
- CFS_FAIL_LOC_RESET = 2,
- CFS_FAIL_LOC_VALUE = 3
-};
-
-/* Failure injection control */
-#define CFS_FAIL_MASK_SYS 0x0000FF00
-#define CFS_FAIL_MASK_LOC (0x000000FF | CFS_FAIL_MASK_SYS)
-
-#define CFS_FAILED_BIT 30
-/* CFS_FAILED is 0x40000000 */
-#define CFS_FAILED (1 << CFS_FAILED_BIT)
-
-#define CFS_FAIL_ONCE_BIT 31
-/* CFS_FAIL_ONCE is 0x80000000 */
-#define CFS_FAIL_ONCE (1 << CFS_FAIL_ONCE_BIT)
-
-/* The following flags aren't made to be combined */
-#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */
-#define CFS_FAIL_SOME 0x10000000 /* only fail N times */
-#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */
-#define CFS_FAIL_USR1 0x04000000 /* user flag */
-
-#define CFS_FAIL_PRECHECK(id) (cfs_fail_loc && \
- (cfs_fail_loc & CFS_FAIL_MASK_LOC) == \
- ((id) & CFS_FAIL_MASK_LOC))
-
-static inline int cfs_fail_check_set(__u32 id, __u32 value,
- int set, int quiet)
-{
- int ret = 0;
-
- if (unlikely(CFS_FAIL_PRECHECK(id))) {
- ret = __cfs_fail_check_set(id, value, set);
- if (ret) {
- if (quiet) {
- CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n",
- id, value);
- } else {
- LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n",
- id, value);
- }
- }
- }
-
- return ret;
-}
-
-/* If id hits cfs_fail_loc, return 1; otherwise return 0 */
-#define CFS_FAIL_CHECK(id) \
- cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 0)
-#define CFS_FAIL_CHECK_QUIET(id) \
- cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1)
-
-/* If id hits cfs_fail_loc and cfs_fail_val == (-1 or value), return 1;
- * otherwise return 0 */
-#define CFS_FAIL_CHECK_VALUE(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0)
-#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1)
-
-/* If id hits cfs_fail_loc, set cfs_fail_loc |= value and return 1;
- * otherwise return 0 */
-#define CFS_FAIL_CHECK_ORSET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0)
-#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1)
-
-/* If id hits cfs_fail_loc, set cfs_fail_loc = value and return 1;
- * otherwise return 0 */
-#define CFS_FAIL_CHECK_RESET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0)
-#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \
- cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1)
-
-static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
-{
- if (unlikely(CFS_FAIL_PRECHECK(id)))
- return __cfs_fail_timeout_set(id, value, ms, set);
- return 0;
-}
-
-/* If id hits cfs_fail_loc, sleep for the given seconds or milliseconds */
-#define CFS_FAIL_TIMEOUT(id, secs) \
- cfs_fail_timeout_set(id, 0, secs * 1000, CFS_FAIL_LOC_NOSET)
-
-#define CFS_FAIL_TIMEOUT_MS(id, ms) \
- cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET)
-
-/* If id hits cfs_fail_loc, set cfs_fail_loc |= value and
- * sleep for the given seconds or milliseconds */
-#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
- cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
-
-#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
- cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
-
-/* The idea here is to synchronise two threads to force a race. The
- * first thread that calls this with a matching fail_loc is put to
- * sleep. The next thread that calls with the same fail_loc wakes up
- * the first and continues. */
-static inline void cfs_race(__u32 id)
-{
-
- if (CFS_FAIL_PRECHECK(id)) {
- if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
- int rc;
-
- cfs_race_state = 0;
- CERROR("cfs_race id %x sleeping\n", id);
- rc = wait_event_interruptible(cfs_race_waitq,
- cfs_race_state != 0);
- CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
- } else {
- CERROR("cfs_fail_race id %x waking\n", id);
- cfs_race_state = 1;
- wake_up(&cfs_race_waitq);
- }
- }
-}
-
-#define CFS_RACE(id) cfs_race(id)
-
-#endif /* _LIBCFS_FAIL_H */
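
A sketch of how these fault-injection hooks were typically wired into code under test. The fault ID and function below are invented; real IDs were written into cfs_fail_loc (optionally OR'd with flags such as CFS_FAIL_ONCE) through a sysctl or module parameter:

#define DEMO_FAIL_DROP_REPLY	0x1234	/* hypothetical fault id */

static int demo_handle_request(void)
{
	/* returns 1 when cfs_fail_loc matches this id */
	if (CFS_FAIL_CHECK(DEMO_FAIL_DROP_REPLY))
		return -ENOTCONN;	/* simulate a dropped reply */

	/* alternatively, stall for 2 seconds instead of failing outright */
	CFS_FAIL_TIMEOUT(DEMO_FAIL_DROP_REPLY, 2);

	return 0;
}
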
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
deleted file mode 100644
index c40814591189..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_hash.h
- *
- * Hashing routines
- *
- */
-
-#ifndef __LIBCFS_HASH_H__
-#define __LIBCFS_HASH_H__
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
-
-/*
- * Ideally we would use HAVE_HASH_LONG for this, but on linux we configure
- * the linux kernel and user space at the same time, so we need to differentiate
- * between them explicitly. If this is not needed on other architectures, then
- * we'll need to move the functions to architecture specific headers.
- */
-
-#include <linux/hash.h>
-
-/** disable debug */
-#define CFS_HASH_DEBUG_NONE 0
-/** record hash depth and output to console when it's too deep;
- * the computing overhead is low but it consumes more memory */
-#define CFS_HASH_DEBUG_1 1
-/** expensive, check key validation */
-#define CFS_HASH_DEBUG_2 2
-
-#define CFS_HASH_DEBUG_LEVEL CFS_HASH_DEBUG_NONE
-
-struct cfs_hash_ops;
-struct cfs_hash_lock_ops;
-struct cfs_hash_hlist_ops;
-
-union cfs_hash_lock {
- rwlock_t rw; /**< rwlock */
- spinlock_t spin; /**< spinlock */
-};
-
-/**
- * cfs_hash_bucket is a container of:
- * - lock, counter ...
- * - array of hash-head starting from hsb_head[0], hash-head can be one of
- * . cfs_hash_head_t
- * . cfs_hash_head_dep_t
- * . cfs_hash_dhead_t
- * . cfs_hash_dhead_dep_t
- *   which depends on the requirements of the user
- * - some extra bytes (the caller can request them when creating the hash)
- */
-struct cfs_hash_bucket {
- union cfs_hash_lock hsb_lock; /**< bucket lock */
- __u32 hsb_count; /**< current entries */
- __u32 hsb_version; /**< change version */
- unsigned int hsb_index; /**< index of bucket */
- int hsb_depmax; /**< max depth on bucket */
- long hsb_head[0]; /**< hash-head array */
-};
-
-/**
- * cfs_hash bucket descriptor, it's normally in stack of caller
- */
-struct cfs_hash_bd {
- struct cfs_hash_bucket *bd_bucket; /**< address of bucket */
- unsigned int bd_offset; /**< offset in bucket */
-};
-
-#define CFS_HASH_NAME_LEN 16 /**< default name length */
-#define CFS_HASH_BIGNAME_LEN 64 /**< bigname for param tree */
-
-#define CFS_HASH_BKT_BITS 3 /**< default bits of bucket */
-#define CFS_HASH_BITS_MAX 30 /**< max bits of bucket */
-#define CFS_HASH_BITS_MIN CFS_HASH_BKT_BITS
-
-/**
- * common hash attributes.
- */
-enum cfs_hash_tag {
- /**
-	 * don't need any lock; the caller will protect operations with its
-	 * own lock. With this flag:
- * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
- * will be ignored.
-	 * . Some functions will be disabled with this flag, i.e.:
- * cfs_hash_for_each_empty, cfs_hash_rehash
- */
- CFS_HASH_NO_LOCK = 1 << 0,
- /** no bucket lock, use one spinlock to protect the whole hash */
- CFS_HASH_NO_BKTLOCK = 1 << 1,
- /** rwlock to protect bucket */
- CFS_HASH_RW_BKTLOCK = 1 << 2,
- /** spinlock to protect bucket */
- CFS_HASH_SPIN_BKTLOCK = 1 << 3,
- /** always add new item to tail */
- CFS_HASH_ADD_TAIL = 1 << 4,
- /** hash-table doesn't have refcount on item */
- CFS_HASH_NO_ITEMREF = 1 << 5,
- /** big name for param-tree */
- CFS_HASH_BIGNAME = 1 << 6,
- /** track global count */
- CFS_HASH_COUNTER = 1 << 7,
- /** rehash item by new key */
- CFS_HASH_REHASH_KEY = 1 << 8,
- /** Enable dynamic hash resizing */
- CFS_HASH_REHASH = 1 << 9,
- /** can shrink hash-size */
- CFS_HASH_SHRINK = 1 << 10,
- /** assert hash is empty on exit */
- CFS_HASH_ASSERT_EMPTY = 1 << 11,
- /** record hlist depth */
- CFS_HASH_DEPTH = 1 << 12,
- /**
- * rehash is always scheduled in a different thread, so current
- * change on hash table is non-blocking
- */
- CFS_HASH_NBLK_CHANGE = 1 << 13,
-	/** NB: hs_flags is typed as __u16; change it
-	 * if you ever need more than 16 flags */
-};
-
-/** most used attributes */
-#define CFS_HASH_DEFAULT (CFS_HASH_RW_BKTLOCK | \
- CFS_HASH_COUNTER | CFS_HASH_REHASH)
-
-/**
- * cfs_hash is a hash-table implementation for general purpose, it can support:
- * . two refcount modes
- * hash-table with & without refcount
- * . four lock modes
- * nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock
- * . general operations
- * lookup, add(add_tail or add_head), delete
- * . rehash
- *   grow or shrink
- * . iteration
- * locked iteration and unlocked iteration
- * . bigname
- * support long name hash
- * . debug
- * trace max searching depth
- *
- * Rehash:
- * When the htable grows or shrinks, a separate task (cfs_hash_rehash_worker)
- * is spawned to handle the rehash in the background; it's possible that other
- * processes can concurrently perform additions, deletions, and lookups
- * without being blocked on rehash completion, because rehash will release
- * the global wrlock for each bucket.
- *
- * Rehash and iteration can't run at the same time because it's too tricky
- * to keep both of them safe and correct.
- * As they are relatively rare operations:
- * . if iteration is in progress when we try to launch a rehash, the rehash
- *   just gives up and the iterator will launch a rehash at the end.
- * . if a rehash is in progress when we try to iterate the hash table,
- *   then we just wait (it shouldn't take very long); in any case, nobody
- *   should expect iteration of the whole hash-table to be non-blocking.
- *
- * During rehashing, a (key,object) pair may be in one of two buckets,
- * depending on whether the worker task has yet to transfer the object
- * to its new location in the table. Lookups and deletions need to search both
- * locations; additions must take care to only insert into the new bucket.
- */
-
-struct cfs_hash {
- /** serialize with rehash, or serialize all operations if
- * the hash-table has CFS_HASH_NO_BKTLOCK */
- union cfs_hash_lock hs_lock;
- /** hash operations */
- struct cfs_hash_ops *hs_ops;
- /** hash lock operations */
- struct cfs_hash_lock_ops *hs_lops;
- /** hash list operations */
- struct cfs_hash_hlist_ops *hs_hops;
- /** hash buckets-table */
- struct cfs_hash_bucket **hs_buckets;
- /** total number of items on this hash-table */
- atomic_t hs_count;
- /** hash flags, see cfs_hash_tag for detail */
- __u16 hs_flags;
- /** # of extra-bytes for bucket, for user saving extended attributes */
- __u16 hs_extra_bytes;
- /** wants to iterate */
- __u8 hs_iterating;
- /** hash-table is dying */
- __u8 hs_exiting;
- /** current hash bits */
- __u8 hs_cur_bits;
- /** min hash bits */
- __u8 hs_min_bits;
- /** max hash bits */
- __u8 hs_max_bits;
- /** bits for rehash */
- __u8 hs_rehash_bits;
- /** bits for each bucket */
- __u8 hs_bkt_bits;
- /** resize min threshold */
- __u16 hs_min_theta;
- /** resize max threshold */
- __u16 hs_max_theta;
- /** resize count */
- __u32 hs_rehash_count;
- /** # of iterators (caller of cfs_hash_for_each_*) */
- __u32 hs_iterators;
- /** rehash workitem */
- cfs_workitem_t hs_rehash_wi;
- /** refcount on this hash table */
- atomic_t hs_refcount;
- /** rehash buckets-table */
- struct cfs_hash_bucket **hs_rehash_buckets;
-#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
- /** serialize debug members */
- spinlock_t hs_dep_lock;
- /** max depth */
- unsigned int hs_dep_max;
- /** id of the deepest bucket */
- unsigned int hs_dep_bkt;
- /** offset in the deepest bucket */
- unsigned int hs_dep_off;
- /** bits when we found the max depth */
- unsigned int hs_dep_bits;
- /** workitem to output max depth */
- cfs_workitem_t hs_dep_wi;
-#endif
- /** name of htable */
- char hs_name[0];
-};
-
-typedef struct cfs_hash_lock_ops {
- /** lock the hash table */
- void (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
- /** unlock the hash table */
- void (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
- /** lock the hash bucket */
- void (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
- /** unlock the hash bucket */
- void (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
-} cfs_hash_lock_ops_t;
-
-typedef struct cfs_hash_hlist_ops {
- /** return hlist_head of hash-head of @bd */
- struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, struct cfs_hash_bd *bd);
- /** return hash-head size */
- int (*hop_hhead_size)(struct cfs_hash *hs);
- /** add @hnode to hash-head of @bd */
- int (*hop_hnode_add)(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, struct hlist_node *hnode);
- /** remove @hnode from hash-head of @bd */
- int (*hop_hnode_del)(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, struct hlist_node *hnode);
-} cfs_hash_hlist_ops_t;
-
-typedef struct cfs_hash_ops {
- /** return hashed value from @key */
- unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask);
- /** return key address of @hnode */
- void * (*hs_key)(struct hlist_node *hnode);
- /** copy key from @hnode to @key */
- void (*hs_keycpy)(struct hlist_node *hnode, void *key);
- /**
- * compare @key with key of @hnode
- * returns 1 on a match
- */
- int (*hs_keycmp)(const void *key, struct hlist_node *hnode);
- /** return object address of @hnode, i.e: container_of(...hnode) */
- void * (*hs_object)(struct hlist_node *hnode);
-	/** get refcount of item, always called while holding the bucket lock */
- void (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
- /** release refcount of item */
- void (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
-	/** release refcount of item, always called while holding the bucket lock */
- void (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *hnode);
- /** it's called before removing of @hnode */
- void (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
-} cfs_hash_ops_t;
-
-/** total number of buckets in @hs */
-#define CFS_HASH_NBKT(hs) \
- (1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))
-
-/** total number of buckets in @hs while rehashing */
-#define CFS_HASH_RH_NBKT(hs) \
- (1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))
-
-/** number of hlists in a bucket */
-#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits)
-
-/** total number of hlist in @hs */
-#define CFS_HASH_NHLIST(hs) (1U << (hs)->hs_cur_bits)
-
-/** total number of hlist in @hs while rehashing */
-#define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits)
-
-static inline int
-cfs_hash_with_no_lock(struct cfs_hash *hs)
-{
- /* caller will serialize all operations for this hash-table */
- return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
-}
-
-static inline int
-cfs_hash_with_no_bktlock(struct cfs_hash *hs)
-{
- /* no bucket lock, one single lock to protect the hash-table */
- return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
-}
-
-static inline int
-cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
-{
- /* rwlock to protect hash bucket */
- return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
-}
-
-static inline int
-cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
-{
- /* spinlock to protect hash bucket */
- return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
-}
-
-static inline int
-cfs_hash_with_add_tail(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
-}
-
-static inline int
-cfs_hash_with_no_itemref(struct cfs_hash *hs)
-{
-	/* hash-table doesn't keep a refcount on the item;
-	 * the item can't be removed from the hash unless
-	 * its refcount is zero */
- return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
-}
-
-static inline int
-cfs_hash_with_bigname(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
-}
-
-static inline int
-cfs_hash_with_counter(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
-}
-
-static inline int
-cfs_hash_with_rehash(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_REHASH) != 0;
-}
-
-static inline int
-cfs_hash_with_rehash_key(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
-}
-
-static inline int
-cfs_hash_with_shrink(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
-}
-
-static inline int
-cfs_hash_with_assert_empty(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
-}
-
-static inline int
-cfs_hash_with_depth(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
-}
-
-static inline int
-cfs_hash_with_nblk_change(struct cfs_hash *hs)
-{
- return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
-}
-
-static inline int
-cfs_hash_is_exiting(struct cfs_hash *hs)
-{ /* cfs_hash_destroy is called */
- return hs->hs_exiting;
-}
-
-static inline int
-cfs_hash_is_rehashing(struct cfs_hash *hs)
-{ /* rehash is launched */
- return hs->hs_rehash_bits != 0;
-}
-
-static inline int
-cfs_hash_is_iterating(struct cfs_hash *hs)
-{ /* someone is calling cfs_hash_for_each_* */
- return hs->hs_iterating || hs->hs_iterators != 0;
-}
-
-static inline int
-cfs_hash_bkt_size(struct cfs_hash *hs)
-{
- return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
- hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
- hs->hs_extra_bytes;
-}
-
-static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
-{
- return hs->hs_ops->hs_hash(hs, key, mask);
-}
-
-static inline void *
-cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_key(hnode);
-}
-
-static inline void
-cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
-{
- if (hs->hs_ops->hs_keycpy)
- hs->hs_ops->hs_keycpy(hnode, key);
-}
-
-/**
- * Returns 1 on a match.
- */
-static inline int
-cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_keycmp(key, hnode);
-}
-
-static inline void *
-cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_object(hnode);
-}
-
-static inline void
-cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_get(hs, hnode);
-}
-
-static inline void
-cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_put_locked(hs, hnode);
-}
-
-static inline void
-cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- return hs->hs_ops->hs_put(hs, hnode);
-}
-
-static inline void
-cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- if (hs->hs_ops->hs_exit)
- hs->hs_ops->hs_exit(hs, hnode);
-}
-
-static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
-{
- hs->hs_lops->hs_lock(&hs->hs_lock, excl);
-}
-
-static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
-{
- hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
-}
-
-static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
- atomic_t *condition)
-{
- LASSERT(cfs_hash_with_no_bktlock(hs));
- return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
-}
-
-static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, int excl)
-{
- hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
-}
-
-static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, int excl)
-{
- hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
-}
-
-/**
- * operations on cfs_hash bucket (bd: bucket descriptor),
- * they are normally for hash-table without rehash
- */
-void cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd);
-
-static inline void cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
- struct cfs_hash_bd *bd, int excl)
-{
- cfs_hash_bd_get(hs, key, bd);
- cfs_hash_bd_lock(hs, bd, excl);
-}
-
-static inline unsigned cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
-}
-
-static inline void cfs_hash_bd_index_set(struct cfs_hash *hs,
- unsigned index, struct cfs_hash_bd *bd)
-{
- bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
- bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
-}
-
-static inline void *
-cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- return (void *)bd->bd_bucket +
- cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
-}
-
-static inline __u32
-cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
-{
-	/* caller must hold cfs_hash_bd_lock */
- return bd->bd_bucket->hsb_version;
-}
-
-static inline __u32
-cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
-{
-	/* caller must hold cfs_hash_bd_lock */
- return bd->bd_bucket->hsb_count;
-}
-
-static inline int
-cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd)
-{
- return bd->bd_bucket->hsb_depmax;
-}
-
-static inline int
-cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
-{
- if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
- return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;
-
- if (bd1->bd_offset != bd2->bd_offset)
- return bd1->bd_offset - bd2->bd_offset;
-
- return 0;
-}
-
-void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode);
-void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode);
-void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
- struct cfs_hash_bd *bd_new, struct hlist_node *hnode);
-
-static inline int cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- atomic_t *condition)
-{
- LASSERT(cfs_hash_with_spin_bktlock(hs));
- return atomic_dec_and_lock(condition,
- &bd->bd_bucket->hsb_lock.spin);
-}
-
-static inline struct hlist_head *cfs_hash_bd_hhead(struct cfs_hash *hs,
- struct cfs_hash_bd *bd)
-{
- return hs->hs_hops->hop_hhead(hs, bd);
-}
-
-struct hlist_node *cfs_hash_bd_lookup_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, const void *key);
-struct hlist_node *cfs_hash_bd_peek_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, const void *key);
-struct hlist_node *cfs_hash_bd_findadd_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, const void *key,
- struct hlist_node *hnode,
- int insist_add);
-struct hlist_node *cfs_hash_bd_finddel_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bd, const void *key,
- struct hlist_node *hnode);
-
-/**
- * operations on cfs_hash bucket (bd: bucket descriptor),
- * they are safe for hash-table with rehash
- */
-void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds);
-void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);
-void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);
-
-static inline void cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
- struct cfs_hash_bd *bds, int excl)
-{
- cfs_hash_dual_bd_get(hs, key, bds);
- cfs_hash_dual_bd_lock(hs, bds, excl);
-}
-
-struct hlist_node *cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bds,
- const void *key);
-struct hlist_node *cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bds,
- const void *key,
- struct hlist_node *hnode,
- int insist_add);
-struct hlist_node *cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bds,
- const void *key,
- struct hlist_node *hnode);
-
-/* Hash init/cleanup functions */
-struct cfs_hash *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- cfs_hash_ops_t *ops, unsigned flags);
-
-struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
-void cfs_hash_putref(struct cfs_hash *hs);
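
A minimal create/teardown sketch, assuming an object with an embedded hlist_node; all names are illustrative, and the remaining mandatory callbacks (hs_key, hs_keycmp, hs_object, hs_get, hs_put) would be filled in just like hs_hash:

struct demo_obj {
	__u64			do_id;		/* key */
	struct hlist_node	do_hnode;	/* linkage used by cfs_hash */
	atomic_t		do_ref;
};

static unsigned demo_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
	return cfs_hash_u64_hash(*(const __u64 *)key, mask);
}

static cfs_hash_ops_t demo_hash_ops = {
	.hs_hash	= demo_hash,
	/* other callbacks omitted from this sketch */
};

static struct cfs_hash *demo_table_create(void)
{
	/* 2^10 hlists now, growable to 2^16, 3 bucket bits, no extra bytes */
	return cfs_hash_create("demo", 10, 16, 3, 0,
			       CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
			       &demo_hash_ops, CFS_HASH_DEFAULT);
}

The table is released with cfs_hash_putref() once the last user is done with it.
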
-
-/* Hash addition functions */
-void cfs_hash_add(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode);
-int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode);
-void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode);
-
-/* Hash deletion functions */
-void *cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode);
-void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);
-
-/* Hash lookup/for_each functions */
-#define CFS_HASH_LOOP_HOG 1024
-
-typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *node, void *data);
-void *cfs_hash_lookup(struct cfs_hash *hs, const void *key);
-void cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_for_each_nolock(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_for_each_empty(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t, void *data);
-typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
-void cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
-
-void cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
- cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_is_empty(struct cfs_hash *hs);
-__u64 cfs_hash_size_get(struct cfs_hash *hs);
-
-/*
- * Rehash - Theta is calculated to be the average chained
- * hash depth assuming a perfectly uniform hash function.
- */
-void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
-void cfs_hash_rehash_cancel(struct cfs_hash *hs);
-int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
-void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
- void *new_key, struct hlist_node *hnode);
-
-#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
-/* Validate hnode references the correct key */
-static inline void
-cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode)
-{
- LASSERT(cfs_hash_keycmp(hs, key, hnode));
-}
-
-/* Validate hnode is in the correct bucket */
-static inline void
-cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- struct cfs_hash_bd bds[2];
-
- cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
- LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
- bds[1].bd_bucket == bd->bd_bucket);
-}
-
-#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
-
-static inline void
-cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode) {}
-
-static inline void
-cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode) {}
-
-#endif /* CFS_HASH_DEBUG_LEVEL */
-
-#define CFS_HASH_THETA_BITS 10
-#define CFS_HASH_MIN_THETA (1U << (CFS_HASH_THETA_BITS - 1))
-#define CFS_HASH_MAX_THETA (1U << (CFS_HASH_THETA_BITS + 1))
-
-/* Return integer component of theta */
-static inline int __cfs_hash_theta_int(int theta)
-{
- return (theta >> CFS_HASH_THETA_BITS);
-}
-
-/* Return a fractional value between 0 and 999 */
-static inline int __cfs_hash_theta_frac(int theta)
-{
- return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
- (__cfs_hash_theta_int(theta) * 1000);
-}
-
-static inline int __cfs_hash_theta(struct cfs_hash *hs)
-{
- return (atomic_read(&hs->hs_count) <<
- CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
-}
-
-static inline void __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
-{
- LASSERT(min < max);
- hs->hs_min_theta = (__u16)min;
- hs->hs_max_theta = (__u16)max;
-}
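
As a worked example of the fixed-point load factor: with CFS_HASH_THETA_BITS = 10, a table holding 4000 items across 2^10 = 1024 hlist heads has theta = (4000 << 10) >> 10 = 4000, which __cfs_hash_theta_int() and __cfs_hash_theta_frac() report as 3.906, roughly four entries per chain. A rehash is only considered when theta drifts outside the [hs_min_theta, hs_max_theta] window set via __cfs_hash_set_theta().
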
-
-/* Generic debug formatting routines mainly for proc handler */
-struct seq_file;
-void cfs_hash_debug_header(struct seq_file *m);
-void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
-
-/*
- * Generic djb2 hash algorithm for character arrays.
- */
-static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
-{
- unsigned i, hash = 5381;
-
- LASSERT(key != NULL);
-
- for (i = 0; i < size; i++)
- hash = hash * 33 + ((char *)key)[i];
-
- return (hash & mask);
-}
-
-/*
- * Generic u32 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u32_hash(const __u32 key, unsigned mask)
-{
- return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
-}
-
-/*
- * Generic u64 hash algorithm.
- */
-static inline unsigned
-cfs_hash_u64_hash(const __u64 key, unsigned mask)
-{
- return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
-}
-
-/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
-#define cfs_hash_for_each_bd(bds, n, i) \
- for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)
-
-/** iterate over all buckets of @hs */
-#define cfs_hash_for_each_bucket(hs, bd, pos) \
- for (pos = 0; \
- pos < CFS_HASH_NBKT(hs) && \
- ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)
-
-/** iterate over all hlist of bucket @bd */
-#define cfs_hash_bd_for_each_hlist(hs, bd, hlist) \
- for ((bd)->bd_offset = 0; \
- (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) && \
- (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL; \
- (bd)->bd_offset++)
-
-/* !__LIBCFS_HASH_H__ */
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
deleted file mode 100644
index f5d741f25ffd..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_ioctl.h
- *
- * Low-level ioctl data structures. Kernel ioctl functions are declared here;
- * user-space functions are in libcfsutil_ioctl.h.
- *
- */
-
-#ifndef __LIBCFS_IOCTL_H__
-#define __LIBCFS_IOCTL_H__
-
-#define LIBCFS_IOCTL_VERSION 0x0001000a
-
-struct libcfs_ioctl_data {
- __u32 ioc_len;
- __u32 ioc_version;
-
- __u64 ioc_nid;
- __u64 ioc_u64[1];
-
- __u32 ioc_flags;
- __u32 ioc_count;
- __u32 ioc_net;
- __u32 ioc_u32[7];
-
- __u32 ioc_inllen1;
- char *ioc_inlbuf1;
- __u32 ioc_inllen2;
- char *ioc_inlbuf2;
-
- __u32 ioc_plen1; /* buffers in userspace */
- char *ioc_pbuf1;
- __u32 ioc_plen2; /* buffers in userspace */
- char *ioc_pbuf2;
-
- char ioc_bulk[0];
-};
-
-#define ioc_priority ioc_u32[0]
-
-struct libcfs_ioctl_hdr {
- __u32 ioc_len;
- __u32 ioc_version;
-};
-
-struct libcfs_debug_ioctl_data {
- struct libcfs_ioctl_hdr hdr;
- unsigned int subs;
- unsigned int debug;
-};
-
-#define LIBCFS_IOC_INIT(data) \
-do { \
- memset(&data, 0, sizeof(data)); \
- data.ioc_version = LIBCFS_IOCTL_VERSION; \
- data.ioc_len = sizeof(data); \
-} while (0)
-
-struct libcfs_ioctl_handler {
- struct list_head item;
- int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
-};
-
-#define DECLARE_IOCTL_HANDLER(ident, func) \
- struct libcfs_ioctl_handler ident = { \
- /* .item = */ LIST_HEAD_INIT(ident.item), \
- /* .handle_ioctl = */ func \
- }
-
-/* FIXME check conflict with lustre_lib.h */
-#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
-
-/* ioctls for manipulating snapshots 30- */
-#define IOC_LIBCFS_TYPE 'e'
-#define IOC_LIBCFS_MIN_NR 30
-/* libcfs ioctls */
-#define IOC_LIBCFS_PANIC _IOWR('e', 30, long)
-#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
-#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
-#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
-#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long)
-/* lnet ioctls */
-#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
-#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
-#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, long)
-#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, long)
-#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, long)
-#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long)
-#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long)
-#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long)
-#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long)
-#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long)
-#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long)
-#define IOC_LIBCFS_PING _IOWR('e', 61, long)
-#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long)
-#define IOC_LIBCFS_LNETST _IOWR('e', 63, long)
-/* lnd ioctls */
-#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long)
-#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long)
-#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, long)
-#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, long)
-#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, long)
-#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, long)
-#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, long)
-/* ioctl 77 is free for use */
-#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, long)
-#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long)
-#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long)
-
-#define IOC_LIBCFS_MAX_NR 80
-
-static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
-{
- int len = sizeof(*data);
-
- len += cfs_size_round(data->ioc_inllen1);
- len += cfs_size_round(data->ioc_inllen2);
- return len;
-}
-
-static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
-{
- if (data->ioc_len > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen1 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen2 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
- CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
- CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf1 && !data->ioc_plen1) {
- CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf2 && !data->ioc_plen2) {
- CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_plen1 && !data->ioc_pbuf1) {
- CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
- return 1;
- }
- if (data->ioc_plen2 && !data->ioc_pbuf2) {
- CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
- return 1;
- }
- if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_len) {
- CERROR("LIBCFS ioctl: packlen != ioc_len\n");
- return 1;
- }
- if (data->ioc_inllen1 &&
- data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
- return 1;
- }
- if (data->ioc_inllen2 &&
- data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
- data->ioc_inllen2 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
- return 1;
- }
- return 0;
-}
-
-int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
-int libcfs_ioctl_popdata(void *arg, void *buf, int size);
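
For context, a module plugged into this dispatch chain by declaring a handler and registering it at init time. This is a sketch under assumptions: the command handled, the debug mask, and the return convention for "not mine" are chosen only for illustration.

static int demo_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
{
	if (cmd != IOC_LIBCFS_PING_TEST)
		return -EINVAL;		/* not ours; let another handler try */

	CDEBUG(D_IOCTL, "ping test, flags = %u\n", data->ioc_flags);
	return 0;
}

static DECLARE_IOCTL_HANDLER(demo_ioctl_handler, demo_handle_ioctl);

static int demo_init(void)
{
	return libcfs_register_ioctl(&demo_ioctl_handler);
}

static void demo_exit(void)
{
	libcfs_deregister_ioctl(&demo_ioctl_handler);
}
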
-
-#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
deleted file mode 100644
index a989d2666230..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- *
- * libcfs/include/libcfs/libcfs_kernelcomm.h
- *
- * Kernel <-> userspace communication routines.
- * The definitions below are used in the kernel and userspace.
- *
- */
-
-#ifndef __LIBCFS_KERNELCOMM_H__
-#define __LIBCFS_KERNELCOMM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-/* KUC message header.
- * All current and future KUC messages should use this header.
- * To avoid having to include Lustre headers from libcfs, define this here.
- */
-struct kuc_hdr {
- __u16 kuc_magic;
- __u8 kuc_transport; /* Each new Lustre feature should use a different
- transport */
- __u8 kuc_flags;
- __u16 kuc_msgtype; /* Message type or opcode, transport-specific */
- __u16 kuc_msglen; /* Including header */
-} __aligned(sizeof(__u64));
-
-#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
-
-#define KUC_MAGIC 0x191C /*Lustre9etLinC */
-#define KUC_FL_BLOCK 0x01 /* Wait for send */
-
-/* kuc_msgtype values are defined in each transport */
-enum kuc_transport_type {
- KUC_TRANSPORT_GENERIC = 1,
- KUC_TRANSPORT_HSM = 2,
- KUC_TRANSPORT_CHANGELOG = 3,
-};
-
-enum kuc_generic_message_type {
- KUC_MSG_SHUTDOWN = 1,
-};
-
-/* prototype for callback function on kuc groups */
-typedef int (*libcfs_kkuc_cb_t)(__u32 data, void *cb_arg);
-
-/* KUC Broadcast Groups. This determines which userspace process hears which
- * messages. Multiple transports may be used within a group, or multiple
- * groups may use the same transport. Broadcast
- * groups need not be used if e.g. a UID is specified instead;
- * use group 0 to signify unicast.
- */
-#define KUC_GRP_HSM 0x02
-#define KUC_GRP_MAX KUC_GRP_HSM
-
-/* Kernel methods */
-int libcfs_kkuc_msg_put(struct file *fp, void *payload);
-int libcfs_kkuc_group_put(int group, void *payload);
-int libcfs_kkuc_group_add(struct file *fp, int uid, int group,
- __u32 data);
-int libcfs_kkuc_group_rem(int uid, int group);
-int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
- void *cb_arg);
-
-#define LK_FLG_STOP 0x01
-
-/* kernelcomm control structure, passed from userspace to kernel */
-typedef struct lustre_kernelcomm {
- __u32 lk_wfd;
- __u32 lk_rfd;
- __u32 lk_uid;
- __u32 lk_group;
- __u32 lk_data;
- __u32 lk_flags;
-} __packed lustre_kernelcomm;
-
-/* Userspace methods */
-int libcfs_ukuc_start(lustre_kernelcomm *l, int groups);
-int libcfs_ukuc_stop(lustre_kernelcomm *l);
-int libcfs_ukuc_msg_get(lustre_kernelcomm *l, char *buf, int maxsize,
- int transport);
-
-#endif /* __LIBCFS_KERNELCOMM_H__ */
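
A sketch of the kernel-side send path: every message leads with a kuc_hdr so readers registered in the target group can demultiplex it. The payload struct and the particular transport/msgtype/group combination here are purely illustrative.

struct demo_kuc_msg {
	struct kuc_hdr	dkm_hdr;
	__u32		dkm_event;	/* hypothetical payload */
};

static int demo_kuc_notify(__u32 event)
{
	struct demo_kuc_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.dkm_hdr.kuc_magic     = KUC_MAGIC;
	msg.dkm_hdr.kuc_transport = KUC_TRANSPORT_GENERIC;
	msg.dkm_hdr.kuc_msgtype   = KUC_MSG_SHUTDOWN;	/* transport-specific */
	msg.dkm_hdr.kuc_msglen    = sizeof(msg);
	msg.dkm_event             = event;

	/* broadcast to every process registered in the HSM group */
	return libcfs_kkuc_group_put(KUC_GRP_HSM, &msg);
}
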
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
deleted file mode 100644
index 082fe6de90e4..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_prim.h
- *
- * General primitives.
- *
- */
-
-#ifndef __LIBCFS_PRIM_H__
-#define __LIBCFS_PRIM_H__
-
-void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
-
-/*
- * Memory
- */
-#ifndef memory_pressure_get
-#define memory_pressure_get() (0)
-#endif
-#ifndef memory_pressure_set
-#define memory_pressure_set() do {} while (0)
-#endif
-#ifndef memory_pressure_clr
-#define memory_pressure_clr() do {} while (0)
-#endif
-
-static inline int cfs_memory_pressure_get_and_set(void)
-{
- int old = memory_pressure_get();
-
- if (!old)
- memory_pressure_set();
- return old;
-}
-
-static inline void cfs_memory_pressure_restore(int old)
-{
- if (old)
- memory_pressure_set();
- else
- memory_pressure_clr();
-}
-#endif
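
The get-and-set/restore pair brackets a region of reclaim-style work so that a pressure flag set inside it does not leak to the rest of the task. A minimal sketch (the work itself is elided):

static void demo_do_reclaim_work(void)
{
	/* remember whether the task was already flagged as under pressure */
	int old = cfs_memory_pressure_get_and_set();

	/* ... write out dirty data, shrink caches, etc. (elided) ... */

	/* clear the flag only if we were the ones who set it */
	cfs_memory_pressure_restore(old);
}
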
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
deleted file mode 100644
index 6af733de69ca..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_private.h
- *
- * Various defines for libcfs.
- *
- */
-
-#ifndef __LIBCFS_PRIVATE_H__
-#define __LIBCFS_PRIVATE_H__
-
-#ifndef DEBUG_SUBSYSTEM
-# define DEBUG_SUBSYSTEM S_UNDEFINED
-#endif
-
-/*
- * When this is on, the LASSERT macro includes a check for assignment used
- * instead of an equality check, but doesn't use unlikely(). Turn this on from
- * time to time for test builds. It shouldn't be on for a production release.
- */
-#define LASSERT_CHECKED (0)
-
-#define LASSERTF(cond, fmt, ...) \
-do { \
- if (unlikely(!(cond))) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \
- libcfs_debug_msg(&__msg_data, \
- "ASSERTION( %s ) failed: " fmt, #cond, \
- ## __VA_ARGS__); \
- lbug_with_loc(&__msg_data); \
- } \
-} while (0)
-
-#define LASSERT(cond) LASSERTF(cond, "\n")
-
-#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
-/**
- * This is for more expensive checks that one doesn't want to be enabled all
- * the time. LINVRNT() has to be explicitly enabled by
- * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
- */
-# define LINVRNT(exp) LASSERT(exp)
-#else
-# define LINVRNT(exp) ((void)sizeof !!(exp))
-#endif
-
-#define KLASSERT(e) LASSERT(e)
-
-void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);
-
-#define LBUG() \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
- lbug_with_loc(&msgdata); \
-} while (0)
-
-#ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
-#endif
-
-#define LIBCFS_ALLOC_PRE(size, mask) \
-do { \
- LASSERT(!in_interrupt() || \
- ((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & __GFP_WAIT) == 0)); \
-} while (0)
-
-#define LIBCFS_ALLOC_POST(ptr, size) \
-do { \
- if (unlikely((ptr) == NULL)) { \
- CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
- #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
- } else { \
- memset((ptr), 0, (size)); \
- } \
-} while (0)
-
-/**
- * allocate memory with GFP flags @mask
- */
-#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
-do { \
- LIBCFS_ALLOC_PRE((size), (mask)); \
- (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
- kmalloc((size), (mask)) : vmalloc(size); \
- LIBCFS_ALLOC_POST((ptr), (size)); \
-} while (0)
-
-/**
- * default allocator
- */
-#define LIBCFS_ALLOC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)
-
-/**
- * non-sleeping allocator
- */
-#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
-
-/**
- * allocate memory for specified CPU partition
- * \a cptab != NULL, \a cpt is CPU partition id of \a cptab
- * \a cptab == NULL, \a cpt is HW NUMA node id
- */
-#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask) \
-do { \
- LIBCFS_ALLOC_PRE((size), (mask)); \
- (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
- kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
- vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); \
- LIBCFS_ALLOC_POST((ptr), (size)); \
-} while (0)
-
-/** default numa allocator */
-#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
- LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
-
-#define LIBCFS_FREE(ptr, size) \
-do { \
- int s = (size); \
- if (unlikely((ptr) == NULL)) { \
- CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
- "%s:%d\n", s, __FILE__, __LINE__); \
- break; \
- } \
- if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
- vfree(ptr); \
- else \
- kfree(ptr); \
-} while (0)
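
Usage sketch: the pair picks kmalloc() or vmalloc() based on LIBCFS_VMALLOC_SIZE, zeroes the result, and logs failures, so callers only need to test for NULL. The struct and field names are illustrative.

struct demo_ctx {
	int	dc_count;
	char	dc_name[64];
};

static struct demo_ctx *demo_ctx_create(void)
{
	struct demo_ctx *ctx;

	LIBCFS_ALLOC(ctx, sizeof(*ctx));	/* GFP_NOFS, zeroed on success */
	if (ctx == NULL)
		return NULL;

	ctx->dc_count = 1;
	return ctx;
}

static void demo_ctx_destroy(struct demo_ctx *ctx)
{
	LIBCFS_FREE(ctx, sizeof(*ctx));		/* size decides kfree vs vfree */
}
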
-
-/******************************************************************************/
-
-/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
-#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
-#define ___htonl(x) __cpu_to_be32(x)
-#define ___htons(x) __cpu_to_be16(x)
-#define ___ntohl(x) __be32_to_cpu(x)
-#define ___ntohs(x) __be16_to_cpu(x)
-#define htonl(x) ___htonl(x)
-#define ntohl(x) ___ntohl(x)
-#define htons(x) ___htons(x)
-#define ntohs(x) ___ntohs(x)
-#endif
-
-void libcfs_run_upcall(char **argv);
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
-void libcfs_debug_dumplog(void);
-int libcfs_debug_init(unsigned long bufsize);
-int libcfs_debug_cleanup(void);
-int libcfs_debug_clear_buffer(void);
-int libcfs_debug_mark_buffer(const char *text);
-
-void libcfs_debug_set_level(unsigned int debug_level);
-
-/*
- * allocate per-cpu-partition data, returned value is an array of pointers,
- * variable can be indexed by CPU ID.
- * cptable != NULL: size of array is number of CPU partitions
- * cptable == NULL: size of array is number of HW cores
- */
-void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
-/*
- * destroy per-cpu-partition variable
- */
-void cfs_percpt_free(void *vars);
-int cfs_percpt_number(void *vars);
-void *cfs_percpt_current(void *vars);
-void *cfs_percpt_index(void *vars, int idx);
-
-#define cfs_percpt_for_each(var, i, vars) \
- for (i = 0; i < cfs_percpt_number(vars) && \
- ((var) = (vars)[i]) != NULL; i++)
-
-/*
- * allocate a variable-length array; the returned value is an array of
- * pointers. The caller specifies the length of the array via count.
- */
-void *cfs_array_alloc(int count, unsigned int size);
-void cfs_array_free(void *vars);
-
-#define LASSERT_ATOMIC_ENABLED (1)
-
-#if LASSERT_ATOMIC_ENABLED
-
-/** assert value of @a is equal to @v */
-#define LASSERT_ATOMIC_EQ(a, v) \
-do { \
- LASSERTF(atomic_read(a) == v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
-
-/** assert value of @a is unequal to @v */
-#define LASSERT_ATOMIC_NE(a, v) \
-do { \
- LASSERTF(atomic_read(a) != v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
-
-/** assert value of @a is less than @v */
-#define LASSERT_ATOMIC_LT(a, v) \
-do { \
- LASSERTF(atomic_read(a) < v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
-
-/** assert value of @a is less than or equal to @v */
-#define LASSERT_ATOMIC_LE(a, v) \
-do { \
- LASSERTF(atomic_read(a) <= v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
-
-/** assert value of @a is greater than @v */
-#define LASSERT_ATOMIC_GT(a, v) \
-do { \
- LASSERTF(atomic_read(a) > v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
-
-/** assert value of @a is greater than or equal to @v */
-#define LASSERT_ATOMIC_GE(a, v) \
-do { \
- LASSERTF(atomic_read(a) >= v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
-
-/** assert value of @a is greater than @v1 and less than @v2 */
-#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
-} while (0)
-
-/** assert value of @a is greater than @v1 and less than or equal to @v2 */
-#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
-} while (0)
-
-/** assert value of @a is greater than or equal to @v1 and less than @v2 */
-#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
-} while (0)
-
-/** assert value of @a is greater than or equal to @v1 and less than or equal to @v2 */
-#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \
-do { \
- int __v = atomic_read(a); \
- LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
-} while (0)
-
-#else /* !LASSERT_ATOMIC_ENABLED */
-
-#define LASSERT_ATOMIC_EQ(a, v) do {} while (0)
-#define LASSERT_ATOMIC_NE(a, v) do {} while (0)
-#define LASSERT_ATOMIC_LT(a, v) do {} while (0)
-#define LASSERT_ATOMIC_LE(a, v) do {} while (0)
-#define LASSERT_ATOMIC_GT(a, v) do {} while (0)
-#define LASSERT_ATOMIC_GE(a, v) do {} while (0)
-#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0)
-#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0)
-#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0)
-#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0)
-
-#endif /* LASSERT_ATOMIC_ENABLED */
-
-#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
-#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
-
-#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
-#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
-
-/*
- * percpu partition lock
- *
- * There are some use-cases like this in Lustre:
- * . each CPU partition has its own private data which is frequently changed,
- *   mostly by the local CPU partition.
- * . all CPU partitions share some global data which is rarely changed.
- *
- * LNet is a typical example.
- * The CPU partition lock is designed for this kind of use-case:
- * . each CPU partition has its own private lock
- * . a change to private data just needs to take the private lock
- * . a read of shared data just needs to take _any_ of the private locks
- * . a change to shared data needs to take _all_ private locks,
- * which is slow and should be really rare.
- */
-
-enum {
- CFS_PERCPT_LOCK_EX = -1, /* negative */
-};
-
-struct cfs_percpt_lock {
- /* cpu-partition-table for this lock */
- struct cfs_cpt_table *pcl_cptab;
- /* exclusively locked */
- unsigned int pcl_locked;
- /* private lock table */
- spinlock_t **pcl_locks;
-};
-
-/* return number of private locks */
-static inline int
-cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
-{
- return cfs_cpt_number(pcl->pcl_cptab);
-}
-
-/*
- * create a cpu-partition lock based on CPU partition table \a cptab;
- * one private lock is allocated for each partition in \a cptab
- */
-struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
-/* destroy a cpu-partition lock */
-void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
-
-/* lock private lock \a index of \a pcl */
-void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
-/* unlock private lock \a index of \a pcl */
-void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
-/* create percpt (atomic) refcount based on @cptab */
-atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
-/* destroy percpt refcount */
-void cfs_percpt_atomic_free(atomic_t **refs);
-/* return sum of all percpu refs */
-int cfs_percpt_atomic_summary(atomic_t **refs);
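
A sketch of the intended pattern: per-partition updates take only their own private lock, while rare global updates pass CFS_PERCPT_LOCK_EX to hold every private lock at once. The data layout below is illustrative only.

static struct cfs_percpt_lock	*demo_pcl;
static long			*demo_counters;	/* one slot per partition,
						 * e.g. from cfs_percpt_alloc() */
static long			 demo_global;

static void demo_local_inc(int cpt)
{
	cfs_percpt_lock(demo_pcl, cpt);		/* only this partition's lock */
	demo_counters[cpt]++;
	cfs_percpt_unlock(demo_pcl, cpt);
}

static void demo_global_set(long val)
{
	cfs_percpt_lock(demo_pcl, CFS_PERCPT_LOCK_EX);	/* all private locks */
	demo_global = val;
	cfs_percpt_unlock(demo_pcl, CFS_PERCPT_LOCK_EX);
}
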
-
-/** Compile-time assertion.
-
- * Check an invariant described by a constant expression at compile time by
- * forcing a compiler error if it does not hold. \a cond must be a constant
- * expression as defined by the ISO C Standard:
- *
- * 6.8.4.2 The switch statement
- * ....
- * [#3] The expression of each case label shall be an integer
- * constant expression and no two of the case constant
- * expressions in the same switch statement shall have the same
- * value after conversion...
- *
- */
-#define CLASSERT(cond) do {switch (42) {case (cond): case 0: break; } } while (0)
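
The switch trick turns a false condition into two identical case labels (both 0), which the compiler rejects. Since CLASSERT() expands to a statement, it has to sit inside a function body; the condition below is just an example.

static inline void demo_compile_time_checks(void)
{
	/* refuses to compile if the constant expression is ever false */
	CLASSERT(sizeof(__u64) == 8);
}
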
-
-/* max value for numeric network address */
-#define MAX_NUMERIC_VALUE 0xffffffff
-
-/* implication */
-#define ergo(a, b) (!(a) || (b))
-/* logical equivalence */
-#define equi(a, b) (!!(a) == !!(b))
-
-/* --------------------------------------------------------------------
- * Light-weight trace
- * Support for temporary event tracing with minimal Heisenberg effect.
- * -------------------------------------------------------------------- */
-
-struct libcfs_device_userstate {
- int ldu_memhog_pages;
- struct page *ldu_memhog_root_page;
-};
-
-#define MKSTR(ptr) ((ptr) ? (ptr) : "")
-
-static inline int cfs_size_round4(int val)
-{
- return (val + 3) & (~0x3);
-}
-
-#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round(int val)
-{
- return (val + 7) & (~0x7);
-}
-
-#define HAVE_CFS_SIZE_ROUND
-#endif
-
-static inline int cfs_size_round16(int val)
-{
- return (val + 0xf) & (~0xf);
-}
-
-static inline int cfs_size_round32(int val)
-{
- return (val + 0x1f) & (~0x1f);
-}
-
-static inline int cfs_size_round0(int val)
-{
- if (!val)
- return 0;
- return (val + 1 + 7) & (~0x7);
-}
-
-static inline size_t cfs_round_strlen(char *fset)
-{
- return (size_t)cfs_size_round((int)strlen(fset) + 1);
-}
-
-#define LOGL(var, len, ptr) \
-do { \
- if (var) \
- memcpy((char *)ptr, (const char *)var, len); \
- ptr += cfs_size_round(len); \
-} while (0)
-
-#define LOGU(var, len, ptr) \
-do { \
- if (var) \
- memcpy((char *)var, (const char *)ptr, len); \
- ptr += cfs_size_round(len); \
-} while (0)
-
-#define LOGL0(var, len, ptr) \
-do { \
- if (!len) \
- break; \
- memcpy((char *)ptr, (const char *)var, len); \
- *((char *)(ptr) + len) = 0; \
- ptr += cfs_size_round(len + 1); \
-} while (0)
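A hypothetical packing sketch (not from the removed header) showing how the rounding helpers and LOGL() keep packed records 8-byte aligned; the buffer management around it is assumed.

static void example_pack_name(char *buf, const char *name)
{
	char *ptr = buf;
	int len = strlen(name) + 1;	/* include the terminating NUL */

	/* caller is assumed to have reserved cfs_size_round(len) bytes */
	LOGL(name, len, ptr);		/* copy, then round 'ptr' up to 8 bytes */
}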
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
deleted file mode 100644
index 478e9582ff54..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_string.h
- *
- * Generic string manipulation functions.
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- */
-
-#ifndef __LIBCFS_STRING_H__
-#define __LIBCFS_STRING_H__
-
-/* libcfs_string.c */
-/* string comparison ignoring case */
-int cfs_strncasecmp(const char *s1, const char *s2, size_t n);
-/* Convert a text string to a bitmask */
-int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
- int *oldmask, int minmask, int allmask);
-/* trim leading and trailing space characters */
-char *cfs_firststr(char *str, size_t size);
-
-/**
- * Structure to represent a counted string that need not be NUL-terminated.
- */
-struct cfs_lstr {
- char *ls_str;
- int ls_len;
-};
-
-/*
- * Structure to represent \<range_expr\> token of the syntax.
- */
-struct cfs_range_expr {
- /*
- * Link to cfs_expr_list::el_exprs.
- */
- struct list_head re_link;
- __u32 re_lo;
- __u32 re_hi;
- __u32 re_stride;
-};
-
-struct cfs_expr_list {
- struct list_head el_link;
- struct list_head el_exprs;
-};
-
-char *cfs_trimwhite(char *str);
-int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
-int cfs_str2num_check(char *str, int nob, unsigned *num,
- unsigned min, unsigned max);
-int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
-int cfs_expr_list_values(struct cfs_expr_list *expr_list,
- int max, __u32 **values);
-static inline void
-cfs_expr_list_values_free(__u32 *values, int num)
-{
-	/* This array is allocated by LIBCFS_ALLOC(), so it must not be freed
-	 * with OBD_FREE() by modules other than libcfs & LNet; otherwise we
-	 * would see a spurious memory leak. */
- LIBCFS_FREE(values, num * sizeof(values[0]));
-}
-
-void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
- struct cfs_expr_list **elpp);
-void cfs_expr_list_free_list(struct list_head *list);
-int cfs_ip_addr_parse(char *str, int len, struct list_head *list);
-int cfs_ip_addr_match(__u32 addr, struct list_head *list);
-void cfs_ip_addr_free(struct list_head *list);
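A rough usage sketch of the expression-list helpers above; the range syntax shown ("0-7/2", i.e. lo-hi/stride) is an assumption based on the cfs_range_expr fields, not something this header spells out.

static int example_cpu_in_set(__u32 cpu)
{
	struct cfs_expr_list *el;
	char expr[] = "0-7/2";			/* assumed lo-hi/stride syntax */
	int rc;

	rc = cfs_expr_list_parse(expr, sizeof(expr) - 1, 0, 255, &el);
	if (rc != 0)
		return rc;			/* parse error */

	rc = cfs_expr_list_match(cpu, el);	/* non-zero if 'cpu' matches */
	cfs_expr_list_free(el);
	return rc;
}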
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
deleted file mode 100644
index 2c7ec2d28f38..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_time.h
- *
- * Time functions.
- *
- */
-
-#ifndef __LIBCFS_TIME_H__
-#define __LIBCFS_TIME_H__
-/*
- * generic time manipulation functions.
- */
-
-static inline unsigned long cfs_time_add(unsigned long t, long d)
-{
- return (unsigned long)(t + d);
-}
-
-static inline unsigned long cfs_time_sub(unsigned long t1, unsigned long t2)
-{
- return (unsigned long)(t1 - t2);
-}
-
-static inline int cfs_time_after(unsigned long t1, unsigned long t2)
-{
- return time_before(t2, t1);
-}
-
-static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
-{
- return time_before_eq(t2, t1);
-}
-
-static inline unsigned long cfs_time_shift(int seconds)
-{
- return cfs_time_add(cfs_time_current(), cfs_time_seconds(seconds));
-}
-
-/*
- * return a valid time-out based on the user-supplied one. Currently we only
- * check that the time-out is not shorter than allowed.
- */
-static inline long cfs_timeout_cap(long timeout)
-{
- if (timeout < CFS_TICK)
- timeout = CFS_TICK;
- return timeout;
-}
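A small illustrative pattern (hypothetical caller) combining the helpers above to build an absolute deadline in jiffies:

static unsigned long example_deadline(long user_timeout)
{
	/* cap first, then convert to an absolute jiffies deadline */
	return cfs_time_add(cfs_time_current(), cfs_timeout_cap(user_timeout));
}
/* later: cfs_time_aftereq(cfs_time_current(), deadline) => timed out */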
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
deleted file mode 100644
index 5cc64f327a87..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_workitem.h
- *
- * Author: Isaac Huang <he.h.huang@oracle.com>
- * Liang Zhen <zhen.liang@sun.com>
- *
- * A workitem is deferred work with these semantics:
- * - a workitem always runs in thread context.
- * - a workitem can be concurrent with other workitems but is strictly
- * serialized with respect to itself.
- * - no CPU affinity, a workitem does not necessarily run on the same CPU
- * that schedules it. However, this might change in the future.
- * - if a workitem is scheduled again before it has a chance to run, it
- * runs only once.
- * - if a workitem is scheduled while it runs, it runs again after it
- * completes; this ensures that events occurring while other events are
- * being processed receive due attention. This behavior also allows a
- * workitem to reschedule itself.
- *
- * Usage notes:
- * - a workitem can sleep but it should be aware of how that sleep might
- * affect others.
- * - a workitem runs inside a kernel thread so there's no user space to access.
- * - do not use a workitem if the scheduling latency can't be tolerated.
- *
- * When wi_action returns non-zero, it means the workitem has either been
- * freed or reused, and the workitem scheduler won't touch it any more.
- */
-
-#ifndef __LIBCFS_WORKITEM_H__
-#define __LIBCFS_WORKITEM_H__
-
-struct cfs_wi_sched;
-
-void cfs_wi_sched_destroy(struct cfs_wi_sched *);
-int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
- int nthrs, struct cfs_wi_sched **);
-
-struct cfs_workitem;
-
-typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
-typedef struct cfs_workitem {
- /** chain on runq or rerunq */
- struct list_head wi_list;
- /** working function */
- cfs_wi_action_t wi_action;
- /** arg for working function */
- void *wi_data;
- /** in running */
- unsigned short wi_running:1;
- /** scheduled */
- unsigned short wi_scheduled:1;
-} cfs_workitem_t;
-
-static inline void
-cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
-{
- INIT_LIST_HEAD(&wi->wi_list);
-
- wi->wi_running = 0;
- wi->wi_scheduled = 0;
- wi->wi_data = data;
- wi->wi_action = action;
-}
-
-void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-
-int cfs_wi_startup(void);
-void cfs_wi_shutdown(void);
-
-/** # workitem scheduler loops before reschedule */
-#define CFS_WI_RESCHED 128
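To make the scheduling semantics above concrete, a hypothetical workitem definition; example_ctx and the deferred work itself are placeholders.

struct example_ctx;			/* hypothetical payload type */

static int example_wi_action(struct cfs_workitem *wi)
{
	struct example_ctx *ctx = wi->wi_data;

	/* runs in a scheduler thread, never concurrently with itself */
	(void)ctx;			/* ... do the deferred work ... */
	return 0;			/* 0: scheduler may still touch 'wi' */
}

static void example_wi_start(struct cfs_wi_sched *sched, cfs_workitem_t *wi,
			     struct example_ctx *ctx)
{
	cfs_wi_init(wi, ctx, example_wi_action);
	cfs_wi_schedule(sched, wi);	/* re-queuing while it runs makes it
					 * run once more after it completes */
}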
-
-#endif /* __LIBCFS_WORKITEM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
deleted file mode 100644
index 3e2502a69bbd..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LIBCFS_LINUX_LIBCFS_H__
-#define __LIBCFS_LINUX_LIBCFS_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-
-#include <linux/bitops.h>
-#include <linux/compiler.h>
-#include <linux/ctype.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/kallsyms.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/kthread.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/mm_inline.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/random.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/vmalloc.h>
-#include <net/sock.h>
-#include <linux/atomic.h>
-#include <asm/div64.h>
-#include <linux/timex.h>
-#include <linux/uaccess.h>
-#include <stdarg.h>
-#include "linux-cpu.h"
-#include "linux-time.h"
-#include "linux-mem.h"
-
-
-#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
-
-#if !defined(__x86_64__)
-# ifdef __ia64__
-# define CDEBUG_STACK() (THREAD_SIZE - \
- ((unsigned long)__builtin_dwarf_cfa() & \
- (THREAD_SIZE - 1)))
-# else
-# define CDEBUG_STACK() (THREAD_SIZE - \
- ((unsigned long)__builtin_frame_address(0) & \
- (THREAD_SIZE - 1)))
-# endif /* __ia64__ */
-
-#define __CHECK_STACK(msgdata, mask, cdls) \
-do { \
- if (unlikely(CDEBUG_STACK() > libcfs_stack)) { \
- LIBCFS_DEBUG_MSG_DATA_INIT(msgdata, D_WARNING, NULL); \
- libcfs_stack = CDEBUG_STACK(); \
- libcfs_debug_msg(msgdata, \
- "maximum lustre stack %lu\n", \
- CDEBUG_STACK()); \
- (msgdata)->msg_mask = mask; \
- (msgdata)->msg_cdls = cdls; \
- dump_stack(); \
- /*panic("LBUG");*/ \
- } \
-} while (0)
-#define CFS_CHECK_STACK(msgdata, mask, cdls) __CHECK_STACK(msgdata, mask, cdls)
-#else /* __x86_64__ */
-#define CFS_CHECK_STACK(msgdata, mask, cdls) do {} while (0)
-#define CDEBUG_STACK() (0L)
-#endif /* __x86_64__ */
-
-/* initial pid */
-#define LUSTRE_LNET_PID 12345
-
-#define __current_nesting_level() (0)
-
-/**
- * Platform specific declarations for cfs_curproc API (libcfs/curproc.h)
- *
- * Implementation is in linux-curproc.c
- */
-#define CFS_CURPROC_COMM_MAX (sizeof(((struct task_struct *)0)->comm))
-
-#include <linux/capability.h>
-
-/* long integer with size equal to pointer */
-typedef unsigned long ulong_ptr_t;
-typedef long long_ptr_t;
-
-#ifndef WITH_WATCHDOG
-#define WITH_WATCHDOG
-#endif
-
-
-#endif /* __LIBCFS_LINUX_LIBCFS_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
deleted file mode 100644
index 520209f17173..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-cpu.h
- *
- * Basic library routines.
- *
- * Author: liang@whamcloud.com
- */
-
-#ifndef __LIBCFS_LINUX_CPU_H__
-#define __LIBCFS_LINUX_CPU_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#include <linux/cpu.h>
-#include <linux/cpuset.h>
-#include <linux/topology.h>
-
-#ifdef CONFIG_SMP
-
-#define HAVE_LIBCFS_CPT
-
-/** virtual processing unit */
-struct cfs_cpu_partition {
- /* CPUs mask for this partition */
- cpumask_t *cpt_cpumask;
- /* nodes mask for this partition */
- nodemask_t *cpt_nodemask;
- /* spread rotor for NUMA allocator */
- unsigned cpt_spread_rotor;
-};
-
-/** descriptor for CPU partitions */
-struct cfs_cpt_table {
- /* version, reserved for hotplug */
- unsigned ctb_version;
- /* spread rotor for NUMA allocator */
- unsigned ctb_spread_rotor;
- /* # of CPU partitions */
- unsigned ctb_nparts;
- /* partitions tables */
- struct cfs_cpu_partition *ctb_parts;
- /* shadow HW CPU to CPU partition ID */
- int *ctb_cpu2cpt;
- /* all cpus in this partition table */
- cpumask_t *ctb_cpumask;
- /* all nodes in this partition table */
- nodemask_t *ctb_nodemask;
-};
-
-#endif /* CONFIG_SMP */
-#endif /* __LIBCFS_LINUX_CPU_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
deleted file mode 100644
index 0f2fd79e5ec8..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-mem.h
- *
- * Basic library routines.
- */
-
-#ifndef __LIBCFS_LINUX_CFS_MEM_H__
-#define __LIBCFS_LINUX_CFS_MEM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/memcontrol.h>
-#include <linux/mm_inline.h>
-
-#ifndef HAVE_LIBCFS_CPT
-/* Need this for cfs_cpt_table */
-#include "../libcfs_cpu.h"
-#endif
-
-#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
-#define page_index(p) ((p)->index)
-
-#define memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
-
-#if BITS_PER_LONG == 32
-/* limit to lowmem on 32-bit systems */
-#define NUM_CACHEPAGES \
-	min(totalram_pages, (1UL << (30 - PAGE_CACHE_SHIFT)) * 3 / 4)
-#else
-#define NUM_CACHEPAGES totalram_pages
-#endif
-
-#define DECL_MMSPACE mm_segment_t __oldfs
-#define MMSPACE_OPEN \
- do { __oldfs = get_fs(); set_fs(get_ds()); } while (0)
-#define MMSPACE_CLOSE set_fs(__oldfs)
-
-#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
deleted file mode 100644
index ed8764b11c80..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-time.h
- *
- * Implementation of portable time API for Linux (kernel and user-level).
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#ifndef __LIBCFS_LINUX_LINUX_TIME_H__
-#define __LIBCFS_LINUX_LINUX_TIME_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#define ONE_BILLION ((u_int64_t)1000000000)
-#define ONE_MILLION 1000000
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/div64.h>
-
-/*
- * post 2.5 kernels.
- */
-
-#include <linux/jiffies.h>
-
-/*
- * Generic kernel stuff
- */
-
-static inline unsigned long cfs_time_current(void)
-{
- return jiffies;
-}
-
-static inline long cfs_time_seconds(int seconds)
-{
- return ((long)seconds) * HZ;
-}
-
-static inline long cfs_duration_sec(long d)
-{
- return d / HZ;
-}
-
-#define cfs_time_current_64 get_jiffies_64
-
-static inline __u64 cfs_time_add_64(__u64 t, __u64 d)
-{
- return t + d;
-}
-
-static inline __u64 cfs_time_shift_64(int seconds)
-{
- return cfs_time_add_64(cfs_time_current_64(),
- cfs_time_seconds(seconds));
-}
-
-static inline int cfs_time_before_64(__u64 t1, __u64 t2)
-{
- return (__s64)t2 - (__s64)t1 > 0;
-}
-
-static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2)
-{
- return (__s64)t2 - (__s64)t1 >= 0;
-}
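An illustrative helper (hypothetical) built from the conversions above:

static long example_elapsed_sec(unsigned long start)
{
	/* jiffies elapsed since 'start', reported in whole seconds */
	return cfs_duration_sec(cfs_time_sub(cfs_time_current(), start));
}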
-
-/*
- * One jiffy
- */
-#define CFS_TICK (1)
-
-#define CFS_DURATION_T "%ld"
-
-#endif /* __LIBCFS_LINUX_LINUX_TIME_H__ */
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
deleted file mode 100644
index 9493d5e236c5..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- */
-
-#ifndef __LNET_API_H__
-#define __LNET_API_H__
-
-/** \defgroup lnet LNet
- *
- * The Lustre Networking subsystem.
- *
- * LNet is an asynchronous message-passing API, which provides an unreliable
- * connectionless service that can't guarantee any order. It supports OFA IB,
- * TCP/IP, and Cray Interconnects, and routes between heterogeneous networks.
- *
- * @{
- */
-
-#include "../lnet/types.h"
-
-/** \defgroup lnet_init_fini Initialization and cleanup
- * LNet must be properly initialized before any LNet calls can be made.
- * @{ */
-int LNetNIInit(lnet_pid_t requested_pid);
-int LNetNIFini(void);
-/** @} lnet_init_fini */
-
-/** \defgroup lnet_addr LNet addressing and basic types
- *
- * Addressing scheme and basic data types of LNet.
- *
- * The LNet API is memory-oriented, so LNet must be able to address not only
- * end-points but also memory regions within a process address space.
- * An ::lnet_nid_t addresses an end-point. An ::lnet_pid_t identifies a process
- * in a node. A portal represents an opening in the address space of a
- * process. Match bits are the criteria used to identify a region of memory
- * inside a portal, and an offset specifies an offset within that memory region.
- *
- * LNet creates a table of portals for each process during initialization.
- * This table has MAX_PORTALS entries and its size can't be dynamically
- * changed. A portal stays empty until the owning process starts to add
- * memory regions to it. A portal is sometimes called an index because
- * it's an entry in the portals table of a process.
- *
- * \see LNetMEAttach
- * @{ */
-int LNetGetId(unsigned int index, lnet_process_id_t *id);
-int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order);
-void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
-
-/** @} lnet_addr */
-
-/** \defgroup lnet_me Match entries
- *
- * A match entry (abbreviated as ME) describes a set of criteria to accept
- * incoming requests.
- *
- * A portal is essentially a match list plus a set of attributes. A match
- * list is a chain of MEs. Each ME includes a pointer to a memory descriptor
- * and a set of match criteria. The match criteria can be used to reject
- * incoming requests based on process ID or the match bits provided in the
- * request. MEs can be dynamically inserted into a match list by LNetMEAttach()
- * and LNetMEInsert(), and removed from its list by LNetMEUnlink().
- * @{ */
-int LNetMEAttach(unsigned int portal,
- lnet_process_id_t match_id_in,
- __u64 match_bits_in,
- __u64 ignore_bits_in,
- lnet_unlink_t unlink_in,
- lnet_ins_pos_t pos_in,
- lnet_handle_me_t *handle_out);
-
-int LNetMEInsert(lnet_handle_me_t current_in,
- lnet_process_id_t match_id_in,
- __u64 match_bits_in,
- __u64 ignore_bits_in,
- lnet_unlink_t unlink_in,
- lnet_ins_pos_t position_in,
- lnet_handle_me_t *handle_out);
-
-int LNetMEUnlink(lnet_handle_me_t current_in);
-/** @} lnet_me */
-
-/** \defgroup lnet_md Memory descriptors
- *
- * A memory descriptor contains information about a region of a user's
- * memory (either in kernel or user space) and optionally points to an
- * event queue where information about the operations performed on the
- * memory descriptor are recorded. Memory descriptor is abbreviated as
- * MD and can be used interchangeably with the memory region it describes.
- *
- * The LNet API provides two operations to create MDs: LNetMDAttach()
- * and LNetMDBind(); one operation to unlink and release the resources
- * associated with a MD: LNetMDUnlink().
- * @{ */
-int LNetMDAttach(lnet_handle_me_t current_in,
- lnet_md_t md_in,
- lnet_unlink_t unlink_in,
- lnet_handle_md_t *handle_out);
-
-int LNetMDBind(lnet_md_t md_in,
- lnet_unlink_t unlink_in,
- lnet_handle_md_t *handle_out);
-
-int LNetMDUnlink(lnet_handle_md_t md_in);
-/** @} lnet_md */
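Purely as an illustration of how the ME and MD calls combine, a sketch of posting a passive receive buffer; the lnet_md_t field names and the LNET_* constants used here come from the wider LNet API and are assumptions rather than quotes from this header.

static int example_post_buffer(unsigned int portal, __u64 mbits,
			       void *buf, unsigned int len,
			       lnet_handle_eq_t eqh, lnet_handle_md_t *mdh)
{
	lnet_process_id_t any = { .nid = LNET_NID_ANY, .pid = LNET_PID_ANY };
	lnet_handle_me_t meh;
	lnet_md_t md;
	int rc;

	rc = LNetMEAttach(portal, any, mbits, 0, LNET_UNLINK,
			  LNET_INS_AFTER, &meh);
	if (rc != 0)
		return rc;

	md.start = buf;			/* assumed lnet_md_t layout */
	md.length = len;
	md.threshold = 1;		/* auto-unlink after one operation */
	md.max_size = 0;
	md.options = LNET_MD_OP_PUT;	/* accept incoming PUTs only */
	md.user_ptr = NULL;
	md.eq_handle = eqh;

	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
	if (rc != 0)
		LNetMEUnlink(meh);
	return rc;
}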
-
-/** \defgroup lnet_eq Events and event queues
- *
- * Event queues (abbreviated as EQ) are used to log operations performed on
- * local MDs. In particular, they signal the completion of a data transmission
- * into or out of a MD. They can also be used to hold acknowledgments for
- * completed PUT operations and indicate when a MD has been unlinked. Multiple
- * MDs can share a single EQ. An EQ may have an optional event handler
- * associated with it. If an event handler exists, it will be run for each
- * event that is deposited into the EQ.
- *
- * In addition to the lnet_handle_eq_t, the LNet API defines two types
- * associated with events: The ::lnet_event_kind_t defines the kinds of events
- * that can be stored in an EQ. The lnet_event_t defines a structure that
- * holds the information associated with an event.
- *
- * There are five functions for dealing with EQs: LNetEQAlloc() is used to
- * create an EQ and allocate the resources needed, while LNetEQFree()
- * releases these resources and frees the EQ. LNetEQGet() retrieves the next
- * event from an EQ, and LNetEQWait() can be used to block a process until
- * an EQ has at least one event. LNetEQPoll() can be used to test or wait
- * on multiple EQs.
- * @{ */
-int LNetEQAlloc(unsigned int count_in,
- lnet_eq_handler_t handler,
- lnet_handle_eq_t *handle_out);
-
-int LNetEQFree(lnet_handle_eq_t eventq_in);
-
-int LNetEQGet(lnet_handle_eq_t eventq_in,
- lnet_event_t *event_out);
-
-int LNetEQWait(lnet_handle_eq_t eventq_in,
- lnet_event_t *event_out);
-
-int LNetEQPoll(lnet_handle_eq_t *eventqs_in,
- int neq_in,
- int timeout_ms,
- lnet_event_t *event_out,
- int *which_eq_out);
-/** @} lnet_eq */
-
-/** \defgroup lnet_data Data movement operations
- *
- * The LNet API provides two data movement operations: LNetPut()
- * and LNetGet().
- * @{ */
-int LNetPut(lnet_nid_t self,
- lnet_handle_md_t md_in,
- lnet_ack_req_t ack_req_in,
- lnet_process_id_t target_in,
- unsigned int portal_in,
- __u64 match_bits_in,
- unsigned int offset_in,
- __u64 hdr_data_in);
-
-int LNetGet(lnet_nid_t self,
- lnet_handle_md_t md_in,
- lnet_process_id_t target_in,
- unsigned int portal_in,
- __u64 match_bits_in,
- unsigned int offset_in);
-/** @} lnet_data */
-
-/** \defgroup lnet_misc Miscellaneous operations.
- * Miscellaneous operations.
- * @{ */
-
-int LNetSetLazyPortal(int portal);
-int LNetClearLazyPortal(int portal);
-int LNetCtl(unsigned int cmd, void *arg);
-
-/** @} lnet_misc */
-
-/** @} lnet */
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
deleted file mode 100644
index b61d5045a566..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ /dev/null
@@ -1,688 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/lib-lnet.h
- */
-
-#ifndef __LNET_LIB_LNET_H__
-#define __LNET_LIB_LNET_H__
-
-#include "../libcfs/libcfs.h"
-#include "api.h"
-#include "lnet.h"
-#include "lib-types.h"
-
-extern lnet_t the_lnet; /* THE network */
-
-#if (BITS_PER_LONG == 32)
-/* 2 CPTs; allowing more CPTs might put us under memory pressure */
-#define LNET_CPT_MAX_BITS 1
-
-#else /* 64-bit system */
-/*
- * 256 CPTs for thousands of CPUs; allowing more CPTs might put us at
- * risk of consuming all lh_cookie values.
- */
-#define LNET_CPT_MAX_BITS 8
-#endif /* BITS_PER_LONG == 32 */
-
-/* max allowed CPT number */
-#define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS)
-
-#define LNET_CPT_NUMBER (the_lnet.ln_cpt_number)
-#define LNET_CPT_BITS (the_lnet.ln_cpt_bits)
-#define LNET_CPT_MASK ((1ULL << LNET_CPT_BITS) - 1)
-
-/** exclusive lock */
-#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
-
-static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh)
-{
- return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
- wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
-}
-
-static inline int lnet_md_exhausted(lnet_libmd_t *md)
-{
- return (md->md_threshold == 0 ||
- ((md->md_options & LNET_MD_MAX_SIZE) != 0 &&
- md->md_offset + md->md_max_size > md->md_length));
-}
-
-static inline int lnet_md_unlinkable(lnet_libmd_t *md)
-{
- /* Should unlink md when its refcount is 0 and either:
- * - md has been flagged for deletion (by auto unlink or
- * LNetM[DE]Unlink, in the latter case md may not be exhausted).
- * - auto unlink is on and md is exhausted.
- */
- if (md->md_refcount != 0)
- return 0;
-
- if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0)
- return 1;
-
- return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
- lnet_md_exhausted(md));
-}
-
-#define lnet_cpt_table() (the_lnet.ln_cpt_table)
-#define lnet_cpt_current() cfs_cpt_current(the_lnet.ln_cpt_table, 1)
-
-static inline int
-lnet_cpt_of_cookie(__u64 cookie)
-{
- unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK;
-
-	/* LNET_CPT_NUMBER doesn't have to be a power of 2, which means we can
-	 * get an illegal cpt from an invalid cookie */
- return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER;
-}
-
-static inline void
-lnet_res_lock(int cpt)
-{
- cfs_percpt_lock(the_lnet.ln_res_lock, cpt);
-}
-
-static inline void
-lnet_res_unlock(int cpt)
-{
- cfs_percpt_unlock(the_lnet.ln_res_lock, cpt);
-}
-
-static inline int
-lnet_res_lock_current(void)
-{
- int cpt = lnet_cpt_current();
-
- lnet_res_lock(cpt);
- return cpt;
-}
-
-static inline void
-lnet_net_lock(int cpt)
-{
- cfs_percpt_lock(the_lnet.ln_net_lock, cpt);
-}
-
-static inline void
-lnet_net_unlock(int cpt)
-{
- cfs_percpt_unlock(the_lnet.ln_net_lock, cpt);
-}
-
-static inline int
-lnet_net_lock_current(void)
-{
- int cpt = lnet_cpt_current();
-
- lnet_net_lock(cpt);
- return cpt;
-}
-
-#define LNET_LOCK() lnet_net_lock(LNET_LOCK_EX)
-#define LNET_UNLOCK() lnet_net_unlock(LNET_LOCK_EX)
-
-#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
-#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
-
-#define MAX_PORTALS 64
-
-static inline lnet_eq_t *
-lnet_eq_alloc(void)
-{
- lnet_eq_t *eq;
-
- LIBCFS_ALLOC(eq, sizeof(*eq));
- return eq;
-}
-
-static inline void
-lnet_eq_free(lnet_eq_t *eq)
-{
- LIBCFS_FREE(eq, sizeof(*eq));
-}
-
-static inline lnet_libmd_t *
-lnet_md_alloc(lnet_md_t *umd)
-{
- lnet_libmd_t *md;
- unsigned int size;
- unsigned int niov;
-
- if ((umd->options & LNET_MD_KIOV) != 0) {
- niov = umd->length;
- size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
- } else {
- niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
- umd->length : 1;
- size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
- }
-
- LIBCFS_ALLOC(md, size);
-
- if (md != NULL) {
- /* Set here in case of early free */
- md->md_options = umd->options;
- md->md_niov = niov;
- INIT_LIST_HEAD(&md->md_list);
- }
-
- return md;
-}
-
-static inline void
-lnet_md_free(lnet_libmd_t *md)
-{
- unsigned int size;
-
- if ((md->md_options & LNET_MD_KIOV) != 0)
- size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
- else
- size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
-
- LIBCFS_FREE(md, size);
-}
-
-static inline lnet_me_t *
-lnet_me_alloc(void)
-{
- lnet_me_t *me;
-
- LIBCFS_ALLOC(me, sizeof(*me));
- return me;
-}
-
-static inline void
-lnet_me_free(lnet_me_t *me)
-{
- LIBCFS_FREE(me, sizeof(*me));
-}
-
-static inline lnet_msg_t *
-lnet_msg_alloc(void)
-{
- lnet_msg_t *msg;
-
- LIBCFS_ALLOC(msg, sizeof(*msg));
-
-	/* no need to zero it; LIBCFS_ALLOC does that for us */
- return msg;
-}
-
-static inline void
-lnet_msg_free(lnet_msg_t *msg)
-{
- LASSERT(!msg->msg_onactivelist);
- LIBCFS_FREE(msg, sizeof(*msg));
-}
-
-lnet_libhandle_t *lnet_res_lh_lookup(struct lnet_res_container *rec,
- __u64 cookie);
-void lnet_res_lh_initialize(struct lnet_res_container *rec,
- lnet_libhandle_t *lh);
-static inline void
-lnet_res_lh_invalidate(lnet_libhandle_t *lh)
-{
- /* NB: cookie is still useful, don't reset it */
- list_del(&lh->lh_hash_chain);
-}
-
-static inline void
-lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq)
-{
- if (eq == NULL) {
- LNetInvalidateHandle(handle);
- return;
- }
-
- handle->cookie = eq->eq_lh.lh_cookie;
-}
-
-static inline lnet_eq_t *
-lnet_handle2eq(lnet_handle_eq_t *handle)
-{
- lnet_libhandle_t *lh;
-
- lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
- if (lh == NULL)
- return NULL;
-
- return lh_entry(lh, lnet_eq_t, eq_lh);
-}
-
-static inline void
-lnet_md2handle(lnet_handle_md_t *handle, lnet_libmd_t *md)
-{
- handle->cookie = md->md_lh.lh_cookie;
-}
-
-static inline lnet_libmd_t *
-lnet_handle2md(lnet_handle_md_t *handle)
-{
- /* ALWAYS called with resource lock held */
- lnet_libhandle_t *lh;
- int cpt;
-
- cpt = lnet_cpt_of_cookie(handle->cookie);
- lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
- handle->cookie);
- if (lh == NULL)
- return NULL;
-
- return lh_entry(lh, lnet_libmd_t, md_lh);
-}
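An illustrative (hypothetical) caller showing the lock/lookup pattern that the "resource lock held" comments above imply:

static lnet_libmd_t *example_lookup_md(lnet_handle_md_t *handle, int *cptp)
{
	int cpt = lnet_cpt_of_cookie(handle->cookie);
	lnet_libmd_t *md;

	lnet_res_lock(cpt);
	md = lnet_handle2md(handle);	/* NULL if already unlinked */
	if (md == NULL) {
		lnet_res_unlock(cpt);
		return NULL;
	}
	*cptp = cpt;			/* caller calls lnet_res_unlock(cpt)
					 * when it is done with 'md' */
	return md;
}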
-
-static inline lnet_libmd_t *
-lnet_wire_handle2md(lnet_handle_wire_t *wh)
-{
- /* ALWAYS called with resource lock held */
- lnet_libhandle_t *lh;
- int cpt;
-
- if (wh->wh_interface_cookie != the_lnet.ln_interface_cookie)
- return NULL;
-
- cpt = lnet_cpt_of_cookie(wh->wh_object_cookie);
- lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
- wh->wh_object_cookie);
- if (lh == NULL)
- return NULL;
-
- return lh_entry(lh, lnet_libmd_t, md_lh);
-}
-
-static inline void
-lnet_me2handle(lnet_handle_me_t *handle, lnet_me_t *me)
-{
- handle->cookie = me->me_lh.lh_cookie;
-}
-
-static inline lnet_me_t *
-lnet_handle2me(lnet_handle_me_t *handle)
-{
- /* ALWAYS called with resource lock held */
- lnet_libhandle_t *lh;
- int cpt;
-
- cpt = lnet_cpt_of_cookie(handle->cookie);
- lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt],
- handle->cookie);
- if (lh == NULL)
- return NULL;
-
- return lh_entry(lh, lnet_me_t, me_lh);
-}
-
-static inline void
-lnet_peer_addref_locked(lnet_peer_t *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- lp->lp_refcount++;
-}
-
-void lnet_destroy_peer_locked(lnet_peer_t *lp);
-
-static inline void
-lnet_peer_decref_locked(lnet_peer_t *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- lp->lp_refcount--;
- if (lp->lp_refcount == 0)
- lnet_destroy_peer_locked(lp);
-}
-
-static inline int
-lnet_isrouter(lnet_peer_t *lp)
-{
- return lp->lp_rtr_refcount != 0;
-}
-
-static inline void
-lnet_ni_addref_locked(lnet_ni_t *ni, int cpt)
-{
- LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
- LASSERT(*ni->ni_refs[cpt] >= 0);
-
- (*ni->ni_refs[cpt])++;
-}
-
-static inline void
-lnet_ni_addref(lnet_ni_t *ni)
-{
- lnet_net_lock(0);
- lnet_ni_addref_locked(ni, 0);
- lnet_net_unlock(0);
-}
-
-static inline void
-lnet_ni_decref_locked(lnet_ni_t *ni, int cpt)
-{
- LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
- LASSERT(*ni->ni_refs[cpt] > 0);
-
- (*ni->ni_refs[cpt])--;
-}
-
-static inline void
-lnet_ni_decref(lnet_ni_t *ni)
-{
- lnet_net_lock(0);
- lnet_ni_decref_locked(ni, 0);
- lnet_net_unlock(0);
-}
-
-void lnet_ni_free(lnet_ni_t *ni);
-
-static inline int
-lnet_nid2peerhash(lnet_nid_t nid)
-{
- return hash_long(nid, LNET_PEER_HASH_BITS);
-}
-
-static inline struct list_head *
-lnet_net2rnethash(__u32 net)
-{
- return &the_lnet.ln_remote_nets_hash[(LNET_NETNUM(net) +
- LNET_NETTYP(net)) &
- ((1U << the_lnet.ln_remote_nets_hbits) - 1)];
-}
-
-extern lnd_t the_lolnd;
-extern int avoid_asym_router_failure;
-
-int lnet_cpt_of_nid_locked(lnet_nid_t nid);
-int lnet_cpt_of_nid(lnet_nid_t nid);
-lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
-lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
-lnet_ni_t *lnet_net2ni(__u32 net);
-
-int lnet_init(void);
-void lnet_fini(void);
-
-int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, unsigned long when);
-void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
- unsigned long when);
-int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
- unsigned int priority);
-int lnet_check_routes(void);
-int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
-void lnet_destroy_routes(void);
-int lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
-void lnet_router_debugfs_init(void);
-void lnet_router_debugfs_fini(void);
-int lnet_rtrpools_alloc(int im_a_router);
-void lnet_rtrpools_free(void);
-lnet_remotenet_t *lnet_find_net_locked(__u32 net);
-
-int lnet_islocalnid(lnet_nid_t nid);
-int lnet_islocalnet(__u32 net);
-
-void lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
- unsigned int offset, unsigned int mlen);
-void lnet_msg_detach_md(lnet_msg_t *msg, int status);
-void lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev);
-void lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type);
-void lnet_msg_commit(lnet_msg_t *msg, int cpt);
-void lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status);
-
-void lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev);
-void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
- unsigned int offset, unsigned int len);
-int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid);
-void lnet_return_tx_credits_locked(lnet_msg_t *msg);
-void lnet_return_rx_credits_locked(lnet_msg_t *msg);
-
-/* portals functions */
-/* portals attributes */
-static inline int
-lnet_ptl_is_lazy(lnet_portal_t *ptl)
-{
- return !!(ptl->ptl_options & LNET_PTL_LAZY);
-}
-
-static inline int
-lnet_ptl_is_unique(lnet_portal_t *ptl)
-{
- return !!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE);
-}
-
-static inline int
-lnet_ptl_is_wildcard(lnet_portal_t *ptl)
-{
- return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD);
-}
-
-static inline void
-lnet_ptl_setopt(lnet_portal_t *ptl, int opt)
-{
- ptl->ptl_options |= opt;
-}
-
-static inline void
-lnet_ptl_unsetopt(lnet_portal_t *ptl, int opt)
-{
- ptl->ptl_options &= ~opt;
-}
-
-/* match-table functions */
-struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable,
- lnet_process_id_t id, __u64 mbits);
-struct lnet_match_table *lnet_mt_of_attach(unsigned int index,
- lnet_process_id_t id, __u64 mbits,
- __u64 ignore_bits,
- lnet_ins_pos_t pos);
-int lnet_mt_match_md(struct lnet_match_table *mtable,
- struct lnet_match_info *info, struct lnet_msg *msg);
-
-/* portals match/attach functions */
-void lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
- struct list_head *matches, struct list_head *drops);
-void lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md);
-int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg);
-
-/* initialize and finalize portals */
-int lnet_portals_create(void);
-void lnet_portals_destroy(void);
-
-/* message functions */
-int lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr,
- lnet_nid_t fromnid, void *private, int rdma_req);
-void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
-lnet_msg_t *lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *get_msg);
-void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len);
-
-void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc);
-
-void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
-void lnet_recv_delayed_msg_list(struct list_head *head);
-
-int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt);
-void lnet_msg_container_cleanup(struct lnet_msg_container *container);
-void lnet_msg_containers_destroy(void);
-int lnet_msg_containers_create(void);
-
-char *lnet_msgtyp2str(int type);
-void lnet_print_hdr(lnet_hdr_t *hdr);
-int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
-
-void lnet_counters_get(lnet_counters_t *counters);
-void lnet_counters_reset(void);
-
-unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
-int lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, struct kvec *src,
- unsigned int offset, unsigned int len);
-
-unsigned int lnet_kiov_nob(unsigned int niov, lnet_kiov_t *iov);
-int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
- unsigned int offset, unsigned int len);
-
-void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov,
- unsigned int doffset,
- unsigned int nsiov, struct kvec *siov,
- unsigned int soffset, unsigned int nob);
-void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov,
- unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset, unsigned int nob);
-void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset,
- unsigned int niov, struct kvec *iov,
- unsigned int iovoffset, unsigned int nob);
-void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
- unsigned int doffset,
- unsigned int nskiov, lnet_kiov_t *skiov,
- unsigned int soffset, unsigned int nob);
-
-static inline void
-lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, struct kvec *siov, unsigned int soffset,
- unsigned int nob)
-{
- struct kvec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen};
-
- lnet_copy_iov2iov(1, &diov, doffset,
- nsiov, siov, soffset, nob);
-}
-
-static inline void
-lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *skiov,
- unsigned int soffset, unsigned int nob)
-{
- struct kvec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen};
-
- lnet_copy_kiov2iov(1, &diov, doffset,
- nsiov, skiov, soffset, nob);
-}
-
-static inline void
-lnet_copy_flat2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
- int slen, void *src, unsigned int soffset, unsigned int nob)
-{
- struct kvec siov = {/*.iov_base = */ src, /*.iov_len = */slen};
-
- lnet_copy_iov2iov(ndiov, diov, doffset,
- 1, &siov, soffset, nob);
-}
-
-static inline void
-lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov,
- unsigned int doffset, int slen, void *src,
- unsigned int soffset, unsigned int nob)
-{
- struct kvec siov = {/* .iov_base = */ src, /* .iov_len = */ slen};
-
- lnet_copy_iov2kiov(ndiov, dkiov, doffset,
- 1, &siov, soffset, nob);
-}
-
-void lnet_me_unlink(lnet_me_t *me);
-
-void lnet_md_unlink(lnet_libmd_t *md);
-void lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd);
-
-void lnet_register_lnd(lnd_t *lnd);
-void lnet_unregister_lnd(lnd_t *lnd);
-
-int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
- __u32 local_ip, __u32 peer_ip, int peer_port);
-void lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
- __u32 peer_ip, int port);
-int lnet_count_acceptor_nis(void);
-int lnet_acceptor_timeout(void);
-int lnet_acceptor_port(void);
-
-int lnet_acceptor_start(void);
-void lnet_acceptor_stop(void);
-
-int lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask);
-int lnet_ipif_enumerate(char ***names);
-void lnet_ipif_free_enumeration(char **names, int n);
-int lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize);
-int lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize);
-int lnet_sock_getaddr(struct socket *socket, bool remote, __u32 *ip, int *port);
-int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout);
-int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout);
-
-int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog);
-int lnet_sock_accept(struct socket **newsockp, struct socket *sock);
-int lnet_sock_connect(struct socket **sockp, int *fatal,
- __u32 local_ip, int local_port,
- __u32 peer_ip, int peer_port);
-void libcfs_sock_release(struct socket *sock);
-
-int lnet_peers_start_down(void);
-int lnet_peer_buffer_credits(lnet_ni_t *ni);
-
-int lnet_router_checker_start(void);
-void lnet_router_checker_stop(void);
-void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net);
-void lnet_swap_pinginfo(lnet_ping_info_t *info);
-
-int lnet_ping_target_init(void);
-void lnet_ping_target_fini(void);
-int lnet_ping(lnet_process_id_t id, int timeout_ms,
- lnet_process_id_t *ids, int n_ids);
-
-int lnet_parse_ip2nets(char **networksp, char *ip2nets);
-int lnet_parse_routes(char *route_str, int *im_a_router);
-int lnet_parse_networks(struct list_head *nilist, char *networks);
-
-int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
-lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
- lnet_nid_t nid);
-void lnet_peer_tables_cleanup(void);
-void lnet_peer_tables_destroy(void);
-int lnet_peer_tables_create(void);
-void lnet_debug_peer(lnet_nid_t nid);
-
-static inline void
-lnet_peer_set_alive(lnet_peer_t *lp)
-{
- lp->lp_last_alive = lp->lp_last_query = jiffies;
- if (!lp->lp_alive)
- lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
-}
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
deleted file mode 100644
index d792c4adb0ca..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ /dev/null
@@ -1,611 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/lib-types.h
- */
-
-#ifndef __LNET_LIB_TYPES_H__
-#define __LNET_LIB_TYPES_H__
-
-#include <linux/kthread.h>
-#include <linux/uio.h>
-#include <linux/types.h>
-#include <net/sock.h>
-
-#include "types.h"
-
-/* Max payload size */
-#define LNET_MAX_PAYLOAD CONFIG_LNET_MAX_PAYLOAD
-#if (LNET_MAX_PAYLOAD < LNET_MTU)
-# error "LNET_MAX_PAYLOAD too small - error in configure --with-max-payload-mb"
-#elif (LNET_MAX_PAYLOAD > (PAGE_SIZE * LNET_MAX_IOV))
-# error "LNET_MAX_PAYLOAD too large - error in configure --with-max-payload-mb"
-#endif
-
-/* forward refs */
-struct lnet_libmd;
-
-typedef struct lnet_msg {
- struct list_head msg_activelist;
- struct list_head msg_list; /* Q for credits/MD */
-
- lnet_process_id_t msg_target;
-	/* where it is from; only used for building the event */
- lnet_nid_t msg_from;
- __u32 msg_type;
-
- /* committed for sending */
- unsigned int msg_tx_committed:1;
- /* CPT # this message committed for sending */
- unsigned int msg_tx_cpt:15;
- /* committed for receiving */
- unsigned int msg_rx_committed:1;
- /* CPT # this message committed for receiving */
- unsigned int msg_rx_cpt:15;
- /* queued for tx credit */
- unsigned int msg_tx_delayed:1;
- /* queued for RX buffer */
- unsigned int msg_rx_delayed:1;
- /* ready for pending on RX delay list */
- unsigned int msg_rx_ready_delay:1;
-
- unsigned int msg_vmflush:1; /* VM trying to free memory */
- unsigned int msg_target_is_router:1; /* sending to a router */
- unsigned int msg_routing:1; /* being forwarded */
- unsigned int msg_ack:1; /* ack on finalize (PUT) */
- unsigned int msg_sending:1; /* outgoing message */
- unsigned int msg_receiving:1; /* being received */
- unsigned int msg_txcredit:1; /* taken an NI send credit */
- unsigned int msg_peertxcredit:1; /* taken a peer send credit */
- unsigned int msg_rtrcredit:1; /* taken a global
- router credit */
- unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */
- unsigned int msg_onactivelist:1; /* on the activelist */
-
- struct lnet_peer *msg_txpeer; /* peer I'm sending to */
- struct lnet_peer *msg_rxpeer; /* peer I received from */
-
- void *msg_private;
- struct lnet_libmd *msg_md;
-
- unsigned int msg_len;
- unsigned int msg_wanted;
- unsigned int msg_offset;
- unsigned int msg_niov;
- struct kvec *msg_iov;
- lnet_kiov_t *msg_kiov;
-
- lnet_event_t msg_ev;
- lnet_hdr_t msg_hdr;
-} lnet_msg_t;
-
-typedef struct lnet_libhandle {
- struct list_head lh_hash_chain;
- __u64 lh_cookie;
-} lnet_libhandle_t;
-
-#define lh_entry(ptr, type, member) \
- ((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
-
-typedef struct lnet_eq {
- struct list_head eq_list;
- lnet_libhandle_t eq_lh;
- lnet_seq_t eq_enq_seq;
- lnet_seq_t eq_deq_seq;
- unsigned int eq_size;
- lnet_eq_handler_t eq_callback;
- lnet_event_t *eq_events;
- int **eq_refs; /* percpt refcount for EQ */
-} lnet_eq_t;
-
-typedef struct lnet_me {
- struct list_head me_list;
- lnet_libhandle_t me_lh;
- lnet_process_id_t me_match_id;
- unsigned int me_portal;
- unsigned int me_pos; /* hash offset in mt_hash */
- __u64 me_match_bits;
- __u64 me_ignore_bits;
- lnet_unlink_t me_unlink;
- struct lnet_libmd *me_md;
-} lnet_me_t;
-
-typedef struct lnet_libmd {
- struct list_head md_list;
- lnet_libhandle_t md_lh;
- lnet_me_t *md_me;
- char *md_start;
- unsigned int md_offset;
- unsigned int md_length;
- unsigned int md_max_size;
- int md_threshold;
- int md_refcount;
- unsigned int md_options;
- unsigned int md_flags;
- void *md_user_ptr;
- lnet_eq_t *md_eq;
- unsigned int md_niov; /* # frags */
- union {
- struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
- } md_iov;
-} lnet_libmd_t;
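/*
 * Illustrative sketch (hypothetical helper, not part of the header): the
 * lh_entry() macro above works like container_of() -- given a pointer to a
 * lnet_libhandle_t found on a lh_hash_chain, it recovers the enclosing
 * object, for example the MD that embeds md_lh.
 */
static inline struct lnet_libmd *lnet_handle2md_example(lnet_libhandle_t *lh)
{
	/* lh must point at the md_lh member embedded in a struct lnet_libmd */
	return lh_entry(lh, struct lnet_libmd, md_lh);
}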
-
-#define LNET_MD_FLAG_ZOMBIE (1 << 0)
-#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
-#define LNET_MD_FLAG_ABORTED (1 << 2)
-
-typedef struct {
- /* info about peers we are trying to fail */
- struct list_head tp_list; /* ln_test_peers */
- lnet_nid_t tp_nid; /* matching nid */
- unsigned int tp_threshold; /* # failures to simulate */
-} lnet_test_peer_t;
-
-#define LNET_COOKIE_TYPE_MD 1
-#define LNET_COOKIE_TYPE_ME 2
-#define LNET_COOKIE_TYPE_EQ 3
-#define LNET_COOKIE_TYPE_BITS 2
-#define LNET_COOKIE_MASK ((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL)
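/*
 * Illustrative sketch (hypothetical helper, not part of the header): the
 * mask above suggests the low LNET_COOKIE_TYPE_BITS bits of a handle cookie
 * carry the resource type, so classifying a cookie is a mask-and-compare.
 */
static inline int lnet_cookie_is_md_example(__u64 cookie)
{
	return (cookie & LNET_COOKIE_MASK) == LNET_COOKIE_TYPE_MD;
}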
-
-struct lnet_ni; /* forward ref */
-
-typedef struct lnet_lnd {
- /* fields managed by portals */
- struct list_head lnd_list; /* stash in the LND table */
- int lnd_refcount; /* # active instances */
-
- /* fields initialised by the LND */
- __u32 lnd_type;
-
- int (*lnd_startup)(struct lnet_ni *ni);
- void (*lnd_shutdown)(struct lnet_ni *ni);
- int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
-
- /* In data movement APIs below, payload buffers are described as a set
- * of 'niov' fragments which are...
- * EITHER
- * in virtual memory (struct iovec *iov != NULL)
- * OR
- * in pages (kernel only: lnet_kiov_t *kiov != NULL).
- * The LND may NOT overwrite these fragment descriptors.
- * An 'offset' may specify a byte offset within the set of
- * fragments to start from
- */
-
- /* Start sending a preformatted message. 'private' is NULL for PUT and
- * GET messages; otherwise this is a response to an incoming message
- * and 'private' is the 'private' passed to lnet_parse(). Return
- * non-zero for immediate failure, otherwise complete later with
- * lnet_finalize() */
- int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg);
-
- /* Start receiving 'mlen' bytes of payload data, skipping the following
- * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
- * lnet_parse(). Return non-zero for immediate failure, otherwise
- * complete later with lnet_finalize(). This also gives back a receive
- * credit if the LND does flow control. */
- int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
- int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen,
- unsigned int rlen);
-
- /* lnet_parse() has had to delay processing of this message
- * (e.g. waiting for a forwarding buffer or send credits). Give the
- * LND a chance to free urgently needed resources. If called, return 0
- * for success and do NOT give back a receive credit; that has to wait
- * until lnd_recv() gets called. On failure return < 0 and
- * release resources; lnd_recv() will not be called. */
- int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
- lnet_msg_t *msg, void **new_privatep);
-
- /* notification of peer health */
- void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);
-
- /* query of peer aliveness */
- void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer,
- unsigned long *when);
-
- /* accept a new connection */
- int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
-} lnd_t;
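/*
 * Minimal illustrative sketch (not part of the header): an LND fills in
 * lnd_type plus the callbacks it supports and leaves the rest NULL. The
 * names and the type value below are hypothetical stubs that only show the
 * prototypes expected by lnd_t above; a real LND completes send/recv
 * asynchronously via lnet_finalize() instead of failing immediately.
 */
static int example_lnd_startup(struct lnet_ni *ni) { return 0; }
static void example_lnd_shutdown(struct lnet_ni *ni) { }
static int example_lnd_send(struct lnet_ni *ni, void *private, lnet_msg_t *msg)
{
	return -1;	/* non-zero means immediate failure */
}
static int example_lnd_recv(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
			    int delayed, unsigned int niov,
			    struct kvec *iov, lnet_kiov_t *kiov,
			    unsigned int offset, unsigned int mlen,
			    unsigned int rlen)
{
	return -1;	/* non-zero means immediate failure */
}

static lnd_t example_lnd = {
	.lnd_type	= 0,	/* hypothetical LND type value */
	.lnd_startup	= example_lnd_startup,
	.lnd_shutdown	= example_lnd_shutdown,
	.lnd_send	= example_lnd_send,
	.lnd_recv	= example_lnd_recv,
};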
-
-struct lnet_tx_queue {
- int tq_credits; /* # tx credits free */
- int tq_credits_min; /* lowest it's been */
- int tq_credits_max; /* total # tx credits */
- struct list_head tq_delayed; /* delayed TXs */
-};
-
-typedef struct lnet_ni {
- spinlock_t ni_lock;
- struct list_head ni_list; /* chain on ln_nis */
- struct list_head ni_cptlist; /* chain on ln_nis_cpt */
- int ni_maxtxcredits; /* # tx credits */
- /* # per-peer send credits */
- int ni_peertxcredits;
- /* # per-peer router buffer credits */
- int ni_peerrtrcredits;
- /* seconds to consider peer dead */
- int ni_peertimeout;
- int ni_ncpts; /* number of CPTs */
- __u32 *ni_cpts; /* bond NI on some CPTs */
- lnet_nid_t ni_nid; /* interface's NID */
- void *ni_data; /* instance-specific data */
- lnd_t *ni_lnd; /* procedural interface */
- struct lnet_tx_queue **ni_tx_queues; /* percpt TX queues */
- int **ni_refs; /* percpt reference count */
- time64_t ni_last_alive;/* when I was last alive */
- lnet_ni_status_t *ni_status; /* my health status */
- /* equivalent interfaces to use */
- char *ni_interfaces[LNET_MAX_INTERFACES];
-} lnet_ni_t;
-
-#define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL
-
-/* NB: the values of these features are equal to LNET_PROTO_PING_VERSION_x
- * of old LNet, so there shouldn't be any compatibility issues */
-#define LNET_PING_FEAT_INVAL (0) /* no feature */
-#define LNET_PING_FEAT_BASE (1 << 0) /* just a ping */
-#define LNET_PING_FEAT_NI_STATUS (1 << 1) /* return NI status */
-
-#define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \
- LNET_PING_FEAT_NI_STATUS)
-
-/* router checker data, per router */
-#define LNET_MAX_RTR_NIS 16
-#define LNET_PINGINFO_SIZE offsetof(lnet_ping_info_t, pi_ni[LNET_MAX_RTR_NIS])
-typedef struct {
- /* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */
- struct list_head rcd_list;
- lnet_handle_md_t rcd_mdh; /* ping buffer MD */
- struct lnet_peer *rcd_gateway; /* reference to gateway */
- lnet_ping_info_t *rcd_pinginfo; /* ping buffer */
-} lnet_rc_data_t;
-
-typedef struct lnet_peer {
- struct list_head lp_hashlist; /* chain on peer hash */
- struct list_head lp_txq; /* messages blocking for
- tx credits */
- struct list_head lp_rtrq; /* messages blocking for
- router credits */
- struct list_head lp_rtr_list; /* chain on router list */
- int lp_txcredits; /* # tx credits available */
- int lp_mintxcredits; /* low water mark */
- int lp_rtrcredits; /* # router credits */
- int lp_minrtrcredits; /* low water mark */
- unsigned int lp_alive:1; /* alive/dead? */
- unsigned int lp_notify:1; /* notification outstanding? */
- unsigned int lp_notifylnd:1;/* outstanding notification
- for LND? */
- unsigned int lp_notifying:1; /* some thread is handling
- notification */
- unsigned int lp_ping_notsent;/* SEND event outstanding
- from ping */
- int lp_alive_count; /* # times router went
- dead<->alive */
- long lp_txqnob; /* bytes queued for sending */
- unsigned long lp_timestamp; /* time of last aliveness
- news */
- unsigned long lp_ping_timestamp;/* time of last ping
- attempt */
- unsigned long lp_ping_deadline; /* != 0 if ping reply
- expected */
- unsigned long lp_last_alive; /* when I was last alive */
- unsigned long lp_last_query; /* when lp_ni was queried
- last time */
- lnet_ni_t *lp_ni; /* interface peer is on */
- lnet_nid_t lp_nid; /* peer's NID */
- int lp_refcount; /* # refs */
- int lp_cpt; /* CPT this peer attached on */
- /* # refs from lnet_route_t::lr_gateway */
- int lp_rtr_refcount;
- /* returned RC ping features */
- unsigned int lp_ping_feats;
- struct list_head lp_routes; /* routers on this peer */
- lnet_rc_data_t *lp_rcd; /* router checker state */
-} lnet_peer_t;
-
-/* peer hash size */
-#define LNET_PEER_HASH_BITS 9
-#define LNET_PEER_HASH_SIZE (1 << LNET_PEER_HASH_BITS)
-
-/* peer hash table */
-struct lnet_peer_table {
- int pt_version; /* /proc validity stamp */
- int pt_number; /* # peers extant */
- struct list_head pt_deathrow; /* zombie peers */
- struct list_head *pt_hash; /* NID->peer hash */
-};
-
-/* peer aliveness is enabled only on routers for peers in a network where the
- * lnet_ni_t::ni_peertimeout has been set to a positive value */
-#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
- (lp)->lp_ni->ni_peertimeout > 0)
-
-typedef struct {
- struct list_head lr_list; /* chain on net */
- struct list_head lr_gwlist; /* chain on gateway */
- lnet_peer_t *lr_gateway; /* router node */
- __u32 lr_net; /* remote network number */
- int lr_seq; /* sequence for round-robin */
- unsigned int lr_downis; /* number of down NIs */
- unsigned int lr_hops; /* how far I am */
- unsigned int lr_priority; /* route priority */
-} lnet_route_t;
-
-#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
-#define LNET_REMOTE_NETS_HASH_MAX (1U << 16)
-#define LNET_REMOTE_NETS_HASH_SIZE (1 << the_lnet.ln_remote_nets_hbits)
-
-typedef struct {
- struct list_head lrn_list; /* chain on
- ln_remote_nets_hash */
- struct list_head lrn_routes; /* routes to me */
- __u32 lrn_net; /* my net number */
-} lnet_remotenet_t;
-
-/** lnet message has credit and can be submitted to lnd for send/receive */
-#define LNET_CREDIT_OK 0
-/** lnet message is waiting for credit */
-#define LNET_CREDIT_WAIT 1
-
-typedef struct {
- struct list_head rbp_bufs; /* my free buffer pool */
- struct list_head rbp_msgs; /* messages blocking
- for a buffer */
- int rbp_npages; /* # pages in each buffer */
- int rbp_nbuffers; /* # buffers */
- int rbp_credits; /* # free buffers /
- blocked messages */
- int rbp_mincredits; /* low water mark */
-} lnet_rtrbufpool_t;
-
-typedef struct {
- struct list_head rb_list; /* chain on rbp_bufs */
- lnet_rtrbufpool_t *rb_pool; /* owning pool */
- lnet_kiov_t rb_kiov[0]; /* the buffer space */
-} lnet_rtrbuf_t;
-
-#define LNET_PEER_HASHSIZE 503 /* prime! */
-
-#define LNET_NRBPOOLS 3 /* # different router buffer pools */
-
-enum {
- /* Didn't match anything */
- LNET_MATCHMD_NONE = (1 << 0),
- /* Matched OK */
- LNET_MATCHMD_OK = (1 << 1),
- /* Must be discarded */
- LNET_MATCHMD_DROP = (1 << 2),
- /* match and buffer is exhausted */
- LNET_MATCHMD_EXHAUSTED = (1 << 3),
- /* match or drop */
- LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
-};
-
-/* Options for lnet_portal_t::ptl_options */
-#define LNET_PTL_LAZY (1 << 0)
-#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
-#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match,
- request portal */
-
-/* parameter for matching operations (GET, PUT) */
-struct lnet_match_info {
- __u64 mi_mbits;
- lnet_process_id_t mi_id;
- unsigned int mi_opc;
- unsigned int mi_portal;
- unsigned int mi_rlength;
- unsigned int mi_roffset;
-};
-
-/* ME hash of RDMA portal */
-#define LNET_MT_HASH_BITS 8
-#define LNET_MT_HASH_SIZE (1 << LNET_MT_HASH_BITS)
-#define LNET_MT_HASH_MASK (LNET_MT_HASH_SIZE - 1)
-/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_mhash,
- * the last entry is reserved for MEs with ignore-bits */
-#define LNET_MT_HASH_IGNORE LNET_MT_HASH_SIZE
-/* __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
- * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the
- * ME-list with ignore-bits, which is mtable::mt_mhash[LNET_MT_HASH_IGNORE] */
-#define LNET_MT_BITS_U64 6 /* 2^6 bits */
-#define LNET_MT_EXHAUSTED_BITS (LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
-#define LNET_MT_EXHAUSTED_BMAP ((1 << LNET_MT_EXHAUSTED_BITS) + 1)
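/*
 * Illustrative sketch (hypothetical helper, not part of the header): the
 * arithmetic above gives 2^(8 - 6) = 4 bitmap words for the 256 hash buckets
 * plus one extra word for the ignore-bits list, so LNET_MT_EXHAUSTED_BMAP is
 * 5. Mapping a hash position to its bitmap slot is then a shift and a mask.
 */
static inline void
lnet_mt_exhausted_slot_example(unsigned int pos, unsigned int *word,
			       unsigned int *bit)
{
	*word = pos >> LNET_MT_BITS_U64;		/* which __u64 */
	*bit = pos & ((1 << LNET_MT_BITS_U64) - 1);	/* which bit in it */
}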
-
-/* portal match table */
-struct lnet_match_table {
- /* reserved for upcoming patches, CPU partition ID */
- unsigned int mt_cpt;
- unsigned int mt_portal; /* portal index */
- /* a match table is marked "enabled" if there is a non-exhausted MD
- * attached on mt_mhash; only valid for wildcard portals */
- unsigned int mt_enabled;
- /* bitmap to flag whether MEs on mt_mhash are exhausted or not */
- __u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
- struct list_head *mt_mhash; /* matching hash */
-};
-
-/* these are only useful for wildcard portal */
-/* Turn off message rotor for wildcard portals */
-#define LNET_PTL_ROTOR_OFF 0
-/* round-robin dispatch all PUT messages for wildcard portals */
-#define LNET_PTL_ROTOR_ON 1
-/* round-robin dispatch routed PUT message for wildcard portals */
-#define LNET_PTL_ROTOR_RR_RT 2
-/* dispatch routed PUT message by hashing source NID for wildcard portals */
-#define LNET_PTL_ROTOR_HASH_RT 3
-
-typedef struct lnet_portal {
- spinlock_t ptl_lock;
- unsigned int ptl_index; /* portal ID, reserved */
- /* flags on this portal: lazy, unique... */
- unsigned int ptl_options;
- /* list of messages which are stealing buffer */
- struct list_head ptl_msg_stealing;
- /* messages blocking for MD */
- struct list_head ptl_msg_delayed;
- /* Match table for each CPT */
- struct lnet_match_table **ptl_mtables;
- /* spread rotor of incoming "PUT" */
- unsigned int ptl_rotor;
- /* # active entries for this portal */
- int ptl_mt_nmaps;
- /* array of active entries' cpu-partition-id */
- int ptl_mt_maps[0];
-} lnet_portal_t;
-
-#define LNET_LH_HASH_BITS 12
-#define LNET_LH_HASH_SIZE (1ULL << LNET_LH_HASH_BITS)
-#define LNET_LH_HASH_MASK (LNET_LH_HASH_SIZE - 1)
-
-/* resource container (ME, MD, EQ) */
-struct lnet_res_container {
- unsigned int rec_type; /* container type */
- __u64 rec_lh_cookie; /* cookie generator */
- struct list_head rec_active; /* active resource list */
- struct list_head *rec_lh_hash; /* handle hash */
-};
-
-/* message container */
-struct lnet_msg_container {
- int msc_init; /* initialized or not */
- /* max # threads finalizing */
- int msc_nfinalizers;
- /* msgs waiting to complete finalizing */
- struct list_head msc_finalizing;
- struct list_head msc_active; /* active message list */
- /* threads doing finalization */
- void **msc_finalizers;
-};
-
-/* Router Checker states */
-#define LNET_RC_STATE_SHUTDOWN 0 /* not started */
-#define LNET_RC_STATE_RUNNING 1 /* started up OK */
-#define LNET_RC_STATE_STOPPING 2 /* telling thread to stop */
-
-typedef struct {
- /* CPU partition table of LNet */
- struct cfs_cpt_table *ln_cpt_table;
- /* number of CPTs in ln_cpt_table */
- unsigned int ln_cpt_number;
- unsigned int ln_cpt_bits;
-
- /* protect LNet resources (ME/MD/EQ) */
- struct cfs_percpt_lock *ln_res_lock;
- /* # portals */
- int ln_nportals;
- /* the vector of portals */
- lnet_portal_t **ln_portals;
- /* percpt ME containers */
- struct lnet_res_container **ln_me_containers;
- /* percpt MD container */
- struct lnet_res_container **ln_md_containers;
-
- /* Event Queue container */
- struct lnet_res_container ln_eq_container;
- wait_queue_head_t ln_eq_waitq;
- spinlock_t ln_eq_wait_lock;
- unsigned int ln_remote_nets_hbits;
-
- /* protect NI, peer table, credits, routers, rtrbuf... */
- struct cfs_percpt_lock *ln_net_lock;
- /* percpt message containers for active/finalizing/freed message */
- struct lnet_msg_container **ln_msg_containers;
- lnet_counters_t **ln_counters;
- struct lnet_peer_table **ln_peer_tables;
- /* failure simulation */
- struct list_head ln_test_peers;
-
- struct list_head ln_nis; /* LND instances */
- /* NIs bond on specific CPT(s) */
- struct list_head ln_nis_cpt;
- /* dying LND instances */
- struct list_head ln_nis_zombie;
- lnet_ni_t *ln_loni; /* the loopback NI */
- /* NI to wait for events in */
- lnet_ni_t *ln_eq_waitni;
-
- /* remote networks with routes to them */
- struct list_head *ln_remote_nets_hash;
- /* validity stamp */
- __u64 ln_remote_nets_version;
- /* list of all known routers */
- struct list_head ln_routers;
- /* validity stamp */
- __u64 ln_routers_version;
- /* percpt router buffer pools */
- lnet_rtrbufpool_t **ln_rtrpools;
-
- lnet_handle_md_t ln_ping_target_md;
- lnet_handle_eq_t ln_ping_target_eq;
- lnet_ping_info_t *ln_ping_info;
-
- /* router checker startup/shutdown state */
- int ln_rc_state;
- /* router checker's event queue */
- lnet_handle_eq_t ln_rc_eqh;
- /* rcd still pending on net */
- struct list_head ln_rcd_deathrow;
- /* rcd ready for free */
- struct list_head ln_rcd_zombie;
- /* serialise startup/shutdown */
- struct semaphore ln_rc_signal;
-
- struct mutex ln_api_mutex;
- struct mutex ln_lnd_mutex;
- int ln_init; /* lnet_init()
- called? */
- /* Have I called LNetNIInit myself? */
- int ln_niinit_self;
- /* LNetNIInit/LNetNIFini counter */
- int ln_refcount;
- /* shutdown in progress */
- int ln_shutdown;
-
- int ln_routing; /* am I a router? */
- lnet_pid_t ln_pid; /* requested pid */
- /* uniquely identifies this ni in this epoch */
- __u64 ln_interface_cookie;
- /* registered LNDs */
- struct list_head ln_lnds;
-
- /* space for network names */
- char *ln_network_tokens;
- int ln_network_tokens_nob;
- /* test protocol compatibility flags */
- int ln_testprotocompat;
-
-} lnet_t;
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lnet.h b/drivers/staging/lustre/include/linux/lnet/lnet.h
deleted file mode 100644
index 5d1559a26638..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/lnet.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- */
-
-#ifndef __LNET_H__
-#define __LNET_H__
-
-/*
- * lnet.h
- *
- * User application interface file
- */
-#include "types.h"
-#include "nidstr.h"
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/linux/lnet/lnetctl.h
deleted file mode 100644
index bdd69b2af909..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/lnetctl.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * This file is part of Portals, http://www.sf.net/projects/lustre/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * header for lnet ioctl
- */
-#ifndef _LNETCTL_H_
-#define _LNETCTL_H_
-
-#include "types.h"
-
-#define LNET_DEV_ID 0
-#define LNET_DEV_PATH "/dev/lnet"
-#define LNET_DEV_MAJOR 10
-#define LNET_DEV_MINOR 240
-#define OBD_DEV_ID 1
-#define OBD_DEV_NAME "obd"
-#define OBD_DEV_PATH "/dev/" OBD_DEV_NAME
-#define OBD_DEV_MAJOR 10
-#define OBD_DEV_MINOR 241
-#define SMFS_DEV_ID 2
-#define SMFS_DEV_PATH "/dev/snapdev"
-#define SMFS_DEV_MAJOR 10
-#define SMFS_DEV_MINOR 242
-
-int ptl_initialize(int argc, char **argv);
-int jt_ptl_network(int argc, char **argv);
-int jt_ptl_list_nids(int argc, char **argv);
-int jt_ptl_which_nid(int argc, char **argv);
-int jt_ptl_print_interfaces(int argc, char **argv);
-int jt_ptl_add_interface(int argc, char **argv);
-int jt_ptl_del_interface(int argc, char **argv);
-int jt_ptl_print_peers(int argc, char **argv);
-int jt_ptl_add_peer(int argc, char **argv);
-int jt_ptl_del_peer(int argc, char **argv);
-int jt_ptl_print_connections(int argc, char **argv);
-int jt_ptl_disconnect(int argc, char **argv);
-int jt_ptl_push_connection(int argc, char **argv);
-int jt_ptl_print_active_txs(int argc, char **argv);
-int jt_ptl_ping(int argc, char **argv);
-int jt_ptl_mynid(int argc, char **argv);
-int jt_ptl_add_uuid(int argc, char **argv);
-int jt_ptl_add_uuid_old(int argc, char **argv); /* backwards compatibility */
-int jt_ptl_close_uuid(int argc, char **argv);
-int jt_ptl_del_uuid(int argc, char **argv);
-int jt_ptl_add_route(int argc, char **argv);
-int jt_ptl_del_route(int argc, char **argv);
-int jt_ptl_notify_router(int argc, char **argv);
-int jt_ptl_print_routes(int argc, char **argv);
-int jt_ptl_fail_nid(int argc, char **argv);
-int jt_ptl_lwt(int argc, char **argv);
-int jt_ptl_testprotocompat(int argc, char **argv);
-int jt_ptl_memhog(int argc, char **argv);
-
-int dbg_initialize(int argc, char **argv);
-int jt_dbg_filter(int argc, char **argv);
-int jt_dbg_show(int argc, char **argv);
-int jt_dbg_list(int argc, char **argv);
-int jt_dbg_debug_kernel(int argc, char **argv);
-int jt_dbg_debug_daemon(int argc, char **argv);
-int jt_dbg_debug_file(int argc, char **argv);
-int jt_dbg_clear_debug_buf(int argc, char **argv);
-int jt_dbg_mark_debug_buf(int argc, char **argv);
-int jt_dbg_modules(int argc, char **argv);
-int jt_dbg_panic(int argc, char **argv);
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
deleted file mode 100644
index fd1e0fd3696f..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/lnetst.h
- *
- * Author: Liang Zhen <liang.zhen@intel.com>
- */
-
-#ifndef __LNET_ST_H__
-#define __LNET_ST_H__
-
-#include <linux/types.h>
-
-#define LST_FEAT_NONE (0)
-#define LST_FEAT_BULK_LEN (1 << 0) /* enable variable page size */
-
-#define LST_FEATS_EMPTY (LST_FEAT_NONE)
-#define LST_FEATS_MASK (LST_FEAT_NONE | LST_FEAT_BULK_LEN)
-
-#define LST_NAME_SIZE 32 /* max name buffer length */
-
-#define LSTIO_DEBUG 0xC00 /* debug */
-#define LSTIO_SESSION_NEW 0xC01 /* create session */
-#define LSTIO_SESSION_END 0xC02 /* end session */
-#define LSTIO_SESSION_INFO 0xC03 /* query session */
-#define LSTIO_GROUP_ADD 0xC10 /* add group */
-#define LSTIO_GROUP_LIST 0xC11 /* list all groups in session */
-#define LSTIO_GROUP_INFO 0xC12 /* query default information of
- * specified group */
-#define LSTIO_GROUP_DEL 0xC13 /* delete group */
-#define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */
-#define LSTIO_GROUP_UPDATE 0xC15 /* update group */
-#define LSTIO_BATCH_ADD 0xC20 /* add batch */
-#define LSTIO_BATCH_START 0xC21 /* start batch */
-#define LSTIO_BATCH_STOP 0xC22 /* stop batch */
-#define LSTIO_BATCH_DEL 0xC23 /* delete batch */
-#define LSTIO_BATCH_LIST 0xC24 /* show all batches in the session */
-#define LSTIO_BATCH_INFO 0xC25 /* show detail of specified batch */
-#define LSTIO_TEST_ADD 0xC26 /* add test (to batch) */
-#define LSTIO_BATCH_QUERY 0xC27 /* query batch status */
-#define LSTIO_STAT_QUERY 0xC30 /* get stats */
-
-typedef struct {
- lnet_nid_t ses_nid; /* nid of console node */
- __u64 ses_stamp; /* time stamp */
-} lst_sid_t; /*** session id */
-
-extern lst_sid_t LST_INVALID_SID;
-
-typedef struct {
- __u64 bat_id; /* unique id in session */
-} lst_bid_t; /*** batch id (group of tests) */
-
-/* Status of test node */
-#define LST_NODE_ACTIVE 0x1 /* node in this session */
-#define LST_NODE_BUSY 0x2 /* node is taken by other session */
-#define LST_NODE_DOWN 0x4 /* node is down */
-#define LST_NODE_UNKNOWN 0x8 /* node not in session */
-
-typedef struct {
- lnet_process_id_t nde_id; /* id of node */
- int nde_state; /* state of node */
-} lstcon_node_ent_t; /*** node entry, for list_group command */
-
-typedef struct {
- int nle_nnode; /* # of nodes */
- int nle_nactive; /* # of active nodes */
- int nle_nbusy; /* # of busy nodes */
- int nle_ndown; /* # of down nodes */
- int nle_nunknown; /* # of unknown nodes */
-} lstcon_ndlist_ent_t; /*** node_list entry, for list_batch command */
-
-typedef struct {
- int tse_type; /* test type */
- int tse_loop; /* loop count */
- int tse_concur; /* concurrency of test */
-} lstcon_test_ent_t; /*** test summary entry, for
- *** list_batch command */
-
-typedef struct {
- int bae_state; /* batch status */
- int bae_timeout; /* batch timeout */
- int bae_ntest; /* # of tests in the batch */
-} lstcon_batch_ent_t; /*** batch summary entry, for
- *** list_batch command */
-
-typedef struct {
- lstcon_ndlist_ent_t tbe_cli_nle; /* client (group) node_list
- * entry */
- lstcon_ndlist_ent_t tbe_srv_nle; /* server (group) node_list
- * entry */
- union {
- lstcon_test_ent_t tbe_test; /* test entry */
- lstcon_batch_ent_t tbe_batch; /* batch entry */
- } u;
-} lstcon_test_batch_ent_t; /*** test/batch verbose information entry,
- *** for list_batch command */
-
-typedef struct {
- struct list_head rpe_link; /* link chain */
- lnet_process_id_t rpe_peer; /* peer's id */
- struct timeval rpe_stamp; /* time stamp of RPC */
- int rpe_state; /* peer's state */
- int rpe_rpc_errno; /* RPC errno */
-
- lst_sid_t rpe_sid; /* peer's session id */
- int rpe_fwk_errno; /* framework errno */
- int rpe_priv[4]; /* private data */
- char rpe_payload[0]; /* private reply payload */
-} lstcon_rpc_ent_t;
-
-typedef struct {
- int trs_rpc_stat[4]; /* RPC stats (0: total
- 1: success
- 2: failure
- 3: reserved) */
- int trs_rpc_errno; /* RPC errno */
- int trs_fwk_stat[8]; /* framework stat */
- int trs_fwk_errno; /* errno of the first remote error */
- void *trs_fwk_private; /* private framework stat */
-} lstcon_trans_stat_t;
-
-static inline int
-lstcon_rpc_stat_total(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_rpc_stat[0] : stat->trs_rpc_stat[0];
-}
-
-static inline int
-lstcon_rpc_stat_success(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_rpc_stat[1] : stat->trs_rpc_stat[1];
-}
-
-static inline int
-lstcon_rpc_stat_failure(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_rpc_stat[2] : stat->trs_rpc_stat[2];
-}
-
-static inline int
-lstcon_sesop_stat_success(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_sesop_stat_failure(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_sesqry_stat_active(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_sesqry_stat_busy(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_sesqry_stat_unknown(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2];
-}
-
-static inline int
-lstcon_tsbop_stat_success(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_tsbop_stat_failure(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_tsbqry_stat_idle(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_tsbqry_stat_run(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-static inline int
-lstcon_tsbqry_stat_failure(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2];
-}
-
-static inline int
-lstcon_statqry_stat_success(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
-}
-
-static inline int
-lstcon_statqry_stat_failure(lstcon_trans_stat_t *stat, int inc)
-{
- return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
-}
-
-/* create a session */
-typedef struct {
- int lstio_ses_key; /* IN: local key */
- int lstio_ses_timeout; /* IN: session timeout */
- int lstio_ses_force; /* IN: force create ? */
- /** IN: session features */
- unsigned lstio_ses_feats;
- lst_sid_t *lstio_ses_idp; /* OUT: session id */
- int lstio_ses_nmlen; /* IN: name length */
- char *lstio_ses_namep; /* IN: session name */
-} lstio_session_new_args_t;
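/*
 * Illustrative sketch (hypothetical helper, not part of the header):
 * user space fills the IN fields and passes a pointer for the OUT session
 * id; how the structure actually reaches the kernel (the surrounding ioctl
 * plumbing) is outside this header.
 */
static void lst_session_new_args_example(lstio_session_new_args_t *args,
					 lst_sid_t *sid_out)
{
	static char name[] = "demo_session";	/* hypothetical session name */

	args->lstio_ses_key = 1;		/* hypothetical local key */
	args->lstio_ses_timeout = 300;		/* seconds */
	args->lstio_ses_force = 0;
	args->lstio_ses_feats = LST_FEATS_MASK;
	args->lstio_ses_idp = sid_out;		/* OUT: session id */
	args->lstio_ses_namep = name;
	args->lstio_ses_nmlen = sizeof(name) - 1;
}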
-
-/* query current session */
-typedef struct {
- lst_sid_t *lstio_ses_idp; /* OUT: session id */
- int *lstio_ses_keyp; /* OUT: local key */
- /** OUT: session features */
- unsigned *lstio_ses_featp;
- lstcon_ndlist_ent_t *lstio_ses_ndinfo; /* OUT: */
- int lstio_ses_nmlen; /* IN: name length */
- char *lstio_ses_namep; /* OUT: session name */
-} lstio_session_info_args_t;
-
-/* delete a session */
-typedef struct {
- int lstio_ses_key; /* IN: session key */
-} lstio_session_end_args_t;
-
-#define LST_OPC_SESSION 1
-#define LST_OPC_GROUP 2
-#define LST_OPC_NODES 3
-#define LST_OPC_BATCHCLI 4
-#define LST_OPC_BATCHSRV 5
-
-typedef struct {
- int lstio_dbg_key; /* IN: session key */
- int lstio_dbg_type; /* IN: debug
- session|batch|
- group|nodes
- list */
- int lstio_dbg_flags; /* IN: reserved debug
- flags */
- int lstio_dbg_timeout; /* IN: timeout of
- debug */
- int lstio_dbg_nmlen; /* IN: len of name */
- char *lstio_dbg_namep; /* IN: name of
- group|batch */
- int lstio_dbg_count; /* IN: # of test nodes
- to debug */
- lnet_process_id_t *lstio_dbg_idsp; /* IN: id of test
- nodes */
- struct list_head *lstio_dbg_resultp; /* OUT: list head of
- result buffer */
-} lstio_debug_args_t;
-
-typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
-} lstio_group_add_args_t;
-
-typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
-} lstio_group_del_args_t;
-
-#define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */
-#define LST_GROUP_REFRESH 2 /* refresh inactive nodes
- * in the group */
-#define LST_GROUP_RMND 3 /* delete nodes from the group */
-
-typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_opc; /* IN: OPC */
- int lstio_grp_args; /* IN: arguments */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
- int lstio_grp_count; /* IN: # of nodes id */
- lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */
- struct list_head *lstio_grp_resultp; /* OUT: list head of
- result buffer */
-} lstio_group_update_args_t;
-
-typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
- int lstio_grp_count; /* IN: # of nodes */
- /** OUT: session features */
- unsigned *lstio_grp_featp;
- lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */
- struct list_head *lstio_grp_resultp; /* OUT: list head of
- result buffer */
-} lstio_group_nodes_args_t;
-
-typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_idx; /* IN: group idx */
- int lstio_grp_nmlen; /* IN: name len */
- char *lstio_grp_namep; /* OUT: name */
-} lstio_group_list_args_t;
-
-typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name len */
- char *lstio_grp_namep; /* IN: name */
- lstcon_ndlist_ent_t *lstio_grp_entp; /* OUT: description of
- group */
- int *lstio_grp_idxp; /* IN/OUT: node index */
- int *lstio_grp_ndentp; /* IN/OUT: # of nodent */
- lstcon_node_ent_t *lstio_grp_dentsp; /* OUT: nodent array */
-} lstio_group_info_args_t;
-
-#define LST_DEFAULT_BATCH "batch" /* default batch name */
-
-typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
-} lstio_batch_add_args_t;
-
-typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
-} lstio_batch_del_args_t;
-
-typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_timeout; /* IN: timeout for
- the batch */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of
- result buffer */
-} lstio_batch_run_args_t;
-
-typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_force; /* IN: abort unfinished
- test RPC */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of
- result buffer */
-} lstio_batch_stop_args_t;
-
-typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_testidx; /* IN: test index */
- int lstio_bat_client; /* IN: are we testing
- the client? */
- int lstio_bat_timeout; /* IN: timeout for
- waiting */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of
- result buffer */
-} lstio_batch_query_args_t;
-
-typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_idx; /* IN: index */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
-} lstio_batch_list_args_t;
-
-typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: name */
- int lstio_bat_server; /* IN: query server
- or not */
- int lstio_bat_testidx; /* IN: test index */
- lstcon_test_batch_ent_t *lstio_bat_entp; /* OUT: batch ent */
-
- int *lstio_bat_idxp; /* IN/OUT: index of node */
- int *lstio_bat_ndentp; /* IN/OUT: # of nodent */
- lstcon_node_ent_t *lstio_bat_dentsp; /* array of nodent */
-} lstio_batch_info_args_t;
-
-/* add stat in session */
-typedef struct {
- int lstio_sta_key; /* IN: session key */
- int lstio_sta_timeout; /* IN: timeout for
- stat request */
- int lstio_sta_nmlen; /* IN: group name
- length */
- char *lstio_sta_namep; /* IN: group name */
- int lstio_sta_count; /* IN: # of pid */
- lnet_process_id_t *lstio_sta_idsp; /* IN: pid */
- struct list_head *lstio_sta_resultp; /* OUT: list head of
- result buffer */
-} lstio_stat_args_t;
-
-typedef enum {
- LST_TEST_BULK = 1,
- LST_TEST_PING = 2
-} lst_test_type_t;
-
-/* create a test in a batch */
-#define LST_MAX_CONCUR 1024 /* Max concurrency of test */
-
-typedef struct {
- int lstio_tes_key; /* IN: session key */
- int lstio_tes_bat_nmlen; /* IN: batch name len */
- char *lstio_tes_bat_name; /* IN: batch name */
- int lstio_tes_type; /* IN: test type */
- int lstio_tes_oneside; /* IN: one sided test */
- int lstio_tes_loop; /* IN: loop count */
- int lstio_tes_concur; /* IN: concurrency */
-
- int lstio_tes_dist; /* IN: node distribution in
- destination groups */
- int lstio_tes_span; /* IN: node span in
- destination groups */
- int lstio_tes_sgrp_nmlen; /* IN: source group
- name length */
- char *lstio_tes_sgrp_name; /* IN: group name */
- int lstio_tes_dgrp_nmlen; /* IN: destination group
- name length */
- char *lstio_tes_dgrp_name; /* IN: group name */
-
- int lstio_tes_param_len; /* IN: param buffer len */
- void *lstio_tes_param; /* IN: parameter for specified
- test:
- lstio_bulk_param_t,
- lstio_ping_param_t,
- ... more */
- int *lstio_tes_retp; /* OUT: private returned
- value */
- struct list_head *lstio_tes_resultp; /* OUT: list head of
- result buffer */
-} lstio_test_args_t;
-
-typedef enum {
- LST_BRW_READ = 1,
- LST_BRW_WRITE = 2
-} lst_brw_type_t;
-
-typedef enum {
- LST_BRW_CHECK_NONE = 1,
- LST_BRW_CHECK_SIMPLE = 2,
- LST_BRW_CHECK_FULL = 3
-} lst_brw_flags_t;
-
-typedef struct {
- int blk_opc; /* bulk operation code */
- int blk_size; /* size (bytes) */
- int blk_time; /* time of running the test*/
- int blk_flags; /* reserved flags */
-} lst_test_bulk_param_t;
-
-typedef struct {
- int png_size; /* size of ping message */
- int png_time; /* time */
- int png_loop; /* loop */
- int png_flags; /* reserved flags */
-} lst_test_ping_param_t;
-
-typedef struct {
- __u32 errors;
- __u32 rpcs_sent;
- __u32 rpcs_rcvd;
- __u32 rpcs_dropped;
- __u32 rpcs_expired;
- __u64 bulk_get;
- __u64 bulk_put;
-} WIRE_ATTR srpc_counters_t;
-
-typedef struct {
- /** milliseconds since current session started */
- __u32 running_ms;
- __u32 active_batches;
- __u32 zombie_sessions;
- __u32 brw_errors;
- __u32 ping_errors;
-} WIRE_ATTR sfw_counters_t;
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/nidstr.h b/drivers/staging/lustre/include/linux/lnet/nidstr.h
deleted file mode 100644
index a627be9fcdd0..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/nidstr.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011 - 2015, Intel Corporation.
- */
-#ifndef _LNET_NIDSTRINGS_H
-#define _LNET_NIDSTRINGS_H
-
-#include "types.h"
-
-/**
- * Lustre Network Driver types.
- */
-enum {
- /* Only add to these values (i.e. don't ever change or redefine them):
- * network addresses depend on them... */
- QSWLND = 1,
- SOCKLND = 2,
- GMLND = 3,
- PTLLND = 4,
- O2IBLND = 5,
- CIBLND = 6,
- OPENIBLND = 7,
- IIBLND = 8,
- LOLND = 9,
- RALND = 10,
- VIBLND = 11,
- MXLND = 12,
- GNILND = 13,
- GNIIPLND = 14,
-};
-
-struct list_head;
-
-#define LNET_NIDSTR_COUNT 1024 /* # of nidstrings */
-#define LNET_NIDSTR_SIZE 32 /* size of each one (see below for usage) */
-
-int libcfs_isknown_lnd(int type);
-char *libcfs_lnd2modname(int type);
-char *libcfs_lnd2str(int type);
-int libcfs_str2lnd(const char *str);
-char *libcfs_net2str(__u32 net);
-char *libcfs_nid2str(lnet_nid_t nid);
-__u32 libcfs_str2net(const char *str);
-lnet_nid_t libcfs_str2nid(const char *str);
-int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
-char *libcfs_id2str(lnet_process_id_t id);
-void cfs_free_nidlist(struct list_head *list);
-int cfs_parse_nidlist(char *str, int len, struct list_head *list);
-int cfs_match_nid(lnet_nid_t nid, struct list_head *list);
-bool cfs_nidrange_is_contiguous(struct list_head *nidlist);
-void cfs_nidrange_find_min_max(struct list_head *nidlist, char *min_nid,
- char *max_nid, size_t nidstr_length);
-
-#endif /* _LNET_NIDSTRINGS_H */
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
deleted file mode 100644
index 599c9f6628fb..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- *
- * lnet/include/lnet/socklnd.h
- */
-#ifndef __LNET_LNET_SOCKLND_H__
-#define __LNET_LNET_SOCKLND_H__
-
-#include "types.h"
-
-#define SOCKLND_CONN_NONE (-1)
-#define SOCKLND_CONN_ANY 0
-#define SOCKLND_CONN_CONTROL 1
-#define SOCKLND_CONN_BULK_IN 2
-#define SOCKLND_CONN_BULK_OUT 3
-#define SOCKLND_CONN_NTYPES 4
-
-#define SOCKLND_CONN_ACK SOCKLND_CONN_BULK_IN
-
-typedef struct {
- __u32 kshm_magic; /* magic number of socklnd message */
- __u32 kshm_version; /* version of socklnd message */
- lnet_nid_t kshm_src_nid; /* sender's nid */
- lnet_nid_t kshm_dst_nid; /* destination nid */
- lnet_pid_t kshm_src_pid; /* sender's pid */
- lnet_pid_t kshm_dst_pid; /* destination pid */
- __u64 kshm_src_incarnation; /* sender's incarnation */
- __u64 kshm_dst_incarnation; /* destination's incarnation */
- __u32 kshm_ctype; /* connection type */
- __u32 kshm_nips; /* # IP addrs */
- __u32 kshm_ips[0]; /* IP addrs */
-} WIRE_ATTR ksock_hello_msg_t;
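/*
 * Illustrative sketch (hypothetical helper, not part of the header):
 * kshm_ips[] is a flexible array, so the on-wire size of a hello message is
 * the fixed header plus one __u32 per advertised IP address (offsetof is
 * assumed to be available from the usual kernel headers).
 */
static inline size_t ksock_hello_msg_size_example(__u32 nips)
{
	return offsetof(ksock_hello_msg_t, kshm_ips) + nips * sizeof(__u32);
}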
-
-typedef struct {
- lnet_hdr_t ksnm_hdr; /* lnet hdr */
-
- /*
- * ksnm_payload is removed because of winnt compiler's limitation:
- * zero-sized array can only be placed at the tail of [nested]
- * structure definitions. lnet payload will be stored just after
- * the body of structure ksock_lnet_msg_t
- */
-} WIRE_ATTR ksock_lnet_msg_t;
-
-typedef struct {
- __u32 ksm_type; /* type of socklnd message */
- __u32 ksm_csum; /* checksum if != 0 */
- __u64 ksm_zc_cookies[2]; /* Zero-Copy request/ACK cookie */
- union {
- ksock_lnet_msg_t lnetmsg;/* lnet message, it's empty if
- * it's NOOP */
- } WIRE_ATTR ksm_u;
-} WIRE_ATTR ksock_msg_t;
-
-static inline void
-socklnd_init_msg(ksock_msg_t *msg, int type)
-{
- msg->ksm_csum = 0;
- msg->ksm_type = type;
- msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
-}
-
-#define KSOCK_MSG_NOOP 0xC0 /* ksm_u empty */
-#define KSOCK_MSG_LNET 0xC1 /* lnet msg */
-
-/* We need to know this number to parse hello msg from ksocklnd in
- * other LNDs (usocklnd, for example) */
-#define KSOCK_PROTO_V2 2
-#define KSOCK_PROTO_V3 3
-
-#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
deleted file mode 100644
index 11630180c5e7..000000000000
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012 - 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- */
-
-#ifndef __LNET_TYPES_H__
-#define __LNET_TYPES_H__
-
-#include <linux/types.h>
-
-/** \addtogroup lnet
- * @{ */
-
-/** \addtogroup lnet_addr
- * @{ */
-
-/** Portal reserved for LNet's own use.
- * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
- */
-#define LNET_RESERVED_PORTAL 0
-
-/**
- * Address of an end-point in an LNet network.
- *
- * A node can have multiple end-points and hence multiple addresses.
- * An LNet network can be a simple network (e.g. tcp0) or a network of
- * LNet networks connected by LNet routers. Therefore an end-point address
- * has two parts: network ID, and address within a network.
- *
- * \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID.
- */
-typedef __u64 lnet_nid_t;
-/**
- * ID of a process in a node. Shortened as PID to distinguish from
- * lnet_process_id_t, the global process ID.
- */
-typedef __u32 lnet_pid_t;
-
-/** wildcard NID that matches any end-point address */
-#define LNET_NID_ANY ((lnet_nid_t) -1)
-/** wildcard PID that matches any lnet_pid_t */
-#define LNET_PID_ANY ((lnet_pid_t) -1)
-
-#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
-#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
-#define LNET_PID_LUSTRE 12345
-
-#define LNET_TIME_FOREVER (-1)
-
-/* how an LNET NID encodes net:address */
-/** extract the address part of an lnet_nid_t */
-
-static inline __u32 LNET_NIDADDR(lnet_nid_t nid)
-{
- return nid & 0xffffffff;
-}
-
-static inline __u32 LNET_NIDNET(lnet_nid_t nid)
-{
- return (nid >> 32) & 0xffffffff;
-}
-
-static inline lnet_nid_t LNET_MKNID(__u32 net, __u32 addr)
-{
- return (((__u64)net) << 32) | addr;
-}
-
-static inline __u32 LNET_NETNUM(__u32 net)
-{
- return net & 0xffff;
-}
-
-static inline __u32 LNET_NETTYP(__u32 net)
-{
- return (net >> 16) & 0xffff;
-}
-
-static inline __u32 LNET_MKNET(__u32 type, __u32 num)
-{
- return (type << 16) | num;
-}
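/*
 * Worked example (illustrative, not part of the header): a NID packs
 * <network, address> into 64 bits, and the network itself packs
 * <LND type, instance number> into 32 bits. Round-tripping the helpers:
 */
static inline void lnet_nid_roundtrip_example(void)
{
	__u32 net = LNET_MKNET(2, 0);	/* e.g. LND type 2, instance 0 */
	__u32 addr = 0xc0a80001;	/* 192.168.0.1 as a host-order __u32 */
	lnet_nid_t nid = LNET_MKNID(net, addr);

	/* LNET_NIDNET(nid) == net, LNET_NIDADDR(nid) == addr,
	 * LNET_NETTYP(net) == 2 and LNET_NETNUM(net) == 0 again. */
	(void)nid;
}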
-
-#define WIRE_ATTR __packed
-
-/* Packed version of lnet_process_id_t to transfer via network */
-typedef struct {
- /* node id / process id */
- lnet_nid_t nid;
- lnet_pid_t pid;
-} WIRE_ATTR lnet_process_id_packed_t;
-
-/* The wire handle's interface cookie only matches one network interface in
- * one epoch (i.e. new cookie when the interface restarts or the node
- * reboots). The object cookie only matches one object on that interface
- * during that object's lifetime (i.e. no cookie re-use). */
-typedef struct {
- __u64 wh_interface_cookie;
- __u64 wh_object_cookie;
-} WIRE_ATTR lnet_handle_wire_t;
-
-typedef enum {
- LNET_MSG_ACK = 0,
- LNET_MSG_PUT,
- LNET_MSG_GET,
- LNET_MSG_REPLY,
- LNET_MSG_HELLO,
-} lnet_msg_type_t;
-
-/* The variant fields of the portals message header are aligned on an 8
- * byte boundary in the message header. Note that all types used in these
- * wire structs MUST be fixed size and the smaller types are placed at the
- * end. */
-typedef struct lnet_ack {
- lnet_handle_wire_t dst_wmd;
- __u64 match_bits;
- __u32 mlength;
-} WIRE_ATTR lnet_ack_t;
-
-typedef struct lnet_put {
- lnet_handle_wire_t ack_wmd;
- __u64 match_bits;
- __u64 hdr_data;
- __u32 ptl_index;
- __u32 offset;
-} WIRE_ATTR lnet_put_t;
-
-typedef struct lnet_get {
- lnet_handle_wire_t return_wmd;
- __u64 match_bits;
- __u32 ptl_index;
- __u32 src_offset;
- __u32 sink_length;
-} WIRE_ATTR lnet_get_t;
-
-typedef struct lnet_reply {
- lnet_handle_wire_t dst_wmd;
-} WIRE_ATTR lnet_reply_t;
-
-typedef struct lnet_hello {
- __u64 incarnation;
- __u32 type;
-} WIRE_ATTR lnet_hello_t;
-
-typedef struct {
- lnet_nid_t dest_nid;
- lnet_nid_t src_nid;
- lnet_pid_t dest_pid;
- lnet_pid_t src_pid;
- __u32 type; /* lnet_msg_type_t */
- __u32 payload_length; /* payload data to follow */
- /*<------__u64 aligned------->*/
- union {
- lnet_ack_t ack;
- lnet_put_t put;
- lnet_get_t get;
- lnet_reply_t reply;
- lnet_hello_t hello;
- } msg;
-} WIRE_ATTR lnet_hdr_t;
-
-/* A HELLO message contains a magic number and protocol version
- * code in the header's dest_nid, the peer's NID in the src_nid, and
- * LNET_MSG_HELLO in the type field. All other common fields are zero
- * (including payload_length; i.e. no payload).
- * This is for use by byte-stream LNDs (e.g. TCP/IP) to check the peer is
- * running the same protocol and to find out its NID. These LNDs should
- * exchange HELLO messages when a connection is first established. Individual
- * LNDs can put whatever else they fancy in lnet_hdr_t::msg.
- */
-typedef struct {
- __u32 magic; /* LNET_PROTO_TCP_MAGIC */
- __u16 version_major; /* increment on incompatible change */
- __u16 version_minor; /* increment on compatible change */
-} WIRE_ATTR lnet_magicversion_t;
-
-/* PROTO MAGIC for LNDs */
-#define LNET_PROTO_IB_MAGIC 0x0be91b91
-#define LNET_PROTO_GNI_MAGIC 0xb00fbabe /* ask Kim */
-#define LNET_PROTO_TCP_MAGIC 0xeebc0ded
-#define LNET_PROTO_ACCEPTOR_MAGIC 0xacce7100
-#define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */
-
-/* Placeholder for a future "unified" protocol across all LNDs */
-/* Current LNDs that receive a request with this magic will respond with a
- * "stub" reply using their current protocol */
-#define LNET_PROTO_MAGIC 0x45726963 /* ! */
-
-#define LNET_PROTO_TCP_VERSION_MAJOR 1
-#define LNET_PROTO_TCP_VERSION_MINOR 0
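/*
 * Illustrative sketch (hypothetical helper, not part of the header): per
 * the HELLO comment above, a byte-stream LND can validate a peer's
 * magic/version block before trusting anything else in the connection.
 */
static inline int lnet_check_magicversion_example(const lnet_magicversion_t *mv)
{
	if (mv->magic != LNET_PROTO_TCP_MAGIC)
		return -1;	/* peer is not speaking this protocol */
	if (mv->version_major != LNET_PROTO_TCP_VERSION_MAJOR)
		return -1;	/* incompatible major version */
	return 0;
}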
-
-/* Acceptor connection request */
-typedef struct {
- __u32 acr_magic; /* PTL_ACCEPTOR_PROTO_MAGIC */
- __u32 acr_version; /* protocol version */
- __u64 acr_nid; /* target NID */
-} WIRE_ATTR lnet_acceptor_connreq_t;
-
-#define LNET_PROTO_ACCEPTOR_VERSION 1
-
-typedef struct {
- lnet_nid_t ns_nid;
- __u32 ns_status;
- __u32 ns_unused;
-} WIRE_ATTR lnet_ni_status_t;
-
-typedef struct {
- __u32 pi_magic;
- __u32 pi_features;
- lnet_pid_t pi_pid;
- __u32 pi_nnis;
- lnet_ni_status_t pi_ni[0];
-} WIRE_ATTR lnet_ping_info_t;
-
-typedef struct lnet_counters {
- __u32 msgs_alloc;
- __u32 msgs_max;
- __u32 errors;
- __u32 send_count;
- __u32 recv_count;
- __u32 route_count;
- __u32 drop_count;
- __u64 send_length;
- __u64 recv_length;
- __u64 route_length;
- __u64 drop_length;
-} WIRE_ATTR lnet_counters_t;
-
-#define LNET_NI_STATUS_UP 0x15aac0de
-#define LNET_NI_STATUS_DOWN 0xdeadface
-#define LNET_NI_STATUS_INVALID 0x00000000
-
-#define LNET_MAX_INTERFACES 16
-
-/*
- * Objects maintained by the LNet are accessed through handles. Handle types
- * have names of the form lnet_handle_xx_t, where xx is one of the two letter
- * object type codes ('eq' for event queue, 'md' for memory descriptor, and
- * 'me' for match entry).
- * Each type of object is given a unique handle type to enhance type checking.
- * The type lnet_handle_any_t can be used when a generic handle is needed.
- * Every handle value can be converted into a value of type lnet_handle_any_t
- * without loss of information.
- */
-typedef struct {
- __u64 cookie;
-} lnet_handle_any_t;
-
-typedef lnet_handle_any_t lnet_handle_eq_t;
-typedef lnet_handle_any_t lnet_handle_md_t;
-typedef lnet_handle_any_t lnet_handle_me_t;
-
-#define LNET_WIRE_HANDLE_COOKIE_NONE (-1)
-
-/**
- * Invalidate handle \a h.
- */
-static inline void LNetInvalidateHandle(lnet_handle_any_t *h)
-{
- h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
-}
-
-/**
- * Compare handles \a h1 and \a h2.
- *
- * \return 1 if handles are equal, 0 if otherwise.
- */
-static inline int LNetHandleIsEqual(lnet_handle_any_t h1, lnet_handle_any_t h2)
-{
- return h1.cookie == h2.cookie;
-}
-
-/**
- * Check whether handle \a h is invalid.
- *
- * \return 1 if handle is invalid, 0 if valid.
- */
-static inline int LNetHandleIsInvalid(lnet_handle_any_t h)
-{
- return h.cookie == LNET_WIRE_HANDLE_COOKIE_NONE;
-}
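/*
 * Usage sketch (illustrative, not part of the header): handles start out
 * invalidated and are tested through the helpers above rather than by
 * inspecting the cookie directly.
 */
static inline void lnet_handle_usage_example(void)
{
	lnet_handle_md_t mdh;

	LNetInvalidateHandle(&mdh);
	if (LNetHandleIsInvalid(mdh)) {
		/* nothing attached yet; e.g. skip any unlink/teardown step */
	}
}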
-
-/**
- * Global process ID.
- */
-typedef struct {
- /** node id */
- lnet_nid_t nid;
- /** process id */
- lnet_pid_t pid;
-} lnet_process_id_t;
-/** @} lnet_addr */
-
-/** \addtogroup lnet_me
- * @{ */
-
-/**
- * Specifies whether the match entry or memory descriptor should be unlinked
- * automatically (LNET_UNLINK) or not (LNET_RETAIN).
- */
-typedef enum {
- LNET_RETAIN = 0,
- LNET_UNLINK
-} lnet_unlink_t;
-
-/**
- * Values of the type lnet_ins_pos_t are used to control where a new match
- * entry is inserted. The value LNET_INS_BEFORE is used to insert the new
- * entry before the current entry or before the head of the list. The value
- * LNET_INS_AFTER is used to insert the new entry after the current entry
- * or after the last item in the list.
- */
-typedef enum {
- /** insert ME before current position or head of the list */
- LNET_INS_BEFORE,
- /** insert ME after current position or tail of the list */
- LNET_INS_AFTER,
- /** attach ME at tail of local CPU partition ME list */
- LNET_INS_LOCAL
-} lnet_ins_pos_t;
-
-/** @} lnet_me */
-
-/** \addtogroup lnet_md
- * @{ */
-
-/**
- * Defines the visible parts of a memory descriptor. Values of this type
- * are used to initialize memory descriptors.
- */
-typedef struct {
- /**
- * Specify the memory region associated with the memory descriptor.
- * If the options field has:
- * - LNET_MD_KIOV bit set: The start field points to the starting
- * address of an array of lnet_kiov_t and the length field specifies
- * the number of entries in the array. The length can't be bigger
- * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
- * fragments that are not necessarily mapped in virtual memory.
- * - LNET_MD_IOVEC bit set: The start field points to the starting
- * address of an array of struct iovec and the length field specifies
- * the number of entries in the array. The length can't be bigger
- * than LNET_MAX_IOV. The struct iovec is used to describe fragments
- * that have virtual addresses.
- * - Otherwise: The memory region is contiguous. The start field
- * specifies the starting address for the memory region and the
- * length field specifies its length.
- *
- * When the memory region is fragmented, all fragments but the first
- * one must start on page boundary, and all but the last must end on
- * page boundary.
- */
- void *start;
- unsigned int length;
- /**
- * Specifies the maximum number of operations that can be performed
- * on the memory descriptor. An operation is any action that could
- * possibly generate an event. In the usual case, the threshold value
- * is decremented for each operation on the MD. When the threshold
- * drops to zero, the MD becomes inactive and does not respond to
- * operations. A threshold value of LNET_MD_THRESH_INF indicates that
- * there is no bound on the number of operations that may be applied
- * to a MD.
- */
- int threshold;
- /**
- * Specifies the largest incoming request that the memory descriptor
- * should respond to. When the unused portion of a MD (length -
- * local offset) falls below this value, the MD becomes inactive and
- * does not respond to further operations. This value is only used
- * if the LNET_MD_MAX_SIZE option is set.
- */
- int max_size;
- /**
- * Specifies the behavior of the memory descriptor. A bitwise OR
- * of the following values can be used:
- * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD.
- * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD.
- * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory
- * region is provided by the incoming request. By default, the
- * offset is maintained locally. When maintained locally, the
- * offset is incremented by the length of the request so that
- * the next operation (PUT or GET) will access the next part of
- * the memory region. Note that only one offset variable exists
- * per memory descriptor. If both PUT and GET operations are
- * performed on a memory descriptor, the offset is updated each time.
- * - LNET_MD_TRUNCATE: The length provided in the incoming request can
- * be reduced to match the memory available in the region (determined
- * by subtracting the offset from the length of the memory region).
- * By default, if the length in the incoming operation is greater
- * than the amount of memory available, the operation is rejected.
- * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for
- * incoming PUT operations, even if requested. By default,
- * acknowledgments are sent for PUT operations that request an
- * acknowledgment. Acknowledgments are never sent for GET operations.
- * The data sent in the REPLY serves as an implicit acknowledgment.
- * - LNET_MD_KIOV: The start and length fields specify an array of
- * lnet_kiov_t.
- * - LNET_MD_IOVEC: The start and length fields specify an array of
- * struct iovec.
- * - LNET_MD_MAX_SIZE: The max_size field is valid.
- *
- * Note:
- * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather
- * capability for memory descriptors. They can't be both set.
- * - When LNET_MD_MAX_SIZE is set, the total length of the memory
- * region (i.e. sum of all fragment lengths) must not be less than
- * \a max_size.
- */
- unsigned int options;
- /**
- * A user-specified value that is associated with the memory
- * descriptor. The value does not need to be a pointer, but must fit
- * in the space used by a pointer. This value is recorded in events
- * associated with operations on this MD.
- */
- void *user_ptr;
- /**
- * A handle for the event queue used to log the operations performed on
- * the memory region. If this argument is a NULL handle (i.e. nullified
- * by LNetInvalidateHandle()), operations performed on this memory
- * descriptor are not logged.
- */
- lnet_handle_eq_t eq_handle;
-} lnet_md_t;
-
-/* Max Transfer Unit (minimum supported everywhere).
- * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
- * these limits are system wide and not interface-local. */
-#define LNET_MTU_BITS 20
-#define LNET_MTU (1 << LNET_MTU_BITS)
-
-/** limit on the number of fragments in discontiguous MDs */
-#define LNET_MAX_IOV 256
-
-/**
- * Options for the MD structure. See lnet_md_t::options.
- */
-#define LNET_MD_OP_PUT (1 << 0)
-/** See lnet_md_t::options. */
-#define LNET_MD_OP_GET (1 << 1)
-/** See lnet_md_t::options. */
-#define LNET_MD_MANAGE_REMOTE (1 << 2)
-/* unused (1 << 3) */
-/** See lnet_md_t::options. */
-#define LNET_MD_TRUNCATE (1 << 4)
-/** See lnet_md_t::options. */
-#define LNET_MD_ACK_DISABLE (1 << 5)
-/** See lnet_md_t::options. */
-#define LNET_MD_IOVEC (1 << 6)
-/** See lnet_md_t::options. */
-#define LNET_MD_MAX_SIZE (1 << 7)
-/** See lnet_md_t::options. */
-#define LNET_MD_KIOV (1 << 8)
-
-/* For compatibility with Cray Portals */
-#define LNET_MD_PHYS 0
-
-/** Infinite threshold on MD operations. See lnet_md_t::threshold */
-#define LNET_MD_THRESH_INF (-1)
-
-/* NB lustre portals uses struct iovec internally! */
-typedef struct iovec lnet_md_iovec_t;
-
-/**
- * A page-based fragment of a MD.
- */
-typedef struct {
- /** Pointer to the page where the fragment resides */
- struct page *kiov_page;
- /** Length in bytes of the fragment */
- unsigned int kiov_len;
- /**
- * Starting offset of the fragment within the page. Note that the
- * end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
- */
- unsigned int kiov_offset;
-} lnet_kiov_t;
-/** @} lnet_md */
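For illustration only: a minimal sketch of how a caller might fill in an lnet_md_t for a plain contiguous buffer, using only the fields and option bits documented above. The helper name and its buf, buflen and eqh parameters are hypothetical, not part of the removed header.

/* Illustrative sketch only; 'buf', 'buflen' and 'eqh' are hypothetical
 * caller-supplied values. */
static void example_init_md(void *buf, unsigned int buflen,
			    lnet_handle_eq_t eqh, lnet_md_t *md)
{
	md->start     = buf;			/* contiguous region */
	md->length    = buflen;
	md->threshold = LNET_MD_THRESH_INF;	/* no bound on operations */
	md->max_size  = 0;			/* ignored: LNET_MD_MAX_SIZE not set */
	md->options   = LNET_MD_OP_PUT | LNET_MD_OP_GET;
	md->user_ptr  = NULL;			/* recorded in events for this MD */
	md->eq_handle = eqh;			/* events logged to this EQ */
}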
-
-/** \addtogroup lnet_eq
- * @{ */
-
-/**
- * Six types of events can be logged in an event queue.
- */
-typedef enum {
- /** An incoming GET operation has completed on the MD. */
- LNET_EVENT_GET = 1,
- /**
- * An incoming PUT operation has completed on the MD. The
- * underlying layers will not alter the memory (on behalf of this
- * operation) once this event has been logged.
- */
- LNET_EVENT_PUT,
- /**
- * A REPLY operation has completed. This event is logged after the
- * data (if any) from the REPLY has been written into the MD.
- */
- LNET_EVENT_REPLY,
- /** An acknowledgment has been received. */
- LNET_EVENT_ACK,
- /**
- * An outgoing send (PUT or GET) operation has completed. This event
- * is logged after the entire buffer has been sent and it is safe for
- * the caller to reuse the buffer.
- *
- * Note:
-	 * - LNET_EVENT_SEND doesn't guarantee message delivery; it can happen
-	 *   even before the message has been put out on the wire.
- * - It's unsafe to assume that in an outgoing GET operation
- * the LNET_EVENT_SEND event would happen before the
- * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and
- * LNET_EVENT_ACK events in an outgoing PUT operation.
- */
- LNET_EVENT_SEND,
- /**
- * A MD has been unlinked. Note that LNetMDUnlink() does not
- * necessarily trigger an LNET_EVENT_UNLINK event.
- * \see LNetMDUnlink
- */
- LNET_EVENT_UNLINK,
-} lnet_event_kind_t;
-
-#define LNET_SEQ_BASETYPE long
-typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
-#define LNET_SEQ_GT(a, b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)
-
-/**
- * Information about an event on a MD.
- */
-typedef struct {
- /** The identifier (nid, pid) of the target. */
- lnet_process_id_t target;
- /** The identifier (nid, pid) of the initiator. */
- lnet_process_id_t initiator;
- /**
- * The NID of the immediate sender. If the request has been forwarded
- * by routers, this is the NID of the last hop; otherwise it's the
- * same as the initiator.
- */
- lnet_nid_t sender;
- /** Indicates the type of the event. */
- lnet_event_kind_t type;
- /** The portal table index specified in the request */
- unsigned int pt_index;
- /** A copy of the match bits specified in the request. */
- __u64 match_bits;
- /** The length (in bytes) specified in the request. */
- unsigned int rlength;
- /**
- * The length (in bytes) of the data that was manipulated by the
- * operation. For truncated operations, the manipulated length will be
- * the number of bytes specified by the MD (possibly with an offset,
- * see lnet_md_t). For all other operations, the manipulated length
- * will be the length of the requested operation, i.e. rlength.
- */
- unsigned int mlength;
- /**
- * The handle to the MD associated with the event. The handle may be
- * invalid if the MD has been unlinked.
- */
- lnet_handle_md_t md_handle;
- /**
- * A snapshot of the state of the MD immediately after the event has
- * been processed. In particular, the threshold field in md will
- * reflect the value of the threshold after the operation occurred.
- */
- lnet_md_t md;
- /**
- * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
- * \see LNetPut
- */
- __u64 hdr_data;
- /**
- * Indicates the completion status of the operation. It's 0 for
- * successful operations, otherwise it's an error code.
- */
- int status;
- /**
- * Indicates whether the MD has been unlinked. Note that:
- * - An event with unlinked set is the last event on the MD.
- * - This field is also set for an explicit LNET_EVENT_UNLINK event.
- * \see LNetMDUnlink
- */
- int unlinked;
- /**
- * The displacement (in bytes) into the memory region that the
- * operation used. The offset can be determined by the operation for
- * a remote managed MD or by the local MD.
- * \see lnet_md_t::options
- */
- unsigned int offset;
- /**
- * The sequence number for this event. Sequence numbers are unique
- * to each event.
- */
- volatile lnet_seq_t sequence;
-} lnet_event_t;
-
-/**
- * Event queue handler function type.
- *
- * The EQ handler runs for each event that is deposited into the EQ. The
- * handler is supplied with a pointer to the event that triggered the
- * handler invocation.
- *
- * The handler must not block, must be reentrant, and must not call any LNet
- * API functions. It should return as quickly as possible.
- */
-typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
-#define LNET_EQ_HANDLER_NONE NULL
-/** @} lnet_eq */
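For illustration only: a minimal event-queue handler sketch that follows the rules stated above (no blocking, no LNet API calls, returns quickly) and touches only lnet_event_t fields documented in this header. The handler name and the deferred work are hypothetical.

/* Illustrative sketch only; defer any real work to a worker thread. */
static void example_eq_handler(lnet_event_t *ev)
{
	switch (ev->type) {
	case LNET_EVENT_PUT:
		if (ev->status == 0) {
			/* ev->mlength bytes landed in the MD at ev->offset;
			 * hand the buffer off without blocking here. */
		}
		break;
	case LNET_EVENT_UNLINK:
		/* explicit unlink event; ev->unlinked is also set */
		break;
	default:
		break;
	}

	if (ev->unlinked) {
		/* last event for this MD; user state can be released */
	}
}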
-
-/** \addtogroup lnet_data
- * @{ */
-
-/**
- * Specify whether an acknowledgment should be sent by target when the PUT
- * operation completes (i.e., when the data has been written to a MD of the
- * target process).
- *
- * \see lnet_md_t::options for the discussion on LNET_MD_ACK_DISABLE by which
- * acknowledgments can be disabled for a MD.
- */
-typedef enum {
- /** Request an acknowledgment */
- LNET_ACK_REQ,
- /** Request that no acknowledgment should be generated. */
- LNET_NOACK_REQ
-} lnet_ack_req_t;
-/** @} lnet_data */
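For illustration only: a hedged sketch of choosing between LNET_ACK_REQ and LNET_NOACK_REQ when issuing a PUT. The LNetPut() prototype used here is assumed from the conventional Portals-style API rather than taken from this file, and 'mdh', 'target' and 'portal' are hypothetical values prepared elsewhere.

/* Illustrative sketch only. Assumed prototype (check the real declaration):
 *   int LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
 *               lnet_process_id_t target, unsigned int portal,
 *               __u64 match_bits, unsigned int offset, __u64 hdr_data); */
static int example_put(lnet_handle_md_t mdh, lnet_process_id_t target,
		       unsigned int portal, int want_ack)
{
	lnet_ack_req_t ack = want_ack ? LNET_ACK_REQ : LNET_NOACK_REQ;

	/* Even with LNET_ACK_REQ, the target can suppress the ack by
	 * creating its MD with LNET_MD_ACK_DISABLE. */
	return LNetPut(LNET_NID_ANY, mdh, ack, target, portal,
		       0 /* match bits */, 0 /* offset */, 0 /* hdr_data */);
}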
-
-/** @} lnet */
-#endif
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
deleted file mode 100644
index 00850eeb6a8c..000000000000
--- a/drivers/staging/lustre/lnet/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-config LNET
- tristate "Lustre networking subsystem"
- depends on LUSTRE_FS
-
-config LNET_MAX_PAYLOAD
-	int "Lustre lnet max transfer payload (default 1MB)"
- depends on LUSTRE_FS
- default "1048576"
- help
- This option defines the maximum size of payload in bytes that lnet
- can put into its transport.
-
- If unsure, use default.
-
-config LNET_SELFTEST
- tristate "Lustre networking self testing"
- depends on LNET
- help
-	  Choose Y here if you want to do LNet self testing. To compile this
-	  as a kernel module, choose M here: the module will be called
-	  lnet_selftest.
-
- If unsure, say N.
-
- See also http://wiki.lustre.org/
-
-config LNET_XPRT_IB
- tristate "LNET infiniband support"
- depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
- default LNET && INFINIBAND
- help
-	  This option allows LNet users to use InfiniBand as an
-	  RDMA-enabled transport.
-
- To compile this as a kernel module, choose M here and it will be
- called ko2iblnd.
-
- If unsure, say N.
diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile
deleted file mode 100644
index f6f03e304d81..000000000000
--- a/drivers/staging/lustre/lnet/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_LNET) += lnet/ klnds/ selftest/
diff --git a/drivers/staging/lustre/lnet/klnds/Makefile b/drivers/staging/lustre/lnet/klnds/Makefile
deleted file mode 100644
index c23e4f67f837..000000000000
--- a/drivers/staging/lustre/lnet/klnds/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_LNET) += o2iblnd/ socklnd/
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile b/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
deleted file mode 100644
index e0a7aa72b7d5..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LNET_XPRT_IB) += ko2iblnd.o
-ko2iblnd-y := o2iblnd.o o2iblnd_cb.o o2iblnd_modparams.o
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
deleted file mode 100644
index c2cc4e446b22..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ /dev/null
@@ -1,2873 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd.c
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include <asm/div64.h>
-#include <asm/page.h>
-#include "o2iblnd.h"
-
-static lnd_t the_o2iblnd = {
- .lnd_type = O2IBLND,
- .lnd_startup = kiblnd_startup,
- .lnd_shutdown = kiblnd_shutdown,
- .lnd_ctl = kiblnd_ctl,
- .lnd_query = kiblnd_query,
- .lnd_send = kiblnd_send,
- .lnd_recv = kiblnd_recv,
-};
-
-kib_data_t kiblnd_data;
-
-static __u32 kiblnd_cksum(void *ptr, int nob)
-{
- char *c = ptr;
- __u32 sum = 0;
-
- while (nob-- > 0)
- sum = ((sum << 1) | (sum >> 31)) + *c++;
-
- /* ensure I don't return 0 (== no checksum) */
- return (sum == 0) ? 1 : sum;
-}
-
-static char *kiblnd_msgtype2str(int type)
-{
- switch (type) {
- case IBLND_MSG_CONNREQ:
- return "CONNREQ";
-
- case IBLND_MSG_CONNACK:
- return "CONNACK";
-
- case IBLND_MSG_NOOP:
- return "NOOP";
-
- case IBLND_MSG_IMMEDIATE:
- return "IMMEDIATE";
-
- case IBLND_MSG_PUT_REQ:
- return "PUT_REQ";
-
- case IBLND_MSG_PUT_NAK:
- return "PUT_NAK";
-
- case IBLND_MSG_PUT_ACK:
- return "PUT_ACK";
-
- case IBLND_MSG_PUT_DONE:
- return "PUT_DONE";
-
- case IBLND_MSG_GET_REQ:
- return "GET_REQ";
-
- case IBLND_MSG_GET_DONE:
- return "GET_DONE";
-
- default:
- return "???";
- }
-}
-
-static int kiblnd_msgtype2size(int type)
-{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
-
- switch (type) {
- case IBLND_MSG_CONNREQ:
- case IBLND_MSG_CONNACK:
- return hdr_size + sizeof(kib_connparams_t);
-
- case IBLND_MSG_NOOP:
- return hdr_size;
-
- case IBLND_MSG_IMMEDIATE:
- return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
-
- case IBLND_MSG_PUT_REQ:
- return hdr_size + sizeof(kib_putreq_msg_t);
-
- case IBLND_MSG_PUT_ACK:
- return hdr_size + sizeof(kib_putack_msg_t);
-
- case IBLND_MSG_GET_REQ:
- return hdr_size + sizeof(kib_get_msg_t);
-
- case IBLND_MSG_PUT_NAK:
- case IBLND_MSG_PUT_DONE:
- case IBLND_MSG_GET_DONE:
- return hdr_size + sizeof(kib_completion_msg_t);
- default:
- return -1;
- }
-}
-
-static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
-{
- kib_rdma_desc_t *rd;
- int nob;
- int n;
- int i;
-
- LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
- msg->ibm_type == IBLND_MSG_PUT_ACK);
-
- rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
- &msg->ibm_u.get.ibgm_rd :
- &msg->ibm_u.putack.ibpam_rd;
-
- if (flip) {
- __swab32s(&rd->rd_key);
- __swab32s(&rd->rd_nfrags);
- }
-
- n = rd->rd_nfrags;
-
- if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
- CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
- n, IBLND_MAX_RDMA_FRAGS);
- return 1;
- }
-
- nob = offsetof(kib_msg_t, ibm_u) +
- kiblnd_rd_msg_size(rd, msg->ibm_type, n);
-
- if (msg->ibm_nob < nob) {
- CERROR("Short %s: %d(%d)\n",
- kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
- return 1;
- }
-
- if (!flip)
- return 0;
-
- for (i = 0; i < n; i++) {
- __swab32s(&rd->rd_frags[i].rf_nob);
- __swab64s(&rd->rd_frags[i].rf_addr);
- }
-
- return 0;
-}
-
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp)
-{
- kib_net_t *net = ni->ni_data;
-
- /* CAVEAT EMPTOR! all message fields not set here should have been
- * initialised previously. */
- msg->ibm_magic = IBLND_MSG_MAGIC;
- msg->ibm_version = version;
- /* ibm_type */
- msg->ibm_credits = credits;
- /* ibm_nob */
- msg->ibm_cksum = 0;
- msg->ibm_srcnid = ni->ni_nid;
- msg->ibm_srcstamp = net->ibn_incarnation;
- msg->ibm_dstnid = dstnid;
- msg->ibm_dststamp = dststamp;
-
- if (*kiblnd_tunables.kib_cksum) {
- /* NB ibm_cksum zero while computing cksum */
- msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
- }
-}
-
-int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
-{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
- __u32 msg_cksum;
- __u16 version;
- int msg_nob;
- int flip;
-
- /* 6 bytes are enough to have received magic + version */
- if (nob < 6) {
- CERROR("Short message: %d\n", nob);
- return -EPROTO;
- }
-
- if (msg->ibm_magic == IBLND_MSG_MAGIC) {
- flip = 0;
- } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
- flip = 1;
- } else {
- CERROR("Bad magic: %08x\n", msg->ibm_magic);
- return -EPROTO;
- }
-
- version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
- if (version != IBLND_MSG_VERSION &&
- version != IBLND_MSG_VERSION_1) {
- CERROR("Bad version: %x\n", version);
- return -EPROTO;
- }
-
- if (nob < hdr_size) {
- CERROR("Short message: %d\n", nob);
- return -EPROTO;
- }
-
- msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
- if (msg_nob > nob) {
- CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
- return -EPROTO;
- }
-
- /* checksum must be computed with ibm_cksum zero and BEFORE anything
- * gets flipped */
- msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
- msg->ibm_cksum = 0;
- if (msg_cksum != 0 &&
- msg_cksum != kiblnd_cksum(msg, msg_nob)) {
- CERROR("Bad checksum\n");
- return -EPROTO;
- }
-
- msg->ibm_cksum = msg_cksum;
-
- if (flip) {
- /* leave magic unflipped as a clue to peer endianness */
- msg->ibm_version = version;
- CLASSERT(sizeof(msg->ibm_type) == 1);
- CLASSERT(sizeof(msg->ibm_credits) == 1);
- msg->ibm_nob = msg_nob;
- __swab64s(&msg->ibm_srcnid);
- __swab64s(&msg->ibm_srcstamp);
- __swab64s(&msg->ibm_dstnid);
- __swab64s(&msg->ibm_dststamp);
- }
-
- if (msg->ibm_srcnid == LNET_NID_ANY) {
- CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
- return -EPROTO;
- }
-
- if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
- CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
- msg_nob, kiblnd_msgtype2size(msg->ibm_type));
- return -EPROTO;
- }
-
- switch (msg->ibm_type) {
- default:
- CERROR("Unknown message type %x\n", msg->ibm_type);
- return -EPROTO;
-
- case IBLND_MSG_NOOP:
- case IBLND_MSG_IMMEDIATE:
- case IBLND_MSG_PUT_REQ:
- break;
-
- case IBLND_MSG_PUT_ACK:
- case IBLND_MSG_GET_REQ:
- if (kiblnd_unpack_rd(msg, flip))
- return -EPROTO;
- break;
-
- case IBLND_MSG_PUT_NAK:
- case IBLND_MSG_PUT_DONE:
- case IBLND_MSG_GET_DONE:
- if (flip)
- __swab32s(&msg->ibm_u.completion.ibcm_status);
- break;
-
- case IBLND_MSG_CONNREQ:
- case IBLND_MSG_CONNACK:
- if (flip) {
- __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
- __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
- __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
- }
- break;
- }
- return 0;
-}
-
-int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
-{
- kib_peer_t *peer;
- kib_net_t *net = ni->ni_data;
- int cpt = lnet_cpt_of_nid(nid);
- unsigned long flags;
-
- LASSERT(net != NULL);
- LASSERT(nid != LNET_NID_ANY);
-
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
- if (peer == NULL) {
- CERROR("Cannot allocate peer\n");
- return -ENOMEM;
- }
-
- memset(peer, 0, sizeof(*peer)); /* zero flags etc */
-
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
-
- INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- INIT_LIST_HEAD(&peer->ibp_conns);
- INIT_LIST_HEAD(&peer->ibp_tx_queue);
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- /* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(net->ibn_shutdown == 0);
-
- /* npeers only grows with the global lock held */
- atomic_inc(&net->ibn_npeers);
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- *peerp = peer;
- return 0;
-}
-
-void kiblnd_destroy_peer(kib_peer_t *peer)
-{
- kib_net_t *net = peer->ibp_ni->ni_data;
-
- LASSERT(net != NULL);
- LASSERT(atomic_read(&peer->ibp_refcount) == 0);
- LASSERT(!kiblnd_peer_active(peer));
- LASSERT(peer->ibp_connecting == 0);
- LASSERT(peer->ibp_accepting == 0);
- LASSERT(list_empty(&peer->ibp_conns));
- LASSERT(list_empty(&peer->ibp_tx_queue));
-
- LIBCFS_FREE(peer, sizeof(*peer));
-
- /* NB a peer's connections keep a reference on their peer until
- * they are destroyed, so we can be assured that _all_ state to do
- * with this peer has been cleaned up when its refcount drops to
- * zero. */
- atomic_dec(&net->ibn_npeers);
-}
-
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
-{
- /* the caller is responsible for accounting the additional reference
- * that this creates */
- struct list_head *peer_list = kiblnd_nid2peerlist(nid);
- struct list_head *tmp;
- kib_peer_t *peer;
-
- list_for_each(tmp, peer_list) {
-
- peer = list_entry(tmp, kib_peer_t, ibp_list);
-
- LASSERT(peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
-
- if (peer->ibp_nid != nid)
- continue;
-
- CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
- peer, libcfs_nid2str(nid),
- atomic_read(&peer->ibp_refcount),
- peer->ibp_version);
- return peer;
- }
- return NULL;
-}
-
-void kiblnd_unlink_peer_locked(kib_peer_t *peer)
-{
- LASSERT(list_empty(&peer->ibp_conns));
-
- LASSERT(kiblnd_peer_active(peer));
- list_del_init(&peer->ibp_list);
- /* lose peerlist's ref */
- kiblnd_peer_decref(peer);
-}
-
-static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
- lnet_nid_t *nidp, int *count)
-{
- kib_peer_t *peer;
- struct list_head *ptmp;
- int i;
- unsigned long flags;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
-
- list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
-
- if (peer->ibp_ni != ni)
- continue;
-
- if (index-- > 0)
- continue;
-
- *nidp = peer->ibp_nid;
- *count = atomic_read(&peer->ibp_refcount);
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return 0;
- }
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return -ENOENT;
-}
-
-static void kiblnd_del_peer_locked(kib_peer_t *peer)
-{
- struct list_head *ctmp;
- struct list_head *cnxt;
- kib_conn_t *conn;
-
- if (list_empty(&peer->ibp_conns)) {
- kiblnd_unlink_peer_locked(peer);
- } else {
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
-
- kiblnd_close_conn_locked(conn, 0);
- }
- /* NB closing peer's last conn unlinked it. */
- }
- /* NB peer now unlinked; might even be freed if the peer table had the
- * last ref on it. */
-}
-
-static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
-{
- LIST_HEAD(zombies);
- struct list_head *ptmp;
- struct list_head *pnxt;
- kib_peer_t *peer;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int rc = -ENOENT;
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- if (nid != LNET_NID_ANY) {
- lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- } else {
- lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
-
- if (peer->ibp_ni != ni)
- continue;
-
- if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
- continue;
-
- if (!list_empty(&peer->ibp_tx_queue)) {
- LASSERT(list_empty(&peer->ibp_conns));
-
- list_splice_init(&peer->ibp_tx_queue,
- &zombies);
- }
-
- kiblnd_del_peer_locked(peer);
- rc = 0; /* matched something */
- }
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_txlist_done(ni, &zombies, -EIO);
-
- return rc;
-}
-
-static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
-{
- kib_peer_t *peer;
- struct list_head *ptmp;
- kib_conn_t *conn;
- struct list_head *ctmp;
- int i;
- unsigned long flags;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
-
- if (peer->ibp_ni != ni)
- continue;
-
- list_for_each(ctmp, &peer->ibp_conns) {
- if (index-- > 0)
- continue;
-
- conn = list_entry(ctmp, kib_conn_t,
- ibc_list);
- kiblnd_conn_addref(conn);
- read_unlock_irqrestore(
- &kiblnd_data.kib_global_lock,
- flags);
- return conn;
- }
- }
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return NULL;
-}
-
-int kiblnd_translate_mtu(int value)
-{
- switch (value) {
- default:
- return -1;
- case 0:
- return 0;
- case 256:
- return IB_MTU_256;
- case 512:
- return IB_MTU_512;
- case 1024:
- return IB_MTU_1024;
- case 2048:
- return IB_MTU_2048;
- case 4096:
- return IB_MTU_4096;
- }
-}
-
-static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
-{
- int mtu;
-
- /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
- if (cmid->route.path_rec == NULL)
- return;
-
- mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
- LASSERT(mtu >= 0);
- if (mtu != 0)
- cmid->route.path_rec->mtu = mtu;
-}
-
-static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
-{
- cpumask_t *mask;
- int vectors;
- int off;
- int i;
- lnet_nid_t nid = conn->ibc_peer->ibp_nid;
-
- vectors = conn->ibc_cmid->device->num_comp_vectors;
- if (vectors <= 1)
- return 0;
-
- mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
- if (mask == NULL)
- return 0;
-
- /* hash NID to CPU id in this partition... */
- off = do_div(nid, cpumask_weight(mask));
- for_each_cpu(i, mask) {
- if (off-- == 0)
- return i % vectors;
- }
-
- LBUG();
- return 1;
-}
-
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version)
-{
- /* CAVEAT EMPTOR:
- * If the new conn is created successfully it takes over the caller's
- * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
- * is destroyed. On failure, the caller's ref on 'peer' remains and
- * she must dispose of 'cmid'. (Actually I'd block forever if I tried
- * to destroy 'cmid' here since I'm called from the CM which still has
- * its ref on 'cmid'). */
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
- kib_dev_t *dev;
- struct ib_qp_init_attr *init_qp_attr;
- struct kib_sched_info *sched;
- struct ib_cq_init_attr cq_attr = {};
- kib_conn_t *conn;
- struct ib_cq *cq;
- unsigned long flags;
- int cpt;
- int rc;
- int i;
-
- LASSERT(net != NULL);
- LASSERT(!in_interrupt());
-
- dev = net->ibn_dev;
-
- cpt = lnet_cpt_of_nid(peer->ibp_nid);
- sched = kiblnd_data.kib_scheds[cpt];
-
- LASSERT(sched->ibs_nthreads > 0);
-
- LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
- sizeof(*init_qp_attr));
- if (init_qp_attr == NULL) {
- CERROR("Can't allocate qp_attr for %s\n",
- libcfs_nid2str(peer->ibp_nid));
- goto failed_0;
- }
-
- LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
- if (conn == NULL) {
- CERROR("Can't allocate connection for %s\n",
- libcfs_nid2str(peer->ibp_nid));
- goto failed_1;
- }
-
- conn->ibc_state = IBLND_CONN_INIT;
- conn->ibc_version = version;
- conn->ibc_peer = peer; /* I take the caller's ref */
- cmid->context = conn; /* for future CM callbacks */
- conn->ibc_cmid = cmid;
-
- INIT_LIST_HEAD(&conn->ibc_early_rxs);
- INIT_LIST_HEAD(&conn->ibc_tx_noops);
- INIT_LIST_HEAD(&conn->ibc_tx_queue);
- INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
- INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
- INIT_LIST_HEAD(&conn->ibc_active_txs);
- spin_lock_init(&conn->ibc_lock);
-
- LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
- sizeof(*conn->ibc_connvars));
- if (conn->ibc_connvars == NULL) {
- CERROR("Can't allocate in-progress connection state\n");
- goto failed_2;
- }
-
- write_lock_irqsave(glock, flags);
- if (dev->ibd_failover) {
- write_unlock_irqrestore(glock, flags);
- CERROR("%s: failover in progress\n", dev->ibd_ifname);
- goto failed_2;
- }
-
- if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
- /* wakeup failover thread and teardown connection */
- if (kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- wake_up(&kiblnd_data.kib_failover_waitq);
- }
-
- write_unlock_irqrestore(glock, flags);
- CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
- cmid->device->name, dev->ibd_ifname);
- goto failed_2;
- }
-
- kiblnd_hdev_addref_locked(dev->ibd_hdev);
- conn->ibc_hdev = dev->ibd_hdev;
-
- kiblnd_setup_mtu_locked(cmid);
-
- write_unlock_irqrestore(glock, flags);
-
- LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
- if (conn->ibc_rxs == NULL) {
- CERROR("Cannot allocate RX buffers\n");
- goto failed_2;
- }
-
- rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
- IBLND_RX_MSG_PAGES(version));
- if (rc != 0)
- goto failed_2;
-
- kiblnd_map_rx_descs(conn);
-
- cq_attr.cqe = IBLND_CQ_ENTRIES(version);
- cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
- cq = ib_create_cq(cmid->device,
- kiblnd_cq_completion, kiblnd_cq_event, conn,
- &cq_attr);
- if (IS_ERR(cq)) {
- CERROR("Can't create CQ: %ld, cqe: %d\n",
- PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
- goto failed_2;
- }
-
- conn->ibc_cq = cq;
-
- rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- if (rc != 0) {
-		CERROR("Can't request completion notification: %d\n", rc);
- goto failed_2;
- }
-
- init_qp_attr->event_handler = kiblnd_qp_event;
- init_qp_attr->qp_context = conn;
- init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
- init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
- init_qp_attr->cap.max_send_sge = 1;
- init_qp_attr->cap.max_recv_sge = 1;
- init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- init_qp_attr->qp_type = IB_QPT_RC;
- init_qp_attr->send_cq = cq;
- init_qp_attr->recv_cq = cq;
-
- conn->ibc_sched = sched;
-
- rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (rc != 0) {
- CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
- rc, init_qp_attr->cap.max_send_wr,
- init_qp_attr->cap.max_recv_wr);
- goto failed_2;
- }
-
- LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
-
- /* 1 ref for caller and each rxmsg */
- atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
- conn->ibc_nrx = IBLND_RX_MSGS(version);
-
- /* post receives */
- for (i = 0; i < IBLND_RX_MSGS(version); i++) {
- rc = kiblnd_post_rx(&conn->ibc_rxs[i],
- IBLND_POSTRX_NO_CREDIT);
- if (rc != 0) {
- CERROR("Can't post rxmsg: %d\n", rc);
-
- /* Make posted receives complete */
- kiblnd_abort_receives(conn);
-
- /* correct # of posted buffers
- * NB locking needed now I'm racing with completion */
- spin_lock_irqsave(&sched->ibs_lock, flags);
- conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
-			/* cmid will be destroyed by CM (OFED) after cm_callback
-			 * returns, so we can't refer to it any more
-			 * (in kiblnd_connd()->kiblnd_destroy_conn) */
- rdma_destroy_qp(conn->ibc_cmid);
- conn->ibc_cmid = NULL;
-
- /* Drop my own and unused rxbuffer refcounts */
- while (i++ <= IBLND_RX_MSGS(version))
- kiblnd_conn_decref(conn);
-
- return NULL;
- }
- }
-
- /* Init successful! */
- LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
- state == IBLND_CONN_PASSIVE_WAIT);
- conn->ibc_state = state;
-
- /* 1 more conn */
- atomic_inc(&net->ibn_nconns);
- return conn;
-
- failed_2:
- kiblnd_destroy_conn(conn);
- failed_1:
- LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
- failed_0:
- return NULL;
-}
-
-void kiblnd_destroy_conn(kib_conn_t *conn)
-{
- struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_t *peer = conn->ibc_peer;
- int rc;
-
- LASSERT(!in_interrupt());
- LASSERT(atomic_read(&conn->ibc_refcount) == 0);
- LASSERT(list_empty(&conn->ibc_early_rxs));
- LASSERT(list_empty(&conn->ibc_tx_noops));
- LASSERT(list_empty(&conn->ibc_tx_queue));
- LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
- LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
- LASSERT(list_empty(&conn->ibc_active_txs));
- LASSERT(conn->ibc_noops_posted == 0);
- LASSERT(conn->ibc_nsends_posted == 0);
-
- switch (conn->ibc_state) {
- default:
- /* conn must be completely disengaged from the network */
- LBUG();
-
- case IBLND_CONN_DISCONNECTED:
- /* connvars should have been freed already */
- LASSERT(conn->ibc_connvars == NULL);
- break;
-
- case IBLND_CONN_INIT:
- break;
- }
-
- /* conn->ibc_cmid might be destroyed by CM already */
- if (cmid != NULL && cmid->qp != NULL)
- rdma_destroy_qp(cmid);
-
- if (conn->ibc_cq != NULL) {
- rc = ib_destroy_cq(conn->ibc_cq);
- if (rc != 0)
- CWARN("Error destroying CQ: %d\n", rc);
- }
-
- if (conn->ibc_rx_pages != NULL)
- kiblnd_unmap_rx_descs(conn);
-
- if (conn->ibc_rxs != NULL) {
- LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn->ibc_version)
- * sizeof(kib_rx_t));
- }
-
- if (conn->ibc_connvars != NULL)
- LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
-
- if (conn->ibc_hdev != NULL)
- kiblnd_hdev_decref(conn->ibc_hdev);
-
- /* See CAVEAT EMPTOR above in kiblnd_create_conn */
- if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer->ibp_ni->ni_data;
-
- kiblnd_peer_decref(peer);
- rdma_destroy_id(cmid);
- atomic_dec(&net->ibn_nconns);
- }
-
- LIBCFS_FREE(conn, sizeof(*conn));
-}
-
-int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
-{
- kib_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
-
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
-
- CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, why);
-
- kiblnd_close_conn_locked(conn, why);
- count++;
- }
-
- return count;
-}
-
-int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
- int version, __u64 incarnation)
-{
- kib_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
-
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
-
- if (conn->ibc_version == version &&
- conn->ibc_incarnation == incarnation)
- continue;
-
- CDEBUG(D_NET,
- "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, conn->ibc_incarnation,
- version, incarnation);
-
- kiblnd_close_conn_locked(conn, -ESTALE);
- count++;
- }
-
- return count;
-}
-
-static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
-{
- kib_peer_t *peer;
- struct list_head *ptmp;
- struct list_head *pnxt;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int count = 0;
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
-	if (nid != LNET_NID_ANY) {
-		lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
-	} else {
- lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
-
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
-
- if (peer->ibp_ni != ni)
- continue;
-
- if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
- continue;
-
- count += kiblnd_close_peer_conns_locked(peer, 0);
- }
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- /* wildcards always succeed */
- if (nid == LNET_NID_ANY)
- return 0;
-
- return (count == 0) ? -ENOENT : 0;
-}
-
-int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
-{
- struct libcfs_ioctl_data *data = arg;
- int rc = -EINVAL;
-
- switch (cmd) {
- case IOC_LIBCFS_GET_PEER: {
- lnet_nid_t nid = 0;
- int count = 0;
-
- rc = kiblnd_get_peer_info(ni, data->ioc_count,
- &nid, &count);
- data->ioc_nid = nid;
- data->ioc_count = count;
- break;
- }
-
- case IOC_LIBCFS_DEL_PEER: {
- rc = kiblnd_del_peer(ni, data->ioc_nid);
- break;
- }
- case IOC_LIBCFS_GET_CONN: {
- kib_conn_t *conn;
-
- rc = 0;
- conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
- if (conn == NULL) {
- rc = -ENOENT;
- break;
- }
-
- LASSERT(conn->ibc_cmid != NULL);
- data->ioc_nid = conn->ibc_peer->ibp_nid;
- if (conn->ibc_cmid->route.path_rec == NULL)
-			data->ioc_u32[0] = 0; /* iWARP has no path MTU */
- else
- data->ioc_u32[0] =
- ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
- kiblnd_conn_decref(conn);
- break;
- }
- case IOC_LIBCFS_CLOSE_CONNECTION: {
- rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
- break;
- }
-
- default:
- break;
- }
-
- return rc;
-}
-
-void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
-{
- unsigned long last_alive = 0;
- unsigned long now = cfs_time_current();
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
- unsigned long flags;
-
- read_lock_irqsave(glock, flags);
-
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
- LASSERT(peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
- last_alive = peer->ibp_last_alive;
- }
-
- read_unlock_irqrestore(glock, flags);
-
- if (last_alive != 0)
- *when = last_alive;
-
- /* peer is not persistent in hash, trigger peer creation
- * and connection establishment with a NULL tx */
- if (peer == NULL)
- kiblnd_launch_tx(ni, NULL, nid);
-
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
- libcfs_nid2str(nid), peer,
- last_alive ? cfs_duration_sec(now - last_alive) : -1);
-}
-
-void kiblnd_free_pages(kib_pages_t *p)
-{
- int npages = p->ibp_npages;
- int i;
-
- for (i = 0; i < npages; i++) {
- if (p->ibp_pages[i] != NULL)
- __free_page(p->ibp_pages[i]);
- }
-
- LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
-}
-
-int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
-{
- kib_pages_t *p;
- int i;
-
- LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
- offsetof(kib_pages_t, ibp_pages[npages]));
- if (p == NULL) {
- CERROR("Can't allocate descriptor for %d pages\n", npages);
- return -ENOMEM;
- }
-
- memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
- p->ibp_npages = npages;
-
- for (i = 0; i < npages; i++) {
- p->ibp_pages[i] = alloc_pages_node(
- cfs_cpt_spread_node(lnet_cpt_table(), cpt),
- GFP_NOFS, 0);
- if (p->ibp_pages[i] == NULL) {
- CERROR("Can't allocate page %d of %d\n", i, npages);
- kiblnd_free_pages(p);
- return -ENOMEM;
- }
- }
-
- *pp = p;
- return 0;
-}
-
-void kiblnd_unmap_rx_descs(kib_conn_t *conn)
-{
- kib_rx_t *rx;
- int i;
-
- LASSERT(conn->ibc_rxs != NULL);
- LASSERT(conn->ibc_hdev != NULL);
-
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
- rx = &conn->ibc_rxs[i];
-
- LASSERT(rx->rx_nob >= 0); /* not posted */
-
- kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
- KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
- rx->rx_msgaddr),
- IBLND_MSG_SIZE, DMA_FROM_DEVICE);
- }
-
- kiblnd_free_pages(conn->ibc_rx_pages);
-
- conn->ibc_rx_pages = NULL;
-}
-
-void kiblnd_map_rx_descs(kib_conn_t *conn)
-{
- kib_rx_t *rx;
- struct page *pg;
- int pg_off;
- int ipg;
- int i;
-
- for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
- pg = conn->ibc_rx_pages->ibp_pages[ipg];
- rx = &conn->ibc_rxs[i];
-
- rx->rx_conn = conn;
- rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
-
- rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msg,
- IBLND_MSG_SIZE,
- DMA_FROM_DEVICE);
- LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msgaddr));
- KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
-
- CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
- i, rx->rx_msg, rx->rx_msgaddr,
- (__u64)(page_to_phys(pg) + pg_off));
-
- pg_off += IBLND_MSG_SIZE;
- LASSERT(pg_off <= PAGE_SIZE);
-
- if (pg_off == PAGE_SIZE) {
- pg_off = 0;
- ipg++;
- LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
- }
- }
-}
-
-static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
-{
- kib_hca_dev_t *hdev = tpo->tpo_hdev;
- kib_tx_t *tx;
- int i;
-
- LASSERT(tpo->tpo_pool.po_allocated == 0);
-
- if (hdev == NULL)
- return;
-
- for (i = 0; i < tpo->tpo_pool.po_size; i++) {
- tx = &tpo->tpo_tx_descs[i];
- kiblnd_dma_unmap_single(hdev->ibh_ibdev,
- KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
- tx->tx_msgaddr),
- IBLND_MSG_SIZE, DMA_TO_DEVICE);
- }
-
- kiblnd_hdev_decref(hdev);
- tpo->tpo_hdev = NULL;
-}
-
-static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
-{
- kib_hca_dev_t *hdev;
- unsigned long flags;
- int i = 0;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (dev->ibd_failover) {
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (i++ % 50 == 0)
- CDEBUG(D_NET, "%s: Wait for failover\n",
- dev->ibd_ifname);
- schedule_timeout(cfs_time_seconds(1) / 100);
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
-
- kiblnd_hdev_addref_locked(dev->ibd_hdev);
- hdev = dev->ibd_hdev;
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- return hdev;
-}
-
-static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
-{
- kib_pages_t *txpgs = tpo->tpo_tx_pages;
- kib_pool_t *pool = &tpo->tpo_pool;
- kib_net_t *net = pool->po_owner->ps_net;
- kib_dev_t *dev;
- struct page *page;
- kib_tx_t *tx;
- int page_offset;
- int ipage;
- int i;
-
- LASSERT(net != NULL);
-
- dev = net->ibn_dev;
-
- /* pre-mapped messages are not bigger than 1 page */
- CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
-
- /* No fancy arithmetic when we do the buffer calculations */
- CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
-
- tpo->tpo_hdev = kiblnd_current_hdev(dev);
-
- for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
- page = txpgs->ibp_pages[ipage];
- tx = &tpo->tpo_tx_descs[i];
-
- tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
- page_offset);
-
- tx->tx_msgaddr = kiblnd_dma_map_single(
- tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
- IBLND_MSG_SIZE, DMA_TO_DEVICE);
- LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
- tx->tx_msgaddr));
- KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
-
- list_add(&tx->tx_list, &pool->po_free_list);
-
- page_offset += IBLND_MSG_SIZE;
- LASSERT(page_offset <= PAGE_SIZE);
-
- if (page_offset == PAGE_SIZE) {
- page_offset = 0;
- ipage++;
- LASSERT(ipage <= txpgs->ibp_npages);
- }
- }
-}
-
-struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
-{
- __u64 index;
-
- LASSERT(hdev->ibh_mrs[0] != NULL);
-
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
-
- index = addr >> hdev->ibh_mr_shift;
-
- if (index < hdev->ibh_nmrs &&
- index == ((addr + size - 1) >> hdev->ibh_mr_shift))
- return hdev->ibh_mrs[index];
-
- return NULL;
-}
-
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
-{
- struct ib_mr *prev_mr;
- struct ib_mr *mr;
- int i;
-
- LASSERT(hdev->ibh_mrs[0] != NULL);
-
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
- return NULL;
-
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
-
- for (i = 0, mr = prev_mr = NULL;
- i < rd->rd_nfrags; i++) {
- mr = kiblnd_find_dma_mr(hdev,
- rd->rd_frags[i].rf_addr,
- rd->rd_frags[i].rf_nob);
- if (prev_mr == NULL)
- prev_mr = mr;
-
- if (mr == NULL || prev_mr != mr) {
-			/* Can't be covered by a single MR */
- mr = NULL;
- break;
- }
- }
-
- return mr;
-}
-
-static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
-{
- LASSERT(pool->fpo_map_count == 0);
-
- if (pool->fpo_fmr_pool != NULL)
- ib_destroy_fmr_pool(pool->fpo_fmr_pool);
-
- if (pool->fpo_hdev != NULL)
- kiblnd_hdev_decref(pool->fpo_hdev);
-
- LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
-}
-
-static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
-{
- kib_fmr_pool_t *pool;
-
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
- list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
- }
-}
-
-static int kiblnd_fmr_pool_size(int ncpts)
-{
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
-
- return max(IBLND_FMR_POOL, size);
-}
-
-static int kiblnd_fmr_flush_trigger(int ncpts)
-{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
-
- return max(IBLND_FMR_POOL_FLUSH, size);
-}
-
-static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
- kib_fmr_pool_t **pp_fpo)
-{
- /* FMR pool for RDMA */
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
- struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
- .page_shift = PAGE_SHIFT,
- .access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE),
- .pool_size = fps->fps_pool_size,
- .dirty_watermark = fps->fps_flush_trigger,
- .flush_function = NULL,
- .flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
- int rc;
-
- LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
- if (fpo == NULL)
- return -ENOMEM;
-
- fpo->fpo_hdev = kiblnd_current_hdev(dev);
-
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
- if (IS_ERR(fpo->fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fpo_fmr_pool);
- CERROR("Failed to create FMR pool: %d\n", rc);
-
- kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
- return rc;
- }
-
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
- *pp_fpo = fpo;
-
- return 0;
-}
-
-static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
- struct list_head *zombies)
-{
-	if (fps->fps_net == NULL) /* initialized? */
- return;
-
- spin_lock(&fps->fps_lock);
-
- while (!list_empty(&fps->fps_pool_list)) {
- kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
- kib_fmr_pool_t, fpo_list);
- fpo->fpo_failed = 1;
- list_del(&fpo->fpo_list);
- if (fpo->fpo_map_count == 0)
- list_add(&fpo->fpo_list, zombies);
- else
- list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
- }
-
- spin_unlock(&fps->fps_lock);
-}
-
-static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
-{
- if (fps->fps_net != NULL) { /* initialized? */
- kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
- kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
- }
-}
-
-static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
- kib_net_t *net, int pool_size,
- int flush_trigger)
-{
- kib_fmr_pool_t *fpo;
- int rc;
-
- memset(fps, 0, sizeof(kib_fmr_poolset_t));
-
- fps->fps_net = net;
- fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
- spin_lock_init(&fps->fps_lock);
- INIT_LIST_HEAD(&fps->fps_pool_list);
- INIT_LIST_HEAD(&fps->fps_failed_pool_list);
-
- rc = kiblnd_create_fmr_pool(fps, &fpo);
- if (rc == 0)
- list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
-
- return rc;
-}
-
-static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
-{
- if (fpo->fpo_map_count != 0) /* still in use */
- return 0;
- if (fpo->fpo_failed)
- return 1;
- return cfs_time_aftereq(now, fpo->fpo_deadline);
-}
-
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
-{
- LIST_HEAD(zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
- unsigned long now = cfs_time_current();
- kib_fmr_pool_t *tmp;
- int rc;
-
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(rc == 0);
-
- if (status != 0) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(rc == 0);
- }
-
- fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
-
- spin_lock(&fps->fps_lock);
- fpo->fpo_map_count--; /* decref the pool */
-
- list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
- /* the first pool is persistent */
- if (fps->fps_pool_list.next == &fpo->fpo_list)
- continue;
-
- if (kiblnd_fmr_pool_is_idle(fpo, now)) {
- list_move(&fpo->fpo_list, &zombies);
- fps->fps_version++;
- }
- }
- spin_unlock(&fps->fps_lock);
-
- if (!list_empty(&zombies))
- kiblnd_destroy_fmr_pool_list(&zombies);
-}
-
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u64 iov, kib_fmr_t *fmr)
-{
- struct ib_pool_fmr *pfmr;
- kib_fmr_pool_t *fpo;
- __u64 version;
- int rc;
-
- again:
- spin_lock(&fps->fps_lock);
- version = fps->fps_version;
- list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_map_count++;
- spin_unlock(&fps->fps_lock);
-
- pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_pool = fpo;
- fmr->fmr_pfmr = pfmr;
- return 0;
- }
-
- spin_lock(&fps->fps_lock);
- fpo->fpo_map_count--;
- if (PTR_ERR(pfmr) != -EAGAIN) {
- spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
- }
-
- /* EAGAIN and ... */
- if (version != fps->fps_version) {
- spin_unlock(&fps->fps_lock);
- goto again;
- }
- }
-
- if (fps->fps_increasing) {
- spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET,
- "Another thread is allocating new FMR pool, waiting for her to complete\n");
- schedule();
- goto again;
-
- }
-
- if (time_before(cfs_time_current(), fps->fps_next_retry)) {
- /* someone failed recently */
- spin_unlock(&fps->fps_lock);
- return -EAGAIN;
- }
-
- fps->fps_increasing = 1;
- spin_unlock(&fps->fps_lock);
-
- CDEBUG(D_NET, "Allocate new FMR pool\n");
- rc = kiblnd_create_fmr_pool(fps, &fpo);
- spin_lock(&fps->fps_lock);
- fps->fps_increasing = 0;
- if (rc == 0) {
- fps->fps_version++;
- list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
- } else {
- fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
- }
- spin_unlock(&fps->fps_lock);
-
- goto again;
-}
-
-static void kiblnd_fini_pool(kib_pool_t *pool)
-{
- LASSERT(list_empty(&pool->po_free_list));
- LASSERT(pool->po_allocated == 0);
-
- CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
-}
-
-static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
-{
- CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
-
- memset(pool, 0, sizeof(kib_pool_t));
- INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- pool->po_owner = ps;
- pool->po_size = size;
-}
-
-static void kiblnd_destroy_pool_list(struct list_head *head)
-{
- kib_pool_t *pool;
-
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_pool_t, po_list);
- list_del(&pool->po_list);
-
- LASSERT(pool->po_owner != NULL);
- pool->po_owner->ps_pool_destroy(pool);
- }
-}
-
-static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
-{
-	if (ps->ps_net == NULL) /* initialized? */
- return;
-
- spin_lock(&ps->ps_lock);
- while (!list_empty(&ps->ps_pool_list)) {
- kib_pool_t *po = list_entry(ps->ps_pool_list.next,
- kib_pool_t, po_list);
- po->po_failed = 1;
- list_del(&po->po_list);
- if (po->po_allocated == 0)
- list_add(&po->po_list, zombies);
- else
- list_add(&po->po_list, &ps->ps_failed_pool_list);
- }
- spin_unlock(&ps->ps_lock);
-}
-
-static void kiblnd_fini_poolset(kib_poolset_t *ps)
-{
- if (ps->ps_net != NULL) { /* initialized? */
- kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
- kiblnd_destroy_pool_list(&ps->ps_pool_list);
- }
-}
-
-static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
- kib_net_t *net, char *name, int size,
- kib_ps_pool_create_t po_create,
- kib_ps_pool_destroy_t po_destroy,
- kib_ps_node_init_t nd_init,
- kib_ps_node_fini_t nd_fini)
-{
- kib_pool_t *pool;
- int rc;
-
- memset(ps, 0, sizeof(kib_poolset_t));
-
- ps->ps_cpt = cpt;
- ps->ps_net = net;
- ps->ps_pool_create = po_create;
- ps->ps_pool_destroy = po_destroy;
- ps->ps_node_init = nd_init;
- ps->ps_node_fini = nd_fini;
- ps->ps_pool_size = size;
- if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
- >= sizeof(ps->ps_name))
- return -E2BIG;
- spin_lock_init(&ps->ps_lock);
- INIT_LIST_HEAD(&ps->ps_pool_list);
- INIT_LIST_HEAD(&ps->ps_failed_pool_list);
-
- rc = ps->ps_pool_create(ps, size, &pool);
- if (rc == 0)
- list_add(&pool->po_list, &ps->ps_pool_list);
- else
- CERROR("Failed to create the first pool for %s\n", ps->ps_name);
-
- return rc;
-}
-
-static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
-{
- if (pool->po_allocated != 0) /* still in use */
- return 0;
- if (pool->po_failed)
- return 1;
- return cfs_time_aftereq(now, pool->po_deadline);
-}
-
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
-{
- LIST_HEAD(zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
- unsigned long now = cfs_time_current();
-
- spin_lock(&ps->ps_lock);
-
- if (ps->ps_node_fini != NULL)
- ps->ps_node_fini(pool, node);
-
- LASSERT(pool->po_allocated > 0);
- list_add(node, &pool->po_free_list);
- pool->po_allocated--;
-
- list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
- /* the first pool is persistent */
- if (ps->ps_pool_list.next == &pool->po_list)
- continue;
-
- if (kiblnd_pool_is_idle(pool, now))
- list_move(&pool->po_list, &zombies);
- }
- spin_unlock(&ps->ps_lock);
-
- if (!list_empty(&zombies))
- kiblnd_destroy_pool_list(&zombies);
-}
-
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
-{
- struct list_head *node;
- kib_pool_t *pool;
- int rc;
-
- again:
- spin_lock(&ps->ps_lock);
- list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
- if (list_empty(&pool->po_free_list))
- continue;
-
- pool->po_allocated++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- node = pool->po_free_list.next;
- list_del(node);
-
- if (ps->ps_node_init != NULL) {
- /* still hold the lock */
- ps->ps_node_init(pool, node);
- }
- spin_unlock(&ps->ps_lock);
- return node;
- }
-
- /* no available tx pool and ... */
- if (ps->ps_increasing) {
- /* another thread is allocating a new pool */
- spin_unlock(&ps->ps_lock);
- CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
- ps->ps_name);
- schedule();
- goto again;
- }
-
- if (time_before(cfs_time_current(), ps->ps_next_retry)) {
- /* someone failed recently */
- spin_unlock(&ps->ps_lock);
- return NULL;
- }
-
- ps->ps_increasing = 1;
- spin_unlock(&ps->ps_lock);
-
- CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
-
- rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
-
- spin_lock(&ps->ps_lock);
- ps->ps_increasing = 0;
- if (rc == 0) {
- list_add_tail(&pool->po_list, &ps->ps_pool_list);
- } else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
-		CERROR("Can't allocate new %s pool: out of memory\n",
- ps->ps_name);
- }
- spin_unlock(&ps->ps_lock);
-
- goto again;
-}
-
-static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
-{
- kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
- int i;
-
- LASSERT(pool->po_allocated == 0);
-
- if (tpo->tpo_tx_pages != NULL) {
- kiblnd_unmap_tx_pool(tpo);
- kiblnd_free_pages(tpo->tpo_tx_pages);
- }
-
- if (tpo->tpo_tx_descs == NULL)
- goto out;
-
- for (i = 0; i < pool->po_size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
-
- list_del(&tx->tx_list);
- if (tx->tx_pages != NULL)
- LIBCFS_FREE(tx->tx_pages,
- LNET_MAX_IOV *
- sizeof(*tx->tx_pages));
- if (tx->tx_frags != NULL)
- LIBCFS_FREE(tx->tx_frags,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(*tx->tx_frags));
- if (tx->tx_wrq != NULL)
- LIBCFS_FREE(tx->tx_wrq,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq));
- if (tx->tx_sge != NULL)
- LIBCFS_FREE(tx->tx_sge,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
- if (tx->tx_rd != NULL)
- LIBCFS_FREE(tx->tx_rd,
- offsetof(kib_rdma_desc_t,
- rd_frags[IBLND_MAX_RDMA_FRAGS]));
- }
-
- LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(kib_tx_t));
-out:
- kiblnd_fini_pool(pool);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
-}
-
-static int kiblnd_tx_pool_size(int ncpts)
-{
- int ntx = *kiblnd_tunables.kib_ntx / ncpts;
-
- return max(IBLND_TX_POOL, ntx);
-}
-
-static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
- kib_pool_t **pp_po)
-{
- int i;
- int npg;
- kib_pool_t *pool;
- kib_tx_pool_t *tpo;
-
- LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
- if (tpo == NULL) {
- CERROR("Failed to allocate TX pool\n");
- return -ENOMEM;
- }
-
- pool = &tpo->tpo_pool;
- kiblnd_init_pool(ps, pool, size);
- tpo->tpo_tx_descs = NULL;
- tpo->tpo_tx_pages = NULL;
-
- npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
- if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
- CERROR("Can't allocate tx pages: %d\n", npg);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
- return -ENOMEM;
- }
-
- LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
- size * sizeof(kib_tx_t));
- if (tpo->tpo_tx_descs == NULL) {
- CERROR("Can't allocate %d tx descriptors\n", size);
- ps->ps_pool_destroy(pool);
- return -ENOMEM;
- }
-
- memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
-
- for (i = 0; i < size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
-
- tx->tx_pool = tpo;
- if (ps->ps_net->ibn_fmr_ps != NULL) {
- LIBCFS_CPT_ALLOC(tx->tx_pages,
- lnet_cpt_table(), ps->ps_cpt,
- LNET_MAX_IOV * sizeof(*tx->tx_pages));
- if (tx->tx_pages == NULL)
- break;
- }
-
- LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
- if (tx->tx_frags == NULL)
- break;
-
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
-
- LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq));
- if (tx->tx_wrq == NULL)
- break;
-
- LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
- if (tx->tx_sge == NULL)
- break;
-
- LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
- offsetof(kib_rdma_desc_t,
- rd_frags[IBLND_MAX_RDMA_FRAGS]));
- if (tx->tx_rd == NULL)
- break;
- }
-
- if (i == size) {
- kiblnd_map_tx_pool(tpo);
- *pp_po = pool;
- return 0;
- }
-
- ps->ps_pool_destroy(pool);
- return -ENOMEM;
-}
-
-static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
-{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
- tps_poolset);
- kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
-
- tx->tx_cookie = tps->tps_next_tx_cookie++;
-}
-
-static void kiblnd_net_fini_pools(kib_net_t *net)
-{
- int i;
-
- cfs_cpt_for_each(i, lnet_cpt_table()) {
- kib_tx_poolset_t *tps;
- kib_fmr_poolset_t *fps;
-
- if (net->ibn_tx_ps != NULL) {
- tps = net->ibn_tx_ps[i];
- kiblnd_fini_poolset(&tps->tps_poolset);
- }
-
- if (net->ibn_fmr_ps != NULL) {
- fps = net->ibn_fmr_ps[i];
- kiblnd_fini_fmr_poolset(fps);
- }
- }
-
- if (net->ibn_tx_ps != NULL) {
- cfs_percpt_free(net->ibn_tx_ps);
- net->ibn_tx_ps = NULL;
- }
-
- if (net->ibn_fmr_ps != NULL) {
- cfs_percpt_free(net->ibn_fmr_ps);
- net->ibn_fmr_ps = NULL;
- }
-}
-
-static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
-{
- unsigned long flags;
- int cpt;
- int rc = 0;
- int i;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_map_on_demand == 0 &&
- net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- goto create_tx_pool;
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
- CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
- *kiblnd_tunables.kib_ntx / 4);
- rc = -EINVAL;
- goto failed;
- }
-
- /*
- * TX pool must be created later than FMR, see LU-2268
- * for details
- */
- LASSERT(net->ibn_tx_ps == NULL);
-
- /*
-	 * premapping can fail if ibd_nmr > 1, so we always create an
-	 * FMR pool and use map-on-demand if premapping failed
- */
-
- net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_fmr_poolset_t));
- if (net->ibn_fmr_ps == NULL) {
- CERROR("Failed to allocate FMR pool array\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
- if (rc == -ENOSYS && i == 0) /* no FMR */
- break;
-
- if (rc != 0) { /* a real error */
- CERROR("Can't initialize FMR pool for CPT %d: %d\n",
- cpt, rc);
- goto failed;
- }
- }
-
- if (i > 0) {
- LASSERT(i == ncpts);
- goto create_tx_pool;
- }
-
- cfs_percpt_free(net->ibn_fmr_ps);
- net->ibn_fmr_ps = NULL;
-
- CWARN("Device does not support FMR\n");
- goto failed;
-
- create_tx_pool:
- net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_tx_poolset_t));
- if (net->ibn_tx_ps == NULL) {
- CERROR("Failed to allocate tx pool array\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
- cpt, net, "TX",
- kiblnd_tx_pool_size(ncpts),
- kiblnd_create_tx_pool,
- kiblnd_destroy_tx_pool,
- kiblnd_tx_init, NULL);
- if (rc != 0) {
- CERROR("Can't initialize TX pool for CPT %d: %d\n",
- cpt, rc);
- goto failed;
- }
- }
-
- return 0;
- failed:
- kiblnd_net_fini_pools(net);
- LASSERT(rc != 0);
- return rc;
-}
-
-static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
-{
- struct ib_device_attr *attr;
- int rc;
-
-	/* It's safe to assume an HCA can handle a page size
-	 * matching that of the native system */
- hdev->ibh_page_shift = PAGE_SHIFT;
- hdev->ibh_page_size = 1 << PAGE_SHIFT;
- hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
-
- LIBCFS_ALLOC(attr, sizeof(*attr));
- if (attr == NULL) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
-
- rc = ib_query_device(hdev->ibh_ibdev, attr);
- if (rc == 0)
- hdev->ibh_mr_size = attr->max_mr_size;
-
- LIBCFS_FREE(attr, sizeof(*attr));
-
- if (rc != 0) {
- CERROR("Failed to query IB device: %d\n", rc);
- return rc;
- }
-
- if (hdev->ibh_mr_size == ~0ULL) {
- hdev->ibh_mr_shift = 64;
- return 0;
- }
-
- for (hdev->ibh_mr_shift = 0;
- hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
- if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
- hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
- return 0;
- }
-
- CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
- return -EINVAL;
-}
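/*
 * Editor's note (illustrative addition, not part of the original patch):
 * the loop above reduces the device's max MR size to a power-of-two shift,
 * accepting either an exact power of two or one less (e.g. 0xFFFFFFFF).
 * A size of ~0ULL means "effectively unlimited" and maps to shift 64.
 * Standalone sketch; the sample sizes and example_* name are assumptions.
 */
#include <stdio.h>

static int example_mr_shift(unsigned long long mr_size)
{
	int shift;

	if (mr_size == ~0ULL)
		return 64;

	for (shift = 0; shift < 64; shift++) {
		if (mr_size == (1ULL << shift) ||
		    mr_size == (1ULL << shift) - 1)
			return shift;
	}
	return -1;	/* corresponds to the "Invalid mr size" error path */
}

int main(void)
{
	printf("%d\n", example_mr_shift(0x100000000ULL));	/* 32 */
	printf("%d\n", example_mr_shift(0xFFFFFFFFULL));	/* 32 */
	printf("%d\n", example_mr_shift(~0ULL));		/* 64 */
	return 0;
}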
-
-static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
-{
- int i;
-
- if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
- return;
-
- for (i = 0; i < hdev->ibh_nmrs; i++) {
- if (hdev->ibh_mrs[i] == NULL)
- break;
-
- ib_dereg_mr(hdev->ibh_mrs[i]);
- }
-
- LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
- hdev->ibh_mrs = NULL;
- hdev->ibh_nmrs = 0;
-}
-
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
-{
- kiblnd_hdev_cleanup_mrs(hdev);
-
- if (hdev->ibh_pd != NULL)
- ib_dealloc_pd(hdev->ibh_pd);
-
- if (hdev->ibh_cmid != NULL)
- rdma_destroy_id(hdev->ibh_cmid);
-
- LIBCFS_FREE(hdev, sizeof(*hdev));
-}
-
-static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
-{
- struct ib_mr *mr;
- int rc;
- int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
-
- rc = kiblnd_hdev_get_attr(hdev);
- if (rc != 0)
- return rc;
-
- LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
- if (hdev->ibh_mrs == NULL) {
- CERROR("Failed to allocate MRs table\n");
- return -ENOMEM;
- }
-
- hdev->ibh_mrs[0] = NULL;
- hdev->ibh_nmrs = 1;
-
- mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
- if (IS_ERR(mr)) {
- CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
- kiblnd_hdev_cleanup_mrs(hdev);
- return PTR_ERR(mr);
- }
-
- hdev->ibh_mrs[0] = mr;
-
- return 0;
-}
-
-/* DUMMY */
-static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
- struct rdma_cm_event *event)
-{
- return 0;
-}
-
-static int kiblnd_dev_need_failover(kib_dev_t *dev)
-{
- struct rdma_cm_id *cmid;
- struct sockaddr_in srcaddr;
- struct sockaddr_in dstaddr;
- int rc;
-
- if (dev->ibd_hdev == NULL || /* initializing */
- dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
- *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
- return 1;
-
-	/* XXX: it's ugly, but there is no better way to detect
- * ib-bonding HCA failover because:
- *
- * a. no reliable CM event for HCA failover...
- * b. no OFED API to get ib_device for current net_device...
- *
- * We have only two choices at this point:
- *
- * a. rdma_bind_addr(), it will conflict with listener cmid
- * b. rdma_resolve_addr() to zero addr */
- cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
- if (IS_ERR(cmid)) {
- rc = PTR_ERR(cmid);
- CERROR("Failed to create cmid for failover: %d\n", rc);
- return rc;
- }
-
- memset(&srcaddr, 0, sizeof(srcaddr));
- srcaddr.sin_family = AF_INET;
- srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
-
- memset(&dstaddr, 0, sizeof(dstaddr));
- dstaddr.sin_family = AF_INET;
- rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr, 1);
- if (rc != 0 || cmid->device == NULL) {
- CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
- dev->ibd_ifname, &dev->ibd_ifip,
- cmid->device, rc);
- rdma_destroy_id(cmid);
- return rc;
- }
-
- rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
- rdma_destroy_id(cmid);
-
- return rc;
-}
-
-int kiblnd_dev_failover(kib_dev_t *dev)
-{
- LIST_HEAD(zombie_tpo);
- LIST_HEAD(zombie_ppo);
- LIST_HEAD(zombie_fpo);
- struct rdma_cm_id *cmid = NULL;
- kib_hca_dev_t *hdev = NULL;
- struct ib_pd *pd;
- kib_net_t *net;
- struct sockaddr_in addr;
- unsigned long flags;
- int rc = 0;
- int i;
-
- LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
- dev->ibd_can_failover ||
- dev->ibd_hdev == NULL);
-
- rc = kiblnd_dev_need_failover(dev);
- if (rc <= 0)
- goto out;
-
- if (dev->ibd_hdev != NULL &&
- dev->ibd_hdev->ibh_cmid != NULL) {
-		/* XXX it's not ideal to close the old listener here,
-		 * because creating the new listener can still fail.
-		 * But we have to close it now; otherwise rdma_bind_addr()
-		 * will return EADDRINUSE. */
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- cmid = dev->ibd_hdev->ibh_cmid;
-		/* make the next call to kiblnd_dev_need_failover()
-		 * return 1 */
- dev->ibd_hdev->ibh_cmid = NULL;
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- rdma_destroy_id(cmid);
- }
-
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
- if (IS_ERR(cmid)) {
- rc = PTR_ERR(cmid);
- CERROR("Failed to create cmid for failover: %d\n", rc);
- goto out;
- }
-
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
- addr.sin_port = htons(*kiblnd_tunables.kib_service);
-
- /* Bind to failover device or port */
- rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
- if (rc != 0 || cmid->device == NULL) {
- CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
- dev->ibd_ifname, &dev->ibd_ifip,
- cmid->device, rc);
- rdma_destroy_id(cmid);
- goto out;
- }
-
- LIBCFS_ALLOC(hdev, sizeof(*hdev));
- if (hdev == NULL) {
- CERROR("Failed to allocate kib_hca_dev\n");
- rdma_destroy_id(cmid);
- rc = -ENOMEM;
- goto out;
- }
-
- atomic_set(&hdev->ibh_ref, 1);
- hdev->ibh_dev = dev;
- hdev->ibh_cmid = cmid;
- hdev->ibh_ibdev = cmid->device;
-
- pd = ib_alloc_pd(cmid->device);
- if (IS_ERR(pd)) {
- rc = PTR_ERR(pd);
- CERROR("Can't allocate PD: %d\n", rc);
- goto out;
- }
-
- hdev->ibh_pd = pd;
-
- rc = rdma_listen(cmid, 0);
- if (rc != 0) {
- CERROR("Can't start new listener: %d\n", rc);
- goto out;
- }
-
- rc = kiblnd_hdev_setup_mrs(hdev);
- if (rc != 0) {
- CERROR("Can't setup device: %d\n", rc);
- goto out;
- }
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- swap(dev->ibd_hdev, hdev); /* take over the refcount */
-
- list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
- cfs_cpt_for_each(i, lnet_cpt_table()) {
- kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
- &zombie_tpo);
-
- if (net->ibn_fmr_ps)
- kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
- &zombie_fpo);
- }
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- out:
- if (!list_empty(&zombie_tpo))
- kiblnd_destroy_pool_list(&zombie_tpo);
- if (!list_empty(&zombie_ppo))
- kiblnd_destroy_pool_list(&zombie_ppo);
- if (!list_empty(&zombie_fpo))
- kiblnd_destroy_fmr_pool_list(&zombie_fpo);
- if (hdev != NULL)
- kiblnd_hdev_decref(hdev);
-
- if (rc != 0)
- dev->ibd_failed_failover++;
- else
- dev->ibd_failed_failover = 0;
-
- return rc;
-}
-
-void kiblnd_destroy_dev(kib_dev_t *dev)
-{
- LASSERT(dev->ibd_nnets == 0);
- LASSERT(list_empty(&dev->ibd_nets));
-
- list_del(&dev->ibd_fail_list);
- list_del(&dev->ibd_list);
-
- if (dev->ibd_hdev != NULL)
- kiblnd_hdev_decref(dev->ibd_hdev);
-
- LIBCFS_FREE(dev, sizeof(*dev));
-}
-
-static kib_dev_t *kiblnd_create_dev(char *ifname)
-{
- struct net_device *netdev;
- kib_dev_t *dev;
- __u32 netmask;
- __u32 ip;
- int up;
- int rc;
-
- rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
- if (rc != 0) {
- CERROR("Can't query IPoIB interface %s: %d\n",
- ifname, rc);
- return NULL;
- }
-
- if (!up) {
- CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
- return NULL;
- }
-
- LIBCFS_ALLOC(dev, sizeof(*dev));
- if (dev == NULL)
- return NULL;
-
- netdev = dev_get_by_name(&init_net, ifname);
- if (netdev == NULL) {
- dev->ibd_can_failover = 0;
- } else {
- dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
- dev_put(netdev);
- }
-
- INIT_LIST_HEAD(&dev->ibd_nets);
- INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
- INIT_LIST_HEAD(&dev->ibd_fail_list);
- dev->ibd_ifip = ip;
- strcpy(&dev->ibd_ifname[0], ifname);
-
- /* initialize the device */
- rc = kiblnd_dev_failover(dev);
- if (rc != 0) {
- CERROR("Can't initialize device: %d\n", rc);
- LIBCFS_FREE(dev, sizeof(*dev));
- return NULL;
- }
-
- list_add_tail(&dev->ibd_list,
- &kiblnd_data.kib_devs);
- return dev;
-}
-
-static void kiblnd_base_shutdown(void)
-{
- struct kib_sched_info *sched;
- int i;
-
- LASSERT(list_empty(&kiblnd_data.kib_devs));
-
- switch (kiblnd_data.kib_init) {
- default:
- LBUG();
-
- case IBLND_INIT_ALL:
- case IBLND_INIT_DATA:
- LASSERT(kiblnd_data.kib_peers != NULL);
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
- LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
- LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
-
- /* flag threads to terminate; wake and wait for them to die */
- kiblnd_data.kib_shutdown = 1;
-
-		/* NB: we really want to stop scheduler threads net by net
-		 * instead of for the whole module; this should be improved
-		 * once LNet supports dynamic configuration */
- cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
- wake_up_all(&sched->ibs_waitq);
-
- wake_up_all(&kiblnd_data.kib_connd_waitq);
- wake_up_all(&kiblnd_data.kib_failover_waitq);
-
- i = 2;
- while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
- i++;
- /* power of 2 ? */
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
- "Waiting for %d threads to terminate\n",
- atomic_read(&kiblnd_data.kib_nthreads));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
-
- /* fall through */
-
- case IBLND_INIT_NOTHING:
- break;
- }
-
- if (kiblnd_data.kib_peers != NULL) {
- LIBCFS_FREE(kiblnd_data.kib_peers,
- sizeof(struct list_head) *
- kiblnd_data.kib_peer_hash_size);
- }
-
- if (kiblnd_data.kib_scheds != NULL)
- cfs_percpt_free(kiblnd_data.kib_scheds);
-
- kiblnd_data.kib_init = IBLND_INIT_NOTHING;
- module_put(THIS_MODULE);
-}
-
-void kiblnd_shutdown(lnet_ni_t *ni)
-{
- kib_net_t *net = ni->ni_data;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- int i;
- unsigned long flags;
-
- LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
-
- if (net == NULL)
- goto out;
-
- write_lock_irqsave(g_lock, flags);
- net->ibn_shutdown = 1;
- write_unlock_irqrestore(g_lock, flags);
-
- switch (net->ibn_init) {
- default:
- LBUG();
-
- case IBLND_INIT_ALL:
- /* nuke all existing peers within this net */
- kiblnd_del_peer(ni, LNET_NID_ANY);
-
- /* Wait for all peer state to clean up */
- i = 2;
- while (atomic_read(&net->ibn_npeers) != 0) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
- "%s: waiting for %d peers to disconnect\n",
- libcfs_nid2str(ni->ni_nid),
- atomic_read(&net->ibn_npeers));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
-
- kiblnd_net_fini_pools(net);
-
- write_lock_irqsave(g_lock, flags);
- LASSERT(net->ibn_dev->ibd_nnets > 0);
- net->ibn_dev->ibd_nnets--;
- list_del(&net->ibn_list);
- write_unlock_irqrestore(g_lock, flags);
-
- /* fall through */
-
- case IBLND_INIT_NOTHING:
- LASSERT(atomic_read(&net->ibn_nconns) == 0);
-
- if (net->ibn_dev != NULL &&
- net->ibn_dev->ibd_nnets == 0)
- kiblnd_destroy_dev(net->ibn_dev);
-
- break;
- }
-
- net->ibn_init = IBLND_INIT_NOTHING;
- ni->ni_data = NULL;
-
- LIBCFS_FREE(net, sizeof(*net));
-
-out:
- if (list_empty(&kiblnd_data.kib_devs))
- kiblnd_base_shutdown();
-}
-
-static int kiblnd_base_startup(void)
-{
- struct kib_sched_info *sched;
- int rc;
- int i;
-
- LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
-
- try_module_get(THIS_MODULE);
- /* zero pointers, flags etc */
- memset(&kiblnd_data, 0, sizeof(kiblnd_data));
-
- rwlock_init(&kiblnd_data.kib_global_lock);
-
- INIT_LIST_HEAD(&kiblnd_data.kib_devs);
- INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
-
- kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
- LIBCFS_ALLOC(kiblnd_data.kib_peers,
- sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
- if (kiblnd_data.kib_peers == NULL)
- goto failed;
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
-
- spin_lock_init(&kiblnd_data.kib_connd_lock);
- INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
- INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
- init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
- init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
-
- kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*sched));
- if (kiblnd_data.kib_scheds == NULL)
- goto failed;
-
- cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
- int nthrs;
-
- spin_lock_init(&sched->ibs_lock);
- INIT_LIST_HEAD(&sched->ibs_conns);
- init_waitqueue_head(&sched->ibs_waitq);
-
- nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
- if (*kiblnd_tunables.kib_nscheds > 0) {
- nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
- } else {
- /* max to half of CPUs, another half is reserved for
- * upper layer modules */
- nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
- }
-
- sched->ibs_nthreads_max = nthrs;
- sched->ibs_cpt = i;
- }
-
- kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
-
- /* lists/ptrs/locks initialised */
- kiblnd_data.kib_init = IBLND_INIT_DATA;
- /*****************************************************/
-
- rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
- if (rc != 0) {
- CERROR("Can't spawn o2iblnd connd: %d\n", rc);
- goto failed;
- }
-
- if (*kiblnd_tunables.kib_dev_failover != 0)
- rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
- "kiblnd_failover");
-
- if (rc != 0) {
- CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
- goto failed;
- }
-
- /* flag everything initialised */
- kiblnd_data.kib_init = IBLND_INIT_ALL;
- /*****************************************************/
-
- return 0;
-
- failed:
- kiblnd_base_shutdown();
- return -ENETDOWN;
-}
-
-static int kiblnd_start_schedulers(struct kib_sched_info *sched)
-{
- int rc = 0;
- int nthrs;
- int i;
-
- if (sched->ibs_nthreads == 0) {
- if (*kiblnd_tunables.kib_nscheds > 0) {
- nthrs = sched->ibs_nthreads_max;
- } else {
- nthrs = cfs_cpt_weight(lnet_cpt_table(),
- sched->ibs_cpt);
- nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
- nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
- }
- } else {
- LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
- /* increase one thread if there is new interface */
- nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max;
- }
-
- for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
-
- id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
- snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
- KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
- rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
- if (rc == 0)
- continue;
-
- CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
- sched->ibs_cpt, sched->ibs_nthreads + i, rc);
- break;
- }
-
- sched->ibs_nthreads += i;
- return rc;
-}
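/*
 * Editor's note (illustrative addition, not part of the original patch):
 * when kib_nscheds is left unset, the default thread count chosen above is
 * half of the CPUs in the CPT, clamped between IBLND_N_SCHED (2) and
 * IBLND_N_SCHED_HIGH (4) as defined in o2iblnd.h.  A worked sketch; the CPT
 * weights and the example_* name are assumptions for demonstration only.
 */
#include <stdio.h>

#define EX_N_SCHED	2	/* mirrors IBLND_N_SCHED */
#define EX_N_SCHED_HIGH	4	/* mirrors IBLND_N_SCHED_HIGH */

static int example_default_nthreads(int cpt_weight)
{
	int nthrs = cpt_weight >> 1;

	if (nthrs < EX_N_SCHED)
		nthrs = EX_N_SCHED;
	if (nthrs > cpt_weight)
		nthrs = cpt_weight;
	if (nthrs > EX_N_SCHED_HIGH)
		nthrs = EX_N_SCHED_HIGH;
	return nthrs;
}

int main(void)
{
	printf("%d\n", example_default_nthreads(2));	/* 2 */
	printf("%d\n", example_default_nthreads(8));	/* 4 */
	printf("%d\n", example_default_nthreads(32));	/* 4 (capped) */
	return 0;
}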
-
-static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
- int ncpts)
-{
- int cpt;
- int rc;
- int i;
-
- for (i = 0; i < ncpts; i++) {
- struct kib_sched_info *sched;
-
- cpt = (cpts == NULL) ? i : cpts[i];
- sched = kiblnd_data.kib_scheds[cpt];
-
- if (!newdev && sched->ibs_nthreads > 0)
- continue;
-
- rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
- if (rc != 0) {
- CERROR("Failed to start scheduler threads for %s\n",
- dev->ibd_ifname);
- return rc;
- }
- }
- return 0;
-}
-
-static kib_dev_t *kiblnd_dev_search(char *ifname)
-{
- kib_dev_t *alias = NULL;
- kib_dev_t *dev;
- char *colon;
- char *colon2;
-
- colon = strchr(ifname, ':');
- list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
- return dev;
-
- if (alias != NULL)
- continue;
-
- colon2 = strchr(dev->ibd_ifname, ':');
- if (colon != NULL)
- *colon = 0;
- if (colon2 != NULL)
- *colon2 = 0;
-
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
- alias = dev;
-
- if (colon != NULL)
- *colon = ':';
- if (colon2 != NULL)
- *colon2 = ':';
- }
- return alias;
-}
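/*
 * Editor's note (illustrative addition, not part of the original patch):
 * kiblnd_dev_search() prefers an exact interface-name match and otherwise
 * falls back to comparing the names with everything after the first ':'
 * stripped, so "ib0:1" can reuse an existing "ib0" device as an alias.
 * Small sketch; the interface names and example_* name are assumptions.
 */
#include <stdio.h>
#include <string.h>

static int example_same_base_ifname(const char *a, const char *b)
{
	size_t la = strcspn(a, ":");	/* length up to the first ':' */
	size_t lb = strcspn(b, ":");

	return la == lb && strncmp(a, b, la) == 0;
}

int main(void)
{
	printf("%d\n", example_same_base_ifname("ib0:1", "ib0"));	/* 1: alias */
	printf("%d\n", example_same_base_ifname("ib1", "ib0"));	/* 0 */
	return 0;
}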
-
-int kiblnd_startup(lnet_ni_t *ni)
-{
- char *ifname;
- kib_dev_t *ibdev = NULL;
- kib_net_t *net;
- struct timespec64 tv;
- unsigned long flags;
- int rc;
- int newdev;
-
- LASSERT(ni->ni_lnd == &the_o2iblnd);
-
- if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
- rc = kiblnd_base_startup();
- if (rc != 0)
- return rc;
- }
-
- LIBCFS_ALLOC(net, sizeof(*net));
- ni->ni_data = net;
- if (net == NULL)
- goto net_failed;
-
- ktime_get_real_ts64(&tv);
- net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
- tv.tv_nsec / NSEC_PER_USEC;
-
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
-
- if (ni->ni_interfaces[0] != NULL) {
- /* Use the IPoIB interface specified in 'networks=' */
-
- CLASSERT(LNET_MAX_INTERFACES > 1);
- if (ni->ni_interfaces[1] != NULL) {
- CERROR("Multiple interfaces not supported\n");
- goto failed;
- }
-
- ifname = ni->ni_interfaces[0];
- } else {
- ifname = *kiblnd_tunables.kib_default_ipif;
- }
-
- if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
- CERROR("IPoIB interface name too long: %s\n", ifname);
- goto failed;
- }
-
- ibdev = kiblnd_dev_search(ifname);
-
- newdev = ibdev == NULL;
- /* hmm...create kib_dev even for alias */
- if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
- ibdev = kiblnd_create_dev(ifname);
-
- if (ibdev == NULL)
- goto failed;
-
- net->ibn_dev = ibdev;
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
-
- rc = kiblnd_dev_start_threads(ibdev, newdev,
- ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0)
- goto failed;
-
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0) {
- CERROR("Failed to initialize NI pools: %d\n", rc);
- goto failed;
- }
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- ibdev->ibd_nnets++;
- list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- net->ibn_init = IBLND_INIT_ALL;
-
- return 0;
-
-failed:
- if (net->ibn_dev == NULL && ibdev != NULL)
- kiblnd_destroy_dev(ibdev);
-
-net_failed:
- kiblnd_shutdown(ni);
-
- CDEBUG(D_NET, "kiblnd_startup failed\n");
- return -ENETDOWN;
-}
-
-static void __exit kiblnd_module_fini(void)
-{
- lnet_unregister_lnd(&the_o2iblnd);
-}
-
-static int __init kiblnd_module_init(void)
-{
- int rc;
-
- CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
- ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
- ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
-
- rc = kiblnd_tunables_init();
- if (rc != 0)
- return rc;
-
- lnet_register_lnd(&the_o2iblnd);
-
- return 0;
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
-MODULE_LICENSE("GPL");
-
-module_init(kiblnd_module_init);
-module_exit(kiblnd_module_fini);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
deleted file mode 100644
index eb08400cc7d6..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ /dev/null
@@ -1,984 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd.h
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/uio.h>
-#include <linux/uaccess.h>
-
-#include <linux/io.h>
-
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/list.h>
-#include <linux/kmod.h>
-#include <linux/sysctl.h>
-#include <linux/pci.h>
-
-#include <net/sock.h>
-#include <linux/in.h>
-
-#define DEBUG_SUBSYSTEM S_LND
-
-#include "../../../include/linux/libcfs/libcfs.h"
-#include "../../../include/linux/lnet/lnet.h"
-#include "../../../include/linux/lnet/lib-lnet.h"
-
-#include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
-
-#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
-/* # scheduler loops before reschedule */
-#define IBLND_RESCHED 100
-
-#define IBLND_N_SCHED 2
-#define IBLND_N_SCHED_HIGH 4
-
-typedef struct {
- int *kib_dev_failover; /* HCA failover */
- unsigned int *kib_service; /* IB service number */
- int *kib_min_reconnect_interval; /* first failed connection retry... */
- int *kib_max_reconnect_interval; /* exponentially increasing to this */
- int *kib_cksum; /* checksum kib_msg_t? */
- int *kib_timeout; /* comms timeout (seconds) */
- int *kib_keepalive; /* keepalive timeout (seconds) */
- int *kib_ntx; /* # tx descs */
- int *kib_credits; /* # concurrent sends */
- int *kib_peertxcredits; /* # concurrent sends to 1 peer */
- int *kib_peerrtrcredits; /* # per-peer router buffer credits */
- int *kib_peercredits_hiw; /* # when eagerly to return credits */
- int *kib_peertimeout; /* seconds to consider peer dead */
- char **kib_default_ipif; /* default IPoIB interface */
- int *kib_retry_count;
- int *kib_rnr_retry_count;
- int *kib_concurrent_sends; /* send work queue sizing */
- int *kib_ib_mtu; /* IB MTU */
- int *kib_map_on_demand; /* map-on-demand if RD has more */
-	/* fragments than this value; 0 */
-	/* disables map-on-demand */
- int *kib_fmr_pool_size; /* # FMRs in pool */
- int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
- int *kib_fmr_cache; /* enable FMR pool cache? */
- int *kib_require_priv_port; /* accept only privileged ports */
- int *kib_use_priv_port; /* use privileged port for active connect */
- int *kib_nscheds; /* # threads on each CPT */
-} kib_tunables_t;
-
-extern kib_tunables_t kiblnd_tunables;
-
-#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
-
-#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
-
-#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MSG_QUEUE_SIZE_V1 : \
- *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
-#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_CREDIT_HIGHWATER_V1 : \
- *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
-
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
-
-static inline int
-kiblnd_concurrent_sends_v1(void)
-{
- if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
- return IBLND_MSG_QUEUE_SIZE_V1 * 2;
-
- if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
- return IBLND_MSG_QUEUE_SIZE_V1 / 2;
-
- return *kiblnd_tunables.kib_concurrent_sends;
-}
-
-#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- kiblnd_concurrent_sends_v1() : \
- *kiblnd_tunables.kib_concurrent_sends)
-/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
-#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
-#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
-
-#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
-#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
- *kiblnd_tunables.kib_map_on_demand : \
- IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
-#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
-
-/************************/
-/* derived constants... */
-/* Pools (shared by connections on each CPT) */
-/* These pools can grow at runtime, so there's no need to give a very large value */
-#define IBLND_TX_POOL 256
-#define IBLND_FMR_POOL 256
-#define IBLND_FMR_POOL_FLUSH 192
-
-/* TX messages (shared by all connections) */
-#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
-
-/* RX messages (per connection) */
-#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
-#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
-
-/* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
-#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
-#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
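/*
 * Editor's note (illustrative addition, not part of the original patch):
 * a worked example of the per-connection sizing macros above for a
 * version-2 connection, assuming peertxcredits = 8, concurrent_sends = 8,
 * map_on_demand = 0 (so IBLND_RDMA_FRAGS falls back to LNET_MAX_IOV,
 * i.e. 256) and 4 KiB pages.  All of these tunable values are assumptions
 * chosen purely for illustration.
 */
#include <stdio.h>

int main(void)
{
	int msg_size = 4 << 10;			/* IBLND_MSG_SIZE */
	int queue_size = 8, oob = 2;		/* v2: peertxcredits + 2 OOB msgs */
	int rdma_frags = 256, conc_sends = 8;	/* assumed tunables */
	int page_size = 4096;

	int rx_msgs = queue_size * 2 + oob;			/* 18 */
	int rx_bytes = rx_msgs * msg_size;			/* 73728 */
	int rx_pages = (rx_bytes + page_size - 1) / page_size;	/* 18 */
	int recv_wrs = rx_msgs;					/* 18 */
	int send_wrs = (rdma_frags + 1) * conc_sends;		/* 2056 */
	int cq_entries = recv_wrs + send_wrs;			/* 2074 */

	printf("rx_msgs=%d rx_pages=%d cq_entries=%d\n",
	       rx_msgs, rx_pages, cq_entries);
	return 0;
}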
-
-struct kib_hca_dev;
-
-/* o2iblnd can run over aliased interface */
-#ifdef IFALIASZ
-#define KIB_IFNAME_SIZE IFALIASZ
-#else
-#define KIB_IFNAME_SIZE 256
-#endif
-
-typedef struct {
- struct list_head ibd_list; /* chain on kib_devs */
- struct list_head ibd_fail_list; /* chain on kib_failed_devs */
- __u32 ibd_ifip; /* IPoIB interface IP */
-
- /* IPoIB interface name */
- char ibd_ifname[KIB_IFNAME_SIZE];
- int ibd_nnets; /* # nets extant */
-
- unsigned long ibd_next_failover;
- int ibd_failed_failover; /* # failover failures */
- unsigned int ibd_failover; /* failover in progress */
- unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
- struct list_head ibd_nets;
- struct kib_hca_dev *ibd_hdev;
-} kib_dev_t;
-
-typedef struct kib_hca_dev {
- struct rdma_cm_id *ibh_cmid; /* listener cmid */
- struct ib_device *ibh_ibdev; /* IB device */
- int ibh_page_shift; /* page shift of current HCA */
- int ibh_page_size; /* page size of current HCA */
- __u64 ibh_page_mask; /* page mask of current HCA */
- int ibh_mr_shift; /* bits shift of max MR size */
- __u64 ibh_mr_size; /* size of MR */
- int ibh_nmrs; /* # of global MRs */
- struct ib_mr **ibh_mrs; /* global MR */
- struct ib_pd *ibh_pd; /* PD */
- kib_dev_t *ibh_dev; /* owner */
- atomic_t ibh_ref; /* refcount */
-} kib_hca_dev_t;
-
-/** # of seconds to keep pool alive */
-#define IBLND_POOL_DEADLINE 300
-/** # of seconds to retry if allocation failed */
-#define IBLND_POOL_RETRY 1
-
-typedef struct {
- int ibp_npages; /* # pages */
- struct page *ibp_pages[0]; /* page array */
-} kib_pages_t;
-
-struct kib_pool;
-struct kib_poolset;
-
-typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
- int inc, struct kib_pool **pp_po);
-typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
-typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
-typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
-
-struct kib_net;
-
-#define IBLND_POOL_NAME_LEN 32
-
-typedef struct kib_poolset {
- spinlock_t ps_lock; /* serialize */
- struct kib_net *ps_net; /* network it belongs to */
- char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
- struct list_head ps_pool_list; /* list of pools */
- struct list_head ps_failed_pool_list;/* failed pool list */
- unsigned long ps_next_retry; /* time stamp for retry if */
- /* failed to allocate */
- int ps_increasing; /* is allocating new pool */
- int ps_pool_size; /* new pool size */
- int ps_cpt; /* CPT id */
-
- kib_ps_pool_create_t ps_pool_create; /* create a new pool */
- kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
- kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
- kib_ps_node_fini_t ps_node_fini; /* finalize node */
-} kib_poolset_t;
-
-typedef struct kib_pool {
- struct list_head po_list; /* chain on pool list */
- struct list_head po_free_list; /* pre-allocated node */
- kib_poolset_t *po_owner; /* pool_set of this pool */
- unsigned long po_deadline; /* deadline of this pool */
- int po_allocated; /* # of elements in use */
- int po_failed; /* pool is created on failed HCA */
- int po_size; /* # of pre-allocated elements */
-} kib_pool_t;
-
-typedef struct {
- kib_poolset_t tps_poolset; /* pool-set */
- __u64 tps_next_tx_cookie; /* cookie of TX */
-} kib_tx_poolset_t;
-
-typedef struct {
- kib_pool_t tpo_pool; /* pool */
- struct kib_hca_dev *tpo_hdev; /* device for this pool */
- struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
- kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
-} kib_tx_pool_t;
-
-typedef struct {
- spinlock_t fps_lock; /* serialize */
- struct kib_net *fps_net; /* IB network */
- struct list_head fps_pool_list; /* FMR pool list */
- struct list_head fps_failed_pool_list;/* FMR pool list */
- __u64 fps_version; /* validity stamp */
- int fps_cpt; /* CPT id */
- int fps_pool_size;
- int fps_flush_trigger;
- int fps_increasing; /* is allocating new pool */
- unsigned long fps_next_retry; /* time stamp for retry if*/
- /* failed to allocate */
-} kib_fmr_poolset_t;
-
-typedef struct {
- struct list_head fpo_list; /* chain on pool list */
- struct kib_hca_dev *fpo_hdev; /* device for this pool */
- kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
- struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
- unsigned long fpo_deadline; /* deadline of this pool */
- int fpo_failed; /* fmr pool is failed */
- int fpo_map_count; /* # of mapped FMR */
-} kib_fmr_pool_t;
-
-typedef struct {
- struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
- kib_fmr_pool_t *fmr_pool; /* pool of FMR */
-} kib_fmr_t;
-
-typedef struct kib_net {
- struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */
- __u64 ibn_incarnation;/* my epoch */
- int ibn_init; /* initialisation state */
- int ibn_shutdown; /* shutting down? */
-
- atomic_t ibn_npeers; /* # peers extant */
- atomic_t ibn_nconns; /* # connections extant */
-
- kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
- kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
-
- kib_dev_t *ibn_dev; /* underlying IB device */
-} kib_net_t;
-
-#define KIB_THREAD_SHIFT 16
-#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
-#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT)
-#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
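/*
 * Editor's note (illustrative addition, not part of the original patch):
 * the scheduler thread id packs the CPT into the high bits and the per-CPT
 * thread index into the low 16 bits; kiblnd_start_schedulers() unpacks it
 * again to build the "kiblnd_sd_<cpt>_<tid>" thread name.  The EX_* copies
 * below just mirror the macros above for a standalone demonstration.
 */
#include <stdio.h>

#define EX_THREAD_SHIFT		16
#define EX_THREAD_ID(cpt, tid)	((cpt) << EX_THREAD_SHIFT | (tid))
#define EX_THREAD_CPT(id)	((id) >> EX_THREAD_SHIFT)
#define EX_THREAD_TID(id)	((id) & ((1UL << EX_THREAD_SHIFT) - 1))

int main(void)
{
	long id = EX_THREAD_ID(3, 1);	/* CPT 3, thread 1 */

	printf("kiblnd_sd_%02ld_%02ld\n",
	       (long)EX_THREAD_CPT(id), (long)EX_THREAD_TID(id));
	/* prints "kiblnd_sd_03_01" */
	return 0;
}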
-
-struct kib_sched_info {
- spinlock_t ibs_lock; /* serialise */
- wait_queue_head_t ibs_waitq; /* schedulers sleep here */
- struct list_head ibs_conns; /* conns to check for rx completions */
- int ibs_nthreads; /* number of scheduler threads */
- int ibs_nthreads_max; /* max allowed scheduler threads */
- int ibs_cpt; /* CPT id */
-};
-
-typedef struct {
- int kib_init; /* initialisation state */
- int kib_shutdown; /* shut down? */
- struct list_head kib_devs; /* IB devices extant */
- struct list_head kib_failed_devs; /* list head of failed devices */
- wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */
- atomic_t kib_nthreads; /* # live threads */
- rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
- struct list_head *kib_peers; /* hash table of all my known peers */
- int kib_peer_hash_size; /* size of kib_peers */
- void *kib_connd; /* the connd task (serialisation assertions) */
- struct list_head kib_connd_conns; /* connections to setup/teardown */
- struct list_head kib_connd_zombies; /* connections with zero refcount */
- wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
- spinlock_t kib_connd_lock; /* serialise */
- struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
- struct kib_sched_info **kib_scheds; /* percpt data for schedulers */
-} kib_data_t;
-
-#define IBLND_INIT_NOTHING 0
-#define IBLND_INIT_DATA 1
-#define IBLND_INIT_ALL 2
-
-/************************************************************************
- * IB Wire message format.
- * These are sent in sender's byte order (i.e. receiver flips).
- */
-
-typedef struct kib_connparams {
- __u16 ibcp_queue_depth;
- __u16 ibcp_max_frags;
- __u32 ibcp_max_msg_size;
-} WIRE_ATTR kib_connparams_t;
-
-typedef struct {
- lnet_hdr_t ibim_hdr; /* portals header */
- char ibim_payload[0]; /* piggy-backed payload */
-} WIRE_ATTR kib_immediate_msg_t;
-
-typedef struct {
- __u32 rf_nob; /* # bytes this frag */
- __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
-} WIRE_ATTR kib_rdma_frag_t;
-
-typedef struct {
- __u32 rd_key; /* local/remote key */
- __u32 rd_nfrags; /* # fragments */
- kib_rdma_frag_t rd_frags[0]; /* buffer frags */
-} WIRE_ATTR kib_rdma_desc_t;
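/*
 * Editor's note (illustrative addition, not part of the original patch):
 * because kib_rdma_desc_t ends in a zero-length rd_frags[] array, its size
 * on the wire (and in allocations such as tx_rd) is computed with
 * offsetof() for a given fragment count rather than sizeof().  The ex_*
 * stand-in types below only approximate the packed wire layout.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ex_rdma_frag {
	uint32_t rf_nob;
	uint64_t rf_addr;
} __attribute__((packed));		/* 12 bytes, like the WIRE_ATTR frag */

struct ex_rdma_desc {
	uint32_t rd_key;
	uint32_t rd_nfrags;
	struct ex_rdma_frag rd_frags[];
} __attribute__((packed));

int main(void)
{
	/* room needed for a descriptor carrying 16 fragments */
	printf("%zu\n", offsetof(struct ex_rdma_desc, rd_frags[16]));
	/* 8 + 16 * 12 = 200 bytes */
	return 0;
}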
-
-typedef struct {
- lnet_hdr_t ibprm_hdr; /* portals header */
- __u64 ibprm_cookie; /* opaque completion cookie */
-} WIRE_ATTR kib_putreq_msg_t;
-
-typedef struct {
- __u64 ibpam_src_cookie; /* reflected completion cookie */
- __u64 ibpam_dst_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
-} WIRE_ATTR kib_putack_msg_t;
-
-typedef struct {
- lnet_hdr_t ibgm_hdr; /* portals header */
- __u64 ibgm_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
-} WIRE_ATTR kib_get_msg_t;
-
-typedef struct {
- __u64 ibcm_cookie; /* opaque completion cookie */
- __s32 ibcm_status; /* < 0 failure: >= 0 length */
-} WIRE_ATTR kib_completion_msg_t;
-
-typedef struct {
- /* First 2 fields fixed FOR ALL TIME */
- __u32 ibm_magic; /* I'm an ibnal message */
- __u16 ibm_version; /* this is my version number */
-
- __u8 ibm_type; /* msg type */
- __u8 ibm_credits; /* returned credits */
- __u32 ibm_nob; /* # bytes in whole message */
- __u32 ibm_cksum; /* checksum (0 == no checksum) */
- __u64 ibm_srcnid; /* sender's NID */
- __u64 ibm_srcstamp; /* sender's incarnation */
- __u64 ibm_dstnid; /* destination's NID */
- __u64 ibm_dststamp; /* destination's incarnation */
-
- union {
- kib_connparams_t connparams;
- kib_immediate_msg_t immediate;
- kib_putreq_msg_t putreq;
- kib_putack_msg_t putack;
- kib_get_msg_t get;
- kib_completion_msg_t completion;
- } WIRE_ATTR ibm_u;
-} WIRE_ATTR kib_msg_t;
-
-#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */
-
-#define IBLND_MSG_VERSION_1 0x11
-#define IBLND_MSG_VERSION_2 0x12
-#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2
-
-#define IBLND_MSG_CONNREQ 0xc0 /* connection request */
-#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */
-#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */
-#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
-#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */
-#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */
-#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */
-#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */
-#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
-#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */
-
-typedef struct {
- __u32 ibr_magic; /* sender's magic */
- __u16 ibr_version; /* sender's version */
- __u8 ibr_why; /* reject reason */
- __u8 ibr_padding; /* padding */
- __u64 ibr_incarnation; /* incarnation of peer */
- kib_connparams_t ibr_cp; /* connection parameters */
-} WIRE_ATTR kib_rej_t;
-
-/* connection rejection reasons */
-#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
-#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */
-#define IBLND_REJECT_FATAL 3 /* Anything else */
-#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
-#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
-#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match */
- /* mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't */
- /* match mine */
-
-/***********************************************************************/
-
-typedef struct kib_rx /* receive message */
-{
- struct list_head rx_list; /* queue for attention */
- struct kib_conn *rx_conn; /* owning conn */
- int rx_nob; /* # bytes received (-1 while posted) */
- enum ib_wc_status rx_status; /* completion status */
- kib_msg_t *rx_msg; /* message buffer (host vaddr) */
- __u64 rx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
- struct ib_recv_wr rx_wrq; /* receive work item... */
- struct ib_sge rx_sge; /* ...and its memory */
-} kib_rx_t;
-
-#define IBLND_POSTRX_DONT_POST 0 /* don't post */
-#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
-#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
-#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */
-
-typedef struct kib_tx /* transmit message */
-{
- struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- kib_tx_pool_t *tx_pool; /* pool I'm from */
- struct kib_conn *tx_conn; /* owning conn */
- short tx_sending; /* # tx callbacks outstanding */
- short tx_queued; /* queued for sending */
- short tx_waiting; /* waiting for peer */
- int tx_status; /* LNET completion status */
- unsigned long tx_deadline; /* completion deadline */
- __u64 tx_cookie; /* completion cookie */
- lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
- kib_msg_t *tx_msg; /* message buffer (host vaddr) */
- __u64 tx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
- int tx_nwrq; /* # send work items */
- struct ib_send_wr *tx_wrq; /* send work items... */
- struct ib_sge *tx_sge; /* ...and their memory */
- kib_rdma_desc_t *tx_rd; /* rdma descriptor */
- int tx_nfrags; /* # entries in... */
- struct scatterlist *tx_frags; /* dma_map_sg descriptor */
- __u64 *tx_pages; /* rdma phys page addrs */
- kib_fmr_t fmr; /* FMR */
- int tx_dmadir; /* dma direction */
-} kib_tx_t;
-
-typedef struct kib_connvars {
- kib_msg_t cv_msg; /* connection-in-progress variables */
-} kib_connvars_t;
-
-typedef struct kib_conn {
- struct kib_sched_info *ibc_sched; /* scheduler information */
- struct kib_peer *ibc_peer; /* owning peer */
- kib_hca_dev_t *ibc_hdev; /* HCA bound on */
- struct list_head ibc_list; /* stash on peer's conn list */
- struct list_head ibc_sched_list; /* schedule for attention */
- __u16 ibc_version; /* version of connection */
- __u64 ibc_incarnation; /* which instance of the peer */
- atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_noops_posted; /* # uncompleted NOOPs */
- int ibc_credits; /* # credits I have */
- int ibc_outstanding_credits; /* # credits to return */
- int ibc_reserved_credits; /* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- unsigned int ibc_nrx:16; /* receive buffers owned */
- unsigned int ibc_scheduled:1; /* scheduled for attention */
- unsigned int ibc_ready:1; /* CQ callback fired */
- unsigned long ibc_last_send; /* time of last send */
- struct list_head ibc_connd_list; /* link chain for */
- /* kiblnd_check_conns only */
- struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */
- struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for */
- /* IBLND_MSG_VERSION_1 */
- struct list_head ibc_tx_queue; /* sends that need a credit */
- struct list_head ibc_tx_queue_nocred; /* sends that don't need a */
- /* credit */
- struct list_head ibc_tx_queue_rsrvd; /* sends that need to */
- /* reserve an ACK/DONE msg */
- struct list_head ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
-} kib_conn_t;
-
-#define IBLND_CONN_INIT 0 /* being initialised */
-#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
-#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
-#define IBLND_CONN_ESTABLISHED 3 /* connection established */
-#define IBLND_CONN_CLOSING 4 /* being closed */
-#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
-
-typedef struct kib_peer {
- struct list_head ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
- struct list_head ibp_conns; /* all active connections */
- struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
- __u64 ibp_incarnation; /* incarnation of peer */
-	int ibp_connecting; /* current active connection attempts */
-	int ibp_accepting; /* current passive connection attempts */
-	int ibp_error; /* errno on closing this peer */
-	unsigned long ibp_last_alive; /* when (in jiffies) I was last alive */
-} kib_peer_t;
-
-extern kib_data_t kiblnd_data;
-
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
-
-static inline void
-kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
-{
- LASSERT(atomic_read(&hdev->ibh_ref) > 0);
- atomic_inc(&hdev->ibh_ref);
-}
-
-static inline void
-kiblnd_hdev_decref(kib_hca_dev_t *hdev)
-{
- LASSERT(atomic_read(&hdev->ibh_ref) > 0);
- if (atomic_dec_and_test(&hdev->ibh_ref))
- kiblnd_hdev_destroy(hdev);
-}
-
-static inline int
-kiblnd_dev_can_failover(kib_dev_t *dev)
-{
- if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
- return 0;
-
- if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
- return 0;
-
- if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
- return 1;
-
- return dev->ibd_can_failover;
-}
-
-#define kiblnd_conn_addref(conn) \
-do { \
- CDEBUG(D_NET, "conn[%p] (%d)++\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- atomic_inc(&(conn)->ibc_refcount); \
-} while (0)
-
-#define kiblnd_conn_decref(conn) \
-do { \
- unsigned long flags; \
- \
- CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), atomic_read(&(conn)->ibc_refcount)); \
- LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
- if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- list_add_tail(&(conn)->ibc_list, \
- &kiblnd_data.kib_connd_zombies); \
- wake_up(&kiblnd_data.kib_connd_waitq); \
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
- } \
-} while (0)
-
-#define kiblnd_peer_addref(peer) \
-do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read(&(peer)->ibp_refcount)); \
- atomic_inc(&(peer)->ibp_refcount); \
-} while (0)
-
-#define kiblnd_peer_decref(peer) \
-do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read(&(peer)->ibp_refcount)); \
- LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
- if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
- kiblnd_destroy_peer(peer); \
-} while (0)
-
-static inline struct list_head *
-kiblnd_nid2peerlist(lnet_nid_t nid)
-{
- unsigned int hash =
- ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
-
- return &kiblnd_data.kib_peers[hash];
-}
-
-static inline int
-kiblnd_peer_active(kib_peer_t *peer)
-{
- /* Am I in the peer hash table? */
- return !list_empty(&peer->ibp_list);
-}
-
-static inline kib_conn_t *
-kiblnd_get_conn_locked(kib_peer_t *peer)
-{
- LASSERT(!list_empty(&peer->ibp_conns));
-
- /* just return the first connection */
- return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
-}
-
-static inline int
-kiblnd_send_keepalive(kib_conn_t *conn)
-{
- return (*kiblnd_tunables.kib_keepalive > 0) &&
- cfs_time_after(jiffies, conn->ibc_last_send +
- *kiblnd_tunables.kib_keepalive*HZ);
-}
-
-static inline int
-kiblnd_need_noop(kib_conn_t *conn)
-{
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
- !kiblnd_send_keepalive(conn))
- return 0; /* No need to send NOOP */
-
- if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
- if (!list_empty(&conn->ibc_tx_queue_nocred))
- return 0; /* NOOP can be piggybacked */
-
- /* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&conn->ibc_tx_queue) ||
- conn->ibc_credits == 0);
- }
-
- if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
- !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
- conn->ibc_credits == 0) /* no credit */
- return 0;
-
- if (conn->ibc_credits == 1 && /* last credit reserved for */
- conn->ibc_outstanding_credits == 0) /* giving back credits */
- return 0;
-
- /* No tx to piggyback NOOP onto or no credit to send a tx */
- return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
-}
-
-static inline void
-kiblnd_abort_receives(kib_conn_t *conn)
-{
- ib_modify_qp(conn->ibc_cmid->qp,
- &kiblnd_data.kib_error_qpa, IB_QP_STATE);
-}
-
-static inline const char *
-kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
-{
- if (q == &conn->ibc_tx_queue)
- return "tx_queue";
-
- if (q == &conn->ibc_tx_queue_rsrvd)
- return "tx_queue_rsrvd";
-
- if (q == &conn->ibc_tx_queue_nocred)
- return "tx_queue_nocred";
-
- if (q == &conn->ibc_active_txs)
- return "active_txs";
-
- LBUG();
- return NULL;
-}
-
-/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */
-/* lowest bits of the work request id to stash the work item type. */
-
-#define IBLND_WID_TX 0
-#define IBLND_WID_RDMA 1
-#define IBLND_WID_RX 2
-#define IBLND_WID_MASK 3UL
-
-static inline __u64
-kiblnd_ptr2wreqid(void *ptr, int type)
-{
- unsigned long lptr = (unsigned long)ptr;
-
- LASSERT((lptr & IBLND_WID_MASK) == 0);
- LASSERT((type & ~IBLND_WID_MASK) == 0);
- return (__u64)(lptr | type);
-}
-
-static inline void *
-kiblnd_wreqid2ptr(__u64 wreqid)
-{
- return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
-}
-
-static inline int
-kiblnd_wreqid2type(__u64 wreqid)
-{
- return wreqid & IBLND_WID_MASK;
-}
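/*
 * Editor's note (illustrative addition, not part of the original patch):
 * a round trip of the work-request-id tagging above.  Because TX/RX
 * descriptors are at least 4-byte aligned, the two low bits of the pointer
 * are free to carry the work item type and are masked out on completion.
 * The EX_* names and the dummy descriptor are stand-ins for demonstration.
 */
#include <stdio.h>

#define EX_WID_RX	2
#define EX_WID_MASK	3UL

int main(void)
{
	static int dummy_desc;		/* assumed >= 4-byte aligned */
	unsigned long long wreqid = (unsigned long)&dummy_desc | EX_WID_RX;

	void *ptr = (void *)((unsigned long)wreqid & ~EX_WID_MASK);
	int type = wreqid & EX_WID_MASK;

	printf("ptr ok: %d, type: %d\n", ptr == (void *)&dummy_desc, type);
	/* prints "ptr ok: 1, type: 2" */
	return 0;
}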
-
-static inline void
-kiblnd_set_conn_state(kib_conn_t *conn, int state)
-{
- conn->ibc_state = state;
- mb();
-}
-
-static inline void
-kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
-{
- msg->ibm_type = type;
- msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
-}
-
-static inline int
-kiblnd_rd_size(kib_rdma_desc_t *rd)
-{
- int i;
- int size;
-
- for (i = size = 0; i < rd->rd_nfrags; i++)
- size += rd->rd_frags[i].rf_nob;
-
- return size;
-}
-
-static inline __u64
-kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
-{
- return rd->rd_frags[index].rf_addr;
-}
-
-static inline __u32
-kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
-{
- return rd->rd_frags[index].rf_nob;
-}
-
-static inline __u32
-kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
-{
- return rd->rd_key;
-}
-
-static inline int
-kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
-{
- if (nob < rd->rd_frags[index].rf_nob) {
- rd->rd_frags[index].rf_addr += nob;
- rd->rd_frags[index].rf_nob -= nob;
- } else {
- index++;
- }
-
- return index;
-}
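/*
 * Editor's note (illustrative addition, not part of the original patch):
 * a worked example of kiblnd_rd_consume_frag() above.  Consuming fewer
 * bytes than the current fragment holds advances the fragment in place;
 * consuming the whole fragment moves on to the next index.  The fragment
 * sizes/addresses and the ex_* names are assumptions for demonstration.
 */
#include <stdio.h>

struct ex_frag { unsigned int rf_nob; unsigned long long rf_addr; };

static int ex_consume_frag(struct ex_frag *frags, int index, unsigned int nob)
{
	if (nob < frags[index].rf_nob) {
		frags[index].rf_addr += nob;
		frags[index].rf_nob -= nob;
	} else {
		index++;
	}
	return index;
}

int main(void)
{
	struct ex_frag frags[2] = { { 8192, 0x1000 }, { 4096, 0x9000 } };
	int idx = 0;

	idx = ex_consume_frag(frags, idx, 4096);
	printf("idx=%d nob=%u addr=%#llx\n",
	       idx, frags[0].rf_nob, frags[0].rf_addr);	/* idx=0 nob=4096 addr=0x2000 */
	idx = ex_consume_frag(frags, idx, 4096);
	printf("idx=%d\n", idx);				/* idx=1 */
	return 0;
}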
-
-static inline int
-kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
-{
- LASSERT(msgtype == IBLND_MSG_GET_REQ ||
- msgtype == IBLND_MSG_PUT_ACK);
-
- return msgtype == IBLND_MSG_GET_REQ ?
- offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
- offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
-}
-
-
-static inline __u64
-kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
- return ib_dma_mapping_error(dev, dma_addr);
-}
-
-static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
- void *msg, size_t size,
- enum dma_data_direction direction)
-{
- return ib_dma_map_single(dev, msg, size, direction);
-}
-
-static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
- __u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- ib_dma_unmap_single(dev, addr, size, direction);
-}
-
-#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
-#define KIBLND_UNMAP_ADDR(p, m, a) (a)
-
-static inline int kiblnd_dma_map_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- return ib_dma_map_sg(dev, sg, nents, direction);
-}
-
-static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- ib_dma_unmap_sg(dev, sg, nents, direction);
-}
-
-static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return ib_sg_dma_address(dev, sg);
-}
-
-static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return ib_sg_dma_len(dev, sg);
-}
-
-/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer; this is not */
-/* strictly correct because OFED1.2 declares it const, so we add a */
-/* (void *) cast to discard the const qualifier */
-
-#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
-#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-
-
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd);
-struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
- __u64 addr, __u64 size);
-void kiblnd_map_rx_descs(kib_conn_t *conn);
-void kiblnd_unmap_rx_descs(kib_conn_t *conn);
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
-
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
- int npages, __u64 iov, kib_fmr_t *fmr);
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-
-int kiblnd_startup(lnet_ni_t *ni);
-void kiblnd_shutdown(lnet_ni_t *ni);
-int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
-
-int kiblnd_tunables_init(void);
-void kiblnd_tunables_fini(void);
-
-int kiblnd_connd(void *arg);
-int kiblnd_scheduler(void *arg);
-int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
-int kiblnd_failover_thread(void *arg);
-
-int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages(kib_pages_t *p);
-
-int kiblnd_cm_callback(struct rdma_cm_id *cmid,
- struct rdma_cm_event *event);
-int kiblnd_translate_mtu(int value);
-
-int kiblnd_dev_failover(kib_dev_t *dev);
-int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer(kib_peer_t *peer);
-void kiblnd_destroy_dev(kib_dev_t *dev);
-void kiblnd_unlink_peer_locked(kib_peer_t *peer);
-void kiblnd_peer_alive(kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
-void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
-int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
- int version, __u64 incarnation);
-int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
-
-void kiblnd_connreq_done(kib_conn_t *conn, int status);
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version);
-void kiblnd_destroy_conn(kib_conn_t *conn);
-void kiblnd_close_conn(kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
-
-int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-
-void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
- int status);
-void kiblnd_check_sends (kib_conn_t *conn);
-
-void kiblnd_qp_event(struct ib_event *event, void *arg);
-void kiblnd_cq_event(struct ib_event *event, void *arg);
-void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
-
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp);
-int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int kiblnd_post_rx(kib_rx_t *rx, int credit);
-
-int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
-int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
deleted file mode 100644
index 8ab73eece620..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ /dev/null
@@ -1,3479 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd_cb.c
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include "o2iblnd.h"
-
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
-
-static void
-kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
-{
- lnet_msg_t *lntmsg[2];
- kib_net_t *net = ni->ni_data;
- int rc;
- int i;
-
- LASSERT(net != NULL);
- LASSERT(!in_interrupt());
- LASSERT(!tx->tx_queued); /* mustn't be queued for sending */
- LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */
- LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */
- LASSERT(tx->tx_pool != NULL);
-
- kiblnd_unmap_tx(ni, tx);
-
- /* tx may have up to 2 lnet msgs to finalise */
- lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
- lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
- rc = tx->tx_status;
-
- if (tx->tx_conn != NULL) {
- LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni);
-
- kiblnd_conn_decref(tx->tx_conn);
- tx->tx_conn = NULL;
- }
-
- tx->tx_nwrq = 0;
- tx->tx_status = 0;
-
- kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
-
- /* delay finalize until my descs have been freed */
- for (i = 0; i < 2; i++) {
- if (lntmsg[i] == NULL)
- continue;
-
- lnet_finalize(ni, lntmsg[i], rc);
- }
-}
-
-void
-kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
-{
- kib_tx_t *tx;
-
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, kib_tx_t, tx_list);
-
- list_del(&tx->tx_list);
- /* complete now */
- tx->tx_waiting = 0;
- tx->tx_status = status;
- kiblnd_tx_done(ni, tx);
- }
-}
-
-static kib_tx_t *
-kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
-{
- kib_net_t *net = (kib_net_t *)ni->ni_data;
- struct list_head *node;
- kib_tx_t *tx;
- kib_tx_poolset_t *tps;
-
- tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
- node = kiblnd_pool_alloc_node(&tps->tps_poolset);
- if (node == NULL)
- return NULL;
- tx = container_of(node, kib_tx_t, tx_list);
-
- LASSERT(tx->tx_nwrq == 0);
- LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_sending == 0);
- LASSERT(!tx->tx_waiting);
- LASSERT(tx->tx_status == 0);
- LASSERT(tx->tx_conn == NULL);
- LASSERT(tx->tx_lntmsg[0] == NULL);
- LASSERT(tx->tx_lntmsg[1] == NULL);
- LASSERT(tx->tx_nfrags == 0);
-
- return tx;
-}
-
-static void
-kiblnd_drop_rx(kib_rx_t *rx)
-{
- kib_conn_t *conn = rx->rx_conn;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
- LASSERT(conn->ibc_nrx > 0);
- conn->ibc_nrx--;
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- kiblnd_conn_decref(conn);
-}
-
-int
-kiblnd_post_rx(kib_rx_t *rx, int credit)
-{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr;
- int rc;
-
- LASSERT(net != NULL);
- LASSERT(!in_interrupt());
- LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
- credit == IBLND_POSTRX_PEER_CREDIT ||
- credit == IBLND_POSTRX_RSRVD_CREDIT);
-
- mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
- LASSERT(mr != NULL);
-
- rx->rx_sge.lkey = mr->lkey;
- rx->rx_sge.addr = rx->rx_msgaddr;
- rx->rx_sge.length = IBLND_MSG_SIZE;
-
- rx->rx_wrq.next = NULL;
- rx->rx_wrq.sg_list = &rx->rx_sge;
- rx->rx_wrq.num_sge = 1;
- rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
-
- LASSERT(conn->ibc_state >= IBLND_CONN_INIT);
- LASSERT(rx->rx_nob >= 0); /* not posted */
-
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
- kiblnd_drop_rx(rx); /* No more posts for this rx */
- return 0;
- }
-
- rx->rx_nob = -1; /* flag posted */
-
- /* NB: need an extra reference after ib_post_recv because we don't
- * own this rx (and rx::rx_conn) anymore, LU-5678.
- */
- kiblnd_conn_addref(conn);
- rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
- if (unlikely(rc != 0)) {
- CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
- rx->rx_nob = 0;
- }
-
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
- goto out;
-
- if (unlikely(rc != 0)) {
- kiblnd_close_conn(conn, rc);
- kiblnd_drop_rx(rx); /* No more posts for this rx */
- goto out;
- }
-
- if (credit == IBLND_POSTRX_NO_CREDIT)
- goto out;
-
- spin_lock(&conn->ibc_lock);
- if (credit == IBLND_POSTRX_PEER_CREDIT)
- conn->ibc_outstanding_credits++;
- else
- conn->ibc_reserved_credits++;
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_check_sends(conn);
-out:
- kiblnd_conn_decref(conn);
- return rc;
-}
-
-static kib_tx_t *
-kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
-{
- struct list_head *tmp;
-
- list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
-
- LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
-
- if (tx->tx_cookie != cookie)
- continue;
-
- if (tx->tx_waiting &&
- tx->tx_msg->ibm_type == txtype)
- return tx;
-
- CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
- tx->tx_waiting ? "" : "NOT ",
- tx->tx_msg->ibm_type, txtype);
- }
- return NULL;
-}
-
-static void
-kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
-{
- kib_tx_t *tx;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- int idle;
-
- spin_lock(&conn->ibc_lock);
-
- tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
- if (tx == NULL) {
- spin_unlock(&conn->ibc_lock);
-
- CWARN("Unmatched completion type %x cookie %#llx from %s\n",
- txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_close_conn(conn, -EPROTO);
- return;
- }
-
- if (tx->tx_status == 0) { /* success so far */
- if (status < 0) /* failed? */
- tx->tx_status = status;
- else if (txtype == IBLND_MSG_GET_REQ)
- lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
- }
-
- tx->tx_waiting = 0;
-
- idle = !tx->tx_queued && (tx->tx_sending == 0);
- if (idle)
- list_del(&tx->tx_list);
-
- spin_unlock(&conn->ibc_lock);
-
- if (idle)
- kiblnd_tx_done(ni, tx);
-}
-
-static void
-kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
-{
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
-
- if (tx == NULL) {
- CERROR("Can't get tx for completion %x for %s\n",
- type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- return;
- }
-
- tx->tx_msg->ibm_u.completion.ibcm_status = status;
- tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
- kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
-
- kiblnd_queue_tx(tx, conn);
-}
-
-static void
-kiblnd_handle_rx(kib_rx_t *rx)
-{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- int credits = msg->ibm_credits;
- kib_tx_t *tx;
- int rc = 0;
- int rc2;
- int post_credit;
-
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- CDEBUG(D_NET, "Received %x[%d] from %s\n",
- msg->ibm_type, credits,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
-
- if (credits != 0) {
- /* Have I received credits that will let me send? */
- spin_lock(&conn->ibc_lock);
-
- if (conn->ibc_credits + credits >
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
- rc2 = conn->ibc_credits;
- spin_unlock(&conn->ibc_lock);
-
- CERROR("Bad credits from %s: %d + %d > %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc2, credits,
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
-
- kiblnd_close_conn(conn, -EPROTO);
- kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
- return;
- }
-
- conn->ibc_credits += credits;
-
- /* This ensures the credit taken by NOOP can be returned */
- if (msg->ibm_type == IBLND_MSG_NOOP &&
- !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
- conn->ibc_outstanding_credits++;
-
- spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
- }
-
- switch (msg->ibm_type) {
- default:
- CERROR("Bad IBLND message type %x from %s\n",
- msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- post_credit = IBLND_POSTRX_NO_CREDIT;
- rc = -EPROTO;
- break;
-
- case IBLND_MSG_NOOP:
- if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
- post_credit = IBLND_POSTRX_NO_CREDIT;
- break;
- }
-
- if (credits != 0) /* credit already posted */
- post_credit = IBLND_POSTRX_NO_CREDIT;
- else /* a keepalive NOOP */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_IMMEDIATE:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
- msg->ibm_srcnid, rx, 0);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_PUT_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_PUT_NAK:
- CWARN("PUT_NACK from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- post_credit = IBLND_POSTRX_RSRVD_CREDIT;
- kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.completion.ibcm_status,
- msg->ibm_u.completion.ibcm_cookie);
- break;
-
- case IBLND_MSG_PUT_ACK:
- post_credit = IBLND_POSTRX_RSRVD_CREDIT;
-
- spin_lock(&conn->ibc_lock);
- tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.putack.ibpam_src_cookie);
- if (tx != NULL)
- list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
-
- if (tx == NULL) {
- CERROR("Unmatched PUT_ACK from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- rc = -EPROTO;
- break;
- }
-
- LASSERT(tx->tx_waiting);
- /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
- * (a) I can overwrite tx_msg since my peer has received it!
- * (b) tx_waiting set tells tx_complete() it's not done. */
-
- tx->tx_nwrq = 0; /* overwrite PUT_REQ */
-
- rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
- kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
- &msg->ibm_u.putack.ibpam_rd,
- msg->ibm_u.putack.ibpam_dst_cookie);
- if (rc2 < 0)
- CERROR("Can't setup rdma for PUT to %s: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
-
- spin_lock(&conn->ibc_lock);
- tx->tx_waiting = 0; /* clear waiting and queue atomically */
- kiblnd_queue_tx_locked(tx, conn);
- spin_unlock(&conn->ibc_lock);
- break;
-
- case IBLND_MSG_PUT_DONE:
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
- msg->ibm_u.completion.ibcm_status,
- msg->ibm_u.completion.ibcm_cookie);
- break;
-
- case IBLND_MSG_GET_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
-
- case IBLND_MSG_GET_DONE:
- post_credit = IBLND_POSTRX_RSRVD_CREDIT;
- kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
- msg->ibm_u.completion.ibcm_status,
- msg->ibm_u.completion.ibcm_cookie);
- break;
- }
-
- if (rc < 0) /* protocol error */
- kiblnd_close_conn(conn, rc);
-
- if (post_credit != IBLND_POSTRX_DONT_POST)
- kiblnd_post_rx(rx, post_credit);
-}
-
-static void
-kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
-{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
- int rc;
- int err = -EIO;
-
- LASSERT(net != NULL);
- LASSERT(rx->rx_nob < 0); /* was posted */
- rx->rx_nob = 0; /* isn't now */
-
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
- goto ignore;
-
- if (status != IB_WC_SUCCESS) {
- CNETERR("Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
- goto failed;
- }
-
- LASSERT(nob >= 0);
- rx->rx_nob = nob;
-
- rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
- CERROR("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- goto failed;
- }
-
- if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
- msg->ibm_dstnid != ni->ni_nid ||
- msg->ibm_srcstamp != conn->ibc_incarnation ||
- msg->ibm_dststamp != net->ibn_incarnation) {
- CERROR("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- err = -ESTALE;
- goto failed;
- }
-
- /* set time last known alive */
- kiblnd_peer_alive(conn->ibc_peer);
-
- /* racing with connection establishment/teardown! */
-
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
-
- write_lock_irqsave(g_lock, flags);
- /* must check holding global lock to eliminate race */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
- write_unlock_irqrestore(g_lock, flags);
- return;
- }
- write_unlock_irqrestore(g_lock, flags);
- }
- kiblnd_handle_rx(rx);
- return;
-
- failed:
- CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
- kiblnd_close_conn(conn, err);
- ignore:
- kiblnd_drop_rx(rx); /* Don't re-post rx. */
-}
-
-static struct page *
-kiblnd_kvaddr_to_page(unsigned long vaddr)
-{
- struct page *page;
-
- if (is_vmalloc_addr((void *)vaddr)) {
- page = vmalloc_to_page((void *)vaddr);
- LASSERT(page != NULL);
- return page;
- }
-#ifdef CONFIG_HIGHMEM
- if (vaddr >= PKMAP_BASE &&
- vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
-		/* Highmem pages are only used for bulk (kiov) I/O; none expected here */
- CERROR("find page for address in highmem\n");
- LBUG();
- }
-#endif
- page = virt_to_page(vaddr);
- LASSERT(page != NULL);
- return page;
-}
-
-static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
-{
- kib_hca_dev_t *hdev;
- __u64 *pages = tx->tx_pages;
- kib_fmr_poolset_t *fps;
- int npages;
- int size;
- int cpt;
- int rc;
- int i;
-
- LASSERT(tx->tx_pool != NULL);
- LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
-
- hdev = tx->tx_pool->tpo_hdev;
-
- for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
- for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += hdev->ibh_page_size) {
- pages[npages++] = (rd->rd_frags[i].rf_addr &
- hdev->ibh_page_mask) + size;
- }
- }
-
- cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
-
- fps = net->ibn_fmr_ps[cpt];
- rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
- if (rc != 0) {
- CERROR("Can't map %d pages: %d\n", npages, rc);
- return rc;
- }
-
- /* If rd is not tx_rd, it's going to get sent to a peer, who will need
- * the rkey */
- rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
- tx->fmr.fmr_pfmr->fmr->lkey;
- rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
- rd->rd_frags[0].rf_nob = nob;
- rd->rd_nfrags = 1;
-
- return 0;
-}
-
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
-{
- kib_net_t *net = ni->ni_data;
-
- LASSERT(net != NULL);
-
- if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
- kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
- tx->fmr.fmr_pfmr = NULL;
- }
-
- if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
- tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
- tx->tx_nfrags = 0;
- }
-}
-
-static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- int nfrags)
-{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
- kib_net_t *net = ni->ni_data;
- struct ib_mr *mr = NULL;
- __u32 nob;
- int i;
-
- /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
- * RDMA sink */
- tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- tx->tx_nfrags = nfrags;
-
- rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
- tx->tx_nfrags, tx->tx_dmadir);
-
- for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
- rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
- hdev->ibh_ibdev, &tx->tx_frags[i]);
- rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
- hdev->ibh_ibdev, &tx->tx_frags[i]);
- nob += rd->rd_frags[i].rf_nob;
- }
-
- /* looking for pre-mapping MR */
- mr = kiblnd_find_rd_dma_mr(hdev, rd);
- if (mr != NULL) {
- /* found pre-mapping MR */
- rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
- return 0;
- }
-
- if (net->ibn_fmr_ps != NULL)
- return kiblnd_fmr_map_tx(net, tx, rd, nob);
-
- return -EINVAL;
-}
-
-
-static int
-kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- unsigned int niov, struct kvec *iov, int offset, int nob)
-{
- kib_net_t *net = ni->ni_data;
- struct page *page;
- struct scatterlist *sg;
- unsigned long vaddr;
- int fragnob;
- int page_offset;
-
- LASSERT(nob > 0);
- LASSERT(niov > 0);
- LASSERT(net != NULL);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- sg = tx->tx_frags;
- do {
- LASSERT(niov > 0);
-
- vaddr = ((unsigned long)iov->iov_base) + offset;
- page_offset = vaddr & (PAGE_SIZE - 1);
- page = kiblnd_kvaddr_to_page(vaddr);
- if (page == NULL) {
- CERROR("Can't find page\n");
- return -EFAULT;
- }
-
- fragnob = min((int)(iov->iov_len - offset), nob);
- fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
-
- sg_set_page(sg, page, fragnob, page_offset);
- sg++;
-
- if (offset + fragnob < iov->iov_len) {
- offset += fragnob;
- } else {
- offset = 0;
- iov++;
- niov--;
- }
- nob -= fragnob;
- } while (nob > 0);
-
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
-}
-
-static int
-kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- int nkiov, lnet_kiov_t *kiov, int offset, int nob)
-{
- kib_net_t *net = ni->ni_data;
- struct scatterlist *sg;
- int fragnob;
-
- CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
-
- LASSERT(nob > 0);
- LASSERT(nkiov > 0);
- LASSERT(net != NULL);
-
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
- nkiov--;
- kiov++;
- LASSERT(nkiov > 0);
- }
-
- sg = tx->tx_frags;
- do {
- LASSERT(nkiov > 0);
-
- fragnob = min((int)(kiov->kiov_len - offset), nob);
-
- sg_set_page(sg, kiov->kiov_page, fragnob,
- kiov->kiov_offset + offset);
- sg++;
-
- offset = 0;
- kiov++;
- nkiov--;
- nob -= fragnob;
- } while (nob > 0);
-
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
-}
-
-static int
-kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
- __releases(conn->ibc_lock)
- __acquires(conn->ibc_lock)
-{
- kib_msg_t *msg = tx->tx_msg;
- kib_peer_t *peer = conn->ibc_peer;
- int ver = conn->ibc_version;
- int rc;
- int done;
- struct ib_send_wr *bad_wrq;
-
- LASSERT(tx->tx_queued);
- /* We rely on this for QP sizing */
- LASSERT(tx->tx_nwrq > 0);
- LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
-
- LASSERT(credit == 0 || credit == 1);
- LASSERT(conn->ibc_outstanding_credits >= 0);
- LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
- LASSERT(conn->ibc_credits >= 0);
- LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));
-
- if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
- /* tx completions outstanding... */
- CDEBUG(D_NET, "%s: posted enough\n",
- libcfs_nid2str(peer->ibp_nid));
- return -EAGAIN;
- }
-
- if (credit != 0 && conn->ibc_credits == 0) { /* no credits */
- CDEBUG(D_NET, "%s: no credits\n",
- libcfs_nid2str(peer->ibp_nid));
- return -EAGAIN;
- }
-
- if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
- conn->ibc_credits == 1 && /* last credit reserved */
- msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */
- CDEBUG(D_NET, "%s: not using last credit\n",
- libcfs_nid2str(peer->ibp_nid));
- return -EAGAIN;
- }
-
- /* NB don't drop ibc_lock before bumping tx_sending */
- list_del(&tx->tx_list);
- tx->tx_queued = 0;
-
- if (msg->ibm_type == IBLND_MSG_NOOP &&
- (!kiblnd_need_noop(conn) || /* redundant NOOP */
- (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
- conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
- /* OK to drop when posted enough NOOPs, since
- * kiblnd_check_sends will queue NOOP again when
- * posted NOOPs complete */
- spin_unlock(&conn->ibc_lock);
- kiblnd_tx_done(peer->ibp_ni, tx);
- spin_lock(&conn->ibc_lock);
- CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_noops_posted);
- return 0;
- }
-
- kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
- peer->ibp_nid, conn->ibc_incarnation);
-
- conn->ibc_credits -= credit;
- conn->ibc_outstanding_credits = 0;
- conn->ibc_nsends_posted++;
- if (msg->ibm_type == IBLND_MSG_NOOP)
- conn->ibc_noops_posted++;
-
- /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
- * PUT. If so, it was first queued here as a PUT_REQ, sent and
- * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
- * and then re-queued here. It's (just) possible that
- * tx_sending is non-zero if we've not done the tx_complete()
- * from the first send; hence the ++ rather than = below. */
- tx->tx_sending++;
- list_add(&tx->tx_list, &conn->ibc_active_txs);
-
- /* I'm still holding ibc_lock! */
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
- rc = -ECONNABORTED;
- } else if (tx->tx_pool->tpo_pool.po_failed ||
- conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
- /* close_conn will launch failover */
- rc = -ENETDOWN;
- } else {
- rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq);
- }
-
- conn->ibc_last_send = jiffies;
-
- if (rc == 0)
- return 0;
-
- /* NB credits are transferred in the actual
- * message, which can only be the last work item */
- conn->ibc_credits += credit;
- conn->ibc_outstanding_credits += msg->ibm_credits;
- conn->ibc_nsends_posted--;
- if (msg->ibm_type == IBLND_MSG_NOOP)
- conn->ibc_noops_posted--;
-
- tx->tx_status = rc;
- tx->tx_waiting = 0;
- tx->tx_sending--;
-
- done = (tx->tx_sending == 0);
- if (done)
- list_del(&tx->tx_list);
-
- spin_unlock(&conn->ibc_lock);
-
- if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
- CERROR("Error %d posting transmit to %s\n",
- rc, libcfs_nid2str(peer->ibp_nid));
- else
- CDEBUG(D_NET, "Error %d posting transmit to %s\n",
- rc, libcfs_nid2str(peer->ibp_nid));
-
- kiblnd_close_conn(conn, rc);
-
- if (done)
- kiblnd_tx_done(peer->ibp_ni, tx);
-
- spin_lock(&conn->ibc_lock);
-
- return -EIO;
-}
-
-void
-kiblnd_check_sends(kib_conn_t *conn)
-{
- int ver = conn->ibc_version;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx;
-
- /* Don't send anything until after the connection is established */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- CDEBUG(D_NET, "%s too soon\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- return;
- }
-
- spin_lock(&conn->ibc_lock);
-
- LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
- LASSERT(!IBLND_OOB_CAPABLE(ver) ||
- conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
- LASSERT(conn->ibc_reserved_credits >= 0);
-
- while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
- conn->ibc_reserved_credits--;
- }
-
- if (kiblnd_need_noop(conn)) {
- spin_unlock(&conn->ibc_lock);
-
- tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx != NULL)
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
-
- spin_lock(&conn->ibc_lock);
- if (tx != NULL)
- kiblnd_queue_tx_locked(tx, conn);
- }
-
- kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */
-
- for (;;) {
- int credit;
-
- if (!list_empty(&conn->ibc_tx_queue_nocred)) {
- credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
- } else if (!list_empty(&conn->ibc_tx_noops)) {
- LASSERT(!IBLND_OOB_CAPABLE(ver));
- credit = 1;
- tx = list_entry(conn->ibc_tx_noops.next,
- kib_tx_t, tx_list);
- } else if (!list_empty(&conn->ibc_tx_queue)) {
- credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
- } else
- break;
-
- if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
- break;
- }
-
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_conn_decref(conn); /* ...until here */
-}
-
-static void
-kiblnd_tx_complete(kib_tx_t *tx, int status)
-{
- int failed = (status != IB_WC_SUCCESS);
- kib_conn_t *conn = tx->tx_conn;
- int idle;
-
- LASSERT(tx->tx_sending > 0);
-
- if (failed) {
- if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
- CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
- status);
-
- kiblnd_close_conn(conn, -EIO);
- } else {
- kiblnd_peer_alive(conn->ibc_peer);
- }
-
- spin_lock(&conn->ibc_lock);
-
- /* I could be racing with rdma completion. Whoever makes 'tx' idle
- * gets to free it, which also drops its ref on 'conn'. */
-
- tx->tx_sending--;
- conn->ibc_nsends_posted--;
- if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
- conn->ibc_noops_posted--;
-
- if (failed) {
- tx->tx_waiting = 0; /* don't wait for peer */
- tx->tx_status = -EIO;
- }
-
- idle = (tx->tx_sending == 0) && /* This is the final callback */
- !tx->tx_waiting && /* Not waiting for peer */
- !tx->tx_queued; /* Not re-queued (PUT_DONE) */
- if (idle)
- list_del(&tx->tx_list);
-
- kiblnd_conn_addref(conn); /* 1 ref for me.... */
-
- spin_unlock(&conn->ibc_lock);
-
- if (idle)
- kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
-
- kiblnd_check_sends(conn);
-
- kiblnd_conn_decref(conn); /* ...until here */
-}
-
-void
-kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
-{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
- struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
- struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
- int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
- struct ib_mr *mr;
-
- LASSERT(tx->tx_nwrq >= 0);
- LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
- LASSERT(nob <= IBLND_MSG_SIZE);
-
- kiblnd_init_msg(tx->tx_msg, type, body_nob);
-
- mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
- LASSERT(mr != NULL);
-
- sge->lkey = mr->lkey;
- sge->addr = tx->tx_msgaddr;
- sge->length = nob;
-
- memset(wrq, 0, sizeof(*wrq));
-
- wrq->next = NULL;
- wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
- wrq->sg_list = sge;
- wrq->num_sge = 1;
- wrq->opcode = IB_WR_SEND;
- wrq->send_flags = IB_SEND_SIGNALED;
-
- tx->tx_nwrq++;
-}
-
-int
-kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
-{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
- struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_send_wr *wrq = &tx->tx_wrq[0];
- int rc = resid;
- int srcidx;
- int dstidx;
- int wrknob;
-
- LASSERT(!in_interrupt());
- LASSERT(tx->tx_nwrq == 0);
- LASSERT(type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
-
- srcidx = dstidx = 0;
-
- while (resid > 0) {
- if (srcidx >= srcrd->rd_nfrags) {
- CERROR("Src buffer exhausted: %d frags\n", srcidx);
- rc = -EPROTO;
- break;
- }
-
- if (dstidx == dstrd->rd_nfrags) {
- CERROR("Dst buffer exhausted: %d frags\n", dstidx);
- rc = -EPROTO;
- break;
- }
-
- if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
- CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- IBLND_RDMA_FRAGS(conn->ibc_version),
- srcidx, srcrd->rd_nfrags,
- dstidx, dstrd->rd_nfrags);
- rc = -EMSGSIZE;
- break;
- }
-
- wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
- kiblnd_rd_frag_size(dstrd, dstidx)),
- (__u32) resid);
-
- sge = &tx->tx_sge[tx->tx_nwrq];
- sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
- sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx);
- sge->length = wrknob;
-
- wrq = &tx->tx_wrq[tx->tx_nwrq];
-
- wrq->next = wrq + 1;
- wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
- wrq->sg_list = sge;
- wrq->num_sge = 1;
- wrq->opcode = IB_WR_RDMA_WRITE;
- wrq->send_flags = 0;
-
- wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
- wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);
-
- srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
- dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
-
- resid -= wrknob;
-
- tx->tx_nwrq++;
- wrq++;
- sge++;
- }
-
- if (rc < 0) /* no RDMA if completing with failure */
- tx->tx_nwrq = 0;
-
- ibmsg->ibm_u.completion.ibcm_status = rc;
- ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
- kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
- type, sizeof(kib_completion_msg_t));
-
- return rc;
-}
-
-void
-kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
-{
- struct list_head *q;
-
- LASSERT(tx->tx_nwrq > 0); /* work items set up */
- LASSERT(!tx->tx_queued); /* not queued for sending already */
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- tx->tx_queued = 1;
- tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
-
- if (tx->tx_conn == NULL) {
- kiblnd_conn_addref(conn);
- tx->tx_conn = conn;
- LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
- } else {
- /* PUT_DONE first attached to conn as a PUT_REQ */
- LASSERT(tx->tx_conn == conn);
- LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
- }
-
- switch (tx->tx_msg->ibm_type) {
- default:
- LBUG();
-
- case IBLND_MSG_PUT_REQ:
- case IBLND_MSG_GET_REQ:
- q = &conn->ibc_tx_queue_rsrvd;
- break;
-
- case IBLND_MSG_PUT_NAK:
- case IBLND_MSG_PUT_ACK:
- case IBLND_MSG_PUT_DONE:
- case IBLND_MSG_GET_DONE:
- q = &conn->ibc_tx_queue_nocred;
- break;
-
- case IBLND_MSG_NOOP:
- if (IBLND_OOB_CAPABLE(conn->ibc_version))
- q = &conn->ibc_tx_queue_nocred;
- else
- q = &conn->ibc_tx_noops;
- break;
-
- case IBLND_MSG_IMMEDIATE:
- q = &conn->ibc_tx_queue;
- break;
- }
-
- list_add_tail(&tx->tx_list, q);
-}
-
-void
-kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
-{
- spin_lock(&conn->ibc_lock);
- kiblnd_queue_tx_locked(tx, conn);
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_check_sends(conn);
-}
-
-static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
- struct sockaddr_in *srcaddr,
- struct sockaddr_in *dstaddr,
- int timeout_ms)
-{
- unsigned short port;
- int rc;
-
- /* allow the port to be reused */
- rc = rdma_set_reuseaddr(cmid, 1);
- if (rc != 0) {
- CERROR("Unable to set reuse on cmid: %d\n", rc);
- return rc;
- }
-
- /* look for a free privileged port */
- for (port = PROT_SOCK-1; port > 0; port--) {
- srcaddr->sin_port = htons(port);
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)srcaddr,
- (struct sockaddr *)dstaddr,
- timeout_ms);
- if (rc == 0) {
- CDEBUG(D_NET, "bound to port %hu\n", port);
- return 0;
- } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
- CDEBUG(D_NET, "bind to port %hu failed: %d\n",
- port, rc);
- } else {
- return rc;
- }
- }
-
- CERROR("Failed to bind to a free privileged port\n");
- return rc;
-}
-
-static void
-kiblnd_connect_peer(kib_peer_t *peer)
-{
- struct rdma_cm_id *cmid;
- kib_dev_t *dev;
- kib_net_t *net = peer->ibp_ni->ni_data;
- struct sockaddr_in srcaddr;
- struct sockaddr_in dstaddr;
- int rc;
-
- LASSERT(net != NULL);
- LASSERT(peer->ibp_connecting > 0);
-
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
- IB_QPT_RC);
-
- if (IS_ERR(cmid)) {
- CERROR("Can't create CMID for %s: %ld\n",
- libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
- rc = PTR_ERR(cmid);
- goto failed;
- }
-
- dev = net->ibn_dev;
- memset(&srcaddr, 0, sizeof(srcaddr));
- srcaddr.sin_family = AF_INET;
- srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
-
- memset(&dstaddr, 0, sizeof(dstaddr));
- dstaddr.sin_family = AF_INET;
- dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
- dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));
-
- kiblnd_peer_addref(peer); /* cmid's ref */
-
- if (*kiblnd_tunables.kib_use_priv_port) {
- rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- } else {
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- }
- if (rc != 0) {
- /* Can't initiate address resolution: */
- CERROR("Can't resolve addr for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- goto failed2;
- }
-
- LASSERT(cmid->device != NULL);
- CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
- libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
- &dev->ibd_ifip, cmid->device->name);
-
- return;
-
- failed2:
- kiblnd_peer_decref(peer); /* cmid's ref */
- rdma_destroy_id(cmid);
- failed:
- kiblnd_peer_connect_failed(peer, 1, rc);
-}
-
-void
-kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
-{
- kib_peer_t *peer;
- kib_peer_t *peer2;
- kib_conn_t *conn;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
- int rc;
-
- /* If I get here, I've committed to send, so I complete the tx with
- * failure on any problems */
-
- LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
- LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */
-
- /* First time, just use a read lock since I expect to find my peer
- * connected */
- read_lock_irqsave(g_lock, flags);
-
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL && !list_empty(&peer->ibp_conns)) {
- /* Found a peer with an established connection */
- conn = kiblnd_get_conn_locked(peer);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- read_unlock_irqrestore(g_lock, flags);
-
- if (tx != NULL)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- return;
- }
-
- read_unlock(g_lock);
- /* Re-try with a write lock */
- write_lock(g_lock);
-
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
- if (list_empty(&peer->ibp_conns)) {
- /* found a peer, but it's still connecting... */
- LASSERT(peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0);
- if (tx != NULL)
- list_add_tail(&tx->tx_list,
- &peer->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- write_unlock_irqrestore(g_lock, flags);
-
- if (tx != NULL)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- }
- return;
- }
-
- write_unlock_irqrestore(g_lock, flags);
-
- /* Allocate a peer ready to add to the peer table and retry */
- rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
- CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
- if (tx != NULL) {
- tx->tx_status = -EHOSTUNREACH;
- tx->tx_waiting = 0;
- kiblnd_tx_done(ni, tx);
- }
- return;
- }
-
- write_lock_irqsave(g_lock, flags);
-
- peer2 = kiblnd_find_peer_locked(nid);
- if (peer2 != NULL) {
- if (list_empty(&peer2->ibp_conns)) {
- /* found a peer, but it's still connecting... */
- LASSERT(peer2->ibp_connecting != 0 ||
- peer2->ibp_accepting != 0);
- if (tx != NULL)
- list_add_tail(&tx->tx_list,
- &peer2->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer2);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- write_unlock_irqrestore(g_lock, flags);
-
- if (tx != NULL)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- }
-
- kiblnd_peer_decref(peer);
- return;
- }
-
- /* Brand new peer */
- LASSERT(peer->ibp_connecting == 0);
- peer->ibp_connecting = 1;
-
- /* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
-
- if (tx != NULL)
- list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
-
- kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
-
- write_unlock_irqrestore(g_lock, flags);
-
- kiblnd_connect_peer(peer);
- kiblnd_peer_decref(peer);
-}
-
-int
-kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
-{
- lnet_hdr_t *hdr = &lntmsg->msg_hdr;
- int type = lntmsg->msg_type;
- lnet_process_id_t target = lntmsg->msg_target;
- int target_is_router = lntmsg->msg_target_is_router;
- int routing = lntmsg->msg_routing;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- kib_msg_t *ibmsg;
- kib_rdma_desc_t *rd;
- kib_tx_t *tx;
- int nob;
- int rc;
-
- /* NB 'private' is different depending on what we're sending.... */
-
- CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
- payload_nob, payload_niov, libcfs_id2str(target));
-
- LASSERT(payload_nob == 0 || payload_niov > 0);
- LASSERT(payload_niov <= LNET_MAX_IOV);
-
- /* Thread context */
- LASSERT(!in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
-
- switch (type) {
- default:
- LBUG();
- return -EIO;
-
- case LNET_MSG_ACK:
- LASSERT(payload_nob == 0);
- break;
-
- case LNET_MSG_GET:
- if (routing || target_is_router)
- break; /* send IMMEDIATE */
-
- /* is the REPLY message too small for RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate txd for GET to %s\n",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
- rd = &ibmsg->ibm_u.get.ibgm_rd;
- if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
- rc = kiblnd_setup_rd_iov(ni, tx, rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.iov,
- 0, lntmsg->msg_md->md_length);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.kiov,
- 0, lntmsg->msg_md->md_length);
- if (rc != 0) {
- CERROR("Can't setup GET sink for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
-
- nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
- ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
- ibmsg->ibm_u.get.ibgm_hdr = *hdr;
-
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
-
- tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
- if (tx->tx_lntmsg[1] == NULL) {
- CERROR("Can't create reply for GET -> %s\n",
- libcfs_nid2str(target.nid));
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
- tx->tx_waiting = 1; /* waiting for GET_DONE */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
-
- case LNET_MSG_REPLY:
- case LNET_MSG_PUT:
- /* Is the payload small enough not to need RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate %s txd for %s\n",
- type == LNET_MSG_PUT ? "PUT" : "REPLY",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- if (payload_kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
- payload_niov, payload_iov,
- payload_offset, payload_nob);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
- if (rc != 0) {
- CERROR("Can't setup PUT src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
-
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
- ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
- }
-
- /* send IMMEDIATE */
-
- LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
- <= IBLND_MSG_SIZE);
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
-
- if (payload_kiov != NULL)
- lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
- else
- lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- payload_niov, payload_iov,
- payload_offset, payload_nob);
-
- nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
-}
-
-static void
-kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
-{
- lnet_process_id_t target = lntmsg->msg_target;
- unsigned int niov = lntmsg->msg_niov;
- struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
- unsigned int offset = lntmsg->msg_offset;
- unsigned int nob = lntmsg->msg_len;
- kib_tx_t *tx;
- int rc;
-
- tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
- CERROR("Can't get tx for REPLY to %s\n",
- libcfs_nid2str(target.nid));
- goto failed_0;
- }
-
- if (nob == 0)
- rc = 0;
- else if (kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
- niov, iov, offset, nob);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
- niov, kiov, offset, nob);
-
- if (rc != 0) {
- CERROR("Can't setup GET src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
-
- rc = kiblnd_init_rdma(rx->rx_conn, tx,
- IBLND_MSG_GET_DONE, nob,
- &rx->rx_msg->ibm_u.get.ibgm_rd,
- rx->rx_msg->ibm_u.get.ibgm_cookie);
- if (rc < 0) {
- CERROR("Can't setup rdma for GET from %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
-
- if (nob == 0) {
- /* No RDMA: local completion may happen now! */
- lnet_finalize(ni, lntmsg, 0);
- } else {
- /* RDMA: lnet_finalize(lntmsg) when it
- * completes */
- tx->tx_lntmsg[0] = lntmsg;
- }
-
- kiblnd_queue_tx(tx, rx->rx_conn);
- return;
-
- failed_1:
- kiblnd_tx_done(ni, tx);
- failed_0:
- lnet_finalize(ni, lntmsg, -EIO);
-}
-
-int
-kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
-{
- kib_rx_t *rx = private;
- kib_msg_t *rxmsg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- kib_tx_t *tx;
- int nob;
- int post_credit = IBLND_POSTRX_PEER_CREDIT;
- int rc = 0;
-
- LASSERT(mlen <= rlen);
- LASSERT(!in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT(!(kiov != NULL && iov != NULL));
-
- switch (rxmsg->ibm_type) {
- default:
- LBUG();
-
- case IBLND_MSG_IMMEDIATE:
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
- if (nob > rx->rx_nob) {
- CERROR("Immediate message from %s too big: %d(%d)\n",
- libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
- nob, rx->rx_nob);
- rc = -EPROTO;
- break;
- }
-
- if (kiov != NULL)
- lnet_copy_flat2kiov(niov, kiov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- mlen);
- else
- lnet_copy_flat2iov(niov, iov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- mlen);
- lnet_finalize(ni, lntmsg, 0);
- break;
-
- case IBLND_MSG_PUT_REQ: {
- kib_msg_t *txmsg;
- kib_rdma_desc_t *rd;
-
- if (mlen == 0) {
- lnet_finalize(ni, lntmsg, 0);
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
- rxmsg->ibm_u.putreq.ibprm_cookie);
- break;
- }
-
- tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
- CERROR("Can't allocate tx for %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- /* Not replying will break the connection */
- rc = -ENOMEM;
- break;
- }
-
- txmsg = tx->tx_msg;
- rd = &txmsg->ibm_u.putack.ibpam_rd;
- if (kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx, rd,
- niov, iov, offset, mlen);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- niov, kiov, offset, mlen);
- if (rc != 0) {
- CERROR("Can't setup PUT sink for %s: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_tx_done(ni, tx);
- /* tell peer it's over */
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
- rxmsg->ibm_u.putreq.ibprm_cookie);
- break;
- }
-
- nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
- txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
- txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
-
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
-
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_DONE */
- kiblnd_queue_tx(tx, conn);
-
- /* reposted buffer reserved for PUT_DONE */
- post_credit = IBLND_POSTRX_NO_CREDIT;
- break;
- }
-
- case IBLND_MSG_GET_REQ:
- if (lntmsg != NULL) {
- /* Optimized GET; RDMA lntmsg's payload */
- kiblnd_reply(ni, rx, lntmsg);
- } else {
- /* GET didn't match anything */
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
- -ENODATA,
- rxmsg->ibm_u.get.ibgm_cookie);
- }
- break;
- }
-
- kiblnd_post_rx(rx, post_credit);
- return rc;
-}
-
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&kiblnd_data.kib_nthreads);
- return 0;
-}
-
-static void
-kiblnd_thread_fini(void)
-{
- atomic_dec(&kiblnd_data.kib_nthreads);
-}
-
-void
-kiblnd_peer_alive(kib_peer_t *peer)
-{
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer->ibp_last_alive = cfs_time_current();
- mb();
-}
-
-static void
-kiblnd_peer_notify(kib_peer_t *peer)
-{
- int error = 0;
- unsigned long last_alive = 0;
- unsigned long flags;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- if (list_empty(&peer->ibp_conns) &&
- peer->ibp_accepting == 0 &&
- peer->ibp_connecting == 0 &&
- peer->ibp_error != 0) {
- error = peer->ibp_error;
- peer->ibp_error = 0;
-
- last_alive = peer->ibp_last_alive;
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (error != 0)
- lnet_notify(peer->ibp_ni,
- peer->ibp_nid, 0, last_alive);
-}
-
-void
-kiblnd_close_conn_locked(kib_conn_t *conn, int error)
-{
- /* This just does the immediate housekeeping. 'error' is zero for a
- * normal shutdown which can happen only after the connection has been
- * established. If the connection is established, schedule the
- * connection to be finished off by the connd. Otherwise the connd is
- * already dealing with it (either to set it up or tear it down).
- * Caller holds kib_global_lock exclusively in irq context */
- kib_peer_t *peer = conn->ibc_peer;
- kib_dev_t *dev;
- unsigned long flags;
-
- LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- if (error != 0 && conn->ibc_comms_error == 0)
- conn->ibc_comms_error = error;
-
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
- return; /* already being handled */
-
- if (error == 0 &&
- list_empty(&conn->ibc_tx_noops) &&
- list_empty(&conn->ibc_tx_queue) &&
- list_empty(&conn->ibc_tx_queue_rsrvd) &&
- list_empty(&conn->ibc_tx_queue_nocred) &&
- list_empty(&conn->ibc_active_txs)) {
- CDEBUG(D_NET, "closing conn to %s\n",
- libcfs_nid2str(peer->ibp_nid));
- } else {
- CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
- }
-
- dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
- list_del(&conn->ibc_list);
- /* connd (see below) takes over ibc_list's ref */
-
- if (list_empty(&peer->ibp_conns) && /* no more conns */
- kiblnd_peer_active(peer)) { /* still in peer table */
- kiblnd_unlink_peer_locked(peer);
-
- /* set/clear error on last conn */
- peer->ibp_error = conn->ibc_comms_error;
- }
-
- kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
-
- if (error != 0 &&
- kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- wake_up(&kiblnd_data.kib_failover_waitq);
- }
-
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
-
- list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- wake_up(&kiblnd_data.kib_connd_waitq);
-
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
-}
-
-void
-kiblnd_close_conn(kib_conn_t *conn, int error)
-{
- unsigned long flags;
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_close_conn_locked(conn, error);
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-}
-
-static void
-kiblnd_handle_early_rxs(kib_conn_t *conn)
-{
- unsigned long flags;
- kib_rx_t *rx;
- kib_rx_t *tmp;
-
- LASSERT(!in_interrupt());
- LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- list_for_each_entry_safe(rx, tmp, &conn->ibc_early_rxs, rx_list) {
- list_del(&rx->rx_list);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_handle_rx(rx);
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-}
-
-static void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
-{
- LIST_HEAD(zombies);
- struct list_head *tmp;
- struct list_head *nxt;
- kib_tx_t *tx;
-
- spin_lock(&conn->ibc_lock);
-
- list_for_each_safe(tmp, nxt, txs) {
- tx = list_entry(tmp, kib_tx_t, tx_list);
-
- if (txs == &conn->ibc_active_txs) {
- LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting ||
- tx->tx_sending != 0);
- } else {
- LASSERT(tx->tx_queued);
- }
-
- tx->tx_status = -ECONNABORTED;
- tx->tx_waiting = 0;
-
- if (tx->tx_sending == 0) {
- tx->tx_queued = 0;
- list_del(&tx->tx_list);
- list_add(&tx->tx_list, &zombies);
- }
- }
-
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
-}
-
-static void
-kiblnd_finalise_conn(kib_conn_t *conn)
-{
- LASSERT(!in_interrupt());
- LASSERT(conn->ibc_state > IBLND_CONN_INIT);
-
- kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
-
- /* abort_receives moves QP state to IB_QPS_ERR. This is only required
- * for connections that didn't get as far as being connected, because
- * rdma_disconnect() does this for free. */
- kiblnd_abort_receives(conn);
-
- /* Complete all tx descs not waiting for sends to complete.
- * NB we should be safe from RDMA now that the QP has changed state */
-
- kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
- kiblnd_abort_txs(conn, &conn->ibc_active_txs);
-
- kiblnd_handle_early_rxs(conn);
-}
-
-void
-kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
-{
- LIST_HEAD(zombies);
- unsigned long flags;
-
- LASSERT(error != 0);
- LASSERT(!in_interrupt());
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- if (active) {
- LASSERT(peer->ibp_connecting > 0);
- peer->ibp_connecting--;
- } else {
- LASSERT(peer->ibp_accepting > 0);
- peer->ibp_accepting--;
- }
-
- if (peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0) {
- /* another connection attempt under way... */
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return;
- }
-
- if (list_empty(&peer->ibp_conns)) {
- /* Take peer's blocked transmits to complete with error */
- list_add(&zombies, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
-
- if (kiblnd_peer_active(peer))
- kiblnd_unlink_peer_locked(peer);
-
- peer->ibp_error = error;
- } else {
- /* Can't have blocked transmits if there are connections */
- LASSERT(list_empty(&peer->ibp_tx_queue));
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_peer_notify(peer);
-
- if (list_empty(&zombies))
- return;
-
- CNETERR("Deleting messages for %s: connection failed\n",
- libcfs_nid2str(peer->ibp_nid));
-
- kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
-}
-
-void
-kiblnd_connreq_done(kib_conn_t *conn, int status)
-{
- kib_peer_t *peer = conn->ibc_peer;
- kib_tx_t *tx;
- kib_tx_t *tmp;
- struct list_head txs;
- unsigned long flags;
- int active;
-
- active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
-
- CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
- libcfs_nid2str(peer->ibp_nid), active,
- conn->ibc_version, status);
-
- LASSERT(!in_interrupt());
- LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
- peer->ibp_connecting > 0) ||
- (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
- peer->ibp_accepting > 0));
-
- LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
- conn->ibc_connvars = NULL;
-
- if (status != 0) {
- /* failed to establish connection */
- kiblnd_peer_connect_failed(peer, active, status);
- kiblnd_finalise_conn(conn);
- return;
- }
-
- /* connection established */
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- conn->ibc_last_send = jiffies;
- kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
- kiblnd_peer_alive(peer);
-
- /* Add conn to peer's list and nuke any dangling conns from a different
- * peer instance... */
- kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
- list_add(&conn->ibc_list, &peer->ibp_conns);
- if (active)
- peer->ibp_connecting--;
- else
- peer->ibp_accepting--;
-
- if (peer->ibp_version == 0) {
- peer->ibp_version = conn->ibc_version;
- peer->ibp_incarnation = conn->ibc_incarnation;
- }
-
- if (peer->ibp_version != conn->ibc_version ||
- peer->ibp_incarnation != conn->ibc_incarnation) {
- kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
- conn->ibc_incarnation);
- peer->ibp_version = conn->ibc_version;
- peer->ibp_incarnation = conn->ibc_incarnation;
- }
-
- /* grab pending txs while I have the lock */
- list_add(&txs, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
-
- if (!kiblnd_peer_active(peer) || /* peer has been deleted */
- conn->ibc_comms_error != 0) { /* error has happened already */
- lnet_ni_t *ni = peer->ibp_ni;
-
- /* start to shut down connection */
- kiblnd_close_conn_locked(conn, -ECONNABORTED);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
-
- return;
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- /* Schedule blocked txs */
- spin_lock(&conn->ibc_lock);
- list_for_each_entry_safe(tx, tmp, &txs, tx_list) {
- list_del(&tx->tx_list);
-
- kiblnd_queue_tx_locked(tx, conn);
- }
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_check_sends(conn);
-
- /* schedule blocked rxs */
- kiblnd_handle_early_rxs(conn);
-}
-
-static void
-kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
-{
- int rc;
-
- rc = rdma_reject(cmid, rej, sizeof(*rej));
-
- if (rc != 0)
- CWARN("Error %d sending reject\n", rc);
-}
-
-static int
-kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
-{
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- kib_msg_t *reqmsg = priv;
- kib_msg_t *ackmsg;
- kib_dev_t *ibdev;
- kib_peer_t *peer;
- kib_peer_t *peer2;
- kib_conn_t *conn;
- lnet_ni_t *ni = NULL;
- kib_net_t *net = NULL;
- lnet_nid_t nid;
- struct rdma_conn_param cp;
- kib_rej_t rej;
- int version = IBLND_MSG_VERSION;
- unsigned long flags;
- int rc;
- struct sockaddr_in *peer_addr;
- LASSERT(!in_interrupt());
-
- /* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
- LASSERT(ibdev != NULL);
-
- memset(&rej, 0, sizeof(rej));
- rej.ibr_magic = IBLND_MSG_MAGIC;
- rej.ibr_why = IBLND_REJECT_FATAL;
- rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
-
- peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
- if (*kiblnd_tunables.kib_require_priv_port &&
- ntohs(peer_addr->sin_port) >= PROT_SOCK) {
- __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
- CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
- &ip, ntohs(peer_addr->sin_port));
- goto failed;
- }
-
- if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
- CERROR("Short connection request\n");
- goto failed;
- }
-
- /* Future protocol version compatibility support! If the
- * o2iblnd-specific protocol changes, or when LNET unifies
- * protocols over all LNDs, the initial connection will
- * negotiate a protocol version. I trap this here to avoid
- * console errors; the reject tells the peer which protocol I
- * speak. */
- if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
- reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
- goto failed;
- if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
- reqmsg->ibm_version != IBLND_MSG_VERSION &&
- reqmsg->ibm_version != IBLND_MSG_VERSION_1)
- goto failed;
- if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
- reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
- reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
- goto failed;
-
- rc = kiblnd_unpack_msg(reqmsg, priv_nob);
- if (rc != 0) {
- CERROR("Can't parse connection request: %d\n", rc);
- goto failed;
- }
-
- nid = reqmsg->ibm_srcnid;
- ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
-
- if (ni != NULL) {
- net = (kib_net_t *)ni->ni_data;
- rej.ibr_incarnation = net->ibn_incarnation;
- }
-
- if (ni == NULL || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
- libcfs_nid2str(nid),
- ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
- ibdev->ibd_ifname, ibdev->ibd_nnets,
- &ibdev->ibd_ifip,
- libcfs_nid2str(reqmsg->ibm_dstnid));
-
- goto failed;
- }
-
- /* check time stamp as soon as possible */
- if (reqmsg->ibm_dststamp != 0 &&
- reqmsg->ibm_dststamp != net->ibn_incarnation) {
- CWARN("Stale connection request\n");
- rej.ibr_why = IBLND_REJECT_CONN_STALE;
- goto failed;
- }
-
- /* I can accept peer's version */
- version = reqmsg->ibm_version;
-
- if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
- CERROR("Unexpected connreq msg type: %x from %s\n",
- reqmsg->ibm_type, libcfs_nid2str(nid));
- goto failed;
- }
-
- if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
- IBLND_MSG_QUEUE_SIZE(version)) {
- CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
- libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(version));
-
- if (version == IBLND_MSG_VERSION)
- rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
-
- goto failed;
- }
-
- if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
- IBLND_RDMA_FRAGS(version)) {
- CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
- libcfs_nid2str(nid), version,
- reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
-
- if (version == IBLND_MSG_VERSION)
- rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
-
- goto failed;
-
- }
-
- if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
- CERROR("Can't accept %s: message size %d too big (%d max)\n",
- libcfs_nid2str(nid),
- reqmsg->ibm_u.connparams.ibcp_max_msg_size,
- IBLND_MSG_SIZE);
- goto failed;
- }
-
- /* assume 'nid' is a new peer; create */
- rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
- CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
- rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
- goto failed;
- }
-
- write_lock_irqsave(g_lock, flags);
-
- peer2 = kiblnd_find_peer_locked(nid);
- if (peer2 != NULL) {
- if (peer2->ibp_version == 0) {
- peer2->ibp_version = version;
- peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
- }
-
- /* not the guy I've talked with */
- if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
- peer2->ibp_version != version) {
- kiblnd_close_peer_conns_locked(peer2, -ESTALE);
- write_unlock_irqrestore(g_lock, flags);
-
- CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
- libcfs_nid2str(nid), peer2->ibp_version, version);
-
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_CONN_STALE;
- goto failed;
- }
-
- /* tie-break connection race in favour of the higher NID */
- if (peer2->ibp_connecting != 0 &&
- nid < ni->ni_nid) {
- write_unlock_irqrestore(g_lock, flags);
-
- CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
-
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_CONN_RACE;
- goto failed;
- }
-
- peer2->ibp_accepting++;
- kiblnd_peer_addref(peer2);
-
- write_unlock_irqrestore(g_lock, flags);
- kiblnd_peer_decref(peer);
- peer = peer2;
- } else {
- /* Brand new peer */
- LASSERT(peer->ibp_accepting == 0);
- LASSERT(peer->ibp_version == 0 &&
- peer->ibp_incarnation == 0);
-
- peer->ibp_accepting = 1;
- peer->ibp_version = version;
- peer->ibp_incarnation = reqmsg->ibm_srcstamp;
-
-		/* I have a ref on ni that prevents it from being shut down */
- LASSERT(net->ibn_shutdown == 0);
-
- kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
-
- write_unlock_irqrestore(g_lock, flags);
- }
-
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
- if (conn == NULL) {
- kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
- goto failed;
- }
-
- /* conn now "owns" cmid, so I return success from here on to ensure the
- * CM callback doesn't destroy cmid. */
-
- conn->ibc_incarnation = reqmsg->ibm_srcstamp;
- conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
- <= IBLND_RX_MSGS(version));
-
- ackmsg = &conn->ibc_connvars->cv_msg;
- memset(ackmsg, 0, sizeof(*ackmsg));
-
- kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
- sizeof(ackmsg->ibm_u.connparams));
- ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
- ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
-
- kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
-
- memset(&cp, 0, sizeof(cp));
- cp.private_data = ackmsg;
- cp.private_data_len = ackmsg->ibm_nob;
- cp.responder_resources = 0; /* No atomic ops or RDMA reads */
- cp.initiator_depth = 0;
- cp.flow_control = 1;
- cp.retry_count = *kiblnd_tunables.kib_retry_count;
- cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
-
- CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
-
- rc = rdma_accept(cmid, &cp);
- if (rc != 0) {
- CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
- rej.ibr_version = version;
- rej.ibr_why = IBLND_REJECT_FATAL;
-
- kiblnd_reject(cmid, &rej);
- kiblnd_connreq_done(conn, rc);
- kiblnd_conn_decref(conn);
- }
-
- lnet_ni_decref(ni);
- return 0;
-
- failed:
- if (ni != NULL)
- lnet_ni_decref(ni);
-
- rej.ibr_version = version;
- rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
- kiblnd_reject(cmid, &rej);
-
- return -ECONNREFUSED;
-}
-
-static void
-kiblnd_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
-{
- kib_peer_t *peer = conn->ibc_peer;
- char *reason;
- int retry = 0;
- unsigned long flags;
-
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- /* retry connection if it's still needed and no other connection
- * attempts (active or passive) are in progress
- * NB: reconnect is still needed even when ibp_tx_queue is
- * empty if ibp_version != version because reconnect may be
- * initiated by kiblnd_query() */
- if ((!list_empty(&peer->ibp_tx_queue) ||
- peer->ibp_version != version) &&
- peer->ibp_connecting == 1 &&
- peer->ibp_accepting == 0) {
- retry = 1;
- peer->ibp_connecting++;
-
- peer->ibp_version = version;
- peer->ibp_incarnation = incarnation;
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (!retry)
- return;
-
- switch (why) {
- default:
- reason = "Unknown";
- break;
-
- case IBLND_REJECT_CONN_STALE:
- reason = "stale";
- break;
-
- case IBLND_REJECT_CONN_RACE:
- reason = "conn race";
- break;
-
- case IBLND_REJECT_CONN_UNCOMPAT:
- reason = "version negotiation";
- break;
- }
-
- CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- reason, IBLND_MSG_VERSION, version,
- cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
- cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version),
- cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
-
- kiblnd_connect_peer(peer);
-}
-
-static void
-kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
-{
- kib_peer_t *peer = conn->ibc_peer;
-
- LASSERT(!in_interrupt());
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
-
- switch (reason) {
- case IB_CM_REJ_STALE_CONN:
- kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
- break;
-
- case IB_CM_REJ_INVALID_SERVICE_ID:
- CNETERR("%s rejected: no listener at %d\n",
- libcfs_nid2str(peer->ibp_nid),
- *kiblnd_tunables.kib_service);
- break;
-
- case IB_CM_REJ_CONSUMER_DEFINED:
- if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
- kib_rej_t *rej = priv;
- kib_connparams_t *cp = NULL;
- int flip = 0;
- __u64 incarnation = -1;
-
-			/* NB. default incarnation is -1 because:
-			 * a) V1 will ignore the dst incarnation in the connreq.
-			 * b) V2 will provide an incarnation while rejecting me,
-			 *    so the -1 will be overwritten.
-			 *
-			 * If I try to connect to a V1 peer with the V2 protocol
-			 * and it rejects me and then upgrades to V2, I know
-			 * nothing about the upgrade and will retry with V1. In
-			 * that case the upgraded V2 peer can tell I'm talking
-			 * to the old version and rejects me (incarnation is -1).
-			 */
-
- if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
- rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
- __swab32s(&rej->ibr_magic);
- __swab16s(&rej->ibr_version);
- flip = 1;
- }
-
- if (priv_nob >= sizeof(kib_rej_t) &&
- rej->ibr_version > IBLND_MSG_VERSION_1) {
-				/* priv_nob is always 148 in the current version
-				 * of OFED, so we still need to check the version
-				 * (see the define of IB_CM_REJ_PRIVATE_DATA_SIZE). */
- cp = &rej->ibr_cp;
-
- if (flip) {
- __swab64s(&rej->ibr_incarnation);
- __swab16s(&cp->ibcp_queue_depth);
- __swab16s(&cp->ibcp_max_frags);
- __swab32s(&cp->ibcp_max_msg_size);
- }
-
- incarnation = rej->ibr_incarnation;
- }
-
- if (rej->ibr_magic != IBLND_MSG_MAGIC &&
- rej->ibr_magic != LNET_PROTO_MAGIC) {
- CERROR("%s rejected: consumer defined fatal error\n",
- libcfs_nid2str(peer->ibp_nid));
- break;
- }
-
- if (rej->ibr_version != IBLND_MSG_VERSION &&
- rej->ibr_version != IBLND_MSG_VERSION_1) {
- CERROR("%s rejected: o2iblnd version %x error\n",
- libcfs_nid2str(peer->ibp_nid),
- rej->ibr_version);
- break;
- }
-
- if (rej->ibr_why == IBLND_REJECT_FATAL &&
- rej->ibr_version == IBLND_MSG_VERSION_1) {
- CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
- libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
-
- if (conn->ibc_version != IBLND_MSG_VERSION_1)
- rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
- }
-
- switch (rej->ibr_why) {
- case IBLND_REJECT_CONN_RACE:
- case IBLND_REJECT_CONN_STALE:
- case IBLND_REJECT_CONN_UNCOMPAT:
- kiblnd_reconnect(conn, rej->ibr_version,
- incarnation, rej->ibr_why, cp);
- break;
-
- case IBLND_REJECT_MSG_QUEUE_SIZE:
- CERROR("%s rejected: incompatible message queue depth %d, %d\n",
- libcfs_nid2str(peer->ibp_nid),
- cp != NULL ? cp->ibcp_queue_depth :
- IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
- break;
-
- case IBLND_REJECT_RDMA_FRAGS:
- CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
- libcfs_nid2str(peer->ibp_nid),
- cp != NULL ? cp->ibcp_max_frags :
- IBLND_RDMA_FRAGS(rej->ibr_version),
- IBLND_RDMA_FRAGS(conn->ibc_version));
- break;
-
- case IBLND_REJECT_NO_RESOURCES:
- CERROR("%s rejected: o2iblnd no resources\n",
- libcfs_nid2str(peer->ibp_nid));
- break;
-
- case IBLND_REJECT_FATAL:
- CERROR("%s rejected: o2iblnd fatal error\n",
- libcfs_nid2str(peer->ibp_nid));
- break;
-
- default:
- CERROR("%s rejected: o2iblnd reason %d\n",
- libcfs_nid2str(peer->ibp_nid),
- rej->ibr_why);
- break;
- }
- break;
- }
- /* fall through */
- default:
- CNETERR("%s rejected: reason %d, size %d\n",
- libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
- break;
- }
-
- kiblnd_connreq_done(conn, -ECONNREFUSED);
-}
-
-static void
-kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
-{
- kib_peer_t *peer = conn->ibc_peer;
- lnet_ni_t *ni = peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
- kib_msg_t *msg = priv;
- int ver = conn->ibc_version;
- int rc = kiblnd_unpack_msg(msg, priv_nob);
- unsigned long flags;
-
- LASSERT(net != NULL);
-
- if (rc != 0) {
- CERROR("Can't unpack connack from %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- goto failed;
- }
-
- if (msg->ibm_type != IBLND_MSG_CONNACK) {
- CERROR("Unexpected message %d from %s\n",
- msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
- rc = -EPROTO;
- goto failed;
- }
-
- if (ver != msg->ibm_version) {
-		CERROR("%s replied version %x, which does not match the requested version %x\n",
- libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
- rc = -EPROTO;
- goto failed;
- }
-
- if (msg->ibm_u.connparams.ibcp_queue_depth !=
- IBLND_MSG_QUEUE_SIZE(ver)) {
-		CERROR("%s has incompatible queue depth %d (%d wanted)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(ver));
- rc = -EPROTO;
- goto failed;
- }
-
- if (msg->ibm_u.connparams.ibcp_max_frags !=
- IBLND_RDMA_FRAGS(ver)) {
- CERROR("%s has incompatible max_frags %d (%d wanted)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(ver));
- rc = -EPROTO;
- goto failed;
- }
-
- if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
- CERROR("%s max message size %d too big (%d max)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_max_msg_size,
- IBLND_MSG_SIZE);
- rc = -EPROTO;
- goto failed;
- }
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
- msg->ibm_dststamp == net->ibn_incarnation)
- rc = 0;
- else
- rc = -ESTALE;
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (rc != 0) {
- CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc,
- msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
- goto failed;
- }
-
- conn->ibc_incarnation = msg->ibm_srcstamp;
- conn->ibc_credits =
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
- <= IBLND_RX_MSGS(ver));
-
- kiblnd_connreq_done(conn, 0);
- return;
-
- failed:
- /* NB My QP has already established itself, so I handle anything going
- * wrong here by setting ibc_comms_error.
- * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
- * immediately tears it down. */
-
- LASSERT(rc != 0);
- conn->ibc_comms_error = rc;
- kiblnd_connreq_done(conn, 0);
-}
-
-static int
-kiblnd_active_connect(struct rdma_cm_id *cmid)
-{
- kib_peer_t *peer = (kib_peer_t *)cmid->context;
- kib_conn_t *conn;
- kib_msg_t *msg;
- struct rdma_conn_param cp;
- int version;
- __u64 incarnation;
- unsigned long flags;
- int rc;
-
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- incarnation = peer->ibp_incarnation;
- version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
- peer->ibp_version;
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
- if (conn == NULL) {
- kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
- kiblnd_peer_decref(peer); /* lose cmid's ref */
- return -ENOMEM;
- }
-
- /* conn "owns" cmid now, so I return success from here on to ensure the
- * CM callback doesn't destroy cmid. conn also takes over cmid's ref
- * on peer */
-
- msg = &conn->ibc_connvars->cv_msg;
-
- memset(msg, 0, sizeof(*msg));
- kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
- msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
- msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-
- kiblnd_pack_msg(peer->ibp_ni, msg, version,
- 0, peer->ibp_nid, incarnation);
-
- memset(&cp, 0, sizeof(cp));
- cp.private_data = msg;
- cp.private_data_len = msg->ibm_nob;
- cp.responder_resources = 0; /* No atomic ops or RDMA reads */
- cp.initiator_depth = 0;
- cp.flow_control = 1;
- cp.retry_count = *kiblnd_tunables.kib_retry_count;
- cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
-
- LASSERT(cmid->context == (void *)conn);
- LASSERT(conn->ibc_cmid == cmid);
-
- rc = rdma_connect(cmid, &cp);
- if (rc != 0) {
- CERROR("Can't connect to %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- kiblnd_connreq_done(conn, rc);
- kiblnd_conn_decref(conn);
- }
-
- return 0;
-}
-
-int
-kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
-{
- kib_peer_t *peer;
- kib_conn_t *conn;
- int rc;
-
- switch (event->event) {
- default:
- CERROR("Unexpected event: %d, status: %d\n",
- event->event, event->status);
- LBUG();
-
- case RDMA_CM_EVENT_CONNECT_REQUEST:
- /* destroy cmid on failure */
- rc = kiblnd_passive_connect(cmid,
- (void *)KIBLND_CONN_PARAM(event),
- KIBLND_CONN_PARAM_LEN(event));
- CDEBUG(D_NET, "connreq: %d\n", rc);
- return rc;
-
- case RDMA_CM_EVENT_ADDR_ERROR:
- peer = (kib_peer_t *)cmid->context;
- CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
- kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc != 0 destroys cmid */
-
- case RDMA_CM_EVENT_ADDR_RESOLVED:
- peer = (kib_peer_t *)cmid->context;
-
- CDEBUG(D_NET, "%s Addr resolved: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
-
- if (event->status != 0) {
- CNETERR("Can't resolve address for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- rc = event->status;
- } else {
- rc = rdma_resolve_route(
- cmid, *kiblnd_tunables.kib_timeout * 1000);
- if (rc == 0)
- return 0;
- /* Can't initiate route resolution */
- CERROR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
- }
- kiblnd_peer_connect_failed(peer, 1, rc);
- kiblnd_peer_decref(peer);
- return rc; /* rc != 0 destroys cmid */
-
- case RDMA_CM_EVENT_ROUTE_ERROR:
- peer = (kib_peer_t *)cmid->context;
- CNETERR("%s: ROUTE ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
- kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc != 0 destroys cmid */
-
- case RDMA_CM_EVENT_ROUTE_RESOLVED:
- peer = (kib_peer_t *)cmid->context;
- CDEBUG(D_NET, "%s Route resolved: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
-
- if (event->status == 0)
- return kiblnd_active_connect(cmid);
-
- CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
- kiblnd_peer_connect_failed(peer, 1, event->status);
- kiblnd_peer_decref(peer);
- return event->status; /* rc != 0 destroys cmid */
-
- case RDMA_CM_EVENT_UNREACHABLE:
- conn = (kib_conn_t *)cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENETDOWN);
- kiblnd_conn_decref(conn);
- return 0;
-
- case RDMA_CM_EVENT_CONNECT_ERROR:
- conn = (kib_conn_t *)cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: CONNECT ERROR %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENOTCONN);
- kiblnd_conn_decref(conn);
- return 0;
-
- case RDMA_CM_EVENT_REJECTED:
- conn = (kib_conn_t *)cmid->context;
- switch (conn->ibc_state) {
- default:
- LBUG();
-
- case IBLND_CONN_PASSIVE_WAIT:
- CERROR("%s: REJECTED %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- event->status);
- kiblnd_connreq_done(conn, -ECONNRESET);
- break;
-
- case IBLND_CONN_ACTIVE_CONNECT:
- kiblnd_rejected(conn, event->status,
- (void *)KIBLND_CONN_PARAM(event),
- KIBLND_CONN_PARAM_LEN(event));
- break;
- }
- kiblnd_conn_decref(conn);
- return 0;
-
- case RDMA_CM_EVENT_ESTABLISHED:
- conn = (kib_conn_t *)cmid->context;
- switch (conn->ibc_state) {
- default:
- LBUG();
-
- case IBLND_CONN_PASSIVE_WAIT:
- CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_connreq_done(conn, 0);
- break;
-
- case IBLND_CONN_ACTIVE_CONNECT:
- CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_check_connreply(conn,
- (void *)KIBLND_CONN_PARAM(event),
- KIBLND_CONN_PARAM_LEN(event));
- break;
- }
- /* net keeps its ref on conn! */
- return 0;
-
- case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
- return 0;
- case RDMA_CM_EVENT_DISCONNECTED:
- conn = (kib_conn_t *)cmid->context;
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- CERROR("%s DISCONNECTED\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_connreq_done(conn, -ECONNRESET);
- } else {
- kiblnd_close_conn(conn, 0);
- }
- kiblnd_conn_decref(conn);
- cmid->context = NULL;
- return 0;
-
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- LCONSOLE_ERROR_MSG(0x131,
- "Received notification of device removal\n"
-				   "Please shut down LNET to allow this to proceed\n");
- /* Can't remove network from underneath LNET for now, so I have
- * to ignore this */
- return 0;
-
- case RDMA_CM_EVENT_ADDR_CHANGE:
- LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
- return 0;
- }
-}
-
-static int
-kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
-{
- kib_tx_t *tx;
- struct list_head *ttmp;
-
- list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, kib_tx_t, tx_list);
-
- if (txs != &conn->ibc_active_txs) {
- LASSERT(tx->tx_queued);
- } else {
- LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting || tx->tx_sending != 0);
- }
-
- if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
- CERROR("Timed out tx: %s, %lu seconds\n",
- kiblnd_queue2str(conn, txs),
- cfs_duration_sec(jiffies - tx->tx_deadline));
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-kiblnd_conn_timed_out_locked(kib_conn_t *conn)
-{
- return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
- kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
-}
-
-static void
-kiblnd_check_conns(int idx)
-{
- LIST_HEAD(closes);
- LIST_HEAD(checksends);
- struct list_head *peers = &kiblnd_data.kib_peers[idx];
- struct list_head *ptmp;
- kib_peer_t *peer;
- kib_conn_t *conn;
- kib_conn_t *tmp;
- struct list_head *ctmp;
- unsigned long flags;
-
- /* NB. We expect to have a look at all the peers and not find any
- * RDMAs to time out, so we just use a shared lock while we
- * take a look... */
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- list_for_each(ptmp, peers) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
-
- list_for_each(ctmp, &peer->ibp_conns) {
- int timedout;
- int sendnoop;
-
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
-
- LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
-
- spin_lock(&conn->ibc_lock);
-
- sendnoop = kiblnd_need_noop(conn);
- timedout = kiblnd_conn_timed_out_locked(conn);
- if (!sendnoop && !timedout) {
- spin_unlock(&conn->ibc_lock);
- continue;
- }
-
- if (timedout) {
- CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
- libcfs_nid2str(peer->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer->ibp_last_alive),
- conn->ibc_credits,
- conn->ibc_outstanding_credits,
- conn->ibc_reserved_credits);
- list_add(&conn->ibc_connd_list, &closes);
- } else {
- list_add(&conn->ibc_connd_list,
- &checksends);
- }
- /* +ref for 'closes' or 'checksends' */
- kiblnd_conn_addref(conn);
-
- spin_unlock(&conn->ibc_lock);
- }
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- /* Handle timeout by closing the whole
- * connection. We can only be sure RDMA activity
- * has ceased once the QP has been modified. */
- list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) {
- list_del(&conn->ibc_connd_list);
- kiblnd_close_conn(conn, -ETIMEDOUT);
- kiblnd_conn_decref(conn);
- }
-
- /* In case we have enough credits to return via a
- * NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- kib_conn_t, ibc_connd_list);
- list_del(&conn->ibc_connd_list);
- kiblnd_check_sends(conn);
- kiblnd_conn_decref(conn);
- }
-}
-
-static void
-kiblnd_disconnect_conn(kib_conn_t *conn)
-{
- LASSERT(!in_interrupt());
- LASSERT(current == kiblnd_data.kib_connd);
- LASSERT(conn->ibc_state == IBLND_CONN_CLOSING);
-
- rdma_disconnect(conn->ibc_cmid);
- kiblnd_finalise_conn(conn);
-
- kiblnd_peer_notify(conn->ibc_peer);
-}
-
-int
-kiblnd_connd(void *arg)
-{
- wait_queue_t wait;
- unsigned long flags;
- kib_conn_t *conn;
- int timeout;
- int i;
- int dropped_lock;
- int peer_index = 0;
- unsigned long deadline = jiffies;
-
- cfs_block_allsigs();
-
- init_waitqueue_entry(&wait, current);
- kiblnd_data.kib_connd = current;
-
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
-
- while (!kiblnd_data.kib_shutdown) {
-
- dropped_lock = 0;
-
- if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
- conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
- list_del(&conn->ibc_list);
-
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
- dropped_lock = 1;
-
- kiblnd_destroy_conn(conn);
-
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- }
-
- if (!list_empty(&kiblnd_data.kib_connd_conns)) {
- conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
- list_del(&conn->ibc_list);
-
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
- dropped_lock = 1;
-
- kiblnd_disconnect_conn(conn);
- kiblnd_conn_decref(conn);
-
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- }
-
- /* careful with the jiffy wrap... */
- timeout = (int)(deadline - jiffies);
- if (timeout <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = kiblnd_data.kib_peer_hash_size;
-
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- dropped_lock = 1;
-
- /* Time to check for RDMA timeouts on a few more
- * peers: I do checks every 'p' seconds on a
- * proportion of the peer table and I need to check
- * every connection 'n' times within a timeout
- * interval, to ensure I detect a timeout on any
- * connection within (n+1)/n times the timeout
- * interval. */
-
- if (*kiblnd_tunables.kib_timeout > n * p)
- chunk = (chunk * n * p) /
- *kiblnd_tunables.kib_timeout;
- if (chunk == 0)
- chunk = 1;
-
- for (i = 0; i < chunk; i++) {
- kiblnd_check_conns(peer_index);
- peer_index = (peer_index + 1) %
- kiblnd_data.kib_peer_hash_size;
- }
-
- deadline += p * HZ;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- }
-
- if (dropped_lock)
- continue;
-
- /* Nothing to do for 'timeout' */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
-
- schedule_timeout(timeout);
-
- remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- }
-
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
-
- kiblnd_thread_fini();
- return 0;
-}
-
-void
-kiblnd_qp_event(struct ib_event *event, void *arg)
-{
- kib_conn_t *conn = arg;
-
- switch (event->event) {
- case IB_EVENT_COMM_EST:
- CDEBUG(D_NET, "%s established\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- return;
-
- default:
- CERROR("%s: Async QP event type %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
- return;
- }
-}
-
-static void
-kiblnd_complete(struct ib_wc *wc)
-{
- switch (kiblnd_wreqid2type(wc->wr_id)) {
- default:
- LBUG();
-
- case IBLND_WID_RDMA:
-		/* We only get RDMA completion notification if it fails. All
-		 * subsequent work items, including the final SEND, will fail
-		 * too. However we can't print out any more info about the
-		 * failing RDMA because 'tx' might be back on the idle list or
-		 * even have been reused already if we didn't manage to post
-		 * all our work items */
- CNETERR("RDMA (tx: %p) failed: %d\n",
- kiblnd_wreqid2ptr(wc->wr_id), wc->status);
- return;
-
- case IBLND_WID_TX:
- kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
- return;
-
- case IBLND_WID_RX:
- kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
- wc->byte_len);
- return;
- }
-}
-
-void
-kiblnd_cq_completion(struct ib_cq *cq, void *arg)
-{
- /* NB I'm not allowed to schedule this conn once its refcount has
- * reached 0. Since fundamentally I'm racing with scheduler threads
- * consuming my CQ I could be called after all completions have
- * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
- * and this CQ is about to be destroyed so I NOOP. */
- kib_conn_t *conn = (kib_conn_t *)arg;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
-
- LASSERT(cq == conn->ibc_cq);
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
-
- conn->ibc_ready = 1;
-
- if (!conn->ibc_scheduled &&
- (conn->ibc_nrx > 0 ||
- conn->ibc_nsends_posted > 0)) {
- kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
- conn->ibc_scheduled = 1;
- list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
-
- if (waitqueue_active(&sched->ibs_waitq))
- wake_up(&sched->ibs_waitq);
- }
-
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-}
-
-void
-kiblnd_cq_event(struct ib_event *event, void *arg)
-{
- kib_conn_t *conn = arg;
-
- CERROR("%s: async CQ event type %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
-}
-
-int
-kiblnd_scheduler(void *arg)
-{
- long id = (long)arg;
- struct kib_sched_info *sched;
- kib_conn_t *conn;
- wait_queue_t wait;
- unsigned long flags;
- struct ib_wc wc;
- int did_something;
- int busy_loops = 0;
- int rc;
-
- cfs_block_allsigs();
-
- init_waitqueue_entry(&wait, current);
-
- sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
-
- rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
- if (rc != 0) {
-		CWARN("Failed to bind on CPT %d; please verify that all CPUs are healthy and reload modules if necessary, otherwise your system may be at risk of reduced performance\n",
- sched->ibs_cpt);
- }
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
-
- while (!kiblnd_data.kib_shutdown) {
- if (busy_loops++ >= IBLND_RESCHED) {
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- cond_resched();
- busy_loops = 0;
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
- }
-
- did_something = 0;
-
- if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- kib_conn_t, ibc_sched_list);
- /* take over kib_sched_conns' ref on conn... */
- LASSERT(conn->ibc_scheduled);
- list_del(&conn->ibc_sched_list);
- conn->ibc_ready = 0;
-
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- if (rc == 0) {
- rc = ib_req_notify_cq(conn->ibc_cq,
- IB_CQ_NEXT_COMP);
- if (rc < 0) {
- CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_close_conn(conn, -EIO);
- kiblnd_conn_decref(conn);
- spin_lock_irqsave(&sched->ibs_lock,
- flags);
- continue;
- }
-
- rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- }
-
- if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc);
- kiblnd_close_conn(conn, -EIO);
- kiblnd_conn_decref(conn);
- spin_lock_irqsave(&sched->ibs_lock, flags);
- continue;
- }
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
-
- if (rc != 0 || conn->ibc_ready) {
- /* There may be another completion waiting; get
- * another scheduler to check while I handle
- * this one... */
- /* +1 ref for sched_conns */
- kiblnd_conn_addref(conn);
- list_add_tail(&conn->ibc_sched_list,
- &sched->ibs_conns);
- if (waitqueue_active(&sched->ibs_waitq))
- wake_up(&sched->ibs_waitq);
- } else {
- conn->ibc_scheduled = 0;
- }
-
- if (rc != 0) {
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
- kiblnd_complete(&wc);
-
- spin_lock_irqsave(&sched->ibs_lock, flags);
- }
-
- kiblnd_conn_decref(conn); /* ...drop my ref from above */
- did_something = 1;
- }
-
- if (did_something)
- continue;
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- schedule();
- busy_loops = 0;
-
- remove_wait_queue(&sched->ibs_waitq, &wait);
- spin_lock_irqsave(&sched->ibs_lock, flags);
- }
-
- spin_unlock_irqrestore(&sched->ibs_lock, flags);
-
- kiblnd_thread_fini();
- return 0;
-}
-
-int
-kiblnd_failover_thread(void *arg)
-{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
- wait_queue_t wait;
- unsigned long flags;
- int rc;
-
- LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
-
- cfs_block_allsigs();
-
- init_waitqueue_entry(&wait, current);
- write_lock_irqsave(glock, flags);
-
- while (!kiblnd_data.kib_shutdown) {
- int do_failover = 0;
- int long_sleep;
-
- list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
- ibd_fail_list) {
- if (time_before(cfs_time_current(),
- dev->ibd_next_failover))
- continue;
- do_failover = 1;
- break;
- }
-
- if (do_failover) {
- list_del_init(&dev->ibd_fail_list);
- dev->ibd_failover = 1;
- write_unlock_irqrestore(glock, flags);
-
- rc = kiblnd_dev_failover(dev);
-
- write_lock_irqsave(glock, flags);
-
- LASSERT(dev->ibd_failover);
- dev->ibd_failover = 0;
-			if (rc >= 0) { /* Device is OK or failover succeeded */
- dev->ibd_next_failover = cfs_time_shift(3);
- continue;
- }
-
- /* failed to failover, retry later */
- dev->ibd_next_failover =
- cfs_time_shift(min(dev->ibd_failed_failover, 10));
- if (kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
-
- continue;
- }
-
-		/* long sleep if there are no more pending failovers */
- long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
- write_unlock_irqrestore(glock, flags);
-
- rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
- cfs_time_seconds(1));
- remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
- write_lock_irqsave(glock, flags);
-
- if (!long_sleep || rc != 0)
- continue;
-
-		/* After a long sleep, routinely check all active devices;
-		 * we need a check like this because if there is no active
-		 * connection on the dev and no SEND from the local node, we
-		 * may listen on the wrong HCA forever after a bonding failover */
- list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (kiblnd_dev_can_failover(dev)) {
- list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
- }
- }
-
- write_unlock_irqrestore(glock, flags);
-
- kiblnd_thread_fini();
- return 0;
-}
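
For reference, the timeout scan in kiblnd_connd() above sizes its per-pass chunk of the peer hash table so that every connection is still examined roughly 'n' times per timeout interval while only a fraction of the table is walked every 'p' seconds. The minimal standalone sketch below restates just that arithmetic; it is illustrative only (not part of the removed file), and hash_size and timeout are stand-ins for kib_peer_hash_size and the kib_timeout tunable.

#include <stdio.h>

/* Mirror of the chunk sizing in kiblnd_connd(): with 'n' checks wanted
 * per timeout interval and one pass every 'p' seconds, scan
 * hash_size * n * p / timeout buckets per pass (at least one). */
static int conn_check_chunk(int hash_size, int timeout)
{
	const int n = 4;	/* checks wanted per timeout interval */
	const int p = 1;	/* seconds between passes */
	int chunk = hash_size;

	if (timeout > n * p)
		chunk = (chunk * n * p) / timeout;
	if (chunk == 0)
		chunk = 1;
	return chunk;
}

int main(void)
{
	/* e.g. 101 hash buckets and a 50 s timeout -> 8 buckets per pass */
	printf("chunk = %d\n", conn_check_chunk(101, 50));
	return 0;
}
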
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
deleted file mode 100644
index b3d1b5d627cb..000000000000
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/o2iblnd/o2iblnd_modparams.c
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include "o2iblnd.h"
-
-static int service = 987;
-module_param(service, int, 0444);
-MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");
-
-static int cksum;
-module_param(cksum, int, 0644);
-MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
-
-static int timeout = 50;
-module_param(timeout, int, 0644);
-MODULE_PARM_DESC(timeout, "timeout (seconds)");
-
-/* Number of threads in each scheduler pool (which is per-CPT);
- * we will estimate a reasonable value based on the number of CPUs
- * if it is set to zero. */
-static int nscheds;
-module_param(nscheds, int, 0444);
-MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
-
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int ntx = 512;
-module_param(ntx, int, 0444);
-MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
-
-/* NB: this value is shared by all CPTs */
-static int credits = 256;
-module_param(credits, int, 0444);
-MODULE_PARM_DESC(credits, "# concurrent sends");
-
-static int peer_credits = 8;
-module_param(peer_credits, int, 0444);
-MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
-
-static int peer_credits_hiw;
-module_param(peer_credits_hiw, int, 0444);
-MODULE_PARM_DESC(peer_credits_hiw, "when to eagerly return credits");
-
-static int peer_buffer_credits;
-module_param(peer_buffer_credits, int, 0444);
-MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
-
-static int peer_timeout = 180;
-module_param(peer_timeout, int, 0444);
-MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
-
-static char *ipif_name = "ib0";
-module_param(ipif_name, charp, 0444);
-MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
-
-static int retry_count = 5;
-module_param(retry_count, int, 0644);
-MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
-
-static int rnr_retry_count = 6;
-module_param(rnr_retry_count, int, 0644);
-MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
-
-static int keepalive = 100;
-module_param(keepalive, int, 0644);
-MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
-
-static int ib_mtu;
-module_param(ib_mtu, int, 0444);
-MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
-
-static int concurrent_sends;
-module_param(concurrent_sends, int, 0444);
-MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
-
-static int map_on_demand;
-module_param(map_on_demand, int, 0444);
-MODULE_PARM_DESC(map_on_demand, "map on demand");
-
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int fmr_pool_size = 512;
-module_param(fmr_pool_size, int, 0444);
-MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
-
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int fmr_flush_trigger = 384;
-module_param(fmr_flush_trigger, int, 0444);
-MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
-
-static int fmr_cache = 1;
-module_param(fmr_cache, int, 0444);
-MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
-
-/*
- * 0: disable failover
- * 1: enable failover if necessary
- * 2: force failover (for debugging)
- */
-static int dev_failover;
-module_param(dev_failover, int, 0444);
-MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
-
-
-static int require_privileged_port;
-module_param(require_privileged_port, int, 0644);
-MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
-
-static int use_privileged_port = 1;
-module_param(use_privileged_port, int, 0644);
-MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
-
-kib_tunables_t kiblnd_tunables = {
- .kib_dev_failover = &dev_failover,
- .kib_service = &service,
- .kib_cksum = &cksum,
- .kib_timeout = &timeout,
- .kib_keepalive = &keepalive,
- .kib_ntx = &ntx,
- .kib_credits = &credits,
- .kib_peertxcredits = &peer_credits,
- .kib_peercredits_hiw = &peer_credits_hiw,
- .kib_peerrtrcredits = &peer_buffer_credits,
- .kib_peertimeout = &peer_timeout,
- .kib_default_ipif = &ipif_name,
- .kib_retry_count = &retry_count,
- .kib_rnr_retry_count = &rnr_retry_count,
- .kib_concurrent_sends = &concurrent_sends,
- .kib_ib_mtu = &ib_mtu,
- .kib_map_on_demand = &map_on_demand,
- .kib_fmr_pool_size = &fmr_pool_size,
- .kib_fmr_flush_trigger = &fmr_flush_trigger,
- .kib_fmr_cache = &fmr_cache,
- .kib_require_priv_port = &require_privileged_port,
- .kib_use_priv_port = &use_privileged_port,
- .kib_nscheds = &nscheds
-};
-
-int
-kiblnd_tunables_init(void)
-{
- if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
- CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
- *kiblnd_tunables.kib_ib_mtu);
- return -EINVAL;
- }
-
- if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
-
- if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
-
- if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
- *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
-
- if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
-
- if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
-
- if (*kiblnd_tunables.kib_map_on_demand < 0 ||
- *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
- *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
-
- if (*kiblnd_tunables.kib_map_on_demand == 1)
-		*kiblnd_tunables.kib_map_on_demand = 2; /* it makes no sense to create a map for only one fragment */
-
- if (*kiblnd_tunables.kib_concurrent_sends == 0) {
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
- else
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
- }
-
- if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
-
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
-
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
-		CWARN("Concurrent sends %d is lower than message queue size %d; performance may drop slightly.\n",
- *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
- }
-
- return 0;
-}
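
To make the clamping in kiblnd_tunables_init() above easier to follow, here is a minimal standalone sketch of how concurrent_sends is derived from peer_credits; it is illustrative only (not part of the removed file), the plain-int signature is an assumption, and the IBLND_MAX_RDMA_FRAGS value used here is merely a stand-in for the driver's real definition.

#include <stdio.h>

#define IBLND_MAX_RDMA_FRAGS 256	/* stand-in value, for illustration only */

/* Mirror of the concurrent_sends logic in kiblnd_tunables_init():
 * default it from peer_credits (doubled when map-on-demand keeps the
 * work queue small), then clamp it to [peer_credits/2, peer_credits*2]. */
static int clamp_concurrent_sends(int concurrent_sends, int peer_credits,
				  int map_on_demand)
{
	if (concurrent_sends == 0) {
		if (map_on_demand > 0 &&
		    map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
			concurrent_sends = peer_credits * 2;
		else
			concurrent_sends = peer_credits;
	}

	if (concurrent_sends > peer_credits * 2)
		concurrent_sends = peer_credits * 2;
	if (concurrent_sends < peer_credits / 2)
		concurrent_sends = peer_credits / 2;

	return concurrent_sends;
}

int main(void)
{
	/* module defaults: concurrent_sends=0, peer_credits=8, map_on_demand=0 -> 8 */
	printf("concurrent_sends = %d\n", clamp_concurrent_sends(0, 8, 0));
	return 0;
}
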
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile
deleted file mode 100644
index c011581d3453..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_LNET) += ksocklnd.o
-
-ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib.o
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
deleted file mode 100644
index 904d15837cbb..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ /dev/null
@@ -1,2879 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/klnds/socklnd/socklnd.c
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- */
-
-#include "socklnd.h"
-
-static lnd_t the_ksocklnd;
-ksock_nal_data_t ksocknal_data;
-
-static ksock_interface_t *
-ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
-{
- ksock_net_t *net = ni->ni_data;
- int i;
- ksock_interface_t *iface;
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- LASSERT(i < LNET_MAX_INTERFACES);
- iface = &net->ksnn_interfaces[i];
-
- if (iface->ksni_ipaddr == ip)
- return iface;
- }
-
- return NULL;
-}
-
-static ksock_route_t *
-ksocknal_create_route(__u32 ipaddr, int port)
-{
- ksock_route_t *route;
-
- LIBCFS_ALLOC(route, sizeof(*route));
- if (route == NULL)
- return NULL;
-
- atomic_set(&route->ksnr_refcount, 1);
- route->ksnr_peer = NULL;
- route->ksnr_retry_interval = 0; /* OK to connect at any time */
- route->ksnr_ipaddr = ipaddr;
- route->ksnr_port = port;
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
- route->ksnr_connected = 0;
- route->ksnr_deleted = 0;
- route->ksnr_conn_count = 0;
- route->ksnr_share_count = 0;
-
- return route;
-}
-
-void
-ksocknal_destroy_route(ksock_route_t *route)
-{
- LASSERT(atomic_read(&route->ksnr_refcount) == 0);
-
- if (route->ksnr_peer != NULL)
- ksocknal_peer_decref(route->ksnr_peer);
-
- LIBCFS_FREE(route, sizeof(*route));
-}
-
-static int
-ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
-{
- ksock_net_t *net = ni->ni_data;
- ksock_peer_t *peer;
-
- LASSERT(id.nid != LNET_NID_ANY);
- LASSERT(id.pid != LNET_PID_ANY);
- LASSERT(!in_interrupt());
-
- LIBCFS_ALLOC(peer, sizeof(*peer));
- if (peer == NULL)
- return -ENOMEM;
-
- peer->ksnp_ni = ni;
- peer->ksnp_id = id;
- atomic_set(&peer->ksnp_refcount, 1); /* 1 ref for caller */
- peer->ksnp_closing = 0;
- peer->ksnp_accepting = 0;
- peer->ksnp_proto = NULL;
- peer->ksnp_last_alive = 0;
- peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
-
- INIT_LIST_HEAD(&peer->ksnp_conns);
- INIT_LIST_HEAD(&peer->ksnp_routes);
- INIT_LIST_HEAD(&peer->ksnp_tx_queue);
- INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
- spin_lock_init(&peer->ksnp_lock);
-
- spin_lock_bh(&net->ksnn_lock);
-
- if (net->ksnn_shutdown) {
- spin_unlock_bh(&net->ksnn_lock);
-
- LIBCFS_FREE(peer, sizeof(*peer));
- CERROR("Can't create peer: network shutdown\n");
- return -ESHUTDOWN;
- }
-
- net->ksnn_npeers++;
-
- spin_unlock_bh(&net->ksnn_lock);
-
- *peerp = peer;
- return 0;
-}
-
-void
-ksocknal_destroy_peer(ksock_peer_t *peer)
-{
- ksock_net_t *net = peer->ksnp_ni->ni_data;
-
- CDEBUG(D_NET, "peer %s %p deleted\n",
- libcfs_id2str(peer->ksnp_id), peer);
-
- LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
- LASSERT(peer->ksnp_accepting == 0);
- LASSERT(list_empty(&peer->ksnp_conns));
- LASSERT(list_empty(&peer->ksnp_routes));
- LASSERT(list_empty(&peer->ksnp_tx_queue));
- LASSERT(list_empty(&peer->ksnp_zc_req_list));
-
- LIBCFS_FREE(peer, sizeof(*peer));
-
- /* NB a peer's connections and routes keep a reference on their peer
- * until they are destroyed, so we can be assured that _all_ state to
- * do with this peer has been cleaned up when its refcount drops to
- * zero. */
- spin_lock_bh(&net->ksnn_lock);
- net->ksnn_npeers--;
- spin_unlock_bh(&net->ksnn_lock);
-}
-
-ksock_peer_t *
-ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
-{
- struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
- struct list_head *tmp;
- ksock_peer_t *peer;
-
- list_for_each(tmp, peer_list) {
-
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
-
- LASSERT(!peer->ksnp_closing);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- if (peer->ksnp_id.nid != id.nid ||
- peer->ksnp_id.pid != id.pid)
- continue;
-
- CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
- peer, libcfs_id2str(id),
- atomic_read(&peer->ksnp_refcount));
- return peer;
- }
- return NULL;
-}
-
-ksock_peer_t *
-ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
-{
- ksock_peer_t *peer;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) /* +1 ref for caller? */
- ksocknal_peer_addref(peer);
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- return peer;
-}
-
-static void
-ksocknal_unlink_peer_locked(ksock_peer_t *peer)
-{
- int i;
- __u32 ip;
- ksock_interface_t *iface;
-
- for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
- LASSERT(i < LNET_MAX_INTERFACES);
- ip = peer->ksnp_passive_ips[i];
-
- iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
- /* All IPs in peer->ksnp_passive_ips[] come from the
- * interface list, therefore the call must succeed. */
- LASSERT(iface != NULL);
-
- CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
- peer, iface, iface->ksni_nroutes);
- iface->ksni_npeers--;
- }
-
- LASSERT(list_empty(&peer->ksnp_conns));
- LASSERT(list_empty(&peer->ksnp_routes));
- LASSERT(!peer->ksnp_closing);
- peer->ksnp_closing = 1;
- list_del(&peer->ksnp_list);
- /* lose peerlist's ref */
- ksocknal_peer_decref(peer);
-}
-
-static int
-ksocknal_get_peer_info(lnet_ni_t *ni, int index,
- lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
- int *port, int *conn_count, int *share_count)
-{
- ksock_peer_t *peer;
- struct list_head *ptmp;
- ksock_route_t *route;
- struct list_head *rtmp;
- int i;
- int j;
- int rc = -ENOENT;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- if (peer->ksnp_n_passive_ips == 0 &&
- list_empty(&peer->ksnp_routes)) {
- if (index-- > 0)
- continue;
-
- *id = peer->ksnp_id;
- *myip = 0;
- *peer_ip = 0;
- *port = 0;
- *conn_count = 0;
- *share_count = 0;
- rc = 0;
- goto out;
- }
-
- for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
- if (index-- > 0)
- continue;
-
- *id = peer->ksnp_id;
- *myip = peer->ksnp_passive_ips[j];
- *peer_ip = 0;
- *port = 0;
- *conn_count = 0;
- *share_count = 0;
- rc = 0;
- goto out;
- }
-
- list_for_each(rtmp, &peer->ksnp_routes) {
- if (index-- > 0)
- continue;
-
- route = list_entry(rtmp, ksock_route_t,
- ksnr_list);
-
- *id = peer->ksnp_id;
- *myip = route->ksnr_myipaddr;
- *peer_ip = route->ksnr_ipaddr;
- *port = route->ksnr_port;
- *conn_count = route->ksnr_conn_count;
- *share_count = route->ksnr_share_count;
- rc = 0;
- goto out;
- }
- }
- }
- out:
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return rc;
-}
-
-static void
-ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
-{
- ksock_peer_t *peer = route->ksnr_peer;
- int type = conn->ksnc_type;
- ksock_interface_t *iface;
-
- conn->ksnc_route = route;
- ksocknal_route_addref(route);
-
- if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
- if (route->ksnr_myipaddr == 0) {
- /* route wasn't bound locally yet (the initial route) */
- CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr,
- &conn->ksnc_myipaddr);
- } else {
- CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr,
- &route->ksnr_myipaddr,
- &conn->ksnc_myipaddr);
-
- iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myipaddr);
- if (iface != NULL)
- iface->ksni_nroutes--;
- }
- route->ksnr_myipaddr = conn->ksnc_myipaddr;
- iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myipaddr);
- if (iface != NULL)
- iface->ksni_nroutes++;
- }
-
- route->ksnr_connected |= (1<<type);
- route->ksnr_conn_count++;
-
- /* Successful connection => further attempts can
- * proceed immediately */
- route->ksnr_retry_interval = 0;
-}
-
-static void
-ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
-{
- struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_route_t *route2;
-
- LASSERT(!peer->ksnp_closing);
- LASSERT(route->ksnr_peer == NULL);
- LASSERT(!route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
- LASSERT(route->ksnr_connected == 0);
-
- /* LASSERT(unique) */
- list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
-
- if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
- CERROR("Duplicate route %s %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr);
- LBUG();
- }
- }
-
- route->ksnr_peer = peer;
- ksocknal_peer_addref(peer);
- /* peer's routelist takes over my ref on 'route' */
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
-
- if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
- continue;
-
- ksocknal_associate_route_conn_locked(route, conn);
- /* keep going (typed routes) */
- }
-}
-
-static void
-ksocknal_del_route_locked(ksock_route_t *route)
-{
- ksock_peer_t *peer = route->ksnr_peer;
- ksock_interface_t *iface;
- ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
-
- LASSERT(!route->ksnr_deleted);
-
- /* Close associated conns */
- list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
-
- if (conn->ksnc_route != route)
- continue;
-
- ksocknal_close_conn_locked(conn, 0);
- }
-
- if (route->ksnr_myipaddr != 0) {
- iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myipaddr);
- if (iface != NULL)
- iface->ksni_nroutes--;
- }
-
- route->ksnr_deleted = 1;
- list_del(&route->ksnr_list);
- ksocknal_route_decref(route); /* drop peer's ref */
-
- if (list_empty(&peer->ksnp_routes) &&
- list_empty(&peer->ksnp_conns)) {
- /* I've just removed the last route to a peer with no active
- * connections */
- ksocknal_unlink_peer_locked(peer);
- }
-}
-
-int
-ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
-{
- struct list_head *tmp;
- ksock_peer_t *peer;
- ksock_peer_t *peer2;
- ksock_route_t *route;
- ksock_route_t *route2;
- int rc;
-
- if (id.nid == LNET_NID_ANY ||
- id.pid == LNET_PID_ANY)
- return -EINVAL;
-
- /* Have a brand new peer ready... */
- rc = ksocknal_create_peer(&peer, ni, id);
- if (rc != 0)
- return rc;
-
- route = ksocknal_create_route(ipaddr, port);
- if (route == NULL) {
- ksocknal_peer_decref(peer);
- return -ENOMEM;
- }
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- /* always called with a ref on ni, so shutdown can't have started */
- LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
-
- peer2 = ksocknal_find_peer_locked(ni, id);
- if (peer2 != NULL) {
- ksocknal_peer_decref(peer);
- peer = peer2;
- } else {
- /* peer table takes my ref on peer */
- list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(id.nid));
- }
-
- route2 = NULL;
- list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
-
- if (route2->ksnr_ipaddr == ipaddr)
- break;
-
- route2 = NULL;
- }
- if (route2 == NULL) {
- ksocknal_add_route_locked(peer, route);
- route->ksnr_share_count++;
- } else {
- ksocknal_route_decref(route);
- route2->ksnr_share_count++;
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return 0;
-}
-
-static void
-ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
-{
- ksock_conn_t *conn;
- ksock_route_t *route;
- struct list_head *tmp;
- struct list_head *nxt;
- int nshared;
-
- LASSERT(!peer->ksnp_closing);
-
- /* Extra ref prevents peer disappearing until I'm done with it */
- ksocknal_peer_addref(peer);
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
-
- /* no match */
- if (!(ip == 0 || route->ksnr_ipaddr == ip))
- continue;
-
- route->ksnr_share_count = 0;
- /* This deletes associated conns too */
- ksocknal_del_route_locked(route);
- }
-
- nshared = 0;
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
- nshared += route->ksnr_share_count;
- }
-
- if (nshared == 0) {
- /* remove everything else if there are no explicit entries
- * left */
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
-
- /* we should only be removing auto-entries */
- LASSERT(route->ksnr_share_count == 0);
- ksocknal_del_route_locked(route);
- }
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
-
- ksocknal_close_conn_locked(conn, 0);
- }
- }
-
- ksocknal_peer_decref(peer);
- /* NB peer unlinks itself when last conn/route is removed */
-}
-
-static int
-ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
-{
- LIST_HEAD(zombies);
- struct list_head *ptmp;
- struct list_head *pnxt;
- ksock_peer_t *peer;
- int lo;
- int hi;
- int i;
- int rc = -ENOENT;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
- lo = 0;
- hi = ksocknal_data.ksnd_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
- (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
- continue;
-
- ksocknal_peer_addref(peer); /* a ref for me... */
-
- ksocknal_del_peer_locked(peer, ip);
-
- if (peer->ksnp_closing &&
- !list_empty(&peer->ksnp_tx_queue)) {
- LASSERT(list_empty(&peer->ksnp_conns));
- LASSERT(list_empty(&peer->ksnp_routes));
-
- list_splice_init(&peer->ksnp_tx_queue,
- &zombies);
- }
-
- ksocknal_peer_decref(peer); /* ...till here */
-
- rc = 0; /* matched! */
- }
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_txlist_done(ni, &zombies, 1);
-
- return rc;
-}
-
-static ksock_conn_t *
-ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
-{
- ksock_peer_t *peer;
- struct list_head *ptmp;
- ksock_conn_t *conn;
- struct list_head *ctmp;
- int i;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
-
- LASSERT(!peer->ksnp_closing);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- list_for_each(ctmp, &peer->ksnp_conns) {
- if (index-- > 0)
- continue;
-
- conn = list_entry(ctmp, ksock_conn_t,
- ksnc_list);
- ksocknal_conn_addref(conn);
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return conn;
- }
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return NULL;
-}
-
-static ksock_sched_t *
-ksocknal_choose_scheduler_locked(unsigned int cpt)
-{
- struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
- ksock_sched_t *sched;
- int i;
-
- LASSERT(info->ksi_nthreads > 0);
-
- sched = &info->ksi_scheds[0];
- /*
- * NB: it's safe so far, but info->ksi_nthreads could be changed
- * at runtime when we have dynamic LNet configuration, then we
- * need to take care of this.
- */
- for (i = 1; i < info->ksi_nthreads; i++) {
- if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
- sched = &info->ksi_scheds[i];
- }
-
- return sched;
-}
-
-static int
-ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
-{
- ksock_net_t *net = ni->ni_data;
- int i;
- int nip;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- nip = net->ksnn_ninterfaces;
- LASSERT(nip <= LNET_MAX_INTERFACES);
-
- /* Only offer interfaces for additional connections if I have
- * more than one. */
- if (nip < 2) {
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return 0;
- }
-
- for (i = 0; i < nip; i++) {
- ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
- LASSERT(ipaddrs[i] != 0);
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return nip;
-}
-
-static int
-ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
-{
- int best_netmatch = 0;
- int best_xor = 0;
- int best = -1;
- int this_xor;
- int this_netmatch;
- int i;
-
- for (i = 0; i < nips; i++) {
- if (ips[i] == 0)
- continue;
-
- this_xor = ips[i] ^ iface->ksni_ipaddr;
- this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
-
- if (!(best < 0 ||
- best_netmatch < this_netmatch ||
- (best_netmatch == this_netmatch &&
- best_xor > this_xor)))
- continue;
-
- best = i;
- best_netmatch = this_netmatch;
- best_xor = this_xor;
- }
-
- LASSERT(best >= 0);
- return best;
-}
-
-static int
-ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
-{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- ksock_net_t *net = peer->ksnp_ni->ni_data;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
- int n_ips;
- int i;
- int j;
- int k;
- __u32 ip;
- __u32 xor;
- int this_netmatch;
- int best_netmatch;
- int best_npeers;
-
- /* CAVEAT EMPTOR: We do all our interface matching with an
- * exclusive hold of global lock at IRQ priority. We're only
- * expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness shouldn't matter */
-
- /* Also note that I'm not going to return more than n_peerips
- * interfaces, even if I have more myself */
-
- write_lock_bh(global_lock);
-
- LASSERT(n_peerips <= LNET_MAX_INTERFACES);
- LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
-
- /* Only match interfaces for additional connections
- * if I have > 1 interface */
- n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
- min(n_peerips, net->ksnn_ninterfaces);
-
- for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
- /* ^ yes really... */
-
- /* If we have any new interfaces, first tick off all the
- * peer IPs that match old interfaces, then choose new
-	 * interfaces to match the remaining peer IPs.
- * We don't forget interfaces we've stopped using; we might
- * start using them again... */
-
- if (i < peer->ksnp_n_passive_ips) {
- /* Old interface. */
- ip = peer->ksnp_passive_ips[i];
- best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
-
- } else {
- /* choose a new interface */
- LASSERT(i == peer->ksnp_n_passive_ips);
-
- best_iface = NULL;
- best_netmatch = 0;
- best_npeers = 0;
-
- for (j = 0; j < net->ksnn_ninterfaces; j++) {
- iface = &net->ksnn_interfaces[j];
- ip = iface->ksni_ipaddr;
-
- for (k = 0; k < peer->ksnp_n_passive_ips; k++)
- if (peer->ksnp_passive_ips[k] == ip)
- break;
-
- if (k < peer->ksnp_n_passive_ips) /* using it already */
- continue;
-
- k = ksocknal_match_peerip(iface, peerips, n_peerips);
- xor = ip ^ peerips[k];
- this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
-
- if (!(best_iface == NULL ||
- best_netmatch < this_netmatch ||
- (best_netmatch == this_netmatch &&
- best_npeers > iface->ksni_npeers)))
- continue;
-
- best_iface = iface;
- best_netmatch = this_netmatch;
- best_npeers = iface->ksni_npeers;
- }
-
- best_iface->ksni_npeers++;
- ip = best_iface->ksni_ipaddr;
- peer->ksnp_passive_ips[i] = ip;
- peer->ksnp_n_passive_ips = i+1;
- }
-
- /* mark the best matching peer IP used */
- j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
- peerips[j] = 0;
- }
-
- /* Overwrite input peer IP addresses */
- memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
-
- write_unlock_bh(global_lock);
-
- return n_ips;
-}
-
-static void
-ksocknal_create_routes(ksock_peer_t *peer, int port,
- __u32 *peer_ipaddrs, int npeer_ipaddrs)
-{
- ksock_route_t *newroute = NULL;
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- lnet_ni_t *ni = peer->ksnp_ni;
- ksock_net_t *net = ni->ni_data;
- struct list_head *rtmp;
- ksock_route_t *route;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
- int best_netmatch;
- int this_netmatch;
- int best_nroutes;
- int i;
- int j;
-
- /* CAVEAT EMPTOR: We do all our interface matching with an
- * exclusive hold of global lock at IRQ priority. We're only
- * expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness here shouldn't matter */
-
- write_lock_bh(global_lock);
-
- if (net->ksnn_ninterfaces < 2) {
- /* Only create additional connections
- * if I have > 1 interface */
- write_unlock_bh(global_lock);
- return;
- }
-
- LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
-
- for (i = 0; i < npeer_ipaddrs; i++) {
- if (newroute != NULL) {
- newroute->ksnr_ipaddr = peer_ipaddrs[i];
- } else {
- write_unlock_bh(global_lock);
-
- newroute = ksocknal_create_route(peer_ipaddrs[i], port);
- if (newroute == NULL)
- return;
-
- write_lock_bh(global_lock);
- }
-
- if (peer->ksnp_closing) {
- /* peer got closed under me */
- break;
- }
-
- /* Already got a route? */
- route = NULL;
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
-
- if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
- break;
-
- route = NULL;
- }
- if (route != NULL)
- continue;
-
- best_iface = NULL;
- best_nroutes = 0;
- best_netmatch = 0;
-
- LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
-
- /* Select interface to connect from */
- for (j = 0; j < net->ksnn_ninterfaces; j++) {
- iface = &net->ksnn_interfaces[j];
-
- /* Using this interface already? */
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t,
- ksnr_list);
-
- if (route->ksnr_myipaddr == iface->ksni_ipaddr)
- break;
-
- route = NULL;
- }
- if (route != NULL)
- continue;
-
- this_netmatch = (((iface->ksni_ipaddr ^
- newroute->ksnr_ipaddr) &
- iface->ksni_netmask) == 0) ? 1 : 0;
-
- if (!(best_iface == NULL ||
- best_netmatch < this_netmatch ||
- (best_netmatch == this_netmatch &&
- best_nroutes > iface->ksni_nroutes)))
- continue;
-
- best_iface = iface;
- best_netmatch = this_netmatch;
- best_nroutes = iface->ksni_nroutes;
- }
-
- if (best_iface == NULL)
- continue;
-
- newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
- best_iface->ksni_nroutes++;
-
- ksocknal_add_route_locked(peer, newroute);
- newroute = NULL;
- }
-
- write_unlock_bh(global_lock);
- if (newroute != NULL)
- ksocknal_route_decref(newroute);
-}
-
-int
-ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
-{
- ksock_connreq_t *cr;
- int rc;
- __u32 peer_ip;
- int peer_port;
-
- rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT(rc == 0); /* we succeeded before */
-
- LIBCFS_ALLOC(cr, sizeof(*cr));
- if (cr == NULL) {
- LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
- &peer_ip);
- return -ENOMEM;
- }
-
- lnet_ni_addref(ni);
- cr->ksncr_ni = ni;
- cr->ksncr_sock = sock;
-
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-
- list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
- wake_up(&ksocknal_data.ksnd_connd_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
- return 0;
-}
-
-static int
-ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
-{
- ksock_route_t *route;
-
- list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
-
- if (route->ksnr_ipaddr == ipaddr)
- return route->ksnr_connecting;
- }
- return 0;
-}
-
-int
-ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
- struct socket *sock, int type)
-{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- LIST_HEAD(zombies);
- lnet_process_id_t peerid;
- struct list_head *tmp;
- __u64 incarnation;
- ksock_conn_t *conn;
- ksock_conn_t *conn2;
- ksock_peer_t *peer = NULL;
- ksock_peer_t *peer2;
- ksock_sched_t *sched;
- ksock_hello_msg_t *hello;
- int cpt;
- ksock_tx_t *tx;
- ksock_tx_t *txtmp;
- int rc;
- int active;
- char *warn = NULL;
-
- active = (route != NULL);
-
- LASSERT(active == (type != SOCKLND_CONN_NONE));
-
- LIBCFS_ALLOC(conn, sizeof(*conn));
- if (conn == NULL) {
- rc = -ENOMEM;
- goto failed_0;
- }
-
- conn->ksnc_peer = NULL;
- conn->ksnc_route = NULL;
- conn->ksnc_sock = sock;
-	/* 2 refs: 1 for conn, an extra ref prevents the socket being
-	 * closed before the connection is established */
- atomic_set(&conn->ksnc_sock_refcount, 2);
- conn->ksnc_type = type;
- ksocknal_lib_save_callback(sock, conn);
- atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
-
- conn->ksnc_rx_ready = 0;
- conn->ksnc_rx_scheduled = 0;
-
- INIT_LIST_HEAD(&conn->ksnc_tx_queue);
- conn->ksnc_tx_ready = 0;
- conn->ksnc_tx_scheduled = 0;
- conn->ksnc_tx_carrier = NULL;
- atomic_set(&conn->ksnc_tx_nob, 0);
-
- LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
- kshm_ips[LNET_MAX_INTERFACES]));
- if (hello == NULL) {
- rc = -ENOMEM;
- goto failed_1;
- }
-
- /* stash conn's local and remote addrs */
- rc = ksocknal_lib_get_conn_addrs(conn);
- if (rc != 0)
- goto failed_1;
-
- /* Find out/confirm peer's NID and connection type and get the
- * vector of interfaces she's willing to let me connect to.
- * Passive connections use the listener timeout since the peer sends
- * eagerly */
-
- if (active) {
- peer = route->ksnr_peer;
- LASSERT(ni == peer->ksnp_ni);
-
- /* Active connection sends HELLO eagerly */
- hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
- peerid = peer->ksnp_id;
-
- write_lock_bh(global_lock);
- conn->ksnc_proto = peer->ksnp_proto;
- write_unlock_bh(global_lock);
-
- if (conn->ksnc_proto == NULL) {
- conn->ksnc_proto = &ksocknal_protocol_v3x;
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 2)
- conn->ksnc_proto = &ksocknal_protocol_v2x;
- else if (*ksocknal_tunables.ksnd_protocol == 1)
- conn->ksnc_proto = &ksocknal_protocol_v1x;
-#endif
- }
-
- rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
- if (rc != 0)
- goto failed_1;
- } else {
- peerid.nid = LNET_NID_ANY;
- peerid.pid = LNET_PID_ANY;
-
- /* Passive, get protocol from peer */
- conn->ksnc_proto = NULL;
- }
-
- rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
- if (rc < 0)
- goto failed_1;
-
- LASSERT(rc == 0 || active);
- LASSERT(conn->ksnc_proto != NULL);
- LASSERT(peerid.nid != LNET_NID_ANY);
-
- cpt = lnet_cpt_of_nid(peerid.nid);
-
- if (active) {
- ksocknal_peer_addref(peer);
- write_lock_bh(global_lock);
- } else {
- rc = ksocknal_create_peer(&peer, ni, peerid);
- if (rc != 0)
- goto failed_1;
-
- write_lock_bh(global_lock);
-
- /* called with a ref on ni, so shutdown can't have started */
- LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
-
- peer2 = ksocknal_find_peer_locked(ni, peerid);
- if (peer2 == NULL) {
- /* NB this puts an "empty" peer in the peer
- * table (which takes my ref) */
- list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(peerid.nid));
- } else {
- ksocknal_peer_decref(peer);
- peer = peer2;
- }
-
- /* +1 ref for me */
- ksocknal_peer_addref(peer);
- peer->ksnp_accepting++;
-
- /* Am I already connecting to this guy? Resolve in
- * favour of higher NID... */
- if (peerid.nid < ni->ni_nid &&
- ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
- rc = EALREADY;
- warn = "connection race resolution";
- goto failed_2;
- }
- }
-
- if (peer->ksnp_closing ||
- (active && route->ksnr_deleted)) {
- /* peer/route got closed under me */
- rc = -ESTALE;
- warn = "peer/route removed";
- goto failed_2;
- }
-
- if (peer->ksnp_proto == NULL) {
- /* Never connected before.
- * NB recv_hello may have returned EPROTO to signal my peer
- * wants a different protocol than the one I asked for.
- */
- LASSERT(list_empty(&peer->ksnp_conns));
-
- peer->ksnp_proto = conn->ksnc_proto;
- peer->ksnp_incarnation = incarnation;
- }
-
- if (peer->ksnp_proto != conn->ksnc_proto ||
- peer->ksnp_incarnation != incarnation) {
- /* Peer rebooted or I've got the wrong protocol version */
- ksocknal_close_peer_conns_locked(peer, 0, 0);
-
- peer->ksnp_proto = NULL;
- rc = ESTALE;
- warn = peer->ksnp_incarnation != incarnation ?
- "peer rebooted" :
- "wrong proto version";
- goto failed_2;
- }
-
- switch (rc) {
- default:
- LBUG();
- case 0:
- break;
- case EALREADY:
- warn = "lost conn race";
- goto failed_2;
- case EPROTO:
- warn = "retry with different protocol version";
- goto failed_2;
- }
-
- /* Refuse to duplicate an existing connection, unless this is a
- * loopback connection */
- if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
- list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
-
- if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
- conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
- conn2->ksnc_type != conn->ksnc_type)
- continue;
-
- /* Reply on a passive connection attempt so the peer
- * realises we're connected. */
- LASSERT(rc == 0);
- if (!active)
- rc = EALREADY;
-
- warn = "duplicate";
- goto failed_2;
- }
- }
-
- /* If the connection created by this route didn't bind to the IP
- * address the route connected to, the connection/route matching
- * code below probably isn't going to work. */
- if (active &&
- route->ksnr_ipaddr != conn->ksnc_ipaddr) {
- CERROR("Route %s %pI4h connected to %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr,
- &conn->ksnc_ipaddr);
- }
-
- /* Search for a route corresponding to the new connection and
- * create an association. This allows incoming connections created
- * by routes in my peer to match my own route entries so I don't
- * continually create duplicate routes. */
- list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
-
- if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
- continue;
-
- ksocknal_associate_route_conn_locked(route, conn);
- break;
- }
-
- conn->ksnc_peer = peer; /* conn takes my ref on peer */
- peer->ksnp_last_alive = cfs_time_current();
- peer->ksnp_send_keepalive = 0;
- peer->ksnp_error = 0;
-
- sched = ksocknal_choose_scheduler_locked(cpt);
- sched->kss_nconns++;
- conn->ksnc_scheduler = sched;
-
- conn->ksnc_tx_last_post = cfs_time_current();
- /* Set the deadline for the outgoing HELLO to drain */
- conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
- conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with adding to peer's conn list */
-
- list_add(&conn->ksnc_list, &peer->ksnp_conns);
- ksocknal_conn_addref(conn);
-
- ksocknal_new_packet(conn, 0);
-
- conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
-
- /* Take packets blocking for this connection. */
- list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
- if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
- continue;
-
- list_del(&tx->tx_list);
- ksocknal_queue_tx_locked(tx, conn);
- }
-
- write_unlock_bh(global_lock);
-
- /* We've now got a new connection. Any errors from here on are just
- * like "normal" comms errors and we close the connection normally.
- * NB (a) we still have to send the reply HELLO for passive
- * connections,
- * (b) normal I/O on the conn is blocked until I setup and call the
- * socket callbacks.
- */
-
- CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
- libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
- &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
- conn->ksnc_port, incarnation, cpt,
- (int)(sched - &sched->kss_info->ksi_scheds[0]));
-
- if (active) {
- /* additional routes after interface exchange? */
- ksocknal_create_routes(peer, conn->ksnc_port,
- hello->kshm_ips, hello->kshm_nips);
- } else {
- hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
- hello->kshm_nips);
- rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
- }
-
- LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
- kshm_ips[LNET_MAX_INTERFACES]));
-
- /* setup the socket AFTER I've received hello (it disables
- * SO_LINGER). I might call back to the acceptor who may want
- * to send a protocol version response and then close the
- * socket; this ensures the socket only tears down after the
- * response has been sent. */
- if (rc == 0)
- rc = ksocknal_lib_setup_sock(sock);
-
- write_lock_bh(global_lock);
-
- /* NB my callbacks block while I hold ksnd_global_lock */
- ksocknal_lib_set_callback(sock, conn);
-
- if (!active)
- peer->ksnp_accepting--;
-
- write_unlock_bh(global_lock);
-
- if (rc != 0) {
- write_lock_bh(global_lock);
- if (!conn->ksnc_closing) {
- /* could be closed by another thread */
- ksocknal_close_conn_locked(conn, rc);
- }
- write_unlock_bh(global_lock);
- } else if (ksocknal_connsock_addref(conn) == 0) {
- /* Allow I/O to proceed. */
- ksocknal_read_callback(conn);
- ksocknal_write_callback(conn);
- ksocknal_connsock_decref(conn);
- }
-
- ksocknal_connsock_decref(conn);
- ksocknal_conn_decref(conn);
- return rc;
-
- failed_2:
- if (!peer->ksnp_closing &&
- list_empty(&peer->ksnp_conns) &&
- list_empty(&peer->ksnp_routes)) {
- list_add(&zombies, &peer->ksnp_tx_queue);
- list_del_init(&peer->ksnp_tx_queue);
- ksocknal_unlink_peer_locked(peer);
- }
-
- write_unlock_bh(global_lock);
-
- if (warn != NULL) {
- if (rc < 0)
- CERROR("Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
- else
- CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
- }
-
- if (!active) {
- if (rc > 0) {
- /* Request retry by replying with CONN_NONE
- * ksnc_proto has been set already */
- conn->ksnc_type = SOCKLND_CONN_NONE;
- hello->kshm_nips = 0;
- ksocknal_send_hello(ni, conn, peerid.nid, hello);
- }
-
- write_lock_bh(global_lock);
- peer->ksnp_accepting--;
- write_unlock_bh(global_lock);
- }
-
- ksocknal_txlist_done(ni, &zombies, 1);
- ksocknal_peer_decref(peer);
-
-failed_1:
- if (hello != NULL)
- LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
- kshm_ips[LNET_MAX_INTERFACES]));
-
- LIBCFS_FREE(conn, sizeof(*conn));
-
-failed_0:
- sock_release(sock);
- return rc;
-}
-
-void
-ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
-{
-	/* This just does the immediate housekeeping, and queues the
- * connection for the reaper to terminate.
- * Caller holds ksnd_global_lock exclusively in irq context */
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_route_t *route;
- ksock_conn_t *conn2;
- struct list_head *tmp;
-
- LASSERT(peer->ksnp_error == 0);
- LASSERT(!conn->ksnc_closing);
- conn->ksnc_closing = 1;
-
- /* ksnd_deathrow_conns takes over peer's ref */
- list_del(&conn->ksnc_list);
-
- route = conn->ksnc_route;
- if (route != NULL) {
- /* dissociate conn from route... */
- LASSERT(!route->ksnr_deleted);
- LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
-
- conn2 = NULL;
- list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
-
- if (conn2->ksnc_route == route &&
- conn2->ksnc_type == conn->ksnc_type)
- break;
-
- conn2 = NULL;
- }
- if (conn2 == NULL)
- route->ksnr_connected &= ~(1 << conn->ksnc_type);
-
- conn->ksnc_route = NULL;
-
-#if 0 /* irrelevant with only eager routes */
- /* make route least favourite */
- list_del(&route->ksnr_list);
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-#endif
- ksocknal_route_decref(route); /* drop conn's ref on route */
- }
-
- if (list_empty(&peer->ksnp_conns)) {
- /* No more connections to this peer */
-
- if (!list_empty(&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx;
-
- LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
-
- /* throw them to the last connection...,
-			 * these TXs will be sent to /dev/null by the scheduler */
- list_for_each_entry(tx, &peer->ksnp_tx_queue,
- tx_list)
- ksocknal_tx_prep(conn, tx);
-
- spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
- list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
- spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
- }
-
- peer->ksnp_proto = NULL; /* renegotiate protocol version */
- peer->ksnp_error = error; /* stash last conn close reason */
-
- if (list_empty(&peer->ksnp_routes)) {
- /* I've just closed last conn belonging to a
- * peer with no routes to it */
- ksocknal_unlink_peer_locked(peer);
- }
- }
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- list_add_tail(&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
- wake_up(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-}
-
-void
-ksocknal_peer_failed(ksock_peer_t *peer)
-{
- int notify = 0;
- unsigned long last_alive = 0;
-
- /* There has been a connection failure or comms error; but I'll only
- * tell LNET I think the peer is dead if it's to another kernel and
- * there are no connections or connection attempts in existence. */
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
- list_empty(&peer->ksnp_conns) &&
- peer->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer) == NULL) {
- notify = 1;
- last_alive = peer->ksnp_last_alive;
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- if (notify)
- lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
- last_alive);
-}
-
-void
-ksocknal_finalize_zcreq(ksock_conn_t *conn)
-{
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
- LIST_HEAD(zlist);
-
- /* NB safe to finalize TXs because closing of socket will
- * abort all buffered data */
- LASSERT(conn->ksnc_sock == NULL);
-
- spin_lock(&peer->ksnp_lock);
-
- list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
- if (tx->tx_conn != conn)
- continue;
-
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
-
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- tx->tx_zc_aborted = 1; /* mark it as not-acked */
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
- }
-
- spin_unlock(&peer->ksnp_lock);
-
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
-
- list_del(&tx->tx_zc_list);
- ksocknal_tx_decref(tx);
- }
-}
-
-void
-ksocknal_terminate_conn(ksock_conn_t *conn)
-{
- /* This gets called by the reaper (guaranteed thread context) to
- * disengage the socket from its callbacks and close it.
- * ksnc_refcount will eventually hit zero, and then the reaper will
- * destroy it. */
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_sched_t *sched = conn->ksnc_scheduler;
- int failed = 0;
-
- LASSERT(conn->ksnc_closing);
-
- /* wake up the scheduler to "send" all remaining packets to /dev/null */
- spin_lock_bh(&sched->kss_lock);
-
- /* a closing conn is always ready to tx */
- conn->ksnc_tx_ready = 1;
-
- if (!conn->ksnc_tx_scheduled &&
- !list_empty(&conn->ksnc_tx_queue)) {
- list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
-
- wake_up(&sched->kss_waitq);
- }
-
- spin_unlock_bh(&sched->kss_lock);
-
- /* serialise with callbacks */
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
-
- /* OK, so this conn may not be completely disengaged from its
- * scheduler yet, but it _has_ committed to terminate... */
- conn->ksnc_scheduler->kss_nconns--;
-
- if (peer->ksnp_error != 0) {
- /* peer's last conn closed in error */
- LASSERT(list_empty(&peer->ksnp_conns));
- failed = 1;
- peer->ksnp_error = 0; /* avoid multiple notifications */
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (failed)
- ksocknal_peer_failed(peer);
-
- /* The socket is closed on the final put; either here, or in
- * ksocknal_{send,recv}msg(). Since we set up the linger2 option
- * when the connection was established, this will close the socket
- * immediately, aborting anything buffered in it. Any hung
- * zero-copy transmits will therefore complete in finite time. */
- ksocknal_connsock_decref(conn);
-}
-
-void
-ksocknal_queue_zombie_conn(ksock_conn_t *conn)
-{
- /* Queue the conn for the reaper to destroy */
-
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
- wake_up(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-}
-
-void
-ksocknal_destroy_conn(ksock_conn_t *conn)
-{
- unsigned long last_rcv;
-
- /* Final coup-de-grace of the reaper */
- CDEBUG(D_NET, "connection %p\n", conn);
-
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
- LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
- LASSERT(conn->ksnc_sock == NULL);
- LASSERT(conn->ksnc_route == NULL);
- LASSERT(!conn->ksnc_tx_scheduled);
- LASSERT(!conn->ksnc_rx_scheduled);
- LASSERT(list_empty(&conn->ksnc_tx_queue));
-
- /* complete current receive if any */
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_LNET_PAYLOAD:
- last_rcv = conn->ksnc_rx_deadline -
- cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
- CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
- &conn->ksnc_ipaddr, conn->ksnc_port,
- conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
- cfs_duration_sec(cfs_time_sub(cfs_time_current(),
- last_rcv)));
- lnet_finalize(conn->ksnc_peer->ksnp_ni,
- conn->ksnc_cookie, -EIO);
- break;
- case SOCKNAL_RX_LNET_HEADER:
- if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port,
- conn->ksnc_proto->pro_version);
- break;
- case SOCKNAL_RX_KSM_HEADER:
- if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port,
- conn->ksnc_proto->pro_version);
- break;
- case SOCKNAL_RX_SLOP:
- if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
- break;
- default:
- LBUG();
- break;
- }
-
- ksocknal_peer_decref(conn->ksnc_peer);
-
- LIBCFS_FREE(conn, sizeof(*conn));
-}
-
-int
-ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
-{
- ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
-
- list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
-
- if (ipaddr == 0 ||
- conn->ksnc_ipaddr == ipaddr) {
- count++;
- ksocknal_close_conn_locked(conn, why);
- }
- }
-
- return count;
-}
-
-int
-ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
-{
- ksock_peer_t *peer = conn->ksnc_peer;
- __u32 ipaddr = conn->ksnc_ipaddr;
- int count;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- count = ksocknal_close_peer_conns_locked(peer, ipaddr, why);
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return count;
-}
-
-int
-ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
-{
- ksock_peer_t *peer;
- struct list_head *ptmp;
- struct list_head *pnxt;
- int lo;
- int hi;
- int i;
- int count = 0;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
- lo = 0;
- hi = ksocknal_data.ksnd_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
-
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
-
- if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
- (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
- continue;
-
- count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
- }
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- /* wildcards always succeed */
- if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
- return 0;
-
- if (count == 0)
- return -ENOENT;
- else
- return 0;
-}
-
-void
-ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
-{
- /* The router is telling me she's been notified of a change in
- * gateway state.... */
- lnet_process_id_t id = {0};
-
- id.nid = gw_nid;
- id.pid = LNET_PID_ANY;
-
- CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
- alive ? "up" : "down");
-
- if (!alive) {
- /* If the gateway crashed, close all open connections... */
- ksocknal_close_matching_conns(id, 0);
- return;
- }
-
- /* ...otherwise do nothing. We can only establish new connections
-	 * if we have autoroutes, and these connect on demand. */
-}
-
-void
-ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
-{
- int connect = 1;
- unsigned long last_alive = 0;
- unsigned long now = cfs_time_current();
- ksock_peer_t *peer = NULL;
- rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
- lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
-
- read_lock(glock);
-
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) {
- struct list_head *tmp;
- ksock_conn_t *conn;
- int bufnob;
-
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
- bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
-
- if (bufnob < conn->ksnc_tx_bufnob) {
- /* something got ACKed */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- peer->ksnp_last_alive = now;
- conn->ksnc_tx_bufnob = bufnob;
- }
- }
-
- last_alive = peer->ksnp_last_alive;
- if (ksocknal_find_connectable_route_locked(peer) == NULL)
- connect = 0;
- }
-
- read_unlock(glock);
-
- if (last_alive != 0)
- *when = last_alive;
-
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
- libcfs_nid2str(nid), peer,
- last_alive ? cfs_duration_sec(now - last_alive) : -1,
- connect);
-
- if (!connect)
- return;
-
- ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
-
- write_lock_bh(glock);
-
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL)
- ksocknal_launch_all_connections_locked(peer);
-
- write_unlock_bh(glock);
- return;
-}
-
-static void
-ksocknal_push_peer(ksock_peer_t *peer)
-{
- int index;
- int i;
- struct list_head *tmp;
- ksock_conn_t *conn;
-
- for (index = 0; ; index++) {
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- i = 0;
- conn = NULL;
-
- list_for_each(tmp, &peer->ksnp_conns) {
- if (i++ == index) {
- conn = list_entry(tmp, ksock_conn_t,
- ksnc_list);
- ksocknal_conn_addref(conn);
- break;
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- if (conn == NULL)
- break;
-
- ksocknal_lib_push_conn(conn);
- ksocknal_conn_decref(conn);
- }
-}
-
-static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
-{
- struct list_head *start;
- struct list_head *end;
- struct list_head *tmp;
- int rc = -ENOENT;
- unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
-
- if (id.nid == LNET_NID_ANY) {
- start = &ksocknal_data.ksnd_peers[0];
- end = &ksocknal_data.ksnd_peers[hsize - 1];
- } else {
- start = end = ksocknal_nid2peerlist(id.nid);
- }
-
- for (tmp = start; tmp <= end; tmp++) {
- int peer_off; /* searching offset in peer hash table */
-
- for (peer_off = 0; ; peer_off++) {
- ksock_peer_t *peer;
- int i = 0;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
- list_for_each_entry(peer, tmp, ksnp_list) {
- if (!((id.nid == LNET_NID_ANY ||
- id.nid == peer->ksnp_id.nid) &&
- (id.pid == LNET_PID_ANY ||
- id.pid == peer->ksnp_id.pid)))
- continue;
-
- if (i++ == peer_off) {
- ksocknal_peer_addref(peer);
- break;
- }
- }
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- if (i == 0) /* no match */
- break;
-
- rc = 0;
- ksocknal_push_peer(peer);
- ksocknal_peer_decref(peer);
- }
- }
- return rc;
-}
-
-static int
-ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
-{
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
- int rc;
- int i;
- int j;
- struct list_head *ptmp;
- ksock_peer_t *peer;
- struct list_head *rtmp;
- ksock_route_t *route;
-
- if (ipaddress == 0 ||
- netmask == 0)
- return -EINVAL;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- iface = ksocknal_ip2iface(ni, ipaddress);
- if (iface != NULL) {
- /* silently ignore dups */
- rc = 0;
- } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
- rc = -ENOSPC;
- } else {
- iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
-
- iface->ksni_ipaddr = ipaddress;
- iface->ksni_netmask = netmask;
- iface->ksni_nroutes = 0;
- iface->ksni_npeers = 0;
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t,
- ksnp_list);
-
- for (j = 0; j < peer->ksnp_n_passive_ips; j++)
- if (peer->ksnp_passive_ips[j] == ipaddress)
- iface->ksni_npeers++;
-
- list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp,
- ksock_route_t,
- ksnr_list);
-
- if (route->ksnr_myipaddr == ipaddress)
- iface->ksni_nroutes++;
- }
- }
- }
-
- rc = 0;
- /* NB only new connections will pay attention to the new interface! */
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return rc;
-}
-
-static void
-ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
-{
- struct list_head *tmp;
- struct list_head *nxt;
- ksock_route_t *route;
- ksock_conn_t *conn;
- int i;
- int j;
-
- for (i = 0; i < peer->ksnp_n_passive_ips; i++)
- if (peer->ksnp_passive_ips[i] == ipaddr) {
- for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
- peer->ksnp_passive_ips[j-1] =
- peer->ksnp_passive_ips[j];
- peer->ksnp_n_passive_ips--;
- break;
- }
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
-
- if (route->ksnr_myipaddr != ipaddr)
- continue;
-
- if (route->ksnr_share_count != 0) {
- /* Manually created; keep, but unbind */
- route->ksnr_myipaddr = 0;
- } else {
- ksocknal_del_route_locked(route);
- }
- }
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
-
- if (conn->ksnc_myipaddr == ipaddr)
- ksocknal_close_conn_locked(conn, 0);
- }
-}
-
-static int
-ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
-{
- ksock_net_t *net = ni->ni_data;
- int rc = -ENOENT;
- struct list_head *tmp;
- struct list_head *nxt;
- ksock_peer_t *peer;
- __u32 this_ip;
- int i;
- int j;
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
-
- if (!(ipaddress == 0 ||
- ipaddress == this_ip))
- continue;
-
- rc = 0;
-
- for (j = i+1; j < net->ksnn_ninterfaces; j++)
- net->ksnn_interfaces[j-1] =
- net->ksnn_interfaces[j];
-
- net->ksnn_ninterfaces--;
-
- for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
- list_for_each_safe(tmp, nxt,
- &ksocknal_data.ksnd_peers[j]) {
- peer = list_entry(tmp, ksock_peer_t,
- ksnp_list);
-
- if (peer->ksnp_ni != ni)
- continue;
-
- ksocknal_peer_del_interface_locked(peer, this_ip);
- }
- }
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- return rc;
-}
-
-int
-ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
-{
- lnet_process_id_t id = {0};
- struct libcfs_ioctl_data *data = arg;
- int rc;
-
- switch (cmd) {
- case IOC_LIBCFS_GET_INTERFACE: {
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
- rc = -ENOENT;
- } else {
- rc = 0;
- iface = &net->ksnn_interfaces[data->ioc_count];
-
- data->ioc_u32[0] = iface->ksni_ipaddr;
- data->ioc_u32[1] = iface->ksni_netmask;
- data->ioc_u32[2] = iface->ksni_npeers;
- data->ioc_u32[3] = iface->ksni_nroutes;
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return rc;
- }
-
- case IOC_LIBCFS_ADD_INTERFACE:
- return ksocknal_add_interface(ni,
- data->ioc_u32[0], /* IP address */
- data->ioc_u32[1]); /* net mask */
-
- case IOC_LIBCFS_DEL_INTERFACE:
- return ksocknal_del_interface(ni,
- data->ioc_u32[0]); /* IP address */
-
- case IOC_LIBCFS_GET_PEER: {
- __u32 myip = 0;
- __u32 ip = 0;
- int port = 0;
- int conn_count = 0;
- int share_count = 0;
-
- rc = ksocknal_get_peer_info(ni, data->ioc_count,
- &id, &myip, &ip, &port,
- &conn_count, &share_count);
- if (rc != 0)
- return rc;
-
- data->ioc_nid = id.nid;
- data->ioc_count = share_count;
- data->ioc_u32[0] = ip;
- data->ioc_u32[1] = port;
- data->ioc_u32[2] = myip;
- data->ioc_u32[3] = conn_count;
- data->ioc_u32[4] = id.pid;
- return 0;
- }
-
- case IOC_LIBCFS_ADD_PEER:
- id.nid = data->ioc_nid;
- id.pid = LUSTRE_SRV_LNET_PID;
- return ksocknal_add_peer(ni, id,
- data->ioc_u32[0], /* IP */
- data->ioc_u32[1]); /* port */
-
- case IOC_LIBCFS_DEL_PEER:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_del_peer(ni, id,
- data->ioc_u32[0]); /* IP */
-
- case IOC_LIBCFS_GET_CONN: {
- int txmem;
- int rxmem;
- int nagle;
- ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
-
- if (conn == NULL)
- return -ENOENT;
-
- ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
-
- data->ioc_count = txmem;
- data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
- data->ioc_flags = nagle;
- data->ioc_u32[0] = conn->ksnc_ipaddr;
- data->ioc_u32[1] = conn->ksnc_port;
- data->ioc_u32[2] = conn->ksnc_myipaddr;
- data->ioc_u32[3] = conn->ksnc_type;
- data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
- data->ioc_u32[5] = rxmem;
- data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
- ksocknal_conn_decref(conn);
- return 0;
- }
-
- case IOC_LIBCFS_CLOSE_CONNECTION:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_close_matching_conns(id,
- data->ioc_u32[0]);
-
- case IOC_LIBCFS_REGISTER_MYNID:
- /* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid)
- return 0;
-
- CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
- libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
-
- case IOC_LIBCFS_PUSH_CONNECTION:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_push(ni, id);
-
- default:
- return -EINVAL;
- }
- /* not reached */
-}
-
-static void
-ksocknal_free_buffers(void)
-{
- LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
-
- if (ksocknal_data.ksnd_sched_info != NULL) {
- struct ksock_sched_info *info;
- int i;
-
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds != NULL) {
- LIBCFS_FREE(info->ksi_scheds,
- info->ksi_nthreads_max *
- sizeof(info->ksi_scheds[0]));
- }
- }
- cfs_percpt_free(ksocknal_data.ksnd_sched_info);
- }
-
- LIBCFS_FREE(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
-
- spin_lock(&ksocknal_data.ksnd_tx_lock);
-
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- struct list_head zlist;
- ksock_tx_t *tx;
-
- list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
- list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
-
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_list);
- list_del(&tx->tx_list);
- LIBCFS_FREE(tx, tx->tx_desc_size);
- }
- } else {
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
- }
-}
-
-static void
-ksocknal_base_shutdown(void)
-{
- struct ksock_sched_info *info;
- ksock_sched_t *sched;
- int i;
- int j;
-
- LASSERT(ksocknal_data.ksnd_nnets == 0);
-
- switch (ksocknal_data.ksnd_init) {
- default:
- LASSERT(0);
-
- case SOCKNAL_INIT_ALL:
- case SOCKNAL_INIT_DATA:
- LASSERT(ksocknal_data.ksnd_peers != NULL);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
- }
-
- LASSERT(list_empty(&ksocknal_data.ksnd_nets));
- LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
- LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
- LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
- LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
-
- if (ksocknal_data.ksnd_sched_info != NULL) {
- cfs_percpt_for_each(info, i,
- ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
- continue;
-
- for (j = 0; j < info->ksi_nthreads_max; j++) {
-
- sched = &info->ksi_scheds[j];
- LASSERT(list_empty(
- &sched->kss_tx_conns));
- LASSERT(list_empty(
- &sched->kss_rx_conns));
- LASSERT(list_empty(
- &sched->kss_zombie_noop_txs));
- LASSERT(sched->kss_nconns == 0);
- }
- }
- }
-
- /* flag threads to terminate; wake and wait for them to die */
- ksocknal_data.ksnd_shuttingdown = 1;
- wake_up_all(&ksocknal_data.ksnd_connd_waitq);
- wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
-
- if (ksocknal_data.ksnd_sched_info != NULL) {
- cfs_percpt_for_each(info, i,
- ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
- continue;
-
- for (j = 0; j < info->ksi_nthreads_max; j++) {
- sched = &info->ksi_scheds[j];
- wake_up_all(&sched->kss_waitq);
- }
- }
- }
-
- i = 4;
- read_lock(&ksocknal_data.ksnd_global_lock);
- while (ksocknal_data.ksnd_nthreads != 0) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "waiting for %d threads to terminate\n",
- ksocknal_data.ksnd_nthreads);
- read_unlock(&ksocknal_data.ksnd_global_lock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- read_lock(&ksocknal_data.ksnd_global_lock);
- }
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_free_buffers();
-
- ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
- break;
- }
-
- module_put(THIS_MODULE);
-}
-
-static __u64
-ksocknal_new_incarnation(void)
-{
-
-	/* The incarnation number is the time this module was loaded and it
- * identifies this particular instance of the socknal.
- */
- return ktime_get_ns();
-}
-
-static int
-ksocknal_base_startup(void)
-{
- struct ksock_sched_info *info;
- int rc;
- int i;
-
- LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
- LASSERT(ksocknal_data.ksnd_nnets == 0);
-
- memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
-
- ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
- LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
- if (ksocknal_data.ksnd_peers == NULL)
- return -ENOMEM;
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
- INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
-
- rwlock_init(&ksocknal_data.ksnd_global_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
-
- spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
- init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_lock_init(&ksocknal_data.ksnd_connd_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
- init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
-
- spin_lock_init(&ksocknal_data.ksnd_tx_lock);
- INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
-
- /* NB memset above zeros whole of ksocknal_data */
-
- /* flag lists/ptrs/locks initialised */
- ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
- try_module_get(THIS_MODULE);
-
- ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*info));
- if (ksocknal_data.ksnd_sched_info == NULL)
- goto failed;
-
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- ksock_sched_t *sched;
- int nthrs;
-
- nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
- if (*ksocknal_tunables.ksnd_nscheds > 0) {
- nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
- } else {
- /* max to half of CPUs, assume another half should be
- * reserved for upper layer modules */
- nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
- }
-
- info->ksi_nthreads_max = nthrs;
- info->ksi_cpt = i;
-
- LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
- info->ksi_nthreads_max * sizeof(*sched));
- if (info->ksi_scheds == NULL)
- goto failed;
-
- for (; nthrs > 0; nthrs--) {
- sched = &info->ksi_scheds[nthrs - 1];
-
- sched->kss_info = info;
- spin_lock_init(&sched->kss_lock);
- INIT_LIST_HEAD(&sched->kss_rx_conns);
- INIT_LIST_HEAD(&sched->kss_tx_conns);
- INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
- init_waitqueue_head(&sched->kss_waitq);
- }
- }
-
- ksocknal_data.ksnd_connd_starting = 0;
- ksocknal_data.ksnd_connd_failed_stamp = 0;
- ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
- /* must have at least 2 connds to remain responsive to accepts while
- * connecting */
- if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
- *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
-
- if (*ksocknal_tunables.ksnd_nconnds_max <
- *ksocknal_tunables.ksnd_nconnds) {
- ksocknal_tunables.ksnd_nconnds_max =
- ksocknal_tunables.ksnd_nconnds;
- }
-
- for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- char name[16];
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting++;
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
-
- snprintf(name, sizeof(name), "socknal_cd%02d", i);
- rc = ksocknal_thread_start(ksocknal_connd,
- (void *)((ulong_ptr_t)i), name);
- if (rc != 0) {
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting--;
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
- CERROR("Can't spawn socknal connd: %d\n", rc);
- goto failed;
- }
- }
-
- rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
- if (rc != 0) {
- CERROR("Can't spawn socknal reaper: %d\n", rc);
- goto failed;
- }
-
- /* flag everything initialised */
- ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
-
- return 0;
-
- failed:
- ksocknal_base_shutdown();
- return -ENETDOWN;
-}
-
-static void
-ksocknal_debug_peerhash(lnet_ni_t *ni)
-{
- ksock_peer_t *peer = NULL;
- struct list_head *tmp;
- int i;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
-
- if (peer->ksnp_ni == ni)
- break;
-
- peer = NULL;
- }
- }
-
- if (peer != NULL) {
- ksock_route_t *route;
- ksock_conn_t *conn;
-
- CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
- libcfs_id2str(peer->ksnp_id),
- atomic_read(&peer->ksnp_refcount),
- peer->ksnp_sharecount, peer->ksnp_closing,
- peer->ksnp_accepting, peer->ksnp_error,
- peer->ksnp_zc_next_cookie,
- !list_empty(&peer->ksnp_tx_queue),
- !list_empty(&peer->ksnp_zc_req_list));
-
- list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
- CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
- atomic_read(&route->ksnr_refcount),
- route->ksnr_scheduled, route->ksnr_connecting,
- route->ksnr_connected, route->ksnr_deleted);
- }
-
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
- CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
- atomic_read(&conn->ksnc_conn_refcount),
- atomic_read(&conn->ksnc_sock_refcount),
- conn->ksnc_type, conn->ksnc_closing);
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return;
-}
-
-void
-ksocknal_shutdown(lnet_ni_t *ni)
-{
- ksock_net_t *net = ni->ni_data;
- int i;
- lnet_process_id_t anyid = {0};
-
- anyid.nid = LNET_NID_ANY;
- anyid.pid = LNET_PID_ANY;
-
- LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
- LASSERT(ksocknal_data.ksnd_nnets > 0);
-
- spin_lock_bh(&net->ksnn_lock);
- net->ksnn_shutdown = 1; /* prevent new peers */
- spin_unlock_bh(&net->ksnn_lock);
-
- /* Delete all peers */
- ksocknal_del_peer(ni, anyid, 0);
-
- /* Wait for all peer state to clean up */
- i = 2;
- spin_lock_bh(&net->ksnn_lock);
- while (net->ksnn_npeers != 0) {
- spin_unlock_bh(&net->ksnn_lock);
-
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "waiting for %d peers to disconnect\n",
- net->ksnn_npeers);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
-
- ksocknal_debug_peerhash(ni);
-
- spin_lock_bh(&net->ksnn_lock);
- }
- spin_unlock_bh(&net->ksnn_lock);
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
- LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
- }
-
- list_del(&net->ksnn_list);
- LIBCFS_FREE(net, sizeof(*net));
-
- ksocknal_data.ksnd_nnets--;
- if (ksocknal_data.ksnd_nnets == 0)
- ksocknal_base_shutdown();
-}
-
-static int
-ksocknal_enumerate_interfaces(ksock_net_t *net)
-{
- char **names;
- int i;
- int j;
- int rc;
- int n;
-
- n = lnet_ipif_enumerate(&names);
- if (n <= 0) {
- CERROR("Can't enumerate interfaces: %d\n", n);
- return n;
- }
-
- for (i = j = 0; i < n; i++) {
- int up;
- __u32 ip;
- __u32 mask;
-
- if (!strcmp(names[i], "lo")) /* skip the loopback IF */
- continue;
-
- rc = lnet_ipif_query(names[i], &up, &ip, &mask);
- if (rc != 0) {
- CWARN("Can't get interface %s info: %d\n",
- names[i], rc);
- continue;
- }
-
- if (!up) {
- CWARN("Ignoring interface %s (down)\n",
- names[i]);
- continue;
- }
-
- if (j == LNET_MAX_INTERFACES) {
- CWARN("Ignoring interface %s (too many interfaces)\n",
- names[i]);
- continue;
- }
-
- net->ksnn_interfaces[j].ksni_ipaddr = ip;
- net->ksnn_interfaces[j].ksni_netmask = mask;
- strncpy(&net->ksnn_interfaces[j].ksni_name[0],
- names[i], IFNAMSIZ);
- j++;
- }
-
- lnet_ipif_free_enumeration(names, n);
-
- if (j == 0)
- CERROR("Can't find any usable interfaces\n");
-
- return j;
-}
-
-static int
-ksocknal_search_new_ipif(ksock_net_t *net)
-{
- int new_ipif = 0;
- int i;
-
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
- char *colon = strchr(ifnam, ':');
- int found = 0;
- ksock_net_t *tmp;
- int j;
-
- if (colon != NULL) /* ignore alias device */
- *colon = 0;
-
- list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
- ksnn_list) {
- for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
- char *ifnam2 =
- &tmp->ksnn_interfaces[j].ksni_name[0];
- char *colon2 = strchr(ifnam2, ':');
-
- if (colon2 != NULL)
- *colon2 = 0;
-
- found = strcmp(ifnam, ifnam2) == 0;
- if (colon2 != NULL)
- *colon2 = ':';
- }
- if (found)
- break;
- }
-
- new_ipif += !found;
- if (colon != NULL)
- *colon = ':';
- }
-
- return new_ipif;
-}
-
-static int
-ksocknal_start_schedulers(struct ksock_sched_info *info)
-{
- int nthrs;
- int rc = 0;
- int i;
-
- if (info->ksi_nthreads == 0) {
- if (*ksocknal_tunables.ksnd_nscheds > 0) {
- nthrs = info->ksi_nthreads_max;
- } else {
- nthrs = cfs_cpt_weight(lnet_cpt_table(),
- info->ksi_cpt);
- nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
- nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
- }
- nthrs = min(nthrs, info->ksi_nthreads_max);
- } else {
- LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
-		/* add two more threads if there is a new interface */
- nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
- }
-
- for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
- ksock_sched_t *sched;
- id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
- sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
- snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
- info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
-
- rc = ksocknal_thread_start(ksocknal_scheduler,
- (void *)id, name);
- if (rc == 0)
- continue;
-
- CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
- info->ksi_cpt, info->ksi_nthreads + i, rc);
- break;
- }
-
- info->ksi_nthreads += i;
- return rc;
-}
-
-static int
-ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
-{
- int newif = ksocknal_search_new_ipif(net);
- int rc;
- int i;
-
- LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
-
- for (i = 0; i < ncpts; i++) {
- struct ksock_sched_info *info;
- int cpt = (cpts == NULL) ? i : cpts[i];
-
- LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
- info = ksocknal_data.ksnd_sched_info[cpt];
-
- if (!newif && info->ksi_nthreads > 0)
- continue;
-
- rc = ksocknal_start_schedulers(info);
- if (rc != 0)
- return rc;
- }
- return 0;
-}
-
-int
-ksocknal_startup(lnet_ni_t *ni)
-{
- ksock_net_t *net;
- int rc;
- int i;
-
- LASSERT(ni->ni_lnd == &the_ksocklnd);
-
- if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
- rc = ksocknal_base_startup();
- if (rc != 0)
- return rc;
- }
-
- LIBCFS_ALLOC(net, sizeof(*net));
- if (net == NULL)
- goto fail_0;
-
- spin_lock_init(&net->ksnn_lock);
- net->ksnn_incarnation = ksocknal_new_incarnation();
- ni->ni_data = net;
- ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout;
- ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
- ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits;
- ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
-
- if (ni->ni_interfaces[0] == NULL) {
- rc = ksocknal_enumerate_interfaces(net);
- if (rc <= 0)
- goto fail_1;
-
- net->ksnn_ninterfaces = 1;
- } else {
- for (i = 0; i < LNET_MAX_INTERFACES; i++) {
- int up;
-
- if (ni->ni_interfaces[i] == NULL)
- break;
-
- rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
- &net->ksnn_interfaces[i].ksni_ipaddr,
- &net->ksnn_interfaces[i].ksni_netmask);
-
- if (rc != 0) {
- CERROR("Can't get interface %s info: %d\n",
- ni->ni_interfaces[i], rc);
- goto fail_1;
- }
-
- if (!up) {
- CERROR("Interface %s is down\n",
- ni->ni_interfaces[i]);
- goto fail_1;
- }
-
- strncpy(&net->ksnn_interfaces[i].ksni_name[0],
- ni->ni_interfaces[i], IFNAMSIZ);
- }
- net->ksnn_ninterfaces = i;
- }
-
-	/* call it before adding it to ksocknal_data.ksnd_nets */
- rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0)
- goto fail_1;
-
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
- net->ksnn_interfaces[0].ksni_ipaddr);
- list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
-
- ksocknal_data.ksnd_nnets++;
-
- return 0;
-
- fail_1:
- LIBCFS_FREE(net, sizeof(*net));
- fail_0:
- if (ksocknal_data.ksnd_nnets == 0)
- ksocknal_base_shutdown();
-
- return -ENETDOWN;
-}
-
-
-static void __exit
-ksocknal_module_fini(void)
-{
- lnet_unregister_lnd(&the_ksocklnd);
-}
-
-static int __init
-ksocknal_module_init(void)
-{
- int rc;
-
-	/* check that the ksnr_connected/connecting fields are large enough */
- CLASSERT(SOCKLND_CONN_NTYPES <= 4);
- CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
-
- /* initialize the_ksocklnd */
- the_ksocklnd.lnd_type = SOCKLND;
- the_ksocklnd.lnd_startup = ksocknal_startup;
- the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
- the_ksocklnd.lnd_ctl = ksocknal_ctl;
- the_ksocklnd.lnd_send = ksocknal_send;
- the_ksocklnd.lnd_recv = ksocknal_recv;
- the_ksocklnd.lnd_notify = ksocknal_notify;
- the_ksocklnd.lnd_query = ksocknal_query;
- the_ksocklnd.lnd_accept = ksocknal_accept;
-
- rc = ksocknal_tunables_init();
- if (rc != 0)
- return rc;
-
- lnet_register_lnd(&the_ksocklnd);
-
- return 0;
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("3.0.0");
-
-module_init(ksocknal_module_init);
-module_exit(ksocknal_module_fini);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
deleted file mode 100644
index b349847f9cf9..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ /dev/null
@@ -1,689 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * This file is part of Lustre, http://www.lustre.org
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#ifndef _SOCKLND_SOCKLND_H_
-#define _SOCKLND_SOCKLND_H_
-
-#define DEBUG_PORTAL_ALLOC
-#define DEBUG_SUBSYSTEM S_LND
-
-#include <linux/crc32.h>
-#include <linux/errno.h>
-#include <linux/if.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/syscalls.h>
-#include <linux/sysctl.h>
-#include <linux/uio.h>
-#include <linux/unistd.h>
-#include <asm/irq.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-
-#include "../../../include/linux/libcfs/libcfs.h"
-#include "../../../include/linux/lnet/lnet.h"
-#include "../../../include/linux/lnet/lib-lnet.h"
-#include "../../../include/linux/lnet/socklnd.h"
-
-/* assume one thread for each connection type */
-#define SOCKNAL_NSCHEDS 3
-#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1)
-
-#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */
-#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
-#define SOCKNAL_INSANITY_RECONN 5000 /* connd keeps retrying reconnection indefinitely */
-#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */
-
-#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
-#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
-
-#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
-
-/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
- * no risk if we're not running on a CONFIG_HIGHMEM platform. */
-#ifdef CONFIG_HIGHMEM
-# define SOCKNAL_RISK_KMAP_DEADLOCK 0
-#else
-# define SOCKNAL_RISK_KMAP_DEADLOCK 1
-#endif
-
-struct ksock_sched_info;
-
-typedef struct /* per scheduler state */
-{
- spinlock_t kss_lock; /* serialise */
- struct list_head kss_rx_conns; /* conn waiting to be read */
- struct list_head kss_tx_conns; /* conn waiting to be written */
- struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
- wait_queue_head_t kss_waitq; /* where scheduler sleeps */
- int kss_nconns; /* # connections assigned to
- * this scheduler */
- struct ksock_sched_info *kss_info; /* owner of it */
- struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
- struct kvec kss_scratch_iov[LNET_MAX_IOV];
-} ksock_sched_t;
-
-struct ksock_sched_info {
- int ksi_nthreads_max; /* max allowed threads */
- int ksi_nthreads; /* number of threads */
- int ksi_cpt; /* CPT id */
- ksock_sched_t *ksi_scheds; /* array of schedulers */
-};
-
-#define KSOCK_CPT_SHIFT 16
-#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid))
-#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
-#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
-
-typedef struct /* in-use interface */
-{
- __u32 ksni_ipaddr; /* interface's IP address */
- __u32 ksni_netmask; /* interface's network mask */
- int ksni_nroutes; /* # routes using (active) */
- int ksni_npeers; /* # peers using (passive) */
- char ksni_name[IFNAMSIZ]; /* interface name */
-} ksock_interface_t;
-
-typedef struct {
- int *ksnd_timeout; /* "stuck" socket timeout
- * (seconds) */
- int *ksnd_nscheds; /* # scheduler threads in each
- * pool while starting */
- int *ksnd_nconnds; /* # connection daemons */
- int *ksnd_nconnds_max; /* max # connection daemons */
- int *ksnd_min_reconnectms; /* first connection retry after
- * (ms)... */
- int *ksnd_max_reconnectms; /* ...exponentially increasing to
- * this */
- int *ksnd_eager_ack; /* make TCP ack eagerly? */
- int *ksnd_typed_conns; /* drive sockets by type? */
- int *ksnd_min_bulk; /* smallest "large" message */
- int *ksnd_tx_buffer_size; /* socket tx buffer size */
- int *ksnd_rx_buffer_size; /* socket rx buffer size */
- int *ksnd_nagle; /* enable NAGLE? */
- int *ksnd_round_robin; /* round robin for multiple
- * interfaces */
- int *ksnd_keepalive; /* # secs for sending keepalive
- * NOOP */
- int *ksnd_keepalive_idle; /* # idle secs before 1st probe
- */
- int *ksnd_keepalive_count; /* # probes */
- int *ksnd_keepalive_intvl; /* time between probes */
- int *ksnd_credits; /* # concurrent sends */
- int *ksnd_peertxcredits; /* # concurrent sends to 1 peer
- */
- int *ksnd_peerrtrcredits; /* # per-peer router buffer
- * credits */
- int *ksnd_peertimeout; /* seconds to consider peer dead
- */
- int *ksnd_enable_csum; /* enable check sum */
- int *ksnd_inject_csum_error; /* set non-zero to inject
- * checksum error */
- int *ksnd_nonblk_zcack; /* always send zc-ack on
- * non-blocking connection */
- unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload
- * size */
- int *ksnd_zc_recv; /* enable ZC receive (for
- * Chelsio TOE) */
- int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
- * enable ZC receive */
-} ksock_tunables_t;
-
-typedef struct {
- __u64 ksnn_incarnation; /* my epoch */
- spinlock_t ksnn_lock; /* serialise */
- struct list_head ksnn_list; /* chain on global list */
- int ksnn_npeers; /* # peers */
- int ksnn_shutdown; /* shutting down? */
- int ksnn_ninterfaces; /* IP interfaces */
- ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
-} ksock_net_t;
-
-/** connd timeout */
-#define SOCKNAL_CONND_TIMEOUT 120
-/** reserved thread for accepting & creating new connd */
-#define SOCKNAL_CONND_RESV 1
-
-typedef struct {
- int ksnd_init; /* initialisation state
- */
- int ksnd_nnets; /* # networks set up */
- struct list_head ksnd_nets; /* list of nets */
- rwlock_t ksnd_global_lock; /* stabilize peer/conn
- * ops */
- struct list_head *ksnd_peers; /* hash table of all my
- * known peers */
- int ksnd_peer_hash_size; /* size of ksnd_peers */
-
- int ksnd_nthreads; /* # live threads */
- int ksnd_shuttingdown; /* tell threads to exit
- */
- struct ksock_sched_info **ksnd_sched_info; /* schedulers info */
-
- atomic_t ksnd_nactive_txs; /* #active txs */
-
- struct list_head ksnd_deathrow_conns; /* conns to close:
- * reaper_lock*/
- struct list_head ksnd_zombie_conns; /* conns to free:
- * reaper_lock */
- struct list_head ksnd_enomem_conns; /* conns to retry:
- * reaper_lock*/
- wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
- unsigned long ksnd_reaper_waketime; /* when reaper will wake
- */
- spinlock_t ksnd_reaper_lock; /* serialise */
-
- int ksnd_enomem_tx; /* test ENOMEM sender */
- int ksnd_stall_tx; /* test sluggish sender
- */
- int ksnd_stall_rx; /* test sluggish
- * receiver */
-
- struct list_head ksnd_connd_connreqs; /* incoming connection
- * requests */
- struct list_head ksnd_connd_routes; /* routes waiting to be
- * connected */
- wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
- int ksnd_connd_connecting; /* # connds connecting
- */
- time64_t ksnd_connd_failed_stamp;/* time stamp of the
- * last failed
- * connecting attempt */
- time64_t ksnd_connd_starting_stamp;/* time stamp of the
- * last starting connd
- */
- unsigned ksnd_connd_starting; /* # starting connd */
- unsigned ksnd_connd_running; /* # running connd */
- spinlock_t ksnd_connd_lock; /* serialise */
-
- struct list_head ksnd_idle_noop_txs; /* list head for freed
- * noop tx */
- spinlock_t ksnd_tx_lock; /* serialise, g_lock
- * unsafe */
-
-} ksock_nal_data_t;
-
-#define SOCKNAL_INIT_NOTHING 0
-#define SOCKNAL_INIT_DATA 1
-#define SOCKNAL_INIT_ALL 2
-
-/* A packet just assembled for transmission is represented by 1 or more
- * struct iovec fragments (the first frag contains the portals header),
- * followed by 0 or more lnet_kiov_t fragments.
- *
- * On the receive side, initially 1 struct iovec fragment is posted for
- * receive (the header). Once the header has been received, the payload is
- * received into either struct iovec or lnet_kiov_t fragments, depending on
- * what the header matched or whether the message needs forwarding. */
-
-struct ksock_conn; /* forward ref */
-struct ksock_peer; /* forward ref */
-struct ksock_route; /* forward ref */
-struct ksock_proto; /* forward ref */
-
-typedef struct /* transmit packet */
-{
- struct list_head tx_list; /* queue on conn for transmission etc
- */
- struct list_head tx_zc_list; /* queue on peer for ZC request */
- atomic_t tx_refcount; /* tx reference count */
- int tx_nob; /* # packet bytes */
- int tx_resid; /* residual bytes */
- int tx_niov; /* # packet iovec frags */
- struct kvec *tx_iov; /* packet iovec frags */
- int tx_nkiov; /* # packet page frags */
- unsigned short tx_zc_aborted; /* aborted ZC request */
- unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
- unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
- unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
- lnet_kiov_t *tx_kiov; /* packet page frags */
- struct ksock_conn *tx_conn; /* owning conn */
- lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize()
- */
- unsigned long tx_deadline; /* when (in jiffies) tx times out */
- ksock_msg_t tx_msg; /* socklnd message buffer */
- int tx_desc_size; /* size of this descriptor */
- union {
- struct {
- struct kvec iov; /* virt hdr */
- lnet_kiov_t kiov[0]; /* paged payload */
- } paged;
- struct {
- struct kvec iov[1]; /* virt hdr + payload */
- } virt;
- } tx_frags;
-} ksock_tx_t;
-
-#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
-
-/* network zero copy callback descriptor embedded in ksock_tx_t */
-
-/* space for the rx frag descriptors; we either read a single contiguous
- * header, or up to LNET_MAX_IOV frags of payload of either type. */
-typedef union {
- struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
-} ksock_rxiovspace_t;
-
-#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
-#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
-#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */
-#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */
-#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
-#define SOCKNAL_RX_SLOP 6 /* skipping body */
-
-typedef struct ksock_conn {
- struct ksock_peer *ksnc_peer; /* owning peer */
- struct ksock_route *ksnc_route; /* owning route */
- struct list_head ksnc_list; /* stash on peer's conn list */
- struct socket *ksnc_sock; /* actual socket */
- void *ksnc_saved_data_ready; /* socket's original
- * data_ready() callback */
- void *ksnc_saved_write_space; /* socket's original
- * write_space() callback */
- atomic_t ksnc_conn_refcount;/* conn refcount */
- atomic_t ksnc_sock_refcount;/* sock refcount */
- ksock_sched_t *ksnc_scheduler; /* who schedules this connection
- */
- __u32 ksnc_myipaddr; /* my IP */
- __u32 ksnc_ipaddr; /* peer's IP */
- int ksnc_port; /* peer's port */
- signed int ksnc_type:3; /* type of connection, should be
- * signed value */
- unsigned int ksnc_closing:1; /* being shut down */
- unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
- unsigned int ksnc_zc_capable:1; /* enable to ZC */
- struct ksock_proto *ksnc_proto; /* protocol for the connection */
-
- /* reader */
- struct list_head ksnc_rx_list; /* where I enq waiting input or a
- * forwarding descriptor */
- unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times
- * out */
- __u8 ksnc_rx_started; /* started receiving a message */
- __u8 ksnc_rx_ready; /* data ready to read */
- __u8 ksnc_rx_scheduled; /* being progressed */
- __u8 ksnc_rx_state; /* what is being read */
- int ksnc_rx_nob_left; /* # bytes to next hdr/body */
- int ksnc_rx_nob_wanted;/* bytes actually wanted */
- int ksnc_rx_niov; /* # iovec frags */
- struct kvec *ksnc_rx_iov; /* the iovec frags */
- int ksnc_rx_nkiov; /* # page frags */
- lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
- ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
- __u32 ksnc_rx_csum; /* partial checksum for incoming
- * data */
- void *ksnc_cookie; /* rx lnet_finalize passthru arg
- */
- ksock_msg_t ksnc_msg; /* incoming message buffer:
- * V2.x message takes the
- * whole struct
- * V1.x message is a bare
- * lnet_hdr_t, it's stored in
- * ksnc_msg.ksm_u.lnetmsg */
-
- /* WRITER */
- struct list_head ksnc_tx_list; /* where I enq waiting for output
- * space */
- struct list_head ksnc_tx_queue; /* packets waiting to be sent */
- ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet
- * message or ZC-ACK */
- unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
- */
- int ksnc_tx_bufnob; /* send buffer marker */
- atomic_t ksnc_tx_nob; /* # bytes queued */
- int ksnc_tx_ready; /* write space */
- int ksnc_tx_scheduled; /* being progressed */
- unsigned long ksnc_tx_last_post; /* time stamp of the last posted
- * TX */
-} ksock_conn_t;
-
-typedef struct ksock_route {
- struct list_head ksnr_list; /* chain on peer route list */
- struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
- struct ksock_peer *ksnr_peer; /* owning peer */
- atomic_t ksnr_refcount; /* # users */
- unsigned long ksnr_timeout; /* when (in jiffies) reconnection
- * can happen next */
- long ksnr_retry_interval; /* how long between retries */
- __u32 ksnr_myipaddr; /* my IP */
- __u32 ksnr_ipaddr; /* IP address to connect to */
- int ksnr_port; /* port to connect to */
- unsigned int ksnr_scheduled:1; /* scheduled for attention */
- unsigned int ksnr_connecting:1; /* connection establishment in
- * progress */
- unsigned int ksnr_connected:4; /* connections established by
- * type */
- unsigned int ksnr_deleted:1; /* been removed from peer? */
- unsigned int ksnr_share_count; /* created explicitly? */
- int ksnr_conn_count; /* # conns established by this
- * route */
-} ksock_route_t;
-
-#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
-
-typedef struct ksock_peer {
- struct list_head ksnp_list; /* stash on global peer list */
- unsigned long ksnp_last_alive; /* when (in jiffies) I was last
- * alive */
- lnet_process_id_t ksnp_id; /* who's on the other end(s) */
- atomic_t ksnp_refcount; /* # users */
- int ksnp_sharecount; /* lconf usage counter */
- int ksnp_closing; /* being closed */
- int ksnp_accepting; /* # passive connections pending
- */
- int ksnp_error; /* errno on closing last conn */
- __u64 ksnp_zc_next_cookie; /* ZC completion cookie */
- __u64 ksnp_incarnation; /* latest known peer incarnation
- */
- struct ksock_proto *ksnp_proto; /* latest known peer protocol */
- struct list_head ksnp_conns; /* all active connections */
- struct list_head ksnp_routes; /* routes */
- struct list_head ksnp_tx_queue; /* waiting packets */
- spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
- struct list_head ksnp_zc_req_list; /* zero copy requests wait for
- * ACK */
- unsigned long ksnp_send_keepalive; /* time to send keepalive */
- lnet_ni_t *ksnp_ni; /* which network */
- int ksnp_n_passive_ips; /* # of... */
-
- /* preferred local interfaces */
- __u32 ksnp_passive_ips[LNET_MAX_INTERFACES];
-} ksock_peer_t;
-
-typedef struct ksock_connreq {
- struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
- lnet_ni_t *ksncr_ni; /* chosen NI */
- struct socket *ksncr_sock; /* accepted socket */
-} ksock_connreq_t;
-
-extern ksock_nal_data_t ksocknal_data;
-extern ksock_tunables_t ksocknal_tunables;
-
-#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
-#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
-#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
- * preferred */
-
-typedef struct ksock_proto {
- /* version number of protocol */
- int pro_version;
-
- /* handshake function */
- int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *);
-
- /* handshake function */
- int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);
-
- /* message pack */
- void (*pro_pack)(ksock_tx_t *);
-
- /* message unpack */
- void (*pro_unpack)(ksock_msg_t *);
-
- /* queue tx on the connection */
- ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *);
-
- /* queue ZC ack on the connection */
- int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64);
-
- /* handle ZC request */
- int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int);
-
- /* handle ZC ACK */
- int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
-
- /* msg type matches the connection type:
- * return value:
- * return MATCH_NO : no
- * return MATCH_YES : matching type
- * return MATCH_MAY : can be backup */
- int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
-} ksock_proto_t;
-
-extern ksock_proto_t ksocknal_protocol_v1x;
-extern ksock_proto_t ksocknal_protocol_v2x;
-extern ksock_proto_t ksocknal_protocol_v3x;
-
-#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
-#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
-#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR
-
-#ifndef CPU_MASK_NONE
-#define CPU_MASK_NONE 0UL
-#endif
-
-static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
-{
-#if 1
- return crc32_le(crc, p, len);
-#else
- while (len-- > 0)
- crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
- return crc;
-#endif
-}
-
-static inline int
-ksocknal_route_mask(void)
-{
- if (!*ksocknal_tunables.ksnd_typed_conns)
- return (1 << SOCKLND_CONN_ANY);
-
- return ((1 << SOCKLND_CONN_CONTROL) |
- (1 << SOCKLND_CONN_BULK_IN) |
- (1 << SOCKLND_CONN_BULK_OUT));
-}
-
-static inline struct list_head *
-ksocknal_nid2peerlist(lnet_nid_t nid)
-{
- unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
-
- return &ksocknal_data.ksnd_peers[hash];
-}
-
-static inline void
-ksocknal_conn_addref(ksock_conn_t *conn)
-{
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
- atomic_inc(&conn->ksnc_conn_refcount);
-}
-
-void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
-void ksocknal_finalize_zcreq(ksock_conn_t *conn);
-
-static inline void
-ksocknal_conn_decref(ksock_conn_t *conn)
-{
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
- if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
- ksocknal_queue_zombie_conn(conn);
-}
-
-static inline int
-ksocknal_connsock_addref(ksock_conn_t *conn)
-{
- int rc = -ESHUTDOWN;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
- if (!conn->ksnc_closing) {
- LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
- atomic_inc(&conn->ksnc_sock_refcount);
- rc = 0;
- }
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- return rc;
-}
-
-static inline void
-ksocknal_connsock_decref(ksock_conn_t *conn)
-{
- LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
- if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
- LASSERT(conn->ksnc_closing);
- sock_release(conn->ksnc_sock);
- conn->ksnc_sock = NULL;
- ksocknal_finalize_zcreq(conn);
- }
-}
-
-static inline void
-ksocknal_tx_addref(ksock_tx_t *tx)
-{
- LASSERT(atomic_read(&tx->tx_refcount) > 0);
- atomic_inc(&tx->tx_refcount);
-}
-
-void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
-void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
-
-static inline void
-ksocknal_tx_decref(ksock_tx_t *tx)
-{
- LASSERT(atomic_read(&tx->tx_refcount) > 0);
- if (atomic_dec_and_test(&tx->tx_refcount))
- ksocknal_tx_done(NULL, tx);
-}
-
-static inline void
-ksocknal_route_addref(ksock_route_t *route)
-{
- LASSERT(atomic_read(&route->ksnr_refcount) > 0);
- atomic_inc(&route->ksnr_refcount);
-}
-
-void ksocknal_destroy_route(ksock_route_t *route);
-
-static inline void
-ksocknal_route_decref(ksock_route_t *route)
-{
- LASSERT(atomic_read(&route->ksnr_refcount) > 0);
- if (atomic_dec_and_test(&route->ksnr_refcount))
- ksocknal_destroy_route(route);
-}
-
-static inline void
-ksocknal_peer_addref(ksock_peer_t *peer)
-{
- LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
- atomic_inc(&peer->ksnp_refcount);
-}
-
-void ksocknal_destroy_peer(ksock_peer_t *peer);
-
-static inline void
-ksocknal_peer_decref(ksock_peer_t *peer)
-{
- LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
- if (atomic_dec_and_test(&peer->ksnp_refcount))
- ksocknal_destroy_peer(peer);
-}
-
-int ksocknal_startup(lnet_ni_t *ni);
-void ksocknal_shutdown(lnet_ni_t *ni);
-int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
-int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
-int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
- int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
-int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
-
-int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
-ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
-void ksocknal_peer_failed(ksock_peer_t *peer);
-int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
- struct socket *sock, int type);
-void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
-void ksocknal_terminate_conn(ksock_conn_t *conn);
-void ksocknal_destroy_conn(ksock_conn_t *conn);
-int ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
- __u32 ipaddr, int why);
-int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
-int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
-ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
- ksock_tx_t *tx, int nonblk);
-
-int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
- lnet_process_id_t id);
-ksock_tx_t *ksocknal_alloc_tx(int type, int size);
-void ksocknal_free_tx(ksock_tx_t *tx);
-ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-void ksocknal_next_tx_carrier(ksock_conn_t *conn);
-void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
-void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error);
-void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
-void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
-int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
-void ksocknal_thread_fini(void);
-void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
-int ksocknal_new_packet(ksock_conn_t *conn, int skip);
-int ksocknal_scheduler(void *arg);
-int ksocknal_connd(void *arg);
-int ksocknal_reaper(void *arg);
-int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *id,
- __u64 *incarnation);
-void ksocknal_read_callback(ksock_conn_t *conn);
-void ksocknal_write_callback(ksock_conn_t *conn);
-
-int ksocknal_lib_zc_capable(ksock_conn_t *conn);
-void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_push_conn(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
-int ksocknal_lib_setup_sock(struct socket *so);
-int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
-int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
-void ksocknal_lib_eager_ack(ksock_conn_t *conn);
-int ksocknal_lib_recv_iov(ksock_conn_t *conn);
-int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
- int *rxmem, int *nagle);
-
-int ksocknal_tunables_init(void);
-
-void ksocknal_lib_csum_tx(ksock_tx_t *tx);
-
-int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
-int ksocknal_lib_bind_thread_to_cpu(int id);
-
-#endif /* _SOCKLND_SOCKLND_H_ */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
deleted file mode 100644
index 354c8105f9a0..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ /dev/null
@@ -1,2633 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "socklnd.h"
-
-ksock_tx_t *
-ksocknal_alloc_tx(int type, int size)
-{
- ksock_tx_t *tx = NULL;
-
- if (type == KSOCK_MSG_NOOP) {
- LASSERT(size == KSOCK_NOOP_TX_SIZE);
-
- /* searching for a noop tx in free list */
- spin_lock(&ksocknal_data.ksnd_tx_lock);
-
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
- next, ksock_tx_t, tx_list);
- LASSERT(tx->tx_desc_size == size);
- list_del(&tx->tx_list);
- }
-
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
- }
-
- if (tx == NULL)
- LIBCFS_ALLOC(tx, size);
-
- if (tx == NULL)
- return NULL;
-
- atomic_set(&tx->tx_refcount, 1);
- tx->tx_zc_aborted = 0;
- tx->tx_zc_capable = 0;
- tx->tx_zc_checked = 0;
- tx->tx_desc_size = size;
-
- atomic_inc(&ksocknal_data.ksnd_nactive_txs);
-
- return tx;
-}
-
-ksock_tx_t *
-ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
-{
- ksock_tx_t *tx;
-
- tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
- if (tx == NULL) {
- CERROR("Can't allocate noop tx desc\n");
- return NULL;
- }
-
- tx->tx_conn = NULL;
- tx->tx_lnetmsg = NULL;
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1;
- tx->tx_nonblk = nonblk;
-
- socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
-
- return tx;
-}
-
-
-void
-ksocknal_free_tx (ksock_tx_t *tx)
-{
- atomic_dec(&ksocknal_data.ksnd_nactive_txs);
-
- if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
- /* it's a noop tx */
- spin_lock(&ksocknal_data.ksnd_tx_lock);
-
- list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
-
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
- } else {
- LIBCFS_FREE(tx, tx->tx_desc_size);
- }
-}
-
-static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
-{
- struct kvec *iov = tx->tx_iov;
- int nob;
- int rc;
-
- LASSERT(tx->tx_niov > 0);
-
- /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
- rc = ksocknal_lib_send_iov(conn, tx);
-
- if (rc <= 0) /* sent nothing? */
- return rc;
-
- nob = rc;
- LASSERT (nob <= tx->tx_resid);
- tx->tx_resid -= nob;
-
- /* "consume" iov */
- do {
- LASSERT(tx->tx_niov > 0);
-
- if (nob < (int) iov->iov_len) {
- iov->iov_base = (void *)((char *)iov->iov_base + nob);
- iov->iov_len -= nob;
- return rc;
- }
-
- nob -= iov->iov_len;
- tx->tx_iov = ++iov;
- tx->tx_niov--;
- } while (nob != 0);
-
- return rc;
-}
-
-static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
-{
- lnet_kiov_t *kiov = tx->tx_kiov;
- int nob;
- int rc;
-
- LASSERT(tx->tx_niov == 0);
- LASSERT(tx->tx_nkiov > 0);
-
- /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
- rc = ksocknal_lib_send_kiov(conn, tx);
-
- if (rc <= 0) /* sent nothing? */
- return rc;
-
- nob = rc;
- LASSERT (nob <= tx->tx_resid);
- tx->tx_resid -= nob;
-
- /* "consume" kiov */
- do {
- LASSERT(tx->tx_nkiov > 0);
-
- if (nob < (int)kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
- return rc;
- }
-
- nob -= (int)kiov->kiov_len;
- tx->tx_kiov = ++kiov;
- tx->tx_nkiov--;
- } while (nob != 0);
-
- return rc;
-}
-
-static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
-{
- int rc;
- int bufnob;
-
- if (ksocknal_data.ksnd_stall_tx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
- }
-
- LASSERT(tx->tx_resid != 0);
-
- rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
- return -ESHUTDOWN;
- }
-
- do {
- if (ksocknal_data.ksnd_enomem_tx > 0) {
- /* testing... */
- ksocknal_data.ksnd_enomem_tx--;
- rc = -EAGAIN;
- } else if (tx->tx_niov != 0) {
- rc = ksocknal_send_iov (conn, tx);
- } else {
- rc = ksocknal_send_kiov (conn, tx);
- }
-
- bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
- if (rc > 0) /* sent something? */
- conn->ksnc_tx_bufnob += rc; /* account it */
-
- if (bufnob < conn->ksnc_tx_bufnob) {
- /* allocated send buffer bytes < computed; infer
- * something got ACKed */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_tx_bufnob = bufnob;
- mb();
- }
-
- if (rc <= 0) { /* Didn't write anything? */
-
- if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
- rc = -EAGAIN;
-
- /* Check if EAGAIN is due to memory pressure */
- if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
- rc = -ENOMEM;
-
- break;
- }
-
- /* socket's wmem_queued now includes 'rc' bytes */
- atomic_sub (rc, &conn->ksnc_tx_nob);
- rc = 0;
-
- } while (tx->tx_resid != 0);
-
- ksocknal_connsock_decref(conn);
- return rc;
-}
-
-static int
-ksocknal_recv_iov (ksock_conn_t *conn)
-{
- struct kvec *iov = conn->ksnc_rx_iov;
- int nob;
- int rc;
-
- LASSERT(conn->ksnc_rx_niov > 0);
-
- /* Never touch conn->ksnc_rx_iov or change connection
- * status inside ksocknal_lib_recv_iov */
- rc = ksocknal_lib_recv_iov(conn);
-
- if (rc <= 0)
- return rc;
-
- /* received something... */
- nob = rc;
-
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
-
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
-
- do {
- LASSERT(conn->ksnc_rx_niov > 0);
-
- if (nob < (int)iov->iov_len) {
- iov->iov_len -= nob;
- iov->iov_base += nob;
- return -EAGAIN;
- }
-
- nob -= iov->iov_len;
- conn->ksnc_rx_iov = ++iov;
- conn->ksnc_rx_niov--;
- } while (nob != 0);
-
- return rc;
-}
-
-static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
-{
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
- int nob;
- int rc;
- LASSERT(conn->ksnc_rx_nkiov > 0);
-
- /* Never touch conn->ksnc_rx_kiov or change connection
- * status inside ksocknal_lib_recv_iov */
- rc = ksocknal_lib_recv_kiov(conn);
-
- if (rc <= 0)
- return rc;
-
- /* received something... */
- nob = rc;
-
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
-
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
-
- do {
- LASSERT(conn->ksnc_rx_nkiov > 0);
-
- if (nob < (int) kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
- return -EAGAIN;
- }
-
- nob -= kiov->kiov_len;
- conn->ksnc_rx_kiov = ++kiov;
- conn->ksnc_rx_nkiov--;
- } while (nob != 0);
-
- return 1;
-}
-
-static int
-ksocknal_receive (ksock_conn_t *conn)
-{
- /* Return 1 on success, 0 on EOF, < 0 on error.
- * Caller checks ksnc_rx_nob_wanted to determine
- * progress/completion. */
- int rc;
-
- if (ksocknal_data.ksnd_stall_rx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
- }
-
- rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
- return -ESHUTDOWN;
- }
-
- for (;;) {
- if (conn->ksnc_rx_niov != 0)
- rc = ksocknal_recv_iov (conn);
- else
- rc = ksocknal_recv_kiov (conn);
-
- if (rc <= 0) {
- /* error/EOF or partial receive */
- if (rc == -EAGAIN) {
- rc = 1;
- } else if (rc == 0 && conn->ksnc_rx_started) {
- /* EOF in the middle of a message */
- rc = -EPROTO;
- }
- break;
- }
-
- /* Completed a fragment */
-
- if (conn->ksnc_rx_nob_wanted == 0) {
- rc = 1;
- break;
- }
- }
-
- ksocknal_connsock_decref(conn);
- return rc;
-}
-
-void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
-{
- lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
- int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
-
- LASSERT(ni != NULL || tx->tx_conn != NULL);
-
- if (tx->tx_conn != NULL)
- ksocknal_conn_decref(tx->tx_conn);
-
- if (ni == NULL && tx->tx_conn != NULL)
- ni = tx->tx_conn->ksnc_peer->ksnp_ni;
-
- ksocknal_free_tx (tx);
-	if (lnetmsg != NULL) /* KSOCK_MSG_NOOP goes without lnetmsg */
- lnet_finalize (ni, lnetmsg, rc);
-}
-
-void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
-{
- ksock_tx_t *tx;
-
- while (!list_empty (txlist)) {
- tx = list_entry(txlist->next, ksock_tx_t, tx_list);
-
- if (error && tx->tx_lnetmsg != NULL) {
- CNETERR("Deleting packet type %d len %d %s->%s\n",
- le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
- le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
- libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
- libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
- } else if (error) {
- CNETERR("Deleting noop packet\n");
- }
-
- list_del(&tx->tx_list);
-
- LASSERT(atomic_read(&tx->tx_refcount) == 1);
- ksocknal_tx_done(ni, tx);
- }
-}
-
-static void
-ksocknal_check_zc_req(ksock_tx_t *tx)
-{
- ksock_conn_t *conn = tx->tx_conn;
- ksock_peer_t *peer = conn->ksnc_peer;
-
- /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
- * to ksnp_zc_req_list if some fragment of this message should be sent
- * zero-copy. Our peer will send an ACK containing this cookie when
- * she has received this message to tell us we can signal completion.
- * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
- * ksnp_zc_req_list. */
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_zc_capable);
-
- tx->tx_zc_checked = 1;
-
- if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
- !conn->ksnc_zc_capable)
- return;
-
- /* assign cookie and queue tx to pending list, it will be released when
- * a matching ack is received. See ksocknal_handle_zcack() */
-
- ksocknal_tx_addref(tx);
-
- spin_lock(&peer->ksnp_lock);
-
- /* ZC_REQ is going to be pinned to the peer */
- tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
-
- tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
-
- if (peer->ksnp_zc_next_cookie == 0)
- peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
-
- list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
-
- spin_unlock(&peer->ksnp_lock);
-}
-
-static void
-ksocknal_uncheck_zc_req(ksock_tx_t *tx)
-{
- ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
-
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_zc_capable);
-
- tx->tx_zc_checked = 0;
-
- spin_lock(&peer->ksnp_lock);
-
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
- /* Not waiting for an ACK */
- spin_unlock(&peer->ksnp_lock);
- return;
- }
-
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
-
- spin_unlock(&peer->ksnp_lock);
-
- ksocknal_tx_decref(tx);
-}
-
-static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
-{
- int rc;
-
- if (tx->tx_zc_capable && !tx->tx_zc_checked)
- ksocknal_check_zc_req(tx);
-
- rc = ksocknal_transmit (conn, tx);
-
- CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
-
- if (tx->tx_resid == 0) {
- /* Sent everything OK */
- LASSERT (rc == 0);
-
- return 0;
- }
-
- if (rc == -EAGAIN)
- return rc;
-
- if (rc == -ENOMEM) {
- static int counter;
-
- counter++; /* exponential backoff warnings */
- if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p\n", counter, conn);
-
- /* Queue on ksnd_enomem_conns for retry after a timeout */
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- /* enomem list takes over scheduler's ref... */
- LASSERT (conn->ksnc_tx_scheduled);
- list_add_tail(&conn->ksnc_tx_list,
- &ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
- SOCKNAL_ENOMEM_RETRY),
- ksocknal_data.ksnd_reaper_waketime))
- wake_up (&ksocknal_data.ksnd_reaper_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- return rc;
- }
-
- /* Actual error */
- LASSERT(rc < 0);
-
- if (!conn->ksnc_closing) {
- switch (rc) {
- case -ECONNRESET:
- LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
- &conn->ksnc_ipaddr);
- break;
- default:
- LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
- &conn->ksnc_ipaddr, rc);
- break;
- }
- CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
- conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- }
-
- if (tx->tx_zc_checked)
- ksocknal_uncheck_zc_req(tx);
-
- /* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
-
- return rc;
-}
-
-static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
-{
-
- /* called holding write lock on ksnd_global_lock */
-
- LASSERT(!route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
- LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
-
- route->ksnr_scheduled = 1; /* scheduling conn for connd */
- ksocknal_route_addref(route); /* extra ref for connd */
-
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-
- list_add_tail(&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
- wake_up(&ksocknal_data.ksnd_connd_waitq);
-
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-}
-
-void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
-{
- ksock_route_t *route;
-
- /* called holding write lock on ksnd_global_lock */
- for (;;) {
- /* launch any/all connections that need it */
- route = ksocknal_find_connectable_route_locked(peer);
- if (route == NULL)
- return;
-
- ksocknal_launch_connection_locked(route);
- }
-}
-
-ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
-{
- struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_conn_t *typed = NULL;
- ksock_conn_t *fallback = NULL;
- int tnob = 0;
- int fnob = 0;
-
- list_for_each (tmp, &peer->ksnp_conns) {
- ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
- int nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
- int rc;
-
- LASSERT(!c->ksnc_closing);
- LASSERT(c->ksnc_proto != NULL &&
- c->ksnc_proto->pro_match_tx != NULL);
-
- rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
-
- switch (rc) {
- default:
- LBUG();
- case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
- continue;
-
- case SOCKNAL_MATCH_YES: /* typed connection */
- if (typed == NULL || tnob > nob ||
- (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
- typed = c;
- tnob = nob;
- }
- break;
-
- case SOCKNAL_MATCH_MAY: /* fallback connection */
- if (fallback == NULL || fnob > nob ||
- (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
- fallback = c;
- fnob = nob;
- }
- break;
- }
- }
-
- /* prefer the typed selection */
- conn = (typed != NULL) ? typed : fallback;
-
- if (conn != NULL)
- conn->ksnc_tx_last_post = cfs_time_current();
-
- return conn;
-}
-
-void
-ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
-{
- conn->ksnc_proto->pro_pack(tx);
-
- atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
- ksocknal_conn_addref(conn); /* +1 ref for tx */
- tx->tx_conn = conn;
-}
-
-void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
-{
- ksock_sched_t *sched = conn->ksnc_scheduler;
- ksock_msg_t *msg = &tx->tx_msg;
- ksock_tx_t *ztx = NULL;
- int bufnob = 0;
-
- /* called holding global lock (read or irq-write) and caller may
- * not have dropped this lock between finding conn and calling me,
- * so we don't need the {get,put}connsock dance to deref
- * ksnc_sock... */
- LASSERT(!conn->ksnc_closing);
-
- CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
-
- ksocknal_tx_prep(conn, tx);
-
- /* Ensure the frags we've been given EXACTLY match the number of
- * bytes we want to send. Many TCP/IP stacks disregard any total
- * size parameters passed to them and just look at the frags.
- *
- * We always expect at least 1 mapped fragment containing the
- * complete ksocknal message header. */
- LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
- lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
- (unsigned int)tx->tx_nob);
- LASSERT(tx->tx_niov >= 1);
- LASSERT(tx->tx_resid == tx->tx_nob);
-
- CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
- tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
- KSOCK_MSG_NOOP,
- tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
-
- /*
- * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
- * but they're used inside spinlocks a lot.
- */
- bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
- spin_lock_bh(&sched->kss_lock);
-
- if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
- /* First packet starts the timeout */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_tx_bufnob = 0;
- mb(); /* order with adding to tx_queue */
- }
-
- if (msg->ksm_type == KSOCK_MSG_NOOP) {
-		/* The packet is a noop ZC ACK; try to piggyback the ack_cookie
- * on a normal packet so I don't need to send it */
- LASSERT(msg->ksm_zc_cookies[1] != 0);
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
-
- if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
-			ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */
-
- } else {
-		/* It's a normal packet - can it piggyback a noop zc-ack that
- * has been queued already? */
- LASSERT(msg->ksm_zc_cookies[1] == 0);
- LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);
-
- ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
- /* ztx will be released later */
- }
-
- if (ztx != NULL) {
- atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
- list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
- }
-
- if (conn->ksnc_tx_ready && /* able to send */
- !conn->ksnc_tx_scheduled) { /* not scheduled to send */
- /* +1 ref for scheduler */
- ksocknal_conn_addref(conn);
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- wake_up (&sched->kss_waitq);
- }
-
- spin_unlock_bh(&sched->kss_lock);
-}
-
-
-ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
-{
- unsigned long now = cfs_time_current();
- struct list_head *tmp;
- ksock_route_t *route;
-
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
-
- LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
-
- if (route->ksnr_scheduled) /* connections being established */
- continue;
-
- /* all route types connected ? */
- if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
- continue;
-
- if (!(route->ksnr_retry_interval == 0 || /* first attempt */
- cfs_time_aftereq(now, route->ksnr_timeout))) {
- CDEBUG(D_NET,
- "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
- &route->ksnr_ipaddr,
- route->ksnr_connected,
- route->ksnr_retry_interval,
- cfs_duration_sec(route->ksnr_timeout - now));
- continue;
- }
-
- return route;
- }
-
- return NULL;
-}
-
-ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
-{
- struct list_head *tmp;
- ksock_route_t *route;
-
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
-
- LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
-
- if (route->ksnr_scheduled)
- return route;
- }
-
- return NULL;
-}
-
-int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
-{
- ksock_peer_t *peer;
- ksock_conn_t *conn;
- rwlock_t *g_lock;
- int retry;
- int rc;
-
- LASSERT(tx->tx_conn == NULL);
-
- g_lock = &ksocknal_data.ksnd_global_lock;
-
- for (retry = 0;; retry = 1) {
- read_lock(g_lock);
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) {
- if (ksocknal_find_connectable_route_locked(peer) == NULL) {
- conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn != NULL) {
- /* I've got no routes that need to be
- * connecting and I do have an actual
- * connection... */
- ksocknal_queue_tx_locked (tx, conn);
- read_unlock(g_lock);
- return 0;
- }
- }
- }
-
- /* I'll need a write lock... */
- read_unlock(g_lock);
-
- write_lock_bh(g_lock);
-
- peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL)
- break;
-
- write_unlock_bh(g_lock);
-
- if ((id.pid & LNET_PID_USERFLAG) != 0) {
- CERROR("Refusing to create a connection to userspace process %s\n",
- libcfs_id2str(id));
- return -EHOSTUNREACH;
- }
-
- if (retry) {
- CERROR("Can't find peer %s\n", libcfs_id2str(id));
- return -EHOSTUNREACH;
- }
-
- rc = ksocknal_add_peer(ni, id,
- LNET_NIDADDR(id.nid),
- lnet_acceptor_port());
- if (rc != 0) {
- CERROR("Can't add peer %s: %d\n",
- libcfs_id2str(id), rc);
- return rc;
- }
- }
-
- ksocknal_launch_all_connections_locked(peer);
-
- conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn != NULL) {
- /* Connection exists; queue message on it */
- ksocknal_queue_tx_locked (tx, conn);
- write_unlock_bh(g_lock);
- return 0;
- }
-
- if (peer->ksnp_accepting > 0 ||
- ksocknal_find_connecting_route_locked (peer) != NULL) {
- /* the message is going to be pinned to the peer */
- tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-
- /* Queue the message until a connection is established */
- list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
- write_unlock_bh(g_lock);
- return 0;
- }
-
- write_unlock_bh(g_lock);
-
- /* NB Routes may be ignored if connections to them failed recently */
- CNETERR("No usable routes to %s\n", libcfs_id2str(id));
- return -EHOSTUNREACH;
-}
-
-int
-ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
-{
- int mpflag = 1;
- int type = lntmsg->msg_type;
- lnet_process_id_t target = lntmsg->msg_target;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- ksock_tx_t *tx;
- int desc_size;
- int rc;
-
- /* NB 'private' is different depending on what we're sending.
- * Just ignore it... */
-
- CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
- payload_nob, payload_niov, libcfs_id2str(target));
-
- LASSERT(payload_nob == 0 || payload_niov > 0);
- LASSERT(payload_niov <= LNET_MAX_IOV);
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!in_interrupt ());
-
- if (payload_iov != NULL)
- desc_size = offsetof(ksock_tx_t,
- tx_frags.virt.iov[1 + payload_niov]);
- else
- desc_size = offsetof(ksock_tx_t,
- tx_frags.paged.kiov[payload_niov]);
-
- if (lntmsg->msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
- tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
- if (tx == NULL) {
- CERROR("Can't allocate tx desc type %d size %d\n",
- type, desc_size);
- if (lntmsg->msg_vmflush)
- cfs_memory_pressure_restore(mpflag);
- return -ENOMEM;
- }
-
- tx->tx_conn = NULL; /* set when assigned a conn */
- tx->tx_lnetmsg = lntmsg;
-
- if (payload_iov != NULL) {
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1 +
- lnet_extract_iov(payload_niov, &tx->tx_iov[1],
- payload_niov, payload_iov,
- payload_offset, payload_nob);
- } else {
- tx->tx_niov = 1;
- tx->tx_iov = &tx->tx_frags.paged.iov;
- tx->tx_kiov = tx->tx_frags.paged.kiov;
- tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
-
- if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
- tx->tx_zc_capable = 1;
- }
-
- socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
-
- /* The first fragment will be set later in pro_pack */
- rc = ksocknal_launch_packet(ni, tx, target);
- if (!mpflag)
- cfs_memory_pressure_restore(mpflag);
-
- if (rc == 0)
- return 0;
-
- ksocknal_free_tx(tx);
- return -EIO;
-}
-
-int
-ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return 0;
-}
-
-void
-ksocknal_thread_fini (void)
-{
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads--;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-}
-
-int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
-{
- static char ksocknal_slop_buffer[4096];
-
- int nob;
- unsigned int niov;
- int skipped;
-
- LASSERT(conn->ksnc_proto != NULL);
-
- if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
- /* Remind the socket to ack eagerly... */
- ksocknal_lib_eager_ack(conn);
- }
-
- if (nob_to_skip == 0) { /* right at next packet boundary now */
- conn->ksnc_rx_started = 0;
- mb(); /* racing with timeout thread */
-
- switch (conn->ksnc_proto->pro_version) {
- case KSOCK_PROTO_V2:
- case KSOCK_PROTO_V3:
- conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
-
- conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
- conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
- conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
- break;
-
- case KSOCK_PROTO_V1:
- /* Receiving bare lnet_hdr_t */
- conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
- conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
-
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
- break;
-
- default:
- LBUG ();
- }
- conn->ksnc_rx_niov = 1;
-
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_csum = ~0;
- return 1;
- }
-
- /* Set up to skip as much as possible now. If there's more left
- * (ran out of iov entries) we'll get called again */
-
- conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
- conn->ksnc_rx_nob_left = nob_to_skip;
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- skipped = 0;
- niov = 0;
-
- do {
- nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
-
- conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
- conn->ksnc_rx_iov[niov].iov_len = nob;
- niov++;
- skipped += nob;
- nob_to_skip -= nob;
-
- } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
- niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
-
- conn->ksnc_rx_niov = niov;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_nob_wanted = skipped;
- return 0;
-}
-
-static int
-ksocknal_process_receive (ksock_conn_t *conn)
-{
- lnet_hdr_t *lhdr;
- lnet_process_id_t *id;
- int rc;
-
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
-
- /* NB: sched lock NOT held */
- /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
- LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
- again:
- if (conn->ksnc_rx_nob_wanted != 0) {
- rc = ksocknal_receive(conn);
-
- if (rc <= 0) {
- LASSERT (rc != -EAGAIN);
-
- if (rc == 0)
- CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
- conn,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- else if (!conn->ksnc_closing)
- CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
- conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
-
- /* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
- return (rc == 0 ? -ESHUTDOWN : rc);
- }
-
- if (conn->ksnc_rx_nob_wanted != 0) {
- /* short read */
- return -EAGAIN;
- }
- }
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_KSM_HEADER:
- if (conn->ksnc_flip) {
- __swab32s(&conn->ksnc_msg.ksm_type);
- __swab32s(&conn->ksnc_msg.ksm_csum);
- __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
- __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
- }
-
- if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
- conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
- CERROR("%s: Unknown message type: %x\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- conn->ksnc_msg.ksm_type);
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, -EPROTO);
- return -EPROTO;
- }
-
- if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
- conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
- conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
- /* NOOP Checksum error */
- CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, -EPROTO);
- return -EIO;
- }
-
- if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
- __u64 cookie = 0;
-
- LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
-
- if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
- cookie = conn->ksnc_msg.ksm_zc_cookies[0];
-
- rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
- conn->ksnc_msg.ksm_zc_cookies[1]);
-
- if (rc != 0) {
- CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings(conn, -EPROTO);
- return rc;
- }
- }
-
- if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
- ksocknal_new_packet (conn, 0);
- return 0; /* NOOP is done and just return */
- }
-
- conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
- conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
-
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
-
- conn->ksnc_rx_niov = 1;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
-
- goto again; /* read lnet header now */
-
- case SOCKNAL_RX_LNET_HEADER:
- /* unpack message header */
- conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
-
- if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
- /* Userspace peer */
- lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
- id = &conn->ksnc_peer->ksnp_id;
-
- /* Substitute process ID assigned at connection time */
- lhdr->src_pid = cpu_to_le32(id->pid);
- lhdr->src_nid = cpu_to_le64(id->nid);
- }
-
- conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
- ksocknal_conn_addref(conn); /* ++ref while parsing */
-
- rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
- &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
- conn->ksnc_peer->ksnp_id.nid, conn, 0);
- if (rc < 0) {
- /* I just received garbage: give up on this conn */
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings (conn, rc);
- ksocknal_conn_decref(conn);
- return -EPROTO;
- }
-
- /* I'm racing with ksocknal_recv() */
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
-
- if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
- return 0;
-
- /* ksocknal_recv() got called */
- goto again;
-
- case SOCKNAL_RX_LNET_PAYLOAD:
- /* payload all received */
- rc = 0;
-
- if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
- conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
- conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
- CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
- rc = -EIO;
- }
-
- if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
- LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
-
- lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
- id = &conn->ksnc_peer->ksnp_id;
-
- rc = conn->ksnc_proto->pro_handle_zcreq(conn,
- conn->ksnc_msg.ksm_zc_cookies[0],
- *ksocknal_tunables.ksnd_nonblk_zcack ||
- le64_to_cpu(lhdr->src_nid) != id->nid);
- }
-
- lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
-
- if (rc != 0) {
- ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings (conn, rc);
- return -EPROTO;
- }
- /* Fall through */
-
- case SOCKNAL_RX_SLOP:
- /* starting new packet? */
- if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
- return 0; /* come back later */
- goto again; /* try to finish reading slop now */
-
- default:
- break;
- }
-
- /* Not Reached */
- LBUG();
- return -EINVAL; /* keep gcc happy */
-}
-
-int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
-{
- ksock_conn_t *conn = private;
- ksock_sched_t *sched = conn->ksnc_scheduler;
-
- LASSERT(mlen <= rlen);
- LASSERT(niov <= LNET_MAX_IOV);
-
- conn->ksnc_cookie = msg;
- conn->ksnc_rx_nob_wanted = mlen;
- conn->ksnc_rx_nob_left = rlen;
-
- if (mlen == 0 || iov != NULL) {
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
- conn->ksnc_rx_niov =
- lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
- niov, iov, offset, mlen);
- } else {
- conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
- conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
- conn->ksnc_rx_nkiov =
- lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
- niov, kiov, offset, mlen);
- }
-
- LASSERT(mlen ==
- lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
- lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
-
- LASSERT(conn->ksnc_rx_scheduled);
-
- spin_lock_bh(&sched->kss_lock);
-
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_PARSE_WAIT:
- list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- wake_up (&sched->kss_waitq);
- LASSERT (conn->ksnc_rx_ready);
- break;
-
- case SOCKNAL_RX_PARSE:
- /* scheduler hasn't noticed I'm parsing yet */
- break;
- }
-
- conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
-
- spin_unlock_bh(&sched->kss_lock);
- ksocknal_conn_decref(conn);
- return 0;
-}
-
-static inline int
-ksocknal_sched_cansleep(ksock_sched_t *sched)
-{
- int rc;
-
- spin_lock_bh(&sched->kss_lock);
-
- rc = !ksocknal_data.ksnd_shuttingdown &&
- list_empty(&sched->kss_rx_conns) &&
- list_empty(&sched->kss_tx_conns);
-
- spin_unlock_bh(&sched->kss_lock);
- return rc;
-}
-
-int ksocknal_scheduler(void *arg)
-{
- struct ksock_sched_info *info;
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
- int nloops = 0;
- long id = (long)arg;
-
- info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
- sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
-
- cfs_block_allsigs();
-
- rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
- if (rc != 0) {
- CERROR("Can't set CPT affinity to %d: %d\n",
- info->ksi_cpt, rc);
- }
-
- spin_lock_bh(&sched->kss_lock);
-
- while (!ksocknal_data.ksnd_shuttingdown) {
- int did_something = 0;
-
- /* Ensure I progress everything semi-fairly */
-
- if (!list_empty (&sched->kss_rx_conns)) {
- conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
- list_del(&conn->ksnc_rx_list);
-
- LASSERT(conn->ksnc_rx_scheduled);
- LASSERT(conn->ksnc_rx_ready);
-
- /* clear rx_ready in case receive isn't complete.
- * Do it BEFORE we call process_recv, since
- * data_ready can set it any time after we release
- * kss_lock. */
- conn->ksnc_rx_ready = 0;
- spin_unlock_bh(&sched->kss_lock);
-
- rc = ksocknal_process_receive(conn);
-
- spin_lock_bh(&sched->kss_lock);
-
- /* I'm the only one that can clear this flag */
- LASSERT(conn->ksnc_rx_scheduled);
-
- /* Did process_receive get everything it wanted? */
- if (rc == 0)
- conn->ksnc_rx_ready = 1;
-
- if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
- /* Conn blocked waiting for ksocknal_recv()
- * I change its state (under lock) to signal
- * it can be rescheduled */
- conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
- } else if (conn->ksnc_rx_ready) {
- /* reschedule for rx */
- list_add_tail (&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- } else {
- conn->ksnc_rx_scheduled = 0;
- /* drop my ref */
- ksocknal_conn_decref(conn);
- }
-
- did_something = 1;
- }
-
- if (!list_empty (&sched->kss_tx_conns)) {
- LIST_HEAD(zlist);
-
- if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist,
- &sched->kss_zombie_noop_txs);
- list_del_init(&sched->kss_zombie_noop_txs);
- }
-
- conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
-
- LASSERT(conn->ksnc_tx_scheduled);
- LASSERT(conn->ksnc_tx_ready);
- LASSERT(!list_empty(&conn->ksnc_tx_queue));
-
- tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
-
- if (conn->ksnc_tx_carrier == tx)
- ksocknal_next_tx_carrier(conn);
-
- /* dequeue now so empty list => more to send */
- list_del(&tx->tx_list);
-
- /* Clear tx_ready in case send isn't complete. Do
- * it BEFORE we call process_transmit, since
- * write_space can set it any time after we release
- * kss_lock. */
- conn->ksnc_tx_ready = 0;
- spin_unlock_bh(&sched->kss_lock);
-
- if (!list_empty(&zlist)) {
-				/* freeing zombie noop txs is fast because
-				 * they are just put back on the freelist */
- ksocknal_txlist_done(NULL, &zlist, 0);
- }
-
- rc = ksocknal_process_transmit(conn, tx);
-
- if (rc == -ENOMEM || rc == -EAGAIN) {
- /* Incomplete send: replace tx on HEAD of tx_queue */
- spin_lock_bh(&sched->kss_lock);
- list_add(&tx->tx_list,
- &conn->ksnc_tx_queue);
- } else {
- /* Complete send; tx -ref */
- ksocknal_tx_decref(tx);
-
- spin_lock_bh(&sched->kss_lock);
- /* assume space for more */
- conn->ksnc_tx_ready = 1;
- }
-
- if (rc == -ENOMEM) {
- /* Do nothing; after a short timeout, this
- * conn will be reposted on kss_tx_conns. */
- } else if (conn->ksnc_tx_ready &&
- !list_empty(&conn->ksnc_tx_queue)) {
- /* reschedule for tx */
- list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- } else {
- conn->ksnc_tx_scheduled = 0;
- /* drop my ref */
- ksocknal_conn_decref(conn);
- }
-
- did_something = 1;
- }
- if (!did_something || /* nothing to do */
- ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
- spin_unlock_bh(&sched->kss_lock);
-
- nloops = 0;
-
- if (!did_something) { /* wait for something to do */
- rc = wait_event_interruptible_exclusive(
- sched->kss_waitq,
- !ksocknal_sched_cansleep(sched));
- LASSERT (rc == 0);
- } else {
- cond_resched();
- }
-
- spin_lock_bh(&sched->kss_lock);
- }
- }
-
- spin_unlock_bh(&sched->kss_lock);
- ksocknal_thread_fini();
- return 0;
-}
-
-/*
- * Add connection to kss_rx_conns of scheduler
- * and wakeup the scheduler.
- */
-void ksocknal_read_callback (ksock_conn_t *conn)
-{
- ksock_sched_t *sched;
-
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
-
- conn->ksnc_rx_ready = 1;
-
- if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- conn->ksnc_rx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
-
- wake_up (&sched->kss_waitq);
- }
- spin_unlock_bh(&sched->kss_lock);
-}
-
-/*
- * Add connection to kss_tx_conns of scheduler
- * and wakeup the scheduler.
- */
-void ksocknal_write_callback (ksock_conn_t *conn)
-{
- ksock_sched_t *sched;
-
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
-
- conn->ksnc_tx_ready = 1;
-
- if (!conn->ksnc_tx_scheduled && /* not being progressed */
- !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
-
- wake_up (&sched->kss_waitq);
- }
-
- spin_unlock_bh(&sched->kss_lock);
-}
-
-static ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
-{
- __u32 version = 0;
-
- if (hello->kshm_magic == LNET_PROTO_MAGIC)
- version = hello->kshm_version;
- else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
- version = __swab32(hello->kshm_version);
-
- if (version != 0) {
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 1)
- return NULL;
-
- if (*ksocknal_tunables.ksnd_protocol == 2 &&
- version == KSOCK_PROTO_V3)
- return NULL;
-#endif
- if (version == KSOCK_PROTO_V2)
- return &ksocknal_protocol_v2x;
-
- if (version == KSOCK_PROTO_V3)
- return &ksocknal_protocol_v3x;
-
- return NULL;
- }
-
- if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
- lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
-
- CLASSERT(sizeof (lnet_magicversion_t) ==
- offsetof (ksock_hello_msg_t, kshm_src_nid));
-
- if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
- hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
- return &ksocknal_protocol_v1x;
- }
-
- return NULL;
-}
-
-int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
-{
- /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
- ksock_net_t *net = (ksock_net_t *)ni->ni_data;
-
- LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
-
- /* rely on caller to hold a ref on socket so it wouldn't disappear */
- LASSERT(conn->ksnc_proto != NULL);
-
- hello->kshm_src_nid = ni->ni_nid;
- hello->kshm_dst_nid = peer_nid;
- hello->kshm_src_pid = the_lnet.ln_pid;
-
- hello->kshm_src_incarnation = net->ksnn_incarnation;
- hello->kshm_ctype = conn->ksnc_type;
-
- return conn->ksnc_proto->pro_send_hello(conn, hello);
-}
-
-static int
-ksocknal_invert_type(int type)
-{
- switch (type) {
- case SOCKLND_CONN_ANY:
- case SOCKLND_CONN_CONTROL:
- return type;
- case SOCKLND_CONN_BULK_IN:
- return SOCKLND_CONN_BULK_OUT;
- case SOCKLND_CONN_BULK_OUT:
- return SOCKLND_CONN_BULK_IN;
- default:
- return SOCKLND_CONN_NONE;
- }
-}
-
-int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
- __u64 *incarnation)
-{
- /* Return < 0 fatal error
- * 0 success
- * EALREADY lost connection race
- * EPROTO protocol version mismatch
- */
- struct socket *sock = conn->ksnc_sock;
- int active = (conn->ksnc_proto != NULL);
- int timeout;
- int proto_match;
- int rc;
- ksock_proto_t *proto;
- lnet_process_id_t recv_id;
-
- /* socket type set on active connections - not set on passive */
- LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
-
- timeout = active ? *ksocknal_tunables.ksnd_timeout :
- lnet_acceptor_timeout();
-
- rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
- if (rc != 0) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT (rc < 0);
- return rc;
- }
-
- if (hello->kshm_magic != LNET_PROTO_MAGIC &&
- hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
- hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
- /* Unexpected magic! */
- CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
- __cpu_to_le32 (hello->kshm_magic),
- LNET_PROTO_TCP_MAGIC,
- &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- rc = lnet_sock_read(sock, &hello->kshm_version,
- sizeof(hello->kshm_version), timeout);
- if (rc != 0) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0);
- return rc;
- }
-
- proto = ksocknal_parse_proto_version(hello);
- if (proto == NULL) {
- if (!active) {
- /* unknown protocol from peer, tell peer my protocol */
- conn->ksnc_proto = &ksocknal_protocol_v3x;
-#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 2)
- conn->ksnc_proto = &ksocknal_protocol_v2x;
- else if (*ksocknal_tunables.ksnd_protocol == 1)
- conn->ksnc_proto = &ksocknal_protocol_v1x;
-#endif
- hello->kshm_nips = 0;
- ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
- }
-
- CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
- conn->ksnc_proto->pro_version,
- &conn->ksnc_ipaddr);
-
- return -EPROTO;
- }
-
- proto_match = (conn->ksnc_proto == proto);
- conn->ksnc_proto = proto;
-
- /* receive the rest of hello message anyway */
- rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
- if (rc != 0) {
-		CERROR("Error %d reading or checking hello from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0);
- return rc;
- }
-
- *incarnation = hello->kshm_src_incarnation;
-
- if (hello->kshm_src_nid == LNET_NID_ANY) {
- CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
- &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- if (!active &&
- conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- /* Userspace NAL assigns peer process ID from socket */
- recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
- recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
- } else {
- recv_id.nid = hello->kshm_src_nid;
- recv_id.pid = hello->kshm_src_pid;
- }
-
- if (!active) {
- *peerid = recv_id;
-
- /* peer determines type */
- conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
- if (conn->ksnc_type == SOCKLND_CONN_NONE) {
- CERROR("Unexpected type %d from %s ip %pI4h\n",
- hello->kshm_ctype, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- return 0;
- }
-
- if (peerid->pid != recv_id.pid ||
- peerid->nid != recv_id.nid) {
- LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
- libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
- libcfs_id2str(recv_id));
- return -EPROTO;
- }
-
- if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
- /* Possible protocol mismatch or I lost the connection race */
- return proto_match ? EALREADY : EPROTO;
- }
-
- if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
- CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
- conn->ksnc_type, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
- hello->kshm_ctype);
- return -EPROTO;
- }
-
- return 0;
-}
-
-static int
-ksocknal_connect (ksock_route_t *route)
-{
- LIST_HEAD(zombies);
- ksock_peer_t *peer = route->ksnr_peer;
- int type;
- int wanted;
- struct socket *sock;
- unsigned long deadline;
- int retry_later = 0;
- int rc = 0;
-
- deadline = cfs_time_add(cfs_time_current(),
- cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- LASSERT(route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
-
- route->ksnr_connecting = 1;
-
- for (;;) {
- wanted = ksocknal_route_mask() & ~route->ksnr_connected;
-
- /* stop connecting if peer/route got closed under me, or
- * route got connected while queued */
- if (peer->ksnp_closing || route->ksnr_deleted ||
- wanted == 0) {
- retry_later = 0;
- break;
- }
-
- /* reschedule if peer is connecting to me */
- if (peer->ksnp_accepting > 0) {
- CDEBUG(D_NET,
- "peer %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
- retry_later = 1;
- }
-
- if (retry_later) /* needs reschedule */
- break;
-
- if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
- type = SOCKLND_CONN_ANY;
- } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
- type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
- type = SOCKLND_CONN_BULK_IN;
- } else {
- LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
- type = SOCKLND_CONN_BULK_OUT;
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- if (cfs_time_aftereq(cfs_time_current(), deadline)) {
- rc = -ETIMEDOUT;
- lnet_connect_console_error(rc, peer->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
-
- rc = lnet_connect(&sock, peer->ksnp_id.nid,
- route->ksnr_myipaddr,
- route->ksnr_ipaddr, route->ksnr_port);
- if (rc != 0)
- goto failed;
-
- rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
- if (rc < 0) {
- lnet_connect_console_error(rc, peer->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
-
- /* A +ve RC means I have to retry because I lost the connection
- * race or I have to renegotiate protocol version */
- retry_later = (rc != 0);
- if (retry_later)
- CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
- libcfs_nid2str(peer->ksnp_id.nid));
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- }
-
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
-
- if (retry_later) {
- /* re-queue for attention; this frees me up to handle
- * the peer's incoming connection request */
-
- if (rc == EALREADY ||
- (rc == 0 && peer->ksnp_accepting > 0)) {
-			/* We want to introduce a delay before the next
-			 * connection attempt if we lost the conn race, but
-			 * the race is usually resolved quickly, so
-			 * min_reconnectms should be a good heuristic */
- route->ksnr_retry_interval =
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
- route->ksnr_retry_interval);
- }
-
- ksocknal_launch_connection_locked(route);
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return retry_later;
-
- failed:
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
-
- /* This is a retry rather than a new connection */
- route->ksnr_retry_interval *= 2;
- route->ksnr_retry_interval =
- max(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
- route->ksnr_retry_interval =
- min(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
-
- LASSERT (route->ksnr_retry_interval != 0);
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
- route->ksnr_retry_interval);
-
- if (!list_empty(&peer->ksnp_tx_queue) &&
- peer->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer) == NULL) {
- ksock_conn_t *conn;
-
- /* ksnp_tx_queue is queued on a conn on successful
- * connection for V1.x and V2.x */
- if (!list_empty (&peer->ksnp_conns)) {
- conn = list_entry(peer->ksnp_conns.next,
- ksock_conn_t, ksnc_list);
- LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
- }
-
- /* take all the blocked packets while I've got the lock and
- * complete below... */
- list_splice_init(&peer->ksnp_tx_queue, &zombies);
- }
-
-#if 0 /* irrelevant with only eager routes */
- if (!route->ksnr_deleted) {
- /* make this route least-favourite for re-selection */
- list_del(&route->ksnr_list);
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
- }
-#endif
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_peer_failed(peer);
- ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
- return 0;
-}
-
-/*
- * Check whether we need to create more connds.
- * It will try to create a new thread if necessary; @timeout can be
- * updated if thread creation fails, so the caller won't keep retrying
- * while running out of resources.
- */
-static int
-ksocknal_connd_check_start(time64_t sec, long *timeout)
-{
- char name[16];
- int rc;
- int total = ksocknal_data.ksnd_connd_starting +
- ksocknal_data.ksnd_connd_running;
-
- if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
- /* still in initializing */
- return 0;
- }
-
- if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
- total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
- /* can't create more connd, or still have enough
- * threads to handle more connecting */
- return 0;
- }
-
- if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
- /* no pending connecting request */
- return 0;
- }
-
- if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
- /* may run out of resource, retry later */
- *timeout = cfs_time_seconds(1);
- return 0;
- }
-
- if (ksocknal_data.ksnd_connd_starting > 0) {
- /* serialize starting to avoid flood */
- return 0;
- }
-
- ksocknal_data.ksnd_connd_starting_stamp = sec;
- ksocknal_data.ksnd_connd_starting++;
- spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- /* NB: total is the next id */
- snprintf(name, sizeof(name), "socknal_cd%02d", total);
- rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
-
- spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- if (rc == 0)
- return 1;
-
- /* we tried ... */
- LASSERT(ksocknal_data.ksnd_connd_starting > 0);
- ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
-
- return 1;
-}
-
-/*
- * Check whether the current thread can exit: it returns 1 if there are too
- * many threads and no thread has been created in the past 120 seconds.
- * Also, this function may update @timeout to make the caller come back
- * again to recheck these conditions.
- */
-static int
-ksocknal_connd_check_stop(time64_t sec, long *timeout)
-{
- int val;
-
- if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
- /* still in initializing */
- return 0;
- }
-
- if (ksocknal_data.ksnd_connd_starting > 0) {
- /* in progress of starting new thread */
- return 0;
- }
-
- if (ksocknal_data.ksnd_connd_running <=
- *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
- return 0;
- }
-
- /* created thread in past 120 seconds? */
- val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
- SOCKNAL_CONND_TIMEOUT - sec);
-
- *timeout = (val > 0) ? cfs_time_seconds(val) :
- cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
- if (val > 0)
- return 0;
-
-	/* no thread created in the past 120 seconds */
-
- return ksocknal_data.ksnd_connd_running >
- ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
-}
-
-/* Go through connd_routes queue looking for a route that we can process
- * right now, @timeout_p can be updated if we need to come back later */
-static ksock_route_t *
-ksocknal_connd_get_route_locked(signed long *timeout_p)
-{
- ksock_route_t *route;
- unsigned long now;
-
- now = cfs_time_current();
-
- /* connd_routes can contain both pending and ordinary routes */
- list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
-
- if (route->ksnr_retry_interval == 0 ||
- cfs_time_aftereq(now, route->ksnr_timeout))
- return route;
-
- if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
- (int)*timeout_p > (int)(route->ksnr_timeout - now))
- *timeout_p = (int)(route->ksnr_timeout - now);
- }
-
- return NULL;
-}
-
-int
-ksocknal_connd (void *arg)
-{
- spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
- wait_queue_t wait;
- int nloops = 0;
- int cons_retry = 0;
-
- cfs_block_allsigs();
-
- init_waitqueue_entry(&wait, current);
-
- spin_lock_bh(connd_lock);
-
- LASSERT(ksocknal_data.ksnd_connd_starting > 0);
- ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_running++;
-
- while (!ksocknal_data.ksnd_shuttingdown) {
- ksock_route_t *route = NULL;
- time64_t sec = ktime_get_real_seconds();
- long timeout = MAX_SCHEDULE_TIMEOUT;
- int dropped_lock = 0;
-
- if (ksocknal_connd_check_stop(sec, &timeout)) {
- /* wakeup another one to check stop */
- wake_up(&ksocknal_data.ksnd_connd_waitq);
- break;
- }
-
- if (ksocknal_connd_check_start(sec, &timeout)) {
- /* created new thread */
- dropped_lock = 1;
- }
-
- if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
- /* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
- next, ksock_connreq_t, ksncr_list);
-
- list_del(&cr->ksncr_list);
- spin_unlock_bh(connd_lock);
- dropped_lock = 1;
-
- ksocknal_create_conn(cr->ksncr_ni, NULL,
- cr->ksncr_sock, SOCKLND_CONN_NONE);
- lnet_ni_decref(cr->ksncr_ni);
- LIBCFS_FREE(cr, sizeof(*cr));
-
- spin_lock_bh(connd_lock);
- }
-
- /* Only handle an outgoing connection request if there
- * is a thread left to handle incoming connections and
- * create new connd */
- if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
- ksocknal_data.ksnd_connd_running) {
- route = ksocknal_connd_get_route_locked(&timeout);
- }
- if (route != NULL) {
- list_del (&route->ksnr_connd_list);
- ksocknal_data.ksnd_connd_connecting++;
- spin_unlock_bh(connd_lock);
- dropped_lock = 1;
-
- if (ksocknal_connect(route)) {
- /* consecutive retry */
- if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
- CWARN("massive consecutive re-connecting to %pI4h\n",
- &route->ksnr_ipaddr);
- cons_retry = 0;
- }
- } else {
- cons_retry = 0;
- }
-
- ksocknal_route_decref(route);
-
- spin_lock_bh(connd_lock);
- ksocknal_data.ksnd_connd_connecting--;
- }
-
- if (dropped_lock) {
- if (++nloops < SOCKNAL_RESCHED)
- continue;
- spin_unlock_bh(connd_lock);
- nloops = 0;
- cond_resched();
- spin_lock_bh(connd_lock);
- continue;
- }
-
- /* Nothing to do for 'timeout' */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
- spin_unlock_bh(connd_lock);
-
- nloops = 0;
- schedule_timeout(timeout);
-
- remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
- spin_lock_bh(connd_lock);
- }
- ksocknal_data.ksnd_connd_running--;
- spin_unlock_bh(connd_lock);
-
- ksocknal_thread_fini();
- return 0;
-}
-
-static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
-{
- /* We're called with a shared lock on ksnd_global_lock */
- ksock_conn_t *conn;
- struct list_head *ctmp;
-
- list_for_each (ctmp, &peer->ksnp_conns) {
- int error;
- conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
-
- /* Don't need the {get,put}connsock dance to deref ksnc_sock */
- LASSERT(!conn->ksnc_closing);
-
-		/* SOCK_ERROR will reset the socket's error code on
-		 * some platforms (like Darwin 8.x) */
- error = conn->ksnc_sock->sk->sk_err;
- if (error != 0) {
- ksocknal_conn_addref(conn);
-
- switch (error) {
- case ECONNRESET:
- CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- case ETIMEDOUT:
- CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- default:
-				CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d)\n",
- error,
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- }
-
- return conn;
- }
-
- if (conn->ksnc_rx_started &&
- cfs_time_aftereq(cfs_time_current(),
- conn->ksnc_rx_deadline)) {
- /* Timed out incomplete incoming message */
- ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port,
- conn->ksnc_rx_state,
- conn->ksnc_rx_nob_wanted,
- conn->ksnc_rx_nob_left);
- return conn;
- }
-
- if ((!list_empty(&conn->ksnc_tx_queue) ||
- conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
- cfs_time_aftereq(cfs_time_current(),
- conn->ksnc_tx_deadline)) {
- /* Timed out messages queued for sending or
- * buffered in the socket's send buffer */
- ksocknal_conn_addref(conn);
-			CNETERR("Timeout sending data to %s (%pI4h:%d); the network or that node may be down.\n",
- libcfs_id2str(peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- return conn;
- }
- }
-
- return NULL;
-}
-
-static inline void
-ksocknal_flush_stale_txs(ksock_peer_t *peer)
-{
- ksock_tx_t *tx;
- LIST_HEAD(stale_txs);
-
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
-
- while (!list_empty (&peer->ksnp_tx_queue)) {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
-
- if (!cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline))
- break;
-
- list_del (&tx->tx_list);
- list_add_tail (&tx->tx_list, &stale_txs);
- }
-
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
-}
-
-static int
-ksocknal_send_keepalive_locked(ksock_peer_t *peer)
-{
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
-
- if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
- return 0;
-
- if (peer->ksnp_proto != &ksocknal_protocol_v3x)
- return 0;
-
- if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
- time_before(cfs_time_current(),
- cfs_time_add(peer->ksnp_last_alive,
- cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
- return 0;
-
- if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
- return 0;
-
-	/* retry 10 secs later, so we don't put pressure on
-	 * this peer if we failed to send keepalive this time */
- peer->ksnp_send_keepalive = cfs_time_shift(10);
-
- conn = ksocknal_find_conn_locked(peer, NULL, 1);
- if (conn != NULL) {
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
- if (!list_empty(&conn->ksnc_tx_queue)) {
- spin_unlock_bh(&sched->kss_lock);
-			/* there is a queued ACK, no need for keepalive */
- return 0;
- }
-
- spin_unlock_bh(&sched->kss_lock);
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- /* cookie = 1 is reserved for keepalive PING */
- tx = ksocknal_alloc_tx_noop(1, 1);
- if (tx == NULL) {
- read_lock(&ksocknal_data.ksnd_global_lock);
- return -ENOMEM;
- }
-
- if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
- read_lock(&ksocknal_data.ksnd_global_lock);
- return 1;
- }
-
- ksocknal_free_tx(tx);
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- return -EIO;
-}
-
-
-static void
-ksocknal_check_peer_timeouts (int idx)
-{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- ksock_peer_t *peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
-
- again:
- /* NB. We expect to have a look at all the peers and not find any
- * connections to time out, so we just use a shared lock while we
- * take a look... */
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- list_for_each_entry(peer, peers, ksnp_list) {
- unsigned long deadline = 0;
- int resid = 0;
- int n = 0;
-
- if (ksocknal_send_keepalive_locked(peer) != 0) {
- read_unlock(&ksocknal_data.ksnd_global_lock);
- goto again;
- }
-
- conn = ksocknal_find_timed_out_conn (peer);
-
- if (conn != NULL) {
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
-
- /* NB we won't find this one again, but we can't
- * just proceed with the next peer, since we dropped
- * ksnd_global_lock and it might be dead already! */
- ksocknal_conn_decref(conn);
- goto again;
- }
-
- /* we can't process stale txs right here because we're
- * holding only shared lock */
- if (!list_empty (&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx =
- list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
-
- if (cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline)) {
-
- ksocknal_peer_addref(peer);
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- ksocknal_flush_stale_txs(peer);
-
- ksocknal_peer_decref(peer);
- goto again;
- }
- }
-
- if (list_empty(&peer->ksnp_zc_req_list))
- continue;
-
- spin_lock(&peer->ksnp_lock);
- list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
- if (!cfs_time_aftereq(cfs_time_current(),
- tx->tx_deadline))
- break;
- /* ignore the TX if connection is being closed */
- if (tx->tx_conn->ksnc_closing)
- continue;
- n++;
- }
-
- if (n == 0) {
- spin_unlock(&peer->ksnp_lock);
- continue;
- }
-
- tx = list_entry(peer->ksnp_zc_req_list.next,
- ksock_tx_t, tx_zc_list);
- deadline = tx->tx_deadline;
- resid = tx->tx_resid;
- conn = tx->tx_conn;
- ksocknal_conn_addref(conn);
-
- spin_unlock(&peer->ksnp_lock);
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
- n, libcfs_nid2str(peer->ksnp_id.nid), tx,
- cfs_duration_sec(cfs_time_current() - deadline),
- resid, conn->ksnc_sock->sk->sk_wmem_queued);
-
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
- ksocknal_conn_decref(conn);
- goto again;
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-}
-
-int
-ksocknal_reaper (void *arg)
-{
- wait_queue_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- struct list_head enomem_conns;
- int nenomem_conns;
- long timeout;
- int i;
- int peer_index = 0;
- unsigned long deadline = cfs_time_current();
-
- cfs_block_allsigs();
-
- INIT_LIST_HEAD(&enomem_conns);
- init_waitqueue_entry(&wait, current);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- while (!ksocknal_data.ksnd_shuttingdown) {
-
- if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry (ksocknal_data. \
- ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- ksocknal_terminate_conn(conn);
- ksocknal_conn_decref(conn);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- continue;
- }
-
- if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
- next, ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- ksocknal_destroy_conn(conn);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- continue;
- }
-
- if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
- list_add(&enomem_conns,
- &ksocknal_data.ksnd_enomem_conns);
- list_del_init(&ksocknal_data.ksnd_enomem_conns);
- }
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- /* reschedule all the connections that stalled with ENOMEM... */
- nenomem_conns = 0;
- while (!list_empty (&enomem_conns)) {
- conn = list_entry (enomem_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
-
- sched = conn->ksnc_scheduler;
-
- spin_lock_bh(&sched->kss_lock);
-
- LASSERT(conn->ksnc_tx_scheduled);
- conn->ksnc_tx_ready = 1;
- list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- wake_up(&sched->kss_waitq);
-
- spin_unlock_bh(&sched->kss_lock);
- nenomem_conns++;
- }
-
- /* careful with the jiffy wrap... */
- while ((timeout = cfs_time_sub(deadline,
- cfs_time_current())) <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = ksocknal_data.ksnd_peer_hash_size;
-
- /* Time to check for timeouts on a few more peers: I do
- * checks every 'p' seconds on a proportion of the peer
- * table and I need to check every connection 'n' times
- * within a timeout interval, to ensure I detect a
- * timeout on any connection within (n+1)/n times the
- * timeout interval. */
-
- if (*ksocknal_tunables.ksnd_timeout > n * p)
- chunk = (chunk * n * p) /
- *ksocknal_tunables.ksnd_timeout;
- if (chunk == 0)
- chunk = 1;
-
- for (i = 0; i < chunk; i++) {
- ksocknal_check_peer_timeouts (peer_index);
- peer_index = (peer_index + 1) %
- ksocknal_data.ksnd_peer_hash_size;
- }
-
- deadline = cfs_time_add(deadline, cfs_time_seconds(p));
- }
-
- if (nenomem_conns != 0) {
- /* Reduce my timeout if I rescheduled ENOMEM conns.
- * This also prevents me getting woken immediately
- * if any go back on my enomem list. */
- timeout = SOCKNAL_ENOMEM_RETRY;
- }
- ksocknal_data.ksnd_reaper_waketime =
- cfs_time_add(cfs_time_current(), timeout);
-
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
-
- if (!ksocknal_data.ksnd_shuttingdown &&
- list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
- list_empty (&ksocknal_data.ksnd_zombie_conns))
- schedule_timeout(timeout);
-
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
-
- spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- }
-
- spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-
- ksocknal_thread_fini();
- return 0;
-}
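
The failure path of ksocknal_connect() above doubles route->ksnr_retry_interval and clamps it between the min_reconnectms and max_reconnectms tunables before rearming route->ksnr_timeout. A minimal stand-alone sketch of that back-off (plain user-space C with illustrative names, not code taken from the driver):

/*
 * Stand-alone sketch of the reconnect back-off in ksocknal_connect():
 * double the retry interval on every failure and clamp it to
 * [min_ivl, max_ivl].  Values are in seconds purely for illustration.
 */
#include <stdio.h>

static long next_retry_interval(long cur, long min_ivl, long max_ivl)
{
	long next = cur * 2;		/* exponential back-off */

	if (next < min_ivl)
		next = min_ivl;		/* never retry faster than the minimum */
	if (next > max_ivl)
		next = max_ivl;		/* never wait longer than the maximum */
	return next;
}

int main(void)
{
	long ivl = 0;
	int i;

	/* assuming min_reconnectms=1000 and max_reconnectms=60000, i.e. 1s..60s */
	for (i = 1; i <= 8; i++) {
		ivl = next_retry_interval(ivl, 1, 60);
		printf("failure %d: next attempt in %lds\n", i, ivl);
	}
	return 0;
}

The upper clamp keeps a persistently unreachable peer from stretching the interval without bound, while the lower clamp avoids reconnecting too aggressively right after a failure or a lost connection race.
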
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
deleted file mode 100644
index 679785b0209c..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ /dev/null
@@ -1,710 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include "socklnd.h"
-
-int
-ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
-{
- int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr,
- &conn->ksnc_port);
-
- /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
- LASSERT(!conn->ksnc_closing);
-
- if (rc != 0) {
- CERROR("Error %d getting sock peer IP\n", rc);
- return rc;
- }
-
- rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
- if (rc != 0) {
- CERROR("Error %d getting sock local IP\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-int
-ksocknal_lib_zc_capable(ksock_conn_t *conn)
-{
- int caps = conn->ksnc_sock->sk->sk_route_caps;
-
- if (conn->ksnc_proto == &ksocknal_protocol_v1x)
- return 0;
-
- /* ZC if the socket supports scatter/gather and doesn't need software
- * checksums */
- return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0);
-}
-
-int
-ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
-{
- struct socket *sock = conn->ksnc_sock;
- int nob;
- int rc;
-
- if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
- conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
-	    tx->tx_nob == tx->tx_resid && /* first sending */
- tx->tx_msg.ksm_csum == 0) /* not checksummed */
- ksocknal_lib_csum_tx(tx);
-
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
-
- {
-#if SOCKNAL_SINGLE_FRAG_TX
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- unsigned int niov = 1;
-#else
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
- unsigned int niov = tx->tx_niov;
-#endif
- struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
- int i;
-
- for (nob = i = 0; i < niov; i++) {
- scratchiov[i] = tx->tx_iov[i];
- nob += scratchiov[i].iov_len;
- }
-
- if (!list_empty(&conn->ksnc_tx_queue) ||
- nob < tx->tx_resid)
- msg.msg_flags |= MSG_MORE;
-
- rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
- }
- return rc;
-}
-
-int
-ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
-{
- struct socket *sock = conn->ksnc_sock;
- lnet_kiov_t *kiov = tx->tx_kiov;
- int rc;
- int nob;
-
- /* Not NOOP message */
- LASSERT(tx->tx_lnetmsg != NULL);
-
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
- if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
- /* Zero copy is enabled */
- struct sock *sk = sock->sk;
- struct page *page = kiov->kiov_page;
- int offset = kiov->kiov_offset;
- int fragsize = kiov->kiov_len;
- int msgflg = MSG_DONTWAIT;
-
- CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, kiov->kiov_len);
-
- if (!list_empty(&conn->ksnc_tx_queue) ||
- fragsize < tx->tx_resid)
- msgflg |= MSG_MORE;
-
- if (sk->sk_prot->sendpage != NULL) {
- rc = sk->sk_prot->sendpage(sk, page,
- offset, fragsize, msgflg);
- } else {
- rc = tcp_sendpage(sk, page, offset, fragsize, msgflg);
- }
- } else {
-#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- unsigned int niov = 1;
-#else
-#ifdef CONFIG_HIGHMEM
-#warning "XXX risk of kmap deadlock on multiple frags..."
-#endif
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
- unsigned int niov = tx->tx_nkiov;
-#endif
- struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
- int i;
-
- for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
- kiov[i].kiov_offset;
- nob += scratchiov[i].iov_len = kiov[i].kiov_len;
- }
-
- if (!list_empty(&conn->ksnc_tx_queue) ||
- nob < tx->tx_resid)
- msg.msg_flags |= MSG_MORE;
-
- rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
-
- for (i = 0; i < niov; i++)
- kunmap(kiov[i].kiov_page);
- }
- return rc;
-}
-
-void
-ksocknal_lib_eager_ack(ksock_conn_t *conn)
-{
- int opt = 1;
- struct socket *sock = conn->ksnc_sock;
-
- /* Remind the socket to ACK eagerly. If I don't, the socket might
- * think I'm about to send something it could piggy-back the ACK
- * on, introducing delay in completing zero-copy sends in my
- * peer. */
-
- kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char *)&opt, sizeof(opt));
-}
-
-int
-ksocknal_lib_recv_iov(ksock_conn_t *conn)
-{
-#if SOCKNAL_SINGLE_FRAG_RX
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- unsigned int niov = 1;
-#else
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
- unsigned int niov = conn->ksnc_rx_niov;
-#endif
- struct kvec *iov = conn->ksnc_rx_iov;
- struct msghdr msg = {
- .msg_flags = 0
- };
- int nob;
- int i;
- int rc;
- int fragnob;
- int sum;
- __u32 saved_csum;
-
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
- LASSERT(niov > 0);
-
- for (nob = i = 0; i < niov; i++) {
- scratchiov[i] = iov[i];
- nob += scratchiov[i].iov_len;
- }
- LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
- rc = kernel_recvmsg(conn->ksnc_sock, &msg,
- scratchiov, niov, nob, MSG_DONTWAIT);
-
- saved_csum = 0;
- if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
- saved_csum = conn->ksnc_msg.ksm_csum;
- conn->ksnc_msg.ksm_csum = 0;
- }
-
- if (saved_csum != 0) {
- /* accumulate checksum */
- for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- LASSERT(i < niov);
-
- fragnob = iov[i].iov_len;
- if (fragnob > sum)
- fragnob = sum;
-
- conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- iov[i].iov_base, fragnob);
- }
- conn->ksnc_msg.ksm_csum = saved_csum;
- }
-
- return rc;
-}
-
-static void
-ksocknal_lib_kiov_vunmap(void *addr)
-{
- if (addr == NULL)
- return;
-
- vunmap(addr);
-}
-
-static void *
-ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
- struct kvec *iov, struct page **pages)
-{
- void *addr;
- int nob;
- int i;
-
- if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
- return NULL;
-
- LASSERT(niov <= LNET_MAX_IOV);
-
- if (niov < 2 ||
- niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
- return NULL;
-
- for (nob = i = 0; i < niov; i++) {
- if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
- return NULL;
-
- pages[i] = kiov[i].kiov_page;
- nob += kiov[i].kiov_len;
- }
-
- addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
- if (addr == NULL)
- return NULL;
-
- iov->iov_base = addr + kiov[0].kiov_offset;
- iov->iov_len = nob;
-
- return addr;
-}
-
-int
-ksocknal_lib_recv_kiov(ksock_conn_t *conn)
-{
-#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- struct page **pages = NULL;
- unsigned int niov = 1;
-#else
-#ifdef CONFIG_HIGHMEM
-#warning "XXX risk of kmap deadlock on multiple frags..."
-#endif
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
- struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
- unsigned int niov = conn->ksnc_rx_nkiov;
-#endif
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
- struct msghdr msg = {
- .msg_flags = 0
- };
- int nob;
- int i;
- int rc;
- void *base;
- void *addr;
- int sum;
- int fragnob;
- int n;
-
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
- addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
- if (addr != NULL) {
- nob = scratchiov[0].iov_len;
- n = 1;
-
- } else {
- for (nob = i = 0; i < niov; i++) {
- nob += scratchiov[i].iov_len = kiov[i].kiov_len;
- scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
- kiov[i].kiov_offset;
- }
- n = niov;
- }
-
- LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
- rc = kernel_recvmsg(conn->ksnc_sock, &msg,
- (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
-
- if (conn->ksnc_msg.ksm_csum != 0) {
- for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- LASSERT(i < niov);
-
- /* Dang! have to kmap again because I have nowhere to
- * stash the mapped address. But by doing it while the
- * page is still mapped, the kernel just bumps the map
- * count and returns me the address it stashed. */
- base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
- fragnob = kiov[i].kiov_len;
- if (fragnob > sum)
- fragnob = sum;
-
- conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- base, fragnob);
-
- kunmap(kiov[i].kiov_page);
- }
- }
-
- if (addr != NULL) {
- ksocknal_lib_kiov_vunmap(addr);
- } else {
- for (i = 0; i < niov; i++)
- kunmap(kiov[i].kiov_page);
- }
-
- return rc;
-}
-
-void
-ksocknal_lib_csum_tx(ksock_tx_t *tx)
-{
- int i;
- __u32 csum;
- void *base;
-
- LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
- LASSERT(tx->tx_conn != NULL);
- LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
-
- tx->tx_msg.ksm_csum = 0;
-
- csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
- tx->tx_iov[0].iov_len);
-
- if (tx->tx_kiov != NULL) {
- for (i = 0; i < tx->tx_nkiov; i++) {
- base = kmap(tx->tx_kiov[i].kiov_page) +
- tx->tx_kiov[i].kiov_offset;
-
- csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
-
- kunmap(tx->tx_kiov[i].kiov_page);
- }
- } else {
- for (i = 1; i < tx->tx_niov; i++)
- csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
- tx->tx_iov[i].iov_len);
- }
-
- if (*ksocknal_tunables.ksnd_inject_csum_error) {
- csum++;
- *ksocknal_tunables.ksnd_inject_csum_error = 0;
- }
-
- tx->tx_msg.ksm_csum = csum;
-}
-
-int
-ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
-{
- struct socket *sock = conn->ksnc_sock;
- int len;
- int rc;
-
- rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT(conn->ksnc_closing);
- *txmem = *rxmem = *nagle = 0;
- return -ESHUTDOWN;
- }
-
- rc = lnet_sock_getbuf(sock, txmem, rxmem);
- if (rc == 0) {
- len = sizeof(*nagle);
- rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)nagle, &len);
- }
-
- ksocknal_connsock_decref(conn);
-
- if (rc == 0)
- *nagle = !*nagle;
- else
- *txmem = *rxmem = *nagle = 0;
-
- return rc;
-}
-
-int
-ksocknal_lib_setup_sock(struct socket *sock)
-{
- int rc;
- int option;
- int keep_idle;
- int keep_intvl;
- int keep_count;
- int do_keepalive;
- struct linger linger;
-
- sock->sk->sk_allocation = GFP_NOFS;
-
- /* Ensure this socket aborts active sends immediately when we close
- * it. */
-
- linger.l_onoff = 0;
- linger.l_linger = 0;
-
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
- (char *)&linger, sizeof(linger));
- if (rc != 0) {
- CERROR("Can't set SO_LINGER: %d\n", rc);
- return rc;
- }
-
- option = -1;
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2,
- (char *)&option, sizeof(option));
- if (rc != 0) {
- CERROR("Can't set SO_LINGER2: %d\n", rc);
- return rc;
- }
-
- if (!*ksocknal_tunables.ksnd_nagle) {
- option = 1;
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)&option, sizeof(option));
- if (rc != 0) {
- CERROR("Can't disable nagle: %d\n", rc);
- return rc;
- }
- }
-
- rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
- *ksocknal_tunables.ksnd_rx_buffer_size);
- if (rc != 0) {
- CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
- *ksocknal_tunables.ksnd_tx_buffer_size,
- *ksocknal_tunables.ksnd_rx_buffer_size, rc);
- return rc;
- }
-
-/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
-
- /* snapshot tunables */
- keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
- keep_count = *ksocknal_tunables.ksnd_keepalive_count;
- keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;
-
- do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
-
- option = (do_keepalive ? 1 : 0);
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
- (char *)&option, sizeof(option));
- if (rc != 0) {
- CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
- return rc;
- }
-
- if (!do_keepalive)
- return 0;
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
- (char *)&keep_idle, sizeof(keep_idle));
- if (rc != 0) {
- CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
- return rc;
- }
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
- (char *)&keep_intvl, sizeof(keep_intvl));
- if (rc != 0) {
- CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
- return rc;
- }
-
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
- (char *)&keep_count, sizeof(keep_count));
- if (rc != 0) {
- CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-void
-ksocknal_lib_push_conn(ksock_conn_t *conn)
-{
- struct sock *sk;
- struct tcp_sock *tp;
- int nonagle;
- int val = 1;
- int rc;
-
- rc = ksocknal_connsock_addref(conn);
- if (rc != 0) /* being shut down */
- return;
-
- sk = conn->ksnc_sock->sk;
- tp = tcp_sk(sk);
-
- lock_sock(sk);
- nonagle = tp->nonagle;
- tp->nonagle = 1;
- release_sock(sk);
-
- rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof(val));
- LASSERT(rc == 0);
-
- lock_sock(sk);
- tp->nonagle = nonagle;
- release_sock(sk);
-
- ksocknal_connsock_decref(conn);
-}
-
-extern void ksocknal_read_callback(ksock_conn_t *conn);
-extern void ksocknal_write_callback(ksock_conn_t *conn);
-/*
- * socket call back in Linux
- */
-static void
-ksocknal_data_ready(struct sock *sk)
-{
- ksock_conn_t *conn;
-
- /* interleave correctly with closing sockets... */
- LASSERT(!in_irq());
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- conn = sk->sk_user_data;
- if (conn == NULL) { /* raced with ksocknal_terminate_conn */
- LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
- sk->sk_data_ready(sk);
- } else
- ksocknal_read_callback(conn);
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-}
-
-static void
-ksocknal_write_space(struct sock *sk)
-{
- ksock_conn_t *conn;
- int wspace;
- int min_wpace;
-
- /* interleave correctly with closing sockets... */
- LASSERT(!in_irq());
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- conn = sk->sk_user_data;
- wspace = sk_stream_wspace(sk);
- min_wpace = sk_stream_min_wspace(sk);
-
- CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
- sk, wspace, min_wpace, conn,
- (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
- " ready" : " blocked"),
- (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
- " scheduled" : " idle"),
- (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
- " empty" : " queued"));
-
- if (conn == NULL) { /* raced with ksocknal_terminate_conn */
- LASSERT(sk->sk_write_space != &ksocknal_write_space);
- sk->sk_write_space(sk);
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return;
- }
-
- if (wspace >= min_wpace) { /* got enough space */
- ksocknal_write_callback(conn);
-
- /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
- * ENOMEM check in ksocknal_transmit is race-free (think about
- * it). */
-
- clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-}
-
-void
-ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
-{
- conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
- conn->ksnc_saved_write_space = sock->sk->sk_write_space;
-}
-
-void
-ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
-{
- sock->sk->sk_user_data = conn;
- sock->sk->sk_data_ready = ksocknal_data_ready;
- sock->sk->sk_write_space = ksocknal_write_space;
- return;
-}
-
-void
-ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
-{
- /* Remove conn's network callbacks.
- * NB I _have_ to restore the callback, rather than storing a noop,
- * since the socket could survive past this module being unloaded!! */
- sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
- sock->sk->sk_write_space = conn->ksnc_saved_write_space;
-
- /* A callback could be in progress already; they hold a read lock
- * on ksnd_global_lock (to serialise with me) and NOOP if
- * sk_user_data is NULL. */
- sock->sk->sk_user_data = NULL;
-
- return ;
-}
-
-int
-ksocknal_lib_memory_pressure(ksock_conn_t *conn)
-{
- int rc = 0;
- ksock_sched_t *sched;
-
- sched = conn->ksnc_scheduler;
- spin_lock_bh(&sched->kss_lock);
-
- if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
- !conn->ksnc_tx_ready) {
- /* SOCK_NOSPACE is set when the socket fills
- * and cleared in the write_space callback
- * (which also sets ksnc_tx_ready). If
- * SOCK_NOSPACE and ksnc_tx_ready are BOTH
- * zero, I didn't fill the socket and
- * write_space won't reschedule me, so I
- * return -ENOMEM to get my caller to retry
- * after a timeout */
- rc = -ENOMEM;
- }
-
- spin_unlock_bh(&sched->kss_lock);
-
- return rc;
-}
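
ksocknal_lib_recv_iov() above folds only the bytes that kernel_recvmsg() actually delivered into the running checksum: it walks the iov array with a countdown of received bytes and caps each fragment at whatever is left. A small user-space sketch of that walk, with a toy checksum standing in for ksocknal_csum() (the function names and the checksum itself are illustrative only):

/*
 * Sketch of the fragment walk used when accumulating a checksum over a
 * partially received iov array: only 'rc' bytes arrived, so the last
 * fragment touched may be partial.  toy_csum() is a placeholder for the
 * driver's ksocknal_csum().
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>

static uint32_t toy_csum(uint32_t csum, const void *buf, size_t nob)
{
	const unsigned char *p = buf;

	while (nob-- > 0)
		csum = (csum << 1) ^ (csum >> 31) ^ *p++;	/* not the real algorithm */
	return csum;
}

uint32_t csum_received(const struct iovec *iov, int niov, int rc)
{
	uint32_t csum = ~0U;		/* same seed the driver uses for ksnc_rx_csum */
	int sum, fragnob, i;

	for (i = 0, sum = rc; sum > 0 && i < niov; i++, sum -= fragnob) {
		fragnob = (int)iov[i].iov_len;
		if (fragnob > sum)
			fragnob = sum;	/* cap the last, partial fragment */
		csum = toy_csum(csum, iov[i].iov_base, fragnob);
	}
	return csum;
}

Because only the newly received bytes are folded in, a short read can simply return and be resumed later; the running value kept in ksnc_rx_csum stays consistent across the partial reads.
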
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
deleted file mode 100644
index c3ac67698125..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "socklnd.h"
-
-static int sock_timeout = 50;
-module_param(sock_timeout, int, 0644);
-MODULE_PARM_DESC(sock_timeout, "dead socket timeout (seconds)");
-
-static int credits = 256;
-module_param(credits, int, 0444);
-MODULE_PARM_DESC(credits, "# concurrent sends");
-
-static int peer_credits = 8;
-module_param(peer_credits, int, 0444);
-MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
-
-static int peer_buffer_credits;
-module_param(peer_buffer_credits, int, 0444);
-MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
-
-static int peer_timeout = 180;
-module_param(peer_timeout, int, 0444);
-MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
-
-/* Number of daemons in each per-CPT thread pool;
- * a reasonable value is estimated from the number of CPUs if it's not set. */
-static unsigned int nscheds;
-module_param(nscheds, int, 0444);
-MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
-
-static int nconnds = 4;
-module_param(nconnds, int, 0444);
-MODULE_PARM_DESC(nconnds, "# connection daemons while starting");
-
-static int nconnds_max = 64;
-module_param(nconnds_max, int, 0444);
-MODULE_PARM_DESC(nconnds_max, "max # connection daemons");
-
-static int min_reconnectms = 1000;
-module_param(min_reconnectms, int, 0644);
-MODULE_PARM_DESC(min_reconnectms, "min connection retry interval (mS)");
-
-static int max_reconnectms = 60000;
-module_param(max_reconnectms, int, 0644);
-MODULE_PARM_DESC(max_reconnectms, "max connection retry interval (mS)");
-
-# define DEFAULT_EAGER_ACK 0
-static int eager_ack = DEFAULT_EAGER_ACK;
-module_param(eager_ack, int, 0644);
-MODULE_PARM_DESC(eager_ack, "send tcp ack packets eagerly");
-
-static int typed_conns = 1;
-module_param(typed_conns, int, 0444);
-MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
-
-static int min_bulk = 1<<10;
-module_param(min_bulk, int, 0644);
-MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
-
-# define DEFAULT_BUFFER_SIZE 0
-static int tx_buffer_size = DEFAULT_BUFFER_SIZE;
-module_param(tx_buffer_size, int, 0644);
-MODULE_PARM_DESC(tx_buffer_size, "socket tx buffer size (0 for system default)");
-
-static int rx_buffer_size = DEFAULT_BUFFER_SIZE;
-module_param(rx_buffer_size, int, 0644);
-MODULE_PARM_DESC(rx_buffer_size, "socket rx buffer size (0 for system default)");
-
-static int nagle;
-module_param(nagle, int, 0644);
-MODULE_PARM_DESC(nagle, "enable NAGLE?");
-
-static int round_robin = 1;
-module_param(round_robin, int, 0644);
-MODULE_PARM_DESC(round_robin, "Round robin for multiple interfaces");
-
-static int keepalive = 30;
-module_param(keepalive, int, 0644);
-MODULE_PARM_DESC(keepalive, "# seconds before send keepalive");
-
-static int keepalive_idle = 30;
-module_param(keepalive_idle, int, 0644);
-MODULE_PARM_DESC(keepalive_idle, "# idle seconds before probe");
-
-#define DEFAULT_KEEPALIVE_COUNT 5
-static int keepalive_count = DEFAULT_KEEPALIVE_COUNT;
-module_param(keepalive_count, int, 0644);
-MODULE_PARM_DESC(keepalive_count, "# missed probes == dead");
-
-static int keepalive_intvl = 5;
-module_param(keepalive_intvl, int, 0644);
-MODULE_PARM_DESC(keepalive_intvl, "seconds between probes");
-
-static int enable_csum;
-module_param(enable_csum, int, 0644);
-MODULE_PARM_DESC(enable_csum, "enable check sum");
-
-static int inject_csum_error;
-module_param(inject_csum_error, int, 0644);
-MODULE_PARM_DESC(inject_csum_error, "set non-zero to inject a checksum error");
-
-static int nonblk_zcack = 1;
-module_param(nonblk_zcack, int, 0644);
-MODULE_PARM_DESC(nonblk_zcack, "always send ZC-ACK on non-blocking connection");
-
-static unsigned int zc_min_payload = 16 << 10;
-module_param(zc_min_payload, int, 0644);
-MODULE_PARM_DESC(zc_min_payload, "minimum payload size to zero copy");
-
-static unsigned int zc_recv;
-module_param(zc_recv, int, 0644);
-MODULE_PARM_DESC(zc_recv, "enable ZC recv for Chelsio driver");
-
-static unsigned int zc_recv_min_nfrags = 16;
-module_param(zc_recv_min_nfrags, int, 0644);
-MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv");
-
-
-#if SOCKNAL_VERSION_DEBUG
-static int protocol = 3;
-module_param(protocol, int, 0644);
-MODULE_PARM_DESC(protocol, "protocol version");
-#endif
-
-ksock_tunables_t ksocknal_tunables;
-
-int ksocknal_tunables_init(void)
-{
- /* initialize ksocknal_tunables structure */
- ksocknal_tunables.ksnd_timeout = &sock_timeout;
- ksocknal_tunables.ksnd_nscheds = &nscheds;
- ksocknal_tunables.ksnd_nconnds = &nconnds;
- ksocknal_tunables.ksnd_nconnds_max = &nconnds_max;
- ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms;
- ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms;
- ksocknal_tunables.ksnd_eager_ack = &eager_ack;
- ksocknal_tunables.ksnd_typed_conns = &typed_conns;
- ksocknal_tunables.ksnd_min_bulk = &min_bulk;
- ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size;
- ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size;
- ksocknal_tunables.ksnd_nagle = &nagle;
- ksocknal_tunables.ksnd_round_robin = &round_robin;
- ksocknal_tunables.ksnd_keepalive = &keepalive;
- ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle;
- ksocknal_tunables.ksnd_keepalive_count = &keepalive_count;
- ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl;
- ksocknal_tunables.ksnd_credits = &credits;
- ksocknal_tunables.ksnd_peertxcredits = &peer_credits;
- ksocknal_tunables.ksnd_peerrtrcredits = &peer_buffer_credits;
- ksocknal_tunables.ksnd_peertimeout = &peer_timeout;
- ksocknal_tunables.ksnd_enable_csum = &enable_csum;
- ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error;
- ksocknal_tunables.ksnd_nonblk_zcack = &nonblk_zcack;
- ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload;
- ksocknal_tunables.ksnd_zc_recv = &zc_recv;
- ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags;
-
-#if SOCKNAL_VERSION_DEBUG
- ksocknal_tunables.ksnd_protocol = &protocol;
-#endif
-
- if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10))
- *ksocknal_tunables.ksnd_zc_min_payload = 2 << 10;
-
- return 0;
-}
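
The tunables file above wires each module parameter into ksocknal_tunables as a pointer, so the rest of the LND always reads the current value and ksocknal_tunables_init() can clamp it in place. A minimal standalone C sketch of that pointer-based pattern (a hypothetical userspace illustration, not part of the deleted module) might look like:

#include <stdio.h>

/* analogous to the zc_min_payload module_param above */
static int zc_min_payload = 16 << 10;

struct tunables {
	int *zc_min_payload;	/* pointer to the parameter, not a copy */
};

static struct tunables tun;

static int tunables_init(void)
{
	tun.zc_min_payload = &zc_min_payload;

	/* clamp in place, as ksocknal_tunables_init() does */
	if (*tun.zc_min_payload < (2 << 10))
		*tun.zc_min_payload = 2 << 10;
	return 0;
}

int main(void)
{
	tunables_init();
	printf("zc_min_payload = %d\n", *tun.zc_min_payload);
	return 0;
}
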
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
deleted file mode 100644
index 986bce4c9f3b..000000000000
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ /dev/null
@@ -1,794 +0,0 @@
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
- *
- * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "socklnd.h"
-
-/*
- * Protocol entries :
- * pro_send_hello : send hello message
- * pro_recv_hello : receive hello message
- * pro_pack : pack message header
- * pro_unpack : unpack message header
- * pro_queue_tx_zcack() : Called holding BH lock: kss_lock
- * return 1 if ACK is piggybacked, otherwise return 0
- * pro_queue_tx_msg() : Called holding BH lock: kss_lock
- * return the ACK piggybacked by my message, or NULL
- * pro_handle_zcreq() : handler of incoming ZC-REQ
- * pro_handle_zcack() : handler of incoming ZC-ACK
- * pro_match_tx() : Called holding glock
- */
-
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
-{
- /* V1.x, just enqueue it */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
- return NULL;
-}
-
-void
-ksocknal_next_tx_carrier(ksock_conn_t *conn)
-{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
-
- /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
- LASSERT(!list_empty(&conn->ksnc_tx_queue));
- LASSERT(tx != NULL);
-
- /* Next TX that can carry ZC-ACK or LNet message */
- if (tx->tx_list.next == &conn->ksnc_tx_queue) {
- /* no more packets queued */
- conn->ksnc_tx_carrier = NULL;
- } else {
- conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
- ksock_tx_t, tx_list);
- LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
- }
-}
-
-static int
-ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
-{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
-
- LASSERT(tx_ack == NULL ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
-	/*
-	 * Enqueue or piggyback tx_ack / cookie:
-	 * . if no queued tx can piggyback the cookie of tx_ack (or the bare
-	 *   cookie), just enqueue tx_ack (if tx_ack != NULL) and return 0.
-	 * . if a queued tx can piggyback the cookie of tx_ack (or the bare
-	 *   cookie), piggyback it and return 1.
-	 */
- if (tx == NULL) {
- if (tx_ack != NULL) {
- list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
- conn->ksnc_tx_carrier = tx_ack;
- }
- return 0;
- }
-
- if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
-		/* tx is a NOOP ZC-ACK; it can't piggyback another ZC-ACK cookie */
- if (tx_ack != NULL)
- list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
- return 0;
- }
-
- LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
- LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
-
- if (tx_ack != NULL)
- cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
-
- /* piggyback the zc-ack cookie */
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- /* move on to the next TX which can carry cookie */
- ksocknal_next_tx_carrier(conn);
-
- return 1;
-}
-
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
-{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
-
-	/*
-	 * Enqueue tx_msg:
-	 * . If there is no NOOP on the connection, just enqueue
-	 *   tx_msg and return NULL.
-	 * . If there is a NOOP on the connection, piggyback its cookie
-	 *   onto tx_msg, replace the NOOP tx with it, and return the NOOP tx.
-	 */
- if (tx == NULL) { /* nothing on queue */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
- conn->ksnc_tx_carrier = tx_msg;
- return NULL;
- }
-
- if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
- list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
- return NULL;
- }
-
- LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
-	/* There is a NOOP ZC-ACK queued whose cookie can be piggybacked */
- tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1];
- ksocknal_next_tx_carrier(conn);
-
- /* use new_tx to replace the noop zc-ack packet */
- list_add(&tx_msg->tx_list, &tx->tx_list);
- list_del(&tx->tx_list);
-
- return tx;
-}
-
-static int
-ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
-{
- ksock_tx_t *tx;
-
- if (conn->ksnc_type != SOCKLND_CONN_ACK)
- return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
-
- /* non-blocking ZC-ACK (to router) */
- LASSERT(tx_ack == NULL ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
- tx = conn->ksnc_tx_carrier;
- if (tx == NULL) {
- if (tx_ack != NULL) {
- list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
- conn->ksnc_tx_carrier = tx_ack;
- }
- return 0;
- }
-
- /* conn->ksnc_tx_carrier != NULL */
-
- if (tx_ack != NULL)
- cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
-
- if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
- return 1;
-
- if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
- /* replace the keepalive PING with a real ACK */
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- return 1;
- }
-
- if (cookie == tx->tx_msg.ksm_zc_cookies[0] ||
- cookie == tx->tx_msg.ksm_zc_cookies[1]) {
- CWARN("%s: duplicated ZC cookie: %llu\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
- return 1; /* XXX return error in the future */
- }
-
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
- /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
- if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
- tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- } else {
- tx->tx_msg.ksm_zc_cookies[0] = cookie;
- }
-
- if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
- /* not likely to carry more ACKs, skip it to simplify logic */
- ksocknal_next_tx_carrier(conn);
- }
-
- return 1;
- }
-
- /* takes two or more cookies already */
-
- if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
- __u64 tmp = 0;
-
-		/* two separate cookies: (a+2, a) or (a+1, a) */
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] -
- tx->tx_msg.ksm_zc_cookies[1] <= 2);
-
- if (tx->tx_msg.ksm_zc_cookies[0] -
- tx->tx_msg.ksm_zc_cookies[1] == 2) {
- if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1)
- tmp = cookie;
- } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) {
- tmp = tx->tx_msg.ksm_zc_cookies[1];
- } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) {
- tmp = tx->tx_msg.ksm_zc_cookies[0];
- }
-
- if (tmp != 0) {
- /* range of cookies */
- tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
- tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
- return 1;
- }
-
- } else {
- /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
- if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
- cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
- CWARN("%s: duplicated ZC cookie: %llu\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
- return 1; /* XXX: return error in the future */
- }
-
- if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) {
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
- return 1;
- }
-
- if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) {
- tx->tx_msg.ksm_zc_cookies[0] = cookie;
- return 1;
- }
- }
-
- /* failed to piggyback ZC-ACK */
- if (tx_ack != NULL) {
- list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
- /* the next tx can piggyback at least 1 ACK */
- ksocknal_next_tx_carrier(conn);
- }
-
- return 0;
-}
-
-static int
-ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
-{
- int nob;
-
-#if SOCKNAL_VERSION_DEBUG
- if (!*ksocknal_tunables.ksnd_typed_conns)
- return SOCKNAL_MATCH_YES;
-#endif
-
- if (tx == NULL || tx->tx_lnetmsg == NULL) {
- /* noop packet */
- nob = offsetof(ksock_msg_t, ksm_u);
- } else {
- nob = tx->tx_lnetmsg->msg_len +
- ((conn->ksnc_proto == &ksocknal_protocol_v1x) ?
- sizeof(lnet_hdr_t) : sizeof(ksock_msg_t));
- }
-
- /* default checking for typed connection */
- switch (conn->ksnc_type) {
- default:
- CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
- LBUG();
- case SOCKLND_CONN_ANY:
- return SOCKNAL_MATCH_YES;
-
- case SOCKLND_CONN_BULK_IN:
- return SOCKNAL_MATCH_MAY;
-
- case SOCKLND_CONN_BULK_OUT:
- if (nob < *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
-
- case SOCKLND_CONN_CONTROL:
- if (nob >= *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
- }
-}
-
-static int
-ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
-{
- int nob;
-
- if (tx == NULL || tx->tx_lnetmsg == NULL)
- nob = offsetof(ksock_msg_t, ksm_u);
- else
- nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t);
-
- switch (conn->ksnc_type) {
- default:
- CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
- LBUG();
- case SOCKLND_CONN_ANY:
- return SOCKNAL_MATCH_NO;
-
- case SOCKLND_CONN_ACK:
- if (nonblk)
- return SOCKNAL_MATCH_YES;
- else if (tx == NULL || tx->tx_lnetmsg == NULL)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_NO;
-
- case SOCKLND_CONN_BULK_OUT:
- if (nonblk)
- return SOCKNAL_MATCH_NO;
- else if (nob < *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
-
- case SOCKLND_CONN_CONTROL:
- if (nonblk)
- return SOCKNAL_MATCH_NO;
- else if (nob >= *ksocknal_tunables.ksnd_min_bulk)
- return SOCKNAL_MATCH_MAY;
- else
- return SOCKNAL_MATCH_YES;
- }
-}
-
-/* (Sink) handle incoming ZC request from sender */
-static int
-ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
-{
- ksock_peer_t *peer = c->ksnc_peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
-
- read_lock(&ksocknal_data.ksnd_global_lock);
-
- conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
- if (conn != NULL) {
- ksock_sched_t *sched = conn->ksnc_scheduler;
-
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
-
- spin_lock_bh(&sched->kss_lock);
-
- rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
-
- spin_unlock_bh(&sched->kss_lock);
-
- if (rc) { /* piggybacked */
- read_unlock(&ksocknal_data.ksnd_global_lock);
- return 0;
- }
- }
-
- read_unlock(&ksocknal_data.ksnd_global_lock);
-
- /* ACK connection is not ready, or can't piggyback the ACK */
- tx = ksocknal_alloc_tx_noop(cookie, !!remote);
- if (tx == NULL)
- return -ENOMEM;
-
- rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
- if (rc == 0)
- return 0;
-
- ksocknal_free_tx(tx);
- return rc;
-}
-
-/* (Sender) handle ZC_ACK from sink */
-static int
-ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
-{
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
- LIST_HEAD(zlist);
- int count;
-
- if (cookie1 == 0)
- cookie1 = cookie2;
-
- count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
-
- if (cookie2 == SOCKNAL_KEEPALIVE_PING &&
- conn->ksnc_proto == &ksocknal_protocol_v3x) {
- /* keepalive PING for V3.x, just ignore it */
- return count == 1 ? 0 : -EPROTO;
- }
-
- spin_lock(&peer->ksnp_lock);
-
- list_for_each_entry_safe(tx, tmp,
- &peer->ksnp_zc_req_list, tx_zc_list) {
- __u64 c = tx->tx_msg.ksm_zc_cookies[0];
-
- if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
-
- if (--count == 0)
- break;
- }
- }
-
- spin_unlock(&peer->ksnp_lock);
-
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
- list_del(&tx->tx_zc_list);
- ksocknal_tx_decref(tx);
- }
-
- return count == 0 ? 0 : -EPROTO;
-}
-
-static int
-ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
-{
- struct socket *sock = conn->ksnc_sock;
- lnet_hdr_t *hdr;
- lnet_magicversion_t *hmv;
- int rc;
- int i;
-
- CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
-
- LIBCFS_ALLOC(hdr, sizeof(*hdr));
- if (hdr == NULL) {
- CERROR("Can't allocate lnet_hdr_t\n");
- return -ENOMEM;
- }
-
- hmv = (lnet_magicversion_t *)&hdr->dest_nid;
-
-	/* Re-organize the V2.x message header into a V1.x (lnet_hdr_t)
-	 * header and send it out */
- hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
- hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
- hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
-
- if (the_lnet.ln_testprotocompat != 0) {
- /* single-shot proto check */
- LNET_LOCK();
- if ((the_lnet.ln_testprotocompat & 1) != 0) {
- hmv->version_major++; /* just different! */
- the_lnet.ln_testprotocompat &= ~1;
- }
- if ((the_lnet.ln_testprotocompat & 2) != 0) {
- hmv->magic = LNET_PROTO_MAGIC;
- the_lnet.ln_testprotocompat &= ~2;
- }
- LNET_UNLOCK();
- }
-
- hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
- hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
- hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
- hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
- hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
- hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
-
- rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
- if (rc != 0) {
- CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
- rc, &conn->ksnc_ipaddr, conn->ksnc_port);
- goto out;
- }
-
- if (hello->kshm_nips == 0)
- goto out;
-
- for (i = 0; i < (int) hello->kshm_nips; i++) {
- hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
- }
-
- rc = lnet_sock_write(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32),
- lnet_acceptor_timeout());
- if (rc != 0) {
- CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
- rc, hello->kshm_nips,
- &conn->ksnc_ipaddr, conn->ksnc_port);
- }
-out:
- LIBCFS_FREE(hdr, sizeof(*hdr));
-
- return rc;
-}
-
-static int
-ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
-{
- struct socket *sock = conn->ksnc_sock;
- int rc;
-
- hello->kshm_magic = LNET_PROTO_MAGIC;
- hello->kshm_version = conn->ksnc_proto->pro_version;
-
- if (the_lnet.ln_testprotocompat != 0) {
- /* single-shot proto check */
- LNET_LOCK();
- if ((the_lnet.ln_testprotocompat & 1) != 0) {
- hello->kshm_version++; /* just different! */
- the_lnet.ln_testprotocompat &= ~1;
- }
- LNET_UNLOCK();
- }
-
- rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
- lnet_acceptor_timeout());
- if (rc != 0) {
- CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
- rc, &conn->ksnc_ipaddr, conn->ksnc_port);
- return rc;
- }
-
- if (hello->kshm_nips == 0)
- return 0;
-
- rc = lnet_sock_write(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32),
- lnet_acceptor_timeout());
- if (rc != 0) {
- CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
- rc, hello->kshm_nips,
- &conn->ksnc_ipaddr, conn->ksnc_port);
- }
-
- return rc;
-}
-
-static int
-ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
- int timeout)
-{
- struct socket *sock = conn->ksnc_sock;
- lnet_hdr_t *hdr;
- int rc;
- int i;
-
- LIBCFS_ALLOC(hdr, sizeof(*hdr));
- if (hdr == NULL) {
- CERROR("Can't allocate lnet_hdr_t\n");
- return -ENOMEM;
- }
-
- rc = lnet_sock_read(sock, &hdr->src_nid,
- sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid),
- timeout);
- if (rc != 0) {
- CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- goto out;
- }
-
- /* ...and check we got what we expected */
- if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
- CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
- le32_to_cpu(hdr->type),
- &conn->ksnc_ipaddr);
- rc = -EPROTO;
- goto out;
- }
-
- hello->kshm_src_nid = le64_to_cpu(hdr->src_nid);
- hello->kshm_src_pid = le32_to_cpu(hdr->src_pid);
- hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation);
- hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type);
- hello->kshm_nips = le32_to_cpu(hdr->payload_length) /
- sizeof(__u32);
-
- if (hello->kshm_nips > LNET_MAX_INTERFACES) {
- CERROR("Bad nips %d from ip %pI4h\n",
- hello->kshm_nips, &conn->ksnc_ipaddr);
- rc = -EPROTO;
- goto out;
- }
-
- if (hello->kshm_nips == 0)
- goto out;
-
- rc = lnet_sock_read(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32), timeout);
- if (rc != 0) {
- CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- goto out;
- }
-
- for (i = 0; i < (int) hello->kshm_nips; i++) {
- hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
-
- if (hello->kshm_ips[i] == 0) {
- CERROR("Zero IP[%d] from ip %pI4h\n",
- i, &conn->ksnc_ipaddr);
- rc = -EPROTO;
- break;
- }
- }
-out:
- LIBCFS_FREE(hdr, sizeof(*hdr));
-
- return rc;
-}
-
-static int
-ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
-{
- struct socket *sock = conn->ksnc_sock;
- int rc;
- int i;
-
- if (hello->kshm_magic == LNET_PROTO_MAGIC)
- conn->ksnc_flip = 0;
- else
- conn->ksnc_flip = 1;
-
- rc = lnet_sock_read(sock, &hello->kshm_src_nid,
- offsetof(ksock_hello_msg_t, kshm_ips) -
- offsetof(ksock_hello_msg_t, kshm_src_nid),
- timeout);
- if (rc != 0) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- return rc;
- }
-
- if (conn->ksnc_flip) {
- __swab32s(&hello->kshm_src_pid);
- __swab64s(&hello->kshm_src_nid);
- __swab32s(&hello->kshm_dst_pid);
- __swab64s(&hello->kshm_dst_nid);
- __swab64s(&hello->kshm_src_incarnation);
- __swab64s(&hello->kshm_dst_incarnation);
- __swab32s(&hello->kshm_ctype);
- __swab32s(&hello->kshm_nips);
- }
-
- if (hello->kshm_nips > LNET_MAX_INTERFACES) {
- CERROR("Bad nips %d from ip %pI4h\n",
- hello->kshm_nips, &conn->ksnc_ipaddr);
- return -EPROTO;
- }
-
- if (hello->kshm_nips == 0)
- return 0;
-
- rc = lnet_sock_read(sock, hello->kshm_ips,
- hello->kshm_nips * sizeof(__u32), timeout);
- if (rc != 0) {
- CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT(rc < 0 && rc != -EALREADY);
- return rc;
- }
-
- for (i = 0; i < (int) hello->kshm_nips; i++) {
- if (conn->ksnc_flip)
- __swab32s(&hello->kshm_ips[i]);
-
- if (hello->kshm_ips[i] == 0) {
- CERROR("Zero IP[%d] from ip %pI4h\n",
- i, &conn->ksnc_ipaddr);
- return -EPROTO;
- }
- }
-
- return 0;
-}
-
-static void
-ksocknal_pack_msg_v1(ksock_tx_t *tx)
-{
- /* V1.x has no KSOCK_MSG_NOOP */
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_lnetmsg != NULL);
-
- tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
- tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);
-
- tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
-}
-
-static void
-ksocknal_pack_msg_v2(ksock_tx_t *tx)
-{
- tx->tx_iov[0].iov_base = &tx->tx_msg;
-
- if (tx->tx_lnetmsg != NULL) {
- LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
-
- tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
- tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
- tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
- } else {
- LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-
- tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
- tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
- }
-	/* Don't checksum before sending starts, because an ACK can still be piggybacked onto the packet */
-}
-
-static void
-ksocknal_unpack_msg_v1(ksock_msg_t *msg)
-{
- msg->ksm_csum = 0;
- msg->ksm_type = KSOCK_MSG_LNET;
- msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
-}
-
-static void
-ksocknal_unpack_msg_v2(ksock_msg_t *msg)
-{
- return; /* Do nothing */
-}
-
-ksock_proto_t ksocknal_protocol_v1x = {
- .pro_version = KSOCK_PROTO_V1,
- .pro_send_hello = ksocknal_send_hello_v1,
- .pro_recv_hello = ksocknal_recv_hello_v1,
- .pro_pack = ksocknal_pack_msg_v1,
- .pro_unpack = ksocknal_unpack_msg_v1,
- .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
- .pro_handle_zcreq = NULL,
- .pro_handle_zcack = NULL,
- .pro_queue_tx_zcack = NULL,
- .pro_match_tx = ksocknal_match_tx
-};
-
-ksock_proto_t ksocknal_protocol_v2x = {
- .pro_version = KSOCK_PROTO_V2,
- .pro_send_hello = ksocknal_send_hello_v2,
- .pro_recv_hello = ksocknal_recv_hello_v2,
- .pro_pack = ksocknal_pack_msg_v2,
- .pro_unpack = ksocknal_unpack_msg_v2,
- .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
- .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
- .pro_handle_zcreq = ksocknal_handle_zcreq,
- .pro_handle_zcack = ksocknal_handle_zcack,
- .pro_match_tx = ksocknal_match_tx
-};
-
-ksock_proto_t ksocknal_protocol_v3x = {
- .pro_version = KSOCK_PROTO_V3,
- .pro_send_hello = ksocknal_send_hello_v2,
- .pro_recv_hello = ksocknal_recv_hello_v2,
- .pro_pack = ksocknal_pack_msg_v2,
- .pro_unpack = ksocknal_unpack_msg_v2,
- .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
- .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
- .pro_handle_zcreq = ksocknal_handle_zcreq,
- .pro_handle_zcack = ksocknal_handle_zcack,
- .pro_match_tx = ksocknal_match_tx_v3
-};
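
For reference, ksocknal_handle_zcack() above interprets its two cookies either as two discrete ZC-ACK cookies (cookie1 > cookie2) or as the inclusive range [cookie1, cookie2]. A minimal standalone C sketch of that counting convention (an illustration, not code from the deleted file) is:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the count computation in ksocknal_handle_zcack():
 * cookie1 == 0 means only one cookie was sent,
 * cookie1 >  cookie2 means two separate cookies,
 * cookie1 <= cookie2 means the inclusive range [cookie1, cookie2]. */
static unsigned int zcack_cookie_count(uint64_t cookie1, uint64_t cookie2)
{
	if (cookie1 == 0)
		cookie1 = cookie2;

	return (cookie1 > cookie2) ? 2 : (unsigned int)(cookie2 - cookie1 + 1);
}

int main(void)
{
	printf("%u\n", zcack_cookie_count(0, 7));	/* single cookie -> 1 */
	printf("%u\n", zcack_cookie_count(9, 7));	/* two discrete cookies -> 2 */
	printf("%u\n", zcack_cookie_count(3, 7));	/* range [3,7] -> 5 */
	return 0;
}
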
diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile
deleted file mode 100644
index 52492fb10f85..000000000000
--- a/drivers/staging/lustre/lnet/lnet/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_LNET) += lnet.o
-
-lnet-y := api-ni.o config.o \
- lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \
- lib-socket.o lib-move.o module.o lo.o \
- router.o router_proc.o acceptor.o peer.o
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
deleted file mode 100644
index 99f8396f3822..000000000000
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ /dev/null
@@ -1,493 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include <linux/completion.h>
-#include "../../include/linux/lnet/lib-lnet.h"
-
-static int accept_port = 988;
-static int accept_backlog = 127;
-static int accept_timeout = 5;
-
-static struct {
- int pta_shutdown;
- struct socket *pta_sock;
- struct completion pta_signal;
-} lnet_acceptor_state;
-
-int
-lnet_acceptor_port(void)
-{
- return accept_port;
-}
-EXPORT_SYMBOL(lnet_acceptor_port);
-
-static inline int
-lnet_accept_magic(__u32 magic, __u32 constant)
-{
- return (magic == constant ||
- magic == __swab32(constant));
-}
-
-static char *accept = "secure";
-
-module_param(accept, charp, 0444);
-MODULE_PARM_DESC(accept, "Accept connections (secure|all|none)");
-module_param(accept_port, int, 0444);
-MODULE_PARM_DESC(accept_port, "Acceptor's port (same on all nodes)");
-module_param(accept_backlog, int, 0444);
-MODULE_PARM_DESC(accept_backlog, "Acceptor's listen backlog");
-module_param(accept_timeout, int, 0644);
-MODULE_PARM_DESC(accept_timeout, "Acceptor's timeout (seconds)");
-
-static char *accept_type;
-
-static int
-lnet_acceptor_get_tunables(void)
-{
- /* Userland acceptor uses 'accept_type' instead of 'accept', due to
- * conflict with 'accept(2)', but kernel acceptor still uses 'accept'
- * for compatibility. Hence the trick. */
- accept_type = accept;
- return 0;
-}
-
-int
-lnet_acceptor_timeout(void)
-{
- return accept_timeout;
-}
-EXPORT_SYMBOL(lnet_acceptor_timeout);
-
-void
-lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
- __u32 peer_ip, int peer_port)
-{
- switch (rc) {
- /* "normal" errors */
- case -ECONNREFUSED:
- CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- case -EHOSTUNREACH:
- case -ENETUNREACH:
- CNETERR("Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n",
- libcfs_nid2str(peer_nid), &peer_ip);
- break;
- case -ETIMEDOUT:
- CNETERR("Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- case -ECONNRESET:
- LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port,
- libcfs_nid2str(peer_nid));
- break;
- case -EPROTO:
- LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- case -EADDRINUSE:
- LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to connect to %s at host %pI4h on port %d\n",
- libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- default:
- LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s at host %pI4h on port %d\n",
- rc, libcfs_nid2str(peer_nid),
- &peer_ip, peer_port);
- break;
- }
-}
-EXPORT_SYMBOL(lnet_connect_console_error);
-
-int
-lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
- __u32 local_ip, __u32 peer_ip, int peer_port)
-{
- lnet_acceptor_connreq_t cr;
- struct socket *sock;
- int rc;
- int port;
- int fatal;
-
- CLASSERT(sizeof(cr) <= 16); /* not too big to be on the stack */
-
- for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
- port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
- --port) {
- /* Iterate through reserved ports. */
-
- rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip,
- peer_port);
- if (rc != 0) {
- if (fatal)
- goto failed;
- continue;
- }
-
- CLASSERT(LNET_PROTO_ACCEPTOR_VERSION == 1);
-
- cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
- cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
- cr.acr_nid = peer_nid;
-
- if (the_lnet.ln_testprotocompat != 0) {
- /* single-shot proto check */
- lnet_net_lock(LNET_LOCK_EX);
- if ((the_lnet.ln_testprotocompat & 4) != 0) {
- cr.acr_version++;
- the_lnet.ln_testprotocompat &= ~4;
- }
- if ((the_lnet.ln_testprotocompat & 8) != 0) {
- cr.acr_magic = LNET_PROTO_MAGIC;
- the_lnet.ln_testprotocompat &= ~8;
- }
- lnet_net_unlock(LNET_LOCK_EX);
- }
-
- rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
- if (rc != 0)
- goto failed_sock;
-
- *sockp = sock;
- return 0;
- }
-
- rc = -EADDRINUSE;
- goto failed;
-
- failed_sock:
- sock_release(sock);
- failed:
- lnet_connect_console_error(rc, peer_nid, peer_ip, peer_port);
- return rc;
-}
-EXPORT_SYMBOL(lnet_connect);
-
-
-/* Below is the code common to both kernel and MT user-space */
-
-static int
-lnet_accept(struct socket *sock, __u32 magic)
-{
- lnet_acceptor_connreq_t cr;
- __u32 peer_ip;
- int peer_port;
- int rc;
- int flip;
- lnet_ni_t *ni;
- char *str;
-
- LASSERT(sizeof(cr) <= 16); /* not too big for the stack */
-
- rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT(rc == 0); /* we succeeded before */
-
- if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
-
- if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
- /* future version compatibility!
- * When LNET unifies protocols over all LNDs, the first
- * thing sent will be a version query. I send back
- * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */
-
- memset(&cr, 0, sizeof(cr));
- cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
- cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
- rc = lnet_sock_write(sock, &cr, sizeof(cr),
- accept_timeout);
-
- if (rc != 0)
- CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
- &peer_ip, rc);
- return -EPROTO;
- }
-
- if (magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC))
- str = "'old' socknal/tcpnal";
- else
- str = "unrecognised";
-
- LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
- &peer_ip, magic, str);
- return -EPROTO;
- }
-
- flip = (magic != LNET_PROTO_ACCEPTOR_MAGIC);
-
- rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
- accept_timeout);
- if (rc != 0) {
- CERROR("Error %d reading connection request version from %pI4h\n",
- rc, &peer_ip);
- return -EIO;
- }
-
- if (flip)
- __swab32s(&cr.acr_version);
-
- if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) {
- /* future version compatibility!
- * An acceptor-specific protocol rev will first send a version
- * query. I send back my current version to tell her I'm
- * "old". */
- int peer_version = cr.acr_version;
-
- memset(&cr, 0, sizeof(cr));
- cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
- cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
-
- rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
- if (rc != 0)
- CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
- peer_version, &peer_ip, rc);
- return -EPROTO;
- }
-
- rc = lnet_sock_read(sock, &cr.acr_nid,
- sizeof(cr) -
- offsetof(lnet_acceptor_connreq_t, acr_nid),
- accept_timeout);
- if (rc != 0) {
- CERROR("Error %d reading connection request from %pI4h\n",
- rc, &peer_ip);
- return -EIO;
- }
-
- if (flip)
- __swab64s(&cr.acr_nid);
-
- ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid));
- if (ni == NULL || /* no matching net */
- ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
- if (ni != NULL)
- lnet_ni_decref(ni);
- LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
- &peer_ip, libcfs_nid2str(cr.acr_nid));
- return -EPERM;
- }
-
- if (ni->ni_lnd->lnd_accept == NULL) {
- /* This catches a request for the loopback LND */
- lnet_ni_decref(ni);
-		LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI does not accept IP connections\n",
- &peer_ip, libcfs_nid2str(cr.acr_nid));
- return -EPERM;
- }
-
- CDEBUG(D_NET, "Accept %s from %pI4h\n",
- libcfs_nid2str(cr.acr_nid), &peer_ip);
-
- rc = ni->ni_lnd->lnd_accept(ni, sock);
-
- lnet_ni_decref(ni);
- return rc;
-}
-
-static int
-lnet_acceptor(void *arg)
-{
- struct socket *newsock;
- int rc;
- __u32 magic;
- __u32 peer_ip;
- int peer_port;
- int secure = (int)((long_ptr_t)arg);
-
- LASSERT(lnet_acceptor_state.pta_sock == NULL);
-
- cfs_block_allsigs();
-
- rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port,
- accept_backlog);
- if (rc != 0) {
- if (rc == -EADDRINUSE)
- LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
- accept_port);
- else
- LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port %d: unexpected error %d\n",
- accept_port, rc);
-
- lnet_acceptor_state.pta_sock = NULL;
- } else {
- LCONSOLE(0, "Accept %s, port %d\n", accept_type, accept_port);
- }
-
- /* set init status and unblock parent */
- lnet_acceptor_state.pta_shutdown = rc;
- complete(&lnet_acceptor_state.pta_signal);
-
- if (rc != 0)
- return rc;
-
- while (!lnet_acceptor_state.pta_shutdown) {
-
- rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
- if (rc != 0) {
- if (rc != -EAGAIN) {
- CWARN("Accept error %d: pausing...\n", rc);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
- continue;
- }
-
-		/* maybe the LNet acceptor thread has been woken */
- if (lnet_acceptor_state.pta_shutdown) {
- sock_release(newsock);
- break;
- }
-
- rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
- if (rc != 0) {
- CERROR("Can't determine new connection's address\n");
- goto failed;
- }
-
- if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- CERROR("Refusing connection from %pI4h: insecure port %d\n",
- &peer_ip, peer_port);
- goto failed;
- }
-
- rc = lnet_sock_read(newsock, &magic, sizeof(magic),
- accept_timeout);
- if (rc != 0) {
- CERROR("Error %d reading connection request from %pI4h\n",
- rc, &peer_ip);
- goto failed;
- }
-
- rc = lnet_accept(newsock, magic);
- if (rc != 0)
- goto failed;
-
- continue;
-
-failed:
- sock_release(newsock);
- }
-
- sock_release(lnet_acceptor_state.pta_sock);
- lnet_acceptor_state.pta_sock = NULL;
-
- CDEBUG(D_NET, "Acceptor stopping\n");
-
- /* unblock lnet_acceptor_stop() */
- complete(&lnet_acceptor_state.pta_signal);
- return 0;
-}
-
-static inline int
-accept2secure(const char *acc, long *sec)
-{
- if (!strcmp(acc, "secure")) {
- *sec = 1;
- return 1;
- } else if (!strcmp(acc, "all")) {
- *sec = 0;
- return 1;
- } else if (!strcmp(acc, "none")) {
- return 0;
- }
-
- LCONSOLE_ERROR_MSG(0x124, "Can't parse 'accept=\"%s\"'\n",
- acc);
- return -EINVAL;
-}
-
-int
-lnet_acceptor_start(void)
-{
- int rc;
- long rc2;
- long secure;
-
- LASSERT(lnet_acceptor_state.pta_sock == NULL);
-
- rc = lnet_acceptor_get_tunables();
- if (rc != 0)
- return rc;
-
-
- init_completion(&lnet_acceptor_state.pta_signal);
- rc = accept2secure(accept_type, &secure);
- if (rc <= 0)
- return rc;
-
- if (lnet_count_acceptor_nis() == 0) /* not required */
- return 0;
-
- rc2 = PTR_ERR(kthread_run(lnet_acceptor,
- (void *)(ulong_ptr_t)secure,
- "acceptor_%03ld", secure));
- if (IS_ERR_VALUE(rc2)) {
- CERROR("Can't start acceptor thread: %ld\n", rc2);
-
- return -ESRCH;
- }
-
- /* wait for acceptor to startup */
- wait_for_completion(&lnet_acceptor_state.pta_signal);
-
- if (!lnet_acceptor_state.pta_shutdown) {
- /* started OK */
- LASSERT(lnet_acceptor_state.pta_sock != NULL);
- return 0;
- }
-
- LASSERT(lnet_acceptor_state.pta_sock == NULL);
-
- return -ENETDOWN;
-}
-
-void
-lnet_acceptor_stop(void)
-{
- if (lnet_acceptor_state.pta_sock == NULL) /* not running */
- return;
-
- lnet_acceptor_state.pta_shutdown = 1;
- wake_up_all(sk_sleep(lnet_acceptor_state.pta_sock->sk));
-
- /* block until acceptor signals exit */
- wait_for_completion(&lnet_acceptor_state.pta_signal);
-}
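
The acceptor above recognises a peer's connection request by its magic value in either byte order (lnet_accept_magic() accepts the constant and its byte-swapped form, so peers of opposite endianness are handled). A small standalone C sketch of that endianness check, using a hypothetical placeholder for the real LNET_PROTO_ACCEPTOR_MAGIC value (which is defined outside this hunk), could be:

#include <stdio.h>
#include <stdint.h>

#define ACCEPTOR_MAGIC 0xacce7100u	/* hypothetical placeholder value */

/* byte-swap a 32-bit value, like __swab32() */
static uint32_t swab32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

/* same idea as lnet_accept_magic(): match the constant in either byte order */
static int accept_magic(uint32_t magic, uint32_t constant)
{
	return magic == constant || magic == swab32(constant);
}

int main(void)
{
	printf("%d\n", accept_magic(ACCEPTOR_MAGIC, ACCEPTOR_MAGIC));		/* same endianness -> 1 */
	printf("%d\n", accept_magic(swab32(ACCEPTOR_MAGIC), ACCEPTOR_MAGIC));	/* swapped peer -> 1 */
	printf("%d\n", accept_magic(0xdeadbeefu, ACCEPTOR_MAGIC));		/* unknown magic -> 0 */
	return 0;
}
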
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
deleted file mode 100644
index c368cf561b9d..000000000000
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ /dev/null
@@ -1,1867 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include <linux/log2.h>
-#include <linux/ktime.h>
-
-#include "../../include/linux/lnet/lib-lnet.h"
-
-#define D_LNI D_CONSOLE
-
-lnet_t the_lnet; /* THE state of the network */
-EXPORT_SYMBOL(the_lnet);
-
-
-static char *ip2nets = "";
-module_param(ip2nets, charp, 0444);
-MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
-
-static char *networks = "";
-module_param(networks, charp, 0444);
-MODULE_PARM_DESC(networks, "local networks");
-
-static char *routes = "";
-module_param(routes, charp, 0444);
-MODULE_PARM_DESC(routes, "routes to non-local networks");
-
-static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
-module_param(rnet_htable_size, int, 0444);
-MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
-
-static char *
-lnet_get_routes(void)
-{
- return routes;
-}
-
-static char *
-lnet_get_networks(void)
-{
- char *nets;
- int rc;
-
- if (*networks != 0 && *ip2nets != 0) {
- LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
- return NULL;
- }
-
- if (*ip2nets != 0) {
- rc = lnet_parse_ip2nets(&nets, ip2nets);
- return (rc == 0) ? nets : NULL;
- }
-
- if (*networks != 0)
- return networks;
-
- return "tcp";
-}
-
-static void
-lnet_init_locks(void)
-{
- spin_lock_init(&the_lnet.ln_eq_wait_lock);
- init_waitqueue_head(&the_lnet.ln_eq_waitq);
- mutex_init(&the_lnet.ln_lnd_mutex);
- mutex_init(&the_lnet.ln_api_mutex);
-}
-
-static int
-lnet_create_remote_nets_table(void)
-{
- int i;
- struct list_head *hash;
-
- LASSERT(the_lnet.ln_remote_nets_hash == NULL);
- LASSERT(the_lnet.ln_remote_nets_hbits > 0);
- LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
- if (hash == NULL) {
- CERROR("Failed to create remote nets hash table\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- INIT_LIST_HEAD(&hash[i]);
- the_lnet.ln_remote_nets_hash = hash;
- return 0;
-}
-
-static void
-lnet_destroy_remote_nets_table(void)
-{
- int i;
-
- if (the_lnet.ln_remote_nets_hash == NULL)
- return;
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
-
- LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
- LNET_REMOTE_NETS_HASH_SIZE *
- sizeof(the_lnet.ln_remote_nets_hash[0]));
- the_lnet.ln_remote_nets_hash = NULL;
-}
-
-static void
-lnet_destroy_locks(void)
-{
- if (the_lnet.ln_res_lock != NULL) {
- cfs_percpt_lock_free(the_lnet.ln_res_lock);
- the_lnet.ln_res_lock = NULL;
- }
-
- if (the_lnet.ln_net_lock != NULL) {
- cfs_percpt_lock_free(the_lnet.ln_net_lock);
- the_lnet.ln_net_lock = NULL;
- }
-}
-
-static int
-lnet_create_locks(void)
-{
- lnet_init_locks();
-
- the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (the_lnet.ln_res_lock == NULL)
- goto failed;
-
- the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (the_lnet.ln_net_lock == NULL)
- goto failed;
-
- return 0;
-
- failed:
- lnet_destroy_locks();
- return -ENOMEM;
-}
-
-static void lnet_assert_wire_constants(void)
-{
- /* Wire protocol assertions generated by 'wirecheck'
- * running on Linux robert.bartonsoftware.com 2.6.8-1.521
- * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
- * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
-
- /* Constants... */
- CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
- CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
- CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
- CLASSERT(LNET_MSG_ACK == 0);
- CLASSERT(LNET_MSG_PUT == 1);
- CLASSERT(LNET_MSG_GET == 2);
- CLASSERT(LNET_MSG_REPLY == 3);
- CLASSERT(LNET_MSG_HELLO == 4);
-
- /* Checks for struct ptl_handle_wire_t */
- CLASSERT((int)sizeof(lnet_handle_wire_t) == 16);
- CLASSERT((int)offsetof(lnet_handle_wire_t, wh_interface_cookie) == 0);
- CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) == 8);
- CLASSERT((int)offsetof(lnet_handle_wire_t, wh_object_cookie) == 8);
- CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_object_cookie) == 8);
-
- /* Checks for struct lnet_magicversion_t */
- CLASSERT((int)sizeof(lnet_magicversion_t) == 8);
- CLASSERT((int)offsetof(lnet_magicversion_t, magic) == 0);
- CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->magic) == 4);
- CLASSERT((int)offsetof(lnet_magicversion_t, version_major) == 4);
- CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_major) == 2);
- CLASSERT((int)offsetof(lnet_magicversion_t, version_minor) == 6);
- CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_minor) == 2);
-
- /* Checks for struct lnet_hdr_t */
- CLASSERT((int)sizeof(lnet_hdr_t) == 72);
- CLASSERT((int)offsetof(lnet_hdr_t, dest_nid) == 0);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_nid) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, src_nid) == 8);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_nid) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, dest_pid) == 16);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_pid) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, src_pid) == 20);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_pid) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, type) == 24);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->type) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, payload_length) == 28);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->payload_length) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg) == 40);
-
- /* Ack */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.dst_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.dst_wmd) == 16);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.match_bits) == 48);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.match_bits) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.mlength) == 56);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.mlength) == 4);
-
- /* Put */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ack_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ack_wmd) == 16);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.match_bits) == 48);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.match_bits) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.hdr_data) == 56);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.hdr_data) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ptl_index) == 64);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ptl_index) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.offset) == 68);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.offset) == 4);
-
- /* Get */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.return_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.return_wmd) == 16);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.match_bits) == 48);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.match_bits) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.ptl_index) == 56);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.ptl_index) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.src_offset) == 60);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.src_offset) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.sink_length) == 64);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.sink_length) == 4);
-
- /* Reply */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.reply.dst_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.reply.dst_wmd) == 16);
-
- /* Hello */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.incarnation) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.incarnation) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.type) == 40);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.type) == 4);
-}
-
-static lnd_t *
-lnet_find_lnd_by_type(int type)
-{
- lnd_t *lnd;
- struct list_head *tmp;
-
- /* holding lnd mutex */
- list_for_each(tmp, &the_lnet.ln_lnds) {
- lnd = list_entry(tmp, lnd_t, lnd_list);
-
- if ((int)lnd->lnd_type == type)
- return lnd;
- }
-
- return NULL;
-}
-
-void
-lnet_register_lnd(lnd_t *lnd)
-{
- mutex_lock(&the_lnet.ln_lnd_mutex);
-
- LASSERT(the_lnet.ln_init);
- LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
- LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
-
- list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
- lnd->lnd_refcount = 0;
-
- CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
-
- mutex_unlock(&the_lnet.ln_lnd_mutex);
-}
-EXPORT_SYMBOL(lnet_register_lnd);
-
-void
-lnet_unregister_lnd(lnd_t *lnd)
-{
- mutex_lock(&the_lnet.ln_lnd_mutex);
-
- LASSERT(the_lnet.ln_init);
- LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT(lnd->lnd_refcount == 0);
-
- list_del(&lnd->lnd_list);
- CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
-
- mutex_unlock(&the_lnet.ln_lnd_mutex);
-}
-EXPORT_SYMBOL(lnet_unregister_lnd);
-
-void
-lnet_counters_get(lnet_counters_t *counters)
-{
- lnet_counters_t *ctr;
- int i;
-
- memset(counters, 0, sizeof(*counters));
-
- lnet_net_lock(LNET_LOCK_EX);
-
- cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
- counters->msgs_max += ctr->msgs_max;
- counters->msgs_alloc += ctr->msgs_alloc;
- counters->errors += ctr->errors;
- counters->send_count += ctr->send_count;
- counters->recv_count += ctr->recv_count;
- counters->route_count += ctr->route_count;
- counters->drop_count += ctr->drop_count;
- counters->send_length += ctr->send_length;
- counters->recv_length += ctr->recv_length;
- counters->route_length += ctr->route_length;
- counters->drop_length += ctr->drop_length;
-
- }
- lnet_net_unlock(LNET_LOCK_EX);
-}
-EXPORT_SYMBOL(lnet_counters_get);
-
-void
-lnet_counters_reset(void)
-{
- lnet_counters_t *counters;
- int i;
-
- lnet_net_lock(LNET_LOCK_EX);
-
- cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
- memset(counters, 0, sizeof(lnet_counters_t));
-
- lnet_net_unlock(LNET_LOCK_EX);
-}
-EXPORT_SYMBOL(lnet_counters_reset);
-
-static __u64
-lnet_create_interface_cookie(void)
-{
- /* NB the interface cookie in wire handles guards against delayed
- * replies and ACKs appearing valid after reboot.
- */
- return ktime_get_ns();
-}
-
-static char *
-lnet_res_type2str(int type)
-{
- switch (type) {
- default:
- LBUG();
- case LNET_COOKIE_TYPE_MD:
- return "MD";
- case LNET_COOKIE_TYPE_ME:
- return "ME";
- case LNET_COOKIE_TYPE_EQ:
- return "EQ";
- }
-}
-
-static void
-lnet_res_container_cleanup(struct lnet_res_container *rec)
-{
- int count = 0;
-
- if (rec->rec_type == 0) /* not set yet, it's uninitialized */
- return;
-
- while (!list_empty(&rec->rec_active)) {
- struct list_head *e = rec->rec_active.next;
-
- list_del_init(e);
- if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
-
- } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
-
- } else { /* NB: Active MEs should be attached on portals */
- LBUG();
- }
- count++;
- }
-
- if (count > 0) {
-		/* Found live MD/ME/EQ objects; the user really should unlink/free
-		 * all of them before finalizing LNet, but if someone didn't,
-		 * we have to recycle the garbage for them */
- CERROR("%d active elements on exit of %s container\n",
- count, lnet_res_type2str(rec->rec_type));
- }
-
- if (rec->rec_lh_hash != NULL) {
- LIBCFS_FREE(rec->rec_lh_hash,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
- rec->rec_lh_hash = NULL;
- }
-
- rec->rec_type = 0; /* mark it as finalized */
-}
-
-static int
-lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
-{
- int rc = 0;
- int i;
-
- LASSERT(rec->rec_type == 0);
-
- rec->rec_type = type;
- INIT_LIST_HEAD(&rec->rec_active);
- rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
-
- /* Arbitrary choice of hash table size */
- LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
- if (rec->rec_lh_hash == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < LNET_LH_HASH_SIZE; i++)
- INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
-
- return 0;
-
-out:
- CERROR("Failed to setup %s resource container\n",
- lnet_res_type2str(type));
- lnet_res_container_cleanup(rec);
- return rc;
-}
-
-static void
-lnet_res_containers_destroy(struct lnet_res_container **recs)
-{
- struct lnet_res_container *rec;
- int i;
-
- cfs_percpt_for_each(rec, i, recs)
- lnet_res_container_cleanup(rec);
-
- cfs_percpt_free(recs);
-}
-
-static struct lnet_res_container **
-lnet_res_containers_create(int type)
-{
- struct lnet_res_container **recs;
- struct lnet_res_container *rec;
- int rc;
- int i;
-
- recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
- if (recs == NULL) {
- CERROR("Failed to allocate %s resource containers\n",
- lnet_res_type2str(type));
- return NULL;
- }
-
- cfs_percpt_for_each(rec, i, recs) {
- rc = lnet_res_container_setup(rec, i, type);
- if (rc != 0) {
- lnet_res_containers_destroy(recs);
- return NULL;
- }
- }
-
- return recs;
-}
-
-lnet_libhandle_t *
-lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
-{
- /* ALWAYS called with lnet_res_lock held */
- struct list_head *head;
- lnet_libhandle_t *lh;
- unsigned int hash;
-
- if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
- return NULL;
-
- hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
- head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
-
- list_for_each_entry(lh, head, lh_hash_chain) {
- if (lh->lh_cookie == cookie)
- return lh;
- }
-
- return NULL;
-}
-
-void
-lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
-{
- /* ALWAYS called with lnet_res_lock held */
- unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
- unsigned int hash;
-
- lh->lh_cookie = rec->rec_lh_cookie;
- rec->rec_lh_cookie += 1 << ibits;
-
- hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
-
- list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
-}
-
-
-int lnet_unprepare(void);
-
-static int
-lnet_prepare(lnet_pid_t requested_pid)
-{
- /* Prepare to bring up the network */
- struct lnet_res_container **recs;
- int rc = 0;
-
- LASSERT(the_lnet.ln_refcount == 0);
-
- the_lnet.ln_routing = 0;
-
- LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
- the_lnet.ln_pid = requested_pid;
-
- INIT_LIST_HEAD(&the_lnet.ln_test_peers);
- INIT_LIST_HEAD(&the_lnet.ln_nis);
- INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
- INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
- INIT_LIST_HEAD(&the_lnet.ln_routers);
-
- rc = lnet_create_remote_nets_table();
- if (rc != 0)
- goto failed;
-
- the_lnet.ln_interface_cookie = lnet_create_interface_cookie();
-
- the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(lnet_counters_t));
- if (the_lnet.ln_counters == NULL) {
- CERROR("Failed to allocate counters for LNet\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- rc = lnet_peer_tables_create();
- if (rc != 0)
- goto failed;
-
- rc = lnet_msg_containers_create();
- if (rc != 0)
- goto failed;
-
- rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
- LNET_COOKIE_TYPE_EQ);
- if (rc != 0)
- goto failed;
-
- recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
- if (recs == NULL) {
- rc = -ENOMEM;
- goto failed;
- }
-
- the_lnet.ln_me_containers = recs;
-
- recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
- if (recs == NULL) {
- rc = -ENOMEM;
- goto failed;
- }
-
- the_lnet.ln_md_containers = recs;
-
- rc = lnet_portals_create();
- if (rc != 0) {
- CERROR("Failed to create portals for LNet: %d\n", rc);
- goto failed;
- }
-
- return 0;
-
- failed:
- lnet_unprepare();
- return rc;
-}
-
-int
-lnet_unprepare(void)
-{
-	/* NB no LNET_LOCK since this is the last reference. All LND instances
-	 * have shut down already, so it is safe to unlink and free all
-	 * descriptors, even those that appear committed to a network op
-	 * (e.g. an MD with a non-zero pending count) */
-
- lnet_fail_nid(LNET_NID_ANY, 0);
-
- LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(list_empty(&the_lnet.ln_test_peers));
- LASSERT(list_empty(&the_lnet.ln_nis));
- LASSERT(list_empty(&the_lnet.ln_nis_cpt));
- LASSERT(list_empty(&the_lnet.ln_nis_zombie));
-
- lnet_portals_destroy();
-
- if (the_lnet.ln_md_containers != NULL) {
- lnet_res_containers_destroy(the_lnet.ln_md_containers);
- the_lnet.ln_md_containers = NULL;
- }
-
- if (the_lnet.ln_me_containers != NULL) {
- lnet_res_containers_destroy(the_lnet.ln_me_containers);
- the_lnet.ln_me_containers = NULL;
- }
-
- lnet_res_container_cleanup(&the_lnet.ln_eq_container);
-
- lnet_msg_containers_destroy();
- lnet_peer_tables_destroy();
- lnet_rtrpools_free();
-
- if (the_lnet.ln_counters != NULL) {
- cfs_percpt_free(the_lnet.ln_counters);
- the_lnet.ln_counters = NULL;
- }
- lnet_destroy_remote_nets_table();
-
- return 0;
-}
-
-lnet_ni_t *
-lnet_net2ni_locked(__u32 net, int cpt)
-{
- struct list_head *tmp;
- lnet_ni_t *ni;
-
- LASSERT(cpt != LNET_LOCK_EX);
-
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
-
- if (LNET_NIDNET(ni->ni_nid) == net) {
- lnet_ni_addref_locked(ni, cpt);
- return ni;
- }
- }
-
- return NULL;
-}
-
-lnet_ni_t *
-lnet_net2ni(__u32 net)
-{
- lnet_ni_t *ni;
-
- lnet_net_lock(0);
- ni = lnet_net2ni_locked(net, 0);
- lnet_net_unlock(0);
-
- return ni;
-}
-EXPORT_SYMBOL(lnet_net2ni);
-
-static unsigned int
-lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
-{
- __u64 key = nid;
- unsigned int val;
-
- LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
-
- if (number == 1)
- return 0;
-
- val = hash_long(key, LNET_CPT_BITS);
-	/* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
- if (val < number)
- return val;
-
- return (unsigned int)(key + val + (val >> 1)) % number;
-}
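lnet_nid_cpt_hash() has to map a NID onto a CPT count that is not necessarily a power of two, so it only trusts hash_long() when the result already falls in range and otherwise folds the key back into range with a modulo. A hedged userspace sketch of that idea follows; demo_hash_long() is an illustrative stand-in, not the kernel's hash_long().

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for hash_long(); not the kernel implementation. */
static unsigned int demo_hash_long(uint64_t key, unsigned int bits)
{
	return (unsigned int)((key * 0x9e3779b97f4a7c15ull) >> (64 - bits));
}

/* Map a 64-bit NID onto one of 'number' CPTs; 'number' may not be 2^n. */
static unsigned int demo_nid_cpt_hash(uint64_t nid, unsigned int number,
				      unsigned int bits)
{
	unsigned int val;

	if (number == 1)
		return 0;

	val = demo_hash_long(nid, bits);
	if (val < number)
		return val;

	/* fold back into range when the hash overshoots */
	return (unsigned int)(nid + val + (val >> 1)) % number;
}

int main(void)
{
	uint64_t nid;

	for (nid = 1; nid <= 5; nid++)
		printf("nid %llu -> cpt %u\n", (unsigned long long)nid,
		       demo_nid_cpt_hash(nid, 3, 4));
	return 0;
}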
-
-int
-lnet_cpt_of_nid_locked(lnet_nid_t nid)
-{
- struct lnet_ni *ni;
-
-	/* must be called while holding lnet_net_lock */
- if (LNET_CPT_NUMBER == 1)
- return 0; /* the only one */
-
-	/* taking lnet_net_lock(any) would be OK */
- if (!list_empty(&the_lnet.ln_nis_cpt)) {
- list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
- if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
- continue;
-
- LASSERT(ni->ni_cpts != NULL);
-			return ni->ni_cpts[lnet_nid_cpt_hash(nid,
-							     ni->ni_ncpts)];
- }
- }
-
- return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
-}
-
-int
-lnet_cpt_of_nid(lnet_nid_t nid)
-{
- int cpt;
- int cpt2;
-
- if (LNET_CPT_NUMBER == 1)
- return 0; /* the only one */
-
- if (list_empty(&the_lnet.ln_nis_cpt))
- return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
-
- cpt = lnet_net_lock_current();
- cpt2 = lnet_cpt_of_nid_locked(nid);
- lnet_net_unlock(cpt);
-
- return cpt2;
-}
-EXPORT_SYMBOL(lnet_cpt_of_nid);
-
-int
-lnet_islocalnet(__u32 net)
-{
- struct lnet_ni *ni;
- int cpt;
-
- cpt = lnet_net_lock_current();
-
- ni = lnet_net2ni_locked(net, cpt);
- if (ni != NULL)
- lnet_ni_decref_locked(ni, cpt);
-
- lnet_net_unlock(cpt);
-
- return ni != NULL;
-}
-
-lnet_ni_t *
-lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
-{
- struct lnet_ni *ni;
- struct list_head *tmp;
-
- LASSERT(cpt != LNET_LOCK_EX);
-
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
-
- if (ni->ni_nid == nid) {
- lnet_ni_addref_locked(ni, cpt);
- return ni;
- }
- }
-
- return NULL;
-}
-
-int
-lnet_islocalnid(lnet_nid_t nid)
-{
- struct lnet_ni *ni;
- int cpt;
-
- cpt = lnet_net_lock_current();
- ni = lnet_nid2ni_locked(nid, cpt);
- if (ni != NULL)
- lnet_ni_decref_locked(ni, cpt);
- lnet_net_unlock(cpt);
-
- return ni != NULL;
-}
-
-int
-lnet_count_acceptor_nis(void)
-{
- /* Return the # of NIs that need the acceptor. */
- int count = 0;
- struct list_head *tmp;
- struct lnet_ni *ni;
- int cpt;
-
- cpt = lnet_net_lock_current();
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
-
- if (ni->ni_lnd->lnd_accept != NULL)
- count++;
- }
-
- lnet_net_unlock(cpt);
-
- return count;
-}
-
-static int
-lnet_ni_tq_credits(lnet_ni_t *ni)
-{
- int credits;
-
- LASSERT(ni->ni_ncpts >= 1);
-
- if (ni->ni_ncpts == 1)
- return ni->ni_maxtxcredits;
-
- credits = ni->ni_maxtxcredits / ni->ni_ncpts;
- credits = max(credits, 8 * ni->ni_peertxcredits);
- credits = min(credits, ni->ni_maxtxcredits);
-
- return credits;
-}
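lnet_ni_tq_credits() splits the interface's TX credits across CPTs as a clamped even share: at least 8x the per-peer credits, but never more than the interface total. A small illustrative sketch of the same clamp with hypothetical demo_* names:

#include <stdio.h>

#define DEMO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define DEMO_MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirror of the clamp used for per-CPT TX queue credits. */
static int demo_tq_credits(int maxtxcredits, int peertxcredits, int ncpts)
{
	int credits;

	if (ncpts == 1)
		return maxtxcredits;

	credits = maxtxcredits / ncpts;
	credits = DEMO_MAX(credits, 8 * peertxcredits);
	return DEMO_MIN(credits, maxtxcredits);
}

int main(void)
{
	/* e.g. 256 interface credits, 8 per peer, spread over 4 CPTs */
	printf("per-CPT credits: %d\n", demo_tq_credits(256, 8, 4));
	return 0;
}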
-
-static void
-lnet_shutdown_lndnis(void)
-{
- int i;
- int islo;
- lnet_ni_t *ni;
-
- /* NB called holding the global mutex */
-
- /* All quiet on the API front */
- LASSERT(!the_lnet.ln_shutdown);
- LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(list_empty(&the_lnet.ln_nis_zombie));
-
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_shutdown = 1; /* flag shutdown */
-
- /* Unlink NIs from the global table */
- while (!list_empty(&the_lnet.ln_nis)) {
- ni = list_entry(the_lnet.ln_nis.next,
- lnet_ni_t, ni_list);
-		/* move it to the zombie list so nobody can find it anymore */
- list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
- lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
-
- if (!list_empty(&ni->ni_cptlist)) {
- list_del_init(&ni->ni_cptlist);
- lnet_ni_decref_locked(ni, 0);
- }
- }
-
- /* Drop the cached eqwait NI. */
- if (the_lnet.ln_eq_waitni != NULL) {
- lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0);
- the_lnet.ln_eq_waitni = NULL;
- }
-
- /* Drop the cached loopback NI. */
- if (the_lnet.ln_loni != NULL) {
- lnet_ni_decref_locked(the_lnet.ln_loni, 0);
- the_lnet.ln_loni = NULL;
- }
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- /* Clear lazy portals and drop delayed messages which hold refs
- * on their lnet_msg_t::msg_rxpeer */
- for (i = 0; i < the_lnet.ln_nportals; i++)
- LNetClearLazyPortal(i);
-
- /* Clear the peer table and wait for all peers to go (they hold refs on
- * their NIs) */
- lnet_peer_tables_cleanup();
-
- lnet_net_lock(LNET_LOCK_EX);
-	/* Now wait for the NIs I just nuked to show up on ln_nis_zombie
-	 * and shut them down in guaranteed thread context */
- i = 2;
- while (!list_empty(&the_lnet.ln_nis_zombie)) {
- int *ref;
- int j;
-
- ni = list_entry(the_lnet.ln_nis_zombie.next,
- lnet_ni_t, ni_list);
- list_del_init(&ni->ni_list);
- cfs_percpt_for_each(ref, j, ni->ni_refs) {
- if (*ref == 0)
- continue;
- /* still busy, add it back to zombie list */
- list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
- break;
- }
-
- if (!list_empty(&ni->ni_list)) {
- lnet_net_unlock(LNET_LOCK_EX);
- ++i;
- if ((i & (-i)) == i) {
- CDEBUG(D_WARNING, "Waiting for zombie LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- lnet_net_lock(LNET_LOCK_EX);
- continue;
- }
-
- ni->ni_lnd->lnd_refcount--;
- lnet_net_unlock(LNET_LOCK_EX);
-
- islo = ni->ni_lnd->lnd_type == LOLND;
-
- LASSERT(!in_interrupt());
- (ni->ni_lnd->lnd_shutdown)(ni);
-
- /* can't deref lnd anymore now; it might have unregistered
- * itself... */
-
- if (!islo)
- CDEBUG(D_LNI, "Removed LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
-
- lnet_ni_free(ni);
- i = 2;
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-
- the_lnet.ln_shutdown = 0;
- lnet_net_unlock(LNET_LOCK_EX);
-
- if (the_lnet.ln_network_tokens != NULL) {
- LIBCFS_FREE(the_lnet.ln_network_tokens,
- the_lnet.ln_network_tokens_nob);
- the_lnet.ln_network_tokens = NULL;
- }
-}
-
-static int
-lnet_startup_lndnis(void)
-{
- lnd_t *lnd;
- struct lnet_ni *ni;
- struct lnet_tx_queue *tq;
- struct list_head nilist;
- int i;
- int rc = 0;
- int lnd_type;
- int nicount = 0;
- char *nets = lnet_get_networks();
-
- INIT_LIST_HEAD(&nilist);
-
- if (nets == NULL)
- goto failed;
-
- rc = lnet_parse_networks(&nilist, nets);
- if (rc != 0)
- goto failed;
-
- while (!list_empty(&nilist)) {
- ni = list_entry(nilist.next, lnet_ni_t, ni_list);
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
-
- LASSERT(libcfs_isknown_lnd(lnd_type));
-
- if (lnd_type == CIBLND ||
- lnd_type == OPENIBLND ||
- lnd_type == IIBLND ||
- lnd_type == VIBLND) {
- CERROR("LND %s obsoleted\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
- }
-
- mutex_lock(&the_lnet.ln_lnd_mutex);
- lnd = lnet_find_lnd_by_type(lnd_type);
-
- if (lnd == NULL) {
- mutex_unlock(&the_lnet.ln_lnd_mutex);
- rc = request_module("%s",
- libcfs_lnd2modname(lnd_type));
- mutex_lock(&the_lnet.ln_lnd_mutex);
-
- lnd = lnet_find_lnd_by_type(lnd_type);
- if (lnd == NULL) {
- mutex_unlock(&the_lnet.ln_lnd_mutex);
- CERROR("Can't load LND %s, module %s, rc=%d\n",
- libcfs_lnd2str(lnd_type),
- libcfs_lnd2modname(lnd_type), rc);
- goto failed;
- }
- }
-
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount++;
- lnet_net_unlock(LNET_LOCK_EX);
-
- ni->ni_lnd = lnd;
-
- rc = (lnd->lnd_startup)(ni);
-
- mutex_unlock(&the_lnet.ln_lnd_mutex);
-
- if (rc != 0) {
- LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
- rc, libcfs_lnd2str(lnd->lnd_type));
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount--;
- lnet_net_unlock(LNET_LOCK_EX);
- goto failed;
- }
-
- LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
-
- list_del(&ni->ni_list);
-
- lnet_net_lock(LNET_LOCK_EX);
- /* refcount for ln_nis */
- lnet_ni_addref_locked(ni, 0);
- list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
- if (ni->ni_cpts != NULL) {
- list_add_tail(&ni->ni_cptlist,
- &the_lnet.ln_nis_cpt);
- lnet_ni_addref_locked(ni, 0);
- }
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- if (lnd->lnd_type == LOLND) {
- lnet_ni_addref(ni);
- LASSERT(the_lnet.ln_loni == NULL);
- the_lnet.ln_loni = ni;
- continue;
- }
-
- if (ni->ni_peertxcredits == 0 ||
- ni->ni_maxtxcredits == 0) {
- LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
- libcfs_lnd2str(lnd->lnd_type),
- ni->ni_peertxcredits == 0 ?
- "" : "per-peer ");
- goto failed;
- }
-
- cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
- tq->tq_credits_min =
- tq->tq_credits_max =
- tq->tq_credits = lnet_ni_tq_credits(ni);
- }
-
- CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
- libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
- lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
- ni->ni_peerrtrcredits, ni->ni_peertimeout);
-
- nicount++;
- }
-
- if (the_lnet.ln_eq_waitni != NULL && nicount > 1) {
- lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
- LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
- }
-
- return 0;
-
- failed:
- lnet_shutdown_lndnis();
-
- while (!list_empty(&nilist)) {
- ni = list_entry(nilist.next, lnet_ni_t, ni_list);
- list_del(&ni->ni_list);
- lnet_ni_free(ni);
- }
-
- return -ENETDOWN;
-}
-
-/**
- * Initialize LNet library.
- *
- * Only userspace programs need to call this function - it is called
- * automatically in the kernel at module load time. The caller must call
- * lnet_fini() after a call to lnet_init(), if and only if the latter
- * returned 0. It must be called exactly once.
- *
- * \return 0 on success, and a negative value on failure.
- */
-int
-lnet_init(void)
-{
- int rc;
-
- lnet_assert_wire_constants();
- LASSERT(!the_lnet.ln_init);
-
- memset(&the_lnet, 0, sizeof(the_lnet));
-
- /* refer to global cfs_cpt_table for now */
- the_lnet.ln_cpt_table = cfs_cpt_table;
- the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
-
- LASSERT(the_lnet.ln_cpt_number > 0);
- if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
-		/* we are at risk of exhausting the lh_cookie space */
- CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
- the_lnet.ln_cpt_number, LNET_CPT_MAX);
- return -1;
- }
-
- while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
- the_lnet.ln_cpt_bits++;
-
- rc = lnet_create_locks();
- if (rc != 0) {
- CERROR("Can't create LNet global locks: %d\n", rc);
- return -1;
- }
-
- the_lnet.ln_refcount = 0;
- the_lnet.ln_init = 1;
- LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
- INIT_LIST_HEAD(&the_lnet.ln_lnds);
- INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
- INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
-
-	/* The hash table size is the number of bits it takes to express the
-	 * set ln_num_routes, minus 1 (better to underestimate than
-	 * overestimate so we don't waste memory). */
- if (rnet_htable_size <= 0)
- rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
- else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
- rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
- the_lnet.ln_remote_nets_hbits = max_t(int, 1,
- order_base_2(rnet_htable_size) - 1);
-
- /* All LNDs apart from the LOLND are in separate modules. They
- * register themselves when their module loads, and unregister
- * themselves when their module is unloaded. */
- lnet_register_lnd(&the_lolnd);
- return 0;
-}
-EXPORT_SYMBOL(lnet_init);
-
-/**
- * Finalize LNet library.
- *
- * Only userspace programs need to call this function. It can be called
- * at most once.
- *
- * \pre lnet_init() called with success.
- * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
- */
-void
-lnet_fini(void)
-{
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount == 0);
-
- while (!list_empty(&the_lnet.ln_lnds))
- lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
- lnet_destroy_locks();
-
- the_lnet.ln_init = 0;
-}
-EXPORT_SYMBOL(lnet_fini);
-
-/**
- * Set LNet PID and start LNet interfaces, routing, and forwarding.
- *
- * Userspace programs should call this after a successful call to lnet_init().
- * Users must call this function at least once before any other functions.
- * For each successful call there must be a corresponding call to
- * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
- * ignored.
- *
- * The PID used by LNet may be different from the one requested.
- * See LNetGetId().
- *
- * \param requested_pid PID requested by the caller.
- *
- * \return >= 0 on success, and < 0 error code on failures.
- */
-int
-LNetNIInit(lnet_pid_t requested_pid)
-{
- int im_a_router = 0;
- int rc;
-
- mutex_lock(&the_lnet.ln_api_mutex);
-
- LASSERT(the_lnet.ln_init);
- CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
-
- if (the_lnet.ln_refcount > 0) {
- rc = the_lnet.ln_refcount++;
- goto out;
- }
-
- if (requested_pid == LNET_PID_ANY) {
- /* Don't instantiate LNET just for me */
- rc = -ENETDOWN;
- goto failed0;
- }
-
- rc = lnet_prepare(requested_pid);
- if (rc != 0)
- goto failed0;
-
- rc = lnet_startup_lndnis();
- if (rc != 0)
- goto failed1;
-
- rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
- if (rc != 0)
- goto failed2;
-
- rc = lnet_check_routes();
- if (rc != 0)
- goto failed2;
-
- rc = lnet_rtrpools_alloc(im_a_router);
- if (rc != 0)
- goto failed2;
-
- rc = lnet_acceptor_start();
- if (rc != 0)
- goto failed2;
-
- the_lnet.ln_refcount = 1;
- /* Now I may use my own API functions... */
-
- /* NB router checker needs the_lnet.ln_ping_info in
- * lnet_router_checker -> lnet_update_ni_status_locked */
- rc = lnet_ping_target_init();
- if (rc != 0)
- goto failed3;
-
- rc = lnet_router_checker_start();
- if (rc != 0)
- goto failed4;
-
- lnet_router_debugfs_init();
- goto out;
-
- failed4:
- lnet_ping_target_fini();
- failed3:
- the_lnet.ln_refcount = 0;
- lnet_acceptor_stop();
- failed2:
- lnet_destroy_routes();
- lnet_shutdown_lndnis();
- failed1:
- lnet_unprepare();
- failed0:
- LASSERT(rc < 0);
- out:
- mutex_unlock(&the_lnet.ln_api_mutex);
- return rc;
-}
-EXPORT_SYMBOL(LNetNIInit);
-
-/**
- * Stop LNet interfaces, routing, and forwarding.
- *
- * Users must call this function once for each successful call to LNetNIInit().
- * Once the LNetNIFini() operation has been started, the results of pending
- * API operations are undefined.
- *
- * \return always 0 for current implementation.
- */
-int
-LNetNIFini(void)
-{
- mutex_lock(&the_lnet.ln_api_mutex);
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (the_lnet.ln_refcount != 1) {
- the_lnet.ln_refcount--;
- } else {
- LASSERT(!the_lnet.ln_niinit_self);
-
- lnet_router_debugfs_fini();
- lnet_router_checker_stop();
- lnet_ping_target_fini();
-
- /* Teardown fns that use my own API functions BEFORE here */
- the_lnet.ln_refcount = 0;
-
- lnet_acceptor_stop();
- lnet_destroy_routes();
- lnet_shutdown_lndnis();
- lnet_unprepare();
- }
-
- mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
-}
-EXPORT_SYMBOL(LNetNIFini);
-
-/**
- * This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and
- * IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet
- * internal ioctl handler.
- *
- * IOC_LIBCFS_PORTALS_COMPATIBILITY is now deprecated, don't use it.
- *
- * \param cmd IOC_LIBCFS_DEBUG_PEER to print debugging data about a peer.
- * The data will be printed to the system console. Don't use it excessively.
- * \param arg A pointer to lnet_process_id_t, process ID of the peer.
- *
- * \return Always returns 0 when called by users directly (i.e., not via ioctl).
- */
-int
-LNetCtl(unsigned int cmd, void *arg)
-{
- struct libcfs_ioctl_data *data = arg;
- lnet_process_id_t id = {0};
- lnet_ni_t *ni;
- int rc;
- unsigned long secs_passed;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- switch (cmd) {
- case IOC_LIBCFS_GET_NI:
- rc = LNetGetId(data->ioc_count, &id);
- data->ioc_nid = id.nid;
- return rc;
-
- case IOC_LIBCFS_FAIL_NID:
- return lnet_fail_nid(data->ioc_nid, data->ioc_count);
-
- case IOC_LIBCFS_ADD_ROUTE:
- rc = lnet_add_route(data->ioc_net, data->ioc_count,
- data->ioc_nid, data->ioc_priority);
- return (rc != 0) ? rc : lnet_check_routes();
-
- case IOC_LIBCFS_DEL_ROUTE:
- return lnet_del_route(data->ioc_net, data->ioc_nid);
-
- case IOC_LIBCFS_GET_ROUTE:
- return lnet_get_route(data->ioc_count,
- &data->ioc_net, &data->ioc_count,
- &data->ioc_nid, &data->ioc_flags,
- &data->ioc_priority);
- case IOC_LIBCFS_NOTIFY_ROUTER:
- secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]);
- return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
- jiffies - secs_passed * HZ);
-
- case IOC_LIBCFS_PORTALS_COMPATIBILITY:
- /* This can be removed once lustre stops calling it */
- return 0;
-
- case IOC_LIBCFS_LNET_DIST:
- rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
- if (rc < 0 && rc != -EHOSTUNREACH)
- return rc;
-
- data->ioc_u32[0] = rc;
- return 0;
-
- case IOC_LIBCFS_TESTPROTOCOMPAT:
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_testprotocompat = data->ioc_flags;
- lnet_net_unlock(LNET_LOCK_EX);
- return 0;
-
- case IOC_LIBCFS_PING:
- id.nid = data->ioc_nid;
- id.pid = data->ioc_u32[0];
- rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
- (lnet_process_id_t *)data->ioc_pbuf1,
- data->ioc_plen1/sizeof(lnet_process_id_t));
- if (rc < 0)
- return rc;
- data->ioc_count = rc;
- return 0;
-
- case IOC_LIBCFS_DEBUG_PEER: {
-		/* CAVEAT EMPTOR: this one is designed to be called directly,
-		 * not via an ioctl */
- id = *((lnet_process_id_t *) arg);
-
- lnet_debug_peer(id.nid);
-
- ni = lnet_net2ni(LNET_NIDNET(id.nid));
- if (ni == NULL) {
- CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
- } else {
- if (ni->ni_lnd->lnd_ctl == NULL) {
- CDEBUG(D_WARNING, "No ctl for %s\n",
- libcfs_id2str(id));
- } else {
- (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
- }
-
- lnet_ni_decref(ni);
- }
- return 0;
- }
-
- default:
- ni = lnet_net2ni(data->ioc_net);
- if (ni == NULL)
- return -EINVAL;
-
- if (ni->ni_lnd->lnd_ctl == NULL)
- rc = -EINVAL;
- else
- rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
-
- lnet_ni_decref(ni);
- return rc;
- }
- /* not reached */
-}
-EXPORT_SYMBOL(LNetCtl);
-
-/**
- * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
- * all interfaces share the same PID, as requested by LNetNIInit().
- *
- * \param index Index of the interface to look up.
- * \param id On successful return, this location will hold the
- * lnet_process_id_t ID of the interface.
- *
- * \retval 0 If an interface exists at \a index.
- * \retval -ENOENT If no interface has been found.
- */
-int
-LNetGetId(unsigned int index, lnet_process_id_t *id)
-{
- struct lnet_ni *ni;
- struct list_head *tmp;
- int cpt;
- int rc = -ENOENT;
-
- LASSERT(the_lnet.ln_init);
-
-	/* LNetNI initialization failed? */
- if (the_lnet.ln_refcount == 0)
- return rc;
-
- cpt = lnet_net_lock_current();
-
- list_for_each(tmp, &the_lnet.ln_nis) {
- if (index-- != 0)
- continue;
-
- ni = list_entry(tmp, lnet_ni_t, ni_list);
-
- id->nid = ni->ni_nid;
- id->pid = the_lnet.ln_pid;
- rc = 0;
- break;
- }
-
- lnet_net_unlock(cpt);
- return rc;
-}
-EXPORT_SYMBOL(LNetGetId);
-
-/**
- * Print a string representation of handle \a h into buffer \a str of
- * \a len bytes.
- */
-void
-LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
-{
- snprintf(str, len, "%#llx", h.cookie);
-}
-EXPORT_SYMBOL(LNetSnprintHandle);
-
-static int
-lnet_create_ping_info(void)
-{
- int i;
- int n;
- int rc;
- unsigned int infosz;
- lnet_ni_t *ni;
- lnet_process_id_t id;
- lnet_ping_info_t *pinfo;
-
- for (n = 0; ; n++) {
- rc = LNetGetId(n, &id);
- if (rc == -ENOENT)
- break;
-
- LASSERT(rc == 0);
- }
-
- infosz = offsetof(lnet_ping_info_t, pi_ni[n]);
- LIBCFS_ALLOC(pinfo, infosz);
- if (pinfo == NULL) {
- CERROR("Can't allocate ping info[%d]\n", n);
- return -ENOMEM;
- }
-
- pinfo->pi_nnis = n;
- pinfo->pi_pid = the_lnet.ln_pid;
- pinfo->pi_magic = LNET_PROTO_PING_MAGIC;
- pinfo->pi_features = LNET_PING_FEAT_NI_STATUS;
-
- for (i = 0; i < n; i++) {
- lnet_ni_status_t *ns = &pinfo->pi_ni[i];
-
- rc = LNetGetId(i, &id);
- LASSERT(rc == 0);
-
- ns->ns_nid = id.nid;
- ns->ns_status = LNET_NI_STATUS_UP;
-
- lnet_net_lock(0);
-
- ni = lnet_nid2ni_locked(id.nid, 0);
- LASSERT(ni != NULL);
-
- lnet_ni_lock(ni);
- LASSERT(ni->ni_status == NULL);
- ni->ni_status = ns;
- lnet_ni_unlock(ni);
-
- lnet_ni_decref_locked(ni, 0);
- lnet_net_unlock(0);
- }
-
- the_lnet.ln_ping_info = pinfo;
- return 0;
-}
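The ping buffer is sized with offsetof(lnet_ping_info_t, pi_ni[n]), i.e. the fixed header plus n trailing lnet_ni_status_t slots, and each NI then points its ni_status at its own slot. The sketch below shows only that flexible-array sizing pattern, using stand-in demo_* structures rather than the real LNet types (the variable array index inside offsetof relies on the same GCC behaviour the kernel uses).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for lnet_ni_status_t / lnet_ping_info_t (illustrative only). */
struct demo_ni_status {
	uint64_t ns_nid;
	uint32_t ns_status;
	uint32_t ns_unused;
};

struct demo_ping_info {
	uint32_t pi_magic;
	uint32_t pi_features;
	uint32_t pi_pid;
	uint32_t pi_nnis;
	struct demo_ni_status pi_ni[0];	/* flexible trailing array */
};

int main(void)
{
	unsigned int n = 3;
	size_t infosz = offsetof(struct demo_ping_info, pi_ni[n]);
	struct demo_ping_info *pinfo = calloc(1, infosz);
	unsigned int i;

	if (pinfo == NULL)
		return 1;

	pinfo->pi_nnis = n;
	for (i = 0; i < n; i++)
		pinfo->pi_ni[i].ns_status = 1;	/* e.g. "up" */

	printf("allocated %zu bytes for %u NIs\n", infosz, n);
	free(pinfo);
	return 0;
}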
-
-static void
-lnet_destroy_ping_info(void)
-{
- struct lnet_ni *ni;
-
- lnet_net_lock(0);
-
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- lnet_ni_lock(ni);
- ni->ni_status = NULL;
- lnet_ni_unlock(ni);
- }
-
- lnet_net_unlock(0);
-
- LIBCFS_FREE(the_lnet.ln_ping_info,
- offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]));
- the_lnet.ln_ping_info = NULL;
-}
-
-int
-lnet_ping_target_init(void)
-{
- lnet_md_t md = { NULL };
- lnet_handle_me_t meh;
- lnet_process_id_t id;
- int rc;
- int rc2;
- int infosz;
-
- rc = lnet_create_ping_info();
- if (rc != 0)
- return rc;
-
- /* We can have a tiny EQ since we only need to see the unlink event on
- * teardown, which by definition is the last one! */
- rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq);
- if (rc != 0) {
- CERROR("Can't allocate ping EQ: %d\n", rc);
- goto failed_0;
- }
-
- memset(&id, 0, sizeof(lnet_process_id_t));
- id.nid = LNET_NID_ANY;
- id.pid = LNET_PID_ANY;
-
- rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
- LNET_PROTO_PING_MATCHBITS, 0,
- LNET_UNLINK, LNET_INS_AFTER,
- &meh);
- if (rc != 0) {
- CERROR("Can't create ping ME: %d\n", rc);
- goto failed_1;
- }
-
- /* initialize md content */
- infosz = offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]);
- md.start = the_lnet.ln_ping_info;
- md.length = infosz;
- md.threshold = LNET_MD_THRESH_INF;
- md.max_size = 0;
- md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
- LNET_MD_MANAGE_REMOTE;
- md.user_ptr = NULL;
- md.eq_handle = the_lnet.ln_ping_target_eq;
-
- rc = LNetMDAttach(meh, md,
- LNET_RETAIN,
- &the_lnet.ln_ping_target_md);
- if (rc != 0) {
- CERROR("Can't attach ping MD: %d\n", rc);
- goto failed_2;
- }
-
- return 0;
-
- failed_2:
- rc2 = LNetMEUnlink(meh);
- LASSERT(rc2 == 0);
- failed_1:
- rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc2 == 0);
- failed_0:
- lnet_destroy_ping_info();
- return rc;
-}
-
-void
-lnet_ping_target_fini(void)
-{
- lnet_event_t event;
- int rc;
- int which;
- int timeout_ms = 1000;
- sigset_t blocked = cfs_block_allsigs();
-
- LNetMDUnlink(the_lnet.ln_ping_target_md);
- /* NB md could be busy; this just starts the unlink */
-
- for (;;) {
- rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
- timeout_ms, &event, &which);
-
- /* I expect overflow... */
- LASSERT(rc >= 0 || rc == -EOVERFLOW);
-
- if (rc == 0) {
- /* timed out: provide a diagnostic */
- CWARN("Still waiting for ping MD to unlink\n");
- timeout_ms *= 2;
- continue;
- }
-
- /* Got a valid event */
- if (event.unlinked)
- break;
- }
-
- rc = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc == 0);
- lnet_destroy_ping_info();
- cfs_restore_sigs(blocked);
-}
-
-int
-lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
-{
- lnet_handle_eq_t eqh;
- lnet_handle_md_t mdh;
- lnet_event_t event;
- lnet_md_t md = { NULL };
- int which;
- int unlinked = 0;
- int replied = 0;
-	const int a_long_time = 60000; /* ms */
- int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
- lnet_ping_info_t *info;
- lnet_process_id_t tmpid;
- int i;
- int nob;
- int rc;
- int rc2;
- sigset_t blocked;
-
- if (n_ids <= 0 ||
- id.nid == LNET_NID_ANY ||
- timeout_ms > 500000 || /* arbitrary limit! */
- n_ids > 20) /* arbitrary limit! */
- return -EINVAL;
-
- if (id.pid == LNET_PID_ANY)
- id.pid = LUSTRE_SRV_LNET_PID;
-
- LIBCFS_ALLOC(info, infosz);
- if (info == NULL)
- return -ENOMEM;
-
- /* NB 2 events max (including any unlink event) */
- rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
- if (rc != 0) {
- CERROR("Can't allocate EQ: %d\n", rc);
- goto out_0;
- }
-
- /* initialize md content */
- md.start = info;
- md.length = infosz;
- md.threshold = 2; /*GET/REPLY*/
- md.max_size = 0;
- md.options = LNET_MD_TRUNCATE;
- md.user_ptr = NULL;
- md.eq_handle = eqh;
-
- rc = LNetMDBind(md, LNET_UNLINK, &mdh);
- if (rc != 0) {
- CERROR("Can't bind MD: %d\n", rc);
- goto out_1;
- }
-
- rc = LNetGet(LNET_NID_ANY, mdh, id,
- LNET_RESERVED_PORTAL,
- LNET_PROTO_PING_MATCHBITS, 0);
-
- if (rc != 0) {
- /* Don't CERROR; this could be deliberate! */
-
- rc2 = LNetMDUnlink(mdh);
- LASSERT(rc2 == 0);
-
- /* NB must wait for the UNLINK event below... */
- unlinked = 1;
- timeout_ms = a_long_time;
- }
-
- do {
- /* MUST block for unlink to complete */
- if (unlinked)
- blocked = cfs_block_allsigs();
-
- rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);
-
- if (unlinked)
- cfs_restore_sigs(blocked);
-
- CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
- (rc2 <= 0) ? -1 : event.type,
- (rc2 <= 0) ? -1 : event.status,
- (rc2 > 0 && event.unlinked) ? " unlinked" : "");
-
- LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
-
- if (rc2 <= 0 || event.status != 0) {
- /* timeout or error */
- if (!replied && rc == 0)
- rc = (rc2 < 0) ? rc2 :
- (rc2 == 0) ? -ETIMEDOUT :
- event.status;
-
- if (!unlinked) {
- /* Ensure completion in finite time... */
- LNetMDUnlink(mdh);
- /* No assertion (racing with network) */
- unlinked = 1;
- timeout_ms = a_long_time;
- } else if (rc2 == 0) {
- /* timed out waiting for unlink */
- CWARN("ping %s: late network completion\n",
- libcfs_id2str(id));
- }
- } else if (event.type == LNET_EVENT_REPLY) {
- replied = 1;
- rc = event.mlength;
- }
-
- } while (rc2 <= 0 || !event.unlinked);
-
- if (!replied) {
- if (rc >= 0)
- CWARN("%s: Unexpected rc >= 0 but no reply!\n",
- libcfs_id2str(id));
- rc = -EIO;
- goto out_1;
- }
-
- nob = rc;
- LASSERT(nob >= 0 && nob <= infosz);
-
- rc = -EPROTO; /* if I can't parse... */
-
- if (nob < 8) {
- /* can't check magic/version */
- CERROR("%s: ping info too short %d\n",
- libcfs_id2str(id), nob);
- goto out_1;
- }
-
- if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
- lnet_swap_pinginfo(info);
- } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
- CERROR("%s: Unexpected magic %08x\n",
- libcfs_id2str(id), info->pi_magic);
- goto out_1;
- }
-
- if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
- CERROR("%s: ping w/o NI status: 0x%x\n",
- libcfs_id2str(id), info->pi_features);
- goto out_1;
- }
-
- if (nob < offsetof(lnet_ping_info_t, pi_ni[0])) {
- CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
- nob, (int)offsetof(lnet_ping_info_t, pi_ni[0]));
- goto out_1;
- }
-
- if (info->pi_nnis < n_ids)
- n_ids = info->pi_nnis;
-
- if (nob < offsetof(lnet_ping_info_t, pi_ni[n_ids])) {
- CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
- nob, (int)offsetof(lnet_ping_info_t, pi_ni[n_ids]));
- goto out_1;
- }
-
- rc = -EFAULT; /* If I SEGV... */
-
- memset(&tmpid, 0, sizeof(tmpid));
- for (i = 0; i < n_ids; i++) {
- tmpid.pid = info->pi_pid;
- tmpid.nid = info->pi_ni[i].ns_nid;
- if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
- goto out_1;
- }
- rc = info->pi_nnis;
-
- out_1:
- rc2 = LNetEQFree(eqh);
- if (rc2 != 0)
- CERROR("rc2 %d\n", rc2);
- LASSERT(rc2 == 0);
-
- out_0:
- LIBCFS_FREE(info, infosz);
- return rc;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
deleted file mode 100644
index b09a438c49d6..000000000000
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ /dev/null
@@ -1,1212 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include "../../include/linux/lnet/lib-lnet.h"
-
-struct lnet_text_buf_t { /* tmp struct for parsing routes */
- struct list_head ltb_list; /* stash on lists */
- int ltb_size; /* allocated size */
- char ltb_text[0]; /* text buffer */
-};
-
-static int lnet_tbnob; /* track text buf allocation */
-#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */
-#define LNET_SINGLE_TEXTBUF_NOB (4<<10)
-
-static void
-lnet_syntax(char *name, char *str, int offset, int width)
-{
- static char dots[LNET_SINGLE_TEXTBUF_NOB];
- static char dashes[LNET_SINGLE_TEXTBUF_NOB];
-
- memset(dots, '.', sizeof(dots));
- dots[sizeof(dots)-1] = 0;
- memset(dashes, '-', sizeof(dashes));
- dashes[sizeof(dashes)-1] = 0;
-
- LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str);
- LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n",
- (int)strlen(name), dots, offset, dots,
- (width < 1) ? 0 : width - 1, dashes);
-}
-
-static int
-lnet_issep(char c)
-{
- switch (c) {
- case '\n':
- case '\r':
- case ';':
- return 1;
- default:
- return 0;
- }
-}
-
-static int
-lnet_net_unique(__u32 net, struct list_head *nilist)
-{
- struct list_head *tmp;
- lnet_ni_t *ni;
-
- list_for_each(tmp, nilist) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
-
- if (LNET_NIDNET(ni->ni_nid) == net)
- return 0;
- }
-
- return 1;
-}
-
-void
-lnet_ni_free(struct lnet_ni *ni)
-{
- if (ni->ni_refs != NULL)
- cfs_percpt_free(ni->ni_refs);
-
- if (ni->ni_tx_queues != NULL)
- cfs_percpt_free(ni->ni_tx_queues);
-
- if (ni->ni_cpts != NULL)
- cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
-
- LIBCFS_FREE(ni, sizeof(*ni));
-}
-
-static lnet_ni_t *
-lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
-{
- struct lnet_tx_queue *tq;
- struct lnet_ni *ni;
- int rc;
- int i;
-
- if (!lnet_net_unique(net, nilist)) {
- LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n",
- libcfs_net2str(net));
- return NULL;
- }
-
- LIBCFS_ALLOC(ni, sizeof(*ni));
- if (ni == NULL) {
- CERROR("Out of memory creating network %s\n",
- libcfs_net2str(net));
- return NULL;
- }
-
- spin_lock_init(&ni->ni_lock);
- INIT_LIST_HEAD(&ni->ni_cptlist);
- ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*ni->ni_refs[0]));
- if (ni->ni_refs == NULL)
- goto failed;
-
- ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*ni->ni_tx_queues[0]));
- if (ni->ni_tx_queues == NULL)
- goto failed;
-
- cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
- INIT_LIST_HEAD(&tq->tq_delayed);
-
- if (el == NULL) {
- ni->ni_cpts = NULL;
- ni->ni_ncpts = LNET_CPT_NUMBER;
- } else {
- rc = cfs_expr_list_values(el, LNET_CPT_NUMBER, &ni->ni_cpts);
- if (rc <= 0) {
- CERROR("Failed to set CPTs for NI %s: %d\n",
- libcfs_net2str(net), rc);
- goto failed;
- }
-
- LASSERT(rc <= LNET_CPT_NUMBER);
- if (rc == LNET_CPT_NUMBER) {
- LIBCFS_FREE(ni->ni_cpts, rc * sizeof(ni->ni_cpts[0]));
- ni->ni_cpts = NULL;
- }
-
- ni->ni_ncpts = rc;
- }
-
- /* LND will fill in the address part of the NID */
- ni->ni_nid = LNET_MKNID(net, 0);
- ni->ni_last_alive = ktime_get_real_seconds();
- list_add_tail(&ni->ni_list, nilist);
- return ni;
- failed:
- lnet_ni_free(ni);
- return NULL;
-}
-
-int
-lnet_parse_networks(struct list_head *nilist, char *networks)
-{
- struct cfs_expr_list *el = NULL;
- int tokensize = strlen(networks) + 1;
- char *tokens;
- char *str;
- char *tmp;
- struct lnet_ni *ni;
- __u32 net;
- int nnets = 0;
-
- if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
- /* _WAY_ conservative */
- LCONSOLE_ERROR_MSG(0x112,
- "Can't parse networks: string too long\n");
- return -EINVAL;
- }
-
- LIBCFS_ALLOC(tokens, tokensize);
- if (tokens == NULL) {
- CERROR("Can't allocate net tokens\n");
- return -ENOMEM;
- }
-
- the_lnet.ln_network_tokens = tokens;
- the_lnet.ln_network_tokens_nob = tokensize;
- memcpy(tokens, networks, tokensize);
- str = tmp = tokens;
-
- /* Add in the loopback network */
- ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist);
- if (ni == NULL)
- goto failed;
-
- while (str != NULL && *str != 0) {
- char *comma = strchr(str, ',');
- char *bracket = strchr(str, '(');
- char *square = strchr(str, '[');
- char *iface;
- int niface;
- int rc;
-
-		/* NB we don't check interface conflicts here; it's the LND's
-		 * responsibility (if it cares at all) */
-
- if (square != NULL && (comma == NULL || square < comma)) {
-			/* i.e. o2ib0(ib0)[1,2]: the numbers between square
-			 * brackets are the CPTs this NI should be bound to */
- if (bracket != NULL && bracket > square) {
- tmp = square;
- goto failed_syntax;
- }
-
- tmp = strchr(square, ']');
- if (tmp == NULL) {
- tmp = square;
- goto failed_syntax;
- }
-
- rc = cfs_expr_list_parse(square, tmp - square + 1,
- 0, LNET_CPT_NUMBER - 1, &el);
- if (rc != 0) {
- tmp = square;
- goto failed_syntax;
- }
-
- while (square <= tmp)
- *square++ = ' ';
- }
-
- if (bracket == NULL ||
- (comma != NULL && comma < bracket)) {
-
- /* no interface list specified */
-
- if (comma != NULL)
- *comma++ = 0;
- net = libcfs_str2net(cfs_trimwhite(str));
-
- if (net == LNET_NIDNET(LNET_NID_ANY)) {
- LCONSOLE_ERROR_MSG(0x113,
- "Unrecognised network type\n");
- tmp = str;
- goto failed_syntax;
- }
-
- if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
- lnet_ni_alloc(net, el, nilist) == NULL)
- goto failed;
-
- if (el != NULL) {
- cfs_expr_list_free(el);
- el = NULL;
- }
-
- str = comma;
- continue;
- }
-
- *bracket = 0;
- net = libcfs_str2net(cfs_trimwhite(str));
- if (net == LNET_NIDNET(LNET_NID_ANY)) {
- tmp = str;
- goto failed_syntax;
- }
-
- nnets++;
- ni = lnet_ni_alloc(net, el, nilist);
- if (ni == NULL)
- goto failed;
-
- if (el != NULL) {
- cfs_expr_list_free(el);
- el = NULL;
- }
-
- niface = 0;
- iface = bracket + 1;
-
- bracket = strchr(iface, ')');
- if (bracket == NULL) {
- tmp = iface;
- goto failed_syntax;
- }
-
- *bracket = 0;
- do {
- comma = strchr(iface, ',');
- if (comma != NULL)
- *comma++ = 0;
-
- iface = cfs_trimwhite(iface);
- if (*iface == 0) {
- tmp = iface;
- goto failed_syntax;
- }
-
- if (niface == LNET_MAX_INTERFACES) {
- LCONSOLE_ERROR_MSG(0x115,
- "Too many interfaces for net %s\n",
- libcfs_net2str(net));
- goto failed;
- }
-
- ni->ni_interfaces[niface++] = iface;
- iface = comma;
- } while (iface != NULL);
-
- str = bracket + 1;
- comma = strchr(bracket + 1, ',');
- if (comma != NULL) {
- *comma = 0;
- str = cfs_trimwhite(str);
- if (*str != 0) {
- tmp = str;
- goto failed_syntax;
- }
- str = comma + 1;
- continue;
- }
-
- str = cfs_trimwhite(str);
- if (*str != 0) {
- tmp = str;
- goto failed_syntax;
- }
- }
-
- LASSERT(!list_empty(nilist));
- return 0;
-
- failed_syntax:
- lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
- failed:
- while (!list_empty(nilist)) {
- ni = list_entry(nilist->next, lnet_ni_t, ni_list);
-
- list_del(&ni->ni_list);
- lnet_ni_free(ni);
- }
-
- if (el != NULL)
- cfs_expr_list_free(el);
-
- LIBCFS_FREE(tokens, tokensize);
- the_lnet.ln_network_tokens = NULL;
-
- return -EINVAL;
-}
-
-static struct lnet_text_buf_t *
-lnet_new_text_buf(int str_len)
-{
- struct lnet_text_buf_t *ltb;
- int nob;
-
- /* NB allocate space for the terminating 0 */
- nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]);
- if (nob > LNET_SINGLE_TEXTBUF_NOB) {
- /* _way_ conservative for "route net gateway..." */
- CERROR("text buffer too big\n");
- return NULL;
- }
-
- if (lnet_tbnob + nob > LNET_MAX_TEXTBUF_NOB) {
- CERROR("Too many text buffers\n");
- return NULL;
- }
-
- LIBCFS_ALLOC(ltb, nob);
- if (ltb == NULL)
- return NULL;
-
- ltb->ltb_size = nob;
- ltb->ltb_text[0] = 0;
- lnet_tbnob += nob;
- return ltb;
-}
-
-static void
-lnet_free_text_buf(struct lnet_text_buf_t *ltb)
-{
- lnet_tbnob -= ltb->ltb_size;
- LIBCFS_FREE(ltb, ltb->ltb_size);
-}
-
-static void
-lnet_free_text_bufs(struct list_head *tbs)
-{
- struct lnet_text_buf_t *ltb;
-
- while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
-
- list_del(&ltb->ltb_list);
- lnet_free_text_buf(ltb);
- }
-}
-
-static int
-lnet_str2tbs_sep(struct list_head *tbs, char *str)
-{
- struct list_head pending;
- char *sep;
- int nob;
- int i;
- struct lnet_text_buf_t *ltb;
-
- INIT_LIST_HEAD(&pending);
-
- /* Split 'str' into separate commands */
- for (;;) {
- /* skip leading whitespace */
- while (isspace(*str))
- str++;
-
- /* scan for separator or comment */
- for (sep = str; *sep != 0; sep++)
- if (lnet_issep(*sep) || *sep == '#')
- break;
-
- nob = (int)(sep - str);
- if (nob > 0) {
- ltb = lnet_new_text_buf(nob);
- if (ltb == NULL) {
- lnet_free_text_bufs(&pending);
- return -1;
- }
-
- for (i = 0; i < nob; i++)
- if (isspace(str[i]))
- ltb->ltb_text[i] = ' ';
- else
- ltb->ltb_text[i] = str[i];
-
- ltb->ltb_text[nob] = 0;
-
- list_add_tail(&ltb->ltb_list, &pending);
- }
-
- if (*sep == '#') {
- /* scan for separator */
- do {
- sep++;
- } while (*sep != 0 && !lnet_issep(*sep));
- }
-
- if (*sep == 0)
- break;
-
- str = sep + 1;
- }
-
- list_splice(&pending, tbs->prev);
- return 0;
-}
-
-static int
-lnet_expand1tb(struct list_head *list,
- char *str, char *sep1, char *sep2,
- char *item, int itemlen)
-{
- int len1 = (int)(sep1 - str);
- int len2 = strlen(sep2 + 1);
- struct lnet_text_buf_t *ltb;
-
- LASSERT(*sep1 == '[');
- LASSERT(*sep2 == ']');
-
- ltb = lnet_new_text_buf(len1 + itemlen + len2);
- if (ltb == NULL)
- return -ENOMEM;
-
- memcpy(ltb->ltb_text, str, len1);
- memcpy(&ltb->ltb_text[len1], item, itemlen);
- memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2);
- ltb->ltb_text[len1 + itemlen + len2] = 0;
-
- list_add_tail(&ltb->ltb_list, list);
- return 0;
-}
-
-static int
-lnet_str2tbs_expand(struct list_head *tbs, char *str)
-{
- char num[16];
- struct list_head pending;
- char *sep;
- char *sep2;
- char *parsed;
- char *enditem;
- int lo;
- int hi;
- int stride;
- int i;
- int nob;
- int scanned;
-
- INIT_LIST_HEAD(&pending);
-
- sep = strchr(str, '[');
- if (sep == NULL) /* nothing to expand */
- return 0;
-
- sep2 = strchr(sep, ']');
- if (sep2 == NULL)
- goto failed;
-
- for (parsed = sep; parsed < sep2; parsed = enditem) {
-
- enditem = ++parsed;
- while (enditem < sep2 && *enditem != ',')
- enditem++;
-
- if (enditem == parsed) /* no empty items */
- goto failed;
-
- if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi,
- &stride, &scanned) < 3) {
-
- if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
-
- /* simple string enumeration */
- if (lnet_expand1tb(
- &pending, str, sep, sep2,
- parsed,
- (int)(enditem - parsed)) != 0) {
- goto failed;
- }
-
- continue;
- }
-
- stride = 1;
- }
-
- /* range expansion */
-
- if (enditem != parsed + scanned) /* no trailing junk */
- goto failed;
-
- if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
- (hi - lo) % stride != 0)
- goto failed;
-
- for (i = lo; i <= hi; i += stride) {
-
- snprintf(num, sizeof(num), "%d", i);
- nob = strlen(num);
- if (nob + 1 == sizeof(num))
- goto failed;
-
- if (lnet_expand1tb(&pending, str, sep, sep2,
- num, nob) != 0)
- goto failed;
- }
- }
-
- list_splice(&pending, tbs->prev);
- return 1;
-
- failed:
- lnet_free_text_bufs(&pending);
- return -1;
-}
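lnet_str2tbs_expand() turns a bracketed range such as "tcp[0-4/2]" into one text buffer per value (tcp0, tcp2, tcp4) by scanning "lo-hi/stride" (or "lo-hi") with sscanf and rejecting ranges with trailing junk or a mismatched stride. A hedged standalone sketch of just that scan, without the text-buffer bookkeeping, follows; the demo_* name is made up.

#include <stdio.h>

/* Expand the contents of one "[lo-hi/stride]" bracket; prints each value. */
static int demo_expand_range(const char *item)
{
	int lo, hi, stride, scanned, i;

	if (sscanf(item, "%d-%d/%d%n", &lo, &hi, &stride, &scanned) < 3) {
		if (sscanf(item, "%d-%d%n", &lo, &hi, &scanned) < 2)
			return -1;	/* plain item, nothing to expand */
		stride = 1;
	}

	if (lo < 0 || hi < lo || stride <= 0 || (hi - lo) % stride != 0)
		return -1;		/* reject malformed ranges */

	for (i = lo; i <= hi; i += stride)
		printf("expanded value: %d\n", i);
	return 0;
}

int main(void)
{
	demo_expand_range("0-4/2");	/* 0, 2, 4 */
	demo_expand_range("1-3");	/* 1, 2, 3 */
	return 0;
}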
-
-static int
-lnet_parse_hops(char *str, unsigned int *hops)
-{
- int len = strlen(str);
- int nob = len;
-
- return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
- nob == len &&
- *hops > 0 && *hops < 256);
-}
-
-#define LNET_PRIORITY_SEPARATOR (':')
-
-static int
-lnet_parse_priority(char *str, unsigned int *priority, char **token)
-{
- int nob;
- char *sep;
- int len;
-
- sep = strchr(str, LNET_PRIORITY_SEPARATOR);
- if (sep == NULL) {
- *priority = 0;
- return 0;
- }
- len = strlen(sep + 1);
-
- if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) {
-		/* Update the caller's token pointer so it treats the found
-		 * priority as the token to report in the error message. */
- *token += sep - str + 1;
- return -1;
- }
-
- CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob);
-
- /*
- * Change priority separator to \0 to be able to parse NID
- */
- *sep = '\0';
- return 0;
-}
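lnet_parse_priority() strips an optional ":<priority>" suffix off a gateway token in place, so the remaining string can be parsed as a plain NID. A small sketch of the same split; the demo_* helper and the example NID string are made up.

#include <stdio.h>
#include <string.h>

/* Split "nid:prio" in place; returns 0 and sets *prio (0 if absent). */
static int demo_parse_priority(char *str, unsigned int *prio)
{
	char *sep = strchr(str, ':');
	int nob;

	if (sep == NULL) {
		*prio = 0;
		return 0;
	}

	if (sscanf(sep + 1, "%u%n", prio, &nob) < 1 ||
	    nob != (int)strlen(sep + 1))
		return -1;	/* trailing junk after the priority */

	*sep = '\0';		/* leave just the NID in 'str' */
	return 0;
}

int main(void)
{
	char gw[] = "192.168.0.1@tcp:3";
	unsigned int prio;

	if (demo_parse_priority(gw, &prio) == 0)
		printf("nid '%s' priority %u\n", gw, prio);
	return 0;
}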
-
-static int
-lnet_parse_route(char *str, int *im_a_router)
-{
- /* static scratch buffer OK (single threaded) */
- static char cmd[LNET_SINGLE_TEXTBUF_NOB];
-
- struct list_head nets;
- struct list_head gateways;
- struct list_head *tmp1;
- struct list_head *tmp2;
- __u32 net;
- lnet_nid_t nid;
- struct lnet_text_buf_t *ltb;
- int rc;
- char *sep;
- char *token = str;
- int ntokens = 0;
- int myrc = -1;
- unsigned int hops;
- int got_hops = 0;
- unsigned int priority = 0;
-
- INIT_LIST_HEAD(&gateways);
- INIT_LIST_HEAD(&nets);
-
- /* save a copy of the string for error messages */
- strncpy(cmd, str, sizeof(cmd) - 1);
- cmd[sizeof(cmd) - 1] = 0;
-
- sep = str;
- for (;;) {
- /* scan for token start */
- while (isspace(*sep))
- sep++;
- if (*sep == 0) {
- if (ntokens < (got_hops ? 3 : 2))
- goto token_error;
- break;
- }
-
- ntokens++;
- token = sep++;
-
- /* scan for token end */
- while (*sep != 0 && !isspace(*sep))
- sep++;
- if (*sep != 0)
- *sep++ = 0;
-
- if (ntokens == 1) {
- tmp2 = &nets; /* expanding nets */
- } else if (ntokens == 2 &&
- lnet_parse_hops(token, &hops)) {
- got_hops = 1; /* got a hop count */
- continue;
- } else {
- tmp2 = &gateways; /* expanding gateways */
- }
-
- ltb = lnet_new_text_buf(strlen(token));
- if (ltb == NULL)
- goto out;
-
- strcpy(ltb->ltb_text, token);
- tmp1 = &ltb->ltb_list;
- list_add_tail(tmp1, tmp2);
-
- while (tmp1 != tmp2) {
- ltb = list_entry(tmp1, struct lnet_text_buf_t,
- ltb_list);
-
- rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
- if (rc < 0)
- goto token_error;
-
- tmp1 = tmp1->next;
-
- if (rc > 0) { /* expanded! */
- list_del(&ltb->ltb_list);
- lnet_free_text_buf(ltb);
- continue;
- }
-
- if (ntokens == 1) {
- net = libcfs_str2net(ltb->ltb_text);
- if (net == LNET_NIDNET(LNET_NID_ANY) ||
- LNET_NETTYP(net) == LOLND)
- goto token_error;
- } else {
- rc = lnet_parse_priority(ltb->ltb_text,
- &priority, &token);
- if (rc < 0)
- goto token_error;
-
- nid = libcfs_str2nid(ltb->ltb_text);
- if (nid == LNET_NID_ANY ||
- LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
- goto token_error;
- }
- }
- }
-
- if (!got_hops)
- hops = 1;
-
- LASSERT(!list_empty(&nets));
- LASSERT(!list_empty(&gateways));
-
- list_for_each(tmp1, &nets) {
- ltb = list_entry(tmp1, struct lnet_text_buf_t, ltb_list);
- net = libcfs_str2net(ltb->ltb_text);
- LASSERT(net != LNET_NIDNET(LNET_NID_ANY));
-
- list_for_each(tmp2, &gateways) {
- ltb = list_entry(tmp2, struct lnet_text_buf_t,
- ltb_list);
- nid = libcfs_str2nid(ltb->ltb_text);
- LASSERT(nid != LNET_NID_ANY);
-
- if (lnet_islocalnid(nid)) {
- *im_a_router = 1;
- continue;
- }
-
- rc = lnet_add_route(net, hops, nid, priority);
- if (rc != 0) {
- CERROR("Can't create route to %s via %s\n",
- libcfs_net2str(net),
- libcfs_nid2str(nid));
- goto out;
- }
- }
- }
-
- myrc = 0;
- goto out;
-
- token_error:
- lnet_syntax("routes", cmd, (int)(token - str), strlen(token));
- out:
- lnet_free_text_bufs(&nets);
- lnet_free_text_bufs(&gateways);
- return myrc;
-}
-
-static int
-lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
-{
- struct lnet_text_buf_t *ltb;
-
- while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
-
- if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
- lnet_free_text_bufs(tbs);
- return -EINVAL;
- }
-
- list_del(&ltb->ltb_list);
- lnet_free_text_buf(ltb);
- }
-
- return 0;
-}
-
-int
-lnet_parse_routes(char *routes, int *im_a_router)
-{
- struct list_head tbs;
- int rc = 0;
-
- *im_a_router = 0;
-
- INIT_LIST_HEAD(&tbs);
-
- if (lnet_str2tbs_sep(&tbs, routes) < 0) {
- CERROR("Error parsing routes\n");
- rc = -EINVAL;
- } else {
- rc = lnet_parse_route_tbs(&tbs, im_a_router);
- }
-
- LASSERT(lnet_tbnob == 0);
- return rc;
-}
-
-static int
-lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
-{
- LIST_HEAD(list);
- int rc;
- int i;
-
- rc = cfs_ip_addr_parse(token, len, &list);
- if (rc != 0)
- return rc;
-
- for (rc = i = 0; !rc && i < nip; i++)
- rc = cfs_ip_addr_match(ipaddrs[i], &list);
-
- cfs_ip_addr_free(&list);
-
- return rc;
-}
-
-static int
-lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
-{
- static char tokens[LNET_SINGLE_TEXTBUF_NOB];
-
- int matched = 0;
- int ntokens = 0;
- int len;
- char *net = NULL;
- char *sep;
- char *token;
- int rc;
-
- LASSERT(strlen(net_entry) < sizeof(tokens));
-
- /* work on a copy of the string */
- strcpy(tokens, net_entry);
- sep = tokens;
- for (;;) {
- /* scan for token start */
- while (isspace(*sep))
- sep++;
- if (*sep == 0)
- break;
-
- token = sep++;
-
- /* scan for token end */
- while (*sep != 0 && !isspace(*sep))
- sep++;
- if (*sep != 0)
- *sep++ = 0;
-
- if (ntokens++ == 0) {
- net = token;
- continue;
- }
-
- len = strlen(token);
-
- rc = lnet_match_network_token(token, len, ipaddrs, nip);
- if (rc < 0) {
- lnet_syntax("ip2nets", net_entry,
- (int)(token - tokens), len);
- return rc;
- }
-
- matched |= (rc != 0);
- }
-
- if (!matched)
- return 0;
-
- strcpy(net_entry, net); /* replace with matched net */
- return 1;
-}
-
-static __u32
-lnet_netspec2net(char *netspec)
-{
- char *bracket = strchr(netspec, '(');
- __u32 net;
-
- if (bracket != NULL)
- *bracket = 0;
-
- net = libcfs_str2net(netspec);
-
- if (bracket != NULL)
- *bracket = '(';
-
- return net;
-}
-
-static int
-lnet_splitnets(char *source, struct list_head *nets)
-{
- int offset = 0;
- int offset2;
- int len;
- struct lnet_text_buf_t *tb;
- struct lnet_text_buf_t *tb2;
- struct list_head *t;
- char *sep;
- char *bracket;
- __u32 net;
-
- LASSERT(!list_empty(nets));
- LASSERT(nets->next == nets->prev); /* single entry */
-
- tb = list_entry(nets->next, struct lnet_text_buf_t, ltb_list);
-
- for (;;) {
- sep = strchr(tb->ltb_text, ',');
- bracket = strchr(tb->ltb_text, '(');
-
- if (sep != NULL &&
- bracket != NULL &&
- bracket < sep) {
- /* netspec lists interfaces... */
-
- offset2 = offset + (int)(bracket - tb->ltb_text);
- len = strlen(bracket);
-
- bracket = strchr(bracket + 1, ')');
-
- if (bracket == NULL ||
- !(bracket[1] == ',' || bracket[1] == 0)) {
- lnet_syntax("ip2nets", source, offset2, len);
- return -EINVAL;
- }
-
- sep = (bracket[1] == 0) ? NULL : bracket + 1;
- }
-
- if (sep != NULL)
- *sep++ = 0;
-
- net = lnet_netspec2net(tb->ltb_text);
- if (net == LNET_NIDNET(LNET_NID_ANY)) {
- lnet_syntax("ip2nets", source, offset,
- strlen(tb->ltb_text));
- return -EINVAL;
- }
-
- list_for_each(t, nets) {
- tb2 = list_entry(t, struct lnet_text_buf_t, ltb_list);
-
- if (tb2 == tb)
- continue;
-
- if (net == lnet_netspec2net(tb2->ltb_text)) {
- /* duplicate network */
- lnet_syntax("ip2nets", source, offset,
- strlen(tb->ltb_text));
- return -EINVAL;
- }
- }
-
- if (sep == NULL)
- return 0;
-
- offset += (int)(sep - tb->ltb_text);
- tb2 = lnet_new_text_buf(strlen(sep));
- if (tb2 == NULL)
- return -ENOMEM;
-
- strcpy(tb2->ltb_text, sep);
- list_add_tail(&tb2->ltb_list, nets);
-
- tb = tb2;
- }
-}
-
-static int
-lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
-{
- static char networks[LNET_SINGLE_TEXTBUF_NOB];
- static char source[LNET_SINGLE_TEXTBUF_NOB];
-
- struct list_head raw_entries;
- struct list_head matched_nets;
- struct list_head current_nets;
- struct list_head *t;
- struct list_head *t2;
- struct lnet_text_buf_t *tb;
- struct lnet_text_buf_t *tb2;
- __u32 net1;
- __u32 net2;
- int len;
- int count;
- int dup;
- int rc;
-
- INIT_LIST_HEAD(&raw_entries);
- if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
- CERROR("Error parsing ip2nets\n");
- LASSERT(lnet_tbnob == 0);
- return -EINVAL;
- }
-
- INIT_LIST_HEAD(&matched_nets);
- INIT_LIST_HEAD(&current_nets);
- networks[0] = 0;
- count = 0;
- len = 0;
- rc = 0;
-
- while (!list_empty(&raw_entries)) {
- tb = list_entry(raw_entries.next, struct lnet_text_buf_t,
- ltb_list);
-
- strncpy(source, tb->ltb_text, sizeof(source)-1);
- source[sizeof(source)-1] = 0;
-
-		/* on a match, replace ltb_text with the matched network(s) */
- rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
- if (rc < 0)
- break;
-
- list_del(&tb->ltb_list);
-
- if (rc == 0) { /* no match */
- lnet_free_text_buf(tb);
- continue;
- }
-
- /* split into separate networks */
- INIT_LIST_HEAD(&current_nets);
- list_add(&tb->ltb_list, &current_nets);
- rc = lnet_splitnets(source, &current_nets);
- if (rc < 0)
- break;
-
- dup = 0;
- list_for_each(t, &current_nets) {
- tb = list_entry(t, struct lnet_text_buf_t, ltb_list);
- net1 = lnet_netspec2net(tb->ltb_text);
- LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY));
-
- list_for_each(t2, &matched_nets) {
- tb2 = list_entry(t2, struct lnet_text_buf_t,
- ltb_list);
- net2 = lnet_netspec2net(tb2->ltb_text);
- LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY));
-
- if (net1 == net2) {
- dup = 1;
- break;
- }
- }
-
- if (dup)
- break;
- }
-
- if (dup) {
- lnet_free_text_bufs(&current_nets);
- continue;
- }
-
- list_for_each_safe(t, t2, &current_nets) {
- tb = list_entry(t, struct lnet_text_buf_t, ltb_list);
-
- list_del(&tb->ltb_list);
- list_add_tail(&tb->ltb_list, &matched_nets);
-
- len += snprintf(networks + len, sizeof(networks) - len,
- "%s%s", (len == 0) ? "" : ",",
- tb->ltb_text);
-
- if (len >= sizeof(networks)) {
- CERROR("Too many matched networks\n");
- rc = -E2BIG;
- goto out;
- }
- }
-
- count++;
- }
-
- out:
- lnet_free_text_bufs(&raw_entries);
- lnet_free_text_bufs(&matched_nets);
- lnet_free_text_bufs(&current_nets);
- LASSERT(lnet_tbnob == 0);
-
- if (rc < 0)
- return rc;
-
- *networksp = networks;
- return count;
-}
-
-static void
-lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
-{
- LIBCFS_FREE(ipaddrs, nip * sizeof(*ipaddrs));
-}
-
-static int
-lnet_ipaddr_enumerate(__u32 **ipaddrsp)
-{
- int up;
- __u32 netmask;
- __u32 *ipaddrs;
- __u32 *ipaddrs2;
- int nip;
- char **ifnames;
- int nif = lnet_ipif_enumerate(&ifnames);
- int i;
- int rc;
-
- if (nif <= 0)
- return nif;
-
- LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
- if (ipaddrs == NULL) {
- CERROR("Can't allocate ipaddrs[%d]\n", nif);
- lnet_ipif_free_enumeration(ifnames, nif);
- return -ENOMEM;
- }
-
- for (i = nip = 0; i < nif; i++) {
- if (!strcmp(ifnames[i], "lo"))
- continue;
-
- rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask);
- if (rc != 0) {
- CWARN("Can't query interface %s: %d\n",
- ifnames[i], rc);
- continue;
- }
-
- if (!up) {
- CWARN("Ignoring interface %s: it's down\n",
- ifnames[i]);
- continue;
- }
-
- nip++;
- }
-
- lnet_ipif_free_enumeration(ifnames, nif);
-
- if (nip == nif) {
- *ipaddrsp = ipaddrs;
- } else {
- if (nip > 0) {
- LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2));
- if (ipaddrs2 == NULL) {
- CERROR("Can't allocate ipaddrs[%d]\n", nip);
- nip = -ENOMEM;
- } else {
- memcpy(ipaddrs2, ipaddrs,
- nip * sizeof(*ipaddrs));
- *ipaddrsp = ipaddrs2;
- rc = nip;
- }
- }
- lnet_ipaddr_free_enumeration(ipaddrs, nif);
- }
- return nip;
-}
-
-int
-lnet_parse_ip2nets(char **networksp, char *ip2nets)
-{
- __u32 *ipaddrs = NULL;
- int nip = lnet_ipaddr_enumerate(&ipaddrs);
- int rc;
-
- if (nip < 0) {
- LCONSOLE_ERROR_MSG(0x117,
- "Error %d enumerating local IP interfaces for ip2nets to match\n",
- nip);
- return nip;
- }
-
- if (nip == 0) {
- LCONSOLE_ERROR_MSG(0x118,
- "No local IP interfaces for ip2nets to match\n");
- return -ENOENT;
- }
-
- rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip);
- lnet_ipaddr_free_enumeration(ipaddrs, nip);
-
- if (rc < 0) {
- LCONSOLE_ERROR_MSG(0x119, "Error %d parsing ip2nets\n", rc);
- return rc;
- }
-
- if (rc == 0) {
- LCONSOLE_ERROR_MSG(0x11a,
- "ip2nets does not match any local IP interfaces\n");
- return -ENOENT;
- }
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
deleted file mode 100644
index 8b843c5e71a5..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-eq.c
- *
- * Library level Event queue management routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include "../../include/linux/lnet/lib-lnet.h"
-
-/**
- * Create an event queue that has room for \a count number of events.
- *
- * The event queue is circular and older events will be overwritten by new
- * ones if they are not removed in time by the user using the functions
- * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to
- * determine the appropriate size of the event queue to prevent this loss
- * of events. Note that when an EQ handler is specified in \a callback, no
- * event loss can happen, since the handler is run for each event deposited
- * into the EQ.
- *
- * \param count The number of events to be stored in the event queue. It
- * will be rounded up to the next power of two.
- * \param callback A handler function that runs when an event is deposited
- * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to
- * indicate that no event handler is desired.
- * \param handle On successful return, this location will hold a handle for
- * the newly created EQ.
- *
- * \retval 0 On success.
- * \retval -EINVAL If a parameter is not valid.
- * \retval -ENOMEM If memory for the EQ can't be allocated.
- *
- * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
- */
-int
-LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
- lnet_handle_eq_t *handle)
-{
- lnet_eq_t *eq;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
-	/* We need count to be a power of 2 so that when eq_{enq,deq}_seq
-	 * overflow they don't skip entries, and the queue keeps the same
-	 * apparent capacity at all times */
-
- count = roundup_pow_of_two(count);
-
- if (callback != LNET_EQ_HANDLER_NONE && count != 0)
-		CWARN("An EQ callback is guaranteed to get every event; do you still want eqcount %d for event polling, which adds locking overhead? Please contact the developers to confirm\n", count);
-
-	/* count can be 0 if only the callback is needed; that eliminates
-	 * the overhead of enqueuing events */
- if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
- return -EINVAL;
-
- eq = lnet_eq_alloc();
- if (eq == NULL)
- return -ENOMEM;
-
- if (count != 0) {
- LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
- if (eq->eq_events == NULL)
- goto failed;
-		/* NB allocator has set all event sequence numbers to 0,
-		 * so all of them should be earlier than eq_deq_seq */
- }
-
- eq->eq_deq_seq = 1;
- eq->eq_enq_seq = 1;
- eq->eq_size = count;
- eq->eq_callback = callback;
-
- eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*eq->eq_refs[0]));
- if (eq->eq_refs == NULL)
- goto failed;
-
- /* MUST hold both exclusive lnet_res_lock */
- lnet_res_lock(LNET_LOCK_EX);
- /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
- * both EQ lookup and poll event with only lnet_eq_wait_lock */
- lnet_eq_wait_lock();
-
- lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
- list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
-
- lnet_eq_wait_unlock();
- lnet_res_unlock(LNET_LOCK_EX);
-
- lnet_eq2handle(handle, eq);
- return 0;
-
-failed:
- if (eq->eq_events != NULL)
- LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
-
- if (eq->eq_refs != NULL)
- cfs_percpt_free(eq->eq_refs);
-
- lnet_eq_free(eq);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(LNetEQAlloc);
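
A minimal allocation sketch for the API above (the header path is taken from this file; the callback and wrapper names are hypothetical):

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"

/* Hypothetical handler: with a callback, no event can be lost (see above). */
static void my_eq_callback(lnet_event_t *ev)
{
	/* process the event in place; nothing is queued when count == 0 */
	(void)ev;
}

static int my_eq_setup(lnet_handle_eq_t *eqh)
{
	/*
	 * count == 0 is allowed when a callback is supplied; a non-zero
	 * count would be rounded up to a power of two for polling use.
	 */
	return LNetEQAlloc(0, my_eq_callback, eqh);
}
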
-
-/**
- * Release the resources associated with an event queue if it's idle;
- * otherwise do nothing and it's up to the user to try again.
- *
- * \param eqh A handle for the event queue to be released.
- *
- * \retval 0 If the EQ is not in use and freed.
- * \retval -ENOENT If \a eqh does not point to a valid EQ.
- * \retval -EBUSY If the EQ is still in use by some MDs.
- */
-int
-LNetEQFree(lnet_handle_eq_t eqh)
-{
- struct lnet_eq *eq;
- lnet_event_t *events = NULL;
- int **refs = NULL;
- int *ref;
- int rc = 0;
- int size = 0;
- int i;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- lnet_res_lock(LNET_LOCK_EX);
- /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
- * both EQ lookup and poll event with only lnet_eq_wait_lock */
- lnet_eq_wait_lock();
-
- eq = lnet_handle2eq(&eqh);
- if (eq == NULL) {
- rc = -ENOENT;
- goto out;
- }
-
- cfs_percpt_for_each(ref, i, eq->eq_refs) {
- LASSERT(*ref >= 0);
- if (*ref == 0)
- continue;
-
-		CDEBUG(D_NET, "Event queue (%d: %d) busy on destroy.\n",
- i, *ref);
- rc = -EBUSY;
- goto out;
- }
-
- /* stash for free after lock dropped */
- events = eq->eq_events;
- size = eq->eq_size;
- refs = eq->eq_refs;
-
- lnet_res_lh_invalidate(&eq->eq_lh);
- list_del(&eq->eq_list);
- lnet_eq_free(eq);
- out:
- lnet_eq_wait_unlock();
- lnet_res_unlock(LNET_LOCK_EX);
-
- if (events != NULL)
- LIBCFS_FREE(events, size * sizeof(lnet_event_t));
- if (refs != NULL)
- cfs_percpt_free(refs);
-
- return rc;
-}
-EXPORT_SYMBOL(LNetEQFree);
-
-void
-lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
-{
-	/* MUST be called with the resource lock held but w/o lnet_eq_wait_lock */
- int index;
-
- if (eq->eq_size == 0) {
- LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
- eq->eq_callback(ev);
- return;
- }
-
- lnet_eq_wait_lock();
- ev->sequence = eq->eq_enq_seq++;
-
- LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
- index = ev->sequence & (eq->eq_size - 1);
-
- eq->eq_events[index] = *ev;
-
- if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
- eq->eq_callback(ev);
-
- /* Wake anyone waiting in LNetEQPoll() */
- if (waitqueue_active(&the_lnet.ln_eq_waitq))
- wake_up_all(&the_lnet.ln_eq_waitq);
- lnet_eq_wait_unlock();
-}
-
-static int
-lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
-{
- int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
- lnet_event_t *new_event = &eq->eq_events[new_index];
- int rc;
-
-	/* must be called with lnet_eq_wait_lock held */
- if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
- return 0;
-
- /* We've got a new event... */
- *ev = *new_event;
-
- CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
- new_event, eq->eq_deq_seq, eq->eq_size);
-
- /* ...but did it overwrite an event we've not seen yet? */
- if (eq->eq_deq_seq == new_event->sequence) {
- rc = 1;
- } else {
- /* don't complain with CERROR: some EQs are sized small
- * anyway; if it's important, the caller should complain */
- CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
- eq->eq_deq_seq, new_event->sequence);
- rc = -EOVERFLOW;
- }
-
- eq->eq_deq_seq = new_event->sequence + 1;
- return rc;
-}
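
The enqueue/dequeue pair above depends on the power-of-two size: masking a monotonically increasing sequence number always selects a valid slot, and comparing the slot's sequence against the expected one distinguishes an empty queue from overwritten events. A standalone restatement of that indexing (ordinary userspace C, not LNet code; the wraparound handling done by LNET_SEQ_GT is omitted):

#include <stdio.h>

#define EQ_SIZE 8			/* must be a power of two */

struct slot { unsigned long seq; int payload; };

static struct slot ring[EQ_SIZE];
static unsigned long enq_seq = 1, deq_seq = 1;

static void enqueue(int payload)
{
	unsigned long seq = enq_seq++;

	ring[seq & (EQ_SIZE - 1)].seq = seq;
	ring[seq & (EQ_SIZE - 1)].payload = payload;
}

/* 1: got an event, 0: queue empty, -1: got an event but some were lost */
static int dequeue(int *payload)
{
	struct slot *s = &ring[deq_seq & (EQ_SIZE - 1)];
	int rc;

	if (s->seq < deq_seq)		/* slot not yet written: empty */
		return 0;

	*payload = s->payload;
	rc = (s->seq == deq_seq) ? 1 : -1;	/* -1 mirrors -EOVERFLOW */
	deq_seq = s->seq + 1;
	return rc;
}

int main(void)
{
	int v, rc, i;

	for (i = 0; i < 10; i++)	/* overfill the ring on purpose */
		enqueue(i);
	while ((rc = dequeue(&v)) != 0)
		printf("rc=%d payload=%d\n", rc, v);
	return 0;
}
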
-
-/**
- * A nonblocking function that can be used to get the next event in an EQ.
- * If an event handler is associated with the EQ, the handler will run before
- * this function returns successfully. The event is removed from the queue.
- *
- * \param eventq A handle for the event queue.
- * \param event On successful return (1 or -EOVERFLOW), this location will
- * hold the next event in the EQ.
- *
- * \retval 0 No pending event in the EQ.
- * \retval 1 Indicates success.
- * \retval -ENOENT If \a eventq does not point to a valid EQ.
- * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
- * at least one event between this event and the last event obtained from the
- * EQ has been dropped due to limited space in the EQ.
- */
-int
-LNetEQGet(lnet_handle_eq_t eventq, lnet_event_t *event)
-{
- int which;
-
- return LNetEQPoll(&eventq, 1, 0,
- event, &which);
-}
-EXPORT_SYMBOL(LNetEQGet);
-
-/**
- * Block the calling process until there is an event in the EQ.
- * If an event handler is associated with the EQ, the handler will run before
- * this function returns successfully. This function returns the next event
- * in the EQ and removes it from the EQ.
- *
- * \param eventq A handle for the event queue.
- * \param event On successful return (1 or -EOVERFLOW), this location will
- * hold the next event in the EQ.
- *
- * \retval 1 Indicates success.
- * \retval -ENOENT If \a eventq does not point to a valid EQ.
- * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
- * at least one event between this event and the last event obtained from the
- * EQ has been dropped due to limited space in the EQ.
- */
-int
-LNetEQWait(lnet_handle_eq_t eventq, lnet_event_t *event)
-{
- int which;
-
- return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
- event, &which);
-}
-EXPORT_SYMBOL(LNetEQWait);
-
-
-static int
-lnet_eq_wait_locked(int *timeout_ms)
-__must_hold(&the_lnet.ln_eq_wait_lock)
-{
- int tms = *timeout_ms;
- int wait;
- wait_queue_t wl;
- unsigned long now;
-
- if (tms == 0)
- return -1; /* don't want to wait and no new event */
-
- init_waitqueue_entry(&wl, current);
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
-
- lnet_eq_wait_unlock();
-
- if (tms < 0) {
-		schedule();
-	} else {
- now = jiffies;
- schedule_timeout(msecs_to_jiffies(tms));
- tms -= jiffies_to_msecs(jiffies - now);
- if (tms < 0) /* no more wait but may have new event */
- tms = 0;
- }
-
- wait = tms != 0; /* might need to call here again */
- *timeout_ms = tms;
-
- lnet_eq_wait_lock();
- remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
-
- return wait;
-}
-
-
-
-/**
- * Block the calling process until there's an event from a set of EQs or
- * timeout happens.
- *
- * If an event handler is associated with the EQ, the handler will run before
- * this function returns successfully, in which case the corresponding event
- * is consumed.
- *
- * LNetEQPoll() provides a timeout to allow applications to poll, block for a
- * fixed period, or block indefinitely.
- *
- * \param eventqs,neq An array of EQ handles, and size of the array.
- * \param timeout_ms Time in milliseconds to wait for an event to occur on
- * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
- * infinite timeout.
- * \param event,which On successful return (1 or -EOVERFLOW), \a event will
- * hold the next event in the EQs, and \a which will contain the index of the
- * EQ from which the event was taken.
- *
- * \retval 0 No pending event in the EQs after timeout.
- * \retval 1 Indicates success.
- * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
- * at least one event between this event and the last event obtained from the
- * EQ indicated by \a which has been dropped due to limited space in the EQ.
- * \retval -ENOENT If there's an invalid handle in \a eventqs.
- */
-int
-LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
- lnet_event_t *event, int *which)
-{
- int wait = 1;
- int rc;
- int i;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (neq < 1)
- return -ENOENT;
-
- lnet_eq_wait_lock();
-
- for (;;) {
- for (i = 0; i < neq; i++) {
- lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
-
- if (eq == NULL) {
- lnet_eq_wait_unlock();
- return -ENOENT;
- }
-
- rc = lnet_eq_dequeue_event(eq, event);
- if (rc != 0) {
- lnet_eq_wait_unlock();
- *which = i;
- return rc;
- }
- }
-
- if (wait == 0)
- break;
-
-		/*
-		 * return value of lnet_eq_wait_locked:
-		 * -1 : did nothing and it is certain there is no new event
-		 *  1 : slept and woke up for a new event
-		 *  0 : don't want to wait any more, but a new event may have
-		 *      arrived, so dequeue must be called again
-		 */
- wait = lnet_eq_wait_locked(&timeout_ms);
- if (wait < 0) /* no new event */
- break;
- }
-
- lnet_eq_wait_unlock();
- return 0;
-}
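
A hedged sketch of a caller draining several EQs with the poll interface above (the handles are assumed to come from LNetEQAlloc(); the 100 ms budget and function name are illustrative only):

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"

static void drain_eqs(lnet_handle_eq_t *eqs, int neq)
{
	lnet_event_t ev;
	int which;
	int rc;

	for (;;) {
		rc = LNetEQPoll(eqs, neq, 100, &ev, &which);
		if (rc == 0)		/* timed out, nothing pending */
			break;
		if (rc == -ENOENT)	/* an invalid handle in the array */
			break;
		if (rc == -EOVERFLOW)	/* got an event, but some were lost */
			CWARN("EQ %d overflowed\n", which);
		/* rc == 1 or -EOVERFLOW: 'ev' holds the next event */
	}
}
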
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
deleted file mode 100644
index 758f5bedef7e..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-md.c
- *
- * Memory Descriptor management routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/lnet/lib-lnet.h"
-
-/* must be called with lnet_res_lock held */
-void
-lnet_md_unlink(lnet_libmd_t *md)
-{
- if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
- /* first unlink attempt... */
- lnet_me_t *me = md->md_me;
-
- md->md_flags |= LNET_MD_FLAG_ZOMBIE;
-
- /* Disassociate from ME (if any),
- * and unlink it if it was created
- * with LNET_UNLINK */
- if (me != NULL) {
- /* detach MD from portal */
- lnet_ptl_detach_md(me, md);
- if (me->me_unlink == LNET_UNLINK)
- lnet_me_unlink(me);
- }
-
- /* ensure all future handle lookups fail */
- lnet_res_lh_invalidate(&md->md_lh);
- }
-
- if (md->md_refcount != 0) {
- CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
- return;
- }
-
- CDEBUG(D_NET, "Unlinking md %p\n", md);
-
- if (md->md_eq != NULL) {
- int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
-
- LASSERT(*md->md_eq->eq_refs[cpt] > 0);
- (*md->md_eq->eq_refs[cpt])--;
- }
-
- LASSERT(!list_empty(&md->md_list));
- list_del_init(&md->md_list);
- lnet_md_free(md);
-}
-
-static int
-lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
-{
- int i;
- unsigned int niov;
- int total_length = 0;
-
- lmd->md_me = NULL;
- lmd->md_start = umd->start;
- lmd->md_offset = 0;
- lmd->md_max_size = umd->max_size;
- lmd->md_options = umd->options;
- lmd->md_user_ptr = umd->user_ptr;
- lmd->md_eq = NULL;
- lmd->md_threshold = umd->threshold;
- lmd->md_refcount = 0;
- lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
-
- if ((umd->options & LNET_MD_IOVEC) != 0) {
-
- if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
- return -EINVAL;
-
- lmd->md_niov = niov = umd->length;
- memcpy(lmd->md_iov.iov, umd->start,
- niov * sizeof(lmd->md_iov.iov[0]));
-
- for (i = 0; i < (int)niov; i++) {
- /* We take the base address on trust */
- /* invalid length */
- if (lmd->md_iov.iov[i].iov_len <= 0)
- return -EINVAL;
-
- total_length += lmd->md_iov.iov[i].iov_len;
- }
-
- lmd->md_length = total_length;
-
- if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* use max size */
- (umd->max_size < 0 ||
- umd->max_size > total_length)) /* illegal max_size */
- return -EINVAL;
-
- } else if ((umd->options & LNET_MD_KIOV) != 0) {
- lmd->md_niov = niov = umd->length;
- memcpy(lmd->md_iov.kiov, umd->start,
- niov * sizeof(lmd->md_iov.kiov[0]));
-
- for (i = 0; i < (int)niov; i++) {
- /* We take the page pointer on trust */
- if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
- return -EINVAL; /* invalid length */
-
- total_length += lmd->md_iov.kiov[i].kiov_len;
- }
-
- lmd->md_length = total_length;
-
- if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
- (umd->max_size < 0 ||
- umd->max_size > total_length)) /* illegal max_size */
- return -EINVAL;
- } else { /* contiguous */
- lmd->md_length = umd->length;
- lmd->md_niov = niov = 1;
- lmd->md_iov.iov[0].iov_base = umd->start;
- lmd->md_iov.iov[0].iov_len = umd->length;
-
- if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
- (umd->max_size < 0 ||
- umd->max_size > (int)umd->length)) /* illegal max_size */
- return -EINVAL;
- }
-
- return 0;
-}
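
All three branches above reduce to the same check: sum the fragment lengths, then require max_size (when that option is set) to fall within [0, total]. A standalone restatement of that check with plain structures:

#include <stddef.h>
#include <errno.h>

struct frag { void *base; size_t len; };

/* Returns the total length, or -EINVAL on a bad fragment or max_size. */
static long md_total_length(const struct frag *frags, unsigned int nfrags,
			    long max_size, int max_size_used)
{
	long total = 0;
	unsigned int i;

	for (i = 0; i < nfrags; i++) {
		if (frags[i].len == 0)		/* invalid fragment length */
			return -EINVAL;
		total += frags[i].len;
	}

	/* when LNET_MD_MAX_SIZE is used, max_size must lie in [0, total] */
	if (max_size_used && (max_size < 0 || max_size > total))
		return -EINVAL;

	return total;
}
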
-
-/* must be called with resource lock held */
-static int
-lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
-{
- struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
-
- /* NB we are passed an allocated, but inactive md.
- * if we return success, caller may lnet_md_unlink() it.
- * otherwise caller may only lnet_md_free() it.
- */
- /* This implementation doesn't know how to create START events or
- * disable END events. Best to LASSERT our caller is compliant so
- * we find out quickly... */
-	/* TODO - re-evaluate what should be here in light of
-	 * the removal of the start and end events;
-	 * maybe we shouldn't even allow LNET_EQ_NONE!
-	 * LASSERT (eq == NULL);
-	 */
- if (!LNetHandleIsInvalid(eq_handle)) {
- md->md_eq = lnet_handle2eq(&eq_handle);
-
- if (md->md_eq == NULL)
- return -ENOENT;
-
- (*md->md_eq->eq_refs[cpt])++;
- }
-
- lnet_res_lh_initialize(container, &md->md_lh);
-
- LASSERT(list_empty(&md->md_list));
- list_add(&md->md_list, &container->rec_active);
-
- return 0;
-}
-
-/* must be called with lnet_res_lock held */
-void
-lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
-{
- /* NB this doesn't copy out all the iov entries so when a
- * discontiguous MD is copied out, the target gets to know the
- * original iov pointer (in start) and the number of entries it had
- * and that's all.
- */
- umd->start = lmd->md_start;
- umd->length = ((lmd->md_options &
- (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
- lmd->md_length : lmd->md_niov;
- umd->threshold = lmd->md_threshold;
- umd->max_size = lmd->md_max_size;
- umd->options = lmd->md_options;
- umd->user_ptr = lmd->md_user_ptr;
- lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
-}
-
-static int
-lnet_md_validate(lnet_md_t *umd)
-{
- if (umd->start == NULL && umd->length != 0) {
-		CERROR("MD start pointer cannot be NULL with length %u\n",
- umd->length);
- return -EINVAL;
- }
-
- if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
- umd->length > LNET_MAX_IOV) {
- CERROR("Invalid option: too many fragments %u, %d max\n",
- umd->length, LNET_MAX_IOV);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * Create a memory descriptor and attach it to a ME
- *
- * \param meh A handle for a ME to associate the new MD with.
- * \param umd Provides initial values for the user-visible parts of a MD.
- * Other than its use for initialization, there is no linkage between this
- * structure and the MD maintained by the LNet.
- * \param unlink A flag to indicate whether the MD is automatically unlinked
- * when it becomes inactive, either because the operation threshold drops to
- * zero or because the available memory becomes less than \a umd.max_size.
- * (Note that the check for unlinking a MD only occurs after the completion
- * of a successful operation on the MD.) The value LNET_UNLINK enables auto
- * unlinking; the value LNET_RETAIN disables it.
- * \param handle On successful returns, a handle to the newly created MD is
- * saved here. This handle can be used later in LNetMDUnlink().
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a umd is not valid.
- * \retval -ENOMEM If new MD cannot be allocated.
- * \retval -ENOENT Either \a meh or \a umd.eq_handle does not point to a
- * valid object. Note that it's OK to supply a NULL \a umd.eq_handle by
- * calling LNetInvalidateHandle() on it.
- * \retval -EBUSY If the ME pointed to by \a meh is already associated with
- * a MD.
- */
-int
-LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
- lnet_unlink_t unlink, lnet_handle_md_t *handle)
-{
- LIST_HEAD(matches);
- LIST_HEAD(drops);
- struct lnet_me *me;
- struct lnet_libmd *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (lnet_md_validate(&umd) != 0)
- return -EINVAL;
-
- if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
- CERROR("Invalid option: no MD_OP set\n");
- return -EINVAL;
- }
-
- md = lnet_md_alloc(&umd);
- if (md == NULL)
- return -ENOMEM;
-
- rc = lnet_md_build(md, &umd, unlink);
- cpt = lnet_cpt_of_cookie(meh.cookie);
-
- lnet_res_lock(cpt);
- if (rc != 0)
- goto failed;
-
- me = lnet_handle2me(&meh);
- if (me == NULL)
- rc = -ENOENT;
- else if (me->me_md != NULL)
- rc = -EBUSY;
- else
- rc = lnet_md_link(md, umd.eq_handle, cpt);
-
- if (rc != 0)
- goto failed;
-
- /* attach this MD to portal of ME and check if it matches any
- * blocked msgs on this portal */
- lnet_ptl_attach_md(me, md, &matches, &drops);
-
- lnet_md2handle(handle, md);
-
- lnet_res_unlock(cpt);
-
- lnet_drop_delayed_msg_list(&drops, "Bad match");
- lnet_recv_delayed_msg_list(&matches);
-
- return 0;
-
- failed:
- lnet_md_free(md);
-
- lnet_res_unlock(cpt);
- return rc;
-}
-EXPORT_SYMBOL(LNetMDAttach);
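
A sketch of a typical caller of the function above, attaching a contiguous receive buffer to an existing match entry (the handles, buffer, and wrapper name are hypothetical; the option and threshold constants are the ones referenced in this file):

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"

static int attach_recv_buffer(lnet_handle_me_t meh, lnet_handle_eq_t eqh,
			      void *buf, unsigned int len,
			      lnet_handle_md_t *mdh)
{
	lnet_md_t umd;

	memset(&umd, 0, sizeof(umd));
	umd.start = buf;
	umd.length = len;
	umd.threshold = LNET_MD_THRESH_INF;	/* never retires on its own */
	umd.options = LNET_MD_OP_PUT;		/* accept incoming PUTs */
	umd.eq_handle = eqh;			/* completion events go here */

	/* auto-unlink when the MD becomes inactive */
	return LNetMDAttach(meh, umd, LNET_UNLINK, mdh);
}
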
-
-/**
- * Create a "free floating" memory descriptor - a MD that is not associated
- * with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations.
- *
- * \param umd,unlink See the discussion for LNetMDAttach().
- * \param handle On successful returns, a handle to the newly created MD is
- * saved here. This handle can be used later in LNetMDUnlink(), LNetPut(),
- * and LNetGet() operations.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a umd is not valid.
- * \retval -ENOMEM If new MD cannot be allocated.
- * \retval -ENOENT \a umd.eq_handle does not point to a valid EQ. Note that
- * it's OK to supply a NULL \a umd.eq_handle by calling
- * LNetInvalidateHandle() on it.
- */
-int
-LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
-{
- lnet_libmd_t *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (lnet_md_validate(&umd) != 0)
- return -EINVAL;
-
- if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
- CERROR("Invalid option: GET|PUT illegal on active MDs\n");
- return -EINVAL;
- }
-
- md = lnet_md_alloc(&umd);
- if (md == NULL)
- return -ENOMEM;
-
- rc = lnet_md_build(md, &umd, unlink);
-
- cpt = lnet_res_lock_current();
- if (rc != 0)
- goto failed;
-
- rc = lnet_md_link(md, umd.eq_handle, cpt);
- if (rc != 0)
- goto failed;
-
- lnet_md2handle(handle, md);
-
- lnet_res_unlock(cpt);
- return 0;
-
- failed:
- lnet_md_free(md);
-
- lnet_res_unlock(cpt);
- return rc;
-}
-EXPORT_SYMBOL(LNetMDBind);
-
-/**
- * Unlink the memory descriptor from any ME it may be linked to and release
- * the internal resources associated with it. As a result, active messages
- * associated with the MD may get aborted.
- *
- * This function does not free the memory region associated with the MD;
- * i.e., the memory the user allocated for this MD. If the ME associated with
- * this MD is not NULL and was created with auto unlink enabled, the ME is
- * unlinked as well (see LNetMEAttach()).
- *
- * Explicitly unlinking a MD via this function call has the same behavior as
- * a MD that has been automatically unlinked, except that no LNET_EVENT_UNLINK
- * is generated in the latter case.
- *
- * An unlinked event can be reported in two ways:
- * - If there's no pending operations on the MD, it's unlinked immediately
- * and an LNET_EVENT_UNLINK event is logged before this function returns.
- * - Otherwise, the MD is only marked for deletion when this function
- * returns, and the unlinked event will be piggybacked on the event of
- * the completion of the last operation by setting the unlinked field of
- * the event. No dedicated LNET_EVENT_UNLINK event is generated.
- *
- * Note that in both cases the unlinked field of the event is always set; no
- * more event will happen on the MD after such an event is logged.
- *
- * \param mdh A handle for the MD to be unlinked.
- *
- * \retval 0 On success.
- * \retval -ENOENT If \a mdh does not point to a valid MD object.
- */
-int
-LNetMDUnlink(lnet_handle_md_t mdh)
-{
- lnet_event_t ev;
- lnet_libmd_t *md;
- int cpt;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- cpt = lnet_cpt_of_cookie(mdh.cookie);
- lnet_res_lock(cpt);
-
- md = lnet_handle2md(&mdh);
- if (md == NULL) {
- lnet_res_unlock(cpt);
- return -ENOENT;
- }
-
- md->md_flags |= LNET_MD_FLAG_ABORTED;
- /* If the MD is busy, lnet_md_unlink just marks it for deletion, and
- * when the LND is done, the completion event flags that the MD was
- * unlinked. Otherwise, we enqueue an event now... */
- if (md->md_eq != NULL && md->md_refcount == 0) {
- lnet_build_unlink_event(md, &ev);
- lnet_eq_enqueue_event(md->md_eq, &ev);
- }
-
- lnet_md_unlink(md);
-
- lnet_res_unlock(cpt);
- return 0;
-}
-EXPORT_SYMBOL(LNetMDUnlink);
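
Because the unlink notification may be piggybacked on the final completion rather than arriving as a dedicated LNET_EVENT_UNLINK, an EQ callback usually checks the event's unlinked field before releasing the buffer; a hedged sketch (the field name follows the description above and is otherwise an assumption):

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"

static void my_md_event_callback(lnet_event_t *ev)
{
	/* ... normal completion handling for 'ev' would go here ... */

	/*
	 * 'unlinked' is set both on a dedicated LNET_EVENT_UNLINK and when
	 * the unlink piggybacks on the last completion; once it is seen,
	 * no further events will arrive for this MD.
	 */
	if (ev->unlinked)
		CDEBUG(D_NET, "MD fully unlinked; buffer may be released\n");
}
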
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
deleted file mode 100644
index 42fc99ef9f80..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-me.c
- *
- * Match Entry management routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/lnet/lib-lnet.h"
-
-/**
- * Create and attach a match entry to the match list of \a portal. The new
- * ME is empty, i.e. not associated with a memory descriptor. LNetMDAttach()
- * can be used to attach a MD to an empty ME.
- *
- * \param portal The portal table index where the ME should be attached.
- * \param match_id Specifies the match criteria for the process ID of
- * the requester. The constants LNET_PID_ANY and LNET_NID_ANY can be
- * used to wildcard either of the identifiers in the lnet_process_id_t
- * structure.
- * \param match_bits,ignore_bits Specify the match criteria to apply
- * to the match bits in the incoming request. The ignore bits are used
- * to mask out insignificant bits in the incoming match bits. The resulting
- * bits are then compared to the ME's match bits to determine if the
- * incoming request meets the match criteria.
- * \param unlink Indicates whether the ME should be unlinked when the memory
- * descriptor associated with it is unlinked (Note that the check for
 - * unlinking a ME only occurs when the memory descriptor is unlinked).
- * Valid values are LNET_RETAIN and LNET_UNLINK.
- * \param pos Indicates whether the new ME should be prepended or
- * appended to the match list. Allowed constants: LNET_INS_BEFORE,
- * LNET_INS_AFTER.
- * \param handle On successful returns, a handle to the newly created ME
- * object is saved here. This handle can be used later in LNetMEInsert(),
- * LNetMEUnlink(), or LNetMDAttach() functions.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a portal is invalid.
- * \retval -ENOMEM If new ME object cannot be allocated.
- */
-int
-LNetMEAttach(unsigned int portal,
- lnet_process_id_t match_id,
- __u64 match_bits, __u64 ignore_bits,
- lnet_unlink_t unlink, lnet_ins_pos_t pos,
- lnet_handle_me_t *handle)
-{
- struct lnet_match_table *mtable;
- struct lnet_me *me;
- struct list_head *head;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if ((int)portal >= the_lnet.ln_nportals)
- return -EINVAL;
-
- mtable = lnet_mt_of_attach(portal, match_id,
- match_bits, ignore_bits, pos);
- if (mtable == NULL) /* can't match portal type */
- return -EPERM;
-
- me = lnet_me_alloc();
- if (me == NULL)
- return -ENOMEM;
-
- lnet_res_lock(mtable->mt_cpt);
-
- me->me_portal = portal;
- me->me_match_id = match_id;
- me->me_match_bits = match_bits;
- me->me_ignore_bits = ignore_bits;
- me->me_unlink = unlink;
- me->me_md = NULL;
-
- lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt],
- &me->me_lh);
- if (ignore_bits != 0)
- head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
- else
- head = lnet_mt_match_head(mtable, match_id, match_bits);
-
- me->me_pos = head - &mtable->mt_mhash[0];
- if (pos == LNET_INS_AFTER || pos == LNET_INS_LOCAL)
- list_add_tail(&me->me_list, head);
- else
- list_add(&me->me_list, head);
-
- lnet_me2handle(handle, me);
-
- lnet_res_unlock(mtable->mt_cpt);
- return 0;
-}
-EXPORT_SYMBOL(LNetMEAttach);
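
A sketch of creating a wildcard match entry with the function above, using the ignore bits to mask part of the incoming match bits (the portal index and bit layout are made up for illustration):

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"

static int attach_wildcard_me(unsigned int portal, lnet_handle_me_t *meh)
{
	lnet_process_id_t any_id;

	any_id.nid = LNET_NID_ANY;	/* accept requests from any NID... */
	any_id.pid = LNET_PID_ANY;	/* ...and from any PID */

	/* match on 0x1000 but ignore the low 8 bits of the request */
	return LNetMEAttach(portal, any_id, 0x1000, 0xff,
			    LNET_UNLINK, LNET_INS_AFTER, meh);
}
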
-
-/**
- * Create a match entry and insert it before or after the ME pointed to by
- * \a current_meh. The new ME is empty, i.e. not associated with a memory
- * descriptor. LNetMDAttach() can be used to attach a MD to an empty ME.
- *
- * This function is identical to LNetMEAttach() except for the position
- * where the new ME is inserted.
- *
- * \param current_meh A handle for a ME. The new ME will be inserted
- * immediately before or immediately after this ME.
- * \param match_id,match_bits,ignore_bits,unlink,pos,handle See the discussion
- * for LNetMEAttach().
- *
- * \retval 0 On success.
- * \retval -ENOMEM If new ME object cannot be allocated.
- * \retval -ENOENT If \a current_meh does not point to a valid match entry.
- */
-int
-LNetMEInsert(lnet_handle_me_t current_meh,
- lnet_process_id_t match_id,
- __u64 match_bits, __u64 ignore_bits,
- lnet_unlink_t unlink, lnet_ins_pos_t pos,
- lnet_handle_me_t *handle)
-{
- struct lnet_me *current_me;
- struct lnet_me *new_me;
- struct lnet_portal *ptl;
- int cpt;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (pos == LNET_INS_LOCAL)
- return -EPERM;
-
- new_me = lnet_me_alloc();
- if (new_me == NULL)
- return -ENOMEM;
-
- cpt = lnet_cpt_of_cookie(current_meh.cookie);
-
- lnet_res_lock(cpt);
-
- current_me = lnet_handle2me(&current_meh);
- if (current_me == NULL) {
- lnet_me_free(new_me);
-
- lnet_res_unlock(cpt);
- return -ENOENT;
- }
-
- LASSERT(current_me->me_portal < the_lnet.ln_nportals);
-
- ptl = the_lnet.ln_portals[current_me->me_portal];
- if (lnet_ptl_is_unique(ptl)) {
-		/* no sense inserting on a unique portal */
- lnet_me_free(new_me);
- lnet_res_unlock(cpt);
- return -EPERM;
- }
-
- new_me->me_pos = current_me->me_pos;
- new_me->me_portal = current_me->me_portal;
- new_me->me_match_id = match_id;
- new_me->me_match_bits = match_bits;
- new_me->me_ignore_bits = ignore_bits;
- new_me->me_unlink = unlink;
- new_me->me_md = NULL;
-
- lnet_res_lh_initialize(the_lnet.ln_me_containers[cpt], &new_me->me_lh);
-
- if (pos == LNET_INS_AFTER)
- list_add(&new_me->me_list, &current_me->me_list);
- else
- list_add_tail(&new_me->me_list, &current_me->me_list);
-
- lnet_me2handle(handle, new_me);
-
- lnet_res_unlock(cpt);
-
- return 0;
-}
-EXPORT_SYMBOL(LNetMEInsert);
-
-/**
- * Unlink a match entry from its match list.
- *
- * This operation also releases any resources associated with the ME. If a
- * memory descriptor is attached to the ME, then it will be unlinked as well
- * and an unlink event will be generated. It is an error to use the ME handle
- * after calling LNetMEUnlink().
- *
- * \param meh A handle for the ME to be unlinked.
- *
- * \retval 0 On success.
- * \retval -ENOENT If \a meh does not point to a valid ME.
- * \see LNetMDUnlink() for the discussion on delivering unlink event.
- */
-int
-LNetMEUnlink(lnet_handle_me_t meh)
-{
- lnet_me_t *me;
- lnet_libmd_t *md;
- lnet_event_t ev;
- int cpt;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- cpt = lnet_cpt_of_cookie(meh.cookie);
- lnet_res_lock(cpt);
-
- me = lnet_handle2me(&meh);
- if (me == NULL) {
- lnet_res_unlock(cpt);
- return -ENOENT;
- }
-
- md = me->me_md;
- if (md != NULL) {
- md->md_flags |= LNET_MD_FLAG_ABORTED;
- if (md->md_eq != NULL && md->md_refcount == 0) {
- lnet_build_unlink_event(md, &ev);
- lnet_eq_enqueue_event(md->md_eq, &ev);
- }
- }
-
- lnet_me_unlink(me);
-
- lnet_res_unlock(cpt);
- return 0;
-}
-EXPORT_SYMBOL(LNetMEUnlink);
-
-/* call with lnet_res_lock please */
-void
-lnet_me_unlink(lnet_me_t *me)
-{
- list_del(&me->me_list);
-
- if (me->me_md != NULL) {
- lnet_libmd_t *md = me->me_md;
-
- /* detach MD from portal of this ME */
- lnet_ptl_detach_md(me, md);
- lnet_md_unlink(md);
- }
-
- lnet_res_lh_invalidate(&me->me_lh);
- lnet_me_free(me);
-}
-
-#if 0
-static void
-lib_me_dump(lnet_me_t *me)
-{
- CWARN("Match Entry %p (%#llx)\n", me,
- me->me_lh.lh_cookie);
-
-	CWARN("\tMatch/Ignore\t= %016llx / %016llx\n",
-	      me->me_match_bits, me->me_ignore_bits);
-
-	CWARN("\tMD\t= %p\n", me->me_md);
- CWARN("\tprev\t= %p\n",
- list_entry(me->me_list.prev, lnet_me_t, me_list));
- CWARN("\tnext\t= %p\n",
- list_entry(me->me_list.next, lnet_me_t, me_list));
-}
-#endif
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
deleted file mode 100644
index 6badfec4c6a0..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ /dev/null
@@ -1,2436 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-move.c
- *
- * Data movement routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/lnet/lib-lnet.h"
-
-static int local_nid_dist_zero = 1;
-module_param(local_nid_dist_zero, int, 0444);
-MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
-
-int
-lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
-{
- lnet_test_peer_t *tp;
- struct list_head *el;
- struct list_head *next;
- struct list_head cull;
-
- LASSERT(the_lnet.ln_init);
-
- /* NB: use lnet_net_lock(0) to serialize operations on test peers */
- if (threshold != 0) {
- /* Adding a new entry */
- LIBCFS_ALLOC(tp, sizeof(*tp));
- if (tp == NULL)
- return -ENOMEM;
-
- tp->tp_nid = nid;
- tp->tp_threshold = threshold;
-
- lnet_net_lock(0);
- list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
- lnet_net_unlock(0);
- return 0;
- }
-
- /* removing entries */
- INIT_LIST_HEAD(&cull);
-
- lnet_net_lock(0);
-
- list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
- tp = list_entry(el, lnet_test_peer_t, tp_list);
-
- if (tp->tp_threshold == 0 || /* needs culling anyway */
- nid == LNET_NID_ANY || /* removing all entries */
- tp->tp_nid == nid) { /* matched this one */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
- }
- }
-
- lnet_net_unlock(0);
-
- while (!list_empty(&cull)) {
- tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
-
- list_del(&tp->tp_list);
- LIBCFS_FREE(tp, sizeof(*tp));
- }
- return 0;
-}
-
-static int
-fail_peer(lnet_nid_t nid, int outgoing)
-{
- lnet_test_peer_t *tp;
- struct list_head *el;
- struct list_head *next;
- struct list_head cull;
- int fail = 0;
-
- INIT_LIST_HEAD(&cull);
-
- /* NB: use lnet_net_lock(0) to serialize operations on test peers */
- lnet_net_lock(0);
-
- list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
- tp = list_entry(el, lnet_test_peer_t, tp_list);
-
- if (tp->tp_threshold == 0) {
- /* zombie entry */
- if (outgoing) {
- /* only cull zombies on outgoing tests,
- * since we may be at interrupt priority on
- * incoming messages. */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
- }
- continue;
- }
-
- if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
- nid == tp->tp_nid) { /* fail this peer */
- fail = 1;
-
- if (tp->tp_threshold != LNET_MD_THRESH_INF) {
- tp->tp_threshold--;
- if (outgoing &&
- tp->tp_threshold == 0) {
- /* see above */
- list_del(&tp->tp_list);
- list_add(&tp->tp_list, &cull);
- }
- }
- break;
- }
- }
-
- lnet_net_unlock(0);
-
- while (!list_empty(&cull)) {
- tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
- list_del(&tp->tp_list);
-
- LIBCFS_FREE(tp, sizeof(*tp));
- }
-
- return fail;
-}
-
-unsigned int
-lnet_iov_nob(unsigned int niov, struct kvec *iov)
-{
- unsigned int nob = 0;
-
- while (niov-- > 0)
- nob += (iov++)->iov_len;
-
- return nob;
-}
-EXPORT_SYMBOL(lnet_iov_nob);
-
-void
-lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
- unsigned int nsiov, struct kvec *siov, unsigned int soffset,
- unsigned int nob)
-{
- /* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
-
- if (nob == 0)
- return;
-
- /* skip complete frags before 'doffset' */
- LASSERT(ndiov > 0);
- while (doffset >= diov->iov_len) {
- doffset -= diov->iov_len;
- diov++;
- ndiov--;
- LASSERT(ndiov > 0);
- }
-
- /* skip complete frags before 'soffset' */
- LASSERT(nsiov > 0);
- while (soffset >= siov->iov_len) {
- soffset -= siov->iov_len;
- siov++;
- nsiov--;
- LASSERT(nsiov > 0);
- }
-
- do {
- LASSERT(ndiov > 0);
- LASSERT(nsiov > 0);
- this_nob = min(diov->iov_len - doffset,
- siov->iov_len - soffset);
- this_nob = min(this_nob, nob);
-
- memcpy((char *)diov->iov_base + doffset,
- (char *)siov->iov_base + soffset, this_nob);
- nob -= this_nob;
-
- if (diov->iov_len > doffset + this_nob) {
- doffset += this_nob;
- } else {
- diov++;
- ndiov--;
- doffset = 0;
- }
-
- if (siov->iov_len > soffset + this_nob) {
- soffset += this_nob;
- } else {
- siov++;
- nsiov--;
- soffset = 0;
- }
- } while (nob > 0);
-}
-EXPORT_SYMBOL(lnet_copy_iov2iov);
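
The routine above first skips whole fragments to reach each starting offset, then repeatedly copies the largest span valid in both the current source and destination fragments. The same pattern in standalone C with plain buffers:

#include <stdio.h>
#include <string.h>

struct vec { char *base; size_t len; };

static void copy_vec2vec(struct vec *dst, size_t doff,
			 struct vec *src, size_t soff, size_t nob)
{
	size_t chunk;

	/* skip complete fragments before the starting offsets */
	while (doff >= dst->len) { doff -= dst->len; dst++; }
	while (soff >= src->len) { soff -= src->len; src++; }

	while (nob > 0) {
		/* largest span contained in both current fragments */
		chunk = dst->len - doff;
		if (chunk > src->len - soff)
			chunk = src->len - soff;
		if (chunk > nob)
			chunk = nob;

		memcpy(dst->base + doff, src->base + soff, chunk);
		nob -= chunk;

		doff += chunk;
		if (doff == dst->len) { dst++; doff = 0; }
		soff += chunk;
		if (soff == src->len) { src++; soff = 0; }
	}
}

int main(void)
{
	char a[] = "abcd", b[] = "efgh", out[6] = { 0 };
	struct vec src[] = { { a, 4 }, { b, 4 } };
	struct vec dst[] = { { out, sizeof(out) } };

	copy_vec2vec(dst, 0, src, 2, 5);	/* copies "cdefg" */
	printf("%s\n", out);
	return 0;
}
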
-
-int
-lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, struct kvec *src,
- unsigned int offset, unsigned int len)
-{
- /* Initialise 'dst' to the subset of 'src' starting at 'offset',
- * for exactly 'len' bytes, and return the number of entries.
- * NB not destructive to 'src' */
- unsigned int frag_len;
- unsigned int niov;
-
- if (len == 0) /* no data => */
- return 0; /* no frags */
-
- LASSERT(src_niov > 0);
- while (offset >= src->iov_len) { /* skip initial frags */
- offset -= src->iov_len;
- src_niov--;
- src++;
- LASSERT(src_niov > 0);
- }
-
- niov = 1;
- for (;;) {
- LASSERT(src_niov > 0);
- LASSERT((int)niov <= dst_niov);
-
- frag_len = src->iov_len - offset;
- dst->iov_base = ((char *)src->iov_base) + offset;
-
- if (len <= frag_len) {
- dst->iov_len = len;
- return niov;
- }
-
- dst->iov_len = frag_len;
-
- len -= frag_len;
- dst++;
- src++;
- niov++;
- src_niov--;
- offset = 0;
- }
-}
-EXPORT_SYMBOL(lnet_extract_iov);
-
-
-unsigned int
-lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
-{
- unsigned int nob = 0;
-
- while (niov-- > 0)
- nob += (kiov++)->kiov_len;
-
- return nob;
-}
-EXPORT_SYMBOL(lnet_kiov_nob);
-
-void
-lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
- unsigned int nob)
-{
- /* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
- char *daddr = NULL;
- char *saddr = NULL;
-
- if (nob == 0)
- return;
-
- LASSERT(!in_interrupt());
-
- LASSERT(ndiov > 0);
- while (doffset >= diov->kiov_len) {
- doffset -= diov->kiov_len;
- diov++;
- ndiov--;
- LASSERT(ndiov > 0);
- }
-
- LASSERT(nsiov > 0);
- while (soffset >= siov->kiov_len) {
- soffset -= siov->kiov_len;
- siov++;
- nsiov--;
- LASSERT(nsiov > 0);
- }
-
- do {
- LASSERT(ndiov > 0);
- LASSERT(nsiov > 0);
- this_nob = min(diov->kiov_len - doffset,
- siov->kiov_len - soffset);
- this_nob = min(this_nob, nob);
-
- if (daddr == NULL)
- daddr = ((char *)kmap(diov->kiov_page)) +
- diov->kiov_offset + doffset;
- if (saddr == NULL)
- saddr = ((char *)kmap(siov->kiov_page)) +
- siov->kiov_offset + soffset;
-
-		/* Vanishing risk of kmap deadlock when mapping 2 pages.
-		 * However, in practice at least one of the kiovs will be
-		 * already-mapped kernel pages, so the kmap/kunmap calls
-		 * will be no-ops */
-
- memcpy(daddr, saddr, this_nob);
- nob -= this_nob;
-
- if (diov->kiov_len > doffset + this_nob) {
- daddr += this_nob;
- doffset += this_nob;
- } else {
- kunmap(diov->kiov_page);
- daddr = NULL;
- diov++;
- ndiov--;
- doffset = 0;
- }
-
- if (siov->kiov_len > soffset + this_nob) {
- saddr += this_nob;
- soffset += this_nob;
- } else {
- kunmap(siov->kiov_page);
- saddr = NULL;
- siov++;
- nsiov--;
- soffset = 0;
- }
- } while (nob > 0);
-
- if (daddr != NULL)
- kunmap(diov->kiov_page);
- if (saddr != NULL)
- kunmap(siov->kiov_page);
-}
-EXPORT_SYMBOL(lnet_copy_kiov2kiov);
-
-void
-lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset, unsigned int nob)
-{
- /* NB iov, kiov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
-
- if (nob == 0)
- return;
-
- LASSERT(!in_interrupt());
-
- LASSERT(niov > 0);
- while (iovoffset >= iov->iov_len) {
- iovoffset -= iov->iov_len;
- iov++;
- niov--;
- LASSERT(niov > 0);
- }
-
- LASSERT(nkiov > 0);
- while (kiovoffset >= kiov->kiov_len) {
- kiovoffset -= kiov->kiov_len;
- kiov++;
- nkiov--;
- LASSERT(nkiov > 0);
- }
-
- do {
- LASSERT(niov > 0);
- LASSERT(nkiov > 0);
- this_nob = min(iov->iov_len - iovoffset,
- (__kernel_size_t) kiov->kiov_len - kiovoffset);
- this_nob = min(this_nob, nob);
-
- if (addr == NULL)
- addr = ((char *)kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
-
- memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
- nob -= this_nob;
-
- if (iov->iov_len > iovoffset + this_nob) {
- iovoffset += this_nob;
- } else {
- iov++;
- niov--;
- iovoffset = 0;
- }
-
- if (kiov->kiov_len > kiovoffset + this_nob) {
- addr += this_nob;
- kiovoffset += this_nob;
- } else {
- kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
-
- } while (nob > 0);
-
- if (addr != NULL)
- kunmap(kiov->kiov_page);
-}
-EXPORT_SYMBOL(lnet_copy_kiov2iov);
-
-void
-lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset, unsigned int niov,
- struct kvec *iov, unsigned int iovoffset,
- unsigned int nob)
-{
- /* NB kiov, iov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
-
- if (nob == 0)
- return;
-
- LASSERT(!in_interrupt());
-
- LASSERT(nkiov > 0);
- while (kiovoffset >= kiov->kiov_len) {
- kiovoffset -= kiov->kiov_len;
- kiov++;
- nkiov--;
- LASSERT(nkiov > 0);
- }
-
- LASSERT(niov > 0);
- while (iovoffset >= iov->iov_len) {
- iovoffset -= iov->iov_len;
- iov++;
- niov--;
- LASSERT(niov > 0);
- }
-
- do {
- LASSERT(nkiov > 0);
- LASSERT(niov > 0);
- this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset,
- iov->iov_len - iovoffset);
- this_nob = min(this_nob, nob);
-
- if (addr == NULL)
- addr = ((char *)kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
-
- memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
- nob -= this_nob;
-
- if (kiov->kiov_len > kiovoffset + this_nob) {
- addr += this_nob;
- kiovoffset += this_nob;
- } else {
- kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
-
- if (iov->iov_len > iovoffset + this_nob) {
- iovoffset += this_nob;
- } else {
- iov++;
- niov--;
- iovoffset = 0;
- }
- } while (nob > 0);
-
- if (addr != NULL)
- kunmap(kiov->kiov_page);
-}
-EXPORT_SYMBOL(lnet_copy_iov2kiov);
-
-int
-lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
- unsigned int offset, unsigned int len)
-{
- /* Initialise 'dst' to the subset of 'src' starting at 'offset',
- * for exactly 'len' bytes, and return the number of entries.
- * NB not destructive to 'src' */
- unsigned int frag_len;
- unsigned int niov;
-
- if (len == 0) /* no data => */
- return 0; /* no frags */
-
- LASSERT(src_niov > 0);
- while (offset >= src->kiov_len) { /* skip initial frags */
- offset -= src->kiov_len;
- src_niov--;
- src++;
- LASSERT(src_niov > 0);
- }
-
- niov = 1;
- for (;;) {
- LASSERT(src_niov > 0);
- LASSERT((int)niov <= dst_niov);
-
- frag_len = src->kiov_len - offset;
- dst->kiov_page = src->kiov_page;
- dst->kiov_offset = src->kiov_offset + offset;
-
- if (len <= frag_len) {
- dst->kiov_len = len;
- LASSERT(dst->kiov_offset + dst->kiov_len
- <= PAGE_CACHE_SIZE);
- return niov;
- }
-
- dst->kiov_len = frag_len;
- LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
-
- len -= frag_len;
- dst++;
- src++;
- niov++;
- src_niov--;
- offset = 0;
- }
-}
-EXPORT_SYMBOL(lnet_extract_kiov);
-
-static void
-lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
-{
- unsigned int niov = 0;
- struct kvec *iov = NULL;
- lnet_kiov_t *kiov = NULL;
- int rc;
-
- LASSERT(!in_interrupt());
- LASSERT(mlen == 0 || msg != NULL);
-
- if (msg != NULL) {
- LASSERT(msg->msg_receiving);
- LASSERT(!msg->msg_sending);
- LASSERT(rlen == msg->msg_len);
- LASSERT(mlen <= msg->msg_len);
- LASSERT(msg->msg_offset == offset);
- LASSERT(msg->msg_wanted == mlen);
-
- msg->msg_receiving = 0;
-
- if (mlen != 0) {
- niov = msg->msg_niov;
- iov = msg->msg_iov;
- kiov = msg->msg_kiov;
-
- LASSERT(niov > 0);
- LASSERT((iov == NULL) != (kiov == NULL));
- }
- }
-
- rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
- niov, iov, kiov, offset, mlen, rlen);
- if (rc < 0)
- lnet_finalize(ni, msg, rc);
-}
-
-static void
-lnet_setpayloadbuffer(lnet_msg_t *msg)
-{
- lnet_libmd_t *md = msg->msg_md;
-
- LASSERT(msg->msg_len > 0);
- LASSERT(!msg->msg_routing);
- LASSERT(md != NULL);
- LASSERT(msg->msg_niov == 0);
- LASSERT(msg->msg_iov == NULL);
- LASSERT(msg->msg_kiov == NULL);
-
- msg->msg_niov = md->md_niov;
- if ((md->md_options & LNET_MD_KIOV) != 0)
- msg->msg_kiov = md->md_iov.kiov;
- else
- msg->msg_iov = md->md_iov.iov;
-}
-
-void
-lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
- unsigned int offset, unsigned int len)
-{
- msg->msg_type = type;
- msg->msg_target = target;
- msg->msg_len = len;
- msg->msg_offset = offset;
-
- if (len != 0)
- lnet_setpayloadbuffer(msg);
-
- memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
- msg->msg_hdr.type = cpu_to_le32(type);
- msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
- msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
- /* src_nid will be set later */
- msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
- msg->msg_hdr.payload_length = cpu_to_le32(len);
-}
-
-static void
-lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
-{
- void *priv = msg->msg_private;
- int rc;
-
- LASSERT(!in_interrupt());
- LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
- (msg->msg_txcredit && msg->msg_peertxcredit));
-
- rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
- if (rc < 0)
- lnet_finalize(ni, msg, rc);
-}
-
-static int
-lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
-{
- int rc;
-
- LASSERT(!msg->msg_sending);
- LASSERT(msg->msg_receiving);
- LASSERT(!msg->msg_rx_ready_delay);
- LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);
-
- msg->msg_rx_ready_delay = 1;
- rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
- &msg->msg_private);
- if (rc != 0) {
- CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
- libcfs_nid2str(msg->msg_rxpeer->lp_nid),
- libcfs_id2str(msg->msg_target), rc);
- LASSERT(rc < 0); /* required by my callers */
- }
-
- return rc;
-}
-
-/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
-static void
-lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
-{
- unsigned long last_alive = 0;
-
- LASSERT(lnet_peer_aliveness_enabled(lp));
- LASSERT(ni->ni_lnd->lnd_query != NULL);
-
- lnet_net_unlock(lp->lp_cpt);
- (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
- lnet_net_lock(lp->lp_cpt);
-
- lp->lp_last_query = cfs_time_current();
-
- if (last_alive != 0) /* NI has updated timestamp */
- lp->lp_last_alive = last_alive;
-}
-
-/* NB: always called with lnet_net_lock held */
-static inline int
-lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
-{
- int alive;
- unsigned long deadline;
-
- LASSERT(lnet_peer_aliveness_enabled(lp));
-
- /* Trust lnet_notify() if it has more recent aliveness news, but
- * ignore the initial assumed death (see lnet_peers_start_down()).
- */
- if (!lp->lp_alive && lp->lp_alive_count > 0 &&
- cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
- return 0;
-
- deadline = cfs_time_add(lp->lp_last_alive,
- cfs_time_seconds(lp->lp_ni->ni_peertimeout));
- alive = cfs_time_after(deadline, now);
-
- /* Update obsolete lp_alive except for routers assumed to be dead
- * initially, because router checker would update aliveness in this
- * case, and moreover lp_last_alive at peer creation is assumed.
- */
- if (alive && !lp->lp_alive &&
- !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
- lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
-
- return alive;
-}
-
-
-/* NB: returns 1 when alive, 0 when dead, negative when error;
- * may drop the lnet_net_lock */
-static int
-lnet_peer_alive_locked(lnet_peer_t *lp)
-{
- unsigned long now = cfs_time_current();
-
- if (!lnet_peer_aliveness_enabled(lp))
- return -ENODEV;
-
- if (lnet_peer_is_alive(lp, now))
- return 1;
-
- /* Peer appears dead, but we should avoid frequent NI queries (at
- * most once per lnet_queryinterval seconds). */
- if (lp->lp_last_query != 0) {
- static const int lnet_queryinterval = 1;
-
- unsigned long next_query =
- cfs_time_add(lp->lp_last_query,
- cfs_time_seconds(lnet_queryinterval));
-
- if (time_before(now, next_query)) {
- if (lp->lp_alive)
- CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
- libcfs_nid2str(lp->lp_nid),
- (int)now, (int)next_query,
- lnet_queryinterval,
- lp->lp_ni->ni_peertimeout);
- return 0;
- }
- }
-
- /* query NI for latest aliveness news */
- lnet_ni_query_locked(lp->lp_ni, lp);
-
- if (lnet_peer_is_alive(lp, now))
- return 1;
-
- lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
- return 0;
-}
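
The aliveness test above is a deadline comparison: last_alive plus the NI's peer timeout, measured against 'now'. A standalone restatement using the usual wrap-safe tick comparison (the kernel's cfs_time_add()/cfs_time_after() helpers encapsulate the same idea):

#include <stdio.h>

/* wrap-safe "a is after b" for a free-running tick counter */
static int ticks_after(unsigned long a, unsigned long b)
{
	return (long)(a - b) > 0;
}

static int peer_is_alive(unsigned long last_alive, unsigned long timeout,
			 unsigned long now)
{
	unsigned long deadline = last_alive + timeout;

	return ticks_after(deadline, now);
}

int main(void)
{
	printf("%d\n", peer_is_alive(1000, 250, 1200));	/* 1: still alive */
	printf("%d\n", peer_is_alive(1000, 250, 1300));	/* 0: timed out */
	return 0;
}
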
-
-/**
- * \param msg The message to be sent.
- * \param do_send True if lnet_ni_send() should be called in this function.
- * lnet_send() is going to lnet_net_unlock() immediately after this, so
- * it sets \a do_send FALSE and this function skips the unlock/send/lock
- * sequence.
- *
- * \retval 0 If \a msg sent or OK to send.
- * \retval EAGAIN If \a msg blocked for credit.
- * \retval EHOSTUNREACH If the next hop of the message appears dead.
- * \retval ECANCELED If the MD of the message has been unlinked.
- */
-static int
-lnet_post_send_locked(lnet_msg_t *msg, int do_send)
-{
- lnet_peer_t *lp = msg->msg_txpeer;
- lnet_ni_t *ni = lp->lp_ni;
- int cpt = msg->msg_tx_cpt;
- struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
-
- /* non-lnet_send() callers have checked before */
- LASSERT(!do_send || msg->msg_tx_delayed);
- LASSERT(!msg->msg_receiving);
- LASSERT(msg->msg_tx_committed);
-
- /* NB 'lp' is always the next hop */
- if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
- lnet_peer_alive_locked(lp) == 0) {
- the_lnet.ln_counters[cpt]->drop_count++;
- the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
- lnet_net_unlock(cpt);
-
- CNETERR("Dropping message for %s: peer not alive\n",
- libcfs_id2str(msg->msg_target));
- if (do_send)
- lnet_finalize(ni, msg, -EHOSTUNREACH);
-
- lnet_net_lock(cpt);
- return EHOSTUNREACH;
- }
-
- if (msg->msg_md != NULL &&
- (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
- lnet_net_unlock(cpt);
-
- CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
- libcfs_id2str(msg->msg_target));
- if (do_send)
- lnet_finalize(ni, msg, -ECANCELED);
-
- lnet_net_lock(cpt);
- return ECANCELED;
- }
-
- if (!msg->msg_peertxcredit) {
- LASSERT((lp->lp_txcredits < 0) ==
- !list_empty(&lp->lp_txq));
-
- msg->msg_peertxcredit = 1;
- lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
- lp->lp_txcredits--;
-
- if (lp->lp_txcredits < lp->lp_mintxcredits)
- lp->lp_mintxcredits = lp->lp_txcredits;
-
- if (lp->lp_txcredits < 0) {
- msg->msg_tx_delayed = 1;
- list_add_tail(&msg->msg_list, &lp->lp_txq);
- return EAGAIN;
- }
- }
-
- if (!msg->msg_txcredit) {
- LASSERT((tq->tq_credits < 0) ==
- !list_empty(&tq->tq_delayed));
-
- msg->msg_txcredit = 1;
- tq->tq_credits--;
-
- if (tq->tq_credits < tq->tq_credits_min)
- tq->tq_credits_min = tq->tq_credits;
-
- if (tq->tq_credits < 0) {
- msg->msg_tx_delayed = 1;
- list_add_tail(&msg->msg_list, &tq->tq_delayed);
- return EAGAIN;
- }
- }
-
- if (do_send) {
- lnet_net_unlock(cpt);
- lnet_ni_send(ni, msg);
- lnet_net_lock(cpt);
- }
- return 0;
-}
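
Both credit checks above follow the same shape: take a credit, track the low-water mark, and park the message on a delay queue when the count goes negative; returning a credit later releases one queued message. A standalone sketch of one such counter (a plain integer stands in for the delayed-message list):

#include <errno.h>

struct credit_pool {
	int credits;		/* may go negative while messages queue up */
	int min_credits;	/* low-water mark, kept for statistics */
	int queued;		/* stand-in for the delayed-message list */
};

/* 0: the message may be sent now; EAGAIN: it must wait for a credit */
static int take_credit(struct credit_pool *p)
{
	p->credits--;
	if (p->credits < p->min_credits)
		p->min_credits = p->credits;

	if (p->credits < 0) {
		p->queued++;		/* would list_add_tail() the message */
		return EAGAIN;
	}
	return 0;
}

/* returning a credit releases one delayed message, if any is waiting */
static int return_credit(struct credit_pool *p)
{
	p->credits++;
	if (p->credits <= 0 && p->queued > 0) {
		p->queued--;		/* would dequeue and send one message */
		return 1;
	}
	return 0;
}
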
-
-
-static lnet_rtrbufpool_t *
-lnet_msg2bufpool(lnet_msg_t *msg)
-{
- lnet_rtrbufpool_t *rbp;
- int cpt;
-
- LASSERT(msg->msg_rx_committed);
-
- cpt = msg->msg_rx_cpt;
- rbp = &the_lnet.ln_rtrpools[cpt][0];
-
- LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
- rbp++;
- LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
- }
-
- return rbp;
-}
-
-static int
-lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
-{
-	/* lnet_parse() is going to lnet_net_unlock() immediately after this,
-	 * so it sets do_recv FALSE and this function skips the
-	 * unlock/recv/lock sequence. Return EAGAIN if the msg blocked and
-	 * 0 if it was received or is OK to receive */
- lnet_peer_t *lp = msg->msg_rxpeer;
- lnet_rtrbufpool_t *rbp;
- lnet_rtrbuf_t *rb;
-
- LASSERT(msg->msg_iov == NULL);
- LASSERT(msg->msg_kiov == NULL);
- LASSERT(msg->msg_niov == 0);
- LASSERT(msg->msg_routing);
- LASSERT(msg->msg_receiving);
- LASSERT(!msg->msg_sending);
-
- /* non-lnet_parse callers only receive delayed messages */
- LASSERT(!do_recv || msg->msg_rx_delayed);
-
- if (!msg->msg_peerrtrcredit) {
- LASSERT((lp->lp_rtrcredits < 0) ==
- !list_empty(&lp->lp_rtrq));
-
- msg->msg_peerrtrcredit = 1;
- lp->lp_rtrcredits--;
- if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
- lp->lp_minrtrcredits = lp->lp_rtrcredits;
-
- if (lp->lp_rtrcredits < 0) {
- /* must have checked eager_recv before here */
- LASSERT(msg->msg_rx_ready_delay);
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &lp->lp_rtrq);
- return EAGAIN;
- }
- }
-
- rbp = lnet_msg2bufpool(msg);
-
- if (!msg->msg_rtrcredit) {
- LASSERT((rbp->rbp_credits < 0) ==
- !list_empty(&rbp->rbp_msgs));
-
- msg->msg_rtrcredit = 1;
- rbp->rbp_credits--;
- if (rbp->rbp_credits < rbp->rbp_mincredits)
- rbp->rbp_mincredits = rbp->rbp_credits;
-
- if (rbp->rbp_credits < 0) {
- /* must have checked eager_recv before here */
- LASSERT(msg->msg_rx_ready_delay);
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
- return EAGAIN;
- }
- }
-
- LASSERT(!list_empty(&rbp->rbp_bufs));
- rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
- list_del(&rb->rb_list);
-
- msg->msg_niov = rbp->rbp_npages;
- msg->msg_kiov = &rb->rb_kiov[0];
-
- if (do_recv) {
- int cpt = msg->msg_rx_cpt;
-
- lnet_net_unlock(cpt);
- lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
- 0, msg->msg_len, msg->msg_len);
- lnet_net_lock(cpt);
- }
- return 0;
-}
-
-void
-lnet_return_tx_credits_locked(lnet_msg_t *msg)
-{
- lnet_peer_t *txpeer = msg->msg_txpeer;
- lnet_msg_t *msg2;
-
- if (msg->msg_txcredit) {
- struct lnet_ni *ni = txpeer->lp_ni;
- struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
-
- /* give back NI txcredits */
- msg->msg_txcredit = 0;
-
- LASSERT((tq->tq_credits < 0) ==
- !list_empty(&tq->tq_delayed));
-
- tq->tq_credits++;
- if (tq->tq_credits <= 0) {
- msg2 = list_entry(tq->tq_delayed.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
-
- LASSERT(msg2->msg_txpeer->lp_ni == ni);
- LASSERT(msg2->msg_tx_delayed);
-
- (void) lnet_post_send_locked(msg2, 1);
- }
- }
-
- if (msg->msg_peertxcredit) {
- /* give back peer txcredits */
- msg->msg_peertxcredit = 0;
-
- LASSERT((txpeer->lp_txcredits < 0) ==
- !list_empty(&txpeer->lp_txq));
-
- txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
- LASSERT(txpeer->lp_txqnob >= 0);
-
- txpeer->lp_txcredits++;
- if (txpeer->lp_txcredits <= 0) {
- msg2 = list_entry(txpeer->lp_txq.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
-
- LASSERT(msg2->msg_txpeer == txpeer);
- LASSERT(msg2->msg_tx_delayed);
-
- (void) lnet_post_send_locked(msg2, 1);
- }
- }
-
- if (txpeer != NULL) {
- msg->msg_txpeer = NULL;
- lnet_peer_decref_locked(txpeer);
- }
-}
-
-void
-lnet_return_rx_credits_locked(lnet_msg_t *msg)
-{
- lnet_peer_t *rxpeer = msg->msg_rxpeer;
- lnet_msg_t *msg2;
-
- if (msg->msg_rtrcredit) {
- /* give back global router credits */
- lnet_rtrbuf_t *rb;
- lnet_rtrbufpool_t *rbp;
-
- /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
- * there until it gets one allocated, or aborts the wait
- * itself */
- LASSERT(msg->msg_kiov != NULL);
-
- rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
- rbp = rb->rb_pool;
- LASSERT(rbp == lnet_msg2bufpool(msg));
-
- msg->msg_kiov = NULL;
- msg->msg_rtrcredit = 0;
-
- LASSERT((rbp->rbp_credits < 0) ==
- !list_empty(&rbp->rbp_msgs));
- LASSERT((rbp->rbp_credits > 0) ==
- !list_empty(&rbp->rbp_bufs));
-
- list_add(&rb->rb_list, &rbp->rbp_bufs);
- rbp->rbp_credits++;
- if (rbp->rbp_credits <= 0) {
- msg2 = list_entry(rbp->rbp_msgs.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
-
- (void) lnet_post_routed_recv_locked(msg2, 1);
- }
- }
-
- if (msg->msg_peerrtrcredit) {
- /* give back peer router credits */
- msg->msg_peerrtrcredit = 0;
-
- LASSERT((rxpeer->lp_rtrcredits < 0) ==
- !list_empty(&rxpeer->lp_rtrq));
-
- rxpeer->lp_rtrcredits++;
- if (rxpeer->lp_rtrcredits <= 0) {
- msg2 = list_entry(rxpeer->lp_rtrq.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
-
- (void) lnet_post_routed_recv_locked(msg2, 1);
- }
- }
- if (rxpeer != NULL) {
- msg->msg_rxpeer = NULL;
- lnet_peer_decref_locked(rxpeer);
- }
-}
-
-static int
-lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
-{
- lnet_peer_t *p1 = r1->lr_gateway;
- lnet_peer_t *p2 = r2->lr_gateway;
-
- if (r1->lr_priority < r2->lr_priority)
- return 1;
-
- if (r1->lr_priority > r2->lr_priority)
- return -1;
-
- if (r1->lr_hops < r2->lr_hops)
- return 1;
-
- if (r1->lr_hops > r2->lr_hops)
- return -1;
-
- if (p1->lp_txqnob < p2->lp_txqnob)
- return 1;
-
- if (p1->lp_txqnob > p2->lp_txqnob)
- return -1;
-
- if (p1->lp_txcredits > p2->lp_txcredits)
- return 1;
-
- if (p1->lp_txcredits < p2->lp_txcredits)
- return -1;
-
- if (r1->lr_seq - r2->lr_seq <= 0)
- return 1;
-
- return -1;
-}
-
-static lnet_peer_t *
-lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
-{
- lnet_remotenet_t *rnet;
- lnet_route_t *rtr;
- lnet_route_t *rtr_best;
- lnet_route_t *rtr_last;
- struct lnet_peer *lp_best;
- struct lnet_peer *lp;
- int rc;
-
- /* If @rtr_nid is not LNET_NID_ANY, return the gateway with
- * rtr_nid nid, otherwise find the best gateway I can use */
-
- rnet = lnet_find_net_locked(LNET_NIDNET(target));
- if (rnet == NULL)
- return NULL;
-
- lp_best = NULL;
- rtr_best = rtr_last = NULL;
- list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
- lp = rtr->lr_gateway;
-
- if (!lp->lp_alive || /* gateway is down */
- ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
- rtr->lr_downis != 0)) /* NI to target is down */
- continue;
-
- if (ni != NULL && lp->lp_ni != ni)
- continue;
-
- if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
- return lp;
-
- if (lp_best == NULL) {
- rtr_best = rtr_last = rtr;
- lp_best = lp;
- continue;
- }
-
-		/* no protection on the fields below, but it's harmless */
- if (rtr_last->lr_seq - rtr->lr_seq < 0)
- rtr_last = rtr;
-
- rc = lnet_compare_routes(rtr, rtr_best);
- if (rc < 0)
- continue;
-
- rtr_best = rtr;
- lp_best = lp;
- }
-
-	/* set sequence number on the best router to the latest sequence + 1
-	 * so we can round-robin all routers; it's racy and inaccurate but
-	 * harmless and functional */
- if (rtr_best != NULL)
- rtr_best->lr_seq = rtr_last->lr_seq + 1;
- return lp_best;
-}
-
-int
-lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
-{
- lnet_nid_t dst_nid = msg->msg_target.nid;
- struct lnet_ni *src_ni;
- struct lnet_ni *local_ni;
- struct lnet_peer *lp;
- int cpt;
- int cpt2;
- int rc;
-
- /* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
- * but we might want to use pre-determined router for ACK/REPLY
- * in the future */
- /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
- LASSERT(msg->msg_txpeer == NULL);
- LASSERT(!msg->msg_sending);
- LASSERT(!msg->msg_target_is_router);
- LASSERT(!msg->msg_receiving);
-
- msg->msg_sending = 1;
-
- LASSERT(!msg->msg_tx_committed);
- cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
- again:
- lnet_net_lock(cpt);
-
- if (the_lnet.ln_shutdown) {
- lnet_net_unlock(cpt);
- return -ESHUTDOWN;
- }
-
- if (src_nid == LNET_NID_ANY) {
- src_ni = NULL;
- } else {
- src_ni = lnet_nid2ni_locked(src_nid, cpt);
- if (src_ni == NULL) {
- lnet_net_unlock(cpt);
- LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
- libcfs_nid2str(dst_nid),
- libcfs_nid2str(src_nid));
- return -EINVAL;
- }
- LASSERT(!msg->msg_routing);
- }
-
- /* Is this for someone on a local network? */
- local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);
-
- if (local_ni != NULL) {
- if (src_ni == NULL) {
- src_ni = local_ni;
- src_nid = src_ni->ni_nid;
- } else if (src_ni == local_ni) {
- lnet_ni_decref_locked(local_ni, cpt);
- } else {
- lnet_ni_decref_locked(local_ni, cpt);
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
-			LCONSOLE_WARN("No route to %s from %s\n",
- libcfs_nid2str(dst_nid),
- libcfs_nid2str(src_nid));
- return -EINVAL;
- }
-
- LASSERT(src_nid != LNET_NID_ANY);
- lnet_msg_commit(msg, cpt);
-
- if (!msg->msg_routing)
- msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
-
- if (src_ni == the_lnet.ln_loni) {
- /* No send credit hassles with LOLND */
- lnet_net_unlock(cpt);
- lnet_ni_send(src_ni, msg);
-
- lnet_net_lock(cpt);
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
- return 0;
- }
-
- rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
- /* lp has ref on src_ni; lose mine */
- lnet_ni_decref_locked(src_ni, cpt);
- if (rc != 0) {
- lnet_net_unlock(cpt);
- LCONSOLE_WARN("Error %d finding peer %s\n", rc,
- libcfs_nid2str(dst_nid));
- /* ENOMEM or shutting down */
- return rc;
- }
- LASSERT(lp->lp_ni == src_ni);
- } else {
- /* sending to a remote network */
- lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
- if (lp == NULL) {
- if (src_ni != NULL)
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
-
- LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
- libcfs_id2str(msg->msg_target),
- libcfs_nid2str(src_nid));
- return -EHOSTUNREACH;
- }
-
-		/* rtr_nid is LNET_NID_ANY or the NID of a pre-determined
-		 * router; it's possible that rtr_nid isn't LNET_NID_ANY and
-		 * lp isn't the pre-determined router, which can happen if
-		 * the router table changed while the lock was dropped */
- if (rtr_nid != lp->lp_nid) {
- cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
- if (cpt2 != cpt) {
- if (src_ni != NULL)
- lnet_ni_decref_locked(src_ni, cpt);
- lnet_net_unlock(cpt);
-
- rtr_nid = lp->lp_nid;
- cpt = cpt2;
- goto again;
- }
- }
-
- CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
- libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
- lnet_msgtyp2str(msg->msg_type), msg->msg_len);
-
- if (src_ni == NULL) {
- src_ni = lp->lp_ni;
- src_nid = src_ni->ni_nid;
- } else {
- LASSERT(src_ni == lp->lp_ni);
- lnet_ni_decref_locked(src_ni, cpt);
- }
-
- lnet_peer_addref_locked(lp);
-
- LASSERT(src_nid != LNET_NID_ANY);
- lnet_msg_commit(msg, cpt);
-
- if (!msg->msg_routing) {
- /* I'm the source and now I know which NI to send on */
- msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
- }
-
- msg->msg_target_is_router = 1;
- msg->msg_target.nid = lp->lp_nid;
- msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
- }
-
- /* 'lp' is our best choice of peer */
-
- LASSERT(!msg->msg_peertxcredit);
- LASSERT(!msg->msg_txcredit);
- LASSERT(msg->msg_txpeer == NULL);
-
- msg->msg_txpeer = lp; /* msg takes my ref on lp */
-
- rc = lnet_post_send_locked(msg, 0);
- lnet_net_unlock(cpt);
-
- if (rc == EHOSTUNREACH || rc == ECANCELED)
- return -rc;
-
- if (rc == 0)
- lnet_ni_send(src_ni, msg);
-
- return 0; /* rc == 0 or EAGAIN */
-}
-
-static void
-lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
-{
- lnet_net_lock(cpt);
- the_lnet.ln_counters[cpt]->drop_count++;
- the_lnet.ln_counters[cpt]->drop_length += nob;
- lnet_net_unlock(cpt);
-
- lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
-}
-
-static void
-lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
-{
- lnet_hdr_t *hdr = &msg->msg_hdr;
-
- if (msg->msg_wanted != 0)
- lnet_setpayloadbuffer(msg);
-
- lnet_build_msg_event(msg, LNET_EVENT_PUT);
-
- /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
- * it back into the ACK during lnet_finalize() */
- msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
- (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
-
- lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
- msg->msg_offset, msg->msg_wanted, hdr->payload_length);
-}
-
-static int
-lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
-{
- lnet_hdr_t *hdr = &msg->msg_hdr;
- struct lnet_match_info info;
- int rc;
-
- /* Convert put fields to host byte order */
- hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
- hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
- hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);
-
- info.mi_id.nid = hdr->src_nid;
- info.mi_id.pid = hdr->src_pid;
- info.mi_opc = LNET_MD_OP_PUT;
- info.mi_portal = hdr->msg.put.ptl_index;
- info.mi_rlength = hdr->payload_length;
- info.mi_roffset = hdr->msg.put.offset;
- info.mi_mbits = hdr->msg.put.match_bits;
-
- msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;
-
- again:
- rc = lnet_ptl_match_md(&info, msg);
- switch (rc) {
- default:
- LBUG();
-
- case LNET_MATCHMD_OK:
- lnet_recv_put(ni, msg);
- return 0;
-
- case LNET_MATCHMD_NONE:
- if (msg->msg_rx_delayed) /* attached on delayed list */
- return 0;
-
- rc = lnet_ni_eager_recv(ni, msg);
- if (rc == 0)
- goto again;
- /* fall through */
-
- case LNET_MATCHMD_DROP:
- CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
- libcfs_id2str(info.mi_id), info.mi_portal,
- info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
-
- return ENOENT; /* +ve: OK but no match */
- }
-}
-
-static int
-lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
-{
- struct lnet_match_info info;
- lnet_hdr_t *hdr = &msg->msg_hdr;
- lnet_handle_wire_t reply_wmd;
- int rc;
-
- /* Convert get fields to host byte order */
- hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
- hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
- hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
- hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);
-
- info.mi_id.nid = hdr->src_nid;
- info.mi_id.pid = hdr->src_pid;
- info.mi_opc = LNET_MD_OP_GET;
- info.mi_portal = hdr->msg.get.ptl_index;
- info.mi_rlength = hdr->msg.get.sink_length;
- info.mi_roffset = hdr->msg.get.src_offset;
- info.mi_mbits = hdr->msg.get.match_bits;
-
- rc = lnet_ptl_match_md(&info, msg);
- if (rc == LNET_MATCHMD_DROP) {
- CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
- libcfs_id2str(info.mi_id), info.mi_portal,
- info.mi_mbits, info.mi_roffset, info.mi_rlength);
- return ENOENT; /* +ve: OK but no match */
- }
-
- LASSERT(rc == LNET_MATCHMD_OK);
-
- lnet_build_msg_event(msg, LNET_EVENT_GET);
-
- reply_wmd = hdr->msg.get.return_wmd;
-
- lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
- msg->msg_offset, msg->msg_wanted);
-
- msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
-
- if (rdma_get) {
- /* The LND completes the REPLY from her recv procedure */
- lnet_ni_recv(ni, msg->msg_private, msg, 0,
- msg->msg_offset, msg->msg_len, msg->msg_len);
- return 0;
- }
-
- lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
- msg->msg_receiving = 0;
-
- rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
- if (rc < 0) {
- /* didn't get as far as lnet_ni_send() */
- CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
- libcfs_nid2str(ni->ni_nid),
- libcfs_id2str(info.mi_id), rc);
-
- lnet_finalize(ni, msg, rc);
- }
-
- return 0;
-}
-
-static int
-lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
-{
- void *private = msg->msg_private;
- lnet_hdr_t *hdr = &msg->msg_hdr;
- lnet_process_id_t src = {0};
- lnet_libmd_t *md;
- int rlength;
- int mlength;
- int cpt;
-
- cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
- lnet_res_lock(cpt);
-
- src.nid = hdr->src_nid;
- src.pid = hdr->src_pid;
-
- /* NB handles only looked up by creator (no flips) */
- md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
- CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- (md == NULL) ? "invalid" : "inactive",
- hdr->msg.reply.dst_wmd.wh_interface_cookie,
- hdr->msg.reply.dst_wmd.wh_object_cookie);
- if (md != NULL && md->md_me != NULL)
- CERROR("REPLY MD also attached to portal %d\n",
- md->md_me->me_portal);
-
- lnet_res_unlock(cpt);
- return ENOENT; /* +ve: OK but no match */
- }
-
- LASSERT(md->md_offset == 0);
-
- rlength = hdr->payload_length;
- mlength = min_t(uint, rlength, md->md_length);
-
- if (mlength < rlength &&
- (md->md_options & LNET_MD_TRUNCATE) == 0) {
- CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
- mlength);
- lnet_res_unlock(cpt);
- return ENOENT; /* +ve: OK but no match */
- }
-
- CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
-
- lnet_msg_attach_md(msg, md, 0, mlength);
-
- if (mlength != 0)
- lnet_setpayloadbuffer(msg);
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_REPLY);
-
- lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
- return 0;
-}
-
-static int
-lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
-{
- lnet_hdr_t *hdr = &msg->msg_hdr;
- lnet_process_id_t src = {0};
- lnet_libmd_t *md;
- int cpt;
-
- src.nid = hdr->src_nid;
- src.pid = hdr->src_pid;
-
- /* Convert ack fields to host byte order */
- hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
- hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
-
- cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
- lnet_res_lock(cpt);
-
- /* NB handles only looked up by creator (no flips) */
- md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
- /* Don't moan; this is expected */
- CDEBUG(D_NET,
- "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- (md == NULL) ? "invalid" : "inactive",
- hdr->msg.ack.dst_wmd.wh_interface_cookie,
- hdr->msg.ack.dst_wmd.wh_object_cookie);
- if (md != NULL && md->md_me != NULL)
- CERROR("Source MD also attached to portal %d\n",
- md->md_me->me_portal);
-
- lnet_res_unlock(cpt);
- return ENOENT; /* +ve! */
- }
-
- CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- hdr->msg.ack.dst_wmd.wh_object_cookie);
-
- lnet_msg_attach_md(msg, md, 0, 0);
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_ACK);
-
- lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
- return 0;
-}
-
-static int
-lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
-{
- int rc = 0;
-
- if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
- lnet_msg2bufpool(msg)->rbp_credits <= 0) {
- if (ni->ni_lnd->lnd_eager_recv == NULL) {
- msg->msg_rx_ready_delay = 1;
- } else {
- lnet_net_unlock(msg->msg_rx_cpt);
- rc = lnet_ni_eager_recv(ni, msg);
- lnet_net_lock(msg->msg_rx_cpt);
- }
- }
-
- if (rc == 0)
- rc = lnet_post_routed_recv_locked(msg, 0);
- return rc;
-}
-
-char *
-lnet_msgtyp2str(int type)
-{
- switch (type) {
- case LNET_MSG_ACK:
- return "ACK";
- case LNET_MSG_PUT:
- return "PUT";
- case LNET_MSG_GET:
- return "GET";
- case LNET_MSG_REPLY:
- return "REPLY";
- case LNET_MSG_HELLO:
- return "HELLO";
- default:
- return "<UNKNOWN>";
- }
-}
-EXPORT_SYMBOL(lnet_msgtyp2str);
-
-void
-lnet_print_hdr(lnet_hdr_t *hdr)
-{
- lnet_process_id_t src = {0};
- lnet_process_id_t dst = {0};
- char *type_str = lnet_msgtyp2str(hdr->type);
-
- src.nid = hdr->src_nid;
- src.pid = hdr->src_pid;
-
- dst.nid = hdr->dest_nid;
- dst.pid = hdr->dest_pid;
-
- CWARN("P3 Header at %p of type %s\n", hdr, type_str);
- CWARN(" From %s\n", libcfs_id2str(src));
- CWARN(" To %s\n", libcfs_id2str(dst));
-
- switch (hdr->type) {
- default:
- break;
-
- case LNET_MSG_PUT:
- CWARN(" Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
- hdr->msg.put.ptl_index,
- hdr->msg.put.ack_wmd.wh_interface_cookie,
- hdr->msg.put.ack_wmd.wh_object_cookie,
- hdr->msg.put.match_bits);
- CWARN(" Length %d, offset %d, hdr data %#llx\n",
- hdr->payload_length, hdr->msg.put.offset,
- hdr->msg.put.hdr_data);
- break;
-
- case LNET_MSG_GET:
- CWARN(" Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
- hdr->msg.get.ptl_index,
- hdr->msg.get.return_wmd.wh_interface_cookie,
- hdr->msg.get.return_wmd.wh_object_cookie,
- hdr->msg.get.match_bits);
- CWARN(" Length %d, src offset %d\n",
- hdr->msg.get.sink_length,
- hdr->msg.get.src_offset);
- break;
-
- case LNET_MSG_ACK:
- CWARN(" dst md %#llx.%#llx, manipulated length %d\n",
- hdr->msg.ack.dst_wmd.wh_interface_cookie,
- hdr->msg.ack.dst_wmd.wh_object_cookie,
- hdr->msg.ack.mlength);
- break;
-
- case LNET_MSG_REPLY:
- CWARN(" dst md %#llx.%#llx, length %d\n",
- hdr->msg.reply.dst_wmd.wh_interface_cookie,
- hdr->msg.reply.dst_wmd.wh_object_cookie,
- hdr->payload_length);
- }
-
-}
-
-int
-lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
- void *private, int rdma_req)
-{
- int rc = 0;
- int cpt;
- int for_me;
- struct lnet_msg *msg;
- lnet_pid_t dest_pid;
- lnet_nid_t dest_nid;
- lnet_nid_t src_nid;
- __u32 payload_length;
- __u32 type;
-
- LASSERT(!in_interrupt());
-
- type = le32_to_cpu(hdr->type);
- src_nid = le64_to_cpu(hdr->src_nid);
- dest_nid = le64_to_cpu(hdr->dest_nid);
- dest_pid = le32_to_cpu(hdr->dest_pid);
- payload_length = le32_to_cpu(hdr->payload_length);
-
- for_me = (ni->ni_nid == dest_nid);
- cpt = lnet_cpt_of_nid(from_nid);
-
- switch (type) {
- case LNET_MSG_ACK:
- case LNET_MSG_GET:
- if (payload_length > 0) {
- CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type), payload_length);
- return -EPROTO;
- }
- break;
-
- case LNET_MSG_PUT:
- case LNET_MSG_REPLY:
- if (payload_length >
- (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
- CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type),
- payload_length,
- for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
- return -EPROTO;
- }
- break;
-
- default:
- CERROR("%s, src %s: Bad message type 0x%x\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid), type);
- return -EPROTO;
- }
-
- if (the_lnet.ln_routing &&
- ni->ni_last_alive != ktime_get_real_seconds()) {
- lnet_ni_lock(ni);
-
-		/* NB: so far this is the only place that sets the NI status to "up" */
- ni->ni_last_alive = ktime_get_real_seconds();
- if (ni->ni_status != NULL &&
- ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
- ni->ni_status->ns_status = LNET_NI_STATUS_UP;
- lnet_ni_unlock(ni);
- }
-
- /* Regard a bad destination NID as a protocol error. Senders should
- * know what they're doing; if they don't they're misconfigured, buggy
- * or malicious so we chop them off at the knees :) */
-
- if (!for_me) {
- if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
- /* should have gone direct */
- CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
-
- if (lnet_islocalnid(dest_nid)) {
- /* dest is another local NI; sender should have used
- * this node's NID on its own network */
- CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
-
- if (rdma_req && type == LNET_MSG_GET) {
- CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
-
- if (!the_lnet.ln_routing) {
- CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- goto drop;
- }
- }
-
- /* Message looks OK; we're not going to return an error, so we MUST
- * call back lnd_recv() come what may... */
-
- if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer(src_nid, 0)) { /* shall we now? */
- CERROR("%s, src %s: Dropping %s to simulate failure\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type));
- goto drop;
- }
-
- msg = lnet_msg_alloc();
- if (msg == NULL) {
- CERROR("%s, src %s: Dropping %s (out of memory)\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type));
- goto drop;
- }
-
- /* msg zeroed in lnet_msg_alloc;
- * i.e. flags all clear, pointers NULL etc
- */
-
- msg->msg_type = type;
- msg->msg_private = private;
- msg->msg_receiving = 1;
- msg->msg_len = msg->msg_wanted = payload_length;
- msg->msg_offset = 0;
- msg->msg_hdr = *hdr;
- /* for building message event */
- msg->msg_from = from_nid;
- if (!for_me) {
- msg->msg_target.pid = dest_pid;
- msg->msg_target.nid = dest_nid;
- msg->msg_routing = 1;
-
- } else {
- /* convert common msg->hdr fields to host byteorder */
- msg->msg_hdr.type = type;
- msg->msg_hdr.src_nid = src_nid;
- msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
- msg->msg_hdr.dest_nid = dest_nid;
- msg->msg_hdr.dest_pid = dest_pid;
- msg->msg_hdr.payload_length = payload_length;
- }
-
- lnet_net_lock(cpt);
- rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
- if (rc != 0) {
- lnet_net_unlock(cpt);
- CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type), rc);
- lnet_msg_free(msg);
- goto drop;
- }
-
- if (lnet_isrouter(msg->msg_rxpeer)) {
- lnet_peer_set_alive(msg->msg_rxpeer);
- if (avoid_asym_router_failure &&
- LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
-			/* received a remote message from a router; update
-			 * the remote NI status on this router.
-			 * NB: multi-hop routed messages will be ignored.
-			 */
- lnet_router_ni_update_locked(msg->msg_rxpeer,
- LNET_NIDNET(src_nid));
- }
- }
-
- lnet_msg_commit(msg, cpt);
-
- if (!for_me) {
- rc = lnet_parse_forward_locked(ni, msg);
- lnet_net_unlock(cpt);
-
- if (rc < 0)
- goto free_drop;
- if (rc == 0) {
- lnet_ni_recv(ni, msg->msg_private, msg, 0,
- 0, payload_length, payload_length);
- }
- return 0;
- }
-
- lnet_net_unlock(cpt);
-
- switch (type) {
- case LNET_MSG_ACK:
- rc = lnet_parse_ack(ni, msg);
- break;
- case LNET_MSG_PUT:
- rc = lnet_parse_put(ni, msg);
- break;
- case LNET_MSG_GET:
- rc = lnet_parse_get(ni, msg, rdma_req);
- break;
- case LNET_MSG_REPLY:
- rc = lnet_parse_reply(ni, msg);
- break;
- default:
- LASSERT(0);
- rc = -EPROTO;
- goto free_drop; /* prevent an unused label if !kernel */
- }
-
- if (rc == 0)
- return 0;
-
- LASSERT(rc == ENOENT);
-
- free_drop:
- LASSERT(msg->msg_md == NULL);
- lnet_finalize(ni, msg, rc);
-
- drop:
- lnet_drop_message(ni, cpt, private, payload_length);
- return 0;
-}
-EXPORT_SYMBOL(lnet_parse);
-
-void
-lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
-{
- while (!list_empty(head)) {
- lnet_process_id_t id = {0};
- lnet_msg_t *msg;
-
- msg = list_entry(head->next, lnet_msg_t, msg_list);
- list_del(&msg->msg_list);
-
- id.nid = msg->msg_hdr.src_nid;
- id.pid = msg->msg_hdr.src_pid;
-
- LASSERT(msg->msg_md == NULL);
- LASSERT(msg->msg_rx_delayed);
- LASSERT(msg->msg_rxpeer != NULL);
- LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
-
- CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
- libcfs_id2str(id),
- msg->msg_hdr.msg.put.ptl_index,
- msg->msg_hdr.msg.put.match_bits,
- msg->msg_hdr.msg.put.offset,
- msg->msg_hdr.payload_length, reason);
-
- /* NB I can't drop msg's ref on msg_rxpeer until after I've
- * called lnet_drop_message(), so I just hang onto msg as well
- * until that's done */
-
- lnet_drop_message(msg->msg_rxpeer->lp_ni,
- msg->msg_rxpeer->lp_cpt,
- msg->msg_private, msg->msg_len);
-		/*
-		 * NB: the message will not generate an event because it has no
-		 * attached MD, but we should still give an error code so
-		 * lnet_msg_decommit() can skip counter operations and checks.
-		 */
- lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
- }
-}
-
-void
-lnet_recv_delayed_msg_list(struct list_head *head)
-{
- while (!list_empty(head)) {
- lnet_msg_t *msg;
- lnet_process_id_t id;
-
- msg = list_entry(head->next, lnet_msg_t, msg_list);
- list_del(&msg->msg_list);
-
- /* md won't disappear under me, since each msg
- * holds a ref on it */
-
- id.nid = msg->msg_hdr.src_nid;
- id.pid = msg->msg_hdr.src_pid;
-
- LASSERT(msg->msg_rx_delayed);
- LASSERT(msg->msg_md != NULL);
- LASSERT(msg->msg_rxpeer != NULL);
- LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
-
- CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
- libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
- msg->msg_hdr.msg.put.match_bits,
- msg->msg_hdr.msg.put.offset,
- msg->msg_hdr.payload_length);
-
- lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
- }
-}
-
-/**
- * Initiate an asynchronous PUT operation.
- *
- * There are several events associated with a PUT: completion of the send on
- * the initiator node (LNET_EVENT_SEND), and when the send completes
- * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
- * that the operation was accepted by the target. The event LNET_EVENT_PUT is
- * used at the target node to indicate the completion of incoming data
- * delivery.
- *
- * The local events will be logged in the EQ associated with the MD pointed to
- * by \a mdh handle. Using a MD without an associated EQ results in these
- * events being discarded. In this case, the caller must have another
- * mechanism (e.g., a higher level protocol) for determining when it is safe
- * to modify the memory region associated with the MD.
- *
- * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
- * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
- *
- * \param self Indicates the NID of a local interface through which to send
- * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
- * \param mdh A handle for the MD that describes the memory to be sent. The MD
- * must be "free floating" (See LNetMDBind()).
- * \param ack Controls whether an acknowledgment is requested.
- * Acknowledgments are only sent when they are requested by the initiating
- * process and the target MD enables them.
- * \param target A process identifier for the target process.
- * \param portal The index in the \a target's portal table.
- * \param match_bits The match bits to use for MD selection at the target
- * process.
- * \param offset The offset into the target MD (only used when the target
- * MD has the LNET_MD_MANAGE_REMOTE option set).
- * \param hdr_data 64 bits of user data that can be included in the message
- * header. This data is written to an event queue entry at the target if an
- * EQ is present on the matching MD.
- *
- * \retval 0 Success, and only in this case events will be generated
- * and logged to EQ (if it exists).
- * \retval -EIO Simulated failure.
- * \retval -ENOMEM Memory allocation failure.
- * \retval -ENOENT Invalid MD object.
- *
- * \see lnet_event_t::hdr_data and lnet_event_kind_t.
- */
-int
-LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
- lnet_process_id_t target, unsigned int portal,
- __u64 match_bits, unsigned int offset,
- __u64 hdr_data)
-{
- struct lnet_msg *msg;
- struct lnet_libmd *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer(target.nid, 1)) { /* shall we now? */
- CERROR("Dropping PUT to %s: simulated failure\n",
- libcfs_id2str(target));
- return -EIO;
- }
-
- msg = lnet_msg_alloc();
- if (msg == NULL) {
- CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
- libcfs_id2str(target));
- return -ENOMEM;
- }
- msg->msg_vmflush = !!memory_pressure_get();
-
- cpt = lnet_cpt_of_cookie(mdh.cookie);
- lnet_res_lock(cpt);
-
- md = lnet_handle2md(&mdh);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
- CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
- match_bits, portal, libcfs_id2str(target),
- md == NULL ? -1 : md->md_threshold);
- if (md != NULL && md->md_me != NULL)
- CERROR("Source MD also attached to portal %d\n",
- md->md_me->me_portal);
- lnet_res_unlock(cpt);
-
- lnet_msg_free(msg);
- return -ENOENT;
- }
-
- CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
-
- lnet_msg_attach_md(msg, md, 0, 0);
-
- lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
-
- msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
- msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
- msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
- msg->msg_hdr.msg.put.hdr_data = hdr_data;
-
- /* NB handles only looked up by creator (no flips) */
- if (ack == LNET_ACK_REQ) {
- msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
- the_lnet.ln_interface_cookie;
- msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
- md->md_lh.lh_cookie;
- } else {
- msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
- LNET_WIRE_HANDLE_COOKIE_NONE;
- msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
- LNET_WIRE_HANDLE_COOKIE_NONE;
- }
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_SEND);
-
- rc = lnet_send(self, msg, LNET_NID_ANY);
- if (rc != 0) {
- CNETERR("Error sending PUT to %s: %d\n",
- libcfs_id2str(target), rc);
- lnet_finalize(NULL, msg, rc);
- }
-
- /* completion will be signalled by an event */
- return 0;
-}
-EXPORT_SYMBOL(LNetPut);
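
The kernel-doc above covers the semantics; a minimal usage sketch may help. It is illustrative only and assumes a caller that has already bound a free-floating MD (with an EQ attached) via LNetMDBind(); the names example_md_handle and peer, the portal index and the match bits are invented for the example.

static int example_send_put(lnet_handle_md_t example_md_handle,
			    lnet_process_id_t peer)
{
	int rc;

	rc = LNetPut(LNET_NID_ANY,	/* let LNet choose the local NI */
		     example_md_handle,	/* MD describing the payload */
		     LNET_ACK_REQ,	/* request an ACK from the target */
		     peer,		/* target process id */
		     12,		/* portal index at the target */
		     0x1234ULL,		/* match bits */
		     0,			/* offset into the target MD */
		     0);		/* hdr_data */
	if (rc != 0)		/* -EIO, -ENOMEM or -ENOENT */
		return rc;	/* no events will be generated */

	/* on success LNET_EVENT_SEND is logged, and LNET_EVENT_ACK follows
	 * if the matching MD at the target enables acknowledgments */
	return 0;
}
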
-
-lnet_msg_t *
-lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
-{
- /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
- * returns a msg for the LND to pass to lnet_finalize() when the sink
- * data has been received.
- *
- * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
- * lnet_finalize() is called on it, so the LND must call this first */
-
- struct lnet_msg *msg = lnet_msg_alloc();
- struct lnet_libmd *getmd = getmsg->msg_md;
- lnet_process_id_t peer_id = getmsg->msg_target;
- int cpt;
-
- LASSERT(!getmsg->msg_target_is_router);
- LASSERT(!getmsg->msg_routing);
-
- cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
- lnet_res_lock(cpt);
-
- LASSERT(getmd->md_refcount > 0);
-
- if (msg == NULL) {
- CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
- goto drop;
- }
-
- if (getmd->md_threshold == 0) {
- CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
- getmd);
- lnet_res_unlock(cpt);
- goto drop;
- }
-
- LASSERT(getmd->md_offset == 0);
-
- CDEBUG(D_NET, "%s: Reply from %s md %p\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
-
- /* setup information for lnet_build_msg_event */
- msg->msg_from = peer_id.nid;
- msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
- msg->msg_hdr.src_nid = peer_id.nid;
- msg->msg_hdr.payload_length = getmd->md_length;
- msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
-
- lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
- lnet_res_unlock(cpt);
-
- cpt = lnet_cpt_of_nid(peer_id.nid);
-
- lnet_net_lock(cpt);
- lnet_msg_commit(msg, cpt);
- lnet_net_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_REPLY);
-
- return msg;
-
- drop:
- cpt = lnet_cpt_of_nid(peer_id.nid);
-
- lnet_net_lock(cpt);
- the_lnet.ln_counters[cpt]->drop_count++;
- the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
- lnet_net_unlock(cpt);
-
- if (msg != NULL)
- lnet_msg_free(msg);
-
- return NULL;
-}
-EXPORT_SYMBOL(lnet_create_reply_msg);
-
-void
-lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
-{
-	/* Set the REPLY length, now that the RDMA that elides the REPLY
-	 * message has completed and I know it. */
- LASSERT(reply != NULL);
- LASSERT(reply->msg_type == LNET_MSG_GET);
- LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
-
- /* NB I trusted my peer to RDMA. If she tells me she's written beyond
- * the end of my buffer, I might as well be dead. */
- LASSERT(len <= reply->msg_ev.mlength);
-
- reply->msg_ev.mlength = len;
-}
-EXPORT_SYMBOL(lnet_set_reply_msg_len);
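
As a sketch of how these two helpers fit together for an optimized GET (the LND RDMAs straight into the GET MD and elides the wire REPLY): the REPLY message must be created before the original GET is finalized, since lnet_finalize() frees it, and the reported length must not exceed the GET MD's length. The function and parameter names below are assumptions, not part of this file.

static void example_lnd_optimized_get_done(lnet_ni_t *ni, lnet_msg_t *getmsg,
					   unsigned int bytes_rdmad, int status)
{
	/* must be done before getmsg is finalized (and thereby freed) */
	lnet_msg_t *reply = lnet_create_reply_msg(ni, getmsg);

	if (reply != NULL) {
		/* tell LNet how much sink data the RDMA actually delivered */
		lnet_set_reply_msg_len(ni, reply, bytes_rdmad);
		lnet_finalize(ni, reply, status);
	}

	/* completes the GET itself; getmsg must not be touched afterwards */
	lnet_finalize(ni, getmsg, status);
}
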
-
-/**
- * Initiate an asynchronous GET operation.
- *
- * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
- * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
- * the target node in the REPLY has been written to local MD.
- *
- * On the target node, an LNET_EVENT_GET is logged when the GET request
- * arrives and is accepted into a MD.
- *
- * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
- * \param mdh A handle for the MD that describes the memory into which the
- * requested data will be received. The MD must be "free floating"
- * (See LNetMDBind()).
- *
- * \retval 0 Success, and only in this case events will be generated
- * and logged to EQ (if it exists) of the MD.
- * \retval -EIO Simulated failure.
- * \retval -ENOMEM Memory allocation failure.
- * \retval -ENOENT Invalid MD object.
- */
-int
-LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
- lnet_process_id_t target, unsigned int portal,
- __u64 match_bits, unsigned int offset)
-{
- struct lnet_msg *msg;
- struct lnet_libmd *md;
- int cpt;
- int rc;
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer(target.nid, 1)) { /* shall we now? */
- CERROR("Dropping GET to %s: simulated failure\n",
- libcfs_id2str(target));
- return -EIO;
- }
-
- msg = lnet_msg_alloc();
- if (msg == NULL) {
- CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
- libcfs_id2str(target));
- return -ENOMEM;
- }
-
- cpt = lnet_cpt_of_cookie(mdh.cookie);
- lnet_res_lock(cpt);
-
- md = lnet_handle2md(&mdh);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
- CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
- match_bits, portal, libcfs_id2str(target),
- md == NULL ? -1 : md->md_threshold);
- if (md != NULL && md->md_me != NULL)
- CERROR("REPLY MD also attached to portal %d\n",
- md->md_me->me_portal);
-
- lnet_res_unlock(cpt);
-
- lnet_msg_free(msg);
- return -ENOENT;
- }
-
- CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
-
- lnet_msg_attach_md(msg, md, 0, 0);
-
- lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
-
- msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
- msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
- msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
- msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
-
- /* NB handles only looked up by creator (no flips) */
- msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
- the_lnet.ln_interface_cookie;
- msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
- md->md_lh.lh_cookie;
-
- lnet_res_unlock(cpt);
-
- lnet_build_msg_event(msg, LNET_EVENT_SEND);
-
- rc = lnet_send(self, msg, LNET_NID_ANY);
- if (rc < 0) {
- CNETERR("Error sending GET to %s: %d\n",
- libcfs_id2str(target), rc);
- lnet_finalize(NULL, msg, rc);
- }
-
- /* completion will be signalled by an event */
- return 0;
-}
-EXPORT_SYMBOL(LNetGet);
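
A matching sketch for LNetGet(), again assuming a free-floating MD (sink_md_handle) was bound beforehand to receive the REPLY data; the portal index, match bits and offset are illustrative values only.

static int example_send_get(lnet_handle_md_t sink_md_handle,
			    lnet_process_id_t peer)
{
	int rc;

	rc = LNetGet(LNET_NID_ANY,	/* any local NI */
		     sink_md_handle,	/* MD that receives the REPLY data */
		     peer,		/* target process id */
		     12,		/* portal index at the target */
		     0x1234ULL,		/* match bits */
		     0);		/* source offset at the target */
	if (rc != 0)		/* -EIO, -ENOMEM or -ENOENT */
		return rc;

	/* LNET_EVENT_SEND now; LNET_EVENT_REPLY once the data has landed */
	return 0;
}
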
-
-/**
- * Calculate distance to node at \a dstnid.
- *
- * \param dstnid Target NID.
- * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
- * is saved here.
- * \param orderp If not NULL, order of the route to reach \a dstnid is saved
- * here.
- *
- * \retval 0 If \a dstnid belongs to a local interface, and reserved option
- * local_nid_dist_zero is set, which is the default.
- * \retval positives Distance to target NID, i.e. number of hops plus one.
- * \retval -EHOSTUNREACH If \a dstnid is not reachable.
- */
-int
-LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
-{
- struct list_head *e;
- struct lnet_ni *ni;
- lnet_remotenet_t *rnet;
- __u32 dstnet = LNET_NIDNET(dstnid);
- int hops;
- int cpt;
- __u32 order = 2;
- struct list_head *rn_list;
-
- /* if !local_nid_dist_zero, I don't return a distance of 0 ever
- * (when lustre sees a distance of 0, it substitutes 0@lo), so I
- * keep order 0 free for 0@lo and order 1 free for a local NID
- * match */
-
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
- cpt = lnet_net_lock_current();
-
- list_for_each(e, &the_lnet.ln_nis) {
- ni = list_entry(e, lnet_ni_t, ni_list);
-
- if (ni->ni_nid == dstnid) {
- if (srcnidp != NULL)
- *srcnidp = dstnid;
- if (orderp != NULL) {
- if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
- *orderp = 0;
- else
- *orderp = 1;
- }
- lnet_net_unlock(cpt);
-
- return local_nid_dist_zero ? 0 : 1;
- }
-
- if (LNET_NIDNET(ni->ni_nid) == dstnet) {
- if (srcnidp != NULL)
- *srcnidp = ni->ni_nid;
- if (orderp != NULL)
- *orderp = order;
- lnet_net_unlock(cpt);
- return 1;
- }
-
- order++;
- }
-
- rn_list = lnet_net2rnethash(dstnet);
- list_for_each(e, rn_list) {
- rnet = list_entry(e, lnet_remotenet_t, lrn_list);
-
- if (rnet->lrn_net == dstnet) {
- lnet_route_t *route;
- lnet_route_t *shortest = NULL;
-
- LASSERT(!list_empty(&rnet->lrn_routes));
-
- list_for_each_entry(route, &rnet->lrn_routes,
- lr_list) {
- if (shortest == NULL ||
- route->lr_hops < shortest->lr_hops)
- shortest = route;
- }
-
- LASSERT(shortest != NULL);
- hops = shortest->lr_hops;
- if (srcnidp != NULL)
- *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
- if (orderp != NULL)
- *orderp = order;
- lnet_net_unlock(cpt);
- return hops + 1;
- }
- order++;
- }
-
- lnet_net_unlock(cpt);
- return -EHOSTUNREACH;
-}
-EXPORT_SYMBOL(LNetDist);
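
A small illustrative sketch of LNetDist() as documented above; the debug print and function name are assumptions for the example.

static void example_report_distance(lnet_nid_t dstnid)
{
	lnet_nid_t src_nid = LNET_NID_ANY;
	__u32 order = 0;
	int dist;

	dist = LNetDist(dstnid, &src_nid, &order);
	if (dist < 0)		/* -EHOSTUNREACH */
		CDEBUG(D_NET, "%s is unreachable\n", libcfs_nid2str(dstnid));
	else
		CDEBUG(D_NET, "%s: distance %d via %s, order %u\n",
		       libcfs_nid2str(dstnid), dist,
		       libcfs_nid2str(src_nid), order);
}
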
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
deleted file mode 100644
index 43977e8dffbb..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-msg.c
- *
- * Message decoding, parsing and finalizing routines
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/lnet/lib-lnet.h"
-
-void
-lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
-{
- memset(ev, 0, sizeof(*ev));
-
- ev->status = 0;
- ev->unlinked = 1;
- ev->type = LNET_EVENT_UNLINK;
- lnet_md_deconstruct(md, &ev->md);
- lnet_md2handle(&ev->md_handle, md);
-}
-
-/*
- * Don't need any lock, must be called after lnet_commit_md
- */
-void
-lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
-{
- lnet_hdr_t *hdr = &msg->msg_hdr;
- lnet_event_t *ev = &msg->msg_ev;
-
- LASSERT(!msg->msg_routing);
-
- ev->type = ev_type;
-
- if (ev_type == LNET_EVENT_SEND) {
- /* event for active message */
- ev->target.nid = le64_to_cpu(hdr->dest_nid);
- ev->target.pid = le32_to_cpu(hdr->dest_pid);
- ev->initiator.nid = LNET_NID_ANY;
- ev->initiator.pid = the_lnet.ln_pid;
- ev->sender = LNET_NID_ANY;
-
- } else {
- /* event for passive message */
- ev->target.pid = hdr->dest_pid;
- ev->target.nid = hdr->dest_nid;
- ev->initiator.pid = hdr->src_pid;
- ev->initiator.nid = hdr->src_nid;
- ev->rlength = hdr->payload_length;
- ev->sender = msg->msg_from;
- ev->mlength = msg->msg_wanted;
- ev->offset = msg->msg_offset;
- }
-
- switch (ev_type) {
- default:
- LBUG();
-
- case LNET_EVENT_PUT: /* passive PUT */
- ev->pt_index = hdr->msg.put.ptl_index;
- ev->match_bits = hdr->msg.put.match_bits;
- ev->hdr_data = hdr->msg.put.hdr_data;
- return;
-
- case LNET_EVENT_GET: /* passive GET */
- ev->pt_index = hdr->msg.get.ptl_index;
- ev->match_bits = hdr->msg.get.match_bits;
- ev->hdr_data = 0;
- return;
-
- case LNET_EVENT_ACK: /* ACK */
- ev->match_bits = hdr->msg.ack.match_bits;
- ev->mlength = hdr->msg.ack.mlength;
- return;
-
- case LNET_EVENT_REPLY: /* REPLY */
- return;
-
- case LNET_EVENT_SEND: /* active message */
- if (msg->msg_type == LNET_MSG_PUT) {
- ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
- ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
- ev->offset = le32_to_cpu(hdr->msg.put.offset);
- ev->mlength =
- ev->rlength = le32_to_cpu(hdr->payload_length);
- ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
-
- } else {
- LASSERT(msg->msg_type == LNET_MSG_GET);
- ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
- ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
- ev->mlength =
- ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
- ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
- ev->hdr_data = 0;
- }
- return;
- }
-}
-
-void
-lnet_msg_commit(lnet_msg_t *msg, int cpt)
-{
- struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
- lnet_counters_t *counters = the_lnet.ln_counters[cpt];
-
- /* routed message can be committed for both receiving and sending */
- LASSERT(!msg->msg_tx_committed);
-
- if (msg->msg_sending) {
- LASSERT(!msg->msg_receiving);
-
- msg->msg_tx_cpt = cpt;
- msg->msg_tx_committed = 1;
- if (msg->msg_rx_committed) { /* routed message REPLY */
- LASSERT(msg->msg_onactivelist);
- return;
- }
- } else {
- LASSERT(!msg->msg_sending);
- msg->msg_rx_cpt = cpt;
- msg->msg_rx_committed = 1;
- }
-
- LASSERT(!msg->msg_onactivelist);
- msg->msg_onactivelist = 1;
- list_add(&msg->msg_activelist, &container->msc_active);
-
- counters->msgs_alloc++;
- if (counters->msgs_alloc > counters->msgs_max)
- counters->msgs_max = counters->msgs_alloc;
-}
-
-static void
-lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
-{
- lnet_counters_t *counters;
- lnet_event_t *ev = &msg->msg_ev;
-
- LASSERT(msg->msg_tx_committed);
- if (status != 0)
- goto out;
-
- counters = the_lnet.ln_counters[msg->msg_tx_cpt];
- switch (ev->type) {
- default: /* routed message */
- LASSERT(msg->msg_routing);
- LASSERT(msg->msg_rx_committed);
- LASSERT(ev->type == 0);
-
- counters->route_length += msg->msg_len;
- counters->route_count++;
- goto out;
-
- case LNET_EVENT_PUT:
- /* should have been decommitted */
- LASSERT(!msg->msg_rx_committed);
- /* overwritten while sending ACK */
- LASSERT(msg->msg_type == LNET_MSG_ACK);
- msg->msg_type = LNET_MSG_PUT; /* fix type */
- break;
-
- case LNET_EVENT_SEND:
- LASSERT(!msg->msg_rx_committed);
- if (msg->msg_type == LNET_MSG_PUT)
- counters->send_length += msg->msg_len;
- break;
-
- case LNET_EVENT_GET:
- LASSERT(msg->msg_rx_committed);
- /* overwritten while sending reply, we should never be
- * here for optimized GET */
- LASSERT(msg->msg_type == LNET_MSG_REPLY);
- msg->msg_type = LNET_MSG_GET; /* fix type */
- break;
- }
-
- counters->send_count++;
- out:
- lnet_return_tx_credits_locked(msg);
- msg->msg_tx_committed = 0;
-}
-
-static void
-lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
-{
- lnet_counters_t *counters;
- lnet_event_t *ev = &msg->msg_ev;
-
- LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
- LASSERT(msg->msg_rx_committed);
-
- if (status != 0)
- goto out;
-
- counters = the_lnet.ln_counters[msg->msg_rx_cpt];
- switch (ev->type) {
- default:
- LASSERT(ev->type == 0);
- LASSERT(msg->msg_routing);
- goto out;
-
- case LNET_EVENT_ACK:
- LASSERT(msg->msg_type == LNET_MSG_ACK);
- break;
-
- case LNET_EVENT_GET:
- /* type is "REPLY" if it's an optimized GET on passive side,
- * because optimized GET will never be committed for sending,
- * so message type wouldn't be changed back to "GET" by
- * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
- LASSERT(msg->msg_type == LNET_MSG_REPLY ||
- msg->msg_type == LNET_MSG_GET);
- counters->send_length += msg->msg_wanted;
- break;
-
- case LNET_EVENT_PUT:
- LASSERT(msg->msg_type == LNET_MSG_PUT);
- break;
-
- case LNET_EVENT_REPLY:
- /* type is "GET" if it's an optimized GET on active side,
- * see details in lnet_create_reply_msg() */
- LASSERT(msg->msg_type == LNET_MSG_GET ||
- msg->msg_type == LNET_MSG_REPLY);
- break;
- }
-
- counters->recv_count++;
- if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
- counters->recv_length += msg->msg_wanted;
-
- out:
- lnet_return_rx_credits_locked(msg);
- msg->msg_rx_committed = 0;
-}
-
-void
-lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status)
-{
- int cpt2 = cpt;
-
- LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
- LASSERT(msg->msg_onactivelist);
-
- if (msg->msg_tx_committed) { /* always decommit for sending first */
- LASSERT(cpt == msg->msg_tx_cpt);
- lnet_msg_decommit_tx(msg, status);
- }
-
- if (msg->msg_rx_committed) {
- /* forwarding msg committed for both receiving and sending */
- if (cpt != msg->msg_rx_cpt) {
- lnet_net_unlock(cpt);
- cpt2 = msg->msg_rx_cpt;
- lnet_net_lock(cpt2);
- }
- lnet_msg_decommit_rx(msg, status);
- }
-
- list_del(&msg->msg_activelist);
- msg->msg_onactivelist = 0;
-
- the_lnet.ln_counters[cpt2]->msgs_alloc--;
-
- if (cpt2 != cpt) {
- lnet_net_unlock(cpt2);
- lnet_net_lock(cpt);
- }
-}
-
-void
-lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
- unsigned int offset, unsigned int mlen)
-{
-	/* NB: @offset and @mlen are only useful for receiving */
-	/* Here we attach the MD to the lnet_msg and mark it busy by
-	 * decrementing its threshold. Come what may, the lnet_msg "owns"
-	 * the MD until a call to lnet_msg_detach_md() or lnet_finalize()
-	 * signals completion. */
- LASSERT(!msg->msg_routing);
-
- msg->msg_md = md;
- if (msg->msg_receiving) { /* committed for receiving */
- msg->msg_offset = offset;
- msg->msg_wanted = mlen;
- }
-
- md->md_refcount++;
- if (md->md_threshold != LNET_MD_THRESH_INF) {
- LASSERT(md->md_threshold > 0);
- md->md_threshold--;
- }
-
- /* build umd in event */
- lnet_md2handle(&msg->msg_ev.md_handle, md);
- lnet_md_deconstruct(md, &msg->msg_ev.md);
-}
-
-void
-lnet_msg_detach_md(lnet_msg_t *msg, int status)
-{
- lnet_libmd_t *md = msg->msg_md;
- int unlink;
-
- /* Now it's safe to drop my caller's ref */
- md->md_refcount--;
- LASSERT(md->md_refcount >= 0);
-
- unlink = lnet_md_unlinkable(md);
- if (md->md_eq != NULL) {
- msg->msg_ev.status = status;
- msg->msg_ev.unlinked = unlink;
- lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
- }
-
- if (unlink)
- lnet_md_unlink(md);
-
- msg->msg_md = NULL;
-}
-
-static int
-lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
-{
- lnet_handle_wire_t ack_wmd;
- int rc;
- int status = msg->msg_ev.status;
-
- LASSERT(msg->msg_onactivelist);
-
- if (status == 0 && msg->msg_ack) {
- /* Only send an ACK if the PUT completed successfully */
-
- lnet_msg_decommit(msg, cpt, 0);
-
- msg->msg_ack = 0;
- lnet_net_unlock(cpt);
-
- LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
- LASSERT(!msg->msg_routing);
-
- ack_wmd = msg->msg_hdr.msg.put.ack_wmd;
-
- lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0);
-
- msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
- msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
- msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);
-
-		/* NB: we probably want to use the NID of msg::msg_from as the
-		 * 3rd parameter (router NID) if it's a routed message */
- rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);
-
- lnet_net_lock(cpt);
-		/*
-		 * NB: the message is committed for sending; we should return
-		 * on success because the LND will finalize it later.
-		 *
-		 * Also, the message may be committed for sending and yet fail
-		 * before being delivered to the LND (e.g. ENOMEM).  In that
-		 * case we can't fall through either, because the CPT for
-		 * sending can differ from the CPT for receiving, so we must
-		 * return to lnet_finalize() to make sure we lock the correct
-		 * partition.
-		 */
- return rc;
-
- } else if (status == 0 && /* OK so far */
- (msg->msg_routing && !msg->msg_sending)) {
- /* not forwarded */
- LASSERT(!msg->msg_receiving); /* called back recv already */
- lnet_net_unlock(cpt);
-
- rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);
-
- lnet_net_lock(cpt);
-		/*
-		 * NB: the message is committed for sending; we should return
-		 * on success because the LND will finalize it later.
-		 *
-		 * Also, the message may be committed for sending and yet fail
-		 * before being delivered to the LND (e.g. ENOMEM).  In that
-		 * case we can't fall through either:
-		 * - the rule is that a message must decommit for sending
-		 *   first if it's committed for both sending and receiving
-		 * - the CPT for sending can differ from the CPT for receiving,
-		 *   so we must return to lnet_finalize() to make sure we
-		 *   lock the correct partition.
-		 */
- return rc;
- }
-
- lnet_msg_decommit(msg, cpt, status);
- lnet_msg_free(msg);
- return 0;
-}
-
-void
-lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
-{
- struct lnet_msg_container *container;
- int my_slot;
- int cpt;
- int rc;
- int i;
-
- LASSERT(!in_interrupt());
-
- if (msg == NULL)
- return;
-#if 0
- CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
- lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
- msg->msg_target_is_router ? "t" : "",
- msg->msg_routing ? "X" : "",
- msg->msg_ack ? "A" : "",
- msg->msg_sending ? "S" : "",
- msg->msg_receiving ? "R" : "",
- msg->msg_delayed ? "d" : "",
- msg->msg_txcredit ? "C" : "",
- msg->msg_peertxcredit ? "c" : "",
- msg->msg_rtrcredit ? "F" : "",
- msg->msg_peerrtrcredit ? "f" : "",
- msg->msg_onactivelist ? "!" : "",
- msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
- msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
-#endif
- msg->msg_ev.status = status;
-
- if (msg->msg_md != NULL) {
- cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
-
- lnet_res_lock(cpt);
- lnet_msg_detach_md(msg, status);
- lnet_res_unlock(cpt);
- }
-
- again:
- rc = 0;
- if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
- /* not committed to network yet */
- LASSERT(!msg->msg_onactivelist);
- lnet_msg_free(msg);
- return;
- }
-
-	/*
-	 * NB: a routed message can be committed for both receiving and
-	 * sending; we should finalize in LIFO order to keep the counters
-	 * correct (finalize sending first, then finalize receiving).
-	 */
- cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
- lnet_net_lock(cpt);
-
- container = the_lnet.ln_msg_containers[cpt];
- list_add_tail(&msg->msg_list, &container->msc_finalizing);
-
- /* Recursion breaker. Don't complete the message here if I am (or
- * enough other threads are) already completing messages */
-
- my_slot = -1;
- for (i = 0; i < container->msc_nfinalizers; i++) {
- if (container->msc_finalizers[i] == current)
- break;
-
- if (my_slot < 0 && container->msc_finalizers[i] == NULL)
- my_slot = i;
- }
-
- if (i < container->msc_nfinalizers || my_slot < 0) {
- lnet_net_unlock(cpt);
- return;
- }
-
- container->msc_finalizers[my_slot] = current;
-
- while (!list_empty(&container->msc_finalizing)) {
- msg = list_entry(container->msc_finalizing.next,
- lnet_msg_t, msg_list);
-
- list_del(&msg->msg_list);
-
- /* NB drops and regains the lnet lock if it actually does
- * anything, so my finalizing friends can chomp along too */
- rc = lnet_complete_msg_locked(msg, cpt);
- if (rc != 0)
- break;
- }
-
- container->msc_finalizers[my_slot] = NULL;
- lnet_net_unlock(cpt);
-
- if (rc != 0)
- goto again;
-}
-EXPORT_SYMBOL(lnet_finalize);
-
-void
-lnet_msg_container_cleanup(struct lnet_msg_container *container)
-{
- int count = 0;
-
- if (container->msc_init == 0)
- return;
-
- while (!list_empty(&container->msc_active)) {
- lnet_msg_t *msg = list_entry(container->msc_active.next,
- lnet_msg_t, msg_activelist);
-
- LASSERT(msg->msg_onactivelist);
- msg->msg_onactivelist = 0;
- list_del(&msg->msg_activelist);
- lnet_msg_free(msg);
- count++;
- }
-
- if (count > 0)
- CERROR("%d active msg on exit\n", count);
-
- if (container->msc_finalizers != NULL) {
- LIBCFS_FREE(container->msc_finalizers,
- container->msc_nfinalizers *
- sizeof(*container->msc_finalizers));
- container->msc_finalizers = NULL;
- }
-#ifdef LNET_USE_LIB_FREELIST
- lnet_freelist_fini(&container->msc_freelist);
-#endif
- container->msc_init = 0;
-}
-
-int
-lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
-{
- int rc;
-
- container->msc_init = 1;
-
- INIT_LIST_HEAD(&container->msc_active);
- INIT_LIST_HEAD(&container->msc_finalizing);
-
-#ifdef LNET_USE_LIB_FREELIST
- memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t));
-
- rc = lnet_freelist_init(&container->msc_freelist,
- LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
- if (rc != 0) {
- CERROR("Failed to init freelist for message container\n");
- lnet_msg_container_cleanup(container);
- return rc;
- }
-#else
- rc = 0;
-#endif
-	/* number of CPUs in this CPT */
- container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
-
- LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
- container->msc_nfinalizers *
- sizeof(*container->msc_finalizers));
-
- if (container->msc_finalizers == NULL) {
- CERROR("Failed to allocate message finalizers\n");
- lnet_msg_container_cleanup(container);
- return -ENOMEM;
- }
-
- return rc;
-}
-
-void
-lnet_msg_containers_destroy(void)
-{
- struct lnet_msg_container *container;
- int i;
-
- if (the_lnet.ln_msg_containers == NULL)
- return;
-
- cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
- lnet_msg_container_cleanup(container);
-
- cfs_percpt_free(the_lnet.ln_msg_containers);
- the_lnet.ln_msg_containers = NULL;
-}
-
-int
-lnet_msg_containers_create(void)
-{
- struct lnet_msg_container *container;
- int rc;
- int i;
-
- the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*container));
-
- if (the_lnet.ln_msg_containers == NULL) {
- CERROR("Failed to allocate cpu-partition data for network\n");
- return -ENOMEM;
- }
-
- cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
- rc = lnet_msg_container_setup(container, i);
- if (rc != 0) {
- lnet_msg_containers_destroy();
- return rc;
- }
- }
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
deleted file mode 100644
index b4f573ab62cc..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ /dev/null
@@ -1,935 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/lib-ptl.c
- *
- * portal & match routines
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/lnet/lib-lnet.h"
-
-/* NB: add /proc interfaces in upcoming patches */
-int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
-module_param(portal_rotor, int, 0644);
-MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
-
-static int
-lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
- __u64 mbits, __u64 ignore_bits)
-{
- struct lnet_portal *ptl = the_lnet.ln_portals[index];
- int unique;
-
- unique = ignore_bits == 0 &&
- match_id.nid != LNET_NID_ANY &&
- match_id.pid != LNET_PID_ANY;
-
- LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl));
-
- /* prefer to check w/o any lock */
- if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl)))
- goto match;
-
- /* unset, new portal */
- lnet_ptl_lock(ptl);
- /* check again with lock */
- if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) {
- lnet_ptl_unlock(ptl);
- goto match;
- }
-
- /* still not set */
- if (unique)
- lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE);
- else
- lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD);
-
- lnet_ptl_unlock(ptl);
-
- return 1;
-
- match:
- if ((lnet_ptl_is_unique(ptl) && !unique) ||
- (lnet_ptl_is_wildcard(ptl) && unique))
- return 0;
- return 1;
-}
-
-static void
-lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
-{
- struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
- int i;
-
- /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
- LASSERT(lnet_ptl_is_wildcard(ptl));
-
- mtable->mt_enabled = 1;
-
- ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt;
- for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) {
- LASSERT(ptl->ptl_mt_maps[i] != cpt);
- if (ptl->ptl_mt_maps[i] < cpt)
- break;
-
- /* swap to order */
- ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i];
- ptl->ptl_mt_maps[i] = cpt;
- }
-
- ptl->ptl_mt_nmaps++;
-}
-
-static void
-lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
-{
- struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
- int i;
-
- /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
- LASSERT(lnet_ptl_is_wildcard(ptl));
-
- if (LNET_CPT_NUMBER == 1)
- return; /* never disable the only match-table */
-
- mtable->mt_enabled = 0;
-
- LASSERT(ptl->ptl_mt_nmaps > 0 &&
- ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER);
-
- /* remove it from mt_maps */
- ptl->ptl_mt_nmaps--;
- for (i = 0; i < ptl->ptl_mt_nmaps; i++) {
- if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */
- ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1];
- }
-}
-
-static int
-lnet_try_match_md(lnet_libmd_t *md,
- struct lnet_match_info *info, struct lnet_msg *msg)
-{
- /* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
- * lnet_match_blocked_msg() relies on this to avoid races */
- unsigned int offset;
- unsigned int mlength;
- lnet_me_t *me = md->md_me;
-
- /* MD exhausted */
- if (lnet_md_exhausted(md))
- return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
-
- /* mismatched MD op */
- if ((md->md_options & info->mi_opc) == 0)
- return LNET_MATCHMD_NONE;
-
- /* mismatched ME nid/pid? */
- if (me->me_match_id.nid != LNET_NID_ANY &&
- me->me_match_id.nid != info->mi_id.nid)
- return LNET_MATCHMD_NONE;
-
- if (me->me_match_id.pid != LNET_PID_ANY &&
- me->me_match_id.pid != info->mi_id.pid)
- return LNET_MATCHMD_NONE;
-
- /* mismatched ME matchbits? */
- if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
- return LNET_MATCHMD_NONE;
-
- /* Hurrah! This _is_ a match; check it out... */
-
- if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
- offset = md->md_offset;
- else
- offset = info->mi_roffset;
-
- if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
- mlength = md->md_max_size;
- LASSERT(md->md_offset + mlength <= md->md_length);
- } else {
- mlength = md->md_length - offset;
- }
-
- if (info->mi_rlength <= mlength) { /* fits in allowed space */
- mlength = info->mi_rlength;
- } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
- /* this packet _really_ is too big */
- CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
- libcfs_id2str(info->mi_id), info->mi_mbits,
- info->mi_rlength, md->md_length - offset, mlength);
-
- return LNET_MATCHMD_DROP;
- }
-
- /* Commit to this ME/MD */
- CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
- (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
- info->mi_portal, libcfs_id2str(info->mi_id), mlength,
- info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
-
- lnet_msg_attach_md(msg, md, offset, mlength);
- md->md_offset = offset + mlength;
-
- if (!lnet_md_exhausted(md))
- return LNET_MATCHMD_OK;
-
- /* Auto-unlink NOW, so the ME gets unlinked if required.
- * We bumped md->md_refcount above so the MD just gets flagged
- * for unlink when it is finalized. */
- if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
- lnet_md_unlink(md);
-
- return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
-}
-
-static struct lnet_match_table *
-lnet_match2mt(struct lnet_portal *ptl, lnet_process_id_t id, __u64 mbits)
-{
- if (LNET_CPT_NUMBER == 1)
- return ptl->ptl_mtables[0]; /* the only one */
-
- /* if it's a unique portal, return match-table hashed by NID */
- return lnet_ptl_is_unique(ptl) ?
- ptl->ptl_mtables[lnet_cpt_of_nid(id.nid)] : NULL;
-}
-
-struct lnet_match_table *
-lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
- __u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos)
-{
- struct lnet_portal *ptl;
- struct lnet_match_table *mtable;
-
- /* NB: called w/o lock */
- LASSERT(index < the_lnet.ln_nportals);
-
- if (!lnet_ptl_match_type(index, id, mbits, ignore_bits))
- return NULL;
-
- ptl = the_lnet.ln_portals[index];
-
- mtable = lnet_match2mt(ptl, id, mbits);
- if (mtable != NULL) /* unique portal or only one match-table */
- return mtable;
-
- /* it's a wildcard portal */
- switch (pos) {
- default:
- return NULL;
- case LNET_INS_BEFORE:
- case LNET_INS_AFTER:
- /* posted by no affinity thread, always hash to specific
- * match-table to avoid buffer stealing which is heavy */
- return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
- case LNET_INS_LOCAL:
- /* posted by cpu-affinity thread */
- return ptl->ptl_mtables[lnet_cpt_current()];
- }
-}
-
-static struct lnet_match_table *
-lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
-{
- struct lnet_match_table *mtable;
- struct lnet_portal *ptl;
- unsigned int nmaps;
- unsigned int rotor;
- unsigned int cpt;
- bool routed;
-
- /* NB: called w/o lock */
- LASSERT(info->mi_portal < the_lnet.ln_nportals);
- ptl = the_lnet.ln_portals[info->mi_portal];
-
- LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
-
- mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
- if (mtable != NULL)
- return mtable;
-
- /* it's a wildcard portal */
- routed = LNET_NIDNET(msg->msg_hdr.src_nid) !=
- LNET_NIDNET(msg->msg_hdr.dest_nid);
-
- if (portal_rotor == LNET_PTL_ROTOR_OFF ||
- (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
- cpt = lnet_cpt_current();
- if (ptl->ptl_mtables[cpt]->mt_enabled)
- return ptl->ptl_mtables[cpt];
- }
-
- rotor = ptl->ptl_rotor++; /* get round-robin factor */
- if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
- cpt = lnet_cpt_of_nid(msg->msg_hdr.src_nid);
- else
- cpt = rotor % LNET_CPT_NUMBER;
-
- if (!ptl->ptl_mtables[cpt]->mt_enabled) {
- /* is there any active entry for this portal? */
- nmaps = ptl->ptl_mt_nmaps;
- /* map to an active mtable to avoid heavy "stealing" */
- if (nmaps != 0) {
- /* NB: there is possibility that ptl_mt_maps is being
- * changed because we are not under protection of
- * lnet_ptl_lock, but it shouldn't hurt anything */
- cpt = ptl->ptl_mt_maps[rotor % nmaps];
- }
- }
-
- return ptl->ptl_mtables[cpt];
-}
-
-static int
-lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
-{
- __u64 *bmap;
- int i;
-
- if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
- return 0;
-
- if (pos < 0) { /* check all bits */
- for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) {
- if (mtable->mt_exhausted[i] != (__u64)(-1))
- return 0;
- }
- return 1;
- }
-
- LASSERT(pos <= LNET_MT_HASH_IGNORE);
- /* mtable::mt_mhash[pos] is marked as exhausted or not */
- bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
- pos &= (1 << LNET_MT_BITS_U64) - 1;
-
- return ((*bmap) & (1ULL << pos)) != 0;
-}
-
-static void
-lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
-{
- __u64 *bmap;
-
- LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
- LASSERT(pos <= LNET_MT_HASH_IGNORE);
-
- /* set mtable::mt_mhash[pos] as exhausted/non-exhausted */
- bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
- pos &= (1 << LNET_MT_BITS_U64) - 1;
-
- if (!exhausted)
- *bmap &= ~(1ULL << pos);
- else
- *bmap |= 1ULL << pos;
-}
-
-struct list_head *
-lnet_mt_match_head(struct lnet_match_table *mtable,
- lnet_process_id_t id, __u64 mbits)
-{
- struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
-
- if (lnet_ptl_is_wildcard(ptl)) {
- return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK];
- } else {
- unsigned long hash = mbits + id.nid + id.pid;
-
- LASSERT(lnet_ptl_is_unique(ptl));
- hash = hash_long(hash, LNET_MT_HASH_BITS);
- return &mtable->mt_mhash[hash];
- }
-}
-
-int
-lnet_mt_match_md(struct lnet_match_table *mtable,
- struct lnet_match_info *info, struct lnet_msg *msg)
-{
- struct list_head *head;
- lnet_me_t *me;
- lnet_me_t *tmp;
- int exhausted = 0;
- int rc;
-
- /* any ME with ignore bits? */
- if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
- head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
- else
- head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
- again:
- /* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */
- if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
- exhausted = LNET_MATCHMD_EXHAUSTED;
-
- list_for_each_entry_safe(me, tmp, head, me_list) {
- /* ME attached but MD not attached yet */
- if (me->me_md == NULL)
- continue;
-
- LASSERT(me == me->me_md->md_me);
-
- rc = lnet_try_match_md(me->me_md, info, msg);
- if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
- exhausted = 0; /* mlist is not empty */
-
- if ((rc & LNET_MATCHMD_FINISH) != 0) {
- /* don't return EXHAUSTED bit because we don't know
- * whether the mlist is empty or not */
- return rc & ~LNET_MATCHMD_EXHAUSTED;
- }
- }
-
- if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */
- lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1);
- if (!lnet_mt_test_exhausted(mtable, -1))
- exhausted = 0;
- }
-
- if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
- head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
- goto again; /* re-check MEs w/o ignore-bits */
- }
-
- if (info->mi_opc == LNET_MD_OP_GET ||
- !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
- return exhausted | LNET_MATCHMD_DROP;
-
- return exhausted | LNET_MATCHMD_NONE;
-}
-
-static int
-lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
-{
- int rc;
-
- /* message arrived before any buffer posting on this portal,
- * simply delay or drop this message */
- if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
- return 0;
-
- lnet_ptl_lock(ptl);
- /* check it again with hold of lock */
- if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) {
- lnet_ptl_unlock(ptl);
- return 0;
- }
-
- if (lnet_ptl_is_lazy(ptl)) {
- if (msg->msg_rx_ready_delay) {
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
- }
- rc = LNET_MATCHMD_NONE;
- } else {
- rc = LNET_MATCHMD_DROP;
- }
-
- lnet_ptl_unlock(ptl);
- return rc;
-}
-
-static int
-lnet_ptl_match_delay(struct lnet_portal *ptl,
- struct lnet_match_info *info, struct lnet_msg *msg)
-{
- int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
- int rc = 0;
- int i;
-
- /* steal buffer from other CPTs, and delay it if nothing to steal,
- * this function is more expensive than a regular match, but we
- * don't expect it can happen a lot */
- LASSERT(lnet_ptl_is_wildcard(ptl));
-
- for (i = 0; i < LNET_CPT_NUMBER; i++) {
- struct lnet_match_table *mtable;
- int cpt;
-
- cpt = (first + i) % LNET_CPT_NUMBER;
- mtable = ptl->ptl_mtables[cpt];
- if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
- continue;
-
- lnet_res_lock(cpt);
- lnet_ptl_lock(ptl);
-
- if (i == 0) { /* the first try, attach on stealing list */
- list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_stealing);
- }
-
- if (!list_empty(&msg->msg_list)) { /* on stealing list */
- rc = lnet_mt_match_md(mtable, info, msg);
-
- if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
- mtable->mt_enabled)
- lnet_ptl_disable_mt(ptl, cpt);
-
- if ((rc & LNET_MATCHMD_FINISH) != 0)
- list_del_init(&msg->msg_list);
-
- } else {
- /* could be matched by lnet_ptl_attach_md()
- * which is called by another thread */
- rc = msg->msg_md == NULL ?
- LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
- }
-
- if (!list_empty(&msg->msg_list) && /* not matched yet */
- (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
- ptl->ptl_mt_nmaps == 0 || /* no active CPT */
- (ptl->ptl_mt_nmaps == 1 && /* the only active CPT */
- ptl->ptl_mt_maps[0] == cpt))) {
- /* nothing to steal, delay or drop */
- list_del_init(&msg->msg_list);
-
- if (lnet_ptl_is_lazy(ptl)) {
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
- rc = LNET_MATCHMD_NONE;
- } else {
- rc = LNET_MATCHMD_DROP;
- }
- }
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(cpt);
-
- if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
- break;
- }
-
- return rc;
-}
-
-int
-lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
-{
- struct lnet_match_table *mtable;
- struct lnet_portal *ptl;
- int rc;
-
- CDEBUG(D_NET, "Request from %s of length %d into portal %d MB=%#llx\n",
- libcfs_id2str(info->mi_id), info->mi_rlength, info->mi_portal,
- info->mi_mbits);
-
- if (info->mi_portal >= the_lnet.ln_nportals) {
- CERROR("Invalid portal %d not in [0-%d]\n",
- info->mi_portal, the_lnet.ln_nportals);
- return LNET_MATCHMD_DROP;
- }
-
- ptl = the_lnet.ln_portals[info->mi_portal];
- rc = lnet_ptl_match_early(ptl, msg);
- if (rc != 0) /* matched or delayed early message */
- return rc;
-
- mtable = lnet_mt_of_match(info, msg);
- lnet_res_lock(mtable->mt_cpt);
-
- if (the_lnet.ln_shutdown) {
- rc = LNET_MATCHMD_DROP;
- goto out1;
- }
-
- rc = lnet_mt_match_md(mtable, info, msg);
- if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
- lnet_ptl_lock(ptl);
- lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
- lnet_ptl_unlock(ptl);
- }
-
- if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
- goto out1;
-
- if (!msg->msg_rx_ready_delay)
- goto out1;
-
- LASSERT(lnet_ptl_is_lazy(ptl));
- LASSERT(!msg->msg_rx_delayed);
-
- /* NB: we don't expect "delay" can happen a lot */
- if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) {
- lnet_ptl_lock(ptl);
-
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(mtable->mt_cpt);
-
- } else {
- lnet_res_unlock(mtable->mt_cpt);
- rc = lnet_ptl_match_delay(ptl, info, msg);
- }
-
- if (msg->msg_rx_delayed) {
- CDEBUG(D_NET,
- "Delaying %s from %s ptl %d MB %#llx off %d len %d\n",
- info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
- libcfs_id2str(info->mi_id), info->mi_portal,
- info->mi_mbits, info->mi_roffset, info->mi_rlength);
- }
- goto out0;
- out1:
- lnet_res_unlock(mtable->mt_cpt);
- out0:
- /* EXHAUSTED bit is only meaningful for internal functions */
- return rc & ~LNET_MATCHMD_EXHAUSTED;
-}
-
-void
-lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md)
-{
- LASSERT(me->me_md == md && md->md_me == me);
-
- me->me_md = NULL;
- md->md_me = NULL;
-}
-
-/* called with lnet_res_lock held */
-void
-lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
- struct list_head *matches, struct list_head *drops)
-{
- struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
- struct lnet_match_table *mtable;
- struct list_head *head;
- lnet_msg_t *tmp;
- lnet_msg_t *msg;
- int exhausted = 0;
- int cpt;
-
- LASSERT(md->md_refcount == 0); /* a brand new MD */
-
- me->me_md = md;
- md->md_me = me;
-
- cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
- mtable = ptl->ptl_mtables[cpt];
-
- if (list_empty(&ptl->ptl_msg_stealing) &&
- list_empty(&ptl->ptl_msg_delayed) &&
- !lnet_mt_test_exhausted(mtable, me->me_pos))
- return;
-
- lnet_ptl_lock(ptl);
- head = &ptl->ptl_msg_stealing;
- again:
- list_for_each_entry_safe(msg, tmp, head, msg_list) {
- struct lnet_match_info info;
- lnet_hdr_t *hdr;
- int rc;
-
- LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
-
- hdr = &msg->msg_hdr;
- info.mi_id.nid = hdr->src_nid;
- info.mi_id.pid = hdr->src_pid;
- info.mi_opc = LNET_MD_OP_PUT;
- info.mi_portal = hdr->msg.put.ptl_index;
- info.mi_rlength = hdr->payload_length;
- info.mi_roffset = hdr->msg.put.offset;
- info.mi_mbits = hdr->msg.put.match_bits;
-
- rc = lnet_try_match_md(md, &info, msg);
-
- exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
- if ((rc & LNET_MATCHMD_NONE) != 0) {
- if (exhausted)
- break;
- continue;
- }
-
- /* Hurrah! This _is_ a match */
- LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
- list_del_init(&msg->msg_list);
-
- if (head == &ptl->ptl_msg_stealing) {
- if (exhausted)
- break;
- /* stealing thread will handle the message */
- continue;
- }
-
- if ((rc & LNET_MATCHMD_OK) != 0) {
- list_add_tail(&msg->msg_list, matches);
-
- CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
- libcfs_id2str(info.mi_id),
- info.mi_portal, info.mi_mbits,
- info.mi_roffset, info.mi_rlength);
- } else {
- list_add_tail(&msg->msg_list, drops);
- }
-
- if (exhausted)
- break;
- }
-
- if (!exhausted && head == &ptl->ptl_msg_stealing) {
- head = &ptl->ptl_msg_delayed;
- goto again;
- }
-
- if (lnet_ptl_is_wildcard(ptl) && !exhausted) {
- lnet_mt_set_exhausted(mtable, me->me_pos, 0);
- if (!mtable->mt_enabled)
- lnet_ptl_enable_mt(ptl, cpt);
- }
-
- lnet_ptl_unlock(ptl);
-}
-
-static void
-lnet_ptl_cleanup(struct lnet_portal *ptl)
-{
- struct lnet_match_table *mtable;
- int i;
-
- if (ptl->ptl_mtables == NULL) /* uninitialized portal */
- return;
-
- LASSERT(list_empty(&ptl->ptl_msg_delayed));
- LASSERT(list_empty(&ptl->ptl_msg_stealing));
- cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
- struct list_head *mhash;
- lnet_me_t *me;
- int j;
-
- if (mtable->mt_mhash == NULL) /* uninitialized match-table */
- continue;
-
- mhash = mtable->mt_mhash;
- /* cleanup ME */
- for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
- while (!list_empty(&mhash[j])) {
- me = list_entry(mhash[j].next,
- lnet_me_t, me_list);
- CERROR("Active ME %p on exit\n", me);
- list_del(&me->me_list);
- lnet_me_free(me);
- }
- }
- /* the extra entry is for MEs with ignore bits */
- LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
- }
-
- cfs_percpt_free(ptl->ptl_mtables);
- ptl->ptl_mtables = NULL;
-}
-
-static int
-lnet_ptl_setup(struct lnet_portal *ptl, int index)
-{
- struct lnet_match_table *mtable;
- struct list_head *mhash;
- int i;
- int j;
-
- ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct lnet_match_table));
- if (ptl->ptl_mtables == NULL) {
- CERROR("Failed to create match table for portal %d\n", index);
- return -ENOMEM;
- }
-
- ptl->ptl_index = index;
- INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
- INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
- spin_lock_init(&ptl->ptl_lock);
- cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
- /* the extra entry is for MEs with ignore bits */
- LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
- sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
- if (mhash == NULL) {
- CERROR("Failed to create match hash for portal %d\n",
- index);
- goto failed;
- }
-
- memset(&mtable->mt_exhausted[0], -1,
- sizeof(mtable->mt_exhausted[0]) *
- LNET_MT_EXHAUSTED_BMAP);
- mtable->mt_mhash = mhash;
- for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
- INIT_LIST_HEAD(&mhash[j]);
-
- mtable->mt_portal = index;
- mtable->mt_cpt = i;
- }
-
- return 0;
- failed:
- lnet_ptl_cleanup(ptl);
- return -ENOMEM;
-}
-
-void
-lnet_portals_destroy(void)
-{
- int i;
-
- if (the_lnet.ln_portals == NULL)
- return;
-
- for (i = 0; i < the_lnet.ln_nportals; i++)
- lnet_ptl_cleanup(the_lnet.ln_portals[i]);
-
- cfs_array_free(the_lnet.ln_portals);
- the_lnet.ln_portals = NULL;
-}
-
-int
-lnet_portals_create(void)
-{
- int size;
- int i;
-
- size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]);
-
- the_lnet.ln_nportals = MAX_PORTALS;
- the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size);
- if (the_lnet.ln_portals == NULL) {
- CERROR("Failed to allocate portals table\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < the_lnet.ln_nportals; i++) {
- if (lnet_ptl_setup(the_lnet.ln_portals[i], i)) {
- lnet_portals_destroy();
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
- * Turn on the lazy portal attribute. Use with caution!
- *
- * This portal attribute only affects incoming PUT requests to the portal,
- * and is off by default. By default, if there's no matching MD for an
- * incoming PUT request, it is simply dropped. With the lazy attribute on,
- * such requests are queued indefinitely until either a matching MD is
- * posted to the portal or the lazy attribute is turned off.
- *
- * It would prevent dropped requests, however it should be regarded as the
- * last line of defense - i.e. users must keep a close watch on active
- * buffers on a lazy portal and once it becomes too low post more buffers as
- * soon as possible. This is because delayed requests usually have detrimental
- * effects on underlying network connections. A few delayed requests often
- * suffice to bring an underlying connection to a complete halt, due to flow
- * control mechanisms.
- *
- * There's also a DOS attack risk. If users don't post match-all MDs on a
- * lazy portal, a malicious peer can easily stop a service by sending some
- * PUT requests with match bits that won't match any MD. A routed server is
- * especially vulnerable since the connections to its neighbor routers are
- * shared among all clients.
- *
- * \param portal Index of the portal to enable the lazy attribute on.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a portal is not a valid index.
- */
-int
-LNetSetLazyPortal(int portal)
-{
- struct lnet_portal *ptl;
-
- if (portal < 0 || portal >= the_lnet.ln_nportals)
- return -EINVAL;
-
- CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
- ptl = the_lnet.ln_portals[portal];
-
- lnet_res_lock(LNET_LOCK_EX);
- lnet_ptl_lock(ptl);
-
- lnet_ptl_setopt(ptl, LNET_PTL_LAZY);
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(LNET_LOCK_EX);
-
- return 0;
-}
-EXPORT_SYMBOL(LNetSetLazyPortal);
-
-/**
- * Turn off the lazy portal attribute. Delayed requests on the portal,
- * if any, will be all dropped when this function returns.
- *
- * \param portal Index of the portal to disable the lazy attribute on.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a portal is not a valid index.
- */
-int
-LNetClearLazyPortal(int portal)
-{
- struct lnet_portal *ptl;
- LIST_HEAD(zombies);
-
- if (portal < 0 || portal >= the_lnet.ln_nportals)
- return -EINVAL;
-
- ptl = the_lnet.ln_portals[portal];
-
- lnet_res_lock(LNET_LOCK_EX);
- lnet_ptl_lock(ptl);
-
- if (!lnet_ptl_is_lazy(ptl)) {
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(LNET_LOCK_EX);
- return 0;
- }
-
- if (the_lnet.ln_shutdown)
- CWARN("Active lazy portal %d on exit\n", portal);
- else
- CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
-
- /* grab all the blocked messages atomically */
- list_splice_init(&ptl->ptl_msg_delayed, &zombies);
-
- lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
-
- lnet_ptl_unlock(ptl);
- lnet_res_unlock(LNET_LOCK_EX);
-
- lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr");
-
- return 0;
-}
-EXPORT_SYMBOL(LNetClearLazyPortal);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
deleted file mode 100644
index 6f7ef4c737cd..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate, Inc.
- */
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/if.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <linux/file.h>
-#include <linux/pagemap.h>
-/* For sys_open & sys_close */
-#include <linux/syscalls.h>
-#include <net/sock.h>
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lib-lnet.h"
-
-static int
-kernel_sock_unlocked_ioctl(struct file *filp, int cmd, unsigned long arg)
-{
- mm_segment_t oldfs = get_fs();
- int err;
-
- set_fs(KERNEL_DS);
- err = filp->f_op->unlocked_ioctl(filp, cmd, arg);
- set_fs(oldfs);
-
- return err;
-}
-
-static int
-lnet_sock_ioctl(int cmd, unsigned long arg)
-{
- struct file *sock_filp;
- struct socket *sock;
- int rc;
-
- rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
- if (rc != 0) {
- CERROR("Can't create socket: %d\n", rc);
- return rc;
- }
-
- sock_filp = sock_alloc_file(sock, 0, NULL);
- if (IS_ERR(sock_filp)) {
- sock_release(sock);
- rc = PTR_ERR(sock_filp);
- goto out;
- }
-
- rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
-
- fput(sock_filp);
-out:
- return rc;
-}
-
-int
-lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
-{
- struct ifreq ifr;
- int nob;
- int rc;
- __u32 val;
-
- nob = strnlen(name, IFNAMSIZ);
- if (nob == IFNAMSIZ) {
- CERROR("Interface name %s too long\n", name);
- return -EINVAL;
- }
-
- CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ);
-
- strcpy(ifr.ifr_name, name);
- rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
- if (rc != 0) {
- CERROR("Can't get flags for interface %s\n", name);
- return rc;
- }
-
- if ((ifr.ifr_flags & IFF_UP) == 0) {
- CDEBUG(D_NET, "Interface %s down\n", name);
- *up = 0;
- *ip = *mask = 0;
- return 0;
- }
- *up = 1;
-
- strcpy(ifr.ifr_name, name);
- ifr.ifr_addr.sa_family = AF_INET;
- rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
- if (rc != 0) {
- CERROR("Can't get IP address for interface %s\n", name);
- return rc;
- }
-
- val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr;
- *ip = ntohl(val);
-
- strcpy(ifr.ifr_name, name);
- ifr.ifr_addr.sa_family = AF_INET;
- rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
- if (rc != 0) {
- CERROR("Can't get netmask for interface %s\n", name);
- return rc;
- }
-
- val = ((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr;
- *mask = ntohl(val);
-
- return 0;
-}
-EXPORT_SYMBOL(lnet_ipif_query);
-
-int
-lnet_ipif_enumerate(char ***namesp)
-{
- /* Allocate and fill in 'names', returning # interfaces/error */
- char **names;
- int toobig;
- int nalloc;
- int nfound;
- struct ifreq *ifr;
- struct ifconf ifc;
- int rc;
- int nob;
- int i;
-
- nalloc = 16; /* first guess at max interfaces */
- toobig = 0;
- for (;;) {
- if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
- toobig = 1;
- nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
- CWARN("Too many interfaces: only enumerating first %d\n",
- nalloc);
- }
-
- LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
- if (ifr == NULL) {
- CERROR("ENOMEM enumerating up to %d interfaces\n",
- nalloc);
- rc = -ENOMEM;
- goto out0;
- }
-
- ifc.ifc_buf = (char *)ifr;
- ifc.ifc_len = nalloc * sizeof(*ifr);
-
- rc = lnet_sock_ioctl(SIOCGIFCONF, (unsigned long)&ifc);
- if (rc < 0) {
- CERROR("Error %d enumerating interfaces\n", rc);
- goto out1;
- }
-
- LASSERT(rc == 0);
-
- nfound = ifc.ifc_len/sizeof(*ifr);
- LASSERT(nfound <= nalloc);
-
- if (nfound < nalloc || toobig)
- break;
-
- LIBCFS_FREE(ifr, nalloc * sizeof(*ifr));
- nalloc *= 2;
- }
-
- if (nfound == 0)
- goto out1;
-
- LIBCFS_ALLOC(names, nfound * sizeof(*names));
- if (names == NULL) {
- rc = -ENOMEM;
- goto out1;
- }
-
- for (i = 0; i < nfound; i++) {
- nob = strnlen(ifr[i].ifr_name, IFNAMSIZ);
- if (nob == IFNAMSIZ) {
- /* no space for terminating NULL */
- CERROR("interface name %.*s too long (%d max)\n",
- nob, ifr[i].ifr_name, IFNAMSIZ);
- rc = -ENAMETOOLONG;
- goto out2;
- }
-
- LIBCFS_ALLOC(names[i], IFNAMSIZ);
- if (names[i] == NULL) {
- rc = -ENOMEM;
- goto out2;
- }
-
- memcpy(names[i], ifr[i].ifr_name, nob);
- names[i][nob] = 0;
- }
-
- *namesp = names;
- rc = nfound;
-
-out2:
- if (rc < 0)
- lnet_ipif_free_enumeration(names, nfound);
-out1:
- LIBCFS_FREE(ifr, nalloc * sizeof(*ifr));
-out0:
- return rc;
-}
-EXPORT_SYMBOL(lnet_ipif_enumerate);
-
-void
-lnet_ipif_free_enumeration(char **names, int n)
-{
- int i;
-
- LASSERT(n > 0);
-
- for (i = 0; i < n && names[i] != NULL; i++)
- LIBCFS_FREE(names[i], IFNAMSIZ);
-
- LIBCFS_FREE(names, n * sizeof(*names));
-}
-EXPORT_SYMBOL(lnet_ipif_free_enumeration);
-
-int
-lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
-{
- int rc;
- long ticks = timeout * HZ;
- unsigned long then;
- struct timeval tv;
-
- LASSERT(nob > 0);
- /* Caller may pass a zero timeout if she thinks the socket buffer is
- * empty enough to take the whole message immediately */
-
- for (;;) {
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0
- };
-
- if (timeout != 0) {
- /* Set send timeout to remaining time */
- tv = (struct timeval) {
- .tv_sec = ticks / HZ,
- .tv_usec = ((ticks % HZ) * 1000000) / HZ
- };
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
- (char *)&tv, sizeof(tv));
- if (rc != 0) {
- CERROR("Can't set socket send timeout %ld.%06d: %d\n",
- (long)tv.tv_sec, (int)tv.tv_usec, rc);
- return rc;
- }
- }
-
- then = jiffies;
- rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
- ticks -= jiffies - then;
-
- if (rc == nob)
- return 0;
-
- if (rc < 0)
- return rc;
-
- if (rc == 0) {
- CERROR("Unexpected zero rc\n");
- return -ECONNABORTED;
- }
-
- if (ticks <= 0)
- return -EAGAIN;
-
- buffer = ((char *)buffer) + rc;
- nob -= rc;
- }
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_write);
-
-int
-lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
-{
- int rc;
- long ticks = timeout * HZ;
- unsigned long then;
- struct timeval tv;
-
- LASSERT(nob > 0);
- LASSERT(ticks > 0);
-
- for (;;) {
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = 0
- };
-
- /* Set receive timeout to remaining time */
- tv = (struct timeval) {
- .tv_sec = ticks / HZ,
- .tv_usec = ((ticks % HZ) * 1000000) / HZ
- };
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
- (char *)&tv, sizeof(tv));
- if (rc != 0) {
- CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
- (long)tv.tv_sec, (int)tv.tv_usec, rc);
- return rc;
- }
-
- then = jiffies;
- rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
- ticks -= jiffies - then;
-
- if (rc < 0)
- return rc;
-
- if (rc == 0)
- return -ECONNRESET;
-
- buffer = ((char *)buffer) + rc;
- nob -= rc;
-
- if (nob == 0)
- return 0;
-
- if (ticks <= 0)
- return -ETIMEDOUT;
- }
-}
-EXPORT_SYMBOL(lnet_sock_read);
-
-static int
-lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
- int local_port)
-{
- struct sockaddr_in locaddr;
- struct socket *sock;
- int rc;
- int option;
-
- /* All errors are fatal except bind failure if the port is in use */
- *fatal = 1;
-
- rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
- *sockp = sock;
- if (rc != 0) {
- CERROR("Can't create socket: %d\n", rc);
- return rc;
- }
-
- option = 1;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&option, sizeof(option));
- if (rc != 0) {
- CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
- goto failed;
- }
-
- if (local_ip != 0 || local_port != 0) {
- memset(&locaddr, 0, sizeof(locaddr));
- locaddr.sin_family = AF_INET;
- locaddr.sin_port = htons(local_port);
- locaddr.sin_addr.s_addr = (local_ip == 0) ?
- INADDR_ANY : htonl(local_ip);
-
- rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
- sizeof(locaddr));
- if (rc == -EADDRINUSE) {
- CDEBUG(D_NET, "Port %d already in use\n", local_port);
- *fatal = 0;
- goto failed;
- }
- if (rc != 0) {
- CERROR("Error trying to bind to port %d: %d\n",
- local_port, rc);
- goto failed;
- }
- }
- return 0;
-
-failed:
- sock_release(sock);
- return rc;
-}
-
-int
-lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize)
-{
- int option;
- int rc;
-
- if (txbufsize != 0) {
- option = txbufsize;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
- (char *)&option, sizeof(option));
- if (rc != 0) {
- CERROR("Can't set send buffer %d: %d\n",
- option, rc);
- return rc;
- }
- }
-
- if (rxbufsize != 0) {
- option = rxbufsize;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
- (char *)&option, sizeof(option));
- if (rc != 0) {
- CERROR("Can't set receive buffer %d: %d\n",
- option, rc);
- return rc;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_setbuf);
-
-int
-lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port)
-{
- struct sockaddr_in sin;
- int len = sizeof(sin);
- int rc;
-
- if (remote)
- rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len);
- else
- rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len);
- if (rc != 0) {
- CERROR("Error %d getting sock %s IP/port\n",
- rc, remote ? "peer" : "local");
- return rc;
- }
-
- if (ip != NULL)
- *ip = ntohl(sin.sin_addr.s_addr);
-
- if (port != NULL)
- *port = ntohs(sin.sin_port);
-
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_getaddr);
-
-int
-lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize)
-{
- if (txbufsize != NULL)
- *txbufsize = sock->sk->sk_sndbuf;
-
- if (rxbufsize != NULL)
- *rxbufsize = sock->sk->sk_rcvbuf;
-
- return 0;
-}
-EXPORT_SYMBOL(lnet_sock_getbuf);
-
-int
-lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
- int backlog)
-{
- int fatal;
- int rc;
-
- rc = lnet_sock_create(sockp, &fatal, local_ip, local_port);
- if (rc != 0) {
- if (!fatal)
- CERROR("Can't create socket: port %d already in use\n",
- local_port);
- return rc;
- }
-
- rc = kernel_listen(*sockp, backlog);
- if (rc == 0)
- return 0;
-
- CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
- sock_release(*sockp);
- return rc;
-}
-EXPORT_SYMBOL(lnet_sock_listen);
-
-int
-lnet_sock_accept(struct socket **newsockp, struct socket *sock)
-{
- wait_queue_t wait;
- struct socket *newsock;
- int rc;
-
- init_waitqueue_entry(&wait, current);
-
- /* XXX this should add a ref to sock->ops->owner, if
- * TCP could be a module */
- rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock);
- if (rc) {
- CERROR("Can't allocate socket\n");
- return rc;
- }
-
- newsock->ops = sock->ops;
-
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
- if (rc == -EAGAIN) {
- /* Nothing ready, so wait for activity */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(sk_sleep(sock->sk), &wait);
- schedule();
- remove_wait_queue(sk_sleep(sock->sk), &wait);
- set_current_state(TASK_RUNNING);
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
- }
-
- if (rc != 0)
- goto failed;
-
- *newsockp = newsock;
- return 0;
-
-failed:
- sock_release(newsock);
- return rc;
-}
-EXPORT_SYMBOL(lnet_sock_accept);
-
-int
-lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
- int local_port, __u32 peer_ip, int peer_port)
-{
- struct sockaddr_in srvaddr;
- int rc;
-
- rc = lnet_sock_create(sockp, fatal, local_ip, local_port);
- if (rc != 0)
- return rc;
-
- memset(&srvaddr, 0, sizeof(srvaddr));
- srvaddr.sin_family = AF_INET;
- srvaddr.sin_port = htons(peer_port);
- srvaddr.sin_addr.s_addr = htonl(peer_ip);
-
- rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr,
- sizeof(srvaddr), 0);
- if (rc == 0)
- return 0;
-
- /* EADDRNOTAVAIL probably means we're already connected to the same
- * peer/port on the same local port on a differently typed
- * connection. Let our caller retry with a different local
- * port... */
- *fatal = !(rc == -EADDRNOTAVAIL);
-
- CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET,
- "Error %d connecting %pI4h/%d -> %pI4h/%d\n", rc,
- &local_ip, local_port, &peer_ip, peer_port);
-
- sock_release(*sockp);
- return rc;
-}
-EXPORT_SYMBOL(lnet_sock_connect);
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
deleted file mode 100644
index 2a137f46800f..000000000000
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include "../../include/linux/lnet/lib-lnet.h"
-
-static int
-lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
-{
- LASSERT(!lntmsg->msg_routing);
- LASSERT(!lntmsg->msg_target_is_router);
-
- return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0);
-}
-
-static int
-lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
- int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
-{
- lnet_msg_t *sendmsg = private;
-
- if (lntmsg != NULL) { /* not discarding */
- if (sendmsg->msg_iov != NULL) {
- if (iov != NULL)
- lnet_copy_iov2iov(niov, iov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_iov,
- sendmsg->msg_offset, mlen);
- else
- lnet_copy_iov2kiov(niov, kiov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_iov,
- sendmsg->msg_offset, mlen);
- } else {
- if (iov != NULL)
- lnet_copy_kiov2iov(niov, iov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_kiov,
- sendmsg->msg_offset, mlen);
- else
- lnet_copy_kiov2kiov(niov, kiov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_kiov,
- sendmsg->msg_offset, mlen);
- }
-
- lnet_finalize(ni, lntmsg, 0);
- }
-
- lnet_finalize(ni, sendmsg, 0);
- return 0;
-}
-
-static int lolnd_instanced;
-
-static void
-lolnd_shutdown(lnet_ni_t *ni)
-{
- CDEBUG(D_NET, "shutdown\n");
- LASSERT(lolnd_instanced);
-
- lolnd_instanced = 0;
-}
-
-static int
-lolnd_startup(lnet_ni_t *ni)
-{
- LASSERT(ni->ni_lnd == &the_lolnd);
- LASSERT(!lolnd_instanced);
- lolnd_instanced = 1;
-
- return 0;
-}
-
-lnd_t the_lolnd = {
- /* .lnd_list = */ {&the_lolnd.lnd_list, &the_lolnd.lnd_list},
- /* .lnd_refcount = */ 0,
- /* .lnd_type = */ LOLND,
- /* .lnd_startup = */ lolnd_startup,
- /* .lnd_shutdown = */ lolnd_shutdown,
- /* .lnt_ctl = */ NULL,
- /* .lnd_send = */ lolnd_send,
- /* .lnd_recv = */ lolnd_recv,
- /* .lnd_eager_recv = */ NULL,
- /* .lnd_notify = */ NULL,
- /* .lnd_accept = */ NULL
-};
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
deleted file mode 100644
index 576201a8390c..000000000000
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include "../../include/linux/lnet/lib-lnet.h"
-
-static int config_on_load;
-module_param(config_on_load, int, 0444);
-MODULE_PARM_DESC(config_on_load, "configure network at module load");
-
-static struct mutex lnet_config_mutex;
-
-static int
-lnet_configure(void *arg)
-{
- /* 'arg' only there so I can be passed to cfs_create_thread() */
- int rc = 0;
-
- mutex_lock(&lnet_config_mutex);
-
- if (!the_lnet.ln_niinit_self) {
- rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
- if (rc >= 0) {
- the_lnet.ln_niinit_self = 1;
- rc = 0;
- }
- }
-
- mutex_unlock(&lnet_config_mutex);
- return rc;
-}
-
-static int
-lnet_unconfigure(void)
-{
- int refcount;
-
- mutex_lock(&lnet_config_mutex);
-
- if (the_lnet.ln_niinit_self) {
- the_lnet.ln_niinit_self = 0;
- LNetNIFini();
- }
-
- mutex_lock(&the_lnet.ln_api_mutex);
- refcount = the_lnet.ln_refcount;
- mutex_unlock(&the_lnet.ln_api_mutex);
-
- mutex_unlock(&lnet_config_mutex);
- return (refcount == 0) ? 0 : -EBUSY;
-}
-
-static int
-lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
-{
- int rc;
-
- switch (cmd) {
- case IOC_LIBCFS_CONFIGURE:
- return lnet_configure(NULL);
-
- case IOC_LIBCFS_UNCONFIGURE:
- return lnet_unconfigure();
-
- default:
- /* Passing LNET_PID_ANY only gives me a ref if the net is up
- * already; I'll need it to ensure the net can't go down while
- * I'm called into it */
- rc = LNetNIInit(LNET_PID_ANY);
- if (rc >= 0) {
- rc = LNetCtl(cmd, data);
- LNetNIFini();
- }
- return rc;
- }
-}
-
-static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
-
-static int __init
-init_lnet(void)
-{
- int rc;
-
- mutex_init(&lnet_config_mutex);
-
- rc = lnet_init();
- if (rc != 0) {
- CERROR("lnet_init: error %d\n", rc);
- return rc;
- }
-
- rc = libcfs_register_ioctl(&lnet_ioctl_handler);
- LASSERT(rc == 0);
-
- if (config_on_load) {
- /* Have to schedule a separate thread to avoid deadlocking
- * in modload */
- (void) kthread_run(lnet_configure, NULL, "lnet_initd");
- }
-
- return 0;
-}
-
-static void __exit
-fini_lnet(void)
-{
- int rc;
-
- rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
- LASSERT(rc == 0);
-
- lnet_fini();
-}
-
-MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
-MODULE_DESCRIPTION("LNet v3.1");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.0");
-
-module_init(init_lnet);
-module_exit(fini_lnet);
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
deleted file mode 100644
index 1fceed3c8fc0..000000000000
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/peer.c
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/lnet/lib-lnet.h"
-
-int
-lnet_peer_tables_create(void)
-{
- struct lnet_peer_table *ptable;
- struct list_head *hash;
- int i;
- int j;
-
- the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*ptable));
- if (the_lnet.ln_peer_tables == NULL) {
- CERROR("Failed to allocate cpu-partition peer tables\n");
- return -ENOMEM;
- }
-
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- INIT_LIST_HEAD(&ptable->pt_deathrow);
-
- LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
- LNET_PEER_HASH_SIZE * sizeof(*hash));
- if (hash == NULL) {
- CERROR("Failed to create peer hash table\n");
- lnet_peer_tables_destroy();
- return -ENOMEM;
- }
-
- for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- INIT_LIST_HEAD(&hash[j]);
- ptable->pt_hash = hash; /* sign of initialization */
- }
-
- return 0;
-}
-
-void
-lnet_peer_tables_destroy(void)
-{
- struct lnet_peer_table *ptable;
- struct list_head *hash;
- int i;
- int j;
-
- if (the_lnet.ln_peer_tables == NULL)
- return;
-
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- hash = ptable->pt_hash;
- if (hash == NULL) /* not initialized */
- break;
-
- LASSERT(list_empty(&ptable->pt_deathrow));
-
- ptable->pt_hash = NULL;
- for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- LASSERT(list_empty(&hash[j]));
-
- LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
- }
-
- cfs_percpt_free(the_lnet.ln_peer_tables);
- the_lnet.ln_peer_tables = NULL;
-}
-
-void
-lnet_peer_tables_cleanup(void)
-{
- struct lnet_peer_table *ptable;
- int i;
- int j;
-
- LASSERT(the_lnet.ln_shutdown); /* i.e. no new peers */
-
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- lnet_net_lock(i);
-
- for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
- struct list_head *peers = &ptable->pt_hash[j];
-
- while (!list_empty(peers)) {
- lnet_peer_t *lp = list_entry(peers->next,
- lnet_peer_t,
- lp_hashlist);
- list_del_init(&lp->lp_hashlist);
- /* lose hash table's ref */
- lnet_peer_decref_locked(lp);
- }
- }
-
- lnet_net_unlock(i);
- }
-
- cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- LIST_HEAD(deathrow);
- lnet_peer_t *lp;
-
- lnet_net_lock(i);
-
- for (j = 3; ptable->pt_number != 0; j++) {
- lnet_net_unlock(i);
-
- if ((j & (j - 1)) == 0) {
- CDEBUG(D_WARNING,
- "Waiting for %d peers on peer table\n",
- ptable->pt_number);
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 2);
- lnet_net_lock(i);
- }
- list_splice_init(&ptable->pt_deathrow, &deathrow);
-
- lnet_net_unlock(i);
-
- while (!list_empty(&deathrow)) {
- lp = list_entry(deathrow.next,
- lnet_peer_t, lp_hashlist);
- list_del(&lp->lp_hashlist);
- LIBCFS_FREE(lp, sizeof(*lp));
- }
- }
-}
-
-void
-lnet_destroy_peer_locked(lnet_peer_t *lp)
-{
- struct lnet_peer_table *ptable;
-
- LASSERT(lp->lp_refcount == 0);
- LASSERT(lp->lp_rtr_refcount == 0);
- LASSERT(list_empty(&lp->lp_txq));
- LASSERT(list_empty(&lp->lp_hashlist));
- LASSERT(lp->lp_txqnob == 0);
-
- ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
- LASSERT(ptable->pt_number > 0);
- ptable->pt_number--;
-
- lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt);
- lp->lp_ni = NULL;
-
- list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
-}
-
-lnet_peer_t *
-lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
-{
- struct list_head *peers;
- lnet_peer_t *lp;
-
- LASSERT(!the_lnet.ln_shutdown);
-
- peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
- list_for_each_entry(lp, peers, lp_hashlist) {
- if (lp->lp_nid == nid) {
- lnet_peer_addref_locked(lp);
- return lp;
- }
- }
-
- return NULL;
-}
-
-int
-lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
-{
- struct lnet_peer_table *ptable;
- lnet_peer_t *lp = NULL;
- lnet_peer_t *lp2;
- int cpt2;
- int rc = 0;
-
- *lpp = NULL;
- if (the_lnet.ln_shutdown) /* it's shutting down */
- return -ESHUTDOWN;
-
- /* cpt can be LNET_LOCK_EX if it's called from router functions */
- cpt2 = cpt != LNET_LOCK_EX ? cpt : lnet_cpt_of_nid_locked(nid);
-
- ptable = the_lnet.ln_peer_tables[cpt2];
- lp = lnet_find_peer_locked(ptable, nid);
- if (lp != NULL) {
- *lpp = lp;
- return 0;
- }
-
- if (!list_empty(&ptable->pt_deathrow)) {
- lp = list_entry(ptable->pt_deathrow.next,
- lnet_peer_t, lp_hashlist);
- list_del(&lp->lp_hashlist);
- }
-
- /*
- * take extra refcount in case another thread has shutdown LNet
- * and destroyed locks and peer-table before I finish the allocation
- */
- ptable->pt_number++;
- lnet_net_unlock(cpt);
-
- if (lp != NULL)
- memset(lp, 0, sizeof(*lp));
- else
- LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
-
- if (lp == NULL) {
- rc = -ENOMEM;
- lnet_net_lock(cpt);
- goto out;
- }
-
- INIT_LIST_HEAD(&lp->lp_txq);
- INIT_LIST_HEAD(&lp->lp_rtrq);
- INIT_LIST_HEAD(&lp->lp_routes);
-
- lp->lp_notify = 0;
- lp->lp_notifylnd = 0;
- lp->lp_notifying = 0;
- lp->lp_alive_count = 0;
- lp->lp_timestamp = 0;
- lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */
- lp->lp_last_alive = cfs_time_current(); /* assumes alive */
- lp->lp_last_query = 0; /* haven't asked NI yet */
- lp->lp_ping_timestamp = 0;
- lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
- lp->lp_nid = nid;
- lp->lp_cpt = cpt2;
- lp->lp_refcount = 2; /* 1 for caller; 1 for hash */
- lp->lp_rtr_refcount = 0;
-
- lnet_net_lock(cpt);
-
- if (the_lnet.ln_shutdown) {
- rc = -ESHUTDOWN;
- goto out;
- }
-
- lp2 = lnet_find_peer_locked(ptable, nid);
- if (lp2 != NULL) {
- *lpp = lp2;
- goto out;
- }
-
- lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
- if (lp->lp_ni == NULL) {
- rc = -EHOSTUNREACH;
- goto out;
- }
-
- lp->lp_txcredits =
- lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
- lp->lp_rtrcredits =
- lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
-
- list_add_tail(&lp->lp_hashlist,
- &ptable->pt_hash[lnet_nid2peerhash(nid)]);
- ptable->pt_version++;
- *lpp = lp;
-
- return 0;
-out:
- if (lp != NULL)
- list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
- ptable->pt_number--;
- return rc;
-}
-
-void
-lnet_debug_peer(lnet_nid_t nid)
-{
- char *aliveness = "NA";
- lnet_peer_t *lp;
- int rc;
- int cpt;
-
- cpt = lnet_cpt_of_nid(nid);
- lnet_net_lock(cpt);
-
- rc = lnet_nid2peer_locked(&lp, nid, cpt);
- if (rc != 0) {
- lnet_net_unlock(cpt);
- CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
- return;
- }
-
- if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
- aliveness = lp->lp_alive ? "up" : "down";
-
- CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
- libcfs_nid2str(lp->lp_nid), lp->lp_refcount,
- aliveness, lp->lp_ni->ni_peertxcredits,
- lp->lp_rtrcredits, lp->lp_minrtrcredits,
- lp->lp_txcredits, lp->lp_mintxcredits, lp->lp_txqnob);
-
- lnet_peer_decref_locked(lp);
-
- lnet_net_unlock(cpt);
-}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
deleted file mode 100644
index 0357b051401f..000000000000
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ /dev/null
@@ -1,1561 +0,0 @@
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * This file is part of Portals
- * http://sourceforge.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include "../../include/linux/lnet/lib-lnet.h"
-
-#define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
-#define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
-#define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
-#define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
-#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
-#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
-
-static char *forwarding = "";
-module_param(forwarding, charp, 0444);
-MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
-
-static int tiny_router_buffers;
-module_param(tiny_router_buffers, int, 0444);
-MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
-static int small_router_buffers;
-module_param(small_router_buffers, int, 0444);
-MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
-static int large_router_buffers;
-module_param(large_router_buffers, int, 0444);
-MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
-static int peer_buffer_credits;
-module_param(peer_buffer_credits, int, 0444);
-MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
-
-static int auto_down = 1;
-module_param(auto_down, int, 0444);
-MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
-
-int
-lnet_peer_buffer_credits(lnet_ni_t *ni)
-{
- /* NI option overrides LNet default */
- if (ni->ni_peerrtrcredits > 0)
- return ni->ni_peerrtrcredits;
- if (peer_buffer_credits > 0)
- return peer_buffer_credits;
-
- /* As an approximation, allow this peer the same number of router
- * buffers as it is allowed outstanding sends */
- return ni->ni_peertxcredits;
-}
-
-/* forward ref's */
-static int lnet_router_checker(void *);
-
-static int check_routers_before_use;
-module_param(check_routers_before_use, int, 0444);
-MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
-
-int avoid_asym_router_failure = 1;
-module_param(avoid_asym_router_failure, int, 0644);
-MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
-
-static int dead_router_check_interval = 60;
-module_param(dead_router_check_interval, int, 0644);
-MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
-
-static int live_router_check_interval = 60;
-module_param(live_router_check_interval, int, 0644);
-MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
-
-static int router_ping_timeout = 50;
-module_param(router_ping_timeout, int, 0644);
-MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
-
-int
-lnet_peers_start_down(void)
-{
- return check_routers_before_use;
-}
-
-void
-lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
- unsigned long when)
-{
- if (time_before(when, lp->lp_timestamp)) { /* out of date information */
- CDEBUG(D_NET, "Out of date\n");
- return;
- }
-
- lp->lp_timestamp = when; /* update timestamp */
- lp->lp_ping_deadline = 0; /* disable ping timeout */
-
- if (lp->lp_alive_count != 0 && /* got old news */
- (!lp->lp_alive) == (!alive)) { /* new date for old news */
- CDEBUG(D_NET, "Old news\n");
- return;
- }
-
- /* Flag that notification is outstanding */
-
- lp->lp_alive_count++;
- lp->lp_alive = !(!alive); /* 1 bit! */
- lp->lp_notify = 1;
- lp->lp_notifylnd |= notifylnd;
- if (lp->lp_alive)
- lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
-
- CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
-}
-
-static void
-lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
-{
- int alive;
- int notifylnd;
-
- /* Notify only in 1 thread at any time to ensure ordered notification.
- * NB individual events can be missed; the only guarantee is that you
- * always get the most recent news */
-
- if (lp->lp_notifying || ni == NULL)
- return;
-
- lp->lp_notifying = 1;
-
- while (lp->lp_notify) {
- alive = lp->lp_alive;
- notifylnd = lp->lp_notifylnd;
-
- lp->lp_notifylnd = 0;
- lp->lp_notify = 0;
-
- if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
- lnet_net_unlock(lp->lp_cpt);
-
- /* A new notification could happen now; I'll handle it
- * when control returns to me */
-
- (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
-
- lnet_net_lock(lp->lp_cpt);
- }
- }
-
- lp->lp_notifying = 0;
-}
-
-
-static void
-lnet_rtr_addref_locked(lnet_peer_t *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- LASSERT(lp->lp_rtr_refcount >= 0);
-
- /* lnet_net_lock must be exclusively locked */
- lp->lp_rtr_refcount++;
- if (lp->lp_rtr_refcount == 1) {
- struct list_head *pos;
-
- /* a simple insertion sort */
- list_for_each_prev(pos, &the_lnet.ln_routers) {
- lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
- lp_rtr_list);
-
- if (rtr->lp_nid < lp->lp_nid)
- break;
- }
-
- list_add(&lp->lp_rtr_list, pos);
- /* addref for the_lnet.ln_routers */
- lnet_peer_addref_locked(lp);
- the_lnet.ln_routers_version++;
- }
-}
-
-static void
-lnet_rtr_decref_locked(lnet_peer_t *lp)
-{
- LASSERT(lp->lp_refcount > 0);
- LASSERT(lp->lp_rtr_refcount > 0);
-
- /* lnet_net_lock must be exclusively locked */
- lp->lp_rtr_refcount--;
- if (lp->lp_rtr_refcount == 0) {
- LASSERT(list_empty(&lp->lp_routes));
-
- if (lp->lp_rcd != NULL) {
- list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
- lp->lp_rcd = NULL;
- }
-
- list_del(&lp->lp_rtr_list);
- /* decref for the_lnet.ln_routers */
- lnet_peer_decref_locked(lp);
- the_lnet.ln_routers_version++;
- }
-}
-
-lnet_remotenet_t *
-lnet_find_net_locked(__u32 net)
-{
- lnet_remotenet_t *rnet;
- struct list_head *tmp;
- struct list_head *rn_list;
-
- LASSERT(!the_lnet.ln_shutdown);
-
- rn_list = lnet_net2rnethash(net);
- list_for_each(tmp, rn_list) {
- rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
-
- if (rnet->lrn_net == net)
- return rnet;
- }
- return NULL;
-}
-
-static void lnet_shuffle_seed(void)
-{
- static int seeded;
- int lnd_type, seed[2];
- struct timespec64 ts;
- lnet_ni_t *ni;
- struct list_head *tmp;
-
- if (seeded)
- return;
-
- cfs_get_random_bytes(seed, sizeof(seed));
-
-	/* Nodes with small feet have little entropy;
-	 * the NID for this node gives the most entropy in the low bits */
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
-
- if (lnd_type != LOLND)
- seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
- }
-
- ktime_get_ts64(&ts);
- cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
- seeded = 1;
-}
-
-/* NB expects LNET_LOCK held */
-static void
-lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
-{
- unsigned int len = 0;
- unsigned int offset = 0;
- struct list_head *e;
-
- lnet_shuffle_seed();
-
- list_for_each(e, &rnet->lrn_routes) {
- len++;
- }
-
- /* len+1 positions to add a new entry, also prevents division by 0 */
- offset = cfs_rand() % (len + 1);
- list_for_each(e, &rnet->lrn_routes) {
- if (offset == 0)
- break;
- offset--;
- }
- list_add(&route->lr_list, e);
- list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
-
- the_lnet.ln_remote_nets_version++;
- lnet_rtr_addref_locked(route->lr_gateway);
-}
-
-int
-lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
- unsigned int priority)
-{
- struct list_head *e;
- lnet_remotenet_t *rnet;
- lnet_remotenet_t *rnet2;
- lnet_route_t *route;
- lnet_ni_t *ni;
- int add_route;
- int rc;
-
- CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
- libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
-
- if (gateway == LNET_NID_ANY ||
- LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
- net == LNET_NIDNET(LNET_NID_ANY) ||
- LNET_NETTYP(net) == LOLND ||
- LNET_NIDNET(gateway) == net ||
- hops < 1 || hops > 255)
- return -EINVAL;
-
- if (lnet_islocalnet(net)) /* it's a local network */
- return 0; /* ignore the route entry */
-
- /* Assume net, route, all new */
- LIBCFS_ALLOC(route, sizeof(*route));
- LIBCFS_ALLOC(rnet, sizeof(*rnet));
- if (route == NULL || rnet == NULL) {
- CERROR("Out of memory creating route %s %d %s\n",
- libcfs_net2str(net), hops, libcfs_nid2str(gateway));
- if (route != NULL)
- LIBCFS_FREE(route, sizeof(*route));
- if (rnet != NULL)
- LIBCFS_FREE(rnet, sizeof(*rnet));
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&rnet->lrn_routes);
- rnet->lrn_net = net;
- route->lr_hops = hops;
- route->lr_net = net;
- route->lr_priority = priority;
-
- lnet_net_lock(LNET_LOCK_EX);
-
- rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
- if (rc != 0) {
- lnet_net_unlock(LNET_LOCK_EX);
-
- LIBCFS_FREE(route, sizeof(*route));
- LIBCFS_FREE(rnet, sizeof(*rnet));
-
- if (rc == -EHOSTUNREACH) /* gateway is not on a local net */
- return 0; /* ignore the route entry */
- CERROR("Error %d creating route %s %d %s\n", rc,
- libcfs_net2str(net), hops,
- libcfs_nid2str(gateway));
-
- return rc;
- }
-
- LASSERT(!the_lnet.ln_shutdown);
-
- rnet2 = lnet_find_net_locked(net);
- if (rnet2 == NULL) {
- /* new network */
- list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
- rnet2 = rnet;
- }
-
-	/* Search for a duplicate route (if one exists, adding is a NOOP) */
- add_route = 1;
- list_for_each(e, &rnet2->lrn_routes) {
- lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
-
- if (route2->lr_gateway == route->lr_gateway) {
- add_route = 0;
- break;
- }
-
- /* our lookups must be true */
- LASSERT(route2->lr_gateway->lp_nid != gateway);
- }
-
- if (add_route) {
- lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
- lnet_add_route_to_rnet(rnet2, route);
-
- ni = route->lr_gateway->lp_ni;
- lnet_net_unlock(LNET_LOCK_EX);
-
- /* XXX Assume alive */
- if (ni->ni_lnd->lnd_notify != NULL)
- (ni->ni_lnd->lnd_notify)(ni, gateway, 1);
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-
- /* -1 for notify or !add_route */
- lnet_peer_decref_locked(route->lr_gateway);
- lnet_net_unlock(LNET_LOCK_EX);
-
- if (!add_route)
- LIBCFS_FREE(route, sizeof(*route));
-
- if (rnet != rnet2)
- LIBCFS_FREE(rnet, sizeof(*rnet));
-
- return 0;
-}
-
-int
-lnet_check_routes(void)
-{
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
- lnet_route_t *route2;
- struct list_head *e1;
- struct list_head *e2;
- int cpt;
- struct list_head *rn_list;
- int i;
-
- cpt = lnet_net_lock_current();
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
- rn_list = &the_lnet.ln_remote_nets_hash[i];
- list_for_each(e1, rn_list) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
-
- route2 = NULL;
- list_for_each(e2, &rnet->lrn_routes) {
- lnet_nid_t nid1;
- lnet_nid_t nid2;
- int net;
-
- route = list_entry(e2, lnet_route_t,
- lr_list);
-
- if (route2 == NULL) {
- route2 = route;
- continue;
- }
-
- if (route->lr_gateway->lp_ni ==
- route2->lr_gateway->lp_ni)
- continue;
-
- nid1 = route->lr_gateway->lp_nid;
- nid2 = route2->lr_gateway->lp_nid;
- net = rnet->lrn_net;
-
- lnet_net_unlock(cpt);
-
- CERROR("Routes to %s via %s and %s not supported\n",
- libcfs_net2str(net),
- libcfs_nid2str(nid1),
- libcfs_nid2str(nid2));
- return -EINVAL;
- }
- }
- }
-
- lnet_net_unlock(cpt);
- return 0;
-}
-
-int
-lnet_del_route(__u32 net, lnet_nid_t gw_nid)
-{
- struct lnet_peer *gateway;
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
- struct list_head *e1;
- struct list_head *e2;
- int rc = -ENOENT;
- struct list_head *rn_list;
- int idx = 0;
-
- CDEBUG(D_NET, "Del route: net %s : gw %s\n",
- libcfs_net2str(net), libcfs_nid2str(gw_nid));
-
- /* NB Caller may specify either all routes via the given gateway
-	 * or a specific route entry (actual NIDs) */
-
- lnet_net_lock(LNET_LOCK_EX);
- if (net == LNET_NIDNET(LNET_NID_ANY))
- rn_list = &the_lnet.ln_remote_nets_hash[0];
- else
- rn_list = lnet_net2rnethash(net);
-
- again:
- list_for_each(e1, rn_list) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
-
- if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
- net == rnet->lrn_net))
- continue;
-
- list_for_each(e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
-
- gateway = route->lr_gateway;
- if (!(gw_nid == LNET_NID_ANY ||
- gw_nid == gateway->lp_nid))
- continue;
-
- list_del(&route->lr_list);
- list_del(&route->lr_gwlist);
- the_lnet.ln_remote_nets_version++;
-
- if (list_empty(&rnet->lrn_routes))
- list_del(&rnet->lrn_list);
- else
- rnet = NULL;
-
- lnet_rtr_decref_locked(gateway);
- lnet_peer_decref_locked(gateway);
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- LIBCFS_FREE(route, sizeof(*route));
-
- if (rnet != NULL)
- LIBCFS_FREE(rnet, sizeof(*rnet));
-
- rc = 0;
- lnet_net_lock(LNET_LOCK_EX);
- goto again;
- }
- }
-
- if (net == LNET_NIDNET(LNET_NID_ANY) &&
- ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
- rn_list = &the_lnet.ln_remote_nets_hash[idx];
- goto again;
- }
- lnet_net_unlock(LNET_LOCK_EX);
-
- return rc;
-}
-
-void
-lnet_destroy_routes(void)
-{
- lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
-}
-
-int
-lnet_get_route(int idx, __u32 *net, __u32 *hops,
- lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
-{
- struct list_head *e1;
- struct list_head *e2;
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
- int cpt;
- int i;
- struct list_head *rn_list;
-
- cpt = lnet_net_lock_current();
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
- rn_list = &the_lnet.ln_remote_nets_hash[i];
- list_for_each(e1, rn_list) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
-
- list_for_each(e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t,
- lr_list);
-
- if (idx-- == 0) {
- *net = rnet->lrn_net;
- *hops = route->lr_hops;
- *priority = route->lr_priority;
- *gateway = route->lr_gateway->lp_nid;
- *alive = route->lr_gateway->lp_alive;
- lnet_net_unlock(cpt);
- return 0;
- }
- }
- }
- }
-
- lnet_net_unlock(cpt);
- return -ENOENT;
-}
-
-void
-lnet_swap_pinginfo(lnet_ping_info_t *info)
-{
- int i;
- lnet_ni_status_t *stat;
-
- __swab32s(&info->pi_magic);
- __swab32s(&info->pi_features);
- __swab32s(&info->pi_pid);
- __swab32s(&info->pi_nnis);
- for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
- stat = &info->pi_ni[i];
- __swab64s(&stat->ns_nid);
- __swab32s(&stat->ns_status);
- }
-}
-
-/**
- * parse router-checker pinginfo, record number of down NIs for remote
- * networks on that router.
- */
-static void
-lnet_parse_rc_info(lnet_rc_data_t *rcd)
-{
- lnet_ping_info_t *info = rcd->rcd_pinginfo;
- struct lnet_peer *gw = rcd->rcd_gateway;
- lnet_route_t *rtr;
-
- if (!gw->lp_alive)
- return;
-
- if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
- lnet_swap_pinginfo(info);
-
- /* NB always racing with network! */
- if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
- CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
- libcfs_nid2str(gw->lp_nid), info->pi_magic);
- gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
- return;
- }
-
- gw->lp_ping_feats = info->pi_features;
- if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
- CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
- libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
- return; /* nothing I can understand */
- }
-
- if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
- return; /* can't carry NI status info */
-
- list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
- int ptl_status = LNET_NI_STATUS_INVALID;
- int down = 0;
- int up = 0;
- int i;
-
- for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
- lnet_ni_status_t *stat = &info->pi_ni[i];
- lnet_nid_t nid = stat->ns_nid;
-
- if (nid == LNET_NID_ANY) {
- CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
- libcfs_nid2str(gw->lp_nid));
- gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
- return;
- }
-
- if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
- continue;
-
- if (stat->ns_status == LNET_NI_STATUS_DOWN) {
- if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND)
- down++;
- else if (ptl_status != LNET_NI_STATUS_UP)
- ptl_status = LNET_NI_STATUS_DOWN;
- continue;
- }
-
- if (stat->ns_status == LNET_NI_STATUS_UP) {
- if (LNET_NIDNET(nid) == rtr->lr_net) {
- up = 1;
- break;
- }
- /* ptl NIs are considered down only when
- * they're all down */
- if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND)
- ptl_status = LNET_NI_STATUS_UP;
- continue;
- }
-
- CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
- libcfs_nid2str(gw->lp_nid), stat->ns_status);
- gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
- return;
- }
-
- if (up) { /* ignore downed NIs if NI for dest network is up */
- rtr->lr_downis = 0;
- continue;
- }
- rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN);
- }
-}
-
-static void
-lnet_router_checker_event(lnet_event_t *event)
-{
- lnet_rc_data_t *rcd = event->md.user_ptr;
- struct lnet_peer *lp;
-
- LASSERT(rcd != NULL);
-
- if (event->unlinked) {
- LNetInvalidateHandle(&rcd->rcd_mdh);
- return;
- }
-
- LASSERT(event->type == LNET_EVENT_SEND ||
- event->type == LNET_EVENT_REPLY);
-
- lp = rcd->rcd_gateway;
- LASSERT(lp != NULL);
-
-	/* NB: this is called while holding lnet_res_lock; a few places
-	 * need to hold both locks at the same time, so take care of
-	 * lock ordering */
- lnet_net_lock(lp->lp_cpt);
- if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
- /* ignore if no longer a router or rcd is replaced */
- goto out;
- }
-
- if (event->type == LNET_EVENT_SEND) {
- lp->lp_ping_notsent = 0;
- if (event->status == 0)
- goto out;
- }
-
- /* LNET_EVENT_REPLY */
- /* A successful REPLY means the router is up. If _any_ comms
- * to the router fail I assume it's down (this will happen if
- * we ping alive routers to try to detect router death before
- * apps get burned). */
-
- lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
- /* The router checker will wake up very shortly and do the
- * actual notification.
- * XXX If 'lp' stops being a router before then, it will still
- * have the notification pending!!! */
-
- if (avoid_asym_router_failure && event->status == 0)
- lnet_parse_rc_info(rcd);
-
- out:
- lnet_net_unlock(lp->lp_cpt);
-}
-
-static void
-lnet_wait_known_routerstate(void)
-{
- lnet_peer_t *rtr;
- struct list_head *entry;
- int all_known;
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
-
- for (;;) {
- int cpt = lnet_net_lock_current();
-
- all_known = 1;
- list_for_each(entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
-
- if (rtr->lp_alive_count == 0) {
- all_known = 0;
- break;
- }
- }
-
- lnet_net_unlock(cpt);
-
- if (all_known)
- return;
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
-}
-
-void
-lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
-{
- lnet_route_t *rte;
-
- if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
- list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
- if (rte->lr_net == net) {
- rte->lr_downis = 0;
- break;
- }
- }
- }
-}
-
-static void
-lnet_update_ni_status_locked(void)
-{
- lnet_ni_t *ni;
- time64_t now;
- int timeout;
-
- LASSERT(the_lnet.ln_routing);
-
- timeout = router_ping_timeout +
- max(live_router_check_interval, dead_router_check_interval);
-
- now = ktime_get_real_seconds();
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- if (ni->ni_lnd->lnd_type == LOLND)
- continue;
-
- if (now < ni->ni_last_alive + timeout)
- continue;
-
- lnet_ni_lock(ni);
- /* re-check with lock */
- if (now < ni->ni_last_alive + timeout) {
- lnet_ni_unlock(ni);
- continue;
- }
-
- LASSERT(ni->ni_status != NULL);
-
- if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
- CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
- libcfs_nid2str(ni->ni_nid), timeout);
- /* NB: so far, this is the only place to set
- * NI status to "down" */
- ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
- }
- lnet_ni_unlock(ni);
- }
-}
-
-static void
-lnet_destroy_rc_data(lnet_rc_data_t *rcd)
-{
- LASSERT(list_empty(&rcd->rcd_list));
- /* detached from network */
- LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
-
- if (rcd->rcd_gateway != NULL) {
- int cpt = rcd->rcd_gateway->lp_cpt;
-
- lnet_net_lock(cpt);
- lnet_peer_decref_locked(rcd->rcd_gateway);
- lnet_net_unlock(cpt);
- }
-
- if (rcd->rcd_pinginfo != NULL)
- LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
-
- LIBCFS_FREE(rcd, sizeof(*rcd));
-}
-
-static lnet_rc_data_t *
-lnet_create_rc_data_locked(lnet_peer_t *gateway)
-{
- lnet_rc_data_t *rcd = NULL;
- lnet_ping_info_t *pi;
- int rc;
- int i;
-
- lnet_net_unlock(gateway->lp_cpt);
-
- LIBCFS_ALLOC(rcd, sizeof(*rcd));
- if (rcd == NULL)
- goto out;
-
- LNetInvalidateHandle(&rcd->rcd_mdh);
- INIT_LIST_HEAD(&rcd->rcd_list);
-
- LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
- if (pi == NULL)
- goto out;
-
- for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
- pi->pi_ni[i].ns_nid = LNET_NID_ANY;
- pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
- }
- rcd->rcd_pinginfo = pi;
-
- LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
- rc = LNetMDBind((lnet_md_t){.start = pi,
- .user_ptr = rcd,
- .length = LNET_PINGINFO_SIZE,
- .threshold = LNET_MD_THRESH_INF,
- .options = LNET_MD_TRUNCATE,
- .eq_handle = the_lnet.ln_rc_eqh},
- LNET_UNLINK,
- &rcd->rcd_mdh);
- if (rc < 0) {
- CERROR("Can't bind MD: %d\n", rc);
- goto out;
- }
- LASSERT(rc == 0);
-
- lnet_net_lock(gateway->lp_cpt);
- /* router table changed or someone has created rcd for this gateway */
- if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
- lnet_net_unlock(gateway->lp_cpt);
- goto out;
- }
-
- lnet_peer_addref_locked(gateway);
- rcd->rcd_gateway = gateway;
- gateway->lp_rcd = rcd;
- gateway->lp_ping_notsent = 0;
-
- return rcd;
-
- out:
- if (rcd != NULL) {
- if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
- rc = LNetMDUnlink(rcd->rcd_mdh);
- LASSERT(rc == 0);
- }
- lnet_destroy_rc_data(rcd);
- }
-
- lnet_net_lock(gateway->lp_cpt);
- return gateway->lp_rcd;
-}
-
-static int
-lnet_router_check_interval(lnet_peer_t *rtr)
-{
- int secs;
-
- secs = rtr->lp_alive ? live_router_check_interval :
- dead_router_check_interval;
- if (secs < 0)
- secs = 0;
-
- return secs;
-}
-
-static void
-lnet_ping_router_locked(lnet_peer_t *rtr)
-{
- lnet_rc_data_t *rcd = NULL;
- unsigned long now = cfs_time_current();
- int secs;
-
- lnet_peer_addref_locked(rtr);
-
- if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
- cfs_time_after(now, rtr->lp_ping_deadline))
- lnet_notify_locked(rtr, 1, 0, now);
-
- /* Run any outstanding notifications */
- lnet_ni_notify_locked(rtr->lp_ni, rtr);
-
- if (!lnet_isrouter(rtr) ||
- the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
- /* router table changed or router checker is shutting down */
- lnet_peer_decref_locked(rtr);
- return;
- }
-
- rcd = rtr->lp_rcd != NULL ?
- rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
-
- if (rcd == NULL)
- return;
-
- secs = lnet_router_check_interval(rtr);
-
- CDEBUG(D_NET,
- "rtr %s %d: deadline %lu ping_notsent %d alive %d alive_count %d lp_ping_timestamp %lu\n",
- libcfs_nid2str(rtr->lp_nid), secs,
- rtr->lp_ping_deadline, rtr->lp_ping_notsent,
- rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
-
- if (secs != 0 && !rtr->lp_ping_notsent &&
- cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
- cfs_time_seconds(secs)))) {
- int rc;
- lnet_process_id_t id;
- lnet_handle_md_t mdh;
-
- id.nid = rtr->lp_nid;
- id.pid = LUSTRE_SRV_LNET_PID;
- CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
-
- rtr->lp_ping_notsent = 1;
- rtr->lp_ping_timestamp = now;
-
- mdh = rcd->rcd_mdh;
-
- if (rtr->lp_ping_deadline == 0) {
- rtr->lp_ping_deadline =
- cfs_time_shift(router_ping_timeout);
- }
-
- lnet_net_unlock(rtr->lp_cpt);
-
- rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
- LNET_PROTO_PING_MATCHBITS, 0);
-
- lnet_net_lock(rtr->lp_cpt);
- if (rc != 0)
- rtr->lp_ping_notsent = 0; /* no event pending */
- }
-
- lnet_peer_decref_locked(rtr);
-}
-
-int
-lnet_router_checker_start(void)
-{
- int rc;
- int eqsz;
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
-
- if (check_routers_before_use &&
- dead_router_check_interval <= 0) {
- LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
- return -EINVAL;
- }
-
- if (!the_lnet.ln_routing &&
- live_router_check_interval <= 0 &&
- dead_router_check_interval <= 0)
- return 0;
-
- sema_init(&the_lnet.ln_rc_signal, 0);
- /* EQ size doesn't matter; the callback is guaranteed to get every
- * event */
- eqsz = 0;
- rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
- &the_lnet.ln_rc_eqh);
- if (rc != 0) {
- CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
- return -ENOMEM;
- }
-
- the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
- rc = PTR_ERR(kthread_run(lnet_router_checker,
- NULL, "router_checker"));
- if (IS_ERR_VALUE(rc)) {
- CERROR("Can't start router checker thread: %d\n", rc);
- /* block until event callback signals exit */
- down(&the_lnet.ln_rc_signal);
- rc = LNetEQFree(the_lnet.ln_rc_eqh);
- LASSERT(rc == 0);
- the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- return -ENOMEM;
- }
-
- if (check_routers_before_use) {
- /* Note that a helpful side-effect of pinging all known routers
- * at startup is that it makes them drop stale connections they
- * may have to a previous instance of me. */
- lnet_wait_known_routerstate();
- }
-
- return 0;
-}
-
-void
-lnet_router_checker_stop(void)
-{
- int rc;
-
- if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
- return;
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
- the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
-
- /* block until event callback signals exit */
- down(&the_lnet.ln_rc_signal);
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
-
- rc = LNetEQFree(the_lnet.ln_rc_eqh);
- LASSERT(rc == 0);
-}
-
-static void
-lnet_prune_rc_data(int wait_unlink)
-{
- lnet_rc_data_t *rcd;
- lnet_rc_data_t *tmp;
- lnet_peer_t *lp;
- struct list_head head;
- int i = 2;
-
- if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
- list_empty(&the_lnet.ln_rcd_deathrow) &&
- list_empty(&the_lnet.ln_rcd_zombie)))
- return;
-
- INIT_LIST_HEAD(&head);
-
- lnet_net_lock(LNET_LOCK_EX);
-
- if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
- /* router checker is stopping, prune all */
- list_for_each_entry(lp, &the_lnet.ln_routers,
- lp_rtr_list) {
- if (lp->lp_rcd == NULL)
- continue;
-
- LASSERT(list_empty(&lp->lp_rcd->rcd_list));
- list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
- lp->lp_rcd = NULL;
- }
- }
-
- /* unlink all RCDs on deathrow list */
- list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
-
- if (!list_empty(&head)) {
- lnet_net_unlock(LNET_LOCK_EX);
-
- list_for_each_entry(rcd, &head, rcd_list)
- LNetMDUnlink(rcd->rcd_mdh);
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-
- list_splice_init(&head, &the_lnet.ln_rcd_zombie);
-
- /* release all zombie RCDs */
- while (!list_empty(&the_lnet.ln_rcd_zombie)) {
- list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
- rcd_list) {
- if (LNetHandleIsInvalid(rcd->rcd_mdh))
- list_move(&rcd->rcd_list, &head);
- }
-
- wait_unlink = wait_unlink &&
- !list_empty(&the_lnet.ln_rcd_zombie);
-
- lnet_net_unlock(LNET_LOCK_EX);
-
- while (!list_empty(&head)) {
- rcd = list_entry(head.next,
- lnet_rc_data_t, rcd_list);
- list_del_init(&rcd->rcd_list);
- lnet_destroy_rc_data(rcd);
- }
-
- if (!wait_unlink)
- return;
-
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
- "Waiting for rc buffers to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 4);
-
- lnet_net_lock(LNET_LOCK_EX);
- }
-
- lnet_net_unlock(LNET_LOCK_EX);
-}
-
-static int
-lnet_router_checker(void *arg)
-{
- lnet_peer_t *rtr;
- struct list_head *entry;
-
- cfs_block_allsigs();
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
-
- while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
- __u64 version;
- int cpt;
- int cpt2;
-
- cpt = lnet_net_lock_current();
-rescan:
- version = the_lnet.ln_routers_version;
-
- list_for_each(entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
-
- cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
- if (cpt != cpt2) {
- lnet_net_unlock(cpt);
- cpt = cpt2;
- lnet_net_lock(cpt);
- /* the routers list has changed */
- if (version != the_lnet.ln_routers_version)
- goto rescan;
- }
-
- lnet_ping_router_locked(rtr);
-
- /* NB dropped lock */
- if (version != the_lnet.ln_routers_version) {
- /* the routers list has changed */
- goto rescan;
- }
- }
-
- if (the_lnet.ln_routing)
- lnet_update_ni_status_locked();
-
- lnet_net_unlock(cpt);
-
- lnet_prune_rc_data(0); /* don't wait for UNLINK */
-
-		/* Calling schedule_timeout() here always adds 1 to the load
-		 * average because the kernel counts # active tasks as
-		 * nr_running + nr_uninterruptible. */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
-
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
-
- lnet_prune_rc_data(1); /* wait for UNLINK */
-
- the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- up(&the_lnet.ln_rc_signal);
- /* The unlink event callback will signal final completion */
- return 0;
-}
-
-static void
-lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
-{
- int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
-
- while (--npages >= 0)
- __free_page(rb->rb_kiov[npages].kiov_page);
-
- LIBCFS_FREE(rb, sz);
-}
-
-static lnet_rtrbuf_t *
-lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
-{
- int npages = rbp->rbp_npages;
- int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
- struct page *page;
- lnet_rtrbuf_t *rb;
- int i;
-
- LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
- if (rb == NULL)
- return NULL;
-
- rb->rb_pool = rbp;
-
- for (i = 0; i < npages; i++) {
- page = alloc_pages_node(
- cfs_cpt_spread_node(lnet_cpt_table(), cpt),
- __GFP_ZERO | GFP_IOFS, 0);
- if (page == NULL) {
- while (--i >= 0)
- __free_page(rb->rb_kiov[i].kiov_page);
-
- LIBCFS_FREE(rb, sz);
- return NULL;
- }
-
- rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
- rb->rb_kiov[i].kiov_offset = 0;
- rb->rb_kiov[i].kiov_page = page;
- }
-
- return rb;
-}
-
-static void
-lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
-{
- int npages = rbp->rbp_npages;
- int nbuffers = 0;
- lnet_rtrbuf_t *rb;
-
- if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
- return;
-
- LASSERT(list_empty(&rbp->rbp_msgs));
- LASSERT(rbp->rbp_credits == rbp->rbp_nbuffers);
-
- while (!list_empty(&rbp->rbp_bufs)) {
- LASSERT(rbp->rbp_credits > 0);
-
- rb = list_entry(rbp->rbp_bufs.next,
- lnet_rtrbuf_t, rb_list);
- list_del(&rb->rb_list);
- lnet_destroy_rtrbuf(rb, npages);
- nbuffers++;
- }
-
- LASSERT(rbp->rbp_nbuffers == nbuffers);
- LASSERT(rbp->rbp_credits == nbuffers);
-
- rbp->rbp_nbuffers = rbp->rbp_credits = 0;
-}
-
-static int
-lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
-{
- lnet_rtrbuf_t *rb;
- int i;
-
- if (rbp->rbp_nbuffers != 0) {
- LASSERT(rbp->rbp_nbuffers == nbufs);
- return 0;
- }
-
- for (i = 0; i < nbufs; i++) {
- rb = lnet_new_rtrbuf(rbp, cpt);
-
- if (rb == NULL) {
- CERROR("Failed to allocate %d router bufs of %d pages\n",
- nbufs, rbp->rbp_npages);
- return -ENOMEM;
- }
-
- rbp->rbp_nbuffers++;
- rbp->rbp_credits++;
- rbp->rbp_mincredits++;
- list_add(&rb->rb_list, &rbp->rbp_bufs);
-
- /* No allocation "under fire" */
- /* Otherwise we'd need code to schedule blocked msgs etc */
- LASSERT(!the_lnet.ln_routing);
- }
-
- LASSERT(rbp->rbp_credits == nbufs);
- return 0;
-}
-
-static void
-lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
-{
- INIT_LIST_HEAD(&rbp->rbp_msgs);
- INIT_LIST_HEAD(&rbp->rbp_bufs);
-
- rbp->rbp_npages = npages;
- rbp->rbp_credits = 0;
- rbp->rbp_mincredits = 0;
-}
-
-void
-lnet_rtrpools_free(void)
-{
- lnet_rtrbufpool_t *rtrp;
- int i;
-
- if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
- return;
-
- cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- lnet_rtrpool_free_bufs(&rtrp[0]);
- lnet_rtrpool_free_bufs(&rtrp[1]);
- lnet_rtrpool_free_bufs(&rtrp[2]);
- }
-
- cfs_percpt_free(the_lnet.ln_rtrpools);
- the_lnet.ln_rtrpools = NULL;
-}
-
-static int
-lnet_nrb_tiny_calculate(int npages)
-{
- int nrbs = LNET_NRB_TINY;
-
- if (tiny_router_buffers < 0) {
- LCONSOLE_ERROR_MSG(0x10c,
- "tiny_router_buffers=%d invalid when routing enabled\n",
- tiny_router_buffers);
- return -1;
- }
-
- if (tiny_router_buffers > 0)
- nrbs = tiny_router_buffers;
-
- nrbs /= LNET_CPT_NUMBER;
- return max(nrbs, LNET_NRB_TINY_MIN);
-}
-
-static int
-lnet_nrb_small_calculate(int npages)
-{
- int nrbs = LNET_NRB_SMALL;
-
- if (small_router_buffers < 0) {
- LCONSOLE_ERROR_MSG(0x10c,
- "small_router_buffers=%d invalid when routing enabled\n",
- small_router_buffers);
- return -1;
- }
-
- if (small_router_buffers > 0)
- nrbs = small_router_buffers;
-
- nrbs /= LNET_CPT_NUMBER;
- return max(nrbs, LNET_NRB_SMALL_MIN);
-}
-
-static int
-lnet_nrb_large_calculate(int npages)
-{
- int nrbs = LNET_NRB_LARGE;
-
- if (large_router_buffers < 0) {
- LCONSOLE_ERROR_MSG(0x10c,
- "large_router_buffers=%d invalid when routing enabled\n",
- large_router_buffers);
- return -1;
- }
-
- if (large_router_buffers > 0)
- nrbs = large_router_buffers;
-
- nrbs /= LNET_CPT_NUMBER;
- return max(nrbs, LNET_NRB_LARGE_MIN);
-}
-
-int
-lnet_rtrpools_alloc(int im_a_router)
-{
- lnet_rtrbufpool_t *rtrp;
- int large_pages;
- int small_pages = 1;
- int nrb_tiny;
- int nrb_small;
- int nrb_large;
- int rc;
- int i;
-
- large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
- if (!strcmp(forwarding, "")) {
- /* not set either way */
- if (!im_a_router)
- return 0;
- } else if (!strcmp(forwarding, "disabled")) {
- /* explicitly disabled */
- return 0;
- } else if (!strcmp(forwarding, "enabled")) {
- /* explicitly enabled */
- } else {
- LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
- return -EINVAL;
- }
-
- nrb_tiny = lnet_nrb_tiny_calculate(0);
- if (nrb_tiny < 0)
- return -EINVAL;
-
- nrb_small = lnet_nrb_small_calculate(small_pages);
- if (nrb_small < 0)
- return -EINVAL;
-
- nrb_large = lnet_nrb_large_calculate(large_pages);
- if (nrb_large < 0)
- return -EINVAL;
-
- the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
- LNET_NRBPOOLS *
- sizeof(lnet_rtrbufpool_t));
- if (the_lnet.ln_rtrpools == NULL) {
- LCONSOLE_ERROR_MSG(0x10c,
-				   "Failed to initialize router buffer pool\n");
- return -ENOMEM;
- }
-
- cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- lnet_rtrpool_init(&rtrp[0], 0);
- rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
- if (rc != 0)
- goto failed;
-
- lnet_rtrpool_init(&rtrp[1], small_pages);
- rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
- if (rc != 0)
- goto failed;
-
- lnet_rtrpool_init(&rtrp[2], large_pages);
- rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
- if (rc != 0)
- goto failed;
- }
-
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_routing = 1;
- lnet_net_unlock(LNET_LOCK_EX);
-
- return 0;
-
- failed:
- lnet_rtrpools_free();
- return rc;
-}
-
-int
-lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
-{
- struct lnet_peer *lp = NULL;
- unsigned long now = cfs_time_current();
- int cpt = lnet_cpt_of_nid(nid);
-
- LASSERT(!in_interrupt());
-
- CDEBUG(D_NET, "%s notifying %s: %s\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid),
- alive ? "up" : "down");
-
- if (ni != NULL &&
- LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
- CWARN("Ignoring notification of %s %s by %s (different net)\n",
- libcfs_nid2str(nid), alive ? "birth" : "death",
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
- }
-
- /* can't do predictions... */
- if (cfs_time_after(when, now)) {
- CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid), alive ? "up" : "down",
- cfs_duration_sec(cfs_time_sub(when, now)));
- return -EINVAL;
- }
-
- if (ni != NULL && !alive && /* LND telling me she's down */
- !auto_down) { /* auto-down disabled */
- CDEBUG(D_NET, "Auto-down disabled\n");
- return 0;
- }
-
- lnet_net_lock(cpt);
-
- if (the_lnet.ln_shutdown) {
- lnet_net_unlock(cpt);
- return -ESHUTDOWN;
- }
-
- lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
- if (lp == NULL) {
- /* nid not found */
- lnet_net_unlock(cpt);
- CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
- return 0;
- }
-
-	/* We can't fully trust the LND to report the exact peer last_alive
-	 * when it notifies us about a dead peer. For example ksocklnd can
-	 * call us with when == _time_when_the_node_was_booted_ if
-	 * no connections were successfully established */
- if (ni != NULL && !alive && when < lp->lp_last_alive)
- when = lp->lp_last_alive;
-
- lnet_notify_locked(lp, ni == NULL, alive, when);
-
- lnet_ni_notify_locked(ni, lp);
-
- lnet_peer_decref_locked(lp);
-
- lnet_net_unlock(cpt);
- return 0;
-}
-EXPORT_SYMBOL(lnet_notify);
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
deleted file mode 100644
index 396c7c4e5c83..000000000000
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * This file is part of Portals
- * http://sourceforge.net/projects/sandiaportals/
- *
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lib-lnet.h"
-
-/* This is really lnet_proc.c. You might need to update sanity test 215
- * if any file format is changed. */
-
-#define LNET_LOFFT_BITS (sizeof(loff_t) * 8)
-/*
- * NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system
- */
-#define LNET_PROC_CPT_BITS (LNET_CPT_BITS + 1)
-/* change version, 16 bits or 8 bits */
-#define LNET_PROC_VER_BITS max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, 64) / 4, 8)
-
-#define LNET_PROC_HASH_BITS LNET_PEER_HASH_BITS
-/*
- * bits for peer hash offset
- * NB: we don't use the highest bit of *ppos because it's signed
- */
-#define LNET_PROC_HOFF_BITS (LNET_LOFFT_BITS - \
- LNET_PROC_CPT_BITS - \
- LNET_PROC_VER_BITS - \
- LNET_PROC_HASH_BITS - 1)
-/* bits for hash index + position */
-#define LNET_PROC_HPOS_BITS (LNET_PROC_HASH_BITS + LNET_PROC_HOFF_BITS)
-/* bits for peer hash table + hash version */
-#define LNET_PROC_VPOS_BITS (LNET_PROC_HPOS_BITS + LNET_PROC_VER_BITS)
-
-#define LNET_PROC_CPT_MASK ((1ULL << LNET_PROC_CPT_BITS) - 1)
-#define LNET_PROC_VER_MASK ((1ULL << LNET_PROC_VER_BITS) - 1)
-#define LNET_PROC_HASH_MASK ((1ULL << LNET_PROC_HASH_BITS) - 1)
-#define LNET_PROC_HOFF_MASK ((1ULL << LNET_PROC_HOFF_BITS) - 1)
-
-#define LNET_PROC_CPT_GET(pos) \
- (int)(((pos) >> LNET_PROC_VPOS_BITS) & LNET_PROC_CPT_MASK)
-
-#define LNET_PROC_VER_GET(pos) \
- (int)(((pos) >> LNET_PROC_HPOS_BITS) & LNET_PROC_VER_MASK)
-
-#define LNET_PROC_HASH_GET(pos) \
- (int)(((pos) >> LNET_PROC_HOFF_BITS) & LNET_PROC_HASH_MASK)
-
-#define LNET_PROC_HOFF_GET(pos) \
- (int)((pos) & LNET_PROC_HOFF_MASK)
-
-#define LNET_PROC_POS_MAKE(cpt, ver, hash, off) \
- (((((loff_t)(cpt)) & LNET_PROC_CPT_MASK) << LNET_PROC_VPOS_BITS) | \
- ((((loff_t)(ver)) & LNET_PROC_VER_MASK) << LNET_PROC_HPOS_BITS) | \
- ((((loff_t)(hash)) & LNET_PROC_HASH_MASK) << LNET_PROC_HOFF_BITS) | \
- ((off) & LNET_PROC_HOFF_MASK))
-
-#define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK))
-
-static int proc_call_handler(void *data, int write, loff_t *ppos,
- void __user *buffer, size_t *lenp,
- int (*handler)(void *data, int write,
- loff_t pos, void __user *buffer, int len))
-{
- int rc = handler(data, write, *ppos, buffer, *lenp);
-
- if (rc < 0)
- return rc;
-
- if (write) {
- *ppos += *lenp;
- } else {
- *lenp = rc;
- *ppos += rc;
- }
- return 0;
-}
-
-static int __proc_lnet_stats(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- int rc;
- lnet_counters_t *ctrs;
- int len;
- char *tmpstr;
- const int tmpsiz = 256; /* 7 %u and 4 %llu */
-
- if (write) {
- lnet_counters_reset();
- return 0;
- }
-
- /* read */
-
- LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
- if (ctrs == NULL)
- return -ENOMEM;
-
- LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL) {
- LIBCFS_FREE(ctrs, sizeof(*ctrs));
- return -ENOMEM;
- }
-
- lnet_counters_get(ctrs);
-
- len = snprintf(tmpstr, tmpsiz,
- "%u %u %u %u %u %u %u %llu %llu %llu %llu",
- ctrs->msgs_alloc, ctrs->msgs_max,
- ctrs->errors,
- ctrs->send_count, ctrs->recv_count,
- ctrs->route_count, ctrs->drop_count,
- ctrs->send_length, ctrs->recv_length,
- ctrs->route_length, ctrs->drop_length);
-
- if (pos >= min_t(int, len, strlen(tmpstr)))
- rc = 0;
- else
- rc = cfs_trace_copyout_string(buffer, nob,
- tmpstr + pos, "\n");
-
- LIBCFS_FREE(tmpstr, tmpsiz);
- LIBCFS_FREE(ctrs, sizeof(*ctrs));
- return rc;
-}
-
-static int proc_lnet_stats(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_stats);
-}
-
-static int proc_lnet_routes(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- const int tmpsiz = 256;
- char *tmpstr;
- char *s;
- int rc = 0;
- int len;
- int ver;
- int off;
-
- CLASSERT(sizeof(loff_t) >= 4);
-
- off = LNET_PROC_HOFF_GET(*ppos);
- ver = LNET_PROC_VER_GET(*ppos);
-
- LASSERT(!write);
-
- if (*lenp == 0)
- return 0;
-
- LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (*ppos == 0) {
- s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
- the_lnet.ln_routing ? "enabled" : "disabled");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n",
- "net", "hops", "priority", "state", "router");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- lnet_net_lock(0);
- ver = (unsigned int)the_lnet.ln_remote_nets_version;
- lnet_net_unlock(0);
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- } else {
- struct list_head *n;
- struct list_head *r;
- lnet_route_t *route = NULL;
- lnet_remotenet_t *rnet = NULL;
- int skip = off - 1;
- struct list_head *rn_list;
- int i;
-
- lnet_net_lock(0);
-
- if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
- lnet_net_unlock(0);
- LIBCFS_FREE(tmpstr, tmpsiz);
- return -ESTALE;
- }
-
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL;
- i++) {
- rn_list = &the_lnet.ln_remote_nets_hash[i];
-
- n = rn_list->next;
-
- while (n != rn_list && route == NULL) {
- rnet = list_entry(n, lnet_remotenet_t,
- lrn_list);
-
- r = rnet->lrn_routes.next;
-
- while (r != &rnet->lrn_routes) {
- lnet_route_t *re =
- list_entry(r, lnet_route_t,
- lr_list);
- if (skip == 0) {
- route = re;
- break;
- }
-
- skip--;
- r = r->next;
- }
-
- n = n->next;
- }
- }
-
- if (route != NULL) {
- __u32 net = rnet->lrn_net;
- unsigned int hops = route->lr_hops;
- unsigned int priority = route->lr_priority;
- lnet_nid_t nid = route->lr_gateway->lp_nid;
- int alive = route->lr_gateway->lp_alive;
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-8s %4u %8u %7s %s\n",
- libcfs_net2str(net), hops,
- priority,
- alive ? "up" : "down",
- libcfs_nid2str(nid));
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
-
- lnet_net_unlock(0);
- }
-
-	len = s - tmpstr; /* how many bytes were written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
- rc = -EFAULT;
- else {
- off += 1;
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- }
- }
-
- LIBCFS_FREE(tmpstr, tmpsiz);
-
- if (rc == 0)
- *lenp = len;
-
- return rc;
-}
-
-static int proc_lnet_routers(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc = 0;
- char *tmpstr;
- char *s;
- const int tmpsiz = 256;
- int len;
- int ver;
- int off;
-
- off = LNET_PROC_HOFF_GET(*ppos);
- ver = LNET_PROC_VER_GET(*ppos);
-
- LASSERT(!write);
-
- if (*lenp == 0)
- return 0;
-
- LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (*ppos == 0) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n",
- "ref", "rtr_ref", "alive_cnt", "state",
- "last_ping", "ping_sent", "deadline",
- "down_ni", "router");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- lnet_net_lock(0);
- ver = (unsigned int)the_lnet.ln_routers_version;
- lnet_net_unlock(0);
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- } else {
- struct list_head *r;
- struct lnet_peer *peer = NULL;
- int skip = off - 1;
-
- lnet_net_lock(0);
-
- if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) {
- lnet_net_unlock(0);
-
- LIBCFS_FREE(tmpstr, tmpsiz);
- return -ESTALE;
- }
-
- r = the_lnet.ln_routers.next;
-
- while (r != &the_lnet.ln_routers) {
- lnet_peer_t *lp = list_entry(r, lnet_peer_t,
- lp_rtr_list);
-
- if (skip == 0) {
- peer = lp;
- break;
- }
-
- skip--;
- r = r->next;
- }
-
- if (peer != NULL) {
- lnet_nid_t nid = peer->lp_nid;
- unsigned long now = cfs_time_current();
- unsigned long deadline = peer->lp_ping_deadline;
- int nrefs = peer->lp_refcount;
- int nrtrrefs = peer->lp_rtr_refcount;
- int alive_cnt = peer->lp_alive_count;
- int alive = peer->lp_alive;
- int pingsent = !peer->lp_ping_notsent;
- int last_ping = cfs_duration_sec(cfs_time_sub(now,
- peer->lp_ping_timestamp));
- int down_ni = 0;
- lnet_route_t *rtr;
-
- if ((peer->lp_ping_feats &
- LNET_PING_FEAT_NI_STATUS) != 0) {
- list_for_each_entry(rtr, &peer->lp_routes,
- lr_gwlist) {
- /* downis on any route should be the
- * number of downis on the gateway */
- if (rtr->lr_downis != 0) {
- down_ni = rtr->lr_downis;
- break;
- }
- }
- }
-
- if (deadline == 0)
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
- nrefs, nrtrrefs, alive_cnt,
- alive ? "up" : "down", last_ping,
- pingsent, "NA", down_ni,
- libcfs_nid2str(nid));
- else
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-4d %7d %9d %6s %12d %9d %8lu %7d %s\n",
- nrefs, nrtrrefs, alive_cnt,
- alive ? "up" : "down", last_ping,
- pingsent,
- cfs_duration_sec(cfs_time_sub(deadline, now)),
- down_ni, libcfs_nid2str(nid));
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
-
- lnet_net_unlock(0);
- }
-
-	len = s - tmpstr; /* how many bytes were written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
- rc = -EFAULT;
- else {
- off += 1;
- *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
- }
- }
-
- LIBCFS_FREE(tmpstr, tmpsiz);
-
- if (rc == 0)
- *lenp = len;
-
- return rc;
-}
-
-static int proc_lnet_peers(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- const int tmpsiz = 256;
- struct lnet_peer_table *ptable;
- char *tmpstr;
- char *s;
- int cpt = LNET_PROC_CPT_GET(*ppos);
- int ver = LNET_PROC_VER_GET(*ppos);
- int hash = LNET_PROC_HASH_GET(*ppos);
- int hoff = LNET_PROC_HOFF_GET(*ppos);
- int rc = 0;
- int len;
-
- CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
- LASSERT(!write);
-
- if (*lenp == 0)
- return 0;
-
- if (cpt >= LNET_CPT_NUMBER) {
- *lenp = 0;
- return 0;
- }
-
- LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (*ppos == 0) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
- "nid", "refs", "state", "last", "max",
- "rtr", "min", "tx", "min", "queue");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- hoff++;
- } else {
- struct lnet_peer *peer;
- struct list_head *p;
- int skip;
- again:
- p = NULL;
- peer = NULL;
- skip = hoff - 1;
-
- lnet_net_lock(cpt);
- ptable = the_lnet.ln_peer_tables[cpt];
- if (hoff == 1)
- ver = LNET_PROC_VERSION(ptable->pt_version);
-
- if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
- lnet_net_unlock(cpt);
- LIBCFS_FREE(tmpstr, tmpsiz);
- return -ESTALE;
- }
-
- while (hash < LNET_PEER_HASH_SIZE) {
- if (p == NULL)
- p = ptable->pt_hash[hash].next;
-
- while (p != &ptable->pt_hash[hash]) {
- lnet_peer_t *lp = list_entry(p, lnet_peer_t,
- lp_hashlist);
- if (skip == 0) {
- peer = lp;
-
- /* minor optimization: start from idx+1
- * on next iteration if we've just
- * drained lp_hashlist */
- if (lp->lp_hashlist.next ==
- &ptable->pt_hash[hash]) {
- hoff = 1;
- hash++;
- } else {
- hoff++;
- }
-
- break;
- }
-
- skip--;
- p = lp->lp_hashlist.next;
- }
-
- if (peer != NULL)
- break;
-
- p = NULL;
- hoff = 1;
- hash++;
- }
-
- if (peer != NULL) {
- lnet_nid_t nid = peer->lp_nid;
- int nrefs = peer->lp_refcount;
- int lastalive = -1;
- char *aliveness = "NA";
- int maxcr = peer->lp_ni->ni_peertxcredits;
- int txcr = peer->lp_txcredits;
- int mintxcr = peer->lp_mintxcredits;
- int rtrcr = peer->lp_rtrcredits;
- int minrtrcr = peer->lp_minrtrcredits;
- int txqnob = peer->lp_txqnob;
-
- if (lnet_isrouter(peer) ||
- lnet_peer_aliveness_enabled(peer))
- aliveness = peer->lp_alive ? "up" : "down";
-
- if (lnet_peer_aliveness_enabled(peer)) {
- unsigned long now = cfs_time_current();
- long delta;
-
- delta = cfs_time_sub(now, peer->lp_last_alive);
- lastalive = cfs_duration_sec(delta);
-
-				/* No need to mess up the peers output with
-				 * arbitrarily long integers - it suffices to
-				 * know that lastalive is more than 10000s old
-				 */
- if (lastalive >= 10000)
- lastalive = 9999;
- }
-
- lnet_net_unlock(cpt);
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %4d %5s %5d %5d %5d %5d %5d %5d %d\n",
- libcfs_nid2str(nid), nrefs, aliveness,
- lastalive, maxcr, rtrcr, minrtrcr, txcr,
- mintxcr, txqnob);
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- } else { /* peer is NULL */
- lnet_net_unlock(cpt);
- }
-
- if (hash == LNET_PEER_HASH_SIZE) {
- cpt++;
- hash = 0;
- hoff = 1;
- if (peer == NULL && cpt < LNET_CPT_NUMBER)
- goto again;
- }
- }
-
-	len = s - tmpstr; /* how many bytes were written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
- rc = -EFAULT;
- else
- *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
- }
-
- LIBCFS_FREE(tmpstr, tmpsiz);
-
- if (rc == 0)
- *lenp = len;
-
- return rc;
-}
-
-static int __proc_lnet_buffers(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- char *s;
- char *tmpstr;
- int tmpsiz;
- int idx;
- int len;
- int rc;
- int i;
-
- LASSERT(!write);
-
- /* (4 %d) * 4 * LNET_CPT_NUMBER */
- tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
- LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%5s %5s %7s %7s\n",
- "pages", "count", "credits", "min");
- LASSERT(tmpstr + tmpsiz - s > 0);
-
- if (the_lnet.ln_rtrpools == NULL)
- goto out; /* I'm not a router */
-
- for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
- lnet_rtrbufpool_t *rbp;
-
- lnet_net_lock(LNET_LOCK_EX);
- cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%5d %5d %7d %7d\n",
- rbp[idx].rbp_npages,
- rbp[idx].rbp_nbuffers,
- rbp[idx].rbp_credits,
- rbp[idx].rbp_mincredits);
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
- lnet_net_unlock(LNET_LOCK_EX);
- }
-
- out:
- len = s - tmpstr;
-
- if (pos >= min_t(int, len, strlen(tmpstr)))
- rc = 0;
- else
- rc = cfs_trace_copyout_string(buffer, nob,
- tmpstr + pos, NULL);
-
- LIBCFS_FREE(tmpstr, tmpsiz);
- return rc;
-}
-
-static int proc_lnet_buffers(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_buffers);
-}
-
-static int proc_lnet_nis(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int tmpsiz = 128 * LNET_CPT_NUMBER;
- int rc = 0;
- char *tmpstr;
- char *s;
- int len;
-
- LASSERT(!write);
-
- if (*lenp == 0)
- return 0;
-
- LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
- return -ENOMEM;
-
- s = tmpstr; /* points to current position in tmpstr[] */
-
- if (*ppos == 0) {
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
- "nid", "status", "alive", "refs", "peer",
- "rtr", "max", "tx", "min");
- LASSERT(tmpstr + tmpsiz - s > 0);
- } else {
- struct list_head *n;
- lnet_ni_t *ni = NULL;
- int skip = *ppos - 1;
-
- lnet_net_lock(0);
-
- n = the_lnet.ln_nis.next;
-
- while (n != &the_lnet.ln_nis) {
- lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
-
- if (skip == 0) {
- ni = a_ni;
- break;
- }
-
- skip--;
- n = n->next;
- }
-
- if (ni != NULL) {
- struct lnet_tx_queue *tq;
- char *stat;
- time64_t now = ktime_get_real_seconds();
- int last_alive = -1;
- int i;
- int j;
-
- if (the_lnet.ln_routing)
- last_alive = now - ni->ni_last_alive;
-
- /* @lo forever alive */
- if (ni->ni_lnd->lnd_type == LOLND)
- last_alive = 0;
-
- lnet_ni_lock(ni);
- LASSERT(ni->ni_status != NULL);
- stat = (ni->ni_status->ns_status ==
- LNET_NI_STATUS_UP) ? "up" : "down";
- lnet_ni_unlock(ni);
-
-			/* we actually output credits information for the
-			 * TX queue of each partition */
- cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
- for (j = 0; ni->ni_cpts != NULL &&
- j < ni->ni_ncpts; j++) {
- if (i == ni->ni_cpts[j])
- break;
- }
-
- if (j == ni->ni_ncpts)
- continue;
-
- if (i != 0)
- lnet_net_lock(i);
-
- s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
- libcfs_nid2str(ni->ni_nid), stat,
- last_alive, *ni->ni_refs[i],
- ni->ni_peertxcredits,
- ni->ni_peerrtrcredits,
- tq->tq_credits_max,
- tq->tq_credits, tq->tq_credits_min);
- if (i != 0)
- lnet_net_unlock(i);
- }
- LASSERT(tmpstr + tmpsiz - s > 0);
- }
-
- lnet_net_unlock(0);
- }
-
-	len = s - tmpstr; /* how many bytes were written */
-
- if (len > *lenp) { /* linux-supplied buffer is too small */
- rc = -EINVAL;
- } else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
- rc = -EFAULT;
- else
- *ppos += 1;
- }
-
- LIBCFS_FREE(tmpstr, tmpsiz);
-
- if (rc == 0)
- *lenp = len;
-
- return rc;
-}
-
-struct lnet_portal_rotors {
- int pr_value;
- const char *pr_name;
- const char *pr_desc;
-};
-
-static struct lnet_portal_rotors portal_rotors[] = {
- {
- .pr_value = LNET_PTL_ROTOR_OFF,
- .pr_name = "OFF",
- .pr_desc = "Turn off message rotor for wildcard portals"
- },
- {
- .pr_value = LNET_PTL_ROTOR_ON,
- .pr_name = "ON",
- .pr_desc = "round-robin dispatch all PUT messages for wildcard portals"
- },
- {
- .pr_value = LNET_PTL_ROTOR_RR_RT,
- .pr_name = "RR_RT",
- .pr_desc = "round-robin dispatch routed PUT message for wildcard portals"
- },
- {
- .pr_value = LNET_PTL_ROTOR_HASH_RT,
- .pr_name = "HASH_RT",
- .pr_desc = "dispatch routed PUT message by hashing source NID for wildcard portals"
- },
- {
- .pr_value = -1,
- .pr_name = NULL,
- .pr_desc = NULL
- },
-};
-
-extern int portal_rotor;
-
-static int __proc_lnet_portal_rotor(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- const int buf_len = 128;
- char *buf;
- char *tmp;
- int rc;
- int i;
-
- LIBCFS_ALLOC(buf, buf_len);
- if (buf == NULL)
- return -ENOMEM;
-
- if (!write) {
- lnet_res_lock(0);
-
- for (i = 0; portal_rotors[i].pr_value >= 0; i++) {
- if (portal_rotors[i].pr_value == portal_rotor)
- break;
- }
-
- LASSERT(portal_rotors[i].pr_value == portal_rotor);
- lnet_res_unlock(0);
-
- rc = snprintf(buf, buf_len,
- "{\n\tportals: all\n"
- "\trotor: %s\n\tdescription: %s\n}",
- portal_rotors[i].pr_name,
- portal_rotors[i].pr_desc);
-
- if (pos >= min_t(int, rc, buf_len)) {
- rc = 0;
- } else {
- rc = cfs_trace_copyout_string(buffer, nob,
- buf + pos, "\n");
- }
- goto out;
- }
-
- rc = cfs_trace_copyin_string(buf, buf_len, buffer, nob);
- if (rc < 0)
- goto out;
-
- tmp = cfs_trimwhite(buf);
-
- rc = -EINVAL;
- lnet_res_lock(0);
- for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
- if (strncasecmp(portal_rotors[i].pr_name, tmp,
- strlen(portal_rotors[i].pr_name)) == 0) {
- portal_rotor = portal_rotors[i].pr_value;
- rc = 0;
- break;
- }
- }
- lnet_res_unlock(0);
-out:
- LIBCFS_FREE(buf, buf_len);
- return rc;
-}
-
-static int proc_lnet_portal_rotor(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_portal_rotor);
-}
-
-static struct ctl_table lnet_table[] = {
- /*
- * NB No .strategy entries have been provided since sysctl(8) prefers
- * to go via /proc for portability.
- */
- {
- .procname = "stats",
- .mode = 0644,
- .proc_handler = &proc_lnet_stats,
- },
- {
- .procname = "routes",
- .mode = 0444,
- .proc_handler = &proc_lnet_routes,
- },
- {
- .procname = "routers",
- .mode = 0444,
- .proc_handler = &proc_lnet_routers,
- },
- {
- .procname = "peers",
- .mode = 0444,
- .proc_handler = &proc_lnet_peers,
- },
- {
- .procname = "buffers",
- .mode = 0444,
- .proc_handler = &proc_lnet_buffers,
- },
- {
- .procname = "nis",
- .mode = 0444,
- .proc_handler = &proc_lnet_nis,
- },
- {
- .procname = "portal_rotor",
- .mode = 0644,
- .proc_handler = &proc_lnet_portal_rotor,
- },
- {
- }
-};
-
-void lnet_router_debugfs_init(void)
-{
- lustre_insert_debugfs(lnet_table, NULL);
-}
-
-void lnet_router_debugfs_fini(void)
-{
-}
diff --git a/drivers/staging/lustre/lnet/selftest/Makefile b/drivers/staging/lustre/lnet/selftest/Makefile
deleted file mode 100644
index c0de6e2d96d0..000000000000
--- a/drivers/staging/lustre/lnet/selftest/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_LNET_SELFTEST) := lnet_selftest.o
-
-lnet_selftest-y := console.o conrpc.o conctl.o framework.o timer.o rpc.o \
- module.o ping_test.o brw_test.o
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
deleted file mode 100644
index 0605c651f797..000000000000
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/brw_test.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-
-#include "selftest.h"
-
-static int brw_srv_workitems = SFW_TEST_WI_MAX;
-module_param(brw_srv_workitems, int, 0644);
-MODULE_PARM_DESC(brw_srv_workitems, "# BRW server workitems");
-
-static int brw_inject_errors;
-module_param(brw_inject_errors, int, 0644);
-MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
-
-static void
-brw_client_fini(sfw_test_instance_t *tsi)
-{
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
-
- LASSERT(tsi->tsi_is_client);
-
- list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
- bulk = tsu->tsu_private;
- if (bulk == NULL)
- continue;
-
- srpc_free_bulk(bulk);
- tsu->tsu_private = NULL;
- }
-}
-
-static int
-brw_client_init(sfw_test_instance_t *tsi)
-{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- int flags;
- int npg;
- int len;
- int opc;
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
-
- LASSERT(sn != NULL);
- LASSERT(tsi->tsi_is_client);
-
- if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- npg = breq->blk_npg;
- /* NB: this is not going to work for variable page size,
- * but we have to keep it for compatibility */
- len = npg * PAGE_CACHE_SIZE;
-
- } else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
-
- /* We should never reach this step with an unknown feature,
- * because make_session rejects unknown features */
- LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- }
-
- if (npg > LNET_MAX_IOV || npg <= 0)
- return -EINVAL;
-
- if (opc != LST_BRW_READ && opc != LST_BRW_WRITE)
- return -EINVAL;
-
- if (flags != LST_BRW_CHECK_NONE &&
- flags != LST_BRW_CHECK_FULL && flags != LST_BRW_CHECK_SIMPLE)
- return -EINVAL;
-
- list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
- bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
- npg, len, opc == LST_BRW_READ);
- if (bulk == NULL) {
- brw_client_fini(tsi);
- return -ENOMEM;
- }
-
- tsu->tsu_private = bulk;
- }
-
- return 0;
-}
-
-#define BRW_POISON 0xbeefbeefbeefbeefULL
-#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
-#define BRW_MSIZE sizeof(__u64)
-
-static int
-brw_inject_one_error(void)
-{
- struct timespec64 ts;
-
- if (brw_inject_errors <= 0)
- return 0;
-
- ktime_get_ts64(&ts);
-
- if (((ts.tv_nsec / NSEC_PER_USEC) & 1) == 0)
- return 0;
-
- return brw_inject_errors--;
-}
-
-static void
-brw_fill_page(struct page *pg, int pattern, __u64 magic)
-{
- char *addr = page_address(pg);
- int i;
-
- LASSERT(addr != NULL);
-
- if (pattern == LST_BRW_CHECK_NONE)
- return;
-
- if (magic == BRW_MAGIC)
- magic += brw_inject_one_error();
-
- if (pattern == LST_BRW_CHECK_SIMPLE) {
- memcpy(addr, &magic, BRW_MSIZE);
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
- memcpy(addr, &magic, BRW_MSIZE);
- return;
- }
-
- if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
- memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
- return;
- }
-
- LBUG();
-}
-
-static int
-brw_check_page(struct page *pg, int pattern, __u64 magic)
-{
- char *addr = page_address(pg);
- __u64 data = 0; /* make compiler happy */
- int i;
-
- LASSERT(addr != NULL);
-
- if (pattern == LST_BRW_CHECK_NONE)
- return 0;
-
- if (pattern == LST_BRW_CHECK_SIMPLE) {
- data = *((__u64 *) addr);
- if (data != magic)
- goto bad_data;
-
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
- data = *((__u64 *) addr);
- if (data != magic)
- goto bad_data;
-
- return 0;
- }
-
- if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
- data = *(((__u64 *) addr) + i);
- if (data != magic)
- goto bad_data;
- }
-
- return 0;
- }
-
- LBUG();
-
-bad_data:
- CERROR("Bad data in page %p: %#llx, %#llx expected\n",
- pg, data, magic);
- return 1;
-}
-
-static void
-brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
-{
- int i;
- struct page *pg;
-
- for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].kiov_page;
- brw_fill_page(pg, pattern, magic);
- }
-}
-
-static int
-brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
-{
- int i;
- struct page *pg;
-
- for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].kiov_page;
- if (brw_check_page(pg, pattern, magic) != 0) {
- CERROR("Bulk page %p (%d/%d) is corrupted!\n",
- pg, i, bk->bk_niov);
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-brw_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
-{
- srpc_bulk_t *bulk = tsu->tsu_private;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_client_rpc_t *rpc;
- srpc_brw_reqst_t *req;
- int flags;
- int npg;
- int len;
- int opc;
- int rc;
-
- LASSERT(sn != NULL);
- LASSERT(bulk != NULL);
-
- if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- npg = breq->blk_npg;
- len = npg * PAGE_CACHE_SIZE;
-
- } else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
-
- /* We should never reach this step with an unknown feature,
- * because make_session rejects unknown features */
- LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
-
- opc = breq->blk_opc;
- flags = breq->blk_flags;
- len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- }
-
- rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
- if (rc != 0)
- return rc;
-
- memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
- if (opc == LST_BRW_WRITE)
- brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
- else
- brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_POISON);
-
- req = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
- req->brw_flags = flags;
- req->brw_rw = opc;
- req->brw_len = len;
-
- *rpcpp = rpc;
- return 0;
-}
-
-static void
-brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
-{
- __u64 magic = BRW_MAGIC;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_msg_t *msg = &rpc->crpc_replymsg;
- srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
-
- LASSERT(sn != NULL);
-
- if (rpc->crpc_status != 0) {
- CERROR("BRW RPC to %s failed with %d\n",
- libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
- atomic_inc(&sn->sn_brw_errors);
- goto out;
- }
-
- if (msg->msg_magic != SRPC_MSG_MAGIC) {
- __swab64s(&magic);
- __swab32s(&reply->brw_status);
- }
-
- CDEBUG(reply->brw_status ? D_WARNING : D_NET,
- "BRW RPC to %s finished with brw_status: %d\n",
- libcfs_id2str(rpc->crpc_dest), reply->brw_status);
-
- if (reply->brw_status != 0) {
- atomic_inc(&sn->sn_brw_errors);
- rpc->crpc_status = -(int)reply->brw_status;
- goto out;
- }
-
- if (reqst->brw_rw == LST_BRW_WRITE)
- goto out;
-
- if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
- CERROR("Bulk data from %s is corrupted!\n",
- libcfs_id2str(rpc->crpc_dest));
- atomic_inc(&sn->sn_brw_errors);
- rpc->crpc_status = -EBADMSG;
- }
-
-out:
- return;
-}
-
-static void
-brw_server_rpc_done(srpc_server_rpc_t *rpc)
-{
- srpc_bulk_t *blk = rpc->srpc_bulk;
-
- if (blk == NULL)
- return;
-
- if (rpc->srpc_status != 0)
- CERROR("Bulk transfer %s %s has failed: %d\n",
- blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
- else
- CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n",
- blk->bk_niov, blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer));
-
- sfw_free_pages(rpc);
-}
-
-static int
-brw_bulk_ready(srpc_server_rpc_t *rpc, int status)
-{
- __u64 magic = BRW_MAGIC;
- srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
- srpc_brw_reqst_t *reqst;
- srpc_msg_t *reqstmsg;
-
- LASSERT(rpc->srpc_bulk != NULL);
- LASSERT(rpc->srpc_reqstbuf != NULL);
-
- reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- reqst = &reqstmsg->msg_body.brw_reqst;
-
- if (status != 0) {
- CERROR("BRW bulk %s failed for RPC from %s: %d\n",
- reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
- libcfs_id2str(rpc->srpc_peer), status);
- return -EIO;
- }
-
- if (reqst->brw_rw == LST_BRW_READ)
- return 0;
-
- if (reqstmsg->msg_magic != SRPC_MSG_MAGIC)
- __swab64s(&magic);
-
- if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) {
- CERROR("Bulk data from %s is corrupted!\n",
- libcfs_id2str(rpc->srpc_peer));
- reply->brw_status = EBADMSG;
- }
-
- return 0;
-}
-
-static int
-brw_server_handle(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst;
- int npg;
- int rc;
-
- LASSERT(sv->sv_id == SRPC_SERVICE_BRW);
-
- if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
- LASSERT(reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- __swab32s(&reqst->brw_rw);
- __swab32s(&reqst->brw_len);
- __swab32s(&reqst->brw_flags);
- __swab64s(&reqst->brw_rpyid);
- __swab64s(&reqst->brw_bulkid);
- }
- LASSERT(reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id));
-
- reply->brw_status = 0;
- rpc->srpc_done = brw_server_rpc_done;
-
- if ((reqst->brw_rw != LST_BRW_READ && reqst->brw_rw != LST_BRW_WRITE) ||
- (reqst->brw_flags != LST_BRW_CHECK_NONE &&
- reqst->brw_flags != LST_BRW_CHECK_FULL &&
- reqst->brw_flags != LST_BRW_CHECK_SIMPLE)) {
- reply->brw_status = EINVAL;
- return 0;
- }
-
- if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
- replymsg->msg_ses_feats = LST_FEATS_MASK;
- reply->brw_status = EPROTO;
- return 0;
- }
-
- if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
- /* compat with old version */
- if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) {
- reply->brw_status = EINVAL;
- return 0;
- }
- npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
-
- } else {
- npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- }
-
- replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
-
- if (reqst->brw_len == 0 || npg > LNET_MAX_IOV) {
- reply->brw_status = EINVAL;
- return 0;
- }
-
- rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg,
- reqst->brw_len,
- reqst->brw_rw == LST_BRW_WRITE);
- if (rc != 0)
- return rc;
-
- if (reqst->brw_rw == LST_BRW_READ)
- brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_MAGIC);
- else
- brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_POISON);
-
- return 0;
-}
-
-sfw_test_client_ops_t brw_test_client;
-void brw_init_test_client(void)
-{
- brw_test_client.tso_init = brw_client_init;
- brw_test_client.tso_fini = brw_client_fini;
- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
- brw_test_client.tso_done_rpc = brw_client_done_rpc;
-}
-
-srpc_service_t brw_test_service;
-void brw_init_test_service(void)
-{
-
- brw_test_service.sv_id = SRPC_SERVICE_BRW;
- brw_test_service.sv_name = "brw_test";
- brw_test_service.sv_handler = brw_server_handle;
- brw_test_service.sv_bulk_ready = brw_bulk_ready;
- brw_test_service.sv_wi_total = brw_srv_workitems;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
deleted file mode 100644
index 817396638652..000000000000
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ /dev/null
@@ -1,929 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- *
- * IOC handle in kernel
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lib-lnet.h"
-#include "../../include/linux/lnet/lnetst.h"
-#include "console.h"
-
-static int
-lst_session_new_ioctl(lstio_session_new_args_t *args)
-{
- char *name;
- int rc;
-
- if (args->lstio_ses_idp == NULL || /* address for output sid */
- args->lstio_ses_key == 0 || /* no key is specified */
- args->lstio_ses_namep == NULL || /* session name */
- args->lstio_ses_nmlen <= 0 ||
- args->lstio_ses_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_ses_namep,
- args->lstio_ses_nmlen)) {
- LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_ses_nmlen] = 0;
-
- rc = lstcon_session_new(name,
- args->lstio_ses_key,
- args->lstio_ses_feats,
- args->lstio_ses_force,
- args->lstio_ses_timeout,
- args->lstio_ses_idp);
-
- LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
- return rc;
-}
-
-static int
-lst_session_end_ioctl(lstio_session_end_args_t *args)
-{
- if (args->lstio_ses_key != console_session.ses_key)
- return -EACCES;
-
- return lstcon_session_end();
-}
-
-static int
-lst_session_info_ioctl(lstio_session_info_args_t *args)
-{
- /* no checking of key */
-
- if (args->lstio_ses_idp == NULL || /* address for output sid */
- args->lstio_ses_keyp == NULL || /* address for output key */
- args->lstio_ses_featp == NULL || /* address for output features */
- args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */
- args->lstio_ses_namep == NULL || /* address for output name */
- args->lstio_ses_nmlen <= 0 ||
- args->lstio_ses_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- return lstcon_session_info(args->lstio_ses_idp,
- args->lstio_ses_keyp,
- args->lstio_ses_featp,
- args->lstio_ses_ndinfo,
- args->lstio_ses_namep,
- args->lstio_ses_nmlen);
-}
-
-static int
-lst_debug_ioctl(lstio_debug_args_t *args)
-{
- char *name = NULL;
- int client = 1;
- int rc;
-
- if (args->lstio_dbg_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_dbg_resultp == NULL)
- return -EINVAL;
-
- if (args->lstio_dbg_namep != NULL && /* name of batch/group */
- (args->lstio_dbg_nmlen <= 0 ||
- args->lstio_dbg_nmlen > LST_NAME_SIZE))
- return -EINVAL;
-
- if (args->lstio_dbg_namep != NULL) {
- LIBCFS_ALLOC(name, args->lstio_dbg_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name, args->lstio_dbg_namep,
- args->lstio_dbg_nmlen)) {
- LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
-
- return -EFAULT;
- }
-
- name[args->lstio_dbg_nmlen] = 0;
- }
-
- rc = -EINVAL;
-
- switch (args->lstio_dbg_type) {
- case LST_OPC_SESSION:
- rc = lstcon_session_debug(args->lstio_dbg_timeout,
- args->lstio_dbg_resultp);
- break;
-
- case LST_OPC_BATCHSRV:
- client = 0;
- case LST_OPC_BATCHCLI:
- if (name == NULL)
- goto out;
-
- rc = lstcon_batch_debug(args->lstio_dbg_timeout,
- name, client, args->lstio_dbg_resultp);
- break;
-
- case LST_OPC_GROUP:
- if (name == NULL)
- goto out;
-
- rc = lstcon_group_debug(args->lstio_dbg_timeout,
- name, args->lstio_dbg_resultp);
- break;
-
- case LST_OPC_NODES:
- if (args->lstio_dbg_count <= 0 ||
- args->lstio_dbg_idsp == NULL)
- goto out;
-
- rc = lstcon_nodes_debug(args->lstio_dbg_timeout,
- args->lstio_dbg_count,
- args->lstio_dbg_idsp,
- args->lstio_dbg_resultp);
- break;
-
- default:
- break;
- }
-
-out:
- if (name != NULL)
- LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_group_add_ioctl(lstio_group_add_args_t *args)
-{
- char *name;
- int rc;
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_grp_namep == NULL ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen);
- return -EFAULT;
- }
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_group_add(name);
-
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_group_del_ioctl(lstio_group_del_args_t *args)
-{
- int rc;
- char *name;
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_grp_namep == NULL ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_group_del(name);
-
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_group_update_ioctl(lstio_group_update_args_t *args)
-{
- int rc;
- char *name;
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_grp_resultp == NULL ||
- args->lstio_grp_namep == NULL ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_grp_nmlen] = 0;
-
- switch (args->lstio_grp_opc) {
- case LST_GROUP_CLEAN:
- rc = lstcon_group_clean(name, args->lstio_grp_args);
- break;
-
- case LST_GROUP_REFRESH:
- rc = lstcon_group_refresh(name, args->lstio_grp_resultp);
- break;
-
- case LST_GROUP_RMND:
- if (args->lstio_grp_count <= 0 ||
- args->lstio_grp_idsp == NULL) {
- rc = -EINVAL;
- break;
- }
- rc = lstcon_nodes_remove(name, args->lstio_grp_count,
- args->lstio_grp_idsp,
- args->lstio_grp_resultp);
- break;
-
- default:
- rc = -EINVAL;
- break;
- }
-
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
-{
- unsigned feats;
- int rc;
- char *name;
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_grp_idsp == NULL || /* array of ids */
- args->lstio_grp_count <= 0 ||
- args->lstio_grp_resultp == NULL ||
- args->lstio_grp_featp == NULL ||
- args->lstio_grp_namep == NULL ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
- return -EFAULT;
- }
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_nodes_add(name, args->lstio_grp_count,
- args->lstio_grp_idsp, &feats,
- args->lstio_grp_resultp);
-
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
- if (rc == 0 &&
- copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
- return -EINVAL;
- }
-
- return rc;
-}
-
-static int
-lst_group_list_ioctl(lstio_group_list_args_t *args)
-{
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_grp_idx < 0 ||
- args->lstio_grp_namep == NULL ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- return lstcon_group_list(args->lstio_grp_idx,
- args->lstio_grp_nmlen,
- args->lstio_grp_namep);
-}
-
-static int
-lst_group_info_ioctl(lstio_group_info_args_t *args)
-{
- char *name;
- int ndent;
- int index;
- int rc;
-
- if (args->lstio_grp_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_grp_namep == NULL ||
- args->lstio_grp_nmlen <= 0 ||
- args->lstio_grp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (args->lstio_grp_entp == NULL && /* output: group entry */
- args->lstio_grp_dentsp == NULL) /* output: node entry */
- return -EINVAL;
-
- if (args->lstio_grp_dentsp != NULL) { /* have node entry */
- if (args->lstio_grp_idxp == NULL || /* node index */
- args->lstio_grp_ndentp == NULL) /* # of node entry */
- return -EINVAL;
-
- if (copy_from_user(&ndent, args->lstio_grp_ndentp,
- sizeof(ndent)) ||
- copy_from_user(&index, args->lstio_grp_idxp,
- sizeof(index)))
- return -EFAULT;
-
- if (ndent <= 0 || index < 0)
- return -EINVAL;
- }
-
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_grp_nmlen] = 0;
-
- rc = lstcon_group_info(name, args->lstio_grp_entp,
- &index, &ndent, args->lstio_grp_dentsp);
-
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
- if (rc != 0)
- return rc;
-
- if (args->lstio_grp_dentsp != NULL &&
- (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
- copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
- rc = -EFAULT;
-
- return rc;
-}
-
-static int
-lst_batch_add_ioctl(lstio_batch_add_args_t *args)
-{
- int rc;
- char *name;
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_bat_namep == NULL ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_add(name);
-
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_batch_run_ioctl(lstio_batch_run_args_t *args)
-{
- int rc;
- char *name;
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_bat_namep == NULL ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_run(name, args->lstio_bat_timeout,
- args->lstio_bat_resultp);
-
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
-{
- int rc;
- char *name;
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_bat_resultp == NULL ||
- args->lstio_bat_namep == NULL ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_stop(name, args->lstio_bat_force,
- args->lstio_bat_resultp);
-
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_batch_query_ioctl(lstio_batch_query_args_t *args)
-{
- char *name;
- int rc;
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_bat_resultp == NULL ||
- args->lstio_bat_namep == NULL ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (args->lstio_bat_testidx < 0)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_test_batch_query(name,
- args->lstio_bat_testidx,
- args->lstio_bat_client,
- args->lstio_bat_timeout,
- args->lstio_bat_resultp);
-
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
- return rc;
-}
-
-static int
-lst_batch_list_ioctl(lstio_batch_list_args_t *args)
-{
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_bat_idx < 0 ||
- args->lstio_bat_namep == NULL ||
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- return lstcon_batch_list(args->lstio_bat_idx,
- args->lstio_bat_nmlen,
- args->lstio_bat_namep);
-}
-
-static int
-lst_batch_info_ioctl(lstio_batch_info_args_t *args)
-{
- char *name;
- int rc;
- int index;
- int ndent;
-
- if (args->lstio_bat_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_bat_namep == NULL || /* batch name */
- args->lstio_bat_nmlen <= 0 ||
- args->lstio_bat_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (args->lstio_bat_entp == NULL && /* output: batch entry */
- args->lstio_bat_dentsp == NULL) /* output: node entry */
- return -EINVAL;
-
- if (args->lstio_bat_dentsp != NULL) { /* have node entry */
- if (args->lstio_bat_idxp == NULL || /* node index */
- args->lstio_bat_ndentp == NULL) /* # of node entry */
- return -EINVAL;
-
- if (copy_from_user(&index, args->lstio_bat_idxp,
- sizeof(index)) ||
- copy_from_user(&ndent, args->lstio_bat_ndentp,
- sizeof(ndent)))
- return -EFAULT;
-
- if (ndent <= 0 || index < 0)
- return -EINVAL;
- }
-
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name,
- args->lstio_bat_namep, args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
-
- name[args->lstio_bat_nmlen] = 0;
-
- rc = lstcon_batch_info(name,
- args->lstio_bat_entp, args->lstio_bat_server,
- args->lstio_bat_testidx, &index, &ndent,
- args->lstio_bat_dentsp);
-
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
- if (rc != 0)
- return rc;
-
- if (args->lstio_bat_dentsp != NULL &&
- (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
- copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
- rc = -EFAULT;
-
- return rc;
-}
-
-static int
-lst_stat_query_ioctl(lstio_stat_args_t *args)
-{
- int rc;
- char *name;
-
- /* TODO: not finished */
- if (args->lstio_sta_key != console_session.ses_key)
- return -EACCES;
-
- if (args->lstio_sta_resultp == NULL ||
- (args->lstio_sta_namep == NULL &&
- args->lstio_sta_idsp == NULL) ||
- args->lstio_sta_nmlen <= 0 ||
- args->lstio_sta_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (args->lstio_sta_idsp != NULL &&
- args->lstio_sta_count <= 0)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name, args->lstio_sta_namep,
- args->lstio_sta_nmlen)) {
- LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
- return -EFAULT;
- }
-
- if (args->lstio_sta_idsp == NULL) {
- rc = lstcon_group_stat(name, args->lstio_sta_timeout,
- args->lstio_sta_resultp);
- } else {
- rc = lstcon_nodes_stat(args->lstio_sta_count,
- args->lstio_sta_idsp,
- args->lstio_sta_timeout,
- args->lstio_sta_resultp);
- }
-
- LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
-
- return rc;
-}
-
-static int lst_test_add_ioctl(lstio_test_args_t *args)
-{
- char *batch_name;
- char *src_name = NULL;
- char *dst_name = NULL;
- void *param = NULL;
- int ret = 0;
- int rc = -ENOMEM;
-
- if (args->lstio_tes_resultp == NULL ||
- args->lstio_tes_retp == NULL ||
- args->lstio_tes_bat_name == NULL || /* no specified batch */
- args->lstio_tes_bat_nmlen <= 0 ||
- args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
- args->lstio_tes_sgrp_name == NULL || /* no source group */
- args->lstio_tes_sgrp_nmlen <= 0 ||
- args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
- args->lstio_tes_dgrp_name == NULL || /* no target group */
- args->lstio_tes_dgrp_nmlen <= 0 ||
- args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
- return -EINVAL;
-
- if (args->lstio_tes_loop == 0 || /* negative is infinite */
- args->lstio_tes_concur <= 0 ||
- args->lstio_tes_dist <= 0 ||
- args->lstio_tes_span <= 0)
- return -EINVAL;
-
- /* have parameter, check if parameter length is valid */
- if (args->lstio_tes_param != NULL &&
- (args->lstio_tes_param_len <= 0 ||
- args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
- return -EINVAL;
-
- LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
- if (batch_name == NULL)
- return rc;
-
- LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1);
- if (src_name == NULL)
- goto out;
-
- LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1);
- if (dst_name == NULL)
- goto out;
-
- if (args->lstio_tes_param != NULL) {
- LIBCFS_ALLOC(param, args->lstio_tes_param_len);
- if (param == NULL)
- goto out;
- }
-
- rc = -EFAULT;
- if (copy_from_user(batch_name, args->lstio_tes_bat_name,
- args->lstio_tes_bat_nmlen) ||
- copy_from_user(src_name, args->lstio_tes_sgrp_name,
- args->lstio_tes_sgrp_nmlen) ||
- copy_from_user(dst_name, args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen) ||
- copy_from_user(param, args->lstio_tes_param,
- args->lstio_tes_param_len))
- goto out;
-
- rc = lstcon_test_add(batch_name,
- args->lstio_tes_type,
- args->lstio_tes_loop,
- args->lstio_tes_concur,
- args->lstio_tes_dist, args->lstio_tes_span,
- src_name, dst_name, param,
- args->lstio_tes_param_len,
- &ret, args->lstio_tes_resultp);
-
- if (ret != 0)
- rc = (copy_to_user(args->lstio_tes_retp, &ret,
- sizeof(ret))) ? -EFAULT : 0;
-out:
- if (batch_name != NULL)
- LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1);
-
- if (src_name != NULL)
- LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1);
-
- if (dst_name != NULL)
- LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1);
-
- if (param != NULL)
- LIBCFS_FREE(param, args->lstio_tes_param_len);
-
- return rc;
-}
-
-int
-lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
-{
- char *buf;
- int opc = data->ioc_u32[0];
- int rc;
-
- if (cmd != IOC_LIBCFS_LNETST)
- return -EINVAL;
-
- if (data->ioc_plen1 > PAGE_CACHE_SIZE)
- return -EINVAL;
-
- LIBCFS_ALLOC(buf, data->ioc_plen1);
- if (buf == NULL)
- return -ENOMEM;
-
- /* copy in parameter */
- if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
- LIBCFS_FREE(buf, data->ioc_plen1);
- return -EFAULT;
- }
-
- mutex_lock(&console_session.ses_mutex);
-
- console_session.ses_laststamp = ktime_get_real_seconds();
-
- if (console_session.ses_shutdown) {
- rc = -ESHUTDOWN;
- goto out;
- }
-
- if (console_session.ses_expired)
- lstcon_session_end();
-
- if (opc != LSTIO_SESSION_NEW &&
- console_session.ses_state == LST_SESSION_NONE) {
- CDEBUG(D_NET, "LST no active session\n");
- rc = -ESRCH;
- goto out;
- }
-
- memset(&console_session.ses_trans_stat, 0, sizeof(lstcon_trans_stat_t));
-
- switch (opc) {
- case LSTIO_SESSION_NEW:
- rc = lst_session_new_ioctl((lstio_session_new_args_t *)buf);
- break;
- case LSTIO_SESSION_END:
- rc = lst_session_end_ioctl((lstio_session_end_args_t *)buf);
- break;
- case LSTIO_SESSION_INFO:
- rc = lst_session_info_ioctl((lstio_session_info_args_t *)buf);
- break;
- case LSTIO_DEBUG:
- rc = lst_debug_ioctl((lstio_debug_args_t *)buf);
- break;
- case LSTIO_GROUP_ADD:
- rc = lst_group_add_ioctl((lstio_group_add_args_t *)buf);
- break;
- case LSTIO_GROUP_DEL:
- rc = lst_group_del_ioctl((lstio_group_del_args_t *)buf);
- break;
- case LSTIO_GROUP_UPDATE:
- rc = lst_group_update_ioctl((lstio_group_update_args_t *)buf);
- break;
- case LSTIO_NODES_ADD:
- rc = lst_nodes_add_ioctl((lstio_group_nodes_args_t *)buf);
- break;
- case LSTIO_GROUP_LIST:
- rc = lst_group_list_ioctl((lstio_group_list_args_t *)buf);
- break;
- case LSTIO_GROUP_INFO:
- rc = lst_group_info_ioctl((lstio_group_info_args_t *)buf);
- break;
- case LSTIO_BATCH_ADD:
- rc = lst_batch_add_ioctl((lstio_batch_add_args_t *)buf);
- break;
- case LSTIO_BATCH_START:
- rc = lst_batch_run_ioctl((lstio_batch_run_args_t *)buf);
- break;
- case LSTIO_BATCH_STOP:
- rc = lst_batch_stop_ioctl((lstio_batch_stop_args_t *)buf);
- break;
- case LSTIO_BATCH_QUERY:
- rc = lst_batch_query_ioctl((lstio_batch_query_args_t *)buf);
- break;
- case LSTIO_BATCH_LIST:
- rc = lst_batch_list_ioctl((lstio_batch_list_args_t *)buf);
- break;
- case LSTIO_BATCH_INFO:
- rc = lst_batch_info_ioctl((lstio_batch_info_args_t *)buf);
- break;
- case LSTIO_TEST_ADD:
- rc = lst_test_add_ioctl((lstio_test_args_t *)buf);
- break;
- case LSTIO_STAT_QUERY:
- rc = lst_stat_query_ioctl((lstio_stat_args_t *)buf);
- break;
- default:
- rc = -EINVAL;
- }
-
- if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
- sizeof(lstcon_trans_stat_t)))
- rc = -EFAULT;
-out:
- mutex_unlock(&console_session.ses_mutex);
-
- LIBCFS_FREE(buf, data->ioc_plen1);
-
- return rc;
-}
-
-EXPORT_SYMBOL(lstcon_ioctl_entry);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
deleted file mode 100644
index b70680b8bdbe..000000000000
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ /dev/null
@@ -1,1399 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- * lnet/selftest/conrpc.c
- * Console framework rpcs
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- */
-
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lib-lnet.h"
-#include "timer.h"
-#include "conrpc.h"
-#include "console.h"
-
-void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *,
- lstcon_node_t *, lstcon_trans_stat_t *);
-
-static void
-lstcon_rpc_done(srpc_client_rpc_t *rpc)
-{
- lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
-
- LASSERT(crpc != NULL && rpc == crpc->crp_rpc);
- LASSERT(crpc->crp_posted && !crpc->crp_finished);
-
- spin_lock(&rpc->crpc_lock);
-
- if (crpc->crp_trans == NULL) {
- /* Orphan RPC is not in any transaction,
- * I'm just a poor body and nobody loves me */
- spin_unlock(&rpc->crpc_lock);
-
- /* release it */
- lstcon_rpc_put(crpc);
- return;
- }
-
- /* not an orphan RPC */
- crpc->crp_finished = 1;
-
- if (crpc->crp_stamp == 0) {
- /* not aborted */
- LASSERT(crpc->crp_status == 0);
-
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = rpc->crpc_status;
- }
-
- /* wake up the transaction thread if I'm the last RPC in the transaction */
- if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
- wake_up(&crpc->crp_trans->tas_waitq);
-
- spin_unlock(&rpc->crpc_lock);
-}
-
-static int
-lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc)
-{
- crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
- feats, bulk_npg, bulk_len,
- lstcon_rpc_done, (void *)crpc);
- if (crpc->crp_rpc == NULL)
- return -ENOMEM;
-
- crpc->crp_trans = NULL;
- crpc->crp_node = nd;
- crpc->crp_posted = 0;
- crpc->crp_finished = 0;
- crpc->crp_unpacked = 0;
- crpc->crp_status = 0;
- crpc->crp_stamp = 0;
- crpc->crp_embedded = embedded;
- INIT_LIST_HEAD(&crpc->crp_link);
-
- atomic_inc(&console_session.ses_rpc_counter);
-
- return 0;
-}
-
-static int
-lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
-{
- lstcon_rpc_t *crpc = NULL;
- int rc;
-
- spin_lock(&console_session.ses_rpc_lock);
-
- if (!list_empty(&console_session.ses_rpc_freelist)) {
- crpc = list_entry(console_session.ses_rpc_freelist.next,
- lstcon_rpc_t, crp_link);
- list_del_init(&crpc->crp_link);
- }
-
- spin_unlock(&console_session.ses_rpc_lock);
-
- if (crpc == NULL) {
- LIBCFS_ALLOC(crpc, sizeof(*crpc));
- if (crpc == NULL)
- return -ENOMEM;
- }
-
- rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
- if (rc == 0) {
- *crpcpp = crpc;
- return 0;
- }
-
- LIBCFS_FREE(crpc, sizeof(*crpc));
-
- return rc;
-}
-
-void
-lstcon_rpc_put(lstcon_rpc_t *crpc)
-{
- srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
- int i;
-
- LASSERT(list_empty(&crpc->crp_link));
-
- for (i = 0; i < bulk->bk_niov; i++) {
- if (bulk->bk_iovs[i].kiov_page == NULL)
- continue;
-
- __free_page(bulk->bk_iovs[i].kiov_page);
- }
-
- srpc_client_rpc_decref(crpc->crp_rpc);
-
- if (crpc->crp_embedded) {
- /* embedded RPC, don't recycle it */
- memset(crpc, 0, sizeof(*crpc));
- crpc->crp_embedded = 1;
-
- } else {
- spin_lock(&console_session.ses_rpc_lock);
-
- list_add(&crpc->crp_link,
- &console_session.ses_rpc_freelist);
-
- spin_unlock(&console_session.ses_rpc_lock);
- }
-
- /* RPC is not alive now */
- atomic_dec(&console_session.ses_rpc_counter);
-}
-
-static void
-lstcon_rpc_post(lstcon_rpc_t *crpc)
-{
- lstcon_rpc_trans_t *trans = crpc->crp_trans;
-
- LASSERT(trans != NULL);
-
- atomic_inc(&trans->tas_remaining);
- crpc->crp_posted = 1;
-
- sfw_post_rpc(crpc->crp_rpc);
-}
-
-static char *
-lstcon_rpc_trans_name(int transop)
-{
- if (transop == LST_TRANS_SESNEW)
- return "SESNEW";
-
- if (transop == LST_TRANS_SESEND)
- return "SESEND";
-
- if (transop == LST_TRANS_SESQRY)
- return "SESQRY";
-
- if (transop == LST_TRANS_SESPING)
- return "SESPING";
-
- if (transop == LST_TRANS_TSBCLIADD)
- return "TSBCLIADD";
-
- if (transop == LST_TRANS_TSBSRVADD)
- return "TSBSRVADD";
-
- if (transop == LST_TRANS_TSBRUN)
- return "TSBRUN";
-
- if (transop == LST_TRANS_TSBSTOP)
- return "TSBSTOP";
-
- if (transop == LST_TRANS_TSBCLIQRY)
- return "TSBCLIQRY";
-
- if (transop == LST_TRANS_TSBSRVQRY)
- return "TSBSRVQRY";
-
- if (transop == LST_TRANS_STATQRY)
- return "STATQRY";
-
- return "Unknown";
-}
-
-int
-lstcon_rpc_trans_prep(struct list_head *translist,
- int transop, lstcon_rpc_trans_t **transpp)
-{
- lstcon_rpc_trans_t *trans;
-
- if (translist != NULL) {
- list_for_each_entry(trans, translist, tas_link) {
- /* Can't enqueue two private transactions on
- * the same object */
- if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
- return -EPERM;
- }
- }
-
- /* create a trans group */
- LIBCFS_ALLOC(trans, sizeof(*trans));
- if (trans == NULL)
- return -ENOMEM;
-
- trans->tas_opc = transop;
-
- if (translist == NULL)
- INIT_LIST_HEAD(&trans->tas_olink);
- else
- list_add_tail(&trans->tas_olink, translist);
-
- list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
-
- INIT_LIST_HEAD(&trans->tas_rpcs_list);
- atomic_set(&trans->tas_remaining, 0);
- init_waitqueue_head(&trans->tas_waitq);
-
- spin_lock(&console_session.ses_rpc_lock);
- trans->tas_features = console_session.ses_features;
- spin_unlock(&console_session.ses_rpc_lock);
-
- *transpp = trans;
- return 0;
-}
-
-void
-lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
-{
- list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
- crpc->crp_trans = trans;
-}
-
-void
-lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
-{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_node_t *nd;
-
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- rpc = crpc->crp_rpc;
-
- spin_lock(&rpc->crpc_lock);
-
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp != 0) { /* rpc done or aborted already */
- if (crpc->crp_stamp == 0) {
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = -EINTR;
- }
- spin_unlock(&rpc->crpc_lock);
- continue;
- }
-
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = error;
-
- spin_unlock(&rpc->crpc_lock);
-
- sfw_abort_rpc(rpc);
-
- if (error != -ETIMEDOUT)
- continue;
-
- nd = crpc->crp_node;
- if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
- continue;
-
- nd->nd_stamp = crpc->crp_stamp;
- nd->nd_state = LST_NODE_DOWN;
- }
-}
-
-static int
-lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
-{
- if (console_session.ses_shutdown &&
- !list_empty(&trans->tas_olink)) /* Not an end session RPC */
- return 1;
-
- return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0;
-}
-
-int
-lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
-{
- lstcon_rpc_t *crpc;
- int rc;
-
- if (list_empty(&trans->tas_rpcs_list))
- return 0;
-
- if (timeout < LST_TRANS_MIN_TIMEOUT)
- timeout = LST_TRANS_MIN_TIMEOUT;
-
- CDEBUG(D_NET, "Transaction %s started\n",
- lstcon_rpc_trans_name(trans->tas_opc));
-
- /* post all requests */
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- LASSERT(!crpc->crp_posted);
-
- lstcon_rpc_post(crpc);
- }
-
- mutex_unlock(&console_session.ses_mutex);
-
- rc = wait_event_interruptible_timeout(trans->tas_waitq,
- lstcon_rpc_trans_check(trans),
- cfs_time_seconds(timeout));
- rc = (rc > 0) ? 0 : ((rc < 0) ? -EINTR : -ETIMEDOUT);
-
- mutex_lock(&console_session.ses_mutex);
-
- if (console_session.ses_shutdown)
- rc = -ESHUTDOWN;
-
- if (rc != 0 || atomic_read(&trans->tas_remaining) != 0) {
- /* treat short timeout as canceled */
- if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2)
- rc = -EINTR;
-
- lstcon_rpc_trans_abort(trans, rc);
- }
-
- CDEBUG(D_NET, "Transaction %s stopped: %d\n",
- lstcon_rpc_trans_name(trans->tas_opc), rc);
-
- lstcon_rpc_trans_stat(trans, lstcon_trans_stat());
-
- return rc;
-}
-
-static int
-lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
-{
- lstcon_node_t *nd = crpc->crp_node;
- srpc_client_rpc_t *rpc = crpc->crp_rpc;
- srpc_generic_reply_t *rep;
-
- LASSERT(nd != NULL && rpc != NULL);
- LASSERT(crpc->crp_stamp != 0);
-
- if (crpc->crp_status != 0) {
- *msgpp = NULL;
- return crpc->crp_status;
- }
-
- *msgpp = &rpc->crpc_replymsg;
- if (!crpc->crp_unpacked) {
- sfw_unpack_message(*msgpp);
- crpc->crp_unpacked = 1;
- }
-
- if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
- return 0;
-
- nd->nd_stamp = crpc->crp_stamp;
- rep = &(*msgpp)->msg_body.reply;
-
- if (rep->sid.ses_nid == LNET_NID_ANY)
- nd->nd_state = LST_NODE_UNKNOWN;
- else if (lstcon_session_match(rep->sid))
- nd->nd_state = LST_NODE_ACTIVE;
- else
- nd->nd_state = LST_NODE_BUSY;
-
- return 0;
-}
-
-void
-lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
-{
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
- int error;
-
- LASSERT(stat != NULL);
-
- memset(stat, 0, sizeof(*stat));
-
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- lstcon_rpc_stat_total(stat, 1);
-
- LASSERT(crpc->crp_stamp != 0);
-
- error = lstcon_rpc_get_reply(crpc, &rep);
- if (error != 0) {
- lstcon_rpc_stat_failure(stat, 1);
- if (stat->trs_rpc_errno == 0)
- stat->trs_rpc_errno = -error;
-
- continue;
- }
-
- lstcon_rpc_stat_success(stat, 1);
-
- lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat);
- }
-
- if (trans->tas_opc == LST_TRANS_SESNEW && stat->trs_fwk_errno == 0) {
- stat->trs_fwk_errno =
- lstcon_session_feats_check(trans->tas_features);
- }
-
- CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n",
- lstcon_rpc_trans_name(trans->tas_opc),
- lstcon_rpc_stat_success(stat, 0),
- lstcon_rpc_stat_failure(stat, 0),
- lstcon_rpc_stat_total(stat, 0),
- stat->trs_rpc_errno, stat->trs_fwk_errno);
-
- return;
-}
-
-int
-lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- struct list_head *head_up,
- lstcon_rpc_readent_func_t readent)
-{
- struct list_head tmp;
- struct list_head *next;
- lstcon_rpc_ent_t *ent;
- srpc_generic_reply_t *rep;
- lstcon_rpc_t *crpc;
- srpc_msg_t *msg;
- lstcon_node_t *nd;
- long dur;
- struct timeval tv;
- int error;
-
- LASSERT(head_up != NULL);
-
- next = head_up;
-
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
- if (copy_from_user(&tmp, next,
- sizeof(struct list_head)))
- return -EFAULT;
-
- if (tmp.next == head_up)
- return 0;
-
- next = tmp.next;
-
- ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
-
- LASSERT(crpc->crp_stamp != 0);
-
- error = lstcon_rpc_get_reply(crpc, &msg);
-
- nd = crpc->crp_node;
-
- dur = (long)cfs_time_sub(crpc->crp_stamp,
- (unsigned long)console_session.ses_id.ses_stamp);
- jiffies_to_timeval(dur, &tv);
-
- if (copy_to_user(&ent->rpe_peer,
- &nd->nd_id, sizeof(lnet_process_id_t)) ||
- copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
- copy_to_user(&ent->rpe_state,
- &nd->nd_state, sizeof(nd->nd_state)) ||
- copy_to_user(&ent->rpe_rpc_errno, &error,
- sizeof(error)))
- return -EFAULT;
-
- if (error != 0)
- continue;
-
- /* RPC is done */
- rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
-
- if (copy_to_user(&ent->rpe_sid,
- &rep->sid, sizeof(lst_sid_t)) ||
- copy_to_user(&ent->rpe_fwk_errno,
- &rep->status, sizeof(rep->status)))
- return -EFAULT;
-
- if (readent == NULL)
- continue;
-
- error = readent(trans->tas_opc, msg, ent);
-
- if (error != 0)
- return error;
- }
-
- return 0;
-}
-
-void
-lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
-{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *tmp;
- int count = 0;
-
- list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list,
- crp_link) {
- rpc = crpc->crp_rpc;
-
- spin_lock(&rpc->crpc_lock);
-
- /* free it if not posted or finished already */
- if (!crpc->crp_posted || crpc->crp_finished) {
- spin_unlock(&rpc->crpc_lock);
-
- list_del_init(&crpc->crp_link);
- lstcon_rpc_put(crpc);
-
- continue;
- }
-
- /* RPCs may still not have been called back (even though LNetMDUnlink
- * has been called) because of the huge timeout for an inaccessible
- * network; don't make the user wait for them, just abandon them,
- * they will be recycled in the callback */
-
- LASSERT(crpc->crp_status != 0);
-
- crpc->crp_node = NULL;
- crpc->crp_trans = NULL;
- list_del_init(&crpc->crp_link);
- count++;
-
- spin_unlock(&rpc->crpc_lock);
-
- atomic_dec(&trans->tas_remaining);
- }
-
- LASSERT(atomic_read(&trans->tas_remaining) == 0);
-
- list_del(&trans->tas_link);
- if (!list_empty(&trans->tas_olink))
- list_del(&trans->tas_olink);
-
- CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
- lstcon_rpc_trans_name(trans->tas_opc), count);
-
- LIBCFS_FREE(trans, sizeof(*trans));
-
- return;
-}
-
-int
-lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
- unsigned feats, lstcon_rpc_t **crpc)
-{
- srpc_mksn_reqst_t *msrq;
- srpc_rmsn_reqst_t *rsrq;
- int rc;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION,
- feats, 0, 0, crpc);
- if (rc != 0)
- return rc;
-
- msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
- msrq->mksn_sid = console_session.ses_id;
- msrq->mksn_force = console_session.ses_force;
- strncpy(msrq->mksn_name, console_session.ses_name,
- strlen(console_session.ses_name));
- break;
-
- case LST_TRANS_SESEND:
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION,
- feats, 0, 0, crpc);
- if (rc != 0)
- return rc;
-
- rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst;
- rsrq->rmsn_sid = console_session.ses_id;
- break;
-
- default:
- LBUG();
- }
-
- return 0;
-}
-
-int
-lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
-{
- srpc_debug_reqst_t *drq;
- int rc;
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
- if (rc != 0)
- return rc;
-
- drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
-
- drq->dbg_sid = console_session.ses_id;
- drq->dbg_flags = 0;
-
- return rc;
-}
-
-int
-lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc)
-{
- lstcon_batch_t *batch;
- srpc_batch_reqst_t *brq;
- int rc;
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
- if (rc != 0)
- return rc;
-
- brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst;
-
- brq->bar_sid = console_session.ses_id;
- brq->bar_bid = tsb->tsb_id;
- brq->bar_testidx = tsb->tsb_index;
- brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
- (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP :
- SRPC_BATCH_OPC_QUERY);
-
- if (transop != LST_TRANS_TSBRUN &&
- transop != LST_TRANS_TSBSTOP)
- return 0;
-
- LASSERT(tsb->tsb_index == 0);
-
- batch = (lstcon_batch_t *)tsb;
- brq->bar_arg = batch->bat_arg;
-
- return 0;
-}
-
-int
-lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
-{
- srpc_stat_reqst_t *srq;
- int rc;
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
- if (rc != 0)
- return rc;
-
- srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst;
-
- srq->str_sid = console_session.ses_id;
- srq->str_type = 0; /* XXX remove it */
-
- return 0;
-}
-
-static lnet_process_id_packed_t *
-lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
-{
- lnet_process_id_packed_t *pid;
- int i;
-
- i = idx / SFW_ID_PER_PAGE;
-
- LASSERT(i < nkiov);
-
- pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
-
- return &pid[idx % SFW_ID_PER_PAGE];
-}
-
-static int
-lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
- int dist, int span, int nkiov, lnet_kiov_t *kiov)
-{
- lnet_process_id_packed_t *pid;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- int start;
- int end;
- int i = 0;
-
- LASSERT(dist >= 1);
- LASSERT(span >= 1);
- LASSERT(grp->grp_nnode >= 1);
-
- if (span > grp->grp_nnode)
- return -EINVAL;
-
- start = ((idx / dist) * span) % grp->grp_nnode;
- end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
-
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
- nd = ndl->ndl_node;
- if (i < start) {
- i++;
- continue;
- }
-
- if (i > (end >= start ? end : grp->grp_nnode))
- break;
-
- pid = lstcon_next_id((i - start), nkiov, kiov);
- pid->nid = nd->nd_id.nid;
- pid->pid = nd->nd_id.pid;
- i++;
- }
-
- if (start <= end) /* done */
- return 0;
-
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
- if (i > grp->grp_nnode + end)
- break;
-
- nd = ndl->ndl_node;
- pid = lstcon_next_id((i - start), nkiov, kiov);
- pid->nid = nd->nd_id.nid;
- pid->pid = nd->nd_id.pid;
- i++;
- }
-
- return 0;
-}
-
-static int
-lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
-{
- test_ping_req_t *prq = &req->tsr_u.ping;
-
- prq->png_size = param->png_size;
- prq->png_flags = param->png_flags;
- /* TODO dest */
- return 0;
-}
-
-static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
-{
- test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
-
- brq->blk_opc = param->blk_opc;
- brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE;
- brq->blk_flags = param->blk_flags;
-
- return 0;
-}
-
-static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
-{
- test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
-
- brq->blk_opc = param->blk_opc;
- brq->blk_flags = param->blk_flags;
- brq->blk_len = param->blk_size;
- brq->blk_offset = 0; /* reserved */
-
- return 0;
-}
-
-int
-lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_test_t *test, lstcon_rpc_t **crpc)
-{
- lstcon_group_t *sgrp = test->tes_src_grp;
- lstcon_group_t *dgrp = test->tes_dst_grp;
- srpc_test_reqst_t *trq;
- srpc_bulk_t *bulk;
- int i;
- int npg = 0;
- int nob = 0;
- int rc = 0;
-
- if (transop == LST_TRANS_TSBCLIADD) {
- npg = sfw_id_pages(test->tes_span);
- nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
- npg * PAGE_CACHE_SIZE :
- sizeof(lnet_process_id_packed_t) * test->tes_span;
- }
-
- rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc);
- if (rc != 0)
- return rc;
-
- trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
-
- if (transop == LST_TRANS_TSBSRVADD) {
- int ndist = (sgrp->grp_nnode + test->tes_dist - 1) /
- test->tes_dist;
- int nspan = (dgrp->grp_nnode + test->tes_span - 1) /
- test->tes_span;
- int nmax = (ndist + nspan - 1) / nspan;
-
- trq->tsr_ndest = 0;
- trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
-
- } else {
- bulk = &(*crpc)->crp_rpc->crpc_bulk;
-
- for (i = 0; i < npg; i++) {
- int len;
-
- LASSERT(nob > 0);
-
- len = (feats & LST_FEAT_BULK_LEN) == 0 ?
- PAGE_CACHE_SIZE :
- min_t(int, nob, PAGE_CACHE_SIZE);
- nob -= len;
-
- bulk->bk_iovs[i].kiov_offset = 0;
- bulk->bk_iovs[i].kiov_len = len;
- bulk->bk_iovs[i].kiov_page =
- alloc_page(GFP_IOFS);
-
- if (bulk->bk_iovs[i].kiov_page == NULL) {
- lstcon_rpc_put(*crpc);
- return -ENOMEM;
- }
- }
-
- bulk->bk_sink = 0;
-
- LASSERT(transop == LST_TRANS_TSBCLIADD);
-
- rc = lstcon_dstnodes_prep(test->tes_dst_grp,
- test->tes_cliidx++,
- test->tes_dist,
- test->tes_span,
- npg, &bulk->bk_iovs[0]);
- if (rc != 0) {
- lstcon_rpc_put(*crpc);
- return rc;
- }
-
- trq->tsr_ndest = test->tes_span;
- trq->tsr_loop = test->tes_loop;
- }
-
- trq->tsr_sid = console_session.ses_id;
- trq->tsr_bid = test->tes_hdr.tsb_id;
- trq->tsr_concur = test->tes_concur;
- trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
- trq->tsr_stop_onerr = !!test->tes_stop_onerr;
-
- switch (test->tes_type) {
- case LST_TEST_PING:
- trq->tsr_service = SRPC_SERVICE_PING;
- rc = lstcon_pingrpc_prep((lst_test_ping_param_t *)
- &test->tes_param[0], trq);
- break;
-
- case LST_TEST_BULK:
- trq->tsr_service = SRPC_SERVICE_BRW;
- if ((feats & LST_FEAT_BULK_LEN) == 0) {
- rc = lstcon_bulkrpc_v0_prep((lst_test_bulk_param_t *)
- &test->tes_param[0], trq);
- } else {
- rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *)
- &test->tes_param[0], trq);
- }
-
- break;
- default:
- LBUG();
- break;
- }
-
- return rc;
-}
-
-static int
-lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
- lstcon_node_t *nd, srpc_msg_t *reply)
-{
- srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
- int status = mksn_rep->mksn_status;
-
- if (status == 0 &&
- (reply->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
- mksn_rep->mksn_status = EPROTO;
- status = EPROTO;
- }
-
- if (status == EPROTO) {
- CNETERR("session protocol error from %s: %u\n",
- libcfs_nid2str(nd->nd_id.nid),
- reply->msg_ses_feats);
- }
-
- if (status != 0)
- return status;
-
- if (!trans->tas_feats_updated) {
- trans->tas_feats_updated = 1;
- trans->tas_features = reply->msg_ses_feats;
- }
-
- if (reply->msg_ses_feats != trans->tas_features) {
- CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
- reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
- trans->tas_features);
- status = mksn_rep->mksn_status = EPROTO;
- }
-
- if (status == 0) {
- /* session timeout on remote node */
- nd->nd_timeout = mksn_rep->mksn_timeout;
- }
-
- return status;
-}
-
-void
-lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
- lstcon_node_t *nd, lstcon_trans_stat_t *stat)
-{
- srpc_rmsn_reply_t *rmsn_rep;
- srpc_debug_reply_t *dbg_rep;
- srpc_batch_reply_t *bat_rep;
- srpc_test_reply_t *test_rep;
- srpc_stat_reply_t *stat_rep;
- int rc = 0;
-
- switch (trans->tas_opc) {
- case LST_TRANS_SESNEW:
- rc = lstcon_sesnew_stat_reply(trans, nd, msg);
- if (rc == 0) {
- lstcon_sesop_stat_success(stat, 1);
- return;
- }
-
- lstcon_sesop_stat_failure(stat, 1);
- break;
-
- case LST_TRANS_SESEND:
- rmsn_rep = &msg->msg_body.rmsn_reply;
- /* ESRCH is not an error for end session */
- if (rmsn_rep->rmsn_status == 0 ||
- rmsn_rep->rmsn_status == ESRCH) {
- lstcon_sesop_stat_success(stat, 1);
- return;
- }
-
- lstcon_sesop_stat_failure(stat, 1);
- rc = rmsn_rep->rmsn_status;
- break;
-
- case LST_TRANS_SESQRY:
- case LST_TRANS_SESPING:
- dbg_rep = &msg->msg_body.dbg_reply;
-
- if (dbg_rep->dbg_status == ESRCH) {
- lstcon_sesqry_stat_unknown(stat, 1);
- return;
- }
-
- if (lstcon_session_match(dbg_rep->dbg_sid))
- lstcon_sesqry_stat_active(stat, 1);
- else
- lstcon_sesqry_stat_busy(stat, 1);
- return;
-
- case LST_TRANS_TSBRUN:
- case LST_TRANS_TSBSTOP:
- bat_rep = &msg->msg_body.bat_reply;
-
- if (bat_rep->bar_status == 0) {
- lstcon_tsbop_stat_success(stat, 1);
- return;
- }
-
- if (bat_rep->bar_status == EPERM &&
- trans->tas_opc == LST_TRANS_TSBSTOP) {
- lstcon_tsbop_stat_success(stat, 1);
- return;
- }
-
- lstcon_tsbop_stat_failure(stat, 1);
- rc = bat_rep->bar_status;
- break;
-
- case LST_TRANS_TSBCLIQRY:
- case LST_TRANS_TSBSRVQRY:
- bat_rep = &msg->msg_body.bat_reply;
-
- if (bat_rep->bar_active != 0)
- lstcon_tsbqry_stat_run(stat, 1);
- else
- lstcon_tsbqry_stat_idle(stat, 1);
-
- if (bat_rep->bar_status == 0)
- return;
-
- lstcon_tsbqry_stat_failure(stat, 1);
- rc = bat_rep->bar_status;
- break;
-
- case LST_TRANS_TSBCLIADD:
- case LST_TRANS_TSBSRVADD:
- test_rep = &msg->msg_body.tes_reply;
-
- if (test_rep->tsr_status == 0) {
- lstcon_tsbop_stat_success(stat, 1);
- return;
- }
-
- lstcon_tsbop_stat_failure(stat, 1);
- rc = test_rep->tsr_status;
- break;
-
- case LST_TRANS_STATQRY:
- stat_rep = &msg->msg_body.stat_reply;
-
- if (stat_rep->str_status == 0) {
- lstcon_statqry_stat_success(stat, 1);
- return;
- }
-
- lstcon_statqry_stat_failure(stat, 1);
- rc = stat_rep->str_status;
- break;
-
- default:
- LBUG();
- }
-
- if (stat->trs_fwk_errno == 0)
- stat->trs_fwk_errno = rc;
-
- return;
-}
-
-int
-lstcon_rpc_trans_ndlist(struct list_head *ndlist,
- struct list_head *translist, int transop,
- void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- lstcon_rpc_t *rpc;
- unsigned feats;
- int rc;
-
-	/* Creating session RPCs for the list of nodes */
-
- rc = lstcon_rpc_trans_prep(translist, transop, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction %d: %d\n", transop, rc);
- return rc;
- }
-
- feats = trans->tas_features;
- list_for_each_entry(ndl, ndlist, ndl_link) {
- rc = condition == NULL ? 1 :
- condition(transop, ndl->ndl_node, arg);
-
- if (rc == 0)
- continue;
-
- if (rc < 0) {
- CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
- transop, rc);
- break;
- }
-
- nd = ndl->ndl_node;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- case LST_TRANS_SESEND:
- rc = lstcon_sesrpc_prep(nd, transop, feats, &rpc);
- break;
- case LST_TRANS_SESQRY:
- case LST_TRANS_SESPING:
- rc = lstcon_dbgrpc_prep(nd, feats, &rpc);
- break;
- case LST_TRANS_TSBCLIADD:
- case LST_TRANS_TSBSRVADD:
- rc = lstcon_testrpc_prep(nd, transop, feats,
- (lstcon_test_t *)arg, &rpc);
- break;
- case LST_TRANS_TSBRUN:
- case LST_TRANS_TSBSTOP:
- case LST_TRANS_TSBCLIQRY:
- case LST_TRANS_TSBSRVQRY:
- rc = lstcon_batrpc_prep(nd, transop, feats,
- (lstcon_tsb_hdr_t *)arg, &rpc);
- break;
- case LST_TRANS_STATQRY:
- rc = lstcon_statrpc_prep(nd, feats, &rpc);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- if (rc != 0) {
- CERROR("Failed to create RPC for transaction %s: %d\n",
- lstcon_rpc_trans_name(transop), rc);
- break;
- }
-
- lstcon_rpc_trans_addreq(trans, rpc);
- }
-
- if (rc == 0) {
- *transpp = trans;
- return 0;
- }
-
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-static void
-lstcon_rpc_pinger(void *arg)
-{
- stt_timer_t *ptimer = (stt_timer_t *)arg;
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
- srpc_debug_reqst_t *drq;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- int intv;
- int count = 0;
- int rc;
-
-	/* The RPC pinger is a special case of transaction;
-	 * it is called by a timer at an 8-second interval.
-	 */
- mutex_lock(&console_session.ses_mutex);
-
- if (console_session.ses_shutdown || console_session.ses_expired) {
- mutex_unlock(&console_session.ses_mutex);
- return;
- }
-
- if (!console_session.ses_expired &&
- ktime_get_real_seconds() - console_session.ses_laststamp >
- (time64_t)console_session.ses_timeout)
- console_session.ses_expired = 1;
-
- trans = console_session.ses_ping;
-
- LASSERT(trans != NULL);
-
- list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
- nd = ndl->ndl_node;
-
- if (console_session.ses_expired) {
- /* idle console, end session on all nodes */
- if (nd->nd_state != LST_NODE_ACTIVE)
- continue;
-
- rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND,
- trans->tas_features, &crpc);
- if (rc != 0) {
- CERROR("Out of memory\n");
- break;
- }
-
- lstcon_rpc_trans_addreq(trans, crpc);
- lstcon_rpc_post(crpc);
-
- continue;
- }
-
- crpc = &nd->nd_ping;
-
- if (crpc->crp_rpc != NULL) {
- LASSERT(crpc->crp_trans == trans);
- LASSERT(!list_empty(&crpc->crp_link));
-
- spin_lock(&crpc->crp_rpc->crpc_lock);
-
- LASSERT(crpc->crp_posted);
-
- if (!crpc->crp_finished) {
- /* in flight */
- spin_unlock(&crpc->crp_rpc->crpc_lock);
- continue;
- }
-
- spin_unlock(&crpc->crp_rpc->crpc_lock);
-
- lstcon_rpc_get_reply(crpc, &rep);
-
- list_del_init(&crpc->crp_link);
-
- lstcon_rpc_put(crpc);
- }
-
- if (nd->nd_state != LST_NODE_ACTIVE)
- continue;
-
- intv = (jiffies - nd->nd_stamp) / HZ;
- if (intv < nd->nd_timeout / 2)
- continue;
-
- rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG,
- trans->tas_features, 0, 0, 1, crpc);
- if (rc != 0) {
- CERROR("Out of memory\n");
- break;
- }
-
- drq = &crpc->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
-
- drq->dbg_sid = console_session.ses_id;
- drq->dbg_flags = 0;
-
- lstcon_rpc_trans_addreq(trans, crpc);
- lstcon_rpc_post(crpc);
-
- count++;
- }
-
- if (console_session.ses_expired) {
- mutex_unlock(&console_session.ses_mutex);
- return;
- }
-
- CDEBUG(D_NET, "Ping %d nodes in session\n", count);
-
- ptimer->stt_expires = ktime_get_real_seconds() + LST_PING_INTERVAL;
- stt_add_timer(ptimer);
-
- mutex_unlock(&console_session.ses_mutex);
-}
-
-int
-lstcon_rpc_pinger_start(void)
-{
- stt_timer_t *ptimer;
- int rc;
-
- LASSERT(list_empty(&console_session.ses_rpc_freelist));
- LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
-
- rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
- &console_session.ses_ping);
- if (rc != 0) {
- CERROR("Failed to create console pinger\n");
- return rc;
- }
-
- ptimer = &console_session.ses_ping_timer;
- ptimer->stt_expires = ktime_get_real_seconds() + LST_PING_INTERVAL;
-
- stt_add_timer(ptimer);
-
- return 0;
-}
-
-void
-lstcon_rpc_pinger_stop(void)
-{
- LASSERT(console_session.ses_shutdown);
-
- stt_del_timer(&console_session.ses_ping_timer);
-
- lstcon_rpc_trans_abort(console_session.ses_ping, -ESHUTDOWN);
- lstcon_rpc_trans_stat(console_session.ses_ping, lstcon_trans_stat());
- lstcon_rpc_trans_destroy(console_session.ses_ping);
-
- memset(lstcon_trans_stat(), 0, sizeof(lstcon_trans_stat_t));
-
- console_session.ses_ping = NULL;
-}
-
-void
-lstcon_rpc_cleanup_wait(void)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- struct list_head *pacer;
- struct list_head zlist;
-
- /* Called with hold of global mutex */
-
- LASSERT(console_session.ses_shutdown);
-
- while (!list_empty(&console_session.ses_trans_list)) {
- list_for_each(pacer, &console_session.ses_trans_list) {
- trans = list_entry(pacer, lstcon_rpc_trans_t,
- tas_link);
-
- CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
- lstcon_rpc_trans_name(trans->tas_opc));
-
- wake_up(&trans->tas_waitq);
- }
-
- mutex_unlock(&console_session.ses_mutex);
-
- CWARN("Session is shutting down, waiting for termination of transactions\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
-
- mutex_lock(&console_session.ses_mutex);
- }
-
- spin_lock(&console_session.ses_rpc_lock);
-
- lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
- console_session.ses_rpc_lock,
-		       "Network is not accessible or target is down, waiting for %d console RPCs to be recycled\n",
- atomic_read(&console_session.ses_rpc_counter));
-
- list_add(&zlist, &console_session.ses_rpc_freelist);
- list_del_init(&console_session.ses_rpc_freelist);
-
- spin_unlock(&console_session.ses_rpc_lock);
-
- while (!list_empty(&zlist)) {
- crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link);
-
- list_del(&crpc->crp_link);
- LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
- }
-}
-
-int
-lstcon_rpc_module_init(void)
-{
- INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list);
- console_session.ses_ping_timer.stt_func = lstcon_rpc_pinger;
- console_session.ses_ping_timer.stt_data = &console_session.ses_ping_timer;
-
- console_session.ses_ping = NULL;
-
- spin_lock_init(&console_session.ses_rpc_lock);
- atomic_set(&console_session.ses_rpc_counter, 0);
- INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
-
- return 0;
-}
-
-void
-lstcon_rpc_module_fini(void)
-{
- LASSERT(list_empty(&console_session.ses_rpc_freelist));
- LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
-}
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
deleted file mode 100644
index ae7ed75573b9..000000000000
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * /lnet/selftest/conrpc.h
- *
- * Console rpc
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- */
-
-#ifndef __LST_CONRPC_H__
-#define __LST_CONRPC_H__
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lnet.h"
-#include "../../include/linux/lnet/lib-types.h"
-#include "../../include/linux/lnet/lnetst.h"
-#include "rpc.h"
-#include "selftest.h"
-
-/* Console rpc and rpc transaction */
-#define LST_TRANS_TIMEOUT 30
-#define LST_TRANS_MIN_TIMEOUT 3
-
-#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
-
-#define LST_PING_INTERVAL 8
-
-struct lstcon_rpc_trans;
-struct lstcon_tsb_hdr;
-struct lstcon_test;
-struct lstcon_node;
-
-typedef struct lstcon_rpc {
- struct list_head crp_link; /* chain on rpc transaction */
- srpc_client_rpc_t *crp_rpc; /* client rpc */
- struct lstcon_node *crp_node; /* destination node */
- struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
-
- unsigned int crp_posted:1; /* rpc is posted */
- unsigned int crp_finished:1; /* rpc is finished */
- unsigned int crp_unpacked:1; /* reply is unpacked */
- /** RPC is embedded in other structure and can't free it */
- unsigned int crp_embedded:1;
- int crp_status; /* console rpc errors */
- unsigned long crp_stamp; /* replied time stamp */
-} lstcon_rpc_t;
-
-typedef struct lstcon_rpc_trans {
- struct list_head tas_olink; /* link chain on owner list */
- struct list_head tas_link; /* link chain on global list */
- int tas_opc; /* operation code of transaction */
- unsigned tas_feats_updated; /* features mask is uptodate */
- unsigned tas_features; /* test features mask */
- wait_queue_head_t tas_waitq; /* wait queue head */
- atomic_t tas_remaining; /* # of un-scheduled rpcs */
- struct list_head tas_rpcs_list; /* queued requests */
-} lstcon_rpc_trans_t;
-
-#define LST_TRANS_PRIVATE 0x1000
-
-#define LST_TRANS_SESNEW (LST_TRANS_PRIVATE | 0x01)
-#define LST_TRANS_SESEND (LST_TRANS_PRIVATE | 0x02)
-#define LST_TRANS_SESQRY 0x03
-#define LST_TRANS_SESPING 0x04
-
-#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11)
-#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12)
-#define LST_TRANS_TSBRUN (LST_TRANS_PRIVATE | 0x13)
-#define LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14)
-#define LST_TRANS_TSBCLIQRY 0x15
-#define LST_TRANS_TSBSRVQRY 0x16
-
-#define LST_TRANS_STATQRY 0x21
-
-typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
-
-int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned version, lstcon_rpc_t **crpc);
-int lstcon_dbgrpc_prep(struct lstcon_node *nd,
- unsigned version, lstcon_rpc_t **crpc);
-int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc);
-int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_test *test, lstcon_rpc_t **crpc);
-int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
- lstcon_rpc_t **crpc);
-void lstcon_rpc_put(lstcon_rpc_t *crpc);
-int lstcon_rpc_trans_prep(struct list_head *translist,
- int transop, lstcon_rpc_trans_t **transpp);
-int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
- struct list_head *translist, int transop,
- void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp);
-void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
- lstcon_trans_stat_t *stat);
-int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- struct list_head *head_up,
- lstcon_rpc_readent_func_t readent);
-void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
-void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
-void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req);
-int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout);
-int lstcon_rpc_pinger_start(void);
-void lstcon_rpc_pinger_stop(void);
-void lstcon_rpc_cleanup_wait(void);
-int lstcon_rpc_module_init(void);
-void lstcon_rpc_module_fini(void);
-
-
-#endif
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
deleted file mode 100644
index 024aaee76b7f..000000000000
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ /dev/null
@@ -1,2096 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- *
- * Infrastructure of LST console
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lib-lnet.h"
-#include "console.h"
-#include "conrpc.h"
-
-#define LST_NODE_STATE_COUNTER(nd, p) \
-do { \
- if ((nd)->nd_state == LST_NODE_ACTIVE) \
- (p)->nle_nactive++; \
- else if ((nd)->nd_state == LST_NODE_BUSY) \
- (p)->nle_nbusy++; \
- else if ((nd)->nd_state == LST_NODE_DOWN) \
- (p)->nle_ndown++; \
- else \
- (p)->nle_nunknown++; \
- (p)->nle_nnode++; \
-} while (0)
-
-lstcon_session_t console_session;
-
-static void
-lstcon_node_get(lstcon_node_t *nd)
-{
- LASSERT(nd->nd_ref >= 1);
-
- nd->nd_ref++;
-}
-
-static int
-lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
-{
- lstcon_ndlink_t *ndl;
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
-
- LASSERT(id.nid != LNET_NID_ANY);
-
- list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) {
- if (ndl->ndl_node->nd_id.nid != id.nid ||
- ndl->ndl_node->nd_id.pid != id.pid)
- continue;
-
- lstcon_node_get(ndl->ndl_node);
- *ndpp = ndl->ndl_node;
- return 0;
- }
-
- if (!create)
- return -ENOENT;
-
- LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
- if (*ndpp == NULL)
- return -ENOMEM;
-
- ndl = (lstcon_ndlink_t *)(*ndpp + 1);
-
- ndl->ndl_node = *ndpp;
-
- ndl->ndl_node->nd_ref = 1;
- ndl->ndl_node->nd_id = id;
- ndl->ndl_node->nd_stamp = cfs_time_current();
- ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
- ndl->ndl_node->nd_timeout = 0;
- memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
-
-	/* queued in the global hash & list; no refcount is taken by
-	 * the global hash & list, so the node will be released once
-	 * the caller drops its refcount */
- list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
- list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
-
- return 0;
-}
-
-static void
-lstcon_node_put(lstcon_node_t *nd)
-{
- lstcon_ndlink_t *ndl;
-
- LASSERT(nd->nd_ref > 0);
-
- if (--nd->nd_ref > 0)
- return;
-
- ndl = (lstcon_ndlink_t *)(nd + 1);
-
- LASSERT(!list_empty(&ndl->ndl_link));
- LASSERT(!list_empty(&ndl->ndl_hlink));
-
- /* remove from session */
- list_del(&ndl->ndl_link);
- list_del(&ndl->ndl_hlink);
-
- LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
-}
-
-static int
-lstcon_ndlink_find(struct list_head *hash,
- lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
-{
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- int rc;
-
- if (id.nid == LNET_NID_ANY)
- return -EINVAL;
-
- /* search in hash */
- list_for_each_entry(ndl, &hash[idx], ndl_hlink) {
- if (ndl->ndl_node->nd_id.nid != id.nid ||
- ndl->ndl_node->nd_id.pid != id.pid)
- continue;
-
- *ndlpp = ndl;
- return 0;
- }
-
- if (create == 0)
- return -ENOENT;
-
- /* find or create in session hash */
- rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0);
- if (rc != 0)
- return rc;
-
- LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
- if (ndl == NULL) {
- lstcon_node_put(nd);
- return -ENOMEM;
- }
-
- *ndlpp = ndl;
-
- ndl->ndl_node = nd;
- INIT_LIST_HEAD(&ndl->ndl_link);
- list_add_tail(&ndl->ndl_hlink, &hash[idx]);
-
- return 0;
-}
-
-static void
-lstcon_ndlink_release(lstcon_ndlink_t *ndl)
-{
- LASSERT(list_empty(&ndl->ndl_link));
- LASSERT(!list_empty(&ndl->ndl_hlink));
-
- list_del(&ndl->ndl_hlink); /* delete from hash */
- lstcon_node_put(ndl->ndl_node);
-
- LIBCFS_FREE(ndl, sizeof(*ndl));
-}
-
-static int
-lstcon_group_alloc(char *name, lstcon_group_t **grpp)
-{
- lstcon_group_t *grp;
- int i;
-
- LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
- grp_ndl_hash[LST_NODE_HASHSIZE]));
- if (grp == NULL)
- return -ENOMEM;
-
- grp->grp_ref = 1;
- if (name != NULL)
- strcpy(grp->grp_name, name);
-
- INIT_LIST_HEAD(&grp->grp_link);
- INIT_LIST_HEAD(&grp->grp_ndl_list);
- INIT_LIST_HEAD(&grp->grp_trans_list);
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++)
- INIT_LIST_HEAD(&grp->grp_ndl_hash[i]);
-
- *grpp = grp;
-
- return 0;
-}
-
-static void
-lstcon_group_addref(lstcon_group_t *grp)
-{
- grp->grp_ref++;
-}
-
-static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *);
-
-static void
-lstcon_group_drain(lstcon_group_t *grp, int keep)
-{
- lstcon_ndlink_t *ndl;
- lstcon_ndlink_t *tmp;
-
- list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
- if ((ndl->ndl_node->nd_state & keep) == 0)
- lstcon_group_ndlink_release(grp, ndl);
- }
-}
-
-static void
-lstcon_group_decref(lstcon_group_t *grp)
-{
- int i;
-
- if (--grp->grp_ref > 0)
- return;
-
- if (!list_empty(&grp->grp_link))
- list_del(&grp->grp_link);
-
- lstcon_group_drain(grp, 0);
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT(list_empty(&grp->grp_ndl_hash[i]));
- }
-
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
- grp_ndl_hash[LST_NODE_HASHSIZE]));
-}
-
-static int
-lstcon_group_find(const char *name, lstcon_group_t **grpp)
-{
- lstcon_group_t *grp;
-
- list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
- if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
- continue;
-
- lstcon_group_addref(grp); /* +1 ref for caller */
- *grpp = grp;
- return 0;
- }
-
- return -ENOENT;
-}
-
-static void
-lstcon_group_put(lstcon_group_t *grp)
-{
- lstcon_group_decref(grp);
-}
-
-static int
-lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
- lstcon_ndlink_t **ndlpp, int create)
-{
- int rc;
-
- rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
- if (rc != 0)
- return rc;
-
- if (!list_empty(&(*ndlpp)->ndl_link))
- return 0;
-
- list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
- grp->grp_nnode++;
-
- return 0;
-}
-
-static void
-lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
-{
- list_del_init(&ndl->ndl_link);
- lstcon_ndlink_release(ndl);
- grp->grp_nnode--;
-}
-
-static void
-lstcon_group_ndlink_move(lstcon_group_t *old,
- lstcon_group_t *new, lstcon_ndlink_t *ndl)
-{
- unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
- LST_NODE_HASHSIZE;
-
- list_del(&ndl->ndl_hlink);
- list_del(&ndl->ndl_link);
- old->grp_nnode--;
-
- list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
- list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
- new->grp_nnode++;
-
- return;
-}
-
-static void
-lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
-{
- lstcon_ndlink_t *ndl;
-
- while (!list_empty(&old->grp_ndl_list)) {
- ndl = list_entry(old->grp_ndl_list.next,
- lstcon_ndlink_t, ndl_link);
- lstcon_group_ndlink_move(old, new, ndl);
- }
-}
-
-static int
-lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
-{
- lstcon_group_t *grp = (lstcon_group_t *)arg;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- if (nd->nd_state == LST_NODE_ACTIVE)
- return 0;
- break;
-
- case LST_TRANS_SESEND:
- if (nd->nd_state != LST_NODE_ACTIVE)
- return 0;
-
- if (grp != NULL && nd->nd_ref > 1)
- return 0;
- break;
-
- case LST_TRANS_SESQRY:
- break;
-
- default:
- LBUG();
- }
-
- return 1;
-}
-
-static int
-lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
- lstcon_rpc_ent_t *ent_up)
-{
- srpc_debug_reply_t *rep;
-
- switch (transop) {
- case LST_TRANS_SESNEW:
- case LST_TRANS_SESEND:
- return 0;
-
- case LST_TRANS_SESQRY:
- rep = &msg->msg_body.dbg_reply;
-
- if (copy_to_user(&ent_up->rpe_priv[0],
- &rep->dbg_timeout, sizeof(int)) ||
- copy_to_user(&ent_up->rpe_payload[0],
- &rep->dbg_name, LST_NAME_SIZE))
- return -EFAULT;
-
- return 0;
-
- default:
- LBUG();
- }
-
- return 0;
-}
-
-static int
-lstcon_group_nodes_add(lstcon_group_t *grp,
- int count, lnet_process_id_t *ids_up,
- unsigned *featp, struct list_head *result_up)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
- lnet_process_id_t id;
- int i;
- int rc;
-
- rc = lstcon_group_alloc(NULL, &tmp);
- if (rc != 0) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
-
- for (i = 0 ; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- break;
- }
-
- /* skip if it's in this group already */
- rc = lstcon_group_ndlink_find(grp, id, &ndl, 0);
- if (rc == 0)
- continue;
-
- /* add to tmp group */
- rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1);
- if (rc != 0) {
- CERROR("Can't create ndlink, out of memory\n");
- break;
- }
- }
-
- if (rc != 0) {
- lstcon_group_put(tmp);
- return rc;
- }
-
- rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
- &tmp->grp_trans_list, LST_TRANS_SESNEW,
- tmp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- lstcon_group_put(tmp);
- return rc;
- }
-
- /* post all RPCs */
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_sesrpc_readent);
- *featp = trans->tas_features;
-
-	/* destroy all RPCs */
- lstcon_rpc_trans_destroy(trans);
-
- lstcon_group_move(tmp, grp);
- lstcon_group_put(tmp);
-
- return rc;
-}
-
-static int
-lstcon_group_nodes_remove(lstcon_group_t *grp,
- int count, lnet_process_id_t *ids_up,
- struct list_head *result_up)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
- lnet_process_id_t id;
- int rc;
- int i;
-
- /* End session and remove node from the group */
-
- rc = lstcon_group_alloc(NULL, &tmp);
- if (rc != 0) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- goto error;
- }
-
- /* move node to tmp group */
- if (lstcon_group_ndlink_find(grp, id, &ndl, 0) == 0)
- lstcon_group_ndlink_move(grp, tmp, ndl);
- }
-
- rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
- &tmp->grp_trans_list, LST_TRANS_SESEND,
- tmp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- goto error;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
- /* release nodes anyway, because we can't rollback status */
- lstcon_group_put(tmp);
-
- return rc;
-error:
- lstcon_group_move(tmp, grp);
- lstcon_group_put(tmp);
-
- return rc;
-}
-
-int
-lstcon_group_add(char *name)
-{
- lstcon_group_t *grp;
- int rc;
-
- rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
- if (rc != 0) {
-		/* a group with the same name already exists */
- lstcon_group_put(grp);
- return rc;
- }
-
- rc = lstcon_group_alloc(name, &grp);
- if (rc != 0) {
- CERROR("Can't allocate descriptor for group %s\n", name);
- return -ENOMEM;
- }
-
- list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
-
- return rc;
-}
-
-int
-lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up,
- unsigned *featp, struct list_head *result_up)
-{
- lstcon_group_t *grp;
- int rc;
-
- LASSERT(count > 0);
- LASSERT(ids_up != NULL);
-
- rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
-		/* referred by other threads or a test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_put(grp);
-
- return -EBUSY;
- }
-
- rc = lstcon_group_nodes_add(grp, count, ids_up, featp, result_up);
-
- lstcon_group_put(grp);
-
- return rc;
-}
-
-int
-lstcon_group_del(char *name)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group: %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
-		/* referred by other threads or tests */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_put(grp);
- return -EBUSY;
- }
-
- rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
- &grp->grp_trans_list, LST_TRANS_SESEND,
- grp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- lstcon_group_put(grp);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- lstcon_rpc_trans_destroy(trans);
-
- lstcon_group_put(grp);
-	/* -ref for the session; it's destroyed and status can't be
-	 * rolled back, so destroy the group anyway */
- lstcon_group_put(grp);
-
- return rc;
-}
-
-int
-lstcon_group_clean(char *name, int args)
-{
- lstcon_group_t *grp = NULL;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_put(grp);
- return -EBUSY;
- }
-
- args = (LST_NODE_ACTIVE | LST_NODE_BUSY |
- LST_NODE_DOWN | LST_NODE_UNKNOWN) & ~args;
-
- lstcon_group_drain(grp, args);
-
- lstcon_group_put(grp);
- /* release empty group */
- if (list_empty(&grp->grp_ndl_list))
- lstcon_group_put(grp);
-
- return 0;
-}
-
-int
-lstcon_nodes_remove(char *name, int count,
- lnet_process_id_t *ids_up, struct list_head *result_up)
-{
- lstcon_group_t *grp = NULL;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group: %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_put(grp);
- return -EBUSY;
- }
-
- rc = lstcon_group_nodes_remove(grp, count, ids_up, result_up);
-
- lstcon_group_put(grp);
- /* release empty group */
- if (list_empty(&grp->grp_ndl_list))
- lstcon_group_put(grp);
-
- return rc;
-}
-
-int
-lstcon_group_refresh(char *name, struct list_head *result_up)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group: %s\n", name);
- return rc;
- }
-
- if (grp->grp_ref > 2) {
- /* referred by test */
- CDEBUG(D_NET, "Group %s is busy\n", name);
- lstcon_group_put(grp);
- return -EBUSY;
- }
-
-	/* re-invite all inactive nodes in the group */
- rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
- &grp->grp_trans_list, LST_TRANS_SESNEW,
- grp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
- /* local error, return */
- CDEBUG(D_NET, "Can't create transaction: %d\n", rc);
- lstcon_group_put(grp);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
- /* -ref for me */
- lstcon_group_put(grp);
-
- return rc;
-}
-
-int
-lstcon_group_list(int index, int len, char *name_up)
-{
- lstcon_group_t *grp;
-
- LASSERT(index >= 0);
- LASSERT(name_up != NULL);
-
- list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
- if (index-- == 0) {
- return copy_to_user(name_up, grp->grp_name, len) ?
- -EFAULT : 0;
- }
- }
-
- return -ENOENT;
-}
-
-static int
-lstcon_nodes_getent(struct list_head *head, int *index_p,
- int *count_p, lstcon_node_ent_t *dents_up)
-{
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- int count = 0;
- int index = 0;
-
- LASSERT(index_p != NULL && count_p != NULL);
- LASSERT(dents_up != NULL);
- LASSERT(*index_p >= 0);
- LASSERT(*count_p > 0);
-
- list_for_each_entry(ndl, head, ndl_link) {
- if (index++ < *index_p)
- continue;
-
- if (count >= *count_p)
- break;
-
- nd = ndl->ndl_node;
- if (copy_to_user(&dents_up[count].nde_id,
- &nd->nd_id, sizeof(nd->nd_id)) ||
- copy_to_user(&dents_up[count].nde_state,
- &nd->nd_state, sizeof(nd->nd_state)))
- return -EFAULT;
-
- count++;
- }
-
- if (index <= *index_p)
- return -ENOENT;
-
- *count_p = count;
- *index_p = index;
-
- return 0;
-}
-
-int
-lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
- int *index_p, int *count_p, lstcon_node_ent_t *dents_up)
-{
- lstcon_ndlist_ent_t *gentp;
- lstcon_group_t *grp;
- lstcon_ndlink_t *ndl;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", name);
- return rc;
- }
-
- if (dents_up) {
- /* verbose query */
- rc = lstcon_nodes_getent(&grp->grp_ndl_list,
- index_p, count_p, dents_up);
- lstcon_group_put(grp);
-
- return rc;
- }
-
- /* non-verbose query */
- LIBCFS_ALLOC(gentp, sizeof(lstcon_ndlist_ent_t));
- if (gentp == NULL) {
- CERROR("Can't allocate ndlist_ent\n");
- lstcon_group_put(grp);
-
- return -ENOMEM;
- }
-
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
-
- rc = copy_to_user(gents_p, gentp,
- sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
-
- LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
-
- lstcon_group_put(grp);
-
- return 0;
-}
-
-static int
-lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
-{
- lstcon_batch_t *bat;
-
- list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
- if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
- *batpp = bat;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-int
-lstcon_batch_add(char *name)
-{
- lstcon_batch_t *bat;
- int i;
- int rc;
-
- rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
- if (rc != 0) {
- CDEBUG(D_NET, "Batch %s already exists\n", name);
- return rc;
- }
-
- LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
- if (bat == NULL) {
- CERROR("Can't allocate descriptor for batch %s\n", name);
- return -ENOMEM;
- }
-
- LIBCFS_ALLOC(bat->bat_cli_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
- if (bat->bat_cli_hash == NULL) {
- CERROR("Can't allocate hash for batch %s\n", name);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
-
- return -ENOMEM;
- }
-
- LIBCFS_ALLOC(bat->bat_srv_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
- if (bat->bat_srv_hash == NULL) {
- CERROR("Can't allocate hash for batch %s\n", name);
-		LIBCFS_FREE(bat->bat_cli_hash,
-			    sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
-
- return -ENOMEM;
- }
-
- strcpy(bat->bat_name, name);
- bat->bat_hdr.tsb_index = 0;
- bat->bat_hdr.tsb_id.bat_id = ++console_session.ses_id_cookie;
-
- bat->bat_ntest = 0;
- bat->bat_state = LST_BATCH_IDLE;
-
- INIT_LIST_HEAD(&bat->bat_cli_list);
- INIT_LIST_HEAD(&bat->bat_srv_list);
- INIT_LIST_HEAD(&bat->bat_test_list);
- INIT_LIST_HEAD(&bat->bat_trans_list);
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- INIT_LIST_HEAD(&bat->bat_cli_hash[i]);
- INIT_LIST_HEAD(&bat->bat_srv_hash[i]);
- }
-
- list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
-
- return rc;
-}
-
-int
-lstcon_batch_list(int index, int len, char *name_up)
-{
- lstcon_batch_t *bat;
-
- LASSERT(name_up != NULL);
- LASSERT(index >= 0);
-
- list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
- if (index-- == 0) {
- return copy_to_user(name_up, bat->bat_name, len) ?
- -EFAULT : 0;
- }
- }
-
- return -ENOENT;
-}
-
-int
-lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
- int testidx, int *index_p, int *ndent_p,
- lstcon_node_ent_t *dents_up)
-{
- lstcon_test_batch_ent_t *entp;
- struct list_head *clilst;
- struct list_head *srvlst;
- lstcon_test_t *test = NULL;
- lstcon_batch_t *bat;
- lstcon_ndlink_t *ndl;
- int rc;
-
- rc = lstcon_batch_find(name, &bat);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return -ENOENT;
- }
-
- if (testidx > 0) {
-		/* query test; test index starts from 1 */
- list_for_each_entry(test, &bat->bat_test_list, tes_link) {
- if (testidx-- == 1)
- break;
- }
-
- if (testidx > 0) {
- CDEBUG(D_NET, "Can't find specified test in batch\n");
- return -ENOENT;
- }
- }
-
- clilst = (test == NULL) ? &bat->bat_cli_list :
- &test->tes_src_grp->grp_ndl_list;
- srvlst = (test == NULL) ? &bat->bat_srv_list :
- &test->tes_dst_grp->grp_ndl_list;
-
- if (dents_up != NULL) {
- rc = lstcon_nodes_getent((server ? srvlst : clilst),
- index_p, ndent_p, dents_up);
- return rc;
- }
-
- /* non-verbose query */
- LIBCFS_ALLOC(entp, sizeof(lstcon_test_batch_ent_t));
- if (entp == NULL)
- return -ENOMEM;
-
- if (test == NULL) {
- entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
- entp->u.tbe_batch.bae_state = bat->bat_state;
-
- } else {
-
- entp->u.tbe_test.tse_type = test->tes_type;
- entp->u.tbe_test.tse_loop = test->tes_loop;
- entp->u.tbe_test.tse_concur = test->tes_concur;
- }
-
- list_for_each_entry(ndl, clilst, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_cli_nle);
-
- list_for_each_entry(ndl, srvlst, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
-
- rc = copy_to_user(ent_up, entp,
- sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
-
- LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
-
- return rc;
-}
-
-static int
-lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
-{
- switch (transop) {
- case LST_TRANS_TSBRUN:
- if (nd->nd_state != LST_NODE_ACTIVE)
- return -ENETDOWN;
- break;
-
- case LST_TRANS_TSBSTOP:
- if (nd->nd_state != LST_NODE_ACTIVE)
- return 0;
- break;
-
- case LST_TRANS_TSBCLIQRY:
- case LST_TRANS_TSBSRVQRY:
- break;
- }
-
- return 1;
-}
-
-static int
-lstcon_batch_op(lstcon_batch_t *bat, int transop,
- struct list_head *result_up)
-{
- lstcon_rpc_trans_t *trans;
- int rc;
-
- rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
- &bat->bat_trans_list, transop,
- bat, lstcon_batrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-int
-lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
-{
- lstcon_batch_t *bat;
- int rc;
-
- if (lstcon_batch_find(name, &bat) != 0) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return -ENOENT;
- }
-
- bat->bat_arg = timeout;
-
- rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up);
-
- /* mark batch as running if it's started in any node */
- if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0) != 0)
- bat->bat_state = LST_BATCH_RUNNING;
-
- return rc;
-}
-
-int
-lstcon_batch_stop(char *name, int force, struct list_head *result_up)
-{
- lstcon_batch_t *bat;
- int rc;
-
- if (lstcon_batch_find(name, &bat) != 0) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return -ENOENT;
- }
-
- bat->bat_arg = force;
-
- rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up);
-
- /* mark batch as stopped if all RPCs finished */
- if (lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0) == 0)
- bat->bat_state = LST_BATCH_IDLE;
-
- return rc;
-}
-
-static void
-lstcon_batch_destroy(lstcon_batch_t *bat)
-{
- lstcon_ndlink_t *ndl;
- lstcon_test_t *test;
- int i;
-
- list_del(&bat->bat_link);
-
- while (!list_empty(&bat->bat_test_list)) {
- test = list_entry(bat->bat_test_list.next,
- lstcon_test_t, tes_link);
- LASSERT(list_empty(&test->tes_trans_list));
-
- list_del(&test->tes_link);
-
- lstcon_group_put(test->tes_src_grp);
- lstcon_group_put(test->tes_dst_grp);
-
- LIBCFS_FREE(test, offsetof(lstcon_test_t,
- tes_param[test->tes_paramlen]));
- }
-
- LASSERT(list_empty(&bat->bat_trans_list));
-
- while (!list_empty(&bat->bat_cli_list)) {
- ndl = list_entry(bat->bat_cli_list.next,
- lstcon_ndlink_t, ndl_link);
- list_del_init(&ndl->ndl_link);
-
- lstcon_ndlink_release(ndl);
- }
-
- while (!list_empty(&bat->bat_srv_list)) {
- ndl = list_entry(bat->bat_srv_list.next,
- lstcon_ndlink_t, ndl_link);
- list_del_init(&ndl->ndl_link);
-
- lstcon_ndlink_release(ndl);
- }
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT(list_empty(&bat->bat_cli_hash[i]));
- LASSERT(list_empty(&bat->bat_srv_hash[i]));
- }
-
- LIBCFS_FREE(bat->bat_cli_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat->bat_srv_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
-}
-
-static int
-lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
-{
- lstcon_test_t *test;
- lstcon_batch_t *batch;
- lstcon_ndlink_t *ndl;
- struct list_head *hash;
- struct list_head *head;
-
- test = (lstcon_test_t *)arg;
- LASSERT(test != NULL);
-
- batch = test->tes_batch;
- LASSERT(batch != NULL);
-
- if (test->tes_oneside &&
- transop == LST_TRANS_TSBSRVADD)
- return 0;
-
- if (nd->nd_state != LST_NODE_ACTIVE)
- return -ENETDOWN;
-
- if (transop == LST_TRANS_TSBCLIADD) {
- hash = batch->bat_cli_hash;
- head = &batch->bat_cli_list;
-
- } else {
- LASSERT(transop == LST_TRANS_TSBSRVADD);
-
- hash = batch->bat_srv_hash;
- head = &batch->bat_srv_list;
- }
-
- LASSERT(nd->nd_id.nid != LNET_NID_ANY);
-
- if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
- return -ENOMEM;
-
- if (list_empty(&ndl->ndl_link))
- list_add_tail(&ndl->ndl_link, head);
-
- return 1;
-}
-
-static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
- int transop;
- int rc;
-
- LASSERT(test->tes_src_grp != NULL);
- LASSERT(test->tes_dst_grp != NULL);
-
- transop = LST_TRANS_TSBSRVADD;
- grp = test->tes_dst_grp;
-again:
- rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
- &test->tes_trans_list, transop,
- test, lstcon_testrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
- lstcon_trans_stat()->trs_fwk_errno != 0) {
- lstcon_rpc_trans_interpreter(trans, result_up, NULL);
-
- lstcon_rpc_trans_destroy(trans);
- /* return if any error */
- CDEBUG(D_NET, "Failed to add test %s, RPC error %d, framework error %d\n",
- transop == LST_TRANS_TSBCLIADD ? "client" : "server",
- lstcon_trans_stat()->trs_rpc_errno,
- lstcon_trans_stat()->trs_fwk_errno);
-
- return rc;
- }
-
- lstcon_rpc_trans_destroy(trans);
-
- if (transop == LST_TRANS_TSBCLIADD)
- return rc;
-
- transop = LST_TRANS_TSBCLIADD;
- grp = test->tes_src_grp;
- test->tes_cliidx = 0;
-
- /* requests to test clients */
- goto again;
-}
-
-static int
-lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
-{
- int rc;
-
- rc = lstcon_batch_find(name, batch);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find batch %s\n", name);
- return rc;
- }
-
- if ((*batch)->bat_state != LST_BATCH_IDLE) {
- CDEBUG(D_NET, "Can't change running batch %s\n", name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-lstcon_verify_group(const char *name, lstcon_group_t **grp)
-{
- int rc;
- lstcon_ndlink_t *ndl;
-
- rc = lstcon_group_find(name, grp);
- if (rc != 0) {
- CDEBUG(D_NET, "can't find group %s\n", name);
- return rc;
- }
-
- list_for_each_entry(ndl, &(*grp)->grp_ndl_list, ndl_link) {
- if (ndl->ndl_node->nd_state == LST_NODE_ACTIVE)
- return 0;
- }
-
- CDEBUG(D_NET, "Group %s has no ACTIVE nodes\n", name);
-
- return -EINVAL;
-}
-
-int
-lstcon_test_add(char *batch_name, int type, int loop,
- int concur, int dist, int span,
- char *src_name, char *dst_name,
- void *param, int paramlen, int *retp,
- struct list_head *result_up)
-{
- lstcon_test_t *test = NULL;
- int rc;
- lstcon_group_t *src_grp = NULL;
- lstcon_group_t *dst_grp = NULL;
- lstcon_batch_t *batch = NULL;
-
- /*
- * verify that a batch of the given name exists, and the groups
- * that will be part of the batch exist and have at least one
- * active node
- */
- rc = lstcon_verify_batch(batch_name, &batch);
- if (rc != 0)
- goto out;
-
- rc = lstcon_verify_group(src_name, &src_grp);
- if (rc != 0)
- goto out;
-
- rc = lstcon_verify_group(dst_name, &dst_grp);
- if (rc != 0)
- goto out;
-
- if (dst_grp->grp_userland)
- *retp = 1;
-
- LIBCFS_ALLOC(test, offsetof(lstcon_test_t, tes_param[paramlen]));
- if (!test) {
- CERROR("Can't allocate test descriptor\n");
- rc = -ENOMEM;
-
- goto out;
- }
-
- test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
- test->tes_batch = batch;
- test->tes_type = type;
- test->tes_oneside = 0; /* TODO */
- test->tes_loop = loop;
- test->tes_concur = concur;
- test->tes_stop_onerr = 1; /* TODO */
- test->tes_span = span;
- test->tes_dist = dist;
- test->tes_cliidx = 0; /* just used for creating RPC */
- test->tes_src_grp = src_grp;
- test->tes_dst_grp = dst_grp;
- INIT_LIST_HEAD(&test->tes_trans_list);
-
- if (param != NULL) {
- test->tes_paramlen = paramlen;
- memcpy(&test->tes_param[0], param, paramlen);
- }
-
- rc = lstcon_test_nodes_add(test, result_up);
-
- if (rc != 0)
- goto out;
-
- if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
- lstcon_trans_stat()->trs_fwk_errno != 0)
- CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
- batch_name);
-
- /* add to test list anyway, so user can check what's going on */
- list_add_tail(&test->tes_link, &batch->bat_test_list);
-
- batch->bat_ntest++;
- test->tes_hdr.tsb_index = batch->bat_ntest;
-
- /* hold groups so nobody can change them */
- return rc;
-out:
- if (test != NULL)
- LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));
-
- if (dst_grp != NULL)
- lstcon_group_put(dst_grp);
-
- if (src_grp != NULL)
- lstcon_group_put(src_grp);
-
- return rc;
-}
-
-static int
-lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
-{
- lstcon_test_t *test;
-
- list_for_each_entry(test, &batch->bat_test_list, tes_link) {
- if (idx == test->tes_hdr.tsb_index) {
- *testpp = test;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-static int
-lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
- lstcon_rpc_ent_t *ent_up)
-{
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
-
- LASSERT(transop == LST_TRANS_TSBCLIQRY ||
- transop == LST_TRANS_TSBSRVQRY);
-
- /* positive errno, framework error code */
- if (copy_to_user(&ent_up->rpe_priv[0],
- &rep->bar_active, sizeof(rep->bar_active)))
- return -EFAULT;
-
- return 0;
-}
-
-int
-lstcon_test_batch_query(char *name, int testidx, int client,
- int timeout, struct list_head *result_up)
-{
- lstcon_rpc_trans_t *trans;
- struct list_head *translist;
- struct list_head *ndlist;
- lstcon_tsb_hdr_t *hdr;
- lstcon_batch_t *batch;
- lstcon_test_t *test = NULL;
- int transop;
- int rc;
-
- rc = lstcon_batch_find(name, &batch);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find batch: %s\n", name);
- return rc;
- }
-
- if (testidx == 0) {
- translist = &batch->bat_trans_list;
- ndlist = &batch->bat_cli_list;
- hdr = &batch->bat_hdr;
-
- } else {
- /* query specified test only */
- rc = lstcon_test_find(batch, testidx, &test);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find test: %d\n", testidx);
- return rc;
- }
-
- translist = &test->tes_trans_list;
- ndlist = &test->tes_src_grp->grp_ndl_list;
- hdr = &test->tes_hdr;
- }
-
- transop = client ? LST_TRANS_TSBCLIQRY : LST_TRANS_TSBSRVQRY;
-
- rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr,
- lstcon_batrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, timeout);
-
- if (testidx == 0 && /* query a batch, not a test */
- lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) == 0 &&
- lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0) == 0) {
- /* all RPCs finished, and no active test */
- batch->bat_state = LST_BATCH_IDLE;
- }
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_tsbrpc_readent);
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-static int
-lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
- lstcon_rpc_ent_t *ent_up)
-{
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
- sfw_counters_t *sfwk_stat;
- srpc_counters_t *srpc_stat;
- lnet_counters_t *lnet_stat;
-
- if (rep->str_status != 0)
- return 0;
-
- sfwk_stat = (sfw_counters_t *)&ent_up->rpe_payload[0];
- srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
- lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
-
- if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
- copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
- copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-lstcon_ndlist_stat(struct list_head *ndlist,
- int timeout, struct list_head *result_up)
-{
- struct list_head head;
- lstcon_rpc_trans_t *trans;
- int rc;
-
- INIT_LIST_HEAD(&head);
-
- rc = lstcon_rpc_trans_ndlist(ndlist, &head,
- LST_TRANS_STATQRY, NULL, NULL, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout));
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_statrpc_readent);
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-int
-lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
-{
- lstcon_group_t *grp;
- int rc;
-
- rc = lstcon_group_find(grp_name, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Can't find group %s\n", grp_name);
- return rc;
- }
-
- rc = lstcon_ndlist_stat(&grp->grp_ndl_list, timeout, result_up);
-
- lstcon_group_put(grp);
-
- return rc;
-}
-
-int
-lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, struct list_head *result_up)
-{
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
- lnet_process_id_t id;
- int i;
- int rc;
-
- rc = lstcon_group_alloc(NULL, &tmp);
- if (rc != 0) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
-
- for (i = 0 ; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- break;
- }
-
- /* add to tmp group */
- rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2);
- if (rc != 0) {
- CDEBUG((rc == -ENOMEM) ? D_ERROR : D_NET,
- "Failed to find or create %s: %d\n",
- libcfs_id2str(id), rc);
- break;
- }
- }
-
- if (rc != 0) {
- lstcon_group_put(tmp);
- return rc;
- }
-
- rc = lstcon_ndlist_stat(&tmp->grp_ndl_list, timeout, result_up);
-
- lstcon_group_put(tmp);
-
- return rc;
-}
-
-static int
-lstcon_debug_ndlist(struct list_head *ndlist,
- struct list_head *translist,
- int timeout, struct list_head *result_up)
-{
- lstcon_rpc_trans_t *trans;
- int rc;
-
- rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
- NULL, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout));
-
- rc = lstcon_rpc_trans_interpreter(trans, result_up,
- lstcon_sesrpc_readent);
- lstcon_rpc_trans_destroy(trans);
-
- return rc;
-}
-
-int
-lstcon_session_debug(int timeout, struct list_head *result_up)
-{
- return lstcon_debug_ndlist(&console_session.ses_ndl_list,
- NULL, timeout, result_up);
-}
-
-int
-lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head *result_up)
-{
- lstcon_batch_t *bat;
- int rc;
-
- rc = lstcon_batch_find(name, &bat);
- if (rc != 0)
- return -ENOENT;
-
- rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list :
- &bat->bat_srv_list,
- NULL, timeout, result_up);
-
- return rc;
-}
-
-int
-lstcon_group_debug(int timeout, char *name,
- struct list_head *result_up)
-{
- lstcon_group_t *grp;
- int rc;
-
- rc = lstcon_group_find(name, &grp);
- if (rc != 0)
- return -ENOENT;
-
- rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
- timeout, result_up);
- lstcon_group_put(grp);
-
- return rc;
-}
-
-int
-lstcon_nodes_debug(int timeout,
- int count, lnet_process_id_t *ids_up,
- struct list_head *result_up)
-{
- lnet_process_id_t id;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *grp;
- int i;
- int rc;
-
- rc = lstcon_group_alloc(NULL, &grp);
- if (rc != 0) {
- CDEBUG(D_NET, "Out of memory\n");
- return rc;
- }
-
- for (i = 0; i < count; i++) {
- if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
- rc = -EFAULT;
- break;
- }
-
- /* node is added to tmp group */
- rc = lstcon_group_ndlink_find(grp, id, &ndl, 1);
- if (rc != 0) {
- CERROR("Can't create node link\n");
- break;
- }
- }
-
- if (rc != 0) {
- lstcon_group_put(grp);
- return rc;
- }
-
- rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
- timeout, result_up);
-
- lstcon_group_put(grp);
-
- return rc;
-}
-
-int
-lstcon_session_match(lst_sid_t sid)
-{
- return (console_session.ses_id.ses_nid == sid.ses_nid &&
- console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0;
-}
-
-static void
-lstcon_new_session_id(lst_sid_t *sid)
-{
- lnet_process_id_t id;
-
- LASSERT(console_session.ses_state == LST_SESSION_NONE);
-
- LNetGetId(1, &id);
- sid->ses_nid = id.nid;
- sid->ses_stamp = cfs_time_current();
-}
-
-extern srpc_service_t lstcon_acceptor_service;
-
-int
-lstcon_session_new(char *name, int key, unsigned feats,
- int timeout, int force, lst_sid_t *sid_up)
-{
- int rc = 0;
- int i;
-
- if (console_session.ses_state != LST_SESSION_NONE) {
- /* session exists */
- if (!force) {
- CNETERR("Session %s already exists\n",
- console_session.ses_name);
- return -EEXIST;
- }
-
- rc = lstcon_session_end();
-
-		/* lstcon_session_end() only returns local errors */
- if (rc != 0)
- return rc;
- }
-
- if ((feats & ~LST_FEATS_MASK) != 0) {
- CNETERR("Unknown session features %x\n",
- (feats & ~LST_FEATS_MASK));
- return -EINVAL;
- }
-
- for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
- LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
-
- lstcon_new_session_id(&console_session.ses_id);
-
- console_session.ses_key = key;
- console_session.ses_state = LST_SESSION_ACTIVE;
- console_session.ses_force = !!force;
- console_session.ses_features = feats;
- console_session.ses_feats_updated = 0;
- console_session.ses_timeout = (timeout <= 0) ?
- LST_CONSOLE_TIMEOUT : timeout;
- strcpy(console_session.ses_name, name);
-
- rc = lstcon_batch_add(LST_DEFAULT_BATCH);
- if (rc != 0)
- return rc;
-
- rc = lstcon_rpc_pinger_start();
- if (rc != 0) {
- lstcon_batch_t *bat = NULL;
-
- lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
- lstcon_batch_destroy(bat);
-
- return rc;
- }
-
- if (copy_to_user(sid_up, &console_session.ses_id,
- sizeof(lst_sid_t)) == 0)
- return rc;
-
- lstcon_session_end();
-
- return -EFAULT;
-}
-
-int
-lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp,
- lstcon_ndlist_ent_t *ndinfo_up, char *name_up, int len)
-{
- lstcon_ndlist_ent_t *entp;
- lstcon_ndlink_t *ndl;
- int rc = 0;
-
- if (console_session.ses_state != LST_SESSION_ACTIVE)
- return -ESRCH;
-
- LIBCFS_ALLOC(entp, sizeof(*entp));
- if (entp == NULL)
- return -ENOMEM;
-
- list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
-
- if (copy_to_user(sid_up, &console_session.ses_id,
- sizeof(lst_sid_t)) ||
- copy_to_user(key_up, &console_session.ses_key,
- sizeof(*key_up)) ||
- copy_to_user(featp, &console_session.ses_features,
- sizeof(*featp)) ||
- copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
- copy_to_user(name_up, console_session.ses_name, len))
- rc = -EFAULT;
-
- LIBCFS_FREE(entp, sizeof(*entp));
-
- return rc;
-}
-
-int
-lstcon_session_end(void)
-{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
- lstcon_batch_t *bat;
- int rc = 0;
-
- LASSERT(console_session.ses_state == LST_SESSION_ACTIVE);
-
- rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list,
- NULL, LST_TRANS_SESEND, NULL,
- lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
- CERROR("Can't create transaction: %d\n", rc);
- return rc;
- }
-
- console_session.ses_shutdown = 1;
-
- lstcon_rpc_pinger_stop();
-
- lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
-
- lstcon_rpc_trans_destroy(trans);
-	/* The user can do nothing even if the RPC failed, so carry on */
-
- /* waiting for orphan rpcs to die */
- lstcon_rpc_cleanup_wait();
-
- console_session.ses_id = LST_INVALID_SID;
- console_session.ses_state = LST_SESSION_NONE;
- console_session.ses_key = 0;
- console_session.ses_force = 0;
- console_session.ses_feats_updated = 0;
-
- /* destroy all batches */
- while (!list_empty(&console_session.ses_bat_list)) {
- bat = list_entry(console_session.ses_bat_list.next,
- lstcon_batch_t, bat_link);
-
- lstcon_batch_destroy(bat);
- }
-
- /* destroy all groups */
- while (!list_empty(&console_session.ses_grp_list)) {
- grp = list_entry(console_session.ses_grp_list.next,
- lstcon_group_t, grp_link);
- LASSERT(grp->grp_ref == 1);
-
- lstcon_group_put(grp);
- }
-
- /* all nodes should be released */
- LASSERT(list_empty(&console_session.ses_ndl_list));
-
- console_session.ses_shutdown = 0;
- console_session.ses_expired = 0;
-
- return rc;
-}
-
-int
-lstcon_session_feats_check(unsigned feats)
-{
- int rc = 0;
-
- if ((feats & ~LST_FEATS_MASK) != 0) {
- CERROR("Can't support these features: %x\n",
- (feats & ~LST_FEATS_MASK));
- return -EPROTO;
- }
-
- spin_lock(&console_session.ses_rpc_lock);
-
- if (!console_session.ses_feats_updated) {
- console_session.ses_feats_updated = 1;
- console_session.ses_features = feats;
- }
-
- if (console_session.ses_features != feats)
- rc = -EPROTO;
-
- spin_unlock(&console_session.ses_rpc_lock);
-
- if (rc != 0) {
-		CERROR("remote features %x do not match the console's session features %x\n",
- feats, console_session.ses_features);
- }
-
- return rc;
-}
-
-static int
-lstcon_acceptor_handle(srpc_server_rpc_t *rpc)
-{
- srpc_msg_t *rep = &rpc->srpc_replymsg;
- srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg;
- srpc_join_reqst_t *jreq = &req->msg_body.join_reqst;
- srpc_join_reply_t *jrep = &rep->msg_body.join_reply;
- lstcon_group_t *grp = NULL;
- lstcon_ndlink_t *ndl;
- int rc = 0;
-
- sfw_unpack_message(req);
-
- mutex_lock(&console_session.ses_mutex);
-
- jrep->join_sid = console_session.ses_id;
-
- if (console_session.ses_id.ses_nid == LNET_NID_ANY) {
- jrep->join_status = ESRCH;
- goto out;
- }
-
- if (lstcon_session_feats_check(req->msg_ses_feats) != 0) {
- jrep->join_status = EPROTO;
- goto out;
- }
-
- if (jreq->join_sid.ses_nid != LNET_NID_ANY &&
- !lstcon_session_match(jreq->join_sid)) {
- jrep->join_status = EBUSY;
- goto out;
- }
-
- if (lstcon_group_find(jreq->join_group, &grp) != 0) {
- rc = lstcon_group_alloc(jreq->join_group, &grp);
- if (rc != 0) {
- CERROR("Out of memory\n");
- goto out;
- }
-
- list_add_tail(&grp->grp_link,
- &console_session.ses_grp_list);
- lstcon_group_addref(grp);
- }
-
- if (grp->grp_ref > 2) {
- /* Group is in use */
- jrep->join_status = EBUSY;
- goto out;
- }
-
- rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0);
- if (rc == 0) {
- jrep->join_status = EEXIST;
- goto out;
- }
-
- rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1);
- if (rc != 0) {
- CERROR("Out of memory\n");
- goto out;
- }
-
- ndl->ndl_node->nd_state = LST_NODE_ACTIVE;
- ndl->ndl_node->nd_timeout = console_session.ses_timeout;
-
- if (grp->grp_userland == 0)
- grp->grp_userland = 1;
-
- strcpy(jrep->join_session, console_session.ses_name);
- jrep->join_timeout = console_session.ses_timeout;
- jrep->join_status = 0;
-
-out:
- rep->msg_ses_feats = console_session.ses_features;
- if (grp != NULL)
- lstcon_group_put(grp);
-
- mutex_unlock(&console_session.ses_mutex);
-
- return rc;
-}
-
-srpc_service_t lstcon_acceptor_service;
-static void lstcon_init_acceptor_service(void)
-{
- /* initialize selftest console acceptor service table */
- lstcon_acceptor_service.sv_name = "join session";
- lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle;
- lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN;
- lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX;
-}
-
-extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
-
-static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
-
-/* initialize console */
-int
-lstcon_console_init(void)
-{
- int i;
- int rc;
-
- memset(&console_session, 0, sizeof(lstcon_session_t));
-
- console_session.ses_id = LST_INVALID_SID;
- console_session.ses_state = LST_SESSION_NONE;
- console_session.ses_timeout = 0;
- console_session.ses_force = 0;
- console_session.ses_expired = 0;
- console_session.ses_feats_updated = 0;
- console_session.ses_features = LST_FEATS_MASK;
- console_session.ses_laststamp = ktime_get_real_seconds();
-
- mutex_init(&console_session.ses_mutex);
-
- INIT_LIST_HEAD(&console_session.ses_ndl_list);
- INIT_LIST_HEAD(&console_session.ses_grp_list);
- INIT_LIST_HEAD(&console_session.ses_bat_list);
- INIT_LIST_HEAD(&console_session.ses_trans_list);
-
- LIBCFS_ALLOC(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
- if (console_session.ses_ndl_hash == NULL)
- return -ENOMEM;
-
- for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
- INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]);
-
-
- /* initialize acceptor service table */
- lstcon_init_acceptor_service();
-
- rc = srpc_add_service(&lstcon_acceptor_service);
- LASSERT(rc != -EBUSY);
- if (rc != 0) {
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
- return rc;
- }
-
- rc = srpc_service_add_buffers(&lstcon_acceptor_service,
- lstcon_acceptor_service.sv_wi_total);
- if (rc != 0) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = libcfs_register_ioctl(&lstcon_ioctl_handler);
-
- if (rc == 0) {
- lstcon_rpc_module_init();
- return 0;
- }
-
-out:
- srpc_shutdown_service(&lstcon_acceptor_service);
- srpc_remove_service(&lstcon_acceptor_service);
-
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
-
- srpc_wait_service_shutdown(&lstcon_acceptor_service);
-
- return rc;
-}
-
-int
-lstcon_console_fini(void)
-{
- int i;
-
- libcfs_deregister_ioctl(&lstcon_ioctl_handler);
-
- mutex_lock(&console_session.ses_mutex);
-
- srpc_shutdown_service(&lstcon_acceptor_service);
- srpc_remove_service(&lstcon_acceptor_service);
-
- if (console_session.ses_state != LST_SESSION_NONE)
- lstcon_session_end();
-
- lstcon_rpc_module_fini();
-
- mutex_unlock(&console_session.ses_mutex);
-
- LASSERT(list_empty(&console_session.ses_ndl_list));
- LASSERT(list_empty(&console_session.ses_grp_list));
- LASSERT(list_empty(&console_session.ses_bat_list));
- LASSERT(list_empty(&console_session.ses_trans_list));
-
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
- }
-
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
-
- srpc_wait_service_shutdown(&lstcon_acceptor_service);
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
deleted file mode 100644
index acd131218dd9..000000000000
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/console.h
- *
- * kernel structure for LST console
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#ifndef __LST_CONSOLE_H__
-#define __LST_CONSOLE_H__
-
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lnet.h"
-#include "../../include/linux/lnet/lib-types.h"
-#include "../../include/linux/lnet/lnetst.h"
-#include "selftest.h"
-#include "conrpc.h"
-
-typedef struct lstcon_node {
- lnet_process_id_t nd_id; /* id of the node */
- int nd_ref; /* reference count */
- int nd_state; /* state of the node */
- int nd_timeout; /* session timeout */
- unsigned long nd_stamp; /* timestamp of last replied RPC */
- struct lstcon_rpc nd_ping; /* ping rpc */
-} lstcon_node_t; /* node descriptor */
-
-typedef struct {
- struct list_head ndl_link; /* chain on list */
- struct list_head ndl_hlink; /* chain on hash */
- lstcon_node_t *ndl_node; /* pointer to node */
-} lstcon_ndlink_t; /* node link descriptor */
-
-typedef struct {
- struct list_head grp_link; /* chain on global group list
- */
- int grp_ref; /* reference count */
- int grp_userland; /* has userland nodes */
- int grp_nnode; /* # of nodes */
- char grp_name[LST_NAME_SIZE]; /* group name */
-
- struct list_head grp_trans_list; /* transaction list */
- struct list_head grp_ndl_list; /* nodes list */
- struct list_head grp_ndl_hash[0]; /* hash table for nodes */
-} lstcon_group_t; /* (alias of nodes) group descriptor */
-
-#define LST_BATCH_IDLE 0xB0 /* idle batch */
-#define LST_BATCH_RUNNING 0xB1 /* running batch */
-
-typedef struct lstcon_tsb_hdr {
- lst_bid_t tsb_id; /* batch ID */
- int tsb_index; /* test index */
-} lstcon_tsb_hdr_t;
-
-typedef struct {
- lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
- struct list_head bat_link; /* chain on session's batches list */
- int bat_ntest; /* # of test */
- int bat_state; /* state of the batch */
- int bat_arg; /* parameter for run|stop, timeout
- * for run, force for stop */
- char bat_name[LST_NAME_SIZE];/* name of batch */
-
- struct list_head bat_test_list; /* list head of tests (lstcon_test_t)
- */
- struct list_head bat_trans_list; /* list head of transaction */
- struct list_head bat_cli_list; /* list head of client nodes
- * (lstcon_node_t) */
- struct list_head *bat_cli_hash; /* hash table of client nodes */
- struct list_head bat_srv_list; /* list head of server nodes */
- struct list_head *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /* (tests ) batch descriptor */
-
-typedef struct lstcon_test {
- lstcon_tsb_hdr_t tes_hdr; /* test batch header */
- struct list_head tes_link; /* chain on batch's tests list */
- lstcon_batch_t *tes_batch; /* pointer to batch */
-
- int tes_type; /* type of the test, i.e: bulk, ping */
- int tes_stop_onerr; /* stop on error */
- int tes_oneside; /* one-sided test */
- int tes_concur; /* concurrency */
- int tes_loop; /* loop count */
- int tes_dist; /* nodes distribution of target group */
- int tes_span; /* nodes span of target group */
- int tes_cliidx; /* client index, used for RPC creating */
-
- struct list_head tes_trans_list; /* transaction list */
- lstcon_group_t *tes_src_grp; /* group run the test */
- lstcon_group_t *tes_dst_grp; /* target group */
-
- int tes_paramlen; /* test parameter length */
- char tes_param[0]; /* test parameter */
-} lstcon_test_t; /* a single test descriptor */
-
-#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
-#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
-
-#define LST_SESSION_NONE 0x0 /* no session */
-#define LST_SESSION_ACTIVE 0x1 /* working session */
-
-#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */
-
-typedef struct {
- struct mutex ses_mutex; /* only 1 thread in session */
- lst_sid_t ses_id; /* global session id */
- int ses_key; /* local session key */
- int ses_state; /* state of session */
- int ses_timeout; /* timeout in seconds */
- time64_t ses_laststamp; /* last operation stamp (seconds)
- */
- unsigned ses_features; /* tests features of the session
- */
- unsigned ses_feats_updated:1; /* features are synced with
- * remote test nodes */
- unsigned ses_force:1; /* force creating */
- unsigned ses_shutdown:1; /* session is shutting down */
- unsigned ses_expired:1; /* console is timedout */
- __u64 ses_id_cookie; /* batch id cookie */
- char ses_name[LST_NAME_SIZE];/* session name */
- lstcon_rpc_trans_t *ses_ping; /* session pinger */
- stt_timer_t ses_ping_timer; /* timer for pinger */
- lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
-
- struct list_head ses_trans_list; /* global list of transaction */
- struct list_head ses_grp_list; /* global list of groups */
- struct list_head ses_bat_list; /* global list of batches */
- struct list_head ses_ndl_list; /* global list of nodes */
- struct list_head *ses_ndl_hash; /* hash table of nodes */
-
- spinlock_t ses_rpc_lock; /* serialize */
- atomic_t ses_rpc_counter; /* # of initialized RPCs */
- struct list_head ses_rpc_freelist; /* idle console rpc */
-} lstcon_session_t; /* session descriptor */
-
-extern lstcon_session_t console_session;
-
-static inline lstcon_trans_stat_t *
-lstcon_trans_stat(void)
-{
- return &console_session.ses_trans_stat;
-}
-
-static inline struct list_head *
-lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
-{
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
-
- return &hash[idx];
-}
-
-int lstcon_console_init(void);
-int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
-int lstcon_console_fini(void);
-int lstcon_session_match(lst_sid_t sid);
-int lstcon_session_new(char *name, int key, unsigned version,
- int timeout, int flags, lst_sid_t *sid_up);
-int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
- lstcon_ndlist_ent_t *entp, char *name_up, int len);
-int lstcon_session_end(void);
-int lstcon_session_debug(int timeout, struct list_head *result_up);
-int lstcon_session_feats_check(unsigned feats);
-int lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head *result_up);
-int lstcon_group_debug(int timeout, char *name,
- struct list_head *result_up);
-int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
-int lstcon_group_add(char *name);
-int lstcon_group_del(char *name);
-int lstcon_group_clean(char *name, int args);
-int lstcon_group_refresh(char *name, struct list_head *result_up);
-int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
- unsigned *featp, struct list_head *result_up);
-int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
-int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
- int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
-int lstcon_group_list(int idx, int len, char *name_up);
-int lstcon_batch_add(char *name);
-int lstcon_batch_run(char *name, int timeout, struct list_head *result_up);
-int lstcon_batch_stop(char *name, int force, struct list_head *result_up);
-int lstcon_test_batch_query(char *name, int testidx,
- int client, int timeout,
- struct list_head *result_up);
-int lstcon_batch_del(char *name);
-int lstcon_batch_list(int idx, int namelen, char *name_up);
-int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
- int server, int testidx, int *index_p,
- int *ndent_p, lstcon_node_ent_t *dents_up);
-int lstcon_group_stat(char *grp_name, int timeout,
- struct list_head *result_up);
-int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, struct list_head *result_up);
-int lstcon_test_add(char *batch_name, int type, int loop,
- int concur, int dist, int span,
- char *src_name, char *dst_name,
- void *param, int paramlen, int *retp,
- struct list_head *result_up);
-#endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
deleted file mode 100644
index 177beaf496c4..000000000000
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ /dev/null
@@ -1,1803 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/framework.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-
-lst_sid_t LST_INVALID_SID = {LNET_NID_ANY, -1};
-
-static int session_timeout = 100;
-module_param(session_timeout, int, 0444);
-MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)");
-
-static int rpc_timeout = 64;
-module_param(rpc_timeout, int, 0644);
-MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
-
-#define sfw_unpack_id(id) \
-do { \
- __swab64s(&(id).nid); \
- __swab32s(&(id).pid); \
-} while (0)
-
-#define sfw_unpack_sid(sid) \
-do { \
- __swab64s(&(sid).ses_nid); \
- __swab64s(&(sid).ses_stamp); \
-} while (0)
-
-#define sfw_unpack_fw_counters(fc) \
-do { \
- __swab32s(&(fc).running_ms); \
- __swab32s(&(fc).active_batches); \
- __swab32s(&(fc).zombie_sessions); \
- __swab32s(&(fc).brw_errors); \
- __swab32s(&(fc).ping_errors); \
-} while (0)
-
-#define sfw_unpack_rpc_counters(rc) \
-do { \
- __swab32s(&(rc).errors); \
- __swab32s(&(rc).rpcs_sent); \
- __swab32s(&(rc).rpcs_rcvd); \
- __swab32s(&(rc).rpcs_dropped); \
- __swab32s(&(rc).rpcs_expired); \
- __swab64s(&(rc).bulk_get); \
- __swab64s(&(rc).bulk_put); \
-} while (0)
-
-#define sfw_unpack_lnet_counters(lc) \
-do { \
- __swab32s(&(lc).errors); \
- __swab32s(&(lc).msgs_max); \
- __swab32s(&(lc).msgs_alloc); \
- __swab32s(&(lc).send_count); \
- __swab32s(&(lc).recv_count); \
- __swab32s(&(lc).drop_count); \
- __swab32s(&(lc).route_count); \
- __swab64s(&(lc).send_length); \
- __swab64s(&(lc).recv_length); \
- __swab64s(&(lc).drop_length); \
- __swab64s(&(lc).route_length); \
-} while (0)
-
-#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
-
-static struct smoketest_framework {
- struct list_head fw_zombie_rpcs; /* RPCs to be recycled */
- struct list_head fw_zombie_sessions; /* stopping sessions */
- struct list_head fw_tests; /* registered test cases */
- atomic_t fw_nzombies; /* # zombie sessions */
- spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
- int fw_shuttingdown; /* shutdown in progress */
- srpc_server_rpc_t *fw_active_srpc; /* running RPC */
-} sfw_data;
-
-/* forward ref's */
-int sfw_stop_batch(sfw_batch_t *tsb, int force);
-void sfw_destroy_session(sfw_session_t *sn);
-
-static inline sfw_test_case_t *
-sfw_find_test_case(int id)
-{
- sfw_test_case_t *tsc;
-
- LASSERT(id <= SRPC_SERVICE_MAX_ID);
- LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- if (tsc->tsc_srv_service->sv_id == id)
- return tsc;
- }
-
- return NULL;
-}
-
-static int
-sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
-{
- sfw_test_case_t *tsc;
-
- if (sfw_find_test_case(service->sv_id) != NULL) {
- CERROR("Failed to register test %s (%d)\n",
- service->sv_name, service->sv_id);
- return -EEXIST;
- }
-
- LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t));
- if (tsc == NULL)
- return -ENOMEM;
-
- tsc->tsc_cli_ops = cliops;
- tsc->tsc_srv_service = service;
-
- list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
- return 0;
-}
-
-static void
-sfw_add_session_timer(void)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- stt_timer_t *timer = &sn->sn_timer;
-
- LASSERT(!sfw_data.fw_shuttingdown);
-
- if (sn == NULL || sn->sn_timeout == 0)
- return;
-
- LASSERT(!sn->sn_timer_active);
-
- sn->sn_timer_active = 1;
- timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout;
- stt_add_timer(timer);
- return;
-}
-
-static int
-sfw_del_session_timer(void)
-{
- sfw_session_t *sn = sfw_data.fw_session;
-
- if (sn == NULL || !sn->sn_timer_active)
- return 0;
-
- LASSERT(sn->sn_timeout != 0);
-
- if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
- sn->sn_timer_active = 0;
- return 0;
- }
-
- return EBUSY; /* racing with sfw_session_expired() */
-}
-
-static void
-sfw_deactivate_session(void)
- __must_hold(&sfw_data.fw_lock)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- int nactive = 0;
- sfw_batch_t *tsb;
- sfw_test_case_t *tsc;
-
- if (sn == NULL)
- return;
-
- LASSERT(!sn->sn_timer_active);
-
- sfw_data.fw_session = NULL;
- atomic_inc(&sfw_data.fw_nzombies);
- list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
-
- spin_unlock(&sfw_data.fw_lock);
-
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- srpc_abort_service(tsc->tsc_srv_service);
- }
-
- spin_lock(&sfw_data.fw_lock);
-
- list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
- if (sfw_batch_active(tsb)) {
- nactive++;
- sfw_stop_batch(tsb, 1);
- }
- }
-
- if (nactive != 0)
- return; /* wait for active batches to stop */
-
- list_del_init(&sn->sn_list);
- spin_unlock(&sfw_data.fw_lock);
-
- sfw_destroy_session(sn);
-
- spin_lock(&sfw_data.fw_lock);
-}
-
-
-static void
-sfw_session_expired(void *data)
-{
- sfw_session_t *sn = data;
-
- spin_lock(&sfw_data.fw_lock);
-
- LASSERT(sn->sn_timer_active);
- LASSERT(sn == sfw_data.fw_session);
-
- CWARN("Session expired! sid: %s-%llu, name: %s\n",
- libcfs_nid2str(sn->sn_id.ses_nid),
- sn->sn_id.ses_stamp, &sn->sn_name[0]);
-
- sn->sn_timer_active = 0;
- sfw_deactivate_session();
-
- spin_unlock(&sfw_data.fw_lock);
-}
-
-static inline void
-sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
- unsigned features, const char *name)
-{
- stt_timer_t *timer = &sn->sn_timer;
-
- memset(sn, 0, sizeof(sfw_session_t));
- INIT_LIST_HEAD(&sn->sn_list);
- INIT_LIST_HEAD(&sn->sn_batches);
- atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
- atomic_set(&sn->sn_brw_errors, 0);
- atomic_set(&sn->sn_ping_errors, 0);
- strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
-
- sn->sn_timer_active = 0;
- sn->sn_id = sid;
- sn->sn_features = features;
- sn->sn_timeout = session_timeout;
- sn->sn_started = cfs_time_current();
-
- timer->stt_data = sn;
- timer->stt_func = sfw_session_expired;
- INIT_LIST_HEAD(&timer->stt_list);
-}
-
-/* completion handler for incoming framework RPCs */
-static void
-sfw_server_rpc_done(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- int status = rpc->srpc_status;
-
- CDEBUG(D_NET,
- "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state),
- status);
-
- if (rpc->srpc_bulk != NULL)
- sfw_free_pages(rpc);
- return;
-}
-
-static void
-sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
-{
- LASSERT(rpc->crpc_bulk.bk_niov == 0);
- LASSERT(list_empty(&rpc->crpc_list));
- LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
-
- CDEBUG(D_NET,
- "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(rpc->crpc_wi.swi_state),
- rpc->crpc_aborted, rpc->crpc_status);
-
- spin_lock(&sfw_data.fw_lock);
-
- /* my callers must finish all RPCs before shutting me down */
- LASSERT(!sfw_data.fw_shuttingdown);
- list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
-
- spin_unlock(&sfw_data.fw_lock);
-}
-
-static sfw_batch_t *
-sfw_find_batch(lst_bid_t bid)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
-
- LASSERT(sn != NULL);
-
- list_for_each_entry(bat, &sn->sn_batches, bat_list) {
- if (bat->bat_id.bat_id == bid.bat_id)
- return bat;
- }
-
- return NULL;
-}
-
-static sfw_batch_t *
-sfw_bid2batch(lst_bid_t bid)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
-
- LASSERT(sn != NULL);
-
- bat = sfw_find_batch(bid);
- if (bat != NULL)
- return bat;
-
- LIBCFS_ALLOC(bat, sizeof(sfw_batch_t));
- if (bat == NULL)
- return NULL;
-
- bat->bat_error = 0;
- bat->bat_session = sn;
- bat->bat_id = bid;
- atomic_set(&bat->bat_nactive, 0);
- INIT_LIST_HEAD(&bat->bat_tests);
-
- list_add_tail(&bat->bat_list, &sn->sn_batches);
- return bat;
-}
-
-static int
-sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_counters_t *cnt = &reply->str_fw;
- sfw_batch_t *bat;
-
- reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
-
- if (request->str_sid.ses_nid == LNET_NID_ANY) {
- reply->str_status = EINVAL;
- return 0;
- }
-
- if (sn == NULL || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
- reply->str_status = ESRCH;
- return 0;
- }
-
- lnet_counters_get(&reply->str_lnet);
- srpc_get_counters(&reply->str_rpc);
-
- /* send over the msecs since the session was started;
- * with 32 bits to send, this is ~49 days */
- cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started);
- cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
- cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
- cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
-
- cnt->active_batches = 0;
- list_for_each_entry(bat, &sn->sn_batches, bat_list) {
- if (atomic_read(&bat->bat_nactive) > 0)
- cnt->active_batches++;
- }
-
- reply->str_status = 0;
- return 0;
-}
-
-int
-sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_msg_t *msg = container_of(request, srpc_msg_t,
- msg_body.mksn_reqst);
- int cplen = 0;
-
- if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
- reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
- reply->mksn_status = EINVAL;
- return 0;
- }
-
- if (sn != NULL) {
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
- reply->mksn_timeout = sn->sn_timeout;
-
- if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
- atomic_inc(&sn->sn_refcount);
- return 0;
- }
-
- if (!request->mksn_force) {
- reply->mksn_status = EBUSY;
- cplen = strlcpy(&reply->mksn_name[0], &sn->sn_name[0],
- sizeof(reply->mksn_name));
- if (cplen >= sizeof(reply->mksn_name))
- return -E2BIG;
- return 0;
- }
- }
-
- /* reject the request if it requires unknown features
- * NB: an old version will always accept all features because it is
- * not aware of srpc_msg_t::msg_ses_feats; this is a defect, but a
- * harmless one, because it will return a zero feature mask to the
- * console, and it is the console's responsibility to make sure all
- * nodes in a session have the same feature mask. */
- if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
- reply->mksn_status = EPROTO;
- return 0;
- }
-
- /* brand new or create by force */
- LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
- if (sn == NULL) {
- CERROR("Dropping RPC (mksn) under memory pressure.\n");
- return -ENOMEM;
- }
-
- sfw_init_session(sn, request->mksn_sid,
- msg->msg_ses_feats, &request->mksn_name[0]);
-
- spin_lock(&sfw_data.fw_lock);
-
- sfw_deactivate_session();
- LASSERT(sfw_data.fw_session == NULL);
- sfw_data.fw_session = sn;
-
- spin_unlock(&sfw_data.fw_lock);
-
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
- reply->mksn_timeout = sn->sn_timeout;
- return 0;
-}
-
-static int
-sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
-{
- sfw_session_t *sn = sfw_data.fw_session;
-
- reply->rmsn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
-
- if (request->rmsn_sid.ses_nid == LNET_NID_ANY) {
- reply->rmsn_status = EINVAL;
- return 0;
- }
-
- if (sn == NULL || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
- reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY;
- return 0;
- }
-
- if (!atomic_dec_and_test(&sn->sn_refcount)) {
- reply->rmsn_status = 0;
- return 0;
- }
-
- spin_lock(&sfw_data.fw_lock);
- sfw_deactivate_session();
- spin_unlock(&sfw_data.fw_lock);
-
- reply->rmsn_status = 0;
- reply->rmsn_sid = LST_INVALID_SID;
- LASSERT(sfw_data.fw_session == NULL);
- return 0;
-}
-
-static int
-sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
-{
- sfw_session_t *sn = sfw_data.fw_session;
-
- if (sn == NULL) {
- reply->dbg_status = ESRCH;
- reply->dbg_sid = LST_INVALID_SID;
- return 0;
- }
-
- reply->dbg_status = 0;
- reply->dbg_sid = sn->sn_id;
- reply->dbg_timeout = sn->sn_timeout;
- if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name))
- >= sizeof(reply->dbg_name))
- return -E2BIG;
-
- return 0;
-}
-
-static void
-sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
-{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
-
- /* Called with hold of tsi->tsi_lock */
- LASSERT(list_empty(&rpc->crpc_list));
- list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
-}
-
-static inline int
-sfw_test_buffers(sfw_test_instance_t *tsi)
-{
- struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
- struct srpc_service *svc = tsc->tsc_srv_service;
- int nbuf;
-
- nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts;
- return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
-}
-
-static int
-sfw_load_test(struct sfw_test_instance *tsi)
-{
- struct sfw_test_case *tsc;
- struct srpc_service *svc;
- int nbuf;
- int rc;
-
- LASSERT(tsi != NULL);
- tsc = sfw_find_test_case(tsi->tsi_service);
- nbuf = sfw_test_buffers(tsi);
- LASSERT(tsc != NULL);
- svc = tsc->tsc_srv_service;
-
- if (tsi->tsi_is_client) {
- tsi->tsi_ops = tsc->tsc_cli_ops;
- return 0;
- }
-
- rc = srpc_service_add_buffers(svc, nbuf);
- if (rc != 0) {
- CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
- svc->sv_name, nbuf, rc);
- /* NB: this error handler is not strictly correct, because it may
- * release more buffers than were actually allocated; that doesn't
- * matter, because the request portal should be a lazy portal and
- * will grow buffers if necessary. */
- srpc_service_remove_buffers(svc, nbuf);
- return -ENOMEM;
- }
-
- CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
- nbuf * (srpc_serv_is_framework(svc) ?
- 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
- return 0;
-}
-
-static void
-sfw_unload_test(struct sfw_test_instance *tsi)
-{
- struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
-
- LASSERT(tsc != NULL);
-
- if (tsi->tsi_is_client)
- return;
-
- /* shrink buffers: the request portal is a lazy portal that can
- * grow buffers at runtime, so we may leave some buffers behind,
- * but never mind... */
- srpc_service_remove_buffers(tsc->tsc_srv_service,
- sfw_test_buffers(tsi));
- return;
-}
-
-static void
-sfw_destroy_test_instance(sfw_test_instance_t *tsi)
-{
- srpc_client_rpc_t *rpc;
- sfw_test_unit_t *tsu;
-
- if (!tsi->tsi_is_client)
- goto clean;
-
- tsi->tsi_ops->tso_fini(tsi);
-
- LASSERT(!tsi->tsi_stopping);
- LASSERT(list_empty(&tsi->tsi_active_rpcs));
- LASSERT(!sfw_test_active(tsi));
-
- while (!list_empty(&tsi->tsi_units)) {
- tsu = list_entry(tsi->tsi_units.next,
- sfw_test_unit_t, tsu_list);
- list_del(&tsu->tsu_list);
- LIBCFS_FREE(tsu, sizeof(*tsu));
- }
-
- while (!list_empty(&tsi->tsi_free_rpcs)) {
- rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- list_del(&rpc->crpc_list);
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- }
-
-clean:
- sfw_unload_test(tsi);
- LIBCFS_FREE(tsi, sizeof(*tsi));
- return;
-}
-
-static void
-sfw_destroy_batch(sfw_batch_t *tsb)
-{
- sfw_test_instance_t *tsi;
-
- LASSERT(!sfw_batch_active(tsb));
- LASSERT(list_empty(&tsb->bat_list));
-
- while (!list_empty(&tsb->bat_tests)) {
- tsi = list_entry(tsb->bat_tests.next,
- sfw_test_instance_t, tsi_list);
- list_del_init(&tsi->tsi_list);
- sfw_destroy_test_instance(tsi);
- }
-
- LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
- return;
-}
-
-void
-sfw_destroy_session(sfw_session_t *sn)
-{
- sfw_batch_t *batch;
-
- LASSERT(list_empty(&sn->sn_list));
- LASSERT(sn != sfw_data.fw_session);
-
- while (!list_empty(&sn->sn_batches)) {
- batch = list_entry(sn->sn_batches.next,
- sfw_batch_t, bat_list);
- list_del_init(&batch->bat_list);
- sfw_destroy_batch(batch);
- }
-
- LIBCFS_FREE(sn, sizeof(*sn));
- atomic_dec(&sfw_data.fw_nzombies);
- return;
-}
-
-static void
-sfw_unpack_addtest_req(srpc_msg_t *msg)
-{
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
-
- LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
- LASSERT(req->tsr_is_client);
-
- if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
-
- LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- if (req->tsr_service == SRPC_SERVICE_BRW) {
- if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
- test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
-
- __swab32s(&bulk->blk_opc);
- __swab32s(&bulk->blk_npg);
- __swab32s(&bulk->blk_flags);
-
- } else {
- test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1;
-
- __swab16s(&bulk->blk_opc);
- __swab16s(&bulk->blk_flags);
- __swab32s(&bulk->blk_offset);
- __swab32s(&bulk->blk_len);
- }
-
- return;
- }
-
- if (req->tsr_service == SRPC_SERVICE_PING) {
- test_ping_req_t *ping = &req->tsr_u.ping;
-
- __swab32s(&ping->png_size);
- __swab32s(&ping->png_flags);
- return;
- }
-
- LBUG();
- return;
-}
-
-static int
-sfw_add_test_instance(sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
-{
- srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
- srpc_bulk_t *bk = rpc->srpc_bulk;
- int ndest = req->tsr_ndest;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
- int i;
- int rc;
-
- LIBCFS_ALLOC(tsi, sizeof(*tsi));
- if (tsi == NULL) {
- CERROR("Can't allocate test instance for batch: %llu\n",
- tsb->bat_id.bat_id);
- return -ENOMEM;
- }
-
- spin_lock_init(&tsi->tsi_lock);
- atomic_set(&tsi->tsi_nactive, 0);
- INIT_LIST_HEAD(&tsi->tsi_units);
- INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
- INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
-
- tsi->tsi_stopping = 0;
- tsi->tsi_batch = tsb;
- tsi->tsi_loop = req->tsr_loop;
- tsi->tsi_concur = req->tsr_concur;
- tsi->tsi_service = req->tsr_service;
- tsi->tsi_is_client = !!(req->tsr_is_client);
- tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
-
- rc = sfw_load_test(tsi);
- if (rc != 0) {
- LIBCFS_FREE(tsi, sizeof(*tsi));
- return rc;
- }
-
- LASSERT(!sfw_batch_active(tsb));
-
- if (!tsi->tsi_is_client) {
- /* it's test server, just add it to tsb */
- list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
- return 0;
- }
-
- LASSERT(bk != NULL);
- LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
- LASSERT((unsigned int)bk->bk_len >=
- sizeof(lnet_process_id_packed_t) * ndest);
-
- sfw_unpack_addtest_req(msg);
- memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u));
-
- for (i = 0; i < ndest; i++) {
- lnet_process_id_packed_t *dests;
- lnet_process_id_packed_t id;
- int j;
-
- dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT(dests != NULL); /* my pages are within KVM always */
- id = dests[i % SFW_ID_PER_PAGE];
- if (msg->msg_magic != SRPC_MSG_MAGIC)
- sfw_unpack_id(id);
-
- for (j = 0; j < tsi->tsi_concur; j++) {
- LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t));
- if (tsu == NULL) {
- rc = -ENOMEM;
- CERROR("Can't allocate tsu for %d\n",
- tsi->tsi_service);
- goto error;
- }
-
- tsu->tsu_dest.nid = id.nid;
- tsu->tsu_dest.pid = id.pid;
- tsu->tsu_instance = tsi;
- tsu->tsu_private = NULL;
- list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
- }
- }
-
- rc = tsi->tsi_ops->tso_init(tsi);
- if (rc == 0) {
- list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
- return 0;
- }
-
-error:
- LASSERT(rc != 0);
- sfw_destroy_test_instance(tsi);
- return rc;
-}
-
-static void
-sfw_test_unit_done(sfw_test_unit_t *tsu)
-{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_batch_t *tsb = tsi->tsi_batch;
- sfw_session_t *sn = tsb->bat_session;
-
- LASSERT(sfw_test_active(tsi));
-
- if (!atomic_dec_and_test(&tsi->tsi_nactive))
- return;
-
- /* the test instance is done */
- spin_lock(&tsi->tsi_lock);
-
- tsi->tsi_stopping = 0;
-
- spin_unlock(&tsi->tsi_lock);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
- sn == sfw_data.fw_session) { /* sn also active */
- spin_unlock(&sfw_data.fw_lock);
- return;
- }
-
- LASSERT(!list_empty(&sn->sn_list)); /* I'm a zombie! */
-
- list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
- if (sfw_batch_active(tsb)) {
- spin_unlock(&sfw_data.fw_lock);
- return;
- }
- }
-
- list_del_init(&sn->sn_list);
- spin_unlock(&sfw_data.fw_lock);
-
- sfw_destroy_session(sn);
- return;
-}
-
-static void
-sfw_test_rpc_done(srpc_client_rpc_t *rpc)
-{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- int done = 0;
-
- tsi->tsi_ops->tso_done_rpc(tsu, rpc);
-
- spin_lock(&tsi->tsi_lock);
-
- LASSERT(sfw_test_active(tsi));
- LASSERT(!list_empty(&rpc->crpc_list));
-
- list_del_init(&rpc->crpc_list);
-
- /* the batch is stopping, the loop is done, or we got an error */
- if (tsi->tsi_stopping ||
- tsu->tsu_loop == 0 ||
- (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr))
- done = 1;
-
- /* dec ref for poster */
- srpc_client_rpc_decref(rpc);
-
- spin_unlock(&tsi->tsi_lock);
-
- if (!done) {
- swi_schedule_workitem(&tsu->tsu_worker);
- return;
- }
-
- sfw_test_unit_done(tsu);
- return;
-}
-
-int
-sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
- unsigned features, int nblk, int blklen,
- srpc_client_rpc_t **rpcpp)
-{
- srpc_client_rpc_t *rpc = NULL;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
-
- spin_lock(&tsi->tsi_lock);
-
- LASSERT(sfw_test_active(tsi));
-
- if (!list_empty(&tsi->tsi_free_rpcs)) {
- /* pick request from buffer */
- rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- LASSERT(nblk == rpc->crpc_bulk.bk_niov);
- list_del_init(&rpc->crpc_list);
- }
-
- spin_unlock(&tsi->tsi_lock);
-
- if (rpc == NULL) {
- rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
- blklen, sfw_test_rpc_done,
- sfw_test_rpc_fini, tsu);
- } else {
- srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk,
- blklen, sfw_test_rpc_done,
- sfw_test_rpc_fini, tsu);
- }
-
- if (rpc == NULL) {
- CERROR("Can't create rpc for test %d\n", tsi->tsi_service);
- return -ENOMEM;
- }
-
- rpc->crpc_reqstmsg.msg_ses_feats = features;
- *rpcpp = rpc;
-
- return 0;
-}
-
-static int
-sfw_run_test(swi_workitem_t *wi)
-{
- sfw_test_unit_t *tsu = wi->swi_workitem.wi_data;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- srpc_client_rpc_t *rpc = NULL;
-
- LASSERT(wi == &tsu->tsu_worker);
-
- if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) {
- LASSERT(rpc == NULL);
- goto test_done;
- }
-
- LASSERT(rpc != NULL);
-
- spin_lock(&tsi->tsi_lock);
-
- if (tsi->tsi_stopping) {
- list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
- spin_unlock(&tsi->tsi_lock);
- goto test_done;
- }
-
- if (tsu->tsu_loop > 0)
- tsu->tsu_loop--;
-
- list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
- spin_unlock(&tsi->tsi_lock);
-
- rpc->crpc_timeout = rpc_timeout;
-
- spin_lock(&rpc->crpc_lock);
- srpc_post_rpc(rpc);
- spin_unlock(&rpc->crpc_lock);
- return 0;
-
-test_done:
- /*
- * No one can schedule me now since:
- * - the previous RPC, if any, has completed,
- * - no new RPC has been initiated, and
- * - my batch is still active; no one can run it again now.
- * Cancel pending schedules and prevent future schedule attempts:
- */
- swi_exit_workitem(wi);
- sfw_test_unit_done(tsu);
- return 1;
-}
-
-static int
-sfw_run_batch(sfw_batch_t *tsb)
-{
- swi_workitem_t *wi;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
-
- if (sfw_batch_active(tsb)) {
- CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
- tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
- return 0;
- }
-
- list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
- if (!tsi->tsi_is_client) /* skip server instances */
- continue;
-
- LASSERT(!tsi->tsi_stopping);
- LASSERT(!sfw_test_active(tsi));
-
- atomic_inc(&tsb->bat_nactive);
-
- list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
- atomic_inc(&tsi->tsi_nactive);
- tsu->tsu_loop = tsi->tsi_loop;
- wi = &tsu->tsu_worker;
- swi_init_workitem(wi, tsu, sfw_run_test,
- lst_sched_test[
- lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
- swi_schedule_workitem(wi);
- }
- }
-
- return 0;
-}
-
-int
-sfw_stop_batch(sfw_batch_t *tsb, int force)
-{
- sfw_test_instance_t *tsi;
- srpc_client_rpc_t *rpc;
-
- if (!sfw_batch_active(tsb)) {
- CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
- return 0;
- }
-
- list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
- spin_lock(&tsi->tsi_lock);
-
- if (!tsi->tsi_is_client ||
- !sfw_test_active(tsi) || tsi->tsi_stopping) {
- spin_unlock(&tsi->tsi_lock);
- continue;
- }
-
- tsi->tsi_stopping = 1;
-
- if (!force) {
- spin_unlock(&tsi->tsi_lock);
- continue;
- }
-
- /* abort launched rpcs in the test */
- list_for_each_entry(rpc, &tsi->tsi_active_rpcs, crpc_list) {
- spin_lock(&rpc->crpc_lock);
-
- srpc_abort_rpc(rpc, -EINTR);
-
- spin_unlock(&rpc->crpc_lock);
- }
-
- spin_unlock(&tsi->tsi_lock);
- }
-
- return 0;
-}
-
-static int
-sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
-{
- sfw_test_instance_t *tsi;
-
- if (testidx < 0)
- return -EINVAL;
-
- if (testidx == 0) {
- reply->bar_active = atomic_read(&tsb->bat_nactive);
- return 0;
- }
-
- list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
- if (testidx-- > 1)
- continue;
-
- reply->bar_active = atomic_read(&tsi->tsi_nactive);
- return 0;
- }
-
- return -ENOENT;
-}
-
-void
-sfw_free_pages(srpc_server_rpc_t *rpc)
-{
- srpc_free_bulk(rpc->srpc_bulk);
- rpc->srpc_bulk = NULL;
-}
-
-int
-sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
- int sink)
-{
- LASSERT(rpc->srpc_bulk == NULL);
- LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
-
- rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
- if (rpc->srpc_bulk == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-static int
-sfw_add_test(srpc_server_rpc_t *rpc)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
- srpc_test_reqst_t *request;
- int rc;
- sfw_batch_t *bat;
-
- request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
- reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
-
- if (request->tsr_loop == 0 ||
- request->tsr_concur == 0 ||
- request->tsr_sid.ses_nid == LNET_NID_ANY ||
- request->tsr_ndest > SFW_MAX_NDESTS ||
- (request->tsr_is_client && request->tsr_ndest == 0) ||
- request->tsr_concur > SFW_MAX_CONCUR ||
- request->tsr_service > SRPC_SERVICE_MAX_ID ||
- request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
- reply->tsr_status = EINVAL;
- return 0;
- }
-
- if (sn == NULL || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
- sfw_find_test_case(request->tsr_service) == NULL) {
- reply->tsr_status = ENOENT;
- return 0;
- }
-
- bat = sfw_bid2batch(request->tsr_bid);
- if (bat == NULL) {
- CERROR("Dropping RPC (%s) from %s under memory pressure.\n",
- rpc->srpc_scd->scd_svc->sv_name,
- libcfs_id2str(rpc->srpc_peer));
- return -ENOMEM;
- }
-
- if (sfw_batch_active(bat)) {
- reply->tsr_status = EBUSY;
- return 0;
- }
-
- if (request->tsr_is_client && rpc->srpc_bulk == NULL) {
- /* rpc will be resumed later in sfw_bulk_ready */
- int npg = sfw_id_pages(request->tsr_ndest);
- int len;
-
- if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
- len = npg * PAGE_CACHE_SIZE;
-
- } else {
- len = sizeof(lnet_process_id_packed_t) *
- request->tsr_ndest;
- }
-
- return sfw_alloc_pages(rpc, CFS_CPT_ANY, npg, len, 1);
- }
-
- rc = sfw_add_test_instance(bat, rpc);
- CDEBUG(rc == 0 ? D_NET : D_WARNING,
- "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
- rc == 0 ? "Added" : "Failed to add", request->tsr_service,
- request->tsr_is_client ? "client" : "server",
- request->tsr_loop, request->tsr_concur, request->tsr_ndest);
-
- reply->tsr_status = (rc < 0) ? -rc : rc;
- return 0;
-}
-
-static int
-sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
-{
- sfw_session_t *sn = sfw_data.fw_session;
- int rc = 0;
- sfw_batch_t *bat;
-
- reply->bar_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
-
- if (sn == NULL || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
- reply->bar_status = ESRCH;
- return 0;
- }
-
- bat = sfw_find_batch(request->bar_bid);
- if (bat == NULL) {
- reply->bar_status = ENOENT;
- return 0;
- }
-
- switch (request->bar_opc) {
- case SRPC_BATCH_OPC_RUN:
- rc = sfw_run_batch(bat);
- break;
-
- case SRPC_BATCH_OPC_STOP:
- rc = sfw_stop_batch(bat, request->bar_arg);
- break;
-
- case SRPC_BATCH_OPC_QUERY:
- rc = sfw_query_batch(bat, request->bar_testidx, reply);
- break;
-
- default:
- return -EINVAL; /* drop it */
- }
-
- reply->bar_status = (rc < 0) ? -rc : rc;
- return 0;
-}
-
-static int
-sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reply = &rpc->srpc_replymsg;
- srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg;
- unsigned features = LST_FEATS_MASK;
- int rc = 0;
-
- LASSERT(sfw_data.fw_active_srpc == NULL);
- LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (sfw_data.fw_shuttingdown) {
- spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
-
- /* Remove timer to avoid racing with it or expiring active session */
- if (sfw_del_session_timer() != 0) {
- CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
-
- sfw_data.fw_active_srpc = rpc;
- spin_unlock(&sfw_data.fw_lock);
-
- sfw_unpack_message(request);
- LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
-
- /* rpc module should have checked this */
- LASSERT(request->msg_version == SRPC_MSG_VERSION);
-
- if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
- sv->sv_id != SRPC_SERVICE_DEBUG) {
- sfw_session_t *sn = sfw_data.fw_session;
-
- if (sn != NULL &&
- sn->sn_features != request->msg_ses_feats) {
- CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
- request->msg_ses_feats, sn->sn_features);
- reply->msg_body.reply.status = EPROTO;
- reply->msg_body.reply.sid = sn->sn_id;
- goto out;
- }
-
- } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
- /* NB: at this point, an old version will ignore features and
- * create a new session anyway, so the console should be able
- * to handle this */
- reply->msg_body.reply.status = EPROTO;
- goto out;
- }
-
- switch (sv->sv_id) {
- default:
- LBUG();
- case SRPC_SERVICE_TEST:
- rc = sfw_add_test(rpc);
- break;
-
- case SRPC_SERVICE_BATCH:
- rc = sfw_control_batch(&request->msg_body.bat_reqst,
- &reply->msg_body.bat_reply);
- break;
-
- case SRPC_SERVICE_QUERY_STAT:
- rc = sfw_get_stats(&request->msg_body.stat_reqst,
- &reply->msg_body.stat_reply);
- break;
-
- case SRPC_SERVICE_DEBUG:
- rc = sfw_debug_session(&request->msg_body.dbg_reqst,
- &reply->msg_body.dbg_reply);
- break;
-
- case SRPC_SERVICE_MAKE_SESSION:
- rc = sfw_make_session(&request->msg_body.mksn_reqst,
- &reply->msg_body.mksn_reply);
- break;
-
- case SRPC_SERVICE_REMOVE_SESSION:
- rc = sfw_remove_session(&request->msg_body.rmsn_reqst,
- &reply->msg_body.rmsn_reply);
- break;
- }
-
- if (sfw_data.fw_session != NULL)
- features = sfw_data.fw_session->sn_features;
- out:
- reply->msg_ses_feats = features;
- rpc->srpc_done = sfw_server_rpc_done;
- spin_lock(&sfw_data.fw_lock);
-
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
-
- sfw_data.fw_active_srpc = NULL;
- spin_unlock(&sfw_data.fw_lock);
- return rc;
-}
-
-static int
-sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- int rc;
-
- LASSERT(rpc->srpc_bulk != NULL);
- LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
- LASSERT(sfw_data.fw_active_srpc == NULL);
- LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (status != 0) {
- CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
- spin_unlock(&sfw_data.fw_lock);
- return -EIO;
- }
-
- if (sfw_data.fw_shuttingdown) {
- spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
-
- if (sfw_del_session_timer() != 0) {
- CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
-
- sfw_data.fw_active_srpc = rpc;
- spin_unlock(&sfw_data.fw_lock);
-
- rc = sfw_add_test(rpc);
-
- spin_lock(&sfw_data.fw_lock);
-
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
-
- sfw_data.fw_active_srpc = NULL;
- spin_unlock(&sfw_data.fw_lock);
- return rc;
-}
-
-srpc_client_rpc_t *
-sfw_create_rpc(lnet_process_id_t peer, int service,
- unsigned features, int nbulkiov, int bulklen,
- void (*done)(srpc_client_rpc_t *), void *priv)
-{
- srpc_client_rpc_t *rpc = NULL;
-
- spin_lock(&sfw_data.fw_lock);
-
- LASSERT(!sfw_data.fw_shuttingdown);
- LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
- rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- list_del(&rpc->crpc_list);
-
- srpc_init_client_rpc(rpc, peer, service, 0, 0,
- done, sfw_client_rpc_fini, priv);
- }
-
- spin_unlock(&sfw_data.fw_lock);
-
- if (rpc == NULL) {
- rpc = srpc_create_client_rpc(peer, service,
- nbulkiov, bulklen, done,
- nbulkiov != 0 ? NULL :
- sfw_client_rpc_fini,
- priv);
- }
-
- if (rpc != NULL) /* "session" is a framework-level concept */
- rpc->crpc_reqstmsg.msg_ses_feats = features;
-
- return rpc;
-}
-
-void
-sfw_unpack_message(srpc_msg_t *msg)
-{
- if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
-
- /* srpc module should guarantee I wouldn't get crap */
- LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- if (msg->msg_type == SRPC_MSG_STAT_REQST) {
- srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst;
-
- __swab32s(&req->str_type);
- __swab64s(&req->str_rpyid);
- sfw_unpack_sid(req->str_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
-
- __swab32s(&rep->str_status);
- sfw_unpack_sid(rep->str_sid);
- sfw_unpack_fw_counters(rep->str_fw);
- sfw_unpack_rpc_counters(rep->str_rpc);
- sfw_unpack_lnet_counters(rep->str_lnet);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
- srpc_mksn_reqst_t *req = &msg->msg_body.mksn_reqst;
-
- __swab64s(&req->mksn_rpyid);
- __swab32s(&req->mksn_force);
- sfw_unpack_sid(req->mksn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
- srpc_mksn_reply_t *rep = &msg->msg_body.mksn_reply;
-
- __swab32s(&rep->mksn_status);
- __swab32s(&rep->mksn_timeout);
- sfw_unpack_sid(rep->mksn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
- srpc_rmsn_reqst_t *req = &msg->msg_body.rmsn_reqst;
-
- __swab64s(&req->rmsn_rpyid);
- sfw_unpack_sid(req->rmsn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
- srpc_rmsn_reply_t *rep = &msg->msg_body.rmsn_reply;
-
- __swab32s(&rep->rmsn_status);
- sfw_unpack_sid(rep->rmsn_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
- srpc_debug_reqst_t *req = &msg->msg_body.dbg_reqst;
-
- __swab64s(&req->dbg_rpyid);
- __swab32s(&req->dbg_flags);
- sfw_unpack_sid(req->dbg_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
- srpc_debug_reply_t *rep = &msg->msg_body.dbg_reply;
-
- __swab32s(&rep->dbg_nbatch);
- __swab32s(&rep->dbg_timeout);
- sfw_unpack_sid(rep->dbg_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
- srpc_batch_reqst_t *req = &msg->msg_body.bat_reqst;
-
- __swab32s(&req->bar_opc);
- __swab64s(&req->bar_rpyid);
- __swab32s(&req->bar_testidx);
- __swab32s(&req->bar_arg);
- sfw_unpack_sid(req->bar_sid);
- __swab64s(&req->bar_bid.bat_id);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
-
- __swab32s(&rep->bar_status);
- sfw_unpack_sid(rep->bar_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_TEST_REQST) {
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
-
- __swab64s(&req->tsr_rpyid);
- __swab64s(&req->tsr_bulkid);
- __swab32s(&req->tsr_loop);
- __swab32s(&req->tsr_ndest);
- __swab32s(&req->tsr_concur);
- __swab32s(&req->tsr_service);
- sfw_unpack_sid(req->tsr_sid);
- __swab64s(&req->tsr_bid.bat_id);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
- srpc_test_reply_t *rep = &msg->msg_body.tes_reply;
-
- __swab32s(&rep->tsr_status);
- sfw_unpack_sid(rep->tsr_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
- srpc_join_reqst_t *req = &msg->msg_body.join_reqst;
-
- __swab64s(&req->join_rpyid);
- sfw_unpack_sid(req->join_sid);
- return;
- }
-
- if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
- srpc_join_reply_t *rep = &msg->msg_body.join_reply;
-
- __swab32s(&rep->join_status);
- __swab32s(&rep->join_timeout);
- sfw_unpack_sid(rep->join_sid);
- return;
- }
-
- LBUG();
- return;
-}
-
-void
-sfw_abort_rpc(srpc_client_rpc_t *rpc)
-{
- LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
- LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
-
- spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, -EINTR);
- spin_unlock(&rpc->crpc_lock);
- return;
-}
-
-void
-sfw_post_rpc(srpc_client_rpc_t *rpc)
-{
- spin_lock(&rpc->crpc_lock);
-
- LASSERT(!rpc->crpc_closed);
- LASSERT(!rpc->crpc_aborted);
- LASSERT(list_empty(&rpc->crpc_list));
- LASSERT(!sfw_data.fw_shuttingdown);
-
- rpc->crpc_timeout = rpc_timeout;
- srpc_post_rpc(rpc);
-
- spin_unlock(&rpc->crpc_lock);
- return;
-}
-
-static srpc_service_t sfw_services[] = {
- {
- /* sv_id */ SRPC_SERVICE_DEBUG,
- /* sv_name */ "debug",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_QUERY_STAT,
- /* sv_name */ "query stats",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_MAKE_SESSION,
- /* sv_name */ "make session",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_REMOVE_SESSION,
- /* sv_name */ "remove session",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_BATCH,
- /* sv_name */ "batch service",
- 0
- },
- {
- /* sv_id */ SRPC_SERVICE_TEST,
- /* sv_name */ "test service",
- 0
- },
- {
- /* sv_id */ 0,
- /* sv_name */ NULL,
- 0
- }
-};
-
-extern sfw_test_client_ops_t ping_test_client;
-extern srpc_service_t ping_test_service;
-extern void ping_init_test_client(void);
-extern void ping_init_test_service(void);
-
-extern sfw_test_client_ops_t brw_test_client;
-extern srpc_service_t brw_test_service;
-extern void brw_init_test_client(void);
-extern void brw_init_test_service(void);
-
-
-int
-sfw_startup(void)
-{
- int i;
- int rc;
- int error;
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
-
-
- if (session_timeout < 0) {
- CERROR("Session timeout must be non-negative: %d\n",
- session_timeout);
- return -EINVAL;
- }
-
- if (rpc_timeout < 0) {
- CERROR("RPC timeout must be non-negative: %d\n",
- rpc_timeout);
- return -EINVAL;
- }
-
- if (session_timeout == 0)
- CWARN("Zero session_timeout specified - test sessions never expire.\n");
-
- if (rpc_timeout == 0)
- CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
-
- memset(&sfw_data, 0, sizeof(struct smoketest_framework));
-
- sfw_data.fw_session = NULL;
- sfw_data.fw_active_srpc = NULL;
- spin_lock_init(&sfw_data.fw_lock);
- atomic_set(&sfw_data.fw_nzombies, 0);
- INIT_LIST_HEAD(&sfw_data.fw_tests);
- INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
- INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
-
- brw_init_test_client();
- brw_init_test_service();
- rc = sfw_register_test(&brw_test_service, &brw_test_client);
- LASSERT(rc == 0);
-
- ping_init_test_client();
- ping_init_test_service();
- rc = sfw_register_test(&ping_test_service, &ping_test_client);
- LASSERT(rc == 0);
-
- error = 0;
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- sv = tsc->tsc_srv_service;
-
- rc = srpc_add_service(sv);
- LASSERT(rc != -EBUSY);
- if (rc != 0) {
- CWARN("Failed to add %s service: %d\n",
- sv->sv_name, rc);
- error = rc;
- }
- }
-
- for (i = 0; ; i++) {
- sv = &sfw_services[i];
- if (sv->sv_name == NULL)
- break;
-
- sv->sv_bulk_ready = NULL;
- sv->sv_handler = sfw_handle_server_rpc;
- sv->sv_wi_total = SFW_FRWK_WI_MAX;
- if (sv->sv_id == SRPC_SERVICE_TEST)
- sv->sv_bulk_ready = sfw_bulk_ready;
-
- rc = srpc_add_service(sv);
- LASSERT(rc != -EBUSY);
- if (rc != 0) {
- CWARN("Failed to add %s service: %d\n",
- sv->sv_name, rc);
- error = rc;
- }
-
- /* about to call sfw_shutdown(), no need to add buffers */
- if (error)
- continue;
-
- rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
- if (rc != 0) {
- CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
- sv->sv_name, sv->sv_wi_total, rc);
- error = -ENOMEM;
- }
- }
-
- if (error != 0)
- sfw_shutdown();
- return error;
-}
-
-void
-sfw_shutdown(void)
-{
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
- int i;
-
- spin_lock(&sfw_data.fw_lock);
-
- sfw_data.fw_shuttingdown = 1;
- lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock,
- "waiting for active RPC to finish.\n");
-
- if (sfw_del_session_timer() != 0)
- lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock,
- "waiting for session timer to explode.\n");
-
- sfw_deactivate_session();
- lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
- sfw_data.fw_lock,
- "waiting for %d zombie sessions to die.\n",
- atomic_read(&sfw_data.fw_nzombies));
-
- spin_unlock(&sfw_data.fw_lock);
-
- for (i = 0; ; i++) {
- sv = &sfw_services[i];
- if (sv->sv_name == NULL)
- break;
-
- srpc_shutdown_service(sv);
- srpc_remove_service(sv);
- }
-
- list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
- sv = tsc->tsc_srv_service;
- srpc_shutdown_service(sv);
- srpc_remove_service(sv);
- }
-
- while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
- srpc_client_rpc_t *rpc;
-
- rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- list_del(&rpc->crpc_list);
-
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- }
-
- for (i = 0; ; i++) {
- sv = &sfw_services[i];
- if (sv->sv_name == NULL)
- break;
-
- srpc_wait_service_shutdown(sv);
- }
-
- while (!list_empty(&sfw_data.fw_tests)) {
- tsc = list_entry(sfw_data.fw_tests.next,
- sfw_test_case_t, tsc_list);
-
- srpc_wait_service_shutdown(tsc->tsc_srv_service);
-
- list_del(&tsc->tsc_list);
- LIBCFS_FREE(tsc, sizeof(*tsc));
- }
-
- return;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
deleted file mode 100644
index 09b8f4649796..000000000000
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-
-enum {
- LST_INIT_NONE = 0,
- LST_INIT_WI_SERIAL,
- LST_INIT_WI_TEST,
- LST_INIT_RPC,
- LST_INIT_FW,
- LST_INIT_CONSOLE
-};
-
-extern int lstcon_console_init(void);
-extern int lstcon_console_fini(void);
-
-static int lst_init_step = LST_INIT_NONE;
-
-struct cfs_wi_sched *lst_sched_serial;
-struct cfs_wi_sched **lst_sched_test;
-
-static void
-lnet_selftest_fini(void)
-{
- int i;
-
- switch (lst_init_step) {
- case LST_INIT_CONSOLE:
- lstcon_console_fini();
- case LST_INIT_FW:
- sfw_shutdown();
- case LST_INIT_RPC:
- srpc_shutdown();
- case LST_INIT_WI_TEST:
- for (i = 0;
- i < cfs_cpt_number(lnet_cpt_table()); i++) {
- if (lst_sched_test[i] == NULL)
- continue;
- cfs_wi_sched_destroy(lst_sched_test[i]);
- }
- LIBCFS_FREE(lst_sched_test,
- sizeof(lst_sched_test[0]) *
- cfs_cpt_number(lnet_cpt_table()));
- lst_sched_test = NULL;
-
- case LST_INIT_WI_SERIAL:
- cfs_wi_sched_destroy(lst_sched_serial);
- lst_sched_serial = NULL;
- case LST_INIT_NONE:
- break;
- default:
- LBUG();
- }
-}
-
-static int
-lnet_selftest_init(void)
-{
- int nscheds;
- int rc;
- int i;
-
- rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY,
- 1, &lst_sched_serial);
- if (rc != 0) {
- CERROR("Failed to create serial WI scheduler for LST\n");
- return rc;
- }
- lst_init_step = LST_INIT_WI_SERIAL;
-
- nscheds = cfs_cpt_number(lnet_cpt_table());
- LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds);
- if (lst_sched_test == NULL)
- goto error;
-
- lst_init_step = LST_INIT_WI_TEST;
- for (i = 0; i < nscheds; i++) {
- int nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
-
- /* reserve at least one CPU for LND */
- nthrs = max(nthrs - 1, 1);
- rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
- nthrs, &lst_sched_test[i]);
- if (rc != 0) {
- CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
- i);
- goto error;
- }
- }
-
- rc = srpc_startup();
- if (rc != 0) {
- CERROR("LST can't startup rpc\n");
- goto error;
- }
- lst_init_step = LST_INIT_RPC;
-
- rc = sfw_startup();
- if (rc != 0) {
- CERROR("LST can't startup framework\n");
- goto error;
- }
- lst_init_step = LST_INIT_FW;
-
- rc = lstcon_console_init();
- if (rc != 0) {
- CERROR("LST can't startup console\n");
- goto error;
- }
- lst_init_step = LST_INIT_CONSOLE;
- return 0;
-error:
- lnet_selftest_fini();
- return rc;
-}
-
-
-MODULE_DESCRIPTION("LNet Selftest");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.9.0");
-
-module_init(lnet_selftest_init);
-module_exit(lnet_selftest_fini);
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
deleted file mode 100644
index d42653654fa8..000000000000
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/conctl.c
- *
- * Test client & Server
- *
- * Author: Liang Zhen <liangzhen@clusterfs.com>
- */
-
-#include "selftest.h"
-
-#define LST_PING_TEST_MAGIC 0xbabeface
-
-static int ping_srv_workitems = SFW_TEST_WI_MAX;
-module_param(ping_srv_workitems, int, 0644);
-MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
-
-typedef struct {
- spinlock_t pnd_lock; /* serialize */
- int pnd_counter; /* sequence counter */
-} lst_ping_data_t;
-
-static lst_ping_data_t lst_ping_data;
-
-static int
-ping_client_init(sfw_test_instance_t *tsi)
-{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
-
- LASSERT(tsi->tsi_is_client);
- LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0);
-
- spin_lock_init(&lst_ping_data.pnd_lock);
- lst_ping_data.pnd_counter = 0;
-
- return 0;
-}
-
-static void
-ping_client_fini(sfw_test_instance_t *tsi)
-{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- int errors;
-
- LASSERT(sn != NULL);
- LASSERT(tsi->tsi_is_client);
-
- errors = atomic_read(&sn->sn_ping_errors);
- if (errors)
- CWARN("%d pings have failed.\n", errors);
- else
- CDEBUG(D_NET, "Ping test finished OK.\n");
-}
-
-static int
-ping_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpc)
-{
- srpc_ping_reqst_t *req;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- struct timespec64 ts;
- int rc;
-
- LASSERT(sn != NULL);
- LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
-
- rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc);
- if (rc != 0)
- return rc;
-
- req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst;
-
- req->pnr_magic = LST_PING_TEST_MAGIC;
-
- spin_lock(&lst_ping_data.pnd_lock);
- req->pnr_seq = lst_ping_data.pnd_counter++;
- spin_unlock(&lst_ping_data.pnd_lock);
-
- ktime_get_real_ts64(&ts);
- req->pnr_time_sec = ts.tv_sec;
- req->pnr_time_usec = ts.tv_nsec / NSEC_PER_USEC;
-
- return rc;
-}
-
-static void
-ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
-{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
- srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
- struct timespec64 ts;
-
- LASSERT(sn != NULL);
-
- if (rpc->crpc_status != 0) {
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
- atomic_inc(&sn->sn_ping_errors);
- CERROR("Unable to ping %s (%d): %d\n",
- libcfs_id2str(rpc->crpc_dest),
- reqst->pnr_seq, rpc->crpc_status);
- return;
- }
-
- if (rpc->crpc_replymsg.msg_magic != SRPC_MSG_MAGIC) {
- __swab32s(&reply->pnr_seq);
- __swab32s(&reply->pnr_magic);
- __swab32s(&reply->pnr_status);
- }
-
- if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
- rpc->crpc_status = -EBADMSG;
- atomic_inc(&sn->sn_ping_errors);
- CERROR("Bad magic %u from %s, %u expected.\n",
- reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
- LST_PING_TEST_MAGIC);
- return;
- }
-
- if (reply->pnr_seq != reqst->pnr_seq) {
- rpc->crpc_status = -EBADMSG;
- atomic_inc(&sn->sn_ping_errors);
- CERROR("Bad seq %u from %s, %u expected.\n",
- reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
- reqst->pnr_seq);
- return;
- }
-
- ktime_get_real_ts64(&ts);
- CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
- (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
- (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
- return;
-}
-
-static int
-ping_server_handle(struct srpc_server_rpc *rpc)
-{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst;
- srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
-
- LASSERT(sv->sv_id == SRPC_SERVICE_PING);
-
- if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
- LASSERT(reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
-
- __swab32s(&req->pnr_seq);
- __swab32s(&req->pnr_magic);
- __swab64s(&req->pnr_time_sec);
- __swab64s(&req->pnr_time_usec);
- }
- LASSERT(reqstmsg->msg_type == srpc_service2request(sv->sv_id));
-
- if (req->pnr_magic != LST_PING_TEST_MAGIC) {
- CERROR("Unexpected magic %08x from %s\n",
- req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
- return -EINVAL;
- }
-
- rep->pnr_seq = req->pnr_seq;
- rep->pnr_magic = LST_PING_TEST_MAGIC;
-
- if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
- replymsg->msg_ses_feats = LST_FEATS_MASK;
- rep->pnr_status = EPROTO;
- return 0;
- }
-
- replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
-
- CDEBUG(D_NET, "Get ping %d from %s\n",
- req->pnr_seq, libcfs_id2str(rpc->srpc_peer));
- return 0;
-}
-
-sfw_test_client_ops_t ping_test_client;
-void ping_init_test_client(void)
-{
- ping_test_client.tso_init = ping_client_init;
- ping_test_client.tso_fini = ping_client_fini;
- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
- ping_test_client.tso_done_rpc = ping_client_done_rpc;
-}
-
-srpc_service_t ping_test_service;
-void ping_init_test_service(void)
-{
- ping_test_service.sv_id = SRPC_SERVICE_PING;
- ping_test_service.sv_name = "ping_test";
- ping_test_service.sv_handler = ping_server_handle;
- ping_test_service.sv_wi_total = ping_srv_workitems;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
deleted file mode 100644
index 9fc3ce33ec7b..000000000000
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ /dev/null
@@ -1,1672 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/rpc.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- *
- * 2012-05-13: Liang Zhen <liang@whamcloud.com>
- * - percpt data for service to improve smp performance
- * - code cleanup
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-
-typedef enum {
- SRPC_STATE_NONE,
- SRPC_STATE_NI_INIT,
- SRPC_STATE_EQ_INIT,
- SRPC_STATE_RUNNING,
- SRPC_STATE_STOPPING,
-} srpc_state_t;
-
-static struct smoketest_rpc {
- spinlock_t rpc_glock; /* global lock */
- srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
- lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
- srpc_state_t rpc_state;
- srpc_counters_t rpc_counters;
- __u64 rpc_matchbits; /* matchbits counter */
-} srpc_data;
-
-static inline int
-srpc_serv_portal(int svc_id)
-{
- return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
- SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
-}
-
-/* forward ref's */
-int srpc_handle_rpc(swi_workitem_t *wi);
-
-void srpc_get_counters(srpc_counters_t *cnt)
-{
- spin_lock(&srpc_data.rpc_glock);
- *cnt = srpc_data.rpc_counters;
- spin_unlock(&srpc_data.rpc_glock);
-}
-
-void srpc_set_counters(const srpc_counters_t *cnt)
-{
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters = *cnt;
- spin_unlock(&srpc_data.rpc_glock);
-}
-
-static int
-srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
-{
- nob = min_t(int, nob, PAGE_CACHE_SIZE);
-
- LASSERT(nob > 0);
- LASSERT(i >= 0 && i < bk->bk_niov);
-
- bk->bk_iovs[i].kiov_offset = 0;
- bk->bk_iovs[i].kiov_page = pg;
- bk->bk_iovs[i].kiov_len = nob;
- return nob;
-}
-
-void
-srpc_free_bulk(srpc_bulk_t *bk)
-{
- int i;
- struct page *pg;
-
- LASSERT(bk != NULL);
-
- for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].kiov_page;
- if (pg == NULL)
- break;
-
- __free_page(pg);
- }
-
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
- return;
-}
-
-srpc_bulk_t *
-srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
-{
- srpc_bulk_t *bk;
- int i;
-
- LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
-
- LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
- offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
- if (bk == NULL) {
- CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
- return NULL;
- }
-
- memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
- bk->bk_sink = sink;
- bk->bk_len = bulk_len;
- bk->bk_niov = bulk_npg;
-
- for (i = 0; i < bulk_npg; i++) {
- struct page *pg;
- int nob;
-
- pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
- GFP_IOFS, 0);
- if (pg == NULL) {
- CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
- srpc_free_bulk(bk);
- return NULL;
- }
-
- nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
- bulk_len -= nob;
- }
-
- return bk;
-}
-
-static inline __u64
-srpc_next_id(void)
-{
- __u64 id;
-
- spin_lock(&srpc_data.rpc_glock);
- id = srpc_data.rpc_matchbits++;
- spin_unlock(&srpc_data.rpc_glock);
- return id;
-}
-
-static void
-srpc_init_server_rpc(struct srpc_server_rpc *rpc,
- struct srpc_service_cd *scd,
- struct srpc_buffer *buffer)
-{
- memset(rpc, 0, sizeof(*rpc));
- swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
- srpc_serv_is_framework(scd->scd_svc) ?
- lst_sched_serial : lst_sched_test[scd->scd_cpt]);
-
- rpc->srpc_ev.ev_fired = 1; /* no event expected now */
-
- rpc->srpc_scd = scd;
- rpc->srpc_reqstbuf = buffer;
- rpc->srpc_peer = buffer->buf_peer;
- rpc->srpc_self = buffer->buf_self;
- LNetInvalidateHandle(&rpc->srpc_replymdh);
-}
-
-static void
-srpc_service_fini(struct srpc_service *svc)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- struct srpc_buffer *buf;
- struct list_head *q;
- int i;
-
- if (svc->sv_cpt_data == NULL)
- return;
-
- cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
- while (1) {
- if (!list_empty(&scd->scd_buf_posted))
- q = &scd->scd_buf_posted;
- else if (!list_empty(&scd->scd_buf_blocked))
- q = &scd->scd_buf_blocked;
- else
- break;
-
- while (!list_empty(q)) {
- buf = list_entry(q->next,
- struct srpc_buffer,
- buf_list);
- list_del(&buf->buf_list);
- LIBCFS_FREE(buf, sizeof(*buf));
- }
- }
-
- LASSERT(list_empty(&scd->scd_rpc_active));
-
- while (!list_empty(&scd->scd_rpc_free)) {
- rpc = list_entry(scd->scd_rpc_free.next,
- struct srpc_server_rpc,
- srpc_list);
- list_del(&rpc->srpc_list);
- LIBCFS_FREE(rpc, sizeof(*rpc));
- }
- }
-
- cfs_percpt_free(svc->sv_cpt_data);
- svc->sv_cpt_data = NULL;
-}
-
-static int
-srpc_service_nrpcs(struct srpc_service *svc)
-{
- int nrpcs = svc->sv_wi_total / svc->sv_ncpts;
-
- return srpc_serv_is_framework(svc) ?
- max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
-}
-
-int srpc_add_buffer(struct swi_workitem *wi);
-
-static int
-srpc_service_init(struct srpc_service *svc)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- int nrpcs;
- int i;
- int j;
-
- svc->sv_shuttingdown = 0;
-
- svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct srpc_service_cd));
- if (svc->sv_cpt_data == NULL)
- return -ENOMEM;
-
- svc->sv_ncpts = srpc_serv_is_framework(svc) ?
- 1 : cfs_cpt_number(lnet_cpt_table());
- nrpcs = srpc_service_nrpcs(svc);
-
- cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
- scd->scd_cpt = i;
- scd->scd_svc = svc;
- spin_lock_init(&scd->scd_lock);
- INIT_LIST_HEAD(&scd->scd_rpc_free);
- INIT_LIST_HEAD(&scd->scd_rpc_active);
- INIT_LIST_HEAD(&scd->scd_buf_posted);
- INIT_LIST_HEAD(&scd->scd_buf_blocked);
-
- scd->scd_ev.ev_data = scd;
- scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
-
- /* NB: don't use lst_sched_serial for adding buffer,
- * see details in srpc_service_add_buffers() */
- swi_init_workitem(&scd->scd_buf_wi, scd,
- srpc_add_buffer, lst_sched_test[i]);
-
- if (i != 0 && srpc_serv_is_framework(svc)) {
- /* NB: framework service only needs srpc_service_cd for
- * one partition, but we allocate for all to make
- * it easier to implement, it will waste a little
- * memory but nobody should care about this */
- continue;
- }
-
- for (j = 0; j < nrpcs; j++) {
- LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
- i, sizeof(*rpc));
- if (rpc == NULL) {
- srpc_service_fini(svc);
- return -ENOMEM;
- }
- list_add(&rpc->srpc_list, &scd->scd_rpc_free);
- }
- }
-
- return 0;
-}
-
-int
-srpc_add_service(struct srpc_service *sv)
-{
- int id = sv->sv_id;
-
- LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
-
- if (srpc_service_init(sv) != 0)
- return -ENOMEM;
-
- spin_lock(&srpc_data.rpc_glock);
-
- LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
-
- if (srpc_data.rpc_services[id] != NULL) {
- spin_unlock(&srpc_data.rpc_glock);
- goto failed;
- }
-
- srpc_data.rpc_services[id] = sv;
- spin_unlock(&srpc_data.rpc_glock);
-
- CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
- return 0;
-
- failed:
- srpc_service_fini(sv);
- return -EBUSY;
-}
-
-int
-srpc_remove_service(srpc_service_t *sv)
-{
- int id = sv->sv_id;
-
- spin_lock(&srpc_data.rpc_glock);
-
- if (srpc_data.rpc_services[id] != sv) {
- spin_unlock(&srpc_data.rpc_glock);
- return -ENOENT;
- }
-
- srpc_data.rpc_services[id] = NULL;
- spin_unlock(&srpc_data.rpc_glock);
- return 0;
-}
-
-static int
-srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
- int len, int options, lnet_process_id_t peer,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
-{
- int rc;
- lnet_md_t md;
- lnet_handle_me_t meh;
-
- rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
- local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- return -ENOMEM;
- }
-
- md.threshold = 1;
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
- md.options = options;
- md.eq_handle = srpc_data.rpc_lnet_eq;
-
- rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
- if (rc != 0) {
- CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
-
- rc = LNetMEUnlink(meh);
- LASSERT(rc == 0);
- return -ENOMEM;
- }
-
- CDEBUG(D_NET,
- "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
- libcfs_id2str(peer), portal, matchbits);
- return 0;
-}
-
-static int
-srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
- int options, lnet_process_id_t peer, lnet_nid_t self,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
-{
- int rc;
- lnet_md_t md;
-
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
- md.eq_handle = srpc_data.rpc_lnet_eq;
- md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
- md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
-
- rc = LNetMDBind(md, LNET_UNLINK, mdh);
- if (rc != 0) {
- CERROR("LNetMDBind failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- return -ENOMEM;
- }
-
- /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
- * they're only meaningful for MDs attached to an ME (i.e. passive
- * buffers... */
- if ((options & LNET_MD_OP_PUT) != 0) {
- rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
- portal, matchbits, 0, 0);
- } else {
- LASSERT((options & LNET_MD_OP_GET) != 0);
-
- rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
- }
-
- if (rc != 0) {
- CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
- ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
- libcfs_id2str(peer), portal, matchbits, rc);
-
- /* The forthcoming unlink event will complete this operation
- * with failure, so fall through and return success here.
- */
- rc = LNetMDUnlink(*mdh);
- LASSERT(rc == 0);
- } else {
- CDEBUG(D_NET,
- "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
- libcfs_id2str(peer), portal, matchbits);
- }
- return 0;
-}
-
-static int
-srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
- int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
-{
- return srpc_post_active_rdma(srpc_serv_portal(service), service,
- buf, len, LNET_MD_OP_PUT, peer,
- LNET_NID_ANY, mdh, ev);
-}
-
-static int
-srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
-{
- lnet_process_id_t any = {0};
-
- any.nid = LNET_NID_ANY;
- any.pid = LNET_PID_ANY;
-
- return srpc_post_passive_rdma(srpc_serv_portal(service),
- local, service, buf, len,
- LNET_MD_OP_PUT, any, mdh, ev);
-}
-
-static int
-srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
- __must_hold(&scd->scd_lock)
-{
- struct srpc_service *sv = scd->scd_svc;
- struct srpc_msg *msg = &buf->buf_msg;
- int rc;
-
- LNetInvalidateHandle(&buf->buf_mdh);
- list_add(&buf->buf_list, &scd->scd_buf_posted);
- scd->scd_buf_nposted++;
- spin_unlock(&scd->scd_lock);
-
- rc = srpc_post_passive_rqtbuf(sv->sv_id,
- !srpc_serv_is_framework(sv),
- msg, sizeof(*msg), &buf->buf_mdh,
- &scd->scd_ev);
-
-	/* At this point, an RPC (new or delayed) may have arrived in
- * msg and its event handler has been called. So we must add
- * buf to scd_buf_posted _before_ dropping scd_lock */
-
- spin_lock(&scd->scd_lock);
-
- if (rc == 0) {
- if (!sv->sv_shuttingdown)
- return 0;
-
- spin_unlock(&scd->scd_lock);
- /* srpc_shutdown_service might have tried to unlink me
- * when my buf_mdh was still invalid */
- LNetMDUnlink(buf->buf_mdh);
- spin_lock(&scd->scd_lock);
- return 0;
- }
-
- scd->scd_buf_nposted--;
- if (sv->sv_shuttingdown)
-		return rc; /* don't allow scd_buf_posted to change */
-
- list_del(&buf->buf_list);
- spin_unlock(&scd->scd_lock);
-
- LIBCFS_FREE(buf, sizeof(*buf));
-
- spin_lock(&scd->scd_lock);
- return rc;
-}
-
-int
-srpc_add_buffer(struct swi_workitem *wi)
-{
- struct srpc_service_cd *scd = wi->swi_workitem.wi_data;
- struct srpc_buffer *buf;
- int rc = 0;
-
-	/* called by workitem scheduler threads; these threads should
-	 * already have CPT affinity set, so buffers will be posted on
-	 * the CPT-local list of the portal */
- spin_lock(&scd->scd_lock);
-
- while (scd->scd_buf_adjust > 0 &&
- !scd->scd_svc->sv_shuttingdown) {
- scd->scd_buf_adjust--; /* consume it */
- scd->scd_buf_posting++;
-
- spin_unlock(&scd->scd_lock);
-
- LIBCFS_ALLOC(buf, sizeof(*buf));
- if (buf == NULL) {
- CERROR("Failed to add new buf to service: %s\n",
- scd->scd_svc->sv_name);
- spin_lock(&scd->scd_lock);
- rc = -ENOMEM;
- break;
- }
-
- spin_lock(&scd->scd_lock);
- if (scd->scd_svc->sv_shuttingdown) {
- spin_unlock(&scd->scd_lock);
- LIBCFS_FREE(buf, sizeof(*buf));
-
- spin_lock(&scd->scd_lock);
- rc = -ESHUTDOWN;
- break;
- }
-
- rc = srpc_service_post_buffer(scd, buf);
- if (rc != 0)
- break; /* buf has been freed inside */
-
- LASSERT(scd->scd_buf_posting > 0);
- scd->scd_buf_posting--;
- scd->scd_buf_total++;
- scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
- }
-
- if (rc != 0) {
- scd->scd_buf_err_stamp = ktime_get_real_seconds();
- scd->scd_buf_err = rc;
-
- LASSERT(scd->scd_buf_posting > 0);
- scd->scd_buf_posting--;
- }
-
- spin_unlock(&scd->scd_lock);
- return 0;
-}
-
-int
-srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
-{
- struct srpc_service_cd *scd;
- int rc = 0;
- int i;
-
- LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
- scd->scd_buf_err = 0;
- scd->scd_buf_err_stamp = 0;
- scd->scd_buf_posting = 0;
- scd->scd_buf_adjust = nbuffer;
- /* start to post buffers */
- swi_schedule_workitem(&scd->scd_buf_wi);
- spin_unlock(&scd->scd_lock);
-
-		/* a framework service only posts buffers for one partition */
- if (srpc_serv_is_framework(sv))
- break;
- }
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
- /*
- * NB: srpc_service_add_buffers() can be called inside
- * thread context of lst_sched_serial, and we don't normally
- * allow to sleep inside thread context of WI scheduler
- * because it will block current scheduler thread from doing
- * anything else, even worse, it could deadlock if it's
- * waiting on result from another WI of the same scheduler.
-		 * However, it's safe here because scd_buf_wi is scheduled
- * by thread in a different WI scheduler (lst_sched_test),
- * so we don't have any risk of deadlock, though this could
- * block all WIs pending on lst_sched_serial for a moment
- * which is not good but not fatal.
- */
- lst_wait_until(scd->scd_buf_err != 0 ||
- (scd->scd_buf_adjust == 0 &&
- scd->scd_buf_posting == 0),
- scd->scd_lock, "waiting for adding buffer\n");
-
- if (scd->scd_buf_err != 0 && rc == 0)
- rc = scd->scd_buf_err;
-
- spin_unlock(&scd->scd_lock);
- }
-
- return rc;
-}
-
-void
-srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
-{
- struct srpc_service_cd *scd;
- int num;
- int i;
-
- LASSERT(!sv->sv_shuttingdown);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
- num = scd->scd_buf_total + scd->scd_buf_posting;
- scd->scd_buf_adjust -= min(nbuffer, num);
-
- spin_unlock(&scd->scd_lock);
- }
-}
-
-/* returns 1 if sv has finished, otherwise 0 */
-int
-srpc_finish_service(struct srpc_service *sv)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- int i;
-
- LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
- if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
- spin_unlock(&scd->scd_lock);
- return 0;
- }
-
- if (scd->scd_buf_nposted > 0) {
- CDEBUG(D_NET, "waiting for %d posted buffers to unlink",
- scd->scd_buf_nposted);
- spin_unlock(&scd->scd_lock);
- return 0;
- }
-
- if (list_empty(&scd->scd_rpc_active)) {
- spin_unlock(&scd->scd_lock);
- continue;
- }
-
- rpc = list_entry(scd->scd_rpc_active.next,
- struct srpc_server_rpc, srpc_list);
- CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
- rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state),
- rpc->srpc_wi.swi_workitem.wi_scheduled,
- rpc->srpc_wi.swi_workitem.wi_running,
- rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
- rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
- spin_unlock(&scd->scd_lock);
- return 0;
- }
-
- /* no lock needed from now on */
- srpc_service_fini(sv);
- return 1;
-}
-
-/* called with scd->scd_lock held */
-static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
- __must_hold(&scd->scd_lock)
-{
- if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
- if (srpc_service_post_buffer(scd, buf) != 0) {
- CWARN("Failed to post %s buffer\n",
- scd->scd_svc->sv_name);
- }
- return;
- }
-
- /* service is shutting down, or we want to recycle some buffers */
- scd->scd_buf_total--;
-
- if (scd->scd_buf_adjust < 0) {
- scd->scd_buf_adjust++;
- if (scd->scd_buf_adjust < 0 &&
- scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
- CDEBUG(D_INFO,
- "Try to recycle %d buffers but nothing left\n",
- scd->scd_buf_adjust);
- scd->scd_buf_adjust = 0;
- }
- }
-
- spin_unlock(&scd->scd_lock);
- LIBCFS_FREE(buf, sizeof(*buf));
- spin_lock(&scd->scd_lock);
-}
-
-void
-srpc_abort_service(struct srpc_service *sv)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- int i;
-
- CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
- sv->sv_id, sv->sv_name);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
-		/* schedule in-flight RPCs so they notice the abort; NB:
-		 * this races with incoming RPCs - a complete fix would make
-		 * test RPCs carry the session ID in their headers */
- list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
- rpc->srpc_aborted = 1;
- swi_schedule_workitem(&rpc->srpc_wi);
- }
-
- spin_unlock(&scd->scd_lock);
- }
-}
-
-void
-srpc_shutdown_service(srpc_service_t *sv)
-{
- struct srpc_service_cd *scd;
- struct srpc_server_rpc *rpc;
- srpc_buffer_t *buf;
- int i;
-
- CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
- sv->sv_id, sv->sv_name);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
- spin_lock(&scd->scd_lock);
-
- sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
- spin_unlock(&scd->scd_lock);
-
- cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- spin_lock(&scd->scd_lock);
-
- /* schedule in-flight RPCs to notice the shutdown */
- list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
- swi_schedule_workitem(&rpc->srpc_wi);
-
- spin_unlock(&scd->scd_lock);
-
- /* OK to traverse scd_buf_posted without lock, since no one
- * touches scd_buf_posted now */
- list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
- LNetMDUnlink(buf->buf_mdh);
- }
-}
-
-static int
-srpc_send_request(srpc_client_rpc_t *rpc)
-{
- srpc_event_t *ev = &rpc->crpc_reqstev;
- int rc;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REQUEST_SENT;
-
- rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
- &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
- &rpc->crpc_reqstmdh, ev);
- if (rc != 0) {
- LASSERT(rc == -ENOMEM);
- ev->ev_fired = 1; /* no more event expected */
- }
- return rc;
-}
-
-static int
-srpc_prepare_reply(srpc_client_rpc_t *rpc)
-{
- srpc_event_t *ev = &rpc->crpc_replyev;
- __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
- int rc;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REPLY_RCVD;
-
- *id = srpc_next_id();
-
- rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &rpc->crpc_replymsg, sizeof(srpc_msg_t),
- LNET_MD_OP_PUT, rpc->crpc_dest,
- &rpc->crpc_replymdh, ev);
- if (rc != 0) {
- LASSERT(rc == -ENOMEM);
- ev->ev_fired = 1; /* no more event expected */
- }
- return rc;
-}
-
-static int
-srpc_prepare_bulk(srpc_client_rpc_t *rpc)
-{
- srpc_bulk_t *bk = &rpc->crpc_bulk;
- srpc_event_t *ev = &rpc->crpc_bulkev;
- __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
- int rc;
- int opt;
-
- LASSERT(bk->bk_niov <= LNET_MAX_IOV);
-
- if (bk->bk_niov == 0)
- return 0; /* nothing to do */
-
- opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
- opt |= LNET_MD_KIOV;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_BULK_REQ_RCVD;
-
- *id = srpc_next_id();
-
- rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &bk->bk_iovs[0], bk->bk_niov, opt,
- rpc->crpc_dest, &bk->bk_mdh, ev);
- if (rc != 0) {
- LASSERT(rc == -ENOMEM);
- ev->ev_fired = 1; /* no more event expected */
- }
- return rc;
-}
-
-static int
-srpc_do_bulk(srpc_server_rpc_t *rpc)
-{
- srpc_event_t *ev = &rpc->srpc_ev;
- srpc_bulk_t *bk = rpc->srpc_bulk;
- __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
- int rc;
- int opt;
-
- LASSERT(bk != NULL);
-
- opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
- opt |= LNET_MD_KIOV;
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;
-
- rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
- &bk->bk_iovs[0], bk->bk_niov, opt,
- rpc->srpc_peer, rpc->srpc_self,
- &bk->bk_mdh, ev);
- if (rc != 0)
- ev->ev_fired = 1; /* no more event expected */
- return rc;
-}
-
-/* only called from srpc_handle_rpc */
-static void
-srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
-{
- struct srpc_service_cd *scd = rpc->srpc_scd;
- struct srpc_service *sv = scd->scd_svc;
- srpc_buffer_t *buffer;
-
- LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
-
- rpc->srpc_status = status;
-
- CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
- "Server RPC %p done: service %s, peer %s, status %s:%d\n",
- rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state), status);
-
- if (status != 0) {
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_dropped++;
- spin_unlock(&srpc_data.rpc_glock);
- }
-
- if (rpc->srpc_done != NULL)
- (*rpc->srpc_done) (rpc);
- LASSERT(rpc->srpc_bulk == NULL);
-
- spin_lock(&scd->scd_lock);
-
- if (rpc->srpc_reqstbuf != NULL) {
-		/* NB: srpc_service_recycle_buffer might drop scd_lock, but
-		 * sv won't go away since scd_rpc_active must not be empty */
- srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
- rpc->srpc_reqstbuf = NULL;
- }
-
- list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */
-
- /*
- * No one can schedule me now since:
- * - I'm not on scd_rpc_active.
- * - all LNet events have been fired.
- * Cancel pending schedules and prevent future schedule attempts:
- */
- LASSERT(rpc->srpc_ev.ev_fired);
- swi_exit_workitem(&rpc->srpc_wi);
-
- if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
- buffer = list_entry(scd->scd_buf_blocked.next,
- srpc_buffer_t, buf_list);
- list_del(&buffer->buf_list);
-
- srpc_init_server_rpc(rpc, scd, buffer);
- list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
- swi_schedule_workitem(&rpc->srpc_wi);
- } else {
- list_add(&rpc->srpc_list, &scd->scd_rpc_free);
- }
-
- spin_unlock(&scd->scd_lock);
- return;
-}
-
-/* handles an incoming RPC */
-int
-srpc_handle_rpc(swi_workitem_t *wi)
-{
- struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
- struct srpc_service_cd *scd = rpc->srpc_scd;
- struct srpc_service *sv = scd->scd_svc;
- srpc_event_t *ev = &rpc->srpc_ev;
- int rc = 0;
-
- LASSERT(wi == &rpc->srpc_wi);
-
- spin_lock(&scd->scd_lock);
-
- if (sv->sv_shuttingdown || rpc->srpc_aborted) {
- spin_unlock(&scd->scd_lock);
-
- if (rpc->srpc_bulk != NULL)
- LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
- LNetMDUnlink(rpc->srpc_replymdh);
-
- if (ev->ev_fired) { /* no more event, OK to finish */
- srpc_server_rpc_done(rpc, -ESHUTDOWN);
- return 1;
- }
- return 0;
- }
-
- spin_unlock(&scd->scd_lock);
-
- switch (wi->swi_state) {
- default:
- LBUG();
- case SWI_STATE_NEWBORN: {
- srpc_msg_t *msg;
- srpc_generic_reply_t *reply;
-
- msg = &rpc->srpc_reqstbuf->buf_msg;
- reply = &rpc->srpc_replymsg.msg_body.reply;
-
- if (msg->msg_magic == 0) {
- /* moaned already in srpc_lnet_ev_handler */
- srpc_server_rpc_done(rpc, EBADMSG);
- return 1;
- }
-
- srpc_unpack_msg_hdr(msg);
- if (msg->msg_version != SRPC_MSG_VERSION) {
- CWARN("Version mismatch: %u, %u expected, from %s\n",
- msg->msg_version, SRPC_MSG_VERSION,
- libcfs_id2str(rpc->srpc_peer));
- reply->status = EPROTO;
- /* drop through and send reply */
- } else {
- reply->status = 0;
- rc = (*sv->sv_handler)(rpc);
- LASSERT(reply->status == 0 || !rpc->srpc_bulk);
- if (rc != 0) {
- srpc_server_rpc_done(rpc, rc);
- return 1;
- }
- }
-
- wi->swi_state = SWI_STATE_BULK_STARTED;
-
- if (rpc->srpc_bulk != NULL) {
- rc = srpc_do_bulk(rpc);
- if (rc == 0)
- return 0; /* wait for bulk */
-
- LASSERT(ev->ev_fired);
- ev->ev_status = rc;
- }
- }
- case SWI_STATE_BULK_STARTED:
- LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);
-
- if (rpc->srpc_bulk != NULL) {
- rc = ev->ev_status;
-
- if (sv->sv_bulk_ready != NULL)
- rc = (*sv->sv_bulk_ready) (rpc, rc);
-
- if (rc != 0) {
- srpc_server_rpc_done(rpc, rc);
- return 1;
- }
- }
-
- wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
- rc = srpc_send_reply(rpc);
- if (rc == 0)
- return 0; /* wait for reply */
- srpc_server_rpc_done(rpc, rc);
- return 1;
-
- case SWI_STATE_REPLY_SUBMITTED:
- if (!ev->ev_fired) {
- CERROR("RPC %p: bulk %p, service %d\n",
- rpc, rpc->srpc_bulk, sv->sv_id);
- CERROR("Event: status %d, type %d, lnet %d\n",
- ev->ev_status, ev->ev_type, ev->ev_lnet);
- LASSERT(ev->ev_fired);
- }
-
- wi->swi_state = SWI_STATE_DONE;
- srpc_server_rpc_done(rpc, ev->ev_status);
- return 1;
- }
-
- return 0;
-}
-
-static void
-srpc_client_rpc_expired(void *data)
-{
- srpc_client_rpc_t *rpc = data;
-
- CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- rpc->crpc_timeout);
-
- spin_lock(&rpc->crpc_lock);
-
- rpc->crpc_timeout = 0;
- srpc_abort_rpc(rpc, -ETIMEDOUT);
-
- spin_unlock(&rpc->crpc_lock);
-
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_expired++;
- spin_unlock(&srpc_data.rpc_glock);
-}
-
-inline void
-srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
-{
- stt_timer_t *timer = &rpc->crpc_timer;
-
- if (rpc->crpc_timeout == 0)
- return;
-
- INIT_LIST_HEAD(&timer->stt_list);
- timer->stt_data = rpc;
- timer->stt_func = srpc_client_rpc_expired;
- timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
- stt_add_timer(timer);
- return;
-}
-
-/*
- * Called with rpc->crpc_lock held.
- *
- * Upon exit the RPC expiry timer is not queued and the handler is not
- * running on any CPU. */
-static void
-srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
-{
- /* timer not planted or already exploded */
- if (rpc->crpc_timeout == 0)
- return;
-
- /* timer successfully defused */
- if (stt_del_timer(&rpc->crpc_timer))
- return;
-
- /* timer detonated, wait for it to explode */
- while (rpc->crpc_timeout != 0) {
- spin_unlock(&rpc->crpc_lock);
-
- schedule();
-
- spin_lock(&rpc->crpc_lock);
- }
-}
-
-static void
-srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
-{
- swi_workitem_t *wi = &rpc->crpc_wi;
-
- LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
-
- spin_lock(&rpc->crpc_lock);
-
- rpc->crpc_closed = 1;
- if (rpc->crpc_status == 0)
- rpc->crpc_status = status;
-
- srpc_del_client_rpc_timer(rpc);
-
- CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
- "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
-
- /*
- * No one can schedule me now since:
- * - RPC timer has been defused.
- * - all LNet events have been fired.
- * - crpc_closed has been set, preventing srpc_abort_rpc from
- * scheduling me.
- * Cancel pending schedules and prevent future schedule attempts:
- */
- LASSERT(!srpc_event_pending(rpc));
- swi_exit_workitem(wi);
-
- spin_unlock(&rpc->crpc_lock);
-
- (*rpc->crpc_done)(rpc);
- return;
-}
-
-/* sends an outgoing RPC */
-int
-srpc_send_rpc(swi_workitem_t *wi)
-{
- int rc = 0;
- srpc_client_rpc_t *rpc;
- srpc_msg_t *reply;
- int do_bulk;
-
- LASSERT(wi != NULL);
-
- rpc = wi->swi_workitem.wi_data;
-
- LASSERT(rpc != NULL);
- LASSERT(wi == &rpc->crpc_wi);
-
- reply = &rpc->crpc_replymsg;
- do_bulk = rpc->crpc_bulk.bk_niov > 0;
-
- spin_lock(&rpc->crpc_lock);
-
- if (rpc->crpc_aborted) {
- spin_unlock(&rpc->crpc_lock);
- goto abort;
- }
-
- spin_unlock(&rpc->crpc_lock);
-
- switch (wi->swi_state) {
- default:
- LBUG();
- case SWI_STATE_NEWBORN:
- LASSERT(!srpc_event_pending(rpc));
-
- rc = srpc_prepare_reply(rpc);
- if (rc != 0) {
- srpc_client_rpc_done(rpc, rc);
- return 1;
- }
-
- rc = srpc_prepare_bulk(rpc);
- if (rc != 0)
- break;
-
- wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
- rc = srpc_send_request(rpc);
- break;
-
- case SWI_STATE_REQUEST_SUBMITTED:
- /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
- * order; however, they're processed in a strict order:
- * rqt, rpy, and bulk. */
- if (!rpc->crpc_reqstev.ev_fired)
- break;
-
- rc = rpc->crpc_reqstev.ev_status;
- if (rc != 0)
- break;
-
- wi->swi_state = SWI_STATE_REQUEST_SENT;
- /* perhaps more events, fall thru */
- case SWI_STATE_REQUEST_SENT: {
- srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
-
- if (!rpc->crpc_replyev.ev_fired)
- break;
-
- rc = rpc->crpc_replyev.ev_status;
- if (rc != 0)
- break;
-
- srpc_unpack_msg_hdr(reply);
- if (reply->msg_type != type ||
- (reply->msg_magic != SRPC_MSG_MAGIC &&
- reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
- libcfs_id2str(rpc->crpc_dest),
- reply->msg_type, type,
- reply->msg_magic, SRPC_MSG_MAGIC);
- rc = -EBADMSG;
- break;
- }
-
- if (do_bulk && reply->msg_body.reply.status != 0) {
- CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
- reply->msg_body.reply.status,
- libcfs_id2str(rpc->crpc_dest));
- LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
- }
-
- wi->swi_state = SWI_STATE_REPLY_RECEIVED;
- }
- case SWI_STATE_REPLY_RECEIVED:
- if (do_bulk && !rpc->crpc_bulkev.ev_fired)
- break;
-
- rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;
-
- /* Bulk buffer was unlinked due to remote error. Clear error
- * since reply buffer still contains valid data.
- * NB rpc->crpc_done shouldn't look into bulk data in case of
- * remote error. */
- if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
- rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
- rc = 0;
-
- wi->swi_state = SWI_STATE_DONE;
- srpc_client_rpc_done(rpc, rc);
- return 1;
- }
-
- if (rc != 0) {
- spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, rc);
- spin_unlock(&rpc->crpc_lock);
- }
-
-abort:
- if (rpc->crpc_aborted) {
- LNetMDUnlink(rpc->crpc_reqstmdh);
- LNetMDUnlink(rpc->crpc_replymdh);
- LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
-
- if (!srpc_event_pending(rpc)) {
- srpc_client_rpc_done(rpc, -EINTR);
- return 1;
- }
- }
- return 0;
-}
-
-srpc_client_rpc_t *
-srpc_create_client_rpc(lnet_process_id_t peer, int service,
- int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
-{
- srpc_client_rpc_t *rpc;
-
- LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
- crpc_bulk.bk_iovs[nbulkiov]));
- if (rpc == NULL)
- return NULL;
-
- srpc_init_client_rpc(rpc, peer, service, nbulkiov,
- bulklen, rpc_done, rpc_fini, priv);
- return rpc;
-}
-
-/* called with rpc->crpc_lock held */
-void
-srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
-{
- LASSERT(why != 0);
-
- if (rpc->crpc_aborted || /* already aborted */
- rpc->crpc_closed) /* callback imminent */
- return;
-
- CDEBUG(D_NET,
- "Aborting RPC: service %d, peer %s, state %s, why %d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(rpc->crpc_wi.swi_state), why);
-
- rpc->crpc_aborted = 1;
- rpc->crpc_status = why;
- swi_schedule_workitem(&rpc->crpc_wi);
- return;
-}
-
-/* called with rpc->crpc_lock held */
-void
-srpc_post_rpc(srpc_client_rpc_t *rpc)
-{
- LASSERT(!rpc->crpc_aborted);
- LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
-
- CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
- libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
- rpc->crpc_timeout);
-
- srpc_add_client_rpc_timer(rpc);
- swi_schedule_workitem(&rpc->crpc_wi);
- return;
-}
-
-
-int
-srpc_send_reply(struct srpc_server_rpc *rpc)
-{
- srpc_event_t *ev = &rpc->srpc_ev;
- struct srpc_msg *msg = &rpc->srpc_replymsg;
- struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
- struct srpc_service_cd *scd = rpc->srpc_scd;
- struct srpc_service *sv = scd->scd_svc;
- __u64 rpyid;
- int rc;
-
- LASSERT(buffer != NULL);
- rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
-
- spin_lock(&scd->scd_lock);
-
- if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
- /* Repost buffer before replying since test client
- * might send me another RPC once it gets the reply */
- if (srpc_service_post_buffer(scd, buffer) != 0)
- CWARN("Failed to repost %s buffer\n", sv->sv_name);
- rpc->srpc_reqstbuf = NULL;
- }
-
- spin_unlock(&scd->scd_lock);
-
- ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REPLY_SENT;
-
- msg->msg_magic = SRPC_MSG_MAGIC;
- msg->msg_version = SRPC_MSG_VERSION;
- msg->msg_type = srpc_service2reply(sv->sv_id);
-
- rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
- sizeof(*msg), LNET_MD_OP_PUT,
- rpc->srpc_peer, rpc->srpc_self,
- &rpc->srpc_replymdh, ev);
- if (rc != 0)
- ev->ev_fired = 1; /* no more event expected */
- return rc;
-}
-
-/* when in kernel always called with LNET_LOCK() held, and in thread context */
-static void
-srpc_lnet_ev_handler(lnet_event_t *ev)
-{
- struct srpc_service_cd *scd;
- srpc_event_t *rpcev = ev->md.user_ptr;
- srpc_client_rpc_t *crpc;
- srpc_server_rpc_t *srpc;
- srpc_buffer_t *buffer;
- srpc_service_t *sv;
- srpc_msg_t *msg;
- srpc_msg_type_t type;
-
- LASSERT(!in_interrupt());
-
- if (ev->status != 0) {
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.errors++;
- spin_unlock(&srpc_data.rpc_glock);
- }
-
- rpcev->ev_lnet = ev->type;
-
- switch (rpcev->ev_type) {
- default:
- CERROR("Unknown event: status %d, type %d, lnet %d\n",
- rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
- LBUG();
- case SRPC_REQUEST_SENT:
- if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_sent++;
- spin_unlock(&srpc_data.rpc_glock);
- }
- case SRPC_REPLY_RCVD:
- case SRPC_BULK_REQ_RCVD:
- crpc = rpcev->ev_data;
-
- if (rpcev != &crpc->crpc_reqstev &&
- rpcev != &crpc->crpc_replyev &&
- rpcev != &crpc->crpc_bulkev) {
- CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
- rpcev, crpc, &crpc->crpc_reqstev,
- &crpc->crpc_replyev, &crpc->crpc_bulkev);
- CERROR("Bad event: status %d, type %d, lnet %d\n",
- rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
- LBUG();
- }
-
- spin_lock(&crpc->crpc_lock);
-
- LASSERT(rpcev->ev_fired == 0);
- rpcev->ev_fired = 1;
- rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
- -EINTR : ev->status;
- swi_schedule_workitem(&crpc->crpc_wi);
-
- spin_unlock(&crpc->crpc_lock);
- break;
-
- case SRPC_REQUEST_RCVD:
- scd = rpcev->ev_data;
- sv = scd->scd_svc;
-
- LASSERT(rpcev == &scd->scd_ev);
-
- spin_lock(&scd->scd_lock);
-
- LASSERT(ev->unlinked);
- LASSERT(ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->type != LNET_EVENT_UNLINK ||
- sv->sv_shuttingdown);
-
- buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
- buffer->buf_peer = ev->initiator;
- buffer->buf_self = ev->target.nid;
-
- LASSERT(scd->scd_buf_nposted > 0);
- scd->scd_buf_nposted--;
-
- if (sv->sv_shuttingdown) {
-			/* Leave buffer on scd->scd_buf_posted since
-			 * srpc_finish_service needs to traverse it. */
- spin_unlock(&scd->scd_lock);
- break;
- }
-
- if (scd->scd_buf_err_stamp != 0 &&
- scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
- /* re-enable adding buffer */
- scd->scd_buf_err_stamp = 0;
- scd->scd_buf_err = 0;
- }
-
- if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
- scd->scd_buf_adjust == 0 &&
- scd->scd_buf_nposted < scd->scd_buf_low) {
- scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
- SFW_TEST_WI_MIN);
- swi_schedule_workitem(&scd->scd_buf_wi);
- }
-
- list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
- msg = &buffer->buf_msg;
- type = srpc_service2request(sv->sv_id);
-
- if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
- (msg->msg_type != type &&
- msg->msg_type != __swab32(type)) ||
- (msg->msg_magic != SRPC_MSG_MAGIC &&
- msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
- sv->sv_name, libcfs_id2str(ev->initiator),
- ev->status, ev->mlength,
- msg->msg_type, msg->msg_magic);
-
- /* NB can't call srpc_service_recycle_buffer here since
- * it may call LNetM[DE]Attach. The invalid magic tells
- * srpc_handle_rpc to drop this RPC */
- msg->msg_magic = 0;
- }
-
- if (!list_empty(&scd->scd_rpc_free)) {
- srpc = list_entry(scd->scd_rpc_free.next,
- struct srpc_server_rpc,
- srpc_list);
- list_del(&srpc->srpc_list);
-
- srpc_init_server_rpc(srpc, scd, buffer);
- list_add_tail(&srpc->srpc_list,
- &scd->scd_rpc_active);
- swi_schedule_workitem(&srpc->srpc_wi);
- } else {
- list_add_tail(&buffer->buf_list,
- &scd->scd_buf_blocked);
- }
-
- spin_unlock(&scd->scd_lock);
-
- spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_rcvd++;
- spin_unlock(&srpc_data.rpc_glock);
- break;
-
- case SRPC_BULK_GET_RPLD:
- LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_REPLY ||
- ev->type == LNET_EVENT_UNLINK);
-
- if (!ev->unlinked)
- break; /* wait for final event */
-
- case SRPC_BULK_PUT_SENT:
- if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
- spin_lock(&srpc_data.rpc_glock);
-
- if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
- srpc_data.rpc_counters.bulk_get += ev->mlength;
- else
- srpc_data.rpc_counters.bulk_put += ev->mlength;
-
- spin_unlock(&srpc_data.rpc_glock);
- }
- case SRPC_REPLY_SENT:
- srpc = rpcev->ev_data;
- scd = srpc->srpc_scd;
-
- LASSERT(rpcev == &srpc->srpc_ev);
-
- spin_lock(&scd->scd_lock);
-
- rpcev->ev_fired = 1;
- rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
- -EINTR : ev->status;
- swi_schedule_workitem(&srpc->srpc_wi);
-
- spin_unlock(&scd->scd_lock);
- break;
- }
-}
-
-
-int
-srpc_startup(void)
-{
- int rc;
-
- memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
- spin_lock_init(&srpc_data.rpc_glock);
-
- /* 1 second pause to avoid timestamp reuse */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;
-
- srpc_data.rpc_state = SRPC_STATE_NONE;
-
- rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
- if (rc < 0) {
- CERROR("LNetNIInit() has failed: %d\n", rc);
- return rc;
- }
-
- srpc_data.rpc_state = SRPC_STATE_NI_INIT;
-
- LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
- rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
- if (rc != 0) {
- CERROR("LNetEQAlloc() has failed: %d\n", rc);
- goto bail;
- }
-
- rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
- LASSERT(rc == 0);
- rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
- LASSERT(rc == 0);
-
- srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
-
- rc = stt_startup();
-
-bail:
- if (rc != 0)
- srpc_shutdown();
- else
- srpc_data.rpc_state = SRPC_STATE_RUNNING;
-
- return rc;
-}
-
-void
-srpc_shutdown(void)
-{
- int i;
- int rc;
- int state;
-
- state = srpc_data.rpc_state;
- srpc_data.rpc_state = SRPC_STATE_STOPPING;
-
- switch (state) {
- default:
- LBUG();
- case SRPC_STATE_RUNNING:
- spin_lock(&srpc_data.rpc_glock);
-
- for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
- srpc_service_t *sv = srpc_data.rpc_services[i];
-
- LASSERTF(sv == NULL,
- "service not empty: id %d, name %s\n",
- i, sv->sv_name);
- }
-
- spin_unlock(&srpc_data.rpc_glock);
-
- stt_shutdown();
-
- case SRPC_STATE_EQ_INIT:
- rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
- rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
- LASSERT(rc == 0);
- rc = LNetEQFree(srpc_data.rpc_lnet_eq);
- LASSERT(rc == 0); /* the EQ should have no user by now */
-
- case SRPC_STATE_NI_INIT:
- LNetNIFini();
- }
-
- return;
-}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
deleted file mode 100644
index b7b00c6b1004..000000000000
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __SELFTEST_RPC_H__
-#define __SELFTEST_RPC_H__
-
-#include "../../include/linux/lnet/lnetst.h"
-
-/*
- * LST wired structures
- *
- * XXX: *REPLY == *REQST + 1
- */
-typedef enum {
- SRPC_MSG_MKSN_REQST = 0,
- SRPC_MSG_MKSN_REPLY = 1,
- SRPC_MSG_RMSN_REQST = 2,
- SRPC_MSG_RMSN_REPLY = 3,
- SRPC_MSG_BATCH_REQST = 4,
- SRPC_MSG_BATCH_REPLY = 5,
- SRPC_MSG_STAT_REQST = 6,
- SRPC_MSG_STAT_REPLY = 7,
- SRPC_MSG_TEST_REQST = 8,
- SRPC_MSG_TEST_REPLY = 9,
- SRPC_MSG_DEBUG_REQST = 10,
- SRPC_MSG_DEBUG_REPLY = 11,
- SRPC_MSG_BRW_REQST = 12,
- SRPC_MSG_BRW_REPLY = 13,
- SRPC_MSG_PING_REQST = 14,
- SRPC_MSG_PING_REPLY = 15,
- SRPC_MSG_JOIN_REQST = 16,
- SRPC_MSG_JOIN_REPLY = 17,
-} srpc_msg_type_t;
-
-
-/* CAVEAT EMPTOR:
- * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
- * and 2nd field matchbits of bulk buffer if any.
- *
- * All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field
- * session id if needed.
- */
-typedef struct {
- __u64 rpyid; /* reply buffer matchbits */
- __u64 bulkid; /* bulk buffer matchbits */
-} WIRE_ATTR srpc_generic_reqst_t;
-
-typedef struct {
- __u32 status;
- lst_sid_t sid;
-} WIRE_ATTR srpc_generic_reply_t;
-
-/* FRAMEWORK RPCs */
-typedef struct {
- __u64 mksn_rpyid; /* reply buffer matchbits */
- lst_sid_t mksn_sid; /* session id */
- __u32 mksn_force; /* use brute force */
- char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */
-
-typedef struct {
- __u32 mksn_status; /* session status */
- lst_sid_t mksn_sid; /* session id */
- __u32 mksn_timeout; /* session timeout */
- char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
-
-typedef struct {
- __u64 rmsn_rpyid; /* reply buffer matchbits */
- lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */
-
-typedef struct {
- __u32 rmsn_status;
- lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
-
-typedef struct {
- __u64 join_rpyid; /* reply buffer matchbits */
- lst_sid_t join_sid; /* session id to join */
- char join_group[LST_NAME_SIZE]; /* group name */
-} WIRE_ATTR srpc_join_reqst_t;
-
-typedef struct {
- __u32 join_status; /* returned status */
- lst_sid_t join_sid; /* session id */
- __u32 join_timeout; /* # seconds' inactivity to
- * expire */
- char join_session[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_join_reply_t;
-
-typedef struct {
- __u64 dbg_rpyid; /* reply buffer matchbits */
- lst_sid_t dbg_sid; /* session id */
- __u32 dbg_flags; /* bitmap of debug */
-} WIRE_ATTR srpc_debug_reqst_t;
-
-typedef struct {
- __u32 dbg_status; /* returned code */
- lst_sid_t dbg_sid; /* session id */
- __u32 dbg_timeout; /* session timeout */
- __u32 dbg_nbatch; /* # of batches in the node */
- char dbg_name[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_debug_reply_t;
-
-#define SRPC_BATCH_OPC_RUN 1
-#define SRPC_BATCH_OPC_STOP 2
-#define SRPC_BATCH_OPC_QUERY 3
-
-typedef struct {
- __u64 bar_rpyid; /* reply buffer matchbits */
- lst_sid_t bar_sid; /* session id */
- lst_bid_t bar_bid; /* batch id */
- __u32 bar_opc; /* create/start/stop batch */
- __u32 bar_testidx; /* index of test */
- __u32 bar_arg; /* parameters */
-} WIRE_ATTR srpc_batch_reqst_t;
-
-typedef struct {
- __u32 bar_status; /* status of request */
- lst_sid_t bar_sid; /* session id */
- __u32 bar_active; /* # of active tests in batch/test */
- __u32 bar_time; /* remained time */
-} WIRE_ATTR srpc_batch_reply_t;
-
-typedef struct {
- __u64 str_rpyid; /* reply buffer matchbits */
- lst_sid_t str_sid; /* session id */
- __u32 str_type; /* type of stat */
-} WIRE_ATTR srpc_stat_reqst_t;
-
-typedef struct {
- __u32 str_status;
- lst_sid_t str_sid;
- sfw_counters_t str_fw;
- srpc_counters_t str_rpc;
- lnet_counters_t str_lnet;
-} WIRE_ATTR srpc_stat_reply_t;
-
-typedef struct {
- __u32 blk_opc; /* bulk operation code */
- __u32 blk_npg; /* # of pages */
- __u32 blk_flags; /* reserved flags */
-} WIRE_ATTR test_bulk_req_t;
-
-typedef struct {
- __u16 blk_opc; /* bulk operation code */
- __u16 blk_flags; /* data check flags */
- __u32 blk_len; /* data length */
- __u32 blk_offset; /* reserved: offset */
-} WIRE_ATTR test_bulk_req_v1_t;
-
-typedef struct {
- __u32 png_size; /* size of ping message */
- __u32 png_flags; /* reserved flags */
-} WIRE_ATTR test_ping_req_t;
-
-typedef struct {
- __u64 tsr_rpyid; /* reply buffer matchbits */
- __u64 tsr_bulkid; /* bulk buffer matchbits */
- lst_sid_t tsr_sid; /* session id */
- lst_bid_t tsr_bid; /* batch id */
- __u32 tsr_service; /* test type: bulk|ping|... */
- __u32 tsr_loop; /* test client loop count or
- * # server buffers needed */
- __u32 tsr_concur; /* concurrency of test */
- __u8 tsr_is_client; /* is test client or not */
- __u8 tsr_stop_onerr; /* stop on error */
- __u32 tsr_ndest; /* # of dest nodes */
-
- union {
- test_ping_req_t ping;
- test_bulk_req_t bulk_v0;
- test_bulk_req_v1_t bulk_v1;
- } tsr_u;
-} WIRE_ATTR srpc_test_reqst_t;
-
-typedef struct {
- __u32 tsr_status; /* returned code */
- lst_sid_t tsr_sid;
-} WIRE_ATTR srpc_test_reply_t;
-
-/* TEST RPCs */
-typedef struct {
- __u64 pnr_rpyid;
- __u32 pnr_magic;
- __u32 pnr_seq;
- __u64 pnr_time_sec;
- __u64 pnr_time_usec;
-} WIRE_ATTR srpc_ping_reqst_t;
-
-typedef struct {
- __u32 pnr_status;
- __u32 pnr_magic;
- __u32 pnr_seq;
-} WIRE_ATTR srpc_ping_reply_t;
-
-typedef struct {
- __u64 brw_rpyid; /* reply buffer matchbits */
- __u64 brw_bulkid; /* bulk buffer matchbits */
- __u32 brw_rw; /* read or write */
- __u32 brw_len; /* bulk data len */
- __u32 brw_flags; /* bulk data patterns */
-} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */
-
-typedef struct {
- __u32 brw_status;
-} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
-
-#define SRPC_MSG_MAGIC 0xeeb0f00d
-#define SRPC_MSG_VERSION 1
-
-typedef struct srpc_msg {
- __u32 msg_magic; /* magic number */
- __u32 msg_version; /* message version number */
- __u32 msg_type; /* type of message body: srpc_msg_type_t */
- __u32 msg_reserved0;
- __u32 msg_reserved1;
- __u32 msg_ses_feats; /* test session features */
- union {
- srpc_generic_reqst_t reqst;
- srpc_generic_reply_t reply;
-
- srpc_mksn_reqst_t mksn_reqst;
- srpc_mksn_reply_t mksn_reply;
- srpc_rmsn_reqst_t rmsn_reqst;
- srpc_rmsn_reply_t rmsn_reply;
- srpc_debug_reqst_t dbg_reqst;
- srpc_debug_reply_t dbg_reply;
- srpc_batch_reqst_t bat_reqst;
- srpc_batch_reply_t bat_reply;
- srpc_stat_reqst_t stat_reqst;
- srpc_stat_reply_t stat_reply;
- srpc_test_reqst_t tes_reqst;
- srpc_test_reply_t tes_reply;
- srpc_join_reqst_t join_reqst;
- srpc_join_reply_t join_reply;
-
- srpc_ping_reqst_t ping_reqst;
- srpc_ping_reply_t ping_reply;
- srpc_brw_reqst_t brw_reqst;
- srpc_brw_reply_t brw_reply;
- } msg_body;
-} WIRE_ATTR srpc_msg_t;
-
-static inline void
-srpc_unpack_msg_hdr(srpc_msg_t *msg)
-{
- if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
-
- /* We do not swap the magic number here as it is needed to
- determine whether the body needs to be swapped. */
- /* __swab32s(&msg->msg_magic); */
- __swab32s(&msg->msg_type);
- __swab32s(&msg->msg_version);
- __swab32s(&msg->msg_ses_feats);
- __swab32s(&msg->msg_reserved0);
- __swab32s(&msg->msg_reserved1);
-}
-
-#endif /* __SELFTEST_RPC_H__ */
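
The message types above follow the convention that each *_REPLY value is its *_REQST value plus one, and srpc_unpack_msg_hdr() relies on msg_magic to detect a byte-swapped peer: a matching magic means no flipping is needed. A minimal standalone C sketch of both ideas, using plain stand-ins rather than the srpc types:

#include <stdint.h>
#include <stdio.h>

#define MSG_MAGIC 0xeeb0f00dU		/* same value as SRPC_MSG_MAGIC */

enum { MSG_PING_REQST = 14, MSG_PING_REPLY = 15 };

/* the *REPLY == *REQST + 1 rule: a reply type is derived, never looked up */
static int msg_reply_of(int reqst)
{
	return reqst + 1;
}

/* byte-swap one 32-bit word, as __swab32s() would do in place */
static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00U) |
	       ((v << 8) & 0x00ff0000U) | (v << 24);
}

int main(void)
{
	/* the magic as it would arrive from an opposite-endian peer */
	uint32_t wire_magic = swab32(MSG_MAGIC);

	printf("reply type of PING_REQST: %d\n", msg_reply_of(MSG_PING_REQST));
	/* matching magic means no flipping; otherwise every header field
	 * except the magic itself gets swapped */
	printf("needs byte swap: %s\n", wire_magic == MSG_MAGIC ? "no" : "yes");
	return 0;
}
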
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
deleted file mode 100644
index 48322539a3a3..000000000000
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/selftest.h
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-#ifndef __SELFTEST_SELFTEST_H__
-#define __SELFTEST_SELFTEST_H__
-
-#define LNET_ONLY
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lnet.h"
-#include "../../include/linux/lnet/lib-lnet.h"
-#include "../../include/linux/lnet/lib-types.h"
-#include "../../include/linux/lnet/lnetst.h"
-
-#include "rpc.h"
-#include "timer.h"
-
-#ifndef MADE_WITHOUT_COMPROMISE
-#define MADE_WITHOUT_COMPROMISE
-#endif
-
-
-#define SWI_STATE_NEWBORN 0
-#define SWI_STATE_REPLY_SUBMITTED 1
-#define SWI_STATE_REPLY_SENT 2
-#define SWI_STATE_REQUEST_SUBMITTED 3
-#define SWI_STATE_REQUEST_SENT 4
-#define SWI_STATE_REPLY_RECEIVED 5
-#define SWI_STATE_BULK_STARTED 6
-#define SWI_STATE_DONE 10
-
-/* forward refs */
-struct srpc_service;
-struct srpc_service_cd;
-struct sfw_test_unit;
-struct sfw_test_instance;
-
-/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
- * services, e.g. create/modify session.
- */
-#define SRPC_SERVICE_DEBUG 0
-#define SRPC_SERVICE_MAKE_SESSION 1
-#define SRPC_SERVICE_REMOVE_SESSION 2
-#define SRPC_SERVICE_BATCH 3
-#define SRPC_SERVICE_TEST 4
-#define SRPC_SERVICE_QUERY_STAT 5
-#define SRPC_SERVICE_JOIN 6
-#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10
-/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
-#define SRPC_SERVICE_BRW 11
-#define SRPC_SERVICE_PING 12
-#define SRPC_SERVICE_MAX_ID 12
-
-#define SRPC_REQUEST_PORTAL 50
-/* a lazy portal for framework RPC requests */
-#define SRPC_FRAMEWORK_REQUEST_PORTAL 51
-/* all reply/bulk RDMAs go to this portal */
-#define SRPC_RDMA_PORTAL 52
-
-static inline srpc_msg_type_t
-srpc_service2request (int service)
-{
- switch (service) {
- default:
- LBUG ();
- case SRPC_SERVICE_DEBUG:
- return SRPC_MSG_DEBUG_REQST;
-
- case SRPC_SERVICE_MAKE_SESSION:
- return SRPC_MSG_MKSN_REQST;
-
- case SRPC_SERVICE_REMOVE_SESSION:
- return SRPC_MSG_RMSN_REQST;
-
- case SRPC_SERVICE_BATCH:
- return SRPC_MSG_BATCH_REQST;
-
- case SRPC_SERVICE_TEST:
- return SRPC_MSG_TEST_REQST;
-
- case SRPC_SERVICE_QUERY_STAT:
- return SRPC_MSG_STAT_REQST;
-
- case SRPC_SERVICE_BRW:
- return SRPC_MSG_BRW_REQST;
-
- case SRPC_SERVICE_PING:
- return SRPC_MSG_PING_REQST;
-
- case SRPC_SERVICE_JOIN:
- return SRPC_MSG_JOIN_REQST;
- }
-}
-
-static inline srpc_msg_type_t
-srpc_service2reply (int service)
-{
- return srpc_service2request(service) + 1;
-}
-
-typedef enum {
- SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
- * received */
- SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
- SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */
- SRPC_REPLY_RCVD = 4, /* incoming reply received */
- SRPC_REPLY_SENT = 5, /* outgoing reply sent */
- SRPC_REQUEST_RCVD = 6, /* incoming request received */
- SRPC_REQUEST_SENT = 7, /* outgoing request sent */
-} srpc_event_type_t;
-
-/* RPC event */
-typedef struct {
- srpc_event_type_t ev_type; /* what's up */
- lnet_event_kind_t ev_lnet; /* LNet event type */
- int ev_fired; /* LNet event fired? */
- int ev_status; /* LNet event status */
- void *ev_data; /* owning server/client RPC */
-} srpc_event_t;
-
-typedef struct {
- int bk_len; /* len of bulk data */
- lnet_handle_md_t bk_mdh;
- int bk_sink; /* sink/source */
- int bk_niov; /* # iov in bk_iovs */
- lnet_kiov_t bk_iovs[0];
-} srpc_bulk_t; /* bulk descriptor */
-
-/* message buffer descriptor */
-typedef struct srpc_buffer {
- struct list_head buf_list; /* chain on srpc_service::*_msgq */
- srpc_msg_t buf_msg;
- lnet_handle_md_t buf_mdh;
- lnet_nid_t buf_self;
- lnet_process_id_t buf_peer;
-} srpc_buffer_t;
-
-struct swi_workitem;
-typedef int (*swi_action_t) (struct swi_workitem *);
-
-typedef struct swi_workitem {
- struct cfs_wi_sched *swi_sched;
- cfs_workitem_t swi_workitem;
- swi_action_t swi_action;
- int swi_state;
-} swi_workitem_t;
-
-/* server-side state of a RPC */
-typedef struct srpc_server_rpc {
- /* chain on srpc_service::*_rpcq */
- struct list_head srpc_list;
- struct srpc_service_cd *srpc_scd;
- swi_workitem_t srpc_wi;
- srpc_event_t srpc_ev; /* bulk/reply event */
- lnet_nid_t srpc_self;
- lnet_process_id_t srpc_peer;
- srpc_msg_t srpc_replymsg;
- lnet_handle_md_t srpc_replymdh;
- srpc_buffer_t *srpc_reqstbuf;
- srpc_bulk_t *srpc_bulk;
-
- unsigned int srpc_aborted; /* being given up */
- int srpc_status;
- void (*srpc_done)(struct srpc_server_rpc *);
-} srpc_server_rpc_t;
-
-/* client-side state of a RPC */
-typedef struct srpc_client_rpc {
- struct list_head crpc_list; /* chain on user's lists */
- spinlock_t crpc_lock; /* serialize */
- int crpc_service;
- atomic_t crpc_refcount;
- int crpc_timeout; /* # seconds to wait for reply */
- stt_timer_t crpc_timer;
- swi_workitem_t crpc_wi;
- lnet_process_id_t crpc_dest;
-
- void (*crpc_done)(struct srpc_client_rpc *);
- void (*crpc_fini)(struct srpc_client_rpc *);
- int crpc_status; /* completion status */
- void *crpc_priv; /* caller data */
-
- /* state flags */
- unsigned int crpc_aborted:1; /* being given up */
- unsigned int crpc_closed:1; /* completed */
-
- /* RPC events */
- srpc_event_t crpc_bulkev; /* bulk event */
- srpc_event_t crpc_reqstev; /* request event */
- srpc_event_t crpc_replyev; /* reply event */
-
- /* bulk, request(reqst), and reply exchanged on wire */
- srpc_msg_t crpc_reqstmsg;
- srpc_msg_t crpc_replymsg;
- lnet_handle_md_t crpc_reqstmdh;
- lnet_handle_md_t crpc_replymdh;
- srpc_bulk_t crpc_bulk;
-} srpc_client_rpc_t;
-
-#define srpc_client_rpc_size(rpc) \
-offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
-
-#define srpc_client_rpc_addref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- atomic_inc(&(rpc)->crpc_refcount); \
-} while (0)
-
-#define srpc_client_rpc_decref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
- srpc_destroy_client_rpc(rpc); \
-} while (0)
-
-#define srpc_event_pending(rpc) ((rpc)->crpc_bulkev.ev_fired == 0 || \
- (rpc)->crpc_reqstev.ev_fired == 0 || \
- (rpc)->crpc_replyev.ev_fired == 0)
-
-/* CPU partition data of srpc service */
-struct srpc_service_cd {
- /** serialize */
- spinlock_t scd_lock;
- /** backref to service */
- struct srpc_service *scd_svc;
- /** event buffer */
- srpc_event_t scd_ev;
- /** free RPC descriptors */
- struct list_head scd_rpc_free;
- /** in-flight RPCs */
- struct list_head scd_rpc_active;
- /** workitem for posting buffer */
- swi_workitem_t scd_buf_wi;
- /** CPT id */
- int scd_cpt;
- /** error code for scd_buf_wi */
- int scd_buf_err;
- /** timestamp for scd_buf_err */
- time64_t scd_buf_err_stamp;
- /** total # request buffers */
- int scd_buf_total;
- /** # posted request buffers */
- int scd_buf_nposted;
- /** in progress of buffer posting */
- int scd_buf_posting;
- /** allocate more buffers if scd_buf_nposted < scd_buf_low */
- int scd_buf_low;
- /** increase/decrease some buffers */
- int scd_buf_adjust;
- /** posted message buffers */
- struct list_head scd_buf_posted;
- /** blocked for RPC descriptor */
- struct list_head scd_buf_blocked;
-};
-
-/* number of server workitems (mini-thread) for testing service */
-#define SFW_TEST_WI_MIN 256
-#define SFW_TEST_WI_MAX 2048
-/* extra buffers for tolerating buggy peers, or unbalanced number
- * of peers between partitions */
-#define SFW_TEST_WI_EXTRA 64
-
-/* number of server workitems (mini-thread) for framework service */
-#define SFW_FRWK_WI_MIN 16
-#define SFW_FRWK_WI_MAX 256
-
-typedef struct srpc_service {
- int sv_id; /* service id */
- const char *sv_name; /* human readable name */
- int sv_wi_total; /* total server workitems */
- int sv_shuttingdown;
- int sv_ncpts;
- /* percpt data for srpc_service */
- struct srpc_service_cd **sv_cpt_data;
- /* Service callbacks:
- * - sv_handler: process incoming RPC request
- * - sv_bulk_ready: notify bulk data
- */
- int (*sv_handler) (srpc_server_rpc_t *);
- int (*sv_bulk_ready) (srpc_server_rpc_t *, int);
-} srpc_service_t;
-
-typedef struct {
- struct list_head sn_list; /* chain on fw_zombie_sessions */
- lst_sid_t sn_id; /* unique identifier */
- unsigned int sn_timeout; /* # seconds' inactivity to expire */
- int sn_timer_active;
- unsigned int sn_features;
- stt_timer_t sn_timer;
- struct list_head sn_batches; /* list of batches */
- char sn_name[LST_NAME_SIZE];
- atomic_t sn_refcount;
- atomic_t sn_brw_errors;
- atomic_t sn_ping_errors;
- unsigned long sn_started;
-} sfw_session_t;
-
-#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
- (sid0).ses_stamp == (sid1).ses_stamp)
-
-typedef struct {
- struct list_head bat_list; /* chain on sn_batches */
- lst_bid_t bat_id; /* batch id */
- int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
- atomic_t bat_nactive; /* # of active tests */
- struct list_head bat_tests; /* test instances */
-} sfw_batch_t;
-
-typedef struct {
- int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
- * client */
- void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
- * client */
- int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
- lnet_process_id_t dest,
- srpc_client_rpc_t **rpc); /* prep a tests rpc */
- void (*tso_done_rpc)(struct sfw_test_unit *tsu,
- srpc_client_rpc_t *rpc); /* done a test rpc */
-} sfw_test_client_ops_t;
-
-typedef struct sfw_test_instance {
- struct list_head tsi_list; /* chain on batch */
- int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operation
- */
-
- /* public parameter for all test units */
- unsigned int tsi_is_client:1; /* is test client */
- unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
- int tsi_concur; /* concurrency */
- int tsi_loop; /* loop count */
-
- /* status of test instance */
- spinlock_t tsi_lock; /* serialize */
- unsigned int tsi_stopping:1; /* test is stopping */
- atomic_t tsi_nactive; /* # of active test
- * unit */
- struct list_head tsi_units; /* test units */
- struct list_head tsi_free_rpcs; /* free rpcs */
- struct list_head tsi_active_rpcs; /* active rpcs */
-
- union {
- test_ping_req_t ping; /* ping parameter */
- test_bulk_req_t bulk_v0; /* bulk parameter */
- test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */
- } tsi_u;
-} sfw_test_instance_t;
-
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
-#define SFW_MAX_CONCUR LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
-#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
-#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
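
SFW_ID_PER_PAGE and sfw_id_pages() above are a straight packing calculation: how many destination ids fit in one page, and how many pages a list of n ids needs, rounding up; the trailing bytes of each page stay unused, as the XXX comment notes. A tiny standalone sketch with assumed sizes (4096-byte pages and a 24-byte packed id are illustrative values, not taken from these headers):

#include <stdio.h>

#define PAGE_SIZE_BYTES	4096u		/* assumed page size */
#define ID_SIZE_BYTES	24u		/* assumed sizeof(lnet_process_id_packed_t) */
#define IDS_PER_PAGE	(PAGE_SIZE_BYTES / ID_SIZE_BYTES)

/* same rounding-up division as sfw_id_pages(n) */
static unsigned int id_pages(unsigned int n)
{
	return (n + IDS_PER_PAGE - 1) / IDS_PER_PAGE;
}

int main(void)
{
	/* 4096 / 24 = 170 ids per page, with 16 trailing bytes unused */
	printf("ids per page: %u\n", IDS_PER_PAGE);
	printf("pages for 1 id: %u\n", id_pages(1));
	printf("pages for %u ids: %u\n", IDS_PER_PAGE + 1, id_pages(IDS_PER_PAGE + 1));
	return 0;
}
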
-
-typedef struct sfw_test_unit {
- struct list_head tsu_list; /* chain on lst_test_instance */
- lnet_process_id_t tsu_dest; /* id of dest node */
- int tsu_loop; /* loop count of the test */
- sfw_test_instance_t *tsu_instance; /* pointer to test instance */
- void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
-} sfw_test_unit_t;
-
-typedef struct sfw_test_case {
- struct list_head tsc_list; /* chain on fw_tests */
- srpc_service_t *tsc_srv_service; /* test service */
- sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
-} sfw_test_case_t;
-
-srpc_client_rpc_t *
-sfw_create_rpc(lnet_process_id_t peer, int service,
- unsigned features, int nbulkiov, int bulklen,
- void (*done) (srpc_client_rpc_t *), void *priv);
-int sfw_create_test_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t peer, unsigned features,
- int nblk, int blklen, srpc_client_rpc_t **rpc);
-void sfw_abort_rpc(srpc_client_rpc_t *rpc);
-void sfw_post_rpc(srpc_client_rpc_t *rpc);
-void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
-void sfw_unpack_message(srpc_msg_t *msg);
-void sfw_free_pages(srpc_server_rpc_t *rpc);
-void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
-int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
- int sink);
-int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
-
-srpc_client_rpc_t *
-srpc_create_client_rpc(lnet_process_id_t peer, int service,
- int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
-void srpc_post_rpc(srpc_client_rpc_t *rpc);
-void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
-void srpc_free_bulk(srpc_bulk_t *bk);
-srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len,
- int sink);
-int srpc_send_rpc(swi_workitem_t *wi);
-int srpc_send_reply(srpc_server_rpc_t *rpc);
-int srpc_add_service(srpc_service_t *sv);
-int srpc_remove_service(srpc_service_t *sv);
-void srpc_shutdown_service(srpc_service_t *sv);
-void srpc_abort_service(srpc_service_t *sv);
-int srpc_finish_service(srpc_service_t *sv);
-int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
-void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
-void srpc_get_counters(srpc_counters_t *cnt);
-void srpc_set_counters(const srpc_counters_t *cnt);
-
-extern struct cfs_wi_sched *lst_sched_serial;
-extern struct cfs_wi_sched **lst_sched_test;
-
-static inline int
-srpc_serv_is_framework(struct srpc_service *svc)
-{
- return svc->sv_id < SRPC_FRAMEWORK_SERVICE_MAX_ID;
-}
-
-static inline int
-swi_wi_action(cfs_workitem_t *wi)
-{
- swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem);
-
- return swi->swi_action(swi);
-}
-
-static inline void
-swi_init_workitem(swi_workitem_t *swi, void *data,
- swi_action_t action, struct cfs_wi_sched *sched)
-{
- swi->swi_sched = sched;
- swi->swi_action = action;
- swi->swi_state = SWI_STATE_NEWBORN;
- cfs_wi_init(&swi->swi_workitem, data, swi_wi_action);
-}
-
-static inline void
-swi_schedule_workitem(swi_workitem_t *wi)
-{
- cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
-}
-
-static inline void
-swi_exit_workitem(swi_workitem_t *swi)
-{
- cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
-}
-
-static inline int
-swi_deschedule_workitem(swi_workitem_t *swi)
-{
- return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
-}
-
-
-int sfw_startup(void);
-int srpc_startup(void);
-void sfw_shutdown(void);
-void srpc_shutdown(void);
-
-static inline void
-srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
-{
- LASSERT(rpc != NULL);
- LASSERT(!srpc_event_pending(rpc));
- LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
-
- if (rpc->crpc_fini == NULL) {
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- } else {
- (*rpc->crpc_fini) (rpc);
- }
-
- return;
-}
-
-static inline void
-srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
- int service, int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
-{
- LASSERT(nbulkiov <= LNET_MAX_IOV);
-
- memset(rpc, 0, offsetof(srpc_client_rpc_t,
- crpc_bulk.bk_iovs[nbulkiov]));
-
- INIT_LIST_HEAD(&rpc->crpc_list);
- swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
- lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
- spin_lock_init(&rpc->crpc_lock);
- atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
-
- rpc->crpc_dest = peer;
- rpc->crpc_priv = priv;
- rpc->crpc_service = service;
- rpc->crpc_bulk.bk_len = bulklen;
- rpc->crpc_bulk.bk_niov = nbulkiov;
- rpc->crpc_done = rpc_done;
- rpc->crpc_fini = rpc_fini;
- LNetInvalidateHandle(&rpc->crpc_reqstmdh);
- LNetInvalidateHandle(&rpc->crpc_replymdh);
- LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
-
- /* no event is expected at this point */
- rpc->crpc_bulkev.ev_fired =
- rpc->crpc_reqstev.ev_fired =
- rpc->crpc_replyev.ev_fired = 1;
-
- rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC;
- rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
- rpc->crpc_reqstmsg.msg_type = srpc_service2request(service);
- return;
-}
-
-static inline const char *
-swi_state2str (int state)
-{
-#define STATE2STR(x) case x: return #x
- switch (state) {
- default:
- LBUG();
- STATE2STR(SWI_STATE_NEWBORN);
- STATE2STR(SWI_STATE_REPLY_SUBMITTED);
- STATE2STR(SWI_STATE_REPLY_SENT);
- STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
- STATE2STR(SWI_STATE_REQUEST_SENT);
- STATE2STR(SWI_STATE_REPLY_RECEIVED);
- STATE2STR(SWI_STATE_BULK_STARTED);
- STATE2STR(SWI_STATE_DONE);
- }
-#undef STATE2STR
-}
-
-#define selftest_wait_events() \
- do { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- schedule_timeout(cfs_time_seconds(1) / 10); \
- } while (0)
-
-
-#define lst_wait_until(cond, lock, fmt, ...) \
-do { \
- int __I = 2; \
- while (!(cond)) { \
- CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \
- fmt, ## __VA_ARGS__); \
- spin_unlock(&(lock)); \
- \
- selftest_wait_events(); \
- \
- spin_lock(&(lock)); \
- } \
-} while (0)
-
-static inline void
-srpc_wait_service_shutdown(srpc_service_t *sv)
-{
- int i = 2;
-
- LASSERT(sv->sv_shuttingdown);
-
- while (srpc_finish_service(sv) == 0) {
- i++;
- CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
- "Waiting for %s service to shutdown...\n",
- sv->sv_name);
- selftest_wait_events();
- }
-}
-
-extern sfw_test_client_ops_t brw_test_client;
-void brw_init_test_client(void);
-
-extern srpc_service_t brw_test_service;
-void brw_init_test_service(void);
-
-extern sfw_test_client_ops_t ping_test_client;
-void ping_init_test_client(void);
-
-extern srpc_service_t ping_test_service;
-void ping_init_test_service(void);
-
-#endif /* __SELFTEST_SELFTEST_H__ */
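
srpc_client_rpc_addref() and srpc_client_rpc_decref() above implement a plain reference count whose final put destroys the RPC, either by freeing it or by calling its crpc_fini callback. A minimal standalone sketch of the same pattern with C11 atomics (the struct and function names here are illustrative stand-ins, not the kernel atomic_t API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct rpc {
	atomic_int refcount;		/* starts at 1, owned by the caller */
	void (*fini)(struct rpc *);	/* optional finalizer, like crpc_fini */
};

static void rpc_addref(struct rpc *r)
{
	atomic_fetch_add(&r->refcount, 1);
}

static void rpc_decref(struct rpc *r)
{
	/* the last put destroys the object, mirroring srpc_destroy_client_rpc() */
	if (atomic_fetch_sub(&r->refcount, 1) == 1) {
		if (r->fini)
			r->fini(r);
		else
			free(r);
	}
}

int main(void)
{
	struct rpc *r = calloc(1, sizeof(*r));

	atomic_init(&r->refcount, 1);	/* 1 ref for the caller */
	rpc_addref(r);			/* e.g. taken for an in-flight event */
	rpc_decref(r);			/* event completes */
	rpc_decref(r);			/* caller drops its reference; object freed */
	puts("done");
	return 0;
}
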
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
deleted file mode 100644
index ebd5f79126c9..000000000000
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/timer.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "selftest.h"
-
-
-/*
- * Timers are implemented as a sorted queue of expiry times. The queue
- * is slotted, with each slot holding timers which expire in a
- * 2**STTIMER_MINPOLL (8) second period. The timers in each slot are
- * sorted by increasing expiry time. The number of slots is 2**7 (128),
- * to cover a time period of 1024 seconds into the future before wrapping.
- */
-#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
-#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
-#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
-#define STTIMER_NSLOTS (1 << 7)
-#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
- (STTIMER_NSLOTS - 1))])
-
-static struct st_timer_data {
- spinlock_t stt_lock;
- unsigned long stt_prev_slot; /* start time of the slot processed
- * previously */
- struct list_head stt_hash[STTIMER_NSLOTS];
- int stt_shuttingdown;
- wait_queue_head_t stt_waitq;
- int stt_nthreads;
-} stt_data;
-
-void
-stt_add_timer(stt_timer_t *timer)
-{
- struct list_head *pos;
-
- spin_lock(&stt_data.stt_lock);
-
- LASSERT(stt_data.stt_nthreads > 0);
- LASSERT(!stt_data.stt_shuttingdown);
- LASSERT(timer->stt_func != NULL);
- LASSERT(list_empty(&timer->stt_list));
- LASSERT(timer->stt_expires > ktime_get_real_seconds());
-
- /* a simple insertion sort */
- list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) {
- stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
-
- if (timer->stt_expires >= old->stt_expires)
- break;
- }
- list_add(&timer->stt_list, pos);
-
- spin_unlock(&stt_data.stt_lock);
-}
-
-/*
- * The function returns whether it has deactivated a pending timer or not.
- * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
- * active timer returns 1.)
- *
- * CAVEAT EMPTOR:
- * When 0 is returned, it is possible that timer->stt_func _is_ running on
- * another CPU.
- */
-int
-stt_del_timer(stt_timer_t *timer)
-{
- int ret = 0;
-
- spin_lock(&stt_data.stt_lock);
-
- LASSERT(stt_data.stt_nthreads > 0);
- LASSERT(!stt_data.stt_shuttingdown);
-
- if (!list_empty(&timer->stt_list)) {
- ret = 1;
- list_del_init(&timer->stt_list);
- }
-
- spin_unlock(&stt_data.stt_lock);
- return ret;
-}
-
-/* called with stt_data.stt_lock held */
-static int
-stt_expire_list(struct list_head *slot, time64_t now)
-{
- int expired = 0;
- stt_timer_t *timer;
-
- while (!list_empty(slot)) {
- timer = list_entry(slot->next, stt_timer_t, stt_list);
-
- if (timer->stt_expires > now)
- break;
-
- list_del_init(&timer->stt_list);
- spin_unlock(&stt_data.stt_lock);
-
- expired++;
- (*timer->stt_func) (timer->stt_data);
-
- spin_lock(&stt_data.stt_lock);
- }
-
- return expired;
-}
-
-static int
-stt_check_timers(unsigned long *last)
-{
- int expired = 0;
- time64_t now;
- unsigned long this_slot;
-
- now = ktime_get_real_seconds();
- this_slot = now & STTIMER_SLOTTIMEMASK;
-
- spin_lock(&stt_data.stt_lock);
-
- while (cfs_time_aftereq(this_slot, *last)) {
- expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
- this_slot = cfs_time_sub(this_slot, STTIMER_SLOTTIME);
- }
-
- *last = now & STTIMER_SLOTTIMEMASK;
- spin_unlock(&stt_data.stt_lock);
- return expired;
-}
-
-
-static int
-stt_timer_main(void *arg)
-{
- cfs_block_allsigs();
-
- while (!stt_data.stt_shuttingdown) {
- stt_check_timers(&stt_data.stt_prev_slot);
-
- wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME));
- }
-
- spin_lock(&stt_data.stt_lock);
- stt_data.stt_nthreads--;
- spin_unlock(&stt_data.stt_lock);
- return 0;
-}
-
-static int
-stt_start_timer_thread(void)
-{
- struct task_struct *task;
-
- LASSERT(!stt_data.stt_shuttingdown);
-
- task = kthread_run(stt_timer_main, NULL, "st_timer");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- spin_lock(&stt_data.stt_lock);
- stt_data.stt_nthreads++;
- spin_unlock(&stt_data.stt_lock);
- return 0;
-}
-
-
-int
-stt_startup(void)
-{
- int rc = 0;
- int i;
-
- stt_data.stt_shuttingdown = 0;
- stt_data.stt_prev_slot = ktime_get_real_seconds() & STTIMER_SLOTTIMEMASK;
-
- spin_lock_init(&stt_data.stt_lock);
- for (i = 0; i < STTIMER_NSLOTS; i++)
- INIT_LIST_HEAD(&stt_data.stt_hash[i]);
-
- stt_data.stt_nthreads = 0;
- init_waitqueue_head(&stt_data.stt_waitq);
- rc = stt_start_timer_thread();
- if (rc != 0)
- CERROR("Can't spawn timer thread: %d\n", rc);
-
- return rc;
-}
-
-void
-stt_shutdown(void)
-{
- int i;
-
- spin_lock(&stt_data.stt_lock);
-
- for (i = 0; i < STTIMER_NSLOTS; i++)
- LASSERT(list_empty(&stt_data.stt_hash[i]));
-
- stt_data.stt_shuttingdown = 1;
-
- wake_up(&stt_data.stt_waitq);
- lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
- "waiting for %d threads to terminate\n",
- stt_data.stt_nthreads);
-
- spin_unlock(&stt_data.stt_lock);
-}
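
The timer wheel described above hashes an expiry time, in seconds, into one of 128 slots that each cover an 8 second window, giving a 1024 second horizon before wrapping. A standalone sketch of the slot computation, reusing the constants from the macros above:

#include <stdio.h>
#include <time.h>

#define MINPOLL	3			/* log2 of the slot width: 8 s */
#define NSLOTS	(1 << 7)		/* 128 slots -> 1024 s horizon */

/* same arithmetic as STTIMER_SLOT(): drop the sub-slot bits, then mask
 * down to the slot index */
static unsigned int slot_of(long long expires)
{
	return (unsigned int)((expires >> MINPOLL) & (NSLOTS - 1));
}

int main(void)
{
	long long now = (long long)time(NULL);

	/* timers expiring within the same 8 second window share a slot */
	printf("slot(now)     = %u\n", slot_of(now));
	printf("slot(now + 5) = %u\n", slot_of(now + 5));
	printf("slot(now + 9) = %u\n", slot_of(now + 9));
	return 0;
}
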
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
deleted file mode 100644
index 03e2ee294c1c..000000000000
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/selftest/timer.h
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- */
-#ifndef __SELFTEST_TIMER_H__
-#define __SELFTEST_TIMER_H__
-
-typedef struct {
- struct list_head stt_list;
- time64_t stt_expires;
- void (*stt_func) (void *);
- void *stt_data;
-} stt_timer_t;
-
-void stt_add_timer(stt_timer_t *timer);
-int stt_del_timer(stt_timer_t *timer);
-int stt_startup(void);
-void stt_shutdown(void);
-
-#endif /* __SELFTEST_TIMER_H__ */
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
deleted file mode 100644
index 62c7bba75274..000000000000
--- a/drivers/staging/lustre/lustre/Kconfig
+++ /dev/null
@@ -1,62 +0,0 @@
-config LUSTRE_FS
- tristate "Lustre file system client support"
- depends on INET && m && !MIPS && !XTENSA && !SUPERH
- select LNET
- select CRYPTO
- select CRYPTO_CRC32
- select CRYPTO_CRC32_PCLMUL if X86
- select CRYPTO_CRC32C
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
- depends on MULTIUSER
- help
- This option enables Lustre file system client support. Choose Y
- here if you want to access a Lustre file system cluster. To compile
- this file system support as a module, choose M here: the module will
- be called lustre.
-
- To mount Lustre file systems, you also need to install the user space
- mount.lustre and other user space commands which can be found in the
- lustre-client package, available from
- http://downloads.whamcloud.com/public/lustre/
-
- Lustre file system is the most popular cluster file system in high
- performance computing. Source code of both kernel space and user space
- Lustre components can also be found at
- http://git.whamcloud.com/?p=fs/lustre-release.git;a=summary
-
- If unsure, say N.
-
- See also http://wiki.lustre.org/
-
-config LUSTRE_OBD_MAX_IOCTL_BUFFER
- int "Lustre obd max ioctl buffer bytes (default 8KB)"
- depends on LUSTRE_FS
- default 8192
- help
- This option defines the maximum size of buffer in bytes that user space
- applications can pass to Lustre kernel module through ioctl interface.
-
- If unsure, use default.
-
-config LUSTRE_DEBUG_EXPENSIVE_CHECK
- bool "Enable Lustre DEBUG checks"
- depends on LUSTRE_FS
- help
- This option is mainly for debug purpose. It enables Lustre code to do
- expensive checks that may have a performance impact.
-
- Use with caution. If unsure, say N.
-
-config LUSTRE_TRANSLATE_ERRNOS
- bool
- depends on LUSTRE_FS && !X86
- default y
-
-config LUSTRE_LLITE_LLOOP
- tristate "Lustre virtual block device"
- depends on LUSTRE_FS && BLOCK
- depends on !PPC_64K_PAGES && !ARM64_64K_PAGES && !MICROBLAZE_64K_PAGES && !PAGE_SIZE_64KB && !IA64_PAGE_SIZE_64KB && !PARISC_PAGE_SIZE_64KB
- default m
diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile
deleted file mode 100644
index 35d8b0b2dff4..000000000000
--- a/drivers/staging/lustre/lustre/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += libcfs/ obdclass/ ptlrpc/ fld/ osc/ mgc/ \
- fid/ lov/ mdc/ lmv/ llite/ obdecho/
diff --git a/drivers/staging/lustre/lustre/fid/Makefile b/drivers/staging/lustre/lustre/fid/Makefile
deleted file mode 100644
index b7ef314b4b84..000000000000
--- a/drivers/staging/lustre/lustre/fid/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += fid.o
-fid-y := fid_request.o fid_lib.o lproc_fid.o
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
deleted file mode 100644
index 84daee1154dc..000000000000
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_internal.h
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-#ifndef __FID_INTERNAL_H
-#define __FID_INTERNAL_H
-
-#include "../include/lustre/lustre_idl.h"
-#include "../../include/linux/libcfs/libcfs.h"
-
-/* Functions used internally in module. */
-int seq_client_alloc_super(struct lu_client_seq *seq,
- const struct lu_env *env);
-
-extern struct lprocfs_vars seq_client_debugfs_list[];
-
-#endif /* __FID_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
deleted file mode 100644
index dd65159ebb38..000000000000
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_lib.c
- *
- * Miscellaneous fid functions.
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/module.h>
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_fid.h"
-
-/**
- * A cluster-wide range from which fid-sequences are granted to servers and
- * then clients.
- *
- * Fid namespace:
- * <pre>
- * Normal FID: seq:64 [2^33,2^64-1] oid:32 ver:32
- * IGIF : 0:32, ino:32 gen:32 0:32
- * IDIF : 0:31, 1:1, ost-index:16, objd:48 0:32
- * </pre>
- *
- * The first 0x400 sequences of normal FID are reserved for special purpose.
- * FID_SEQ_START + 1 is for local file id generation.
- * FID_SEQ_START + 2 is for .lustre directory and its objects
- */
-const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE = {
- FID_SEQ_NORMAL,
- (__u64)~0ULL
-};
-EXPORT_SYMBOL(LUSTRE_SEQ_SPACE_RANGE);
-
-/* Zero range, used for init and other purposes. */
-const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE = {
- 0,
- 0
-};
-EXPORT_SYMBOL(LUSTRE_SEQ_ZERO_RANGE);
-
-/* Lustre Big Fs Lock fid. */
-const struct lu_fid LUSTRE_BFL_FID = { .f_seq = FID_SEQ_SPECIAL,
- .f_oid = FID_OID_SPECIAL_BFL,
- .f_ver = 0x0000000000000000 };
-EXPORT_SYMBOL(LUSTRE_BFL_FID);
-
-/** Special fid for ".lustre" directory */
-const struct lu_fid LU_DOT_LUSTRE_FID = { .f_seq = FID_SEQ_DOT_LUSTRE,
- .f_oid = FID_OID_DOT_LUSTRE,
- .f_ver = 0x0000000000000000 };
-EXPORT_SYMBOL(LU_DOT_LUSTRE_FID);
-
-/** Special fid for "fid" special object in .lustre */
-const struct lu_fid LU_OBF_FID = { .f_seq = FID_SEQ_DOT_LUSTRE,
- .f_oid = FID_OID_DOT_LUSTRE_OBF,
- .f_ver = 0x0000000000000000 };
-EXPORT_SYMBOL(LU_OBF_FID);
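
The comment above describes a normal FID as a 64-bit sequence, a 32-bit object id and a 32-bit version. A standalone sketch of that layout (the struct and the packing shown are illustrative only, not the Lustre wire format):

#include <stdint.h>
#include <stdio.h>

/* same shape as described above: seq:64 oid:32 ver:32 */
struct fid {
	uint64_t seq;
	uint32_t oid;
	uint32_t ver;
};

/* pack oid and ver into one 64-bit word next to the sequence; this only
 * shows the field widths, not how Lustre serializes a FID */
static void fid_pack(const struct fid *f, uint64_t out[2])
{
	out[0] = f->seq;
	out[1] = ((uint64_t)f->oid << 32) | f->ver;
}

int main(void)
{
	/* a "normal" FID: its sequence is at least 2^33 per the comment above */
	struct fid f = { .seq = 1ULL << 33, .oid = 7, .ver = 0 };
	uint64_t packed[2];

	fid_pack(&f, packed);
	printf("[0x%llx:0x%x:0x%x] -> 0x%llx 0x%llx\n",
	       (unsigned long long)f.seq, (unsigned)f.oid, (unsigned)f.ver,
	       (unsigned long long)packed[0], (unsigned long long)packed[1]);
	return 0;
}
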
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
deleted file mode 100644
index ce553406318d..000000000000
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ /dev/null
@@ -1,506 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_request.c
- *
- * Lustre Sequence Manager
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/module.h>
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-/* mdc RPC locks */
-#include "../include/lustre_mdc.h"
-#include "fid_internal.h"
-
-static struct dentry *seq_debugfs_dir;
-
-static int seq_client_rpc(struct lu_client_seq *seq,
- struct lu_seq_range *output, __u32 opc,
- const char *opcname)
-{
- struct obd_export *exp = seq->lcs_exp;
- struct ptlrpc_request *req;
- struct lu_seq_range *out, *in;
- __u32 *op;
- unsigned int debug_mask;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
- LUSTRE_MDS_VERSION, SEQ_QUERY);
- if (req == NULL)
- return -ENOMEM;
-
- /* Init operation code */
- op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
- *op = opc;
-
- /* Zero out input range, this is not recovery yet. */
- in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
- range_init(in);
-
- ptlrpc_request_set_replen(req);
-
- in->lsr_index = seq->lcs_space.lsr_index;
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
- fld_range_set_mdt(in);
- else
- fld_range_set_ost(in);
-
- if (opc == SEQ_ALLOC_SUPER) {
- req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
- req->rq_reply_portal = MDC_REPLY_PORTAL;
-		/* While allocating a super sequence for a data object,
-		 * the current thread might hold the export of MDT0 (MDT0
-		 * precreating objects on this OST) and will send the
-		 * request to MDT0 here, so we cannot keep resending the
-		 * request; otherwise, if MDT0 fails (is umounted), it
-		 * cannot release the export of MDT0. */
- if (seq->lcs_type == LUSTRE_SEQ_DATA)
- req->rq_no_delay = req->rq_no_resend = 1;
- debug_mask = D_CONSOLE;
- } else {
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
- req->rq_request_portal = SEQ_METADATA_PORTAL;
- else
- req->rq_request_portal = SEQ_DATA_PORTAL;
- debug_mask = D_INFO;
- }
-
- ptlrpc_at_set_req_timeout(req);
-
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
- rc = ptlrpc_queue_wait(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
- if (rc)
- goto out_req;
-
- out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
- *output = *out;
-
- if (!range_is_sane(output)) {
- CERROR("%s: Invalid range received from server: "
- DRANGE"\n", seq->lcs_name, PRANGE(output));
- rc = -EINVAL;
- goto out_req;
- }
-
- if (range_is_exhausted(output)) {
- CERROR("%s: Range received from server is exhausted: "
- DRANGE"]\n", seq->lcs_name, PRANGE(output));
- rc = -EINVAL;
- goto out_req;
- }
-
- CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"]\n",
- seq->lcs_name, opcname, PRANGE(output));
-
-out_req:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-/* Request sequence-controller node to allocate new super-sequence. */
-int seq_client_alloc_super(struct lu_client_seq *seq,
- const struct lu_env *env)
-{
- int rc;
-
- mutex_lock(&seq->lcs_mutex);
-
- if (seq->lcs_srv) {
- rc = 0;
- } else {
- /* Check whether the connection to seq controller has been
- * setup (lcs_exp != NULL) */
- if (seq->lcs_exp == NULL) {
- mutex_unlock(&seq->lcs_mutex);
- return -EINPROGRESS;
- }
-
- rc = seq_client_rpc(seq, &seq->lcs_space,
- SEQ_ALLOC_SUPER, "super");
- }
- mutex_unlock(&seq->lcs_mutex);
- return rc;
-}
-
-/* Request sequence-controller node to allocate new meta-sequence. */
-static int seq_client_alloc_meta(const struct lu_env *env,
- struct lu_client_seq *seq)
-{
- int rc;
-
- if (seq->lcs_srv) {
- rc = 0;
- } else {
- do {
-			/* If the meta server returns -EINPROGRESS or
-			 * -EAGAIN, it might not be ready to allocate a
-			 * super sequence from the sequence controller
-			 * (MDT0) yet. */
- rc = seq_client_rpc(seq, &seq->lcs_space,
- SEQ_ALLOC_META, "meta");
- } while (rc == -EINPROGRESS || rc == -EAGAIN);
- }
-
- return rc;
-}
-
-/* Allocate new sequence for client. */
-static int seq_client_alloc_seq(const struct lu_env *env,
- struct lu_client_seq *seq, u64 *seqnr)
-{
- int rc;
-
- LASSERT(range_is_sane(&seq->lcs_space));
-
- if (range_is_exhausted(&seq->lcs_space)) {
- rc = seq_client_alloc_meta(env, seq);
- if (rc) {
- CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
- seq->lcs_name, rc);
- return rc;
- }
- CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
- seq->lcs_name, PRANGE(&seq->lcs_space));
- } else {
- rc = 0;
- }
-
- LASSERT(!range_is_exhausted(&seq->lcs_space));
- *seqnr = seq->lcs_space.lsr_start;
- seq->lcs_space.lsr_start += 1;
-
- CDEBUG(D_INFO, "%s: Allocated sequence [%#llx]\n", seq->lcs_name,
- *seqnr);
-
- return rc;
-}
-
-static int seq_fid_alloc_prep(struct lu_client_seq *seq,
- wait_queue_t *link)
-{
- if (seq->lcs_update) {
- add_wait_queue(&seq->lcs_waitq, link);
- set_current_state(TASK_UNINTERRUPTIBLE);
- mutex_unlock(&seq->lcs_mutex);
-
- schedule();
-
- mutex_lock(&seq->lcs_mutex);
- remove_wait_queue(&seq->lcs_waitq, link);
- set_current_state(TASK_RUNNING);
- return -EAGAIN;
- }
- ++seq->lcs_update;
- mutex_unlock(&seq->lcs_mutex);
- return 0;
-}
-
-static void seq_fid_alloc_fini(struct lu_client_seq *seq)
-{
- LASSERT(seq->lcs_update == 1);
- mutex_lock(&seq->lcs_mutex);
- --seq->lcs_update;
- wake_up(&seq->lcs_waitq);
-}
-
-/* Allocate new fid on passed client @seq and save it to @fid. */
-int seq_client_alloc_fid(const struct lu_env *env,
- struct lu_client_seq *seq, struct lu_fid *fid)
-{
- wait_queue_t link;
- int rc;
-
- LASSERT(seq != NULL);
- LASSERT(fid != NULL);
-
- init_waitqueue_entry(&link, current);
- mutex_lock(&seq->lcs_mutex);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
- seq->lcs_fid.f_oid = seq->lcs_width;
-
- while (1) {
- u64 seqnr;
-
- if (!fid_is_zero(&seq->lcs_fid) &&
- fid_oid(&seq->lcs_fid) < seq->lcs_width) {
- /* Just bump last allocated fid and return to caller. */
- seq->lcs_fid.f_oid += 1;
- rc = 0;
- break;
- }
-
- rc = seq_fid_alloc_prep(seq, &link);
- if (rc)
- continue;
-
- rc = seq_client_alloc_seq(env, seq, &seqnr);
- if (rc) {
- CERROR("%s: Can't allocate new sequence, rc %d\n",
- seq->lcs_name, rc);
- seq_fid_alloc_fini(seq);
- mutex_unlock(&seq->lcs_mutex);
- return rc;
- }
-
- CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16Lx]\n",
- seq->lcs_name, seqnr);
-
- seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
- seq->lcs_fid.f_seq = seqnr;
- seq->lcs_fid.f_ver = 0;
-
- /*
- * Inform caller that sequence switch is performed to allow it
- * to setup FLD for it.
- */
- rc = 1;
-
- seq_fid_alloc_fini(seq);
- break;
- }
-
- *fid = seq->lcs_fid;
- mutex_unlock(&seq->lcs_mutex);
-
- CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
- return rc;
-}
-EXPORT_SYMBOL(seq_client_alloc_fid);
-
-/*
- * Finish the current sequence due to disconnect.
- * See mdc_import_event()
- */
-void seq_client_flush(struct lu_client_seq *seq)
-{
- wait_queue_t link;
-
- LASSERT(seq != NULL);
- init_waitqueue_entry(&link, current);
- mutex_lock(&seq->lcs_mutex);
-
- while (seq->lcs_update) {
- add_wait_queue(&seq->lcs_waitq, &link);
- set_current_state(TASK_UNINTERRUPTIBLE);
- mutex_unlock(&seq->lcs_mutex);
-
- schedule();
-
- mutex_lock(&seq->lcs_mutex);
- remove_wait_queue(&seq->lcs_waitq, &link);
- set_current_state(TASK_RUNNING);
- }
-
- fid_zero(&seq->lcs_fid);
- /**
-	 * This id should not be used for seq range allocation.
-	 * Set to -1 for debug checks.
- */
-
- seq->lcs_space.lsr_index = -1;
-
- range_init(&seq->lcs_space);
- mutex_unlock(&seq->lcs_mutex);
-}
-EXPORT_SYMBOL(seq_client_flush);
-
-static void seq_client_debugfs_fini(struct lu_client_seq *seq)
-{
- if (!IS_ERR_OR_NULL(seq->lcs_debugfs_entry))
- ldebugfs_remove(&seq->lcs_debugfs_entry);
-}
-
-static int seq_client_debugfs_init(struct lu_client_seq *seq)
-{
- int rc;
-
- seq->lcs_debugfs_entry = ldebugfs_register(seq->lcs_name,
- seq_debugfs_dir,
- NULL, NULL);
-
- if (IS_ERR_OR_NULL(seq->lcs_debugfs_entry)) {
- CERROR("%s: LdebugFS failed in seq-init\n", seq->lcs_name);
- rc = seq->lcs_debugfs_entry ? PTR_ERR(seq->lcs_debugfs_entry)
- : -ENOMEM;
- seq->lcs_debugfs_entry = NULL;
- return rc;
- }
-
- rc = ldebugfs_add_vars(seq->lcs_debugfs_entry,
- seq_client_debugfs_list, seq);
- if (rc) {
- CERROR("%s: Can't init sequence manager debugfs, rc %d\n",
- seq->lcs_name, rc);
- goto out_cleanup;
- }
-
- return 0;
-
-out_cleanup:
- seq_client_debugfs_fini(seq);
- return rc;
-}
-
-static void seq_client_fini(struct lu_client_seq *seq)
-{
- seq_client_debugfs_fini(seq);
-
- if (seq->lcs_exp) {
- class_export_put(seq->lcs_exp);
- seq->lcs_exp = NULL;
- }
-
- seq->lcs_srv = NULL;
-}
-
-static int seq_client_init(struct lu_client_seq *seq,
- struct obd_export *exp,
- enum lu_cli_type type,
- const char *prefix)
-{
- int rc;
-
- LASSERT(seq != NULL);
- LASSERT(prefix != NULL);
-
- seq->lcs_srv = NULL;
- seq->lcs_type = type;
-
- mutex_init(&seq->lcs_mutex);
- if (type == LUSTRE_SEQ_METADATA)
- seq->lcs_width = LUSTRE_METADATA_SEQ_MAX_WIDTH;
- else
- seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
-
- init_waitqueue_head(&seq->lcs_waitq);
- /* Make sure that things are clear before work is started. */
- seq_client_flush(seq);
-
- if (exp != NULL)
- seq->lcs_exp = class_export_get(exp);
- else if (type == LUSTRE_SEQ_METADATA)
- LASSERT(seq->lcs_srv != NULL);
-
- snprintf(seq->lcs_name, sizeof(seq->lcs_name),
- "cli-%s", prefix);
-
- rc = seq_client_debugfs_init(seq);
- if (rc)
- seq_client_fini(seq);
- return rc;
-}
-
-int client_fid_init(struct obd_device *obd,
- struct obd_export *exp, enum lu_cli_type type)
-{
- struct client_obd *cli = &obd->u.cli;
- char *prefix;
- int rc;
-
- cli->cl_seq = kzalloc(sizeof(*cli->cl_seq), GFP_NOFS);
- if (!cli->cl_seq)
- return -ENOMEM;
-
- prefix = kzalloc(MAX_OBD_NAME + 5, GFP_NOFS);
- if (!prefix) {
- rc = -ENOMEM;
- goto out_free_seq;
- }
-
- snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name);
-
- /* Init client side sequence-manager */
- rc = seq_client_init(cli->cl_seq, exp, type, prefix);
- kfree(prefix);
- if (rc)
- goto out_free_seq;
-
- return rc;
-out_free_seq:
- kfree(cli->cl_seq);
- cli->cl_seq = NULL;
- return rc;
-}
-EXPORT_SYMBOL(client_fid_init);
-
-int client_fid_fini(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
-
- if (cli->cl_seq != NULL) {
- seq_client_fini(cli->cl_seq);
- kfree(cli->cl_seq);
- cli->cl_seq = NULL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(client_fid_fini);
-
-static int __init fid_mod_init(void)
-{
- seq_debugfs_dir = ldebugfs_register(LUSTRE_SEQ_NAME,
- debugfs_lustre_root,
- NULL, NULL);
- return PTR_ERR_OR_ZERO(seq_debugfs_dir);
-}
-
-static void __exit fid_mod_exit(void)
-{
- if (!IS_ERR_OR_NULL(seq_debugfs_dir))
- ldebugfs_remove(&seq_debugfs_dir);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre FID Module");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1.0");
-
-module_init(fid_mod_init);
-module_exit(fid_mod_exit);
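
seq_client_alloc_fid() above hands out FIDs by bumping f_oid within the current sequence until lcs_width is reached, then switching to a freshly allocated sequence and returning 1 so the caller can update the FLD. A standalone sketch of that allocation loop, with a fake in-process allocator standing in for the RPC path and the starting sequence and object id chosen arbitrarily for illustration:

#include <stdint.h>
#include <stdio.h>

struct fid {
	uint64_t seq;
	uint32_t oid;
};

/* fake "sequence controller": just counts up from an arbitrary start */
static uint64_t next_seq = 0x200000400ULL;

static uint64_t alloc_sequence(void)
{
	return next_seq++;
}

/* returns 1 when a sequence switch happened, 0 otherwise */
static int alloc_fid(struct fid *cur, uint32_t width, struct fid *out)
{
	if (cur->seq != 0 && cur->oid < width) {
		cur->oid += 1;			/* just bump within the sequence */
		*out = *cur;
		return 0;
	}
	cur->seq = alloc_sequence();		/* stands in for seq_client_alloc_seq() */
	cur->oid = 1;				/* first object id of the new sequence */
	*out = *cur;
	return 1;				/* caller may need to update the FLD */
}

int main(void)
{
	struct fid cur = { 0, 0 }, f;
	uint32_t width = 3;			/* tiny width to show the rollover */
	int i;

	for (i = 0; i < 7; i++) {
		int sw = alloc_fid(&cur, width, &f);

		printf("fid [0x%llx:0x%x]%s\n", (unsigned long long)f.seq,
		       (unsigned)f.oid, sw ? "  (new sequence)" : "");
	}
	return 0;
}
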
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
deleted file mode 100644
index bf12723c9b1c..000000000000
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/lproc_fid.c
- *
- * Lustre Sequence Manager
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/module.h>
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_req_layout.h"
-#include "../include/lustre_fid.h"
-#include "fid_internal.h"
-
-/* Format: [0x64BIT_INT - 0x64BIT_INT] + 32 bytes just in case */
-#define MAX_FID_RANGE_STRLEN (32 + 2 * 2 * sizeof(__u64))
-/*
- * Note: this function is only used for testing; it is not safe for production
- * use.
- */
-static int
-ldebugfs_fid_write_common(const char __user *buffer, size_t count,
- struct lu_seq_range *range)
-{
- struct lu_seq_range tmp;
- int rc;
- char kernbuf[MAX_FID_RANGE_STRLEN];
-
- LASSERT(range != NULL);
-
- if (count >= sizeof(kernbuf))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = 0;
-
- if (count == 5 && strcmp(kernbuf, "clear") == 0) {
- memset(range, 0, sizeof(*range));
- return count;
- }
-
- /* of the form "[0x0000000240000400 - 0x000000028000400]" */
- rc = sscanf(kernbuf, "[%llx - %llx]\n",
- (unsigned long long *)&tmp.lsr_start,
- (unsigned long long *)&tmp.lsr_end);
- if (!range_is_sane(&tmp) || range_is_zero(&tmp) ||
- tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
- return -EINVAL;
- *range = tmp;
- return count;
-}
-
-/* Client side debugfs stuff */
-static ssize_t
-ldebugfs_fid_space_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct lu_client_seq *seq;
- int rc;
-
- seq = ((struct seq_file *)file->private_data)->private;
- LASSERT(seq != NULL);
-
- mutex_lock(&seq->lcs_mutex);
- rc = ldebugfs_fid_write_common(buffer, count, &seq->lcs_space);
-
- if (rc == 0) {
- CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
- seq->lcs_name, PRANGE(&seq->lcs_space));
- }
-
- mutex_unlock(&seq->lcs_mutex);
-
- return count;
-}
-
-static int
-ldebugfs_fid_space_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
-
- LASSERT(seq != NULL);
-
- mutex_lock(&seq->lcs_mutex);
- seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space));
- mutex_unlock(&seq->lcs_mutex);
-
- return 0;
-}
-
-static ssize_t
-ldebugfs_fid_width_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct lu_client_seq *seq;
- __u64 max;
- int rc, val;
-
- seq = ((struct seq_file *)file->private_data)->private;
- LASSERT(seq != NULL);
-
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- mutex_lock(&seq->lcs_mutex);
- if (seq->lcs_type == LUSTRE_SEQ_DATA)
- max = LUSTRE_DATA_SEQ_MAX_WIDTH;
- else
- max = LUSTRE_METADATA_SEQ_MAX_WIDTH;
-
- if (val <= max && val > 0) {
- seq->lcs_width = val;
-
- CDEBUG(D_INFO, "%s: Sequence size: %llu\n", seq->lcs_name,
- seq->lcs_width);
- }
-
- mutex_unlock(&seq->lcs_mutex);
-
- return count;
-}
-
-static int
-ldebugfs_fid_width_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
-
- LASSERT(seq != NULL);
-
- mutex_lock(&seq->lcs_mutex);
- seq_printf(m, "%llu\n", seq->lcs_width);
- mutex_unlock(&seq->lcs_mutex);
-
- return 0;
-}
-
-static int
-ldebugfs_fid_fid_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
-
- LASSERT(seq != NULL);
-
- mutex_lock(&seq->lcs_mutex);
- seq_printf(m, DFID "\n", PFID(&seq->lcs_fid));
- mutex_unlock(&seq->lcs_mutex);
-
- return 0;
-}
-
-static int
-ldebugfs_fid_server_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
- struct client_obd *cli;
-
- LASSERT(seq != NULL);
-
- if (seq->lcs_exp != NULL) {
- cli = &seq->lcs_exp->exp_obd->u.cli;
- seq_printf(m, "%s\n", cli->cl_target_uuid.uuid);
- } else {
- seq_printf(m, "%s\n", seq->lcs_srv->lss_name);
- }
-
- return 0;
-}
-
-LPROC_SEQ_FOPS(ldebugfs_fid_space);
-LPROC_SEQ_FOPS(ldebugfs_fid_width);
-LPROC_SEQ_FOPS_RO(ldebugfs_fid_server);
-LPROC_SEQ_FOPS_RO(ldebugfs_fid_fid);
-
-struct lprocfs_vars seq_client_debugfs_list[] = {
- { "space", &ldebugfs_fid_space_fops },
- { "width", &ldebugfs_fid_width_fops },
- { "server", &ldebugfs_fid_server_fops },
- { "fid", &ldebugfs_fid_fid_fops },
- { NULL }
-};
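
The removed lproc_fid.c accepted sequence ranges written to debugfs as "[0xSTART - 0xEND]" (or the word "clear") and validated them before use. The following is a minimal, hypothetical stand-alone C sketch of that parse-and-validate step, not kernel code; the names and the exact sanity rule are illustrative.

/* Hypothetical stand-alone sketch of the "[0xstart - 0xend]" range parsing
 * done by the removed ldebugfs_fid_write_common(); not kernel code. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct seq_range { uint64_t start, end; };

/* Returns 0 on success, -1 on malformed or non-sane input. */
static int parse_range(const char *buf, struct seq_range *out)
{
	unsigned long long s, e;

	if (strcmp(buf, "clear") == 0) {	/* "clear" resets the range */
		out->start = out->end = 0;
		return 0;
	}
	if (sscanf(buf, "[%llx - %llx]", &s, &e) != 2)
		return -1;
	if (s >= e)				/* range must be sane and non-empty */
		return -1;
	out->start = s;
	out->end = e;
	return 0;
}

int main(void)
{
	struct seq_range r;

	if (parse_range("[0x240000400 - 0x280000400]", &r) == 0)
		printf("start=%#llx end=%#llx\n",
		       (unsigned long long)r.start, (unsigned long long)r.end);
	return 0;
}
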
diff --git a/drivers/staging/lustre/lustre/fld/Makefile b/drivers/staging/lustre/lustre/fld/Makefile
deleted file mode 100644
index 646e315d1aa8..000000000000
--- a/drivers/staging/lustre/lustre/fld/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += fld.o
-fld-y := fld_request.o fld_cache.o lproc_fld.o
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
deleted file mode 100644
index 446917484637..000000000000
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_cache.c
- *
- * FLD (Fids Location Database)
- *
- * Author: Pravin Shelar <pravin.shelar@sun.com>
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/module.h>
-#include <asm/div64.h>
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_ver.h"
-#include "../include/obd_support.h"
-#include "../include/lprocfs_status.h"
-
-#include "../include/lustre_req_layout.h"
-#include "../include/lustre_fld.h"
-#include "fld_internal.h"
-
-/**
- * create fld cache.
- */
-struct fld_cache *fld_cache_init(const char *name,
- int cache_size, int cache_threshold)
-{
- struct fld_cache *cache;
-
- LASSERT(name != NULL);
- LASSERT(cache_threshold < cache_size);
-
- cache = kzalloc(sizeof(*cache), GFP_NOFS);
- if (!cache)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&cache->fci_entries_head);
- INIT_LIST_HEAD(&cache->fci_lru);
-
- cache->fci_cache_count = 0;
- rwlock_init(&cache->fci_lock);
-
- strlcpy(cache->fci_name, name,
- sizeof(cache->fci_name));
-
- cache->fci_cache_size = cache_size;
- cache->fci_threshold = cache_threshold;
-
- /* Init fld cache info. */
- memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));
-
- CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
- cache->fci_name, cache_size, cache_threshold);
-
- return cache;
-}
-
-/**
- * destroy fld cache.
- */
-void fld_cache_fini(struct fld_cache *cache)
-{
- __u64 pct;
-
- LASSERT(cache != NULL);
- fld_cache_flush(cache);
-
- if (cache->fci_stat.fst_count > 0) {
- pct = cache->fci_stat.fst_cache * 100;
- do_div(pct, cache->fci_stat.fst_count);
- } else {
- pct = 0;
- }
-
- CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
- CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count);
- CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache);
- CDEBUG(D_INFO, " Cache hits: %llu%%\n", pct);
-
- kfree(cache);
-}
-
-/**
- * delete given node from list.
- */
-void fld_cache_entry_delete(struct fld_cache *cache,
- struct fld_cache_entry *node)
-{
- list_del(&node->fce_list);
- list_del(&node->fce_lru);
- cache->fci_cache_count--;
- kfree(node);
-}
-
-/**
- * fix list by checking new entry with NEXT entry in order.
- */
-static void fld_fix_new_list(struct fld_cache *cache)
-{
- struct fld_cache_entry *f_curr;
- struct fld_cache_entry *f_next;
- struct lu_seq_range *c_range;
- struct lu_seq_range *n_range;
- struct list_head *head = &cache->fci_entries_head;
-
-restart_fixup:
-
- list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
- c_range = &f_curr->fce_range;
- n_range = &f_next->fce_range;
-
- LASSERT(range_is_sane(c_range));
- if (&f_next->fce_list == head)
- break;
-
- if (c_range->lsr_flags != n_range->lsr_flags)
- continue;
-
- LASSERTF(c_range->lsr_start <= n_range->lsr_start,
- "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
- PRANGE(c_range), PRANGE(n_range));
-
- /* check merge possibility with next range */
- if (c_range->lsr_end == n_range->lsr_start) {
- if (c_range->lsr_index != n_range->lsr_index)
- continue;
- n_range->lsr_start = c_range->lsr_start;
- fld_cache_entry_delete(cache, f_curr);
- continue;
- }
-
- /* check if current range overlaps with next range. */
- if (n_range->lsr_start < c_range->lsr_end) {
- if (c_range->lsr_index == n_range->lsr_index) {
- n_range->lsr_start = c_range->lsr_start;
- n_range->lsr_end = max(c_range->lsr_end,
- n_range->lsr_end);
- fld_cache_entry_delete(cache, f_curr);
- } else {
- if (n_range->lsr_end <= c_range->lsr_end) {
- *n_range = *c_range;
- fld_cache_entry_delete(cache, f_curr);
- } else
- n_range->lsr_start = c_range->lsr_end;
- }
-
- /* we could also overlap the next
- * range; better restart. */
- goto restart_fixup;
- }
-
- /* kill duplicates */
- if (c_range->lsr_start == n_range->lsr_start &&
- c_range->lsr_end == n_range->lsr_end)
- fld_cache_entry_delete(cache, f_curr);
- }
-}
-
-/**
- * add node to fld cache
- */
-static inline void fld_cache_entry_add(struct fld_cache *cache,
- struct fld_cache_entry *f_new,
- struct list_head *pos)
-{
- list_add(&f_new->fce_list, pos);
- list_add(&f_new->fce_lru, &cache->fci_lru);
-
- cache->fci_cache_count++;
- fld_fix_new_list(cache);
-}
-
-/**
- * Check if cache needs to be shrunk. If so - do it.
- * Remove one entry in list and so on until cache is shrunk enough.
- */
-static int fld_cache_shrink(struct fld_cache *cache)
-{
- struct fld_cache_entry *flde;
- struct list_head *curr;
- int num = 0;
-
- LASSERT(cache != NULL);
-
- if (cache->fci_cache_count < cache->fci_cache_size)
- return 0;
-
- curr = cache->fci_lru.prev;
-
- while (cache->fci_cache_count + cache->fci_threshold >
- cache->fci_cache_size && curr != &cache->fci_lru) {
-
- flde = list_entry(curr, struct fld_cache_entry, fce_lru);
- curr = curr->prev;
- fld_cache_entry_delete(cache, flde);
- num++;
- }
-
- CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
- cache->fci_name, num);
-
- return 0;
-}
-
-/**
- * kill all fld cache entries.
- */
-void fld_cache_flush(struct fld_cache *cache)
-{
- write_lock(&cache->fci_lock);
- cache->fci_cache_size = 0;
- fld_cache_shrink(cache);
- write_unlock(&cache->fci_lock);
-}
-
-/**
- * punch hole in existing range. divide this range and add new
- * entry accordingly.
- */
-
-static void fld_cache_punch_hole(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
-{
- const struct lu_seq_range *range = &f_new->fce_range;
- const u64 new_start = range->lsr_start;
- const u64 new_end = range->lsr_end;
- struct fld_cache_entry *fldt;
-
- fldt = kzalloc(sizeof(*fldt), GFP_ATOMIC);
- if (!fldt) {
- kfree(f_new);
- /* overlap is not allowed, so don't mess up the list. */
- return;
- }
- /* break f_curr RANGE into three RANGES:
- * f_curr, f_new , fldt
- */
-
- /* f_new = *range */
-
- /* fldt */
- fldt->fce_range.lsr_start = new_end;
- fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
- fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;
-
- /* f_curr */
- f_curr->fce_range.lsr_end = new_start;
-
- /* add these two entries to list */
- fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
- fld_cache_entry_add(cache, fldt, &f_new->fce_list);
-
- /* no need to fixup */
-}
-
-/**
- * handle range overlap in fld cache.
- */
-static void fld_cache_overlap_handle(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
-{
- const struct lu_seq_range *range = &f_new->fce_range;
- const u64 new_start = range->lsr_start;
- const u64 new_end = range->lsr_end;
- const u32 mdt = range->lsr_index;
-
- /* this is the overlap case; these cases only check overlap with the
- * prev range. fixup will handle overlap with the next range. */
-
- if (f_curr->fce_range.lsr_index == mdt) {
- f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
- new_start);
-
- f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
- new_end);
-
- kfree(f_new);
- fld_fix_new_list(cache);
-
- } else if (new_start <= f_curr->fce_range.lsr_start &&
- f_curr->fce_range.lsr_end <= new_end) {
- /* case 1: new range completely overshadowed existing range.
- * e.g. whole range migrated. update fld cache entry */
-
- f_curr->fce_range = *range;
- kfree(f_new);
- fld_fix_new_list(cache);
-
- } else if (f_curr->fce_range.lsr_start < new_start &&
- new_end < f_curr->fce_range.lsr_end) {
- /* case 2: new range fit within existing range. */
-
- fld_cache_punch_hole(cache, f_curr, f_new);
-
- } else if (new_end <= f_curr->fce_range.lsr_end) {
- /* case 3: overlap:
- * [new_start [c_start new_end) c_end)
- */
-
- LASSERT(new_start <= f_curr->fce_range.lsr_start);
-
- f_curr->fce_range.lsr_start = new_end;
- fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);
-
- } else if (f_curr->fce_range.lsr_start <= new_start) {
- /* case 4: overlap:
- * [c_start [new_start c_end) new_end)
- */
-
- LASSERT(f_curr->fce_range.lsr_end <= new_end);
-
- f_curr->fce_range.lsr_end = new_start;
- fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
- } else
- CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
- PRANGE(range), PRANGE(&f_curr->fce_range));
-}
-
-struct fld_cache_entry
-*fld_cache_entry_create(const struct lu_seq_range *range)
-{
- struct fld_cache_entry *f_new;
-
- LASSERT(range_is_sane(range));
-
- f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
- if (!f_new)
- return ERR_PTR(-ENOMEM);
-
- f_new->fce_range = *range;
- return f_new;
-}
-
-/**
- * Insert FLD entry in FLD cache.
- *
- * This function handles all cases of merging and breaking up of
- * ranges.
- */
-int fld_cache_insert_nolock(struct fld_cache *cache,
- struct fld_cache_entry *f_new)
-{
- struct fld_cache_entry *f_curr;
- struct fld_cache_entry *n;
- struct list_head *head;
- struct list_head *prev = NULL;
- const u64 new_start = f_new->fce_range.lsr_start;
- const u64 new_end = f_new->fce_range.lsr_end;
- __u32 new_flags = f_new->fce_range.lsr_flags;
-
- /*
- * Duplicate entries are eliminated in insert op.
- * So we don't need to search new entry before starting
- * insertion loop.
- */
-
- if (!cache->fci_no_shrink)
- fld_cache_shrink(cache);
-
- head = &cache->fci_entries_head;
-
- list_for_each_entry_safe(f_curr, n, head, fce_list) {
- /* stop: the new range ends before this entry starts */
- if (new_end < f_curr->fce_range.lsr_start ||
- (new_end == f_curr->fce_range.lsr_start &&
- new_flags != f_curr->fce_range.lsr_flags))
- break;
-
- prev = &f_curr->fce_list;
- /* check if this range is to left of new range. */
- if (new_start < f_curr->fce_range.lsr_end &&
- new_flags == f_curr->fce_range.lsr_flags) {
- fld_cache_overlap_handle(cache, f_curr, f_new);
- goto out;
- }
- }
-
- if (prev == NULL)
- prev = head;
-
- CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
- /* Add new entry to cache and lru list. */
- fld_cache_entry_add(cache, f_new, prev);
-out:
- return 0;
-}
-
-int fld_cache_insert(struct fld_cache *cache,
- const struct lu_seq_range *range)
-{
- struct fld_cache_entry *flde;
- int rc;
-
- flde = fld_cache_entry_create(range);
- if (IS_ERR(flde))
- return PTR_ERR(flde);
-
- write_lock(&cache->fci_lock);
- rc = fld_cache_insert_nolock(cache, flde);
- write_unlock(&cache->fci_lock);
- if (rc)
- kfree(flde);
-
- return rc;
-}
-
-void fld_cache_delete_nolock(struct fld_cache *cache,
- const struct lu_seq_range *range)
-{
- struct fld_cache_entry *flde;
- struct fld_cache_entry *tmp;
- struct list_head *head;
-
- head = &cache->fci_entries_head;
- list_for_each_entry_safe(flde, tmp, head, fce_list) {
- /* delete the cached entry that matches the given range */
- if (range->lsr_start == flde->fce_range.lsr_start ||
- (range->lsr_end == flde->fce_range.lsr_end &&
- range->lsr_flags == flde->fce_range.lsr_flags)) {
- fld_cache_entry_delete(cache, flde);
- break;
- }
- }
-}
-
-/**
- * Delete FLD entry in FLD cache.
- *
- */
-void fld_cache_delete(struct fld_cache *cache,
- const struct lu_seq_range *range)
-{
- write_lock(&cache->fci_lock);
- fld_cache_delete_nolock(cache, range);
- write_unlock(&cache->fci_lock);
-}
-
-struct fld_cache_entry
-*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
- struct lu_seq_range *range)
-{
- struct fld_cache_entry *flde;
- struct fld_cache_entry *got = NULL;
- struct list_head *head;
-
- head = &cache->fci_entries_head;
- list_for_each_entry(flde, head, fce_list) {
- if (range->lsr_start == flde->fce_range.lsr_start ||
- (range->lsr_end == flde->fce_range.lsr_end &&
- range->lsr_flags == flde->fce_range.lsr_flags)) {
- got = flde;
- break;
- }
- }
-
- return got;
-}
-
-/**
- * lookup \a seq sequence for range in fld cache.
- */
-struct fld_cache_entry
-*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
-{
- struct fld_cache_entry *got = NULL;
-
- read_lock(&cache->fci_lock);
- got = fld_cache_entry_lookup_nolock(cache, range);
- read_unlock(&cache->fci_lock);
- return got;
-}
-
-/**
- * lookup \a seq sequence for range in fld cache.
- */
-int fld_cache_lookup(struct fld_cache *cache,
- const u64 seq, struct lu_seq_range *range)
-{
- struct fld_cache_entry *flde;
- struct fld_cache_entry *prev = NULL;
- struct list_head *head;
-
- read_lock(&cache->fci_lock);
- head = &cache->fci_entries_head;
-
- cache->fci_stat.fst_count++;
- list_for_each_entry(flde, head, fce_list) {
- if (flde->fce_range.lsr_start > seq) {
- if (prev != NULL)
- *range = prev->fce_range;
- break;
- }
-
- prev = flde;
- if (range_within(&flde->fce_range, seq)) {
- *range = flde->fce_range;
-
- cache->fci_stat.fst_cache++;
- read_unlock(&cache->fci_lock);
- return 0;
- }
- }
- read_unlock(&cache->fci_lock);
- return -ENOENT;
-}
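
The removed fld_cache.c kept a sorted, non-overlapping list of sequence ranges and answered "which MDT index owns sequence X", giving up early once a range starts past the sequence. Below is a hypothetical user-space sketch of that lookup over a plain array; the structure and helper names are illustrative, not the kernel API.

/* Hypothetical user-space sketch of the sorted-range lookup performed by the
 * removed fld_cache_lookup(); an array stands in for the kernel linked list. */
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; uint32_t index; };	/* [start, end) -> MDT index */

/* Returns the MDT index owning seq, or -1 if no cached range covers it. */
static int lookup(const struct range *tbl, int n, uint64_t seq)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].start > seq)		/* list sorted on start: past it, give up */
			break;
		if (seq < tbl[i].end)		/* start <= seq < end: cache hit */
			return (int)tbl[i].index;
	}
	return -1;
}

int main(void)
{
	const struct range cache[] = {
		{ 0x200000400ULL, 0x240000400ULL, 0 },
		{ 0x240000400ULL, 0x280000400ULL, 1 },
	};

	printf("%d\n", lookup(cache, 2, 0x250000000ULL));	/* prints 1 */
	printf("%d\n", lookup(cache, 2, 0x300000000ULL));	/* prints -1 (miss) */
	return 0;
}
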
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
deleted file mode 100644
index fbb232de6c74..000000000000
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_internal.h
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- * Author: Tom WangDi <wangdi@clusterfs.com>
- */
-#ifndef __FLD_INTERNAL_H
-#define __FLD_INTERNAL_H
-
-#include "../include/lustre/lustre_idl.h"
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/lustre_req_layout.h"
-#include "../include/lustre_fld.h"
-
-enum {
- LUSTRE_FLD_INIT = 1 << 0,
- LUSTRE_FLD_RUN = 1 << 1
-};
-
-struct fld_stats {
- __u64 fst_count;
- __u64 fst_cache;
- __u64 fst_inflight;
-};
-
-typedef int (*fld_hash_func_t) (struct lu_client_fld *, __u64);
-
-typedef struct lu_fld_target *
-(*fld_scan_func_t) (struct lu_client_fld *, __u64);
-
-struct lu_fld_hash {
- const char *fh_name;
- fld_hash_func_t fh_hash_func;
- fld_scan_func_t fh_scan_func;
-};
-
-struct fld_cache_entry {
- struct list_head fce_lru;
- struct list_head fce_list;
- /**
- * fld cache entries are sorted on range->lsr_start field. */
- struct lu_seq_range fce_range;
-};
-
-struct fld_cache {
- /**
- * Cache guard; protects fci_hash mostly, since the other fields are
- * immutable after init is finished.
- */
- rwlock_t fci_lock;
-
- /**
- * Cache shrink threshold */
- int fci_threshold;
-
- /**
- * Preferred number of cached entries */
- int fci_cache_size;
-
- /**
- * Current number of cached entries. Protected by \a fci_lock */
- int fci_cache_count;
-
- /**
- * LRU list fld entries. */
- struct list_head fci_lru;
-
- /**
- * sorted fld entries. */
- struct list_head fci_entries_head;
-
- /**
- * Cache statistics. */
- struct fld_stats fci_stat;
-
- /**
- * Cache name used for debug and messages. */
- char fci_name[LUSTRE_MDT_MAXNAMELEN];
- unsigned int fci_no_shrink:1;
-};
-
-enum fld_op {
- FLD_CREATE = 0,
- FLD_DELETE = 1,
- FLD_LOOKUP = 2
-};
-
-enum {
- /* 4M of FLD cache will not hurt client a lot. */
- FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
-
- /* 1M of FLD cache will not hurt client a lot. */
- FLD_CLIENT_CACHE_SIZE = (1 * 0x100000)
-};
-
-enum {
- /* Cache threshold is 10 percent of size. */
- FLD_SERVER_CACHE_THRESHOLD = 10,
-
- /* Cache threshold is 10 percent of size. */
- FLD_CLIENT_CACHE_THRESHOLD = 10
-};
-
-extern struct lu_fld_hash fld_hash[];
-
-int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op);
-
-extern struct lprocfs_vars fld_client_debugfs_list[];
-
-struct fld_cache *fld_cache_init(const char *name,
- int cache_size, int cache_threshold);
-
-void fld_cache_fini(struct fld_cache *cache);
-
-void fld_cache_flush(struct fld_cache *cache);
-
-int fld_cache_insert(struct fld_cache *cache,
- const struct lu_seq_range *range);
-
-struct fld_cache_entry
-*fld_cache_entry_create(const struct lu_seq_range *range);
-
-int fld_cache_insert_nolock(struct fld_cache *cache,
- struct fld_cache_entry *f_new);
-void fld_cache_delete(struct fld_cache *cache,
- const struct lu_seq_range *range);
-void fld_cache_delete_nolock(struct fld_cache *cache,
- const struct lu_seq_range *range);
-int fld_cache_lookup(struct fld_cache *cache,
- const u64 seq, struct lu_seq_range *range);
-
-struct fld_cache_entry*
-fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range);
-void fld_cache_entry_delete(struct fld_cache *cache,
- struct fld_cache_entry *node);
-void fld_dump_cache_entries(struct fld_cache *cache);
-
-struct fld_cache_entry
-*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
- struct lu_seq_range *range);
-
-static inline const char *
-fld_target_name(struct lu_fld_target *tar)
-{
- if (tar->ft_srv != NULL)
- return tar->ft_srv->lsf_name;
-
- return (const char *)tar->ft_exp->exp_obd->obd_name;
-}
-
-#endif /* __FLD_INTERNAL_H */
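
fld_internal.h sized the client cache by a byte budget (1 MiB) with a 10% shrink threshold; the removed fld_client_init() converted those into entry counts. A tiny sketch of that arithmetic follows; the constants come from the header above, while the 64-byte entry size is only an illustrative stand-in for the real sizeof(struct fld_cache_entry).

/* Sketch of the cache sizing arithmetic from the removed fld_client_init();
 * the 64-byte entry size is illustrative, not the real sizeof(). */
#include <stdio.h>

#define FLD_CLIENT_CACHE_SIZE      (1 * 0x100000)	/* 1 MiB budget */
#define FLD_CLIENT_CACHE_THRESHOLD 10			/* shrink threshold, percent */

int main(void)
{
	const int entry_size = 64;			/* assumed entry size */
	int cache_size = FLD_CLIENT_CACHE_SIZE / entry_size;
	int threshold  = cache_size * FLD_CLIENT_CACHE_THRESHOLD / 100;

	printf("entries=%d shrink_threshold=%d\n", cache_size, threshold);
	return 0;
}
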
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
deleted file mode 100644
index 3fd91bc77da5..000000000000
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_request.c
- *
- * FLD (Fids Location Database)
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/module.h>
-#include <asm/div64.h>
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_ver.h"
-#include "../include/obd_support.h"
-#include "../include/lprocfs_status.h"
-
-#include "../include/lustre_req_layout.h"
-#include "../include/lustre_fld.h"
-#include "../include/lustre_mdc.h"
-#include "fld_internal.h"
-
-/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c.
- * It should be common code. The same applies to the mdc RPC lock. */
-static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
-{
- int rc;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- return rc;
-};
-
-static void fld_enter_request(struct client_obd *cli)
-{
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = { 0 };
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
- } else {
- cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
-}
-
-static void fld_exit_request(struct client_obd *cli)
-{
- struct list_head *l, *tmp;
- struct mdc_cache_waiter *mcw;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
- list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- wake_up(&mcw->mcw_waitq);
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-}
-
-static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
-{
- LASSERT(fld->lcf_count > 0);
- return do_div(seq, fld->lcf_count);
-}
-
-static struct lu_fld_target *
-fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
-{
- struct lu_fld_target *target;
- int hash;
-
- /* Because almost all special sequences are located on MDT0, they
- * should go to index 0 directly instead of calculating the hash
- * again; also, if the other MDTs are not yet connected, FLD lookup
- * requests (for sequences on MDT0) should not be blocked because of
- * the other MDTs */
- if (fid_seq_is_norm(seq))
- hash = fld_rrb_hash(fld, seq);
- else
- hash = 0;
-
-again:
- list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
- if (target->ft_idx == hash)
- return target;
- }
-
- if (hash != 0) {
- /* It is possible the remote target (MDT) is not yet connected to
- * the client, so refer this to MDT0, which should have been
- * connected during mount */
- hash = 0;
- goto again;
- }
-
- CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n",
- fld->lcf_name, hash, seq, fld->lcf_count);
-
- list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
- const char *srv_name = target->ft_srv != NULL ?
- target->ft_srv->lsf_name : "<null>";
- const char *exp_name = target->ft_exp != NULL ?
- (char *)target->ft_exp->exp_obd->obd_uuid.uuid :
- "<null>";
-
- CERROR(" exp: 0x%p (%s), srv: 0x%p (%s), idx: %llu\n",
- target->ft_exp, exp_name, target->ft_srv,
- srv_name, target->ft_idx);
- }
-
- /*
- * If the target is not found, there is a logic error anyway, so
- * LBUG() here to catch this situation.
- */
- LBUG();
- return NULL;
-}
-
-struct lu_fld_hash fld_hash[] = {
- {
- .fh_name = "RRB",
- .fh_hash_func = fld_rrb_hash,
- .fh_scan_func = fld_rrb_scan
- },
- {
- NULL,
- }
-};
-
-static struct lu_fld_target *
-fld_client_get_target(struct lu_client_fld *fld, u64 seq)
-{
- struct lu_fld_target *target;
-
- LASSERT(fld->lcf_hash != NULL);
-
- spin_lock(&fld->lcf_lock);
- target = fld->lcf_hash->fh_scan_func(fld, seq);
- spin_unlock(&fld->lcf_lock);
-
- if (target != NULL) {
- CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n",
- fld->lcf_name, target->ft_idx, seq);
- }
-
- return target;
-}
-
-/*
- * Add an export to FLD. This is usually done by CMM and LMV as they are the
- * main users of the FLD module.
- */
-int fld_client_add_target(struct lu_client_fld *fld,
- struct lu_fld_target *tar)
-{
- const char *name;
- struct lu_fld_target *target, *tmp;
-
- LASSERT(tar != NULL);
- name = fld_target_name(tar);
- LASSERT(name != NULL);
- LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL);
-
- if (fld->lcf_flags != LUSTRE_FLD_INIT) {
- CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
- fld->lcf_name, name, tar->ft_idx);
- return 0;
- }
- CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
- fld->lcf_name, name, tar->ft_idx);
-
- target = kzalloc(sizeof(*target), GFP_NOFS);
- if (!target)
- return -ENOMEM;
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
- if (tmp->ft_idx == tar->ft_idx) {
- spin_unlock(&fld->lcf_lock);
- kfree(target);
- CERROR("Target %s exists in FLD and known as %s:#%llu\n",
- name, fld_target_name(tmp), tmp->ft_idx);
- return -EEXIST;
- }
- }
-
- target->ft_exp = tar->ft_exp;
- if (target->ft_exp != NULL)
- class_export_get(target->ft_exp);
- target->ft_srv = tar->ft_srv;
- target->ft_idx = tar->ft_idx;
-
- list_add_tail(&target->ft_chain,
- &fld->lcf_targets);
-
- fld->lcf_count++;
- spin_unlock(&fld->lcf_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(fld_client_add_target);
-
-/* Remove export from FLD */
-int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
-{
- struct lu_fld_target *target, *tmp;
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
- if (target->ft_idx == idx) {
- fld->lcf_count--;
- list_del(&target->ft_chain);
- spin_unlock(&fld->lcf_lock);
-
- if (target->ft_exp != NULL)
- class_export_put(target->ft_exp);
-
- kfree(target);
- return 0;
- }
- }
- spin_unlock(&fld->lcf_lock);
- return -ENOENT;
-}
-EXPORT_SYMBOL(fld_client_del_target);
-
-static struct dentry *fld_debugfs_dir;
-
-static int fld_client_debugfs_init(struct lu_client_fld *fld)
-{
- int rc;
-
- fld->lcf_debugfs_entry = ldebugfs_register(fld->lcf_name,
- fld_debugfs_dir,
- NULL, NULL);
-
- if (IS_ERR_OR_NULL(fld->lcf_debugfs_entry)) {
- CERROR("%s: LdebugFS failed in fld-init\n", fld->lcf_name);
- rc = fld->lcf_debugfs_entry ? PTR_ERR(fld->lcf_debugfs_entry)
- : -ENOMEM;
- fld->lcf_debugfs_entry = NULL;
- return rc;
- }
-
- rc = ldebugfs_add_vars(fld->lcf_debugfs_entry,
- fld_client_debugfs_list, fld);
- if (rc) {
- CERROR("%s: Can't init FLD debufs, rc %d\n", fld->lcf_name, rc);
- goto out_cleanup;
- }
-
- return 0;
-
-out_cleanup:
- fld_client_debugfs_fini(fld);
- return rc;
-}
-
-void fld_client_debugfs_fini(struct lu_client_fld *fld)
-{
- if (!IS_ERR_OR_NULL(fld->lcf_debugfs_entry))
- ldebugfs_remove(&fld->lcf_debugfs_entry);
-}
-EXPORT_SYMBOL(fld_client_debugfs_fini);
-
-static inline int hash_is_sane(int hash)
-{
- return (hash >= 0 && hash < ARRAY_SIZE(fld_hash));
-}
-
-int fld_client_init(struct lu_client_fld *fld,
- const char *prefix, int hash)
-{
- int cache_size, cache_threshold;
- int rc;
-
- LASSERT(fld != NULL);
-
- snprintf(fld->lcf_name, sizeof(fld->lcf_name),
- "cli-%s", prefix);
-
- if (!hash_is_sane(hash)) {
- CERROR("%s: Wrong hash function %#x\n",
- fld->lcf_name, hash);
- return -EINVAL;
- }
-
- fld->lcf_count = 0;
- spin_lock_init(&fld->lcf_lock);
- fld->lcf_hash = &fld_hash[hash];
- fld->lcf_flags = LUSTRE_FLD_INIT;
- INIT_LIST_HEAD(&fld->lcf_targets);
-
- cache_size = FLD_CLIENT_CACHE_SIZE /
- sizeof(struct fld_cache_entry);
-
- cache_threshold = cache_size *
- FLD_CLIENT_CACHE_THRESHOLD / 100;
-
- fld->lcf_cache = fld_cache_init(fld->lcf_name,
- cache_size, cache_threshold);
- if (IS_ERR(fld->lcf_cache)) {
- rc = PTR_ERR(fld->lcf_cache);
- fld->lcf_cache = NULL;
- goto out;
- }
-
- rc = fld_client_debugfs_init(fld);
- if (rc)
- goto out;
-out:
- if (rc)
- fld_client_fini(fld);
- else
- CDEBUG(D_INFO, "%s: Using \"%s\" hash\n",
- fld->lcf_name, fld->lcf_hash->fh_name);
- return rc;
-}
-EXPORT_SYMBOL(fld_client_init);
-
-void fld_client_fini(struct lu_client_fld *fld)
-{
- struct lu_fld_target *target, *tmp;
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
- fld->lcf_count--;
- list_del(&target->ft_chain);
- if (target->ft_exp != NULL)
- class_export_put(target->ft_exp);
- kfree(target);
- }
- spin_unlock(&fld->lcf_lock);
-
- if (fld->lcf_cache != NULL) {
- if (!IS_ERR(fld->lcf_cache))
- fld_cache_fini(fld->lcf_cache);
- fld->lcf_cache = NULL;
- }
-}
-EXPORT_SYMBOL(fld_client_fini);
-
-int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op)
-{
- struct ptlrpc_request *req;
- struct lu_seq_range *prange;
- __u32 *op;
- int rc;
- struct obd_import *imp;
-
- LASSERT(exp != NULL);
-
- imp = class_exp2cliimp(exp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
- FLD_QUERY);
- if (req == NULL)
- return -ENOMEM;
-
- op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
- *op = fld_op;
-
- prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD);
- *prange = *range;
-
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = FLD_REQUEST_PORTAL;
- req->rq_reply_portal = MDC_REPLY_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- if (fld_op == FLD_LOOKUP &&
- imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
- req->rq_allow_replay = 1;
-
- if (fld_op != FLD_LOOKUP)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
- fld_enter_request(&exp->exp_obd->u.cli);
- rc = ptlrpc_queue_wait(req);
- fld_exit_request(&exp->exp_obd->u.cli);
- if (fld_op != FLD_LOOKUP)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
- if (rc)
- goto out_req;
-
- prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
- if (prange == NULL) {
- rc = -EFAULT;
- goto out_req;
- }
- *range = *prange;
-out_req:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
- __u32 flags, const struct lu_env *env)
-{
- struct lu_seq_range res = { 0 };
- struct lu_fld_target *target;
- int rc;
-
- fld->lcf_flags |= LUSTRE_FLD_RUN;
-
- rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
- if (rc == 0) {
- *mds = res.lsr_index;
- return 0;
- }
-
- /* Can not find it in the cache */
- target = fld_client_get_target(fld, seq);
- LASSERT(target != NULL);
-
- CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
- fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
-
- res.lsr_start = seq;
- fld_range_set_type(&res, flags);
- rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP);
-
- if (rc == 0) {
- *mds = res.lsr_index;
-
- fld_cache_insert(fld->lcf_cache, &res);
- }
- return rc;
-}
-EXPORT_SYMBOL(fld_client_lookup);
-
-void fld_client_flush(struct lu_client_fld *fld)
-{
- fld_cache_flush(fld->lcf_cache);
-}
-EXPORT_SYMBOL(fld_client_flush);
-
-static int __init fld_mod_init(void)
-{
- fld_debugfs_dir = ldebugfs_register(LUSTRE_FLD_NAME,
- debugfs_lustre_root,
- NULL, NULL);
- return PTR_ERR_OR_ZERO(fld_debugfs_dir);
-}
-
-static void __exit fld_mod_exit(void)
-{
- if (!IS_ERR_OR_NULL(fld_debugfs_dir))
- ldebugfs_remove(&fld_debugfs_dir);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre FLD");
-MODULE_LICENSE("GPL");
-
-module_init(fld_mod_init)
-module_exit(fld_mod_exit)
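
The removed fld_request.c picked an FLD target round-robin: normal sequences hash to seq modulo the target count (via do_div()), while special sequences always go to index 0 (MDT0). A hypothetical stand-alone sketch of that selection is below; the "normal sequence" cut-off value is an assumption for illustration.

/* Hypothetical sketch of the target selection done by the removed
 * fld_rrb_hash()/fld_rrb_scan(): normal sequences hash to seq % target_count,
 * everything else falls back to index 0 (MDT0). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SEQ_NORMAL_START 0x200000400ULL	/* assumed cut-off for "normal" sequences */

static bool seq_is_normal(uint64_t seq)
{
	return seq >= SEQ_NORMAL_START;
}

static unsigned int pick_target(uint64_t seq, unsigned int target_count)
{
	if (target_count == 0 || !seq_is_normal(seq))
		return 0;				/* special sequences live on MDT0 */
	return (unsigned int)(seq % target_count);	/* kernel code used do_div() here */
}

int main(void)
{
	printf("%u\n", pick_target(0x240000401ULL, 4));	/* round-robin over 4 targets */
	printf("%u\n", pick_target(0x1ULL, 4));		/* special seq -> 0 */
	return 0;
}
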
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
deleted file mode 100644
index 603f56e6095b..000000000000
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/lproc_fld.c
- *
- * FLD (FIDs Location Database)
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- * Di Wang <di.wang@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/module.h>
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_req_layout.h"
-#include "../include/lustre_fld.h"
-#include "../include/lustre_fid.h"
-#include "fld_internal.h"
-
-static int
-fld_debugfs_targets_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
- struct lu_fld_target *target;
-
- LASSERT(fld != NULL);
-
- spin_lock(&fld->lcf_lock);
- list_for_each_entry(target,
- &fld->lcf_targets, ft_chain)
- seq_printf(m, "%s\n", fld_target_name(target));
- spin_unlock(&fld->lcf_lock);
-
- return 0;
-}
-
-static int
-fld_debugfs_hash_seq_show(struct seq_file *m, void *unused)
-{
- struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
-
- LASSERT(fld != NULL);
-
- spin_lock(&fld->lcf_lock);
- seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
- spin_unlock(&fld->lcf_lock);
-
- return 0;
-}
-
-static ssize_t
-fld_debugfs_hash_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct lu_client_fld *fld;
- struct lu_fld_hash *hash = NULL;
- char fh_name[8];
- int i;
-
- if (count > sizeof(fh_name))
- return -ENAMETOOLONG;
-
- if (copy_from_user(fh_name, buffer, count) != 0)
- return -EFAULT;
-
- fld = ((struct seq_file *)file->private_data)->private;
- LASSERT(fld != NULL);
-
- for (i = 0; fld_hash[i].fh_name != NULL; i++) {
- if (count != strlen(fld_hash[i].fh_name))
- continue;
-
- if (!strncmp(fld_hash[i].fh_name, fh_name, count)) {
- hash = &fld_hash[i];
- break;
- }
- }
-
- if (hash != NULL) {
- spin_lock(&fld->lcf_lock);
- fld->lcf_hash = hash;
- spin_unlock(&fld->lcf_lock);
-
- CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
- fld->lcf_name, hash->fh_name);
- }
-
- return count;
-}
-
-static ssize_t
-fld_debugfs_cache_flush_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- struct lu_client_fld *fld = file->private_data;
-
- LASSERT(fld != NULL);
-
- fld_cache_flush(fld->lcf_cache);
-
- CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
-
- return count;
-}
-
-static int
-fld_debugfs_cache_flush_release(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return 0;
-}
-
-static struct file_operations fld_debugfs_cache_flush_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = fld_debugfs_cache_flush_write,
- .release = fld_debugfs_cache_flush_release,
-};
-
-LPROC_SEQ_FOPS_RO(fld_debugfs_targets);
-LPROC_SEQ_FOPS(fld_debugfs_hash);
-
-struct lprocfs_vars fld_client_debugfs_list[] = {
- { "targets", &fld_debugfs_targets_fops },
- { "hash", &fld_debugfs_hash_fops },
- { "cache_flush", &fld_debugfs_cache_flush_fops },
- { NULL }
-};
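
The removed "hash" debugfs file switched the client hash function by matching the written name against the NULL-terminated fld_hash[] table. A hypothetical stand-alone sketch of that name-to-descriptor match follows; the types and helpers are illustrative.

/* Hypothetical sketch of the name lookup used by the removed
 * fld_debugfs_hash_seq_write(): scan a NULL-terminated descriptor table. */
#include <stdio.h>
#include <string.h>

struct hash_desc {
	const char *name;
	unsigned int (*hash)(unsigned long long seq, unsigned int n);
};

static unsigned int rrb_hash(unsigned long long seq, unsigned int n)
{
	return n ? (unsigned int)(seq % n) : 0;	/* round-robin, as in fld_rrb_hash() */
}

static const struct hash_desc hash_table[] = {
	{ "RRB", rrb_hash },
	{ NULL,  NULL },			/* table is NULL-terminated */
};

static const struct hash_desc *find_hash(const char *name)
{
	for (int i = 0; hash_table[i].name; i++)
		if (strcmp(hash_table[i].name, name) == 0)
			return &hash_table[i];
	return NULL;
}

int main(void)
{
	const struct hash_desc *h = find_hash("RRB");

	printf("%s -> %u\n", h ? h->name : "none",
	       h ? h->hash(0x240000401ULL, 4) : 0);
	return 0;
}
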
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
deleted file mode 100644
index b0733134ee48..000000000000
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ /dev/null
@@ -1,3257 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#ifndef _LUSTRE_CL_OBJECT_H
-#define _LUSTRE_CL_OBJECT_H
-
-/** \defgroup clio clio
- *
- * Client objects implement io operations and cache pages.
- *
- * Examples: lov and osc are implementations of cl interface.
- *
- * Big Theory Statement.
- *
- * Layered objects.
- *
- * Client implementation is based on the following data-types:
- *
- * - cl_object
- *
- * - cl_page
- *
- * - cl_lock represents an extent lock on an object.
- *
- * - cl_io represents high-level i/o activity such as whole read/write
- * system call, or write-out of pages from under the lock being
- * canceled. cl_io has sub-ios that can be stopped and resumed
- * independently, thus achieving high degree of transfer
- * parallelism. Single cl_io can be advanced forward by
- * the multiple threads (although in the most usual case of
- * read/write system call it is associated with the single user
- * thread, that issued the system call).
- *
- * - cl_req represents a collection of pages for a transfer. cl_req is
- * constructed by req-forming engine that tries to saturate
- * transport with large and continuous transfers.
- *
- * Terminology
- *
- * - to avoid confusion, a high-level I/O operation like a read or write
- * system call is referred to as "an io", whereas a low-level I/O
- * operation, like an RPC, is referred to as "a transfer"
- *
- * - "generic code" means generic (not file system specific) code in the
- * hosting environment. "cl-code" means code (mostly in cl_*.c files) that
- * is not layer specific.
- *
- * Locking.
- *
- * - i_mutex
- * - PG_locked
- * - cl_object_header::coh_page_guard
- * - cl_object_header::coh_lock_guard
- * - lu_site::ls_guard
- *
- * See the top comment in cl_object.c for the description of overall locking and
- * reference-counting design.
- *
- * See comments below for the description of i/o, page, and dlm-locking
- * design.
- *
- * @{
- */
-
-/*
- * super-class definitions.
- */
-#include "lu_object.h"
-#include "linux/lustre_compat25.h"
-#include <linux/mutex.h>
-#include <linux/radix-tree.h>
-
-struct inode;
-
-struct cl_device;
-struct cl_device_operations;
-
-struct cl_object;
-struct cl_object_page_operations;
-struct cl_object_lock_operations;
-
-struct cl_page;
-struct cl_page_slice;
-struct cl_lock;
-struct cl_lock_slice;
-
-struct cl_lock_operations;
-struct cl_page_operations;
-
-struct cl_io;
-struct cl_io_slice;
-
-struct cl_req;
-struct cl_req_slice;
-
-/**
- * Operations for each data device in the client stack.
- *
- * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
- */
-struct cl_device_operations {
- /**
- * Initialize cl_req. This method is called top-to-bottom on all
- * devices in the stack to get them a chance to allocate layer-private
- * data, and to attach them to the cl_req by calling
- * cl_req_slice_add().
- *
- * \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see ccc_req_init()
- */
- int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-};
-
-/**
- * Device in the client stack.
- *
- * \see ccc_device, lov_device, lovsub_device, osc_device
- */
-struct cl_device {
- /** Super-class. */
- struct lu_device cd_lu_dev;
- /** Per-layer operation vector. */
- const struct cl_device_operations *cd_ops;
-};
-
-/** \addtogroup cl_object cl_object
- * @{ */
-/**
- * "Data attributes" of cl_object. Data attributes can be updated
- * independently for a sub-object, and top-object's attributes are calculated
- * from sub-objects' ones.
- */
-struct cl_attr {
- /** Object size, in bytes */
- loff_t cat_size;
- /**
- * Known minimal size, in bytes.
- *
- * This is only valid when at least one DLM lock is held.
- */
- loff_t cat_kms;
- /** Modification time. Measured in seconds since epoch. */
- time64_t cat_mtime;
- /** Access time. Measured in seconds since epoch. */
- time64_t cat_atime;
- /** Change time. Measured in seconds since epoch. */
- time64_t cat_ctime;
- /**
- * Blocks allocated to this cl_object on the server file system.
- *
- * \todo XXX An interface for block size is needed.
- */
- __u64 cat_blocks;
- /**
- * User identifier for quota purposes.
- */
- uid_t cat_uid;
- /**
- * Group identifier for quota purposes.
- */
- gid_t cat_gid;
-};
-
-/**
- * Fields in cl_attr that are being set.
- */
-enum cl_attr_valid {
- CAT_SIZE = 1 << 0,
- CAT_KMS = 1 << 1,
- CAT_MTIME = 1 << 3,
- CAT_ATIME = 1 << 4,
- CAT_CTIME = 1 << 5,
- CAT_BLOCKS = 1 << 6,
- CAT_UID = 1 << 7,
- CAT_GID = 1 << 8
-};
-
-/**
- * Sub-class of lu_object with methods common for objects on the client
- * stacks.
- *
- * cl_object: represents a regular file system object, both a file and a
- * stripe. cl_object is based on lu_object: it is identified by a fid,
- * layered, cached, hashed, and lrued. An important distinction from the
- * server side, where md_object and dt_object are used, is that cl_object
- * "fans out" at the lov/sns level: depending on the file layout, a single
- * file is represented as a set of "sub-objects" (stripes). At the implementation
- * level, struct lov_object contains an array of cl_objects. Each sub-object
- * is a full-fledged cl_object, having its fid, living in the lru and hash
- * table.
- *
- * This leads to the next important difference with the server side: on the
- * client, it's quite usual to have objects with the different sequence of
- * layers. For example, typical top-object is composed of the following
- * layers:
- *
- * - vvp
- * - lov
- *
- * whereas its sub-objects are composed of
- *
- * - lovsub
- * - osc
- *
- * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
- * track of the object-subobject relationship.
- *
- * Sub-objects are not cached independently: when top-object is about to
- * be discarded from the memory, all its sub-objects are torn-down and
- * destroyed too.
- *
- * \see ccc_object, lov_object, lovsub_object, osc_object
- */
-struct cl_object {
- /** super class */
- struct lu_object co_lu;
- /** per-object-layer operations */
- const struct cl_object_operations *co_ops;
- /** offset of page slice in cl_page buffer */
- int co_slice_off;
-};
-
-/**
- * Description of the client object configuration. This is used for the
- * creation of a new client object that is identified by a more state than
- * fid.
- */
-struct cl_object_conf {
- /** Super-class. */
- struct lu_object_conf coc_lu;
- union {
- /**
- * Object layout. This is consumed by lov.
- */
- struct lustre_md *coc_md;
- /**
- * Description of particular stripe location in the
- * cluster. This is consumed by osc.
- */
- struct lov_oinfo *coc_oinfo;
- } u;
- /**
- * VFS inode. This is consumed by vvp.
- */
- struct inode *coc_inode;
- /**
- * Layout lock handle.
- */
- struct ldlm_lock *coc_lock;
- /**
- * Operation to handle layout, OBJECT_CONF_XYZ.
- */
- int coc_opc;
-};
-
-enum {
- /** configure layout, set up a new stripe, must be called while
- * holding layout lock. */
- OBJECT_CONF_SET = 0,
- /** invalidate the current stripe configuration due to losing
- * layout lock. */
- OBJECT_CONF_INVALIDATE = 1,
- /** wait for old layout to go away so that new layout can be
- * set up. */
- OBJECT_CONF_WAIT = 2
-};
-
-/**
- * Operations implemented for each cl object layer.
- *
- * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
- */
-struct cl_object_operations {
- /**
- * Initialize page slice for this layer. Called top-to-bottom through
- * every object layer when a new cl_page is instantiated. Layer
- * keeping private per-page data, or requiring its own page operations
- * vector should allocate these data here, and attach then to the page
- * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
- * sense). Optional.
- *
- * \retval NULL success.
- *
- * \retval ERR_PTR(errno) failure code.
- *
- * \retval valid-pointer pointer to already existing referenced page
- * to be used instead of newly created.
- */
- int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
- /**
- * Initialize lock slice for this layer. Called top-to-bottom through
- * every object layer when a new cl_lock is instantiated. Layer
- * keeping private per-lock data, or requiring its own lock operations
- * vector should allocate these data here, and attach them to the lock
- * by calling cl_lock_slice_add(). Mandatory.
- */
- int (*coo_lock_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
- /**
- * Initialize io state for a given layer.
- *
- * called top-to-bottom once per io existence to initialize io
- * state. If layer wants to keep some state for this type of io, it
- * has to embed struct cl_io_slice in lu_env::le_ses, and register
- * slice with cl_io_slice_add(). It is guaranteed that all threads
- * participating in this io share the same session.
- */
- int (*coo_io_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
- /**
- * Fill portion of \a attr that this layer controls. This method is
- * called top-to-bottom through all object layers.
- *
- * \pre cl_object_header::coh_attr_guard of the top-object is locked.
- *
- * \return 0: to continue
- * \return +ve: to stop iterating through layers (but 0 is returned
- * from enclosing cl_object_attr_get())
- * \return -ve: to signal error
- */
- int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
- /**
- * Update attributes.
- *
- * \a valid is a bitmask composed from enum #cl_attr_valid, and
- * indicating what attributes are to be set.
- *
- * \pre cl_object_header::coh_attr_guard of the top-object is locked.
- *
- * \return the same convention as for
- * cl_object_operations::coo_attr_get() is used.
- */
- int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
- /**
- * Update object configuration. Called top-to-bottom to modify object
- * configuration.
- *
- * XXX error conditions and handling.
- */
- int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
- /**
- * Glimpse ast. Executed when glimpse ast arrives for a lock on this
- * object. Layers are supposed to fill parts of \a lvb that will be
- * shipped to the glimpse originator as a glimpse result.
- *
- * \see ccc_object_glimpse(), lovsub_object_glimpse(),
- * \see osc_object_glimpse()
- */
- int (*coo_glimpse)(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb);
-};
-
-/**
- * Extended header for client object.
- */
-struct cl_object_header {
- /** Standard lu_object_header. cl_object::co_lu::lo_header points
- * here. */
- struct lu_object_header coh_lu;
- /** \name locks
- * \todo XXX move locks below to the separate cache-lines, they are
- * mostly useless otherwise.
- */
- /** @{ */
- /** Lock protecting page tree. */
- spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
- /** @} locks */
- /** Radix tree of cl_page's, cached for this object. */
- struct radix_tree_root coh_tree;
- /** # of pages in radix tree. */
- unsigned long coh_pages;
- /** List of cl_lock's granted for this object. */
- struct list_head coh_locks;
-
- /**
- * Parent object. It is assumed that an object has a well-defined
- * parent, but not a well-defined child (there may be multiple
- * sub-objects, for the same top-object). cl_object_header::coh_parent
- * field allows certain code to be written generically, without
- * limiting possible cl_object layouts unduly.
- */
- struct cl_object_header *coh_parent;
- /**
- * Protects consistency between cl_attr of parent object and
- * attributes of sub-objects, that the former is calculated ("merged")
- * from.
- *
- * \todo XXX this can be read/write lock if needed.
- */
- spinlock_t coh_attr_guard;
- /**
- * Size of cl_page + page slices
- */
- unsigned short coh_page_bufsize;
- /**
- * Number of objects above this one: 0 for a top-object, 1 for its
- * sub-object, etc.
- */
- unsigned char coh_nesting;
-};
-
-/**
- * Helper macro: iterate over all layers of the object \a obj, assigning every
- * layer top-to-bottom to \a slice.
- */
-#define cl_object_for_each(slice, obj) \
- list_for_each_entry((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
-/**
- * Helper macro: iterate over all layers of the object \a obj, assigning every
- * layer bottom-to-top to \a slice.
- */
-#define cl_object_for_each_reverse(slice, obj) \
- list_for_each_entry_reverse((slice), \
- &(obj)->co_lu.lo_header->loh_layers, \
- co_lu.lo_linkage)
-/** @} cl_object */
-
-#ifndef pgoff_t
-#define pgoff_t unsigned long
-#endif
-
-#define CL_PAGE_EOF ((pgoff_t)~0ull)
-
-/** \addtogroup cl_page cl_page
- * @{ */
-
-/** \struct cl_page
- * Layered client page.
- *
- * cl_page: represents a portion of a file, cached in the memory. All pages
- * of the given file are of the same size, and are kept in the radix tree
- * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
- * of the top-level file object are first class cl_objects, they have their
- * own radix trees of pages and hence page is implemented as a sequence of
- * struct cl_pages's, linked into double-linked list through
- * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
- * corresponding radix tree at the corresponding logical offset.
- *
- * cl_page is associated with a VM page of the hosting environment (struct
- * page in the Linux kernel, for example). It is assumed that this
- * association is implemented by one of the cl_page layers (the top layer in
- * the current design) that
- *
- * - intercepts per-VM-page call-backs made by the environment (e.g.,
- * memory pressure),
- *
- * - translates state (page flag bits) and locking between lustre and
- * environment.
- *
- * The association between cl_page and struct page is immutable and
- * established when cl_page is created.
- *
- * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
- * this io exclusive access to this page w.r.t. other io attempts and
- * various events changing page state (such as transfer completion, or
- * eviction of the page from the memory). Note, that in general cl_io
- * cannot be identified with a particular thread, and page ownership is not
- * exactly equal to the current thread holding a lock on the page. Layer
- * implementing association between cl_page and struct page has to implement
- * ownership on top of available synchronization mechanisms.
- *
- * While the lustre client maintains the notion of page ownership by io,
- * hosting MM/VM usually has its own page concurrency control
- * mechanisms. For example, in Linux, page access is synchronized by the
- * per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
- * takes care to acquire and release such locks as necessary around the
- * calls to the file system methods (->readpage(), ->prepare_write(),
- * ->commit_write(), etc.). This leads to the situation when there are two
- * different ways to own a page in the client:
- *
- * - client code explicitly and voluntarily owns the page (cl_page_own());
- *
- * - VM locks a page and then calls the client, which has to "assume"
- * the ownership from the VM (cl_page_assume()).
- *
- * Dual methods to release ownership are cl_page_disown() and
- * cl_page_unassume().
- *
- * cl_page is reference counted (cl_page::cp_ref). When reference counter
- * drops to 0, the page is returned to the cache, unless it is in
- * cl_page_state::CPS_FREEING state, in which case it is immediately
- * destroyed.
- *
- * The general logic guaranteeing the absence of "existential races" for
- * pages is the following:
- *
- * - there are fixed known ways for a thread to obtain a new reference
- * to a page:
- *
- * - by doing a lookup in the cl_object radix tree, protected by the
- * spin-lock;
- *
- * - by starting from VM-locked struct page and following some
- * hosting environment method (e.g., following ->private pointer in
- * the case of Linux kernel), see cl_vmpage_page();
- *
- * - when the page enters cl_page_state::CPS_FREEING state, all these
- * ways are severed with the proper synchronization
- * (cl_page_delete());
- *
- * - entry into cl_page_state::CPS_FREEING is serialized by the VM page
- * lock;
- *
- * - no new references to the page in cl_page_state::CPS_FREEING state
- * are allowed (checked in cl_page_get()).
- *
- * Together this guarantees that when the last reference to a
- * cl_page_state::CPS_FREEING page is released, it is safe to destroy the
- * page, as no new references to it can be acquired at that point and no
- * existing ones remain.
- *
- * cl_page is a state machine. States are enumerated in enum
- * cl_page_state. Possible state transitions are enumerated in
- * cl_page_state_set(). State transition process (i.e., actual changing of
- * cl_page::cp_state field) is protected by the lock on the underlying VM
- * page.
- *
- * Linux Kernel implementation.
- *
- * Binding between cl_page and struct page is implemented in the vvp
- * layer. cl_page is attached to the
- * ->private pointer of the struct page, together with the setting of
- * PG_private bit in page->flags, and acquiring additional reference on the
- * struct page (much like struct buffer_head, or any similar file system
- * private data structures).
- *
- * PG_locked lock is used to implement both ownership and transfer
- * synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
- * states. No additional references are acquired for the duration of the
- * transfer.
- *
- * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
- * write-out is "protected" by the special PG_writeback bit.
- */
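-
-/*
- * A sketch of the two ownership paths described above; prototypes of the
- * cl_page_{own,disown,assume,unassume}() helpers are assumed, and error
- * handling is elided:
- *
- *	voluntary ownership by an io:
- *		if (cl_page_own(env, io, page) == 0) {
- *			... work on the page ...
- *			cl_page_disown(env, io, page);
- *		}
- *
- *	ownership assumed from a VM-locked vmpage:
- *		cl_page_assume(env, io, page);
- *		... work on the page ...
- *		cl_page_unassume(env, io, page);
- */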
-
-/**
- * States of cl_page. cl_page.c assumes particular order here.
- *
- * The page state machine is rather crude, as it doesn't recognize finer page
- * states like "dirty" or "up to date". This is because such states are not
- * always well defined for the whole stack (see, for example, the
- * implementation of the read-ahead, that hides page up-to-dateness to track
- * cache hits accurately). Such sub-states are maintained by the layers that
- * are interested in them.
- */
-enum cl_page_state {
- /**
- * Page is in the cache, un-owned. Page leaves cached state in the
- * following cases:
- *
- * - [cl_page_state::CPS_OWNED] io comes across the page and
- * owns it;
- *
- * - [cl_page_state::CPS_PAGEOUT] page is dirty, the
- * req-formation engine decides that it wants to include this page
- * into a cl_req being constructed, and yanks it from the cache;
- *
- * - [cl_page_state::CPS_FREEING] VM callback is executed to
- * evict the page from memory;
- *
- * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
- */
- CPS_CACHED,
- /**
- * Page is exclusively owned by some cl_io. Page may end up in this
- * state as a result of
- *
- * - io creating new page and immediately owning it;
- *
- * - [cl_page_state::CPS_CACHED] io finding existing cached page
- * and owning it;
- *
- * - [cl_page_state::CPS_OWNED] io finding existing owned page
- * and waiting for owner to release the page;
- *
- * Page leaves owned state in the following cases:
- *
- * - [cl_page_state::CPS_CACHED] io decides to leave the page in
- * the cache, doing nothing;
- *
- * - [cl_page_state::CPS_PAGEIN] io starts read transfer for
- * this page;
- *
- * - [cl_page_state::CPS_PAGEOUT] io starts immediate write
- * transfer for this page;
- *
- * - [cl_page_state::CPS_FREEING] io decides to destroy this
- * page (e.g., as part of truncate or extent lock cancellation).
- *
- * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
- */
- CPS_OWNED,
- /**
- * Page is being written out, as a part of a transfer. This state is
- * entered when req-formation logic decided that it wants this page to
- * be sent through the wire _now_. Specifically, it means that once
- * this state is achieved, transfer completion handler (with either
- * success or failure indication) is guaranteed to be executed against
- * this page independently of any locks and any scheduling decisions
- * made by the hosting environment (that effectively means that the
- * page is never put into cl_page_state::CPS_PAGEOUT state "in
- * advance". This property is mentioned, because it is important when
- * reasoning about possible dead-locks in the system). The page can
- * enter this state as a result of
- *
- * - [cl_page_state::CPS_OWNED] an io requesting an immediate
- * write-out of this page, or
- *
- * - [cl_page_state::CPS_CACHED] req-forming engine deciding
- * that it has enough dirty pages cached to issue a "good"
- * transfer.
- *
- * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
- * is completed---it is moved into cl_page_state::CPS_CACHED state.
- *
- * Underlying VM page is locked for the duration of transfer.
- *
- * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
- */
- CPS_PAGEOUT,
- /**
- * Page is being read in, as a part of a transfer. This is quite
- * similar to the cl_page_state::CPS_PAGEOUT state, except that
- * read-in is always "immediate"---there is no such thing as a sudden
- * construction of read cl_req from cached, presumably not up to date,
- * pages.
- *
- * Underlying VM page is locked for the duration of transfer.
- *
- * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
- */
- CPS_PAGEIN,
- /**
- * Page is being destroyed. This state is entered when client decides
- * that page has to be deleted from its host object, as, e.g., a part
- * of truncate.
- *
- * Once this state is reached, there is no way to escape it.
- *
- * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
- */
- CPS_FREEING,
- CPS_NR
-};
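-
-/*
- * Typical sequences implied by the states above (a sketch only; see
- * cl_page_state_set() for the authoritative transition table):
- *
- *	write-back:	CPS_CACHED -> CPS_OWNED -> CPS_PAGEOUT -> CPS_CACHED
- *	read-in:	CPS_CACHED -> CPS_OWNED -> CPS_PAGEIN  -> CPS_CACHED
- *	truncate:	CPS_CACHED -> CPS_OWNED -> CPS_FREEING
- */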
-
-enum cl_page_type {
- /** Host page, the page is from the host inode which the cl_page
- * belongs to. */
- CPT_CACHEABLE = 1,
-
- /** Transient page, the transient cl_page is used to bind a cl_page
- * to a vmpage that does not belong to the same object as the cl_page.
- * It is used in DirectIO, lockless IO and liblustre. */
- CPT_TRANSIENT,
-};
-
-/**
- * Flags maintained for every cl_page.
- */
-enum cl_page_flags {
- /**
- * Set when pagein completes. Used for debugging (read completes at
- * most once for a page).
- */
- CPF_READ_COMPLETED = 1 << 0
-};
-
-/**
- * Fields are protected by the lock on struct page, except for atomics and
- * immutables.
- *
- * \invariant Data type invariants are in cl_page_invariant(). Basically:
- * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
- * list, consistent with the parent/child pointers in the cl_page::cp_obj and
- * cl_page::cp_owner (when set).
- */
-struct cl_page {
- /** Reference counter. */
- atomic_t cp_ref;
- /** An object this page is a part of. Immutable after creation. */
- struct cl_object *cp_obj;
- /** Logical page index within the object. Immutable after creation. */
- pgoff_t cp_index;
- /** List of slices. Immutable after creation. */
- struct list_head cp_layers;
- /** Parent page, NULL for top-level page. Immutable after creation. */
- struct cl_page *cp_parent;
- /** Lower-layer page. NULL for bottommost page. Immutable after
- * creation. */
- struct cl_page *cp_child;
- /**
- * Page state. This field is const to avoid accidental update, it is
- * modified only internally within cl_page.c. Protected by a VM lock.
- */
- const enum cl_page_state cp_state;
- /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
- struct list_head cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- struct mutex cp_mutex;
- /** Linkage of pages within cl_req. */
- struct list_head cp_flight;
- /** Transfer error. */
- int cp_error;
-
- /**
- * Page type. Only CPT_TRANSIENT is used so far. Immutable after
- * creation.
- */
- enum cl_page_type cp_type;
-
- /**
- * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
- * by sub-io. Protected by a VM lock.
- */
- struct cl_io *cp_owner;
- /**
- * Debug information: the task owning the page.
- */
- struct task_struct *cp_task;
- /**
- * Owning IO request in cl_page_state::CPS_PAGEOUT and
- * cl_page_state::CPS_PAGEIN states. This field is maintained only in
- * the top-level pages. Protected by a VM lock.
- */
- struct cl_req *cp_req;
- /** List of references to this page, for debugging. */
- struct lu_ref cp_reference;
- /** Link to an object, for debugging. */
- struct lu_ref_link cp_obj_ref;
- /** Link to a queue, for debugging. */
- struct lu_ref_link cp_queue_ref;
- /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
- /** Assigned if doing a sync_io */
- struct cl_sync_io *cp_sync_io;
-};
-
-/**
- * Per-layer part of cl_page.
- *
- * \see ccc_page, lov_page, osc_page
- */
-struct cl_page_slice {
- struct cl_page *cpl_page;
- /**
- * Object slice corresponding to this page slice. Immutable after
- * creation.
- */
- struct cl_object *cpl_obj;
- const struct cl_page_operations *cpl_ops;
- /** Linkage into cl_page::cp_layers. Immutable after creation. */
- struct list_head cpl_linkage;
-};
-
-/**
- * Lock mode. For the client extent locks.
- *
- * \warning: cl_lock_mode_match() assumes particular ordering here.
- * \ingroup cl_lock
- */
-enum cl_lock_mode {
- /**
- * Mode of a lock that protects no data, and exists only as a
- * placeholder. This is used for `glimpse' requests. A phantom lock
- * might get promoted to real lock at some point.
- */
- CLM_PHANTOM,
- CLM_READ,
- CLM_WRITE,
- CLM_GROUP
-};
-
-/**
- * Requested transfer type.
- * \ingroup cl_req
- */
-enum cl_req_type {
- CRT_READ,
- CRT_WRITE,
- CRT_NR
-};
-
-/**
- * Per-layer page operations.
- *
- * Methods taking an \a io argument are for the activity happening in the
- * context of given \a io. Page is assumed to be owned by that io, except for
- * the obvious cases (like cl_page_operations::cpo_own()).
- *
- * \see vvp_page_ops, lov_page_ops, osc_page_ops
- */
-struct cl_page_operations {
- /**
- * cl_page<->struct page methods. Only one layer in the stack has to
- * implement these. Current code assumes that this functionality is
- * provided by the topmost layer, see cl_page_disown0() as an example.
- */
-
- /**
- * \return the underlying VM page. Optional.
- */
- struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
- * Called when \a io acquires this page into the exclusive
- * ownership. When this method returns, it is guaranteed that the page is
- * not owned by another io, and no transfer is going on against
- * it. Optional.
- *
- * \see cl_page_own()
- * \see vvp_page_own(), lov_page_own()
- */
- int (*cpo_own)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, int nonblock);
- /** Called when ownership is yielded. Optional.
- *
- * \see cl_page_disown()
- * \see vvp_page_disown()
- */
- void (*cpo_disown)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
- * Called for a page that is already "owned" by \a io from VM point of
- * view. Optional.
- *
- * \see cl_page_assume()
- * \see vvp_page_assume(), lov_page_assume()
- */
- void (*cpo_assume)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /** Dual to cl_page_operations::cpo_assume(). Optional. Called
- * bottom-to-top when IO releases a page without actually unlocking
- * it.
- *
- * \see cl_page_unassume()
- * \see vvp_page_unassume()
- */
- void (*cpo_unassume)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
- * Announces whether the page contains valid data, as indicated by \a uptodate.
- *
- * \see cl_page_export()
- * \see vvp_page_export()
- */
- void (*cpo_export)(const struct lu_env *env,
- const struct cl_page_slice *slice, int uptodate);
- /**
- * Unmaps page from the user space (if it is mapped).
- *
- * \see cl_page_unmap()
- * \see vvp_page_unmap()
- */
- int (*cpo_unmap)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
- * Checks whether underlying VM page is locked (in the suitable
- * sense). Used for assertions.
- *
- * \retval -EBUSY: page is protected by a lock of a given mode;
- * \retval -ENODATA: page is not protected by a lock;
- * \retval 0: this layer cannot decide. (Should never happen.)
- */
- int (*cpo_is_vmlocked)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
- * Page destruction.
- */
-
- /**
- * Called when page is truncated from the object. Optional.
- *
- * \see cl_page_discard()
- * \see vvp_page_discard(), osc_page_discard()
- */
- void (*cpo_discard)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
- * Called when page is removed from the cache, and is about to be
- * destroyed. Optional.
- *
- * \see cl_page_delete()
- * \see vvp_page_delete(), osc_page_delete()
- */
- void (*cpo_delete)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /** Destructor. Frees resources and slice itself. */
- void (*cpo_fini)(const struct lu_env *env,
- struct cl_page_slice *slice);
-
- /**
- * Checks whether the page is protected by a cl_lock. This is a
- * per-layer method, because certain layers have ways to check for the
- * lock much more efficiently than through the generic locks scan, or
- * implement locking mechanisms separate from cl_lock, e.g.,
- * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
- * being canceled, or scheduled for cancellation as soon as the last
- * user goes away, too.
- *
- * \retval -EBUSY: page is protected by a lock of a given mode;
- * \retval -ENODATA: page is not protected by a lock;
- * \retval 0: this layer cannot decide.
- *
- * \see cl_page_is_under_lock()
- */
- int (*cpo_is_under_lock)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-
- /**
- * Optional debugging helper. Prints given page slice.
- *
- * \see cl_page_print()
- */
- int (*cpo_print)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t p);
- /**
- * \name transfer
- *
- * Transfer methods. See comment on cl_req for a description of
- * transfer formation and life-cycle.
- *
- * @{
- */
- /**
- * Request type dependent vector of operations.
- *
- * Transfer operations depend on transfer mode (cl_req_type). To avoid
- * passing transfer mode to each and every of these methods, and to
- * avoid branching on request type inside of the methods, separate
- * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are
- * provided. That is, method invocation usually looks like
- *
- * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
- */
- struct {
- /**
- * Called when a page is submitted for a transfer as a part of
- * cl_page_list.
- *
- * \return 0 : page is eligible for submission;
- * \return -EALREADY : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_prep()
- */
- int (*cpo_prep)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
- * Completion handler. This is guaranteed to be eventually
- * fired after cl_page_operations::cpo_prep() or
- * cl_page_operations::cpo_make_ready() call.
- *
- * This method can be called in a non-blocking context. It is
- * guaranteed however, that the page involved and its object
- * are pinned in memory (and, hence, calling cl_page_put() is
- * safe).
- *
- * \see cl_page_completion()
- */
- void (*cpo_completion)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret);
- /**
- * Called when cached page is about to be added to the
- * cl_req as a part of req formation.
- *
- * \return 0 : proceed with this page;
- * \return -EAGAIN : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_make_ready()
- */
- int (*cpo_make_ready)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
- * Announce that this page is to be written out
- * opportunistically, that is, page is dirty, it is not
- * necessary to start write-out transfer right now, but
- * eventually page has to be written out.
- *
- * Main caller of this is the write path (see
- * vvp_io_commit_write()), using this method to build a
- * "transfer cache" from which large transfers are then
- * constructed by the req-formation engine.
- *
- * \todo XXX it would make sense to add page-age tracking
- * semantics here, and to oblige the req-formation engine to
- * send the page out before it becomes too old.
- *
- * \see cl_page_cache_add()
- */
- int (*cpo_cache_add)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- } io[CRT_NR];
- /**
- * Tell the transfer engine that only the [from, to] part of a page
- * should be transmitted.
- *
- * This is used for immediate transfers.
- *
- * \todo XXX this is not a very good interface. It would be much better
- * if all transfer parameters were supplied as arguments to
- * cl_io_operations::cio_submit() call, but it is not clear how to do
- * this for page queues.
- *
- * \see cl_page_clip()
- */
- void (*cpo_clip)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int from, int to);
- /**
- * \pre the page was queued for transferring.
- * \post page is removed from the client's pending list, or -EBUSY
- * is returned if it is already being transferred.
- *
- * This is one of the few page operations which:
- * 0. is called from the top level;
- * 1. does not have the vmpage locked;
- * 2. requires every layer to synchronize execution of its ->cpo_cancel()
- * with completion handlers. Osc uses the client obd lock for this
- * purpose. Since there is no vvp_page_cancel() and no
- * lov_page_cancel(), cpo_cancel() is de facto protected by the client lock.
- *
- * \see osc_page_cancel().
- */
- int (*cpo_cancel)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
- * Write out a page by the kernel. This is only called by ll_writepage
- * right now.
- *
- * \see cl_page_flush()
- */
- int (*cpo_flush)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /** @} transfer */
-};
-
-/**
- * Helper macro, dumping detailed information about \a page into a log.
- */
-#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-/**
- * Helper macro, dumping shorter information about \a page into a log.
- */
-#define CL_PAGE_HEADER(mask, env, page, format, ...) \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
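-
-/*
- * Usage sketch for the two macros above; the debug mask and message are
- * arbitrary examples:
- *
- *	CL_PAGE_DEBUG(D_ERROR, env, page, "unexpected state %d\n",
- *		      page->cp_state);
- *	CL_PAGE_HEADER(D_TRACE, env, page, "releasing page\n");
- */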
-
-static inline int __page_in_use(const struct cl_page *page, int refc)
-{
- if (page->cp_type == CPT_CACHEABLE)
- ++refc;
- LASSERT(atomic_read(&page->cp_ref) > 0);
- return (atomic_read(&page->cp_ref) > refc);
-}
-#define cl_page_in_use(pg) __page_in_use(pg, 1)
-#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
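-
-/*
- * A usage sketch: a page that is about to be freed is expected to have no
- * users beyond the cache itself, which an assertion can check:
- *
- *	LASSERT(!cl_page_in_use(page));
- */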
-
-/** @} cl_page */
-
-/** \addtogroup cl_lock cl_lock
- * @{ */
-/** \struct cl_lock
- *
- * Extent locking on the client.
- *
- * LAYERING
- *
- * The locking model of the new client code is built around
- *
- * struct cl_lock
- *
- * data-type representing an extent lock on a regular file. cl_lock is a
- * layered object (much like cl_object and cl_page), it consists of a header
- * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
- * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
- *
- * All locks for a given object are linked into cl_object_header::coh_locks
- * list (protected by cl_object_header::coh_lock_guard spin-lock) through
- * cl_lock::cll_linkage. Currently this list is not sorted in any way. We can
- * sort it by starting lock offset, or use an altogether different data
- * structure such as a tree.
- *
- * Typical cl_lock consists of the two layers:
- *
- * - vvp_lock (vvp specific data), and
- * - lov_lock (lov specific data).
- *
- * lov_lock contains an array of sub-locks. Each of these sub-locks is a
- * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
- *
- * - lovsub_lock, and
- * - osc_lock
- *
- * Each sub-lock is associated with a cl_object (representing stripe
- * sub-object or the file to which the top-level cl_lock is associated), and is
- * linked into that cl_object::coh_locks. In this respect cl_lock is similar to
- * cl_object (that at lov layer also fans out into multiple sub-objects), and
- * is different from cl_page, that doesn't fan out (there is usually exactly
- * one osc_page for every vvp_page). We shall call vvp-lov portion of the lock
- * a "top-lock" and its lovsub-osc portion a "sub-lock".
- *
- * LIFE CYCLE
- *
- * cl_lock is reference counted. When reference counter drops to 0, lock is
- * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING
- * lock is destroyed when last reference is released. Referencing between
- * top-lock and its sub-locks is described in the lov documentation module.
- *
- * STATE MACHINE
- *
- * Also, cl_lock is a state machine. This requires some clarification. One of
- * the goals of client IO re-write was to make IO path non-blocking, or at
- * least to make it easier to make it non-blocking in the future. Here
- * `non-blocking' means that when a system call (read, write, truncate)
- * reaches a situation where it has to wait for a communication with the
- * server, it should --instead of waiting-- remember its current state and
- * switch to some other work. E.g,. instead of waiting for a lock enqueue,
- * client should proceed doing IO on the next stripe, etc. Obviously this is
- * rather radical redesign, and it is not planned to be fully implemented at
- * this time, instead we are putting some infrastructure in place, that would
- * make it easier to do asynchronous non-blocking IO easier in the
- * future. Specifically, where old locking code goes to sleep (waiting for
- * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
- * enqueue reply comes, its completion handler signals that lock state-machine
- * is ready to transit to the next state. There is some generic code in
- * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
- * this cl_lock.c code, it looks like locking is done in normal blocking
- * fashion, and at the same time it is possible to switch to the non-blocking
- * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
- * functions).
- *
- * For a description of state machine states and transitions see enum
- * cl_lock_state.
- *
- * There are two ways to restrict a set of states which lock might move to:
- *
- * - placing a "hold" on a lock guarantees that lock will not be moved
- * into cl_lock_state::CLS_FREEING state until hold is released. Hold
- * can be only acquired on a lock that is not in
- * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
- * cl_lock::cll_holds. Hold protects lock from cancellation and
- * destruction. Requests to cancel and destroy a lock on hold will be
- * recorded, but only honored when last hold on a lock is released;
- *
- * - placing a "user" on a lock guarantees that lock will not leave
- * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
- * states, once it enters this set. That is, if a user is added onto a
- * lock in a state not from this set, it doesn't immediately force the
- * lock to move into this set, but once the lock enters this set it will
- * remain there until all users are removed. Lock users are counted in
- * cl_lock::cll_users.
- *
- * A user is used to ensure that the lock is not canceled or destroyed while
- * it is being enqueued, or actively used by some IO.
- *
- * Currently, a user always comes with a hold (cl_lock_invariant()
- * checks that a number of holds is not less than a number of users).
- *
- * CONCURRENCY
- *
- * This is how lock state-machine operates. struct cl_lock contains a mutex
- * cl_lock::cll_guard that protects struct fields.
- *
- * - mutex is taken, and cl_lock::cll_state is examined.
- *
- * - for every state there are possible target states where lock can move
- * into. They are tried in order. Attempts to move into next state are
- * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
- *
- * - if the transition can be performed immediately, state is changed,
- * and mutex is released.
- *
- * - if the transition requires blocking, _try() function returns
- * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
- * sleep, waiting for possibility of lock state change. It is woken
- * up when some event occurs, that makes lock state change possible
- * (e.g., the reception of the reply from the server), and repeats
- * the loop.
- *
- * Top-lock and sub-lock have separate mutexes and the latter has to be taken
- * first to avoid dead-lock.
- *
- * To see an example of interaction of all these issues, take a look at the
- * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
- * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
- * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
- * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
- * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
- * done in parallel, rather than one after another (this is used for glimpse
- * locks, that cannot dead-lock).
- *
- * INTERFACE AND USAGE
- *
- * struct cl_lock_operations provides a number of call-backs that are invoked
- * when events of interest occur. Layers can intercept and handle glimpse,
- * blocking, cancel ASTs and a reception of the reply from the server.
- *
- * One important difference with the old client locking model is that new
- * client has a representation for the top-lock, whereas in the old code only
- * sub-locks existed as real data structures and file-level locks were
- * represented by "request sets" that were created and destroyed on each and
- * every lock creation.
- *
- * Top-locks are cached, and can be found in the cache by the system calls. It
- * is possible that top-lock is in cache, but some of its sub-locks were
- * canceled and destroyed. In that case top-lock has to be enqueued again
- * before it can be used.
- *
- * Overall process of the locking during IO operation is as following:
- *
- * - once parameters for IO are setup in cl_io, cl_io_operations::cio_lock()
- * is called on each layer. Responsibility of this method is to add locks,
- * needed by a given layer into cl_io.ci_lockset.
- *
- * - once locks for all layers were collected, they are sorted to avoid
- * dead-locks (cl_io_locks_sort()), and enqueued.
- *
- * - when all locks are acquired, IO is performed;
- *
- * - locks are released into cache.
- *
- * Striping introduces major additional complexity into locking. The
- * fundamental problem is that it is generally unsafe to actively use (hold)
- * two locks on the different OST servers at the same time, as this introduces
- * inter-server dependency and can lead to cascading evictions.
- *
- * Basic solution is to sub-divide large read/write IOs into smaller pieces so
- * that no multi-stripe locks are taken (note that this design abandons POSIX
- * read/write semantics). Such pieces ideally can be executed concurrently. At
- * the same time, certain types of IO cannot be sub-divided without
- * sacrificing correctness. This includes:
- *
- * - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
- * atomicity;
- *
- * - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
- *
- * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
- * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
- * has to be held together with the usual lock on [offset, offset + count].
- *
- * As multi-stripe locks have to be allowed, it makes sense to cache them, so
- * that, for example, a sequence of O_APPEND writes can proceed quickly
- * without going down to the individual stripes to do lock matching. On the
- * other hand, multi-stripe locks shouldn't be used by normal read/write
- * calls. To achieve this, every layer can implement ->clo_fits_into() method,
- * that is called by lock matching code (cl_lock_lookup()), and that can be
- * used to selectively disable matching of certain locks for certain IOs. For
- * example, the lov layer implements lov_lock_fits_into(), which allows
- * multi-stripe
- * locks to be matched only for truncates and O_APPEND writes.
- *
- * Interaction with DLM
- *
- * In the expected setup, cl_lock is ultimately backed up by a collection of
- * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
- * implemented in osc layer, that also matches DLM events (ASTs, cancellation,
- * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed
- * description of interaction with DLM.
- */
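-
-/*
- * Shape of the try-and-wait pattern described under CONCURRENCY above;
- * state_try() is a placeholder for any of the cl_{enqueue,unlock,wait}_try()
- * helpers, and the mutex is shown directly on cll_guard for brevity:
- *
- *	mutex_lock(&lock->cll_guard);
- *	while (state_try(env, lock) == CLO_WAIT) {
- *		mutex_unlock(&lock->cll_guard);
- *		wait on lock->cll_wq until a state change is signaled
- *		mutex_lock(&lock->cll_guard);
- *	}
- *	mutex_unlock(&lock->cll_guard);
- */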
-
-/**
- * Lock description.
- */
-struct cl_lock_descr {
- /** Object this lock is granted for. */
- struct cl_object *cld_obj;
- /** Index of the first page protected by this lock. */
- pgoff_t cld_start;
- /** Index of the last page (inclusive) protected by this lock. */
- pgoff_t cld_end;
- /** Group ID, for group lock */
- __u64 cld_gid;
- /** Lock mode. */
- enum cl_lock_mode cld_mode;
- /**
- * flags to enqueue lock. A combination of bit-flags from
- * enum cl_enq_flags.
- */
- __u32 cld_enq_flags;
-};
-
-#define DDESCR "%s(%d):[%lu, %lu]"
-#define PDESCR(descr) \
- cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
- (descr)->cld_start, (descr)->cld_end
-
-const char *cl_lock_mode_name(const enum cl_lock_mode mode);
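-
-/*
- * DDESCR/PDESCR are format helpers for logging a lock description; a
- * sketch with an arbitrarily chosen debug mask:
- *
- *	CDEBUG(D_DLMTRACE, "matched lock " DDESCR "\n",
- *	       PDESCR(&lock->cll_descr));
- */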
-
-/**
- * Lock state-machine states.
- *
- * \htmlonly
- * <pre>
- *
- * Possible state transitions:
- *
- * +------------------>NEW
- * | |
- * | | cl_enqueue_try()
- * | |
- * | cl_unuse_try() V
- * | +--------------QUEUING (*)
- * | | |
- * | | | cl_enqueue_try()
- * | | |
- * | | cl_unuse_try() V
- * sub-lock | +-------------ENQUEUED (*)
- * canceled | | |
- * | | | cl_wait_try()
- * | | |
- * | | (R)
- * | | |
- * | | V
- * | | HELD<---------+
- * | | | |
- * | | | | cl_use_try()
- * | | cl_unuse_try() | |
- * | | | |
- * | | V ---+
- * | +------------>INTRANSIT (D) <--+
- * | | |
- * | cl_unuse_try() | | cached lock found
- * | | | cl_use_try()
- * | | |
- * | V |
- * +------------------CACHED---------+
- * |
- * (C)
- * |
- * V
- * FREEING
- *
- * Legend:
- *
- * In states marked with (*) transition to the same state (i.e., a loop
- * in the diagram) is possible.
- *
- * (R) is the point where Receive call-back is invoked: it allows layers
- * to handle arrival of lock reply.
- *
- * (C) is the point where Cancellation call-back is invoked.
- *
- * (D) is the transit state which means the lock is changing.
- *
- * Transition to FREEING state is possible from any other state in the
- * diagram in case of unrecoverable error.
- * </pre>
- * \endhtmlonly
- *
- * These states are for individual cl_lock object. Top-lock and its sub-locks
- * can be in the different states. Another way to say this is that we have
- * nested state-machines.
- *
- * Separate QUEUING and ENQUEUED states are needed to support non-blocking
- * operation for locks with multiple sub-locks. Imagine lock on a file F, that
- * intersects 3 stripes S0, S1, and S2. To enqueue F client has to send
- * enqueue to S0, wait for its completion, then send enqueue for S1, wait for
- * its completion and at last enqueue lock for S2, and wait for its
- * completion. In that case, top-lock is in QUEUING state while S0, S1 are
- * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
- * that in this case, sub-locks move from state to state, and top-lock remains
- * in the same state).
- */
-enum cl_lock_state {
- /**
- * Lock that wasn't yet enqueued
- */
- CLS_NEW,
- /**
- * Enqueue is in progress, blocking for some intermediate interaction
- * with the other side.
- */
- CLS_QUEUING,
- /**
- * Lock is fully enqueued, waiting for server to reply when it is
- * granted.
- */
- CLS_ENQUEUED,
- /**
- * Lock granted, actively used by some IO.
- */
- CLS_HELD,
- /**
- * This state is used to mark that the lock is being used, or unused.
- * We need this state because the lock may have several sublocks,
- * so it's impossible to bring all sublocks into the CLS_HELD state
- * atomically when the lock is used, or all sublocks into CLS_CACHED
- * when it is unused.
- * If a thread is referring to a lock, and it sees the lock is in this
- * state, it must wait for the lock.
- * See state diagram for details.
- */
- CLS_INTRANSIT,
- /**
- * Lock granted, not used.
- */
- CLS_CACHED,
- /**
- * Lock is being destroyed.
- */
- CLS_FREEING,
- CLS_NR
-};
-
-enum cl_lock_flags {
- /**
- * lock has been cancelled. This flag is never cleared once set (by
- * cl_lock_cancel0()).
- */
- CLF_CANCELLED = 1 << 0,
- /** cancellation is pending for this lock. */
- CLF_CANCELPEND = 1 << 1,
- /** destruction is pending for this lock. */
- CLF_DOOMED = 1 << 2,
- /** from enqueue RPC reply upcall. */
- CLF_FROM_UPCALL = 1 << 3,
-};
-
-/**
- * Lock closure.
- *
- * Lock closure is a collection of locks (both top-locks and sub-locks) that
- * might be updated as a result of an operation on a certain lock (which lock
- * this is a closure of).
- *
- * Closures are needed to guarantee dead-lock freedom in the presence of
- *
- * - nested state-machines (top-lock state-machine composed of sub-lock
- * state-machines), and
- *
- * - shared sub-locks.
- *
- * Specifically, many operations, such as lock enqueue, wait, unlock,
- * etc. start from a top-lock, and then operate on a sub-locks of this
- * top-lock, holding a top-lock mutex. When sub-lock state changes as a result
- * of such operation, this change has to be propagated to all top-locks that
- * share this sub-lock. Obviously, no natural lock ordering (e.g.,
- * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has
- * to be used. Lock closure systematizes this try-and-repeat logic.
- */
-struct cl_lock_closure {
- /**
- * Lock that is mutexed when closure construction is started. When
- * closure is in `wait' mode (cl_lock_closure::clc_wait), mutex on
- * origin is released before waiting.
- */
- struct cl_lock *clc_origin;
- /**
- * List of enclosed locks, so far. Locks are linked here through
- * cl_lock::cll_inclosure.
- */
- struct list_head clc_list;
- /**
- * True iff closure is in a `wait' mode. This determines what
- * cl_lock_enclosure() does when a lock L to be added to the closure
- * is currently mutexed by some other thread.
- *
- * If cl_lock_closure::clc_wait is not set, then closure construction
- * fails with CLO_REPEAT immediately.
- *
- * In wait mode, cl_lock_enclosure() waits until next attempt to build
- * a closure might succeed. To this end it releases an origin mutex
- * (cl_lock_closure::clc_origin), that has to be the only lock mutex
- * owned by the current thread, and then waits on L mutex (by grabbing
- * it and immediately releasing), before returning CLO_REPEAT to the
- * caller.
- */
- int clc_wait;
- /** Number of locks in the closure. */
- int clc_nr;
-};
-
-/**
- * Layered client lock.
- */
-struct cl_lock {
- /** Reference counter. */
- atomic_t cll_ref;
- /** List of slices. Immutable after creation. */
- struct list_head cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- struct list_head cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
- struct cl_lock_descr cll_descr;
- /** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- wait_queue_head_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutexes of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
- struct mutex cll_guard;
- struct task_struct *cll_guarder;
- int cll_depth;
-
- /**
- * the owner for INTRANSIT state
- */
- struct task_struct *cll_intransit_owner;
- int cll_error;
- /**
- * Number of holds on a lock. A hold prevents a lock from being
- * canceled and destroyed. Protected by cl_lock::cll_guard.
- *
- * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release()
- */
- int cll_holds;
- /**
- * Number of lock users. Valid in cl_lock_state::CLS_HELD state
- * only. Lock user pins lock in CLS_HELD state. Protected by
- * cl_lock::cll_guard.
- *
- * \see cl_wait(), cl_unuse().
- */
- int cll_users;
- /**
- * Flag bit-mask. Values from enum cl_lock_flags. Updates are
- * protected by cl_lock::cll_guard.
- */
- unsigned long cll_flags;
- /**
- * A linkage into a list of locks in a closure.
- *
- * \see cl_lock_closure
- */
- struct list_head cll_inclosure;
- /**
- * Conflict lock at queuing time.
- */
- struct cl_lock *cll_conflict;
- /**
- * A list of references to this lock, for debugging.
- */
- struct lu_ref cll_reference;
- /**
- * A list of holds on this lock, for debugging.
- */
- struct lu_ref cll_holders;
- /**
- * A reference for cl_lock::cll_descr::cld_obj. For debugging.
- */
- struct lu_ref_link cll_obj_ref;
-#ifdef CONFIG_LOCKDEP
- /* "dep_map" name is assumed by lockdep.h macros. */
- struct lockdep_map dep_map;
-#endif
-};
-
-/**
- * Per-layer part of cl_lock
- *
- * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
- */
-struct cl_lock_slice {
- struct cl_lock *cls_lock;
- /** Object slice corresponding to this lock slice. Immutable after
- * creation. */
- struct cl_object *cls_obj;
- const struct cl_lock_operations *cls_ops;
- /** Linkage into cl_lock::cll_layers. Immutable after creation. */
- struct list_head cls_linkage;
-};
-
-/**
- * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}().
- *
- * NOTE: lov_subresult() depends on ordering here.
- */
-enum cl_lock_transition {
- /** operation cannot be completed immediately. Wait for state change. */
- CLO_WAIT = 1,
- /** operation had to release lock mutex, restart. */
- CLO_REPEAT = 2,
- /** lower layer re-enqueued. */
- CLO_REENQUEUED = 3,
-};
-
-/**
- *
- * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
- */
-struct cl_lock_operations {
- /**
- * \name statemachine
- *
- * State machine transitions. These 3 methods are called to transfer
- * lock from one state to another, as described in the commentary
- * above enum #cl_lock_state.
- *
- * \retval 0 this layer has nothing more to do before
- * transition to the target state happens;
- *
- * \retval CLO_REPEAT method had to release and re-acquire cl_lock
- * mutex, repeat invocation of transition method
- * across all layers;
- *
- * \retval CLO_WAIT this layer cannot move to the target state
- * immediately, as it has to wait for certain event
- * (e.g., the communication with the server). It
- * is guaranteed, that when the state transfer
- * becomes possible, cl_lock::cll_wq wait-queue
- * is signaled. Caller can wait for this event by
- * calling cl_lock_state_wait();
- *
- * \retval -ve failure, abort state transition, move the lock
- * into cl_lock_state::CLS_FREEING state, and set
- * cl_lock::cll_error.
- *
- * Once all layers voted to agree to transition (by returning 0), lock
- * is moved into corresponding target state. All state transition
- * methods are optional.
- */
- /** @{ */
- /**
- * Attempts to enqueue the lock. Called top-to-bottom.
- *
- * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
- * \see osc_lock_enqueue()
- */
- int (*clo_enqueue)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
- /**
- * Attempts to wait for enqueue result. Called top-to-bottom.
- *
- * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait()
- */
- int (*clo_wait)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Attempts to unlock the lock. Called bottom-to-top. In addition to
- * usual return values of lock state-machine methods, this can return
- * -ESTALE to indicate that lock cannot be returned to the cache, and
- * has to be re-initialized.
- * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
- */
- int (*clo_unuse)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Notifies layer that cached lock is started being used.
- *
- * \pre lock->cll_state == CLS_CACHED
- *
- * \see lov_lock_use(), osc_lock_use()
- */
- int (*clo_use)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} statemachine */
- /**
- * A method invoked when lock state is changed (as a result of state
- * transition). This is used, for example, to track when the state of
- * a sub-lock changes, to propagate this change to the corresponding
- * top-lock. Optional
- *
- * \see lovsub_lock_state()
- */
- void (*clo_state)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state st);
- /**
- * Returns true iff the given lock is suitable for the given io, the idea
- * being that there are certain "unsafe" locks, e.g., ones acquired
- * for O_APPEND writes, that we don't want to re-use for a normal
- * write, to avoid the danger of cascading evictions. Optional. Runs
- * under cl_object_header::coh_lock_guard.
- *
- * XXX this should take more information about lock needed by
- * io. Probably lock description or something similar.
- *
- * \see lov_fits_into()
- */
- int (*clo_fits_into)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
- /**
- * \name ast
- * Asynchronous System Traps. All of them are optional; all are
- * executed bottom-to-top.
- */
- /** @{ */
-
- /**
- * Cancellation callback. Cancel a lock voluntarily, or under
- * the request of server.
- */
- void (*clo_cancel)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Lock weighting ast. Executed to estimate how precious this lock
- * is. The sum of results across all layers is used to determine
- * whether the lock is worth keeping in cache given present memory usage.
- *
- * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh().
- */
- unsigned long (*clo_weigh)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} ast */
-
- /**
- * \see lovsub_lock_closure()
- */
- int (*clo_closure)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure);
- /**
- * Executed bottom-to-top when lock description changes (e.g., as a
- * result of server granting more generous lock than was requested).
- *
- * \see lovsub_lock_modify()
- */
- int (*clo_modify)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *updated);
- /**
- * Notifies layers (bottom-to-top) that lock is going to be
- * destroyed. Responsibility of layers is to prevent new references on
- * this lock from being acquired once this method returns.
- *
- * This can be called multiple times due to races.
- *
- * \see cl_lock_delete()
- * \see osc_lock_delete(), lovsub_lock_delete()
- */
- void (*clo_delete)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Destructor. Frees resources and the slice.
- *
- * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
- * \see osc_lock_fini()
- */
- void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
- /**
- * Optional debugging helper. Prints given lock slice.
- */
- int (*clo_print)(const struct lu_env *env,
- void *cookie, lu_printer_t p,
- const struct cl_lock_slice *slice);
-};
-
-#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-#define CL_LOCK_ASSERT(expr, env, lock) do { \
- if (likely(expr)) \
- break; \
- \
- CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
- LBUG(); \
-} while (0)
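-
-/*
- * Usage sketch for CL_LOCK_ASSERT(): check an invariant on a lock (here
- * the holds >= users invariant mentioned above) and dump the lock before
- * LBUG() when it does not hold:
- *
- *	CL_LOCK_ASSERT(lock->cll_holds >= lock->cll_users, env, lock);
- */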
-
-/** @} cl_lock */
-
-/** \addtogroup cl_page_list cl_page_list
- * Page list used to perform collective operations on a group of pages.
- *
- * Pages are added to the list one by one. cl_page_list acquires a reference
- * for every page in it. Page list is used to perform collective operations on
- * pages:
- *
- * - submit pages for an immediate transfer,
- *
- * - own pages on behalf of certain io (waiting for each page in turn),
- *
- * - discard pages.
- *
- * When list is finalized, it releases references on all pages it still has.
- *
- * \todo XXX concurrency control.
- *
- * @{
- */
-struct cl_page_list {
- unsigned pl_nr;
- struct list_head pl_pages;
- struct task_struct *pl_owner;
-};
-
-/**
- * A 2-queue of pages. A convenience data-type for common use case, 2-queue
- * contains an incoming page list and an outgoing page list.
- */
-struct cl_2queue {
- struct cl_page_list c2_qin;
- struct cl_page_list c2_qout;
-};
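-
-/*
- * Data-flow sketch for the 2-queue, as consumed by
- * cl_io_operations::cio_submit() below: the caller fills c2_qin and the
- * transfer engine moves every page it accepts to c2_qout (cl_2queue_init()
- * is assumed; a direct call through one slice is shown only to illustrate
- * the signature):
- *
- *	cl_2queue_init(queue);
- *	... add pages to queue->c2_qin ...
- *	rc = slice->cis_iop->req_op[CRT_WRITE].cio_submit(env, slice,
- *							  CRT_WRITE, queue);
- *	pages left on queue->c2_qin were not submitted;
- *	pages on queue->c2_qout are now in flight
- */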
-
-/** @} cl_page_list */
-
-/** \addtogroup cl_io cl_io
- * @{ */
-/** \struct cl_io
- * I/O
- *
- * cl_io represents a high level I/O activity like
- * read(2)/write(2)/truncate(2) system call, or cancellation of an extent
- * lock.
- *
- * cl_io is a layered object, much like cl_{object,page,lock} but with one
- * important distinction. We want to minimize number of calls to the allocator
- * in the fast path, e.g., in the case of read(2) when everything is cached:
- * client already owns the lock over region being read, and data are cached
- * due to read-ahead. To avoid allocation of cl_io layers in such situations,
- * per-layer io state is stored in the session, associated with the io, see
- * struct {vvp,lov,osc}_io for example. Session allocation is amortized
- * by using free-lists, see cl_env_get().
- *
- * There is a small predefined number of possible io types, enumerated in enum
- * cl_io_type.
- *
- * cl_io is a state machine that can be advanced concurrently by multiple
- * threads. It is up to these threads to control the concurrency and,
- * specifically, to detect when io is done, and its state can be safely
- * released.
- *
- * For read/write io overall execution plan is as following:
- *
- * (0) initialize io state through all layers;
- *
- * (1) loop: prepare chunk of work to do
- *
- * (2) call all layers to collect locks they need to process current chunk
- *
- * (3) sort all locks to avoid dead-locks, and acquire them
- *
- * (4) process the chunk: call per-page methods
- * (cl_io_operations::cio_read_page() for read,
- * cl_io_operations::cio_prepare_write(),
- * cl_io_operations::cio_commit_write() for write)
- *
- * (5) release locks
- *
- * (6) repeat loop.
- *
- * To implement the "parallel IO mode", lov layer creates sub-io's (lazily to
- * address allocation efficiency issues mentioned above), and returns with the
- * special error condition from per-page method when current sub-io has to
- * block. This causes io loop to be repeated, and lov switches to the next
- * sub-io in its cl_io_operations::cio_iter_init() implementation.
- */
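-
-/*
- * A caller-side sketch of the loop (0)-(6) above; the cl_io_init(),
- * cl_io_loop() and cl_io_fini() entry points are assumed, and error
- * handling is elided:
- *
- *	if (cl_io_init(env, io, CIT_READ, obj) == 0)
- *		result = cl_io_loop(env, io);	drives iterations (1)-(6)
- *	cl_io_fini(env, io);
- */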
-
-/** IO types */
-enum cl_io_type {
- /** read system call */
- CIT_READ,
- /** write system call */
- CIT_WRITE,
- /** truncate, utime system calls */
- CIT_SETATTR,
- /**
- * page fault handling
- */
- CIT_FAULT,
- /**
- * fsync system call handling
- * To write out a range of file
- */
- CIT_FSYNC,
- /**
- * Miscellaneous io. This is used for occasional io activity that
- * doesn't fit into other types. Currently this is used for:
- *
- * - cancellation of an extent lock. This io exists as a context
- * to write dirty pages from under the lock being canceled back
- * to the server;
- *
- * - VM induced page write-out. An io context for writing page out
- * for memory cleansing;
- *
- * - glimpse. An io context to acquire glimpse lock.
- *
- * - grouplock. An io context to acquire group lock.
- *
- * CIT_MISC io is used simply as a context in which locks and pages
- * are manipulated. Such io has no internal "process", that is,
- * cl_io_loop() is never called for it.
- */
- CIT_MISC,
- CIT_OP_NR
-};
-
-/**
- * States of cl_io state machine
- */
-enum cl_io_state {
- /** Not initialized. */
- CIS_ZERO,
- /** Initialized. */
- CIS_INIT,
- /** IO iteration started. */
- CIS_IT_STARTED,
- /** Locks taken. */
- CIS_LOCKED,
- /** Actual IO is in progress. */
- CIS_IO_GOING,
- /** IO for the current iteration finished. */
- CIS_IO_FINISHED,
- /** Locks released. */
- CIS_UNLOCKED,
- /** Iteration completed. */
- CIS_IT_ENDED,
- /** cl_io finalized. */
- CIS_FINI
-};
-
-/**
- * IO state private for a layer.
- *
- * This is usually embedded into layer session data, rather than allocated
- * dynamically.
- *
- * \see vvp_io, lov_io, osc_io, ccc_io
- */
-struct cl_io_slice {
- struct cl_io *cis_io;
- /** corresponding object slice. Immutable after creation. */
- struct cl_object *cis_obj;
- /** io operations. Immutable after creation. */
- const struct cl_io_operations *cis_iop;
- /**
- * linkage into a list of all slices for a given cl_io, hanging off
- * cl_io::ci_layers. Immutable after creation.
- */
- struct list_head cis_linkage;
-};
-
-
-/**
- * Per-layer io operations.
- * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
- */
-struct cl_io_operations {
- /**
- * Vector of io state transition methods for every io type.
- *
- * \see cl_page_operations::io
- */
- struct {
- /**
- * Prepare io iteration at a given layer.
- *
- * Called top-to-bottom at the beginning of each iteration of
- * "io loop" (if it makes sense for this type of io). Here
- * layer selects what work it will do during this iteration.
- *
- * \see cl_io_operations::cio_iter_fini()
- */
- int (*cio_iter_init) (const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Finalize io iteration.
- *
- * Called bottom-to-top at the end of each iteration of "io
- * loop". Here layers can decide whether IO has to be
- * continued.
- *
- * \see cl_io_operations::cio_iter_init()
- */
- void (*cio_iter_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Collect locks for the current iteration of io.
- *
- * Called top-to-bottom to collect all locks necessary for
- * this iteration. This method shouldn't actually enqueue
- * anything; instead it should post a lock through
- * cl_io_lock_add(). Once all locks are collected, they are
- * sorted and enqueued in the proper order.
- */
- int (*cio_lock) (const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Finalize unlocking.
- *
- * Called bottom-to-top to finish layer specific unlocking
- * functionality, after generic code released all locks
- * acquired by cl_io_operations::cio_lock().
- */
- void (*cio_unlock)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Start io iteration.
- *
- * Once all locks are acquired, called top-to-bottom to
- * commence actual IO. In the current implementation,
- * top-level vvp_io_{read,write}_start() does all the work
- * synchronously by calling generic_file_*(), so other layers
- * are called when everything is done.
- */
- int (*cio_start)(const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Called top-to-bottom at the end of io loop. Here layer
- * might wait for an unfinished asynchronous io.
- */
- void (*cio_end) (const struct lu_env *env,
- const struct cl_io_slice *slice);
- /**
- * Called bottom-to-top to notify layers that read/write IO
- * iteration finished, with \a nob bytes transferred.
- */
- void (*cio_advance)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- size_t nob);
- /**
- * Called once per io, bottom-to-top to release io resources.
- */
- void (*cio_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
- } op[CIT_OP_NR];
- struct {
- /**
- * Submit pages from \a queue->c2_qin for IO, and move
- * successfully submitted pages into \a queue->c2_qout. Return
- * non-zero if it failed to submit even a single page. If
- * submission failed after some pages were moved into \a
- * queue->c2_qout, completion callback with non-zero ioret is
- * executed on them.
- */
- int (*cio_submit)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- enum cl_req_type crt,
- struct cl_2queue *queue);
- } req_op[CRT_NR];
- /**
- * Read missing page.
- *
- * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
- * method, when it hits not-up-to-date page in the range. Optional.
- *
- * \pre io->ci_type == CIT_READ
- */
- int (*cio_read_page)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page);
- /**
- * Prepare write of a \a page. Called bottom-to-top by a top-level
- * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare the page to
- * receive data from the user-level buffer.
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_prepare_write(), lov_io_prepare_write(),
- * osc_io_prepare_write().
- */
- int (*cio_prepare_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_commit_write(), lov_io_commit_write(),
- * osc_io_commit_write().
- */
- int (*cio_commit_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
- * Optional debugging helper. Print given io slice.
- */
- int (*cio_print)(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_io_slice *slice);
-};
-
-/**
- * Flags to lock enqueue procedure.
- * \ingroup cl_lock
- */
-enum cl_enq_flags {
- /**
- * instruct the server not to block if a conflicting lock is found.
- * Instead -EWOULDBLOCK is returned immediately.
- */
- CEF_NONBLOCK = 0x00000001,
- /**
- * take lock asynchronously (out of order), as it cannot
- * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
- */
- CEF_ASYNC = 0x00000002,
- /**
- * tell the server to instruct (through a flag in the blocking ast) an
- * owner of the conflicting lock, that it can drop dirty pages
- * protected by this lock, without sending them to the server.
- */
- CEF_DISCARD_DATA = 0x00000004,
- /**
- * tell the sub layers that it must be a `real' lock. This is used for
- * mmapped-buffer locks and glimpse locks that must never be converted
- * into lockless mode.
- *
- * \see vvp_mmap_locks(), cl_glimpse_lock().
- */
- CEF_MUST = 0x00000008,
- /**
- * tell the sub layers to never request a `real' lock. This flag is
- * not used currently.
- *
- * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless
- * conversion policy: ci_lockreq describes generic information of lock
- * requirement for this IO, especially for locks which belong to the
- * object doing IO; however, lock itself may have precise requirements
- * that are described by the enqueue flags.
- */
- CEF_NEVER = 0x00000010,
- /**
- * for async glimpse lock.
- */
- CEF_AGL = 0x00000020,
- /**
- * mask of enq_flags.
- */
- CEF_MASK = 0x0000003f,
-};
-
-/**
- * Link between lock and io. Intermediate structure is needed, because the
- * same lock can be part of multiple io's simultaneously.
- */
-struct cl_io_lock_link {
- /** linkage into one of cl_lockset lists. */
- struct list_head cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
- /** optional destructor */
- void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
-};
-
-/**
- * A lock-set represents a collection of locks that an io needs at a
- * time. Generally speaking, the client tries to avoid holding multiple locks
- * when possible, because
- *
- * - holding extent locks over multiple ost's introduces the danger of
- * "cascading timeouts";
- *
- * - holding multiple locks over the same ost is still dead-lock prone,
- * see comment in osc_lock_enqueue(),
- *
- * but there are certain situations where this is unavoidable:
- *
- * - O_APPEND writes have to take [0, EOF] lock for correctness;
- *
- * - truncate has to take [new-size, EOF] lock for correctness;
- *
- * - SNS has to take locks across full stripe for correctness;
- *
- * - when a user-level buffer supplied to {read,write}(file0) is part of
- *   a memory-mapped lustre file, the client has to take dlm locks on
- *   file0 and on all files that back the buffer (or the part of the
- *   buffer processed in the current chunk); in any case, there are
- *   situations where at least 2 locks are necessary.
- *
- * In such cases we at least try to take locks in the same consistent
- * order. To this end, all locks are first collected, then sorted, and then
- * enqueued.
- */
-struct cl_lockset {
- /** locks to be acquired. */
- struct list_head cls_todo;
- /** locks currently being processed. */
- struct list_head cls_curr;
- /** locks acquired. */
- struct list_head cls_done;
-};
-
-/**
- * Lock requirements (demand) for IO. It should be cl_io_lock_req,
- * but 'req' always reads as 'request' :-)
- */
-enum cl_io_lock_dmd {
- /** Always lock data (e.g., O_APPEND). */
- CILR_MANDATORY = 0,
- /** Layers are free to decide between local and global locking. */
- CILR_MAYBE,
- /** Never lock: there is no cache (e.g., liblustre). */
- CILR_NEVER
-};
-
-enum cl_fsync_mode {
- /** start writeback, do not wait for it to complete */
- CL_FSYNC_NONE = 0,
- /** start writeback and wait for it to complete */
- CL_FSYNC_LOCAL = 1,
- /** discard all dirty pages in a specific file range */
- CL_FSYNC_DISCARD = 2,
- /** start writeback and make sure the pages have reached storage
- * before returning; an OST_SYNC RPC must be issued and finished */
- CL_FSYNC_ALL = 3
-};
-
-struct cl_io_rw_common {
- loff_t crw_pos;
- size_t crw_count;
- int crw_nonblock;
-};
-
-
-/**
- * State for io.
- *
- * cl_io is shared by all threads participating in this IO (in the current
- * implementation only one thread advances the IO, but the parallel IO design
- * and concurrent copy_*_user() require multiple threads acting on the same
- * IO). It is up to these threads to serialize their activities, including
- * updates to mutable cl_io fields.
- */
-struct cl_io {
- /** type of this IO. Immutable after creation. */
- enum cl_io_type ci_type;
- /** current state of cl_io state machine. */
- enum cl_io_state ci_state;
- /** main object this io is against. Immutable after creation. */
- struct cl_object *ci_obj;
- /**
- * Upper layer io, of which this io is a part of. Immutable after
- * creation.
- */
- struct cl_io *ci_parent;
- /** List of slices. Immutable after creation. */
- struct list_head ci_layers;
- /** list of locks (to be) acquired by this io. */
- struct cl_lockset ci_lockset;
- /** lock requirements, this is just a help info for sublayers. */
- enum cl_io_lock_dmd ci_lockreq;
- union {
- struct cl_rd_io {
- struct cl_io_rw_common rd;
- } ci_rd;
- struct cl_wr_io {
- struct cl_io_rw_common wr;
- int wr_append;
- int wr_sync;
- } ci_wr;
- struct cl_io_rw_common ci_rw;
- struct cl_setattr_io {
- struct ost_lvb sa_attr;
- unsigned int sa_valid;
- } ci_setattr;
- struct cl_fault_io {
- /** page index within file. */
- pgoff_t ft_index;
- /** number of valid bytes on the faulted page. */
- int ft_nob;
- /** writable page? for nopage() only */
- int ft_writable;
- /** page of an executable? */
- int ft_executable;
- /** page_mkwrite() */
- int ft_mkwrite;
- /** resulting page */
- struct cl_page *ft_page;
- } ci_fault;
- struct cl_fsync_io {
- loff_t fi_start;
- loff_t fi_end;
- /** file system level fid */
- struct lu_fid *fi_fid;
- enum cl_fsync_mode fi_mode;
- /* how many pages were written/discarded */
- unsigned int fi_nr_written;
- } ci_fsync;
- } u;
- struct cl_2queue ci_queue;
- size_t ci_nob;
- int ci_result;
- unsigned int ci_continue:1,
- /**
- * This io holds a grouplock, to inform sublayers that they
- * must not do lockless i/o.
- */
- ci_no_srvlock:1,
- /**
- * The whole IO needs to be restarted because the layout has changed
- */
- ci_need_restart:1,
- /**
- * do not refresh the layout - the IO issuer knows that the layout won't
- * change (page operations; a layout change causes all pages to be
- * discarded), or it doesn't matter if it changes (sync).
- */
- ci_ignore_layout:1,
- /**
- * Check if the layout changed after the IO finishes. Mainly for the HSM
- * requirement. If IO occurs to open files, it doesn't need to
- * verify the layout because HSM won't release open files.
- * Right now, only two operations need to verify the layout: glimpse
- * and setattr.
- */
- ci_verify_layout:1,
- /**
- * file is released; restore has to be triggered by the vvp layer
- */
- ci_restore_needed:1,
- /**
- * O_NOATIME
- */
- ci_noatime:1;
- /**
- * Number of pages owned by this IO. For invariant checking.
- */
- unsigned ci_owned_nr;
-};
-
-/** @} cl_io */
-
-/** \addtogroup cl_req cl_req
- * @{ */
-/** \struct cl_req
- * Transfer.
- *
- * There are two possible modes of transfer initiation on the client:
- *
- * - immediate transfer: this is started when a high level io wants a page
- * or a collection of pages to be transferred right away. Examples:
- * read-ahead, synchronous read in the case of non-page aligned write,
- * page write-out as a part of extent lock cancellation, page write-out
- * as a part of memory cleansing. Immediate transfer can be both
- * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
- *
- * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
- * when io wants to transfer a page to the server some time later, when
- * it can be done efficiently. Example: pages dirtied by the write(2)
- * path.
- *
- * In any case, transfer takes place in the form of a cl_req, which is a
- * representation for a network RPC.
- *
- * Pages queued for an opportunistic transfer are cached until it is decided
- * that efficient RPC can be composed of them. This decision is made by "a
- * req-formation engine", currently implemented as a part of osc
- * layer. Req-formation depends on many factors: the size of the resulting
- * RPC, whether or not multi-object RPCs are supported by the server,
- * max-rpc-in-flight limitations, size of the dirty cache, etc.
- *
- * For the immediate transfer io submits a cl_page_list, that req-formation
- * engine slices into cl_req's, possibly adding cached pages to some of
- * the resulting req's.
- *
- * Whenever a page from cl_page_list is added to a newly constructed req, its
- * cl_page_operations::cpo_prep() layer methods are called. At that moment,
- * page state is atomically changed from cl_page_state::CPS_OWNED to
- * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
- * is zeroed, and cl_page::cp_req is set to the
- * req. cl_page_operations::cpo_prep() method at the particular layer might
- * return -EALREADY to indicate that it does not need to submit this page
- * at all. This is possible, for example, if a page submitted for read
- * became up-to-date in the meantime; or, for write, if the page does not
- * have its dirty bit set. \see cl_io_submit_rw()
- *
- * Whenever a cached page is added to a newly constructed req, its
- * cl_page_operations::cpo_make_ready() layer methods are called. At that
- * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
- * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
- * req. cl_page_operations::cpo_make_ready() method at the particular layer
- * might return -EAGAIN to indicate that this page is not eligible for the
- * transfer right now.
- *
- * FUTURE
- *
- * Plan is to divide transfers into "priority bands" (indicated when
- * submitting cl_page_list, and queuing a page for the opportunistic transfer)
- * and allow gluing of cached pages to immediate transfers only within a single
- * band. This would make high priority transfers (like lock cancellation or
- * memory pressure induced write-out) really high priority.
- *
- */
-
-/**
- * Per-transfer attributes.
- */
-struct cl_req_attr {
- /** Generic attributes for the server consumption. */
- struct obdo *cra_oa;
- /** Jobid */
- char cra_jobid[JOBSTATS_JOBID_SIZE];
-};
-
-/**
- * Transfer request operations definable at every layer.
- *
- * Concurrency: transfer formation engine synchronizes calls to all transfer
- * methods.
- */
-struct cl_req_operations {
- /**
- * Invoked top-to-bottom by cl_req_prep() when transfer formation is
- * complete (all pages are added).
- *
- * \see osc_req_prep()
- */
- int (*cro_prep)(const struct lu_env *env,
- const struct cl_req_slice *slice);
- /**
- * Called top-to-bottom to fill in \a oa fields. This is called twice
- * with different flags, see bug 10150 and osc_build_req().
- *
- * \param obj an object from cl_req whose attributes are to be set in
- * \a oa.
- *
- * \param oa struct obdo where attributes are placed
- *
- * \param flags \a oa fields to be filled.
- */
- void (*cro_attr_set)(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags);
- /**
- * Called top-to-bottom from cl_req_completion() to notify layers that
- * transfer completed. Has to free all state allocated by
- * cl_device_operations::cdo_req_init().
- */
- void (*cro_completion)(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-};
-
-/**
- * A per-object state that (potentially multi-object) transfer request keeps.
- */
-struct cl_req_obj {
- /** object itself */
- struct cl_object *ro_obj;
- /** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link ro_obj_ref;
- /* something else? Number of pages for a given object? */
-};
-
-/**
- * Transfer request.
- *
- * Transfer requests are not reference counted, because IO sub-system owns
- * them exclusively and knows when to free them.
- *
- * Life cycle.
- *
- * cl_req is created by cl_req_alloc() that calls
- * cl_device_operations::cdo_req_init() device methods to allocate per-req
- * state in every layer.
- *
- * Then pages are added (cl_req_page_add()); the req keeps track of all
- * objects it contains pages for.
- *
- * Once all pages were collected, cl_page_operations::cpo_prep() method is
- * called top-to-bottom. At that point layers can modify req, let it pass, or
- * deny it completely. This is to support things like SNS that have transfer
- * ordering requirements invisible to the individual req-formation engine.
- *
- * On transfer completion (or transfer timeout, or failure to initiate the
- * transfer of an allocated req), cl_req_operations::cro_completion() method
- * is called, after execution of cl_page_operations::cpo_completion() of all
- * req's pages.
- */
-struct cl_req {
- enum cl_req_type crq_type;
- /** A list of pages being transferred */
- struct list_head crq_pages;
- /** Number of pages in cl_req::crq_pages */
- unsigned crq_nrpages;
- /** An array of objects whose pages are in ->crq_pages */
- struct cl_req_obj *crq_o;
- /** Number of elements in cl_req::crq_o[] */
- unsigned crq_nrobjs;
- struct list_head crq_layers;
-};
-
-/**
- * Per-layer state for request.
- */
-struct cl_req_slice {
- struct cl_req *crs_req;
- struct cl_device *crs_dev;
- struct list_head crs_linkage;
- const struct cl_req_operations *crs_ops;
-};
-
-/* @} cl_req */
-
-enum cache_stats_item {
- /** how many cache lookups were performed */
- CS_lookup = 0,
- /** how many times cache lookup resulted in a hit */
- CS_hit,
- /** how many entities are in the cache right now */
- CS_total,
- /** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
- CS_busy,
- /** how many entities were created at all */
- CS_create,
- CS_NR
-};
-
-#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
-
-/**
- * Stats for a generic cache (similar to inode, lu_object, etc. caches).
- */
-struct cache_stats {
- const char *cs_name;
- atomic_t cs_stats[CS_NR];
-};
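-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * a consumer of struct cache_stats would typically bump the counters
- * directly, e.g.:
- *
- *	atomic_inc(&site->cs_pages.cs_stats[CS_lookup]);
- *	if (found)
- *		atomic_inc(&site->cs_pages.cs_stats[CS_hit]);
- *
- * where "site" (a struct cl_site *) and "found" are hypothetical locals of
- * the caller.
- */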
-
-/** These are not exported so far */
-void cache_stats_init (struct cache_stats *cs, const char *name);
-
-/**
- * Client-side site. This represents a particular client stack. "Global"
- * variables should (directly or indirectly) be added here to allow multiple
- * clients to co-exist in the single address space.
- */
-struct cl_site {
- struct lu_site cs_lu;
- /**
- * Statistical counters. Atomics do not scale; something better, like
- * per-cpu counters, is needed.
- *
- * These are exported as /proc/fs/lustre/llite/.../site
- *
- * When interpreting keep in mind that both sub-locks (and sub-pages)
- * and top-locks (and top-pages) are accounted here.
- */
- struct cache_stats cs_pages;
- struct cache_stats cs_locks;
- atomic_t cs_pages_state[CPS_NR];
- atomic_t cs_locks_state[CLS_NR];
-};
-
-int cl_site_init (struct cl_site *s, struct cl_device *top);
-void cl_site_fini (struct cl_site *s);
-void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
-
-/**
- * Output client site statistical counters into a buffer. Suitable for
- * ll_rd_*()-style functions.
- */
-int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
-
-/**
- * \name helpers
- *
- * Type conversion and accessory functions.
- */
-/** @{ */
-
-static inline struct cl_site *lu2cl_site(const struct lu_site *site)
-{
- return container_of(site, struct cl_site, cs_lu);
-}
-
-static inline int lu_device_is_cl(const struct lu_device *d)
-{
- return d->ld_type->ldt_tags & LU_DEVICE_CL;
-}
-
-static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
-{
- LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
- return container_of0(d, struct cl_device, cd_lu_dev);
-}
-
-static inline struct lu_device *cl2lu_dev(struct cl_device *d)
-{
- return &d->cd_lu_dev;
-}
-
-static inline struct cl_object *lu2cl(const struct lu_object *o)
-{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
- return container_of0(o, struct cl_object, co_lu);
-}
-
-static inline const struct cl_object_conf *
-lu2cl_conf(const struct lu_object_conf *conf)
-{
- return container_of0(conf, struct cl_object_conf, coc_lu);
-}
-
-static inline struct cl_object *cl_object_next(const struct cl_object *obj)
-{
- return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
-}
-
-static inline struct cl_device *cl_object_device(const struct cl_object *o)
-{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
- return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
-}
-
-static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
-{
- return container_of0(h, struct cl_object_header, coh_lu);
-}
-
-static inline struct cl_site *cl_object_site(const struct cl_object *obj)
-{
- return lu2cl_site(obj->co_lu.lo_dev->ld_site);
-}
-
-static inline
-struct cl_object_header *cl_object_header(const struct cl_object *obj)
-{
- return luh2coh(obj->co_lu.lo_header);
-}
-
-static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
-{
- return lu_device_init(&d->cd_lu_dev, t);
-}
-
-static inline void cl_device_fini(struct cl_device *d)
-{
- lu_device_fini(&d->cd_lu_dev);
-}
-
-void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
- const struct cl_page_operations *ops);
-void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
- struct cl_object *obj,
- const struct cl_lock_operations *ops);
-void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
- struct cl_object *obj, const struct cl_io_operations *ops);
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
- struct cl_device *dev,
- const struct cl_req_operations *ops);
-/** @} helpers */
-
-/** \defgroup cl_object cl_object
- * @{ */
-struct cl_object *cl_object_top (struct cl_object *o);
-struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
- const struct lu_fid *fid,
- const struct cl_object_conf *c);
-
-int cl_object_header_init(struct cl_object_header *h);
-void cl_object_put (const struct lu_env *env, struct cl_object *o);
-void cl_object_get (struct cl_object *o);
-void cl_object_attr_lock (struct cl_object *o);
-void cl_object_attr_unlock(struct cl_object *o);
-int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
-int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
-int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb);
-int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
-void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
-void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
-
-/**
- * Returns true, iff \a o0 and \a o1 are slices of the same object.
- */
-static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
-{
- return cl_object_header(o0) == cl_object_header(o1);
-}
-
-static inline void cl_object_page_init(struct cl_object *clob, int size)
-{
- clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
- cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
-}
-
-static inline void *cl_object_page_slice(struct cl_object *clob,
- struct cl_page *page)
-{
- return (void *)((char *)page + clob->co_slice_off);
-}
-
-/** @} cl_object */
-
-/** \defgroup cl_page cl_page
- * @{ */
-enum {
- CLP_GANG_OKAY = 0,
- CLP_GANG_RESCHED,
- CLP_GANG_AGAIN,
- CLP_GANG_ABORT
-};
-
-/* callback of cl_page_gang_lookup() */
-typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
- struct cl_page *, void *);
-int cl_page_gang_lookup (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_io *io,
- pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
- pgoff_t index);
-struct cl_page *cl_page_find (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type);
-struct cl_page *cl_page_find_sub (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent);
-void cl_page_get (struct cl_page *page);
-void cl_page_put (const struct lu_env *env,
- struct cl_page *page);
-void cl_page_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-struct page *cl_page_vmpage (const struct lu_env *env,
- struct cl_page *page);
-struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top (struct cl_page *page);
-
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
- const struct lu_device_type *dtype);
-
-/**
- * \name ownership
- *
- * Functions dealing with the ownership of page by io.
- */
-/** @{ */
-
-int cl_page_own (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_own_try (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_assume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_unassume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-void cl_page_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
-
-/** @} ownership */
-
-/**
- * \name transfer
- *
- * Functions dealing with the preparation of a page for a transfer, and
- * tracking transfer state.
- */
-/** @{ */
-int cl_page_prep (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_completion (const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret);
-int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt);
-int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
- int from, int to);
-int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
-int cl_page_flush (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-
-/** @} transfer */
-
-
-/**
- * \name helper routines
- * Functions to discard, delete and export a cl_page.
- */
-/** @{ */
-void cl_page_discard (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-int cl_page_is_vmlocked (const struct lu_env *env,
- const struct cl_page *pg);
-void cl_page_export (const struct lu_env *env,
- struct cl_page *pg, int uptodate);
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
-loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
-pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
-int cl_page_size (const struct cl_object *obj);
-int cl_pages_prune (const struct lu_env *env, struct cl_object *obj);
-
-void cl_lock_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_lock *lock);
-void cl_lock_descr_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_lock_descr *descr);
-/* @} helper */
-
-/** @} cl_page */
-
-/** \defgroup cl_lock cl_lock
- * @{ */
-
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except, int pending,
- int canceld);
-static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- struct cl_lock *except,
- int pending, int canceld)
-{
- LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
- return cl_lock_at_pgoff(env, obj, page->cp_index, except,
- pending, canceld);
-}
-
-const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
- const struct lu_device_type *dtype);
-
-void cl_lock_get (struct cl_lock *lock);
-void cl_lock_get_trust (struct cl_lock *lock);
-void cl_lock_put (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
-
-int cl_lock_is_intransit(struct cl_lock *lock);
-
-int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
- int keep_mutex);
-
-/** \name statemachine statemachine
- * The interface to the lock state machine consists of 3 parts:
- *
- * - "try" functions that attempt to effect a state transition. If state
- * transition is not possible right now (e.g., if it has to wait for some
- * asynchronous event to occur), these functions return
- * cl_lock_transition::CLO_WAIT.
- *
- * - "non-try" functions that implement synchronous blocking interface on
- * top of non-blocking "try" functions. These functions repeatedly call
- * corresponding "try" versions, and if state transition is not possible
- * immediately, wait for lock state change.
- *
- * - methods from cl_lock_operations, called by "try" functions. Lock can
- * be advanced to the target state only when all layers voted that they
- * are ready for this transition. "Try" functions call methods under lock
- * mutex. If a layer had to release a mutex, it re-acquires it and returns
- * cl_lock_transition::CLO_REPEAT, causing "try" function to call all
- * layers again.
- *
- * TRY NON-TRY METHOD FINAL STATE
- *
- * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED
- *
- * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD
- *
- * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED
- *
- * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
- *
- * @{ */
-
-int cl_wait (const struct lu_env *env, struct cl_lock *lock);
-void cl_unuse (const struct lu_env *env, struct cl_lock *lock);
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags);
-int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
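-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * per the table above, a blocking "non-try" wrapper repeatedly invokes its
- * "try" counterpart and waits for a state change when the transition is not
- * possible yet, roughly (error handling and mutex management elided):
- *
- *	while ((rc = cl_wait_try(env, lock)) == CLO_WAIT) {
- *		rc = cl_lock_state_wait(env, lock);
- *		if (rc != 0)
- *			break;
- *	}
- *
- * where "rc", "env" and "lock" are the caller's locals.
- */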
-
-/** @} statemachine */
-
-void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state);
-int cl_queue_match (const struct list_head *queue,
- const struct cl_lock_descr *need);
-
-void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_is_mutexed (struct cl_lock *lock);
-int cl_lock_nr_mutexed (const struct lu_env *env);
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_ext_match (const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need);
-int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc);
-
-void cl_lock_closure_init (const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait);
-void cl_lock_closure_fini (struct cl_lock_closure *closure);
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-void cl_lock_disclosure (const struct lu_env *env,
- struct cl_lock_closure *closure);
-int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-
-void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
-
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
-
-/** @} cl_lock */
-
-/** \defgroup cl_io cl_io
- * @{ */
-
-int cl_io_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count);
-int cl_io_loop (const struct lu_env *env, struct cl_io *io);
-
-void cl_io_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
-void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock (const struct lu_env *env, struct cl_io *io);
-void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
-int cl_io_start (const struct lu_env *env, struct cl_io *io);
-void cl_io_end (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link);
-int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr);
-int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue);
-int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue,
- long timeout);
-int cl_io_is_going (const struct lu_env *env);
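-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * a top-level caller typically drives the io state machine through the
- * functions above, roughly:
- *
- *	if (cl_io_rw_init(env, io, CIT_READ, pos, count) == 0)
- *		rc = cl_io_loop(env, io);
- *	cl_io_fini(env, io);
- *
- * where "env", "io", "pos", "count" and "rc" are set up by the caller;
- * cl_io_loop() iterates the lock/start/end/unlock steps internally.
- */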
-
-/**
- * True, iff \a io is an O_APPEND write(2).
- */
-static inline int cl_io_is_append(const struct cl_io *io)
-{
- return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
-}
-
-static inline int cl_io_is_sync_write(const struct cl_io *io)
-{
- return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
-}
-
-static inline int cl_io_is_mkwrite(const struct cl_io *io)
-{
- return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
-}
-
-/**
- * True, iff \a io is a truncate(2).
- */
-static inline int cl_io_is_trunc(const struct cl_io *io)
-{
- return io->ci_type == CIT_SETATTR &&
- (io->u.ci_setattr.sa_valid & ATTR_SIZE);
-}
-
-struct cl_io *cl_io_top(struct cl_io *io);
-
-#define CL_IO_SLICE_CLEAN(foo_io, base) \
-do { \
- typeof(foo_io) __foo_io = (foo_io); \
- \
- CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
- memset(&__foo_io->base + 1, 0, \
- sizeof(*__foo_io) - sizeof(__foo_io->base)); \
-} while (0)
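-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * CL_IO_SLICE_CLEAN() zeroes everything in a layer's io structure except the
- * embedded base member, which must come first. A hypothetical layer io could
- * use it as:
- *
- *	struct foo_io {
- *		struct cl_io_slice fio_cl;
- *		int fio_state;
- *	};
- *
- *	CL_IO_SLICE_CLEAN(foo, fio_cl);
- *
- * where "struct foo_io", "fio_cl" and "foo" (a struct foo_io *) are
- * hypothetical names; everything after fio_cl (here fio_state) is zeroed.
- */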
-
-/** @} cl_io */
-
-/** \defgroup cl_page_list cl_page_list
- * @{ */
-
-/**
- * Last page in the page list.
- */
-static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
-{
- LASSERT(plist->pl_nr > 0);
- return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
-}
-
-/**
- * Iterate over pages in a page list.
- */
-#define cl_page_list_for_each(page, list) \
- list_for_each_entry((page), &(list)->pl_pages, cp_batch)
-
-/**
- * Iterate over pages in a page list, taking possible removals into account.
- */
-#define cl_page_list_for_each_safe(page, temp, list) \
- list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
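-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * draining a page list with the helpers above might look like:
- *
- *	struct cl_page *page;
- *	struct cl_page *temp;
- *
- *	cl_page_list_for_each_safe(page, temp, plist)
- *		cl_page_list_del(env, plist, page);
- *
- * where "env" and "plist" are assumed to be supplied by the caller.
- */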
-
-void cl_page_list_init (struct cl_page_list *plist);
-void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page);
-void cl_page_list_splice (struct cl_page_list *list,
- struct cl_page_list *head);
-void cl_page_list_del (const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-void cl_page_list_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
-
-void cl_2queue_init (struct cl_2queue *queue);
-void cl_2queue_add (struct cl_2queue *queue, struct cl_page *page);
-void cl_2queue_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_discard (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
-void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
-
-/** @} cl_page_list */
-
-/** \defgroup cl_req cl_req
- * @{ */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
- enum cl_req_type crt, int nr_objects);
-
-void cl_req_page_add (const struct lu_env *env, struct cl_req *req,
- struct cl_page *page);
-void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
-int cl_req_prep (const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set (const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags);
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
-
-/** \defgroup cl_sync_io cl_sync_io
- * @{ */
-
-/**
- * Anchor for synchronous transfer. This is allocated on the stack by the
- * thread doing a synchronous transfer, and a pointer to this structure is
- * set up in every page submitted for transfer. The transfer completion
- * routine updates the anchor and wakes up the waiting thread when the
- * transfer is complete.
- */
-struct cl_sync_io {
- /** number of pages yet to be transferred. */
- atomic_t csi_sync_nr;
- /** error code. */
- int csi_sync_rc;
- /** barrier for destroying this structure */
- atomic_t csi_barrier;
- /** completion to be signaled when transfer is complete. */
- wait_queue_head_t csi_waitq;
-};
-
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
- long timeout);
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
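-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * the usual synchronous-transfer pattern built on cl_sync_io is roughly:
- *
- *	struct cl_sync_io anchor;
- *
- *	cl_sync_io_init(&anchor, queue->pl_nr);
- *	(submit the pages in "queue", each pointing at &anchor)
- *	rc = cl_sync_io_wait(env, io, queue, &anchor, timeout);
- *
- * where "env", "io", "queue" (a struct cl_page_list *), "timeout" and "rc"
- * are the caller's locals.
- */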
-
-/** @} cl_sync_io */
-
-/** @} cl_req */
-
-/** \defgroup cl_env cl_env
- *
- * lu_env handling for a client.
- *
- * lu_env is an environment within which lustre code executes. Its major part
- * is lu_context---a fast memory allocation mechanism that is used to conserve
- * precious kernel stack space. Originally lu_env was designed for a server,
- * where
- *
- * - there is a (mostly) fixed number of threads, and
- *
- * - call chains have no non-lustre portions inserted between lustre code.
- *
- * On a client both of these assumptions fail, because every user thread can
- * potentially execute lustre code as part of a system call, and lustre calls
- * into VFS or MM that call back into lustre.
- *
- * To deal with that, cl_env wrapper functions implement the following
- * optimizations:
- *
- * - allocation and destruction of environment is amortized by caching no
- * longer used environments instead of destroying them;
- *
- * - there is a notion of "current" environment, attached to the kernel
- * data structure representing the current thread. Top-level lustre code
- * allocates an environment and makes it current, then calls into
- * non-lustre code, which in turn calls back into lustre. Low-level lustre
- * code thus called can fetch the environment created by the top-level code
- * and reuse it, avoiding an additional environment allocation.
- * Right now, three interfaces can attach the cl_env to a running thread:
- * - cl_env_get
- * - cl_env_implant
- * - cl_env_reexit() (cl_env_reenter() must have been called beforehand)
- *
- * \see lu_env, lu_context, lu_context_key
- * @{ */
-
-struct cl_env_nest {
- int cen_refcheck;
- void *cen_cookie;
-};
-
-struct lu_env *cl_env_get (int *refcheck);
-struct lu_env *cl_env_alloc (int *refcheck, __u32 tags);
-struct lu_env *cl_env_nested_get (struct cl_env_nest *nest);
-void cl_env_put (struct lu_env *env, int *refcheck);
-void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env);
-void *cl_env_reenter (void);
-void cl_env_reexit (void *cookie);
-void cl_env_implant (struct lu_env *env, int *refcheck);
-void cl_env_unplant (struct lu_env *env, int *refcheck);
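-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * the common get/put pairing for a client environment is:
- *
- *	int refcheck;
- *	struct lu_env *env;
- *
- *	env = cl_env_get(&refcheck);
- *	if (!IS_ERR(env)) {
- *		(use env here)
- *		cl_env_put(env, &refcheck);
- *	}
- */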
-
-/** @} cl_env */
-
-/*
- * Misc
- */
-void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
-
-struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
- struct lu_device_type *ldt,
- struct lu_device *next);
-/** @} clio */
-
-int cl_global_init(void);
-void cl_global_fini(void);
-
-#endif /* _LINUX_CL_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
deleted file mode 100644
index bf9027d5f773..000000000000
--- a/drivers/staging/lustre/lustre/include/interval_tree.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/interval_tree.h
- *
- * Author: Huang Wei <huangwei@clusterfs.com>
- * Author: Jay Xiong <jinshan.xiong@sun.com>
- */
-
-#ifndef _INTERVAL_H__
-#define _INTERVAL_H__
-
-#include "../../include/linux/libcfs/libcfs.h" /* LASSERT. */
-
-struct interval_node {
- struct interval_node *in_left;
- struct interval_node *in_right;
- struct interval_node *in_parent;
- unsigned in_color:1,
- in_intree:1, /** set if the node is in tree */
- in_res1:30;
- __u8 in_res2[4]; /** tags, 8-bytes aligned */
- __u64 in_max_high;
- struct interval_node_extent {
- __u64 start;
- __u64 end;
- } in_extent;
-};
-
-enum interval_iter {
- INTERVAL_ITER_CONT = 1,
- INTERVAL_ITER_STOP = 2
-};
-
-static inline int interval_is_intree(struct interval_node *node)
-{
- return node->in_intree == 1;
-}
-
-static inline __u64 interval_low(struct interval_node *node)
-{
- return node->in_extent.start;
-}
-
-static inline __u64 interval_high(struct interval_node *node)
-{
- return node->in_extent.end;
-}
-
-static inline void interval_set(struct interval_node *node,
- __u64 start, __u64 end)
-{
- LASSERT(start <= end);
- node->in_extent.start = start;
- node->in_extent.end = end;
- node->in_max_high = end;
-}
-
-/* Rules to write an interval callback.
- * - the callback returns INTERVAL_ITER_STOP when it thinks the iteration
- * should be stopped. It will then cause the iteration function to return
- * immediately with return value INTERVAL_ITER_STOP.
- * - callbacks for interval_iterate and interval_iterate_reverse: every
- *   node in the tree will be passed as @node before the callback is called.
- * - callback for interval_search: only overlapping nodes will be passed
- *   as @node before the callback is called.
- */
-typedef enum interval_iter (*interval_callback_t)(struct interval_node *node,
- void *args);
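-
-/*
- * Illustrative sketch (editorial addition, not part of the original header):
- * a callback that counts matching extents and never stops the iteration
- * could look like:
- *
- *	static enum interval_iter count_cb(struct interval_node *node, void *args)
- *	{
- *		(*(int *)args)++;
- *		return INTERVAL_ITER_CONT;
- *	}
- *
- * where "count_cb" is a hypothetical name; it would be passed, together with
- * a pointer to an int counter, to interval_search() or interval_iterate().
- */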
-
-struct interval_node *interval_insert(struct interval_node *node,
- struct interval_node **root);
-void interval_erase(struct interval_node *node, struct interval_node **root);
-
-/* Search the extents in the tree and call @func for each overlapping
- * extent. */
-enum interval_iter interval_search(struct interval_node *root,
- struct interval_node_extent *ex,
- interval_callback_t func, void *data);
-
-/* Iterate every node in the tree - by reverse order or regular order. */
-enum interval_iter interval_iterate(struct interval_node *root,
- interval_callback_t func, void *data);
-enum interval_iter interval_iterate_reverse(struct interval_node *root,
- interval_callback_t func, void *data);
-
-void interval_expand(struct interval_node *root,
- struct interval_node_extent *ext,
- struct interval_node_extent *limiter);
-int interval_is_overlapped(struct interval_node *root,
- struct interval_node_extent *ex);
-struct interval_node *interval_find(struct interval_node *root,
- struct interval_node_extent *ex);
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
deleted file mode 100644
index 67d41bb8bbc0..000000000000
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Definitions shared between vvp and liblustre, and other clients in the
- * future.
- *
- * Author: Oleg Drokin <oleg.drokin@sun.com>
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#ifndef LCLIENT_H
-#define LCLIENT_H
-
-blkcnt_t dirty_cnt(struct inode *inode);
-
-int cl_glimpse_size0(struct inode *inode, int agl);
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
- struct inode *inode, struct cl_object *clob, int agl);
-
-static inline int cl_glimpse_size(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 0);
-}
-
-static inline int cl_agl(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 1);
-}
-
-/**
- * Locking policy for setattr.
- */
-enum ccc_setattr_lock_type {
- /** Locking is done by server */
- SETATTR_NOLOCK,
- /** Extent lock is enqueued */
- SETATTR_EXTENT_LOCK,
- /** Existing local extent lock is used */
- SETATTR_MATCH_LOCK
-};
-
-
-/**
- * IO state private to vvp or slp layers.
- */
-struct ccc_io {
- /** super class */
- struct cl_io_slice cui_cl;
- struct cl_io_lock_link cui_link;
- /**
- * I/O vector information to or from which read/write is going.
- */
- struct iov_iter *cui_iter;
- /**
- * Total size of the remaining IO.
- */
- size_t cui_tot_count;
-
- union {
- struct {
- enum ccc_setattr_lock_type cui_local_lock;
- } setattr;
- } u;
- /**
- * True iff io is processing glimpse right now.
- */
- int cui_glimpse;
- /**
- * Layout version when this IO is initialized
- */
- __u32 cui_layout_gen;
- /**
- * File descriptor against which IO is done.
- */
- struct ll_file_data *cui_fd;
- struct kiocb *cui_iocb;
-};
-
-/**
- * True if \a io is a normal io, false for splice_{read,write}.
- * Must be implemented in arch-specific code.
- */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
-
-extern struct lu_context_key ccc_key;
-extern struct lu_context_key ccc_session_key;
-
-struct ccc_thread_info {
- struct cl_lock_descr cti_descr;
- struct cl_io cti_io;
- struct cl_attr cti_attr;
-};
-
-static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
-{
- struct ccc_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &ccc_key);
- LASSERT(info != NULL);
- return info;
-}
-
-static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
-{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
-
- memset(attr, 0, sizeof(*attr));
- return attr;
-}
-
-static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
-{
- struct cl_io *io = &ccc_env_info(env)->cti_io;
-
- memset(io, 0, sizeof(*io));
- return io;
-}
-
-struct ccc_session {
- struct ccc_io cs_ios;
-};
-
-static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
-{
- struct ccc_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &ccc_session_key);
- LASSERT(ses != NULL);
- return ses;
-}
-
-static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
-{
- return &ccc_env_session(env)->cs_ios;
-}
-
-/**
- * ccc-private object state.
- */
-struct ccc_object {
- struct cl_object_header cob_header;
- struct cl_object cob_cl;
- struct inode *cob_inode;
-
- /**
- * A list of dirty pages pending IO in the cache. Used by
- * SOM. Protected by ll_inode_info::lli_lock.
- *
- * \see ccc_page::cpg_pending_linkage
- */
- struct list_head cob_pending_list;
-
- /**
- * Access to this counter is protected by inode->i_sem. Now that
- * the lifetime of transient pages must be covered by the inode sem,
- * we don't need to hold any lock.
- */
- int cob_transient_pages;
- /**
- * Number of outstanding mmaps on this file.
- *
- * \see ll_vm_open(), ll_vm_close().
- */
- atomic_t cob_mmap_cnt;
-
- /**
- * various flags
- * cob_discard_page_warned
- * if pages belonging to this object are discarded when a client
- * is evicted, some debug info will be printed; this flag is set
- * while processing the first discarded page, to avoid flooding
- * the debug log when many pages are discarded.
- *
- * \see ll_dirty_page_discard_warn.
- */
- unsigned int cob_discard_page_warned:1;
-};
-
-/**
- * ccc-private page state.
- */
-struct ccc_page {
- struct cl_page_slice cpg_cl;
- int cpg_defer_uptodate;
- int cpg_ra_used;
- int cpg_write_queued;
- /**
- * Non-empty iff this page is already counted in
- * ccc_object::cob_pending_list. Protected by
- * ccc_object::cob_pending_guard. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- struct list_head cpg_pending_linkage;
- /** VM page */
- struct page *cpg_page;
-};
-
-static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
-{
- return container_of(slice, struct ccc_page, cpg_cl);
-}
-
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
-
-struct ccc_device {
- struct cl_device cdv_cl;
- struct super_block *cdv_sb;
- struct cl_device *cdv_next;
-};
-
-struct ccc_lock {
- struct cl_lock_slice clk_cl;
-};
-
-struct ccc_req {
- struct cl_req_slice crq_cl;
-};
-
-void *ccc_key_init (const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_key_fini (const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-
-int ccc_device_init (const struct lu_env *env,
- struct lu_device *d,
- const char *name, struct lu_device *next);
-struct lu_device *ccc_device_fini (const struct lu_env *env,
- struct lu_device *d);
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops);
-struct lu_device *ccc_device_free (const struct lu_env *env,
- struct lu_device *d);
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops);
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-void ccc_umount(const struct lu_env *env, struct cl_device *dev);
-int ccc_global_init(struct lu_device_type *device_type);
-void ccc_global_fini(struct lu_device_type *device_type);
-int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
- const struct cl_object_conf *conf);
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
-int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io,
- const struct cl_lock_operations *lkops);
-int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb);
-int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice);
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
-void ccc_transient_page_verify(const struct cl_page *page);
-int ccc_transient_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, int nonblock);
-void ccc_transient_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-void ccc_transient_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-void ccc_transient_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-void ccc_transient_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice);
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state);
-
-void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end);
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end);
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
-void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
- size_t nob);
-void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
- struct cl_io *io);
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed);
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *oa, u64 flags);
-
-struct lu_device *ccc2lu_dev (struct ccc_device *vdv);
-struct lu_object *ccc2lu (struct ccc_object *vob);
-struct ccc_device *lu2ccc_dev (const struct lu_device *d);
-struct ccc_device *cl2ccc_dev (const struct cl_device *d);
-struct ccc_object *lu2ccc (const struct lu_object *obj);
-struct ccc_object *cl2ccc (const struct cl_object *obj);
-struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
-struct ccc_io *cl2ccc_io (const struct lu_env *env,
- const struct cl_io_slice *slice);
-struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
-struct page *cl2vm_page (const struct cl_page_slice *slice);
-struct inode *ccc_object_inode(const struct cl_object *obj);
-struct ccc_object *cl_inode2ccc (struct inode *inode);
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
-
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
-int ccc_object_invariant(const struct cl_object *obj);
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
-void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
-
-__u16 ll_dirent_type_get(struct lu_dirent *ent);
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
-__u32 cl_fid_build_gen(const struct lu_fid *fid);
-
-# define CLOBINVRNT(env, clob, expr) \
- ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
-
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
-int cl_ocd_update(struct obd_device *host,
- struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data);
-
-struct ccc_grouplock {
- struct lu_env *cg_env;
- struct cl_io *cg_io;
- struct cl_lock *cg_lock;
- unsigned long cg_gid;
-};
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg);
-void cl_put_grouplock(struct ccc_grouplock *cg);
-
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering. */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
-int lov_read_and_clear_async_rc(struct cl_object *clob);
-
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
-/**
- * Data structure managing a client's cached clean pages. An LRU of
- * pages is maintained, along with other statistics.
- */
-struct cl_client_cache {
- atomic_t ccc_users; /* # of users (OSCs) of this data */
- struct list_head ccc_lru; /* LRU list of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries possible */
- unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
-};
-
-#endif /*LCLIENT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
deleted file mode 100644
index 6b14406b2920..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LINUX_COMPAT25_H
-#define _LINUX_COMPAT25_H
-
-#include <linux/fs_struct.h>
-#include <linux/namei.h>
-
-#include "lustre_patchless_compat.h"
-
-/*
- * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
- * ATTR_* attributes (see bug 13828)
- */
-#define ATTR_BLOCKS (1 << 27)
-
-#define current_ngroups current_cred()->group_info->ngroups
-#define current_groups current_cred()->group_info->small_block
-
-/*
- * OBD needs a working random driver, so all our
- * initialization routines must be called after device
- * driver initialization.
- */
-#ifndef MODULE
-#undef module_init
-#define module_init(a) late_initcall(a)
-#endif
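With the redefinition above, any module_init() in a built-in (non-modular) Lustre object is queued as a late_initcall(), so it runs only after ordinary device drivers, including the random driver, have initialized. A hypothetical init function illustrating the effect:

/* When MODULE is not defined, this becomes late_initcall(obd_demo_init). */
static int __init obd_demo_init(void)
{
	return 0;	/* a real init would register OBD types, start threads, ... */
}
module_init(obd_demo_init);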
-
-
-#define LTIME_S(time) (time.tv_sec)
-
-#ifndef QUOTA_OK
-# define QUOTA_OK 0
-#endif
-#ifndef NO_QUOTA
-# define NO_QUOTA (-EDQUOT)
-#endif
-
-#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
-# define ext2_set_bit __test_and_set_bit_le
-# define ext2_clear_bit __test_and_clear_bit_le
-# define ext2_test_bit test_bit_le
-# define ext2_find_first_zero_bit find_first_zero_bit_le
-# define ext2_find_next_zero_bit find_next_zero_bit_le
-#endif
-
-#endif /* _LINUX_COMPAT25_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
deleted file mode 100644
index f5d83ebe2fdd..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LINUX_LL_H
-#define _LINUX_LL_H
-
-#ifndef _LL_H
-#error Do not #include this file directly. #include <lustre_lite.h> instead
-#endif
-
-
-#include <linux/statfs.h>
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-
-#include "../obd_class.h"
-#include "../lustre_net.h"
-#include "../lustre_ha.h"
-
-#include <linux/rbtree.h>
-#include "../../include/linux/lustre_compat25.h"
-#include <linux/pagemap.h>
-
-/* lprocfs.c */
-enum {
- LPROC_LL_DIRTY_HITS = 0,
- LPROC_LL_DIRTY_MISSES,
- LPROC_LL_READ_BYTES,
- LPROC_LL_WRITE_BYTES,
- LPROC_LL_BRW_READ,
- LPROC_LL_BRW_WRITE,
- LPROC_LL_OSC_READ,
- LPROC_LL_OSC_WRITE,
- LPROC_LL_IOCTL,
- LPROC_LL_OPEN,
- LPROC_LL_RELEASE,
- LPROC_LL_MAP,
- LPROC_LL_LLSEEK,
- LPROC_LL_FSYNC,
- LPROC_LL_READDIR,
- LPROC_LL_SETATTR,
- LPROC_LL_TRUNC,
- LPROC_LL_FLOCK,
- LPROC_LL_GETATTR,
- LPROC_LL_CREATE,
- LPROC_LL_LINK,
- LPROC_LL_UNLINK,
- LPROC_LL_SYMLINK,
- LPROC_LL_MKDIR,
- LPROC_LL_RMDIR,
- LPROC_LL_MKNOD,
- LPROC_LL_RENAME,
- LPROC_LL_STAFS,
- LPROC_LL_ALLOC_INODE,
- LPROC_LL_SETXATTR,
- LPROC_LL_GETXATTR,
- LPROC_LL_GETXATTR_HITS,
- LPROC_LL_LISTXATTR,
- LPROC_LL_REMOVEXATTR,
- LPROC_LL_INODE_PERM,
- LPROC_LL_FILE_OPCODES
-};
-
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
deleted file mode 100644
index ebe8d68ed813..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LUSTRE_PATCHLESS_COMPAT_H
-#define LUSTRE_PATCHLESS_COMPAT_H
-
-#include <linux/fs.h>
-
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/hash.h>
-
-
-#define ll_delete_from_page_cache(page) delete_from_page_cache(page)
-
-static inline void
-truncate_complete_page(struct address_space *mapping, struct page *page)
-{
- if (page->mapping != mapping)
- return;
-
- if (PagePrivate(page))
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
-
- cancel_dirty_page(page);
- ClearPageMappedToDisk(page);
- ll_delete_from_page_cache(page);
-}
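A short sketch of how a caller might use the helper above to evict one cached page; the page is assumed to be locked, as the page-cache rules require (the surrounding function is illustrative, not from this header):

/* Illustrative: drop one locked page from an inode's page cache. */
static void demo_drop_page(struct inode *inode, struct page *page)
{
	/* If the page raced with another truncate, its mapping no longer
	 * matches and truncate_complete_page() simply returns. */
	truncate_complete_page(inode->i_mapping, page);
}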
-
-#ifndef ATTR_CTIME_SET
-/*
- * set ATTR_CTIME_SET to a high value to avoid any risk of collision with other
- * ATTR_* attributes (see bug 13828)
- */
-#define ATTR_CTIME_SET (1 << 28)
-#endif
-
-#endif /* LUSTRE_PATCHLESS_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_user.h b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
deleted file mode 100644
index 9cc2849f3f85..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_user.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/linux/lustre_user.h
- *
- * Lustre public user-space interface definitions.
- */
-
-#ifndef _LINUX_LUSTRE_USER_H
-#define _LINUX_LUSTRE_USER_H
-
-# include <linux/quota.h>
-
-/*
- * asm-x86_64/processor.h on some SLES 9 distros seems to use
- * kernel-only typedefs. Fortunately, skipping it altogether is OK
- * (for now).
- */
-#define __ASM_X86_64_PROCESSOR_H
-
-#include <linux/string.h>
-
-/*
- * We always need to use the 64-bit version because the structure
- * is shared across the entire cluster, where 32-bit and 64-bit
- * machines coexist.
- */
-#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
-typedef struct stat64 lstat_t;
-#define lstat_f lstat64
-#else
-typedef struct stat lstat_t;
-#define lstat_f lstat
-#endif
-
-#define HAVE_LOV_USER_MDS_DATA
-
-#endif /* _LUSTRE_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
deleted file mode 100644
index 2817e88e014a..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LINUX_OBD_H
-#define __LINUX_OBD_H
-
-#ifndef __OBD_H
-#error Do not #include this file directly. #include <obd.h> instead
-#endif
-
-#include "../obd_support.h"
-
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/sched.h> /* for struct task_struct, for current.h */
-#include <linux/mount.h>
-
-#include "../lustre_intent.h"
-
-struct ll_iattr {
- struct iattr iattr;
- unsigned int ia_attr_flags;
-};
-
-#define CLIENT_OBD_LIST_LOCK_DEBUG 1
-
-typedef struct {
- spinlock_t lock;
-
- unsigned long time;
- struct task_struct *task;
- const char *func;
- int line;
-} client_obd_lock_t;
-
-static inline void __client_obd_list_lock(client_obd_lock_t *lock,
- const char *func, int line)
-{
- unsigned long cur = jiffies;
- while (1) {
- if (spin_trylock(&lock->lock)) {
- LASSERT(lock->task == NULL);
- lock->task = current;
- lock->func = func;
- lock->line = line;
- lock->time = jiffies;
- break;
- }
-
- if (time_before(cur + 5 * HZ, jiffies) &&
- time_before(lock->time + 5 * HZ, jiffies)) {
- struct task_struct *task = lock->task;
-
- if (task == NULL)
- continue;
-
- LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
- current->comm, current->pid,
- lock, task->comm, task->pid,
- lock->func, lock->line,
- (jiffies - lock->time) / HZ);
- LCONSOLE_WARN("====== for current process =====\n");
- dump_stack();
- LCONSOLE_WARN("====== end =======\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1000 * HZ);
- }
- cpu_relax();
- }
-}
-
-#define client_obd_list_lock(lock) \
- __client_obd_list_lock(lock, __func__, __LINE__)
-
-static inline void client_obd_list_unlock(client_obd_lock_t *lock)
-{
- LASSERT(lock->task != NULL);
- lock->task = NULL;
- lock->time = jiffies;
- spin_unlock(&lock->lock);
-}
-
-
-static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
-{
- spin_lock_init(&lock->lock);
-}
-
-static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
-{}
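The wrapper above is a debugging spinlock: it records which task took the lock and where, and complains with a stack dump when the holder keeps it for more than about five seconds. Usage follows the usual init/lock/unlock pattern; a sketch with a stand-in for real client-obd state:

/* Illustrative use of the debugging list lock defined above. */
static client_obd_lock_t demo_lock;
static int demo_counter;

static void demo_client_obd_lock_usage(void)
{
	client_obd_list_lock_init(&demo_lock);

	client_obd_list_lock(&demo_lock);	/* records current task, func, line */
	demo_counter++;				/* protected section */
	client_obd_list_unlock(&demo_lock);

	client_obd_list_lock_done(&demo_lock);	/* currently a no-op, kept for symmetry */
}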
-
-#endif /* __LINUX_OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
deleted file mode 100644
index 22f3777c72b8..000000000000
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lprocfs_status.h
- *
- * Top level header file for LProc SNMP
- *
- * Author: Hariharan Thantry thantry@users.sourceforge.net
- */
-#ifndef _LPROCFS_SNMP_H
-#define _LPROCFS_SNMP_H
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include "lustre/lustre_idl.h"
-
-struct lprocfs_vars {
- const char *name;
- struct file_operations *fops;
- void *data;
- /**
- * /proc file mode.
- */
- umode_t proc_mode;
-};
-
-struct lprocfs_static_vars {
- struct lprocfs_vars *obd_vars;
- struct attribute_group *sysfs_vars;
-};
-
-/* if we find more consumers this could be generalized */
-#define OBD_HIST_MAX 32
-struct obd_histogram {
- spinlock_t oh_lock;
- unsigned long oh_buckets[OBD_HIST_MAX];
-};
-
-enum {
- BRW_R_PAGES = 0,
- BRW_W_PAGES,
- BRW_R_RPC_HIST,
- BRW_W_RPC_HIST,
- BRW_R_IO_TIME,
- BRW_W_IO_TIME,
- BRW_R_DISCONT_PAGES,
- BRW_W_DISCONT_PAGES,
- BRW_R_DISCONT_BLOCKS,
- BRW_W_DISCONT_BLOCKS,
- BRW_R_DISK_IOSIZE,
- BRW_W_DISK_IOSIZE,
- BRW_R_DIO_FRAGS,
- BRW_W_DIO_FRAGS,
- BRW_LAST,
-};
-
-struct brw_stats {
- struct obd_histogram hist[BRW_LAST];
-};
-
-enum {
- RENAME_SAMEDIR_SIZE = 0,
- RENAME_CROSSDIR_SRC_SIZE,
- RENAME_CROSSDIR_TGT_SIZE,
- RENAME_LAST,
-};
-
-struct rename_stats {
- struct obd_histogram hist[RENAME_LAST];
-};
-
-/* An lprocfs counter can be configured using the enum bit masks below.
- *
- * LPROCFS_CNTR_EXTERNALLOCK indicates that an external lock already
- * protects this counter from concurrent updates. If not specified,
- * lprocfs uses an internal per-counter lock variable. External locks are
- * not used to protect counter increments, but are used to protect
- * counter readout and resets.
- *
- * LPROCFS_CNTR_AVGMINMAX indicates multi-valued counter samples
- * (i.e. the counter can be incremented by more than "1"). When specified,
- * the counter maintains min, max and sum in addition to a simple
- * invocation count, which allows averages to be computed.
- * If not specified, the counter is an increment-by-1 counter;
- * min, max, sum, etc. are not maintained.
- *
- * LPROCFS_CNTR_STDDEV indicates that the counter should track sum of
- * squares (for multi-valued counter samples only). This allows
- * external computation of standard deviation, but involves a 64-bit
- * multiply per counter increment.
- */
-
-enum {
- LPROCFS_CNTR_EXTERNALLOCK = 0x0001,
- LPROCFS_CNTR_AVGMINMAX = 0x0002,
- LPROCFS_CNTR_STDDEV = 0x0004,
-
- /* counter data type */
- LPROCFS_TYPE_REGS = 0x0100,
- LPROCFS_TYPE_BYTES = 0x0200,
- LPROCFS_TYPE_PAGES = 0x0400,
- LPROCFS_TYPE_CYCLE = 0x0800,
-};
-
-#define LC_MIN_INIT ((~(__u64)0) >> 1)
-
-struct lprocfs_counter_header {
- unsigned int lc_config;
- const char *lc_name; /* must be static */
- const char *lc_units; /* must be static */
-};
-
-struct lprocfs_counter {
- __s64 lc_count;
- __s64 lc_min;
- __s64 lc_max;
- __s64 lc_sumsquare;
-	/*
-	 * Every counter has lc_array_sum[0]; lc_array_sum[1] exists only for
-	 * counters in IRQ-safe stats, i.e. stats allocated with the
-	 * LPROCFS_STATS_FLAG_IRQ_SAFE flag.
-	 */
- __s64 lc_array_sum[1];
-};
-#define lc_sum lc_array_sum[0]
-#define lc_sum_irq lc_array_sum[1]
-
-struct lprocfs_percpu {
-#ifndef __GNUC__
- __s64 pad;
-#endif
- struct lprocfs_counter lp_cntr[0];
-};
-
-#define LPROCFS_GET_NUM_CPU 0x0001
-#define LPROCFS_GET_SMP_ID 0x0002
-
-enum lprocfs_stats_flags {
- LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
- LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
- * area and need locking */
- LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */
-};
-
-enum lprocfs_fields_flags {
- LPROCFS_FIELDS_FLAGS_CONFIG = 0x0001,
- LPROCFS_FIELDS_FLAGS_SUM = 0x0002,
- LPROCFS_FIELDS_FLAGS_MIN = 0x0003,
- LPROCFS_FIELDS_FLAGS_MAX = 0x0004,
- LPROCFS_FIELDS_FLAGS_AVG = 0x0005,
- LPROCFS_FIELDS_FLAGS_SUMSQUARE = 0x0006,
- LPROCFS_FIELDS_FLAGS_COUNT = 0x0007,
-};
-
-struct lprocfs_stats {
- /* # of counters */
- unsigned short ls_num;
- /* 1 + the biggest cpu # whose ls_percpu slot has been allocated */
- unsigned short ls_biggest_alloc_num;
- enum lprocfs_stats_flags ls_flags;
-	/* Lock used when there are no percpu stats areas; for percpu stats,
-	 * it is used to protect changes to ls_biggest_alloc_num */
- spinlock_t ls_lock;
-
- /* has ls_num of counter headers */
- struct lprocfs_counter_header *ls_cnt_header;
- struct lprocfs_percpu *ls_percpu[0];
-};
-
-#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)
-
-/* Pack all opcodes down into a single monotonically increasing index */
-static inline int opcode_offset(__u32 opc)
-{
- if (opc < OST_LAST_OPC) {
- /* OST opcode */
- return (opc - OST_FIRST_OPC);
- } else if (opc < MDS_LAST_OPC) {
- /* MDS opcode */
- return (opc - MDS_FIRST_OPC +
- OPC_RANGE(OST));
- } else if (opc < LDLM_LAST_OPC) {
- /* LDLM Opcode */
- return (opc - LDLM_FIRST_OPC +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < MGS_LAST_OPC) {
- /* MGS Opcode */
- return (opc - MGS_FIRST_OPC +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < OBD_LAST_OPC) {
- /* OBD Ping */
- return (opc - OBD_FIRST_OPC +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < LLOG_LAST_OPC) {
- /* LLOG Opcode */
- return (opc - LLOG_FIRST_OPC +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < QUOTA_LAST_OPC) {
- /* LQUOTA Opcode */
- return (opc - QUOTA_FIRST_OPC +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < SEQ_LAST_OPC) {
- /* SEQ opcode */
- return (opc - SEQ_FIRST_OPC +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < SEC_LAST_OPC) {
- /* SEC opcode */
- return (opc - SEC_FIRST_OPC +
- OPC_RANGE(SEQ) +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < FLD_LAST_OPC) {
- /* FLD opcode */
- return (opc - FLD_FIRST_OPC +
- OPC_RANGE(SEC) +
- OPC_RANGE(SEQ) +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < UPDATE_LAST_OPC) {
- /* update opcode */
- return (opc - UPDATE_FIRST_OPC +
- OPC_RANGE(FLD) +
- OPC_RANGE(SEC) +
- OPC_RANGE(SEQ) +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else {
- /* Unknown Opcode */
- return -1;
- }
-}
-
-
-#define LUSTRE_MAX_OPCODES (OPC_RANGE(OST) + \
- OPC_RANGE(MDS) + \
- OPC_RANGE(LDLM) + \
- OPC_RANGE(MGS) + \
- OPC_RANGE(OBD) + \
- OPC_RANGE(LLOG) + \
-			    OPC_RANGE(QUOTA) + \
-			    OPC_RANGE(SEQ)  + \
-			    OPC_RANGE(SEC)  + \
- OPC_RANGE(FLD) + \
- OPC_RANGE(UPDATE))
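opcode_offset() packs the disjoint opcode ranges into one dense, monotonically increasing index, so a flat array of LUSTRE_MAX_OPCODES counters can be indexed directly by RPC opcode. A sketch of the intended use (the stats object and opcode are placeholders):

/* Illustrative: count one RPC by opcode in a flat per-opcode stats array. */
static void demo_count_rpc(struct lprocfs_stats *svc_stats, __u32 opc)
{
	int offset = opcode_offset(opc);

	/* -1 means the opcode lies outside every known range */
	if (offset >= 0 && offset < LUSTRE_MAX_OPCODES)
		lprocfs_counter_incr(svc_stats, offset);
}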
-
-#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \
- OPC_RANGE(EXTRA))
-
-enum {
- PTLRPC_REQWAIT_CNTR = 0,
- PTLRPC_REQQDEPTH_CNTR,
- PTLRPC_REQACTIVE_CNTR,
- PTLRPC_TIMEOUT,
- PTLRPC_REQBUF_AVAIL_CNTR,
- PTLRPC_LAST_CNTR
-};
-
-#define PTLRPC_FIRST_CNTR PTLRPC_REQWAIT_CNTR
-
-enum {
- LDLM_GLIMPSE_ENQUEUE = 0,
- LDLM_PLAIN_ENQUEUE,
- LDLM_EXTENT_ENQUEUE,
- LDLM_FLOCK_ENQUEUE,
- LDLM_IBITS_ENQUEUE,
- MDS_REINT_SETATTR,
- MDS_REINT_CREATE,
- MDS_REINT_LINK,
- MDS_REINT_UNLINK,
- MDS_REINT_RENAME,
- MDS_REINT_OPEN,
- MDS_REINT_SETXATTR,
- BRW_READ_BYTES,
- BRW_WRITE_BYTES,
- EXTRA_LAST_OPC
-};
-
-#define EXTRA_FIRST_OPC LDLM_GLIMPSE_ENQUEUE
-/* class_obd.c */
-extern struct dentry *debugfs_lustre_root;
-extern struct kobject *lustre_kobj;
-
-struct obd_device;
-struct obd_histogram;
-
-/* Days / hours / mins / seconds format */
-struct dhms {
- int d, h, m, s;
-};
-
-static inline void s2dhms(struct dhms *ts, time64_t secs64)
-{
- unsigned int secs;
-
- ts->d = div_u64_rem(secs64, 86400, &secs);
- ts->h = secs / 3600;
- secs = secs % 3600;
- ts->m = secs / 60;
- ts->s = secs % 60;
-}
-#define DHMS_FMT "%dd%dh%02dm%02ds"
-#define DHMS_VARS(x) (x)->d, (x)->h, (x)->m, (x)->s
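s2dhms() splits a seconds count into days/hours/minutes/seconds, and DHMS_FMT/DHMS_VARS pair it with printk-style formatting. For example (the "age" label and the seq_file context are illustrative):

/* Illustrative: print an elapsed time using the helpers above. */
static void demo_print_age(struct seq_file *m, time64_t age_seconds)
{
	struct dhms ts;

	s2dhms(&ts, age_seconds);
	seq_printf(m, "age: " DHMS_FMT "\n", DHMS_VARS(&ts));
}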
-
-#define JOBSTATS_JOBID_VAR_MAX_LEN 20
-#define JOBSTATS_DISABLE "disable"
-#define JOBSTATS_PROCNAME_UID "procname_uid"
-#define JOBSTATS_NODELOCAL "nodelocal"
-
-int lprocfs_write_frac_helper(const char __user *buffer,
- unsigned long count, int *val, int mult);
-int lprocfs_read_frac_helper(char *buffer, unsigned long count,
- long val, int mult);
-int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid);
-/*
- * \return value
- * < 0 : on error (only possible when opc is LPROCFS_GET_SMP_ID)
- */
-static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
- unsigned long *flags)
-{
- int rc = 0;
-
- switch (opc) {
- default:
- LBUG();
-
- case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return 0;
- } else {
- unsigned int cpuid = get_cpu();
-
- if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
- rc = lprocfs_stats_alloc_one(stats, cpuid);
- if (rc < 0) {
- put_cpu();
- return rc;
- }
- }
- return cpuid;
- }
-
- case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return 1;
- }
- return stats->ls_biggest_alloc_num;
- }
-}
-
-static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
- unsigned long *flags)
-{
- switch (opc) {
- default:
- LBUG();
-
- case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
- spin_unlock_irqrestore(&stats->ls_lock,
- *flags);
- } else {
- spin_unlock(&stats->ls_lock);
- }
- } else {
- put_cpu();
- }
- return;
-
- case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
- spin_unlock_irqrestore(&stats->ls_lock,
- *flags);
- } else {
- spin_unlock(&stats->ls_lock);
- }
- }
- return;
- }
-}
-
-static inline unsigned int
-lprocfs_stats_counter_size(struct lprocfs_stats *stats)
-{
- unsigned int percpusize;
-
- percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
-
- /* irq safe stats need lc_array_sum[1] */
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpusize += stats->ls_num * sizeof(__s64);
-
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
- percpusize = L1_CACHE_ALIGN(percpusize);
-
- return percpusize;
-}
-
-static inline struct lprocfs_counter *
-lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid,
- int index)
-{
- struct lprocfs_counter *cntr;
-
- cntr = &stats->ls_percpu[cpuid]->lp_cntr[index];
-
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- cntr = (void *)cntr + index * sizeof(__s64);
-
- return cntr;
-}
-
-/* Two optimized LPROCFS counter increment functions are provided:
- * lprocfs_counter_incr(stats, idx)	    - optimized for by-one counters
- * lprocfs_counter_add(stats, idx, amount) - use for multi-valued counters
- * Counter data layout allows config flag, counter lock and the
- * count itself to reside within a single cache line.
- */
-
-void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount);
-void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount);
-
-#define lprocfs_counter_incr(stats, idx) \
- lprocfs_counter_add(stats, idx, 1)
-#define lprocfs_counter_decr(stats, idx) \
- lprocfs_counter_sub(stats, idx, 1)
-
-__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
- struct lprocfs_counter_header *header,
- enum lprocfs_stats_flags flags,
- enum lprocfs_fields_flags field);
-static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
- int idx,
- enum lprocfs_fields_flags field)
-{
- int i;
- unsigned int num_cpu;
- unsigned long flags = 0;
- __u64 ret = 0;
-
- LASSERT(stats != NULL);
-
- num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
- for (i = 0; i < num_cpu; i++) {
- if (stats->ls_percpu[i] == NULL)
- continue;
- ret += lprocfs_read_helper(
- lprocfs_stats_counter_get(stats, i, idx),
- &stats->ls_cnt_header[idx], stats->ls_flags,
- field);
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
- return ret;
-}
-
-extern struct lprocfs_stats *
-lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
-void lprocfs_clear_stats(struct lprocfs_stats *stats);
-void lprocfs_free_stats(struct lprocfs_stats **stats);
-void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned conf, const char *name, const char *units);
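Putting the counter pieces together: a stats block is allocated with lprocfs_alloc_stats(), each slot is described once with lprocfs_counter_init(), and events are recorded with the incr/add helpers above. A sketch (the counter names, units and values are invented for illustration):

/* Illustrative: one by-one counter and one multi-valued byte counter. */
static struct lprocfs_stats *demo_setup_stats(void)
{
	struct lprocfs_stats *stats;

	stats = lprocfs_alloc_stats(2, LPROCFS_STATS_FLAG_NONE);
	if (!stats)
		return NULL;

	lprocfs_counter_init(stats, 0, LPROCFS_TYPE_REGS,
			     "requests", "reqs");
	lprocfs_counter_init(stats, 1,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
			     "read_bytes", "bytes");

	lprocfs_counter_incr(stats, 0);		/* one more request */
	lprocfs_counter_add(stats, 1, 4096);	/* a 4 KiB read sample */
	return stats;
}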
-struct obd_export;
-int lprocfs_exp_cleanup(struct obd_export *exp);
-struct dentry *ldebugfs_add_simple(struct dentry *root,
- char *name,
- void *data,
- struct file_operations *fops);
-
-int ldebugfs_register_stats(struct dentry *parent,
- const char *name,
- struct lprocfs_stats *stats);
-
-/* lprocfs_status.c */
-int ldebugfs_add_vars(struct dentry *parent,
- struct lprocfs_vars *var,
- void *data);
-
-struct dentry *ldebugfs_register(const char *name,
- struct dentry *parent,
- struct lprocfs_vars *list,
- void *data);
-
-void ldebugfs_remove(struct dentry **entryp);
-
-int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
- struct attribute_group *attrs);
-int lprocfs_obd_cleanup(struct obd_device *obd);
-
-int ldebugfs_seq_create(struct dentry *parent,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data);
-int ldebugfs_obd_seq_create(struct obd_device *dev,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data);
-
-/* Generic callbacks */
-
-int lprocfs_rd_uint(struct seq_file *m, void *data);
-int lprocfs_wr_uint(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
-int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
-int lprocfs_rd_import(struct seq_file *m, void *data);
-int lprocfs_rd_state(struct seq_file *m, void *data);
-int lprocfs_rd_connect_flags(struct seq_file *m, void *data);
-
-struct adaptive_timeout;
-int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at);
-int lprocfs_rd_timeouts(struct seq_file *m, void *data);
-int lprocfs_wr_timeouts(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-int lprocfs_wr_ping(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
-int lprocfs_wr_import(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
-int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
-int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
-
-/* Statfs helpers */
-
-int lprocfs_write_helper(const char __user *buffer, unsigned long count,
- int *val);
-int lprocfs_write_u64_helper(const char __user *buffer,
- unsigned long count, __u64 *val);
-int lprocfs_write_frac_u64_helper(const char *buffer,
- unsigned long count,
- __u64 *val, int mult);
-char *lprocfs_find_named_value(const char *buffer, const char *name,
- size_t *count);
-void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
-void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
-void lprocfs_oh_clear(struct obd_histogram *oh);
-unsigned long lprocfs_oh_sum(struct obd_histogram *oh);
-
-void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
- struct lprocfs_counter *cnt);
-
-int lprocfs_single_release(struct inode *, struct file *);
-int lprocfs_seq_release(struct inode *, struct file *);
-
-/* You must use these macros when you want to refer to
- * the import in a client obd_device for an lprocfs entry */
-#define LPROCFS_CLIMP_CHECK(obd) do { \
- typecheck(struct obd_device *, obd); \
- down_read(&(obd)->u.cli.cl_sem); \
- if ((obd)->u.cli.cl_import == NULL) { \
- up_read(&(obd)->u.cli.cl_sem); \
- return -ENODEV; \
- } \
-} while (0)
-#define LPROCFS_CLIMP_EXIT(obd) \
- up_read(&(obd)->u.cli.cl_sem)
-
-
-/* Write the name##_seq_show function and call LPROC_SEQ_FOPS_RO for read-only
-  proc entries; otherwise, also define a name##_seq_write function for a
-  read-write proc entry and call LPROC_SEQ_FOPS instead. Finally,
-  call ldebugfs_obd_seq_create(obd, filename, 0444, &name##_fops, data); */
-#define __LPROC_SEQ_FOPS(name, custom_seq_write) \
-static int name##_single_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name##_seq_show, inode->i_private); \
-} \
-static struct file_operations name##_fops = { \
- .owner = THIS_MODULE, \
- .open = name##_single_open, \
- .read = seq_read, \
- .write = custom_seq_write, \
- .llseek = seq_lseek, \
- .release = lprocfs_single_release, \
-}
-
-#define LPROC_SEQ_FOPS_RO(name) __LPROC_SEQ_FOPS(name, NULL)
-#define LPROC_SEQ_FOPS(name) __LPROC_SEQ_FOPS(name, name##_seq_write)
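The macro family above turns a plain name##_seq_show function into a complete file_operations that can be handed to ldebugfs_obd_seq_create(). A sketch of a read-only entry built this way (the "demo_blocksize" name and the printed value are invented):

/* Illustrative: a read-only debugfs entry using LPROC_SEQ_FOPS_RO. */
static int demo_blocksize_seq_show(struct seq_file *m, void *v)
{
	/* m->private is the data pointer passed at registration time */
	seq_printf(m, "%u\n", 4096);	/* a real entry would print obd state */
	return 0;
}
LPROC_SEQ_FOPS_RO(demo_blocksize);

/* Registered from obd setup code, for example:
 *	ldebugfs_obd_seq_create(obd, "blocksize", 0444,
 *				&demo_blocksize_fops, obd);
 */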
-
-#define LPROC_SEQ_FOPS_RO_TYPE(name, type) \
- static int name##_##type##_seq_show(struct seq_file *m, void *v)\
- { \
- return lprocfs_rd_##type(m, m->private); \
- } \
- LPROC_SEQ_FOPS_RO(name##_##type)
-
-#define LPROC_SEQ_FOPS_RW_TYPE(name, type) \
- static int name##_##type##_seq_show(struct seq_file *m, void *v)\
- { \
- return lprocfs_rd_##type(m, m->private); \
- } \
- static ssize_t name##_##type##_seq_write(struct file *file, \
- const char __user *buffer, size_t count, \
- loff_t *off) \
- { \
- struct seq_file *seq = file->private_data; \
- return lprocfs_wr_##type(file, buffer, \
- count, seq->private); \
- } \
- LPROC_SEQ_FOPS(name##_##type)
-
-#define LPROC_SEQ_FOPS_WR_ONLY(name, type) \
- static ssize_t name##_##type##_write(struct file *file, \
- const char __user *buffer, size_t count, \
- loff_t *off) \
- { \
- return lprocfs_wr_##type(file, buffer, count, off); \
- } \
- static int name##_##type##_open(struct inode *inode, struct file *file) \
- { \
- return single_open(file, NULL, inode->i_private); \
- } \
- static struct file_operations name##_##type##_fops = { \
- .open = name##_##type##_open, \
- .write = name##_##type##_write, \
- .release = lprocfs_single_release, \
- }
-
-struct lustre_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len);
-};
-
-#define LUSTRE_ATTR(name, mode, show, store) \
-static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)
-
-#define LUSTRE_RO_ATTR(name) LUSTRE_ATTR(name, 0444, name##_show, NULL)
-#define LUSTRE_RW_ATTR(name) LUSTRE_ATTR(name, 0644, name##_show, name##_store)
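The lustre_attr wrappers above connect plain show/store callbacks to sysfs through lustre_sysfs_ops. A sketch of a read-write attribute (the attribute name and backing variable are invented for illustration):

/* Illustrative: a read-write sysfs attribute built with LUSTRE_RW_ATTR. */
static unsigned int demo_max_pages = 256;

static ssize_t max_pages_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%u\n", demo_max_pages);
}

static ssize_t max_pages_store(struct kobject *kobj, struct attribute *attr,
			       const char *buffer, size_t count)
{
	unsigned int val;

	if (kstrtouint(buffer, 10, &val))
		return -EINVAL;
	demo_max_pages = val;
	return count;
}
LUSTRE_RW_ATTR(max_pages);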
-
-ssize_t lustre_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf);
-ssize_t lustre_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len);
-
-extern const struct sysfs_ops lustre_sysfs_ops;
-
-/* all quota proc functions */
-int lprocfs_quota_rd_bunit(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_btune(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_iunit(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_itune(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
- int *eof, void *data);
-int lprocfs_quota_wr_type(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_switch_seconds(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
- unsigned long count, void *data);
-int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_switch_qs(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_boundary_factor(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_least_bunit(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_least_iunit(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-int lprocfs_quota_wr_qs_factor(struct file *file,
- const char *buffer, unsigned long count,
- void *data);
-#endif /* LPROCFS_SNMP_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
deleted file mode 100644
index e2199c2cea44..000000000000
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LUSTRE_LU_OBJECT_H
-#define __LUSTRE_LU_OBJECT_H
-
-#include <stdarg.h>
-#include "../../include/linux/libcfs/libcfs.h"
-#include "lustre/lustre_idl.h"
-#include "lu_ref.h"
-
-struct seq_file;
-struct lustre_cfg;
-struct lprocfs_stats;
-
-/** \defgroup lu lu
- * lu_* data-types represent server-side entities shared by data and meta-data
- * stacks.
- *
- * Design goals:
- *
- * -# support for layering.
- *
- * Server side object is split into layers, one per device in the
- * corresponding device stack. Individual layer is represented by struct
- * lu_object. Compound layered object --- by struct lu_object_header. Most
- * interface functions take lu_object as an argument and operate on the
- * whole compound object. This decision was made due to the following
- * reasons:
- *
- * - it's envisaged that lu_object will be used much more often than
- * lu_object_header;
- *
- * - we want lower (non-top) layers to be able to initiate operations
- * on the whole object.
- *
- * Generic code supports layering more complex than simple stacking, e.g.,
- * it is possible that at some layer object "spawns" multiple sub-objects
- * on the lower layer.
- *
- * -# fid-based identification.
- *
- * Compound object is uniquely identified by its fid. Objects are indexed
- * by their fids (hash table is used for index).
- *
- * -# caching and life-cycle management.
- *
- * Object's life-time is controlled by reference counting. When reference
- * count drops to 0, object is returned to cache. Cached objects still
- * retain their identity (i.e., fid), and can be recovered from cache.
- *
- * Objects are kept in the global LRU list, and lu_site_purge() function
- * can be used to reclaim given number of unused objects from the tail of
- * the LRU.
- *
- * -# avoiding recursion.
- *
- * Generic code tries to replace recursion through layers with iteration
- * where possible. Additionally, to reduce stack consumption, data are,
- * whenever practical, allocated through the lu_context_key interface
- * rather than on the stack.
- * @{
- */
-
-struct lu_site;
-struct lu_object;
-struct lu_device;
-struct lu_object_header;
-struct lu_context;
-struct lu_env;
-
-/**
- * Operations common for data and meta-data devices.
- */
-struct lu_device_operations {
- /**
- * Allocate object for the given device (without lower-layer
- * parts). This is called by lu_object_operations::loo_object_init()
- * from the parent layer, and should setup at least lu_object::lo_dev
- * and lu_object::lo_ops fields of resulting lu_object.
- *
- * Object creation protocol.
- *
- * Due to design goal of avoiding recursion, object creation (see
- * lu_object_alloc()) is somewhat involved:
- *
- * - first, lu_device_operations::ldo_object_alloc() method of the
- * top-level device in the stack is called. It should allocate top
- * level object (including lu_object_header), but without any
- * lower-layer sub-object(s).
- *
- * - then lu_object_alloc() sets fid in the header of newly created
- * object.
- *
- * - then lu_object_operations::loo_object_init() is called. It has
- * to allocate lower-layer object(s). To do this,
- * lu_object_operations::loo_object_init() calls ldo_object_alloc()
- * of the lower-layer device(s).
- *
- * - for all new objects allocated by
- * lu_object_operations::loo_object_init() (and inserted into object
- * stack), lu_object_operations::loo_object_init() is called again
- * repeatedly, until no new objects are created.
- *
- * \post ergo(!IS_ERR(result), result->lo_dev == d &&
- * result->lo_ops != NULL);
- */
- struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
- const struct lu_object_header *h,
- struct lu_device *d);
- /**
- * process config specific for device.
- */
- int (*ldo_process_config)(const struct lu_env *env,
- struct lu_device *, struct lustre_cfg *);
- int (*ldo_recovery_complete)(const struct lu_env *,
- struct lu_device *);
-
- /**
-	 * Initialize local objects for the device. This method is called
-	 * after the layer has been initialized (after the LCFG_SETUP stage)
-	 * and before it starts serving user requests.
- */
-
- int (*ldo_prepare)(const struct lu_env *,
- struct lu_device *parent,
- struct lu_device *dev);
-
-};
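The ldo_object_alloc comment above describes the object-creation protocol in words; the simplified sketch below shows the shape of the loop the generic code drives (the real lu_object_alloc() adds hashing, wakeups and error clean-up that are omitted here):

/* Simplified sketch of the allocation protocol, not the actual implementation. */
static struct lu_object *demo_object_alloc(const struct lu_env *env,
					   struct lu_device *top,
					   const struct lu_fid *f,
					   const struct lu_object_conf *conf)
{
	struct lu_object *o;
	struct lu_object *scan;
	int rc;

	/* 1. the top-level device allocates the header plus its own slice */
	o = top->ld_ops->ldo_object_alloc(env, NULL, top);
	if (IS_ERR(o))
		return o;

	/* 2. the fid is recorded in the shared header */
	o->lo_header->loh_fid = *f;

	/* 3. loo_object_init() runs over the layer list; each call may append
	 * further lower-layer slices, which the walk then also initializes */
	list_for_each_entry(scan, &o->lo_header->loh_layers, lo_linkage) {
		rc = scan->lo_ops->loo_object_init(env, scan, conf);
		if (rc)
			return ERR_PTR(rc);	/* clean-up omitted in this sketch */
	}
	return o;
}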
-
-/**
- * For lu_object_conf flags
- */
-typedef enum {
-	/* This is a new object to be allocated, or the file
-	 * corresponding to the object does not exist. */
- LOC_F_NEW = 0x00000001,
-} loc_flags_t;
-
-/**
- * Object configuration, describing particulars of the object being created.
- * On the server this is not used, as server objects are fully identified by
- * fid. On the client, the configuration contains a struct lustre_md.
- */
-struct lu_object_conf {
- /**
- * Some hints for obj find and alloc.
- */
- loc_flags_t loc_flags;
-};
-
-/**
- * Type of "printer" function used by lu_object_operations::loo_object_print()
- * method.
- *
- * Printer function is needed to provide some flexibility in (semi-)debugging
- * output: possible implementations: printk, CDEBUG, sysfs/seq_file
- */
-typedef int (*lu_printer_t)(const struct lu_env *env,
- void *cookie, const char *format, ...)
- __printf(3, 4);
-
-/**
- * Operations specific for particular lu_object.
- */
-struct lu_object_operations {
-
- /**
- * Allocate lower-layer parts of the object by calling
- * lu_device_operations::ldo_object_alloc() of the corresponding
- * underlying device.
- *
-	 * This method is called once for each object inserted into the
-	 * object stack. It is the responsibility of this method to insert
-	 * the lower-layer object(s) it creates into the appropriate places
-	 * of the object stack.
- */
- int (*loo_object_init)(const struct lu_env *env,
- struct lu_object *o,
- const struct lu_object_conf *conf);
- /**
- * Called (in top-to-bottom order) during object allocation after all
- * layers were allocated and initialized. Can be used to perform
- * initialization depending on lower layers.
- */
- int (*loo_object_start)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Called before lu_object_operations::loo_object_free() to signal
- * that object is being destroyed. Dual to
- * lu_object_operations::loo_object_init().
- */
- void (*loo_object_delete)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Dual to lu_device_operations::ldo_object_alloc(). Called when
- * object is removed from memory.
- */
- void (*loo_object_free)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Called when last active reference to the object is released (and
- * object returns to the cache). This method is optional.
- */
- void (*loo_object_release)(const struct lu_env *env,
- struct lu_object *o);
- /**
- * Optional debugging helper. Print given object.
- */
- int (*loo_object_print)(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o);
- /**
-	 * Optional debugging method. Returns true iff the object is
-	 * internally consistent.
- */
- int (*loo_object_invariant)(const struct lu_object *o);
-};
-
-/**
- * Type of lu_device.
- */
-struct lu_device_type;
-
-/**
- * Device: a layer in the server side abstraction stacking.
- */
-struct lu_device {
- /**
- * reference count. This is incremented, in particular, on each object
- * created at this layer.
- *
- * \todo XXX which means that atomic_t is probably too small.
- */
- atomic_t ld_ref;
- /**
- * Pointer to device type. Never modified once set.
- */
- struct lu_device_type *ld_type;
- /**
- * Operation vector for this device.
- */
- const struct lu_device_operations *ld_ops;
- /**
- * Stack this device belongs to.
- */
- struct lu_site *ld_site;
-
- /** \todo XXX: temporary back pointer into obd. */
- struct obd_device *ld_obd;
- /**
- * A list of references to this object, for debugging.
- */
- struct lu_ref ld_reference;
- /**
- * Link the device to the site.
- **/
- struct list_head ld_linkage;
-};
-
-struct lu_device_type_operations;
-
-/**
- * Tag bits for device type. They are used to distinguish certain groups of
- * device types.
- */
-enum lu_device_tag {
- /** this is meta-data device */
- LU_DEVICE_MD = (1 << 0),
- /** this is data device */
- LU_DEVICE_DT = (1 << 1),
- /** data device in the client stack */
- LU_DEVICE_CL = (1 << 2)
-};
-
-/**
- * Type of device.
- */
-struct lu_device_type {
- /**
- * Tag bits. Taken from enum lu_device_tag. Never modified once set.
- */
- __u32 ldt_tags;
- /**
- * Name of this class. Unique system-wide. Never modified once set.
- */
- char *ldt_name;
- /**
- * Operations for this type.
- */
- const struct lu_device_type_operations *ldt_ops;
- /**
- * \todo XXX: temporary pointer to associated obd_type.
- */
- struct obd_type *ldt_obd_type;
- /**
- * \todo XXX: temporary: context tags used by obd_*() calls.
- */
- __u32 ldt_ctx_tags;
- /**
- * Number of existing device type instances.
- */
- unsigned ldt_device_nr;
- /**
- * Linkage into a global list of all device types.
- *
- * \see lu_device_types.
- */
- struct list_head ldt_linkage;
-};
-
-/**
- * Operations on a device type.
- */
-struct lu_device_type_operations {
- /**
- * Allocate new device.
- */
- struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *lcfg);
- /**
- * Free device. Dual to
- * lu_device_type_operations::ldto_device_alloc(). Returns pointer to
- * the next device in the stack.
- */
- struct lu_device *(*ldto_device_free)(const struct lu_env *,
- struct lu_device *);
-
- /**
- * Initialize the devices after allocation
- */
- int (*ldto_device_init)(const struct lu_env *env,
- struct lu_device *, const char *,
- struct lu_device *);
- /**
- * Finalize device. Dual to
- * lu_device_type_operations::ldto_device_init(). Returns pointer to
- * the next device in the stack.
- */
- struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
- struct lu_device *);
- /**
- * Initialize device type. This is called on module load.
- */
- int (*ldto_init)(struct lu_device_type *t);
- /**
- * Finalize device type. Dual to
- * lu_device_type_operations::ldto_init(). Called on module unload.
- */
- void (*ldto_fini)(struct lu_device_type *t);
- /**
- * Called when the first device is created.
- */
- void (*ldto_start)(struct lu_device_type *t);
- /**
- * Called when number of devices drops to 0.
- */
- void (*ldto_stop)(struct lu_device_type *t);
-};
-
-static inline int lu_device_is_md(const struct lu_device *d)
-{
- return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD);
-}
-
-/**
- * Common object attributes.
- */
-struct lu_attr {
- /** size in bytes */
- __u64 la_size;
- /** modification time in seconds since Epoch */
- s64 la_mtime;
- /** access time in seconds since Epoch */
- s64 la_atime;
- /** change time in seconds since Epoch */
- s64 la_ctime;
- /** 512-byte blocks allocated to object */
- __u64 la_blocks;
- /** permission bits and file type */
- __u32 la_mode;
- /** owner id */
- __u32 la_uid;
- /** group id */
- __u32 la_gid;
- /** object flags */
- __u32 la_flags;
- /** number of persistent references to this object */
- __u32 la_nlink;
- /** blk bits of the object*/
- __u32 la_blkbits;
- /** blk size of the object*/
- __u32 la_blksize;
- /** real device */
- __u32 la_rdev;
- /**
- * valid bits
- *
- * \see enum la_valid
- */
- __u64 la_valid;
-};
-
-/** Bit-mask of valid attributes */
-enum la_valid {
- LA_ATIME = 1 << 0,
- LA_MTIME = 1 << 1,
- LA_CTIME = 1 << 2,
- LA_SIZE = 1 << 3,
- LA_MODE = 1 << 4,
- LA_UID = 1 << 5,
- LA_GID = 1 << 6,
- LA_BLOCKS = 1 << 7,
- LA_TYPE = 1 << 8,
- LA_FLAGS = 1 << 9,
- LA_NLINK = 1 << 10,
- LA_RDEV = 1 << 11,
- LA_BLKSIZE = 1 << 12,
- LA_KILL_SUID = 1 << 13,
- LA_KILL_SGID = 1 << 14,
-};
-
-/**
- * Layer in the layered object.
- */
-struct lu_object {
- /**
- * Header for this object.
- */
- struct lu_object_header *lo_header;
- /**
- * Device for this layer.
- */
- struct lu_device *lo_dev;
- /**
- * Operations for this object.
- */
- const struct lu_object_operations *lo_ops;
- /**
- * Linkage into list of all layers.
- */
- struct list_head lo_linkage;
- /**
- * Link to the device, for debugging.
- */
- struct lu_ref_link lo_dev_ref;
-};
-
-enum lu_object_header_flags {
- /**
- * Don't keep this object in cache. Object will be destroyed as soon
- * as last reference to it is released. This flag cannot be cleared
- * once set.
- */
- LU_OBJECT_HEARD_BANSHEE = 0,
- /**
-	 * Marks that this object has already been taken out of the cache.
- */
- LU_OBJECT_UNHASHED = 1
-};
-
-enum lu_object_header_attr {
- LOHA_EXISTS = 1 << 0,
- LOHA_REMOTE = 1 << 1,
- /**
- * UNIX file type is stored in S_IFMT bits.
- */
- LOHA_FT_START = 001 << 12, /**< S_IFIFO */
- LOHA_FT_END = 017 << 12, /**< S_IFMT */
-};
-
-/**
- * "Compound" object, consisting of multiple layers.
- *
- * A compound object with a given fid is unique within a given lu_site.
- *
- * Note that the object does *not* necessarily correspond to a real object in
- * persistent storage: the object is an anchor for locking and method calling,
- * so it is created for things like a not-yet-existing child created by mkdir
- * or create calls. lu_object_exists() can be used to check whether the object
- * is backed by a persistent storage entity.
- */
-struct lu_object_header {
- /**
- * Fid, uniquely identifying this object.
- */
- struct lu_fid loh_fid;
- /**
- * Object flags from enum lu_object_header_flags. Set and checked
- * atomically.
- */
- unsigned long loh_flags;
- /**
- * Object reference count. Protected by lu_site::ls_guard.
- */
- atomic_t loh_ref;
- /**
- * Common object attributes, cached for efficiency. From enum
- * lu_object_header_attr.
- */
- __u32 loh_attr;
- /**
- * Linkage into per-site hash table. Protected by lu_site::ls_guard.
- */
- struct hlist_node loh_hash;
- /**
- * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
- */
- struct list_head loh_lru;
- /**
-	 * Linkage into the list of layers. Never modified once set (except
-	 * late in object destruction). No locking is necessary.
- */
- struct list_head loh_layers;
- /**
- * A list of references to this object, for debugging.
- */
- struct lu_ref loh_reference;
-};
-
-struct fld;
-
-struct lu_site_bkt_data {
- /**
-	 * number of objects in this bucket on the lsb_lru list.
- */
- long lsb_lru_len;
- /**
- * LRU list, updated on each access to object. Protected by
- * bucket lock of lu_site::ls_obj_hash.
- *
-	 * The "cold" end of the LRU is lu_site::ls_lru.next. Accessed objects
-	 * are moved to lu_site::ls_lru.prev (this is due to the non-existence
- * of list_for_each_entry_safe_reverse()).
- */
- struct list_head lsb_lru;
- /**
- * Wait-queue signaled when an object in this site is ultimately
- * destroyed (lu_object_free()). It is used by lu_object_find() to
-	 * wait before re-trying when an object in the process of destruction
-	 * is found in the hash table.
- *
- * \see htable_lookup().
- */
- wait_queue_head_t lsb_marche_funebre;
-};
-
-enum {
- LU_SS_CREATED = 0,
- LU_SS_CACHE_HIT,
- LU_SS_CACHE_MISS,
- LU_SS_CACHE_RACE,
- LU_SS_CACHE_DEATH_RACE,
- LU_SS_LRU_PURGED,
- LU_SS_LRU_LEN, /* # of objects in lsb_lru lists */
- LU_SS_LAST_STAT
-};
-
-/**
- * lu_site is a "compartment" within which objects are unique, and LRU
- * discipline is maintained.
- *
- * lu_site exists so that multiple layered stacks can co-exist in the same
- * address space.
- *
- * lu_site has the same relation to lu_device as lu_object_header to
- * lu_object.
- */
-struct lu_site {
- /**
- * objects hash table
- */
- struct cfs_hash *ls_obj_hash;
- /**
- * index of bucket on hash table while purging
- */
- int ls_purge_start;
- /**
- * Top-level device for this stack.
- */
- struct lu_device *ls_top_dev;
- /**
- * Bottom-level device for this stack
- */
- struct lu_device *ls_bottom_dev;
- /**
- * Linkage into global list of sites.
- */
- struct list_head ls_linkage;
- /**
- * List for lu device for this site, protected
- * by ls_ld_lock.
- **/
- struct list_head ls_ld_linkage;
- spinlock_t ls_ld_lock;
-
- /**
- * lu_site stats
- */
- struct lprocfs_stats *ls_stats;
- /**
- * XXX: a hack! fld has to find md_site via site, remove when possible
- */
- struct seq_server_site *ld_seq_site;
-};
-
-static inline struct lu_site_bkt_data *
-lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
-{
- struct cfs_hash_bd bd;
-
- cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
- return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
-}
-
-static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
-{
- return s->ld_seq_site;
-}
-
-/** \name ctors
- * Constructors/destructors.
- * @{
- */
-
-int lu_site_init (struct lu_site *s, struct lu_device *d);
-void lu_site_fini (struct lu_site *s);
-int lu_site_init_finish (struct lu_site *s);
-void lu_stack_fini (const struct lu_env *env, struct lu_device *top);
-void lu_device_get (struct lu_device *d);
-void lu_device_put (struct lu_device *d);
-int lu_device_init (struct lu_device *d, struct lu_device_type *t);
-void lu_device_fini (struct lu_device *d);
-int lu_object_header_init(struct lu_object_header *h);
-void lu_object_header_fini(struct lu_object_header *h);
-int lu_object_init (struct lu_object *o,
- struct lu_object_header *h, struct lu_device *d);
-void lu_object_fini (struct lu_object *o);
-void lu_object_add_top (struct lu_object_header *h, struct lu_object *o);
-void lu_object_add (struct lu_object *before, struct lu_object *o);
-
-void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d);
-
-/**
- * Helpers to initialize and finalize device types.
- */
-
-int lu_device_type_init(struct lu_device_type *ldt);
-void lu_device_type_fini(struct lu_device_type *ldt);
-void lu_types_stop(void);
-
-/** @} ctors */
-
-/** \name caching
- * Caching and reference counting.
- * @{
- */
-
-/**
- * Acquire an additional reference to the given object. To acquire the
- * initial reference, use lu_object_find().
- */
-static inline void lu_object_get(struct lu_object *o)
-{
- LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
- atomic_inc(&o->lo_header->loh_ref);
-}
-
-/**
- * Return true if the object will not be cached after the last reference to
- * it is released.
- */
-static inline int lu_object_is_dying(const struct lu_object_header *h)
-{
- return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
-}
-
-void lu_object_put(const struct lu_env *env, struct lu_object *o);
-void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
-
-int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr);
-
-void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
- lu_printer_t printer);
-struct lu_object *lu_object_find(const struct lu_env *env,
- struct lu_device *dev, const struct lu_fid *f,
- const struct lu_object_conf *conf);
-struct lu_object *lu_object_find_at(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf);
-struct lu_object *lu_object_find_slice(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf);
-/** @} caching */
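Putting the caching primitives together: lu_object_find() returns a referenced compound object (from the cache or freshly allocated), lu_object_get() takes extra references, and lu_object_put() drops them, possibly parking the object on the site LRU. A sketch (the device, fid and NULL configuration are placeholders):

/* Illustrative: look up an object by fid, use it, then drop the reference. */
static int demo_lookup(const struct lu_env *env, struct lu_device *dev,
		       const struct lu_fid *fid)
{
	struct lu_object *o;

	o = lu_object_find(env, dev, fid, NULL /* no special configuration */);
	if (IS_ERR(o))
		return PTR_ERR(o);

	/* ... use the object; lu_object_get(o) would take extra references ... */

	lu_object_put(env, o);	/* may return the object to the site LRU */
	return 0;
}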
-
-/** \name helpers
- * Helpers.
- * @{
- */
-
-/**
- * First (topmost) sub-object of given compound object
- */
-static inline struct lu_object *lu_object_top(struct lu_object_header *h)
-{
- LASSERT(!list_empty(&h->loh_layers));
- return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
-}
-
-/**
- * Next sub-object in the layering
- */
-static inline struct lu_object *lu_object_next(const struct lu_object *o)
-{
- return container_of0(o->lo_linkage.next, struct lu_object, lo_linkage);
-}
-
-/**
- * Pointer to the fid of this object.
- */
-static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
-{
- return &o->lo_header->loh_fid;
-}
-
-/**
- * Return the device operations vector for this object.
- */
-static inline const struct lu_device_operations *
-lu_object_ops(const struct lu_object *o)
-{
- return o->lo_dev->ld_ops;
-}
-
-/**
- * Given a compound object, find its slice, corresponding to the device type
- * \a dtype.
- */
-struct lu_object *lu_object_locate(struct lu_object_header *h,
- const struct lu_device_type *dtype);
-
-/**
- * Printer function emitting messages through libcfs_debug_msg().
- */
-int lu_cdebug_printer(const struct lu_env *env,
- void *cookie, const char *format, ...);
-
-/**
- * Print object description followed by a user-supplied message.
- */
-#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-/**
- * Print short object description followed by a user-supplied message.
- */
-#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
-do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
- if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
- (object)->lo_header); \
- lu_cdebug_printer(env, &msgdata, "\n"); \
- CDEBUG(mask, format, ## __VA_ARGS__); \
- } \
-} while (0)
-
-void lu_object_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct lu_object *o);
-void lu_object_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct lu_object_header *hdr);
-
-/**
- * Check object consistency.
- */
-int lu_object_invariant(const struct lu_object *o);
-
-
-/**
- * Check whether the object exists, regardless of whether it is on local or
- * remote storage.
- * Note: LOHA_EXISTS is set once someone has created the object; it does not
- * need to be committed to storage.
- */
-#define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS)
-
-/**
- * Check whether the object is on remote storage.
- */
-#define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE)
-
-static inline int lu_object_assert_exists(const struct lu_object *o)
-{
- return lu_object_exists(o);
-}
-
-static inline int lu_object_assert_not_exists(const struct lu_object *o)
-{
- return !lu_object_exists(o);
-}
-
-/**
- * Attr of this object.
- */
-static inline __u32 lu_object_attr(const struct lu_object *o)
-{
- LASSERT(lu_object_exists(o) != 0);
- return o->lo_header->loh_attr;
-}
-
-static inline void lu_object_ref_add(struct lu_object *o,
- const char *scope,
- const void *source)
-{
- lu_ref_add(&o->lo_header->loh_reference, scope, source);
-}
-
-static inline void lu_object_ref_add_at(struct lu_object *o,
- struct lu_ref_link *link,
- const char *scope,
- const void *source)
-{
- lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
-}
-
-static inline void lu_object_ref_del(struct lu_object *o,
- const char *scope, const void *source)
-{
- lu_ref_del(&o->lo_header->loh_reference, scope, source);
-}
-
-static inline void lu_object_ref_del_at(struct lu_object *o,
- struct lu_ref_link *link,
- const char *scope, const void *source)
-{
- lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
-}
-
-/** input params, should be filled out by mdt */
-struct lu_rdpg {
- /** hash */
- __u64 rp_hash;
- /** count in bytes */
- unsigned int rp_count;
- /** number of pages */
- unsigned int rp_npages;
- /** requested attr */
- __u32 rp_attrs;
- /** pointers to pages */
- struct page **rp_pages;
-};
-
-enum lu_xattr_flags {
- LU_XATTR_REPLACE = (1 << 0),
- LU_XATTR_CREATE = (1 << 1)
-};
-
-/** @} helpers */
-
-/** \name lu_context
- * @{ */
-
-/** For lu_context health-checks */
-enum lu_context_state {
- LCS_INITIALIZED = 1,
- LCS_ENTERED,
- LCS_LEFT,
- LCS_FINALIZED
-};
-
-/**
- * lu_context. Execution context for lu_object methods. Currently associated
- * with thread.
- *
- * All lu_object methods, except device and device type methods (called during
- * system initialization and shutdown) are executed "within" some
- * lu_context. This means, that pointer to some "current" lu_context is passed
- * as an argument to all methods.
- *
- * All service ptlrpc threads create lu_context as part of their
- * initialization. It is possible to create "stand-alone" context for other
- * execution environments (like system calls).
- *
- * lu_object methods mainly use lu_context through lu_context_key interface
- * that allows each layer to associate arbitrary pieces of data with each
- * context (see pthread_key_create(3) for similar interface).
- *
- * On a client, lu_context is bound to a thread, see cl_env_get().
- *
- * \see lu_context_key
- */
-struct lu_context {
- /**
- * lu_context is used on the client side too. Yet we don't want to
- * allocate values of server-side keys for the client contexts and
- * vice versa.
- *
-	 * To achieve this, a set of tags is introduced. Contexts and keys are
-	 * marked with tags. Key values are created only for a context whose
-	 * set of tags has a non-empty intersection with that of the key. Tags
-	 * are taken from enum lu_context_tag.
- */
- __u32 lc_tags;
- enum lu_context_state lc_state;
- /**
- * Pointer to the home service thread. NULL for other execution
- * contexts.
- */
- struct ptlrpc_thread *lc_thread;
- /**
- * Pointer to an array with key values. Internal implementation
- * detail.
- */
- void **lc_value;
- /**
- * Linkage into a list of all remembered contexts. Only
- * `non-transient' contexts, i.e., ones created for service threads
- * are placed here.
- */
- struct list_head lc_remember;
- /**
- * Version counter used to skip calls to lu_context_refill() when no
- * keys were registered.
- */
- unsigned lc_version;
- /**
- * Debugging cookie.
- */
- unsigned lc_cookie;
-};
-
-/**
- * lu_context_key interface. Similar to pthread_key.
- */
-
-enum lu_context_tag {
- /**
- * Thread on md server
- */
- LCT_MD_THREAD = 1 << 0,
- /**
- * Thread on dt server
- */
- LCT_DT_THREAD = 1 << 1,
- /**
- * Context for transaction handle
- */
- LCT_TX_HANDLE = 1 << 2,
- /**
- * Thread on client
- */
- LCT_CL_THREAD = 1 << 3,
- /**
- * A per-request session on a server, and a per-system-call session on
- * a client.
- */
- LCT_SESSION = 1 << 4,
- /**
- * A per-request data on OSP device
- */
- LCT_OSP_THREAD = 1 << 5,
- /**
- * MGS device thread
- */
- LCT_MG_THREAD = 1 << 6,
- /**
- * Context for local operations
- */
- LCT_LOCAL = 1 << 7,
- /**
-	 * Set when at least one of the keys that have values in this context
-	 * has a non-NULL lu_context_key::lct_exit() method. This is used to
-	 * optimize lu_context_exit() calls.
- */
- LCT_HAS_EXIT = 1 << 28,
- /**
- * Don't add references for modules creating key values in that context.
- * This is only for contexts used internally by lu_object framework.
- */
- LCT_NOREF = 1 << 29,
- /**
- * Key is being prepared for retiring, don't create new values for it.
- */
- LCT_QUIESCENT = 1 << 30,
- /**
- * Context should be remembered.
- */
- LCT_REMEMBER = 1 << 31,
- /**
- * Contexts usable in cache shrinker thread.
- */
- LCT_SHRINKER = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF
-};
-
-/**
- * Key. Represents per-context value slot.
- *
- * Keys are usually registered when the module owning the key is initialized,
- * and de-registered when the module is unloaded. Once a key is registered,
- * all new contexts with matching tags will get a key value. "Old" contexts,
- * already initialized at the time of key registration, can be forced to get
- * a key value by calling lu_context_refill().
- *
- * Every key value is counted in lu_context_key::lct_used and acquires a
- * reference on the owning module. This means that all key values have to be
- * destroyed before the module can be unloaded. This is usually achieved by
- * stopping the threads started by the module, which created contexts in
- * their entry functions. The situation is complicated by threads shared by
- * multiple modules, like the ptlrpcd daemon on a client. To work around this
- * problem, contexts created in such threads are `remembered' (see
- * LCT_REMEMBER)---i.e., added into a global list. When a module is preparing
- * for unloading it does the following:
- *
- * - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT)
- * preventing new key values from being allocated in the new contexts,
- * and
- *
- * - scans a list of remembered contexts, destroying values of module
- * keys, thus releasing references to the module.
- *
- * This is done by lu_context_key_quiesce(). If module is re-activated
- * before key has been de-registered, lu_context_key_revive() call clears
- * `quiescent' marker.
- *
- * lu_context code doesn't provide any internal synchronization for these
- * activities---it's assumed that startup (including threads start-up) and
- * shutdown are serialized by some external means.
- *
- * \see lu_context
- */
-struct lu_context_key {
- /**
- * Set of tags for which values of this key are to be instantiated.
- */
- __u32 lct_tags;
- /**
-	 * Value constructor. This is called when a new value is created for a
-	 * context. Returns a pointer to the new value or an error pointer.
- */
- void *(*lct_init)(const struct lu_context *ctx,
- struct lu_context_key *key);
- /**
- * Value destructor. Called when context with previously allocated
- * value of this slot is destroyed. \a data is a value that was returned
- * by a matching call to lu_context_key::lct_init().
- */
- void (*lct_fini)(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
- /**
- * Optional method called on lu_context_exit() for all allocated
- * keys. Can be used by debugging code checking that locks are
- * released, etc.
- */
- void (*lct_exit)(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
- /**
- * Internal implementation detail: index within lu_context::lc_value[]
- * reserved for this key.
- */
- int lct_index;
- /**
- * Internal implementation detail: number of values created for this
- * key.
- */
- atomic_t lct_used;
- /**
- * Internal implementation detail: module for this key.
- */
- struct module *lct_owner;
- /**
- * References to this key. For debugging.
- */
- struct lu_ref lct_reference;
-};
-
-#define LU_KEY_INIT(mod, type) \
- static void *mod##_key_init(const struct lu_context *ctx, \
- struct lu_context_key *key) \
- { \
- type *value; \
- \
- CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
- \
- value = kzalloc(sizeof(*value), GFP_NOFS); \
- if (value == NULL) \
- value = ERR_PTR(-ENOMEM); \
- \
- return value; \
- } \
- struct __##mod##__dummy_init {; } /* semicolon catcher */
-
-#define LU_KEY_FINI(mod, type) \
- static void mod##_key_fini(const struct lu_context *ctx, \
- struct lu_context_key *key, void *data) \
- { \
- type *info = data; \
- \
- kfree(info); \
- } \
- struct __##mod##__dummy_fini {; } /* semicolon catcher */
-
-#define LU_KEY_INIT_FINI(mod, type) \
- LU_KEY_INIT(mod, type); \
- LU_KEY_FINI(mod, type)
-
-#define LU_CONTEXT_KEY_DEFINE(mod, tags) \
- struct lu_context_key mod##_thread_key = { \
- .lct_tags = tags, \
- .lct_init = mod##_key_init, \
- .lct_fini = mod##_key_fini \
- }
-
-#define LU_CONTEXT_KEY_INIT(key) \
-do { \
- (key)->lct_owner = THIS_MODULE; \
-} while (0)
-
-int lu_context_key_register(struct lu_context_key *key);
-void lu_context_key_degister(struct lu_context_key *key);
-void *lu_context_key_get (const struct lu_context *ctx,
- const struct lu_context_key *key);
-void lu_context_key_quiesce (struct lu_context_key *key);
-void lu_context_key_revive (struct lu_context_key *key);
-
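As a sketch of how the pieces above fit together, a hypothetical module ("demo", with an equally hypothetical per-thread structure) could define, register and use a key roughly like this:

struct demo_thread_info {
	int dti_scratch;
};

/* Generates demo_key_init()/demo_key_fini() allocating and freeing the info. */
LU_KEY_INIT_FINI(demo, struct demo_thread_info);

/* Defines demo_thread_key, instantiated for client-thread contexts only. */
LU_CONTEXT_KEY_DEFINE(demo, LCT_CL_THREAD);

static int demo_module_init(void)
{
	LU_CONTEXT_KEY_INIT(&demo_thread_key);
	return lu_context_key_register(&demo_thread_key);
}

static void demo_module_exit(void)
{
	lu_context_key_degister(&demo_thread_key);
}

/* Per-thread data is later retrieved from a matching context with: */
static struct demo_thread_info *demo_info(const struct lu_context *ctx)
{
	return lu_context_key_get(ctx, &demo_thread_key);
}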
-
-/*
- * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
- * owning module.
- */
-
-#define LU_KEY_INIT_GENERIC(mod) \
- static void mod##_key_init_generic(struct lu_context_key *k, ...) \
- { \
- struct lu_context_key *key = k; \
- va_list args; \
- \
- va_start(args, k); \
- do { \
- LU_CONTEXT_KEY_INIT(key); \
- key = va_arg(args, struct lu_context_key *); \
- } while (key != NULL); \
- va_end(args); \
- }
-
-#define LU_TYPE_INIT(mod, ...) \
- LU_KEY_INIT_GENERIC(mod) \
- static int mod##_type_init(struct lu_device_type *t) \
- { \
- mod##_key_init_generic(__VA_ARGS__, NULL); \
- return lu_context_key_register_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_init {; }
-
-#define LU_TYPE_FINI(mod, ...) \
- static void mod##_type_fini(struct lu_device_type *t) \
- { \
- lu_context_key_degister_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_fini {; }
-
-#define LU_TYPE_START(mod, ...) \
- static void mod##_type_start(struct lu_device_type *t) \
- { \
- lu_context_key_revive_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_start {; }
-
-#define LU_TYPE_STOP(mod, ...) \
- static void mod##_type_stop(struct lu_device_type *t) \
- { \
- lu_context_key_quiesce_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_stop {; }
-
-
-
-#define LU_TYPE_INIT_FINI(mod, ...) \
- LU_TYPE_INIT(mod, __VA_ARGS__); \
- LU_TYPE_FINI(mod, __VA_ARGS__); \
- LU_TYPE_START(mod, __VA_ARGS__); \
- LU_TYPE_STOP(mod, __VA_ARGS__)
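Building on the hypothetical demo key above, a device type that owns this key can generate all four type hooks in one statement; the generated demo_type_init()/demo_type_fini()/demo_type_start()/demo_type_stop() functions are then typically wired into the device type's operations vector:

/* Registers/degisters and revives/quiesces demo_thread_key as a group. */
LU_TYPE_INIT_FINI(demo, &demo_thread_key);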
-
-int lu_context_init (struct lu_context *ctx, __u32 tags);
-void lu_context_fini (struct lu_context *ctx);
-void lu_context_enter (struct lu_context *ctx);
-void lu_context_exit (struct lu_context *ctx);
-int lu_context_refill(struct lu_context *ctx);
-
-/*
- * Helper functions to operate on multiple keys. These are used by the default
- * device type operations, defined by LU_TYPE_INIT_FINI().
- */
-
-int lu_context_key_register_many(struct lu_context_key *k, ...);
-void lu_context_key_degister_many(struct lu_context_key *k, ...);
-void lu_context_key_revive_many (struct lu_context_key *k, ...);
-void lu_context_key_quiesce_many (struct lu_context_key *k, ...);
-
-/**
- * Environment.
- */
-struct lu_env {
- /**
- * "Local" context, used to store data instead of stack.
- */
- struct lu_context le_ctx;
- /**
- * "Session" context for per-request data.
- */
- struct lu_context *le_ses;
-};
-
-int lu_env_init (struct lu_env *env, __u32 tags);
-void lu_env_fini (struct lu_env *env);
-int lu_env_refill(struct lu_env *env);
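A minimal sketch of a stand-alone environment for a non-service thread, as described for lu_context above; the tag choice and function name are only illustrative:

static int demo_with_env(void)
{
	struct lu_env env;
	int rc;

	rc = lu_env_init(&env, LCT_LOCAL);
	if (rc)
		return rc;

	/* ... pass &env to lu_object methods ... */

	lu_env_fini(&env);
	return 0;
}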
-
-/** @} lu_context */
-
-/**
- * Output site statistical counters into a buffer. Suitable for
- * ll_rd_*()-style functions.
- */
-int lu_site_stats_print(const struct lu_site *s, struct seq_file *m);
-
-/**
- * Common name structure to be passed around for various name related methods.
- */
-struct lu_name {
- const char *ln_name;
- int ln_namelen;
-};
-
-/**
- * Common buffer structure to be passed around for various xattr_{s,g}et()
- * methods.
- */
-struct lu_buf {
- void *lb_buf;
- ssize_t lb_len;
-};
-
-#define DLUBUF "(%p %zu)"
-#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
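The two macros above are meant to be used as a pair in format strings; a short sketch, where the data pointer and length are assumed to come from the caller:

static void demo_print_buf(void *data, size_t len)
{
	struct lu_buf buf = { .lb_buf = data, .lb_len = len };

	CDEBUG(D_INFO, "xattr buffer "DLUBUF"\n", PLUBUF(&buf));
}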
-/**
- * One-time initializers, called at obdclass module initialization, not
- * exported.
- */
-
-/**
- * Initialization of global lu_* data.
- */
-int lu_global_init(void);
-
-/**
- * Dual to lu_global_init().
- */
-void lu_global_fini(void);
-
-struct lu_kmem_descr {
- struct kmem_cache **ckd_cache;
- const char *ckd_name;
- const size_t ckd_size;
-};
-
-int lu_kmem_init(struct lu_kmem_descr *caches);
-void lu_kmem_fini(struct lu_kmem_descr *caches);
-
-/** @} lu */
-#endif /* __LUSTRE_LU_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
deleted file mode 100644
index b451a888ca3a..000000000000
--- a/drivers/staging/lustre/lustre/include/lu_ref.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- *
- * This file is part of Lustre, http://www.lustre.org.
- *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#ifndef __LUSTRE_LU_REF_H
-#define __LUSTRE_LU_REF_H
-
-#include <linux/list.h>
-
-/** \defgroup lu_ref lu_ref
- *
- * An interface to track references between objects. Mostly for debugging.
- *
- * Suppose there is a reference counted data-structure struct foo. To track
- * who acquired references to instance of struct foo, add lu_ref field to it:
- *
- * \code
- * struct foo {
- * atomic_t foo_refcount;
- * struct lu_ref foo_reference;
- * ...
- * };
- * \endcode
- *
- * foo::foo_reference has to be initialized by calling
- * lu_ref_init(). Typically there will be functions or macros to increment and
- * decrement foo::foo_refcount, let's say they are foo_get(struct foo *foo)
- * and foo_put(struct foo *foo), respectively.
- *
- * Whenever foo_get() is called to acquire a reference on a foo, lu_ref_add()
- * has to be called to insert into foo::foo_reference a record, describing
- * acquired reference. Dually, lu_ref_del() removes matching record. Typical
- * usages are:
- *
- * \code
- * struct bar *bar;
- *
- * // bar owns a reference to foo.
- * bar->bar_foo = foo_get(foo);
- * lu_ref_add(&foo->foo_reference, "bar", bar);
- *
- * ...
- *
- * // reference from bar to foo is released.
- * lu_ref_del(&foo->foo_reference, "bar", bar);
- * foo_put(bar->bar_foo);
- *
- *
- * // current thread acquired a temporary reference to foo.
- * foo_get(foo);
- * lu_ref_add(&foo->reference, __func__, current);
- *
- * ...
- *
- * // temporary reference is released.
- * lu_ref_del(&foo->reference, __func__, current);
- * foo_put(foo);
- * \endcode
- *
- * \e Et \e cetera. Often it makes sense to include lu_ref_add() and
- * lu_ref_del() calls into foo_get() and foo_put(). When an instance of struct
- * foo is destroyed, lu_ref_fini() has to be called; it checks that no
- * pending references remain. lu_ref_print() can be used to dump a list of
- * pending references while hunting down a leak.
- *
- * For objects to which a large number of references can be acquired,
- * lu_ref_del() can become cpu consuming, as it has to scan the list of
- * references. To work around this, remember result of lu_ref_add() (usually
- * in the same place where pointer to struct foo is stored), and use
- * lu_ref_del_at():
- *
- * \code
- * // There is a large number of bar's for a single foo.
- * bar->bar_foo = foo_get(foo);
- * bar->bar_foo_ref = lu_ref_add(&foo->foo_reference, "bar", bar);
- *
- * ...
- *
- * // reference from bar to foo is released.
- * lu_ref_del_at(&foo->foo_reference, bar->bar_foo_ref, "bar", bar);
- * foo_put(bar->bar_foo);
- * \endcode
- *
- * lu_ref interface degrades gracefully in case of memory shortages.
- *
- * @{
- */
-
-
-/*
- * Dummy data structures/functions to keep the code compiling for now.
- * We need to reimplement them with kref.
- */
-struct lu_ref {};
-struct lu_ref_link {};
-
-static inline void lu_ref_init(struct lu_ref *ref)
-{
-}
-
-static inline void lu_ref_fini(struct lu_ref *ref)
-{
-}
-
-static inline struct lu_ref_link *lu_ref_add(struct lu_ref *ref,
- const char *scope,
- const void *source)
-{
- return NULL;
-}
-
-static inline struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref,
- const char *scope,
- const void *source)
-{
- return NULL;
-}
-
-static inline void lu_ref_add_at(struct lu_ref *ref,
- struct lu_ref_link *link,
- const char *scope,
- const void *source)
-{
-}
-
-static inline void lu_ref_del(struct lu_ref *ref, const char *scope,
- const void *source)
-{
-}
-
-static inline void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
- const char *scope, const void *source0,
- const void *source1)
-{
-}
-
-static inline void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
- const char *scope, const void *source)
-{
-}
-
-static inline int lu_ref_global_init(void)
-{
- return 0;
-}
-
-static inline void lu_ref_global_fini(void)
-{
-}
-
-static inline void lu_ref_print(const struct lu_ref *ref)
-{
-}
-
-static inline void lu_ref_print_all(void)
-{
-}
-
-/** @} lu */
-
-#endif /* __LUSTRE_LU_REF_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/libiam.h b/drivers/staging/lustre/lustre/include/lustre/libiam.h
deleted file mode 100644
index e8e0b084a6bc..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/libiam.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre/libiam.h
- *
- * iam user level library
- *
- * Author: Wang Di <wangdi@clusterfs.com>
- * Author: Nikita Danilov <nikita@clusterfs.com>
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-/*
- * lustre/libiam.h
- */
-
-#ifndef __IAM_ULIB_H__
-#define __IAM_ULIB_H__
-
-/** \defgroup libiam libiam
- *
- * @{
- */
-
-
-#define DX_FMT_NAME_LEN 16
-
-enum iam_fmt_t {
- FMT_LFIX,
- FMT_LVAR
-};
-
-struct iam_uapi_info {
- __u16 iui_keysize;
- __u16 iui_recsize;
- __u16 iui_ptrsize;
- __u16 iui_height;
- char iui_fmt_name[DX_FMT_NAME_LEN];
-};
-
-/*
- * Create an iam file, but do NOT open it.
- * Returns 0 on success, else -1.
- */
-int iam_creat(char *filename, enum iam_fmt_t fmt,
- int blocksize, int keysize, int recsize, int ptrsize);
-
-/*
- * Open an iam file, but do NOT create it if the file doesn't exist.
- * Please use iam_creat() to create the file before calling iam_open().
- * Returns a file descriptor (fd) on success, else -1.
- */
-int iam_open(char *filename, struct iam_uapi_info *ua);
-
-/*
- * Close file opened by iam_open.
- */
-int iam_close(int fd);
-
-/*
- * Please use iam_open() before using this function.
- */
-int iam_insert(int fd, struct iam_uapi_info *ua,
- int key_need_convert, char *keybuf,
- int rec_need_convert, char *recbuf);
-
-/*
- * Please use iam_open() before using this function.
- */
-int iam_lookup(int fd, struct iam_uapi_info *ua,
- int key_need_convert, char *key_buf,
- int *keysize, char *save_key,
- int rec_need_convert, char *rec_buf,
- int *recsize, char *save_rec);
-
-/*
- * Please use iam_open() before using this function.
- */
-int iam_delete(int fd, struct iam_uapi_info *ua,
- int key_need_convert, char *keybuf,
- int rec_need_convert, char *recbuf);
-
-/*
- * Please use iam_open() before using this function.
- */
-int iam_it_start(int fd, struct iam_uapi_info *ua,
- int key_need_convert, char *key_buf,
- int *keysize, char *save_key,
- int rec_need_convert, char *rec_buf,
- int *recsize, char *save_rec);
-
-/*
- * Please use iam_open() before using this function.
- */
-int iam_it_next(int fd, struct iam_uapi_info *ua,
- int key_need_convert, char *key_buf,
- int *keysize, char *save_key,
- int rec_need_convert, char *rec_buf,
- int *recsize, char *save_rec);
-
-/*
- * Please use iam_open() before using this function.
- */
-int iam_it_stop(int fd, struct iam_uapi_info *ua,
- int key_need_convert, char *keybuf,
- int rec_need_convert, char *recbuf);
-
-/*
- * Change iam file mode.
- */
-int iam_polymorph(char *filename, unsigned long mode);
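A hedged sketch of the intended call sequence for this user-level library; the path, sizes and key/record contents are illustrative only:

static int demo_iam(void)
{
	struct iam_uapi_info ua;
	char path[] = "/mnt/lustre/demo.iam";
	char key[32] = "key0";
	char rec[32] = "rec0";
	int fd;

	/* Create the container first; iam_open() does not create it. */
	if (iam_creat(path, FMT_LFIX, 4096, 32, 32, 4) != 0)
		return -1;

	fd = iam_open(path, &ua);
	if (fd < 0)
		return -1;

	/* Insert one key/record pair, then close the file. */
	iam_insert(fd, &ua, 0, key, 0, rec);
	return iam_close(fd);
}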
-
-/** @} libiam */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
deleted file mode 100644
index ad253c6deadd..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre/ll_fiemap.h
- *
- * FIEMAP data structures and flags. This header file will be used until
- * fiemap.h is available in the upstream kernel.
- *
- * Author: Kalpak Shah <kalpak.shah@sun.com>
- * Author: Andreas Dilger <adilger@sun.com>
- */
-
-#ifndef _LUSTRE_FIEMAP_H
-#define _LUSTRE_FIEMAP_H
-
-
-
-struct ll_fiemap_extent {
- __u64 fe_logical; /* logical offset in bytes for the start of
- * the extent from the beginning of the file */
- __u64 fe_physical; /* physical offset in bytes for the start
- * of the extent from the beginning of the disk */
- __u64 fe_length; /* length in bytes for this extent */
- __u64 fe_reserved64[2];
- __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
- __u32 fe_device; /* device number for this extent */
- __u32 fe_reserved[2];
-};
-
-struct ll_user_fiemap {
- __u64 fm_start; /* logical offset (inclusive) at
- * which to start mapping (in) */
- __u64 fm_length; /* logical length of mapping which
- * userspace wants (in) */
- __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
- __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
- __u32 fm_extent_count; /* size of fm_extents array (in) */
- __u32 fm_reserved;
- struct ll_fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
-};
-
-#define FIEMAP_MAX_OFFSET (~0ULL)
-
-#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */
-#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */
-
-#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
-#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
-#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
- * Sets EXTENT_UNKNOWN. */
-#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
- * while fs is unmounted */
-#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
- * Sets EXTENT_NO_DIRECT. */
-#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be
- * block aligned. */
-#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata.
- * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
- * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
- * no data (i.e. zero). */
-#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
- * support extents. Result
- * merged for efficiency. */
-
-
-static inline size_t fiemap_count_to_size(size_t extent_count)
-{
- return (sizeof(struct ll_user_fiemap) + extent_count *
- sizeof(struct ll_fiemap_extent));
-}
-
-static inline unsigned fiemap_size_to_count(size_t array_size)
-{
- return ((array_size - sizeof(struct ll_user_fiemap)) /
- sizeof(struct ll_fiemap_extent));
-}
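For example, a caller that wants room for 16 mapped extents would size its request buffer with fiemap_count_to_size(); a sketch, with kzalloc() used purely for illustration:

static int demo_alloc_fiemap(void)
{
	struct ll_user_fiemap *fm;
	size_t size = fiemap_count_to_size(16);

	fm = kzalloc(size, GFP_KERNEL);
	if (!fm)
		return -ENOMEM;

	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;		  /* map the whole file */
	fm->fm_extent_count = fiemap_size_to_count(size); /* == 16 */
	/* ... issue the mapping request, then read fm->fm_extents[] ... */

	kfree(fm);
	return 0;
}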
-
-#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
-
-#ifdef FIEMAP_FLAGS_COMPAT
-#undef FIEMAP_FLAGS_COMPAT
-#endif
-
-/* Lustre specific flags - use a high bit, don't conflict with upstream flag */
-#define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */
-#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely.
- * Sets NO_DIRECT flag */
-
-#endif /* _LUSTRE_FIEMAP_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
deleted file mode 100644
index 93a3d7db3010..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0"
-#define LUSTRE_RELEASE 3.9.0_g6e62c21
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
deleted file mode 100644
index 35aefa2cdad1..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.txt
- *
- * GPL HEADER END
- */
-/*
- * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
- *
- * Copyright (c) 2013, Intel Corporation.
- */
-
-#ifndef LUSTRE_ERRNO_H
-#define LUSTRE_ERRNO_H
-
-/*
- * Only "network" errnos, which are defined below, are allowed on wire (or on
- * disk). Generic routines exist to help translate between these and a subset
- * of the "host" errnos. Some host errnos (e.g., EDEADLOCK) are intentionally
- * left out. See also the comment on lustre_errno_hton_mapping[].
- *
- * To maintain compatibility with existing x86 clients and servers, each of
- * these network errnos has the same numerical value as its corresponding host
- * errno on x86.
- */
-#define LUSTRE_EPERM 1 /* Operation not permitted */
-#define LUSTRE_ENOENT 2 /* No such file or directory */
-#define LUSTRE_ESRCH 3 /* No such process */
-#define LUSTRE_EINTR 4 /* Interrupted system call */
-#define LUSTRE_EIO 5 /* I/O error */
-#define LUSTRE_ENXIO 6 /* No such device or address */
-#define LUSTRE_E2BIG 7 /* Argument list too long */
-#define LUSTRE_ENOEXEC 8 /* Exec format error */
-#define LUSTRE_EBADF 9 /* Bad file number */
-#define LUSTRE_ECHILD 10 /* No child processes */
-#define LUSTRE_EAGAIN 11 /* Try again */
-#define LUSTRE_ENOMEM 12 /* Out of memory */
-#define LUSTRE_EACCES 13 /* Permission denied */
-#define LUSTRE_EFAULT 14 /* Bad address */
-#define LUSTRE_ENOTBLK 15 /* Block device required */
-#define LUSTRE_EBUSY 16 /* Device or resource busy */
-#define LUSTRE_EEXIST 17 /* File exists */
-#define LUSTRE_EXDEV 18 /* Cross-device link */
-#define LUSTRE_ENODEV 19 /* No such device */
-#define LUSTRE_ENOTDIR 20 /* Not a directory */
-#define LUSTRE_EISDIR 21 /* Is a directory */
-#define LUSTRE_EINVAL 22 /* Invalid argument */
-#define LUSTRE_ENFILE 23 /* File table overflow */
-#define LUSTRE_EMFILE 24 /* Too many open files */
-#define LUSTRE_ENOTTY 25 /* Not a typewriter */
-#define LUSTRE_ETXTBSY 26 /* Text file busy */
-#define LUSTRE_EFBIG 27 /* File too large */
-#define LUSTRE_ENOSPC 28 /* No space left on device */
-#define LUSTRE_ESPIPE 29 /* Illegal seek */
-#define LUSTRE_EROFS 30 /* Read-only file system */
-#define LUSTRE_EMLINK 31 /* Too many links */
-#define LUSTRE_EPIPE 32 /* Broken pipe */
-#define LUSTRE_EDOM 33 /* Math argument out of domain of
- func */
-#define LUSTRE_ERANGE 34 /* Math result not representable */
-#define LUSTRE_EDEADLK 35 /* Resource deadlock would occur */
-#define LUSTRE_ENAMETOOLONG 36 /* File name too long */
-#define LUSTRE_ENOLCK 37 /* No record locks available */
-#define LUSTRE_ENOSYS 38 /* Function not implemented */
-#define LUSTRE_ENOTEMPTY 39 /* Directory not empty */
-#define LUSTRE_ELOOP 40 /* Too many symbolic links
- encountered */
-#define LUSTRE_ENOMSG 42 /* No message of desired type */
-#define LUSTRE_EIDRM 43 /* Identifier removed */
-#define LUSTRE_ECHRNG 44 /* Channel number out of range */
-#define LUSTRE_EL2NSYNC 45 /* Level 2 not synchronized */
-#define LUSTRE_EL3HLT 46 /* Level 3 halted */
-#define LUSTRE_EL3RST 47 /* Level 3 reset */
-#define LUSTRE_ELNRNG 48 /* Link number out of range */
-#define LUSTRE_EUNATCH 49 /* Protocol driver not attached */
-#define LUSTRE_ENOCSI 50 /* No CSI structure available */
-#define LUSTRE_EL2HLT 51 /* Level 2 halted */
-#define LUSTRE_EBADE 52 /* Invalid exchange */
-#define LUSTRE_EBADR 53 /* Invalid request descriptor */
-#define LUSTRE_EXFULL 54 /* Exchange full */
-#define LUSTRE_ENOANO 55 /* No anode */
-#define LUSTRE_EBADRQC 56 /* Invalid request code */
-#define LUSTRE_EBADSLT 57 /* Invalid slot */
-#define LUSTRE_EBFONT 59 /* Bad font file format */
-#define LUSTRE_ENOSTR 60 /* Device not a stream */
-#define LUSTRE_ENODATA 61 /* No data available */
-#define LUSTRE_ETIME 62 /* Timer expired */
-#define LUSTRE_ENOSR 63 /* Out of streams resources */
-#define LUSTRE_ENONET 64 /* Machine is not on the network */
-#define LUSTRE_ENOPKG 65 /* Package not installed */
-#define LUSTRE_EREMOTE 66 /* Object is remote */
-#define LUSTRE_ENOLINK 67 /* Link has been severed */
-#define LUSTRE_EADV 68 /* Advertise error */
-#define LUSTRE_ESRMNT 69 /* Srmount error */
-#define LUSTRE_ECOMM 70 /* Communication error on send */
-#define LUSTRE_EPROTO 71 /* Protocol error */
-#define LUSTRE_EMULTIHOP 72 /* Multihop attempted */
-#define LUSTRE_EDOTDOT 73 /* RFS specific error */
-#define LUSTRE_EBADMSG 74 /* Not a data message */
-#define LUSTRE_EOVERFLOW 75 /* Value too large for defined data
- type */
-#define LUSTRE_ENOTUNIQ 76 /* Name not unique on network */
-#define LUSTRE_EBADFD 77 /* File descriptor in bad state */
-#define LUSTRE_EREMCHG 78 /* Remote address changed */
-#define LUSTRE_ELIBACC 79 /* Can not access a needed shared
- library */
-#define LUSTRE_ELIBBAD 80 /* Accessing a corrupted shared
- library */
-#define LUSTRE_ELIBSCN 81 /* .lib section in a.out corrupted */
-#define LUSTRE_ELIBMAX 82 /* Attempting to link in too many shared
- libraries */
-#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared library
- directly */
-#define LUSTRE_EILSEQ 84 /* Illegal byte sequence */
-#define LUSTRE_ERESTART 85 /* Interrupted system call should be
- restarted */
-#define LUSTRE_ESTRPIPE 86 /* Streams pipe error */
-#define LUSTRE_EUSERS 87 /* Too many users */
-#define LUSTRE_ENOTSOCK 88 /* Socket operation on non-socket */
-#define LUSTRE_EDESTADDRREQ 89 /* Destination address required */
-#define LUSTRE_EMSGSIZE 90 /* Message too long */
-#define LUSTRE_EPROTOTYPE 91 /* Protocol wrong type for socket */
-#define LUSTRE_ENOPROTOOPT 92 /* Protocol not available */
-#define LUSTRE_EPROTONOSUPPORT 93 /* Protocol not supported */
-#define LUSTRE_ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported on transport
- endpoint */
-#define LUSTRE_EPFNOSUPPORT 96 /* Protocol family not supported */
-#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported by
- protocol */
-#define LUSTRE_EADDRINUSE 98 /* Address already in use */
-#define LUSTRE_EADDRNOTAVAIL 99 /* Cannot assign requested address */
-#define LUSTRE_ENETDOWN 100 /* Network is down */
-#define LUSTRE_ENETUNREACH 101 /* Network is unreachable */
-#define LUSTRE_ENETRESET 102 /* Network dropped connection because of
- reset */
-#define LUSTRE_ECONNABORTED 103 /* Software caused connection abort */
-#define LUSTRE_ECONNRESET 104 /* Connection reset by peer */
-#define LUSTRE_ENOBUFS 105 /* No buffer space available */
-#define LUSTRE_EISCONN 106 /* Transport endpoint is already
- connected */
-#define LUSTRE_ENOTCONN 107 /* Transport endpoint is not
- connected */
-#define LUSTRE_ESHUTDOWN 108 /* Cannot send after transport endpoint
- shutdown */
-#define LUSTRE_ETOOMANYREFS 109 /* Too many references: cannot splice */
-#define LUSTRE_ETIMEDOUT 110 /* Connection timed out */
-#define LUSTRE_ECONNREFUSED 111 /* Connection refused */
-#define LUSTRE_EHOSTDOWN 112 /* Host is down */
-#define LUSTRE_EHOSTUNREACH 113 /* No route to host */
-#define LUSTRE_EALREADY 114 /* Operation already in progress */
-#define LUSTRE_EINPROGRESS 115 /* Operation now in progress */
-#define LUSTRE_ESTALE 116 /* Stale file handle */
-#define LUSTRE_EUCLEAN 117 /* Structure needs cleaning */
-#define LUSTRE_ENOTNAM 118 /* Not a XENIX named type file */
-#define LUSTRE_ENAVAIL 119 /* No XENIX semaphores available */
-#define LUSTRE_EISNAM 120 /* Is a named type file */
-#define LUSTRE_EREMOTEIO 121 /* Remote I/O error */
-#define LUSTRE_EDQUOT 122 /* Quota exceeded */
-#define LUSTRE_ENOMEDIUM 123 /* No medium found */
-#define LUSTRE_EMEDIUMTYPE 124 /* Wrong medium type */
-#define LUSTRE_ECANCELED 125 /* Operation Canceled */
-#define LUSTRE_ENOKEY 126 /* Required key not available */
-#define LUSTRE_EKEYEXPIRED 127 /* Key has expired */
-#define LUSTRE_EKEYREVOKED 128 /* Key has been revoked */
-#define LUSTRE_EKEYREJECTED 129 /* Key was rejected by service */
-#define LUSTRE_EOWNERDEAD 130 /* Owner died */
-#define LUSTRE_ENOTRECOVERABLE 131 /* State not recoverable */
-#define LUSTRE_ERESTARTSYS 512
-#define LUSTRE_ERESTARTNOINTR 513
-#define LUSTRE_ERESTARTNOHAND 514 /* restart if no handler.. */
-#define LUSTRE_ENOIOCTLCMD 515 /* No ioctl command */
-#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart by calling
- sys_restart_syscall */
-#define LUSTRE_EBADHANDLE 521 /* Illegal NFS file handle */
-#define LUSTRE_ENOTSYNC 522 /* Update synchronization mismatch */
-#define LUSTRE_EBADCOOKIE 523 /* Cookie is stale */
-#define LUSTRE_ENOTSUPP 524 /* Operation is not supported */
-#define LUSTRE_ETOOSMALL 525 /* Buffer or request is too small */
-#define LUSTRE_ESERVERFAULT 526 /* An untranslatable error occurred */
-#define LUSTRE_EBADTYPE 527 /* Type not supported by server */
-#define LUSTRE_EJUKEBOX 528 /* Request initiated, but will not
- complete before timeout */
-#define LUSTRE_EIOCBQUEUED 529 /* iocb queued, will get completion
- event */
-#define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */
-
-/*
- * Translations are optimized away on x86. Host errnos that shouldn't be put
- * on wire could leak through as a result. Do not count on this side effect.
- */
-#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
-unsigned int lustre_errno_hton(unsigned int h);
-unsigned int lustre_errno_ntoh(unsigned int n);
-#else
-#define lustre_errno_hton(h) (h)
-#define lustre_errno_ntoh(n) (n)
-#endif
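A short illustration of the intended round trip, using EPROTO purely as an example host errno:

static unsigned int demo_errno_round_trip(void)
{
	unsigned int host = EPROTO;	/* an example host errno */
	unsigned int wire;

	/* Translate to the wire encoding before sending... */
	wire = lustre_errno_hton(host);

	/* ...and back to the host encoding after receiving. */
	return lustre_errno_ntoh(wire);
}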
-
-#endif /* LUSTRE_ERRNO_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
deleted file mode 100644
index 9cc92bbc21d5..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ /dev/null
@@ -1,3696 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre/lustre_idl.h
- *
- * Lustre wire protocol definitions.
- */
-
-/** \defgroup lustreidl lustreidl
- *
- * Lustre wire protocol definitions.
- *
- * ALL structs passing over the wire should be declared here. Structs
- * that are used in interfaces with userspace should go in lustre_user.h.
- *
- * All structs being declared here should be built from simple fixed-size
- * types (__u8, __u16, __u32, __u64) or be built from other types or
- * structs also declared in this file. Similarly, all flags and magic
- * values in those structs should also be declared here. This ensures
- * that the Lustre wire protocol is not influenced by external dependencies.
- *
- * The only other acceptable items in this file are VERY SIMPLE accessor
- * functions to avoid callers grubbing inside the structures, and the
- * prototypes of the swabber functions for each struct. Nothing that
- * depends on external functions or definitions should be in here.
- *
- * Structs must be properly aligned to put 64-bit values on an 8-byte
- * boundary. Any structs being added here must also be added to
- * utils/wirecheck.c and "make newwiretest" run to regenerate the
- * utils/wiretest.c sources. This allows us to verify that wire structs
- * have the proper alignment/size on all architectures.
- *
- * DO NOT CHANGE any of the structs, flags, values declared here and used
- * in released Lustre versions. Some structs may have padding fields that
- * can be used. Some structs might allow addition at the end (verify this
- * in the code to ensure that new/old clients that see this larger struct
- * do not fail, otherwise you need to implement protocol compatibility).
- *
- * We assume all nodes are either little-endian or big-endian, and we
- * always send messages in the sender's native format. The receiver
- * detects the message format by checking the 'magic' field of the message
- * (see lustre_msg_swabbed() below).
- *
- * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
- * implemented either here, inline (trivial implementations) or in
- * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
- * endian, in-place in the message buffer.
- *
- * A swabber takes a single pointer argument. The caller must already have
- * verified that the length of the message buffer >= sizeof (type).
- *
- * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
- * may be defined that swabs just the variable part, after the caller has
- * verified that the message buffer is large enough.
- *
- * @{
- */
-
-#ifndef _LUSTRE_IDL_H_
-#define _LUSTRE_IDL_H_
-
-#include "../../../include/linux/libcfs/libcfs.h"
-#include "../../../include/linux/lnet/types.h"
-
-/* Defn's shared with user-space. */
-#include "lustre_user.h"
-#include "lustre_errno.h"
-
-/*
- * GENERAL STUFF
- */
-/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
- * FOO_REPLY_PORTAL is for incoming replies on the FOO
- * FOO_BULK_PORTAL is for incoming bulk on the FOO
- */
-
-/* Lustre service names follow the format
- * service name + MDT + seq name
- */
-#define LUSTRE_MDT_MAXNAMELEN 80
-
-#define CONNMGR_REQUEST_PORTAL 1
-#define CONNMGR_REPLY_PORTAL 2
-//#define OSC_REQUEST_PORTAL 3
-#define OSC_REPLY_PORTAL 4
-//#define OSC_BULK_PORTAL 5
-#define OST_IO_PORTAL 6
-#define OST_CREATE_PORTAL 7
-#define OST_BULK_PORTAL 8
-//#define MDC_REQUEST_PORTAL 9
-#define MDC_REPLY_PORTAL 10
-//#define MDC_BULK_PORTAL 11
-#define MDS_REQUEST_PORTAL 12
-//#define MDS_REPLY_PORTAL 13
-#define MDS_BULK_PORTAL 14
-#define LDLM_CB_REQUEST_PORTAL 15
-#define LDLM_CB_REPLY_PORTAL 16
-#define LDLM_CANCEL_REQUEST_PORTAL 17
-#define LDLM_CANCEL_REPLY_PORTAL 18
-//#define PTLBD_REQUEST_PORTAL 19
-//#define PTLBD_REPLY_PORTAL 20
-//#define PTLBD_BULK_PORTAL 21
-#define MDS_SETATTR_PORTAL 22
-#define MDS_READPAGE_PORTAL 23
-#define OUT_PORTAL 24
-
-#define MGC_REPLY_PORTAL 25
-#define MGS_REQUEST_PORTAL 26
-#define MGS_REPLY_PORTAL 27
-#define OST_REQUEST_PORTAL 28
-#define FLD_REQUEST_PORTAL 29
-#define SEQ_METADATA_PORTAL 30
-#define SEQ_DATA_PORTAL 31
-#define SEQ_CONTROLLER_PORTAL 32
-#define MGS_BULK_PORTAL 33
-
-/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */
-
-/* packet types */
-#define PTL_RPC_MSG_REQUEST 4711
-#define PTL_RPC_MSG_ERR 4712
-#define PTL_RPC_MSG_REPLY 4713
-
-/* DON'T use swabbed values of MAGIC as magic! */
-#define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
-#define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B
-
-#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
-
-#define PTLRPC_MSG_VERSION 0x00000003
-#define LUSTRE_VERSION_MASK 0xffff0000
-#define LUSTRE_OBD_VERSION 0x00010000
-#define LUSTRE_MDS_VERSION 0x00020000
-#define LUSTRE_OST_VERSION 0x00030000
-#define LUSTRE_DLM_VERSION 0x00040000
-#define LUSTRE_LOG_VERSION 0x00050000
-#define LUSTRE_MGS_VERSION 0x00060000
-
-/**
- * Describes a range of sequence, lsr_start is included but lsr_end is
- * not in the range.
- * Same structure is used in fld module where lsr_index field holds mdt id
- * of the home mdt.
- */
-struct lu_seq_range {
- __u64 lsr_start;
- __u64 lsr_end;
- __u32 lsr_index;
- __u32 lsr_flags;
-};
-
-#define LU_SEQ_RANGE_MDT 0x0
-#define LU_SEQ_RANGE_OST 0x1
-#define LU_SEQ_RANGE_ANY 0x3
-
-#define LU_SEQ_RANGE_MASK 0x3
-
-static inline unsigned fld_range_type(const struct lu_seq_range *range)
-{
- return range->lsr_flags & LU_SEQ_RANGE_MASK;
-}
-
-static inline int fld_range_is_ost(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_OST;
-}
-
-static inline int fld_range_is_mdt(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_MDT;
-}
-
-/**
- * The ANY range type is only used when the fld client sends an fld query
- * request but does not know whether the seq is MDT or OST; it sends the
- * request with the ANY type, which means either seq type returned from the
- * lookup is acceptable.
- */
-static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_ANY;
-}
-
-static inline void fld_range_set_type(struct lu_seq_range *range,
- unsigned flags)
-{
- range->lsr_flags |= flags;
-}
-
-static inline void fld_range_set_mdt(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_MDT);
-}
-
-static inline void fld_range_set_ost(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_OST);
-}
-
-static inline void fld_range_set_any(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_ANY);
-}
-
-/**
- * Returns the width of the given range \a range.
- */
-
-static inline __u64 range_space(const struct lu_seq_range *range)
-{
- return range->lsr_end - range->lsr_start;
-}
-
-/**
- * initialize range to zero
- */
-
-static inline void range_init(struct lu_seq_range *range)
-{
- memset(range, 0, sizeof(*range));
-}
-
-/**
- * Check if the given seq id \a s is within the given range \a range.
- */
-
-static inline int range_within(const struct lu_seq_range *range,
- __u64 s)
-{
- return s >= range->lsr_start && s < range->lsr_end;
-}
-
-static inline int range_is_sane(const struct lu_seq_range *range)
-{
- return (range->lsr_end >= range->lsr_start);
-}
-
-static inline int range_is_zero(const struct lu_seq_range *range)
-{
- return (range->lsr_start == 0 && range->lsr_end == 0);
-}
-
-static inline int range_is_exhausted(const struct lu_seq_range *range)
-
-{
- return range_space(range) == 0;
-}
-
-/* return 0 if two range have the same location */
-static inline int range_compare_loc(const struct lu_seq_range *r1,
- const struct lu_seq_range *r2)
-{
- return r1->lsr_index != r2->lsr_index ||
- r1->lsr_flags != r2->lsr_flags;
-}
-
-#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
-
-#define PRANGE(range) \
- (range)->lsr_start, \
- (range)->lsr_end, \
- (range)->lsr_index, \
- fld_range_is_mdt(range) ? "mdt" : "ost"
-
-
-/** \defgroup lu_fid lu_fid
- * @{ */
-
-/**
- * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
- * Deprecated since HSM and SOM attributes are now stored in separate on-disk
- * xattr.
- */
-enum lma_compat {
- LMAC_HSM = 0x00000001,
- LMAC_SOM = 0x00000002,
- LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
- LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
- * under /O/<seq>/d<x>. */
-};
-
-/**
- * Masks for all features that should be supported by a Lustre version to
- * access a specific file.
- * This information is stored in lustre_mdt_attrs::lma_incompat.
- */
-enum lma_incompat {
- LMAI_RELEASED = 0x00000001, /* file is released */
- LMAI_AGENT = 0x00000002, /* agent inode */
- LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
- is on the remote MDT */
-};
-#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
-
-/**
- * fid constants
- */
-enum {
- /** LASTID file has zero OID */
- LUSTRE_FID_LASTID_OID = 0UL,
- /** initial fid id value */
- LUSTRE_FID_INIT_OID = 1UL
-};
-
-/** returns fid object sequence */
-static inline __u64 fid_seq(const struct lu_fid *fid)
-{
- return fid->f_seq;
-}
-
-/** returns fid object id */
-static inline __u32 fid_oid(const struct lu_fid *fid)
-{
- return fid->f_oid;
-}
-
-/** returns fid object version */
-static inline __u32 fid_ver(const struct lu_fid *fid)
-{
- return fid->f_ver;
-}
-
-static inline void fid_zero(struct lu_fid *fid)
-{
- memset(fid, 0, sizeof(*fid));
-}
-
-static inline __u64 fid_ver_oid(const struct lu_fid *fid)
-{
- return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
-}
-
-/* copytool uses a 32-bit bitmask field to encode archive IDs during
- * registration with the MDT through kuc.
- * archive num = 0 => all
- * archive num from 1 to 32
- */
-#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
-
-/**
- * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
- * inodes in the IGIF namespace, so these reserved SEQ numbers can be
- * used for other purposes and not risk collisions with existing inodes.
- *
- * Different FID Format
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
- */
-enum fid_seq {
- FID_SEQ_OST_MDT0 = 0,
- FID_SEQ_LLOG = 1, /* unnamed llogs */
- FID_SEQ_ECHO = 2,
- FID_SEQ_OST_MDT1 = 3,
- FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
- FID_SEQ_LLOG_NAME = 10, /* named llogs */
- FID_SEQ_RSVD = 11,
- FID_SEQ_IGIF = 12,
- FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
- FID_SEQ_IDIF = 0x100000000ULL,
- FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
- /* Normal FID sequence starts from this value, i.e. 1<<33 */
- FID_SEQ_START = 0x200000000ULL,
- /* sequence for local pre-defined FIDs listed in local_oid */
- FID_SEQ_LOCAL_FILE = 0x200000001ULL,
- FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
- /* sequence is used for local named objects FIDs generated
- * by local_object_storage library */
- FID_SEQ_LOCAL_NAME = 0x200000003ULL,
-	/* Because the current FLD only caches the fid sequence (not the
-	 * oid) on the client side, if a FID needs to be exposed to
-	 * clients, all fids under one sequence must be located on one
-	 * MDT. */
- FID_SEQ_SPECIAL = 0x200000004ULL,
- FID_SEQ_QUOTA = 0x200000005ULL,
- FID_SEQ_QUOTA_GLB = 0x200000006ULL,
- FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
- FID_SEQ_NORMAL = 0x200000400ULL,
- FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
-};
-
-#define OBIF_OID_MAX_BITS 32
-#define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
-#define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
-#define IDIF_OID_MAX_BITS 48
-#define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
-#define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)
-
-/** OID for FID_SEQ_SPECIAL */
-enum special_oid {
- /* Big Filesystem Lock to serialize rename operations */
- FID_OID_SPECIAL_BFL = 1UL,
-};
-
-/** OID for FID_SEQ_DOT_LUSTRE */
-enum dot_lustre_oid {
- FID_OID_DOT_LUSTRE = 1UL,
- FID_OID_DOT_LUSTRE_OBF = 2UL,
-};
-
-static inline int fid_seq_is_mdt0(__u64 seq)
-{
- return (seq == FID_SEQ_OST_MDT0);
-}
-
-static inline int fid_seq_is_mdt(const __u64 seq)
-{
- return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
-};
-
-static inline int fid_seq_is_echo(__u64 seq)
-{
- return (seq == FID_SEQ_ECHO);
-}
-
-static inline int fid_is_echo(const struct lu_fid *fid)
-{
- return fid_seq_is_echo(fid_seq(fid));
-}
-
-static inline int fid_seq_is_llog(__u64 seq)
-{
- return (seq == FID_SEQ_LLOG);
-}
-
-static inline int fid_is_llog(const struct lu_fid *fid)
-{
- /* file with OID == 0 is not llog but contains last oid */
- return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
-}
-
-static inline int fid_seq_is_rsvd(const __u64 seq)
-{
- return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
-};
-
-static inline int fid_seq_is_special(const __u64 seq)
-{
- return seq == FID_SEQ_SPECIAL;
-};
-
-static inline int fid_seq_is_local_file(const __u64 seq)
-{
- return seq == FID_SEQ_LOCAL_FILE ||
- seq == FID_SEQ_LOCAL_NAME;
-};
-
-static inline int fid_seq_is_root(const __u64 seq)
-{
- return seq == FID_SEQ_ROOT;
-}
-
-static inline int fid_seq_is_dot(const __u64 seq)
-{
- return seq == FID_SEQ_DOT_LUSTRE;
-}
-
-static inline int fid_seq_is_default(const __u64 seq)
-{
- return seq == FID_SEQ_LOV_DEFAULT;
-}
-
-static inline int fid_is_mdt0(const struct lu_fid *fid)
-{
- return fid_seq_is_mdt0(fid_seq(fid));
-}
-
-static inline void lu_root_fid(struct lu_fid *fid)
-{
- fid->f_seq = FID_SEQ_ROOT;
- fid->f_oid = 1;
- fid->f_ver = 0;
-}
-
-/**
- * Check if a fid is igif or not.
- * \param fid the fid to be tested.
- * \return true if the fid is a igif; otherwise false.
- */
-static inline int fid_seq_is_igif(const __u64 seq)
-{
- return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
-}
-
-static inline int fid_is_igif(const struct lu_fid *fid)
-{
- return fid_seq_is_igif(fid_seq(fid));
-}
-
-/**
- * Check if a fid is idif or not.
- * \param fid the fid to be tested.
- * \return true if the fid is a idif; otherwise false.
- */
-static inline int fid_seq_is_idif(const __u64 seq)
-{
- return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
-}
-
-static inline int fid_is_idif(const struct lu_fid *fid)
-{
- return fid_seq_is_idif(fid_seq(fid));
-}
-
-static inline int fid_is_local_file(const struct lu_fid *fid)
-{
- return fid_seq_is_local_file(fid_seq(fid));
-}
-
-static inline int fid_seq_is_norm(const __u64 seq)
-{
- return (seq >= FID_SEQ_NORMAL);
-}
-
-static inline int fid_is_norm(const struct lu_fid *fid)
-{
- return fid_seq_is_norm(fid_seq(fid));
-}
-
-/* convert an OST objid into an IDIF FID SEQ number */
-static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
-{
- return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
-}
-
-/* convert a packed IDIF FID into an OST objid */
-static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
-{
- return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
-}
-
-/* extract ost index from IDIF FID */
-static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
-{
- return (fid_seq(fid) >> 16) & 0xffff;
-}
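/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * round-tripping a legacy OST object id through the IDIF helpers above.
 * fid_idif_seq() packs the OST index and the top 16 bits of the id into the
 * sequence; fid_idif_id() recombines them with the low 32 bits, so only the
 * 48-bit IDIF_OID_MASK portion of the id survives.
 */
static inline int example_idif_roundtrip(__u64 objid, __u32 ost_idx)
{
	__u64 seq = fid_idif_seq(objid, ost_idx);
	/* the low 32 bits of the object id travel in the FID oid field */
	__u64 back = fid_idif_id(seq, (__u32)objid, 0);

	return back == (objid & IDIF_OID_MASK);
}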
-
-/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
-static inline __u64 ostid_seq(const struct ost_id *ostid)
-{
- if (fid_seq_is_mdt0(ostid->oi.oi_seq))
- return FID_SEQ_OST_MDT0;
-
- if (fid_seq_is_default(ostid->oi.oi_seq))
- return FID_SEQ_LOV_DEFAULT;
-
- if (fid_is_idif(&ostid->oi_fid))
- return FID_SEQ_OST_MDT0;
-
- return fid_seq(&ostid->oi_fid);
-}
-
-/* extract OST objid from a wire ost_id (id/seq) pair */
-static inline __u64 ostid_id(const struct ost_id *ostid)
-{
- if (fid_seq_is_mdt0(ostid_seq(ostid)))
- return ostid->oi.oi_id & IDIF_OID_MASK;
-
- if (fid_is_idif(&ostid->oi_fid))
- return fid_idif_id(fid_seq(&ostid->oi_fid),
- fid_oid(&ostid->oi_fid), 0);
-
- return fid_oid(&ostid->oi_fid);
-}
-
-static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
-{
- if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
- oi->oi.oi_seq = seq;
- } else {
- oi->oi_fid.f_seq = seq;
-		/* Note: if f_oid + f_ver is zero, we need to initialize
-		 * it to 1; otherwise ostid_seq() will treat this
-		 * as an old ostid (oi_seq == 0). */
- if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
- oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
- }
-}
-
-static inline void ostid_set_seq_mdt0(struct ost_id *oi)
-{
- ostid_set_seq(oi, FID_SEQ_OST_MDT0);
-}
-
-static inline void ostid_set_seq_echo(struct ost_id *oi)
-{
- ostid_set_seq(oi, FID_SEQ_ECHO);
-}
-
-static inline void ostid_set_seq_llog(struct ost_id *oi)
-{
- ostid_set_seq(oi, FID_SEQ_LLOG);
-}
-
-/**
- * Note: we need to check oi_seq to decide where to set oi_id,
- * so oi_seq should always be set before oi_id.
- */
-static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
-{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- if (oid >= IDIF_MAX_OID) {
- CERROR("Bad %llu to set "DOSTID"\n",
- oid, POSTID(oi));
- return;
- }
- oi->oi.oi_id = oid;
- } else {
- if (oid > OBIF_MAX_OID) {
- CERROR("Bad %llu to set "DOSTID"\n",
- oid, POSTID(oi));
- return;
- }
- oi->oi_fid.f_oid = oid;
- }
-}
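/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the ordering constraint noted above in practice -- the sequence must be set
 * before the id so that ostid_set_id() can tell which union member to fill.
 */
static inline void example_ostid_fill_mdt0(struct ost_id *oi, __u64 oid)
{
	ostid_set_seq_mdt0(oi);	/* seq first: selects the wire oi_id/oi_seq layout */
	ostid_set_id(oi, oid);	/* then the object id */
}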
-
-static inline void ostid_inc_id(struct ost_id *oi)
-{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
- CERROR("Bad inc "DOSTID"\n", POSTID(oi));
- return;
- }
- oi->oi.oi_id++;
- } else {
- oi->oi_fid.f_oid++;
- }
-}
-
-static inline void ostid_dec_id(struct ost_id *oi)
-{
- if (fid_seq_is_mdt0(ostid_seq(oi)))
- oi->oi.oi_id--;
- else
- oi->oi_fid.f_oid--;
-}
-
-/**
- * Unpack an OST object id/seq (group) into a FID. This is needed for
- * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
- * FIDs. Note that if an id/seq is already in FID/IDIF format it will
- * be passed through unchanged. Only legacy OST objects in "group 0"
- * will be mapped into the IDIF namespace so that they can fit into the
- * struct lu_fid fields without loss. For reference see:
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
- */
-static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
- __u32 ost_idx)
-{
- if (ost_idx > 0xffff) {
- CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
- ost_idx);
- return -EBADF;
- }
-
- if (fid_seq_is_mdt0(ostid_seq(ostid))) {
- /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
- * that we map into the IDIF namespace. It allows up to 2^48
- * objects per OST, as this is the object namespace that has
- * been in production for years. This can handle create rates
- * of 1M objects/s/OST for 9 years, or combinations thereof. */
- if (ostid_id(ostid) >= IDIF_MAX_OID) {
- CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
- POSTID(ostid), ost_idx);
- return -EBADF;
- }
- fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
- /* truncate to 32 bits by assignment */
- fid->f_oid = ostid_id(ostid);
- /* in theory, not currently used */
- fid->f_ver = ostid_id(ostid) >> 48;
- } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
- /* This is either an IDIF object, which identifies objects across
- * all OSTs, or a regular FID. The IDIF namespace maps legacy
- * OST objects into the FID namespace. In both cases, we just
- * pass the FID through, no conversion needed. */
- if (ostid->oi_fid.f_ver != 0) {
- CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
- POSTID(ostid), ost_idx);
- return -EBADF;
- }
- *fid = ostid->oi_fid;
- }
-
- return 0;
-}
-
-/* pack any OST FID into an ostid (id/seq) for the wire/disk */
-static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
-{
- if (unlikely(fid_seq_is_igif(fid->f_seq))) {
- CERROR("bad IGIF, "DFID"\n", PFID(fid));
- return -EBADF;
- }
-
- if (fid_is_idif(fid)) {
- ostid_set_seq_mdt0(ostid);
- ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
- fid_ver(fid)));
- } else {
- ostid->oi_fid = *fid;
- }
-
- return 0;
-}
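/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the usual wire conversion pair -- unpack an ost_id into a FID with
 * ostid_to_fid() and pack it back with fid_to_ostid(), propagating the
 * -EBADF errors the helpers above may return.
 */
static inline int example_ostid_fid_roundtrip(struct ost_id *oi, __u32 ost_idx)
{
	struct lu_fid fid;
	int rc;

	rc = ostid_to_fid(&fid, oi, ost_idx);
	if (rc != 0)
		return rc;

	return fid_to_ostid(&fid, oi);
}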
-
-/* Check whether the fid is for LAST_ID */
-static inline int fid_is_last_id(const struct lu_fid *fid)
-{
- return (fid_oid(fid) == 0);
-}
-
-/**
- * Get the inode number from an igif.
- * \param fid an igif to get the inode number from.
- * \return inode number for the igif.
- */
-static inline ino_t lu_igif_ino(const struct lu_fid *fid)
-{
- return fid_seq(fid);
-}
-
-void lustre_swab_ost_id(struct ost_id *oid);
-
-/**
- * Get the inode generation from an igif.
- * \param fid an igif to get the inode generation from.
- * \return inode generation for the igif.
- */
-static inline __u32 lu_igif_gen(const struct lu_fid *fid)
-{
- return fid_oid(fid);
-}
-
-/**
- * Build igif from the inode number/generation.
- */
-static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
-{
- fid->f_seq = ino;
- fid->f_oid = gen;
- fid->f_ver = 0;
-}
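/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * an IGIF simply packs the inode number into f_seq and the generation into
 * f_oid, so lu_igif_ino() and lu_igif_gen() recover exactly what
 * lu_igif_build() stored.
 */
static inline int example_igif_roundtrip(__u32 ino, __u32 gen)
{
	struct lu_fid fid;

	lu_igif_build(&fid, ino, gen);
	return lu_igif_ino(&fid) == ino && lu_igif_gen(&fid) == gen;
}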
-
-/*
- * Fids are transmitted across network (in the sender byte-ordering),
- * and stored on disk in big-endian order.
- */
-static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = cpu_to_le64(fid_seq(src));
- dst->f_oid = cpu_to_le32(fid_oid(src));
- dst->f_ver = cpu_to_le32(fid_ver(src));
-}
-
-static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = le64_to_cpu(fid_seq(src));
- dst->f_oid = le32_to_cpu(fid_oid(src));
- dst->f_ver = le32_to_cpu(fid_ver(src));
-}
-
-static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = cpu_to_be64(fid_seq(src));
- dst->f_oid = cpu_to_be32(fid_oid(src));
- dst->f_ver = cpu_to_be32(fid_ver(src));
-}
-
-static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
-{
- dst->f_seq = be64_to_cpu(fid_seq(src));
- dst->f_oid = be32_to_cpu(fid_oid(src));
- dst->f_ver = be32_to_cpu(fid_ver(src));
-}
-
-static inline int fid_is_sane(const struct lu_fid *fid)
-{
- return fid != NULL &&
- ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
- fid_is_igif(fid) || fid_is_idif(fid) ||
- fid_seq_is_rsvd(fid_seq(fid)));
-}
-
-static inline int fid_is_zero(const struct lu_fid *fid)
-{
- return fid_seq(fid) == 0 && fid_oid(fid) == 0;
-}
-
-void lustre_swab_lu_fid(struct lu_fid *fid);
-void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-
-static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
-{
- return memcmp(f0, f1, sizeof(*f0)) == 0;
-}
-
-#define __diff_normalize(val0, val1) \
-({ \
- typeof(val0) __val0 = (val0); \
- typeof(val1) __val1 = (val1); \
- \
- (__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1); \
-})
-
-static inline int lu_fid_cmp(const struct lu_fid *f0,
- const struct lu_fid *f1)
-{
- return
- __diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
- __diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
- __diff_normalize(fid_ver(f0), fid_ver(f1));
-}
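/*
 * Illustrative note (not part of the original header): lu_fid_cmp() orders
 * FIDs by sequence first, then oid, then version, returning -1/0/+1 in the
 * style of memcmp(), e.g.:
 *
 *	if (lu_fid_cmp(&a, &b) < 0)
 *		;	/* a sorts before b */
 */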
-
-static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
- struct ost_id *dst_oi)
-{
- if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
- dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
- } else {
- fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
- }
-}
-
-static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
- struct ost_id *dst_oi)
-{
- if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
- dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
- } else {
- fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
- }
-}
-
-/** @} lu_fid */
-
-/** \defgroup lu_dir lu_dir
- * @{ */
-
-/**
- * Enumeration of possible directory entry attributes.
- *
- * Attributes follow directory entry header in the order they appear in this
- * enumeration.
- */
-enum lu_dirent_attrs {
- LUDA_FID = 0x0001,
- LUDA_TYPE = 0x0002,
- LUDA_64BITHASH = 0x0004,
-
-	/* The following attrs are used by the MDT internally only
-	 * and are not visible to the client. */
-
- /* Verify the dirent consistency */
- LUDA_VERIFY = 0x8000,
- /* Only check but not repair the dirent inconsistency */
- LUDA_VERIFY_DRYRUN = 0x4000,
-	/* The dirent has been repaired, or is to be repaired (dryrun). */
- LUDA_REPAIR = 0x2000,
-	/* The system has been upgraded; it has been, or is to be, repaired (dryrun). */
- LUDA_UPGRADE = 0x1000,
-	/* Ignore this record and go directly to the next one. */
- LUDA_IGNORE = 0x0800,
-};
-
-#define LU_DIRENT_ATTRS_MASK 0xf800
-
-/**
- * Layout of readdir pages, as transmitted on wire.
- */
-struct lu_dirent {
- /** valid if LUDA_FID is set. */
- struct lu_fid lde_fid;
- /** a unique entry identifier: a hash or an offset. */
- __u64 lde_hash;
- /** total record length, including all attributes. */
- __u16 lde_reclen;
- /** name length */
- __u16 lde_namelen;
- /** optional variable size attributes following this entry.
- * taken from enum lu_dirent_attrs.
- */
- __u32 lde_attrs;
-	/** name is followed by the attributes indicated in ->lde_attrs, in
- * their natural order. After the last attribute, padding bytes are
- * added to make ->lde_reclen a multiple of 8.
- */
- char lde_name[0];
-};
-
-/*
- * Definitions of optional directory entry attributes formats.
- *
- * Individual attributes do not have their length encoded in a generic way. It
- * is assumed that the consumer of an attribute knows its format. This means that
- * it is impossible to skip over an unknown attribute, except by skipping over all
- * remaining attributes (by using ->lde_reclen), which is not too
- * constraining, because new server versions will append new attributes at
- * the end of an entry.
- */
-
-/**
- * Fid directory attribute: the fid of an object referenced by the entry. This
- * will almost always be requested by the client and supplied by the server.
- *
- * Aligned to 8 bytes.
- */
-/* For compatibility with 1.8, let's keep the fid in the lu_dirent struct. */
-
-/**
- * File type.
- *
- * Aligned to 2 bytes.
- */
-struct luda_type {
- __u16 lt_type;
-};
-
-#ifndef IFSHIFT
-#define IFSHIFT 12
-#endif
-
-#ifndef IFTODT
-#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
-#endif
-#ifndef DTTOIF
-#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
-#endif
-
-
-struct lu_dirpage {
- __u64 ldp_hash_start;
- __u64 ldp_hash_end;
- __u32 ldp_flags;
- __u32 ldp_pad0;
- struct lu_dirent ldp_entries[0];
-};
-
-enum lu_dirpage_flags {
- /**
- * dirpage contains no entry.
- */
- LDF_EMPTY = 1 << 0,
- /**
- * last entry's lde_hash equals ldp_hash_end.
- */
- LDF_COLLIDE = 1 << 1
-};
-
-static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
-{
- if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
- return NULL;
- else
- return dp->ldp_entries;
-}
-
-static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
-{
- struct lu_dirent *next;
-
- if (le16_to_cpu(ent->lde_reclen) != 0)
- next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
- else
- next = NULL;
-
- return next;
-}
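/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * walking a readdir page with the iterators above. Iteration stops when
 * lu_dirent_next() returns NULL, i.e. when an entry with a zero lde_reclen
 * is reached, or immediately if the page carries LDF_EMPTY.
 */
static inline int example_count_dirents(struct lu_dirpage *dp)
{
	struct lu_dirent *ent;
	int count = 0;

	for (ent = lu_dirent_start(dp); ent != NULL; ent = lu_dirent_next(ent))
		count++;

	return count;
}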
-
-static inline int lu_dirent_calc_size(int namelen, __u16 attr)
-{
- int size;
-
- if (attr & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
- size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
- size += sizeof(struct luda_type);
- } else
- size = sizeof(struct lu_dirent) + namelen;
-
- return (size + 7) & ~7;
-}
-
-static inline int lu_dirent_size(struct lu_dirent *ent)
-{
- if (le16_to_cpu(ent->lde_reclen) == 0) {
- return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
- le32_to_cpu(ent->lde_attrs));
- }
- return le16_to_cpu(ent->lde_reclen);
-}
-
-#define MDS_DIR_END_OFF 0xfffffffffffffffeULL
-
-/**
- * MDS_READPAGE page size
- *
- * This is the directory page size packed in MDS_READPAGE RPC.
- * It's different from PAGE_CACHE_SIZE because the client needs to
- * access the struct lu_dirpage header packed at the beginning of
- * the "page"; without this there isn't any way to find where the
- * lu_dirpage header is if the client and server PAGE_CACHE_SIZE differ.
- */
-#define LU_PAGE_SHIFT 12
-#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
-#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
-
-#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
-
-/** @} lu_dir */
-
-struct lustre_handle {
- __u64 cookie;
-};
-#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
-
-static inline int lustre_handle_is_used(struct lustre_handle *lh)
-{
- return lh->cookie != 0ull;
-}
-
-static inline int lustre_handle_equal(const struct lustre_handle *lh1,
- const struct lustre_handle *lh2)
-{
- return lh1->cookie == lh2->cookie;
-}
-
-static inline void lustre_handle_copy(struct lustre_handle *tgt,
- struct lustre_handle *src)
-{
- tgt->cookie = src->cookie;
-}
-
-/* flags for lm_flags */
-#define MSGHDR_AT_SUPPORT 0x1
-#define MSGHDR_CKSUM_INCOMPAT18 0x2
-
-#define lustre_msg lustre_msg_v2
-/* we depend on this structure to be 8-byte aligned */
-/* this type is only endian-adjusted in lustre_unpack_msg() */
-struct lustre_msg_v2 {
- __u32 lm_bufcount;
- __u32 lm_secflvr;
- __u32 lm_magic;
- __u32 lm_repsize;
- __u32 lm_cksum;
- __u32 lm_flags;
- __u32 lm_padding_2;
- __u32 lm_padding_3;
- __u32 lm_buflens[0];
-};
-
-/* without gss, ptlrpc_body is put at the first buffer. */
-#define PTLRPC_NUM_VERSIONS 4
-#define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */
-struct ptlrpc_body_v3 {
- struct lustre_handle pb_handle;
- __u32 pb_type;
- __u32 pb_version;
- __u32 pb_opc;
- __u32 pb_status;
- __u64 pb_last_xid;
- __u64 pb_last_seen;
- __u64 pb_last_committed;
- __u64 pb_transno;
- __u32 pb_flags;
- __u32 pb_op_flags;
- __u32 pb_conn_cnt;
- __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
- __u32 pb_service_time; /* for rep, actual service time */
- __u32 pb_limit;
- __u64 pb_slv;
- /* VBR: pre-versions */
- __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
- /* padding for future needs */
- __u64 pb_padding[4];
- char pb_jobid[JOBSTATS_JOBID_SIZE];
-};
-#define ptlrpc_body ptlrpc_body_v3
-
-struct ptlrpc_body_v2 {
- struct lustre_handle pb_handle;
- __u32 pb_type;
- __u32 pb_version;
- __u32 pb_opc;
- __u32 pb_status;
- __u64 pb_last_xid;
- __u64 pb_last_seen;
- __u64 pb_last_committed;
- __u64 pb_transno;
- __u32 pb_flags;
- __u32 pb_op_flags;
- __u32 pb_conn_cnt;
- __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
- __u32 pb_service_time; /* for rep, actual service time, also used for
- net_latency of req */
- __u32 pb_limit;
- __u64 pb_slv;
- /* VBR: pre-versions */
- __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
- /* padding for future needs */
- __u64 pb_padding[4];
-};
-
-void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
-
-/* message body offset for lustre_msg_v2 */
-/* ptlrpc body offset in all request/reply messages */
-#define MSG_PTLRPC_BODY_OFF 0
-
-/* normal request/reply message record offset */
-#define REQ_REC_OFF 1
-#define REPLY_REC_OFF 1
-
-/* ldlm request message body offset */
-#define DLM_LOCKREQ_OFF 1 /* lockreq offset */
-#define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */
-
-/* ldlm intent lock message body offset */
-#define DLM_INTENT_IT_OFF 2 /* intent lock it offset */
-#define DLM_INTENT_REC_OFF 3 /* intent lock record offset */
-
-/* ldlm reply message body offset */
-#define DLM_LOCKREPLY_OFF 1 /* lockrep offset */
-#define DLM_REPLY_REC_OFF 2 /* reply record offset */
-
-/** only use in req->rq_{req,rep}_swab_mask */
-#define MSG_PTLRPC_HEADER_OFF 31
-
-/* Flags that are operation-specific go in the top 16 bits. */
-#define MSG_OP_FLAG_MASK 0xffff0000
-#define MSG_OP_FLAG_SHIFT 16
-
-/* Flags that apply to all requests are in the bottom 16 bits */
-#define MSG_GEN_FLAG_MASK 0x0000ffff
-#define MSG_LAST_REPLAY 0x0001
-#define MSG_RESENT 0x0002
-#define MSG_REPLAY 0x0004
-/* #define MSG_AT_SUPPORT 0x0008
- * This was used in early prototypes of adaptive timeouts, and while there
- * shouldn't be any users of that code, there also isn't a need to use this
- * bit. Defer usage until at least 1.10 to avoid potential conflicts. */
-#define MSG_DELAY_REPLAY 0x0010
-#define MSG_VERSION_REPLAY 0x0020
-#define MSG_REQ_REPLAY_DONE 0x0040
-#define MSG_LOCK_REPLAY_DONE 0x0080
-
-/*
- * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
- */
-
-#define MSG_CONNECT_RECOVERING 0x00000001
-#define MSG_CONNECT_RECONNECT 0x00000002
-#define MSG_CONNECT_REPLAYABLE 0x00000004
-//#define MSG_CONNECT_PEER 0x8
-#define MSG_CONNECT_LIBCLIENT 0x00000010
-#define MSG_CONNECT_INITIAL 0x00000020
-#define MSG_CONNECT_ASYNC 0x00000040
-#define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
-#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
-
-/* Connect flags */
-#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
-#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
-#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
-#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
-#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
-#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
-#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
-#define OBD_CONNECT_ACL 0x80ULL /*access control lists */
-#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
-#define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
-#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
-#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
-#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
-#define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
-						  *We do not support JOIN FILE
-						  *anymore; this flag is kept
-						  *only to prevent the bit from
-						  *being reused.*/
-#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
-#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
-#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
-#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */
-#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
-#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
-#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
-#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
-#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
-#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
-#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
-#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
-#define OBD_CONNECT_REAL 0x8000000ULL /*real connection */
-#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
-#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
-#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
-#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
-#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
-#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
-#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
-#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
-#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
-#define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits
- * directory hash */
-#define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
-#define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
-#define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
-#define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
-#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
- * RPC error properly */
-#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
- * finer space reservation */
-#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
- * policy and 2.x server */
-#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
-#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
-#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
-#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
-#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
-#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
-#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
-
-/* XXX README XXX:
- * Please DO NOT add flag values here before first ensuring that this same
- * flag value is not in use on some other branch. Please clear any such
- * changes with senior engineers before starting to use a new flag. Then,
- * submit a small patch against EVERY branch that ONLY adds the new flag,
- * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
- * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
- * can be approved and landed easily to reserve the flag for future use. */
-
-/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
- * connection. It is a temporary bug fix for Imperative Recovery interop
- * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
- * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */
-#define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS
-
-#define OCD_HAS_FLAG(ocd, flg) \
- (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
-
-
-#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
-
-#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
- OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
- OBD_CONNECT_IBITS | \
- OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
- OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
- OBD_CONNECT_RMT_CLIENT | \
- OBD_CONNECT_RMT_CLIENT_FORCE | \
- OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
- OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
- OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
- OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
- OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
- OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
- OBD_CONNECT_EINPROGRESS | \
- OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
- OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
- OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
- OBD_CONNECT_FLOCK_DEAD | \
- OBD_CONNECT_DISP_STRIPE)
-
-#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
- OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
- OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
- OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
- OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
- LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
- OBD_CONNECT_RMT_CLIENT | \
- OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
- OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
- OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
- OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
- OBD_CONNECT_MAX_EASIZE | \
- OBD_CONNECT_EINPROGRESS | \
- OBD_CONNECT_JOBSTATS | \
- OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
- OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
- OBD_CONNECT_PINGLESS)
-#define ECHO_CONNECT_SUPPORTED (0)
-#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
- OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
- OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)
-
-/* Features required for this version of the client to work with server */
-#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
- OBD_CONNECT_FULL20)
-
-#define OBD_OCD_VERSION(major, minor, patch, fix) (((major)<<24) + \
- ((minor)<<16) + \
- ((patch)<<8) + (fix))
-#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255)
-#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255)
-#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255)
-#define OBD_OCD_VERSION_FIX(version) ((int)(version)&255)
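/*
 * Illustrative note (not part of the original header): OBD_OCD_VERSION()
 * packs one byte per component, so e.g. version 2.5.3.0 encodes as
 * (2 << 24) + (5 << 16) + (3 << 8) + 0 = 0x02050300, and the
 * OBD_OCD_VERSION_MAJOR/MINOR/PATCH/FIX() accessors recover 2, 5, 3 and 0.
 */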
-
-/* This structure is used for both request and reply.
- *
- * If we eventually have separate connect data for different types, which we
- * almost certainly will, then perhaps we stick a union in here. */
-struct obd_connect_data_v1 {
- __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
- __u32 ocd_version; /* lustre release version number */
- __u32 ocd_grant; /* initial cache grant amount (bytes) */
- __u32 ocd_index; /* LOV index to connect to */
- __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
- __u64 ocd_ibits_known; /* inode bits this client understands */
- __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
- __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
- __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
- __u32 ocd_unused; /* also fix lustre_swab_connect */
- __u64 ocd_transno; /* first transno from client to be replayed */
- __u32 ocd_group; /* MDS group on OST */
- __u32 ocd_cksum_types; /* supported checksum algorithms */
- __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
- __u32 ocd_instance; /* also fix lustre_swab_connect */
- __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
-};
-
-struct obd_connect_data {
- __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
- __u32 ocd_version; /* lustre release version number */
- __u32 ocd_grant; /* initial cache grant amount (bytes) */
- __u32 ocd_index; /* LOV index to connect to */
- __u32 ocd_brw_size; /* Maximum BRW size in bytes */
- __u64 ocd_ibits_known; /* inode bits this client understands */
- __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
- __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
- __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
- __u32 ocd_unused; /* also fix lustre_swab_connect */
- __u64 ocd_transno; /* first transno from client to be replayed */
- __u32 ocd_group; /* MDS group on OST */
- __u32 ocd_cksum_types; /* supported checksum algorithms */
- __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
- __u32 ocd_instance; /* instance # of this target */
- __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
- /* Fields after ocd_maxbytes are only accessible by the receiver
- * if the corresponding flag in ocd_connect_flags is set. Accessing
- * any field after ocd_maxbytes on the receiver without a valid flag
-	 * may result in out-of-bounds memory access and a kernel oops. */
- __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding6; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding7; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding8; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 padding9; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingA; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingB; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingC; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingD; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingE; /* added 2.1.0. also fix lustre_swab_connect */
- __u64 paddingF; /* added 2.1.0. also fix lustre_swab_connect */
-};
-/* XXX README XXX:
- * Please DO NOT use any fields here before first ensuring that this same
- * field is not in use on some other branch. Please clear any such changes
- * with senior engineers before starting to use a new field. Then, submit
- * a small patch against EVERY branch that ONLY adds the new field along with
- * the matching OBD_CONNECT flag, so that it can be approved and landed easily to
- * reserve the flag for future use. */
-
-
-void lustre_swab_connect(struct obd_connect_data *ocd);
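/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * testing a negotiated feature on the exchanged obd_connect_data with the
 * OCD_HAS_FLAG() helper defined above.
 */
static inline int example_peer_supports_acl(const struct obd_connect_data *ocd)
{
	/* expands to !!(ocd->ocd_connect_flags & OBD_CONNECT_ACL) */
	return OCD_HAS_FLAG(ocd, ACL);
}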
-
-/*
- * Supported checksum algorithms. Up to 32 checksum types are supported.
- * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
- * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
- * algorithm and also the OBD_FL_CKSUM* flags.
- */
-typedef enum {
- OBD_CKSUM_CRC32 = 0x00000001,
- OBD_CKSUM_ADLER = 0x00000002,
- OBD_CKSUM_CRC32C = 0x00000004,
-} cksum_type_t;
-
-/*
- * OST requests: OBDO & OBD request records
- */
-
-/* opcodes */
-typedef enum {
- OST_REPLY = 0, /* reply ? */
- OST_GETATTR = 1,
- OST_SETATTR = 2,
- OST_READ = 3,
- OST_WRITE = 4,
- OST_CREATE = 5,
- OST_DESTROY = 6,
- OST_GET_INFO = 7,
- OST_CONNECT = 8,
- OST_DISCONNECT = 9,
- OST_PUNCH = 10,
- OST_OPEN = 11,
- OST_CLOSE = 12,
- OST_STATFS = 13,
- OST_SYNC = 16,
- OST_SET_INFO = 17,
- OST_QUOTACHECK = 18,
- OST_QUOTACTL = 19,
- OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
- OST_LAST_OPC
-} ost_cmd_t;
-#define OST_FIRST_OPC OST_REPLY
-
-enum obdo_flags {
- OBD_FL_INLINEDATA = 0x00000001,
- OBD_FL_OBDMDEXISTS = 0x00000002,
- OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
- OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
- OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
- OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
- OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
- OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
- OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
-	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
- OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
- OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
- OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
- OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
- OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
- OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
- OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
- OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
-					   * XXX: obsolete - reserved for old
-					   * clients prior to 2.2 */
- OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
- OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
-
- /* Note that while these checksum values are currently separate bits,
- * in 2.x we can actually allow all values from 1-31 if we wanted. */
- OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
- OBD_FL_CKSUM_CRC32C,
-
- /* mask for local-only flag, which won't be sent over network */
- OBD_FL_LOCAL_MASK = 0xF0000000,
-};
-
-#define LOV_MAGIC_V1 0x0BD10BD0
-#define LOV_MAGIC LOV_MAGIC_V1
-#define LOV_MAGIC_JOIN_V1 0x0BD20BD0
-#define LOV_MAGIC_V3 0x0BD30BD0
-
-/*
- * magic for fully defined striping
- * the idea is that we should have different magics for striping "hints"
- * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
- * lov_mds_md_v[13]). at the moment the magics are used in wire protocol,
- * we can't just change them without lengthy preparation, but we still need a
- * mechanism to allow LOD to differentiate hint versus ready striping.
- * so, at the moment we use a trick: the MDT knows what to expect from a request
- * depending on the case (replay uses ready striping, a non-replay req uses
- * hints), so the MDT replaces the magic with the appropriate one and LOD can
- * easily understand what's inside -bzzz
- */
-#define LOV_MAGIC_V1_DEF 0x0CD10BD0
-#define LOV_MAGIC_V3_DEF 0x0CD30BD0
-
-#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
-#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
-#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
-#define LOV_PATTERN_CMOBD 0x200
-
-#define LOV_PATTERN_F_MASK 0xffff0000
-#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
-
-#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
-#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
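/*
 * Illustrative note (not part of the original header): lov_pattern() and
 * lov_pattern_flags() split a combined pattern word, e.g. for
 * LOV_PATTERN_RAID0 | LOV_PATTERN_F_RELEASED (0x80000001):
 *
 *	lov_pattern(0x80000001)       == 0x00000001	(LOV_PATTERN_RAID0)
 *	lov_pattern_flags(0x80000001) == 0x80000000	(LOV_PATTERN_F_RELEASED)
 */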
-
-#define lov_ost_data lov_ost_data_v1
-struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
- struct ost_id l_ost_oi; /* OST object ID */
- __u32 l_ost_gen; /* generation of this l_ost_idx */
- __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
-};
-
-#define lov_mds_md lov_mds_md_v1
-struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
- __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- /* lmm_stripe_count used to be __u32 */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- __u16 lmm_layout_gen; /* layout generation number */
- struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-};
-
-/**
- * Sigh, because pre-2.4 uses
- * struct lov_mds_md_v1 {
- * ........
- * __u64 lmm_object_id;
- * __u64 lmm_object_seq;
- * ......
- * }
- * to identify the LOV(MDT) object, and lmm_object_seq will
- * be a normal fid, which makes it hard to fold these conversions
- * into ostid_to_fid(), so we will do the lmm_oi/fid conversion separately.
- *
- * We can tell the lmm_oi apart as follows:
- * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
- * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
- * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
- *      lmm_oi.f_ver = 0
- *
- * But currently lmm_oi/lsm_oi does not have any "real" uses,
- * except for printing some information, and the user can always
- * get the real FID from the LMA; besides, this multiple-case check might
- * make swab more complicated. So we will keep using id/seq for lmm_oi.
- */
-
-static inline void fid_to_lmm_oi(const struct lu_fid *fid,
- struct ost_id *oi)
-{
- oi->oi.oi_id = fid_oid(fid);
- oi->oi.oi_seq = fid_seq(fid);
-}
-
-static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
-{
- oi->oi.oi_seq = seq;
-}
-
-static inline __u64 lmm_oi_id(struct ost_id *oi)
-{
- return oi->oi.oi_id;
-}
-
-static inline __u64 lmm_oi_seq(struct ost_id *oi)
-{
- return oi->oi.oi_seq;
-}
-
-static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
- struct ost_id *src_oi)
-{
- dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
-}
-
-static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
- struct ost_id *src_oi)
-{
- dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
- dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
-}
-
-/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
-
-#define MAX_MD_SIZE \
- (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
-#define MIN_MD_SIZE \
- (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
-
-#define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
-#define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
-#define XATTR_USER_PREFIX "user."
-#define XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_SECURITY_PREFIX "security."
-#define XATTR_LUSTRE_PREFIX "lustre."
-
-#define XATTR_NAME_LOV "trusted.lov"
-#define XATTR_NAME_LMA "trusted.lma"
-#define XATTR_NAME_LMV "trusted.lmv"
-#define XATTR_NAME_LINK "trusted.link"
-#define XATTR_NAME_FID "trusted.fid"
-#define XATTR_NAME_VERSION "trusted.version"
-#define XATTR_NAME_SOM "trusted.som"
-#define XATTR_NAME_HSM "trusted.hsm"
-#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
-
-struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
- __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- /* lmm_stripe_count used to be __u32 */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- __u16 lmm_layout_gen; /* layout generation number */
- char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
- struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-};
-
-static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
-{
- if (lmm_magic == LOV_MAGIC_V3)
- return sizeof(struct lov_mds_md_v3) +
- stripes * sizeof(struct lov_ost_data_v1);
- else
- return sizeof(struct lov_mds_md_v1) +
- stripes * sizeof(struct lov_ost_data_v1);
-}
-
-static inline __u32
-lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
-{
- switch (lmm_magic) {
- case LOV_MAGIC_V1: {
- struct lov_mds_md_v1 lmm;
-
- if (buf_size < sizeof(lmm))
- return 0;
-
- return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
- }
- case LOV_MAGIC_V3: {
- struct lov_mds_md_v3 lmm;
-
- if (buf_size < sizeof(lmm))
- return 0;
-
- return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
- }
- default:
- return 0;
- }
-}
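/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * for the V1/V3 magics handled above, lov_mds_md_size() and
 * lov_mds_md_max_stripe_count() are consistent -- a buffer sized for N
 * stripes reports room for at least N stripes.
 */
static inline int example_lmm_size_consistent(__u16 stripes, __u32 lmm_magic)
{
	__u32 size = lov_mds_md_size(stripes, lmm_magic);

	return lov_mds_md_max_stripe_count(size, lmm_magic) >= stripes;
}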
-
-#define OBD_MD_FLID (0x00000001ULL) /* object ID */
-#define OBD_MD_FLATIME (0x00000002ULL) /* access time */
-#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
-#define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
-#define OBD_MD_FLSIZE (0x00000010ULL) /* size */
-#define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
-#define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
-#define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
-#define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
-#define OBD_MD_FLUID (0x00000200ULL) /* user ID */
-#define OBD_MD_FLGID (0x00000400ULL) /* group ID */
-#define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
-#define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
-#define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
-/*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
-#define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
-#define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
-#define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
-#define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
-#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
-#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
-/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
-#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
-#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
-#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
-#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
- /* ->mds if epoch opens or closes */
-#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
-#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
-#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
-#define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
-#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
-
-#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
-#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
-#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
-#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
-
-#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
-#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
-#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
-#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
-#define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
-#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
-#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
-#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
-#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
-#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
- * under lock; for xattr
-						       * requests it means the
- * client holds the lock */
-#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
-
-#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
-#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
-#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
-#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
-
-#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
-#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
-
-#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
- OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
- OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
- OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
- OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
-
-#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
-
-/* don't forget obdo_fid which is way down at the bottom so it can
- * come after the definition of llog_cookie */
-
-enum hss_valid {
- HSS_SETMASK = 0x01,
- HSS_CLEARMASK = 0x02,
- HSS_ARCHIVE_ID = 0x04,
-};
-
-struct hsm_state_set {
- __u32 hss_valid;
- __u32 hss_archive_id;
- __u64 hss_setmask;
- __u64 hss_clearmask;
-};
-
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
-
-void lustre_swab_obd_statfs(struct obd_statfs *os);
-
-/* ost_body.data values for OST_BRW */
-
-#define OBD_BRW_READ 0x01
-#define OBD_BRW_WRITE 0x02
-#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
-#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
-				      * transfer and is not accounted for
-				      * in the grant. */
-#define OBD_BRW_CHECK 0x10
-#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
-#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
-#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
-#define OBD_BRW_NOQUOTA 0x100
-#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
-#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
-#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
-#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
-#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
-
-#define OBD_OBJECT_EOF 0xffffffffffffffffULL
-
-#define OST_MIN_PRECREATE 32
-#define OST_MAX_PRECREATE 20000
-
-struct obd_ioobj {
- struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
- __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
- * now (PTLRPC_BULK_OPS_COUNT - 1) in
- * high 16 bits in 2.4 and later */
- __u32 ioo_bufcnt; /* number of niobufs for this object */
-};
-
-#define IOOBJ_MAX_BRW_BITS 16
-#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
-#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
-#define ioobj_max_brw_set(ioo, num) \
-do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
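/*
 * Illustrative note (not part of the original header): ioobj_max_brw_set()
 * stores (num - 1) in the high 16 bits of ioo_max_brw and
 * ioobj_max_brw_get() recovers num, e.g. for num = 16:
 *
 *	set:  ioo_max_brw = (16 - 1) << 16 = 0x000f0000
 *	get:  (0x000f0000 >> 16) + 1      = 16
 */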
-
-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
-
-/* multiple of 8 bytes => can array */
-struct niobuf_remote {
- __u64 offset;
- __u32 len;
- __u32 flags;
-};
-
-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
-
-/* lock value block communicated between the filter and llite */
-
-/* OST_LVB_ERR_INIT is needed because the return code in rc is
- * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
-#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
-#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
-#define OST_LVB_IS_ERR(blocks) \
- ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
-#define OST_LVB_SET_ERR(blocks, rc) \
- do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
-#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
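/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * encoding a negative return code into an lvb_blocks value with the
 * OST_LVB_* helpers above and recovering it, e.g. rc = -5.
 */
static inline int example_lvb_err_roundtrip(void)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, -5);
	return OST_LVB_IS_ERR(blocks) && OST_LVB_GET_ERR(blocks) == -5;
}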
-
-struct ost_lvb_v1 {
- __u64 lvb_size;
- __s64 lvb_mtime;
- __s64 lvb_atime;
- __s64 lvb_ctime;
- __u64 lvb_blocks;
-};
-
-void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
-
-struct ost_lvb {
- __u64 lvb_size;
- __s64 lvb_mtime;
- __s64 lvb_atime;
- __s64 lvb_ctime;
- __u64 lvb_blocks;
- __u32 lvb_mtime_ns;
- __u32 lvb_atime_ns;
- __u32 lvb_ctime_ns;
- __u32 lvb_padding;
-};
-
-void lustre_swab_ost_lvb(struct ost_lvb *lvb);
-
-/*
- * lquota data structures
- */
-
-#ifndef QUOTABLOCK_BITS
-#define QUOTABLOCK_BITS 10
-#endif
-
-#ifndef QUOTABLOCK_SIZE
-#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
-#endif
-
-#ifndef toqb
-#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
-#endif
-
-/* The lquota_id structure is a union of all the possible identifier types that
- * can be used with quota, this includes:
- * - 64-bit user ID
- * - 64-bit group ID
- * - a FID which can be used for per-directory quota in the future */
-union lquota_id {
- struct lu_fid qid_fid; /* FID for per-directory quota */
- __u64 qid_uid; /* user identifier */
- __u64 qid_gid; /* group identifier */
-};
-
-/* quotactl management */
-struct obd_quotactl {
- __u32 qc_cmd;
- __u32 qc_type; /* see Q_* flag below */
- __u32 qc_id;
- __u32 qc_stat;
- struct obd_dqinfo qc_dqinfo;
- struct obd_dqblk qc_dqblk;
-};
-
-void lustre_swab_obd_quotactl(struct obd_quotactl *q);
-
-#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
-#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
-#define Q_GETOINFO 0x800102 /* get obd quota info */
-#define Q_GETOQUOTA 0x800103 /* get obd quotas */
-#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
-
-#define Q_COPY(out, in, member) (out)->member = (in)->member
-
-#define QCTL_COPY(out, in) \
-do { \
- Q_COPY(out, in, qc_cmd); \
- Q_COPY(out, in, qc_type); \
- Q_COPY(out, in, qc_id); \
- Q_COPY(out, in, qc_stat); \
- Q_COPY(out, in, qc_dqinfo); \
- Q_COPY(out, in, qc_dqblk); \
-} while (0)
-
-/* Body of quota request used for quota acquire/release RPCs between quota
- * master (aka QMT) and slaves (aka QSD). */
-struct quota_body {
- struct lu_fid qb_fid; /* FID of global index packing the pool ID
- * and type (data or metadata) as well as
- * the quota type (user or group). */
- union lquota_id qb_id; /* uid or gid or directory FID */
- __u32 qb_flags; /* see below */
- __u32 qb_padding;
- __u64 qb_count; /* acquire/release count (kbytes/inodes) */
- __u64 qb_usage; /* current slave usage (kbytes/inodes) */
- __u64 qb_slv_ver; /* slave index file version */
- struct lustre_handle qb_lockh; /* per-ID lock handle */
- struct lustre_handle qb_glb_lockh; /* global lock handle */
- __u64 qb_padding1[4];
-};
-
-/* When the quota_body is used in the reply to the quota global intent
- * lock (IT_QUOTA_CONN), qb_fid contains the slave index file FID. */
-#define qb_slv_fid qb_fid
-/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
- * quota reply */
-#define qb_qunit qb_usage
-
-#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
-#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
-#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
-#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
-
-void lustre_swab_quota_body(struct quota_body *b);
-
-/* Quota types currently supported */
-enum {
- LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
- LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
- LQUOTA_TYPE_MAX
-};
-
-/* There are 2 different resource types on which a quota limit can be enforced:
- * - inodes on the MDTs
- * - blocks on the OSTs */
-enum {
- LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
- LQUOTA_RES_DT = 0x02,
- LQUOTA_LAST_RES,
- LQUOTA_FIRST_RES = LQUOTA_RES_MD
-};
-#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
-
-/*
- * Space accounting support
- * Format of an accounting record, providing disk usage information for a given
- * user or group
- */
-struct lquota_acct_rec { /* 16 bytes */
- __u64 bspace; /* current space in use */
- __u64 ispace; /* current # inodes in use */
-};
-
-/*
- * Global quota index support
- * Format of a global record, providing global quota settings for a given quota
- * identifier
- */
-struct lquota_glb_rec { /* 32 bytes */
- __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
- __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
- __u64 qbr_time; /* grace time, in seconds */
- __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
- * kbytes */
-};
-
-/*
- * Slave index support
- * Format of a slave record, recording how much space is granted to a given
- * slave
- */
-struct lquota_slv_rec { /* 8 bytes */
- __u64 qsr_granted; /* space granted to the slave for the key=ID,
- * in #inodes or kbytes */
-};
-
-/* Data structures associated with the quota locks */
-
-/* Glimpse descriptor used for the index & per-ID quota locks */
-struct ldlm_gl_lquota_desc {
- union lquota_id gl_id; /* quota ID subject to the glimpse */
- __u64 gl_flags; /* see LQUOTA_FL* below */
- __u64 gl_ver; /* new index version */
- __u64 gl_hardlimit; /* new hardlimit or qunit value */
- __u64 gl_softlimit; /* new softlimit */
- __u64 gl_time;
- __u64 gl_pad2;
-};
-#define gl_qunit gl_hardlimit /* current qunit value used when
- * glimpsing per-ID quota locks */
-
-/* quota glimpse flags */
-#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
-
-/* LVB used with quota (global and per-ID) locks */
-struct lquota_lvb {
- __u64 lvb_flags; /* see LQUOTA_FL* above */
- __u64 lvb_id_may_rel; /* space that might be released later */
- __u64 lvb_id_rel; /* space released by the slave for this ID */
- __u64 lvb_id_qunit; /* current qunit value */
- __u64 lvb_pad1;
-};
-
-void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
-
-/* LVB used with global quota lock */
-#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
-
-/* op codes */
-typedef enum {
- QUOTA_DQACQ = 601,
- QUOTA_DQREL = 602,
- QUOTA_LAST_OPC
-} quota_cmd_t;
-#define QUOTA_FIRST_OPC QUOTA_DQACQ
-
-/*
- * MDS REQ RECORDS
- */
-
-/* opcodes */
-typedef enum {
- MDS_GETATTR = 33,
- MDS_GETATTR_NAME = 34,
- MDS_CLOSE = 35,
- MDS_REINT = 36,
- MDS_READPAGE = 37,
- MDS_CONNECT = 38,
- MDS_DISCONNECT = 39,
- MDS_GETSTATUS = 40,
- MDS_STATFS = 41,
- MDS_PIN = 42,
- MDS_UNPIN = 43,
- MDS_SYNC = 44,
- MDS_DONE_WRITING = 45,
- MDS_SET_INFO = 46,
- MDS_QUOTACHECK = 47,
- MDS_QUOTACTL = 48,
- MDS_GETXATTR = 49,
- MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
- MDS_WRITEPAGE = 51,
- MDS_IS_SUBDIR = 52,
- MDS_GET_INFO = 53,
- MDS_HSM_STATE_GET = 54,
- MDS_HSM_STATE_SET = 55,
- MDS_HSM_ACTION = 56,
- MDS_HSM_PROGRESS = 57,
- MDS_HSM_REQUEST = 58,
- MDS_HSM_CT_REGISTER = 59,
- MDS_HSM_CT_UNREGISTER = 60,
- MDS_SWAP_LAYOUTS = 61,
- MDS_LAST_OPC
-} mds_cmd_t;
-
-#define MDS_FIRST_OPC MDS_GETATTR
-
-
-/* opcodes for object update */
-typedef enum {
- UPDATE_OBJ = 1000,
- UPDATE_LAST_OPC
-} update_cmd_t;
-
-#define UPDATE_FIRST_OPC UPDATE_OBJ
-
-/*
- * Do not exceed 63
- */
-
-typedef enum {
- REINT_SETATTR = 1,
- REINT_CREATE = 2,
- REINT_LINK = 3,
- REINT_UNLINK = 4,
- REINT_RENAME = 5,
- REINT_OPEN = 6,
- REINT_SETXATTR = 7,
- REINT_RMENTRY = 8,
-// REINT_WRITE = 9,
- REINT_MAX
-} mds_reint_t, mdt_reint_t;
-
-void lustre_swab_generic_32s(__u32 *val);
-
-/* the disposition of the intent outlines what was executed */
-#define DISP_IT_EXECD 0x00000001
-#define DISP_LOOKUP_EXECD 0x00000002
-#define DISP_LOOKUP_NEG 0x00000004
-#define DISP_LOOKUP_POS 0x00000008
-#define DISP_OPEN_CREATE 0x00000010
-#define DISP_OPEN_OPEN 0x00000020
-#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
-#define DISP_ENQ_OPEN_REF 0x00800000
-#define DISP_ENQ_CREATE_REF 0x01000000
-#define DISP_OPEN_LOCK 0x02000000
-#define DISP_OPEN_LEASE 0x04000000
-#define DISP_OPEN_STRIPE 0x08000000
-
-/* INODE LOCK PARTS */
-#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
- * was used to protect permission (mode,
- * owner, group etc) before 2.4. */
-#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
-#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
-#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
-
-/* The PERM bit was added in 2.4 and is used to protect permissions (mode,
- * owner, group, acl, etc.), separating the permission bits from the LOOKUP
- * lock, because for remote directories (in DNE) these locks will be granted
- * by different MDTs (different ldlm namespaces).
- *
- * For a local directory, the MDT will always grant UPDATE_LOCK|PERM_LOCK together.
- * For a remote directory, the master MDT, where the remote directory is, will
- * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
- * will grant LOOKUP_LOCK. */
-#define MDS_INODELOCK_PERM 0x000010
-#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
-
-#define MDS_INODELOCK_MAXSHIFT 5
-/* This FULL lock is useful to take for unlink-type operations. */
-#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
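/*
 * Illustrative note (not part of the original header): with
 * MDS_INODELOCK_MAXSHIFT = 5, MDS_INODELOCK_FULL evaluates to
 * (1 << 6) - 1 = 0x3f, i.e. all six inode lock bits (LOOKUP, UPDATE, OPEN,
 * LAYOUT, PERM, XATTR) at once.
 */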
-
-void lustre_swab_ll_fid(struct ll_fid *fid);
-
-/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
- * but was moved into name[1] along with the OID to avoid consuming the
- * name[2,3] fields that need to be used for the quota id (also a FID). */
-enum {
- LUSTRE_RES_ID_SEQ_OFF = 0,
- LUSTRE_RES_ID_VER_OID_OFF = 1,
- LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
- LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
- LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
- LUSTRE_RES_ID_HSH_OFF = 3
-};
-
-#define MDS_STATUS_CONN 1
-#define MDS_STATUS_LOV 2
-
-/* mdt_thread_info.mti_flags. */
-enum md_op_flags {
-	/* This flag indicates that Size-on-MDS attributes have changed. */
- MF_SOM_CHANGE = (1 << 0),
-	/* These flags indicate that an epoch opens or closes. */
- MF_EPOCH_OPEN = (1 << 1),
- MF_EPOCH_CLOSE = (1 << 2),
- MF_MDC_CANCEL_FID1 = (1 << 3),
- MF_MDC_CANCEL_FID2 = (1 << 4),
- MF_MDC_CANCEL_FID3 = (1 << 5),
- MF_MDC_CANCEL_FID4 = (1 << 6),
- /* There is a pending attribute update. */
- MF_SOM_AU = (1 << 7),
-	/* Cancel OST locks while getting OST attributes. */
- MF_GETATTR_LOCK = (1 << 8),
- MF_GET_MDT_IDX = (1 << 9),
-};
-
-#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
-
-#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
-
-/* these should be identical to their EXT4_*_FL counterparts, they are
- * redefined here only to avoid dragging in fs/ext4/ext4.h */
-#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
-#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
-#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
-#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
-#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
-
-/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
- * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
- * protocol equivalents of LDISKFS_*_FL values stored on disk, while
- * the S_* flags are kernel-internal values that change between kernel
- * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
- * See b=16526 for a full history. */
-static inline int ll_ext_to_inode_flags(int flags)
-{
- return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
- ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
- ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
-#if defined(S_DIRSYNC)
- ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
-#endif
- ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
-}
-
-static inline int ll_inode_to_ext_flags(int iflags)
-{
- return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
- ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
- ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
-#if defined(S_DIRSYNC)
- ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
-#endif
- ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
-}
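/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the two converters above are inverses for the flags they know about, e.g.
 * LUSTRE_APPEND_FL | LUSTRE_IMMUTABLE_FL maps to S_APPEND | S_IMMUTABLE and
 * back unchanged.
 */
static inline int example_flags_roundtrip(void)
{
	int wire = LUSTRE_APPEND_FL | LUSTRE_IMMUTABLE_FL;

	return ll_inode_to_ext_flags(ll_ext_to_inode_flags(wire)) == wire;
}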
-
-/* 64 possible states */
-enum md_transient_state {
- MS_RESTORE = (1 << 0), /* restore is running */
-};
-
-struct mdt_body {
- struct lu_fid fid1;
- struct lu_fid fid2;
- struct lustre_handle handle;
- __u64 valid;
- __u64 size; /* Offset, in the case of MDS_READPAGE */
- __s64 mtime;
- __s64 atime;
- __s64 ctime;
- __u64 blocks; /* XID, in the case of MDS_READPAGE */
- __u64 ioepoch;
- __u64 t_state; /* transient file state defined in
- * enum md_transient_state
- * was "ino" until 2.4.0 */
- __u32 fsuid;
- __u32 fsgid;
- __u32 capability;
- __u32 mode;
- __u32 uid;
- __u32 gid;
- __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
- __u32 rdev;
- __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
- __u32 unused2; /* was "generation" until 2.4.0 */
- __u32 suppgid;
- __u32 eadatasize;
- __u32 aclsize;
- __u32 max_mdsize;
- __u32 max_cookiesize;
- __u32 uid_h; /* high 32-bits of uid, for FUID */
- __u32 gid_h; /* high 32-bits of gid, for FUID */
- __u32 padding_5; /* also fix lustre_swab_mdt_body */
- __u64 padding_6;
- __u64 padding_7;
- __u64 padding_8;
- __u64 padding_9;
- __u64 padding_10;
-}; /* 216 */
-
-void lustre_swab_mdt_body(struct mdt_body *b);
-
-struct mdt_ioepoch {
- struct lustre_handle handle;
- __u64 ioepoch;
- __u32 flags;
- __u32 padding;
-};
-
-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
-
-/* permissions for md_perm.mp_perm */
-enum {
- CFS_SETUID_PERM = 0x01,
- CFS_SETGID_PERM = 0x02,
- CFS_SETGRP_PERM = 0x04,
- CFS_RMTACL_PERM = 0x08,
- CFS_RMTOWN_PERM = 0x10
-};
-
-/* inode access permissions for a remote user; the inode info is omitted
- * because the client already knows it. */
-struct mdt_remote_perm {
- __u32 rp_uid;
- __u32 rp_gid;
- __u32 rp_fsuid;
- __u32 rp_fsuid_h;
- __u32 rp_fsgid;
- __u32 rp_fsgid_h;
- __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
- __u32 rp_padding;
-};
-
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
-
-struct mdt_rec_setattr {
- __u32 sa_opcode;
- __u32 sa_cap;
- __u32 sa_fsuid;
- __u32 sa_fsuid_h;
- __u32 sa_fsgid;
- __u32 sa_fsgid_h;
- __u32 sa_suppgid;
- __u32 sa_suppgid_h;
- __u32 sa_padding_1;
- __u32 sa_padding_1_h;
- struct lu_fid sa_fid;
- __u64 sa_valid;
- __u32 sa_uid;
- __u32 sa_gid;
- __u64 sa_size;
- __u64 sa_blocks;
- __s64 sa_mtime;
- __s64 sa_atime;
- __s64 sa_ctime;
- __u32 sa_attr_flags;
- __u32 sa_mode;
- __u32 sa_bias; /* some operation flags */
- __u32 sa_padding_3;
- __u32 sa_padding_4;
- __u32 sa_padding_5;
-};
-
-void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
-
-/*
- * Attribute flags used in mdt_rec_setattr::sa_valid.
- * The kernel's #defines for ATTR_* should not be used over the network
- * since the client and MDS may run different kernels (see bug 13828)
- * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
- */
-#define MDS_ATTR_MODE 0x1ULL /* = 1 */
-#define MDS_ATTR_UID 0x2ULL /* = 2 */
-#define MDS_ATTR_GID 0x4ULL /* = 4 */
-#define MDS_ATTR_SIZE 0x8ULL /* = 8 */
-#define MDS_ATTR_ATIME 0x10ULL /* = 16 */
-#define MDS_ATTR_MTIME 0x20ULL /* = 32 */
-#define MDS_ATTR_CTIME 0x40ULL /* = 64 */
-#define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
-#define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
-#define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change itself, but force the change */
-#define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
-#define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
-#define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
-#define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
-#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */
-#define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
-
-#ifndef FMODE_READ
-#define FMODE_READ 00000001
-#define FMODE_WRITE 00000002
-#endif
-
-#define MDS_FMODE_CLOSED 00000000
-#define MDS_FMODE_EXEC 00000004
-/* IO Epoch is opened on a closed file. */
-#define MDS_FMODE_EPOCH 01000000
-/* IO Epoch is opened on a file truncate. */
-#define MDS_FMODE_TRUNC 02000000
-/* Size-on-MDS Attribute Update is pending. */
-#define MDS_FMODE_SOM 04000000
-
-#define MDS_OPEN_CREATED 00000010
-#define MDS_OPEN_CROSS 00000020
-
-#define MDS_OPEN_CREAT 00000100
-#define MDS_OPEN_EXCL 00000200
-#define MDS_OPEN_TRUNC 00001000
-#define MDS_OPEN_APPEND 00002000
-#define MDS_OPEN_SYNC 00010000
-#define MDS_OPEN_DIRECTORY 00200000
-
-#define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
-#define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
-#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
-#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
- * We do not support JOIN FILE
- * anymore, reserve this flags
- * just for preventing such bit
- * to be reused. */
-
-#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
-#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
-#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
-#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
-#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
- * hsm restore) */
-#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
- unlinked */
-#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
- * delegation, succeed if it's not
- * being opened with conflict mode.
- */
-#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
-
-enum mds_op_bias {
- MDS_CHECK_SPLIT = 1 << 0,
- MDS_CROSS_REF = 1 << 1,
- MDS_VTX_BYPASS = 1 << 2,
- MDS_PERM_BYPASS = 1 << 3,
- MDS_SOM = 1 << 4,
- MDS_QUOTA_IGNORE = 1 << 5,
- MDS_CLOSE_CLEANUP = 1 << 6,
- MDS_KEEP_ORPHAN = 1 << 7,
- MDS_RECOV_OPEN = 1 << 8,
- MDS_DATA_MODIFIED = 1 << 9,
- MDS_CREATE_VOLATILE = 1 << 10,
- MDS_OWNEROVERRIDE = 1 << 11,
- MDS_HSM_RELEASE = 1 << 12,
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_create {
- __u32 cr_opcode;
- __u32 cr_cap;
- __u32 cr_fsuid;
- __u32 cr_fsuid_h;
- __u32 cr_fsgid;
- __u32 cr_fsgid_h;
- __u32 cr_suppgid1;
- __u32 cr_suppgid1_h;
- __u32 cr_suppgid2;
- __u32 cr_suppgid2_h;
- struct lu_fid cr_fid1;
- struct lu_fid cr_fid2;
- struct lustre_handle cr_old_handle; /* handle in case of open replay */
- __s64 cr_time;
- __u64 cr_rdev;
- __u64 cr_ioepoch;
- __u64 cr_padding_1; /* rr_blocks */
- __u32 cr_mode;
- __u32 cr_bias;
-	/* the set/get_mrc_cr_flags() helpers must be used to access the
-	 * 64-bit cr_flags [cr_flags_l, cr_flags_h]; this is done to
-	 * extend the cr_flags size without breaking 1.8 compatibility */
- __u32 cr_flags_l; /* for use with open, low 32 bits */
- __u32 cr_flags_h; /* for use with open, high 32 bits */
- __u32 cr_umask; /* umask for create */
- __u32 cr_padding_4; /* rr_padding_4 */
-};
-
-static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
-{
- mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
- mrc->cr_flags_h = (__u32)(flags >> 32);
-}
-
-static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
-{
- return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
-}
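
A hedged sketch of the two helpers above (the wrapper function is hypothetical; LASSERT is libcfs's assertion macro): MDS_OPEN_VOLATILE does not fit in 32 bits, so it lands in cr_flags_h and is recovered intact by get_mrc_cr_flags().

static inline void mdt_open_flags_example(void)
{
	struct mdt_rec_create mrc = { 0 };
	__u64 open_flags = MDS_OPEN_CREAT | MDS_OPEN_LOCK | MDS_OPEN_VOLATILE;

	set_mrc_cr_flags(&mrc, open_flags);
	/* cr_flags_l now holds the low 32 bits, cr_flags_h the high bits
	 * (MDS_OPEN_VOLATILE); the split/join is lossless */
	LASSERT(get_mrc_cr_flags(&mrc) == open_flags);
}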
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_link {
- __u32 lk_opcode;
- __u32 lk_cap;
- __u32 lk_fsuid;
- __u32 lk_fsuid_h;
- __u32 lk_fsgid;
- __u32 lk_fsgid_h;
- __u32 lk_suppgid1;
- __u32 lk_suppgid1_h;
- __u32 lk_suppgid2;
- __u32 lk_suppgid2_h;
- struct lu_fid lk_fid1;
- struct lu_fid lk_fid2;
- __s64 lk_time;
- __u64 lk_padding_1; /* rr_atime */
- __u64 lk_padding_2; /* rr_ctime */
- __u64 lk_padding_3; /* rr_size */
- __u64 lk_padding_4; /* rr_blocks */
- __u32 lk_bias;
- __u32 lk_padding_5; /* rr_mode */
- __u32 lk_padding_6; /* rr_flags */
- __u32 lk_padding_7; /* rr_padding_2 */
- __u32 lk_padding_8; /* rr_padding_3 */
- __u32 lk_padding_9; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_unlink {
- __u32 ul_opcode;
- __u32 ul_cap;
- __u32 ul_fsuid;
- __u32 ul_fsuid_h;
- __u32 ul_fsgid;
- __u32 ul_fsgid_h;
- __u32 ul_suppgid1;
- __u32 ul_suppgid1_h;
- __u32 ul_suppgid2;
- __u32 ul_suppgid2_h;
- struct lu_fid ul_fid1;
- struct lu_fid ul_fid2;
- __s64 ul_time;
- __u64 ul_padding_2; /* rr_atime */
- __u64 ul_padding_3; /* rr_ctime */
- __u64 ul_padding_4; /* rr_size */
- __u64 ul_padding_5; /* rr_blocks */
- __u32 ul_bias;
- __u32 ul_mode;
- __u32 ul_padding_6; /* rr_flags */
- __u32 ul_padding_7; /* rr_padding_2 */
- __u32 ul_padding_8; /* rr_padding_3 */
- __u32 ul_padding_9; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_rename {
- __u32 rn_opcode;
- __u32 rn_cap;
- __u32 rn_fsuid;
- __u32 rn_fsuid_h;
- __u32 rn_fsgid;
- __u32 rn_fsgid_h;
- __u32 rn_suppgid1;
- __u32 rn_suppgid1_h;
- __u32 rn_suppgid2;
- __u32 rn_suppgid2_h;
- struct lu_fid rn_fid1;
- struct lu_fid rn_fid2;
- __s64 rn_time;
- __u64 rn_padding_1; /* rr_atime */
- __u64 rn_padding_2; /* rr_ctime */
- __u64 rn_padding_3; /* rr_size */
- __u64 rn_padding_4; /* rr_blocks */
- __u32 rn_bias; /* some operation flags */
- __u32 rn_mode; /* cross-ref rename has mode */
- __u32 rn_padding_5; /* rr_flags */
- __u32 rn_padding_6; /* rr_padding_2 */
- __u32 rn_padding_7; /* rr_padding_3 */
- __u32 rn_padding_8; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec */
-struct mdt_rec_setxattr {
- __u32 sx_opcode;
- __u32 sx_cap;
- __u32 sx_fsuid;
- __u32 sx_fsuid_h;
- __u32 sx_fsgid;
- __u32 sx_fsgid_h;
- __u32 sx_suppgid1;
- __u32 sx_suppgid1_h;
- __u32 sx_suppgid2;
- __u32 sx_suppgid2_h;
- struct lu_fid sx_fid;
- __u64 sx_padding_1; /* These three are rr_fid2 */
- __u32 sx_padding_2;
- __u32 sx_padding_3;
- __u64 sx_valid;
- __s64 sx_time;
- __u64 sx_padding_5; /* rr_ctime */
- __u64 sx_padding_6; /* rr_size */
- __u64 sx_padding_7; /* rr_blocks */
- __u32 sx_size;
- __u32 sx_flags;
- __u32 sx_padding_8; /* rr_flags */
- __u32 sx_padding_9; /* rr_padding_2 */
- __u32 sx_padding_10; /* rr_padding_3 */
- __u32 sx_padding_11; /* rr_padding_4 */
-};
-
-/*
- * mdt_rec_reint is the template for all mdt_reint_xxx structures.
- * Do NOT change the size of various members, otherwise the value
- * will be broken in lustre_swab_mdt_rec_reint().
- *
- * If you add new members in other mdt_reint_xxx structures and need to use the
- * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
- */
-struct mdt_rec_reint {
- __u32 rr_opcode;
- __u32 rr_cap;
- __u32 rr_fsuid;
- __u32 rr_fsuid_h;
- __u32 rr_fsgid;
- __u32 rr_fsgid_h;
- __u32 rr_suppgid1;
- __u32 rr_suppgid1_h;
- __u32 rr_suppgid2;
- __u32 rr_suppgid2_h;
- struct lu_fid rr_fid1;
- struct lu_fid rr_fid2;
- __s64 rr_mtime;
- __s64 rr_atime;
- __s64 rr_ctime;
- __u64 rr_size;
- __u64 rr_blocks;
- __u32 rr_bias;
- __u32 rr_mode;
- __u32 rr_flags;
- __u32 rr_flags_h;
- __u32 rr_umask;
- __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
-};
-
-void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
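
A hypothetical compile-time check (not in the original header; the function name is invented) that makes the comment above concrete: every mdt_rec_* variant must overlay struct mdt_rec_reint exactly, since the wire buffer is swabbed through that template. BUILD_BUG_ON is the kernel's compile-time assert.

static inline void mdt_rec_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct mdt_rec_setattr)  != sizeof(struct mdt_rec_reint));
	BUILD_BUG_ON(sizeof(struct mdt_rec_create)   != sizeof(struct mdt_rec_reint));
	BUILD_BUG_ON(sizeof(struct mdt_rec_link)     != sizeof(struct mdt_rec_reint));
	BUILD_BUG_ON(sizeof(struct mdt_rec_unlink)   != sizeof(struct mdt_rec_reint));
	BUILD_BUG_ON(sizeof(struct mdt_rec_rename)   != sizeof(struct mdt_rec_reint));
	BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) != sizeof(struct mdt_rec_reint));
}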
-
-struct lmv_desc {
- __u32 ld_tgt_count; /* how many MDS's */
- __u32 ld_active_tgt_count; /* how many active */
- __u32 ld_default_stripe_count; /* how many objects are used */
- __u32 ld_pattern; /* default MEA_MAGIC_* */
- __u64 ld_default_hash_size;
- __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
- __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
-	__u32 ld_qos_maxage;		/* in seconds */
- __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
- __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
- struct obd_uuid ld_uuid;
-};
-
-/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
-struct lmv_stripe_md {
- __u32 mea_magic;
- __u32 mea_count;
- __u32 mea_master;
- __u32 mea_padding;
- char mea_pool_name[LOV_MAXPOOLNAME];
- struct lu_fid mea_ids[0];
-};
-
-/* lmv structures */
-#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
-#define MEA_MAGIC_ALL_CHARS 0xb222a11c
-#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
-
-#define MAX_HASH_SIZE_32 0x7fffffffUL
-#define MAX_HASH_SIZE 0x7fffffffffffffffULL
-#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
-
-enum fld_rpc_opc {
- FLD_QUERY = 900,
- FLD_LAST_OPC,
- FLD_FIRST_OPC = FLD_QUERY
-};
-
-enum seq_rpc_opc {
- SEQ_QUERY = 700,
- SEQ_LAST_OPC,
- SEQ_FIRST_OPC = SEQ_QUERY
-};
-
-enum seq_op {
- SEQ_ALLOC_SUPER = 0,
- SEQ_ALLOC_META = 1
-};
-
-/*
- * LOV data structures
- */
-
-#define LOV_MAX_UUID_BUFFER_SIZE 8192
-/* The size of the buffer the lov/mdc reserves for the
- * array of UUIDs returned by the MDS. With the current
- * protocol, this will limit the max number of OSTs per LOV */
-
-#define LOV_DESC_MAGIC 0xB0CCDE5C
-#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
-#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
-
-/* LOV settings descriptor (should only contain static info) */
-struct lov_desc {
- __u32 ld_tgt_count; /* how many OBD's */
- __u32 ld_active_tgt_count; /* how many active */
- __u32 ld_default_stripe_count; /* how many objects are used */
- __u32 ld_pattern; /* default PATTERN_RAID0 */
- __u64 ld_default_stripe_size; /* in bytes */
- __u64 ld_default_stripe_offset; /* in bytes */
- __u32 ld_padding_0; /* unused */
-	__u32 ld_qos_maxage;		/* in seconds */
- __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
- __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
- struct obd_uuid ld_uuid;
-};
-
-#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
-
-void lustre_swab_lov_desc(struct lov_desc *ld);
-
-/*
- * LDLM requests:
- */
-/* opcodes -- MUST be distinct from OST/MDS opcodes */
-typedef enum {
- LDLM_ENQUEUE = 101,
- LDLM_CONVERT = 102,
- LDLM_CANCEL = 103,
- LDLM_BL_CALLBACK = 104,
- LDLM_CP_CALLBACK = 105,
- LDLM_GL_CALLBACK = 106,
- LDLM_SET_INFO = 107,
- LDLM_LAST_OPC
-} ldlm_cmd_t;
-#define LDLM_FIRST_OPC LDLM_ENQUEUE
-
-#define RES_NAME_SIZE 4
-struct ldlm_res_id {
- __u64 name[RES_NAME_SIZE];
-};
-
-#define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
-#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
- (res)->lr_name.name[2], (res)->lr_name.name[3]
-
-void lustre_swab_ldlm_res_id(struct ldlm_res_id *id);
-
-static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
- const struct ldlm_res_id *res1)
-{
- return !memcmp(res0, res1, sizeof(*res0));
-}
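
A small hypothetical debug helper (not part of the original header) showing how the DLDLMRES format string above pairs with the four name words of a resource id:

static inline void ldlm_res_id_print(const struct ldlm_res_id *res)
{
	pr_debug("resource " DLDLMRES "\n",
		 res->name[0], res->name[1], res->name[2], res->name[3]);
}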
-
-/* lock types */
-typedef enum {
- LCK_MINMODE = 0,
- LCK_EX = 1,
- LCK_PW = 2,
- LCK_PR = 4,
- LCK_CW = 8,
- LCK_CR = 16,
- LCK_NL = 32,
- LCK_GROUP = 64,
- LCK_COS = 128,
- LCK_MAXMODE
-} ldlm_mode_t;
-
-#define LCK_MODE_NUM 8
-
-typedef enum {
- LDLM_PLAIN = 10,
- LDLM_EXTENT = 11,
- LDLM_FLOCK = 12,
- LDLM_IBITS = 13,
- LDLM_MAX_TYPE
-} ldlm_type_t;
-
-#define LDLM_MIN_TYPE LDLM_PLAIN
-
-struct ldlm_extent {
- __u64 start;
- __u64 end;
- __u64 gid;
-};
-
-static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
- struct ldlm_extent *ex2)
-{
- return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
-}
-
-/* check if @ex1 contains @ex2 */
-static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
- struct ldlm_extent *ex2)
-{
- return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
-}
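
A minimal sketch of the two extent predicates (hypothetical wrapper; LASSERT is libcfs's assertion macro): the extent [0, 4095] both overlaps and fully contains [1024, 2047].

static inline void ldlm_extent_example(void)
{
	struct ldlm_extent whole = { .start = 0,    .end = 4095, .gid = 0 };
	struct ldlm_extent part  = { .start = 1024, .end = 2047, .gid = 0 };

	/* both checks hold for this pair */
	LASSERT(ldlm_extent_overlap(&whole, &part));
	LASSERT(ldlm_extent_contain(&whole, &part));
}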
-
-struct ldlm_inodebits {
- __u64 bits;
-};
-
-struct ldlm_flock_wire {
- __u64 lfw_start;
- __u64 lfw_end;
- __u64 lfw_owner;
- __u32 lfw_padding;
- __u32 lfw_pid;
-};
-
-/* it's important that the fields of the ldlm_extent structure match
- * the first fields of the ldlm_flock structure because there is only
- * one ldlm_swab routine to process the ldlm_policy_data_t union. if
- * this ever changes we will need to swab the union differently based
- * on the resource type. */
-
-typedef union {
- struct ldlm_extent l_extent;
- struct ldlm_flock_wire l_flock;
- struct ldlm_inodebits l_inodebits;
-} ldlm_wire_policy_data_t;
-
-void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d);
-
-union ldlm_gl_desc {
- struct ldlm_gl_lquota_desc lquota_desc;
-};
-
-void lustre_swab_gl_desc(union ldlm_gl_desc *);
-
-struct ldlm_intent {
- __u64 opc;
-};
-
-void lustre_swab_ldlm_intent(struct ldlm_intent *i);
-
-struct ldlm_resource_desc {
- ldlm_type_t lr_type;
- __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
- struct ldlm_res_id lr_name;
-};
-
-void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r);
-
-struct ldlm_lock_desc {
- struct ldlm_resource_desc l_resource;
- ldlm_mode_t l_req_mode;
- ldlm_mode_t l_granted_mode;
- ldlm_wire_policy_data_t l_policy_data;
-};
-
-void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
-
-#define LDLM_LOCKREQ_HANDLES 2
-#define LDLM_ENQUEUE_CANCEL_OFF 1
-
-struct ldlm_request {
- __u32 lock_flags;
- __u32 lock_count;
- struct ldlm_lock_desc lock_desc;
- struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
-};
-
-void lustre_swab_ldlm_request(struct ldlm_request *rq);
-
-/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
- * Otherwise, 2 are available. */
-#define ldlm_request_bufsize(count, type) \
-({ \
- int _avail = LDLM_LOCKREQ_HANDLES; \
- _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
- sizeof(struct ldlm_request) + \
- (count > _avail ? count - _avail : 0) * \
- sizeof(struct lustre_handle); \
-})
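
A worked example of the macro above (the wrapper function is hypothetical): for an LDLM_ENQUEUE carrying three cancel handles, one of the two embedded slots is reserved for the enqueued lock itself, so two extra lustre_handles are appended; a plain cancel RPC with two handles fits in the base structure.

static inline int ldlm_enqueue_with_cancels_bufsize(void)
{
	/* == sizeof(struct ldlm_request) + 2 * sizeof(struct lustre_handle) */
	return ldlm_request_bufsize(3, LDLM_ENQUEUE);
}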
-
-struct ldlm_reply {
- __u32 lock_flags;
- __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
- struct ldlm_lock_desc lock_desc;
- struct lustre_handle lock_handle;
- __u64 lock_policy_res1;
- __u64 lock_policy_res2;
-};
-
-void lustre_swab_ldlm_reply(struct ldlm_reply *r);
-
-#define ldlm_flags_to_wire(flags) ((__u32)(flags))
-#define ldlm_flags_from_wire(flags) ((__u64)(flags))
-
-/*
- * Opcodes for mountconf (mgs and mgc)
- */
-typedef enum {
- MGS_CONNECT = 250,
- MGS_DISCONNECT,
- MGS_EXCEPTION, /* node died, etc. */
- MGS_TARGET_REG, /* whenever target starts up */
- MGS_TARGET_DEL,
- MGS_SET_INFO,
- MGS_CONFIG_READ,
- MGS_LAST_OPC
-} mgs_cmd_t;
-#define MGS_FIRST_OPC MGS_CONNECT
-
-#define MGS_PARAM_MAXLEN 1024
-#define KEY_SET_INFO "set_info"
-
-struct mgs_send_param {
- char mgs_param[MGS_PARAM_MAXLEN];
-};
-
-/* We pass this info to the MGS so it can write config logs */
-#define MTI_NAME_MAXLEN 64
-#define MTI_PARAM_MAXLEN 4096
-#define MTI_NIDS_MAX 32
-struct mgs_target_info {
- __u32 mti_lustre_ver;
- __u32 mti_stripe_index;
- __u32 mti_config_ver;
- __u32 mti_flags;
- __u32 mti_nid_count;
- __u32 mti_instance; /* Running instance of target */
- char mti_fsname[MTI_NAME_MAXLEN];
- char mti_svname[MTI_NAME_MAXLEN];
- char mti_uuid[sizeof(struct obd_uuid)];
- __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
- char mti_params[MTI_PARAM_MAXLEN];
-};
-
-void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
-
-struct mgs_nidtbl_entry {
- __u64 mne_version; /* table version of this entry */
- __u32 mne_instance; /* target instance # */
- __u32 mne_index; /* target index */
-	__u32 mne_length;	/* length of this entry, in bytes */
- __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
- __u8 mne_nid_type; /* type of nid(mbz). for ipv6. */
-	__u8 mne_nid_size;	/* size of each NID, in bytes */
- __u8 mne_nid_count; /* # of NIDs in buffer */
- union {
- lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
- } u;
-};
-
-void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
-
-struct mgs_config_body {
- char mcb_name[MTI_NAME_MAXLEN]; /* logname */
- __u64 mcb_offset; /* next index of config log to request */
- __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
- __u8 mcb_reserved;
- __u8 mcb_bits; /* bits unit size of config log */
- __u32 mcb_units; /* # of units for bulk transfer */
-};
-
-void lustre_swab_mgs_config_body(struct mgs_config_body *body);
-
-struct mgs_config_res {
- __u64 mcr_offset; /* index of last config log */
- __u64 mcr_size; /* size of the log */
-};
-
-void lustre_swab_mgs_config_res(struct mgs_config_res *body);
-
-/* Config marker flags (in config log) */
-#define CM_START 0x01
-#define CM_END 0x02
-#define CM_SKIP 0x04
-#define CM_UPGRADE146 0x08
-#define CM_EXCLUDE 0x10
-#define CM_START_SKIP (CM_START | CM_SKIP)
-
-struct cfg_marker {
- __u32 cm_step; /* aka config version */
- __u32 cm_flags;
- __u32 cm_vers; /* lustre release version number */
- __u32 cm_padding; /* 64 bit align */
-	__s64 cm_createtime;	/* when this record was first created */
-	__s64 cm_canceltime;	/* when this record is no longer valid */
- char cm_tgtname[MTI_NAME_MAXLEN];
- char cm_comment[MTI_NAME_MAXLEN];
-};
-
-void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
-
-/*
- * Opcodes for multiple servers.
- */
-
-typedef enum {
- OBD_PING = 400,
- OBD_LOG_CANCEL,
- OBD_QC_CALLBACK,
- OBD_IDX_READ,
- OBD_LAST_OPC
-} obd_cmd_t;
-#define OBD_FIRST_OPC OBD_PING
-
-/* catalog of log objects */
-
-/** Identifier for a single log object */
-struct llog_logid {
- struct ost_id lgl_oi;
- __u32 lgl_ogen;
-} __attribute__((packed));
-
-/** Records written to the CATALOGS list */
-#define CATLIST "CATALOGS"
-struct llog_catid {
- struct llog_logid lci_logid;
- __u32 lci_padding1;
- __u32 lci_padding2;
- __u32 lci_padding3;
-} __attribute__((packed));
-
-/* Log data record types - there is no specific reason that these need to
- * be related to the RPC opcodes, but no reason not to (may be handy later?)
- */
-#define LLOG_OP_MAGIC 0x10600000
-#define LLOG_OP_MASK 0xfff00000
-
-typedef enum {
- LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
- OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
- /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
- MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
- REINT_UNLINK, /* obsolete after 2.5.0 */
- MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
- REINT_UNLINK,
- /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
- MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
- REINT_SETATTR,
- OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
- /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
- LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
- /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
- CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
- CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
- HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
- LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
- LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
-} llog_op_type;
-
-#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
- (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
-
-/** Log record header - stored in little endian order.
- * Each record must start with this struct, end with a llog_rec_tail,
- * and be a multiple of 256 bits in size.
- */
-struct llog_rec_hdr {
- __u32 lrh_len;
- __u32 lrh_index;
- __u32 lrh_type;
- __u32 lrh_id;
-};
-
-struct llog_rec_tail {
- __u32 lrt_len;
- __u32 lrt_index;
-};
-
-/* Where data follow just after header */
-#define REC_DATA(ptr) \
- ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
-
-#define REC_DATA_LEN(rec) \
- (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
- sizeof(struct llog_rec_tail))
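
A hypothetical walker (not from the original header) illustrating the two accessors above: a record's payload is whatever sits between the fixed header and tail.

static void llog_print_rec(struct llog_rec_hdr *rec)
{
	void *payload = REC_DATA(rec);
	int len = REC_DATA_LEN(rec);	/* e.g. 64 - 16 - 8 = 40 for a 64-byte record */

	pr_debug("llog rec type %#x: %d payload bytes at %p\n",
		 rec->lrh_type, len, payload);
}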
-
-struct llog_logid_rec {
- struct llog_rec_hdr lid_hdr;
- struct llog_logid lid_id;
- __u32 lid_padding1;
- __u64 lid_padding2;
- __u64 lid_padding3;
- struct llog_rec_tail lid_tail;
-} __attribute__((packed));
-
-struct llog_unlink_rec {
- struct llog_rec_hdr lur_hdr;
- __u64 lur_oid;
- __u32 lur_oseq;
- __u32 lur_count;
- struct llog_rec_tail lur_tail;
-} __attribute__((packed));
-
-struct llog_unlink64_rec {
- struct llog_rec_hdr lur_hdr;
- struct lu_fid lur_fid;
- __u32 lur_count; /* to destroy the lost precreated */
- __u32 lur_padding1;
- __u64 lur_padding2;
- __u64 lur_padding3;
- struct llog_rec_tail lur_tail;
-} __attribute__((packed));
-
-struct llog_setattr64_rec {
- struct llog_rec_hdr lsr_hdr;
- struct ost_id lsr_oi;
- __u32 lsr_uid;
- __u32 lsr_uid_h;
- __u32 lsr_gid;
- __u32 lsr_gid_h;
- __u64 lsr_padding;
- struct llog_rec_tail lsr_tail;
-} __attribute__((packed));
-
-struct llog_size_change_rec {
- struct llog_rec_hdr lsc_hdr;
- struct ll_fid lsc_fid;
- __u32 lsc_ioepoch;
- __u32 lsc_padding1;
- __u64 lsc_padding2;
- __u64 lsc_padding3;
- struct llog_rec_tail lsc_tail;
-} __attribute__((packed));
-
-#define CHANGELOG_MAGIC 0xca103000
-
-/** \a changelog_rec_type's that can't be masked */
-#define CHANGELOG_MINMASK (1 << CL_MARK)
-/** bits covering all \a changelog_rec_type's */
-#define CHANGELOG_ALLMASK 0xFFFFFFFF
-/** default \a changelog_rec_type mask */
-#define CHANGELOG_DEFMASK (CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE))
-
-/* changelog llog name, needed by client replicators */
-#define CHANGELOG_CATALOG "changelog_catalog"
-
-struct changelog_setinfo {
- __u64 cs_recno;
- __u32 cs_id;
-} __attribute__((packed));
-
-/** changelog record */
-struct llog_changelog_rec {
- struct llog_rec_hdr cr_hdr;
- struct changelog_rec cr;
-	struct llog_rec_tail cr_tail; /**< for_sizeof_only */
-} __attribute__((packed));
-
-struct llog_changelog_ext_rec {
- struct llog_rec_hdr cr_hdr;
- struct changelog_ext_rec cr;
-	struct llog_rec_tail cr_tail; /**< for_sizeof_only */
-} __attribute__((packed));
-
-#define CHANGELOG_USER_PREFIX "cl"
-
-struct llog_changelog_user_rec {
- struct llog_rec_hdr cur_hdr;
- __u32 cur_id;
- __u32 cur_padding;
- __u64 cur_endrec;
- struct llog_rec_tail cur_tail;
-} __attribute__((packed));
-
-enum agent_req_status {
- ARS_WAITING,
- ARS_STARTED,
- ARS_FAILED,
- ARS_CANCELED,
- ARS_SUCCEED,
-};
-
-static inline char *agent_req_status2name(enum agent_req_status ars)
-{
- switch (ars) {
- case ARS_WAITING:
- return "WAITING";
- case ARS_STARTED:
- return "STARTED";
- case ARS_FAILED:
- return "FAILED";
- case ARS_CANCELED:
- return "CANCELED";
- case ARS_SUCCEED:
- return "SUCCEED";
- default:
- return "UNKNOWN";
- }
-}
-
-static inline bool agent_req_in_final_state(enum agent_req_status ars)
-{
- return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
- (ars == ARS_CANCELED));
-}
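
A small hypothetical logging helper combining the two accessors above:

static inline void hsm_agent_req_report(enum agent_req_status ars)
{
	pr_info("HSM agent request: %s%s\n", agent_req_status2name(ars),
		agent_req_in_final_state(ars) ? " (final)" : "");
}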
-
-struct llog_agent_req_rec {
- struct llog_rec_hdr arr_hdr; /**< record header */
- __u32 arr_status; /**< status of the request */
- /* must match enum
- * agent_req_status */
- __u32 arr_archive_id; /**< backend archive number */
- __u64 arr_flags; /**< req flags */
- __u64 arr_compound_id; /**< compound cookie */
- __u64 arr_req_create; /**< req. creation time */
- __u64 arr_req_change; /**< req. status change time */
- struct hsm_action_item arr_hai; /**< req. to the agent */
-	struct llog_rec_tail arr_tail; /**< record tail for_sizeof_only */
-} __attribute__((packed));
-
-/* Old llog gen for compatibility */
-struct llog_gen {
- __u64 mnt_cnt;
- __u64 conn_cnt;
-} __attribute__((packed));
-
-struct llog_gen_rec {
- struct llog_rec_hdr lgr_hdr;
- struct llog_gen lgr_gen;
- __u64 padding1;
- __u64 padding2;
- __u64 padding3;
- struct llog_rec_tail lgr_tail;
-};
-
-/* On-disk header structure of each log object, stored in little endian order */
-#define LLOG_CHUNK_SIZE 8192
-#define LLOG_HEADER_SIZE (96)
-#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
-
-#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
-
-/* flags for the logs */
-enum llog_flag {
- LLOG_F_ZAP_WHEN_EMPTY = 0x1,
- LLOG_F_IS_CAT = 0x2,
- LLOG_F_IS_PLAIN = 0x4,
-};
-
-struct llog_log_hdr {
- struct llog_rec_hdr llh_hdr;
- __s64 llh_timestamp;
- __u32 llh_count;
- __u32 llh_bitmap_offset;
- __u32 llh_size;
- __u32 llh_flags;
- __u32 llh_cat_idx;
- /* for a catalog the first plain slot is next to it */
- struct obd_uuid llh_tgtuuid;
- __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
- __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
- struct llog_rec_tail llh_tail;
-} __attribute__((packed));
-
-#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
- llh->llh_bitmap_offset - \
- sizeof(llh->llh_tail)) * 8)
-
-/** log cookies are used to reference a specific log file and a record therein */
-struct llog_cookie {
- struct llog_logid lgc_lgl;
- __u32 lgc_subsys;
- __u32 lgc_index;
- __u32 lgc_padding;
-} __attribute__((packed));
-
-/** llog protocol */
-enum llogd_rpc_ops {
- LLOG_ORIGIN_HANDLE_CREATE = 501,
- LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
- LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
- LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
- LLOG_ORIGIN_HANDLE_CLOSE = 505,
- LLOG_ORIGIN_CONNECT = 506,
- LLOG_CATINFO = 507, /* deprecated */
- LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
- LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
- LLOG_LAST_OPC,
- LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
-};
-
-struct llogd_body {
- struct llog_logid lgd_logid;
- __u32 lgd_ctxt_idx;
- __u32 lgd_llh_flags;
- __u32 lgd_index;
- __u32 lgd_saved_index;
- __u32 lgd_len;
- __u64 lgd_cur_offset;
-} __attribute__((packed));
-
-struct llogd_conn_body {
- struct llog_gen lgdc_gen;
- struct llog_logid lgdc_logid;
- __u32 lgdc_ctxt_idx;
-} __attribute__((packed));
-
-/* Note: 64-bit types are 64-bit aligned in structure */
-struct obdo {
- __u64 o_valid; /* hot fields in this obdo */
- struct ost_id o_oi;
- __u64 o_parent_seq;
- __u64 o_size; /* o_size-o_blocks == ost_lvb */
- __s64 o_mtime;
- __s64 o_atime;
- __s64 o_ctime;
- __u64 o_blocks; /* brw: cli sent cached bytes */
- __u64 o_grant;
-
- /* 32-bit fields start here: keep an even number of them via padding */
- __u32 o_blksize; /* optimal IO blocksize */
- __u32 o_mode; /* brw: cli sent cache remain */
- __u32 o_uid;
- __u32 o_gid;
- __u32 o_flags;
- __u32 o_nlink; /* brw: checksum */
- __u32 o_parent_oid;
- __u32 o_misc; /* brw: o_dropped */
-
- __u64 o_ioepoch; /* epoch in ost writes */
- __u32 o_stripe_idx; /* holds stripe idx */
- __u32 o_parent_ver;
- struct lustre_handle o_handle; /* brw: lock handle to prolong
- * locks */
- struct llog_cookie o_lcookie; /* destroy: unlink cookie from
- * MDS */
- __u32 o_uid_h;
- __u32 o_gid_h;
-
- __u64 o_data_version; /* getattr: sum of iversion for
- * each stripe.
- * brw: grant space consumed on
- * the client for the write */
- __u64 o_padding_4;
- __u64 o_padding_5;
- __u64 o_padding_6;
-};
-
-#define o_dirty o_blocks
-#define o_undirty o_mode
-#define o_dropped o_misc
-#define o_cksum o_nlink
-#define o_grant_used o_data_version
-
-static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
- struct obdo *wobdo,
- const struct obdo *lobdo)
-{
- *wobdo = *lobdo;
- wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
- if (ocd == NULL)
- return;
-
- if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
- fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
-		/* Currently OBD_FL_OSTID will only be used when a 2.4 echo
-		 * client communicates with a pre-2.4 server */
- wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
- wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
- }
-}
-
-static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
- struct obdo *lobdo,
- const struct obdo *wobdo)
-{
- __u32 local_flags = 0;
-
- if (lobdo->o_valid & OBD_MD_FLFLAGS)
- local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
-
- *lobdo = *wobdo;
- if (local_flags != 0) {
- lobdo->o_valid |= OBD_MD_FLFLAGS;
- lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
- lobdo->o_flags |= local_flags;
- }
- if (ocd == NULL)
- return;
-
- if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
- fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
- /* see above */
- lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
- lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
- lobdo->o_oi.oi_fid.f_ver = 0;
- }
-}
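
A hedged sketch of the round trip above (hypothetical wrapper; OBD_MD_FLFLAGS and OBD_FL_LOCAL_MASK are defined elsewhere in this header): client-local flag bits are stripped before an obdo goes on the wire and merged back from the previous local copy on return.

static inline void obdo_wire_roundtrip_example(void)
{
	struct obdo local = {
		.o_valid = OBD_MD_FLFLAGS,
		.o_flags = OBD_FL_LOCAL_MASK,	/* local-only bits, for illustration */
	};
	struct obdo wire;

	lustre_set_wire_obdo(NULL, &wire, &local);  /* local bits stripped on the wire */
	lustre_get_wire_obdo(NULL, &local, &wire);  /* local bits merged back in */
}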
-
-void lustre_swab_obdo(struct obdo *o);
-
-/* request structure for OST's */
-struct ost_body {
- struct obdo oa;
-};
-
-/* Key for FIEMAP to be used in get_info calls */
-struct ll_fiemap_info_key {
- char name[8];
- struct obdo oa;
- struct ll_user_fiemap fiemap;
-};
-
-void lustre_swab_ost_body(struct ost_body *b);
-void lustre_swab_ost_last_id(__u64 *id);
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
-
-void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
-void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
-void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
- int stripe_count);
-void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
-
-/* llog_swab.c */
-void lustre_swab_llogd_body(struct llogd_body *d);
-void lustre_swab_llog_hdr(struct llog_log_hdr *h);
-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
-void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
-void lustre_swab_llog_id(struct llog_logid *lid);
-
-struct lustre_cfg;
-void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
-
-/* Functions for dumping PTLRPC fields */
-void dump_rniobuf(struct niobuf_remote *rnb);
-void dump_ioo(struct obd_ioobj *nb);
-void dump_obdo(struct obdo *oa);
-void dump_ost_body(struct ost_body *ob);
-void dump_rcs(__u32 *rc);
-
-#define IDX_INFO_MAGIC 0x3D37CC37
-
-/* Index file transfer through the network. The server serializes the index into
- * a byte stream which is sent to the client via a bulk transfer */
-struct idx_info {
- __u32 ii_magic;
-
- /* reply: see idx_info_flags below */
- __u32 ii_flags;
-
- /* request & reply: number of lu_idxpage (to be) transferred */
- __u16 ii_count;
- __u16 ii_pad0;
-
- /* request: requested attributes passed down to the iterator API */
- __u32 ii_attrs;
-
- /* request & reply: index file identifier (FID) */
- struct lu_fid ii_fid;
-
- /* reply: version of the index file before starting to walk the index.
- * Please note that the version can be modified at any time during the
- * transfer */
- __u64 ii_version;
-
- /* request: hash to start with:
- * reply: hash of the first entry of the first lu_idxpage and hash
- * of the entry to read next if any */
- __u64 ii_hash_start;
- __u64 ii_hash_end;
-
- /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
- * set */
- __u16 ii_keysize;
-
- /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
- * is set */
- __u16 ii_recsize;
-
- __u32 ii_pad1;
- __u64 ii_pad2;
- __u64 ii_pad3;
-};
-
-void lustre_swab_idx_info(struct idx_info *ii);
-
-#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
-
-/* List of flags used in idx_info::ii_flags */
-enum idx_info_flags {
- II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
- II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
- II_FL_VARREC = 1 << 2, /* records can be of variable size */
- II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
-};
-
-#define LIP_MAGIC 0x8A6D6B6C
-
-/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
-struct lu_idxpage {
- /* 16-byte header */
- __u32 lip_magic;
- __u16 lip_flags;
- __u16 lip_nr; /* number of entries in the container */
- __u64 lip_pad0; /* additional padding for future use */
-
- /* key/record pairs are stored in the remaining 4080 bytes.
- * depending upon the flags in idx_info::ii_flags, each key/record
- * pair might be preceded by:
- * - a hash value
- * - the key size (II_FL_VARKEY is set)
- * - the record size (II_FL_VARREC is set)
- *
- * For the time being, we only support fixed-size key & record. */
- char lip_entries[0];
-};
-
-#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
-
-/* Gather all possible type associated with a 4KB container */
-union lu_page {
- struct lu_dirpage lp_dir; /* for MDS_READPAGE */
- struct lu_idxpage lp_idx; /* for OBD_IDX_READ */
- char lp_array[LU_PAGE_SIZE];
-};
-
-/* security opcodes */
-typedef enum {
- SEC_CTX_INIT = 801,
- SEC_CTX_INIT_CONT = 802,
- SEC_CTX_FINI = 803,
- SEC_LAST_OPC,
- SEC_FIRST_OPC = SEC_CTX_INIT
-} sec_cmd_t;
-
-/*
- * capa related definitions
- */
-#define CAPA_HMAC_MAX_LEN 64
-#define CAPA_HMAC_KEY_MAX_LEN 56
-
-/* NB take care when changing the sequence of elements in this struct,
- * because the offset info is used in find_capa() */
-struct lustre_capa {
- struct lu_fid lc_fid; /** fid */
- __u64 lc_opc; /** operations allowed */
- __u64 lc_uid; /** file owner */
- __u64 lc_gid; /** file group */
- __u32 lc_flags; /** HMAC algorithm & flags */
- __u32 lc_keyid; /** key# used for the capability */
- __u32 lc_timeout; /** capa timeout value (sec) */
-/* FIXME: y2038 time_t overflow: */
- __u32 lc_expiry; /** expiry time (sec) */
- __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
-} __attribute__((packed));
-
-void lustre_swab_lustre_capa(struct lustre_capa *c);
-
-/** lustre_capa::lc_opc */
-enum {
- CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
- CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
- CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
- CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
- CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
- CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
- CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
- CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
- CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
- CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
- CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
-};
-
-#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
-#define CAPA_OPC_MDS_ONLY \
- (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
- CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
-#define CAPA_OPC_OSS_ONLY \
- (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
- CAPA_OPC_OSS_DESTROY)
-#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
-#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
-
-struct lustre_capa_key {
- __u64 lk_seq; /**< mds# */
- __u32 lk_keyid; /**< key# */
- __u32 lk_padding;
- __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
-} __attribute__((packed));
-
-/** The link ea holds 1 \a link_ea_entry for each hardlink */
-#define LINK_EA_MAGIC 0x11EAF1DFUL
-struct link_ea_header {
- __u32 leh_magic;
- __u32 leh_reccount;
- __u64 leh_len; /* total size */
- /* future use */
- __u32 padding1;
- __u32 padding2;
-};
-
-/** Hardlink data is name and parent fid.
- * Stored in this crazy struct for maximum packing and endian-neutrality
- */
-struct link_ea_entry {
- /** __u16 stored big-endian, unaligned */
- unsigned char lee_reclen[2];
- unsigned char lee_parent_fid[sizeof(struct lu_fid)];
- char lee_name[0];
-} __attribute__((packed));
-
-/** fid2path request/reply structure */
-struct getinfo_fid2path {
- struct lu_fid gf_fid;
- __u64 gf_recno;
- __u32 gf_linkno;
- __u32 gf_pathlen;
- char gf_path[0];
-} __attribute__((packed));
-
-void lustre_swab_fid2path (struct getinfo_fid2path *gf);
-
-enum {
- LAYOUT_INTENT_ACCESS = 0,
- LAYOUT_INTENT_READ = 1,
- LAYOUT_INTENT_WRITE = 2,
- LAYOUT_INTENT_GLIMPSE = 3,
- LAYOUT_INTENT_TRUNC = 4,
- LAYOUT_INTENT_RELEASE = 5,
- LAYOUT_INTENT_RESTORE = 6
-};
-
-/* enqueue layout lock with intent */
-struct layout_intent {
- __u32 li_opc; /* intent operation for enqueue, read, write etc */
- __u32 li_flags;
- __u64 li_start;
- __u64 li_end;
-};
-
-void lustre_swab_layout_intent(struct layout_intent *li);
-
-/**
- * On the wire version of hsm_progress structure.
- *
- * Contains the userspace hsm_progress and some internal fields.
- */
-struct hsm_progress_kernel {
- /* Field taken from struct hsm_progress */
- lustre_fid hpk_fid;
- __u64 hpk_cookie;
- struct hsm_extent hpk_extent;
- __u16 hpk_flags;
- __u16 hpk_errval; /* positive val */
- __u32 hpk_padding1;
- /* Additional fields */
- __u64 hpk_data_version;
- __u64 hpk_padding2;
-} __attribute__((packed));
-
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_current_action(struct hsm_current_action *action);
-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
-void lustre_swab_hsm_request(struct hsm_request *hr);
-
-/**
- * These are the object update opcodes under UPDATE_OBJ, which are currently
- * used by cross-ref operations between MDTs.
- *
- * During the cross-ref operation, the master MDT, to which the client sends
- * the request, will disassemble the operation into object updates; OSP then
- * sends these updates to the remote MDT to be executed.
- *
- * Update request format
- * magic: UPDATE_BUFFER_MAGIC_V1
- * Count: How many updates in the req.
- * bufs[0] : following are packets of object.
- * update[0]:
- * type: object_update_op, the op code of update
- * fid: The object fid of the update.
- * lens/bufs: other parameters of the update.
- * update[1]:
- * type: object_update_op, the op code of update
- * fid: The object fid of the update.
- * lens/bufs: other parameters of the update.
- * ..........
- * update[7]: type: object_update_op, the op code of update
- * fid: The object fid of the update.
- * lens/bufs: other parameters of the update.
- * Currently a maximum of 8 updates per object update request.
- *
- *******************************************************************
- * update reply format:
- *
- * ur_version: UPDATE_REPLY_V1
- * ur_count: The count of the reply, which is usually equal
- * to the number of updates in the request.
- * ur_lens: The reply lengths of each object update.
- *
- * replies: 1st update reply [4bytes_ret: other body]
- * 2nd update reply [4bytes_ret: other body]
- * .....
- * nth update reply [4bytes_ret: other body]
- *
- * For each reply of the update, the format would be
- * result(4 bytes):Other stuff
- */
-
-#define UPDATE_MAX_OPS 10
-#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001
-#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1
-#define UPDATE_BUF_COUNT 8
-enum object_update_op {
- OBJ_CREATE = 1,
- OBJ_DESTROY = 2,
- OBJ_REF_ADD = 3,
- OBJ_REF_DEL = 4,
- OBJ_ATTR_SET = 5,
- OBJ_ATTR_GET = 6,
- OBJ_XATTR_SET = 7,
- OBJ_XATTR_GET = 8,
- OBJ_INDEX_LOOKUP = 9,
- OBJ_INDEX_INSERT = 10,
- OBJ_INDEX_DELETE = 11,
- OBJ_LAST
-};
-
-struct update {
- __u32 u_type;
- __u32 u_batchid;
- struct lu_fid u_fid;
- __u32 u_lens[UPDATE_BUF_COUNT];
- __u32 u_bufs[0];
-};
-
-struct update_buf {
- __u32 ub_magic;
- __u32 ub_count;
- __u32 ub_bufs[0];
-};
-
-#define UPDATE_REPLY_V1 0x00BD0001
-struct update_reply {
- __u32 ur_version;
- __u32 ur_count;
- __u32 ur_lens[0];
-};
-
-void lustre_swab_update_buf(struct update_buf *ub);
-void lustre_swab_update_reply_buf(struct update_reply *ur);
-
-/** layout swap request structure
- * fid1 and fid2 are in mdt_body
- */
-struct mdc_swap_layouts {
- __u64 msl_flags;
-} __packed;
-
-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
-
-struct close_data {
- struct lustre_handle cd_handle;
- struct lu_fid cd_fid;
- __u64 cd_data_version;
- __u64 cd_reserved[8];
-};
-
-void lustre_swab_close_data(struct close_data *data);
-
-#endif
-/** @} lustreidl */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_lfsck_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_lfsck_user.h
deleted file mode 100644
index 1c87a61a7fc1..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_lfsck_user.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details. A copy is
- * included in the COPYING file that accompanied this code.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * lustre/include/lustre/lustre_lfsck_user.h
- *
- * Lustre LFSCK userspace interfaces.
- *
- * Author: Fan Yong <yong.fan@whamcloud.com>
- */
-
-#ifndef _LUSTRE_LFSCK_USER_H
-# define _LUSTRE_LFSCK_USER_H
-
-enum lfsck_param_flags {
- /* Reset LFSCK iterator position to the device beginning. */
- LPF_RESET = 0x0001,
-
- /* Exit when fail. */
- LPF_FAILOUT = 0x0002,
-
- /* Dryrun mode, only check without modification */
- LPF_DRYRUN = 0x0004,
-};
-
-enum lfsck_type {
- /* For MDT-OST consistency check/repair. */
- LT_LAYOUT = 0x0001,
-
- /* For MDT-MDT consistency check/repair. */
- LT_DNE = 0x0002,
-
- /* For FID-in-dirent and linkEA consistency check/repair. */
- LT_NAMESPACE = 0x0004,
-};
-
-#define LFSCK_VERSION_V1 1
-#define LFSCK_VERSION_V2 2
-
-#define LFSCK_TYPES_ALL ((__u16)(~0))
-#define LFSCK_TYPES_DEF ((__u16)0)
-#define LFSCK_TYPES_SUPPORTED LT_NAMESPACE
-
-#define LFSCK_SPEED_NO_LIMIT 0
-#define LFSCK_SPEED_LIMIT_DEF LFSCK_SPEED_NO_LIMIT
-
-enum lfsck_start_valid {
- LSV_SPEED_LIMIT = 0x00000001,
- LSV_ERROR_HANDLE = 0x00000002,
- LSV_DRYRUN = 0x00000004,
-};
-
-/* Arguments for starting lfsck. */
-struct lfsck_start {
- /* Which arguments are valid, see 'enum lfsck_start_valid'. */
- __u32 ls_valid;
-
- /* How many items can be scanned at most per second. */
- __u32 ls_speed_limit;
-
- /* For compatibility between user space tools and kernel service. */
- __u16 ls_version;
-
- /* Which LFSCK components to be (have been) started. */
- __u16 ls_active;
-
- /* Flags for the LFSCK, see 'enum lfsck_param_flags'. */
- __u16 ls_flags;
-
- /* For 64-bits aligned. */
- __u16 ls_padding;
-};
-
-#endif /* _LUSTRE_LFSCK_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
deleted file mode 100644
index 1a41366322fe..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ /dev/null
@@ -1,1177 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre/lustre_user.h
- *
- * Lustre public user-space interface definitions.
- */
-
-#ifndef _LUSTRE_USER_H
-#define _LUSTRE_USER_H
-
-/** \defgroup lustreuser lustreuser
- *
- * @{
- */
-
-#include "ll_fiemap.h"
-#include "../linux/lustre_user.h"
-
-/* for statfs() */
-#define LL_SUPER_MAGIC 0x0BD00BD0
-
-#ifndef FSFILT_IOC_GETFLAGS
-#define FSFILT_IOC_GETFLAGS _IOR('f', 1, long)
-#define FSFILT_IOC_SETFLAGS _IOW('f', 2, long)
-#define FSFILT_IOC_GETVERSION _IOR('f', 3, long)
-#define FSFILT_IOC_SETVERSION _IOW('f', 4, long)
-#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long)
-#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long)
-#define FSFILT_IOC_FIEMAP _IOWR('f', 11, struct ll_user_fiemap)
-#endif
-
-/* FIEMAP flags supported by Lustre */
-#define LUSTRE_FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_DEVICE_ORDER)
-
-enum obd_statfs_state {
- OS_STATE_DEGRADED = 0x00000001, /**< RAID degraded/rebuilding */
- OS_STATE_READONLY = 0x00000002, /**< filesystem is read-only */
- OS_STATE_RDONLY_1 = 0x00000004, /**< obsolete 1.6, was EROFS=30 */
- OS_STATE_RDONLY_2 = 0x00000008, /**< obsolete 1.6, was EROFS=30 */
- OS_STATE_RDONLY_3 = 0x00000010, /**< obsolete 1.6, was EROFS=30 */
-};
-
-struct obd_statfs {
- __u64 os_type;
- __u64 os_blocks;
- __u64 os_bfree;
- __u64 os_bavail;
- __u64 os_files;
- __u64 os_ffree;
- __u8 os_fsid[40];
- __u32 os_bsize;
- __u32 os_namelen;
- __u64 os_maxbytes;
- __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */
- __u32 os_fprecreated; /* objs available now to the caller */
- /* used in QoS code to find preferred
- * OSTs */
- __u32 os_spare2;
- __u32 os_spare3;
- __u32 os_spare4;
- __u32 os_spare5;
- __u32 os_spare6;
- __u32 os_spare7;
- __u32 os_spare8;
- __u32 os_spare9;
-};
-
-/**
- * File IDentifier.
- *
- * FID is a cluster-wide unique identifier of a file or an object (stripe).
- * FIDs are never reused.
- **/
-struct lu_fid {
- /**
- * FID sequence. Sequence is a unit of migration: all files (objects)
- * with FIDs from a given sequence are stored on the same server.
- * Lustre should support 2^64 objects, so even if each sequence
- * has only a single object we can still enumerate 2^64 objects.
- **/
- __u64 f_seq;
- /* FID number within sequence. */
- __u32 f_oid;
- /**
- * FID version, used to distinguish different versions (in the sense
- * of snapshots, etc.) of the same file system object. Not currently
- * used.
- **/
- __u32 f_ver;
-};
-
-struct filter_fid {
- struct lu_fid ff_parent; /* ff_parent.f_ver == file stripe number */
-};
-
-/* keep this one for compatibility */
-struct filter_fid_old {
- struct lu_fid ff_parent;
- __u64 ff_objid;
- __u64 ff_seq;
-};
-
-/* Userspace should treat lu_fid as opaque, and only use the following methods
- * to print or parse them. Other functions (e.g. compare, swab) could be moved
- * here from lustre_idl.h if needed. */
-typedef struct lu_fid lustre_fid;
-
-/**
- * The following struct holds object attributes that are kept in the inode's EA.
- * Introduced in the 2.0 release (please see b15993 for details).
- * Added to all objects since Lustre 2.4, as it contains the self FID.
- */
-struct lustre_mdt_attrs {
- /**
- * Bitfield for supported data in this structure. From enum lma_compat.
- * lma_self_fid and lma_flags are always available.
- */
- __u32 lma_compat;
- /**
- * Per-file incompat feature list. Lustre version should support all
- * flags set in this field. The supported feature mask is available in
- * LMA_INCOMPAT_SUPP.
- */
- __u32 lma_incompat;
- /** FID of this inode */
- struct lu_fid lma_self_fid;
-};
-
-/**
- * Prior to 2.4, the LMA structure also included SOM attributes, which have
- * since been moved to a dedicated xattr.
- * lma_flags was also removed in favour of the lma_compat/lma_incompat fields.
- */
-#define LMA_OLD_SIZE (sizeof(struct lustre_mdt_attrs) + 5 * sizeof(__u64))
-
-/**
- * OST object IDentifier.
- */
-struct ost_id {
- union {
- struct ostid {
- __u64 oi_id;
- __u64 oi_seq;
- } oi;
- struct lu_fid oi_fid;
- };
-};
-
-#define DOSTID "%#llx:%llu"
-#define POSTID(oi) ostid_seq(oi), ostid_id(oi)
-
-/*
- * The ioctl naming rules:
- * LL_* - works on the currently opened filehandle instead of parent dir
- * *_OBD_* - gets data for both OSC or MDC (LOV, LMV indirectly)
- * *_MDC_* - gets/sets data related to MDC
- * *_LOV_* - gets/sets data related to OSC/LOV
- * *FILE* - called on parent dir and passes in a filename
- * *STRIPE* - set/get lov_user_md
- * *INFO - set/get lov_user_mds_data
- */
-/* see <lustre_lib.h> for ioctl numbers 101-150 */
-#define LL_IOC_GETFLAGS _IOR ('f', 151, long)
-#define LL_IOC_SETFLAGS _IOW ('f', 152, long)
-#define LL_IOC_CLRFLAGS _IOW ('f', 153, long)
-/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */
-#define LL_IOC_LOV_SETSTRIPE _IOW ('f', 154, long)
-/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */
-#define LL_IOC_LOV_GETSTRIPE _IOW ('f', 155, long)
-/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */
-#define LL_IOC_LOV_SETEA _IOW ('f', 156, long)
-#define LL_IOC_RECREATE_OBJ _IOW ('f', 157, long)
-#define LL_IOC_RECREATE_FID _IOW ('f', 157, struct lu_fid)
-#define LL_IOC_GROUP_LOCK _IOW ('f', 158, long)
-#define LL_IOC_GROUP_UNLOCK _IOW ('f', 159, long)
-/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */
-#define LL_IOC_QUOTACHECK _IOW ('f', 160, int)
-/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */
-#define LL_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
-/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */
-#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
-#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
-#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
-#define LL_IOC_FLUSHCTX _IOW ('f', 166, long)
-#define LL_IOC_RMTACL _IOW ('f', 167, long)
-#define LL_IOC_GETOBDCOUNT _IOR ('f', 168, long)
-#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
-#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
-#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid)
-#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long)
-#define LL_IOC_PATH2FID _IOR ('f', 173, long)
-#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
-#define LL_IOC_GET_MDTIDX _IOR ('f', 175, int)
-
-/* see <lustre_lib.h> for ioctl numbers 177-210 */
-
-#define LL_IOC_HSM_STATE_GET _IOR('f', 211, struct hsm_user_state)
-#define LL_IOC_HSM_STATE_SET _IOW('f', 212, struct hsm_state_set)
-#define LL_IOC_HSM_CT_START _IOW('f', 213, struct lustre_kernelcomm)
-#define LL_IOC_HSM_COPY_START _IOW('f', 214, struct hsm_copy *)
-#define LL_IOC_HSM_COPY_END _IOW('f', 215, struct hsm_copy *)
-#define LL_IOC_HSM_PROGRESS _IOW('f', 216, struct hsm_user_request)
-#define LL_IOC_HSM_REQUEST _IOW('f', 217, struct hsm_user_request)
-#define LL_IOC_DATA_VERSION _IOR('f', 218, struct ioc_data_version)
-#define LL_IOC_LOV_SWAP_LAYOUTS _IOW('f', 219, \
- struct lustre_swap_layouts)
-#define LL_IOC_HSM_ACTION _IOR('f', 220, \
- struct hsm_current_action)
-/* see <lustre_lib.h> for ioctl numbers 221-232 */
-
-#define LL_IOC_LMV_SETSTRIPE _IOWR('f', 240, struct lmv_user_md)
-#define LL_IOC_LMV_GETSTRIPE _IOWR('f', 241, struct lmv_user_md)
-#define LL_IOC_SET_LEASE _IOWR('f', 243, long)
-#define LL_IOC_GET_LEASE _IO('f', 244)
-#define LL_IOC_HSM_IMPORT _IOWR('f', 245, struct hsm_user_import)
-
-#define LL_STATFS_LMV 1
-#define LL_STATFS_LOV 2
-#define LL_STATFS_NODELAY 4
-
-#define IOC_MDC_TYPE 'i'
-#define IOC_MDC_LOOKUP _IOWR(IOC_MDC_TYPE, 20, struct obd_device *)
-#define IOC_MDC_GETFILESTRIPE _IOWR(IOC_MDC_TYPE, 21, struct lov_user_md *)
-#define IOC_MDC_GETFILEINFO _IOWR(IOC_MDC_TYPE, 22, struct lov_user_mds_data *)
-#define LL_IOC_MDC_GETINFO _IOWR(IOC_MDC_TYPE, 23, struct lov_user_mds_data *)
-
-/* Keep these for backward compatibility. */
-#define LL_IOC_OBD_STATFS IOC_OBD_STATFS
-#define IOC_MDC_GETSTRIPE IOC_MDC_GETFILESTRIPE
-
-
-#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */
-
-/* Define O_LOV_DELAY_CREATE as a mask of flags that are not useful for
- * regular files, are unlikely to be used in practice, and are not harmful
- * if used incorrectly. O_NOCTTY and FASYNC are only meaningful for character
- * devices and are safe for use on new files (see LU-812, LU-4209). */
-#define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC)
-
-#define LL_FILE_IGNORE_LOCK 0x00000001
-#define LL_FILE_GROUP_LOCKED 0x00000002
-#define LL_FILE_READAHEA 0x00000004
-#define LL_FILE_LOCKED_DIRECTIO 0x00000008 /* client-side locks with dio */
-#define LL_FILE_LOCKLESS_IO 0x00000010 /* server-side locks with cio */
-#define LL_FILE_RMTACL 0x00000020
-
-#define LOV_USER_MAGIC_V1 0x0BD10BD0
-#define LOV_USER_MAGIC LOV_USER_MAGIC_V1
-#define LOV_USER_MAGIC_JOIN_V1 0x0BD20BD0
-#define LOV_USER_MAGIC_V3 0x0BD30BD0
-
-#define LMV_MAGIC_V1 0x0CD10CD0 /*normal stripe lmv magic */
-#define LMV_USER_MAGIC 0x0CD20CD0 /*default lmv magic*/
-
-#define LOV_PATTERN_RAID0 0x001
-#define LOV_PATTERN_RAID1 0x002
-#define LOV_PATTERN_FIRST 0x100
-
-#define LOV_MAXPOOLNAME 16
-#define LOV_POOLNAMEF "%.16s"
-
-#define LOV_MIN_STRIPE_BITS 16 /* maximum PAGE_SIZE (ia64), power of 2 */
-#define LOV_MIN_STRIPE_SIZE (1 << LOV_MIN_STRIPE_BITS)
-#define LOV_MAX_STRIPE_COUNT_OLD 160
-/* This calculation is crafted so that an input of 4096 will result in 160,
- * which in turn is equal to the old maximal stripe count.
- * XXX: in fact this is too simplified for now; it also needs an ea_type
- * argument to know exactly how much space each stripe consumes.
- *
- * The limit of 12 pages is somewhat arbitrary, but is a reasonably large
- * allocation that is sufficient for the current generation of systems.
- *
- * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */
-#define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */
-#define LOV_ALL_STRIPES 0xffff /* only valid for directories */
-#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
-
-#define lov_user_ost_data lov_user_ost_data_v1
-struct lov_user_ost_data_v1 { /* per-stripe data structure */
- struct ost_id l_ost_oi; /* OST object ID */
- __u32 l_ost_gen; /* generation of this OST index */
- __u32 l_ost_idx; /* OST index in LOV */
-} __attribute__((packed));
-
-#define lov_user_md lov_user_md_v1
-struct lov_user_md_v1 { /* LOV EA user data (host-endian) */
- __u32 lmm_magic; /* magic number = LOV_USER_MAGIC_V1 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- union {
- __u16 lmm_stripe_offset; /* starting stripe offset in
- * lmm_objects, use when writing */
- __u16 lmm_layout_gen; /* layout generation number
- * used when reading */
- };
- struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-} __attribute__((packed, __may_alias__));
-
-struct lov_user_md_v3 { /* LOV EA user data (host-endian) */
- __u32 lmm_magic; /* magic number = LOV_USER_MAGIC_V3 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- struct ost_id lmm_oi; /* LOV object ID */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- union {
- __u16 lmm_stripe_offset; /* starting stripe offset in
- * lmm_objects, use when writing */
- __u16 lmm_layout_gen; /* layout generation number
- * used when reading */
- };
- char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */
- struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-} __attribute__((packed));
-
-static inline __u32 lov_user_md_size(__u16 stripes, __u32 lmm_magic)
-{
- if (lmm_magic == LOV_USER_MAGIC_V3)
- return sizeof(struct lov_user_md_v3) +
- stripes * sizeof(struct lov_user_ost_data_v1);
- else
- return sizeof(struct lov_user_md_v1) +
- stripes * sizeof(struct lov_user_ost_data_v1);
-}
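lov_user_md_size() above is the sizing rule userspace tools follow when allocating the buffer passed to the LOV striping ioctls: a V1 or V3 header followed by one lov_user_ost_data_v1 per stripe. Below is a minimal standalone sketch of the same arithmetic; the stand-in types (including the 16-byte ost_id) are assumptions made only so the sketch compiles on its own, and the authoritative layouts are the structs above.

/* Hedged sketch: re-derives lov_user_md_size() with stand-in types. */
#include <stdio.h>
#include <stdint.h>

struct ost_id_ish { uint64_t oi[2]; };		/* assumed 16 bytes */
struct stripe_ish {				/* mirrors lov_user_ost_data_v1 */
	struct ost_id_ish l_ost_oi;
	uint32_t l_ost_gen;
	uint32_t l_ost_idx;
};

static size_t lov_ea_size(uint16_t stripes, int v3)
{
	/* V1 header: magic + pattern + ost_id + stripe_size + count + union */
	size_t hdr = 4 + 4 + sizeof(struct ost_id_ish) + 4 + 2 + 2;

	if (v3)
		hdr += 16;			/* lmm_pool_name[LOV_MAXPOOLNAME] */
	return hdr + (size_t)stripes * sizeof(struct stripe_ish);
}

int main(void)
{
	printf("V1 EA, 4 stripes: %zu bytes\n", lov_ea_size(4, 0));
	printf("V3 EA, 4 stripes: %zu bytes\n", lov_ea_size(4, 1));
	return 0;
}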
-
-/* Compile with -D_LARGEFILE64_SOURCE or -D_GNU_SOURCE (or #define) to
- * use this. It is unsafe to #define those values in this header as it
- * is possible the application has already #included <sys/stat.h>. */
-#ifdef HAVE_LOV_USER_MDS_DATA
-#define lov_user_mds_data lov_user_mds_data_v1
-struct lov_user_mds_data_v1 {
- lstat_t lmd_st; /* MDS stat struct */
- struct lov_user_md_v1 lmd_lmm; /* LOV EA V1 user data */
-} __attribute__((packed));
-
-struct lov_user_mds_data_v3 {
- lstat_t lmd_st; /* MDS stat struct */
- struct lov_user_md_v3 lmd_lmm; /* LOV EA V3 user data */
-} __attribute__((packed));
-#endif
-
-/* keep this to be the same size as lov_user_ost_data_v1 */
-struct lmv_user_mds_data {
- struct lu_fid lum_fid;
- __u32 lum_padding;
- __u32 lum_mds;
-};
-
-/* lum_type */
-enum {
- LMV_STRIPE_TYPE = 0,
- LMV_DEFAULT_TYPE = 1,
-};
-
-#define lmv_user_md lmv_user_md_v1
-struct lmv_user_md_v1 {
- __u32 lum_magic; /* must be the first field */
- __u32 lum_stripe_count; /* dirstripe count */
- __u32 lum_stripe_offset; /* MDT idx for default dirstripe */
- __u32 lum_hash_type; /* Dir stripe policy */
- __u32 lum_type; /* LMV type: default or normal */
- __u32 lum_padding1;
- __u32 lum_padding2;
- __u32 lum_padding3;
- char lum_pool_name[LOV_MAXPOOLNAME];
- struct lmv_user_mds_data lum_objects[0];
-};
-
-static inline int lmv_user_md_size(int stripes, int lmm_magic)
-{
- return sizeof(struct lmv_user_md) +
- stripes * sizeof(struct lmv_user_mds_data);
-}
-
-struct ll_recreate_obj {
- __u64 lrc_id;
- __u32 lrc_ost_idx;
-};
-
-struct ll_fid {
- __u64 id; /* holds object id */
- __u32 generation; /* holds object generation */
- __u32 f_type; /* holds object type or stripe idx when passing it to
- * OST for saving into EA. */
-};
-
-#define UUID_MAX 40
-struct obd_uuid {
- char uuid[UUID_MAX];
-};
-
-static inline bool obd_uuid_equals(const struct obd_uuid *u1,
- const struct obd_uuid *u2)
-{
- return strcmp((char *)u1->uuid, (char *)u2->uuid) == 0;
-}
-
-static inline int obd_uuid_empty(struct obd_uuid *uuid)
-{
- return uuid->uuid[0] == '\0';
-}
-
-static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp)
-{
- strncpy((char *)uuid->uuid, tmp, sizeof(*uuid));
- uuid->uuid[sizeof(*uuid) - 1] = '\0';
-}
-
-/* For printf's only, make sure uuid is terminated */
-static inline char *obd_uuid2str(const struct obd_uuid *uuid)
-{
- if (uuid->uuid[sizeof(*uuid) - 1] != '\0') {
- /* Obviously not safe, but for printfs, no real harm done...
- we're always null-terminated, even in a race. */
- static char temp[sizeof(*uuid)];
- memcpy(temp, uuid->uuid, sizeof(*uuid) - 1);
- temp[sizeof(*uuid) - 1] = '\0';
- return temp;
- }
- return (char *)(uuid->uuid);
-}
-
-/* Extract fsname from uuid (or target name) of a target
- e.g. (myfs-OST0007_UUID -> myfs)
- see also deuuidify. */
-static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
-{
- char *p;
-
- strncpy(buf, uuid, buflen - 1);
- buf[buflen - 1] = '\0';
- p = strrchr(buf, '-');
- if (p)
- *p = '\0';
-}
-
-/* printf display format
- e.g. printf("file FID is "DFID"\n", PFID(fid)); */
-#define FID_NOBRACE_LEN 40
-#define FID_LEN (FID_NOBRACE_LEN + 2)
-#define DFID_NOBRACE "%#llx:0x%x:0x%x"
-#define DFID "["DFID_NOBRACE"]"
-#define PFID(fid) \
- (fid)->f_seq, \
- (fid)->f_oid, \
- (fid)->f_ver
-
-/* scanf input parse format -- strip '[' first.
- e.g. sscanf(fidstr, SFID, RFID(&fid)); */
-#define SFID "0x%llx:0x%x:0x%x"
-#define RFID(fid) \
- &((fid)->f_seq), \
- &((fid)->f_oid), \
- &((fid)->f_ver)
-
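DFID/PFID are intended for printf-style output and SFID/RFID for sscanf-style parsing, stripping the leading '[' first as the comment notes. A small userspace round-trip sketch follows; the three-field struct is only a stand-in whose types are chosen to match the format strings (the real struct lu_fid is defined elsewhere in the Lustre headers).

#include <stdio.h>

/* Stand-in with just the fields the macros reference. */
struct fid_ish { unsigned long long f_seq; unsigned int f_oid, f_ver; };

#define DFID_NOBRACE "%#llx:0x%x:0x%x"
#define DFID "["DFID_NOBRACE"]"
#define PFID(fid) (fid)->f_seq, (fid)->f_oid, (fid)->f_ver
#define SFID "0x%llx:0x%x:0x%x"
#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)

int main(void)
{
	struct fid_ish fid = { 0x200000401ULL, 0x1, 0x0 }, parsed = { 0 };
	char buf[64];

	snprintf(buf, sizeof(buf), DFID, PFID(&fid));	/* "[0x200000401:0x1:0x0]" */
	sscanf(buf + 1, SFID, RFID(&parsed));		/* strip '[' before parsing */
	printf("%s -> seq=%#llx oid=%#x ver=%#x\n",
	       buf, parsed.f_seq, parsed.f_oid, parsed.f_ver);
	return 0;
}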
-
-/********* Quotas **********/
-
-/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
-#define LUSTRE_Q_QUOTAON 0x800002 /* turn quotas on */
-#define LUSTRE_Q_QUOTAOFF 0x800003 /* turn quotas off */
-#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */
-#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */
-#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */
-#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */
-/* lustre-specific control commands */
-#define LUSTRE_Q_INVALIDATE 0x80000b /* invalidate quota data */
-#define LUSTRE_Q_FINVALIDATE 0x80000c /* invalidate filter quota data */
-
-#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */
-
-struct if_quotacheck {
- char obd_type[16];
- struct obd_uuid obd_uuid;
-};
-
-#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629
-
-/* permission */
-#define N_PERMS_MAX 64
-
-struct perm_downcall_data {
- __u64 pdd_nid;
- __u32 pdd_perm;
- __u32 pdd_padding;
-};
-
-struct identity_downcall_data {
- __u32 idd_magic;
- __u32 idd_err;
- __u32 idd_uid;
- __u32 idd_gid;
- __u32 idd_nperms;
- __u32 idd_ngroups;
- struct perm_downcall_data idd_perms[N_PERMS_MAX];
- __u32 idd_groups[0];
-};
-
-/* for non-mapped uid/gid */
-#define NOBODY_UID 99
-#define NOBODY_GID 99
-
-#define INVALID_ID (-1)
-
-enum {
- RMT_LSETFACL = 1,
- RMT_LGETFACL = 2,
- RMT_RSETFACL = 3,
- RMT_RGETFACL = 4
-};
-
-#ifdef NEED_QUOTA_DEFS
-#ifndef QIF_BLIMITS
-#define QIF_BLIMITS 1
-#define QIF_SPACE 2
-#define QIF_ILIMITS 4
-#define QIF_INODES 8
-#define QIF_BTIME 16
-#define QIF_ITIME 32
-#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS)
-#define QIF_USAGE (QIF_SPACE | QIF_INODES)
-#define QIF_TIMES (QIF_BTIME | QIF_ITIME)
-#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES)
-#endif
-
-#endif /* !__KERNEL__ */
-
-/* Lustre volatile file support
- * file name header: ".^L^S^T^R:VOLATILE"
- */
-#define LUSTRE_VOLATILE_HDR ".\x0c\x13\x14\x12:VOLATILE"
-#define LUSTRE_VOLATILE_HDR_LEN 14
-/* hdr + MDT index */
-#define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:"
-
-typedef enum lustre_quota_version {
- LUSTRE_QUOTA_V2 = 1
-} lustre_quota_version_t;
-
-/* XXX: same as if_dqinfo struct in kernel */
-struct obd_dqinfo {
- __u64 dqi_bgrace;
- __u64 dqi_igrace;
- __u32 dqi_flags;
- __u32 dqi_valid;
-};
-
-/* XXX: same as if_dqblk struct in kernel, plus one padding */
-struct obd_dqblk {
- __u64 dqb_bhardlimit;
- __u64 dqb_bsoftlimit;
- __u64 dqb_curspace;
- __u64 dqb_ihardlimit;
- __u64 dqb_isoftlimit;
- __u64 dqb_curinodes;
- __u64 dqb_btime;
- __u64 dqb_itime;
- __u32 dqb_valid;
- __u32 dqb_padding;
-};
-
-enum {
- QC_GENERAL = 0,
- QC_MDTIDX = 1,
- QC_OSTIDX = 2,
- QC_UUID = 3
-};
-
-struct if_quotactl {
- __u32 qc_cmd;
- __u32 qc_type;
- __u32 qc_id;
- __u32 qc_stat;
- __u32 qc_valid;
- __u32 qc_idx;
- struct obd_dqinfo qc_dqinfo;
- struct obd_dqblk qc_dqblk;
- char obd_type[16];
- struct obd_uuid obd_uuid;
-};
-
-/* swap layout flags */
-#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0)
-#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
-#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
-#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
-
-/* Swap XATTR_NAME_HSM as well, only on the MDT so far */
-#define SWAP_LAYOUTS_MDS_HSM (1 << 31)
-struct lustre_swap_layouts {
- __u64 sl_flags;
- __u32 sl_fd;
- __u32 sl_gid;
- __u64 sl_dv1;
- __u64 sl_dv2;
-};
-
-
-/********* Changelogs **********/
-/** Changelog record types */
-enum changelog_rec_type {
- CL_MARK = 0,
- CL_CREATE = 1, /* namespace */
- CL_MKDIR = 2, /* namespace */
- CL_HARDLINK = 3, /* namespace */
- CL_SOFTLINK = 4, /* namespace */
- CL_MKNOD = 5, /* namespace */
- CL_UNLINK = 6, /* namespace */
- CL_RMDIR = 7, /* namespace */
- CL_RENAME = 8, /* namespace */
- CL_EXT = 9, /* namespace extended record (2nd half of rename) */
- CL_OPEN = 10, /* not currently used */
- CL_CLOSE = 11, /* may be written to log only with mtime change */
- CL_LAYOUT = 12, /* file layout/striping modified */
- CL_TRUNC = 13,
- CL_SETATTR = 14,
- CL_XATTR = 15,
- CL_HSM = 16, /* HSM specific events, see flags */
- CL_MTIME = 17, /* Precedence: setattr > mtime > ctime > atime */
- CL_CTIME = 18,
- CL_ATIME = 19,
- CL_LAST
-};
-
-static inline const char *changelog_type2str(int type)
-{
- static const char *changelog_str[] = {
- "MARK", "CREAT", "MKDIR", "HLINK", "SLINK", "MKNOD", "UNLNK",
- "RMDIR", "RENME", "RNMTO", "OPEN", "CLOSE", "LYOUT", "TRUNC",
- "SATTR", "XATTR", "HSM", "MTIME", "CTIME", "ATIME",
- };
-
- if (type >= 0 && type < CL_LAST)
- return changelog_str[type];
- return NULL;
-}
-
-/* per-record flags */
-#define CLF_VERSION 0x1000
-#define CLF_EXT_VERSION 0x2000
-#define CLF_FLAGSHIFT 12
-#define CLF_FLAGMASK ((1U << CLF_FLAGSHIFT) - 1)
-#define CLF_VERMASK (~CLF_FLAGMASK)
-/* Anything under the flagmask may be per-type (if desired) */
-/* Flags for unlink */
-#define CLF_UNLINK_LAST 0x0001 /* Unlink of last hardlink */
-#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
- /* HSM cleaning needed */
-/* Flags for rename */
-#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of target */
-
-/* Flags for HSM */
-/* 12b used (from high weight to low weight):
- * 2b for flags
- * 3b for event
- * 7b for error code
- */
-#define CLF_HSM_ERR_L 0 /* HSM return code, 7 bits */
-#define CLF_HSM_ERR_H 6
-#define CLF_HSM_EVENT_L 7 /* HSM event, 3 bits, see enum hsm_event */
-#define CLF_HSM_EVENT_H 9
-#define CLF_HSM_FLAG_L 10 /* HSM flags, 2 bits, 1 used, 1 spare */
-#define CLF_HSM_FLAG_H 11
-#define CLF_HSM_SPARE_L 12 /* 4 spare bits */
-#define CLF_HSM_SPARE_H 15
-#define CLF_HSM_LAST 15
-
-/* Remove bits higher than _h, then extract the value
- * between _h and _l by shifting the lower weight down to bit 0. */
-#define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \
- >> (CLF_HSM_LAST - _h + _l))
-
-#define CLF_HSM_SUCCESS 0x00
-#define CLF_HSM_MAXERROR 0x7E
-#define CLF_HSM_ERROVERFLOW 0x7F
-
-#define CLF_HSM_DIRTY 1 /* file is dirty after HSM request end */
-
-/* 3 bits field => 8 values allowed */
-enum hsm_event {
- HE_ARCHIVE = 0,
- HE_RESTORE = 1,
- HE_CANCEL = 2,
- HE_RELEASE = 3,
- HE_REMOVE = 4,
- HE_STATE = 5,
- HE_SPARE1 = 6,
- HE_SPARE2 = 7,
-};
-
-static inline enum hsm_event hsm_get_cl_event(__u16 flags)
-{
- return CLF_GET_BITS(flags, CLF_HSM_EVENT_H, CLF_HSM_EVENT_L);
-}
-
-static inline void hsm_set_cl_event(int *flags, enum hsm_event he)
-{
- *flags |= (he << CLF_HSM_EVENT_L);
-}
-
-static inline __u16 hsm_get_cl_flags(int flags)
-{
- return CLF_GET_BITS(flags, CLF_HSM_FLAG_H, CLF_HSM_FLAG_L);
-}
-
-static inline void hsm_set_cl_flags(int *flags, int bits)
-{
- *flags |= (bits << CLF_HSM_FLAG_L);
-}
-
-static inline int hsm_get_cl_error(int flags)
-{
- return CLF_GET_BITS(flags, CLF_HSM_ERR_H, CLF_HSM_ERR_L);
-}
-
-static inline void hsm_set_cl_error(int *flags, int error)
-{
- *flags |= (error << CLF_HSM_ERR_L);
-}
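Taken together, the CLF_HSM_* bounds pack an error code, an event and flag bits into the low 12 bits of the per-record flags, and the helpers above shift them in and out. A standalone sketch of that round trip, reusing the same shift/mask arithmetic; the encoded values are made up for illustration.

#include <stdio.h>

#define CLF_HSM_ERR_L	 0
#define CLF_HSM_ERR_H	 6
#define CLF_HSM_EVENT_L	 7
#define CLF_HSM_EVENT_H	 9
#define CLF_HSM_FLAG_L	10
#define CLF_HSM_FLAG_H	11
#define CLF_HSM_LAST	15

/* Same extraction as CLF_GET_BITS(): drop bits above _h, shift _l down to bit 0. */
#define GET_BITS(_b, _h, _l) \
	((((_b) << (CLF_HSM_LAST - (_h))) & 0xFFFF) >> (CLF_HSM_LAST - (_h) + (_l)))

int main(void)
{
	int flags = 0;

	flags |= 3 << CLF_HSM_EVENT_L;	/* e.g. HE_RELEASE */
	flags |= 1 << CLF_HSM_FLAG_L;	/* e.g. CLF_HSM_DIRTY */
	flags |= 5 << CLF_HSM_ERR_L;	/* some HSM return code */

	printf("event=%d flag=%d err=%d\n",
	       GET_BITS(flags, CLF_HSM_EVENT_H, CLF_HSM_EVENT_L),
	       GET_BITS(flags, CLF_HSM_FLAG_H, CLF_HSM_FLAG_L),
	       GET_BITS(flags, CLF_HSM_ERR_H, CLF_HSM_ERR_L));
	return 0;
}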
-
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
- sizeof(struct changelog_ext_rec))
-
-struct changelog_rec {
- __u16 cr_namelen;
- __u16 cr_flags; /**< (flags&CLF_FLAGMASK)|CLF_VERSION */
- __u32 cr_type; /**< \a changelog_rec_type */
- __u64 cr_index; /**< changelog record number */
- __u64 cr_prev; /**< last index for this target fid */
- __u64 cr_time;
- union {
- lustre_fid cr_tfid; /**< target fid */
- __u32 cr_markerflags; /**< CL_MARK flags */
- };
- lustre_fid cr_pfid; /**< parent fid */
- char cr_name[0]; /**< last element */
-} __attribute__((packed));
-
-/* changelog_ext_rec is 2 * sizeof(lu_fid) bigger than changelog_rec. To save
- * space, only rename uses changelog_ext_rec; all other record types use
- * changelog_rec to store records.
- */
-struct changelog_ext_rec {
- __u16 cr_namelen;
- __u16 cr_flags; /**< (flags & CLF_FLAGMASK) |
- CLF_EXT_VERSION */
- __u32 cr_type; /**< \a changelog_rec_type */
- __u64 cr_index; /**< changelog record number */
- __u64 cr_prev; /**< last index for this target fid */
- __u64 cr_time;
- union {
- lustre_fid cr_tfid; /**< target fid */
- __u32 cr_markerflags; /**< CL_MARK flags */
- };
- lustre_fid cr_pfid; /**< target parent fid */
- lustre_fid cr_sfid; /**< source fid, or zero */
- lustre_fid cr_spfid; /**< source parent fid, or zero */
- char cr_name[0]; /**< last element */
-} __attribute__((packed));
-
-#define CHANGELOG_REC_EXTENDED(rec) \
- (((rec)->cr_flags & CLF_VERMASK) == CLF_EXT_VERSION)
-
-static inline int changelog_rec_size(struct changelog_rec *rec)
-{
- return CHANGELOG_REC_EXTENDED(rec) ? sizeof(struct changelog_ext_rec) :
- sizeof(*rec);
-}
-
-static inline char *changelog_rec_name(struct changelog_rec *rec)
-{
- return CHANGELOG_REC_EXTENDED(rec) ?
- ((struct changelog_ext_rec *)rec)->cr_name : rec->cr_name;
-}
-
-static inline int changelog_rec_snamelen(struct changelog_ext_rec *rec)
-{
- return rec->cr_namelen - strlen(rec->cr_name) - 1;
-}
-
-static inline char *changelog_rec_sname(struct changelog_ext_rec *rec)
-{
- return rec->cr_name + strlen(rec->cr_name) + 1;
-}
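For extended (rename) records the name area holds the target name, a NUL, then the source name, with cr_namelen covering the whole area; changelog_rec_sname() and changelog_rec_snamelen() above recover the second half. A tiny standalone illustration of that layout; the sample names and the exact NUL accounting are for demonstration only.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Extended (rename) record name area: "<target>\0<source>". */
	const char name_area[] = "newname\0oldname";
	int cr_namelen = sizeof(name_area);			/* spans both names */
	const char *sname = name_area + strlen(name_area) + 1;	/* changelog_rec_sname() */
	int snamelen = cr_namelen - (int)strlen(name_area) - 1;	/* changelog_rec_snamelen() */

	printf("target='%s' source='%.*s' (snamelen=%d)\n",
	       name_area, snamelen, sname, snamelen);
	return 0;
}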
-
-struct ioc_changelog {
- __u64 icc_recno;
- __u32 icc_mdtindex;
- __u32 icc_id;
- __u32 icc_flags;
-};
-
-enum changelog_message_type {
- CL_RECORD = 10, /* message is a changelog_rec */
- CL_EOF = 11, /* at end of current changelog */
-};
-
-/********* Misc **********/
-
-struct ioc_data_version {
- __u64 idv_version;
- __u64 idv_flags; /* See LL_DV_xxx */
-};
-#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling
- version. Dirty caches are left unchanged. */
-
-#ifndef offsetof
-# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
-#endif
-
-#define dot_lustre_name ".lustre"
-
-
-/********* HSM **********/
-
-/** HSM per-file state
- * See HSM_FLAGS below.
- */
-enum hsm_states {
- HS_EXISTS = 0x00000001,
- HS_DIRTY = 0x00000002,
- HS_RELEASED = 0x00000004,
- HS_ARCHIVED = 0x00000008,
- HS_NORELEASE = 0x00000010,
- HS_NOARCHIVE = 0x00000020,
- HS_LOST = 0x00000040,
-};
-
-/* HSM user-setable flags. */
-#define HSM_USER_MASK (HS_NORELEASE | HS_NOARCHIVE | HS_DIRTY)
-
-/* Other HSM flags. */
-#define HSM_STATUS_MASK (HS_EXISTS | HS_LOST | HS_RELEASED | HS_ARCHIVED)
-
-/*
- * All HSM-related possible flags that could be applied to a file.
- * This should be kept in sync with hsm_states.
- */
-#define HSM_FLAGS_MASK (HSM_USER_MASK | HSM_STATUS_MASK)
-
-/**
- * HSM request progress state
- */
-enum hsm_progress_states {
- HPS_WAITING = 1,
- HPS_RUNNING = 2,
- HPS_DONE = 3,
-};
-#define HPS_NONE 0
-
-static inline char *hsm_progress_state2name(enum hsm_progress_states s)
-{
- switch (s) {
- case HPS_WAITING: return "waiting";
- case HPS_RUNNING: return "running";
- case HPS_DONE: return "done";
- default: return "unknown";
- }
-}
-
-struct hsm_extent {
- __u64 offset;
- __u64 length;
-} __attribute__((packed));
-
-/**
- * Current HSM states of a Lustre file.
- *
- * This structure is mainly intended to be sent to user space. It describes
- * the current HSM flags and the in-progress action.
- */
-struct hsm_user_state {
- /** Current HSM states, from enum hsm_states. */
- __u32 hus_states;
- __u32 hus_archive_id;
-	/** The action currently in progress, if there is one */
- __u32 hus_in_progress_state;
- __u32 hus_in_progress_action;
- struct hsm_extent hus_in_progress_location;
- char hus_extended_info[];
-};
-
-struct hsm_state_set_ioc {
- struct lu_fid hssi_fid;
- __u64 hssi_setmask;
- __u64 hssi_clearmask;
-};
-
-/*
- * This structure describes the current in-progress action for a file.
- * It is returned to user space and sent over the wire.
- */
-struct hsm_current_action {
-	/** The action currently in progress, if there is one */
- /* state is one of hsm_progress_states */
- __u32 hca_state;
- /* action is one of hsm_user_action */
- __u32 hca_action;
- struct hsm_extent hca_location;
-};
-
-/***** HSM user requests ******/
-/* User-generated (lfs/ioctl) request types */
-enum hsm_user_action {
- HUA_NONE = 1, /* no action (noop) */
- HUA_ARCHIVE = 10, /* copy to hsm */
- HUA_RESTORE = 11, /* prestage */
- HUA_RELEASE = 12, /* drop ost objects */
- HUA_REMOVE = 13, /* remove from archive */
- HUA_CANCEL = 14 /* cancel a request */
-};
-
-static inline char *hsm_user_action2name(enum hsm_user_action a)
-{
- switch (a) {
- case HUA_NONE: return "NOOP";
- case HUA_ARCHIVE: return "ARCHIVE";
- case HUA_RESTORE: return "RESTORE";
- case HUA_RELEASE: return "RELEASE";
- case HUA_REMOVE: return "REMOVE";
- case HUA_CANCEL: return "CANCEL";
- default: return "UNKNOWN";
- }
-}
-
-/*
- * List of hr_flags (bit field)
- */
-#define HSM_FORCE_ACTION 0x0001
-/* used by the copytool, cannot be set by user */
-#define HSM_GHOST_COPY 0x0002
-
-/**
- * Contains the fixed part of struct hsm_user_request.
- */
-struct hsm_request {
- __u32 hr_action; /* enum hsm_user_action */
- __u32 hr_archive_id; /* archive id, used only with HUA_ARCHIVE */
- __u64 hr_flags; /* request flags */
- __u32 hr_itemcount; /* item count in hur_user_item vector */
- __u32 hr_data_len;
-};
-
-struct hsm_user_item {
- lustre_fid hui_fid;
- struct hsm_extent hui_extent;
-} __attribute__((packed));
-
-struct hsm_user_request {
- struct hsm_request hur_request;
- struct hsm_user_item hur_user_item[0];
- /* extra data blob at end of struct (after all
- * hur_user_items), only use helpers to access it
- */
-} __attribute__((packed));
-
-/** Return pointer to data field in a hsm user request */
-static inline void *hur_data(struct hsm_user_request *hur)
-{
- return &(hur->hur_user_item[hur->hur_request.hr_itemcount]);
-}
-
-/**
- * Compute the current length of the provided hsm_user_request. This returns -1
- * instead of an errno because ssize_t is defined to be only [ -1, SSIZE_MAX ]
- *
- * return -1 on bounds check error.
- */
-static inline ssize_t hur_len(struct hsm_user_request *hur)
-{
- __u64 size;
-
- /* can't overflow a __u64 since hr_itemcount is only __u32 */
- size = offsetof(struct hsm_user_request, hur_user_item[0]) +
- (__u64)hur->hur_request.hr_itemcount *
- sizeof(hur->hur_user_item[0]) + hur->hur_request.hr_data_len;
-
- if (size != (ssize_t)size)
- return -1;
-
- return size;
-}
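A request is built as a single contiguous allocation: the fixed hsm_request, then hr_itemcount copies of hsm_user_item, then the opaque data blob that hur_data() points at, with hur_len() giving the total. A hedged userspace-style sketch of that sizing; the stand-in structs are simplified placeholders, not the real layouts.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Simplified stand-ins, for sizing illustration only. */
struct fid_ish     { uint64_t seq; uint32_t oid, ver; };
struct extent_ish  { uint64_t offset, length; };
struct request_ish { uint32_t action, archive_id; uint64_t flags;
		     uint32_t itemcount, data_len; };
struct item_ish    { struct fid_ish fid; struct extent_ish extent; };
struct hur_ish     { struct request_ish request; struct item_ish items[]; };

int main(void)
{
	uint32_t nitems = 3, data_len = 32;
	size_t len = sizeof(struct hur_ish) +
		     (size_t)nitems * sizeof(struct item_ish) + data_len;
	struct hur_ish *hur = calloc(1, len);

	if (!hur)
		return 1;
	hur->request.itemcount = nitems;
	hur->request.data_len = data_len;
	memset(&hur->items[nitems], 0, data_len);	/* hur_data() region */
	printf("request length: %zu bytes\n", len);
	free(hur);
	return 0;
}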
-
-/****** HSM RPCs to copytool *****/
-/* Message types the copytool may receive */
-enum hsm_message_type {
- HMT_ACTION_LIST = 100, /* message is a hsm_action_list */
-};
-
-/* Actions the copytool may be instructed to take for a given action_item */
-enum hsm_copytool_action {
- HSMA_NONE = 10, /* no action */
- HSMA_ARCHIVE = 20, /* arbitrary offset */
- HSMA_RESTORE = 21,
- HSMA_REMOVE = 22,
- HSMA_CANCEL = 23
-};
-
-static inline char *hsm_copytool_action2name(enum hsm_copytool_action a)
-{
- switch (a) {
- case HSMA_NONE: return "NOOP";
- case HSMA_ARCHIVE: return "ARCHIVE";
- case HSMA_RESTORE: return "RESTORE";
- case HSMA_REMOVE: return "REMOVE";
- case HSMA_CANCEL: return "CANCEL";
- default: return "UNKNOWN";
- }
-}
-
-/* Copytool item action description */
-struct hsm_action_item {
- __u32 hai_len; /* valid size of this struct */
- __u32 hai_action; /* hsm_copytool_action, but use known size */
-	lustre_fid hai_fid;	/* Lustre FID to operate on */
- lustre_fid hai_dfid; /* fid used for data access */
- struct hsm_extent hai_extent; /* byte range to operate on */
- __u64 hai_cookie; /* action cookie from coordinator */
- __u64 hai_gid; /* grouplock id */
- char hai_data[0]; /* variable length */
-} __attribute__((packed));
-
-/*
- * Helper function which prints in hex the first bytes of
- * the hai opaque field.
- * \param hai [IN] record to print
- * \param buffer [OUT] output buffer
- * \param len [IN] max buffer len
- * \retval buffer
- */
-static inline char *hai_dump_data_field(struct hsm_action_item *hai,
- char *buffer, int len)
-{
- int i, sz, data_len;
- char *ptr;
-
- ptr = buffer;
- sz = len;
- data_len = hai->hai_len - sizeof(*hai);
- for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
- int cnt;
-
- cnt = snprintf(ptr, sz, "%.2X",
- (unsigned char)hai->hai_data[i]);
- ptr += cnt;
- sz -= cnt;
- }
- *ptr = '\0';
- return buffer;
-}
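The same bounded hex formatting can be exercised on its own. A small sketch with a fabricated opaque buffer; an explicit bounds check is added here so the sketch cannot run past its output buffer.

#include <stdio.h>

int main(void)
{
	unsigned char data[] = { 0xde, 0xad, 0xbe, 0xef };
	char out[32], *p = out;
	int i, left = sizeof(out);

	for (i = 0; i < (int)sizeof(data) && left > 1; i++) {
		int n = snprintf(p, left, "%.2X", data[i]);

		if (n < 0 || n >= left)
			break;
		p += n;
		left -= n;
	}
	*p = '\0';
	printf("opaque data: %s\n", out);	/* prints "DEADBEEF" */
	return 0;
}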
-
-/* Copytool action list */
-#define HAL_VERSION 1
-#define HAL_MAXSIZE LNET_MTU /* bytes, used in userspace only */
-struct hsm_action_list {
- __u32 hal_version;
- __u32 hal_count; /* number of hai's to follow */
- __u64 hal_compound_id; /* returned by coordinator */
- __u64 hal_flags;
- __u32 hal_archive_id; /* which archive backend */
- __u32 padding1;
- char hal_fsname[0]; /* null-terminated */
- /* struct hsm_action_item[hal_count] follows, aligned on 8-byte
- boundaries. See hai_zero */
-} __attribute__((packed));
-
-#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round (int val)
-{
- return (val + 7) & (~0x7);
-}
-#define HAVE_CFS_SIZE_ROUND
-#endif
-
-/* Return pointer to first hai in action list */
-static inline struct hsm_action_item *hai_zero(struct hsm_action_list *hal)
-{
-	return (struct hsm_action_item *)(hal->hal_fsname +
-					  cfs_size_round(strlen(hal->hal_fsname) + 1));
-}
-/* Return pointer to next hai */
-static inline struct hsm_action_item *hai_next(struct hsm_action_item *hai)
-{
- return (struct hsm_action_item *)((char *)hai +
- cfs_size_round(hai->hai_len));
-}
-
-/* Return size of an hsm_action_list */
-static inline int hal_size(struct hsm_action_list *hal)
-{
- int i, sz;
- struct hsm_action_item *hai;
-
- sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
- hai = hai_zero(hal);
- for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
- sz += cfs_size_round(hai->hai_len);
-
- return sz;
-}
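hai_zero() and hai_next() define how a copytool walks the list: the first item starts after the 8-byte-rounded fsname, and each subsequent item starts at the rounded end of the previous one, which is also what hal_size() sums. A minimal standalone walk over the same offset arithmetic; the 32-byte fixed header and the item lengths are assumptions of this sketch.

#include <stdio.h>
#include <string.h>

static int size_round(int val)			/* same rounding as cfs_size_round() */
{
	return (val + 7) & ~0x7;
}

int main(void)
{
	const char *fsname = "lustre";
	int hal_header = 32;			/* assumed size of the fixed hsm_action_list part */
	int hai_lens[] = { 72, 80, 75 };	/* hai_len of each item (made up) */
	int off = hal_header + size_round((int)strlen(fsname) + 1);	/* hai_zero() */
	int i;

	for (i = 0; i < 3; i++) {
		printf("item %d starts at offset %d\n", i, off);
		off += size_round(hai_lens[i]);	/* hai_next() */
	}
	printf("total list size: %d bytes\n", off);	/* hal_size() */
	return 0;
}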
-
-/* HSM file import
- * describe the attributes to be set on imported file
- */
-struct hsm_user_import {
- __u64 hui_size;
- __u64 hui_atime;
- __u64 hui_mtime;
- __u32 hui_atime_ns;
- __u32 hui_mtime_ns;
- __u32 hui_uid;
- __u32 hui_gid;
- __u32 hui_mode;
- __u32 hui_archive_id;
-};
-
-/* Copytool progress reporting */
-#define HP_FLAG_COMPLETED 0x01
-#define HP_FLAG_RETRY 0x02
-
-struct hsm_progress {
- lustre_fid hp_fid;
- __u64 hp_cookie;
- struct hsm_extent hp_extent;
- __u16 hp_flags;
- __u16 hp_errval; /* positive val */
- __u32 padding;
-};
-
-struct hsm_copy {
- __u64 hc_data_version;
- __u16 hc_flags;
- __u16 hc_errval; /* positive val */
- __u32 padding;
- struct hsm_action_item hc_hai;
-};
-
-/** @} lustreuser */
-
-#endif /* _LUSTRE_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
deleted file mode 100644
index aa4cfa7b749d..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_acl.h
- */
-
-#ifndef _LUSTRE_ACL_H
-#define _LUSTRE_ACL_H
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-#include <linux/posix_acl_xattr.h>
-
-#define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
-#define LUSTRE_POSIX_ACL_MAX_SIZE \
- (sizeof(posix_acl_xattr_header) + \
- LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(posix_acl_xattr_entry))
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
deleted file mode 100644
index c80d78e5b30d..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_CFG_H
-#define _LUSTRE_CFG_H
-
-/** \defgroup cfg cfg
- *
- * @{
- */
-
-/*
- * 1cf6
- * lcfG
- */
-#define LUSTRE_CFG_VERSION 0x1cf60001
-#define LUSTRE_CFG_MAX_BUFCOUNT 8
-
-#define LCFG_HDR_SIZE(count) \
- cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)]))
-
-/** If the LCFG_REQUIRED bit is set in a configuration command,
- * then the client is required to understand this parameter
- * in order to mount the filesystem. If it does not understand
- * a REQUIRED command the client mount will fail. */
-#define LCFG_REQUIRED 0x0001000
-
-enum lcfg_command_type {
- LCFG_ATTACH = 0x00cf001, /**< create a new obd instance */
- LCFG_DETACH = 0x00cf002, /**< destroy obd instance */
- LCFG_SETUP = 0x00cf003, /**< call type-specific setup */
- LCFG_CLEANUP = 0x00cf004, /**< call type-specific cleanup */
- LCFG_ADD_UUID = 0x00cf005, /**< add a nid to a niduuid */
- LCFG_DEL_UUID = 0x00cf006, /**< remove a nid from a niduuid */
- LCFG_MOUNTOPT = 0x00cf007, /**< create a profile (mdc, osc) */
- LCFG_DEL_MOUNTOPT = 0x00cf008, /**< destroy a profile */
- LCFG_SET_TIMEOUT = 0x00cf009, /**< set obd_timeout */
- LCFG_SET_UPCALL = 0x00cf00a, /**< deprecated */
- LCFG_ADD_CONN = 0x00cf00b, /**< add a failover niduuid to an obd */
- LCFG_DEL_CONN = 0x00cf00c, /**< remove a failover niduuid */
- LCFG_LOV_ADD_OBD = 0x00cf00d, /**< add an osc to a lov */
- LCFG_LOV_DEL_OBD = 0x00cf00e, /**< remove an osc from a lov */
- LCFG_PARAM = 0x00cf00f, /**< set a proc parameter */
- LCFG_MARKER = 0x00cf010, /**< metadata about next cfg rec */
- LCFG_LOG_START = 0x00ce011, /**< mgc only, process a cfg log */
- LCFG_LOG_END = 0x00ce012, /**< stop processing updates */
- LCFG_LOV_ADD_INA = 0x00ce013, /**< like LOV_ADD_OBD, inactive */
- LCFG_ADD_MDC = 0x00cf014, /**< add an mdc to a lmv */
- LCFG_DEL_MDC = 0x00cf015, /**< remove an mdc from a lmv */
- LCFG_SPTLRPC_CONF = 0x00ce016, /**< security */
- LCFG_POOL_NEW = 0x00ce020, /**< create an ost pool name */
- LCFG_POOL_ADD = 0x00ce021, /**< add an ost to a pool */
- LCFG_POOL_REM = 0x00ce022, /**< remove an ost from a pool */
- LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */
- LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */
-	LCFG_PRE_CLEANUP	 = 0x00cf031, /**< call type-specific
-					       * pre-cleanup */
-	LCFG_SET_PARAM		 = 0x00ce032, /**< use set_param syntax to set
-					       * a proc parameter */
-};
-
-struct lustre_cfg_bufs {
- void *lcfg_buf[LUSTRE_CFG_MAX_BUFCOUNT];
- __u32 lcfg_buflen[LUSTRE_CFG_MAX_BUFCOUNT];
- __u32 lcfg_bufcount;
-};
-
-struct lustre_cfg {
- __u32 lcfg_version;
- __u32 lcfg_command;
-
- __u32 lcfg_num;
- __u32 lcfg_flags;
- __u64 lcfg_nid;
- __u32 lcfg_nal; /* not used any more */
-
- __u32 lcfg_bufcount;
- __u32 lcfg_buflens[0];
-};
-
-enum cfg_record_type {
- PORTALS_CFG_TYPE = 1,
- LUSTRE_CFG_TYPE = 123,
-};
-
-#define LUSTRE_CFG_BUFLEN(lcfg, idx) \
- ((lcfg)->lcfg_bufcount <= (idx) \
- ? 0 \
- : (lcfg)->lcfg_buflens[(idx)])
-
-static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs,
- __u32 index,
- void *buf,
- __u32 buflen)
-{
- if (index >= LUSTRE_CFG_MAX_BUFCOUNT)
- return;
- if (bufs == NULL)
- return;
-
- if (bufs->lcfg_bufcount <= index)
- bufs->lcfg_bufcount = index + 1;
-
- bufs->lcfg_buf[index] = buf;
- bufs->lcfg_buflen[index] = buflen;
-}
-
-static inline void lustre_cfg_bufs_set_string(struct lustre_cfg_bufs *bufs,
- __u32 index,
- char *str)
-{
- lustre_cfg_bufs_set(bufs, index, str, str ? strlen(str) + 1 : 0);
-}
-
-static inline void lustre_cfg_bufs_reset(struct lustre_cfg_bufs *bufs, char *name)
-{
- memset((bufs), 0, sizeof(*bufs));
- if (name)
- lustre_cfg_bufs_set_string(bufs, 0, name);
-}
-
-static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
-{
- int i;
- int offset;
- int bufcount;
- LASSERT (lcfg != NULL);
- LASSERT (index >= 0);
-
- bufcount = lcfg->lcfg_bufcount;
- if (index >= bufcount)
- return NULL;
-
- offset = LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
- for (i = 0; i < index; i++)
- offset += cfs_size_round(lcfg->lcfg_buflens[i]);
- return (char *)lcfg + offset;
-}
-
-static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
- struct lustre_cfg *lcfg)
-{
- int i;
- bufs->lcfg_bufcount = lcfg->lcfg_bufcount;
- for (i = 0; i < bufs->lcfg_bufcount; i++) {
- bufs->lcfg_buflen[i] = lcfg->lcfg_buflens[i];
- bufs->lcfg_buf[i] = lustre_cfg_buf(lcfg, i);
- }
-}
-
-static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
-{
- char *s;
-
- if (lcfg->lcfg_buflens[index] == 0)
- return NULL;
-
- s = lustre_cfg_buf(lcfg, index);
- if (s == NULL)
- return NULL;
-
- /*
- * make sure it's NULL terminated, even if this kills a char
- * of data. Try to use the padding first though.
- */
- if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
- int last = min((int)lcfg->lcfg_buflens[index],
- cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
- char lost = s[last];
- s[last] = '\0';
- if (lost != '\0') {
- CWARN("Truncated buf %d to '%s' (lost '%c'...)\n",
- index, s, lost);
- }
- }
- return s;
-}
-
-static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens)
-{
- int i;
- int len;
-
- len = LCFG_HDR_SIZE(bufcount);
- for (i = 0; i < bufcount; i++)
- len += cfs_size_round(buflens[i]);
-
- return cfs_size_round(len);
-}
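The record layout implied by LCFG_HDR_SIZE() and lustre_cfg_len() is: the fixed lustre_cfg header plus one __u32 length per buffer, then each buffer payload rounded up to 8 bytes, with the total rounded once more. A standalone re-computation of that length; the mirrored header layout and the sample buffer lengths are assumptions of this sketch.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror of the fixed part of struct lustre_cfg, for offset math only. */
struct lcfg_ish {
	uint32_t version, command, num, flags;
	uint64_t nid;
	uint32_t nal, bufcount;
	uint32_t buflens[];
};

static int size_round(int val)			/* same rounding as cfs_size_round() */
{
	return (val + 7) & ~0x7;
}

int main(void)
{
	uint32_t buflens[] = { 7, 21, 3 };	/* e.g. obd name plus two parameter strings */
	int count = 3, i;
	int len = size_round(offsetof(struct lcfg_ish, buflens) +
			     count * (int)sizeof(uint32_t));	/* LCFG_HDR_SIZE(count) */

	for (i = 0; i < count; i++)
		len += size_round(buflens[i]);
	printf("lustre_cfg record length: %d bytes\n", size_round(len));
	return 0;
}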
-
-
-#include "obd_support.h"
-
-static inline struct lustre_cfg *lustre_cfg_new(int cmd,
- struct lustre_cfg_bufs *bufs)
-{
- struct lustre_cfg *lcfg;
- char *ptr;
- int i;
-
- lcfg = kzalloc(lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen),
- GFP_NOFS);
- if (!lcfg)
- return ERR_PTR(-ENOMEM);
-
- lcfg->lcfg_version = LUSTRE_CFG_VERSION;
- lcfg->lcfg_command = cmd;
- lcfg->lcfg_bufcount = bufs->lcfg_bufcount;
-
- ptr = (char *)lcfg + LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
- for (i = 0; i < lcfg->lcfg_bufcount; i++) {
- lcfg->lcfg_buflens[i] = bufs->lcfg_buflen[i];
- LOGL((char *)bufs->lcfg_buf[i], bufs->lcfg_buflen[i], ptr);
- }
- return lcfg;
-}
-
-static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
-{
- int len;
-
- len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens);
-
- kfree(lcfg);
- return;
-}
-
-static inline int lustre_cfg_sanity_check(void *buf, int len)
-{
- struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
-
- if (!lcfg)
- return -EINVAL;
-
- /* check that the first bits of the struct are valid */
- if (len < LCFG_HDR_SIZE(0))
- return -EINVAL;
-
- if (lcfg->lcfg_version != LUSTRE_CFG_VERSION)
- return -EINVAL;
-
- if (lcfg->lcfg_bufcount >= LUSTRE_CFG_MAX_BUFCOUNT)
- return -EINVAL;
-
- /* check that the buflens are valid */
- if (len < LCFG_HDR_SIZE(lcfg->lcfg_bufcount))
- return -EINVAL;
-
- /* make sure all the pointers point inside the data */
- if (len < lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens))
- return -EINVAL;
-
- return 0;
-}
-
-#include "lustre/lustre_user.h"
-
-/** @} cfg */
-
-#endif /* _LUSTRE_CFG_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
deleted file mode 100644
index 8a089413c92e..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_DEBUG_H
-#define _LUSTRE_DEBUG_H
-
-/** \defgroup debug debug
- *
- * @{
- */
-
-#include "lustre_net.h"
-#include "obd.h"
-
-/* lib/debug.c */
-int dump_req(struct ptlrpc_request *req);
-int block_debug_setup(void *addr, int len, __u64 off, __u64 id);
-int block_debug_check(char *who, void *addr, int len, __u64 off, __u64 id);
-
-/** @} debug */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
deleted file mode 100644
index 4178c8563dd3..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_disk.h
- *
- * Lustre disk format definitions.
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#ifndef _LUSTRE_DISK_H
-#define _LUSTRE_DISK_H
-
-/** \defgroup disk disk
- *
- * @{
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/types.h"
-#include <linux/backing-dev.h>
-
-/****************** persistent mount data *********************/
-
-#define LDD_F_SV_TYPE_MDT 0x0001
-#define LDD_F_SV_TYPE_OST 0x0002
-#define LDD_F_SV_TYPE_MGS 0x0004
-#define LDD_F_SV_TYPE_MASK (LDD_F_SV_TYPE_MDT | \
- LDD_F_SV_TYPE_OST | \
- LDD_F_SV_TYPE_MGS)
-#define LDD_F_SV_ALL 0x0008
-
-/****************** mount command *********************/
-
-/* The lmd is only used internally by Lustre; mount simply passes
- everything as string options */
-
-#define LMD_MAGIC 0xbdacbd03
-
-/* gleaned from the mount command - no persistent info here */
-struct lustre_mount_data {
- __u32 lmd_magic;
- __u32 lmd_flags; /* lustre mount flags */
- int lmd_mgs_failnodes; /* mgs failover node count */
- int lmd_exclude_count;
- int lmd_recovery_time_soft;
- int lmd_recovery_time_hard;
- char *lmd_dev; /* device name */
- char *lmd_profile; /* client only */
- char *lmd_mgssec; /* sptlrpc flavor to mgs */
- char *lmd_opts; /* lustre mount options (as opposed to
- _device_ mount options) */
- char *lmd_params; /* lustre params */
- __u32 *lmd_exclude; /* array of OSTs to ignore */
- char *lmd_mgs; /* MGS nid */
- char *lmd_osd_type; /* OSD type */
-};
-
-#define LMD_FLG_SERVER 0x0001 /* Mounting a server */
-#define LMD_FLG_CLIENT 0x0002 /* Mounting a client */
-#define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */
-#define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers,
- no other services */
-#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing
- existing MGS services */
-#define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */
-#define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */
-#define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */
-#define LMD_FLG_MGS 0x0200 /* Also start MGS along with server */
-#define LMD_FLG_IAM 0x0400 /* IAM dir */
-#define LMD_FLG_NO_PRIMNODE 0x0800 /* all nodes are service nodes */
-#define LMD_FLG_VIRGIN 0x1000 /* the service registers first time */
-#define LMD_FLG_UPDATE 0x2000 /* update parameters */
-#define LMD_FLG_HSM 0x4000 /* Start coordinator */
-
-#define lmd_is_client(x) ((x)->lmd_flags & LMD_FLG_CLIENT)
-
-
-/****************** last_rcvd file *********************/
-
-/** version recovery epoch */
-#define LR_EPOCH_BITS 32
-#define lr_epoch(a) ((a) >> LR_EPOCH_BITS)
-#define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */
-#define ENOENT_VERSION 1 /** 'virtual' version of non-existent object */
-
-#define LR_SERVER_SIZE 512
-#define LR_CLIENT_START 8192
-#define LR_CLIENT_SIZE 128
-#if LR_CLIENT_START < LR_SERVER_SIZE
-#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE"
-#endif
-
-/*
- * This limit is arbitrary (131072 clients on x86), but it is convenient to use
- * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation.
- * If we need more than 131072 clients (order-2 allocation on x86) then this
- * should become an array of single-page pointers that are allocated on demand.
- */
-#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8)
-#define LR_MAX_CLIENTS (128 * 1024UL)
-#else
-#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8)
-#endif
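Spelling the comment's arithmetic out: 128 * 1024 = 131072 client slots need 131072 / 8 = 16384 bytes of bitmap, i.e. four 4 KiB pages, which is an order-2 page allocation (assuming a 4096-byte PAGE_CACHE_SIZE, as on x86).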
-
-/** COMPAT_146: this is an OST (temporary) */
-#define OBD_COMPAT_OST 0x00000002
-/** COMPAT_146: this is an MDT (temporary) */
-#define OBD_COMPAT_MDT 0x00000004
-/** 2.0 server, interop flag to show server version is changed */
-#define OBD_COMPAT_20 0x00000008
-
-/** MDS handles LOV_OBJID file */
-#define OBD_ROCOMPAT_LOVOBJID 0x00000001
-
-/** OST handles group subdirs */
-#define OBD_INCOMPAT_GROUPS 0x00000001
-/** this is an OST */
-#define OBD_INCOMPAT_OST 0x00000002
-/** this is an MDT */
-#define OBD_INCOMPAT_MDT 0x00000004
-/** common last_rcvd format */
-#define OBD_INCOMPAT_COMMON_LR 0x00000008
-/** FID is enabled */
-#define OBD_INCOMPAT_FID 0x00000010
-/** Size-on-MDS is enabled */
-#define OBD_INCOMPAT_SOM 0x00000020
-/** filesystem using iam format to store directory entries */
-#define OBD_INCOMPAT_IAM_DIR 0x00000040
-/** LMA attribute contains per-inode incompatible flags */
-#define OBD_INCOMPAT_LMA 0x00000080
-/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16
- * bits are now used to store a generation. Once we start changing the layout
- * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count
- * will be confused by interpreting stripe_count | gen << 16 as the actual
- * stripe count */
-#define OBD_INCOMPAT_LMM_VER 0x00000100
-/** multiple OI files for MDT */
-#define OBD_INCOMPAT_MULTI_OI 0x00000200
-
-/* Data stored per server at the head of the last_rcvd file. In le32 order.
- This should be common to filter_internal.h, lustre_mds.h */
-struct lr_server_data {
- __u8 lsd_uuid[40]; /* server UUID */
- __u64 lsd_last_transno; /* last completed transaction ID */
- __u64 lsd_compat14; /* reserved - compat with old last_rcvd */
- __u64 lsd_mount_count; /* incarnation number */
- __u32 lsd_feature_compat; /* compatible feature flags */
- __u32 lsd_feature_rocompat;/* read-only compatible feature flags */
- __u32 lsd_feature_incompat;/* incompatible feature flags */
- __u32 lsd_server_size; /* size of server data area */
- __u32 lsd_client_start; /* start of per-client data area */
- __u16 lsd_client_size; /* size of per-client data area */
- __u16 lsd_subdir_count; /* number of subdirectories for objects */
- __u64 lsd_catalog_oid; /* recovery catalog object id */
- __u32 lsd_catalog_ogen; /* recovery catalog inode generation */
- __u8 lsd_peeruuid[40]; /* UUID of MDS associated with this OST */
- __u32 lsd_osd_index; /* index number of OST in LOV */
- __u32 lsd_padding1; /* was lsd_mdt_index, unused in 2.4.0 */
- __u32 lsd_start_epoch; /* VBR: start epoch from last boot */
- /** transaction values since lsd_trans_table_time */
- __u64 lsd_trans_table[LR_EXPIRE_INTERVALS];
- /** start point of transno table below */
- __u32 lsd_trans_table_time; /* time of first slot in table above */
- __u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */
- __u8 lsd_padding[LR_SERVER_SIZE - 288];
-};
-
-/* Data stored per client in the last_rcvd file. In le32 order. */
-struct lsd_client_data {
- __u8 lcd_uuid[40]; /* client UUID */
- __u64 lcd_last_transno; /* last completed transaction ID */
- __u64 lcd_last_xid; /* xid for the last transaction */
- __u32 lcd_last_result; /* result from last RPC */
- __u32 lcd_last_data; /* per-op data (disposition for open &c.) */
- /* for MDS_CLOSE requests */
- __u64 lcd_last_close_transno; /* last completed transaction ID */
- __u64 lcd_last_close_xid; /* xid for the last transaction */
- __u32 lcd_last_close_result; /* result from last RPC */
- __u32 lcd_last_close_data; /* per-op data */
- /* VBR: last versions */
- __u64 lcd_pre_versions[4];
- __u32 lcd_last_epoch;
-	/** orphan handling for delayed exports relies on this */
- __u32 lcd_first_epoch;
- __u8 lcd_padding[LR_CLIENT_SIZE - 128];
-};
-
-/* bug20354: the lcd_uuid for export of clients may be wrong */
-static inline void check_lcd(char *obd_name, int index,
- struct lsd_client_data *lcd)
-{
- int length = sizeof(lcd->lcd_uuid);
- if (strnlen((char *)lcd->lcd_uuid, length) == length) {
- lcd->lcd_uuid[length - 1] = '\0';
-
- LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n",
- lcd->lcd_uuid, obd_name, index);
- }
-}
-
-/* last_rcvd handling */
-static inline void lsd_le_to_cpu(struct lr_server_data *buf,
- struct lr_server_data *lsd)
-{
- int i;
- memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid));
- lsd->lsd_last_transno = le64_to_cpu(buf->lsd_last_transno);
- lsd->lsd_compat14 = le64_to_cpu(buf->lsd_compat14);
- lsd->lsd_mount_count = le64_to_cpu(buf->lsd_mount_count);
- lsd->lsd_feature_compat = le32_to_cpu(buf->lsd_feature_compat);
- lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat);
- lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat);
- lsd->lsd_server_size = le32_to_cpu(buf->lsd_server_size);
- lsd->lsd_client_start = le32_to_cpu(buf->lsd_client_start);
- lsd->lsd_client_size = le16_to_cpu(buf->lsd_client_size);
- lsd->lsd_subdir_count = le16_to_cpu(buf->lsd_subdir_count);
- lsd->lsd_catalog_oid = le64_to_cpu(buf->lsd_catalog_oid);
- lsd->lsd_catalog_ogen = le32_to_cpu(buf->lsd_catalog_ogen);
- memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid));
- lsd->lsd_osd_index = le32_to_cpu(buf->lsd_osd_index);
- lsd->lsd_padding1 = le32_to_cpu(buf->lsd_padding1);
- lsd->lsd_start_epoch = le32_to_cpu(buf->lsd_start_epoch);
- for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
- lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]);
- lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time);
- lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals);
-}
-
-static inline void lsd_cpu_to_le(struct lr_server_data *lsd,
- struct lr_server_data *buf)
-{
- int i;
- memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid));
- buf->lsd_last_transno = cpu_to_le64(lsd->lsd_last_transno);
- buf->lsd_compat14 = cpu_to_le64(lsd->lsd_compat14);
- buf->lsd_mount_count = cpu_to_le64(lsd->lsd_mount_count);
- buf->lsd_feature_compat = cpu_to_le32(lsd->lsd_feature_compat);
- buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat);
- buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat);
- buf->lsd_server_size = cpu_to_le32(lsd->lsd_server_size);
- buf->lsd_client_start = cpu_to_le32(lsd->lsd_client_start);
- buf->lsd_client_size = cpu_to_le16(lsd->lsd_client_size);
- buf->lsd_subdir_count = cpu_to_le16(lsd->lsd_subdir_count);
- buf->lsd_catalog_oid = cpu_to_le64(lsd->lsd_catalog_oid);
- buf->lsd_catalog_ogen = cpu_to_le32(lsd->lsd_catalog_ogen);
- memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid));
- buf->lsd_osd_index = cpu_to_le32(lsd->lsd_osd_index);
- buf->lsd_padding1 = cpu_to_le32(lsd->lsd_padding1);
- buf->lsd_start_epoch = cpu_to_le32(lsd->lsd_start_epoch);
- for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
- buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]);
- buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time);
- buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals);
-}
-
-static inline void lcd_le_to_cpu(struct lsd_client_data *buf,
- struct lsd_client_data *lcd)
-{
- memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid));
- lcd->lcd_last_transno = le64_to_cpu(buf->lcd_last_transno);
- lcd->lcd_last_xid = le64_to_cpu(buf->lcd_last_xid);
- lcd->lcd_last_result = le32_to_cpu(buf->lcd_last_result);
- lcd->lcd_last_data = le32_to_cpu(buf->lcd_last_data);
- lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno);
- lcd->lcd_last_close_xid = le64_to_cpu(buf->lcd_last_close_xid);
- lcd->lcd_last_close_result = le32_to_cpu(buf->lcd_last_close_result);
- lcd->lcd_last_close_data = le32_to_cpu(buf->lcd_last_close_data);
- lcd->lcd_pre_versions[0] = le64_to_cpu(buf->lcd_pre_versions[0]);
- lcd->lcd_pre_versions[1] = le64_to_cpu(buf->lcd_pre_versions[1]);
- lcd->lcd_pre_versions[2] = le64_to_cpu(buf->lcd_pre_versions[2]);
- lcd->lcd_pre_versions[3] = le64_to_cpu(buf->lcd_pre_versions[3]);
- lcd->lcd_last_epoch = le32_to_cpu(buf->lcd_last_epoch);
- lcd->lcd_first_epoch = le32_to_cpu(buf->lcd_first_epoch);
-}
-
-static inline void lcd_cpu_to_le(struct lsd_client_data *lcd,
- struct lsd_client_data *buf)
-{
- memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid));
- buf->lcd_last_transno = cpu_to_le64(lcd->lcd_last_transno);
- buf->lcd_last_xid = cpu_to_le64(lcd->lcd_last_xid);
- buf->lcd_last_result = cpu_to_le32(lcd->lcd_last_result);
- buf->lcd_last_data = cpu_to_le32(lcd->lcd_last_data);
- buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno);
- buf->lcd_last_close_xid = cpu_to_le64(lcd->lcd_last_close_xid);
- buf->lcd_last_close_result = cpu_to_le32(lcd->lcd_last_close_result);
- buf->lcd_last_close_data = cpu_to_le32(lcd->lcd_last_close_data);
- buf->lcd_pre_versions[0] = cpu_to_le64(lcd->lcd_pre_versions[0]);
- buf->lcd_pre_versions[1] = cpu_to_le64(lcd->lcd_pre_versions[1]);
- buf->lcd_pre_versions[2] = cpu_to_le64(lcd->lcd_pre_versions[2]);
- buf->lcd_pre_versions[3] = cpu_to_le64(lcd->lcd_pre_versions[3]);
- buf->lcd_last_epoch = cpu_to_le32(lcd->lcd_last_epoch);
- buf->lcd_first_epoch = cpu_to_le32(lcd->lcd_first_epoch);
-}
-
-static inline __u64 lcd_last_transno(struct lsd_client_data *lcd)
-{
- return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ?
- lcd->lcd_last_transno : lcd->lcd_last_close_transno);
-}
-
-static inline __u64 lcd_last_xid(struct lsd_client_data *lcd)
-{
- return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ?
- lcd->lcd_last_xid : lcd->lcd_last_close_xid);
-}
-
-/****************** superblock additional info *********************/
-
-struct ll_sb_info;
-
-struct lustre_sb_info {
- int lsi_flags;
- struct obd_device *lsi_mgc; /* mgc obd */
- struct lustre_mount_data *lsi_lmd; /* mount command info */
- struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
- struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/
- struct vfsmount *lsi_srv_mnt; /* the one server mount */
- atomic_t lsi_mounts; /* references to the srv_mnt */
- char lsi_svname[MTI_NAME_MAXLEN];
- char lsi_osd_obdname[64];
- char lsi_osd_uuid[64];
- struct obd_export *lsi_osd_exp;
- char lsi_osd_type[16];
- char lsi_fstype[16];
-	struct backing_dev_info	 lsi_bdi;     /* each client mountpoint needs
-					       * its own backing_dev_info */
-};
-
-#define LSI_UMOUNT_FAILOVER 0x00200000
-#define LSI_BDI_INITIALIZED 0x00400000
-
-#define s2lsi(sb) ((struct lustre_sb_info *)((sb)->s_fs_info))
-#define s2lsi_nocast(sb) ((sb)->s_fs_info)
-
-#define get_profile_name(sb) (s2lsi(sb)->lsi_lmd->lmd_profile)
-#define get_mount_flags(sb) (s2lsi(sb)->lsi_lmd->lmd_flags)
-#define get_mntdev_name(sb) (s2lsi(sb)->lsi_lmd->lmd_dev)
-
-
-/****************** mount lookup info *********************/
-
-struct lustre_mount_info {
- char *lmi_name;
- struct super_block *lmi_sb;
- struct vfsmount *lmi_mnt;
- struct list_head lmi_list_chain;
-};
-
-/****************** prototypes *********************/
-
-/* obd_mount.c */
-int server_name2fsname(const char *svname, char *fsname, const char **endptr);
-int server_name2index(const char *svname, __u32 *idx, const char **endptr);
-
-int lustre_put_lsi(struct super_block *sb);
-int lustre_start_simple(char *obdname, char *type, char *uuid,
- char *s1, char *s2, char *s3, char *s4);
-int lustre_start_mgc(struct super_block *sb);
-void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
- struct vfsmount *mnt));
-void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb));
-int lustre_common_put_super(struct super_block *sb);
-
-
-int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type);
-
-/** @} disk */
-
-#endif /* _LUSTRE_DISK_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
deleted file mode 100644
index 1ac08e1c0559..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ /dev/null
@@ -1,1443 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/** \defgroup LDLM Lustre Distributed Lock Manager
- *
- * The Lustre DLM is based on the VAX DLM.
- * Its two main roles are:
- * - To provide locking that assures consistency of data on all Lustre nodes.
- * - To allow clients to cache state protected by a lock by holding the
- * lock until a conflicting lock is requested or it is expired by the LRU.
- *
- * @{
- */
-
-#ifndef _LUSTRE_DLM_H__
-#define _LUSTRE_DLM_H__
-
-#include "lustre_lib.h"
-#include "lustre_net.h"
-#include "lustre_import.h"
-#include "lustre_handles.h"
-#include "interval_tree.h" /* for interval_node{}, ldlm_extent */
-#include "lu_ref.h"
-
-#include "lustre_dlm_flags.h"
-
-struct obd_ops;
-struct obd_device;
-
-extern struct kset *ldlm_ns_kset;
-extern struct kset *ldlm_svc_kset;
-
-#define OBD_LDLM_DEVICENAME "ldlm"
-
-#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
-#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
-
-/**
- * LDLM non-error return states
- */
-typedef enum {
- ELDLM_OK = 0,
-
- ELDLM_LOCK_CHANGED = 300,
- ELDLM_LOCK_ABORTED = 301,
- ELDLM_LOCK_REPLACED = 302,
- ELDLM_NO_LOCK_DATA = 303,
- ELDLM_LOCK_WOULDBLOCK = 304,
-
- ELDLM_NAMESPACE_EXISTS = 400,
- ELDLM_BAD_NAMESPACE = 401
-} ldlm_error_t;
-
-/**
- * LDLM namespace type.
- * The "client" type is actually an indication that this is a narrow local view
- * into the complete namespace on the server. Such namespaces cannot make any
- * decisions about lack of conflicts or do any autonomous lock granting without
- * first speaking to a server.
- */
-typedef enum {
- LDLM_NAMESPACE_SERVER = 1 << 0,
- LDLM_NAMESPACE_CLIENT = 1 << 1
-} ldlm_side_t;
-
-/**
- * The blocking callback is overloaded to perform two functions. These flags
- * indicate which operation should be performed.
- */
-#define LDLM_CB_BLOCKING 1
-#define LDLM_CB_CANCELING 2
-
-/**
- * \name Lock Compatibility Matrix.
- *
- * A lock has both a type (extent, flock, inode bits, or plain) and a mode.
- * Lock types are described in their respective implementation files:
- * ldlm_{extent,flock,inodebits,plain}.c.
- *
- * There are six lock modes along with a compatibility matrix to indicate if
- * two locks are compatible.
- *
- * - EX: Exclusive mode. Before a new file is created, MDS requests EX lock
- * on the parent.
- * - PW: Protective Write (normal write) mode. When a client requests a write
- * lock from an OST, a lock with PW mode will be issued.
- * - PR: Protective Read (normal read) mode. When a client requests a read from
- * an OST, a lock with PR mode will be issued. Also, if the client opens a
- * file for execution, it is granted a lock with PR mode.
- * - CW: Concurrent Write mode. The type of lock that the MDS grants if a client
- * requests a write lock during a file open operation.
- * - CR: Concurrent Read mode. When a client performs a path lookup, MDS grants
- * an inodebit lock with the CR mode on the intermediate path component.
- * - NL: Null mode.
- *
- * <PRE>
- * NL CR CW PR PW EX
- * NL 1 1 1 1 1 1
- * CR 1 1 1 1 1 0
- * CW 1 1 1 0 0 0
- * PR 1 1 0 1 0 0
- * PW 1 1 0 0 0 0
- * EX 1 0 0 0 0 0
- * </PRE>
- */
-/** @{ */
-#define LCK_COMPAT_EX LCK_NL
-#define LCK_COMPAT_PW (LCK_COMPAT_EX | LCK_CR)
-#define LCK_COMPAT_PR (LCK_COMPAT_PW | LCK_PR)
-#define LCK_COMPAT_CW (LCK_COMPAT_PW | LCK_CW)
-#define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW)
-#define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
-#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)
-#define LCK_COMPAT_COS (LCK_COS)
-/** @} Lock Compatibility Matrix */
-
-extern ldlm_mode_t lck_compat_array[];
-
-static inline void lockmode_verify(ldlm_mode_t mode)
-{
- LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
-}
-
-static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
-{
- return (lck_compat_array[exist_mode] & new_mode);
-}
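-
-/*
- * Editor's note: an illustrative sketch, not part of the original header,
- * showing how the compatibility matrix above is consulted.  It assumes only
- * lockmode_compat() and the LCK_* mode constants declared in this file; the
- * function name is hypothetical.
- */
-static inline int example_modes_conflict(ldlm_mode_t granted, ldlm_mode_t requested)
-{
-	/* e.g. PR vs. PR -> compatible (no conflict), PR vs. PW -> conflict */
-	return !lockmode_compat(granted, requested);
-}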
-
-/*
- *
- * cluster name spaces
- *
- */
-
-#define DLM_OST_NAMESPACE 1
-#define DLM_MDS_NAMESPACE 2
-
-/* XXX
- - do we just separate this by security domains and use a prefix for
- multiple namespaces in the same domain?
- -
-*/
-
-/**
- * Locking rules for LDLM:
- *
- * lr_lock
- *
- * lr_lock
- * waiting_locks_spinlock
- *
- * lr_lock
- * led_lock
- *
- * lr_lock
- * ns_lock
- *
- * lr_lvb_mutex
- * lr_lock
- *
- */
-
-struct ldlm_pool;
-struct ldlm_lock;
-struct ldlm_resource;
-struct ldlm_namespace;
-
-/**
- * Operations on LDLM pools.
- * LDLM pool is a pool of locks in the namespace without any implicitly
- * specified limits.
- * Locks in the pool are organized in LRU.
- * Local memory pressure or server instructions (e.g. mempressure on server)
- * can trigger freeing of locks from the pool
- */
-struct ldlm_pool_ops {
- /** Recalculate pool \a pl usage */
- int (*po_recalc)(struct ldlm_pool *pl);
- /** Cancel at least \a nr locks from pool \a pl */
- int (*po_shrink)(struct ldlm_pool *pl, int nr,
- gfp_t gfp_mask);
- int (*po_setup)(struct ldlm_pool *pl, int limit);
-};
-
-/** One second for pools thread check interval. Each pool has its own period. */
-#define LDLM_POOLS_THREAD_PERIOD (1)
-
-/** ~6% margin for modest pools. See ldlm_pool.c for details. */
-#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)
-
-/** Default recalc period for server side pools in sec. */
-#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)
-
-/** Default recalc period for client side pools in sec. */
-#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
-
-/**
- * LDLM pool structure to track granted locks.
- * For purposes of determining when to release locks on e.g. memory pressure.
- * This feature is commonly referred to as lru_resize.
- */
-struct ldlm_pool {
- /** Pool debugfs directory. */
- struct dentry *pl_debugfs_entry;
- /** Pool name, must be long enough to hold compound proc entry name. */
- char pl_name[100];
- /** Lock for protecting SLV/CLV updates. */
- spinlock_t pl_lock;
-	/** Number of allowed locks in the pool, on both client and server side. */
- atomic_t pl_limit;
-	/** Number of granted locks in the pool. */
- atomic_t pl_granted;
- /** Grant rate per T. */
- atomic_t pl_grant_rate;
- /** Cancel rate per T. */
- atomic_t pl_cancel_rate;
- /** Server lock volume (SLV). Protected by pl_lock. */
- __u64 pl_server_lock_volume;
- /** Current biggest client lock volume. Protected by pl_lock. */
- __u64 pl_client_lock_volume;
-	/** Lock volume factor. SLV on client is calculated as follows:
- * server_slv * lock_volume_factor. */
- atomic_t pl_lock_volume_factor;
- /** Time when last SLV from server was obtained. */
- time64_t pl_recalc_time;
- /** Recalculation period for pool. */
- time64_t pl_recalc_period;
- /** Recalculation and shrink operations. */
- const struct ldlm_pool_ops *pl_ops;
- /** Number of planned locks for next period. */
- int pl_grant_plan;
- /** Pool statistics. */
- struct lprocfs_stats *pl_stats;
-
- /* sysfs object */
- struct kobject pl_kobj;
- struct completion pl_kobj_unregister;
-};
-
-typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
- void *req_cookie, ldlm_mode_t mode, __u64 flags,
- void *data);
-
-typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
-
-/**
- * LVB operations.
- * LVB is Lock Value Block. This is a special opaque (to LDLM) value that could
- * be associated with an LDLM lock and transferred from client to server and
- * back.
- *
- * Currently LVBs are used by:
- * - OSC-OST code to maintain current object size/times
- * - layout lock code to return the layout when the layout lock is granted
- */
-struct ldlm_valblock_ops {
- int (*lvbo_init)(struct ldlm_resource *res);
- int (*lvbo_update)(struct ldlm_resource *res,
- struct ptlrpc_request *r,
- int increase);
- int (*lvbo_free)(struct ldlm_resource *res);
-	/* Return size of LVB data so that a suitably sized RPC buffer can be reserved */
- int (*lvbo_size)(struct ldlm_lock *lock);
- /* Called to fill in lvb data to RPC buffer @buf */
- int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
-};
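-
-/*
- * Editor's note: a minimal illustrative sketch, not part of the original
- * header, of providing LVB callbacks for a namespace via ns->ns_lvbo.
- * The function and variable names are hypothetical, and struct ost_lvb is
- * assumed from the Lustre wire-protocol headers.
- */
-static int example_lvbo_size(struct ldlm_lock *lock)
-{
-	return sizeof(struct ost_lvb);	/* room reserved in the reply RPC */
-}
-
-static int example_lvbo_fill(struct ldlm_lock *lock, void *buf, int buflen)
-{
-	/* copy at most buflen bytes of LVB state into buf, return bytes used */
-	return 0;
-}
-
-static struct ldlm_valblock_ops example_lvb_ops = {
-	.lvbo_size = example_lvbo_size,
-	.lvbo_fill = example_lvbo_fill,	/* ldlm_lvbo_fill() asserts this is set */
-};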
-
-/**
- * LDLM pools related, type of lock pool in the namespace.
- * Greedy means release cached locks aggressively
- */
-typedef enum {
- LDLM_NAMESPACE_GREEDY = 1 << 0,
- LDLM_NAMESPACE_MODEST = 1 << 1
-} ldlm_appetite_t;
-
-struct ldlm_ns_bucket {
- /** back pointer to namespace */
- struct ldlm_namespace *nsb_namespace;
- /**
- * Estimated lock callback time. Used by adaptive timeout code to
- * avoid spurious client evictions due to unresponsiveness when in
- * fact the network or overall system load is at fault
- */
- struct adaptive_timeout nsb_at_estimate;
-};
-
-enum {
- /** LDLM namespace lock stats */
- LDLM_NSS_LOCKS = 0,
- LDLM_NSS_LAST
-};
-
-typedef enum {
- /** invalid type */
- LDLM_NS_TYPE_UNKNOWN = 0,
- /** mdc namespace */
- LDLM_NS_TYPE_MDC,
- /** mds namespace */
- LDLM_NS_TYPE_MDT,
- /** osc namespace */
- LDLM_NS_TYPE_OSC,
- /** ost namespace */
- LDLM_NS_TYPE_OST,
- /** mgc namespace */
- LDLM_NS_TYPE_MGC,
- /** mgs namespace */
- LDLM_NS_TYPE_MGT,
-} ldlm_ns_type_t;
-
-/**
- * LDLM Namespace.
- *
- * Namespace serves to contain locks related to a particular service.
- * There are two kinds of namespaces:
- * - Server namespace has knowledge of all locks and is therefore authoritative
- * to make decisions like what locks could be granted and what conflicts
- * exist during new lock enqueue.
- * - Client namespace only has limited knowledge about locks in the namespace,
- * only seeing locks held by the client.
- *
- * Every Lustre service has one server namespace present on the server serving
- * that service. Every client connected to the service has a client namespace
- * for it.
- * Every lock obtained by client in that namespace is actually represented by
- * two in-memory locks. One on the server and one on the client. The locks are
- * linked by a special cookie by which one node can tell the other which lock
- * it actually means during communications. Such locks are called remote locks.
- * The locks held by server only without any reference to a client are called
- * local locks.
- */
-struct ldlm_namespace {
- /** Backward link to OBD, required for LDLM pool to store new SLV. */
- struct obd_device *ns_obd;
-
- /** Flag indicating if namespace is on client instead of server */
- ldlm_side_t ns_client;
-
- /** Resource hash table for namespace. */
- struct cfs_hash *ns_rs_hash;
-
- /** serialize */
- spinlock_t ns_lock;
-
- /** big refcount (by bucket) */
- atomic_t ns_bref;
-
- /**
- * Namespace connect flags supported by server (may be changed via
- * /proc, LRU resize may be disabled/enabled).
- */
- __u64 ns_connect_flags;
-
- /** Client side original connect flags supported by server. */
- __u64 ns_orig_connect_flags;
-
- /* namespace debugfs dir entry */
- struct dentry *ns_debugfs_entry;
-
- /**
- * Position in global namespace list linking all namespaces on
- * the node.
- */
- struct list_head ns_list_chain;
-
- /**
- * List of unused locks for this namespace. This list is also called
- * LRU lock list.
- * Unused locks are locks with zero reader/writer reference counts.
- * This list is only used on clients for lock caching purposes.
- * When we want to release some locks voluntarily or if server wants
- * us to release some locks due to e.g. memory pressure, we take locks
- * to release from the head of this list.
- * Locks are linked via l_lru field in \see struct ldlm_lock.
- */
- struct list_head ns_unused_list;
- /** Number of locks in the LRU list above */
- int ns_nr_unused;
-
- /**
- * Maximum number of locks permitted in the LRU. If 0, means locks
- * are managed by pools and there is no preset limit, rather it is all
- * controlled by available memory on this client and on server.
- */
- unsigned int ns_max_unused;
- /** Maximum allowed age (last used time) for locks in the LRU */
- unsigned int ns_max_age;
-
- /**
- * Used to rate-limit ldlm_namespace_dump calls.
- * \see ldlm_namespace_dump. Increased by 10 seconds every time
- * it is called.
- */
- unsigned long ns_next_dump;
-
- /** "policy" function that does actual lock conflict determination */
- ldlm_res_policy ns_policy;
-
- /**
- * LVB operations for this namespace.
- * \see struct ldlm_valblock_ops
- */
- struct ldlm_valblock_ops *ns_lvbo;
-
- /**
- * Used by filter code to store pointer to OBD of the service.
- * Should be dropped in favor of \a ns_obd
- */
- void *ns_lvbp;
-
- /**
- * Wait queue used by __ldlm_namespace_free. Gets woken up every time
- * a resource is removed.
- */
- wait_queue_head_t ns_waitq;
- /** LDLM pool structure for this namespace */
- struct ldlm_pool ns_pool;
- /** Definition of how eagerly unused locks will be released from LRU */
- ldlm_appetite_t ns_appetite;
-
- /** Limit of parallel AST RPC count. */
- unsigned ns_max_parallel_ast;
-
- /** Callback to cancel locks before replaying it during recovery. */
- ldlm_cancel_for_recovery ns_cancel_for_recovery;
-
- /** LDLM lock stats */
- struct lprocfs_stats *ns_stats;
-
- /**
- * Flag to indicate namespace is being freed. Used to determine if
- * recalculation of LDLM pool statistics should be skipped.
- */
- unsigned ns_stopping:1;
-
- struct kobject ns_kobj; /* sysfs object */
- struct completion ns_kobj_unregister;
-};
-
-/**
- * Returns 1 if namespace \a ns is a client namespace.
- */
-static inline int ns_is_client(struct ldlm_namespace *ns)
-{
- LASSERT(ns != NULL);
- LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
- LDLM_NAMESPACE_SERVER)));
- LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
- ns->ns_client == LDLM_NAMESPACE_SERVER);
- return ns->ns_client == LDLM_NAMESPACE_CLIENT;
-}
-
-/**
- * Returns 1 if namespace \a ns is a server namespace.
- */
-static inline int ns_is_server(struct ldlm_namespace *ns)
-{
- LASSERT(ns != NULL);
- LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
- LDLM_NAMESPACE_SERVER)));
- LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
- ns->ns_client == LDLM_NAMESPACE_SERVER);
- return ns->ns_client == LDLM_NAMESPACE_SERVER;
-}
-
-/**
- * Returns 1 if namespace \a ns supports early lock cancel (ELC).
- */
-static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
-{
- LASSERT(ns != NULL);
- return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
-}
-
-/**
- * Returns 1 if this namespace supports lru_resize.
- */
-static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
-{
- LASSERT(ns != NULL);
- return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
-}
-
-static inline void ns_register_cancel(struct ldlm_namespace *ns,
- ldlm_cancel_for_recovery arg)
-{
- LASSERT(ns != NULL);
- ns->ns_cancel_for_recovery = arg;
-}
-
-struct ldlm_lock;
-
-/** Type for blocking callback function of a lock. */
-typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
- struct ldlm_lock_desc *new, void *data,
- int flag);
-/** Type for completion callback function of a lock. */
-typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
- void *data);
-/** Type for glimpse callback function of a lock. */
-typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
-
-/** Work list for sending GL ASTs to multiple locks. */
-struct ldlm_glimpse_work {
- struct ldlm_lock *gl_lock; /* lock to glimpse */
- struct list_head gl_list; /* linkage to other gl work structs */
- __u32 gl_flags;/* see LDLM_GL_WORK_* below */
- union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in
- * glimpse callback request */
-};
-
-/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
-#define LDLM_GL_WORK_NOFREE 0x1
-
-/** Interval node data for each LDLM_EXTENT lock. */
-struct ldlm_interval {
- struct interval_node li_node; /* node for tree management */
- struct list_head li_group; /* the locks which have the same
- * policy - group of the policy */
-};
-#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
-
-/**
- * Interval tree for extent locks.
- * The interval tree must be accessed under the resource lock.
- * Interval trees are used for granted extent locks to speed up conflicts
- * lookup. See ldlm/interval_tree.c for more details.
- */
-struct ldlm_interval_tree {
- /** Tree size. */
- int lit_size;
- ldlm_mode_t lit_mode; /* lock mode */
- struct interval_node *lit_root; /* actual ldlm_interval */
-};
-
-/** Whether to track references to exports by LDLM locks. */
-#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
-
-/** Cancel flags. */
-typedef enum {
- LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */
-	LCF_LOCAL = 0x2, /* Cancel locks locally, not notifying the server */
- LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
- * in the same RPC */
-} ldlm_cancel_flags_t;
-
-struct ldlm_flock {
- __u64 start;
- __u64 end;
- __u64 owner;
- __u64 blocking_owner;
- struct obd_export *blocking_export;
- /* Protected by the hash lock */
- __u32 blocking_refs;
- __u32 pid;
-};
-
-typedef union {
- struct ldlm_extent l_extent;
- struct ldlm_flock l_flock;
- struct ldlm_inodebits l_inodebits;
-} ldlm_policy_data_t;
-
-void ldlm_convert_policy_to_wire(ldlm_type_t type,
- const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-
-enum lvb_type {
- LVB_T_NONE = 0,
- LVB_T_OST = 1,
- LVB_T_LQUOTA = 2,
- LVB_T_LAYOUT = 3,
-};
-
-/**
- * LDLM lock structure
- *
- * Represents a single LDLM lock and its state in memory. Each lock is
- * associated with a single ldlm_resource, the object which is being
- * locked. There may be multiple ldlm_locks on a single resource,
- * depending on the lock type and whether the locks are conflicting or
- * not.
- */
-struct ldlm_lock {
- /**
- * Local lock handle.
- * When remote side wants to tell us about a lock, they address
- * it by this opaque handle. The handle does not hold a
- * reference on the ldlm_lock, so it can be safely passed to
- * other threads or nodes. When the lock needs to be accessed
- * from the handle, it is looked up again in the lock table, and
- * may no longer exist.
- *
- * Must be first in the structure.
- */
- struct portals_handle l_handle;
- /**
- * Lock reference count.
- * This is how many users have pointers to actual structure, so that
- * we do not accidentally free lock structure that is in use.
- */
- atomic_t l_refc;
- /**
- * Internal spinlock protects l_resource. We should hold this lock
- * first before taking res_lock.
- */
- spinlock_t l_lock;
- /**
- * Pointer to actual resource this lock is in.
- * ldlm_lock_change_resource() can change this.
- */
- struct ldlm_resource *l_resource;
- /**
- * List item for client side LRU list.
- * Protected by ns_lock in struct ldlm_namespace.
- */
- struct list_head l_lru;
- /**
- * Linkage to resource's lock queues according to current lock state.
- * (could be granted, waiting or converting)
- * Protected by lr_lock in struct ldlm_resource.
- */
- struct list_head l_res_link;
- /**
- * Tree node for ldlm_extent.
- */
- struct ldlm_interval *l_tree_node;
- /**
- * Per export hash of locks.
- * Protected by per-bucket exp->exp_lock_hash locks.
- */
- struct hlist_node l_exp_hash;
- /**
- * Per export hash of flock locks.
- * Protected by per-bucket exp->exp_flock_hash locks.
- */
- struct hlist_node l_exp_flock_hash;
- /**
- * Requested mode.
- * Protected by lr_lock.
- */
- ldlm_mode_t l_req_mode;
- /**
- * Granted mode, also protected by lr_lock.
- */
- ldlm_mode_t l_granted_mode;
- /** Lock completion handler pointer. Called when lock is granted. */
- ldlm_completion_callback l_completion_ast;
- /**
- * Lock blocking AST handler pointer.
- * It plays two roles:
- * - as a notification of an attempt to queue a conflicting lock (once)
- * - as a notification when the lock is being cancelled.
- *
- * As such it's typically called twice: once for the initial conflict
- * and then once more when the last user went away and the lock is
- * cancelled (could happen recursively).
- */
- ldlm_blocking_callback l_blocking_ast;
- /**
- * Lock glimpse handler.
- * Glimpse handler is used to obtain LVB updates from a client by
- * server
- */
- ldlm_glimpse_callback l_glimpse_ast;
-
- /**
- * Lock export.
- * This is a pointer to actual client export for locks that were granted
- * to clients. Used server-side.
- */
- struct obd_export *l_export;
- /**
- * Lock connection export.
- * Pointer to server export on a client.
- */
- struct obd_export *l_conn_export;
-
- /**
- * Remote lock handle.
- * If the lock is remote, this is the handle of the other side lock
- * (l_handle)
- */
- struct lustre_handle l_remote_handle;
-
- /**
- * Representation of private data specific for a lock type.
- * Examples are: extent range for extent lock or bitmask for ibits locks
- */
- ldlm_policy_data_t l_policy_data;
-
- /**
- * Lock state flags. Protected by lr_lock.
- * \see lustre_dlm_flags.h where the bits are defined.
- */
- __u64 l_flags;
-
- /**
- * Lock r/w usage counters.
- * Protected by lr_lock.
- */
- __u32 l_readers;
- __u32 l_writers;
- /**
- * If the lock is granted, a process sleeps on this waitq to learn when
- * it's no longer in use. If the lock is not granted, a process sleeps
- * on this waitq to learn when it becomes granted.
- */
- wait_queue_head_t l_waitq;
-
- /**
- * Seconds. It will be updated if there is any activity related to
- * the lock, e.g. enqueue the lock or send blocking AST.
- */
- time64_t l_last_activity;
-
- /**
- * Time last used by e.g. being matched by lock match.
- * Jiffies. Should be converted to time if needed.
- */
- unsigned long l_last_used;
-
- /** Originally requested extent for the extent lock. */
- struct ldlm_extent l_req_extent;
-
- /*
- * Client-side-only members.
- */
-
- enum lvb_type l_lvb_type;
-
- /**
- * Temporary storage for a LVB received during an enqueue operation.
- */
- __u32 l_lvb_len;
- void *l_lvb_data;
-
- /** Private storage for lock user. Opaque to LDLM. */
- void *l_ast_data;
-
- /*
- * Server-side-only members.
- */
-
- /**
- * Connection cookie for the client originating the operation.
- * Used by Commit on Share (COS) code. Currently only used for
- * inodebits locks on MDS.
- */
- __u64 l_client_cookie;
-
- /**
- * List item for locks waiting for cancellation from clients.
- * The lists this could be linked into are:
- * waiting_locks_list (protected by waiting_locks_spinlock),
- * then if the lock timed out, it is moved to
- * expired_lock_thread.elt_expired_locks for further processing.
- * Protected by elt_lock.
- */
- struct list_head l_pending_chain;
-
- /**
- * Set when lock is sent a blocking AST. Time in seconds when timeout
- * is reached and client holding this lock could be evicted.
- * This timeout could be further extended by e.g. certain IO activity
- * under this lock.
- * \see ost_rw_prolong_locks
- */
- unsigned long l_callback_timeout;
-
- /** Local PID of process which created this lock. */
- __u32 l_pid;
-
- /**
- * Number of times blocking AST was sent for this lock.
- * This is for debugging. Valid values are 0 and 1, if there is an
- * attempt to send blocking AST more than once, an assertion would be
- * hit. \see ldlm_work_bl_ast_lock
- */
- int l_bl_ast_run;
- /** List item ldlm_add_ast_work_item() for case of blocking ASTs. */
- struct list_head l_bl_ast;
- /** List item ldlm_add_ast_work_item() for case of completion ASTs. */
- struct list_head l_cp_ast;
- /** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */
- struct list_head l_rk_ast;
-
- /**
- * Pointer to a conflicting lock that caused blocking AST to be sent
- * for this lock
- */
- struct ldlm_lock *l_blocking_lock;
-
- /**
- * Protected by lr_lock, linkages to "skip lists".
- * For more explanations of skip lists see ldlm/ldlm_inodebits.c
- */
- struct list_head l_sl_mode;
- struct list_head l_sl_policy;
-
- /** Reference tracking structure to debug leaked locks. */
- struct lu_ref l_reference;
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- /* Debugging stuff for bug 20498, for tracking export references. */
- /** number of export references taken */
- int l_exp_refs_nr;
- /** link all locks referencing one export */
- struct list_head l_exp_refs_link;
- /** referenced export object */
- struct obd_export *l_exp_refs_target;
-#endif
- /**
- * export blocking dlm lock list, protected by
- * l_export->exp_bl_list_lock.
-	 * Lock order of waiting_locks_spinlock, exp_bl_list_lock and res lock
-	 * is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock.
- */
- struct list_head l_exp_list;
-};
-
-/**
- * LDLM resource description.
- * Basically, resource is a representation for a single object.
- * Object has a name which is currently 4 64-bit integers. LDLM user is
- * responsible for creation of a mapping between objects it wants to be
- * protected and resource names.
- *
- * A resource can only hold locks of a single lock type, though there may be
- * multiple ldlm_locks on a single resource, depending on the lock type and
- * whether the locks are conflicting or not.
- */
-struct ldlm_resource {
- struct ldlm_ns_bucket *lr_ns_bucket;
-
- /**
- * List item for list in namespace hash.
- * protected by ns_lock
- */
- struct hlist_node lr_hash;
-
- /** Spinlock to protect locks under this resource. */
- spinlock_t lr_lock;
-
- /**
- * protected by lr_lock
- * @{ */
- /** List of locks in granted state */
- struct list_head lr_granted;
- /** List of locks waiting to change their granted mode (converted) */
- struct list_head lr_converting;
- /**
- * List of locks that could not be granted due to conflicts and
- * that are waiting for conflicts to go away */
- struct list_head lr_waiting;
- /** @} */
-
- /* XXX No longer needed? Remove ASAP */
- ldlm_mode_t lr_most_restr;
-
- /** Type of locks this resource can hold. Only one type per resource. */
- ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
-
- /** Resource name */
- struct ldlm_res_id lr_name;
- /** Reference count for this resource */
- atomic_t lr_refcount;
-
- /**
- * Interval trees (only for extent locks) for all modes of this resource
- */
- struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];
-
- /**
- * Server-side-only lock value block elements.
- * To serialize lvbo_init.
- */
- struct mutex lr_lvb_mutex;
- int lr_lvb_len;
- /** protected by lr_lock */
- void *lr_lvb_data;
-
- /** When the resource was considered as contended. */
- unsigned long lr_contention_time;
- /** List of references to this resource. For debugging. */
- struct lu_ref lr_reference;
-
- struct inode *lr_lvb_inode;
-};
-
-static inline bool ldlm_has_layout(struct ldlm_lock *lock)
-{
- return lock->l_resource->lr_type == LDLM_IBITS &&
- lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT;
-}
-
-static inline char *
-ldlm_ns_name(struct ldlm_namespace *ns)
-{
- return ns->ns_rs_hash->hs_name;
-}
-
-static inline struct ldlm_namespace *
-ldlm_res_to_ns(struct ldlm_resource *res)
-{
- return res->lr_ns_bucket->nsb_namespace;
-}
-
-static inline struct ldlm_namespace *
-ldlm_lock_to_ns(struct ldlm_lock *lock)
-{
- return ldlm_res_to_ns(lock->l_resource);
-}
-
-static inline char *
-ldlm_lock_to_ns_name(struct ldlm_lock *lock)
-{
- return ldlm_ns_name(ldlm_lock_to_ns(lock));
-}
-
-static inline struct adaptive_timeout *
-ldlm_lock_to_ns_at(struct ldlm_lock *lock)
-{
- return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
-}
-
-static inline int ldlm_lvbo_init(struct ldlm_resource *res)
-{
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-
- if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL)
- return ns->ns_lvbo->lvbo_init(res);
-
- return 0;
-}
-
-static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL)
- return ns->ns_lvbo->lvbo_size(lock);
-
- return 0;
-}
-
-static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- if (ns->ns_lvbo != NULL) {
- LASSERT(ns->ns_lvbo->lvbo_fill != NULL);
- return ns->ns_lvbo->lvbo_fill(lock, buf, len);
- }
- return 0;
-}
-
-struct ldlm_ast_work {
- struct ldlm_lock *w_lock;
- int w_blocking;
- struct ldlm_lock_desc w_desc;
- struct list_head w_list;
- int w_flags;
- void *w_data;
- int w_datalen;
-};
-
-/**
- * Common ldlm_enqueue parameters
- */
-struct ldlm_enqueue_info {
- __u32 ei_type; /** Type of the lock being enqueued. */
- __u32 ei_mode; /** Mode of the lock being enqueued. */
- void *ei_cb_bl; /** blocking lock callback */
- void *ei_cb_cp; /** lock completion callback */
- void *ei_cb_gl; /** lock glimpse callback */
- void *ei_cbdata; /** Data to be passed into callbacks. */
-};
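-
-/*
- * Editor's note: an illustrative sketch, not part of the original header,
- * of how a caller might fill struct ldlm_enqueue_info before calling
- * ldlm_cli_enqueue() (declared later in this file).  The helper name is
- * hypothetical; LDLM_IBITS and LCK_CR are the LDLM type/mode constants
- * defined elsewhere in the LDLM headers.
- */
-static inline void example_init_einfo(struct ldlm_enqueue_info *einfo,
-				      ldlm_blocking_callback bl_cb,
-				      ldlm_completion_callback cp_cb)
-{
-	einfo->ei_type	 = LDLM_IBITS;	/* inodebits lock */
-	einfo->ei_mode	 = LCK_CR;	/* concurrent read */
-	einfo->ei_cb_bl	 = bl_cb;	/* blocking AST handler */
-	einfo->ei_cb_cp	 = cp_cb;	/* completion AST handler */
-	einfo->ei_cb_gl	 = NULL;	/* no glimpse handler */
-	einfo->ei_cbdata = NULL;
-}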
-
-extern struct obd_ops ldlm_obd_ops;
-
-extern char *ldlm_lockname[];
-extern char *ldlm_typename[];
-char *ldlm_it2str(int it);
-
-/**
- * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
- * For the cases where we do not have an actual lock to print along
- * with a debugging message that is ldlm-related.
- */
-#define LDLM_DEBUG_NOLOCK(format, a...) \
- CDEBUG(D_DLMTRACE, "### " format "\n", ##a)
-
-/**
- * Support function for lock information printing into debug logs.
- * \see LDLM_DEBUG
- */
-#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do { \
- CFS_CHECK_STACK(msgdata, mask, cdls); \
- \
- if (((mask) & D_CANTMASK) != 0 || \
- ((libcfs_debug & (mask)) != 0 && \
- (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
- _ldlm_lock_debug(lock, msgdata, fmt, ##a); \
-} while (0)
-
-void _ldlm_lock_debug(struct ldlm_lock *lock,
- struct libcfs_debug_msg_data *data,
- const char *fmt, ...)
- __printf(3, 4);
-
-/**
- * Rate-limited version of lock printing function.
- */
-#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
- static struct cfs_debug_limit_state _ldlm_cdls; \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls); \
- ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt, ##a);\
-} while (0)
-
-#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
-#define LDLM_WARN(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
-
-/** Non-rate-limited lock printing function for debugging purposes. */
-#define LDLM_DEBUG(lock, fmt, a...) do { \
- if (likely(lock != NULL)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \
- ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \
- "### " fmt, ##a); \
- } else { \
- LDLM_DEBUG_NOLOCK("no dlm lock: " fmt, ##a); \
- } \
-} while (0)
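-
-/*
- * Editor's note: illustrative usage of the debugging macros above, not part
- * of the original header; the function name is hypothetical.
- */
-static inline void example_debug_enqueue_result(struct ldlm_lock *lock, int rc)
-{
-	if (rc != 0)
-		LDLM_ERROR(lock, "enqueue failed: rc = %d", rc);
-	else
-		LDLM_DEBUG(lock, "enqueue granted");
-}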
-
-typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
- struct list_head *work_list);
-
-/**
- * Return values for lock iterators.
- * Also used during deciding of lock grants and cancellations.
- */
-#define LDLM_ITER_CONTINUE 1 /* keep iterating */
-#define LDLM_ITER_STOP 2 /* stop iterating */
-
-typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
-typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
-
-/** \defgroup ldlm_iterator Lock iterators
- *
- * LDLM provides for a way to iterate through every lock on a resource or
- * namespace or every resource in a namespace.
- * @{ */
-int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
- void *closure);
-void ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
- void *closure);
-int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
- ldlm_iterator_t iter, void *data);
-/** @} ldlm_iterator */
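-
-/*
- * Editor's note: an illustrative iterator callback, not part of the original
- * header, suitable for ldlm_resource_foreach().  Returning LDLM_ITER_CONTINUE
- * keeps walking; LDLM_ITER_STOP would end the walk early.  The name is
- * hypothetical.
- */
-static int example_count_granted(struct ldlm_lock *lock, void *closure)
-{
-	int *count = closure;
-
-	if (lock->l_granted_mode == lock->l_req_mode)
-		(*count)++;
-	return LDLM_ITER_CONTINUE;
-}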
-
-int ldlm_replay_locks(struct obd_import *imp);
-
-/* ldlm_flock.c */
-int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
-
-/* ldlm_extent.c */
-__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
-
-struct ldlm_callback_suite {
- ldlm_completion_callback lcs_completion;
- ldlm_blocking_callback lcs_blocking;
- ldlm_glimpse_callback lcs_glimpse;
-};
-
-/* ldlm_lockd.c */
-int ldlm_del_waiting_lock(struct ldlm_lock *lock);
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
-int ldlm_get_ref(void);
-void ldlm_put_ref(void);
-int ldlm_init_export(struct obd_export *exp);
-void ldlm_destroy_export(struct obd_export *exp);
-struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req);
-
-/* ldlm_lock.c */
-void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
-void ldlm_lock2handle(const struct ldlm_lock *lock,
- struct lustre_handle *lockh);
-struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
-void ldlm_cancel_callback(struct ldlm_lock *);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *);
-
-/**
- * Obtain a lock reference by its handle.
- */
-static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
-{
- return __ldlm_handle2lock(h, 0);
-}
-
-#define LDLM_LOCK_REF_DEL(lock) \
- lu_ref_del(&lock->l_reference, "handle", current)
-
-static inline struct ldlm_lock *
-ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
-{
- struct ldlm_lock *lock;
-
- lock = __ldlm_handle2lock(h, flags);
- if (lock != NULL)
- LDLM_LOCK_REF_DEL(lock);
- return lock;
-}
-
-/**
- * Update Lock Value Block Operations (LVBO) on a resource taking into account
- * data from request \a r
- */
-static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
- struct ptlrpc_request *r, int increase)
-{
- if (ldlm_res_to_ns(res)->ns_lvbo &&
- ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
- return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
- increase);
- }
- return 0;
-}
-
-int ldlm_error2errno(ldlm_error_t error);
-ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
- * confuses user-space. */
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-void ldlm_dump_export_locks(struct obd_export *exp);
-#endif
-
-/**
- * Release a temporary lock reference obtained by ldlm_handle2lock() or
- * __ldlm_handle2lock().
- */
-#define LDLM_LOCK_PUT(lock) \
-do { \
- LDLM_LOCK_REF_DEL(lock); \
- /*LDLM_DEBUG((lock), "put");*/ \
- ldlm_lock_put(lock); \
-} while (0)
-
-/**
- * Release a lock reference obtained by some other means (see
- * LDLM_LOCK_PUT()).
- */
-#define LDLM_LOCK_RELEASE(lock) \
-do { \
- /*LDLM_DEBUG((lock), "put");*/ \
- ldlm_lock_put(lock); \
-} while (0)
-
-#define LDLM_LOCK_GET(lock) \
-({ \
- ldlm_lock_get(lock); \
- /*LDLM_DEBUG((lock), "get");*/ \
- lock; \
-})
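-
-/*
- * Editor's note: an illustrative round trip, not part of the original header:
- * convert a lock to its opaque handle and back, dropping the reference taken
- * by the lookup.  The function name is hypothetical.
- */
-static inline void example_handle_round_trip(struct ldlm_lock *lock)
-{
-	struct lustre_handle lockh;
-	struct ldlm_lock *found;
-
-	ldlm_lock2handle(lock, &lockh);
-	found = ldlm_handle2lock(&lockh);	/* may be NULL if lock is gone */
-	if (found != NULL)
-		LDLM_LOCK_PUT(found);
-}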
-
-#define ldlm_lock_list_put(head, member, count) \
-({ \
- struct ldlm_lock *_lock, *_next; \
- int c = count; \
- list_for_each_entry_safe(_lock, _next, head, member) { \
- if (c-- == 0) \
- break; \
- list_del_init(&_lock->member); \
- LDLM_LOCK_RELEASE(_lock); \
- } \
- LASSERT(c <= 0); \
-})
-
-struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
-void ldlm_lock_put(struct ldlm_lock *lock);
-void ldlm_lock_destroy(struct ldlm_lock *lock);
-void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
-void ldlm_lock_fail_match(struct ldlm_lock *lock);
-void ldlm_lock_allow_match(struct ldlm_lock *lock);
-void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *, ldlm_type_t type,
- ldlm_policy_data_t *, ldlm_mode_t mode,
- struct lustre_handle *, int unref);
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
- __u64 *bits);
-struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
- __u32 *flags);
-void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
-void ldlm_lock_cancel(struct ldlm_lock *lock);
-void ldlm_reprocess_all(struct ldlm_resource *res);
-void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
-void ldlm_lock_dump_handle(int level, struct lustre_handle *);
-void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
-
-/* resource.c */
-struct ldlm_namespace *
-ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client, ldlm_appetite_t apt,
- ldlm_ns_type_t ns_type);
-int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
-void ldlm_namespace_free(struct ldlm_namespace *ns,
- struct obd_import *imp, int force);
-void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
-void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
-void ldlm_namespace_get(struct ldlm_namespace *ns);
-void ldlm_namespace_put(struct ldlm_namespace *ns);
-int ldlm_debugfs_setup(void);
-void ldlm_debugfs_cleanup(void);
-
-/* resource.c - internal */
-struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
- struct ldlm_resource *parent,
- const struct ldlm_res_id *,
- ldlm_type_t type, int create);
-struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
-int ldlm_resource_putref(struct ldlm_resource *res);
-void ldlm_resource_add_lock(struct ldlm_resource *res,
- struct list_head *head,
- struct ldlm_lock *lock);
-void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
-void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
-void ldlm_namespace_dump(int level, struct ldlm_namespace *);
-void ldlm_resource_dump(int level, struct ldlm_resource *);
-int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
- const struct ldlm_res_id *);
-
-#define LDLM_RESOURCE_ADDREF(res) do { \
- lu_ref_add_atomic(&(res)->lr_reference, __func__, current); \
-} while (0)
-
-#define LDLM_RESOURCE_DELREF(res) do { \
- lu_ref_del(&(res)->lr_reference, __func__, current); \
-} while (0)
-
-/* ldlm_request.c */
-int ldlm_expired_completion_wait(void *data);
-/** \defgroup ldlm_local_ast Default AST handlers for local locks
- * These AST handlers are typically used for server-side local locks and are
- * also used by client-side lock handlers to perform the minimum level of
- * base processing.
- * @{ */
-int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
-int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag);
-int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
-int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
-int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
-/** @} ldlm_local_ast */
-
-/** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users.
- * These are typically used by client and server (*_local versions)
- * to obtain and release locks.
- * @{ */
-int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
- struct ldlm_enqueue_info *einfo,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
- void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
- struct lustre_handle *lockh, int async);
-int ldlm_prep_enqueue_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct list_head *cancels,
- int count);
-int ldlm_prep_elc_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- struct list_head *cancels, int count);
-
-struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len);
-int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
- const struct ldlm_request *dlm_req,
- const struct ldlm_callback_suite *cbs);
-int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
- __u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh, int rc);
-int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_type_t type, ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 *flags,
- ldlm_blocking_callback blocking,
- ldlm_completion_callback completion,
- ldlm_glimpse_callback glimpse,
- void *data, __u32 lvb_len, enum lvb_type lvb_type,
- const __u64 *client_cookie,
- struct lustre_handle *lockh);
-int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
- void *data, __u32 data_len);
-int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
-int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_cli_cancel(struct lustre_handle *lockh,
- ldlm_cancel_flags_t cancel_flags);
-int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
- ldlm_cancel_flags_t flags, void *opaque);
-int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque);
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
- int count, ldlm_cancel_flags_t flags);
-int ldlm_cancel_resource_local(struct ldlm_resource *res,
- struct list_head *cancels,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque);
-int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags);
-int ldlm_cli_cancel_list(struct list_head *head, int count,
- struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
-/** @} ldlm_cli_api */
-
-/* mds/handler.c */
-/* This has to be here because recursive inclusion sucks. */
-int intent_disposition(struct ldlm_reply *rep, int flag);
-void intent_set_disposition(struct ldlm_reply *rep, int flag);
-
-
-/* ioctls for trying requests */
-#define IOC_LDLM_TYPE 'f'
-#define IOC_LDLM_MIN_NR 40
-
-#define IOC_LDLM_TEST _IOWR('f', 40, long)
-#define IOC_LDLM_DUMP _IOWR('f', 41, long)
-#define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
-#define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
-#define IOC_LDLM_MAX_NR 43
-
-/**
- * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
- * than one lock_res is dead-lock safe.
- */
-enum lock_res_type {
- LRT_NORMAL,
- LRT_NEW
-};
-
-/** Lock resource. */
-static inline void lock_res(struct ldlm_resource *res)
-{
- spin_lock(&res->lr_lock);
-}
-
-/** Lock resource with a way to instruct lockdep code about nestedness-safe. */
-static inline void lock_res_nested(struct ldlm_resource *res,
- enum lock_res_type mode)
-{
- spin_lock_nested(&res->lr_lock, mode);
-}
-
-/** Unlock resource. */
-static inline void unlock_res(struct ldlm_resource *res)
-{
- spin_unlock(&res->lr_lock);
-}
-
-/** Check if resource is already locked, assert if not. */
-static inline void check_res_locked(struct ldlm_resource *res)
-{
- assert_spin_locked(&res->lr_lock);
-}
-
-struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
-void unlock_res_and_lock(struct ldlm_lock *lock);
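-
-/*
- * Editor's note: an illustrative sketch, not part of the original header, of
- * the resource-locking pattern: take lr_lock around any walk of the lock
- * lists.  The function name is hypothetical; list_for_each() is the usual
- * kernel list helper.
- */
-static inline int example_count_waiting(struct ldlm_resource *res)
-{
-	struct list_head *tmp;
-	int count = 0;
-
-	lock_res(res);
-	list_for_each(tmp, &res->lr_waiting)
-		count++;
-	unlock_res(res);
-	return count;
-}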
-
-/* ldlm_pool.c */
-/** \defgroup ldlm_pools Various LDLM pool related functions
- * These are not used outside of ldlm.
- * @{
- */
-int ldlm_pools_recalc(ldlm_side_t client);
-int ldlm_pools_init(void);
-void ldlm_pools_fini(void);
-
-int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client);
-int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
- gfp_t gfp_mask);
-void ldlm_pool_fini(struct ldlm_pool *pl);
-int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
-int ldlm_pool_recalc(struct ldlm_pool *pl);
-__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
-__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
-__u64 ldlm_pool_get_clv(struct ldlm_pool *pl);
-__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
-void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
-void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
-void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
-void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
-void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
-/** @} */
-
-#endif
-/** @} LDLM */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
deleted file mode 100644
index 0d3ed87d38e1..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ /dev/null
@@ -1,467 +0,0 @@
-/* -*- buffer-read-only: t -*- vi: set ro:
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * Lustre is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-/**
- * \file lustre_dlm_flags.h
- * The flags and collections of flags (masks) for \see struct ldlm_lock.
- *
- * \addtogroup LDLM Lustre Distributed Lock Manager
- * @{
- *
- * \name flags
- * The flags and collections of flags (masks) for \see struct ldlm_lock.
- * @{
- */
-#ifndef LDLM_ALL_FLAGS_MASK
-
-/** l_flags bits marked as "all_flags" bits */
-#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F932FULL
-
-/** l_flags bits marked as "ast" bits */
-#define LDLM_FL_AST_MASK 0x0000000080008000ULL
-
-/** l_flags bits marked as "blocked" bits */
-#define LDLM_FL_BLOCKED_MASK 0x000000000000000EULL
-
-/** l_flags bits marked as "gone" bits */
-#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
-
-/** l_flags bits marked as "hide_lock" bits */
-#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
-
-/** l_flags bits marked as "inherit" bits */
-#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
-
-/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
-
-/** l_flags bits marked as "on_wire" bits */
-#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
-
-/** extent, mode, or resource changed */
-#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
-#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG((_l), 1ULL << 0)
-#define ldlm_set_lock_changed(_l) LDLM_SET_FLAG((_l), 1ULL << 0)
-#define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0)
-
-/**
- * Server placed lock on granted list, or a recovering client wants the
- * lock added to the granted list, no questions asked. */
-#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */
-#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1)
-#define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1)
-#define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1)
-
-/**
- * Server placed lock on conv list, or a recovering client wants the lock
- * added to the conv list, no questions asked. */
-#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */
-#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2)
-#define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2)
-#define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2)
-
-/**
- * Server placed lock on wait list, or a recovering client wants the lock
- * added to the wait list, no questions asked. */
-#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */
-#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3)
-#define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3)
-#define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3)
-
-/** blocking or cancel packet was queued for sending. */
-#define LDLM_FL_AST_SENT 0x0000000000000020ULL /* bit 5 */
-#define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG((_l), 1ULL << 5)
-#define ldlm_set_ast_sent(_l) LDLM_SET_FLAG((_l), 1ULL << 5)
-#define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5)
-
-/**
- * Lock is being replayed. This could probably be implied by the fact that
- * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
-#define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */
-#define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8)
-#define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8)
-#define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8)
-
-/** Don't grant lock, just do intent. */
-#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL /* bit 9 */
-#define ldlm_is_intent_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 9)
-#define ldlm_set_intent_only(_l) LDLM_SET_FLAG((_l), 1ULL << 9)
-#define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9)
-
-/** lock request has intent */
-#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL /* bit 12 */
-#define ldlm_is_has_intent(_l) LDLM_TEST_FLAG((_l), 1ULL << 12)
-#define ldlm_set_has_intent(_l) LDLM_SET_FLAG((_l), 1ULL << 12)
-#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
-
-/** flock deadlock detected */
-#define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL /* bit 15 */
-#define ldlm_is_flock_deadlock(_l) LDLM_TEST_FLAG((_l), 1ULL << 15)
-#define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG((_l), 1ULL << 15)
-#define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15)
-
-/** discard (no writeback) on cancel */
-#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL /* bit 16 */
-#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 16)
-#define ldlm_set_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 16)
-#define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16)
-
-/** Blocked by group lock - wait indefinitely */
-#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL /* bit 17 */
-#define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG((_l), 1ULL << 17)
-#define ldlm_set_no_timeout(_l) LDLM_SET_FLAG((_l), 1ULL << 17)
-#define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17)
-
-/**
- * Server told not to wait if blocked. For AGL, OST will not send glimpse
- * callback. */
-#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */
-#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18)
-#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18)
-#define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18)
-
-/** return blocking lock */
-#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL /* bit 19 */
-#define ldlm_is_test_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 19)
-#define ldlm_set_test_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 19)
-#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
-
-/**
- * Immediately cancel such locks when they block some other locks. Send
- * cancel notification to original lock holder, but expect no reply. This
- * is for clients (like liblustre) that cannot be expected to reliably
- * respond to blocking ASTs. */
-#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */
-#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23)
-#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23)
-#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
-
-/**
- * measure lock contention and return -EUSERS if locking contention is high */
-#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL /* bit 30 */
-#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG((_l), 1ULL << 30)
-#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG((_l), 1ULL << 30)
-#define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30)
-
-/**
- * These are flags that are mapped into the flags and ASTs of blocking
- * locks. Add FL_DISCARD to blocking ASTs. */
-#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL /* bit 31 */
-#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 31)
-#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 31)
-#define ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31)
-
-/**
- * Used for marking lock as a target for -EINTR while cp_ast sleep emulation
- * + race with upcoming bl_ast. */
-#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */
-#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32)
-#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32)
-#define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32)
-
-/**
- * Used while processing the unused list to know that we have already
- * handled this lock and decided to skip it. */
-#define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */
-#define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33)
-#define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33)
-#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33)
-
-/** this lock is being destroyed */
-#define LDLM_FL_CBPENDING 0x0000000400000000ULL /* bit 34 */
-#define ldlm_is_cbpending(_l) LDLM_TEST_FLAG((_l), 1ULL << 34)
-#define ldlm_set_cbpending(_l) LDLM_SET_FLAG((_l), 1ULL << 34)
-#define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34)
-
-/** not a real flag, not saved in lock */
-#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL /* bit 35 */
-#define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG((_l), 1ULL << 35)
-#define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG((_l), 1ULL << 35)
-#define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35)
-
-/** cancellation callback already run */
-#define LDLM_FL_CANCEL 0x0000001000000000ULL /* bit 36 */
-#define ldlm_is_cancel(_l) LDLM_TEST_FLAG((_l), 1ULL << 36)
-#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36)
-#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
-
-/** whatever it might mean */
-#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */
-#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37)
-#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37)
-#define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37)
-
-/** don't run the cancel callback under ldlm_cli_cancel_unused */
-#define LDLM_FL_FAILED 0x0000004000000000ULL /* bit 38 */
-#define ldlm_is_failed(_l) LDLM_TEST_FLAG((_l), 1ULL << 38)
-#define ldlm_set_failed(_l) LDLM_SET_FLAG((_l), 1ULL << 38)
-#define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38)
-
-/** lock cancel has already been sent */
-#define LDLM_FL_CANCELING 0x0000008000000000ULL /* bit 39 */
-#define ldlm_is_canceling(_l) LDLM_TEST_FLAG((_l), 1ULL << 39)
-#define ldlm_set_canceling(_l) LDLM_SET_FLAG((_l), 1ULL << 39)
-#define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39)
-
-/** local lock (ie, no srv/cli split) */
-#define LDLM_FL_LOCAL 0x0000010000000000ULL /* bit 40 */
-#define ldlm_is_local(_l) LDLM_TEST_FLAG((_l), 1ULL << 40)
-#define ldlm_set_local(_l) LDLM_SET_FLAG((_l), 1ULL << 40)
-#define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40)
-
-/**
- * XXX FIXME: This is being added to b_size as a low-risk fix to the
- * fact that the LVB filling happens _after_ the lock has been granted,
- * so another thread can match it before the LVB has been updated. As a
- * dirty hack, we set LDLM_FL_LVB_READY only after we've filled in the LVB.
- * This is only needed on LOV/OSC now, where the LVB is actually used and
- * callers must set it in input flags.
- *
- * The proper fix is to do the granting inside of the completion AST,
- * which can be replaced with a LVB-aware wrapping function for OSC locks.
- * That change is pretty high-risk, though, and would need a lot more
- * testing. */
-#define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */
-#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41)
-#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41)
-#define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41)
-
-/**
- * A lock contributes to the known minimum size (KMS) calculation until it
- * has finished the part of its cancellation that performs write back on its
- * dirty pages. It can remain on the granted list during this whole time.
- * Threads racing to update the KMS after performing their writeback need
- * to know to exclude each other's locks from the calculation as they walk
- * the granted list. */
-#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */
-#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42)
-#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42)
-#define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42)
-
-/** completion AST to be executed */
-#define LDLM_FL_CP_REQD 0x0000080000000000ULL /* bit 43 */
-#define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG((_l), 1ULL << 43)
-#define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG((_l), 1ULL << 43)
-#define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43)
-
-/** cleanup_resource has already handled the lock */
-#define LDLM_FL_CLEANED 0x0000100000000000ULL /* bit 44 */
-#define ldlm_is_cleaned(_l) LDLM_TEST_FLAG((_l), 1ULL << 44)
-#define ldlm_set_cleaned(_l) LDLM_SET_FLAG((_l), 1ULL << 44)
-#define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44)
-
-/**
- * Optimization hint: LDLM can run the blocking callback from the current
- * context without involving a separate thread, in order to decrease the
- * context-switch rate. */
-#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */
-#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45)
-#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45)
-#define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45)
-
-/**
- * It may happen that a client initiates two operations, e.g. unlink and
- * mkdir, such that the server sends a blocking AST for conflicting locks
- * to this client for the first operation, whereas the second operation
- * has canceled this lock and is waiting for rpc_lock which is taken by
- * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
- * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
- * dropped to let ldlm_callback_handler() return EINVAL to the server. It
- * is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC. */
-#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
-#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
-#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
-#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
-
-/** whatever it might mean */
-#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */
-#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47)
-#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47)
-#define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47)
-
-/**
- * Don't put lock into the LRU list, so that it is not canceled due
- * to aging. Used by MGC locks, they are cancelled only at unmount or
- * by callback. */
-#define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */
-#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48)
-#define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48)
-#define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48)
-
-/**
- * Set for locks that failed and where the server has been notified.
- *
- * Protected by lock and resource locks. */
-#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */
-#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49)
-#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49)
-#define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49)
-
-/**
- * Set for locks that were removed from class hash table and will
- * be destroyed when last reference to them is released. Set by
- * ldlm_lock_destroy_internal().
- *
- * Protected by lock and resource locks. */
-#define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */
-#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50)
-#define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50)
-#define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50)
-
-/** flag whether this is a server namespace lock */
-#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL /* bit 51 */
-#define ldlm_is_server_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 51)
-#define ldlm_set_server_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 51)
-#define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51)
-
-/**
- * It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
- *
- * NB: compared with check_res_locked(), checking this bit is cheaper.
- * Also, spin_is_locked() is deprecated for kernel code; one reason is
- * because it works only for SMP so user needs to add extra macros like
- * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
-#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */
-#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52)
-#define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52)
-#define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52)
-
-/**
- * It's set once we call ldlm_add_waiting_lock_res_locked() to start the
- * lock-timeout timer and it will never be reset.
- *
- * Protected by lock and resource locks. */
-#define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */
-#define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53)
-#define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53)
-#define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53)
-
-/** Flag whether this is a server namespace lock. */
-#define LDLM_FL_NS_SRV 0x0040000000000000ULL /* bit 54 */
-#define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG((_l), 1ULL << 54)
-#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG((_l), 1ULL << 54)
-#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
-
-/** Flag whether this lock can be reused. Used by exclusive open. */
-#define LDLM_FL_EXCL 0x0080000000000000ULL /* bit 55 */
-#define ldlm_is_excl(_l) LDLM_TEST_FLAG((_l), 1ULL << 55)
-#define ldlm_set_excl(_l) LDLM_SET_FLAG((_l), 1ULL << 55)
-#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)
-
-/** test for ldlm_lock flag bit set */
-#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
-
-/** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
-
-/** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
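A hedged usage sketch (editor-added, not part of the removed header): the per-flag wrappers above are thin aliases over these three generic helpers, shown here for bit 45 (LDLM_FL_ATOMIC_CB); the helper name is hypothetical.

/* Sketch only: 'lock' is assumed to be a valid struct ldlm_lock pointer
 * with an l_flags member, which is all the generic helpers require. */
static inline void example_toggle_atomic_cb(struct ldlm_lock *lock)
{
	if (!LDLM_TEST_FLAG(lock, 1ULL << 45))		/* ldlm_is_atomic_cb(lock) */
		LDLM_SET_FLAG(lock, 1ULL << 45);	/* ldlm_set_atomic_cb(lock) */
	else
		LDLM_CLEAR_FLAG(lock, 1ULL << 45);	/* ldlm_clear_atomic_cb(lock) */
}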
-
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
-
-/** @} subgroup */
-/** @} group */
-#ifdef WIRESHARK_COMPILE
-static int hf_lustre_ldlm_fl_lock_changed = -1;
-static int hf_lustre_ldlm_fl_block_granted = -1;
-static int hf_lustre_ldlm_fl_block_conv = -1;
-static int hf_lustre_ldlm_fl_block_wait = -1;
-static int hf_lustre_ldlm_fl_ast_sent = -1;
-static int hf_lustre_ldlm_fl_replay = -1;
-static int hf_lustre_ldlm_fl_intent_only = -1;
-static int hf_lustre_ldlm_fl_has_intent = -1;
-static int hf_lustre_ldlm_fl_flock_deadlock = -1;
-static int hf_lustre_ldlm_fl_discard_data = -1;
-static int hf_lustre_ldlm_fl_no_timeout = -1;
-static int hf_lustre_ldlm_fl_block_nowait = -1;
-static int hf_lustre_ldlm_fl_test_lock = -1;
-static int hf_lustre_ldlm_fl_cancel_on_block = -1;
-static int hf_lustre_ldlm_fl_deny_on_contention = -1;
-static int hf_lustre_ldlm_fl_ast_discard_data = -1;
-static int hf_lustre_ldlm_fl_fail_loc = -1;
-static int hf_lustre_ldlm_fl_skipped = -1;
-static int hf_lustre_ldlm_fl_cbpending = -1;
-static int hf_lustre_ldlm_fl_wait_noreproc = -1;
-static int hf_lustre_ldlm_fl_cancel = -1;
-static int hf_lustre_ldlm_fl_local_only = -1;
-static int hf_lustre_ldlm_fl_failed = -1;
-static int hf_lustre_ldlm_fl_canceling = -1;
-static int hf_lustre_ldlm_fl_local = -1;
-static int hf_lustre_ldlm_fl_lvb_ready = -1;
-static int hf_lustre_ldlm_fl_kms_ignore = -1;
-static int hf_lustre_ldlm_fl_cp_reqd = -1;
-static int hf_lustre_ldlm_fl_cleaned = -1;
-static int hf_lustre_ldlm_fl_atomic_cb = -1;
-static int hf_lustre_ldlm_fl_bl_ast = -1;
-static int hf_lustre_ldlm_fl_bl_done = -1;
-static int hf_lustre_ldlm_fl_no_lru = -1;
-static int hf_lustre_ldlm_fl_fail_notified = -1;
-static int hf_lustre_ldlm_fl_destroyed = -1;
-static int hf_lustre_ldlm_fl_server_lock = -1;
-static int hf_lustre_ldlm_fl_res_locked = -1;
-static int hf_lustre_ldlm_fl_waited = -1;
-static int hf_lustre_ldlm_fl_ns_srv = -1;
-static int hf_lustre_ldlm_fl_excl = -1;
-
-const value_string lustre_ldlm_flags_vals[] = {
- {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
- {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
- {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
- {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
- {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
- {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
- {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
- {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
- {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
- {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
- {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
- {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
- {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
- {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
- {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
- {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
- {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
- {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
- {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
- {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
- {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
- {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
- {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
- {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
- {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
- {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
- {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
- {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
- {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
- {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
- {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
- {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
- {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
- {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
- {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
- {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
- {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
- {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
- {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
- {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
- { 0, NULL }
-};
-#endif /* WIRESHARK_COMPILE */
-#endif /* LDLM_ALL_FLAGS_MASK */
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
deleted file mode 100644
index 499f9c8e1ae1..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_eacl.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lustre/include/lustre_idmap.h
- *
- * MDS data structures.
- * See also lustre_idl.h for wire formats of requests.
- */
-
-#ifndef _LUSTRE_EACL_H
-#define _LUSTRE_EACL_H
-
-/** \defgroup eacl eacl
- *
- * @{
- */
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#include <linux/posix_acl_xattr.h>
-
-typedef struct {
- __u16 e_tag;
- __u16 e_perm;
- __u32 e_id;
- __u32 e_stat;
-} ext_acl_xattr_entry;
-
-typedef struct {
- __u32 a_count;
- ext_acl_xattr_entry a_entries[0];
-} ext_acl_xattr_header;
-
-#define CFS_ACL_XATTR_SIZE(count, prefix) \
- (sizeof(prefix ## _header) + (count) * sizeof(prefix ## _entry))
-
-#define CFS_ACL_XATTR_COUNT(size, prefix) \
- (((size) - sizeof(prefix ## _header)) / sizeof(prefix ## _entry))
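A hedged illustration (editor-added) of how these size/count macros pair with the ext_acl_xattr typedefs above; the helper name and its 'count' argument are hypothetical.

/* Sketch only: byte size of an extended-ACL xattr blob holding 'count' entries,
 * i.e. sizeof(ext_acl_xattr_header) + count * sizeof(ext_acl_xattr_entry). */
static inline size_t example_ext_acl_size(__u32 count)
{
	return CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
}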
-
-
-extern ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
-extern int
-lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
- posix_acl_xattr_header **out);
-extern void
-lustre_posix_acl_xattr_free(posix_acl_xattr_header *header, int size);
-extern void
-lustre_ext_acl_xattr_free(ext_acl_xattr_header *header);
-extern ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
- ext_acl_xattr_header *ext_header);
-
-#endif /* CONFIG_FS_POSIX_ACL */
-
-/** @} eacl */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
deleted file mode 100644
index 7634deafa9fa..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/** \defgroup obd_export PortalRPC export definitions
- *
- * @{
- */
-
-#ifndef __EXPORT_H
-#define __EXPORT_H
-
-/** \defgroup export export
- *
- * @{
- */
-
-#include "lprocfs_status.h"
-#include "lustre/lustre_idl.h"
-#include "lustre_dlm.h"
-
-struct mds_client_data;
-struct mdt_client_data;
-struct mds_idmap_table;
-struct mdt_idmap_table;
-
-/**
- * Target-specific export data
- */
-struct tg_export_data {
- /** Protects ted_lcd below */
- struct mutex ted_lcd_lock;
- /** Per-client data for each export */
- struct lsd_client_data *ted_lcd;
- /** Offset of record in last_rcvd file */
- loff_t ted_lr_off;
- /** Client index in last_rcvd file */
- int ted_lr_idx;
-};
-
-/**
- * MDT-specific export data
- */
-struct mdt_export_data {
- struct tg_export_data med_ted;
- /** List of all files opened by client on this MDT */
- struct list_head med_open_head;
- spinlock_t med_open_lock; /* med_open_head, mfd_list */
- /** Bitmask of all ibit locks this MDT understands */
- __u64 med_ibits_known;
- struct mutex med_idmap_mutex;
- struct lustre_idmap_table *med_idmap;
-};
-
-struct ec_export_data { /* echo client */
- struct list_head eced_locks;
-};
-
-/* In-memory access to client data from OST struct */
-/** Filter (oss-side) specific import data */
-struct filter_export_data {
- struct tg_export_data fed_ted;
- spinlock_t fed_lock; /**< protects fed_mod_list */
- long fed_dirty; /* in bytes */
- long fed_grant; /* in bytes */
- struct list_head fed_mod_list; /* files being modified */
- int fed_mod_count; /* items in fed_mod_list */
- long fed_pending; /* bytes just being written */
- __u32 fed_group;
- __u8 fed_pagesize; /* log2 of client page size */
-};
-
-struct mgs_export_data {
- struct list_head med_clients; /* mgc fs client via this exp */
- spinlock_t med_lock; /* protect med_clients */
-};
-
-enum obd_option {
- OBD_OPT_FORCE = 0x0001,
- OBD_OPT_FAILOVER = 0x0002,
- OBD_OPT_ABORT_RECOV = 0x0004,
-};
-
-/**
- * Export structure. Represents target-side of connection in portals.
- * Also used in Lustre to connect between layers on the same node when
- * there is no network-connection in-between.
- * For every connected client there is an export structure on the server
- * attached to the same obd device.
- */
-struct obd_export {
- /**
- * Export handle, it's id is provided to client on connect
- * Subsequent client RPCs contain this handle id to identify
- * what export they are talking to.
- */
- struct portals_handle exp_handle;
- atomic_t exp_refcount;
- /**
- * Set of counters below is to track where export references are
- * kept. The exp_rpc_count is used for reconnect handling also,
- * the cb_count and locks_count are for debug purposes only for now.
- * The sum of them should be less than exp_refcount by 3
- */
- atomic_t exp_rpc_count; /* RPC references */
- atomic_t exp_cb_count; /* Commit callback references */
- /** Number of queued replay requests to be processed */
- atomic_t exp_replay_count;
- atomic_t exp_locks_count; /** Lock references */
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- struct list_head exp_locks_list;
- spinlock_t exp_locks_list_guard;
-#endif
- /** UUID of client connected to this export */
- struct obd_uuid exp_client_uuid;
- /** To link all exports on an obd device */
- struct list_head exp_obd_chain;
- struct hlist_node exp_uuid_hash; /** uuid-export hash */
- struct hlist_node exp_nid_hash; /** nid-export hash */
- /** Obd device of this export */
- struct obd_device *exp_obd;
- /**
- * "reverse" import to send requests (e.g. from ldlm) back to client
- * exp_lock protect its change
- */
- struct obd_import *exp_imp_reverse;
- struct lprocfs_stats *exp_md_stats;
- /** Active connection */
- struct ptlrpc_connection *exp_connection;
- /** Connection count value from last successful reconnect rpc */
- __u32 exp_conn_cnt;
- /** Hash list of all ldlm locks granted on this export */
- struct cfs_hash *exp_lock_hash;
- /**
- * Hash list for Posix lock deadlock detection, added with
- * ldlm_lock::l_exp_flock_hash.
- */
- struct cfs_hash *exp_flock_hash;
- struct list_head exp_outstanding_replies;
- struct list_head exp_uncommitted_replies;
- spinlock_t exp_uncommitted_replies_lock;
- /** Last committed transno for this export */
- __u64 exp_last_committed;
- /** On replay all requests waiting for replay are linked here */
- struct list_head exp_req_replay_queue;
- /**
- * protects exp_flags, exp_outstanding_replies and the change
- * of exp_imp_reverse
- */
- spinlock_t exp_lock;
- /** Compatibility flags for this export are embedded into
- * exp_connect_data */
- struct obd_connect_data exp_connect_data;
- enum obd_option exp_flags;
- unsigned long exp_failed:1,
- exp_in_recovery:1,
- exp_disconnected:1,
- exp_connecting:1,
- /** VBR: export missed recovery */
- exp_delayed:1,
- /** VBR: failed version checking */
- exp_vbr_failed:1,
- exp_req_replay_needed:1,
- exp_lock_replay_needed:1,
- exp_need_sync:1,
- exp_flvr_changed:1,
- exp_flvr_adapt:1,
- exp_libclient:1, /* liblustre client? */
- /* client timed out and tried to reconnect,
- * but couldn't because of active rpcs */
- exp_abort_active_req:1,
- /* whether to swap nidtbl entries for 2.2 clients.
- * Only used by the MGS to fix LU-1644. */
- exp_need_mne_swab:1;
- /* also protected by exp_lock */
- enum lustre_sec_part exp_sp_peer;
- struct sptlrpc_flavor exp_flvr; /* current */
- struct sptlrpc_flavor exp_flvr_old[2]; /* about-to-expire */
- time64_t exp_flvr_expire[2]; /* seconds */
-
- /** protects exp_hp_rpcs */
- spinlock_t exp_rpc_lock;
- struct list_head exp_hp_rpcs; /* (potential) HP RPCs */
-
- /** blocking dlm lock list, protected by exp_bl_list_lock */
- struct list_head exp_bl_list;
- spinlock_t exp_bl_list_lock;
-
- /** Target specific data */
- union {
- struct tg_export_data eu_target_data;
- struct mdt_export_data eu_mdt_data;
- struct filter_export_data eu_filter_data;
- struct ec_export_data eu_ec_data;
- struct mgs_export_data eu_mgs_data;
- } u;
-};
-
-#define exp_target_data u.eu_target_data
-#define exp_mdt_data u.eu_mdt_data
-#define exp_filter_data u.eu_filter_data
-#define exp_ec_data u.eu_ec_data
-
-static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp)
-{
- return &exp->exp_connect_data.ocd_connect_flags;
-}
-
-static inline __u64 exp_connect_flags(struct obd_export *exp)
-{
- return *exp_connect_flags_ptr(exp);
-}
-
-static inline int exp_max_brw_size(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE)
- return exp->exp_connect_data.ocd_brw_size;
-
- return ONE_MB_BRW_SIZE;
-}
-
-static inline int exp_connect_multibulk(struct obd_export *exp)
-{
- return exp_max_brw_size(exp) > ONE_MB_BRW_SIZE;
-}
-
-static inline int exp_connect_cancelset(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET);
-}
-
-static inline int exp_connect_lru_resize(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
-}
-
-static inline int exp_connect_rmtclient(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT);
-}
-
-static inline int client_is_remote(struct obd_export *exp)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
-
- return !!(imp->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_RMT_CLIENT);
-}
-
-static inline int exp_connect_vbr(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- LASSERT(exp->exp_connection);
- return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
-}
-
-static inline int exp_connect_som(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM);
-}
-
-static inline int exp_connect_umask(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_UMASK);
-}
-
-static inline int imp_connect_lru_resize(struct obd_import *imp)
-{
- struct obd_connect_data *ocd;
-
- LASSERT(imp != NULL);
- ocd = &imp->imp_connect_data;
- return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE);
-}
-
-static inline int exp_connect_layout(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_LAYOUTLOCK);
-}
-
-static inline bool exp_connect_lvb_type(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE)
- return true;
- else
- return false;
-}
-
-static inline bool imp_connect_lvb_type(struct obd_import *imp)
-{
- struct obd_connect_data *ocd;
-
- LASSERT(imp != NULL);
- ocd = &imp->imp_connect_data;
- if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE)
- return true;
- else
- return false;
-}
-
-static inline __u64 exp_connect_ibits(struct obd_export *exp)
-{
- struct obd_connect_data *ocd;
-
- ocd = &exp->exp_connect_data;
- return ocd->ocd_ibits_known;
-}
-
-static inline bool imp_connect_disp_stripe(struct obd_import *imp)
-{
- struct obd_connect_data *ocd;
-
- LASSERT(imp != NULL);
- ocd = &imp->imp_connect_data;
- return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
-}
-
-struct obd_export *class_conn2export(struct lustre_handle *conn);
-
-/** @} export */
-
-#endif /* __EXPORT_H */
-/** @} obd_export */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
deleted file mode 100644
index 22fc96ef25b4..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ /dev/null
@@ -1,726 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_fid.h
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#ifndef __LUSTRE_FID_H
-#define __LUSTRE_FID_H
-
-/** \defgroup fid fid
- *
- * @{
- *
- * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
- * describes the FID namespace and interoperability requirements for FIDs.
- * The important parts of that document are included here for reference.
- *
- * FID
- * File IDentifier generated by client from range allocated by the SEQuence
- * service and stored in struct lu_fid. The FID is composed of three parts:
- * SEQuence, ObjectID, and VERsion. The SEQ component is a filesystem
- * unique 64-bit integer, and only one client is ever assigned any SEQ value.
- * The first 0x400 FID_SEQ_NORMAL [2^33, 2^33 + 0x400] values are reserved
- * for system use. The OID component is a 32-bit value generated by the
- * client on a per-SEQ basis to allow creating many unique FIDs without
- * communication with the server. The VER component is a 32-bit value that
- * distinguishes between different FID instantiations, such as snapshots or
- * separate subtrees within the filesystem. FIDs with the same VER field
- * are considered part of the same namespace.
- *
- * OLD filesystems are those upgraded from Lustre 1.x that predate FIDs, and
- * MDTs use 32-bit ldiskfs internal inode/generation numbers (IGIFs), while
- * OSTs use 64-bit Lustre object IDs and generation numbers.
- *
- * NEW filesystems are those formatted since the introduction of FIDs.
- *
- * IGIF
- * Inode and Generation In FID, a surrogate FID used to globally identify
- * an existing object on OLD formatted MDT file system. This would only be
- * used on MDT0 in a DNE filesystem, because there cannot be more than one
- * MDT in an OLD formatted filesystem. Belongs to sequence in [12, 2^32 - 1]
- * range, where inode number is stored in SEQ, and inode generation is in OID.
- * NOTE: This assumes no more than 2^32-1 inodes exist in the MDT filesystem,
- * which is the maximum possible for an ldiskfs backend. It also assumes
- * that the reserved ext3/ext4/ldiskfs inode numbers [0-11] are never visible
- * to clients, which has always been true.
- *
- * IDIF
- * object ID In FID, a surrogate FID used to globally identify an existing
- * OST object on OLD formatted OST file system. Belongs to a sequence in
- * [2^32, 2^33 - 1]. Sequence number is calculated as:
- *
- * 1 << 32 | (ost_index << 16) | ((objid >> 32) & 0xffff)
- *
- * that is, SEQ consists of 16-bit OST index, and higher 16 bits of object
- * ID. The generation of unique SEQ values per OST allows the IDIF FIDs to
- * be identified in the FLD correctly. The OID field is calculated as:
- *
- * objid & 0xffffffff
- *
- * that is, it consists of lower 32 bits of object ID. For objects within
- * the IDIF range, object ID extraction will be:
- *
- * o_id = (fid->f_seq & 0x7fff) << 16 | fid->f_oid;
- * o_seq = 0; // formerly group number
- *
- * NOTE: This assumes that no more than 2^48-1 objects have ever been created
- * on any OST, and that no more than 65535 OSTs are in use. Both are very
- * reasonable assumptions, i.e. an IDIF can uniquely map all objects assuming
- * a maximum creation rate of 1M objects per second for a maximum of 9 years,
- * or combinations thereof.
- *
- * OST_MDT0
- * Surrogate FID used to identify an existing object on OLD formatted OST
- * filesystem. Belongs to the reserved SEQuence 0, and is used prior to
- * the introduction of FID-on-OST, at which point IDIF will be used to
- * identify objects as residing on a specific OST.
- *
- * LLOG
- * For Lustre Log objects the object sequence 1 is used. This is compatible
- * with both OLD and NEW namespaces, as this SEQ number is in the
- * ext3/ldiskfs reserved inode range and does not conflict with IGIF
- * sequence numbers.
- *
- * ECHO
- * For testing OST IO performance the object sequence 2 is used. This is
- * compatible with both OLD and NEW namespaces, as this SEQ number is in
- * the ext3/ldiskfs reserved inode range and does not conflict with IGIF
- * sequence numbers.
- *
- * OST_MDT1 .. OST_MAX
- * For testing with multiple MDTs the object sequence 3 through 9 is used,
- * allowing direct mapping of MDTs 1 through 7 respectively, for a total
- * of 8 MDTs including OST_MDT0. This matches the legacy CMD project "group"
- * mappings. However, this SEQ range is only for testing prior to any
- * production DNE release, as the objects in this range conflict across all
- * OSTs, as the OST index is not part of the FID. For production DNE usage,
- * OST objects created by MDT1+ will use FID_SEQ_NORMAL FIDs.
- *
- * DLM OST objid to IDIF mapping
- * For compatibility with existing OLD OST network protocol structures, the
- * FID must map onto the o_id and o_seq in a manner that ensures existing
- * objects are identified consistently for IO, as well as onto the LDLM
- * namespace to ensure that for IDIFs there is only a single resource name for any
- * object in the DLM. The OLD OST object DLM resource mapping is:
- *
- * resource[] = {o_id, o_seq, 0, 0}; // o_seq == 0 for production releases
- *
- * The NEW OST object DLM resource mapping is the same for both MDT and OST:
- *
- * resource[] = {SEQ, OID, VER, HASH};
- *
- * NOTE: for mapping IDIF values to DLM resource names the o_id may be
- * larger than the 2^33 reserved sequence numbers for IDIF, so it is possible
- * for the o_id numbers to overlap FID SEQ numbers in the resource. However,
- * in all production releases the OLD o_seq field is always zero, and all
- * valid FID OID values are non-zero, so the lock resources will not collide.
- * Even so, the MDT and OST resources are also in different LDLM namespaces.
- */
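As a hedged illustration of the IDIF mapping described above (editor-added; the helper name and its ost_index/objid parameters are hypothetical):

/* Sketch only: derive an IDIF SEQ/OID pair from a legacy OST index and
 * 64-bit object id, following the formulas in the comment above. */
static inline void example_idif_from_objid(__u32 ost_index, __u64 objid,
					   __u64 *seq, __u32 *oid)
{
	*seq = (1ULL << 32) | ((__u64)ost_index << 16) | ((objid >> 32) & 0xffff);
	*oid = objid & 0xffffffff;
}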
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "lustre/lustre_idl.h"
-
-struct lu_env;
-struct lu_site;
-struct lu_context;
-struct obd_device;
-struct obd_export;
-
-/* Whole sequences space range and zero range definitions */
-extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
-extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
-extern const struct lu_fid LUSTRE_BFL_FID;
-extern const struct lu_fid LU_OBF_FID;
-extern const struct lu_fid LU_DOT_LUSTRE_FID;
-
-enum {
- /*
- * This is how many metadata FIDs may be allocated in one sequence (128k)
- */
- LUSTRE_METADATA_SEQ_MAX_WIDTH = 0x0000000000020000ULL,
-
- /*
- * This is how many data FIDs could be allocated in one sequence (4B - 1)
- */
- LUSTRE_DATA_SEQ_MAX_WIDTH = 0x00000000FFFFFFFFULL,
-
- /*
- * How many sequences to allocate to a client at once.
- */
- LUSTRE_SEQ_META_WIDTH = 0x0000000000000001ULL,
-
- /*
- * seq allocation pool size.
- */
- LUSTRE_SEQ_BATCH_WIDTH = LUSTRE_SEQ_META_WIDTH * 1000,
-
- /*
- * This is how many sequences may be in one super-sequence allocated to
- * MDTs.
- */
- LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
-};
-
-enum {
- /** 2^6 FIDs for OI containers */
- OSD_OI_FID_OID_BITS = 6,
- /** reserve enough FIDs in case we want more in the future */
- OSD_OI_FID_OID_BITS_MAX = 10,
-};
-
-/** special OID for local objects */
-enum local_oid {
- /** \see fld_mod_init */
- FLD_INDEX_OID = 3UL,
- /** \see fid_mod_init */
- FID_SEQ_CTL_OID = 4UL,
- FID_SEQ_SRV_OID = 5UL,
- /** \see mdd_mod_init */
- MDD_ROOT_INDEX_OID = 6UL, /* deprecated in 2.4 */
- MDD_ORPHAN_OID = 7UL, /* deprecated in 2.4 */
- MDD_LOV_OBJ_OID = 8UL,
- MDD_CAPA_KEYS_OID = 9UL,
- /** \see mdt_mod_init */
- LAST_RECV_OID = 11UL,
- OSD_FS_ROOT_OID = 13UL,
- ACCT_USER_OID = 15UL,
- ACCT_GROUP_OID = 16UL,
- LFSCK_BOOKMARK_OID = 17UL,
- OTABLE_IT_OID = 18UL,
- /* These two definitions are obsolete
- * OFD_GROUP0_LAST_OID = 20UL,
- * OFD_GROUP4K_LAST_OID = 20UL+4096,
- */
- OFD_LAST_GROUP_OID = 4117UL,
- LLOG_CATALOGS_OID = 4118UL,
- MGS_CONFIGS_OID = 4119UL,
- OFD_HEALTH_CHECK_OID = 4120UL,
- MDD_LOV_OBJ_OSEQ = 4121UL,
- LFSCK_NAMESPACE_OID = 4122UL,
- REMOTE_PARENT_DIR_OID = 4123UL,
-};
-
-static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
-{
- fid->f_seq = FID_SEQ_LOCAL_FILE;
- fid->f_oid = oid;
- fid->f_ver = 0;
-}
-
-static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid)
-{
- fid->f_seq = FID_SEQ_LOCAL_NAME;
- fid->f_oid = oid;
- fid->f_ver = 0;
-}
-
-/* For a new FS (>= 2.4) the root FID will be changed to
- * [FID_SEQ_ROOT:1:0]; for an existing FS upgraded to 2.4
- * the root FID will still be an IGIF. */
-static inline int fid_is_root(const struct lu_fid *fid)
-{
- return unlikely((fid_seq(fid) == FID_SEQ_ROOT &&
- fid_oid(fid) == 1));
-}
-
-static inline int fid_is_dot_lustre(const struct lu_fid *fid)
-{
- return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
- fid_oid(fid) == FID_OID_DOT_LUSTRE);
-}
-
-static inline int fid_is_obf(const struct lu_fid *fid)
-{
- return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
- fid_oid(fid) == FID_OID_DOT_LUSTRE_OBF);
-}
-
-static inline int fid_is_otable_it(const struct lu_fid *fid)
-{
- return unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
- fid_oid(fid) == OTABLE_IT_OID);
-}
-
-static inline int fid_is_acct(const struct lu_fid *fid)
-{
- return fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
- (fid_oid(fid) == ACCT_USER_OID ||
- fid_oid(fid) == ACCT_GROUP_OID);
-}
-
-static inline int fid_is_quota(const struct lu_fid *fid)
-{
- return fid_seq(fid) == FID_SEQ_QUOTA ||
- fid_seq(fid) == FID_SEQ_QUOTA_GLB;
-}
-
-static inline int fid_is_namespace_visible(const struct lu_fid *fid)
-{
- const __u64 seq = fid_seq(fid);
-
- /* Here, we cannot distinguish whether the normal FID is for OST
- * object or not. It is the caller's duty to check further if needed. */
- return (!fid_is_last_id(fid) &&
- (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) ||
- fid_is_root(fid) || fid_is_dot_lustre(fid);
-}
-
-static inline int fid_seq_in_fldb(__u64 seq)
-{
- return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) ||
- fid_seq_is_root(seq) || fid_seq_is_dot(seq);
-}
-
-static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq)
-{
- if (fid_seq_is_mdt0(seq)) {
- fid->f_seq = fid_idif_seq(0, 0);
- } else {
- LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
- fid_seq_is_idif(seq), "%#llx\n", seq);
- fid->f_seq = seq;
- }
- fid->f_oid = 0;
- fid->f_ver = 0;
-}
-
-/* seq client type */
-enum lu_cli_type {
- LUSTRE_SEQ_METADATA = 1,
- LUSTRE_SEQ_DATA
-};
-
-enum lu_mgr_type {
- LUSTRE_SEQ_SERVER,
- LUSTRE_SEQ_CONTROLLER
-};
-
-struct lu_server_seq;
-
-/* Client sequence manager interface. */
-struct lu_client_seq {
- /* Sequence-controller export. */
- struct obd_export *lcs_exp;
- struct mutex lcs_mutex;
-
- /*
- * Range of sequences allowed for allocation. When lu_client_seq is used
- * on clients this contains the meta-sequence range; on servers it
- * contains the super-sequence range.
- */
- struct lu_seq_range lcs_space;
-
- /* Seq related proc */
- struct dentry *lcs_debugfs_entry;
-
- /* This holds last allocated fid in last obtained seq */
- struct lu_fid lcs_fid;
-
- /* LUSTRE_SEQ_METADATA or LUSTRE_SEQ_DATA */
- enum lu_cli_type lcs_type;
-
- /*
- * Service uuid, passed from the MDT and combined with the seq name to
- * form a unique seq name for use with procfs.
- */
- char lcs_name[LUSTRE_MDT_MAXNAMELEN];
-
- /*
- * Sequence width, that is how many objects may be allocated in one
- * sequence. Default value for it is LUSTRE_SEQ_MAX_WIDTH.
- */
- __u64 lcs_width;
-
- /* Seq-server for direct talking */
- struct lu_server_seq *lcs_srv;
-
- /* wait queue for fid allocation and update indicator */
- wait_queue_head_t lcs_waitq;
- int lcs_update;
-};
-
-/* server sequence manager interface */
-struct lu_server_seq {
- /* Available sequences space */
- struct lu_seq_range lss_space;
-
- /* keeps highwater in lsr_end for seq allocation algorithm */
- struct lu_seq_range lss_lowater_set;
- struct lu_seq_range lss_hiwater_set;
-
- /*
- * Device for server side seq manager needs (saving sequences to backing
- * store).
- */
- struct dt_device *lss_dev;
-
- /* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
- enum lu_mgr_type lss_type;
-
- /* Client interface to request controller */
- struct lu_client_seq *lss_cli;
-
- /* Mutex for protecting allocation */
- struct mutex lss_mutex;
-
- /*
- * Service uuid, passed from the MDT and combined with the seq name to
- * form a unique seq name for use with procfs.
- */
- char lss_name[LUSTRE_MDT_MAXNAMELEN];
-
- /*
- * Allocation chunks for super and meta sequences. Default values are
- * LUSTRE_SEQ_SUPER_WIDTH and LUSTRE_SEQ_META_WIDTH.
- */
- __u64 lss_width;
-
- /*
- * minimum lss_alloc_set size that should be allocated from
- * lss_space
- */
- __u64 lss_set_width;
-
- /* sync is needed for update operation */
- __u32 lss_need_sync;
-
- /**
- * Pointer to site object, required to access site fld.
- */
- struct seq_server_site *lss_site;
-};
-
-/* Client methods */
-void seq_client_flush(struct lu_client_seq *seq);
-
-int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
- struct lu_fid *fid);
-/* Fids common stuff */
-int fid_is_local(const struct lu_env *env,
- struct lu_site *site, const struct lu_fid *fid);
-
-enum lu_cli_type;
-int client_fid_init(struct obd_device *obd, struct obd_export *exp,
- enum lu_cli_type type);
-int client_fid_fini(struct obd_device *obd);
-
-/* fid locking */
-
-struct ldlm_namespace;
-
-/*
- * Build (DLM) resource name from FID.
- *
- * NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
- * but was moved into name[1] along with the OID to avoid consuming the
- * remaining name[2,3] fields that need to be used for the quota identifier.
- */
-static inline struct ldlm_res_id *
-fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
-{
- memset(res, 0, sizeof(*res));
- res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
- res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
-
- return res;
-}
-
-/*
- * Return true if resource is for object identified by FID.
- */
-static inline int fid_res_name_eq(const struct lu_fid *fid,
- const struct ldlm_res_id *res)
-{
- return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) &&
- res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid);
-}
-
-/*
- * Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
- */
-static inline struct lu_fid *
-fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
-{
- fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
- fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
- fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
- LASSERT(fid_res_name_eq(fid, res));
-
- return fid;
-}
-
-/*
- * Build (DLM) resource identifier from global quota FID and quota ID.
- */
-static inline struct ldlm_res_id *
-fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
- struct ldlm_res_id *res)
-{
- fid_build_reg_res_name(glb_fid, res);
- res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
- res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
-
- return res;
-}
-
-/*
- * Extract global FID and quota ID from resource name
- */
-static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid,
- union lquota_id *qid,
- const struct ldlm_res_id *res)
-{
- fid_extract_from_res_name(glb_fid, res);
- qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF];
- qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF];
- qid->qid_fid.f_ver =
- (__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
-}
-
-static inline struct ldlm_res_id *
-fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
- struct ldlm_res_id *res)
-{
- fid_build_reg_res_name(fid, res);
- res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
-
- return res;
-}
-
-/**
- * Build a DLM resource name from object id & seq; this will eventually
- * be removed when we replace ost_id with FID in the data stack.
- *
- * Currently, resid from the old client, whose res[0] = object_id,
- * res[1] = object_seq, is just the opposite of the metadata
- * resid, where res[0] = fid->f_seq, res[1] = fid->f_oid.
- * To unify the resid identification, we will reverse the data
- * resid to keep it the same as the metadata resid, i.e.
- *
- * For resid from the old client,
- * res[0] = objid, res[1] = 0, still keep the original order,
- * for compatibility.
- *
- * For new resid
- * res will be built from normal FID directly, i.e. res[0] = f_seq,
- * res[1] = f_oid + f_ver.
- */
-static inline void ostid_build_res_name(struct ost_id *oi,
- struct ldlm_res_id *name)
-{
- memset(name, 0, sizeof(*name));
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi);
- name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi);
- } else {
- fid_build_reg_res_name(&oi->oi_fid, name);
- }
-}
-
-static inline void ostid_res_name_to_id(struct ost_id *oi,
- struct ldlm_res_id *name)
-{
- if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_SEQ_OFF])) {
- /* old resid */
- ostid_set_seq(oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
- ostid_set_id(oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- } else {
- /* new resid */
- fid_extract_from_res_name(&oi->oi_fid, name);
- }
-}
-
-/**
- * Return true if the resource is for the object identified by this id & group.
- */
-static inline int ostid_res_name_eq(struct ost_id *oi,
- struct ldlm_res_id *name)
-{
- /* Note: this is just a shortcut to save some effort; the correct way
- * would be to turn them into FIDs and compare. */
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) &&
- name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi);
- } else {
- return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_seq(oi) &&
- name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_id(oi);
- }
-}
-
-/* The same as osc_build_res_name() */
-static inline void ost_fid_build_resid(const struct lu_fid *fid,
- struct ldlm_res_id *resname)
-{
- if (fid_is_mdt0(fid) || fid_is_idif(fid)) {
- struct ost_id oi;
- oi.oi.oi_id = 0; /* gcc 4.7.2 complains otherwise */
- if (fid_to_ostid(fid, &oi) != 0)
- return;
- ostid_build_res_name(&oi, resname);
- } else {
- fid_build_reg_res_name(fid, resname);
- }
-}
-
-static inline void ost_fid_from_resid(struct lu_fid *fid,
- const struct ldlm_res_id *name)
-{
- if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
- /* old resid */
- struct ost_id oi;
- ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
- ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- ostid_to_fid(fid, &oi, 0);
- } else {
- /* new resid */
- fid_extract_from_res_name(fid, name);
- }
-}
-
-/**
- * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
- * For non-IGIF FIDs this starts just over 2^32, and continues without
- * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
- * into the range where there may not be many OID values in use, to minimize
- * the risk of conflict.
- *
- * Assuming LUSTRE_SEQ_MAX_WIDTH is less than (1 << 24), which is currently true,
- * the time between re-used inode numbers is very long - 2^40 SEQ numbers,
- * or about 2^40 client mounts, if clients create less than 2^24 files/mount.
- */
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
- __u64 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid);
-
- ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
- return ino ? ino : fid_oid(fid);
-}
-
-static inline __u32 fid_hash(const struct lu_fid *f, int bits)
-{
- /* All objects with the same id and different versions will belong to
- * the same collision list. */
- return hash_long(fid_flatten(f), bits);
-}
-
-/**
- * Map a FID to a 32-bit value for use as ino on 32-bit systems. */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
- __u32 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid) - FID_SEQ_START;
-
- /* Map the high bits of the OID into higher bits of the inode number so
- * that inodes generated at about the same time have a reduced chance
- * of collisions. This will give a period of 2^10 = 1024 unique clients
- * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files. */
- ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
- (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
- return ino ? ino : fid_oid(fid);
-}
-
-static inline int lu_fid_diff(struct lu_fid *fid1, struct lu_fid *fid2)
-{
- LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
- PFID(fid1), PFID(fid2));
-
- if (fid_is_idif(fid1) && fid_is_idif(fid2))
- return fid_idif_id(fid1->f_seq, fid1->f_oid, fid1->f_ver) -
- fid_idif_id(fid2->f_seq, fid2->f_oid, fid2->f_ver);
-
- return fid_oid(fid1) - fid_oid(fid2);
-}
-
-#define LUSTRE_SEQ_SRV_NAME "seq_srv"
-#define LUSTRE_SEQ_CTL_NAME "seq_ctl"
-
-/* Range common stuff */
-static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = cpu_to_le64(src->lsr_start);
- dst->lsr_end = cpu_to_le64(src->lsr_end);
- dst->lsr_index = cpu_to_le32(src->lsr_index);
- dst->lsr_flags = cpu_to_le32(src->lsr_flags);
-}
-
-static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = le64_to_cpu(src->lsr_start);
- dst->lsr_end = le64_to_cpu(src->lsr_end);
- dst->lsr_index = le32_to_cpu(src->lsr_index);
- dst->lsr_flags = le32_to_cpu(src->lsr_flags);
-}
-
-static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = cpu_to_be64(src->lsr_start);
- dst->lsr_end = cpu_to_be64(src->lsr_end);
- dst->lsr_index = cpu_to_be32(src->lsr_index);
- dst->lsr_flags = cpu_to_be32(src->lsr_flags);
-}
-
-static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
-{
- dst->lsr_start = be64_to_cpu(src->lsr_start);
- dst->lsr_end = be64_to_cpu(src->lsr_end);
- dst->lsr_index = be32_to_cpu(src->lsr_index);
- dst->lsr_flags = be32_to_cpu(src->lsr_flags);
-}
-
-/** @} fid */
-
-#endif /* __LUSTRE_FID_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
deleted file mode 100644
index ff7230e18940..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LINUX_FLD_H
-#define __LINUX_FLD_H
-
-/** \defgroup fld fld
- *
- * @{
- */
-
-#include "lustre/lustre_idl.h"
-#include "../../include/linux/libcfs/libcfs.h"
-
-struct lu_client_fld;
-struct lu_server_fld;
-struct lu_fld_hash;
-struct fld_cache;
-
-extern const struct dt_index_features fld_index_features;
-extern const char fld_index_name[];
-
-/*
- * FLD (Fid Location Database) interface.
- */
-enum {
- LUSTRE_CLI_FLD_HASH_DHT = 0,
- LUSTRE_CLI_FLD_HASH_RRB
-};
-
-
-struct lu_fld_target {
- struct list_head ft_chain;
- struct obd_export *ft_exp;
- struct lu_server_fld *ft_srv;
- __u64 ft_idx;
-};
-
-struct lu_server_fld {
- /**
- * super sequence controller export, needed to forward fld
- * lookup request. */
- struct obd_export *lsf_control_exp;
-
- /**
- * Client FLD cache. */
- struct fld_cache *lsf_cache;
-
- /**
- * Protect index modifications */
- struct mutex lsf_lock;
-
- /**
- * Fld service name in form "fld-srv-lustre-MDTXXX" */
- char lsf_name[LUSTRE_MDT_MAXNAMELEN];
-
-};
-
-struct lu_client_fld {
- /**
- * Client side debugfs entry. */
- struct dentry *lcf_debugfs_entry;
-
- /**
- * List of exports client FLD knows about. */
- struct list_head lcf_targets;
-
- /**
- * Current hash to be used to choose an export. */
- struct lu_fld_hash *lcf_hash;
-
- /**
- * Exports count. */
- int lcf_count;
-
- /**
- * Lock protecting exports list and fld_hash. */
- spinlock_t lcf_lock;
-
- /**
- * Client FLD cache. */
- struct fld_cache *lcf_cache;
-
- /**
- * Client fld debugfs entry name. */
- char lcf_name[LUSTRE_MDT_MAXNAMELEN];
-
- int lcf_flags;
-};
-
-/* Client methods */
-int fld_client_init(struct lu_client_fld *fld,
- const char *prefix, int hash);
-
-void fld_client_fini(struct lu_client_fld *fld);
-
-void fld_client_flush(struct lu_client_fld *fld);
-
-int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
- __u32 flags, const struct lu_env *env);
-
-int fld_client_create(struct lu_client_fld *fld,
- struct lu_seq_range *range,
- const struct lu_env *env);
-
-int fld_client_delete(struct lu_client_fld *fld, u64 seq,
- const struct lu_env *env);
-
-int fld_client_add_target(struct lu_client_fld *fld,
- struct lu_fld_target *tar);
-
-int fld_client_del_target(struct lu_client_fld *fld,
- __u64 idx);
-
-void fld_client_debugfs_fini(struct lu_client_fld *fld);
-
-/** @} fld */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
deleted file mode 100644
index f3ae02b3eef3..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_HA_H
-#define _LUSTRE_HA_H
-
-/** \defgroup ha ha
- *
- * @{
- */
-
-struct obd_import;
-struct obd_export;
-struct obd_device;
-struct ptlrpc_request;
-
-
-int ptlrpc_replay(struct obd_import *imp);
-int ptlrpc_resend(struct obd_import *imp);
-void ptlrpc_free_committed(struct obd_import *imp);
-void ptlrpc_wake_delayed(struct obd_import *imp);
-int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async);
-int ptlrpc_set_import_active(struct obd_import *imp, int active);
-void ptlrpc_activate_import(struct obd_import *imp);
-void ptlrpc_deactivate_import(struct obd_import *imp);
-void ptlrpc_invalidate_import(struct obd_import *imp);
-void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
-
-/** @} ha */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
deleted file mode 100644
index 4d51c8ac3906..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LUSTRE_HANDLES_H_
-#define __LUSTRE_HANDLES_H_
-
-/** \defgroup handles handles
- *
- * @{
- */
-
-#include <linux/atomic.h>
-#include <linux/list.h>
-#include <linux/rcupdate.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-
-struct portals_handle_ops {
- void (*hop_addref)(void *object);
- void (*hop_free)(void *object, int size);
-};
-
-/* These handles are most easily used by having them appear at the very top of
- * whatever object that you want to make handles for. ie:
- *
- * struct ldlm_lock {
- * struct portals_handle handle;
- * ...
- * };
- *
- * Now you're able to assign the results of cookie2handle directly to an
- * ldlm_lock. If it's not at the top, you'll want to use container_of()
- * to compute the start of the structure based on the handle field. */
-struct portals_handle {
- struct list_head h_link;
- __u64 h_cookie;
- struct portals_handle_ops *h_ops;
-
- /* newly added fields to handle the RCU issue. -jxiong */
- struct rcu_head h_rcu;
- spinlock_t h_lock;
- unsigned int h_size:31;
- unsigned int h_in:1;
-};
-#define RCU2HANDLE(rcu) container_of(rcu, struct portals_handle, h_rcu)
-
-/* handles.c */
-
-/* Add a handle to the hash table */
-void class_handle_hash(struct portals_handle *,
- struct portals_handle_ops *ops);
-void class_handle_unhash(struct portals_handle *);
-void *class_handle2object(__u64 cookie);
-void class_handle_free_cb(struct rcu_head *rcu);
-int class_handle_init(void);
-void class_handle_cleanup(void);
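A hedged sketch (editor-added) tying the embedding pattern described in the comment above to the registration/lookup API declared here; the example_obj type and its ops table are hypothetical.

/* Sketch only: a hypothetical object embeds struct portals_handle as its
 * first member, registers it with class_handle_hash(), and is later
 * recovered from a cookie via class_handle2object(). */
struct example_obj {
	struct portals_handle	eo_handle;	/* must stay at the top */
	int			eo_payload;
};

static void example_obj_addref(void *object) { /* take a reference here */ }
static void example_obj_free(void *object, int size) { /* free 'object' here */ }

static struct portals_handle_ops example_obj_ops = {
	.hop_addref	= example_obj_addref,
	.hop_free	= example_obj_free,
};

/* Registration:	class_handle_hash(&obj->eo_handle, &example_obj_ops);
 * Lookup:		struct example_obj *obj = class_handle2object(cookie); */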
-
-/** @} handles */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
deleted file mode 100644
index 660d29032ad4..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/** \defgroup obd_import PtlRPC import definitions
- * Imports are client-side representation of remote obd target.
- *
- * @{
- */
-
-#ifndef __IMPORT_H
-#define __IMPORT_H
-
-/** \defgroup export export
- *
- * @{
- */
-
-#include "lustre_handles.h"
-#include "lustre/lustre_idl.h"
-
-
-/**
- * Adaptive Timeout stuff
- *
- * @{
- */
-#define D_ADAPTTO D_OTHER
-#define AT_BINS 4 /* "bin" means "N seconds of history" */
-#define AT_FLG_NOHIST 0x1 /* use last reported value only */
-
-struct adaptive_timeout {
- time64_t at_binstart; /* bin start time */
- unsigned int at_hist[AT_BINS]; /* timeout history bins */
- unsigned int at_flags;
- unsigned int at_current; /* current timeout value */
- unsigned int at_worst_ever; /* worst-ever timeout value */
- time64_t at_worst_time; /* worst-ever timeout timestamp */
- spinlock_t at_lock;
-};
-
-struct ptlrpc_at_array {
- struct list_head *paa_reqs_array; /** array to hold requests */
- __u32 paa_size; /** the size of array */
- __u32 paa_count; /** the total count of reqs */
- time64_t paa_deadline; /** the earliest deadline of reqs */
- __u32 *paa_reqs_count; /** the count of reqs in each entry */
-};
-
-#define IMP_AT_MAX_PORTALS 8
-struct imp_at {
- int iat_portal[IMP_AT_MAX_PORTALS];
- struct adaptive_timeout iat_net_latency;
- struct adaptive_timeout iat_service_estimate[IMP_AT_MAX_PORTALS];
-};
-
-
-/** @} */
-
-/** Possible import states */
-enum lustre_imp_state {
- LUSTRE_IMP_CLOSED = 1,
- LUSTRE_IMP_NEW = 2,
- LUSTRE_IMP_DISCON = 3,
- LUSTRE_IMP_CONNECTING = 4,
- LUSTRE_IMP_REPLAY = 5,
- LUSTRE_IMP_REPLAY_LOCKS = 6,
- LUSTRE_IMP_REPLAY_WAIT = 7,
- LUSTRE_IMP_RECOVER = 8,
- LUSTRE_IMP_FULL = 9,
- LUSTRE_IMP_EVICTED = 10,
-};
-
-/** Returns text string representation of numeric import state \a state */
-static inline char *ptlrpc_import_state_name(enum lustre_imp_state state)
-{
- static char *import_state_names[] = {
- "<UNKNOWN>", "CLOSED", "NEW", "DISCONN",
- "CONNECTING", "REPLAY", "REPLAY_LOCKS", "REPLAY_WAIT",
- "RECOVER", "FULL", "EVICTED",
- };
-
- LASSERT(state <= LUSTRE_IMP_EVICTED);
- return import_state_names[state];
-}
-
-/**
- * List of import event types
- */
-enum obd_import_event {
- IMP_EVENT_DISCON = 0x808001,
- IMP_EVENT_INACTIVE = 0x808002,
- IMP_EVENT_INVALIDATE = 0x808003,
- IMP_EVENT_ACTIVE = 0x808004,
- IMP_EVENT_OCD = 0x808005,
- IMP_EVENT_DEACTIVATE = 0x808006,
- IMP_EVENT_ACTIVATE = 0x808007,
-};
-
-/**
- * Definition of import connection structure
- */
-struct obd_import_conn {
- /** Item for linking connections together */
- struct list_head oic_item;
- /** Pointer to actual PortalRPC connection */
- struct ptlrpc_connection *oic_conn;
- /** uuid of remote side */
- struct obd_uuid oic_uuid;
- /**
- * Time (64 bit jiffies) of last connection attempt on this connection
- */
- __u64 oic_last_attempt;
-};
-
-/* state history */
-#define IMP_STATE_HIST_LEN 16
-struct import_state_hist {
- enum lustre_imp_state ish_state;
- time64_t ish_time;
-};
-
-/**
- * Definition of PortalRPC import structure.
- * Imports represent the client-side view of a remote target.
- */
-struct obd_import {
- /** Local handle (== id) for this import. */
- struct portals_handle imp_handle;
- /** Reference counter */
- atomic_t imp_refcount;
- struct lustre_handle imp_dlm_handle; /* client's ldlm export */
- /** Currently active connection */
- struct ptlrpc_connection *imp_connection;
- /** PortalRPC client structure for this import */
- struct ptlrpc_client *imp_client;
- /** List element for linking into pinger chain */
- struct list_head imp_pinger_chain;
- /** List element for linking into chain for destruction */
- struct list_head imp_zombie_chain;
-
- /**
- * Lists of requests that are retained for replay, waiting for a reply,
- * or waiting for recovery to complete, respectively.
- * @{
- */
- struct list_head imp_replay_list;
- struct list_head imp_sending_list;
- struct list_head imp_delayed_list;
- /** @} */
-
- /**
- * List of requests that are retained for committed open replay. Once
- * open is committed, open replay request will be moved from the
- * imp_replay_list into the imp_committed_list.
- * The imp_replay_cursor is for accelerating searching during replay.
- * @{
- */
- struct list_head imp_committed_list;
- struct list_head *imp_replay_cursor;
- /** @} */
-
- /** obd device for this import */
- struct obd_device *imp_obd;
-
- /**
- * some security-related fields
- * @{
- */
- struct ptlrpc_sec *imp_sec;
- struct mutex imp_sec_mutex;
- time64_t imp_sec_expire;
- /** @} */
-
- /** Wait queue for those who need to wait for recovery completion */
- wait_queue_head_t imp_recovery_waitq;
-
- /** Number of requests currently in-flight */
- atomic_t imp_inflight;
- /** Number of requests currently unregistering */
- atomic_t imp_unregistering;
- /** Number of replay requests inflight */
- atomic_t imp_replay_inflight;
- /** Number of currently happening import invalidations */
- atomic_t imp_inval_count;
- /** Number of request timeouts */
- atomic_t imp_timeouts;
- /** Current import state */
- enum lustre_imp_state imp_state;
- /** Last replay state */
- enum lustre_imp_state imp_replay_state;
- /** History of import states */
- struct import_state_hist imp_state_hist[IMP_STATE_HIST_LEN];
- int imp_state_hist_idx;
- /** Current import generation. Incremented on every reconnect */
- int imp_generation;
- /** Incremented every time we send reconnection request */
- __u32 imp_conn_cnt;
- /**
- * \see ptlrpc_free_committed remembers imp_generation value here
- * after a check to save on unnecessary replay list iterations
- */
- int imp_last_generation_checked;
- /** Last transno we replayed */
- __u64 imp_last_replay_transno;
- /** Last transno committed on remote side */
- __u64 imp_peer_committed_transno;
- /**
- * \see ptlrpc_free_committed remembers last_transno since its last
- * check here and if last_transno did not change since last run of
- * ptlrpc_free_committed and import generation is the same, we can
- * skip looking for requests to remove from replay list as optimisation
- */
- __u64 imp_last_transno_checked;
- /**
- * Remote export handle. This is how remote side knows what export
- * we are talking to. Filled from response to connect request
- */
- struct lustre_handle imp_remote_handle;
- /** When to perform next ping. time in jiffies. */
- unsigned long imp_next_ping;
- /** When we last successfully connected. time in 64bit jiffies */
- __u64 imp_last_success_conn;
-
- /** List of all possible connection for import. */
- struct list_head imp_conn_list;
- /**
- * Current connection. \a imp_connection is imp_conn_current->oic_conn
- */
- struct obd_import_conn *imp_conn_current;
-
- /** Protects flags, level, generation, conn_cnt, *_list */
- spinlock_t imp_lock;
-
- /* flags */
- unsigned long imp_no_timeout:1, /* timeouts are disabled */
- imp_invalid:1, /* evicted */
- /* administratively disabled */
- imp_deactive:1,
- /* try to recover the import */
- imp_replayable:1,
- /* don't run recovery (timeout instead) */
- imp_dlm_fake:1,
- /* use 1/2 timeout on MDS' OSCs */
- imp_server_timeout:1,
- /* VBR: imp in delayed recovery */
- imp_delayed_recovery:1,
- /* VBR: if gap was found then no lock replays
- */
- imp_no_lock_replay:1,
- /* recovery by versions was failed */
- imp_vbr_failed:1,
- /* force an immediate ping */
- imp_force_verify:1,
- /* force a scheduled ping */
- imp_force_next_verify:1,
- /* pingable */
- imp_pingable:1,
- /* resend for replay */
- imp_resend_replay:1,
- /* disable normal recovery, for test only. */
- imp_no_pinger_recover:1,
- /* need IR MNE swab */
- imp_need_mne_swab:1,
- /* import must be reconnected instead of
- * choosing a new connection */
- imp_force_reconnect:1,
- /* import has tried to connect with server */
- imp_connect_tried:1;
- __u32 imp_connect_op;
- struct obd_connect_data imp_connect_data;
- __u64 imp_connect_flags_orig;
- int imp_connect_error;
-
- __u32 imp_msg_magic;
- __u32 imp_msghdr_flags; /* adjusted based on server capability */
-
- struct imp_at imp_at; /* adaptive timeout data */
- time64_t imp_last_reply_time; /* for health check */
-};
-
-typedef void (*obd_import_callback)(struct obd_import *imp, void *closure,
- int event, void *event_arg, void *cb_data);
-
-/**
- * Structure for import observer.
- * It is possible to register an "observer" on an import; every time
- * something happens to the import (like connect/evict/disconnect),
- * the observer gets its callback called with the event type.
- */
-struct obd_import_observer {
- struct list_head oio_chain;
- obd_import_callback oio_cb;
- void *oio_cb_data;
-};
-
-void class_observe_import(struct obd_import *imp, obd_import_callback cb,
- void *cb_data);
-void class_unobserve_import(struct obd_import *imp, obd_import_callback cb,
- void *cb_data);
-void class_notify_import_observers(struct obd_import *imp, int event,
- void *event_arg);
-
-/* import.c */
-static inline unsigned int at_est2timeout(unsigned int val)
-{
- /* add an arbitrary minimum: 125% +5 sec */
- return (val + (val >> 2) + 5);
-}
-
-static inline unsigned int at_timeout2est(unsigned int val)
-{
- /* restore estimate value from timeout: e=4/5(t-5) */
- LASSERT(val);
- return (max((val << 2) / 5, 5U) - 4);
-}
-
-static inline void at_reset(struct adaptive_timeout *at, int val)
-{
- spin_lock(&at->at_lock);
- at->at_current = val;
- at->at_worst_ever = val;
- at->at_worst_time = ktime_get_real_seconds();
- spin_unlock(&at->at_lock);
-}
-static inline void at_init(struct adaptive_timeout *at, int val, int flags)
-{
- memset(at, 0, sizeof(*at));
- spin_lock_init(&at->at_lock);
- at->at_flags = flags;
- at_reset(at, val);
-}
-extern unsigned int at_min;
-static inline int at_get(struct adaptive_timeout *at)
-{
- return (at->at_current > at_min) ? at->at_current : at_min;
-}
-int at_measured(struct adaptive_timeout *at, unsigned int val);
-int import_at_get_index(struct obd_import *imp, int portal);
-extern unsigned int at_max;
-#define AT_OFF (at_max == 0)
-
-/* genops.c */
-struct obd_export;
-struct obd_import *class_exp2cliimp(struct obd_export *);
-
-/** @} import */
-
-#endif /* __IMPORT_H */
-
-/** @} obd_import */
diff --git a/drivers/staging/lustre/lustre/include/lustre_intent.h b/drivers/staging/lustre/lustre/include/lustre_intent.h
deleted file mode 100644
index c491d52d86a2..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_intent.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LUSTRE_INTENT_H
-#define LUSTRE_INTENT_H
-
-/* intent IT_XXX are defined in lustre/include/obd.h */
-struct lustre_intent_data {
- int it_disposition;
- int it_status;
- __u64 it_lock_handle;
- __u64 it_lock_bits;
- int it_lock_mode;
- int it_remote_lock_mode;
- __u64 it_remote_lock_handle;
- void *it_data;
- unsigned int it_lock_set:1;
-};
-
-struct lookup_intent {
- int it_op;
- int it_create_mode;
- __u64 it_flags;
- union {
- struct lustre_intent_data lustre;
- } d;
-};
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
deleted file mode 100644
index 35175fd3da88..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_lib.h
- *
- * Basic Lustre library routines.
- */
-
-#ifndef _LUSTRE_LIB_H
-#define _LUSTRE_LIB_H
-
-/** \defgroup lib lib
- *
- * @{
- */
-
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/types.h>
-#include "../../include/linux/libcfs/libcfs.h"
-#include "lustre/lustre_idl.h"
-#include "lustre_ver.h"
-#include "lustre_cfg.h"
-
-/* target.c */
-struct kstatfs;
-struct ptlrpc_request;
-struct obd_export;
-struct lu_target;
-struct l_wait_info;
-#include "lustre_ha.h"
-#include "lustre_net.h"
-
-#define LI_POISON 0x5a5a5a5a
-#if BITS_PER_LONG > 32
-# define LL_POISON 0x5a5a5a5a5a5a5a5aL
-#else
-# define LL_POISON 0x5a5a5a5aL
-#endif
-#define LP_POISON ((void *)LL_POISON)
-
-int target_pack_pool_reply(struct ptlrpc_request *req);
-int do_set_info_async(struct obd_import *imp,
- int opcode, int version,
- u32 keylen, void *key,
- u32 vallen, void *val,
- struct ptlrpc_request_set *set);
-
-#define OBD_RECOVERY_MAX_TIME (obd_timeout * 18) /* b13079 */
-#define OBD_MAX_IOCTL_BUFFER CONFIG_LUSTRE_OBD_MAX_IOCTL_BUFFER
-
-void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
-
-/* client.c */
-
-int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
-struct client_obd *client_conn2cli(struct lustre_handle *conn);
-
-struct md_open_data;
-struct obd_client_handle {
- struct lustre_handle och_fh;
- struct lu_fid och_fid;
- struct md_open_data *och_mod;
- struct lustre_handle och_lease_handle; /* open lock for lease */
- __u32 och_magic;
- fmode_t och_flags;
-};
-#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
-
-/* statfs_pack.c */
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
-
-/*
- * For md echo client
- */
-enum md_echo_cmd {
- ECHO_MD_CREATE = 1, /* Open/Create file on MDT */
- ECHO_MD_MKDIR = 2, /* Mkdir on MDT */
- ECHO_MD_DESTROY = 3, /* Unlink file on MDT */
- ECHO_MD_RMDIR = 4, /* Rmdir on MDT */
- ECHO_MD_LOOKUP = 5, /* Lookup on MDT */
- ECHO_MD_GETATTR = 6, /* Getattr on MDT */
- ECHO_MD_SETATTR = 7, /* Setattr on MDT */
- ECHO_MD_ALLOC_FID = 8, /* Get FIDs from MDT */
-};
-
-/*
- * OBD IOCTLS
- */
-#define OBD_IOCTL_VERSION 0x00010004
-
-struct obd_ioctl_data {
- __u32 ioc_len;
- __u32 ioc_version;
-
- union {
- __u64 ioc_cookie;
- __u64 ioc_u64_1;
- };
- union {
- __u32 ioc_conn1;
- __u32 ioc_u32_1;
- };
- union {
- __u32 ioc_conn2;
- __u32 ioc_u32_2;
- };
-
- struct obdo ioc_obdo1;
- struct obdo ioc_obdo2;
-
- u64 ioc_count;
- u64 ioc_offset;
- __u32 ioc_dev;
- __u32 ioc_command;
-
- __u64 ioc_nid;
- __u32 ioc_nal;
- __u32 ioc_type;
-
- /* buffers the kernel will treat as user pointers */
- __u32 ioc_plen1;
- char *ioc_pbuf1;
- __u32 ioc_plen2;
- char *ioc_pbuf2;
-
- /* inline buffers for various arguments */
- __u32 ioc_inllen1;
- char *ioc_inlbuf1;
- __u32 ioc_inllen2;
- char *ioc_inlbuf2;
- __u32 ioc_inllen3;
- char *ioc_inlbuf3;
- __u32 ioc_inllen4;
- char *ioc_inlbuf4;
-
- char ioc_bulk[0];
-};
-
-struct obd_ioctl_hdr {
- __u32 ioc_len;
- __u32 ioc_version;
-};
-
-static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
-{
- int len = cfs_size_round(sizeof(struct obd_ioctl_data));
- len += cfs_size_round(data->ioc_inllen1);
- len += cfs_size_round(data->ioc_inllen2);
- len += cfs_size_round(data->ioc_inllen3);
- len += cfs_size_round(data->ioc_inllen4);
- return len;
-}
-
-
-static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
-{
- if (data->ioc_len > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_len larger than %d\n",
- OBD_MAX_IOCTL_BUFFER);
- return 1;
- }
- if (data->ioc_inllen1 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen1 larger than OBD_MAX_IOCTL_BUFFER\n");
- return 1;
- }
- if (data->ioc_inllen2 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen2 larger than OBD_MAX_IOCTL_BUFFER\n");
- return 1;
- }
- if (data->ioc_inllen3 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen3 larger than OBD_MAX_IOCTL_BUFFER\n");
- return 1;
- }
- if (data->ioc_inllen4 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen4 larger than OBD_MAX_IOCTL_BUFFER\n");
- return 1;
- }
- if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
- CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
- CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
- CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
- CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf1 && !data->ioc_plen1) {
- CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf2 && !data->ioc_plen2) {
- CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_plen1 && !data->ioc_pbuf1) {
- CERROR("OBD ioctl: plen1 set but NULL pointer\n");
- return 1;
- }
- if (data->ioc_plen2 && !data->ioc_pbuf2) {
- CERROR("OBD ioctl: plen2 set but NULL pointer\n");
- return 1;
- }
- if (obd_ioctl_packlen(data) > data->ioc_len) {
- CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
- obd_ioctl_packlen(data), data->ioc_len);
- return 1;
- }
- return 0;
-}
-
-
-#include "obd_support.h"
-
-/* function defined in lustre/obdclass/<platform>/<platform>-module.c */
-int obd_ioctl_getdata(char **buf, int *len, void *arg);
-int obd_ioctl_popdata(void *arg, void *data, int len);
-
-static inline void obd_ioctl_freedata(char *buf, int len)
-{
- kvfree(buf);
- return;
-}
-
-/*
- * BSD ioctl description:
- * #define IOC_V1 _IOR(g, n1, long)
- * #define IOC_V2 _IOW(g, n2, long)
- *
- * ioctl(f, IOC_V1, arg);
- * arg will be treated as a long value,
- *
- * ioctl(f, IOC_V2, arg)
- * arg will be treated as a pointer, bsd will call
- * copyin(buf, arg, sizeof(long))
- *
- * To make the BSD ioctl handle arguments correctly and simply,
- * we change _IOR to _IOWR so BSD will copyin obd_ioctl_data
- * for us. Does this change affect Linux? (XXX Liang)
- */
-#define OBD_IOC_DATA_TYPE long
-
-#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DESTROY _IOW ('f', 104, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_SETATTR _IOW ('f', 107, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
-
-
-#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_COPY _IOWR('f', 120, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_MIGR _IOWR('f', 121, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PUNCH _IOWR('f', 122, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_MODULE_DEBUG _IOWR('f', 124, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_BRW_READ _IOWR('f', 125, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_BRW_WRITE _IOWR('f', 126, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_NAME2DEV _IOWR('f', 127, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_UUID2DEV _IOWR('f', 130, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_GETNAME _IOWR('f', 131, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETMDNAME _IOR('f', 131, char[MAX_OBD_NAME])
-#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
-
-#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PING_TARGET _IOW ('f', 136, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139)
-#define OBD_IOC_NO_TRANSNO _IOW ('f', 140, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SET_READONLY _IOW ('f', 141, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
-
-#define OBD_GET_VERSION _IOWR ('f', 144, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_GSS_SUPPORT _IOWR('f', 145, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_CHANGELOG_SEND _IOW ('f', 148, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE)
-/* see also <lustre/lustre_user.h> for ioctls 151-153 */
-/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */
-#define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */
-#define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */
-#define OBD_IOC_LOV_SETEA _IOW ('f', 156, OBD_IOC_DATA_TYPE)
-/* see <lustre/lustre_user.h> for ioctls 157-159 */
-/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */
-#define OBD_IOC_QUOTACHECK _IOW ('f', 160, int)
-/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */
-#define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
-/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */
-#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
-/* see also <lustre/lustre_user.h> for ioctls 163-176 */
-#define OBD_IOC_CHANGELOG_REG _IOW ('f', 177, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_DEREG _IOW ('f', 178, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_CLEAR _IOW ('f', 179, struct obd_ioctl_data)
-#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DORECORD _IOWR('f', 183, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARAM _IOW ('f', 187, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_CATLOGLIST _IOWR('f', 190, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_INFO _IOWR('f', 191, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_PRINT _IOWR('f', 192, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_CHECK _IOWR('f', 195, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_LLOG_CATINFO is deprecated */
-#define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE)
-
-#define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE)
-
-/* <lustre/lustre_user.h> defines ioctl number 218-219 */
-#define OBD_IOC_GET_MNTOPT _IOW('f', 220, mntopt_t)
-
-#define OBD_IOC_ECHO_MD _IOR('f', 221, struct obd_ioctl_data)
-#define OBD_IOC_ECHO_ALLOC_SEQ _IOWR('f', 222, struct obd_ioctl_data)
-
-#define OBD_IOC_START_LFSCK _IOWR('f', 230, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_STOP_LFSCK _IOW('f', 231, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PAUSE_LFSCK _IOW('f', 232, OBD_IOC_DATA_TYPE)
-
-/* XXX _IOWR('f', 250, long) has been defined in
- * libcfs/include/libcfs/libcfs_private.h for debug, don't use it
- */
-
-/* Until such time as we get_info the per-stripe maximum from the OST,
- * we define this to be 2T - 4k, which is the ext3 maxbytes. */
-#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
-
-/* Special values for remove LOV EA from disk */
-#define LOVEA_DELETE_VALUES(size, count, offset) (size == 0 && count == 0 && \
- offset == (typeof(offset))(-1))
-
-/* #define POISON_BULK 0 */
-
-/*
- * l_wait_event is a flexible sleeping function, permitting simple caller
- * configuration of interrupt and timeout sensitivity along with actions to
- * be performed in the event of either exception.
- *
- * The first form of usage looks like this:
- *
- * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
- * intr_handler, callback_data);
- * rc = l_wait_event(waitq, condition, &lwi);
- *
- * l_wait_event() makes the current process wait on 'waitq' until 'condition'
- * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It
- * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
- * 'condition' becomes true, it optionally calls the specified 'intr_handler'
- * if not NULL, and returns -EINTR.
- *
- * If a non-zero timeout is specified, signals are ignored until the timeout
- * has expired. At this time, if 'timeout_handler' is not NULL it is called.
- * If it returns FALSE l_wait_event() continues to wait as described above with
- * signals enabled. Otherwise it returns -ETIMEDOUT.
- *
- * LWI_INTR(intr_handler, callback_data) is shorthand for
- * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
- *
- * The second form of usage looks like this:
- *
- * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
- * rc = l_wait_event(waitq, condition, &lwi);
- *
- * This form is the same as the first except that it COMPLETELY IGNORES
- * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if
- * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
- * can unblock the current process is 'condition' becoming TRUE.
- *
- * Another form of usage is:
- * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
- * timeout_handler);
- * rc = l_wait_event(waitq, condition, &lwi);
- * This is the same as the previous case, but the condition is checked once
- * every 'interval' jiffies (if non-zero).
- *
- * Subtle synchronization point: this macro does *not* necessarily take the
- * wait-queue spin-lock before returning and, hence, the following idiom is
- * safe ONLY when the caller provides some external locking:
- *
- * Thread1 Thread2
- *
- * l_wait_event(&obj->wq, ....); (1)
- *
- * wake_up(&obj->wq): (2)
- * spin_lock(&q->lock); (2.1)
- * __wake_up_common(q, ...); (2.2)
- * spin_unlock(&q->lock, flags); (2.3)
- *
- * kfree(obj); (3)
- *
- * As l_wait_event() may "short-cut" execution and return without taking
- * wait-queue spin-lock, some additional synchronization is necessary to
- * guarantee that step (3) can begin only after (2.3) finishes.
- *
- * XXX nikita: some ptlrpc daemon threads have races of that sort.
- *
- */
-static inline int back_to_sleep(void *arg)
-{
- return 0;
-}
-
-#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))
-
-struct l_wait_info {
- long lwi_timeout;
- long lwi_interval;
- int lwi_allow_intr;
- int (*lwi_on_timeout)(void *);
- void (*lwi_on_signal)(void *);
- void *lwi_cb_data;
-};
-
-/* NB: LWI_TIMEOUT ignores signals completely */
-#define LWI_TIMEOUT(time, cb, data) \
-((struct l_wait_info) { \
- .lwi_timeout = time, \
- .lwi_on_timeout = cb, \
- .lwi_cb_data = data, \
- .lwi_interval = 0, \
- .lwi_allow_intr = 0 \
-})
-
-#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data) \
-((struct l_wait_info) { \
- .lwi_timeout = time, \
- .lwi_on_timeout = cb, \
- .lwi_cb_data = data, \
- .lwi_interval = interval, \
- .lwi_allow_intr = 0 \
-})
-
-#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data) \
-((struct l_wait_info) { \
- .lwi_timeout = time, \
- .lwi_on_timeout = time_cb, \
- .lwi_on_signal = sig_cb, \
- .lwi_cb_data = data, \
- .lwi_interval = 0, \
- .lwi_allow_intr = 0 \
-})
-
-#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data) \
-((struct l_wait_info) { \
- .lwi_timeout = time, \
- .lwi_on_timeout = time_cb, \
- .lwi_on_signal = sig_cb, \
- .lwi_cb_data = data, \
- .lwi_interval = 0, \
- .lwi_allow_intr = 1 \
-})
-
-#define LWI_INTR(cb, data) LWI_TIMEOUT_INTR(0, NULL, cb, data)
-
-#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | \
- sigmask(SIGTERM) | sigmask(SIGQUIT) | \
- sigmask(SIGALRM))
-
-
-/*
- * wait for @condition to become true, but no longer than timeout, specified
- * by @info.
- */
-#define __l_wait_event(wq, condition, info, ret, l_add_wait) \
-do { \
- wait_queue_t __wait; \
- long __timeout = info->lwi_timeout; \
- sigset_t __blocked; \
- int __allow_intr = info->lwi_allow_intr; \
- \
- ret = 0; \
- if (condition) \
- break; \
- \
- init_waitqueue_entry(&__wait, current); \
- l_add_wait(&wq, &__wait); \
- \
- /* Block all signals (just the non-fatal ones if no timeout). */ \
- if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
- __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
- else \
- __blocked = cfs_block_sigsinv(0); \
- \
- for (;;) { \
- if (condition) \
- break; \
- \
- set_current_state(TASK_INTERRUPTIBLE); \
- \
- if (__timeout == 0) { \
- schedule(); \
- } else { \
- long interval = info->lwi_interval ? \
- min_t(long, \
- info->lwi_interval, __timeout) : \
- __timeout; \
- long remaining = schedule_timeout(interval);\
- __timeout = cfs_time_sub(__timeout, \
- cfs_time_sub(interval, remaining));\
- if (__timeout == 0) { \
- if (info->lwi_on_timeout == NULL || \
- info->lwi_on_timeout(info->lwi_cb_data)) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- /* Take signals after the timeout expires. */ \
- if (info->lwi_on_signal != NULL) \
- (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
- } \
- } \
- \
- set_current_state(TASK_RUNNING); \
- \
- if (condition) \
- break; \
- if (cfs_signal_pending()) { \
- if (info->lwi_on_signal != NULL && \
- (__timeout == 0 || __allow_intr)) { \
- if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
- info->lwi_on_signal(info->lwi_cb_data);\
- ret = -EINTR; \
- break; \
- } \
- /* We have to do this here because some signals */ \
- /* are not blockable - ie from strace(1). */ \
- /* In these cases we want to schedule_timeout() */ \
- /* again, because we don't want that to return */ \
- /* -EINTR when the RPC actually succeeded. */ \
- /* the recalc_sigpending() below will deliver the */ \
- /* signal properly. */ \
- cfs_clear_sigpending(); \
- } \
- } \
- \
- cfs_restore_sigs(__blocked); \
- \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-
-
-#define l_wait_event(wq, condition, info) \
-({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, add_wait_queue); \
- __ret; \
-})
-
-#define l_wait_event_exclusive(wq, condition, info) \
-({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, add_wait_queue_exclusive); \
- __ret; \
-})
-
-#define l_wait_event_exclusive_head(wq, condition, info) \
-({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, add_wait_queue_exclusive_head); \
- __ret; \
-})
-
-#define l_wait_condition(wq, condition) \
-({ \
- struct l_wait_info lwi = { 0 }; \
- l_wait_event(wq, condition, &lwi); \
-})
-
-#define l_wait_condition_exclusive(wq, condition) \
-({ \
- struct l_wait_info lwi = { 0 }; \
- l_wait_event_exclusive(wq, condition, &lwi); \
-})
-
-#define l_wait_condition_exclusive_head(wq, condition) \
-({ \
- struct l_wait_info lwi = { 0 }; \
- l_wait_event_exclusive_head(wq, condition, &lwi); \
-})
-
-#define LIBLUSTRE_CLIENT (0)
-
-/** @} lib */
-
-#endif /* _LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h
deleted file mode 100644
index df557c22abbe..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_lite.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LL_H
-#define _LL_H
-
-/** \defgroup lite lite
- *
- * @{
- */
-
-#include "linux/lustre_lite.h"
-
-#include "obd_class.h"
-#include "lustre_net.h"
-#include "lustre_mds.h"
-#include "lustre_ha.h"
-
-/* 4UL * 1024 * 1024 */
-#define LL_MAX_BLKSIZE_BITS (22)
-#define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS)
-
-#include "lustre/lustre_user.h"
-
-
-struct lustre_rw_params {
- int lrp_lock_mode;
- ldlm_policy_data_t lrp_policy;
- u32 lrp_brw_flags;
- int lrp_ast_flags;
-};
-
-/*
- * XXX nikita: this function lives in the header because it is used by both
- * the llite kernel module and the liblustre library, and there is no (?)
- * better place to put it.
- */
-static inline void lustre_build_lock_params(int cmd, unsigned long open_flags,
- __u64 connect_flags,
- loff_t pos, ssize_t len,
- struct lustre_rw_params *params)
-{
- params->lrp_lock_mode = (cmd == OBD_BRW_READ) ? LCK_PR : LCK_PW;
- params->lrp_brw_flags = 0;
-
- params->lrp_policy.l_extent.start = pos;
- params->lrp_policy.l_extent.end = pos + len - 1;
- /*
- * for now O_APPEND always takes local locks.
- */
- if (cmd == OBD_BRW_WRITE && (open_flags & O_APPEND)) {
- params->lrp_policy.l_extent.start = 0;
- params->lrp_policy.l_extent.end = OBD_OBJECT_EOF;
- } else if (LIBLUSTRE_CLIENT && (connect_flags & OBD_CONNECT_SRVLOCK)) {
- /*
- * liblustre: OST-side locking for all non-O_APPEND
- * reads/writes.
- */
- params->lrp_lock_mode = LCK_NL;
- params->lrp_brw_flags = OBD_BRW_SRVLOCK;
- } else {
- /*
- * nothing special for the kernel. In the future llite may use
- * OST-side locks for small writes into highly contended
- * files.
- */
- }
- params->lrp_ast_flags = (open_flags & O_NONBLOCK) ?
- LDLM_FL_BLOCK_NOWAIT : 0;
-}
-
-/*
- * This is embedded into liblustre and llite super-blocks to keep track of
- * connect flags (capabilities) supported by all imports a given mount is
- * connected to.
- */
-struct lustre_client_ocd {
- /*
- * This is conjunction of connect_flags across all imports (LOVs) this
- * mount is connected to. This field is updated by cl_ocd_update()
- * under ->lco_lock.
- */
- __u64 lco_flags;
- struct mutex lco_lock;
- struct obd_export *lco_md_exp;
- struct obd_export *lco_dt_exp;
-};
-
-/*
- * Chain of hash overflow pages.
- */
-struct ll_dir_chain {
- /* XXX something. Later */
-};
-
-static inline void ll_dir_chain_init(struct ll_dir_chain *chain)
-{
-}
-
-static inline void ll_dir_chain_fini(struct ll_dir_chain *chain)
-{
-}
-
-static inline unsigned long hash_x_index(__u64 hash, int hash64)
-{
- if (BITS_PER_LONG == 32 && hash64)
- hash >>= 32;
- /* save hash 0 as index 0 because otherwise we'll save it at
- * page index end (~0UL) and it causes truncate_inode_pages_range()
- * to loop forever.
- */
- return ~0UL - (hash + !hash);
-}
-
-/** @} lite */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
deleted file mode 100644
index 958d012a4388..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_log.h
- *
- * Generic infrastructure for managing a collection of logs.
- * These logs are used for:
- *
- * - orphan recovery: OST adds record on create
- * - mtime/size consistency: the OST adds a record on first write
- * - open/unlinked objects: OST adds a record on destroy
- *
- * - mds unlink log: the MDS adds an entry upon delete
- *
- * - raid1 replication log between OST's
- * - MDS replication logs
- */
-
-#ifndef _LUSTRE_LOG_H
-#define _LUSTRE_LOG_H
-
-/** \defgroup log log
- *
- * @{
- */
-
-#include "obd_class.h"
-#include "lustre/lustre_idl.h"
-
-#define LOG_NAME_LIMIT(logname, name) \
- snprintf(logname, sizeof(logname), "LOGS/%s", name)
-#define LLOG_EEMPTY 4711
-
-enum llog_open_param {
- LLOG_OPEN_EXISTS = 0x0000,
- LLOG_OPEN_NEW = 0x0001,
-};
-
-struct plain_handle_data {
- struct list_head phd_entry;
- struct llog_handle *phd_cat_handle;
- struct llog_cookie phd_cookie; /* cookie of this log in its cat */
-};
-
-struct cat_handle_data {
- struct list_head chd_head;
- struct llog_handle *chd_current_log; /* currently open log */
- struct llog_handle *chd_next_log; /* llog to be used next */
-};
-
-static inline void logid_to_fid(struct llog_logid *id, struct lu_fid *fid)
-{
- /* For compatibility purposes we identify pre-OSD (~< 2.3.51 MDS)
- * logid's by non-zero ogen (inode generation) and convert them
- * into IGIF */
- if (id->lgl_ogen == 0) {
- fid->f_seq = id->lgl_oi.oi.oi_seq;
- fid->f_oid = id->lgl_oi.oi.oi_id;
- fid->f_ver = 0;
- } else {
- lu_igif_build(fid, id->lgl_oi.oi.oi_id, id->lgl_ogen);
- }
-}
-
-static inline void fid_to_logid(struct lu_fid *fid, struct llog_logid *id)
-{
- id->lgl_oi.oi.oi_seq = fid->f_seq;
- id->lgl_oi.oi.oi_id = fid->f_oid;
- id->lgl_ogen = 0;
-}
-
-static inline void logid_set_id(struct llog_logid *log_id, __u64 id)
-{
- log_id->lgl_oi.oi.oi_id = id;
-}
-
-static inline __u64 logid_id(struct llog_logid *log_id)
-{
- return log_id->lgl_oi.oi.oi_id;
-}
-
-struct llog_handle;
-
-/* llog.c - general API */
-int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
- int flags, struct obd_uuid *uuid);
-int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata);
-int llog_process_or_fork(const struct lu_env *env,
- struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata, bool fork);
-int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
- struct llog_handle **lgh, struct llog_logid *logid,
- char *name, enum llog_open_param open_param);
-int llog_close(const struct lu_env *env, struct llog_handle *cathandle);
-int llog_backup(const struct lu_env *env, struct obd_device *obd,
- struct llog_ctxt *ctxt, struct llog_ctxt *bak_ctxt,
- char *name, char *backup);
-
-/* llog_process flags */
-#define LLOG_FLAG_NODEAMON 0x0001
-
-/* llog_cat.c - catalog api */
-struct llog_process_data {
- /**
- * Any useful data needed while processing catalog. This is
- * passed later to process callback.
- */
- void *lpd_data;
- /**
- * Catalog process callback function, called for each record
- * in catalog.
- */
- llog_cb_t lpd_cb;
- /**
- * Start processing the catalog from startcat/startidx
- */
- int lpd_startcat;
- int lpd_startidx;
-};
-
-struct llog_process_cat_data {
- /**
- * Temporary stored first_idx while scanning log.
- */
- int lpcd_first_idx;
- /**
- * Temporary stored last_idx while scanning log.
- */
- int lpcd_last_idx;
-};
-
-struct thandle;
-
-int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle);
-int llog_cat_process_or_fork(const struct lu_env *env,
- struct llog_handle *cat_llh, llog_cb_t cb,
- void *data, int startcat, int startidx, bool fork);
-int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
- llog_cb_t cb, void *data, int startcat, int startidx);
-
-/* llog_obd.c */
-int llog_setup(const struct lu_env *env, struct obd_device *obd,
- struct obd_llog_group *olg, int index,
- struct obd_device *disk_obd, struct llog_operations *op);
-int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt);
-int llog_cleanup(const struct lu_env *env, struct llog_ctxt *);
-
-/* llog_net.c */
-int llog_initiator_connect(struct llog_ctxt *ctxt);
-
-struct llog_operations {
- int (*lop_next_block)(const struct lu_env *env, struct llog_handle *h,
- int *curr_idx, int next_idx, __u64 *offset,
- void *buf, int len);
- int (*lop_prev_block)(const struct lu_env *env, struct llog_handle *h,
- int prev_idx, void *buf, int len);
- int (*lop_read_header)(const struct lu_env *env,
- struct llog_handle *handle);
- int (*lop_setup)(const struct lu_env *env, struct obd_device *obd,
- struct obd_llog_group *olg, int ctxt_idx,
- struct obd_device *disk_obd);
- int (*lop_sync)(struct llog_ctxt *ctxt, struct obd_export *exp,
- int flags);
- int (*lop_cleanup)(const struct lu_env *env, struct llog_ctxt *ctxt);
- int (*lop_cancel)(const struct lu_env *env, struct llog_ctxt *ctxt,
- struct llog_cookie *cookies, int flags);
- int (*lop_connect)(struct llog_ctxt *ctxt, struct llog_logid *logid,
- struct llog_gen *gen, struct obd_uuid *uuid);
- /**
- * Any llog file must be opened first using llog_open(). A llog can be
- * opened by name, by logid, or with neither, in which case a new logid
- * will be generated.
- */
- int (*lop_open)(const struct lu_env *env, struct llog_handle *lgh,
- struct llog_logid *logid, char *name,
- enum llog_open_param);
- /**
- * An opened llog may not exist and this must be checked where needed using
- * the llog_exist() call.
- */
- int (*lop_exist)(struct llog_handle *lgh);
- /**
- * Close the llog file and call llog_free_handle() implicitly. Any opened
- * llog must be closed by a llog_close() call.
- */
- int (*lop_close)(const struct lu_env *env, struct llog_handle *handle);
- /**
- * Create new llog file. The llog must be opened.
- * Must be used only for local llog operations.
- */
- int (*lop_declare_create)(const struct lu_env *env,
- struct llog_handle *handle,
- struct thandle *th);
- /**
- * Write a new record in the llog. It usually appends records but can
- * edit existing records too.
- */
- int (*lop_declare_write_rec)(const struct lu_env *env,
- struct llog_handle *lgh,
- struct llog_rec_hdr *rec,
- int idx, struct thandle *th);
- int (*lop_write_rec)(const struct lu_env *env,
- struct llog_handle *loghandle,
- struct llog_rec_hdr *rec,
- struct llog_cookie *cookie, int cookiecount,
- void *buf, int idx, struct thandle *th);
- /**
- * Add new record in llog catalog. Does the same as llog_write_rec()
- * but using llog catalog.
- */
- int (*lop_declare_add)(const struct lu_env *env,
- struct llog_handle *lgh,
- struct llog_rec_hdr *rec, struct thandle *th);
- int (*lop_add)(const struct lu_env *env, struct llog_handle *lgh,
- struct llog_rec_hdr *rec, struct llog_cookie *cookie,
- void *buf, struct thandle *th);
-};
-
-/* In-memory descriptor for a log object or log catalog */
-struct llog_handle {
- struct rw_semaphore lgh_lock;
- spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
- struct llog_logid lgh_id; /* id of this log */
- struct llog_log_hdr *lgh_hdr;
- int lgh_last_idx;
- int lgh_cur_idx; /* used during llog_process */
- __u64 lgh_cur_offset; /* used during llog_process */
- struct llog_ctxt *lgh_ctxt;
- union {
- struct plain_handle_data phd;
- struct cat_handle_data chd;
- } u;
- char *lgh_name;
- void *private_data;
- struct llog_operations *lgh_logops;
- atomic_t lgh_refcount;
-};
-
-#define LLOG_CTXT_FLAG_UNINITIALIZED 0x00000001
-#define LLOG_CTXT_FLAG_STOP 0x00000002
-
-struct llog_ctxt {
- int loc_idx; /* my index in the obd array of ctxts */
- struct obd_device *loc_obd; /* points back to the containing obd*/
- struct obd_llog_group *loc_olg; /* group containing that ctxt */
- struct obd_export *loc_exp; /* parent "disk" export (e.g. MDS) */
- struct obd_import *loc_imp; /* to use in RPC's: can be backward
- pointing import */
- struct llog_operations *loc_logops;
- struct llog_handle *loc_handle;
- struct mutex loc_mutex; /* protect loc_imp */
- atomic_t loc_refcount;
- long loc_flags; /* flags, see above defines */
-};
-
-#define LLOG_PROC_BREAK 0x0001
-#define LLOG_DEL_RECORD 0x0002
-
-static inline int llog_obd2ops(struct llog_ctxt *ctxt,
- struct llog_operations **lop)
-{
- if (ctxt == NULL)
- return -ENOTCONN;
-
- *lop = ctxt->loc_logops;
- if (*lop == NULL)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static inline int llog_handle2ops(struct llog_handle *loghandle,
- struct llog_operations **lop)
-{
- if (loghandle == NULL || loghandle->lgh_logops == NULL)
- return -EINVAL;
-
- *lop = loghandle->lgh_logops;
- return 0;
-}
-
-static inline int llog_data_len(int len)
-{
- return cfs_size_round(len);
-}
-
-static inline int llog_get_size(struct llog_handle *loghandle)
-{
- if (loghandle && loghandle->lgh_hdr)
- return loghandle->lgh_hdr->llh_count;
- return 0;
-}
-
-static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
-{
- atomic_inc(&ctxt->loc_refcount);
- CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
- atomic_read(&ctxt->loc_refcount));
- return ctxt;
-}
-
-static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
-{
- if (ctxt == NULL)
- return;
- LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON);
- CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
- atomic_read(&ctxt->loc_refcount) - 1);
- __llog_ctxt_put(NULL, ctxt);
-}
-
-static inline void llog_group_init(struct obd_llog_group *olg, int group)
-{
- init_waitqueue_head(&olg->olg_waitq);
- spin_lock_init(&olg->olg_lock);
- mutex_init(&olg->olg_cat_processing);
- olg->olg_seq = group;
-}
-
-static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
- struct llog_ctxt *ctxt, int index)
-{
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
-
- spin_lock(&olg->olg_lock);
- if (olg->olg_ctxts[index] != NULL) {
- spin_unlock(&olg->olg_lock);
- return -EEXIST;
- }
- olg->olg_ctxts[index] = ctxt;
- spin_unlock(&olg->olg_lock);
- return 0;
-}
-
-static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg,
- int index)
-{
- struct llog_ctxt *ctxt;
-
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
-
- spin_lock(&olg->olg_lock);
- if (olg->olg_ctxts[index] == NULL)
- ctxt = NULL;
- else
- ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
- spin_unlock(&olg->olg_lock);
- return ctxt;
-}
-
-static inline void llog_group_clear_ctxt(struct obd_llog_group *olg, int index)
-{
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
- spin_lock(&olg->olg_lock);
- olg->olg_ctxts[index] = NULL;
- spin_unlock(&olg->olg_lock);
-}
-
-static inline struct llog_ctxt *llog_get_context(struct obd_device *obd,
- int index)
-{
- return llog_group_get_ctxt(&obd->obd_olg, index);
-}
-
-static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index)
-{
- return (olg->olg_ctxts[index] == NULL);
-}
-
-static inline int llog_ctxt_null(struct obd_device *obd, int index)
-{
- return llog_group_ctxt_null(&obd->obd_olg, index);
-}
-
-static inline int llog_next_block(const struct lu_env *env,
- struct llog_handle *loghandle, int *cur_idx,
- int next_idx, __u64 *cur_offset, void *buf,
- int len)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_handle2ops(loghandle, &lop);
- if (rc)
- return rc;
- if (lop->lop_next_block == NULL)
- return -EOPNOTSUPP;
-
- rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx,
- cur_offset, buf, len);
- return rc;
-}
-
-static inline int llog_prev_block(const struct lu_env *env,
- struct llog_handle *loghandle,
- int prev_idx, void *buf, int len)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_handle2ops(loghandle, &lop);
- if (rc)
- return rc;
- if (lop->lop_prev_block == NULL)
- return -EOPNOTSUPP;
-
- rc = lop->lop_prev_block(env, loghandle, prev_idx, buf, len);
- return rc;
-}
-
-static inline int llog_connect(struct llog_ctxt *ctxt,
- struct llog_logid *logid, struct llog_gen *gen,
- struct obd_uuid *uuid)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_obd2ops(ctxt, &lop);
- if (rc)
- return rc;
- if (lop->lop_connect == NULL)
- return -EOPNOTSUPP;
-
- rc = lop->lop_connect(ctxt, logid, gen, uuid);
- return rc;
-}
-
-/* llog.c */
-int llog_declare_write_rec(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, int idx,
- struct thandle *th);
-int llog_write_rec(const struct lu_env *env, struct llog_handle *handle,
- struct llog_rec_hdr *rec, struct llog_cookie *logcookies,
- int numcookies, void *buf, int idx, struct thandle *th);
-int lustre_process_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg);
-int lustre_end_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg);
-/** @} log */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
deleted file mode 100644
index b1b05c8a371a..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_mdc.h
- *
- * MDS data structures.
- * See also lustre_idl.h for wire formats of requests.
- */
-
-#ifndef _LUSTRE_MDC_H
-#define _LUSTRE_MDC_H
-
-/** \defgroup mdc mdc
- *
- * @{
- */
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-#include "lustre_intent.h"
-#include "lustre_handles.h"
-#include "../../include/linux/libcfs/libcfs.h"
-#include "obd_class.h"
-#include "lustre/lustre_idl.h"
-#include "lustre_lib.h"
-#include "lustre_dlm.h"
-#include "lustre_export.h"
-
-struct ptlrpc_client;
-struct obd_export;
-struct ptlrpc_request;
-struct obd_device;
-
-struct mdc_rpc_lock {
- struct mutex rpcl_mutex;
- struct lookup_intent *rpcl_it;
- int rpcl_fakes;
-};
-
-#define MDC_FAKE_RPCL_IT ((void *)0x2c0012bfUL)
-
-static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
-{
- mutex_init(&lck->rpcl_mutex);
- lck->rpcl_it = NULL;
-}
-
-static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
- struct lookup_intent *it)
-{
- if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT))
- return;
-
- /* This would normally block until the existing request finishes.
- * If fail_loc is set it will block until the regular request is
- * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set
- * it will only be cleared when all fake requests are finished.
- * Only when all fake requests are finished can normal requests
- * be sent, to ensure they are recoverable again. */
- again:
- mutex_lock(&lck->rpcl_mutex);
-
- if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) {
- lck->rpcl_it = MDC_FAKE_RPCL_IT;
- lck->rpcl_fakes++;
- mutex_unlock(&lck->rpcl_mutex);
- return;
- }
-
- /* This will only happen when the CFS_FAIL_CHECK() was
- * just turned off but there are still requests in progress.
- * Wait until they finish. It doesn't need to be efficient
- * in this extremely rare case, just have low overhead in
- * the common case when it isn't true. */
- while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
- mutex_unlock(&lck->rpcl_mutex);
- schedule_timeout(cfs_time_seconds(1) / 4);
- goto again;
- }
-
- LASSERT(lck->rpcl_it == NULL);
- lck->rpcl_it = it;
-}
-
-static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
- struct lookup_intent *it)
-{
- if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT))
- return;
-
- if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
- mutex_lock(&lck->rpcl_mutex);
-
- LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes);
- lck->rpcl_fakes--;
-
- if (lck->rpcl_fakes == 0)
- lck->rpcl_it = NULL;
-
- } else {
- LASSERTF(it == lck->rpcl_it, "%p != %p\n", it, lck->rpcl_it);
- lck->rpcl_it = NULL;
- }
-
- mutex_unlock(&lck->rpcl_mutex);
-}
-
-/* Update the maximum observed easize and cookiesize. The default easize
- * and cookiesize are initialized to the minimum value but allowed to grow
- * up to a single page in size if required to handle the common case.
- */
-static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
- struct mdt_body *body)
-{
- if (body->valid & OBD_MD_FLMODEASIZE) {
- struct client_obd *cli = &exp->exp_obd->u.cli;
-
- if (cli->cl_max_mds_easize < body->max_mdsize) {
- cli->cl_max_mds_easize = body->max_mdsize;
- cli->cl_default_mds_easize =
- min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
- }
- if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
- cli->cl_max_mds_cookiesize = body->max_cookiesize;
- cli->cl_default_mds_cookiesize =
- min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
- }
- }
-}
-
-
-struct mdc_cache_waiter {
- struct list_head mcw_entry;
- wait_queue_head_t mcw_waitq;
-};
-
-/* mdc/mdc_locks.c */
-int it_disposition(struct lookup_intent *it, int flag);
-void it_clear_disposition(struct lookup_intent *it, int flag);
-void it_set_disposition(struct lookup_intent *it, int flag);
-int it_open_error(int phase, struct lookup_intent *it);
-
-static inline bool cl_is_lov_delay_create(unsigned int flags)
-{
- return (flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE;
-}
-
-static inline void cl_lov_delay_create_clear(unsigned int *flags)
-{
- if ((*flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE)
- *flags &= ~O_LOV_DELAY_CREATE;
-}
-
-/** @} mdc */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
deleted file mode 100644
index a16eb8b61178..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_mds.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_mds.h
- *
- * MDS data structures.
- * See also lustre_idl.h for wire formats of requests.
- */
-
-#ifndef _LUSTRE_MDS_H
-#define _LUSTRE_MDS_H
-
-/** \defgroup mds mds
- *
- * @{
- */
-
-#include "lustre_handles.h"
-#include "../../include/linux/libcfs/libcfs.h"
-#include "lustre/lustre_idl.h"
-#include "lustre_lib.h"
-#include "lustre_dlm.h"
-#include "lustre_export.h"
-
-struct mds_group_info {
- struct obd_uuid *uuid;
- int group;
-};
-
-#define MDD_OBD_NAME "mdd_obd"
-#define MDD_OBD_UUID "mdd_obd_uuid"
-
-static inline int md_should_create(__u64 flags)
-{
- return !(flags & MDS_OPEN_DELAY_CREATE ||
- !(flags & FMODE_WRITE));
-}
-
-/* these are local flags, used only on the client, private */
-#define M_CHECK_STALE 0200000000
-
-/** @} mds */
-
-#endif
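An aside on the md_should_create() helper in the file removed above: it returns true only when the open is writable (FMODE_WRITE) and does not request delayed creation (MDS_OPEN_DELAY_CREATE). Below is a minimal stand-alone sketch of the same truth table; the flag values and names are placeholders for illustration, not the real Lustre or kernel bit values.

#include <stdio.h>

/* Placeholder bits for illustration only; the real values come from
 * lustre_idl.h and the kernel's FMODE_* definitions. */
#define DEMO_FMODE_WRITE        0x2ULL
#define DEMO_OPEN_DELAY_CREATE  0x100ULL

/* Mirrors the logic of md_should_create(): create the object now only for
 * writable opens that did not ask to delay creation. */
static int demo_should_create(unsigned long long flags)
{
	return !(flags & DEMO_OPEN_DELAY_CREATE ||
		 !(flags & DEMO_FMODE_WRITE));
}

int main(void)
{
	printf("%d\n", demo_should_create(DEMO_FMODE_WRITE));       /* 1 */
	printf("%d\n", demo_should_create(0));                       /* 0: read-only */
	printf("%d\n", demo_should_create(DEMO_FMODE_WRITE |
					  DEMO_OPEN_DELAY_CREATE));  /* 0: delayed */
	return 0;
}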
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
deleted file mode 100644
index af3bf2164ef9..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ /dev/null
@@ -1,2893 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/** \defgroup PtlRPC Portal RPC and networking module.
- *
- * PortalRPC is the layer used by rest of lustre code to achieve network
- * communications: establish connections with corresponding export and import
- * states, listen for a service, send and receive RPCs.
- * PortalRPC also includes base recovery framework: packet resending and
- * replaying, reconnections, pinger.
- *
- * PortalRPC utilizes LNet as its transport layer.
- *
- * @{
- */
-
-
-#ifndef _LUSTRE_NET_H
-#define _LUSTRE_NET_H
-
-/** \defgroup net net
- *
- * @{
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/nidstr.h"
-#include "../../include/linux/lnet/api.h"
-#include "lustre/lustre_idl.h"
-#include "lustre_ha.h"
-#include "lustre_sec.h"
-#include "lustre_import.h"
-#include "lprocfs_status.h"
-#include "lu_object.h"
-#include "lustre_req_layout.h"
-
-#include "obd_support.h"
-#include "lustre_ver.h"
-
-/* MD flags we _always_ use */
-#define PTLRPC_MD_OPTIONS 0
-
-/**
- * Max # of bulk operations in one request.
- * In order for the client and server to properly negotiate the maximum
- * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
- * value. The client is free to limit the actual RPC size for any bulk
- * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
-#define PTLRPC_BULK_OPS_BITS 2
-#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
-/**
- * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
- * should not be used on the server at all. Otherwise, it imposes a
- * protocol limitation on the maximum RPC size that can be used by any
- * RPC sent to that server in the future. Instead, the server should
- * use the negotiated per-client ocd_brw_size to determine the bulk
- * RPC count. */
-#define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
-
-/**
- * Define maxima for bulk I/O.
- *
- * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
- * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
- * currently supported maximum between peers at connect via ocd_brw_size.
- */
-#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
-#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
-
-#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
-#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
-#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-
-/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
-# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
-# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
-# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
-# endif
-# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
-# error "PTLRPC_MAX_BRW_SIZE too big"
-# endif
-# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
-# error "PTLRPC_MAX_BRW_PAGES too big"
-# endif
-
-#define PTLRPC_NTHRS_INIT 2
-
-/**
- * Buffer Constants
- *
- * Constants determine how memory is used to buffer incoming service requests.
- *
- * ?_NBUFS # buffers to allocate when growing the pool
- * ?_BUFSIZE # bytes in a single request buffer
- * ?_MAXREQSIZE # maximum request service will receive
- *
- * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
- * of ?_NBUFS is added to the pool.
- *
- * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
- * considered full when less than ?_MAXREQSIZE is left in them.
- */
-/**
- * Thread Constants
- *
- * Constants determine how threads are created for ptlrpc service.
- *
- * ?_NTHRS_INIT # threads to create for each service partition on
- * initializing. If it's non-affinity service and
- * there is only one partition, it's the overall #
- * threads for the service while initializing.
- * ?_NTHRS_BASE # threads should be created at least for each
- * ptlrpc partition to keep the service healthy.
- * It's the low-water mark of threads upper-limit
- * for each partition.
- * ?_THR_FACTOR # threads can be added on threads upper-limit for
- * each CPU core. This factor is only for reference,
- * we might decrease value of factor if number of cores
- * per CPT is above a limit.
- * ?_NTHRS_MAX # overall threads can be created for a service,
- * it's a soft limit because if service is running
- * on machine with hundreds of cores and tens of
- * CPU partitions, we need to guarantee each partition
- * has ?_NTHRS_BASE threads, which means total threads
- * will be ?_NTHRS_BASE * number_of_cpts which can
- * exceed ?_NTHRS_MAX.
- *
- * Examples
- *
- * #define MDS_NTHRS_INIT 2
- * #define MDS_NTHRS_BASE 64
- * #define MDS_NTHRS_FACTOR 8
- * #define MDS_NTHRS_MAX 1024
- *
- * Example 1):
- * ---------------------------------------------------------------------
- * Server(A) has 16 cores, user configured it to 4 partitions so each
- * partition has 4 cores, then actual number of service threads on each
- * partition is:
- * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
- *
- * Total number of threads for the service is:
- * 96 * partitions(4) = 384
- *
- * Example 2):
- * ---------------------------------------------------------------------
- * Server(B) has 32 cores, user configured it to 4 partitions so each
- * partition has 8 cores, then actual number of service threads on each
- * partition is:
- * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
- *
- * Total number of threads for the service is:
- * 128 * partitions(4) = 512
- *
- * Example 3):
- * ---------------------------------------------------------------------
- * Server(B) has 96 cores, user configured it to 8 partitions so each
- * partition has 12 cores, then actual number of service threads on each
- * partition is:
- * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
- *
- * Total number of threads for the service is:
- * 160 * partitions(8) = 1280
- *
- * However, it's above the soft limit MDS_NTHRS_MAX, so we choose this number
- * as upper limit of threads number for each partition:
- * MDS_NTHRS_MAX(1024) / partitions(8) = 128
- *
- * Example 4):
- * ---------------------------------------------------------------------
- * Server(C) has a thousand cores and the user configured it to 32 partitions
- * MDS_NTHRS_BASE(64) * 32 = 2048
- *
- * which is already above soft limit MDS_NTHRS_MAX(1024), but we still need
- * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads
- * to keep service healthy, so total number of threads will just be 2048.
- *
- * NB: we don't suggest choosing a server with that many cores because the backend
- * filesystem itself, buffer cache, or underlying network stack might
- * have some SMP scalability issues at that large scale.
- *
- * If user already has a fat machine with hundreds or thousands of cores,
- * there are two choices for configuration:
- * a) create CPU table from subset of all CPUs and run Lustre on
- * top of this subset
- * b) bind service threads on a few partitions, see modparameters of
- * MDS and OSS for details
- *
- * NB: these calculations (and examples below) are simplified to help
- * understanding, the real implementation is a little more complex,
- * please see ptlrpc_server_nthreads_check() for details.
- *
- */
-
- /*
- * LDLM threads constants:
- *
- * Given 8 as factor and 24 as base threads number
- *
- * example 1)
- * On 4-core machine we will have 24 + 8 * 4 = 56 threads.
- *
- * example 2)
- * On 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
- * threads for each partition and total threads number will be 112.
- *
- * example 3)
- * On 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
- * threads for each partition to keep service healthy, so total threads
- * number should be 24 * 8 = 192.
- *
- * So with these constants, threads number will be at the similar level
- * of old versions, unless target machine has over a hundred cores
- */
-#define LDLM_THR_FACTOR 8
-#define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
-#define LDLM_NTHRS_BASE 24
-#define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128)
-
-#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
-#define LDLM_CLIENT_NBUFS 1
-#define LDLM_SERVER_NBUFS 64
-#define LDLM_BUFSIZE (8 * 1024)
-#define LDLM_MAXREQSIZE (5 * 1024)
-#define LDLM_MAXREPSIZE (1024)
-
-#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
-
-#define OST_MAXREQSIZE (5 * 1024)
-
-/* Macro to hide a typecast. */
-#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
-
-/**
- * Structure to define a single portal connection.
- */
-struct ptlrpc_connection {
- /** linkage for connections hash table */
- struct hlist_node c_hash;
- /** Our own lnet nid for this connection */
- lnet_nid_t c_self;
- /** Remote side nid for this connection */
- lnet_process_id_t c_peer;
- /** UUID of the other side */
- struct obd_uuid c_remote_uuid;
- /** reference counter for this connection */
- atomic_t c_refcount;
-};
-
-/** Client definition for PortalRPC */
-struct ptlrpc_client {
- /** What lnet portal does this client send messages to by default */
- __u32 cli_request_portal;
- /** What portal do we expect replies on */
- __u32 cli_reply_portal;
- /** Name of the client */
- char *cli_name;
-};
-
-/** state flags of requests */
-/* XXX only ones left are those used by the bulk descs as well! */
-#define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
-#define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
-
-#define REQ_MAX_ACK_LOCKS 8
-
-union ptlrpc_async_args {
- /**
- * Scratchpad for passing args to completion interpreter. Users
- * cast to the struct of their choosing, and CLASSERT that this is
- * big enough. For _tons_ of context, kmalloc a struct and store
- * a pointer to it here. The pointer_arg ensures this struct is at
- * least big enough for that.
- */
- void *pointer_arg[11];
- __u64 space[7];
-};
-
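The scratchpad union above is normally used by overlaying a caller-defined struct on rq_async_args and asserting at compile time that it fits. Here is a minimal stand-alone sketch of that pattern, with C11 _Static_assert standing in for Lustre's CLASSERT and all names invented for the demo.

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down copy of the scratchpad union, for demonstration only. */
union demo_async_args {
	void    *pointer_arg[11];
	uint64_t space[7];
};

/* Caller-defined interpreter context, overlaid on the scratchpad. */
struct demo_open_args {
	int      oa_flags;
	uint64_t oa_cookie;
};

/* CLASSERT() in Lustre is a compile-time assertion; _Static_assert plays
 * the same role here: the overlay must fit inside the scratchpad. */
_Static_assert(sizeof(struct demo_open_args) <= sizeof(union demo_async_args),
	       "demo_open_args does not fit into the async-args scratchpad");

int main(void)
{
	union demo_async_args scratch = { { 0 } };
	struct demo_open_args *oa = (struct demo_open_args *)&scratch;

	oa->oa_flags = 1;
	oa->oa_cookie = 42;
	printf("flags=%d cookie=%llu\n", oa->oa_flags,
	       (unsigned long long)oa->oa_cookie);
	return 0;
}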
-struct ptlrpc_request_set;
-typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
-typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
-
-/**
- * Definition of request set structure.
- * A request set is a list of requests (not necessarily to the same target)
- * that, once populated with RPCs, could be sent in parallel.
- * There are two kinds of request sets: general purpose ones and those with a
- * dedicated serving thread; an example of the latter is the ptlrpcd set.
- * For general purpose sets, once the set has started sending it is impossible
- * to add new requests to it.
- * Provides a way to call "completion callbacks" when all requests in the set
- * have returned.
- */
-struct ptlrpc_request_set {
- atomic_t set_refcount;
- /** number of in queue requests */
- atomic_t set_new_count;
- /** number of uncompleted requests */
- atomic_t set_remaining;
- /** wait queue to wait on for request events */
- wait_queue_head_t set_waitq;
- wait_queue_head_t *set_wakeup_ptr;
- /** List of requests in the set */
- struct list_head set_requests;
- /**
- * List of completion callbacks to be called when the set is completed
- * This is only used if \a set_interpret is NULL.
- * Links struct ptlrpc_set_cbdata.
- */
- struct list_head set_cblist;
- /** Completion callback, if only one. */
- set_interpreter_func set_interpret;
- /** opaq argument passed to completion \a set_interpret callback. */
- void *set_arg;
- /**
- * Lock for \a set_new_requests manipulations
- * locked so that any old caller can communicate requests to
- * the set holder who can then fold them into the lock-free set
- */
- spinlock_t set_new_req_lock;
- /** List of new yet unsent requests. Only used with ptlrpcd now. */
- struct list_head set_new_requests;
-
- /** rq_status of requests that have been freed already */
- int set_rc;
- /** Additional fields used by the flow control extension */
- /** Maximum number of RPCs in flight */
- int set_max_inflight;
- /** Callback function used to generate RPCs */
- set_producer_func set_producer;
- /** opaq argument passed to the producer callback */
- void *set_producer_arg;
-};
-
-/**
- * Description of a single ptrlrpc_set callback
- */
-struct ptlrpc_set_cbdata {
- /** List linkage item */
- struct list_head psc_item;
- /** Pointer to interpreting function */
- set_interpreter_func psc_interpret;
- /** Opaq argument to pass to the callback */
- void *psc_data;
-};
-
-struct ptlrpc_bulk_desc;
-struct ptlrpc_service_part;
-struct ptlrpc_service;
-
-/**
- * ptlrpc callback & work item stuff
- */
-struct ptlrpc_cb_id {
- void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
- void *cbid_arg; /* additional arg */
-};
-
-/** Maximum number of locks to fit into reply state */
-#define RS_MAX_LOCKS 8
-#define RS_DEBUG 0
-
-/**
- * Structure to define reply state on the server
- * Reply state holds various reply message information. Also for "difficult"
- * replies (rep-ack case) we store the state after sending reply and wait
- * for the client to acknowledge the reception. In these cases locks could be
- * added to the state for replay/failover consistency guarantees.
- */
-struct ptlrpc_reply_state {
- /** Callback description */
- struct ptlrpc_cb_id rs_cb_id;
- /** Linkage for list of all reply states in a system */
- struct list_head rs_list;
- /** Linkage for list of all reply states on same export */
- struct list_head rs_exp_list;
- /** Linkage for list of all reply states for same obd */
- struct list_head rs_obd_list;
-#if RS_DEBUG
- struct list_head rs_debug_list;
-#endif
- /** A spinlock to protect the reply state flags */
- spinlock_t rs_lock;
- /** Reply state flags */
- unsigned long rs_difficult:1; /* ACK/commit stuff */
- unsigned long rs_no_ack:1; /* no ACK, even for
- difficult requests */
- unsigned long rs_scheduled:1; /* being handled? */
- unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
- unsigned long rs_handled:1; /* been handled yet? */
- unsigned long rs_on_net:1; /* reply_out_callback pending? */
- unsigned long rs_prealloc:1; /* rs from prealloc list */
- unsigned long rs_committed:1;/* the transaction was committed
- * and the rs was dispatched */
- /** Size of the state */
- int rs_size;
- /** opcode */
- __u32 rs_opc;
- /** Transaction number */
- __u64 rs_transno;
- /** xid */
- __u64 rs_xid;
- struct obd_export *rs_export;
- struct ptlrpc_service_part *rs_svcpt;
- /** Lnet metadata handle for the reply */
- lnet_handle_md_t rs_md_h;
- atomic_t rs_refcount;
-
- /** Context for the service thread */
- struct ptlrpc_svc_ctx *rs_svc_ctx;
- /** Reply buffer (actually sent to the client), encoded if needed */
- struct lustre_msg *rs_repbuf; /* wrapper */
- /** Size of the reply buffer */
- int rs_repbuf_len; /* wrapper buf length */
- /** Size of the reply message */
- int rs_repdata_len; /* wrapper msg length */
- /**
- * Actual reply message. Its content is encrypted (if needed) to
- * produce reply buffer for actual sending. In simple case
- * of no network encryption we just set \a rs_repbuf to \a rs_msg
- */
- struct lustre_msg *rs_msg; /* reply message */
-
- /** Number of locks awaiting client ACK */
- int rs_nlocks;
- /** Handles of locks awaiting client reply ACK */
- struct lustre_handle rs_locks[RS_MAX_LOCKS];
- /** Lock modes of locks in \a rs_locks */
- ldlm_mode_t rs_modes[RS_MAX_LOCKS];
-};
-
-struct ptlrpc_thread;
-
-/** RPC stages */
-enum rq_phase {
- RQ_PHASE_NEW = 0xebc0de00,
- RQ_PHASE_RPC = 0xebc0de01,
- RQ_PHASE_BULK = 0xebc0de02,
- RQ_PHASE_INTERPRET = 0xebc0de03,
- RQ_PHASE_COMPLETE = 0xebc0de04,
- RQ_PHASE_UNREGISTERING = 0xebc0de05,
- RQ_PHASE_UNDEFINED = 0xebc0de06
-};
-
-/** Type of request interpreter call-back */
-typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *arg, int rc);
-
-/**
- * Definition of request pool structure.
- * The pool is used to store empty preallocated requests for the case
- * when we would actually need to send something without performing
- * any allocations (to avoid e.g. OOM).
- */
-struct ptlrpc_request_pool {
- /** Locks the list */
- spinlock_t prp_lock;
- /** list of ptlrpc_request structs */
- struct list_head prp_req_list;
- /** Maximum message size that would fit into a request from this pool */
- int prp_rq_size;
- /** Function to allocate more requests for this pool */
- int (*prp_populate)(struct ptlrpc_request_pool *, int);
-};
-
-struct lu_context;
-struct lu_env;
-
-struct ldlm_lock;
-
-/**
- * \defgroup nrs Network Request Scheduler
- * @{
- */
-struct ptlrpc_nrs_policy;
-struct ptlrpc_nrs_resource;
-struct ptlrpc_nrs_request;
-
-/**
- * NRS control operations.
- *
- * These are common for all policies.
- */
-enum ptlrpc_nrs_ctl {
- /**
- * Not a valid opcode.
- */
- PTLRPC_NRS_CTL_INVALID,
- /**
- * Activate the policy.
- */
- PTLRPC_NRS_CTL_START,
- /**
- * Reserved for multiple primary policies, which may be a possibility
- * in the future.
- */
- PTLRPC_NRS_CTL_STOP,
- /**
- * Policies can start using opcodes from this value and onwards for
- * their own purposes; the assigned value itself is arbitrary.
- */
- PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
-};
-
-/**
- * ORR policy operations
- */
-enum nrs_ctl_orr {
- NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
- NRS_CTL_ORR_WR_QUANTUM,
- NRS_CTL_ORR_RD_OFF_TYPE,
- NRS_CTL_ORR_WR_OFF_TYPE,
- NRS_CTL_ORR_RD_SUPP_REQ,
- NRS_CTL_ORR_WR_SUPP_REQ,
-};
-
-/**
- * NRS policy operations.
- *
- * These determine the behaviour of a policy, and are called in response to
- * NRS core events.
- */
-struct ptlrpc_nrs_pol_ops {
- /**
- * Called during policy registration; this operation is optional.
- *
- * \param[in,out] policy The policy being initialized
- */
- int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
- /**
- * Called during policy unregistration; this operation is optional.
- *
- * \param[in,out] policy The policy being unregistered/finalized
- */
- void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
- /**
- * Called when activating a policy via lprocfs; policies allocate and
- * initialize their resources here; this operation is optional.
- *
- * \param[in,out] policy The policy being started
- *
- * \see nrs_policy_start_locked()
- */
- int (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
- /**
- * Called when deactivating a policy via lprocfs; policies deallocate
- * their resources here; this operation is optional
- *
- * \param[in,out] policy The policy being stopped
- *
- * \see nrs_policy_stop0()
- */
- void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
- /**
- * Used for policy-specific operations; i.e. not generic ones like
- * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
- * to an ioctl; this operation is optional.
- *
- * \param[in,out] policy The policy carrying out operation \a opc
- * \param[in] opc The command operation being carried out
- * \param[in,out] arg	  A generic buffer for communication between the
- * user and the control operation
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \see ptlrpc_nrs_policy_control()
- */
- int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg);
-
- /**
- * Called when obtaining references to the resources of the resource
- * hierarchy for a request that has arrived for handling at the PTLRPC
- * service. Policies should return -ve for requests they do not wish
- * to handle. This operation is mandatory.
- *
- * \param[in,out] policy The policy we're getting resources for.
- * \param[in,out] nrq The request we are getting resources for.
- * \param[in] parent The parent resource of the resource being
- * requested; set to NULL if none.
- * \param[out] resp The resource is to be returned here; the
- * fallback policy in an NRS head should
- * \e always return a non-NULL pointer value.
- * \param[in] moving_req When set, signifies that this is an attempt
- * to obtain resources for a request being moved
- * to the high-priority NRS head by
- * ldlm_lock_reorder_req().
- * This implies two things:
- * 1. We are under obd_export::exp_rpc_lock and
- * so should not sleep.
- *			  2. We should not perform non-idempotent operations,
- *			  and may skip idempotent operations that
- * were carried out when resources were first
- * taken for the request when it was initialized
- * in ptlrpc_nrs_req_initialize().
- *
- * \retval 0, +ve The level of the returned resource in the resource
- * hierarchy; currently only 0 (for a non-leaf resource)
- * and 1 (for a leaf resource) are supported by the
- * framework.
- * \retval -ve error
- *
- * \see ptlrpc_nrs_req_initialize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- */
- int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req);
- /**
- * Called when releasing references taken for resources in the resource
- * hierarchy for the request; this operation is optional.
- *
- * \param[in,out] policy The policy the resource belongs to
- * \param[in] res The resource to be freed
- *
- * \see ptlrpc_nrs_req_finalize()
- * \see ptlrpc_nrs_hpreq_add_nolock()
- */
- void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
- const struct ptlrpc_nrs_resource *res);
-
- /**
- * Obtains a request for handling from the policy, and optionally
- * removes the request from the policy; this operation is mandatory.
- *
- * \param[in,out] policy The policy to poll
- * \param[in] peek When set, signifies that we just want to
- * examine the request, and not handle it, so the
- * request is not removed from the policy.
- * \param[in] force When set, it will force a policy to return a
- * request if it has one queued.
- *
- * \retval NULL No request available for handling
- * \retval valid-pointer The request polled for handling
- *
- * \see ptlrpc_nrs_req_get_nolock()
- */
- struct ptlrpc_nrs_request *
- (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
- bool force);
- /**
- * Called when attempting to add a request to a policy for later
- * handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy on which to enqueue \a nrq
- * \param[in,out] nrq The request to enqueue
- *
- * \retval 0 success
- * \retval != 0 error
- *
- * \see ptlrpc_nrs_req_add_nolock()
- */
- int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Removes a request from the policy's set of pending requests. Normally
- * called after a request has been polled successfully from the policy
- * for handling; this operation is mandatory.
- *
- * \param[in,out] policy The policy the request \a nrq belongs to
- * \param[in,out] nrq The request to dequeue
- */
- void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Called after the request has been handled. Could be used for
- * job/resource control; this operation is optional.
- *
- * \param[in,out] policy The policy that has finished handling request
- * \a nrq
- * \param[in,out] nrq The request
- *
- * \pre assert_spin_locked(&svcpt->scp_req_lock)
- *
- * \see ptlrpc_nrs_req_stop_nolock()
- */
- void (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
- * Registers the policy's lprocfs interface with a PTLRPC service.
- *
- * \param[in] svc The service
- *
- * \retval 0 success
- * \retval != 0 error
- */
- int (*op_lprocfs_init) (struct ptlrpc_service *svc);
- /**
- * Unregisters the policy's lprocfs interface from a PTLRPC service.
- *
- * In cases of failed policy registration in
- * \e ptlrpc_nrs_policy_register(), this function may be called for a
- * service which has not registered the policy successfully, so
- * implementations of this method should make sure their operations are
- * safe in such cases.
- *
- * \param[in] svc The service
- */
- void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
-};
-
-/**
- * Policy flags
- */
-enum nrs_policy_flags {
- /**
- * Fallback policy, use this flag only on a single supported policy per
- * service. The flag cannot be used on policies that use
- * \e PTLRPC_NRS_FL_REG_EXTERN
- */
- PTLRPC_NRS_FL_FALLBACK = (1 << 0),
- /**
- * Start policy immediately after registering.
- */
- PTLRPC_NRS_FL_REG_START = (1 << 1),
- /**
- * This is a policy registering from a module different to the one NRS
- * core ships in (currently ptlrpc).
- */
- PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
-};
-
-/**
- * NRS queue type.
- *
- * Denotes whether an NRS instance is for handling normal or high-priority
- * RPCs, or whether an operation pertains to one or both of the NRS instances
- * in a service.
- */
-enum ptlrpc_nrs_queue_type {
- PTLRPC_NRS_QUEUE_REG = (1 << 0),
- PTLRPC_NRS_QUEUE_HP = (1 << 1),
- PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
-};
-
-/**
- * NRS head
- *
- * A PTLRPC service has at least one NRS head instance for handling normal
- * priority RPCs, and may optionally have a second NRS head instance for
- * handling high-priority RPCs. Each NRS head maintains a list of available
- * policies, of which one and only one policy is acting as the fallback policy,
- * and optionally a different policy may be acting as the primary policy. For
- * all RPCs handled by this NRS head instance, NRS core will first attempt to
- * enqueue the RPC using the primary policy (if any). The fallback policy is
- * used in the following cases:
- * - when there was no primary policy in the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
- * was initialized.
- * - when the primary policy that was at the
- *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
- *   RPC was initialized indicated that it did not wish, or for some other
- *   reason was not able, to handle the request, by returning an invalid NRS
- *   resource reference.
- * - when the primary policy that was at the
- *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
- *   RPC was initialized fails later during the request enqueueing stage.
- *
- * \see nrs_resource_get_safe()
- * \see nrs_request_enqueue()
- */
-struct ptlrpc_nrs {
- spinlock_t nrs_lock;
- /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
- /**
- * List of registered policies
- */
- struct list_head nrs_policy_list;
- /**
- * List of policies with queued requests. Policies that have any
- * outstanding requests are queued here, and this list is queried
- * in a round-robin manner from NRS core when obtaining a request
- * for handling. This ensures that requests from policies that at some
- * point transition away from the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
- */
- struct list_head nrs_policy_queued;
- /**
- * Service partition for this NRS head
- */
- struct ptlrpc_service_part *nrs_svcpt;
- /**
- * Primary policy, which is the preferred policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_primary;
- /**
- * Fallback policy, which is the backup policy for handling RPCs
- */
- struct ptlrpc_nrs_policy *nrs_policy_fallback;
- /**
- * This NRS head handles either HP or regular requests
- */
- enum ptlrpc_nrs_queue_type nrs_queue_type;
- /**
- * # queued requests from all policies in this NRS head
- */
- unsigned long nrs_req_queued;
- /**
- * # scheduled requests from all policies in this NRS head
- */
- unsigned long nrs_req_started;
- /**
- * # policies on this NRS
- */
- unsigned nrs_num_pols;
- /**
- * This NRS head is in progress of starting a policy
- */
- unsigned nrs_policy_starting:1;
- /**
- * In progress of shutting down the whole NRS head; used during
- * unregistration
- */
- unsigned nrs_stopping:1;
-};
-
-#define NRS_POL_NAME_MAX 16
-
-struct ptlrpc_nrs_pol_desc;
-
-/**
- * Service compatibility predicate; this determines whether a policy is adequate
- * for handling RPCs of a particular PTLRPC service.
- *
- * XXX: This should give the same result during policy registration and
- * unregistration, and for all partitions of a service, so the result must not
- * depend on transient service state or other properties that may change.
- */
-typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc);
-
-struct ptlrpc_nrs_pol_conf {
- /**
- * Human-readable policy name
- */
- char nc_name[NRS_POL_NAME_MAX];
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *nc_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t nc_compat;
- /**
- * Set for policies that support a single ptlrpc service, i.e. ones that
- * have \a pd_compat set to nrs_policy_compat_one(). The variable value
- * depicts the name of the single service that such policies are
- * compatible with.
- */
- const char *nc_compat_svc_name;
- /**
- * Owner module for this policy descriptor; policies registering from a
- * different module to the one the NRS framework is held within
- * (currently ptlrpc), should set this field to THIS_MODULE.
- */
- struct module *nc_owner;
- /**
- * Policy registration flags; a bitmask of \e nrs_policy_flags
- */
- unsigned nc_flags;
-};
-
-/**
- * NRS policy registering descriptor
- *
- * Is used to hold a description of a policy that can be passed to NRS core in
- * order to register the policy with NRS heads in different PTLRPC services.
- */
-struct ptlrpc_nrs_pol_desc {
- /**
- * Human-readable policy name
- */
- char pd_name[NRS_POL_NAME_MAX];
- /**
- * Link into nrs_core::nrs_policies
- */
- struct list_head pd_list;
- /**
- * NRS operations for this policy
- */
- const struct ptlrpc_nrs_pol_ops *pd_ops;
- /**
- * Service compatibility predicate
- */
- nrs_pol_desc_compat_t pd_compat;
- /**
- * Set for policies that are compatible with only one PTLRPC service.
- *
- * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
- */
- const char *pd_compat_svc_name;
- /**
- * Owner module for this policy descriptor.
- *
- * We need to hold a reference to the module whenever we might make use
- * of any of the module's contents, i.e.
- * - If one or more instances of the policy are at a state where they
- * might be handling a request, i.e.
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
- * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
- * is taken on the module when
- * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
- * becomes 0, so that we hold only one reference to the module maximum
- * at any time.
- *
- * We do not need to hold a reference to the module, even though we
- * might use code and data from the module, in the following cases:
- * - During external policy registration, because this should happen in
- * the module's init() function, in which case the module is safe from
- * removal because a reference is being held on the module by the
- * kernel, and iirc kmod (and I guess module-init-tools also) will
- * serialize any racing processes properly anyway.
- * - During external policy unregistration, because this should happen
- * in a module's exit() function, and any attempts to start a policy
- * instance would need to take a reference on the module, and this is
- * not possible once we have reached the point where the exit()
- * handler is called.
- * - During service registration and unregistration, as service setup
- * and cleanup, and policy registration, unregistration and policy
- * instance starting, are serialized by \e nrs_core::nrs_mutex, so
- * as long as users adhere to the convention of registering policies
- * in init() and unregistering them in module exit() functions, there
- * should not be a race between these operations.
- * - During any policy-specific lprocfs operations, because a reference
- * is held by the kernel on a proc entry that has been entered by a
- * syscall, so as long as proc entries are removed during unregistration time,
- * then unregistration and lprocfs operations will be properly
- * serialized.
- */
- struct module *pd_owner;
- /**
- * Bitmask of \e nrs_policy_flags
- */
- unsigned pd_flags;
- /**
- * # of references on this descriptor
- */
- atomic_t pd_refs;
-};
-
-/**
- * NRS policy state
- *
- * Policies transition from one state to the other during their lifetime
- */
-enum ptlrpc_nrs_pol_state {
- /**
- * Not a valid policy state.
- */
- NRS_POL_STATE_INVALID,
- /**
- * Policies are at this state either at the start of their life, or
- * transition here when the user selects a different policy to act
- * as the primary one.
- */
- NRS_POL_STATE_STOPPED,
- /**
- * Policy is in progress of stopping
- */
- NRS_POL_STATE_STOPPING,
- /**
- * Policy is in progress of starting
- */
- NRS_POL_STATE_STARTING,
- /**
- * A policy is in this state in two cases:
- * - it is the fallback policy, which is always in this state.
- *   - it has been activated by the user; i.e. it is the primary policy.
- */
- NRS_POL_STATE_STARTED,
-};
-
-/**
- * NRS policy information
- *
- * Used for obtaining information for the status of a policy via lprocfs
- */
-struct ptlrpc_nrs_pol_info {
- /**
- * Policy name
- */
- char pi_name[NRS_POL_NAME_MAX];
- /**
- * Current policy state
- */
- enum ptlrpc_nrs_pol_state pi_state;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pi_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pi_req_started;
- /**
- * Is this a fallback policy?
- */
- unsigned pi_fallback:1;
-};
-
-/**
- * NRS policy
- *
- * There is one instance of this for each policy in each NRS head of each
- * PTLRPC service partition.
- */
-struct ptlrpc_nrs_policy {
- /**
- * Linkage into the NRS head's list of policies,
- * ptlrpc_nrs:nrs_policy_list
- */
- struct list_head pol_list;
- /**
- * Linkage into the NRS head's list of policies with enqueued
- * requests ptlrpc_nrs:nrs_policy_queued
- */
- struct list_head pol_list_queued;
- /**
- * Current state of this policy
- */
- enum ptlrpc_nrs_pol_state pol_state;
- /**
- * Bitmask of nrs_policy_flags
- */
- unsigned pol_flags;
- /**
- * # RPCs enqueued for later dispatching by the policy
- */
- long pol_req_queued;
- /**
- * # RPCs started for dispatch by the policy
- */
- long pol_req_started;
- /**
- * Usage reference count taken on the policy instance
- */
- long pol_ref;
- /**
- * The NRS head this policy has been created at
- */
- struct ptlrpc_nrs *pol_nrs;
- /**
- * Private policy data; varies by policy type
- */
- void *pol_private;
- /**
- * Policy descriptor for this policy instance.
- */
- struct ptlrpc_nrs_pol_desc *pol_desc;
-};
-
-/**
- * NRS resource
- *
- * Resources are embedded into two types of NRS entities:
- * - Inside NRS policies, in the policy's private data in
- * ptlrpc_nrs_policy::pol_private
- * - In objects that act as prime-level scheduling entities in different NRS
- * policies; e.g. on a policy that performs round robin or similar order
- * scheduling across client NIDs, there would be one NRS resource per unique
- * client NID. On a policy which performs round robin scheduling across
- * backend filesystem objects, there would be one resource associated with
- * each of the backend filesystem objects partaking in the scheduling
- * performed by the policy.
- *
- * NRS resources share a parent-child relationship, in which resources embedded
- * in policy instances are the parent entities, with all scheduling entities
- * a policy schedules across being the children, thus forming a simple resource
- * hierarchy. This hierarchy may be extended with one or more levels in the
- * future if the ability to have more than one primary policy is added.
- *
- * Upon request initialization, references to the then active NRS policies are
- * taken and used to later handle the dispatching of the request with one of
- * these policies.
- *
- * \see nrs_resource_get_safe()
- * \see ptlrpc_nrs_req_add()
- */
-struct ptlrpc_nrs_resource {
- /**
- * This NRS resource's parent; is NULL for resources embedded in NRS
- * policy instances; i.e. those are top-level ones.
- */
- struct ptlrpc_nrs_resource *res_parent;
- /**
- * The policy associated with this resource.
- */
- struct ptlrpc_nrs_policy *res_policy;
-};
-
-enum {
- NRS_RES_FALLBACK,
- NRS_RES_PRIMARY,
- NRS_RES_MAX
-};
-
-/* \name fifo
- *
- * FIFO policy
- *
- * This policy is a logical wrapper around previous, non-NRS functionality.
- * It dispatches RPCs in the same order as they arrive from the network. This
- * policy is currently used as the fallback policy, and the only enabled policy
- * on all NRS heads of all PTLRPC service partitions.
- * @{
- */
-
-/**
- * Private data structure for the FIFO policy
- */
-struct nrs_fifo_head {
- /**
- * Resource object for policy instance.
- */
- struct ptlrpc_nrs_resource fh_res;
- /**
- * List of queued requests.
- */
- struct list_head fh_list;
- /**
- * For debugging purposes.
- */
- __u64 fh_sequence;
-};
-
-struct nrs_fifo_req {
- struct list_head fr_list;
- __u64 fr_sequence;
-};
-
-/** @} fifo */
-
-/**
- * NRS request
- *
- * Instances of this object exist embedded within ptlrpc_request; the main
- * purpose of this object is to hold references to the request's resources
- * for the lifetime of the request, and to hold properties that policies
- * use for determining the request's scheduling priority.
- */
-struct ptlrpc_nrs_request {
- /**
- * The request's resource hierarchy.
- */
- struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
- /**
- * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
- * policy that was used to enqueue the request.
- *
- * \see nrs_request_enqueue()
- */
- unsigned nr_res_idx;
- unsigned nr_initialized:1;
- unsigned nr_enqueued:1;
- unsigned nr_started:1;
- unsigned nr_finalized:1;
-
- /**
- * Policy-specific fields, used for determining a request's scheduling
- * priority, and other supporting functionality.
- */
- union {
- /**
- * Fields for the FIFO policy
- */
- struct nrs_fifo_req fifo;
- } nr_u;
- /**
- * Externally-registering policies may want to use this to allocate
- * their own request properties.
- */
- void *ext;
-};
-
-/** @} nrs */
-
-/**
- * Basic request prioritization operations structure.
- * The whole idea is centered around locks and RPCs that might affect locks.
- * When a lock is contended we try to give priority to RPCs that might lead
- * to the fastest release of that lock.
- * Currently this is implemented only for OSTs, in a way that gives all
- * IO and truncate RPCs coming from a locked region whose lock is
- * contended priority over other requests.
- */
-struct ptlrpc_hpreq_ops {
- /**
- * Check if the lock handle of the given lock is the same as
- * taken from the request.
- */
- int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
- /**
- * Check if the request is a high priority one.
- */
- int (*hpreq_check)(struct ptlrpc_request *);
- /**
- * Called after the request has been handled.
- */
- void (*hpreq_fini)(struct ptlrpc_request *);
-};
-
-/**
- * Represents remote procedure call.
- *
- * This is a staple structure used by everybody wanting to send a request
- * in Lustre.
- */
-struct ptlrpc_request {
- /* Request type: one of PTL_RPC_MSG_* */
- int rq_type;
- /** Result of request processing */
- int rq_status;
- /**
- * Linkage item through which this request is included into
- * sending/delayed lists on client and into rqbd list on server
- */
- struct list_head rq_list;
- /**
- * Server side list of incoming unserved requests sorted by arrival
- * time. Traversed from time to time to notice soon-to-expire
- * requests and send back "early replies" to clients to let them
- * know the server is alive and well, just too busy to service their
- * requests in time.
- */
- struct list_head rq_timed_list;
- /** server-side history, used for debugging purposes. */
- struct list_head rq_history_list;
- /** server-side per-export list */
- struct list_head rq_exp_list;
- /** server-side hp handlers */
- struct ptlrpc_hpreq_ops *rq_ops;
-
- /** initial thread servicing this request */
- struct ptlrpc_thread *rq_svc_thread;
-
- /** history sequence # */
- __u64 rq_history_seq;
- /** \addtogroup nrs
- * @{
- */
- /** stub for NRS request */
- struct ptlrpc_nrs_request rq_nrq;
- /** @} nrs */
- /** the index of service's srv_at_array into which request is linked */
- u32 rq_at_index;
- /** Lock to protect request flags and some other important bits, like
- * rq_list
- */
- spinlock_t rq_lock;
- /** client-side flags are serialized by rq_lock */
- unsigned int rq_intr:1, rq_replied:1, rq_err:1,
- rq_timedout:1, rq_resend:1, rq_restart:1,
- /**
- * when ->rq_replay is set, request is kept by the client even
- * after server commits corresponding transaction. This is
- * used for operations that require sequence of multiple
- * requests to be replayed. The only example currently is file
- * open/close. When last request in such a sequence is
- * committed, ->rq_replay is cleared on all requests in the
- * sequence.
- */
- rq_replay:1,
- rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
- rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
- rq_early:1,
- rq_req_unlink:1, rq_reply_unlink:1,
- rq_memalloc:1, /* req originated from "kswapd" */
- /* server-side flags */
- rq_packed_final:1, /* packed final reply */
- rq_hp:1, /* high priority RPC */
- rq_at_linked:1, /* link into service's srv_at_array */
- rq_reply_truncate:1,
- rq_committed:1,
- /* whether the "rq_set" is a valid one */
- rq_invalid_rqset:1,
- rq_generation_set:1,
- /* do not resend request on -EINPROGRESS */
- rq_no_retry_einprogress:1,
- /* allow the req to be sent if the import is in recovery
- * status */
- rq_allow_replay:1;
-
- unsigned int rq_nr_resend;
-
- enum rq_phase rq_phase; /* one of RQ_PHASE_* */
- enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
- atomic_t rq_refcount;/* client-side refcount for SENT race,
- server-side refcount for multiple replies */
-
- /** Portal to which this request would be sent */
- short rq_request_portal; /* XXX FIXME bug 249 */
- /** Portal where to wait for reply and where reply would be sent */
- short rq_reply_portal; /* XXX FIXME bug 249 */
-
- /**
- * client-side:
- * !rq_truncate : # reply bytes actually received,
- * rq_truncate : required repbuf_len for resend
- */
- int rq_nob_received;
- /** Request length */
- int rq_reqlen;
- /** Reply length */
- int rq_replen;
- /** Request message - what client sent */
- struct lustre_msg *rq_reqmsg;
- /** Reply message - server response */
- struct lustre_msg *rq_repmsg;
- /** Transaction number */
- __u64 rq_transno;
- /** xid */
- __u64 rq_xid;
- /**
- * List item for the replay list. Not yet committed requests get linked
- * there.
- * Also see \a rq_replay comment above.
- */
- struct list_head rq_replay_list;
-
- /**
- * security and encryption data
- * @{ */
- struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
- struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
- struct list_head rq_ctx_chain; /**< link to waited ctx */
-
- struct sptlrpc_flavor rq_flvr; /**< for client & server */
- enum lustre_sec_part rq_sp_from;
-
- /* client/server security flags */
- unsigned int
- rq_ctx_init:1, /* context initiation */
- rq_ctx_fini:1, /* context destroy */
- rq_bulk_read:1, /* request bulk read */
- rq_bulk_write:1, /* request bulk write */
- /* server authentication flags */
- rq_auth_gss:1, /* authenticated by gss */
- rq_auth_remote:1, /* authed as remote user */
- rq_auth_usr_root:1, /* authed as root */
- rq_auth_usr_mdt:1, /* authed as mdt */
- rq_auth_usr_ost:1, /* authed as ost */
- /* security tfm flags */
- rq_pack_udesc:1,
- rq_pack_bulk:1,
- /* doesn't expect reply FIXME */
- rq_no_reply:1,
- rq_pill_init:1; /* pill initialized */
-
- uid_t rq_auth_uid; /* authed uid */
- uid_t rq_auth_mapped_uid; /* authed uid mapped to */
-
- /* (server side), pointed directly into req buffer */
- struct ptlrpc_user_desc *rq_user_desc;
-
- /* various buffer pointers */
- struct lustre_msg *rq_reqbuf; /* req wrapper */
- char *rq_repbuf; /* rep buffer */
- struct lustre_msg *rq_repdata; /* rep wrapper msg */
- struct lustre_msg *rq_clrbuf; /* only in priv mode */
- int rq_reqbuf_len; /* req wrapper buf len */
- int rq_reqdata_len; /* req wrapper msg len */
- int rq_repbuf_len; /* rep buffer len */
- int rq_repdata_len; /* rep wrapper msg len */
- int rq_clrbuf_len; /* only in priv mode */
- int rq_clrdata_len; /* only in priv mode */
-
- /** early replies go to offset 0, regular replies go after that */
- unsigned int rq_reply_off;
-
- /** @} */
-
- /** Fields that help to see if request and reply were swabbed or not */
- __u32 rq_req_swab_mask;
- __u32 rq_rep_swab_mask;
-
- /** What was import generation when this request was sent */
- int rq_import_generation;
- enum lustre_imp_state rq_send_state;
-
- /** how many early replies (for stats) */
- int rq_early_count;
-
- /** client+server request */
- lnet_handle_md_t rq_req_md_h;
- struct ptlrpc_cb_id rq_req_cbid;
- /** optional time limit for send attempts */
- long rq_delay_limit;
- /** time request was first queued */
- unsigned long rq_queued_time;
-
- /* server-side... */
- /** request arrival time */
- struct timespec64 rq_arrival_time;
- /** separated reply state */
- struct ptlrpc_reply_state *rq_reply_state;
- /** incoming request buffer */
- struct ptlrpc_request_buffer_desc *rq_rqbd;
-
- /** client-only incoming reply */
- lnet_handle_md_t rq_reply_md_h;
- wait_queue_head_t rq_reply_waitq;
- struct ptlrpc_cb_id rq_reply_cbid;
-
- /** our LNet NID */
- lnet_nid_t rq_self;
- /** Peer description (the other side) */
- lnet_process_id_t rq_peer;
- /** Server-side, export on which request was received */
- struct obd_export *rq_export;
- /** Client side, import where request is being sent */
- struct obd_import *rq_import;
-
- /** Replay callback, called after request is replayed at recovery */
- void (*rq_replay_cb)(struct ptlrpc_request *);
- /**
- * Commit callback, called when request is committed and about to be
- * freed.
- */
- void (*rq_commit_cb)(struct ptlrpc_request *);
- /** Opaq data for replay and commit callbacks. */
- void *rq_cb_data;
-
- /** For bulk requests on client only: bulk descriptor */
- struct ptlrpc_bulk_desc *rq_bulk;
-
- /** client outgoing req */
- /**
- * when request/reply sent (secs), or time when request should be sent
- */
- time64_t rq_sent;
- /** time for request really sent out */
- time64_t rq_real_sent;
-
- /** when request must finish. volatile
- * so that servers' early reply updates to the deadline aren't
- * kept in per-cpu cache */
- volatile time64_t rq_deadline;
- /** when req reply unlink must finish. */
- time64_t rq_reply_deadline;
- /** when req bulk unlink must finish. */
- time64_t rq_bulk_deadline;
- /**
- * service time estimate (secs)
- * If the request is not served by this time, it is marked as timed out.
- */
- int rq_timeout;
-
- /** Multi-rpc bits */
- /** Per-request waitq introduced by bug 21938 for recovery waiting */
- wait_queue_head_t rq_set_waitq;
- /** Link item for request set lists */
- struct list_head rq_set_chain;
- /** Link back to the request set */
- struct ptlrpc_request_set *rq_set;
- /** Async completion handler, called when reply is received */
- ptlrpc_interpterer_t rq_interpret_reply;
- /** Async completion context */
- union ptlrpc_async_args rq_async_args;
-
- /** Pool if request is from preallocated list */
- struct ptlrpc_request_pool *rq_pool;
-
- struct lu_context rq_session;
- struct lu_context rq_recov_session;
-
- /** request format description */
- struct req_capsule rq_pill;
-};
-
-/**
- * Call the completion handler for the rpc, if any, and return its status or
- * the original rc if there was no handler defined for this request.
- */
-static inline int ptlrpc_req_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, int rc)
-{
- if (req->rq_interpret_reply != NULL) {
- req->rq_status = req->rq_interpret_reply(env, req,
- &req->rq_async_args,
- rc);
- return req->rq_status;
- }
- return rc;
-}
-
-/** \addtogroup nrs
- * @{
- */
-int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
-
-/*
- * Can the request be moved from the regular NRS head to the high-priority NRS
- * head (of the same PTLRPC service partition), if any?
- *
- * For a reliable result, this should be checked under svcpt->scp_req_lock.
- */
-static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
-{
- struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
-
- /**
- * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
- * request has been enqueued first, and ptlrpc_nrs_request::nr_started
- * to make sure it has not been scheduled yet (analogous to previous
- * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
- */
- return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
-}
-/** @} nrs */
-
-/**
- * Returns 1 if request buffer at offset \a index was already swabbed
- */
-static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
-{
- LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
- return req->rq_req_swab_mask & (1 << index);
-}
-
-/**
- * Returns 1 if request reply buffer at offset \a index was already swabbed
- */
-static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
-{
- LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
- return req->rq_rep_swab_mask & (1 << index);
-}
-
-/**
- * Returns 1 if request needs to be swabbed into local cpu byteorder
- */
-static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
-{
- return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
-}
-
-/**
- * Returns 1 if request reply needs to be swabbed into local cpu byteorder
- */
-static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
-{
- return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
-}
-
-/**
- * Mark request buffer at offset \a index that it was already swabbed
- */
-static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
-{
- LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
- LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
- req->rq_req_swab_mask |= 1 << index;
-}
-
-/**
- * Mark request reply buffer at offset \a index that it was already swabbed
- */
-static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
-{
- LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
- LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
- req->rq_rep_swab_mask |= 1 << index;
-}
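The swab helpers above keep one bit per buffer index so that each buffer is byte-swapped at most once. The following stand-alone sketch shows the same bookkeeping scheme; the struct and function names are invented for the demo.

#include <assert.h>
#include <stdio.h>

/* One bit per buffer index: set once that buffer has been swabbed. */
struct demo_msg {
	unsigned int swab_mask;
};

static int demo_buf_swabbed(const struct demo_msg *m, int index)
{
	assert(index < (int)(sizeof(m->swab_mask) * 8));
	return !!(m->swab_mask & (1U << index));
}

static void demo_set_buf_swabbed(struct demo_msg *m, int index)
{
	assert(index < (int)(sizeof(m->swab_mask) * 8));
	assert(!demo_buf_swabbed(m, index));	/* swab each buffer only once */
	m->swab_mask |= 1U << index;
}

int main(void)
{
	struct demo_msg m = { 0 };

	if (!demo_buf_swabbed(&m, 2)) {
		/* ... byte-swap buffer 2 here ... */
		demo_set_buf_swabbed(&m, 2);
	}
	printf("buffer 2 swabbed: %d\n", demo_buf_swabbed(&m, 2)); /* 1 */
	return 0;
}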
-
-/**
- * Convert numerical request phase value \a phase into text string description
- */
-static inline const char *
-ptlrpc_phase2str(enum rq_phase phase)
-{
- switch (phase) {
- case RQ_PHASE_NEW:
- return "New";
- case RQ_PHASE_RPC:
- return "Rpc";
- case RQ_PHASE_BULK:
- return "Bulk";
- case RQ_PHASE_INTERPRET:
- return "Interpret";
- case RQ_PHASE_COMPLETE:
- return "Complete";
- case RQ_PHASE_UNREGISTERING:
- return "Unregistering";
- default:
- return "?Phase?";
- }
-}
-
-/**
- * Convert the numerical request phase of the request \a req into a text
- * string description
- */
-static inline const char *
-ptlrpc_rqphase2str(struct ptlrpc_request *req)
-{
- return ptlrpc_phase2str(req->rq_phase);
-}
-
-/**
- * Debugging functions and helpers to print request structure into debug log
- * @{
- */
-/* Spare the preprocessor, spoil the bugs. */
-#define FLAG(field, str) (field ? str : "")
-
-/** Convert bit flags into a string */
-#define DEBUG_REQ_FLAGS(req) \
- ptlrpc_rqphase2str(req), \
- FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
- FLAG(req->rq_err, "E"), \
- FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
- FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
- FLAG(req->rq_no_resend, "N"), \
- FLAG(req->rq_waiting, "W"), \
- FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
- FLAG(req->rq_committed, "M")
-
-#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
-
-void _debug_req(struct ptlrpc_request *req,
- struct libcfs_debug_msg_data *data, const char *fmt, ...)
- __printf(3, 4);
-
-/**
- * Helper that decides if we need to print request according to current debug
- * level settings
- */
-#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
-do { \
- CFS_CHECK_STACK(msgdata, mask, cdls); \
- \
- if (((mask) & D_CANTMASK) != 0 || \
- ((libcfs_debug & (mask)) != 0 && \
- (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
- _debug_req((req), msgdata, fmt, ##a); \
-} while (0)
-
-/**
- * This is the debug print function you need to use to print request structure
- * content into lustre debug log.
- * for most callers (level is a constant) this is resolved at compile time */
-#define DEBUG_REQ(level, req, fmt, args...) \
-do { \
- if ((level) & (D_ERROR | D_WARNING)) { \
- static struct cfs_debug_limit_state cdls; \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
- debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
- } else { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
- debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
- } \
-} while (0)
-/** @} */
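The FLAG()/DEBUG_REQ_FLAGS() pair above turns a request's bit flags into a compact status string using nothing but ternary expressions that contribute an empty string for clear bits. Here is a stand-alone sketch of that trick; the struct and macro names are invented for the demo.

#include <stdio.h>

/* Minimal request stand-in with a few of the bit flags. */
struct demo_req {
	unsigned int rq_intr:1, rq_replied:1, rq_err:1, rq_resend:1;
};

/* Same trick as FLAG(): an empty string when the bit is clear. */
#define DEMO_FLAG(field, str) ((field) ? (str) : "")

#define DEMO_REQ_FLAGS(req) \
	DEMO_FLAG((req)->rq_intr, "I"), DEMO_FLAG((req)->rq_replied, "R"), \
	DEMO_FLAG((req)->rq_err, "E"), DEMO_FLAG((req)->rq_resend, "S")

#define DEMO_FLAGS_FMT "%s%s%s%s"

int main(void)
{
	struct demo_req req = { .rq_replied = 1, .rq_resend = 1 };

	/* Prints "RS": only the set bits contribute a letter. */
	printf(DEMO_FLAGS_FMT "\n", DEMO_REQ_FLAGS(&req));
	return 0;
}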
-
-/**
- * Structure that defines a single page of a bulk transfer
- */
-struct ptlrpc_bulk_page {
- /** Linkage to list of pages in a bulk */
- struct list_head bp_link;
- /**
- * Number of bytes in a page to transfer starting from \a bp_pageoffset
- */
- int bp_buflen;
- /** offset within a page */
- int bp_pageoffset;
- /** The page itself */
- struct page *bp_page;
-};
-
-#define BULK_GET_SOURCE 0
-#define BULK_PUT_SINK 1
-#define BULK_GET_SINK 2
-#define BULK_PUT_SOURCE 3
-
-/**
- * Definition of bulk descriptor.
- * Bulks are special "Two phase" RPCs where initial request message
- * is sent first and is followed by a transfer (or reception) of a large
- * amount of data to be settled into pages referenced from the bulk descriptors.
- * Bulk transfers (the actual data following the small requests) are done
- * on separate LNet portals.
- * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
- * Another user is readpage for MDT.
- */
-struct ptlrpc_bulk_desc {
- /** completed with failure */
- unsigned long bd_failure:1;
- /** {put,get}{source,sink} */
- unsigned long bd_type:2;
- /** client side */
- unsigned long bd_registered:1;
- /** For serialization with callback */
- spinlock_t bd_lock;
- /** Import generation when request for this bulk was sent */
- int bd_import_generation;
- /** LNet portal for this bulk */
- __u32 bd_portal;
- /** Server side - export this bulk created for */
- struct obd_export *bd_export;
- /** Client side - import this bulk was sent on */
- struct obd_import *bd_import;
- /** Back pointer to the request */
- struct ptlrpc_request *bd_req;
- wait_queue_head_t bd_waitq; /* server side only WQ */
- int bd_iov_count; /* # entries in bd_iov */
- int bd_max_iov; /* allocated size of bd_iov */
- int bd_nob; /* # bytes covered */
- int bd_nob_transferred; /* # bytes GOT/PUT */
-
- __u64 bd_last_xid;
-
- struct ptlrpc_cb_id bd_cbid; /* network callback info */
- lnet_nid_t bd_sender; /* stash event::sender */
- int bd_md_count; /* # valid entries in bd_mds */
- int bd_md_max_brw; /* max entries in bd_mds */
- /** array of associated MDs */
- lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
-
- /*
- * encrypt iov, size is either 0 or bd_iov_count.
- */
- lnet_kiov_t *bd_enc_iov;
-
- lnet_kiov_t bd_iov[0];
-};
-
-enum {
- SVC_STOPPED = 1 << 0,
- SVC_STOPPING = 1 << 1,
- SVC_STARTING = 1 << 2,
- SVC_RUNNING = 1 << 3,
- SVC_EVENT = 1 << 4,
- SVC_SIGNAL = 1 << 5,
-};
-
-#define PTLRPC_THR_NAME_LEN 32
-/**
- * Definition of server service thread structure
- */
-struct ptlrpc_thread {
- /**
- * List of active threads in svc->srv_threads
- */
- struct list_head t_link;
- /**
- * thread-private data (preallocated memory)
- */
- void *t_data;
- __u32 t_flags;
- /**
- * service thread index, from ptlrpc_start_threads
- */
- unsigned int t_id;
- /**
- * service thread pid
- */
- pid_t t_pid;
- /**
- * put watchdog in the structure per thread b=14840
- *
-	 * The Lustre watchdog is removed for the client in the hope that a
-	 * generic watchdog can be merged into the kernel.
- * When that happens, we should add below back.
- *
- * struct lc_watchdog *t_watchdog;
- */
- /**
- * the svc this thread belonged to b=18582
- */
- struct ptlrpc_service_part *t_svcpt;
- wait_queue_head_t t_ctl_waitq;
- struct lu_env *t_env;
- char t_name[PTLRPC_THR_NAME_LEN];
-};
-
-static inline int thread_is_init(struct ptlrpc_thread *thread)
-{
- return thread->t_flags == 0;
-}
-
-static inline int thread_is_stopped(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_STOPPED);
-}
-
-static inline int thread_is_stopping(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_STOPPING);
-}
-
-static inline int thread_is_starting(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_STARTING);
-}
-
-static inline int thread_is_running(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_RUNNING);
-}
-
-static inline int thread_is_event(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_EVENT);
-}
-
-static inline int thread_is_signal(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_SIGNAL);
-}
-
-static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
-{
- thread->t_flags &= ~flags;
-}
-
-static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
-{
- thread->t_flags = flags;
-}
-
-static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
-{
- thread->t_flags |= flags;
-}
-
-static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
- __u32 flags)
-{
- if (thread->t_flags & flags) {
- thread->t_flags &= ~flags;
- return 1;
- }
- return 0;
-}
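/*
 * Minimal sketch of the typical flag-transition pattern (assumed call-site
 * convention, not taken from this header): a service thread advances its
 * state and wakes anyone waiting on the control waitq.
 */
static inline void example_thread_set_running(struct ptlrpc_thread *thread)
{
	thread_clear_flags(thread, SVC_STARTING);
	thread_add_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);
}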
-
-/**
- * Request buffer descriptor structure.
- * This is a structure that contains one posted request buffer for service.
- * Once data lands into a buffer, the event callback creates the actual request
- * and wakes one of the service threads to process the new incoming request.
- * More than one request can fit into the buffer.
- */
-struct ptlrpc_request_buffer_desc {
- /** Link item for rqbds on a service */
- struct list_head rqbd_list;
- /** History of requests for this buffer */
- struct list_head rqbd_reqs;
- /** Back pointer to service for which this buffer is registered */
- struct ptlrpc_service_part *rqbd_svcpt;
- /** LNet descriptor */
- lnet_handle_md_t rqbd_md_h;
- int rqbd_refcount;
- /** The buffer itself */
- char *rqbd_buffer;
- struct ptlrpc_cb_id rqbd_cbid;
- /**
- * This "embedded" request structure is only used for the
- * last request to fit into the buffer
- */
- struct ptlrpc_request rqbd_req;
-};
-
-typedef int (*svc_handler_t)(struct ptlrpc_request *req);
-
-struct ptlrpc_service_ops {
- /**
- * if non-NULL called during thread creation (ptlrpc_start_thread())
- * to initialize service specific per-thread state.
- */
- int (*so_thr_init)(struct ptlrpc_thread *thr);
- /**
- * if non-NULL called during thread shutdown (ptlrpc_main()) to
- * destruct state created by ->srv_init().
- */
- void (*so_thr_done)(struct ptlrpc_thread *thr);
- /**
- * Handler function for incoming requests for this service
- */
- int (*so_req_handler)(struct ptlrpc_request *req);
- /**
- * function to determine priority of the request, it's called
- * on every new request
- */
- int (*so_hpreq_handler)(struct ptlrpc_request *);
- /**
- * service-specific print fn
- */
- void (*so_req_printer)(void *, struct ptlrpc_request *);
-};
-
-#ifndef __cfs_cacheline_aligned
-/* NB: put it here to reduce patch dependence */
-# define __cfs_cacheline_aligned
-#endif
-
-/**
- * How many high priority requests to serve before serving one normal
- * priority request
- */
-#define PTLRPC_SVC_HP_RATIO 10
-
-/**
- * Definition of PortalRPC service.
- * The service is listening on a particular portal (like tcp port)
- * and perform actions for a specific server like IO service for OST
- * or general metadata service for MDS.
- */
-struct ptlrpc_service {
- /** serialize /proc operations */
- spinlock_t srv_lock;
- /** most often accessed fields */
- /** chain thru all services */
- struct list_head srv_list;
- /** service operations table */
- struct ptlrpc_service_ops srv_ops;
- /** only statically allocated strings here; we don't clean them */
- char *srv_name;
- /** only statically allocated strings here; we don't clean them */
- char *srv_thread_name;
- /** service thread list */
- struct list_head srv_threads;
-	/** # of threads to create for each partition at initialization */
- int srv_nthrs_cpt_init;
-	/** limit on the number of threads for each partition */
- int srv_nthrs_cpt_limit;
- /** Root of debugfs dir tree for this service */
- struct dentry *srv_debugfs_entry;
- /** Pointer to statistic data for this service */
- struct lprocfs_stats *srv_stats;
- /** # hp per lp reqs to handle */
- int srv_hpreq_ratio;
- /** biggest request to receive */
- int srv_max_req_size;
- /** biggest reply to send */
- int srv_max_reply_size;
- /** size of individual buffers */
- int srv_buf_size;
- /** # buffers to allocate in 1 group */
- int srv_nbuf_per_group;
- /** Local portal on which to receive requests */
- __u32 srv_req_portal;
- /** Portal on the client to send replies to */
- __u32 srv_rep_portal;
- /**
- * Tags for lu_context associated with this thread, see struct
- * lu_context.
- */
- __u32 srv_ctx_tags;
- /** soft watchdog timeout multiplier */
- int srv_watchdog_factor;
- /** under unregister_service */
- unsigned srv_is_stopping:1;
-
- /** max # request buffers in history per partition */
- int srv_hist_nrqbds_cpt_max;
- /** number of CPTs this service bound on */
- int srv_ncpts;
- /** CPTs array this service bound on */
- __u32 *srv_cpts;
-	/** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
- int srv_cpt_bits;
- /** CPT table this service is running over */
- struct cfs_cpt_table *srv_cptable;
-
- /* sysfs object */
- struct kobject srv_kobj;
- struct completion srv_kobj_unregister;
- /**
- * partition data for ptlrpc service
- */
- struct ptlrpc_service_part *srv_parts[0];
-};
-
-/**
- * Definition of PortalRPC service partition data.
- * Although a service only has one instance of it right now, we will have
- * multiple instances very soon (one instance per CPT).
- *
- * it has four locks:
- * \a scp_lock
- * serialize operations on rqbd and requests waiting for preprocess
- * \a scp_req_lock
- *    serialize operations on active requests sent to this portal
- * \a scp_at_lock
- * serialize adaptive timeout stuff
- * \a scp_rep_lock
- * serialize operations on RS list (reply states)
- *
- * We don't have any use-case to take two or more locks at the same time
- * for now, so there is no lock order issue.
- */
-struct ptlrpc_service_part {
- /** back reference to owner */
- struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
- /* CPT id, reserved */
- int scp_cpt;
- /** always increasing number */
- int scp_thr_nextid;
- /** # of starting threads */
- int scp_nthrs_starting;
- /** # of stopping threads, reserved for shrinking threads */
- int scp_nthrs_stopping;
- /** # running threads */
- int scp_nthrs_running;
- /** service threads list */
- struct list_head scp_threads;
-
- /**
- * serialize the following fields, used for protecting
- * rqbd list and incoming requests waiting for preprocess,
- * threads starting & stopping are also protected by this lock.
- */
- spinlock_t scp_lock __cfs_cacheline_aligned;
- /** total # req buffer descs allocated */
- int scp_nrqbds_total;
- /** # posted request buffers for receiving */
- int scp_nrqbds_posted;
- /** in progress of allocating rqbd */
- int scp_rqbd_allocating;
- /** # incoming reqs */
- int scp_nreqs_incoming;
- /** request buffers to be reposted */
- struct list_head scp_rqbd_idle;
- /** req buffers receiving */
- struct list_head scp_rqbd_posted;
- /** incoming reqs */
- struct list_head scp_req_incoming;
- /** timeout before re-posting reqs, in tick */
- long scp_rqbd_timeout;
- /**
-	 * all threads sleep on this. This wait-queue is signalled when a new
-	 * incoming request arrives and when a difficult reply has to be handled.
- */
- wait_queue_head_t scp_waitq;
-
- /** request history */
- struct list_head scp_hist_reqs;
- /** request buffer history */
- struct list_head scp_hist_rqbds;
- /** # request buffers in history */
- int scp_hist_nrqbds;
- /** sequence number for request */
- __u64 scp_hist_seq;
- /** highest seq culled from history */
- __u64 scp_hist_seq_culled;
-
- /**
- * serialize the following fields, used for processing requests
- * sent to this portal
- */
- spinlock_t scp_req_lock __cfs_cacheline_aligned;
- /** # reqs in either of the NRS heads below */
- /** # reqs being served */
- int scp_nreqs_active;
- /** # HPreqs being served */
- int scp_nhreqs_active;
- /** # hp requests handled */
- int scp_hreq_count;
-
- /** NRS head for regular requests */
- struct ptlrpc_nrs scp_nrs_reg;
- /** NRS head for HP requests; this is only valid for services that can
- * handle HP requests */
- struct ptlrpc_nrs *scp_nrs_hp;
-
- /** AT stuff */
- /** @{ */
- /**
- * serialize the following fields, used for changes on
- * adaptive timeout
- */
- spinlock_t scp_at_lock __cfs_cacheline_aligned;
- /** estimated rpc service time */
- struct adaptive_timeout scp_at_estimate;
- /** reqs waiting for replies */
- struct ptlrpc_at_array scp_at_array;
- /** early reply timer */
- struct timer_list scp_at_timer;
- /** debug */
- unsigned long scp_at_checktime;
- /** check early replies */
- unsigned scp_at_check;
- /** @} */
-
- /**
- * serialize the following fields, used for processing
- * replies for this portal
- */
- spinlock_t scp_rep_lock __cfs_cacheline_aligned;
- /** all the active replies */
- struct list_head scp_rep_active;
- /** List of free reply_states */
- struct list_head scp_rep_idle;
- /** waitq to run, when adding stuff to srv_free_rs_list */
- wait_queue_head_t scp_rep_waitq;
- /** # 'difficult' replies */
- atomic_t scp_nreps_difficult;
-};
-
-#define ptlrpc_service_for_each_part(part, i, svc) \
- for (i = 0; \
- i < (svc)->srv_ncpts && \
- (svc)->srv_parts != NULL && \
- ((part) = (svc)->srv_parts[i]) != NULL; i++)
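/*
 * Illustrative sketch (hypothetical helper, not part of the deleted header):
 * summing active requests across all partitions of a service with
 * ptlrpc_service_for_each_part().
 */
static inline int example_count_active_reqs(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	int i;
	int total = 0;

	ptlrpc_service_for_each_part(svcpt, i, svc)
		total += svcpt->scp_nreqs_active;

	return total;
}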
-
-/**
- * Declaration of ptlrpcd control structure
- */
-struct ptlrpcd_ctl {
- /**
- * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
- */
- unsigned long pc_flags;
- /**
- * Thread lock protecting structure fields.
- */
- spinlock_t pc_lock;
- /**
- * Start completion.
- */
- struct completion pc_starting;
- /**
- * Stop completion.
- */
- struct completion pc_finishing;
- /**
- * Thread requests set.
- */
- struct ptlrpc_request_set *pc_set;
- /**
- * Thread name used in kthread_run()
- */
- char pc_name[16];
- /**
- * Environment for request interpreters to run in.
- */
- struct lu_env pc_env;
- /**
- * CPT the thread is bound on.
- */
- int pc_cpt;
- /**
- * Index of ptlrpcd thread in the array.
- */
- int pc_index;
- /**
- * Pointer to the array of partners' ptlrpcd_ctl structure.
- */
- struct ptlrpcd_ctl **pc_partners;
- /**
- * Number of the ptlrpcd's partners.
- */
- int pc_npartners;
- /**
- * Record the partner index to be processed next.
- */
- int pc_cursor;
- /**
- * Error code if the thread failed to fully start.
- */
- int pc_error;
-};
-
-/* Bits for pc_flags */
-enum ptlrpcd_ctl_flags {
- /**
- * Ptlrpc thread start flag.
- */
- LIOD_START = 1 << 0,
- /**
- * Ptlrpc thread stop flag.
- */
- LIOD_STOP = 1 << 1,
- /**
- * Ptlrpc thread force flag (only stop force so far).
-	 * This will cause any inflight RPCs handled by the thread
-	 * to be aborted if LIOD_STOP is specified.
- */
- LIOD_FORCE = 1 << 2,
- /**
- * This is a recovery ptlrpc thread.
- */
- LIOD_RECOVERY = 1 << 3,
-};
-
-/**
- * \addtogroup nrs
- * @{
- *
- * Service compatibility function; the policy is compatible with all services.
- *
- * \param[in] svc The service the policy is attempting to register with.
- * \param[in] desc The policy descriptor
- *
- * \retval true The policy is compatible with the service
- *
- * \see ptlrpc_nrs_pol_desc::pd_compat()
- */
-static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
-{
- return true;
-}
-
-/**
- * Service compatibility function; the policy is compatible with only a specific
- * service which is identified by its human-readable name at
- * ptlrpc_service::srv_name.
- *
- * \param[in] svc The service the policy is attempting to register with.
- * \param[in] desc The policy descriptor
- *
- * \retval false The policy is not compatible with the service
- * \retval true The policy is compatible with the service
- *
- * \see ptlrpc_nrs_pol_desc::pd_compat()
- */
-static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
-{
- LASSERT(desc->pd_compat_svc_name != NULL);
- return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
-}
-
-/** @} nrs */
-
-/* ptlrpc/events.c */
-extern lnet_handle_eq_t ptlrpc_eq_h;
-int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- lnet_process_id_t *peer, lnet_nid_t *self);
-/**
- * These callbacks are invoked by LNet when something happened to
- * underlying buffer
- * @{
- */
-void request_out_callback(lnet_event_t *ev);
-void reply_in_callback(lnet_event_t *ev);
-void client_bulk_callback(lnet_event_t *ev);
-void request_in_callback(lnet_event_t *ev);
-void reply_out_callback(lnet_event_t *ev);
-/** @} */
-
-/* ptlrpc/connection.c */
-struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
- lnet_nid_t self,
- struct obd_uuid *uuid);
-int ptlrpc_connection_put(struct ptlrpc_connection *c);
-struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
-int ptlrpc_connection_init(void);
-void ptlrpc_connection_fini(void);
-lnet_pid_t ptl_get_pid(void);
-
-/* ptlrpc/niobuf.c */
-/**
- * Actual interfacing with LNet to put/get/register/unregister stuff
- * @{
- */
-
-int ptlrpc_register_bulk(struct ptlrpc_request *req);
-int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
-
-static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
-{
- struct ptlrpc_bulk_desc *desc;
- int rc;
-
- LASSERT(req != NULL);
- desc = req->rq_bulk;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- req->rq_bulk_deadline > ktime_get_real_seconds())
- return 1;
-
- if (!desc)
- return 0;
-
- spin_lock(&desc->bd_lock);
- rc = desc->bd_md_count;
- spin_unlock(&desc->bd_lock);
- return rc;
-}
-
-#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
-#define PTLRPC_REPLY_EARLY 0x02
-int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
-int ptlrpc_reply(struct ptlrpc_request *req);
-int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
-int ptlrpc_error(struct ptlrpc_request *req);
-void ptlrpc_resend_req(struct ptlrpc_request *request);
-int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
-int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
-int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
-/** @} */
-
-/* ptlrpc/client.c */
-/**
- * Client-side portals API. Everything to send requests, receive replies,
- * request queues, request management, etc.
- * @{
- */
-void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
-
-void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
- struct ptlrpc_client *);
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
-
-int ptlrpc_queue_wait(struct ptlrpc_request *req);
-int ptlrpc_replay_req(struct ptlrpc_request *req);
-int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
-void ptlrpc_abort_inflight(struct obd_import *imp);
-void ptlrpc_abort_set(struct ptlrpc_request_set *set);
-
-struct ptlrpc_request_set *ptlrpc_prep_set(void);
-struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
- void *arg);
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
-int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
-int ptlrpc_set_wait(struct ptlrpc_request_set *);
-int ptlrpc_expired_set(void *data);
-void ptlrpc_interrupted_set(void *data);
-void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
-void ptlrpc_set_destroy(struct ptlrpc_request_set *);
-void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
-void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
- struct ptlrpc_request *req);
-
-void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
-int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
-
-struct ptlrpc_request_pool *
-ptlrpc_init_rq_pool(int, int,
- int (*populate_pool)(struct ptlrpc_request_pool *, int));
-
-void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
-struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
- const struct req_format *format);
-struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
- struct ptlrpc_request_pool *,
- const struct req_format *format);
-void ptlrpc_request_free(struct ptlrpc_request *request);
-int ptlrpc_request_pack(struct ptlrpc_request *request,
- __u32 version, int opcode);
-struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
- const struct req_format *format,
- __u32 version, int opcode);
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode, char **bufs,
- struct ptlrpc_cli_ctx *ctx);
-void ptlrpc_req_finished(struct ptlrpc_request *request);
-struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal);
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
-static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
-{
- __ptlrpc_free_bulk(bulk, 1);
-}
-static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
-{
- __ptlrpc_free_bulk(bulk, 0);
-}
-void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset, int len, int);
-static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset,
- int len)
-{
- __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
-}
-
-static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset,
- int len)
-{
- __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
-}
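/*
 * Minimal sketch (not from this header) of attaching a single-page bulk to a
 * request using the helpers declared above. The portal argument is a
 * placeholder; real callers pass the portal appropriate to their service.
 */
static inline struct ptlrpc_bulk_desc *
example_prep_one_page_bulk(struct ptlrpc_request *req, struct page *page,
			   unsigned portal)
{
	struct ptlrpc_bulk_desc *desc;

	desc = ptlrpc_prep_bulk_imp(req, 1, 1, BULK_GET_SOURCE, portal);
	if (!desc)
		return NULL;

	/* Transfer the whole page starting at offset 0 */
	ptlrpc_prep_bulk_page_pin(desc, page, 0, PAGE_SIZE);
	return desc;
}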
-
-void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
- struct obd_import *imp);
-__u64 ptlrpc_next_xid(void);
-__u64 ptlrpc_sample_next_xid(void);
-__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
-
-/* Set of routines to run a function in ptlrpcd context */
-void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *data);
-void ptlrpcd_destroy_work(void *handler);
-int ptlrpcd_queue_work(void *handler);
-
-/** @} */
-struct ptlrpc_service_buf_conf {
-	/* nbufs is the number of buffers to allocate when growing the pool */
- unsigned int bc_nbufs;
- /* buffer size to post */
- unsigned int bc_buf_size;
-	/* portal to listen for requests on */
- unsigned int bc_req_portal;
-	/* portal to send replies to */
- unsigned int bc_rep_portal;
- /* maximum request size to be accepted for this service */
- unsigned int bc_req_max_size;
- /* maximum reply size this service can ever send */
- unsigned int bc_rep_max_size;
-};
-
-struct ptlrpc_service_thr_conf {
- /* threadname should be 8 characters or less - 6 will be added on */
- char *tc_thr_name;
- /* threads increasing factor for each CPU */
- unsigned int tc_thr_factor;
- /* service threads # to start on each partition while initializing */
- unsigned int tc_nthrs_init;
- /*
-	 * low-water mark for the number of threads on each partition while
-	 * running; service availability may be impacted if the thread count
-	 * falls below this value. It can be ZERO if the service doesn't
-	 * require CPU affinity or there is only one partition.
- */
- unsigned int tc_nthrs_base;
- /* "soft" limit for total threads number */
- unsigned int tc_nthrs_max;
-	/* user-specified number of threads; it will be validated against
-	 * other members of this structure. */
- unsigned int tc_nthrs_user;
- /* set NUMA node affinity for service threads */
- unsigned int tc_cpu_affinity;
- /* Tags for lu_context associated with service thread */
- __u32 tc_ctx_tags;
-};
-
-struct ptlrpc_service_cpt_conf {
- struct cfs_cpt_table *cc_cptable;
- /* string pattern to describe CPTs for a service */
- char *cc_pattern;
-};
-
-struct ptlrpc_service_conf {
- /* service name */
- char *psc_name;
-	/* soft watchdog timeout multiplier to print stuck service traces */
- unsigned int psc_watchdog_factor;
- /* buffer information */
- struct ptlrpc_service_buf_conf psc_buf;
- /* thread information */
- struct ptlrpc_service_thr_conf psc_thr;
- /* CPU partition information */
- struct ptlrpc_service_cpt_conf psc_cpt;
- /* function table */
- struct ptlrpc_service_ops psc_ops;
-};
-
-/* ptlrpc/service.c */
-/**
- * Server-side services API. Register/unregister service, request state
- * management, service thread management
- *
- * @{
- */
-void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
-void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
-struct ptlrpc_service *ptlrpc_register_service(
- struct ptlrpc_service_conf *conf,
- struct kset *parent,
- struct dentry *debugfs_entry);
-void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
-
-int ptlrpc_start_threads(struct ptlrpc_service *svc);
-int ptlrpc_unregister_service(struct ptlrpc_service *service);
-int liblustre_check_services(void *arg);
-void ptlrpc_server_drop_request(struct ptlrpc_request *req);
-
-int ptlrpc_hr_init(void);
-void ptlrpc_hr_fini(void);
-
-/** @} */
-
-/* ptlrpc/import.c */
-/**
- * Import API
- * @{
- */
-int ptlrpc_connect_import(struct obd_import *imp);
-int ptlrpc_init_import(struct obd_import *imp);
-int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
-int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
-void deuuidify(char *uuid, const char *prefix, char **uuid_start,
- int *uuid_len);
-
-/* ptlrpc/pack_generic.c */
-int ptlrpc_reconnect_import(struct obd_import *imp);
-/** @} */
-
-/**
- * ptlrpc msg buffer and swab interface
- *
- * @{
- */
-int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- int index);
-void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- int index);
-int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
-int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
-
-void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
- char **bufs);
-int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
- __u32 *lens, char **bufs);
-int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
- char **bufs);
-int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
- __u32 *lens, char **bufs, int flags);
-#define LPRFL_EARLY_REPLY 1
-int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
- char **bufs, int flags);
-int lustre_shrink_msg(struct lustre_msg *msg, int segment,
- unsigned int newlen, int move_data);
-void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
-int __lustre_unpack_msg(struct lustre_msg *m, int len);
-int lustre_msg_hdr_size(__u32 magic, int count);
-int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
-int lustre_msg_size_v2(int count, __u32 *lengths);
-int lustre_packed_msg_size(struct lustre_msg *msg);
-int lustre_msg_early_size(void);
-void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
-void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
-int lustre_msg_buflen(struct lustre_msg *m, int n);
-int lustre_msg_bufcount(struct lustre_msg *m);
-char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
-__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
-void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
-__u32 lustre_msg_get_flags(struct lustre_msg *msg);
-void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
-void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
-void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
-__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
-void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
-struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
-__u32 lustre_msg_get_type(struct lustre_msg *msg);
-void lustre_msg_add_version(struct lustre_msg *msg, int version);
-__u32 lustre_msg_get_opc(struct lustre_msg *msg);
-__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
-__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
-__u64 lustre_msg_get_transno(struct lustre_msg *msg);
-__u64 lustre_msg_get_slv(struct lustre_msg *msg);
-__u32 lustre_msg_get_limit(struct lustre_msg *msg);
-void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
-void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
-int lustre_msg_get_status(struct lustre_msg *msg);
-__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
-__u32 lustre_msg_get_magic(struct lustre_msg *msg);
-__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
-__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
-__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
-void lustre_msg_set_handle(struct lustre_msg *msg,
- struct lustre_handle *handle);
-void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
-void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
-void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
-void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
-void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
-void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
-void ptlrpc_request_set_replen(struct ptlrpc_request *req);
-void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
-void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
-void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
-void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
-
-static inline void
-lustre_shrink_reply(struct ptlrpc_request *req, int segment,
- unsigned int newlen, int move_data)
-{
- LASSERT(req->rq_reply_state);
- LASSERT(req->rq_repmsg);
- req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
- newlen, move_data);
-}
-
-#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
-
-static inline int ptlrpc_status_hton(int h)
-{
- /*
- * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
- * ELDLM_LOCK_ABORTED, etc.
- */
- if (h < 0)
- return -lustre_errno_hton(-h);
- else
- return h;
-}
-
-static inline int ptlrpc_status_ntoh(int n)
-{
- /*
- * See the comment in ptlrpc_status_hton().
- */
- if (n < 0)
- return -lustre_errno_ntoh(-n);
- else
- return n;
-}
-
-#else
-
-#define ptlrpc_status_hton(h) (h)
-#define ptlrpc_status_ntoh(n) (n)
-
-#endif
-/** @} */
-
-/** Change request phase of \a req to \a new_phase */
-static inline void
-ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
-{
- if (req->rq_phase == new_phase)
- return;
-
- if (new_phase == RQ_PHASE_UNREGISTERING) {
- req->rq_next_phase = req->rq_phase;
- if (req->rq_import)
- atomic_inc(&req->rq_import->imp_unregistering);
- }
-
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
- if (req->rq_import)
- atomic_dec(&req->rq_import->imp_unregistering);
- }
-
- DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
- ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
-
- req->rq_phase = new_phase;
-}
-
-/**
- * Returns true if request \a req got an early reply and the hard deadline is not met
- */
-static inline int
-ptlrpc_client_early(struct ptlrpc_request *req)
-{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
- return 0;
- return req->rq_early;
-}
-
-/**
- * Returns true if we got a real reply from the server for this request
- */
-static inline int
-ptlrpc_client_replied(struct ptlrpc_request *req)
-{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
- return 0;
- return req->rq_replied;
-}
-
-/** Returns true if request \a req is in process of receiving server reply */
-static inline int
-ptlrpc_client_recv(struct ptlrpc_request *req)
-{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
- return 1;
- return req->rq_receiving_reply;
-}
-
-static inline int
-ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
-{
- int rc;
-
- spin_lock(&req->rq_lock);
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds()) {
- spin_unlock(&req->rq_lock);
- return 1;
- }
- rc = req->rq_receiving_reply;
- rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
- spin_unlock(&req->rq_lock);
- return rc;
-}
-
-static inline void
-ptlrpc_client_wake_req(struct ptlrpc_request *req)
-{
- if (req->rq_set == NULL)
- wake_up(&req->rq_reply_waitq);
- else
- wake_up(&req->rq_set->set_waitq);
-}
-
-static inline void
-ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
-{
- LASSERT(atomic_read(&rs->rs_refcount) > 0);
- atomic_inc(&rs->rs_refcount);
-}
-
-static inline void
-ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
-{
- LASSERT(atomic_read(&rs->rs_refcount) > 0);
- if (atomic_dec_and_test(&rs->rs_refcount))
- lustre_free_reply_state(rs);
-}
-
-/* Should only be called once per req */
-static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
-{
- if (req->rq_reply_state == NULL)
- return; /* shouldn't occur */
- ptlrpc_rs_decref(req->rq_reply_state);
- req->rq_reply_state = NULL;
- req->rq_repmsg = NULL;
-}
-
-static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
-{
- return lustre_msg_get_magic(req->rq_reqmsg);
-}
-
-static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
-{
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return req->rq_reqmsg->lm_repsize;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n",
- req->rq_reqmsg->lm_magic);
- return -EFAULT;
- }
-}
-
-static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
-{
- if (req->rq_delay_limit != 0 &&
- time_before(cfs_time_add(req->rq_queued_time,
- cfs_time_seconds(req->rq_delay_limit)),
- cfs_time_current())) {
- return 1;
- }
- return 0;
-}
-
-static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
-{
- if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
- spin_lock(&req->rq_lock);
- req->rq_no_resend = 1;
- spin_unlock(&req->rq_lock);
- }
- return req->rq_no_resend;
-}
-
-static inline int
-ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
-{
- int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
-
- return svcpt->scp_service->srv_watchdog_factor *
- max_t(int, at, obd_timeout);
-}
-
-static inline struct ptlrpc_service *
-ptlrpc_req2svc(struct ptlrpc_request *req)
-{
- LASSERT(req->rq_rqbd != NULL);
- return req->rq_rqbd->rqbd_svcpt->scp_service;
-}
-
-/* ldlm/ldlm_lib.c */
-/**
- * Target client logic
- * @{
- */
-int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
-int client_obd_cleanup(struct obd_device *obddev);
-int client_connect_import(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *obd,
- struct obd_uuid *cluuid, struct obd_connect_data *,
- void *localdata);
-int client_disconnect_export(struct obd_export *exp);
-int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority);
-int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
-int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
- struct obd_uuid *uuid);
-int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
-void client_destroy_import(struct obd_import *imp);
-/** @} */
-
-
-/* ptlrpc/pinger.c */
-/**
- * Pinger API (client side only)
- * @{
- */
-enum timeout_event {
- TIMEOUT_GRANT = 1
-};
-struct timeout_item;
-typedef int (*timeout_cb_t)(struct timeout_item *, void *);
-int ptlrpc_pinger_add_import(struct obd_import *imp);
-int ptlrpc_pinger_del_import(struct obd_import *imp);
-int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- struct list_head *obd_list);
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
- enum timeout_event event);
-struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
-int ptlrpc_obd_ping(struct obd_device *obd);
-void ptlrpc_pinger_ir_up(void);
-void ptlrpc_pinger_ir_down(void);
-/** @} */
-int ptlrpc_pinger_suppress_pings(void);
-
-/* ptlrpc/ptlrpcd.c */
-void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
-void ptlrpcd_free(struct ptlrpcd_ctl *pc);
-void ptlrpcd_wake(struct ptlrpc_request *req);
-void ptlrpcd_add_req(struct ptlrpc_request *req);
-int ptlrpcd_addref(void);
-void ptlrpcd_decref(void);
-
-/* ptlrpc/lproc_ptlrpc.c */
-/**
- * procfs output related functions
- * @{
- */
-const char *ll_opcode2str(__u32 opcode);
-void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
-void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
-void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
-/** @} */
-
-/* ptlrpc/llog_client.c */
-extern struct llog_operations llog_client_ops;
-/** @} net */
-
-#endif
-/** @} PtlRPC */
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
deleted file mode 100644
index de49b414e912..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_param.h
- *
- * User-settable parameter keys
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#ifndef _LUSTRE_PARAM_H
-#define _LUSTRE_PARAM_H
-
-/** \defgroup param param
- *
- * @{
- */
-
-/* For interoperability */
-struct cfg_interop_param {
- char *old_param;
- char *new_param;
-};
-
-/* obd_config.c */
-int class_find_param(char *buf, char *key, char **valp);
-int class_match_param(char *buf, char *key, char **valp);
-int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh);
-int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
-/* obd_mount.c */
-int do_lcfg(char *cfgname, lnet_nid_t nid, int cmd,
- char *s1, char *s2, char *s3, char *s4);
-
-
-
-/****************** User-settable parameter keys *********************/
-/* e.g.
- tunefs.lustre --param="failover.node=192.168.0.13@tcp0" /dev/sda
- lctl conf_param testfs-OST0000 failover.node=3@elan,192.168.0.3@tcp0
- ... testfs-MDT0000.lov.stripesize=4M
- ... testfs-OST0000.ost.client_cache_seconds=15
- ... testfs.sys.timeout=<secs>
- ... testfs.llite.max_read_ahead_mb=16
-*/
-
-/* System global or special params not handled in obd's proc
- * See mgs_write_log_sys()
- */
-#define PARAM_TIMEOUT "timeout=" /* global */
-#define PARAM_LDLM_TIMEOUT "ldlm_timeout=" /* global */
-#define PARAM_AT_MIN "at_min=" /* global */
-#define PARAM_AT_MAX "at_max=" /* global */
-#define PARAM_AT_EXTRA "at_extra=" /* global */
-#define PARAM_AT_EARLY_MARGIN "at_early_margin=" /* global */
-#define PARAM_AT_HISTORY "at_history=" /* global */
-#define PARAM_JOBID_VAR "jobid_var=" /* global */
-#define PARAM_MGSNODE "mgsnode=" /* only at mounttime */
-#define PARAM_FAILNODE "failover.node=" /* add failover nid */
-#define PARAM_FAILMODE "failover.mode=" /* initial mount only */
-#define PARAM_ACTIVE "active=" /* activate/deactivate */
-#define PARAM_NETWORK "network=" /* bind on nid */
-#define PARAM_ID_UPCALL "identity_upcall=" /* identity upcall */
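/*
 * Minimal usage sketch (assumed call-site convention, not part of the deleted
 * header): matching one of the keys above in a configuration string with
 * class_match_param(), declared earlier in this file.
 */
static int example_get_failnode(char *buf, char **nid_str)
{
	/* On a match, returns 0 and *nid_str points just past "failover.node=" */
	return class_match_param(buf, PARAM_FAILNODE, nid_str);
}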
-
-/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */
-#define PARAM_OST "ost."
-#define PARAM_OSC "osc."
-#define PARAM_MDT "mdt."
-#define PARAM_MDD "mdd."
-#define PARAM_MDC "mdc."
-#define PARAM_LLITE "llite."
-#define PARAM_LOV "lov."
-#define PARAM_LOD "lod."
-#define PARAM_OSP "osp."
-#define PARAM_SYS "sys." /* global */
-#define PARAM_SRPC "srpc."
-#define PARAM_SRPC_FLVR "srpc.flavor."
-#define PARAM_SRPC_UDESC "srpc.udesc.cli2mdt"
-#define PARAM_SEC "security."
-#define PARAM_QUOTA "quota." /* global */
-
-/** @} param */
-
-#endif /* _LUSTRE_PARAM_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
deleted file mode 100644
index cfcc995585c0..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/lustre_req_layout.h
- *
- * Lustre Metadata Target (mdt) request handler
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#ifndef _LUSTRE_REQ_LAYOUT_H__
-#define _LUSTRE_REQ_LAYOUT_H__
-
-/** \defgroup req_layout req_layout
- *
- * @{
- */
-
-struct req_msg_field;
-struct req_format;
-struct req_capsule;
-
-struct ptlrpc_request;
-
-enum req_location {
- RCL_CLIENT,
- RCL_SERVER,
- RCL_NR
-};
-
-/* Maximal number of fields (buffers) in a request message. */
-#define REQ_MAX_FIELD_NR 9
-
-struct req_capsule {
- struct ptlrpc_request *rc_req;
- const struct req_format *rc_fmt;
- enum req_location rc_loc;
- __u32 rc_area[RCL_NR][REQ_MAX_FIELD_NR];
-};
-
-#if !defined(__REQ_LAYOUT_USER__)
-
-/* struct ptlrpc_request, lustre_msg* */
-#include "lustre_net.h"
-
-void req_capsule_init(struct req_capsule *pill, struct ptlrpc_request *req,
- enum req_location location);
-void req_capsule_fini(struct req_capsule *pill);
-
-void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt);
-void req_capsule_init_area(struct req_capsule *pill);
-int req_capsule_filled_sizes(struct req_capsule *pill, enum req_location loc);
-int req_capsule_server_pack(struct req_capsule *pill);
-
-void *req_capsule_client_get(struct req_capsule *pill,
- const struct req_msg_field *field);
-void *req_capsule_client_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber);
-void *req_capsule_client_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- int len);
-void *req_capsule_server_get(struct req_capsule *pill,
- const struct req_msg_field *field);
-void *req_capsule_server_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- int len);
-void *req_capsule_server_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber);
-void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- int len, void *swabber);
-
-void req_capsule_set_size(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc, int size);
-int req_capsule_get_size(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc);
-int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc);
-int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
- enum req_location loc);
-void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt);
-
-int req_capsule_has_field(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc);
-int req_capsule_field_present(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc);
-void req_capsule_shrink(struct req_capsule *pill,
- const struct req_msg_field *field,
- unsigned int newlen,
- enum req_location loc);
-int req_layout_init(void);
-void req_layout_fini(void);
-
-/* __REQ_LAYOUT_USER__ */
-#endif
-
-extern struct req_format RQF_OBD_PING;
-extern struct req_format RQF_OBD_SET_INFO;
-extern struct req_format RQF_SEC_CTX;
-extern struct req_format RQF_OBD_IDX_READ;
-/* MGS req_format */
-extern struct req_format RQF_MGS_TARGET_REG;
-extern struct req_format RQF_MGS_SET_INFO;
-extern struct req_format RQF_MGS_CONFIG_READ;
-/* fid/fld req_format */
-extern struct req_format RQF_SEQ_QUERY;
-extern struct req_format RQF_FLD_QUERY;
-/* MDS req_format */
-extern struct req_format RQF_MDS_CONNECT;
-extern struct req_format RQF_MDS_DISCONNECT;
-extern struct req_format RQF_MDS_STATFS;
-extern struct req_format RQF_MDS_GETSTATUS;
-extern struct req_format RQF_MDS_SYNC;
-extern struct req_format RQF_MDS_GETXATTR;
-extern struct req_format RQF_MDS_GETATTR;
-extern struct req_format RQF_UPDATE_OBJ;
-
-/*
- * This is the format of a direct (non-intent) MDS_GETATTR_NAME request.
- */
-extern struct req_format RQF_MDS_GETATTR_NAME;
-extern struct req_format RQF_MDS_CLOSE;
-extern struct req_format RQF_MDS_RELEASE_CLOSE;
-extern struct req_format RQF_MDS_PIN;
-extern struct req_format RQF_MDS_UNPIN;
-extern struct req_format RQF_MDS_CONNECT;
-extern struct req_format RQF_MDS_DISCONNECT;
-extern struct req_format RQF_MDS_GET_INFO;
-extern struct req_format RQF_MDS_READPAGE;
-extern struct req_format RQF_MDS_WRITEPAGE;
-extern struct req_format RQF_MDS_IS_SUBDIR;
-extern struct req_format RQF_MDS_DONE_WRITING;
-extern struct req_format RQF_MDS_REINT;
-extern struct req_format RQF_MDS_REINT_CREATE;
-extern struct req_format RQF_MDS_REINT_CREATE_RMT_ACL;
-extern struct req_format RQF_MDS_REINT_CREATE_SLAVE;
-extern struct req_format RQF_MDS_REINT_CREATE_SYM;
-extern struct req_format RQF_MDS_REINT_OPEN;
-extern struct req_format RQF_MDS_REINT_UNLINK;
-extern struct req_format RQF_MDS_REINT_LINK;
-extern struct req_format RQF_MDS_REINT_RENAME;
-extern struct req_format RQF_MDS_REINT_SETATTR;
-extern struct req_format RQF_MDS_REINT_SETXATTR;
-extern struct req_format RQF_MDS_QUOTACHECK;
-extern struct req_format RQF_MDS_QUOTACTL;
-extern struct req_format RQF_QC_CALLBACK;
-extern struct req_format RQF_QUOTA_DQACQ;
-extern struct req_format RQF_MDS_SWAP_LAYOUTS;
-/* MDS hsm formats */
-extern struct req_format RQF_MDS_HSM_STATE_GET;
-extern struct req_format RQF_MDS_HSM_STATE_SET;
-extern struct req_format RQF_MDS_HSM_ACTION;
-extern struct req_format RQF_MDS_HSM_PROGRESS;
-extern struct req_format RQF_MDS_HSM_CT_REGISTER;
-extern struct req_format RQF_MDS_HSM_CT_UNREGISTER;
-extern struct req_format RQF_MDS_HSM_REQUEST;
-/* OST req_format */
-extern struct req_format RQF_OST_CONNECT;
-extern struct req_format RQF_OST_DISCONNECT;
-extern struct req_format RQF_OST_QUOTACHECK;
-extern struct req_format RQF_OST_QUOTACTL;
-extern struct req_format RQF_OST_GETATTR;
-extern struct req_format RQF_OST_SETATTR;
-extern struct req_format RQF_OST_CREATE;
-extern struct req_format RQF_OST_PUNCH;
-extern struct req_format RQF_OST_SYNC;
-extern struct req_format RQF_OST_DESTROY;
-extern struct req_format RQF_OST_BRW_READ;
-extern struct req_format RQF_OST_BRW_WRITE;
-extern struct req_format RQF_OST_STATFS;
-extern struct req_format RQF_OST_SET_GRANT_INFO;
-extern struct req_format RQF_OST_GET_INFO_GENERIC;
-extern struct req_format RQF_OST_GET_INFO_LAST_ID;
-extern struct req_format RQF_OST_GET_INFO_LAST_FID;
-extern struct req_format RQF_OST_SET_INFO_LAST_FID;
-extern struct req_format RQF_OST_GET_INFO_FIEMAP;
-
-/* LDLM req_format */
-extern struct req_format RQF_LDLM_ENQUEUE;
-extern struct req_format RQF_LDLM_ENQUEUE_LVB;
-extern struct req_format RQF_LDLM_CONVERT;
-extern struct req_format RQF_LDLM_INTENT;
-extern struct req_format RQF_LDLM_INTENT_BASIC;
-extern struct req_format RQF_LDLM_INTENT_LAYOUT;
-extern struct req_format RQF_LDLM_INTENT_GETATTR;
-extern struct req_format RQF_LDLM_INTENT_OPEN;
-extern struct req_format RQF_LDLM_INTENT_CREATE;
-extern struct req_format RQF_LDLM_INTENT_UNLINK;
-extern struct req_format RQF_LDLM_INTENT_GETXATTR;
-extern struct req_format RQF_LDLM_INTENT_QUOTA;
-extern struct req_format RQF_LDLM_CANCEL;
-extern struct req_format RQF_LDLM_CALLBACK;
-extern struct req_format RQF_LDLM_CP_CALLBACK;
-extern struct req_format RQF_LDLM_BL_CALLBACK;
-extern struct req_format RQF_LDLM_GL_CALLBACK;
-extern struct req_format RQF_LDLM_GL_DESC_CALLBACK;
-/* LOG req_format */
-extern struct req_format RQF_LOG_CANCEL;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK;
-extern struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER;
-extern struct req_format RQF_LLOG_ORIGIN_CONNECT;
-
-extern struct req_format RQF_CONNECT;
-
-extern struct req_msg_field RMF_GENERIC_DATA;
-extern struct req_msg_field RMF_PTLRPC_BODY;
-extern struct req_msg_field RMF_MDT_BODY;
-extern struct req_msg_field RMF_MDT_EPOCH;
-extern struct req_msg_field RMF_OBD_STATFS;
-extern struct req_msg_field RMF_NAME;
-extern struct req_msg_field RMF_SYMTGT;
-extern struct req_msg_field RMF_TGTUUID;
-extern struct req_msg_field RMF_CLUUID;
-extern struct req_msg_field RMF_SETINFO_VAL;
-extern struct req_msg_field RMF_SETINFO_KEY;
-extern struct req_msg_field RMF_GETINFO_VAL;
-extern struct req_msg_field RMF_GETINFO_VALLEN;
-extern struct req_msg_field RMF_GETINFO_KEY;
-extern struct req_msg_field RMF_IDX_INFO;
-extern struct req_msg_field RMF_CLOSE_DATA;
-
-/*
- * connection handle received in MDS_CONNECT request.
- */
-extern struct req_msg_field RMF_CONN;
-extern struct req_msg_field RMF_CONNECT_DATA;
-extern struct req_msg_field RMF_DLM_REQ;
-extern struct req_msg_field RMF_DLM_REP;
-extern struct req_msg_field RMF_DLM_LVB;
-extern struct req_msg_field RMF_DLM_GL_DESC;
-extern struct req_msg_field RMF_LDLM_INTENT;
-extern struct req_msg_field RMF_LAYOUT_INTENT;
-extern struct req_msg_field RMF_MDT_MD;
-extern struct req_msg_field RMF_REC_REINT;
-extern struct req_msg_field RMF_EADATA;
-extern struct req_msg_field RMF_EAVALS;
-extern struct req_msg_field RMF_EAVALS_LENS;
-extern struct req_msg_field RMF_ACL;
-extern struct req_msg_field RMF_LOGCOOKIES;
-extern struct req_msg_field RMF_CAPA1;
-extern struct req_msg_field RMF_CAPA2;
-extern struct req_msg_field RMF_OBD_QUOTACHECK;
-extern struct req_msg_field RMF_OBD_QUOTACTL;
-extern struct req_msg_field RMF_QUOTA_BODY;
-extern struct req_msg_field RMF_STRING;
-extern struct req_msg_field RMF_SWAP_LAYOUTS;
-extern struct req_msg_field RMF_MDS_HSM_PROGRESS;
-extern struct req_msg_field RMF_MDS_HSM_REQUEST;
-extern struct req_msg_field RMF_MDS_HSM_USER_ITEM;
-extern struct req_msg_field RMF_MDS_HSM_ARCHIVE;
-extern struct req_msg_field RMF_HSM_USER_STATE;
-extern struct req_msg_field RMF_HSM_STATE_SET;
-extern struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION;
-extern struct req_msg_field RMF_MDS_HSM_REQUEST;
-
-/* seq-mgr fields */
-extern struct req_msg_field RMF_SEQ_OPC;
-extern struct req_msg_field RMF_SEQ_RANGE;
-extern struct req_msg_field RMF_FID_SPACE;
-
-/* FLD fields */
-extern struct req_msg_field RMF_FLD_OPC;
-extern struct req_msg_field RMF_FLD_MDFLD;
-
-extern struct req_msg_field RMF_LLOGD_BODY;
-extern struct req_msg_field RMF_LLOG_LOG_HDR;
-extern struct req_msg_field RMF_LLOGD_CONN_BODY;
-
-extern struct req_msg_field RMF_MGS_TARGET_INFO;
-extern struct req_msg_field RMF_MGS_SEND_PARAM;
-
-extern struct req_msg_field RMF_OST_BODY;
-extern struct req_msg_field RMF_OBD_IOOBJ;
-extern struct req_msg_field RMF_OBD_ID;
-extern struct req_msg_field RMF_FID;
-extern struct req_msg_field RMF_NIOBUF_REMOTE;
-extern struct req_msg_field RMF_RCS;
-extern struct req_msg_field RMF_FIEMAP_KEY;
-extern struct req_msg_field RMF_FIEMAP_VAL;
-extern struct req_msg_field RMF_OST_ID;
-
-/* MGS config read message format */
-extern struct req_msg_field RMF_MGS_CONFIG_BODY;
-extern struct req_msg_field RMF_MGS_CONFIG_RES;
-
-/* generic uint32 */
-extern struct req_msg_field RMF_U32;
-
-/* OBJ update format */
-extern struct req_msg_field RMF_UPDATE;
-extern struct req_msg_field RMF_UPDATE_REPLY;
-/** @} req_layout */
-
-#endif /* _LUSTRE_REQ_LAYOUT_H__ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
deleted file mode 100644
index 1d2c5724d1df..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ /dev/null
@@ -1,1119 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LUSTRE_SEC_H_
-#define _LUSTRE_SEC_H_
-
-/** \defgroup sptlrpc sptlrpc
- *
- * @{
- */
-
-/*
- * to avoid include
- */
-struct obd_import;
-struct obd_export;
-struct ptlrpc_request;
-struct ptlrpc_reply_state;
-struct ptlrpc_bulk_desc;
-struct brw_page;
-/* Linux specific */
-struct key;
-struct seq_file;
-
-/*
- * forward declaration
- */
-struct ptlrpc_sec_policy;
-struct ptlrpc_sec_cops;
-struct ptlrpc_sec_sops;
-struct ptlrpc_sec;
-struct ptlrpc_svc_ctx;
-struct ptlrpc_cli_ctx;
-struct ptlrpc_ctx_ops;
-
-/**
- * \addtogroup flavor flavor
- *
- * An RPC flavor is represented by a 32-bit integer. Currently the high 12 bits
- * are unused and must be set to 0 for future expansion.
- * <pre>
- * ------------------------------------------------------------------------
- * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
- * ------------------------------------------------------------------------
- * </pre>
- *
- * @{
- */
-
-/*
- * flavor constants
- */
-enum sptlrpc_policy {
- SPTLRPC_POLICY_NULL = 0,
- SPTLRPC_POLICY_PLAIN = 1,
- SPTLRPC_POLICY_GSS = 2,
- SPTLRPC_POLICY_MAX,
-};
-
-enum sptlrpc_mech_null {
- SPTLRPC_MECH_NULL = 0,
- SPTLRPC_MECH_NULL_MAX,
-};
-
-enum sptlrpc_mech_plain {
- SPTLRPC_MECH_PLAIN = 0,
- SPTLRPC_MECH_PLAIN_MAX,
-};
-
-enum sptlrpc_mech_gss {
- SPTLRPC_MECH_GSS_NULL = 0,
- SPTLRPC_MECH_GSS_KRB5 = 1,
- SPTLRPC_MECH_GSS_MAX,
-};
-
-enum sptlrpc_service_type {
- SPTLRPC_SVC_NULL = 0, /**< no security */
- SPTLRPC_SVC_AUTH = 1, /**< authentication only */
- SPTLRPC_SVC_INTG = 2, /**< integrity */
- SPTLRPC_SVC_PRIV = 3, /**< privacy */
- SPTLRPC_SVC_MAX,
-};
-
-enum sptlrpc_bulk_type {
- SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
- SPTLRPC_BULK_HASH = 1, /**< hash integrity */
- SPTLRPC_BULK_MAX,
-};
-
-enum sptlrpc_bulk_service {
- SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
- SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
- SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
- SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
- SPTLRPC_BULK_SVC_MAX,
-};
-
-/*
- * compose/extract macros
- */
-#define FLVR_POLICY_OFFSET (0)
-#define FLVR_MECH_OFFSET (4)
-#define FLVR_SVC_OFFSET (8)
-#define FLVR_BULK_TYPE_OFFSET (12)
-#define FLVR_BULK_SVC_OFFSET (16)
-
-#define MAKE_FLVR(policy, mech, svc, btype, bsvc) \
- (((__u32)(policy) << FLVR_POLICY_OFFSET) | \
- ((__u32)(mech) << FLVR_MECH_OFFSET) | \
- ((__u32)(svc) << FLVR_SVC_OFFSET) | \
- ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \
- ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
-
-/*
- * extraction
- */
-#define SPTLRPC_FLVR_POLICY(flavor) \
- ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_MECH(flavor) \
- ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_SVC(flavor) \
- ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_BULK_TYPE(flavor) \
- ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
-#define SPTLRPC_FLVR_BULK_SVC(flavor) \
- ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)
-
-#define SPTLRPC_FLVR_BASE(flavor) \
- ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
-#define SPTLRPC_FLVR_BASE_SUB(flavor) \
- ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
-
-/*
- * gss subflavors
- */
-#define MAKE_BASE_SUBFLVR(mech, svc) \
- ((__u32)(mech) | \
- ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
-
-#define SPTLRPC_SUBFLVR_KRB5N \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
-#define SPTLRPC_SUBFLVR_KRB5A \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
-#define SPTLRPC_SUBFLVR_KRB5I \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
-#define SPTLRPC_SUBFLVR_KRB5P \
- MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
-
-/*
- * "end user" flavors
- */
-#define SPTLRPC_FLVR_NULL \
- MAKE_FLVR(SPTLRPC_POLICY_NULL, \
- SPTLRPC_MECH_NULL, \
- SPTLRPC_SVC_NULL, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_NULL)
-#define SPTLRPC_FLVR_PLAIN \
- MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
- SPTLRPC_MECH_PLAIN, \
- SPTLRPC_SVC_NULL, \
- SPTLRPC_BULK_HASH, \
- SPTLRPC_BULK_SVC_INTG)
-#define SPTLRPC_FLVR_KRB5N \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_NULL, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_NULL)
-#define SPTLRPC_FLVR_KRB5A \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_AUTH, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_NULL)
-#define SPTLRPC_FLVR_KRB5I \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_INTG, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_INTG)
-#define SPTLRPC_FLVR_KRB5P \
- MAKE_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_PRIV, \
- SPTLRPC_BULK_DEFAULT, \
- SPTLRPC_BULK_SVC_PRIV)
-
-#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
-
-#define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
-#define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
-
-/**
- * extract the useful part from wire flavor
- */
-#define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
-
-/** @} flavor */
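/*
 * A standalone sketch (not part of this header) that composes and decomposes
 * SPTLRPC_FLVR_KRB5P from user space using the 4-bit field layout documented
 * above.  The offsets and numeric values are copied from the enums and
 * macros above; nothing new is assumed.
 */
#include <stdio.h>

int main(void)
{
	/* policy=GSS(2), mech=KRB5(1), svc=PRIV(3), btype=DEFAULT(0), bsvc=PRIV(3) */
	unsigned int flvr = (2u << 0) | (1u << 4) | (3u << 8) | (0u << 12) | (3u << 16);

	printf("krb5p flavor = 0x%05x\n", flvr);            /* 0x30312 */
	printf("policy       = %u\n", (flvr >> 0) & 0xF);   /* 2: GSS  */
	printf("mech         = %u\n", (flvr >> 4) & 0xF);   /* 1: KRB5 */
	printf("svc          = %u\n", (flvr >> 8) & 0xF);   /* 3: PRIV */
	printf("bulk svc     = %u\n", (flvr >> 16) & 0xF);  /* 3: PRIV */
	printf("base (12 b)  = 0x%03x\n", flvr & 0xFFF);    /* SPTLRPC_FLVR_BASE */
	return 0;
}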
-
-static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
-{
- LASSERT(svc < SPTLRPC_SVC_MAX);
- *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
- SPTLRPC_FLVR_MECH(*flvr),
- svc,
- SPTLRPC_FLVR_BULK_TYPE(*flvr),
- SPTLRPC_FLVR_BULK_SVC(*flvr));
-}
-
-static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
-{
- LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
- *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
- SPTLRPC_FLVR_MECH(*flvr),
- SPTLRPC_FLVR_SVC(*flvr),
- SPTLRPC_FLVR_BULK_TYPE(*flvr),
- svc);
-}
-
-struct bulk_spec_hash {
- __u8 hash_alg;
-};
-
-/**
- * Full description of flavors being used on a ptlrpc connection, including
- * both regular RPC and bulk transfer parts.
- */
-struct sptlrpc_flavor {
- /**
- * wire flavor, should be renamed to sf_wire.
- */
- __u32 sf_rpc;
- /**
- * general flags of PTLRPC_SEC_FL_*
- */
- __u32 sf_flags;
- /**
- * rpc flavor specification
- */
- union {
- /* nothing for now */
- } u_rpc;
- /**
- * bulk flavor specification
- */
- union {
- struct bulk_spec_hash hash;
- } u_bulk;
-};
-
-/**
- * Identifies which part of Lustre generated the RPC. It is encoded into
- * RPC requests and checked by the ptlrpc service.
- */
-enum lustre_sec_part {
- LUSTRE_SP_CLI = 0,
- LUSTRE_SP_MDT,
- LUSTRE_SP_OST,
- LUSTRE_SP_MGC,
- LUSTRE_SP_MGS,
- LUSTRE_SP_ANY = 0xFF
-};
-
-enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
-
-/**
- * A rule specifies a flavor to be used by a ptlrpc connection between
- * two Lustre parts.
- */
-struct sptlrpc_rule {
- __u32 sr_netid; /* LNET network ID */
- __u8 sr_from; /* sec_part */
- __u8 sr_to; /* sec_part */
- __u16 sr_padding;
- struct sptlrpc_flavor sr_flvr;
-};
-
-/**
- * A set of rules in memory.
- *
- * Rules are generated and stored on MGS, and propagated to MDT, OST,
- * and client when needed.
- */
-struct sptlrpc_rule_set {
- int srs_nslot;
- int srs_nrule;
- struct sptlrpc_rule *srs_rules;
-};
-
-int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
-int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
-
-static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
-{
- memset(set, 0, sizeof(*set));
-}
-
-void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
-int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
-int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
- struct sptlrpc_rule *rule);
-int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
- enum lustre_sec_part from,
- enum lustre_sec_part to,
- lnet_nid_t nid,
- struct sptlrpc_flavor *sf);
-
-int sptlrpc_process_config(struct lustre_cfg *lcfg);
-void sptlrpc_conf_log_start(const char *logname);
-void sptlrpc_conf_log_stop(const char *logname);
-void sptlrpc_conf_log_update_begin(const char *logname);
-void sptlrpc_conf_log_update_end(const char *logname);
-void sptlrpc_conf_client_adapt(struct obd_device *obd);
-
-/* The maximum length of security payload. 1024 is enough for Kerberos 5
- * and is expected to be enough for future mechanisms, though this is not guaranteed.
- * Only used by pre-allocated request/reply pool.
- */
-#define SPTLRPC_MAX_PAYLOAD (1024)
-
-
-struct vfs_cred {
- uint32_t vc_uid;
- uint32_t vc_gid;
-};
-
-struct ptlrpc_ctx_ops {
- /**
- * To determine whether it's suitable to use the \a ctx for \a vcred.
- */
- int (*match) (struct ptlrpc_cli_ctx *ctx,
- struct vfs_cred *vcred);
-
- /**
- * To bring the \a ctx uptodate.
- */
- int (*refresh) (struct ptlrpc_cli_ctx *ctx);
-
- /**
- * Validate the \a ctx.
- */
- int (*validate) (struct ptlrpc_cli_ctx *ctx);
-
- /**
- * Force the \a ctx to die.
- */
- void (*force_die) (struct ptlrpc_cli_ctx *ctx,
- int grace);
- int (*display) (struct ptlrpc_cli_ctx *ctx,
- char *buf, int bufsize);
-
- /**
- * Sign the request message using \a ctx.
- *
- * \pre req->rq_reqmsg point to request message.
- * \pre req->rq_reqlen is the request message length.
- * \post req->rq_reqbuf point to request message with signature.
- * \post req->rq_reqdata_len is set to the final request message size.
- *
- * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
- */
- int (*sign) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
-
- /**
- * Verify the reply message using \a ctx.
- *
- * \pre req->rq_repdata point to reply message with signature.
- * \pre req->rq_repdata_len is the total reply message length.
- * \post req->rq_repmsg point to reply message without signature.
- * \post req->rq_replen is the reply message length.
- *
- * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
- */
- int (*verify) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
-
- /**
- * Encrypt the request message using \a ctx.
- *
- * \pre req->rq_reqmsg point to request message in clear text.
- * \pre req->rq_reqlen is the request message length.
- * \post req->rq_reqbuf point to request message.
- * \post req->rq_reqdata_len is set to the final request message size.
- *
- * \see gss_cli_ctx_seal().
- */
- int (*seal) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
-
- /**
- * Decrypt the reply message using \a ctx.
- *
- * \pre req->rq_repdata point to encrypted reply message.
- * \pre req->rq_repdata_len is the total cipher text length.
- * \post req->rq_repmsg point to reply message in clear text.
- * \post req->rq_replen is the reply message length in clear text.
- *
- * \see gss_cli_ctx_unseal().
- */
- int (*unseal) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
-
- /**
- * Wrap bulk request data. This is called before wrapping RPC
- * request message.
- *
- * \pre bulk buffer is described by desc->bd_iov and
- * desc->bd_iov_count. Note for read it's just a buffer, no data
- * needs to be sent; for write it contains data in clear text.
- * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared
- * (usually inside of RPC request message).
- * - encryption: cipher text bulk buffer is described by
- * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
- * count remains the same).
- * - otherwise: bulk buffer is still desc->bd_iov and
- * desc->bd_iov_count.
- *
- * \return 0: success.
- * \return -ev: error code.
- *
- * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
- */
- int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
- /**
- * Unwrap bulk reply data. This is called after the RPC reply
- * message has been unwrapped.
- *
- * \pre bulk buffer is described by desc->bd_iov/desc->bd_enc_iov and
- * desc->bd_iov_count, according to wrap_bulk().
- * \post final bulk data in clear text is placed in buffer described
- * by desc->bd_iov and desc->bd_iov_count.
- * \return +ve nob of actual bulk data in clear text.
- * \return -ve error code.
- *
- * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
- */
- int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-};
-
-#define PTLRPC_CTX_NEW_BIT (0) /* newly created */
-#define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */
-#define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */
-#define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */
-#define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) */
-#define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */
-
-#define PTLRPC_CTX_NEW (1 << PTLRPC_CTX_NEW_BIT)
-#define PTLRPC_CTX_UPTODATE (1 << PTLRPC_CTX_UPTODATE_BIT)
-#define PTLRPC_CTX_DEAD (1 << PTLRPC_CTX_DEAD_BIT)
-#define PTLRPC_CTX_ERROR (1 << PTLRPC_CTX_ERROR_BIT)
-#define PTLRPC_CTX_CACHED (1 << PTLRPC_CTX_CACHED_BIT)
-#define PTLRPC_CTX_ETERNAL (1 << PTLRPC_CTX_ETERNAL_BIT)
-
-#define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \
- PTLRPC_CTX_UPTODATE | \
- PTLRPC_CTX_DEAD | \
- PTLRPC_CTX_ERROR)
-
-struct ptlrpc_cli_ctx {
- struct hlist_node cc_cache; /* linked into ctx cache */
- atomic_t cc_refcount;
- struct ptlrpc_sec *cc_sec;
- struct ptlrpc_ctx_ops *cc_ops;
- unsigned long cc_expire; /* in seconds */
- unsigned int cc_early_expire:1;
- unsigned long cc_flags;
- struct vfs_cred cc_vcred;
- spinlock_t cc_lock;
- struct list_head cc_req_list; /* waiting reqs linked here */
- struct list_head cc_gc_chain; /* linked to gc chain */
-};
-
-/**
- * client side policy operation vector.
- */
-struct ptlrpc_sec_cops {
- /**
- * Given an \a imp, create and initialize a ptlrpc_sec structure.
- * \param ctx service context:
- * - regular import: \a ctx should be NULL;
- * - reverse import: \a ctx is obtained from incoming request.
- * \param flavor specify what flavor to use.
- *
- * When necessary, policy module is responsible for taking reference
- * on the import.
- *
- * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
- */
- struct ptlrpc_sec * (*create_sec) (struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx,
- struct sptlrpc_flavor *flavor);
-
- /**
- * Destructor of ptlrpc_sec. When called, refcount has been dropped
- * to 0 and all contexts have been destroyed.
- *
- * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
- */
- void (*destroy_sec) (struct ptlrpc_sec *sec);
-
- /**
- * Notify that this ptlrpc_sec is going to die. Optionally, the policy
- * module is supposed to set sec->ps_dying and take whatever actions
- * are necessary.
- *
- * \see plain_kill_sec(), gss_sec_kill().
- */
- void (*kill_sec) (struct ptlrpc_sec *sec);
-
- /**
- * Given \a vcred, lookup and/or create its context. The policy module
- * is supposed to maintain its own context cache.
- * XXX currently \a create and \a remove_dead are always 1, perhaps
- * they should be removed completely.
- *
- * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
- */
- struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec,
- struct vfs_cred *vcred,
- int create,
- int remove_dead);
-
- /**
- * Called when the reference count of \a ctx drops to 0. The policy module
- * is supposed to destroy this context or whatever else according to
- * its cache maintenance mechanism.
- *
- * \param sync if zero, we shouldn't wait for the context to be
- * destroyed completely.
- *
- * \see plain_release_ctx(), gss_sec_release_ctx_kr().
- */
- void (*release_ctx) (struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *ctx,
- int sync);
-
- /**
- * Flush the context cache.
- *
- * \param uid flush contexts belonging to this user; -1 means all contexts.
- * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
- * contexts should be cleared immediately.
- * \param force if zero, only idle contexts will be flushed.
- *
- * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
- */
- int (*flush_ctx_cache)
- (struct ptlrpc_sec *sec,
- uid_t uid,
- int grace,
- int force);
-
- /**
- * Called periodically by garbage collector to remove dead contexts
- * from cache.
- *
- * \see gss_sec_gc_ctx_kr().
- */
- void (*gc_ctx) (struct ptlrpc_sec *sec);
-
- /**
- * Given a context \a ctx, install a corresponding reverse service
- * context on client side.
- * XXX currently it's only used by GSS module, maybe we should remove
- * this from general API.
- */
- int (*install_rctx)(struct obd_import *imp,
- struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *ctx);
-
- /**
- * To allocate request buffer for \a req.
- *
- * \pre req->rq_reqmsg == NULL.
- * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
- * we are not supposed to free it.
- * \post if success, req->rq_reqmsg point to a buffer with size
- * at least \a lustre_msg_size.
- *
- * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
- */
- int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int lustre_msg_size);
-
- /**
- * To free request buffer for \a req.
- *
- * \pre req->rq_reqbuf != NULL.
- *
- * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
- */
- void (*free_reqbuf) (struct ptlrpc_sec *sec,
- struct ptlrpc_request *req);
-
- /**
- * To allocate reply buffer for \a req.
- *
- * \pre req->rq_repbuf == NULL.
- * \post if success, req->rq_repbuf point to a buffer with size
- * req->rq_repbuf_len, which should be large enough to receive the
- * reply transformed from \a lustre_msg_size bytes of clear text.
- *
- * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
- */
- int (*alloc_repbuf)(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int lustre_msg_size);
-
- /**
- * To free reply buffer for \a req.
- *
- * \pre req->rq_repbuf != NULL.
- * \post req->rq_repbuf == NULL.
- * \post req->rq_repbuf_len == 0.
- *
- * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
- */
- void (*free_repbuf) (struct ptlrpc_sec *sec,
- struct ptlrpc_request *req);
-
- /**
- * To expand the request buffer of \a req so that the \a segment in
- * the request message pointed to by req->rq_reqmsg can accommodate
- * at least \a newsize bytes of data.
- *
- * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
- *
- * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
- * gss_enlarge_reqbuf().
- */
- int (*enlarge_reqbuf)
- (struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int segment, int newsize);
- /*
- * misc
- */
- int (*display) (struct ptlrpc_sec *sec,
- struct seq_file *seq);
-};
-
-/**
- * server side policy operation vector.
- */
-struct ptlrpc_sec_sops {
- /**
- * verify an incoming request.
- *
- * \pre request message is pointed by req->rq_reqbuf, size is
- * req->rq_reqdata_len; and the message has been unpacked to
- * host byte order.
- *
- * \retval SECSVC_OK success, req->rq_reqmsg point to request message
- * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
- * req->rq_sp_from is decoded from request.
- * \retval SECSVC_COMPLETE success, the request has been fully
- * processed, and reply message has been prepared; req->rq_sp_from is
- * decoded from request.
- * \retval SECSVC_DROP failed, this request should be dropped.
- *
- * \see null_accept(), plain_accept(), gss_svc_accept_kr().
- */
- int (*accept) (struct ptlrpc_request *req);
-
- /**
- * Perform security transformation upon reply message.
- *
- * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
- * is req->rq_replen.
- * \post req->rs_repdata_len is the final message size.
- * \post req->rq_reply_off is set.
- *
- * \see null_authorize(), plain_authorize(), gss_svc_authorize().
- */
- int (*authorize) (struct ptlrpc_request *req);
-
- /**
- * Invalidate server context \a ctx.
- *
- * \see gss_svc_invalidate_ctx().
- */
- void (*invalidate_ctx)
- (struct ptlrpc_svc_ctx *ctx);
-
- /**
- * Allocate a ptlrpc_reply_state.
- *
- * \param msgsize size of the reply message in clear text.
- * \pre if req->rq_reply_state != NULL, then it's pre-allocated and we
- * should simply use it; otherwise we are responsible for allocating
- * a new one.
- * \post req->rq_reply_state != NULL;
- * \post req->rq_reply_state->rs_msg != NULL;
- *
- * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
- */
- int (*alloc_rs) (struct ptlrpc_request *req,
- int msgsize);
-
- /**
- * Free a ptlrpc_reply_state.
- */
- void (*free_rs) (struct ptlrpc_reply_state *rs);
-
- /**
- * Release the server context \a ctx.
- *
- * \see gss_svc_free_ctx().
- */
- void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
-
- /**
- * Install a reverse context based on the server context \a ctx.
- *
- * \see gss_svc_install_rctx_kr().
- */
- int (*install_rctx)(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx);
-
- /**
- * Prepare buffer for incoming bulk write.
- *
- * \pre desc->bd_iov and desc->bd_iov_count describe the buffer
- * intended to receive the write.
- *
- * \see gss_svc_prep_bulk().
- */
- int (*prep_bulk) (struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
- /**
- * Unwrap the bulk write data.
- *
- * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
- */
- int (*unwrap_bulk) (struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
- /**
- * Wrap the bulk read data.
- *
- * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
- */
- int (*wrap_bulk) (struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-};
-
-struct ptlrpc_sec_policy {
- struct module *sp_owner;
- char *sp_name;
- __u16 sp_policy; /* policy number */
- struct ptlrpc_sec_cops *sp_cops; /* client ops */
- struct ptlrpc_sec_sops *sp_sops; /* server ops */
-};
-
-#define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */
-#define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */
-#define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */
-#define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */
-#define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */
-
-/**
- * The ptlrpc_sec represents the client side ptlrpc security facilities;
- * each obd_import (both regular and reverse import) must be associated
- * with a ptlrpc_sec.
- *
- * \see sptlrpc_import_sec_adapt().
- */
-struct ptlrpc_sec {
- struct ptlrpc_sec_policy *ps_policy;
- atomic_t ps_refcount;
- /** statistic only */
- atomic_t ps_nctx;
- /** unique identifier */
- int ps_id;
- struct sptlrpc_flavor ps_flvr;
- enum lustre_sec_part ps_part;
- /** after set, no more new context will be created */
- unsigned int ps_dying:1;
- /** owning import */
- struct obd_import *ps_import;
- spinlock_t ps_lock;
-
- /*
- * garbage collection
- */
- struct list_head ps_gc_list;
- unsigned long ps_gc_interval; /* in seconds */
- time64_t ps_gc_next; /* in seconds */
-};
-
-static inline int sec_is_reverse(struct ptlrpc_sec *sec)
-{
- return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
-}
-
-static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
-{
- return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
-}
-
-
-struct ptlrpc_svc_ctx {
- atomic_t sc_refcount;
- struct ptlrpc_sec_policy *sc_policy;
-};
-
-/*
- * user identity descriptor
- */
-#define LUSTRE_MAX_GROUPS (128)
-
-struct ptlrpc_user_desc {
- __u32 pud_uid;
- __u32 pud_gid;
- __u32 pud_fsuid;
- __u32 pud_fsgid;
- __u32 pud_cap;
- __u32 pud_ngroups;
- __u32 pud_groups[0];
-};
-
-/*
- * bulk flavors
- */
-enum sptlrpc_bulk_hash_alg {
- BULK_HASH_ALG_NULL = 0,
- BULK_HASH_ALG_ADLER32,
- BULK_HASH_ALG_CRC32,
- BULK_HASH_ALG_MD5,
- BULK_HASH_ALG_SHA1,
- BULK_HASH_ALG_SHA256,
- BULK_HASH_ALG_SHA384,
- BULK_HASH_ALG_SHA512,
- BULK_HASH_ALG_MAX
-};
-
-const char *sptlrpc_get_hash_name(__u8 hash_alg);
-__u8 sptlrpc_get_hash_alg(const char *algname);
-
-enum {
- BSD_FL_ERR = 1,
-};
-
-struct ptlrpc_bulk_sec_desc {
- __u8 bsd_version; /* 0 */
- __u8 bsd_type; /* SPTLRPC_BULK_XXX */
- __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
- __u8 bsd_flags; /* flags */
- __u32 bsd_nob; /* nob of bulk data */
- __u8 bsd_data[0]; /* policy-specific token */
-};
-
-
-/*
- * round size up to next power of 2, for slab allocation.
- * @size must be sane (can't overflow after round up)
- */
-static inline int size_roundup_power2(int size)
-{
- size--;
- size |= size >> 1;
- size |= size >> 2;
- size |= size >> 4;
- size |= size >> 8;
- size |= size >> 16;
- size++;
- return size;
-}
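/*
 * A standalone user-space sketch (not part of this header) checking the
 * round-up trick above: decrement, smear the highest set bit into all lower
 * bits, then increment to reach the next power of two; exact powers of two
 * are returned unchanged.
 */
#include <assert.h>

static int roundup_power2(int size)
{
	size--;
	size |= size >> 1;
	size |= size >> 2;
	size |= size >> 4;
	size |= size >> 8;
	size |= size >> 16;
	size++;
	return size;
}

int main(void)
{
	assert(roundup_power2(1) == 1);
	assert(roundup_power2(300) == 512);
	assert(roundup_power2(1024) == 1024);
	assert(roundup_power2(1025) == 2048);
	return 0;
}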
-
-/*
- * internal support libraries
- */
-void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
- int segment, int newsize);
-
-/*
- * security policies
- */
-int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
-int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
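/*
 * A minimal sketch (hypothetical, not from the tree) of how a security policy
 * module would plug into the registration API above: fill a ptlrpc_sec_policy
 * with client/server operation vectors and register it at module init.  The
 * "demo" names and the reuse of SPTLRPC_POLICY_PLAIN as a policy number are
 * assumptions made only for illustration; the op tables are left empty.
 */
#include <linux/module.h>
#include "lustre_sec.h"				/* this (removed) header */

static struct ptlrpc_sec_cops demo_cops;	/* client-side ops, elided */
static struct ptlrpc_sec_sops demo_sops;	/* server-side ops, elided */

static struct ptlrpc_sec_policy demo_policy = {
	.sp_owner	= THIS_MODULE,
	.sp_name	= "demo",
	.sp_policy	= SPTLRPC_POLICY_PLAIN,	/* a real policy claims its own number */
	.sp_cops	= &demo_cops,
	.sp_sops	= &demo_sops,
};

static int __init demo_sec_init(void)
{
	return sptlrpc_register_policy(&demo_policy);
}

static void __exit demo_sec_exit(void)
{
	sptlrpc_unregister_policy(&demo_policy);
}

module_init(demo_sec_init);
module_exit(demo_sec_exit);
MODULE_LICENSE("GPL");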
-
-__u32 sptlrpc_name2flavor_base(const char *name);
-const char *sptlrpc_flavor2name_base(__u32 flvr);
-char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
- char *buf, int bufsize);
-char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
-char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
-
-static inline
-struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
-{
- __module_get(policy->sp_owner);
- return policy;
-}
-
-static inline
-void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
-{
- module_put(policy->sp_owner);
-}
-
-/*
- * client credential
- */
-static inline
-unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
-{
- return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
-}
-
-static inline
-int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
-{
- return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
-}
-
-static inline
-int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
-{
- return (cli_ctx_status(ctx) != 0);
-}
-
-static inline
-int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
-}
-
-static inline
-int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
-}
-
-static inline
-int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
-}
-
-static inline
-int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
-{
- return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
-}
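/*
 * A user-space sketch (not part of this header) of the cc_flags status logic
 * above, using the same bit positions.  Note PTLRPC_CTX_NEW_BIT is 0, so the
 * NEW flag does not contribute to PTLRPC_CTX_STATUS_MASK: a freshly created
 * context is not yet "refreshed", while UPTODATE, DEAD or ERROR all count.
 */
#include <assert.h>

#define CTX_NEW		(1 << 0)
#define CTX_UPTODATE	(1 << 1)
#define CTX_DEAD	(1 << 2)
#define CTX_ERROR	(1 << 3)
#define CTX_STATUS_MASK	(CTX_UPTODATE | CTX_DEAD | CTX_ERROR)

int main(void)
{
	unsigned long flags = CTX_NEW;			/* freshly created */

	assert((flags & CTX_STATUS_MASK) == 0);		/* not refreshed yet */

	flags = CTX_UPTODATE;				/* refresh succeeded */
	assert((flags & CTX_STATUS_MASK) == CTX_UPTODATE);

	flags = CTX_DEAD | CTX_ERROR;			/* refresh failed */
	assert((flags & (CTX_DEAD | CTX_ERROR)) != 0);	/* counts as dead */
	return 0;
}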
-
-/*
- * sec get/put
- */
-struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
-void sptlrpc_sec_put(struct ptlrpc_sec *sec);
-
-/*
- * internal apis which only used by policy implementation
- */
-int sptlrpc_get_next_secid(void);
-
-/*
- * exported client context api
- */
-struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
-void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
-
-/*
- * exported client context wrap/buffers
- */
-int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
-int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
-int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
-void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
-int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
-void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
-int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
- int segment, int newsize);
-int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
- struct ptlrpc_request **req_ret);
-void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
-
-void sptlrpc_request_out_callback(struct ptlrpc_request *req);
-
-/*
- * exported higher interface of import & request
- */
-int sptlrpc_import_sec_adapt(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx,
- struct sptlrpc_flavor *flvr);
-struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
-void sptlrpc_import_sec_put(struct obd_import *imp);
-
-int sptlrpc_import_check_ctx(struct obd_import *imp);
-void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
-void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
-void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
-int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
-void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
-int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
-int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
-void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
-
-int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
-
-/* gc */
-void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
-void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
-
-/* misc */
-const char *sec2target_str(struct ptlrpc_sec *sec);
-/*
- * lprocfs
- */
-int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
-
-/*
- * server side
- */
-enum secsvc_accept_res {
- SECSVC_OK = 0,
- SECSVC_COMPLETE,
- SECSVC_DROP,
-};
-
-int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
-int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
-int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
-void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
-void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
-void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
-
-int sptlrpc_target_export_check(struct obd_export *exp,
- struct ptlrpc_request *req);
-/*
- * reverse context
- */
-int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx);
-
-/* bulk security api */
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
-
-int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc,
- int nob);
-int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
-
-/* bulk helpers (internal use only by policies) */
-int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
- void *buf, int buflen);
-
-int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
-
-/* user descriptor helpers */
-static inline int sptlrpc_user_desc_size(int ngroups)
-{
- return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
-}
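/*
 * A user-space sketch (not part of this header) of how the trailing
 * pud_groups[] array and the size helper above pair up when packing the user
 * descriptor: one allocation covers the fixed header plus ngroups 32-bit
 * slots.  The struct here only mirrors the layout above for illustration.
 */
#include <stdint.h>
#include <stdlib.h>

struct user_desc_sketch {
	uint32_t pud_uid, pud_gid, pud_fsuid, pud_fsgid;
	uint32_t pud_cap;
	uint32_t pud_ngroups;
	uint32_t pud_groups[];			/* flexible array member */
};

static size_t user_desc_size(unsigned int ngroups)
{
	return sizeof(struct user_desc_sketch) + ngroups * sizeof(uint32_t);
}

int main(void)
{
	unsigned int ngroups = 4;
	struct user_desc_sketch *pud = calloc(1, user_desc_size(ngroups));

	if (!pud)
		return 1;
	pud->pud_ngroups = ngroups;
	for (unsigned int i = 0; i < ngroups; i++)
		pud->pud_groups[i] = 1000 + i;	/* arbitrary supplementary gids */
	free(pud);
	return 0;
}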
-
-int sptlrpc_current_user_desc_size(void);
-int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
-int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
-
-
-#define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
-#define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)
-
-enum {
- LUSTRE_SEC_NONE = 0,
- LUSTRE_SEC_REMOTE = 1,
- LUSTRE_SEC_SPECIFY = 2,
- LUSTRE_SEC_ALL = 3
-};
-
-/** @} sptlrpc */
-
-#endif /* _LUSTRE_SEC_H_ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h
deleted file mode 100644
index caa4da12f37a..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_ver.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _LUSTRE_VER_H_
-#define _LUSTRE_VER_H_
-/* This file is automatically generated from lustre/include/lustre_ver.h.in,
- * based on parameters in lustre/autoconf/lustre-version.ac.
- * Changes made directly to this file will be lost. */
-
-#define LUSTRE_MAJOR 2
-#define LUSTRE_MINOR 3
-#define LUSTRE_PATCH 64
-#define LUSTRE_FIX 0
-#define LUSTRE_VERSION_STRING "2.3.64"
-
-#define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \
- LUSTRE_MINOR, LUSTRE_PATCH, \
- LUSTRE_FIX)
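/*
 * A standalone sketch of the version arithmetic above.  OBD_OCD_VERSION is
 * defined elsewhere in the tree; the one-byte-per-component packing used
 * here is an assumption made only for this illustration.
 */
#include <stdio.h>

#define OCD_VERSION(major, minor, patch, fix) \
	(((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix))

int main(void)
{
	unsigned int code = OCD_VERSION(2, 3, 64, 0);	/* 2.3.64.0 */
	unsigned int warn = OCD_VERSION(0, 4, 0, 0);	/* warning threshold */

	printf("LUSTRE_VERSION_CODE ~ 0x%08x\n", code);	/* 0x02034000 */
	printf("warn offset         ~ 0x%08x\n", warn);	/* 0x00040000 */
	return 0;
}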
-
-/* liblustre clients are only allowed to connect if their LUSTRE_FIX mismatches
- * by no more than this amount (set in lustre/autoconf/lustre-version.ac). */
-#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32)
-
-/* If the Lustre version of a client and the servers it connects to differs by more
- * than this amount, the client will issue a warning.
- * (set in lustre/autoconf/lustre-version.ac) */
-#define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0)
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
deleted file mode 100644
index e8317b8e1faa..000000000000
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ /dev/null
@@ -1,1371 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __OBD_H
-#define __OBD_H
-
-#include "linux/obd.h"
-
-#define IOC_OSC_TYPE 'h'
-#define IOC_OSC_MIN_NR 20
-#define IOC_OSC_SET_ACTIVE _IOWR(IOC_OSC_TYPE, 21, struct obd_device *)
-#define IOC_OSC_MAX_NR 50
-
-#define IOC_MDC_TYPE 'i'
-#define IOC_MDC_MIN_NR 20
-#define IOC_MDC_MAX_NR 50
-
-#include "lustre/lustre_idl.h"
-#include "lustre_lib.h"
-#include "lu_ref.h"
-#include "lustre_export.h"
-#include "lustre_fid.h"
-#include "lustre_fld.h"
-
-#define MAX_OBD_DEVICES 8192
-
-struct osc_async_rc {
- int ar_rc;
- int ar_force_sync;
- __u64 ar_min_xid;
-};
-
-struct lov_oinfo { /* per-stripe data structure */
- struct ost_id loi_oi; /* object ID/Sequence on the target OST */
- int loi_ost_idx; /* OST stripe index in lov_tgt_desc->tgts */
- int loi_ost_gen; /* generation of this loi_ost_idx */
-
- unsigned long loi_kms_valid:1;
- __u64 loi_kms; /* known minimum size */
- struct ost_lvb loi_lvb;
- struct osc_async_rc loi_ar;
-};
-
-static inline void loi_kms_set(struct lov_oinfo *oinfo, __u64 kms)
-{
- oinfo->loi_kms = kms;
- oinfo->loi_kms_valid = 1;
-}
-
-static inline void loi_init(struct lov_oinfo *loi)
-{
-}
-
-struct lov_stripe_md {
- atomic_t lsm_refc;
- spinlock_t lsm_lock;
- pid_t lsm_lock_owner; /* debugging */
-
- /* maximum possible file size, might change as OSTs status changes,
- * e.g. disconnected, deactivated */
- __u64 lsm_maxbytes;
- struct {
- /* Public members. */
- struct ost_id lw_object_oi; /* lov object id/seq */
-
- /* LOV-private members start here -- only for use in lov/. */
- __u32 lw_magic;
- __u32 lw_stripe_size; /* size of the stripe */
- __u32 lw_pattern; /* striping pattern (RAID0, RAID1) */
- __u16 lw_stripe_count; /* number of objects being striped over */
- __u16 lw_layout_gen; /* generation of the layout */
- char lw_pool_name[LOV_MAXPOOLNAME]; /* pool name */
- } lsm_wire;
-
- struct lov_oinfo *lsm_oinfo[0];
-};
-
-#define lsm_oi lsm_wire.lw_object_oi
-#define lsm_magic lsm_wire.lw_magic
-#define lsm_layout_gen lsm_wire.lw_layout_gen
-#define lsm_stripe_size lsm_wire.lw_stripe_size
-#define lsm_pattern lsm_wire.lw_pattern
-#define lsm_stripe_count lsm_wire.lw_stripe_count
-#define lsm_pool_name lsm_wire.lw_pool_name
-
-static inline bool lsm_is_released(struct lov_stripe_md *lsm)
-{
- return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
-}
-
-static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
-{
- if (lsm == NULL)
- return false;
- if (lsm_is_released(lsm))
- return false;
- return true;
-}
-
-static inline int lov_stripe_md_size(unsigned int stripe_count)
-{
- struct lov_stripe_md lsm;
-
- return sizeof(lsm) + stripe_count * sizeof(lsm.lsm_oinfo[0]);
-}
-
-struct obd_info;
-
-typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
-
-/* obd info for a particular level (lov, osc). */
-struct obd_info {
-	/* Flags used to set request-specific flags:
-	   - during lock handling, the flags obtained on the enqueue
-	     request are set here.
-	   - during stats, the flags used to control delay/resend.
-	   - during setattr, the flags used to distinguish the punch operation
- */
- __u64 oi_flags;
- /* lsm data specific for every OSC. */
- struct lov_stripe_md *oi_md;
- /* obdo data specific for every OSC, if needed at all. */
- struct obdo *oi_oa;
- /* statfs data specific for every OSC, if needed at all. */
- struct obd_statfs *oi_osfs;
-	/* An update callback which is called to update some data at the upper
-	 * level. E.g. it is used to update lsm->lsm_oinfo at every received
-	 * request at the osc level for enqueue requests. It is also possible
-	 * to update some caller data from the LOV layer if needed. */
- obd_enqueue_update_f oi_cb_up;
-};
-
-void lov_stripe_lock(struct lov_stripe_md *md);
-void lov_stripe_unlock(struct lov_stripe_md *md);
-
-struct obd_type {
- struct list_head typ_chain;
- struct obd_ops *typ_dt_ops;
- struct md_ops *typ_md_ops;
- struct dentry *typ_debugfs_entry;
- char *typ_name;
- int typ_refcnt;
- struct lu_device_type *typ_lu;
- spinlock_t obd_type_lock;
- struct kobject *typ_kobj;
-};
-
-struct brw_page {
- u64 off;
- struct page *pg;
- int count;
- u32 flag;
-};
-
-/* llog contexts */
-enum llog_ctxt_id {
- LLOG_CONFIG_ORIG_CTXT = 0,
- LLOG_CONFIG_REPL_CTXT,
- LLOG_MDS_OST_ORIG_CTXT,
- LLOG_MDS_OST_REPL_CTXT,
- LLOG_SIZE_ORIG_CTXT,
- LLOG_SIZE_REPL_CTXT,
- LLOG_RD1_ORIG_CTXT,
- LLOG_RD1_REPL_CTXT,
- LLOG_TEST_ORIG_CTXT,
- LLOG_TEST_REPL_CTXT,
- LLOG_LOVEA_ORIG_CTXT,
- LLOG_LOVEA_REPL_CTXT,
- LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */
- LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */
- LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */
- LLOG_AGENT_ORIG_CTXT, /**< agent requests generation on cdt */
- LLOG_MAX_CTXTS
-};
-
-struct timeout_item {
- enum timeout_event ti_event;
- unsigned long ti_timeout;
- timeout_cb_t ti_cb;
- void *ti_cb_data;
- struct list_head ti_obd_list;
- struct list_head ti_chain;
-};
-
-#define OSC_MAX_RIF_DEFAULT 8
-#define MDS_OSC_MAX_RIF_DEFAULT 50
-#define OSC_MAX_RIF_MAX 256
-#define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4)
-#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
-#define OSC_DEFAULT_RESENDS 10
-
-/* possible values for fo_sync_lock_cancel */
-enum {
- NEVER_SYNC_ON_CANCEL = 0,
- BLOCKING_SYNC_ON_CANCEL = 1,
- ALWAYS_SYNC_ON_CANCEL = 2,
- NUM_SYNC_ON_CANCEL_STATES
-};
-
-#define MDC_MAX_RIF_DEFAULT 8
-#define MDC_MAX_RIF_MAX 512
-
-struct mdc_rpc_lock;
-struct obd_import;
-struct client_obd {
- struct rw_semaphore cl_sem;
- struct obd_uuid cl_target_uuid;
- struct obd_import *cl_import; /* ptlrpc connection state */
- int cl_conn_count;
- /* max_mds_easize is purely a performance thing so we don't have to
- * call obd_size_diskmd() all the time. */
- int cl_default_mds_easize;
- int cl_max_mds_easize;
- int cl_default_mds_cookiesize;
- int cl_max_mds_cookiesize;
-
- enum lustre_sec_part cl_sp_me;
- enum lustre_sec_part cl_sp_to;
- struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
-
- /* the grant values are protected by loi_list_lock below */
- long cl_dirty; /* all _dirty_ in bytes */
- long cl_dirty_max; /* allowed w/o rpc */
- long cl_dirty_transit; /* dirty synchronous */
- long cl_avail_grant; /* bytes of credit for ost */
- long cl_lost_grant; /* lost credits (trunc) */
-
-	/* since we allocate grant by blocks, we don't know how much grant will
- * be used to add a page into cache. As a solution, we reserve maximum
- * grant before trying to dirty a page and unreserve the rest.
- * See osc_{reserve|unreserve}_grant for details. */
- long cl_reserved_grant;
- struct list_head cl_cache_waiters; /* waiting for cache/grant */
- unsigned long cl_next_shrink_grant; /* jiffies */
- struct list_head cl_grant_shrink_list; /* Timeout event list */
- int cl_grant_shrink_interval; /* seconds */
-
- /* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
- int cl_chunkbits;
- int cl_chunk;
- int cl_extent_tax; /* extent overhead, by bytes */
-
- /* keep track of objects that have lois that contain pages which
- * have been queued for async brw. this lock also protects the
- * lists of osc_client_pages that hang off of the loi */
- /*
- * ->cl_loi_list_lock protects consistency of
- * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and
- * ->ap_completion() call-backs are executed under this lock. As we
- * cannot guarantee that these call-backs never block on all platforms
- * (as a matter of fact they do block on Mac OS X), type of
- * ->cl_loi_list_lock is platform dependent: it's a spin-lock on Linux
- * and blocking mutex on Mac OS X. (Alternative is to make this lock
- * blocking everywhere, but we don't want to slow down fast-path of
- * our main platform.)
- *
- * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
- * with client_obd_list_{un,}lock() and
- * client_obd_list_lock_{init,done}() functions.
- *
-	 * NB by Jinshan: though the field names are still _loi_, it is
-	 * actually osc_object{}s that are in the list.
- */
- client_obd_lock_t cl_loi_list_lock;
- struct list_head cl_loi_ready_list;
- struct list_head cl_loi_hp_ready_list;
- struct list_head cl_loi_write_list;
- struct list_head cl_loi_read_list;
- int cl_r_in_flight;
- int cl_w_in_flight;
- /* just a sum of the loi/lop pending numbers to be exported by /proc */
- atomic_t cl_pending_w_pages;
- atomic_t cl_pending_r_pages;
- __u32 cl_max_pages_per_rpc;
- int cl_max_rpcs_in_flight;
- struct obd_histogram cl_read_rpc_hist;
- struct obd_histogram cl_write_rpc_hist;
- struct obd_histogram cl_read_page_hist;
- struct obd_histogram cl_write_page_hist;
- struct obd_histogram cl_read_offset_hist;
- struct obd_histogram cl_write_offset_hist;
-
- /* lru for osc caching pages */
- struct cl_client_cache *cl_cache;
- struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
- atomic_t *cl_lru_left;
- atomic_t cl_lru_busy;
- atomic_t cl_lru_shrinkers;
- atomic_t cl_lru_in_list;
- struct list_head cl_lru_list; /* lru page list */
- client_obd_lock_t cl_lru_list_lock; /* page list protector */
-
- /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
- atomic_t cl_destroy_in_flight;
- wait_queue_head_t cl_destroy_waitq;
-
- struct mdc_rpc_lock *cl_rpc_lock;
- struct mdc_rpc_lock *cl_close_lock;
-
- /* mgc datastruct */
- atomic_t cl_mgc_refcount;
- struct obd_export *cl_mgc_mgsexp;
-
- /* checksumming for data sent over the network */
- unsigned int cl_checksum:1; /* 0 = disabled, 1 = enabled */
- /* supported checksum types that are worked out at connect time */
- __u32 cl_supp_cksum_types;
- /* checksum algorithm to be used */
- cksum_type_t cl_cksum_type;
-
- /* also protected by the poorly named _loi_list_lock lock above */
- struct osc_async_rc cl_ar;
-
- /* used by quotacheck when the servers are older than 2.4 */
- int cl_qchk_stat; /* quotacheck stat of the peer */
-#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
-#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(2, 7, 50, 0)
-#warning "please consider removing quotacheck compatibility code"
-#endif
-
- /* sequence manager */
- struct lu_client_seq *cl_seq;
-
- atomic_t cl_resends; /* resend count */
-
- /* ptlrpc work for writeback in ptlrpcd context */
- void *cl_writeback_work;
- /* hash tables for osc_quota_info */
- struct cfs_hash *cl_quota_hash[MAXQUOTAS];
-};
-#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
-
-struct obd_id_info {
- __u32 idx;
- u64 *data;
-};
-
-struct echo_client_obd {
- struct obd_export *ec_exp; /* the local connection to osc/lov */
- spinlock_t ec_lock;
- struct list_head ec_objects;
- struct list_head ec_locks;
- int ec_nstripes;
- __u64 ec_unique;
-};
-
-/* Generic subset of OSTs */
-struct ost_pool {
-	__u32		   *op_array;	   /* array of indices into
-						lov_obd->lov_tgts */
-	unsigned int	    op_count;	   /* number of OSTs in the array */
-	unsigned int	    op_size;	   /* allocated size of op_array */
- struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
-};
-
-/* Round-robin allocator data */
-struct lov_qos_rr {
- __u32 lqr_start_idx; /* start index of new inode */
- __u32 lqr_offset_idx; /* aliasing for start_idx */
- int lqr_start_count; /* reseed counter */
- struct ost_pool lqr_pool; /* round-robin optimized list */
- unsigned long lqr_dirty:1; /* recalc round-robin list */
-};
-
-/* allow statfs data caching for 1 second */
-#define OBD_STATFS_CACHE_SECONDS 1
-
-struct lov_statfs_data {
- struct obd_info lsd_oi;
- struct obd_statfs lsd_statfs;
-};
-/* Stripe placement optimization */
-struct lov_qos {
- struct list_head lq_oss_list; /* list of OSSs that targets use */
- struct rw_semaphore lq_rw_sem;
- __u32 lq_active_oss_count;
- unsigned int lq_prio_free; /* priority for free space */
- unsigned int lq_threshold_rr;/* priority for rr */
- struct lov_qos_rr lq_rr; /* round robin qos data */
- unsigned long lq_dirty:1, /* recalc qos data */
-				lq_same_space:1,/* the OSTs all have approx.
- the same space avail */
- lq_reset:1, /* zero current penalties */
- lq_statfs_in_progress:1; /* statfs op in
- progress */
- /* qos statfs data */
- struct lov_statfs_data *lq_statfs_data;
- wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
- * requests completion */
-};
-
-struct lov_tgt_desc {
- struct list_head ltd_kill;
- struct obd_uuid ltd_uuid;
- struct obd_device *ltd_obd;
- struct obd_export *ltd_exp;
- __u32 ltd_gen;
- __u32 ltd_index; /* index in lov_obd->tgts */
- unsigned long ltd_active:1,/* is this target up for requests */
- ltd_activate:1,/* should target be activated */
- ltd_reap:1; /* should this target be deleted */
-};
-
-/* Pool metadata */
-#define pool_tgt_size(_p) _p->pool_obds.op_size
-#define pool_tgt_count(_p) _p->pool_obds.op_count
-#define pool_tgt_array(_p) _p->pool_obds.op_array
-#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
-
-struct pool_desc {
- char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
- struct ost_pool pool_obds; /* pool members */
- atomic_t pool_refcount; /* pool ref. counter */
- struct lov_qos_rr pool_rr; /* round robin qos */
- struct hlist_node pool_hash; /* access by poolname */
- struct list_head pool_list; /* serial access */
- struct dentry *pool_debugfs_entry; /* file in /proc */
- struct obd_device *pool_lobd; /* obd of the lov/lod to which
- * this pool belongs */
-};
-
-struct lov_obd {
- struct lov_desc desc;
- struct lov_tgt_desc **lov_tgts; /* sparse array */
- struct ost_pool lov_packed; /* all OSTs in a packed
- array */
- struct mutex lov_lock;
- struct obd_connect_data lov_ocd;
- atomic_t lov_refcount;
- __u32 lov_tgt_count; /* how many OBD's */
- __u32 lov_active_tgt_count; /* how many active */
- __u32 lov_death_row;/* tgts scheduled to be deleted */
- __u32 lov_tgt_size; /* size of tgts array */
- int lov_connects;
- int lov_pool_count;
- struct cfs_hash *lov_pools_hash_body; /* used for key access */
- struct list_head lov_pool_list; /* used for sequential access */
- struct dentry *lov_pool_debugfs_entry;
- enum lustre_sec_part lov_sp_me;
-
- /* Cached LRU pages from upper layer */
- void *lov_cache;
-
- struct rw_semaphore lov_notify_lock;
-
- struct kobject *lov_tgts_kobj;
-};
-
-struct lmv_tgt_desc {
- struct obd_uuid ltd_uuid;
- struct obd_export *ltd_exp;
- int ltd_idx;
- struct mutex ltd_fid_mutex;
- unsigned long ltd_active:1; /* target up for requests */
-};
-
-enum placement_policy {
- PLACEMENT_CHAR_POLICY = 0,
- PLACEMENT_NID_POLICY = 1,
- PLACEMENT_INVAL_POLICY = 2,
- PLACEMENT_MAX_POLICY
-};
-
-struct lmv_obd {
- int refcount;
- struct lu_client_fld lmv_fld;
- spinlock_t lmv_lock;
- enum placement_policy lmv_placement;
- struct lmv_desc desc;
- struct obd_uuid cluuid;
- struct obd_export *exp;
-
- struct mutex init_mutex;
- int connected;
- int max_easize;
- int max_def_easize;
- int max_cookiesize;
- int max_def_cookiesize;
- int server_timeout;
-
- int tgts_size; /* size of tgts array */
- struct lmv_tgt_desc **tgts;
-
- struct obd_connect_data conn_data;
- struct kobject *lmv_tgts_kobj;
-};
-
-struct niobuf_local {
- __u64 lnb_file_offset;
- __u32 lnb_page_offset;
- __u32 len;
- __u32 flags;
- struct page *page;
- struct dentry *dentry;
- int lnb_grant_used;
- int rc;
-};
-
-#define LUSTRE_FLD_NAME "fld"
-#define LUSTRE_SEQ_NAME "seq"
-
-#define LUSTRE_MDD_NAME "mdd"
-#define LUSTRE_OSD_LDISKFS_NAME "osd-ldiskfs"
-#define LUSTRE_OSD_ZFS_NAME "osd-zfs"
-#define LUSTRE_VVP_NAME "vvp"
-#define LUSTRE_LMV_NAME "lmv"
-#define LUSTRE_SLP_NAME "slp"
-#define LUSTRE_LOD_NAME "lod"
-#define LUSTRE_OSP_NAME "osp"
-#define LUSTRE_LWP_NAME "lwp"
-
-/* obd device type names */
- /* FIXME all the references to LUSTRE_MDS_NAME should be swapped with LUSTRE_MDT_NAME */
-#define LUSTRE_MDS_NAME "mds"
-#define LUSTRE_MDT_NAME "mdt"
-#define LUSTRE_MDC_NAME "mdc"
-#define LUSTRE_OSS_NAME "ost" /* FIXME change name to oss */
-#define LUSTRE_OST_NAME "obdfilter" /* FIXME change name to ost */
-#define LUSTRE_OSC_NAME "osc"
-#define LUSTRE_LOV_NAME "lov"
-#define LUSTRE_MGS_NAME "mgs"
-#define LUSTRE_MGC_NAME "mgc"
-
-#define LUSTRE_ECHO_NAME "obdecho"
-#define LUSTRE_ECHO_CLIENT_NAME "echo_client"
-#define LUSTRE_QMT_NAME "qmt"
-
-/* Constant obd names (post-rename) */
-#define LUSTRE_MDS_OBDNAME "MDS"
-#define LUSTRE_OSS_OBDNAME "OSS"
-#define LUSTRE_MGS_OBDNAME "MGS"
-#define LUSTRE_MGC_OBDNAME "MGC"
-
-/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
-#define N_LOCAL_TEMP_PAGE 0x10000000
-
-struct obd_trans_info {
- __u64 oti_transno;
- __u64 oti_xid;
- /* Only used on the server side for tracking acks. */
- struct oti_req_ack_lock {
- struct lustre_handle lock;
- __u32 mode;
- } oti_ack_locks[4];
- void *oti_handle;
- struct llog_cookie oti_onecookie;
- struct llog_cookie *oti_logcookies;
- int oti_numcookies;
- /** synchronous write is needed */
- unsigned long oti_sync_write:1;
-
- /* initial thread handling transaction */
- struct ptlrpc_thread *oti_thread;
- __u32 oti_conn_cnt;
- /** VBR: versions */
- __u64 oti_pre_version;
- /** JobID */
- char *oti_jobid;
-
- struct obd_uuid *oti_ost_uuid;
-};
-
-static inline void oti_init(struct obd_trans_info *oti,
- struct ptlrpc_request *req)
-{
- if (oti == NULL)
- return;
- memset(oti, 0, sizeof(*oti));
-
- if (req == NULL)
- return;
-
- oti->oti_xid = req->rq_xid;
- /** VBR: take versions from request */
- if (req->rq_reqmsg != NULL &&
- lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
- __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg);
-
- oti->oti_pre_version = pre_version ? pre_version[0] : 0;
- oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg);
- }
-
- /** called from mds_create_objects */
- if (req->rq_repmsg != NULL)
- oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
- oti->oti_thread = req->rq_svc_thread;
- if (req->rq_reqmsg != NULL)
- oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
-}
-
-static inline void oti_alloc_cookies(struct obd_trans_info *oti,
- int num_cookies)
-{
- if (!oti)
- return;
-
- if (num_cookies == 1)
- oti->oti_logcookies = &oti->oti_onecookie;
- else
- oti->oti_logcookies = libcfs_kvzalloc(num_cookies * sizeof(oti->oti_onecookie),
- GFP_NOFS);
-
- oti->oti_numcookies = num_cookies;
-}
-
-static inline void oti_free_cookies(struct obd_trans_info *oti)
-{
- if (!oti || !oti->oti_logcookies)
- return;
-
- if (oti->oti_logcookies == &oti->oti_onecookie)
- LASSERT(oti->oti_numcookies == 1);
- else
- kvfree(oti->oti_logcookies);
-
- oti->oti_logcookies = NULL;
- oti->oti_numcookies = 0;
-}
-
-/*
- * Events signalled through obd_notify() upcall-chain.
- */
-enum obd_notify_event {
- /* target added */
- OBD_NOTIFY_CREATE,
- /* Device connect start */
- OBD_NOTIFY_CONNECT,
- /* Device activated */
- OBD_NOTIFY_ACTIVE,
- /* Device deactivated */
- OBD_NOTIFY_INACTIVE,
- /* Device disconnected */
- OBD_NOTIFY_DISCON,
- /* Connect data for import were changed */
- OBD_NOTIFY_OCD,
- /* Sync request */
- OBD_NOTIFY_SYNC_NONBLOCK,
- OBD_NOTIFY_SYNC,
- /* Configuration event */
- OBD_NOTIFY_CONFIG,
- /* Administratively deactivate/activate event */
- OBD_NOTIFY_DEACTIVATE,
- OBD_NOTIFY_ACTIVATE
-};
-
-/*
- * Data structure used to pass obd_notify()-event to non-obd listeners (llite
- * and liblustre being main examples).
- */
-struct obd_notify_upcall {
- int (*onu_upcall)(struct obd_device *host, struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data);
- /* Opaque datum supplied by upper layer listener */
- void *onu_owner;
-};
-
-struct target_recovery_data {
- svc_handler_t trd_recovery_handler;
- pid_t trd_processing_task;
- struct completion trd_starting;
- struct completion trd_finishing;
-};
-
-struct obd_llog_group {
- int olg_seq;
- struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
- wait_queue_head_t olg_waitq;
- spinlock_t olg_lock;
- struct mutex olg_cat_processing;
-};
-
-/* corresponds to one of the obd's */
-#define OBD_DEVICE_MAGIC 0XAB5CD6EF
-#define OBD_DEV_BY_DEVNAME 0xffffd0de
-
-struct lvfs_run_ctxt {
- struct dt_device *dt;
-};
-
-struct obd_device {
- struct obd_type *obd_type;
- __u32 obd_magic;
-
- /* common and UUID name of this device */
- char obd_name[MAX_OBD_NAME];
- struct obd_uuid obd_uuid;
-
- struct lu_device *obd_lu_dev;
-
- int obd_minor;
- /* bitfield modification is protected by obd_dev_lock */
- unsigned long obd_attached:1, /* finished attach */
- obd_set_up:1, /* finished setup */
- obd_recovering:1, /* there are recoverable clients */
- obd_abort_recovery:1,/* recovery expired */
- obd_version_recov:1, /* obd uses version checking */
- obd_replayable:1, /* recovery is enabled; inform clients */
- obd_no_transno:1, /* no committed-transno notification */
- obd_no_recov:1, /* fail instead of retry messages */
- obd_stopping:1, /* started cleanup */
- obd_starting:1, /* started setup */
- obd_force:1, /* cleanup with > 0 obd refcount */
- obd_fail:1, /* cleanup with failover */
- obd_async_recov:1, /* allow asynchronous orphan cleanup */
- obd_no_conn:1, /* deny new connections */
- obd_inactive:1, /* device active/inactive
- * (for /proc/status only!!) */
- obd_no_ir:1, /* no imperative recovery. */
- obd_process_conf:1; /* device is processing mgs config */
-	/* use a separate field as it is set in interrupt context, so as not
-	 * to mess with the protection of other bits using the _bh lock */
- unsigned long obd_recovery_expired:1;
- /* uuid-export hash body */
- struct cfs_hash *obd_uuid_hash;
- /* nid-export hash body */
- struct cfs_hash *obd_nid_hash;
- atomic_t obd_refcount;
- wait_queue_head_t obd_refcount_waitq;
- struct list_head obd_exports;
- struct list_head obd_unlinked_exports;
- struct list_head obd_delayed_exports;
- int obd_num_exports;
- spinlock_t obd_nid_lock;
- struct ldlm_namespace *obd_namespace;
- struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
- /* a spinlock is OK for what we do now, may need a semaphore later */
- spinlock_t obd_dev_lock; /* protect OBD bitfield above */
- struct mutex obd_dev_mutex;
- __u64 obd_last_committed;
- spinlock_t obd_osfs_lock;
- struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
- __u64 obd_osfs_age;
- struct lvfs_run_ctxt obd_lvfs_ctxt;
- struct obd_llog_group obd_olg; /* default llog group */
- struct obd_device *obd_observer;
- struct rw_semaphore obd_observer_link_sem;
- struct obd_notify_upcall obd_upcall;
- struct obd_export *obd_self_export;
-
- int obd_max_recoverable_clients;
- atomic_t obd_connected_clients;
- int obd_stale_clients;
- int obd_delayed_clients;
- /* this lock protects all recovery list_heads, timer and
- * obd_next_recovery_transno value */
- spinlock_t obd_recovery_task_lock;
- __u64 obd_next_recovery_transno;
- int obd_replayed_requests;
- int obd_requests_queued_for_recovery;
- wait_queue_head_t obd_next_transno_waitq;
- /* protected by obd_recovery_task_lock */
- int obd_recovery_timeout;
-
- /* new recovery stuff from CMD2 */
- struct target_recovery_data obd_recovery_data;
- int obd_replayed_locks;
- atomic_t obd_req_replay_clients;
- atomic_t obd_lock_replay_clients;
- /* all lists are protected by obd_recovery_task_lock */
- struct list_head obd_req_replay_queue;
- struct list_head obd_lock_replay_queue;
- struct list_head obd_final_req_queue;
- int obd_recovery_stage;
-
- union {
- struct client_obd cli;
- struct echo_client_obd echo_client;
- struct lov_obd lov;
- struct lmv_obd lmv;
- } u;
- /* Fields used by LProcFS */
- unsigned int obd_cntr_base;
- struct lprocfs_stats *obd_stats;
-
- unsigned int md_cntr_base;
- struct lprocfs_stats *md_stats;
-
- struct dentry *obd_debugfs_entry;
- struct dentry *obd_svc_debugfs_entry;
- struct lprocfs_stats *obd_svc_stats;
- atomic_t obd_evict_inprogress;
- wait_queue_head_t obd_evict_inprogress_waitq;
- struct list_head obd_evict_list; /* protected with pet_lock */
-
- /**
- * Ldlm pool part. Save last calculated SLV and Limit.
- */
- rwlock_t obd_pool_lock;
- int obd_pool_limit;
- __u64 obd_pool_slv;
-
- /**
- * A list of outstanding class_incref()'s against this obd. For
- * debugging.
- */
- struct lu_ref obd_reference;
-
- int obd_conn_inprogress;
-
- struct kobject obd_kobj; /* sysfs object */
- struct completion obd_kobj_unregister;
-};
-
-#define OBD_LLOG_FL_SENDNOW 0x0001
-#define OBD_LLOG_FL_EXIT 0x0002
-
-enum obd_cleanup_stage {
-/* Special case hack for MDS LOVs */
- OBD_CLEANUP_EARLY,
-/* can be directly mapped to .ldto_device_fini() */
- OBD_CLEANUP_EXPORTS,
-};
-
-/* get/set_info keys */
-#define KEY_ASYNC "async"
-#define KEY_BLOCKSIZE_BITS "blocksize_bits"
-#define KEY_BLOCKSIZE "blocksize"
-#define KEY_CHANGELOG_CLEAR "changelog_clear"
-#define KEY_FID2PATH "fid2path"
-#define KEY_CHECKSUM "checksum"
-#define KEY_CLEAR_FS "clear_fs"
-#define KEY_CONN_DATA "conn_data"
-#define KEY_EVICT_BY_NID "evict_by_nid"
-#define KEY_FIEMAP "fiemap"
-#define KEY_FLUSH_CTX "flush_ctx"
-#define KEY_GRANT_SHRINK "grant_shrink"
-#define KEY_HSM_COPYTOOL_SEND "hsm_send"
-#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
-#define KEY_INIT_RECOV "initial_recov"
-#define KEY_INTERMDS "inter_mds"
-#define KEY_LAST_ID "last_id"
-#define KEY_LAST_FID "last_fid"
-#define KEY_LOCK_TO_STRIPE "lock_to_stripe"
-#define KEY_LOVDESC "lovdesc"
-#define KEY_LOV_IDX "lov_idx"
-#define KEY_MAX_EASIZE "max_easize"
-#define KEY_DEFAULT_EASIZE "default_easize"
-#define KEY_MAX_COOKIESIZE "max_cookiesize"
-#define KEY_DEFAULT_COOKIESIZE "default_cookiesize"
-#define KEY_MDS_CONN "mds_conn"
-#define KEY_MGSSEC "mgssec"
-#define KEY_NEXT_ID "next_id"
-#define KEY_READ_ONLY "read-only"
-#define KEY_REGISTER_TARGET "register_target"
-#define KEY_SET_FS "set_fs"
-#define KEY_TGT_COUNT "tgt_count"
-/* KEY_SET_INFO in lustre_idl.h */
-#define KEY_SPTLRPC_CONF "sptlrpc_conf"
-#define KEY_CONNECT_FLAG "connect_flags"
-#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel"
-
-#define KEY_CACHE_SET "cache_set"
-#define KEY_CACHE_LRU_SHRINK "cache_lru_shrink"
-#define KEY_CHANGELOG_INDEX "changelog_index"
-
-struct lu_context;
-
-/* /!\ must be coherent with include/linux/namei.h on patched kernel */
-#define IT_OPEN (1 << 0)
-#define IT_CREAT (1 << 1)
-#define IT_READDIR (1 << 2)
-#define IT_GETATTR (1 << 3)
-#define IT_LOOKUP (1 << 4)
-#define IT_UNLINK (1 << 5)
-#define IT_TRUNC (1 << 6)
-#define IT_GETXATTR (1 << 7)
-#define IT_EXEC (1 << 8)
-#define IT_PIN (1 << 9)
-#define IT_LAYOUT (1 << 10)
-#define IT_QUOTA_DQACQ (1 << 11)
-#define IT_QUOTA_CONN (1 << 12)
-#define IT_SETXATTR (1 << 13)
-
-static inline int it_to_lock_mode(struct lookup_intent *it)
-{
- /* CREAT needs to be tested before open (both could be set) */
- if (it->it_op & IT_CREAT)
- return LCK_CW;
- else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP |
- IT_LAYOUT))
- return LCK_CR;
- else if (it->it_op & IT_GETXATTR)
- return LCK_PR;
- else if (it->it_op & IT_SETXATTR)
- return LCK_PW;
-
- LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
- return -EINVAL;
-}
-
-struct md_op_data {
- struct lu_fid op_fid1; /* operation fid1 (usually parent) */
- struct lu_fid op_fid2; /* operation fid2 (usually child) */
- struct lu_fid op_fid3; /* 2 extra fids to find conflicting */
- struct lu_fid op_fid4; /* to the operation locks. */
- u32 op_mds; /* what mds server open will go to */
- struct lustre_handle op_handle;
- s64 op_mod_time;
- const char *op_name;
- int op_namelen;
- __u32 op_mode;
- struct lmv_stripe_md *op_mea1;
- struct lmv_stripe_md *op_mea2;
- __u32 op_suppgids[2];
- __u32 op_fsuid;
- __u32 op_fsgid;
- cfs_cap_t op_cap;
- void *op_data;
-
- /* iattr fields and blocks. */
- struct iattr op_attr;
- unsigned int op_attr_flags;
- __u64 op_valid;
- loff_t op_attr_blocks;
-
- /* Size-on-MDS epoch and flags. */
- __u64 op_ioepoch;
- __u32 op_flags;
-
- /* Various operation flags. */
- enum mds_op_bias op_bias;
-
- /* Operation type */
- __u32 op_opc;
-
- /* Used by readdir */
- __u64 op_offset;
-
- /* Used by readdir */
- __u32 op_npages;
-
-	/* used to transfer info between the stacks of the MD client;
-	 * see enum op_cli_flags */
- __u32 op_cli_flags;
-
- /* File object data version for HSM release, on client */
- __u64 op_data_version;
- struct lustre_handle op_lease_handle;
-};
-
-enum op_cli_flags {
- CLI_SET_MEA = 1 << 0,
- CLI_RM_ENTRY = 1 << 1,
-};
-
-struct md_enqueue_info;
-/* metadata stat-ahead */
-
-struct md_enqueue_info {
- struct md_op_data mi_data;
- struct lookup_intent mi_it;
- struct lustre_handle mi_lockh;
- struct inode *mi_dir;
- int (*mi_cb)(struct ptlrpc_request *req,
- struct md_enqueue_info *minfo, int rc);
- __u64 mi_cbdata;
- unsigned int mi_generation;
-};
-
-struct obd_ops {
- struct module *o_owner;
- int (*o_iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg);
- int (*o_get_info)(const struct lu_env *env, struct obd_export *,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm);
- int (*o_set_info_async)(const struct lu_env *, struct obd_export *,
- __u32 keylen, void *key,
- __u32 vallen, void *val,
- struct ptlrpc_request_set *set);
- int (*o_attach)(struct obd_device *dev, u32 len, void *data);
- int (*o_detach)(struct obd_device *dev);
- int (*o_setup)(struct obd_device *dev, struct lustre_cfg *cfg);
- int (*o_precleanup)(struct obd_device *dev,
- enum obd_cleanup_stage cleanup_stage);
- int (*o_cleanup)(struct obd_device *dev);
- int (*o_process_config)(struct obd_device *dev, u32 len, void *data);
- int (*o_postrecov)(struct obd_device *dev);
- int (*o_add_conn)(struct obd_import *imp, struct obd_uuid *uuid,
- int priority);
- int (*o_del_conn)(struct obd_import *imp, struct obd_uuid *uuid);
- /* connect to the target device with given connection
- * data. @ocd->ocd_connect_flags is modified to reflect flags actually
- * granted by the target, which are guaranteed to be a subset of flags
- * asked for. If @ocd == NULL, use default parameters. */
- int (*o_connect)(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *src,
- struct obd_uuid *cluuid, struct obd_connect_data *ocd,
- void *localdata);
- int (*o_reconnect)(const struct lu_env *env,
- struct obd_export *exp, struct obd_device *src,
- struct obd_uuid *cluuid,
- struct obd_connect_data *ocd,
- void *localdata);
- int (*o_disconnect)(struct obd_export *exp);
-
- /* Initialize/finalize fids infrastructure. */
- int (*o_fid_init)(struct obd_device *obd,
- struct obd_export *exp, enum lu_cli_type type);
- int (*o_fid_fini)(struct obd_device *obd);
-
- /* Allocate new fid according to passed @hint. */
- int (*o_fid_alloc)(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data);
-
- /*
- * Object with @fid is getting deleted, we may want to do something
- * about this.
- */
- int (*o_statfs)(const struct lu_env *, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags);
- int (*o_statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
- __u64 max_age, struct ptlrpc_request_set *set);
- int (*o_packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt,
- struct lov_stripe_md *mem_src);
- int (*o_unpackmd)(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt,
- struct lov_mds_md *disk_src, int disk_len);
- int (*o_preallocate)(struct lustre_handle *, u32 *req, u64 *ids);
- int (*o_create)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md **ea,
- struct obd_trans_info *oti);
- int (*o_destroy)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md *ea,
- struct obd_trans_info *oti, struct obd_export *md_exp);
- int (*o_setattr)(const struct lu_env *, struct obd_export *exp,
- struct obd_info *oinfo, struct obd_trans_info *oti);
- int (*o_setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset);
- int (*o_getattr)(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo);
- int (*o_getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *set);
- int (*o_adjust_kms)(struct obd_export *exp, struct lov_stripe_md *lsm,
- u64 size, int shrink);
- int (*o_preprw)(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa, int objcount,
- struct obd_ioobj *obj, struct niobuf_remote *remote,
- int *nr_pages, struct niobuf_local *local,
- struct obd_trans_info *oti);
- int (*o_commitrw)(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa,
- int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *remote, int pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti, int rc);
- int (*o_find_cbdata)(struct obd_export *, struct lov_stripe_md *,
- ldlm_iterator_t it, void *data);
- int (*o_init_export)(struct obd_export *exp);
- int (*o_destroy_export)(struct obd_export *exp);
-
- /* metadata-only methods */
- int (*o_import_event)(struct obd_device *, struct obd_import *,
- enum obd_import_event);
-
- int (*o_notify)(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data);
-
- int (*o_health_check)(const struct lu_env *env, struct obd_device *);
- struct obd_uuid *(*o_get_uuid)(struct obd_export *exp);
-
- /* quota methods */
- int (*o_quotacheck)(struct obd_device *, struct obd_export *,
- struct obd_quotactl *);
- int (*o_quotactl)(struct obd_device *, struct obd_export *,
- struct obd_quotactl *);
-
- /* pools methods */
- int (*o_pool_new)(struct obd_device *obd, char *poolname);
- int (*o_pool_del)(struct obd_device *obd, char *poolname);
- int (*o_pool_add)(struct obd_device *obd, char *poolname,
- char *ostname);
- int (*o_pool_rem)(struct obd_device *obd, char *poolname,
- char *ostname);
- void (*o_getref)(struct obd_device *obd);
- void (*o_putref)(struct obd_device *obd);
- /*
- * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line
- * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c.
- * Also, add a wrapper function in include/linux/obd_class.h. */
-};
-
-enum {
- LUSTRE_OPC_MKDIR = (1 << 0),
- LUSTRE_OPC_SYMLINK = (1 << 1),
- LUSTRE_OPC_MKNOD = (1 << 2),
- LUSTRE_OPC_CREATE = (1 << 3),
- LUSTRE_OPC_ANY = (1 << 4)
-};
-
-/* lmv structures */
-#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
-#define MEA_MAGIC_ALL_CHARS 0xb222a11c
-#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
-
-#define MAX_HASH_SIZE_32 0x7fffffffUL
-#define MAX_HASH_SIZE 0x7fffffffffffffffULL
-#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
-
-struct lustre_md {
- struct mdt_body *body;
- struct lov_stripe_md *lsm;
- struct lmv_stripe_md *mea;
-#ifdef CONFIG_FS_POSIX_ACL
- struct posix_acl *posix_acl;
-#endif
- struct mdt_remote_perm *remote_perm;
-};
-
-struct md_open_data {
- struct obd_client_handle *mod_och;
- struct ptlrpc_request *mod_open_req;
- struct ptlrpc_request *mod_close_req;
- atomic_t mod_refcount;
- bool mod_is_create;
-};
-
-struct lookup_intent;
-
-struct md_ops {
- int (*m_getstatus)(struct obd_export *, struct lu_fid *);
- int (*m_null_inode)(struct obd_export *, const struct lu_fid *);
- int (*m_find_cbdata)(struct obd_export *, const struct lu_fid *,
- ldlm_iterator_t, void *);
- int (*m_close)(struct obd_export *, struct md_op_data *,
- struct md_open_data *, struct ptlrpc_request **);
- int (*m_create)(struct obd_export *, struct md_op_data *,
- const void *, int, int, __u32, __u32, cfs_cap_t,
- __u64, struct ptlrpc_request **);
- int (*m_done_writing)(struct obd_export *, struct md_op_data *,
- struct md_open_data *);
- int (*m_enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
- struct lookup_intent *, struct md_op_data *,
- struct lustre_handle *, void *, int,
- struct ptlrpc_request **, __u64);
- int (*m_getattr)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
- int (*m_getattr_name)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
- int (*m_intent_lock)(struct obd_export *, struct md_op_data *,
- void *, int, struct lookup_intent *, int,
- struct ptlrpc_request **,
- ldlm_blocking_callback, __u64);
- int (*m_link)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
- int (*m_rename)(struct obd_export *, struct md_op_data *,
- const char *, int, const char *, int,
- struct ptlrpc_request **);
- int (*m_is_subdir)(struct obd_export *, const struct lu_fid *,
- const struct lu_fid *,
- struct ptlrpc_request **);
- int (*m_setattr)(struct obd_export *, struct md_op_data *, void *,
- int, void *, int, struct ptlrpc_request **,
- struct md_open_data **mod);
- int (*m_sync)(struct obd_export *, const struct lu_fid *,
- struct ptlrpc_request **);
- int (*m_readpage)(struct obd_export *, struct md_op_data *,
- struct page **, struct ptlrpc_request **);
-
- int (*m_unlink)(struct obd_export *, struct md_op_data *,
- struct ptlrpc_request **);
-
- int (*m_setxattr)(struct obd_export *, const struct lu_fid *,
- u64, const char *, const char *, int, int, int, __u32,
- struct ptlrpc_request **);
-
- int (*m_getxattr)(struct obd_export *, const struct lu_fid *,
- u64, const char *, const char *, int, int, int,
- struct ptlrpc_request **);
-
- int (*m_init_ea_size)(struct obd_export *, int, int, int, int);
-
- int (*m_get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
- struct obd_export *, struct obd_export *,
- struct lustre_md *);
-
- int (*m_free_lustre_md)(struct obd_export *, struct lustre_md *);
-
- int (*m_set_open_replay_data)(struct obd_export *,
- struct obd_client_handle *,
- struct lookup_intent *);
- int (*m_clear_open_replay_data)(struct obd_export *,
- struct obd_client_handle *);
- int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
-
- ldlm_mode_t (*m_lock_match)(struct obd_export *, __u64,
- const struct lu_fid *, ldlm_type_t,
- ldlm_policy_data_t *, ldlm_mode_t,
- struct lustre_handle *);
-
- int (*m_cancel_unused)(struct obd_export *, const struct lu_fid *,
- ldlm_policy_data_t *, ldlm_mode_t,
- ldlm_cancel_flags_t flags, void *opaque);
-
- int (*m_get_remote_perm)(struct obd_export *, const struct lu_fid *,
- __u32, struct ptlrpc_request **);
-
- int (*m_intent_getattr_async)(struct obd_export *,
- struct md_enqueue_info *,
- struct ldlm_enqueue_info *);
-
- int (*m_revalidate_lock)(struct obd_export *, struct lookup_intent *,
- struct lu_fid *, __u64 *bits);
-
- /*
- * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
- * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
- * wrapper function in include/linux/obd_class.h.
- */
-};
-
-struct lsm_operations {
- void (*lsm_free)(struct lov_stripe_md *);
- int (*lsm_destroy)(struct lov_stripe_md *, struct obdo *oa,
- struct obd_export *md_exp);
- void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, u64 *,
- u64 *);
- void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *,
- u64 *);
- int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
- __u16 *stripe_count);
- int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm);
-};
-
-extern const struct lsm_operations lsm_v1_ops;
-extern const struct lsm_operations lsm_v3_ops;
-static inline const struct lsm_operations *lsm_op_find(int magic)
-{
- switch (magic) {
- case LOV_MAGIC_V1:
- return &lsm_v1_ops;
- case LOV_MAGIC_V3:
- return &lsm_v3_ops;
- default:
- CERROR("Cannot recognize lsm_magic %08x\n", magic);
- return NULL;
- }
-}
-
-/* Requests for obd_extent_calc() */
-#define OBD_CALC_STRIPE_START 1
-#define OBD_CALC_STRIPE_END 2
-
-static inline struct md_open_data *obd_mod_alloc(void)
-{
- struct md_open_data *mod;
-
- mod = kzalloc(sizeof(*mod), GFP_NOFS);
- if (mod == NULL)
- return NULL;
- atomic_set(&mod->mod_refcount, 1);
- return mod;
-}
-
-#define obd_mod_get(mod) atomic_inc(&(mod)->mod_refcount)
-#define obd_mod_put(mod) \
-({ \
- if (atomic_dec_and_test(&(mod)->mod_refcount)) { \
- if ((mod)->mod_open_req) \
- ptlrpc_req_finished((mod)->mod_open_req); \
- kfree(mod); \
- } \
-})
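
The obd_mod_alloc()/obd_mod_get()/obd_mod_put() trio above is a plain reference count: the allocator hands out the first reference, and the last put finishes the pending open request and frees the object. A minimal user-space model of that lifecycle (an int instead of atomic_t, a printf instead of ptlrpc_req_finished()) might look like this:

/* stand-alone model, not kernel code */
#include <stdio.h>
#include <stdlib.h>

struct demo_mod {
	int refcount;
};

static struct demo_mod *demo_mod_alloc(void)
{
	struct demo_mod *mod = calloc(1, sizeof(*mod));

	if (mod)
		mod->refcount = 1;	/* caller owns the first reference */
	return mod;
}

static void demo_mod_get(struct demo_mod *mod)
{
	mod->refcount++;
}

static void demo_mod_put(struct demo_mod *mod)
{
	if (--mod->refcount == 0) {
		printf("last put: finish open request, free object\n");
		free(mod);
	}
}

int main(void)
{
	struct demo_mod *mod = demo_mod_alloc();

	if (!mod)
		return 1;
	demo_mod_get(mod);	/* e.g. replay code takes an extra reference */
	demo_mod_put(mod);	/* replay done */
	demo_mod_put(mod);	/* original owner done: freed here */
	return 0;
}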
-
-void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid);
-void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent);
-
-/* return 1 if the client should resend the request */
-static inline int client_should_resend(int resend, struct client_obd *cli)
-{
- return atomic_read(&cli->cl_resends) ?
- atomic_read(&cli->cl_resends) > resend : 1;
-}
-
-/**
- * Return device name for this device
- *
- * XXX: lu_device is declared before obd_device, while a pointer pointing
- * back to obd_device is kept in lu_device, so this helper function is
- * defined here instead of in lu_object.h
- */
-static inline const char *lu_dev_name(const struct lu_device *lu_dev)
-{
- return lu_dev->ld_obd->obd_name;
-}
-
-static inline bool filename_is_volatile(const char *name, int namelen, int *idx)
-{
- const char *start;
- char *end;
-
- if (strncmp(name, LUSTRE_VOLATILE_HDR, LUSTRE_VOLATILE_HDR_LEN) != 0)
- return false;
-
-	/* caller does not care about idx */
- if (idx == NULL)
- return true;
-
-	/* volatile file: the MDT index can be taken from the name */
-	/* name format is LUSTRE_VOLATILE_HDR:[idx]: */
-	/* if no MDT is specified, use the standard way */
- if (namelen < LUSTRE_VOLATILE_HDR_LEN + 2)
- goto bad_format;
- /* test for no MDT idx case */
- if ((*(name + LUSTRE_VOLATILE_HDR_LEN) == ':') &&
- (*(name + LUSTRE_VOLATILE_HDR_LEN + 1) == ':')) {
- *idx = -1;
- return true;
- }
- /* we have an idx, read it */
- start = name + LUSTRE_VOLATILE_HDR_LEN + 1;
- *idx = simple_strtoul(start, &end, 0);
- /* error cases:
- * no digit, no trailing :, negative value
- */
- if (((*idx == 0) && (end == start)) ||
- (*end != ':') || (*idx < 0))
- goto bad_format;
-
- return true;
-bad_format:
-	/* bad format of the mdt idx; we cannot return an error
-	 * to the caller, so fall back to the hash algorithm */
- CERROR("Bad volatile file name format: %s\n",
- name + LUSTRE_VOLATILE_HDR_LEN);
- return false;
-}
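
filename_is_volatile() above accepts names of the form LUSTRE_VOLATILE_HDR:[idx]: (or "::" when no MDT index is given). The real header string is defined elsewhere; the sketch below uses a made-up "VOLATILE" prefix purely so the parsing rules can be exercised in user space.

/* stand-alone demo; DEMO_HDR is a placeholder, not the real Lustre prefix */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_HDR	"VOLATILE"
#define DEMO_HDR_LEN	(sizeof(DEMO_HDR) - 1)

static int demo_is_volatile(const char *name, int *idx)
{
	char *end;

	if (strncmp(name, DEMO_HDR, DEMO_HDR_LEN) != 0)
		return 0;				/* not a volatile name */
	if (name[DEMO_HDR_LEN] == ':' && name[DEMO_HDR_LEN + 1] == ':') {
		*idx = -1;				/* no MDT specified */
		return 1;
	}
	*idx = strtol(name + DEMO_HDR_LEN + 1, &end, 0);
	/* reject: no digits, no trailing ':', negative value */
	return end != name + DEMO_HDR_LEN + 1 && *end == ':' && *idx >= 0;
}

int main(void)
{
	int idx, ok;

	ok = demo_is_volatile("VOLATILE::", &idx);
	printf("VOLATILE::   -> %d, idx %d\n", ok, idx);
	ok = demo_is_volatile("VOLATILE:12:", &idx);
	printf("VOLATILE:12: -> %d, idx %d\n", ok, idx);
	ok = demo_is_volatile("regular.txt", &idx);
	printf("regular.txt  -> %d\n", ok);
	return 0;
}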
-
-static inline int cli_brw_size(struct obd_device *obd)
-{
- LASSERT(obd != NULL);
- return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
-}
-
-#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_cache.h b/drivers/staging/lustre/lustre/include/obd_cache.h
deleted file mode 100644
index c8249fbb0d72..000000000000
--- a/drivers/staging/lustre/lustre/include/obd_cache.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _OBD_CACHE_H__
-#define _OBD_CACHE_H__
-
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
deleted file mode 100644
index 3a63462aa943..000000000000
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __OBD_CKSUM
-#define __OBD_CKSUM
-#include "../../include/linux/libcfs/libcfs.h"
-#include "lustre/lustre_idl.h"
-
-static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type)
-{
- switch (cksum_type) {
- case OBD_CKSUM_CRC32:
- return CFS_HASH_ALG_CRC32;
- case OBD_CKSUM_ADLER:
- return CFS_HASH_ALG_ADLER32;
- case OBD_CKSUM_CRC32C:
- return CFS_HASH_ALG_CRC32C;
- default:
- CERROR("Unknown checksum type (%x)!!!\n", cksum_type);
- LBUG();
- }
- return 0;
-}
-
-/* The OBD_FL_CKSUM_* flags are packed into 5 bits of o_flags, since there can
- * only be a single checksum type per RPC.
- *
- * The OBD_CHECKSUM_* type bits passed in ocd_cksum_types are a 32-bit bitmask
- * since they need to represent the full range of checksum algorithms that
- * both the client and server can understand.
- *
- * In case of unsupported types/flags we fall back to ADLER
- * because that is supported by all clients since 1.8
- *
- * In case multiple algorithms are supported the best one is used. */
-static inline u32 cksum_type_pack(cksum_type_t cksum_type)
-{
- unsigned int performance = 0, tmp;
- u32 flag = OBD_FL_CKSUM_ADLER;
-
- if (cksum_type & OBD_CKSUM_CRC32) {
- tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32));
- if (tmp > performance) {
- performance = tmp;
- flag = OBD_FL_CKSUM_CRC32;
- }
- }
- if (cksum_type & OBD_CKSUM_CRC32C) {
- tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C));
- if (tmp > performance) {
- performance = tmp;
- flag = OBD_FL_CKSUM_CRC32C;
- }
- }
- if (cksum_type & OBD_CKSUM_ADLER) {
- tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER));
- if (tmp > performance) {
- performance = tmp;
- flag = OBD_FL_CKSUM_ADLER;
- }
- }
- if (unlikely(cksum_type && !(cksum_type & (OBD_CKSUM_CRC32C |
- OBD_CKSUM_CRC32 |
- OBD_CKSUM_ADLER))))
- CWARN("unknown cksum type %x\n", cksum_type);
-
- return flag;
-}
-
-static inline cksum_type_t cksum_type_unpack(u32 o_flags)
-{
- switch (o_flags & OBD_FL_CKSUM_ALL) {
- case OBD_FL_CKSUM_CRC32C:
- return OBD_CKSUM_CRC32C;
- case OBD_FL_CKSUM_CRC32:
- return OBD_CKSUM_CRC32;
- default:
- break;
- }
-
- return OBD_CKSUM_ADLER;
-}
-
-/* Return a bitmask of the checksum types supported on this system.
- * ADLER is the baseline: supported by all clients since 1.8 and
- * independent of hardware. The client uses all available local algorithms.
- */
-static inline cksum_type_t cksum_types_supported_client(void)
-{
- cksum_type_t ret = OBD_CKSUM_ADLER;
-
- CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n",
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)),
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)),
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)));
-
- if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)) > 0)
- ret |= OBD_CKSUM_CRC32C;
- if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)) > 0)
- ret |= OBD_CKSUM_CRC32;
-
- return ret;
-}
-
-/* The server uses algorithms that perform at 50% or better of the Adler speed */
-static inline cksum_type_t cksum_types_supported_server(void)
-{
- int base_speed;
- cksum_type_t ret = OBD_CKSUM_ADLER;
-
- CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n",
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)),
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)),
- cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)));
-
- base_speed = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)) / 2;
-
- if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)) >=
- base_speed)
- ret |= OBD_CKSUM_CRC32C;
- if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)) >=
- base_speed)
- ret |= OBD_CKSUM_CRC32;
-
- return ret;
-}
-
-
-/* Select the best checksum algorithm among those supplied in the cksum_types
- * input.
- *
- * Currently, calling cksum_type_pack() with a mask will return the fastest
- * checksum type due to its benchmarking at libcfs module load.
- * Caution is advised, however, since what is fastest on a single client may
- * not be the fastest or most efficient algorithm on the server. */
-static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types)
-{
- return cksum_type_unpack(cksum_type_pack(cksum_types));
-}
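
cksum_type_pack() above picks, from the algorithms the caller allows, the one that benchmarked fastest at libcfs module load, with Adler as the universally-understood fallback. The stand-alone model below replaces cfs_crypto_hash_speed() with a table of invented speeds and reproduces the same "fastest allowed wins" selection.

/* stand-alone model; the speed numbers are made up for the demo */
#include <stdio.h>

enum { DEMO_CRC32 = 1 << 0, DEMO_ADLER = 1 << 1, DEMO_CRC32C = 1 << 2 };

static int demo_speed(int algo)
{
	switch (algo) {			/* pretend benchmark results, MB/s */
	case DEMO_CRC32:	return 800;
	case DEMO_ADLER:	return 1200;
	case DEMO_CRC32C:	return 2500;	/* e.g. hardware-assisted */
	default:		return 0;
	}
}

static int demo_select(int allowed)
{
	int algos[] = { DEMO_CRC32, DEMO_ADLER, DEMO_CRC32C };
	int best = DEMO_ADLER;		/* fallback everyone understands */
	int best_speed = 0;
	unsigned int i;

	for (i = 0; i < sizeof(algos) / sizeof(algos[0]); i++) {
		if ((allowed & algos[i]) && demo_speed(algos[i]) > best_speed) {
			best = algos[i];
			best_speed = demo_speed(algos[i]);
		}
	}
	return best;
}

int main(void)
{
	printf("all allowed        -> %#x (crc32c wins)\n",
	       demo_select(DEMO_CRC32 | DEMO_ADLER | DEMO_CRC32C));
	printf("crc32 + adler only -> %#x (adler wins)\n",
	       demo_select(DEMO_CRC32 | DEMO_ADLER));
	return 0;
}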
-
-/* Checksum algorithm names. Must be defined in the same order as the
- * OBD_CKSUM_* flags. */
-#define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"}
-
-#endif /* __OBD_CKSUM */
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
deleted file mode 100644
index 73fb29fe1481..000000000000
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ /dev/null
@@ -1,1829 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#ifndef __CLASS_OBD_H
-#define __CLASS_OBD_H
-
-
-#include "obd_support.h"
-#include "lustre_import.h"
-#include "lustre_net.h"
-#include "obd.h"
-#include "lustre_lib.h"
-#include "lustre/lustre_idl.h"
-#include "lprocfs_status.h"
-
-#define OBD_STATFS_NODELAY 0x0001 /* requests should be sent without delay
-				    * and resent to avoid deadlocks */
-#define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update
- * obd_osfs_age */
-#define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd
- * instead of a specific set. This
- * means that we cannot rely on the set
- * interpret routine to be called.
- * lov_statfs_fini() must thus be called
- * by the request interpret routine */
-#define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving
- * information from MDT0. */
-#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */
-
-/* OBD Device Declarations */
-extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
-extern rwlock_t obd_dev_lock;
-
-/* OBD Operations Declarations */
-struct obd_device *class_exp2obd(struct obd_export *);
-int class_handle_ioctl(unsigned int cmd, unsigned long arg);
-int lustre_get_jobid(char *jobid);
-
-struct lu_device_type;
-
-/* genops.c */
-extern struct list_head obd_types;
-struct obd_export *class_conn2export(struct lustre_handle *);
-int class_register_type(struct obd_ops *, struct md_ops *,
- const char *nm, struct lu_device_type *ldt);
-int class_unregister_type(const char *nm);
-
-struct obd_device *class_newdev(const char *type_name, const char *name);
-void class_release_dev(struct obd_device *obd);
-
-int class_name2dev(const char *name);
-struct obd_device *class_name2obd(const char *name);
-int class_uuid2dev(struct obd_uuid *uuid);
-struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char *typ_name,
- struct obd_uuid *grp_uuid);
-struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid,
- int *next);
-struct obd_device *class_num2obd(int num);
-
-int class_notify_sptlrpc_conf(const char *fsname, int namelen);
-
-char *obd_export_nid2str(struct obd_export *exp);
-
-int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep);
-
-int obd_zombie_impexp_init(void);
-void obd_zombie_impexp_stop(void);
-void obd_zombie_impexp_cull(void);
-void obd_zombie_barrier(void);
-
-struct llog_handle;
-struct llog_rec_hdr;
-typedef int (*llog_cb_t)(const struct lu_env *, struct llog_handle *,
- struct llog_rec_hdr *, void *);
-/* obd_config.c */
-int class_process_config(struct lustre_cfg *lcfg);
-int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
- struct lustre_cfg *lcfg, void *data);
-int class_attach(struct lustre_cfg *lcfg);
-int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
-int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg);
-int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg);
-struct obd_device *class_incref(struct obd_device *obd,
- const char *scope, const void *source);
-void class_decref(struct obd_device *obd,
- const char *scope, const void *source);
-void dump_exports(struct obd_device *obd, int locks);
-int class_config_llog_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data);
-int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg);
-int class_add_uuid(const char *uuid, __u64 nid);
-
-/*obdecho*/
-void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
-
-#define CFG_F_START 0x01 /* Set when we start updating from a log */
-#define CFG_F_MARKER 0x02 /* We are within a marker */
-#define CFG_F_SKIP 0x04 /* We should ignore this cfg command */
-#define CFG_F_COMPAT146 0x08 /* Allow old-style logs */
-#define CFG_F_EXCLUDE 0x10 /* OST exclusion list */
-
-/* Passed as data param to class_config_parse_llog */
-struct config_llog_instance {
- char *cfg_obdname;
- void *cfg_instance;
- struct super_block *cfg_sb;
- struct obd_uuid cfg_uuid;
- llog_cb_t cfg_callback;
- int cfg_last_idx; /* for partial llog processing */
- int cfg_flags;
-};
-int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
- char *name, struct config_llog_instance *cfg);
-enum {
- CONFIG_T_CONFIG = 0,
- CONFIG_T_SPTLRPC = 1,
- CONFIG_T_RECOVER = 2,
- CONFIG_T_PARAMS = 3,
- CONFIG_T_MAX = 4
-};
-
-#define PARAMS_FILENAME "params"
-#define LCTL_UPCALL "lctl"
-
-/* list of active configuration logs */
-struct config_llog_data {
- struct ldlm_res_id cld_resid;
- struct config_llog_instance cld_cfg;
- struct list_head cld_list_chain;
- atomic_t cld_refcount;
-	struct config_llog_data *cld_sptlrpc;/* dependent sptlrpc log */
- struct config_llog_data *cld_params; /* common parameters log */
- struct config_llog_data *cld_recover;/* imperative recover log */
- struct obd_export *cld_mgcexp;
- struct mutex cld_lock;
- int cld_type;
- unsigned int cld_stopping:1, /* we were told to stop
- * watching */
- cld_lostlock:1; /* lock not requeued */
- char cld_logname[0];
-};
-
-struct lustre_profile {
- struct list_head lp_list;
- char *lp_profile;
- char *lp_dt;
- char *lp_md;
-};
-
-struct lustre_profile *class_get_profile(const char *prof);
-void class_del_profile(const char *prof);
-void class_del_profiles(void);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-
-void __class_export_add_lock_ref(struct obd_export *, struct ldlm_lock *);
-void __class_export_del_lock_ref(struct obd_export *, struct ldlm_lock *);
-extern void (*class_export_dump_hook)(struct obd_export *);
-
-#else
-
-#define __class_export_add_lock_ref(exp, lock) do {} while (0)
-#define __class_export_del_lock_ref(exp, lock) do {} while (0)
-
-#endif
-
-static inline void class_export_rpc_inc(struct obd_export *exp)
-{
- atomic_inc(&(exp)->exp_rpc_count);
- CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n",
- (exp), atomic_read(&(exp)->exp_rpc_count));
-}
-
-static inline void class_export_rpc_dec(struct obd_export *exp)
-{
- LASSERT_ATOMIC_POS(&exp->exp_rpc_count);
- atomic_dec(&(exp)->exp_rpc_count);
- CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n",
- (exp), atomic_read(&(exp)->exp_rpc_count));
-}
-
-#define class_export_lock_get(exp, lock) \
-({ \
- atomic_inc(&(exp)->exp_locks_count); \
- __class_export_add_lock_ref(exp, lock); \
- CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", \
- (exp), atomic_read(&(exp)->exp_locks_count)); \
- class_export_get(exp); \
-})
-
-#define class_export_lock_put(exp, lock) \
-({ \
- LASSERT_ATOMIC_POS(&exp->exp_locks_count); \
- atomic_dec(&(exp)->exp_locks_count); \
- __class_export_del_lock_ref(exp, lock); \
- CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
- (exp), atomic_read(&(exp)->exp_locks_count)); \
- class_export_put(exp); \
-})
-
-#define class_export_cb_get(exp) \
-({ \
- atomic_inc(&(exp)->exp_cb_count); \
- CDEBUG(D_INFO, "callback GETting export %p : new cb_count %d\n",\
- (exp), atomic_read(&(exp)->exp_cb_count)); \
- class_export_get(exp); \
-})
-
-#define class_export_cb_put(exp) \
-({ \
- LASSERT_ATOMIC_POS(&exp->exp_cb_count); \
- atomic_dec(&(exp)->exp_cb_count); \
- CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
- (exp), atomic_read(&(exp)->exp_cb_count)); \
- class_export_put(exp); \
-})
-
-/* genops.c */
-struct obd_export *class_export_get(struct obd_export *exp);
-void class_export_put(struct obd_export *exp);
-struct obd_export *class_new_export(struct obd_device *obddev,
- struct obd_uuid *cluuid);
-void class_unlink_export(struct obd_export *exp);
-
-struct obd_import *class_import_get(struct obd_import *);
-void class_import_put(struct obd_import *);
-struct obd_import *class_new_import(struct obd_device *obd);
-void class_destroy_import(struct obd_import *exp);
-
-struct obd_type *class_search_type(const char *name);
-struct obd_type *class_get_type(const char *name);
-void class_put_type(struct obd_type *type);
-int class_connect(struct lustre_handle *conn, struct obd_device *obd,
- struct obd_uuid *cluuid);
-int class_disconnect(struct obd_export *exp);
-void class_fail_export(struct obd_export *exp);
-int class_manual_cleanup(struct obd_device *obd);
-static inline enum obd_option exp_flags_from_obd(struct obd_device *obd)
-{
- return ((obd->obd_fail ? OBD_OPT_FAILOVER : 0) |
- (obd->obd_force ? OBD_OPT_FORCE : 0) |
- (obd->obd_abort_recovery ? OBD_OPT_ABORT_RECOV : 0) |
- 0);
-}
-
-struct inode;
-struct lu_attr;
-struct obdo;
-void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid);
-
-void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj);
-void iattr_from_obdo(struct iattr *attr, struct obdo *oa, u32 valid);
-void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid);
-
-#define OBT(dev) (dev)->obd_type
-#define OBP(dev, op) (dev)->obd_type->typ_dt_ops->o_ ## op
-#define MDP(dev, op) (dev)->obd_type->typ_md_ops->m_ ## op
-#define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op
-
-/* Ensure obd_setup has run: used for cleanup paths which must be callable
-   while the obd is stopping */
-static inline int obd_check_dev(struct obd_device *obd)
-{
- if (!obd) {
- CERROR("NULL device\n");
- return -ENODEV;
- }
- return 0;
-}
-
-/* ensure obd_setup and !obd_stopping */
-static inline int obd_check_dev_active(struct obd_device *obd)
-{
- int rc;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
- if (!obd->obd_set_up || obd->obd_stopping) {
- CERROR("Device %d not setup\n", obd->obd_minor);
- return -ENODEV;
- }
- return rc;
-}
-
-#define OBD_COUNTER_OFFSET(op) \
- ((offsetof(struct obd_ops, o_ ## op) - \
- offsetof(struct obd_ops, o_iocontrol)) \
- / sizeof(((struct obd_ops *)(0))->o_iocontrol))
-
-#define OBD_COUNTER_INCREMENT(obdx, op) \
- if ((obdx)->obd_stats != NULL) { \
- unsigned int coffset; \
- coffset = (unsigned int)((obdx)->obd_cntr_base) + \
- OBD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (obdx)->obd_stats->ls_num); \
- lprocfs_counter_incr((obdx)->obd_stats, coffset); \
- }
-
-#define EXP_COUNTER_INCREMENT(export, op) \
- if ((export)->exp_obd->obd_stats != NULL) { \
- unsigned int coffset; \
- coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \
- OBD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (export)->exp_obd->obd_stats->ls_num); \
- lprocfs_counter_incr((export)->exp_obd->obd_stats, coffset); \
- }
-
-#define MD_COUNTER_OFFSET(op) \
- ((offsetof(struct md_ops, m_ ## op) - \
- offsetof(struct md_ops, m_getstatus)) \
- / sizeof(((struct md_ops *)(0))->m_getstatus))
-
-#define MD_COUNTER_INCREMENT(obdx, op) \
-	if ((obdx)->md_stats != NULL) {				  \
- unsigned int coffset; \
- coffset = (unsigned int)((obdx)->md_cntr_base) + \
- MD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (obdx)->md_stats->ls_num); \
- lprocfs_counter_incr((obdx)->md_stats, coffset); \
- }
-
-#define EXP_MD_COUNTER_INCREMENT(export, op) \
-	if ((export)->exp_obd->md_stats != NULL) {			  \
- unsigned int coffset; \
- coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \
- MD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \
- lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \
- if ((export)->exp_md_stats != NULL) \
- lprocfs_counter_incr( \
- (export)->exp_md_stats, coffset); \
- }
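
The *_COUNTER_OFFSET() macros above turn a method name into a stats-array slot by taking the offset of its function pointer within the ops table and dividing by the size of one pointer, so every op gets a stable index starting at zero. A small stand-alone replica of that trick, on an invented ops struct:

/* stand-alone demo; struct demo_ops is made up, only the offsetof trick matters */
#include <stdio.h>
#include <stddef.h>

struct demo_ops {
	int (*o_iocontrol)(void);
	int (*o_get_info)(void);
	int (*o_set_info_async)(void);
	int (*o_attach)(void);
};

#define DEMO_COUNTER_OFFSET(op) \
	((offsetof(struct demo_ops, o_ ## op) - \
	  offsetof(struct demo_ops, o_iocontrol)) \
	 / sizeof(((struct demo_ops *)0)->o_iocontrol))

int main(void)
{
	printf("iocontrol      -> slot %zu\n", DEMO_COUNTER_OFFSET(iocontrol));
	printf("get_info       -> slot %zu\n", DEMO_COUNTER_OFFSET(get_info));
	printf("set_info_async -> slot %zu\n", DEMO_COUNTER_OFFSET(set_info_async));
	printf("attach         -> slot %zu\n", DEMO_COUNTER_OFFSET(attach));
	return 0;
}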
-
-
-#define OBD_CHECK_MD_OP(obd, op, err) \
-do { \
- if (!OBT(obd) || !MDP((obd), op)) { \
- if (err) \
- CERROR("md_" #op ": dev %s/%d no operation\n", \
- obd->obd_name, obd->obd_minor); \
- return err; \
- } \
-} while (0)
-
-#define EXP_CHECK_MD_OP(exp, op) \
-do { \
- if ((exp) == NULL) { \
- CERROR("obd_" #op ": NULL export\n"); \
- return -ENODEV; \
- } \
- if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
- CERROR("obd_" #op ": cleaned up obd\n"); \
- return -EOPNOTSUPP; \
- } \
- if (!OBT((exp)->exp_obd) || !MDP((exp)->exp_obd, op)) { \
- CERROR("obd_" #op ": dev %s/%d no operation\n", \
- (exp)->exp_obd->obd_name, \
- (exp)->exp_obd->obd_minor); \
- return -EOPNOTSUPP; \
- } \
-} while (0)
-
-
-#define OBD_CHECK_DT_OP(obd, op, err) \
-do { \
- if (!OBT(obd) || !OBP((obd), op)) { \
- if (err) \
- CERROR("obd_" #op ": dev %d no operation\n", \
- obd->obd_minor); \
- return err; \
- } \
-} while (0)
-
-#define EXP_CHECK_DT_OP(exp, op) \
-do { \
- if ((exp) == NULL) { \
- CERROR("obd_" #op ": NULL export\n"); \
- return -ENODEV; \
- } \
- if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
- CERROR("obd_" #op ": cleaned up obd\n"); \
- return -EOPNOTSUPP; \
- } \
- if (!OBT((exp)->exp_obd) || !OBP((exp)->exp_obd, op)) { \
- CERROR("obd_" #op ": dev %d no operation\n", \
- (exp)->exp_obd->obd_minor); \
- return -EOPNOTSUPP; \
- } \
-} while (0)
-
-#define CTXT_CHECK_OP(ctxt, op, err) \
-do { \
- if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \
- if (err) \
- CERROR("lop_" #op ": dev %d no operation\n", \
- ctxt->loc_obd->obd_minor); \
- return err; \
- } \
-} while (0)
-
-static inline int class_devno_max(void)
-{
- return MAX_OBD_DEVICES;
-}
-
-static inline int obd_get_info(const struct lu_env *env,
- struct obd_export *exp, __u32 keylen,
- void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, get_info);
- EXP_COUNTER_INCREMENT(exp, get_info);
-
- rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val,
- lsm);
- return rc;
-}
-
-static inline int obd_set_info_async(const struct lu_env *env,
- struct obd_export *exp, u32 keylen,
- void *key, u32 vallen, void *val,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, set_info_async);
- EXP_COUNTER_INCREMENT(exp, set_info_async);
-
- rc = OBP(exp->exp_obd, set_info_async)(env, exp, keylen, key, vallen,
- val, set);
- return rc;
-}
-
-/*
- * obd-lu integration.
- *
- * Functionality is being moved into new lu_device-based layering, but some
- * pieces of configuration process are still based on obd devices.
- *
- * Specifically, lu_device_type_operations::ldto_device_alloc() methods fully
- * subsume ->o_setup() methods of obd devices they replace. The same for
- * lu_device_operations::ldo_process_config() and ->o_process_config(). As a
- * result, obd_setup() and obd_process_config() branch and call one XOR
- * another.
- *
- * Yet neither lu_device_type_operations::ldto_device_fini() nor
- * lu_device_type_operations::ldto_device_free() fully implement the
- * functionality of ->o_precleanup() and ->o_cleanup() they override. Hence,
- * obd_precleanup() and obd_cleanup() call both lu_device and obd operations.
- */
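
In other words, a device type that provides an lu_device_type takes the new path and the legacy ->o_setup()/->o_process_config() is skipped, whereas the cleanup path runs through both. A toy dispatcher (plain function pointers, no Lustre types) showing the one-or-the-other setup branch:

/* stand-alone sketch of the dispatch shape only, not the real obd_setup() */
#include <stdio.h>
#include <stddef.h>

struct demo_dev {
	int (*lu_setup)(void);		/* new-style hook, may be NULL */
	int (*obd_setup)(void);		/* legacy hook */
};

static int demo_setup(struct demo_dev *d)
{
	if (d->lu_setup)		/* new path wins when present... */
		return d->lu_setup();
	return d->obd_setup();		/* ...otherwise fall back to legacy */
}

static int lu_impl(void)  { printf("lu_device setup\n");  return 0; }
static int obd_impl(void) { printf("legacy obd setup\n"); return 0; }

int main(void)
{
	struct demo_dev new_style = { .lu_setup = lu_impl, .obd_setup = obd_impl };
	struct demo_dev old_style = { .lu_setup = NULL,    .obd_setup = obd_impl };

	demo_setup(&new_style);		/* prints "lu_device setup" */
	demo_setup(&old_style);		/* prints "legacy obd setup" */
	return 0;
}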
-
-#define DECLARE_LU_VARS(ldt, d) \
- struct lu_device_type *ldt; \
- struct lu_device *d
-
-static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
-{
- int rc;
- DECLARE_LU_VARS(ldt, d);
-
- ldt = obd->obd_type->typ_lu;
- if (ldt != NULL) {
- struct lu_context session_ctx;
- struct lu_env env;
- lu_context_init(&session_ctx, LCT_SESSION);
- session_ctx.lc_thread = NULL;
- lu_context_enter(&session_ctx);
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- env.le_ses = &session_ctx;
- d = ldt->ldt_ops->ldto_device_alloc(&env, ldt, cfg);
- lu_env_fini(&env);
- if (!IS_ERR(d)) {
- obd->obd_lu_dev = d;
- d->ld_obd = obd;
- rc = 0;
- } else
- rc = PTR_ERR(d);
- }
- lu_context_exit(&session_ctx);
- lu_context_fini(&session_ctx);
-
- } else {
- OBD_CHECK_DT_OP(obd, setup, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, setup);
- rc = OBP(obd, setup)(obd, cfg);
- }
- return rc;
-}
-
-static inline int obd_precleanup(struct obd_device *obd,
- enum obd_cleanup_stage cleanup_stage)
-{
- int rc;
- DECLARE_LU_VARS(ldt, d);
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
- ldt = obd->obd_type->typ_lu;
- d = obd->obd_lu_dev;
- if (ldt != NULL && d != NULL) {
- if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
- struct lu_env env;
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- ldt->ldt_ops->ldto_device_fini(&env, d);
- lu_env_fini(&env);
- }
- }
- }
- OBD_CHECK_DT_OP(obd, precleanup, 0);
- OBD_COUNTER_INCREMENT(obd, precleanup);
-
- rc = OBP(obd, precleanup)(obd, cleanup_stage);
- return rc;
-}
-
-static inline int obd_cleanup(struct obd_device *obd)
-{
- int rc;
- DECLARE_LU_VARS(ldt, d);
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
-
- ldt = obd->obd_type->typ_lu;
- d = obd->obd_lu_dev;
- if (ldt != NULL && d != NULL) {
- struct lu_env env;
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- ldt->ldt_ops->ldto_device_free(&env, d);
- lu_env_fini(&env);
- obd->obd_lu_dev = NULL;
- }
- }
- OBD_CHECK_DT_OP(obd, cleanup, 0);
- OBD_COUNTER_INCREMENT(obd, cleanup);
-
- rc = OBP(obd, cleanup)(obd);
- return rc;
-}
-
-static inline void obd_cleanup_client_import(struct obd_device *obd)
-{
- /* If we set up but never connected, the
- client import will not have been cleaned. */
- down_write(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import) {
- struct obd_import *imp;
- imp = obd->u.cli.cl_import;
- CDEBUG(D_CONFIG, "%s: client import never connected\n",
- obd->obd_name);
- ptlrpc_invalidate_import(imp);
- client_destroy_import(imp);
- obd->u.cli.cl_import = NULL;
- }
- up_write(&obd->u.cli.cl_sem);
-}
-
-static inline int
-obd_process_config(struct obd_device *obd, int datalen, void *data)
-{
- int rc;
- DECLARE_LU_VARS(ldt, d);
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
-
- obd->obd_process_conf = 1;
- ldt = obd->obd_type->typ_lu;
- d = obd->obd_lu_dev;
- if (ldt != NULL && d != NULL) {
- struct lu_env env;
-
- rc = lu_env_init(&env, ldt->ldt_ctx_tags);
- if (rc == 0) {
- rc = d->ld_ops->ldo_process_config(&env, d, data);
- lu_env_fini(&env);
- }
- } else {
- OBD_CHECK_DT_OP(obd, process_config, -EOPNOTSUPP);
- rc = OBP(obd, process_config)(obd, datalen, data);
- }
- OBD_COUNTER_INCREMENT(obd, process_config);
- obd->obd_process_conf = 0;
-
- return rc;
-}
-
-/* Pack an in-memory MD struct for storage on disk.
- * Returns +ve size of packed MD (0 for free), or -ve error.
- *
- * If @disk_tgt == NULL, MD size is returned (max size if @mem_src == NULL).
- * If @*disk_tgt != NULL and @mem_src == NULL, @*disk_tgt will be freed.
- * If @*disk_tgt == NULL, it will be allocated
- */
-static inline int obd_packmd(struct obd_export *exp,
- struct lov_mds_md **disk_tgt,
- struct lov_stripe_md *mem_src)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, packmd);
- EXP_COUNTER_INCREMENT(exp, packmd);
-
- rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src);
- return rc;
-}
-
-static inline int obd_size_diskmd(struct obd_export *exp,
- struct lov_stripe_md *mem_src)
-{
- return obd_packmd(exp, NULL, mem_src);
-}
-
-static inline int obd_free_diskmd(struct obd_export *exp,
- struct lov_mds_md **disk_tgt)
-{
- LASSERT(disk_tgt);
- LASSERT(*disk_tgt);
-	/*
-	 * LU-2590: for the caller's convenience *disk_tgt may be in host
-	 * endianness, so swab it to LE if necessary. Only the lov_mds_md
-	 * header is needed to figure out how much memory has to be
-	 * freed.
-	 */
- if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
- (((*disk_tgt)->lmm_magic == LOV_MAGIC_V1) ||
- ((*disk_tgt)->lmm_magic == LOV_MAGIC_V3)))
- lustre_swab_lov_mds_md(*disk_tgt);
- return obd_packmd(exp, disk_tgt, NULL);
-}
-
-/* Unpack an MD struct from disk to in-memory format.
- * Returns +ve size of unpacked MD (0 for free), or -ve error.
- *
- * If @mem_tgt == NULL, MD size is returned (max size if @disk_src == NULL).
- * If @*mem_tgt != NULL and @disk_src == NULL, @*mem_tgt will be freed.
- * If @*mem_tgt == NULL, it will be allocated
- */
-static inline int obd_unpackmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt,
- struct lov_mds_md *disk_src,
- int disk_len)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, unpackmd);
- EXP_COUNTER_INCREMENT(exp, unpackmd);
-
- rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len);
- return rc;
-}
-
-/* helper functions */
-static inline int obd_alloc_memmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt)
-{
- LASSERT(mem_tgt);
- LASSERT(*mem_tgt == NULL);
- return obd_unpackmd(exp, mem_tgt, NULL, 0);
-}
-
-static inline int obd_free_memmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt)
-{
- int rc;
-
- LASSERT(mem_tgt);
- LASSERT(*mem_tgt);
- rc = obd_unpackmd(exp, mem_tgt, NULL, 0);
- *mem_tgt = NULL;
- return rc;
-}
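
All of the packmd/unpackmd wrappers above lean on one calling convention, spelled out in the comments: a NULL target asks for the packed size, a NULL source frees a previously produced buffer, and a NULL *target gets allocated on demand. A user-space toy following the same convention (a string standing in for the stripe metadata):

/* stand-alone toy, not the Lustre API */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int demo_packmd(char **tgt, const char *src)
{
	if (!tgt)				/* NULL target: size query only */
		return (int)strlen(src) + 1;
	if (!src) {				/* NULL source: free request */
		free(*tgt);
		*tgt = NULL;
		return 0;
	}
	if (!*tgt) {				/* allocate on first use */
		*tgt = malloc(strlen(src) + 1);
		if (!*tgt)
			return -1;
	}
	strcpy(*tgt, src);
	return (int)strlen(src) + 1;
}

int main(void)
{
	char *disk = NULL;
	int size;

	size = demo_packmd(NULL, "stripe-metadata");	/* how big would it be? */
	printf("packed size: %d\n", size);
	demo_packmd(&disk, "stripe-metadata");		/* actually pack it */
	printf("packed: %s\n", disk);
	demo_packmd(&disk, NULL);			/* and free it again */
	return 0;
}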
-
-static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct lov_stripe_md **ea,
- struct obd_trans_info *oti)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, create);
- EXP_COUNTER_INCREMENT(exp, create);
-
- rc = OBP(exp->exp_obd, create)(env, exp, obdo, ea, oti);
- return rc;
-}
-
-static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct lov_stripe_md *ea,
- struct obd_trans_info *oti,
- struct obd_export *md_exp)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, destroy);
- EXP_COUNTER_INCREMENT(exp, destroy);
-
- rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, ea, oti, md_exp);
- return rc;
-}
-
-static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, getattr);
- EXP_COUNTER_INCREMENT(exp, getattr);
-
- rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo);
- return rc;
-}
-
-static inline int obd_getattr_async(struct obd_export *exp,
- struct obd_info *oinfo,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, getattr_async);
- EXP_COUNTER_INCREMENT(exp, getattr_async);
-
- rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set);
- return rc;
-}
-
-static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr);
- EXP_COUNTER_INCREMENT(exp, setattr);
-
- rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti);
- return rc;
-}
-
-/* This performs all the request set init/wait/destroy actions. */
-static inline int obd_setattr_rqset(struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti)
-{
- struct ptlrpc_request_set *set = NULL;
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr_async);
- EXP_COUNTER_INCREMENT(exp, setattr_async);
-
- set = ptlrpc_prep_set();
- if (set == NULL)
- return -ENOMEM;
-
- rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- return rc;
-}
-
-/* This adds all the requests into @set if @set != NULL, otherwise
-   all requests are sent asynchronously without waiting for a response. */
-static inline int obd_setattr_async(struct obd_export *exp,
- struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *set)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, setattr_async);
- EXP_COUNTER_INCREMENT(exp, setattr_async);
-
- rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
- return rc;
-}
-
-static inline int obd_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority)
-{
- struct obd_device *obd = imp->imp_obd;
- int rc;
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, add_conn, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, add_conn);
-
- rc = OBP(obd, add_conn)(imp, uuid, priority);
- return rc;
-}
-
-static inline int obd_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
-{
- struct obd_device *obd = imp->imp_obd;
- int rc;
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, del_conn, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, del_conn);
-
- rc = OBP(obd, del_conn)(imp, uuid);
- return rc;
-}
-
-static inline struct obd_uuid *obd_get_uuid(struct obd_export *exp)
-{
- struct obd_uuid *uuid;
-
- OBD_CHECK_DT_OP(exp->exp_obd, get_uuid, NULL);
- EXP_COUNTER_INCREMENT(exp, get_uuid);
-
- uuid = OBP(exp->exp_obd, get_uuid)(exp);
- return uuid;
-}
-
-/** Create a new \a exp on device \a obd for the uuid \a cluuid
- * @param exp New export handle
- * @param data Connect data; supported flags are set, and the flags also
- * understood by the obd are returned.
- */
-static inline int obd_connect(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *obd,
- struct obd_uuid *cluuid,
- struct obd_connect_data *data,
- void *localdata)
-{
- int rc;
- __u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition
- * check */
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, connect, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, connect);
-
- rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata);
- /* check that only subset is granted */
- LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) ==
- data->ocd_connect_flags));
- return rc;
-}
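
The LASSERT(ergo(...)) post-condition above encodes a simple bitmask relation: after the connect call, ocd_connect_flags holds what the target granted, and every granted bit must also have been requested. The same check in isolation, with invented flag values:

/* stand-alone check; flag values are arbitrary for the demo */
#include <stdio.h>

/* granted is a subset of requested iff no granted bit falls outside requested */
static int demo_is_subset(unsigned long long granted, unsigned long long requested)
{
	return (granted & requested) == granted;
}

int main(void)
{
	unsigned long long requested = 0x00f0ULL;	/* what the client asked for */

	printf("granted 0x0030 -> %s\n",
	       demo_is_subset(0x0030ULL, requested) ? "ok" : "post-condition violated");
	printf("granted 0x0130 -> %s\n",
	       demo_is_subset(0x0130ULL, requested) ? "ok" : "post-condition violated");
	return 0;
}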
-
-static inline int obd_reconnect(const struct lu_env *env,
- struct obd_export *exp,
- struct obd_device *obd,
- struct obd_uuid *cluuid,
- struct obd_connect_data *d,
- void *localdata)
-{
- int rc;
- __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition
- * check */
-
- rc = obd_check_dev_active(obd);
- if (rc)
- return rc;
- OBD_CHECK_DT_OP(obd, reconnect, 0);
- OBD_COUNTER_INCREMENT(obd, reconnect);
-
- rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata);
- /* check that only subset is granted */
- LASSERT(ergo(d != NULL,
- (d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
- return rc;
-}
-
-static inline int obd_disconnect(struct obd_export *exp)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, disconnect);
- EXP_COUNTER_INCREMENT(exp, disconnect);
-
- rc = OBP(exp->exp_obd, disconnect)(exp);
- return rc;
-}
-
-static inline int obd_fid_init(struct obd_device *obd, struct obd_export *exp,
- enum lu_cli_type type)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, fid_init, 0);
- OBD_COUNTER_INCREMENT(obd, fid_init);
-
- rc = OBP(obd, fid_init)(obd, exp, type);
- return rc;
-}
-
-static inline int obd_fid_fini(struct obd_device *obd)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, fid_fini, 0);
- OBD_COUNTER_INCREMENT(obd, fid_fini);
-
- rc = OBP(obd, fid_fini)(obd);
- return rc;
-}
-
-static inline int obd_fid_alloc(struct obd_export *exp,
- struct lu_fid *fid,
- struct md_op_data *op_data)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, fid_alloc);
- EXP_COUNTER_INCREMENT(exp, fid_alloc);
-
- rc = OBP(exp->exp_obd, fid_alloc)(exp, fid, op_data);
- return rc;
-}
-
-static inline int obd_pool_new(struct obd_device *obd, char *poolname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_new, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_new);
-
- rc = OBP(obd, pool_new)(obd, poolname);
- return rc;
-}
-
-static inline int obd_pool_del(struct obd_device *obd, char *poolname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_del, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_del);
-
- rc = OBP(obd, pool_del)(obd, poolname);
- return rc;
-}
-
-static inline int obd_pool_add(struct obd_device *obd, char *poolname, char *ostname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_add, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_add);
-
- rc = OBP(obd, pool_add)(obd, poolname, ostname);
- return rc;
-}
-
-static inline int obd_pool_rem(struct obd_device *obd, char *poolname, char *ostname)
-{
- int rc;
-
- OBD_CHECK_DT_OP(obd, pool_rem, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, pool_rem);
-
- rc = OBP(obd, pool_rem)(obd, poolname, ostname);
- return rc;
-}
-
-static inline void obd_getref(struct obd_device *obd)
-{
- if (OBT(obd) && OBP(obd, getref)) {
- OBD_COUNTER_INCREMENT(obd, getref);
- OBP(obd, getref)(obd);
- }
-}
-
-static inline void obd_putref(struct obd_device *obd)
-{
- if (OBT(obd) && OBP(obd, putref)) {
- OBD_COUNTER_INCREMENT(obd, putref);
- OBP(obd, putref)(obd);
- }
-}
-
-static inline int obd_init_export(struct obd_export *exp)
-{
- int rc = 0;
-
- if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
- OBP((exp)->exp_obd, init_export))
- rc = OBP(exp->exp_obd, init_export)(exp);
- return rc;
-}
-
-static inline int obd_destroy_export(struct obd_export *exp)
-{
- if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
- OBP((exp)->exp_obd, destroy_export))
- OBP(exp->exp_obd, destroy_export)(exp);
- return 0;
-}
-
-/* @max_age is the oldest time in jiffies that we accept for using cached data.
- * If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
-static inline int obd_statfs_async(struct obd_export *exp,
- struct obd_info *oinfo,
- __u64 max_age,
- struct ptlrpc_request_set *rqset)
-{
- int rc = 0;
- struct obd_device *obd;
-
- if (exp == NULL || exp->exp_obd == NULL)
- return -EINVAL;
-
- obd = exp->exp_obd;
- OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, statfs);
-
- CDEBUG(D_SUPER, "%s: osfs %p age %llu, max_age %llu\n",
- obd->obd_name, &obd->obd_osfs, obd->obd_osfs_age, max_age);
- if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
- rc = OBP(obd, statfs_async)(exp, oinfo, max_age, rqset);
- } else {
- CDEBUG(D_SUPER,
- "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
- obd->obd_name, &obd->obd_osfs,
- obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
- obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- spin_lock(&obd->obd_osfs_lock);
- memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
- spin_unlock(&obd->obd_osfs_lock);
- oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
- if (oinfo->oi_cb_up)
- oinfo->oi_cb_up(oinfo, 0);
- }
- return rc;
-}
-
-static inline int obd_statfs_rqset(struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age,
- __u32 flags)
-{
- struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = { };
- int rc = 0;
-
- set = ptlrpc_prep_set();
- if (set == NULL)
- return -ENOMEM;
-
- oinfo.oi_osfs = osfs;
- oinfo.oi_flags = flags;
- rc = obd_statfs_async(exp, &oinfo, max_age, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- return rc;
-}
-
-/* @max_age is the oldest time in jiffies that we accept for using cached data.
- * If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
-static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age,
- __u32 flags)
-{
- int rc = 0;
- struct obd_device *obd = exp->exp_obd;
-
- if (obd == NULL)
- return -EINVAL;
-
- OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
- OBD_COUNTER_INCREMENT(obd, statfs);
-
- CDEBUG(D_SUPER, "osfs %llu, max_age %llu\n",
- obd->obd_osfs_age, max_age);
- if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
- rc = OBP(obd, statfs)(env, exp, osfs, max_age, flags);
- if (rc == 0) {
- spin_lock(&obd->obd_osfs_lock);
- memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
- obd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&obd->obd_osfs_lock);
- }
- } else {
- CDEBUG(D_SUPER, "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
- obd->obd_name, &obd->obd_osfs,
- obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
- obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- spin_lock(&obd->obd_osfs_lock);
- memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
- spin_unlock(&obd->obd_osfs_lock);
- }
- return rc;
-}
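
Both obd_statfs() and obd_statfs_async() above apply the rule described in the comment: serve the cached obd_osfs when it was refreshed at or after @max_age, otherwise go to the target. Reduced to plain integers standing in for jiffies, the decision is just a comparison:

/* stand-alone model of the freshness test only */
#include <stdio.h>

/* return 1 when the cached copy (taken at cache_age) is still acceptable */
static int demo_use_cache(unsigned long long cache_age, unsigned long long max_age)
{
	return cache_age >= max_age;	/* stale if it predates max_age */
}

int main(void)
{
	unsigned long long now = 100000;	 /* pretend "current jiffies" */
	unsigned long long max_age = now - 1000; /* accept data up to ~1s old */

	printf("cache refreshed 500 ticks ago  -> %s\n",
	       demo_use_cache(now - 500, max_age) ? "use cache" : "refetch");
	printf("cache refreshed 5000 ticks ago -> %s\n",
	       demo_use_cache(now - 5000, max_age) ? "use cache" : "refetch");
	printf("max_age = now + HZ             -> %s\n",
	       demo_use_cache(now - 500, now + 1000) ? "use cache" : "refetch");
	return 0;
}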
-
-static inline int obd_preprw(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa,
- int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *remote, int *pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, preprw);
- EXP_COUNTER_INCREMENT(exp, preprw);
-
- rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
- pages, local, oti);
- return rc;
-}
-
-static inline int obd_commitrw(const struct lu_env *env, int cmd,
- struct obd_export *exp, struct obdo *oa,
- int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *rnb, int pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti, int rc)
-{
- EXP_CHECK_DT_OP(exp, commitrw);
- EXP_COUNTER_INCREMENT(exp, commitrw);
-
- rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
- rnb, pages, local, oti, rc);
- return rc;
-}
-
-static inline int obd_adjust_kms(struct obd_export *exp,
- struct lov_stripe_md *lsm, u64 size,
- int shrink)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, adjust_kms);
- EXP_COUNTER_INCREMENT(exp, adjust_kms);
-
- rc = OBP(exp->exp_obd, adjust_kms)(exp, lsm, size, shrink);
- return rc;
-}
-
-static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
- int len, void *karg, void *uarg)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, iocontrol);
- EXP_COUNTER_INCREMENT(exp, iocontrol);
-
- rc = OBP(exp->exp_obd, iocontrol)(cmd, exp, len, karg, uarg);
- return rc;
-}
-
-static inline int obd_find_cbdata(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- ldlm_iterator_t it, void *data)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, find_cbdata);
- EXP_COUNTER_INCREMENT(exp, find_cbdata);
-
- rc = OBP(exp->exp_obd, find_cbdata)(exp, lsm, it, data);
- return rc;
-}
-
-static inline void obd_import_event(struct obd_device *obd,
- struct obd_import *imp,
- enum obd_import_event event)
-{
- if (!obd) {
- CERROR("NULL device\n");
- return;
- }
- if (obd->obd_set_up && OBP(obd, import_event)) {
- OBD_COUNTER_INCREMENT(obd, import_event);
- OBP(obd, import_event)(obd, imp, event);
- }
-}
-
-static inline int obd_notify(struct obd_device *obd,
- struct obd_device *watched,
- enum obd_notify_event ev,
- void *data)
-{
- int rc;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
-
- /* the check for async_recov is a complete hack - I'm hereby
- overloading the meaning to also mean "this was called from
- mds_postsetup". I know that my mds is able to handle notifies
- by this point, and it needs to get them to execute mds_postrecov. */
- if (!obd->obd_set_up && !obd->obd_async_recov) {
- CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name);
- return -EINVAL;
- }
-
- if (!OBP(obd, notify)) {
- CDEBUG(D_HA, "obd %s has no notify handler\n", obd->obd_name);
- return -ENOSYS;
- }
-
- OBD_COUNTER_INCREMENT(obd, notify);
- rc = OBP(obd, notify)(obd, watched, ev, data);
- return rc;
-}
-
-static inline int obd_notify_observer(struct obd_device *observer,
- struct obd_device *observed,
- enum obd_notify_event ev,
- void *data)
-{
- int rc1;
- int rc2;
-
- struct obd_notify_upcall *onu;
-
- if (observer->obd_observer)
- rc1 = obd_notify(observer->obd_observer, observed, ev, data);
- else
- rc1 = 0;
- /*
- * Also, call non-obd listener, if any
- */
- onu = &observer->obd_upcall;
- if (onu->onu_upcall != NULL)
- rc2 = onu->onu_upcall(observer, observed, ev,
- onu->onu_owner, NULL);
- else
- rc2 = 0;
-
- return rc1 ? rc1 : rc2;
-}
-
-static inline int obd_quotacheck(struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, quotacheck);
- EXP_COUNTER_INCREMENT(exp, quotacheck);
-
- rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl);
- return rc;
-}
-
-static inline int obd_quotactl(struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, quotactl);
- EXP_COUNTER_INCREMENT(exp, quotactl);
-
- rc = OBP(exp->exp_obd, quotactl)(exp->exp_obd, exp, oqctl);
- return rc;
-}
-
-static inline int obd_health_check(const struct lu_env *env,
- struct obd_device *obd)
-{
- /* returns: 0 on healthy
- * >0 on unhealthy + reason code/flag
- * however the only supported reason == 1 right now
- * We'll need to define some better reasons
- * or flags in the future.
- * <0 on error
- */
- int rc;
-
- /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */
- if (obd == NULL || !OBT(obd)) {
- CERROR("cleaned up obd\n");
- return -EOPNOTSUPP;
- }
- if (!obd->obd_set_up || obd->obd_stopping)
- return 0;
- if (!OBP(obd, health_check))
- return 0;
-
- rc = OBP(obd, health_check)(env, obd);
- return rc;
-}
-
-static inline int obd_register_observer(struct obd_device *obd,
- struct obd_device *observer)
-{
- int rc;
-
- rc = obd_check_dev(obd);
- if (rc)
- return rc;
- down_write(&obd->obd_observer_link_sem);
- if (obd->obd_observer && observer) {
- up_write(&obd->obd_observer_link_sem);
- return -EALREADY;
- }
- obd->obd_observer = observer;
- up_write(&obd->obd_observer_link_sem);
- return 0;
-}
-
-#if 0
-static inline int obd_register_page_removal_cb(struct obd_export *exp,
- obd_page_removal_cb_t cb,
- obd_pin_extent_cb pin_cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb);
-
- rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb);
- return rc;
-}
-
-static inline int obd_unregister_page_removal_cb(struct obd_export *exp,
- obd_page_removal_cb_t cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb);
-
- rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb);
- return rc;
-}
-
-static inline int obd_register_lock_cancel_cb(struct obd_export *exp,
- obd_lock_cancel_cb cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb);
-
- rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb);
- return rc;
-}
-
-static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp,
- obd_lock_cancel_cb cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb);
-
- rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb);
- return rc;
-}
-#endif
-
-/* metadata helpers */
-static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, getstatus);
- EXP_MD_COUNTER_INCREMENT(exp, getstatus);
- rc = MDP(exp->exp_obd, getstatus)(exp, fid);
- return rc;
-}
-
-static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, getattr);
- EXP_MD_COUNTER_INCREMENT(exp, getattr);
- rc = MDP(exp->exp_obd, getattr)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_null_inode(struct obd_export *exp,
- const struct lu_fid *fid)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, null_inode);
- EXP_MD_COUNTER_INCREMENT(exp, null_inode);
- rc = MDP(exp->exp_obd, null_inode)(exp, fid);
- return rc;
-}
-
-static inline int md_find_cbdata(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_iterator_t it, void *data)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, find_cbdata);
- EXP_MD_COUNTER_INCREMENT(exp, find_cbdata);
- rc = MDP(exp->exp_obd, find_cbdata)(exp, fid, it, data);
- return rc;
-}
-
-static inline int md_close(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, close);
- EXP_MD_COUNTER_INCREMENT(exp, close);
- rc = MDP(exp->exp_obd, close)(exp, op_data, mod, request);
- return rc;
-}
-
-static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid,
- __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, create);
- EXP_MD_COUNTER_INCREMENT(exp, create);
- rc = MDP(exp->exp_obd, create)(exp, op_data, data, datalen, mode,
- uid, gid, cap_effective, rdev, request);
- return rc;
-}
-
-static inline int md_done_writing(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, done_writing);
- EXP_MD_COUNTER_INCREMENT(exp, done_writing);
- rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
- return rc;
-}
-
-static inline int md_enqueue(struct obd_export *exp,
- struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it,
- struct md_op_data *op_data,
- struct lustre_handle *lockh,
- void *lmm, int lmmsize,
- struct ptlrpc_request **req,
- __u64 extra_lock_flags)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, enqueue);
- EXP_MD_COUNTER_INCREMENT(exp, enqueue);
- rc = MDP(exp->exp_obd, enqueue)(exp, einfo, it, op_data, lockh,
- lmm, lmmsize, req, extra_lock_flags);
- return rc;
-}
-
-static inline int md_getattr_name(struct obd_export *exp,
- struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, getattr_name);
- EXP_MD_COUNTER_INCREMENT(exp, getattr_name);
- rc = MDP(exp->exp_obd, getattr_name)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_intent_lock(struct obd_export *exp,
- struct md_op_data *op_data, void *lmm,
- int lmmsize, struct lookup_intent *it,
- int lookup_flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, intent_lock);
- EXP_MD_COUNTER_INCREMENT(exp, intent_lock);
- rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, lmm, lmmsize,
- it, lookup_flags, reqp, cb_blocking,
- extra_lock_flags);
- return rc;
-}
-
-static inline int md_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, link);
- EXP_MD_COUNTER_INCREMENT(exp, link);
- rc = MDP(exp->exp_obd, link)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new,
- int newlen, struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, rename);
- EXP_MD_COUNTER_INCREMENT(exp, rename);
- rc = MDP(exp->exp_obd, rename)(exp, op_data, old, oldlen, new,
- newlen, request);
- return rc;
-}
-
-static inline int md_is_subdir(struct obd_export *exp,
- const struct lu_fid *pfid,
- const struct lu_fid *cfid,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, is_subdir);
- EXP_MD_COUNTER_INCREMENT(exp, is_subdir);
- rc = MDP(exp->exp_obd, is_subdir)(exp, pfid, cfid, request);
- return rc;
-}
-
-static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
- struct ptlrpc_request **request,
- struct md_open_data **mod)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, setattr);
- EXP_MD_COUNTER_INCREMENT(exp, setattr);
- rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen,
- ea2, ea2len, request, mod);
- return rc;
-}
-
-static inline int md_sync(struct obd_export *exp, const struct lu_fid *fid,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, sync);
- EXP_MD_COUNTER_INCREMENT(exp, sync);
- rc = MDP(exp->exp_obd, sync)(exp, fid, request);
- return rc;
-}
-
-static inline int md_readpage(struct obd_export *exp, struct md_op_data *opdata,
- struct page **pages,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, readpage);
- EXP_MD_COUNTER_INCREMENT(exp, readpage);
- rc = MDP(exp->exp_obd, readpage)(exp, opdata, pages, request);
- return rc;
-}
-
-static inline int md_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, unlink);
- EXP_MD_COUNTER_INCREMENT(exp, unlink);
- rc = MDP(exp->exp_obd, unlink)(exp, op_data, request);
- return rc;
-}
-
-static inline int md_get_lustre_md(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct obd_export *dt_exp,
- struct obd_export *md_exp,
- struct lustre_md *md)
-{
- EXP_CHECK_MD_OP(exp, get_lustre_md);
- EXP_MD_COUNTER_INCREMENT(exp, get_lustre_md);
- return MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md);
-}
-
-static inline int md_free_lustre_md(struct obd_export *exp,
- struct lustre_md *md)
-{
- EXP_CHECK_MD_OP(exp, free_lustre_md);
- EXP_MD_COUNTER_INCREMENT(exp, free_lustre_md);
- return MDP(exp->exp_obd, free_lustre_md)(exp, md);
-}
-
-static inline int md_setxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size,
- int output_size, int flags, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- EXP_CHECK_MD_OP(exp, setxattr);
- EXP_MD_COUNTER_INCREMENT(exp, setxattr);
- return MDP(exp->exp_obd, setxattr)(exp, fid, valid, name, input,
- input_size, output_size, flags,
- suppgid, request);
-}
-
-static inline int md_getxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size,
- int output_size, int flags,
- struct ptlrpc_request **request)
-{
- EXP_CHECK_MD_OP(exp, getxattr);
- EXP_MD_COUNTER_INCREMENT(exp, getxattr);
- return MDP(exp->exp_obd, getxattr)(exp, fid, valid, name, input,
- input_size, output_size, flags,
- request);
-}
-
-static inline int md_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it)
-{
- EXP_CHECK_MD_OP(exp, set_open_replay_data);
- EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data);
- return MDP(exp->exp_obd, set_open_replay_data)(exp, och, it);
-}
-
-static inline int md_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och)
-{
- EXP_CHECK_MD_OP(exp, clear_open_replay_data);
- EXP_MD_COUNTER_INCREMENT(exp, clear_open_replay_data);
- return MDP(exp->exp_obd, clear_open_replay_data)(exp, och);
-}
-
-static inline int md_set_lock_data(struct obd_export *exp,
- __u64 *lockh, void *data, __u64 *bits)
-{
- EXP_CHECK_MD_OP(exp, set_lock_data);
- EXP_MD_COUNTER_INCREMENT(exp, set_lock_data);
- return MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits);
-}
-
-static inline int md_cancel_unused(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, cancel_unused);
- EXP_MD_COUNTER_INCREMENT(exp, cancel_unused);
-
- rc = MDP(exp->exp_obd, cancel_unused)(exp, fid, policy, mode,
- flags, opaque);
- return rc;
-}
-
-static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid,
- ldlm_type_t type,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- struct lustre_handle *lockh)
-{
- EXP_CHECK_MD_OP(exp, lock_match);
- EXP_MD_COUNTER_INCREMENT(exp, lock_match);
- return MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
- policy, mode, lockh);
-}
-
-static inline int md_init_ea_size(struct obd_export *exp, int easize,
- int def_asize, int cookiesize,
- int def_cookiesize)
-{
- EXP_CHECK_MD_OP(exp, init_ea_size);
- EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
- return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
- cookiesize, def_cookiesize);
-}
-
-static inline int md_get_remote_perm(struct obd_export *exp,
- const struct lu_fid *fid, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- EXP_CHECK_MD_OP(exp, get_remote_perm);
- EXP_MD_COUNTER_INCREMENT(exp, get_remote_perm);
- return MDP(exp->exp_obd, get_remote_perm)(exp, fid, suppgid,
- request);
-}
-
-static inline int md_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo,
- struct ldlm_enqueue_info *einfo)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, intent_getattr_async);
- EXP_MD_COUNTER_INCREMENT(exp, intent_getattr_async);
- rc = MDP(exp->exp_obd, intent_getattr_async)(exp, minfo, einfo);
- return rc;
-}
-
-static inline int md_revalidate_lock(struct obd_export *exp,
- struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, revalidate_lock);
- EXP_MD_COUNTER_INCREMENT(exp, revalidate_lock);
- rc = MDP(exp->exp_obd, revalidate_lock)(exp, it, fid, bits);
- return rc;
-}
-
-
-/* OBD Metadata Support */
-
-int obd_init_caches(void);
-void obd_cleanup_caches(void);
-
-/* support routines */
-extern struct kmem_cache *obdo_cachep;
-
-#define OBDO_ALLOC(ptr) \
-do { \
- OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, GFP_NOFS); \
-} while (0)
-
-#define OBDO_FREE(ptr) \
-do { \
- OBD_SLAB_FREE_PTR((ptr), obdo_cachep); \
-} while (0)
-
-
-static inline void obdo2fid(struct obdo *oa, struct lu_fid *fid)
-{
- /* something here */
-}
-
-static inline void fid2obdo(struct lu_fid *fid, struct obdo *oa)
-{
- /* something here */
-}
-
-typedef int (*register_lwp_cb)(void *data);
-
-struct lwp_register_item {
- struct obd_export **lri_exp;
- register_lwp_cb lri_cb_func;
- void *lri_cb_data;
- struct list_head lri_list;
- char lri_name[MTI_NAME_MAXLEN];
-};
-
-/* I'm as embarrassed about this as you are.
- *
- * <shaver> // XXX do not look into _superhack with remaining eye
- * <shaver> // XXX if this were any uglier, I'd get my own show on MTV */
-extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
-
-/* obd_mount.c */
-
-/* sysctl.c */
-int obd_sysctl_init(void);
-
-/* uuid.c */
-typedef __u8 class_uuid_t[16];
-void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out);
-
-/* lustre_peer.c */
-int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index);
-int class_add_uuid(const char *uuid, __u64 nid);
-int class_del_uuid (const char *uuid);
-int class_check_uuid(struct obd_uuid *uuid, __u64 nid);
-void class_init_uuidlist(void);
-void class_exit_uuidlist(void);
-
-/* class_obd.c */
-extern char obd_jobid_node[];
-extern struct miscdevice obd_psdev;
-extern spinlock_t obd_types_lock;
-
-/* prng.c */
-#define ll_generate_random_uuid(uuid_out) cfs_get_random_bytes(uuid_out, sizeof(class_uuid_t))
-
-#endif /* __LINUX_OBD_CLASS_H */
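The inline helpers deleted above all share one shape: check that the export or device actually implements the operation, bump a per-operation counter, then dispatch through the operations table via OBP()/MDP(). The fragment below is a minimal, self-contained sketch of that check/count/dispatch pattern only; every struct and function name in it is a hypothetical stand-in, not part of the Lustre API.

/* Stand-alone sketch of the check/count/dispatch pattern used by the
 * obd_* and md_* wrappers above; all names here are hypothetical. */
#include <stdio.h>
#include <errno.h>

struct demo_ops {
	int (*getattr)(int id);		/* may be NULL when unsupported */
};

struct demo_export {
	const struct demo_ops *ops;
	unsigned long getattr_count;	/* stands in for the op counters */
};

static int demo_getattr(struct demo_export *exp, int id)
{
	if (!exp->ops || !exp->ops->getattr)
		return -EOPNOTSUPP;	/* EXP_CHECK_*_OP equivalent */
	exp->getattr_count++;		/* EXP_COUNTER_INCREMENT equivalent */
	return exp->ops->getattr(id);	/* OBP()/MDP() style dispatch */
}

static int demo_getattr_impl(int id)
{
	printf("getattr(%d)\n", id);
	return 0;
}

int main(void)
{
	const struct demo_ops ops = { .getattr = demo_getattr_impl };
	struct demo_export exp = { .ops = &ops, .getattr_count = 0 };

	return demo_getattr(&exp, 42);
}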
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
deleted file mode 100644
index 80ab85d7ec3c..000000000000
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ /dev/null
@@ -1,602 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _OBD_SUPPORT
-#define _OBD_SUPPORT
-
-#include <linux/slab.h>
-#include "../../include/linux/libcfs/libcfs.h"
-#include "linux/lustre_compat25.h"
-#include "lprocfs_status.h"
-
-/* global variables */
-extern unsigned int obd_debug_peer_on_timeout;
-extern unsigned int obd_dump_on_timeout;
-extern unsigned int obd_dump_on_eviction;
-/* obd_timeout should only be used for recovery, not for
- networking / disk / timings affected by load (use Adaptive Timeouts) */
-extern unsigned int obd_timeout; /* seconds */
-extern unsigned int obd_timeout_set;
-extern unsigned int at_min;
-extern unsigned int at_max;
-extern unsigned int at_history;
-extern int at_early_margin;
-extern int at_extra;
-extern unsigned int obd_sync_filter;
-extern unsigned int obd_max_dirty_pages;
-extern atomic_t obd_dirty_pages;
-extern atomic_t obd_dirty_transit_pages;
-extern char obd_jobid_var[];
-
-/* Some hash init argument constants */
-#define HASH_POOLS_BKT_BITS 3
-#define HASH_POOLS_CUR_BITS 3
-#define HASH_POOLS_MAX_BITS 7
-#define HASH_UUID_BKT_BITS 5
-#define HASH_UUID_CUR_BITS 7
-#define HASH_UUID_MAX_BITS 12
-#define HASH_NID_BKT_BITS 5
-#define HASH_NID_CUR_BITS 7
-#define HASH_NID_MAX_BITS 12
-#define HASH_NID_STATS_BKT_BITS 5
-#define HASH_NID_STATS_CUR_BITS 7
-#define HASH_NID_STATS_MAX_BITS 12
-#define HASH_LQE_BKT_BITS 5
-#define HASH_LQE_CUR_BITS 7
-#define HASH_LQE_MAX_BITS 12
-#define HASH_CONN_BKT_BITS 5
-#define HASH_CONN_CUR_BITS 5
-#define HASH_CONN_MAX_BITS 15
-#define HASH_EXP_LOCK_BKT_BITS 5
-#define HASH_EXP_LOCK_CUR_BITS 7
-#define HASH_EXP_LOCK_MAX_BITS 16
-#define HASH_CL_ENV_BKT_BITS 5
-#define HASH_CL_ENV_BITS 10
-#define HASH_JOB_STATS_BKT_BITS 5
-#define HASH_JOB_STATS_CUR_BITS 7
-#define HASH_JOB_STATS_MAX_BITS 12
-
-/* Timeout definitions */
-#define OBD_TIMEOUT_DEFAULT 100
-/* Time to wait for all clients to reconnect during recovery (hard limit) */
-#define OBD_RECOVERY_TIME_HARD (obd_timeout * 9)
-/* Time to wait for all clients to reconnect during recovery (soft limit) */
-/* Should be very conservative; must catch the first reconnect after reboot */
-#define OBD_RECOVERY_TIME_SOFT (obd_timeout * 3)
-/* Change recovery-small 26b time if you change this */
-#define PING_INTERVAL max(obd_timeout / 4, 1U)
-/* a bit more than maximal journal commit time in seconds */
-#define PING_INTERVAL_SHORT min(PING_INTERVAL, 7U)
-/* Client may skip 1 ping; we must wait at least 2.5. But for multiple
- * failover targets the client only pings one server at a time, and pings
- * can be lost on a loaded network. Since eviction has serious consequences,
- * and there's no urgent need to evict a client just because it's idle, we
- * should be very conservative here. */
-#define PING_EVICT_TIMEOUT (PING_INTERVAL * 6)
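As a worked example, assuming obd_timeout is left at OBD_TIMEOUT_DEFAULT (100 seconds), PING_INTERVAL = max(100 / 4, 1) = 25 s and PING_EVICT_TIMEOUT = 6 * 25 = 150 s, so an idle client has to miss roughly six consecutive ping intervals before eviction is even considered.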
-#define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */
-#define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */
- /* Max connect interval for nonresponsive servers; ~50s to avoid building up
- connect requests in the LND queues, but within obd_timeout so we don't
- miss the recovery window */
-#define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout))
-#define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */
-/* In general this should be low to have quick detection of a system
- running on a backup server. (If it's too low, import_select_connection
- will increase the timeout anyhow.) */
-#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20)
-/* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */
-#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
- INITIAL_CONNECT_TIMEOUT)
-/* The min time a target should wait for clients to reconnect in recovery */
-#define OBD_RECOVERY_TIME_MIN (2*RECONNECT_DELAY_MAX)
-#define OBD_IR_FACTOR_MIN 1
-#define OBD_IR_FACTOR_MAX 10
-#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX/2)
-/* default timeout for the MGS to become IR_FULL */
-#define OBD_IR_MGS_TIMEOUT (4*obd_timeout)
-#define LONG_UNLINK 300 /* Unlink should happen before now */
-
-/**
- * Grant shrink interval: if the client has been "idle" for longer than this
- * interval, the ll_grant thread returns the requested grant space to the filter.
- */
-#define GRANT_SHRINK_INTERVAL 1200 /* 20 minutes */
-
-#define OBD_FAIL_MDS 0x100
-#define OBD_FAIL_MDS_HANDLE_UNPACK 0x101
-#define OBD_FAIL_MDS_GETATTR_NET 0x102
-#define OBD_FAIL_MDS_GETATTR_PACK 0x103
-#define OBD_FAIL_MDS_READPAGE_NET 0x104
-#define OBD_FAIL_MDS_READPAGE_PACK 0x105
-#define OBD_FAIL_MDS_SENDPAGE 0x106
-#define OBD_FAIL_MDS_REINT_NET 0x107
-#define OBD_FAIL_MDS_REINT_UNPACK 0x108
-#define OBD_FAIL_MDS_REINT_SETATTR 0x109
-#define OBD_FAIL_MDS_REINT_SETATTR_WRITE 0x10a
-#define OBD_FAIL_MDS_REINT_CREATE 0x10b
-#define OBD_FAIL_MDS_REINT_CREATE_WRITE 0x10c
-#define OBD_FAIL_MDS_REINT_UNLINK 0x10d
-#define OBD_FAIL_MDS_REINT_UNLINK_WRITE 0x10e
-#define OBD_FAIL_MDS_REINT_LINK 0x10f
-#define OBD_FAIL_MDS_REINT_LINK_WRITE 0x110
-#define OBD_FAIL_MDS_REINT_RENAME 0x111
-#define OBD_FAIL_MDS_REINT_RENAME_WRITE 0x112
-#define OBD_FAIL_MDS_OPEN_NET 0x113
-#define OBD_FAIL_MDS_OPEN_PACK 0x114
-#define OBD_FAIL_MDS_CLOSE_NET 0x115
-#define OBD_FAIL_MDS_CLOSE_PACK 0x116
-#define OBD_FAIL_MDS_CONNECT_NET 0x117
-#define OBD_FAIL_MDS_CONNECT_PACK 0x118
-#define OBD_FAIL_MDS_REINT_NET_REP 0x119
-#define OBD_FAIL_MDS_DISCONNECT_NET 0x11a
-#define OBD_FAIL_MDS_GETSTATUS_NET 0x11b
-#define OBD_FAIL_MDS_GETSTATUS_PACK 0x11c
-#define OBD_FAIL_MDS_STATFS_PACK 0x11d
-#define OBD_FAIL_MDS_STATFS_NET 0x11e
-#define OBD_FAIL_MDS_GETATTR_NAME_NET 0x11f
-#define OBD_FAIL_MDS_PIN_NET 0x120
-#define OBD_FAIL_MDS_UNPIN_NET 0x121
-#define OBD_FAIL_MDS_ALL_REPLY_NET 0x122
-#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
-#define OBD_FAIL_MDS_SYNC_NET 0x124
-#define OBD_FAIL_MDS_SYNC_PACK 0x125
-#define OBD_FAIL_MDS_DONE_WRITING_NET 0x126
-#define OBD_FAIL_MDS_DONE_WRITING_PACK 0x127
-#define OBD_FAIL_MDS_ALLOC_OBDO 0x128
-#define OBD_FAIL_MDS_PAUSE_OPEN 0x129
-#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a
-#define OBD_FAIL_MDS_OPEN_CREATE 0x12b
-#define OBD_FAIL_MDS_OST_SETATTR 0x12c
-#define OBD_FAIL_MDS_QUOTACHECK_NET 0x12d
-#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e
-#define OBD_FAIL_MDS_CLIENT_ADD 0x12f
-#define OBD_FAIL_MDS_GETXATTR_NET 0x130
-#define OBD_FAIL_MDS_GETXATTR_PACK 0x131
-#define OBD_FAIL_MDS_SETXATTR_NET 0x132
-#define OBD_FAIL_MDS_SETXATTR 0x133
-#define OBD_FAIL_MDS_SETXATTR_WRITE 0x134
-#define OBD_FAIL_MDS_FS_SETUP 0x135
-#define OBD_FAIL_MDS_RESEND 0x136
-#define OBD_FAIL_MDS_LLOG_CREATE_FAILED 0x137
-#define OBD_FAIL_MDS_LOV_SYNC_RACE 0x138
-#define OBD_FAIL_MDS_OSC_PRECREATE 0x139
-#define OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
-#define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
-#define OBD_FAIL_MDS_BLOCK_QUOTA_REQ 0x13c
-#define OBD_FAIL_MDS_DROP_QUOTA_REQ 0x13d
-#define OBD_FAIL_MDS_REMOVE_COMMON_EA 0x13e
-#define OBD_FAIL_MDS_ALLOW_COMMON_EA_SETTING 0x13f
-#define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140
-#define OBD_FAIL_MDS_LOV_PREP_CREATE 0x141
-#define OBD_FAIL_MDS_REINT_DELAY 0x142
-#define OBD_FAIL_MDS_READLINK_EPROTO 0x143
-#define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144
-#define OBD_FAIL_MDS_PDO_LOCK 0x145
-#define OBD_FAIL_MDS_PDO_LOCK2 0x146
-#define OBD_FAIL_MDS_OSC_CREATE_FAIL 0x147
-#define OBD_FAIL_MDS_NEGATIVE_POSITIVE 0x148
-#define OBD_FAIL_MDS_HSM_STATE_GET_NET 0x149
-#define OBD_FAIL_MDS_HSM_STATE_SET_NET 0x14a
-#define OBD_FAIL_MDS_HSM_PROGRESS_NET 0x14b
-#define OBD_FAIL_MDS_HSM_REQUEST_NET 0x14c
-#define OBD_FAIL_MDS_HSM_CT_REGISTER_NET 0x14d
-#define OBD_FAIL_MDS_HSM_CT_UNREGISTER_NET 0x14e
-#define OBD_FAIL_MDS_SWAP_LAYOUTS_NET 0x14f
-#define OBD_FAIL_MDS_HSM_ACTION_NET 0x150
-#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
-
-/* layout lock */
-#define OBD_FAIL_MDS_NO_LL_GETATTR 0x170
-#define OBD_FAIL_MDS_NO_LL_OPEN 0x171
-#define OBD_FAIL_MDS_LL_BLOCK 0x172
-
-/* CMD */
-#define OBD_FAIL_MDS_IS_SUBDIR_NET 0x180
-#define OBD_FAIL_MDS_IS_SUBDIR_PACK 0x181
-#define OBD_FAIL_MDS_SET_INFO_NET 0x182
-#define OBD_FAIL_MDS_WRITEPAGE_NET 0x183
-#define OBD_FAIL_MDS_WRITEPAGE_PACK 0x184
-#define OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS 0x185
-#define OBD_FAIL_MDS_GET_INFO_NET 0x186
-#define OBD_FAIL_MDS_DQACQ_NET 0x187
-
-/* OI scrub */
-#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
-#define OBD_FAIL_OSD_SCRUB_CRASH 0x191
-#define OBD_FAIL_OSD_SCRUB_FATAL 0x192
-#define OBD_FAIL_OSD_FID_MAPPING 0x193
-#define OBD_FAIL_OSD_LMA_INCOMPAT 0x194
-#define OBD_FAIL_OSD_COMPAT_INVALID_ENTRY 0x195
-
-#define OBD_FAIL_OST 0x200
-#define OBD_FAIL_OST_CONNECT_NET 0x201
-#define OBD_FAIL_OST_DISCONNECT_NET 0x202
-#define OBD_FAIL_OST_GET_INFO_NET 0x203
-#define OBD_FAIL_OST_CREATE_NET 0x204
-#define OBD_FAIL_OST_DESTROY_NET 0x205
-#define OBD_FAIL_OST_GETATTR_NET 0x206
-#define OBD_FAIL_OST_SETATTR_NET 0x207
-#define OBD_FAIL_OST_OPEN_NET 0x208
-#define OBD_FAIL_OST_CLOSE_NET 0x209
-#define OBD_FAIL_OST_BRW_NET 0x20a
-#define OBD_FAIL_OST_PUNCH_NET 0x20b
-#define OBD_FAIL_OST_STATFS_NET 0x20c
-#define OBD_FAIL_OST_HANDLE_UNPACK 0x20d
-#define OBD_FAIL_OST_BRW_WRITE_BULK 0x20e
-#define OBD_FAIL_OST_BRW_READ_BULK 0x20f
-#define OBD_FAIL_OST_SYNC_NET 0x210
-#define OBD_FAIL_OST_ALL_REPLY_NET 0x211
-#define OBD_FAIL_OST_ALL_REQUEST_NET 0x212
-#define OBD_FAIL_OST_LDLM_REPLY_NET 0x213
-#define OBD_FAIL_OST_BRW_PAUSE_BULK 0x214
-#define OBD_FAIL_OST_ENOSPC 0x215
-#define OBD_FAIL_OST_EROFS 0x216
-#define OBD_FAIL_OST_ENOENT 0x217
-#define OBD_FAIL_OST_QUOTACHECK_NET 0x218
-#define OBD_FAIL_OST_QUOTACTL_NET 0x219
-#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a
-#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b
-#define OBD_FAIL_OST_BRW_SIZE 0x21c
-#define OBD_FAIL_OST_DROP_REQ 0x21d
-#define OBD_FAIL_OST_SETATTR_CREDITS 0x21e
-#define OBD_FAIL_OST_HOLD_WRITE_RPC 0x21f
-#define OBD_FAIL_OST_BRW_WRITE_BULK2 0x220
-#define OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
-#define OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
-#define OBD_FAIL_OST_PAUSE_CREATE 0x223
-#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
-#define OBD_FAIL_OST_CONNECT_NET2 0x225
-#define OBD_FAIL_OST_NOMEM 0x226
-#define OBD_FAIL_OST_BRW_PAUSE_BULK2 0x227
-#define OBD_FAIL_OST_MAPBLK_ENOSPC 0x228
-#define OBD_FAIL_OST_ENOINO 0x229
-#define OBD_FAIL_OST_DQACQ_NET 0x230
-#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
-
-#define OBD_FAIL_LDLM 0x300
-#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301
-#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
-#define OBD_FAIL_LDLM_CONVERT_NET 0x303
-#define OBD_FAIL_LDLM_CANCEL_NET 0x304
-#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
-#define OBD_FAIL_LDLM_CP_CALLBACK_NET 0x306
-#define OBD_FAIL_LDLM_GL_CALLBACK_NET 0x307
-#define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
-#define OBD_FAIL_LDLM_ENQUEUE_INTENT_ERR 0x309
-#define OBD_FAIL_LDLM_CREATE_RESOURCE 0x30a
-#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
-#define OBD_FAIL_LDLM_REPLY 0x30c
-#define OBD_FAIL_LDLM_RECOV_CLIENTS 0x30d
-#define OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT 0x30e
-#define OBD_FAIL_LDLM_GLIMPSE 0x30f
-#define OBD_FAIL_LDLM_CANCEL_RACE 0x310
-#define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311
-#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
-#define OBD_FAIL_LDLM_CLOSE_THREAD 0x313
-#define OBD_FAIL_LDLM_CANCEL_BL_CB_RACE 0x314
-#define OBD_FAIL_LDLM_CP_CB_WAIT 0x315
-#define OBD_FAIL_LDLM_OST_FAIL_RACE 0x316
-#define OBD_FAIL_LDLM_INTR_CP_AST 0x317
-#define OBD_FAIL_LDLM_CP_BL_RACE 0x318
-#define OBD_FAIL_LDLM_NEW_LOCK 0x319
-#define OBD_FAIL_LDLM_AGL_DELAY 0x31a
-#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
-#define OBD_FAIL_LDLM_OST_LVB 0x31c
-
-/* LOCKLESS IO */
-#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
-
-#define OBD_FAIL_OSC 0x400
-#define OBD_FAIL_OSC_BRW_READ_BULK 0x401
-#define OBD_FAIL_OSC_BRW_WRITE_BULK 0x402
-#define OBD_FAIL_OSC_LOCK_BL_AST 0x403
-#define OBD_FAIL_OSC_LOCK_CP_AST 0x404
-#define OBD_FAIL_OSC_MATCH 0x405
-#define OBD_FAIL_OSC_BRW_PREP_REQ 0x406
-#define OBD_FAIL_OSC_SHUTDOWN 0x407
-#define OBD_FAIL_OSC_CHECKSUM_RECEIVE 0x408
-#define OBD_FAIL_OSC_CHECKSUM_SEND 0x409
-#define OBD_FAIL_OSC_BRW_PREP_REQ2 0x40a
-#define OBD_FAIL_OSC_CONNECT_CKSUM 0x40b
-#define OBD_FAIL_OSC_CKSUM_ADLER_ONLY 0x40c
-#define OBD_FAIL_OSC_DIO_PAUSE 0x40d
-#define OBD_FAIL_OSC_OBJECT_CONTENTION 0x40e
-#define OBD_FAIL_OSC_CP_CANCEL_RACE 0x40f
-#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410
-#define OBD_FAIL_OSC_NO_GRANT 0x411
-#define OBD_FAIL_OSC_DELAY_SETTIME 0x412
-
-#define OBD_FAIL_PTLRPC 0x500
-#define OBD_FAIL_PTLRPC_ACK 0x501
-#define OBD_FAIL_PTLRPC_RQBD 0x502
-#define OBD_FAIL_PTLRPC_BULK_GET_NET 0x503
-#define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504
-#define OBD_FAIL_PTLRPC_DROP_RPC 0x505
-#define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
-#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
-#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB 0x508
-#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
-#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
-#define OBD_FAIL_PTLRPC_IMP_DEACTIVE 0x50d
-#define OBD_FAIL_PTLRPC_DUMP_LOG 0x50e
-#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
-#define OBD_FAIL_PTLRPC_LONG_BULK_UNLINK 0x510
-#define OBD_FAIL_PTLRPC_HPREQ_TIMEOUT 0x511
-#define OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT 0x512
-#define OBD_FAIL_PTLRPC_DROP_REQ_OPC 0x513
-#define OBD_FAIL_PTLRPC_FINISH_REPLAY 0x514
-#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB2 0x515
-#define OBD_FAIL_PTLRPC_DELAY_IMP_FULL 0x516
-#define OBD_FAIL_PTLRPC_CANCEL_RESEND 0x517
-
-#define OBD_FAIL_OBD_PING_NET 0x600
-#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
-#define OBD_FAIL_OBD_LOGD_NET 0x602
-#define OBD_FAIL_OBD_QC_CALLBACK_NET 0x603
-#define OBD_FAIL_OBD_DQACQ 0x604
-#define OBD_FAIL_OBD_LLOG_SETUP 0x605
-#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606
-#define OBD_FAIL_OBD_IDX_READ_NET 0x607
-#define OBD_FAIL_OBD_IDX_READ_BREAK 0x608
-#define OBD_FAIL_OBD_NO_LRU 0x609
-
-#define OBD_FAIL_TGT_REPLY_NET 0x700
-#define OBD_FAIL_TGT_CONN_RACE 0x701
-#define OBD_FAIL_TGT_FORCE_RECONNECT 0x702
-#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
-#define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
-#define OBD_FAIL_TGT_DELAY_PRECREATE 0x705
-#define OBD_FAIL_TGT_TOOMANY_THREADS 0x706
-#define OBD_FAIL_TGT_REPLAY_DROP 0x707
-#define OBD_FAIL_TGT_FAKE_EXP 0x708
-#define OBD_FAIL_TGT_REPLAY_DELAY 0x709
-#define OBD_FAIL_TGT_LAST_REPLAY 0x710
-#define OBD_FAIL_TGT_CLIENT_ADD 0x711
-#define OBD_FAIL_TGT_RCVG_FLAG 0x712
-#define OBD_FAIL_TGT_DELAY_CONDITIONAL 0x713
-
-#define OBD_FAIL_MDC_REVALIDATE_PAUSE 0x800
-#define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
-#define OBD_FAIL_MDC_OLD_EXT_FLAGS 0x802
-#define OBD_FAIL_MDC_GETATTR_ENQUEUE 0x803
-#define OBD_FAIL_MDC_RPCS_SEM 0x804
-#define OBD_FAIL_MDC_LIGHTWEIGHT 0x805
-
-#define OBD_FAIL_MGS 0x900
-#define OBD_FAIL_MGS_ALL_REQUEST_NET 0x901
-#define OBD_FAIL_MGS_ALL_REPLY_NET 0x902
-#define OBD_FAIL_MGC_PAUSE_PROCESS_LOG 0x903
-#define OBD_FAIL_MGS_PAUSE_REQ 0x904
-#define OBD_FAIL_MGS_PAUSE_TARGET_REG 0x905
-#define OBD_FAIL_MGS_CONNECT_NET 0x906
-#define OBD_FAIL_MGS_DISCONNECT_NET 0x907
-#define OBD_FAIL_MGS_SET_INFO_NET 0x908
-#define OBD_FAIL_MGS_EXCEPTION_NET 0x909
-#define OBD_FAIL_MGS_TARGET_REG_NET 0x90a
-#define OBD_FAIL_MGS_TARGET_DEL_NET 0x90b
-#define OBD_FAIL_MGS_CONFIG_READ_NET 0x90c
-
-#define OBD_FAIL_QUOTA_DQACQ_NET 0xA01
-#define OBD_FAIL_QUOTA_EDQUOT 0xA02
-#define OBD_FAIL_QUOTA_DELAY_REINT 0xA03
-#define OBD_FAIL_QUOTA_RECOVERABLE_ERR 0xA04
-
-#define OBD_FAIL_LPROC_REMOVE 0xB00
-
-#define OBD_FAIL_SEQ 0x1000
-#define OBD_FAIL_SEQ_QUERY_NET 0x1001
-#define OBD_FAIL_SEQ_EXHAUST 0x1002
-
-#define OBD_FAIL_FLD 0x1100
-#define OBD_FAIL_FLD_QUERY_NET 0x1101
-
-#define OBD_FAIL_SEC_CTX 0x1200
-#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201
-#define OBD_FAIL_SEC_CTX_INIT_CONT_NET 0x1202
-#define OBD_FAIL_SEC_CTX_FINI_NET 0x1203
-#define OBD_FAIL_SEC_CTX_HDL_PAUSE 0x1204
-
-#define OBD_FAIL_LLOG 0x1300
-#define OBD_FAIL_LLOG_ORIGIN_CONNECT_NET 0x1301
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CREATE_NET 0x1302
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_DESTROY_NET 0x1303
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_READ_HEADER_NET 0x1304
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_NEXT_BLOCK_NET 0x1305
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_PREV_BLOCK_NET 0x1306
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_WRITE_REC_NET 0x1307
-#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CLOSE_NET 0x1308
-#define OBD_FAIL_LLOG_CATINFO_NET 0x1309
-#define OBD_FAIL_MDS_SYNC_CAPA_SL 0x1310
-#define OBD_FAIL_SEQ_ALLOC 0x1311
-
-#define OBD_FAIL_LLITE 0x1400
-#define OBD_FAIL_LLITE_FAULT_TRUNC_RACE 0x1401
-#define OBD_FAIL_LOCK_STATE_WAIT_INTR 0x1402
-#define OBD_FAIL_LOV_INIT 0x1403
-#define OBD_FAIL_GLIMPSE_DELAY 0x1404
-#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
-
-#define OBD_FAIL_FID_INDIR 0x1501
-#define OBD_FAIL_FID_INLMA 0x1502
-#define OBD_FAIL_FID_IGIF 0x1504
-#define OBD_FAIL_FID_LOOKUP 0x1505
-#define OBD_FAIL_FID_NOLMA 0x1506
-
-/* LFSCK */
-#define OBD_FAIL_LFSCK_DELAY1 0x1600
-#define OBD_FAIL_LFSCK_DELAY2 0x1601
-#define OBD_FAIL_LFSCK_DELAY3 0x1602
-#define OBD_FAIL_LFSCK_LINKEA_CRASH 0x1603
-#define OBD_FAIL_LFSCK_LINKEA_MORE 0x1604
-#define OBD_FAIL_LFSCK_LINKEA_MORE2 0x1605
-#define OBD_FAIL_LFSCK_FATAL1 0x1608
-#define OBD_FAIL_LFSCK_FATAL2 0x1609
-#define OBD_FAIL_LFSCK_CRASH 0x160a
-#define OBD_FAIL_LFSCK_NO_AUTO 0x160b
-#define OBD_FAIL_LFSCK_NO_DOUBLESCAN 0x160c
-
-/* UPDATE */
-#define OBD_FAIL_UPDATE_OBJ_NET 0x1700
-#define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
-
-
-/* Assign references to moved code to reduce code changes */
-#define OBD_FAIL_PRECHECK(id) CFS_FAIL_PRECHECK(id)
-#define OBD_FAIL_CHECK(id) CFS_FAIL_CHECK(id)
-#define OBD_FAIL_CHECK_VALUE(id, value) CFS_FAIL_CHECK_VALUE(id, value)
-#define OBD_FAIL_CHECK_ORSET(id, value) CFS_FAIL_CHECK_ORSET(id, value)
-#define OBD_FAIL_CHECK_RESET(id, value) CFS_FAIL_CHECK_RESET(id, value)
-#define OBD_FAIL_RETURN(id, ret) CFS_FAIL_RETURN(id, ret)
-#define OBD_FAIL_TIMEOUT(id, secs) CFS_FAIL_TIMEOUT(id, secs)
-#define OBD_FAIL_TIMEOUT_MS(id, ms) CFS_FAIL_TIMEOUT_MS(id, ms)
-#define OBD_FAIL_TIMEOUT_ORSET(id, value, secs) CFS_FAIL_TIMEOUT_ORSET(id, value, secs)
-#define OBD_RACE(id) CFS_RACE(id)
-#define OBD_FAIL_ONCE CFS_FAIL_ONCE
-#define OBD_FAILED CFS_FAILED
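The OBD_FAIL_* values above are fault-injection identifiers: a test harness arms one of them through the libcfs fail_loc machinery that CFS_FAIL_CHECK() consults, and the matching code path then fails or stalls on purpose. The fragment below is a self-contained illustration of that idea only; the demo_* names and the simple global are hypothetical and not the libcfs implementation.

/* Stand-alone illustration of a fail-loc style check; the real
 * OBD_FAIL_CHECK() maps to CFS_FAIL_CHECK() in libcfs. */
#include <stdio.h>

static unsigned long demo_fail_loc;		/* armed by a test harness */

#define DEMO_FAIL_CHECK(id)	(demo_fail_loc == (id))

#define DEMO_FAIL_MDS_GETATTR_NET	0x102	/* mirrors the table above */

static int demo_handle_getattr(void)
{
	if (DEMO_FAIL_CHECK(DEMO_FAIL_MDS_GETATTR_NET)) {
		fprintf(stderr, "injected getattr failure\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	demo_fail_loc = DEMO_FAIL_MDS_GETATTR_NET;	/* arm the fail point */
	return demo_handle_getattr() ? 1 : 0;
}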
-
-#ifdef CONFIG_DEBUG_SLAB
-#define POISON(ptr, c, s) do {} while (0)
-#define POISON_PTR(ptr) ((void)0)
-#else
-#define POISON(ptr, c, s) memset(ptr, c, s)
-#define POISON_PTR(ptr) (ptr) = (void *)0xdeadbeef
-#endif
-
-#ifdef POISON_BULK
-#define POISON_PAGE(page, val) do { \
- memset(kmap(page), val, PAGE_CACHE_SIZE); \
- kunmap(page); \
-} while (0)
-#else
-#define POISON_PAGE(page, val) do { } while (0)
-#endif
-
-#define OBD_FREE_RCU(ptr, size, handle) \
-do { \
- struct portals_handle *__h = (handle); \
- \
- LASSERT(handle != NULL); \
- __h->h_cookie = (unsigned long)(ptr); \
- __h->h_size = (size); \
- call_rcu(&__h->h_rcu, class_handle_free_cb); \
- POISON_PTR(ptr); \
-} while (0)
-
-/* we memset() the slab object to 0 when allocation succeeds, so DO NOT
- * HAVE A CTOR THAT DOES ANYTHING. its work will be cleared here. we'd
- * love to assert on that, but slab.c keeps kmem_cache_s all to itself. */
-#define OBD_SLAB_FREE_RTN0(ptr, slab) \
-({ \
- kmem_cache_free((slab), (ptr)); \
- (ptr) = NULL; \
- 0; \
-})
-
-#define __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, type) \
-do { \
- LASSERT(ergo((type) != GFP_ATOMIC, !in_interrupt())); \
- (ptr) = (cptab) == NULL ? \
- kmem_cache_alloc(slab, type | __GFP_ZERO) : \
- kmem_cache_alloc_node(slab, type | __GFP_ZERO, \
- cfs_cpt_spread_node(cptab, cpt)); \
-} while (0)
-
-#define OBD_SLAB_ALLOC_GFP(ptr, slab, size, flags) \
- __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, NULL, 0, size, flags)
-#define OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, flags) \
- __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, flags)
-
-#define OBD_SLAB_FREE(ptr, slab, size) \
-do { \
- kmem_cache_free(slab, ptr); \
- POISON_PTR(ptr); \
-} while (0)
-
-#define OBD_SLAB_ALLOC(ptr, slab, size) \
- OBD_SLAB_ALLOC_GFP(ptr, slab, size, GFP_NOFS)
-
-#define OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, size) \
- OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, GFP_NOFS)
-
-#define OBD_SLAB_ALLOC_PTR(ptr, slab) \
- OBD_SLAB_ALLOC(ptr, slab, sizeof(*(ptr)))
-
-#define OBD_SLAB_CPT_ALLOC_PTR(ptr, slab, cptab, cpt) \
- OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, sizeof(*(ptr)))
-
-#define OBD_SLAB_ALLOC_PTR_GFP(ptr, slab, flags) \
- OBD_SLAB_ALLOC_GFP(ptr, slab, sizeof(*(ptr)), flags)
-
-#define OBD_SLAB_CPT_ALLOC_PTR_GFP(ptr, slab, cptab, cpt, flags) \
- OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, sizeof(*(ptr)), flags)
-
-#define OBD_SLAB_FREE_PTR(ptr, slab) \
- OBD_SLAB_FREE((ptr), (slab), sizeof(*(ptr)))
-
-#define KEY_IS(str) \
- (keylen >= (sizeof(str)-1) && memcmp(key, str, (sizeof(str)-1)) == 0)
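KEY_IS() deliberately relies on variables named key and keylen already being in scope at the call site, typically the arguments of a get_info/set_info style handler. Below is a minimal stand-alone illustration; the handler and the key string are hypothetical, only the macro itself is taken from the header above.

/* KEY_IS() copied from above; demo_get_info() and the key name are
 * illustrative only. */
#include <stdio.h>
#include <string.h>

#define KEY_IS(str) \
	(keylen >= (sizeof(str)-1) && memcmp(key, str, (sizeof(str)-1)) == 0)

static int demo_get_info(const char *key, size_t keylen)
{
	if (KEY_IS("max_easize")) {	/* hypothetical key name */
		printf("matched max_easize\n");
		return 0;
	}
	return -1;
}

int main(void)
{
	const char *key = "max_easize";

	return demo_get_info(key, strlen(key));
}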
-
-/* Wrapper for contiguous page frame allocation */
-#define __OBD_PAGE_ALLOC_VERBOSE(ptr, cptab, cpt, gfp_mask) \
-do { \
- (ptr) = (cptab) == NULL ? \
- alloc_page(gfp_mask) : \
- alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), gfp_mask, 0);\
- if (ptr) { \
- CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / " \
- "%llu bytes at %p.\n", \
- (int)1, \
- (__u64)(1 << PAGE_CACHE_SHIFT), ptr); \
- } \
-} while (0)
-
-#define OBD_PAGE_ALLOC(ptr, gfp_mask) \
- __OBD_PAGE_ALLOC_VERBOSE(ptr, NULL, 0, gfp_mask)
-#define OBD_PAGE_CPT_ALLOC(ptr, cptab, cpt, gfp_mask) \
- __OBD_PAGE_ALLOC_VERBOSE(ptr, cptab, cpt, gfp_mask)
-
-#define OBD_PAGE_FREE(ptr) \
-do { \
- LASSERT(ptr); \
- CDEBUG(D_MALLOC, "free_pages '" #ptr "': %d page(s) / %llu bytes " \
- "at %p.\n", \
- (int)1, (__u64)(1 << PAGE_CACHE_SHIFT), \
- ptr); \
- __free_page(ptr); \
- (ptr) = (void *)0xdeadbeef; \
-} while (0)
-
-#endif
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c
deleted file mode 100644
index b9f2bb66de21..000000000000
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * glimpse code shared between vvp and liblustre (and other Lustre clients in
- * the future).
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Oleg Drokin <oleg.drokin@sun.com>
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/obd.h"
-
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_mdc.h"
-#include <linux/pagemap.h>
-#include <linux/file.h>
-
-#include "../include/cl_object.h"
-#include "../include/lclient.h"
-#include "../llite/llite_internal.h"
-
-static const struct cl_lock_descr whole_file = {
- .cld_start = 0,
- .cld_end = CL_PAGE_EOF,
- .cld_mode = CLM_READ
-};
-
-/*
- * Check whether the file possibly has unwritten pages.
- *
- * \retval 1 file is mmap-ed or has dirty pages
- * 0 otherwise
- */
-blkcnt_t dirty_cnt(struct inode *inode)
-{
- blkcnt_t cnt = 0;
- struct ccc_object *vob = cl_inode2ccc(inode);
- void *results[1];
-
- if (inode->i_mapping != NULL)
- cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
- results, 0, 1,
- PAGECACHE_TAG_DIRTY);
- if (cnt == 0 && atomic_read(&vob->cob_mmap_cnt) > 0)
- cnt = 1;
-
- return (cnt > 0) ? 1 : 0;
-}
-
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
- struct inode *inode, struct cl_object *clob, int agl)
-{
- struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr;
- struct cl_inode_info *lli = cl_i2info(inode);
- const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock *lock;
- int result;
-
- result = 0;
- if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
- CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
- if (lli->lli_has_smd) {
- /* NOTE: this looks like DLM lock request, but it may
- * not be one. Due to CEF_ASYNC flag (translated
- * to LDLM_FL_HAS_INTENT by osc), this is
- * glimpse request, that won't revoke any
- * conflicting DLM locks held. Instead,
- * ll_glimpse_callback() will be called on each
- * client holding a DLM lock against this file,
- * and resulting size will be returned for each
- * stripe. DLM lock on [0, EOF] is acquired only
- * if there were no conflicting locks. If there
- * were conflicting locks, enqueuing or waiting
- * fails with -ENAVAIL, but valid inode
- * attributes are returned anyway. */
- *descr = whole_file;
- descr->cld_obj = clob;
- descr->cld_mode = CLM_PHANTOM;
- descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
- if (agl)
- descr->cld_enq_flags |= CEF_AGL;
- cio->cui_glimpse = 1;
- /*
- * CEF_ASYNC is used because glimpse sub-locks cannot
- * deadlock (because they never conflict with other
- * locks) and, hence, can be enqueued out-of-order.
- *
- * CEF_MUST protects glimpse lock from conversion into
- * a lockless mode.
- */
- lock = cl_lock_request(env, io, descr, "glimpse",
- current);
- cio->cui_glimpse = 0;
-
- if (lock == NULL)
- return 0;
-
- if (IS_ERR(lock))
- return PTR_ERR(lock);
-
- LASSERT(agl == 0);
- result = cl_wait(env, lock);
- if (result == 0) {
- cl_merge_lvb(env, inode);
- if (cl_isize_read(inode) > 0 &&
- inode->i_blocks == 0) {
- /*
- * LU-417: Add dirty pages block count
-				 * lest i_blocks report 0; some "cp" or
- * "tar" may think it's a completely
- * sparse file and skip it.
- */
- inode->i_blocks = dirty_cnt(inode);
- }
- cl_unuse(env, lock);
- }
- cl_lock_release(env, lock, "glimpse", current);
- } else {
- CDEBUG(D_DLMTRACE, "No objects for inode\n");
- cl_merge_lvb(env, inode);
- }
- }
-
- return result;
-}
-
-static int cl_io_get(struct inode *inode, struct lu_env **envout,
- struct cl_io **ioout, int *refcheck)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct cl_inode_info *lli = cl_i2info(inode);
- struct cl_object *clob = lli->lli_clob;
- int result;
-
- if (S_ISREG(cl_inode_mode(inode))) {
- env = cl_env_get(refcheck);
- if (!IS_ERR(env)) {
- io = ccc_env_thread_io(env);
- io->ci_obj = clob;
- *envout = env;
- *ioout = io;
- result = 1;
- } else
- result = PTR_ERR(env);
- } else
- result = 0;
- return result;
-}
-
-int cl_glimpse_size0(struct inode *inode, int agl)
-{
- /*
- * We don't need ast_flags argument to cl_glimpse_size(), because
- * osc_lock_enqueue() takes care of the possible deadlock that said
- * argument was introduced to avoid.
- */
- /*
- * XXX but note that ll_file_seek() passes LDLM_FL_BLOCK_NOWAIT to
- * cl_glimpse_size(), which doesn't make sense: glimpse locks are not
- * blocking anyway.
- */
- struct lu_env *env = NULL;
- struct cl_io *io = NULL;
- int result;
- int refcheck;
-
- result = cl_io_get(inode, &env, &io, &refcheck);
- if (result > 0) {
-again:
- io->ci_verify_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result > 0)
- /*
-			 * Nothing to do for this io. This currently happens
-			 * when stripe sub-objects are not yet created.
- */
- result = io->ci_result;
- else if (result == 0)
- result = cl_glimpse_lock(env, io, inode, io->ci_obj,
- agl);
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_GLIMPSE_DELAY, 2);
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
- cl_env_put(env, &refcheck);
- }
- return result;
-}
-
-int cl_local_size(struct inode *inode)
-{
- struct lu_env *env = NULL;
- struct cl_io *io = NULL;
- struct ccc_thread_info *cti;
- struct cl_object *clob;
- struct cl_lock_descr *descr;
- struct cl_lock *lock;
- int result;
- int refcheck;
-
- if (!cl_i2info(inode)->lli_has_smd)
- return 0;
-
- result = cl_io_get(inode, &env, &io, &refcheck);
- if (result <= 0)
- return result;
-
- clob = io->ci_obj;
- result = cl_io_init(env, io, CIT_MISC, clob);
- if (result > 0)
- result = io->ci_result;
- else if (result == 0) {
- cti = ccc_env_info(env);
- descr = &cti->cti_descr;
-
- *descr = whole_file;
- descr->cld_obj = clob;
- lock = cl_lock_peek(env, io, descr, "localsize", current);
- if (lock != NULL) {
- cl_merge_lvb(env, inode);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, "localsize", current);
- result = 0;
- } else
- result = -ENODATA;
- }
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- return result;
-}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
deleted file mode 100644
index f049d5598ad4..000000000000
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ /dev/null
@@ -1,1277 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../../include/linux/libcfs/libcfs.h"
-# include <linux/fs.h>
-# include <linux/sched.h>
-# include <linux/mm.h>
-# include <linux/quotaops.h>
-# include <linux/highmem.h>
-# include <linux/pagemap.h>
-# include <linux/rbtree.h>
-
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_ver.h"
-#include "../include/lustre_mdc.h"
-#include "../include/cl_object.h"
-
-#include "../include/lclient.h"
-
-#include "../llite/llite_internal.h"
-
-static const struct cl_req_operations ccc_req_ops;
-
-/*
- * ccc_ prefix stands for "Common Client Code".
- */
-
-static struct kmem_cache *ccc_lock_kmem;
-static struct kmem_cache *ccc_object_kmem;
-static struct kmem_cache *ccc_thread_kmem;
-static struct kmem_cache *ccc_session_kmem;
-static struct kmem_cache *ccc_req_kmem;
-
-static struct lu_kmem_descr ccc_caches[] = {
- {
- .ckd_cache = &ccc_lock_kmem,
- .ckd_name = "ccc_lock_kmem",
- .ckd_size = sizeof(struct ccc_lock)
- },
- {
- .ckd_cache = &ccc_object_kmem,
- .ckd_name = "ccc_object_kmem",
- .ckd_size = sizeof(struct ccc_object)
- },
- {
- .ckd_cache = &ccc_thread_kmem,
- .ckd_name = "ccc_thread_kmem",
- .ckd_size = sizeof(struct ccc_thread_info),
- },
- {
- .ckd_cache = &ccc_session_kmem,
- .ckd_name = "ccc_session_kmem",
- .ckd_size = sizeof(struct ccc_session)
- },
- {
- .ckd_cache = &ccc_req_kmem,
- .ckd_name = "ccc_req_kmem",
- .ckd_size = sizeof(struct ccc_req)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
-{
- struct ccc_thread_info *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-void ccc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_thread_info *info = data;
-
- OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
-}
-
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct ccc_session *session;
-
- OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS);
- if (session == NULL)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_session *session = data;
-
- OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
-}
-
-struct lu_context_key ccc_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = ccc_key_init,
- .lct_fini = ccc_key_fini
-};
-
-struct lu_context_key ccc_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = ccc_session_key_init,
- .lct_fini = ccc_session_key_fini
-};
-
-
-/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
-/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
-
-int ccc_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct ccc_device *vdv;
- int rc;
-
- vdv = lu2ccc_dev(d);
- vdv->cdv_next = lu2cl_dev(next);
-
- LASSERT(d->ld_site != NULL && next->ld_type != NULL);
- next->ld_site = d->ld_site;
- rc = next->ld_type->ldt_ops->ldto_device_init(
- env, next, next->ld_type->ldt_name, NULL);
- if (rc == 0) {
- lu_device_get(next);
- lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
- }
- return rc;
-}
-
-struct lu_device *ccc_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
-}
-
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops)
-{
- struct ccc_device *vdv;
- struct lu_device *lud;
- struct cl_site *site;
- int rc;
-
- vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
- if (!vdv)
- return ERR_PTR(-ENOMEM);
-
- lud = &vdv->cdv_cl.cd_lu_dev;
- cl_device_init(&vdv->cdv_cl, t);
- ccc2lu_dev(vdv)->ld_ops = luops;
- vdv->cdv_cl.cd_ops = clops;
-
- site = kzalloc(sizeof(*site), GFP_NOFS);
- if (site != NULL) {
- rc = cl_site_init(site, &vdv->cdv_cl);
- if (rc == 0)
- rc = lu_site_init_finish(&site->cs_lu);
- else {
- LASSERT(lud->ld_site == NULL);
- CERROR("Cannot init lu_site, rc %d.\n", rc);
- kfree(site);
- }
- } else
- rc = -ENOMEM;
- if (rc != 0) {
- ccc_device_free(env, lud);
- lud = ERR_PTR(rc);
- }
- return lud;
-}
-
-struct lu_device *ccc_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct ccc_device *vdv = lu2ccc_dev(d);
- struct cl_site *site = lu2cl_site(d->ld_site);
- struct lu_device *next = cl2lu_dev(vdv->cdv_next);
-
- if (d->ld_site != NULL) {
- cl_site_fini(site);
- kfree(site);
- }
- cl_device_fini(lu2cl_dev(d));
- kfree(vdv);
- return next;
-}
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct ccc_req *vrq;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, GFP_NOFS);
- if (vrq != NULL) {
- cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-/**
- * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
- * fails. Access to this environment is serialized by ccc_inode_fini_guard
- * mutex.
- */
-static struct lu_env *ccc_inode_fini_env;
-
-/**
- * A mutex serializing calls to slp_inode_fini() under extreme memory
- * pressure, when environments cannot be allocated.
- */
-static DEFINE_MUTEX(ccc_inode_fini_guard);
-static int dummy_refcheck;
-
-int ccc_global_init(struct lu_device_type *device_type)
-{
- int result;
-
- result = lu_kmem_init(ccc_caches);
- if (result)
- return result;
-
- result = lu_device_type_init(device_type);
- if (result)
- goto out_kmem;
-
- ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
- LCT_REMEMBER|LCT_NOREF);
- if (IS_ERR(ccc_inode_fini_env)) {
- result = PTR_ERR(ccc_inode_fini_env);
- goto out_device;
- }
-
- ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
- return 0;
-out_device:
- lu_device_type_fini(device_type);
-out_kmem:
- lu_kmem_fini(ccc_caches);
- return result;
-}
-
-void ccc_global_fini(struct lu_device_type *device_type)
-{
- if (ccc_inode_fini_env != NULL) {
- cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
- ccc_inode_fini_env = NULL;
- }
- lu_device_type_fini(device_type);
- lu_kmem_fini(ccc_caches);
-}
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops)
-{
- struct ccc_object *vob;
- struct lu_object *obj;
-
- OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, GFP_NOFS);
- if (vob != NULL) {
- struct cl_object_header *hdr;
-
- obj = ccc2lu(vob);
- hdr = &vob->cob_header;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
-
- vob->cob_cl.co_ops = clops;
- obj->lo_ops = luops;
- } else
- obj = NULL;
- return obj;
-}
-
-int ccc_object_init0(const struct lu_env *env,
- struct ccc_object *vob,
- const struct cl_object_conf *conf)
-{
- vob->cob_inode = conf->coc_inode;
- vob->cob_transient_pages = 0;
- cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
- return 0;
-}
-
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
- struct ccc_object *vob = lu2ccc(obj);
- struct lu_object *below;
- struct lu_device *under;
- int result;
-
- under = &dev->cdv_next->cd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below != NULL) {
- const struct cl_object_conf *cconf;
-
- cconf = lu2cl_conf(conf);
- INIT_LIST_HEAD(&vob->cob_pending_list);
- lu_object_add(obj, below);
- result = ccc_object_init0(env, vob, cconf);
- } else
- result = -ENOMEM;
- return result;
-}
-
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct ccc_object *vob = lu2ccc(obj);
-
- lu_object_fini(obj);
- lu_object_header_fini(obj->lo_header);
- OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
-}
-
-int ccc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused,
- const struct cl_lock_operations *lkops)
-{
- struct ccc_lock *clk;
- int result;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, GFP_NOFS);
- if (clk != NULL) {
- cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
-{
- return 0;
-}
-
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- lvb->lvb_mtime = cl_inode_mtime(inode);
- lvb->lvb_atime = cl_inode_atime(inode);
- lvb->lvb_ctime = cl_inode_ctime(inode);
- /*
-	 * LU-417: Add the dirty pages block count lest i_blocks report 0;
-	 * otherwise "cp" or "tar" on a remote node may think it's a completely
-	 * sparse file and skip it.
- */
- if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
- lvb->lvb_blocks = dirty_cnt(inode);
- return 0;
-}
-
-
-
-int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- /* TODO: destroy all pages attached to this object. */
- return 0;
-}
-
-static void ccc_object_size_lock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- cl_isize_lock(inode);
- cl_object_attr_lock(obj);
-}
-
-static void ccc_object_size_unlock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- cl_object_attr_unlock(obj);
- cl_isize_unlock(inode);
-}
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2vm_page(slice);
-}
-
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
- struct cl_page *page = slice->cpl_page;
-
- int result;
-
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- io->ci_type == CIT_FAULT) {
- if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
- result = -EBUSY;
- else {
- desc->cld_start = page->cp_index;
- desc->cld_end = page->cp_index;
- desc->cld_obj = page->cp_obj;
- desc->cld_mode = CLM_READ;
- result = cl_queue_match(&io->ci_lockset.cls_done,
- desc) ? -EBUSY : 0;
- }
- } else
- result = 0;
- return result;
-}
-
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
-void ccc_transient_page_verify(const struct cl_page *page)
-{
-}
-
-int ccc_transient_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused,
- int nonblock)
-{
- ccc_transient_page_verify(slice->cpl_page);
- return 0;
-}
-
-void ccc_transient_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- ccc_transient_page_verify(slice->cpl_page);
-}
-
-void ccc_transient_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- ccc_transient_page_verify(slice->cpl_page);
-}
-
-void ccc_transient_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- ccc_transient_page_verify(slice->cpl_page);
-}
-
-void ccc_transient_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct cl_page *page = slice->cpl_page;
-
- ccc_transient_page_verify(slice->cpl_page);
-
-	/*
-	 * For a transient page, remove it from the radix tree.
-	 */
- cl_page_delete(env, page);
-}
-
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /* transient page should always be sent. */
- return 0;
-}
-
-/*****************************************************************************
- *
- * Lock operations.
- *
- */
-
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
-}
-
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
-{
- struct ccc_lock *clk = cl2ccc_lock(slice);
-
- OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
-}
-
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-/**
- * Implementation of the cl_lock_operations::clo_fits_into() method for the
- * ccc layer. This function is executed every time an io finds an existing
- * lock in the lock cache while creating a new lock, and has to decide
- * whether the cached lock "fits" the io.
- *
- * \param slice lock to be checked
- * \param io IO that wants a lock.
- *
- * \see lov_lock_fits_into().
- */
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock *lock = slice->cls_lock;
- const struct cl_lock_descr *descr = &lock->cll_descr;
- const struct ccc_io *cio = ccc_env_io(env);
- int result;
-
- /*
-	 * Work around a DLM peculiarity: it assumes that a glimpse
-	 * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read lock
-	 * when asked for an LCK_PW lock with the LDLM_FL_HAS_INTENT flag set.
-	 * Make sure that glimpse doesn't get a CLM_WRITE top-lock, so that it
-	 * doesn't enqueue CLM_WRITE sub-locks.
- */
- if (cio->cui_glimpse)
- result = descr->cld_mode != CLM_WRITE;
-
- /*
- * Also, don't match incomplete write locks for read, otherwise read
- * would enqueue missing sub-locks in the write mode.
- */
- else if (need->cld_mode != descr->cld_mode)
- result = lock->cll_state >= CLS_ENQUEUED;
- else
- result = 1;
- return result;
-}
-
-/**
- * Implements the cl_lock_operations::clo_state() method for the ccc layer,
- * invoked whenever the lock state changes. Transfers object attributes that
- * might have been updated as a result of lock acquisition into the inode.
- */
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct cl_lock *lock = slice->cls_lock;
-
- /*
- * Refresh inode attributes when the lock is moving into CLS_HELD
- * state, and only when this is a result of real enqueue, rather than
- * of finding lock in the cache.
- */
- if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
- struct cl_object *obj;
- struct inode *inode;
-
- obj = slice->cls_obj;
- inode = ccc_object_inode(obj);
-
- /* vmtruncate() sets the i_size
- * under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order. */
- if (lock->cll_descr.cld_start == 0 &&
- lock->cll_descr.cld_end == CL_PAGE_EOF)
- cl_merge_lvb(env, inode);
- }
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
-
- CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
-}
-
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
- struct cl_object *obj = io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
-
- memset(&cio->cui_link, 0, sizeof(cio->cui_link));
-
- if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- descr->cld_mode = CLM_GROUP;
- descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
- } else {
- descr->cld_mode = mode;
- }
- descr->cld_obj = obj;
- descr->cld_start = start;
- descr->cld_end = end;
- descr->cld_enq_flags = enqflags;
-
- cl_io_lock_add(env, io, &cio->cui_link);
- return 0;
-}
-
-void ccc_io_update_iov(const struct lu_env *env,
- struct ccc_io *cio, struct cl_io *io)
-{
- size_t size = io->u.ci_rw.crw_count;
-
- if (!cl_is_normalio(env, io) || cio->cui_iter == NULL)
- return;
-
- iov_iter_truncate(cio->cui_iter, size);
-}
-
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end)
-{
- struct cl_object *obj = io->ci_obj;
-
- return ccc_io_one_lock_index(env, io, enqflags, mode,
- cl_index(obj, start), cl_index(obj, end));
-}
-
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- CLOBINVRNT(env, ios->cis_io->ci_obj,
- ccc_object_invariant(ios->cis_io->ci_obj));
-}
-
-void ccc_io_advance(const struct lu_env *env,
- const struct cl_io_slice *ios,
- size_t nob)
-{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = ios->cis_io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- if (!cl_is_normalio(env, io))
- return;
-
- iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
-}
-
-/**
- * Helper function that, if necessary, adjusts the file size (inode->i_size)
- * when the position at offset \a pos is accessed. The file size can be
- * arbitrarily stale on a Lustre client, but the client at least knows the
- * KMS. If the accessed area is inside [0, KMS], set the file size to KMS;
- * otherwise glimpse the file size.
- *
- * Locking: cl_isize_lock is used to serialize changes to inode size and to
- * protect consistency between inode size and cl_object
- * attributes. cl_object_size_lock() protects consistency between cl_attr's of
- * top-object and sub-objects.
- */
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct inode *inode = ccc_object_inode(obj);
- loff_t pos = start + count - 1;
- loff_t kms;
- int result;
-
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being accessed and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock already acquired by
- * the caller, because to change the class, other client has to take
- * DLM lock conflicting with our lock. Also, any updates to ->i_size
- * by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- ccc_object_size_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- kms = attr->cat_kms;
- if (pos > kms) {
- /*
- * A glimpse is necessary to determine whether we
- * return a short read (B) or some zeroes at the end
- * of the buffer (C)
- */
- ccc_object_size_unlock(obj);
- result = cl_glimpse_lock(env, io, inode, obj, 0);
- if (result == 0 && exceed != NULL) {
-			/* If the target page index exceeds the end-of-file
-			 * page index, return directly. Do not expect the
-			 * kernel to check such a case correctly;
-			 * linux-2.6.18-128.1.1 misses it.
-			 * --bug 17336 */
- loff_t size = cl_isize_read(inode);
- loff_t cur_index = start >> PAGE_CACHE_SHIFT;
- loff_t size_index = (size - 1) >>
- PAGE_CACHE_SHIFT;
-
- if ((size == 0 && cur_index != 0) ||
- size_index < cur_index)
- *exceed = 1;
- }
- return result;
- }
- /*
- * region is within kms and, hence, within real file
- * size (A). We need to increase i_size to cover the
- * read region so that generic_file_read() will do its
- * job, but that doesn't mean the kms size is
-		 * _correct_; it is only the _minimum_ size. If
- * someone does a stat they will get the correct size
- * which will always be >= the kms value here.
- * b=11081
- */
- if (cl_isize_read(inode) < kms) {
- cl_isize_write_nolock(inode, kms);
- CDEBUG(D_VFSTRACE,
- DFID" updating i_size %llu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (__u64)cl_isize_read(inode));
-
- }
- }
- ccc_object_size_unlock(obj);
- return result;
-}
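
The (A)/(B-x)/(C) classification above reduces to comparing the accessed range against the KMS and, after a glimpse, against the real size. Below is a minimal userspace sketch of that decision, with made-up sizes standing in for attr->cat_kms and the glimpsed size; it is an illustration of the classification only, not the Lustre code path.

#include <stdio.h>

/*
 * Hypothetical classifier: 'A' = range fully inside the file,
 * 'B' = partially inside (short read), 'C' = fully beyond EOF.
 * kms and file_size are stand-ins for attr->cat_kms and the size
 * learned from a glimpse; offsets are inclusive.
 */
static char classify(long long start, long long count,
		     long long kms, long long file_size)
{
	long long end = start + count - 1;

	if (end <= kms)
		return 'A';	/* within KMS, hence within the file */
	/* otherwise a glimpse is needed to learn the real size */
	if (start >= file_size)
		return 'C';	/* entirely past EOF: only zeroes */
	if (end >= file_size)
		return 'B';	/* short read */
	return 'A';		/* glimpse showed the file grew past 'end' */
}

int main(void)
{
	/* glimpsed size 1000, KMS 512 */
	printf("%c %c %c\n",
	       classify(0, 100, 512, 1000),	/* A */
	       classify(900, 200, 512, 1000),	/* B */
	       classify(2000, 100, 512, 1000));	/* C */
	return 0;
}
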
-
-/*****************************************************************************
- *
- * Transfer operations.
- *
- */
-
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct ccc_req *vrq;
-
- if (ioret > 0)
- cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
- vrq = cl2ccc_req(slice);
- OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for ccc
- * layer. ccc is responsible for
- *
- * - o_[mac]time
- *
- * - o_mode
- *
- * - o_parent_seq
- *
- * - o_[ug]id
- *
- * - o_parent_oid
- *
- * - o_parent_ver
- *
- * - o_ioepoch,
- *
- */
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct inode *inode;
- struct obdo *oa;
- u32 valid_flags;
-
- oa = attr->cra_oa;
- inode = ccc_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE;
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
- obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
- memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
- JOBSTATS_JOBID_SIZE);
-}
-
-static const struct cl_req_operations ccc_req_ops = {
- .cro_attr_set = ccc_req_attr_set,
- .cro_completion = ccc_req_completion
-};
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
-{
- struct lu_env *env;
- struct cl_io *io;
- int result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
-
- io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
- io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
- io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
- io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
- io->u.ci_setattr.sa_valid = attr->ia_valid;
-
-again:
- if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
- struct ccc_io *cio = ccc_env_io(env);
-
- if (attr->ia_valid & ATTR_FILE)
- /* populate the file descriptor for ftruncate to honor
- * group lock - see LU-787 */
- cio->cui_fd = cl_iattr2fd(inode, attr);
-
- result = cl_io_loop(env, io);
- } else {
- result = io->ci_result;
- }
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
-	/* HSM import case: the file is released and cannot be restored;
-	 * no need to fail unless the restore registration itself failed
-	 * with -ENODATA */
- if (result == -ENODATA && io->ci_restore_needed &&
- io->ci_result != -ENODATA)
- result = 0;
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
-{
- return &vdv->cdv_cl.cd_lu_dev;
-}
-
-struct ccc_device *lu2ccc_dev(const struct lu_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
-}
-
-struct ccc_device *cl2ccc_dev(const struct cl_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl);
-}
-
-struct lu_object *ccc2lu(struct ccc_object *vob)
-{
- return &vob->cob_cl.co_lu;
-}
-
-struct ccc_object *lu2ccc(const struct lu_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl.co_lu);
-}
-
-struct ccc_object *cl2ccc(const struct cl_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl);
-}
-
-struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
-{
- return container_of(slice, struct ccc_lock, clk_cl);
-}
-
-struct ccc_io *cl2ccc_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct ccc_io *cio;
-
- cio = container_of(slice, struct ccc_io, cui_cl);
- LASSERT(cio == ccc_env_io(env));
- return cio;
-}
-
-struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct ccc_req, crq_cl);
-}
-
-struct page *cl2vm_page(const struct cl_page_slice *slice)
-{
- return cl2ccc_page(slice)->cpg_page;
-}
-
-/*****************************************************************************
- *
- * Accessors.
- *
- */
-int ccc_object_invariant(const struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
- struct cl_inode_info *lli = cl_i2info(inode);
-
- return (S_ISREG(cl_inode_mode(inode)) ||
- /* i_mode of unlinked inode is zeroed. */
- cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
-}
-
-struct inode *ccc_object_inode(const struct cl_object *obj)
-{
- return cl2ccc(obj)->cob_inode;
-}
-
-/**
- * Returns a pointer to cl_page associated with \a vmpage, without acquiring
- * additional reference to the resulting page. This is an unsafe version of
- * cl_vmpage_page() that can only be used under vmpage lock.
- */
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
-{
- KLASSERT(PageLocked(vmpage));
- return (struct cl_page *)vmpage->private;
-}
-
-/**
- * Initialize or update CLIO structures for regular files when new
- * meta-data arrives from the server.
- *
- * \param inode regular file inode
- * \param md new file metadata from MDS
- * - allocates the cl_object if necessary,
- * - updates the layout, if the object was already there.
- */
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
-{
- struct lu_env *env;
- struct cl_inode_info *lli;
- struct cl_object *clob;
- struct lu_site *site;
- struct lu_fid *fid;
- struct cl_object_conf conf = {
- .coc_inode = inode,
- .u = {
- .coc_md = md
- }
- };
- int result = 0;
- int refcheck;
-
- LASSERT(md->body->valid & OBD_MD_FLID);
- LASSERT(S_ISREG(cl_inode_mode(inode)));
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- site = cl_i2sbi(inode)->ll_site;
- lli = cl_i2info(inode);
- fid = &lli->lli_fid;
- LASSERT(fid_is_sane(fid));
-
- if (lli->lli_clob == NULL) {
-		/* clob is a slave of the inode; an empty lli_clob means this
-		 * is a new inode and there is no clob in the cache with the
-		 * given fid, so it is unnecessary to perform
-		 * lookup-alloc-lookup-insert: just alloc and insert directly. */
- LASSERT(inode->i_state & I_NEW);
- conf.coc_lu.loc_flags = LOC_F_NEW;
- clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
- fid, &conf);
- if (!IS_ERR(clob)) {
- /*
- * No locking is necessary, as new inode is
- * locked by I_NEW bit.
- */
- lli->lli_clob = clob;
- lli->lli_has_smd = lsm_has_objects(md->lsm);
- lu_object_ref_add(&clob->co_lu, "inode", inode);
- } else
- result = PTR_ERR(clob);
- } else {
- result = cl_conf_set(env, lli->lli_clob, &conf);
- }
-
- cl_env_put(env, &refcheck);
-
- if (result != 0)
- CERROR("Failure to initialize cl object "DFID": %d\n",
- PFID(fid), result);
- return result;
-}
-
-/**
- * Wait for others to drop their references to the object first, then drop
- * the last one, which causes the object to be destroyed immediately.
- * Must be called after cl_object_kill() against this object.
- *
- * The reason we want this is that destroying the top object waits for the
- * sub-objects to be destroyed first, so we can't let the bottom layer (e.g.
- * from ASTs) initiate destruction of the top object, which may deadlock.
- * See bz22520.
- */
-static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
-{
- struct lu_object_header *header = obj->co_lu.lo_header;
- wait_queue_t waiter;
-
- if (unlikely(atomic_read(&header->loh_ref) != 1)) {
- struct lu_site *site = obj->co_lu.lo_dev->ld_site;
- struct lu_site_bkt_data *bkt;
-
- bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
-
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&header->loh_ref) == 1)
- break;
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
- }
-
- cl_object_put(env, obj);
-}
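
cl_object_put_last() implements a "sleep until ours is the only reference, then drop it" pattern on top of the lu_site wait queue. The following is a rough userspace analogue of that wait loop, using pthreads in place of the kernel wait queue; all names below are invented for the sketch and nothing here is taken from Lustre itself.

#include <pthread.h>
#include <stdio.h>

/* refs/lock/cond stand in for loh_ref and the lsb_marche_funebre queue. */
struct obj {
	int refs;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static void obj_put(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (--o->refs == 1)
		pthread_cond_broadcast(&o->cond);	/* wake the finalizer */
	pthread_mutex_unlock(&o->lock);
}

/* Analogue of cl_object_put_last(): sleep until only our reference is
 * left, then drop it, at which point the object may be destroyed. */
static void obj_put_last(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	while (o->refs != 1)
		pthread_cond_wait(&o->cond, &o->lock);
	o->refs = 0;
	pthread_mutex_unlock(&o->lock);
	printf("last reference dropped\n");
}

static void *drop_ref(void *arg)
{
	obj_put(arg);			/* another user lets go of its ref */
	return NULL;
}

int main(void)
{
	struct obj o = { .refs = 2,
			 .lock = PTHREAD_MUTEX_INITIALIZER,
			 .cond = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, drop_ref, &o);
	obj_put_last(&o);		/* blocks until the other ref is gone */
	pthread_join(t, NULL);
	return 0;
}

Build with -pthread; the main thread blocks in obj_put_last() until the helper thread releases its reference.
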
-
-void cl_inode_fini(struct inode *inode)
-{
- struct lu_env *env;
- struct cl_inode_info *lli = cl_i2info(inode);
- struct cl_object *clob = lli->lli_clob;
- int refcheck;
- int emergency;
-
- if (clob != NULL) {
- void *cookie;
-
- cookie = cl_env_reenter();
- env = cl_env_get(&refcheck);
- emergency = IS_ERR(env);
- if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
- LASSERT(ccc_inode_fini_env != NULL);
- cl_env_implant(ccc_inode_fini_env, &refcheck);
- env = ccc_inode_fini_env;
- }
- /*
-		 * The cl_object cache is a slave to the inode cache (which, in
-		 * turn, is a slave to the dentry cache); don't keep the
-		 * cl_object in memory when its master is evicted.
- */
- cl_object_kill(env, clob);
- lu_object_ref_del(&clob->co_lu, "inode", inode);
- cl_object_put_last(env, clob);
- lli->lli_clob = NULL;
- if (emergency) {
- cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
- } else
- cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
- }
-}
-
-/**
- * Return the IF_* type for a given lu_dirent entry.
- * The IF_* flag should be converted to the particular OS file type in the
- * platform llite module.
- */
-__u16 ll_dirent_type_get(struct lu_dirent *ent)
-{
- __u16 type = 0;
- struct luda_type *lt;
- int len = 0;
-
- if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
-
- len = le16_to_cpu(ent->lde_namelen);
- len = (len + align) & ~align;
- lt = (void *)ent->lde_name + len;
- type = IFTODT(le16_to_cpu(lt->lt_type));
- }
- return type;
-}
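
The rounding in ll_dirent_type_get() places the struct luda_type record at the next sizeof(struct luda_type) boundary after the (unterminated) name. A quick stand-alone check of that alignment arithmetic, assuming a 2-byte structure purely for illustration:

#include <stdio.h>

int main(void)
{
	const unsigned align = 2 - 1;	/* assumed sizeof(struct luda_type) - 1 */
	unsigned namelen;

	for (namelen = 1; namelen <= 8; namelen++) {
		unsigned padded = (namelen + align) & ~align;

		printf("namelen %u -> luda_type at name + %u\n",
		       namelen, padded);
	}
	return 0;
}
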
-
-/**
- * Build the inode number from the passed @fid.
- */
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
-{
- if (BITS_PER_LONG == 32 || api32)
- return fid_flatten32(fid);
- else
- return fid_flatten(fid);
-}
-
-/**
- * Build the inode generation from the passed @fid. If our FID overflows the
- * 32-bit inode number then return a non-zero generation to distinguish them.
- */
-__u32 cl_fid_build_gen(const struct lu_fid *fid)
-{
- __u32 gen;
-
- if (fid_is_igif(fid)) {
- gen = lu_igif_gen(fid);
- return gen;
- }
-
- gen = fid_flatten(fid) >> 32;
- return gen;
-}
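
cl_fid_build_ino() and cl_fid_build_gen() split one flattened FID value between the inode number and the generation: 64-bit clients use the full flattened value as the ino, while anything above the low 32 bits reappears as a non-zero generation. The toy below illustrates that split; flatten() is a stand-in for demonstration only, not the real fid_flatten()/fid_flatten32(), which combine the FID components differently.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for fid_flatten(): pack two fields into 64 bits. */
static uint64_t flatten(uint64_t seq, uint32_t oid)
{
	return (seq << 32) | oid;
}

int main(void)
{
	uint64_t fl = flatten(0x5, 0x1234);

	printf("ino (64-bit client) = %#llx\n", (unsigned long long)fl);
	printf("gen                 = %#x\n", (unsigned)(fl >> 32));
	/* API32 clients use a separate 32-bit flattening; shown here as a
	 * simple truncation only to keep the example short. */
	printf("ino (API32 client)  = %#x\n", (unsigned)(fl & 0xffffffffu));
	return 0;
}
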
-
-/* The lsm is unreliable after the HSM implementation, as the layout can be
- * changed at any time. This is only to support old, non-CLIO-ized interfaces.
- * It will cause a deadlock if CLIO operations are called with this extra
- * layout refcount, because if the layout changed during the IO,
- * ll_layout_refresh() will have to wait for the refcount to reach zero to
- * destroy the older layout.
- *
- * Note that the lsm returned by this function may not be valid unless called
- * inside the layout lock - MDS_INODELOCK_LAYOUT. */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
- return lov_lsm_get(cl_i2info(inode)->lli_clob);
-}
-
-inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
- lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
-}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
deleted file mode 100644
index 01bf894d4a87..000000000000
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
- */
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/obd.h"
-#include "../include/cl_object.h"
-#include "../include/lclient.h"
-
-#include "../include/lustre_lite.h"
-
-
-/* Initialize the default and maximum LOV EA and cookie sizes. This allows
- * us to make MDS RPCs with large enough reply buffers to hold the
- * maximum-sized (= maximum striped) EA and cookie without having to
- * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
-{
- struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
- __u32 valsize = sizeof(struct lov_desc);
- int rc, easize, def_easize, cookiesize;
- struct lov_desc desc;
- __u16 stripes, def_stripes;
-
- rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
- &valsize, &desc, NULL);
- if (rc)
- return rc;
-
- stripes = min_t(__u32, desc.ld_tgt_count, LOV_MAX_STRIPE_COUNT);
- lsm.lsm_stripe_count = stripes;
- easize = obd_size_diskmd(dt_exp, &lsm);
-
- def_stripes = min_t(__u32, desc.ld_default_stripe_count,
- LOV_MAX_STRIPE_COUNT);
- lsm.lsm_stripe_count = def_stripes;
- def_easize = obd_size_diskmd(dt_exp, &lsm);
-
- cookiesize = stripes * sizeof(struct llog_cookie);
-
-	/* the default cookiesize is 0 because, since 2.4, the server doesn't
-	 * send llog cookies to the client. */
- CDEBUG(D_HA,
- "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n",
- def_easize, easize, cookiesize);
-
- rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize, 0);
- return rc;
-}
-
-/**
- * This function is used as an upcall-callback hooked by liblustre and llite
- * clients into the obd_notify() listener chain to handle notifications about
- * changes of the import connect_flags. See llu_fsswop_mount() and
- * lustre_common_fill_super().
- */
-int cl_ocd_update(struct obd_device *host,
- struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data)
-{
- struct lustre_client_ocd *lco;
- struct client_obd *cli;
- __u64 flags;
- int result;
-
- if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
- cli = &watched->u.cli;
- lco = owner;
- flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
- CDEBUG(D_SUPER, "Changing connect_flags: %#llx -> %#llx\n",
- lco->lco_flags, flags);
- mutex_lock(&lco->lco_lock);
- lco->lco_flags &= flags;
- /* for each osc event update ea size */
- if (lco->lco_dt_exp)
- cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
-
- mutex_unlock(&lco->lco_lock);
- result = 0;
- } else {
- CERROR("unexpected notification from %s %s!\n",
- watched->obd_type->typ_name,
- watched->obd_name);
- result = -EINVAL;
- }
- return result;
-}
-
-#define GROUPLOCK_SCOPE "grouplock"
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct cl_lock *lock;
- struct cl_lock_descr *descr;
- __u32 enqflags;
- int refcheck;
- int rc;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ccc_env_thread_io(env);
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
-
- rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (rc) {
- /* Does not make sense to take GL for released layout */
- if (rc > 0)
- rc = -ENOTSUPP;
- cl_env_put(env, &refcheck);
- return rc;
- }
-
- descr = &ccc_env_info(env)->cti_descr;
- descr->cld_obj = obj;
- descr->cld_start = 0;
- descr->cld_end = CL_PAGE_EOF;
- descr->cld_gid = gid;
- descr->cld_mode = CLM_GROUP;
-
- enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
- descr->cld_enq_flags = enqflags;
-
- lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
- if (IS_ERR(lock)) {
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- return PTR_ERR(lock);
- }
-
- cg->cg_env = cl_env_get(&refcheck);
- cg->cg_io = io;
- cg->cg_lock = lock;
- cg->cg_gid = gid;
- LASSERT(cg->cg_env == env);
-
- cl_env_unplant(env, &refcheck);
- return 0;
-}
-
-void cl_put_grouplock(struct ccc_grouplock *cg)
-{
- struct lu_env *env = cg->cg_env;
- struct cl_io *io = cg->cg_io;
- struct cl_lock *lock = cg->cg_lock;
- int refcheck;
-
- LASSERT(cg->cg_env);
- LASSERT(cg->cg_gid);
-
- cl_env_implant(env, &refcheck);
- cl_env_put(env, &refcheck);
-
- cl_unuse(env, lock);
- cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
- cl_io_fini(env, io);
- cl_env_put(env, NULL);
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
deleted file mode 100644
index eab2bd60241b..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ /dev/null
@@ -1,751 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/interval_tree.c
- *
- * Interval tree library used by ldlm extent lock code
- *
- * Author: Huang Wei <huangwei@clusterfs.com>
- * Author: Jay Xiong <jinshan.xiong@sun.com>
- */
-#include "../include/lustre_dlm.h"
-#include "../include/obd_support.h"
-#include "../include/interval_tree.h"
-
-enum {
- INTERVAL_RED = 0,
- INTERVAL_BLACK = 1
-};
-
-static inline int node_is_left_child(struct interval_node *node)
-{
- LASSERT(node->in_parent != NULL);
- return node == node->in_parent->in_left;
-}
-
-static inline int node_is_right_child(struct interval_node *node)
-{
- LASSERT(node->in_parent != NULL);
- return node == node->in_parent->in_right;
-}
-
-static inline int node_is_red(struct interval_node *node)
-{
- return node->in_color == INTERVAL_RED;
-}
-
-static inline int node_is_black(struct interval_node *node)
-{
- return node->in_color == INTERVAL_BLACK;
-}
-
-static inline int extent_compare(struct interval_node_extent *e1,
- struct interval_node_extent *e2)
-{
- int rc;
-
- if (e1->start == e2->start) {
- if (e1->end < e2->end)
- rc = -1;
- else if (e1->end > e2->end)
- rc = 1;
- else
- rc = 0;
- } else {
- if (e1->start < e2->start)
- rc = -1;
- else
- rc = 1;
- }
- return rc;
-}
-
-static inline int extent_equal(struct interval_node_extent *e1,
- struct interval_node_extent *e2)
-{
- return (e1->start == e2->start) && (e1->end == e2->end);
-}
-
-static inline int extent_overlapped(struct interval_node_extent *e1,
- struct interval_node_extent *e2)
-{
- return (e1->start <= e2->end) && (e2->start <= e1->end);
-}
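
extent_overlapped() is the standard closed-interval intersection test: two inclusive ranges overlap iff each one starts no later than the other ends. A few spot checks of the predicate in isolation (this re-declares a local struct only so the example is self-contained):

#include <stdio.h>
#include <stdint.h>

struct ext { uint64_t start, end; };	/* inclusive, like interval_node_extent */

static int overlapped(const struct ext *a, const struct ext *b)
{
	return a->start <= b->end && b->start <= a->end;
}

int main(void)
{
	struct ext a = { 0, 99 }, b = { 99, 200 }, c = { 100, 200 };

	printf("%d\n", overlapped(&a, &b));	/* 1: they share offset 99 */
	printf("%d\n", overlapped(&a, &c));	/* 0: disjoint */
	printf("%d\n", overlapped(&c, &b));	/* 1: nested overlap */
	return 0;
}
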
-
-static inline int node_compare(struct interval_node *n1,
- struct interval_node *n2)
-{
- return extent_compare(&n1->in_extent, &n2->in_extent);
-}
-
-static inline int node_equal(struct interval_node *n1,
- struct interval_node *n2)
-{
- return extent_equal(&n1->in_extent, &n2->in_extent);
-}
-
-static inline __u64 max_u64(__u64 x, __u64 y)
-{
- return x > y ? x : y;
-}
-
-static inline __u64 min_u64(__u64 x, __u64 y)
-{
- return x < y ? x : y;
-}
-
-#define interval_for_each(node, root) \
-for (node = interval_first(root); node != NULL; \
- node = interval_next(node))
-
-#define interval_for_each_reverse(node, root) \
-for (node = interval_last(root); node != NULL; \
- node = interval_prev(node))
-
-static struct interval_node *interval_first(struct interval_node *node)
-{
- if (!node)
- return NULL;
- while (node->in_left)
- node = node->in_left;
- return node;
-}
-
-static struct interval_node *interval_last(struct interval_node *node)
-{
- if (!node)
- return NULL;
- while (node->in_right)
- node = node->in_right;
- return node;
-}
-
-static struct interval_node *interval_next(struct interval_node *node)
-{
- if (!node)
- return NULL;
- if (node->in_right)
- return interval_first(node->in_right);
- while (node->in_parent && node_is_right_child(node))
- node = node->in_parent;
- return node->in_parent;
-}
-
-static struct interval_node *interval_prev(struct interval_node *node)
-{
- if (!node)
- return NULL;
-
- if (node->in_left)
- return interval_last(node->in_left);
-
- while (node->in_parent && node_is_left_child(node))
- node = node->in_parent;
-
- return node->in_parent;
-}
-
-enum interval_iter interval_iterate(struct interval_node *root,
- interval_callback_t func,
- void *data)
-{
- struct interval_node *node;
- enum interval_iter rc = INTERVAL_ITER_CONT;
-
- interval_for_each(node, root) {
- rc = func(node, data);
- if (rc == INTERVAL_ITER_STOP)
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(interval_iterate);
-
-enum interval_iter interval_iterate_reverse(struct interval_node *root,
- interval_callback_t func,
- void *data)
-{
- struct interval_node *node;
- enum interval_iter rc = INTERVAL_ITER_CONT;
-
- interval_for_each_reverse(node, root) {
- rc = func(node, data);
- if (rc == INTERVAL_ITER_STOP)
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(interval_iterate_reverse);
-
-/* Try to find a node with the same interval in the tree;
- * if found, return a pointer to the node, otherwise return NULL. */
-struct interval_node *interval_find(struct interval_node *root,
- struct interval_node_extent *ex)
-{
- struct interval_node *walk = root;
- int rc;
-
- while (walk) {
- rc = extent_compare(ex, &walk->in_extent);
- if (rc == 0)
- break;
- else if (rc < 0)
- walk = walk->in_left;
- else
- walk = walk->in_right;
- }
-
- return walk;
-}
-EXPORT_SYMBOL(interval_find);
-
-static void __rotate_change_maxhigh(struct interval_node *node,
- struct interval_node *rotate)
-{
- __u64 left_max, right_max;
-
- rotate->in_max_high = node->in_max_high;
- left_max = node->in_left ? node->in_left->in_max_high : 0;
- right_max = node->in_right ? node->in_right->in_max_high : 0;
- node->in_max_high = max_u64(interval_high(node),
- max_u64(left_max, right_max));
-}
-
-/* The left rotation "pivots" around the link from node to node->right, and
- * - node will be linked to node->right's left child, and
- * - node->right's left child will be linked to node's right child. */
-static void __rotate_left(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *right = node->in_right;
- struct interval_node *parent = node->in_parent;
-
- node->in_right = right->in_left;
- if (node->in_right)
- right->in_left->in_parent = node;
-
- right->in_left = node;
- right->in_parent = parent;
- if (parent) {
- if (node_is_left_child(node))
- parent->in_left = right;
- else
- parent->in_right = right;
- } else {
- *root = right;
- }
- node->in_parent = right;
-
- /* update max_high for node and right */
- __rotate_change_maxhigh(node, right);
-}
-
-/* The right rotation "pivots" around the link from node to node->left, and
- * - node will be linked to node->left's right child, and
- * - node->left's right child will be linked to node's left child. */
-static void __rotate_right(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *left = node->in_left;
- struct interval_node *parent = node->in_parent;
-
- node->in_left = left->in_right;
- if (node->in_left)
- left->in_right->in_parent = node;
- left->in_right = node;
-
- left->in_parent = parent;
- if (parent) {
- if (node_is_right_child(node))
- parent->in_right = left;
- else
- parent->in_left = left;
- } else {
- *root = left;
- }
- node->in_parent = left;
-
- /* update max_high for node and left */
- __rotate_change_maxhigh(node, left);
-}
-
-#define interval_swap(a, b) do { \
- struct interval_node *c = a; a = b; b = c; \
-} while (0)
-
-/*
- * Operations INSERT and DELETE, when run on a tree with n keys,
- * take O(log n) time. Because they modify the tree, the result
- * may violate the red-black properties. To restore these properties,
- * we must change the colors of some of the nodes in the tree
- * and also change the pointer structure.
- */
-static void interval_insert_color(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *parent, *gparent;
-
- while ((parent = node->in_parent) && node_is_red(parent)) {
- gparent = parent->in_parent;
- /* Parent is RED, so gparent must not be NULL */
- if (node_is_left_child(parent)) {
- struct interval_node *uncle;
-
- uncle = gparent->in_right;
- if (uncle && node_is_red(uncle)) {
- uncle->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- node = gparent;
- continue;
- }
-
- if (parent->in_right == node) {
- __rotate_left(parent, root);
- interval_swap(node, parent);
- }
-
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- __rotate_right(gparent, root);
- } else {
- struct interval_node *uncle;
-
- uncle = gparent->in_left;
- if (uncle && node_is_red(uncle)) {
- uncle->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- node = gparent;
- continue;
- }
-
- if (node_is_left_child(node)) {
- __rotate_right(parent, root);
- interval_swap(node, parent);
- }
-
- parent->in_color = INTERVAL_BLACK;
- gparent->in_color = INTERVAL_RED;
- __rotate_left(gparent, root);
- }
- }
-
- (*root)->in_color = INTERVAL_BLACK;
-}
-
-struct interval_node *interval_insert(struct interval_node *node,
- struct interval_node **root)
-
-{
- struct interval_node **p, *parent = NULL;
-
- LASSERT(!interval_is_intree(node));
- p = root;
- while (*p) {
- parent = *p;
- if (node_equal(parent, node))
- return parent;
-
- /* max_high field must be updated after each iteration */
- if (parent->in_max_high < interval_high(node))
- parent->in_max_high = interval_high(node);
-
- if (node_compare(node, parent) < 0)
- p = &parent->in_left;
- else
- p = &parent->in_right;
- }
-
- /* link node into the tree */
- node->in_parent = parent;
- node->in_color = INTERVAL_RED;
- node->in_left = node->in_right = NULL;
- *p = node;
-
- interval_insert_color(node, root);
- node->in_intree = 1;
-
- return NULL;
-}
-EXPORT_SYMBOL(interval_insert);
-
-static inline int node_is_black_or_0(struct interval_node *node)
-{
- return !node || node_is_black(node);
-}
-
-static void interval_erase_color(struct interval_node *node,
- struct interval_node *parent,
- struct interval_node **root)
-{
- struct interval_node *tmp;
-
- while (node_is_black_or_0(node) && node != *root) {
- if (parent->in_left == node) {
- tmp = parent->in_right;
- if (node_is_red(tmp)) {
- tmp->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_RED;
- __rotate_left(parent, root);
- tmp = parent->in_right;
- }
- if (node_is_black_or_0(tmp->in_left) &&
- node_is_black_or_0(tmp->in_right)) {
- tmp->in_color = INTERVAL_RED;
- node = parent;
- parent = node->in_parent;
- } else {
- if (node_is_black_or_0(tmp->in_right)) {
- struct interval_node *o_left;
-
- o_left = tmp->in_left;
- if (o_left)
- o_left->in_color = INTERVAL_BLACK;
- tmp->in_color = INTERVAL_RED;
- __rotate_right(tmp, root);
- tmp = parent->in_right;
- }
- tmp->in_color = parent->in_color;
- parent->in_color = INTERVAL_BLACK;
- if (tmp->in_right)
- tmp->in_right->in_color = INTERVAL_BLACK;
- __rotate_left(parent, root);
- node = *root;
- break;
- }
- } else {
- tmp = parent->in_left;
- if (node_is_red(tmp)) {
- tmp->in_color = INTERVAL_BLACK;
- parent->in_color = INTERVAL_RED;
- __rotate_right(parent, root);
- tmp = parent->in_left;
- }
- if (node_is_black_or_0(tmp->in_left) &&
- node_is_black_or_0(tmp->in_right)) {
- tmp->in_color = INTERVAL_RED;
- node = parent;
- parent = node->in_parent;
- } else {
- if (node_is_black_or_0(tmp->in_left)) {
- struct interval_node *o_right;
-
- o_right = tmp->in_right;
- if (o_right)
- o_right->in_color = INTERVAL_BLACK;
- tmp->in_color = INTERVAL_RED;
- __rotate_left(tmp, root);
- tmp = parent->in_left;
- }
- tmp->in_color = parent->in_color;
- parent->in_color = INTERVAL_BLACK;
- if (tmp->in_left)
- tmp->in_left->in_color = INTERVAL_BLACK;
- __rotate_right(parent, root);
- node = *root;
- break;
- }
- }
- }
- if (node)
- node->in_color = INTERVAL_BLACK;
-}
-
-/*
- * If the @max_high value of @node has changed, this function traverses a
- * path from the node up to the root to update max_high for the whole tree.
- */
-static void update_maxhigh(struct interval_node *node,
- __u64 old_maxhigh)
-{
- __u64 left_max, right_max;
-
- while (node) {
- left_max = node->in_left ? node->in_left->in_max_high : 0;
- right_max = node->in_right ? node->in_right->in_max_high : 0;
- node->in_max_high = max_u64(interval_high(node),
- max_u64(left_max, right_max));
-
- if (node->in_max_high >= old_maxhigh)
- break;
- node = node->in_parent;
- }
-}
-
-void interval_erase(struct interval_node *node,
- struct interval_node **root)
-{
- struct interval_node *child, *parent;
- int color;
-
- LASSERT(interval_is_intree(node));
- node->in_intree = 0;
- if (!node->in_left) {
- child = node->in_right;
- } else if (!node->in_right) {
- child = node->in_left;
- } else { /* Both left and right child are not NULL */
- struct interval_node *old = node;
-
- node = interval_next(node);
- child = node->in_right;
- parent = node->in_parent;
- color = node->in_color;
-
- if (child)
- child->in_parent = parent;
- if (parent == old)
- parent->in_right = child;
- else
- parent->in_left = child;
-
- node->in_color = old->in_color;
- node->in_right = old->in_right;
- node->in_left = old->in_left;
- node->in_parent = old->in_parent;
-
- if (old->in_parent) {
- if (node_is_left_child(old))
- old->in_parent->in_left = node;
- else
- old->in_parent->in_right = node;
- } else {
- *root = node;
- }
-
- old->in_left->in_parent = node;
- if (old->in_right)
- old->in_right->in_parent = node;
- update_maxhigh(child ? : parent, node->in_max_high);
- update_maxhigh(node, old->in_max_high);
- if (parent == old)
- parent = node;
- goto color;
- }
- parent = node->in_parent;
- color = node->in_color;
-
- if (child)
- child->in_parent = parent;
- if (parent) {
- if (node_is_left_child(node))
- parent->in_left = child;
- else
- parent->in_right = child;
- } else {
- *root = child;
- }
-
- update_maxhigh(child ? : parent, node->in_max_high);
-
-color:
- if (color == INTERVAL_BLACK)
- interval_erase_color(child, parent, root);
-}
-EXPORT_SYMBOL(interval_erase);
-
-static inline int interval_may_overlap(struct interval_node *node,
- struct interval_node_extent *ext)
-{
- return (ext->start <= node->in_max_high &&
- ext->end >= interval_low(node));
-}
-
-/*
- * This function finds all intervals that overlap the interval ext and calls
- * func to handle the resulting intervals one by one. In Lustre, it is used
- * to find all conflicting locks in the granted queue and add them to the
- * AST work list. The recursive equivalent of the iterative code below is:
- *
- * {
- * if (node == NULL)
- * return 0;
- * if (ext->end < interval_low(node)) {
- * interval_search(node->in_left, ext, func, data);
- * } else if (interval_may_overlap(node, ext)) {
- * if (extent_overlapped(ext, &node->in_extent))
- * func(node, data);
- * interval_search(node->in_left, ext, func, data);
- * interval_search(node->in_right, ext, func, data);
- * }
- * return 0;
- * }
- *
- */
-enum interval_iter interval_search(struct interval_node *node,
- struct interval_node_extent *ext,
- interval_callback_t func,
- void *data)
-{
- struct interval_node *parent;
- enum interval_iter rc = INTERVAL_ITER_CONT;
-
- LASSERT(ext != NULL);
- LASSERT(func != NULL);
-
- while (node) {
- if (ext->end < interval_low(node)) {
- if (node->in_left) {
- node = node->in_left;
- continue;
- }
- } else if (interval_may_overlap(node, ext)) {
- if (extent_overlapped(ext, &node->in_extent)) {
- rc = func(node, data);
- if (rc == INTERVAL_ITER_STOP)
- break;
- }
-
- if (node->in_left) {
- node = node->in_left;
- continue;
- }
- if (node->in_right) {
- node = node->in_right;
- continue;
- }
- }
-
- parent = node->in_parent;
- while (parent) {
- if (node_is_left_child(node) &&
- parent->in_right) {
-				/* If we ever went left, it means that the
-				 * parent satisfied either
-				 * ext->end < interval_low(parent) or
-				 * may_overlap(parent). If the former is true,
-				 * we needn't go back up, so stop early and
-				 * check may_overlap(parent) after this loop. */
- node = parent->in_right;
- break;
- }
- node = parent;
- parent = parent->in_parent;
- }
- if (parent == NULL || !interval_may_overlap(parent, ext))
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(interval_search);
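
A sketch of how a caller drives this tree: set up a few nodes with interval_set(), link them with interval_insert(), then visit the overlapping ones through interval_search() and a callback. This is an illustrative fragment written against the declarations in interval_tree.h, not code taken from Lustre's actual callers; locking and error handling are omitted.

#include "../include/interval_tree.h"

static enum interval_iter count_cb(struct interval_node *n, void *data)
{
	(*(int *)data)++;		/* count each overlapping interval */
	return INTERVAL_ITER_CONT;
}

static int count_overlaps_demo(void)
{
	static struct interval_node a, b, c;
	struct interval_node *root = NULL;
	struct interval_node_extent ext = { .start = 150, .end = 250 };
	int hits = 0;

	interval_set(&a, 0, 100);
	interval_set(&b, 200, 300);
	interval_set(&c, 400, 500);

	interval_insert(&a, &root);
	interval_insert(&b, &root);
	interval_insert(&c, &root);

	interval_search(root, &ext, count_cb, &hits);
	return hits;			/* 1: only [200,300] overlaps [150,250] */
}
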
-
-static enum interval_iter interval_overlap_cb(struct interval_node *n,
- void *args)
-{
- *(int *)args = 1;
- return INTERVAL_ITER_STOP;
-}
-
-int interval_is_overlapped(struct interval_node *root,
- struct interval_node_extent *ext)
-{
- int has = 0;
- (void)interval_search(root, ext, interval_overlap_cb, &has);
- return has;
-}
-EXPORT_SYMBOL(interval_is_overlapped);
-
-/* Don't expand the low end. Expanding downwards is expensive and of little
- * benefit, because programs seldom do IO backwards.
- *
- * The recursive algorithm of expanding low:
- * expand_low {
- * struct interval_node *tmp;
- * static __u64 res = 0;
- *
- * if (root == NULL)
- * return res;
- * if (root->in_max_high < low) {
- * res = max_u64(root->in_max_high + 1, res);
- * return res;
- * } else if (low < interval_low(root)) {
- * interval_expand_low(root->in_left, low);
- * return res;
- * }
- *
- * if (interval_high(root) < low)
- * res = max_u64(interval_high(root) + 1, res);
- * interval_expand_low(root->in_left, low);
- * interval_expand_low(root->in_right, low);
- *
- * return res;
- * }
- *
- * It's easy to eliminate the recursion; see interval_search for
- * an example. -jay
- */
-static inline __u64 interval_expand_low(struct interval_node *root, __u64 low)
-{
-	/* we only handle the empty-tree case right now. */
- if (root == NULL)
- return 0;
- return low;
-}
-
-static inline __u64 interval_expand_high(struct interval_node *node, __u64 high)
-{
- __u64 result = ~0;
-
- while (node != NULL) {
- if (node->in_max_high < high)
- break;
-
- if (interval_low(node) > high) {
- result = interval_low(node) - 1;
- node = node->in_left;
- } else {
- node = node->in_right;
- }
- }
-
- return result;
-}
-
-/* expanding the extent based on @ext. */
-void interval_expand(struct interval_node *root,
- struct interval_node_extent *ext,
- struct interval_node_extent *limiter)
-{
-	/* The interval_is_overlapped() assertion is expensive because we may
-	 * traverse many nodes to find an overlapping node. */
- LASSERT(interval_is_overlapped(root, ext) == 0);
- if (!limiter || limiter->start < ext->start)
- ext->start = interval_expand_low(root, ext->start);
- if (!limiter || limiter->end > ext->end)
- ext->end = interval_expand_high(root, ext->end);
- LASSERT(interval_is_overlapped(root, ext) == 0);
-}
-EXPORT_SYMBOL(interval_expand);
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
deleted file mode 100644
index cd8ab40e3267..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_lib.h"
-
-/**
- * Lock a lock and its resource.
- *
- * LDLM locking uses the resource to serialize access to locks,
- * but there is a case where we change the resource of a lock upon
- * enqueue reply. We rely on lock->l_resource = new_res
- * being an atomic operation.
- */
-struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
-{
- /* on server-side resource of lock doesn't change */
- if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
- spin_lock(&lock->l_lock);
-
- lock_res(lock->l_resource);
-
- lock->l_flags |= LDLM_FL_RES_LOCKED;
- return lock->l_resource;
-}
-EXPORT_SYMBOL(lock_res_and_lock);
-
-/**
- * Unlock a lock and its resource previously locked with lock_res_and_lock
- */
-void unlock_res_and_lock(struct ldlm_lock *lock)
-{
- /* on server-side resource of lock doesn't change */
- lock->l_flags &= ~LDLM_FL_RES_LOCKED;
-
- unlock_res(lock->l_resource);
- if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
- spin_unlock(&lock->l_lock);
-}
-EXPORT_SYMBOL(unlock_res_and_lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
deleted file mode 100644
index fd9b059361f9..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_extent.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-/**
- * This file contains implementation of EXTENT lock type
- *
- * EXTENT lock type is for locking a contiguous range of values, represented
- * by 64-bit starting and ending offsets (inclusive). There are several extent
- * lock modes, some of which may be mutually incompatible. Extent locks are
- * considered incompatible if their modes are incompatible and their extents
- * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/lustre_dlm.h"
-#include "../include/obd_support.h"
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_lib.h"
-#include "ldlm_internal.h"
-
-
-/* When a lock is cancelled by a client, the KMS may undergo change if this
- * is the "highest lock". This function returns the new KMS value.
- * Caller must hold lr_lock already.
- *
- * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
-__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
-{
- struct ldlm_resource *res = lock->l_resource;
- struct list_head *tmp;
- struct ldlm_lock *lck;
- __u64 kms = 0;
-
- /* don't let another thread in ldlm_extent_shift_kms race in
- * just after we finish and take our lock into account in its
- * calculation of the kms */
- lock->l_flags |= LDLM_FL_KMS_IGNORE;
-
- list_for_each(tmp, &res->lr_granted) {
- lck = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (lck->l_flags & LDLM_FL_KMS_IGNORE)
- continue;
-
- if (lck->l_policy_data.l_extent.end >= old_kms)
- return old_kms;
-
- /* This extent _has_ to be smaller than old_kms (checked above)
- * so kms can only ever be smaller or the same as old_kms. */
- if (lck->l_policy_data.l_extent.end + 1 > kms)
- kms = lck->l_policy_data.l_extent.end + 1;
- }
- LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms);
-
- return kms;
-}
-EXPORT_SYMBOL(ldlm_extent_shift_kms);
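
Worked example of the KMS shift: suppose the granted list holds extents [0,4095] and [0,16383], the latter currently defining KMS = 16384, and that lock is being cancelled. The loop ignores the cancelled lock (LDLM_FL_KMS_IGNORE), sees only [0,4095], and returns 4096. The core arithmetic, rendered as a stand-alone toy (extent ends are inclusive, so a lock on [x,y] covers y + 1 bytes):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* extent ends that remain after the cancelled lock is ignored */
	uint64_t ends[] = { 4095 };
	uint64_t old_kms = 16384, kms = 0;
	size_t i;

	for (i = 0; i < sizeof(ends) / sizeof(ends[0]); i++) {
		if (ends[i] >= old_kms) {	/* another lock still covers old KMS */
			kms = old_kms;
			break;
		}
		if (ends[i] + 1 > kms)
			kms = ends[i] + 1;
	}
	printf("new kms = %llu\n", (unsigned long long)kms);	/* 4096 */
	return 0;
}
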
-
-struct kmem_cache *ldlm_interval_slab;
-struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
-{
- struct ldlm_interval *node;
-
- LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
- if (node == NULL)
- return NULL;
-
- INIT_LIST_HEAD(&node->li_group);
- ldlm_interval_attach(node, lock);
- return node;
-}
-
-void ldlm_interval_free(struct ldlm_interval *node)
-{
- if (node) {
- LASSERT(list_empty(&node->li_group));
- LASSERT(!interval_is_intree(&node->li_node));
- OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
- }
-}
-
-/* interval tree, for LDLM_EXTENT. */
-void ldlm_interval_attach(struct ldlm_interval *n,
- struct ldlm_lock *l)
-{
- LASSERT(l->l_tree_node == NULL);
- LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
-
- list_add_tail(&l->l_sl_policy, &n->li_group);
- l->l_tree_node = n;
-}
-
-struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
-{
- struct ldlm_interval *n = l->l_tree_node;
-
- if (n == NULL)
- return NULL;
-
- LASSERT(!list_empty(&n->li_group));
- l->l_tree_node = NULL;
- list_del_init(&l->l_sl_policy);
-
- return list_empty(&n->li_group) ? n : NULL;
-}
-
-static inline int lock_mode_to_index(ldlm_mode_t mode)
-{
- int index;
-
- LASSERT(mode != 0);
- LASSERT(IS_PO2(mode));
- for (index = -1; mode; index++)
- mode >>= 1;
- LASSERT(index < LCK_MODE_NUM);
- return index;
-}
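
lock_mode_to_index() is just log2 of a power-of-two mode bit: starting from -1 and shifting until the value reaches zero counts the bit position, so a mode of 0x4 (three shifts) yields index 2. A self-contained check of the same loop:

#include <stdio.h>

static int mode_to_index(unsigned int mode)
{
	int index;

	for (index = -1; mode; index++)
		mode >>= 1;
	return index;		/* log2 of a power-of-two mode */
}

int main(void)
{
	unsigned int mode;

	for (mode = 1; mode <= 64; mode <<= 1)
		printf("mode %#x -> index %d\n", mode, mode_to_index(mode));
	return 0;
}
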
-
-/** Add newly granted lock into interval tree for the resource. */
-void ldlm_extent_add_lock(struct ldlm_resource *res,
- struct ldlm_lock *lock)
-{
- struct interval_node *found, **root;
- struct ldlm_interval *node;
- struct ldlm_extent *extent;
- int idx;
-
- LASSERT(lock->l_granted_mode == lock->l_req_mode);
-
- node = lock->l_tree_node;
- LASSERT(node != NULL);
- LASSERT(!interval_is_intree(&node->li_node));
-
- idx = lock_mode_to_index(lock->l_granted_mode);
- LASSERT(lock->l_granted_mode == 1 << idx);
- LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
-
- /* node extent initialize */
- extent = &lock->l_policy_data.l_extent;
- interval_set(&node->li_node, extent->start, extent->end);
-
- root = &res->lr_itree[idx].lit_root;
- found = interval_insert(&node->li_node, root);
- if (found) { /* The policy group found. */
- struct ldlm_interval *tmp;
-
- tmp = ldlm_interval_detach(lock);
- LASSERT(tmp != NULL);
- ldlm_interval_free(tmp);
- ldlm_interval_attach(to_ldlm_interval(found), lock);
- }
- res->lr_itree[idx].lit_size++;
-
-	/* Even though we use an interval tree to manage the extent locks, we
-	 * also add the locks to the granted list, for debugging purposes. */
- ldlm_resource_add_lock(res, &res->lr_granted, lock);
-}
-
-/** Remove cancelled lock from resource interval tree. */
-void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
-{
- struct ldlm_resource *res = lock->l_resource;
- struct ldlm_interval *node = lock->l_tree_node;
- struct ldlm_interval_tree *tree;
- int idx;
-
- if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
- return;
-
- idx = lock_mode_to_index(lock->l_granted_mode);
- LASSERT(lock->l_granted_mode == 1 << idx);
- tree = &res->lr_itree[idx];
-
- LASSERT(tree->lit_root != NULL); /* assure the tree is not null */
-
- tree->lit_size--;
- node = ldlm_interval_detach(lock);
- if (node) {
- interval_erase(&node->li_node, &tree->lit_root);
- ldlm_interval_free(node);
- }
-}
-
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_extent.start = wpolicy->l_extent.start;
- lpolicy->l_extent.end = wpolicy->l_extent.end;
- lpolicy->l_extent.gid = wpolicy->l_extent.gid;
-}
-
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
-{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_extent.start = lpolicy->l_extent.start;
- wpolicy->l_extent.end = lpolicy->l_extent.end;
- wpolicy->l_extent.gid = lpolicy->l_extent.gid;
-	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
-}
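
As an aside on the KMS rule documented at ldlm_extent_shift_kms() above (a lock on [x, y] protects a known minimum size of up to y + 1 bytes), here is a minimal standalone sketch of the same recomputation over a plain array; the struct, function and values below are illustrative only, not Lustre types.

/*
 * Standalone illustration of the rule used by ldlm_extent_shift_kms()
 * above: a granted lock on [start, end] protects a known minimum size
 * (KMS) of end + 1 bytes, so after one lock is dropped the new KMS is
 * the largest remaining end + 1, and never grows past the old KMS.
 */
#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t start;
	uint64_t end;
};

static uint64_t recompute_kms(const struct extent *granted, int n,
			      int cancelled_idx, uint64_t old_kms)
{
	uint64_t kms = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (i == cancelled_idx)		/* the lock being dropped */
			continue;
		if (granted[i].end >= old_kms)	/* still covered: no change */
			return old_kms;
		if (granted[i].end + 1 > kms)
			kms = granted[i].end + 1;
	}
	return kms;
}

int main(void)
{
	struct extent granted[] = { { 0, 4095 }, { 4096, 8191 } };

	/* dropping [4096, 8191] shrinks the KMS from 8192 to 4096 */
	printf("%llu\n", (unsigned long long)recompute_kms(granted, 2, 1, 8192));
	return 0;
}
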
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
deleted file mode 100644
index ab670fcf852d..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ /dev/null
@@ -1,859 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003 Hewlett-Packard Development Company LP.
- * Developed under the sponsorship of the US Government under
- * Subcontract No. B514193
- *
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/**
- * This file implements the POSIX lock type for Lustre.
- * Its policy properties are the start and end of the extent, and the PID.
- *
- * These locks are only handled through the MDS because POSIX semantics
- * require, e.g., that a lock may be only partially released and thus split
- * into two parts, and that two adjacent locks from the same process may be
- * merged into a single wider lock.
- *
- * Lock modes are mapped like this:
- * PR and PW for READ and WRITE locks
- * NL to request the release of a portion of the lock
- *
- * These flock locks never time out.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../include/lustre_dlm.h"
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_lib.h"
-#include <linux/list.h>
-#include "ldlm_internal.h"
-
-int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag);
-
-/**
- * list_for_remaining_safe - iterate over the remaining entries in a list
- * and safeguard against removal of a list entry.
- * \param pos the &struct list_head to use as a loop counter. pos MUST
- * have been initialized prior to using it in this macro.
- * \param n another &struct list_head to use as temporary storage
- * \param head the head for your list.
- */
-#define list_for_remaining_safe(pos, n, head) \
- for (n = pos->next; pos != (head); pos = n, n = pos->next)
-
-static inline int
-ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
-{
- return((new->l_policy_data.l_flock.owner ==
- lock->l_policy_data.l_flock.owner) &&
- (new->l_export == lock->l_export));
-}
-
-static inline int
-ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
-{
- return((new->l_policy_data.l_flock.start <=
- lock->l_policy_data.l_flock.end) &&
- (new->l_policy_data.l_flock.end >=
- lock->l_policy_data.l_flock.start));
-}
-
-static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
- struct ldlm_lock *lock)
-{
- /* For server only */
- if (req->l_export == NULL)
- return;
-
- LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
-
- req->l_policy_data.l_flock.blocking_owner =
- lock->l_policy_data.l_flock.owner;
- req->l_policy_data.l_flock.blocking_export =
- lock->l_export;
- req->l_policy_data.l_flock.blocking_refs = 0;
-
- cfs_hash_add(req->l_export->exp_flock_hash,
- &req->l_policy_data.l_flock.owner,
- &req->l_exp_flock_hash);
-}
-
-static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
-{
- /* For server only */
- if (req->l_export == NULL)
- return;
-
- check_res_locked(req->l_resource);
- if (req->l_export->exp_flock_hash != NULL &&
- !hlist_unhashed(&req->l_exp_flock_hash))
- cfs_hash_del(req->l_export->exp_flock_hash,
- &req->l_policy_data.l_flock.owner,
- &req->l_exp_flock_hash);
-}
-
-static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
-{
- LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
- mode, flags);
-
- /* Safe to not lock here, since it should be empty anyway */
- LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
-
- list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
- /* client side - set a flag to prevent sending a CANCEL */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
-
-		/* When we reach here, lock_res_and_lock() is already held, so
-		 * we need to call the nolock version of
-		 * ldlm_lock_decref_internal(). */
- ldlm_lock_decref_internal_nolock(lock, mode);
- }
-
- ldlm_lock_destroy_nolock(lock);
-}
-
-/**
- * POSIX locks deadlock detection code.
- *
- * Given a new lock \a req and an existing lock \a bl_lock it conflicts
- * with, we need to iterate through all blocked POSIX locks for this
- * export and see if a deadlock condition arises (i.e. when one client
- * holds a lock on something and wants a lock on something else, while
- * at the same time another client is in the opposite situation).
- */
-static int
-ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
-{
- struct obd_export *req_exp = req->l_export;
- struct obd_export *bl_exp = bl_lock->l_export;
- __u64 req_owner = req->l_policy_data.l_flock.owner;
- __u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;
-
- /* For server only */
- if (req_exp == NULL)
- return 0;
-
- class_export_get(bl_exp);
- while (1) {
- struct obd_export *bl_exp_new;
- struct ldlm_lock *lock = NULL;
- struct ldlm_flock *flock;
-
- if (bl_exp->exp_flock_hash != NULL)
- lock = cfs_hash_lookup(bl_exp->exp_flock_hash,
- &bl_owner);
- if (lock == NULL)
- break;
-
- LASSERT(req != lock);
- flock = &lock->l_policy_data.l_flock;
- LASSERT(flock->owner == bl_owner);
- bl_owner = flock->blocking_owner;
- bl_exp_new = class_export_get(flock->blocking_export);
- class_export_put(bl_exp);
-
- cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
- bl_exp = bl_exp_new;
-
- if (bl_owner == req_owner && bl_exp == req_exp) {
- class_export_put(bl_exp);
- return 1;
- }
- }
- class_export_put(bl_exp);
-
- return 0;
-}
-
-static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
- struct list_head *work_list)
-{
- CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
-
- if ((exp_connect_flags(lock->l_export) &
- OBD_CONNECT_FLOCK_DEAD) == 0) {
-		CERROR(
-			"deadlock found, but client doesn't support flock cancellation\n");
- } else {
- LASSERT(lock->l_completion_ast);
- LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
- lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
- LDLM_FL_FLOCK_DEADLOCK;
- ldlm_flock_blocking_unlink(lock);
- ldlm_resource_unlink_lock(lock);
- ldlm_add_ast_work_item(lock, NULL, work_list);
- }
-}
-
-/**
- * Process a granting attempt for a flock lock.
- * Must be called with the ns lock held.
- *
- * This function looks for any conflicts for \a lock in the granted or
- * waiting queues. The lock is granted if no conflicts are found in
- * either queue.
- *
- * It is also responsible for splitting a lock if a portion of the lock
- * is released.
- *
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- * - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- * - blocking ASTs have not been sent yet, so list of conflicting locks
- * would be collected and ASTs sent.
- */
-int
-ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
-{
- struct ldlm_resource *res = req->l_resource;
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- struct list_head *tmp;
- struct list_head *ownlocks = NULL;
- struct ldlm_lock *lock = NULL;
- struct ldlm_lock *new = req;
- struct ldlm_lock *new2 = NULL;
- ldlm_mode_t mode = req->l_req_mode;
- int local = ns_is_client(ns);
- int added = (mode == LCK_NL);
- int overlaps = 0;
- int splitted = 0;
- const struct ldlm_callback_suite null_cbs = { NULL };
-
- CDEBUG(D_DLMTRACE,
- "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
- *flags, new->l_policy_data.l_flock.owner,
- new->l_policy_data.l_flock.pid, mode,
- req->l_policy_data.l_flock.start,
- req->l_policy_data.l_flock.end);
-
- *err = ELDLM_OK;
-
- if (local) {
- /* No blocking ASTs are sent to the clients for
- * Posix file & record locks */
- req->l_blocking_ast = NULL;
- } else {
- /* Called on the server for lock cancels. */
- req->l_blocking_ast = ldlm_flock_blocking_ast;
- }
-
-reprocess:
- if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
-		/* This loop determines where this process's locks start
-		 * in the resource lr_granted list. */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
- if (ldlm_same_flock_owner(lock, req)) {
- ownlocks = tmp;
- break;
- }
- }
- } else {
- int reprocess_failed = 0;
-
- lockmode_verify(mode);
-
- /* This loop determines if there are existing locks
- * that conflict with the new lock request. */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
-
- if (ldlm_same_flock_owner(lock, req)) {
- if (!ownlocks)
- ownlocks = tmp;
- continue;
- }
-
- /* locks are compatible, overlap doesn't matter */
- if (lockmode_compat(lock->l_granted_mode, mode))
- continue;
-
- if (!ldlm_flocks_overlap(lock, req))
- continue;
-
- if (!first_enq) {
- reprocess_failed = 1;
- if (ldlm_flock_deadlock(req, lock)) {
- ldlm_flock_cancel_on_deadlock(req,
- work_list);
- return LDLM_ITER_CONTINUE;
- }
- continue;
- }
-
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
- ldlm_flock_destroy(req, mode, *flags);
- *err = -EAGAIN;
- return LDLM_ITER_STOP;
- }
-
- if (*flags & LDLM_FL_TEST_LOCK) {
- ldlm_flock_destroy(req, mode, *flags);
- req->l_req_mode = lock->l_granted_mode;
- req->l_policy_data.l_flock.pid =
- lock->l_policy_data.l_flock.pid;
- req->l_policy_data.l_flock.start =
- lock->l_policy_data.l_flock.start;
- req->l_policy_data.l_flock.end =
- lock->l_policy_data.l_flock.end;
- *flags |= LDLM_FL_LOCK_CHANGED;
- return LDLM_ITER_STOP;
- }
-
- /* add lock to blocking list before deadlock
- * check to prevent race */
- ldlm_flock_blocking_link(req, lock);
-
- if (ldlm_flock_deadlock(req, lock)) {
- ldlm_flock_blocking_unlink(req);
- ldlm_flock_destroy(req, mode, *flags);
- *err = -EDEADLK;
- return LDLM_ITER_STOP;
- }
-
- ldlm_resource_add_lock(res, &res->lr_waiting, req);
- *flags |= LDLM_FL_BLOCK_GRANTED;
- return LDLM_ITER_STOP;
- }
- if (reprocess_failed)
- return LDLM_ITER_CONTINUE;
- }
-
- if (*flags & LDLM_FL_TEST_LOCK) {
- ldlm_flock_destroy(req, mode, *flags);
- req->l_req_mode = LCK_NL;
- *flags |= LDLM_FL_LOCK_CHANGED;
- return LDLM_ITER_STOP;
- }
-
-	/* If we slept on this lock request, take it off the
-	 * deadlock detection hash list. */
- ldlm_flock_blocking_unlink(req);
-
- /* Scan the locks owned by this process that overlap this request.
- * We may have to merge or split existing locks. */
-
- if (!ownlocks)
- ownlocks = &res->lr_granted;
-
- list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
- lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
-
- if (!ldlm_same_flock_owner(lock, new))
- break;
-
- if (lock->l_granted_mode == mode) {
- /* If the modes are the same then we need to process
- * locks that overlap OR adjoin the new lock. The extra
- * logic condition is necessary to deal with arithmetic
- * overflow and underflow. */
- if ((new->l_policy_data.l_flock.start >
- (lock->l_policy_data.l_flock.end + 1))
- && (lock->l_policy_data.l_flock.end !=
- OBD_OBJECT_EOF))
- continue;
-
- if ((new->l_policy_data.l_flock.end <
- (lock->l_policy_data.l_flock.start - 1))
- && (lock->l_policy_data.l_flock.start != 0))
- break;
-
- if (new->l_policy_data.l_flock.start <
- lock->l_policy_data.l_flock.start) {
- lock->l_policy_data.l_flock.start =
- new->l_policy_data.l_flock.start;
- } else {
- new->l_policy_data.l_flock.start =
- lock->l_policy_data.l_flock.start;
- }
-
- if (new->l_policy_data.l_flock.end >
- lock->l_policy_data.l_flock.end) {
- lock->l_policy_data.l_flock.end =
- new->l_policy_data.l_flock.end;
- } else {
- new->l_policy_data.l_flock.end =
- lock->l_policy_data.l_flock.end;
- }
-
- if (added) {
- ldlm_flock_destroy(lock, mode, *flags);
- } else {
- new = lock;
- added = 1;
- }
- continue;
- }
-
- if (new->l_policy_data.l_flock.start >
- lock->l_policy_data.l_flock.end)
- continue;
-
- if (new->l_policy_data.l_flock.end <
- lock->l_policy_data.l_flock.start)
- break;
-
- ++overlaps;
-
- if (new->l_policy_data.l_flock.start <=
- lock->l_policy_data.l_flock.start) {
- if (new->l_policy_data.l_flock.end <
- lock->l_policy_data.l_flock.end) {
- lock->l_policy_data.l_flock.start =
- new->l_policy_data.l_flock.end + 1;
- break;
- }
- ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
- continue;
- }
- if (new->l_policy_data.l_flock.end >=
- lock->l_policy_data.l_flock.end) {
- lock->l_policy_data.l_flock.end =
- new->l_policy_data.l_flock.start - 1;
- continue;
- }
-
- /* split the existing lock into two locks */
-
- /* if this is an F_UNLCK operation then we could avoid
- * allocating a new lock and use the req lock passed in
- * with the request but this would complicate the reply
- * processing since updates to req get reflected in the
- * reply. The client side replays the lock request so
- * it must see the original lock data in the reply. */
-
- /* XXX - if ldlm_lock_new() can sleep we should
- * release the lr_lock, allocate the new lock,
- * and restart processing this lock. */
- if (!new2) {
- unlock_res_and_lock(req);
- new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
- lock->l_granted_mode, &null_cbs,
- NULL, 0, LVB_T_NONE);
- lock_res_and_lock(req);
- if (!new2) {
- ldlm_flock_destroy(req, lock->l_granted_mode,
- *flags);
- *err = -ENOLCK;
- return LDLM_ITER_STOP;
- }
- goto reprocess;
- }
-
- splitted = 1;
-
- new2->l_granted_mode = lock->l_granted_mode;
- new2->l_policy_data.l_flock.pid =
- new->l_policy_data.l_flock.pid;
- new2->l_policy_data.l_flock.owner =
- new->l_policy_data.l_flock.owner;
- new2->l_policy_data.l_flock.start =
- lock->l_policy_data.l_flock.start;
- new2->l_policy_data.l_flock.end =
- new->l_policy_data.l_flock.start - 1;
- lock->l_policy_data.l_flock.start =
- new->l_policy_data.l_flock.end + 1;
- new2->l_conn_export = lock->l_conn_export;
- if (lock->l_export != NULL) {
- new2->l_export = class_export_lock_get(lock->l_export,
- new2);
- if (new2->l_export->exp_lock_hash &&
- hlist_unhashed(&new2->l_exp_hash))
- cfs_hash_add(new2->l_export->exp_lock_hash,
- &new2->l_remote_handle,
- &new2->l_exp_hash);
- }
- if (*flags == LDLM_FL_WAIT_NOREPROC)
- ldlm_lock_addref_internal_nolock(new2,
- lock->l_granted_mode);
-
- /* insert new2 at lock */
- ldlm_resource_add_lock(res, ownlocks, new2);
- LDLM_LOCK_RELEASE(new2);
- break;
- }
-
-	/* If new2 was created but never used, destroy it. */
- if (splitted == 0 && new2 != NULL)
- ldlm_lock_destroy_nolock(new2);
-
- /* At this point we're granting the lock request. */
- req->l_granted_mode = req->l_req_mode;
-
- /* Add req to the granted queue before calling ldlm_reprocess_all(). */
- if (!added) {
- list_del_init(&req->l_res_link);
- /* insert new lock before ownlocks in list. */
- ldlm_resource_add_lock(res, ownlocks, req);
- }
-
- if (*flags != LDLM_FL_WAIT_NOREPROC) {
-		/* The only possible case for a client-side call into the flock
-		 * policy function is ldlm_flock_completion_ast(), which always
-		 * carries the LDLM_FL_WAIT_NOREPROC flag. */
- CERROR("Illegal parameter for client-side-only module.\n");
- LBUG();
- }
-
-	/* If we are reprocessing the requested lock, we can't destroy it
-	 * until after calling ldlm_add_ast_work_item(), so that function
-	 * can bump the reference count on \a req. Otherwise \a req
-	 * could be freed before the completion AST can be sent. */
- if (added)
- ldlm_flock_destroy(req, mode, *flags);
-
- ldlm_resource_dump(D_INFO, res);
- return LDLM_ITER_CONTINUE;
-}
-
-struct ldlm_flock_wait_data {
- struct ldlm_lock *fwd_lock;
- int fwd_generation;
-};
-
-static void
-ldlm_flock_interrupted_wait(void *data)
-{
- struct ldlm_lock *lock;
-
- lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
-
- /* take lock off the deadlock detection hash list. */
- lock_res_and_lock(lock);
- ldlm_flock_blocking_unlink(lock);
-
- /* client side - set flag to prevent lock from being put on LRU list */
- lock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(lock);
-}
-
-/**
- * Flock completion callback function.
- *
- * \param lock [in,out]: A lock to be handled
- * \param flags [in]: flags
- * \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
- *
- * \retval 0 : success
- * \retval <0 : failure
- */
-int
-ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
-{
- struct file_lock *getlk = lock->l_ast_data;
- struct obd_device *obd;
- struct obd_import *imp = NULL;
- struct ldlm_flock_wait_data fwd;
- struct l_wait_info lwi;
- ldlm_error_t err;
- int rc = 0;
-
- CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
- flags, data, getlk);
-
-	/* Import invalidation: we need to actually release the lock
-	 * references being held, so that the lock can go away. There is no
-	 * point in holding the lock even if the app still believes it has
-	 * it, since the server already dropped it. Granted locks only. */
- if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
- (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
- if (lock->l_req_mode == lock->l_granted_mode &&
- lock->l_granted_mode != LCK_NL &&
- data == NULL)
- ldlm_lock_decref_internal(lock, lock->l_req_mode);
-
- /* Need to wake up the waiter if we were evicted */
- wake_up(&lock->l_waitq);
- return 0;
- }
-
- LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
-
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- if (data == NULL)
- /* mds granted the lock in the reply */
- goto granted;
- /* CP AST RPC: lock get granted, wake it up */
- wake_up(&lock->l_waitq);
- return 0;
- }
-
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
- fwd.fwd_lock = lock;
- obd = class_exp2obd(lock->l_conn_export);
-
- /* if this is a local lock, there is no import */
- if (obd != NULL)
- imp = obd->u.cli.cl_import;
-
- if (imp != NULL) {
- spin_lock(&imp->imp_lock);
- fwd.fwd_generation = imp->imp_generation;
- spin_unlock(&imp->imp_lock);
- }
-
- lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
-
- /* Go to sleep until the lock is granted. */
- rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
-
- if (rc) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
-granted:
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
-
- if (lock->l_flags & LDLM_FL_DESTROYED) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- return 0;
- }
-
- if (lock->l_flags & LDLM_FL_FAILED) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
- return -EIO;
- }
-
- if (rc) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
- LDLM_DEBUG(lock, "client-side enqueue granted");
-
- lock_res_and_lock(lock);
-
- /* take lock off the deadlock detection hash list. */
- ldlm_flock_blocking_unlink(lock);
-
- /* ldlm_lock_enqueue() has already placed lock on the granted list. */
- list_del_init(&lock->l_res_link);
-
- if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
- LDLM_DEBUG(lock, "client-side enqueue deadlock received");
- rc = -EDEADLK;
- } else if (flags & LDLM_FL_TEST_LOCK) {
- /* fcntl(F_GETLK) request */
-		/* The old mode was saved in getlk->fl_type so that if the mode
-		 * in the lock changes we can decref the appropriate refcount. */
- ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
- switch (lock->l_granted_mode) {
- case LCK_PR:
- getlk->fl_type = F_RDLCK;
- break;
- case LCK_PW:
- getlk->fl_type = F_WRLCK;
- break;
- default:
- getlk->fl_type = F_UNLCK;
- }
- getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid;
- getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
- getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
- } else {
- __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
-
- /* We need to reprocess the lock to do merges or splits
- * with existing locks owned by this process. */
- ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
- }
- unlock_res_and_lock(lock);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_flock_completion_ast);
-
-int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
-{
- LASSERT(lock);
- LASSERT(flag == LDLM_CB_CANCELING);
-
- /* take lock off the deadlock detection hash list. */
- lock_res_and_lock(lock);
- ldlm_flock_blocking_unlink(lock);
- unlock_res_and_lock(lock);
- return 0;
-}
-
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
- lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
- lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
-	/* Compat code: old clients had no idea about the owner field and
-	 * relied solely on pid for ownership. Introduced in LU-104, 2.1,
-	 * April 2011. */
- lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
-}
-
-
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
- lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
- lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
- lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
-}
-
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
-{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
- wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
- wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
- wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
-}
-
-/*
- * Export handle<->flock hash operations.
- */
-static unsigned
-ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
-{
- return cfs_hash_u64_hash(*(__u64 *)key, mask);
-}
-
-static void *
-ldlm_export_flock_key(struct hlist_node *hnode)
-{
- struct ldlm_lock *lock;
-
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
- return &lock->l_policy_data.l_flock.owner;
-}
-
-static int
-ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
-{
- return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
-}
-
-static void *
-ldlm_export_flock_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
-}
-
-static void
-ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ldlm_lock *lock;
- struct ldlm_flock *flock;
-
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
- LDLM_LOCK_GET(lock);
-
- flock = &lock->l_policy_data.l_flock;
- LASSERT(flock->blocking_export != NULL);
- class_export_get(flock->blocking_export);
- flock->blocking_refs++;
-}
-
-static void
-ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ldlm_lock *lock;
- struct ldlm_flock *flock;
-
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
- LDLM_LOCK_RELEASE(lock);
-
- flock = &lock->l_policy_data.l_flock;
- LASSERT(flock->blocking_export != NULL);
- class_export_put(flock->blocking_export);
- if (--flock->blocking_refs == 0) {
- flock->blocking_owner = 0;
- flock->blocking_export = NULL;
- }
-}
-
-static cfs_hash_ops_t ldlm_export_flock_ops = {
- .hs_hash = ldlm_export_flock_hash,
- .hs_key = ldlm_export_flock_key,
- .hs_keycmp = ldlm_export_flock_keycmp,
- .hs_object = ldlm_export_flock_object,
- .hs_get = ldlm_export_flock_get,
- .hs_put = ldlm_export_flock_put,
- .hs_put_locked = ldlm_export_flock_put,
-};
-
-int ldlm_init_flock_export(struct obd_export *exp)
-{
- if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
- return 0;
-
- exp->exp_flock_hash =
- cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
- HASH_EXP_LOCK_CUR_BITS,
- HASH_EXP_LOCK_MAX_BITS,
- HASH_EXP_LOCK_BKT_BITS, 0,
- CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
- &ldlm_export_flock_ops,
- CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
- if (!exp->exp_flock_hash)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL(ldlm_init_flock_export);
-
-void ldlm_destroy_flock_export(struct obd_export *exp)
-{
- if (exp->exp_flock_hash) {
- cfs_hash_putref(exp->exp_flock_hash);
- exp->exp_flock_hash = NULL;
- }
-}
-EXPORT_SYMBOL(ldlm_destroy_flock_export);
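
The same-mode merge test in ldlm_process_flock_lock() above (merge ranges that overlap or adjoin, with extra checks so the end + 1 / start - 1 arithmetic cannot mislead at offset 0 or at EOF) can be illustrated in isolation. The sketch below uses a hypothetical range type and EOF value, not the Lustre policy structures.

/*
 * Standalone illustration of the same-mode merge test used by
 * ldlm_process_flock_lock() above: two byte ranges are merged when they
 * overlap or adjoin, and the "!= EOF" / "!= 0" guards keep unsigned
 * wraparound in end + 1 / start - 1 from producing a wrong answer.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RANGE_EOF UINT64_MAX

struct range {
	uint64_t start;
	uint64_t end;
};

static bool mergeable(const struct range *new, const struct range *old)
{
	/* new lies entirely above old and does not adjoin it */
	if (new->start > old->end + 1 && old->end != RANGE_EOF)
		return false;
	/* new lies entirely below old and does not adjoin it */
	if (new->end < old->start - 1 && old->start != 0)
		return false;
	return true;
}

static struct range merge(struct range a, struct range b)
{
	struct range r = {
		.start = a.start < b.start ? a.start : b.start,
		.end = a.end > b.end ? a.end : b.end,
	};

	return r;
}

int main(void)
{
	struct range held = { 0, 99 };
	struct range req = { 100, 199 };	/* adjoins [0, 99] */

	if (mergeable(&req, &held)) {
		struct range m = merge(req, held);

		/* prints [0, 199] */
		printf("[%llu, %llu]\n", (unsigned long long)m.start,
		       (unsigned long long)m.end);
	}
	return 0;
}
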
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
deleted file mode 100644
index 40d3338506ae..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_inodebits.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-/**
- * This file contains the implementation of the IBITS lock type.
- *
- * An IBITS lock contains a bit mask determining various properties of an
- * object. The meanings of specific bits are specific to the caller and are
- * opaque to LDLM code.
- *
- * Locks with intersecting bitmasks and conflicting lock modes (e.g. LCK_PW)
- * are considered conflicting. See the lock mode compatibility matrix
- * in lustre_dlm.h.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../include/lustre_dlm.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_lib.h"
-#include "ldlm_internal.h"
-
-
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
-}
-
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
-{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
-}
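
A small illustration of the conflict rule stated in the ldlm_inodebits.c header above: two IBITS locks conflict only when their bit masks intersect and their modes are incompatible. The two-mode "matrix" below is a deliberately simplified stand-in for the full LDLM compatibility matrix, and all names are illustrative.

/*
 * Sketch of the IBITS conflict rule: disjoint bit masks never conflict;
 * intersecting masks conflict exactly when the modes are incompatible.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum toy_mode { TOY_READ, TOY_WRITE };

struct ibits_lock {
	uint64_t bits;		/* property bits, meaning is caller-defined */
	enum toy_mode mode;
};

static bool modes_compatible(enum toy_mode a, enum toy_mode b)
{
	return a == TOY_READ && b == TOY_READ;	/* read/read only */
}

static bool ibits_conflict(const struct ibits_lock *a,
			   const struct ibits_lock *b)
{
	if (!(a->bits & b->bits))	/* disjoint bit masks never conflict */
		return false;
	return !modes_compatible(a->mode, b->mode);
}

int main(void)
{
	struct ibits_lock w1 = { .bits = 0x3, .mode = TOY_WRITE };
	struct ibits_lock w2 = { .bits = 0x4, .mode = TOY_WRITE };
	struct ibits_lock r1 = { .bits = 0x1, .mode = TOY_READ };

	/* prints "0 1": disjoint bits never conflict; a shared bit with a
	 * write mode does */
	printf("%d %d\n", ibits_conflict(&w1, &w2), ibits_conflict(&w1, &r1));
	return 0;
}
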
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
deleted file mode 100644
index fa4b7c760d49..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define MAX_STRING_SIZE 128
-
-extern int ldlm_srv_namespace_nr;
-extern int ldlm_cli_namespace_nr;
-extern struct mutex ldlm_srv_namespace_lock;
-extern struct list_head ldlm_srv_namespace_list;
-extern struct mutex ldlm_cli_namespace_lock;
-extern struct list_head ldlm_cli_active_namespace_list;
-extern struct list_head ldlm_cli_inactive_namespace_list;
-
-static inline int ldlm_namespace_nr_read(ldlm_side_t client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
-}
-
-static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
-{
- if (client == LDLM_NAMESPACE_SERVER)
- ldlm_srv_namespace_nr++;
- else
- ldlm_cli_namespace_nr++;
-}
-
-static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
-{
- if (client == LDLM_NAMESPACE_SERVER)
- ldlm_srv_namespace_nr--;
- else
- ldlm_cli_namespace_nr--;
-}
-
-static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
-}
-
-static inline struct list_head *ldlm_namespace_inactive_list(ldlm_side_t client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
-}
-
-static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
-}
-
-/* ns_bref is the number of resources in this namespace */
-static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
-{
- return atomic_read(&ns->ns_bref) == 0;
-}
-
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
- ldlm_side_t);
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
-
-/* ldlm_request.c */
-/* Cancel lru flag, it indicates we cancel aged locks. */
-enum {
- LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
- LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
- LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
- LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
- LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs) */
-};
-
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- ldlm_cancel_flags_t sync, int flags);
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
- struct list_head *cancels, int count, int max,
- ldlm_cancel_flags_t cancel_flags, int flags);
-extern int ldlm_enqueue_min;
-int ldlm_get_enq_timeout(struct ldlm_lock *lock);
-
-/* ldlm_resource.c */
-int ldlm_resource_putref_locked(struct ldlm_resource *res);
-void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
- struct ldlm_lock *new);
-void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
- struct obd_import *imp, int force);
-void ldlm_namespace_free_post(struct ldlm_namespace *ns);
-/* ldlm_lock.c */
-
-struct ldlm_cb_set_arg {
- struct ptlrpc_request_set *set;
- int type; /* LDLM_{CP,BL,GL}_CALLBACK */
- atomic_t restart;
- struct list_head *list;
- union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */
-};
-
-enum ldlm_desc_ast_t {
- LDLM_WORK_BL_AST,
- LDLM_WORK_CP_AST,
- LDLM_WORK_REVOKE_AST,
- LDLM_WORK_GL_AST
-};
-
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
-int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
- enum req_location loc, void *data, int size);
-struct ldlm_lock *
-ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
- ldlm_type_t type, ldlm_mode_t,
- const struct ldlm_callback_suite *cbs,
- void *data, __u32 lvb_len, enum lvb_type lvb_type);
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
- void *cookie, __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
-void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list);
-int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
- enum ldlm_desc_ast_t ast_type);
-int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
-int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
-void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
-void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
-void ldlm_lock_touch_in_lru(struct ldlm_lock *lock);
-void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
-
-void ldlm_cancel_locks_for_export(struct obd_export *export);
-
-/* ldlm_lockd.c */
-int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock);
-int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags);
-
-void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
-
-extern struct kmem_cache *ldlm_resource_slab;
-
-/* ldlm_lockd.c & ldlm_lock.c */
-extern struct kmem_cache *ldlm_lock_slab;
-
-/* ldlm_extent.c */
-void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
-void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
-
-/* ldlm_flock.c */
-int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
- int first_enq, ldlm_error_t *err,
- struct list_head *work_list);
-int ldlm_init_flock_export(struct obd_export *exp);
-void ldlm_destroy_flock_export(struct obd_export *exp);
-
-/* l_lock.c */
-void l_check_ns_lock(struct ldlm_namespace *ns);
-void l_check_no_ns_lock(struct ldlm_namespace *ns);
-
-extern struct dentry *ldlm_svc_debugfs_dir;
-
-struct ldlm_state {
- struct ptlrpc_service *ldlm_cb_service;
- struct ptlrpc_service *ldlm_cancel_service;
- struct ptlrpc_client *ldlm_client;
- struct ptlrpc_connection *ldlm_server_conn;
- struct ldlm_bl_pool *ldlm_bl_pool;
-};
-
-/* interval tree, for LDLM_EXTENT. */
-extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
-void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
-struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
-struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
-void ldlm_interval_free(struct ldlm_interval *node);
-/* this function must be called with res lock held */
-static inline struct ldlm_extent *
-ldlm_interval_extent(struct ldlm_interval *node)
-{
- struct ldlm_lock *lock;
-
- LASSERT(!list_empty(&node->li_group));
-
- lock = list_entry(node->li_group.next, struct ldlm_lock,
- l_sl_policy);
- return &lock->l_policy_data.l_extent;
-}
-
-int ldlm_init(void);
-void ldlm_exit(void);
-
-enum ldlm_policy_res {
- LDLM_POLICY_CANCEL_LOCK,
- LDLM_POLICY_KEEP_LOCK,
- LDLM_POLICY_SKIP_LOCK
-};
-
-typedef enum ldlm_policy_res ldlm_policy_res_t;
-
-#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
-#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
-#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
-#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; }
-#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v))
-#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b)
-
-#define LDLM_POOL_SYSFS_READER_SHOW(var, type) \
- static ssize_t var##_show(struct kobject *kobj, \
- struct attribute *attr, \
- char *buf) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- type tmp; \
- \
- spin_lock(&pl->pl_lock); \
- tmp = pl->pl_##var; \
- spin_unlock(&pl->pl_lock); \
- \
- return LDLM_POOL_SYSFS_PRINT_##type(tmp); \
- } \
- struct __##var##__dummy_read {; } /* semicolon catcher */
-
-#define LDLM_POOL_SYSFS_WRITER_STORE(var, type) \
- static ssize_t var##_store(struct kobject *kobj, \
- struct attribute *attr, \
- const char *buffer, \
- size_t count) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- unsigned long tmp; \
- int rc; \
- \
- rc = kstrtoul(buffer, 10, &tmp); \
- if (rc < 0) { \
- return rc; \
- } \
- \
- spin_lock(&pl->pl_lock); \
- LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
- spin_unlock(&pl->pl_lock); \
- \
- return count; \
- } \
- struct __##var##__dummy_write {; } /* semicolon catcher */
-
-#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type) \
- static ssize_t var##_show(struct kobject *kobj, \
- struct attribute *attr, \
- char *buf) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- \
- return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var); \
- } \
- struct __##var##__dummy_read {; } /* semicolon catcher */
-
-#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type) \
- static ssize_t var##_store(struct kobject *kobj, \
- struct attribute *attr, \
- const char *buffer, \
- size_t count) \
- { \
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
- pl_kobj); \
- unsigned long tmp; \
- int rc; \
- \
- rc = kstrtoul(buffer, 10, &tmp); \
- if (rc < 0) { \
- return rc; \
- } \
- \
- LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
- \
- return count; \
- } \
- struct __##var##__dummy_write {; } /* semicolon catcher */
-
-static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
-{
- int ret = 0;
-
- lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
- ret = 1;
- unlock_res_and_lock(lock);
-
- return ret;
-}
-
-typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *,
- ldlm_policy_data_t *);
-
-typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *,
- ldlm_wire_policy_data_t *);
-
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
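
For readers unfamiliar with the sysfs helper macros defined in ldlm_internal.h above, this is roughly what an invocation like LDLM_POOL_SYSFS_READER_SHOW(foo, u64); expands to when written out by hand. The field name foo / pl_foo is hypothetical, and the fragment only makes sense inside the kernel module; it is an expansion illustration, not a standalone program.

/* Hand expansion of LDLM_POOL_SYSFS_READER_SHOW(foo, u64); -- illustrative only */
static ssize_t foo_show(struct kobject *kobj, struct attribute *attr,
			char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
	u64 tmp;

	/* snapshot the field under pl_lock ... */
	spin_lock(&pl->pl_lock);
	tmp = pl->pl_foo;
	spin_unlock(&pl->pl_lock);

	/* ... and print it with the LDLM_POOL_SYSFS_PRINT_u64() format */
	return sprintf(buf, "%lld\n", tmp);
}
struct __foo__dummy_read {; };	/* consumes the invocation's trailing ';' */
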
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
deleted file mode 100644
index 584c4e67d90e..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ /dev/null
@@ -1,868 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/**
- * This file deals with various client/target related logic including recovery.
- *
- * TODO: This code more logically belongs in the ptlrpc module than in ldlm and
- * should be moved.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_sec.h"
-#include "ldlm_internal.h"
-
-/* @priority: If non-zero, move the selected connection to the list head.
- * @create: If zero, only search in existing connections.
- */
-static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority, int create)
-{
- struct ptlrpc_connection *ptlrpc_conn;
- struct obd_import_conn *imp_conn = NULL, *item;
- int rc = 0;
-
- if (!create && !priority) {
- CDEBUG(D_HA, "Nothing to do\n");
- return -EINVAL;
- }
-
- ptlrpc_conn = ptlrpc_uuid_to_connection(uuid);
- if (!ptlrpc_conn) {
- CDEBUG(D_HA, "can't find connection %s\n", uuid->uuid);
- return -ENOENT;
- }
-
- if (create) {
- imp_conn = kzalloc(sizeof(*imp_conn), GFP_NOFS);
- if (!imp_conn) {
- rc = -ENOMEM;
- goto out_put;
- }
- }
-
- spin_lock(&imp->imp_lock);
- list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
- if (obd_uuid_equals(uuid, &item->oic_uuid)) {
- if (priority) {
- list_del(&item->oic_item);
- list_add(&item->oic_item,
- &imp->imp_conn_list);
- item->oic_last_attempt = 0;
- }
- CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
- imp, imp->imp_obd->obd_name, uuid->uuid,
- (priority ? ", moved to head" : ""));
- spin_unlock(&imp->imp_lock);
- rc = 0;
- goto out_free;
- }
- }
- /* No existing import connection found for \a uuid. */
- if (create) {
- imp_conn->oic_conn = ptlrpc_conn;
- imp_conn->oic_uuid = *uuid;
- imp_conn->oic_last_attempt = 0;
- if (priority)
- list_add(&imp_conn->oic_item, &imp->imp_conn_list);
- else
- list_add_tail(&imp_conn->oic_item,
- &imp->imp_conn_list);
- CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
- imp, imp->imp_obd->obd_name, uuid->uuid,
- (priority ? "head" : "tail"));
- } else {
- spin_unlock(&imp->imp_lock);
- rc = -ENOENT;
- goto out_free;
- }
-
- spin_unlock(&imp->imp_lock);
- return 0;
-out_free:
- kfree(imp_conn);
-out_put:
- ptlrpc_connection_put(ptlrpc_conn);
- return rc;
-}
-
-int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid)
-{
- return import_set_conn(imp, uuid, 1, 0);
-}
-
-int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
- int priority)
-{
- return import_set_conn(imp, uuid, priority, 1);
-}
-EXPORT_SYMBOL(client_import_add_conn);
-
-int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
-{
- struct obd_import_conn *imp_conn;
- struct obd_export *dlmexp;
- int rc = -ENOENT;
-
- spin_lock(&imp->imp_lock);
- if (list_empty(&imp->imp_conn_list)) {
- LASSERT(!imp->imp_connection);
- goto out;
- }
-
- list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
- if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
- continue;
- LASSERT(imp_conn->oic_conn);
-
- if (imp_conn == imp->imp_conn_current) {
- LASSERT(imp_conn->oic_conn == imp->imp_connection);
-
- if (imp->imp_state != LUSTRE_IMP_CLOSED &&
- imp->imp_state != LUSTRE_IMP_DISCON) {
- CERROR("can't remove current connection\n");
- rc = -EBUSY;
- goto out;
- }
-
- ptlrpc_connection_put(imp->imp_connection);
- imp->imp_connection = NULL;
-
- dlmexp = class_conn2export(&imp->imp_dlm_handle);
- if (dlmexp && dlmexp->exp_connection) {
- LASSERT(dlmexp->exp_connection ==
- imp_conn->oic_conn);
- ptlrpc_connection_put(dlmexp->exp_connection);
- dlmexp->exp_connection = NULL;
- }
- }
-
- list_del(&imp_conn->oic_item);
- ptlrpc_connection_put(imp_conn->oic_conn);
- kfree(imp_conn);
- CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
- imp, imp->imp_obd->obd_name, uuid->uuid);
- rc = 0;
- break;
- }
-out:
- spin_unlock(&imp->imp_lock);
- if (rc == -ENOENT)
- CERROR("connection %s not found\n", uuid->uuid);
- return rc;
-}
-EXPORT_SYMBOL(client_import_del_conn);
-
-/**
- * Find conn UUID by peer NID. \a peer is a server NID. This function is used
- * to find a conn uuid of \a imp which can reach \a peer.
- */
-int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
- struct obd_uuid *uuid)
-{
- struct obd_import_conn *conn;
- int rc = -ENOENT;
-
- spin_lock(&imp->imp_lock);
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
- /* Check if conn UUID does have this peer NID. */
- if (class_check_uuid(&conn->oic_uuid, peer)) {
- *uuid = conn->oic_uuid;
- rc = 0;
- break;
- }
- }
- spin_unlock(&imp->imp_lock);
- return rc;
-}
-EXPORT_SYMBOL(client_import_find_conn);
-
-void client_destroy_import(struct obd_import *imp)
-{
- /* Drop security policy instance after all RPCs have finished/aborted
- * to let all busy contexts be released. */
- class_import_get(imp);
- class_destroy_import(imp);
- sptlrpc_import_sec_put(imp);
- class_import_put(imp);
-}
-EXPORT_SYMBOL(client_destroy_import);
-
-/**
- * Check whether or not the OSC is on MDT.
- * In the config log,
- * osc on MDT
- * setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID
- * osc on client
- * setup 0:{fsname}-OSTxxxx-osc 1:lustre-OST0000_UUID 2:NID
- *
- **/
-static int osc_on_mdt(char *obdname)
-{
- char *ptr;
-
- ptr = strrchr(obdname, '-');
- if (ptr == NULL)
- return 0;
-
- if (strncmp(ptr + 1, "MDT", 3) == 0)
- return 1;
-
- return 0;
-}
-
-/* Configure an RPC client OBD device.
- *
- * lcfg parameters:
- * 1 - client UUID
- * 2 - server UUID
- * 3 - inactive-on-startup
- */
-int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
-{
- struct client_obd *cli = &obddev->u.cli;
- struct obd_import *imp;
- struct obd_uuid server_uuid;
- int rq_portal, rp_portal, connect_op;
- char *name = obddev->obd_type->typ_name;
- ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
- int rc;
-
- /* In a more perfect world, we would hang a ptlrpc_client off of
- * obd_type and just use the values from there. */
- if (!strcmp(name, LUSTRE_OSC_NAME)) {
- rq_portal = OST_REQUEST_PORTAL;
- rp_portal = OSC_REPLY_PORTAL;
- connect_op = OST_CONNECT;
- cli->cl_sp_me = LUSTRE_SP_CLI;
- cli->cl_sp_to = LUSTRE_SP_OST;
- ns_type = LDLM_NS_TYPE_OSC;
- } else if (!strcmp(name, LUSTRE_MDC_NAME) ||
- !strcmp(name, LUSTRE_LWP_NAME)) {
- rq_portal = MDS_REQUEST_PORTAL;
- rp_portal = MDC_REPLY_PORTAL;
- connect_op = MDS_CONNECT;
- cli->cl_sp_me = LUSTRE_SP_CLI;
- cli->cl_sp_to = LUSTRE_SP_MDT;
- ns_type = LDLM_NS_TYPE_MDC;
- } else if (!strcmp(name, LUSTRE_OSP_NAME)) {
- if (strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL) {
- /* OSP_on_MDT for other MDTs */
- connect_op = MDS_CONNECT;
- cli->cl_sp_to = LUSTRE_SP_MDT;
- ns_type = LDLM_NS_TYPE_MDC;
- rq_portal = OUT_PORTAL;
- } else {
- /* OSP on MDT for OST */
- connect_op = OST_CONNECT;
- cli->cl_sp_to = LUSTRE_SP_OST;
- ns_type = LDLM_NS_TYPE_OSC;
- rq_portal = OST_REQUEST_PORTAL;
- }
- rp_portal = OSC_REPLY_PORTAL;
- cli->cl_sp_me = LUSTRE_SP_CLI;
- } else if (!strcmp(name, LUSTRE_MGC_NAME)) {
- rq_portal = MGS_REQUEST_PORTAL;
- rp_portal = MGC_REPLY_PORTAL;
- connect_op = MGS_CONNECT;
- cli->cl_sp_me = LUSTRE_SP_MGC;
- cli->cl_sp_to = LUSTRE_SP_MGS;
- cli->cl_flvr_mgc.sf_rpc = SPTLRPC_FLVR_INVALID;
- ns_type = LDLM_NS_TYPE_MGC;
- } else {
- CERROR("unknown client OBD type \"%s\", can't setup\n",
- name);
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("requires a TARGET UUID\n");
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
- CERROR("client UUID must be less than 38 characters\n");
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
- CERROR("setup requires a SERVER UUID\n");
- return -EINVAL;
- }
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
- CERROR("target UUID must be less than 38 characters\n");
- return -EINVAL;
- }
-
- init_rwsem(&cli->cl_sem);
- cli->cl_conn_count = 0;
- memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
- min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
- sizeof(server_uuid)));
-
- cli->cl_dirty = 0;
- cli->cl_avail_grant = 0;
- /* FIXME: Should limit this for the sum of all cl_dirty_max. */
- cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
- cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
- INIT_LIST_HEAD(&cli->cl_cache_waiters);
- INIT_LIST_HEAD(&cli->cl_loi_ready_list);
- INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
- INIT_LIST_HEAD(&cli->cl_loi_write_list);
- INIT_LIST_HEAD(&cli->cl_loi_read_list);
- client_obd_list_lock_init(&cli->cl_loi_list_lock);
- atomic_set(&cli->cl_pending_w_pages, 0);
- atomic_set(&cli->cl_pending_r_pages, 0);
- cli->cl_r_in_flight = 0;
- cli->cl_w_in_flight = 0;
-
- spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
- spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
- spin_lock_init(&cli->cl_read_page_hist.oh_lock);
- spin_lock_init(&cli->cl_write_page_hist.oh_lock);
- spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
- spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
-
- /* lru for osc. */
- INIT_LIST_HEAD(&cli->cl_lru_osc);
- atomic_set(&cli->cl_lru_shrinkers, 0);
- atomic_set(&cli->cl_lru_busy, 0);
- atomic_set(&cli->cl_lru_in_list, 0);
- INIT_LIST_HEAD(&cli->cl_lru_list);
- client_obd_list_lock_init(&cli->cl_lru_list_lock);
-
- init_waitqueue_head(&cli->cl_destroy_waitq);
- atomic_set(&cli->cl_destroy_in_flight, 0);
- /* Turn on checksumming by default. */
- cli->cl_checksum = 1;
- /*
-	 * The supported checksum types will be worked out at connect time.
-	 * Set cl_cksum* to CRC32 for now to avoid returning bogus info
-	 * through procfs.
- */
- cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
- atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
-
- /* This value may be reduced at connect time in
-	 * ptlrpc_connect_interpret(). We initialize it to only
- * 1MB until we know what the performance looks like.
- * In the future this should likely be increased. LU-1431 */
- cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
- LNET_MTU >> PAGE_CACHE_SHIFT);
-
- if (!strcmp(name, LUSTRE_MDC_NAME)) {
- cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
- cli->cl_max_rpcs_in_flight = 2;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
- cli->cl_max_rpcs_in_flight = 3;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
- cli->cl_max_rpcs_in_flight = 4;
- } else {
- if (osc_on_mdt(obddev->obd_name))
- cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT;
- else
- cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
- }
- rc = ldlm_get_ref();
- if (rc) {
- CERROR("ldlm_get_ref failed: %d\n", rc);
- goto err;
- }
-
- ptlrpc_init_client(rq_portal, rp_portal, name,
- &obddev->obd_ldlm_client);
-
- imp = class_new_import(obddev);
- if (imp == NULL) {
- rc = -ENOENT;
- goto err_ldlm;
- }
- imp->imp_client = &obddev->obd_ldlm_client;
- imp->imp_connect_op = connect_op;
- memcpy(cli->cl_target_uuid.uuid, lustre_cfg_buf(lcfg, 1),
- LUSTRE_CFG_BUFLEN(lcfg, 1));
- class_import_put(imp);
-
- rc = client_import_add_conn(imp, &server_uuid, 1);
- if (rc) {
- CERROR("can't add initial connection\n");
- goto err_import;
- }
-
- cli->cl_import = imp;
- /* cli->cl_max_mds_{easize,cookiesize} updated by mdc_init_ea_size() */
- cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3);
- cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
- if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
- CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
- name, obddev->obd_name,
- cli->cl_target_uuid.uuid);
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
- }
- }
-
- obddev->obd_namespace = ldlm_namespace_new(obddev, obddev->obd_name,
- LDLM_NAMESPACE_CLIENT,
- LDLM_NAMESPACE_GREEDY,
- ns_type);
- if (obddev->obd_namespace == NULL) {
- CERROR("Unable to create client namespace - %s\n",
- obddev->obd_name);
- rc = -ENOMEM;
- goto err_import;
- }
-
- cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
-
- return rc;
-
-err_import:
- class_destroy_import(imp);
-err_ldlm:
- ldlm_put_ref();
-err:
- return rc;
-
-}
-EXPORT_SYMBOL(client_obd_setup);
-
-int client_obd_cleanup(struct obd_device *obddev)
-{
- ldlm_namespace_free_post(obddev->obd_namespace);
- obddev->obd_namespace = NULL;
-
- LASSERT(obddev->u.cli.cl_import == NULL);
-
- ldlm_put_ref();
- return 0;
-}
-EXPORT_SYMBOL(client_obd_cleanup);
-
-/* ->o_connect() method for client side (OSC and MDC and MGC) */
-int client_connect_import(const struct lu_env *env,
- struct obd_export **exp,
- struct obd_device *obd, struct obd_uuid *cluuid,
- struct obd_connect_data *data, void *localdata)
-{
- struct client_obd *cli = &obd->u.cli;
- struct obd_import *imp = cli->cl_import;
- struct obd_connect_data *ocd;
- struct lustre_handle conn = { 0 };
- int rc;
-
- *exp = NULL;
- down_write(&cli->cl_sem);
- if (cli->cl_conn_count > 0) {
- rc = -EALREADY;
- goto out_sem;
- }
-
- rc = class_connect(&conn, obd, cluuid);
- if (rc)
- goto out_sem;
-
- cli->cl_conn_count++;
- *exp = class_conn2export(&conn);
-
- LASSERT(obd->obd_namespace);
-
- imp->imp_dlm_handle = conn;
- rc = ptlrpc_init_import(imp);
- if (rc != 0)
- goto out_ldlm;
-
- ocd = &imp->imp_connect_data;
- if (data) {
- *ocd = *data;
- imp->imp_connect_flags_orig = data->ocd_connect_flags;
- }
-
- rc = ptlrpc_connect_import(imp);
- if (rc != 0) {
- LASSERT(imp->imp_state == LUSTRE_IMP_DISCON);
- goto out_ldlm;
- }
- LASSERT(*exp != NULL && (*exp)->exp_connection);
-
- if (data) {
- LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
- ocd->ocd_connect_flags, "old %#llx, new %#llx\n",
- data->ocd_connect_flags, ocd->ocd_connect_flags);
- data->ocd_connect_flags = ocd->ocd_connect_flags;
- }
-
- ptlrpc_pinger_add_import(imp);
-
- if (rc) {
-out_ldlm:
- cli->cl_conn_count--;
- class_disconnect(*exp);
- *exp = NULL;
- }
-out_sem:
- up_write(&cli->cl_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(client_connect_import);
-
-int client_disconnect_export(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct client_obd *cli;
- struct obd_import *imp;
- int rc = 0, err;
-
- if (!obd) {
- CERROR("invalid export for disconnect: exp %p cookie %#llx\n",
- exp, exp ? exp->exp_handle.h_cookie : -1);
- return -EINVAL;
- }
-
- cli = &obd->u.cli;
- imp = cli->cl_import;
-
- down_write(&cli->cl_sem);
- CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
- cli->cl_conn_count);
-
- if (!cli->cl_conn_count) {
- CERROR("disconnecting disconnected device (%s)\n",
- obd->obd_name);
- rc = -EINVAL;
- goto out_disconnect;
- }
-
- cli->cl_conn_count--;
- if (cli->cl_conn_count) {
- rc = 0;
- goto out_disconnect;
- }
-
- /* Mark import deactivated now, so we don't try to reconnect if any
- * of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't
- * fully deactivate the import, or that would drop all requests. */
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
-
-	/* Some non-replayable imports (MDS's OSCs) are pinged, so just
-	 * delete the import regardless. (It's safe to delete an import
-	 * that was never added.) */
- (void)ptlrpc_pinger_del_import(imp);
-
- if (obd->obd_namespace != NULL) {
- /* obd_force == local only */
- ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
- obd->obd_force ? LCF_LOCAL : 0, NULL);
- ldlm_namespace_free_prior(obd->obd_namespace, imp,
- obd->obd_force);
- }
-
- /* There's no need to hold sem while disconnecting an import,
- * and it may actually cause deadlock in GSS. */
- up_write(&cli->cl_sem);
- rc = ptlrpc_disconnect_import(imp, 0);
- down_write(&cli->cl_sem);
-
- ptlrpc_invalidate_import(imp);
-
-out_disconnect:
-	/* Use server style: class_disconnect() should always be called for
-	 * o_disconnect. */
- err = class_disconnect(exp);
- if (!rc && err)
- rc = err;
-
- up_write(&cli->cl_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(client_disconnect_export);
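-
-/*
- * Rough usage sketch (illustrative only): connects are counted through
- * cli->cl_conn_count, so callers pair the two entry points, e.g.:
- *
- *	rc = client_connect_import(env, &exp, obd, cluuid, data, NULL);
- *	...
- *	rc = client_disconnect_export(exp);
- *
- * Only the last disconnect actually cancels unused locks and tears the
- * import down.
- */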
-
-
-/**
- * Packs current SLV and Limit into \a req.
- */
-int target_pack_pool_reply(struct ptlrpc_request *req)
-{
- struct obd_device *obd;
-
- /* Check that we still have all structures alive as this may
- * be some late RPC at shutdown time. */
- if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
- !exp_connect_lru_resize(req->rq_export))) {
- lustre_msg_set_slv(req->rq_repmsg, 0);
- lustre_msg_set_limit(req->rq_repmsg, 0);
- return 0;
- }
-
- /* OBD is alive here as export is alive, which we checked above. */
- obd = req->rq_export->exp_obd;
-
- read_lock(&obd->obd_pool_lock);
- lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
- lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
- read_unlock(&obd->obd_pool_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(target_pack_pool_reply);
-
-static int
-target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id)
-{
- if (OBD_FAIL_CHECK_ORSET(fail_id & ~OBD_FAIL_ONCE, OBD_FAIL_ONCE)) {
- DEBUG_REQ(D_ERROR, req, "dropping reply");
- return -ECOMM;
- }
-
- if (unlikely(rc)) {
- DEBUG_REQ(D_NET, req, "processing error (%d)", rc);
- req->rq_status = rc;
- return ptlrpc_send_error(req, 1);
- }
-
- DEBUG_REQ(D_NET, req, "sending reply");
- return ptlrpc_send_reply(req, PTLRPC_REPLY_MAYBE_DIFFICULT);
-}
-
-void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
-{
- struct ptlrpc_service_part *svcpt;
- int netrc;
- struct ptlrpc_reply_state *rs;
- struct obd_export *exp;
-
- if (req->rq_no_reply)
- return;
-
- svcpt = req->rq_rqbd->rqbd_svcpt;
- rs = req->rq_reply_state;
- if (rs == NULL || !rs->rs_difficult) {
- /* no notifiers */
- target_send_reply_msg(req, rc, fail_id);
- return;
- }
-
- /* must be an export if locks saved */
- LASSERT(req->rq_export != NULL);
- /* req/reply consistent */
- LASSERT(rs->rs_svcpt == svcpt);
-
- /* "fresh" reply */
- LASSERT(!rs->rs_scheduled);
- LASSERT(!rs->rs_scheduled_ever);
- LASSERT(!rs->rs_handled);
- LASSERT(!rs->rs_on_net);
- LASSERT(rs->rs_export == NULL);
- LASSERT(list_empty(&rs->rs_obd_list));
- LASSERT(list_empty(&rs->rs_exp_list));
-
- exp = class_export_get(req->rq_export);
-
- /* disable reply scheduling while I'm setting up */
- rs->rs_scheduled = 1;
- rs->rs_on_net = 1;
- rs->rs_xid = req->rq_xid;
- rs->rs_transno = req->rq_transno;
- rs->rs_export = exp;
- rs->rs_opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- spin_lock(&exp->exp_uncommitted_replies_lock);
- CDEBUG(D_NET, "rs transno = %llu, last committed = %llu\n",
- rs->rs_transno, exp->exp_last_committed);
- if (rs->rs_transno > exp->exp_last_committed) {
- /* not committed already */
- list_add_tail(&rs->rs_obd_list,
- &exp->exp_uncommitted_replies);
- }
- spin_unlock(&exp->exp_uncommitted_replies_lock);
-
- spin_lock(&exp->exp_lock);
- list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
- spin_unlock(&exp->exp_lock);
-
- netrc = target_send_reply_msg(req, rc, fail_id);
-
- spin_lock(&svcpt->scp_rep_lock);
-
- atomic_inc(&svcpt->scp_nreps_difficult);
-
- if (netrc != 0) {
-		/* Error sending: the reply is off the net. We also need +1
-		 * reply ref until ptlrpc_handle_rs() is done with the reply
-		 * state (if the send had been successful, there would have
-		 * been +1 ref for the net, which reply_out_callback leaves
-		 * alone). */
- rs->rs_on_net = 0;
- ptlrpc_rs_addref(rs);
- }
-
- spin_lock(&rs->rs_lock);
- if (rs->rs_transno <= exp->exp_last_committed ||
- (!rs->rs_on_net && !rs->rs_no_ack) ||
- list_empty(&rs->rs_exp_list) || /* completed already */
- list_empty(&rs->rs_obd_list)) {
- CDEBUG(D_HA, "Schedule reply immediately\n");
- ptlrpc_dispatch_difficult_reply(rs);
- } else {
- list_add(&rs->rs_list, &svcpt->scp_rep_active);
- rs->rs_scheduled = 0; /* allow notifier to schedule */
- }
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svcpt->scp_rep_lock);
-}
-EXPORT_SYMBOL(target_send_reply);
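-
-/*
- * Hedged note: a request handler normally finishes with a call such as
- * target_send_reply(req, rc, fail_id), where fail_id names a test-only
- * fault-injection point (hypothetical here).  "Difficult" replies
- * (rs_difficult set) are parked on the per-partition lists above until
- * their transno commits or the reply leaves the net.
- */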
-
-ldlm_mode_t lck_compat_array[] = {
- [LCK_EX] = LCK_COMPAT_EX,
- [LCK_PW] = LCK_COMPAT_PW,
- [LCK_PR] = LCK_COMPAT_PR,
- [LCK_CW] = LCK_COMPAT_CW,
- [LCK_CR] = LCK_COMPAT_CR,
- [LCK_NL] = LCK_COMPAT_NL,
- [LCK_GROUP] = LCK_COMPAT_GROUP,
- [LCK_COS] = LCK_COMPAT_COS,
-};
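-
-/*
- * Illustrative use of the table above: compatibility between a granted
- * mode and a requested mode is checked by indexing lck_compat_array with
- * the granted mode and testing the requested mode's bit, roughly:
- *
- *	compatible = (lck_compat_array[granted] & requested) != 0;
- */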
-
-/**
- * Rather arbitrary mapping from LDLM error codes to errno values. This should
- * not escape to the user level.
- */
-int ldlm_error2errno(ldlm_error_t error)
-{
- int result;
-
- switch (error) {
- case ELDLM_OK:
- result = 0;
- break;
- case ELDLM_LOCK_CHANGED:
- result = -ESTALE;
- break;
- case ELDLM_LOCK_ABORTED:
- result = -ENAVAIL;
- break;
- case ELDLM_LOCK_REPLACED:
- result = -ESRCH;
- break;
- case ELDLM_NO_LOCK_DATA:
- result = -ENOENT;
- break;
- case ELDLM_NAMESPACE_EXISTS:
- result = -EEXIST;
- break;
- case ELDLM_BAD_NAMESPACE:
- result = -EBADF;
- break;
- default:
- if (((int)error) < 0) /* cast to signed type */
- result = error; /* as ldlm_error_t can be unsigned */
- else {
- CERROR("Invalid DLM result code: %d\n", error);
- result = -EPROTO;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(ldlm_error2errno);
-
-/**
- * Dual to ldlm_error2errno(): maps errno values back to ldlm_error_t.
- */
-ldlm_error_t ldlm_errno2error(int err_no)
-{
- int error;
-
- switch (err_no) {
- case 0:
- error = ELDLM_OK;
- break;
- case -ESTALE:
- error = ELDLM_LOCK_CHANGED;
- break;
- case -ENAVAIL:
- error = ELDLM_LOCK_ABORTED;
- break;
- case -ESRCH:
- error = ELDLM_LOCK_REPLACED;
- break;
- case -ENOENT:
- error = ELDLM_NO_LOCK_DATA;
- break;
- case -EEXIST:
- error = ELDLM_NAMESPACE_EXISTS;
- break;
- case -EBADF:
- error = ELDLM_BAD_NAMESPACE;
- break;
- default:
- error = err_no;
- }
- return error;
-}
-EXPORT_SYMBOL(ldlm_errno2error);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-void ldlm_dump_export_locks(struct obd_export *exp)
-{
- spin_lock(&exp->exp_locks_list_guard);
- if (!list_empty(&exp->exp_locks_list)) {
- struct ldlm_lock *lock;
-
-		CERROR("dumping locks for export %p, ignore if the unmount doesn't hang\n",
- exp);
- list_for_each_entry(lock, &exp->exp_locks_list,
- l_exp_refs_link)
- LDLM_ERROR(lock, "lock:");
- }
- spin_unlock(&exp->exp_locks_list_guard);
-}
-#endif
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
deleted file mode 100644
index 12eb5ac3b3d8..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ /dev/null
@@ -1,2322 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_lock.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/lustre_intent.h"
-#include "../include/obd_class.h"
-#include "ldlm_internal.h"
-
-/* lock types */
-char *ldlm_lockname[] = {
- [0] = "--",
- [LCK_EX] = "EX",
- [LCK_PW] = "PW",
- [LCK_PR] = "PR",
- [LCK_CW] = "CW",
- [LCK_CR] = "CR",
- [LCK_NL] = "NL",
- [LCK_GROUP] = "GROUP",
- [LCK_COS] = "COS",
-};
-EXPORT_SYMBOL(ldlm_lockname);
-
-char *ldlm_typename[] = {
- [LDLM_PLAIN] = "PLN",
- [LDLM_EXTENT] = "EXT",
- [LDLM_FLOCK] = "FLK",
- [LDLM_IBITS] = "IBT",
-};
-EXPORT_SYMBOL(ldlm_typename);
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
-};
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
-};
-
-static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_local_to_wire,
- [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
- [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_local_to_wire,
- [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_local_to_wire,
-};
-
-/**
- * Converts lock policy from local format to on the wire lock_desc format
- */
-void ldlm_convert_policy_to_wire(ldlm_type_t type,
- const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
-{
- ldlm_policy_local_to_wire_t convert;
-
- convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
-
- convert(lpolicy, wpolicy);
-}
-
-/**
- * Converts lock policy from on the wire lock_desc format to local format
- */
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- ldlm_policy_wire_to_local_t convert;
- int new_client;
-
- /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */
- new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
- if (new_client)
- convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
- else
- convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
-
- convert(wpolicy, lpolicy);
-}
-
-char *ldlm_it2str(int it)
-{
- switch (it) {
- case IT_OPEN:
- return "open";
- case IT_CREAT:
- return "creat";
- case (IT_OPEN | IT_CREAT):
- return "open|creat";
- case IT_READDIR:
- return "readdir";
- case IT_GETATTR:
- return "getattr";
- case IT_LOOKUP:
- return "lookup";
- case IT_UNLINK:
- return "unlink";
- case IT_GETXATTR:
- return "getxattr";
- case IT_LAYOUT:
- return "layout";
- default:
- CERROR("Unknown intent %d\n", it);
- return "UNKNOWN";
- }
-}
-EXPORT_SYMBOL(ldlm_it2str);
-
-
-void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
-{
- ns->ns_policy = arg;
-}
-EXPORT_SYMBOL(ldlm_register_intent);
-
-/*
- * REFCOUNTED LOCK OBJECTS
- */
-
-
-/**
- * Get a reference on a lock.
- *
- * Lock refcounts, during creation:
- * - one special one for allocation, dec'd only once in destroy
- * - one for being a lock that's in-use
- * - one for the addref associated with a new lock
- */
-struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
-{
- atomic_inc(&lock->l_refc);
- return lock;
-}
-EXPORT_SYMBOL(ldlm_lock_get);
-
-/**
- * Release lock reference.
- *
- * Also frees the lock if this was the last reference.
- */
-void ldlm_lock_put(struct ldlm_lock *lock)
-{
- LASSERT(lock->l_resource != LP_POISON);
- LASSERT(atomic_read(&lock->l_refc) > 0);
- if (atomic_dec_and_test(&lock->l_refc)) {
- struct ldlm_resource *res;
-
- LDLM_DEBUG(lock,
- "final lock_put on destroyed lock, freeing it.");
-
- res = lock->l_resource;
- LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_pending_chain));
-
- lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
- LDLM_NSS_LOCKS);
- lu_ref_del(&res->lr_reference, "lock", lock);
- ldlm_resource_putref(res);
- lock->l_resource = NULL;
- if (lock->l_export) {
- class_export_lock_put(lock->l_export, lock);
- lock->l_export = NULL;
- }
-
- kfree(lock->l_lvb_data);
-
- ldlm_interval_free(ldlm_interval_detach(lock));
- lu_ref_fini(&lock->l_reference);
- OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
- }
-}
-EXPORT_SYMBOL(ldlm_lock_put);
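-
-/*
- * Reference sketch (illustrative): every ldlm_lock_get()/LDLM_LOCK_GET()
- * must be balanced by ldlm_lock_put()/LDLM_LOCK_PUT(); the final put on a
- * destroyed lock is what frees it, e.g.:
- *
- *	lock = LDLM_LOCK_GET(lock);
- *	... use lock ...
- *	LDLM_LOCK_PUT(lock);
- */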
-
-/**
- * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
- */
-int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
-{
- int rc = 0;
-
- if (!list_empty(&lock->l_lru)) {
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_del_init(&lock->l_lru);
- LASSERT(ns->ns_nr_unused > 0);
- ns->ns_nr_unused--;
- rc = 1;
- }
- return rc;
-}
-
-/**
- * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
- */
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc;
-
- if (lock->l_flags & LDLM_FL_NS_SRV) {
- LASSERT(list_empty(&lock->l_lru));
- return 0;
- }
-
- spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
- spin_unlock(&ns->ns_lock);
- return rc;
-}
-
-/**
- * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked.
- */
-void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- lock->l_last_used = cfs_time_current();
- LASSERT(list_empty(&lock->l_lru));
- LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
- list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- if (lock->l_flags & LDLM_FL_SKIPPED)
- lock->l_flags &= ~LDLM_FL_SKIPPED;
- LASSERT(ns->ns_nr_unused >= 0);
- ns->ns_nr_unused++;
-}
-
-/**
- * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks
- * first.
- */
-void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- spin_lock(&ns->ns_lock);
- ldlm_lock_add_to_lru_nolock(lock);
- spin_unlock(&ns->ns_lock);
-}
-
-/**
- * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
- * the LRU. Performs necessary LRU locking
- */
-void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
-{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
- if (lock->l_flags & LDLM_FL_NS_SRV) {
- LASSERT(list_empty(&lock->l_lru));
- return;
- }
-
- spin_lock(&ns->ns_lock);
- if (!list_empty(&lock->l_lru)) {
- ldlm_lock_remove_from_lru_nolock(lock);
- ldlm_lock_add_to_lru_nolock(lock);
- }
- spin_unlock(&ns->ns_lock);
-}
-
-/**
- * Helper to destroy a locked lock.
- *
- * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock
- * Must be called with l_lock and lr_lock held.
- *
- * Does not actually free the lock data, but rather marks the lock as
- * destroyed by setting the LDLM_FL_DESTROYED flag. Destroys the
- * handle->lock association too, so that the lock can no longer be found,
- * and removes the lock from the LRU list. Actual lock freeing occurs when
- * the last lock reference goes away.
- *
- * Original comment (of some historical value):
- * This used to have a 'strict' flag, which recovery would use to mark an
- * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
- * shall explain why it's gone: with the new hash table scheme, once you call
- * ldlm_lock_destroy, you can never drop your final references on this lock.
- * Because it's not in the hash table anymore. -phil
- */
-int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
-{
- if (lock->l_readers || lock->l_writers) {
- LDLM_ERROR(lock, "lock still has references");
- LBUG();
- }
-
- if (!list_empty(&lock->l_res_link)) {
- LDLM_ERROR(lock, "lock still on resource");
- LBUG();
- }
-
- if (lock->l_flags & LDLM_FL_DESTROYED) {
- LASSERT(list_empty(&lock->l_lru));
- return 0;
- }
- lock->l_flags |= LDLM_FL_DESTROYED;
-
- if (lock->l_export && lock->l_export->exp_lock_hash) {
-		/* NB: it's safe to call cfs_hash_del() even if the lock isn't
-		 * in exp_lock_hash. */
- /* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
- /* coverity[overrun-buffer-val] */
- cfs_hash_del(lock->l_export->exp_lock_hash,
- &lock->l_remote_handle, &lock->l_exp_hash);
- }
-
- ldlm_lock_remove_from_lru(lock);
- class_handle_unhash(&lock->l_handle);
-
-#if 0
- /* Wake anyone waiting for this lock */
- /* FIXME: I should probably add yet another flag, instead of using
- * l_export to only call this on clients */
- if (lock->l_export)
- class_export_put(lock->l_export);
- lock->l_export = NULL;
- if (lock->l_export && lock->l_completion_ast)
- lock->l_completion_ast(lock, 0);
-#endif
- return 1;
-}
-
-/**
- * Destroys a LDLM lock \a lock. Performs necessary locking first.
- */
-void ldlm_lock_destroy(struct ldlm_lock *lock)
-{
- int first;
-
- lock_res_and_lock(lock);
- first = ldlm_lock_destroy_internal(lock);
- unlock_res_and_lock(lock);
-
- /* drop reference from hashtable only for first destroy */
- if (first) {
- lu_ref_del(&lock->l_reference, "hash", lock);
- LDLM_LOCK_RELEASE(lock);
- }
-}
-
-/**
- * Destroys a LDLM lock \a lock that is already locked.
- */
-void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
-{
- int first;
-
- first = ldlm_lock_destroy_internal(lock);
- /* drop reference from hashtable only for first destroy */
- if (first) {
- lu_ref_del(&lock->l_reference, "hash", lock);
- LDLM_LOCK_RELEASE(lock);
- }
-}
-
-/* this is called by portals_handle2object with the handle lock taken */
-static void lock_handle_addref(void *lock)
-{
- LDLM_LOCK_GET((struct ldlm_lock *)lock);
-}
-
-static void lock_handle_free(void *lock, int size)
-{
- LASSERT(size == sizeof(struct ldlm_lock));
- OBD_SLAB_FREE(lock, ldlm_lock_slab, size);
-}
-
-struct portals_handle_ops lock_handle_ops = {
- .hop_addref = lock_handle_addref,
- .hop_free = lock_handle_free,
-};
-
-/**
- * Allocate and initialize a new lock structure.
- *
- * usage: pass in a resource on which you have done ldlm_resource_get();
- *        the new lock will take over the refcount.
- * returns: lock with refcount 2 - one for the current caller and one for
- *          the remote side
- */
-static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
-{
- struct ldlm_lock *lock;
-
- if (resource == NULL)
- LBUG();
-
- OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS);
- if (lock == NULL)
- return NULL;
-
- spin_lock_init(&lock->l_lock);
- lock->l_resource = resource;
- lu_ref_add(&resource->lr_reference, "lock", lock);
-
- atomic_set(&lock->l_refc, 2);
- INIT_LIST_HEAD(&lock->l_res_link);
- INIT_LIST_HEAD(&lock->l_lru);
- INIT_LIST_HEAD(&lock->l_pending_chain);
- INIT_LIST_HEAD(&lock->l_bl_ast);
- INIT_LIST_HEAD(&lock->l_cp_ast);
- INIT_LIST_HEAD(&lock->l_rk_ast);
- init_waitqueue_head(&lock->l_waitq);
- lock->l_blocking_lock = NULL;
- INIT_LIST_HEAD(&lock->l_sl_mode);
- INIT_LIST_HEAD(&lock->l_sl_policy);
- INIT_HLIST_NODE(&lock->l_exp_hash);
- INIT_HLIST_NODE(&lock->l_exp_flock_hash);
-
- lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
- LDLM_NSS_LOCKS);
- INIT_LIST_HEAD(&lock->l_handle.h_link);
- class_handle_hash(&lock->l_handle, &lock_handle_ops);
-
- lu_ref_init(&lock->l_reference);
- lu_ref_add(&lock->l_reference, "hash", lock);
- lock->l_callback_timeout = 0;
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- INIT_LIST_HEAD(&lock->l_exp_refs_link);
- lock->l_exp_refs_nr = 0;
- lock->l_exp_refs_target = NULL;
-#endif
- INIT_LIST_HEAD(&lock->l_exp_list);
-
- return lock;
-}
-
-/**
- * Moves LDLM lock \a lock to another resource.
- * This is used on the client when the server returns a lock other than the
- * one requested (typically as a result of an intent operation).
- */
-int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
- const struct ldlm_res_id *new_resid)
-{
- struct ldlm_resource *oldres = lock->l_resource;
- struct ldlm_resource *newres;
- int type;
-
- LASSERT(ns_is_client(ns));
-
- lock_res_and_lock(lock);
- if (memcmp(new_resid, &lock->l_resource->lr_name,
- sizeof(lock->l_resource->lr_name)) == 0) {
- /* Nothing to do */
- unlock_res_and_lock(lock);
- return 0;
- }
-
- LASSERT(new_resid->name[0] != 0);
-
- /* This function assumes that the lock isn't on any lists */
- LASSERT(list_empty(&lock->l_res_link));
-
- type = oldres->lr_type;
- unlock_res_and_lock(lock);
-
- newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
- if (newres == NULL)
- return -ENOMEM;
-
- lu_ref_add(&newres->lr_reference, "lock", lock);
- /*
- * To flip the lock from the old to the new resource, lock, oldres and
- * newres have to be locked. Resource spin-locks are nested within
- * lock->l_lock, and are taken in the memory address order to avoid
- * dead-locks.
- */
- spin_lock(&lock->l_lock);
- oldres = lock->l_resource;
- if (oldres < newres) {
- lock_res(oldres);
- lock_res_nested(newres, LRT_NEW);
- } else {
- lock_res(newres);
- lock_res_nested(oldres, LRT_NEW);
- }
- LASSERT(memcmp(new_resid, &oldres->lr_name,
- sizeof(oldres->lr_name)) != 0);
- lock->l_resource = newres;
- unlock_res(oldres);
- unlock_res_and_lock(lock);
-
- /* ...and the flowers are still standing! */
- lu_ref_del(&oldres->lr_reference, "lock", lock);
- ldlm_resource_putref(oldres);
-
- return 0;
-}
-EXPORT_SYMBOL(ldlm_lock_change_resource);
-
-/** \defgroup ldlm_handles LDLM HANDLES
- * Ways to get hold of locks without any addresses.
- * @{
- */
-
-/**
- * Fills in handle for LDLM lock \a lock into supplied \a lockh
- * Does not take any references.
- */
-void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
-{
- lockh->cookie = lock->l_handle.h_cookie;
-}
-EXPORT_SYMBOL(ldlm_lock2handle);
-
-/**
- * Obtain a lock reference by handle.
- *
- * If \a flags is non-zero: atomically get the lock and set the flags.
- *              Returns NULL if any of the flags is already set.
- */
-struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
- __u64 flags)
-{
- struct ldlm_lock *lock;
-
- LASSERT(handle);
-
- lock = class_handle2object(handle->cookie);
- if (lock == NULL)
- return NULL;
-
- /* It's unlikely but possible that someone marked the lock as
- * destroyed after we did handle2object on it */
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
- lu_ref_add(&lock->l_reference, "handle", current);
- return lock;
- }
-
- lock_res_and_lock(lock);
-
- LASSERT(lock->l_resource != NULL);
-
- lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
- unlock_res_and_lock(lock);
- CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
-
- if (flags && (lock->l_flags & flags)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
-
- if (flags)
- lock->l_flags |= flags;
-
- unlock_res_and_lock(lock);
- return lock;
-}
-EXPORT_SYMBOL(__ldlm_handle2lock);
-/** @} ldlm_handles */
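-
-/*
- * Handle round trip, for illustration: a lock is published as an opaque
- * cookie and later resolved back into a referenced lock, e.g.:
- *
- *	struct lustre_handle lockh;
- *
- *	ldlm_lock2handle(lock, &lockh);
- *	...
- *	lock = ldlm_handle2lock(&lockh);  (NULL if already destroyed)
- *	if (lock)
- *		LDLM_LOCK_PUT(lock);
- */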
-
-/**
- * Fill in "on the wire" representation for given LDLM lock into supplied
- * lock descriptor \a desc structure.
- */
-void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
-{
- ldlm_res2desc(lock->l_resource, &desc->l_resource);
- desc->l_req_mode = lock->l_req_mode;
- desc->l_granted_mode = lock->l_granted_mode;
- ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
- &lock->l_policy_data,
- &desc->l_policy_data);
-}
-EXPORT_SYMBOL(ldlm_lock2desc);
-
-/**
- * Add a lock to list of conflicting locks to send AST to.
- *
- * Only add if we have not sent a blocking AST to the lock yet.
- */
-void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list)
-{
- if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
- LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
- lock->l_flags |= LDLM_FL_AST_SENT;
- /* If the enqueuing client said so, tell the AST recipient to
- * discard dirty data, rather than writing back. */
- if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
- lock->l_flags |= LDLM_FL_DISCARD_DATA;
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, work_list);
- LDLM_LOCK_GET(lock);
- LASSERT(lock->l_blocking_lock == NULL);
- lock->l_blocking_lock = LDLM_LOCK_GET(new);
- }
-}
-
-/**
- * Add a lock to list of just granted locks to send completion AST to.
- */
-void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
-{
- if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
- lock->l_flags |= LDLM_FL_CP_REQD;
- LDLM_DEBUG(lock, "lock granted; sending completion AST.");
- LASSERT(list_empty(&lock->l_cp_ast));
- list_add(&lock->l_cp_ast, work_list);
- LDLM_LOCK_GET(lock);
- }
-}
-
-/**
- * Aggregator function to add AST work items into a list. Determines
- * what sort of AST work needs to be done and calls the proper
- * adding function.
- * Must be called with lr_lock held.
- */
-void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
- struct list_head *work_list)
-{
- check_res_locked(lock->l_resource);
- if (new)
- ldlm_add_bl_work_item(lock, new, work_list);
- else
- ldlm_add_cp_work_item(lock, work_list);
-}
-
-/**
- * Add specified reader/writer reference to LDLM lock with handle \a lockh.
- * r/w reference type is determined by \a mode
- * Calls ldlm_lock_addref_internal.
- */
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
-{
- struct ldlm_lock *lock;
-
- lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
- ldlm_lock_addref_internal(lock, mode);
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_addref);
-
-/**
- * Helper function.
- * Add specified reader/writer reference to LDLM lock \a lock.
- * r/w reference type is determined by \a mode
- * Removes lock from LRU if it is there.
- * Assumes the LDLM lock is already locked.
- */
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
-{
- ldlm_lock_remove_from_lru(lock);
- if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
- lock->l_readers++;
- lu_ref_add_atomic(&lock->l_reference, "reader", lock);
- }
- if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
- lock->l_writers++;
- lu_ref_add_atomic(&lock->l_reference, "writer", lock);
- }
- LDLM_LOCK_GET(lock);
- lu_ref_add_atomic(&lock->l_reference, "user", lock);
- LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
-}
-
-/**
- * Attempts to add reader/writer reference to a lock with handle \a lockh, and
- * fails if lock is already LDLM_FL_CBPENDING or destroyed.
- *
- * \retval 0 success, lock was addref-ed
- *
- * \retval -EAGAIN lock is being canceled.
- */
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
-{
- struct ldlm_lock *lock;
- int result;
-
- result = -EAGAIN;
- lock = ldlm_handle2lock(lockh);
- if (lock != NULL) {
- lock_res_and_lock(lock);
- if (lock->l_readers != 0 || lock->l_writers != 0 ||
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
- ldlm_lock_addref_internal_nolock(lock, mode);
- result = 0;
- }
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- }
- return result;
-}
-EXPORT_SYMBOL(ldlm_lock_addref_try);
-
-/**
- * Add specified reader/writer reference to LDLM lock \a lock.
- * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
- * Only called for local locks.
- */
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
-{
- lock_res_and_lock(lock);
- ldlm_lock_addref_internal_nolock(lock, mode);
- unlock_res_and_lock(lock);
-}
-
-/**
- * Removes reader/writer reference for LDLM lock \a lock.
- * Assumes LDLM lock is already locked.
- * Only called in ldlm_flock_destroy() and for local locks.
- * Does NOT add the lock to the LRU if no r/w references are left, to
- * accommodate flock locks that cannot be placed in the LRU.
- */
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
-{
- LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
- if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
- LASSERT(lock->l_readers > 0);
- lu_ref_del(&lock->l_reference, "reader", lock);
- lock->l_readers--;
- }
- if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
- LASSERT(lock->l_writers > 0);
- lu_ref_del(&lock->l_reference, "writer", lock);
- lock->l_writers--;
- }
-
- lu_ref_del(&lock->l_reference, "user", lock);
- LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */
-}
-
-/**
- * Removes reader/writer reference for LDLM lock \a lock.
- * Locks LDLM lock first.
- * If this is a client-side lock, the r/w refcount drops to zero and the
- * lock is not blocked, the lock is added to the namespace LRU.
- * For blocked LDLM locks, if the r/w count drops to zero the blocking AST
- * is called.
- */
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
-{
- struct ldlm_namespace *ns;
-
- lock_res_and_lock(lock);
-
- ns = ldlm_lock_to_ns(lock);
-
- ldlm_lock_decref_internal_nolock(lock, mode);
-
- if (lock->l_flags & LDLM_FL_LOCAL &&
- !lock->l_readers && !lock->l_writers) {
- /* If this is a local lock on a server namespace and this was
- * the last reference, cancel the lock. */
- CDEBUG(D_INFO, "forcing cancel of local lock\n");
- lock->l_flags |= LDLM_FL_CBPENDING;
- }
-
- if (!lock->l_readers && !lock->l_writers &&
- (lock->l_flags & LDLM_FL_CBPENDING)) {
-		/* If we received a blocking AST and this was the last
-		 * reference, run the callback. */
- if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
- CERROR("FL_CBPENDING set on non-local lock--just a warning\n");
-
- LDLM_DEBUG(lock, "final decref done on cbpending lock");
-
- LDLM_LOCK_GET(lock); /* dropped by bl thread */
- ldlm_lock_remove_from_lru(lock);
- unlock_res_and_lock(lock);
-
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
- if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
- ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
- ldlm_handle_bl_callback(ns, NULL, lock);
- } else if (ns_is_client(ns) &&
- !lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_NO_LRU) &&
- !(lock->l_flags & LDLM_FL_BL_AST)) {
-
- LDLM_DEBUG(lock, "add lock into lru list");
-
- /* If this is a client-side namespace and this was the last
- * reference, put it on the LRU. */
- ldlm_lock_add_to_lru(lock);
- unlock_res_and_lock(lock);
-
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
- /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
- * are not supported by the server, otherwise, it is done on
- * enqueue. */
- if (!exp_connect_cancelset(lock->l_conn_export) &&
- !ns_connect_lru_resize(ns))
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
- } else {
- LDLM_DEBUG(lock, "do not add lock into lru list");
- unlock_res_and_lock(lock);
- }
-}
-
-/**
- * Decrease reader/writer refcount for LDLM lock with handle \a lockh
- */
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
-{
- struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
-
- LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
- ldlm_lock_decref_internal(lock, mode);
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_decref);
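-
-/*
- * Illustrative pairing: each reader/writer reference taken with
- * ldlm_lock_addref() is dropped with a matching ldlm_lock_decref(); once
- * the last reference goes, the lock is either put on the namespace LRU or,
- * if CBPENDING is set, cancelled.  Roughly:
- *
- *	ldlm_lock_addref(&lockh, LCK_PR);
- *	... access data under the lock ...
- *	ldlm_lock_decref(&lockh, LCK_PR);
- */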
-
-/**
- * Decrease reader/writer refcount for LDLM lock with handle
- * \a lockh and mark it for subsequent cancellation once r/w refcount
- * drops to zero instead of putting into LRU.
- *
- * Typical usage is for GROUP locks which we cannot allow to be cached.
- */
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
-{
- struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
-
- LASSERT(lock != NULL);
-
- LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(lock);
- ldlm_lock_decref_internal(lock, mode);
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
-
-struct sl_insert_point {
- struct list_head *res_link;
- struct list_head *mode_link;
- struct list_head *policy_link;
-};
-
-/**
- * Finds a position to insert the new lock into granted lock list.
- *
- * Used for locks eligible for skiplist optimization.
- *
- * Parameters:
- * queue [input]: the granted list where search acts on;
- * req [input]: the lock whose position to be located;
- * prev [output]: positions within 3 lists to insert @req to
- * Return Value:
- * filled @prev
- * NOTE: called by
- * - ldlm_grant_lock_with_skiplist
- */
-static void search_granted_lock(struct list_head *queue,
- struct ldlm_lock *req,
- struct sl_insert_point *prev)
-{
- struct list_head *tmp;
- struct ldlm_lock *lock, *mode_end, *policy_end;
-
- list_for_each(tmp, queue) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- mode_end = list_entry(lock->l_sl_mode.prev,
- struct ldlm_lock, l_sl_mode);
-
- if (lock->l_req_mode != req->l_req_mode) {
- /* jump to last lock of mode group */
- tmp = &mode_end->l_res_link;
- continue;
- }
-
- /* suitable mode group is found */
- if (lock->l_resource->lr_type == LDLM_PLAIN) {
- /* insert point is last lock of the mode group */
- prev->res_link = &mode_end->l_res_link;
- prev->mode_link = &mode_end->l_sl_mode;
- prev->policy_link = &req->l_sl_policy;
- return;
- }
-
- if (lock->l_resource->lr_type == LDLM_IBITS) {
- for (;;) {
- policy_end =
- list_entry(lock->l_sl_policy.prev,
- struct ldlm_lock,
- l_sl_policy);
-
- if (lock->l_policy_data.l_inodebits.bits ==
- req->l_policy_data.l_inodebits.bits) {
- /* insert point is last lock of
- * the policy group */
- prev->res_link =
- &policy_end->l_res_link;
- prev->mode_link =
- &policy_end->l_sl_mode;
- prev->policy_link =
- &policy_end->l_sl_policy;
- return;
- }
-
- if (policy_end == mode_end)
- /* done with mode group */
- break;
-
- /* go to next policy group within mode group */
- tmp = policy_end->l_res_link.next;
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
- } /* loop over policy groups within the mode group */
-
- /* insert point is last lock of the mode group,
- * new policy group is started */
- prev->res_link = &mode_end->l_res_link;
- prev->mode_link = &mode_end->l_sl_mode;
- prev->policy_link = &req->l_sl_policy;
- return;
- }
-
- LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
- LBUG();
- }
-
- /* insert point is last lock on the queue,
- * new mode group and new policy group are started */
- prev->res_link = queue->prev;
- prev->mode_link = &req->l_sl_mode;
- prev->policy_link = &req->l_sl_policy;
-}
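-
-/*
- * Shape of the granted list walked above, shown for illustration: locks
- * are grouped by mode and, for IBITS locks, by inodebits policy within
- * each mode group; l_sl_mode links the ends of mode groups and
- * l_sl_policy the ends of policy groups, which is what lets
- * search_granted_lock() skip whole groups instead of scanning every lock.
- */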
-
-/**
- * Add a lock into resource granted list after a position described by
- * \a prev.
- */
-static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
- struct sl_insert_point *prev)
-{
- struct ldlm_resource *res = lock->l_resource;
-
- check_res_locked(res);
-
- ldlm_resource_dump(D_INFO, res);
- LDLM_DEBUG(lock, "About to add lock:");
-
- if (lock->l_flags & LDLM_FL_DESTROYED) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- return;
- }
-
- LASSERT(list_empty(&lock->l_res_link));
- LASSERT(list_empty(&lock->l_sl_mode));
- LASSERT(list_empty(&lock->l_sl_policy));
-
-	/*
-	 * lock->link == prev->link means the lock is the first one in its
-	 * group. Don't re-add it to itself, to suppress kernel warnings.
-	 */
- if (&lock->l_res_link != prev->res_link)
- list_add(&lock->l_res_link, prev->res_link);
- if (&lock->l_sl_mode != prev->mode_link)
- list_add(&lock->l_sl_mode, prev->mode_link);
- if (&lock->l_sl_policy != prev->policy_link)
- list_add(&lock->l_sl_policy, prev->policy_link);
-}
-
-/**
- * Add a lock to granted list on a resource maintaining skiplist
- * correctness.
- */
-static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
-{
- struct sl_insert_point prev;
-
- LASSERT(lock->l_req_mode == lock->l_granted_mode);
-
- search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
- ldlm_granted_list_add_lock(lock, &prev);
-}
-
-/**
- * Perform lock granting bookkeeping.
- *
- * Includes putting the lock into granted list and updating lock mode.
- * NOTE: called by
- * - ldlm_lock_enqueue
- * - ldlm_reprocess_queue
- * - ldlm_lock_convert
- *
- * must be called with lr_lock held
- */
-void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
-{
- struct ldlm_resource *res = lock->l_resource;
-
- check_res_locked(res);
-
- lock->l_granted_mode = lock->l_req_mode;
- if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
- ldlm_grant_lock_with_skiplist(lock);
- else if (res->lr_type == LDLM_EXTENT)
- ldlm_extent_add_lock(res, lock);
- else
- ldlm_resource_add_lock(res, &res->lr_granted, lock);
-
- if (lock->l_granted_mode < res->lr_most_restr)
- res->lr_most_restr = lock->l_granted_mode;
-
- if (work_list && lock->l_completion_ast != NULL)
- ldlm_add_ast_work_item(lock, NULL, work_list);
-
- ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
-}
-
-/**
- * Search for a lock with given properties in a queue.
- *
- * \retval a referenced lock or NULL. See the flag descriptions below, in the
- * comment above ldlm_lock_match
- */
-static struct ldlm_lock *search_queue(struct list_head *queue,
- ldlm_mode_t *mode,
- ldlm_policy_data_t *policy,
- struct ldlm_lock *old_lock,
- __u64 flags, int unref)
-{
- struct ldlm_lock *lock;
- struct list_head *tmp;
-
- list_for_each(tmp, queue) {
- ldlm_mode_t match;
-
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (lock == old_lock)
- break;
-
-		/* Check if this lock can be matched.
-		 * Used by LU-2919 (exclusive open) for open lease locks. */
- if (ldlm_is_excl(lock))
- continue;
-
-		/* llite sometimes wants to match locks that will be
-		 * canceled when their users drop, but we allow it to match
-		 * if it passes in CBPENDING and the lock still has users.
-		 * This is generally only going to be used by children
-		 * whose parents already hold a lock, so forward progress
-		 * can still happen. */
- if (lock->l_flags & LDLM_FL_CBPENDING &&
- !(flags & LDLM_FL_CBPENDING))
- continue;
- if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
- lock->l_readers == 0 && lock->l_writers == 0)
- continue;
-
- if (!(lock->l_req_mode & *mode))
- continue;
- match = lock->l_req_mode;
-
- if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_policy_data.l_extent.start >
- policy->l_extent.start ||
- lock->l_policy_data.l_extent.end < policy->l_extent.end))
- continue;
-
- if (unlikely(match == LCK_GROUP) &&
- lock->l_resource->lr_type == LDLM_EXTENT &&
- lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
- continue;
-
-		/* We match if we have an existing lock with the same or a
-		 * wider set of bits. */
- if (lock->l_resource->lr_type == LDLM_IBITS &&
- ((lock->l_policy_data.l_inodebits.bits &
- policy->l_inodebits.bits) !=
- policy->l_inodebits.bits))
- continue;
-
- if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
- continue;
-
- if ((flags & LDLM_FL_LOCAL_ONLY) &&
- !(lock->l_flags & LDLM_FL_LOCAL))
- continue;
-
- if (flags & LDLM_FL_TEST_LOCK) {
- LDLM_LOCK_GET(lock);
- ldlm_lock_touch_in_lru(lock);
- } else {
- ldlm_lock_addref_internal_nolock(lock, match);
- }
- *mode = match;
- return lock;
- }
-
- return NULL;
-}
-
-void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
-{
- if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
- lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
- wake_up_all(&lock->l_waitq);
- }
-}
-EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
-
-void ldlm_lock_fail_match(struct ldlm_lock *lock)
-{
- lock_res_and_lock(lock);
- ldlm_lock_fail_match_locked(lock);
- unlock_res_and_lock(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_fail_match);
-
-/**
- * Mark lock as "matchable" by OST.
- *
- * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
- * is not yet valid.
- * Assumes LDLM lock is already locked.
- */
-void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
-{
- lock->l_flags |= LDLM_FL_LVB_READY;
- wake_up_all(&lock->l_waitq);
-}
-EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
-
-/**
- * Mark lock as "matchable" by OST.
- * Locks the lock and then \see ldlm_lock_allow_match_locked
- */
-void ldlm_lock_allow_match(struct ldlm_lock *lock)
-{
- lock_res_and_lock(lock);
- ldlm_lock_allow_match_locked(lock);
- unlock_res_and_lock(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_allow_match);
-
-/**
- * Attempt to find a lock with specified properties.
- *
- * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
- * set in \a flags
- *
- * Can be called in two ways:
- *
- * If 'ns' is NULL, then lockh describes an existing lock that we want to look
- * for a duplicate of.
- *
- * Otherwise, all of the fields must be filled in, to match against.
- *
- * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
- * server (ie, connh is NULL)
- * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
- * list will be considered
- * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
- * to be canceled can still be matched as long as they still have reader
- * or writer references
- * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
- * just tell us if we would have matched.
- *
- * \retval 1 if it finds an already-existing lock that is compatible; in this
- * case, lockh is filled in with an addref()ed lock
- *
- * We also check security context, and if that fails we simply return 0 (to
- * keep caller code unchanged), the context failure will be discovered by
- * caller sometime later.
- */
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *res_id, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh, int unref)
-{
- struct ldlm_resource *res;
- struct ldlm_lock *lock, *old_lock = NULL;
- int rc = 0;
-
- if (ns == NULL) {
- old_lock = ldlm_handle2lock(lockh);
- LASSERT(old_lock);
-
- ns = ldlm_lock_to_ns(old_lock);
- res_id = &old_lock->l_resource->lr_name;
- type = old_lock->l_resource->lr_type;
- mode = old_lock->l_req_mode;
- }
-
- res = ldlm_resource_get(ns, NULL, res_id, type, 0);
- if (res == NULL) {
- LASSERT(old_lock == NULL);
- return 0;
- }
-
- LDLM_RESOURCE_ADDREF(res);
- lock_res(res);
-
- lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
- flags, unref);
- if (lock != NULL) {
- rc = 1;
- goto out;
- }
- if (flags & LDLM_FL_BLOCK_GRANTED) {
- rc = 0;
- goto out;
- }
- lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
- flags, unref);
- if (lock != NULL) {
- rc = 1;
- goto out;
- }
- lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
- flags, unref);
- if (lock != NULL) {
- rc = 1;
- goto out;
- }
-
- out:
- unlock_res(res);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
-
- if (lock) {
- ldlm_lock2handle(lock, lockh);
- if ((flags & LDLM_FL_LVB_READY) &&
- (!(lock->l_flags & LDLM_FL_LVB_READY))) {
- __u64 wait_flags = LDLM_FL_LVB_READY |
- LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
- struct l_wait_info lwi;
-
- if (lock->l_completion_ast) {
- int err = lock->l_completion_ast(lock,
- LDLM_FL_WAIT_NOREPROC,
- NULL);
- if (err) {
- if (flags & LDLM_FL_TEST_LOCK)
- LDLM_LOCK_RELEASE(lock);
- else
- ldlm_lock_decref_internal(lock,
- mode);
- rc = 0;
- goto out2;
- }
- }
-
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
- NULL, LWI_ON_SIGNAL_NOOP, NULL);
-
- /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
- l_wait_event(lock->l_waitq,
- lock->l_flags & wait_flags,
- &lwi);
- if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
- if (flags & LDLM_FL_TEST_LOCK)
- LDLM_LOCK_RELEASE(lock);
- else
- ldlm_lock_decref_internal(lock, mode);
- rc = 0;
- }
- }
- }
- out2:
- if (rc) {
- LDLM_DEBUG(lock, "matched (%llu %llu)",
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[2] : policy->l_extent.start,
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[3] : policy->l_extent.end);
-
- /* check user's security context */
- if (lock->l_conn_export &&
- sptlrpc_import_check_ctx(
- class_exp2cliimp(lock->l_conn_export))) {
- if (!(flags & LDLM_FL_TEST_LOCK))
- ldlm_lock_decref_internal(lock, mode);
- rc = 0;
- }
-
- if (flags & LDLM_FL_TEST_LOCK)
- LDLM_LOCK_RELEASE(lock);
-
- } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
- LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res %llu/%llu (%llu %llu)",
- ns, type, mode, res_id->name[0],
- res_id->name[1],
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[2] : policy->l_extent.start,
- (type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[3] : policy->l_extent.end);
- }
- if (old_lock)
- LDLM_LOCK_PUT(old_lock);
-
- return rc ? mode : 0;
-}
-EXPORT_SYMBOL(ldlm_lock_match);
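-
-/*
- * Hedged usage sketch for ldlm_lock_match(): a caller probing for a cached
- * extent lock might do roughly
- *
- *	ldlm_policy_data_t policy = { .l_extent = { start, end, 0 } };
- *	ldlm_mode_t mode;
- *
- *	mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, &res_id, LDLM_EXTENT,
- *			       &policy, LCK_PR | LCK_PW, &lockh, 0);
- *	if (mode)
- *		... lockh now holds an addref'ed lock; drop it with
- *		    ldlm_lock_decref(&lockh, mode) when done ...
- *
- * With LDLM_FL_TEST_LOCK set no reference is kept and only the would-be
- * match is reported.
- */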
-
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
- __u64 *bits)
-{
- struct ldlm_lock *lock;
- ldlm_mode_t mode = 0;
-
- lock = ldlm_handle2lock(lockh);
- if (lock != NULL) {
- lock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_GONE_MASK)
- goto out;
-
- if (lock->l_flags & LDLM_FL_CBPENDING &&
- lock->l_readers == 0 && lock->l_writers == 0)
- goto out;
-
- if (bits)
- *bits = lock->l_policy_data.l_inodebits.bits;
- mode = lock->l_granted_mode;
- ldlm_lock_addref_internal_nolock(lock, mode);
- }
-
-out:
- if (lock != NULL) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- }
- return mode;
-}
-EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
-
-/** The caller must guarantee that the buffer is large enough. */
-int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
- enum req_location loc, void *data, int size)
-{
- void *lvb;
-
- LASSERT(data != NULL);
- LASSERT(size >= 0);
-
- switch (lock->l_lvb_type) {
- case LVB_T_OST:
- if (size == sizeof(struct ost_lvb)) {
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
- else
- lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
- if (unlikely(lvb == NULL)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- } else if (size == sizeof(struct ost_lvb_v1)) {
- struct ost_lvb *olvb = data;
-
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb_v1);
- else
- lvb = req_capsule_server_sized_swab_get(pill,
- &RMF_DLM_LVB, size,
- lustre_swab_ost_lvb_v1);
- if (unlikely(lvb == NULL)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- olvb->lvb_mtime_ns = 0;
- olvb->lvb_atime_ns = 0;
- olvb->lvb_ctime_ns = 0;
- } else {
- LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
- size);
- return -EINVAL;
- }
- break;
- case LVB_T_LQUOTA:
- if (size == sizeof(struct lquota_lvb)) {
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
- else
- lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
- if (unlikely(lvb == NULL)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- } else {
- LDLM_ERROR(lock,
- "Replied unexpected lquota LVB size %d",
- size);
- return -EINVAL;
- }
- break;
- case LVB_T_LAYOUT:
- if (size == 0)
- break;
-
- if (loc == RCL_CLIENT)
- lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
- else
- lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
- if (unlikely(lvb == NULL)) {
- LDLM_ERROR(lock, "no LVB");
- return -EPROTO;
- }
-
- memcpy(data, lvb, size);
- break;
- default:
- LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
- dump_stack();
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * Create and fill in new LDLM lock with specified properties.
- * Returns a referenced lock
- */
-struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_type_t type,
- ldlm_mode_t mode,
- const struct ldlm_callback_suite *cbs,
- void *data, __u32 lvb_len,
- enum lvb_type lvb_type)
-{
- struct ldlm_lock *lock;
- struct ldlm_resource *res;
-
- res = ldlm_resource_get(ns, NULL, res_id, type, 1);
- if (res == NULL)
- return NULL;
-
- lock = ldlm_lock_new(res);
-
- if (lock == NULL)
- return NULL;
-
- lock->l_req_mode = mode;
- lock->l_ast_data = data;
- lock->l_pid = current_pid();
- if (ns_is_server(ns))
- lock->l_flags |= LDLM_FL_NS_SRV;
- if (cbs) {
- lock->l_blocking_ast = cbs->lcs_blocking;
- lock->l_completion_ast = cbs->lcs_completion;
- lock->l_glimpse_ast = cbs->lcs_glimpse;
- }
-
- lock->l_tree_node = NULL;
- /* if this is the extent lock, allocate the interval tree node */
- if (type == LDLM_EXTENT) {
- if (ldlm_interval_alloc(lock) == NULL)
- goto out;
- }
-
- if (lvb_len) {
- lock->l_lvb_len = lvb_len;
- lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (!lock->l_lvb_data)
- goto out;
- }
-
- lock->l_lvb_type = lvb_type;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
- goto out;
-
- return lock;
-
-out:
- ldlm_lock_destroy(lock);
- LDLM_LOCK_RELEASE(lock);
- return NULL;
-}
-
-/**
- * Enqueue (request) a lock.
- *
- * Does not block. As a result of the enqueue the lock is put into the
- * granted or waiting list.
- *
- * If the namespace has an intent policy set and the lock has the
- * LDLM_FL_HAS_INTENT flag set, skip all the enqueueing and delegate lock
- * processing to the intent policy function.
- */
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
- struct ldlm_lock **lockp,
- void *cookie, __u64 *flags)
-{
- struct ldlm_lock *lock = *lockp;
- struct ldlm_resource *res = lock->l_resource;
- int local = ns_is_client(ldlm_res_to_ns(res));
- ldlm_error_t rc = ELDLM_OK;
- struct ldlm_interval *node = NULL;
-
- lock->l_last_activity = ktime_get_real_seconds();
- /* policies are not executed on the client or during replay */
- if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
- && !local && ns->ns_policy) {
- rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
- NULL);
- if (rc == ELDLM_LOCK_REPLACED) {
- /* The lock that was returned has already been granted,
- * and placed into lockp. If it's not the same as the
- * one we passed in, then destroy the old one and our
- * work here is done. */
- if (lock != *lockp) {
- ldlm_lock_destroy(lock);
- LDLM_LOCK_RELEASE(lock);
- }
- *flags |= LDLM_FL_LOCK_CHANGED;
- return 0;
- } else if (rc != ELDLM_OK ||
- (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
- ldlm_lock_destroy(lock);
- return rc;
- }
- }
-
-	/* For a replaying lock, it might already be in the granted list, so
-	 * unlinking the lock will cause the interval node to be freed. We
-	 * have to allocate the interval node early; otherwise we can't
-	 * regrant this lock in the future. - jay */
- if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
-
- lock_res_and_lock(lock);
- if (local && lock->l_req_mode == lock->l_granted_mode) {
- /* The server returned a blocked lock, but it was granted
- * before we got a chance to actually enqueue it. We don't
- * need to do anything else. */
- *flags &= ~(LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
- goto out;
- }
-
- ldlm_resource_unlink_lock(lock);
- if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
- if (node == NULL) {
- ldlm_lock_destroy_nolock(lock);
- rc = -ENOMEM;
- goto out;
- }
-
- INIT_LIST_HEAD(&node->li_group);
- ldlm_interval_attach(node, lock);
- node = NULL;
- }
-
- /* Some flags from the enqueue want to make it into the AST, via the
- * lock's l_flags. */
- lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
-
- /* This distinction between local lock trees is very important; a client
- * namespace only has information about locks taken by that client, and
- * thus doesn't have enough information to decide for itself if it can
- * be granted (below). In this case, we do exactly what the server
- * tells us to do, as dictated by the 'flags'.
- *
- * We do exactly the same thing during recovery, when the server is
- * more or less trusting the clients not to lie.
- *
- * FIXME (bug 268): Detect obvious lies by checking compatibility in
- * granted/converting queues. */
- if (local) {
- if (*flags & LDLM_FL_BLOCK_CONV)
- ldlm_resource_add_lock(res, &res->lr_converting, lock);
- else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
- ldlm_resource_add_lock(res, &res->lr_waiting, lock);
- else
- ldlm_grant_lock(lock, NULL);
- goto out;
- } else {
- CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
- LBUG();
- }
-
-out:
- unlock_res_and_lock(lock);
- if (node)
- OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
- return rc;
-}
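-
-/*
- * Flow note (illustrative): on the client a lock is typically built with
- * ldlm_lock_create() and then passed to ldlm_lock_enqueue(), which either
- * grants it locally or parks it on the converting/waiting list exactly as
- * dictated by the server-provided *flags; the server-side grant decision
- * never runs in this client-only module, hence the LBUG() above for the
- * non-local case.
- */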
-
-
-/**
- * Process a call to blocking AST callback for a lock in ast_work list
- */
-static int
-ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- struct ldlm_lock_desc d;
- int rc;
- struct ldlm_lock *lock;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
-
- /* nobody should touch l_bl_ast */
- lock_res_and_lock(lock);
- list_del_init(&lock->l_bl_ast);
-
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
- LASSERT(lock->l_bl_ast_run == 0);
- LASSERT(lock->l_blocking_lock);
- lock->l_bl_ast_run++;
- unlock_res_and_lock(lock);
-
- ldlm_lock2desc(lock->l_blocking_lock, &d);
-
- rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
- LDLM_LOCK_RELEASE(lock->l_blocking_lock);
- lock->l_blocking_lock = NULL;
- LDLM_LOCK_RELEASE(lock);
-
- return rc;
-}
-
-/**
- * Process a call to completion AST callback for a lock in ast_work list
- */
-static int
-ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- int rc = 0;
- struct ldlm_lock *lock;
- ldlm_completion_callback completion_callback;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
-
- /* It's possible to receive a completion AST before we've set
- * the l_completion_ast pointer: either because the AST arrived
- * before the reply, or simply because there's a small race
- * window between receiving the reply and finishing the local
- * enqueue. (bug 842)
- *
- * This can't happen with the blocking_ast, however, because we
- * will never call the local blocking_ast until we drop our
- * reader/writer reference, which we won't do until we get the
- * reply and finish enqueueing. */
-
- /* nobody should touch l_cp_ast */
- lock_res_and_lock(lock);
- list_del_init(&lock->l_cp_ast);
- LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
- /* save l_completion_ast since it can be changed by
- * mds_intent_policy(), see bug 14225 */
- completion_callback = lock->l_completion_ast;
- lock->l_flags &= ~LDLM_FL_CP_REQD;
- unlock_res_and_lock(lock);
-
- if (completion_callback != NULL)
- rc = completion_callback(lock, 0, (void *)arg);
- LDLM_LOCK_RELEASE(lock);
-
- return rc;
-}
-
-/**
- * Process a call to revocation AST callback for a lock in ast_work list
- */
-static int
-ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- struct ldlm_lock_desc desc;
- int rc;
- struct ldlm_lock *lock;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
- list_del_init(&lock->l_rk_ast);
-
-	/* the descriptor just pretends the lock is exclusive */
- ldlm_lock2desc(lock, &desc);
- desc.l_req_mode = LCK_EX;
- desc.l_granted_mode = 0;
-
- rc = lock->l_blocking_ast(lock, &desc, (void *)arg, LDLM_CB_BLOCKING);
- LDLM_LOCK_RELEASE(lock);
-
- return rc;
-}
-
-/**
- * Process a call to glimpse AST callback for a lock in ast_work list
- */
-int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
- struct ldlm_cb_set_arg *arg = opaq;
- struct ldlm_glimpse_work *gl_work;
- struct ldlm_lock *lock;
- int rc = 0;
-
- if (list_empty(arg->list))
- return -ENOENT;
-
- gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
- gl_list);
- list_del_init(&gl_work->gl_list);
-
- lock = gl_work->gl_lock;
-
- /* transfer the glimpse descriptor to ldlm_cb_set_arg */
- arg->gl_desc = gl_work->gl_desc;
-
- /* invoke the actual glimpse callback */
- if (lock->l_glimpse_ast(lock, (void *)arg) == 0)
- rc = 1;
-
- LDLM_LOCK_RELEASE(lock);
-
- if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
- kfree(gl_work);
-
- return rc;
-}
-
-/**
- * Process a list of locks for which ASTs need to be sent.
- *
- * Used on the server to send multiple ASTs together instead of one at a
- * time.
- */
-int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
- enum ldlm_desc_ast_t ast_type)
-{
- struct ldlm_cb_set_arg *arg;
- set_producer_func work_ast_lock;
- int rc;
-
- if (list_empty(rpc_list))
- return 0;
-
- arg = kzalloc(sizeof(*arg), GFP_NOFS);
- if (!arg)
- return -ENOMEM;
-
- atomic_set(&arg->restart, 0);
- arg->list = rpc_list;
-
- switch (ast_type) {
- case LDLM_WORK_BL_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_bl_ast_lock;
- break;
- case LDLM_WORK_CP_AST:
- arg->type = LDLM_CP_CALLBACK;
- work_ast_lock = ldlm_work_cp_ast_lock;
- break;
- case LDLM_WORK_REVOKE_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_revoke_ast_lock;
- break;
- case LDLM_WORK_GL_AST:
- arg->type = LDLM_GL_CALLBACK;
- work_ast_lock = ldlm_work_gl_ast_lock;
- break;
- default:
- LBUG();
- }
-
-	/* We create a ptlrpc request set with a flow control extension.
-	 * The set uses the work_ast_lock function to produce new requests
-	 * and sends a new request each time one completes, keeping the
-	 * number of requests in flight at ns_max_parallel_ast. */
- arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
- work_ast_lock, arg);
- if (arg->set == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- ptlrpc_set_wait(arg->set);
- ptlrpc_set_destroy(arg->set);
-
-	rc = atomic_read(&arg->restart) ? -ERESTART : 0;
-out:
- kfree(arg);
- return rc;
-}
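
The flow control described in the comment above is easiest to see in isolation: a producer callback is polled to start work, and every completion immediately pulls the next request, so that at most a fixed number are ever in flight. The stand-alone sketch below illustrates only that idea; the names (produce(), MAX_ITEMS, max_in_flight) are made up for the example, and the real code delegates all of this to ptlrpc_prep_fcset() and ptlrpc_set_wait().

#include <stdio.h>

#define MAX_ITEMS 10			/* total work items, demo only */

static int next_item;

/* Producer: hand out the next work item, or -1 when nothing is left. */
static int produce(int *item)
{
	if (next_item >= MAX_ITEMS)
		return -1;
	*item = next_item++;
	return 0;
}

int main(void)
{
	int max_in_flight = 3;		/* plays the role of ns_max_parallel_ast */
	int in_flight = 0;
	int item;

	/* Prime the window up to the in-flight limit. */
	while (in_flight < max_in_flight && produce(&item) == 0)
		printf("send item %d (%d in flight)\n", item, ++in_flight);

	/* Each completion frees a slot, which is refilled right away. */
	while (in_flight > 0) {
		in_flight--;		/* pretend the oldest request completed */
		if (produce(&item) == 0)
			printf("send item %d (%d in flight)\n", item,
			       ++in_flight);
	}
	return 0;
}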
-
-static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
-{
- ldlm_reprocess_all(res);
- return LDLM_ITER_CONTINUE;
-}
-
-static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- int rc;
-
- rc = reprocess_one_queue(res, arg);
-
- return rc == LDLM_ITER_STOP;
-}
-
-/**
- * Iterate through all resources on a namespace attempting to grant waiting
- * locks.
- */
-void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
-{
- if (ns != NULL) {
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_reprocess_res, NULL);
- }
-}
-EXPORT_SYMBOL(ldlm_reprocess_all_ns);
-
-/**
- * Try to grant all waiting locks on a resource.
- *
- * Calls ldlm_reprocess_queue on converting and waiting queues.
- *
- * Typically called after some resource locks are cancelled to see
- * if anything could be granted as a result of the cancellation.
- */
-void ldlm_reprocess_all(struct ldlm_resource *res)
-{
- LIST_HEAD(rpc_list);
-
- if (!ns_is_client(ldlm_res_to_ns(res))) {
-		CERROR("This is a client-side-only module; it cannot handle LDLM_NAMESPACE_SERVER resource type locks.\n");
- LBUG();
- }
-}
-
-/**
- * Helper function to call blocking AST for LDLM lock \a lock in a
- * "cancelling" mode.
- */
-void ldlm_cancel_callback(struct ldlm_lock *lock)
-{
- check_res_locked(lock->l_resource);
- if (!(lock->l_flags & LDLM_FL_CANCEL)) {
- lock->l_flags |= LDLM_FL_CANCEL;
- if (lock->l_blocking_ast) {
- unlock_res_and_lock(lock);
- lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
- LDLM_CB_CANCELING);
- lock_res_and_lock(lock);
- } else {
- LDLM_DEBUG(lock, "no blocking ast");
- }
- }
- lock->l_flags |= LDLM_FL_BL_DONE;
-}
-
-/**
- * Remove skiplist-enabled LDLM lock \a req from granted list
- */
-void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
-{
- if (req->l_resource->lr_type != LDLM_PLAIN &&
- req->l_resource->lr_type != LDLM_IBITS)
- return;
-
- list_del_init(&req->l_sl_policy);
- list_del_init(&req->l_sl_mode);
-}
-
-/**
- * Attempts to cancel LDLM lock \a lock that has no reader/writer references.
- */
-void ldlm_lock_cancel(struct ldlm_lock *lock)
-{
- struct ldlm_resource *res;
- struct ldlm_namespace *ns;
-
- lock_res_and_lock(lock);
-
- res = lock->l_resource;
- ns = ldlm_res_to_ns(res);
-
- /* Please do not, no matter how tempting, remove this LBUG without
- * talking to me first. -phik */
- if (lock->l_readers || lock->l_writers) {
- LDLM_ERROR(lock, "lock still has references");
- LBUG();
- }
-
- if (lock->l_flags & LDLM_FL_WAITED)
- ldlm_del_waiting_lock(lock);
-
- /* Releases cancel callback. */
- ldlm_cancel_callback(lock);
-
- /* Yes, second time, just in case it was added again while we were
- * running with no res lock in ldlm_cancel_callback */
- if (lock->l_flags & LDLM_FL_WAITED)
- ldlm_del_waiting_lock(lock);
-
- ldlm_resource_unlink_lock(lock);
- ldlm_lock_destroy_nolock(lock);
-
- if (lock->l_granted_mode == lock->l_req_mode)
- ldlm_pool_del(&ns->ns_pool, lock);
-
-	/* Make sure we will not be called again for the same lock, which is
-	 * possible if we do not zero out lock->l_granted_mode */
- lock->l_granted_mode = LCK_MINMODE;
- unlock_res_and_lock(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_cancel);
-
-/**
- * Set opaque data into the lock that only makes sense to upper layer.
- */
-int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
-{
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- int rc = -EINVAL;
-
- if (lock) {
- if (lock->l_ast_data == NULL)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- rc = 0;
- LDLM_LOCK_PUT(lock);
- }
- return rc;
-}
-EXPORT_SYMBOL(ldlm_lock_set_data);
-
-struct export_cl_data {
- struct obd_export *ecl_exp;
- int ecl_loop;
-};
-
-/**
- * Iterator function for ldlm_cancel_locks_for_export.
- * Cancels passed locks.
- */
-int ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-
-{
- struct export_cl_data *ecl = (struct export_cl_data *)data;
- struct obd_export *exp = ecl->ecl_exp;
- struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
- struct ldlm_resource *res;
-
- res = ldlm_resource_getref(lock->l_resource);
- LDLM_LOCK_GET(lock);
-
- LDLM_DEBUG(lock, "export %p", exp);
- ldlm_res_lvbo_update(res, NULL, 1);
- ldlm_lock_cancel(lock);
- ldlm_reprocess_all(res);
- ldlm_resource_putref(res);
- LDLM_LOCK_RELEASE(lock);
-
- ecl->ecl_loop++;
- if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
- CDEBUG(D_INFO,
- "Cancel lock %p for export %p (loop %d), still have %d locks left on hash table.\n",
- lock, exp, ecl->ecl_loop,
- atomic_read(&hs->hs_count));
- }
-
- return 0;
-}
-
-/**
- * Cancel all locks for given export.
- *
- * Typically called on client disconnection/eviction
- */
-void ldlm_cancel_locks_for_export(struct obd_export *exp)
-{
- struct export_cl_data ecl = {
- .ecl_exp = exp,
- .ecl_loop = 0,
- };
-
- cfs_hash_for_each_empty(exp->exp_lock_hash,
- ldlm_cancel_locks_for_export_cb, &ecl);
-}
-
-/**
- * Downgrade an exclusive lock.
- *
- * A fast variant of ldlm_lock_convert for conversion of exclusive
- * locks. The conversion is always successful.
- * Used by Commit on Sharing (COS) code.
- *
- * \param lock A lock to convert
- * \param new_mode new lock mode
- */
-void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
-{
- LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
- LASSERT(new_mode == LCK_COS);
-
- lock_res_and_lock(lock);
- ldlm_resource_unlink_lock(lock);
- /*
- * Remove the lock from pool as it will be added again in
- * ldlm_grant_lock() called below.
- */
- ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
-
- lock->l_req_mode = new_mode;
- ldlm_grant_lock(lock, NULL);
- unlock_res_and_lock(lock);
- ldlm_reprocess_all(lock->l_resource);
-}
-EXPORT_SYMBOL(ldlm_lock_downgrade);
-
-/**
- * Attempt to convert already granted lock to a different mode.
- *
- * While lock conversion is not currently used, future client-side
- * optimizations could take advantage of it to avoid discarding cached
- * pages on a file.
- */
-struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
- __u32 *flags)
-{
- LIST_HEAD(rpc_list);
- struct ldlm_resource *res;
- struct ldlm_namespace *ns;
- int granted = 0;
- struct ldlm_interval *node;
-
- /* Just return if mode is unchanged. */
- if (new_mode == lock->l_granted_mode) {
- *flags |= LDLM_FL_BLOCK_GRANTED;
- return lock->l_resource;
- }
-
-	/* I can't check the type of the lock here because its bitlock is not
-	 * held here, so do the allocation blindly. -jay */
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
- if (node == NULL)
- /* Actually, this causes EDEADLOCK to be returned */
- return NULL;
-
- LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
- "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
-
- lock_res_and_lock(lock);
-
- res = lock->l_resource;
- ns = ldlm_res_to_ns(res);
-
- lock->l_req_mode = new_mode;
- if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
- ldlm_resource_unlink_lock(lock);
- } else {
- ldlm_resource_unlink_lock(lock);
- if (res->lr_type == LDLM_EXTENT) {
-			/* FIXME: ugly code, I have to attach the lock to an
-			 * interval node again since perhaps it will be granted
-			 * soon */
- INIT_LIST_HEAD(&node->li_group);
- ldlm_interval_attach(node, lock);
- node = NULL;
- }
- }
-
- /*
- * Remove old lock from the pool before adding the lock with new
- * mode below in ->policy()
- */
- ldlm_pool_del(&ns->ns_pool, lock);
-
- /* If this is a local resource, put it on the appropriate list. */
- if (ns_is_client(ldlm_res_to_ns(res))) {
- if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
- ldlm_resource_add_lock(res, &res->lr_converting, lock);
- } else {
- /* This should never happen, because of the way the
- * server handles conversions. */
- LDLM_ERROR(lock, "Erroneous flags %x on local lock\n",
- *flags);
- LBUG();
-
- ldlm_grant_lock(lock, &rpc_list);
- granted = 1;
-			/* FIXME: completion handling should not be done with
-			 * lr_lock held! */
- if (lock->l_completion_ast)
- lock->l_completion_ast(lock, 0, NULL);
- }
- } else {
-		CERROR("This is a client-side-only module; it cannot handle LDLM_NAMESPACE_SERVER resource type locks.\n");
- LBUG();
- }
- unlock_res_and_lock(lock);
-
- if (granted)
- ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
- if (node)
- OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
- return res;
-}
-EXPORT_SYMBOL(ldlm_lock_convert);
-
-/**
- * Print lock with lock handle \a lockh description into debug log.
- *
- * Used when printing all locks on a resource for debug purposes.
- */
-void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
-{
- struct ldlm_lock *lock;
-
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- lock = ldlm_handle2lock(lockh);
- if (lock == NULL)
- return;
-
- LDLM_DEBUG_LIMIT(level, lock, "###");
-
- LDLM_LOCK_PUT(lock);
-}
-EXPORT_SYMBOL(ldlm_lock_dump_handle);
-
-/**
- * Print lock information with custom message into debug log.
- * Helper function.
- */
-void _ldlm_lock_debug(struct ldlm_lock *lock,
- struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ...)
-{
- va_list args;
- struct obd_export *exp = lock->l_export;
- struct ldlm_resource *resource = lock->l_resource;
- char *nid = "local";
-
- va_start(args, fmt);
-
- if (exp && exp->exp_connection) {
- nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
- } else if (exp && exp->exp_obd != NULL) {
- struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
-
- nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
- }
-
- if (resource == NULL) {
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
- va_end(args);
- return;
- }
-
- switch (resource->lr_type) {
- case LDLM_EXTENT:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_extent.start,
- lock->l_policy_data.l_extent.end,
- lock->l_req_extent.start, lock->l_req_extent.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
- break;
-
- case LDLM_FLOCK:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_flock.pid,
- lock->l_policy_data.l_flock.start,
- lock->l_policy_data.l_flock.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout);
- break;
-
- case LDLM_IBITS:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- lock->l_policy_data.l_inodebits.bits,
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
- break;
-
- default:
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
- break;
- }
- va_end(args);
-}
-EXPORT_SYMBOL(_ldlm_lock_debug);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
deleted file mode 100644
index a78c4a4c9bd4..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ /dev/null
@@ -1,1245 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_lockd.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/lustre_dlm.h"
-#include "../include/obd_class.h"
-#include <linux/list.h>
-#include "ldlm_internal.h"
-
-static int ldlm_num_threads;
-module_param(ldlm_num_threads, int, 0444);
-MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
-
-static char *ldlm_cpts;
-module_param(ldlm_cpts, charp, 0444);
-MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
-
-static struct mutex ldlm_ref_mutex;
-static int ldlm_refcount;
-
-struct kobject *ldlm_kobj;
-struct kset *ldlm_ns_kset;
-struct kset *ldlm_svc_kset;
-
-struct ldlm_cb_async_args {
- struct ldlm_cb_set_arg *ca_set_arg;
- struct ldlm_lock *ca_lock;
-};
-
-/* LDLM state */
-
-static struct ldlm_state *ldlm_state;
-
-#define ELT_STOPPED 0
-#define ELT_READY 1
-#define ELT_TERMINATE 2
-
-struct ldlm_bl_pool {
- spinlock_t blp_lock;
-
- /*
- * blp_prio_list is used for callbacks that should be handled
- * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
- * see bug 13843
- */
- struct list_head blp_prio_list;
-
- /*
- * blp_list is used for all other callbacks which are likely
- * to take longer to process.
- */
- struct list_head blp_list;
-
- wait_queue_head_t blp_waitq;
- struct completion blp_comp;
- atomic_t blp_num_threads;
- atomic_t blp_busy_threads;
- int blp_min_threads;
- int blp_max_threads;
-};
-
-struct ldlm_bl_work_item {
- struct list_head blwi_entry;
- struct ldlm_namespace *blwi_ns;
- struct ldlm_lock_desc blwi_ld;
- struct ldlm_lock *blwi_lock;
- struct list_head blwi_head;
- int blwi_count;
- struct completion blwi_comp;
- ldlm_cancel_flags_t blwi_flags;
- int blwi_mem_pressure;
-};
-
-
-int ldlm_del_waiting_lock(struct ldlm_lock *lock)
-{
- return 0;
-}
-
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
-{
- return 0;
-}
-
-
-
-/**
- * Callback handler for receiving incoming blocking ASTs.
- *
- * This can only happen on client side.
- */
-void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
-{
- int do_ast;
-
- LDLM_DEBUG(lock, "client blocking AST callback handler");
-
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
-
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
- lock->l_flags |= LDLM_FL_CANCEL;
-
- do_ast = !lock->l_readers && !lock->l_writers;
- unlock_res_and_lock(lock);
-
- if (do_ast) {
- CDEBUG(D_DLMTRACE,
- "Lock %p already unused, calling callback (%p)\n", lock,
- lock->l_blocking_ast);
- if (lock->l_blocking_ast != NULL)
- lock->l_blocking_ast(lock, ld, lock->l_ast_data,
- LDLM_CB_BLOCKING);
- } else {
- CDEBUG(D_DLMTRACE,
- "Lock %p is referenced, will be cancelled later\n",
- lock);
- }
-
- LDLM_DEBUG(lock, "client blocking callback handler END");
- LDLM_LOCK_RELEASE(lock);
-}
-
-/**
- * Callback handler for receiving incoming completion ASTs.
- *
- * This only can happen on client side.
- */
-static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
- struct ldlm_namespace *ns,
- struct ldlm_request *dlm_req,
- struct ldlm_lock *lock)
-{
- int lvb_len;
- LIST_HEAD(ast_list);
- int rc = 0;
-
- LDLM_DEBUG(lock, "client completion callback handler START");
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
- int to = cfs_time_seconds(1);
-
- while (to > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(to);
- if (lock->l_granted_mode == lock->l_req_mode ||
- lock->l_flags & LDLM_FL_DESTROYED)
- break;
- }
- }
-
- lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
- if (lvb_len < 0) {
- LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
- rc = lvb_len;
- goto out;
- } else if (lvb_len > 0) {
- if (lock->l_lvb_len > 0) {
- /* for extent lock, lvb contains ost_lvb{}. */
- LASSERT(lock->l_lvb_data != NULL);
-
- if (unlikely(lock->l_lvb_len < lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
- lock->l_lvb_len, lvb_len);
- rc = -EINVAL;
- goto out;
- }
- } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
- * variable length */
- void *lvb_data;
-
- lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (!lvb_data) {
- LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
- rc = -ENOMEM;
- goto out;
- }
-
- lock_res_and_lock(lock);
- LASSERT(lock->l_lvb_data == NULL);
- lock->l_lvb_type = LVB_T_LAYOUT;
- lock->l_lvb_data = lvb_data;
- lock->l_lvb_len = lvb_len;
- unlock_res_and_lock(lock);
- }
- }
-
- lock_res_and_lock(lock);
- if ((lock->l_flags & LDLM_FL_DESTROYED) ||
- lock->l_granted_mode == lock->l_req_mode) {
- /* bug 11300: the lock has already been granted */
- unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "Double grant race happened");
- rc = 0;
- goto out;
- }
-
- /* If we receive the completion AST before the actual enqueue returned,
- * then we might need to switch lock modes, resources, or extents. */
- if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
- lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
- LDLM_DEBUG(lock, "completion AST, new lock mode");
- }
-
- if (lock->l_resource->lr_type != LDLM_PLAIN) {
- ldlm_convert_policy_to_local(req->rq_export,
- dlm_req->lock_desc.l_resource.lr_type,
- &dlm_req->lock_desc.l_policy_data,
- &lock->l_policy_data);
- LDLM_DEBUG(lock, "completion AST, new policy data");
- }
-
- ldlm_resource_unlink_lock(lock);
- if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
- &lock->l_resource->lr_name,
- sizeof(lock->l_resource->lr_name)) != 0) {
- unlock_res_and_lock(lock);
- rc = ldlm_lock_change_resource(ns, lock,
- &dlm_req->lock_desc.l_resource.lr_name);
- if (rc < 0) {
- LDLM_ERROR(lock, "Failed to allocate resource");
- goto out;
- }
- LDLM_DEBUG(lock, "completion AST, new resource");
- CERROR("change resource!\n");
- lock_res_and_lock(lock);
- }
-
- if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
- /* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast. */
- ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
- LDLM_DEBUG(lock, "completion AST includes blocking AST");
- }
-
- if (lock->l_lvb_len > 0) {
- rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
- lock->l_lvb_data, lvb_len);
- if (rc < 0) {
- unlock_res_and_lock(lock);
- goto out;
- }
- }
-
- ldlm_grant_lock(lock, &ast_list);
- unlock_res_and_lock(lock);
-
- LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
-
-	/* Let the enqueue path call osc_lock_upcall() and initialize
-	 * l_ast_data */
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
-
- ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
-
- LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
- lock);
-
-out:
- if (rc < 0) {
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_FAILED;
- unlock_res_and_lock(lock);
- wake_up(&lock->l_waitq);
- }
- LDLM_LOCK_RELEASE(lock);
-}
-
-/**
- * Callback handler for receiving incoming glimpse ASTs.
- *
- * This only can happen on client side. After handling the glimpse AST
- * we also consider dropping the lock here if it is unused locally for a
- * long time.
- */
-static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
- struct ldlm_namespace *ns,
- struct ldlm_request *dlm_req,
- struct ldlm_lock *lock)
-{
- int rc = -ENOSYS;
-
- LDLM_DEBUG(lock, "client glimpse AST callback handler");
-
- if (lock->l_glimpse_ast != NULL)
- rc = lock->l_glimpse_ast(lock, req);
-
- if (req->rq_repmsg != NULL) {
- ptlrpc_reply(req);
- } else {
- req->rq_status = rc;
- ptlrpc_error(req);
- }
-
- lock_res_and_lock(lock);
- if (lock->l_granted_mode == LCK_PW &&
- !lock->l_readers && !lock->l_writers &&
- cfs_time_after(cfs_time_current(),
- cfs_time_add(lock->l_last_used,
- cfs_time_seconds(10)))) {
- unlock_res_and_lock(lock);
- if (ldlm_bl_to_thread_lock(ns, NULL, lock))
- ldlm_handle_bl_callback(ns, NULL, lock);
-
- return;
- }
- unlock_res_and_lock(lock);
- LDLM_LOCK_RELEASE(lock);
-}
-
-static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
-{
- if (req->rq_no_reply)
- return 0;
-
- req->rq_status = rc;
- if (!req->rq_packed_final) {
- rc = lustre_pack_reply(req, 1, NULL, NULL);
- if (rc)
- return rc;
- }
- return ptlrpc_reply(req);
-}
-
-static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
- ldlm_cancel_flags_t cancel_flags)
-{
- struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
-
- spin_lock(&blp->blp_lock);
- if (blwi->blwi_lock &&
- blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
- /* add LDLM_FL_DISCARD_DATA requests to the priority list */
- list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
- } else {
- /* other blocking callbacks are added to the regular list */
- list_add_tail(&blwi->blwi_entry, &blp->blp_list);
- }
- spin_unlock(&blp->blp_lock);
-
- wake_up(&blp->blp_waitq);
-
-	/* cannot check blwi->blwi_flags as blwi may already have been freed
-	 * in LCF_ASYNC mode */
- if (!(cancel_flags & LCF_ASYNC))
- wait_for_completion(&blwi->blwi_comp);
-
- return 0;
-}
-
-static inline void init_blwi(struct ldlm_bl_work_item *blwi,
- struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count,
- struct ldlm_lock *lock,
- ldlm_cancel_flags_t cancel_flags)
-{
- init_completion(&blwi->blwi_comp);
- INIT_LIST_HEAD(&blwi->blwi_head);
-
- if (memory_pressure_get())
- blwi->blwi_mem_pressure = 1;
-
- blwi->blwi_ns = ns;
- blwi->blwi_flags = cancel_flags;
- if (ld != NULL)
- blwi->blwi_ld = *ld;
- if (count) {
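-		/* list_add() + list_del_init() below splice the whole cancels
-		 * list onto blwi_head in O(1) */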
- list_add(&blwi->blwi_head, cancels);
- list_del_init(cancels);
- blwi->blwi_count = count;
- } else {
- blwi->blwi_lock = lock;
- }
-}
-
-/**
- * Queues a list of locks \a cancels containing \a count locks
- * for later processing by a blocking thread. If \a count is zero,
- * then the lock referenced as \a lock is queued instead.
- *
- * The blocking thread will then call the lock's ->l_blocking_ast callback.
- * If queuing fails, an error is returned and the caller is expected to call
- * ->l_blocking_ast itself.
- */
-static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock,
- struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags)
-{
- if (cancels && count == 0)
- return 0;
-
- if (cancel_flags & LCF_ASYNC) {
- struct ldlm_bl_work_item *blwi;
-
- blwi = kzalloc(sizeof(*blwi), GFP_NOFS);
- if (!blwi)
- return -ENOMEM;
- init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
-
- return __ldlm_bl_to_thread(blwi, cancel_flags);
- } else {
-		/* if it is a synchronous call, do minimal memory allocation,
-		 * as it could be triggered from the kernel shrinker
-		 */
- struct ldlm_bl_work_item blwi;
-
- memset(&blwi, 0, sizeof(blwi));
- init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
- return __ldlm_bl_to_thread(&blwi, cancel_flags);
- }
-}
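
The split above between the LCF_ASYNC and synchronous paths is a common pattern: an asynchronous caller must heap-allocate the work item because it outlives the caller's stack frame, while a synchronous caller can keep the item on its own stack and wait, so nothing needs to be allocated when the call comes in from the memory shrinker. Below is a stand-alone sketch of that pattern only; struct work_item, queue_work_item() and submit() are made-up names, not part of the LDLM code.

#include <stdlib.h>

struct work_item {
	int payload;
	int on_heap;		/* the worker frees heap-allocated items */
};

/* Stub worker hand-off; if !async, a real version would also wait here. */
static void queue_work_item(struct work_item *w, int async)
{
	/* a real worker would process w here */
	if (w->on_heap)
		free(w);
	(void)async;
}

static int submit(int payload, int async)
{
	if (async) {
		/* the caller returns right away, so the item must be on the heap */
		struct work_item *w = calloc(1, sizeof(*w));

		if (!w)
			return -1;
		w->payload = payload;
		w->on_heap = 1;
		queue_work_item(w, 1);
		return 0;
	}

	/* synchronous: the item lives on the caller's stack, no allocation */
	struct work_item w = { .payload = payload, .on_heap = 0 };

	queue_work_item(&w, 0);
	return 0;
}

int main(void)
{
	submit(1, 0);		/* shrinker-style synchronous submission */
	return submit(2, 1);	/* asynchronous submission */
}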
-
-
-int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock)
-{
- return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
-}
-
-int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
- struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags)
-{
- return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
-}
-
-/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
-static int ldlm_handle_setinfo(struct ptlrpc_request *req)
-{
- struct obd_device *obd = req->rq_export->exp_obd;
- char *key;
- void *val;
- int keylen, vallen;
- int rc = -ENOSYS;
-
- DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
-
- req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
-
- key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- if (key == NULL) {
- DEBUG_REQ(D_IOCTL, req, "no set_info key");
- return -EFAULT;
- }
- keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT);
- val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
- if (val == NULL) {
- DEBUG_REQ(D_IOCTL, req, "no set_info val");
- return -EFAULT;
- }
- vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT);
-
- /* We are responsible for swabbing contents of val */
-
- if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
- /* Pass it on to mdc (the "export" in this case) */
- rc = obd_set_info_async(req->rq_svc_thread->t_env,
- req->rq_export,
- sizeof(KEY_HSM_COPYTOOL_SEND),
- KEY_HSM_COPYTOOL_SEND,
- vallen, val, NULL);
- else
- DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
-
- return rc;
-}
-
-static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
- const char *msg, int rc,
- struct lustre_handle *handle)
-{
- DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
- "%s: [nid %s] [rc %d] [lock %#llx]",
- msg, libcfs_id2str(req->rq_peer), rc,
- handle ? handle->cookie : 0);
- if (req->rq_no_reply)
-		CWARN("No reply was sent, possibly due to bug 21636.\n");
-	else if (rc)
-		CWARN("Sending the reply failed, possibly due to bug 21636.\n");
-}
-
-static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
-{
- struct obd_quotactl *oqctl;
- struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
-
- oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (oqctl == NULL) {
- CERROR("Can't unpack obd_quotactl\n");
- return -EPROTO;
- }
-
- oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
-
- cli->cl_qchk_stat = oqctl->qc_stat;
- return 0;
-}
-
-/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
-static int ldlm_callback_handler(struct ptlrpc_request *req)
-{
- struct ldlm_namespace *ns;
- struct ldlm_request *dlm_req;
- struct ldlm_lock *lock;
- int rc;
-
- /* Requests arrive in sender's byte order. The ptlrpc service
- * handler has already checked and, if necessary, byte-swapped the
- * incoming request message body, but I am responsible for the
- * message buffers. */
-
- /* do nothing for sec context finalize */
- if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
- return 0;
-
- req_capsule_init(&req->rq_pill, req, RCL_SERVER);
-
- if (req->rq_export == NULL) {
- rc = ldlm_callback_reply(req, -ENOTCONN);
- ldlm_callback_errmsg(req, "Operate on unconnected server",
- rc, NULL);
- return 0;
- }
-
- LASSERT(req->rq_export != NULL);
- LASSERT(req->rq_export->exp_obd != NULL);
-
- switch (lustre_msg_get_opc(req->rq_reqmsg)) {
- case LDLM_BL_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
- return 0;
- break;
- case LDLM_CP_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
- return 0;
- break;
- case LDLM_GL_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
- return 0;
- break;
- case LDLM_SET_INFO:
- rc = ldlm_handle_setinfo(req);
- ldlm_callback_reply(req, rc);
- return 0;
- case OBD_QC_CALLBACK:
- req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
- return 0;
- rc = ldlm_handle_qc_callback(req);
- ldlm_callback_reply(req, rc);
- return 0;
- default:
- CERROR("unknown opcode %u\n",
- lustre_msg_get_opc(req->rq_reqmsg));
- ldlm_callback_reply(req, -EPROTO);
- return 0;
- }
-
- ns = req->rq_export->exp_obd->obd_namespace;
- LASSERT(ns != NULL);
-
- req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
-
- dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- if (dlm_req == NULL) {
- rc = ldlm_callback_reply(req, -EPROTO);
- ldlm_callback_errmsg(req, "Operate without parameter", rc,
- NULL);
- return 0;
- }
-
-	/* Force a known safe race: send a cancel to the server for a lock
-	 * on which the server has already started a blocking callback. */
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
- lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
- rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
- if (rc < 0)
- CERROR("ldlm_cli_cancel: %d\n", rc);
- }
-
- lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
- if (!lock) {
- CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n",
- dlm_req->lock_handle[0].cookie);
- rc = ldlm_callback_reply(req, -EINVAL);
- ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
- &dlm_req->lock_handle[0]);
- return 0;
- }
-
- if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
- lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
- /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
- lock_res_and_lock(lock);
- lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_AST_FLAGS);
- if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
-		/* If somebody cancels the lock and the cache has already been
-		 * dropped, or the lock failed before the cp_ast was received
-		 * on the client, we can tell the server we have no lock.
-		 * Otherwise, we should send the cancel after dropping the
-		 * cache. */
- if (((lock->l_flags & LDLM_FL_CANCELING) &&
- (lock->l_flags & LDLM_FL_BL_DONE)) ||
- (lock->l_flags & LDLM_FL_FAILED)) {
- LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
- dlm_req->lock_handle[0].cookie);
- unlock_res_and_lock(lock);
- LDLM_LOCK_RELEASE(lock);
- rc = ldlm_callback_reply(req, -EINVAL);
- ldlm_callback_errmsg(req, "Operate on stale lock", rc,
- &dlm_req->lock_handle[0]);
- return 0;
- }
- /* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast. */
- ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_BL_AST;
- }
- unlock_res_and_lock(lock);
-
- /* We want the ost thread to get this reply so that it can respond
- * to ost requests (write cache writeback) that might be triggered
- * in the callback.
- *
- * But we'd also like to be able to indicate in the reply that we're
- * cancelling right now, because it's unused, or have an intent result
- * in the reply, so we might have to push the responsibility for sending
- * the reply down into the AST handlers, alas. */
-
- switch (lustre_msg_get_opc(req->rq_reqmsg)) {
- case LDLM_BL_CALLBACK:
- CDEBUG(D_INODE, "blocking ast\n");
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
- if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
- rc = ldlm_callback_reply(req, 0);
- if (req->rq_no_reply || rc)
- ldlm_callback_errmsg(req, "Normal process", rc,
- &dlm_req->lock_handle[0]);
- }
- if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
- ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
- break;
- case LDLM_CP_CALLBACK:
- CDEBUG(D_INODE, "completion ast\n");
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
- ldlm_callback_reply(req, 0);
- ldlm_handle_cp_callback(req, ns, dlm_req, lock);
- break;
- case LDLM_GL_CALLBACK:
- CDEBUG(D_INODE, "glimpse ast\n");
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
- ldlm_handle_gl_callback(req, ns, dlm_req, lock);
- break;
- default:
- LBUG(); /* checked above */
- }
-
- return 0;
-}
-
-
-static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
-{
- struct ldlm_bl_work_item *blwi = NULL;
- static unsigned int num_bl;
-
- spin_lock(&blp->blp_lock);
-	/* service blp_list at least once per blp_num_threads dequeues */
- if (!list_empty(&blp->blp_list) &&
- (list_empty(&blp->blp_prio_list) || num_bl == 0))
- blwi = list_entry(blp->blp_list.next,
- struct ldlm_bl_work_item, blwi_entry);
-	else if (!list_empty(&blp->blp_prio_list))
-		blwi = list_entry(blp->blp_prio_list.next,
-				  struct ldlm_bl_work_item, blwi_entry);
-
- if (blwi) {
- if (++num_bl >= atomic_read(&blp->blp_num_threads))
- num_bl = 0;
- list_del(&blwi->blwi_entry);
- }
- spin_unlock(&blp->blp_lock);
-
- return blwi;
-}
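
As a concrete example of the fairness scheme in ldlm_bl_get_work(): with four blocking threads, the shared num_bl counter cycles 0, 1, 2, 3, 0, and so on. The priority list is preferred whenever it is non-empty, except on the dequeue where num_bl is 0, so LDLM_FL_DISCARD_DATA callbacks are expedited without the regular blp_list ever waiting for more than blp_num_threads work items.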
-
-/* This only contains temporary data until the thread starts */
-struct ldlm_bl_thread_data {
- char bltd_name[CFS_CURPROC_COMM_MAX];
- struct ldlm_bl_pool *bltd_blp;
- struct completion bltd_comp;
- int bltd_num;
-};
-
-static int ldlm_bl_thread_main(void *arg);
-
-static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
-{
- struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
- struct task_struct *task;
-
- init_completion(&bltd.bltd_comp);
- bltd.bltd_num = atomic_read(&blp->blp_num_threads);
- snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
- "ldlm_bl_%02d", bltd.bltd_num);
- task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
- if (IS_ERR(task)) {
- CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
- atomic_read(&blp->blp_num_threads), PTR_ERR(task));
- return PTR_ERR(task);
- }
- wait_for_completion(&bltd.bltd_comp);
-
- return 0;
-}
-
-/**
- * Main blocking requests processing thread.
- *
- * Callers put locks into its queue by calling ldlm_bl_to_thread.
- * This thread in the end ends up doing actual call to ->l_blocking_ast
- * for queued locks.
- */
-static int ldlm_bl_thread_main(void *arg)
-{
- struct ldlm_bl_pool *blp;
-
- {
- struct ldlm_bl_thread_data *bltd = arg;
-
- blp = bltd->bltd_blp;
-
- atomic_inc(&blp->blp_num_threads);
- atomic_inc(&blp->blp_busy_threads);
-
- complete(&bltd->bltd_comp);
- /* cannot use bltd after this, it is only on caller's stack */
- }
-
- while (1) {
- struct l_wait_info lwi = { 0 };
- struct ldlm_bl_work_item *blwi = NULL;
- int busy;
-
- blwi = ldlm_bl_get_work(blp);
-
- if (blwi == NULL) {
- atomic_dec(&blp->blp_busy_threads);
- l_wait_event_exclusive(blp->blp_waitq,
- (blwi = ldlm_bl_get_work(blp)) != NULL,
- &lwi);
- busy = atomic_inc_return(&blp->blp_busy_threads);
- } else {
- busy = atomic_read(&blp->blp_busy_threads);
- }
-
- if (blwi->blwi_ns == NULL)
- /* added by ldlm_cleanup() */
- break;
-
-		/* Not fatal if this races and we end up with a few too many
-		 * threads */
- if (unlikely(busy < blp->blp_max_threads &&
- busy >= atomic_read(&blp->blp_num_threads) &&
- !blwi->blwi_mem_pressure))
- /* discard the return value, we tried */
- ldlm_bl_thread_start(blp);
-
- if (blwi->blwi_mem_pressure)
- memory_pressure_set();
-
- if (blwi->blwi_count) {
- int count;
-			/* In the special case where we cancel LRU locks
-			 * asynchronously, the list of locks is passed here.
-			 * The locks are thus marked LDLM_FL_CANCELING, but
-			 * NOT yet canceled locally. */
- count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
- blwi->blwi_count,
- LCF_BL_AST);
- ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
- blwi->blwi_flags);
- } else {
- ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
- blwi->blwi_lock);
- }
- if (blwi->blwi_mem_pressure)
- memory_pressure_clr();
-
- if (blwi->blwi_flags & LCF_ASYNC)
- kfree(blwi);
- else
- complete(&blwi->blwi_comp);
- }
-
- atomic_dec(&blp->blp_busy_threads);
- atomic_dec(&blp->blp_num_threads);
- complete(&blp->blp_comp);
- return 0;
-}
-
-
-static int ldlm_setup(void);
-static int ldlm_cleanup(void);
-
-int ldlm_get_ref(void)
-{
- int rc = 0;
-
- mutex_lock(&ldlm_ref_mutex);
- if (++ldlm_refcount == 1) {
- rc = ldlm_setup();
- if (rc)
- ldlm_refcount--;
- }
- mutex_unlock(&ldlm_ref_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL(ldlm_get_ref);
-
-void ldlm_put_ref(void)
-{
- mutex_lock(&ldlm_ref_mutex);
- if (ldlm_refcount == 1) {
- int rc = ldlm_cleanup();
-
- if (rc)
- CERROR("ldlm_cleanup failed: %d\n", rc);
- else
- ldlm_refcount--;
- } else {
- ldlm_refcount--;
- }
- mutex_unlock(&ldlm_ref_mutex);
-}
-EXPORT_SYMBOL(ldlm_put_ref);
-
-/*
- * Export handle<->lock hash operations.
- */
-static unsigned
-ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
-{
- return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
-}
-
-static void *
-ldlm_export_lock_key(struct hlist_node *hnode)
-{
- struct ldlm_lock *lock;
-
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
- return &lock->l_remote_handle;
-}
-
-static void
-ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
-{
- struct ldlm_lock *lock;
-
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
- lock->l_remote_handle = *(struct lustre_handle *)key;
-}
-
-static int
-ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
-{
- return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
-}
-
-static void *
-ldlm_export_lock_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
-}
-
-static void
-ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ldlm_lock *lock;
-
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
- LDLM_LOCK_GET(lock);
-}
-
-static void
-ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ldlm_lock *lock;
-
- lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
- LDLM_LOCK_RELEASE(lock);
-}
-
-static cfs_hash_ops_t ldlm_export_lock_ops = {
- .hs_hash = ldlm_export_lock_hash,
- .hs_key = ldlm_export_lock_key,
- .hs_keycmp = ldlm_export_lock_keycmp,
- .hs_keycpy = ldlm_export_lock_keycpy,
- .hs_object = ldlm_export_lock_object,
- .hs_get = ldlm_export_lock_get,
- .hs_put = ldlm_export_lock_put,
- .hs_put_locked = ldlm_export_lock_put,
-};
-
-int ldlm_init_export(struct obd_export *exp)
-{
- int rc;
-
- exp->exp_lock_hash =
- cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
- HASH_EXP_LOCK_CUR_BITS,
- HASH_EXP_LOCK_MAX_BITS,
- HASH_EXP_LOCK_BKT_BITS, 0,
- CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
- &ldlm_export_lock_ops,
- CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
- CFS_HASH_NBLK_CHANGE);
-
- if (!exp->exp_lock_hash)
- return -ENOMEM;
-
- rc = ldlm_init_flock_export(exp);
- if (rc)
- goto err;
-
- return 0;
-err:
- ldlm_destroy_export(exp);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_init_export);
-
-void ldlm_destroy_export(struct obd_export *exp)
-{
- cfs_hash_putref(exp->exp_lock_hash);
- exp->exp_lock_hash = NULL;
-
- ldlm_destroy_flock_export(exp);
-}
-EXPORT_SYMBOL(ldlm_destroy_export);
-
-extern unsigned int ldlm_cancel_unused_locks_before_replay;
-
-static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
-	return sprintf(buf, "%u\n", ldlm_cancel_unused_locks_before_replay);
-}
-
-static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- ldlm_cancel_unused_locks_before_replay = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
-
-/* These are for root of /sys/fs/lustre/ldlm */
-static struct attribute *ldlm_attrs[] = {
- &lustre_attr_cancel_unused_locks_before_replay.attr,
- NULL,
-};
-
-static struct attribute_group ldlm_attr_group = {
- .attrs = ldlm_attrs,
-};
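
Once ldlm_setup() below registers this group on the "ldlm" kobject, the tunable defined above should appear as a plain sysfs file, presumably /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay, readable and writable with ordinary cat and echo; the store handler accepts any value that kstrtoul() can parse in base 10.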
-
-static int ldlm_setup(void)
-{
- static struct ptlrpc_service_conf conf;
- struct ldlm_bl_pool *blp = NULL;
- int rc = 0;
- int i;
-
- if (ldlm_state != NULL)
- return -EALREADY;
-
- ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
- if (!ldlm_state)
- return -ENOMEM;
-
- ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj);
- if (!ldlm_kobj) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
- if (rc)
- goto out;
-
- ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
- if (!ldlm_ns_kset) {
- rc = -ENOMEM;
- goto out;
- }
-
- ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
- if (!ldlm_svc_kset) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ldlm_debugfs_setup();
- if (rc != 0)
- goto out;
-
- memset(&conf, 0, sizeof(conf));
- conf = (typeof(conf)) {
- .psc_name = "ldlm_cbd",
- .psc_watchdog_factor = 2,
- .psc_buf = {
- .bc_nbufs = LDLM_CLIENT_NBUFS,
- .bc_buf_size = LDLM_BUFSIZE,
- .bc_req_max_size = LDLM_MAXREQSIZE,
- .bc_rep_max_size = LDLM_MAXREPSIZE,
- .bc_req_portal = LDLM_CB_REQUEST_PORTAL,
- .bc_rep_portal = LDLM_CB_REPLY_PORTAL,
- },
- .psc_thr = {
- .tc_thr_name = "ldlm_cb",
- .tc_thr_factor = LDLM_THR_FACTOR,
- .tc_nthrs_init = LDLM_NTHRS_INIT,
- .tc_nthrs_base = LDLM_NTHRS_BASE,
- .tc_nthrs_max = LDLM_NTHRS_MAX,
- .tc_nthrs_user = ldlm_num_threads,
- .tc_cpu_affinity = 1,
- .tc_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD,
- },
- .psc_cpt = {
- .cc_pattern = ldlm_cpts,
- },
- .psc_ops = {
- .so_req_handler = ldlm_callback_handler,
- },
- };
- ldlm_state->ldlm_cb_service =
- ptlrpc_register_service(&conf, ldlm_svc_kset,
- ldlm_svc_debugfs_dir);
- if (IS_ERR(ldlm_state->ldlm_cb_service)) {
- CERROR("failed to start service\n");
- rc = PTR_ERR(ldlm_state->ldlm_cb_service);
- ldlm_state->ldlm_cb_service = NULL;
- goto out;
- }
-
-
- blp = kzalloc(sizeof(*blp), GFP_NOFS);
- if (!blp) {
- rc = -ENOMEM;
- goto out;
- }
- ldlm_state->ldlm_bl_pool = blp;
-
- spin_lock_init(&blp->blp_lock);
- INIT_LIST_HEAD(&blp->blp_list);
- INIT_LIST_HEAD(&blp->blp_prio_list);
- init_waitqueue_head(&blp->blp_waitq);
- atomic_set(&blp->blp_num_threads, 0);
- atomic_set(&blp->blp_busy_threads, 0);
-
- if (ldlm_num_threads == 0) {
- blp->blp_min_threads = LDLM_NTHRS_INIT;
- blp->blp_max_threads = LDLM_NTHRS_MAX;
- } else {
- blp->blp_min_threads = blp->blp_max_threads =
- min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
- ldlm_num_threads));
- }
-
- for (i = 0; i < blp->blp_min_threads; i++) {
- rc = ldlm_bl_thread_start(blp);
- if (rc < 0)
- goto out;
- }
-
- rc = ldlm_pools_init();
- if (rc) {
- CERROR("Failed to initialize LDLM pools: %d\n", rc);
- goto out;
- }
- return 0;
-
- out:
- ldlm_cleanup();
- return rc;
-}
-
-static int ldlm_cleanup(void)
-{
- if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
- !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
- CERROR("ldlm still has namespaces; clean these up first.\n");
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
- return -EBUSY;
- }
-
- ldlm_pools_fini();
-
- if (ldlm_state->ldlm_bl_pool != NULL) {
- struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
-
- while (atomic_read(&blp->blp_num_threads) > 0) {
- struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
-
- init_completion(&blp->blp_comp);
-
- spin_lock(&blp->blp_lock);
- list_add_tail(&blwi.blwi_entry, &blp->blp_list);
- wake_up(&blp->blp_waitq);
- spin_unlock(&blp->blp_lock);
-
- wait_for_completion(&blp->blp_comp);
- }
-
- kfree(blp);
- }
-
- if (ldlm_state->ldlm_cb_service != NULL)
- ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
-
- if (ldlm_ns_kset)
- kset_unregister(ldlm_ns_kset);
- if (ldlm_svc_kset)
- kset_unregister(ldlm_svc_kset);
- if (ldlm_kobj)
- kobject_put(ldlm_kobj);
-
- ldlm_debugfs_cleanup();
-
- kfree(ldlm_state);
- ldlm_state = NULL;
-
- return 0;
-}
-
-int ldlm_init(void)
-{
- mutex_init(&ldlm_ref_mutex);
- mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
- mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
- ldlm_resource_slab = kmem_cache_create("ldlm_resources",
- sizeof(struct ldlm_resource), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (ldlm_resource_slab == NULL)
- return -ENOMEM;
-
- ldlm_lock_slab = kmem_cache_create("ldlm_locks",
- sizeof(struct ldlm_lock), 0,
- SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
- if (ldlm_lock_slab == NULL) {
- kmem_cache_destroy(ldlm_resource_slab);
- return -ENOMEM;
- }
-
- ldlm_interval_slab = kmem_cache_create("interval_node",
- sizeof(struct ldlm_interval),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (ldlm_interval_slab == NULL) {
- kmem_cache_destroy(ldlm_resource_slab);
- kmem_cache_destroy(ldlm_lock_slab);
- return -ENOMEM;
- }
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- class_export_dump_hook = ldlm_dump_export_locks;
-#endif
- return 0;
-}
-
-void ldlm_exit(void)
-{
- if (ldlm_refcount)
- CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
- kmem_cache_destroy(ldlm_resource_slab);
-	/* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to
-	 * call synchronize_rcu() to wait for a grace period to elapse, so
-	 * that ldlm_lock_free() gets a chance to be called. */
- synchronize_rcu();
- kmem_cache_destroy(ldlm_lock_slab);
- kmem_cache_destroy(ldlm_interval_slab);
-}
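
The comment in ldlm_exit() above refers to the standard RCU deferred-free pattern: dropping the last reference does not free the object directly but queues the free through call_rcu(), so teardown has to wait for outstanding callbacks before destroying the slab cache. Here is a minimal sketch of that pattern with hypothetical names (struct foo, foo_cache and the foo_* helpers); it mirrors the comment above rather than the actual ldlm_lock_put() implementation.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {
	struct rcu_head foo_rcu;
	/* ... payload ... */
};

static struct kmem_cache *foo_cache;

static void foo_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(foo_cache, container_of(head, struct foo, foo_rcu));
}

/* Called when the last reference to a foo is dropped. */
static void foo_free(struct foo *f)
{
	/* defer the actual free until after an RCU grace period */
	call_rcu(&f->foo_rcu, foo_free_rcu);
}

static void foo_cache_exit(void)
{
	/* wait, as ldlm_exit() does, so pending foo_free_rcu() calls can run */
	synchronize_rcu();
	kmem_cache_destroy(foo_cache);
}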
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
deleted file mode 100644
index a1fe2c161e38..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_plain.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-/**
- * This file contains implementation of PLAIN lock type.
- *
- * PLAIN locks are the simplest form of LDLM locking, and are used when
- * there only needs to be a single lock on a resource. This avoids some
- * of the complexity of EXTENT and IBITS lock types, but doesn't allow
- * different "parts" of a resource to be locked concurrently. Example
- * use cases for PLAIN locks include locking of MGS configuration logs
- * and (as of Lustre 2.4) quota records.
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../include/lustre_dlm.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_lib.h"
-
-#include "ldlm_internal.h"
-
-
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- /* No policy for plain locks */
-}
-
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
-{
- /* No policy for plain locks */
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
deleted file mode 100644
index 1c9d67fbeb27..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_pool.c
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-/*
 * The idea of this code is rather simple. Every second, for each server
 * namespace, we have the SLV - the server lock volume - which is calculated
 * from the current number of granted locks, the grant speed over the last
 * period, and so on - that is, the locking load. For simplicity, this SLV
 * number may be thought of as a flow definition. It is sent to clients at
 * every opportunity to let them know the current load situation on the
 * server. Initially, the SLV on the server is set to a maximum value
 * calculated as follows: allow one client to hold all ->pl_limit locks for
 * 10 hours.
- *
- * Next, on clients, the number of cached locks is no longer limited
- * artificially as it was before. Instead, the client calculates a CLV
- * (client lock volume) for each lock and compares it with the last SLV
- * received from the server. The CLV is calculated as the number of locks
- * in the LRU * the lock's live time in seconds. If CLV > SLV, the lock is
- * canceled.
- *
- * The client also has an LVF (lock volume factor), which regulates how
- * sensitive the client should be to the last SLV from the server. The higher
- * the LVF, the more locks will be canceled on the client. Its default value
- * is 1; setting LVF to 2 means the client will cancel locks twice as fast.
- *
- * Locks on a client will be canceled more aggressively in these cases:
- * (1) the SLV is smaller, that is, the load is higher on the server;
- * (2) the client holds a lot of locks (the more locks a client holds, the
- *     greater the chance that some of them should be canceled);
- * (3) the client has old locks (taken some time ago).
- *
- * Thus, in the flow paradigm that we use for better understanding of the SLV,
- * CLV is the volume of a particle in the flow described by the SLV. According
- * to this, if the flow is getting thinner, more and more particles fall
- * outside of it, and as particles are locks, they should be canceled.
- *
- * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
- * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
- * using the LVF, and many cleanups. The flow definition that allows easier
- * understanding of the logic belongs to Nikita Danilov (nikita@clusterfs.com),
- * as do many cleanups and fixes. The design and implementation were done by
- * Yury Umanets (umka@clusterfs.com).
- *
- * Glossary for terms used:
- *
- * pl_limit - Number of allowed locks in pool. Applies to server and client
- * side (tunable);
- *
- * pl_granted - Number of granted locks (calculated);
- * pl_grant_rate - Number of granted locks for last T (calculated);
- * pl_cancel_rate - Number of canceled locks for last T (calculated);
- * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
- * pl_grant_plan - Planned number of granted locks for next T (calculated);
- * pl_server_lock_volume - Current server lock volume (calculated);
- *
- * As may be seen from the list above, we have a few tunables which may affect
- * behavior considerably. They all may be modified via sysfs. They also make
- * it possible to construct a few pre-defined behavior policies. If none of
- * the predefined policies is suitable for the working pattern in use, a new
- * one may be "constructed" via the sysfs tunables.
- */
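To make the CLV/SLV comparison described above concrete, here is a small stand-alone sketch of the client-side decision (userspace C, not part of the deleted file; the names lru_count, lock_age_sec, lvf and slv are illustrative only, not Lustre identifiers):

#include <stdio.h>
#include <stdint.h>

/* Toy model of the client-side decision described above (illustrative only). */
static int should_cancel(uint64_t lru_count, uint64_t lock_age_sec,
                         uint64_t lvf, uint64_t slv)
{
        uint64_t clv = lru_count * lock_age_sec * lvf; /* client lock volume */

        return clv > slv; /* cancel when the "particle" falls outside the flow */
}

int main(void)
{
        /* 2000 cached locks, a lock idle for 120s, LVF 1, SLV 200000 */
        printf("cancel? %d\n", should_cancel(2000, 120, 1, 200000)); /* 1 */
        return 0;
}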
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../include/lustre_dlm.h"
-#include "../include/cl_object.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "ldlm_internal.h"
-
-
-/*
- * 50 ldlm locks for 1MB of RAM.
- */
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
-
-/*
- * Maximal possible grant step plan in %.
- */
-#define LDLM_POOL_MAX_GSP (30)
-
-/*
- * Minimal possible grant step plan in %.
- */
-#define LDLM_POOL_MIN_GSP (1)
-
-/*
- * This controls the speed of reaching LDLM_POOL_MAX_GSP
- * with increasing thread period.
- */
-#define LDLM_POOL_GSP_STEP_SHIFT (2)
-
-/*
- * LDLM_POOL_GSP% of all locks is default GP.
- */
-#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
-
-/*
- * Max age for locks on clients.
- */
-#define LDLM_POOL_MAX_AGE (36000)
-
-/*
- * The granularity of SLV calculation.
- */
-#define LDLM_POOL_SLV_SHIFT (10)
-
-static inline __u64 dru(__u64 val, __u32 shift, int round_up)
-{
- return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
-}
-
-static inline __u64 ldlm_pool_slv_max(__u32 L)
-{
- /*
-	 * Allow one client to hold all locks for 10 hrs.
-	 * The formula is: limit * 10h / 1 client.
- */
- __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
- return lim;
-}
-
-static inline __u64 ldlm_pool_slv_min(__u32 L)
-{
- return 1;
-}
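A quick stand-alone check of the helpers above (plain C; the constants are copied from the #defines earlier in this file, and the pl_limit value of 50000 is hypothetical): ldlm_pool_slv_max() simply lets one client keep `limit` locks for LDLM_POOL_MAX_AGE seconds, and dru() is a shift-based divide with optional round-up.

#include <stdio.h>
#include <stdint.h>

#define MAX_AGE   36000 /* LDLM_POOL_MAX_AGE */
#define SLV_SHIFT 10    /* LDLM_POOL_SLV_SHIFT */

static uint64_t dru(uint64_t val, unsigned int shift, int round_up)
{
        return (val + (round_up ? (1ULL << shift) - 1 : 0)) >> shift;
}

int main(void)
{
        uint64_t limit = 50000; /* hypothetical pl_limit */

        /* SLV ceiling: one client may hold all `limit` locks for 10h */
        printf("slv_max = %llu\n", (unsigned long long)(limit * MAX_AGE));
        /* dru(): divide by 2^shift, rounding down or up */
        printf("dru(1000, 10, 0) = %llu\n",
               (unsigned long long)dru(1000, SLV_SHIFT, 0)); /* 0 */
        printf("dru(1000, 10, 1) = %llu\n",
               (unsigned long long)dru(1000, SLV_SHIFT, 1)); /* 1 */
        return 0;
}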
-
-enum {
- LDLM_POOL_FIRST_STAT = 0,
- LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
- LDLM_POOL_GRANT_STAT,
- LDLM_POOL_CANCEL_STAT,
- LDLM_POOL_GRANT_RATE_STAT,
- LDLM_POOL_CANCEL_RATE_STAT,
- LDLM_POOL_GRANT_PLAN_STAT,
- LDLM_POOL_SLV_STAT,
- LDLM_POOL_SHRINK_REQTD_STAT,
- LDLM_POOL_SHRINK_FREED_STAT,
- LDLM_POOL_RECALC_STAT,
- LDLM_POOL_TIMING_STAT,
- LDLM_POOL_LAST_STAT
-};
-
-static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
-{
- return container_of(pl, struct ldlm_namespace, ns_pool);
-}
-
-/**
- * Calculates the suggested grant_step in % of available locks for the passed
- * period \a t. This is later used in grant_plan calculations.
- */
-static inline int ldlm_pool_t2gsp(unsigned int t)
-{
- /*
-	 * This yields a 1% grant step for anything below LDLM_POOL_GSP_STEP
-	 * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
-	 *
-	 * How this affects execution:
-	 *
-	 * - for a thread period of 1s we will have a grant_step of 1%, which
-	 * is good from the point of view of taking some load off the server
-	 * and pushing it out to clients. This is because a 1% grant_step
-	 * means that the server will not allow clients to get lots of locks
-	 * in a short period of time while keeping all their old locks cached.
-	 * Clients will always have to give some locks back if they want to
-	 * take some new ones;
-	 *
-	 * - for a thread period of 10s (the default) we will have 23%, which
-	 * means that clients will have enough room to take some new locks
-	 * without giving some back. All locks from this 23% which were not
-	 * taken by clients in the current period will contribute to SLV
-	 * growth. SLV growth means more locks cached on clients, until the
-	 * limit or the grant plan is reached.
- */
- return LDLM_POOL_MAX_GSP -
- ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
- (t >> LDLM_POOL_GSP_STEP_SHIFT));
-}
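The percentages quoted in the comment can be reproduced with a stand-alone re-implementation of the formula above (the constants are copied from the #defines earlier in this file):

#include <stdio.h>

#define MAX_GSP    30 /* LDLM_POOL_MAX_GSP */
#define MIN_GSP    1  /* LDLM_POOL_MIN_GSP */
#define STEP_SHIFT 2  /* LDLM_POOL_GSP_STEP_SHIFT */

static int t2gsp(unsigned int t)
{
        return MAX_GSP - ((MAX_GSP - MIN_GSP) >> (t >> STEP_SHIFT));
}

int main(void)
{
        printf("t=1s  -> %d%%\n", t2gsp(1));  /* 30 - (29 >> 0) = 1%  */
        printf("t=10s -> %d%%\n", t2gsp(10)); /* 30 - (29 >> 2) = 23% */
        return 0;
}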
-
-/**
- * Recalculates next grant limit on passed \a pl.
- *
- * \pre ->pl_lock is locked.
- */
-static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
-{
- int granted, grant_step, limit;
-
- limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
-
- grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
- grant_step = ((limit - granted) * grant_step) / 100;
- pl->pl_grant_plan = granted + grant_step;
- limit = (limit * 5) >> 2;
- if (pl->pl_grant_plan > limit)
- pl->pl_grant_plan = limit;
-}
-
-/**
- * Recalculates next SLV on passed \a pl.
- *
- * \pre ->pl_lock is locked.
- */
-static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
-{
- int granted;
- int grant_plan;
- int round_up;
- __u64 slv;
- __u64 slv_factor;
- __u64 grant_usage;
- __u32 limit;
-
- slv = pl->pl_server_lock_volume;
- grant_plan = pl->pl_grant_plan;
- limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
- round_up = granted < limit;
-
- grant_usage = max_t(int, limit - (granted - grant_plan), 1);
-
- /*
-	 * Find out the SLV change factor, which is the ratio of grant usage
-	 * to the limit. SLV changes as fast as the ratio of grant plan
-	 * consumption. The more locks from the grant plan are left unconsumed
-	 * by clients in the last interval (idle time), the faster SLV grows.
-	 * Conversely, the more the grant plan is over-consumed (load time),
-	 * the faster SLV drops.
- */
- slv_factor = grant_usage << LDLM_POOL_SLV_SHIFT;
- do_div(slv_factor, limit);
- slv = slv * slv_factor;
- slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
-
- if (slv > ldlm_pool_slv_max(limit))
- slv = ldlm_pool_slv_max(limit);
- else if (slv < ldlm_pool_slv_min(limit))
- slv = ldlm_pool_slv_min(limit);
-
- pl->pl_server_lock_volume = slv;
-}
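A worked example of the SLV recalculation above, as a stand-alone sketch (the snapshot values limit=1000, granted=800, grant_plan=900 and slv=1000000 are hypothetical): with the grant plan under-consumed, grant_usage comes out above the limit and the SLV grows by roughly 10% in one step.

#include <stdio.h>
#include <stdint.h>

#define SLV_SHIFT 10 /* LDLM_POOL_SLV_SHIFT */

static uint64_t dru(uint64_t val, unsigned int shift, int round_up)
{
        return (val + (round_up ? (1ULL << shift) - 1 : 0)) >> shift;
}

int main(void)
{
        /* hypothetical pool snapshot */
        uint64_t slv = 1000000;
        int limit = 1000, granted = 800, grant_plan = 900;

        int64_t usage = limit - (granted - grant_plan);      /* 1100 */
        uint64_t grant_usage = usage > 1 ? (uint64_t)usage : 1;
        uint64_t slv_factor = (grant_usage << SLV_SHIFT) / limit; /* 1126 */

        slv = dru(slv * slv_factor, SLV_SHIFT, granted < limit);
        /* under-consumed grant plan -> SLV grows ~10% here */
        printf("new slv = %llu\n", (unsigned long long)slv); /* 1099610 */
        return 0;
}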
-
-/**
- * Recalculates next stats on passed \a pl.
- *
- * \pre ->pl_lock is locked.
- */
-static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
-{
- int grant_plan = pl->pl_grant_plan;
- __u64 slv = pl->pl_server_lock_volume;
- int granted = atomic_read(&pl->pl_granted);
- int grant_rate = atomic_read(&pl->pl_grant_rate);
- int cancel_rate = atomic_read(&pl->pl_cancel_rate);
-
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
- slv);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
- granted);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
- grant_rate);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
- grant_plan);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
- cancel_rate);
-}
-
-/**
- * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
- */
-static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
-{
- struct obd_device *obd;
-
- /*
-	 * Set the new SLV in the obd field for later use without accessing
-	 * the pool. This is required to avoid a race between sending a reply
-	 * to the client with the new SLV and cleaning up the server stack,
-	 * during which we cannot guarantee that the namespace is still alive.
-	 * We only know that the obd is alive as long as a valid export is
-	 * alive.
- */
- obd = ldlm_pl2ns(pl)->ns_obd;
- LASSERT(obd != NULL);
- write_lock(&obd->obd_pool_lock);
- obd->obd_pool_slv = pl->pl_server_lock_volume;
- write_unlock(&obd->obd_pool_lock);
-}
-
-/**
- * Recalculates all pool fields on passed \a pl.
- *
- * \pre ->pl_lock is not locked.
- */
-static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
-{
- time64_t recalc_interval_sec;
-
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period)
- return 0;
-
- spin_lock(&pl->pl_lock);
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period) {
- spin_unlock(&pl->pl_lock);
- return 0;
- }
- /*
- * Recalc SLV after last period. This should be done
- * _before_ recalculating new grant plan.
- */
- ldlm_pool_recalc_slv(pl);
-
- /*
- * Make sure that pool informed obd of last SLV changes.
- */
- ldlm_srv_pool_push_slv(pl);
-
- /*
- * Update grant_plan for new period.
- */
- ldlm_pool_recalc_grant_plan(pl);
-
- pl->pl_recalc_time = ktime_get_real_seconds();
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
- recalc_interval_sec);
- spin_unlock(&pl->pl_lock);
- return 0;
-}
-
-/**
- * This function is used on the server side as the main entry point for
- * memory pressure handling. It decreases the SLV on \a pl according to the
- * passed \a nr and \a gfp_mask.
- *
- * Our goal here is to decrease the SLV in such a way that clients hold
- * \a nr fewer locks over the next 10h.
- */
-static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
- int nr, gfp_t gfp_mask)
-{
- __u32 limit;
-
- /*
- * VM is asking how many entries may be potentially freed.
- */
- if (nr == 0)
- return atomic_read(&pl->pl_granted);
-
- /*
-	 * The client has already canceled its locks, but the server is
-	 * already in the shrinker and can't cancel anything. Let's catch
-	 * this race.
- */
- if (atomic_read(&pl->pl_granted) == 0)
- return 0;
-
- spin_lock(&pl->pl_lock);
-
- /*
-	 * We want the shrinker to possibly cause cancellation of @nr locks
-	 * from clients, or to grant approximately @nr fewer locks in the next
-	 * intervals.
-	 *
-	 * This is why we decrease the SLV by @nr. This effect will only last
-	 * for one re-calc interval (1s these days), which should be enough to
-	 * pass the decreased SLV to all clients. On the next recalc interval
-	 * the pool will either increase the SLV if the lock load is not high,
-	 * keep it at the same level, or even decrease it again; thus the
-	 * shrinker-decreased SLV will affect the next recalc intervals and in
-	 * this way lower the locking load.
- */
- if (nr < pl->pl_server_lock_volume) {
- pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
- } else {
- limit = ldlm_pool_get_limit(pl);
- pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
- }
-
- /*
- * Make sure that pool informed obd of last SLV changes.
- */
- ldlm_srv_pool_push_slv(pl);
- spin_unlock(&pl->pl_lock);
-
- /*
-	 * We have not really freed any memory here so far; it may only be
-	 * freed later. Return 0 so as not to confuse the VM.
- */
- return 0;
-}
-
-/**
- * Setup server side pool \a pl with passed \a limit.
- */
-static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
-{
- struct obd_device *obd;
-
- obd = ldlm_pl2ns(pl)->ns_obd;
- LASSERT(obd != NULL && obd != LP_POISON);
- LASSERT(obd->obd_type != LP_POISON);
- write_lock(&obd->obd_pool_lock);
- obd->obd_pool_limit = limit;
- write_unlock(&obd->obd_pool_lock);
-
- ldlm_pool_set_limit(pl, limit);
- return 0;
-}
-
-/**
- * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to passed \a pl.
- */
-static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
-{
- struct obd_device *obd;
-
- /*
- * Get new SLV and Limit from obd which is updated with coming
- * RPCs.
- */
- obd = ldlm_pl2ns(pl)->ns_obd;
- LASSERT(obd != NULL);
- read_lock(&obd->obd_pool_lock);
- pl->pl_server_lock_volume = obd->obd_pool_slv;
- ldlm_pool_set_limit(pl, obd->obd_pool_limit);
- read_unlock(&obd->obd_pool_lock);
-}
-
-/**
- * Recalculates client side pool \a pl according to current SLV and Limit.
- */
-static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
-{
- time64_t recalc_interval_sec;
- int ret;
-
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period)
- return 0;
-
- spin_lock(&pl->pl_lock);
- /*
- * Check if we need to recalc lists now.
- */
- recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period) {
- spin_unlock(&pl->pl_lock);
- return 0;
- }
-
- /*
- * Make sure that pool knows last SLV and Limit from obd.
- */
- ldlm_cli_pool_pop_slv(pl);
-
- spin_unlock(&pl->pl_lock);
-
- /*
- * Do not cancel locks in case lru resize is disabled for this ns.
- */
- if (!ns_connect_lru_resize(ldlm_pl2ns(pl))) {
- ret = 0;
- goto out;
- }
-
- /*
-	 * While canceling locks on the client we do not need to maintain
-	 * sharp timing; we only want to cancel locks as soon as possible
-	 * according to the new SLV. This may be called when the SLV has
-	 * changed a lot, which is why we do not take pl->pl_recalc_time
-	 * into account here.
- */
- ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
-
-out:
- spin_lock(&pl->pl_lock);
- /*
- * Time of LRU resizing might be longer than period,
- * so update after LRU resizing rather than before it.
- */
- pl->pl_recalc_time = ktime_get_real_seconds();
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
- recalc_interval_sec);
- spin_unlock(&pl->pl_lock);
- return ret;
-}
-
-/**
- * This function is the main entry point for memory pressure handling on the
- * client side. Its main goal is to cancel some number of locks on the passed
- * \a pl according to \a nr and \a gfp_mask.
- */
-static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
- int nr, gfp_t gfp_mask)
-{
- struct ldlm_namespace *ns;
- int unused;
-
- ns = ldlm_pl2ns(pl);
-
- /*
- * Do not cancel locks in case lru resize is disabled for this ns.
- */
- if (!ns_connect_lru_resize(ns))
- return 0;
-
- /*
- * Make sure that pool knows last SLV and Limit from obd.
- */
- ldlm_cli_pool_pop_slv(pl);
-
- spin_lock(&ns->ns_lock);
- unused = ns->ns_nr_unused;
- spin_unlock(&ns->ns_lock);
-
- if (nr == 0)
- return (unused / 100) * sysctl_vfs_cache_pressure;
- else
- return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
-}
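For the nr == 0 ("count") path above, the value reported to the VM is simply the unused-lock count scaled by the VFS cache pressure; a stand-alone sketch with a hypothetical namespace:

#include <stdio.h>

int main(void)
{
        int unused = 5000;  /* hypothetical LRU (unused) lock count */
        int pressure = 100; /* sysctl_vfs_cache_pressure default */

        /* the "count" pass reports freeable objects, scaled by VFS pressure */
        printf("reported freeable: %d\n", (unused / 100) * pressure); /* 5000 */
        return 0;
}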
-
-static const struct ldlm_pool_ops ldlm_srv_pool_ops = {
- .po_recalc = ldlm_srv_pool_recalc,
- .po_shrink = ldlm_srv_pool_shrink,
- .po_setup = ldlm_srv_pool_setup
-};
-
-static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
- .po_recalc = ldlm_cli_pool_recalc,
- .po_shrink = ldlm_cli_pool_shrink
-};
-
-/**
- * Pool recalc wrapper. Will call either the client or the server pool recalc
- * callback depending on which pool \a pl is used.
- */
-int ldlm_pool_recalc(struct ldlm_pool *pl)
-{
- u32 recalc_interval_sec;
- int count;
-
- recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec <= 0)
- goto recalc;
-
- spin_lock(&pl->pl_lock);
- if (recalc_interval_sec > 0) {
- /*
- * Update pool statistics every 1s.
- */
- ldlm_pool_recalc_stats(pl);
-
- /*
- * Zero out all rates and speed for the last period.
- */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- }
- spin_unlock(&pl->pl_lock);
-
- recalc:
- if (pl->pl_ops->po_recalc != NULL) {
- count = pl->pl_ops->po_recalc(pl);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
- count);
- }
- recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
- pl->pl_recalc_period;
- if (recalc_interval_sec <= 0) {
- /* Prevent too frequent recalculation. */
- CDEBUG(D_DLMTRACE,
- "Negative interval(%d), too short period(%lld)",
- recalc_interval_sec,
- (s64)pl->pl_recalc_period);
- recalc_interval_sec = 1;
- }
-
- return recalc_interval_sec;
-}
-
-/*
- * Pool shrink wrapper. Will call either the client or the server pool shrink
- * callback depending on which pool pl is used. When nr == 0, just return the
- * number of freeable locks. Otherwise, return the number of canceled locks.
- */
-int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
- gfp_t gfp_mask)
-{
- int cancel = 0;
-
- if (pl->pl_ops->po_shrink != NULL) {
- cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
- if (nr > 0) {
- lprocfs_counter_add(pl->pl_stats,
- LDLM_POOL_SHRINK_REQTD_STAT,
- nr);
- lprocfs_counter_add(pl->pl_stats,
- LDLM_POOL_SHRINK_FREED_STAT,
- cancel);
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
- pl->pl_name, nr, cancel);
- }
- }
- return cancel;
-}
-EXPORT_SYMBOL(ldlm_pool_shrink);
-
-/**
- * Pool setup wrapper. Will call either the client or the server pool setup
- * callback depending on which pool \a pl is used.
- *
- * Sets passed \a limit into pool \a pl.
- */
-int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
-{
- if (pl->pl_ops->po_setup != NULL)
- return pl->pl_ops->po_setup(pl, limit);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_pool_setup);
-
-static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
-{
- int granted, grant_rate, cancel_rate, grant_step;
- int grant_speed, grant_plan, lvf;
- struct ldlm_pool *pl = m->private;
- __u64 slv, clv;
- __u32 limit;
-
- spin_lock(&pl->pl_lock);
- slv = pl->pl_server_lock_volume;
- clv = pl->pl_client_lock_volume;
- limit = ldlm_pool_get_limit(pl);
- grant_plan = pl->pl_grant_plan;
- granted = atomic_read(&pl->pl_granted);
- grant_rate = atomic_read(&pl->pl_grant_rate);
- cancel_rate = atomic_read(&pl->pl_cancel_rate);
- grant_speed = grant_rate - cancel_rate;
- lvf = atomic_read(&pl->pl_lock_volume_factor);
- grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
- spin_unlock(&pl->pl_lock);
-
- seq_printf(m, "LDLM pool state (%s):\n"
- " SLV: %llu\n"
- " CLV: %llu\n"
- " LVF: %d\n",
- pl->pl_name, slv, clv, lvf);
-
- if (ns_is_server(ldlm_pl2ns(pl))) {
- seq_printf(m, " GSP: %d%%\n"
- " GP: %d\n",
- grant_step, grant_plan);
- }
- seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n"
- " G: %d\n L: %d\n",
- grant_rate, cancel_rate, grant_speed,
- granted, limit);
-
- return 0;
-}
-LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
-
-static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
- pl_kobj);
-
- int grant_speed;
-
- spin_lock(&pl->pl_lock);
- /* serialize with ldlm_pool_recalc */
- grant_speed = atomic_read(&pl->pl_grant_rate) -
- atomic_read(&pl->pl_cancel_rate);
- spin_unlock(&pl->pl_lock);
- return sprintf(buf, "%d\n", grant_speed);
-}
-LUSTRE_RO_ATTR(grant_speed);
-
-LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
-LUSTRE_RO_ATTR(grant_plan);
-
-LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
-LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
-LUSTRE_RW_ATTR(recalc_period);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
-LUSTRE_RO_ATTR(server_lock_volume);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
-LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
-LUSTRE_RW_ATTR(limit);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
-LUSTRE_RO_ATTR(granted);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
-LUSTRE_RO_ATTR(cancel_rate);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
-LUSTRE_RO_ATTR(grant_rate);
-
-LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
-LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
-LUSTRE_RW_ATTR(lock_volume_factor);
-
-#define LDLM_POOL_ADD_VAR(name, var, ops) \
- do { \
- snprintf(var_name, MAX_STRING_SIZE, #name); \
- pool_vars[0].data = var; \
- pool_vars[0].fops = ops; \
- ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\
- } while (0)
-
-/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
-static struct attribute *ldlm_pl_attrs[] = {
- &lustre_attr_grant_speed.attr,
- &lustre_attr_grant_plan.attr,
- &lustre_attr_recalc_period.attr,
- &lustre_attr_server_lock_volume.attr,
- &lustre_attr_limit.attr,
- &lustre_attr_granted.attr,
- &lustre_attr_cancel_rate.attr,
- &lustre_attr_grant_rate.attr,
- &lustre_attr_lock_volume_factor.attr,
- NULL,
-};
-
-static void ldlm_pl_release(struct kobject *kobj)
-{
- struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
- pl_kobj);
- complete(&pl->pl_kobj_unregister);
-}
-
-static struct kobj_type ldlm_pl_ktype = {
- .default_attrs = ldlm_pl_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = ldlm_pl_release,
-};
-
-static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
-{
- struct ldlm_namespace *ns = ldlm_pl2ns(pl);
- int err;
-
- init_completion(&pl->pl_kobj_unregister);
- err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
- "pool");
-
- return err;
-}
-
-static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
-{
- struct ldlm_namespace *ns = ldlm_pl2ns(pl);
- struct dentry *debugfs_ns_parent;
- struct lprocfs_vars pool_vars[2];
- char *var_name = NULL;
- int rc = 0;
-
- var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
- if (!var_name)
- return -ENOMEM;
-
- debugfs_ns_parent = ns->ns_debugfs_entry;
- if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
- CERROR("%s: debugfs entry is not initialized\n",
- ldlm_ns_name(ns));
- rc = -EINVAL;
- goto out_free_name;
- }
- pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
- NULL, NULL);
- if (IS_ERR(pl->pl_debugfs_entry)) {
- CERROR("LdebugFS failed in ldlm-pool-init\n");
- rc = PTR_ERR(pl->pl_debugfs_entry);
- pl->pl_debugfs_entry = NULL;
- goto out_free_name;
- }
-
- var_name[MAX_STRING_SIZE] = '\0';
- memset(pool_vars, 0, sizeof(pool_vars));
- pool_vars[0].name = var_name;
-
- LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);
-
- pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
- LDLM_POOL_FIRST_STAT, 0);
- if (!pl->pl_stats) {
- rc = -ENOMEM;
- goto out_free_name;
- }
-
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "granted", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "grant", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "cancel", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "grant_rate", "locks/s");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "cancel_rate", "locks/s");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "grant_plan", "locks/s");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "slv", "slv");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "shrink_request", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "shrink_freed", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "recalc_freed", "locks");
- lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
- LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
- "recalc_timing", "sec");
- rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
- pl->pl_stats);
-
-out_free_name:
- kfree(var_name);
- return rc;
-}
-
-static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
-{
- kobject_put(&pl->pl_kobj);
- wait_for_completion(&pl->pl_kobj_unregister);
-}
-
-static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
-{
- if (pl->pl_stats != NULL) {
- lprocfs_free_stats(&pl->pl_stats);
- pl->pl_stats = NULL;
- }
- if (pl->pl_debugfs_entry != NULL) {
- ldebugfs_remove(&pl->pl_debugfs_entry);
- pl->pl_debugfs_entry = NULL;
- }
-}
-
-int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client)
-{
- int rc;
-
- spin_lock_init(&pl->pl_lock);
- atomic_set(&pl->pl_granted, 0);
- pl->pl_recalc_time = ktime_get_seconds();
- atomic_set(&pl->pl_lock_volume_factor, 1);
-
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
-
- snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
- ldlm_ns_name(ns), idx);
-
- if (client == LDLM_NAMESPACE_SERVER) {
- pl->pl_ops = &ldlm_srv_pool_ops;
- ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
- pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
- pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
- } else {
- ldlm_pool_set_limit(pl, 1);
- pl->pl_server_lock_volume = 0;
- pl->pl_ops = &ldlm_cli_pool_ops;
- pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
- }
- pl->pl_client_lock_volume = 0;
- rc = ldlm_pool_debugfs_init(pl);
- if (rc)
- return rc;
-
- rc = ldlm_pool_sysfs_init(pl);
- if (rc)
- return rc;
-
- CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
-
- return rc;
-}
-EXPORT_SYMBOL(ldlm_pool_init);
-
-void ldlm_pool_fini(struct ldlm_pool *pl)
-{
- ldlm_pool_sysfs_fini(pl);
- ldlm_pool_debugfs_fini(pl);
-
- /*
-	 * The pool should not be used after this point. We can't free it here
-	 * as it lives in struct ldlm_namespace, but we are still interested
-	 * in catching any abnormal use.
- */
- POISON(pl, 0x5a, sizeof(*pl));
-}
-EXPORT_SYMBOL(ldlm_pool_fini);
-
-/**
- * Add new taken ldlm lock \a lock into pool \a pl accounting.
- */
-void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
-{
- /*
-	 * FLOCK locks are special in the sense that they are almost never
-	 * cancelled; instead, a special kind of lock is used to drop them.
-	 * Also, there is no LRU for flock locks, so there is no point in
-	 * tracking them anyway.
- */
- if (lock->l_resource->lr_type == LDLM_FLOCK)
- return;
-
- atomic_inc(&pl->pl_granted);
- atomic_inc(&pl->pl_grant_rate);
- lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
- /*
-	 * Do not do a pool recalc for the client side, as all locks which
-	 * potentially may be canceled have already been packed into an
-	 * enqueue/cancel RPC. Also, we do not want to run out of stack
-	 * with too long call paths.
- */
- if (ns_is_server(ldlm_pl2ns(pl)))
- ldlm_pool_recalc(pl);
-}
-EXPORT_SYMBOL(ldlm_pool_add);
-
-/**
- * Remove ldlm lock \a lock from pool \a pl accounting.
- */
-void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
-{
- /*
- * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
- */
- if (lock->l_resource->lr_type == LDLM_FLOCK)
- return;
-
- LASSERT(atomic_read(&pl->pl_granted) > 0);
- atomic_dec(&pl->pl_granted);
- atomic_inc(&pl->pl_cancel_rate);
-
- lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
-
- if (ns_is_server(ldlm_pl2ns(pl)))
- ldlm_pool_recalc(pl);
-}
-EXPORT_SYMBOL(ldlm_pool_del);
-
-/**
- * Returns current \a pl SLV.
- *
- * \pre ->pl_lock is not locked.
- */
-__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
-{
- __u64 slv;
-
- spin_lock(&pl->pl_lock);
- slv = pl->pl_server_lock_volume;
- spin_unlock(&pl->pl_lock);
- return slv;
-}
-EXPORT_SYMBOL(ldlm_pool_get_slv);
-
-/**
- * Sets passed \a slv to \a pl.
- *
- * \pre ->pl_lock is not locked.
- */
-void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
-{
- spin_lock(&pl->pl_lock);
- pl->pl_server_lock_volume = slv;
- spin_unlock(&pl->pl_lock);
-}
-EXPORT_SYMBOL(ldlm_pool_set_slv);
-
-/**
- * Returns current \a pl CLV.
- *
- * \pre ->pl_lock is not locked.
- */
-__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
-{
- __u64 slv;
-
- spin_lock(&pl->pl_lock);
- slv = pl->pl_client_lock_volume;
- spin_unlock(&pl->pl_lock);
- return slv;
-}
-EXPORT_SYMBOL(ldlm_pool_get_clv);
-
-/**
- * Sets passed \a clv to \a pl.
- *
- * \pre ->pl_lock is not locked.
- */
-void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
-{
- spin_lock(&pl->pl_lock);
- pl->pl_client_lock_volume = clv;
- spin_unlock(&pl->pl_lock);
-}
-EXPORT_SYMBOL(ldlm_pool_set_clv);
-
-/**
- * Returns current \a pl limit.
- */
-__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
-{
- return atomic_read(&pl->pl_limit);
-}
-EXPORT_SYMBOL(ldlm_pool_get_limit);
-
-/**
- * Sets passed \a limit to \a pl.
- */
-void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
-{
- atomic_set(&pl->pl_limit, limit);
-}
-EXPORT_SYMBOL(ldlm_pool_set_limit);
-
-/**
- * Returns current LVF from \a pl.
- */
-__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
-{
- return atomic_read(&pl->pl_lock_volume_factor);
-}
-EXPORT_SYMBOL(ldlm_pool_get_lvf);
-
-static int ldlm_pool_granted(struct ldlm_pool *pl)
-{
- return atomic_read(&pl->pl_granted);
-}
-
-static struct ptlrpc_thread *ldlm_pools_thread;
-static struct completion ldlm_pools_comp;
-
-/*
- * Count locks from all namespaces (if possible). Returns the number of
- * cached locks.
- */
-static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
-{
- int total = 0, nr_ns;
- struct ldlm_namespace *ns;
- struct ldlm_namespace *ns_old = NULL; /* loop detection */
- void *cookie;
-
- if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
- return 0;
-
- CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
- client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
-
- cookie = cl_env_reenter();
-
- /*
- * Find out how many resources we may release.
- */
- for (nr_ns = ldlm_namespace_nr_read(client);
- nr_ns > 0; nr_ns--) {
- mutex_lock(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_unlock(ldlm_namespace_lock(client));
- cl_env_reexit(cookie);
- return 0;
- }
- ns = ldlm_namespace_first_locked(client);
-
- if (ns == ns_old) {
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
-
- if (ldlm_ns_empty(ns)) {
- ldlm_namespace_move_to_inactive_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
- continue;
- }
-
- if (ns_old == NULL)
- ns_old = ns;
-
- ldlm_namespace_get(ns);
- ldlm_namespace_move_to_active_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
- total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
- ldlm_namespace_put(ns);
- }
-
- cl_env_reexit(cookie);
- return total;
-}
-
-static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
-{
- unsigned long freed = 0;
- int tmp, nr_ns;
- struct ldlm_namespace *ns;
- void *cookie;
-
- if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
- return -1;
-
- cookie = cl_env_reenter();
-
- /*
- * Shrink at least ldlm_namespace_nr_read(client) namespaces.
- */
- for (tmp = nr_ns = ldlm_namespace_nr_read(client);
- tmp > 0; tmp--) {
- int cancel, nr_locks;
-
- /*
- * Do not call shrink under ldlm_namespace_lock(client)
- */
- mutex_lock(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
- ns = ldlm_namespace_first_locked(client);
- ldlm_namespace_get(ns);
- ldlm_namespace_move_to_active_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
-
- nr_locks = ldlm_pool_granted(&ns->ns_pool);
- /*
-		 * We used to shrink proportionally, but with the new shrinker
-		 * API we have lost the total number of freeable locks.
- */
- cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
- freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
- ldlm_namespace_put(ns);
- }
- cl_env_reexit(cookie);
- /*
-	 * We only decrease the SLV in the server pools' shrinker; return
-	 * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128
- */
- return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
-}
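The per-namespace quota computed above can be illustrated with a small stand-alone sketch (nr, nr_ns and nr_locks are hypothetical values): each visited namespace is asked to cancel at least one lock plus its share of the scan target.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int nr = 128;        /* sc->nr_to_scan from the VM */
        int nr_ns = 4;       /* namespaces at the start of the pass */
        int nr_locks = 1000; /* granted locks in the namespace being visited */

        /* same split as above: at least one lock per namespace */
        int cancel = 1 + min_int(nr_locks, nr / nr_ns);

        printf("cancel %d locks from this namespace\n", cancel); /* 33 */
        return 0;
}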
-
-static unsigned long ldlm_pools_srv_count(struct shrinker *s,
- struct shrink_control *sc)
-{
- return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
-}
-
-static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
- struct shrink_control *sc)
-{
- return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
- sc->gfp_mask);
-}
-
-static unsigned long ldlm_pools_cli_count(struct shrinker *s,
- struct shrink_control *sc)
-{
- return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
-}
-
-static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
- struct shrink_control *sc)
-{
- return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
- sc->gfp_mask);
-}
-
-int ldlm_pools_recalc(ldlm_side_t client)
-{
- __u32 nr_l = 0, nr_p = 0, l;
- struct ldlm_namespace *ns;
- struct ldlm_namespace *ns_old = NULL;
- int nr, equal = 0;
- int time = 50; /* seconds of sleep if no active namespaces */
-
- /*
- * No need to setup pool limit for client pools.
- */
- if (client == LDLM_NAMESPACE_SERVER) {
- /*
- * Check all modest namespaces first.
- */
- mutex_lock(ldlm_namespace_lock(client));
- list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain) {
- if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
- continue;
-
- l = ldlm_pool_granted(&ns->ns_pool);
- if (l == 0)
- l = 1;
-
- /*
- * Set the modest pools limit equal to their avg granted
- * locks + ~6%.
- */
- l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
- ldlm_pool_setup(&ns->ns_pool, l);
- nr_l += l;
- nr_p++;
- }
-
- /*
-		 * Make sure that modest namespaces did not eat more than 2/3
- * of limit.
- */
- if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
- CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
- nr_l, LDLM_POOL_HOST_L);
- equal = 1;
- }
-
- /*
- * The rest is given to greedy namespaces.
- */
- list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain) {
- if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
- continue;
-
- if (equal) {
- /*
- * In the case 2/3 locks are eaten out by
- * modest pools, we re-setup equal limit
- * for _all_ pools.
- */
- l = LDLM_POOL_HOST_L /
- ldlm_namespace_nr_read(client);
- } else {
- /*
- * All the rest of greedy pools will have
- * all locks in equal parts.
- */
- l = (LDLM_POOL_HOST_L - nr_l) /
- (ldlm_namespace_nr_read(client) -
- nr_p);
- }
- ldlm_pool_setup(&ns->ns_pool, l);
- }
- mutex_unlock(ldlm_namespace_lock(client));
- }
-
- /*
- * Recalc at least ldlm_namespace_nr_read(client) namespaces.
- */
- for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
- int skip;
- /*
-		 * Lock the list, get the first @ns in the list, take a ref,
-		 * move it to the tail, unlock and call pool recalc. This way
-		 * we avoid calling recalc under the @ns lock, which is really
-		 * good as we get rid of a potential deadlock on client nodes
-		 * when canceling locks synchronously.
- */
- mutex_lock(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
- ns = ldlm_namespace_first_locked(client);
-
- if (ns_old == ns) { /* Full pass complete */
- mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
-
- /* We got an empty namespace, need to move it back to inactive
- * list.
- * The race with parallel resource creation is fine:
- * - If they do namespace_get before our check, we fail the
- * check and they move this item to the end of the list anyway
- * - If we do the check and then they do namespace_get, then
- * we move the namespace to inactive and they will move
- * it back to active (synchronised by the lock, so no clash
- * there).
- */
- if (ldlm_ns_empty(ns)) {
- ldlm_namespace_move_to_inactive_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
- continue;
- }
-
- if (ns_old == NULL)
- ns_old = ns;
-
- spin_lock(&ns->ns_lock);
- /*
-		 * Skip an ns which is being freed; we don't want to increase
-		 * its refcount again, not even temporarily. bz21519 & LU-499.
- */
- if (ns->ns_stopping) {
- skip = 1;
- } else {
- skip = 0;
- ldlm_namespace_get(ns);
- }
- spin_unlock(&ns->ns_lock);
-
- ldlm_namespace_move_to_active_locked(ns, client);
- mutex_unlock(ldlm_namespace_lock(client));
-
- /*
- * After setup is done - recalc the pool.
- */
- if (!skip) {
- int ttime = ldlm_pool_recalc(&ns->ns_pool);
-
- if (ttime < time)
- time = ttime;
-
- ldlm_namespace_put(ns);
- }
- }
- return time;
-}
-EXPORT_SYMBOL(ldlm_pools_recalc);
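A stand-alone sketch of the server-side limit distribution performed above, using hypothetical numbers; the ~6% margin is assumed here to come from a right shift by 4 (the actual value of LDLM_POOLS_MODEST_MARGIN_SHIFT lives in ldlm_internal.h).

#include <stdio.h>

int main(void)
{
        /* hypothetical numbers; MARGIN_SHIFT = 4 (~6%) is an assumption */
        const int MARGIN_SHIFT = 4;
        int host_limit = 120000, total_ns = 5;
        int modest_granted[2] = { 8000, 8000 };
        int nr_l = 0, nr_p = 0, i;

        for (i = 0; i < 2; i++) {
                int l = modest_granted[i];

                l += l >> MARGIN_SHIFT; /* avg granted + ~6% */
                printf("modest ns%d limit = %d\n", i, l); /* 8500 */
                nr_l += l;
                nr_p++;
        }

        if (nr_l < 2 * (host_limit / 3)) {
                /* remaining locks split equally among greedy namespaces */
                int l = (host_limit - nr_l) / (total_ns - nr_p);

                printf("greedy ns limit = %d\n", l); /* 34333 */
        }
        return 0;
}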
-
-static int ldlm_pools_thread_main(void *arg)
-{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
- int s_time, c_time;
-
- thread_set_flags(thread, SVC_RUNNING);
- wake_up(&thread->t_ctl_waitq);
-
- CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
- "ldlm_poold", current_pid());
-
- while (1) {
- struct l_wait_info lwi;
-
- /*
-		 * Recalc all pools on this tick.
- */
- s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
- c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
-
- /*
- * Wait until the next check time, or until we're
- * stopped.
- */
- lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
- NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopping(thread) ||
- thread_is_event(thread),
- &lwi);
-
- if (thread_test_and_clear_flags(thread, SVC_STOPPING))
- break;
- thread_test_and_clear_flags(thread, SVC_EVENT);
- }
-
- thread_set_flags(thread, SVC_STOPPED);
- wake_up(&thread->t_ctl_waitq);
-
- CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
- "ldlm_poold", current_pid());
-
- complete_and_exit(&ldlm_pools_comp, 0);
-}
-
-static int ldlm_pools_thread_start(void)
-{
- struct l_wait_info lwi = { 0 };
- struct task_struct *task;
-
- if (ldlm_pools_thread != NULL)
- return -EALREADY;
-
- ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
- if (!ldlm_pools_thread)
- return -ENOMEM;
-
- init_completion(&ldlm_pools_comp);
- init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
-
- task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
- "ldlm_poold");
- if (IS_ERR(task)) {
- CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
- kfree(ldlm_pools_thread);
- ldlm_pools_thread = NULL;
- return PTR_ERR(task);
- }
- l_wait_event(ldlm_pools_thread->t_ctl_waitq,
- thread_is_running(ldlm_pools_thread), &lwi);
- return 0;
-}
-
-static void ldlm_pools_thread_stop(void)
-{
- if (ldlm_pools_thread == NULL)
- return;
-
- thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
- wake_up(&ldlm_pools_thread->t_ctl_waitq);
-
- /*
- * Make sure that pools thread is finished before freeing @thread.
- * This fixes possible race and oops due to accessing freed memory
- * in pools thread.
- */
- wait_for_completion(&ldlm_pools_comp);
- kfree(ldlm_pools_thread);
- ldlm_pools_thread = NULL;
-}
-
-static struct shrinker ldlm_pools_srv_shrinker = {
- .count_objects = ldlm_pools_srv_count,
- .scan_objects = ldlm_pools_srv_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-static struct shrinker ldlm_pools_cli_shrinker = {
- .count_objects = ldlm_pools_cli_count,
- .scan_objects = ldlm_pools_cli_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-int ldlm_pools_init(void)
-{
- int rc;
-
- rc = ldlm_pools_thread_start();
- if (rc == 0) {
- register_shrinker(&ldlm_pools_srv_shrinker);
- register_shrinker(&ldlm_pools_cli_shrinker);
- }
- return rc;
-}
-EXPORT_SYMBOL(ldlm_pools_init);
-
-void ldlm_pools_fini(void)
-{
- if (ldlm_pools_thread) {
- unregister_shrinker(&ldlm_pools_srv_shrinker);
- unregister_shrinker(&ldlm_pools_cli_shrinker);
- }
- ldlm_pools_thread_stop();
-}
-EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
deleted file mode 100644
index f6d61e5a38e7..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ /dev/null
@@ -1,2291 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/**
- * This file contains Asynchronous System Trap (AST) handlers and related
- * LDLM request-processing routines.
- *
- * An AST is a callback issued on a lock when its state is changed. There are
- * several different types of ASTs (callbacks) registered for each lock:
- *
- * - completion AST: when a lock is enqueued by some process, but cannot be
- * granted immediately due to other conflicting locks on the same resource,
- * the completion AST is sent to notify the caller when the lock is
- * eventually granted
- *
- * - blocking AST: when a lock is granted to some process, if another process
- * enqueues a conflicting (blocking) lock on a resource, a blocking AST is
- * sent to notify the holder(s) of the lock(s) of the conflicting lock
- * request. The lock holder(s) must release their lock(s) on that resource in
- * a timely manner or be evicted by the server.
- *
- * - glimpse AST: this is used when a process wants information about a lock
- * (i.e. the lock value block (LVB)) but does not necessarily require holding
- * the lock. If the resource is locked, the lock holder(s) are sent glimpse
- * ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL
- * their lock(s) if they are idle. If the resource is not locked, the server
- * may grant the lock.
- */
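The three AST flavours described above are wired up, later in this file, as the lcs_completion/lcs_blocking/lcs_glimpse members of struct ldlm_callback_suite (see ldlm_cli_enqueue_local() below). The following stand-alone toy models the same idea with plain function pointers; the struct and handler names are illustrative only, not Lustre identifiers.

#include <stdio.h>

/* Toy model of the three AST flavours (illustrative only). */
struct toy_ast_suite {
        void (*completion)(const char *res); /* lock eventually granted  */
        void (*blocking)(const char *res);   /* conflicting request seen */
        void (*glimpse)(const char *res);    /* peer wants the LVB only  */
};

static void on_completion(const char *res) { printf("%s: granted\n", res); }
static void on_blocking(const char *res)   { printf("%s: please release\n", res); }
static void on_glimpse(const char *res)    { printf("%s: report LVB\n", res); }

int main(void)
{
        struct toy_ast_suite suite = {
                .completion = on_completion,
                .blocking   = on_blocking,
                .glimpse    = on_glimpse,
        };

        suite.blocking("resource-42"); /* server asks the holder to drop it */
        return 0;
}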
-
-#define DEBUG_SUBSYSTEM S_LDLM
-
-#include "../include/lustre_dlm.h"
-#include "../include/obd_class.h"
-#include "../include/obd.h"
-
-#include "ldlm_internal.h"
-
-int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
-module_param(ldlm_enqueue_min, int, 0644);
-MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
-
-/* On the client side, whether the cached locks will be canceled before replay */
-unsigned int ldlm_cancel_unused_locks_before_replay = 1;
-
-static void interrupted_completion_wait(void *data)
-{
-}
-
-struct lock_wait_data {
- struct ldlm_lock *lwd_lock;
- __u32 lwd_conn_cnt;
-};
-
-struct ldlm_async_args {
- struct lustre_handle lock_handle;
-};
-
-int ldlm_expired_completion_wait(void *data)
-{
- struct lock_wait_data *lwd = data;
- struct ldlm_lock *lock = lwd->lwd_lock;
- struct obd_import *imp;
- struct obd_device *obd;
-
- if (lock->l_conn_export == NULL) {
- static unsigned long next_dump, last_dump;
-
- LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
- (s64)lock->l_last_activity,
- (s64)(ktime_get_real_seconds() -
- lock->l_last_activity));
- LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
- (s64)lock->l_last_activity,
- (s64)(ktime_get_real_seconds() -
- lock->l_last_activity));
- if (cfs_time_after(cfs_time_current(), next_dump)) {
- last_dump = next_dump;
- next_dump = cfs_time_shift(300);
- ldlm_namespace_dump(D_DLMTRACE,
- ldlm_lock_to_ns(lock));
- if (last_dump == 0)
- libcfs_debug_dumplog();
- }
- return 0;
- }
-
- obd = lock->l_conn_export->exp_obd;
- imp = obd->u.cli.cl_import;
- ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
- LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
- (s64)lock->l_last_activity,
- (s64)(ktime_get_real_seconds() - lock->l_last_activity),
- obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
-
- return 0;
-}
-EXPORT_SYMBOL(ldlm_expired_completion_wait);
-
-/* We use the same basis for both server side and client side functions
- from a single node. */
-int ldlm_get_enq_timeout(struct ldlm_lock *lock)
-{
- int timeout = at_get(ldlm_lock_to_ns_at(lock));
-
- if (AT_OFF)
- return obd_timeout / 2;
- /* Since these are non-updating timeouts, we should be conservative.
- It would be nice to have some kind of "early reply" mechanism for
- lock callbacks too... */
- timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
- return max(timeout, ldlm_enqueue_min);
-}
-EXPORT_SYMBOL(ldlm_get_enq_timeout);
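A worked example of the timeout computation above, as a stand-alone sketch (the values at=10s, at_max=600s and enqueue_min=100s are hypothetical): the adaptive estimate is padded to 150% and clamped, and ldlm_completion_ast() below then waits for twice that.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
        /* hypothetical values: measured AT 10s, at_max 600s, enqueue_min 100s */
        int at = 10, at_max = 600, enqueue_min = 100;

        int timeout = min_int(at_max, at + (at >> 1)); /* 150% of the estimate */
        timeout = max_int(timeout, enqueue_min);       /* never below the floor */

        printf("enqueue timeout = %ds, completion wait = %ds\n",
               timeout, timeout * 2);                  /* 100s and 200s here */
        return 0;
}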
-
-/**
- * Helper function for ldlm_completion_ast(), updating timings when lock is
- * actually granted.
- */
-static int ldlm_completion_tail(struct ldlm_lock *lock)
-{
- long delay;
- int result;
-
- if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
- LDLM_DEBUG(lock, "client-side enqueue: destroyed");
- result = -EIO;
- } else {
- delay = ktime_get_real_seconds() - lock->l_last_activity;
- LDLM_DEBUG(lock, "client-side enqueue: granted after %lds",
- delay);
-
- /* Update our time estimate */
- at_measured(ldlm_lock_to_ns_at(lock),
- delay);
- result = 0;
- }
- return result;
-}
-
-/**
- * Implementation of ->l_completion_ast() for a client that doesn't wait
- * until the lock is granted. Suitable for locks enqueued through ptlrpcd or
- * other threads that cannot block for long.
- */
-int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
-{
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
- return 0;
- }
-
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- wake_up(&lock->l_waitq);
- return ldlm_completion_tail(lock);
- }
-
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
- ldlm_reprocess_all(lock->l_resource);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_completion_ast_async);
-
-/**
- * Generic LDLM "completion" AST. This is called in several cases:
- *
- * - when a reply to an ENQUEUE RPC is received from the server
- * (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
- * this point (determined by flags);
- *
- * - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has
- * been granted;
- *
- * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
- * gets correct lvb;
- *
- * - to force all locks when resource is destroyed (cleanup_resource());
- *
- * - during lock conversion (not used currently).
- *
- * If lock is not granted in the first case, this function waits until second
- * or penultimate cases happen in some other thread.
- *
- */
-int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
-{
- /* XXX ALLOCATE - 160 bytes */
- struct lock_wait_data lwd;
- struct obd_device *obd;
- struct obd_import *imp = NULL;
- struct l_wait_info lwi;
- __u32 timeout;
- int rc = 0;
-
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
- goto noreproc;
- }
-
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- wake_up(&lock->l_waitq);
- return 0;
- }
-
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
-
-noreproc:
-
- obd = class_exp2obd(lock->l_conn_export);
-
- /* if this is a local lock, then there is no import */
- if (obd != NULL)
- imp = obd->u.cli.cl_import;
-
- /* Wait a long time for enqueue - server may have to callback a
- lock from another client. Server will evict the other client if it
- doesn't respond reasonably, and then give us the lock. */
- timeout = ldlm_get_enq_timeout(lock) * 2;
-
- lwd.lwd_lock = lock;
-
- if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
- LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
- lwi = LWI_INTR(interrupted_completion_wait, &lwd);
- } else {
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
- ldlm_expired_completion_wait,
- interrupted_completion_wait, &lwd);
- }
-
- if (imp != NULL) {
- spin_lock(&imp->imp_lock);
- lwd.lwd_conn_cnt = imp->imp_conn_cnt;
- spin_unlock(&imp->imp_lock);
- }
-
- if (ns_is_client(ldlm_lock_to_ns(lock)) &&
- OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
- OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- lock->l_flags |= LDLM_FL_FAIL_LOC;
- rc = -EINTR;
- } else {
- /* Go to sleep until the lock is granted or cancelled. */
- rc = l_wait_event(lock->l_waitq,
- is_granted_or_cancelled(lock), &lwi);
- }
-
- if (rc) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
- return ldlm_completion_tail(lock);
-}
-EXPORT_SYMBOL(ldlm_completion_ast);
-
-/**
- * A helper to build a blocking AST function
- *
- * Perform a common operation for blocking ASTs:
- * deferred lock cancellation.
- *
- * \param lock the lock blocking or canceling AST was called on
- * \retval 0
- * \see mdt_blocking_ast
- * \see ldlm_blocking_ast
- */
-int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
-{
- int do_ast;
-
- lock->l_flags |= LDLM_FL_CBPENDING;
- do_ast = !lock->l_readers && !lock->l_writers;
- unlock_res_and_lock(lock);
-
- if (do_ast) {
- struct lustre_handle lockh;
- int rc;
-
- LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (rc < 0)
- CERROR("ldlm_cli_cancel: %d\n", rc);
- } else {
- LDLM_DEBUG(lock, "Lock still has references, will be cancelled later");
- }
- return 0;
-}
-EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
-
-/**
- * Server blocking AST
- *
- * ->l_blocking_ast() callback for LDLM locks acquired by server-side
- * OBDs.
- *
- * \param lock the lock which blocks a request or cancelling lock
- * \param desc unused
- * \param data unused
- * \param flag indicates whether this cancelling or blocking callback
- * \retval 0
- * \see ldlm_blocking_ast_nocheck
- */
-int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
-{
- if (flag == LDLM_CB_CANCELING) {
- /* Don't need to do anything here. */
- return 0;
- }
-
- lock_res_and_lock(lock);
- /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
- * that ldlm_blocking_ast is called just before intent_policy method
- * takes the lr_lock, then by the time we get the lock, we might not
- * be the correct blocking function anymore. So check, and return
- * early, if so. */
- if (lock->l_blocking_ast != ldlm_blocking_ast) {
- unlock_res_and_lock(lock);
- return 0;
- }
- return ldlm_blocking_ast_nocheck(lock);
-}
-EXPORT_SYMBOL(ldlm_blocking_ast);
-
-/**
- * ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
- * comment in filter_intent_policy() on why you may need this.
- */
-int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp)
-{
- /*
- * Returning -ELDLM_NO_LOCK_DATA actually works, but the reason for
- * that is rather subtle: with OST-side locking, it may so happen that
- * _all_ extent locks are held by the OST. If client wants to obtain
- * current file size it calls ll{,u}_glimpse_size(), and (as locks are
- * on the server), dummy glimpse callback fires and does
- * nothing. Client still receives correct file size due to the
- * following fragment in filter_intent_policy():
- *
- * rc = l->l_glimpse_ast(l, NULL); // this will update the LVB
- * if (rc != 0 && res->lr_namespace->ns_lvbo &&
- * res->lr_namespace->ns_lvbo->lvbo_update) {
- * res->lr_namespace->ns_lvbo->lvbo_update(res, NULL, 0, 1);
- * }
- *
- * that is, after glimpse_ast() fails, filter_lvbo_update() runs, and
- * returns correct file size to the client.
- */
- return -ELDLM_NO_LOCK_DATA;
-}
-EXPORT_SYMBOL(ldlm_glimpse_ast);
-
-/**
- * Enqueue a local lock (typically on a server).
- */
-int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_type_t type, ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 *flags,
- ldlm_blocking_callback blocking,
- ldlm_completion_callback completion,
- ldlm_glimpse_callback glimpse,
- void *data, __u32 lvb_len, enum lvb_type lvb_type,
- const __u64 *client_cookie,
- struct lustre_handle *lockh)
-{
- struct ldlm_lock *lock;
- int err;
- const struct ldlm_callback_suite cbs = { .lcs_completion = completion,
- .lcs_blocking = blocking,
- .lcs_glimpse = glimpse,
- };
-
- LASSERT(!(*flags & LDLM_FL_REPLAY));
- if (unlikely(ns_is_client(ns))) {
- CERROR("Trying to enqueue local lock in a shadow namespace\n");
- LBUG();
- }
-
- lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len,
- lvb_type);
- if (unlikely(!lock)) {
- err = -ENOMEM;
- goto out_nolock;
- }
-
- ldlm_lock2handle(lock, lockh);
-
- /* NB: we don't have any lock now (lock_res_and_lock)
- * because it's a new lock */
- ldlm_lock_addref_internal_nolock(lock, mode);
- lock->l_flags |= LDLM_FL_LOCAL;
- if (*flags & LDLM_FL_ATOMIC_CB)
- lock->l_flags |= LDLM_FL_ATOMIC_CB;
-
- if (policy != NULL)
- lock->l_policy_data = *policy;
- if (client_cookie != NULL)
- lock->l_client_cookie = *client_cookie;
- if (type == LDLM_EXTENT)
- lock->l_req_extent = policy->l_extent;
-
- err = ldlm_lock_enqueue(ns, &lock, policy, flags);
- if (unlikely(err != ELDLM_OK))
- goto out;
-
- if (policy != NULL)
- *policy = lock->l_policy_data;
-
- if (lock->l_completion_ast)
- lock->l_completion_ast(lock, *flags, NULL);
-
- LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
- out:
- LDLM_LOCK_RELEASE(lock);
- out_nolock:
- return err;
-}
-EXPORT_SYMBOL(ldlm_cli_enqueue_local);
-
-static void failed_lock_cleanup(struct ldlm_namespace *ns,
- struct ldlm_lock *lock, int mode)
-{
- int need_cancel = 0;
-
- /* Set a flag to prevent us from sending a CANCEL (bug 407) */
- lock_res_and_lock(lock);
- /* Check that lock is not granted or failed, we might race. */
- if ((lock->l_req_mode != lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
-		/* Make sure that this lock will not be found by a racing
-		 * bl_ast, and that an -EINVAL reply is sent to the server
-		 * anyway. bug 17645 */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
- LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
- need_cancel = 1;
- }
- unlock_res_and_lock(lock);
-
- if (need_cancel)
- LDLM_DEBUG(lock,
- "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
- else
- LDLM_DEBUG(lock, "lock was granted or failed in race");
-
- ldlm_lock_decref_internal(lock, mode);
-
- /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
- * from llite/file.c/ll_file_flock(). */
-	/* This code accounts for the fact that we do not have a blocking
-	 * handler on a client for flock locks. As such, this is the place
-	 * where we must completely kill failed locks (both interrupted ones
-	 * and those that were waiting to be granted when the server evicted
-	 * us). */
- if (lock->l_resource->lr_type == LDLM_FLOCK) {
- lock_res_and_lock(lock);
- ldlm_resource_unlink_lock(lock);
- ldlm_lock_destroy_nolock(lock);
- unlock_res_and_lock(lock);
- }
-}
-
-/**
- * Finishing portion of client lock enqueue code.
- *
- * Called after receiving reply from server.
- */
-int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
- __u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh, int rc)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- int is_replay = *flags & LDLM_FL_REPLAY;
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
- int cleanup_phase = 1;
- int size = 0;
-
- lock = ldlm_handle2lock(lockh);
- /* ldlm_cli_enqueue is holding a reference on this lock. */
- if (!lock) {
- LASSERT(type == LDLM_FLOCK);
- return -ENOLCK;
- }
-
- LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
- "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len);
-
- if (rc != ELDLM_OK) {
- LASSERT(!is_replay);
- LDLM_DEBUG(lock, "client-side enqueue END (%s)",
- rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
-
- if (rc != ELDLM_LOCK_ABORTED)
- goto cleanup;
- }
-
- /* Before we return, swab the reply */
- reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if (reply == NULL) {
- rc = -EPROTO;
- goto cleanup;
- }
-
- if (lvb_len != 0) {
- LASSERT(lvb != NULL);
-
- size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
- RCL_SERVER);
- if (size < 0) {
- LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size);
- rc = size;
- goto cleanup;
- } else if (unlikely(size > lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
- lvb_len, size);
- rc = -EINVAL;
- goto cleanup;
- }
- }
-
- if (rc == ELDLM_LOCK_ABORTED) {
- if (lvb_len != 0)
- rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lvb, size);
- if (rc == 0)
- rc = ELDLM_LOCK_ABORTED;
- goto cleanup;
- }
-
- /* lock enqueued on the server */
- cleanup_phase = 0;
-
- lock_res_and_lock(lock);
- /* Key change rehash lock in per-export hash with new key */
- if (exp->exp_lock_hash) {
- /* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
- /* coverity[overrun-buffer-val] */
- cfs_hash_rehash_key(exp->exp_lock_hash,
- &lock->l_remote_handle,
- &reply->lock_handle,
- &lock->l_exp_hash);
- } else {
- lock->l_remote_handle = reply->lock_handle;
- }
-
- *flags = ldlm_flags_from_wire(reply->lock_flags);
- lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_INHERIT_FLAGS);
- /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
- * to wait with no timeout as well */
- lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_FL_NO_TIMEOUT);
- unlock_res_and_lock(lock);
-
- CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n",
- lock, reply->lock_handle.cookie, *flags);
-
- /* If enqueue returned a blocked lock but the completion handler has
- * already run, then it fixed up the resource and we don't need to do it
- * again. */
- if ((*flags) & LDLM_FL_LOCK_CHANGED) {
- int newmode = reply->lock_desc.l_req_mode;
-
- LASSERT(!is_replay);
- if (newmode && newmode != lock->l_req_mode) {
- LDLM_DEBUG(lock, "server returned different mode %s",
- ldlm_lockname[newmode]);
- lock->l_req_mode = newmode;
- }
-
- if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
- &lock->l_resource->lr_name)) {
- CDEBUG(D_INFO, "remote intent success, locking "DLDLMRES
- " instead of "DLDLMRES"\n",
- PLDLMRES(&reply->lock_desc.l_resource),
- PLDLMRES(lock->l_resource));
-
- rc = ldlm_lock_change_resource(ns, lock,
- &reply->lock_desc.l_resource.lr_name);
- if (rc || lock->l_resource == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
- LDLM_DEBUG(lock, "client-side enqueue, new resource");
- }
- if (with_policy)
- if (!(type == LDLM_IBITS &&
- !(exp_connect_flags(exp) & OBD_CONNECT_IBITS)))
- /* We assume the lock type cannot change on the server */
- ldlm_convert_policy_to_local(exp,
- lock->l_resource->lr_type,
- &reply->lock_desc.l_policy_data,
- &lock->l_policy_data);
- if (type != LDLM_PLAIN)
- LDLM_DEBUG(lock,
- "client-side enqueue, new policy data");
- }
-
- if ((*flags) & LDLM_FL_AST_SENT ||
- /* Cancel extent locks as soon as possible on a liblustre client,
- * because it cannot handle asynchronous ASTs robustly (see
- * bug 7311). */
- (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
- unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
- }
-
- /* If the lock has already been granted by a completion AST, don't
- * clobber the LVB with an older one. */
- if (lvb_len != 0) {
- /* We must lock or a racing completion might update the lvb without
- * letting us know and we'll clobber the correct value.
- * Cannot unlock after the check either, as that still leaves
- * a tiny window for the completion to get in */
- lock_res_and_lock(lock);
- if (lock->l_req_mode != lock->l_granted_mode)
- rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lock->l_lvb_data, size);
- unlock_res_and_lock(lock);
- if (rc < 0) {
- cleanup_phase = 1;
- goto cleanup;
- }
- }
-
- if (!is_replay) {
- rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
- if (lock->l_completion_ast != NULL) {
- int err = lock->l_completion_ast(lock, *flags, NULL);
-
- if (!rc)
- rc = err;
- if (rc)
- cleanup_phase = 1;
- }
- }
-
- if (lvb_len && lvb != NULL) {
- /* Copy the LVB here, and not earlier, because the completion
- * AST (if any) can override what we got in the reply */
- memcpy(lvb, lock->l_lvb_data, lvb_len);
- }
-
- LDLM_DEBUG(lock, "client-side enqueue END");
-cleanup:
- if (cleanup_phase == 1 && rc)
- failed_lock_cleanup(ns, lock, mode);
- /* Put the lock twice; the second reference is held by ldlm_cli_enqueue */
- LDLM_LOCK_PUT(lock);
- LDLM_LOCK_RELEASE(lock);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
-
-/**
- * Estimate the number of lock handles that would fit into a request of
- * the given size. PAGE_SIZE-512 is to allow TCP/IP and LNET headers to
- * fit into a single page on the send/receive side. XXX: 512 should be
- * changed to a more adequate value.
- */
-static inline int ldlm_req_handles_avail(int req_size, int off)
-{
- int avail;
-
- avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
- if (likely(avail >= 0))
- avail /= (int)sizeof(struct lustre_handle);
- else
- avail = 0;
- avail += LDLM_LOCKREQ_HANDLES - off;
-
- return avail;
-}
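
As a rough, stand-alone illustration of the sizing arithmetic above, the sketch below mirrors the same calculation in userspace. The EX_* constants are hypothetical stand-ins for LDLM_MAXREQSIZE, PAGE_CACHE_SIZE, sizeof(struct lustre_handle) and LDLM_LOCKREQ_HANDLES, not the real kernel values.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants used above. */
#define EX_MAXREQSIZE		4096	/* role of LDLM_MAXREQSIZE */
#define EX_PAGE_SIZE		4096	/* role of PAGE_CACHE_SIZE */
#define EX_HANDLE_SIZE		8	/* role of sizeof(struct lustre_handle) */
#define EX_LOCKREQ_HANDLES	2	/* role of LDLM_LOCKREQ_HANDLES */

static int ex_handles_avail(int req_size, int off)
{
	int cap = EX_MAXREQSIZE < EX_PAGE_SIZE - 512 ?
		  EX_MAXREQSIZE : EX_PAGE_SIZE - 512;
	int avail = cap - req_size;

	if (avail >= 0)
		avail /= EX_HANDLE_SIZE;
	else
		avail = 0;
	return avail + EX_LOCKREQ_HANDLES - off;
}

int main(void)
{
	printf("%d handles fit alongside a 1 KiB request\n",
	       ex_handles_avail(1024, 0));
	return 0;
}
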
-
-static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
- enum req_location loc,
- int off)
-{
- int size = req_capsule_msg_size(pill, loc);
-
- return ldlm_req_handles_avail(size, off);
-}
-
-static inline int ldlm_format_handles_avail(struct obd_import *imp,
- const struct req_format *fmt,
- enum req_location loc, int off)
-{
- int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
-
- return ldlm_req_handles_avail(size, off);
-}
-
-/**
- * Cancel LRU locks and pack them into the enqueue request. Also pack the
- * given \a count locks from \a cancels into it.
- *
- * This is to be called by functions preparing their own requests that
- * might contain lists of locks to cancel in addition to the actual
- * operation that needs to be performed.
- */
-int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- struct list_head *cancels, int count)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct req_capsule *pill = &req->rq_pill;
- struct ldlm_request *dlm = NULL;
- int flags, avail, to_free, pack = 0;
- LIST_HEAD(head);
- int rc;
-
- if (cancels == NULL)
- cancels = &head;
- if (ns_connect_cancelset(ns)) {
- /* Estimate the amount of available space in the request. */
- req_capsule_filled_sizes(pill, RCL_CLIENT);
- avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
-
- flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
- to_free = !ns_connect_lru_resize(ns) &&
- opc == LDLM_ENQUEUE ? 1 : 0;
-
- /* Cancel LRU locks here _only_ if the server supports
- * EARLY_CANCEL. Otherwise we have to send extra CANCEL
- * RPC, which will make us slower. */
- if (avail > count)
- count += ldlm_cancel_lru_local(ns, cancels, to_free,
- avail - count, 0, flags);
- if (avail > count)
- pack = count;
- else
- pack = avail;
- req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
- ldlm_request_bufsize(pack, opc));
- }
-
- rc = ptlrpc_request_pack(req, version, opc);
- if (rc) {
- ldlm_lock_list_put(cancels, l_bl_ast, count);
- return rc;
- }
-
- if (ns_connect_cancelset(ns)) {
- if (canceloff) {
- dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
- LASSERT(dlm);
- /* Skip the first lock handle in ldlm_request_pack();
- * this method will increment @lock_count according
- * to the number of lock handles actually written to
- * the buffer. */
- dlm->lock_count = canceloff;
- }
- /* Pack into the request @pack lock handles. */
- ldlm_cli_cancel_list(cancels, pack, req, 0);
- /* Prepare and send separate cancel RPC for others. */
- ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
- } else {
- ldlm_lock_list_put(cancels, l_bl_ast, count);
- }
- return 0;
-}
-EXPORT_SYMBOL(ldlm_prep_elc_req);
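
The early-lock-cancel packing above boils down to a simple split: at most 'avail' handles ride along in the enqueue request, the rest go out in separate cancel RPCs. A minimal userspace sketch of that split, illustrative only and not part of the patch:

#include <stdio.h>

/*
 * Toy model of the split performed above: up to 'avail' handles are
 * piggybacked on the main request ('pack'); the remainder would be
 * sent in separate cancel RPCs.  The numbers are illustrative only.
 */
static void split_cancels(int count, int avail, int *pack, int *separate)
{
	*pack = count < avail ? count : avail;
	*separate = count - *pack;
}

int main(void)
{
	int pack, separate;

	split_cancels(100, 64, &pack, &separate);
	printf("piggyback %d handles, cancel %d separately\n",
	       pack, separate);
	return 0;
}
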
-
-int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
- struct list_head *cancels, int count)
-{
- return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
- LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
-}
-EXPORT_SYMBOL(ldlm_prep_enqueue_req);
-
-struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len)
-{
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
- if (req == NULL)
- return ERR_PTR(-ENOMEM);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
- ptlrpc_request_set_replen(req);
- return req;
-}
-EXPORT_SYMBOL(ldlm_enqueue_pack);
-
-/**
- * Client-side lock enqueue.
- *
- * If a request has some specific initialisation it is passed in \a reqp,
- * otherwise it is created in ldlm_cli_enqueue.
- *
- * Supports sync and async requests, pass \a async flag accordingly. If a
- * request was created in ldlm_cli_enqueue and it is the async request,
- * pass it to the caller in \a reqp.
- */
-int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
- struct ldlm_enqueue_info *einfo,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
- void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
- struct lustre_handle *lockh, int async)
-{
- struct ldlm_namespace *ns;
- struct ldlm_lock *lock;
- struct ldlm_request *body;
- int is_replay = *flags & LDLM_FL_REPLAY;
- int req_passed_in = 1;
- int rc, err;
- struct ptlrpc_request *req;
-
- LASSERT(exp != NULL);
-
- ns = exp->exp_obd->obd_namespace;
-
- /* If we're replaying this lock, just check some invariants.
- * If we're creating a new lock, get everything set up nicely. */
- if (is_replay) {
- lock = ldlm_handle2lock_long(lockh, 0);
- LASSERT(lock != NULL);
- LDLM_DEBUG(lock, "client-side enqueue START");
- LASSERT(exp == lock->l_conn_export);
- } else {
- const struct ldlm_callback_suite cbs = {
- .lcs_completion = einfo->ei_cb_cp,
- .lcs_blocking = einfo->ei_cb_bl,
- .lcs_glimpse = einfo->ei_cb_gl
- };
- lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
- einfo->ei_mode, &cbs, einfo->ei_cbdata,
- lvb_len, lvb_type);
- if (lock == NULL)
- return -ENOMEM;
- /* for the local lock, add the reference */
- ldlm_lock_addref_internal(lock, einfo->ei_mode);
- ldlm_lock2handle(lock, lockh);
- if (policy != NULL)
- lock->l_policy_data = *policy;
-
- if (einfo->ei_type == LDLM_EXTENT)
- lock->l_req_extent = policy->l_extent;
- LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
- *flags);
- }
-
- lock->l_conn_export = exp;
- lock->l_export = NULL;
- lock->l_blocking_ast = einfo->ei_cb_bl;
- lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
-
- /* lock not sent to server yet */
-
- if (reqp == NULL || *reqp == NULL) {
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE,
- LUSTRE_DLM_VERSION,
- LDLM_ENQUEUE);
- if (req == NULL) {
- failed_lock_cleanup(ns, lock, einfo->ei_mode);
- LDLM_LOCK_RELEASE(lock);
- return -ENOMEM;
- }
- req_passed_in = 0;
- if (reqp)
- *reqp = req;
- } else {
- int len;
-
- req = *reqp;
- len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
- RCL_CLIENT);
- LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
- DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
- }
-
- /* Dump lock data into the request buffer */
- body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- ldlm_lock2desc(lock, &body->lock_desc);
- body->lock_flags = ldlm_flags_to_wire(*flags);
- body->lock_handle[0] = *lockh;
-
- /* Continue as normal. */
- if (!req_passed_in) {
- if (lvb_len > 0)
- req_capsule_extend(&req->rq_pill,
- &RQF_LDLM_ENQUEUE_LVB);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- lvb_len);
- ptlrpc_request_set_replen(req);
- }
-
- /*
- * Liblustre client doesn't get extent locks, except for O_APPEND case
- * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
- * [i_size, OBD_OBJECT_EOF] lock is taken.
- */
- LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
- policy->l_extent.end == OBD_OBJECT_EOF));
-
- if (async) {
- LASSERT(reqp != NULL);
- return 0;
- }
-
- LDLM_DEBUG(lock, "sending request");
-
- rc = ptlrpc_queue_wait(req);
-
- err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
- einfo->ei_mode, flags, lvb, lvb_len,
- lockh, rc);
-
- /* If ldlm_cli_enqueue_fini did not find the lock, we need to free
- * one reference that we took */
- if (err == -ENOLCK)
- LDLM_LOCK_RELEASE(lock);
- else
- rc = err;
-
- if (!req_passed_in && req != NULL) {
- ptlrpc_req_finished(req);
- if (reqp)
- *reqp = NULL;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(ldlm_cli_enqueue);
-
-static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
- __u32 *flags)
-{
- struct ldlm_resource *res;
- int rc;
-
- if (ns_is_client(ldlm_lock_to_ns(lock))) {
- CERROR("Trying to cancel local lock\n");
- LBUG();
- }
- LDLM_DEBUG(lock, "client-side local convert");
-
- res = ldlm_lock_convert(lock, new_mode, flags);
- if (res) {
- ldlm_reprocess_all(res);
- rc = 0;
- } else {
- rc = LUSTRE_EDEADLK;
- }
- LDLM_DEBUG(lock, "client-side local convert handler END");
- LDLM_LOCK_PUT(lock);
- return rc;
-}
-
-/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
- * conversion of locks which are on the waiting or converting queue */
-/* The caller of this code is supposed to take care of lock readers/writers
- * accounting */
-int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
-{
- struct ldlm_request *body;
- struct ldlm_reply *reply;
- struct ldlm_lock *lock;
- struct ldlm_resource *res;
- struct ptlrpc_request *req;
- int rc;
-
- lock = ldlm_handle2lock(lockh);
- if (!lock) {
- LBUG();
- return -EINVAL;
- }
- *flags = 0;
-
- if (lock->l_conn_export == NULL)
- return ldlm_cli_convert_local(lock, new_mode, flags);
-
- LDLM_DEBUG(lock, "client-side convert");
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(lock->l_conn_export),
- &RQF_LDLM_CONVERT, LUSTRE_DLM_VERSION,
- LDLM_CONVERT);
- if (req == NULL) {
- LDLM_LOCK_PUT(lock);
- return -ENOMEM;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- body->lock_handle[0] = lock->l_remote_handle;
-
- body->lock_desc.l_req_mode = new_mode;
- body->lock_flags = ldlm_flags_to_wire(*flags);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc != ELDLM_OK)
- goto out;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if (reply == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- if (req->rq_status) {
- rc = req->rq_status;
- goto out;
- }
-
- res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
- if (res != NULL) {
- ldlm_reprocess_all(res);
- /* Go to sleep until the lock is granted. */
- /* FIXME: or cancelled. */
- if (lock->l_completion_ast) {
- rc = lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,
- NULL);
- if (rc)
- goto out;
- }
- } else {
- rc = LUSTRE_EDEADLK;
- }
- out:
- LDLM_LOCK_PUT(lock);
- ptlrpc_req_finished(req);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_cli_convert);
-
-/**
- * Cancel locks locally.
- * Returns:
- * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server;
- * \retval LDLM_FL_BL_AST if a separate CANCEL RPC is needed;
- * \retval LDLM_FL_CANCELING otherwise.
- */
-static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
-{
- __u64 rc = LDLM_FL_LOCAL_ONLY;
-
- if (lock->l_conn_export) {
- bool local_only;
-
- LDLM_DEBUG(lock, "client-side cancel");
- /* Set this flag to prevent others from getting new references */
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
- local_only = !!(lock->l_flags &
- (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
- ldlm_cancel_callback(lock);
- rc = (lock->l_flags & LDLM_FL_BL_AST) ?
- LDLM_FL_BL_AST : LDLM_FL_CANCELING;
- unlock_res_and_lock(lock);
-
- if (local_only) {
- CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
- rc = LDLM_FL_LOCAL_ONLY;
- }
- ldlm_lock_cancel(lock);
- } else {
- if (ns_is_client(ldlm_lock_to_ns(lock))) {
- LDLM_ERROR(lock, "Trying to cancel local lock");
- LBUG();
- }
- LDLM_DEBUG(lock, "server-side local cancel");
- ldlm_lock_cancel(lock);
- ldlm_reprocess_all(lock->l_resource);
- }
-
- return rc;
-}
-
-/**
- * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
- */
-static void ldlm_cancel_pack(struct ptlrpc_request *req,
- struct list_head *head, int count)
-{
- struct ldlm_request *dlm;
- struct ldlm_lock *lock;
- int max, packed = 0;
-
- dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- LASSERT(dlm != NULL);
-
- /* Check the room in the request buffer. */
- max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
- sizeof(struct ldlm_request);
- max /= sizeof(struct lustre_handle);
- max += LDLM_LOCKREQ_HANDLES;
- LASSERT(max >= dlm->lock_count + count);
-
- /* XXX: it would be better to pack lock handles grouped by resource,
- * so that the server cancel would call filter_lvbo_update() less
- * frequently. */
- list_for_each_entry(lock, head, l_bl_ast) {
- if (!count--)
- break;
- LASSERT(lock->l_conn_export);
- /* Pack the lock handle to the given request buffer. */
- LDLM_DEBUG(lock, "packing");
- dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
- packed++;
- }
- CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
-}
-
-/**
- * Prepare and send a batched cancel RPC. It will include \a count lock
- * handles of the locks given in the \a cancels list. */
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
- int count, ldlm_cancel_flags_t flags)
-{
- struct ptlrpc_request *req = NULL;
- struct obd_import *imp;
- int free, sent = 0;
- int rc = 0;
-
- LASSERT(exp != NULL);
- LASSERT(count > 0);
-
- CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
-
- if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
- return count;
-
- free = ldlm_format_handles_avail(class_exp2cliimp(exp),
- &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
- if (count > free)
- count = free;
-
- while (1) {
- imp = class_exp2cliimp(exp);
- if (imp == NULL || imp->imp_invalid) {
- CDEBUG(D_DLMTRACE,
- "skipping cancel on invalid import %p\n", imp);
- return count;
- }
-
- req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
- ldlm_request_bufsize(count, LDLM_CANCEL));
-
- rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
- if (rc) {
- ptlrpc_request_free(req);
- goto out;
- }
-
- req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
- req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- ldlm_cancel_pack(req, cancels, count);
-
- ptlrpc_request_set_replen(req);
- if (flags & LCF_ASYNC) {
- ptlrpcd_add_req(req);
- sent = count;
- goto out;
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc == LUSTRE_ESTALE) {
- CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
- libcfs_nid2str(req->rq_import->
- imp_connection->c_peer.nid));
- rc = 0;
- } else if (rc == -ETIMEDOUT && /* check there was no reconnect */
- req->rq_import_generation == imp->imp_generation) {
- ptlrpc_req_finished(req);
- continue;
- } else if (rc != ELDLM_OK) {
- /* -ESHUTDOWN is common on umount */
- CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
- "Got rc %d from cancel RPC: canceling anyway\n",
- rc);
- break;
- }
- sent = count;
- break;
- }
-
- ptlrpc_req_finished(req);
-out:
- return sent ? sent : rc;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_req);
-
-static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
-{
- LASSERT(imp != NULL);
- return &imp->imp_obd->obd_namespace->ns_pool;
-}
-
-/**
- * Update client's OBD pool related fields with new SLV and Limit from \a req.
- */
-int ldlm_cli_update_pool(struct ptlrpc_request *req)
-{
- struct obd_device *obd;
- __u64 new_slv;
- __u32 new_limit;
-
- if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
- !imp_connect_lru_resize(req->rq_import))) {
- /*
- * Do nothing for corner cases.
- */
- return 0;
- }
-
- /* In some cases the RPC may contain SLV and limit zeroed out. This
- * is the case when the server does not support the LRU resize feature.
- * This is also possible in some recovery cases when server-side
- * reqs have no reference to the OBD export and thus access to the
- * server-side namespace is not possible. */
- if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
- lustre_msg_get_limit(req->rq_repmsg) == 0) {
- DEBUG_REQ(D_HA, req,
- "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
- lustre_msg_get_slv(req->rq_repmsg),
- lustre_msg_get_limit(req->rq_repmsg));
- return 0;
- }
-
- new_limit = lustre_msg_get_limit(req->rq_repmsg);
- new_slv = lustre_msg_get_slv(req->rq_repmsg);
- obd = req->rq_import->imp_obd;
-
- /* Set new SLV and limit in OBD fields to make them accessible
- * to the pool thread. We do not access obd_namespace and pool
- * directly here as there is no reliable way to make sure that
- * they are still alive at cleanup time. Evil races are possible
- * which may cause Oops at that time. */
- write_lock(&obd->obd_pool_lock);
- obd->obd_pool_slv = new_slv;
- obd->obd_pool_limit = new_limit;
- write_unlock(&obd->obd_pool_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ldlm_cli_update_pool);
-
-/**
- * Client side lock cancel.
- *
- * Lock must not have any readers or writers by this time.
- */
-int ldlm_cli_cancel(struct lustre_handle *lockh,
- ldlm_cancel_flags_t cancel_flags)
-{
- struct obd_export *exp;
- int avail, flags, count = 1;
- __u64 rc = 0;
- struct ldlm_namespace *ns;
- struct ldlm_lock *lock;
- LIST_HEAD(cancels);
-
- /* concurrent cancels on the same handle can happen */
- lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
- if (lock == NULL) {
- LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
- return 0;
- }
-
- rc = ldlm_cli_cancel_local(lock);
- if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
- LDLM_LOCK_RELEASE(lock);
- return 0;
- }
- /* Even if the lock is marked as LDLM_FL_BL_AST, this is an LDLM_CANCEL
- * RPC which goes to the canceld portal, so we can cancel other LRU locks
- * here and send them all as one LDLM_CANCEL RPC. */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, &cancels);
-
- exp = lock->l_conn_export;
- if (exp_connect_cancelset(exp)) {
- avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
- &RQF_LDLM_CANCEL,
- RCL_CLIENT, 0);
- LASSERT(avail > 0);
-
- ns = ldlm_lock_to_ns(lock);
- flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
- count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
- LCF_BL_AST, flags);
- }
- ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel);
-
-/**
- * Locally cancel up to \a count locks in list \a cancels.
- * Return the number of cancelled locks.
- */
-int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags)
-{
- LIST_HEAD(head);
- struct ldlm_lock *lock, *next;
- int left = 0, bl_ast = 0;
- __u64 rc;
-
- left = count;
- list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
- if (left-- == 0)
- break;
-
- if (flags & LCF_LOCAL) {
- rc = LDLM_FL_LOCAL_ONLY;
- ldlm_lock_cancel(lock);
- } else {
- rc = ldlm_cli_cancel_local(lock);
- }
- /* Until we have compound requests and can send LDLM_CANCEL
- * requests batched with generic RPCs, we need to send cancels
- * with the LDLM_FL_BL_AST flag in a separate RPC from
- * the one being generated now. */
- if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
- LDLM_DEBUG(lock, "Cancel lock separately");
- list_del_init(&lock->l_bl_ast);
- list_add(&lock->l_bl_ast, &head);
- bl_ast++;
- continue;
- }
- if (rc == LDLM_FL_LOCAL_ONLY) {
- /* CANCEL RPC should not be sent to server. */
- list_del_init(&lock->l_bl_ast);
- LDLM_LOCK_RELEASE(lock);
- count--;
- }
- }
- if (bl_ast > 0) {
- count -= bl_ast;
- ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
- }
-
- return count;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
-
-/**
- * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
- * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
- * readahead requests, ...)
- */
-static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
- ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
-
- lock_res_and_lock(lock);
-
- /* don't check added & count since we want to process all locks
- * from unused list */
- switch (lock->l_resource->lr_type) {
- case LDLM_EXTENT:
- case LDLM_IBITS:
- if (cb && cb(lock))
- break;
- default:
- result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
- break;
- }
-
- unlock_res_and_lock(lock);
- return result;
-}
-
-/**
- * Callback function for the LRU-resize policy. Decides whether to keep
- * \a lock in the LRU for the current LRU size \a unused, the number
- * added in the current scan \a added, and the number of locks to be
- * preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- unsigned long cur = cfs_time_current();
- struct ldlm_pool *pl = &ns->ns_pool;
- __u64 slv, lvf, lv;
- unsigned long la;
-
- /* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU. */
- if (count && added >= count)
- return LDLM_POLICY_KEEP_LOCK;
-
- slv = ldlm_pool_get_slv(pl);
- lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur,
- lock->l_last_used));
- lv = lvf * la * unused;
-
- /* Inform pool about current CLV to see it via debugfs. */
- ldlm_pool_set_clv(pl, lv);
-
- /* Stop when the SLV has not yet come from the server, or when lv is
- * smaller than the SLV. */
- return (slv == 0 || lv < slv) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
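
The LRU-resize decision above compares a lock value lv = lvf * age * unused against the server lock volume (SLV). A toy, self-contained model of that comparison, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/*
 * Toy model of the LRU-resize decision above: a lock is kept when the
 * server has not yet provided an SLV, or when its value
 * lv = lvf * age * unused is still below the SLV.  All numbers are
 * made up for illustration.
 */
static int keep_in_lru(uint64_t slv, uint64_t lvf,
		       unsigned long age_sec, unsigned int unused)
{
	uint64_t lv = lvf * age_sec * unused;

	return slv == 0 || lv < slv;
}

int main(void)
{
	printf("%d\n", keep_in_lru(1000000, 100, 5, 50));    /* young: keep */
	printf("%d\n", keep_in_lru(1000000, 100, 600, 500)); /* old: cancel */
	return 0;
}
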
-
-/**
- * Callback function for debugfs used policy. Makes decision whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current scan \a
- * added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- /* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU. */
- return (added >= count) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
-/**
- * Callback function for aged policy. Makes decision whether to keep \a lock in
- * LRU for current LRU size \a unused, added in current scan \a added and
- * number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- /* Stop LRU processing if a young lock is found and we have reached past count */
- return ((added >= count) &&
- time_before(cfs_time_current(),
- cfs_time_add(lock->l_last_used, ns->ns_max_age))) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
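
The aged policy reduces to a single time comparison once the requested count has been reached. A minimal stand-alone sketch, assuming max_age is expressed in seconds:

#include <stdio.h>
#include <time.h>

/*
 * Toy model of the aged policy above: once 'added' reaches 'count', a
 * lock younger than max_age seconds is kept and scanning stops;
 * everything else is cancelled.  Values are illustrative only.
 */
static int aged_keep(int added, int count, time_t now,
		     time_t last_used, time_t max_age)
{
	return added >= count && now < last_used + max_age;
}

int main(void)
{
	time_t now = time(NULL);

	printf("%d\n", aged_keep(10, 5, now, now - 30, 600));  /* recent: keep */
	printf("%d\n", aged_keep(10, 5, now, now - 900, 600)); /* old: cancel */
	return 0;
}
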
-
-/**
- * Callback function for default policy. Makes decision whether to keep \a lock
- * in LRU for current LRU size \a unused, added in current scan \a added and
- * number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- /* Stop LRU processing when we reach past count or have checked all
- * locks in LRU. */
- return (added >= count) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
- struct ldlm_lock *, int,
- int, int);
-
-static ldlm_cancel_lru_policy_t
-ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
-{
- if (flags & LDLM_CANCEL_NO_WAIT)
- return ldlm_cancel_no_wait_policy;
-
- if (ns_connect_lru_resize(ns)) {
- if (flags & LDLM_CANCEL_SHRINK)
- /* We kill the passed number of old locks. */
- return ldlm_cancel_passed_policy;
- else if (flags & LDLM_CANCEL_LRUR)
- return ldlm_cancel_lrur_policy;
- else if (flags & LDLM_CANCEL_PASSED)
- return ldlm_cancel_passed_policy;
- } else {
- if (flags & LDLM_CANCEL_AGED)
- return ldlm_cancel_aged_policy;
- }
-
- return ldlm_cancel_default_policy;
-}
-
-/**
- * - Free space in LRU for \a count new locks,
- * redundant unused locks are canceled locally;
- * - also cancel locally unused aged locks;
- * - do not cancel more than \a max locks;
- * - GET the found locks and add them into the \a cancels list.
- *
- * A client lock can be added to the l_bl_ast list only when it is
- * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing
- * CANCEL. There are the following use cases:
- * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
- * ldlm_cli_cancel(), which check and set this flag properly. As any
- * attempt to cancel a lock relies on this flag, the l_bl_ast list is
- * accessed later without any special locking.
- *
- * Calling policies for enabled LRU resize:
- * ----------------------------------------
- * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
- * cancel not more than \a count locks;
- *
- * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
- * the beginning of LRU list);
- *
- * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
- * memory pressure policy function;
- *
- * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
- *
- * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
- * (typically before replaying locks) w/o
- * sending any RPCs or waiting for any
- * outstanding RPC to complete.
- */
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
- struct list_head *cancels, int count, int max,
- int flags)
-{
- ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock, *next;
- int added = 0, unused, remained;
-
- spin_lock(&ns->ns_lock);
- unused = ns->ns_nr_unused;
- remained = unused;
-
- if (!ns_connect_lru_resize(ns))
- count += unused - ns->ns_max_unused;
-
- pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf != NULL);
-
- while (!list_empty(&ns->ns_unused_list)) {
- ldlm_policy_res_t result;
-
- /* all unused locks */
- if (remained-- <= 0)
- break;
-
- /* For any flags, stop scanning if @max is reached. */
- if (max && added >= max)
- break;
-
- list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
- l_lru) {
- /* No locks which got blocking requests. */
- LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
-
- if (flags & LDLM_CANCEL_NO_WAIT &&
- lock->l_flags & LDLM_FL_SKIPPED)
- /* already processed */
- continue;
-
- /* Somebody is already doing CANCEL. No need for this
- * lock in LRU, do not traverse it again. */
- if (!(lock->l_flags & LDLM_FL_CANCELING))
- break;
-
- ldlm_lock_remove_from_lru_nolock(lock);
- }
- if (&lock->l_lru == &ns->ns_unused_list)
- break;
-
- LDLM_LOCK_GET(lock);
- spin_unlock(&ns->ns_lock);
- lu_ref_add(&lock->l_reference, __func__, current);
-
- /* Pass the lock through the policy filter and see if it
- * should stay in LRU.
- *
- * Even for shrinker policy we stop scanning if
- * we find a lock that should stay in the cache.
- * We should take into account lock age anyway
- * as a new lock is a valuable resource even if
- * it has a low weight.
- *
- * That is, for shrinker policy we drop only
- * old locks, but additionally choose them by
- * their weight. Big extent locks will stay in
- * the cache. */
- result = pf(ns, lock, unused, added, count);
- if (result == LDLM_POLICY_KEEP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __func__, current);
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_lock);
- break;
- }
- if (result == LDLM_POLICY_SKIP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __func__, current);
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_lock);
- continue;
- }
-
- lock_res_and_lock(lock);
- /* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
- /* Another thread is removing lock from LRU, or
- * somebody is already doing CANCEL, or there
- * is a blocking request which will send cancel
- * by itself, or the lock is no longer unused. */
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference,
- __func__, current);
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_lock);
- continue;
- }
- LASSERT(!lock->l_readers && !lock->l_writers);
-
- /* If we have chosen to cancel this lock voluntarily, we
- * had better send a cancel notification to the server, so
- * that it frees the appropriate state. This might lead to
- * a race where, while we are doing the cancel here, the
- * server is also silently cancelling this lock. */
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
-
- /* Setting the CBPENDING flag is a little misleading,
- * but prevents an important race; namely, once
- * CBPENDING is set, the lock can accumulate no more
- * readers/writers. Since readers and writers are
- * already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
- /* We can't re-add to l_lru as it confuses the
- * refcounting in ldlm_lock_remove_from_lru() if an AST
- * arrives after we drop lr_lock below. We use l_bl_ast
- * and can't use l_pending_chain as it is used on both
- * the server and the client, even though bug 5666 says
- * it is used only on the server. */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference, __func__, current);
- spin_lock(&ns->ns_lock);
- added++;
- unused--;
- }
- spin_unlock(&ns->ns_lock);
- return added;
-}
-
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
- int count, int max, ldlm_cancel_flags_t cancel_flags,
- int flags)
-{
- int added;
-
- added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
- if (added <= 0)
- return added;
- return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
-}
-
-/**
- * Cancel at least \a nr locks from given namespace LRU.
- *
- * When called with LCF_ASYNC the blocking callback will be handled
- * in a thread and this function will return after the thread has been
- * asked to call the callback. When called without LCF_ASYNC the blocking
- * callback will be performed in this function.
- */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- ldlm_cancel_flags_t cancel_flags,
- int flags)
-{
- LIST_HEAD(cancels);
- int count, rc;
-
- /* Just prepare the list of locks, do not actually cancel them yet.
- * Locks are cancelled later in a separate thread. */
- count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
- rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
- if (rc == 0)
- return count;
-
- return 0;
-}
-
-/**
- * Find and cancel locally unused locks found on resource, matched to the
- * given policy, mode. GET the found locks and add them into the \a cancels
- * list.
- */
-int ldlm_cancel_resource_local(struct ldlm_resource *res,
- struct list_head *cancels,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque)
-{
- struct ldlm_lock *lock;
- int count = 0;
-
- lock_res(res);
- list_for_each_entry(lock, &res->lr_granted, l_res_link) {
- if (opaque != NULL && lock->l_ast_data != opaque) {
- LDLM_ERROR(lock, "data %p doesn't match opaque %p",
- lock->l_ast_data, opaque);
- continue;
- }
-
- if (lock->l_readers || lock->l_writers)
- continue;
-
- /* If somebody is already doing CANCEL, or a blocking AST came,
- * skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
- lock->l_flags & LDLM_FL_CANCELING)
- continue;
-
- if (lockmode_compat(lock->l_granted_mode, mode))
- continue;
-
- /* If a policy is given and this is an IBITS lock, add to the list
- * only those locks that match the policy. */
- if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
- !(lock->l_policy_data.l_inodebits.bits &
- policy->l_inodebits.bits))
- continue;
-
- /* See CBPENDING comment in ldlm_cancel_lru */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
- lock_flags;
-
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
- LDLM_LOCK_GET(lock);
- count++;
- }
- unlock_res(res);
-
- return ldlm_cli_cancel_list_local(cancels, count, cancel_flags);
-}
-EXPORT_SYMBOL(ldlm_cancel_resource_local);
-
-/**
- * Cancel client-side locks from a list and send/prepare cancel RPCs to the
- * server.
- * If \a req is NULL, send a CANCEL request to the server with the handles
- * of the locks in \a cancels. If EARLY_CANCEL is not supported, send CANCEL
- * requests separately per lock.
- * If \a req is not NULL, put handles of locks in \a cancels into the request
- * buffer at the offset \a off.
- * Destroy \a cancels at the end.
- */
-int ldlm_cli_cancel_list(struct list_head *cancels, int count,
- struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
-{
- struct ldlm_lock *lock;
- int res = 0;
-
- if (list_empty(cancels) || count == 0)
- return 0;
-
- /* XXX: requests (both batched and not) could be sent in parallel.
- * Usually it is enough to have just 1 RPC, but it is possible that
- * there are too many locks to be cancelled in LRU or on a resource.
- * It would also speed up the case when the server does not support
- * the feature. */
- while (count > 0) {
- LASSERT(!list_empty(cancels));
- lock = list_entry(cancels->next, struct ldlm_lock,
- l_bl_ast);
- LASSERT(lock->l_conn_export);
-
- if (exp_connect_cancelset(lock->l_conn_export)) {
- res = count;
- if (req)
- ldlm_cancel_pack(req, cancels, count);
- else
- res = ldlm_cli_cancel_req(lock->l_conn_export,
- cancels, count,
- flags);
- } else {
- res = ldlm_cli_cancel_req(lock->l_conn_export,
- cancels, 1, flags);
- }
-
- if (res < 0) {
- CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
- "ldlm_cli_cancel_list: %d\n", res);
- res = count;
- }
-
- count -= res;
- ldlm_lock_list_put(cancels, l_bl_ast, res);
- }
- LASSERT(count == 0);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_list);
-
-/**
- * Cancel all locks on a resource that have 0 readers/writers.
- *
- * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
- * to notify the server. */
-int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
-{
- struct ldlm_resource *res;
- LIST_HEAD(cancels);
- int count;
- int rc;
-
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL) {
- /* This is not a problem. */
- CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
- return 0;
- }
-
- LDLM_RESOURCE_ADDREF(res);
- count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
- 0, flags | LCF_BL_AST, opaque);
- rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
- if (rc != ELDLM_OK)
- CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
- PLDLMRES(res), rc);
-
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return 0;
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
-
-struct ldlm_cli_cancel_arg {
- int lc_flags;
- void *lc_opaque;
-};
-
-static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
- struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- struct ldlm_cli_cancel_arg *lc = arg;
-
- ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
- NULL, LCK_MINMODE,
- lc->lc_flags, lc->lc_opaque);
- /* must return 0 for hash iteration */
- return 0;
-}
-
-/**
- * Cancel all locks on a namespace (or a specific resource, if given)
- * that have 0 readers/writers.
- *
- * If flags & LCF_LOCAL, throw the locks away without trying
- * to notify the server. */
-int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_cancel_flags_t flags, void *opaque)
-{
- struct ldlm_cli_cancel_arg arg = {
- .lc_flags = flags,
- .lc_opaque = opaque,
- };
-
- if (ns == NULL)
- return ELDLM_OK;
-
- if (res_id != NULL) {
- return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
- LCK_MINMODE, flags,
- opaque);
- } else {
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_cli_hash_cancel_unused, &arg);
- return ELDLM_OK;
- }
-}
-EXPORT_SYMBOL(ldlm_cli_cancel_unused);
-
-/* Lock iterators. */
-
-int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
- void *closure)
-{
- struct list_head *tmp, *next;
- struct ldlm_lock *lock;
- int rc = LDLM_ITER_CONTINUE;
-
- if (!res)
- return LDLM_ITER_CONTINUE;
-
- lock_res(res);
- list_for_each_safe(tmp, next, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (iter(lock, closure) == LDLM_ITER_STOP) {
- rc = LDLM_ITER_STOP;
- goto out;
- }
- }
-
- list_for_each_safe(tmp, next, &res->lr_converting) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (iter(lock, closure) == LDLM_ITER_STOP) {
- rc = LDLM_ITER_STOP;
- goto out;
- }
- }
-
- list_for_each_safe(tmp, next, &res->lr_waiting) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (iter(lock, closure) == LDLM_ITER_STOP) {
- rc = LDLM_ITER_STOP;
- goto out;
- }
- }
- out:
- unlock_res(res);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_resource_foreach);
-
-struct iter_helper_data {
- ldlm_iterator_t iter;
- void *closure;
-};
-
-static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
-{
- struct iter_helper_data *helper = closure;
-
- return helper->iter(lock, helper->closure);
-}
-
-static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
-
- return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
- LDLM_ITER_STOP;
-}
-
-void ldlm_namespace_foreach(struct ldlm_namespace *ns,
- ldlm_iterator_t iter, void *closure)
-
-{
- struct iter_helper_data helper = {
- .iter = iter,
- .closure = closure,
- };
-
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_res_iter_helper, &helper);
-
-}
-EXPORT_SYMBOL(ldlm_namespace_foreach);
-
-/* Non-blocking function to manipulate a lock whose cb_data is being put away.
- * return 0: no resource found
- * > 0: must be LDLM_ITER_STOP/LDLM_ITER_CONTINUE.
- * < 0: errors
- */
-int ldlm_resource_iterate(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_iterator_t iter, void *data)
-{
- struct ldlm_resource *res;
- int rc;
-
- if (ns == NULL) {
- CERROR("must pass in namespace\n");
- LBUG();
- }
-
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL)
- return 0;
-
- LDLM_RESOURCE_ADDREF(res);
- rc = ldlm_resource_foreach(res, iter, data);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return rc;
-}
-EXPORT_SYMBOL(ldlm_resource_iterate);
-
-/* Lock replay */
-
-static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
-{
- struct list_head *list = closure;
-
- /* we use l_pending_chain here, because it's unused on clients. */
- LASSERTF(list_empty(&lock->l_pending_chain),
- "lock %p next %p prev %p\n",
- lock, &lock->l_pending_chain.next,
- &lock->l_pending_chain.prev);
- /* bug 9573: don't replay locks left after eviction, or
- * bug 17614: locks being actively cancelled. Get a reference
- * on a lock so that it does not disappear under us (e.g. due to cancel)
- */
- if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
- list_add(&lock->l_pending_chain, list);
- LDLM_LOCK_GET(lock);
- }
-
- return LDLM_ITER_CONTINUE;
-}
-
-static int replay_lock_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct ldlm_async_args *aa, int rc)
-{
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
- struct obd_export *exp;
-
- atomic_dec(&req->rq_import->imp_replay_inflight);
- if (rc != ELDLM_OK)
- goto out;
-
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if (reply == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- lock = ldlm_handle2lock(&aa->lock_handle);
- if (!lock) {
- CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
- aa->lock_handle.cookie, reply->lock_handle.cookie,
- req->rq_export->exp_client_uuid.uuid,
- libcfs_id2str(req->rq_peer));
- rc = -ESTALE;
- goto out;
- }
-
- /* Key change rehash lock in per-export hash with new key */
- exp = req->rq_export;
- if (exp && exp->exp_lock_hash) {
- /* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
- /* coverity[overrun-buffer-val] */
- cfs_hash_rehash_key(exp->exp_lock_hash,
- &lock->l_remote_handle,
- &reply->lock_handle,
- &lock->l_exp_hash);
- } else {
- lock->l_remote_handle = reply->lock_handle;
- }
-
- LDLM_DEBUG(lock, "replayed lock:");
- ptlrpc_import_recovery_state_machine(req->rq_import);
- LDLM_LOCK_PUT(lock);
-out:
- if (rc != ELDLM_OK)
- ptlrpc_connect_import(req->rq_import);
-
- return rc;
-}
-
-static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
-{
- struct ptlrpc_request *req;
- struct ldlm_async_args *aa;
- struct ldlm_request *body;
- int flags;
-
- /* Bug 11974: Do not replay a lock which is actively being canceled */
- if (lock->l_flags & LDLM_FL_CANCELING) {
- LDLM_DEBUG(lock, "Not replaying canceled lock:");
- return 0;
- }
-
- /* If this is a reply-less callback lock, we cannot replay it, since
- * the server might have long since dropped it, but the notification of
- * that event was lost by the network (and the server may have already
- * granted a conflicting lock). */
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
- LDLM_DEBUG(lock, "Not replaying reply-less lock:");
- ldlm_lock_cancel(lock);
- return 0;
- }
-
- /*
- * If granted mode matches the requested mode, this lock is granted.
- *
- * If they differ, but we have a granted mode, then we were granted
- * one mode and now want another: ergo, converting.
- *
- * If we haven't been granted anything and are on a resource list,
- * then we're blocked/waiting.
- *
- * If we haven't been granted anything and we're NOT on a resource list,
- * then we haven't got a reply yet and don't have a known disposition.
- * This happens whenever a lock enqueue is the request that triggers
- * recovery.
- */
- if (lock->l_granted_mode == lock->l_req_mode)
- flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
- else if (lock->l_granted_mode)
- flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
- else if (!list_empty(&lock->l_res_link))
- flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
- else
- flags = LDLM_FL_REPLAY;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
- LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
- if (req == NULL)
- return -ENOMEM;
-
- /* We're part of recovery, so don't wait for it. */
- req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- ldlm_lock2desc(lock, &body->lock_desc);
- body->lock_flags = ldlm_flags_to_wire(flags);
-
- ldlm_lock2handle(lock, &body->lock_handle[0]);
- if (lock->l_lvb_len > 0)
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- lock->l_lvb_len);
- ptlrpc_request_set_replen(req);
- /* Notify the server we've replayed all requests.
- * Also, we mark the request to be put on a dedicated
- * queue to be processed after all request replays.
- * bug 6063 */
- lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
-
- LDLM_DEBUG(lock, "replaying lock:");
-
- atomic_inc(&req->rq_import->imp_replay_inflight);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->lock_handle = body->lock_handle[0];
- req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
- ptlrpcd_add_req(req);
-
- return 0;
-}
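
The disposition logic commented above (granted vs. converting vs. blocked vs. no reply yet) can be modelled in isolation. In the sketch below the EX_* values are placeholders, not the real LDLM_FL_* bits; only the order of the checks mirrors the code:

#include <stdio.h>

/*
 * Toy model of the disposition checks in replay_one_lock() above.
 * EX_* are placeholder flag bits for illustration only.
 */
#define EX_REPLAY		0x1
#define EX_BLOCK_GRANTED	0x2
#define EX_BLOCK_CONV		0x4
#define EX_BLOCK_WAIT		0x8

static int replay_flags(int granted_mode, int req_mode, int on_res_list)
{
	if (granted_mode == req_mode)
		return EX_REPLAY | EX_BLOCK_GRANTED;	/* fully granted */
	if (granted_mode)
		return EX_REPLAY | EX_BLOCK_CONV;	/* converting */
	if (on_res_list)
		return EX_REPLAY | EX_BLOCK_WAIT;	/* blocked/waiting */
	return EX_REPLAY;				/* no reply seen yet */
}

int main(void)
{
	printf("0x%x\n", replay_flags(3, 3, 1));	/* granted lock */
	printf("0x%x\n", replay_flags(0, 3, 0));	/* unknown disposition */
	return 0;
}
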
-
-/**
- * Cancel as many unused locks as possible before replay. Since we are
- * in recovery, we cannot wait for any outstanding RPCs or send any new
- * RPCs to the server.
- *
- * Called only in recovery before replaying locks. There is no need to
- * replay locks that are unused. Since the clients may hold thousands of
- * cached unused locks, dropping the unused locks can greatly reduce the
- * load on the servers at recovery time.
- */
-static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
-{
- int canceled;
- LIST_HEAD(cancels);
-
- CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
- ldlm_ns_name(ns), ns->ns_nr_unused);
-
- /* We don't need to care whether or not LRU resize is enabled
- * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
- * count parameter */
- canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
- LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
-
- CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
- canceled, ldlm_ns_name(ns));
-}
-
-int ldlm_replay_locks(struct obd_import *imp)
-{
- struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
- LIST_HEAD(list);
- struct ldlm_lock *lock, *next;
- int rc = 0;
-
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
-
- /* don't replay locks if import failed recovery */
- if (imp->imp_vbr_failed)
- return 0;
-
- /* ensure this doesn't fall to 0 before all have been queued */
- atomic_inc(&imp->imp_replay_inflight);
-
- if (ldlm_cancel_unused_locks_before_replay)
- ldlm_cancel_unused_locks_for_replay(ns);
-
- ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
-
- list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
- list_del_init(&lock->l_pending_chain);
- if (rc) {
- LDLM_LOCK_RELEASE(lock);
- continue; /* or try to do the rest? */
- }
- rc = replay_one_lock(imp, lock);
- LDLM_LOCK_RELEASE(lock);
- }
-
- atomic_dec(&imp->imp_replay_inflight);
-
- return rc;
-}
-EXPORT_SYMBOL(ldlm_replay_locks);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
deleted file mode 100644
index cc212b97abaa..000000000000
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ /dev/null
@@ -1,1464 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ldlm/ldlm_resource.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Peter Braam <braam@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LDLM
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_fid.h"
-#include "../include/obd_class.h"
-#include "ldlm_internal.h"
-
-struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
-
-int ldlm_srv_namespace_nr;
-int ldlm_cli_namespace_nr;
-
-struct mutex ldlm_srv_namespace_lock;
-LIST_HEAD(ldlm_srv_namespace_list);
-
-struct mutex ldlm_cli_namespace_lock;
-/* Client Namespaces that have active resources in them.
- * Once all resources go away, ldlm_poold moves such namespaces to the
- * inactive list */
-LIST_HEAD(ldlm_cli_active_namespace_list);
-/* Client namespaces that don't have any locks in them */
-LIST_HEAD(ldlm_cli_inactive_namespace_list);
-
-static struct dentry *ldlm_debugfs_dir;
-static struct dentry *ldlm_ns_debugfs_dir;
-struct dentry *ldlm_svc_debugfs_dir;
-
-/* During a debug dump, limit the number of granted locks printed for one
- * resource to avoid a DDOS. */
-unsigned int ldlm_dump_granted_max = 256;
-
-static ssize_t
-lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
- ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
- return count;
-}
-LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
-
-LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
-
-static struct lprocfs_vars ldlm_debugfs_list[] = {
- { "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
- { "dump_granted_max", &ldlm_rw_uint_fops, &ldlm_dump_granted_max },
- { NULL }
-};
-
-int ldlm_debugfs_setup(void)
-{
- int rc;
-
- ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
- debugfs_lustre_root,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
- goto err;
- }
-
- ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
- ldlm_debugfs_dir,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
- : -ENOMEM;
- goto err_type;
- }
-
- ldlm_svc_debugfs_dir = ldebugfs_register("services",
- ldlm_debugfs_dir,
- NULL, NULL);
- if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
- CERROR("LProcFS failed in ldlm-init\n");
- rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
- : -ENOMEM;
- goto err_ns;
- }
-
- rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
-
- return 0;
-
-err_ns:
- ldebugfs_remove(&ldlm_ns_debugfs_dir);
-err_type:
- ldebugfs_remove(&ldlm_debugfs_dir);
-err:
- ldlm_svc_debugfs_dir = NULL;
- ldlm_ns_debugfs_dir = NULL;
- ldlm_debugfs_dir = NULL;
- return rc;
-}
-
-void ldlm_debugfs_cleanup(void)
-{
- if (!IS_ERR_OR_NULL(ldlm_svc_debugfs_dir))
- ldebugfs_remove(&ldlm_svc_debugfs_dir);
-
- if (!IS_ERR_OR_NULL(ldlm_ns_debugfs_dir))
- ldebugfs_remove(&ldlm_ns_debugfs_dir);
-
- if (!IS_ERR_OR_NULL(ldlm_debugfs_dir))
- ldebugfs_remove(&ldlm_debugfs_dir);
-
- ldlm_svc_debugfs_dir = NULL;
- ldlm_ns_debugfs_dir = NULL;
- ldlm_debugfs_dir = NULL;
-}
-
-static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- __u64 res = 0;
- struct cfs_hash_bd bd;
- int i;
-
- /* result is not strictly consistent */
- cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
- res += cfs_hash_bd_count_get(&bd);
- return sprintf(buf, "%lld\n", res);
-}
-LUSTRE_RO_ATTR(resource_count);
-
-static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- __u64 locks;
-
- locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
- LPROCFS_FIELDS_FLAGS_SUM);
- return sprintf(buf, "%lld\n", locks);
-}
-LUSTRE_RO_ATTR(lock_count);
-
-static ssize_t lock_unused_count_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
-
- return sprintf(buf, "%d\n", ns->ns_nr_unused);
-}
-LUSTRE_RO_ATTR(lock_unused_count);
-
-static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- __u32 *nr = &ns->ns_max_unused;
-
- if (ns_connect_lru_resize(ns))
- nr = &ns->ns_nr_unused;
- return sprintf(buf, "%u", *nr);
-}
-
-static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- unsigned long tmp;
- int lru_resize;
- int err;
-
- if (strncmp(buffer, "clear", 5) == 0) {
- CDEBUG(D_DLMTRACE,
- "dropping all unused locks from namespace %s\n",
- ldlm_ns_name(ns));
- if (ns_connect_lru_resize(ns)) {
- int canceled, unused = ns->ns_nr_unused;
-
- /* Try to cancel all @ns_nr_unused locks. */
- canceled = ldlm_cancel_lru(ns, unused, 0,
- LDLM_CANCEL_PASSED);
- if (canceled < unused) {
- CDEBUG(D_DLMTRACE,
- "not all requested locks are canceled, requested: %d, canceled: %d\n",
- unused,
- canceled);
- return -EINVAL;
- }
- } else {
- tmp = ns->ns_max_unused;
- ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
- ns->ns_max_unused = tmp;
- }
- return count;
- }
-
- err = kstrtoul(buffer, 10, &tmp);
- if (err != 0) {
- CERROR("lru_size: invalid value written\n");
- return -EINVAL;
- }
- lru_resize = (tmp == 0);
-
- if (ns_connect_lru_resize(ns)) {
- if (!lru_resize)
- ns->ns_max_unused = (unsigned int)tmp;
-
- if (tmp > ns->ns_nr_unused)
- tmp = ns->ns_nr_unused;
- tmp = ns->ns_nr_unused - tmp;
-
- CDEBUG(D_DLMTRACE,
- "changing namespace %s unused locks from %u to %u\n",
- ldlm_ns_name(ns), ns->ns_nr_unused,
- (unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
-
- if (!lru_resize) {
- CDEBUG(D_DLMTRACE,
- "disable lru_resize for namespace %s\n",
- ldlm_ns_name(ns));
- ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
- }
- } else {
- CDEBUG(D_DLMTRACE,
- "changing namespace %s max_unused from %u to %u\n",
- ldlm_ns_name(ns), ns->ns_max_unused,
- (unsigned int)tmp);
- ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
-
- /* Make sure that LRU resize was originally supported before
- * turning it on here. */
- if (lru_resize &&
- (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
- CDEBUG(D_DLMTRACE,
- "enable lru_resize for namespace %s\n",
- ldlm_ns_name(ns));
- ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
- }
- }
-
- return count;
-}
-LUSTRE_RW_ATTR(lru_size);
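Editorial note: the store handler above accepts either the literal string "clear" or a number, where 0 means "let LRU resizing manage the cache" and any other value pins ns_max_unused. A minimal userspace sketch of that parsing rule follows; the enum and helper names are hypothetical, only the accepted inputs mirror lru_size_store().

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	enum lru_action { LRU_CLEAR, LRU_AUTO, LRU_FIXED, LRU_INVALID };

	static enum lru_action parse_lru_size(const char *buf, unsigned long *nr)
	{
		char *end;

		if (strncmp(buf, "clear", 5) == 0)
			return LRU_CLEAR;

		errno = 0;
		*nr = strtoul(buf, &end, 10);
		if (errno || end == buf)
			return LRU_INVALID;

		return *nr == 0 ? LRU_AUTO : LRU_FIXED;
	}

	int main(void)
	{
		unsigned long nr = 0;

		printf("%d\n", parse_lru_size("clear", &nr));	/* 0: drop all unused locks */
		printf("%d\n", parse_lru_size("0", &nr));	/* 1: let LRU resize manage it */
		printf("%d\n", parse_lru_size("512", &nr));	/* 2: pin the cap at 512 */
		return 0;
	}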
-
-static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
-
- return sprintf(buf, "%u", ns->ns_max_age);
-}
-
-static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- unsigned long tmp;
- int err;
-
- err = kstrtoul(buffer, 10, &tmp);
- if (err != 0)
- return -EINVAL;
-
- ns->ns_max_age = tmp;
-
- return count;
-}
-LUSTRE_RW_ATTR(lru_max_age);
-
-static ssize_t early_lock_cancel_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
-
- return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
-}
-
-static ssize_t early_lock_cancel_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- unsigned long supp = -1;
- int rc;
-
- rc = kstrtoul(buffer, 10, &supp);
- if (rc < 0)
- return rc;
-
- if (supp == 0)
- ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
- else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
- ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
- return count;
-}
-LUSTRE_RW_ATTR(early_lock_cancel);
-
-/* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
-static struct attribute *ldlm_ns_attrs[] = {
- &lustre_attr_resource_count.attr,
- &lustre_attr_lock_count.attr,
- &lustre_attr_lock_unused_count.attr,
- &lustre_attr_lru_size.attr,
- &lustre_attr_lru_max_age.attr,
- &lustre_attr_early_lock_cancel.attr,
- NULL,
-};
-
-static void ldlm_ns_release(struct kobject *kobj)
-{
- struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
- ns_kobj);
- complete(&ns->ns_kobj_unregister);
-}
-
-static struct kobj_type ldlm_ns_ktype = {
- .default_attrs = ldlm_ns_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = ldlm_ns_release,
-};
-
-static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
-{
- if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
- CERROR("dlm namespace %s has no procfs dir?\n",
- ldlm_ns_name(ns));
- else
- ldebugfs_remove(&ns->ns_debugfs_entry);
-
- if (ns->ns_stats != NULL)
- lprocfs_free_stats(&ns->ns_stats);
-}
-
-void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
-{
- kobject_put(&ns->ns_kobj);
- wait_for_completion(&ns->ns_kobj_unregister);
-}
-
-int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
-{
- int err;
-
- ns->ns_kobj.kset = ldlm_ns_kset;
- init_completion(&ns->ns_kobj_unregister);
- err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
- "%s", ldlm_ns_name(ns));
-
- ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
- if (ns->ns_stats == NULL) {
- kobject_put(&ns->ns_kobj);
- return -ENOMEM;
- }
-
- lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
- LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
-
- return err;
-}
-
-static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
-{
- struct dentry *ns_entry;
-
- if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
- ns_entry = ns->ns_debugfs_entry;
- } else {
- ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
- ldlm_ns_debugfs_dir);
- if (ns_entry == NULL)
- return -ENOMEM;
- ns->ns_debugfs_entry = ns_entry;
- }
-
- return 0;
-}
-#undef MAX_STRING_SIZE
-
-static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
-{
- const struct ldlm_res_id *id = key;
- unsigned val = 0;
- unsigned i;
-
- for (i = 0; i < RES_NAME_SIZE; i++)
- val += id->name[i];
- return val & mask;
-}
-
-static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
-{
- const struct ldlm_res_id *id = key;
- struct lu_fid fid;
- __u32 hash;
- __u32 val;
-
- fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
- fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
- fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
-
- hash = fid_flatten32(&fid);
- hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
- val = id->name[LUSTRE_RES_ID_HSH_OFF];
- hash += (val >> 5) + (val << 11);
- } else {
- val = fid_oid(&fid);
- }
- hash = hash_long(hash, hs->hs_bkt_bits);
- /* give me another random factor */
- hash -= hash_long((unsigned long)hs, val % 11 + 3);
-
- hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
- hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
-
- return hash & mask;
-}
-
-static void *ldlm_res_hop_key(struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- return &res->lr_name;
-}
-
-static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- return ldlm_res_eq((const struct ldlm_res_id *)key,
- (const struct ldlm_res_id *)&res->lr_name);
-}
-
-static void *ldlm_res_hop_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct ldlm_resource, lr_hash);
-}
-
-static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
- struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- ldlm_resource_getref(res);
-}
-
-static void ldlm_res_hop_put_locked(struct cfs_hash *hs,
- struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
-	/* cfs_hash_for_each_nolock is the only path that calls this */
- ldlm_resource_putref_locked(res);
-}
-
-static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ldlm_resource *res;
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- ldlm_resource_putref(res);
-}
-
-cfs_hash_ops_t ldlm_ns_hash_ops = {
- .hs_hash = ldlm_res_hop_hash,
- .hs_key = ldlm_res_hop_key,
- .hs_keycmp = ldlm_res_hop_keycmp,
- .hs_keycpy = NULL,
- .hs_object = ldlm_res_hop_object,
- .hs_get = ldlm_res_hop_get_locked,
- .hs_put_locked = ldlm_res_hop_put_locked,
- .hs_put = ldlm_res_hop_put
-};
-
-cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
- .hs_hash = ldlm_res_hop_fid_hash,
- .hs_key = ldlm_res_hop_key,
- .hs_keycmp = ldlm_res_hop_keycmp,
- .hs_keycpy = NULL,
- .hs_object = ldlm_res_hop_object,
- .hs_get = ldlm_res_hop_get_locked,
- .hs_put_locked = ldlm_res_hop_put_locked,
- .hs_put = ldlm_res_hop_put
-};
-
-struct ldlm_ns_hash_def {
- ldlm_ns_type_t nsd_type;
- /** hash bucket bits */
- unsigned nsd_bkt_bits;
- /** hash bits */
- unsigned nsd_all_bits;
- /** hash operations */
- cfs_hash_ops_t *nsd_hops;
-};
-
-struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
- {
- .nsd_type = LDLM_NS_TYPE_MDC,
- .nsd_bkt_bits = 11,
- .nsd_all_bits = 16,
- .nsd_hops = &ldlm_ns_fid_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_MDT,
- .nsd_bkt_bits = 14,
- .nsd_all_bits = 21,
- .nsd_hops = &ldlm_ns_fid_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_OSC,
- .nsd_bkt_bits = 8,
- .nsd_all_bits = 12,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_OST,
- .nsd_bkt_bits = 11,
- .nsd_all_bits = 17,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_MGC,
- .nsd_bkt_bits = 4,
- .nsd_all_bits = 4,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_MGT,
- .nsd_bkt_bits = 4,
- .nsd_all_bits = 4,
- .nsd_hops = &ldlm_ns_hash_ops,
- },
- {
- .nsd_type = LDLM_NS_TYPE_UNKNOWN,
- },
-};
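Editorial note: ldlm_namespace_new() below walks this array until it finds the requested namespace type or hits the LDLM_NS_TYPE_UNKNOWN sentinel. A compact illustration of that sentinel-terminated lookup idiom, with invented types and values:

	#include <stdio.h>

	struct hash_def {
		int	 type;		/* 0 acts as the end-of-table sentinel */
		unsigned bkt_bits;
	};

	static const struct hash_def defs[] = {
		{ .type = 1, .bkt_bits = 11 },
		{ .type = 2, .bkt_bits = 8 },
		{ .type = 0 },		/* sentinel, like LDLM_NS_TYPE_UNKNOWN */
	};

	static const struct hash_def *find_def(int type)
	{
		const struct hash_def *d;

		for (d = defs; d->type != 0; d++)
			if (d->type == type)
				return d;
		return NULL;		/* unknown type: caller bails out */
	}

	int main(void)
	{
		const struct hash_def *d = find_def(2);

		printf("%u\n", d ? d->bkt_bits : 0);	/* prints 8 */
		return 0;
	}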
-
-/**
- * Create and initialize new empty namespace.
- */
-struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client,
- ldlm_appetite_t apt,
- ldlm_ns_type_t ns_type)
-{
- struct ldlm_namespace *ns = NULL;
- struct ldlm_ns_bucket *nsb;
- struct ldlm_ns_hash_def *nsd;
- struct cfs_hash_bd bd;
- int idx;
- int rc;
-
- LASSERT(obd != NULL);
-
- rc = ldlm_get_ref();
- if (rc) {
- CERROR("ldlm_get_ref failed: %d\n", rc);
- return NULL;
- }
-
- for (idx = 0;; idx++) {
- nsd = &ldlm_ns_hash_defs[idx];
- if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
- CERROR("Unknown type %d for ns %s\n", ns_type, name);
- goto out_ref;
- }
-
- if (nsd->nsd_type == ns_type)
- break;
- }
-
- ns = kzalloc(sizeof(*ns), GFP_NOFS);
- if (!ns)
- goto out_ref;
-
- ns->ns_rs_hash = cfs_hash_create(name,
- nsd->nsd_all_bits, nsd->nsd_all_bits,
- nsd->nsd_bkt_bits, sizeof(*nsb),
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- nsd->nsd_hops,
- CFS_HASH_DEPTH |
- CFS_HASH_BIGNAME |
- CFS_HASH_SPIN_BKTLOCK |
- CFS_HASH_NO_ITEMREF);
- if (ns->ns_rs_hash == NULL)
- goto out_ns;
-
- cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
- nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
- at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
- nsb->nsb_namespace = ns;
- }
-
- ns->ns_obd = obd;
- ns->ns_appetite = apt;
- ns->ns_client = client;
-
- INIT_LIST_HEAD(&ns->ns_list_chain);
- INIT_LIST_HEAD(&ns->ns_unused_list);
- spin_lock_init(&ns->ns_lock);
- atomic_set(&ns->ns_bref, 0);
- init_waitqueue_head(&ns->ns_waitq);
-
- ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
- ns->ns_nr_unused = 0;
- ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
- ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
- ns->ns_orig_connect_flags = 0;
- ns->ns_connect_flags = 0;
- ns->ns_stopping = 0;
-
- rc = ldlm_namespace_sysfs_register(ns);
- if (rc != 0) {
- CERROR("Can't initialize ns sysfs, rc %d\n", rc);
- goto out_hash;
- }
-
- rc = ldlm_namespace_debugfs_register(ns);
- if (rc != 0) {
- CERROR("Can't initialize ns proc, rc %d\n", rc);
- goto out_sysfs;
- }
-
- idx = ldlm_namespace_nr_read(client);
- rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
- if (rc) {
- CERROR("Can't initialize lock pool, rc %d\n", rc);
- goto out_proc;
- }
-
- ldlm_namespace_register(ns, client);
- return ns;
-out_proc:
- ldlm_namespace_debugfs_unregister(ns);
-out_sysfs:
- ldlm_namespace_sysfs_unregister(ns);
- ldlm_namespace_cleanup(ns, 0);
-out_hash:
- cfs_hash_putref(ns->ns_rs_hash);
-out_ns:
- kfree(ns);
-out_ref:
- ldlm_put_ref();
- return NULL;
-}
-EXPORT_SYMBOL(ldlm_namespace_new);
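Editorial note: ldlm_namespace_new() is a textbook example of goto-based error unwinding: each acquisition gets a label, a failure jumps to the label that releases everything obtained so far, and the labels run in reverse order of acquisition. A generic, self-contained sketch of the idiom (plain malloc()s stand in for the real resources):

	#include <stdlib.h>

	static int setup_three(void **out_a, void **out_b, void **out_c)
	{
		void *a, *b, *c;

		a = malloc(16);
		if (!a)
			goto err;
		b = malloc(16);
		if (!b)
			goto err_free_a;
		c = malloc(16);
		if (!c)
			goto err_free_b;

		*out_a = a;		/* success: hand ownership to the caller */
		*out_b = b;
		*out_c = c;
		return 0;

	err_free_b:			/* unwind in reverse order of acquisition */
		free(b);
	err_free_a:
		free(a);
	err:
		return -1;
	}

	int main(void)
	{
		void *a, *b, *c;

		if (setup_three(&a, &b, &c) == 0) {
			free(c);
			free(b);
			free(a);
		}
		return 0;
	}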
-
-extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
-
-/**
- * Cancel and destroy all locks on a resource.
- *
- * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
- * clean up. This is currently only used for recovery, and we make
- * certain assumptions as a result--notably, that we shouldn't cancel
- * locks with refs.
- */
-static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
- __u64 flags)
-{
- struct list_head *tmp;
- int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
- bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
-
- do {
- struct ldlm_lock *lock = NULL;
-
-		/* First, look for a lock that has not been cleaned up yet;
-		 * all cleaned locks are marked with the CLEANED flag. */
- lock_res(res);
- list_for_each(tmp, q) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
- if (lock->l_flags & LDLM_FL_CLEANED) {
- lock = NULL;
- continue;
- }
- LDLM_LOCK_GET(lock);
- lock->l_flags |= LDLM_FL_CLEANED;
- break;
- }
-
- if (lock == NULL) {
- unlock_res(res);
- break;
- }
-
- /* Set CBPENDING so nothing in the cancellation path
- * can match this lock. */
- lock->l_flags |= LDLM_FL_CBPENDING;
- lock->l_flags |= LDLM_FL_FAILED;
- lock->l_flags |= flags;
-
- /* ... without sending a CANCEL message for local_only. */
- if (local_only)
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
-
- if (local_only && (lock->l_readers || lock->l_writers)) {
- /* This is a little bit gross, but much better than the
- * alternative: pretend that we got a blocking AST from
- * the server, so that when the lock is decref'd, it
- * will go away ... */
- unlock_res(res);
- LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
- if (lock->l_completion_ast)
- lock->l_completion_ast(lock, 0, NULL);
- LDLM_LOCK_RELEASE(lock);
- continue;
- }
-
- if (client) {
- struct lustre_handle lockh;
-
- unlock_res(res);
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (rc)
- CERROR("ldlm_cli_cancel: %d\n", rc);
- } else {
- ldlm_resource_unlink_lock(lock);
- unlock_res(res);
- LDLM_DEBUG(lock, "Freeing a lock still held by a client node");
- ldlm_lock_destroy(lock);
- }
- LDLM_LOCK_RELEASE(lock);
- } while (1);
-}
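Editorial note: cleanup_resource() uses a mark-and-rescan loop: each pass re-takes the resource lock, picks the first lock not yet flagged LDLM_FL_CLEANED, marks it so later passes skip it, then drops the lock to do the possibly blocking cancel work. A small userspace sketch of the same idiom, with hypothetical types:

	#include <pthread.h>
	#include <stdio.h>

	struct item { struct item *next; int cleaned; int id; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static void cleanup_all(struct item *head)
	{
		for (;;) {
			struct item *victim = NULL, *it;

			pthread_mutex_lock(&list_lock);
			for (it = head; it; it = it->next) {
				if (it->cleaned)
					continue;
				it->cleaned = 1;	/* later passes skip it */
				victim = it;
				break;
			}
			pthread_mutex_unlock(&list_lock);

			if (!victim)
				break;			/* everything marked: done */

			/* The lock is dropped here, so the per-item work may
			 * block, send RPCs, etc., just as the cancel path does. */
			printf("cleaning item %d\n", victim->id);
		}
	}

	int main(void)
	{
		struct item c = { NULL, 0, 3 }, b = { &c, 0, 2 }, a = { &b, 0, 1 };

		cleanup_all(&a);
		return 0;
	}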
-
-static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- __u64 flags = *(__u64 *)arg;
-
- cleanup_resource(res, &res->lr_granted, flags);
- cleanup_resource(res, &res->lr_converting, flags);
- cleanup_resource(res, &res->lr_waiting, flags);
-
- return 0;
-}
-
-static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
-
- lock_res(res);
- CERROR("%s: namespace resource "DLDLMRES
- " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
- ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
- atomic_read(&res->lr_refcount) - 1);
-
- ldlm_resource_dump(D_ERROR, res);
- unlock_res(res);
- return 0;
-}
-
-/**
- * Cancel and destroy all locks in the namespace.
- *
- * Typically used during evictions when server notified client that it was
- * evicted and all of its state needs to be destroyed.
- * Also used during shutdown.
- */
-int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
-{
- if (ns == NULL) {
- CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
- return ELDLM_OK;
- }
-
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
- return ELDLM_OK;
-}
-EXPORT_SYMBOL(ldlm_namespace_cleanup);
-
-/**
- * Attempts to free namespace.
- *
- * Only used when namespace goes away, like during an unmount.
- */
-static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
-{
- /* At shutdown time, don't call the cancellation callback */
- ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
-
- if (atomic_read(&ns->ns_bref) > 0) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- int rc;
-
- CDEBUG(D_DLMTRACE,
- "dlm namespace %s free waiting on refcount %d\n",
- ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
-force_wait:
- if (force)
- lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
-
- rc = l_wait_event(ns->ns_waitq,
- atomic_read(&ns->ns_bref) == 0, &lwi);
-
- /* Forced cleanups should be able to reclaim all references,
- * so it's safe to wait forever... we can't leak locks... */
- if (force && rc == -ETIMEDOUT) {
- LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
- ldlm_ns_name(ns),
- atomic_read(&ns->ns_bref), rc);
- goto force_wait;
- }
-
- if (atomic_read(&ns->ns_bref)) {
- LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
- ldlm_ns_name(ns),
- atomic_read(&ns->ns_bref), rc);
- return ELDLM_NAMESPACE_EXISTS;
- }
- CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
- ldlm_ns_name(ns));
- }
-
- return ELDLM_OK;
-}
-
-/**
- * Performs various cleanups for passed \a ns to make it drop refc and be
- * ready for freeing. Waits for refc == 0.
- *
- * The following is done:
- * (0) Unregister \a ns from its list to make inaccessible for potential
- * users like pools thread and others;
- * (1) Clear all locks in \a ns.
- */
-void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
- struct obd_import *imp,
- int force)
-{
- int rc;
-
- if (!ns)
- return;
-
- spin_lock(&ns->ns_lock);
- ns->ns_stopping = 1;
- spin_unlock(&ns->ns_lock);
-
- /*
- * Can fail with -EINTR when force == 0 in which case try harder.
- */
- rc = __ldlm_namespace_free(ns, force);
- if (rc != ELDLM_OK) {
- if (imp) {
- ptlrpc_disconnect_import(imp, 0);
- ptlrpc_invalidate_import(imp);
- }
-
- /*
- * With all requests dropped and the import inactive
- * we are guaranteed all reference will be dropped.
- */
- rc = __ldlm_namespace_free(ns, 1);
- LASSERT(rc == 0);
- }
-}
-
-/**
- * Performs freeing memory structures related to \a ns. This is only done
- * when ldlm_namespce_free_prior() successfully removed all resources
- * referencing \a ns and its refc == 0.
- */
-void ldlm_namespace_free_post(struct ldlm_namespace *ns)
-{
- if (!ns)
- return;
-
- /* Make sure that nobody can find this ns in its list. */
- ldlm_namespace_unregister(ns, ns->ns_client);
- /* Fini pool _before_ parent proc dir is removed. This is important as
- * ldlm_pool_fini() removes own proc dir which is child to @dir.
- * Removing it after @dir may cause oops. */
- ldlm_pool_fini(&ns->ns_pool);
-
- ldlm_namespace_debugfs_unregister(ns);
- ldlm_namespace_sysfs_unregister(ns);
- cfs_hash_putref(ns->ns_rs_hash);
-	/* Namespace \a ns should not be on any list at this time; otherwise
-	 * the poold thread could end up using the freed \a ns. */
- LASSERT(list_empty(&ns->ns_list_chain));
- kfree(ns);
- ldlm_put_ref();
-}
-
-/**
- * Cleanup the resource, and free namespace.
- * bug 12864:
- * Deadlock issue:
- * proc1: destroy import
- * class_disconnect_export(grab cl_sem) ->
- * -> ldlm_namespace_free ->
- * -> ldebugfs_remove(grab _lprocfs_lock).
- * proc2: read proc info
- * lprocfs_fops_read(grab _lprocfs_lock) ->
- * -> osc_rd_active, etc(grab cl_sem).
- *
- * So that I have to split the ldlm_namespace_free into two parts - the first
- * part ldlm_namespace_free_prior is used to cleanup the resource which is
- * being used; the 2nd part ldlm_namespace_free_post is used to unregister the
- * lprocfs entries, and then free memory. It will be called w/o cli->cl_sem
- * held.
- */
-void ldlm_namespace_free(struct ldlm_namespace *ns,
- struct obd_import *imp,
- int force)
-{
- ldlm_namespace_free_prior(ns, imp, force);
- ldlm_namespace_free_post(ns);
-}
-EXPORT_SYMBOL(ldlm_namespace_free);
-
-void ldlm_namespace_get(struct ldlm_namespace *ns)
-{
- atomic_inc(&ns->ns_bref);
-}
-EXPORT_SYMBOL(ldlm_namespace_get);
-
-/* This is only for callers that care about refcount */
-int ldlm_namespace_get_return(struct ldlm_namespace *ns)
-{
- return atomic_inc_return(&ns->ns_bref);
-}
-
-void ldlm_namespace_put(struct ldlm_namespace *ns)
-{
- if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
- wake_up(&ns->ns_waitq);
- spin_unlock(&ns->ns_lock);
- }
-}
-EXPORT_SYMBOL(ldlm_namespace_put);
-
-/** Register \a ns in the list of namespaces */
-void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
-{
- mutex_lock(ldlm_namespace_lock(client));
- LASSERT(list_empty(&ns->ns_list_chain));
- list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
- ldlm_namespace_nr_inc(client);
- mutex_unlock(ldlm_namespace_lock(client));
-}
-
-/** Unregister \a ns from the list of namespaces. */
-void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
-{
- mutex_lock(ldlm_namespace_lock(client));
- LASSERT(!list_empty(&ns->ns_list_chain));
- /* Some asserts and possibly other parts of the code are still
- * using list_empty(&ns->ns_list_chain). This is why it is
- * important to use list_del_init() here. */
- list_del_init(&ns->ns_list_chain);
- ldlm_namespace_nr_dec(client);
- mutex_unlock(ldlm_namespace_lock(client));
-}
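Editorial note: the comment above deserves a tiny illustration. list_del_init() re-initializes the removed node so list_empty(&ns->ns_list_chain) keeps returning true afterwards, which is exactly what the assertions elsewhere rely on. A minimal stand-alone sketch of that invariant (not the kernel's list.h, just the same semantics):

	#include <stdio.h>

	struct node { struct node *next, *prev; };

	static void node_init(struct node *n)  { n->next = n->prev = n; }
	static int  node_empty(struct node *n) { return n->next == n; }

	static void del_init(struct node *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
		node_init(n);		/* a plain delete would leave stale pointers */
	}

	int main(void)
	{
		struct node head, a;

		node_init(&head);
		a.next = head.next;	/* crude equivalent of list_add(&a, &head) */
		a.prev = &head;
		head.next->prev = &a;
		head.next = &a;

		del_init(&a);
		printf("%d %d\n", node_empty(&head), node_empty(&a));	/* 1 1 */
		return 0;
	}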
-
-/** Should be called with ldlm_namespace_lock(client) taken. */
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
-{
- LASSERT(!list_empty(&ns->ns_list_chain));
- LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
-}
-
-/** Should be called with ldlm_namespace_lock(client) taken. */
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
-{
- LASSERT(!list_empty(&ns->ns_list_chain));
- LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- list_move_tail(&ns->ns_list_chain,
- ldlm_namespace_inactive_list(client));
-}
-
-/** Should be called with ldlm_namespace_lock(client) taken. */
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
-{
- LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
- LASSERT(!list_empty(ldlm_namespace_list(client)));
- return container_of(ldlm_namespace_list(client)->next,
- struct ldlm_namespace, ns_list_chain);
-}
-
-/** Create and initialize new resource. */
-static struct ldlm_resource *ldlm_resource_new(void)
-{
- struct ldlm_resource *res;
- int idx;
-
- OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
- if (res == NULL)
- return NULL;
-
- INIT_LIST_HEAD(&res->lr_granted);
- INIT_LIST_HEAD(&res->lr_converting);
- INIT_LIST_HEAD(&res->lr_waiting);
-
- /* Initialize interval trees for each lock mode. */
- for (idx = 0; idx < LCK_MODE_NUM; idx++) {
- res->lr_itree[idx].lit_size = 0;
- res->lr_itree[idx].lit_mode = 1 << idx;
- res->lr_itree[idx].lit_root = NULL;
- }
-
- atomic_set(&res->lr_refcount, 1);
- spin_lock_init(&res->lr_lock);
- lu_ref_init(&res->lr_reference);
-
- /* The creator of the resource must unlock the mutex after LVB
- * initialization. */
- mutex_init(&res->lr_lvb_mutex);
- mutex_lock(&res->lr_lvb_mutex);
-
- return res;
-}
-
-/**
- * Return a reference to resource with given name, creating it if necessary.
- * Args: namespace with ns_lock unlocked
- * Locks: takes and releases NS hash-lock and res->lr_lock
- * Returns: referenced, unlocked ldlm_resource or NULL
- */
-struct ldlm_resource *
-ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
- const struct ldlm_res_id *name, ldlm_type_t type, int create)
-{
- struct hlist_node *hnode;
- struct ldlm_resource *res;
- struct cfs_hash_bd bd;
- __u64 version;
- int ns_refcount = 0;
-
- LASSERT(ns != NULL);
- LASSERT(parent == NULL);
- LASSERT(ns->ns_rs_hash != NULL);
- LASSERT(name->name[0] != 0);
-
- cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
- hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
- if (hnode != NULL) {
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- /* Synchronize with regard to resource creation. */
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- mutex_lock(&res->lr_lvb_mutex);
- mutex_unlock(&res->lr_lvb_mutex);
- }
-
- if (unlikely(res->lr_lvb_len < 0)) {
- ldlm_resource_putref(res);
- res = NULL;
- }
- return res;
- }
-
- version = cfs_hash_bd_version_get(&bd);
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
-
- if (create == 0)
- return NULL;
-
- LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
- "type: %d\n", type);
- res = ldlm_resource_new();
- if (!res)
- return NULL;
-
- res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
- res->lr_name = *name;
- res->lr_type = type;
- res->lr_most_restr = LCK_NL;
-
- cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
- hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
- cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
-
- if (hnode != NULL) {
- /* Someone won the race and already added the resource. */
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
- /* Clean lu_ref for failed resource. */
- lu_ref_fini(&res->lr_reference);
- /* We have taken lr_lvb_mutex. Drop it. */
- mutex_unlock(&res->lr_lvb_mutex);
- OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
-
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- /* Synchronize with regard to resource creation. */
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- mutex_lock(&res->lr_lvb_mutex);
- mutex_unlock(&res->lr_lvb_mutex);
- }
-
- if (unlikely(res->lr_lvb_len < 0)) {
- ldlm_resource_putref(res);
- res = NULL;
- }
- return res;
- }
- /* We won! Let's add the resource. */
- cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
- if (cfs_hash_bd_count_get(&bd) == 1)
- ns_refcount = ldlm_namespace_get_return(ns);
-
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- int rc;
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
- rc = ns->ns_lvbo->lvbo_init(res);
- if (rc < 0) {
- CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n",
- ns->ns_obd->obd_name, name->name[0],
- name->name[1], rc);
- kfree(res->lr_lvb_data);
- res->lr_lvb_data = NULL;
- res->lr_lvb_len = rc;
- mutex_unlock(&res->lr_lvb_mutex);
- ldlm_resource_putref(res);
- return NULL;
- }
- }
-
- /* We create resource with locked lr_lvb_mutex. */
- mutex_unlock(&res->lr_lvb_mutex);
-
- /* Let's see if we happened to be the very first resource in this
- * namespace. If so, and this is a client namespace, we need to move
- * the namespace into the active namespaces list to be patrolled by
- * the ldlm_poold. */
- if (ns_is_client(ns) && ns_refcount == 1) {
- mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
- ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
- mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
- }
-
- return res;
-}
-EXPORT_SYMBOL(ldlm_resource_get);
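Editorial note: ldlm_resource_get() follows the classic optimistic insert pattern: look up under the bucket lock, drop the lock to allocate, then re-check (aided by the bucket version) before inserting, and free the speculative allocation if another thread won the race. A stripped-down userspace sketch of the same shape, with a single toy bucket instead of cfs_hash and invented names:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct res { char name[32]; struct res *next; };

	static struct res *bucket;			/* toy single-bucket "hash" */
	static pthread_mutex_t bkt_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct res *lookup_locked(const char *name)
	{
		struct res *r;

		for (r = bucket; r; r = r->next)
			if (strcmp(r->name, name) == 0)
				return r;
		return NULL;
	}

	static struct res *resource_get(const char *name)
	{
		struct res *r, *newr;

		pthread_mutex_lock(&bkt_lock);
		r = lookup_locked(name);
		pthread_mutex_unlock(&bkt_lock);
		if (r)
			return r;			/* fast path: already there */

		newr = calloc(1, sizeof(*newr));	/* allocate with no lock held */
		if (!newr)
			return NULL;
		snprintf(newr->name, sizeof(newr->name), "%s", name);

		pthread_mutex_lock(&bkt_lock);
		r = lookup_locked(name);		/* did somebody race us? */
		if (r) {
			free(newr);			/* lost the race: drop our copy */
		} else {
			newr->next = bucket;		/* won the race: insert ours */
			bucket = newr;
			r = newr;
		}
		pthread_mutex_unlock(&bkt_lock);
		return r;
	}

	int main(void)
	{
		return resource_get("sample-resource") ? 0 : 1;
	}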
-
-struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
-{
- LASSERT(res != NULL);
- LASSERT(res != LP_POISON);
- atomic_inc(&res->lr_refcount);
- CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
- atomic_read(&res->lr_refcount));
- return res;
-}
-
-static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
- struct ldlm_resource *res)
-{
- struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
-
- if (!list_empty(&res->lr_granted)) {
- ldlm_resource_dump(D_ERROR, res);
- LBUG();
- }
-
- if (!list_empty(&res->lr_converting)) {
- ldlm_resource_dump(D_ERROR, res);
- LBUG();
- }
-
- if (!list_empty(&res->lr_waiting)) {
- ldlm_resource_dump(D_ERROR, res);
- LBUG();
- }
-
- cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
- bd, &res->lr_hash);
- lu_ref_fini(&res->lr_reference);
- if (cfs_hash_bd_count_get(bd) == 0)
- ldlm_namespace_put(nsb->nsb_namespace);
-}
-
-/* Returns 1 if the resource was freed, 0 if it remains. */
-int ldlm_resource_putref(struct ldlm_resource *res)
-{
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- struct cfs_hash_bd bd;
-
- LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
- CDEBUG(D_INFO, "putref res: %p count: %d\n",
- res, atomic_read(&res->lr_refcount) - 1);
-
- cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
- if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
- __ldlm_resource_putref_final(&bd, res);
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
- ns->ns_lvbo->lvbo_free(res);
- OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(ldlm_resource_putref);
-
-/* Returns 1 if the resource was freed, 0 if it remains. */
-int ldlm_resource_putref_locked(struct ldlm_resource *res)
-{
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-
- LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
- CDEBUG(D_INFO, "putref res: %p count: %d\n",
- res, atomic_read(&res->lr_refcount) - 1);
-
- if (atomic_dec_and_test(&res->lr_refcount)) {
- struct cfs_hash_bd bd;
-
- cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
- &res->lr_name, &bd);
- __ldlm_resource_putref_final(&bd, res);
- cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
-		/* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
-		 * so we can never get here via cfs_hash_del();
-		 * cfs_hash_for_each_nolock is the only path that reaches
-		 * this point, and for it releasing cfs_hash_bd_lock is safe.
-		 */
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
- ns->ns_lvbo->lvbo_free(res);
- OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
-
- cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
- return 1;
- }
- return 0;
-}
-
-/**
- * Add a lock into a given resource into specified lock list.
- */
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
- struct ldlm_lock *lock)
-{
- check_res_locked(res);
-
- LDLM_DEBUG(lock, "About to add this lock:\n");
-
- if (lock->l_flags & LDLM_FL_DESTROYED) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- return;
- }
-
- LASSERT(list_empty(&lock->l_res_link));
-
- list_add_tail(&lock->l_res_link, head);
-}
-
-/**
- * Insert a lock into resource after specified lock.
- *
- * Obtain resource description from the lock we are inserting after.
- */
-void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
- struct ldlm_lock *new)
-{
- struct ldlm_resource *res = original->l_resource;
-
- check_res_locked(res);
-
- ldlm_resource_dump(D_INFO, res);
- LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
-
- if (new->l_flags & LDLM_FL_DESTROYED) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- goto out;
- }
-
- LASSERT(list_empty(&new->l_res_link));
-
- list_add(&new->l_res_link, &original->l_res_link);
- out:;
-}
-
-void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
-{
- int type = lock->l_resource->lr_type;
-
- check_res_locked(lock->l_resource);
- if (type == LDLM_IBITS || type == LDLM_PLAIN)
- ldlm_unlink_lock_skiplist(lock);
- else if (type == LDLM_EXTENT)
- ldlm_extent_unlink_lock(lock);
- list_del_init(&lock->l_res_link);
-}
-EXPORT_SYMBOL(ldlm_resource_unlink_lock);
-
-void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
-{
- desc->lr_type = res->lr_type;
- desc->lr_name = res->lr_name;
-}
-
-/**
- * Print information about all locks in all namespaces on this node to debug
- * log.
- */
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
-{
- struct list_head *tmp;
-
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- mutex_lock(ldlm_namespace_lock(client));
-
- list_for_each(tmp, ldlm_namespace_list(client)) {
- struct ldlm_namespace *ns;
-
- ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
- ldlm_namespace_dump(level, ns);
- }
-
- mutex_unlock(ldlm_namespace_lock(client));
-}
-EXPORT_SYMBOL(ldlm_dump_all_namespaces);
-
-static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *arg)
-{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- int level = (int)(unsigned long)arg;
-
- lock_res(res);
- ldlm_resource_dump(level, res);
- unlock_res(res);
-
- return 0;
-}
-
-/**
- * Print information about all locks in this namespace on this node to debug
- * log.
- */
-void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
-{
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
- ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
- ns_is_client(ns) ? "client" : "server");
-
- if (time_before(cfs_time_current(), ns->ns_next_dump))
- return;
-
- cfs_hash_for_each_nolock(ns->ns_rs_hash,
- ldlm_res_hash_dump,
- (void *)(unsigned long)level);
- spin_lock(&ns->ns_lock);
- ns->ns_next_dump = cfs_time_shift(10);
- spin_unlock(&ns->ns_lock);
-}
-EXPORT_SYMBOL(ldlm_namespace_dump);
-
-/**
- * Print information about all locks in this resource to debug log.
- */
-void ldlm_resource_dump(int level, struct ldlm_resource *res)
-{
- struct ldlm_lock *lock;
- unsigned int granted = 0;
-
- CLASSERT(RES_NAME_SIZE == 4);
-
- if (!((libcfs_debug | D_ERROR) & level))
- return;
-
- CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
- PLDLMRES(res), res, atomic_read(&res->lr_refcount));
-
- if (!list_empty(&res->lr_granted)) {
- CDEBUG(level, "Granted locks (in reverse order):\n");
- list_for_each_entry_reverse(lock, &res->lr_granted,
- l_res_link) {
- LDLM_DEBUG_LIMIT(level, lock, "###");
- if (!(level & D_CANTMASK) &&
- ++granted > ldlm_dump_granted_max) {
- CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
- granted);
- break;
- }
- }
- }
- if (!list_empty(&res->lr_converting)) {
- CDEBUG(level, "Converting locks:\n");
- list_for_each_entry(lock, &res->lr_converting, l_res_link)
- LDLM_DEBUG_LIMIT(level, lock, "###");
- }
- if (!list_empty(&res->lr_waiting)) {
- CDEBUG(level, "Waiting locks:\n");
- list_for_each_entry(lock, &res->lr_waiting, l_res_link)
- LDLM_DEBUG_LIMIT(level, lock, "###");
- }
-}
diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lustre/libcfs/Makefile
deleted file mode 100644
index ec98f44a10dd..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += libcfs.o
-
-libcfs-linux-objs := linux-tracefile.o linux-debug.o
-libcfs-linux-objs += linux-prim.o linux-cpu.o
-libcfs-linux-objs += linux-curproc.o
-libcfs-linux-objs += linux-module.o
-libcfs-linux-objs += linux-crypto.o
-libcfs-linux-objs += linux-crypto-adler.o
-libcfs-linux-objs += linux-mem.o
-
-libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
-
-libcfs-all-objs := debug.o fail.o nidstrings.o module.o tracefile.o \
- libcfs_string.o hash.o kernel_user_comm.o \
- prng.o workitem.o libcfs_cpu.o \
- libcfs_mem.o libcfs_lock.o
-
-libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs)
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
deleted file mode 100644
index ae325f750eeb..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/debug.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- *
- */
-
-# define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "tracefile.h"
-
-static char debug_file_name[1024];
-
-unsigned int libcfs_subsystem_debug = ~0;
-module_param(libcfs_subsystem_debug, int, 0644);
-MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask");
-EXPORT_SYMBOL(libcfs_subsystem_debug);
-
-unsigned int libcfs_debug = (D_CANTMASK |
- D_NETERROR | D_HA | D_CONFIG | D_IOCTL);
-module_param(libcfs_debug, int, 0644);
-MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
-EXPORT_SYMBOL(libcfs_debug);
-
-static int libcfs_param_debug_mb_set(const char *val,
- const struct kernel_param *kp)
-{
- int rc;
- unsigned num;
-
- rc = kstrtouint(val, 0, &num);
- if (rc < 0)
- return rc;
-
- if (!*((unsigned int *)kp->arg)) {
- *((unsigned int *)kp->arg) = num;
- return 0;
- }
-
- rc = cfs_trace_set_debug_mb(num);
-
- if (!rc)
- *((unsigned int *)kp->arg) = cfs_trace_get_debug_mb();
-
- return rc;
-}
-
-/* Although the debug_mb setting looks like a plain unsigned int, it needs
- * quite a bit of extra processing, so we define a special debugmb parameter
- * type with corresponding methods to handle this case. */
-static struct kernel_param_ops param_ops_debugmb = {
- .set = libcfs_param_debug_mb_set,
- .get = param_get_uint,
-};
-
-#define param_check_debugmb(name, p) \
- __param_check(name, p, unsigned int)
-
-static unsigned int libcfs_debug_mb;
-module_param(libcfs_debug_mb, debugmb, 0644);
-MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
-EXPORT_SYMBOL(libcfs_debug_mb);
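Editorial note: libcfs_debug_mb shows the standard way to give a module parameter non-trivial set semantics: a custom struct kernel_param_ops plus a param_check_*() macro. The same effect can be had with module_param_cb(); the sketch below is a hedged illustration of that alternative, where the parameter name, bounds, and clamping rule are invented and not part of libcfs.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static unsigned int example_mb = 16;

	static int example_mb_set(const char *val, const struct kernel_param *kp)
	{
		unsigned int num;
		int rc = kstrtouint(val, 0, &num);

		if (rc)
			return rc;
		if (num < 1 || num > 1024)	/* arbitrary bounds for the sketch */
			return -EINVAL;
		*(unsigned int *)kp->arg = num;
		return 0;
	}

	static struct kernel_param_ops example_mb_ops = {
		.set = example_mb_set,
		.get = param_get_uint,
	};

	module_param_cb(example_mb, &example_mb_ops, &example_mb, 0644);
	MODULE_PARM_DESC(example_mb, "example buffer size in MiB (illustrative sketch)");
	MODULE_LICENSE("GPL");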
-
-unsigned int libcfs_printk = D_CANTMASK;
-module_param(libcfs_printk, uint, 0644);
-MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask");
-EXPORT_SYMBOL(libcfs_printk);
-
-unsigned int libcfs_console_ratelimit = 1;
-module_param(libcfs_console_ratelimit, uint, 0644);
-MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)");
-EXPORT_SYMBOL(libcfs_console_ratelimit);
-
-static int param_set_delay_minmax(const char *val,
- const struct kernel_param *kp,
- long min, long max)
-{
- long d;
- int sec;
- int rc;
-
- rc = kstrtoint(val, 0, &sec);
- if (rc)
- return -EINVAL;
-
- d = cfs_time_seconds(sec) / 100;
- if (d < min || d > max)
- return -EINVAL;
-
- *((unsigned int *)kp->arg) = d;
-
- return 0;
-}
-
-static int param_get_delay(char *buffer, const struct kernel_param *kp)
-{
- unsigned int d = *(unsigned int *)kp->arg;
-
- return sprintf(buffer, "%u", (unsigned int)cfs_duration_sec(d * 100));
-}
-
-unsigned int libcfs_console_max_delay;
-EXPORT_SYMBOL(libcfs_console_max_delay);
-unsigned int libcfs_console_min_delay;
-EXPORT_SYMBOL(libcfs_console_min_delay);
-
-static int param_set_console_max_delay(const char *val,
- const struct kernel_param *kp)
-{
- return param_set_delay_minmax(val, kp,
- libcfs_console_min_delay, INT_MAX);
-}
-
-static struct kernel_param_ops param_ops_console_max_delay = {
- .set = param_set_console_max_delay,
- .get = param_get_delay,
-};
-
-#define param_check_console_max_delay(name, p) \
- __param_check(name, p, unsigned int)
-
-module_param(libcfs_console_max_delay, console_max_delay, 0644);
-MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
-
-static int param_set_console_min_delay(const char *val,
- const struct kernel_param *kp)
-{
- return param_set_delay_minmax(val, kp,
- 1, libcfs_console_max_delay);
-}
-
-static struct kernel_param_ops param_ops_console_min_delay = {
- .set = param_set_console_min_delay,
- .get = param_get_delay,
-};
-
-#define param_check_console_min_delay(name, p) \
- __param_check(name, p, unsigned int)
-
-module_param(libcfs_console_min_delay, console_min_delay, 0644);
-MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
-
-static int param_set_uint_minmax(const char *val,
- const struct kernel_param *kp,
- unsigned int min, unsigned int max)
-{
- unsigned int num;
- int ret;
-
- if (!val)
- return -EINVAL;
- ret = kstrtouint(val, 0, &num);
- if (ret < 0 || num < min || num > max)
- return -EINVAL;
- *((unsigned int *)kp->arg) = num;
- return 0;
-}
-
-static int param_set_uintpos(const char *val, const struct kernel_param *kp)
-{
- return param_set_uint_minmax(val, kp, 1, -1);
-}
-
-static struct kernel_param_ops param_ops_uintpos = {
- .set = param_set_uintpos,
- .get = param_get_uint,
-};
-
-#define param_check_uintpos(name, p) \
- __param_check(name, p, unsigned int)
-
-unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
-module_param(libcfs_console_backoff, uintpos, 0644);
-MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor");
-EXPORT_SYMBOL(libcfs_console_backoff);
-
-unsigned int libcfs_debug_binary = 1;
-EXPORT_SYMBOL(libcfs_debug_binary);
-
-unsigned int libcfs_stack = 3 * THREAD_SIZE / 4;
-EXPORT_SYMBOL(libcfs_stack);
-
-unsigned int libcfs_catastrophe;
-EXPORT_SYMBOL(libcfs_catastrophe);
-
-unsigned int libcfs_panic_on_lbug = 1;
-module_param(libcfs_panic_on_lbug, uint, 0644);
-MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG");
-EXPORT_SYMBOL(libcfs_panic_on_lbug);
-
-static wait_queue_head_t debug_ctlwq;
-
-char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
-
-/* We need to pass a pointer here, but elsewhere this must be a const */
-static char *libcfs_debug_file_path;
-module_param(libcfs_debug_file_path, charp, 0644);
-MODULE_PARM_DESC(libcfs_debug_file_path,
- "Path for dumping debug logs, set 'NONE' to prevent log dumping");
-
-int libcfs_panic_in_progress;
-
-/* libcfs_debug_token2mask() expects the returned
- * string in lower-case */
-static const char *
-libcfs_debug_subsys2str(int subsys)
-{
- switch (1 << subsys) {
- default:
- return NULL;
- case S_UNDEFINED:
- return "undefined";
- case S_MDC:
- return "mdc";
- case S_MDS:
- return "mds";
- case S_OSC:
- return "osc";
- case S_OST:
- return "ost";
- case S_CLASS:
- return "class";
- case S_LOG:
- return "log";
- case S_LLITE:
- return "llite";
- case S_RPC:
- return "rpc";
- case S_LNET:
- return "lnet";
- case S_LND:
- return "lnd";
- case S_PINGER:
- return "pinger";
- case S_FILTER:
- return "filter";
- case S_ECHO:
- return "echo";
- case S_LDLM:
- return "ldlm";
- case S_LOV:
- return "lov";
- case S_LQUOTA:
- return "lquota";
- case S_OSD:
- return "osd";
- case S_LMV:
- return "lmv";
- case S_SEC:
- return "sec";
- case S_GSS:
- return "gss";
- case S_MGC:
- return "mgc";
- case S_MGS:
- return "mgs";
- case S_FID:
- return "fid";
- case S_FLD:
- return "fld";
- }
-}
-
-/* libcfs_debug_token2mask() expects the returned
- * string in lower-case */
-static const char *
-libcfs_debug_dbg2str(int debug)
-{
- switch (1 << debug) {
- default:
- return NULL;
- case D_TRACE:
- return "trace";
- case D_INODE:
- return "inode";
- case D_SUPER:
- return "super";
- case D_EXT2:
- return "ext2";
- case D_MALLOC:
- return "malloc";
- case D_CACHE:
- return "cache";
- case D_INFO:
- return "info";
- case D_IOCTL:
- return "ioctl";
- case D_NETERROR:
- return "neterror";
- case D_NET:
- return "net";
- case D_WARNING:
- return "warning";
- case D_BUFFS:
- return "buffs";
- case D_OTHER:
- return "other";
- case D_DENTRY:
- return "dentry";
- case D_NETTRACE:
- return "nettrace";
- case D_PAGE:
- return "page";
- case D_DLMTRACE:
- return "dlmtrace";
- case D_ERROR:
- return "error";
- case D_EMERG:
- return "emerg";
- case D_HA:
- return "ha";
- case D_RPCTRACE:
- return "rpctrace";
- case D_VFSTRACE:
- return "vfstrace";
- case D_READA:
- return "reada";
- case D_MMAP:
- return "mmap";
- case D_CONFIG:
- return "config";
- case D_CONSOLE:
- return "console";
- case D_QUOTA:
- return "quota";
- case D_SEC:
- return "sec";
- case D_LFSCK:
- return "lfsck";
- }
-}
-
-int
-libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
- libcfs_debug_dbg2str;
- int len = 0;
- const char *token;
- int i;
-
- if (mask == 0) { /* "0" */
- if (size > 0)
- str[0] = '0';
- len = 1;
- } else { /* space-separated tokens */
- for (i = 0; i < 32; i++) {
- if ((mask & (1 << i)) == 0)
- continue;
-
- token = fn(i);
- if (token == NULL) /* unused bit */
- continue;
-
- if (len > 0) { /* separator? */
- if (len < size)
- str[len] = ' ';
- len++;
- }
-
- while (*token != 0) {
- if (len < size)
- str[len] = *token;
- token++;
- len++;
- }
- }
- }
-
- /* terminate 'str' */
- if (len < size)
- str[len] = 0;
- else
- str[size - 1] = 0;
-
- return len;
-}
-
-int
-libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
- libcfs_debug_dbg2str;
- int m = 0;
- int matched;
- int n;
- int t;
-
- /* Allow a number for backwards compatibility */
-
- for (n = strlen(str); n > 0; n--)
- if (!isspace(str[n-1]))
- break;
- matched = n;
- t = sscanf(str, "%i%n", &m, &matched);
- if (t >= 1 && matched == n) {
- /* don't print warning for lctl set_param debug=0 or -1 */
- if (m != 0 && m != -1)
- CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
- *mask = m;
- return 0;
- }
-
- return cfs_str2mask(str, fn, mask, is_subsys ? 0 : D_CANTMASK,
- 0xffffffff);
-}
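Editorial note: libcfs_debug_str2mask() uses the sscanf("%i%n") idiom to decide whether the whole right-trimmed string is a plain number before falling back to token parsing. A small userspace sketch of just that decision (helper name invented; the token fallback itself is omitted):

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	static int parse_numeric_mask(const char *str, int *mask)
	{
		int m = 0, matched = 0, n;

		for (n = strlen(str); n > 0; n--)	/* ignore trailing whitespace */
			if (!isspace((unsigned char)str[n - 1]))
				break;

		if (sscanf(str, "%i%n", &m, &matched) >= 1 && matched == n) {
			*mask = m;			/* the whole string was a number */
			return 0;
		}
		return -1;	/* caller falls back to token parsing (not shown) */
	}

	int main(void)
	{
		int mask = 0;

		printf("%d\n", parse_numeric_mask("0x42  ", &mask));	/* 0, mask = 0x42 */
		printf("%d\n", parse_numeric_mask("error ha", &mask));	/* -1: token list */
		return 0;
	}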
-
-/**
- * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages()
- */
-void libcfs_debug_dumplog_internal(void *arg)
-{
- void *journal_info;
-
- journal_info = current->journal_info;
- current->journal_info = NULL;
-
- if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
- snprintf(debug_file_name, sizeof(debug_file_name) - 1,
- "%s.%lld.%ld", libcfs_debug_file_path_arr,
- (s64)ktime_get_real_seconds(), (long_ptr_t)arg);
- pr_alert("LustreError: dumping log to %s\n",
- debug_file_name);
- cfs_tracefile_dump_all_pages(debug_file_name);
- libcfs_run_debug_log_upcall(debug_file_name);
- }
-
- current->journal_info = journal_info;
-}
-
-static int libcfs_debug_dumplog_thread(void *arg)
-{
- libcfs_debug_dumplog_internal(arg);
- wake_up(&debug_ctlwq);
- return 0;
-}
-
-void libcfs_debug_dumplog(void)
-{
- wait_queue_t wait;
- struct task_struct *dumper;
-
- /* we're being careful to ensure that the kernel thread is
- * able to set our state to running as it exits before we
- * get to schedule() */
- init_waitqueue_entry(&wait, current);
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&debug_ctlwq, &wait);
-
- dumper = kthread_run(libcfs_debug_dumplog_thread,
- (void *)(long)current_pid(),
- "libcfs_debug_dumper");
- if (IS_ERR(dumper))
- pr_err("LustreError: cannot start log dump thread: %ld\n",
- PTR_ERR(dumper));
- else
- schedule();
-
-	/* be sure to tear down if kthread_run() failed */
- remove_wait_queue(&debug_ctlwq, &wait);
- set_current_state(TASK_RUNNING);
-}
-EXPORT_SYMBOL(libcfs_debug_dumplog);
-
-int libcfs_debug_init(unsigned long bufsize)
-{
- int rc = 0;
- unsigned int max = libcfs_debug_mb;
-
- init_waitqueue_head(&debug_ctlwq);
-
- if (libcfs_console_max_delay <= 0 || /* not set by user or */
- libcfs_console_min_delay <= 0 || /* set to invalid values */
- libcfs_console_min_delay >= libcfs_console_max_delay) {
- libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
- libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
- }
-
- if (libcfs_debug_file_path != NULL) {
- strncpy(libcfs_debug_file_path_arr,
- libcfs_debug_file_path, PATH_MAX-1);
- libcfs_debug_file_path_arr[PATH_MAX - 1] = '\0';
- }
-
- /* If libcfs_debug_mb is set to an invalid value or uninitialized
- * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
- if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) {
- max = TCD_MAX_PAGES;
- } else {
- max = max / num_possible_cpus();
- max <<= (20 - PAGE_CACHE_SHIFT);
- }
- rc = cfs_tracefile_init(max);
-
- if (rc == 0) {
- libcfs_register_panic_notifier();
- libcfs_debug_mb = cfs_trace_get_debug_mb();
- }
-
- return rc;
-}
-
-int libcfs_debug_cleanup(void)
-{
- libcfs_unregister_panic_notifier();
- cfs_tracefile_exit();
- return 0;
-}
-
-int libcfs_debug_clear_buffer(void)
-{
- cfs_trace_flush_pages();
- return 0;
-}
-
-/* Debug markers, although printed by S_LNET,
- * should not be marked as such. */
-#undef DEBUG_SUBSYSTEM
-#define DEBUG_SUBSYSTEM S_UNDEFINED
-int libcfs_debug_mark_buffer(const char *text)
-{
- CDEBUG(D_TRACE,
- "***************************************************\n");
- LCONSOLE(D_WARNING, "DEBUG MARKER: %s\n", text);
- CDEBUG(D_TRACE,
- "***************************************************\n");
-
- return 0;
-}
-#undef DEBUG_SUBSYSTEM
-#define DEBUG_SUBSYSTEM S_LNET
-
-void libcfs_debug_set_level(unsigned int debug_level)
-{
- pr_warn("Lustre: Setting portals debug level to %08x\n",
- debug_level);
- libcfs_debug = debug_level;
-}
-
-EXPORT_SYMBOL(libcfs_debug_set_level);
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
deleted file mode 100644
index d39fecebd12d..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Oracle Corporation, Inc.
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-unsigned long cfs_fail_loc;
-EXPORT_SYMBOL(cfs_fail_loc);
-
-unsigned int cfs_fail_val;
-EXPORT_SYMBOL(cfs_fail_val);
-
-DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq);
-EXPORT_SYMBOL(cfs_race_waitq);
-
-int cfs_race_state;
-EXPORT_SYMBOL(cfs_race_state);
-
-int __cfs_fail_check_set(__u32 id, __u32 value, int set)
-{
- static atomic_t cfs_fail_count = ATOMIC_INIT(0);
-
- LASSERT(!(id & CFS_FAIL_ONCE));
-
- if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
- (CFS_FAILED | CFS_FAIL_ONCE)) {
- atomic_set(&cfs_fail_count, 0); /* paranoia */
- return 0;
- }
-
- /* Fail 1/cfs_fail_val times */
- if (cfs_fail_loc & CFS_FAIL_RAND) {
- if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
- return 0;
- }
-
- /* Skip the first cfs_fail_val, then fail */
- if (cfs_fail_loc & CFS_FAIL_SKIP) {
- if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
- return 0;
- }
-
- /* check cfs_fail_val... */
- if (set == CFS_FAIL_LOC_VALUE) {
- if (cfs_fail_val != -1 && cfs_fail_val != value)
- return 0;
- }
-
- /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
- if (cfs_fail_loc & CFS_FAIL_SOME &&
- (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
- int count = atomic_inc_return(&cfs_fail_count);
-
- if (count >= cfs_fail_val) {
- set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
- atomic_set(&cfs_fail_count, 0);
-			/* we lost the race to increment the counter */
- if (count > cfs_fail_val)
- return 0;
- }
- }
-
- if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
- (value & CFS_FAIL_ONCE))
- set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
- /* Lost race to set CFS_FAILED_BIT. */
- if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
-		/* If CFS_FAIL_ONCE is set, only one process can fail;
-		 * otherwise multiple processes can fail at the same time. */
- if (cfs_fail_loc & CFS_FAIL_ONCE)
- return 0;
- }
-
- switch (set) {
- case CFS_FAIL_LOC_NOSET:
- case CFS_FAIL_LOC_VALUE:
- break;
- case CFS_FAIL_LOC_ORSET:
- cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
- break;
- case CFS_FAIL_LOC_RESET:
- cfs_fail_loc = value;
- break;
- default:
- LASSERTF(0, "called with bad set %u\n", set);
- break;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(__cfs_fail_check_set);
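Editorial note: the CFS_FAIL_RAND branch above injects a failure on roughly one call in cfs_fail_val. A toy userspace sketch of just that probabilistic check (names invented; the real code also handles the SKIP, SOME, and ONCE modes shown above):

	#include <stdio.h>
	#include <stdlib.h>

	static unsigned long fail_val = 4;		/* fail roughly 1 call in 4 */

	static int fail_check_rand(void)
	{
		if (fail_val < 2)
			return 0;			/* too small to be meaningful */
		return rand() % fail_val == 0;		/* 1/fail_val chance of failing */
	}

	int main(void)
	{
		int i, hits = 0;

		srand(1);
		for (i = 0; i < 1000; i++)
			hits += fail_check_rand();
		printf("injected %d failures out of 1000 calls\n", hits);
		return 0;
	}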
-
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
-{
- int ret;
-
- ret = __cfs_fail_check_set(id, value, set);
- if (ret) {
- CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
- id, ms);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ms) / 1000);
- CERROR("cfs_fail_timeout id %x awake\n", id);
- }
- return ret;
-}
-EXPORT_SYMBOL(__cfs_fail_timeout_set);
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
deleted file mode 100644
index 6f4c7d47fa33..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ /dev/null
@@ -1,2102 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/hash.c
- *
- * Implement a hash class for hash process in lustre system.
- *
- * Author: YuZhangyong <yzy@clusterfs.com>
- *
- * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
- * - Simplified API and improved documentation
- * - Added per-hash feature flags:
- * * CFS_HASH_DEBUG additional validation
- * * CFS_HASH_REHASH dynamic rehashing
- * - Added per-hash statistics
- * - General performance enhancements
- *
- * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
- * - move all stuff to libcfs
- * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
- * - ignore hs_rwlock if without CFS_HASH_REHASH setting
- *   - buckets are allocated one by one (instead of as contiguous memory),
- * to avoid unnecessary cacheline conflict
- *
- * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
- * - "bucket" is a group of hlist_head now, user can specify bucket size
- * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
- * one lock for reducing memory overhead.
- *
- * - support lockless hash, caller will take care of locks:
- * avoid lock overhead for hash tables that are already protected
- * by locking in the caller for another reason
- *
- * - support both spin_lock/rwlock for bucket:
- * overhead of spinlock contention is lower than read/write
- * contention of rwlock, so using spinlock to serialize operations on
- * bucket is more reasonable for those frequently changed hash tables
- *
- * - support one-single lock mode:
- * one lock to protect all hash operations to avoid overhead of
- * multiple locks if hash table is always small
- *
- * - removed a lot of unnecessary addref & decref on hash element:
- * addref & decref are atomic operations in many use-cases which
- * are expensive.
- *
- * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
- * some lustre use-cases require these functions to be strictly
- * non-blocking, we need to schedule required rehash on a different
- * thread on those cases.
- *
- * - safer rehash on large hash table
- *   In the old implementation, the rehash function would exclusively lock
- *   the hash table and finish the rehash in one batch, which is dangerous
- *   on SMP systems because rehashing millions of elements can take a long
- *   time. The new implementation can release the lock and relax the CPU in
- *   the middle of a rehash, so it is safe for another thread to search or
- *   modify the hash table even while it is rehashing.
- *
- * - support two different refcount modes
- * . hash table has refcount on element
- * . hash table doesn't change refcount on adding/removing element
- *
- * - support long name hash table (for param-tree)
- *
- * - fix a bug for cfs_hash_rehash_key:
- * in old implementation, cfs_hash_rehash_key could screw up the
- * hash-table because @key is overwritten without any protection.
- * Now we need user to define hs_keycpy for those rehash enabled
- * hash tables, cfs_hash_rehash_key will overwrite hash-key
- * inside lock by calling hs_keycpy.
- *
- * - better hash iteration:
- *   Now we support both locked and lockless iteration of the hash
- *   table. Also, the user can break the iteration by returning 1 from
- *   the callback.
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/seq_file.h>
-
-#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static unsigned int warn_on_depth = 8;
-module_param(warn_on_depth, uint, 0644);
-MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
-#endif
-
-struct cfs_wi_sched *cfs_sched_rehash;
-
-static inline void
-cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
-
-static inline void
-cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
-
-static inline void
-cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
- __acquires(&lock->spin)
-{
- spin_lock(&lock->spin);
-}
-
-static inline void
-cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
- __releases(&lock->spin)
-{
- spin_unlock(&lock->spin);
-}
-
-static inline void
-cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
- __acquires(&lock->rw)
-{
- if (!exclusive)
- read_lock(&lock->rw);
- else
- write_lock(&lock->rw);
-}
-
-static inline void
-cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
- __releases(&lock->rw)
-{
- if (!exclusive)
- read_unlock(&lock->rw);
- else
- write_unlock(&lock->rw);
-}
-
-/** No lock hash */
-static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
- .hs_lock = cfs_hash_nl_lock,
- .hs_unlock = cfs_hash_nl_unlock,
- .hs_bkt_lock = cfs_hash_nl_lock,
- .hs_bkt_unlock = cfs_hash_nl_unlock,
-};
-
-/** no bucket lock, one spinlock to protect everything */
-static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
- .hs_lock = cfs_hash_spin_lock,
- .hs_unlock = cfs_hash_spin_unlock,
- .hs_bkt_lock = cfs_hash_nl_lock,
- .hs_bkt_unlock = cfs_hash_nl_unlock,
-};
-
-/** spin bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
- .hs_lock = cfs_hash_rw_lock,
- .hs_unlock = cfs_hash_rw_unlock,
- .hs_bkt_lock = cfs_hash_spin_lock,
- .hs_bkt_unlock = cfs_hash_spin_unlock,
-};
-
-/** rw bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
- .hs_lock = cfs_hash_rw_lock,
- .hs_unlock = cfs_hash_rw_unlock,
- .hs_bkt_lock = cfs_hash_rw_lock,
- .hs_bkt_unlock = cfs_hash_rw_unlock,
-};
-
-/** spin bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
- .hs_lock = cfs_hash_nl_lock,
- .hs_unlock = cfs_hash_nl_unlock,
- .hs_bkt_lock = cfs_hash_spin_lock,
- .hs_bkt_unlock = cfs_hash_spin_unlock,
-};
-
-/** rw bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
- .hs_lock = cfs_hash_nl_lock,
- .hs_unlock = cfs_hash_nl_unlock,
- .hs_bkt_lock = cfs_hash_rw_lock,
- .hs_bkt_unlock = cfs_hash_rw_unlock,
-};
-
-static void
-cfs_hash_lock_setup(struct cfs_hash *hs)
-{
- if (cfs_hash_with_no_lock(hs)) {
- hs->hs_lops = &cfs_hash_nl_lops;
-
- } else if (cfs_hash_with_no_bktlock(hs)) {
- hs->hs_lops = &cfs_hash_nbl_lops;
- spin_lock_init(&hs->hs_lock.spin);
-
- } else if (cfs_hash_with_rehash(hs)) {
- rwlock_init(&hs->hs_lock.rw);
-
- if (cfs_hash_with_rw_bktlock(hs))
- hs->hs_lops = &cfs_hash_bkt_rw_lops;
- else if (cfs_hash_with_spin_bktlock(hs))
- hs->hs_lops = &cfs_hash_bkt_spin_lops;
- else
- LBUG();
- } else {
- if (cfs_hash_with_rw_bktlock(hs))
- hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
- else if (cfs_hash_with_spin_bktlock(hs))
- hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
- else
- LBUG();
- }
-}
-
-/**
- * Simple hash head without depth tracking
- * new element is always added to head of hlist
- */
-typedef struct {
- struct hlist_head hh_head; /**< entries list */
-} cfs_hash_head_t;
-
-static int
-cfs_hash_hh_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(cfs_hash_head_t);
-}
-
-static struct hlist_head *
-cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
-
- return &head[bd->bd_offset].hh_head;
-}
-
-static int
-cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
- return -1; /* unknown depth */
-}
-
-static int
-cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- hlist_del_init(hnode);
- return -1; /* unknown depth */
-}
-
-/**
- * Simple hash head with depth tracking
- * new element is always added to head of hlist
- */
-typedef struct {
- struct hlist_head hd_head; /**< entries list */
- unsigned int hd_depth; /**< list length */
-} cfs_hash_head_dep_t;
-
-static int
-cfs_hash_hd_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(cfs_hash_head_dep_t);
-}
-
-static struct hlist_head *
-cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- cfs_hash_head_dep_t *head;
-
- head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].hd_head;
-}
-
-static int
-cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
- cfs_hash_head_dep_t, hd_head);
- hlist_add_head(hnode, &hh->hd_head);
- return ++hh->hd_depth;
-}
-
-static int
-cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
- cfs_hash_head_dep_t, hd_head);
- hlist_del_init(hnode);
- return --hh->hd_depth;
-}
-
-/**
- * doubly linked hash head without depth tracking;
- * new elements are always added to the tail of the hlist
- */
-typedef struct {
- struct hlist_head dh_head; /**< entries list */
- struct hlist_node *dh_tail; /**< the last entry */
-} cfs_hash_dhead_t;
-
-static int
-cfs_hash_dh_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(cfs_hash_dhead_t);
-}
-
-static struct hlist_head *
-cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- cfs_hash_dhead_t *head;
-
- head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].dh_head;
-}
-
-static int
-cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
- cfs_hash_dhead_t, dh_head);
-
- if (dh->dh_tail != NULL) /* not empty */
- hlist_add_behind(hnode, dh->dh_tail);
- else /* empty list */
- hlist_add_head(hnode, &dh->dh_head);
- dh->dh_tail = hnode;
- return -1; /* unknown depth */
-}
-
-static int
-cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnd)
-{
- cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
- cfs_hash_dhead_t, dh_head);
-
- if (hnd->next == NULL) { /* it's the tail */
- dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
- container_of(hnd->pprev, struct hlist_node, next);
- }
- hlist_del_init(hnd);
- return -1; /* unknown depth */
-}
-
-/**
- * doubly linked hash head with depth tracking;
- * new elements are always added to the tail of the hlist
- */
-typedef struct {
- struct hlist_head dd_head; /**< entries list */
- struct hlist_node *dd_tail; /**< the last entry */
- unsigned int dd_depth; /**< list length */
-} cfs_hash_dhead_dep_t;
-
-static int
-cfs_hash_dd_hhead_size(struct cfs_hash *hs)
-{
- return sizeof(cfs_hash_dhead_dep_t);
-}
-
-static struct hlist_head *
-cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
-{
- cfs_hash_dhead_dep_t *head;
-
- head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].dd_head;
-}
-
-static int
-cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
- cfs_hash_dhead_dep_t, dd_head);
-
- if (dh->dd_tail != NULL) /* not empty */
- hlist_add_behind(hnode, dh->dd_tail);
- else /* empty list */
- hlist_add_head(hnode, &dh->dd_head);
- dh->dd_tail = hnode;
- return ++dh->dd_depth;
-}
-
-static int
-cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnd)
-{
- cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
- cfs_hash_dhead_dep_t, dd_head);
-
- if (hnd->next == NULL) { /* it's the tail */
- dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
- container_of(hnd->pprev, struct hlist_node, next);
- }
- hlist_del_init(hnd);
- return --dh->dd_depth;
-}
-
-static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
- .hop_hhead = cfs_hash_hh_hhead,
- .hop_hhead_size = cfs_hash_hh_hhead_size,
- .hop_hnode_add = cfs_hash_hh_hnode_add,
- .hop_hnode_del = cfs_hash_hh_hnode_del,
-};
-
-static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
- .hop_hhead = cfs_hash_hd_hhead,
- .hop_hhead_size = cfs_hash_hd_hhead_size,
- .hop_hnode_add = cfs_hash_hd_hnode_add,
- .hop_hnode_del = cfs_hash_hd_hnode_del,
-};
-
-static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
- .hop_hhead = cfs_hash_dh_hhead,
- .hop_hhead_size = cfs_hash_dh_hhead_size,
- .hop_hnode_add = cfs_hash_dh_hnode_add,
- .hop_hnode_del = cfs_hash_dh_hnode_del,
-};
-
-static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
- .hop_hhead = cfs_hash_dd_hhead,
- .hop_hhead_size = cfs_hash_dd_hhead_size,
- .hop_hnode_add = cfs_hash_dd_hnode_add,
- .hop_hnode_del = cfs_hash_dd_hnode_del,
-};
-
-static void
-cfs_hash_hlist_setup(struct cfs_hash *hs)
-{
- if (cfs_hash_with_add_tail(hs)) {
- hs->hs_hops = cfs_hash_with_depth(hs) ?
- &cfs_hash_dd_hops : &cfs_hash_dh_hops;
- } else {
- hs->hs_hops = cfs_hash_with_depth(hs) ?
- &cfs_hash_hd_hops : &cfs_hash_hh_hops;
- }
-}
-
-static void
-cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
- unsigned int bits, const void *key, struct cfs_hash_bd *bd)
-{
- unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
-
- LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
-
- bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
- bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
-}
-
-void
-cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
-{
- /* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (likely(hs->hs_rehash_buckets == NULL)) {
- cfs_hash_bd_from_key(hs, hs->hs_buckets,
- hs->hs_cur_bits, key, bd);
- } else {
- LASSERT(hs->hs_rehash_bits != 0);
- cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
- hs->hs_rehash_bits, key, bd);
- }
-}
-EXPORT_SYMBOL(cfs_hash_bd_get);
-
-static inline void
-cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
-{
- if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
- return;
-
- bd->bd_bucket->hsb_depmax = dep_cur;
-# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
- if (likely(warn_on_depth == 0 ||
- max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
- return;
-
- spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max = dep_cur;
- hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
- hs->hs_dep_off = bd->bd_offset;
- hs->hs_dep_bits = hs->hs_cur_bits;
- spin_unlock(&hs->hs_dep_lock);
-
- cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
-# endif
-}
-
-void
-cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- int rc;
-
- rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
- cfs_hash_bd_dep_record(hs, bd, rc);
- bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
- bd->bd_bucket->hsb_version++;
- bd->bd_bucket->hsb_count++;
-
- if (cfs_hash_with_counter(hs))
- atomic_inc(&hs->hs_count);
- if (!cfs_hash_with_no_itemref(hs))
- cfs_hash_get(hs, hnode);
-}
-EXPORT_SYMBOL(cfs_hash_bd_add_locked);
-
-void
-cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode)
-{
- hs->hs_hops->hop_hnode_del(hs, bd, hnode);
-
- LASSERT(bd->bd_bucket->hsb_count > 0);
- bd->bd_bucket->hsb_count--;
- bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
- bd->bd_bucket->hsb_version++;
-
- if (cfs_hash_with_counter(hs)) {
- LASSERT(atomic_read(&hs->hs_count) > 0);
- atomic_dec(&hs->hs_count);
- }
- if (!cfs_hash_with_no_itemref(hs))
- cfs_hash_put_locked(hs, hnode);
-}
-EXPORT_SYMBOL(cfs_hash_bd_del_locked);
-
-void
-cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
- struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
-{
- struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
- struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
- int rc;
-
- if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
- return;
-
- /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
- * in cfs_hash_bd_del/add_locked */
- hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
- rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
- cfs_hash_bd_dep_record(hs, bd_new, rc);
-
- LASSERT(obkt->hsb_count > 0);
- obkt->hsb_count--;
- obkt->hsb_version++;
- if (unlikely(obkt->hsb_version == 0))
- obkt->hsb_version++;
- nbkt->hsb_count++;
- nbkt->hsb_version++;
- if (unlikely(nbkt->hsb_version == 0))
- nbkt->hsb_version++;
-}
-EXPORT_SYMBOL(cfs_hash_bd_move_locked);
-
-enum {
- /** always set, for sanity (avoid ZERO intent) */
- CFS_HS_LOOKUP_MASK_FIND = BIT(0),
- /** return entry with a ref */
- CFS_HS_LOOKUP_MASK_REF = BIT(1),
- /** add entry if not existing */
- CFS_HS_LOOKUP_MASK_ADD = BIT(2),
- /** delete entry, ignore other masks */
- CFS_HS_LOOKUP_MASK_DEL = BIT(3),
-};
-
-typedef enum cfs_hash_lookup_intent {
- /** return item w/o refcount */
- CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
- /** return item with refcount */
- CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
- CFS_HS_LOOKUP_MASK_REF),
- /** return item w/o refcount if existed, otherwise add */
- CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
- CFS_HS_LOOKUP_MASK_ADD),
- /** return item with refcount if existed, otherwise add */
- CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
- CFS_HS_LOOKUP_MASK_ADD),
- /** delete if existed */
- CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
- CFS_HS_LOOKUP_MASK_DEL)
-} cfs_hash_lookup_intent_t;
-
-static struct hlist_node *
-cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key, struct hlist_node *hnode,
- cfs_hash_lookup_intent_t intent)
-
-{
- struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
- struct hlist_node *ehnode;
- struct hlist_node *match;
- int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
-
-	/* with this function, we can avoid a lot of useless refcount ops,
-	 * which are expensive atomic operations most of the time. */
- match = intent_add ? NULL : hnode;
- hlist_for_each(ehnode, hhead) {
- if (!cfs_hash_keycmp(hs, key, ehnode))
- continue;
-
- if (match != NULL && match != ehnode) /* can't match */
- continue;
-
- /* match and ... */
- if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
- cfs_hash_bd_del_locked(hs, bd, ehnode);
- return ehnode;
- }
-
- /* caller wants refcount? */
- if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
- cfs_hash_get(hs, ehnode);
- return ehnode;
- }
- /* no match item */
- if (!intent_add)
- return NULL;
-
- LASSERT(hnode != NULL);
- cfs_hash_bd_add_locked(hs, bd, hnode);
- return hnode;
-}
-
-struct hlist_node *
-cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
-{
- return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
- CFS_HS_LOOKUP_IT_FIND);
-}
-EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
-
-struct hlist_node *
-cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
-{
- return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
- CFS_HS_LOOKUP_IT_PEEK);
-}
-EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
-
-struct hlist_node *
-cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key, struct hlist_node *hnode,
- int noref)
-{
- return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
- (!noref * CFS_HS_LOOKUP_MASK_REF) |
- CFS_HS_LOOKUP_IT_ADD);
-}
-EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
-
-struct hlist_node *
-cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- const void *key, struct hlist_node *hnode)
-{
- /* hnode can be NULL, we find the first item with @key */
- return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
- CFS_HS_LOOKUP_IT_FINDDEL);
-}
-EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
-
-static void
-cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, int excl)
-{
- struct cfs_hash_bucket *prev = NULL;
- int i;
-
- /**
-	 * bds must be ordered by ascending bd->bd_bucket->hsb_index.
-	 * NB: it's possible that several bds point to the same bucket but
-	 * have different bd::bd_offset, so take care to avoid deadlock.
- */
- cfs_hash_for_each_bd(bds, n, i) {
- if (prev == bds[i].bd_bucket)
- continue;
-
- LASSERT(prev == NULL ||
- prev->hsb_index < bds[i].bd_bucket->hsb_index);
- cfs_hash_bd_lock(hs, &bds[i], excl);
- prev = bds[i].bd_bucket;
- }
-}
-
-static void
-cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, int excl)
-{
- struct cfs_hash_bucket *prev = NULL;
- int i;
-
- cfs_hash_for_each_bd(bds, n, i) {
- if (prev != bds[i].bd_bucket) {
- cfs_hash_bd_unlock(hs, &bds[i], excl);
- prev = bds[i].bd_bucket;
- }
- }
-}
-
-static struct hlist_node *
-cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key)
-{
- struct hlist_node *ehnode;
- unsigned i;
-
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
- CFS_HS_LOOKUP_IT_FIND);
- if (ehnode != NULL)
- return ehnode;
- }
- return NULL;
-}
-
-static struct hlist_node *
-cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
- struct cfs_hash_bd *bds, unsigned n, const void *key,
- struct hlist_node *hnode, int noref)
-{
- struct hlist_node *ehnode;
- int intent;
- unsigned i;
-
- LASSERT(hnode != NULL);
- intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
-
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
- NULL, intent);
- if (ehnode != NULL)
- return ehnode;
- }
-
- if (i == 1) { /* only one bucket */
- cfs_hash_bd_add_locked(hs, &bds[0], hnode);
- } else {
- struct cfs_hash_bd mybd;
-
- cfs_hash_bd_get(hs, key, &mybd);
- cfs_hash_bd_add_locked(hs, &mybd, hnode);
- }
-
- return hnode;
-}
-
-static struct hlist_node *
-cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- unsigned n, const void *key,
- struct hlist_node *hnode)
-{
- struct hlist_node *ehnode;
- unsigned i;
-
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
- CFS_HS_LOOKUP_IT_FINDDEL);
- if (ehnode != NULL)
- return ehnode;
- }
- return NULL;
-}
-
-static void
-cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
-{
- int rc;
-
- if (bd2->bd_bucket == NULL)
- return;
-
- if (bd1->bd_bucket == NULL) {
- *bd1 = *bd2;
- bd2->bd_bucket = NULL;
- return;
- }
-
- rc = cfs_hash_bd_compare(bd1, bd2);
- if (rc == 0) {
- bd2->bd_bucket = NULL;
-
-	} else if (rc > 0) { /* swap bd1 and bd2 */
- struct cfs_hash_bd tmp;
-
- tmp = *bd2;
- *bd2 = *bd1;
- *bd1 = tmp;
- }
-}
-
-void
-cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
-{
- /* NB: caller should hold hs_lock.rw if REHASH is set */
- cfs_hash_bd_from_key(hs, hs->hs_buckets,
- hs->hs_cur_bits, key, &bds[0]);
- if (likely(hs->hs_rehash_buckets == NULL)) {
- /* no rehash or not rehashing */
- bds[1].bd_bucket = NULL;
- return;
- }
-
- LASSERT(hs->hs_rehash_bits != 0);
- cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
- hs->hs_rehash_bits, key, &bds[1]);
-
- cfs_hash_bd_order(&bds[0], &bds[1]);
-}
-EXPORT_SYMBOL(cfs_hash_dual_bd_get);
-
-void
-cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
-{
- cfs_hash_multi_bd_lock(hs, bds, 2, excl);
-}
-EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
-
-void
-cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
-{
- cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
-}
-EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
-
-struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key)
-{
- return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
-}
-EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
-
-struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key, struct hlist_node *hnode,
- int noref)
-{
- return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
- hnode, noref);
-}
-EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
-
-struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
- const void *key, struct hlist_node *hnode)
-{
- return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
-}
-EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
-
-static void
-cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
- int bkt_size, int prev_size, int size)
-{
- int i;
-
- for (i = prev_size; i < size; i++) {
- if (buckets[i] != NULL)
- LIBCFS_FREE(buckets[i], bkt_size);
- }
-
- LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
-}
-
-/*
- * Create or grow bucket memory. Return old_buckets if no allocation was
- * needed, the newly allocated buckets if allocation was needed and
- * successful, and NULL on error.
- */
-static struct cfs_hash_bucket **
-cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
- unsigned int old_size, unsigned int new_size)
-{
- struct cfs_hash_bucket **new_bkts;
- int i;
-
- LASSERT(old_size == 0 || old_bkts != NULL);
-
- if (old_bkts != NULL && old_size == new_size)
- return old_bkts;
-
- LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
- if (new_bkts == NULL)
- return NULL;
-
- if (old_bkts != NULL) {
- memcpy(new_bkts, old_bkts,
- min(old_size, new_size) * sizeof(*old_bkts));
- }
-
- for (i = old_size; i < new_size; i++) {
- struct hlist_head *hhead;
- struct cfs_hash_bd bd;
-
- LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
- if (new_bkts[i] == NULL) {
- cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
- old_size, new_size);
- return NULL;
- }
-
- new_bkts[i]->hsb_index = i;
- new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
- new_bkts[i]->hsb_depmax = -1; /* unknown */
- bd.bd_bucket = new_bkts[i];
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
- INIT_HLIST_HEAD(hhead);
-
- if (cfs_hash_with_no_lock(hs) ||
- cfs_hash_with_no_bktlock(hs))
- continue;
-
- if (cfs_hash_with_rw_bktlock(hs))
- rwlock_init(&new_bkts[i]->hsb_lock.rw);
- else if (cfs_hash_with_spin_bktlock(hs))
- spin_lock_init(&new_bkts[i]->hsb_lock.spin);
- else
- LBUG(); /* invalid use-case */
- }
- return new_bkts;
-}
-
-/**
- * Initialize new libcfs hash, where:
- * @name - Descriptive hash name
- * @cur_bits - Initial hash table size, in bits
- * @max_bits - Maximum allowed hash table resize, in bits
- * @ops - Registered hash table operations
- * @flags     - CFS_HASH_REHASH enable dynamic hash resizing
- * - CFS_HASH_SORT enable chained hash sort
- */
-static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
-
-#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(cfs_workitem_t *wi)
-{
- struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
- int dep;
- int bkt;
- int off;
- int bits;
-
- spin_lock(&hs->hs_dep_lock);
- dep = hs->hs_dep_max;
- bkt = hs->hs_dep_bkt;
- off = hs->hs_dep_off;
- bits = hs->hs_dep_bits;
- spin_unlock(&hs->hs_dep_lock);
-
- LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
- hs->hs_name, bits, dep, bkt, off);
- spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_bits = 0; /* mark as workitem done */
- spin_unlock(&hs->hs_dep_lock);
- return 0;
-}
-
-static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
-{
- spin_lock_init(&hs->hs_dep_lock);
- cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
-}
-
-static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
-{
- if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
- return;
-
- spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits != 0) {
- spin_unlock(&hs->hs_dep_lock);
- cond_resched();
- spin_lock(&hs->hs_dep_lock);
- }
- spin_unlock(&hs->hs_dep_lock);
-}
-
-#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
-
-static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
-static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
-
-#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
-
-struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- cfs_hash_ops_t *ops, unsigned flags)
-{
- struct cfs_hash *hs;
- int len;
-
- CLASSERT(CFS_HASH_THETA_BITS < 15);
-
- LASSERT(name != NULL);
- LASSERT(ops != NULL);
- LASSERT(ops->hs_key);
- LASSERT(ops->hs_hash);
- LASSERT(ops->hs_object);
- LASSERT(ops->hs_keycmp);
- LASSERT(ops->hs_get != NULL);
- LASSERT(ops->hs_put_locked != NULL);
-
- if ((flags & CFS_HASH_REHASH) != 0)
- flags |= CFS_HASH_COUNTER; /* must have counter */
-
- LASSERT(cur_bits > 0);
- LASSERT(cur_bits >= bkt_bits);
- LASSERT(max_bits >= cur_bits && max_bits < 31);
- LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
- LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
- (flags & CFS_HASH_NO_LOCK) == 0));
- LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
- ops->hs_keycpy != NULL));
-
- len = (flags & CFS_HASH_BIGNAME) == 0 ?
- CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
- LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
- if (hs == NULL)
- return NULL;
-
- strncpy(hs->hs_name, name, len);
- hs->hs_name[len - 1] = '\0';
- hs->hs_flags = flags;
-
- atomic_set(&hs->hs_refcount, 1);
- atomic_set(&hs->hs_count, 0);
-
- cfs_hash_lock_setup(hs);
- cfs_hash_hlist_setup(hs);
-
- hs->hs_cur_bits = (__u8)cur_bits;
- hs->hs_min_bits = (__u8)cur_bits;
- hs->hs_max_bits = (__u8)max_bits;
- hs->hs_bkt_bits = (__u8)bkt_bits;
-
- hs->hs_ops = ops;
- hs->hs_extra_bytes = extra_bytes;
- hs->hs_rehash_bits = 0;
- cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
- cfs_hash_depth_wi_init(hs);
-
- if (cfs_hash_with_rehash(hs))
- __cfs_hash_set_theta(hs, min_theta, max_theta);
-
- hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
- CFS_HASH_NBKT(hs));
- if (hs->hs_buckets != NULL)
- return hs;
-
- LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
- return NULL;
-}
-EXPORT_SYMBOL(cfs_hash_create);
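For context, a minimal usage sketch of the constructor documented above; this is illustrative only and not part of the removed file. `example_ops` is a hypothetical cfs_hash_ops_t (its callbacks are omitted; the asserts above require hs_key/hs_hash/hs_object/hs_keycmp/hs_get/hs_put_locked), and CFS_HASH_MIN_THETA/CFS_HASH_MAX_THETA are assumed to be the default theta tunables from libcfs_hash.h.

static cfs_hash_ops_t example_ops;	/* callbacks omitted in this sketch */

static struct cfs_hash *example_hash_create(void)
{
	/* 2^7 buckets initially, allowed to grow to 2^10 buckets,
	 * 2^3 hlist heads per bucket, no extra bytes per bucket.
	 * CFS_HASH_REHASH forces CFS_HASH_COUNTER on anyway (see above). */
	return cfs_hash_create("example", 7, 10, 3, 0,
			       CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
			       &example_ops,
			       CFS_HASH_REHASH | CFS_HASH_COUNTER);
}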
-
-/**
- * Cleanup libcfs hash @hs.
- */
-static void
-cfs_hash_destroy(struct cfs_hash *hs)
-{
- struct hlist_node *hnode;
- struct hlist_node *pos;
- struct cfs_hash_bd bd;
- int i;
-
- LASSERT(hs != NULL);
- LASSERT(!cfs_hash_is_exiting(hs) &&
- !cfs_hash_is_iterating(hs));
-
- /**
-	 * prohibit further rehashes; no lock is needed because
-	 * I'm the only (last) one who can change it.
- */
- hs->hs_exiting = 1;
- if (cfs_hash_with_rehash(hs))
- cfs_hash_rehash_cancel(hs);
-
- cfs_hash_depth_wi_cancel(hs);
- /* rehash should be done/canceled */
- LASSERT(hs->hs_buckets != NULL &&
- hs->hs_rehash_buckets == NULL);
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct hlist_head *hhead;
-
- LASSERT(bd.bd_bucket != NULL);
-		/* no need to take this lock; taken only for code consistency */
- cfs_hash_bd_lock(hs, &bd, 1);
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- hlist_for_each_safe(hnode, pos, hhead) {
- LASSERTF(!cfs_hash_with_assert_empty(hs),
- "hash %s bucket %u(%u) is not empty: %u items left\n",
- hs->hs_name, bd.bd_bucket->hsb_index,
- bd.bd_offset, bd.bd_bucket->hsb_count);
-				/* can't assert key validity, because we
-				 * can interrupt rehash */
- cfs_hash_bd_del_locked(hs, &bd, hnode);
- cfs_hash_exit(hs, hnode);
- }
- }
- LASSERT(bd.bd_bucket->hsb_count == 0);
- cfs_hash_bd_unlock(hs, &bd, 1);
- cond_resched();
- }
-
- LASSERT(atomic_read(&hs->hs_count) == 0);
-
- cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
- 0, CFS_HASH_NBKT(hs));
- i = cfs_hash_with_bigname(hs) ?
- CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
- LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
-}
-
-struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
-{
- if (atomic_inc_not_zero(&hs->hs_refcount))
- return hs;
- return NULL;
-}
-EXPORT_SYMBOL(cfs_hash_getref);
-
-void cfs_hash_putref(struct cfs_hash *hs)
-{
- if (atomic_dec_and_test(&hs->hs_refcount))
- cfs_hash_destroy(hs);
-}
-EXPORT_SYMBOL(cfs_hash_putref);
-
-static inline int
-cfs_hash_rehash_bits(struct cfs_hash *hs)
-{
- if (cfs_hash_with_no_lock(hs) ||
- !cfs_hash_with_rehash(hs))
- return -EOPNOTSUPP;
-
- if (unlikely(cfs_hash_is_exiting(hs)))
- return -ESRCH;
-
- if (unlikely(cfs_hash_is_rehashing(hs)))
- return -EALREADY;
-
- if (unlikely(cfs_hash_is_iterating(hs)))
- return -EAGAIN;
-
- /* XXX: need to handle case with max_theta != 2.0
- * and the case with min_theta != 0.5 */
- if ((hs->hs_cur_bits < hs->hs_max_bits) &&
- (__cfs_hash_theta(hs) > hs->hs_max_theta))
- return hs->hs_cur_bits + 1;
-
- if (!cfs_hash_with_shrink(hs))
- return 0;
-
- if ((hs->hs_cur_bits > hs->hs_min_bits) &&
- (__cfs_hash_theta(hs) < hs->hs_min_theta))
- return hs->hs_cur_bits - 1;
-
- return 0;
-}
-
-/**
- * don't allow inline rehash if:
- * - user wants non-blocking change (add/del) on hash table
- * - too many elements
- */
-static inline int
-cfs_hash_rehash_inline(struct cfs_hash *hs)
-{
- return !cfs_hash_with_nblk_change(hs) &&
- atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
-}
-
-/**
- * Add item @hnode to libcfs hash @hs using @key. The registered
- * ops->hs_get function will be called when the item is added.
- */
-void
-cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
-{
- struct cfs_hash_bd bd;
- int bits;
-
- LASSERT(hlist_unhashed(hnode));
-
- cfs_hash_lock(hs, 0);
- cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
-
- cfs_hash_key_validate(hs, key, hnode);
- cfs_hash_bd_add_locked(hs, &bd, hnode);
-
- cfs_hash_bd_unlock(hs, &bd, 1);
-
- bits = cfs_hash_rehash_bits(hs);
- cfs_hash_unlock(hs, 0);
- if (bits > 0)
- cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
-}
-EXPORT_SYMBOL(cfs_hash_add);
-
-static struct hlist_node *
-cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode, int noref)
-{
- struct hlist_node *ehnode;
- struct cfs_hash_bd bds[2];
- int bits = 0;
-
- LASSERT(hlist_unhashed(hnode));
-
- cfs_hash_lock(hs, 0);
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
-
- cfs_hash_key_validate(hs, key, hnode);
- ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
- hnode, noref);
- cfs_hash_dual_bd_unlock(hs, bds, 1);
-
- if (ehnode == hnode) /* new item added */
- bits = cfs_hash_rehash_bits(hs);
- cfs_hash_unlock(hs, 0);
- if (bits > 0)
- cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
-
- return ehnode;
-}
-
-/**
- * Add item @hnode to libcfs hash @hs using @key. The registered
- * ops->hs_get function will be called if the item was added.
- * Returns 0 on success or -EALREADY on key collisions.
- */
-int
-cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
-{
- return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
- -EALREADY : 0;
-}
-EXPORT_SYMBOL(cfs_hash_add_unique);
-
-/**
- * Add item @hnode to libcfs hash @hs using @key. If this @key
- * already exists in the hash then ops->hs_get will be called on the
- * conflicting entry and that entry will be returned to the caller.
- * Otherwise ops->hs_get is called on the item which was added.
- */
-void *
-cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
- struct hlist_node *hnode)
-{
- hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
-
- return cfs_hash_object(hs, hnode);
-}
-EXPORT_SYMBOL(cfs_hash_findadd_unique);
-
-/**
- * Delete item @hnode from the libcfs hash @hs using @key. The @key
- * is required to ensure the correct hash bucket is locked since there
- * is no direct linkage from the item to the bucket. The object
- * removed from the hash will be returned and ops->hs_put is called
- * on the removed object.
- */
-void *
-cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
-{
- void *obj = NULL;
- int bits = 0;
- struct cfs_hash_bd bds[2];
-
- cfs_hash_lock(hs, 0);
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
-
- /* NB: do nothing if @hnode is not in hash table */
- if (hnode == NULL || !hlist_unhashed(hnode)) {
- if (bds[1].bd_bucket == NULL && hnode != NULL) {
- cfs_hash_bd_del_locked(hs, &bds[0], hnode);
- } else {
- hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
- key, hnode);
- }
- }
-
- if (hnode != NULL) {
- obj = cfs_hash_object(hs, hnode);
- bits = cfs_hash_rehash_bits(hs);
- }
-
- cfs_hash_dual_bd_unlock(hs, bds, 1);
- cfs_hash_unlock(hs, 0);
- if (bits > 0)
- cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
-
- return obj;
-}
-EXPORT_SYMBOL(cfs_hash_del);
-
-/**
- * Delete item given @key in libcfs hash @hs. The first @key found in
- * the hash will be removed; if the key exists multiple times in the hash
- * @hs, this function must be called once per key. The removed object
- * will be returned and ops->hs_put is called on the removed object.
- */
-void *
-cfs_hash_del_key(struct cfs_hash *hs, const void *key)
-{
- return cfs_hash_del(hs, key, NULL);
-}
-EXPORT_SYMBOL(cfs_hash_del_key);
-
-/**
- * Lookup an item using @key in the libcfs hash @hs and return it.
- * If the @key is found in the hash, hs->hs_get() is called and the
- * matching object is returned. It is the caller's responsibility
- * to call the counterpart ops->hs_put using the cfs_hash_put() macro
- * when finished with the object. If the @key was not found
- * in the hash @hs, NULL is returned.
- */
-void *
-cfs_hash_lookup(struct cfs_hash *hs, const void *key)
-{
- void *obj = NULL;
- struct hlist_node *hnode;
- struct cfs_hash_bd bds[2];
-
- cfs_hash_lock(hs, 0);
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
-
- hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
- if (hnode != NULL)
- obj = cfs_hash_object(hs, hnode);
-
- cfs_hash_dual_bd_unlock(hs, bds, 0);
- cfs_hash_unlock(hs, 0);
-
- return obj;
-}
-EXPORT_SYMBOL(cfs_hash_lookup);
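Taken together, the add/lookup/del helpers above support a simple object lifecycle. The sketch below is illustrative only and not part of the removed file: struct example_obj and its fields are hypothetical, and the hash is assumed to have been created with ops whose hs_object resolves the embedded hlist_node back to the object.

struct example_obj {
	__u64			eo_id;		/* hash key */
	struct hlist_node	eo_hnode;	/* linkage used by the hash */
};

static void example_hash_lifecycle(struct cfs_hash *hs, struct example_obj *obj)
{
	struct example_obj *found;

	/* insert: ops->hs_get is called on @obj unless the hash was
	 * created with the no-itemref flag */
	cfs_hash_add(hs, &obj->eo_id, &obj->eo_hnode);

	/* lookup: returns the object with a reference held via ops->hs_get */
	found = cfs_hash_lookup(hs, &obj->eo_id);
	if (found != NULL)
		cfs_hash_put(hs, &found->eo_hnode);	/* drop the lookup ref */

	/* remove by key: ops->hs_put is called on the removed object */
	cfs_hash_del_key(hs, &obj->eo_id);
}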
-
-static void
-cfs_hash_for_each_enter(struct cfs_hash *hs) {
- LASSERT(!cfs_hash_is_exiting(hs));
-
- if (!cfs_hash_with_rehash(hs))
- return;
- /*
-	 * NB: there is a race on cfs_hash::hs_iterating, but it doesn't
-	 * matter because it's just an unreliable signal to the rehash
-	 * thread, which will try to finish the rehash ASAP when it sees this.
- */
- hs->hs_iterating = 1;
-
- cfs_hash_lock(hs, 1);
- hs->hs_iterators++;
-
-	/* NB: iteration is mostly called from a service thread;
-	 * rather than blocking the service thread, we cancel the
-	 * pending rehash request and relaunch it after the
-	 * iteration */
- if (cfs_hash_is_rehashing(hs))
- cfs_hash_rehash_cancel_locked(hs);
- cfs_hash_unlock(hs, 1);
-}
-
-static void
-cfs_hash_for_each_exit(struct cfs_hash *hs) {
- int remained;
- int bits;
-
- if (!cfs_hash_with_rehash(hs))
- return;
- cfs_hash_lock(hs, 1);
- remained = --hs->hs_iterators;
- bits = cfs_hash_rehash_bits(hs);
- cfs_hash_unlock(hs, 1);
-	/* NB: racy on cfs_hash::hs_iterating, see above */
- if (remained == 0)
- hs->hs_iterating = 0;
- if (bits > 0) {
- cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
- CFS_HASH_LOOP_HOG);
- }
-}
-
-/**
- * For each item in the libcfs hash @hs call the passed callback @func
- * and pass to it as an argument each hash item and the private @data.
- *
- * a) the function may sleep!
- * b) during the callback:
- *    . the bucket lock is held, so the callback must never sleep.
- *    . if @remove_safe is true, the user can remove the current item
- *      with cfs_hash_bd_del_locked
- */
-static __u64
-cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data, int remove_safe) {
- struct hlist_node *hnode;
- struct hlist_node *pos;
- struct cfs_hash_bd bd;
- __u64 count = 0;
- int excl = !!remove_safe;
- int loop = 0;
- int i;
-
- cfs_hash_for_each_enter(hs);
-
- cfs_hash_lock(hs, 0);
- LASSERT(!cfs_hash_is_rehashing(hs));
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct hlist_head *hhead;
-
- cfs_hash_bd_lock(hs, &bd, excl);
- if (func == NULL) { /* only glimpse size */
- count += bd.bd_bucket->hsb_count;
- cfs_hash_bd_unlock(hs, &bd, excl);
- continue;
- }
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- hlist_for_each_safe(hnode, pos, hhead) {
- cfs_hash_bucket_validate(hs, &bd, hnode);
- count++;
- loop++;
- if (func(hs, &bd, hnode, data)) {
- cfs_hash_bd_unlock(hs, &bd, excl);
- goto out;
- }
- }
- }
- cfs_hash_bd_unlock(hs, &bd, excl);
- if (loop < CFS_HASH_LOOP_HOG)
- continue;
- loop = 0;
- cfs_hash_unlock(hs, 0);
- cond_resched();
- cfs_hash_lock(hs, 0);
- }
- out:
- cfs_hash_unlock(hs, 0);
-
- cfs_hash_for_each_exit(hs);
- return count;
-}
-
-typedef struct {
- cfs_hash_cond_opt_cb_t func;
- void *arg;
-} cfs_hash_cond_arg_t;
-
-static int
-cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- cfs_hash_cond_arg_t *cond = data;
-
- if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
- cfs_hash_bd_del_locked(hs, bd, hnode);
- return 0;
-}
-
-/**
- * Delete items from the libcfs hash @hs when @func returns true.
- * The write lock is held while looping over each bucket, to prevent
- * any object from being referenced during removal.
- */
-void
-cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
-{
- cfs_hash_cond_arg_t arg = {
- .func = func,
- .arg = data,
- };
-
- cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
-}
-EXPORT_SYMBOL(cfs_hash_cond_del);
-
-void
-cfs_hash_for_each(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data)
-{
- cfs_hash_for_each_tight(hs, func, data, 0);
-}
-EXPORT_SYMBOL(cfs_hash_for_each);
-
-void
-cfs_hash_for_each_safe(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data) {
- cfs_hash_for_each_tight(hs, func, data, 1);
-}
-EXPORT_SYMBOL(cfs_hash_for_each_safe);
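An iteration callback for the routines above has the cfs_hash_for_each_cb_t shape used throughout this file (compare cfs_hash_cond_del_locked and cfs_hash_peek). The counting callback below is an illustrative sketch only, not part of the removed file.

static int example_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode, void *data)
{
	/* the bucket lock is held here, so the callback must not sleep */
	(*(__u64 *)data)++;
	return 0;	/* returning non-zero would stop the iteration */
}

/* usage: __u64 n = 0; cfs_hash_for_each(hs, example_count_cb, &n); */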
-
-static int
-cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- *(int *)data = 0;
- return 1; /* return 1 to break the loop */
-}
-
-int
-cfs_hash_is_empty(struct cfs_hash *hs)
-{
- int empty = 1;
-
- cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
- return empty;
-}
-EXPORT_SYMBOL(cfs_hash_is_empty);
-
-__u64
-cfs_hash_size_get(struct cfs_hash *hs)
-{
- return cfs_hash_with_counter(hs) ?
- atomic_read(&hs->hs_count) :
- cfs_hash_for_each_tight(hs, NULL, NULL, 0);
-}
-EXPORT_SYMBOL(cfs_hash_size_get);
-
-/*
- * cfs_hash_for_each_relax:
- * Iterate the hash table and call @func on each item without
- * holding any lock. This function cannot guarantee to finish the
- * iteration if these features are enabled:
- *
- * a. if rehash_key is enabled, an item can be moved from
- *    one bucket to another bucket
- * b. the user can remove an item with a non-zero refcount from the
- *    hash table; even worse, the user may change its key and insert
- *    it into another hash bucket.
- * There is no way for us to finish the iteration correctly in the
- * two cases above, so the iteration has to be stopped on change.
- */
-static int
-cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data) {
- struct hlist_node *hnode;
- struct hlist_node *tmp;
- struct cfs_hash_bd bd;
- __u32 version;
- int count = 0;
- int stop_on_change;
- int rc;
- int i;
-
- stop_on_change = cfs_hash_with_rehash_key(hs) ||
- !cfs_hash_with_no_itemref(hs) ||
- hs->hs_ops->hs_put_locked == NULL;
- cfs_hash_lock(hs, 0);
- LASSERT(!cfs_hash_is_rehashing(hs));
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct hlist_head *hhead;
-
- cfs_hash_bd_lock(hs, &bd, 0);
- version = cfs_hash_bd_version_get(&bd);
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- for (hnode = hhead->first; hnode != NULL;) {
- cfs_hash_bucket_validate(hs, &bd, hnode);
- cfs_hash_get(hs, hnode);
- cfs_hash_bd_unlock(hs, &bd, 0);
- cfs_hash_unlock(hs, 0);
-
- rc = func(hs, &bd, hnode, data);
- if (stop_on_change)
- cfs_hash_put(hs, hnode);
- cond_resched();
- count++;
-
- cfs_hash_lock(hs, 0);
- cfs_hash_bd_lock(hs, &bd, 0);
- if (!stop_on_change) {
- tmp = hnode->next;
- cfs_hash_put_locked(hs, hnode);
- hnode = tmp;
- } else { /* bucket changed? */
- if (version !=
- cfs_hash_bd_version_get(&bd))
- break;
- /* safe to continue because no change */
- hnode = hnode->next;
- }
- if (rc) /* callback wants to break iteration */
- break;
- }
- if (rc) /* callback wants to break iteration */
- break;
- }
- cfs_hash_bd_unlock(hs, &bd, 0);
- if (rc) /* callback wants to break iteration */
- break;
- }
- cfs_hash_unlock(hs, 0);
-
- return count;
-}
-
-int
-cfs_hash_for_each_nolock(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data) {
- if (cfs_hash_with_no_lock(hs) ||
- cfs_hash_with_rehash_key(hs) ||
- !cfs_hash_with_no_itemref(hs))
- return -EOPNOTSUPP;
-
- if (hs->hs_ops->hs_get == NULL ||
- (hs->hs_ops->hs_put == NULL &&
- hs->hs_ops->hs_put_locked == NULL))
- return -EOPNOTSUPP;
-
- cfs_hash_for_each_enter(hs);
- cfs_hash_for_each_relax(hs, func, data);
- cfs_hash_for_each_exit(hs);
-
- return 0;
-}
-EXPORT_SYMBOL(cfs_hash_for_each_nolock);
-
-/**
- * For each hash bucket in the libcfs hash @hs call the passed callback
- * @func until all the hash buckets are empty. The passed callback @func
- * or the previously registered callback hs->hs_put must remove the item
- * from the hash. You may either use the cfs_hash_del() or hlist_del()
- * functions. No rwlocks will be held during the callback @func, so it is
- * safe to sleep if needed. This function will not terminate until the
- * hash is empty. Note it is still possible to concurrently add new
- * items into the hash. It is the caller's responsibility to ensure
- * the required locking is in place to prevent concurrent insertions.
- */
-int
-cfs_hash_for_each_empty(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data) {
- unsigned i = 0;
-
- if (cfs_hash_with_no_lock(hs))
- return -EOPNOTSUPP;
-
- if (hs->hs_ops->hs_get == NULL ||
- (hs->hs_ops->hs_put == NULL &&
- hs->hs_ops->hs_put_locked == NULL))
- return -EOPNOTSUPP;
-
- cfs_hash_for_each_enter(hs);
- while (cfs_hash_for_each_relax(hs, func, data)) {
- CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
- hs->hs_name, i++);
- }
- cfs_hash_for_each_exit(hs);
- return 0;
-}
-EXPORT_SYMBOL(cfs_hash_for_each_empty);
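Following the contract described above (the callback must remove the item, and may use cfs_hash_del() since no locks are held during the callback), a draining callback could look like the sketch below. It is illustrative only, reusing the hypothetical example_obj from the earlier sketch, and is not part of the removed file.

static int example_drain_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode, void *data)
{
	struct example_obj *obj = cfs_hash_object(hs, hnode);

	/* no bucket locks are held here, so cfs_hash_del(), which takes
	 * the locks itself, is safe to call; ops->hs_put releases the
	 * hash's reference on the removed object */
	cfs_hash_del(hs, &obj->eo_id, hnode);
	return 0;
}

/* usage: cfs_hash_for_each_empty(hs, example_drain_cb, NULL); */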
-
-void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
- cfs_hash_for_each_cb_t func, void *data)
-{
- struct hlist_head *hhead;
- struct hlist_node *hnode;
- struct cfs_hash_bd bd;
-
- cfs_hash_for_each_enter(hs);
- cfs_hash_lock(hs, 0);
- if (hindex >= CFS_HASH_NHLIST(hs))
- goto out;
-
- cfs_hash_bd_index_set(hs, hindex, &bd);
-
- cfs_hash_bd_lock(hs, &bd, 0);
- hhead = cfs_hash_bd_hhead(hs, &bd);
- hlist_for_each(hnode, hhead) {
- if (func(hs, &bd, hnode, data))
- break;
- }
- cfs_hash_bd_unlock(hs, &bd, 0);
- out:
- cfs_hash_unlock(hs, 0);
- cfs_hash_for_each_exit(hs);
-}
-
-EXPORT_SYMBOL(cfs_hash_hlist_for_each);
-
-/*
- * For each item in the libcfs hash @hs which matches the @key call
- * the passed callback @func and pass to it as an argument each hash
- * item and the private @data. During the callback the bucket lock
- * is held so the callback must never sleep.
- */
-void
-cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t func, void *data) {
- struct hlist_node *hnode;
- struct cfs_hash_bd bds[2];
- unsigned i;
-
- cfs_hash_lock(hs, 0);
-
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
-
- cfs_hash_for_each_bd(bds, 2, i) {
- struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
-
- hlist_for_each(hnode, hlist) {
- cfs_hash_bucket_validate(hs, &bds[i], hnode);
-
- if (cfs_hash_keycmp(hs, key, hnode)) {
- if (func(hs, &bds[i], hnode, data))
- break;
- }
- }
- }
-
- cfs_hash_dual_bd_unlock(hs, bds, 0);
- cfs_hash_unlock(hs, 0);
-}
-EXPORT_SYMBOL(cfs_hash_for_each_key);
-
-/**
- * Rehash the libcfs hash @hs to the given @bits. This can be used
- * to grow the hash size when excessive chaining is detected, or to
- * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
- * flag is set in @hs the libcfs hash may be dynamically rehashed
- * during addition or removal if the hash's theta value falls outside
- * the range defined by hs->hs_min_theta and hs->hs_max_theta. By default
- * these values are tuned to keep the chained hash depth small, and
- * this approach assumes a reasonably uniform hashing function. The
- * theta thresholds for @hs are tunable via cfs_hash_set_theta().
- */
-void
-cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
-{
- int i;
-
- /* need hold cfs_hash_lock(hs, 1) */
- LASSERT(cfs_hash_with_rehash(hs) &&
- !cfs_hash_with_no_lock(hs));
-
- if (!cfs_hash_is_rehashing(hs))
- return;
-
- if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
- hs->hs_rehash_bits = 0;
- return;
- }
-
- for (i = 2; cfs_hash_is_rehashing(hs); i++) {
- cfs_hash_unlock(hs, 1);
- /* raise console warning while waiting too long */
- CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
- "hash %s is still rehashing, rescheded %d\n",
- hs->hs_name, i - 1);
- cond_resched();
- cfs_hash_lock(hs, 1);
- }
-}
-EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
-
-void
-cfs_hash_rehash_cancel(struct cfs_hash *hs)
-{
- cfs_hash_lock(hs, 1);
- cfs_hash_rehash_cancel_locked(hs);
- cfs_hash_unlock(hs, 1);
-}
-EXPORT_SYMBOL(cfs_hash_rehash_cancel);
-
-int
-cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
-{
- int rc;
-
- LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
-
- cfs_hash_lock(hs, 1);
-
- rc = cfs_hash_rehash_bits(hs);
- if (rc <= 0) {
- cfs_hash_unlock(hs, 1);
- return rc;
- }
-
- hs->hs_rehash_bits = rc;
- if (!do_rehash) {
- /* launch and return */
- cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
- cfs_hash_unlock(hs, 1);
- return 0;
- }
-
- /* rehash right now */
- cfs_hash_unlock(hs, 1);
-
- return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
-}
-EXPORT_SYMBOL(cfs_hash_rehash);
-
-static int
-cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
-{
- struct cfs_hash_bd new;
- struct hlist_head *hhead;
- struct hlist_node *hnode;
- struct hlist_node *pos;
- void *key;
- int c = 0;
-
- /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
- cfs_hash_bd_for_each_hlist(hs, old, hhead) {
- hlist_for_each_safe(hnode, pos, hhead) {
- key = cfs_hash_key(hs, hnode);
- LASSERT(key != NULL);
- /* Validate hnode is in the correct bucket. */
- cfs_hash_bucket_validate(hs, old, hnode);
- /*
- * Delete from old hash bucket; move to new bucket.
- * ops->hs_key must be defined.
- */
- cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
- hs->hs_rehash_bits, key, &new);
- cfs_hash_bd_move_locked(hs, old, &new, hnode);
- c++;
- }
- }
-
- return c;
-}
-
-static int
-cfs_hash_rehash_worker(cfs_workitem_t *wi)
-{
- struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
- struct cfs_hash_bucket **bkts;
- struct cfs_hash_bd bd;
- unsigned int old_size;
- unsigned int new_size;
- int bsize;
- int count = 0;
- int rc = 0;
- int i;
-
- LASSERT (hs != NULL && cfs_hash_with_rehash(hs));
-
- cfs_hash_lock(hs, 0);
- LASSERT(cfs_hash_is_rehashing(hs));
-
- old_size = CFS_HASH_NBKT(hs);
- new_size = CFS_HASH_RH_NBKT(hs);
-
- cfs_hash_unlock(hs, 0);
-
- /*
- * don't need hs::hs_rwlock for hs::hs_buckets,
- * because nobody can change bkt-table except me.
- */
- bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
- old_size, new_size);
- cfs_hash_lock(hs, 1);
- if (bkts == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (bkts == hs->hs_buckets) {
- bkts = NULL; /* do nothing */
- goto out;
- }
-
- rc = __cfs_hash_theta(hs);
- if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
- /* free the new allocated bkt-table */
- old_size = new_size;
- new_size = CFS_HASH_NBKT(hs);
- rc = -EALREADY;
- goto out;
- }
-
- LASSERT(hs->hs_rehash_buckets == NULL);
- hs->hs_rehash_buckets = bkts;
-
- rc = 0;
- cfs_hash_for_each_bucket(hs, &bd, i) {
- if (cfs_hash_is_exiting(hs)) {
- rc = -ESRCH;
- /* someone wants to destroy the hash, abort now */
- if (old_size < new_size) /* OK to free old bkt-table */
- break;
-			/* it's shrinking, need to free the new bkt-table */
- hs->hs_rehash_buckets = NULL;
- old_size = new_size;
- new_size = CFS_HASH_NBKT(hs);
- goto out;
- }
-
- count += cfs_hash_rehash_bd(hs, &bd);
- if (count < CFS_HASH_LOOP_HOG ||
- cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
- continue;
- }
-
- count = 0;
- cfs_hash_unlock(hs, 1);
- cond_resched();
- cfs_hash_lock(hs, 1);
- }
-
- hs->hs_rehash_count++;
-
- bkts = hs->hs_buckets;
- hs->hs_buckets = hs->hs_rehash_buckets;
- hs->hs_rehash_buckets = NULL;
-
- hs->hs_cur_bits = hs->hs_rehash_bits;
- out:
- hs->hs_rehash_bits = 0;
- if (rc == -ESRCH) /* never be scheduled again */
- cfs_wi_exit(cfs_sched_rehash, wi);
- bsize = cfs_hash_bkt_size(hs);
- cfs_hash_unlock(hs, 1);
- /* can't refer to @hs anymore because it could be destroyed */
- if (bkts != NULL)
- cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
- if (rc != 0)
- CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
- /* return 1 only if cfs_wi_exit is called */
- return rc == -ESRCH;
-}
-
-/**
- * Rehash the object referenced by @hnode in the libcfs hash @hs. The
- * @old_key must be provided to locate the objects previous location
- * in the hash, and the @new_key will be used to reinsert the object.
- * Use this function instead of a cfs_hash_add() + cfs_hash_del()
- * combo when it is critical that there is no window in time where the
- * object is missing from the hash. When an object is being rehashed
- * the registered cfs_hash_get() and cfs_hash_put() functions will
- * not be called.
- */
-void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
- void *new_key, struct hlist_node *hnode)
-{
- struct cfs_hash_bd bds[3];
- struct cfs_hash_bd old_bds[2];
- struct cfs_hash_bd new_bd;
-
- LASSERT(!hlist_unhashed(hnode));
-
- cfs_hash_lock(hs, 0);
-
- cfs_hash_dual_bd_get(hs, old_key, old_bds);
- cfs_hash_bd_get(hs, new_key, &new_bd);
-
- bds[0] = old_bds[0];
- bds[1] = old_bds[1];
- bds[2] = new_bd;
-
- /* NB: bds[0] and bds[1] are ordered already */
- cfs_hash_bd_order(&bds[1], &bds[2]);
- cfs_hash_bd_order(&bds[0], &bds[1]);
-
- cfs_hash_multi_bd_lock(hs, bds, 3, 1);
- if (likely(old_bds[1].bd_bucket == NULL)) {
- cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
- } else {
- cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
- cfs_hash_bd_add_locked(hs, &new_bd, hnode);
- }
-	/* overwrite the key inside the locks, otherwise we may conflict
-	 * with other operations, e.g. rehash */
- cfs_hash_keycpy(hs, new_key, hnode);
-
- cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
- cfs_hash_unlock(hs, 0);
-}
-EXPORT_SYMBOL(cfs_hash_rehash_key);
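A short illustrative use of the helper above, again reusing the hypothetical example_obj; it assumes the hash was created with CFS_HASH_REHASH_KEY and therefore with an hs_keycpy callback registered. Not part of the removed file.

static void example_change_key(struct cfs_hash *hs, struct example_obj *obj,
			       __u64 new_id)
{
	/* moves the object to its new bucket and copies the new key into
	 * the object (via ops->hs_keycpy) without ever leaving a window
	 * where the object is absent from the hash */
	cfs_hash_rehash_key(hs, &obj->eo_id, &new_id, &obj->eo_hnode);
}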
-
-void cfs_hash_debug_header(struct seq_file *m)
-{
- seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n",
- CFS_HASH_BIGNAME_LEN, "name");
-}
-EXPORT_SYMBOL(cfs_hash_debug_header);
-
-static struct cfs_hash_bucket **
-cfs_hash_full_bkts(struct cfs_hash *hs)
-{
- /* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (hs->hs_rehash_buckets == NULL)
- return hs->hs_buckets;
-
- LASSERT(hs->hs_rehash_bits != 0);
- return hs->hs_rehash_bits > hs->hs_cur_bits ?
- hs->hs_rehash_buckets : hs->hs_buckets;
-}
-
-static unsigned int
-cfs_hash_full_nbkt(struct cfs_hash *hs)
-{
- /* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (hs->hs_rehash_buckets == NULL)
- return CFS_HASH_NBKT(hs);
-
- LASSERT(hs->hs_rehash_bits != 0);
- return hs->hs_rehash_bits > hs->hs_cur_bits ?
- CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
-}
-
-void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
-{
- int dist[8] = { 0, };
- int maxdep = -1;
- int maxdepb = -1;
- int total = 0;
- int theta;
- int i;
-
- cfs_hash_lock(hs, 0);
- theta = __cfs_hash_theta(hs);
-
- seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
- CFS_HASH_BIGNAME_LEN, hs->hs_name,
- 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
- 1 << hs->hs_max_bits,
- __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
- __cfs_hash_theta_int(hs->hs_min_theta),
- __cfs_hash_theta_frac(hs->hs_min_theta),
- __cfs_hash_theta_int(hs->hs_max_theta),
- __cfs_hash_theta_frac(hs->hs_max_theta),
- hs->hs_flags, hs->hs_rehash_count);
-
- /*
- * The distribution is a summary of the chained hash depth in
-	 * each of the libcfs hash buckets. Each bucket's hsb_count is
-	 * divided by the hash theta value and used to generate a
-	 * histogram of the hash distribution. A uniform hash will
-	 * result in all hash buckets being close to the average, thus
-	 * only the first few entries in the histogram will be non-zero.
-	 * If your hash function results in a non-uniform hash, this will
-	 * be observable as outlier buckets in the distribution histogram.
- *
- * Uniform hash distribution: 128/128/0/0/0/0/0/0
- * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
- */
- for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
- struct cfs_hash_bd bd;
-
- bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
- cfs_hash_bd_lock(hs, &bd, 0);
- if (maxdep < bd.bd_bucket->hsb_depmax) {
- maxdep = bd.bd_bucket->hsb_depmax;
- maxdepb = ffz(~maxdep);
- }
- total += bd.bd_bucket->hsb_count;
- dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
- cfs_hash_bd_unlock(hs, &bd, 0);
- }
-
- seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
- for (i = 0; i < 8; i++)
- seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
-
- cfs_hash_unlock(hs, 0);
-}
-EXPORT_SYMBOL(cfs_hash_debug_str);
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
deleted file mode 100644
index d9b7c6b69db4..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- *
- * Kernel <-> userspace communication routines.
- * Using pipes for all arches.
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-#define D_KUC D_OTHER
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-/* This is the kernel side (liblustre as well). */
-
-/**
- * libcfs_kkuc_msg_put - send a message from kernel to userspace
- * @param filp file to send the message to
- * @param payload Payload data. First field of payload is always
- * struct kuc_hdr
- */
-int libcfs_kkuc_msg_put(struct file *filp, void *payload)
-{
- struct kuc_hdr *kuch = (struct kuc_hdr *)payload;
- ssize_t count = kuch->kuc_msglen;
- loff_t offset = 0;
- mm_segment_t fs;
- int rc = -ENOSYS;
-
- if (filp == NULL || IS_ERR(filp))
- return -EBADF;
-
- if (kuch->kuc_magic != KUC_MAGIC) {
- CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic);
- return -ENOSYS;
- }
-
- fs = get_fs();
- set_fs(KERNEL_DS);
- while (count > 0) {
- rc = vfs_write(filp, (void __force __user *)payload,
- count, &offset);
- if (rc < 0)
- break;
- count -= rc;
- payload += rc;
- rc = 0;
- }
- set_fs(fs);
-
- if (rc < 0)
- CWARN("message send failed (%d)\n", rc);
- else
- CDEBUG(D_KUC, "Sent message rc=%d, fp=%p\n", rc, filp);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_kkuc_msg_put);
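For reference, a header-only message such as the one libcfs_kkuc_msg_put() writes can be built the same way the shutdown broadcast later in this file builds it. This sketch is illustrative only and not part of the removed file.

static int example_send_shutdown(struct file *filp)
{
	struct kuc_hdr hdr;

	hdr.kuc_magic     = KUC_MAGIC;
	hdr.kuc_transport = KUC_TRANSPORT_GENERIC;
	hdr.kuc_msgtype   = KUC_MSG_SHUTDOWN;
	hdr.kuc_msglen    = sizeof(hdr);	/* header-only payload */

	return libcfs_kkuc_msg_put(filp, &hdr);
}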
-
-/* Broadcast groups are global across all mounted filesystems;
- * i.e. registering for a group on 1 fs will get messages for that
- * group from any fs */
-/** A single group registration has a uid and a file pointer */
-struct kkuc_reg {
- struct list_head kr_chain;
- int kr_uid;
- struct file *kr_fp;
- __u32 kr_data;
-};
-static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {};
-/* Protect message sending against remove and adds */
-static DECLARE_RWSEM(kg_sem);
-
-/** Add a receiver to a broadcast group
- * @param filp pipe to write into
- * @param uid identifier for this receiver
- * @param group group number
- * @param data opaque value stored with the registration and passed to
- *   group callbacks
- */
-int libcfs_kkuc_group_add(struct file *filp, int uid, int group, __u32 data)
-{
- struct kkuc_reg *reg;
-
- if (group > KUC_GRP_MAX) {
- CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
- return -EINVAL;
- }
-
- /* fput in group_rem */
- if (filp == NULL)
- return -EBADF;
-
- /* freed in group_rem */
- reg = kmalloc(sizeof(*reg), 0);
- if (reg == NULL)
- return -ENOMEM;
-
- reg->kr_fp = filp;
- reg->kr_uid = uid;
- reg->kr_data = data;
-
- down_write(&kg_sem);
- if (kkuc_groups[group].next == NULL)
- INIT_LIST_HEAD(&kkuc_groups[group]);
- list_add(&reg->kr_chain, &kkuc_groups[group]);
- up_write(&kg_sem);
-
- CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
-
- return 0;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_add);
-
-int libcfs_kkuc_group_rem(int uid, int group)
-{
- struct kkuc_reg *reg, *next;
-
- if (kkuc_groups[group].next == NULL)
- return 0;
-
- if (uid == 0) {
- /* Broadcast a shutdown message */
- struct kuc_hdr lh;
-
- lh.kuc_magic = KUC_MAGIC;
- lh.kuc_transport = KUC_TRANSPORT_GENERIC;
- lh.kuc_msgtype = KUC_MSG_SHUTDOWN;
- lh.kuc_msglen = sizeof(lh);
- libcfs_kkuc_group_put(group, &lh);
- }
-
- down_write(&kg_sem);
- list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
- if ((uid == 0) || (uid == reg->kr_uid)) {
- list_del(&reg->kr_chain);
- CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n",
- reg->kr_uid, reg->kr_fp, group);
- if (reg->kr_fp != NULL)
- fput(reg->kr_fp);
- kfree(reg);
- }
- }
- up_write(&kg_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_rem);
-
-int libcfs_kkuc_group_put(int group, void *payload)
-{
- struct kkuc_reg *reg;
- int rc = 0;
- int one_success = 0;
-
- down_read(&kg_sem);
- list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp != NULL) {
- rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
- if (rc == 0)
- one_success = 1;
- else if (rc == -EPIPE) {
- fput(reg->kr_fp);
- reg->kr_fp = NULL;
- }
- }
- }
- up_read(&kg_sem);
-
-	/* don't return an error if the message has been delivered
-	 * to at least one agent */
- if (one_success)
- rc = 0;
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_put);
-
-/**
- * Calls a callback function for each link of the given kuc group.
- * @param group the group to call the function on.
- * @param cb_func the function to be called.
- * @param cb_arg extra argument to be passed to the callback function.
- */
-int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
- void *cb_arg)
-{
- struct kkuc_reg *reg;
- int rc = 0;
-
- if (group > KUC_GRP_MAX) {
- CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
- return -EINVAL;
- }
-
- /* no link for this group */
- if (kkuc_groups[group].next == NULL)
- return 0;
-
- down_write(&kg_sem);
- list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp != NULL)
- rc = cb_func(reg->kr_data, cb_arg);
- }
- up_write(&kg_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
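
For reference, the file removed above implements a small broadcast facility: libcfs_kkuc_group_put() writes a kuc_hdr-framed payload into every pipe registered with libcfs_kkuc_group_add(). A minimal caller sketch, assuming the libcfs header that declares struct kuc_hdr and the KUC_* constants is available; example_broadcast and its grp argument are hypothetical names, not part of the removed code:

static int example_broadcast(int grp)
{
	struct kuc_hdr hdr;

	/* header-only message; a real caller appends its payload after the
	 * header and sets kuc_msglen to the total length */
	hdr.kuc_magic     = KUC_MAGIC;
	hdr.kuc_transport = KUC_TRANSPORT_GENERIC;
	hdr.kuc_msgtype   = KUC_MSG_SHUTDOWN;
	hdr.kuc_msglen    = sizeof(hdr);

	/* returns 0 as long as at least one registered receiver got it */
	return libcfs_kkuc_group_put(grp, &hdr);
}
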
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
deleted file mode 100644
index 933525c73da1..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-/** Global CPU partition table */
-struct cfs_cpt_table *cfs_cpt_table __read_mostly;
-EXPORT_SYMBOL(cfs_cpt_table);
-
-#ifndef HAVE_LIBCFS_CPT
-
-#define CFS_CPU_VERSION_MAGIC 0xbabecafe
-
-struct cfs_cpt_table *
-cfs_cpt_table_alloc(unsigned int ncpt)
-{
- struct cfs_cpt_table *cptab;
-
- if (ncpt != 1) {
- CERROR("Can't support cpu partition number %d\n", ncpt);
- return NULL;
- }
-
- LIBCFS_ALLOC(cptab, sizeof(*cptab));
- if (cptab != NULL) {
- cptab->ctb_version = CFS_CPU_VERSION_MAGIC;
- cptab->ctb_nparts = ncpt;
- }
-
- return cptab;
-}
-EXPORT_SYMBOL(cfs_cpt_table_alloc);
-
-void
-cfs_cpt_table_free(struct cfs_cpt_table *cptab)
-{
- LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC);
-
- LIBCFS_FREE(cptab, sizeof(*cptab));
-}
-EXPORT_SYMBOL(cfs_cpt_table_free);
-
-#ifdef CONFIG_SMP
-int
-cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
-{
- int rc;
-
- rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
- len -= rc;
- if (len <= 0)
- return -EFBIG;
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_cpt_table_print);
-#endif /* CONFIG_SMP */
-
-int
-cfs_cpt_number(struct cfs_cpt_table *cptab)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_number);
-
-int
-cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_weight);
-
-int
-cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_online);
-
-int
-cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpu);
-
-void
-cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpu);
-
-int
-cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpumask);
-
-void
-cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
-
-int
-cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_node);
-
-void
-cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_node);
-
-int
-cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_nodemask);
-
-void
-cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
-
-void
-cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
-{
-}
-EXPORT_SYMBOL(cfs_cpt_clear);
-
-int
-cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_spread_node);
-
-int
-cfs_cpu_ht_nsiblings(int cpu)
-{
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpu_ht_nsiblings);
-
-int
-cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_current);
-
-int
-cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_of_cpu);
-
-int
-cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
-{
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_bind);
-
-void
-cfs_cpu_fini(void)
-{
- if (cfs_cpt_table != NULL) {
- cfs_cpt_table_free(cfs_cpt_table);
- cfs_cpt_table = NULL;
- }
-}
-
-int
-cfs_cpu_init(void)
-{
- cfs_cpt_table = cfs_cpt_table_alloc(1);
-
- return cfs_cpt_table != NULL ? 0 : -1;
-}
-
-#endif /* HAVE_LIBCFS_CPT */
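
The file above is the uniprocessor fallback of the CPU-partition API: every query collapses to a single partition. A sketch of the resulting behaviour, assuming the same libcfs header; example_single_cpt is a hypothetical caller:

static int example_single_cpt(void)
{
	struct cfs_cpt_table *cptab;

	/* the fallback accepts only ncpt == 1 */
	cptab = cfs_cpt_table_alloc(1);
	if (!cptab)
		return -ENOMEM;

	/* one partition, and every CPU maps to CPT 0 */
	LASSERT(cfs_cpt_number(cptab) == 1);
	LASSERT(cfs_cpt_current(cptab, 0) == 0);

	cfs_cpt_table_free(cptab);
	return 0;
}
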
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
deleted file mode 100644
index 29308ba85393..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-
-/** destroy cpu-partition lock, see libcfs_private.h for more detail */
-void
-cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
-{
- LASSERT(pcl->pcl_locks != NULL);
- LASSERT(!pcl->pcl_locked);
-
- cfs_percpt_free(pcl->pcl_locks);
- LIBCFS_FREE(pcl, sizeof(*pcl));
-}
-EXPORT_SYMBOL(cfs_percpt_lock_free);
-
-/**
- * create cpu-partition lock, see libcfs_private.h for more detail.
- *
- * cpu-partition locks are designed for large-scale SMP systems, so we need
- * to reduce cacheline conflicts as much as possible; that is why we always
- * allocate cacheline-aligned memory blocks.
- */
-struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
-{
- struct cfs_percpt_lock *pcl;
- spinlock_t *lock;
- int i;
-
- /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
- LIBCFS_ALLOC(pcl, sizeof(*pcl));
- if (!pcl)
- return NULL;
-
- pcl->pcl_cptab = cptab;
- pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
- if (!pcl->pcl_locks) {
- LIBCFS_FREE(pcl, sizeof(*pcl));
- return NULL;
- }
-
- cfs_percpt_for_each(lock, i, pcl->pcl_locks)
- spin_lock_init(lock);
-
- return pcl;
-}
-EXPORT_SYMBOL(cfs_percpt_lock_alloc);
-
-/**
- * lock a CPU partition
- *
- * \a index != CFS_PERCPT_LOCK_EX
- * hold private lock indexed by \a index
- *
- * \a index == CFS_PERCPT_LOCK_EX
- * exclusively lock @pcl and nobody can take private lock
- */
-void
-cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
-{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
-
- LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
-
- if (ncpt == 1) {
- index = 0;
- } else { /* serialize with exclusive lock */
- while (pcl->pcl_locked)
- cpu_relax();
- }
-
- if (likely(index != CFS_PERCPT_LOCK_EX)) {
- spin_lock(pcl->pcl_locks[index]);
- return;
- }
-
- /* exclusive lock request */
- for (i = 0; i < ncpt; i++) {
- spin_lock(pcl->pcl_locks[i]);
- if (i == 0) {
- LASSERT(!pcl->pcl_locked);
-			/* nobody should take a private lock after this,
-			 * so we will not starve for too long */
- pcl->pcl_locked = 1;
- }
- }
-}
-EXPORT_SYMBOL(cfs_percpt_lock);
-
-/** unlock a CPU partition */
-void
-cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
-{
- int ncpt = cfs_cpt_number(pcl->pcl_cptab);
- int i;
-
- index = ncpt == 1 ? 0 : index;
-
- if (likely(index != CFS_PERCPT_LOCK_EX)) {
- spin_unlock(pcl->pcl_locks[index]);
- return;
- }
-
- for (i = ncpt - 1; i >= 0; i--) {
- if (i == 0) {
- LASSERT(pcl->pcl_locked);
- pcl->pcl_locked = 0;
- }
- spin_unlock(pcl->pcl_locks[i]);
- }
-}
-EXPORT_SYMBOL(cfs_percpt_unlock);
-
-
-/** free cpu-partition refcount */
-void
-cfs_percpt_atomic_free(atomic_t **refs)
-{
- cfs_percpt_free(refs);
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_free);
-
-/** allocate cpu-partition refcount with initial value @init_val */
-atomic_t **
-cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
-{
- atomic_t **refs;
- atomic_t *ref;
- int i;
-
- refs = cfs_percpt_alloc(cptab, sizeof(*ref));
- if (!refs)
- return NULL;
-
- cfs_percpt_for_each(ref, i, refs)
- atomic_set(ref, init_val);
- return refs;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
-
-/** return sum of cpu-partition refs */
-int
-cfs_percpt_atomic_summary(atomic_t **refs)
-{
- atomic_t *ref;
- int i;
- int val = 0;
-
- cfs_percpt_for_each(ref, i, refs)
- val += atomic_read(ref);
-
- return val;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_summary);
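
The lock removed above has two modes: a private spinlock selected by partition index, and an exclusive mode (CFS_PERCPT_LOCK_EX) that takes every private lock in order. A usage sketch, assuming the libcfs header; example_percpt_usage and the data it would protect are hypothetical:

static void example_percpt_usage(struct cfs_percpt_lock *pcl,
				 struct cfs_cpt_table *cptab)
{
	/* remap = 1 guarantees a valid index even for an unmapped CPU */
	int cpt = cfs_cpt_current(cptab, 1);

	/* fast path: lock only the current partition */
	cfs_percpt_lock(pcl, cpt);
	/* ... touch data owned by partition 'cpt' ... */
	cfs_percpt_unlock(pcl, cpt);

	/* slow path: exclusive lock serializes against all partitions */
	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
	/* ... touch data of every partition ... */
	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
}
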
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
deleted file mode 100644
index f4e08daba4d9..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-struct cfs_var_array {
- unsigned int va_count; /* # of buffers */
- unsigned int va_size; /* size of each var */
- struct cfs_cpt_table *va_cptab; /* cpu partition table */
- void *va_ptrs[0]; /* buffer addresses */
-};
-
-/*
- * free per-cpu data; see cfs_percpt_alloc for more detail
- */
-void
-cfs_percpt_free(void *vars)
-{
- struct cfs_var_array *arr;
- int i;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- for (i = 0; i < arr->va_count; i++) {
- if (arr->va_ptrs[i] != NULL)
- LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
- }
-
- LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
- va_ptrs[arr->va_count]));
-}
-EXPORT_SYMBOL(cfs_percpt_free);
-
-/*
- * allocate per cpu-partition variables; the returned value is an array of
- * pointers that can be indexed by CPU partition ID, e.g.:
- *
- * arr = cfs_percpt_alloc(cfs_cpu_pt, size);
- * then the caller can access the memory block for CPU partition 0 via arr[0],
- * the memory block for CPU partition 1 via arr[1]...
- * the memory block for CPU partition N via arr[N]...
- *
- * cacheline aligned.
- */
-void *
-cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
-{
- struct cfs_var_array *arr;
- int count;
- int i;
-
- count = cfs_cpt_number(cptab);
-
- LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
- if (!arr)
- return NULL;
-
- arr->va_size = size = L1_CACHE_ALIGN(size);
- arr->va_count = count;
- arr->va_cptab = cptab;
-
- for (i = 0; i < count; i++) {
- LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
- if (!arr->va_ptrs[i]) {
- cfs_percpt_free((void *)&arr->va_ptrs[0]);
- return NULL;
- }
- }
-
- return (void *)&arr->va_ptrs[0];
-}
-EXPORT_SYMBOL(cfs_percpt_alloc);
-
-/*
- * return number of CPUs (or number of elements in per-cpu data)
- * according to cptab of @vars
- */
-int
-cfs_percpt_number(void *vars)
-{
- struct cfs_var_array *arr;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- return arr->va_count;
-}
-EXPORT_SYMBOL(cfs_percpt_number);
-
-/*
- * return memory block shadowed from current CPU
- */
-void *
-cfs_percpt_current(void *vars)
-{
- struct cfs_var_array *arr;
- int cpt;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- cpt = cfs_cpt_current(arr->va_cptab, 0);
- if (cpt < 0)
- return NULL;
-
- return arr->va_ptrs[cpt];
-}
-EXPORT_SYMBOL(cfs_percpt_current);
-
-void *
-cfs_percpt_index(void *vars, int idx)
-{
- struct cfs_var_array *arr;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- LASSERT(idx >= 0 && idx < arr->va_count);
- return arr->va_ptrs[idx];
-}
-EXPORT_SYMBOL(cfs_percpt_index);
-
-/*
- * free variable array, see more detail in cfs_array_alloc
- */
-void
-cfs_array_free(void *vars)
-{
- struct cfs_var_array *arr;
- int i;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- for (i = 0; i < arr->va_count; i++) {
- if (!arr->va_ptrs[i])
- continue;
-
- LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
- }
- LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
- va_ptrs[arr->va_count]));
-}
-EXPORT_SYMBOL(cfs_array_free);
-
-/*
- * allocate a variable array, returned value is an array of pointers.
- * Caller can specify length of array by @count, @size is size of each
- * memory block in array.
- */
-void *
-cfs_array_alloc(int count, unsigned int size)
-{
- struct cfs_var_array *arr;
- int i;
-
- LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
- if (!arr)
- return NULL;
-
- arr->va_count = count;
- arr->va_size = size;
-
- for (i = 0; i < count; i++) {
- LIBCFS_ALLOC(arr->va_ptrs[i], size);
-
- if (!arr->va_ptrs[i]) {
- cfs_array_free((void *)&arr->va_ptrs[0]);
- return NULL;
- }
- }
-
- return (void *)&arr->va_ptrs[0];
-}
-EXPORT_SYMBOL(cfs_array_alloc);
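
cfs_percpt_alloc()/cfs_percpt_free() above hand out one cacheline-aligned block per CPU partition and return them as an array of pointers. A usage sketch, assuming the libcfs header; struct example_counter and example_alloc are made-up names used only for illustration:

struct example_counter {
	long ec_hits;
};

static struct example_counter **example_alloc(struct cfs_cpt_table *cptab)
{
	struct example_counter **counters;
	struct example_counter *c;
	int i;

	/* one block per partition of 'cptab' */
	counters = cfs_percpt_alloc(cptab, sizeof(*c));
	if (!counters)
		return NULL;

	cfs_percpt_for_each(c, i, counters)
		c->ec_hits = 0;

	/* released later with cfs_percpt_free(counters) */
	return counters;
}
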
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
deleted file mode 100644
index efe5e667a2e5..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ /dev/null
@@ -1,562 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * String manipulation functions.
- *
- * libcfs/libcfs/libcfs_string.c
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-/* Convert a text string to a bitmask */
-int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
- int *oldmask, int minmask, int allmask)
-{
- const char *debugstr;
- char op = '\0';
- int newmask = minmask, i, len, found = 0;
-
- /* <str> must be a list of tokens separated by whitespace
- * and optionally an operator ('+' or '-'). If an operator
- * appears first in <str>, '*oldmask' is used as the starting point
- * (relative), otherwise minmask is used (absolute). An operator
- * applies to all following tokens up to the next operator. */
- while (*str != '\0') {
- while (isspace(*str))
- str++;
- if (*str == '\0')
- break;
- if (*str == '+' || *str == '-') {
- op = *str++;
- if (!found)
- /* only if first token is relative */
- newmask = *oldmask;
- while (isspace(*str))
- str++;
- if (*str == '\0') /* trailing op */
- return -EINVAL;
- }
-
- /* find token length */
- len = 0;
- while (str[len] != '\0' && !isspace(str[len]) &&
- str[len] != '+' && str[len] != '-')
- len++;
-
- /* match token */
- found = 0;
- for (i = 0; i < 32; i++) {
- debugstr = bit2str(i);
- if (debugstr != NULL &&
- strlen(debugstr) == len &&
- strncasecmp(str, debugstr, len) == 0) {
- if (op == '-')
- newmask &= ~(1 << i);
- else
- newmask |= (1 << i);
- found = 1;
- break;
- }
- }
- if (!found && len == 3 &&
- (strncasecmp(str, "ALL", len) == 0)) {
- if (op == '-')
- newmask = minmask;
- else
- newmask = allmask;
- found = 1;
- }
- if (!found) {
- CWARN("unknown mask '%.*s'.\n"
- "mask usage: [+|-]<all|type> ...\n", len, str);
- return -EINVAL;
- }
- str += len;
- }
-
- *oldmask = newmask;
- return 0;
-}
-
-/* get the first string out of @str */
-char *cfs_firststr(char *str, size_t size)
-{
- size_t i = 0;
- char *end;
-
- /* trim leading spaces */
- while (i < size && *str && isspace(*str)) {
- ++i;
- ++str;
- }
-
- /* string with all spaces */
- if (*str == '\0')
- goto out;
-
- end = str;
- while (i < size && *end != '\0' && !isspace(*end)) {
- ++i;
- ++end;
- }
-
- *end = '\0';
-out:
- return str;
-}
-EXPORT_SYMBOL(cfs_firststr);
-
-char *
-cfs_trimwhite(char *str)
-{
- char *end;
-
- while (isspace(*str))
- str++;
-
- end = str + strlen(str);
- while (end > str) {
- if (!isspace(end[-1]))
- break;
- end--;
- }
-
- *end = 0;
- return str;
-}
-EXPORT_SYMBOL(cfs_trimwhite);
-
-/**
- * Extracts tokens from strings.
- *
- * Looks for \a delim in string \a next, sets \a res to point to
- * substring before the delimiter, sets \a next right after the found
- * delimiter.
- *
- * \retval 1 if \a res points to a string of non-whitespace characters
- * \retval 0 otherwise
- */
-int
-cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
-{
- char *end;
-
- if (next->ls_str == NULL)
- return 0;
-
- /* skip leading white spaces */
- while (next->ls_len) {
- if (!isspace(*next->ls_str))
- break;
- next->ls_str++;
- next->ls_len--;
- }
-
- if (next->ls_len == 0) /* whitespaces only */
- return 0;
-
- if (*next->ls_str == delim) {
-		/* first non-whitespace is the delimiter */
- return 0;
- }
-
- res->ls_str = next->ls_str;
- end = memchr(next->ls_str, delim, next->ls_len);
- if (end == NULL) {
-		/* the delimiter is not in the string */
- end = next->ls_str + next->ls_len;
- next->ls_str = NULL;
- } else {
- next->ls_str = end + 1;
- next->ls_len -= (end - res->ls_str + 1);
- }
-
- /* skip ending whitespaces */
- while (--end != res->ls_str) {
- if (!isspace(*end))
- break;
- }
-
- res->ls_len = end - res->ls_str + 1;
- return 1;
-}
-
-/**
- * Converts string to integer.
- *
- * Accepts numbers in decimal and hexadecimal notation.
- *
- * \retval 1 if first \a nob chars of \a str convert to decimal or
- * hexadecimal integer in the range [\a min, \a max]
- * \retval 0 otherwise
- */
-int
-cfs_str2num_check(char *str, int nob, unsigned *num,
- unsigned min, unsigned max)
-{
- char *endp;
-
- str = cfs_trimwhite(str);
- *num = simple_strtoul(str, &endp, 0);
- if (endp == str)
- return 0;
-
- for (; endp < str + nob; endp++) {
- if (!isspace(*endp))
- return 0;
- }
-
- return (*num >= min && *num <= max);
-}
-
-/**
- * Parses \<range_expr\> token of the syntax. If \a bracketed is false,
- * \a src should only have a single token which can be \<number\> or \*
- *
- * \retval 0 if \a src parses to
- * \<number\> |
- * \<number\> '-' \<number\> |
- * \<number\> '-' \<number\> '/' \<number\>
- * in which case \a expr is set to an allocated range_expr with
- * range_expr::re_lo, range_expr::re_hi and range_expr::re_stride
- * initialized accordingly
- * \retval -EINVAL or -ENOMEM otherwise
- */
-static int
-cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
- int bracketed, struct cfs_range_expr **expr)
-{
- struct cfs_range_expr *re;
- struct cfs_lstr tok;
-
- LIBCFS_ALLOC(re, sizeof(*re));
- if (re == NULL)
- return -ENOMEM;
-
- if (src->ls_len == 1 && src->ls_str[0] == '*') {
- re->re_lo = min;
- re->re_hi = max;
- re->re_stride = 1;
- goto out;
- }
-
- if (cfs_str2num_check(src->ls_str, src->ls_len,
- &re->re_lo, min, max)) {
- /* <number> is parsed */
- re->re_hi = re->re_lo;
- re->re_stride = 1;
- goto out;
- }
-
- if (!bracketed || !cfs_gettok(src, '-', &tok))
- goto failed;
-
- if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
- &re->re_lo, min, max))
- goto failed;
-
- /* <number> - */
- if (cfs_str2num_check(src->ls_str, src->ls_len,
- &re->re_hi, min, max)) {
- /* <number> - <number> is parsed */
- re->re_stride = 1;
- goto out;
- }
-
- /* go to check <number> '-' <number> '/' <number> */
- if (cfs_gettok(src, '/', &tok)) {
- if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
- &re->re_hi, min, max))
- goto failed;
-
- /* <number> - <number> / ... */
- if (cfs_str2num_check(src->ls_str, src->ls_len,
- &re->re_stride, min, max)) {
- /* <number> - <number> / <number> is parsed */
- goto out;
- }
- }
-
- out:
- *expr = re;
- return 0;
-
- failed:
- LIBCFS_FREE(re, sizeof(*re));
- return -EINVAL;
-}
-
-/**
- * Matches value (\a value) against ranges expression list \a expr_list.
- *
- * \retval 1 if \a value matches
- * \retval 0 otherwise
- */
-int
-cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
-{
- struct cfs_range_expr *expr;
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- if (value >= expr->re_lo && value <= expr->re_hi &&
- ((value - expr->re_lo) % expr->re_stride) == 0)
- return 1;
- }
-
- return 0;
-}
-
-/**
- * Convert expression list (\a expr_list) to an array of all matched values
- *
- * \retval N N is total number of all matched values
- * \retval 0 if expression list is empty
- * \retval < 0 for failure
- */
-int
-cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
-{
- struct cfs_range_expr *expr;
- __u32 *val;
- int count = 0;
- int i;
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (((i - expr->re_lo) % expr->re_stride) == 0)
- count++;
- }
- }
-
- if (count == 0) /* empty expression list */
- return 0;
-
- if (count > max) {
- CERROR("Number of values %d exceeds max allowed %d\n",
-		       count, max);
- return -EINVAL;
- }
-
- LIBCFS_ALLOC(val, sizeof(val[0]) * count);
- if (val == NULL)
- return -ENOMEM;
-
- count = 0;
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- for (i = expr->re_lo; i <= expr->re_hi; i++) {
- if (((i - expr->re_lo) % expr->re_stride) == 0)
- val[count++] = i;
- }
- }
-
- *valpp = val;
- return count;
-}
-EXPORT_SYMBOL(cfs_expr_list_values);
-
-/**
- * Frees cfs_range_expr structures of \a expr_list.
- *
- * \retval none
- */
-void
-cfs_expr_list_free(struct cfs_expr_list *expr_list)
-{
- while (!list_empty(&expr_list->el_exprs)) {
- struct cfs_range_expr *expr;
-
- expr = list_entry(expr_list->el_exprs.next,
- struct cfs_range_expr, re_link);
- list_del(&expr->re_link);
- LIBCFS_FREE(expr, sizeof(*expr));
- }
-
- LIBCFS_FREE(expr_list, sizeof(*expr_list));
-}
-EXPORT_SYMBOL(cfs_expr_list_free);
-
-/**
- * Parses \<cfs_expr_list\> token of the syntax.
- *
- * \retval 1 if \a str parses to \<number\> | \<expr_list\>
- * \retval 0 otherwise
- */
-int
-cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
- struct cfs_expr_list **elpp)
-{
- struct cfs_expr_list *expr_list;
- struct cfs_range_expr *expr;
- struct cfs_lstr src;
- int rc;
-
- LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
- if (expr_list == NULL)
- return -ENOMEM;
-
- src.ls_str = str;
- src.ls_len = len;
-
- INIT_LIST_HEAD(&expr_list->el_exprs);
-
- if (src.ls_str[0] == '[' &&
- src.ls_str[src.ls_len - 1] == ']') {
- src.ls_str++;
- src.ls_len -= 2;
-
- rc = -EINVAL;
- while (src.ls_str != NULL) {
- struct cfs_lstr tok;
-
- if (!cfs_gettok(&src, ',', &tok)) {
- rc = -EINVAL;
- break;
- }
-
- rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
- if (rc != 0)
- break;
-
- list_add_tail(&expr->re_link,
- &expr_list->el_exprs);
- }
- } else {
- rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
- if (rc == 0) {
- list_add_tail(&expr->re_link,
- &expr_list->el_exprs);
- }
- }
-
- if (rc != 0)
- cfs_expr_list_free(expr_list);
- else
- *elpp = expr_list;
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_expr_list_parse);
-
-/**
- * Frees cfs_expr_list structures of \a list.
- *
- * For each struct cfs_expr_list structure found on \a list it frees
- * range_expr list attached to it and frees the cfs_expr_list itself.
- *
- * \retval none
- */
-void
-cfs_expr_list_free_list(struct list_head *list)
-{
- struct cfs_expr_list *el;
-
- while (!list_empty(list)) {
- el = list_entry(list->next,
- struct cfs_expr_list, el_link);
- list_del(&el->el_link);
- cfs_expr_list_free(el);
- }
-}
-
-int
-cfs_ip_addr_parse(char *str, int len, struct list_head *list)
-{
- struct cfs_expr_list *el;
- struct cfs_lstr src;
- int rc;
- int i;
-
- src.ls_str = str;
- src.ls_len = len;
- i = 0;
-
- while (src.ls_str != NULL) {
- struct cfs_lstr res;
-
- if (!cfs_gettok(&src, '.', &res)) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el);
- if (rc != 0)
- goto out;
-
- list_add_tail(&el->el_link, list);
- i++;
- }
-
- if (i == 4)
- return 0;
-
- rc = -EINVAL;
- out:
- cfs_expr_list_free_list(list);
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_ip_addr_parse);
-
-/**
- * Matches address (\a addr) against address set encoded in \a list.
- *
- * \retval 1 if \a addr matches
- * \retval 0 otherwise
- */
-int
-cfs_ip_addr_match(__u32 addr, struct list_head *list)
-{
- struct cfs_expr_list *el;
- int i = 0;
-
- list_for_each_entry_reverse(el, list, el_link) {
- if (!cfs_expr_list_match(addr & 0xff, el))
- return 0;
- addr >>= 8;
- i++;
- }
-
- return i == 4;
-}
-EXPORT_SYMBOL(cfs_ip_addr_match);
-
-void
-cfs_ip_addr_free(struct list_head *list)
-{
- cfs_expr_list_free_list(list);
-}
-EXPORT_SYMBOL(cfs_ip_addr_free);
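
The parser removed above accepts range expressions of the form <number>, <lo>-<hi> and <lo>-<hi>/<stride>, optionally comma-separated inside brackets. A sketch of parsing and expanding one expression, assuming the libcfs header; example_parse_range is a hypothetical caller:

static int example_parse_range(void)
{
	struct cfs_expr_list *el;
	char buf[] = "[0-6/2]";
	__u32 *vals;
	int count;
	int rc;

	rc = cfs_expr_list_parse(buf, strlen(buf), 0, 255, &el);
	if (rc != 0)
		return rc;

	/* expands to the values 0, 2, 4, 6 */
	count = cfs_expr_list_values(el, 16, &vals);
	if (count > 0)
		LIBCFS_FREE(vals, sizeof(vals[0]) * count);

	cfs_expr_list_free(el);
	return count < 0 ? count : 0;
}
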
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
deleted file mode 100644
index 209736454d06..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ /dev/null
@@ -1,1057 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include "../../../include/linux/libcfs/libcfs.h"
-
-#ifdef CONFIG_SMP
-
-/**
- * modparam for setting number of partitions
- *
- * 0 : estimate best value based on cores or NUMA nodes
- * 1 : disable multiple partitions
- * >1 : specify number of partitions
- */
-static int cpu_npartitions;
-module_param(cpu_npartitions, int, 0444);
-MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
-
-/**
- * modparam for setting CPU partition patterns:
- *
- * e.g.: "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is a CPU
- * partition ID, the numbers inside the bracket are processor IDs (core or HT)
- *
- * e.g.: "N 0[0,1] 1[2,3]": the leading 'N' means the numbers in brackets are
- * NUMA node IDs, and the number before each bracket is a CPU partition ID.
- *
- * NB: If user specified cpu_pattern, cpu_npartitions will be ignored
- */
-static char *cpu_pattern = "";
-module_param(cpu_pattern, charp, 0444);
-MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
-
-struct cfs_cpt_data {
- /* serialize hotplug etc */
- spinlock_t cpt_lock;
- /* reserved for hotplug */
- unsigned long cpt_version;
- /* mutex to protect cpt_cpumask */
- struct mutex cpt_mutex;
- /* scratch buffer for set/unset_node */
- cpumask_t *cpt_cpumask;
-};
-
-static struct cfs_cpt_data cpt_data;
-
-static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
-{
- /* return cpumask of cores in the same socket */
- cpumask_copy(mask, topology_core_cpumask(cpu));
-}
-
-/* return cpumask of HTs in the same core */
-static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
-{
- cpumask_copy(mask, topology_sibling_cpumask(cpu));
-}
-
-static void cfs_node_to_cpumask(int node, cpumask_t *mask)
-{
- cpumask_copy(mask, cpumask_of_node(node));
-}
-
-void
-cfs_cpt_table_free(struct cfs_cpt_table *cptab)
-{
- int i;
-
- if (cptab->ctb_cpu2cpt != NULL) {
- LIBCFS_FREE(cptab->ctb_cpu2cpt,
- num_possible_cpus() *
- sizeof(cptab->ctb_cpu2cpt[0]));
- }
-
- for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
- struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
-
- if (part->cpt_nodemask != NULL) {
- LIBCFS_FREE(part->cpt_nodemask,
- sizeof(*part->cpt_nodemask));
- }
-
- if (part->cpt_cpumask != NULL)
- LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
- }
-
- if (cptab->ctb_parts != NULL) {
- LIBCFS_FREE(cptab->ctb_parts,
- cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
- }
-
- if (cptab->ctb_nodemask != NULL)
- LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
- if (cptab->ctb_cpumask != NULL)
- LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
-
- LIBCFS_FREE(cptab, sizeof(*cptab));
-}
-EXPORT_SYMBOL(cfs_cpt_table_free);
-
-struct cfs_cpt_table *
-cfs_cpt_table_alloc(unsigned int ncpt)
-{
- struct cfs_cpt_table *cptab;
- int i;
-
- LIBCFS_ALLOC(cptab, sizeof(*cptab));
- if (cptab == NULL)
- return NULL;
-
- cptab->ctb_nparts = ncpt;
-
- LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
- LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
-
- if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
- goto failed;
-
- LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
- num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
- if (cptab->ctb_cpu2cpt == NULL)
- goto failed;
-
- memset(cptab->ctb_cpu2cpt, -1,
- num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
-
- LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
- if (cptab->ctb_parts == NULL)
- goto failed;
-
- for (i = 0; i < ncpt; i++) {
- struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
-
- LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
- LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
- if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
- goto failed;
- }
-
- spin_lock(&cpt_data.cpt_lock);
- /* Reserved for hotplug */
- cptab->ctb_version = cpt_data.cpt_version;
- spin_unlock(&cpt_data.cpt_lock);
-
- return cptab;
-
- failed:
- cfs_cpt_table_free(cptab);
- return NULL;
-}
-EXPORT_SYMBOL(cfs_cpt_table_alloc);
-
-int
-cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
-{
- char *tmp = buf;
- int rc = 0;
- int i;
- int j;
-
- for (i = 0; i < cptab->ctb_nparts; i++) {
- if (len > 0) {
- rc = snprintf(tmp, len, "%d\t: ", i);
- len -= rc;
- }
-
- if (len <= 0) {
- rc = -EFBIG;
- goto out;
- }
-
- tmp += rc;
- for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
- rc = snprintf(tmp, len, "%d ", j);
- len -= rc;
- if (len <= 0) {
- rc = -EFBIG;
- goto out;
- }
- tmp += rc;
- }
-
- *tmp = '\n';
- tmp++;
- len--;
- }
-
- out:
- if (rc < 0)
- return rc;
-
- return tmp - buf;
-}
-EXPORT_SYMBOL(cfs_cpt_table_print);
-
-int
-cfs_cpt_number(struct cfs_cpt_table *cptab)
-{
- return cptab->ctb_nparts;
-}
-EXPORT_SYMBOL(cfs_cpt_number);
-
-int
-cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- cpumask_weight(cptab->ctb_cpumask) :
- cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
-}
-EXPORT_SYMBOL(cfs_cpt_weight);
-
-int
-cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- cpumask_any_and(cptab->ctb_cpumask,
- cpu_online_mask) < nr_cpu_ids :
- cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
- cpu_online_mask) < nr_cpu_ids;
-}
-EXPORT_SYMBOL(cfs_cpt_online);
-
-cpumask_t *
-cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
-}
-EXPORT_SYMBOL(cfs_cpt_cpumask);
-
-nodemask_t *
-cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
-{
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- return cpt == CFS_CPT_ANY ?
- cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
-}
-EXPORT_SYMBOL(cfs_cpt_nodemask);
-
-int
-cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
- int node;
-
- LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
-
- if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
- CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
- return 0;
- }
-
- if (cptab->ctb_cpu2cpt[cpu] != -1) {
- CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
- cpu, cptab->ctb_cpu2cpt[cpu]);
- return 0;
- }
-
- cptab->ctb_cpu2cpt[cpu] = cpt;
-
- LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
- LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
-
- cpumask_set_cpu(cpu, cptab->ctb_cpumask);
- cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
-
- node = cpu_to_node(cpu);
-
- /* first CPU of @node in this CPT table */
- if (!node_isset(node, *cptab->ctb_nodemask))
- node_set(node, *cptab->ctb_nodemask);
-
- /* first CPU of @node in this partition */
- if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask))
- node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask);
-
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpu);
-
-void
-cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
-{
- int node;
- int i;
-
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- if (cpu < 0 || cpu >= nr_cpu_ids) {
- CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
- return;
- }
-
- if (cpt == CFS_CPT_ANY) {
- /* caller doesn't know the partition ID */
- cpt = cptab->ctb_cpu2cpt[cpu];
- if (cpt < 0) { /* not set in this CPT-table */
- CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n",
-			       cpu, cptab);
- return;
- }
-
- } else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
- CDEBUG(D_INFO,
- "CPU %d is not in cpu-partition %d\n", cpu, cpt);
- return;
- }
-
- LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
- LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));
-
- cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
- cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
- cptab->ctb_cpu2cpt[cpu] = -1;
-
- node = cpu_to_node(cpu);
-
- LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
- LASSERT(node_isset(node, *cptab->ctb_nodemask));
-
- for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) {
- /* this CPT has other CPU belonging to this node? */
- if (cpu_to_node(i) == node)
- break;
- }
-
- if (i >= nr_cpu_ids)
- node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);
-
- for_each_cpu(i, cptab->ctb_cpumask) {
- /* this CPT-table has other CPU belonging to this node? */
- if (cpu_to_node(i) == node)
- break;
- }
-
- if (i >= nr_cpu_ids)
- node_clear(node, *cptab->ctb_nodemask);
-
- return;
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpu);
-
-int
-cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
- int i;
-
- if (cpumask_weight(mask) == 0 ||
- cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
- CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
- cpt);
- return 0;
- }
-
- for_each_cpu(i, mask) {
- if (!cfs_cpt_set_cpu(cptab, cpt, i))
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_cpumask);
-
-void
-cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
-{
- int i;
-
- for_each_cpu(i, mask)
- cfs_cpt_unset_cpu(cptab, cpt, i);
-}
-EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
-
-int
-cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
- cpumask_t *mask;
- int rc;
-
- if (node < 0 || node >= MAX_NUMNODES) {
- CDEBUG(D_INFO,
- "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
- return 0;
- }
-
- mutex_lock(&cpt_data.cpt_mutex);
-
- mask = cpt_data.cpt_cpumask;
- cfs_node_to_cpumask(node, mask);
-
- rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
-
- mutex_unlock(&cpt_data.cpt_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_cpt_set_node);
-
-void
-cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
-{
- cpumask_t *mask;
-
- if (node < 0 || node >= MAX_NUMNODES) {
- CDEBUG(D_INFO,
- "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
- return;
- }
-
- mutex_lock(&cpt_data.cpt_mutex);
-
- mask = cpt_data.cpt_cpumask;
- cfs_node_to_cpumask(node, mask);
-
- cfs_cpt_unset_cpumask(cptab, cpt, mask);
-
- mutex_unlock(&cpt_data.cpt_mutex);
-}
-EXPORT_SYMBOL(cfs_cpt_unset_node);
-
-int
-cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
- int i;
-
- for_each_node_mask(i, *mask) {
- if (!cfs_cpt_set_node(cptab, cpt, i))
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(cfs_cpt_set_nodemask);
-
-void
-cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
-{
- int i;
-
- for_each_node_mask(i, *mask)
- cfs_cpt_unset_node(cptab, cpt, i);
-}
-EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
-
-void
-cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
-{
- int last;
- int i;
-
- if (cpt == CFS_CPT_ANY) {
- last = cptab->ctb_nparts - 1;
- cpt = 0;
- } else {
- last = cpt;
- }
-
- for (; cpt <= last; cpt++) {
- for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask)
- cfs_cpt_unset_cpu(cptab, cpt, i);
- }
-}
-EXPORT_SYMBOL(cfs_cpt_clear);
-
-int
-cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
-{
- nodemask_t *mask;
- int weight;
- int rotor;
- int node;
-
- /* convert CPU partition ID to HW node id */
-
- if (cpt < 0 || cpt >= cptab->ctb_nparts) {
- mask = cptab->ctb_nodemask;
- rotor = cptab->ctb_spread_rotor++;
- } else {
- mask = cptab->ctb_parts[cpt].cpt_nodemask;
- rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
- }
-
- weight = nodes_weight(*mask);
- LASSERT(weight > 0);
-
- rotor %= weight;
-
- for_each_node_mask(node, *mask) {
- if (rotor-- == 0)
- return node;
- }
-
- LBUG();
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_spread_node);
-
-int
-cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
-{
- int cpu = smp_processor_id();
- int cpt = cptab->ctb_cpu2cpt[cpu];
-
- if (cpt < 0) {
- if (!remap)
- return cpt;
-
-		/* don't return a negative value, for the safety of upper
-		 * layers; instead, map the unknown CPU to a valid partition ID */
- cpt = cpu % cptab->ctb_nparts;
- }
-
- return cpt;
-}
-EXPORT_SYMBOL(cfs_cpt_current);
-
-int
-cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
-{
- LASSERT(cpu >= 0 && cpu < nr_cpu_ids);
-
- return cptab->ctb_cpu2cpt[cpu];
-}
-EXPORT_SYMBOL(cfs_cpt_of_cpu);
-
-int
-cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
-{
- cpumask_t *cpumask;
- nodemask_t *nodemask;
- int rc;
- int i;
-
- LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
-
- if (cpt == CFS_CPT_ANY) {
- cpumask = cptab->ctb_cpumask;
- nodemask = cptab->ctb_nodemask;
- } else {
- cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
- nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
- }
-
- if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
- CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
- cpt);
- return -EINVAL;
- }
-
- for_each_online_cpu(i) {
- if (cpumask_test_cpu(i, cpumask))
- continue;
-
- rc = set_cpus_allowed_ptr(current, cpumask);
- set_mems_allowed(*nodemask);
- if (rc == 0)
- schedule(); /* switch to allowed CPU */
-
- return rc;
- }
-
- /* don't need to set affinity because all online CPUs are covered */
- return 0;
-}
-EXPORT_SYMBOL(cfs_cpt_bind);
-
-/**
- * Choose at most \a number CPUs from \a node and set them in \a cpt.
- * We always prefer to choose CPUs in the same core/socket.
- */
-static int
-cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
- cpumask_t *node, int number)
-{
- cpumask_t *socket = NULL;
- cpumask_t *core = NULL;
- int rc = 0;
- int cpu;
-
- LASSERT(number > 0);
-
- if (number >= cpumask_weight(node)) {
- while (!cpumask_empty(node)) {
- cpu = cpumask_first(node);
-
- rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
- if (!rc)
- return -EINVAL;
- cpumask_clear_cpu(cpu, node);
- }
- return 0;
- }
-
- /* allocate scratch buffer */
- LIBCFS_ALLOC(socket, cpumask_size());
- LIBCFS_ALLOC(core, cpumask_size());
- if (socket == NULL || core == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- while (!cpumask_empty(node)) {
- cpu = cpumask_first(node);
-
- /* get cpumask for cores in the same socket */
- cfs_cpu_core_siblings(cpu, socket);
- cpumask_and(socket, socket, node);
-
- LASSERT(!cpumask_empty(socket));
-
- while (!cpumask_empty(socket)) {
- int i;
-
- /* get cpumask for hts in the same core */
- cfs_cpu_ht_siblings(cpu, core);
- cpumask_and(core, core, node);
-
- LASSERT(!cpumask_empty(core));
-
- for_each_cpu(i, core) {
- cpumask_clear_cpu(i, socket);
- cpumask_clear_cpu(i, node);
-
- rc = cfs_cpt_set_cpu(cptab, cpt, i);
- if (!rc) {
- rc = -EINVAL;
- goto out;
- }
-
- if (--number == 0)
- goto out;
- }
- cpu = cpumask_first(socket);
- }
- }
-
- out:
- if (socket != NULL)
- LIBCFS_FREE(socket, cpumask_size());
- if (core != NULL)
- LIBCFS_FREE(core, cpumask_size());
- return rc;
-}
-
-#define CPT_WEIGHT_MIN 4u
-
-static unsigned int
-cfs_cpt_num_estimate(void)
-{
- unsigned nnode = num_online_nodes();
- unsigned ncpu = num_online_cpus();
- unsigned ncpt;
-
- if (ncpu <= CPT_WEIGHT_MIN) {
- ncpt = 1;
- goto out;
- }
-
-	/* generate a reasonable number of CPU partitions based on the total
-	 * number of CPUs; the preferred N should be a power of 2 and satisfy:
-	 * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
- for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
- ;
-
- if (ncpt <= nnode) { /* fat numa system */
- while (nnode > ncpt)
- nnode >>= 1;
-
- } else { /* ncpt > nnode */
- while ((nnode << 1) <= ncpt)
- nnode <<= 1;
- }
-
- ncpt = nnode;
-
- out:
-#if (BITS_PER_LONG == 32)
-	/* configuring many CPU partitions on a 32-bit system could
-	 * consume too much memory */
- ncpt = min(2U, ncpt);
-#endif
- while (ncpu % ncpt != 0)
- ncpt--; /* worst case is 1 */
-
- return ncpt;
-}
-
-static struct cfs_cpt_table *
-cfs_cpt_table_create(int ncpt)
-{
- struct cfs_cpt_table *cptab = NULL;
- cpumask_t *mask = NULL;
- int cpt = 0;
- int num;
- int rc;
- int i;
-
- rc = cfs_cpt_num_estimate();
- if (ncpt <= 0)
- ncpt = rc;
-
- if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
- CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n",
- ncpt, rc);
- }
-
- if (num_online_cpus() % ncpt != 0) {
- CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
- (int)num_online_cpus(), ncpt);
- goto failed;
- }
-
- cptab = cfs_cpt_table_alloc(ncpt);
- if (cptab == NULL) {
- CERROR("Failed to allocate CPU map(%d)\n", ncpt);
- goto failed;
- }
-
- num = num_online_cpus() / ncpt;
- if (num == 0) {
- CERROR("CPU changed while setting CPU partition\n");
- goto failed;
- }
-
- LIBCFS_ALLOC(mask, cpumask_size());
- if (mask == NULL) {
- CERROR("Failed to allocate scratch cpumask\n");
- goto failed;
- }
-
- for_each_online_node(i) {
- cfs_node_to_cpumask(i, mask);
-
- while (!cpumask_empty(mask)) {
- struct cfs_cpu_partition *part;
- int n;
-
- if (cpt >= ncpt)
- goto failed;
-
- part = &cptab->ctb_parts[cpt];
-
- n = num - cpumask_weight(part->cpt_cpumask);
- LASSERT(n > 0);
-
- rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
- if (rc < 0)
- goto failed;
-
- LASSERT(num >= cpumask_weight(part->cpt_cpumask));
- if (num == cpumask_weight(part->cpt_cpumask))
- cpt++;
- }
- }
-
- if (cpt != ncpt ||
- num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
- CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
- cptab->ctb_nparts, num, cpt,
- cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
- goto failed;
- }
-
- LIBCFS_FREE(mask, cpumask_size());
-
- return cptab;
-
- failed:
- CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
- ncpt, num_online_nodes(), num_online_cpus());
-
- if (mask != NULL)
- LIBCFS_FREE(mask, cpumask_size());
-
- if (cptab != NULL)
- cfs_cpt_table_free(cptab);
-
- return NULL;
-}
-
-static struct cfs_cpt_table *
-cfs_cpt_table_create_pattern(char *pattern)
-{
- struct cfs_cpt_table *cptab;
- char *str = pattern;
- int node = 0;
- int high;
- int ncpt;
- int c;
-
- for (ncpt = 0;; ncpt++) { /* quick scan bracket */
- str = strchr(str, '[');
- if (str == NULL)
- break;
- str++;
- }
-
- str = cfs_trimwhite(pattern);
- if (*str == 'n' || *str == 'N') {
- pattern = str + 1;
- node = 1;
- }
-
- if (ncpt == 0 ||
- (node && ncpt > num_online_nodes()) ||
- (!node && ncpt > num_online_cpus())) {
- CERROR("Invalid pattern %s, or too many partitions %d\n",
- pattern, ncpt);
- return NULL;
- }
-
- high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
-
- cptab = cfs_cpt_table_alloc(ncpt);
- if (cptab == NULL) {
- CERROR("Failed to allocate cpu partition table\n");
- return NULL;
- }
-
- for (str = cfs_trimwhite(pattern), c = 0;; c++) {
- struct cfs_range_expr *range;
- struct cfs_expr_list *el;
- char *bracket = strchr(str, '[');
- int cpt;
- int rc;
- int i;
- int n;
-
- if (bracket == NULL) {
- if (*str != 0) {
- CERROR("Invalid pattern %s\n", str);
- goto failed;
- } else if (c != ncpt) {
- CERROR("expect %d partitions but found %d\n",
- ncpt, c);
- goto failed;
- }
- break;
- }
-
- if (sscanf(str, "%d%n", &cpt, &n) < 1) {
- CERROR("Invalid cpu pattern %s\n", str);
- goto failed;
- }
-
- if (cpt < 0 || cpt >= ncpt) {
- CERROR("Invalid partition id %d, total partitions %d\n",
- cpt, ncpt);
- goto failed;
- }
-
- if (cfs_cpt_weight(cptab, cpt) != 0) {
- CERROR("Partition %d has already been set.\n", cpt);
- goto failed;
- }
-
- str = cfs_trimwhite(str + n);
- if (str != bracket) {
- CERROR("Invalid pattern %s\n", str);
- goto failed;
- }
-
- bracket = strchr(str, ']');
- if (bracket == NULL) {
- CERROR("missing right bracket for cpt %d, %s\n",
- cpt, str);
- goto failed;
- }
-
- if (cfs_expr_list_parse(str, (bracket - str) + 1,
- 0, high, &el) != 0) {
- CERROR("Can't parse number range: %s\n", str);
- goto failed;
- }
-
- list_for_each_entry(range, &el->el_exprs, re_link) {
- for (i = range->re_lo; i <= range->re_hi; i++) {
- if ((i - range->re_lo) % range->re_stride != 0)
- continue;
-
- rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
- cfs_cpt_set_cpu(cptab, cpt, i);
- if (!rc) {
- cfs_expr_list_free(el);
- goto failed;
- }
- }
- }
-
- cfs_expr_list_free(el);
-
- if (!cfs_cpt_online(cptab, cpt)) {
- CERROR("No online CPU is found on partition %d\n", cpt);
- goto failed;
- }
-
- str = cfs_trimwhite(bracket + 1);
- }
-
- return cptab;
-
- failed:
- cfs_cpt_table_free(cptab);
- return NULL;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int
-cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- bool warn;
-
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- spin_lock(&cpt_data.cpt_lock);
- cpt_data.cpt_version++;
- spin_unlock(&cpt_data.cpt_lock);
- default:
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
- CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
- cpu, action);
- break;
- }
-
- mutex_lock(&cpt_data.cpt_mutex);
- /* if all HTs in a core are offline, it may break affinity */
- cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
- warn = cpumask_any_and(cpt_data.cpt_cpumask,
- cpu_online_mask) >= nr_cpu_ids;
- mutex_unlock(&cpt_data.cpt_mutex);
- CDEBUG(warn ? D_WARNING : D_INFO,
- "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
- cpu, action);
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block cfs_cpu_notifier = {
- .notifier_call = cfs_cpu_notify,
- .priority = 0
-};
-
-#endif
-
-void
-cfs_cpu_fini(void)
-{
- if (cfs_cpt_table != NULL)
- cfs_cpt_table_free(cfs_cpt_table);
-
-#ifdef CONFIG_HOTPLUG_CPU
- unregister_hotcpu_notifier(&cfs_cpu_notifier);
-#endif
- if (cpt_data.cpt_cpumask != NULL)
- LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
-}
-
-int
-cfs_cpu_init(void)
-{
- LASSERT(cfs_cpt_table == NULL);
-
- memset(&cpt_data, 0, sizeof(cpt_data));
-
- LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
- if (cpt_data.cpt_cpumask == NULL) {
- CERROR("Failed to allocate scratch buffer\n");
- return -1;
- }
-
- spin_lock_init(&cpt_data.cpt_lock);
- mutex_init(&cpt_data.cpt_mutex);
-
-#ifdef CONFIG_HOTPLUG_CPU
- register_hotcpu_notifier(&cfs_cpu_notifier);
-#endif
-
- if (*cpu_pattern != 0) {
- cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
- if (cfs_cpt_table == NULL) {
- CERROR("Failed to create cptab from pattern %s\n",
- cpu_pattern);
- goto failed;
- }
-
- } else {
- cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
- if (cfs_cpt_table == NULL) {
- CERROR("Failed to create ptable with npartitions %d\n",
- cpu_npartitions);
- goto failed;
- }
- }
-
- spin_lock(&cpt_data.cpt_lock);
- if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
- spin_unlock(&cpt_data.cpt_lock);
- CERROR("CPU hotplug/unplug during setup\n");
- goto failed;
- }
- spin_unlock(&cpt_data.cpt_lock);
-
- LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n",
- num_online_cpus(), cfs_cpt_number(cfs_cpt_table));
- return 0;
-
- failed:
- cfs_cpu_fini();
- return -1;
-}
-
-#endif
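
The SMP implementation above builds the global cfs_cpt_table either automatically (cpu_npartitions) or from the cpu_pattern module parameter, and exposes helpers for NUMA-aware placement. A sketch of how a consumer might use it, assuming the libcfs header; example_bind_to_cpt is a hypothetical caller:

static int example_bind_to_cpt(int cpt)
{
	/* pick the next NUMA node of this partition, e.g. to feed a
	 * NUMA-aware allocator */
	int node = cfs_cpt_spread_node(cfs_cpt_table, cpt);

	(void)node;	/* a real caller would pass this on */

	/* restrict the current thread's CPU and memory affinity to 'cpt' */
	return cfs_cpt_bind(cfs_cpt_table, cpt);
}
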
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
deleted file mode 100644
index 5e185fa5942a..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- */
-
-/*
- * These are crypto API shash wrappers around zlib_adler32.
- */
-
-#include <linux/module.h>
-#include <linux/zutil.h>
-#include <crypto/internal/hash.h>
-#include "linux-crypto.h"
-
-#define CHKSUM_BLOCK_SIZE 1
-#define CHKSUM_DIGEST_SIZE 4
-
-static u32 __adler32(u32 cksum, unsigned char const *p, size_t len)
-{
- return zlib_adler32(cksum, p, len);
-}
-
-static int adler32_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = 1;
-
- return 0;
-}
-
-static int adler32_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- *mctx = *(u32 *)key;
- return 0;
-}
-
-static int adler32_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *cksump = shash_desc_ctx(desc);
-
- *cksump = *mctx;
-
- return 0;
-}
-
-static int adler32_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- u32 *cksump = shash_desc_ctx(desc);
-
- *cksump = __adler32(*cksump, data, len);
- return 0;
-}
-static int __adler32_finup(u32 *cksump, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(u32 *)out = __adler32(*cksump, data, len);
- return 0;
-}
-
-static int adler32_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __adler32_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int adler32_final(struct shash_desc *desc, u8 *out)
-{
- u32 *cksump = shash_desc_ctx(desc);
-
- *(u32 *)out = *cksump;
- return 0;
-}
-
-static int adler32_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len,
- out);
-}
-static struct shash_alg alg = {
- .setkey = adler32_setkey,
- .init = adler32_init,
- .update = adler32_update,
- .final = adler32_final,
- .finup = adler32_finup,
- .digest = adler32_digest,
- .descsize = sizeof(u32),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "adler32",
- .cra_driver_name = "adler32-zlib",
- .cra_priority = 100,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(u32),
- .cra_module = THIS_MODULE,
- .cra_init = adler32_cra_init,
- }
-};
-
-
-int cfs_crypto_adler32_register(void)
-{
- return crypto_register_shash(&alg);
-}
-
-void cfs_crypto_adler32_unregister(void)
-{
- crypto_unregister_shash(&alg);
-}
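
For reference, the checksum the wrapper above delegates to zlib_adler32() is small enough to sketch standalone. The userspace version below is only an illustration (not driver code); it keeps the same two running sums modulo 65521 that the shash ops store in their single u32 context, and checks a well-known test vector:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ADLER_MOD 65521U	/* largest prime below 2^16 */

/* Plain Adler-32, seeded with 1 just as adler32_cra_init() seeds the tfm. */
static uint32_t adler32_ref(uint32_t seed, const unsigned char *p, size_t len)
{
	uint32_t s1 = seed & 0xffff;		/* low word: running byte sum */
	uint32_t s2 = (seed >> 16) & 0xffff;	/* high word: sum of s1 values */

	while (len--) {
		s1 = (s1 + *p++) % ADLER_MOD;
		s2 = (s2 + s1) % ADLER_MOD;
	}
	return (s2 << 16) | s1;
}

int main(void)
{
	const char *msg = "Wikipedia";

	/* Known test vector: Adler-32("Wikipedia") == 0x11e60398 */
	printf("%08x\n", adler32_ref(1, (const unsigned char *)msg, strlen(msg)));
	return 0;
}
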
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
deleted file mode 100644
index 328a27860411..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include "../../../include/linux/libcfs/libcfs.h"
-#include "linux-crypto.h"
-/**
- * Array of hash algorithm speeds in MBytes per second
- */
-static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
-
-
-
-static int cfs_crypto_hash_alloc(unsigned char alg_id,
- const struct cfs_crypto_hash_type **type,
- struct hash_desc *desc, unsigned char *key,
- unsigned int key_len)
-{
- int err = 0;
-
- *type = cfs_crypto_hash_type(alg_id);
-
- if (*type == NULL) {
- CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
- alg_id, CFS_HASH_ALG_MAX);
- return -EINVAL;
- }
- desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);
-
- if (desc->tfm == NULL)
- return -EINVAL;
-
- if (IS_ERR(desc->tfm)) {
- CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
- (*type)->cht_name);
- return PTR_ERR(desc->tfm);
- }
-
- desc->flags = 0;
-
-	/** Shash has different initialization logic than digest:
-	 * shash: crypto_hash_setkey, crypto_hash_init
-	 * digest: crypto_digest_init, crypto_digest_setkey
-	 * Skip this step for digest, because we use the shash logic in
-	 * cfs_crypto_hash_alloc.
-	 */
- if (key != NULL)
- err = crypto_hash_setkey(desc->tfm, key, key_len);
- else if ((*type)->cht_key != 0)
- err = crypto_hash_setkey(desc->tfm,
- (unsigned char *)&((*type)->cht_key),
- (*type)->cht_size);
-
- if (err != 0) {
- crypto_free_hash(desc->tfm);
- return err;
- }
-
- CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
- (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
- (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
- cfs_crypto_hash_speeds[alg_id]);
-
- return crypto_hash_init(desc);
-}
-
-int cfs_crypto_hash_digest(unsigned char alg_id,
- const void *buf, unsigned int buf_len,
- unsigned char *key, unsigned int key_len,
- unsigned char *hash, unsigned int *hash_len)
-{
- struct scatterlist sl;
- struct hash_desc hdesc;
- int err;
- const struct cfs_crypto_hash_type *type;
-
- if (buf == NULL || buf_len == 0 || hash_len == NULL)
- return -EINVAL;
-
- err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len);
- if (err != 0)
- return err;
-
- if (hash == NULL || *hash_len < type->cht_size) {
- *hash_len = type->cht_size;
- crypto_free_hash(hdesc.tfm);
- return -ENOSPC;
- }
- sg_init_one(&sl, buf, buf_len);
-
- hdesc.flags = 0;
- err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
- crypto_free_hash(hdesc.tfm);
-
- return err;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_digest);
-
-struct cfs_crypto_hash_desc *
- cfs_crypto_hash_init(unsigned char alg_id,
- unsigned char *key, unsigned int key_len)
-{
-
- struct hash_desc *hdesc;
- int err;
- const struct cfs_crypto_hash_type *type;
-
- hdesc = kmalloc(sizeof(*hdesc), 0);
- if (hdesc == NULL)
- return ERR_PTR(-ENOMEM);
-
- err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
-
- if (err) {
- kfree(hdesc);
- return ERR_PTR(err);
- }
- return (struct cfs_crypto_hash_desc *)hdesc;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_init);
-
-int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
- struct page *page, unsigned int offset,
- unsigned int len)
-{
- struct scatterlist sl;
-
- sg_init_table(&sl, 1);
- sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
-
- return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
-}
-EXPORT_SYMBOL(cfs_crypto_hash_update_page);
-
-int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
- const void *buf, unsigned int buf_len)
-{
- struct scatterlist sl;
-
- sg_init_one(&sl, buf, buf_len);
-
- return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
-}
-EXPORT_SYMBOL(cfs_crypto_hash_update);
-
-/* If hash_len pointer is NULL - destroy descriptor. */
-int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
- unsigned char *hash, unsigned int *hash_len)
-{
- int err;
- int size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
-
- if (hash_len == NULL) {
- crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- kfree(hdesc);
- return 0;
- }
- if (hash == NULL || *hash_len < size) {
- *hash_len = size;
- return -ENOSPC;
- }
- err = crypto_hash_final((struct hash_desc *) hdesc, hash);
-
- if (err < 0) {
-		/* Maybe the caller can fix the error */
- return err;
- }
- crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- kfree(hdesc);
- return err;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_final);
-
-static void cfs_crypto_performance_test(unsigned char alg_id,
- const unsigned char *buf,
- unsigned int buf_len)
-{
- unsigned long start, end;
- int bcount, err = 0;
- int sec = 1; /* do test only 1 sec */
- unsigned char hash[64];
- unsigned int hash_len = 64;
-
- for (start = jiffies, end = start + sec * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0,
- hash, &hash_len);
- if (err)
- break;
-
- }
- end = jiffies;
-
- if (err) {
- cfs_crypto_hash_speeds[alg_id] = -1;
- CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n",
- cfs_crypto_hash_name(alg_id), err);
- } else {
- unsigned long tmp;
- tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
- 1000) / (1024 * 1024);
- cfs_crypto_hash_speeds[alg_id] = (int)tmp;
- }
- CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n",
- cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]);
-}
-
-int cfs_crypto_hash_speed(unsigned char hash_alg)
-{
- if (hash_alg < CFS_HASH_ALG_MAX)
- return cfs_crypto_hash_speeds[hash_alg];
- else
- return -1;
-}
-EXPORT_SYMBOL(cfs_crypto_hash_speed);
-
-/**
- * Run a performance test for all hash algorithms.
- */
-static int cfs_crypto_test_hashes(void)
-{
- unsigned char i;
- unsigned char *data;
- unsigned int j;
-	/* Data block size for testing hashes. The maximum
-	 * kmalloc size on the 2.6.18 kernel is 128K. */
- unsigned int data_len = 1 * 128 * 1024;
-
- data = kmalloc(data_len, 0);
- if (data == NULL) {
- CERROR("Failed to allocate mem\n");
- return -ENOMEM;
- }
-
- for (j = 0; j < data_len; j++)
- data[j] = j & 0xff;
-
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- cfs_crypto_performance_test(i, data, data_len);
-
- kfree(data);
- return 0;
-}
-
-static int adler32;
-
-int cfs_crypto_register(void)
-{
- request_module("crc32c");
-
- adler32 = cfs_crypto_adler32_register();
-
- /* check all algorithms and do performance test */
- cfs_crypto_test_hashes();
- return 0;
-}
-void cfs_crypto_unregister(void)
-{
- if (adler32 == 0)
- cfs_crypto_adler32_unregister();
-
- return;
-}
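
The MB/s figure cfs_crypto_performance_test() stores in cfs_crypto_hash_speeds[] is simply bytes digested over elapsed milliseconds, rescaled to megabytes per second. A standalone sketch of the arithmetic with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical run: 2000 digests of a 128 KiB buffer in 1000 ms. */
	unsigned long bcount = 2000, buf_len = 128 * 1024, msecs = 1000;
	unsigned long mbs = ((bcount * buf_len / msecs) * 1000) / (1024 * 1024);

	printf("%lu MB/s\n", mbs);	/* prints 250 MB/s */
	return 0;
}
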
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h
deleted file mode 100644
index 18e8cd4d8758..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h
+++ /dev/null
@@ -1,29 +0,0 @@
- /*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/**
- * Functions for start/stop shash adler32 algorithm.
- */
-int cfs_crypto_adler32_register(void);
-void cfs_crypto_adler32_unregister(void);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
deleted file mode 100644
index 277f6b890e09..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/linux/linux-curproc.c
- *
- * Lustre curproc API implementation for Linux kernel
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-
-#include <linux/sched.h>
-#include <linux/fs_struct.h>
-
-#include <linux/compat.h>
-#include <linux/thread_info.h>
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../../include/linux/libcfs/libcfs.h"
-
-/*
- * Implementation of cfs_curproc API (see portals/include/libcfs/curproc.h)
- * for Linux kernel.
- */
-
-void cfs_cap_raise(cfs_cap_t cap)
-{
- struct cred *cred;
-
- cred = prepare_creds();
- if (cred) {
- cap_raise(cred->cap_effective, cap);
- commit_creds(cred);
- }
-}
-
-void cfs_cap_lower(cfs_cap_t cap)
-{
- struct cred *cred;
-
- cred = prepare_creds();
- if (cred) {
- cap_lower(cred->cap_effective, cap);
- commit_creds(cred);
- }
-}
-
-int cfs_cap_raised(cfs_cap_t cap)
-{
- return cap_raised(current_cap(), cap);
-}
-
-static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
-{
- /* XXX lost high byte */
- *cap = kcap.cap[0];
-}
-
-cfs_cap_t cfs_curproc_cap_pack(void)
-{
- cfs_cap_t cap;
- cfs_kernel_cap_pack(current_cap(), &cap);
- return cap;
-}
-
-EXPORT_SYMBOL(cfs_cap_raise);
-EXPORT_SYMBOL(cfs_cap_lower);
-EXPORT_SYMBOL(cfs_cap_raised);
-EXPORT_SYMBOL(cfs_curproc_cap_pack);
-
-/*
- * Local variables:
- * c-indentation-style: "K&R"
- * c-basic-offset: 8
- * tab-width: 8
- * fill-column: 80
- * scroll-step: 1
- * End:
- */
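
As the XXX note in cfs_kernel_cap_pack() above hints, packing the kernel capability set into a 32-bit cfs_cap_t keeps only cap[0]; every capability numbered 32 or higher is silently dropped (a whole word, not just a "high byte"). A hedged standalone sketch of that truncation, with a stand-in struct rather than the real kernel_cap_t:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for kernel_cap_t: two 32-bit words of capability bits. */
struct kcap { uint32_t cap[2]; };

/* Mirrors cfs_kernel_cap_pack(): only the low word survives. */
static uint32_t cap_pack(struct kcap k)
{
	return k.cap[0];
}

int main(void)
{
	/* Bit 21 (CAP_SYS_ADMIN) in the low word, plus one bit in the high word. */
	struct kcap k = { { 1u << 21, 1u << 4 } };

	printf("packed = %#x (the high-word bit is lost)\n", cap_pack(k));
	return 0;
}
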
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
deleted file mode 100644
index 8689ea757c99..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/linux/linux-debug.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/notifier.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-
-# define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../../include/linux/libcfs/libcfs.h"
-
-#include "../tracefile.h"
-
-#include <linux/kallsyms.h>
-
-char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
-char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
-
-/**
- * Upcall function once a Lustre log has been dumped.
- *
- * \param file path of the dumped log
- */
-void libcfs_run_debug_log_upcall(char *file)
-{
- char *argv[3];
- int rc;
- char *envp[] = {
- "HOME=/",
- "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
- NULL};
-
- argv[0] = lnet_debug_log_upcall;
-
- LASSERTF(file != NULL, "called on a null filename\n");
- argv[1] = file; /* only need to pass the path of the file */
-
- argv[2] = NULL;
-
- rc = call_usermodehelper(argv[0], argv, envp, 1);
- if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n",
- rc, argv[0], argv[1]);
- } else {
- CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
- argv[0], argv[1]);
- }
-}
-
-void libcfs_run_upcall(char **argv)
-{
- int rc;
- int argc;
- char *envp[] = {
- "HOME=/",
- "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
- NULL};
-
- argv[0] = lnet_upcall;
- argc = 1;
- while (argv[argc] != NULL)
- argc++;
-
- LASSERT(argc >= 2);
-
- rc = call_usermodehelper(argv[0], argv, envp, 1);
- if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n",
- rc, argv[0], argv[1],
- argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
- argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
- argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
- argc < 6 ? "" : ",...");
- } else {
- CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n",
- argv[0], argv[1],
- argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
- argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
- argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
- argc < 6 ? "" : ",...");
- }
-}
-
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
-{
- char *argv[6];
- char buf[32];
-
- snprintf(buf, sizeof(buf), "%d", msgdata->msg_line);
-
- argv[1] = "LBUG";
- argv[2] = (char *)msgdata->msg_file;
- argv[3] = (char *)msgdata->msg_fn;
- argv[4] = buf;
- argv[5] = NULL;
-
- libcfs_run_upcall (argv);
-}
-
-/* coverity[+kill] */
-void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
-{
- libcfs_catastrophe = 1;
- libcfs_debug_msg(msgdata, "LBUG\n");
-
- if (in_interrupt()) {
- panic("LBUG in interrupt.\n");
- /* not reached */
- }
-
- dump_stack();
- if (!libcfs_panic_on_lbug)
- libcfs_debug_dumplog();
- libcfs_run_lbug_upcall(msgdata);
- if (libcfs_panic_on_lbug)
- panic("LBUG");
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- while (1)
- schedule();
-}
-
-static int panic_notifier(struct notifier_block *self, unsigned long unused1,
- void *unused2)
-{
- if (libcfs_panic_in_progress)
- return 0;
-
- libcfs_panic_in_progress = 1;
- mb();
-
- return 0;
-}
-
-static struct notifier_block libcfs_panic_notifier = {
- .notifier_call = panic_notifier,
- .next = NULL,
- .priority = 10000,
-};
-
-void libcfs_register_panic_notifier(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list, &libcfs_panic_notifier);
-}
-
-void libcfs_unregister_panic_notifier(void)
-{
- atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier);
-}
-
-EXPORT_SYMBOL(libcfs_run_upcall);
-EXPORT_SYMBOL(libcfs_run_lbug_upcall);
-EXPORT_SYMBOL(lbug_with_loc);
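
libcfs_run_upcall() above hands a fixed argv/envp pair to call_usermodehelper() so the helper always runs with a known, minimal environment. A userspace analogue of the same pattern, offered only as an illustration (the helper path is arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

/* Exec a helper with a fixed, minimal environment and wait for it. */
static int run_upcall(char *const argv[])
{
	char *const envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	pid_t pid = fork();

	if (pid < 0)
		return -1;
	if (pid == 0) {
		execve(argv[0], argv, envp);
		_exit(127);			/* exec failed */
	}
	return waitpid(pid, NULL, 0) < 0 ? -1 : 0;
}

int main(void)
{
	char *argv[] = { "/bin/echo", "LBUG", "somefile.c", "some_fn", "42", NULL };

	return run_upcall(argv) ? EXIT_FAILURE : EXIT_SUCCESS;
}
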
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c
deleted file mode 100644
index 025e2f0028ab..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- */
-/*
- * This file provides a memory allocation primitive for Lustre that
- * falls back to vmalloc allocations when regular kernel allocations
- * fail due to size or system memory fragmentation.
- *
- * Author: Oleg Drokin <green@linuxhacker.ru>
- *
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Seagate Technology.
- */
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "../../../include/linux/libcfs/libcfs.h"
-
-void *libcfs_kvzalloc(size_t size, gfp_t flags)
-{
- void *ret;
-
- ret = kzalloc(size, flags | __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
- return ret;
-}
-EXPORT_SYMBOL(libcfs_kvzalloc);
-
-void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
- gfp_t flags)
-{
- void *ret;
-
- ret = kzalloc_node(size, flags | __GFP_NOWARN,
- cfs_cpt_spread_node(cptab, cpt));
- if (!ret) {
- WARN_ON(!(flags & (__GFP_FS|__GFP_HIGH)));
- ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));
- }
-
- return ret;
-}
-EXPORT_SYMBOL(libcfs_kvzalloc_cpt);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
deleted file mode 100644
index 64a984b42845..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../../include/linux/libcfs/libcfs.h"
-
-#define LNET_MINOR 240
-
-int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
-{
- struct libcfs_ioctl_hdr *hdr;
- struct libcfs_ioctl_data *data;
- int orig_len;
-
- hdr = (struct libcfs_ioctl_hdr *)buf;
- data = (struct libcfs_ioctl_data *)buf;
-
- if (copy_from_user(buf, arg, sizeof(*hdr)))
- return -EFAULT;
-
- if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
- CERROR("PORTALS: version mismatch kernel vs application\n");
- return -EINVAL;
- }
-
- if (hdr->ioc_len >= end - buf) {
- CERROR("PORTALS: user buffer exceeds kernel buffer\n");
- return -EINVAL;
- }
-
-
- if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
- CERROR("PORTALS: user buffer too small for ioctl\n");
- return -EINVAL;
- }
-
- orig_len = hdr->ioc_len;
- if (copy_from_user(buf, arg, hdr->ioc_len))
- return -EFAULT;
- if (orig_len != data->ioc_len)
- return -EINVAL;
-
- if (libcfs_ioctl_is_invalid(data)) {
- CERROR("PORTALS: ioctl not correctly formatted\n");
- return -EINVAL;
- }
-
- if (data->ioc_inllen1)
- data->ioc_inlbuf1 = &data->ioc_bulk[0];
-
- if (data->ioc_inllen2)
- data->ioc_inlbuf2 = &data->ioc_bulk[0] +
- cfs_size_round(data->ioc_inllen1);
-
- return 0;
-}
-
-int libcfs_ioctl_popdata(void *arg, void *data, int size)
-{
- if (copy_to_user((char *)arg, data, size))
- return -EFAULT;
- return 0;
-}
-
-static int
-libcfs_psdev_open(struct inode *inode, struct file *file)
-{
- struct libcfs_device_userstate **pdu = NULL;
- int rc = 0;
-
- if (!inode)
- return -EINVAL;
- pdu = (struct libcfs_device_userstate **)&file->private_data;
- if (libcfs_psdev_ops.p_open != NULL)
- rc = libcfs_psdev_ops.p_open(0, (void *)pdu);
- else
- return -EPERM;
- return rc;
-}
-
-/* called when closing /dev/device */
-static int
-libcfs_psdev_release(struct inode *inode, struct file *file)
-{
- struct libcfs_device_userstate *pdu;
- int rc = 0;
-
- if (!inode)
- return -EINVAL;
- pdu = file->private_data;
- if (libcfs_psdev_ops.p_close != NULL)
- rc = libcfs_psdev_ops.p_close(0, (void *)pdu);
- else
- rc = -EPERM;
- return rc;
-}
-
-static long libcfs_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct cfs_psdev_file pfile;
- int rc = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
- _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR ||
- _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
- CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
- _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
- return -EINVAL;
- }
-
- /* Handle platform-dependent IOC requests */
- switch (cmd) {
- case IOC_LIBCFS_PANIC:
- if (!capable(CFS_CAP_SYS_BOOT))
- return -EPERM;
- panic("debugctl-invoked panic");
- return 0;
- case IOC_LIBCFS_MEMHOG:
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-		/* fall through */
- }
-
- pfile.off = 0;
- pfile.private_data = file->private_data;
- if (libcfs_psdev_ops.p_ioctl != NULL)
- rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg);
- else
- rc = -EPERM;
- return rc;
-}
-
-static const struct file_operations libcfs_fops = {
- .unlocked_ioctl = libcfs_ioctl,
- .open = libcfs_psdev_open,
- .release = libcfs_psdev_release,
-};
-
-struct miscdevice libcfs_dev = {
- .minor = LNET_MINOR,
- .name = "lnet",
- .fops = &libcfs_fops,
-};
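
libcfs_ioctl_getdata() above uses a two-stage copy: read just the fixed header to learn the payload length, bound that length against the kernel buffer, then copy the whole payload and verify the length did not change between the two copies. A simplified, self-contained sketch of that validation (user_copy() is a hypothetical stand-in for copy_from_user(), and the header layout is invented):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint32_t len; uint32_t version; };

/* Stand-in for copy_from_user(); returns 0 on success. */
static int user_copy(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int getdata(char *buf, char *end, const void *arg)
{
	struct hdr h, check;

	if (user_copy(&h, arg, sizeof(h)))		/* stage 1: header only */
		return -EFAULT;
	if (h.len < sizeof(h) || h.len >= (size_t)(end - buf))
		return -EINVAL;
	if (user_copy(buf, arg, h.len))			/* stage 2: full payload */
		return -EFAULT;
	memcpy(&check, buf, sizeof(check));
	if (check.len != h.len)				/* changed between copies? */
		return -EINVAL;
	return 0;
}

int main(void)
{
	char kbuf[64];
	struct { struct hdr h; char payload[8]; } msg = { { sizeof(msg), 1 }, "payload" };

	printf("getdata() = %d\n", getdata(kbuf, kbuf + sizeof(kbuf), &msg));
	return 0;
}
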
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
deleted file mode 100644
index 89084460231a..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/fs_struct.h>
-#include <linux/sched.h>
-
-#include "../../../include/linux/libcfs/libcfs.h"
-
-#if defined(CONFIG_KGDB)
-#include <linux/kgdb.h>
-#endif
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be woken up again and again, even if the user only needs a few of them
- * to be active most of the time. This is bad for performance because the
- * cache can be polluted by different threads.
- *
- * A LIFO list resolves this problem because by default we always wake up
- * the most recently active thread.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if add_wait_queue_exclusive_head is used.
- */
-void
-add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&waitq->lock, flags);
- __add_wait_queue_exclusive(waitq, link);
- spin_unlock_irqrestore(&waitq->lock, flags);
-}
-EXPORT_SYMBOL(add_wait_queue_exclusive_head);
-
-sigset_t
-cfs_block_allsigs(void)
-{
- unsigned long flags;
- sigset_t old;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- old = current->blocked;
- sigfillset(&current->blocked);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
- return old;
-}
-EXPORT_SYMBOL(cfs_block_allsigs);
-
-sigset_t cfs_block_sigs(unsigned long sigs)
-{
- unsigned long flags;
- sigset_t old;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- old = current->blocked;
- sigaddsetmask(&current->blocked, sigs);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- return old;
-}
-EXPORT_SYMBOL(cfs_block_sigs);
-
-/* Block all signals except for the @sigs */
-sigset_t cfs_block_sigsinv(unsigned long sigs)
-{
- unsigned long flags;
- sigset_t old;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- old = current->blocked;
- sigaddsetmask(&current->blocked, ~sigs);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
- return old;
-}
-EXPORT_SYMBOL(cfs_block_sigsinv);
-
-void
-cfs_restore_sigs(sigset_t old)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->blocked = old;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-}
-EXPORT_SYMBOL(cfs_restore_sigs);
-
-int
-cfs_signal_pending(void)
-{
- return signal_pending(current);
-}
-EXPORT_SYMBOL(cfs_signal_pending);
-
-void
-cfs_clear_sigpending(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- clear_tsk_thread_flag(current, TIF_SIGPENDING);
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-}
-EXPORT_SYMBOL(cfs_clear_sigpending);
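
The signal helpers above all follow the same save/modify/restore shape around current->blocked, with cfs_restore_sigs() undoing what cfs_block_allsigs(), cfs_block_sigs() or cfs_block_sigsinv() did. A userspace analogue of cfs_block_sigsinv() using POSIX sigprocmask(), shown only to illustrate the pattern:

#include <signal.h>
#include <stdio.h>

/* Block every signal except one, returning the old mask for later restore. */
static sigset_t block_all_but(int keep_sig)
{
	sigset_t block, old;

	sigfillset(&block);
	sigdelset(&block, keep_sig);
	sigprocmask(SIG_SETMASK, &block, &old);
	return old;
}

int main(void)
{
	sigset_t old = block_all_but(SIGTERM);	/* keep SIGTERM deliverable */

	/* ... critical section that must not be interrupted ... */

	sigprocmask(SIG_SETMASK, &old, NULL);	/* cfs_restore_sigs() equivalent */
	puts("signals restored");
	return 0;
}
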
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
deleted file mode 100644
index 64a136cd503d..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#define LUSTRE_TRACEFILE_PRIVATE
-
-#include "../../../include/linux/libcfs/libcfs.h"
-#include "../tracefile.h"
-
-/* percentage shares of the total debug memory for each type */
-static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
- 80, /* 80% pages for CFS_TCD_TYPE_PROC */
- 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
- 10 /* 10% pages for CFS_TCD_TYPE_IRQ */
-};
-
-char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-
-static DECLARE_RWSEM(cfs_tracefile_sem);
-
-int cfs_tracefile_init_arch(void)
-{
- int i;
- int j;
- struct cfs_trace_cpu_data *tcd;
-
- /* initialize trace_data */
- memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
- for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
- cfs_trace_data[i] =
- kmalloc(sizeof(union cfs_trace_data_union) *
- num_possible_cpus(), GFP_KERNEL);
- if (cfs_trace_data[i] == NULL)
- goto out;
-
- }
-
- /* arch related info initialized */
- cfs_tcd_for_each(tcd, i, j) {
- spin_lock_init(&tcd->tcd_lock);
- tcd->tcd_pages_factor = pages_factor[i];
- tcd->tcd_type = i;
- tcd->tcd_cpu = j;
- }
-
- for (i = 0; i < num_possible_cpus(); i++)
- for (j = 0; j < 3; j++) {
- cfs_trace_console_buffers[i][j] =
- kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
- GFP_KERNEL);
-
- if (cfs_trace_console_buffers[i][j] == NULL)
- goto out;
- }
-
- return 0;
-
-out:
- cfs_tracefile_fini_arch();
- printk(KERN_ERR "lnet: Not enough memory\n");
- return -ENOMEM;
-}
-
-void cfs_tracefile_fini_arch(void)
-{
- int i;
- int j;
-
- for (i = 0; i < num_possible_cpus(); i++)
- for (j = 0; j < 3; j++) {
- kfree(cfs_trace_console_buffers[i][j]);
- cfs_trace_console_buffers[i][j] = NULL;
- }
-
- for (i = 0; cfs_trace_data[i] != NULL; i++) {
- kfree(cfs_trace_data[i]);
- cfs_trace_data[i] = NULL;
- }
-}
-
-void cfs_tracefile_read_lock(void)
-{
- down_read(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_read_unlock(void)
-{
- up_read(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_write_lock(void)
-{
- down_write(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_write_unlock(void)
-{
- up_write(&cfs_tracefile_sem);
-}
-
-cfs_trace_buf_type_t cfs_trace_buf_idx_get(void)
-{
- if (in_irq())
- return CFS_TCD_TYPE_IRQ;
- else if (in_softirq())
- return CFS_TCD_TYPE_SOFTIRQ;
- else
- return CFS_TCD_TYPE_PROC;
-}
-
-/*
- * The walking argument indicates that the locking is done from the
- * all-tcd-types iterator, so we must take the lock and disable local IRQs
- * to avoid deadlocks with other interrupt locks that might be taken. See
- * LU-1311 for details.
- */
-int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
- __acquires(&tcd->tc_lock)
-{
- __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- spin_lock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- spin_lock_irq(&tcd->tcd_lock);
- else
- spin_lock(&tcd->tcd_lock);
- return 1;
-}
-
-void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
- __releases(&tcd->tcd_lock)
-{
- __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- spin_unlock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- spin_unlock_irq(&tcd->tcd_lock);
- else
- spin_unlock(&tcd->tcd_lock);
-}
-
-int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
- struct cfs_trace_page *tage)
-{
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- return tcd->tcd_cpu == tage->cpu;
-}
-
-void
-cfs_set_ptldebug_header(struct ptldebug_header *header,
- struct libcfs_debug_msg_data *msgdata,
- unsigned long stack)
-{
- struct timespec64 ts;
-
- ktime_get_real_ts64(&ts);
-
- header->ph_subsys = msgdata->msg_subsys;
- header->ph_mask = msgdata->msg_mask;
- header->ph_cpu_id = smp_processor_id();
- header->ph_type = cfs_trace_buf_idx_get();
- /* y2038 safe since all user space treats this as unsigned, but
- * will overflow in 2106 */
- header->ph_sec = (u32)ts.tv_sec;
- header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
- header->ph_stack = stack;
- header->ph_pid = current->pid;
- header->ph_line_num = msgdata->msg_line;
- header->ph_extern_pid = 0;
- return;
-}
-
-static char *
-dbghdr_to_err_string(struct ptldebug_header *hdr)
-{
- switch (hdr->ph_subsys) {
- case S_LND:
- case S_LNET:
- return "LNetError";
- default:
- return "LustreError";
- }
-}
-
-static char *
-dbghdr_to_info_string(struct ptldebug_header *hdr)
-{
- switch (hdr->ph_subsys) {
- case S_LND:
- case S_LNET:
- return "LNet";
- default:
- return "Lustre";
- }
-}
-
-void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
- const char *buf, int len, const char *file,
- const char *fn)
-{
- char *prefix = "Lustre", *ptype = NULL;
-
- if ((mask & D_EMERG) != 0) {
- prefix = dbghdr_to_err_string(hdr);
- ptype = KERN_EMERG;
- } else if ((mask & D_ERROR) != 0) {
- prefix = dbghdr_to_err_string(hdr);
- ptype = KERN_ERR;
- } else if ((mask & D_WARNING) != 0) {
- prefix = dbghdr_to_info_string(hdr);
- ptype = KERN_WARNING;
- } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
- prefix = dbghdr_to_info_string(hdr);
- ptype = KERN_INFO;
- }
-
- if ((mask & D_CONSOLE) != 0) {
- printk("%s%s: %.*s", ptype, prefix, len, buf);
- } else {
- printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
- hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
- fn, len, buf);
- }
- return;
-}
-
-int cfs_trace_max_debug_mb(void)
-{
- int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
-
- return max(512, (total_mb * 80)/100);
-}
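
cfs_trace_max_debug_mb() above caps the trace buffer at 80% of system RAM with a 512 MB floor; the shift only converts totalram_pages into megabytes. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Assume 8 GiB of RAM and 4 KiB pages (PAGE_SHIFT == 12). */
	unsigned long page_shift = 12;
	unsigned long totalram_pages = 2 * 1024 * 1024;	/* 8 GiB / 4 KiB */
	int total_mb = totalram_pages >> (20 - page_shift);
	int cap = (total_mb * 80) / 100;		/* 80% of RAM */

	/* max(512, cap): prints "total 8192 MB, trace buffer limit 6553 MB" */
	printf("total %d MB, trace buffer limit %d MB\n",
	       total_mb, cap > 512 ? cap : 512);
	return 0;
}
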
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.h b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.h
deleted file mode 100644
index ba84e4ffddd1..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LIBCFS_LINUX_TRACEFILE_H__
-#define __LIBCFS_LINUX_TRACEFILE_H__
-
-/**
- * The three types of trace_data on Linux.
- */
-typedef enum {
- CFS_TCD_TYPE_PROC = 0,
- CFS_TCD_TYPE_SOFTIRQ,
- CFS_TCD_TYPE_IRQ,
- CFS_TCD_TYPE_MAX
-} cfs_trace_buf_type_t;
-
-#endif
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
deleted file mode 100644
index 5e4262a2e6f3..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ /dev/null
@@ -1,784 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <net/sock.h>
-#include <linux/uio.h>
-
-#include <linux/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/list.h>
-
-#include <linux/sysctl.h>
-#include <linux/debugfs.h>
-
-# define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <asm/div64.h>
-
-#include "../../include/linux/libcfs/libcfs_crypto.h"
-#include "../../include/linux/lnet/lib-lnet.h"
-#include "../../include/linux/lnet/lnet.h"
-#include "tracefile.h"
-
-MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
-MODULE_DESCRIPTION("Portals v3.1");
-MODULE_LICENSE("GPL");
-
-static struct dentry *lnet_debugfs_root;
-
-static void kportal_memhog_free(struct libcfs_device_userstate *ldu)
-{
- struct page **level0p = &ldu->ldu_memhog_root_page;
- struct page **level1p;
- struct page **level2p;
- int count1;
- int count2;
-
- if (*level0p != NULL) {
-
- level1p = (struct page **)page_address(*level0p);
- count1 = 0;
-
- while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
- *level1p != NULL) {
-
- level2p = (struct page **)page_address(*level1p);
- count2 = 0;
-
- while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
- *level2p != NULL) {
-
- __free_page(*level2p);
- ldu->ldu_memhog_pages--;
- level2p++;
- count2++;
- }
-
- __free_page(*level1p);
- ldu->ldu_memhog_pages--;
- level1p++;
- count1++;
- }
-
- __free_page(*level0p);
- ldu->ldu_memhog_pages--;
-
- *level0p = NULL;
- }
-
- LASSERT(ldu->ldu_memhog_pages == 0);
-}
-
-static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
- gfp_t flags)
-{
- struct page **level0p;
- struct page **level1p;
- struct page **level2p;
- int count1;
- int count2;
-
- LASSERT(ldu->ldu_memhog_pages == 0);
- LASSERT(ldu->ldu_memhog_root_page == NULL);
-
- if (npages < 0)
- return -EINVAL;
-
- if (npages == 0)
- return 0;
-
- level0p = &ldu->ldu_memhog_root_page;
- *level0p = alloc_page(flags);
- if (*level0p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
-
- level1p = (struct page **)page_address(*level0p);
- count1 = 0;
- memset(level1p, 0, PAGE_CACHE_SIZE);
-
- while (ldu->ldu_memhog_pages < npages &&
- count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
-
- if (cfs_signal_pending())
- return -EINTR;
-
- *level1p = alloc_page(flags);
- if (*level1p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
-
- level2p = (struct page **)page_address(*level1p);
- count2 = 0;
- memset(level2p, 0, PAGE_CACHE_SIZE);
-
- while (ldu->ldu_memhog_pages < npages &&
- count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
-
- if (cfs_signal_pending())
- return -EINTR;
-
- *level2p = alloc_page(flags);
- if (*level2p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
-
- level2p++;
- count2++;
- }
-
- level1p++;
- count1++;
- }
-
- return 0;
-}
-
-/* called when opening /dev/device */
-static int libcfs_psdev_open(unsigned long flags, void *args)
-{
- struct libcfs_device_userstate *ldu;
-
- try_module_get(THIS_MODULE);
-
- LIBCFS_ALLOC(ldu, sizeof(*ldu));
- if (ldu != NULL) {
- ldu->ldu_memhog_pages = 0;
- ldu->ldu_memhog_root_page = NULL;
- }
- *(struct libcfs_device_userstate **)args = ldu;
-
- return 0;
-}
-
-/* called when closing /dev/device */
-static int libcfs_psdev_release(unsigned long flags, void *args)
-{
- struct libcfs_device_userstate *ldu;
-
- ldu = (struct libcfs_device_userstate *)args;
- if (ldu != NULL) {
- kportal_memhog_free(ldu);
- LIBCFS_FREE(ldu, sizeof(*ldu));
- }
-
- module_put(THIS_MODULE);
- return 0;
-}
-
-static DECLARE_RWSEM(ioctl_list_sem);
-static LIST_HEAD(ioctl_list);
-
-int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
-{
- int rc = 0;
-
- down_write(&ioctl_list_sem);
- if (!list_empty(&hand->item))
- rc = -EBUSY;
- else
- list_add_tail(&hand->item, &ioctl_list);
- up_write(&ioctl_list_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_register_ioctl);
-
-int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
-{
- int rc = 0;
-
- down_write(&ioctl_list_sem);
- if (list_empty(&hand->item))
- rc = -ENOENT;
- else
- list_del_init(&hand->item);
- up_write(&ioctl_list_sem);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_deregister_ioctl);
-
-static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
- void *arg, struct libcfs_ioctl_data *data)
-{
- int err = -EINVAL;
-
- switch (cmd) {
- case IOC_LIBCFS_CLEAR_DEBUG:
- libcfs_debug_clear_buffer();
- return 0;
- /*
- * case IOC_LIBCFS_PANIC:
- * Handled in arch/cfs_module.c
- */
- case IOC_LIBCFS_MARK_DEBUG:
- if (data->ioc_inlbuf1 == NULL ||
- data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
- return -EINVAL;
- libcfs_debug_mark_buffer(data->ioc_inlbuf1);
- return 0;
- case IOC_LIBCFS_MEMHOG:
- if (pfile->private_data == NULL) {
- err = -EINVAL;
- } else {
- kportal_memhog_free(pfile->private_data);
-			/* XXX ioc_flags is not a GFP mask yet; this needs to be fixed */
- err = kportal_memhog_alloc(pfile->private_data,
- data->ioc_count,
- data->ioc_flags);
- if (err != 0)
- kportal_memhog_free(pfile->private_data);
- }
- break;
-
- case IOC_LIBCFS_PING_TEST: {
- extern void (kping_client)(struct libcfs_ioctl_data *);
- void (*ping)(struct libcfs_ioctl_data *);
-
- CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n",
- data->ioc_count, libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(data->ioc_nid));
- ping = symbol_get(kping_client);
- if (!ping)
- CERROR("symbol_get failed\n");
- else {
- ping(data);
- symbol_put(kping_client);
- }
- return 0;
- }
-
- default: {
- struct libcfs_ioctl_handler *hand;
- err = -EINVAL;
- down_read(&ioctl_list_sem);
- list_for_each_entry(hand, &ioctl_list, item) {
- err = hand->handle_ioctl(cmd, data);
- if (err != -EINVAL) {
- if (err == 0)
- err = libcfs_ioctl_popdata(arg,
- data, sizeof(*data));
- break;
- }
- }
- up_read(&ioctl_list_sem);
- break;
- }
- }
-
- return err;
-}
-
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
-{
- char *buf;
- struct libcfs_ioctl_data *data;
- int err = 0;
-
- LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
- if (buf == NULL)
- return -ENOMEM;
-
- /* 'cmd' and permissions get checked in our arch-specific caller */
- if (libcfs_ioctl_getdata(buf, buf + 800, arg)) {
- CERROR("PORTALS ioctl: data error\n");
- err = -EINVAL;
- goto out;
- }
- data = (struct libcfs_ioctl_data *)buf;
-
- err = libcfs_ioctl_int(pfile, cmd, arg, data);
-
-out:
- LIBCFS_FREE(buf, 1024);
- return err;
-}
-
-
-struct cfs_psdev_ops libcfs_psdev_ops = {
- libcfs_psdev_open,
- libcfs_psdev_release,
- NULL,
- NULL,
- libcfs_ioctl
-};
-
-static int proc_call_handler(void *data, int write, loff_t *ppos,
- void __user *buffer, size_t *lenp,
- int (*handler)(void *data, int write,
- loff_t pos, void __user *buffer, int len))
-{
- int rc = handler(data, write, *ppos, buffer, *lenp);
-
- if (rc < 0)
- return rc;
-
- if (write) {
- *ppos += *lenp;
- } else {
- *lenp = rc;
- *ppos += rc;
- }
- return 0;
-}
-
-static int __proc_dobitmasks(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- const int tmpstrlen = 512;
- char *tmpstr;
- int rc;
- unsigned int *mask = data;
- int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
- int is_printk = (mask == &libcfs_printk) ? 1 : 0;
-
- rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
- if (rc < 0)
- return rc;
-
- if (!write) {
- libcfs_debug_mask2str(tmpstr, tmpstrlen, *mask, is_subsys);
- rc = strlen(tmpstr);
-
- if (pos >= rc) {
- rc = 0;
- } else {
- rc = cfs_trace_copyout_string(buffer, nob,
- tmpstr + pos, "\n");
- }
- } else {
- rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob);
- if (rc < 0) {
- cfs_trace_free_string_buffer(tmpstr, tmpstrlen);
- return rc;
- }
-
- rc = libcfs_debug_str2mask(mask, tmpstr, is_subsys);
- /* Always print LBUG/LASSERT to console, so keep this mask */
- if (is_printk)
- *mask |= D_EMERG;
- }
-
- cfs_trace_free_string_buffer(tmpstr, tmpstrlen);
- return rc;
-}
-
-static int proc_dobitmasks(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_dobitmasks);
-}
-
-static int __proc_dump_kernel(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- if (!write)
- return 0;
-
- return cfs_trace_dump_debug_buffer_usrstr(buffer, nob);
-}
-
-static int proc_dump_kernel(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_dump_kernel);
-}
-
-static int __proc_daemon_file(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- if (!write) {
- int len = strlen(cfs_tracefile);
-
- if (pos >= len)
- return 0;
-
- return cfs_trace_copyout_string(buffer, nob,
- cfs_tracefile + pos, "\n");
- }
-
- return cfs_trace_daemon_command_usrstr(buffer, nob);
-}
-
-static int proc_daemon_file(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_daemon_file);
-}
-
-static int libcfs_force_lbug(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- if (write)
- LBUG();
- return 0;
-}
-
-static int proc_fail_loc(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- int rc;
- long old_fail_loc = cfs_fail_loc;
-
- rc = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
- if (old_fail_loc != cfs_fail_loc)
- wake_up(&cfs_race_waitq);
- return rc;
-}
-
-static int __proc_cpt_table(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- char *buf = NULL;
- int len = 4096;
- int rc = 0;
-
- if (write)
- return -EPERM;
-
- LASSERT(cfs_cpt_table != NULL);
-
- while (1) {
- LIBCFS_ALLOC(buf, len);
- if (buf == NULL)
- return -ENOMEM;
-
- rc = cfs_cpt_table_print(cfs_cpt_table, buf, len);
- if (rc >= 0)
- break;
-
- if (rc == -EFBIG) {
- LIBCFS_FREE(buf, len);
- len <<= 1;
- continue;
- }
- goto out;
- }
-
- if (pos >= rc) {
- rc = 0;
- goto out;
- }
-
- rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL);
- out:
- if (buf != NULL)
- LIBCFS_FREE(buf, len);
- return rc;
-}
-
-static int proc_cpt_table(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_cpt_table);
-}
-
-static struct ctl_table lnet_table[] = {
- /*
- * NB No .strategy entries have been provided since sysctl(8) prefers
- * to go via /proc for portability.
- */
- {
- .procname = "debug",
- .data = &libcfs_debug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks,
- },
- {
- .procname = "subsystem_debug",
- .data = &libcfs_subsystem_debug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks,
- },
- {
- .procname = "printk",
- .data = &libcfs_printk,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks,
- },
- {
- .procname = "cpu_partition_table",
- .maxlen = 128,
- .mode = 0444,
- .proc_handler = &proc_cpt_table,
- },
-
- {
- .procname = "upcall",
- .data = lnet_upcall,
- .maxlen = sizeof(lnet_upcall),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
- {
- .procname = "debug_log_upcall",
- .data = lnet_debug_log_upcall,
- .maxlen = sizeof(lnet_debug_log_upcall),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
- {
- .procname = "catastrophe",
- .data = &libcfs_catastrophe,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "dump_kernel",
- .maxlen = 256,
- .mode = 0200,
- .proc_handler = &proc_dump_kernel,
- },
- {
- .procname = "daemon_file",
- .mode = 0644,
- .maxlen = 256,
- .proc_handler = &proc_daemon_file,
- },
- {
- .procname = "force_lbug",
- .data = NULL,
- .maxlen = 0,
- .mode = 0200,
- .proc_handler = &libcfs_force_lbug
- },
- {
- .procname = "fail_loc",
- .data = &cfs_fail_loc,
- .maxlen = sizeof(cfs_fail_loc),
- .mode = 0644,
- .proc_handler = &proc_fail_loc
- },
- {
- .procname = "fail_val",
- .data = &cfs_fail_val,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- }
-};
-
-static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = {
- { "console_ratelimit",
- "/sys/module/libcfs/parameters/libcfs_console_ratelimit"},
- { "debug_path",
- "/sys/module/libcfs/parameters/libcfs_debug_file_path"},
- { "panic_on_lbug",
- "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"},
- { "libcfs_console_backoff",
- "/sys/module/libcfs/parameters/libcfs_console_backoff"},
- { "debug_mb",
- "/sys/module/libcfs/parameters/libcfs_debug_mb"},
- { "console_min_delay_centisecs",
- "/sys/module/libcfs/parameters/libcfs_console_min_delay"},
- { "console_max_delay_centisecs",
- "/sys/module/libcfs/parameters/libcfs_console_max_delay"},
- {},
-};
-
-static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ctl_table *table = filp->private_data;
- int error;
-
- error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos);
- if (!error)
- error = count;
-
- return error;
-}
-
-static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ctl_table *table = filp->private_data;
- int error;
-
- error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos);
- if (!error)
- error = count;
-
- return error;
-}
-
-static const struct file_operations lnet_debugfs_file_operations = {
- .open = simple_open,
- .read = lnet_debugfs_read,
- .write = lnet_debugfs_write,
- .llseek = default_llseek,
-};
-
-void lustre_insert_debugfs(struct ctl_table *table,
- const struct lnet_debugfs_symlink_def *symlinks)
-{
- struct dentry *entry;
-
- if (lnet_debugfs_root == NULL)
- lnet_debugfs_root = debugfs_create_dir("lnet", NULL);
-
-	/* Even if we cannot create it, just ignore it altogether. */
- if (IS_ERR_OR_NULL(lnet_debugfs_root))
- return;
-
- for (; table->procname; table++)
- entry = debugfs_create_file(table->procname, table->mode,
- lnet_debugfs_root, table,
- &lnet_debugfs_file_operations);
-
- for (; symlinks && symlinks->name; symlinks++)
- entry = debugfs_create_symlink(symlinks->name,
- lnet_debugfs_root,
- symlinks->target);
-
-}
-EXPORT_SYMBOL_GPL(lustre_insert_debugfs);
-
-static void lustre_remove_debugfs(void)
-{
- if (lnet_debugfs_root != NULL)
- debugfs_remove_recursive(lnet_debugfs_root);
-
- lnet_debugfs_root = NULL;
-}
-
-static int init_libcfs_module(void)
-{
- int rc;
-
- libcfs_init_nidstrings();
-
- rc = libcfs_debug_init(5 * 1024 * 1024);
- if (rc < 0) {
- pr_err("LustreError: libcfs_debug_init: %d\n", rc);
- return rc;
- }
-
- rc = cfs_cpu_init();
- if (rc != 0)
- goto cleanup_debug;
-
- rc = misc_register(&libcfs_dev);
- if (rc) {
- CERROR("misc_register: error %d\n", rc);
- goto cleanup_cpu;
- }
-
- rc = cfs_wi_startup();
- if (rc) {
- CERROR("initialize workitem: error %d\n", rc);
- goto cleanup_deregister;
- }
-
- /* max to 4 threads, should be enough for rehash */
- rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
- rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
- rc, &cfs_sched_rehash);
- if (rc != 0) {
- CERROR("Startup workitem scheduler: error: %d\n", rc);
- goto cleanup_deregister;
- }
-
- rc = cfs_crypto_register();
- if (rc) {
- CERROR("cfs_crypto_register: error %d\n", rc);
- goto cleanup_wi;
- }
-
- lustre_insert_debugfs(lnet_table, lnet_debugfs_symlinks);
-
- CDEBUG(D_OTHER, "portals setup OK\n");
- return 0;
- cleanup_wi:
- cfs_wi_shutdown();
- cleanup_deregister:
- misc_deregister(&libcfs_dev);
-cleanup_cpu:
- cfs_cpu_fini();
- cleanup_debug:
- libcfs_debug_cleanup();
- return rc;
-}
-
-static void exit_libcfs_module(void)
-{
- int rc;
-
- lustre_remove_debugfs();
-
- if (cfs_sched_rehash) {
- cfs_wi_sched_destroy(cfs_sched_rehash);
- cfs_sched_rehash = NULL;
- }
-
- cfs_crypto_unregister();
- cfs_wi_shutdown();
-
- misc_deregister(&libcfs_dev);
-
- cfs_cpu_fini();
-
- rc = libcfs_debug_cleanup();
- if (rc)
- pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc);
-}
-
-MODULE_VERSION("1.0.0");
-
-module_init(init_libcfs_module);
-module_exit(exit_libcfs_module);
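/*
 * Illustrative sketch (not part of the removed file): init_libcfs_module()
 * above uses the usual kernel "goto unwind" idiom -- every setup step that
 * can fail gets a cleanup label, and a failure jumps to the label that tears
 * down only what has already been set up, in reverse order.  The helpers
 * below (step_a/step_b/undo_a) are hypothetical stand-ins for calls such as
 * libcfs_debug_init()/cfs_cpu_init()/libcfs_debug_cleanup(); this is a
 * compact userspace rendering of the same shape, not the module itself.
 */
#include <stdio.h>

static int step_a(void)  { puts("step_a");  return 0; }
static int step_b(void)  { puts("step_b");  return -1; /* simulate failure */ }
static void undo_a(void) { puts("undo_a"); }

static int example_init(void)
{
	int rc;

	rc = step_a();		/* like libcfs_debug_init() */
	if (rc)
		return rc;

	rc = step_b();		/* like cfs_cpu_init() */
	if (rc)
		goto cleanup_a;	/* unwind only what succeeded */

	return 0;

cleanup_a:
	undo_a();		/* like libcfs_debug_cleanup() */
	return rc;
}

int main(void)
{
	return example_init() ? 1 : 0;
}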
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
deleted file mode 100644
index 087449f4e6c1..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/nidstrings.c
+++ /dev/null
@@ -1,842 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/nidstrings.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../../include/linux/lnet/lnet.h"
-
-/* CAVEAT VENDITOR! Keep the canonical string representation of nets/nids
- * consistent in all conversion functions. Some code fragments are copied
- * around for the sake of clarity...
- */
-
-/* CAVEAT EMPTOR! Racey temporary buffer allocation!
- * Choose the number of nidstrings to support the MAXIMUM expected number of
- * concurrent users. If there are more, the returned string will be volatile.
- * NB this number must allow for a process to be descheduled for a timeslice
- * between getting its string and using it.
- */
-
-static char libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
-static int libcfs_nidstring_idx;
-
-static spinlock_t libcfs_nidstring_lock;
-
-void libcfs_init_nidstrings(void)
-{
- spin_lock_init(&libcfs_nidstring_lock);
-}
-
-static char *
-libcfs_next_nidstring(void)
-{
- char *str;
- unsigned long flags;
-
- spin_lock_irqsave(&libcfs_nidstring_lock, flags);
-
- str = libcfs_nidstrings[libcfs_nidstring_idx++];
- if (libcfs_nidstring_idx == ARRAY_SIZE(libcfs_nidstrings))
- libcfs_nidstring_idx = 0;
-
- spin_unlock_irqrestore(&libcfs_nidstring_lock, flags);
- return str;
-}
-
-static int libcfs_lo_str2addr(const char *str, int nob, __u32 *addr)
-{
- *addr = 0;
- return 1;
-}
-
-static void libcfs_ip_addr2str(__u32 addr, char *str)
-{
- snprintf(str, LNET_NIDSTR_SIZE, "%u.%u.%u.%u",
- (addr >> 24) & 0xff, (addr >> 16) & 0xff,
- (addr >> 8) & 0xff, addr & 0xff);
-}
-
-static int libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
-{
- unsigned int a;
- unsigned int b;
- unsigned int c;
- unsigned int d;
- int n = nob; /* XscanfX */
-
- /* numeric IP? */
- if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
- n == nob &&
- (a & ~0xff) == 0 && (b & ~0xff) == 0 &&
- (c & ~0xff) == 0 && (d & ~0xff) == 0) {
- *addr = ((a<<24)|(b<<16)|(c<<8)|d);
- return 1;
- }
-
- return 0;
-}
-
-static void libcfs_decnum_addr2str(__u32 addr, char *str)
-{
- snprintf(str, LNET_NIDSTR_SIZE, "%u", addr);
-}
-
-static void libcfs_hexnum_addr2str(__u32 addr, char *str)
-{
- snprintf(str, LNET_NIDSTR_SIZE, "0x%x", addr);
-}
-
-static int libcfs_num_str2addr(const char *str, int nob, __u32 *addr)
-{
- int n;
-
- n = nob;
- if (sscanf(str, "0x%x%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- n = nob;
- if (sscanf(str, "0X%x%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- n = nob;
- if (sscanf(str, "%u%n", addr, &n) >= 1 && n == nob)
- return 1;
-
- return 0;
-}
-
-/**
- * Nf_parse_addrlist method for networks using numeric addresses.
- *
- * Examples of such networks are gm and elan.
- *
- * \retval 0 if \a str parsed to numeric address
- * \retval errno otherwise
- */
-static int
-libcfs_num_parse(char *str, int len, struct list_head *list)
-{
- struct cfs_expr_list *el;
- int rc;
-
- rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
- if (rc == 0)
- list_add_tail(&el->el_link, list);
-
- return rc;
-}
-
-/*
- * Nf_match_addr method for networks using numeric addresses
- *
- * \retval 1 on match
- * \retval 0 otherwise
- */
-static int
-libcfs_num_match(__u32 addr, struct list_head *numaddr)
-{
- struct cfs_expr_list *el;
-
- LASSERT(!list_empty(numaddr));
- el = list_entry(numaddr->next, struct cfs_expr_list, el_link);
-
- return cfs_expr_list_match(addr, el);
-}
-
-struct netstrfns {
- int nf_type;
- char *nf_name;
- char *nf_modname;
- void (*nf_addr2str)(__u32 addr, char *str);
- int (*nf_str2addr)(const char *str, int nob, __u32 *addr);
- int (*nf_parse_addrlist)(char *str, int len,
- struct list_head *list);
- int (*nf_match_addr)(__u32 addr, struct list_head *list);
-};
-
-static struct netstrfns libcfs_netstrfns[] = {
- {/* .nf_type */ LOLND,
- /* .nf_name */ "lo",
- /* .nf_modname */ "klolnd",
- /* .nf_addr2str */ libcfs_decnum_addr2str,
- /* .nf_str2addr */ libcfs_lo_str2addr,
- /* .nf_parse_addr*/ libcfs_num_parse,
- /* .nf_match_addr*/ libcfs_num_match},
- {/* .nf_type */ SOCKLND,
- /* .nf_name */ "tcp",
- /* .nf_modname */ "ksocklnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ O2IBLND,
- /* .nf_name */ "o2ib",
- /* .nf_modname */ "ko2iblnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ CIBLND,
- /* .nf_name */ "cib",
- /* .nf_modname */ "kciblnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ OPENIBLND,
- /* .nf_name */ "openib",
- /* .nf_modname */ "kopeniblnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ IIBLND,
- /* .nf_name */ "iib",
- /* .nf_modname */ "kiiblnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ VIBLND,
- /* .nf_name */ "vib",
- /* .nf_modname */ "kviblnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ RALND,
- /* .nf_name */ "ra",
- /* .nf_modname */ "kralnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ QSWLND,
- /* .nf_name */ "elan",
- /* .nf_modname */ "kqswlnd",
- /* .nf_addr2str */ libcfs_decnum_addr2str,
- /* .nf_str2addr */ libcfs_num_str2addr,
- /* .nf_parse_addrlist*/ libcfs_num_parse,
- /* .nf_match_addr*/ libcfs_num_match},
- {/* .nf_type */ GMLND,
- /* .nf_name */ "gm",
- /* .nf_modname */ "kgmlnd",
- /* .nf_addr2str */ libcfs_hexnum_addr2str,
- /* .nf_str2addr */ libcfs_num_str2addr,
- /* .nf_parse_addrlist*/ libcfs_num_parse,
- /* .nf_match_addr*/ libcfs_num_match},
- {/* .nf_type */ MXLND,
- /* .nf_name */ "mx",
- /* .nf_modname */ "kmxlnd",
- /* .nf_addr2str */ libcfs_ip_addr2str,
- /* .nf_str2addr */ libcfs_ip_str2addr,
- /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
- /* .nf_match_addr*/ cfs_ip_addr_match},
- {/* .nf_type */ PTLLND,
- /* .nf_name */ "ptl",
- /* .nf_modname */ "kptllnd",
- /* .nf_addr2str */ libcfs_decnum_addr2str,
- /* .nf_str2addr */ libcfs_num_str2addr,
- /* .nf_parse_addrlist*/ libcfs_num_parse,
- /* .nf_match_addr*/ libcfs_num_match},
- {/* .nf_type */ GNILND,
- /* .nf_name */ "gni",
- /* .nf_modname */ "kgnilnd",
- /* .nf_addr2str */ libcfs_decnum_addr2str,
- /* .nf_str2addr */ libcfs_num_str2addr,
- /* .nf_parse_addrlist*/ libcfs_num_parse,
- /* .nf_match_addr*/ libcfs_num_match},
- /* placeholder for net0 alias. It MUST BE THE LAST ENTRY */
- {/* .nf_type */ -1},
-};
-
-static const int libcfs_nnetstrfns = ARRAY_SIZE(libcfs_netstrfns);
-
-/* CAVEAT EMPTOR XscanfX
- * I use "%n" at the end of a sscanf format to detect trailing junk. However,
- * sscanf may return immediately if it sees the terminating '\0' in a string,
- * so I initialise the %n variable to the expected length. If sscanf sets it,
- * fine; if it doesn't, then the scan ended at the end of the string, which is
- * fine too :) */
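/*
 * Illustrative sketch (not part of the removed file): the XscanfX caveat
 * above -- "%n" records how many characters sscanf() consumed, so
 * pre-setting it to the expected length and comparing afterwards rejects
 * trailing junk whether or not sscanf() reached the "%n" conversion.
 * A standalone userspace demonstration of the idiom:
 */
#include <stdio.h>
#include <string.h>

static int parse_u32_strict(const char *str, unsigned int *val)
{
	int nob = (int)strlen(str);
	int n = nob;	/* pre-set to "everything consumed" */

	if (sscanf(str, "%u%n", val, &n) >= 1 && n == nob)
		return 1;	/* the whole string was a number */
	return 0;		/* trailing junk, e.g. "123abc" */
}

int main(void)
{
	unsigned int v;

	printf("%d\n", parse_u32_strict("123", &v));	/* prints 1 */
	printf("%d\n", parse_u32_strict("123abc", &v));	/* prints 0 */
	return 0;
}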
-
-static struct netstrfns *
-libcfs_lnd2netstrfns(int lnd)
-{
- int i;
-
- if (lnd >= 0)
- for (i = 0; i < libcfs_nnetstrfns; i++)
- if (lnd == libcfs_netstrfns[i].nf_type)
- return &libcfs_netstrfns[i];
-
- return NULL;
-}
-
-static struct netstrfns *
-libcfs_namenum2netstrfns(const char *name)
-{
- struct netstrfns *nf;
- int i;
-
- for (i = 0; i < libcfs_nnetstrfns; i++) {
- nf = &libcfs_netstrfns[i];
- if (nf->nf_type >= 0 &&
- !strncmp(name, nf->nf_name, strlen(nf->nf_name)))
- return nf;
- }
- return NULL;
-}
-
-static struct netstrfns *
-libcfs_name2netstrfns(const char *name)
-{
- int i;
-
- for (i = 0; i < libcfs_nnetstrfns; i++)
- if (libcfs_netstrfns[i].nf_type >= 0 &&
- !strcmp(libcfs_netstrfns[i].nf_name, name))
- return &libcfs_netstrfns[i];
-
- return NULL;
-}
-
-int
-libcfs_isknown_lnd(int type)
-{
- return libcfs_lnd2netstrfns(type) != NULL;
-}
-EXPORT_SYMBOL(libcfs_isknown_lnd);
-
-char *
-libcfs_lnd2modname(int lnd)
-{
- struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
-
- return (nf == NULL) ? NULL : nf->nf_modname;
-}
-EXPORT_SYMBOL(libcfs_lnd2modname);
-
-char *
-libcfs_lnd2str(int lnd)
-{
- char *str;
- struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
-
- if (nf != NULL)
- return nf->nf_name;
-
- str = libcfs_next_nidstring();
- snprintf(str, LNET_NIDSTR_SIZE, "?%d?", lnd);
- return str;
-}
-EXPORT_SYMBOL(libcfs_lnd2str);
-
-int
-libcfs_str2lnd(const char *str)
-{
- struct netstrfns *nf = libcfs_name2netstrfns(str);
-
- if (nf != NULL)
- return nf->nf_type;
-
- return -1;
-}
-EXPORT_SYMBOL(libcfs_str2lnd);
-
-char *
-libcfs_net2str(__u32 net)
-{
- int lnd = LNET_NETTYP(net);
- int num = LNET_NETNUM(net);
- struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
- char *str = libcfs_next_nidstring();
-
- if (nf == NULL)
- snprintf(str, LNET_NIDSTR_SIZE, "<%d:%d>", lnd, num);
- else if (num == 0)
- snprintf(str, LNET_NIDSTR_SIZE, "%s", nf->nf_name);
- else
- snprintf(str, LNET_NIDSTR_SIZE, "%s%d", nf->nf_name, num);
-
- return str;
-}
-EXPORT_SYMBOL(libcfs_net2str);
-
-char *
-libcfs_nid2str(lnet_nid_t nid)
-{
- __u32 addr = LNET_NIDADDR(nid);
- __u32 net = LNET_NIDNET(nid);
- int lnd = LNET_NETTYP(net);
- int nnum = LNET_NETNUM(net);
- struct netstrfns *nf;
- char *str;
- int nob;
-
- if (nid == LNET_NID_ANY)
- return "<?>";
-
- nf = libcfs_lnd2netstrfns(lnd);
- str = libcfs_next_nidstring();
-
- if (nf == NULL)
- snprintf(str, LNET_NIDSTR_SIZE, "%x@<%d:%d>", addr, lnd, nnum);
- else {
- nf->nf_addr2str(addr, str);
- nob = strlen(str);
- if (nnum == 0)
- snprintf(str + nob, LNET_NIDSTR_SIZE - nob, "@%s",
- nf->nf_name);
- else
- snprintf(str + nob, LNET_NIDSTR_SIZE - nob, "@%s%d",
- nf->nf_name, nnum);
- }
-
- return str;
-}
-EXPORT_SYMBOL(libcfs_nid2str);
-
-static struct netstrfns *
-libcfs_str2net_internal(const char *str, __u32 *net)
-{
- struct netstrfns *uninitialized_var(nf);
- int nob;
- unsigned int netnum;
- int i;
-
- for (i = 0; i < libcfs_nnetstrfns; i++) {
- nf = &libcfs_netstrfns[i];
- if (nf->nf_type >= 0 &&
- !strncmp(str, nf->nf_name, strlen(nf->nf_name)))
- break;
- }
-
- if (i == libcfs_nnetstrfns)
- return NULL;
-
- nob = strlen(nf->nf_name);
-
- if (strlen(str) == (unsigned int)nob) {
- netnum = 0;
- } else {
- if (nf->nf_type == LOLND) /* net number not allowed */
- return NULL;
-
- str += nob;
- i = strlen(str);
- if (sscanf(str, "%u%n", &netnum, &i) < 1 ||
- i != (int)strlen(str))
- return NULL;
- }
-
- *net = LNET_MKNET(nf->nf_type, netnum);
- return nf;
-}
-
-__u32
-libcfs_str2net(const char *str)
-{
- __u32 net;
-
- if (libcfs_str2net_internal(str, &net) != NULL)
- return net;
-
- return LNET_NIDNET(LNET_NID_ANY);
-}
-EXPORT_SYMBOL(libcfs_str2net);
-
-lnet_nid_t
-libcfs_str2nid(const char *str)
-{
- const char *sep = strchr(str, '@');
- struct netstrfns *nf;
- __u32 net;
- __u32 addr;
-
- if (sep != NULL) {
- nf = libcfs_str2net_internal(sep + 1, &net);
- if (nf == NULL)
- return LNET_NID_ANY;
- } else {
- sep = str + strlen(str);
- net = LNET_MKNET(SOCKLND, 0);
- nf = libcfs_lnd2netstrfns(SOCKLND);
- LASSERT(nf != NULL);
- }
-
- if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
- return LNET_NID_ANY;
-
- return LNET_MKNID(net, addr);
-}
-EXPORT_SYMBOL(libcfs_str2nid);
-
-char *
-libcfs_id2str(lnet_process_id_t id)
-{
- char *str = libcfs_next_nidstring();
-
- if (id.pid == LNET_PID_ANY) {
- snprintf(str, LNET_NIDSTR_SIZE,
- "LNET_PID_ANY-%s", libcfs_nid2str(id.nid));
- return str;
- }
-
- snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s",
- ((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "",
- (id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
- return str;
-}
-EXPORT_SYMBOL(libcfs_id2str);
-
-int
-libcfs_str2anynid(lnet_nid_t *nidp, const char *str)
-{
- if (!strcmp(str, "*")) {
- *nidp = LNET_NID_ANY;
- return 1;
- }
-
- *nidp = libcfs_str2nid(str);
- return *nidp != LNET_NID_ANY;
-}
-EXPORT_SYMBOL(libcfs_str2anynid);
-
-/**
- * Nid range list syntax.
- * \verbatim
- *
- * <nidlist> :== <nidrange> [ ' ' <nidrange> ]
- * <nidrange> :== <addrrange> '@' <net>
- * <addrrange> :== '*' |
- * <ipaddr_range> |
- * <cfs_expr_list>
- * <ipaddr_range> :== <cfs_expr_list>.<cfs_expr_list>.<cfs_expr_list>.
- * <cfs_expr_list>
- * <cfs_expr_list> :== <number> |
- * <expr_list>
- * <expr_list> :== '[' <range_expr> [ ',' <range_expr>] ']'
- * <range_expr> :== <number> |
- * <number> '-' <number> |
- * <number> '-' <number> '/' <number>
- * <net> :== <netname> | <netname><number>
- * <netname> :== "lo" | "tcp" | "o2ib" | "cib" | "openib" | "iib" |
- * "vib" | "ra" | "elan" | "mx" | "ptl"
- * \endverbatim
- */
-
-/**
- * Structure to represent \<nidrange\> token of the syntax.
- *
- * One of these is created for each \<net\> parsed.
- */
-struct nidrange {
- /**
-	 * Link to the list of these structures, which is built during
-	 * nid range list parsing.
- */
- struct list_head nr_link;
- /**
- * List head for addrrange::ar_link.
- */
- struct list_head nr_addrranges;
- /**
-	 * Flag indicating that *@\<net\> is found.
- */
- int nr_all;
- /**
- * Pointer to corresponding element of libcfs_netstrfns.
- */
- struct netstrfns *nr_netstrfns;
- /**
- * Number of network. E.g. 5 if \<net\> is "elan5".
- */
- int nr_netnum;
-};
-
-/**
- * Structure to represent \<addrrange\> token of the syntax.
- */
-struct addrrange {
- /**
- * Link to nidrange::nr_addrranges.
- */
- struct list_head ar_link;
- /**
- * List head for cfs_expr_list::el_list.
- */
- struct list_head ar_numaddr_ranges;
-};
-
-/**
- * Parses \<addrrange\> token of the syntax.
- *
- * Allocates struct addrrange and links to \a nidrange via
- * (nidrange::nr_addrranges)
- *
- * \retval 1 if \a src parses to '*' | \<ipaddr_range\> | \<cfs_expr_list\>
- * \retval 0 otherwise
- */
-static int
-parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange)
-{
- struct addrrange *addrrange;
-
- if (src->ls_len == 1 && src->ls_str[0] == '*') {
- nidrange->nr_all = 1;
- return 1;
- }
-
- LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
- if (addrrange == NULL)
- return 0;
- list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
- INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
-
- return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
- src->ls_len,
- &addrrange->ar_numaddr_ranges);
-}
-
-/**
- * Finds or creates struct nidrange.
- *
- * Checks if \a src is a valid network name, looks for corresponding
- * nidrange in the list of nidranges (\a nidlist), and creates a new struct
- * nidrange if it is not found.
- *
- * \retval pointer to struct nidrange matching network specified via \a src
- * \retval NULL if \a src does not match any network
- */
-static struct nidrange *
-add_nidrange(const struct cfs_lstr *src,
- struct list_head *nidlist)
-{
- struct netstrfns *nf;
- struct nidrange *nr;
- int endlen;
- unsigned netnum;
-
- if (src->ls_len >= LNET_NIDSTR_SIZE)
- return NULL;
-
- nf = libcfs_namenum2netstrfns(src->ls_str);
- if (nf == NULL)
- return NULL;
- endlen = src->ls_len - strlen(nf->nf_name);
- if (endlen == 0)
- /* network name only, e.g. "elan" or "tcp" */
- netnum = 0;
- else {
- /* e.g. "elan25" or "tcp23", refuse to parse if
- * network name is not appended with decimal or
- * hexadecimal number */
- if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name),
- endlen, &netnum, 0, MAX_NUMERIC_VALUE))
- return NULL;
- }
-
- list_for_each_entry(nr, nidlist, nr_link) {
- if (nr->nr_netstrfns != nf)
- continue;
- if (nr->nr_netnum != netnum)
- continue;
- return nr;
- }
-
- LIBCFS_ALLOC(nr, sizeof(struct nidrange));
- if (nr == NULL)
- return NULL;
- list_add_tail(&nr->nr_link, nidlist);
- INIT_LIST_HEAD(&nr->nr_addrranges);
- nr->nr_netstrfns = nf;
- nr->nr_all = 0;
- nr->nr_netnum = netnum;
-
- return nr;
-}
-
-/**
- * Parses \<nidrange\> token of the syntax.
- *
- * \retval 1 if \a src parses to \<addrrange\> '@' \<net\>
- * \retval 0 otherwise
- */
-static int
-parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
-{
- struct cfs_lstr addrrange;
- struct cfs_lstr net;
- struct cfs_lstr tmp;
- struct nidrange *nr;
-
- tmp = *src;
- if (cfs_gettok(src, '@', &addrrange) == 0)
- goto failed;
-
- if (cfs_gettok(src, '@', &net) == 0 || src->ls_str != NULL)
- goto failed;
-
- nr = add_nidrange(&net, nidlist);
- if (nr == NULL)
- goto failed;
-
- if (parse_addrange(&addrrange, nr) != 0)
- goto failed;
-
- return 1;
- failed:
- CWARN("can't parse nidrange: \"%.*s\"\n", tmp.ls_len, tmp.ls_str);
- return 0;
-}
-
-/**
- * Frees addrrange structures of \a list.
- *
- * For each struct addrrange structure found on \a list it frees
- * cfs_expr_list list attached to it and frees the addrrange itself.
- *
- * \retval none
- */
-static void
-free_addrranges(struct list_head *list)
-{
- while (!list_empty(list)) {
- struct addrrange *ar;
-
- ar = list_entry(list->next, struct addrrange, ar_link);
-
- cfs_expr_list_free_list(&ar->ar_numaddr_ranges);
- list_del(&ar->ar_link);
- LIBCFS_FREE(ar, sizeof(struct addrrange));
- }
-}
-
-/**
- * Frees nidrange structures of \a list.
- *
- * For each struct nidrange structure found on \a list it frees
- * addrrange list attached to it and frees the nidrange itself.
- *
- * \retval none
- */
-void
-cfs_free_nidlist(struct list_head *list)
-{
- struct list_head *pos, *next;
- struct nidrange *nr;
-
- list_for_each_safe(pos, next, list) {
- nr = list_entry(pos, struct nidrange, nr_link);
- free_addrranges(&nr->nr_addrranges);
- list_del(pos);
- LIBCFS_FREE(nr, sizeof(struct nidrange));
- }
-}
-EXPORT_SYMBOL(cfs_free_nidlist);
-
-/**
- * Parses nid range list.
- *
- * Parses with rigorous syntax and overflow checking \a str into
- * \<nidrange\> [ ' ' \<nidrange\> ], compiles \a str into a set of
- * structures and links them to \a nidlist. The resulting
- * list can be used to match a NID against the set of NIDs defined by
- * \a str.
- * \see cfs_match_nid
- *
- * \retval 1 on success
- * \retval 0 otherwise
- */
-int
-cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
-{
- struct cfs_lstr src;
- struct cfs_lstr res;
- int rc;
-
- src.ls_str = str;
- src.ls_len = len;
- INIT_LIST_HEAD(nidlist);
- while (src.ls_str) {
- rc = cfs_gettok(&src, ' ', &res);
- if (rc == 0) {
- cfs_free_nidlist(nidlist);
- return 0;
- }
- rc = parse_nidrange(&res, nidlist);
- if (rc == 0) {
- cfs_free_nidlist(nidlist);
- return 0;
- }
- }
- return 1;
-}
-EXPORT_SYMBOL(cfs_parse_nidlist);
-
-/**
- * Matches a nid (\a nid) against the compiled list of nidranges (\a nidlist).
- *
- * \see cfs_parse_nidlist()
- *
- * \retval 1 on match
- * \retval 0 otherwise
- */
-int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
-{
- struct nidrange *nr;
- struct addrrange *ar;
-
- list_for_each_entry(nr, nidlist, nr_link) {
- if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
- continue;
- if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
- continue;
- if (nr->nr_all)
- return 1;
- list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
- if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
- &ar->ar_numaddr_ranges))
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(cfs_match_nid);
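/*
 * Illustrative sketch (not part of the removed file): for the IP-based LNDs
 * the code above renders a NID as "<dotted-quad>@<netname>[<netnum>]", with
 * the net number omitted when it is 0 (see libcfs_ip_addr2str() and
 * libcfs_nid2str()).  A standalone userspace approximation of that
 * formatting; NIDSTR_SIZE is a hypothetical stand-in for LNET_NIDSTR_SIZE.
 */
#include <stdio.h>

#define NIDSTR_SIZE 64	/* stand-in for LNET_NIDSTR_SIZE */

static void format_ip_nid(unsigned int addr, const char *netname,
			  int netnum, char *buf)
{
	int nob;

	/* dotted quad, most significant byte first, as in libcfs_ip_addr2str() */
	nob = snprintf(buf, NIDSTR_SIZE, "%u.%u.%u.%u",
		       (addr >> 24) & 0xff, (addr >> 16) & 0xff,
		       (addr >> 8) & 0xff, addr & 0xff);

	/* "@tcp" for net number 0, "@tcp1" otherwise, as in libcfs_nid2str() */
	if (netnum == 0)
		snprintf(buf + nob, NIDSTR_SIZE - nob, "@%s", netname);
	else
		snprintf(buf + nob, NIDSTR_SIZE - nob, "@%s%d", netname, netnum);
}

int main(void)
{
	char buf[NIDSTR_SIZE];

	format_ip_nid(0x0a000001, "tcp", 1, buf);	/* 10.0.0.1 on tcp1 */
	puts(buf);					/* "10.0.0.1@tcp1" */
	return 0;
}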
diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c b/drivers/staging/lustre/lustre/libcfs/prng.c
deleted file mode 100644
index 4147664ff57a..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/prng.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/prng.c
- *
- * concatenation of the following two 16-bit multiply-with-carry generators:
- * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16,
- * number and carry packed within the same 32 bit integer.
- * algorithm recommended by Marsaglia
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-/*
-From: George Marsaglia <geo@stat.fsu.edu>
-Newsgroups: sci.math
-Subject: Re: A RANDOM NUMBER GENERATOR FOR C
-Date: Tue, 30 Sep 1997 05:29:35 -0700
-
- * You may replace the two constants 36969 and 18000 by any
- * pair of distinct constants from this list:
- * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584
- * 19599 19950 20088 20508 20544 20664 20814 20970 21153 21243
- * 21423 21723 21954 22125 22188 22293 22860 22938 22965 22974
- * 23109 23124 23163 23208 23508 23520 23553 23658 23865 24114
- * 24219 24660 24699 24864 24948 25023 25308 25443 26004 26088
- * 26154 26550 26679 26838 27183 27258 27753 27795 27810 27834
- * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013
- * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083
- * (or any other 16-bit constants k for which both k*2^16-1
- * and k*2^15-1 are prime) */
-
-#define RANDOM_CONST_A 18030
-#define RANDOM_CONST_B 29013
-
-static unsigned int seed_x = 521288629;
-static unsigned int seed_y = 362436069;
-
-/**
- * cfs_rand - generate a new pseudo-random 32-bit integer
- *
- * First it creates new seeds from the previous seeds. Then it generates a
- * new pseudo random number for use.
- *
- * Returns a pseudo-random 32-bit integer
- */
-unsigned int cfs_rand(void)
-{
- seed_x = RANDOM_CONST_A * (seed_x & 65535) + (seed_x >> 16);
- seed_y = RANDOM_CONST_B * (seed_y & 65535) + (seed_y >> 16);
-
- return ((seed_x << 16) + (seed_y & 65535));
-}
-EXPORT_SYMBOL(cfs_rand);
-
-/**
- * cfs_srand - sets the initial seed
- * @seed1 : (seed_x) should have the most entropy in the low bits of the word
- * @seed2 : (seed_y) should have the most entropy in the high bits of the word
- *
- * Replaces the original seeds with new values, which are then used to
- * generate new pseudo-random numbers.
- */
-void cfs_srand(unsigned int seed1, unsigned int seed2)
-{
- if (seed1)
- seed_x = seed1; /* use default seeds if parameter is 0 */
- if (seed2)
- seed_y = seed2;
-}
-EXPORT_SYMBOL(cfs_srand);
-
-/**
- * cfs_get_random_bytes - generate a bunch of random numbers
- * @buf : buffer to fill with random numbers
- * @size: size of passed in buffer
- *
- * Fills a buffer with random bytes
- */
-void cfs_get_random_bytes(void *buf, int size)
-{
- int *p = buf;
- int rem, tmp;
-
- LASSERT(size >= 0);
-
- rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size);
- if (rem) {
- get_random_bytes(&tmp, sizeof(tmp));
- tmp ^= cfs_rand();
- memcpy(buf, &tmp, rem);
- p = buf + rem;
- size -= rem;
- }
-
- while (size >= sizeof(int)) {
- get_random_bytes(&tmp, sizeof(tmp));
- *p = cfs_rand() ^ tmp;
- size -= sizeof(int);
- p++;
- }
- buf = p;
- if (size) {
- get_random_bytes(&tmp, sizeof(tmp));
- tmp ^= cfs_rand();
- memcpy(buf, &tmp, size);
- }
-}
-EXPORT_SYMBOL(cfs_get_random_bytes);
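/*
 * Illustrative sketch (not part of the removed file): cfs_rand() above is
 * the Marsaglia multiply-with-carry pair -- each 32-bit seed keeps a 16-bit
 * value in its low half and the carry in its high half, and the two streams
 * are concatenated into one 32-bit result.  A standalone userspace copy
 * using the same constants and default seeds as the removed code:
 */
#include <stdio.h>

#define CONST_A 18030u		/* RANDOM_CONST_A */
#define CONST_B 29013u		/* RANDOM_CONST_B */

static unsigned int sx = 521288629u;	/* default seed_x */
static unsigned int sy = 362436069u;	/* default seed_y */

static unsigned int mwc_rand(void)
{
	/* low 16 bits hold the value, high 16 bits hold the carry */
	sx = CONST_A * (sx & 65535u) + (sx >> 16);
	sy = CONST_B * (sy & 65535u) + (sy >> 16);

	return (sx << 16) + (sy & 65535u);
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("%u\n", mwc_rand());
	return 0;
}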
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
deleted file mode 100644
index 5a2e5ea00570..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/tracefile.c
- *
- * Author: Zach Brown <zab@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-
-#define DEBUG_SUBSYSTEM S_LNET
-#define LUSTRE_TRACEFILE_PRIVATE
-#include "tracefile.h"
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-/* XXX move things up to the top, comment */
-union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
-
-char cfs_tracefile[TRACEFILE_NAME_SIZE];
-long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
-static struct tracefiled_ctl trace_tctl;
-static DEFINE_MUTEX(cfs_trace_thread_mutex);
-static int thread_running;
-
-static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
-
-static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct cfs_trace_cpu_data *tcd);
-
-static inline struct cfs_trace_page *
-cfs_tage_from_list(struct list_head *list)
-{
- return list_entry(list, struct cfs_trace_page, linkage);
-}
-
-static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
-{
- struct page *page;
- struct cfs_trace_page *tage;
-
- /* My caller is trying to free memory */
- if (!in_interrupt() && memory_pressure_get())
- return NULL;
-
- /*
- * Don't spam console with allocation failures: they will be reported
- * by upper layer anyway.
- */
- gfp |= __GFP_NOWARN;
- page = alloc_page(gfp);
- if (page == NULL)
- return NULL;
-
- tage = kmalloc(sizeof(*tage), gfp);
- if (tage == NULL) {
- __free_page(page);
- return NULL;
- }
-
- tage->page = page;
- atomic_inc(&cfs_tage_allocated);
- return tage;
-}
-
-static void cfs_tage_free(struct cfs_trace_page *tage)
-{
- __LASSERT(tage != NULL);
- __LASSERT(tage->page != NULL);
-
- __free_page(tage->page);
- kfree(tage);
- atomic_dec(&cfs_tage_allocated);
-}
-
-static void cfs_tage_to_tail(struct cfs_trace_page *tage,
- struct list_head *queue)
-{
- __LASSERT(tage != NULL);
- __LASSERT(queue != NULL);
-
- list_move_tail(&tage->linkage, queue);
-}
-
-int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
- struct list_head *stock)
-{
- int i;
-
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
-
- for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++i) {
- struct cfs_trace_page *tage;
-
- tage = cfs_tage_alloc(gfp);
- if (tage == NULL)
- break;
- list_add_tail(&tage->linkage, stock);
- }
- return i;
-}
-
-/* return a page that has 'len' bytes left at the end */
-static struct cfs_trace_page *
-cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
-{
- struct cfs_trace_page *tage;
-
- if (tcd->tcd_cur_pages > 0) {
- __LASSERT(!list_empty(&tcd->tcd_pages));
- tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= PAGE_CACHE_SIZE)
- return tage;
- }
-
- if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
- if (tcd->tcd_cur_stock_pages > 0) {
- tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
- --tcd->tcd_cur_stock_pages;
- list_del_init(&tage->linkage);
- } else {
- tage = cfs_tage_alloc(GFP_ATOMIC);
- if (unlikely(tage == NULL)) {
- if ((!memory_pressure_get() ||
- in_interrupt()) && printk_ratelimit())
- printk(KERN_WARNING
- "cannot allocate a tage (%ld)\n",
- tcd->tcd_cur_pages);
- return NULL;
- }
- }
-
- tage->used = 0;
- tage->cpu = smp_processor_id();
- tage->type = tcd->tcd_type;
- list_add_tail(&tage->linkage, &tcd->tcd_pages);
- tcd->tcd_cur_pages++;
-
- if (tcd->tcd_cur_pages > 8 && thread_running) {
- struct tracefiled_ctl *tctl = &trace_tctl;
- /*
- * wake up tracefiled to process some pages.
- */
- wake_up(&tctl->tctl_waitq);
- }
- return tage;
- }
- return NULL;
-}
-
-static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
-{
- int pgcount = tcd->tcd_cur_pages / 10;
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
-
- if (printk_ratelimit())
- printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
- pgcount + 1, tcd->tcd_cur_pages);
-
- INIT_LIST_HEAD(&pc.pc_pages);
- spin_lock_init(&pc.pc_lock);
-
- list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
- if (pgcount-- == 0)
- break;
-
- list_move_tail(&tage->linkage, &pc.pc_pages);
- tcd->tcd_cur_pages--;
- }
- put_pages_on_tcd_daemon_list(&pc, tcd);
-}
-
-/* return a page that has 'len' bytes left at the end */
-static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
- unsigned long len)
-{
- struct cfs_trace_page *tage;
-
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
-
- if (len > PAGE_CACHE_SIZE) {
- pr_err("cowardly refusing to write %lu bytes in a page\n", len);
- return NULL;
- }
-
- tage = cfs_trace_get_tage_try(tcd, len);
- if (tage != NULL)
- return tage;
- if (thread_running)
- cfs_tcd_shrink(tcd);
- if (tcd->tcd_cur_pages > 0) {
- tage = cfs_tage_from_list(tcd->tcd_pages.next);
- tage->used = 0;
- cfs_tage_to_tail(tage, &tcd->tcd_pages);
- }
- return tage;
-}
-
-int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
- const char *format, ...)
-{
- va_list args;
- int rc;
-
- va_start(args, format);
- rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
- va_end(args);
-
- return rc;
-}
-EXPORT_SYMBOL(libcfs_debug_msg);
-
-int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
- const char *format1, va_list args,
- const char *format2, ...)
-{
- struct cfs_trace_cpu_data *tcd = NULL;
- struct ptldebug_header header = {0};
- struct cfs_trace_page *tage;
- /* string_buf is used only if tcd != NULL, and is always set then */
- char *string_buf = NULL;
- char *debug_buf;
- int known_size;
- int needed = 85; /* average message length */
- int max_nob;
- va_list ap;
- int depth;
- int i;
- int remain;
- int mask = msgdata->msg_mask;
- const char *file = kbasename(msgdata->msg_file);
- struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
-
- tcd = cfs_trace_get_tcd();
-
- /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
- * pins us to a particular CPU. This avoids an smp_processor_id()
- * warning on Linux when debugging is enabled. */
- cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
-
- if (tcd == NULL) /* arch may not log in IRQ context */
- goto console;
-
- if (tcd->tcd_cur_pages == 0)
- header.ph_flags |= PH_FLAG_FIRST_RECORD;
-
- if (tcd->tcd_shutting_down) {
- cfs_trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- depth = __current_nesting_level();
- known_size = strlen(file) + 1 + depth;
- if (msgdata->msg_fn)
- known_size += strlen(msgdata->msg_fn) + 1;
-
- if (libcfs_debug_binary)
- known_size += sizeof(header);
-
-	/*
-	 * Two passes ('2') are used because vsnprintf returns the real size
-	 * required for the output _without_ the terminating NUL, so we retry
-	 * once if 'needed' turned out to be too small for this format.
-	 */
- for (i = 0; i < 2; i++) {
- tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
- if (tage == NULL) {
- if (needed + known_size > PAGE_CACHE_SIZE)
- mask |= D_ERROR;
-
- cfs_trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- string_buf = (char *)page_address(tage->page) +
- tage->used + known_size;
-
- max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
- if (max_nob <= 0) {
- printk(KERN_EMERG "negative max_nob: %d\n",
- max_nob);
- mask |= D_ERROR;
- cfs_trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- needed = 0;
- if (format1) {
- va_copy(ap, args);
- needed = vsnprintf(string_buf, max_nob, format1, ap);
- va_end(ap);
- }
-
- if (format2) {
- remain = max_nob - needed;
- if (remain < 0)
- remain = 0;
-
- va_start(ap, format2);
- needed += vsnprintf(string_buf + needed, remain,
- format2, ap);
- va_end(ap);
- }
-
- if (needed < max_nob) /* well. printing ok.. */
- break;
- }
-
- if (*(string_buf+needed-1) != '\n')
- printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
- file, msgdata->msg_line, msgdata->msg_fn);
-
- header.ph_len = known_size + needed;
- debug_buf = (char *)page_address(tage->page) + tage->used;
-
- if (libcfs_debug_binary) {
- memcpy(debug_buf, &header, sizeof(header));
- tage->used += sizeof(header);
- debug_buf += sizeof(header);
- }
-
- /* indent message according to the nesting level */
- while (depth-- > 0) {
- *(debug_buf++) = '.';
- ++tage->used;
- }
-
- strcpy(debug_buf, file);
- tage->used += strlen(file) + 1;
- debug_buf += strlen(file) + 1;
-
- if (msgdata->msg_fn) {
- strcpy(debug_buf, msgdata->msg_fn);
- tage->used += strlen(msgdata->msg_fn) + 1;
- debug_buf += strlen(msgdata->msg_fn) + 1;
- }
-
- __LASSERT(debug_buf == string_buf);
-
- tage->used += needed;
- __LASSERT (tage->used <= PAGE_CACHE_SIZE);
-
-console:
- if ((mask & libcfs_printk) == 0) {
- /* no console output requested */
- if (tcd != NULL)
- cfs_trace_put_tcd(tcd);
- return 1;
- }
-
- if (cdls != NULL) {
- if (libcfs_console_ratelimit &&
- cdls->cdls_next != 0 && /* not first time ever */
- !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
- /* skipping a console message */
- cdls->cdls_count++;
- if (tcd != NULL)
- cfs_trace_put_tcd(tcd);
- return 1;
- }
-
- if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
- libcfs_console_max_delay
- + cfs_time_seconds(10))) {
- /* last timeout was a long time ago */
- cdls->cdls_delay /= libcfs_console_backoff * 4;
- } else {
- cdls->cdls_delay *= libcfs_console_backoff;
- }
-
- if (cdls->cdls_delay < libcfs_console_min_delay)
- cdls->cdls_delay = libcfs_console_min_delay;
- else if (cdls->cdls_delay > libcfs_console_max_delay)
- cdls->cdls_delay = libcfs_console_max_delay;
-
- /* ensure cdls_next is never zero after it's been seen */
- cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
- }
-
- if (tcd != NULL) {
- cfs_print_to_console(&header, mask, string_buf, needed, file,
- msgdata->msg_fn);
- cfs_trace_put_tcd(tcd);
- } else {
- string_buf = cfs_trace_get_console_buffer();
-
- needed = 0;
- if (format1 != NULL) {
- va_copy(ap, args);
- needed = vsnprintf(string_buf,
- CFS_TRACE_CONSOLE_BUFFER_SIZE,
- format1, ap);
- va_end(ap);
- }
- if (format2 != NULL) {
- remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
- if (remain > 0) {
- va_start(ap, format2);
- needed += vsnprintf(string_buf+needed, remain,
- format2, ap);
- va_end(ap);
- }
- }
- cfs_print_to_console(&header, mask,
- string_buf, needed, file, msgdata->msg_fn);
-
- cfs_trace_put_console_buffer(string_buf);
- }
-
- if (cdls != NULL && cdls->cdls_count != 0) {
- string_buf = cfs_trace_get_console_buffer();
-
- needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
- "Skipped %d previous similar message%s\n",
- cdls->cdls_count,
- (cdls->cdls_count > 1) ? "s" : "");
-
- cfs_print_to_console(&header, mask,
- string_buf, needed, file, msgdata->msg_fn);
-
- cfs_trace_put_console_buffer(string_buf);
- cdls->cdls_count = 0;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(libcfs_debug_vmsg2);
-
-void
-cfs_trace_assertion_failed(const char *str,
- struct libcfs_debug_msg_data *msgdata)
-{
- struct ptldebug_header hdr;
-
- libcfs_panic_in_progress = 1;
- libcfs_catastrophe = 1;
- mb();
-
- cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
-
- cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
- msgdata->msg_file, msgdata->msg_fn);
-
- panic("Lustre debug assertion failure\n");
-
- /* not reached */
-}
-
-static void
-panic_collect_pages(struct page_collection *pc)
-{
- /* Do the collect_pages job on a single CPU: assumes that all other
- * CPUs have been stopped during a panic. If this isn't true for some
- * arch, this will have to be implemented separately in each arch. */
- int i;
- int j;
- struct cfs_trace_cpu_data *tcd;
-
- INIT_LIST_HEAD(&pc->pc_pages);
-
- cfs_tcd_for_each(tcd, i, j) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
-
- if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
- tcd->tcd_cur_daemon_pages = 0;
- }
- }
-}
-
-static void collect_pages_on_all_cpus(struct page_collection *pc)
-{
- struct cfs_trace_cpu_data *tcd;
- int i, cpu;
-
- spin_lock(&pc->pc_lock);
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
- tcd->tcd_cur_daemon_pages = 0;
- }
- }
- }
- spin_unlock(&pc->pc_lock);
-}
-
-static void collect_pages(struct page_collection *pc)
-{
- INIT_LIST_HEAD(&pc->pc_pages);
-
- if (libcfs_panic_in_progress)
- panic_collect_pages(pc);
- else
- collect_pages_on_all_cpus(pc);
-}
-
-static void put_pages_back_on_all_cpus(struct page_collection *pc)
-{
- struct cfs_trace_cpu_data *tcd;
- struct list_head *cur_head;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- int i, cpu;
-
- spin_lock(&pc->pc_lock);
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- cur_head = tcd->tcd_pages.next;
-
- list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
- linkage) {
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- if (tage->cpu != cpu || tage->type != i)
- continue;
-
- cfs_tage_to_tail(tage, cur_head);
- tcd->tcd_cur_pages++;
- }
- }
- }
- spin_unlock(&pc->pc_lock);
-}
-
-static void put_pages_back(struct page_collection *pc)
-{
- if (!libcfs_panic_in_progress)
- put_pages_back_on_all_cpus(pc);
-}
-
-/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
- * we have a good amount of data at all times for dumping during an LBUG, even
- * if we have been steadily writing (and otherwise discarding) pages via the
- * debug daemon. */
-static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct cfs_trace_cpu_data *tcd)
-{
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- spin_lock(&pc->pc_lock);
- list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
- continue;
-
- cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
- tcd->tcd_cur_daemon_pages++;
-
- if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
- struct cfs_trace_page *victim;
-
- __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
- victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
-
- __LASSERT_TAGE_INVARIANT(victim);
-
- list_del(&victim->linkage);
- cfs_tage_free(victim);
- tcd->tcd_cur_daemon_pages--;
- }
- }
- spin_unlock(&pc->pc_lock);
-}
-
-static void put_pages_on_daemon_list(struct page_collection *pc)
-{
- struct cfs_trace_cpu_data *tcd;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu)
- put_pages_on_tcd_daemon_list(pc, tcd);
- }
-}
-
-void cfs_trace_debug_print(void)
-{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- spin_lock_init(&pc.pc_lock);
-
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- char *p, *file, *fn;
- struct page *page;
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- page = tage->page;
- p = page_address(page);
- while (p < ((char *)page_address(page) + tage->used)) {
- struct ptldebug_header *hdr;
- int len;
- hdr = (void *)p;
- p += sizeof(*hdr);
- file = p;
- p += strlen(file) + 1;
- fn = p;
- p += strlen(fn) + 1;
- len = hdr->ph_len - (int)(p - (char *)hdr);
-
- cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
-
- p += len;
- }
-
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
-}
-
-int cfs_tracefile_dump_all_pages(char *filename)
-{
- struct page_collection pc;
- struct file *filp;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- char *buf;
- int rc;
-
- DECL_MMSPACE;
-
- cfs_tracefile_write_lock();
-
- filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
- if (IS_ERR(filp)) {
- rc = PTR_ERR(filp);
- filp = NULL;
- pr_err("LustreError: can't open %s for dump: rc %d\n",
- filename, rc);
- goto out;
- }
-
- spin_lock_init(&pc.pc_lock);
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- if (list_empty(&pc.pc_pages)) {
- rc = 0;
- goto close;
- }
-
- /* ok, for now, just write the pages. in the future we'll be building
- * iobufs with the pages and calling generic_direct_IO */
- MMSPACE_OPEN;
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- buf = kmap(tage->page);
- rc = vfs_write(filp, (__force const char __user *)buf,
- tage->used, &filp->f_pos);
- kunmap(tage->page);
-
- if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but wrote %d\n",
- tage->used, rc);
- put_pages_back(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
- break;
- }
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
- MMSPACE_CLOSE;
- rc = vfs_fsync(filp, 1);
- if (rc)
- pr_err("sync returns %d\n", rc);
-close:
- filp_close(filp, NULL);
-out:
- cfs_tracefile_write_unlock();
- return rc;
-}
-
-void cfs_trace_flush_pages(void)
-{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
-
- spin_lock_init(&pc.pc_lock);
-
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
-}
-
-int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char __user *usr_buffer, int usr_buffer_nob)
-{
- int nob;
-
- if (usr_buffer_nob > knl_buffer_nob)
- return -EOVERFLOW;
-
- if (copy_from_user((void *)knl_buffer,
- usr_buffer, usr_buffer_nob))
- return -EFAULT;
-
- nob = strnlen(knl_buffer, usr_buffer_nob);
- while (nob-- >= 0) /* strip trailing whitespace */
- if (!isspace(knl_buffer[nob]))
- break;
-
- if (nob < 0) /* empty string */
- return -EINVAL;
-
- if (nob == knl_buffer_nob) /* no space to terminate */
- return -EOVERFLOW;
-
- knl_buffer[nob + 1] = 0; /* terminate */
- return 0;
-}
-EXPORT_SYMBOL(cfs_trace_copyin_string);
-
-int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
- const char *knl_buffer, char *append)
-{
- /* NB if 'append' != NULL, it's a single character to append to the
- * copied out string - usually "\n", for /proc entries and "" (i.e. a
- * terminating zero byte) for sysctl entries */
- int nob = strlen(knl_buffer);
-
- if (nob > usr_buffer_nob)
- nob = usr_buffer_nob;
-
- if (copy_to_user(usr_buffer, knl_buffer, nob))
- return -EFAULT;
-
- if (append != NULL && nob < usr_buffer_nob) {
- if (copy_to_user(usr_buffer + nob, append, 1))
- return -EFAULT;
-
- nob++;
- }
-
- return nob;
-}
-EXPORT_SYMBOL(cfs_trace_copyout_string);
-
-int cfs_trace_allocate_string_buffer(char **str, int nob)
-{
- if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
- return -EINVAL;
-
- *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
- if (*str == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-void cfs_trace_free_string_buffer(char *str, int nob)
-{
- kfree(str);
-}
-
-int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
-{
- char *str;
- int rc;
-
- rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc != 0)
- return rc;
-
- rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
- usr_str, usr_str_nob);
- if (rc != 0)
- goto out;
-
- if (str[0] != '/') {
- rc = -EINVAL;
- goto out;
- }
- rc = cfs_tracefile_dump_all_pages(str);
-out:
- cfs_trace_free_string_buffer(str, usr_str_nob + 1);
- return rc;
-}
-
-int cfs_trace_daemon_command(char *str)
-{
- int rc = 0;
-
- cfs_tracefile_write_lock();
-
- if (strcmp(str, "stop") == 0) {
- cfs_tracefile_write_unlock();
- cfs_trace_stop_thread();
- cfs_tracefile_write_lock();
- memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
-
- } else if (strncmp(str, "size=", 5) == 0) {
- cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
- if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
- cfs_tracefile_size = CFS_TRACEFILE_SIZE;
- else
- cfs_tracefile_size <<= 20;
-
- } else if (strlen(str) >= sizeof(cfs_tracefile)) {
- rc = -ENAMETOOLONG;
- } else if (str[0] != '/') {
- rc = -EINVAL;
- } else {
- strcpy(cfs_tracefile, str);
-
- printk(KERN_INFO
- "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
- cfs_tracefile,
- (long)(cfs_tracefile_size >> 10));
-
- cfs_trace_start_thread();
- }
-
- cfs_tracefile_write_unlock();
- return rc;
-}
-
-int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
-{
- char *str;
- int rc;
-
- rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
- if (rc != 0)
- return rc;
-
- rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
- usr_str, usr_str_nob);
- if (rc == 0)
- rc = cfs_trace_daemon_command(str);
-
- cfs_trace_free_string_buffer(str, usr_str_nob + 1);
- return rc;
-}
-
-int cfs_trace_set_debug_mb(int mb)
-{
- int i;
- int j;
- int pages;
- int limit = cfs_trace_max_debug_mb();
- struct cfs_trace_cpu_data *tcd;
-
- if (mb < num_possible_cpus()) {
- printk(KERN_WARNING
- "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
- mb, num_possible_cpus());
- mb = num_possible_cpus();
- }
-
- if (mb > limit) {
- printk(KERN_WARNING
- "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
- mb, limit);
- mb = limit;
- }
-
- mb /= num_possible_cpus();
- pages = mb << (20 - PAGE_CACHE_SHIFT);
-
- cfs_tracefile_write_lock();
-
- cfs_tcd_for_each(tcd, i, j)
- tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
-
- cfs_tracefile_write_unlock();
-
- return 0;
-}
-
-int cfs_trace_get_debug_mb(void)
-{
- int i;
- int j;
- struct cfs_trace_cpu_data *tcd;
- int total_pages = 0;
-
- cfs_tracefile_read_lock();
-
- cfs_tcd_for_each(tcd, i, j)
- total_pages += tcd->tcd_max_pages;
-
- cfs_tracefile_read_unlock();
-
- return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
-}
-
-static int tracefiled(void *arg)
-{
- struct page_collection pc;
- struct tracefiled_ctl *tctl = arg;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- struct file *filp;
- char *buf;
- int last_loop = 0;
- int rc;
-
- DECL_MMSPACE;
-
- /* we're started late enough that we pick up init's fs context */
- /* this is so broken in uml? what on earth is going on? */
-
- spin_lock_init(&pc.pc_lock);
- complete(&tctl->tctl_start);
-
- while (1) {
- wait_queue_t __wait;
-
- pc.pc_want_daemon_pages = 0;
- collect_pages(&pc);
- if (list_empty(&pc.pc_pages))
- goto end_loop;
-
- filp = NULL;
- cfs_tracefile_read_lock();
- if (cfs_tracefile[0] != 0) {
- filp = filp_open(cfs_tracefile,
- O_CREAT | O_RDWR | O_LARGEFILE,
- 0600);
- if (IS_ERR(filp)) {
- rc = PTR_ERR(filp);
- filp = NULL;
- printk(KERN_WARNING "couldn't open %s: %d\n",
- cfs_tracefile, rc);
- }
- }
- cfs_tracefile_read_unlock();
- if (filp == NULL) {
- put_pages_on_daemon_list(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
- goto end_loop;
- }
-
- MMSPACE_OPEN;
-
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
- linkage) {
- static loff_t f_pos;
-
- __LASSERT_TAGE_INVARIANT(tage);
-
- if (f_pos >= (off_t)cfs_tracefile_size)
- f_pos = 0;
- else if (f_pos > i_size_read(file_inode(filp)))
- f_pos = i_size_read(file_inode(filp));
-
- buf = kmap(tage->page);
- rc = vfs_write(filp, (__force const char __user *)buf,
- tage->used, &f_pos);
- kunmap(tage->page);
-
- if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but wrote %d\n",
- tage->used, rc);
- put_pages_back(&pc);
- __LASSERT(list_empty(&pc.pc_pages));
- break;
- }
- }
- MMSPACE_CLOSE;
-
- filp_close(filp, NULL);
- put_pages_on_daemon_list(&pc);
- if (!list_empty(&pc.pc_pages)) {
- int i;
-
- printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
- pr_err("total cpus(%d): ",
- num_possible_cpus());
- for (i = 0; i < num_possible_cpus(); i++)
- if (cpu_online(i))
- pr_cont("%d(on) ", i);
- else
- pr_cont("%d(off) ", i);
- pr_cont("\n");
-
- i = 0;
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
- linkage)
- pr_err("page %d belongs to cpu %d\n",
- ++i, tage->cpu);
- pr_err("There are %d pages unwritten\n", i);
- }
- __LASSERT(list_empty(&pc.pc_pages));
-end_loop:
- if (atomic_read(&tctl->tctl_shutdown)) {
- if (last_loop == 0) {
- last_loop = 1;
- continue;
- } else {
- break;
- }
- }
- init_waitqueue_entry(&__wait, current);
- add_wait_queue(&tctl->tctl_waitq, &__wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- remove_wait_queue(&tctl->tctl_waitq, &__wait);
- }
- complete(&tctl->tctl_stop);
- return 0;
-}
-
-int cfs_trace_start_thread(void)
-{
- struct tracefiled_ctl *tctl = &trace_tctl;
- int rc = 0;
-
- mutex_lock(&cfs_trace_thread_mutex);
- if (thread_running)
- goto out;
-
- init_completion(&tctl->tctl_start);
- init_completion(&tctl->tctl_stop);
- init_waitqueue_head(&tctl->tctl_waitq);
- atomic_set(&tctl->tctl_shutdown, 0);
-
- if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
- rc = -ECHILD;
- goto out;
- }
-
- wait_for_completion(&tctl->tctl_start);
- thread_running = 1;
-out:
- mutex_unlock(&cfs_trace_thread_mutex);
- return rc;
-}
-
-void cfs_trace_stop_thread(void)
-{
- struct tracefiled_ctl *tctl = &trace_tctl;
-
- mutex_lock(&cfs_trace_thread_mutex);
- if (thread_running) {
- printk(KERN_INFO
- "Lustre: shutting down debug daemon thread...\n");
- atomic_set(&tctl->tctl_shutdown, 1);
- wait_for_completion(&tctl->tctl_stop);
- thread_running = 0;
- }
- mutex_unlock(&cfs_trace_thread_mutex);
-}
-
-int cfs_tracefile_init(int max_pages)
-{
- struct cfs_trace_cpu_data *tcd;
- int i;
- int j;
- int rc;
- int factor;
-
- rc = cfs_tracefile_init_arch();
- if (rc != 0)
- return rc;
-
- cfs_tcd_for_each(tcd, i, j) {
-		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
- factor = tcd->tcd_pages_factor;
- INIT_LIST_HEAD(&tcd->tcd_pages);
- INIT_LIST_HEAD(&tcd->tcd_stock_pages);
- INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
- tcd->tcd_cur_pages = 0;
- tcd->tcd_cur_stock_pages = 0;
- tcd->tcd_cur_daemon_pages = 0;
- tcd->tcd_max_pages = (max_pages * factor) / 100;
- LASSERT(tcd->tcd_max_pages > 0);
- tcd->tcd_shutting_down = 0;
- }
-
- return 0;
-}
-
-static void trace_cleanup_on_all_cpus(void)
-{
- struct cfs_trace_cpu_data *tcd;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- tcd->tcd_shutting_down = 1;
-
- list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
- linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
-
- list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
-
- tcd->tcd_cur_pages = 0;
- }
- }
-}
-
-static void cfs_trace_cleanup(void)
-{
- struct page_collection pc;
-
- INIT_LIST_HEAD(&pc.pc_pages);
- spin_lock_init(&pc.pc_lock);
-
- trace_cleanup_on_all_cpus();
-
- cfs_tracefile_fini_arch();
-}
-
-void cfs_tracefile_exit(void)
-{
- cfs_trace_stop_thread();
- cfs_trace_cleanup();
-}
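/*
 * Illustrative sketch (not part of the removed file): cfs_trace_set_debug_mb()
 * above turns the requested debug buffer size into a per-CPU page budget --
 * the MB count is split evenly across possible CPUs, converted to pages with
 * mb << (20 - PAGE_SHIFT), and each per-CPU/per-type buffer then receives
 * (pages * tcd_pages_factor) / 100 of that.  A userspace rendering of the
 * arithmetic with hypothetical example values (4 CPUs, 4 KiB pages, an 80%
 * pages factor):
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assume 4 KiB pages */

static unsigned long max_pages_for(int mb, int ncpus, int pages_factor)
{
	unsigned long pages;

	mb /= ncpus;					/* split the budget per CPU */
	pages = (unsigned long)mb << (20 - EXAMPLE_PAGE_SHIFT);

	return pages * pages_factor / 100;		/* per-type share */
}

int main(void)
{
	/* e.g. debug_mb=256 on 4 CPUs with an 80% factor -> 13107 pages */
	printf("tcd_max_pages = %lu\n", max_pages_for(256, 4, 80));
	return 0;
}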
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lustre/libcfs/tracefile.h
deleted file mode 100644
index e931f6d98de9..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.h
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LIBCFS_TRACEFILE_H__
-#define __LIBCFS_TRACEFILE_H__
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "linux/linux-tracefile.h"
-
-/* trace file lock routines */
-
-#define TRACEFILE_NAME_SIZE 1024
-extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
-extern long long cfs_tracefile_size;
-
-void libcfs_run_debug_log_upcall(char *file);
-
-int cfs_tracefile_init_arch(void);
-void cfs_tracefile_fini_arch(void);
-
-void cfs_tracefile_read_lock(void);
-void cfs_tracefile_read_unlock(void);
-void cfs_tracefile_write_lock(void);
-void cfs_tracefile_write_unlock(void);
-
-int cfs_tracefile_dump_all_pages(char *filename);
-void cfs_trace_debug_print(void);
-void cfs_trace_flush_pages(void);
-int cfs_trace_start_thread(void);
-void cfs_trace_stop_thread(void);
-int cfs_tracefile_init(int max_pages);
-void cfs_tracefile_exit(void);
-
-
-
-int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char __user *usr_buffer, int usr_buffer_nob);
-int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
- const char *knl_str, char *append);
-int cfs_trace_allocate_string_buffer(char **str, int nob);
-void cfs_trace_free_string_buffer(char *str, int nob);
-int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob);
-int cfs_trace_daemon_command(char *str);
-int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob);
-int cfs_trace_set_debug_mb(int mb);
-int cfs_trace_get_debug_mb(void);
-
-void libcfs_debug_dumplog_internal(void *arg);
-void libcfs_register_panic_notifier(void);
-void libcfs_unregister_panic_notifier(void);
-extern int libcfs_panic_in_progress;
-int cfs_trace_max_debug_mb(void);
-
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
-#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-#define CFS_TRACEFILE_SIZE (500 << 20)
-
-#ifdef LUSTRE_TRACEFILE_PRIVATE
-
-/*
- * Private declare for tracefile
- */
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
-#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-
-#define CFS_TRACEFILE_SIZE (500 << 20)
-
-/* Size of a buffer for printing console messages if we can't get a page
- * from the system */
-#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024
-
-union cfs_trace_data_union {
- struct cfs_trace_cpu_data {
- /*
- * Even though this structure is meant to be per-CPU, locking
- * is needed because in some places the data may be accessed
- * from other CPUs. This lock is directly used in trace_get_tcd
- * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
- * tcd_for_each_type_lock
- */
- spinlock_t tcd_lock;
- unsigned long tcd_lock_flags;
-
- /*
- * pages with trace records not yet processed by tracefiled.
- */
- struct list_head tcd_pages;
- /* number of pages on ->tcd_pages */
- unsigned long tcd_cur_pages;
-
- /*
- * pages with trace records already processed by
- * tracefiled. These pages are kept in memory, so that some
- * portion of log can be written in the event of LBUG. This
- * list is maintained in LRU order.
- *
- * Pages are moved to ->tcd_daemon_pages by tracefiled()
- * (put_pages_on_daemon_list()). LRU pages from this list are
- * discarded when list grows too large.
- */
- struct list_head tcd_daemon_pages;
- /* number of pages on ->tcd_daemon_pages */
- unsigned long tcd_cur_daemon_pages;
-
- /*
- * Maximal number of pages allowed on ->tcd_pages and
- * ->tcd_daemon_pages each.
- * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
- * implementation.
- */
- unsigned long tcd_max_pages;
-
- /*
- * preallocated pages to write trace records into. Pages from
- * ->tcd_stock_pages are moved to ->tcd_pages by
- * portals_debug_msg().
- *
- * This list is necessary, because on some platforms it's
- * impossible to perform efficient atomic page allocation in a
- * non-blockable context.
- *
- * Such platforms fill ->tcd_stock_pages "on occasion", when
- * tracing code is entered in blockable context.
- *
- * trace_get_tage_try() tries to get a page from
- * ->tcd_stock_pages first and resorts to atomic page
- * allocation only if this queue is empty. ->tcd_stock_pages
- * is replenished when tracing code is entered in blocking
- * context (darwin-tracefile.c:trace_get_tcd()). We try to
- * maintain TCD_STOCK_PAGES (40 by default) pages in this
- * queue. Atomic allocation is only required if more than
- * TCD_STOCK_PAGES pagesful are consumed by trace records all
- * emitted in non-blocking contexts. Which is quite unlikely.
- */
- struct list_head tcd_stock_pages;
- /* number of pages on ->tcd_stock_pages */
- unsigned long tcd_cur_stock_pages;
-
- unsigned short tcd_shutting_down;
- unsigned short tcd_cpu;
- unsigned short tcd_type;
- /* The factors to share debug memory. */
- unsigned short tcd_pages_factor;
- } tcd;
- char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
-};
-
-#define TCD_MAX_TYPES 8
-extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
-
-#define cfs_tcd_for_each(tcd, i, j) \
- for (i = 0; cfs_trace_data[i] != NULL; i++) \
- for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
- j < num_possible_cpus(); \
- j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
-
-#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
- for (i = 0; cfs_trace_data[i] && \
- (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
- cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
-
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct page_collection {
- struct list_head pc_pages;
- /*
- * spin-lock protecting ->pc_pages. It is taken by smp_call_function()
- * call-back functions. XXX nikita: Which is horrible: all processors
- * receive NMI at the same time only to be serialized by this
- * lock. Probably ->pc_pages should be replaced with an array of
- * NR_CPUS elements accessed locklessly.
- */
- spinlock_t pc_lock;
- /*
- * if this flag is set, collect_pages() will spill both
- * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
- * only ->tcd_pages are spilled.
- */
- int pc_want_daemon_pages;
-};
-
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct tracefiled_ctl {
- struct completion tctl_start;
- struct completion tctl_stop;
- wait_queue_head_t tctl_waitq;
- pid_t tctl_pid;
- atomic_t tctl_shutdown;
-};
-
-/*
- * small data-structure for each page owned by tracefiled.
- */
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct cfs_trace_page {
- /*
- * page itself
- */
- struct page *page;
- /*
- * linkage into one of the lists in trace_data_union or
- * page_collection
- */
- struct list_head linkage;
- /*
- * number of bytes used within this page
- */
- unsigned int used;
- /*
- * cpu that owns this page
- */
- unsigned short cpu;
- /*
- * type(context) of this page
- */
- unsigned short type;
-};
-
-void cfs_set_ptldebug_header(struct ptldebug_header *header,
- struct libcfs_debug_msg_data *m,
- unsigned long stack);
-void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
- const char *buf, int len, const char *file,
- const char *fn);
-
-int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
-void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
-
-/**
- * trace_buf_type_t, trace_buf_idx_get() and trace_console_buffers[][]
- * are not public libcfs API; they should be defined in
- * platform-specific tracefile include files
- * (see, for example, linux-tracefile.h).
- */
-
-extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
-
-static inline char *
-cfs_trace_get_console_buffer(void)
-{
- unsigned int i = get_cpu();
- unsigned int j = cfs_trace_buf_idx_get();
-
- return cfs_trace_console_buffers[i][j];
-}
-
-static inline void
-cfs_trace_put_console_buffer(char *buffer)
-{
- put_cpu();
-}
-
-static inline struct cfs_trace_cpu_data *
-cfs_trace_get_tcd(void)
-{
- struct cfs_trace_cpu_data *tcd =
- &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
-
- cfs_trace_lock_tcd(tcd, 0);
-
- return tcd;
-}
-
-static inline void
-cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
-{
- cfs_trace_unlock_tcd(tcd, 0);
-
- put_cpu();
-}
-
-int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
- struct list_head *stock);
-
-
-int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
- struct cfs_trace_page *tage);
-
-void cfs_trace_assertion_failed(const char *str,
- struct libcfs_debug_msg_data *m);
-
-/* ASSERTION that is safe to use within the debug system */
-#define __LASSERT(cond) \
-do { \
- if (unlikely(!(cond))) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
- cfs_trace_assertion_failed("ASSERTION("#cond") failed", \
- &msgdata); \
- } \
-} while (0)
-
-#define __LASSERT_TAGE_INVARIANT(tage) \
-do { \
- __LASSERT(tage != NULL); \
- __LASSERT(tage->page != NULL); \
- __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
- __LASSERT(page_count(tage->page) > 0); \
-} while (0)
-
-#endif /* LUSTRE_TRACEFILE_PRIVATE */
-
-#endif /* __LIBCFS_TRACEFILE_H__ */
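
The header above documents ->tcd_stock_pages: a small pool of pages preallocated while the caller is allowed to block, so the tracing hot path only rarely has to fall back to atomic allocation. Below is a self-contained userspace sketch of that idea; it is not taken from Lustre, the names and sizes are invented, and malloc() merely stands in for page allocation.

/* Illustrative sketch only -- not Lustre kernel code. */
#include <stdlib.h>
#include <stdio.h>

#define STOCK_TARGET 8
#define BUF_SIZE     4096

struct stock {
	void *bufs[STOCK_TARGET];
	int   nr;
};

/* refill the pool; called from a context where allocation may block */
static void stock_refill(struct stock *s)
{
	while (s->nr < STOCK_TARGET) {
		void *b = malloc(BUF_SIZE);

		if (!b)
			break;
		s->bufs[s->nr++] = b;
	}
}

/* hot path: prefer a stocked buffer, fall back to the allocator */
static void *stock_get(struct stock *s)
{
	if (s->nr > 0)
		return s->bufs[--s->nr];
	return malloc(BUF_SIZE);	/* the "atomic allocation" fallback */
}

int main(void)
{
	struct stock s = { .nr = 0 };
	void *b;

	stock_refill(&s);
	b = stock_get(&s);
	printf("got buffer %p, %d left in stock\n", b, s.nr);
	free(b);
	while (s.nr > 0)
		free(s.bufs[--s.nr]);
	return 0;
}
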
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
deleted file mode 100644
index 02d18f558fdf..000000000000
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/workitem.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- * Liang Zhen <zhen.liang@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#define CFS_WS_NAME_LEN 16
-
-struct cfs_wi_sched {
- struct list_head ws_list; /* chain on global list */
- /** serialised workitems */
- spinlock_t ws_lock;
- /** where schedulers sleep */
- wait_queue_head_t ws_waitq;
- /** concurrent workitems */
- struct list_head ws_runq;
-	/** rescheduled running-workitems: a workitem can be rescheduled
-	 * while running in wi_action(), but we don't want to execute it again
-	 * until it returns from wi_action(), so we put it on ws_rerunq
-	 * while rescheduling, and move it to the runq after it returns
-	 * from wi_action() */
- struct list_head ws_rerunq;
- /** CPT-table for this scheduler */
- struct cfs_cpt_table *ws_cptab;
- /** CPT id for affinity */
- int ws_cpt;
- /** number of scheduled workitems */
- int ws_nscheduled;
- /** started scheduler thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_nthreads:30;
- /** shutting down, protected by cfs_wi_data::wi_glock */
- unsigned int ws_stopping:1;
- /** serialize starting thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_starting:1;
- /** scheduler name */
- char ws_name[CFS_WS_NAME_LEN];
-};
-
-static struct cfs_workitem_data {
- /** serialize */
- spinlock_t wi_glock;
- /** list of all schedulers */
- struct list_head wi_scheds;
- /** WI module is initialized */
- int wi_init;
- /** shutting down the whole WI module */
- int wi_stopping;
-} cfs_wi_data;
-
-static inline void
-cfs_wi_sched_lock(struct cfs_wi_sched *sched)
-{
- spin_lock(&sched->ws_lock);
-}
-
-static inline void
-cfs_wi_sched_unlock(struct cfs_wi_sched *sched)
-{
- spin_unlock(&sched->ws_lock);
-}
-
-static inline int
-cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
-{
- cfs_wi_sched_lock(sched);
- if (sched->ws_stopping) {
- cfs_wi_sched_unlock(sched);
- return 0;
- }
-
- if (!list_empty(&sched->ws_runq)) {
- cfs_wi_sched_unlock(sched);
- return 0;
- }
- cfs_wi_sched_unlock(sched);
- return 1;
-}
-
-
-/* XXX:
- * 0. it only works when called from wi->wi_action.
- * 1. when it returns no one shall try to schedule the workitem.
- */
-void
-cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
-{
- LASSERT(!in_interrupt()); /* because we use plain spinlock */
- LASSERT(!sched->ws_stopping);
-
- cfs_wi_sched_lock(sched);
-
- LASSERT(wi->wi_running);
- if (wi->wi_scheduled) { /* cancel pending schedules */
- LASSERT(!list_empty(&wi->wi_list));
- list_del_init(&wi->wi_list);
-
- LASSERT(sched->ws_nscheduled > 0);
- sched->ws_nscheduled--;
- }
-
- LASSERT(list_empty(&wi->wi_list));
-
- wi->wi_scheduled = 1; /* LBUG future schedule attempts */
- cfs_wi_sched_unlock(sched);
-
- return;
-}
-EXPORT_SYMBOL(cfs_wi_exit);
-
-/**
- * cancel schedule request of workitem \a wi
- */
-int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
-{
- int rc;
-
- LASSERT(!in_interrupt()); /* because we use plain spinlock */
- LASSERT(!sched->ws_stopping);
-
- /*
- * return 0 if it's running already, otherwise return 1, which
- * means the workitem will not be scheduled and will not have
- * any race with wi_action.
- */
- cfs_wi_sched_lock(sched);
-
- rc = !(wi->wi_running);
-
- if (wi->wi_scheduled) { /* cancel pending schedules */
- LASSERT(!list_empty(&wi->wi_list));
- list_del_init(&wi->wi_list);
-
- LASSERT(sched->ws_nscheduled > 0);
- sched->ws_nscheduled--;
-
- wi->wi_scheduled = 0;
- }
-
- LASSERT (list_empty(&wi->wi_list));
-
- cfs_wi_sched_unlock(sched);
- return rc;
-}
-EXPORT_SYMBOL(cfs_wi_deschedule);
-
-/*
- * Workitem scheduled with (serial == 1) is strictly serialised not only with
- * itself, but also with others scheduled this way.
- *
- * Now there's only one static serialised queue, but in the future more might
- * be added, and even dynamic creation of serialised queues might be supported.
- */
-void
-cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
-{
- LASSERT(!in_interrupt()); /* because we use plain spinlock */
- LASSERT(!sched->ws_stopping);
-
- cfs_wi_sched_lock(sched);
-
- if (!wi->wi_scheduled) {
- LASSERT (list_empty(&wi->wi_list));
-
- wi->wi_scheduled = 1;
- sched->ws_nscheduled++;
- if (!wi->wi_running) {
- list_add_tail(&wi->wi_list, &sched->ws_runq);
- wake_up(&sched->ws_waitq);
- } else {
- list_add(&wi->wi_list, &sched->ws_rerunq);
- }
- }
-
- LASSERT (!list_empty(&wi->wi_list));
- cfs_wi_sched_unlock(sched);
- return;
-}
-EXPORT_SYMBOL(cfs_wi_schedule);
-
-
-static int
-cfs_wi_scheduler (void *arg)
-{
- struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
-
- cfs_block_allsigs();
-
- /* CPT affinity scheduler? */
- if (sched->ws_cptab != NULL)
- cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
-
- spin_lock(&cfs_wi_data.wi_glock);
-
- LASSERT(sched->ws_starting == 1);
- sched->ws_starting--;
- sched->ws_nthreads++;
-
- spin_unlock(&cfs_wi_data.wi_glock);
-
- cfs_wi_sched_lock(sched);
-
- while (!sched->ws_stopping) {
- int nloops = 0;
- int rc;
- cfs_workitem_t *wi;
-
- while (!list_empty(&sched->ws_runq) &&
- nloops < CFS_WI_RESCHED) {
- wi = list_entry(sched->ws_runq.next,
- cfs_workitem_t, wi_list);
- LASSERT(wi->wi_scheduled && !wi->wi_running);
-
- list_del_init(&wi->wi_list);
-
- LASSERT(sched->ws_nscheduled > 0);
- sched->ws_nscheduled--;
-
- wi->wi_running = 1;
- wi->wi_scheduled = 0;
-
-
- cfs_wi_sched_unlock(sched);
- nloops++;
-
- rc = (*wi->wi_action) (wi);
-
- cfs_wi_sched_lock(sched);
- if (rc != 0) /* WI should be dead, even be freed! */
- continue;
-
- wi->wi_running = 0;
- if (list_empty(&wi->wi_list))
- continue;
-
- LASSERT(wi->wi_scheduled);
- /* wi is rescheduled, should be on rerunq now, we
- * move it to runq so it can run action now */
- list_move_tail(&wi->wi_list, &sched->ws_runq);
- }
-
- if (!list_empty(&sched->ws_runq)) {
- cfs_wi_sched_unlock(sched);
- /* don't sleep because some workitems still
- * expect me to come back soon */
- cond_resched();
- cfs_wi_sched_lock(sched);
- continue;
- }
-
- cfs_wi_sched_unlock(sched);
- rc = wait_event_interruptible_exclusive(sched->ws_waitq,
- !cfs_wi_sched_cansleep(sched));
- cfs_wi_sched_lock(sched);
- }
-
- cfs_wi_sched_unlock(sched);
-
- spin_lock(&cfs_wi_data.wi_glock);
- sched->ws_nthreads--;
- spin_unlock(&cfs_wi_data.wi_glock);
-
- return 0;
-}
-
-
-void
-cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
-{
- int i;
-
- LASSERT(cfs_wi_data.wi_init);
- LASSERT(!cfs_wi_data.wi_stopping);
-
- spin_lock(&cfs_wi_data.wi_glock);
- if (sched->ws_stopping) {
- CDEBUG(D_INFO, "%s is in progress of stopping\n",
- sched->ws_name);
- spin_unlock(&cfs_wi_data.wi_glock);
- return;
- }
-
- LASSERT(!list_empty(&sched->ws_list));
- sched->ws_stopping = 1;
-
- spin_unlock(&cfs_wi_data.wi_glock);
-
- i = 2;
- wake_up_all(&sched->ws_waitq);
-
- spin_lock(&cfs_wi_data.wi_glock);
- while (sched->ws_nthreads > 0) {
- CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
- "waiting for %d threads of WI sched[%s] to terminate\n",
- sched->ws_nthreads, sched->ws_name);
-
- spin_unlock(&cfs_wi_data.wi_glock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 20);
- spin_lock(&cfs_wi_data.wi_glock);
- }
-
- list_del(&sched->ws_list);
-
- spin_unlock(&cfs_wi_data.wi_glock);
- LASSERT(sched->ws_nscheduled == 0);
-
- LIBCFS_FREE(sched, sizeof(*sched));
-}
-EXPORT_SYMBOL(cfs_wi_sched_destroy);
-
-int
-cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
- int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
-{
- struct cfs_wi_sched *sched;
- int rc;
-
- LASSERT(cfs_wi_data.wi_init);
- LASSERT(!cfs_wi_data.wi_stopping);
- LASSERT(cptab == NULL || cpt == CFS_CPT_ANY ||
- (cpt >= 0 && cpt < cfs_cpt_number(cptab)));
-
- LIBCFS_ALLOC(sched, sizeof(*sched));
- if (sched == NULL)
- return -ENOMEM;
-
- strncpy(sched->ws_name, name, CFS_WS_NAME_LEN);
- sched->ws_name[CFS_WS_NAME_LEN - 1] = '\0';
- sched->ws_cptab = cptab;
- sched->ws_cpt = cpt;
-
- spin_lock_init(&sched->ws_lock);
- init_waitqueue_head(&sched->ws_waitq);
- INIT_LIST_HEAD(&sched->ws_runq);
- INIT_LIST_HEAD(&sched->ws_rerunq);
- INIT_LIST_HEAD(&sched->ws_list);
-
- rc = 0;
- while (nthrs > 0) {
- char name[16];
- struct task_struct *task;
-
- spin_lock(&cfs_wi_data.wi_glock);
- while (sched->ws_starting > 0) {
- spin_unlock(&cfs_wi_data.wi_glock);
- schedule();
- spin_lock(&cfs_wi_data.wi_glock);
- }
-
- sched->ws_starting++;
- spin_unlock(&cfs_wi_data.wi_glock);
-
- if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
- snprintf(name, sizeof(name), "%s_%02d_%02u",
- sched->ws_name, sched->ws_cpt,
- sched->ws_nthreads);
- } else {
- snprintf(name, sizeof(name), "%s_%02u",
- sched->ws_name, sched->ws_nthreads);
- }
-
- task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
- if (!IS_ERR(task)) {
- nthrs--;
- continue;
- }
- rc = PTR_ERR(task);
-
- CERROR("Failed to create thread for WI scheduler %s: %d\n",
- name, rc);
-
- spin_lock(&cfs_wi_data.wi_glock);
-
- /* make up for cfs_wi_sched_destroy */
- list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- sched->ws_starting--;
-
- spin_unlock(&cfs_wi_data.wi_glock);
-
- cfs_wi_sched_destroy(sched);
- return rc;
- }
- spin_lock(&cfs_wi_data.wi_glock);
- list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- spin_unlock(&cfs_wi_data.wi_glock);
-
- *sched_pp = sched;
- return 0;
-}
-EXPORT_SYMBOL(cfs_wi_sched_create);
-
-int
-cfs_wi_startup(void)
-{
- memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
-
- spin_lock_init(&cfs_wi_data.wi_glock);
- INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
- cfs_wi_data.wi_init = 1;
-
- return 0;
-}
-
-void
-cfs_wi_shutdown(void)
-{
- struct cfs_wi_sched *sched;
-
- spin_lock(&cfs_wi_data.wi_glock);
- cfs_wi_data.wi_stopping = 1;
- spin_unlock(&cfs_wi_data.wi_glock);
-
- /* nobody should contend on this list */
- list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
- sched->ws_stopping = 1;
- wake_up_all(&sched->ws_waitq);
- }
-
- list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
- spin_lock(&cfs_wi_data.wi_glock);
-
- while (sched->ws_nthreads != 0) {
- spin_unlock(&cfs_wi_data.wi_glock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 20);
- spin_lock(&cfs_wi_data.wi_glock);
- }
- spin_unlock(&cfs_wi_data.wi_glock);
- }
- while (!list_empty(&cfs_wi_data.wi_scheds)) {
- sched = list_entry(cfs_wi_data.wi_scheds.next,
- struct cfs_wi_sched, ws_list);
- list_del(&sched->ws_list);
- LIBCFS_FREE(sched, sizeof(*sched));
- }
-
- cfs_wi_data.wi_stopping = 0;
- cfs_wi_data.wi_init = 0;
-}
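
The scheduler loop deleted above relies on the rerun-queue trick documented in struct cfs_wi_sched: an item rescheduled while its action is running is parked on ws_rerunq and only moved back to the run queue once wi_action() returns, so the same item never runs twice at once. The sketch below is an invented, single-threaded userspace illustration of that bookkeeping, not Lustre code; list handling is reduced to fixed arrays to keep it short.

/* Illustrative sketch only -- not Lustre kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct workitem {
	const char *name;
	int         reruns_left;	/* how often the action reschedules itself */
	bool        running;
	bool        scheduled;
};

static struct workitem *runq[8];
static struct workitem *rerunq[8];
static int nrun, nrerun;

static void wi_schedule(struct workitem *wi)
{
	if (wi->scheduled)
		return;
	wi->scheduled = true;
	if (wi->running)
		rerunq[nrerun++] = wi;	/* parked until the action returns */
	else
		runq[nrun++] = wi;
}

static void wi_run_one(struct workitem *wi)
{
	wi->running = true;
	wi->scheduled = false;
	printf("running %s (%d reruns left)\n", wi->name, wi->reruns_left);
	if (wi->reruns_left-- > 0)
		wi_schedule(wi);	/* reschedule self while still running */
	wi->running = false;
}

int main(void)
{
	struct workitem a = { .name = "a", .reruns_left = 2 };
	int i;

	wi_schedule(&a);
	while (nrun > 0) {
		struct workitem *wi = runq[--nrun];

		wi_run_one(wi);
		/* only after the action returned: drain rerunq back to runq */
		for (i = 0; i < nrerun; i++)
			runq[nrun++] = rerunq[i];
		nrerun = 0;
	}
	return 0;
}
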
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
deleted file mode 100644
index 9ac29e718da3..000000000000
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += lustre.o
-obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
-lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
- rw.o namei.o symlink.o llite_mmap.o \
- xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
- rw26.o super25.o statahead.o \
- ../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \
- vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o lproc_llite.o
-
-llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
deleted file mode 100644
index b86685912d28..000000000000
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/quotaops.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_dlm.h"
-
-#include "llite_internal.h"
-
-static void free_dentry_data(struct rcu_head *head)
-{
- struct ll_dentry_data *lld;
-
- lld = container_of(head, struct ll_dentry_data, lld_rcu_head);
- kfree(lld);
-}
-
-/* should NOT be called with the dcache lock, see fs/dcache.c */
-static void ll_release(struct dentry *de)
-{
- struct ll_dentry_data *lld;
-
- LASSERT(de != NULL);
- lld = ll_d2d(de);
- if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
- return;
-
- if (lld->lld_it) {
- ll_intent_release(lld->lld_it);
- kfree(lld->lld_it);
- }
-
- de->d_fsdata = NULL;
- call_rcu(&lld->lld_rcu_head, free_dentry_data);
-}
-
-/* Compare if two dentries are the same. Don't match if the existing dentry
- * is marked invalid. Returns 1 if different, 0 if the same.
- *
- * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
- * an AST before calling d_revalidate_it(). The dentry still exists (marked
- * INVALID) so d_lookup() matches it, but we have no lock on it (so
- * lock_match() fails) and we spin around real_lookup(). */
-static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
- unsigned int len, const char *str,
- const struct qstr *name)
-{
- if (len != name->len)
- return 1;
-
- if (memcmp(str, name->name, len))
- return 1;
-
- CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
- name->len, name->name, dentry, dentry->d_flags,
- d_count(dentry));
-
- /* mountpoint is always valid */
- if (d_mountpoint((struct dentry *)dentry))
- return 0;
-
- if (d_lustre_invalid(dentry))
- return 1;
-
- return 0;
-}
-
-static inline int return_if_equal(struct ldlm_lock *lock, void *data)
-{
- if ((lock->l_flags &
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
- return LDLM_ITER_CONTINUE;
- return LDLM_ITER_STOP;
-}
-
-/* find any ldlm lock of the inode in mdc and lov
- * return 0   if none is found
- *        1   if one is found
- *        < 0 on error */
-static int find_cbdata(struct inode *inode)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lov_stripe_md *lsm;
- int rc = 0;
-
- LASSERT(inode);
- rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
- return_if_equal, NULL);
- if (rc != 0)
- return rc;
-
- lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL)
- return rc;
-
- rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
- ccc_inode_lsm_put(inode, lsm);
-
- return rc;
-}
-
-/**
- * Called when last reference to a dentry is dropped and dcache wants to know
- * whether or not it should cache it:
- * - return 1 to delete the dentry immediately
- * - return 0 to cache the dentry
- * Should NOT be called with the dcache lock, see fs/dcache.c
- */
-static int ll_ddelete(const struct dentry *de)
-{
- LASSERT(de);
-
- CDEBUG(D_DENTRY, "%s dentry %pd (%p, parent %p, inode %p) %s%s\n",
- d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
- de, de, de->d_parent, d_inode(de),
- d_unhashed(de) ? "" : "hashed,",
- list_empty(&de->d_subdirs) ? "" : "subdirs");
-
-	/* On kernels >= 2.6.38 the last refcount is decreased after this function. */
- LASSERT(d_count(de) == 1);
-
-	/* This piece of code is disabled temporarily because it is called
-	 * inside dcache_lock, so it's not appropriate to do lots of work
-	 * here. ATTENTION: before enabling this piece of code, LU-2487 must
-	 * be resolved. */
-#if 0
-	/* if there is no ldlm lock for this inode, set i_nlink to 0 so that
-	 * this inode can be recycled later (b=20433) */
- if (d_really_is_positive(de) && !find_cbdata(d_inode(de)))
- clear_nlink(d_inode(de));
-#endif
-
- if (d_lustre_invalid((struct dentry *)de))
- return 1;
- return 0;
-}
-
-int ll_d_init(struct dentry *de)
-{
- LASSERT(de != NULL);
-
- CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
- de, de, de->d_parent, d_inode(de),
- d_count(de));
-
- if (de->d_fsdata == NULL) {
- struct ll_dentry_data *lld;
-
- lld = kzalloc(sizeof(*lld), GFP_NOFS);
- if (likely(lld)) {
- spin_lock(&de->d_lock);
- if (likely(de->d_fsdata == NULL)) {
- de->d_fsdata = lld;
- __d_lustre_invalidate(de);
- } else {
- kfree(lld);
- }
- spin_unlock(&de->d_lock);
- } else {
- return -ENOMEM;
- }
- }
- LASSERT(de->d_op == &ll_d_ops);
-
- return 0;
-}
-
-void ll_intent_drop_lock(struct lookup_intent *it)
-{
- if (it->it_op && it->d.lustre.it_lock_mode) {
- struct lustre_handle handle;
-
- handle.cookie = it->d.lustre.it_lock_handle;
-
- CDEBUG(D_DLMTRACE, "releasing lock with cookie %#llx from it %p\n",
- handle.cookie, it);
- ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode);
-
- /* bug 494: intent_release may be called multiple times, from
- * this thread and we don't want to double-decref this lock */
- it->d.lustre.it_lock_mode = 0;
- if (it->d.lustre.it_remote_lock_mode != 0) {
- handle.cookie = it->d.lustre.it_remote_lock_handle;
-
- CDEBUG(D_DLMTRACE, "releasing remote lock with cookie%#llx from it %p\n",
- handle.cookie, it);
- ldlm_lock_decref(&handle,
- it->d.lustre.it_remote_lock_mode);
- it->d.lustre.it_remote_lock_mode = 0;
- }
- }
-}
-
-void ll_intent_release(struct lookup_intent *it)
-{
- CDEBUG(D_INFO, "intent %p released\n", it);
- ll_intent_drop_lock(it);
- /* We are still holding extra reference on a request, need to free it */
- if (it_disposition(it, DISP_ENQ_OPEN_REF))
- ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
-
- if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
- ptlrpc_req_finished(it->d.lustre.it_data);
-
- it->d.lustre.it_disposition = 0;
- it->d.lustre.it_data = NULL;
-}
-
-void ll_invalidate_aliases(struct inode *inode)
-{
- struct dentry *dentry;
-
- LASSERT(inode != NULL);
-
- CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
- inode->i_ino, inode->i_generation, inode);
-
- ll_lock_dcache(inode);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
- dentry, dentry, dentry->d_parent,
- d_inode(dentry), dentry->d_flags);
-
- d_lustre_invalidate(dentry, 0);
- }
- ll_unlock_dcache(inode);
-}
-
-int ll_revalidate_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it,
- struct inode *inode)
-{
- int rc = 0;
-
- if (!request)
- return 0;
-
- if (it_disposition(it, DISP_LOOKUP_NEG))
- return -ENOENT;
-
- rc = ll_prep_inode(&inode, request, NULL, it);
-
- return rc;
-}
-
-void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
-{
- LASSERT(it != NULL);
-
- if (it->d.lustre.it_lock_mode && inode != NULL) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
- ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
- }
-
- /* drop lookup or getattr locks immediately */
- if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
-		/* on 2.6 there are situations when several lookups and
-		 * revalidations may be requested during a single operation.
-		 * therefore, we don't release the intent here -bzzz */
- ll_intent_drop_lock(it);
- }
-}
-
-static int ll_revalidate_dentry(struct dentry *dentry,
- unsigned int lookup_flags)
-{
- struct inode *dir = d_inode(dentry->d_parent);
-
- /*
- * if open&create is set, talk to MDS to make sure file is created if
- * necessary, because we can't do this in ->open() later since that's
- * called on an inode. return 0 here to let lookup to handle this.
- */
- if ((lookup_flags & (LOOKUP_OPEN | LOOKUP_CREATE)) ==
- (LOOKUP_OPEN | LOOKUP_CREATE))
- return 0;
-
- if (lookup_flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE))
- return 1;
-
- if (d_need_statahead(dir, dentry) <= 0)
- return 1;
-
- if (lookup_flags & LOOKUP_RCU)
- return -ECHILD;
-
- do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL);
- ll_statahead_mark(dir, dentry);
- return 1;
-}
-
-/*
- * Always trust cached dentries. Update statahead window if necessary.
- */
-static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
-{
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, flags=%u\n",
- dentry, flags);
-
- return ll_revalidate_dentry(dentry, flags);
-}
-
-
-static void ll_d_iput(struct dentry *de, struct inode *inode)
-{
- LASSERT(inode);
- if (!find_cbdata(inode))
- clear_nlink(inode);
- iput(inode);
-}
-
-const struct dentry_operations ll_d_ops = {
- .d_revalidate = ll_revalidate_nd,
- .d_release = ll_release,
- .d_delete = ll_ddelete,
- .d_iput = ll_d_iput,
- .d_compare = ll_dcompare,
-};
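
ll_dcompare() above follows the usual d_compare contract: return 1 when the cached entry must not be reused (the name differs, or the dentry has been invalidated) and 0 when it may be. The fragment below is a standalone userspace restatement of that contract with invented names, not a kernel API; it only exists to make the 1/0 convention and the "invalid entry never matches" rule explicit.

/* Illustrative sketch only -- not Lustre kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct cache_entry {
	const char *name;
	bool        invalid;	/* like d_lustre_invalid() */
};

/* return 1 = different / do not reuse, 0 = same / reuse */
static int entry_compare(const struct cache_entry *ce,
			 const char *name, size_t len)
{
	if (strlen(ce->name) != len)
		return 1;
	if (memcmp(ce->name, name, len))
		return 1;
	if (ce->invalid)
		return 1;	/* name matches, but the entry was invalidated */
	return 0;
}

int main(void)
{
	struct cache_entry ce = { .name = "foo", .invalid = false };

	printf("foo vs foo: %d\n", entry_compare(&ce, "foo", 3)); /* 0 */
	printf("foo vs bar: %d\n", entry_compare(&ce, "bar", 3)); /* 1 */
	ce.invalid = true;
	printf("invalidated: %d\n", entry_compare(&ce, "foo", 3)); /* 1 */
	return 0;
}
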
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
deleted file mode 100644
index cc6f0f596ffe..000000000000
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ /dev/null
@@ -1,1928 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/dir.c
- *
- * Directory code for lustre client.
- */
-
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/buffer_head.h> /* for wait_on_buffer */
-#include <linux/pagevec.h>
-#include <linux/prefetch.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_fid.h"
-#include "llite_internal.h"
-
-/*
- * (new) readdir implementation overview.
- *
- * The original lustre readdir implementation cached an exact copy of raw
- * directory pages on the client. These pages were indexed in the client page
- * cache by logical offset in the directory file. This design, while very
- * simple and intuitive, had some inherent problems:
- *
- * . it implies that byte offset to the directory entry serves as a
- * telldir(3)/seekdir(3) cookie, but that offset is not stable: in
- * ext3/htree directory entries may move due to splits, and more
- * importantly,
- *
- * . it is incompatible with the design of split directories for cmd3,
- * that assumes that names are distributed across nodes based on their
- * hash, and so readdir should be done in hash order.
- *
- * The new readdir implementation does readdir in hash order, and uses the hash
- * of a file name as a telldir/seekdir cookie. This led to a number of
- * complications:
- *
- * . hash is not unique, so it cannot be used to index cached directory
- * pages on the client (note, that it requires a whole pageful of hash
- * collided entries to cause two pages to have identical hashes);
- *
- * . hash is not unique, so it cannot, strictly speaking, be used as an
- * entry cookie. ext3/htree has the same problem and lustre implementation
- * mimics their solution: seekdir(hash) positions directory at the first
- * entry with the given hash.
- *
- * Client side.
- *
- * 0. caching
- *
- * Client caches directory pages using hash of the first entry as an index. As
- * noted above hash is not unique, so this solution doesn't work as is:
- * special processing is needed for "page hash chains" (i.e., sequences of
- * pages filled with entries all having the same hash value).
- *
- * First, such chains have to be detected. To this end, server returns to the
- * client the hash of the first entry on the page next to one returned. When
- * client detects that this hash is the same as hash of the first entry on the
- * returned page, page hash collision has to be handled. Pages in the
- * hash chain, except first one, are termed "overflow pages".
- *
- * Solution to index uniqueness problem is to not cache overflow
- * pages. Instead, when page hash collision is detected, all overflow pages
- * from emerging chain are immediately requested from the server and placed in
- * a special data structure (struct ll_dir_chain). This data structure is used
- * by ll_readdir() to process entries from overflow pages. When readdir
- * invocation finishes, overflow pages are discarded. If the page hash
- * collision chain wasn't completely processed, the next call to readdir will
- * again detect the page hash collision, again read the overflow pages in,
- * process the next portion of entries and again discard the pages. This is
- * not as wasteful as it looks, because, given a reasonable hash, page hash
- * collisions are extremely rare.
- *
- * 1. directory positioning
- *
- * When seekdir(hash) is called, original
- *
- *
- *
- *
- *
- *
- *
- *
- * Server.
- *
- * identification of and access to overflow pages
- *
- * page format
- *
- * A page in an MDS_READPAGE RPC is packed in LU_PAGE_SIZE units, and each page
- * contains a header lu_dirpage which describes the start/end hash, and whether
- * this page is empty (contains no dir entry) or its hash collides with the
- * next page. After the client receives the reply, several pages will be
- * integrated into a dir page of PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE is greater
- * than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
- *
- */
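
The snippet below is an invented, standalone userspace sketch -- not the Lustre client or server code -- that restates the cookie scheme described in the comment above: because the name hash is not unique, seekdir(hash) resumes at the first entry with that hash and simply replays every entry carrying it. All names and hash values here are made up for illustration.

/* Illustrative sketch only -- not Lustre kernel code. */
#include <stdio.h>
#include <stdint.h>

struct dent {
	uint64_t    hash;	/* name hash, the telldir/seekdir cookie */
	const char *name;
};

/* directory entries, kept in hash order as the server would return them */
static const struct dent ents[] = {
	{ 0x1000, "a" }, { 0x2000, "b" }, { 0x2000, "c" }, { 0x3000, "d" },
};

/* emit entries starting from the first hash >= cookie, return next cookie */
static uint64_t readdir_from(uint64_t cookie)
{
	uint64_t last = cookie;
	unsigned int i;

	for (i = 0; i < sizeof(ents) / sizeof(ents[0]); i++) {
		if (ents[i].hash < cookie)
			continue;	/* seekdir(): skip up to the cookie */
		printf("%llx %s\n",
		       (unsigned long long)ents[i].hash, ents[i].name);
		last = ents[i].hash;
	}
	return last;
}

int main(void)
{
	/* resuming at cookie 0x2000 replays both "b" and "c": the hash is
	 * not unique, so seekdir(hash) positions at the first entry with
	 * that hash, exactly as described above */
	readdir_from(0x2000);
	return 0;
}
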
-
-/* returns the page unlocked, but with a reference */
-static int ll_dir_filler(void *_hash, struct page *page0)
-{
- struct inode *inode = page0->mapping->host;
- int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
- struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
- struct ptlrpc_request *request;
- struct mdt_body *body;
- struct md_op_data *op_data;
- __u64 hash = *((__u64 *)_hash);
- struct page **page_pool;
- struct page *page;
- struct lu_dirpage *dp;
- int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
- int nrdpgs = 0; /* number of pages read actually */
- int npages;
- int i;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n",
- inode->i_ino, inode->i_generation, inode, hash);
-
- LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
-
- page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
- if (page_pool) {
- page_pool[0] = page0;
- } else {
- page_pool = &page0;
- max_pages = 1;
- }
- for (npages = 1; npages < max_pages; npages++) {
- page = page_cache_alloc_cold(inode->i_mapping);
- if (!page)
- break;
- page_pool[npages] = page;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- op_data->op_npages = npages;
- op_data->op_offset = hash;
- rc = md_readpage(exp, op_data, page_pool, &request);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- /* page0 is special, which was added into page cache early */
- delete_from_page_cache(page0);
- } else if (rc == 0) {
- body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- /* Checked by mdc_readpage() */
- LASSERT(body != NULL);
-
- if (body->valid & OBD_MD_FLSIZE)
- cl_isize_write(inode, body->size);
-
- nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
- >> PAGE_CACHE_SHIFT;
- SetPageUptodate(page0);
- }
- unlock_page(page0);
- ptlrpc_req_finished(request);
-
- CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);
-
- for (i = 1; i < npages; i++) {
- unsigned long offset;
- int ret;
-
- page = page_pool[i];
-
- if (rc < 0 || i >= nrdpgs) {
- page_cache_release(page);
- continue;
- }
-
- SetPageUptodate(page);
-
- dp = kmap(page);
- hash = le64_to_cpu(dp->ldp_hash_start);
- kunmap(page);
-
- offset = hash_x_index(hash, hash64);
-
- prefetchw(&page->flags);
- ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
- GFP_KERNEL);
- if (ret == 0) {
- unlock_page(page);
- } else {
- CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
- offset, ret);
- }
- page_cache_release(page);
- }
-
- if (page_pool != &page0)
- kfree(page_pool);
- return rc;
-}
-
-static void ll_check_page(struct inode *dir, struct page *page)
-{
- /* XXX: check page format later */
- SetPageChecked(page);
-}
-
-void ll_release_page(struct page *page, int remove)
-{
- kunmap(page);
- if (remove) {
- lock_page(page);
- if (likely(page->mapping != NULL))
- truncate_complete_page(page->mapping, page);
- unlock_page(page);
- }
- page_cache_release(page);
-}
-
-/*
- * Find, kmap and return page that contains given hash.
- */
-static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
- __u64 *start, __u64 *end)
-{
- int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
- struct address_space *mapping = dir->i_mapping;
- /*
- * Complement of hash is used as an index so that
- * radix_tree_gang_lookup() can be used to find a page with starting
- * hash _smaller_ than one we are looking for.
- */
- unsigned long offset = hash_x_index(*hash, hash64);
- struct page *page;
- int found;
-
- spin_lock_irq(&mapping->tree_lock);
- found = radix_tree_gang_lookup(&mapping->page_tree,
- (void **)&page, offset, 1);
- if (found > 0 && !radix_tree_exceptional_entry(page)) {
- struct lu_dirpage *dp;
-
- page_cache_get(page);
- spin_unlock_irq(&mapping->tree_lock);
- /*
- * In contrast to find_lock_page() we are sure that directory
- * page cannot be truncated (while DLM lock is held) and,
- * hence, can avoid restart.
- *
- * In fact, page cannot be locked here at all, because
- * ll_dir_filler() does synchronous io.
- */
- wait_on_page_locked(page);
- if (PageUptodate(page)) {
- dp = kmap(page);
- if (BITS_PER_LONG == 32 && hash64) {
- *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
- *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
- *hash = *hash >> 32;
- } else {
- *start = le64_to_cpu(dp->ldp_hash_start);
- *end = le64_to_cpu(dp->ldp_hash_end);
- }
- LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
- *start, *end, *hash);
- CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash %llu\n",
- offset, *start, *end, *hash);
- if (*hash > *end) {
- ll_release_page(page, 0);
- page = NULL;
- } else if (*end != *start && *hash == *end) {
- /*
- * upon hash collision, remove this page,
- * otherwise put page reference, and
- * ll_get_dir_page() will issue RPC to fetch
- * the page we want.
- */
- ll_release_page(page,
- le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- page = NULL;
- }
- } else {
- page_cache_release(page);
- page = ERR_PTR(-EIO);
- }
-
- } else {
- spin_unlock_irq(&mapping->tree_lock);
- page = NULL;
- }
- return page;
-}
-
-struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
- struct ll_dir_chain *chain)
-{
- ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
- struct address_space *mapping = dir->i_mapping;
- struct lustre_handle lockh;
- struct lu_dirpage *dp;
- struct page *page;
- ldlm_mode_t mode;
- int rc;
- __u64 start = 0;
- __u64 end = 0;
- __u64 lhash = hash;
- struct ll_inode_info *lli = ll_i2info(dir);
- int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
-
- mode = LCK_PR;
- rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
- ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
- if (!rc) {
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = mode,
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
- };
- struct lookup_intent it = { .it_op = IT_READDIR };
- struct ptlrpc_request *request;
- struct md_op_data *op_data;
-
- op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return (void *)op_data;
-
- rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
- op_data, &lockh, NULL, 0, NULL, 0);
-
- ll_finish_md_op_data(op_data);
-
- request = (struct ptlrpc_request *)it.d.lustre.it_data;
- if (request)
- ptlrpc_req_finished(request);
- if (rc < 0) {
- CERROR("lock enqueue: "DFID" at %llu: rc %d\n",
- PFID(ll_inode2fid(dir)), hash, rc);
- return ERR_PTR(rc);
- }
-
- CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
- dir, dir->i_ino, dir->i_generation);
- md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
- &it.d.lustre.it_lock_handle, dir, NULL);
- } else {
- /* for cross-ref object, l_ast_data of the lock may not be set,
- * we reset it here */
- md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
- dir, NULL);
- }
- ldlm_lock_dump_handle(D_OTHER, &lockh);
-
- mutex_lock(&lli->lli_readdir_mutex);
- page = ll_dir_page_locate(dir, &lhash, &start, &end);
- if (IS_ERR(page)) {
- CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
- PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
- goto out_unlock;
- } else if (page != NULL) {
- /*
- * XXX nikita: not entirely correct handling of a corner case:
- * suppose hash chain of entries with hash value HASH crosses
- * border between pages P0 and P1. First both P0 and P1 are
- * cached, seekdir() is called for some entry from the P0 part
- * of the chain. Later P0 goes out of cache. telldir(HASH)
- * happens and finds P1, as it starts with matching hash
- * value. Remaining entries from P0 part of the chain are
- * skipped. (Is that really a bug?)
- *
-		 * Possible solutions: 0. don't cache P1 in such a case, handle
- * it as an "overflow" page. 1. invalidate all pages at
- * once. 2. use HASH|1 as an index for P1.
- */
- goto hash_collision;
- }
-
- page = read_cache_page(mapping, hash_x_index(hash, hash64),
- ll_dir_filler, &lhash);
- if (IS_ERR(page)) {
- CERROR("read cache page: "DFID" at %llu: rc %ld\n",
- PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
- goto out_unlock;
- }
-
- wait_on_page_locked(page);
- (void)kmap(page);
- if (!PageUptodate(page)) {
- CERROR("page not updated: "DFID" at %llu: rc %d\n",
- PFID(ll_inode2fid(dir)), hash, -5);
- goto fail;
- }
- if (!PageChecked(page))
- ll_check_page(dir, page);
- if (PageError(page)) {
- CERROR("page error: "DFID" at %llu: rc %d\n",
- PFID(ll_inode2fid(dir)), hash, -5);
- goto fail;
- }
-hash_collision:
- dp = page_address(page);
- if (BITS_PER_LONG == 32 && hash64) {
- start = le64_to_cpu(dp->ldp_hash_start) >> 32;
- end = le64_to_cpu(dp->ldp_hash_end) >> 32;
- lhash = hash >> 32;
- } else {
- start = le64_to_cpu(dp->ldp_hash_start);
- end = le64_to_cpu(dp->ldp_hash_end);
- lhash = hash;
- }
- if (end == start) {
- LASSERT(start == lhash);
- CWARN("Page-wide hash collision: %llu\n", end);
- if (BITS_PER_LONG == 32 && hash64)
- CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
- le64_to_cpu(dp->ldp_hash_start),
- le64_to_cpu(dp->ldp_hash_end), hash);
- /*
- * Fetch whole overflow chain...
- *
- * XXX not yet.
- */
- goto fail;
- }
-out_unlock:
- mutex_unlock(&lli->lli_readdir_mutex);
- ldlm_lock_decref(&lockh, mode);
- return page;
-
-fail:
- ll_release_page(page, 1);
- page = ERR_PTR(-EIO);
- goto out_unlock;
-}
-
-int ll_dir_read(struct inode *inode, struct dir_context *ctx)
-{
- struct ll_inode_info *info = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- __u64 pos = ctx->pos;
- int api32 = ll_need_32bit_api(sbi);
- int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
- struct page *page;
- struct ll_dir_chain chain;
- int done = 0;
- int rc = 0;
-
- ll_dir_chain_init(&chain);
-
- page = ll_get_dir_page(inode, pos, &chain);
-
- while (rc == 0 && !done) {
- struct lu_dirpage *dp;
- struct lu_dirent *ent;
-
- if (!IS_ERR(page)) {
- /*
- * If page is empty (end of directory is reached),
- * use this value.
- */
- __u64 hash = MDS_DIR_END_OFF;
- __u64 next;
-
- dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent != NULL && !done;
- ent = lu_dirent_next(ent)) {
- __u16 type;
- int namelen;
- struct lu_fid fid;
- __u64 lhash;
- __u64 ino;
-
- /*
- * XXX: implement correct swabbing here.
- */
-
- hash = le64_to_cpu(ent->lde_hash);
- if (hash < pos)
- /*
- * Skip until we find target hash
- * value.
- */
- continue;
-
- namelen = le16_to_cpu(ent->lde_namelen);
- if (namelen == 0)
- /*
- * Skip dummy record.
- */
- continue;
-
- if (api32 && hash64)
- lhash = hash >> 32;
- else
- lhash = hash;
- fid_le_to_cpu(&fid, &ent->lde_fid);
- ino = cl_fid_build_ino(&fid, api32);
- type = ll_dirent_type_get(ent);
- ctx->pos = lhash;
- /* For 'll_nfs_get_name_filldir()', it will try
- * to access the 'ent' through its 'lde_name',
- * so the parameter 'name' for 'ctx->actor()'
- * must be part of the 'ent'.
- */
- done = !dir_emit(ctx, ent->lde_name,
- namelen, ino, type);
- }
- next = le64_to_cpu(dp->ldp_hash_end);
- if (!done) {
- pos = next;
- if (pos == MDS_DIR_END_OFF) {
- /*
- * End of directory reached.
- */
- done = 1;
- ll_release_page(page, 0);
- } else if (1 /* chain is exhausted*/) {
- /*
- * Normal case: continue to the next
- * page.
- */
- ll_release_page(page,
- le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- next = pos;
- page = ll_get_dir_page(inode, pos,
- &chain);
- } else {
- /*
- * go into overflow page.
- */
- LASSERT(le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- ll_release_page(page, 1);
- }
- } else {
- pos = hash;
- ll_release_page(page, 0);
- }
- } else {
- rc = PTR_ERR(page);
- CERROR("error reading dir "DFID" at %lu: rc %d\n",
- PFID(&info->lli_fid), (unsigned long)pos, rc);
- }
- }
-
- ctx->pos = pos;
- ll_dir_chain_fini(&chain);
- return rc;
-}
-
-static int ll_readdir(struct file *filp, struct dir_context *ctx)
-{
- struct inode *inode = file_inode(filp);
- struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
- int api32 = ll_need_32bit_api(sbi);
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
- inode->i_ino, inode->i_generation,
- inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
-
- if (lfd->lfd_pos == MDS_DIR_END_OFF) {
- /*
- * end-of-file.
- */
- rc = 0;
- goto out;
- }
-
- ctx->pos = lfd->lfd_pos;
- rc = ll_dir_read(inode, ctx);
- lfd->lfd_pos = ctx->pos;
- if (ctx->pos == MDS_DIR_END_OFF) {
- if (api32)
- ctx->pos = LL_DIR_END_OFF_32BIT;
- else
- ctx->pos = LL_DIR_END_OFF;
- } else {
- if (api32 && hash64)
- ctx->pos >>= 32;
- }
- filp->f_version = inode->i_version;
-
-out:
- if (!rc)
- ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
-
- return rc;
-}
-
-static int ll_send_mgc_param(struct obd_export *mgc, char *string)
-{
- struct mgs_send_param *msp;
- int rc = 0;
-
- msp = kzalloc(sizeof(*msp), GFP_NOFS);
- if (!msp)
- return -ENOMEM;
-
- strncpy(msp->mgs_param, string, MGS_PARAM_MAXLEN);
- rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
- sizeof(struct mgs_send_param), msp, NULL);
- if (rc)
- CERROR("Failed to set parameter: %d\n", rc);
- kfree(msp);
-
- return rc;
-}
-
-static int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
- char *filename)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- int mode;
- int err;
-
- mode = (~current_umask() & 0755) | S_IFDIR;
- op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
- strlen(filename), mode, LUSTRE_OPC_MKDIR,
- lump);
- if (IS_ERR(op_data)) {
- err = PTR_ERR(op_data);
- goto err_exit;
- }
-
- op_data->op_cli_flags |= CLI_SET_MEA;
- err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
- from_kuid(&init_user_ns, current_fsuid()),
- from_kgid(&init_user_ns, current_fsgid()),
- cfs_curproc_cap_pack(), 0, &request);
- ll_finish_md_op_data(op_data);
- if (err)
- goto err_exit;
-err_exit:
- ptlrpc_req_finished(request);
- return err;
-}
-
-int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
- int set_default)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- int rc = 0;
- struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
- struct obd_device *mgc = lsi->lsi_mgc;
- int lum_size;
-
- if (lump != NULL) {
- /*
- * This is coming from userspace, so should be in
- * local endian. But the MDS would like it in little
- * endian, so we swab it before we send it.
- */
- switch (lump->lmm_magic) {
- case LOV_USER_MAGIC_V1: {
- if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
- lustre_swab_lov_user_md_v1(lump);
- lum_size = sizeof(struct lov_user_md_v1);
- break;
- }
- case LOV_USER_MAGIC_V3: {
- if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
- lustre_swab_lov_user_md_v3(
- (struct lov_user_md_v3 *)lump);
- lum_size = sizeof(struct lov_user_md_v3);
- break;
- }
- default: {
- CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
- lump->lmm_magic, LOV_USER_MAGIC_V1,
- LOV_USER_MAGIC_V3);
- return -EINVAL;
- }
- }
- } else {
- lum_size = sizeof(struct lov_user_md_v1);
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
- op_data->op_cli_flags |= CLI_SET_MEA;
-
- /* swabbing is done in lov_setstripe() on server side */
- rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
- NULL, 0, &req, NULL);
- ll_finish_md_op_data(op_data);
- ptlrpc_req_finished(req);
- if (rc) {
- if (rc != -EPERM && rc != -EACCES)
- CERROR("mdc_setattr fails: rc = %d\n", rc);
- }
-
- /* In the following we use the fact that LOV_USER_MAGIC_V1 and
- LOV_USER_MAGIC_V3 have the same initial fields so we do not
- need to make the distinction between the 2 versions */
- if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
- char *param = NULL;
- char *buf;
-
- param = kzalloc(MGS_PARAM_MAXLEN, GFP_NOFS);
- if (!param)
- return -ENOMEM;
-
- buf = param;
- /* Get fsname and assume devname to be -MDT0000. */
- ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
- strcat(buf, "-MDT0000.lov");
- buf += strlen(buf);
-
- /* Set root stripesize */
- sprintf(buf, ".stripesize=%u",
- lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
- rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
- if (rc)
- goto end;
-
- /* Set root stripecount */
- sprintf(buf, ".stripecount=%hd",
- lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
- rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
- if (rc)
- goto end;
-
- /* Set root stripeoffset */
- sprintf(buf, ".stripeoffset=%hd",
- lump ? le16_to_cpu(lump->lmm_stripe_offset) :
- (typeof(lump->lmm_stripe_offset))(-1));
- rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
-
-end:
- kfree(param);
- }
- return rc;
-}
-
-int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
- int *lmm_size, struct ptlrpc_request **request)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct mdt_body *body;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *req = NULL;
- int rc, lmmsize;
- struct md_op_data *op_data;
-
- rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc)
- return rc;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
- 0, lmmsize, LUSTRE_OPC_ANY,
- NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
- inode->i_ino,
- inode->i_generation, rc);
- goto out;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
-
- lmmsize = body->eadatasize;
-
- if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
- lmmsize == 0) {
- rc = -ENODATA;
- goto out;
- }
-
- lmm = req_capsule_server_sized_get(&req->rq_pill,
- &RMF_MDT_MD, lmmsize);
- LASSERT(lmm != NULL);
-
- /*
- * This is coming from the MDS, so is probably in
- * little endian. We convert it to host endian before
- * passing it to userspace.
- */
- /* We don't swab objects for directories */
- switch (le32_to_cpu(lmm->lmm_magic)) {
- case LOV_MAGIC_V1:
- if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- break;
- case LOV_MAGIC_V3:
- if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
- lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- break;
- default:
- CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
- rc = -EPROTO;
- }
-out:
- *lmmp = lmm;
- *lmm_size = lmmsize;
- *request = req;
- return rc;
-}
-
-/*
- * Get MDT index for the inode.
- */
-int ll_get_mdt_idx(struct inode *inode)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
- int rc, mdtidx;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_flags |= MF_GET_MDT_IDX;
- rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
- mdtidx = op_data->op_mds;
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
- return rc;
- }
- return mdtidx;
-}
-
-/**
- * Generic handler to do any pre-copy work.
- *
- * It sends an initial hsm_progress (with extent length == 0) to the
- * coordinator to inform it that the real work has started.
- *
- * Moreover, for an ARCHIVE request, it will sample the file data version and
- * store it in \a copy.
- *
- * \return 0 on success.
- */
-static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct hsm_progress_kernel hpk;
- int rc;
-
- /* Forge a hsm_progress based on data from copy. */
- hpk.hpk_fid = copy->hc_hai.hai_fid;
- hpk.hpk_cookie = copy->hc_hai.hai_cookie;
- hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
- hpk.hpk_extent.length = 0;
- hpk.hpk_flags = 0;
- hpk.hpk_errval = 0;
- hpk.hpk_data_version = 0;
-
-
-	/* For an archive request, we need to read the current file version. */
- if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
- struct inode *inode;
- __u64 data_version = 0;
-
- /* Get inode for this fid */
- inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
- if (IS_ERR(inode)) {
- hpk.hpk_flags |= HP_FLAG_RETRY;
- /* hpk_errval is >= 0 */
- hpk.hpk_errval = -PTR_ERR(inode);
- rc = PTR_ERR(inode);
- goto progress;
- }
-
- /* Read current file data version */
- rc = ll_data_version(inode, &data_version, 1);
- iput(inode);
- if (rc != 0) {
- CDEBUG(D_HSM, "Could not read file data version of "
- DFID" (rc = %d). Archive request (%#llx) could not be done.\n",
- PFID(&copy->hc_hai.hai_fid), rc,
- copy->hc_hai.hai_cookie);
- hpk.hpk_flags |= HP_FLAG_RETRY;
- /* hpk_errval must be >= 0 */
- hpk.hpk_errval = -rc;
- goto progress;
- }
-
-		/* Store it in the hsm_copy for later copytool use.
-		 * Always modified even if there is no lsm. */
- copy->hc_data_version = data_version;
- }
-
-progress:
- rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
-
- return rc;
-}
-
-/**
- * Generic handler to do any post-copy work.
- *
- * It will send the last hsm_progress update to the coordinator to inform it
- * that the copy is finished and whether or not it was successful.
- *
- * Moreover,
- * - for an ARCHIVE request, it will sample the file data version and compare
- *   it with the version saved in ll_ioc_copy_start(). If they do not match,
- *   the copy will be considered failed.
- * - for a RESTORE request, it will sample the file data version and send it to
- *   the coordinator, which is useful if the file was imported as 'released'.
- *
- * \return 0 on success.
- */
-static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct hsm_progress_kernel hpk;
- int rc;
-
- /* If you modify the logic here, also check llapi_hsm_copy_end(). */
- /* Take care: copy->hc_hai.hai_action, len, gid and data are not
- * initialized if copy_end was called with copy == NULL.
- */
-
- /* Forge a hsm_progress based on data from copy. */
- hpk.hpk_fid = copy->hc_hai.hai_fid;
- hpk.hpk_cookie = copy->hc_hai.hai_cookie;
- hpk.hpk_extent = copy->hc_hai.hai_extent;
- hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
- hpk.hpk_errval = copy->hc_errval;
- hpk.hpk_data_version = 0;
-
-	/* For an archive request, we need to check that the file data was not
-	 * changed.
-	 *
-	 * For a restore request, we need to send the file data version; this is
-	 * useful when the file was created using hsm_import.
- */
- if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
- (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
- (copy->hc_errval == 0)) {
- struct inode *inode;
- __u64 data_version = 0;
-
- /* Get lsm for this fid */
- inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
- if (IS_ERR(inode)) {
- hpk.hpk_flags |= HP_FLAG_RETRY;
- /* hpk_errval must be >= 0 */
- hpk.hpk_errval = -PTR_ERR(inode);
- rc = PTR_ERR(inode);
- goto progress;
- }
-
- rc = ll_data_version(inode, &data_version,
- copy->hc_hai.hai_action == HSMA_ARCHIVE);
- iput(inode);
- if (rc) {
- CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
- if (hpk.hpk_errval == 0)
- hpk.hpk_errval = -rc;
- goto progress;
- }
-
-		/* Store it in the hsm_copy for later copytool use.
-		 * Always modified even if there is no lsm. */
- hpk.hpk_data_version = data_version;
-
- /* File could have been stripped during archiving, so we need
- * to check anyway. */
- if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
- (copy->hc_data_version != data_version)) {
- CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
- DFID", start:%#llx current:%#llx\n",
- PFID(&copy->hc_hai.hai_fid),
- copy->hc_data_version, data_version);
-			/* File was changed, send an error to the cdt. Do not
-			 * ask for a retry, because if a file is modified
-			 * frequently, the cdt will loop on retried archive
-			 * requests. The policy engine will ask for a new
-			 * archive later, once the file has not been modified
-			 * for some tunable amount of time. */
-			/* we do not notify the caller */
- hpk.hpk_flags &= ~HP_FLAG_RETRY;
- /* hpk_errval must be >= 0 */
- hpk.hpk_errval = EBUSY;
- }
-
- }
-
-progress:
- rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
- &hpk, NULL);
-
- return rc;
-}
-
-
-static int copy_and_ioctl(int cmd, struct obd_export *exp,
- const void __user *data, size_t size)
-{
- void *copy;
- int rc;
-
- copy = kzalloc(size, GFP_NOFS);
- if (!copy)
- return -ENOMEM;
-
- if (copy_from_user(copy, data, size)) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = obd_iocontrol(cmd, exp, size, copy, NULL);
-out:
- kfree(copy);
-
- return rc;
-}
-
-static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
-{
- int cmd = qctl->qc_cmd;
- int type = qctl->qc_type;
- int id = qctl->qc_id;
- int valid = qctl->qc_valid;
- int rc = 0;
-
- switch (cmd) {
- case LUSTRE_Q_INVALIDATE:
- case LUSTRE_Q_FINVALIDATE:
- case Q_QUOTAON:
- case Q_QUOTAOFF:
- case Q_SETQUOTA:
- case Q_SETINFO:
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
- return -EPERM;
- break;
- case Q_GETQUOTA:
- if (((type == USRQUOTA &&
- !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
- (type == GRPQUOTA &&
- !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
- (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT))
- return -EPERM;
- break;
- case Q_GETINFO:
- break;
- default:
- CERROR("unsupported quotactl op: %#x\n", cmd);
- return -ENOTTY;
- }
-
- if (valid != QC_GENERAL) {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- return -EOPNOTSUPP;
-
- if (cmd == Q_GETINFO)
- qctl->qc_cmd = Q_GETOINFO;
- else if (cmd == Q_GETQUOTA)
- qctl->qc_cmd = Q_GETOQUOTA;
- else
- return -EINVAL;
-
- switch (valid) {
- case QC_MDTIDX:
- rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
- sizeof(*qctl), qctl, NULL);
- break;
- case QC_OSTIDX:
- rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
- sizeof(*qctl), qctl, NULL);
- break;
- case QC_UUID:
- rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
- sizeof(*qctl), qctl, NULL);
- if (rc == -EAGAIN)
- rc = obd_iocontrol(OBD_IOC_QUOTACTL,
- sbi->ll_dt_exp,
- sizeof(*qctl), qctl, NULL);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- if (rc)
- return rc;
-
- qctl->qc_cmd = cmd;
- } else {
- struct obd_quotactl *oqctl;
-
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(sbi->ll_md_exp, oqctl);
- if (rc) {
- if (rc != -EALREADY && cmd == Q_QUOTAON) {
- oqctl->qc_cmd = Q_QUOTAOFF;
- obd_quotactl(sbi->ll_md_exp, oqctl);
- }
- kfree(oqctl);
- return rc;
- }
-		/* If QIF_SPACE is not set, the client should collect the
-		 * space usage from the OSSs by itself */
- if (cmd == Q_GETQUOTA &&
- !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
- !oqctl->qc_dqblk.dqb_curspace) {
- struct obd_quotactl *oqctl_tmp;
-
- oqctl_tmp = kzalloc(sizeof(*oqctl_tmp), GFP_NOFS);
- if (!oqctl_tmp) {
- rc = -ENOMEM;
- goto out;
- }
-
- oqctl_tmp->qc_cmd = Q_GETOQUOTA;
- oqctl_tmp->qc_id = oqctl->qc_id;
- oqctl_tmp->qc_type = oqctl->qc_type;
-
- /* collect space usage from OSTs */
- oqctl_tmp->qc_dqblk.dqb_curspace = 0;
- rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
- if (!rc || rc == -EREMOTEIO) {
- oqctl->qc_dqblk.dqb_curspace =
- oqctl_tmp->qc_dqblk.dqb_curspace;
- oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
- }
-
- /* collect space & inode usage from MDTs */
- oqctl_tmp->qc_dqblk.dqb_curspace = 0;
- oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
- rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
- if (!rc || rc == -EREMOTEIO) {
- oqctl->qc_dqblk.dqb_curspace +=
- oqctl_tmp->qc_dqblk.dqb_curspace;
- oqctl->qc_dqblk.dqb_curinodes =
- oqctl_tmp->qc_dqblk.dqb_curinodes;
- oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
- } else {
- oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
- }
-
- kfree(oqctl_tmp);
- }
-out:
- QCTL_COPY(qctl, oqctl);
- kfree(oqctl);
- }
-
- return rc;
-}
-
-/* This function tries to get a single name component,
- * to send to the server. No actual path traversal is involved,
- * so we limit it to NAME_MAX */
-static char *ll_getname(const char __user *filename)
-{
- int ret = 0, len;
- char *tmp;
-
- tmp = kzalloc(NAME_MAX + 1, GFP_KERNEL);
- if (!tmp)
- return ERR_PTR(-ENOMEM);
-
- len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
- if (len < 0)
- ret = len;
- else if (len == 0)
- ret = -ENOENT;
- else if (len > NAME_MAX && tmp[NAME_MAX] != 0)
- ret = -ENAMETOOLONG;
-
- if (ret) {
- kfree(tmp);
- tmp = ERR_PTR(ret);
- }
- return tmp;
-}
-
-#define ll_putname(filename) kfree(filename)
-
-static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct inode *inode = file_inode(file);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obd_ioctl_data *data;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
- inode->i_ino, inode->i_generation, inode, cmd);
-
-	/* asm-ppc{,64} declares TCGETS, et al. as type 't', not 'T' */
- if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
- return -ENOTTY;
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
- switch (cmd) {
- case FSFILT_IOC_GETFLAGS:
- case FSFILT_IOC_SETFLAGS:
- return ll_iocontrol(inode, file, cmd, arg);
- case FSFILT_IOC_GETVERSION_OLD:
- case FSFILT_IOC_GETVERSION:
- return put_user(inode->i_generation, (int *)arg);
- /* We need to special case any other ioctls we want to handle,
- * to send them to the MDS/OST as appropriate and to properly
- * network encode the arg field.
- case FSFILT_IOC_SETVERSION_OLD:
- case FSFILT_IOC_SETVERSION:
- */
- case LL_IOC_GET_MDTIDX: {
- int mdtidx;
-
- mdtidx = ll_get_mdt_idx(inode);
- if (mdtidx < 0)
- return mdtidx;
-
- if (put_user((int)mdtidx, (int *)arg))
- return -EFAULT;
-
- return 0;
- }
- case IOC_MDC_LOOKUP: {
- struct ptlrpc_request *request = NULL;
- int namelen, len = 0;
- char *buf = NULL;
- char *filename;
- struct md_op_data *op_data;
-
- rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
- if (rc)
- return rc;
- data = (void *)buf;
-
- filename = data->ioc_inlbuf1;
- namelen = strlen(filename);
-
- if (namelen < 1) {
- CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
- rc = -EINVAL;
- goto out_free;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto out_free;
- }
-
- op_data->op_valid = OBD_MD_FLID;
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
- goto out_free;
- }
- ptlrpc_req_finished(request);
-out_free:
- obd_ioctl_freedata(buf, len);
- return rc;
- }
- case LL_IOC_LMV_SETSTRIPE: {
- struct lmv_user_md *lum;
- char *buf = NULL;
- char *filename;
- int namelen = 0;
- int lumlen = 0;
- int len;
- int rc;
-
- rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
- if (rc)
- return rc;
-
- data = (void *)buf;
- if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
- data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
- rc = -EINVAL;
- goto lmv_out_free;
- }
-
- filename = data->ioc_inlbuf1;
- namelen = data->ioc_inllen1;
-
- if (namelen < 1) {
- CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
- rc = -EINVAL;
- goto lmv_out_free;
- }
- lum = (struct lmv_user_md *)data->ioc_inlbuf2;
- lumlen = data->ioc_inllen2;
-
- if (lum->lum_magic != LMV_USER_MAGIC ||
- lumlen != sizeof(*lum)) {
- CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
- filename, lum->lum_magic, lumlen, -EFAULT);
- rc = -EINVAL;
- goto lmv_out_free;
- }
-
- /**
- * ll_dir_setdirstripe will be used to set dir stripe
- * mdc_create--->mdt_reint_create (with dirstripe)
- */
- rc = ll_dir_setdirstripe(inode, lum, filename);
-lmv_out_free:
- obd_ioctl_freedata(buf, len);
- return rc;
-
- }
- case LL_IOC_LOV_SETSTRIPE: {
- struct lov_user_md_v3 lumv3;
- struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
- struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
- struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
-
- int set_default = 0;
-
- LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
- LASSERT(sizeof(lumv3.lmm_objects[0]) ==
- sizeof(lumv3p->lmm_objects[0]));
- /* first try with v1 which is smaller than v3 */
- if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
- return -EFAULT;
-
- if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
- if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
- return -EFAULT;
- }
-
- if (is_root_inode(inode))
- set_default = 1;
-
- /* in v1 and v3 cases lumv1 points to data */
- rc = ll_dir_setstripe(inode, lumv1, set_default);
-
- return rc;
- }
- case LL_IOC_LMV_GETSTRIPE: {
- struct lmv_user_md *lump = (struct lmv_user_md *)arg;
- struct lmv_user_md lum;
- struct lmv_user_md *tmp;
- int lum_size;
- int rc = 0;
- int mdtindex;
-
- if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
- return -EFAULT;
-
- if (lum.lum_magic != LMV_MAGIC_V1)
- return -EINVAL;
-
- lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
- tmp = kzalloc(lum_size, GFP_NOFS);
- if (!tmp) {
- rc = -ENOMEM;
- goto free_lmv;
- }
-
- *tmp = lum;
- tmp->lum_type = LMV_STRIPE_TYPE;
- tmp->lum_stripe_count = 1;
- mdtindex = ll_get_mdt_idx(inode);
- if (mdtindex < 0) {
- rc = -ENOMEM;
- goto free_lmv;
- }
-
- tmp->lum_stripe_offset = mdtindex;
- tmp->lum_objects[0].lum_mds = mdtindex;
- memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
- sizeof(struct lu_fid));
- if (copy_to_user((void *)arg, tmp, lum_size)) {
- rc = -EFAULT;
- goto free_lmv;
- }
-free_lmv:
- kfree(tmp);
- return rc;
- }
- case LL_IOC_LOV_SWAP_LAYOUTS:
- return -EPERM;
- case LL_IOC_OBD_STATFS:
- return ll_obd_statfs(inode, (void *)arg);
- case LL_IOC_LOV_GETSTRIPE:
- case LL_IOC_MDC_GETINFO:
- case IOC_MDC_GETFILEINFO:
- case IOC_MDC_GETFILESTRIPE: {
- struct ptlrpc_request *request = NULL;
- struct lov_user_md *lump;
- struct lov_mds_md *lmm = NULL;
- struct mdt_body *body;
- char *filename = NULL;
- int lmmsize;
-
- if (cmd == IOC_MDC_GETFILEINFO ||
- cmd == IOC_MDC_GETFILESTRIPE) {
- filename = ll_getname((const char *)arg);
- if (IS_ERR(filename))
- return PTR_ERR(filename);
-
- rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
- &lmmsize, &request);
- } else {
- rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
- }
-
- if (request) {
- body = req_capsule_server_get(&request->rq_pill,
- &RMF_MDT_BODY);
- LASSERT(body != NULL);
- } else {
- goto out_req;
- }
-
- if (rc < 0) {
- if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
- cmd == LL_IOC_MDC_GETINFO)) {
- rc = 0;
- goto skip_lmm;
- } else
- goto out_req;
- }
-
- if (cmd == IOC_MDC_GETFILESTRIPE ||
- cmd == LL_IOC_LOV_GETSTRIPE) {
- lump = (struct lov_user_md *)arg;
- } else {
- struct lov_user_mds_data *lmdp;
-
- lmdp = (struct lov_user_mds_data *)arg;
- lump = &lmdp->lmd_lmm;
- }
- if (copy_to_user(lump, lmm, lmmsize)) {
- if (copy_to_user(lump, lmm, sizeof(*lump))) {
- rc = -EFAULT;
- goto out_req;
- }
- rc = -EOVERFLOW;
- }
-skip_lmm:
- if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
- struct lov_user_mds_data *lmdp;
- lstat_t st = { 0 };
-
- st.st_dev = inode->i_sb->s_dev;
- st.st_mode = body->mode;
- st.st_nlink = body->nlink;
- st.st_uid = body->uid;
- st.st_gid = body->gid;
- st.st_rdev = body->rdev;
- st.st_size = body->size;
- st.st_blksize = PAGE_CACHE_SIZE;
- st.st_blocks = body->blocks;
- st.st_atime = body->atime;
- st.st_mtime = body->mtime;
- st.st_ctime = body->ctime;
- st.st_ino = inode->i_ino;
-
- lmdp = (struct lov_user_mds_data *)arg;
- if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
- rc = -EFAULT;
- goto out_req;
- }
- }
-
-out_req:
- ptlrpc_req_finished(request);
- if (filename)
- ll_putname(filename);
- return rc;
- }
- case IOC_LOV_GETINFO: {
- struct lov_user_mds_data *lumd;
- struct lov_stripe_md *lsm;
- struct lov_user_md *lum;
- struct lov_mds_md *lmm;
- int lmmsize;
- lstat_t st;
-
- lumd = (struct lov_user_mds_data *)arg;
- lum = &lumd->lmd_lmm;
-
- rc = ll_get_max_mdsize(sbi, &lmmsize);
- if (rc)
- return rc;
-
- lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
- if (lmm == NULL)
- return -ENOMEM;
- if (copy_from_user(lmm, lum, lmmsize)) {
- rc = -EFAULT;
- goto free_lmm;
- }
-
- switch (lmm->lmm_magic) {
- case LOV_USER_MAGIC_V1:
- if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
- break;
- /* swab objects first so that stripes num will be sane */
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v1 *)lmm)->lmm_objects,
- ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- break;
- case LOV_USER_MAGIC_V3:
- if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
- break;
- /* swab objects first so that stripes num will be sane */
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
- lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- break;
- default:
- rc = -EINVAL;
- goto free_lmm;
- }
-
- rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
- if (rc < 0) {
- rc = -ENOMEM;
- goto free_lmm;
- }
-
- /* Perform glimpse_size operation. */
- memset(&st, 0, sizeof(st));
-
- rc = ll_glimpse_ioctl(sbi, lsm, &st);
- if (rc)
- goto free_lsm;
-
- if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
- rc = -EFAULT;
- goto free_lsm;
- }
-
-free_lsm:
- obd_free_memmd(sbi->ll_dt_exp, &lsm);
-free_lmm:
- kvfree(lmm);
- return rc;
- }
- case OBD_IOC_LLOG_CATINFO: {
- return -EOPNOTSUPP;
- }
- case OBD_IOC_QUOTACHECK: {
- struct obd_quotactl *oqctl;
- int error = 0;
-
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
- return -EPERM;
-
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
- oqctl->qc_type = arg;
- rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
- error = rc;
- }
-
- rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
- if (rc < 0)
- CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
-
- kfree(oqctl);
- return error ?: rc;
- }
- case OBD_IOC_POLL_QUOTACHECK: {
- struct if_quotacheck *check;
-
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
- return -EPERM;
-
- check = kzalloc(sizeof(*check), GFP_NOFS);
- if (!check)
- return -ENOMEM;
-
- rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
- NULL);
- if (rc) {
- CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void *)arg, check,
- sizeof(*check)))
- CDEBUG(D_QUOTA, "copy_to_user failed\n");
- goto out_poll;
- }
-
- rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
- NULL);
- if (rc) {
- CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void *)arg, check,
- sizeof(*check)))
- CDEBUG(D_QUOTA, "copy_to_user failed\n");
- goto out_poll;
- }
-out_poll:
- kfree(check);
- return rc;
- }
- case LL_IOC_QUOTACTL: {
- struct if_quotactl *qctl;
-
- qctl = kzalloc(sizeof(*qctl), GFP_NOFS);
- if (!qctl)
- return -ENOMEM;
-
- if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) {
- rc = -EFAULT;
- goto out_quotactl;
- }
-
- rc = quotactl_ioctl(sbi, qctl);
-
- if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl)))
- rc = -EFAULT;
-
-out_quotactl:
- kfree(qctl);
- return rc;
- }
- case OBD_IOC_GETDTNAME:
- case OBD_IOC_GETMDNAME:
- return ll_get_obd_name(inode, cmd, arg);
- case LL_IOC_FLUSHCTX:
- return ll_flush_ctx(inode);
-#ifdef CONFIG_FS_POSIX_ACL
- case LL_IOC_RMTACL: {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(fd != NULL);
- rc = rct_add(&sbi->ll_rct, current_pid(), arg);
- if (!rc)
- fd->fd_flags |= LL_FILE_RMTACL;
- return rc;
- } else
- return 0;
- }
-#endif
- case LL_IOC_GETOBDCOUNT: {
- int count, vallen;
- struct obd_export *exp;
-
- if (copy_from_user(&count, (int *)arg, sizeof(int)))
- return -EFAULT;
-
- /* get ost count when count is zero, get mdt count otherwise */
- exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
- vallen = sizeof(count);
- rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
- KEY_TGT_COUNT, &vallen, &count, NULL);
- if (rc) {
- CERROR("get target count failed: %d\n", rc);
- return rc;
- }
-
- if (copy_to_user((int *)arg, &count, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
- case LL_IOC_PATH2FID:
- if (copy_to_user((void *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
- return -EFAULT;
- return 0;
- case LL_IOC_GET_CONNECT_FLAGS: {
- return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg);
- }
- case OBD_IOC_CHANGELOG_SEND:
- case OBD_IOC_CHANGELOG_CLEAR:
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
- sizeof(struct ioc_changelog));
- return rc;
- case OBD_IOC_FID2PATH:
- return ll_fid2path(inode, (void *)arg);
- case LL_IOC_HSM_REQUEST: {
- struct hsm_user_request *hur;
- ssize_t totalsize;
-
- hur = memdup_user((void *)arg, sizeof(*hur));
- if (IS_ERR(hur))
- return PTR_ERR(hur);
-
- /* Compute the whole struct size */
- totalsize = hur_len(hur);
- kfree(hur);
- if (totalsize < 0)
- return -E2BIG;
-
- /* Final size will be more than double totalsize */
- if (totalsize >= MDS_MAXREQSIZE / 3)
- return -E2BIG;
-
- hur = libcfs_kvzalloc(totalsize, GFP_NOFS);
- if (hur == NULL)
- return -ENOMEM;
-
- /* Copy the whole struct */
- if (copy_from_user(hur, (void *)arg, totalsize)) {
- kvfree(hur);
- return -EFAULT;
- }
-
- if (hur->hur_request.hr_action == HUA_RELEASE) {
- const struct lu_fid *fid;
- struct inode *f;
- int i;
-
- for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
- fid = &hur->hur_user_item[i].hui_fid;
- f = search_inode_for_lustre(inode->i_sb, fid);
- if (IS_ERR(f)) {
- rc = PTR_ERR(f);
- break;
- }
-
- rc = ll_hsm_release(f);
- iput(f);
- if (rc != 0)
- break;
- }
- } else {
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
- hur, NULL);
- }
-
- kvfree(hur);
-
- return rc;
- }
- case LL_IOC_HSM_PROGRESS: {
- struct hsm_progress_kernel hpk;
- struct hsm_progress hp;
-
- if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
- return -EFAULT;
-
- hpk.hpk_fid = hp.hp_fid;
- hpk.hpk_cookie = hp.hp_cookie;
- hpk.hpk_extent = hp.hp_extent;
- hpk.hpk_flags = hp.hp_flags;
- hpk.hpk_errval = hp.hp_errval;
- hpk.hpk_data_version = 0;
-
-		/* The file may not exist in Lustre; all progress is
-		 * reported to the Lustre root */
- rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
- NULL);
- return rc;
- }
- case LL_IOC_HSM_CT_START:
- rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
- sizeof(struct lustre_kernelcomm));
- return rc;
-
- case LL_IOC_HSM_COPY_START: {
- struct hsm_copy *copy;
- int rc;
-
- copy = memdup_user((char *)arg, sizeof(*copy));
- if (IS_ERR(copy))
- return PTR_ERR(copy);
-
- rc = ll_ioc_copy_start(inode->i_sb, copy);
- if (copy_to_user((char *)arg, copy, sizeof(*copy)))
- rc = -EFAULT;
-
- kfree(copy);
- return rc;
- }
- case LL_IOC_HSM_COPY_END: {
- struct hsm_copy *copy;
- int rc;
-
- copy = memdup_user((char *)arg, sizeof(*copy));
- if (IS_ERR(copy))
- return PTR_ERR(copy);
-
- rc = ll_ioc_copy_end(inode->i_sb, copy);
- if (copy_to_user((char *)arg, copy, sizeof(*copy)))
- rc = -EFAULT;
-
- kfree(copy);
- return rc;
- }
- default:
- return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg);
- }
-}
-
-static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
-{
- struct inode *inode = file->f_mapping->host;
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int api32 = ll_need_32bit_api(sbi);
- loff_t ret = -EINVAL;
-
- mutex_lock(&inode->i_mutex);
- switch (origin) {
- case SEEK_SET:
- break;
- case SEEK_CUR:
- offset += file->f_pos;
- break;
- case SEEK_END:
- if (offset > 0)
- goto out;
- if (api32)
- offset += LL_DIR_END_OFF_32BIT;
- else
- offset += LL_DIR_END_OFF;
- break;
- default:
- goto out;
- }
-
- if (offset >= 0 &&
- ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
- (!api32 && offset <= LL_DIR_END_OFF))) {
- if (offset != file->f_pos) {
- if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
- (!api32 && offset == LL_DIR_END_OFF))
- fd->lfd_pos = MDS_DIR_END_OFF;
- else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
- fd->lfd_pos = offset << 32;
- else
- fd->lfd_pos = offset;
- file->f_pos = offset;
- file->f_version = 0;
- }
- ret = offset;
- }
- goto out;
-
-out:
- mutex_unlock(&inode->i_mutex);
- return ret;
-}
-
-static int ll_dir_open(struct inode *inode, struct file *file)
-{
- return ll_file_open(inode, file);
-}
-
-static int ll_dir_release(struct inode *inode, struct file *file)
-{
- return ll_file_release(inode, file);
-}
-
-const struct file_operations ll_dir_operations = {
- .llseek = ll_dir_seek,
- .open = ll_dir_open,
- .release = ll_dir_release,
- .read = generic_read_dir,
- .iterate = ll_readdir,
- .unlocked_ioctl = ll_dir_ioctl,
- .fsync = ll_fsync,
-};
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
deleted file mode 100644
index 51b6bc5d4d3a..000000000000
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ /dev/null
@@ -1,3589 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/file.c
- *
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_lite.h"
-#include <linux/pagemap.h>
-#include <linux/file.h>
-#include "llite_internal.h"
-#include "../include/lustre/ll_fiemap.h"
-
-#include "../include/cl_object.h"
-
-static int
-ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
-
-static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
- bool *lease_broken);
-
-static enum llioc_iter
-ll_iocontrol_call(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg, int *rcp);
-
-static struct ll_file_data *ll_file_data_get(void)
-{
- struct ll_file_data *fd;
-
- OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, GFP_NOFS);
- if (fd == NULL)
- return NULL;
- fd->fd_write_failed = false;
- return fd;
-}
-
-static void ll_file_data_put(struct ll_file_data *fd)
-{
- if (fd != NULL)
- OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
-}
-
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
- struct lustre_handle *fh)
-{
- op_data->op_fid1 = ll_i2info(inode)->lli_fid;
- op_data->op_attr.ia_mode = inode->i_mode;
- op_data->op_attr.ia_atime = inode->i_atime;
- op_data->op_attr.ia_mtime = inode->i_mtime;
- op_data->op_attr.ia_ctime = inode->i_ctime;
- op_data->op_attr.ia_size = i_size_read(inode);
- op_data->op_attr_blocks = inode->i_blocks;
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- ll_inode_to_ext_flags(inode->i_flags);
- op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
- if (fh)
- op_data->op_handle = *fh;
-
- if (ll_i2info(inode)->lli_flags & LLIF_DATA_MODIFIED)
- op_data->op_bias |= MDS_DATA_MODIFIED;
-}
-
-/**
- * Closes the IO epoch and packs all the attributes into @op_data for
- * the CLOSE rpc.
- */
-static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle *och)
-{
- op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET;
-
- if (!(och->och_flags & FMODE_WRITE))
- goto out;
-
- if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
- else
- ll_ioepoch_close(inode, op_data, &och, 0);
-
-out:
- ll_pack_inode2opdata(inode, op_data, &och->och_fh);
- ll_prep_md_op_data(op_data, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
-static int ll_close_inode_openhandle(struct obd_export *md_exp,
- struct inode *inode,
- struct obd_client_handle *och,
- const __u64 *data_version)
-{
- struct obd_export *exp = ll_i2mdexp(inode);
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- struct obd_device *obd = class_exp2obd(exp);
- int epoch_close = 1;
- int rc;
-
- if (obd == NULL) {
- /*
- * XXX: in case of LMV, is this correct to access
- * ->exp_handle?
- */
- CERROR("Invalid MDC connection handle %#llx\n",
- ll_i2mdexp(inode)->exp_handle.h_cookie);
- rc = 0;
- goto out;
- }
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- /* XXX We leak openhandle and request here. */
- rc = -ENOMEM;
- goto out;
- }
-
- ll_prepare_close(inode, op_data, och);
- if (data_version != NULL) {
- /* Pass in data_version implies release. */
- op_data->op_bias |= MDS_HSM_RELEASE;
- op_data->op_data_version = *data_version;
- op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
- }
- epoch_close = op_data->op_flags & MF_EPOCH_CLOSE;
- rc = md_close(md_exp, op_data, och->och_mod, &req);
- if (rc == -EAGAIN) {
- /* This close must have the epoch closed. */
- LASSERT(epoch_close);
-		/* MDS has instructed us to obtain the Size-on-MDS attribute
-		 * from the OSTs and send a setattr back to the MDS. */
- rc = ll_som_update(inode, op_data);
- if (rc) {
- CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
- inode->i_ino, rc);
- rc = 0;
- }
- } else if (rc) {
- CERROR("inode %lu mdc close failed: rc = %d\n",
- inode->i_ino, rc);
- }
-
-	/* The DATA_MODIFIED flag was successfully sent on close; clear the
-	 * data modification flag. */
- if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- if (rc == 0) {
- rc = ll_objects_destroy(req, inode);
- if (rc)
- CERROR("inode %lu ll_objects destroy: rc = %d\n",
- inode->i_ino, rc);
- }
- if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
- struct mdt_body *body;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!(body->valid & OBD_MD_FLRELEASED))
- rc = -EBUSY;
- }
-
- ll_finish_md_op_data(op_data);
-
-out:
- if (exp_connect_som(exp) && !epoch_close &&
- S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
- ll_queue_done_writing(inode, LLIF_DONE_WRITING);
- } else {
- md_clear_open_replay_data(md_exp, och);
- /* Free @och if it is not waiting for DONE_WRITING. */
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
- kfree(och);
- }
- if (req) /* This is close request */
- ptlrpc_req_finished(req);
- return rc;
-}
-
-int ll_md_real_close(struct inode *inode, fmode_t fmode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle **och_p;
- struct obd_client_handle *och;
- __u64 *och_usecount;
- int rc = 0;
-
- if (fmode & FMODE_WRITE) {
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (fmode & FMODE_EXEC) {
- och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
- LASSERT(fmode & FMODE_READ);
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
-
- mutex_lock(&lli->lli_och_mutex);
- if (*och_usecount > 0) {
- /* There are still users of this handle, so skip
- * freeing it. */
- mutex_unlock(&lli->lli_och_mutex);
- return 0;
- }
-
- och = *och_p;
- *och_p = NULL;
- mutex_unlock(&lli->lli_och_mutex);
-
- if (och != NULL) {
- /* There might be a race and this handle may already
- be closed. */
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
- }
-
- return rc;
-}
-
-static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
- struct file *file)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- int lockmode;
- __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
- struct lustre_handle lockh;
- ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN} };
- int rc = 0;
-
- /* clear group lock, if present */
- if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
- ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
-
- if (fd->fd_lease_och != NULL) {
- bool lease_broken;
-
-		/* Usually the lease is not released when the
-		 * application crashes, so we need to release it here. */
- rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
- CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
- PFID(&lli->lli_fid), rc, lease_broken);
-
- fd->fd_lease_och = NULL;
- }
-
- if (fd->fd_och != NULL) {
- rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
- fd->fd_och = NULL;
- goto out;
- }
-
-	/* Let's see if we have a good enough OPEN lock on the file and if
-	   we can skip talking to the MDS */
-
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_omode & FMODE_WRITE) {
- lockmode = LCK_CW;
- LASSERT(lli->lli_open_fd_write_count);
- lli->lli_open_fd_write_count--;
- } else if (fd->fd_omode & FMODE_EXEC) {
- lockmode = LCK_PR;
- LASSERT(lli->lli_open_fd_exec_count);
- lli->lli_open_fd_exec_count--;
- } else {
- lockmode = LCK_CR;
- LASSERT(lli->lli_open_fd_read_count);
- lli->lli_open_fd_read_count--;
- }
- mutex_unlock(&lli->lli_och_mutex);
-
- if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
- LDLM_IBITS, &policy, lockmode, &lockh))
- rc = ll_md_real_close(inode, fd->fd_omode);
-
-out:
- LUSTRE_FPRIVATE(file) = NULL;
- ll_file_data_put(fd);
-
- return rc;
-}
-
-/* While this returns an error code, the caller (fput()) does not check it,
- * so we need to make every effort to clean up all of our state here. Also,
- * applications rarely check close errors, and even if an error is returned
- * they will not retry the close call.
- */
-int ll_file_release(struct inode *inode, struct file *file)
-{
- struct ll_file_data *fd;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
-
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(fd != NULL);
- if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
- fd->fd_flags &= ~LL_FILE_RMTACL;
- rct_del(&sbi->ll_rct, current_pid());
- et_search_free(&sbi->ll_et, current_pid());
- }
- }
-#endif
-
- if (!is_root_inode(inode))
- ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
- fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd != NULL);
-
-	/* This may be the last ref on @file, but not necessarily from the pid
-	 * that owns the statahead. Different processes can open the same dir;
-	 * "ll_opendir_key" identifies the one that should stop the statahead
-	 * thread. */
- if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
- lli->lli_opendir_pid != 0)
- ll_stop_statahead(inode, lli->lli_opendir_key);
-
- if (is_root_inode(inode)) {
- LUSTRE_FPRIVATE(file) = NULL;
- ll_file_data_put(fd);
- return 0;
- }
-
- if (!S_ISDIR(inode->i_mode)) {
- lov_read_and_clear_async_rc(lli->lli_clob);
- lli->lli_async_rc = 0;
- }
-
- rc = ll_md_close(sbi->ll_md_exp, inode, file);
-
- if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
- libcfs_debug_dumplog();
-
- return rc;
-}
-
-static int ll_intent_file_open(struct dentry *dentry, void *lmm,
- int lmmsize, struct lookup_intent *itp)
-{
- struct inode *inode = d_inode(dentry);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct dentry *parent = dentry->d_parent;
- const char *name = dentry->d_name.name;
- const int len = dentry->d_name.len;
- struct md_op_data *op_data;
- struct ptlrpc_request *req;
- __u32 opc = LUSTRE_OPC_ANY;
- int rc;
-
-	/* Usually we come here only for NFSD, and we want an open lock.
-	   But we can also get here with pre-2.6.15 patchless kernels, and in
-	   that case that lock is also OK */
-	/* We can also get here if there was a cached open handle in revalidate_it
-	 * but it disappeared while we were getting from there to ll_file_open.
-	 * But this means this file was closed and immediately opened, which
-	 * makes it a good candidate for using an OPEN lock */
- /* If lmmsize & lmm are not 0, we are just setting stripe info
- * parameters. No need for the open lock */
- if (lmm == NULL && lmmsize == 0) {
- itp->it_flags |= MDS_OPEN_LOCK;
- if (itp->it_flags & FMODE_WRITE)
- opc = LUSTRE_OPC_CREATE;
- }
-
- op_data = ll_prep_md_op_data(NULL, d_inode(parent),
- inode, name, len,
- O_RDWR, opc, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- itp->it_flags |= MDS_OPEN_BY_FID;
- rc = md_intent_lock(sbi->ll_md_exp, op_data, lmm, lmmsize, itp,
- 0 /*unused */, &req, ll_md_blocking_ast, 0);
- ll_finish_md_op_data(op_data);
- if (rc == -ESTALE) {
-		/* reason for keeping our own exit path - don't flood the log
-		 * with -ESTALE error messages.
- */
- if (!it_disposition(itp, DISP_OPEN_OPEN) ||
- it_open_error(DISP_OPEN_OPEN, itp))
- goto out;
- ll_release_openhandle(inode, itp);
- goto out;
- }
-
- if (it_disposition(itp, DISP_LOOKUP_NEG)) {
- rc = -ENOENT;
- goto out;
- }
-
- if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
- rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
- CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
- goto out;
- }
-
- rc = ll_prep_inode(&inode, req, NULL, itp);
- if (!rc && itp->d.lustre.it_lock_mode)
- ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL);
-
-out:
- ptlrpc_req_finished(req);
- ll_intent_drop_lock(itp);
-
- return rc;
-}
-
-/**
- * Assign an obtained @ioepoch to the client's inode. No lock is needed: the
- * MDS does not believe attributes if several ioepoch holders exist. Attributes
- * for the previous ioepoch are likewise skipped by the MDS if a new one is
- * opened.
- */
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
-{
- if (ioepoch && lli->lli_ioepoch != ioepoch) {
- lli->lli_ioepoch = ioepoch;
- CDEBUG(D_INODE, "Epoch %llu opened on "DFID"\n",
- ioepoch, PFID(&lli->lli_fid));
- }
-}
-
-static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
- struct obd_client_handle *och)
-{
- struct ptlrpc_request *req = it->d.lustre.it_data;
- struct mdt_body *body;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- och->och_fh = body->handle;
- och->och_fid = body->fid1;
- och->och_lease_handle.cookie = it->d.lustre.it_lock_handle;
- och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
- och->och_flags = it->it_flags;
-
- return md_set_open_replay_data(md_exp, och, it);
-}
-
-static int ll_local_open(struct file *file, struct lookup_intent *it,
- struct ll_file_data *fd, struct obd_client_handle *och)
-{
- struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- LASSERT(!LUSTRE_FPRIVATE(file));
-
- LASSERT(fd != NULL);
-
- if (och) {
- struct ptlrpc_request *req = it->d.lustre.it_data;
- struct mdt_body *body;
- int rc;
-
- rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
- if (rc != 0)
- return rc;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- ll_ioepoch_open(lli, body->ioepoch);
- }
-
- LUSTRE_FPRIVATE(file) = fd;
- ll_readahead_init(inode, &fd->fd_ras);
- fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
- return 0;
-}
-
-/* Open a file, and (for the very first open) create objects on the OSTs at
- * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
- * creation or open until ll_lov_setstripe() ioctl is called.
- *
- * If we already have the stripe MD locally then we don't request it in
- * md_open(), by passing a lmm_size = 0.
- *
- * It is up to the application to ensure no other processes open this file
- * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
- * used. We might be able to avoid races of that sort by getting lli_open_sem
- * before returning in the O_LOV_DELAY_CREATE case and dropping it here
- * or in ll_file_release(), but I'm not sure that is desirable/necessary.
- */
-int ll_file_open(struct inode *inode, struct file *file)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lookup_intent *it, oit = { .it_op = IT_OPEN,
- .it_flags = file->f_flags };
- struct obd_client_handle **och_p = NULL;
- __u64 *och_usecount = NULL;
- struct ll_file_data *fd;
- int rc = 0, opendir_set = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
- inode->i_generation, inode, file->f_flags);
-
- it = file->private_data; /* XXX: compat macro */
- file->private_data = NULL; /* prevent ll_local_open assertion */
-
- fd = ll_file_data_get();
- if (fd == NULL) {
- rc = -ENOMEM;
- goto out_openerr;
- }
-
- fd->fd_file = file;
- if (S_ISDIR(inode->i_mode)) {
- spin_lock(&lli->lli_sa_lock);
- if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
- lli->lli_opendir_pid == 0) {
- lli->lli_opendir_key = fd;
- lli->lli_opendir_pid = current_pid();
- opendir_set = 1;
- }
- spin_unlock(&lli->lli_sa_lock);
- }
-
- if (is_root_inode(inode)) {
- LUSTRE_FPRIVATE(file) = fd;
- return 0;
- }
-
- if (!it || !it->d.lustre.it_disposition) {
- /* Convert f_flags into access mode. We cannot use file->f_mode,
-		 * because everything but the O_ACCMODE mask was stripped from
-		 * there */
- if ((oit.it_flags + 1) & O_ACCMODE)
- oit.it_flags++;
- if (file->f_flags & O_TRUNC)
- oit.it_flags |= FMODE_WRITE;
-
-		/* The kernel only calls f_op->open in dentry_open. filp_open
-		 * calls dentry_open after a call to open_namei, which checks
-		 * permissions. Only nfsd_open calls dentry_open directly
-		 * without checking permissions, and because of that the code
-		 * below is safe. */
- if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
- oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
-
- /* We do not want O_EXCL here, presumably we opened the file
- * already? XXX - NFS implications? */
- oit.it_flags &= ~O_EXCL;
-
- /* bug20584, if "it_flags" contains O_CREAT, the file will be
-		 * created if necessary, so "IT_CREAT" should be set to stay
-		 * consistent with it */
- if (oit.it_flags & O_CREAT)
- oit.it_op |= IT_CREAT;
-
- it = &oit;
- }
-
-restart:
- /* Let's see if we have file open on MDS already. */
- if (it->it_flags & FMODE_WRITE) {
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (it->it_flags & FMODE_EXEC) {
- och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
-
- mutex_lock(&lli->lli_och_mutex);
- if (*och_p) { /* Open handle is present */
- if (it_disposition(it, DISP_OPEN_OPEN)) {
-			/* Well, there's an extra open request that we do not
-			   need; let's close it somehow. This will decref the
-			   request. */
- rc = it_open_error(DISP_OPEN_OPEN, it);
- if (rc) {
- mutex_unlock(&lli->lli_och_mutex);
- goto out_openerr;
- }
-
- ll_release_openhandle(inode, it);
- }
- (*och_usecount)++;
-
- rc = ll_local_open(file, it, fd, NULL);
- if (rc) {
- (*och_usecount)--;
- mutex_unlock(&lli->lli_och_mutex);
- goto out_openerr;
- }
- } else {
- LASSERT(*och_usecount == 0);
- if (!it->d.lustre.it_disposition) {
-			/* We cannot just request a lock handle now; the new
-			   ELC code means that one of the other OPEN locks for
-			   this file could be cancelled, and since the blocking
-			   AST handler would attempt to grab och_mutex as well,
-			   that would result in a deadlock */
- mutex_unlock(&lli->lli_och_mutex);
- it->it_create_mode |= M_CHECK_STALE;
- rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it);
- it->it_create_mode &= ~M_CHECK_STALE;
- if (rc)
- goto out_openerr;
-
- goto restart;
- }
- *och_p = kzalloc(sizeof(struct obd_client_handle), GFP_NOFS);
- if (!*och_p) {
- rc = -ENOMEM;
- goto out_och_free;
- }
-
- (*och_usecount)++;
-
- /* md_intent_lock() didn't get a request ref if there was an
- * open error, so don't do cleanup on the request here
- * (bug 3430) */
-		/* XXX (green): Shouldn't we bail out on any error here, not
-		 * just an open error? */
- rc = it_open_error(DISP_OPEN_OPEN, it);
- if (rc)
- goto out_och_free;
-
- LASSERT(it_disposition(it, DISP_ENQ_OPEN_REF));
-
- rc = ll_local_open(file, it, fd, *och_p);
- if (rc)
- goto out_och_free;
- }
- mutex_unlock(&lli->lli_och_mutex);
- fd = NULL;
-
- /* Must do this outside lli_och_mutex lock to prevent deadlock where
-	   a different kind of OPEN lock for this same inode gets cancelled
- by ldlm_cancel_lru */
- if (!S_ISREG(inode->i_mode))
- goto out_och_free;
-
- if (!lli->lli_has_smd &&
- (cl_is_lov_delay_create(file->f_flags) ||
- (file->f_mode & FMODE_WRITE) == 0)) {
- CDEBUG(D_INODE, "object creation was delayed\n");
- goto out_och_free;
- }
- cl_lov_delay_create_clear(&file->f_flags);
- goto out_och_free;
-
-out_och_free:
- if (rc) {
- if (och_p && *och_p) {
- kfree(*och_p);
- *och_p = NULL;
- (*och_usecount)--;
- }
- mutex_unlock(&lli->lli_och_mutex);
-
-out_openerr:
- if (opendir_set != 0)
- ll_stop_statahead(inode, lli->lli_opendir_key);
- ll_file_data_put(fd);
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
- }
-
- if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->d.lustre.it_data);
- it_clear_disposition(it, DISP_ENQ_OPEN_REF);
- }
-
- return rc;
-}
-
-static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
- struct ldlm_lock_desc *desc, void *data, int flag)
-{
- int rc;
- struct lustre_handle lockh;
-
- switch (flag) {
- case LDLM_CB_BLOCKING:
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (rc < 0) {
- CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
- return rc;
- }
- break;
- case LDLM_CB_CANCELING:
- /* do nothing */
- break;
- }
- return 0;
-}
-
-/**
- * Acquire a lease and open the file.
- */
-static struct obd_client_handle *
-ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
- __u64 open_flags)
-{
- struct lookup_intent it = { .it_op = IT_OPEN };
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
- struct ptlrpc_request *req;
- struct lustre_handle old_handle = { 0 };
- struct obd_client_handle *och = NULL;
- int rc;
- int rc2;
-
- if (fmode != FMODE_WRITE && fmode != FMODE_READ)
- return ERR_PTR(-EINVAL);
-
- if (file != NULL) {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct obd_client_handle **och_p;
- __u64 *och_usecount;
-
- if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
- return ERR_PTR(-EPERM);
-
- /* Get the openhandle of the file */
- rc = -EBUSY;
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
- mutex_unlock(&lli->lli_och_mutex);
- return ERR_PTR(rc);
- }
-
- if (fd->fd_och == NULL) {
- if (file->f_mode & FMODE_WRITE) {
- LASSERT(lli->lli_mds_write_och != NULL);
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else {
- LASSERT(lli->lli_mds_read_och != NULL);
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
- if (*och_usecount == 1) {
- fd->fd_och = *och_p;
- *och_p = NULL;
- *och_usecount = 0;
- rc = 0;
- }
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (rc < 0) /* more than 1 opener */
- return ERR_PTR(rc);
-
- LASSERT(fd->fd_och != NULL);
- old_handle = fd->fd_och->och_fh;
- }
-
- och = kzalloc(sizeof(*och), GFP_NOFS);
- if (!och)
- return ERR_PTR(-ENOMEM);
-
- op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto out;
- }
-
- /* To tell the MDT this openhandle is from the same owner */
- op_data->op_handle = old_handle;
-
- it.it_flags = fmode | open_flags;
- it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
- rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req,
- ll_md_blocking_lease_ast,
-			/* LDLM_FL_NO_LRU: Do not put the lease lock into the LRU list;
-			 * otherwise it can be cancelled, which may mislead applications
-			 * into thinking the lease is broken.
-			 * LDLM_FL_EXCL: Set this flag so that the lock won't be matched
-			 * by a normal open in ll_md_blocking_ast(). Otherwise, as
-			 * ll_md_blocking_lease_ast doesn't deal with the openhandle, a
-			 * normal openhandle would be leaked. */
- LDLM_FL_NO_LRU | LDLM_FL_EXCL);
- ll_finish_md_op_data(op_data);
- ptlrpc_req_finished(req);
- if (rc < 0)
- goto out_release_it;
-
- if (it_disposition(&it, DISP_LOOKUP_NEG)) {
- rc = -ENOENT;
- goto out_release_it;
- }
-
- rc = it_open_error(DISP_OPEN_OPEN, &it);
- if (rc)
- goto out_release_it;
-
- LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
- ll_och_fill(sbi->ll_md_exp, &it, och);
-
- if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */ {
- rc = -EOPNOTSUPP;
- goto out_close;
- }
-
-	/* already got the lease; handle the lease lock */
- ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- if (it.d.lustre.it_lock_mode == 0 ||
- it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) {
- /* open lock must return for lease */
- CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
- PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
- it.d.lustre.it_lock_bits);
- rc = -EPROTO;
- goto out_close;
- }
-
- ll_intent_release(&it);
- return och;
-
-out_close:
- rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
- if (rc2)
- CERROR("Close openhandle returned %d\n", rc2);
-
- /* cancel open lock */
- if (it.d.lustre.it_lock_mode != 0) {
- ldlm_lock_decref_and_cancel(&och->och_lease_handle,
- it.d.lustre.it_lock_mode);
- it.d.lustre.it_lock_mode = 0;
- }
-out_release_it:
- ll_intent_release(&it);
-out:
- kfree(och);
- return ERR_PTR(rc);
-}
-
-/**
- * Release lease and close the file.
- * It will check if the lease has ever broken.
- */
-static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
- bool *lease_broken)
-{
- struct ldlm_lock *lock;
- bool cancelled = true;
- int rc;
-
- lock = ldlm_handle2lock(&och->och_lease_handle);
- if (lock != NULL) {
- lock_res_and_lock(lock);
- cancelled = ldlm_is_cancel(lock);
- unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
- }
-
- CDEBUG(D_INODE, "lease for "DFID" broken? %d\n",
- PFID(&ll_i2info(inode)->lli_fid), cancelled);
-
- if (!cancelled)
- ldlm_cli_cancel(&och->och_lease_handle, 0);
- if (lease_broken != NULL)
- *lease_broken = cancelled;
-
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
- NULL);
- return rc;
-}
-
-/* Fills the obdo with the attributes for the lsm */
-static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
- struct obdo *obdo, __u64 ioepoch, int sync)
-{
- struct ptlrpc_request_set *set;
- struct obd_info oinfo = { };
- int rc;
-
- LASSERT(lsm != NULL);
-
- oinfo.oi_md = lsm;
- oinfo.oi_oa = obdo;
- oinfo.oi_oa->o_oi = lsm->lsm_oi;
- oinfo.oi_oa->o_mode = S_IFREG;
- oinfo.oi_oa->o_ioepoch = ioepoch;
- oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
- OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
- OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
- OBD_MD_FLDATAVERSION;
- if (sync) {
- oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
- oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
- }
-
- set = ptlrpc_prep_set();
- if (set == NULL) {
- CERROR("can't allocate ptlrpc set\n");
- rc = -ENOMEM;
- } else {
- rc = obd_getattr_async(exp, &oinfo, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
- }
- if (rc == 0)
- oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE |
- OBD_MD_FLDATAVERSION);
- return rc;
-}
-
-/**
- * Performs the getattr on the inode and updates its fields.
- * If @sync != 0, perform the getattr under the server-side lock.
- */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
- __u64 ioepoch, int sync)
-{
- struct lov_stripe_md *lsm;
- int rc;
-
- lsm = ccc_inode_lsm_get(inode);
- rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
- obdo, ioepoch, sync);
- if (rc == 0) {
- struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
-
- obdo_refresh_inode(inode, obdo, obdo->o_valid);
- CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n",
- POSTID(oi), i_size_read(inode),
- (unsigned long long)inode->i_blocks,
- 1UL << inode->i_blkbits);
- }
- ccc_inode_lsm_put(inode, lsm);
- return rc;
-}
-
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_object *obj = lli->lli_clob;
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct ost_lvb lvb;
- int rc = 0;
-
- ll_inode_size_lock(inode);
-	/* merge the timestamps most recently obtained from the MDS with
-	   the timestamps obtained from the OSTs */
- LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
-
- lvb.lvb_size = i_size_read(inode);
- lvb.lvb_blocks = inode->i_blocks;
- lvb.lvb_mtime = LTIME_S(inode->i_mtime);
- lvb.lvb_atime = LTIME_S(inode->i_atime);
- lvb.lvb_ctime = LTIME_S(inode->i_ctime);
-
- cl_object_attr_lock(obj);
- rc = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
-
- if (rc == 0) {
- if (lvb.lvb_atime < attr->cat_atime)
- lvb.lvb_atime = attr->cat_atime;
- if (lvb.lvb_ctime < attr->cat_ctime)
- lvb.lvb_ctime = attr->cat_ctime;
- if (lvb.lvb_mtime < attr->cat_mtime)
- lvb.lvb_mtime = attr->cat_mtime;
-
- CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n",
- PFID(&lli->lli_fid), attr->cat_size);
- cl_isize_write_nolock(inode, attr->cat_size);
-
- inode->i_blocks = attr->cat_blocks;
-
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
- }
- ll_inode_size_unlock(inode);
-
- return rc;
-}
-
-int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
- lstat_t *st)
-{
- struct obdo obdo = { 0 };
- int rc;
-
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, &obdo, 0, 0);
- if (rc == 0) {
- st->st_size = obdo.o_size;
- st->st_blocks = obdo.o_blocks;
- st->st_mtime = obdo.o_mtime;
- st->st_atime = obdo.o_atime;
- st->st_ctime = obdo.o_ctime;
- }
- return rc;
-}
-
-static bool file_is_noatime(const struct file *file)
-{
- const struct vfsmount *mnt = file->f_path.mnt;
- const struct inode *inode = file_inode(file);
-
- /* Adapted from file_accessed() and touch_atime().*/
- if (file->f_flags & O_NOATIME)
- return true;
-
- if (inode->i_flags & S_NOATIME)
- return true;
-
- if (IS_NOATIME(inode))
- return true;
-
- if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
- return true;
-
- if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
- return true;
-
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
- return true;
-
- return false;
-}
-
-void ll_io_init(struct cl_io *io, const struct file *file, int write)
-{
- struct inode *inode = file_inode(file);
-
- io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
- if (write) {
- io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
- io->u.ci_wr.wr_sync = file->f_flags & O_SYNC ||
- file->f_flags & O_DIRECT ||
- IS_SYNC(inode);
- }
- io->ci_obj = ll_i2info(inode)->lli_clob;
- io->ci_lockreq = CILR_MAYBE;
- if (ll_file_nolock(file)) {
- io->ci_lockreq = CILR_NEVER;
- io->ci_no_srvlock = 1;
- } else if (file->f_flags & O_APPEND) {
- io->ci_lockreq = CILR_MANDATORY;
- }
-
- io->ci_noatime = file_is_noatime(file);
-}
-
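-/*
- * Common read/write path: set up a cl_io from @args, take the per-inode
- * write mutex or truncate semaphore as needed, run the cl_io loop, and
- * restart the I/O if a layout change requires it before any bytes were
- * transferred.
- */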
-static ssize_t
-ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
- struct file *file, enum cl_io_type iot,
- loff_t *ppos, size_t count)
-{
- struct ll_inode_info *lli = ll_i2info(file_inode(file));
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct cl_io *io;
- ssize_t result;
-
-restart:
- io = ccc_env_thread_io(env);
- ll_io_init(io, file, iot == CIT_WRITE);
-
- if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
- struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- int write_mutex_locked = 0;
-
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- vio->cui_io_subtype = args->via_io_subtype;
-
- switch (vio->cui_io_subtype) {
- case IO_NORMAL:
- cio->cui_iter = args->u.normal.via_iter;
- cio->cui_iocb = args->u.normal.via_iocb;
- if ((iot == CIT_WRITE) &&
- !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- if (mutex_lock_interruptible(&lli->
- lli_write_mutex)) {
- result = -ERESTARTSYS;
- goto out;
- }
- write_mutex_locked = 1;
- } else if (iot == CIT_READ) {
- down_read(&lli->lli_trunc_sem);
- }
- break;
- case IO_SPLICE:
- vio->u.splice.cui_pipe = args->u.splice.via_pipe;
- vio->u.splice.cui_flags = args->u.splice.via_flags;
- break;
- default:
- CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
- LBUG();
- }
- result = cl_io_loop(env, io);
- if (write_mutex_locked)
- mutex_unlock(&lli->lli_write_mutex);
- else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
- up_read(&lli->lli_trunc_sem);
- } else {
- /* cl_io_rw_init() handled IO */
- result = io->ci_result;
- }
-
- if (io->ci_nob > 0) {
- result = io->ci_nob;
- *ppos = io->u.ci_wr.wr.crw_pos;
- }
-out:
- cl_io_fini(env, io);
-	/* If any data has been read or written (result != 0), just return the
-	 * short read/write instead of restarting the I/O. */
- if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
- CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n",
- iot == CIT_READ ? "read" : "write",
- file, *ppos, count);
- LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob);
- goto restart;
- }
-
- if (iot == CIT_READ) {
- if (result >= 0)
- ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
- LPROC_LL_READ_BYTES, result);
- } else if (iot == CIT_WRITE) {
- if (result >= 0) {
- ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
- LPROC_LL_WRITE_BYTES, result);
- fd->fd_write_failed = false;
- } else if (result != -ERESTARTSYS) {
- fd->fd_write_failed = true;
- }
- }
-
- return result;
-}
-
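-/*
- * Read from a file (through the page cache).
- */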
-static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- args = vvp_env_args(env, IO_NORMAL);
- args->u.normal.via_iter = to;
- args->u.normal.via_iocb = iocb;
-
- result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
- &iocb->ki_pos, iov_iter_count(to));
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/*
- * Write to a file (through the page cache).
- */
-static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- args = vvp_env_args(env, IO_NORMAL);
- args->u.normal.via_iter = from;
- args->u.normal.via_iocb = iocb;
-
- result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
- &iocb->ki_pos, iov_iter_count(from));
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/*
- * Send file content (through the page cache) to a pipe with the splice helper.
- */
-static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t count,
- unsigned int flags)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- args = vvp_env_args(env, IO_SPLICE);
- args->u.splice.via_pipe = pipe;
- args->u.splice.via_flags = flags;
-
- result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
- cl_env_put(env, &refcheck);
- return result;
-}
-
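-/*
- * Ask the OST to recreate a missing object for this inode on the given
- * OST index, using the existing stripe layout as a template.
- */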
-static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx)
-{
- struct obd_export *exp = ll_i2dtexp(inode);
- struct obd_trans_info oti = { 0 };
- struct obdo *oa = NULL;
- int lsm_size;
- int rc = 0;
- struct lov_stripe_md *lsm = NULL, *lsm2;
-
- OBDO_ALLOC(oa);
- if (oa == NULL)
- return -ENOMEM;
-
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
- rc = -ENOENT;
- goto out;
- }
-
- lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
- (lsm->lsm_stripe_count));
-
- lsm2 = libcfs_kvzalloc(lsm_size, GFP_NOFS);
- if (lsm2 == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- oa->o_oi = *oi;
- oa->o_nlink = ost_idx;
- oa->o_flags |= OBD_FL_RECREATE_OBJS;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
- obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
- memcpy(lsm2, lsm, lsm_size);
- ll_inode_size_lock(inode);
- rc = obd_create(NULL, exp, oa, &lsm2, &oti);
- ll_inode_size_unlock(inode);
-
- kvfree(lsm2);
-out:
- ccc_inode_lsm_put(inode, lsm);
- OBDO_FREE(oa);
- return rc;
-}
-
-static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
-{
- struct ll_recreate_obj ucreat;
- struct ost_id oi;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
- sizeof(ucreat)))
- return -EFAULT;
-
- ostid_set_seq_mdt0(&oi);
- ostid_set_id(&oi, ucreat.lrc_id);
- return ll_lov_recreate(inode, &oi, ucreat.lrc_ost_idx);
-}
-
-static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
-{
- struct lu_fid fid;
- struct ost_id oi;
- u32 ost_idx;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
- return -EFAULT;
-
- fid_to_ostid(&fid, &oi);
- ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
- return ll_lov_recreate(inode, &oi, ost_idx);
-}
-
-int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum, int lum_size)
-{
- struct lov_stripe_md *lsm = NULL;
- struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
- int rc = 0;
-
- lsm = ccc_inode_lsm_get(inode);
- if (lsm != NULL) {
- ccc_inode_lsm_put(inode, lsm);
- CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
- inode->i_ino);
- rc = -EEXIST;
- goto out;
- }
-
- ll_inode_size_lock(inode);
- rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
- if (rc)
- goto out_unlock;
- rc = oit.d.lustre.it_status;
- if (rc < 0)
- goto out_req_free;
-
- ll_release_openhandle(inode, &oit);
-
-out_unlock:
- ll_inode_size_unlock(inode);
- ll_intent_release(&oit);
- ccc_inode_lsm_put(inode, lsm);
-out:
- return rc;
-out_req_free:
- ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
- goto out;
-}
-
-int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
- struct lov_mds_md **lmmp, int *lmm_size,
- struct ptlrpc_request **request)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct mdt_body *body;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *req = NULL;
- struct md_op_data *op_data;
- int rc, lmmsize;
-
- rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc)
- return rc;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
- strlen(filename), lmmsize,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
- filename, rc);
- goto out;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL); /* checked by mdc_getattr_name */
-
- lmmsize = body->eadatasize;
-
- if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
- lmmsize == 0) {
- rc = -ENODATA;
- goto out;
- }
-
- lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
- LASSERT(lmm != NULL);
-
- if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
- rc = -EPROTO;
- goto out;
- }
-
- /*
- * This is coming from the MDS, so is probably in
- * little endian. We convert it to host endian before
- * passing it to userspace.
- */
- if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
- int stripe_count;
-
- stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- stripe_count = 0;
-
-		/* If the function was called for a directory, we should
-		 * avoid swabbing non-existent lsm objects. */
- if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- if (S_ISREG(body->mode))
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v1 *)lmm)->lmm_objects,
- stripe_count);
- } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
- lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- if (S_ISREG(body->mode))
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- stripe_count);
- }
- }
-
-out:
- *lmmp = lmm;
- *lmm_size = lmmsize;
- *request = req;
- return rc;
-}
-
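-/*
- * LL_IOC_LOV_SETEA: set a raw striping EA supplied by a privileged user.
- */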
-static int ll_lov_setea(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
- struct lov_user_md *lump;
- int lum_size = sizeof(struct lov_user_md) +
- sizeof(struct lov_user_ost_data);
- int rc;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- lump = libcfs_kvzalloc(lum_size, GFP_NOFS);
- if (lump == NULL)
- return -ENOMEM;
-
- if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
- kvfree(lump);
- return -EFAULT;
- }
-
- rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump,
- lum_size);
- cl_lov_delay_create_clear(&file->f_flags);
-
- kvfree(lump);
- return rc;
-}
-
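-/*
- * LL_IOC_LOV_SETSTRIPE: create the file layout from a user-supplied
- * lov_user_md (v1 or v3) and return the instantiated stripe info.
- */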
-static int ll_lov_setstripe(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- struct lov_user_md_v3 lumv3;
- struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
- struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
- struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
- int lum_size, rc;
- int flags = FMODE_WRITE;
-
- /* first try with v1 which is smaller than v3 */
- lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(lumv1, lumv1p, lum_size))
- return -EFAULT;
-
- if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
- lum_size = sizeof(struct lov_user_md_v3);
- if (copy_from_user(&lumv3, lumv3p, lum_size))
- return -EFAULT;
- }
-
- rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lumv1,
- lum_size);
- cl_lov_delay_create_clear(&file->f_flags);
- if (rc == 0) {
- struct lov_stripe_md *lsm;
- __u32 gen;
-
- put_user(0, &lumv1p->lmm_stripe_count);
-
- ll_layout_refresh(inode, &gen);
- lsm = ccc_inode_lsm_get(inode);
- rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
- 0, lsm, (void *)arg);
- ccc_inode_lsm_put(inode, lsm);
- }
- return rc;
-}
-
-static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
-{
- struct lov_stripe_md *lsm;
- int rc = -ENODATA;
-
- lsm = ccc_inode_lsm_get(inode);
- if (lsm != NULL)
- rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0,
- lsm, (void *)arg);
- ccc_inode_lsm_put(inode, lsm);
- return rc;
-}
-
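-/*
- * LL_IOC_GROUP_LOCK: take a client group lock with group id @arg and
- * record it in the file's private data.
- */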
-static int
-ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
- int rc;
-
- if (arg == 0) {
- CWARN("group id for group lock must not be 0\n");
- return -EINVAL;
- }
-
- if (ll_file_nolock(file))
- return -EOPNOTSUPP;
-
- spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.cg_gid);
- spin_unlock(&lli->lli_lock);
- return -EINVAL;
- }
- LASSERT(fd->fd_grouplock.cg_lock == NULL);
- spin_unlock(&lli->lli_lock);
-
- rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
- arg, (file->f_flags & O_NONBLOCK), &grouplock);
- if (rc)
- return rc;
-
- spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- spin_unlock(&lli->lli_lock);
- CERROR("another thread just won the race\n");
- cl_put_grouplock(&grouplock);
- return -EINVAL;
- }
-
- fd->fd_flags |= LL_FILE_GROUP_LOCKED;
- fd->fd_grouplock = grouplock;
- spin_unlock(&lli->lli_lock);
-
- CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
- return 0;
-}
-
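-/*
- * LL_IOC_GROUP_UNLOCK: drop the group lock previously taken with
- * group id @arg.
- */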
-static int ll_put_grouplock(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
-
- spin_lock(&lli->lli_lock);
- if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- spin_unlock(&lli->lli_lock);
- CWARN("no group lock held\n");
- return -EINVAL;
- }
- LASSERT(fd->fd_grouplock.cg_lock != NULL);
-
- if (fd->fd_grouplock.cg_gid != arg) {
- CWARN("group lock %lu doesn't match current id %lu\n",
- arg, fd->fd_grouplock.cg_gid);
- spin_unlock(&lli->lli_lock);
- return -EINVAL;
- }
-
- grouplock = fd->fd_grouplock;
- memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
- fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
- spin_unlock(&lli->lli_lock);
-
- cl_put_grouplock(&grouplock);
- CDEBUG(D_INFO, "group lock %lu released\n", arg);
- return 0;
-}
-
-/**
- * Close inode open handle
- *
- * \param inode [in] inode in question
- * \param it [in,out] intent which contains open info and result
- *
- * \retval 0 success
- * \retval <0 failure
- */
-int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
-{
- struct obd_client_handle *och;
- int rc;
-
- LASSERT(inode);
-
- /* Root ? Do nothing. */
- if (is_root_inode(inode))
- return 0;
-
- /* No open handle to close? Move away */
- if (!it_disposition(it, DISP_OPEN_OPEN))
- return 0;
-
- LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
-
- och = kzalloc(sizeof(*och), GFP_NOFS);
- if (!och) {
- rc = -ENOMEM;
- goto out;
- }
-
- ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
-
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- inode, och, NULL);
-out:
- /* this one is in place of ll_file_open */
- if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->d.lustre.it_data);
- it_clear_disposition(it, DISP_ENQ_OPEN_REF);
- }
- return rc;
-}
-
-/**
- * Get the size of the inode for which the FIEMAP mapping is requested,
- * then make the FIEMAP get_info call and return the result.
- */
-static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
- size_t num_bytes)
-{
- struct obd_export *exp = ll_i2dtexp(inode);
- struct lov_stripe_md *lsm = NULL;
- struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
- __u32 vallen = num_bytes;
- int rc;
-
-	/* Check for unsupported fiemap flags */
- if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
- fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
- return -EBADR;
- }
-
- /* Check for FIEMAP_FLAG_SYNC */
- if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
- rc = filemap_fdatawrite(inode->i_mapping);
- if (rc)
- return rc;
- }
-
- lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL)
- return -ENOENT;
-
- /* If the stripe_count > 1 and the application does not understand
- * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
- */
- if (lsm->lsm_stripe_count > 1 &&
- !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
- rc = -EOPNOTSUPP;
- goto out;
- }
-
- fm_key.oa.o_oi = lsm->lsm_oi;
- fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
-
- if (i_size_read(inode) == 0) {
- rc = ll_glimpse_size(inode);
- if (rc)
- goto out;
- }
-
- obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE);
- obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid);
-	/* If the file size is 0, there are no objects to map */
- if (fm_key.oa.o_size == 0) {
- fiemap->fm_mapped_extents = 0;
- rc = 0;
- goto out;
- }
-
- memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap));
-
- rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen,
- fiemap, lsm);
- if (rc)
- CERROR("obd_get_info failed: rc = %d\n", rc);
-
-out:
- ccc_inode_lsm_put(inode, lsm);
- return rc;
-}
-
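-/*
- * OBD_IOC_FID2PATH: resolve a FID to a path name via the MDC.
- */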
-int ll_fid2path(struct inode *inode, void __user *arg)
-{
- struct obd_export *exp = ll_i2mdexp(inode);
- const struct getinfo_fid2path __user *gfin = arg;
- struct getinfo_fid2path *gfout;
- u32 pathlen;
- size_t outsize;
- int rc;
-
- if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
- !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
- return -EPERM;
-
- /* Only need to get the buflen */
- if (get_user(pathlen, &gfin->gf_pathlen))
- return -EFAULT;
-
- if (pathlen > PATH_MAX)
- return -EINVAL;
-
- outsize = sizeof(*gfout) + pathlen;
-
- gfout = kzalloc(outsize, GFP_NOFS);
- if (!gfout)
- return -ENOMEM;
-
- if (copy_from_user(gfout, arg, sizeof(*gfout))) {
- rc = -EFAULT;
- goto gf_free;
- }
-
- /* Call mdc_iocontrol */
- rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
- if (rc != 0)
- goto gf_free;
-
- if (copy_to_user(arg, gfout, outsize))
- rc = -EFAULT;
-
-gf_free:
- kfree(gfout);
- return rc;
-}
-
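-/*
- * FSFILT_IOC_FIEMAP: copy the fiemap request from user space, run
- * ll_do_fiemap() and copy the mapped extents back.
- */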
-static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
-{
- struct ll_user_fiemap *fiemap_s;
- size_t num_bytes, ret_bytes;
- unsigned int extent_count;
- int rc = 0;
-
-	/* Get the extent count so we can calculate the size of
-	 * the required fiemap buffer */
- if (get_user(extent_count,
- &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
- return -EFAULT;
-
- if (extent_count >=
- (SIZE_MAX - sizeof(*fiemap_s)) / sizeof(struct ll_fiemap_extent))
- return -EINVAL;
- num_bytes = sizeof(*fiemap_s) + (extent_count *
- sizeof(struct ll_fiemap_extent));
-
- fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS);
- if (fiemap_s == NULL)
- return -ENOMEM;
-
- /* get the fiemap value */
- if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
- sizeof(*fiemap_s))) {
- rc = -EFAULT;
- goto error;
- }
-
-	/* If fm_extent_count is non-zero, read the first extent, since
-	 * it is used to calculate the end offset and device from the
-	 * previous fiemap call. */
- if (extent_count) {
- if (copy_from_user(&fiemap_s->fm_extents[0],
- (char __user *)arg + sizeof(*fiemap_s),
- sizeof(struct ll_fiemap_extent))) {
- rc = -EFAULT;
- goto error;
- }
- }
-
- rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
- if (rc)
- goto error;
-
- ret_bytes = sizeof(struct ll_user_fiemap);
-
- if (extent_count != 0)
- ret_bytes += (fiemap_s->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent));
-
- if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
- rc = -EFAULT;
-
-error:
- kvfree(fiemap_s);
- return rc;
-}
-
-/*
- * Read the data_version for the inode.
- *
- * This value is computed from the stripe object versions on the OSTs,
- * using server-side locking.
- *
- * @param extent_lock Take the extent lock. Not needed if the process
- *		       already holds the OST object group locks.
- */
-int ll_data_version(struct inode *inode, __u64 *data_version,
- int extent_lock)
-{
- struct lov_stripe_md *lsm = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obdo *obdo = NULL;
- int rc;
-
-	/* If there is no stripe, the version is considered to be 0. */
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
- *data_version = 0;
- CDEBUG(D_INODE, "No object for inode\n");
- rc = 0;
- goto out;
- }
-
- obdo = kzalloc(sizeof(*obdo), GFP_NOFS);
- if (!obdo) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, extent_lock);
- if (rc == 0) {
- if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
- rc = -EOPNOTSUPP;
- else
- *data_version = obdo->o_data_version;
- }
-
- kfree(obdo);
-out:
- ccc_inode_lsm_put(inode, lsm);
- return rc;
-}
-
-/*
- * Trigger a HSM release request for the provided inode.
- */
-int ll_hsm_release(struct inode *inode)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct obd_client_handle *och = NULL;
- __u64 data_version = 0;
- int rc;
-
-
- CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&ll_i2info(inode)->lli_fid));
-
- och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
- if (IS_ERR(och)) {
- rc = PTR_ERR(och);
- goto out;
- }
-
- /* Grab latest data_version and [am]time values */
- rc = ll_data_version(inode, &data_version, 1);
- if (rc != 0)
- goto out;
-
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env)) {
- rc = PTR_ERR(env);
- goto out;
- }
-
- ll_merge_lvb(env, inode);
- cl_env_nested_put(&nest, env);
-
- /* Release the file.
- * NB: lease lock handle is released in mdc_hsm_release_pack() because
- * we still need it to pack l_remote_handle to MDT. */
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
- &data_version);
- och = NULL;
-
-
-out:
- if (och != NULL && !IS_ERR(och)) /* close the file */
- ll_lease_close(och, inode, NULL);
-
- return rc;
-}
-
-struct ll_swap_stack {
- struct iattr ia1, ia2;
- __u64 dv1, dv2;
- struct inode *inode1, *inode2;
- bool check_dv1, check_dv2;
-};
-
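-/*
- * LL_IOC_LOV_SWAP_LAYOUTS: atomically exchange the layouts of two files
- * on the MDT, optionally checking data versions and preserving
- * timestamps.
- */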
-static int ll_swap_layouts(struct file *file1, struct file *file2,
- struct lustre_swap_layouts *lsl)
-{
- struct mdc_swap_layouts msl;
- struct md_op_data *op_data;
- __u32 gid;
- __u64 dv;
- struct ll_swap_stack *llss = NULL;
- int rc;
-
- llss = kzalloc(sizeof(*llss), GFP_NOFS);
- if (!llss)
- return -ENOMEM;
-
- llss->inode1 = file_inode(file1);
- llss->inode2 = file_inode(file2);
-
- if (!S_ISREG(llss->inode2->i_mode)) {
- rc = -EINVAL;
- goto free;
- }
-
- if (inode_permission(llss->inode1, MAY_WRITE) ||
- inode_permission(llss->inode2, MAY_WRITE)) {
- rc = -EPERM;
- goto free;
- }
-
- if (llss->inode2->i_sb != llss->inode1->i_sb) {
- rc = -EXDEV;
- goto free;
- }
-
-	/* We use two bools because they are easier to swap than two bits. */
- if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
- llss->check_dv1 = true;
-
- if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
- llss->check_dv2 = true;
-
- /* we cannot use lsl->sl_dvX directly because we may swap them */
- llss->dv1 = lsl->sl_dv1;
- llss->dv2 = lsl->sl_dv2;
-
- rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
- if (rc == 0) /* same file, done! */ {
- rc = 0;
- goto free;
- }
-
- if (rc < 0) { /* sequentialize it */
- swap(llss->inode1, llss->inode2);
- swap(file1, file2);
- swap(llss->dv1, llss->dv2);
- swap(llss->check_dv1, llss->check_dv2);
- }
-
- gid = lsl->sl_gid;
- if (gid != 0) { /* application asks to flush dirty cache */
- rc = ll_get_grouplock(llss->inode1, file1, gid);
- if (rc < 0)
- goto free;
-
- rc = ll_get_grouplock(llss->inode2, file2, gid);
- if (rc < 0) {
- ll_put_grouplock(llss->inode1, file1, gid);
- goto free;
- }
- }
-
-	/* To be able to restore the mtime and atime after the swap,
-	 * we first need to save them. */
- if (lsl->sl_flags &
- (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
- llss->ia1.ia_mtime = llss->inode1->i_mtime;
- llss->ia1.ia_atime = llss->inode1->i_atime;
- llss->ia1.ia_valid = ATTR_MTIME | ATTR_ATIME;
- llss->ia2.ia_mtime = llss->inode2->i_mtime;
- llss->ia2.ia_atime = llss->inode2->i_atime;
- llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
- }
-
-	/* Final check: before swapping the layouts, verify that the data
-	 * versions have not changed (if requested). */
- if (llss->check_dv1) {
- rc = ll_data_version(llss->inode1, &dv, 0);
- if (rc)
- goto putgl;
- if (dv != llss->dv1) {
- rc = -EAGAIN;
- goto putgl;
- }
- }
-
- if (llss->check_dv2) {
- rc = ll_data_version(llss->inode2, &dv, 0);
- if (rc)
- goto putgl;
- if (dv != llss->dv2) {
- rc = -EAGAIN;
- goto putgl;
- }
- }
-
-	/* struct md_op_data is used to send the swap arguments to the MDT;
-	 * only the flags are missing, so we pass a struct mdc_swap_layouts
-	 * through md_op_data->op_data. */
-	/* Flags from user space have to be converted before they are sent to
-	 * the server. No flag is sent today; they are only used on the client. */
- msl.msl_flags = 0;
- rc = -ENOMEM;
- op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
- 0, LUSTRE_OPC_ANY, &msl);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto free;
- }
-
- rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
- sizeof(*op_data), op_data, NULL);
- ll_finish_md_op_data(op_data);
-
-putgl:
- if (gid != 0) {
- ll_put_grouplock(llss->inode2, file2, gid);
- ll_put_grouplock(llss->inode1, file1, gid);
- }
-
- /* rc can be set from obd_iocontrol() or from a GOTO(putgl, ...) */
- if (rc != 0)
- goto free;
-
- /* clear useless flags */
- if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_MTIME)) {
- llss->ia1.ia_valid &= ~ATTR_MTIME;
- llss->ia2.ia_valid &= ~ATTR_MTIME;
- }
-
- if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_ATIME)) {
- llss->ia1.ia_valid &= ~ATTR_ATIME;
- llss->ia2.ia_valid &= ~ATTR_ATIME;
- }
-
- /* update time if requested */
- rc = 0;
- if (llss->ia2.ia_valid != 0) {
- mutex_lock(&llss->inode1->i_mutex);
- rc = ll_setattr(file1->f_path.dentry, &llss->ia2);
- mutex_unlock(&llss->inode1->i_mutex);
- }
-
- if (llss->ia1.ia_valid != 0) {
- int rc1;
-
- mutex_lock(&llss->inode2->i_mutex);
- rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1);
- mutex_unlock(&llss->inode2->i_mutex);
- if (rc == 0)
- rc = rc1;
- }
-
-free:
- kfree(llss);
-
- return rc;
-}
-
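-/*
- * Set or clear HSM flags (and optionally the archive id) on the MDT,
- * after validating them against HSM_FLAGS_MASK/HSM_USER_MASK.
- */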
-static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
-{
- struct md_op_data *op_data;
- int rc;
-
-	/* Detect out-of-range masks */
- if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
- return -EINVAL;
-
- /* Non-root users are forbidden to set or clear flags which are
- * NOT defined in HSM_USER_MASK. */
- if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
- !capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
-	/* Detect an out-of-range archive id */
- if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
- (hss->hss_archive_id > LL_HSM_MAX_ARCHIVE))
- return -EINVAL;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hss);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, ll_i2mdexp(inode),
- sizeof(*op_data), op_data, NULL);
-
- ll_finish_md_op_data(op_data);
-
- return rc;
-}
-
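-/*
- * HSM import: mark the file as archived, existing and released, then
- * restore the saved attributes (mode, owner, size, timestamps) from @hui.
- */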
-static int ll_hsm_import(struct inode *inode, struct file *file,
- struct hsm_user_import *hui)
-{
- struct hsm_state_set *hss = NULL;
- struct iattr *attr = NULL;
- int rc;
-
-
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
- /* set HSM flags */
- hss = kzalloc(sizeof(*hss), GFP_NOFS);
- if (!hss)
- return -ENOMEM;
-
- hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID;
- hss->hss_archive_id = hui->hui_archive_id;
- hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED;
- rc = ll_hsm_state_set(inode, hss);
- if (rc != 0)
- goto free_hss;
-
- attr = kzalloc(sizeof(*attr), GFP_NOFS);
- if (!attr) {
- rc = -ENOMEM;
- goto free_hss;
- }
-
- attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
- attr->ia_mode |= S_IFREG;
- attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
- attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
- attr->ia_size = hui->hui_size;
- attr->ia_mtime.tv_sec = hui->hui_mtime;
- attr->ia_mtime.tv_nsec = hui->hui_mtime_ns;
- attr->ia_atime.tv_sec = hui->hui_atime;
- attr->ia_atime.tv_nsec = hui->hui_atime_ns;
-
- attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE |
- ATTR_UID | ATTR_GID |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_ATIME | ATTR_ATIME_SET;
-
- mutex_lock(&inode->i_mutex);
-
- rc = ll_setattr_raw(file->f_path.dentry, attr, true);
- if (rc == -ENODATA)
- rc = 0;
-
- mutex_unlock(&inode->i_mutex);
-
- kfree(attr);
-free_hss:
- kfree(hss);
- return rc;
-}
-
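-/*
- * Main ioctl entry point for regular files: dispatch Lustre-specific
- * commands and fall back to registered dynamic ioctl handlers.
- */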
-static long
-ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- int flags, rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
- inode->i_generation, inode, cmd);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
-
- /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
- if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
- return -ENOTTY;
-
- switch (cmd) {
- case LL_IOC_GETFLAGS:
- /* Get the current value of the file flags */
- return put_user(fd->fd_flags, (int *)arg);
- case LL_IOC_SETFLAGS:
- case LL_IOC_CLRFLAGS:
- /* Set or clear specific file flags */
- /* XXX This probably needs checks to ensure the flags are
- * not abused, and to handle any flag side effects.
- */
- if (get_user(flags, (int *) arg))
- return -EFAULT;
-
- if (cmd == LL_IOC_SETFLAGS) {
- if ((flags & LL_FILE_IGNORE_LOCK) &&
- !(file->f_flags & O_DIRECT)) {
- CERROR("%s: unable to disable locking on non-O_DIRECT file\n",
- current->comm);
- return -EINVAL;
- }
-
- fd->fd_flags |= flags;
- } else {
- fd->fd_flags &= ~flags;
- }
- return 0;
- case LL_IOC_LOV_SETSTRIPE:
- return ll_lov_setstripe(inode, file, arg);
- case LL_IOC_LOV_SETEA:
- return ll_lov_setea(inode, file, arg);
- case LL_IOC_LOV_SWAP_LAYOUTS: {
- struct file *file2;
- struct lustre_swap_layouts lsl;
-
- if (copy_from_user(&lsl, (char *)arg,
- sizeof(struct lustre_swap_layouts)))
- return -EFAULT;
-
- if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
- return -EPERM;
-
- file2 = fget(lsl.sl_fd);
- if (file2 == NULL)
- return -EBADF;
-
- rc = -EPERM;
- if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */
- rc = ll_swap_layouts(file, file2, &lsl);
- fput(file2);
- return rc;
- }
- case LL_IOC_LOV_GETSTRIPE:
- return ll_lov_getstripe(inode, arg);
- case LL_IOC_RECREATE_OBJ:
- return ll_lov_recreate_obj(inode, arg);
- case LL_IOC_RECREATE_FID:
- return ll_lov_recreate_fid(inode, arg);
- case FSFILT_IOC_FIEMAP:
- return ll_ioctl_fiemap(inode, arg);
- case FSFILT_IOC_GETFLAGS:
- case FSFILT_IOC_SETFLAGS:
- return ll_iocontrol(inode, file, cmd, arg);
- case FSFILT_IOC_GETVERSION_OLD:
- case FSFILT_IOC_GETVERSION:
- return put_user(inode->i_generation, (int *)arg);
- case LL_IOC_GROUP_LOCK:
- return ll_get_grouplock(inode, file, arg);
- case LL_IOC_GROUP_UNLOCK:
- return ll_put_grouplock(inode, file, arg);
- case IOC_OBD_STATFS:
- return ll_obd_statfs(inode, (void *)arg);
-
- /* We need to special case any other ioctls we want to handle,
- * to send them to the MDS/OST as appropriate and to properly
- * network encode the arg field.
- case FSFILT_IOC_SETVERSION_OLD:
- case FSFILT_IOC_SETVERSION:
- */
- case LL_IOC_FLUSHCTX:
- return ll_flush_ctx(inode);
- case LL_IOC_PATH2FID: {
- if (copy_to_user((void *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
- return -EFAULT;
-
- return 0;
- }
- case OBD_IOC_FID2PATH:
- return ll_fid2path(inode, (void *)arg);
- case LL_IOC_DATA_VERSION: {
- struct ioc_data_version idv;
- int rc;
-
- if (copy_from_user(&idv, (char *)arg, sizeof(idv)))
- return -EFAULT;
-
- rc = ll_data_version(inode, &idv.idv_version,
- !(idv.idv_flags & LL_DV_NOFLUSH));
-
- if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv)))
- return -EFAULT;
-
- return rc;
- }
-
- case LL_IOC_GET_MDTIDX: {
- int mdtidx;
-
- mdtidx = ll_get_mdt_idx(inode);
- if (mdtidx < 0)
- return mdtidx;
-
- if (put_user((int)mdtidx, (int *)arg))
- return -EFAULT;
-
- return 0;
- }
- case OBD_IOC_GETDTNAME:
- case OBD_IOC_GETMDNAME:
- return ll_get_obd_name(inode, cmd, arg);
- case LL_IOC_HSM_STATE_GET: {
- struct md_op_data *op_data;
- struct hsm_user_state *hus;
- int rc;
-
- hus = kzalloc(sizeof(*hus), GFP_NOFS);
- if (!hus)
- return -ENOMEM;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hus);
- if (IS_ERR(op_data)) {
- kfree(hus);
- return PTR_ERR(op_data);
- }
-
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
- op_data, NULL);
-
- if (copy_to_user((void *)arg, hus, sizeof(*hus)))
- rc = -EFAULT;
-
- ll_finish_md_op_data(op_data);
- kfree(hus);
- return rc;
- }
- case LL_IOC_HSM_STATE_SET: {
- struct hsm_state_set *hss;
- int rc;
-
- hss = memdup_user((char *)arg, sizeof(*hss));
- if (IS_ERR(hss))
- return PTR_ERR(hss);
-
- rc = ll_hsm_state_set(inode, hss);
-
- kfree(hss);
- return rc;
- }
- case LL_IOC_HSM_ACTION: {
- struct md_op_data *op_data;
- struct hsm_current_action *hca;
- int rc;
-
- hca = kzalloc(sizeof(*hca), GFP_NOFS);
- if (!hca)
- return -ENOMEM;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, hca);
- if (IS_ERR(op_data)) {
- kfree(hca);
- return PTR_ERR(op_data);
- }
-
- rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
- op_data, NULL);
-
- if (copy_to_user((char *)arg, hca, sizeof(*hca)))
- rc = -EFAULT;
-
- ll_finish_md_op_data(op_data);
- kfree(hca);
- return rc;
- }
- case LL_IOC_SET_LEASE: {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle *och = NULL;
- bool lease_broken;
- fmode_t mode = 0;
-
- switch (arg) {
- case F_WRLCK:
- if (!(file->f_mode & FMODE_WRITE))
- return -EPERM;
- mode = FMODE_WRITE;
- break;
- case F_RDLCK:
- if (!(file->f_mode & FMODE_READ))
- return -EPERM;
- mode = FMODE_READ;
- break;
- case F_UNLCK:
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
- och = fd->fd_lease_och;
- fd->fd_lease_och = NULL;
- }
- mutex_unlock(&lli->lli_och_mutex);
-
- if (och != NULL) {
- mode = och->och_flags &
- (FMODE_READ|FMODE_WRITE);
- rc = ll_lease_close(och, inode, &lease_broken);
- if (rc == 0 && lease_broken)
- mode = 0;
- } else {
- rc = -ENOLCK;
- }
-
- /* return the type of lease or error */
- return rc < 0 ? rc : (int)mode;
- default:
- return -EINVAL;
- }
-
- CDEBUG(D_INODE, "Set lease with mode %d\n", mode);
-
- /* apply for lease */
- och = ll_lease_open(inode, file, mode, 0);
- if (IS_ERR(och))
- return PTR_ERR(och);
-
- rc = 0;
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och == NULL) {
- fd->fd_lease_och = och;
- och = NULL;
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (och != NULL) {
-			/* impossible at the moment because only exclusive
-			 * leases are supported */
- ll_lease_close(och, inode, &lease_broken);
- rc = -EBUSY;
- }
- return rc;
- }
- case LL_IOC_GET_LEASE: {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ldlm_lock *lock = NULL;
-
- rc = 0;
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
- struct obd_client_handle *och = fd->fd_lease_och;
-
- lock = ldlm_handle2lock(&och->och_lease_handle);
- if (lock != NULL) {
- lock_res_and_lock(lock);
- if (!ldlm_is_cancel(lock))
- rc = och->och_flags &
- (FMODE_READ | FMODE_WRITE);
- unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
- }
- }
- mutex_unlock(&lli->lli_och_mutex);
- return rc;
- }
- case LL_IOC_HSM_IMPORT: {
- struct hsm_user_import *hui;
-
- hui = memdup_user((void *)arg, sizeof(*hui));
- if (IS_ERR(hui))
- return PTR_ERR(hui);
-
- rc = ll_hsm_import(inode, file, hui);
-
- kfree(hui);
- return rc;
- }
- default: {
- int err;
-
- if (ll_iocontrol_call(inode, file, cmd, arg, &err) ==
- LLIOC_STOP)
- return err;
-
- return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
- (void *)arg);
- }
- }
-}
-
-
-static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
-{
- struct inode *inode = file_inode(file);
- loff_t retval, eof = 0;
-
- retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
- (origin == SEEK_CUR) ? file->f_pos : 0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
- inode->i_ino, inode->i_generation, inode, retval, retval,
- origin);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
-
- if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
- retval = ll_glimpse_size(inode);
- if (retval != 0)
- return retval;
- eof = i_size_read(inode);
- }
-
- retval = generic_file_llseek_size(file, offset, origin,
- ll_file_maxbytes(inode), eof);
- return retval;
-}
-
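-/*
- * ->flush(): report asynchronous write errors recorded on the inode,
- * unless the application has already been told about a write failure.
- */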
-static int ll_flush(struct file *file, fl_owner_t id)
-{
- struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- int rc, err;
-
- LASSERT(!S_ISDIR(inode->i_mode));
-
- /* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
- rc = lli->lli_async_rc;
- lli->lli_async_rc = 0;
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (rc == 0)
- rc = err;
-
-	/* The application has already been told about the write failure;
-	 * do not report it again. */
- if (fd->fd_write_failed)
- return 0;
- return rc ? -EIO : 0;
-}
-
-/**
- * Called to make sure a portion of the file has been written out.
- * If @mode is not CL_FSYNC_LOCAL, OST_SYNC RPCs are sent to the OSTs.
- *
- * Return how many pages have been written.
- */
-int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
- enum cl_fsync_mode mode, int ignore_layout)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_fsync_io *fio;
- int result;
-
- if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
- mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
- return -EINVAL;
-
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
- io->ci_ignore_layout = ignore_layout;
-
- /* initialize parameters for sync */
- fio = &io->u.ci_fsync;
- fio->fi_start = start;
- fio->fi_end = end;
- fio->fi_fid = ll_inode2fid(inode);
- fio->fi_mode = mode;
- fio->fi_nr_written = 0;
-
- if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
- result = cl_io_loop(env, io);
- else
- result = io->ci_result;
- if (result == 0)
- result = fio->fi_nr_written;
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
-
- return result;
-}
-
-int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
-{
- struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ptlrpc_request *req;
- int rc, err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
-
- rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- mutex_lock(&inode->i_mutex);
-
- /* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
- if (!S_ISDIR(inode->i_mode)) {
- err = lli->lli_async_rc;
- lli->lli_async_rc = 0;
- if (rc == 0)
- rc = err;
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (rc == 0)
- rc = err;
- }
-
- err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
- if (!rc)
- rc = err;
- if (!err)
- ptlrpc_req_finished(req);
-
- if (S_ISREG(inode->i_mode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- err = cl_sync_file_range(inode, start, end, CL_FSYNC_ALL, 0);
- if (rc == 0 && err < 0)
- rc = err;
- if (rc < 0)
- fd->fd_write_failed = true;
- else
- fd->fd_write_failed = false;
- }
-
- mutex_unlock(&inode->i_mutex);
- return rc;
-}
-
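-/*
- * Handle flock()/fcntl() byte-range lock requests by enqueueing an LDLM
- * flock lock on the MDT and mirroring the result in the local lock tables.
- */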
-static int
-ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
-{
- struct inode *inode = file_inode(file);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_FLOCK,
- .ei_cb_cp = ldlm_flock_completion_ast,
- .ei_cbdata = file_lock,
- };
- struct md_op_data *op_data;
- struct lustre_handle lockh = {0};
- ldlm_policy_data_t flock = { {0} };
- __u64 flags = 0;
- int rc;
- int rc2 = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
- inode->i_ino, file_lock);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
-
- if (file_lock->fl_flags & FL_FLOCK)
- LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
- else if (!(file_lock->fl_flags & FL_POSIX))
- return -EINVAL;
-
- flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
- flock.l_flock.pid = file_lock->fl_pid;
- flock.l_flock.start = file_lock->fl_start;
- flock.l_flock.end = file_lock->fl_end;
-
-	/* Somewhat ugly workaround for svc lockd.
-	 * lockd installs a custom fl_lmops->lm_compare_owner that checks
-	 * whether the fl_owner is the same (which it always is between lockd
-	 * processes on the local node) and then compares the pid.
-	 * We therefore assign the pid to the owner field to make it all work;
-	 * a conflict with normal locks is unlikely since the pid space and the
-	 * pointer space for current->files do not intersect. */
- if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
- flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
-
- switch (file_lock->fl_type) {
- case F_RDLCK:
- einfo.ei_mode = LCK_PR;
- break;
- case F_UNLCK:
- /* An unlock request may or may not have any relation to
- * existing locks so we may not be able to pass a lock handle
- * via a normal ldlm_lock_cancel() request. The request may even
- * unlock a byte range in the middle of an existing lock. In
- * order to process an unlock request we need all of the same
- * information that is given with a normal read or write record
- * lock request. To avoid creating another ldlm unlock (cancel)
- * message we'll treat a LCK_NL flock request as an unlock. */
- einfo.ei_mode = LCK_NL;
- break;
- case F_WRLCK:
- einfo.ei_mode = LCK_PW;
- break;
- default:
- CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n",
- file_lock->fl_type);
- return -ENOTSUPP;
- }
-
- switch (cmd) {
- case F_SETLKW:
-#ifdef F_SETLKW64
- case F_SETLKW64:
-#endif
- flags = 0;
- break;
- case F_SETLK:
-#ifdef F_SETLK64
- case F_SETLK64:
-#endif
- flags = LDLM_FL_BLOCK_NOWAIT;
- break;
- case F_GETLK:
-#ifdef F_GETLK64
- case F_GETLK64:
-#endif
- flags = LDLM_FL_TEST_LOCK;
- /* Save the old mode so that if the mode in the lock changes we
- * can decrement the appropriate reader or writer refcount. */
- file_lock->fl_type = einfo.ei_mode;
- break;
- default:
- CERROR("unknown fcntl lock command: %d\n", cmd);
- return -EINVAL;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
- inode->i_ino, flock.l_flock.pid, flags, einfo.ei_mode,
- flock.l_flock.start, flock.l_flock.end);
-
- rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
- op_data, &lockh, &flock, 0, NULL /* req */, flags);
-
- if ((file_lock->fl_flags & FL_FLOCK) &&
- (rc == 0 || file_lock->fl_type == F_UNLCK))
- rc2 = flock_lock_file_wait(file, file_lock);
- if ((file_lock->fl_flags & FL_POSIX) &&
- (rc == 0 || file_lock->fl_type == F_UNLCK) &&
- !(flags & LDLM_FL_TEST_LOCK))
- rc2 = posix_lock_file_wait(file, file_lock);
-
- if (rc2 && file_lock->fl_type != F_UNLCK) {
- einfo.ei_mode = LCK_NL;
- md_enqueue(sbi->ll_md_exp, &einfo, NULL,
- op_data, &lockh, &flock, 0, NULL /* req */, flags);
- rc = rc2;
- }
-
- ll_finish_md_op_data(op_data);
-
- return rc;
-}
-
-static int
-ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
-{
- return -ENOSYS;
-}
-
-/**
- * Test whether locks matching @bits and @l_req_mode are held.
- * - the bits can be spread across several locks
- * - the bits that are found are cleared from *bits
- * - the bits that are not found are kept in *bits
- * \param inode [IN] the inode to check
- * \param bits [IN] searched lock bits
- * \param l_req_mode [IN] searched lock mode
- * \retval boolean, true iff all bits are found
- */
-int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
-{
- struct lustre_handle lockh;
- ldlm_policy_data_t policy;
- ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
- (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
- struct lu_fid *fid;
- __u64 flags;
- int i;
-
- if (!inode)
- return 0;
-
- fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
- ldlm_lockname[mode]);
-
- flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
- for (i = 0; i <= MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
- policy.l_inodebits.bits = *bits & (1 << i);
- if (policy.l_inodebits.bits == 0)
- continue;
-
- if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
- &policy, mode, &lockh)) {
- struct ldlm_lock *lock;
-
- lock = ldlm_handle2lock(&lockh);
- if (lock) {
- *bits &=
- ~(lock->l_policy_data.l_inodebits.bits);
- LDLM_LOCK_PUT(lock);
- } else {
- *bits &= ~policy.l_inodebits.bits;
- }
- }
- }
- return *bits == 0;
-}
-
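-/*
- * Try to match an already granted MDS inodebits lock covering @bits;
- * the matched mode is returned and the lock handle stored in @lockh.
- */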
-ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- ldlm_mode_t mode)
-{
- ldlm_policy_data_t policy = { .l_inodebits = {bits} };
- struct lu_fid *fid;
- ldlm_mode_t rc;
-
- fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
-
- rc = md_lock_match(ll_i2mdexp(inode), flags | LDLM_FL_BLOCK_GRANTED,
- fid, LDLM_IBITS, &policy, mode, lockh);
-
- return rc;
-}
-
-static int ll_inode_revalidate_fini(struct inode *inode, int rc)
-{
- /* Already unlinked. Just update nlink and return success */
- if (rc == -ENOENT) {
- clear_nlink(inode);
- /* This path cannot be hit for regular files unless in
- * case of obscure races, so no need to validate size.
- */
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return 0;
- } else if (rc != 0) {
- CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
- "%s: revalidate FID "DFID" error: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), rc);
- }
-
- return rc;
-}
-
-static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
-{
- struct inode *inode = d_inode(dentry);
- struct ptlrpc_request *req = NULL;
- struct obd_export *exp;
- int rc = 0;
-
- LASSERT(inode != NULL);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n",
- inode->i_ino, inode->i_generation, inode, dentry);
-
- exp = ll_i2mdexp(inode);
-
-	/* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPCs.
-	 * But in the CMD case it caused some lock issues; this should be
-	 * fixed with the new CMD ibits lock. See bug 12718. */
- if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) {
- struct lookup_intent oit = { .it_op = IT_GETATTR };
- struct md_op_data *op_data;
-
- if (ibits == MDS_INODELOCK_LOOKUP)
- oit.it_op = IT_LOOKUP;
-
- /* Call getattr by fid, so do not provide name at all. */
- op_data = ll_prep_md_op_data(NULL, inode,
- inode, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- oit.it_create_mode |= M_CHECK_STALE;
- rc = md_intent_lock(exp, op_data, NULL, 0,
- /* we are not interested in name
- based lookup */
- &oit, 0, &req,
- ll_md_blocking_ast, 0);
- ll_finish_md_op_data(op_data);
- oit.it_create_mode &= ~M_CHECK_STALE;
- if (rc < 0) {
- rc = ll_inode_revalidate_fini(inode, rc);
- goto out;
- }
-
- rc = ll_revalidate_it_finish(req, &oit, inode);
- if (rc != 0) {
- ll_intent_release(&oit);
- goto out;
- }
-
-		/* Unlinked? Unhash the dentry so that it is not picked up
-		   later by do_lookup() -> ll_revalidate_it(). We cannot use
-		   d_drop here because that would break get_cwd() on 2.6.
-		   Bug 10503 */
- if (!d_inode(dentry)->i_nlink)
- d_lustre_invalidate(dentry, 0);
-
- ll_lookup_finish_locks(&oit, inode);
- } else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) {
- struct ll_sb_info *sbi = ll_i2sbi(d_inode(dentry));
- u64 valid = OBD_MD_FLGETATTR;
- struct md_op_data *op_data;
- int ealen = 0;
-
- if (S_ISREG(inode->i_mode)) {
- rc = ll_get_default_mdsize(sbi, &ealen);
- if (rc)
- return rc;
- valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
- 0, ealen, LUSTRE_OPC_ANY,
- NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = valid;
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc) {
- rc = ll_inode_revalidate_fini(inode, rc);
- return rc;
- }
-
- rc = ll_prep_inode(&inode, req, NULL, NULL);
- }
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
-{
- struct inode *inode = d_inode(dentry);
- int rc;
-
- rc = __ll_inode_revalidate(dentry, ibits);
- if (rc != 0)
- return rc;
-
- /* if object isn't regular file, don't validate size */
- if (!S_ISREG(inode->i_mode)) {
- LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
- } else {
-		/* In case of a restore, the MDT has the right size and has
-		 * already sent it back without granting the layout lock, so
-		 * the inode is up to date and a glimpse is useless.
-		 * Also, to glimpse we need the layout; during a running
-		 * restore the MDT holds the layout lock, so the glimpse would
-		 * block until the end of the restore (getattr will block).
-		 */
- if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING))
- rc = ll_glimpse_size(inode);
- }
- return rc;
-}
-
-int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
-{
- struct inode *inode = d_inode(de);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- int res;
-
- res = ll_inode_revalidate(de, MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LOOKUP);
- ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
-
- if (res)
- return res;
-
- stat->dev = inode->i_sb->s_dev;
- if (ll_need_32bit_api(sbi))
- stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
- else
- stat->ino = inode->i_ino;
- stat->mode = inode->i_mode;
- stat->nlink = inode->i_nlink;
- stat->uid = inode->i_uid;
- stat->gid = inode->i_gid;
- stat->rdev = inode->i_rdev;
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
- stat->ctime = inode->i_ctime;
- stat->blksize = 1 << inode->i_blkbits;
-
- stat->size = i_size_read(inode);
- stat->blocks = inode->i_blocks;
-
- return 0;
-}
-
-static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len)
-{
- int rc;
- size_t num_bytes;
- struct ll_user_fiemap *fiemap;
- unsigned int extent_count = fieinfo->fi_extents_max;
-
- num_bytes = sizeof(*fiemap) + (extent_count *
- sizeof(struct ll_fiemap_extent));
- fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS);
-
- if (fiemap == NULL)
- return -ENOMEM;
-
- fiemap->fm_flags = fieinfo->fi_flags;
- fiemap->fm_extent_count = fieinfo->fi_extents_max;
- fiemap->fm_start = start;
- fiemap->fm_length = len;
- if (extent_count > 0)
- memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
- sizeof(struct ll_fiemap_extent));
-
- rc = ll_do_fiemap(inode, fiemap, num_bytes);
-
- fieinfo->fi_flags = fiemap->fm_flags;
- fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
- if (extent_count > 0)
- memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
- fiemap->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent));
-
- kvfree(fiemap);
- return rc;
-}
-
-struct posix_acl *ll_get_acl(struct inode *inode, int type)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct posix_acl *acl = NULL;
-
- spin_lock(&lli->lli_lock);
- /* VFS' acl_permission_check->check_acl will release the refcount */
- acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
-
- return acl;
-}
-
-
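-/*
- * Permission check: revalidate the root inode if necessary, honour
- * remote-client permission checks, then fall back to generic_permission().
- */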
-int ll_inode_permission(struct inode *inode, int mask)
-{
- int rc = 0;
-
-#ifdef MAY_NOT_BLOCK
- if (mask & MAY_NOT_BLOCK)
- return -ECHILD;
-#endif
-
-	/* Since the root inode is NOT validated during lookup, it needs to be
-	 * revalidated here before the permission check. */
-
- if (is_root_inode(inode)) {
- rc = __ll_inode_revalidate(inode->i_sb->s_root,
- MDS_INODELOCK_LOOKUP);
- if (rc)
- return rc;
- }
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
- inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
-
- if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
- return lustre_check_remote_perm(inode, mask);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
- rc = generic_permission(inode, mask);
-
- return rc;
-}
-
-/* -o localflock - only provides locally consistent flock locks */
-struct file_operations ll_file_operations = {
- .read_iter = ll_file_read_iter,
- .write_iter = ll_file_write_iter,
- .unlocked_ioctl = ll_file_ioctl,
- .open = ll_file_open,
- .release = ll_file_release,
- .mmap = ll_file_mmap,
- .llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
- .fsync = ll_fsync,
- .flush = ll_flush
-};
-
-struct file_operations ll_file_operations_flock = {
- .read_iter = ll_file_read_iter,
- .write_iter = ll_file_write_iter,
- .unlocked_ioctl = ll_file_ioctl,
- .open = ll_file_open,
- .release = ll_file_release,
- .mmap = ll_file_mmap,
- .llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
- .fsync = ll_fsync,
- .flush = ll_flush,
- .flock = ll_file_flock,
- .lock = ll_file_flock
-};
-
-/* These are for -o noflock - to return ENOSYS on flock calls */
-struct file_operations ll_file_operations_noflock = {
- .read_iter = ll_file_read_iter,
- .write_iter = ll_file_write_iter,
- .unlocked_ioctl = ll_file_ioctl,
- .open = ll_file_open,
- .release = ll_file_release,
- .mmap = ll_file_mmap,
- .llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
- .fsync = ll_fsync,
- .flush = ll_flush,
- .flock = ll_file_noflock,
- .lock = ll_file_noflock
-};
-
-struct inode_operations ll_file_inode_operations = {
- .setattr = ll_setattr,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
- .listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
- .fiemap = ll_fiemap,
- .get_acl = ll_get_acl,
-};
-
-/* dynamic ioctl number support routines */
-static struct llioc_ctl_data {
- struct rw_semaphore ioc_sem;
- struct list_head ioc_head;
-} llioc = {
- __RWSEM_INITIALIZER(llioc.ioc_sem),
- LIST_HEAD_INIT(llioc.ioc_head)
-};
-
-
-struct llioc_data {
- struct list_head iocd_list;
- unsigned int iocd_size;
- llioc_callback_t iocd_cb;
- unsigned int iocd_count;
- unsigned int iocd_cmd[0];
-};
-
-void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
-{
- unsigned int size;
- struct llioc_data *in_data = NULL;
-
- if (cb == NULL || cmd == NULL ||
- count > LLIOC_MAX_CMD || count < 0)
- return NULL;
-
- size = sizeof(*in_data) + count * sizeof(unsigned int);
- in_data = kzalloc(size, GFP_NOFS);
- if (!in_data)
- return NULL;
-
- memset(in_data, 0, sizeof(*in_data));
- in_data->iocd_size = size;
- in_data->iocd_cb = cb;
- in_data->iocd_count = count;
- memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
-
- down_write(&llioc.ioc_sem);
- list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- up_write(&llioc.ioc_sem);
-
- return in_data;
-}
-EXPORT_SYMBOL(ll_iocontrol_register);
-
-void ll_iocontrol_unregister(void *magic)
-{
- struct llioc_data *tmp;
-
- if (magic == NULL)
- return;
-
- down_write(&llioc.ioc_sem);
- list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
- if (tmp == magic) {
- list_del(&tmp->iocd_list);
- up_write(&llioc.ioc_sem);
-
- kfree(tmp);
- return;
- }
- }
- up_write(&llioc.ioc_sem);
-
- CWARN("didn't find iocontrol register block with magic: %p\n", magic);
-}
-EXPORT_SYMBOL(ll_iocontrol_unregister);
-
-static enum llioc_iter
-ll_iocontrol_call(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg, int *rcp)
-{
- enum llioc_iter ret = LLIOC_CONT;
- struct llioc_data *data;
- int rc = -EINVAL, i;
-
- down_read(&llioc.ioc_sem);
- list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
- for (i = 0; i < data->iocd_count; i++) {
- if (cmd != data->iocd_cmd[i])
- continue;
-
- ret = data->iocd_cb(inode, file, cmd, arg, data, &rc);
- break;
- }
-
- if (ret == LLIOC_STOP)
- break;
- }
- up_read(&llioc.ioc_sem);
-
- if (rcp)
- *rcp = rc;
- return ret;
-}
-
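-/*
- * Push a new layout configuration down to the cl_object; for
- * OBJECT_CONF_SET, also allow the layout lock to be matched once the
- * layout has been applied.
- */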
-int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_env_nest nest;
- struct lu_env *env;
- int result;
-
- if (lli->lli_clob == NULL)
- return 0;
-
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- result = cl_conf_set(env, lli->lli_clob, conf);
- cl_env_nested_put(&nest, env);
-
- if (conf->coc_opc == OBJECT_CONF_SET) {
- struct ldlm_lock *lock = conf->coc_lock;
-
- LASSERT(lock != NULL);
- LASSERT(ldlm_has_layout(lock));
- if (result == 0) {
-			/* The lock can only be allowed to match after the
-			 * layout is applied to the inode; otherwise a wrong
-			 * layout would be seen. Applying the layout must
-			 * happen before dropping the intent lock. */
- ldlm_lock_allow_match(lock);
- }
- }
- return result;
-}
-
-/* Fetch layout from MDT with getxattr request, if it's not ready yet */
-static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req;
- struct mdt_body *body;
- void *lvbdata;
- void *lmm;
- int lmmsize;
- int rc;
-
- CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
- PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
- lock->l_lvb_data, lock->l_lvb_len);
-
- if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY))
- return 0;
-
- /* if layout lock was granted right away, the layout is returned
- * within DLM_LVB of dlm reply; otherwise if the lock was ever
- * blocked and then granted via completion ast, we have to fetch
- * layout here. Please note that we can't use the LVB buffer in
- * completion AST because it doesn't have a large enough buffer */
- rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc == 0)
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- OBD_MD_FLXATTR, XATTR_NAME_LOV, NULL, 0,
- lmmsize, 0, &req);
- if (rc < 0)
- return rc;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- lmmsize = body->eadatasize;
- if (lmmsize == 0) /* empty layout */ {
- rc = 0;
- goto out;
- }
-
- lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
- if (lmm == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- lvbdata = libcfs_kvzalloc(lmmsize, GFP_NOFS);
- if (lvbdata == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- memcpy(lvbdata, lmm, lmmsize);
- lock_res_and_lock(lock);
- if (lock->l_lvb_data != NULL)
- kvfree(lock->l_lvb_data);
-
- lock->l_lvb_data = lvbdata;
- lock->l_lvb_len = lmmsize;
- unlock_res_and_lock(lock);
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-/**
- * Apply the layout to the inode. Layout lock is held and will be released
- * in this function.
- */
-static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
- struct inode *inode, __u32 *gen, bool reconf)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ldlm_lock *lock;
- struct lustre_md md = { NULL };
- struct cl_object_conf conf;
- int rc = 0;
- bool lvb_ready;
- bool wait_layout = false;
-
- LASSERT(lustre_handle_is_used(lockh));
-
- lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
- LASSERT(ldlm_has_layout(lock));
-
- LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n",
- inode, PFID(&lli->lli_fid), reconf);
-
- /* in case this is a caching lock and reinstate with new inode */
- md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
-
- lock_res_and_lock(lock);
- lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
- unlock_res_and_lock(lock);
- /* checking lvb_ready is racy, but this is okay. The worst case is
- * that multiple processes may configure the file at the same time. */
- if (lvb_ready || !reconf) {
- rc = -ENODATA;
- if (lvb_ready) {
- /* layout_gen must be valid if the layout lock is not
- * cancelled and the stripe has already been set */
- *gen = ll_layout_version_get(lli);
- rc = 0;
- }
- goto out;
- }
-
- rc = ll_layout_fetch(inode, lock);
- if (rc < 0)
- goto out;
-
- /* for layout lock, lmm is returned in lock's lvb.
- * lvb_data is immutable if the lock is held so it's safe to access it
- * without res lock. See the description in ldlm_lock_decref_internal()
- * for the condition to free lvb_data of layout lock */
- if (lock->l_lvb_data != NULL) {
- rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
- lock->l_lvb_data, lock->l_lvb_len);
- if (rc >= 0) {
- *gen = LL_LAYOUT_GEN_EMPTY;
- if (md.lsm != NULL)
- *gen = md.lsm->lsm_layout_gen;
- rc = 0;
- } else {
- CERROR("%s: file "DFID" unpackmd error: %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), rc);
- }
- }
- if (rc < 0)
- goto out;
-
- /* set layout to file. It is unlikely that this will fail, as the
- * old layout has surely been eliminated */
- memset(&conf, 0, sizeof(conf));
- conf.coc_opc = OBJECT_CONF_SET;
- conf.coc_inode = inode;
- conf.coc_lock = lock;
- conf.u.coc_md = &md;
- rc = ll_layout_conf(inode, &conf);
-
- if (md.lsm != NULL)
- obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
-
- /* refresh layout failed, need to wait */
- wait_layout = rc == -EBUSY;
-
-out:
- LDLM_LOCK_PUT(lock);
- ldlm_lock_decref(lockh, mode);
-
- /* wait for IO to complete if it's still being used. */
- if (wait_layout) {
- CDEBUG(D_INODE, "%s: %p/"DFID" wait for layout reconf.\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- inode, PFID(&lli->lli_fid));
-
- memset(&conf, 0, sizeof(conf));
- conf.coc_opc = OBJECT_CONF_WAIT;
- conf.coc_inode = inode;
- rc = ll_layout_conf(inode, &conf);
- if (rc == 0)
- rc = -EAGAIN;
-
- CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n",
- PFID(&lli->lli_fid), rc);
- }
- return rc;
-}
-
-/**
- * This function checks if there exists a LAYOUT lock on the client side,
- * or enqueues it if it doesn't have one in cache.
- *
- * This function does not hold the layout lock, so it may be revoked any
- * time after this function returns. Any operations that depend on the
- * layout should be redone in that case.
- *
- * This function should be called before lov_io_init() to get an up-to-date
- * layout version; the caller should save the version number, and after IO
- * is finished this function should be called again to verify that the
- * layout was not changed during the IO.
- */
-int ll_layout_refresh(struct inode *inode, __u32 *gen)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
- struct lookup_intent it;
- struct lustre_handle lockh;
- ldlm_mode_t mode;
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = LCK_CR,
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
- };
- int rc;
-
- *gen = ll_layout_version_get(lli);
- if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != LL_LAYOUT_GEN_NONE)
- return 0;
-
- /* sanity checks */
- LASSERT(fid_is_sane(ll_inode2fid(inode)));
- LASSERT(S_ISREG(inode->i_mode));
-
- /* take layout lock mutex to enqueue layout lock exclusively. */
- mutex_lock(&lli->lli_layout_mutex);
-
-again:
- /* the layout lock is mostly cached on the local side, so try to
- * match it before grabbing the layout lock mutex. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW);
- if (mode != 0) { /* hit cached lock */
- rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
- if (rc == -EAGAIN)
- goto again;
-
- mutex_unlock(&lli->lli_layout_mutex);
- return rc;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- mutex_unlock(&lli->lli_layout_mutex);
- return PTR_ERR(op_data);
- }
-
- /* have to enqueue one */
- memset(&it, 0, sizeof(it));
- it.it_op = IT_LAYOUT;
- lockh.cookie = 0ULL;
-
- LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/"DFID".\n",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid));
-
- rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
- NULL, 0, NULL, 0);
- if (it.d.lustre.it_data != NULL)
- ptlrpc_req_finished(it.d.lustre.it_data);
- it.d.lustre.it_data = NULL;
-
- ll_finish_md_op_data(op_data);
-
- mode = it.d.lustre.it_lock_mode;
- it.d.lustre.it_lock_mode = 0;
- ll_intent_drop_lock(&it);
-
- if (rc == 0) {
- /* set lock data in case this is a new lock */
- ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
- if (rc == -EAGAIN)
- goto again;
- }
- mutex_unlock(&lli->lli_layout_mutex);
-
- return rc;
-}
-
-/**
- * This function sends a restore request to the MDT.
- */
-int ll_layout_restore(struct inode *inode)
-{
- struct hsm_user_request *hur;
- int len, rc;
-
- len = sizeof(struct hsm_user_request) +
- sizeof(struct hsm_user_item);
- hur = kzalloc(len, GFP_NOFS);
- if (!hur)
- return -ENOMEM;
-
- hur->hur_request.hr_action = HUA_RESTORE;
- hur->hur_request.hr_archive_id = 0;
- hur->hur_request.hr_flags = 0;
- memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
- sizeof(hur->hur_user_item[0].hui_fid));
- hur->hur_user_item[0].hui_extent.length = -1;
- hur->hur_request.hr_itemcount = 1;
- rc = obd_iocontrol(LL_IOC_HSM_REQUEST, cl_i2sbi(inode)->ll_md_exp,
- len, hur, NULL);
- kfree(hur);
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
deleted file mode 100644
index e31cad82d5af..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_close.c
- *
- * Lustre Lite routines to issue a secondary close after writeback
- */
-
-#include <linux/module.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-
-/** records that a write is in flight */
-void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
-{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page != NULL && list_empty(&page->cpg_pending_linkage))
- list_add(&page->cpg_pending_linkage,
- &club->cob_pending_list);
- spin_unlock(&lli->lli_lock);
-}
-
-/** records that a write has completed */
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
-{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
- int rc = 0;
-
- spin_lock(&lli->lli_lock);
- if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
- list_del_init(&page->cpg_pending_linkage);
- rc = 1;
- }
- spin_unlock(&lli->lli_lock);
- if (rc)
- ll_queue_done_writing(club->cob_inode, 0);
-}
-
-/** Queues DONE_WRITING if
- * - done writing is allowed;
- * - inode has no dirty pages. */
-void ll_queue_done_writing(struct inode *inode, unsigned long flags)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= flags;
-
- if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->cob_pending_list)) {
- struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
-
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
- /* DONE_WRITING is allowed and inode has no dirty page. */
- spin_lock(&lcq->lcq_lock);
-
- LASSERT(list_empty(&lli->lli_close_list));
- CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
- inode->i_ino, inode->i_generation);
- list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
-
- /* Avoid a concurrent insertion into the close thread queue:
- * an inode is already in the close thread queue; open(), write()
- * and close() happen; the epoch is closed as the inode is marked
- * LLIF_EPOCH_PENDING. When pages are written, the inode should not
- * be inserted into the queue again, so clear this flag to avoid
- * it. */
- lli->lli_flags &= ~LLIF_DONE_WRITING;
-
- wake_up(&lcq->lcq_waitq);
- spin_unlock(&lcq->lcq_lock);
- }
- spin_unlock(&lli->lli_lock);
-}
-
-/** Pack SOM attributes into @op_data for the CLOSE and DONE_WRITING RPCs. */
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- op_data->op_flags |= MF_SOM_CHANGE;
- /* Check if Size-on-MDS attributes are valid. */
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
-
- if (!cl_local_size(inode)) {
- /* Send Size-on-MDS Attributes if valid. */
- op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
- ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
- }
-}
-
-/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle **och, unsigned long flags)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
-
- spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->cob_pending_list))) {
- if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
- LASSERT(*och != NULL);
- LASSERT(lli->lli_pending_och == NULL);
- /* Inode is dirty and there is no pending write done
- * request yet, DONE_WRITE is to be sent later. */
- lli->lli_flags |= LLIF_EPOCH_PENDING;
- lli->lli_pending_och = *och;
- spin_unlock(&lli->lli_lock);
-
- inode = igrab(inode);
- LASSERT(inode);
- goto out;
- }
- if (flags & LLIF_DONE_WRITING) {
- /* Some pages are still dirty, so it is too early to send
- * DONE_WRITE. Wait until all pages are flushed and try
- * DONE_WRITE again later. */
- LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
- lli->lli_flags |= LLIF_DONE_WRITING;
- spin_unlock(&lli->lli_lock);
-
- inode = igrab(inode);
- LASSERT(inode);
- goto out;
- }
- }
- CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
- ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
- op_data->op_flags |= MF_EPOCH_CLOSE;
-
- if (flags & LLIF_DONE_WRITING) {
- LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
- LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
- *och = lli->lli_pending_och;
- lli->lli_pending_och = NULL;
- lli->lli_flags &= ~LLIF_EPOCH_PENDING;
- } else {
- /* Pack Size-on-MDS inode attributes only if they have changed */
- if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
- spin_unlock(&lli->lli_lock);
- goto out;
- }
-
- /* There is a pending DONE_WRITE -- close epoch with no
- * attribute change. */
- if (lli->lli_flags & LLIF_EPOCH_PENDING) {
- spin_unlock(&lli->lli_lock);
- goto out;
- }
- }
-
- LASSERT(list_empty(&club->cob_pending_list));
- lli->lli_flags &= ~LLIF_SOM_DIRTY;
- spin_unlock(&lli->lli_lock);
- ll_done_writing_attr(inode, op_data);
-
-out:
- return;
-}
-
-/**
- * The client updates SOM attributes on the MDS (including llog cookies):
- * obd_getattr with no lock and md_setattr.
- */
-int ll_som_update(struct inode *inode, struct md_op_data *op_data)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ptlrpc_request *request = NULL;
- __u32 old_flags;
- struct obdo *oa;
- int rc;
-
- LASSERT(op_data != NULL);
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
-
- OBDO_ALLOC(oa);
- if (!oa) {
- CERROR("can't allocate memory for Size-on-MDS update.\n");
- return -ENOMEM;
- }
-
- old_flags = op_data->op_flags;
- op_data->op_flags = MF_SOM_CHANGE;
-
- /* If inode is already in another epoch, skip getattr from OSTs. */
- if (lli->lli_ioepoch == op_data->op_ioepoch) {
- rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
- old_flags & MF_GETATTR_LOCK);
- if (rc) {
- oa->o_valid = 0;
- if (rc != -ENOENT)
- CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
- rc, inode->i_ino,
- inode->i_generation);
- } else {
- CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
- PFID(&lli->lli_fid));
- }
- /* Install attributes into op_data. */
- md_from_obdo(op_data, oa, oa->o_valid);
- }
-
- rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
- NULL, 0, NULL, 0, &request, NULL);
- ptlrpc_req_finished(request);
-
- OBDO_FREE(oa);
- return rc;
-}
-
-/**
- * Closes the ioepoch and packs all the attributes into @op_data for
- * DONE_WRITING rpc.
- */
-static void ll_prepare_done_writing(struct inode *inode,
- struct md_op_data *op_data,
- struct obd_client_handle **och)
-{
- ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
- /* If there is no @och, we do not do D_W yet. */
- if (*och == NULL)
- return;
-
- ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
- ll_prep_md_op_data(op_data, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
-/** Send a DONE_WRITING rpc. */
-static void ll_done_writing(struct inode *inode)
-{
- struct obd_client_handle *och = NULL;
- struct md_op_data *op_data;
- int rc;
-
- LASSERT(exp_connect_som(ll_i2mdexp(inode)));
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return;
-
- ll_prepare_done_writing(inode, op_data, &och);
- /* If there is no @och, we do not do D_W yet. */
- if (och == NULL)
- goto out;
-
- rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
- if (rc == -EAGAIN)
- /* MDS has instructed us to obtain the Size-on-MDS attribute
- * from the OSTs and send a setattr back to the MDS. */
- rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc done_writing failed: rc = %d\n",
- inode->i_ino, rc);
-out:
- ll_finish_md_op_data(op_data);
- if (och) {
- md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
- kfree(och);
- }
-}
-
-static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
-{
- struct ll_inode_info *lli = NULL;
-
- spin_lock(&lcq->lcq_lock);
-
- if (!list_empty(&lcq->lcq_head)) {
- lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
- lli_close_list);
- list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop))
- lli = ERR_PTR(-EALREADY);
-
- spin_unlock(&lcq->lcq_lock);
- return lli;
-}
-
-static int ll_close_thread(void *arg)
-{
- struct ll_close_queue *lcq = arg;
-
- complete(&lcq->lcq_comp);
-
- while (1) {
- struct l_wait_info lwi = { 0 };
- struct ll_inode_info *lli;
- struct inode *inode;
-
- l_wait_event_exclusive(lcq->lcq_waitq,
- (lli = ll_close_next_lli(lcq)) != NULL,
- &lwi);
- if (IS_ERR(lli))
- break;
-
- inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
- inode->i_ino, inode->i_generation);
- ll_done_writing(inode);
- iput(inode);
- }
-
- CDEBUG(D_INFO, "ll_close exiting\n");
- complete(&lcq->lcq_comp);
- return 0;
-}
-
-int ll_close_thread_start(struct ll_close_queue **lcq_ret)
-{
- struct ll_close_queue *lcq;
- struct task_struct *task;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
- return -EINTR;
-
- lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
- if (!lcq)
- return -ENOMEM;
-
- spin_lock_init(&lcq->lcq_lock);
- INIT_LIST_HEAD(&lcq->lcq_head);
- init_waitqueue_head(&lcq->lcq_waitq);
- init_completion(&lcq->lcq_comp);
-
- task = kthread_run(ll_close_thread, lcq, "ll_close");
- if (IS_ERR(task)) {
- kfree(lcq);
- return PTR_ERR(task);
- }
-
- wait_for_completion(&lcq->lcq_comp);
- *lcq_ret = lcq;
- return 0;
-}
-
-void ll_close_thread_shutdown(struct ll_close_queue *lcq)
-{
- init_completion(&lcq->lcq_comp);
- atomic_inc(&lcq->lcq_stop);
- wake_up(&lcq->lcq_waitq);
- wait_for_completion(&lcq->lcq_comp);
- kfree(lcq);
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
deleted file mode 100644
index dcf7c6b7f056..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ /dev/null
@@ -1,1479 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LLITE_INTERNAL_H
-#define LLITE_INTERNAL_H
-#include "../include/lustre_debug.h"
-#include "../include/lustre_ver.h"
-#include "../include/lustre_disk.h" /* for s2sbi */
-#include "../include/lustre_eacl.h"
-
-/* for struct cl_lock_descr and struct cl_io */
-#include "../include/cl_object.h"
-#include "../include/lclient.h"
-#include "../include/lustre_mdc.h"
-#include "../include/lustre_intent.h"
-#include <linux/compat.h>
-#include <linux/posix_acl_xattr.h>
-
-#ifndef FMODE_EXEC
-#define FMODE_EXEC 0
-#endif
-
-#ifndef VM_FAULT_RETRY
-#define VM_FAULT_RETRY 0
-#endif
-
-/** Only used on client-side for indicating the tail of dir hash/offset. */
-#define LL_DIR_END_OFF 0x7fffffffffffffffULL
-#define LL_DIR_END_OFF_32BIT 0x7fffffffUL
-
-#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
-#define LUSTRE_FPRIVATE(file) ((file)->private_data)
-
-struct ll_dentry_data {
- struct lookup_intent *lld_it;
- unsigned int lld_sa_generation;
- unsigned int lld_invalid:1;
- struct rcu_head lld_rcu_head;
-};
-
-#define ll_d2d(de) ((struct ll_dentry_data *)((de)->d_fsdata))
-
-#define LLI_INODE_MAGIC 0x111d0de5
-#define LLI_INODE_DEAD 0xdeadd00d
-
-/* remote client permission cache */
-#define REMOTE_PERM_HASHSIZE 16
-
-struct ll_getname_data {
- struct dir_context ctx;
- char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
- struct lu_fid lgd_fid; /* target fid we are looking for */
- int lgd_found; /* inode matched? */
-};
-
-/* llite setxid/access permission for user on remote client */
-struct ll_remote_perm {
- struct hlist_node lrp_list;
- uid_t lrp_uid;
- gid_t lrp_gid;
- uid_t lrp_fsuid;
- gid_t lrp_fsgid;
- int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
- is access permission with
- lrp_fsuid/lrp_fsgid. */
-};
-
-enum lli_flags {
- /* MDS has an authority for the Size-on-MDS attributes. */
- LLIF_MDS_SIZE_LOCK = (1 << 0),
- /* Epoch close is postponed. */
- LLIF_EPOCH_PENDING = (1 << 1),
- /* DONE WRITING is allowed. */
- LLIF_DONE_WRITING = (1 << 2),
- /* Size-on-MDS attributes are changed. An attribute update needs to
- * be sent to the MDS. */
- LLIF_SOM_DIRTY = (1 << 3),
- /* File data is modified. */
- LLIF_DATA_MODIFIED = (1 << 4),
- /* File is being restored */
- LLIF_FILE_RESTORING = (1 << 5),
- /* Xattr cache is attached to the file */
- LLIF_XATTR_CACHE = (1 << 6),
-};
-
-struct ll_inode_info {
- __u32 lli_inode_magic;
- __u32 lli_flags;
- __u64 lli_ioepoch;
-
- spinlock_t lli_lock;
- struct posix_acl *lli_posix_acl;
-
- struct hlist_head *lli_remote_perms;
- struct mutex lli_rmtperm_mutex;
-
- /* identifying fields for both metadata and data stacks. */
- struct lu_fid lli_fid;
- /* Parent fid for accessing default stripe data on parent directory
- * for allocating OST objects after a mknod() and later open-by-FID. */
- struct lu_fid lli_pfid;
-
- struct list_head lli_close_list;
- /* open count, currently used by capability only; indicates whether
- * the capability needs renewal */
- atomic_t lli_open_count;
- unsigned long lli_rmtperm_time;
-
- /* handle is to be sent to MDS later on done_writing and setattr.
- * Open handle data are needed for the recovery to reconstruct
- * the inode state on the MDS. XXX: recovery is not ready yet. */
- struct obd_client_handle *lli_pending_och;
-
- /* We need all three because every inode may be opened in different
- * modes */
- struct obd_client_handle *lli_mds_read_och;
- struct obd_client_handle *lli_mds_write_och;
- struct obd_client_handle *lli_mds_exec_och;
- __u64 lli_open_fd_read_count;
- __u64 lli_open_fd_write_count;
- __u64 lli_open_fd_exec_count;
- /* Protects access to och pointers and their usage counters */
- struct mutex lli_och_mutex;
-
- struct inode lli_vfs_inode;
-
- /* the most recent timestamps obtained from mds */
- struct ost_lvb lli_lvb;
- spinlock_t lli_agl_lock;
-
- /* Try to make the d::member and f::member aligned. Before using
- * these members, make clear whether it is a directory or not. */
- union {
- /* for directory */
- struct {
- /* serialize normal readdir and statahead-readdir. */
- struct mutex d_readdir_mutex;
-
- /* metadata statahead */
- /* since parent and child threads can share the same @file
- * struct, "opendir_key" is the token used at dir close to
- * handle the case where the parent exits before the child
- * -- its holder should clean up the dir readahead. */
- void *d_opendir_key;
- struct ll_statahead_info *d_sai;
- /* protect statahead stuff. */
- spinlock_t d_sa_lock;
- /* "opendir_pid" is the token when lookup/revalid
- * -- I am the owner of dir statahead. */
- pid_t d_opendir_pid;
- } d;
-
-#define lli_readdir_mutex u.d.d_readdir_mutex
-#define lli_opendir_key u.d.d_opendir_key
-#define lli_sai u.d.d_sai
-#define lli_sa_lock u.d.d_sa_lock
-#define lli_opendir_pid u.d.d_opendir_pid
-
- /* for non-directory */
- struct {
- struct mutex f_size_mutex;
- char *f_symlink_name;
- __u64 f_maxbytes;
- /*
- * struct rw_semaphore {
- * signed long count; // align d.d_def_acl
- * spinlock_t wait_lock; // align d.d_sa_lock
- * struct list_head wait_list;
- * }
- */
- struct rw_semaphore f_trunc_sem;
- struct mutex f_write_mutex;
-
- struct rw_semaphore f_glimpse_sem;
- unsigned long f_glimpse_time;
- struct list_head f_agl_list;
- __u64 f_agl_index;
-
- /* for writepage() only to communicate to fsync */
- int f_async_rc;
-
- /*
- * whenever a process tries to read/write the file, the
- * jobid of the process will be saved here, and it'll
- * be packed into the write RPC when flushed later.
- *
- * so the read/write statistics for the jobid will not be
- * accurate if the file is shared by different jobs.
- */
- char f_jobid[JOBSTATS_JOBID_SIZE];
- } f;
-
-#define lli_size_mutex u.f.f_size_mutex
-#define lli_symlink_name u.f.f_symlink_name
-#define lli_maxbytes u.f.f_maxbytes
-#define lli_trunc_sem u.f.f_trunc_sem
-#define lli_write_mutex u.f.f_write_mutex
-#define lli_glimpse_sem u.f.f_glimpse_sem
-#define lli_glimpse_time u.f.f_glimpse_time
-#define lli_agl_list u.f.f_agl_list
-#define lli_agl_index u.f.f_agl_index
-#define lli_async_rc u.f.f_async_rc
-#define lli_jobid u.f.f_jobid
-
- } u;
-
- /* XXX: For the following frequently used members, although they may
- * only be used for non-directory objects, it is time-wasting to check
- * whether the object is a directory or not before using them. On the
- * other hand, since sizeof(f) > sizeof(d) currently, moving those
- * members into u.f. would not reduce the "ll_inode_info" size.
- * So keep them outside.
- *
- * In the future, if more members are added only for directories,
- * some of the following members can be moved into u.f.
- */
- bool lli_has_smd;
- struct cl_object *lli_clob;
-
- /* mutex to request for layout lock exclusively. */
- struct mutex lli_layout_mutex;
- /* Layout version, protected by lli_layout_lock */
- __u32 lli_layout_gen;
- spinlock_t lli_layout_lock;
-
- struct rw_semaphore lli_xattrs_list_rwsem;
- struct mutex lli_xattrs_enq_lock;
- struct list_head lli_xattrs;/* ll_xattr_entry->xe_list */
-};
-
-static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
-{
- __u32 gen;
-
- spin_lock(&lli->lli_layout_lock);
- gen = lli->lli_layout_gen;
- spin_unlock(&lli->lli_layout_lock);
-
- return gen;
-}
-
-static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
-{
- spin_lock(&lli->lli_layout_lock);
- lli->lli_layout_gen = gen;
- spin_unlock(&lli->lli_layout_lock);
-}
-
-int ll_xattr_cache_destroy(struct inode *inode);
-
-int ll_xattr_cache_get(struct inode *inode,
- const char *name,
- char *buffer,
- size_t size,
- __u64 valid);
-
-/*
- * Locking to guarantee consistency of non-atomic updates to long long i_size,
- * consistency between file size and KMS.
- *
- * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order.
- */
-
-void ll_inode_size_lock(struct inode *inode);
-void ll_inode_size_unlock(struct inode *inode);
-
-/* FIXME: replace the name of this with LL_I to conform to kernel stuff */
-/* static inline struct ll_inode_info *LL_I(struct inode *inode) */
-static inline struct ll_inode_info *ll_i2info(struct inode *inode)
-{
- return container_of(inode, struct ll_inode_info, lli_vfs_inode);
-}
-
-/* default to about 40meg of readahead on a given system. That much tied
- * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
-
-/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
-
-enum ra_stat {
- RA_STAT_HIT = 0,
- RA_STAT_MISS,
- RA_STAT_DISTANT_READPAGE,
- RA_STAT_MISS_IN_WINDOW,
- RA_STAT_FAILED_GRAB_PAGE,
- RA_STAT_FAILED_MATCH,
- RA_STAT_DISCARDED,
- RA_STAT_ZERO_LEN,
- RA_STAT_ZERO_WINDOW,
- RA_STAT_EOF,
- RA_STAT_MAX_IN_FLIGHT,
- RA_STAT_WRONG_GRAB_PAGE,
- _NR_RA_STAT,
-};
-
-struct ll_ra_info {
- atomic_t ra_cur_pages;
- unsigned long ra_max_pages;
- unsigned long ra_max_pages_per_file;
- unsigned long ra_max_read_ahead_whole_pages;
-};
-
-/* ra_io_arg is filled in at the beginning of ll_readahead with
- * ras_lock held; the following ll_read_ahead_pages will then read RA
- * pages according to this arg. All the items in this structure are
- * counted by page index.
- */
-struct ra_io_arg {
- unsigned long ria_start; /* start offset of read-ahead*/
- unsigned long ria_end; /* end offset of read-ahead*/
- /* If a stride read pattern is detected, ria_stoff is where the
- * stride read starts. Note: for normal read-ahead, the value
- * here is meaningless and will not be accessed. */
- pgoff_t ria_stoff;
- /* ria_length and ria_pages are the stride length and the number of
- * pages per stride in stride I/O mode. They are also used to check
- * whether the read-ahead pages are part of a stride I/O read-ahead. */
- unsigned long ria_length;
- unsigned long ria_pages;
-};
-
-/* LL_HIST_MAX=32 causes an overflow */
-#define LL_HIST_MAX 28
-#define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
-#define LL_PROCESS_HIST_MAX 10
-struct per_process_info {
- pid_t pid;
- struct obd_histogram pp_r_hist;
- struct obd_histogram pp_w_hist;
-};
-
-/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
-struct ll_rw_extents_info {
- struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
-};
-
-#define LL_OFFSET_HIST_MAX 100
-struct ll_rw_process_info {
- pid_t rw_pid;
- int rw_op;
- loff_t rw_range_start;
- loff_t rw_range_end;
- loff_t rw_last_file_pos;
- loff_t rw_offset;
- size_t rw_smallest_extent;
- size_t rw_largest_extent;
- struct ll_file_data *rw_last_file;
-};
-
-enum stats_track_type {
- STATS_TRACK_ALL = 0, /* track all processes */
- STATS_TRACK_PID, /* track process with this pid */
- STATS_TRACK_PPID, /* track processes with this ppid */
- STATS_TRACK_GID, /* track processes with this gid */
- STATS_TRACK_LAST,
-};
-
-/* flags for sbi->ll_flags */
-#define LL_SBI_NOLCK 0x01 /* DLM locking disabled (directio-only) */
-#define LL_SBI_CHECKSUM 0x02 /* checksum each page as it's written */
-#define LL_SBI_FLOCK 0x04
-#define LL_SBI_USER_XATTR 0x08 /* support user xattr */
-#define LL_SBI_ACL 0x10 /* support ACL */
-#define LL_SBI_RMT_CLIENT 0x40 /* remote client */
-#define LL_SBI_MDS_CAPA 0x80 /* support mds capa, obsolete */
-#define LL_SBI_OSS_CAPA 0x100 /* support oss capa, obsolete */
-#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
-#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
-#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
-#define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */
-#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
-#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
-#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
-#define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */
-#define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
-#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
-#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
-
-#define LL_SBI_FLAGS { \
- "nolck", \
- "checksum", \
- "flock", \
- "xattr", \
- "acl", \
- "???", \
- "rmt_client", \
- "mds_capa", \
- "oss_capa", \
- "flock", \
- "lru_resize", \
- "lazy_statfs", \
- "som", \
- "32bit_api", \
- "64bit_hash", \
- "agl", \
- "verbose", \
- "layout", \
- "user_fid2path",\
- "xattr", \
-}
-
-#define RCE_HASHES 32
-
-struct rmtacl_ctl_entry {
- struct list_head rce_list;
- pid_t rce_key; /* hash key */
- int rce_ops; /* acl operation type */
-};
-
-struct rmtacl_ctl_table {
- spinlock_t rct_lock;
- struct list_head rct_entries[RCE_HASHES];
-};
-
-#define EE_HASHES 32
-
-struct eacl_table {
- spinlock_t et_lock;
- struct list_head et_entries[EE_HASHES];
-};
-
-struct ll_sb_info {
- /* this protects pglist and ra_info. It isn't safe to
- * grab from interrupt contexts */
- spinlock_t ll_lock;
- spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
- spinlock_t ll_process_lock; /* ll_rw_process_info */
- struct obd_uuid ll_sb_uuid;
- struct obd_export *ll_md_exp;
- struct obd_export *ll_dt_exp;
- struct dentry *ll_debugfs_entry;
- struct lu_fid ll_root_fid; /* root object fid */
-
- int ll_flags;
- unsigned int ll_umounting:1,
- ll_xattr_cache_enabled:1;
- struct list_head ll_conn_chain; /* per-conn chain of SBs */
- struct lustre_client_ocd ll_lco;
-
- struct list_head ll_orphan_dentry_list; /*please don't ask -p*/
- struct ll_close_queue *ll_lcq;
-
- struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
-
- struct cl_client_cache ll_cache;
-
- struct lprocfs_stats *ll_ra_stats;
-
- struct ll_ra_info ll_ra_info;
- unsigned int ll_namelen;
- struct file_operations *ll_fop;
-
- unsigned int ll_md_brw_size; /* used by readdir */
-
- struct lu_site *ll_site;
- struct cl_device *ll_cl;
- /* Statistics */
- struct ll_rw_extents_info ll_rw_extents_info;
- int ll_extent_process_count;
- struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
- unsigned int ll_offset_process_count;
- struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
- unsigned int ll_rw_offset_entry_count;
- int ll_stats_track_id;
- enum stats_track_type ll_stats_track_type;
- int ll_rw_stats_on;
-
- /* metadata stat-ahead */
- unsigned int ll_sa_max; /* max statahead RPCs */
- atomic_t ll_sa_total; /* statahead thread started
- * count */
- atomic_t ll_sa_wrong; /* statahead thread stopped for
- * low hit ratio */
- atomic_t ll_agl_total; /* AGL thread started count */
-
- dev_t ll_sdev_orig; /* save s_dev before assign for
- * clustered nfs */
- struct rmtacl_ctl_table ll_rct;
- struct eacl_table ll_et;
- __kernel_fsid_t ll_fsid;
- struct kobject ll_kobj; /* sysfs object */
- struct super_block *ll_sb; /* struct super_block (for sysfs code)*/
- struct completion ll_kobj_unregister;
-};
-
-struct ll_ra_read {
- pgoff_t lrr_start;
- pgoff_t lrr_count;
- struct task_struct *lrr_reader;
- struct list_head lrr_linkage;
-};
-
-/*
- * per file-descriptor read-ahead data.
- */
-struct ll_readahead_state {
- spinlock_t ras_lock;
- /*
- * index of the last page that read(2) needed and that wasn't in the
- * cache. Used by ras_update() to detect seeks.
- *
- * XXX nikita: if access seeks into cached region, Lustre doesn't see
- * this.
- */
- unsigned long ras_last_readpage;
- /*
- * number of pages read after last read-ahead window reset. As window
- * is reset on each seek, this is effectively a number of consecutive
- * accesses. Maybe ->ras_accessed_in_window is a better name.
- *
- * XXX nikita: window is also reset (by ras_update()) when Lustre
- * believes that memory pressure evicts read-ahead pages. In that
- * case, it probably doesn't make sense to expand window to
- * PTLRPC_MAX_BRW_PAGES on the third access.
- */
- unsigned long ras_consecutive_pages;
- /*
- * number of read requests after the last read-ahead window reset.
- * As the window is reset on each seek, this is effectively the number
- * of consecutive read requests and is used to trigger read-ahead.
- */
- unsigned long ras_consecutive_requests;
- /*
- * Parameters of current read-ahead window. Handled by
- * ras_update(). On the initial access to the file or after a seek,
- * window is reset to 0. After 3 consecutive accesses, window is
- * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
- * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
- */
- unsigned long ras_window_start, ras_window_len;
- /*
- * Where next read-ahead should start at. This lies within read-ahead
- * window. Read-ahead window is read in pieces rather than at once
- * because: 1. lustre limits total number of pages under read-ahead by
- * ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
- * not covered by DLM lock.
- */
- unsigned long ras_next_readahead;
- /*
- * Total number of ll_file_read requests issued; reads originating
- * from mmap are not counted in this total. This value is used to
- * trigger full file read-ahead after multiple reads to a small file.
- */
- unsigned long ras_requests;
- /*
- * Page index with respect to the current request; these values
- * will not be accurate when dealing with reads issued via mmap.
- */
- unsigned long ras_request_index;
- /*
- * list of struct ll_ra_read's, one per read(2) call currently in
- * progress against this file descriptor. Used by the read-ahead code,
- * protected by ->ras_lock.
- */
- struct list_head ras_read_beads;
- /*
- * The following 3 items are used for detecting the stride I/O
- * mode.
- * In stride I/O mode,
- * ...............|-----data-----|****gap*****|--------|******|....
- * offset |-stride_pages-|-stride_gap-|
- * ras_stride_offset = offset;
- * ras_stride_length = stride_pages + stride_gap;
- * ras_stride_pages = stride_pages;
- * Note: all these three items are counted by pages.
- */
- unsigned long ras_stride_length;
- unsigned long ras_stride_pages;
- pgoff_t ras_stride_offset;
- /*
- * number of consecutive stride requests; it is similar to
- * ras_consecutive_requests, but used for stride I/O mode.
- * Note: only after more than 2 consecutive stride requests are
- * detected will stride read-ahead be enabled.
- */
- unsigned long ras_consecutive_stride_requests;
-};
-
-extern struct kmem_cache *ll_file_data_slab;
-struct lustre_handle;
-struct ll_file_data {
- struct ll_readahead_state fd_ras;
- struct ccc_grouplock fd_grouplock;
- __u64 lfd_pos;
- __u32 fd_flags;
- fmode_t fd_omode;
- /* openhandle if lease exists for this file.
- * Borrow lli->lli_och_mutex to protect assignment */
- struct obd_client_handle *fd_lease_och;
- struct obd_client_handle *fd_och;
- struct file *fd_file;
- /* Indicates whether a failure needs to be reported on close.
- * true: the failure is already known, do not report it again.
- * false: unknown failure, should be reported. */
- bool fd_write_failed;
-};
-
-struct lov_stripe_md;
-
-extern spinlock_t inode_lock;
-
-extern struct dentry *llite_root;
-extern struct kset *llite_kset;
-
-static inline struct inode *ll_info2i(struct ll_inode_info *lli)
-{
- return &lli->lli_vfs_inode;
-}
-
-__u32 ll_i2suppgid(struct inode *i);
-void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2);
-
-static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
-{
-#if BITS_PER_LONG == 32
- return 1;
-#elif defined(CONFIG_COMPAT)
- return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API));
-#else
- return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
-#endif
-}
-
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar);
-struct ll_ra_read *ll_ra_read_get(struct file *f);
-
-/* llite/lproc_llite.c */
-int ldebugfs_register_mountpoint(struct dentry *parent,
- struct super_block *sb, char *osc, char *mdc);
-void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi);
-void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
-void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars);
-void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
- struct ll_file_data *file, loff_t pos,
- size_t count, int rw);
-
-/* llite/dir.c */
-void ll_release_page(struct page *page, int remove);
-extern const struct file_operations ll_dir_operations;
-extern const struct inode_operations ll_dir_inode_operations;
-struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
- struct ll_dir_chain *chain);
-int ll_dir_read(struct inode *inode, struct dir_context *ctx);
-
-int ll_get_mdt_idx(struct inode *inode);
-/* llite/namei.c */
-extern const struct inode_operations ll_special_inode_operations;
-
-int ll_objects_destroy(struct ptlrpc_request *request,
- struct inode *dir);
-struct inode *ll_iget(struct super_block *sb, ino_t hash,
- struct lustre_md *lic);
-int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
- void *data, int flag);
-struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
-int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
-
-/* llite/rw.c */
-int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
-int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
-int ll_writepage(struct page *page, struct writeback_control *wbc);
-int ll_writepages(struct address_space *, struct writeback_control *wbc);
-int ll_readpage(struct file *file, struct page *page);
-void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags);
-
-extern const struct address_space_operations ll_aops;
-
-/* llite/file.c */
-extern struct file_operations ll_file_operations;
-extern struct file_operations ll_file_operations_flock;
-extern struct file_operations ll_file_operations_noflock;
-extern struct inode_operations ll_file_inode_operations;
-int ll_have_md_lock(struct inode *inode, __u64 *bits,
- ldlm_mode_t l_req_mode);
-ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- ldlm_mode_t mode);
-int ll_file_open(struct inode *inode, struct file *file);
-int ll_file_release(struct inode *inode, struct file *file);
-int ll_glimpse_ioctl(struct ll_sb_info *sbi,
- struct lov_stripe_md *lsm, lstat_t *st);
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
-int ll_release_openhandle(struct inode *, struct lookup_intent *);
-int ll_md_real_close(struct inode *inode, fmode_t fmode);
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle **och, unsigned long flags);
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
-int ll_som_update(struct inode *inode, struct md_op_data *op_data);
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
- __u64 ioepoch, int sync);
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
- struct lustre_handle *fh);
-int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
-struct posix_acl *ll_get_acl(struct inode *inode, int type);
-
-int ll_inode_permission(struct inode *inode, int mask);
-
-int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum,
- int lum_size);
-int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
- struct lov_mds_md **lmm, int *lmm_size,
- struct ptlrpc_request **request);
-int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
- int set_default);
-int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
- int *lmm_size, struct ptlrpc_request **request);
-int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
-int ll_fid2path(struct inode *inode, void __user *arg);
-int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
-int ll_hsm_release(struct inode *inode);
-
-/* llite/dcache.c */
-
-int ll_d_init(struct dentry *de);
-extern const struct dentry_operations ll_d_ops;
-void ll_intent_drop_lock(struct lookup_intent *);
-void ll_intent_release(struct lookup_intent *);
-void ll_invalidate_aliases(struct inode *);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
-int ll_revalidate_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it, struct inode *inode);
-
-/* llite/llite_lib.c */
-extern struct super_operations lustre_super_operations;
-
-void ll_lli_init(struct ll_inode_info *lli);
-int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
-void ll_put_super(struct super_block *sb);
-void ll_kill_super(struct super_block *sb);
-struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
-void ll_clear_inode(struct inode *inode);
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
-int ll_setattr(struct dentry *de, struct iattr *attr);
-int ll_statfs(struct dentry *de, struct kstatfs *sfs);
-int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags);
-void ll_update_inode(struct inode *inode, struct lustre_md *md);
-void ll_read_inode2(struct inode *inode, void *opaque);
-void ll_delete_inode(struct inode *inode);
-int ll_iocontrol(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
-int ll_flush_ctx(struct inode *inode);
-void ll_umount_begin(struct super_block *sb);
-int ll_remount_fs(struct super_block *sb, int *flags, char *data);
-int ll_show_options(struct seq_file *seq, struct dentry *dentry);
-void ll_dirty_page_discard_warn(struct page *page, int ioret);
-int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
- struct super_block *, struct lookup_intent *);
-int ll_obd_statfs(struct inode *inode, void *arg);
-int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
-int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
-int ll_get_max_cookiesize(struct ll_sb_info *sbi, int *max_cookiesize);
-int ll_get_default_cookiesize(struct ll_sb_info *sbi, int *default_cookiesize);
-int ll_process_config(struct lustre_cfg *lcfg);
-struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
- struct inode *i1, struct inode *i2,
- const char *name, int namelen,
- int mode, __u32 opc, void *data);
-void ll_finish_md_op_data(struct md_op_data *op_data);
-int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
-char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
-void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);
-
-/* llite/llite_nfs.c */
-extern struct export_operations lustre_export_operations;
-__u32 get_uuid2int(const char *name, int len);
-void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
-struct inode *search_inode_for_lustre(struct super_block *sb,
- const struct lu_fid *fid);
-
-/* llite/symlink.c */
-extern struct inode_operations ll_fast_symlink_inode_operations;
-
-/* llite/llite_close.c */
-struct ll_close_queue {
- spinlock_t lcq_lock;
- struct list_head lcq_head;
- wait_queue_head_t lcq_waitq;
- struct completion lcq_comp;
- atomic_t lcq_stop;
-};
-
-struct ccc_object *cl_inode2ccc(struct inode *inode);
-
-void vvp_write_pending(struct ccc_object *club, struct ccc_page *page);
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
-
-/* specific architecture can implement only part of this list */
-enum vvp_io_subtype {
- /** normal IO */
- IO_NORMAL,
- /** io started from splice_{read|write} */
- IO_SPLICE
-};
-
-/* IO subtypes */
-struct vvp_io {
- /** io subtype */
- enum vvp_io_subtype cui_io_subtype;
-
- union {
- struct {
- struct pipe_inode_info *cui_pipe;
- unsigned int cui_flags;
- } splice;
- struct vvp_fault_io {
- /**
- * Inode modification time that is checked across DLM
- * lock request.
- */
- time64_t ft_mtime;
- struct vm_area_struct *ft_vma;
- /**
- * locked page returned from vvp_io
- */
- struct page *ft_vmpage;
- struct vm_fault_api {
- /**
- * kernel fault info
- */
- struct vm_fault *ft_vmf;
- /**
- * fault API used bitflags for return code.
- */
- unsigned int ft_flags;
- /**
- * check that flags are from filemap_fault
- */
- bool ft_flags_valid;
- } fault;
- } fault;
- } u;
- /**
- * Read-ahead state used by read and page-fault IO contexts.
- */
- struct ll_ra_read cui_bead;
- /**
- * Set when cui_bead has been initialized.
- */
- int cui_ra_window_set;
-};
-
-/**
- * IO arguments for various VFS I/O interfaces.
- */
-struct vvp_io_args {
- /** normal/splice */
- enum vvp_io_subtype via_io_subtype;
-
- union {
- struct {
- struct kiocb *via_iocb;
- struct iov_iter *via_iter;
- } normal;
- struct {
- struct pipe_inode_info *via_pipe;
- unsigned int via_flags;
- } splice;
- } u;
-};
-
-struct ll_cl_context {
- void *lcc_cookie;
- struct cl_io *lcc_io;
- struct cl_page *lcc_page;
- struct lu_env *lcc_env;
- int lcc_refcheck;
-};
-
-struct vvp_thread_info {
- struct vvp_io_args vti_args;
- struct ra_io_arg vti_ria;
- struct ll_cl_context vti_io_ctx;
-};
-
-static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
-{
- extern struct lu_context_key vvp_key;
- struct vvp_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &vvp_key);
- LASSERT(info != NULL);
- return info;
-}
-
-static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
- enum vvp_io_subtype type)
-{
- struct vvp_io_args *ret = &vvp_env_info(env)->vti_args;
-
- ret->via_io_subtype = type;
-
- return ret;
-}
-
-struct vvp_session {
- struct vvp_io vs_ios;
-};
-
-static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
-{
- extern struct lu_context_key vvp_session_key;
- struct vvp_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &vvp_session_key);
- LASSERT(ses != NULL);
- return ses;
-}
-
-static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
-{
- return &vvp_env_session(env)->vs_ios;
-}
-
-int vvp_global_init(void);
-void vvp_global_fini(void);
-
-void ll_queue_done_writing(struct inode *inode, unsigned long flags);
-void ll_close_thread_shutdown(struct ll_close_queue *lcq);
-int ll_close_thread_start(struct ll_close_queue **lcq_ret);
-
-/* llite/llite_mmap.c */
-
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
-int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
-void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr, size_t count);
-struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
- size_t count);
-
-static inline void ll_invalidate_page(struct page *vmpage)
-{
- struct address_space *mapping = vmpage->mapping;
- loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
-
- LASSERT(PageLocked(vmpage));
- if (mapping == NULL)
- return;
-
- ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
- truncate_complete_page(mapping, vmpage);
-}
-
-#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
-
-/* don't need an addref as the sb_info should be holding one */
-static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
-{
- return ll_s2sbi(sb)->ll_dt_exp;
-}
-
-/* don't need an addref as the sb_info should be holding one */
-static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
-{
- return ll_s2sbi(sb)->ll_md_exp;
-}
-
-static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
-{
- struct obd_device *obd = sbi->ll_md_exp->exp_obd;
- if (obd == NULL)
- LBUG();
- return &obd->u.cli;
-}
-
-/* FIXME: replace the name of this with LL_SB to conform to kernel stuff */
-static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
-{
- return ll_s2sbi(inode->i_sb);
-}
-
-static inline struct obd_export *ll_i2dtexp(struct inode *inode)
-{
- return ll_s2dtexp(inode->i_sb);
-}
-
-static inline struct obd_export *ll_i2mdexp(struct inode *inode)
-{
- return ll_s2mdexp(inode->i_sb);
-}
-
-static inline struct lu_fid *ll_inode2fid(struct inode *inode)
-{
- struct lu_fid *fid;
-
- LASSERT(inode != NULL);
- fid = &ll_i2info(inode)->lli_fid;
-
- return fid;
-}
-
-static inline __u64 ll_file_maxbytes(struct inode *inode)
-{
- return ll_i2info(inode)->lli_maxbytes;
-}
-
-/* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size);
-ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
-int ll_removexattr(struct dentry *dentry, const char *name);
-
-/* llite/remote_perm.c */
-extern struct kmem_cache *ll_remote_perm_cachep;
-extern struct kmem_cache *ll_rmtperm_hash_cachep;
-
-void free_rmtperm_hash(struct hlist_head *hash);
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
-int lustre_check_remote_perm(struct inode *inode, int mask);
-
-/* llite/llite_cl.c */
-extern struct lu_device_type vvp_device_type;
-
-/**
- * Common IO arguments for various VFS I/O interfaces.
- */
-int cl_sb_init(struct super_block *sb);
-int cl_sb_fini(struct super_block *sb);
-void ll_io_init(struct cl_io *io, const struct file *file, int write);
-
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit);
-void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
-
-/* llite/llite_rmtacl.c */
-#ifdef CONFIG_FS_POSIX_ACL
-struct eacl_entry {
- struct list_head ee_list;
- pid_t ee_key; /* hash key */
- struct lu_fid ee_fid;
- int ee_type; /* ACL type for ACCESS or DEFAULT */
- ext_acl_xattr_header *ee_acl;
-};
-
-u64 rce_ops2valid(int ops);
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key);
-void rct_init(struct rmtacl_ctl_table *rct);
-void rct_fini(struct rmtacl_ctl_table *rct);
-
-void ee_free(struct eacl_entry *ee);
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header);
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type);
-void et_search_free(struct eacl_table *et, pid_t key);
-void et_init(struct eacl_table *et);
-void et_fini(struct eacl_table *et);
-#else
-static inline u64 rce_ops2valid(int ops)
-{
- return 0;
-}
-#endif
-
-/* statahead.c */
-
-#define LL_SA_RPC_MIN 2
-#define LL_SA_RPC_DEF 32
-#define LL_SA_RPC_MAX 8192
-
-#define LL_SA_CACHE_BIT 5
-#define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT)
-#define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1)
-
-/* per inode struct, for dir only */
-struct ll_statahead_info {
- struct inode *sai_inode;
- atomic_t sai_refcount; /* hold a refcount when accessing
- * this struct */
- unsigned int sai_generation; /* generation for statahead */
- unsigned int sai_max; /* max ahead of lookup */
- __u64 sai_sent; /* stat requests sent count */
- __u64 sai_replied; /* stat requests which received
- * reply */
- __u64 sai_index; /* index of statahead entry */
- __u64 sai_index_wait; /* index of the entry that the
- * caller is waiting for */
- __u64 sai_hit; /* hit count */
- __u64 sai_miss; /* miss count:
- * for "ls -al" case, it includes
- * hidden dentry miss;
- * for "ls -l" case, it does not
- * include hidden dentry miss.
- * "sai_miss_hidden" is used for
- * the latter case.
- */
- unsigned int sai_consecutive_miss; /* consecutive miss */
- unsigned int sai_miss_hidden;/* "ls -al", but first dentry
- * is not a hidden one */
- unsigned int sai_skip_hidden;/* skipped hidden dentry count */
- unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
- * hidden entries */
- sai_agl_valid:1;/* AGL is valid for the dir */
- wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
- struct ptlrpc_thread sai_thread; /* stat-ahead thread */
- struct ptlrpc_thread sai_agl_thread; /* AGL thread */
- struct list_head sai_entries; /* entry list */
- struct list_head sai_entries_received; /* entries returned */
- struct list_head sai_entries_stated; /* entries stated */
- struct list_head sai_entries_agl; /* AGL entries to be sent */
- struct list_head sai_cache[LL_SA_CACHE_SIZE];
- spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
- atomic_t sai_cache_count; /* entry count in cache */
-};
-
-int do_statahead_enter(struct inode *dir, struct dentry **dentry,
- int only_unplug);
-void ll_stop_statahead(struct inode *dir, void *key);
-
-static inline int ll_glimpse_size(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
- down_read(&lli->lli_glimpse_sem);
- rc = cl_glimpse_size(inode);
- lli->lli_glimpse_time = cfs_time_current();
- up_read(&lli->lli_glimpse_sem);
- return rc;
-}
-
-static inline void
-ll_statahead_mark(struct inode *dir, struct dentry *dentry)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct ll_dentry_data *ldd = ll_d2d(dentry);
-
- /* not the same process, don't mark */
- if (lli->lli_opendir_pid != current_pid())
- return;
-
- LASSERT(ldd != NULL);
- if (sai != NULL)
- ldd->lld_sa_generation = sai->sai_generation;
-}
-
-static inline int
-d_need_statahead(struct inode *dir, struct dentry *dentryp)
-{
- struct ll_inode_info *lli;
- struct ll_dentry_data *ldd;
-
- if (ll_i2sbi(dir)->ll_sa_max == 0)
- return -EAGAIN;
-
- lli = ll_i2info(dir);
- /* not the same process, don't statahead */
- if (lli->lli_opendir_pid != current_pid())
- return -EAGAIN;
-
- /* statahead has been stopped */
- if (lli->lli_opendir_key == NULL)
- return -EAGAIN;
-
- ldd = ll_d2d(dentryp);
-	/*
-	 * When the system stats a dentry, it triggers "revalidate" or
-	 * "lookup" more than once: for "getattr", for "getxattr", and maybe
-	 * for others. Under patchless client mode, the operation intent is
-	 * not accurate, which may misguide the statahead thread. For example,
-	 * the "revalidate" calls for "getattr" and "getxattr" of a dentry may
-	 * carry the same operation intent -- "IT_GETATTR".
-	 * In fact, one dentry should have only one chance to interact with
-	 * the statahead thread, otherwise the statahead window will be
-	 * confused. The solution is as follows:
-	 * assign "lld_sa_generation" from "sai_generation" when a dentry is
-	 * handled with "IT_GETATTR" for the first time; any subsequent
-	 * "IT_GETATTR" then bypasses the statahead thread because of the
-	 * check "lld_sa_generation == lli_sai->sai_generation".
-	 */
- if (ldd && lli->lli_sai &&
- ldd->lld_sa_generation == lli->lli_sai->sai_generation)
- return -EAGAIN;
-
- return 1;
-}
-
-static inline int
-ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
-{
- int ret;
-
- ret = d_need_statahead(dir, *dentryp);
- if (ret <= 0)
- return ret;
-
- return do_statahead_enter(dir, dentryp, only_unplug);
-}
-
-/* llite ioctl register support routine */
-enum llioc_iter {
- LLIOC_CONT = 0,
- LLIOC_STOP
-};
-
-#define LLIOC_MAX_CMD 256
-
-/*
- * Rules for writing a callback function:
- *
- * Parameters:
- *  @magic: The dynamic ioctl call routine will feed this value with the
- *      pointer returned by ll_iocontrol_register.  Callback functions should
- *      use this data to check for a potential collision of ioctl commands.
- *      If a collision is found, the callback function should return
- *      LLIOC_CONT.
- *  @rcp: The result of the ioctl command.
- *
- *  Return values:
- *      If @magic matches the pointer returned by ll_iocontrol_register, the
- *      callback should return LLIOC_STOP; return LLIOC_CONT otherwise.
- */
-typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
- struct file *file, unsigned int cmd, unsigned long arg,
- void *magic, int *rcp);
-
-/* export functions */
-/* Register an ioctl block dynamically for a regular file.
- *
- * @cmd: the array of ioctl commands
- * @count: number of commands in the @cmd array
- * @cb: callback function; it will be called if an ioctl command is found to
- *      belong to the command list @cmd.
- *
- * Return value:
- *      A magic pointer will be returned on success;
- *      otherwise, NULL will be returned.
- */
-void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
-void ll_iocontrol_unregister(void *magic);
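-
-/*
- * Illustrative sketch only (not part of the original header): how a module
- * might hook a private ioctl through the interface above.  The command
- * value EXAMPLE_IOC_CMD and the callback body are hypothetical; only
- * llioc_callback_t, ll_iocontrol_register() and ll_iocontrol_unregister()
- * are taken from the declarations in this file.
- */
-#if 0	/* example only, never compiled */
-static void *example_magic;
-
-static enum llioc_iter example_cb(struct inode *inode, struct file *file,
-				  unsigned int cmd, unsigned long arg,
-				  void *magic, int *rcp)
-{
-	/* Not registered by us (ioctl command collision): let the dynamic
-	 * ioctl routine keep iterating over the other callbacks. */
-	if (magic != example_magic)
-		return LLIOC_CONT;
-
-	*rcp = 0;		/* handle EXAMPLE_IOC_CMD here */
-	return LLIOC_STOP;	/* command consumed, stop iterating */
-}
-
-static unsigned int example_cmds[] = { EXAMPLE_IOC_CMD };
-
-/* at setup:    example_magic = ll_iocontrol_register(example_cb, 1, example_cmds); */
-/* at teardown: ll_iocontrol_unregister(example_magic); */
-#endif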
-
-
-/* lclient compat stuff */
-#define cl_inode_info ll_inode_info
-#define cl_i2info(info) ll_i2info(info)
-#define cl_inode_mode(inode) ((inode)->i_mode)
-#define cl_i2sbi ll_i2sbi
-
-static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
- const struct iattr *attr)
-{
- LASSERT(attr->ia_valid & ATTR_FILE);
- return LUSTRE_FPRIVATE(attr->ia_file);
-}
-
-static inline void cl_isize_lock(struct inode *inode)
-{
- ll_inode_size_lock(inode);
-}
-
-static inline void cl_isize_unlock(struct inode *inode)
-{
- ll_inode_size_unlock(inode);
-}
-
-static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
-{
- LASSERT(mutex_is_locked(&ll_i2info(inode)->lli_size_mutex));
- i_size_write(inode, kms);
-}
-
-static inline void cl_isize_write(struct inode *inode, loff_t kms)
-{
- ll_inode_size_lock(inode);
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode);
-}
-
-#define cl_isize_read(inode) i_size_read(inode)
-
-static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
-{
- return ll_merge_lvb(env, inode);
-}
-
-#define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
-#define cl_inode_ctime(inode) LTIME_S((inode)->i_ctime)
-#define cl_inode_mtime(inode) LTIME_S((inode)->i_mtime)
-
-int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
- enum cl_fsync_mode mode, int ignore_layout);
-
-/** direct write pages */
-struct ll_dio_pages {
- /** page array to be written. we don't support
- * partial pages except the last one. */
- struct page **ldp_pages;
- /* offset of each page */
- loff_t *ldp_offsets;
-	/** if ldp_offsets is NULL, the pages are written
-	 * sequentially and this is the file offset of the
-	 * first page. */
- loff_t ldp_start_offset;
- /** how many bytes are to be written. */
- size_t ldp_size;
- /** # of pages in the array. */
- int ldp_nr;
-};
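-
-/*
- * Illustrative sketch only (not part of the original header): describing a
- * sequential run of pages with ll_dio_pages for ll_direct_rw_pages().  The
- * identifiers pages, npages and file_offset are hypothetical; the field
- * meanings follow the comments in the structure above.
- */
-#if 0	/* example only, never compiled */
-	struct ll_dio_pages pv = {
-		.ldp_pages	  = pages,	  /* npages page pointers */
-		.ldp_offsets	  = NULL,	  /* NULL: sequential run ... */
-		.ldp_start_offset = file_offset,  /* ... starting here */
-		.ldp_size	  = npages << PAGE_CACHE_SHIFT,
-		.ldp_nr		  = npages,
-	};
-#endif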
-
-static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
- int rc)
-{
- int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
- LPROC_LL_OSC_WRITE;
-
- ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
-}
-
-ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
- int rw, struct inode *inode,
- struct ll_dio_pages *pv);
-
-static inline int ll_file_nolock(const struct file *file)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct inode *inode = file_inode(file);
-
- LASSERT(fd != NULL);
- return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
- (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
-}
-
-static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
- struct lookup_intent *it, __u64 *bits)
-{
- if (!it->d.lustre.it_lock_set) {
- struct lustre_handle handle;
-
-		/* If this inode is a remote object, it will get two
-		 * separate locks in different namespaces: the master MDT,
-		 * where the name entry is, will grant a LOOKUP lock, and
-		 * the remote MDT, where the object is, will grant an
-		 * UPDATE|PERM lock. The inode will be attached to both
-		 * LOOKUP and PERM locks, so revoking either lock will
-		 * cause the dcache to be cleared */
- if (it->d.lustre.it_remote_lock_mode) {
- handle.cookie = it->d.lustre.it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
- inode,
- inode->i_ino, inode->i_generation,
- handle.cookie);
- md_set_lock_data(exp, &handle.cookie, inode, NULL);
- }
-
- handle.cookie = it->d.lustre.it_lock_handle;
-
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u) for lock %#llx\n",
- inode, inode->i_ino,
- inode->i_generation, handle.cookie);
-
- md_set_lock_data(exp, &handle.cookie, inode,
- &it->d.lustre.it_lock_bits);
- it->d.lustre.it_lock_set = 1;
- }
-
- if (bits != NULL)
- *bits = it->d.lustre.it_lock_bits;
-}
-
-static inline void ll_lock_dcache(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
-}
-
-static inline void ll_unlock_dcache(struct inode *inode)
-{
- spin_unlock(&inode->i_lock);
-}
-
-static inline int d_lustre_invalid(const struct dentry *dentry)
-{
- struct ll_dentry_data *lld = ll_d2d(dentry);
-
- return (lld == NULL) || lld->lld_invalid;
-}
-
-static inline void __d_lustre_invalidate(struct dentry *dentry)
-{
- struct ll_dentry_data *lld = ll_d2d(dentry);
-
- if (lld != NULL)
- lld->lld_invalid = 1;
-}
-
-/*
- * Mark the dentry INVALID. If the dentry refcount is zero (normally the case
- * for ll_md_blocking_ast), unhash it and let the dcache reclaim it later;
- * otherwise dput() of the last reference will unhash the dentry and kill it.
- */
-static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
-{
- CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
- dentry, dentry,
- dentry->d_parent, d_inode(dentry), d_count(dentry));
-
- spin_lock_nested(&dentry->d_lock,
- nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
- __d_lustre_invalidate(dentry);
- /*
- * We should be careful about dentries created by d_obtain_alias().
- * These dentries are not put in the dentry tree, instead they are
- * linked to sb->s_anon through dentry->d_hash.
- * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
- * If we unhashed such a dentry, unmount would not be able to find
- * it and busy inodes would be reported.
- */
- if (d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
-}
-
-static inline void d_lustre_revalidate(struct dentry *dentry)
-{
- spin_lock(&dentry->d_lock);
- LASSERT(ll_d2d(dentry) != NULL);
- ll_d2d(dentry)->lld_invalid = 0;
- spin_unlock(&dentry->d_lock);
-}
-
-enum {
- LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
- LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
-};
-
-int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
-int ll_layout_refresh(struct inode *inode, __u32 *gen);
-int ll_layout_restore(struct inode *inode);
-
-int ll_xattr_init(void);
-void ll_xattr_fini(void);
-
-#endif /* LLITE_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
deleted file mode 100644
index b6234b294d0d..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ /dev/null
@@ -1,2315 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_lib.c
- *
- * Lustre Light Super operations
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/statfs.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_disk.h"
-#include "../include/lustre_param.h"
-#include "../include/lustre_log.h"
-#include "../include/cl_object.h"
-#include "../include/obd_cksum.h"
-#include "llite_internal.h"
-
-struct kmem_cache *ll_file_data_slab;
-struct dentry *llite_root;
-struct kset *llite_kset;
-
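-/* ffz(~(n)) is the index of the lowest set bit of n, i.e. log2(n) when n is
- * a power of two -- sufficient for the power-of-two block size os_bsize this
- * is applied to below. */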
-#ifndef log2
-#define log2(n) ffz(~(n))
-#endif
-
-static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
-{
- struct ll_sb_info *sbi = NULL;
- unsigned long pages;
- unsigned long lru_page_max;
- struct sysinfo si;
- class_uuid_t uuid;
- int i;
-
- sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
- if (!sbi)
- return NULL;
-
- spin_lock_init(&sbi->ll_lock);
- mutex_init(&sbi->ll_lco.lco_lock);
- spin_lock_init(&sbi->ll_pp_extent_lock);
- spin_lock_init(&sbi->ll_process_lock);
- sbi->ll_rw_stats_on = 0;
-
- si_meminfo(&si);
- pages = si.totalram - si.totalhigh;
- if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
- lru_page_max = pages / 2;
- else
- lru_page_max = (pages / 4) * 3;
-
- /* initialize lru data */
- atomic_set(&sbi->ll_cache.ccc_users, 0);
- sbi->ll_cache.ccc_lru_max = lru_page_max;
- atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
- spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
- INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
-
- sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
- SBI_DEFAULT_READAHEAD_MAX);
- sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
- sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
- SBI_DEFAULT_READAHEAD_WHOLE_MAX;
- INIT_LIST_HEAD(&sbi->ll_conn_chain);
- INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
-
- ll_generate_random_uuid(uuid);
- class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
- CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
-
- sbi->ll_flags |= LL_SBI_VERBOSE;
- sbi->ll_flags |= LL_SBI_CHECKSUM;
-
- sbi->ll_flags |= LL_SBI_LRU_RESIZE;
-
- for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
- spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
- pp_r_hist.oh_lock);
- spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
- pp_w_hist.oh_lock);
- }
-
- /* metadata statahead is enabled by default */
- sbi->ll_sa_max = LL_SA_RPC_DEF;
- atomic_set(&sbi->ll_sa_total, 0);
- atomic_set(&sbi->ll_sa_wrong, 0);
- atomic_set(&sbi->ll_agl_total, 0);
- sbi->ll_flags |= LL_SBI_AGL_ENABLED;
-
- sbi->ll_sb = sb;
-
- return sbi;
-}
-
-static void ll_free_sbi(struct super_block *sb)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
- kfree(sbi);
-}
-
-static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
- struct vfsmount *mnt)
-{
- struct inode *root = NULL;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_device *obd;
- struct obd_statfs *osfs = NULL;
- struct ptlrpc_request *request = NULL;
- struct obd_connect_data *data = NULL;
- struct obd_uuid *uuid;
- struct md_op_data *op_data;
- struct lustre_md lmd;
- u64 valid;
- int size, err, checksum;
-
- obd = class_name2obd(md);
- if (!obd) {
- CERROR("MD %s: not setup or attached\n", md);
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(*data), GFP_NOFS);
- if (!data)
- return -ENOMEM;
-
- osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
- if (!osfs) {
- kfree(data);
- return -ENOMEM;
- }
-
- if (llite_root != NULL) {
- err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
- if (err < 0)
- CERROR("could not register mount in <debugfs>/lustre/llite\n");
- }
-
- /* indicate the features supported by this client */
- data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
- OBD_CONNECT_ATTRFID |
- OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
- OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
- OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
- OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
- OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
- OBD_CONNECT_EINPROGRESS |
- OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK |
- OBD_CONNECT_PINGLESS |
- OBD_CONNECT_MAX_EASIZE |
- OBD_CONNECT_FLOCK_DEAD |
- OBD_CONNECT_DISP_STRIPE;
-
- if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
- data->ocd_connect_flags |= OBD_CONNECT_SOM;
-
- if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
- data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
-#ifdef CONFIG_FS_POSIX_ACL
- data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
-#endif
-
- if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
-		/* flag mdc connection as lightweight, only used for test
-		 * purposes; use with care */
- data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
-
- data->ocd_ibits_known = MDS_INODELOCK_FULL;
- data->ocd_version = LUSTRE_VERSION_CODE;
-
- if (sb->s_flags & MS_RDONLY)
- data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
- if (sbi->ll_flags & LL_SBI_USER_XATTR)
- data->ocd_connect_flags |= OBD_CONNECT_XATTR;
-
- if (sbi->ll_flags & LL_SBI_FLOCK)
- sbi->ll_fop = &ll_file_operations_flock;
- else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
- sbi->ll_fop = &ll_file_operations;
- else
- sbi->ll_fop = &ll_file_operations_noflock;
-
- /* real client */
- data->ocd_connect_flags |= OBD_CONNECT_REAL;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
-
- data->ocd_brw_size = MD_MAX_BRW_SIZE;
-
- err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
- data, NULL);
- if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
- md);
- goto out;
- } else if (err) {
- CERROR("cannot connect to %s: rc = %d\n", md, err);
- goto out;
- }
-
- sbi->ll_md_exp->exp_connect_data = *data;
-
- err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
- LUSTRE_SEQ_METADATA);
- if (err) {
- CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
- sbi->ll_md_exp->exp_obd->obd_name, err);
- goto out_md;
- }
-
-	/* For mount we only need fs info from MDT0; in DNE this also
-	 * ensures the client can be mounted as long as MDT0 is
-	 * available */
- err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_FOR_MDT0);
- if (err)
- goto out_md_fid;
-
- /* This needs to be after statfs to ensure connect has finished.
- * Note that "data" does NOT contain the valid connect reply.
- * If connecting to a 1.8 server there will be no LMV device, so
- * we can access the MDC export directly and exp_connect_flags will
- * be non-zero, but if accessing an upgraded 2.1 server it will
- * have the correct flags filled in.
- * XXX: fill in the LMV exp_connect_flags from MDC(s). */
- valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
- if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
- valid != CLIENT_CONNECT_MDT_REQD) {
- char *buf;
-
- buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto out_md_fid;
- }
- obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
- valid ^ CLIENT_CONNECT_MDT_REQD, ",");
- LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
- sbi->ll_md_exp->exp_obd->obd_name, buf);
- kfree(buf);
- err = -EPROTO;
- goto out_md_fid;
- }
-
- size = sizeof(*data);
- err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
- KEY_CONN_DATA, &size, data, NULL);
- if (err) {
- CERROR("%s: Get connect data failed: rc = %d\n",
- sbi->ll_md_exp->exp_obd->obd_name, err);
- goto out_md_fid;
- }
-
- LASSERT(osfs->os_bsize);
- sb->s_blocksize = osfs->os_bsize;
- sb->s_blocksize_bits = log2(osfs->os_bsize);
- sb->s_magic = LL_SUPER_MAGIC;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sbi->ll_namelen = osfs->os_namelen;
-
- if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
- !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
- LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
-
- if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
-#ifdef MS_POSIXACL
- sb->s_flags |= MS_POSIXACL;
-#endif
- sbi->ll_flags |= LL_SBI_ACL;
- } else {
-		LCONSOLE_INFO("client wants to enable acl, but mdt does not!\n");
-#ifdef MS_POSIXACL
- sb->s_flags &= ~MS_POSIXACL;
-#endif
- sbi->ll_flags &= ~LL_SBI_ACL;
- }
-
- if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
- if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
- sbi->ll_flags |= LL_SBI_RMT_CLIENT;
- LCONSOLE_INFO("client is set as remote by default.\n");
- }
- } else {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
-			LCONSOLE_INFO("client claims to be remote, but the server rejected it; forced to be local.\n");
- }
- }
-
- if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
- sbi->ll_flags |= LL_SBI_64BIT_HASH;
-
- if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
- sbi->ll_md_brw_size = data->ocd_brw_size;
- else
- sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
-
- if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
- LCONSOLE_INFO("Layout lock feature supported.\n");
- sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
- }
-
- if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
- if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
- LCONSOLE_INFO(
- "%s: disabling xattr cache due to unknown maximum xattr size.\n",
- dt);
- } else {
- sbi->ll_flags |= LL_SBI_XATTR_CACHE;
- sbi->ll_xattr_cache_enabled = 1;
- }
- }
-
- obd = class_name2obd(dt);
- if (!obd) {
- CERROR("DT %s: not setup or attached\n", dt);
- err = -ENODEV;
- goto out_md_fid;
- }
-
- data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
- OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
- OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
- OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
- OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
- OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
- OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
- OBD_CONNECT_MAXBYTES |
- OBD_CONNECT_EINPROGRESS |
- OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
-
- if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
- data->ocd_connect_flags |= OBD_CONNECT_SOM;
-
- if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
- /* OBD_CONNECT_CKSUM should always be set, even if checksums are
- * disabled by default, because it can still be enabled on the
- * fly via /sys. As a consequence, we still need to come to an
- * agreement on the supported algorithms at connect time */
- data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
- data->ocd_cksum_types = OBD_CKSUM_ADLER;
- else
- data->ocd_cksum_types = cksum_types_supported_client();
- }
-
- data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
-
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
- data->ocd_connect_flags,
- data->ocd_version, data->ocd_grant);
-
- obd->obd_upcall.onu_owner = &sbi->ll_lco;
- obd->obd_upcall.onu_upcall = cl_ocd_update;
-
- data->ocd_brw_size = DT_MAX_BRW_SIZE;
-
- err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
- NULL);
- if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
- dt);
- goto out_md;
- } else if (err) {
- CERROR("%s: Cannot connect to %s: rc = %d\n",
- sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
- goto out_md;
- }
-
- sbi->ll_dt_exp->exp_connect_data = *data;
-
- err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
- LUSTRE_SEQ_METADATA);
- if (err) {
- CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
- sbi->ll_dt_exp->exp_obd->obd_name, err);
- goto out_dt;
- }
-
- mutex_lock(&sbi->ll_lco.lco_lock);
- sbi->ll_lco.lco_flags = data->ocd_connect_flags;
- sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
- sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
- mutex_unlock(&sbi->ll_lco.lco_lock);
-
- fid_zero(&sbi->ll_root_fid);
- err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
- if (err) {
- CERROR("cannot mds_connect: rc = %d\n", err);
- goto out_lock_cn_cb;
- }
- if (!fid_is_sane(&sbi->ll_root_fid)) {
- CERROR("%s: Invalid root fid "DFID" during mount\n",
- sbi->ll_md_exp->exp_obd->obd_name,
- PFID(&sbi->ll_root_fid));
- err = -EINVAL;
- goto out_lock_cn_cb;
- }
- CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
-
- sb->s_op = &lustre_super_operations;
-#if THREAD_SIZE >= 8192 /*b=17630*/
- sb->s_export_op = &lustre_export_operations;
-#endif
-
- /* make root inode
- * XXX: move this to after cbd setup? */
- valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- valid |= OBD_MD_FLRMTPERM;
- else if (sbi->ll_flags & LL_SBI_ACL)
- valid |= OBD_MD_FLACL;
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- err = -ENOMEM;
- goto out_lock_cn_cb;
- }
-
- op_data->op_fid1 = sbi->ll_root_fid;
- op_data->op_mode = 0;
- op_data->op_valid = valid;
-
- err = md_getattr(sbi->ll_md_exp, op_data, &request);
- kfree(op_data);
- if (err) {
- CERROR("%s: md_getattr failed for root: rc = %d\n",
- sbi->ll_md_exp->exp_obd->obd_name, err);
- goto out_lock_cn_cb;
- }
-
- err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
- sbi->ll_md_exp, &lmd);
- if (err) {
- CERROR("failed to understand root inode md: rc = %d\n", err);
- ptlrpc_req_finished(request);
- goto out_lock_cn_cb;
- }
-
- LASSERT(fid_is_sane(&sbi->ll_root_fid));
- root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
- sbi->ll_flags & LL_SBI_32BIT_API),
- &lmd);
- md_free_lustre_md(sbi->ll_md_exp, &lmd);
- ptlrpc_req_finished(request);
-
- if (root == NULL || IS_ERR(root)) {
- if (lmd.lsm)
- obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
-#ifdef CONFIG_FS_POSIX_ACL
- if (lmd.posix_acl) {
- posix_acl_release(lmd.posix_acl);
- lmd.posix_acl = NULL;
- }
-#endif
- err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
- root = NULL;
- CERROR("lustre_lite: bad iget4 for root\n");
- goto out_root;
- }
-
- err = ll_close_thread_start(&sbi->ll_lcq);
- if (err) {
- CERROR("cannot start close thread: rc %d\n", err);
- goto out_root;
- }
-
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- rct_init(&sbi->ll_rct);
- et_init(&sbi->ll_et);
- }
-#endif
-
- checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
- err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
- KEY_CHECKSUM, sizeof(checksum), &checksum,
- NULL);
- cl_sb_init(sb);
-
- err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
- KEY_CACHE_SET, sizeof(sbi->ll_cache),
- &sbi->ll_cache, NULL);
-
- sb->s_root = d_make_root(root);
- if (sb->s_root == NULL) {
- CERROR("%s: can't make root dentry\n",
- ll_get_fsname(sb, NULL, 0));
- err = -ENOMEM;
- goto out_lock_cn_cb;
- }
-
- sbi->ll_sdev_orig = sb->s_dev;
-
- /* We set sb->s_dev equal on all lustre clients in order to support
- * NFS export clustering. NFSD requires that the FSID be the same
- * on all clients. */
- /* s_dev is also used in lt_compare() to compare two fs, but that is
- * only a node-local comparison. */
- uuid = obd_get_uuid(sbi->ll_md_exp);
- if (uuid != NULL) {
- sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
- get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
- }
-
- kfree(data);
- kfree(osfs);
-
- return err;
-out_root:
- iput(root);
-out_lock_cn_cb:
- obd_fid_fini(sbi->ll_dt_exp->exp_obd);
-out_dt:
- obd_disconnect(sbi->ll_dt_exp);
- sbi->ll_dt_exp = NULL;
- /* Make sure all OScs are gone, since cl_cache is accessing sbi. */
- obd_zombie_barrier();
-out_md_fid:
- obd_fid_fini(sbi->ll_md_exp->exp_obd);
-out_md:
- obd_disconnect(sbi->ll_md_exp);
- sbi->ll_md_exp = NULL;
-out:
- kfree(data);
- kfree(osfs);
- ldebugfs_unregister_mountpoint(sbi);
- return err;
-}
-
-int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
-{
- int size, rc;
-
- *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
- size = sizeof(int);
- rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
- KEY_MAX_EASIZE, &size, lmmsize, NULL);
- if (rc)
- CERROR("Get max mdsize error rc %d\n", rc);
-
- return rc;
-}
-
-int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
-{
- int size, rc;
-
- size = sizeof(int);
- rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
- KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
- if (rc)
- CERROR("Get default mdsize error rc %d\n", rc);
-
- return rc;
-}
-
-int ll_get_max_cookiesize(struct ll_sb_info *sbi, int *lmmsize)
-{
- int size, rc;
-
- size = sizeof(int);
- rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_COOKIESIZE),
- KEY_MAX_COOKIESIZE, &size, lmmsize, NULL);
- if (rc)
- CERROR("Get max cookiesize error rc %d\n", rc);
-
- return rc;
-}
-
-int ll_get_default_cookiesize(struct ll_sb_info *sbi, int *lmmsize)
-{
- int size, rc;
-
- size = sizeof(int);
- rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_COOKIESIZE),
- KEY_DEFAULT_COOKIESIZE, &size, lmmsize, NULL);
- if (rc)
- CERROR("Get default cookiesize error rc %d\n", rc);
-
- return rc;
-}
-
-static void client_common_put_super(struct super_block *sb)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- et_fini(&sbi->ll_et);
- rct_fini(&sbi->ll_rct);
- }
-#endif
-
- ll_close_thread_shutdown(sbi->ll_lcq);
-
- cl_sb_fini(sb);
-
- list_del(&sbi->ll_conn_chain);
-
- obd_fid_fini(sbi->ll_dt_exp->exp_obd);
- obd_disconnect(sbi->ll_dt_exp);
- sbi->ll_dt_exp = NULL;
- /* wait till all OSCs are gone, since cl_cache is accessing sbi.
- * see LU-2543. */
- obd_zombie_barrier();
-
- ldebugfs_unregister_mountpoint(sbi);
-
- obd_fid_fini(sbi->ll_md_exp->exp_obd);
- obd_disconnect(sbi->ll_md_exp);
- sbi->ll_md_exp = NULL;
-}
-
-void ll_kill_super(struct super_block *sb)
-{
- struct ll_sb_info *sbi;
-
-	/* sb not initialized? */
- if (!(sb->s_flags & MS_ACTIVE))
- return;
-
- sbi = ll_s2sbi(sb);
-	/* we need to restore the s_dev that was changed for clustered NFS
-	 * before put_super, because newer kernels cache s_dev and changing
-	 * sb->s_dev in put_super does not affect the real device removal */
- if (sbi) {
- sb->s_dev = sbi->ll_sdev_orig;
- sbi->ll_umounting = 1;
- }
-}
-
-static inline int ll_set_opt(const char *opt, char *data, int fl)
-{
- if (strncmp(opt, data, strlen(opt)) != 0)
- return 0;
- else
- return fl;
-}
-
-/* non-client-specific mount options are parsed in lmd_parse */
-static int ll_options(char *options, int *flags)
-{
- int tmp;
- char *s1 = options, *s2;
-
- if (!options)
- return 0;
-
- CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
-
- while (*s1) {
- CDEBUG(D_SUPER, "next opt=%s\n", s1);
- tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
-
- tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
- tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
- if (tmp) {
- *flags &= ~tmp;
- goto next;
- }
- LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
- s1);
- return -EINVAL;
-
-next:
- /* Find next opt */
- s2 = strchr(s1, ',');
- if (s2 == NULL)
- break;
- s1 = s2 + 1;
- }
- return 0;
-}
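-
-/*
- * Worked example: with options "flock,noverbose,nouser_xattr" the loop
- * above sets LL_SBI_FLOCK and clears LL_SBI_VERBOSE and LL_SBI_USER_XATTR
- * in *flags.
- */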
-
-void ll_lli_init(struct ll_inode_info *lli)
-{
- lli->lli_inode_magic = LLI_INODE_MAGIC;
- lli->lli_flags = 0;
- lli->lli_ioepoch = 0;
- lli->lli_maxbytes = MAX_LFS_FILESIZE;
- spin_lock_init(&lli->lli_lock);
- lli->lli_posix_acl = NULL;
- lli->lli_remote_perms = NULL;
- mutex_init(&lli->lli_rmtperm_mutex);
- /* Do not set lli_fid, it has been initialized already. */
- fid_zero(&lli->lli_pfid);
- INIT_LIST_HEAD(&lli->lli_close_list);
- atomic_set(&lli->lli_open_count, 0);
- lli->lli_rmtperm_time = 0;
- lli->lli_pending_och = NULL;
- lli->lli_mds_read_och = NULL;
- lli->lli_mds_write_och = NULL;
- lli->lli_mds_exec_och = NULL;
- lli->lli_open_fd_read_count = 0;
- lli->lli_open_fd_write_count = 0;
- lli->lli_open_fd_exec_count = 0;
- mutex_init(&lli->lli_och_mutex);
- spin_lock_init(&lli->lli_agl_lock);
- lli->lli_has_smd = false;
- spin_lock_init(&lli->lli_layout_lock);
- ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
- lli->lli_clob = NULL;
-
- init_rwsem(&lli->lli_xattrs_list_rwsem);
- mutex_init(&lli->lli_xattrs_enq_lock);
-
- LASSERT(lli->lli_vfs_inode.i_mode != 0);
- if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
- mutex_init(&lli->lli_readdir_mutex);
- lli->lli_opendir_key = NULL;
- lli->lli_sai = NULL;
- spin_lock_init(&lli->lli_sa_lock);
- lli->lli_opendir_pid = 0;
- } else {
- mutex_init(&lli->lli_size_mutex);
- lli->lli_symlink_name = NULL;
- init_rwsem(&lli->lli_trunc_sem);
- mutex_init(&lli->lli_write_mutex);
- init_rwsem(&lli->lli_glimpse_sem);
- lli->lli_glimpse_time = 0;
- INIT_LIST_HEAD(&lli->lli_agl_list);
- lli->lli_agl_index = 0;
- lli->lli_async_rc = 0;
- }
- mutex_init(&lli->lli_layout_mutex);
-}
-
-static inline int ll_bdi_register(struct backing_dev_info *bdi)
-{
- static atomic_t ll_bdi_num = ATOMIC_INIT(0);
-
- bdi->name = "lustre";
- return bdi_register(bdi, NULL, "lustre-%d",
- atomic_inc_return(&ll_bdi_num));
-}
-
-int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
-{
- struct lustre_profile *lprof = NULL;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct ll_sb_info *sbi;
- char *dt = NULL, *md = NULL;
- char *profilenm = get_profile_name(sb);
- struct config_llog_instance *cfg;
- int err;
-
- CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
-
- cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
- if (!cfg)
- return -ENOMEM;
-
- try_module_get(THIS_MODULE);
-
- /* client additional sb info */
- lsi->lsi_llsbi = sbi = ll_init_sbi(sb);
- if (!sbi) {
- module_put(THIS_MODULE);
- kfree(cfg);
- return -ENOMEM;
- }
-
- err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
- if (err)
- goto out_free;
-
- err = bdi_init(&lsi->lsi_bdi);
- if (err)
- goto out_free;
- lsi->lsi_flags |= LSI_BDI_INITIALIZED;
- lsi->lsi_bdi.capabilities = 0;
- err = ll_bdi_register(&lsi->lsi_bdi);
- if (err)
- goto out_free;
-
- sb->s_bdi = &lsi->lsi_bdi;
-	/* kernel >= 2.6.38 stores dentry operations in sb->s_d_op. */
- sb->s_d_op = &ll_d_ops;
-
-	/* Generate a string unique to this super, in case some joker tries
-	 * to mount the same fs at two mount points.
-	 * Use the address of the super itself. */
- cfg->cfg_instance = sb;
- cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
- cfg->cfg_callback = class_config_llog_handler;
- /* set up client obds */
- err = lustre_process_log(sb, profilenm, cfg);
- if (err < 0) {
- CERROR("Unable to process log: %d\n", err);
- goto out_free;
- }
-
- /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
- lprof = class_get_profile(profilenm);
- if (lprof == NULL) {
- LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
- profilenm);
- err = -EINVAL;
- goto out_free;
- }
- CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
- lprof->lp_md, lprof->lp_dt);
-
- dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
- if (!dt) {
- err = -ENOMEM;
- goto out_free;
- }
-
- md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
- if (!md) {
- err = -ENOMEM;
- goto out_free;
- }
-
- /* connections, registrations, sb setup */
- err = client_common_fill_super(sb, md, dt, mnt);
-
-out_free:
- kfree(md);
- kfree(dt);
- if (err)
- ll_put_super(sb);
- else if (sbi->ll_flags & LL_SBI_VERBOSE)
- LCONSOLE_WARN("Mounted %s\n", profilenm);
-
- kfree(cfg);
- return err;
-} /* ll_fill_super */
-
-void ll_put_super(struct super_block *sb)
-{
- struct config_llog_instance cfg, params_cfg;
- struct obd_device *obd;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- char *profilenm = get_profile_name(sb);
- int next, force = 1;
-
- CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
-
- cfg.cfg_instance = sb;
- lustre_end_log(sb, profilenm, &cfg);
-
- params_cfg.cfg_instance = sb;
- lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
-
- if (sbi->ll_md_exp) {
- obd = class_exp2obd(sbi->ll_md_exp);
- if (obd)
- force = obd->obd_force;
- }
-
- /* We need to set force before the lov_disconnect in
- lustre_common_put_super, since l_d cleans up osc's as well. */
- if (force) {
- next = 0;
- while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
- &next)) != NULL) {
- obd->obd_force = force;
- }
- }
-
- if (sbi->ll_lcq) {
- /* Only if client_common_fill_super succeeded */
- client_common_put_super(sb);
- }
-
- next = 0;
- while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
- class_manual_cleanup(obd);
-
- if (sbi->ll_flags & LL_SBI_VERBOSE)
- LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
-
- if (profilenm)
- class_del_profile(profilenm);
-
- if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
- bdi_destroy(&lsi->lsi_bdi);
- lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
- }
-
- ll_free_sbi(sb);
- lsi->lsi_llsbi = NULL;
-
- lustre_common_put_super(sb);
-
- module_put(THIS_MODULE);
-} /* client_put_super */
-
-struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
-{
- struct inode *inode = NULL;
-
- /* NOTE: we depend on atomic igrab() -bzzz */
- lock_res_and_lock(lock);
- if (lock->l_resource->lr_lvb_inode) {
- struct ll_inode_info *lli;
-
- lli = ll_i2info(lock->l_resource->lr_lvb_inode);
- if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
- inode = igrab(lock->l_resource->lr_lvb_inode);
- } else {
- inode = lock->l_resource->lr_lvb_inode;
- LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
- D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
- lock->l_resource->lr_lvb_inode,
- lli->lli_inode_magic);
- inode = NULL;
- }
- }
- unlock_res_and_lock(lock);
- return inode;
-}
-
-void ll_clear_inode(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
-
- if (S_ISDIR(inode->i_mode)) {
- /* these should have been cleared in ll_file_release */
- LASSERT(lli->lli_opendir_key == NULL);
- LASSERT(lli->lli_sai == NULL);
- LASSERT(lli->lli_opendir_pid == 0);
- }
-
- spin_lock(&lli->lli_lock);
- ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
- spin_unlock(&lli->lli_lock);
- md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
-
- LASSERT(!lli->lli_open_fd_write_count);
- LASSERT(!lli->lli_open_fd_read_count);
- LASSERT(!lli->lli_open_fd_exec_count);
-
- if (lli->lli_mds_write_och)
- ll_md_real_close(inode, FMODE_WRITE);
- if (lli->lli_mds_exec_och)
- ll_md_real_close(inode, FMODE_EXEC);
- if (lli->lli_mds_read_och)
- ll_md_real_close(inode, FMODE_READ);
-
- if (S_ISLNK(inode->i_mode)) {
- kfree(lli->lli_symlink_name);
- lli->lli_symlink_name = NULL;
- }
-
- ll_xattr_cache_destroy(inode);
-
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- LASSERT(lli->lli_posix_acl == NULL);
- if (lli->lli_remote_perms) {
- free_rmtperm_hash(lli->lli_remote_perms);
- lli->lli_remote_perms = NULL;
- }
- }
-#ifdef CONFIG_FS_POSIX_ACL
- else if (lli->lli_posix_acl) {
- LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
- LASSERT(lli->lli_remote_perms == NULL);
- posix_acl_release(lli->lli_posix_acl);
- lli->lli_posix_acl = NULL;
- }
-#endif
- lli->lli_inode_magic = LLI_INODE_DEAD;
-
- if (!S_ISDIR(inode->i_mode))
- LASSERT(list_empty(&lli->lli_agl_list));
-
- /*
- * XXX This has to be done before lsm is freed below, because
- * cl_object still uses inode lsm.
- */
- cl_inode_fini(inode);
- lli->lli_has_smd = false;
-}
-
-#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
-
-static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
- struct md_open_data **mod)
-{
- struct lustre_md md;
- struct inode *inode = d_inode(dentry);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *request = NULL;
- int rc, ia_valid;
-
- op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
- &request, mod);
- if (rc) {
- ptlrpc_req_finished(request);
- if (rc == -ENOENT) {
- clear_nlink(inode);
-			/* Unlinked special device node? Or just a race?
-			 * Pretend we have done everything. */
- if (!S_ISREG(inode->i_mode) &&
- !S_ISDIR(inode->i_mode)) {
- ia_valid = op_data->op_attr.ia_valid;
- op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
- rc = simple_setattr(dentry, &op_data->op_attr);
- op_data->op_attr.ia_valid = ia_valid;
- }
- } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
- CERROR("md_setattr fails: rc = %d\n", rc);
- }
- return rc;
- }
-
- rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
- sbi->ll_md_exp, &md);
- if (rc) {
- ptlrpc_req_finished(request);
- return rc;
- }
-
- ia_valid = op_data->op_attr.ia_valid;
-	/* inode size will be set in cl_setattr_ost; we can't do it now since
-	 * the dirty cache is not cleared yet. */
- op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
- rc = simple_setattr(dentry, &op_data->op_attr);
- op_data->op_attr.ia_valid = ia_valid;
-
- /* Extract epoch data if obtained. */
- op_data->op_handle = md.body->handle;
- op_data->op_ioepoch = md.body->ioepoch;
-
- ll_update_inode(inode, &md);
- ptlrpc_req_finished(request);
-
- return rc;
-}
-
-/* Close IO epoch and send Size-on-MDS attribute update. */
-static int ll_setattr_done_writing(struct inode *inode,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc = 0;
-
- LASSERT(op_data != NULL);
- if (!S_ISREG(inode->i_mode))
- return 0;
-
- CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
- op_data->op_ioepoch, PFID(&lli->lli_fid));
-
- op_data->op_flags = MF_EPOCH_CLOSE;
- ll_done_writing_attr(inode, op_data);
- ll_pack_inode2opdata(inode, op_data, NULL);
-
- rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
- if (rc == -EAGAIN)
-		/* MDS has instructed us to obtain the Size-on-MDS attribute
-		 * from the OSTs and send a setattr back to the MDS. */
- rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc truncate failed: rc = %d\n",
- inode->i_ino, rc);
- return rc;
-}
-
-/* If this inode has objects allocated to it (lsm != NULL), then the OST
- * object(s) determine the file size and mtime. Otherwise, the MDS will
- * keep these values until such time as objects are allocated for it.
- * We do the MDS operations first, as it is checking permissions for us.
- * We don't do the MDS RPC if there is nothing that we want to store there,
- * but otherwise there is no harm in updating mtime/atime on the MDS if we
- * are going to do an RPC anyway.
- *
- * If we are doing a truncate, we will send the mtime and ctime updates
- * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
- * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
- * at the same time.
- *
- * In the case of HSM import, we only set the attributes on the MDS.
- */
-int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
-{
- struct inode *inode = d_inode(dentry);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct md_op_data *op_data = NULL;
- struct md_open_data *mod = NULL;
- bool file_is_released = false;
- int rc = 0, rc1 = 0;
-
- CDEBUG(D_VFSTRACE,
- "%s: setattr inode %p/fid:"DFID
- " from %llu to %llu, valid %x, hsm_import %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
- attr->ia_valid, hsm_import);
-
- if (attr->ia_valid & ATTR_SIZE) {
- /* Check new size against VFS/VM file size limit and rlimit */
- rc = inode_newsize_ok(inode, attr->ia_size);
- if (rc)
- return rc;
-
- /* The maximum Lustre file size is variable, based on the
- * OST maximum object size and number of stripes. This
- * needs another check in addition to the VFS check above. */
- if (attr->ia_size > ll_file_maxbytes(inode)) {
- CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
- PFID(&lli->lli_fid), attr->ia_size,
- ll_file_maxbytes(inode));
- return -EFBIG;
- }
-
- attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
- }
-
- /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
- if (attr->ia_valid & TIMES_SET_FLAGS) {
- if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
- !capable(CFS_CAP_FOWNER))
- return -EPERM;
- }
-
- /* We mark all of the fields "set" so MDS/OST does not re-set them */
- if (attr->ia_valid & ATTR_CTIME) {
- attr->ia_ctime = CURRENT_TIME;
- attr->ia_valid |= ATTR_CTIME_SET;
- }
- if (!(attr->ia_valid & ATTR_ATIME_SET) &&
- (attr->ia_valid & ATTR_ATIME)) {
- attr->ia_atime = CURRENT_TIME;
- attr->ia_valid |= ATTR_ATIME_SET;
- }
- if (!(attr->ia_valid & ATTR_MTIME_SET) &&
- (attr->ia_valid & ATTR_MTIME)) {
- attr->ia_mtime = CURRENT_TIME;
- attr->ia_valid |= ATTR_MTIME_SET;
- }
-
- if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
- CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
- LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
- (s64)ktime_get_real_seconds());
-
- /* If we are changing file size, file content is modified, flag it. */
- if (attr->ia_valid & ATTR_SIZE) {
- attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- /* We always do an MDS RPC, even if we're only changing the size;
- * only the MDS knows whether truncate() should fail with -ETXTBUSY */
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return -ENOMEM;
-
- if (!S_ISDIR(inode->i_mode))
- mutex_unlock(&inode->i_mutex);
-
- memcpy(&op_data->op_attr, attr, sizeof(*attr));
-
- /* Open epoch for truncate. */
- if (exp_connect_som(ll_i2mdexp(inode)) &&
- (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
- op_data->op_flags = MF_EPOCH_OPEN;
-
-	/* truncate on a released file must fail with -ENODATA,
-	 * so the size must not be set on the MDS for a released file,
-	 * but the other attributes must still be set
-	 */
- if (S_ISREG(inode->i_mode)) {
- struct lov_stripe_md *lsm;
- __u32 gen;
-
- ll_layout_refresh(inode, &gen);
- lsm = ccc_inode_lsm_get(inode);
- if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
- file_is_released = true;
- ccc_inode_lsm_put(inode, lsm);
- }
-
-	/* if not in HSM import mode, clear the size attribute for a released
-	 * file: we clear the attribute sent to the MDT in op_data, not the
-	 * original received from the caller in attr, which is used later to
-	 * decide the return code */
- if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
- op_data->op_attr.ia_valid &= ~ATTR_SIZE;
-
- rc = ll_md_setattr(dentry, op_data, &mod);
- if (rc)
- goto out;
-
-	/* truncate fails (only when not an HSM import), other attributes succeed */
- if (file_is_released) {
- if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
- rc = -ENODATA;
- else
- rc = 0;
- goto out;
- }
-
- /* RPC to MDT is sent, cancel data modification flag */
- if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- ll_ioepoch_open(lli, op_data->op_ioepoch);
- if (!S_ISREG(inode->i_mode)) {
- rc = 0;
- goto out;
- }
-
- if (attr->ia_valid & (ATTR_SIZE |
- ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET)) {
- /* For truncate and utimes sending attributes to OSTs, setting
- * mtime/atime to the past will be performed under PW [0:EOF]
- * extent lock (new_size:EOF for truncate). It may seem
- * excessive to send mtime/atime updates to OSTs when not
- * setting times to past, but it is necessary due to possible
- * time de-synchronization between MDT inode and OST objects */
- if (attr->ia_valid & ATTR_SIZE)
- down_write(&lli->lli_trunc_sem);
- rc = cl_setattr_ost(inode, attr);
- if (attr->ia_valid & ATTR_SIZE)
- up_write(&lli->lli_trunc_sem);
- }
-out:
- if (op_data) {
- if (op_data->op_ioepoch) {
- rc1 = ll_setattr_done_writing(inode, op_data, mod);
- if (!rc)
- rc = rc1;
- }
- ll_finish_md_op_data(op_data);
- }
- if (!S_ISDIR(inode->i_mode)) {
- mutex_lock(&inode->i_mutex);
- if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
- inode_dio_wait(inode);
- }
-
- ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
- LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
-
- return rc;
-}
-
-int ll_setattr(struct dentry *de, struct iattr *attr)
-{
- int mode = d_inode(de)->i_mode;
-
- if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
- (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
- attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
-
- if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
- (ATTR_SIZE|ATTR_MODE)) &&
- (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
- (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
- !(attr->ia_mode & S_ISGID))))
- attr->ia_valid |= ATTR_FORCE;
-
- if ((attr->ia_valid & ATTR_MODE) &&
- (mode & S_ISUID) &&
- !(attr->ia_mode & S_ISUID) &&
- !(attr->ia_valid & ATTR_KILL_SUID))
- attr->ia_valid |= ATTR_KILL_SUID;
-
- if ((attr->ia_valid & ATTR_MODE) &&
- ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
- !(attr->ia_mode & S_ISGID) &&
- !(attr->ia_valid & ATTR_KILL_SGID))
- attr->ia_valid |= ATTR_KILL_SGID;
-
- return ll_setattr_raw(de, attr, false);
-}
-
-int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_statfs obd_osfs;
- int rc;
-
- rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
- if (rc) {
- CERROR("md_statfs fails: rc = %d\n", rc);
- return rc;
- }
-
- osfs->os_type = sb->s_magic;
-
- CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
- osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
- osfs->os_files);
-
- if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
- flags |= OBD_STATFS_NODELAY;
-
- rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
- if (rc) {
- CERROR("obd_statfs fails: rc = %d\n", rc);
- return rc;
- }
-
- CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
- obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
- obd_osfs.os_files);
-
- osfs->os_bsize = obd_osfs.os_bsize;
- osfs->os_blocks = obd_osfs.os_blocks;
- osfs->os_bfree = obd_osfs.os_bfree;
- osfs->os_bavail = obd_osfs.os_bavail;
-
- /* If we don't have as many objects free on the OST as inodes
- * on the MDS, we reduce the total number of inodes to
- * compensate, so that the "inodes in use" number is correct.
- */
- if (obd_osfs.os_ffree < osfs->os_ffree) {
- osfs->os_files = (osfs->os_files - osfs->os_ffree) +
- obd_osfs.os_ffree;
- osfs->os_ffree = obd_osfs.os_ffree;
- }
-
- return rc;
-}
-int ll_statfs(struct dentry *de, struct kstatfs *sfs)
-{
- struct super_block *sb = de->d_sb;
- struct obd_statfs osfs;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
- ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
-
- /* Some amount of caching on the client is allowed */
- rc = ll_statfs_internal(sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- 0);
- if (rc)
- return rc;
-
- statfs_unpack(sfs, &osfs);
-
- /* We need to downshift for all 32-bit kernels, because we can't
- * tell if the kernel is being called via sys_statfs64() or not.
- * Stop before overflowing f_bsize - in which case it is better
- * to just risk EOVERFLOW if caller is using old sys_statfs(). */
- if (sizeof(long) < 8) {
- while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
- sfs->f_bsize <<= 1;
-
- osfs.os_blocks >>= 1;
- osfs.os_bfree >>= 1;
- osfs.os_bavail >>= 1;
- }
- }
-
- sfs->f_blocks = osfs.os_blocks;
- sfs->f_bfree = osfs.os_bfree;
- sfs->f_bavail = osfs.os_bavail;
- sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
- return 0;
-}
-
-void ll_inode_size_lock(struct inode *inode)
-{
- struct ll_inode_info *lli;
-
- LASSERT(!S_ISDIR(inode->i_mode));
-
- lli = ll_i2info(inode);
- mutex_lock(&lli->lli_size_mutex);
-}
-
-void ll_inode_size_unlock(struct inode *inode)
-{
- struct ll_inode_info *lli;
-
- lli = ll_i2info(inode);
- mutex_unlock(&lli->lli_size_mutex);
-}
-
-void ll_update_inode(struct inode *inode, struct lustre_md *md)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct mdt_body *body = md->body;
- struct lov_stripe_md *lsm = md->lsm;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
- if (lsm != NULL) {
- if (!lli->lli_has_smd &&
- !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
- cl_file_inode_init(inode, md);
-
- lli->lli_maxbytes = lsm->lsm_maxbytes;
- if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
- lli->lli_maxbytes = MAX_LFS_FILESIZE;
- }
-
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- if (body->valid & OBD_MD_FLRMTPERM)
- ll_update_remote_perm(inode, md->remote_perm);
- }
-#ifdef CONFIG_FS_POSIX_ACL
- else if (body->valid & OBD_MD_FLACL) {
- spin_lock(&lli->lli_lock);
- if (lli->lli_posix_acl)
- posix_acl_release(lli->lli_posix_acl);
- lli->lli_posix_acl = md->posix_acl;
- spin_unlock(&lli->lli_lock);
- }
-#endif
- inode->i_ino = cl_fid_build_ino(&body->fid1,
- sbi->ll_flags & LL_SBI_32BIT_API);
- inode->i_generation = cl_fid_build_gen(&body->fid1);
-
- if (body->valid & OBD_MD_FLATIME) {
- if (body->atime > LTIME_S(inode->i_atime))
- LTIME_S(inode->i_atime) = body->atime;
- lli->lli_lvb.lvb_atime = body->atime;
- }
- if (body->valid & OBD_MD_FLMTIME) {
- if (body->mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
- inode->i_ino, LTIME_S(inode->i_mtime),
- body->mtime);
- LTIME_S(inode->i_mtime) = body->mtime;
- }
- lli->lli_lvb.lvb_mtime = body->mtime;
- }
- if (body->valid & OBD_MD_FLCTIME) {
- if (body->ctime > LTIME_S(inode->i_ctime))
- LTIME_S(inode->i_ctime) = body->ctime;
- lli->lli_lvb.lvb_ctime = body->ctime;
- }
- if (body->valid & OBD_MD_FLMODE)
- inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
- if (body->valid & OBD_MD_FLTYPE)
- inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
- LASSERT(inode->i_mode != 0);
- if (S_ISREG(inode->i_mode))
- inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
- LL_MAX_BLKSIZE_BITS);
- else
- inode->i_blkbits = inode->i_sb->s_blocksize_bits;
- if (body->valid & OBD_MD_FLUID)
- inode->i_uid = make_kuid(&init_user_ns, body->uid);
- if (body->valid & OBD_MD_FLGID)
- inode->i_gid = make_kgid(&init_user_ns, body->gid);
- if (body->valid & OBD_MD_FLFLAGS)
- inode->i_flags = ll_ext_to_inode_flags(body->flags);
- if (body->valid & OBD_MD_FLNLINK)
- set_nlink(inode, body->nlink);
- if (body->valid & OBD_MD_FLRDEV)
- inode->i_rdev = old_decode_dev(body->rdev);
-
- if (body->valid & OBD_MD_FLID) {
- /* FID shouldn't be changed! */
- if (fid_is_sane(&lli->lli_fid)) {
- LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
- "Trying to change FID "DFID
- " to the "DFID", inode %lu/%u(%p)\n",
- PFID(&lli->lli_fid), PFID(&body->fid1),
- inode->i_ino, inode->i_generation, inode);
- } else
- lli->lli_fid = body->fid1;
- }
-
- LASSERT(fid_seq(&lli->lli_fid) != 0);
-
- if (body->valid & OBD_MD_FLSIZE) {
- if (exp_connect_som(ll_i2mdexp(inode)) &&
- S_ISREG(inode->i_mode)) {
- struct lustre_handle lockh;
- ldlm_mode_t mode;
-
- /* As it is possible a blocking ast has been processed
- * by this time, we need to check there is an UPDATE
-			 * lock on the client and set LLIF_MDS_SIZE_LOCK while
-			 * holding it. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
- &lockh, LDLM_FL_CBPENDING,
- LCK_CR | LCK_CW |
- LCK_PR | LCK_PW);
- if (mode) {
- if (lli->lli_flags & (LLIF_DONE_WRITING |
- LLIF_EPOCH_PENDING |
- LLIF_SOM_DIRTY)) {
- CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
- inode->i_ino, lli->lli_flags);
- } else {
- /* Use old size assignment to avoid
- * deadlock bz14138 & bz14326 */
- i_size_write(inode, body->size);
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
- spin_unlock(&lli->lli_lock);
- }
- ldlm_lock_decref(&lockh, mode);
- }
- } else {
- /* Use old size assignment to avoid
- * deadlock bz14138 & bz14326 */
- i_size_write(inode, body->size);
-
- CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
- inode->i_ino, (unsigned long long)body->size);
- }
-
- if (body->valid & OBD_MD_FLBLOCKS)
- inode->i_blocks = body->blocks;
- }
-
- if (body->valid & OBD_MD_TSTATE) {
- if (body->t_state & MS_RESTORE)
- lli->lli_flags |= LLIF_FILE_RESTORING;
- }
-}
-
-void ll_read_inode2(struct inode *inode, void *opaque)
-{
- struct lustre_md *md = opaque;
- struct ll_inode_info *lli = ll_i2info(inode);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
- PFID(&lli->lli_fid), inode);
-
- LASSERT(!lli->lli_has_smd);
-
- /* Core attributes from the MDS first. This is a new inode, and
- * the VFS doesn't zero times in the core inode so we have to do
- * it ourselves. They will be overwritten by either MDS or OST
- * attributes - we just need to make sure they aren't newer. */
- LTIME_S(inode->i_mtime) = 0;
- LTIME_S(inode->i_atime) = 0;
- LTIME_S(inode->i_ctime) = 0;
- inode->i_rdev = 0;
- ll_update_inode(inode, md);
-
- /* OIDEBUG(inode); */
-
- if (S_ISREG(inode->i_mode)) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- inode->i_op = &ll_file_inode_operations;
- inode->i_fop = sbi->ll_fop;
- inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &ll_dir_inode_operations;
- inode->i_fop = &ll_dir_operations;
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &ll_fast_symlink_inode_operations;
- } else {
- inode->i_op = &ll_special_inode_operations;
-
- init_special_inode(inode, inode->i_mode,
- inode->i_rdev);
- }
-}
-
-void ll_delete_inode(struct inode *inode)
-{
- struct cl_inode_info *lli = cl_i2info(inode);
-
- if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
- /* discard all dirty pages before truncating them, required by
- * osc_extent implementation at LU-1030. */
- cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
- CL_FSYNC_DISCARD, 1);
-
- truncate_inode_pages_final(&inode->i_data);
-
- /* Workaround for LU-118 */
- if (inode->i_data.nrpages) {
- spin_lock_irq(&inode->i_data.tree_lock);
- spin_unlock_irq(&inode->i_data.tree_lock);
- LASSERTF(inode->i_data.nrpages == 0,
- "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
- inode->i_ino, inode->i_generation, inode,
- inode->i_data.nrpages);
- }
- /* Workaround end */
-
- ll_clear_inode(inode);
- clear_inode(inode);
-}
-
-int ll_iocontrol(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- int rc, flags = 0;
-
- switch (cmd) {
- case FSFILT_IOC_GETFLAGS: {
- struct mdt_body *body;
- struct md_op_data *op_data;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
- 0, 0, LUSTRE_OPC_ANY,
- NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_FLFLAGS;
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc) {
- CERROR("failure %d inode %lu\n", rc, inode->i_ino);
- return -abs(rc);
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- flags = body->flags;
-
- ptlrpc_req_finished(req);
-
- return put_user(flags, (int *)arg);
- }
- case FSFILT_IOC_SETFLAGS: {
- struct lov_stripe_md *lsm;
- struct obd_info oinfo = { };
- struct md_op_data *op_data;
-
- if (get_user(flags, (int *)arg))
- return -EFAULT;
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
- op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
- rc = md_setattr(sbi->ll_md_exp, op_data,
- NULL, 0, NULL, 0, &req, NULL);
- ll_finish_md_op_data(op_data);
- ptlrpc_req_finished(req);
- if (rc)
- return rc;
-
- inode->i_flags = ll_ext_to_inode_flags(flags);
-
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
- ccc_inode_lsm_put(inode, lsm);
- return 0;
- }
-
- OBDO_ALLOC(oinfo.oi_oa);
- if (!oinfo.oi_oa) {
- ccc_inode_lsm_put(inode, lsm);
- return -ENOMEM;
- }
- oinfo.oi_md = lsm;
- oinfo.oi_oa->o_oi = lsm->lsm_oi;
- oinfo.oi_oa->o_flags = flags;
- oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
- OBD_MD_FLGROUP;
- obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
- rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
- OBDO_FREE(oinfo.oi_oa);
- ccc_inode_lsm_put(inode, lsm);
-
- if (rc && rc != -EPERM && rc != -EACCES)
- CERROR("osc_setattr_async fails: rc = %d\n", rc);
-
- return rc;
- }
- default:
- return -ENOSYS;
- }
-
- return 0;
-}
-
-int ll_flush_ctx(struct inode *inode)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
-
- CDEBUG(D_SEC, "flush context for user %d\n",
- from_kuid(&init_user_ns, current_uid()));
-
- obd_set_info_async(NULL, sbi->ll_md_exp,
- sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
- 0, NULL, NULL);
- obd_set_info_async(NULL, sbi->ll_dt_exp,
- sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
- 0, NULL, NULL);
- return 0;
-}
-
-/* umount -f client means force down, don't save state */
-void ll_umount_begin(struct super_block *sb)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_device *obd;
- struct obd_ioctl_data *ioc_data;
-
- CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
- sb->s_count, atomic_read(&sb->s_active));
-
- obd = class_exp2obd(sbi->ll_md_exp);
- if (obd == NULL) {
- CERROR("Invalid MDC connection handle %#llx\n",
- sbi->ll_md_exp->exp_handle.h_cookie);
- return;
- }
- obd->obd_force = 1;
-
- obd = class_exp2obd(sbi->ll_dt_exp);
- if (obd == NULL) {
- CERROR("Invalid LOV connection handle %#llx\n",
- sbi->ll_dt_exp->exp_handle.h_cookie);
- return;
- }
- obd->obd_force = 1;
-
- ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
- if (ioc_data) {
- obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
- sizeof(*ioc_data), ioc_data, NULL);
-
- obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
- sizeof(*ioc_data), ioc_data, NULL);
-
- kfree(ioc_data);
- }
-
- /* Really, we'd like to wait until there are no requests outstanding,
- * and then continue. For now, we just invalidate the requests,
- * schedule() and sleep one second if needed, and hope.
- */
- schedule();
-}
-
-int ll_remount_fs(struct super_block *sb, int *flags, char *data)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- char *profilenm = get_profile_name(sb);
- int err;
- __u32 read_only;
-
- if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
- read_only = *flags & MS_RDONLY;
- err = obd_set_info_async(NULL, sbi->ll_md_exp,
- sizeof(KEY_READ_ONLY),
- KEY_READ_ONLY, sizeof(read_only),
- &read_only, NULL);
- if (err) {
- LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
- profilenm, read_only ?
- "read-only" : "read-write", err);
- return err;
- }
-
- if (read_only)
- sb->s_flags |= MS_RDONLY;
- else
- sb->s_flags &= ~MS_RDONLY;
-
- if (sbi->ll_flags & LL_SBI_VERBOSE)
- LCONSOLE_WARN("Remounted %s %s\n", profilenm,
- read_only ? "read-only" : "read-write");
- }
- return 0;
-}
-
-/**
- * Cleanup the open handle that is cached on MDT-side.
- *
- * For the open case, the client-side open handling thread may hit an
- * error after the MDT grants the open. In that case, the client should
- * send a close RPC to the MDT as cleanup; otherwise, the open handle
- * on the MDT is leaked until the client unmounts or is evicted.
- *
- * Furthermore, if someone unlinks the file, the open handle holds a
- * reference on that file/object and will block subsequent threads that
- * want to locate the object via FID.
- *
- * \param[in] sb super block for this file-system
- * \param[in] open_req pointer to the original open request
- */
-void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
-{
- struct mdt_body *body;
- struct md_op_data *op_data;
- struct ptlrpc_request *close_req = NULL;
- struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
-
- body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- CWARN("%s: cannot allocate op_data to release open handle for "
- DFID "\n",
- ll_get_fsname(sb, NULL, 0), PFID(&body->fid1));
-
- return;
- }
-
- op_data->op_fid1 = body->fid1;
- op_data->op_ioepoch = body->ioepoch;
- op_data->op_handle = body->handle;
- op_data->op_mod_time = get_seconds();
- md_close(exp, op_data, NULL, &close_req);
- ptlrpc_req_finished(close_req);
- ll_finish_md_op_data(op_data);
-}
-
-int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
- struct super_block *sb, struct lookup_intent *it)
-{
- struct ll_sb_info *sbi = NULL;
- struct lustre_md md;
- int rc;
-
- LASSERT(*inode || sb);
- sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
- rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
- sbi->ll_md_exp, &md);
- if (rc)
- goto cleanup;
-
- if (*inode) {
- ll_update_inode(*inode, &md);
- } else {
- LASSERT(sb != NULL);
-
- /*
-		 * At this point the server returns the same FID that the client
-		 * generated for the create, so using ->fid1 is okay here.
- */
- LASSERT(fid_is_sane(&md.body->fid1));
-
- *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
- sbi->ll_flags & LL_SBI_32BIT_API),
- &md);
- if (*inode == NULL || IS_ERR(*inode)) {
-#ifdef CONFIG_FS_POSIX_ACL
- if (md.posix_acl) {
- posix_acl_release(md.posix_acl);
- md.posix_acl = NULL;
- }
-#endif
- rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
- *inode = NULL;
- CERROR("new_inode -fatal: rc %d\n", rc);
- goto out;
- }
- }
-
-	/* Handle a piggybacked layout lock.
-	 * A layout lock can be piggybacked on getattr and open requests.
-	 * The lsm can be applied to the inode only if it comes with a layout
-	 * lock; otherwise the correct layout may be overwritten, for example:
-	 * 1. proc1: mdt returns a lsm but does not grant the layout lock
-	 * 2. the layout is changed by another client
-	 * 3. proc2: refreshes the layout and the layout lock is granted
-	 * 4. proc1: would apply a stale layout */
- if (it != NULL && it->d.lustre.it_lock_mode != 0) {
- struct lustre_handle lockh;
- struct ldlm_lock *lock;
-
- lockh.cookie = it->d.lustre.it_lock_handle;
- lock = ldlm_handle2lock(&lockh);
- LASSERT(lock != NULL);
- if (ldlm_has_layout(lock)) {
- struct cl_object_conf conf;
-
- memset(&conf, 0, sizeof(conf));
- conf.coc_opc = OBJECT_CONF_SET;
- conf.coc_inode = *inode;
- conf.coc_lock = lock;
- conf.u.coc_md = &md;
- (void)ll_layout_conf(*inode, &conf);
- }
- LDLM_LOCK_PUT(lock);
- }
-
-out:
- if (md.lsm != NULL)
- obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
- md_free_lustre_md(sbi->ll_md_exp, &md);
-
-cleanup:
- if (rc != 0 && it && it->it_op & IT_OPEN)
- ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
-
- return rc;
-}
-
-int ll_obd_statfs(struct inode *inode, void *arg)
-{
- struct ll_sb_info *sbi = NULL;
- struct obd_export *exp;
- char *buf = NULL;
- struct obd_ioctl_data *data = NULL;
- __u32 type;
- __u32 flags;
- int len = 0, rc;
-
- if (!inode) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- sbi = ll_i2sbi(inode);
- if (!sbi) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- rc = obd_ioctl_getdata(&buf, &len, arg);
- if (rc)
- goto out_statfs;
-
- data = (void *)buf;
- if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
- !data->ioc_pbuf1 || !data->ioc_pbuf2) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- if (data->ioc_inllen1 != sizeof(__u32) ||
- data->ioc_inllen2 != sizeof(__u32) ||
- data->ioc_plen1 != sizeof(struct obd_statfs) ||
- data->ioc_plen2 != sizeof(struct obd_uuid)) {
- rc = -EINVAL;
- goto out_statfs;
- }
-
- memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
- if (type & LL_STATFS_LMV)
- exp = sbi->ll_md_exp;
- else if (type & LL_STATFS_LOV)
- exp = sbi->ll_dt_exp;
- else {
- rc = -ENODEV;
- goto out_statfs;
- }
-
- flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
- rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
- if (rc)
- goto out_statfs;
-out_statfs:
- if (buf)
- obd_ioctl_freedata(buf, len);
- return rc;
-}
-
-int ll_process_config(struct lustre_cfg *lcfg)
-{
- char *ptr;
- void *sb;
- struct lprocfs_static_vars lvars;
- unsigned long x;
- int rc = 0;
-
- lprocfs_llite_init_vars(&lvars);
-
- /* The instance name contains the sb: lustre-client-aacfe000 */
- ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
- if (!ptr || !*(++ptr))
- return -EINVAL;
- rc = kstrtoul(ptr, 16, &x);
- if (rc != 0)
- return -EINVAL;
- sb = (void *)x;
- /* This better be a real Lustre superblock! */
- LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
-
- /* Note we have not called client_common_fill_super yet, so
- proc fns must be able to handle that! */
- rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
- lcfg, sb);
- if (rc > 0)
- rc = 0;
- return rc;
-}
-
-/* This function prepares the md_op_data hint for passing it down to the MD stack. */
-struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
- struct inode *i1, struct inode *i2,
- const char *name, int namelen,
- int mode, __u32 opc, void *data)
-{
- LASSERT(i1 != NULL);
-
- if (namelen > ll_i2sbi(i1)->ll_namelen)
- return ERR_PTR(-ENAMETOOLONG);
-
- if (op_data == NULL)
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
-
- if (op_data == NULL)
- return ERR_PTR(-ENOMEM);
-
- ll_i2gids(op_data->op_suppgids, i1, i2);
- op_data->op_fid1 = *ll_inode2fid(i1);
-
- if (i2)
- op_data->op_fid2 = *ll_inode2fid(i2);
- else
- fid_zero(&op_data->op_fid2);
-
- op_data->op_name = name;
- op_data->op_namelen = namelen;
- op_data->op_mode = mode;
- op_data->op_mod_time = ktime_get_real_seconds();
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
- op_data->op_bias = 0;
- op_data->op_cli_flags = 0;
- if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
- filename_is_volatile(name, namelen, NULL))
- op_data->op_bias |= MDS_CREATE_VOLATILE;
- op_data->op_opc = opc;
- op_data->op_mds = 0;
- op_data->op_data = data;
-
- /* If the file is being opened after mknod() (normally due to NFS)
- * try to use the default stripe data from parent directory for
- * allocating OST objects. Try to pass the parent FID to MDS. */
- if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
- !ll_i2info(i2)->lli_has_smd) {
- struct ll_inode_info *lli = ll_i2info(i2);
-
- spin_lock(&lli->lli_lock);
- if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
- op_data->op_fid1 = lli->lli_pfid;
- spin_unlock(&lli->lli_lock);
- }
-
- /* When called by ll_setattr_raw, file is i1. */
- if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
- op_data->op_bias |= MDS_DATA_MODIFIED;
-
- return op_data;
-}
-
-void ll_finish_md_op_data(struct md_op_data *op_data)
-{
- kfree(op_data);
-}
-
-int ll_show_options(struct seq_file *seq, struct dentry *dentry)
-{
- struct ll_sb_info *sbi;
-
- LASSERT((seq != NULL) && (dentry != NULL));
- sbi = ll_s2sbi(dentry->d_sb);
-
- if (sbi->ll_flags & LL_SBI_NOLCK)
- seq_puts(seq, ",nolock");
-
- if (sbi->ll_flags & LL_SBI_FLOCK)
- seq_puts(seq, ",flock");
-
- if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
- seq_puts(seq, ",localflock");
-
- if (sbi->ll_flags & LL_SBI_USER_XATTR)
- seq_puts(seq, ",user_xattr");
-
- if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
- seq_puts(seq, ",lazystatfs");
-
- if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
- seq_puts(seq, ",user_fid2path");
-
- return 0;
-}
-
-/**
- * Get obd name by cmd, and copy out to user space
- */
-int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obd_device *obd;
-
- if (cmd == OBD_IOC_GETDTNAME)
- obd = class_exp2obd(sbi->ll_dt_exp);
- else if (cmd == OBD_IOC_GETMDNAME)
- obd = class_exp2obd(sbi->ll_md_exp);
- else
- return -EINVAL;
-
- if (!obd)
- return -ENOENT;
-
- if (copy_to_user((void *)arg, obd->obd_name,
- strlen(obd->obd_name) + 1))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * Get the Lustre file system name for \a sb. If \a buf is provided (non-NULL),
- * the fsname is returned in this buffer; otherwise, a static buffer is used
- * to store the fsname and is returned to the caller.
- */
-char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
-{
- static char fsname_static[MTI_NAME_MAXLEN];
- struct lustre_sb_info *lsi = s2lsi(sb);
- char *ptr;
- int len;
-
- if (buf == NULL) {
-		/* This means the caller wants to use the static buffer
-		 * and doesn't care about races; usually this is the
-		 * error reporting path. */
- buf = fsname_static;
- buflen = sizeof(fsname_static);
- }
-
- len = strlen(lsi->lsi_lmd->lmd_profile);
- ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
- if (ptr && (strcmp(ptr, "-client") == 0))
- len -= 7;
-
- if (unlikely(len >= buflen))
- len = buflen - 1;
- strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
- buf[len] = '\0';
-
- return buf;
-}
-
-void ll_dirty_page_discard_warn(struct page *page, int ioret)
-{
- char *buf, *path = NULL;
- struct dentry *dentry = NULL;
- struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
-
- /* this can be called inside spin lock so use GFP_ATOMIC. */
- buf = (char *)__get_free_page(GFP_ATOMIC);
- if (buf != NULL) {
- dentry = d_find_alias(page->mapping->host);
- if (dentry != NULL)
- path = dentry_path_raw(dentry, buf, PAGE_SIZE);
- }
-
- CDEBUG(D_WARNING,
- "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
- ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
- s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
- PFID(&obj->cob_header.coh_lu.loh_fid),
- (path && !IS_ERR(path)) ? path : "", ioret);
-
- if (dentry != NULL)
- dput(dentry);
-
- if (buf != NULL)
- free_page((unsigned long)buf);
-}
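
The removed ll_get_fsname() above derives the filesystem name by stripping a
trailing "-client" from the mount profile, falling back to a static buffer when
the caller passes NULL. A minimal user-space sketch of that trimming logic
(the buffer size and helper name below are illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

#define FSNAME_MAX 64	/* illustrative cap, not the kernel's MTI_NAME_MAXLEN */

/* Copy the profile name into buf, dropping a trailing "-client" if present. */
static char *fsname_from_profile(const char *profile, char *buf, size_t buflen)
{
	size_t len = strlen(profile);
	const char *dash = strrchr(profile, '-');

	if (dash && strcmp(dash, "-client") == 0)
		len -= strlen("-client");

	if (len >= buflen)
		len = buflen - 1;
	memcpy(buf, profile, len);
	buf[len] = '\0';
	return buf;
}

int main(void)
{
	char buf[FSNAME_MAX];

	/* "lustre-client" -> "lustre"; a name without the suffix is copied as-is */
	printf("%s\n", fsname_from_profile("lustre-client", buf, sizeof(buf)));
	printf("%s\n", fsname_from_profile("testfs", buf, sizeof(buf)));
	return 0;
}
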
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
deleted file mode 100644
index a90214bb84dd..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-#include "../include/linux/lustre_compat25.h"
-
-static const struct vm_operations_struct ll_file_vm_ops;
-
-void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr,
- size_t count)
-{
- policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << PAGE_CACHE_SHIFT);
- policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~CFS_PAGE_MASK;
-}
-
-struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
- size_t count)
-{
- struct vm_area_struct *vma, *ret = NULL;
-
- /* mmap_sem must have been held by caller. */
- LASSERT(!down_write_trylock(&mm->mmap_sem));
-
- for (vma = find_vma(mm, addr);
- vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
- if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
- vma->vm_flags & VM_SHARED) {
- ret = vma;
- break;
- }
- }
- return ret;
-}
-
-/**
- * API-independent part of page fault initialization.
- * \param vma - virtual memory area addressed by the page fault
- * \param env - corresponding lu_env for processing
- * \param nest - nesting level
- * \param index - page index corresponding to the fault.
- * \param ra_flags - vma readahead flags.
- *
- * \return allocated and initialized env for the fault operation.
- * \retval EINVAL if the env can't be allocated
- * \return other error codes from cl_io_init.
- */
-static struct cl_io *
-ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
- struct cl_env_nest *nest, pgoff_t index,
- unsigned long *ra_flags)
-{
- struct file *file = vma->vm_file;
- struct inode *inode = file_inode(file);
- struct cl_io *io;
- struct cl_fault_io *fio;
- struct lu_env *env;
- int rc;
-
- *env_ret = NULL;
- if (ll_file_nolock(file))
- return ERR_PTR(-EOPNOTSUPP);
-
- /*
- * page fault can be called when lustre IO is
- * already active for the current thread, e.g., when doing read/write
- * against user level buffer mapped from Lustre buffer. To avoid
- * stomping on existing context, optionally force an allocation of a new
- * one.
- */
- env = cl_env_nested_get(nest);
- if (IS_ERR(env))
- return ERR_PTR(-EINVAL);
-
- *env_ret = env;
-
- io = ccc_env_thread_io(env);
- io->ci_obj = ll_i2info(inode)->lli_clob;
- LASSERT(io->ci_obj != NULL);
-
- fio = &io->u.ci_fault;
- fio->ft_index = index;
- fio->ft_executable = vma->vm_flags&VM_EXEC;
-
- /*
- * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
- * the kernel will not read other pages not covered by ldlm in
- * filemap_nopage. we do our readahead in ll_readpage.
- */
- if (ra_flags != NULL)
- *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
- vma->vm_flags &= ~VM_SEQ_READ;
- vma->vm_flags |= VM_RAND_READ;
-
- CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
- fio->ft_index, fio->ft_executable);
-
- rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
- if (rc == 0) {
- struct ccc_io *cio = ccc_env_io(env);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(cio->cui_cl.cis_io == io);
-
-		/* mmap lock must be MANDATORY because it has to cache
-		 * pages. */
- io->ci_lockreq = CILR_MANDATORY;
- cio->cui_fd = fd;
- } else {
- LASSERT(rc < 0);
- cl_io_fini(env, io);
- cl_env_nested_put(nest, env);
- io = ERR_PTR(rc);
- }
-
- return io;
-}
-
-/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
-static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
- bool *retry)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio;
- struct cl_env_nest nest;
- int result;
- sigset_t set;
- struct inode *inode;
- struct ll_inode_info *lli;
-
- LASSERT(vmpage != NULL);
-
- io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
- if (IS_ERR(io)) {
- result = PTR_ERR(io);
- goto out;
- }
-
- result = io->ci_result;
- if (result < 0)
- goto out_io;
-
- io->u.ci_fault.ft_mkwrite = 1;
- io->u.ci_fault.ft_writable = 1;
-
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_vmpage = vmpage;
-
- set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
-
- /* we grab lli_trunc_sem to exclude truncate case.
- * Otherwise, we could add dirty pages into osc cache
- * while truncate is on-going. */
- inode = ccc_object_inode(io->ci_obj);
- lli = ll_i2info(inode);
- down_read(&lli->lli_trunc_sem);
-
- result = cl_io_loop(env, io);
-
- up_read(&lli->lli_trunc_sem);
-
- cfs_restore_sigs(set);
-
- if (result == 0) {
- struct inode *inode = file_inode(vma->vm_file);
- struct ll_inode_info *lli = ll_i2info(inode);
-
- lock_page(vmpage);
- if (vmpage->mapping == NULL) {
- unlock_page(vmpage);
-
- /* page was truncated and lock was cancelled, return
- * ENODATA so that VM_FAULT_NOPAGE will be returned
- * to handle_mm_fault(). */
- if (result == 0)
- result = -ENODATA;
- } else if (!PageDirty(vmpage)) {
-			/* Race: the page has been cleaned by ptlrpcd after
-			 * it was unlocked, so it has to be added into the
-			 * dirty cache again; otherwise this soon-to-be-dirty
-			 * page won't consume any grants. It is even worse if
-			 * the page is being transferred, because that will
-			 * break the RPC checksum.
-			 */
- unlock_page(vmpage);
-
- CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
- vmpage, vmpage->index);
-
- *retry = true;
- result = -EAGAIN;
- }
-
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
- }
-
-out_io:
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
-out:
- CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
- LASSERT(ergo(result == 0, PageLocked(vmpage)));
-
- return result;
-}
-
-
-
-static inline int to_fault_error(int result)
-{
- switch (result) {
- case 0:
- result = VM_FAULT_LOCKED;
- break;
- case -EFAULT:
- result = VM_FAULT_NOPAGE;
- break;
- case -ENOMEM:
- result = VM_FAULT_OOM;
- break;
- default:
- result = VM_FAULT_SIGBUS;
- break;
- }
- return result;
-}
-
-/**
- * Lustre implementation of the vm_operations_struct::fault() method, called
- * by the VM to serve a page fault (both in kernel and user space).
- *
- * \param vma - virtual area struct related to the page fault
- * \param vmf - structure describing the type and address of the fault
- *
- * \return allocated and filled _locked_ page for the address
- * \retval VM_FAULT_ERROR on general error
- * \retval NOPAGE_OOM if there is no memory to allocate a new page
- */
-static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct vvp_io *vio = NULL;
- struct page *vmpage;
- unsigned long ra_flags;
- struct cl_env_nest nest;
- int result;
- int fault_ret = 0;
-
- io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
- if (IS_ERR(io))
- return to_fault_error(PTR_ERR(io));
-
- result = io->ci_result;
- if (result == 0) {
- vio = vvp_env_io(env);
- vio->u.fault.ft_vma = vma;
- vio->u.fault.ft_vmpage = NULL;
- vio->u.fault.fault.ft_vmf = vmf;
- vio->u.fault.fault.ft_flags = 0;
- vio->u.fault.fault.ft_flags_valid = false;
-
- result = cl_io_loop(env, io);
-
- /* ft_flags are only valid if we reached
- * the call to filemap_fault */
- if (vio->u.fault.fault.ft_flags_valid)
- fault_ret = vio->u.fault.fault.ft_flags;
-
- vmpage = vio->u.fault.ft_vmpage;
- if (result != 0 && vmpage != NULL) {
- page_cache_release(vmpage);
- vmf->page = NULL;
- }
- }
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
-
- vma->vm_flags |= ra_flags;
- if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
- fault_ret |= to_fault_error(result);
-
- CDEBUG(D_MMAP, "%s fault %d/%d\n",
- current->comm, fault_ret, result);
- return fault_ret;
-}
-
-static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- int count = 0;
- bool printed = false;
- int result;
- sigset_t set;
-
-	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
-	 * so that the process can be killed by the admin but other signals
-	 * do not cause a segfault. */
- set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
-
-restart:
- result = ll_fault0(vma, vmf);
- LASSERT(!(result & VM_FAULT_LOCKED));
- if (result == 0) {
- struct page *vmpage = vmf->page;
-
- /* check if this page has been truncated */
- lock_page(vmpage);
- if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
- unlock_page(vmpage);
- page_cache_release(vmpage);
- vmf->page = NULL;
-
- if (!printed && ++count > 16) {
- CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
- current->comm);
- printed = true;
- }
-
- goto restart;
- }
-
- result = VM_FAULT_LOCKED;
- }
- cfs_restore_sigs(set);
- return result;
-}
-
-static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- int count = 0;
- bool printed = false;
- bool retry;
- int result;
-
- do {
- retry = false;
- result = ll_page_mkwrite0(vma, vmf->page, &retry);
-
- if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
- current->comm, vmf->pgoff,
- file_inode(vma->vm_file)->i_ino);
- printed = true;
- }
- } while (retry);
-
- switch (result) {
- case 0:
- LASSERT(PageLocked(vmf->page));
- result = VM_FAULT_LOCKED;
- break;
- case -ENODATA:
- case -EFAULT:
- result = VM_FAULT_NOPAGE;
- break;
- case -ENOMEM:
- result = VM_FAULT_OOM;
- break;
- case -EAGAIN:
- result = VM_FAULT_RETRY;
- break;
- default:
- result = VM_FAULT_SIGBUS;
- break;
- }
-
- return result;
-}
-
-/**
- * To avoid cancelling the locks covering an mmapped region under lock cache
- * pressure, we track the mapped vma count in ccc_object::cob_mmap_cnt.
- */
-static void ll_vm_open(struct vm_area_struct *vma)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
-
- LASSERT(vma->vm_file);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- atomic_inc(&vob->cob_mmap_cnt);
-}
-
-/**
- * Dual to ll_vm_open().
- */
-static void ll_vm_close(struct vm_area_struct *vma)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
-
- LASSERT(vma->vm_file);
- atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
-}
-
-/* XXX put nice comment here. talk about __free_pte -> dirty pages and
- * nopage's reference passing to the pte */
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
-{
- int rc = -ENOENT;
-
- LASSERTF(last > first, "last %llu first %llu\n", last, first);
- if (mapping_mapped(mapping)) {
- rc = 0;
- unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
- last - first + 1, 0);
- }
-
- return rc;
-}
-
-static const struct vm_operations_struct ll_file_vm_ops = {
- .fault = ll_fault,
- .page_mkwrite = ll_page_mkwrite,
- .open = ll_vm_open,
- .close = ll_vm_close,
-};
-
-int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct inode *inode = file_inode(file);
- int rc;
-
- if (ll_file_nolock(file))
- return -EOPNOTSUPP;
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
- rc = generic_file_mmap(file, vma);
- if (rc == 0) {
- vma->vm_ops = &ll_file_vm_ops;
- vma->vm_ops->open(vma);
- /* update the inode's size and mtime */
- rc = ll_glimpse_size(inode);
- }
-
- return rc;
-}
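
The removed policy_from_vma() above converts a faulting address into a
page-aligned byte extent in the backing file: the fault's offset within the
mapping is rounded down to a page boundary and shifted by the mapping's file
offset, and the end is rounded up to the last byte of its page. A stand-alone
sketch of that arithmetic (the 4 KiB page size and helper names are assumptions
for the example):

#include <stdint.h>
#include <stdio.h>

#define PG_SHIFT 12			/* assume 4 KiB pages for the example */
#define PG_SIZE  ((uint64_t)1 << PG_SHIFT)
#define PG_MASK  (~(PG_SIZE - 1))

struct extent { uint64_t start, end; };	/* inclusive byte range in the file */

/* vm_start: start of the mapping; pgoff: file offset of the mapping in pages */
static struct extent extent_from_fault(uint64_t vm_start, uint64_t pgoff,
				       uint64_t addr, uint64_t count)
{
	struct extent e;

	e.start = ((addr - vm_start) & PG_MASK) + (pgoff << PG_SHIFT);
	e.end = (e.start + count - 1) | ~PG_MASK;
	return e;
}

int main(void)
{
	/* a 1-byte fault 5 KiB into a mapping that starts at file page 8 */
	struct extent e = extent_from_fault(0x7f0000000000ULL, 8,
					    0x7f0000000000ULL + 5 * 1024, 1);

	/* prints [36864, 40959]: exactly the 4 KiB page containing the fault */
	printf("[%llu, %llu]\n", (unsigned long long)e.start,
	       (unsigned long long)e.end);
	return 0;
}
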
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
deleted file mode 100644
index 400acacc3590..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lustre/llite/llite_nfs.c
- *
- * NFS export of Lustre Light File System
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- * Author: Huang Hua <huanghua@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-#include <linux/exportfs.h>
-
-__u32 get_uuid2int(const char *name, int len)
-{
- __u32 key0 = 0x12a3fe2d, key1 = 0x37abe8f9;
- while (len--) {
- __u32 key = key1 + (key0 ^ (*name++ * 7152373));
-
- if (key & 0x80000000)
- key -= 0x7fffffff;
- key1 = key0;
- key0 = key;
- }
- return (key0 << 1);
-}
-
-void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid)
-{
- __u64 key = 0, key0 = 0x12a3fe2d, key1 = 0x37abe8f9;
-
- while (len--) {
- key = key1 + (key0 ^ (*name++ * 7152373));
- if (key & 0x8000000000000000ULL)
- key -= 0x7fffffffffffffffULL;
- key1 = key0;
- key0 = key;
- }
-
- fsid->val[0] = key;
- fsid->val[1] = key >> 32;
-}
-
-static int ll_nfs_test_inode(struct inode *inode, void *opaque)
-{
- return lu_fid_eq(&ll_i2info(inode)->lli_fid,
- (struct lu_fid *)opaque);
-}
-
-struct inode *search_inode_for_lustre(struct super_block *sb,
- const struct lu_fid *fid)
-{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct ptlrpc_request *req = NULL;
- struct inode *inode = NULL;
- int eadatalen = 0;
- unsigned long hash = cl_fid_build_ino(fid,
- ll_need_32bit_api(sbi));
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid));
-
- inode = ilookup5(sb, hash, ll_nfs_test_inode, (void *)fid);
- if (inode)
- return inode;
-
- rc = ll_get_default_mdsize(sbi, &eadatalen);
- if (rc)
- return ERR_PTR(rc);
-
-	/* Because the inode is NULL, ll_prep_md_op_data cannot
-	 * be used here, so we allocate op_data ourselves. */
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data)
- return ERR_PTR(-ENOMEM);
-
- op_data->op_fid1 = *fid;
- op_data->op_mode = eadatalen;
- op_data->op_valid = OBD_MD_FLEASIZE;
-
- /* mds_fid2dentry ignores f_type */
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- kfree(op_data);
- if (rc) {
- CERROR("can't get object attrs, fid "DFID", rc %d\n",
- PFID(fid), rc);
- return ERR_PTR(rc);
- }
- rc = ll_prep_inode(&inode, req, sb, NULL);
- ptlrpc_req_finished(req);
- if (rc)
- return ERR_PTR(rc);
-
- return inode;
-}
-
-struct lustre_nfs_fid {
- struct lu_fid lnf_child;
- struct lu_fid lnf_parent;
-};
-
-static struct dentry *
-ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *parent)
-{
- struct inode *inode;
- struct dentry *result;
-
- CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid));
- if (!fid_is_sane(fid))
- return ERR_PTR(-ESTALE);
-
- inode = search_inode_for_lustre(sb, fid);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
-
- if (is_bad_inode(inode)) {
- /* we didn't find the right inode.. */
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
-
- /**
- * It is an anonymous dentry without OST objects created yet.
- * We have to find the parent to tell MDS how to init lov objects.
- */
- if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
- parent != NULL) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_pfid = *parent;
- spin_unlock(&lli->lli_lock);
- }
-
- /* N.B. d_obtain_alias() drops inode ref on error */
- result = d_obtain_alias(inode);
-
- return result;
-}
-
-#define LUSTRE_NFS_FID 0x97
-
-/**
- * \a connectable - whether nfsd will connect itself or whether this should
- * be done by lustre
- *
- * The return value is file handle type:
- * 1 -- contains child file handle;
- * 2 -- contains child file handle and parent file handle;
- * 255 -- error.
- */
-static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
- struct inode *parent)
-{
- struct lustre_nfs_fid *nfs_fid = (void *)fh;
-
- CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n",
- inode->i_ino, PFID(ll_inode2fid(inode)), *plen,
- (int)sizeof(struct lustre_nfs_fid));
-
- if (*plen < sizeof(struct lustre_nfs_fid) / 4)
- return 255;
-
- nfs_fid->lnf_child = *ll_inode2fid(inode);
- nfs_fid->lnf_parent = *ll_inode2fid(parent);
- *plen = sizeof(struct lustre_nfs_fid) / 4;
-
- return LUSTRE_NFS_FID;
-}
-
-static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
- int namelen, loff_t hash, u64 ino,
- unsigned type)
-{
-	/* It is a hack to access lde_fid for comparison with lgd_fid,
-	 * so the input 'name' must be part of the 'lu_dirent'. */
- struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name);
- struct ll_getname_data *lgd =
- container_of(ctx, struct ll_getname_data, ctx);
- struct lu_fid fid;
-
- fid_le_to_cpu(&fid, &lde->lde_fid);
- if (lu_fid_eq(&fid, &lgd->lgd_fid)) {
- memcpy(lgd->lgd_name, name, namelen);
- lgd->lgd_name[namelen] = 0;
- lgd->lgd_found = 1;
- }
- return lgd->lgd_found;
-}
-
-static int ll_get_name(struct dentry *dentry, char *name,
- struct dentry *child)
-{
- struct inode *dir = d_inode(dentry);
- int rc;
- struct ll_getname_data lgd = {
- .lgd_name = name,
- .lgd_fid = ll_i2info(d_inode(child))->lli_fid,
- .ctx.actor = ll_nfs_get_name_filldir,
- };
-
- if (!dir || !S_ISDIR(dir->i_mode)) {
- rc = -ENOTDIR;
- goto out;
- }
-
- if (!dir->i_fop) {
- rc = -EINVAL;
- goto out;
- }
-
- mutex_lock(&dir->i_mutex);
- rc = ll_dir_read(dir, &lgd.ctx);
- mutex_unlock(&dir->i_mutex);
- if (!rc && !lgd.lgd_found)
- rc = -ENOENT;
-out:
- return rc;
-}
-
-static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
-{
- struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
-
- if (fh_type != LUSTRE_NFS_FID)
- return ERR_PTR(-EPROTO);
-
- return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent);
-}
-
-static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
- int fh_len, int fh_type)
-{
- struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
-
- if (fh_type != LUSTRE_NFS_FID)
- return ERR_PTR(-EPROTO);
-
- return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
-}
-
-static struct dentry *ll_get_parent(struct dentry *dchild)
-{
- struct ptlrpc_request *req = NULL;
- struct inode *dir = d_inode(dchild);
- struct ll_sb_info *sbi;
- struct dentry *result = NULL;
- struct mdt_body *body;
- static char dotdot[] = "..";
- struct md_op_data *op_data;
- int rc;
- int lmmsize;
-
- LASSERT(dir && S_ISDIR(dir->i_mode));
-
- sbi = ll_s2sbi(dir->i_sb);
-
- CDEBUG(D_INFO, "getting parent for (%lu,"DFID")\n",
- dir->i_ino, PFID(ll_inode2fid(dir)));
-
- rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc != 0)
- return ERR_PTR(rc);
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL, dotdot,
- strlen(dotdot), lmmsize,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return (void *)op_data;
-
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc) {
- CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino);
- return ERR_PTR(rc);
- }
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body->valid & OBD_MD_FLID);
-
- CDEBUG(D_INFO, "parent for "DFID" is "DFID"\n",
- PFID(ll_inode2fid(dir)), PFID(&body->fid1));
-
- result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
-
- ptlrpc_req_finished(req);
- return result;
-}
-
-struct export_operations lustre_export_operations = {
- .get_parent = ll_get_parent,
- .encode_fh = ll_encode_fh,
- .get_name = ll_get_name,
- .fh_to_dentry = ll_fh_to_dentry,
- .fh_to_parent = ll_fh_to_parent,
-};
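
The export operations removed above follow a simple file-handle convention:
the child and parent identifiers are packed into the opaque handle, the length
is reported in 32-bit words, and 255 is returned when the caller's buffer is
too small. A simplified user-space sketch of that encode/decode convention,
with a made-up 128-bit identifier standing in for struct lu_fid:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for struct lu_fid: an opaque 128-bit identifier */
struct fid128 { uint64_t seq; uint32_t oid, ver; };

struct nfs_handle { struct fid128 child, parent; };

#define HANDLE_TYPE		0x97	/* private file-handle type value */
#define ENCODE_TOO_SMALL	255

/* Encode child+parent into fh; *plen is the buffer size in 32-bit words. */
static int encode_handle(uint32_t *fh, int *plen,
			 const struct fid128 *child, const struct fid128 *parent)
{
	struct nfs_handle h = { *child, *parent };

	if (*plen < (int)(sizeof(h) / 4))
		return ENCODE_TOO_SMALL;

	memcpy(fh, &h, sizeof(h));
	*plen = sizeof(h) / 4;
	return HANDLE_TYPE;
}

/* Reject unknown handle types, then pull the child identifier back out. */
static int decode_handle(const uint32_t *fh, int fh_type, struct fid128 *child)
{
	struct nfs_handle h;

	if (fh_type != HANDLE_TYPE)
		return -1;
	memcpy(&h, fh, sizeof(h));
	*child = h.child;
	return 0;
}

int main(void)
{
	struct fid128 c = { 0x200000401ULL, 7, 0 }, p = { 0x200000400ULL, 1, 0 };
	uint32_t fh[16];
	int len = 16, type;
	struct fid128 out;

	type = encode_handle(fh, &len, &c, &p);
	if (type == HANDLE_TYPE && decode_handle(fh, type, &out) == 0)
		printf("decoded seq=%#llx oid=%u (handle is %d words)\n",
		       (unsigned long long)out.seq, out.oid, len);
	return 0;
}
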
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
deleted file mode 100644
index c8a450b5cb18..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_rmtacl.c
- *
- * Lustre Remote User Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_eacl.h"
-#include "llite_internal.h"
-
-static inline __u32 rce_hashfunc(uid_t id)
-{
- return id & (RCE_HASHES - 1);
-}
-
-static inline __u32 ee_hashfunc(uid_t id)
-{
- return id & (EE_HASHES - 1);
-}
-
-u64 rce_ops2valid(int ops)
-{
- switch (ops) {
- case RMT_LSETFACL:
- return OBD_MD_FLRMTLSETFACL;
- case RMT_LGETFACL:
- return OBD_MD_FLRMTLGETFACL;
- case RMT_RSETFACL:
- return OBD_MD_FLRMTRSETFACL;
- case RMT_RGETFACL:
- return OBD_MD_FLRMTRGETFACL;
- default:
- return 0;
- }
-}
-
-static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
-{
- struct rmtacl_ctl_entry *rce;
-
- rce = kzalloc(sizeof(*rce), GFP_NOFS);
- if (!rce)
- return NULL;
-
- INIT_LIST_HEAD(&rce->rce_list);
- rce->rce_key = key;
- rce->rce_ops = ops;
-
- return rce;
-}
-
-static void rce_free(struct rmtacl_ctl_entry *rce)
-{
- if (!list_empty(&rce->rce_list))
- list_del(&rce->rce_list);
-
- kfree(rce);
-}
-
-static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
- pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
- struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
-
- list_for_each_entry(rce, head, rce_list)
- if (rce->rce_key == key)
- return rce;
-
- return NULL;
-}
-
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
-
- spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- spin_unlock(&rct->rct_lock);
- return rce;
-}
-
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
-{
- struct rmtacl_ctl_entry *rce, *e;
-
- rce = rce_alloc(key, ops);
- if (rce == NULL)
- return -ENOMEM;
-
- spin_lock(&rct->rct_lock);
- e = __rct_search(rct, key);
- if (unlikely(e != NULL)) {
- CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
- (int)key, ops);
- rce_free(e);
- }
- list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
- spin_unlock(&rct->rct_lock);
-
- return 0;
-}
-
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
-
- spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- if (rce)
- rce_free(rce);
- spin_unlock(&rct->rct_lock);
-
- return rce ? 0 : -ENOENT;
-}
-
-void rct_init(struct rmtacl_ctl_table *rct)
-{
- int i;
-
- spin_lock_init(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- INIT_LIST_HEAD(&rct->rct_entries[i]);
-}
-
-void rct_fini(struct rmtacl_ctl_table *rct)
-{
- struct rmtacl_ctl_entry *rce;
- int i;
-
- spin_lock(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- while (!list_empty(&rct->rct_entries[i])) {
- rce = list_entry(rct->rct_entries[i].next,
- struct rmtacl_ctl_entry, rce_list);
- rce_free(rce);
- }
- spin_unlock(&rct->rct_lock);
-}
-
-
-static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header)
-{
- struct eacl_entry *ee;
-
- ee = kzalloc(sizeof(*ee), GFP_NOFS);
- if (!ee)
- return NULL;
-
- INIT_LIST_HEAD(&ee->ee_list);
- ee->ee_key = key;
- ee->ee_fid = *fid;
- ee->ee_type = type;
- ee->ee_acl = header;
-
- return ee;
-}
-
-void ee_free(struct eacl_entry *ee)
-{
- if (!list_empty(&ee->ee_list))
- list_del(&ee->ee_list);
-
- if (ee->ee_acl)
- lustre_ext_acl_xattr_free(ee->ee_acl);
-
- kfree(ee);
-}
-
-static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type)
-{
- struct eacl_entry *ee;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
- LASSERT(fid != NULL);
- list_for_each_entry(ee, head, ee_list)
- if (ee->ee_key == key) {
- if (lu_fid_eq(&ee->ee_fid, fid) &&
- ee->ee_type == type) {
- list_del_init(&ee->ee_list);
- return ee;
- }
- }
-
- return NULL;
-}
-
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type)
-{
- struct eacl_entry *ee;
-
- spin_lock(&et->et_lock);
- ee = __et_search_del(et, key, fid, type);
- spin_unlock(&et->et_lock);
- return ee;
-}
-
-void et_search_free(struct eacl_table *et, pid_t key)
-{
- struct eacl_entry *ee, *next;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
- spin_lock(&et->et_lock);
- list_for_each_entry_safe(ee, next, head, ee_list)
- if (ee->ee_key == key)
- ee_free(ee);
-
- spin_unlock(&et->et_lock);
-}
-
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header)
-{
- struct eacl_entry *ee, *e;
-
- ee = ee_alloc(key, fid, type, header);
- if (ee == NULL)
- return -ENOMEM;
-
- spin_lock(&et->et_lock);
- e = __et_search_del(et, key, fid, type);
- if (unlikely(e != NULL)) {
- CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
- (int)key, PFID(fid), type);
- ee_free(e);
- }
- list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
- spin_unlock(&et->et_lock);
-
- return 0;
-}
-
-void et_init(struct eacl_table *et)
-{
- int i;
-
- spin_lock_init(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- INIT_LIST_HEAD(&et->et_entries[i]);
-}
-
-void et_fini(struct eacl_table *et)
-{
- struct eacl_entry *ee;
- int i;
-
- spin_lock(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- while (!list_empty(&et->et_entries[i])) {
- ee = list_entry(et->et_entries[i].next,
- struct eacl_entry, ee_list);
- ee_free(ee);
- }
- spin_unlock(&et->et_lock);
-}
-
-#endif
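
The remote-ACL tables removed above are plain fixed-size hash tables: the
bucket is selected by masking the key against a power-of-two bucket count,
each bucket is a list searched linearly, and an insert first evicts any stale
entry with the same key. A compact user-space sketch of that bucket scheme
(locking is omitted here; the kernel code holds a spinlock around every table
walk, and the names below are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NHASH 32			/* must be a power of two for the mask */

struct entry {
	int key;
	int ops;
	struct entry *next;
};

static struct entry *buckets[NHASH];

static unsigned int hashfn(int key)
{
	return (unsigned int)key & (NHASH - 1);
}

static struct entry *table_search(int key)
{
	struct entry *e;

	for (e = buckets[hashfn(key)]; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

static void table_del(int key)
{
	struct entry **p;

	for (p = &buckets[hashfn(key)]; *p; p = &(*p)->next) {
		if ((*p)->key == key) {
			struct entry *victim = *p;

			*p = victim->next;
			free(victim);
			return;
		}
	}
}

static int table_add(int key, int ops)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	table_del(key);			/* drop any stale entry, as rct_add() does */
	e->key = key;
	e->ops = ops;
	e->next = buckets[hashfn(key)];
	buckets[hashfn(key)] = e;
	return 0;
}

int main(void)
{
	table_add(1234, 1);
	table_add(1234, 2);		/* replaces the stale entry */
	printf("ops for 1234: %d\n", table_search(1234)->ops);
	table_del(1234);
	printf("found after delete: %s\n", table_search(1234) ? "yes" : "no");
	return 0;
}
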
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
deleted file mode 100644
index 5f0d80cc9718..000000000000
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ /dev/null
@@ -1,880 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/*
- * linux/drivers/block/loop.c
- *
- * Written by Theodore Ts'o, 3/29/93
- *
- * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
- * permitted under the GNU General Public License.
- *
- * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
- * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
- *
- * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
- *
- * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
- *
- * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
- *
- * Loadable modules and other fixes by AK, 1998
- *
- * Maximum number of loop devices now dynamic via max_loop module parameter.
- * Russell Kroll <rkroll@exploits.org> 19990701
- *
- * Maximum number of loop devices when compiled-in now selectable by passing
- * max_loop=<1-255> to the kernel on boot.
- * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
- *
- * Completely rewrite request handling to be make_request_fn style and
- * non blocking, pushing work to a helper thread. Lots of fixes from
- * Al Viro too.
- * Jens Axboe <axboe@suse.de>, Nov 2000
- *
- * Support up to 256 loop devices
- * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
- *
- * Support for falling back on the write file operation when the address space
- * operations prepare_write and/or commit_write are not available on the
- * backing filesystem.
- * Anton Altaparmakov, 16 Feb 2005
- *
- * Still To Fix:
- * - Advisory locking is ignored here.
- * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/major.h>
-#include <linux/wait.h>
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/init.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev() */
-#include <linux/completion.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/pagevec.h>
-#include <linux/uaccess.h>
-
-#include "../include/lustre_lib.h"
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-
-#define LLOOP_MAX_SEGMENTS LNET_MAX_IOV
-
-/* Possible states of device */
-enum {
- LLOOP_UNBOUND,
- LLOOP_BOUND,
- LLOOP_RUNDOWN,
-};
-
-struct lloop_device {
- int lo_number;
- int lo_refcnt;
- loff_t lo_offset;
- loff_t lo_sizelimit;
- int lo_flags;
- struct file *lo_backing_file;
- struct block_device *lo_device;
- unsigned lo_blocksize;
-
- gfp_t old_gfp_mask;
-
- spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
- int lo_state;
- struct semaphore lo_sem;
- struct mutex lo_ctl_mutex;
- atomic_t lo_pending;
- wait_queue_head_t lo_bh_wait;
-
- struct request_queue *lo_queue;
-
- const struct lu_env *lo_env;
- struct cl_io lo_io;
- struct ll_dio_pages lo_pvec;
-
- /* data to handle bio for lustre. */
- struct lo_request_data {
- struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
- loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
- } lo_requests[1];
-};
-
-/*
- * Loop flags
- */
-enum {
- LO_FLAGS_READ_ONLY = 1,
-};
-
-static int lloop_major;
-#define MAX_LOOP_DEFAULT 16
-static int max_loop = MAX_LOOP_DEFAULT;
-static struct lloop_device *loop_dev;
-static struct gendisk **disks;
-static struct mutex lloop_mutex;
-static void *ll_iocontrol_magic;
-
-static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
-{
- loff_t size, offset, loopsize;
-
- /* Compute loopsize in bytes */
- size = i_size_read(file->f_mapping->host);
- offset = lo->lo_offset;
- loopsize = size - offset;
- if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
- loopsize = lo->lo_sizelimit;
-
- /*
- * Unfortunately, if we want to do I/O on the device,
- * the number of 512-byte sectors has to fit into a sector_t.
- */
- return loopsize >> 9;
-}
-
-static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
-{
- const struct lu_env *env = lo->lo_env;
- struct cl_io *io = &lo->lo_io;
- struct inode *inode = file_inode(lo->lo_backing_file);
- struct cl_object *obj = ll_i2info(inode)->lli_clob;
- pgoff_t offset;
- int ret;
- int rw;
- u32 page_count = 0;
- struct bio_vec bvec;
- struct bvec_iter iter;
- struct bio *bio;
- ssize_t bytes;
-
- struct ll_dio_pages *pvec = &lo->lo_pvec;
- struct page **pages = pvec->ldp_pages;
- loff_t *offsets = pvec->ldp_offsets;
-
- truncate_inode_pages(inode->i_mapping, 0);
-
- /* initialize the IO */
- memset(io, 0, sizeof(*io));
- io->ci_obj = obj;
- ret = cl_io_init(env, io, CIT_MISC, obj);
- if (ret)
- return io->ci_result;
- io->ci_lockreq = CILR_NEVER;
-
- LASSERT(head != NULL);
- rw = head->bi_rw;
- for (bio = head; bio != NULL; bio = bio->bi_next) {
- LASSERT(rw == bio->bi_rw);
-
- offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, iter) {
- BUG_ON(bvec.bv_offset != 0);
- BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
-
- pages[page_count] = bvec.bv_page;
- offsets[page_count] = offset;
- page_count++;
- offset += bvec.bv_len;
- }
- LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
- }
-
- ll_stats_ops_tally(ll_i2sbi(inode),
- (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
- page_count);
-
- pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
- pvec->ldp_nr = page_count;
-
- /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
- * write those pages into OST. Even worse case is that more pages
- * would be asked to write out to swap space, and then finally get here
- * again.
- * Unfortunately this is NOT easy to fix.
- * Thoughts on solution:
- * 0. Define a reserved pool for cl_pages, which could be a list of
- * pre-allocated cl_pages;
-	 * 1. Define a new operation in cl_object_operations{}, say clo_depth,
-	 *    which measures how many layers this lustre object has. Generally
-	 *    speaking, the depth would be 2, one for llite, and one for lovsub.
- * However, for SNS, there will be more since we need additional page
- * to store parity;
- * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
- * pool. Afterwards, the clio would allocate the pages from reserved
- * pool, this guarantees we needn't allocate the cl_pages from
- * generic cl_page slab cache.
- * Of course, if there is NOT enough pages in the pool, we might
- * be asked to write less pages once, this purely depends on
- * implementation. Anyway, we should be careful to avoid deadlocking.
- */
- mutex_lock(&inode->i_mutex);
- bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
- mutex_unlock(&inode->i_mutex);
- cl_io_fini(env, io);
- return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
-}
-
-/*
- * Add bio to back of pending list
- */
-static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lo->lo_lock, flags);
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
- spin_unlock_irqrestore(&lo->lo_lock, flags);
-
- atomic_inc(&lo->lo_pending);
- if (waitqueue_active(&lo->lo_bh_wait))
- wake_up(&lo->lo_bh_wait);
-}
-
-/*
- * Grab first pending buffer
- */
-static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
-{
- struct bio *first;
- struct bio **bio;
- unsigned int count = 0;
- unsigned int page_count = 0;
- int rw;
-
- spin_lock_irq(&lo->lo_lock);
- first = lo->lo_bio;
- if (unlikely(first == NULL)) {
- spin_unlock_irq(&lo->lo_lock);
- return 0;
- }
-
- /* TODO: need to split the bio, too bad. */
- LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
-
- rw = first->bi_rw;
- bio = &lo->lo_bio;
- while (*bio && (*bio)->bi_rw == rw) {
-		CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
- (unsigned long long)(*bio)->bi_iter.bi_sector,
- (*bio)->bi_iter.bi_size,
- page_count, (*bio)->bi_vcnt);
- if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
- break;
-
- page_count += (*bio)->bi_vcnt;
- count++;
- bio = &(*bio)->bi_next;
- }
- if (*bio) {
-		/* Some of the bios can't be merged. */
- lo->lo_bio = *bio;
- *bio = NULL;
- } else {
- /* Hit the end of queue */
- lo->lo_biotail = NULL;
- lo->lo_bio = NULL;
- }
- *req = first;
- spin_unlock_irq(&lo->lo_lock);
- return count;
-}
-
-static void loop_make_request(struct request_queue *q, struct bio *old_bio)
-{
- struct lloop_device *lo = q->queuedata;
- int rw = bio_rw(old_bio);
- int inactive;
-
- blk_queue_split(q, &old_bio, q->bio_split);
-
- if (!lo)
- goto err;
-
- CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_iter.bi_sector,
- old_bio->bi_iter.bi_size);
-
- spin_lock_irq(&lo->lo_lock);
- inactive = lo->lo_state != LLOOP_BOUND;
- spin_unlock_irq(&lo->lo_lock);
- if (inactive)
- goto err;
-
- if (rw == WRITE) {
- if (lo->lo_flags & LO_FLAGS_READ_ONLY)
- goto err;
- } else if (rw == READA) {
- rw = READ;
- } else if (rw != READ) {
- CERROR("lloop: unknown command (%x)\n", rw);
- goto err;
- }
- loop_add_bio(lo, old_bio);
- return;
-err:
- bio_io_error(old_bio);
-}
-
-static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
-{
- int ret;
- ret = do_bio_lustrebacked(lo, bio);
- while (bio) {
- struct bio *tmp = bio->bi_next;
- bio->bi_next = NULL;
- bio->bi_error = ret;
- bio_endio(bio);
- bio = tmp;
- }
-}
-
-static inline int loop_active(struct lloop_device *lo)
-{
- return atomic_read(&lo->lo_pending) ||
- (lo->lo_state == LLOOP_RUNDOWN);
-}
-
-/*
- * worker thread that handles reads/writes to file backed loop devices,
- * to avoid blocking in our make_request_fn.
- */
-static int loop_thread(void *data)
-{
- struct lloop_device *lo = data;
- struct bio *bio;
- unsigned int count;
- unsigned long times = 0;
- unsigned long total_count = 0;
-
- struct lu_env *env;
- int refcheck;
- int ret = 0;
-
- set_user_nice(current, MIN_NICE);
-
- lo->lo_state = LLOOP_BOUND;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- ret = PTR_ERR(env);
- goto out;
- }
-
- lo->lo_env = env;
- memset(&lo->lo_pvec, 0, sizeof(lo->lo_pvec));
- lo->lo_pvec.ldp_pages = lo->lo_requests[0].lrd_pages;
- lo->lo_pvec.ldp_offsets = lo->lo_requests[0].lrd_offsets;
-
- /*
- * up sem, we are running
- */
- up(&lo->lo_sem);
-
- for (;;) {
- wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!atomic_read(&lo->lo_pending)) {
- int exiting = 0;
- spin_lock_irq(&lo->lo_lock);
- exiting = (lo->lo_state == LLOOP_RUNDOWN);
- spin_unlock_irq(&lo->lo_lock);
- if (exiting)
- break;
- }
-
- bio = NULL;
- count = loop_get_bio(lo, &bio);
- if (!count) {
- CWARN("lloop(minor: %d): missing bio\n", lo->lo_number);
- continue;
- }
-
- total_count += count;
- if (total_count < count) { /* overflow */
- total_count = count;
- times = 1;
- } else {
- times++;
- }
- if ((times & 127) == 0) {
- CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
- total_count, times, total_count / times);
- }
-
- LASSERT(bio != NULL);
- LASSERT(count <= atomic_read(&lo->lo_pending));
- loop_handle_bio(lo, bio);
- atomic_sub(count, &lo->lo_pending);
- }
- cl_env_put(env, &refcheck);
-
-out:
- up(&lo->lo_sem);
- return ret;
-}
-
-static int loop_set_fd(struct lloop_device *lo, struct file *unused,
- struct block_device *bdev, struct file *file)
-{
- struct inode *inode;
- struct address_space *mapping;
- int lo_flags = 0;
- int error;
- loff_t size;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- error = -EBUSY;
- if (lo->lo_state != LLOOP_UNBOUND)
- goto out;
-
- mapping = file->f_mapping;
- inode = mapping->host;
-
- error = -EINVAL;
- if (!S_ISREG(inode->i_mode) || inode->i_sb->s_magic != LL_SUPER_MAGIC)
- goto out;
-
- if (!(file->f_mode & FMODE_WRITE))
- lo_flags |= LO_FLAGS_READ_ONLY;
-
- size = get_loop_size(lo, file);
-
- if ((loff_t)(sector_t)size != size) {
- error = -EFBIG;
- goto out;
- }
-
-	/* remove all pages from the page cache so that no dirty pages remain */
- truncate_inode_pages(mapping, 0);
-
- set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
-
- lo->lo_blocksize = PAGE_CACHE_SIZE;
- lo->lo_device = bdev;
- lo->lo_flags = lo_flags;
- lo->lo_backing_file = file;
- lo->lo_sizelimit = 0;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
-
- lo->lo_bio = lo->lo_biotail = NULL;
-
- /*
- * set queue make_request_fn, and add limits based on lower level
- * device
- */
- blk_queue_make_request(lo->lo_queue, loop_make_request);
- lo->lo_queue->queuedata = lo;
-
- /* queue parameters */
- CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
- blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)PAGE_CACHE_SIZE);
- blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
- blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
-
- set_capacity(disks[lo->lo_number], size);
- bd_set_size(bdev, size << 9);
-
- set_blocksize(bdev, lo->lo_blocksize);
-
- kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
- down(&lo->lo_sem);
- return 0;
-
-out:
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return error;
-}
-
-static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
- int count)
-{
- struct file *filp = lo->lo_backing_file;
- gfp_t gfp = lo->old_gfp_mask;
-
- if (lo->lo_state != LLOOP_BOUND)
- return -ENXIO;
-
- if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */
- return -EBUSY;
-
- if (filp == NULL)
- return -EINVAL;
-
- spin_lock_irq(&lo->lo_lock);
- lo->lo_state = LLOOP_RUNDOWN;
- spin_unlock_irq(&lo->lo_lock);
- wake_up(&lo->lo_bh_wait);
-
- down(&lo->lo_sem);
- lo->lo_backing_file = NULL;
- lo->lo_device = NULL;
- lo->lo_offset = 0;
- lo->lo_sizelimit = 0;
- lo->lo_flags = 0;
- invalidate_bdev(bdev);
- set_capacity(disks[lo->lo_number], 0);
- bd_set_size(bdev, 0);
- mapping_set_gfp_mask(filp->f_mapping, gfp);
- lo->lo_state = LLOOP_UNBOUND;
- fput(filp);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return 0;
-}
-
-static int lo_open(struct block_device *bdev, fmode_t mode)
-{
- struct lloop_device *lo = bdev->bd_disk->private_data;
-
- mutex_lock(&lo->lo_ctl_mutex);
- lo->lo_refcnt++;
- mutex_unlock(&lo->lo_ctl_mutex);
-
- return 0;
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
- struct lloop_device *lo = disk->private_data;
-
- mutex_lock(&lo->lo_ctl_mutex);
- --lo->lo_refcnt;
- mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-/* lloop device node's ioctl function. */
-static int lo_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct lloop_device *lo = bdev->bd_disk->private_data;
- struct inode *inode = NULL;
- int err = 0;
-
- mutex_lock(&lloop_mutex);
- switch (cmd) {
- case LL_IOC_LLOOP_DETACH: {
- err = loop_clr_fd(lo, bdev, 2);
- if (err == 0)
- blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
- break;
- }
-
- case LL_IOC_LLOOP_INFO: {
- struct lu_fid fid;
-
- if (lo->lo_backing_file == NULL) {
- err = -ENOENT;
- break;
- }
- if (inode == NULL)
- inode = file_inode(lo->lo_backing_file);
- if (lo->lo_state == LLOOP_BOUND)
- fid = ll_i2info(inode)->lli_fid;
- else
- fid_zero(&fid);
-
- if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
- err = -EFAULT;
- break;
- }
-
- default:
- err = -EINVAL;
- break;
- }
- mutex_unlock(&lloop_mutex);
-
- return err;
-}
-
-static struct block_device_operations lo_fops = {
- .owner = THIS_MODULE,
- .open = lo_open,
- .release = lo_release,
- .ioctl = lo_ioctl,
-};
-
-/* Dynamic iocontrol callback.
- * This callback is registered in lloop_init and will be called by
- * ll_iocontrol_call.
- *
- * This is an llite regular-file ioctl function. It is responsible for
- * attaching a file to, or detaching it from, an lloop device identified by
- * its device number.
- */
-static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
- unsigned int cmd, unsigned long arg,
- void *magic, int *rcp)
-{
- struct lloop_device *lo = NULL;
- struct block_device *bdev = NULL;
- int err = 0;
- dev_t dev;
-
- if (magic != ll_iocontrol_magic)
- return LLIOC_CONT;
-
- if (disks == NULL) {
- err = -ENODEV;
- goto out1;
- }
-
-	CWARN("Enter lloop_ioctl\n");
-
- mutex_lock(&lloop_mutex);
- switch (cmd) {
- case LL_IOC_LLOOP_ATTACH: {
- struct lloop_device *lo_free = NULL;
- int i;
-
- for (i = 0; i < max_loop; i++, lo = NULL) {
- lo = &loop_dev[i];
- if (lo->lo_state == LLOOP_UNBOUND) {
- if (!lo_free)
- lo_free = lo;
- continue;
- }
- if (file_inode(lo->lo_backing_file) == file_inode(file))
- break;
- }
- if (lo || !lo_free) {
- err = -EBUSY;
- goto out;
- }
-
- lo = lo_free;
- dev = MKDEV(lloop_major, lo->lo_number);
-
-		/* bail out if we cannot write the device number back to user space */
- if (put_user((long)old_encode_dev(dev), (long *)arg)) {
- err = -EFAULT;
- goto out;
- }
-
- bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- goto out;
- }
-
- get_file(file);
- err = loop_set_fd(lo, NULL, bdev, file);
- if (err) {
- fput(file);
- blkdev_put(bdev, 0);
- }
-
- break;
- }
-
- case LL_IOC_LLOOP_DETACH_BYDEV: {
- int minor;
-
- dev = old_decode_dev(arg);
- if (MAJOR(dev) != lloop_major) {
- err = -EINVAL;
- goto out;
- }
-
- minor = MINOR(dev);
- if (minor > max_loop - 1) {
- err = -EINVAL;
- goto out;
- }
-
- lo = &loop_dev[minor];
- if (lo->lo_state != LLOOP_BOUND) {
- err = -EINVAL;
- goto out;
- }
-
- bdev = lo->lo_device;
- err = loop_clr_fd(lo, bdev, 1);
- if (err == 0)
- blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
-
- break;
- }
-
- default:
- err = -EINVAL;
- break;
- }
-
-out:
- mutex_unlock(&lloop_mutex);
-out1:
- if (rcp)
- *rcp = err;
- return LLIOC_STOP;
-}
-
-static int __init lloop_init(void)
-{
- int i;
- unsigned int cmdlist[] = {
- LL_IOC_LLOOP_ATTACH,
- LL_IOC_LLOOP_DETACH_BYDEV,
- };
-
- if (max_loop < 1 || max_loop > 256) {
- max_loop = MAX_LOOP_DEFAULT;
- CWARN("lloop: invalid max_loop (must be between 1 and 256), using default (%u)\n",
- max_loop);
- }
-
- lloop_major = register_blkdev(0, "lloop");
- if (lloop_major < 0)
- return -EIO;
-
- CDEBUG(D_CONFIG, "registered lloop major %d with %u minors\n",
- lloop_major, max_loop);
-
- ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
- if (ll_iocontrol_magic == NULL)
- goto out_mem1;
-
- loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
- if (!loop_dev)
- goto out_mem1;
-
- disks = kcalloc(max_loop, sizeof(*disks), GFP_KERNEL);
- if (!disks)
- goto out_mem2;
-
- for (i = 0; i < max_loop; i++) {
- disks[i] = alloc_disk(1);
- if (!disks[i])
- goto out_mem3;
- }
-
- mutex_init(&lloop_mutex);
-
- for (i = 0; i < max_loop; i++) {
- struct lloop_device *lo = &loop_dev[i];
- struct gendisk *disk = disks[i];
-
- lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
- if (!lo->lo_queue)
- goto out_mem4;
-
- mutex_init(&lo->lo_ctl_mutex);
- sema_init(&lo->lo_sem, 0);
- init_waitqueue_head(&lo->lo_bh_wait);
- lo->lo_number = i;
- spin_lock_init(&lo->lo_lock);
- disk->major = lloop_major;
- disk->first_minor = i;
- disk->fops = &lo_fops;
- sprintf(disk->disk_name, "lloop%d", i);
- disk->private_data = lo;
- disk->queue = lo->lo_queue;
- }
-
-	/* We cannot fail after we call this, so another loop! */
- for (i = 0; i < max_loop; i++)
- add_disk(disks[i]);
- return 0;
-
-out_mem4:
- while (i--)
- blk_cleanup_queue(loop_dev[i].lo_queue);
- i = max_loop;
-out_mem3:
- while (i--)
- put_disk(disks[i]);
- kfree(disks);
-out_mem2:
- kfree(loop_dev);
-out_mem1:
- unregister_blkdev(lloop_major, "lloop");
- ll_iocontrol_unregister(ll_iocontrol_magic);
- CERROR("lloop: ran out of memory\n");
- return -ENOMEM;
-}
-
-static void lloop_exit(void)
-{
- int i;
-
- ll_iocontrol_unregister(ll_iocontrol_magic);
- for (i = 0; i < max_loop; i++) {
- del_gendisk(disks[i]);
- blk_cleanup_queue(loop_dev[i].lo_queue);
- put_disk(disks[i]);
- }
-
- unregister_blkdev(lloop_major, "lloop");
-
- kfree(disks);
- kfree(loop_dev);
-}
-
-module_init(lloop_init);
-module_exit(lloop_exit);
-
-module_param(max_loop, int, 0444);
-MODULE_PARM_DESC(max_loop, "maximum number of lloop devices");
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre virtual block device");
-MODULE_LICENSE("GPL");
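
The batching rule implemented by loop_get_bio() above can be hard to follow
inside the pointer-to-pointer walk: consecutive bios of the same direction are
pulled off the head of the queue until adding the next one would push the
combined segment count past LLOOP_MAX_SEGMENTS. Below is a minimal,
self-contained userspace sketch of just that rule; struct fake_bio, MAX_SEGS
and get_batch() are illustrative stand-ins and are not taken from the removed
driver, which additionally LASSERTs that a single request never exceeds the
segment budget (the sample data below respects that assumption).

#include <stdio.h>
#include <stddef.h>

#define MAX_SEGS 4	/* stand-in for LLOOP_MAX_SEGMENTS */

struct fake_bio {
	int write;		/* 1 = write, 0 = read */
	unsigned int vcnt;	/* number of segments in this request */
	struct fake_bio *next;
};

/*
 * Detach a mergeable batch from the head of *head and return how many
 * requests it contains. As in loop_get_bio(), a batch is a run of requests
 * with the same direction whose total segment count fits in MAX_SEGS.
 */
static unsigned int get_batch(struct fake_bio **head, struct fake_bio **batch)
{
	struct fake_bio *first = *head;
	struct fake_bio **p = head;
	unsigned int count = 0, segs = 0;

	if (!first)
		return 0;

	while (*p && (*p)->write == first->write &&
	       segs + (*p)->vcnt <= MAX_SEGS) {
		segs += (*p)->vcnt;
		count++;
		p = &(*p)->next;
	}

	*batch = first;	/* batch runs from first up to, but not including, *p */
	*head = *p;	/* the queue now starts at the first unmerged request */
	*p = NULL;	/* terminate the detached batch */
	return count;
}

int main(void)
{
	/* sample queue: three writes (2+1+3 segments), then two reads */
	struct fake_bio bios[5] = {
		{ 1, 2, &bios[1] }, { 1, 1, &bios[2] }, { 1, 3, &bios[3] },
		{ 0, 1, &bios[4] }, { 0, 2, NULL },
	};
	struct fake_bio *queue = &bios[0], *batch;
	unsigned int n;

	while ((n = get_batch(&queue, &batch)) != 0)
		printf("batch of %u %s request(s)\n",
		       n, batch->write ? "write" : "read");
	return 0;
}

With MAX_SEGS set to 4, the sample queue splits into three batches: the first
two writes (3 segments), the third write alone (adding it would have exceeded
the budget), and then the two reads.
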
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
deleted file mode 100644
index 08d32027d7be..000000000000
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ /dev/null
@@ -1,1502 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre_lite.h"
-#include "../include/lprocfs_status.h"
-#include <linux/seq_file.h>
-#include "../include/obd_support.h"
-
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-/* /proc/lustre/llite mount point registration */
-static struct file_operations ll_rw_extents_stats_fops;
-static struct file_operations ll_rw_extents_stats_pp_fops;
-static struct file_operations ll_rw_offset_stats_fops;
-
-static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%u\n", osfs.os_bsize);
-
- return rc;
-}
-LUSTRE_RO_ATTR(blocksize);
-
-static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_blocks;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- rc = sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytestotal);
-
-static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bfree;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- rc = sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesfree);
-
-static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bavail;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- rc = sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesavail);
-
-static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_files);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filestotal);
-
-static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- struct obd_statfs osfs;
- int rc;
-
- rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_ffree);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filesfree);
-
-static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%s client\n",
- sbi->ll_flags & LL_SBI_RMT_CLIENT ? "remote" : "local");
-}
-LUSTRE_RO_ATTR(client_type);
-
-static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name);
-}
-LUSTRE_RO_ATTR(fstype);
-
-static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
-}
-LUSTRE_RO_ATTR(uuid);
-
-static int ll_site_stats_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
-
- /*
- * See description of statistical counters in struct cl_site, and
- * struct lu_site.
- */
- return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
-}
-LPROC_SEQ_FOPS_RO(ll_site_stats);
-
-static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- long pages_number;
- int mult;
-
- spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_pages;
- spin_unlock(&sbi->ll_lock);
-
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
-}
-
-static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
- pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
-
- if (pages_number > totalram_pages / 2) {
- CERROR("can't set file readahead more than %lu MB\n",
- totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_pages = pages_number;
- spin_unlock(&sbi->ll_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_read_ahead_mb);
-
-static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- long pages_number;
- int mult;
-
- spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
- spin_unlock(&sbi->ll_lock);
-
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
-}
-
-static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
- if (pages_number > sbi->ll_ra_info.ra_max_pages) {
- CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
- sbi->ll_ra_info.ra_max_pages);
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
- spin_unlock(&sbi->ll_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
-
-static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- long pages_number;
- int mult;
-
- spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
- spin_unlock(&sbi->ll_lock);
-
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
-}
-
-static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
-	/* Cap this at the current max readahead window size; the readahead
-	 * algorithm does this anyway, so it is pointless to set it larger. */
- if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
- CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
- sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
- spin_unlock(&sbi->ll_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
-
-static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = &sbi->ll_cache;
- int shift = 20 - PAGE_CACHE_SHIFT;
- int max_cached_mb;
- int unused_mb;
-
- max_cached_mb = cache->ccc_lru_max >> shift;
- unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
- seq_printf(m,
- "users: %d\n"
- "max_cached_mb: %d\n"
- "used_mb: %d\n"
- "unused_mb: %d\n"
- "reclaim_count: %u\n",
- atomic_read(&cache->ccc_users),
- max_cached_mb,
- max_cached_mb - unused_mb,
- unused_mb,
- cache->ccc_lru_shrinkers);
- return 0;
-}
-
-static ssize_t ll_max_cached_mb_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct super_block *sb = ((struct seq_file *)file->private_data)->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = &sbi->ll_cache;
- int mult, rc, pages_number;
- int diff = 0;
- int nrpages = 0;
- char kernbuf[128];
-
- if (count >= sizeof(kernbuf))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
- kernbuf[count] = 0;
-
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
- buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
- kernbuf;
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
- if (rc)
- return rc;
-
- if (pages_number < 0 || pages_number > totalram_pages) {
- CERROR("%s: can't set max cache more than %lu MB\n",
- ll_get_fsname(sb, NULL, 0),
- totalram_pages >> (20 - PAGE_CACHE_SHIFT));
- return -ERANGE;
- }
-
- spin_lock(&sbi->ll_lock);
- diff = pages_number - cache->ccc_lru_max;
- spin_unlock(&sbi->ll_lock);
-
- /* easy - add more LRU slots. */
- if (diff >= 0) {
- atomic_add(diff, &cache->ccc_lru_left);
- rc = 0;
- goto out;
- }
-
- diff = -diff;
- while (diff > 0) {
- int tmp;
-
- /* reduce LRU budget from free slots. */
- do {
- int ov, nv;
-
- ov = atomic_read(&cache->ccc_lru_left);
- if (ov == 0)
- break;
-
- nv = ov > diff ? ov - diff : 0;
- rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
- if (likely(ov == rc)) {
- diff -= ov - nv;
- nrpages += ov - nv;
- break;
- }
- } while (1);
-
- if (diff <= 0)
- break;
-
- if (sbi->ll_dt_exp == NULL) { /* being initialized */
- rc = -ENODEV;
- break;
- }
-
- /* difficult - have to ask OSCs to drop LRU slots. */
- tmp = diff << 1;
- rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
- sizeof(KEY_CACHE_LRU_SHRINK),
- KEY_CACHE_LRU_SHRINK,
- sizeof(tmp), &tmp, NULL);
- if (rc < 0)
- break;
- }
-
-out:
- if (rc >= 0) {
- spin_lock(&sbi->ll_lock);
- cache->ccc_lru_max = pages_number;
- spin_unlock(&sbi->ll_lock);
- rc = count;
- } else {
- atomic_add(nrpages, &cache->ccc_lru_left);
- }
- return rc;
-}
-LPROC_SEQ_FOPS(ll_max_cached_mb);
-
-static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
-}
-
-static ssize_t checksum_pages_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- if (!sbi->ll_dt_exp)
- /* Not set up yet */
- return -EAGAIN;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
- if (val)
- sbi->ll_flags |= LL_SBI_CHECKSUM;
- else
- sbi->ll_flags &= ~LL_SBI_CHECKSUM;
-
- rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
- KEY_CHECKSUM, sizeof(val), &val, NULL);
- if (rc)
- CWARN("Failed to set OSC checksum flags: %d\n", rc);
-
- return count;
-}
-LUSTRE_RW_ATTR(checksum_pages);
-
-static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
- enum stats_track_type type)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- if (sbi->ll_stats_track_type == type)
- return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
- else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
- return sprintf(buf, "0 (all)\n");
- else
- return sprintf(buf, "untracked\n");
-}
-
-static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
- size_t count,
- enum stats_track_type type)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long pid;
-
- rc = kstrtoul(buffer, 10, &pid);
- if (rc)
- return rc;
- sbi->ll_stats_track_id = pid;
- if (pid == 0)
- sbi->ll_stats_track_type = STATS_TRACK_ALL;
- else
- sbi->ll_stats_track_type = type;
- lprocfs_clear_stats(sbi->ll_stats);
- return count;
-}
-
-static ssize_t stats_track_pid_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
-}
-
-static ssize_t stats_track_pid_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
-}
-LUSTRE_RW_ATTR(stats_track_pid);
-
-static ssize_t stats_track_ppid_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
-}
-
-static ssize_t stats_track_ppid_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
-}
-LUSTRE_RW_ATTR(stats_track_ppid);
-
-static ssize_t stats_track_gid_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
-}
-
-static ssize_t stats_track_gid_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
-}
-LUSTRE_RW_ATTR(stats_track_gid);
-
-static ssize_t statahead_max_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_sa_max);
-}
-
-static ssize_t statahead_max_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val <= LL_SA_RPC_MAX)
- sbi->ll_sa_max = val;
- else
- CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
- val, LL_SA_RPC_MAX);
-
- return count;
-}
-LUSTRE_RW_ATTR(statahead_max);
-
-static ssize_t statahead_agl_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
-}
-
-static ssize_t statahead_agl_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val)
- sbi->ll_flags |= LL_SBI_AGL_ENABLED;
- else
- sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
-
- return count;
-}
-LUSTRE_RW_ATTR(statahead_agl);
-
-static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
-{
- struct super_block *sb = m->private;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
-
- seq_printf(m,
- "statahead total: %u\n"
- "statahead wrong: %u\n"
- "agl total: %u\n",
- atomic_read(&sbi->ll_sa_total),
- atomic_read(&sbi->ll_sa_wrong),
- atomic_read(&sbi->ll_agl_total));
- return 0;
-}
-LPROC_SEQ_FOPS_RO(ll_statahead_stats);
-
-static ssize_t lazystatfs_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0);
-}
-
-static ssize_t lazystatfs_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val)
- sbi->ll_flags |= LL_SBI_LAZYSTATFS;
- else
- sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
-
- return count;
-}
-LUSTRE_RW_ATTR(lazystatfs);
-
-static ssize_t max_easize_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- unsigned int ealen;
- int rc;
-
- rc = ll_get_max_mdsize(sbi, &ealen);
- if (rc)
- return rc;
-
- return sprintf(buf, "%u\n", ealen);
-}
-LUSTRE_RO_ATTR(max_easize);
-
-static ssize_t default_easize_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- unsigned int ealen;
- int rc;
-
- rc = ll_get_default_mdsize(sbi, &ealen);
- if (rc)
- return rc;
-
- return sprintf(buf, "%u\n", ealen);
-}
-LUSTRE_RO_ATTR(default_easize);
-
-static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
-{
- const char *str[] = LL_SBI_FLAGS;
- struct super_block *sb = m->private;
- int flags = ll_s2sbi(sb)->ll_flags;
- int i = 0;
-
- while (flags != 0) {
- if (ARRAY_SIZE(str) <= i) {
- CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
- ll_get_fsname(sb, NULL, 0));
- return -EINVAL;
- }
-
- if (flags & 0x1)
- seq_printf(m, "%s ", str[i]);
- flags >>= 1;
- ++i;
- }
- seq_printf(m, "\b\n");
- return 0;
-}
-LPROC_SEQ_FOPS_RO(ll_sbi_flags);
-
-static ssize_t xattr_cache_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
-}
-
-static ssize_t xattr_cache_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val != 0 && val != 1)
- return -ERANGE;
-
- if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
- return -ENOTSUPP;
-
- sbi->ll_xattr_cache_enabled = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(xattr_cache);
-
-static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
- /* { "mntpt_path", ll_rd_path, 0, 0 }, */
- { "site", &ll_site_stats_fops, NULL, 0 },
- /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
- { "max_cached_mb", &ll_max_cached_mb_fops, NULL },
- { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
- { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
- { NULL }
-};
-
-#define MAX_STRING_SIZE 128
-
-static struct attribute *llite_attrs[] = {
- &lustre_attr_blocksize.attr,
- &lustre_attr_kbytestotal.attr,
- &lustre_attr_kbytesfree.attr,
- &lustre_attr_kbytesavail.attr,
- &lustre_attr_filestotal.attr,
- &lustre_attr_filesfree.attr,
- &lustre_attr_client_type.attr,
- &lustre_attr_fstype.attr,
- &lustre_attr_uuid.attr,
- &lustre_attr_max_read_ahead_mb.attr,
- &lustre_attr_max_read_ahead_per_file_mb.attr,
- &lustre_attr_max_read_ahead_whole_mb.attr,
- &lustre_attr_checksum_pages.attr,
- &lustre_attr_stats_track_pid.attr,
- &lustre_attr_stats_track_ppid.attr,
- &lustre_attr_stats_track_gid.attr,
- &lustre_attr_statahead_max.attr,
- &lustre_attr_statahead_agl.attr,
- &lustre_attr_lazystatfs.attr,
- &lustre_attr_max_easize.attr,
- &lustre_attr_default_easize.attr,
- &lustre_attr_xattr_cache.attr,
- NULL,
-};
-
-static void llite_sb_release(struct kobject *kobj)
-{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
- complete(&sbi->ll_kobj_unregister);
-}
-
-static struct kobj_type llite_ktype = {
- .default_attrs = llite_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = llite_sb_release,
-};
-
-static const struct llite_file_opcode {
- __u32 opcode;
- __u32 type;
- const char *opname;
-} llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
- /* file operation */
- { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
- { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
- { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
- "read_bytes" },
- { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
- "write_bytes" },
- { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
- "brw_read" },
- { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
- "brw_write" },
- { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
- "osc_read" },
- { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
- "osc_write" },
- { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
- { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
- { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
- { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" },
- { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" },
- { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" },
- { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" },
- /* inode operation */
- { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
- { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
- { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
- { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
- /* dir inode operation */
- { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" },
- { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" },
- { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" },
- { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" },
- { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" },
- { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" },
- { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" },
- { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" },
- /* special inode operation */
- { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
- { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
- { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
- { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
- { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
- { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
- { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
- { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
-};
-
-void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
-{
- if (!sbi->ll_stats)
- return;
- if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
- lprocfs_counter_add(sbi->ll_stats, op, count);
- else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
- sbi->ll_stats_track_id == current->pid)
- lprocfs_counter_add(sbi->ll_stats, op, count);
- else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
- sbi->ll_stats_track_id == current->real_parent->pid)
- lprocfs_counter_add(sbi->ll_stats, op, count);
- else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
- sbi->ll_stats_track_id ==
- from_kgid(&init_user_ns, current_gid()))
- lprocfs_counter_add(sbi->ll_stats, op, count);
-}
-EXPORT_SYMBOL(ll_stats_ops_tally);
-
-static const char *ra_stat_string[] = {
- [RA_STAT_HIT] = "hits",
- [RA_STAT_MISS] = "misses",
- [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
- [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
- [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
- [RA_STAT_FAILED_MATCH] = "failed lock match",
- [RA_STAT_DISCARDED] = "read but discarded",
- [RA_STAT_ZERO_LEN] = "zero length file",
- [RA_STAT_ZERO_WINDOW] = "zero size window",
- [RA_STAT_EOF] = "read-ahead to EOF",
- [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
- [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
-};
-
-int ldebugfs_register_mountpoint(struct dentry *parent,
- struct super_block *sb, char *osc, char *mdc)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct obd_device *obd;
- struct dentry *dir;
- char name[MAX_STRING_SIZE + 1], *ptr;
- int err, id, len, rc;
-
- name[MAX_STRING_SIZE] = '\0';
-
- LASSERT(sbi != NULL);
- LASSERT(mdc != NULL);
- LASSERT(osc != NULL);
-
- /* Get fsname */
- len = strlen(lsi->lsi_lmd->lmd_profile);
- ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
- if (ptr && (strcmp(ptr, "-client") == 0))
- len -= 7;
-
- /* Mount info */
- snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
- lsi->lsi_lmd->lmd_profile, sb);
-
- dir = ldebugfs_register(name, parent, NULL, NULL);
- if (IS_ERR_OR_NULL(dir)) {
- err = dir ? PTR_ERR(dir) : -ENOMEM;
- sbi->ll_debugfs_entry = NULL;
- return err;
- }
- sbi->ll_debugfs_entry = dir;
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
- &vvp_dump_pgcache_file_ops, sbi);
- if (rc)
- CWARN("Error adding the dump_page_cache file\n");
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
- &ll_rw_extents_stats_fops, sbi);
- if (rc)
- CWARN("Error adding the extent_stats file\n");
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
- "extents_stats_per_process",
- 0644, &ll_rw_extents_stats_pp_fops, sbi);
- if (rc)
- CWARN("Error adding the extents_stats_per_process file\n");
-
- rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
- &ll_rw_offset_stats_fops, sbi);
- if (rc)
- CWARN("Error adding the offset_stats file\n");
-
- /* File operations stats */
- sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
- LPROCFS_STATS_FLAG_NONE);
- if (sbi->ll_stats == NULL) {
- err = -ENOMEM;
- goto out;
- }
- /* do counter init */
- for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
- __u32 type = llite_opcode_table[id].type;
- void *ptr = NULL;
- if (type & LPROCFS_TYPE_REGS)
- ptr = "regs";
- else if (type & LPROCFS_TYPE_BYTES)
- ptr = "bytes";
- else if (type & LPROCFS_TYPE_PAGES)
- ptr = "pages";
- lprocfs_counter_init(sbi->ll_stats,
- llite_opcode_table[id].opcode,
- (type & LPROCFS_CNTR_AVGMINMAX),
- llite_opcode_table[id].opname, ptr);
- }
- err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
- sbi->ll_stats);
- if (err)
- goto out;
-
- sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
- LPROCFS_STATS_FLAG_NONE);
- if (sbi->ll_ra_stats == NULL) {
- err = -ENOMEM;
- goto out;
- }
-
- for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
- lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
- ra_stat_string[id], "pages");
-
- err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
- sbi->ll_ra_stats);
- if (err)
- goto out;
-
- err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
- lprocfs_llite_obd_vars, sb);
- if (err)
- goto out;
-
- sbi->ll_kobj.kset = llite_kset;
- init_completion(&sbi->ll_kobj_unregister);
- err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL,
- "%s", name);
- if (err)
- goto out;
-
- /* MDC info */
- obd = class_name2obd(mdc);
-
- err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
- obd->obd_type->typ_name);
- if (err)
- goto out;
-
- /* OSC */
- obd = class_name2obd(osc);
-
- err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
- obd->obd_type->typ_name);
-out:
- if (err) {
- ldebugfs_remove(&sbi->ll_debugfs_entry);
- lprocfs_free_stats(&sbi->ll_ra_stats);
- lprocfs_free_stats(&sbi->ll_stats);
- }
- return err;
-}
-
-void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
-{
- if (sbi->ll_debugfs_entry) {
- ldebugfs_remove(&sbi->ll_debugfs_entry);
- kobject_put(&sbi->ll_kobj);
- wait_for_completion(&sbi->ll_kobj_unregister);
- lprocfs_free_stats(&sbi->ll_ra_stats);
- lprocfs_free_stats(&sbi->ll_stats);
- }
-}
-#undef MAX_STRING_SIZE
-
-#define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
-
-static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
- struct seq_file *seq, int which)
-{
- unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
- unsigned long start, end, r, w;
- char *unitp = "KMGTPEZY";
- int i, units = 10;
- struct per_process_info *pp_info = &io_extents->pp_extents[which];
-
- read_cum = 0;
- write_cum = 0;
- start = 0;
-
- for (i = 0; i < LL_HIST_MAX; i++) {
- read_tot += pp_info->pp_r_hist.oh_buckets[i];
- write_tot += pp_info->pp_w_hist.oh_buckets[i];
- }
-
- for (i = 0; i < LL_HIST_MAX; i++) {
- r = pp_info->pp_r_hist.oh_buckets[i];
- w = pp_info->pp_w_hist.oh_buckets[i];
- read_cum += r;
- write_cum += w;
- end = 1 << (i + LL_HIST_START - units);
- seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
- start, *unitp, end, *unitp,
- (i == LL_HIST_MAX - 1) ? '+' : ' ',
- r, pct(r, read_tot), pct(read_cum, read_tot),
- w, pct(w, write_tot), pct(write_cum, write_tot));
- start = end;
- if (start == 1<<10) {
- start = 1;
- units += 10;
- unitp++;
- }
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-}
-
-static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- int k;
-
- ktime_get_real_ts64(&now);
-
- if (!sbi->ll_rw_stats_on) {
- seq_printf(seq, "disabled\n"
- "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
- return 0;
- }
-	seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
-		   (u64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
- seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
- "extents", "calls", "%", "cum%",
- "calls", "%", "cum%");
- spin_lock(&sbi->ll_pp_extent_lock);
- for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
- if (io_extents->pp_extents[k].pid != 0) {
- seq_printf(seq, "\nPID: %d\n",
- io_extents->pp_extents[k].pid);
- ll_display_extents_info(io_extents, seq, k);
- }
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
- return 0;
-}
-
-static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
- const char __user *buf,
- size_t len,
- loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- int i;
- int value = 1, rc = 0;
-
- if (len == 0)
- return -EINVAL;
-
- rc = lprocfs_write_helper(buf, len, &value);
- if (rc < 0 && len < 16) {
- char kernbuf[16];
-
- if (copy_from_user(kernbuf, buf, len))
- return -EFAULT;
- kernbuf[len] = 0;
-
- if (kernbuf[len - 1] == '\n')
- kernbuf[len - 1] = 0;
-
- if (strcmp(kernbuf, "disabled") == 0 ||
- strcmp(kernbuf, "Disabled") == 0)
- value = 0;
- }
-
- if (value == 0)
- sbi->ll_rw_stats_on = 0;
- else
- sbi->ll_rw_stats_on = 1;
-
- spin_lock(&sbi->ll_pp_extent_lock);
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- io_extents->pp_extents[i].pid = 0;
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
- return len;
-}
-
-LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
-
-static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
-
- ktime_get_real_ts64(&now);
-
- if (!sbi->ll_rw_stats_on) {
- seq_printf(seq, "disabled\n"
- "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
- return 0;
- }
-	seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
- (u64)now.tv_sec, (unsigned long)now.tv_nsec);
-
- seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
- seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
- "extents", "calls", "%", "cum%",
- "calls", "%", "cum%");
- spin_lock(&sbi->ll_lock);
- ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
- spin_unlock(&sbi->ll_lock);
-
- return 0;
-}
-
-static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
- int i;
- int value = 1, rc = 0;
-
- if (len == 0)
- return -EINVAL;
-
- rc = lprocfs_write_helper(buf, len, &value);
- if (rc < 0 && len < 16) {
- char kernbuf[16];
-
- if (copy_from_user(kernbuf, buf, len))
- return -EFAULT;
- kernbuf[len] = 0;
-
- if (kernbuf[len - 1] == '\n')
- kernbuf[len - 1] = 0;
-
- if (strcmp(kernbuf, "disabled") == 0 ||
- strcmp(kernbuf, "Disabled") == 0)
- value = 0;
- }
-
- if (value == 0)
- sbi->ll_rw_stats_on = 0;
- else
- sbi->ll_rw_stats_on = 1;
-
- spin_lock(&sbi->ll_pp_extent_lock);
- for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
- io_extents->pp_extents[i].pid = 0;
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
-
- return len;
-}
-LPROC_SEQ_FOPS(ll_rw_extents_stats);
-
-void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
- struct ll_file_data *file, loff_t pos,
- size_t count, int rw)
-{
- int i, cur = -1;
- struct ll_rw_process_info *process;
- struct ll_rw_process_info *offset;
- int *off_count = &sbi->ll_rw_offset_entry_count;
- int *process_count = &sbi->ll_offset_process_count;
- struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
-
- if (!sbi->ll_rw_stats_on)
- return;
- process = sbi->ll_rw_process_info;
- offset = sbi->ll_rw_offset_info;
-
- spin_lock(&sbi->ll_pp_extent_lock);
- /* Extent statistics */
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- if (io_extents->pp_extents[i].pid == pid) {
- cur = i;
- break;
- }
- }
-
- if (cur == -1) {
- /* new process */
- sbi->ll_extent_process_count =
- (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
- cur = sbi->ll_extent_process_count;
- io_extents->pp_extents[cur].pid = pid;
- lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
- }
-
- for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
- (i < (LL_HIST_MAX - 1)); i++)
- ;
- if (rw == 0) {
- io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
- io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
- } else {
- io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
- io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
- }
- spin_unlock(&sbi->ll_pp_extent_lock);
-
- spin_lock(&sbi->ll_process_lock);
- /* Offset statistics */
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- if (process[i].rw_pid == pid) {
- if (process[i].rw_last_file != file) {
- process[i].rw_range_start = pos;
- process[i].rw_last_file_pos = pos + count;
- process[i].rw_smallest_extent = count;
- process[i].rw_largest_extent = count;
- process[i].rw_offset = 0;
- process[i].rw_last_file = file;
- spin_unlock(&sbi->ll_process_lock);
- return;
- }
- if (process[i].rw_last_file_pos != pos) {
- *off_count =
- (*off_count + 1) % LL_OFFSET_HIST_MAX;
- offset[*off_count].rw_op = process[i].rw_op;
- offset[*off_count].rw_pid = pid;
- offset[*off_count].rw_range_start =
- process[i].rw_range_start;
- offset[*off_count].rw_range_end =
- process[i].rw_last_file_pos;
- offset[*off_count].rw_smallest_extent =
- process[i].rw_smallest_extent;
- offset[*off_count].rw_largest_extent =
- process[i].rw_largest_extent;
- offset[*off_count].rw_offset =
- process[i].rw_offset;
- process[i].rw_op = rw;
- process[i].rw_range_start = pos;
- process[i].rw_smallest_extent = count;
- process[i].rw_largest_extent = count;
- process[i].rw_offset = pos -
- process[i].rw_last_file_pos;
- }
- if (process[i].rw_smallest_extent > count)
- process[i].rw_smallest_extent = count;
- if (process[i].rw_largest_extent < count)
- process[i].rw_largest_extent = count;
- process[i].rw_last_file_pos = pos + count;
- spin_unlock(&sbi->ll_process_lock);
- return;
- }
- }
- *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
- process[*process_count].rw_pid = pid;
- process[*process_count].rw_op = rw;
- process[*process_count].rw_range_start = pos;
- process[*process_count].rw_last_file_pos = pos + count;
- process[*process_count].rw_smallest_extent = count;
- process[*process_count].rw_largest_extent = count;
- process[*process_count].rw_offset = 0;
- process[*process_count].rw_last_file = file;
- spin_unlock(&sbi->ll_process_lock);
-}
-
-static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
- struct ll_rw_process_info *process = sbi->ll_rw_process_info;
- int i;
-
- ktime_get_real_ts64(&now);
-
- if (!sbi->ll_rw_stats_on) {
- seq_printf(seq, "disabled\n"
- "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
- return 0;
- }
- spin_lock(&sbi->ll_process_lock);
-
-	seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
-		   (u64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
- "R/W", "PID", "RANGE START", "RANGE END",
- "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
- /* We stored the discontiguous offsets here; print them first */
- for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
- if (offset[i].rw_pid != 0)
- seq_printf(seq,
-				   "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu\n",
- offset[i].rw_op == READ ? 'R' : 'W',
- offset[i].rw_pid,
- offset[i].rw_range_start,
- offset[i].rw_range_end,
- (unsigned long)offset[i].rw_smallest_extent,
- (unsigned long)offset[i].rw_largest_extent,
- offset[i].rw_offset);
- }
- /* Then print the current offsets for each process */
- for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- if (process[i].rw_pid != 0)
- seq_printf(seq,
-				   "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu\n",
- process[i].rw_op == READ ? 'R' : 'W',
- process[i].rw_pid,
- process[i].rw_range_start,
- process[i].rw_last_file_pos,
- (unsigned long)process[i].rw_smallest_extent,
- (unsigned long)process[i].rw_largest_extent,
- process[i].rw_offset);
- }
- spin_unlock(&sbi->ll_process_lock);
-
- return 0;
-}
-
-static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct ll_sb_info *sbi = seq->private;
- struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
- struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
- int value = 1, rc = 0;
-
- if (len == 0)
- return -EINVAL;
-
- rc = lprocfs_write_helper(buf, len, &value);
-
- if (rc < 0 && len < 16) {
- char kernbuf[16];
-
- if (copy_from_user(kernbuf, buf, len))
- return -EFAULT;
- kernbuf[len] = 0;
-
- if (kernbuf[len - 1] == '\n')
- kernbuf[len - 1] = 0;
-
- if (strcmp(kernbuf, "disabled") == 0 ||
- strcmp(kernbuf, "Disabled") == 0)
- value = 0;
- }
-
- if (value == 0)
- sbi->ll_rw_stats_on = 0;
- else
- sbi->ll_rw_stats_on = 1;
-
- spin_lock(&sbi->ll_process_lock);
- sbi->ll_offset_process_count = 0;
- sbi->ll_rw_offset_entry_count = 0;
- memset(process_info, 0, sizeof(struct ll_rw_process_info) *
- LL_PROCESS_HIST_MAX);
- memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
- LL_OFFSET_HIST_MAX);
- spin_unlock(&sbi->ll_process_lock);
-
- return len;
-}
-
-LPROC_SEQ_FOPS(ll_rw_offset_stats);
-
-void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->obd_vars = lprocfs_llite_obd_vars;
-}
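
The extent statistics kept by ll_rw_stats_tally() above bin each I/O by size
into power-of-two buckets: the bucket index is advanced while the size is at
least (1 << LL_HIST_START << i), and ll_display_extents_info() later prints
the buckets with K/M/G/... unit scaling. The following standalone sketch shows
only that binning rule; HIST_START and HIST_MAX are illustrative stand-ins for
LL_HIST_START and LL_HIST_MAX, not values taken from the removed file.

#include <stdio.h>
#include <stddef.h>

#define HIST_START 12	/* first bucket: sizes below 4 KiB */
#define HIST_MAX 10	/* number of buckets */

/*
 * Mirror of the bucket-index loop in ll_rw_stats_tally(): bucket i holds
 * sizes below (1 << HIST_START << i) that did not fit an earlier bucket;
 * anything at or above the top threshold lands in the last bucket.
 */
static int size_to_bucket(size_t count)
{
	int i;

	for (i = 0; count >= ((size_t)1 << HIST_START << i) &&
		    i < HIST_MAX - 1; i++)
		;
	return i;
}

int main(void)
{
	size_t sizes[] = { 1024, 4096, 65536, 1 << 20, 1UL << 30 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%10zu bytes -> bucket %d\n",
		       sizes[i], size_to_bucket(sizes[i]));
	return 0;
}

With these illustrative values a 1 KiB request lands in bucket 0, a 4 KiB
request in bucket 1, and everything of 1 MiB or more falls into the last
bucket (bucket 9).
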
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
deleted file mode 100644
index 2e663ea25258..000000000000
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ /dev/null
@@ -1,1174 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/quotaops.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/security.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_ver.h"
-#include "llite_internal.h"
-
-static int ll_create_it(struct inode *, struct dentry *,
- int, struct lookup_intent *);
-
-/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */
-static int ll_test_inode(struct inode *inode, void *opaque)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lustre_md *md = opaque;
-
- if (unlikely(!(md->body->valid & OBD_MD_FLID))) {
- CERROR("MDS body missing FID\n");
- return 0;
- }
-
- if (!lu_fid_eq(&lli->lli_fid, &md->body->fid1))
- return 0;
-
- return 1;
-}
-
-static int ll_set_inode(struct inode *inode, void *opaque)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct mdt_body *body = ((struct lustre_md *)opaque)->body;
-
- if (unlikely(!(body->valid & OBD_MD_FLID))) {
- CERROR("MDS body missing FID\n");
- return -EINVAL;
- }
-
- lli->lli_fid = body->fid1;
- if (unlikely(!(body->valid & OBD_MD_FLTYPE))) {
- CERROR("Can not initialize inode " DFID
- " without object type: valid = %#llx\n",
- PFID(&lli->lli_fid), body->valid);
- return -EINVAL;
- }
-
- inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mode & S_IFMT);
- if (unlikely(inode->i_mode == 0)) {
- CERROR("Invalid inode "DFID" type\n", PFID(&lli->lli_fid));
- return -EINVAL;
- }
-
- ll_lli_init(lli);
-
- return 0;
-}
-
-
-/*
- * Get an inode by inode number (already instantiated by the intent lookup).
- * Returns inode or NULL
- */
-struct inode *ll_iget(struct super_block *sb, ino_t hash,
- struct lustre_md *md)
-{
- struct inode *inode;
-
- LASSERT(hash != 0);
- inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md);
-
- if (inode) {
- if (inode->i_state & I_NEW) {
- int rc = 0;
-
- ll_read_inode2(inode, md);
- if (S_ISREG(inode->i_mode) &&
- ll_i2info(inode)->lli_clob == NULL) {
- CDEBUG(D_INODE,
- "%s: apply lsm %p to inode "DFID".\n",
- ll_get_fsname(sb, NULL, 0), md->lsm,
- PFID(ll_inode2fid(inode)));
- rc = cl_file_inode_init(inode, md);
- }
- if (rc != 0) {
- make_bad_inode(inode);
- unlock_new_inode(inode);
- iput(inode);
- inode = ERR_PTR(rc);
- } else
- unlock_new_inode(inode);
- } else if (!(inode->i_state & (I_FREEING | I_CLEAR)))
- ll_update_inode(inode, md);
- CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n",
- inode, PFID(&md->body->fid1));
- }
- return inode;
-}
-
-static void ll_invalidate_negative_children(struct inode *dir)
-{
- struct dentry *dentry, *tmp_subdir;
-
- ll_lock_dcache(dir);
- hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) {
- spin_lock(&dentry->d_lock);
- if (!list_empty(&dentry->d_subdirs)) {
- struct dentry *child;
-
- list_for_each_entry_safe(child, tmp_subdir,
- &dentry->d_subdirs,
- d_child) {
- if (d_really_is_negative(child))
- d_lustre_invalidate(child, 1);
- }
- }
- spin_unlock(&dentry->d_lock);
- }
- ll_unlock_dcache(dir);
-}
-
-int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
-{
- struct lustre_handle lockh;
- int rc;
-
- switch (flag) {
- case LDLM_CB_BLOCKING:
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (rc < 0) {
- CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc);
- return rc;
- }
- break;
- case LDLM_CB_CANCELING: {
- struct inode *inode = ll_inode_from_resource_lock(lock);
- __u64 bits = lock->l_policy_data.l_inodebits.bits;
-
- /* Inode is set to lock->l_resource->lr_lvb_inode
- * for mdc - bug 24555 */
- LASSERT(lock->l_ast_data == NULL);
-
- if (inode == NULL)
- break;
-
- /* Invalidate all dentries associated with this inode */
- LASSERT(lock->l_flags & LDLM_FL_CANCELING);
-
- if (!fid_res_name_eq(ll_inode2fid(inode),
- &lock->l_resource->lr_name)) {
- LDLM_ERROR(lock, "data mismatch with object "DFID"(%p)",
- PFID(ll_inode2fid(inode)), inode);
- LBUG();
- }
-
- if (bits & MDS_INODELOCK_XATTR) {
- ll_xattr_cache_destroy(inode);
- bits &= ~MDS_INODELOCK_XATTR;
- }
-
- /* For OPEN locks we differentiate between lock modes
- * LCK_CR, LCK_CW, LCK_PR - bug 22891 */
- if (bits & MDS_INODELOCK_OPEN)
- ll_have_md_lock(inode, &bits, lock->l_req_mode);
-
- if (bits & MDS_INODELOCK_OPEN) {
- fmode_t fmode;
-
- switch (lock->l_req_mode) {
- case LCK_CW:
- fmode = FMODE_WRITE;
- break;
- case LCK_PR:
- fmode = FMODE_EXEC;
- break;
- case LCK_CR:
- fmode = FMODE_READ;
- break;
- default:
- LDLM_ERROR(lock, "bad lock mode for OPEN lock");
- LBUG();
- }
-
- ll_md_real_close(inode, fmode);
- }
-
- if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
- ll_have_md_lock(inode, &bits, LCK_MINMODE);
-
- if (bits & MDS_INODELOCK_LAYOUT) {
- struct cl_object_conf conf = {
- .coc_opc = OBJECT_CONF_INVALIDATE,
- .coc_inode = inode,
- };
-
- rc = ll_layout_conf(inode, &conf);
- if (rc < 0)
- CDEBUG(D_INODE, "cannot invalidate layout of "
- DFID": rc = %d\n",
- PFID(ll_inode2fid(inode)), rc);
- }
-
- if (bits & MDS_INODELOCK_UPDATE) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
- spin_unlock(&lli->lli_lock);
- }
-
- if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
- CDEBUG(D_INODE, "invalidating inode %lu\n",
- inode->i_ino);
- truncate_inode_pages(inode->i_mapping, 0);
- ll_invalidate_negative_children(inode);
- }
-
- if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
- inode->i_sb->s_root != NULL &&
- !is_root_inode(inode))
- ll_invalidate_aliases(inode);
-
- iput(inode);
- break;
- }
- default:
- LBUG();
- }
-
- return 0;
-}
-
-__u32 ll_i2suppgid(struct inode *i)
-{
- if (in_group_p(i->i_gid))
- return (__u32)from_kgid(&init_user_ns, i->i_gid);
- else
- return (__u32)(-1);
-}
-
-/* Pack the required supplementary groups into the supplied groups array.
- * If we don't need to use the groups from the target inode(s) then we
- * instead pack one or more groups from the user's supplementary group
- * array in case it might be useful. Not needed if doing an MDS-side upcall. */
-void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
-{
-#if 0
- int i;
-#endif
-
- LASSERT(i1 != NULL);
- LASSERT(suppgids != NULL);
-
- suppgids[0] = ll_i2suppgid(i1);
-
- if (i2)
- suppgids[1] = ll_i2suppgid(i2);
- else
- suppgids[1] = -1;
-
-#if 0
- for (i = 0; i < current_ngroups; i++) {
- if (suppgids[0] == -1) {
- if (current_groups[i] != suppgids[1])
- suppgids[0] = current_groups[i];
- continue;
- }
- if (suppgids[1] == -1) {
- if (current_groups[i] != suppgids[0])
- suppgids[1] = current_groups[i];
- continue;
- }
- break;
- }
-#endif
-}
-
-/*
- * try to reuse three types of dentry:
- * 1. unhashed alias, this one is unhashed by d_invalidate (but it may be valid
- * by concurrent .revalidate).
- * 2. INVALID alias (common case for no valid ldlm lock held, but this flag may
- * be cleared by others calling d_lustre_revalidate).
- * 3. DISCONNECTED alias.
- */
-static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
-{
- struct dentry *alias, *discon_alias, *invalid_alias;
-
- if (hlist_empty(&inode->i_dentry))
- return NULL;
-
- discon_alias = invalid_alias = NULL;
-
- ll_lock_dcache(inode);
- hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- LASSERT(alias != dentry);
-
- spin_lock(&alias->d_lock);
- if (alias->d_flags & DCACHE_DISCONNECTED)
- /* LASSERT(last_discon == NULL); LU-405, bz 20055 */
- discon_alias = alias;
- else if (alias->d_parent == dentry->d_parent &&
- alias->d_name.hash == dentry->d_name.hash &&
- alias->d_name.len == dentry->d_name.len &&
- memcmp(alias->d_name.name, dentry->d_name.name,
- dentry->d_name.len) == 0)
- invalid_alias = alias;
- spin_unlock(&alias->d_lock);
-
- if (invalid_alias)
- break;
- }
- alias = invalid_alias ?: discon_alias ?: NULL;
- if (alias) {
- spin_lock(&alias->d_lock);
- dget_dlock(alias);
- spin_unlock(&alias->d_lock);
- }
- ll_unlock_dcache(inode);
-
- return alias;
-}
-
-/*
- * Similar to d_splice_alias(), but lustre treats an invalid alias
- * similarly to DCACHE_DISCONNECTED, and tries to use it anyway.
- */
-struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
-{
- struct dentry *new;
- int rc;
-
- if (inode) {
- new = ll_find_alias(inode, de);
- if (new) {
- rc = ll_d_init(new);
- if (rc < 0) {
- dput(new);
- return ERR_PTR(rc);
- }
- d_move(new, de);
- iput(inode);
- CDEBUG(D_DENTRY,
- "Reuse dentry %p inode %p refc %d flags %#x\n",
- new, d_inode(new), d_count(new), new->d_flags);
- return new;
- }
- }
- rc = ll_d_init(de);
- if (rc < 0)
- return ERR_PTR(rc);
- d_add(de, inode);
- CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
- de, d_inode(de), d_count(de), de->d_flags);
- return de;
-}
-
-static int ll_lookup_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it,
- struct inode *parent, struct dentry **de)
-{
- struct inode *inode = NULL;
- __u64 bits = 0;
- int rc = 0;
-
- /* NB 1 request reference will be taken away by ll_intent_lock()
- * when I return */
- CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
- it->d.lustre.it_disposition);
- if (!it_disposition(it, DISP_LOOKUP_NEG)) {
- rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
- if (rc)
- return rc;
-
- ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
-
- /* We used to query real size from OSTs here, but actually
- this is not needed. For stat() calls size would be updated
- from subsequent do_revalidate()->ll_inode_revalidate_it() in
- 2.4 and
- vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
- Everybody else who needs correct file size would call
- ll_glimpse_size or some equivalent themselves anyway.
- Also see bug 7198. */
- }
-
- /* Only hash *de if it is unhashed (new dentry).
-	 * Atomic_open may pass hashed dentries for open.
- */
- if (d_unhashed(*de)) {
- struct dentry *alias;
-
- alias = ll_splice_alias(inode, *de);
- if (IS_ERR(alias)) {
- rc = PTR_ERR(alias);
- goto out;
- }
- *de = alias;
- } else if (!it_disposition(it, DISP_LOOKUP_NEG) &&
- !it_disposition(it, DISP_OPEN_CREATE)) {
-		/* With DISP_OPEN_CREATE the dentry will be
-		   instantiated in ll_create_it. */
- LASSERT(d_inode(*de) == NULL);
- d_instantiate(*de, inode);
- }
-
- if (!it_disposition(it, DISP_LOOKUP_NEG)) {
-		/* we have the lookup lock - unhide dentry */
- if (bits & MDS_INODELOCK_LOOKUP)
- d_lustre_revalidate(*de);
- } else if (!it_disposition(it, DISP_OPEN_CREATE)) {
- /* If file created on server, don't depend on parent UPDATE
- * lock to unhide it. It is left hidden and next lookup can
- * find it in ll_splice_alias.
- */
- /* Check that parent has UPDATE lock. */
- struct lookup_intent parent_it = {
- .it_op = IT_GETATTR,
- .d.lustre.it_lock_handle = 0 };
-
- if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it,
- &ll_i2info(parent)->lli_fid, NULL)) {
- d_lustre_revalidate(*de);
- ll_intent_release(&parent_it);
- }
- }
-
-out:
- if (rc != 0 && it->it_op & IT_OPEN)
- ll_open_cleanup((*de)->d_sb, request);
-
- return rc;
-}
-
-static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
- struct lookup_intent *it, int lookup_flags)
-{
- struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
- struct dentry *save = dentry, *retval;
- struct ptlrpc_request *req = NULL;
- struct inode *inode;
- struct md_op_data *op_data;
- __u32 opc;
- int rc;
-
- if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
- return ERR_PTR(-ENAMETOOLONG);
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, LL_IT2STR(it));
-
- if (d_mountpoint(dentry))
- CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
-
- if (it == NULL || it->it_op == IT_GETXATTR)
- it = &lookup_it;
-
- if (it->it_op == IT_GETATTR) {
- rc = ll_statahead_enter(parent, &dentry, 0);
- if (rc == 1) {
- if (dentry == save)
- retval = NULL;
- else
- retval = dentry;
- goto out;
- }
- }
-
- if (it->it_op & IT_CREAT)
- opc = LUSTRE_OPC_CREATE;
- else
- opc = LUSTRE_OPC_ANY;
-
- op_data = ll_prep_md_op_data(NULL, parent, NULL, dentry->d_name.name,
- dentry->d_name.len, lookup_flags, opc,
- NULL);
- if (IS_ERR(op_data))
- return (void *)op_data;
-
- /* enforce umask if acl disabled or MDS doesn't support umask */
- if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
- it->it_create_mode &= ~current_umask();
-
- rc = md_intent_lock(ll_i2mdexp(parent), op_data, NULL, 0, it,
- lookup_flags, &req, ll_md_blocking_ast, 0);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- retval = ERR_PTR(rc);
- goto out;
- }
-
- rc = ll_lookup_it_finish(req, it, parent, &dentry);
- if (rc != 0) {
- ll_intent_release(it);
- retval = ERR_PTR(rc);
- goto out;
- }
-
- inode = d_inode(dentry);
- if ((it->it_op & IT_OPEN) && inode &&
- !S_ISREG(inode->i_mode) &&
- !S_ISDIR(inode->i_mode)) {
- ll_release_openhandle(inode, it);
- }
- ll_lookup_finish_locks(it, inode);
-
- if (dentry == save)
- retval = NULL;
- else
- retval = dentry;
- goto out;
- out:
- if (req)
- ptlrpc_req_finished(req);
- if (it->it_op == IT_GETATTR && (retval == NULL || retval == dentry))
- ll_statahead_mark(parent, dentry);
- return retval;
-}
-
-static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
- unsigned int flags)
-{
- struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
- struct dentry *de;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, flags);
-
- /* Optimize away (CREATE && !OPEN). Let .create handle the race. */
- if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
- return NULL;
-
- if (flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE))
- itp = NULL;
- else
- itp = &it;
- de = ll_lookup_it(parent, dentry, itp, 0);
-
- if (itp != NULL)
- ll_intent_release(itp);
-
- return de;
-}
-
-/*
- * For cached negative dentry and new dentry, handle lookup/create/open
- * together.
- */
-static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
-{
- struct lookup_intent *it;
- struct dentry *de;
- long long lookup_flags = LOOKUP_OPEN;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,open_flags %x,mode %x opened %d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, file, open_flags, mode, *opened);
-
- it = kzalloc(sizeof(*it), GFP_NOFS);
- if (!it)
- return -ENOMEM;
-
- it->it_op = IT_OPEN;
- if (open_flags & O_CREAT) {
- it->it_op |= IT_CREAT;
- lookup_flags |= LOOKUP_CREATE;
- }
- it->it_create_mode = (mode & S_IALLUGO) | S_IFREG;
- it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags);
-
- /* Dentry added to dcache tree in ll_lookup_it */
- de = ll_lookup_it(dir, dentry, it, lookup_flags);
- if (IS_ERR(de))
- rc = PTR_ERR(de);
- else if (de != NULL)
- dentry = de;
-
- if (!rc) {
- if (it_disposition(it, DISP_OPEN_CREATE)) {
- /* Dentry instantiated in ll_create_it. */
- rc = ll_create_it(dir, dentry, mode, it);
- if (rc) {
- /* We dget in ll_splice_alias. */
- if (de != NULL)
- dput(de);
- goto out_release;
- }
-
- *opened |= FILE_CREATED;
- }
- if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
- /* Open dentry. */
- if (S_ISFIFO(d_inode(dentry)->i_mode)) {
- /* We cannot call open here as it would
- * deadlock.
- */
- if (it_disposition(it, DISP_ENQ_OPEN_REF))
- ptlrpc_req_finished(
- (struct ptlrpc_request *)
- it->d.lustre.it_data);
- rc = finish_no_open(file, de);
- } else {
- file->private_data = it;
- rc = finish_open(file, dentry, NULL, opened);
- /* We dget in ll_splice_alias. finish_open takes
- * care of dget for fd open.
- */
- if (de != NULL)
- dput(de);
- }
- } else {
- rc = finish_no_open(file, de);
- }
- }
-
-out_release:
- ll_intent_release(it);
- kfree(it);
-
- return rc;
-}
-
-
-/* We depend on "mode" being set with the proper file type/umask by now */
-static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
-{
- struct inode *inode = NULL;
- struct ptlrpc_request *request = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- int rc;
-
- LASSERT(it && it->d.lustre.it_disposition);
-
- LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF));
- request = it->d.lustre.it_data;
- it_clear_disposition(it, DISP_ENQ_CREATE_REF);
- rc = ll_prep_inode(&inode, request, dir->i_sb, it);
- if (rc) {
- inode = ERR_PTR(rc);
- goto out;
- }
-
- LASSERT(hlist_empty(&inode->i_dentry));
-
- /* We asked for a lock on the directory, but were granted a
- * lock on the inode. Since we finally have an inode pointer,
- * stuff it in the lock. */
- CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
- ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
- out:
- ptlrpc_req_finished(request);
- return inode;
-}
-
-/*
- * By the time this is called, we already have created the directory cache
- * entry for the new file, but it is so far negative - it has no inode.
- *
- * We defer creating the OBD object(s) until open, to keep the intent and
- * non-intent code paths similar, and also because we do not have the MDS
- * inode number before calling ll_create_node() (which is needed for LOV),
- * so we would need to do yet another RPC to the MDS to store the LOV EA
- * data on the MDS. If needed, we would pass the PACKED lmm as data and
- * lmm_size in datalen (the MDS still has code which will handle that).
- *
- * If the create succeeds, we fill in the inode information
- * with d_instantiate().
- */
-static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
- struct lookup_intent *it)
-{
- struct inode *inode;
- int rc = 0;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, LL_IT2STR(it));
-
- rc = it_open_error(DISP_OPEN_CREATE, it);
- if (rc)
- return rc;
-
- inode = ll_create_node(dir, it);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- d_instantiate(dentry, inode);
- return 0;
-}
-
-static void ll_update_times(struct ptlrpc_request *request,
- struct inode *inode)
-{
- struct mdt_body *body = req_capsule_server_get(&request->rq_pill,
- &RMF_MDT_BODY);
-
- LASSERT(body);
- if (body->valid & OBD_MD_FLMTIME &&
- body->mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
- inode->i_ino, LTIME_S(inode->i_mtime), body->mtime);
- LTIME_S(inode->i_mtime) = body->mtime;
- }
- if (body->valid & OBD_MD_FLCTIME &&
- body->ctime > LTIME_S(inode->i_ctime))
- LTIME_S(inode->i_ctime) = body->ctime;
-}
-
-static int ll_new_node(struct inode *dir, struct dentry *dentry,
- const char *tgt, int mode, int rdev,
- __u32 opc)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- struct inode *inode = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- int tgt_len = 0;
- int err;
-
- if (unlikely(tgt != NULL))
- tgt_len = strlen(tgt) + 1;
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
- 0, opc, NULL);
- if (IS_ERR(op_data)) {
- err = PTR_ERR(op_data);
- goto err_exit;
- }
-
- err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
- from_kuid(&init_user_ns, current_fsuid()),
- from_kgid(&init_user_ns, current_fsgid()),
- cfs_curproc_cap_pack(), rdev, &request);
- ll_finish_md_op_data(op_data);
- if (err)
- goto err_exit;
-
- ll_update_times(request, dir);
-
- err = ll_prep_inode(&inode, request, dir->i_sb, NULL);
- if (err)
- goto err_exit;
-
- d_instantiate(dentry, inode);
-err_exit:
- ptlrpc_req_finished(request);
-
- return err;
-}
-
-static int ll_mknod(struct inode *dir, struct dentry *dchild,
- umode_t mode, dev_t rdev)
-{
- int err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p) mode %o dev %x\n",
- dchild, dir->i_ino, dir->i_generation, dir,
- mode, old_encode_dev(rdev));
-
- if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
- mode &= ~current_umask();
-
- switch (mode & S_IFMT) {
- case 0:
- mode |= S_IFREG; /* for mode = 0 case, fallthrough */
- case S_IFREG:
- case S_IFCHR:
- case S_IFBLK:
- case S_IFIFO:
- case S_IFSOCK:
- err = ll_new_node(dir, dchild, NULL, mode,
- old_encode_dev(rdev),
- LUSTRE_OPC_MKNOD);
- break;
- case S_IFDIR:
- err = -EPERM;
- break;
- default:
- err = -EINVAL;
- }
-
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD, 1);
-
- return err;
-}
-
-/*
- * Plain create. Intent create is handled in atomic_open.
- */
-static int ll_create_nd(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool want_excl)
-{
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u, excl=%d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, mode, want_excl);
-
- rc = ll_mknod(dir, dentry, mode, 0);
-
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, unhashed %d\n",
- dentry, d_unhashed(dentry));
-
- return rc;
-}
-
-static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
-{
- if (d_really_is_positive(child))
- *fid = *ll_inode2fid(d_inode(child));
-}
-
-/**
- * Remove dir entry
- **/
-int ll_rmdir_entry(struct inode *dir, char *name, int namelen)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
- namelen, name, dir->i_ino, dir->i_generation, dir);
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL, name, strlen(name),
- S_IFDIR, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
- op_data->op_cli_flags |= CLI_RM_ENTRY;
- rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (rc == 0) {
- ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
- }
-
- ptlrpc_req_finished(request);
- return rc;
-}
-
-int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
-{
- struct mdt_body *body;
- struct lov_mds_md *eadata;
- struct lov_stripe_md *lsm = NULL;
- struct obd_trans_info oti = { 0 };
- struct obdo *oa;
- int rc;
-
- /* req is swabbed so this is safe */
- body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- if (!(body->valid & OBD_MD_FLEASIZE))
- return 0;
-
- if (body->eadatasize == 0) {
- CERROR("OBD_MD_FLEASIZE set but eadatasize zero\n");
- rc = -EPROTO;
- goto out;
- }
-
- /* The MDS sent back the EA because we unlinked the last reference
- * to this file. Use this EA to unlink the objects on the OST.
- * It's opaque so we don't swab here; we leave it to obd_unpackmd() to
- * check it is complete and sensible. */
- eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD,
- body->eadatasize);
- LASSERT(eadata != NULL);
-
- rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize);
- if (rc < 0) {
- CERROR("obd_unpackmd: %d\n", rc);
- goto out;
- }
- LASSERT(rc >= sizeof(*lsm));
-
- OBDO_ALLOC(oa);
- if (oa == NULL) {
- rc = -ENOMEM;
- goto out_free_memmd;
- }
-
- oa->o_oi = lsm->lsm_oi;
- oa->o_mode = body->mode & S_IFMT;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLGROUP;
-
- if (body->valid & OBD_MD_FLCOOKIE) {
- oa->o_valid |= OBD_MD_FLCOOKIE;
- oti.oti_logcookies =
- req_capsule_server_sized_get(&request->rq_pill,
- &RMF_LOGCOOKIES,
- sizeof(struct llog_cookie) *
- lsm->lsm_stripe_count);
- if (oti.oti_logcookies == NULL) {
- oa->o_valid &= ~OBD_MD_FLCOOKIE;
- body->valid &= ~OBD_MD_FLCOOKIE;
- }
- }
-
- rc = obd_destroy(NULL, ll_i2dtexp(dir), oa, lsm, &oti,
- ll_i2mdexp(dir));
- if (rc)
- CERROR("obd destroy objid "DOSTID" error %d\n",
- POSTID(&lsm->lsm_oi), rc);
-out_free_memmd:
- obd_free_memmd(ll_i2dtexp(dir), &lsm);
- OBDO_FREE(oa);
-out:
- return rc;
-}
-
-/* ll_unlink() doesn't update the inode with the new link count.
- * Instead, ll_ddelete() and ll_d_iput() will update it based upon whether
- * any lock exists. They will recycle dentries and inodes based upon locks
- * too. b=20433 */
-static int ll_unlink(struct inode *dir, struct dentry *dentry)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- ll_get_child_fid(dentry, &op_data->op_fid3);
- op_data->op_fid2 = op_data->op_fid3;
- rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (rc)
- goto out;
-
- ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK, 1);
-
- rc = ll_objects_destroy(request, dir);
- out:
- ptlrpc_req_finished(request);
- return rc;
-}
-
-static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
- int err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
-
- if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
- mode &= ~current_umask();
- mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
- err = ll_new_node(dir, dentry, NULL, mode, 0, LUSTRE_OPC_MKDIR);
-
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
-
- return err;
-}
-
-static int ll_rmdir(struct inode *dir, struct dentry *dentry)
-{
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
-
- op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
- S_IFDIR, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- ll_get_child_fid(dentry, &op_data->op_fid3);
- op_data->op_fid2 = op_data->op_fid3;
- rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (rc == 0) {
- ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
- }
-
- ptlrpc_req_finished(request);
- return rc;
-}
-
-static int ll_symlink(struct inode *dir, struct dentry *dentry,
- const char *oldname)
-{
- int err;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),target=%.*s\n",
- dentry, dir->i_ino, dir->i_generation,
- dir, 3000, oldname);
-
- err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO,
- 0, LUSTRE_OPC_SYMLINK);
-
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
-
- return err;
-}
-
-static int ll_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry)
-{
- struct inode *src = d_inode(old_dentry);
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int err;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%pd\n",
- src->i_ino, src->i_generation, src, dir->i_ino,
- dir->i_generation, dir, new_dentry);
-
- op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name,
- new_dentry->d_name.len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- err = md_link(sbi->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (err)
- goto out;
-
- ll_update_times(request, dir);
- ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
-out:
- ptlrpc_req_finished(request);
- return err;
-}
-
-static int ll_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
-{
- struct ptlrpc_request *request = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(old_dir);
- struct md_op_data *op_data;
- int err;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op:oldname=%pd,src_dir=%lu/%u(%p),newname=%pd,tgt_dir=%lu/%u(%p)\n",
- old_dentry, old_dir->i_ino, old_dir->i_generation, old_dir,
- new_dentry, new_dir->i_ino, new_dir->i_generation, new_dir);
-
- op_data = ll_prep_md_op_data(NULL, old_dir, new_dir, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- ll_get_child_fid(old_dentry, &op_data->op_fid3);
- ll_get_child_fid(new_dentry, &op_data->op_fid4);
- err = md_rename(sbi->ll_md_exp, op_data,
- old_dentry->d_name.name,
- old_dentry->d_name.len,
- new_dentry->d_name.name,
- new_dentry->d_name.len, &request);
- ll_finish_md_op_data(op_data);
- if (!err) {
- ll_update_times(request, old_dir);
- ll_update_times(request, new_dir);
- ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
- err = ll_objects_destroy(request, old_dir);
- }
-
- ptlrpc_req_finished(request);
- if (!err)
- d_move(old_dentry, new_dentry);
- return err;
-}
-
-const struct inode_operations ll_dir_inode_operations = {
- .mknod = ll_mknod,
- .atomic_open = ll_atomic_open,
- .lookup = ll_lookup_nd,
- .create = ll_create_nd,
- /* We need all these non-raw things for NFSD, to not patch it. */
- .unlink = ll_unlink,
- .mkdir = ll_mkdir,
- .rmdir = ll_rmdir,
- .symlink = ll_symlink,
- .link = ll_link,
- .rename = ll_rename,
- .setattr = ll_setattr,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
- .listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
- .get_acl = ll_get_acl,
-};
-
-const struct inode_operations ll_special_inode_operations = {
- .setattr = ll_setattr,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
- .listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
- .get_acl = ll_get_acl,
-};
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
deleted file mode 100644
index dc33055acf3b..000000000000
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/remote_perm.c
- *
- * Lustre Permission Cache for Remote Client
- *
- * Author: Lai Siyao <lsy@clusterfs.com>
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_disk.h"
-#include "../include/lustre_param.h"
-#include "llite_internal.h"
-
-struct kmem_cache *ll_remote_perm_cachep;
-struct kmem_cache *ll_rmtperm_hash_cachep;
-
-static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
-{
- struct ll_remote_perm *lrp;
-
- OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
- if (lrp)
- INIT_HLIST_NODE(&lrp->lrp_list);
- return lrp;
-}
-
-static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
-{
- if (!lrp)
- return;
-
- if (!hlist_unhashed(&lrp->lrp_list))
- hlist_del(&lrp->lrp_list);
- OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
-}
-
-static struct hlist_head *alloc_rmtperm_hash(void)
-{
- struct hlist_head *hash;
- int i;
-
- OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep,
- REMOTE_PERM_HASHSIZE * sizeof(*hash),
- GFP_IOFS);
- if (!hash)
- return NULL;
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- INIT_HLIST_HEAD(hash + i);
-
- return hash;
-}
-
-void free_rmtperm_hash(struct hlist_head *hash)
-{
- int i;
- struct ll_remote_perm *lrp;
- struct hlist_node *next;
-
- if (!hash)
- return;
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- hlist_for_each_entry_safe(lrp, next, hash + i, lrp_list)
- free_ll_remote_perm(lrp);
- OBD_SLAB_FREE(hash, ll_rmtperm_hash_cachep,
- REMOTE_PERM_HASHSIZE * sizeof(*hash));
-}
-
-static inline int remote_perm_hashfunc(uid_t uid)
-{
- return uid & (REMOTE_PERM_HASHSIZE - 1);
-}
-
-/* NB: setxid permission is not checked here, instead it's done on
- * the MDT when the client gets the remote permission.
- */
-static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
-{
- struct hlist_head *head;
- struct ll_remote_perm *lrp;
- int found = 0, rc;
-
- if (!lli->lli_remote_perms)
- return -ENOENT;
-
- head = lli->lli_remote_perms +
- remote_perm_hashfunc(from_kuid(&init_user_ns, current_uid()));
-
- spin_lock(&lli->lli_lock);
- hlist_for_each_entry(lrp, head, lrp_list) {
- if (lrp->lrp_uid != from_kuid(&init_user_ns, current_uid()))
- continue;
- if (lrp->lrp_gid != from_kgid(&init_user_ns, current_gid()))
- continue;
- if (lrp->lrp_fsuid != from_kuid(&init_user_ns, current_fsuid()))
- continue;
- if (lrp->lrp_fsgid != from_kgid(&init_user_ns, current_fsgid()))
- continue;
- found = 1;
- break;
- }
-
- if (!found) {
- rc = -ENOENT;
- goto out;
- }
-
- CDEBUG(D_SEC, "found remote perm: %u/%u/%u/%u - %#x\n",
- lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
- lrp->lrp_access_perm);
- rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
-
-out:
- spin_unlock(&lli->lli_lock);
- return rc;
-}
-
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_remote_perm *lrp = NULL, *tmp = NULL;
- struct hlist_head *head, *perm_hash = NULL;
-
- LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
-
-#if 0
- if (perm->rp_uid != current->uid ||
- perm->rp_gid != current->gid ||
- perm->rp_fsuid != current->fsuid ||
- perm->rp_fsgid != current->fsgid) {
- /* user might setxid in this small period */
- CDEBUG(D_SEC,
- "remote perm user %u/%u/%u/%u != current %u/%u/%u/%u\n",
- perm->rp_uid, perm->rp_gid, perm->rp_fsuid,
- perm->rp_fsgid, current->uid, current->gid,
- current->fsuid, current->fsgid);
- return -EAGAIN;
- }
-#endif
-
- if (!lli->lli_remote_perms) {
- perm_hash = alloc_rmtperm_hash();
- if (!perm_hash) {
- CERROR("alloc lli_remote_perms failed!\n");
- return -ENOMEM;
- }
- }
-
- spin_lock(&lli->lli_lock);
-
- if (!lli->lli_remote_perms)
- lli->lli_remote_perms = perm_hash;
- else
- free_rmtperm_hash(perm_hash);
-
- head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
-
-again:
- hlist_for_each_entry(tmp, head, lrp_list) {
- if (tmp->lrp_uid != perm->rp_uid)
- continue;
- if (tmp->lrp_gid != perm->rp_gid)
- continue;
- if (tmp->lrp_fsuid != perm->rp_fsuid)
- continue;
- if (tmp->lrp_fsgid != perm->rp_fsgid)
- continue;
- free_ll_remote_perm(lrp);
- lrp = tmp;
- break;
- }
-
- if (!lrp) {
- spin_unlock(&lli->lli_lock);
- lrp = alloc_ll_remote_perm();
- if (!lrp) {
- CERROR("alloc memory for ll_remote_perm failed!\n");
- return -ENOMEM;
- }
- spin_lock(&lli->lli_lock);
- goto again;
- }
-
- lrp->lrp_access_perm = perm->rp_access_perm;
- if (lrp != tmp) {
- lrp->lrp_uid = perm->rp_uid;
- lrp->lrp_gid = perm->rp_gid;
- lrp->lrp_fsuid = perm->rp_fsuid;
- lrp->lrp_fsgid = perm->rp_fsgid;
- hlist_add_head(&lrp->lrp_list, head);
- }
- lli->lli_rmtperm_time = cfs_time_current();
- spin_unlock(&lli->lli_lock);
-
- CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
- lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
- lrp->lrp_access_perm);
-
- return 0;
-}
-
-int lustre_check_remote_perm(struct inode *inode, int mask)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- struct mdt_remote_perm *perm;
- unsigned long save;
- int i = 0, rc;
-
- do {
- save = lli->lli_rmtperm_time;
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i))
- break;
-
- might_sleep();
-
- mutex_lock(&lli->lli_rmtperm_mutex);
- /* check again */
- if (save != lli->lli_rmtperm_time) {
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i)) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- break;
- }
- }
-
- if (i++ > 5) {
- CERROR("check remote perm falls in dead loop!\n");
- LBUG();
- }
-
- rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode),
- ll_i2suppgid(inode), &req);
- if (rc) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- break;
- }
-
- perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (unlikely(!perm)) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- rc = -EPROTO;
- break;
- }
-
- rc = ll_update_remote_perm(inode, perm);
- mutex_unlock(&lli->lli_rmtperm_mutex);
- if (rc == -ENOMEM)
- break;
-
- ptlrpc_req_finished(req);
- req = NULL;
- } while (1);
- ptlrpc_req_finished(req);
- return rc;
-}
-
-#if 0 /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
- * because it will fail sanity test 48.
- */
-void ll_free_remote_perms(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct hlist_head *hash = lli->lli_remote_perms;
- struct ll_remote_perm *lrp;
- struct hlist_node *node, *next;
- int i;
-
- LASSERT(hash);
-
- spin_lock(&lli->lli_lock);
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
- hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
- free_ll_remote_perm(lrp);
- }
-
- spin_unlock(&lli->lli_lock);
-}
-#endif
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
deleted file mode 100644
index 8c1bfd2f6d4f..000000000000
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ /dev/null
@@ -1,1281 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/rw.c
- *
- * Lustre Lite I/O page cache routines shared by different kernel revs
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/writeback.h>
-#include <linux/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-/* current_is_kswapd() */
-#include <linux/swap.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre_lite.h"
-#include "../include/obd_cksum.h"
-#include "llite_internal.h"
-#include "../include/linux/lustre_compat25.h"
-
-/**
- * Finalizes cl-data before exiting typical address_space operation. Dual to
- * ll_cl_init().
- */
-static void ll_cl_fini(struct ll_cl_context *lcc)
-{
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- LASSERT(lcc->lcc_cookie == current);
- LASSERT(env != NULL);
-
- if (page != NULL) {
- lu_ref_del(&page->cp_reference, "cl_io", io);
- cl_page_put(env, page);
- }
-
- cl_env_put(env, &lcc->lcc_refcheck);
-}
-
-/**
- * Initializes common cl-data at the typical address_space operation entry
- * point.
- */
-static struct ll_cl_context *ll_cl_init(struct file *file,
- struct page *vmpage, int create)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_object *clob;
- struct ccc_io *cio;
-
- int refcheck;
- int result = 0;
-
- clob = ll_i2info(vmpage->mapping->host)->lli_clob;
- LASSERT(clob != NULL);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return ERR_CAST(env);
-
- lcc = &vvp_env_info(env)->vti_io_ctx;
- memset(lcc, 0, sizeof(*lcc));
- lcc->lcc_env = env;
- lcc->lcc_refcheck = refcheck;
- lcc->lcc_cookie = current;
-
- cio = ccc_env_io(env);
- io = cio->cui_cl.cis_io;
- if (io == NULL && create) {
- struct inode *inode = vmpage->mapping->host;
- loff_t pos;
-
- if (mutex_trylock(&inode->i_mutex)) {
- mutex_unlock(&(inode)->i_mutex);
-
- /* this is too bad. Someone is trying to write the
- * page w/o holding inode mutex. This means we can
- * add dirty pages into cache during truncate */
- CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
- current->comm);
- dump_stack();
- LBUG();
- return ERR_PTR(-EIO);
- }
-
- /*
-		 * The loop-back driver calls the ->prepare_write() method
-		 * directly, bypassing the file system ->write() operation,
- * so cl_io has to be created here.
- */
- io = ccc_env_thread_io(env);
- ll_io_init(io, file, 1);
-
- /* No lock at all for this kind of IO - we can't do it because
-			 * we hold the page lock; it would cause a deadlock.
-			 * XXX: This causes poor performance for the loop device - one page
- * per RPC.
- * In order to get better performance, users should use
- * lloop driver instead.
- */
- io->ci_lockreq = CILR_NEVER;
-
- pos = vmpage->index << PAGE_CACHE_SHIFT;
-
- /* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
- if (result == 0) {
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- cio->cui_iter = NULL;
- result = cl_io_iter_init(env, io);
- if (result == 0) {
- result = cl_io_lock(env, io);
- if (result == 0)
- result = cl_io_start(env, io);
- }
- } else
- result = io->ci_result;
- }
-
- lcc->lcc_io = io;
- if (io == NULL)
- result = -EIO;
- if (result == 0) {
- struct cl_page *page;
-
- LASSERT(io != NULL);
- LASSERT(io->ci_state == CIS_IO_GOING);
- LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
- page = cl_page_find(env, clob, vmpage->index, vmpage,
- CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- lcc->lcc_page = page;
- lu_ref_add(&page->cp_reference, "cl_io", io);
- result = 0;
- } else
- result = PTR_ERR(page);
- }
- if (result) {
- ll_cl_fini(lcc);
- lcc = ERR_PTR(result);
- }
-
- CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
- vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
- env, io);
- return lcc;
-}
-
-static struct ll_cl_context *ll_cl_get(void)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- LASSERT(!IS_ERR(env));
- lcc = &vvp_env_info(env)->vti_io_ctx;
- LASSERT(env == lcc->lcc_env);
- LASSERT(current == lcc->lcc_cookie);
- cl_env_put(env, &refcheck);
-
-	/* env was obtained in ll_cl_init, so it is still usable. */
- return lcc;
-}
-
-/**
- * ->prepare_write() address space operation called by generic_file_write()
- * for every page during write.
- */
-int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- int result;
-
- lcc = ll_cl_init(file, vmpage, 1);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- cl_page_assume(env, io, page);
-
- result = cl_io_prepare_write(env, io, page, from, to);
- if (result == 0) {
- /*
- * Add a reference, so that page is not evicted from
- * the cache until ->commit_write() is called.
- */
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "prepare_write",
- current);
- } else {
- cl_page_unassume(env, io, page);
- ll_cl_fini(lcc);
- }
- /* returning 0 in prepare assumes commit must be called
- * afterwards */
- } else {
- result = PTR_ERR(lcc);
- }
- return result;
-}
-
-int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- int result = 0;
-
- lcc = ll_cl_get();
- env = lcc->lcc_env;
- page = lcc->lcc_page;
- io = lcc->lcc_io;
-
- LASSERT(cl_page_is_owned(page, io));
- LASSERT(from <= to);
- if (from != to) /* handle short write case. */
- result = cl_io_commit_write(env, io, page, from, to);
- if (cl_page_is_owned(page, io))
- cl_page_unassume(env, io, page);
-
- /*
- * Release reference acquired by ll_prepare_write().
- */
- lu_ref_del(&page->cp_reference, "prepare_write", current);
- cl_page_put(env, page);
- ll_cl_fini(lcc);
- return result;
-}
-
-static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
-
-/**
- * Get readahead pages from the filesystem readahead pool of the client for a
- * thread.
- *
- * \param sbi superblock for filesystem readahead state ll_ra_info
- * \param ria per-thread readahead state
- * \param pages number of pages requested for readahead for the thread.
- *
- * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
- * It should work well if the ra_max_pages is much greater than the single
- * file's read-ahead window, and there are not too many threads contending for
- * these readahead pages.
- *
- * TODO: There may be a 'global sync problem' if many threads are trying
- * to get an ra budget that is larger than the remaining readahead pages
- * and reach here at exactly the same time. They will compute \a ret to
- * consume the remaining pages, but will fail at atomic_add_return() and
- * get a zero ra window, although there is still ra space remaining. - Jay */
-
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
- struct ra_io_arg *ria,
- unsigned long pages)
-{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- long ret;
-
- /* If read-ahead pages left are less than 1M, do not do read-ahead,
-	 * otherwise it will form small read RPCs (< 1M), which hurt server
- * performance a lot. */
- ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
- if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) {
- ret = 0;
- goto out;
- }
-
- /* If the non-strided (ria_pages == 0) readahead window
- * (ria_start + ret) has grown across an RPC boundary, then trim
- * readahead size by the amount beyond the RPC so it ends on an
- * RPC boundary. If the readahead window is already ending on
- * an RPC boundary (beyond_rpc == 0), or smaller than a full
- * RPC (beyond_rpc < ret) the readahead size is unchanged.
- * The (beyond_rpc != 0) check is skipped since the conditional
- * branch is more expensive than subtracting zero from the result.
- *
- * Strided read is left unaligned to avoid small fragments beyond
- * the RPC boundary from needing an extra read RPC. */
- if (ria->ria_pages == 0) {
- long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
- if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
- ret -= beyond_rpc;
- }
-
- if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
- atomic_sub(ret, &ra->ra_cur_pages);
- ret = 0;
- }
-
-out:
- return ret;
-}
-
-void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
-{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- atomic_sub(len, &ra->ra_cur_pages);
-}
-
-static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
-{
- LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
- lprocfs_counter_incr(sbi->ll_ra_stats, which);
-}
-
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
-{
- struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
- ll_ra_stats_inc_sbi(sbi, which);
-}
-
-#define RAS_CDEBUG(ras) \
- CDEBUG(D_READA, \
- "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \
-	       " csr %lu sf %lu sp %lu sl %lu\n", \
- ras->ras_last_readpage, ras->ras_consecutive_requests, \
- ras->ras_consecutive_pages, ras->ras_window_start, \
- ras->ras_window_len, ras->ras_next_readahead, \
- ras->ras_requests, ras->ras_request_index, \
- ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
- ras->ras_stride_pages, ras->ras_stride_length)
-
-static int index_in_window(unsigned long index, unsigned long point,
- unsigned long before, unsigned long after)
-{
- unsigned long start = point - before, end = point + after;
-
- if (start > point)
- start = 0;
- if (end < point)
- end = ~0;
-
- return start <= index && index <= end;
-}
-
-static struct ll_readahead_state *ll_ras_get(struct file *f)
-{
- struct ll_file_data *fd;
-
- fd = LUSTRE_FPRIVATE(f);
- return &fd->fd_ras;
-}
-
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
-
- spin_lock(&ras->ras_lock);
- ras->ras_requests++;
- ras->ras_request_index = 0;
- ras->ras_consecutive_requests++;
- rar->lrr_reader = current;
-
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
-}
-
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
-
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
- spin_unlock(&ras->ras_lock);
-}
-
-static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
-{
- struct ll_ra_read *scan;
-
- list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
- if (scan->lrr_reader == current)
- return scan;
- }
- return NULL;
-}
-
-struct ll_ra_read *ll_ra_read_get(struct file *f)
-{
- struct ll_readahead_state *ras;
- struct ll_ra_read *bead;
-
- ras = ll_ras_get(f);
-
- spin_lock(&ras->ras_lock);
- bead = ll_ra_read_get_locked(ras);
- spin_unlock(&ras->ras_lock);
- return bead;
-}
-
-static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_page *page,
- struct page *vmpage)
-{
- struct ccc_page *cp;
- int rc;
-
- rc = 0;
- cl_page_assume(env, io, page);
- lu_ref_add(&page->cp_reference, "ra", current);
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
- rc = cl_page_is_under_lock(env, io, page);
- if (rc == -EBUSY) {
- cp->cpg_defer_uptodate = 1;
- cp->cpg_ra_used = 0;
- cl_page_list_add(queue, page);
- rc = 1;
- } else {
- cl_page_delete(env, page);
- rc = -ENOLCK;
- }
- } else {
- /* skip completed pages */
- cl_page_unassume(env, io, page);
- }
- lu_ref_del(&page->cp_reference, "ra", current);
- cl_page_put(env, page);
- return rc;
-}
-
-/**
- * Initiates read-ahead of a page with given index.
- *
- * \retval +ve: page was added to \a queue.
- *
- * \retval -ENOLCK: there is no extent lock for this part of a file, stop
- * read-ahead.
- *
- * \retval -ve, 0: page wasn't added to \a queue for other reason.
- */
-static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue,
- pgoff_t index, struct address_space *mapping)
-{
- struct page *vmpage;
- struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
- struct cl_page *page;
- enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
- int rc = 0;
- const char *msg = NULL;
-
- vmpage = grab_cache_page_nowait(mapping, index);
- if (vmpage != NULL) {
- /* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping == mapping) {
- page = cl_page_find(env, clob, vmpage->index,
- vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- rc = cl_read_ahead_page(env, io, queue,
- page, vmpage);
- if (rc == -ENOLCK) {
- which = RA_STAT_FAILED_MATCH;
- msg = "lock match failed";
- }
- } else {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "cl_page_find failed";
- }
- } else {
- which = RA_STAT_WRONG_GRAB_PAGE;
- msg = "g_c_p_n returned invalid page";
- }
- if (rc != 1)
- unlock_page(vmpage);
- page_cache_release(vmpage);
- } else {
- which = RA_STAT_FAILED_GRAB_PAGE;
- msg = "g_c_p_n failed";
- }
- if (msg != NULL) {
- ll_ra_stats_inc(mapping, which);
- CDEBUG(D_READA, "%s\n", msg);
- }
- return rc;
-}
-
-#define RIA_DEBUG(ria) \
- CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
- ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
- ria->ria_pages)
-
-/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
- * know what the actual RPC size is. If this needs to change, it makes more
- * sense to tune the i_blkbits value for the file based on the OSTs it is
- * striped over, rather than having a constant value for all files here. */
-
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
- * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
- * by default, this should be adjusted corresponding with max_read_ahead_mb
- * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
- * up quickly which will affect read performance significantly. See LU-2816 */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
-
-static inline int stride_io_mode(struct ll_readahead_state *ras)
-{
- return ras->ras_consecutive_stride_requests > 1;
-}
-/* The function calculates how many pages will be read in
- * [off, off + length], in such stride IO area,
- * stride_offset = st_off, stride_length = st_len,
- * stride_pages = st_pgs
- *
- * |------------------|*****|------------------|*****|------------|*****|....
- * st_off
- * |--- st_pgs ---|
- * |----- st_len -----|
- *
- * How many pages it should read in such pattern
- * |-------------------------------------------------------------|
- * off
- * |<------ length ------->|
- *
- * = |<----->| + |-------------------------------------| + |---|
- * start_left st_pgs * i end_left
- */
-static unsigned long
-stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
- unsigned long off, unsigned long length)
-{
- __u64 start = off > st_off ? off - st_off : 0;
- __u64 end = off + length > st_off ? off + length - st_off : 0;
- unsigned long start_left = 0;
- unsigned long end_left = 0;
- unsigned long pg_count;
-
- if (st_len == 0 || length == 0 || end == 0)
- return length;
-
- start_left = do_div(start, st_len);
- if (start_left < st_pgs)
- start_left = st_pgs - start_left;
- else
- start_left = 0;
-
- end_left = do_div(end, st_len);
- if (end_left > st_pgs)
- end_left = st_pgs;
-
- CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu \n",
- start, end, start_left, end_left);
-
- if (start == end)
- pg_count = end_left - (st_pgs - start_left);
- else
- pg_count = start_left + st_pgs * (end - start - 1) + end_left;
-
- CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
- st_off, st_len, st_pgs, off, length, pg_count);
-
- return pg_count;
-}
-
-static int ria_page_count(struct ra_io_arg *ria)
-{
- __u64 length = ria->ria_end >= ria->ria_start ?
- ria->ria_end - ria->ria_start + 1 : 0;
-
- return stride_pg_count(ria->ria_stoff, ria->ria_length,
- ria->ria_pages, ria->ria_start,
- length);
-}
-
-/* Check whether the index is in the defined ra-window */
-static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
-{
- /* If ria_length == ria_pages, it means non-stride I/O mode,
-	 * idx should always be inside the read-ahead window in this case.
- * For stride I/O mode, just check whether the idx is inside
- * the ria_pages. */
- return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
- (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
- ria->ria_length < ria->ria_pages);
-}
-
-static int ll_read_ahead_pages(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *queue,
- struct ra_io_arg *ria,
- unsigned long *reserved_pages,
- struct address_space *mapping,
- unsigned long *ra_end)
-{
- int rc, count = 0, stride_ria;
- unsigned long page_idx;
-
- LASSERT(ria != NULL);
- RIA_DEBUG(ria);
-
- stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
- for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
- *reserved_pages > 0; page_idx++) {
- if (ras_inside_ra_window(page_idx, ria)) {
- /* If the page is inside the read-ahead window*/
- rc = ll_read_ahead_page(env, io, queue,
- page_idx, mapping);
- if (rc == 1) {
- (*reserved_pages)--;
- count++;
- } else if (rc == -ENOLCK)
- break;
- } else if (stride_ria) {
-			/* If it is not in the read-ahead window, and stride
-			 * read-ahead mode is active, then check whether it
-			 * should skip the stride gap */
- pgoff_t offset;
-			/* FIXME: This assertion is only valid for forward
-			 * read-ahead; it will be fixed when backward
-			 * read-ahead is implemented */
- LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
- page_idx,
- ria->ria_start, ria->ria_end, ria->ria_stoff,
- ria->ria_length, ria->ria_pages);
- offset = page_idx - ria->ria_stoff;
- offset = offset % (ria->ria_length);
- if (offset > ria->ria_pages) {
- page_idx += ria->ria_length - offset;
- CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
- ria->ria_length - offset);
- continue;
- }
- }
- }
- *ra_end = page_idx;
- return count;
-}
-
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags)
-{
- struct vvp_io *vio = vvp_env_io(env);
- struct vvp_thread_info *vti = vvp_env_info(env);
- struct cl_attr *attr = ccc_env_thread_attr(env);
- unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len;
- struct inode *inode;
- struct ll_ra_read *bead;
- struct ra_io_arg *ria = &vti->vti_ria;
- struct ll_inode_info *lli;
- struct cl_object *clob;
- int ret = 0;
- __u64 kms;
-
- inode = mapping->host;
- lli = ll_i2info(inode);
- clob = lli->lli_clob;
-
- memset(ria, 0, sizeof(*ria));
-
- cl_object_attr_lock(clob);
- ret = cl_object_attr_get(env, clob, attr);
- cl_object_attr_unlock(clob);
-
- if (ret != 0)
- return ret;
- kms = attr->cat_kms;
- if (kms == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
- return 0;
- }
-
- spin_lock(&ras->ras_lock);
- if (vio->cui_ra_window_set)
- bead = &vio->cui_bead;
- else
- bead = NULL;
-
- /* Enlarge the RA window to encompass the full read */
- if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
- bead->lrr_start + bead->lrr_count) {
- ras->ras_window_len = bead->lrr_start + bead->lrr_count -
- ras->ras_window_start;
- }
- /* Reserve a part of the read-ahead window that we'll be issuing */
- if (ras->ras_window_len) {
- start = ras->ras_next_readahead;
- end = ras->ras_window_start + ras->ras_window_len - 1;
- }
- if (end != 0) {
- unsigned long rpc_boundary;
- /*
- * Align RA window to an optimal boundary.
- *
- * XXX This would be better to align to cl_max_pages_per_rpc
- * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
- * be aligned to the RAID stripe size in the future and that
- * is more important than the RPC size.
- */
-		/* Note: we only trim the RPC, instead of extending the RPC
-		 * to the boundary, to avoid reading too many pages during
-		 * random reads. */
- rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1));
- if (rpc_boundary > 0)
- rpc_boundary--;
-
- if (rpc_boundary > start)
- end = rpc_boundary;
-
- /* Truncate RA window to end of file */
- end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
-
- ras->ras_next_readahead = max(end, end + 1);
- RAS_CDEBUG(ras);
- }
- ria->ria_start = start;
- ria->ria_end = end;
-	/* If stride I/O mode is detected, get the stride window */
- if (stride_io_mode(ras)) {
- ria->ria_stoff = ras->ras_stride_offset;
- ria->ria_length = ras->ras_stride_length;
- ria->ria_pages = ras->ras_stride_pages;
- }
- spin_unlock(&ras->ras_lock);
-
- if (end == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
- return 0;
- }
- len = ria_page_count(ria);
- if (len == 0)
- return 0;
-
- reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
- if (reserved < len)
- ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
-
- CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
- atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
- ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
-
- ret = ll_read_ahead_pages(env, io, queue,
- ria, &reserved, mapping, &ra_end);
-
- LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
- if (reserved != 0)
- ll_ra_count_put(ll_i2sbi(inode), reserved);
-
- if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
- ll_ra_stats_inc(mapping, RA_STAT_EOF);
-
-	/* If we didn't get to the end of the region we reserved from
-	 * the ras, we need to go back and update the ras so that the
-	 * next read-ahead tries from where we left off. We only do so
-	 * if the region we failed to issue read-ahead on is still ahead
-	 * of the app and behind the next index to start read-ahead from. */
- CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
- ra_end, end, ria->ria_end);
-
- if (ra_end != end + 1) {
- spin_lock(&ras->ras_lock);
- if (ra_end < ras->ras_next_readahead &&
- index_in_window(ra_end, ras->ras_window_start, 0,
- ras->ras_window_len)) {
- ras->ras_next_readahead = ra_end;
- RAS_CDEBUG(ras);
- }
- spin_unlock(&ras->ras_lock);
- }
-
- return ret;
-}
-
-static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
- unsigned long index)
-{
- ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1));
-}
-
-/* called with the ras_lock held or from places where it doesn't matter */
-static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
- unsigned long index)
-{
- ras->ras_last_readpage = index;
- ras->ras_consecutive_requests = 0;
- ras->ras_consecutive_pages = 0;
- ras->ras_window_len = 0;
- ras_set_start(inode, ras, index);
- ras->ras_next_readahead = max(ras->ras_window_start, index);
-
- RAS_CDEBUG(ras);
-}
-
-/* called with the ras_lock held or from places where it doesn't matter */
-static void ras_stride_reset(struct ll_readahead_state *ras)
-{
- ras->ras_consecutive_stride_requests = 0;
- ras->ras_stride_length = 0;
- ras->ras_stride_pages = 0;
- RAS_CDEBUG(ras);
-}
-
-void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
-{
- spin_lock_init(&ras->ras_lock);
- ras_reset(inode, ras, 0);
- ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
-}
-
-/*
- * Check whether the read request is in the stride window.
- * If it is in the stride window, return 1, otherwise return 0.
- */
-static int index_in_stride_window(struct ll_readahead_state *ras,
- unsigned long index)
-{
- unsigned long stride_gap;
-
- if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
- ras->ras_stride_pages == ras->ras_stride_length)
- return 0;
-
- stride_gap = index - ras->ras_last_readpage - 1;
-
- /* If it is contiguous read */
- if (stride_gap == 0)
- return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
-
- /* Otherwise check the stride by itself */
- return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
- ras->ras_consecutive_pages == ras->ras_stride_pages;
-}
-
-static void ras_update_stride_detector(struct ll_readahead_state *ras,
- unsigned long index)
-{
- unsigned long stride_gap = index - ras->ras_last_readpage - 1;
-
- if (!stride_io_mode(ras) && (stride_gap != 0 ||
- ras->ras_consecutive_stride_requests == 0)) {
- ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages;
- }
- LASSERT(ras->ras_request_index == 0);
- LASSERT(ras->ras_consecutive_stride_requests == 0);
-
- if (index <= ras->ras_last_readpage) {
-		/* Stride detection only handles forward reads, so reset
-		 * the stride window on a backward or repeated read */
- ras_stride_reset(ras);
- return;
- }
-
- ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages;
-
- RAS_CDEBUG(ras);
- return;
-}
-
-static unsigned long
-stride_page_count(struct ll_readahead_state *ras, unsigned long len)
-{
- return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
- ras->ras_stride_pages, ras->ras_stride_offset,
- len);
-}
-
-/* Stride Read-ahead window will be increased inc_len according to
- * stride I/O pattern */
-static void ras_stride_increase_window(struct ll_readahead_state *ras,
- struct ll_ra_info *ra,
- unsigned long inc_len)
-{
- unsigned long left, step, window_len;
- unsigned long stride_len;
-
- LASSERT(ras->ras_stride_length > 0);
- LASSERTF(ras->ras_window_start + ras->ras_window_len
- >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
- ras->ras_window_start,
- ras->ras_window_len, ras->ras_stride_offset);
-
- stride_len = ras->ras_window_start + ras->ras_window_len -
- ras->ras_stride_offset;
-
- left = stride_len % ras->ras_stride_length;
- window_len = ras->ras_window_len - left;
-
- if (left < ras->ras_stride_pages)
- left += inc_len;
- else
- left = ras->ras_stride_pages + inc_len;
-
- LASSERT(ras->ras_stride_pages != 0);
-
- step = left / ras->ras_stride_pages;
- left %= ras->ras_stride_pages;
-
- window_len += step * ras->ras_stride_length + left;
-
- if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file)
- ras->ras_window_len = window_len;
-
- RAS_CDEBUG(ras);
-}
-
-static void ras_increase_window(struct inode *inode,
- struct ll_readahead_state *ras,
- struct ll_ra_info *ra)
-{
-	/* The stretch of the RA window should be aligned with the max RPC
-	 * size, but the current clio architecture does not support retrieving
-	 * such information from lower layers. FIXME later
- */
- if (stride_io_mode(ras))
- ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode));
- else
- ras->ras_window_len = min(ras->ras_window_len +
- RAS_INCREASE_STEP(inode),
- ra->ra_max_pages_per_file);
-}
-
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit)
-{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- int zero = 0, stride_detect = 0, ra_miss = 0;
-
- spin_lock(&ras->ras_lock);
-
- ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
-
-	/* Reset the read-ahead window in two cases. First, when the app seeks
-	 * or reads to some other part of the file. Secondly, if we get a
-	 * read-ahead miss that we think we've previously issued. This can
-	 * be a symptom of there being so many read-ahead pages that the VM
-	 * is reclaiming them before we get to them. */
- if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
- zero = 1;
- ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
- } else if (!hit && ras->ras_window_len &&
- index < ras->ras_next_readahead &&
- index_in_window(index, ras->ras_window_start, 0,
- ras->ras_window_len)) {
- ra_miss = 1;
- ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
- }
-
-	/* On the second access to a file smaller than the tunable
-	 * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
-	 * file up to ra_max_pages_per_file. This is simply a best effort
-	 * and only occurs once per open file. Normal RA behavior resumes
-	 * for subsequent IO. The mmap case does not increment
-	 * ras_requests and thus can never trigger this behavior. */
- if (ras->ras_requests == 2 && !ras->ras_request_index) {
- __u64 kms_pages;
-
- kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
-
- CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
- ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
-
- if (kms_pages &&
- kms_pages <= ra->ra_max_read_ahead_whole_pages) {
- ras->ras_window_start = 0;
- ras->ras_last_readpage = 0;
- ras->ras_next_readahead = 0;
- ras->ras_window_len = min(ra->ra_max_pages_per_file,
- ra->ra_max_read_ahead_whole_pages);
- goto out_unlock;
- }
- }
- if (zero) {
-		/* check whether it is in stride I/O mode */
- if (!index_in_stride_window(ras, index)) {
- if (ras->ras_consecutive_stride_requests == 0 &&
- ras->ras_request_index == 0) {
- ras_update_stride_detector(ras, index);
- ras->ras_consecutive_stride_requests++;
- } else {
- ras_stride_reset(ras);
- }
- ras_reset(inode, ras, index);
- ras->ras_consecutive_pages++;
- goto out_unlock;
- } else {
- ras->ras_consecutive_pages = 0;
- ras->ras_consecutive_requests = 0;
- if (++ras->ras_consecutive_stride_requests > 1)
- stride_detect = 1;
- RAS_CDEBUG(ras);
- }
- } else {
- if (ra_miss) {
- if (index_in_stride_window(ras, index) &&
- stride_io_mode(ras)) {
-				/* If stride-RA hit a cache miss, the stride
-				 * detector will not be reset, to avoid the
-				 * overhead of re-detecting read-ahead mode */
- if (index != ras->ras_last_readpage + 1)
- ras->ras_consecutive_pages = 0;
- ras_reset(inode, ras, index);
- RAS_CDEBUG(ras);
- } else {
- /* Reset both stride window and normal RA
- * window */
- ras_reset(inode, ras, index);
- ras->ras_consecutive_pages++;
- ras_stride_reset(ras);
- goto out_unlock;
- }
- } else if (stride_io_mode(ras)) {
-			/* If this is a contiguous read but we are currently
-			 * in stride I/O mode, check whether the stride step
-			 * is still valid; if not, reset the stride RA window */
- if (!index_in_stride_window(ras, index)) {
- /* Shrink stride read-ahead window to be zero */
- ras_stride_reset(ras);
- ras->ras_window_len = 0;
- ras->ras_next_readahead = index;
- }
- }
- }
- ras->ras_consecutive_pages++;
- ras->ras_last_readpage = index;
- ras_set_start(inode, ras, index);
-
- if (stride_io_mode(ras))
-		/* Since stride read-ahead is sensitive to the offset
-		 * of the read, use the original offset here instead
-		 * of ras_window_start, which is RPC aligned */
- ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- else
- ras->ras_next_readahead = max(ras->ras_window_start,
- ras->ras_next_readahead);
- RAS_CDEBUG(ras);
-
- /* Trigger RA in the mmap case where ras_consecutive_requests
- * is not incremented and thus can't be used to trigger RA */
- if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
- ras->ras_window_len = RAS_INCREASE_STEP(inode);
- goto out_unlock;
- }
-
-	/* Initially reset the stride window offset to next_readahead */
- if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
- /**
- * Once stride IO mode is detected, next_readahead should be
- * reset to make sure next_readahead > stride offset
- */
- ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- ras->ras_stride_offset = index;
- ras->ras_window_len = RAS_INCREASE_STEP(inode);
- }
-
- /* The initial ras_window_len is set to the request size. To avoid
- * uselessly reading and discarding pages for random IO the window is
- * only increased once per consecutive request received. */
- if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
- !ras->ras_request_index)
- ras_increase_window(inode, ras, ra);
-out_unlock:
- RAS_CDEBUG(ras);
- ras->ras_request_index++;
- spin_unlock(&ras->ras_lock);
- return;
-}
-
-int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
-{
- struct inode *inode = vmpage->mapping->host;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- struct cl_object *clob;
- struct cl_env_nest nest;
- bool redirtied = false;
- bool unlocked = false;
- int result;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageWriteback(vmpage));
-
- LASSERT(ll_i2dtexp(inode) != NULL);
-
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env)) {
- result = PTR_ERR(env);
- goto out;
- }
-
- clob = ll_i2info(inode)->lli_clob;
- LASSERT(clob != NULL);
-
- io = ccc_env_thread_io(env);
- io->ci_obj = clob;
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, clob);
- if (result == 0) {
- page = cl_page_find(env, clob, vmpage->index,
- vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- lu_ref_add(&page->cp_reference, "writepage",
- current);
- cl_page_assume(env, io, page);
- result = cl_page_flush(env, io, page);
- if (result != 0) {
- /*
- * Re-dirty page on error so it retries write,
- * but not in case when IO has actually
- * occurred and completed with an error.
- */
- if (!PageError(vmpage)) {
- redirty_page_for_writepage(wbc, vmpage);
- result = 0;
- redirtied = true;
- }
- }
- cl_page_disown(env, io, page);
- unlocked = true;
- lu_ref_del(&page->cp_reference,
- "writepage", current);
- cl_page_put(env, page);
- } else {
- result = PTR_ERR(page);
- }
- }
- cl_io_fini(env, io);
-
- if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
- loff_t offset = cl_offset(clob, vmpage->index);
-
-		/* Flush page failed because the extent is being written out.
-		 * Wait for the extent write to finish to avoid breaking the
-		 * kernel, which assumes ->writepage should mark the page
-		 * PageWriteback or clean it. */
- result = cl_sync_file_range(inode, offset,
- offset + PAGE_CACHE_SIZE - 1,
- CL_FSYNC_LOCAL, 1);
- if (result > 0) {
-			/* We may actually have written more than one page;
-			 * subtract the extra pages here because the caller
-			 * will count this page itself. */
- wbc->nr_to_write -= result - 1;
- result = 0;
- }
- }
-
- cl_env_nested_put(&nest, env);
- goto out;
-
-out:
- if (result < 0) {
- if (!lli->lli_async_rc)
- lli->lli_async_rc = result;
- SetPageError(vmpage);
- if (!unlocked)
- unlock_page(vmpage);
- }
- return result;
-}
-
-int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
-{
- struct inode *inode = mapping->host;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- loff_t start;
- loff_t end;
- enum cl_fsync_mode mode;
- int range_whole = 0;
- int result;
- int ignore_layout = 0;
-
- if (wbc->range_cyclic) {
- start = mapping->writeback_index << PAGE_CACHE_SHIFT;
- end = OBD_OBJECT_EOF;
- } else {
- start = wbc->range_start;
- end = wbc->range_end;
- if (end == LLONG_MAX) {
- end = OBD_OBJECT_EOF;
- range_whole = start == 0;
- }
- }
-
- mode = CL_FSYNC_NONE;
- if (wbc->sync_mode == WB_SYNC_ALL)
- mode = CL_FSYNC_LOCAL;
-
- if (sbi->ll_umounting)
- /* if the mountpoint is being umounted, all pages have to be
- * evicted to avoid hitting LBUG when truncate_inode_pages()
- * is called later on. */
- ignore_layout = 1;
- result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
- if (result > 0) {
- wbc->nr_to_write -= result;
- result = 0;
- }
-
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
- if (end == OBD_OBJECT_EOF)
- end = i_size_read(inode);
- mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
- }
- return result;
-}
-
-int ll_readpage(struct file *file, struct page *vmpage)
-{
- struct ll_cl_context *lcc;
- int result;
-
- lcc = ll_cl_init(file, vmpage, 0);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (likely(!PageUptodate(vmpage))) {
- cl_page_assume(env, io, page);
- result = cl_io_read_page(env, io, page);
- } else {
- /* Page from a non-object file. */
- unlock_page(vmpage);
- result = 0;
- }
- ll_cl_fini(lcc);
- } else {
- unlock_page(vmpage);
- result = PTR_ERR(lcc);
- }
- return result;
-}
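
The stride read-ahead code removed above is dense, and the window-membership test in ras_inside_ra_window() is the piece most worth keeping in mind: an index belongs to the stride window when it falls within the first ria_pages pages of the ria_length-long period that starts at ria_stoff (a ria_length of 0, or equal to ria_pages, means plain non-stride I/O). The following standalone C sketch models just that test; the stride values in main() are hypothetical and chosen only to make the gaps visible, they are not taken from this patch.

#include <stdio.h>
#include <stdbool.h>

/*
 * Standalone model of the ras_inside_ra_window() test above.
 * stoff/length/pages mirror ria_stoff/ria_length/ria_pages;
 * the values used in main() are hypothetical examples.
 */
static bool inside_stride_window(unsigned long idx, unsigned long stoff,
				 unsigned long length, unsigned long pages)
{
	/* length == 0 or length == pages means plain (non-stride) I/O:
	 * every index inside the RA window qualifies. */
	if (length == 0 || length == pages)
		return true;

	/* Otherwise the index must land in the first `pages` pages of
	 * its stride period, counted from the stride offset. */
	return idx >= stoff && (idx - stoff) % length < pages;
}

int main(void)
{
	/* Hypothetical stride: 16 useful pages every 64 pages, from page 100 */
	unsigned long stoff = 100, length = 64, pages = 16;
	unsigned long idx;

	for (idx = 100; idx < 230; idx += 10)
		printf("idx %3lu -> %s\n", idx,
		       inside_stride_window(idx, stoff, length, pages) ?
		       "read ahead" : "skip (stride gap)");
	return 0;
}

For this example stride, indexes 100-115 and 164-179 are read ahead and the gaps in between are skipped, which is exactly what the skip logic in ll_read_ahead_pages() achieves by advancing page_idx by ria_length - offset.
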
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
deleted file mode 100644
index b17b7cea582c..000000000000
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lustre/llite/rw26.c
- *
- * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/uaccess.h>
-
-#include <linux/migrate.h>
-#include <linux/fs.h>
-#include <linux/buffer_head.h>
-#include <linux/mpage.h>
-#include <linux/writeback.h>
-#include <linux/pagemap.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-#include "../include/linux/lustre_compat25.h"
-
-/**
- * Implements the Linux VM address_space::invalidatepage() method. This method
- * is called when the page is truncated from a file, either as a result of an
- * explicit truncate, or when the inode is removed from memory (as a result of
- * a final iput(), umount, or memory-pressure-induced icache shrinking).
- *
- * [0, offset] bytes of the page remain valid (this covers the case of a
- * truncate that is not page aligned). Lustre leaves the partially truncated
- * page in the cache, relying on struct inode::i_size to limit further accesses.
- */
-static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
- unsigned int length)
-{
- struct inode *inode;
- struct lu_env *env;
- struct cl_page *page;
- struct cl_object *obj;
-
- int refcheck;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageWriteback(vmpage));
-
- /*
-	 * It is safe not to check anything in invalidatepage/releasepage
-	 * below because they are run with the page locked and all our IO
-	 * happens with the page locked too
- */
- if (offset == 0 && length == PAGE_CACHE_SIZE) {
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- inode = vmpage->mapping->host;
- obj = ll_i2info(inode)->lli_clob;
- if (obj != NULL) {
- page = cl_vmpage_page(vmpage, obj);
- if (page != NULL) {
- lu_ref_add(&page->cp_reference,
- "delete", vmpage);
- cl_page_delete(env, page);
- lu_ref_del(&page->cp_reference,
- "delete", vmpage);
- cl_page_put(env, page);
- }
- } else
- LASSERT(vmpage->private == 0);
- cl_env_put(env, &refcheck);
- }
- }
-}
-
-#ifdef HAVE_RELEASEPAGE_WITH_INT
-#define RELEASEPAGE_ARG_TYPE int
-#else
-#define RELEASEPAGE_ARG_TYPE gfp_t
-#endif
-static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct cl_object *obj;
- struct cl_page *page;
- struct address_space *mapping;
- int result;
-
- LASSERT(PageLocked(vmpage));
- if (PageWriteback(vmpage) || PageDirty(vmpage))
- return 0;
-
- mapping = vmpage->mapping;
- if (mapping == NULL)
- return 1;
-
- obj = ll_i2info(mapping->host)->lli_clob;
- if (obj == NULL)
- return 1;
-
- /* 1 for page allocator, 1 for cl_page and 1 for page cache */
- if (page_count(vmpage) > 3)
- return 0;
-
-	/* TODO: determine which gfp flags should be used, based on @gfp_mask. */
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
-		/* If we can't allocate an env we won't call cl_page_put()
-		 * later on, which in turn means it's impossible to drop the
-		 * page refcount via cl_page, so ask the kernel not to free
-		 * this page. */
- return 0;
-
- page = cl_vmpage_page(vmpage, obj);
- result = page == NULL;
- if (page != NULL) {
- if (!cl_page_in_use(page)) {
- result = 1;
- cl_page_delete(env, page);
- }
- cl_page_put(env, page);
- }
- cl_env_nested_put(&nest, env);
- return result;
-}
-
-static int ll_set_page_dirty(struct page *vmpage)
-{
-#if 0
- struct cl_page *page = vvp_vmpage_page_transient(vmpage);
- struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
- struct vvp_page *cpg;
-
- /*
- * XXX should page method be called here?
- */
- LASSERT(&obj->co_cl == page->cp_obj);
- cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
- /*
- * XXX cannot do much here, because page is possibly not locked:
- * sys_munmap()->...
- * ->unmap_page_range()->zap_pte_range()->set_page_dirty().
- */
- vvp_write_pending(obj, cpg);
-#endif
- return __set_page_dirty_nobuffers(vmpage);
-}
-
-#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
-
-static inline int ll_get_user_pages(int rw, unsigned long user_addr,
- size_t size, struct page ***pages,
- int *max_pages)
-{
- int result = -ENOMEM;
-
- /* set an arbitrary limit to prevent arithmetic overflow */
- if (size > MAX_DIRECTIO_SIZE) {
- *pages = NULL;
- return -EFBIG;
- }
-
- *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
-
- *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
- if (*pages) {
- result = get_user_pages_fast(user_addr, *max_pages,
- (rw == READ), *pages);
- if (unlikely(result <= 0))
- kvfree(*pages);
- }
-
- return result;
-}
-
-/* ll_free_user_pages - tear down page struct array
- * @pages: array of page struct pointers underlying target buffer */
-static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
-{
- int i;
-
- for (i = 0; i < npages; i++) {
- if (do_dirty)
- set_page_dirty_lock(pages[i]);
- page_cache_release(pages[i]);
- }
- kvfree(pages);
-}
-
-ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
- int rw, struct inode *inode,
- struct ll_dio_pages *pv)
-{
- struct cl_page *clp;
- struct cl_2queue *queue;
- struct cl_object *obj = io->ci_obj;
- int i;
- ssize_t rc = 0;
- loff_t file_offset = pv->ldp_start_offset;
- long size = pv->ldp_size;
- int page_count = pv->ldp_nr;
- struct page **pages = pv->ldp_pages;
- long page_size = cl_page_size(obj);
- bool do_io;
- int io_pages = 0;
-
- queue = &io->ci_queue;
- cl_2queue_init(queue);
- for (i = 0; i < page_count; i++) {
- if (pv->ldp_offsets)
- file_offset = pv->ldp_offsets[i];
-
- LASSERT(!(file_offset & (page_size - 1)));
- clp = cl_page_find(env, obj, cl_index(obj, file_offset),
- pv->ldp_pages[i], CPT_TRANSIENT);
- if (IS_ERR(clp)) {
- rc = PTR_ERR(clp);
- break;
- }
-
- rc = cl_page_own(env, io, clp);
- if (rc) {
- LASSERT(clp->cp_state == CPS_FREEING);
- cl_page_put(env, clp);
- break;
- }
-
- do_io = true;
-
- /* check the page type: if the page is a host page, then do
- * write directly */
- if (clp->cp_type == CPT_CACHEABLE) {
- struct page *vmpage = cl_page_vmpage(env, clp);
- struct page *src_page;
- struct page *dst_page;
- void *src;
- void *dst;
-
- src_page = (rw == WRITE) ? pages[i] : vmpage;
- dst_page = (rw == WRITE) ? vmpage : pages[i];
-
- src = kmap_atomic(src_page);
- dst = kmap_atomic(dst_page);
- memcpy(dst, src, min(page_size, size));
- kunmap_atomic(dst);
- kunmap_atomic(src);
-
- /* make sure page will be added to the transfer by
- * cl_io_submit()->...->vvp_page_prep_write(). */
- if (rw == WRITE)
- set_page_dirty(vmpage);
-
- if (rw == READ) {
-				/* do not issue the page for read, since it
-				 * may re-read a read-ahead page which does
-				 * not have the uptodate bit set. */
- cl_page_disown(env, io, clp);
- do_io = false;
- }
- }
-
- if (likely(do_io)) {
- cl_2queue_add(queue, clp);
-
- /*
- * Set page clip to tell transfer formation engine
- * that page has to be sent even if it is beyond KMS.
- */
- cl_page_clip(env, clp, 0, min(size, page_size));
-
- ++io_pages;
- }
-
- /* drop the reference count for cl_page_find */
- cl_page_put(env, clp);
- size -= page_size;
- file_offset += page_size;
- }
-
- if (rc == 0 && io_pages) {
- rc = cl_io_submit_sync(env, io,
- rw == READ ? CRT_READ : CRT_WRITE,
- queue, 0);
- }
- if (rc == 0)
- rc = pv->ldp_size;
-
- cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
- cl_2queue_fini(env, queue);
- return rc;
-}
-EXPORT_SYMBOL(ll_direct_rw_pages);
-
-static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
- int rw, struct inode *inode,
- struct address_space *mapping,
- size_t size, loff_t file_offset,
- struct page **pages, int page_count)
-{
- struct ll_dio_pages pvec = { .ldp_pages = pages,
- .ldp_nr = page_count,
- .ldp_size = size,
- .ldp_offsets = NULL,
- .ldp_start_offset = file_offset
- };
-
- return ll_direct_rw_pages(env, io, rw, inode, &pvec);
-}
-
-#ifdef KMALLOC_MAX_SIZE
-#define MAX_MALLOC KMALLOC_MAX_SIZE
-#else
-#define MAX_MALLOC (128 * 1024)
-#endif
-
-/* This is the maximum size of a single O_DIRECT request, based on the
- * kmalloc limit. We need to fit all of the brw_page structs, each one
- * representing PAGE_SIZE worth of user data, into a single buffer, and
- * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
- * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
-#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
- ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
- loff_t file_offset)
-{
- struct lu_env *env;
- struct cl_io *io;
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct ccc_object *obj = cl_inode2ccc(inode);
- ssize_t count = iov_iter_count(iter);
- ssize_t tot_bytes = 0, result = 0;
- struct ll_inode_info *lli = ll_i2info(inode);
- long size = MAX_DIO_SIZE;
- int refcheck;
-
- if (!lli->lli_has_smd)
- return -EBADF;
-
- /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
- if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
- return -EINVAL;
-
- CDEBUG(D_VFSTRACE,
- "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
- inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
- MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
-
- /* Check that all user buffers are aligned as well */
- if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
- return -EINVAL;
-
- env = cl_env_get(&refcheck);
- LASSERT(!IS_ERR(env));
- io = ccc_env_io(env)->cui_cl.cis_io;
- LASSERT(io != NULL);
-
-	/* 0. Need locking between buffered and direct access, and against
-	 *    size changes racing in via concurrent truncates and writes.
-	 * 1. Need the inode mutex to operate on transient pages.
- */
- if (iov_iter_rw(iter) == READ)
- mutex_lock(&inode->i_mutex);
-
- LASSERT(obj->cob_transient_pages == 0);
- while (iov_iter_count(iter)) {
- struct page **pages;
- size_t offs;
-
- count = min_t(size_t, iov_iter_count(iter), size);
- if (iov_iter_rw(iter) == READ) {
- if (file_offset >= i_size_read(inode))
- break;
- if (file_offset + count > i_size_read(inode))
- count = i_size_read(inode) - file_offset;
- }
-
- result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
- if (likely(result > 0)) {
- int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
- result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
- inode, file->f_mapping,
- result, file_offset, pages,
- n);
- ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
- }
- if (unlikely(result <= 0)) {
- /* If we can't allocate a large enough buffer
- * for the request, shrink it to a smaller
- * PAGE_SIZE multiple and try again.
- * We should always be able to kmalloc for a
- * page worth of page pointers = 4MB on i386. */
- if (result == -ENOMEM &&
- size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
- PAGE_CACHE_SIZE) {
- size = ((((size / 2) - 1) |
- ~CFS_PAGE_MASK) + 1) &
- CFS_PAGE_MASK;
- CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
- size);
- continue;
- }
-
- goto out;
- }
- iov_iter_advance(iter, result);
- tot_bytes += result;
- file_offset += result;
- }
-out:
- LASSERT(obj->cob_transient_pages == 0);
- if (iov_iter_rw(iter) == READ)
- mutex_unlock(&inode->i_mutex);
-
- if (tot_bytes > 0) {
- if (iov_iter_rw(iter) == WRITE) {
- struct lov_stripe_md *lsm;
-
- lsm = ccc_inode_lsm_get(inode);
- LASSERT(lsm != NULL);
- lov_stripe_lock(lsm);
- obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
- lov_stripe_unlock(lsm);
- ccc_inode_lsm_put(inode, lsm);
- }
- }
-
- cl_env_put(env, &refcheck);
- return tot_bytes ? : result;
-}
-
-static int ll_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- struct page *page;
- int rc;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
-
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
-
- *pagep = page;
-
- rc = ll_prepare_write(file, page, from, from + len);
- if (rc) {
- unlock_page(page);
- page_cache_release(page);
- }
- return rc;
-}
-
-static int ll_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
- int rc;
-
- rc = ll_commit_write(file, page, from, from + copied);
- unlock_page(page);
- page_cache_release(page);
-
- return rc ?: copied;
-}
-
-#ifdef CONFIG_MIGRATION
-static int ll_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode
- )
-{
- /* Always fail page migration until we have a proper implementation */
- return -EIO;
-}
-#endif
-
-const struct address_space_operations ll_aops = {
- .readpage = ll_readpage,
- .direct_IO = ll_direct_IO_26,
- .writepage = ll_writepage,
- .writepages = ll_writepages,
- .set_page_dirty = ll_set_page_dirty,
- .write_begin = ll_write_begin,
- .write_end = ll_write_end,
- .invalidatepage = ll_invalidatepage,
- .releasepage = (void *)ll_releasepage,
-#ifdef CONFIG_MIGRATION
- .migratepage = ll_migratepage,
-#endif
-};
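
The MAX_DIO_SIZE definition above bounds a single O_DIRECT request by how many struct brw_page descriptors fit into one kmalloc allocation, rounded down to a whole RPC. The deleted comment quotes "up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc" for 4kB pages; a quick standalone check of that arithmetic is sketched below. The 24-byte descriptor size and the 1MB full-RPC size are assumptions made for illustration (roughly what the quoted figures imply), not values taken from this patch.

#include <stdio.h>

/*
 * Re-derivation of the MAX_DIO_SIZE bound described in the comment above.
 * The struct size and RPC size are assumptions for illustration only.
 */
#define PAGE_SIZE_ASSUMED	4096UL
#define BRW_PAGE_SIZE_ASSUMED	24UL			/* assumed sizeof(struct brw_page) */
#define DT_MAX_BRW_SIZE_ASSUMED	(1024UL * 1024)		/* assumed full RPC size */

static unsigned long max_dio_size(unsigned long max_malloc)
{
	/* Fit max_malloc/sizeof(brw_page) descriptors, each covering one
	 * page of user data, then trim down to a full-RPC multiple. */
	return (max_malloc / BRW_PAGE_SIZE_ASSUMED * PAGE_SIZE_ASSUMED) &
	       ~(DT_MAX_BRW_SIZE_ASSUMED - 1);
}

int main(void)
{
	printf("128kB kmalloc -> %lu MiB per O_DIRECT request\n",
	       max_dio_size(128 * 1024) >> 20);
	printf("4MB kmalloc   -> %lu MiB per O_DIRECT request\n",
	       max_dio_size(4 * 1024 * 1024) >> 20);
	return 0;
}

Under those assumptions the program prints 21 MiB and 682 MiB, which lines up with the figures quoted in the deleted comment (the "22MB" number corresponds to the value before trimming to an RPC multiple).
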
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
deleted file mode 100644
index c8602818836d..000000000000
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ /dev/null
@@ -1,1708 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "llite_internal.h"
-
-#define SA_OMITTED_ENTRY_MAX 8ULL
-
-typedef enum {
- /** negative values are for error cases */
- SA_ENTRY_INIT = 0, /** init entry */
- SA_ENTRY_SUCC = 1, /** stat succeed */
- SA_ENTRY_INVA = 2, /** invalid entry */
- SA_ENTRY_DEST = 3, /** entry to be destroyed */
-} se_stat_t;
-
-struct ll_sa_entry {
- /* link into sai->sai_entries */
- struct list_head se_link;
- /* link into sai->sai_entries_{received,stated} */
- struct list_head se_list;
- /* link into sai hash table locally */
- struct list_head se_hash;
- /* entry reference count */
- atomic_t se_refcount;
- /* entry index in the sai */
- __u64 se_index;
- /* low layer ldlm lock handle */
- __u64 se_handle;
- /* entry status */
- se_stat_t se_stat;
- /* entry size, contains name */
- int se_size;
- /* pointer to async getattr enqueue info */
- struct md_enqueue_info *se_minfo;
- /* pointer to the async getattr request */
- struct ptlrpc_request *se_req;
- /* pointer to the target inode */
- struct inode *se_inode;
- /* entry name */
- struct qstr se_qstr;
-};
-
-static unsigned int sai_generation;
-static DEFINE_SPINLOCK(sai_generation_lock);
-
-static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
-{
- return list_empty(&entry->se_hash);
-}
-
-/*
- * The entry can only be released by the caller; it is necessary to hold the lock.
- */
-static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
-{
- smp_rmb();
- return (entry->se_stat != SA_ENTRY_INIT);
-}
-
-static inline int ll_sa_entry_hash(int val)
-{
- return val & LL_SA_CACHE_MASK;
-}
-
-/*
- * Insert the entry into the SA hash table.
- */
-static inline void
-ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
-{
- int i = ll_sa_entry_hash(entry->se_qstr.hash);
-
- spin_lock(&sai->sai_cache_lock[i]);
- list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
- spin_unlock(&sai->sai_cache_lock[i]);
-}
-
-/*
- * Remove the entry from the SA hash table.
- */
-static inline void
-ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
-{
- int i = ll_sa_entry_hash(entry->se_qstr.hash);
-
- spin_lock(&sai->sai_cache_lock[i]);
- list_del_init(&entry->se_hash);
- spin_unlock(&sai->sai_cache_lock[i]);
-}
-
-static inline int agl_should_run(struct ll_statahead_info *sai,
- struct inode *inode)
-{
- return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
-}
-
-static inline struct ll_sa_entry *
-sa_first_received_entry(struct ll_statahead_info *sai)
-{
- return list_entry(sai->sai_entries_received.next,
- struct ll_sa_entry, se_list);
-}
-
-static inline struct ll_inode_info *
-agl_first_entry(struct ll_statahead_info *sai)
-{
- return list_entry(sai->sai_entries_agl.next,
- struct ll_inode_info, lli_agl_list);
-}
-
-static inline int sa_sent_full(struct ll_statahead_info *sai)
-{
- return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
-}
-
-static inline int sa_received_empty(struct ll_statahead_info *sai)
-{
- return list_empty(&sai->sai_entries_received);
-}
-
-static inline int agl_list_empty(struct ll_statahead_info *sai)
-{
- return list_empty(&sai->sai_entries_agl);
-}
-
-/**
- * The hit rate is considered low when either:
- * (1) the hit ratio is less than 80%, or
- * (2) there have been more than 8 consecutive misses.
- */
-static inline int sa_low_hit(struct ll_statahead_info *sai)
-{
- return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
- (sai->sai_consecutive_miss > 8));
-}
-
-/*
- * If the given index is behind the statahead window by more than
- * SA_OMITTED_ENTRY_MAX, then it is old.
- */
-static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
-{
- return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
- sai->sai_index);
-}
-
-/*
- * Insert the entry at the tail of sai_entries at initialization time.
- */
-static struct ll_sa_entry *
-ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
- const char *name, int len)
-{
- struct ll_inode_info *lli;
- struct ll_sa_entry *entry;
- int entry_size;
- char *dname;
-
- entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
- entry = kzalloc(entry_size, GFP_NOFS);
- if (unlikely(!entry))
- return ERR_PTR(-ENOMEM);
-
- CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
- len, name, entry, index);
-
- entry->se_index = index;
-
- /*
- * Statahead entry reference rules:
- *
-	 * 1) When a statahead entry is initialized, its reference count is
-	 * set to 2. One reference is used by the directory scanner. When the
-	 * scanner searches the statahead cache for the given name, it can
-	 * perform a lockless hash lookup (only the scanner can remove an
-	 * entry from the hash list), and once found, it need not call
-	 * "atomic_inc()" on the entry reference, which improves performance.
-	 * After using the statahead entry, the scanner calls "atomic_dec()"
-	 * to drop the reference held since initialization. If it is the last
-	 * reference, the statahead entry is freed.
- *
- * 2) All other threads, including statahead thread and ptlrpcd thread,
- * when they process the statahead entry, the reference for target
- * should be held to guarantee the entry will not be released by the
- * directory scanner. After processing the entry, these threads will
- * drop the entry reference. If it is the last reference, the entry
- * will be freed.
- *
-	 * The second reference taken when the statahead entry is initialized
-	 * is used by the statahead thread, following rule 2).
- */
- atomic_set(&entry->se_refcount, 2);
- entry->se_stat = SA_ENTRY_INIT;
- entry->se_size = entry_size;
- dname = (char *)entry + sizeof(struct ll_sa_entry);
- memcpy(dname, name, len);
- dname[len] = 0;
- entry->se_qstr.hash = full_name_hash(name, len);
- entry->se_qstr.len = len;
- entry->se_qstr.name = dname;
-
- lli = ll_i2info(sai->sai_inode);
- spin_lock(&lli->lli_sa_lock);
- list_add_tail(&entry->se_link, &sai->sai_entries);
- INIT_LIST_HEAD(&entry->se_list);
- ll_sa_entry_enhash(sai, entry);
- spin_unlock(&lli->lli_sa_lock);
-
- atomic_inc(&sai->sai_cache_count);
-
- return entry;
-}
-
-/*
- * Used by the directory scanner to search entry with name.
- *
- * Only the caller can remove the entry from the hash, so it is unnecessary to
- * hold the hash lock. It is the caller's duty to release the initial refcount
- * on the entry, so it is also unnecessary to increase the refcount on it.
- */
-static struct ll_sa_entry *
-ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
-{
- struct ll_sa_entry *entry;
- int i = ll_sa_entry_hash(qstr->hash);
-
- list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
- if (entry->se_qstr.hash == qstr->hash &&
- entry->se_qstr.len == qstr->len &&
- memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
- return entry;
- }
- return NULL;
-}
-
-/*
- * Used by the async getattr request callback to find entry with index.
- *
- * Called with lli_sa_lock held to prevent others from changing the list during
- * the search. The entry refcount is increased before returning to guarantee
- * that the entry cannot be freed by others.
- */
-static struct ll_sa_entry *
-ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
-{
- struct ll_sa_entry *entry;
-
- list_for_each_entry(entry, &sai->sai_entries, se_link) {
- if (entry->se_index == index) {
- LASSERT(atomic_read(&entry->se_refcount) > 0);
- atomic_inc(&entry->se_refcount);
- return entry;
- }
- if (entry->se_index > index)
- break;
- }
- return NULL;
-}
-
-static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry)
-{
- struct md_enqueue_info *minfo = entry->se_minfo;
- struct ptlrpc_request *req = entry->se_req;
-
- if (minfo) {
- entry->se_minfo = NULL;
- ll_intent_release(&minfo->mi_it);
- iput(minfo->mi_dir);
- kfree(minfo);
- }
-
- if (req) {
- entry->se_req = NULL;
- ptlrpc_req_finished(req);
- }
-}
-
-static void ll_sa_entry_put(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry)
-{
- if (atomic_dec_and_test(&entry->se_refcount)) {
- CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
- entry->se_qstr.len, entry->se_qstr.name, entry,
- entry->se_index);
-
- LASSERT(list_empty(&entry->se_link));
- LASSERT(list_empty(&entry->se_list));
- LASSERT(ll_sa_entry_unhashed(entry));
-
- ll_sa_entry_cleanup(sai, entry);
- iput(entry->se_inode);
-
- kfree(entry);
- atomic_dec(&sai->sai_cache_count);
- }
-}
-
-static inline void
-do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
-{
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
-
- LASSERT(!ll_sa_entry_unhashed(entry));
- LASSERT(!list_empty(&entry->se_link));
-
- ll_sa_entry_unhash(sai, entry);
-
- spin_lock(&lli->lli_sa_lock);
- entry->se_stat = SA_ENTRY_DEST;
- list_del_init(&entry->se_link);
- if (likely(!list_empty(&entry->se_list)))
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_sa_lock);
-
- ll_sa_entry_put(sai, entry);
-}
-
-/*
- * Delete the entry from the sai_entries_stated list at fini time.
- */
-static void
-ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
-{
- struct ll_sa_entry *pos, *next;
-
- if (entry)
- do_sa_entry_fini(sai, entry);
-
- /* drop old entry, only 'scanner' process does this, no need to lock */
- list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
- if (!is_omitted_entry(sai, pos->se_index))
- break;
- do_sa_entry_fini(sai, pos);
- }
-}
-
-/*
- * Inside lli_sa_lock.
- */
-static void
-do_sa_entry_to_stated(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry, se_stat_t stat)
-{
- struct ll_sa_entry *se;
- struct list_head *pos = &sai->sai_entries_stated;
-
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
-
- list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
- if (se->se_index < entry->se_index) {
- pos = &se->se_list;
- break;
- }
- }
-
- list_add(&entry->se_list, pos);
- entry->se_stat = stat;
-}
-
-/*
- * Move entry to sai_entries_stated and sort with the index.
- * \retval 1 -- entry to be destroyed.
- * \retval 0 -- entry is inserted into stated list.
- */
-static int
-ll_sa_entry_to_stated(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry, se_stat_t stat)
-{
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
- int ret = 1;
-
- ll_sa_entry_cleanup(sai, entry);
-
- spin_lock(&lli->lli_sa_lock);
- if (likely(entry->se_stat != SA_ENTRY_DEST)) {
- do_sa_entry_to_stated(sai, entry, stat);
- ret = 0;
- }
- spin_unlock(&lli->lli_sa_lock);
-
- return ret;
-}
-
-/*
- * Insert inode into the list of sai_entries_agl.
- */
-static void ll_agl_add(struct ll_statahead_info *sai,
- struct inode *inode, int index)
-{
- struct ll_inode_info *child = ll_i2info(inode);
- struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
- int added = 0;
-
- spin_lock(&child->lli_agl_lock);
- if (child->lli_agl_index == 0) {
- child->lli_agl_index = index;
- spin_unlock(&child->lli_agl_lock);
-
- LASSERT(list_empty(&child->lli_agl_list));
-
- igrab(inode);
- spin_lock(&parent->lli_agl_lock);
- if (agl_list_empty(sai))
- added = 1;
- list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
- spin_unlock(&parent->lli_agl_lock);
- } else {
- spin_unlock(&child->lli_agl_lock);
- }
-
- if (added > 0)
- wake_up(&sai->sai_agl_thread.t_ctl_waitq);
-}
-
-static struct ll_statahead_info *ll_sai_alloc(void)
-{
- struct ll_statahead_info *sai;
- int i;
-
- sai = kzalloc(sizeof(*sai), GFP_NOFS);
- if (!sai)
- return NULL;
-
- atomic_set(&sai->sai_refcount, 1);
-
- spin_lock(&sai_generation_lock);
- sai->sai_generation = ++sai_generation;
- if (unlikely(sai_generation == 0))
- sai->sai_generation = ++sai_generation;
- spin_unlock(&sai_generation_lock);
-
- sai->sai_max = LL_SA_RPC_MIN;
- sai->sai_index = 1;
- init_waitqueue_head(&sai->sai_waitq);
- init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
- init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
-
- INIT_LIST_HEAD(&sai->sai_entries);
- INIT_LIST_HEAD(&sai->sai_entries_received);
- INIT_LIST_HEAD(&sai->sai_entries_stated);
- INIT_LIST_HEAD(&sai->sai_entries_agl);
-
- for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
- INIT_LIST_HEAD(&sai->sai_cache[i]);
- spin_lock_init(&sai->sai_cache_lock[i]);
- }
- atomic_set(&sai->sai_cache_count, 0);
-
- return sai;
-}
-
-static inline struct ll_statahead_info *
-ll_sai_get(struct ll_statahead_info *sai)
-{
- atomic_inc(&sai->sai_refcount);
- return sai;
-}
-
-static void ll_sai_put(struct ll_statahead_info *sai)
-{
- struct inode *inode = sai->sai_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
-
- if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
- struct ll_sa_entry *entry, *next;
-
- if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
-			/* This is a race case; the interpret callback just
-			 * holds a reference count */
- spin_unlock(&lli->lli_sa_lock);
- return;
- }
-
- LASSERT(lli->lli_opendir_key == NULL);
- LASSERT(thread_is_stopped(&sai->sai_thread));
- LASSERT(thread_is_stopped(&sai->sai_agl_thread));
-
- lli->lli_sai = NULL;
- lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_sa_lock);
-
- if (sai->sai_sent > sai->sai_replied)
- CDEBUG(D_READA, "statahead for dir "DFID
- " does not finish: [sent:%llu] [replied:%llu]\n",
- PFID(&lli->lli_fid),
- sai->sai_sent, sai->sai_replied);
-
- list_for_each_entry_safe(entry, next,
- &sai->sai_entries, se_link)
- do_sa_entry_fini(sai, entry);
-
- LASSERT(list_empty(&sai->sai_entries));
- LASSERT(sa_received_empty(sai));
- LASSERT(list_empty(&sai->sai_entries_stated));
-
- LASSERT(atomic_read(&sai->sai_cache_count) == 0);
- LASSERT(agl_list_empty(sai));
-
- iput(inode);
- kfree(sai);
- }
-}
-
-/* Do NOT forget to drop the inode refcount taken when the inode was added to sai_entries_agl. */
-static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- __u64 index = lli->lli_agl_index;
- int rc;
-
- LASSERT(list_empty(&lli->lli_agl_list));
-
-	/* AGL may fall behind statahead by one entry */
- if (is_omitted_entry(sai, index + 1)) {
- lli->lli_agl_index = 0;
- iput(inode);
- return;
- }
-
- /* Someone is in glimpse (sync or async), do nothing. */
- rc = down_write_trylock(&lli->lli_glimpse_sem);
- if (rc == 0) {
- lli->lli_agl_index = 0;
- iput(inode);
- return;
- }
-
- /*
-	 * Someone triggered a glimpse within the last second.
-	 * 1) The former glimpse succeeded with a glimpse lock granted by the
-	 *    OST; if the lock is still cached on the client, AGL needs to do
-	 *    nothing. If it was cancelled by another client, AGL may not be
-	 *    able to obtain a new lock, since no glimpse callback is
-	 *    triggered by AGL.
-	 * 2) The former glimpse succeeded, but the OST did not grant a
-	 *    glimpse lock. In that case, it is quite possible that the OST
-	 *    will not grant a glimpse lock for AGL either.
-	 * 3) The former glimpse failed; compared with the other two cases,
-	 *    this is relatively rare. AGL can ignore it, as it will not
-	 *    noticeably affect performance.
- */
- if (lli->lli_glimpse_time != 0 &&
- time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
- up_write(&lli->lli_glimpse_sem);
- lli->lli_agl_index = 0;
- iput(inode);
- return;
- }
-
- CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
- DFID", idx = %llu\n", PFID(&lli->lli_fid), index);
-
- cl_agl(inode);
- lli->lli_agl_index = 0;
- lli->lli_glimpse_time = cfs_time_current();
- up_write(&lli->lli_glimpse_sem);
-
- CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
- DFID", idx = %llu, rc = %d\n",
- PFID(&lli->lli_fid), index, rc);
-
- iput(inode);
-}
-
-static void ll_post_statahead(struct ll_statahead_info *sai)
-{
- struct inode *dir = sai->sai_inode;
- struct inode *child;
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_sa_entry *entry;
- struct md_enqueue_info *minfo;
- struct lookup_intent *it;
- struct ptlrpc_request *req;
- struct mdt_body *body;
- int rc = 0;
-
- spin_lock(&lli->lli_sa_lock);
- if (unlikely(sa_received_empty(sai))) {
- spin_unlock(&lli->lli_sa_lock);
- return;
- }
- entry = sa_first_received_entry(sai);
- atomic_inc(&entry->se_refcount);
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_sa_lock);
-
- LASSERT(entry->se_handle != 0);
-
- minfo = entry->se_minfo;
- it = &minfo->mi_it;
- req = entry->se_req;
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- child = entry->se_inode;
- if (child == NULL) {
- /*
- * lookup.
- */
- LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
-
-		/* XXX: No fid in reply, this is probably a cross-ref case.
- * SA can't handle it yet. */
- if (body->valid & OBD_MD_MDS) {
- rc = -EAGAIN;
- goto out;
- }
- } else {
- /*
- * revalidate.
- */
- /* unlinked and re-created with the same name */
- if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
- entry->se_inode = NULL;
- iput(child);
- child = NULL;
- }
- }
-
- it->d.lustre.it_lock_handle = entry->se_handle;
- rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
- if (rc != 1) {
- rc = -EAGAIN;
- goto out;
- }
-
- rc = ll_prep_inode(&child, req, dir->i_sb, it);
- if (rc)
- goto out;
-
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- child, child->i_ino, child->i_generation);
- ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
-
- entry->se_inode = child;
-
- if (agl_should_run(sai, child))
- ll_agl_add(sai, child, entry->se_index);
-
-out:
-	/* "ll_sa_entry_to_stated()" will drop the related ldlm ibits lock
-	 * reference count by calling "ll_intent_drop_lock()", regardless of
-	 * whether the above operations failed or not. Do not worry about
-	 * calling "ll_intent_drop_lock()" more than once. */
- rc = ll_sa_entry_to_stated(sai, entry,
- rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
- if (rc == 0 && entry->se_index == sai->sai_index_wait)
- wake_up(&sai->sai_waitq);
- ll_sa_entry_put(sai, entry);
-}
-
-static int ll_statahead_interpret(struct ptlrpc_request *req,
- struct md_enqueue_info *minfo, int rc)
-{
- struct lookup_intent *it = &minfo->mi_it;
- struct inode *dir = minfo->mi_dir;
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = NULL;
- struct ll_sa_entry *entry;
- __u64 handle = 0;
- int wakeup;
-
- if (it_disposition(it, DISP_LOOKUP_NEG))
- rc = -ENOENT;
-
- if (rc == 0) {
- /* release ibits lock ASAP to avoid deadlock when statahead
- * thread enqueues lock on parent in readdir and another
- * process enqueues lock on child with parent lock held, eg.
- * unlink. */
- handle = it->d.lustre.it_lock_handle;
- ll_intent_drop_lock(it);
- }
-
- spin_lock(&lli->lli_sa_lock);
- /* stale entry */
- if (unlikely(lli->lli_sai == NULL ||
- lli->lli_sai->sai_generation != minfo->mi_generation)) {
- spin_unlock(&lli->lli_sa_lock);
- rc = -ESTALE;
- goto out;
- } else {
- sai = ll_sai_get(lli->lli_sai);
- if (unlikely(!thread_is_running(&sai->sai_thread))) {
- sai->sai_replied++;
- spin_unlock(&lli->lli_sa_lock);
- rc = -EBADFD;
- goto out;
- }
-
- entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
- if (entry == NULL) {
- sai->sai_replied++;
- spin_unlock(&lli->lli_sa_lock);
- rc = -EIDRM;
- goto out;
- }
-
- if (rc != 0) {
- do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
- wakeup = (entry->se_index == sai->sai_index_wait);
- } else {
- entry->se_minfo = minfo;
- entry->se_req = ptlrpc_request_addref(req);
- /* Release the async ibits lock ASAP to avoid deadlock
- * when statahead thread tries to enqueue lock on parent
-			 * for readpage and another thread tries to enqueue a
-			 * lock on the child with the parent's lock held, for
-			 * example: unlink. */
- entry->se_handle = handle;
- wakeup = sa_received_empty(sai);
- list_add_tail(&entry->se_list,
- &sai->sai_entries_received);
- }
- sai->sai_replied++;
- spin_unlock(&lli->lli_sa_lock);
-
- ll_sa_entry_put(sai, entry);
- if (wakeup)
- wake_up(&sai->sai_thread.t_ctl_waitq);
- }
-
-out:
- if (rc != 0) {
- ll_intent_release(it);
- iput(dir);
- kfree(minfo);
- }
- if (sai != NULL)
- ll_sai_put(sai);
- return rc;
-}
-
-static void sa_args_fini(struct md_enqueue_info *minfo,
- struct ldlm_enqueue_info *einfo)
-{
- LASSERT(minfo && einfo);
- iput(minfo->mi_dir);
- kfree(minfo);
- kfree(einfo);
-}
-
-/**
- * prepare arguments for async stat RPC.
- */
-static int sa_args_init(struct inode *dir, struct inode *child,
- struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
- struct ldlm_enqueue_info **pei)
-{
- struct qstr *qstr = &entry->se_qstr;
- struct ll_inode_info *lli = ll_i2info(dir);
- struct md_enqueue_info *minfo;
- struct ldlm_enqueue_info *einfo;
- struct md_op_data *op_data;
-
- einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
- if (!einfo)
- return -ENOMEM;
-
- minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
- if (!minfo) {
- kfree(einfo);
- return -ENOMEM;
- }
-
- op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
- qstr->len, 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- kfree(einfo);
- kfree(minfo);
- return PTR_ERR(op_data);
- }
-
- minfo->mi_it.it_op = IT_GETATTR;
- minfo->mi_dir = igrab(dir);
- minfo->mi_cb = ll_statahead_interpret;
- minfo->mi_generation = lli->lli_sai->sai_generation;
- minfo->mi_cbdata = entry->se_index;
-
- einfo->ei_type = LDLM_IBITS;
- einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
- einfo->ei_cb_bl = ll_md_blocking_ast;
- einfo->ei_cb_cp = ldlm_completion_ast;
- einfo->ei_cb_gl = NULL;
- einfo->ei_cbdata = NULL;
-
- *pmi = minfo;
- *pei = einfo;
-
- return 0;
-}
-
-static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
-{
- struct md_enqueue_info *minfo;
- struct ldlm_enqueue_info *einfo;
- int rc;
-
- rc = sa_args_init(dir, NULL, entry, &minfo, &einfo);
- if (rc)
- return rc;
-
- rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
- if (rc < 0)
- sa_args_fini(minfo, einfo);
-
- return rc;
-}
-
-/**
- * similar to ll_revalidate_it().
- * \retval 1 -- dentry valid
- * \retval 0 -- will send stat-ahead request
- * \retval others -- prepare stat-ahead request failed
- */
-static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
- struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- struct lookup_intent it = { .it_op = IT_GETATTR,
- .d.lustre.it_lock_handle = 0 };
- struct md_enqueue_info *minfo;
- struct ldlm_enqueue_info *einfo;
- int rc;
-
- if (unlikely(inode == NULL))
- return 1;
-
- if (d_mountpoint(dentry))
- return 1;
-
- entry->se_inode = igrab(inode);
- rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
- NULL);
- if (rc == 1) {
- entry->se_handle = it.d.lustre.it_lock_handle;
- ll_intent_release(&it);
- return 1;
- }
-
- rc = sa_args_init(dir, inode, entry, &minfo, &einfo);
- if (rc) {
- entry->se_inode = NULL;
- iput(inode);
- return rc;
- }
-
- rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
- if (rc < 0) {
- entry->se_inode = NULL;
- iput(inode);
- sa_args_fini(minfo, einfo);
- }
-
- return rc;
-}
-
-static void ll_statahead_one(struct dentry *parent, const char *entry_name,
- int entry_name_len)
-{
- struct inode *dir = d_inode(parent);
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct dentry *dentry = NULL;
- struct ll_sa_entry *entry;
- int rc;
- int rc1;
-
- entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
- entry_name_len);
- if (IS_ERR(entry))
- return;
-
- dentry = d_lookup(parent, &entry->se_qstr);
- if (!dentry) {
- rc = do_sa_lookup(dir, entry);
- } else {
- rc = do_sa_revalidate(dir, entry, dentry);
- if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
- ll_agl_add(sai, d_inode(dentry), entry->se_index);
- }
-
- if (dentry != NULL)
- dput(dentry);
-
- if (rc) {
- rc1 = ll_sa_entry_to_stated(sai, entry,
- rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
- if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
- wake_up(&sai->sai_waitq);
- } else {
- sai->sai_sent++;
- }
-
- sai->sai_index++;
- /* drop one refcount on entry by ll_sa_entry_alloc */
- ll_sa_entry_put(sai, entry);
-}
-
-static int ll_agl_thread(void *arg)
-{
- struct dentry *parent = (struct dentry *)arg;
- struct inode *dir = d_inode(parent);
- struct ll_inode_info *plli = ll_i2info(dir);
- struct ll_inode_info *clli;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
- struct ptlrpc_thread *thread = &sai->sai_agl_thread;
- struct l_wait_info lwi = { 0 };
-
- thread->t_pid = current_pid();
- CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
- sai, parent);
-
- atomic_inc(&sbi->ll_agl_total);
- spin_lock(&plli->lli_agl_lock);
- sai->sai_agl_valid = 1;
- if (thread_is_init(thread))
- /* If someone else has changed the thread state
- * (e.g. already changed to SVC_STOPPING), we can't just
- * blindly overwrite that setting. */
- thread_set_flags(thread, SVC_RUNNING);
- spin_unlock(&plli->lli_agl_lock);
- wake_up(&thread->t_ctl_waitq);
-
- while (1) {
- l_wait_event(thread->t_ctl_waitq,
- !agl_list_empty(sai) ||
- !thread_is_running(thread),
- &lwi);
-
- if (!thread_is_running(thread))
- break;
-
- spin_lock(&plli->lli_agl_lock);
-		/* The statahead thread may help to process AGL entries,
-		 * so check whether the list is empty again. */
- if (!agl_list_empty(sai)) {
- clli = agl_first_entry(sai);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
- ll_agl_trigger(&clli->lli_vfs_inode, sai);
- } else {
- spin_unlock(&plli->lli_agl_lock);
- }
- }
-
- spin_lock(&plli->lli_agl_lock);
- sai->sai_agl_valid = 0;
- while (!agl_list_empty(sai)) {
- clli = agl_first_entry(sai);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
- clli->lli_agl_index = 0;
- iput(&clli->lli_vfs_inode);
- spin_lock(&plli->lli_agl_lock);
- }
- thread_set_flags(thread, SVC_STOPPED);
- spin_unlock(&plli->lli_agl_lock);
- wake_up(&thread->t_ctl_waitq);
- ll_sai_put(sai);
- CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
- sai, parent);
- return 0;
-}
-
-static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
-{
- struct ptlrpc_thread *thread = &sai->sai_agl_thread;
- struct l_wait_info lwi = { 0 };
- struct ll_inode_info *plli;
- struct task_struct *task;
-
- CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
- sai, parent);
-
- plli = ll_i2info(d_inode(parent));
- task = kthread_run(ll_agl_thread, parent,
- "ll_agl_%u", plli->lli_opendir_pid);
- if (IS_ERR(task)) {
- CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
- thread_set_flags(thread, SVC_STOPPED);
- return;
- }
-
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
-}
-
-static int ll_statahead_thread(void *arg)
-{
- struct dentry *parent = (struct dentry *)arg;
- struct inode *dir = d_inode(parent);
- struct ll_inode_info *plli = ll_i2info(dir);
- struct ll_inode_info *clli;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
- struct ptlrpc_thread *thread = &sai->sai_thread;
- struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
- struct page *page;
- __u64 pos = 0;
- int first = 0;
- int rc = 0;
- struct ll_dir_chain chain;
- struct l_wait_info lwi = { 0 };
-
- thread->t_pid = current_pid();
- CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
- sai, parent);
-
- if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
- ll_start_agl(parent, sai);
-
- atomic_inc(&sbi->ll_sa_total);
- spin_lock(&plli->lli_sa_lock);
- if (thread_is_init(thread))
- /* If someone else has changed the thread state
- * (e.g. already changed to SVC_STOPPING), we can't just
- * blindly overwrite that setting. */
- thread_set_flags(thread, SVC_RUNNING);
- spin_unlock(&plli->lli_sa_lock);
- wake_up(&thread->t_ctl_waitq);
-
- ll_dir_chain_init(&chain);
- page = ll_get_dir_page(dir, pos, &chain);
-
- while (1) {
- struct lu_dirpage *dp;
- struct lu_dirent *ent;
-
- if (IS_ERR(page)) {
- rc = PTR_ERR(page);
- CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
- PFID(ll_inode2fid(dir)), pos, sai->sai_index,
- rc, plli->lli_opendir_pid);
- goto out;
- }
-
- dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent != NULL;
- ent = lu_dirent_next(ent)) {
- __u64 hash;
- int namelen;
- char *name;
-
- hash = le64_to_cpu(ent->lde_hash);
- if (unlikely(hash < pos))
- /*
- * Skip until we find target hash value.
- */
- continue;
-
- namelen = le16_to_cpu(ent->lde_namelen);
- if (unlikely(namelen == 0))
- /*
- * Skip dummy record.
- */
- continue;
-
- name = ent->lde_name;
- if (name[0] == '.') {
- if (namelen == 1) {
- /*
- * skip "."
- */
- continue;
- } else if (name[1] == '.' && namelen == 2) {
- /*
- * skip ".."
- */
- continue;
- } else if (!sai->sai_ls_all) {
- /*
- * skip hidden files.
- */
- sai->sai_skip_hidden++;
- continue;
- }
- }
-
- /*
- * don't stat-ahead first entry.
- */
- if (unlikely(++first == 1))
- continue;
-
-keep_it:
- l_wait_event(thread->t_ctl_waitq,
- !sa_sent_full(sai) ||
- !sa_received_empty(sai) ||
- !agl_list_empty(sai) ||
- !thread_is_running(thread),
- &lwi);
-
-interpret_it:
- while (!sa_received_empty(sai))
- ll_post_statahead(sai);
-
- if (unlikely(!thread_is_running(thread))) {
- ll_release_page(page, 0);
- rc = 0;
- goto out;
- }
-
-			/* If there is no window for metadata statahead, but
-			 * there are some AGL entries to be triggered, then
-			 * try to help process the AGL entries. */
- if (sa_sent_full(sai)) {
- spin_lock(&plli->lli_agl_lock);
- while (!agl_list_empty(sai)) {
- clli = agl_first_entry(sai);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
- ll_agl_trigger(&clli->lli_vfs_inode,
- sai);
-
- if (!sa_received_empty(sai))
- goto interpret_it;
-
- if (unlikely(
- !thread_is_running(thread))) {
- ll_release_page(page, 0);
- rc = 0;
- goto out;
- }
-
- if (!sa_sent_full(sai))
- goto do_it;
-
- spin_lock(&plli->lli_agl_lock);
- }
- spin_unlock(&plli->lli_agl_lock);
-
- goto keep_it;
- }
-
-do_it:
- ll_statahead_one(parent, name, namelen);
- }
- pos = le64_to_cpu(dp->ldp_hash_end);
- if (pos == MDS_DIR_END_OFF) {
- /*
- * End of directory reached.
- */
- ll_release_page(page, 0);
- while (1) {
- l_wait_event(thread->t_ctl_waitq,
- !sa_received_empty(sai) ||
- sai->sai_sent == sai->sai_replied ||
- !thread_is_running(thread),
- &lwi);
-
- while (!sa_received_empty(sai))
- ll_post_statahead(sai);
-
- if (unlikely(!thread_is_running(thread))) {
- rc = 0;
- goto out;
- }
-
- if (sai->sai_sent == sai->sai_replied &&
- sa_received_empty(sai))
- break;
- }
-
- spin_lock(&plli->lli_agl_lock);
- while (!agl_list_empty(sai) &&
- thread_is_running(thread)) {
- clli = agl_first_entry(sai);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
- ll_agl_trigger(&clli->lli_vfs_inode, sai);
- spin_lock(&plli->lli_agl_lock);
- }
- spin_unlock(&plli->lli_agl_lock);
-
- rc = 0;
- goto out;
- } else if (1) {
- /*
- * chain is exhausted.
- * Normal case: continue to the next page.
- */
- ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- page = ll_get_dir_page(dir, pos, &chain);
- } else {
- LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- ll_release_page(page, 1);
- /*
- * go into overflow page.
- */
- }
- }
-
-out:
- if (sai->sai_agl_valid) {
- spin_lock(&plli->lli_agl_lock);
- thread_set_flags(agl_thread, SVC_STOPPING);
- spin_unlock(&plli->lli_agl_lock);
- wake_up(&agl_thread->t_ctl_waitq);
-
- CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
- sai, (unsigned int)agl_thread->t_pid);
- l_wait_event(agl_thread->t_ctl_waitq,
- thread_is_stopped(agl_thread),
- &lwi);
- } else {
- /* Set agl_thread flags anyway. */
- thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
- }
- ll_dir_chain_fini(&chain);
- spin_lock(&plli->lli_sa_lock);
- if (!sa_received_empty(sai)) {
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&plli->lli_sa_lock);
-
- /* To release the resources held by received entries. */
- while (!sa_received_empty(sai))
- ll_post_statahead(sai);
-
- spin_lock(&plli->lli_sa_lock);
- }
- thread_set_flags(thread, SVC_STOPPED);
- spin_unlock(&plli->lli_sa_lock);
- wake_up(&sai->sai_waitq);
- wake_up(&thread->t_ctl_waitq);
- ll_sai_put(sai);
- dput(parent);
- CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
- sai, parent);
- return rc;
-}
-
-/**
- * called in ll_file_release().
- */
-void ll_stop_statahead(struct inode *dir, void *key)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
-
- if (unlikely(key == NULL))
- return;
-
- spin_lock(&lli->lli_sa_lock);
- if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
- spin_unlock(&lli->lli_sa_lock);
- return;
- }
-
- lli->lli_opendir_key = NULL;
-
- if (lli->lli_sai) {
- struct l_wait_info lwi = { 0 };
- struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
-
- if (!thread_is_stopped(thread)) {
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&lli->lli_sa_lock);
- wake_up(&thread->t_ctl_waitq);
-
- CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
- lli->lli_sai, (unsigned int)thread->t_pid);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
- } else {
- spin_unlock(&lli->lli_sa_lock);
- }
-
-		/*
-		 * Put the ref which was held when statahead_enter was first
-		 * called. It may not be the last ref, since some statahead
-		 * requests may still be in flight.
-		 */
- ll_sai_put(lli->lli_sai);
- } else {
- lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_sa_lock);
- }
-}
-
-enum {
- /**
-	 * not the first dirent, or it is "."
- */
- LS_NONE_FIRST_DE = 0,
- /**
- * the first non-hidden dirent
- */
- LS_FIRST_DE,
- /**
-	 * the first dirent is hidden, i.e. it starts with "."
- */
- LS_FIRST_DOT_DE
-};
-
-static int is_first_dirent(struct inode *dir, struct dentry *dentry)
-{
- struct ll_dir_chain chain;
- struct qstr *target = &dentry->d_name;
- struct page *page;
- __u64 pos = 0;
- int dot_de;
- int rc = LS_NONE_FIRST_DE;
-
- ll_dir_chain_init(&chain);
- page = ll_get_dir_page(dir, pos, &chain);
-
- while (1) {
- struct lu_dirpage *dp;
- struct lu_dirent *ent;
-
- if (IS_ERR(page)) {
- struct ll_inode_info *lli = ll_i2info(dir);
-
- rc = PTR_ERR(page);
- CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
- PFID(ll_inode2fid(dir)), pos,
- rc, lli->lli_opendir_pid);
- break;
- }
-
- dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent != NULL;
- ent = lu_dirent_next(ent)) {
- __u64 hash;
- int namelen;
- char *name;
-
- hash = le64_to_cpu(ent->lde_hash);
-			/* ll_get_dir_page() can return any page containing
-			 * the given hash, which may not be the start hash. */
- if (unlikely(hash < pos))
- continue;
-
- namelen = le16_to_cpu(ent->lde_namelen);
- if (unlikely(namelen == 0))
- /*
- * skip dummy record.
- */
- continue;
-
- name = ent->lde_name;
- if (name[0] == '.') {
- if (namelen == 1)
- /*
- * skip "."
- */
- continue;
- else if (name[1] == '.' && namelen == 2)
- /*
- * skip ".."
- */
- continue;
- else
- dot_de = 1;
- } else {
- dot_de = 0;
- }
-
- if (dot_de && target->name[0] != '.') {
- CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
- target->len, target->name,
- namelen, name);
- continue;
- }
-
- if (target->len != namelen ||
- memcmp(target->name, name, namelen) != 0)
- rc = LS_NONE_FIRST_DE;
- else if (!dot_de)
- rc = LS_FIRST_DE;
- else
- rc = LS_FIRST_DOT_DE;
-
- ll_release_page(page, 0);
- goto out;
- }
- pos = le64_to_cpu(dp->ldp_hash_end);
- if (pos == MDS_DIR_END_OFF) {
- /*
- * End of directory reached.
- */
- ll_release_page(page, 0);
- break;
- } else if (1) {
- /*
- * chain is exhausted
- * Normal case: continue to the next page.
- */
- ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- page = ll_get_dir_page(dir, pos, &chain);
- } else {
- /*
- * go into overflow page.
- */
- LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- ll_release_page(page, 1);
- }
- }
-
-out:
- ll_dir_chain_fini(&chain);
- return rc;
-}
-
-static void
-ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
-{
- struct ptlrpc_thread *thread = &sai->sai_thread;
- struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
- int hit;
-
- if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
- hit = 1;
- else
- hit = 0;
-
- ll_sa_entry_fini(sai, entry);
- if (hit) {
- sai->sai_hit++;
- sai->sai_consecutive_miss = 0;
- sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
- } else {
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
-
- sai->sai_miss++;
- sai->sai_consecutive_miss++;
- if (sa_low_hit(sai) && thread_is_running(thread)) {
- atomic_inc(&sbi->ll_sa_wrong);
- CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
- PFID(&lli->lli_fid), sai->sai_hit,
- sai->sai_miss, sai->sai_sent,
- sai->sai_replied);
- spin_lock(&lli->lli_sa_lock);
- if (!thread_is_stopped(thread))
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&lli->lli_sa_lock);
- }
- }
-
- if (!thread_is_stopped(thread))
- wake_up(&thread->t_ctl_waitq);
-}
-
-/**
- * Start the statahead thread if this is the first dir entry.
- * Otherwise, if a thread has already been started, wait until it is ahead of me.
- * \retval 1 -- found the entry with lock in cache, the caller needs to do
- *              nothing.
- * \retval 0 -- found the entry in cache, but without lock, the caller needs
- *              to refresh it from the MDS.
- * \retval others -- the caller needs to process as non-statahead.
- */
-int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
- int only_unplug)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct dentry *parent;
- struct ll_sa_entry *entry;
- struct ptlrpc_thread *thread;
- struct l_wait_info lwi = { 0 };
- int rc = 0;
- struct ll_inode_info *plli;
-
- LASSERT(lli->lli_opendir_pid == current_pid());
-
- if (sai) {
- thread = &sai->sai_thread;
- if (unlikely(thread_is_stopped(thread) &&
- list_empty(&sai->sai_entries_stated))) {
- /* to release resource */
- ll_stop_statahead(dir, lli->lli_opendir_key);
- return -EAGAIN;
- }
-
- if ((*dentryp)->d_name.name[0] == '.') {
- if (sai->sai_ls_all ||
- sai->sai_miss_hidden >= sai->sai_skip_hidden) {
- /*
-				 * The hidden dentry is the first one, or the
-				 * statahead thread has not skipped that many
-				 * hidden dentries before "sai_ls_all" was
-				 * enabled below.
- */
- } else {
- if (!sai->sai_ls_all)
- /*
-					 * It may be because the hidden dentry
-					 * is not the first one and
-					 * "sai_ls_all" was not set, so
-					 * "ls -al" missed it. Enable
-					 * "sai_ls_all" for such a case.
- */
- sai->sai_ls_all = 1;
-
- /*
-				 * Such a "getattr" has been skipped before
-				 * "sai_ls_all" was enabled above.
- */
- sai->sai_miss_hidden++;
- return -EAGAIN;
- }
- }
-
- entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
- if (entry == NULL || only_unplug) {
- ll_sai_unplug(sai, entry);
- return entry ? 1 : -EAGAIN;
- }
-
- if (!ll_sa_entry_stated(entry)) {
- sai->sai_index_wait = entry->se_index;
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(sai->sai_waitq,
- ll_sa_entry_stated(entry) ||
- thread_is_stopped(thread),
- &lwi);
- if (rc < 0) {
- ll_sai_unplug(sai, entry);
- return -EAGAIN;
- }
- }
-
- if (entry->se_stat == SA_ENTRY_SUCC &&
- entry->se_inode != NULL) {
- struct inode *inode = entry->se_inode;
- struct lookup_intent it = { .it_op = IT_GETATTR,
- .d.lustre.it_lock_handle =
- entry->se_handle };
- __u64 bits;
-
- rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
- ll_inode2fid(inode), &bits);
- if (rc == 1) {
- if (d_inode(*dentryp) == NULL) {
- struct dentry *alias;
-
- alias = ll_splice_alias(inode,
- *dentryp);
- if (IS_ERR(alias)) {
- ll_sai_unplug(sai, entry);
- return PTR_ERR(alias);
- }
- *dentryp = alias;
- } else if (d_inode(*dentryp) != inode) {
- /* revalidate, but inode is recreated */
- CDEBUG(D_READA,
- "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
- *dentryp,
- d_inode(*dentryp)->i_ino,
- d_inode(*dentryp)->i_generation,
- inode->i_ino,
- inode->i_generation);
- ll_sai_unplug(sai, entry);
- return -ESTALE;
- } else {
- iput(inode);
- }
- entry->se_inode = NULL;
-
- if ((bits & MDS_INODELOCK_LOOKUP) &&
- d_lustre_invalid(*dentryp))
- d_lustre_revalidate(*dentryp);
- ll_intent_release(&it);
- }
- }
-
- ll_sai_unplug(sai, entry);
- return rc;
- }
-
-	/* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
- rc = is_first_dirent(dir, *dentryp);
- if (rc == LS_NONE_FIRST_DE) {
-	/* It is not an "ls -{a}l" operation, no need to statahead for it. */
- rc = -EAGAIN;
- goto out;
- }
-
- sai = ll_sai_alloc();
- if (sai == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
- sai->sai_inode = igrab(dir);
- if (unlikely(sai->sai_inode == NULL)) {
- CWARN("Do not start stat ahead on dying inode "DFID"\n",
- PFID(&lli->lli_fid));
- rc = -ESTALE;
- goto out;
- }
-
-	/* get a parent reference here; it is put in ll_statahead_thread */
- parent = dget((*dentryp)->d_parent);
- if (unlikely(sai->sai_inode != d_inode(parent))) {
- struct ll_inode_info *nlli = ll_i2info(d_inode(parent));
-
- CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
- *dentryp,
- PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
- dput(parent);
- iput(sai->sai_inode);
- rc = -EAGAIN;
- goto out;
- }
-
- CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
- sai, parent);
-
- /* The sai buffer already has one reference taken at allocation time,
- * but as soon as we expose the sai by attaching it to the lli that
- * default reference can be dropped by another thread calling
- * ll_stop_statahead. We need to take a local reference to protect
- * the sai buffer while we intend to access it. */
- ll_sai_get(sai);
- lli->lli_sai = sai;
-
- plli = ll_i2info(d_inode(parent));
- rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
- "ll_sa_%u", plli->lli_opendir_pid));
- thread = &sai->sai_thread;
- if (IS_ERR_VALUE(rc)) {
- CERROR("can't start ll_sa thread, rc: %d\n", rc);
- dput(parent);
- lli->lli_opendir_key = NULL;
- thread_set_flags(thread, SVC_STOPPED);
- thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
- /* Drop both our own local reference and the default
- * reference from allocation time. */
- ll_sai_put(sai);
- ll_sai_put(sai);
- LASSERT(lli->lli_sai == NULL);
- return -EAGAIN;
- }
-
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
- ll_sai_put(sai);
-
- /*
- * We don't stat-ahead for the first dirent since we are already in
- * lookup.
- */
- return -EAGAIN;
-
-out:
- kfree(sai);
- spin_lock(&lli->lli_sa_lock);
- lli->lli_opendir_key = NULL;
- lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_sa_lock);
- return rc;
-}
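
The statahead code removed above grows its async-stat window on cache hits (doubling sai_max up to the per-superblock ll_sa_max) and stops the thread once the hit ratio drops too low. The standalone sketch below illustrates only that adaptive-window pattern; the structure and function names (sa_window, sa_record, the thresholds in sa_low_hit) are invented for the example and are not the removed Lustre code.

#include <stdio.h>

/* Hypothetical stand-in for the statahead window heuristic: grow the
 * window on a hit, count consecutive misses, and report when the hit
 * ratio drops low enough that the thread would be stopped. */
struct sa_window {
	unsigned int window;		/* current async-stat window */
	unsigned int max_window;	/* hard upper bound (cf. ll_sa_max) */
	unsigned long hit;
	unsigned long miss;
	unsigned long consecutive_miss;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void sa_record(struct sa_window *w, int hit)
{
	if (hit) {
		w->hit++;
		w->consecutive_miss = 0;
		w->window = min_u(2 * w->window, w->max_window);
	} else {
		w->miss++;
		w->consecutive_miss++;
	}
}

/* "Low hit" check, analogous in spirit to sa_low_hit(): too many
 * consecutive misses, or misses dominating hits overall. The exact
 * numbers here are made up for the example. */
static int sa_low_hit(const struct sa_window *w)
{
	return w->consecutive_miss > 8 ||
	       (w->hit + w->miss > 32 && w->miss > w->hit);
}

int main(void)
{
	struct sa_window w = { .window = 1, .max_window = 64 };
	const int pattern[] = { 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	size_t i;

	for (i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++) {
		sa_record(&w, pattern[i]);
		printf("step %zu: window=%u hit=%lu miss=%lu%s\n",
		       i, w.window, w.hit, w.miss,
		       sa_low_hit(&w) ? " (would stop statahead)" : "");
	}
	return 0;
}
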
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
deleted file mode 100644
index b35e02a49ba3..000000000000
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include "../include/lustre_lite.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_dlm.h"
-#include <linux/init.h>
-#include <linux/fs.h>
-#include "../include/lprocfs_status.h"
-#include "llite_internal.h"
-
-static struct kmem_cache *ll_inode_cachep;
-
-static struct inode *ll_alloc_inode(struct super_block *sb)
-{
- struct ll_inode_info *lli;
- ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
- OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, GFP_NOFS);
- if (lli == NULL)
- return NULL;
-
- inode_init_once(&lli->lli_vfs_inode);
- return &lli->lli_vfs_inode;
-}
-
-static void ll_inode_destroy_callback(struct rcu_head *head)
-{
- struct inode *inode = container_of(head, struct inode, i_rcu);
- struct ll_inode_info *ptr = ll_i2info(inode);
- OBD_SLAB_FREE_PTR(ptr, ll_inode_cachep);
-}
-
-static void ll_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, ll_inode_destroy_callback);
-}
-
-/* exported operations */
-struct super_operations lustre_super_operations = {
- .alloc_inode = ll_alloc_inode,
- .destroy_inode = ll_destroy_inode,
- .evict_inode = ll_delete_inode,
- .put_super = ll_put_super,
- .statfs = ll_statfs,
- .umount_begin = ll_umount_begin,
- .remount_fs = ll_remount_fs,
- .show_options = ll_show_options,
-};
-MODULE_ALIAS_FS("lustre");
-
-void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg));
-
-static int __init init_lustre_lite(void)
-{
- lnet_process_id_t lnet_id;
- struct timespec64 ts;
- int i, rc, seed[2];
-
- CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);
-
-	/* print the address of _any_ initialized kernel symbol from this
-	 * module, to allow debugging with a gdb that doesn't support data
-	 * symbols from modules. */
- CDEBUG(D_INFO, "Lustre client module (%p).\n",
- &lustre_super_operations);
-
- rc = -ENOMEM;
- ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
- sizeof(struct ll_inode_info),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (ll_inode_cachep == NULL)
- goto out_cache;
-
- ll_file_data_slab = kmem_cache_create("ll_file_data",
- sizeof(struct ll_file_data), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (ll_file_data_slab == NULL)
- goto out_cache;
-
- ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
- sizeof(struct ll_remote_perm),
- 0, 0, NULL);
- if (ll_remote_perm_cachep == NULL)
- goto out_cache;
-
- ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
- REMOTE_PERM_HASHSIZE *
- sizeof(struct list_head),
- 0, 0, NULL);
- if (ll_rmtperm_hash_cachep == NULL)
- goto out_cache;
-
- llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
- if (IS_ERR_OR_NULL(llite_root)) {
- rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
- llite_root = NULL;
- goto out_cache;
- }
-
- llite_kset = kset_create_and_add("llite", NULL, lustre_kobj);
- if (!llite_kset) {
- rc = -ENOMEM;
- goto out_debugfs;
- }
-
- cfs_get_random_bytes(seed, sizeof(seed));
-
- /* Nodes with small feet have little entropy. The NID for this
- * node gives the most entropy in the low bits */
- for (i = 0;; i++) {
- if (LNetGetId(i, &lnet_id) == -ENOENT)
- break;
-
- if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND)
- seed[0] ^= LNET_NIDADDR(lnet_id.nid);
- }
-
- ktime_get_ts64(&ts);
- cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
-
- rc = vvp_global_init();
- if (rc != 0)
- goto out_sysfs;
-
- rc = ll_xattr_init();
- if (rc != 0)
- goto out_vvp;
-
- lustre_register_client_fill_super(ll_fill_super);
- lustre_register_kill_super_cb(ll_kill_super);
- lustre_register_client_process_config(ll_process_config);
-
- return 0;
-
-out_vvp:
- vvp_global_fini();
-out_sysfs:
- kset_unregister(llite_kset);
-out_debugfs:
- debugfs_remove(llite_root);
-out_cache:
- kmem_cache_destroy(ll_inode_cachep);
- kmem_cache_destroy(ll_file_data_slab);
- kmem_cache_destroy(ll_remote_perm_cachep);
- kmem_cache_destroy(ll_rmtperm_hash_cachep);
- return rc;
-}
-
-static void __exit exit_lustre_lite(void)
-{
- lustre_register_client_fill_super(NULL);
- lustre_register_kill_super_cb(NULL);
- lustre_register_client_process_config(NULL);
-
- debugfs_remove(llite_root);
- kset_unregister(llite_kset);
-
- ll_xattr_fini();
- vvp_global_fini();
-
- kmem_cache_destroy(ll_inode_cachep);
- kmem_cache_destroy(ll_rmtperm_hash_cachep);
-
- kmem_cache_destroy(ll_remote_perm_cachep);
-
- kmem_cache_destroy(ll_file_data_slab);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Lite Client File System");
-MODULE_LICENSE("GPL");
-
-module_init(init_lustre_lite);
-module_exit(exit_lustre_lite);
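
The removed super25.c embeds the VFS struct inode inside struct ll_inode_info and recovers the outer structure in the RCU free callback via container_of(). The sketch below shows only that container_of idiom with made-up types (outer/inner); it defines its own userspace container_of and is an illustration of the pattern, not the kernel implementation.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(): recover a pointer
 * to the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {			/* plays the role of struct inode / i_rcu */
	int generation;
};

struct outer {			/* plays the role of struct ll_inode_info */
	char name[16];
	struct inner vfs_part;	/* embedded member, like lli_vfs_inode */
};

/* A callback that only receives the embedded member, the way
 * ll_inode_destroy_callback() only receives struct rcu_head *. */
static void callback(struct inner *in)
{
	struct outer *out = container_of(in, struct outer, vfs_part);

	printf("outer=%s generation=%d\n", out->name, in->generation);
}

int main(void)
{
	struct outer o = { .name = "lli", .vfs_part = { .generation = 7 } };

	callback(&o.vfs_part);
	return 0;
}
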
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
deleted file mode 100644
index 69b203651905..000000000000
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/stat.h>
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-
-static int ll_readlink_internal(struct inode *inode,
- struct ptlrpc_request **request, char **symname)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc, symlen = i_size_read(inode) + 1;
- struct mdt_body *body;
- struct md_op_data *op_data;
-
- *request = NULL;
-
- if (lli->lli_symlink_name) {
- int print_limit = min_t(int, PAGE_SIZE - 128, symlen);
-
- *symname = lli->lli_symlink_name;
-		/* If the total CDEBUG() size is larger than a page, it
-		 * will print a warning to the console; avoid this by
-		 * printing just the last part of the symlink. */
- CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
- print_limit < symlen ? "..." : "", print_limit,
- (*symname) + symlen - print_limit, symlen);
- return 0;
- }
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, symlen,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- op_data->op_valid = OBD_MD_LINKNAME;
- rc = md_getattr(sbi->ll_md_exp, op_data, request);
- ll_finish_md_op_data(op_data);
- if (rc) {
- if (rc != -ENOENT)
- CERROR("inode %lu: rc = %d\n", inode->i_ino, rc);
- goto failed;
- }
-
- body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
- if ((body->valid & OBD_MD_LINKNAME) == 0) {
- CERROR("OBD_MD_LINKNAME not set on reply\n");
- rc = -EPROTO;
- goto failed;
- }
-
- LASSERT(symlen != 0);
- if (body->eadatasize != symlen) {
- CERROR("inode %lu: symlink length %d not expected %d\n",
- inode->i_ino, body->eadatasize - 1, symlen - 1);
- rc = -EPROTO;
- goto failed;
- }
-
- *symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD);
- if (*symname == NULL ||
- strnlen(*symname, symlen) != symlen - 1) {
- /* not full/NULL terminated */
- CERROR("inode %lu: symlink not NULL terminated string of length %d\n",
- inode->i_ino, symlen - 1);
- rc = -EPROTO;
- goto failed;
- }
-
- lli->lli_symlink_name = kzalloc(symlen, GFP_NOFS);
- /* do not return an error if we cannot cache the symlink locally */
- if (lli->lli_symlink_name) {
- memcpy(lli->lli_symlink_name, *symname, symlen);
- *symname = lli->lli_symlink_name;
- }
- return 0;
-
-failed:
- return rc;
-}
-
-static const char *ll_follow_link(struct dentry *dentry, void **cookie)
-{
- struct inode *inode = d_inode(dentry);
- struct ptlrpc_request *request = NULL;
- int rc;
- char *symname = NULL;
-
- CDEBUG(D_VFSTRACE, "VFS Op\n");
- ll_inode_size_lock(inode);
- rc = ll_readlink_internal(inode, &request, &symname);
- ll_inode_size_unlock(inode);
- if (rc) {
- ptlrpc_req_finished(request);
- return ERR_PTR(rc);
- }
-
-	/* symname may point into the request message buffer, so we
-	 * delay releasing the request until ll_put_link.
-	 */
- *cookie = request;
- return symname;
-}
-
-static void ll_put_link(struct inode *unused, void *cookie)
-{
- ptlrpc_req_finished(cookie);
-}
-
-struct inode_operations ll_fast_symlink_inode_operations = {
- .readlink = generic_readlink,
- .setattr = ll_setattr,
- .follow_link = ll_follow_link,
- .put_link = ll_put_link,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
- .listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
-};
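
ll_readlink_internal() above accepts the symlink buffer from the MDS reply only if it is exactly symlen bytes and NUL-terminated, i.e. strnlen() over the buffer must stop at symlen - 1. The sketch below is a hypothetical helper showing the same validation shape; validate_symlink and its arguments are invented for the example and are not part of the removed code.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the check in ll_readlink_internal():
 * a reply buffer of `buflen` bytes is accepted only if it is exactly
 * `size + 1` bytes long and NUL-terminated, so strnlen() over the
 * buffer stops at `size`. */
static int validate_symlink(const char *buf, size_t buflen, size_t size)
{
	if (buf == NULL || buflen != size + 1)
		return -EPROTO;
	if (strnlen(buf, buflen) != size)
		return -EPROTO;
	return 0;
}

int main(void)
{
	const char good[] = "target/path";	/* 11 chars plus the NUL */
	char bad[12];

	memset(bad, 'x', sizeof(bad));		/* no NUL terminator at all */

	printf("good: %d\n", validate_symlink(good, sizeof(good), strlen(good)));
	printf("bad:  %d\n", validate_symlink(bad, sizeof(bad), sizeof(bad) - 1));
	return 0;
}
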
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
deleted file mode 100644
index b8f6a8779fd3..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl_device and cl_device_type implementation for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-
-#include "../include/obd.h"
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-/*
- * vvp_ prefix stands for "Vfs Vm Posix". It corresponds to historical
- * "llite_" (var. "ll_") prefix.
- */
-
-static struct kmem_cache *vvp_thread_kmem;
-static struct kmem_cache *vvp_session_kmem;
-static struct lu_kmem_descr vvp_caches[] = {
- {
- .ckd_cache = &vvp_thread_kmem,
- .ckd_name = "vvp_thread_kmem",
- .ckd_size = sizeof(struct vvp_thread_info),
- },
- {
- .ckd_cache = &vvp_session_kmem,
- .ckd_name = "vvp_session_kmem",
- .ckd_size = sizeof(struct vvp_session)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-static void *vvp_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct vvp_thread_info *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, GFP_NOFS);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void vvp_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct vvp_thread_info *info = data;
-
- OBD_SLAB_FREE_PTR(info, vvp_thread_kmem);
-}
-
-static void *vvp_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct vvp_session *session;
-
- OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS);
- if (session == NULL)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-static void vvp_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct vvp_session *session = data;
-
- OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
-}
-
-
-struct lu_context_key vvp_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = vvp_key_init,
- .lct_fini = vvp_key_fini
-};
-
-struct lu_context_key vvp_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = vvp_session_key_init,
- .lct_fini = vvp_session_key_fini
-};
-
-/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
-LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);
-
-static const struct lu_device_operations vvp_lu_ops = {
- .ldo_object_alloc = vvp_object_alloc
-};
-
-static const struct cl_device_operations vvp_cl_ops = {
- .cdo_req_init = ccc_req_init
-};
-
-static struct lu_device *vvp_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
-}
-
-static const struct lu_device_type_operations vvp_device_type_ops = {
- .ldto_init = vvp_type_init,
- .ldto_fini = vvp_type_fini,
-
- .ldto_start = vvp_type_start,
- .ldto_stop = vvp_type_stop,
-
- .ldto_device_alloc = vvp_device_alloc,
- .ldto_device_free = ccc_device_free,
- .ldto_device_init = ccc_device_init,
- .ldto_device_fini = ccc_device_fini
-};
-
-struct lu_device_type vvp_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_VVP_NAME,
- .ldt_ops = &vvp_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-
-/**
 - * Global initialization for the VVP layer: set up the VVP kmem caches
 - * and the vvp device type.
- */
-int vvp_global_init(void)
-{
- int result;
-
- result = lu_kmem_init(vvp_caches);
- if (result == 0) {
- result = ccc_global_init(&vvp_device_type);
- if (result != 0)
- lu_kmem_fini(vvp_caches);
- }
- return result;
-}
-
-void vvp_global_fini(void)
-{
- ccc_global_fini(&vvp_device_type);
- lu_kmem_fini(vvp_caches);
-}
-
-
-/*****************************************************************************
- *
- * mirror obd-devices into cl devices.
- *
- */
-
-int cl_sb_init(struct super_block *sb)
-{
- struct ll_sb_info *sbi;
- struct cl_device *cl;
- struct lu_env *env;
- int rc = 0;
- int refcheck;
-
- sbi = ll_s2sbi(sb);
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- cl = cl_type_setup(env, NULL, &vvp_device_type,
- sbi->ll_dt_exp->exp_obd->obd_lu_dev);
- if (!IS_ERR(cl)) {
- cl2ccc_dev(cl)->cdv_sb = sb;
- sbi->ll_cl = cl;
- sbi->ll_site = cl2lu_dev(cl)->ld_site;
- }
- cl_env_put(env, &refcheck);
- } else
- rc = PTR_ERR(env);
- return rc;
-}
-
-int cl_sb_fini(struct super_block *sb)
-{
- struct ll_sb_info *sbi;
- struct lu_env *env;
- struct cl_device *cld;
- int refcheck;
- int result;
-
- sbi = ll_s2sbi(sb);
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- cld = sbi->ll_cl;
-
- if (cld != NULL) {
- cl_stack_fini(env, cld);
- sbi->ll_cl = NULL;
- sbi->ll_site = NULL;
- }
- cl_env_put(env, &refcheck);
- result = 0;
- } else {
- CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
- result = PTR_ERR(env);
- }
- /*
-	 * If mount failed (sbi->ll_cl == NULL), and there are no other
-	 * mounts, stop device types manually (this usually happens
-	 * automatically when the last device is destroyed).
- */
- lu_types_stop();
- return result;
-}
-
-/****************************************************************************
- *
- * debugfs/lustre/llite/$MNT/dump_page_cache
- *
- ****************************************************************************/
-
-/*
- * To represent the contents of a page cache as a byte stream, the following
- * information is encoded in the 64-bit offset:
- *
- * - file hash bucket in lu_site::ls_hash[] 28bits
- *
- * - how far file is from bucket head 4bits
- *
- * - page index 32bits
- *
- * The first two fields identify a file in the cache uniquely.
- */
-
-#define PGC_OBJ_SHIFT (32 + 4)
-#define PGC_DEPTH_SHIFT (32)
-
-struct vvp_pgcache_id {
- unsigned vpi_bucket;
- unsigned vpi_depth;
- uint32_t vpi_index;
-
- unsigned vpi_curdep;
- struct lu_object_header *vpi_obj;
-};
-
-static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
-{
- CLASSERT(sizeof(pos) == sizeof(__u64));
-
- id->vpi_index = pos & 0xffffffff;
- id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
- id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT;
-}
-
-static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
-{
- return
- ((__u64)id->vpi_index) |
- ((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) |
- ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
-}
-
-static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- struct vvp_pgcache_id *id = data;
- struct lu_object_header *hdr = cfs_hash_object(hs, hnode);
-
- if (id->vpi_curdep-- > 0)
- return 0; /* continue */
-
- if (lu_object_is_dying(hdr))
- return 1;
-
- cfs_hash_get(hs, hnode);
- id->vpi_obj = hdr;
- return 1;
-}
-
-static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
- struct lu_device *dev,
- struct vvp_pgcache_id *id)
-{
- LASSERT(lu_device_is_cl(dev));
-
- id->vpi_depth &= 0xf;
- id->vpi_obj = NULL;
- id->vpi_curdep = id->vpi_depth;
-
- cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
- vvp_pgcache_obj_get, id);
- if (id->vpi_obj != NULL) {
- struct lu_object *lu_obj;
-
- lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
- if (lu_obj != NULL) {
- lu_object_ref_add(lu_obj, "dump", current);
- return lu2cl(lu_obj);
- }
- lu_object_put(env, lu_object_top(id->vpi_obj));
-
- } else if (id->vpi_curdep > 0) {
- id->vpi_depth = 0xf;
- }
- return NULL;
-}
-
-static loff_t vvp_pgcache_find(const struct lu_env *env,
- struct lu_device *dev, loff_t pos)
-{
- struct cl_object *clob;
- struct lu_site *site;
- struct vvp_pgcache_id id;
-
- site = dev->ld_site;
- vvp_pgcache_id_unpack(pos, &id);
-
- while (1) {
- if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
- return ~0ULL;
- clob = vvp_pgcache_obj(env, dev, &id);
- if (clob != NULL) {
- struct cl_object_header *hdr;
- int nr;
- struct cl_page *pg;
-
- /* got an object. Find next page. */
- hdr = cl_object_header(clob);
-
- spin_lock(&hdr->coh_page_guard);
- nr = radix_tree_gang_lookup(&hdr->coh_tree,
- (void **)&pg,
- id.vpi_index, 1);
- if (nr > 0) {
- id.vpi_index = pg->cp_index;
-				/* Can't support files over 16TB */
- nr = !(pg->cp_index > 0xffffffff);
- }
- spin_unlock(&hdr->coh_page_guard);
-
- lu_object_ref_del(&clob->co_lu, "dump", current);
- cl_object_put(env, clob);
- if (nr > 0)
- return vvp_pgcache_id_pack(&id);
- }
- /* to the next object. */
- ++id.vpi_depth;
- id.vpi_depth &= 0xf;
- if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
- return ~0ULL;
- id.vpi_index = 0;
- }
-}
-
-#define seq_page_flag(seq, page, flag, has_flags) do { \
- if (test_bit(PG_##flag, &(page)->flags)) { \
- seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \
- has_flags = 1; \
- } \
-} while (0)
-
-static void vvp_pgcache_page_show(const struct lu_env *env,
- struct seq_file *seq, struct cl_page *page)
-{
- struct ccc_page *cpg;
- struct page *vmpage;
- int has_flags;
-
- cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vmpage = cpg->cpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
- 0 /* gen */,
- cpg, page,
- "none",
- cpg->cpg_write_queued ? "wq" : "- ",
- cpg->cpg_defer_uptodate ? "du" : "- ",
- PageWriteback(vmpage) ? "wb" : "-",
- vmpage, vmpage->mapping->host->i_ino,
- vmpage->mapping->host->i_generation,
- vmpage->mapping->host, vmpage->index,
- page_count(vmpage));
- has_flags = 0;
- seq_page_flag(seq, vmpage, locked, has_flags);
- seq_page_flag(seq, vmpage, error, has_flags);
- seq_page_flag(seq, vmpage, referenced, has_flags);
- seq_page_flag(seq, vmpage, uptodate, has_flags);
- seq_page_flag(seq, vmpage, dirty, has_flags);
- seq_page_flag(seq, vmpage, writeback, has_flags);
- seq_printf(seq, "%s]\n", has_flags ? "" : "-");
-}
-
-static int vvp_pgcache_show(struct seq_file *f, void *v)
-{
- loff_t pos;
- struct ll_sb_info *sbi;
- struct cl_object *clob;
- struct lu_env *env;
- struct cl_page *page;
- struct cl_object_header *hdr;
- struct vvp_pgcache_id id;
- int refcheck;
- int result;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- pos = *(loff_t *) v;
- vvp_pgcache_id_unpack(pos, &id);
- sbi = f->private;
- clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
- if (clob != NULL) {
- hdr = cl_object_header(clob);
-
- spin_lock(&hdr->coh_page_guard);
- page = cl_page_lookup(hdr, id.vpi_index);
- spin_unlock(&hdr->coh_page_guard);
-
- seq_printf(f, "%8x@"DFID": ",
- id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
- if (page != NULL) {
- vvp_pgcache_page_show(env, f, page);
- cl_page_put(env, page);
- } else
- seq_puts(f, "missing\n");
- lu_object_ref_del(&clob->co_lu, "dump", current);
- cl_object_put(env, clob);
- } else
- seq_printf(f, "%llx missing\n", pos);
- cl_env_put(env, &refcheck);
- result = 0;
- } else
- result = PTR_ERR(env);
- return result;
-}
-
-static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
-{
- struct ll_sb_info *sbi;
- struct lu_env *env;
- int refcheck;
-
- sbi = f->private;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- sbi = f->private;
- if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT)
- pos = ERR_PTR(-EFBIG);
- else {
- *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
- *pos);
- if (*pos == ~0ULL)
- pos = NULL;
- }
- cl_env_put(env, &refcheck);
- }
- return pos;
-}
-
-static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
-{
- struct ll_sb_info *sbi;
- struct lu_env *env;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- sbi = f->private;
- *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
- if (*pos == ~0ULL)
- pos = NULL;
- cl_env_put(env, &refcheck);
- }
- return pos;
-}
-
-static void vvp_pgcache_stop(struct seq_file *f, void *v)
-{
- /* Nothing to do */
-}
-
-static const struct seq_operations vvp_pgcache_ops = {
- .start = vvp_pgcache_start,
- .next = vvp_pgcache_next,
- .stop = vvp_pgcache_stop,
- .show = vvp_pgcache_show
-};
-
-static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(filp, &vvp_pgcache_ops);
- if (rc)
- return rc;
-
- seq = filp->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-const struct file_operations vvp_dump_pgcache_file_ops = {
- .owner = THIS_MODULE,
- .open = vvp_dump_pgcache_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
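
The dump_page_cache code removed above encodes a (hash bucket, depth, page index) triple into a single 64-bit seq_file position: the index in the low 32 bits, the depth in the next 4 bits, and the bucket above that. The sketch below round-trips that packing scheme as a standalone program; the struct and function names are local to the example rather than the removed vvp_pgcache_id_{pack,unpack} symbols, but the bit layout follows the comment above.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same bit layout as described for the removed vvp_pgcache_id_{pack,unpack}():
 * bits  0..31  page index
 * bits 32..35  depth within the hash bucket (4 bits)
 * bits 36..63  hash bucket number (28 bits)
 */
#define PGC_DEPTH_SHIFT	32
#define PGC_OBJ_SHIFT	(32 + 4)

struct pgcache_id {
	unsigned int bucket;	/* 28 significant bits */
	unsigned int depth;	/* 4 significant bits */
	uint32_t index;
};

static uint64_t pgcache_id_pack(const struct pgcache_id *id)
{
	return (uint64_t)id->index |
	       ((uint64_t)id->depth << PGC_DEPTH_SHIFT) |
	       ((uint64_t)id->bucket << PGC_OBJ_SHIFT);
}

static void pgcache_id_unpack(uint64_t pos, struct pgcache_id *id)
{
	id->index = pos & 0xffffffffULL;
	id->depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
	id->bucket = pos >> PGC_OBJ_SHIFT;
}

int main(void)
{
	struct pgcache_id in = { .bucket = 0x123456, .depth = 0x9,
				 .index = 0xdeadbeef };
	struct pgcache_id out;
	uint64_t pos = pgcache_id_pack(&in);

	pgcache_id_unpack(pos, &out);
	assert(out.bucket == in.bucket && out.depth == in.depth &&
	       out.index == in.index);
	printf("pos=%#llx bucket=%#x depth=%u index=%#x\n",
	       (unsigned long long)pos, out.bucket, out.depth,
	       (unsigned int)out.index);
	return 0;
}
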
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
deleted file mode 100644
index 2162bf6c08a7..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal definitions for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#ifndef VVP_INTERNAL_H
-#define VVP_INTERNAL_H
-
-
-#include "../include/cl_object.h"
-#include "llite_internal.h"
-
-int vvp_io_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int vvp_lock_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
-int vvp_page_init (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
-struct lu_object *vvp_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-
-struct ccc_object *cl_inode2ccc(struct inode *inode);
-
-extern const struct file_operations vvp_dump_pgcache_file_ops;
-
-#endif /* VVP_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
deleted file mode 100644
index cf264de0e531..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ /dev/null
@@ -1,1208 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_io for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-
-#include "../include/obd.h"
-#include "../include/lustre_lite.h"
-
-#include "vvp_internal.h"
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice);
-
-/**
- * True if \a io is a normal io, false for splice_{read,write}.
- */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
-{
- struct vvp_io *vio = vvp_env_io(env);
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- return vio->cui_io_subtype == IO_NORMAL;
-}
-
-/**
- * For layout swapping: the file's layout may have changed.
- * To avoid populating pages into the wrong stripe, we have to verify the
- * correctness of the layout. This works because a layout-swapping process
- * has to acquire the group lock.
- */
-static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
- struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_io *cio = ccc_env_io(env);
- bool rc = true;
-
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
-		/* no lock is needed here to check lli_layout_gen, as we hold
-		 * the extent lock and the GROUP lock has to be held to swap
-		 * the layout */
- if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
- io->ci_need_restart = 1;
- /* this will return application a short read/write */
- io->ci_continue = 0;
- rc = false;
- }
- case CIT_FAULT:
- /* fault is okay because we've already had a page. */
- default:
- break;
- }
-
- return rc;
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-static int vvp_io_fault_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
-
- LASSERT(inode ==
- file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
- vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
- return 0;
-}
-
-static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct ccc_io *cio = cl2ccc_io(env, ios);
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, DFID
- " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
- PFID(lu_object_fid(&obj->co_lu)),
- io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
-
- if (io->ci_restore_needed == 1) {
- int rc;
-
-		/* the file was detected as released; we need to restore it
-		 * before finishing the io
- */
- rc = ll_layout_restore(ccc_object_inode(obj));
-		/* if restore registration failed, do not restart;
-		 * we will return -ENODATA */
-		/* The layout will change after the restore, so we need to
-		 * block on the layout lock held by the MDT.
-		 * As the MDT will not send the new layout in the lvb
-		 * (see LU-3124), we have to fetch it explicitly; all of
-		 * this is done by ll_layout_refresh().
-		 */
- if (rc == 0) {
- io->ci_restore_needed = 0;
- io->ci_need_restart = 1;
- io->ci_verify_layout = 1;
- } else {
- io->ci_restore_needed = 1;
- io->ci_need_restart = 0;
- io->ci_verify_layout = 0;
- io->ci_result = rc;
- }
- }
-
- if (!io->ci_ignore_layout && io->ci_verify_layout) {
- __u32 gen = 0;
-
- /* check layout version */
- ll_layout_refresh(ccc_object_inode(obj), &gen);
- io->ci_need_restart = cio->cui_layout_gen != gen;
- if (io->ci_need_restart) {
- CDEBUG(D_VFSTRACE,
- DFID" layout changed from %d to %d.\n",
- PFID(lu_object_fid(&obj->co_lu)),
- cio->cui_layout_gen, gen);
-			/* today a successful restore is the only possible
-			 * case */
- /* restore was done, clear restoring state */
- ll_i2info(ccc_object_inode(obj))->lli_flags &=
- ~LLIF_FILE_RESTORING;
- }
- }
-}
-
-static void vvp_io_fault_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_page *page = io->u.ci_fault.ft_page;
-
- CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
-
- if (page != NULL) {
- lu_ref_del(&page->cp_reference, "fault", io);
- cl_page_put(env, page);
- io->u.ci_fault.ft_page = NULL;
- }
- vvp_io_fini(env, ios);
-}
-
-static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
-{
- /*
- * we only want to hold PW locks if the mmap() can generate
- * writes back to the file and that only happens in shared
- * writable vmas
- */
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
- return CLM_WRITE;
- return CLM_READ;
-}
-
-static int vvp_mmap_locks(const struct lu_env *env,
- struct ccc_io *vio, struct cl_io *io)
-{
- struct ccc_thread_info *cti = ccc_env_info(env);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- struct cl_lock_descr *descr = &cti->cti_descr;
- ldlm_policy_data_t policy;
- unsigned long addr;
- ssize_t count;
- int result;
- struct iov_iter i;
- struct iovec iov;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- if (!cl_is_normalio(env, io))
- return 0;
-
-	if (vio->cui_iter == NULL) /* NFS or loopback device write */
- return 0;
-
-	/* No MM (e.g. NFS)? Then there are no vmas either. */
- if (mm == NULL)
- return 0;
-
- iov_for_each(iov, i, *(vio->cui_iter)) {
- addr = (unsigned long)iov.iov_base;
- count = iov.iov_len;
- if (count == 0)
- continue;
-
- count += addr & (~CFS_PAGE_MASK);
- addr &= CFS_PAGE_MASK;
-
- down_read(&mm->mmap_sem);
- while ((vma = our_vma(mm, addr, count)) != NULL) {
- struct inode *inode = file_inode(vma->vm_file);
- int flags = CEF_MUST;
-
- if (ll_file_nolock(vma->vm_file)) {
- /*
- * For no lock case, a lockless lock will be
- * generated.
- */
- flags = CEF_NEVER;
- }
-
- /*
- * XXX: Required lock mode can be weakened: CIT_WRITE
- * io only ever reads user level buffer, and CIT_READ
- * only writes on it.
- */
- policy_from_vma(&policy, vma, addr, count);
- descr->cld_mode = vvp_mode_from_vma(vma);
- descr->cld_obj = ll_i2info(inode)->lli_clob;
- descr->cld_start = cl_index(descr->cld_obj,
- policy.l_extent.start);
- descr->cld_end = cl_index(descr->cld_obj,
- policy.l_extent.end);
- descr->cld_enq_flags = flags;
- result = cl_io_lock_alloc_add(env, io, descr);
-
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- descr->cld_mode, descr->cld_start,
- descr->cld_end);
-
- if (result < 0) {
- up_read(&mm->mmap_sem);
- return result;
- }
-
- if (vma->vm_end - addr >= count)
- break;
-
- count -= vma->vm_end - addr;
- addr = vma->vm_end;
- }
- up_read(&mm->mmap_sem);
- }
- return 0;
-}
-
-static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
- enum cl_lock_mode mode, loff_t start, loff_t end)
-{
- struct ccc_io *cio = ccc_env_io(env);
- int result;
- int ast_flags = 0;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- ccc_io_update_iov(env, cio, io);
-
- if (io->u.ci_rw.crw_nonblock)
- ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, cio, io);
- if (result == 0)
- result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
- return result;
-}
-
-static int vvp_io_read_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
- int result;
-
- result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
- rd->crw_pos + rd->crw_count - 1);
-
- return result;
-}
-
-static int vvp_io_fault_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct vvp_io *vio = cl2vvp_io(env, ios);
- /*
- * XXX LDLM_FL_CBPENDING
- */
- return ccc_io_one_lock_index
- (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
- io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
-}
-
-static int vvp_io_write_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- loff_t start;
- loff_t end;
-
- if (io->u.ci_wr.wr_append) {
- start = 0;
- end = OBD_OBJECT_EOF;
- } else {
- start = io->u.ci_wr.wr.crw_pos;
- end = start + io->u.ci_wr.wr.crw_count - 1;
- }
- return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
-}
-
-static int vvp_io_setattr_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- return 0;
-}
-
-/**
- * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
- *
- * Handles "lockless io" mode when extent locking is done by server.
- */
-static int vvp_io_setattr_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_io *io = ios->cis_io;
- __u64 new_size;
- __u32 enqflags = 0;
-
- if (cl_io_is_trunc(io)) {
- new_size = io->u.ci_setattr.sa_attr.lvb_size;
- if (new_size == 0)
- enqflags = CEF_DISCARD_DATA;
- } else {
- if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
- io->u.ci_setattr.sa_attr.lvb_ctime) ||
- (io->u.ci_setattr.sa_attr.lvb_atime >=
- io->u.ci_setattr.sa_attr.lvb_ctime))
- return 0;
- new_size = 0;
- }
- cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
- return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
- new_size, OBD_OBJECT_EOF);
-}
-
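-/*
- * Shrink or extend the in-memory inode size under ll_inode_size_lock(),
- * validating the new size with inode_newsize_ok() before calling
- * truncate_setsize().
- */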
-static int vvp_do_vmtruncate(struct inode *inode, size_t size)
-{
- int result;
- /*
- * Only ll_inode_size_lock is taken at this level.
- */
- ll_inode_size_lock(inode);
- result = inode_newsize_ok(inode, size);
- if (result < 0) {
- ll_inode_size_unlock(inode);
- return result;
- }
- truncate_setsize(inode, size);
- ll_inode_size_unlock(inode);
- return result;
-}
-
-static int vvp_io_setattr_trunc(const struct lu_env *env,
- const struct cl_io_slice *ios,
- struct inode *inode, loff_t size)
-{
- inode_dio_wait(inode);
- return 0;
-}
-
-static int vvp_io_setattr_time(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct cl_attr *attr = ccc_env_thread_attr(env);
- int result;
- unsigned valid = CAT_CTIME;
-
- cl_object_attr_lock(obj);
- attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
- if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
- attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
- valid |= CAT_ATIME;
- }
- if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
- attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
- valid |= CAT_MTIME;
- }
- result = cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
-
- return result;
-}
-
-static int vvp_io_setattr_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
- int result = 0;
-
- mutex_lock(&inode->i_mutex);
- if (cl_io_is_trunc(io))
- result = vvp_io_setattr_trunc(env, ios, inode,
- io->u.ci_setattr.sa_attr.lvb_size);
- if (result == 0)
- result = vvp_io_setattr_time(env, ios);
- return result;
-}
-
-static void vvp_io_setattr_end(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
-
- if (cl_io_is_trunc(io))
-		/* Truncate in-memory pages - they must be clean pages
-		 * because osc has already been notified to destroy the
-		 * osc_extents. */
- vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
-
- mutex_unlock(&inode->i_mutex);
-}
-
-static void vvp_io_setattr_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- vvp_io_fini(env, ios);
-}
-
-static int vvp_io_read_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_ra_read *bead = &vio->cui_bead;
- struct file *file = cio->cui_fd->fd_file;
-
- int result;
- loff_t pos = io->u.ci_rd.rd.crw_pos;
- long cnt = io->u.ci_rd.rd.crw_count;
- long tot = cio->cui_tot_count;
- int exceed = 0;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
-
- if (!can_populate_pages(env, io, inode))
- return 0;
-
- result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
- if (result != 0)
- return result;
- else if (exceed != 0)
- goto out;
-
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
- "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
- inode->i_ino, cnt, pos, i_size_read(inode));
-
- /* turn off the kernel's read-ahead */
- cio->cui_fd->fd_file->f_ra.ra_pages = 0;
-
- /* initialize read-ahead window once per syscall */
- if (!vio->cui_ra_window_set) {
- vio->cui_ra_window_set = 1;
- bead->lrr_start = cl_index(obj, pos);
- /*
- * XXX: explicit PAGE_CACHE_SIZE
- */
- bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
- ll_ra_read_in(file, bead);
- }
-
- /* BUG: 5972 */
- file_accessed(file);
- switch (vio->cui_io_subtype) {
- case IO_NORMAL:
- LASSERT(cio->cui_iocb->ki_pos == pos);
- result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
- break;
- case IO_SPLICE:
- result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
-		/* LU-1109: do splice read stripe by stripe, otherwise it
-		 * may make nfsd stuck if this read occupies all internal
-		 * pipe buffers. */
- io->ci_continue = 0;
- break;
- default:
- CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
- LBUG();
- }
-
-out:
- if (result >= 0) {
- if (result < cnt)
- io->ci_continue = 0;
- io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, READ);
- result = 0;
- }
- return result;
-}
-
-static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
-
- if (vio->cui_ra_window_set)
- ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
-
- vvp_io_fini(env, ios);
-}
-
-static int vvp_io_write_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- ssize_t result = 0;
- loff_t pos = io->u.ci_wr.wr.crw_pos;
- size_t cnt = io->u.ci_wr.wr.crw_count;
-
- if (!can_populate_pages(env, io, inode))
- return 0;
-
- if (cl_io_is_append(io)) {
- /*
-		 * PARALLEL IO: this has to be changed for parallel IO doing
- * out-of-order writes.
- */
- pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
- cio->cui_iocb->ki_pos = pos;
- } else {
- LASSERT(cio->cui_iocb->ki_pos == pos);
- }
-
- CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
-
- if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
- result = 0;
- else
- result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);
-
- if (result > 0) {
- if (result < cnt)
- io->ci_continue = 0;
- io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, WRITE);
- result = 0;
- }
- return result;
-}
-
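-/*
- * Invoke the kernel's filemap_fault() and translate the VM_FAULT_* result:
- * a returned page is locked and stored in ft_vmpage, while SIGBUS/SIGSEGV,
- * OOM and RETRY conditions are mapped to -EFAULT, -ENOMEM and -EAGAIN.
- */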
-static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
-{
- struct vm_fault *vmf = cfio->fault.ft_vmf;
-
- cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
- cfio->fault.ft_flags_valid = 1;
-
- if (vmf->page) {
- CDEBUG(D_PAGE,
- "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
- vmf->page, vmf->page->mapping, vmf->page->index,
- (long)vmf->page->flags, page_count(vmf->page),
- page_private(vmf->page), vmf->virtual_address);
- if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
- lock_page(vmf->page);
- cfio->fault.ft_flags |= VM_FAULT_LOCKED;
- }
-
- cfio->ft_vmpage = vmf->page;
- return 0;
- }
-
- if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
- return -EFAULT;
- }
-
- if (cfio->fault.ft_flags & VM_FAULT_OOM) {
- CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
- return -ENOMEM;
- }
-
- if (cfio->fault.ft_flags & VM_FAULT_RETRY)
- return -EAGAIN;
-
- CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
- return -EINVAL;
-}
-
-
-static int vvp_io_fault_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct cl_fault_io *fio = &io->u.ci_fault;
- struct vvp_fault_io *cfio = &vio->u.fault;
- loff_t offset;
- int result = 0;
- struct page *vmpage = NULL;
- struct cl_page *page;
- loff_t size;
- pgoff_t last; /* last page in a file data region */
-
- if (fio->ft_executable &&
- inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
- CWARN("binary "DFID
- " changed while waiting for the page fault lock\n",
- PFID(lu_object_fid(&obj->co_lu)));
-
- /* offset of the last byte on the page */
- offset = cl_offset(obj, fio->ft_index + 1) - 1;
- LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
- if (result != 0)
- return result;
-
- /* must return locked page */
- if (fio->ft_mkwrite) {
- LASSERT(cfio->ft_vmpage != NULL);
- lock_page(cfio->ft_vmpage);
- } else {
- result = vvp_io_kernel_fault(cfio);
- if (result != 0)
- return result;
- }
-
- vmpage = cfio->ft_vmpage;
- LASSERT(PageLocked(vmpage));
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
- ll_invalidate_page(vmpage);
-
- size = i_size_read(inode);
-	/* Though we already hold a cl_lock upon this page, it can
-	 * still be truncated locally. */
- if (unlikely((vmpage->mapping != inode->i_mapping) ||
- (page_offset(vmpage) > size))) {
- CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
-
-		/* Return +1 to stop cl_io_loop(); ll_fault() will catch it
-		 * and retry. */
- result = 1;
- goto out;
- }
-
-
- if (fio->ft_mkwrite) {
- pgoff_t last_index;
- /*
-		 * Capture the size while holding the lli_trunc_sem from
-		 * above; we want to make sure that we complete the mkwrite
-		 * action while holding this lock. We also need to make sure
-		 * that we are not past the end of the file.
- */
- last_index = cl_index(obj, size - 1);
- if (last_index < fio->ft_index) {
- CDEBUG(D_PAGE,
- "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
- vmpage->mapping, fio->ft_index, last_index);
- /*
-			 * We need to return if we are past the end of the
-			 * file. This will propagate up the call stack to
-			 * ll_page_mkwrite where we will return
-			 * VM_FAULT_NOPAGE. Any non-negative value returned
-			 * here will be silently converted to 0. If
-			 * vmpage->mapping is NULL, the error code would be
-			 * converted back to ENODATA in ll_page_mkwrite0.
-			 * Thus we return -ENODATA to handle both cases.
- */
- result = -ENODATA;
- goto out;
- }
- }
-
- page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
- if (IS_ERR(page)) {
- result = PTR_ERR(page);
- goto out;
- }
-
-	/* If the page is going to be written, we should add it into the
-	 * cache earlier. */
- if (fio->ft_mkwrite) {
- wait_on_page_writeback(vmpage);
- if (set_page_dirty(vmpage)) {
- struct ccc_page *cp;
-
- /* vvp_page_assume() calls wait_on_page_writeback(). */
- cl_page_assume(env, io, page);
-
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vvp_write_pending(cl2ccc(obj), cp);
-
- /* Do not set Dirty bit here so that in case IO is
- * started before the page is really made dirty, we
-			 * still have a chance to detect it. */
- result = cl_page_cache_add(env, io, page, CRT_WRITE);
- LASSERT(cl_page_is_owned(page, io));
-
- vmpage = NULL;
- if (result < 0) {
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
-
- cl_page_put(env, page);
-
- /* we're in big trouble, what can we do now? */
- if (result == -EDQUOT)
- result = -ENOSPC;
- goto out;
- } else
- cl_page_disown(env, io, page);
- }
- }
-
- last = cl_index(obj, size - 1);
- /*
-	 * The ft_index is only used in the case of a mkwrite action.
-	 * We need to check that our assertions are correct, since we
-	 * should have caught this above.
- */
- LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
- if (fio->ft_index == last)
- /*
- * Last page is mapped partially.
- */
- fio->ft_nob = size - cl_offset(obj, fio->ft_index);
- else
- fio->ft_nob = cl_page_size(obj);
-
- lu_ref_add(&page->cp_reference, "fault", io);
- fio->ft_page = page;
-
-out:
- /* return unlocked vmpage to avoid deadlocking */
- if (vmpage != NULL)
- unlock_page(vmpage);
- cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
- return result;
-}
-
-static int vvp_io_fsync_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
-	/* We should set the TOWRITE bit on each dirty page in the radix tree
-	 * to verify that pages have been written, but this is difficult
-	 * because of races. */
- return 0;
-}
-
-static int vvp_io_read_page(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice)
-{
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *page = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
- struct ll_readahead_state *ras = &fd->fd_ras;
- struct page *vmpage = cp->cpg_page;
- struct cl_2queue *queue = &io->ci_queue;
- int rc;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- LASSERT(slice->cpl_obj == obj);
-
- if (sbi->ll_ra_info.ra_max_pages_per_file &&
- sbi->ll_ra_info.ra_max_pages)
- ras_update(sbi, inode, ras, page->cp_index,
- cp->cpg_defer_uptodate);
-
- /* Sanity check whether the page is protected by a lock. */
- rc = cl_page_is_under_lock(env, io, page);
- if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
- rc == -ENODATA ? "without a lock" :
- "match failed", rc);
- if (rc != -ENODATA)
- return rc;
- }
-
- if (cp->cpg_defer_uptodate) {
- cp->cpg_ra_used = 1;
- cl_page_export(env, page, 1);
- }
- /*
-	 * Add the page into the queue even when it is marked uptodate above.
-	 * This will unlock it automatically as part of cl_page_list_disown().
- */
- cl_2queue_add(queue, page);
- if (sbi->ll_ra_info.ra_max_pages_per_file &&
- sbi->ll_ra_info.ra_max_pages)
- ll_readahead(env, io, ras,
- vmpage->mapping, &queue->c2_qin, fd->fd_flags);
-
- return 0;
-}
-
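-/*
- * Submit a single page for synchronous I/O through a temporary 2-queue and
- * wait for completion via cl_io_submit_sync().
- */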
-static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, struct ccc_page *cp,
- enum cl_req_type crt)
-{
- struct cl_2queue *queue;
- int result;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- queue = &io->ci_queue;
- cl_2queue_init_page(queue, page);
-
- result = cl_io_submit_sync(env, io, crt, queue, 0);
- LASSERT(cl_page_is_owned(page, io));
-
- if (crt == CRT_READ)
- /*
-		 * In the CRT_WRITE case the page is left locked even in
-		 * case of error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
-
- return result;
-}
-
-/**
- * Prepare partially written-to page for a write.
- */
-static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
- struct cl_object *obj, struct cl_page *pg,
- struct ccc_page *cp,
- unsigned from, unsigned to)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- loff_t offset = cl_offset(obj, pg->cp_index);
- int result;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result == 0) {
- /*
-		 * If we are writing to a new page, there is no need to read
-		 * the old data.
- * The extent locking will have updated the KMS, and for our
- * purposes here we can treat it like i_size.
- */
- if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(cp->cpg_page);
-
- memset(kaddr, 0, cl_page_size(obj));
- kunmap_atomic(kaddr);
- } else if (cp->cpg_defer_uptodate)
- cp->cpg_ra_used = 1;
- else
- result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
- /*
- * In older implementations, obdo_refresh_inode is called here
- * to update the inode because the write might modify the
- * object info at OST. However, this has been proven useless,
-		 * since LVB functions will be called when a user space program
-		 * tries to retrieve inode attributes. Also, see bug 15909 for
- * details. -jay
- */
- if (result == 0)
- cl_page_export(env, pg, 1);
- }
- return result;
-}
-
-static int vvp_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
-
- int result;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
-
- result = 0;
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
- if (!PageUptodate(vmpage)) {
- /*
- * We're completely overwriting an existing page, so _don't_
- * set it up to date until commit_write
- */
- if (from == 0 && to == PAGE_CACHE_SIZE) {
- CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
- POISON_PAGE(page, 0x11);
- } else
- result = vvp_io_prepare_partial(env, ios->cis_io, obj,
- pg, cp, from, to);
- } else
- CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
- return result;
-}
-
-static int vvp_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct cl_io *io = ios->cis_io;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct page *vmpage = cp->cpg_page;
-
- int result;
- int tallyop;
- loff_t size;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == inode);
-
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
- CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
-
- /*
- * queue a write for some time in the future the first time we
- * dirty the page.
- *
- * This is different from what other file systems do: they usually
- * just mark page (and some of its buffers) dirty and rely on
- * balance_dirty_pages() to start a write-back. Lustre wants write-back
- * to be started earlier for the following reasons:
- *
- * (1) with a large number of clients we need to limit the amount
- * of cached data on the clients a lot;
- *
- * (2) large compute jobs generally want compute-only then io-only
- * and the IO should complete as quickly as possible;
- *
- * (3) IO is batched up to the RPC size and is async until the
- * client max cache is hit
- * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
- *
- */
- if (!PageDirty(vmpage)) {
- tallyop = LPROC_LL_DIRTY_MISSES;
- result = cl_page_cache_add(env, io, pg, CRT_WRITE);
- if (result == 0) {
- /* page was added into cache successfully. */
- set_page_dirty(vmpage);
- vvp_write_pending(cl2ccc(obj), cp);
- } else if (result == -EDQUOT) {
- pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
- bool need_clip = true;
-
- /*
- * Client ran out of disk space grant. Possible
- * strategies are:
- *
- * (a) do a sync write, renewing grant;
- *
- * (b) stop writing on this stripe, switch to the
- * next one.
- *
- * (b) is a part of "parallel io" design that is the
- * ultimate goal. (a) is what "old" client did, and
- * what the new code continues to do for the time
- * being.
- */
- if (last_index > pg->cp_index) {
- to = PAGE_CACHE_SIZE;
- need_clip = false;
- } else if (last_index == pg->cp_index) {
- int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
- if (to < size_to)
- to = size_to;
- }
- if (need_clip)
- cl_page_clip(env, pg, 0, to);
- result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
- if (result)
- CERROR("Write page %lu of inode %p failed %d\n",
- pg->cp_index, inode, result);
- }
- } else {
- tallyop = LPROC_LL_DIRTY_HITS;
- result = 0;
- }
- ll_stats_ops_tally(sbi, tallyop, 1);
-
-	/* The inode should be marked DIRTY even if no new page was marked
-	 * DIRTY, because a page could have remained unflushed between two
-	 * modifications. It is important that the file is marked DIRTY as
-	 * soon as the I/O is done. Indeed, once the cache is flushed, the
-	 * file could already be closed and it would be too late to warn the
-	 * MDT. It is acceptable that the file is marked DIRTY even if the
-	 * I/O is dropped for some reason before being flushed to the OST.
-	 */
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- size = cl_offset(obj, pg->cp_index) + to;
-
- ll_inode_size_lock(inode);
- if (result == 0) {
- if (size > i_size_read(inode)) {
- cl_isize_write_nolock(inode, size);
- CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (unsigned long)size);
- }
- cl_page_export(env, pg, 1);
- } else {
- if (size > i_size_read(inode))
- cl_page_discard(env, io, pg);
- }
- ll_inode_size_unlock(inode);
- return result;
-}
-
-static const struct cl_io_operations vvp_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = vvp_io_read_fini,
- .cio_lock = vvp_io_read_lock,
- .cio_start = vvp_io_read_start,
- .cio_advance = ccc_io_advance
- },
- [CIT_WRITE] = {
- .cio_fini = vvp_io_fini,
- .cio_lock = vvp_io_write_lock,
- .cio_start = vvp_io_write_start,
- .cio_advance = ccc_io_advance
- },
- [CIT_SETATTR] = {
- .cio_fini = vvp_io_setattr_fini,
- .cio_iter_init = vvp_io_setattr_iter_init,
- .cio_lock = vvp_io_setattr_lock,
- .cio_start = vvp_io_setattr_start,
- .cio_end = vvp_io_setattr_end
- },
- [CIT_FAULT] = {
- .cio_fini = vvp_io_fault_fini,
- .cio_iter_init = vvp_io_fault_iter_init,
- .cio_lock = vvp_io_fault_lock,
- .cio_start = vvp_io_fault_start,
- .cio_end = ccc_io_end
- },
- [CIT_FSYNC] = {
- .cio_start = vvp_io_fsync_start,
- .cio_fini = vvp_io_fini
- },
- [CIT_MISC] = {
- .cio_fini = vvp_io_fini
- }
- },
- .cio_read_page = vvp_io_read_page,
- .cio_prepare_write = vvp_io_prepare_write,
- .cio_commit_write = vvp_io_commit_write
-};
-
-int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(obj);
- int result;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, DFID
- " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
- PFID(lu_object_fid(&obj->co_lu)),
- io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
-
- CL_IO_SLICE_CLEAN(cio, cui_cl);
- cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
- vio->cui_ra_window_set = 0;
- result = 0;
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
- size_t count;
- struct ll_inode_info *lli = ll_i2info(inode);
-
- count = io->u.ci_rw.crw_count;
- /* "If nbyte is 0, read() will return 0 and have no other
- * results." -- Single Unix Spec */
- if (count == 0)
- result = 1;
- else
- cio->cui_tot_count = count;
-
-		/* For read/write, we store the jobid in the inode, and
-		 * it'll be fetched by osc when building the RPC.
-		 *
-		 * It's not accurate if the file is shared by different
-		 * jobs.
- */
- lustre_get_jobid(lli->lli_jobid);
- } else if (io->ci_type == CIT_SETATTR) {
- if (!cl_io_is_trunc(io))
- io->ci_lockreq = CILR_MANDATORY;
- }
-
-	/* Ignore layout changes for generic CIT_MISC but not for glimpse.
-	 * The io context for glimpse must set ci_verify_layout to true;
-	 * see cl_glimpse_size0() for details. */
- if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
- io->ci_ignore_layout = 1;
-
-	/* Enqueue the layout lock and get the layout version. We need to do
-	 * this even for operations that require an open file, such as read
-	 * and write, because the layout lock might not be granted in
-	 * IT_OPEN. */
- if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &cio->cui_layout_gen);
- if (result == -ENOENT)
-			/* If the inode on the MDS has been removed, but the
-			 * objects on the OSTs haven't been destroyed (async
-			 * unlink), the layout fetch will return -ENOENT;
-			 * ignore this error and continue with the dirty
-			 * flush. LU-3230. */
- result = 0;
- if (result < 0)
- CERROR("%s: refresh file layout " DFID " error %d.\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(lu_object_fid(&obj->co_lu)), result);
- }
-
- return result;
-}
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
-	/* Called only for its assertion. */
- cl2ccc_io(env, slice);
- return vvp_env_io(env);
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
deleted file mode 100644
index f354e82d4ae7..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-
-#include "../include/obd.h"
-#include "../include/lustre_lite.h"
-
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Vvp lock functions.
- *
- */
-
-/**
- * Estimates lock value for the purpose of managing the lock cache during
- * memory shortages.
- *
- * Locks for memory mapped files are almost infinitely precious, others are
- * junk. "Mapped locks" are heavy, but not infinitely heavy, so that they are
- * ordered within themselves by weights assigned from other layers.
- */
-static unsigned long vvp_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct ccc_object *cob = cl2ccc(slice->cls_obj);
-
- return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0;
-}
-
-static const struct cl_lock_operations vvp_lock_ops = {
- .clo_delete = ccc_lock_delete,
- .clo_fini = ccc_lock_fini,
- .clo_enqueue = ccc_lock_enqueue,
- .clo_wait = ccc_lock_wait,
- .clo_use = ccc_lock_use,
- .clo_unuse = ccc_lock_unuse,
- .clo_fits_into = ccc_lock_fits_into,
- .clo_state = ccc_lock_state,
- .clo_weigh = vvp_lock_weigh
-};
-
-int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops);
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
deleted file mode 100644
index cb2811b9aae7..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl_object implementation for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd.h"
-#include "../include/lustre_lite.h"
-
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-static int vvp_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct ccc_object *obj = lu2ccc(o);
- struct inode *inode = obj->cob_inode;
- struct ll_inode_info *lli;
-
- (*p)(env, cookie, "(%s %d %d) inode: %p ",
- list_empty(&obj->cob_pending_list) ? "-" : "+",
- obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt),
- inode);
- if (inode) {
- lli = ll_i2info(inode);
- (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
- inode->i_ino, inode->i_generation, inode->i_mode,
- inode->i_nlink, atomic_read(&inode->i_count),
- lli->lli_clob, PFID(&lli->lli_fid));
- }
- return 0;
-}
-
-static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- /*
- * lov overwrites most of these fields in
- * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
- * attributes are newer.
- */
-
- attr->cat_size = i_size_read(inode);
- attr->cat_mtime = inode->i_mtime.tv_sec;
- attr->cat_atime = inode->i_atime.tv_sec;
- attr->cat_ctime = inode->i_ctime.tv_sec;
- attr->cat_blocks = inode->i_blocks;
- attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
- attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
- /* KMS is not known by this layer */
- return 0; /* layers below have to fill in the rest */
-}
-
-static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- if (valid & CAT_UID)
- inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
- if (valid & CAT_GID)
- inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
- if (valid & CAT_ATIME)
- inode->i_atime.tv_sec = attr->cat_atime;
- if (valid & CAT_MTIME)
- inode->i_mtime.tv_sec = attr->cat_mtime;
- if (valid & CAT_CTIME)
- inode->i_ctime.tv_sec = attr->cat_ctime;
- if (0 && valid & CAT_SIZE)
- cl_isize_write_nolock(inode, attr->cat_size);
- /* not currently necessary */
- if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
- mark_inode_dirty(inode);
- return 0;
-}
-
-static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- struct ll_inode_info *lli = ll_i2info(conf->coc_inode);
-
- if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
- CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
- PFID(&lli->lli_fid));
-
- ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
-
-		/* Clean up page mmaps for this inode.
-		 * The reason we do this is that if the page has already
-		 * been installed into the memory space, the process can
-		 * access it without interacting with Lustre, so the page
-		 * may be stale due to the layout change, and the process
-		 * will never be notified.
-		 * This operation is expensive, but mmapping processes have
-		 * to pay the price themselves. */
- unmap_mapping_range(conf->coc_inode->i_mapping,
- 0, OBD_OBJECT_EOF, 0);
-
- return 0;
- }
-
- if (conf->coc_opc != OBJECT_CONF_SET)
- return 0;
-
- if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) {
- CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
- PFID(&lli->lli_fid), lli->lli_layout_gen,
- conf->u.coc_md->lsm->lsm_layout_gen);
-
- lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
- ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
- } else {
- CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
- PFID(&lli->lli_fid), lli->lli_layout_gen);
-
- lli->lli_has_smd = false;
- ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
- }
- return 0;
-}
-
-static const struct cl_object_operations vvp_ops = {
- .coo_page_init = vvp_page_init,
- .coo_lock_init = vvp_lock_init,
- .coo_io_init = vvp_io_init,
- .coo_attr_get = vvp_attr_get,
- .coo_attr_set = vvp_attr_set,
- .coo_conf_set = vvp_conf_set,
- .coo_glimpse = ccc_object_glimpse
-};
-
-static const struct lu_object_operations vvp_lu_obj_ops = {
- .loo_object_init = ccc_object_init,
- .loo_object_free = ccc_object_free,
- .loo_object_print = vvp_object_print
-};
-
-struct ccc_object *cl_inode2ccc(struct inode *inode)
-{
- struct cl_inode_info *lli = cl_i2info(inode);
- struct cl_object *obj = lli->lli_clob;
- struct lu_object *lu;
-
- LASSERT(obj != NULL);
- lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
- LASSERT(lu != NULL);
- return lu2ccc(lu);
-}
-
-struct lu_object *vvp_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev)
-{
- return ccc_object_alloc(env, hdr, dev, &vvp_ops, &vvp_lu_obj_ops);
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
deleted file mode 100644
index a3cf5ad20c60..000000000000
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-
-#include "../include/obd.h"
-#include "../include/lustre_lite.h"
-
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-
-static void vvp_page_fini_common(struct ccc_page *cp)
-{
- struct page *vmpage = cp->cpg_page;
-
- LASSERT(vmpage != NULL);
- page_cache_release(vmpage);
-}
-
-static void vvp_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
-
- /*
-	 * vmpage->private was already cleared when the page was moved into
-	 * the VPG_FREEING state.
- */
- LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- vvp_page_fini_common(cp);
-}
-
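-/*
- * Own the page by locking the underlying vmpage and waiting for any
- * writeback to finish; in the nonblock case fail with -EAGAIN instead of
- * sleeping.
- */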
-static int vvp_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
-{
- struct ccc_page *vpg = cl2ccc_page(slice);
- struct page *vmpage = vpg->cpg_page;
-
- LASSERT(vmpage != NULL);
- if (nonblock) {
- if (!trylock_page(vmpage))
- return -EAGAIN;
-
- if (unlikely(PageWriteback(vmpage))) {
- unlock_page(vmpage);
- return -EAGAIN;
- }
-
- return 0;
- }
-
- lock_page(vmpage);
- wait_on_page_writeback(vmpage);
- return 0;
-}
-
-static void vvp_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
- wait_on_page_writeback(vmpage);
-}
-
-static void vvp_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
-}
-
-static void vvp_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
-
- unlock_page(cl2vm_page(slice));
-}
-
-static void vvp_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct address_space *mapping;
- struct ccc_page *cpg = cl2ccc_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
-
- mapping = vmpage->mapping;
-
- if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
- ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
-
- /*
- * truncate_complete_page() calls
- * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
- */
- truncate_complete_page(mapping, vmpage);
-}
-
-static int vvp_page_unmap(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- __u64 offset;
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
-
- offset = vmpage->index << PAGE_CACHE_SHIFT;
-
- /*
- * XXX is it safe to call this with the page lock held?
- */
- ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
- return 0;
-}
-
-static void vvp_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct inode *inode = vmpage->mapping->host;
- struct cl_object *obj = slice->cpl_obj;
-
- LASSERT(PageLocked(vmpage));
- LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
- LASSERT(inode == ccc_object_inode(obj));
-
- vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
- ClearPagePrivate(vmpage);
- vmpage->private = 0;
- /*
- * Reference from vmpage to cl_page is removed, but the reference back
- * is still here. It is removed later in vvp_page_fini().
- */
-}
-
-static void vvp_page_export(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int uptodate)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
- if (uptodate)
- SetPageUptodate(vmpage);
- else
- ClearPageUptodate(vmpage);
-}
-
-static int vvp_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
-}
-
-static int vvp_page_prep_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /* Skip the page already marked as PG_uptodate. */
- return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
-}
-
-static int vvp_page_prep_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageDirty(vmpage));
-
-	/* The ll_writepage path is not a sync write, so we need to set the
-	 * page writeback flag. */
- if (!pg->cp_sync_io)
- set_page_writeback(vmpage);
-
- vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
-
- return 0;
-}
-
-/**
- * Handles page transfer errors at VM level.
- *
- * This takes the inode as a separate argument, because the inode on which the
- * error is to be set can be different from the \a vmpage inode in the case of
- * direct-io.
- */
-static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
-{
- struct ccc_object *obj = cl_inode2ccc(inode);
-
- if (ioret == 0) {
- ClearPageError(vmpage);
- obj->cob_discard_page_warned = 0;
- } else {
- SetPageError(vmpage);
- if (ioret == -ENOSPC)
- set_bit(AS_ENOSPC, &inode->i_mapping->flags);
- else
- set_bit(AS_EIO, &inode->i_mapping->flags);
-
- if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
- obj->cob_discard_page_warned == 0) {
- obj->cob_discard_page_warned = 1;
- ll_dirty_page_discard_warn(vmpage, ioret);
- }
- }
-}
-
-static void vvp_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
- struct cl_page *page = cl_page_top(slice->cpl_page);
- struct inode *inode = ccc_object_inode(page->cp_obj);
-
- LASSERT(PageLocked(vmpage));
- CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
-
- if (cp->cpg_defer_uptodate)
- ll_ra_count_put(ll_i2sbi(inode), 1);
-
- if (ioret == 0) {
- if (!cp->cpg_defer_uptodate)
- cl_page_export(env, page, 1);
- } else
- cp->cpg_defer_uptodate = 0;
-
- if (page->cp_sync_io == NULL)
- unlock_page(vmpage);
-}
-
-static void vvp_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
-
- /*
-	 * TODO: Actually it makes sense to add the page into the oap pending
-	 * list again, so that we don't need to take the page out of the
-	 * SoM write pending list if we just hit a recoverable error
-	 * (-ENOMEM, etc.).
-	 * To implement this, we just need to return a non-zero value from the
-	 * ->cpo_completion method. The underlying transfer should be notified
-	 * and then re-add the page into the pending transfer queue. -jay
- */
-
- cp->cpg_write_queued = 0;
- vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
-
- if (pg->cp_sync_io != NULL) {
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageWriteback(vmpage));
- } else {
- LASSERT(PageWriteback(vmpage));
- /*
-		 * Only mark the page error when it's an async write,
-		 * because applications won't wait for the IO to finish.
- */
- vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
-
- end_page_writeback(vmpage);
- }
-}
-
-/**
- * Implements cl_page_operations::cpo_make_ready() method.
- *
- * This is called to yank a page from the transfer cache and to send it out as
- * a part of a transfer. This function try-locks the page. If the try-lock
- * fails, the page is owned by some concurrent IO and should be skipped (this
- * is bad, but hopefully a rare situation, as it usually results in the
- * transfer being shorter than possible).
- *
- * \retval 0 success, page can be placed into transfer
- *
- * \retval -EAGAIN page is either used by concurrent IO or has been
- * truncated. Skip it.
- */
-static int vvp_page_make_ready(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
- int result = 0;
-
- lock_page(vmpage);
- if (clear_page_dirty_for_io(vmpage)) {
- LASSERT(pg->cp_state == CPS_CACHED);
- /* This actually clears the dirty bit in the radix
- * tree. */
- set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj),
- cl2ccc_page(slice));
- CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
- } else if (pg->cp_state == CPS_PAGEOUT) {
- /* is it possible for osc_flush_async_page() to already
- * make it ready? */
- result = -EALREADY;
- } else {
-		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
- pg->cp_state);
- LBUG();
- }
- unlock_page(vmpage);
- return result;
-}
-
-static int vvp_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct ccc_page *vp = cl2ccc_page(slice);
- struct page *vmpage = vp->cpg_page;
-
- (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
- vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
- vp->cpg_write_queued, vmpage);
- if (vmpage != NULL) {
- (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
- (long)vmpage->flags, page_count(vmpage),
- page_mapcount(vmpage), vmpage->private,
- page_index(vmpage),
- list_empty(&vmpage->lru) ? "not-" : "");
- }
- (*printer)(env, cookie, "\n");
- return 0;
-}
-
-static const struct cl_page_operations vvp_page_ops = {
- .cpo_own = vvp_page_own,
- .cpo_assume = vvp_page_assume,
- .cpo_unassume = vvp_page_unassume,
- .cpo_disown = vvp_page_disown,
- .cpo_vmpage = ccc_page_vmpage,
- .cpo_discard = vvp_page_discard,
- .cpo_delete = vvp_page_delete,
- .cpo_unmap = vvp_page_unmap,
- .cpo_export = vvp_page_export,
- .cpo_is_vmlocked = vvp_page_is_vmlocked,
- .cpo_fini = vvp_page_fini,
- .cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
- .io = {
- [CRT_READ] = {
- .cpo_prep = vvp_page_prep_read,
- .cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = ccc_fail,
- },
- [CRT_WRITE] = {
- .cpo_prep = vvp_page_prep_write,
- .cpo_completion = vvp_page_completion_write,
- .cpo_make_ready = vvp_page_make_ready,
- }
- }
-};
-
-static void vvp_transient_page_verify(const struct cl_page *page)
-{
- struct inode *inode = ccc_object_inode(page->cp_obj);
-
- LASSERT(!mutex_trylock(&inode->i_mutex));
-}
-
-static int vvp_transient_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused, int nonblock)
-{
- vvp_transient_page_verify(slice->cpl_page);
- return 0;
-}
-
-static void vvp_transient_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- vvp_transient_page_verify(slice->cpl_page);
-}
-
-static void vvp_transient_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- vvp_transient_page_verify(slice->cpl_page);
-}
-
-static void vvp_transient_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- vvp_transient_page_verify(slice->cpl_page);
-}
-
-static void vvp_transient_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct cl_page *page = slice->cpl_page;
-
- vvp_transient_page_verify(slice->cpl_page);
-
- /*
-	 * For a transient page, remove it from the radix tree.
- */
- cl_page_delete(env, page);
-}
-
-static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct inode *inode = ccc_object_inode(slice->cpl_obj);
- int locked;
-
- locked = !mutex_trylock(&inode->i_mutex);
- if (!locked)
- mutex_unlock(&inode->i_mutex);
- return locked ? -EBUSY : -ENODATA;
-}
-
-static void
-vvp_transient_page_completion(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- vvp_transient_page_verify(slice->cpl_page);
-}
-
-static void vvp_transient_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *clp = slice->cpl_page;
- struct ccc_object *clobj = cl2ccc(clp->cp_obj);
-
- vvp_page_fini_common(cp);
- LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
- clobj->cob_transient_pages--;
-}
-
-static const struct cl_page_operations vvp_transient_page_ops = {
- .cpo_own = vvp_transient_page_own,
- .cpo_assume = vvp_transient_page_assume,
- .cpo_unassume = vvp_transient_page_unassume,
- .cpo_disown = vvp_transient_page_disown,
- .cpo_discard = vvp_transient_page_discard,
- .cpo_vmpage = ccc_page_vmpage,
- .cpo_fini = vvp_transient_page_fini,
- .cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
- .cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
- .io = {
- [CRT_READ] = {
- .cpo_prep = ccc_transient_page_prep,
- .cpo_completion = vvp_transient_page_completion,
- },
- [CRT_WRITE] = {
- .cpo_prep = ccc_transient_page_prep,
- .cpo_completion = vvp_transient_page_completion,
- }
- }
-};
-
-int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
-{
- struct ccc_page *cpg = cl_object_page_slice(obj, page);
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- cpg->cpg_page = vmpage;
- page_cache_get(vmpage);
-
- INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
- if (page->cp_type == CPT_CACHEABLE) {
- SetPagePrivate(vmpage);
- vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_page_ops);
- } else {
- struct ccc_object *clobj = cl2ccc(obj);
-
- LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_transient_page_ops);
- clobj->cob_transient_pages++;
- }
- return 0;
-}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
deleted file mode 100644
index e051dcc82efe..000000000000
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ /dev/null
@@ -1,616 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/selinux.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_ver.h"
-#include "../include/lustre_eacl.h"
-
-#include "llite_internal.h"
-
-#define XATTR_USER_T (1)
-#define XATTR_TRUSTED_T (2)
-#define XATTR_SECURITY_T (3)
-#define XATTR_ACL_ACCESS_T (4)
-#define XATTR_ACL_DEFAULT_T (5)
-#define XATTR_LUSTRE_T (6)
-#define XATTR_OTHER_T (7)
-
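-/*
- * Map an xattr name to one of the XATTR_*_T classes above, based on its
- * prefix (user., trusted., security., lustre.) or the POSIX ACL names.
- */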
-static
-int get_xattr_type(const char *name)
-{
- if (!strcmp(name, POSIX_ACL_XATTR_ACCESS))
- return XATTR_ACL_ACCESS_T;
-
- if (!strcmp(name, POSIX_ACL_XATTR_DEFAULT))
- return XATTR_ACL_DEFAULT_T;
-
- if (!strncmp(name, XATTR_USER_PREFIX,
- sizeof(XATTR_USER_PREFIX) - 1))
- return XATTR_USER_T;
-
- if (!strncmp(name, XATTR_TRUSTED_PREFIX,
- sizeof(XATTR_TRUSTED_PREFIX) - 1))
- return XATTR_TRUSTED_T;
-
- if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1))
- return XATTR_SECURITY_T;
-
- if (!strncmp(name, XATTR_LUSTRE_PREFIX,
- sizeof(XATTR_LUSTRE_PREFIX) - 1))
- return XATTR_LUSTRE_T;
-
- return XATTR_OTHER_T;
-}
-
-static
-int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
-{
- if ((xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T) &&
- !(sbi->ll_flags & LL_SBI_ACL))
- return -EOPNOTSUPP;
-
- if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR))
- return -EOPNOTSUPP;
- if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
- if (xattr_type == XATTR_OTHER_T)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static
-int ll_setxattr_common(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags, __u64 valid)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- int xattr_type, rc;
-#ifdef CONFIG_FS_POSIX_ACL
- struct rmtacl_ctl_entry *rce = NULL;
- posix_acl_xattr_header *new_value = NULL;
- ext_acl_xattr_header *acl = NULL;
-#endif
- const char *pv = value;
-
- xattr_type = get_xattr_type(name);
- rc = xattr_type_filter(sbi, xattr_type);
- if (rc)
- return rc;
-
- if ((xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T) &&
- !inode_owner_or_capable(inode))
- return -EPERM;
-
- /* b10667: ignore lustre special xattr for now */
- if ((xattr_type == XATTR_TRUSTED_T && strcmp(name, "trusted.lov") == 0) ||
- (xattr_type == XATTR_LUSTRE_T && strcmp(name, "lustre.lov") == 0))
- return 0;
-
- /* b15587: ignore security.capability xattr for now */
- if ((xattr_type == XATTR_SECURITY_T &&
- strcmp(name, "security.capability") == 0))
- return 0;
-
- /* LU-549: Disable security.selinux when selinux is disabled */
- if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
- strcmp(name, "security.selinux") == 0)
- return -EOPNOTSUPP;
-
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, current_pid());
- if (rce == NULL ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_RSETFACL))
- return -EOPNOTSUPP;
-
- if (rce->rce_ops == RMT_LSETFACL) {
- struct eacl_entry *ee;
-
- ee = et_search_del(&sbi->ll_et, current_pid(),
- ll_inode2fid(inode), xattr_type);
- LASSERT(ee != NULL);
- if (valid & OBD_MD_FLXATTR) {
- acl = lustre_acl_xattr_merge2ext(
- (posix_acl_xattr_header *)value,
- size, ee->ee_acl);
- if (IS_ERR(acl)) {
- ee_free(ee);
- return PTR_ERR(acl);
- }
- size = CFS_ACL_XATTR_SIZE(\
- le32_to_cpu(acl->a_count), \
- ext_acl_xattr);
- pv = (const char *)acl;
- }
- ee_free(ee);
- } else if (rce->rce_ops == RMT_RSETFACL) {
- rc = lustre_posix_acl_xattr_filter(
- (posix_acl_xattr_header *)value,
- size, &new_value);
- if (unlikely(rc < 0))
- return rc;
- size = rc;
-
- pv = (const char *)new_value;
- } else
- return -EOPNOTSUPP;
-
- valid |= rce_ops2valid(rce->rce_ops);
- }
-#endif
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- valid, name, pv, size, 0, flags,
- ll_i2suppgid(inode), &req);
-#ifdef CONFIG_FS_POSIX_ACL
- if (new_value != NULL)
- lustre_posix_acl_xattr_free(new_value, size);
- if (acl != NULL)
- lustre_ext_acl_xattr_free(acl);
-#endif
- if (rc) {
- if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
- LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
- return rc;
- }
-
- ptlrpc_req_finished(req);
- return 0;
-}
-
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
-{
- struct inode *inode = d_inode(dentry);
-
- LASSERT(inode);
- LASSERT(name);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
-
- if ((strncmp(name, XATTR_TRUSTED_PREFIX,
- sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
- (strncmp(name, XATTR_LUSTRE_PREFIX,
- sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
- struct lov_user_md *lump = (struct lov_user_md *)value;
- int rc = 0;
-
- if (size != 0 && size < sizeof(struct lov_user_md))
- return -EINVAL;
-
- /* Attributes that are saved via getxattr will always have
- * the stripe_offset as 0. Instead, the MDS should be
- * allowed to pick the starting OST index. b=17846 */
- if (lump != NULL && lump->lmm_stripe_offset == 0)
- lump->lmm_stripe_offset = -1;
-
- if (lump != NULL && S_ISREG(inode->i_mode)) {
- int flags = FMODE_WRITE;
- int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
- sizeof(*lump) : sizeof(struct lov_user_md_v3);
-
- rc = ll_lov_setstripe_ea_info(inode, dentry, flags, lump,
- lum_size);
-			/* b10667: rc is always 0 here for now */
- rc = 0;
- } else if (S_ISDIR(inode->i_mode)) {
- rc = ll_dir_setstripe(inode, lump, 0);
- }
-
- return rc;
-
- } else if (strcmp(name, XATTR_NAME_LMA) == 0 ||
- strcmp(name, XATTR_NAME_LINK) == 0)
- return 0;
-
- return ll_setxattr_common(inode, name, value, size, flags,
- OBD_MD_FLXATTR);
-}
-
-int ll_removexattr(struct dentry *dentry, const char *name)
-{
- struct inode *inode = d_inode(dentry);
-
- LASSERT(inode);
- LASSERT(name);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
- return ll_setxattr_common(inode, name, NULL, 0, 0,
- OBD_MD_FLXATTRRM);
-}
-
-static
-int ll_getxattr_common(struct inode *inode, const char *name,
- void *buffer, size_t size, __u64 valid)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- struct mdt_body *body;
- int xattr_type, rc;
- void *xdata;
- struct rmtacl_ctl_entry *rce = NULL;
- struct ll_inode_info *lli = ll_i2info(inode);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
-
-	/* listxattr has slightly different behavior from ext3:
-	 * without 'user_xattr' ext3 will list all xattr names but
-	 * filter out "^user..*"; we list them all for simplicity.
- */
- if (!name) {
- xattr_type = XATTR_OTHER_T;
- goto do_getxattr;
- }
-
- xattr_type = get_xattr_type(name);
- rc = xattr_type_filter(sbi, xattr_type);
- if (rc)
- return rc;
-
- /* b15587: ignore security.capability xattr for now */
- if ((xattr_type == XATTR_SECURITY_T &&
- strcmp(name, "security.capability") == 0))
- return -ENODATA;
-
- /* LU-549: Disable security.selinux when selinux is disabled */
- if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
- strcmp(name, "security.selinux") == 0)
- return -EOPNOTSUPP;
-
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, current_pid());
- if (rce == NULL ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_LGETFACL &&
- rce->rce_ops != RMT_RSETFACL &&
- rce->rce_ops != RMT_RGETFACL))
- return -EOPNOTSUPP;
- }
-
-	/* The POSIX ACL is protected by the LOOKUP lock. By the time we get
-	 * here, path resolution to the target inode is done, so there is a
-	 * good chance that the cached ACL is up to date.
- */
- if (xattr_type == XATTR_ACL_ACCESS_T &&
- !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
-
- struct posix_acl *acl;
-
- spin_lock(&lli->lli_lock);
- acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
-
- if (!acl)
- return -ENODATA;
-
- rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
- posix_acl_release(acl);
- return rc;
- }
- if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
- return -ENODATA;
-#endif
-
-do_getxattr:
- if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
- rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
- if (rc == -EAGAIN)
- goto getxattr_nocache;
- if (rc < 0)
- goto out_xattr;
-
- /* Add "system.posix_acl_access" to the list */
- if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
- if (size == 0) {
- rc += sizeof(XATTR_NAME_ACL_ACCESS);
- } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
- memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS,
- sizeof(XATTR_NAME_ACL_ACCESS));
- rc += sizeof(XATTR_NAME_ACL_ACCESS);
- } else {
- rc = -ERANGE;
- goto out_xattr;
- }
- }
- } else {
-getxattr_nocache:
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
- name, NULL, 0, size, 0, &req);
-
- if (rc < 0)
- goto out_xattr;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body);
-
- /* only detect the xattr size */
- if (size == 0) {
- rc = body->eadatasize;
- goto out;
- }
-
- if (size < body->eadatasize) {
- CERROR("server bug: replied size %u > %u\n",
- body->eadatasize, (int)size);
- rc = -ERANGE;
- goto out;
- }
-
- if (body->eadatasize == 0) {
- rc = -ENODATA;
- goto out;
- }
-
- /* do not need swab xattr data */
- xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
- if (!xdata) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(buffer, xdata, body->eadatasize);
- rc = body->eadatasize;
- }
-
-#ifdef CONFIG_FS_POSIX_ACL
- if (rce && rce->rce_ops == RMT_LSETFACL) {
- ext_acl_xattr_header *acl;
-
- acl = lustre_posix_acl_xattr_2ext(
- (posix_acl_xattr_header *)buffer, rc);
- if (IS_ERR(acl)) {
- rc = PTR_ERR(acl);
- goto out;
- }
-
- rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
- xattr_type, acl);
- if (unlikely(rc < 0)) {
- lustre_ext_acl_xattr_free(acl);
- goto out;
- }
- }
-#endif
-
-out_xattr:
- if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
- LCONSOLE_INFO(
- "%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), rc);
- sbi->ll_flags &= ~LL_SBI_USER_XATTR;
- }
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
-{
- struct inode *inode = d_inode(dentry);
-
- LASSERT(inode);
- LASSERT(name);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
-
- if ((strncmp(name, XATTR_TRUSTED_PREFIX,
- sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
- (strncmp(name, XATTR_LUSTRE_PREFIX,
- sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
- struct lov_stripe_md *lsm;
- struct lov_user_md *lump;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *request = NULL;
- int rc = 0, lmmsize = 0;
-
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return -ENODATA;
-
- if (size == 0 && S_ISDIR(inode->i_mode)) {
-			/* XXX directory EA size is fixed for now; optimize to
-			 * save an RPC transfer */
- rc = sizeof(struct lov_user_md);
- goto out;
- }
-
- lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL) {
- if (S_ISDIR(inode->i_mode)) {
- rc = ll_dir_getstripe(inode, &lmm,
- &lmmsize, &request);
- } else {
- rc = -ENODATA;
- }
- } else {
- /* LSM is present already after lookup/getattr call.
- * we need to grab layout lock once it is implemented */
- rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
- lmmsize = rc;
- }
- ccc_inode_lsm_put(inode, lsm);
-
- if (rc < 0)
- goto out;
-
- if (size == 0) {
-			/* We used to call ll_get_max_mdsize() here to get
-			 * the maximum buffer size, but some apps (such as
-			 * rsync 3.0.x) care about the exact xattr value
-			 * size */
- rc = lmmsize;
- goto out;
- }
-
- if (size < lmmsize) {
- CERROR("server bug: replied size %d > %d for %pd (%s)\n",
- lmmsize, (int)size, dentry, name);
- rc = -ERANGE;
- goto out;
- }
-
- lump = (struct lov_user_md *)buffer;
- memcpy(lump, lmm, lmmsize);
- /* do not return layout gen for getxattr otherwise it would
- * confuse tar --xattr by recognizing layout gen as stripe
- * offset when the file is restored. See LU-2809. */
- lump->lmm_layout_gen = 0;
-
- rc = lmmsize;
-out:
- if (request)
- ptlrpc_req_finished(request);
- else if (lmm)
- obd_free_diskmd(ll_i2dtexp(inode), &lmm);
- return rc;
- }
-
- return ll_getxattr_common(inode, name, buffer, size, OBD_MD_FLXATTR);
-}
-
-ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
- struct inode *inode = d_inode(dentry);
- int rc = 0, rc2 = 0;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *request = NULL;
- int lmmsize;
-
- LASSERT(inode);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
-
- rc = ll_getxattr_common(inode, NULL, buffer, size, OBD_MD_FLXATTRLS);
- if (rc < 0)
- goto out;
-
- if (buffer != NULL) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- char *xattr_name = buffer;
- int xlen, rem = rc;
-
- while (rem > 0) {
- xlen = strnlen(xattr_name, rem - 1) + 1;
- rem -= xlen;
- if (xattr_type_filter(sbi,
- get_xattr_type(xattr_name)) == 0) {
- /* skip OK xattr type
- * leave it in buffer
- */
- xattr_name += xlen;
- continue;
- }
- /* move up remaining xattrs in buffer
- * removing the xattr that is not OK
- */
- memmove(xattr_name, xattr_name + xlen, rem);
- rc -= xlen;
- }
- }
- if (S_ISREG(inode->i_mode)) {
- if (!ll_i2info(inode)->lli_has_smd)
- rc2 = -1;
- } else if (S_ISDIR(inode->i_mode)) {
- rc2 = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
- }
-
- if (rc2 < 0) {
- rc2 = 0;
- goto out;
- } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
- const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
- const size_t name_len = sizeof("lov") - 1;
- const size_t total_len = prefix_len + name_len + 1;
-
- if (((rc + total_len) > size) && (buffer != NULL)) {
- ptlrpc_req_finished(request);
- return -ERANGE;
- }
-
- if (buffer != NULL) {
- buffer += rc;
- memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
- memcpy(buffer + prefix_len, "lov", name_len);
- buffer[prefix_len + name_len] = '\0';
- }
- rc2 = total_len;
- }
-out:
- ptlrpc_req_finished(request);
- rc = rc + rc2;
-
- return rc;
-}
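
The filtering loop in ll_listxattr() above walks the server-provided list of NUL-terminated names and compacts the buffer in place whenever a name fails xattr_type_filter(). A minimal standalone sketch of that compaction pattern, with a caller-supplied predicate standing in for xattr_type_filter()/get_xattr_type(), might look like this:

#include <stdbool.h>
#include <string.h>

/* Drop every name rejected by keep() from a buffer of NUL-terminated
 * xattr names, shifting the survivors down, and return the new length. */
static size_t filter_xattr_names(char *buf, size_t len,
				 bool (*keep)(const char *name))
{
	char *name = buf;
	size_t rem = len;

	while (rem > 0) {
		size_t xlen = strnlen(name, rem - 1) + 1;

		rem -= xlen;
		if (keep(name)) {
			name += xlen;			/* keep it, move past it */
			continue;
		}
		memmove(name, name + xlen, rem);	/* close the gap */
		len -= xlen;
	}
	return len;
}

The returned length plays the same role as the adjusted rc in ll_listxattr(): it is the size of the surviving name list.
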
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
deleted file mode 100644
index 9e763ce244e3..000000000000
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
- *
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_ver.h"
-#include "llite_internal.h"
-
-/* If we ever have hundreds of extended attributes, we might want to consider
- * using a hash or a tree structure instead of a list for faster lookups.
- */
-struct ll_xattr_entry {
- struct list_head xe_list; /* protected with
- * lli_xattrs_list_rwsem */
- char *xe_name; /* xattr name, \0-terminated */
- char *xe_value; /* xattr value */
- unsigned xe_namelen; /* strlen(xe_name) + 1 */
- unsigned xe_vallen; /* xattr value length */
-};
-
-static struct kmem_cache *xattr_kmem;
-static struct lu_kmem_descr xattr_caches[] = {
- {
- .ckd_cache = &xattr_kmem,
- .ckd_name = "xattr_kmem",
- .ckd_size = sizeof(struct ll_xattr_entry)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-int ll_xattr_init(void)
-{
- return lu_kmem_init(xattr_caches);
-}
-
-void ll_xattr_fini(void)
-{
- lu_kmem_fini(xattr_caches);
-}
-
-/**
- * Initializes xattr cache for an inode.
- *
- * This initializes the xattr list and marks cache presence.
- */
-static void ll_xattr_cache_init(struct ll_inode_info *lli)
-{
-
-
- LASSERT(lli != NULL);
-
- INIT_LIST_HEAD(&lli->lli_xattrs);
- lli->lli_flags |= LLIF_XATTR_CACHE;
-}
-
-/**
- * This looks for a specific extended attribute.
- *
- * Find in @cache and return @xattr_name attribute in @xattr,
- * for the NULL @xattr_name return the first cached @xattr.
- *
- * \retval 0 success
- * \retval -ENODATA if not found
- */
-static int ll_xattr_cache_find(struct list_head *cache,
- const char *xattr_name,
- struct ll_xattr_entry **xattr)
-{
- struct ll_xattr_entry *entry;
-
-
-
- list_for_each_entry(entry, cache, xe_list) {
- /* xattr_name == NULL means look for any entry */
- if (xattr_name == NULL ||
- strcmp(xattr_name, entry->xe_name) == 0) {
- *xattr = entry;
- CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
- entry->xe_name, entry->xe_vallen,
- entry->xe_value);
- return 0;
- }
- }
-
- return -ENODATA;
-}
-
-/**
- * This adds an xattr.
- *
- * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
- *
- * \retval 0 success
- * \retval -ENOMEM if no memory could be allocated for the cached attr
- * \retval -EPROTO if duplicate xattr is being added
- */
-static int ll_xattr_cache_add(struct list_head *cache,
- const char *xattr_name,
- const char *xattr_val,
- unsigned xattr_val_len)
-{
- struct ll_xattr_entry *xattr;
-
-
-
- if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
- CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
- return -EPROTO;
- }
-
- OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, GFP_NOFS);
- if (xattr == NULL) {
- CDEBUG(D_CACHE, "failed to allocate xattr\n");
- return -ENOMEM;
- }
-
-	xattr->xe_namelen = strlen(xattr_name) + 1;
-	xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
- if (!xattr->xe_name) {
- CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
- xattr->xe_namelen);
- goto err_name;
- }
- xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
- if (!xattr->xe_value)
- goto err_value;
-
- xattr->xe_vallen = xattr_val_len;
- list_add(&xattr->xe_list, cache);
-
- CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
- xattr_val_len, xattr_val);
-
- return 0;
-err_value:
- kfree(xattr->xe_name);
-err_name:
- OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
-
- return -ENOMEM;
-}
-
-/**
- * This removes an extended attribute from cache.
- *
- * Remove @xattr_name attribute from @cache.
- *
- * \retval 0 success
- * \retval -ENODATA if @xattr_name is not cached
- */
-static int ll_xattr_cache_del(struct list_head *cache,
- const char *xattr_name)
-{
- struct ll_xattr_entry *xattr;
-
-
-
- CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
-
- if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
- list_del(&xattr->xe_list);
- kfree(xattr->xe_name);
- kfree(xattr->xe_value);
- OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
-
- return 0;
- }
-
- return -ENODATA;
-}
-
-/**
- * This iterates cached extended attributes.
- *
- * Walk over cached attributes in @cache and
- * fill in @xld_buffer or only calculate buffer
- * size if @xld_buffer is NULL.
- *
- * \retval >= 0 buffer list size
- * \retval -ENODATA if the list cannot fit @xld_size buffer
- */
-static int ll_xattr_cache_list(struct list_head *cache,
- char *xld_buffer,
- int xld_size)
-{
- struct ll_xattr_entry *xattr, *tmp;
- int xld_tail = 0;
-
-
-
- list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
- CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
- xld_buffer, xld_tail, xattr->xe_name);
-
- if (xld_buffer) {
- xld_size -= xattr->xe_namelen;
- if (xld_size < 0)
- break;
- memcpy(&xld_buffer[xld_tail],
- xattr->xe_name, xattr->xe_namelen);
- }
- xld_tail += xattr->xe_namelen;
- }
-
- if (xld_size < 0)
- return -ERANGE;
-
- return xld_tail;
-}
-
-/**
- * Check if the xattr cache is initialized (filled).
- *
- * \retval 0 @cache is not initialized
- * \retval 1 @cache is initialized
- */
-static int ll_xattr_cache_valid(struct ll_inode_info *lli)
-{
- return !!(lli->lli_flags & LLIF_XATTR_CACHE);
-}
-
-/**
- * This finalizes the xattr cache.
- *
- * Free all xattr memory. @lli is the inode info pointer.
- *
- * \retval 0 no error occurred
- */
-static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
-{
-
-
- if (!ll_xattr_cache_valid(lli))
- return 0;
-
- while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
- ; /* empty loop */
- lli->lli_flags &= ~LLIF_XATTR_CACHE;
-
- return 0;
-}
-
-int ll_xattr_cache_destroy(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
-
-
- down_write(&lli->lli_xattrs_list_rwsem);
- rc = ll_xattr_cache_destroy_locked(lli);
- up_write(&lli->lli_xattrs_list_rwsem);
-
- return rc;
-}
-
-/**
- * Match or enqueue a PR lock.
- *
- * Find or request an LDLM lock with xattr data.
- * Since LDLM does not provide an API for atomic match_or_enqueue,
- * the function handles it with a separate enq lock.
- * If successful, the function exits with the list lock held.
- *
- * \retval 0 no error occurred
- * \retval -ENOMEM not enough memory
- */
-static int ll_xattr_find_get_lock(struct inode *inode,
- struct lookup_intent *oit,
- struct ptlrpc_request **req)
-{
- ldlm_mode_t mode;
- struct lustre_handle lockh = { 0 };
- struct md_op_data *op_data;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
- .ei_mode = it_to_lock_mode(oit),
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast };
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obd_export *exp = sbi->ll_md_exp;
- int rc;
-
-
-
- mutex_lock(&lli->lli_xattrs_enq_lock);
- /* inode may have been shrunk and recreated, so data is gone, match lock
- * only when data exists. */
- if (ll_xattr_cache_valid(lli)) {
- /* Try matching first. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
- LCK_PR);
- if (mode != 0) {
- /* fake oit in mdc_revalidate_lock() manner */
- oit->d.lustre.it_lock_handle = lockh.cookie;
- oit->d.lustre.it_lock_mode = mode;
- goto out;
- }
- }
-
- /* Enqueue if the lock isn't cached locally. */
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- mutex_unlock(&lli->lli_xattrs_enq_lock);
- return PTR_ERR(op_data);
- }
-
- op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
-
- rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
- ll_finish_md_op_data(op_data);
-
- if (rc < 0) {
- CDEBUG(D_CACHE,
- "md_intent_lock failed with %d for fid "DFID"\n",
- rc, PFID(ll_inode2fid(inode)));
- mutex_unlock(&lli->lli_xattrs_enq_lock);
- return rc;
- }
-
- *req = (struct ptlrpc_request *)oit->d.lustre.it_data;
-out:
- down_write(&lli->lli_xattrs_list_rwsem);
- mutex_unlock(&lli->lli_xattrs_enq_lock);
-
- return 0;
-}
-
-/**
- * Refill the xattr cache.
- *
- * Fetch and cache the whole of xattrs for @inode, acquiring
- * a read or a write xattr lock depending on operation in @oit.
- * Intent is dropped on exit unless the operation is setxattr.
- *
- * \retval 0 no error occurred
- * \retval -EPROTO network protocol error
- * \retval -ENOMEM not enough memory for the cache
- */
-static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- const char *xdata, *xval, *xtail, *xvtail;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct mdt_body *body;
- __u32 *xsizes;
- int rc, i;
-
-
-
- rc = ll_xattr_find_get_lock(inode, oit, &req);
- if (rc)
- goto out_no_unlock;
-
- /* Do we have the data at this point? */
- if (ll_xattr_cache_valid(lli)) {
- ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
- rc = 0;
- goto out_maybe_drop;
- }
-
- /* Matched but no cache? Cancelled on error by a parallel refill. */
- if (unlikely(req == NULL)) {
- CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
- rc = -EIO;
- goto out_maybe_drop;
- }
-
- if (oit->d.lustre.it_status < 0) {
- CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
- oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
- rc = oit->d.lustre.it_status;
- /* xattr data is so large that we don't want to cache it */
- if (rc == -ERANGE)
- rc = -EAGAIN;
- goto out_destroy;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
- CERROR("no MDT BODY in the refill xattr reply\n");
- rc = -EPROTO;
- goto out_destroy;
- }
- /* do not need swab xattr data */
- xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
- xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
- body->aclsize);
- xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
- body->max_mdsize * sizeof(__u32));
- if (xdata == NULL || xval == NULL || xsizes == NULL) {
- CERROR("wrong setxattr reply\n");
- rc = -EPROTO;
- goto out_destroy;
- }
-
- xtail = xdata + body->eadatasize;
- xvtail = xval + body->aclsize;
-
- CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
-
- ll_xattr_cache_init(lli);
-
- for (i = 0; i < body->max_mdsize; i++) {
- CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
- /* Perform consistency checks: attr names and vals in pill */
- if (memchr(xdata, 0, xtail - xdata) == NULL) {
- CERROR("xattr protocol violation (names are broken)\n");
- rc = -EPROTO;
- } else if (xval + *xsizes > xvtail) {
- CERROR("xattr protocol violation (vals are broken)\n");
- rc = -EPROTO;
- } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
- rc = -ENOMEM;
- } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
- /* Filter out ACL ACCESS since it's cached separately */
- CDEBUG(D_CACHE, "not caching %s\n",
- XATTR_NAME_ACL_ACCESS);
- rc = 0;
- } else {
- rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
- *xsizes);
- }
- if (rc < 0) {
- ll_xattr_cache_destroy_locked(lli);
- goto out_destroy;
- }
- xdata += strlen(xdata) + 1;
- xval += *xsizes;
- xsizes++;
- }
-
- if (xdata != xtail || xval != xvtail)
- CERROR("a hole in xattr data\n");
-
- ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);
-
- goto out_maybe_drop;
-out_maybe_drop:
-
- ll_intent_drop_lock(oit);
-
- if (rc != 0)
- up_write(&lli->lli_xattrs_list_rwsem);
-out_no_unlock:
- ptlrpc_req_finished(req);
-
- return rc;
-
-out_destroy:
- up_write(&lli->lli_xattrs_list_rwsem);
-
- ldlm_lock_decref_and_cancel((struct lustre_handle *)
- &oit->d.lustre.it_lock_handle,
- oit->d.lustre.it_lock_mode);
-
- goto out_no_unlock;
-}
-
-/**
- * Get an xattr value or list xattrs using the write-through cache.
- *
- * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
- * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
- * The resulting value/list is stored in @buffer if it
- * is not larger than @size.
- *
- * \retval 0 no error occurred
- * \retval -EPROTO network protocol error
- * \retval -ENOMEM not enough memory for the cache
- * \retval -ERANGE the buffer is not large enough
- * \retval -ENODATA no such attr or the list is empty
- */
-int ll_xattr_cache_get(struct inode *inode,
- const char *name,
- char *buffer,
- size_t size,
- __u64 valid)
-{
- struct lookup_intent oit = { .it_op = IT_GETXATTR };
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc = 0;
-
-
-
- LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
-
- down_read(&lli->lli_xattrs_list_rwsem);
- if (!ll_xattr_cache_valid(lli)) {
- up_read(&lli->lli_xattrs_list_rwsem);
- rc = ll_xattr_cache_refill(inode, &oit);
- if (rc)
- return rc;
- downgrade_write(&lli->lli_xattrs_list_rwsem);
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
- }
-
- if (valid & OBD_MD_FLXATTR) {
- struct ll_xattr_entry *xattr;
-
- rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
- if (rc == 0) {
- rc = xattr->xe_vallen;
-			/* zero size means only the value size is requested, returned in rc */
- if (size != 0) {
- if (size >= xattr->xe_vallen)
- memcpy(buffer, xattr->xe_value,
- xattr->xe_vallen);
- else
- rc = -ERANGE;
- }
- }
- } else if (valid & OBD_MD_FLXATTRLS) {
- rc = ll_xattr_cache_list(&lli->lli_xattrs,
- size ? buffer : NULL, size);
- }
-
- goto out;
-out:
- up_read(&lli->lli_xattrs_list_rwsem);
-
- return rc;
-}
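
ll_xattr_cache_get() above follows the usual getxattr size-probe convention: with a zero size it only reports the value length in rc, and with a non-zero size it either copies the value or fails with -ERANGE. A caller-side sketch of that two-step convention, using a hypothetical get_xattr() as a stand-in for the cached lookup, is:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical lookup with getxattr-like semantics: size 0 probes the
 * value length, otherwise the value is copied into buf. */
extern int get_xattr(const char *name, char *buf, size_t size);

static int fetch_xattr(const char *name, char **out)
{
	int rc = get_xattr(name, NULL, 0);	/* probe the size */
	char *buf;

	if (rc < 0)
		return rc;

	buf = malloc(rc);
	if (!buf)
		return -ENOMEM;

	rc = get_xattr(name, buf, rc);		/* fetch; may still return -ERANGE on a race */
	if (rc < 0) {
		free(buf);
		return rc;
	}

	*out = buf;
	return rc;				/* value length */
}
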
diff --git a/drivers/staging/lustre/lustre/lmv/Makefile b/drivers/staging/lustre/lustre/lmv/Makefile
deleted file mode 100644
index 1a24299791d7..000000000000
--- a/drivers/staging/lustre/lustre/lmv/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += lmv.o
-lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o lproc_lmv.o
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
deleted file mode 100644
index ee235926f52b..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LMV
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <asm/div64.h>
-#include <linux/seq_file.h>
-
-#include "../include/obd_support.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_fid.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_dlm.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "lmv_internal.h"
-
-int lmv_fld_lookup(struct lmv_obd *lmv,
- const struct lu_fid *fid,
- u32 *mds)
-{
- int rc;
-
-	/* FIXME: Currently ZFS still uses a local seq for ROOT unfortunately, and
- * this fid_is_local check should be removed once LU-2240 is fixed */
- LASSERTF((fid_seq_in_fldb(fid_seq(fid)) ||
- fid_seq_is_local_file(fid_seq(fid))) &&
- fid_is_sane(fid), DFID" is insane!\n", PFID(fid));
-
- rc = fld_client_lookup(&lmv->lmv_fld, fid_seq(fid), mds,
- LU_SEQ_RANGE_MDT, NULL);
- if (rc) {
- CERROR("Error while looking for mds number. Seq %#llx, err = %d\n",
- fid_seq(fid), rc);
- return rc;
- }
-
- CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
- *mds, PFID(fid));
-
- if (*mds >= lmv->desc.ld_tgt_count) {
-		CERROR("FLD lookup got invalid mds #%x (max: %x) for fid=" DFID "\n",
-		       *mds, lmv->desc.ld_tgt_count, PFID(fid));
- rc = -EINVAL;
- }
- return rc;
-}
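
lmv_fld_lookup() above is a lookup-and-validate step: the FLD client maps a FID sequence to an MDT index, and the answer is rejected if it falls outside the known target count. A generic sketch of that pattern, with fld_lookup() as a hypothetical stand-in for fld_client_lookup(), could be:

#include <errno.h>
#include <stdio.h>

/* Hypothetical location-service query: maps a sequence to a server index. */
extern int fld_lookup(unsigned long long seq, unsigned int *mds);

static int locate_mds(unsigned long long seq, unsigned int tgt_count,
		      unsigned int *mds)
{
	int rc = fld_lookup(seq, mds);

	if (rc) {
		fprintf(stderr, "lookup failed for seq %#llx: %d\n", seq, rc);
		return rc;
	}

	/* Never trust the mapping blindly: it is about to be used as an
	 * index into the target table. */
	if (*mds >= tgt_count)
		return -EINVAL;

	return 0;
}
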
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
deleted file mode 100644
index eebe45bdceb6..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LMV
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <asm/div64.h>
-#include <linux/seq_file.h>
-#include <linux/namei.h>
-#include "../include/lustre_intent.h"
-#include "../include/obd_support.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_dlm.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "lmv_internal.h"
-
-static int lmv_intent_remote(struct obd_export *exp, void *lmm,
- int lmmsize, struct lookup_intent *it,
- const struct lu_fid *parent_fid, int flags,
- struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct ptlrpc_request *req = NULL;
- struct lustre_handle plock;
- struct md_op_data *op_data;
- struct lmv_tgt_desc *tgt;
- struct mdt_body *body;
- int pmode;
- int rc = 0;
-
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
- return -EPROTO;
-
- LASSERT((body->valid & OBD_MD_MDS));
-
- /*
- * Unfortunately, we have to lie to MDC/MDS to retrieve
-	 * attributes llite needs and provide proper locking.
- */
- if (it->it_op & IT_LOOKUP)
- it->it_op = IT_GETATTR;
-
- /*
- * We got LOOKUP lock, but we really need attrs.
- */
- pmode = it->d.lustre.it_lock_mode;
- if (pmode) {
- plock.cookie = it->d.lustre.it_lock_handle;
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
- }
-
- LASSERT(fid_is_sane(&body->fid1));
-
- tgt = lmv_find_target(lmv, &body->fid1);
- if (IS_ERR(tgt)) {
- rc = PTR_ERR(tgt);
- goto out;
- }
-
- op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- rc = -ENOMEM;
- goto out;
- }
-
- op_data->op_fid1 = body->fid1;
-	/* Send the parent FID to the remote MDT */
- if (parent_fid != NULL) {
- /* The parent fid is only for remote open to
- * check whether the open is from OBF,
- * see mdt_cross_open */
- LASSERT(it->it_op & IT_OPEN);
- op_data->op_fid2 = *parent_fid;
- /* Add object FID to op_fid3, in case it needs to check stale
- * (M_CHECK_STALE), see mdc_finish_intent_lock */
- op_data->op_fid3 = body->fid1;
- }
-
- op_data->op_bias = MDS_CROSS_REF;
- CDEBUG(D_INODE, "REMOTE_INTENT with fid="DFID" -> mds #%d\n",
- PFID(&body->fid1), tgt->ltd_idx);
-
- rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
- flags, &req, cb_blocking, extra_lock_flags);
- if (rc)
- goto out_free_op_data;
-
- /*
- * LLite needs LOOKUP lock to track dentry revocation in order to
- * maintain dcache consistency. Thus drop UPDATE|PERM lock here
- * and put LOOKUP in request.
- */
- if (it->d.lustre.it_lock_mode != 0) {
- it->d.lustre.it_remote_lock_handle =
- it->d.lustre.it_lock_handle;
- it->d.lustre.it_remote_lock_mode = it->d.lustre.it_lock_mode;
- }
-
- it->d.lustre.it_lock_handle = plock.cookie;
- it->d.lustre.it_lock_mode = pmode;
-
-out_free_op_data:
- kfree(op_data);
-out:
- if (rc && pmode)
- ldlm_lock_decref(&plock, pmode);
-
- ptlrpc_req_finished(*reqp);
- *reqp = req;
- return rc;
-}
-
-/*
- * IT_OPEN is intended to open (and possibly create) an object. The parent
- * (pid) may be a split directory.
- */
-int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- struct mdt_body *body;
- int rc;
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
-	/* If the file is to be opened by FID, there is no need to
-	 * allocate a FID at all; otherwise it will confuse the MDT */
- if ((it->it_op & IT_CREAT) &&
- !(it->it_flags & MDS_OPEN_BY_FID)) {
- /*
-		 * For open-with-create (IT_CREAT), allocate a new
-		 * fid and set up FLD for it.
- */
- op_data->op_fid3 = op_data->op_fid2;
- rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc != 0)
- return rc;
- }
-
- CDEBUG(D_INODE, "OPEN_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%d\n",
- PFID(&op_data->op_fid1),
- PFID(&op_data->op_fid2), op_data->op_name, tgt->ltd_idx);
-
- rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, flags,
- reqp, cb_blocking, extra_lock_flags);
- if (rc != 0)
- return rc;
- /*
- * Nothing is found, do not access body->fid1 as it is zero and thus
- * pointless.
- */
- if ((it->d.lustre.it_disposition & DISP_LOOKUP_NEG) &&
- !(it->d.lustre.it_disposition & DISP_OPEN_CREATE) &&
- !(it->d.lustre.it_disposition & DISP_OPEN_OPEN))
- return rc;
-
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
- return -EPROTO;
- /*
- * Not cross-ref case, just get out of here.
- */
- if (likely(!(body->valid & OBD_MD_MDS)))
- return 0;
-
- /*
- * Okay, MDS has returned success. Probably name has been resolved in
- * remote inode.
- */
- rc = lmv_intent_remote(exp, lmm, lmmsize, it, &op_data->op_fid1, flags,
- reqp, cb_blocking, extra_lock_flags);
- if (rc != 0) {
- LASSERT(rc < 0);
- /*
-		 * It is possible that some userspace application will try to
-		 * open a file as a directory and we will get -ENOTDIR here. As
-		 * this is a normal situation, we should not print an error
-		 * here, only debug info.
- */
- CDEBUG(D_INODE, "Can't handle remote %s: dir " DFID "(" DFID "):%*s: %d\n",
- LL_IT2STR(it), PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid1), op_data->op_namelen,
- op_data->op_name, rc);
- return rc;
- }
-
- return rc;
-}
-
-/*
- * Handler for: getattr, lookup and revalidate cases.
- */
-int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt = NULL;
- struct mdt_body *body;
- int rc = 0;
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- if (!fid_is_sane(&op_data->op_fid2))
- fid_zero(&op_data->op_fid2);
-
- CDEBUG(D_INODE, "LOOKUP_INTENT with fid1="DFID", fid2="DFID
- ", name='%s' -> mds #%d\n", PFID(&op_data->op_fid1),
- PFID(&op_data->op_fid2),
- op_data->op_name ? op_data->op_name : "<NULL>",
- tgt->ltd_idx);
-
- op_data->op_bias &= ~MDS_CROSS_REF;
-
- rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
- flags, reqp, cb_blocking, extra_lock_flags);
-
- if (rc < 0 || *reqp == NULL)
- return rc;
-
- /*
- * MDS has returned success. Probably name has been resolved in
- * remote inode. Let's check this.
- */
- body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
- return -EPROTO;
- /* Not cross-ref case, just get out of here. */
- if (likely(!(body->valid & OBD_MD_MDS)))
- return 0;
-
- rc = lmv_intent_remote(exp, lmm, lmmsize, it, NULL, flags, reqp,
- cb_blocking, extra_lock_flags);
-
- return rc;
-}
-
-int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- int rc;
-
- LASSERT(it != NULL);
- LASSERT(fid_is_sane(&op_data->op_fid1));
-
- CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n",
- LL_IT2STR(it), op_data->op_namelen, op_data->op_name,
- PFID(&op_data->op_fid1));
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT))
- rc = lmv_intent_lookup(exp, op_data, lmm, lmmsize, it,
- flags, reqp, cb_blocking,
- extra_lock_flags);
- else if (it->it_op & IT_OPEN)
- rc = lmv_intent_open(exp, op_data, lmm, lmmsize, it,
- flags, reqp, cb_blocking,
- extra_lock_flags);
- else
- LBUG();
- return rc;
-}
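
The create path in lmv_intent_open() above only allocates a fresh FID when the intent actually creates an object and the open is not already addressed by FID; an open-by-FID must reuse the existing FID or the MDT gets confused. A small sketch of that decision, with alloc_fid() as a hypothetical stand-in for lmv_fid_alloc(), is:

#include <stdbool.h>

struct fid { unsigned long long seq, oid, ver; };

/* Hypothetical FID allocator standing in for lmv_fid_alloc(). */
extern int alloc_fid(struct fid *fid);

static int prepare_open_fid(bool is_create, bool open_by_fid, struct fid *fid)
{
	/* Open-by-FID and plain opens keep the FID the caller supplied. */
	if (!is_create || open_by_fid)
		return 0;

	/* A real create gets a new FID chosen for the target MDT. */
	return alloc_fid(fid);
}
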
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
deleted file mode 100644
index b808728daee7..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LMV_INTERNAL_H_
-#define _LMV_INTERNAL_H_
-
-#include "../include/lustre/lustre_idl.h"
-#include "../include/obd.h"
-
-#define LMV_MAX_TGT_COUNT 128
-
-#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex)
-#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex)
-
-#define LL_IT2STR(it) \
- ((it) ? ldlm_it2str((it)->it_op) : "0")
-
-int lmv_check_connect(struct obd_device *obd);
-
-int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags);
-
-int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags);
-
-int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags);
-
-int lmv_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
- void *, int);
-int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds);
-int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
-int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data);
-
-static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req)
-{
- struct mdt_body *body;
- struct lmv_stripe_md *mea;
-
- LASSERT(req != NULL);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- if (!body || !S_ISDIR(body->mode) || !body->eadatasize)
- return NULL;
-
- mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD,
- body->eadatasize);
- LASSERT(mea != NULL);
-
- if (mea->mea_count == 0)
- return NULL;
- if (mea->mea_magic != MEA_MAGIC_LAST_CHAR &&
- mea->mea_magic != MEA_MAGIC_ALL_CHARS &&
- mea->mea_magic != MEA_MAGIC_HASH_SEGMENT)
- return NULL;
-
- return mea;
-}
-
-static inline int lmv_get_easize(struct lmv_obd *lmv)
-{
- return sizeof(struct lmv_stripe_md) +
- lmv->desc.ld_tgt_count *
- sizeof(struct lu_fid);
-}
-
-static inline struct lmv_tgt_desc *
-lmv_get_target(struct lmv_obd *lmv, u32 mds)
-{
- int count = lmv->desc.ld_tgt_count;
- int i;
-
- for (i = 0; i < count; i++) {
- if (lmv->tgts[i] == NULL)
- continue;
-
- if (lmv->tgts[i]->ltd_idx == mds)
- return lmv->tgts[i];
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct lmv_tgt_desc *
-lmv_find_target(struct lmv_obd *lmv, const struct lu_fid *fid)
-{
- u32 mds = 0;
- int rc;
-
- if (lmv->desc.ld_tgt_count > 1) {
- rc = lmv_fld_lookup(lmv, fid, &mds);
- if (rc)
- return ERR_PTR(rc);
- }
-
- return lmv_get_target(lmv, mds);
-}
-
-struct lmv_tgt_desc
-*lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
- struct lu_fid *fid);
-/* lproc_lmv.c */
-void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars);
-
-extern struct file_operations lmv_proc_target_fops;
-
-#endif
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
deleted file mode 100644
index 8f0ffa458322..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ /dev/null
@@ -1,2820 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LMV
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pagemap.h>
-#include <linux/mm.h>
-#include <asm/div64.h>
-#include <linux/seq_file.h>
-#include <linux/namei.h>
-#include <linux/uaccess.h>
-
-#include "../include/lustre/lustre_idl.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_net.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_fid.h"
-#include "lmv_internal.h"
-
-static void lmv_activate_target(struct lmv_obd *lmv,
- struct lmv_tgt_desc *tgt,
- int activate)
-{
- if (tgt->ltd_active == activate)
- return;
-
- tgt->ltd_active = activate;
- lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
-}
-
-/**
- * Error codes:
- *
- * -EINVAL : UUID can't be found in the LMV's target list
- * -ENOTCONN: The UUID is found, but the target connection is bad (!)
- * -EBADF : The UUID is found, but the OBD of the wrong type (!)
- */
-static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
- int activate)
-{
- struct lmv_tgt_desc *uninitialized_var(tgt);
- struct obd_device *obd;
- int i;
- int rc = 0;
-
- CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
- lmv, uuid->uuid, activate);
-
- spin_lock(&lmv->lmv_lock);
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL)
- continue;
-
- CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i,
- tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
-
- if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
- break;
- }
-
- if (i == lmv->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out_lmv_lock;
- }
-
- obd = class_exp2obd(tgt->ltd_exp);
- if (obd == NULL) {
- rc = -ENOTCONN;
- goto out_lmv_lock;
- }
-
- CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
- obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
- obd->obd_type->typ_name, i);
- LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
-
- if (tgt->ltd_active == activate) {
- CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
- activate ? "" : "in");
- goto out_lmv_lock;
- }
-
- CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
- activate ? "" : "in");
- lmv_activate_target(lmv, tgt, activate);
-
- out_lmv_lock:
- spin_unlock(&lmv->lmv_lock);
- return rc;
-}
-
-static struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
-{
- struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
-
- return obd_get_uuid(lmv->tgts[0]->ltd_exp);
-}
-
-static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data)
-{
- struct obd_connect_data *conn_data;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_uuid *uuid;
- int rc = 0;
-
- if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
- CERROR("unexpected notification of %s %s!\n",
- watched->obd_type->typ_name,
- watched->obd_name);
- return -EINVAL;
- }
-
- uuid = &watched->u.cli.cl_target_uuid;
- if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
- /*
- * Set MDC as active before notifying the observer, so the
- * observer can use the MDC normally.
- */
- rc = lmv_set_mdc_active(lmv, uuid,
- ev == OBD_NOTIFY_ACTIVE);
- if (rc) {
- CERROR("%sactivation of %s failed: %d\n",
- ev == OBD_NOTIFY_ACTIVE ? "" : "de",
- uuid->uuid, rc);
- return rc;
- }
- } else if (ev == OBD_NOTIFY_OCD) {
- conn_data = &watched->u.cli.cl_import->imp_connect_data;
- /*
- * XXX: Make sure that ocd_connect_flags from all targets are
- * the same. Otherwise one of MDTs runs wrong version or
- * something like this. --umka
- */
- obd->obd_self_export->exp_connect_data = *conn_data;
- }
-#if 0
- else if (ev == OBD_NOTIFY_DISCON) {
- /*
- * For disconnect event, flush fld cache for failout MDS case.
- */
- fld_client_flush(&lmv->lmv_fld);
- }
-#endif
- /*
- * Pass the notification up the chain.
- */
- if (obd->obd_observer)
- rc = obd_notify(obd->obd_observer, watched, ev, data);
-
- return rc;
-}
-
-/**
- * This is a fake connect function. Its purpose is to initialize lmv and tell
- * the caller that everything is okay. The real connection is performed later.
- */
-static int lmv_connect(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *obd,
- struct obd_uuid *cluuid, struct obd_connect_data *data,
- void *localdata)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lustre_handle conn = { 0 };
- int rc = 0;
-
- /*
- * We don't want to actually do the underlying connections more than
- * once, so keep track.
- */
- lmv->refcount++;
- if (lmv->refcount > 1) {
- *exp = NULL;
- return 0;
- }
-
- rc = class_connect(&conn, obd, cluuid);
- if (rc) {
- CERROR("class_connection() returned %d\n", rc);
- return rc;
- }
-
- *exp = class_conn2export(&conn);
- class_export_get(*exp);
-
- lmv->exp = *exp;
- lmv->connected = 0;
- lmv->cluuid = *cluuid;
-
- if (data)
- lmv->conn_data = *data;
-
- lmv->lmv_tgts_kobj = kobject_create_and_add("target_obds",
- &obd->obd_kobj);
- /*
-	 * All real clients should perform the actual connection right away,
-	 * because it is possible that LMV will not have an opportunity to
-	 * connect targets and MDC code will be called directly, for instance
-	 * while reading the ../mdc/../kbytesfree procfs file, etc.
- */
- if (data->ocd_connect_flags & OBD_CONNECT_REAL)
- rc = lmv_check_connect(obd);
-
- if (rc && lmv->lmv_tgts_kobj)
- kobject_put(lmv->lmv_tgts_kobj);
-
- return rc;
-}
-
-static void lmv_set_timeouts(struct obd_device *obd)
-{
- struct lmv_tgt_desc *tgt;
- struct lmv_obd *lmv;
- int i;
-
- lmv = &obd->u.lmv;
- if (lmv->server_timeout == 0)
- return;
-
- if (lmv->connected == 0)
- return;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0)
- continue;
-
- obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
- KEY_INTERMDS, 0, NULL, NULL);
- }
-}
-
-static int lmv_init_ea_size(struct obd_export *exp, int easize,
- int def_easize, int cookiesize, int def_cookiesize)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int i;
- int rc = 0;
- int change = 0;
-
- if (lmv->max_easize < easize) {
- lmv->max_easize = easize;
- change = 1;
- }
- if (lmv->max_def_easize < def_easize) {
- lmv->max_def_easize = def_easize;
- change = 1;
- }
- if (lmv->max_cookiesize < cookiesize) {
- lmv->max_cookiesize = cookiesize;
- change = 1;
- }
- if (lmv->max_def_cookiesize < def_cookiesize) {
- lmv->max_def_cookiesize = def_cookiesize;
- change = 1;
- }
- if (change == 0)
- return 0;
-
- if (lmv->connected == 0)
- return 0;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL ||
- lmv->tgts[i]->ltd_exp == NULL ||
- lmv->tgts[i]->ltd_active == 0) {
- CWARN("%s: NULL export for %d\n", obd->obd_name, i);
- continue;
- }
-
- rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
- cookiesize, def_cookiesize);
- if (rc) {
- CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d.\n",
- obd->obd_name, i, rc);
- break;
- }
- }
- return rc;
-}
-
-#define MAX_STRING_SIZE 128
-
-static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_uuid *cluuid = &lmv->cluuid;
- struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
- struct obd_device *mdc_obd;
- struct obd_export *mdc_exp;
- struct lu_fld_target target;
- int rc;
-
- mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
- &obd->obd_uuid);
- if (!mdc_obd) {
- CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
- return -EINVAL;
- }
-
- CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
- mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
- cluuid->uuid);
-
- if (!mdc_obd->obd_set_up) {
- CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
- return -EINVAL;
- }
-
- rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
- &lmv->conn_data, NULL);
- if (rc) {
- CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
- return rc;
- }
-
- /*
- * Init fid sequence client for this mdc and add new fld target.
- */
- rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA);
- if (rc)
- return rc;
-
- target.ft_srv = NULL;
- target.ft_exp = mdc_exp;
- target.ft_idx = tgt->ltd_idx;
-
- fld_client_add_target(&lmv->lmv_fld, &target);
-
- rc = obd_register_observer(mdc_obd, obd);
- if (rc) {
- obd_disconnect(mdc_exp);
- CERROR("target %s register_observer error %d\n",
- tgt->ltd_uuid.uuid, rc);
- return rc;
- }
-
- if (obd->obd_observer) {
- /*
- * Tell the observer about the new target.
- */
- rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
- OBD_NOTIFY_ACTIVE,
- (void *)(tgt - lmv->tgts[0]));
- if (rc) {
- obd_disconnect(mdc_exp);
- return rc;
- }
- }
-
- tgt->ltd_active = 1;
- tgt->ltd_exp = mdc_exp;
- lmv->desc.ld_active_tgt_count++;
-
- md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize,
- lmv->max_cookiesize, lmv->max_def_cookiesize);
-
- CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
- mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
-
- if (lmv->lmv_tgts_kobj)
- /* Even if we failed to create the link, that's fine */
- rc = sysfs_create_link(lmv->lmv_tgts_kobj, &mdc_obd->obd_kobj,
- mdc_obd->obd_name);
- return 0;
-}
-
-static void lmv_del_target(struct lmv_obd *lmv, int index)
-{
- if (lmv->tgts[index] == NULL)
- return;
-
- kfree(lmv->tgts[index]);
- lmv->tgts[index] = NULL;
- return;
-}
-
-static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
- __u32 index, int gen)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc = 0;
-
- CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
-
- lmv_init_lock(lmv);
-
- if (lmv->desc.ld_tgt_count == 0) {
- struct obd_device *mdc_obd;
-
- mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
- &obd->obd_uuid);
- if (!mdc_obd) {
- lmv_init_unlock(lmv);
- CERROR("%s: Target %s not attached: rc = %d\n",
- obd->obd_name, uuidp->uuid, -EINVAL);
- return -EINVAL;
- }
- }
-
- if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
- tgt = lmv->tgts[index];
- CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
- obd->obd_name,
- obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
- lmv_init_unlock(lmv);
- return -EEXIST;
- }
-
- if (index >= lmv->tgts_size) {
- /* We need to reallocate the lmv target array. */
- struct lmv_tgt_desc **newtgts, **old = NULL;
- __u32 newsize = 1;
- __u32 oldsize = 0;
-
- while (newsize < index + 1)
- newsize <<= 1;
- newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
- if (newtgts == NULL) {
- lmv_init_unlock(lmv);
- return -ENOMEM;
- }
-
- if (lmv->tgts_size) {
- memcpy(newtgts, lmv->tgts,
- sizeof(*newtgts) * lmv->tgts_size);
- old = lmv->tgts;
- oldsize = lmv->tgts_size;
- }
-
- lmv->tgts = newtgts;
- lmv->tgts_size = newsize;
- smp_rmb();
- kfree(old);
-
- CDEBUG(D_CONFIG, "tgts: %p size: %d\n", lmv->tgts,
- lmv->tgts_size);
- }
-
- tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
- if (!tgt) {
- lmv_init_unlock(lmv);
- return -ENOMEM;
- }
-
- mutex_init(&tgt->ltd_fid_mutex);
- tgt->ltd_idx = index;
- tgt->ltd_uuid = *uuidp;
- tgt->ltd_active = 0;
- lmv->tgts[index] = tgt;
- if (index >= lmv->desc.ld_tgt_count)
- lmv->desc.ld_tgt_count = index + 1;
-
- if (lmv->connected) {
- rc = lmv_connect_mdc(obd, tgt);
- if (rc) {
- spin_lock(&lmv->lmv_lock);
- lmv->desc.ld_tgt_count--;
- memset(tgt, 0, sizeof(*tgt));
- spin_unlock(&lmv->lmv_lock);
- } else {
- int easize = sizeof(struct lmv_stripe_md) +
- lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
- lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
- }
- }
-
- lmv_init_unlock(lmv);
- return rc;
-}
-
-int lmv_check_connect(struct obd_device *obd)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int i;
- int rc;
- int easize;
-
- if (lmv->connected)
- return 0;
-
- lmv_init_lock(lmv);
- if (lmv->connected) {
- lmv_init_unlock(lmv);
- return 0;
- }
-
- if (lmv->desc.ld_tgt_count == 0) {
- lmv_init_unlock(lmv);
- CERROR("%s: no targets configured.\n", obd->obd_name);
- return -EINVAL;
- }
-
- CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
- lmv->cluuid.uuid, obd->obd_name);
-
- LASSERT(lmv->tgts != NULL);
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- if (tgt == NULL)
- continue;
- rc = lmv_connect_mdc(obd, tgt);
- if (rc)
- goto out_disc;
- }
-
- lmv_set_timeouts(obd);
- class_export_put(lmv->exp);
- lmv->connected = 1;
- easize = lmv_get_easize(lmv);
- lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
- lmv_init_unlock(lmv);
- return 0;
-
- out_disc:
- while (i-- > 0) {
- int rc2;
- tgt = lmv->tgts[i];
- if (tgt == NULL)
- continue;
- tgt->ltd_active = 0;
- if (tgt->ltd_exp) {
- --lmv->desc.ld_active_tgt_count;
- rc2 = obd_disconnect(tgt->ltd_exp);
- if (rc2) {
- CERROR("LMV target %s disconnect on MDC idx %d: error %d\n",
- tgt->ltd_uuid.uuid, i, rc2);
- }
- }
- }
- class_disconnect(lmv->exp);
- lmv_init_unlock(lmv);
- return rc;
-}
-
-static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_device *mdc_obd;
- int rc;
-
- LASSERT(tgt != NULL);
- LASSERT(obd != NULL);
-
- mdc_obd = class_exp2obd(tgt->ltd_exp);
-
- if (mdc_obd) {
- mdc_obd->obd_force = obd->obd_force;
- mdc_obd->obd_fail = obd->obd_fail;
- mdc_obd->obd_no_recov = obd->obd_no_recov;
-
- if (lmv->lmv_tgts_kobj)
- sysfs_remove_link(lmv->lmv_tgts_kobj,
- mdc_obd->obd_name);
- }
-
- rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
- if (rc)
- CERROR("Can't finalize fids factory\n");
-
- CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
- tgt->ltd_exp->exp_obd->obd_name,
- tgt->ltd_exp->exp_obd->obd_uuid.uuid);
-
- obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
- rc = obd_disconnect(tgt->ltd_exp);
- if (rc) {
- if (tgt->ltd_active) {
- CERROR("Target %s disconnect error %d\n",
- tgt->ltd_uuid.uuid, rc);
- }
- }
-
- lmv_activate_target(lmv, tgt, 0);
- tgt->ltd_exp = NULL;
- return 0;
-}
-
-static int lmv_disconnect(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- int rc;
- int i;
-
- if (!lmv->tgts)
- goto out_local;
-
- /*
- * Only disconnect the underlying layers on the final disconnect.
- */
- lmv->refcount--;
- if (lmv->refcount != 0)
- goto out_local;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
- continue;
-
- lmv_disconnect_mdc(obd, lmv->tgts[i]);
- }
-
- if (lmv->lmv_tgts_kobj)
- kobject_put(lmv->lmv_tgts_kobj);
-
-out_local:
- /*
- * This is the case when no real connection is established by
- * lmv_check_connect().
- */
- if (!lmv->connected)
- class_export_put(exp);
- rc = class_disconnect(exp);
- if (lmv->refcount == 0)
- lmv->connected = 0;
- return rc;
-}
-
-static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lmv_obd *lmv = &obddev->u.lmv;
- struct getinfo_fid2path *gf;
- struct lmv_tgt_desc *tgt;
- struct getinfo_fid2path *remote_gf = NULL;
- int remote_gf_size = 0;
- int rc;
-
- gf = (struct getinfo_fid2path *)karg;
- tgt = lmv_find_target(lmv, &gf->gf_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
-repeat_fid2path:
- rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg);
- if (rc != 0 && rc != -EREMOTE)
- goto out_fid2path;
-
-	/* If remote_gf != NULL, we have just built the path segment on the
-	 * remote MDT; copy this path segment into gf. */
- if (remote_gf != NULL) {
- struct getinfo_fid2path *ori_gf;
- char *ptr;
-
- ori_gf = (struct getinfo_fid2path *)karg;
- if (strlen(ori_gf->gf_path) +
- strlen(gf->gf_path) > ori_gf->gf_pathlen) {
- rc = -EOVERFLOW;
- goto out_fid2path;
- }
-
- ptr = ori_gf->gf_path;
-
- memmove(ptr + strlen(gf->gf_path) + 1, ptr,
- strlen(ori_gf->gf_path));
-
- strncpy(ptr, gf->gf_path, strlen(gf->gf_path));
- ptr += strlen(gf->gf_path);
- *ptr = '/';
- }
-
- CDEBUG(D_INFO, "%s: get path %s "DFID" rec: %llu ln: %u\n",
- tgt->ltd_exp->exp_obd->obd_name,
- gf->gf_path, PFID(&gf->gf_fid), gf->gf_recno,
- gf->gf_linkno);
-
- if (rc == 0)
- goto out_fid2path;
-
-	/* sigh, we have to go to another MDT to continue building the path */
- if (remote_gf == NULL) {
- remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
- remote_gf = kzalloc(remote_gf_size, GFP_NOFS);
- if (!remote_gf) {
- rc = -ENOMEM;
- goto out_fid2path;
- }
- remote_gf->gf_pathlen = PATH_MAX;
- }
-
- if (!fid_is_sane(&gf->gf_fid)) {
- CERROR("%s: invalid FID "DFID": rc = %d\n",
- tgt->ltd_exp->exp_obd->obd_name,
- PFID(&gf->gf_fid), -EINVAL);
- rc = -EINVAL;
- goto out_fid2path;
- }
-
- tgt = lmv_find_target(lmv, &gf->gf_fid);
- if (IS_ERR(tgt)) {
- rc = -EINVAL;
- goto out_fid2path;
- }
-
- remote_gf->gf_fid = gf->gf_fid;
- remote_gf->gf_recno = -1;
- remote_gf->gf_linkno = -1;
- memset(remote_gf->gf_path, 0, remote_gf->gf_pathlen);
- gf = remote_gf;
- goto repeat_fid2path;
-
-out_fid2path:
- kfree(remote_gf);
- return rc;
-}
-
-static int lmv_hsm_req_count(struct lmv_obd *lmv,
- const struct hsm_user_request *hur,
- const struct lmv_tgt_desc *tgt_mds)
-{
- int i, nr = 0;
- struct lmv_tgt_desc *curr_tgt;
-
- /* count how many requests must be sent to the given target */
- for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
- curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
- if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
- nr++;
- }
- return nr;
-}
-
-static void lmv_hsm_req_build(struct lmv_obd *lmv,
- struct hsm_user_request *hur_in,
- const struct lmv_tgt_desc *tgt_mds,
- struct hsm_user_request *hur_out)
-{
- int i, nr_out;
- struct lmv_tgt_desc *curr_tgt;
-
- /* build the hsm_user_request for the given target */
- hur_out->hur_request = hur_in->hur_request;
- nr_out = 0;
- for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
- curr_tgt = lmv_find_target(lmv,
- &hur_in->hur_user_item[i].hui_fid);
- if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
- hur_out->hur_user_item[nr_out] =
- hur_in->hur_user_item[i];
- nr_out++;
- }
- }
- hur_out->hur_request.hr_itemcount = nr_out;
- memcpy(hur_data(hur_out), hur_data(hur_in),
- hur_in->hur_request.hr_data_len);
-}
-
-static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
- struct lustre_kernelcomm *lk, void *uarg)
-{
- int i, rc = 0;
-
- /* unregister request (call from llapi_hsm_copytool_fini) */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- /* best effort: try to clean as much as possible
- * (continue on error) */
- obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
- }
-
- /* Whatever the result, remove copytool from kuc groups.
- * Unreached coordinators will get EPIPE on next requests
- * and will unregister automatically.
- */
- rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
- return rc;
-}
-
-static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
- struct lustre_kernelcomm *lk, void *uarg)
-{
- struct file *filp;
- int i, j, err;
- int rc = 0;
- bool any_set = false;
-
-	/* All or nothing: try to register with all MDSs.
-	 * In case of failure, unregister from the previous MDSs,
-	 * unless the failure is due to an inactive target. */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
- len, lk, uarg);
- if (err) {
- if (lmv->tgts[i]->ltd_active) {
- /* permanent error */
- CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
- rc = err;
- lk->lk_flags |= LK_FLG_STOP;
- /* unregister from previous MDS */
- for (j = 0; j < i; j++)
- obd_iocontrol(cmd,
- lmv->tgts[j]->ltd_exp,
- len, lk, uarg);
- return rc;
- }
-			/* else: transient error.
-			 * kuc will register with the missing MDT
-			 * when it comes back */
- } else {
- any_set = true;
- }
- }
-
- if (!any_set)
- /* no registration done: return error */
- return -ENOTCONN;
-
- /* at least one registration done, with no failure */
-	filp = fget(lk->lk_wfd);
-	if (!filp)
-		return -EBADF;
-
-	rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data);
-	if (rc)
-		fput(filp);
-	return rc;
-}
-
-static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
- int len, void *karg, void *uarg)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lmv_obd *lmv = &obddev->u.lmv;
- int i = 0;
- int rc = 0;
- int set = 0;
- int count = lmv->desc.ld_tgt_count;
-
- if (count == 0)
- return -ENOTTY;
-
- switch (cmd) {
- case IOC_OBD_STATFS: {
- struct obd_ioctl_data *data = karg;
- struct obd_device *mdc_obd;
- struct obd_statfs stat_buf = {0};
- __u32 index;
-
- memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
- if (index >= count)
- return -ENODEV;
-
- if (lmv->tgts[index] == NULL ||
- lmv->tgts[index]->ltd_active == 0)
- return -ENODATA;
-
- mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
- if (!mdc_obd)
- return -EINVAL;
-
- /* copy UUID */
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
- min((int) data->ioc_plen2,
- (int) sizeof(struct obd_uuid))))
- return -EFAULT;
-
- rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- 0);
- if (rc)
- return rc;
- if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int) data->ioc_plen1,
- (int) sizeof(stat_buf))))
- return -EFAULT;
- break;
- }
- case OBD_IOC_QUOTACTL: {
- struct if_quotactl *qctl = karg;
- struct lmv_tgt_desc *tgt = NULL;
- struct obd_quotactl *oqctl;
-
- if (qctl->qc_valid == QC_MDTIDX) {
- if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
- return -EINVAL;
-
- tgt = lmv->tgts[qctl->qc_idx];
- if (tgt == NULL || tgt->ltd_exp == NULL)
- return -EINVAL;
- } else if (qctl->qc_valid == QC_UUID) {
- for (i = 0; i < count; i++) {
- tgt = lmv->tgts[i];
- if (tgt == NULL)
- continue;
- if (!obd_uuid_equals(&tgt->ltd_uuid,
- &qctl->obd_uuid))
- continue;
-
- if (tgt->ltd_exp == NULL)
- return -EINVAL;
-
- break;
- }
- } else {
- return -EINVAL;
- }
-
- if (i >= count)
- return -EAGAIN;
-
- LASSERT(tgt && tgt->ltd_exp);
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(tgt->ltd_exp, oqctl);
- if (rc == 0) {
- QCTL_COPY(qctl, oqctl);
- qctl->qc_valid = QC_MDTIDX;
- qctl->obd_uuid = tgt->ltd_uuid;
- }
- kfree(oqctl);
- break;
- }
- case OBD_IOC_CHANGELOG_SEND:
- case OBD_IOC_CHANGELOG_CLEAR: {
- struct ioc_changelog *icc = karg;
-
- if (icc->icc_mdtindex >= count)
- return -ENODEV;
-
- if (lmv->tgts[icc->icc_mdtindex] == NULL ||
- lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL ||
- lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
- return -ENODEV;
- rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
- sizeof(*icc), icc, NULL);
- break;
- }
- case LL_IOC_GET_CONNECT_FLAGS: {
- if (lmv->tgts[0] == NULL)
- return -ENODATA;
- rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
- break;
- }
- case OBD_IOC_FID2PATH: {
- rc = lmv_fid2path(exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_STATE_GET:
- case LL_IOC_HSM_STATE_SET:
- case LL_IOC_HSM_ACTION: {
- struct md_op_data *op_data = karg;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- if (tgt->ltd_exp == NULL)
- return -EINVAL;
-
- rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_PROGRESS: {
- const struct hsm_progress_kernel *hpk = karg;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &hpk->hpk_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
- rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_REQUEST: {
- struct hsm_user_request *hur = karg;
- struct lmv_tgt_desc *tgt;
- unsigned int reqcount = hur->hur_request.hr_itemcount;
-
- if (reqcount == 0)
- return 0;
-
- /* if the request is about a single fid
- * or if there is a single MDS, no need to split
- * the request. */
- if (reqcount == 1 || count == 1) {
- tgt = lmv_find_target(lmv,
- &hur->hur_user_item[0].hui_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
- rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
- } else {
- /* split fid list to their respective MDS */
- for (i = 0; i < count; i++) {
- unsigned int nr, reqlen;
- int rc1;
- struct hsm_user_request *req;
-
- nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]);
- if (nr == 0) /* nothing for this MDS */
- continue;
-
- /* build a request with fids for this MDS */
- reqlen = offsetof(typeof(*hur),
- hur_user_item[nr])
- + hur->hur_request.hr_data_len;
- req = libcfs_kvzalloc(reqlen, GFP_NOFS);
- if (req == NULL)
- return -ENOMEM;
-
- lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
-
- rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
- reqlen, req, uarg);
- if (rc1 != 0 && rc == 0)
- rc = rc1;
- kvfree(req);
- }
- }
- break;
- }
- case LL_IOC_LOV_SWAP_LAYOUTS: {
- struct md_op_data *op_data = karg;
- struct lmv_tgt_desc *tgt1, *tgt2;
-
- tgt1 = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt1))
- return PTR_ERR(tgt1);
-
- tgt2 = lmv_find_target(lmv, &op_data->op_fid2);
- if (IS_ERR(tgt2))
- return PTR_ERR(tgt2);
-
- if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL))
- return -EINVAL;
-
- /* only files on same MDT can have their layouts swapped */
- if (tgt1->ltd_idx != tgt2->ltd_idx)
- return -EPERM;
-
- rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg);
- break;
- }
- case LL_IOC_HSM_CT_START: {
- struct lustre_kernelcomm *lk = karg;
- if (lk->lk_flags & LK_FLG_STOP)
- rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg);
- else
- rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg);
- break;
- }
- default:
- for (i = 0; i < count; i++) {
- struct obd_device *mdc_obd;
- int err;
-
- if (lmv->tgts[i] == NULL ||
- lmv->tgts[i]->ltd_exp == NULL)
- continue;
-			/* ll_umount_begin() sets the force flag on lmv, not
-			 * on mdc; pass it through. */
- mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp);
- mdc_obd->obd_force = obddev->obd_force;
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
- karg, uarg);
- if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
- return err;
- } else if (err) {
- if (lmv->tgts[i]->ltd_active) {
- CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
- if (!rc)
- rc = err;
- }
- } else
- set = 1;
- }
- if (!set && !rc)
- rc = -EIO;
- }
- return rc;
-}
-
-#if 0
-static int lmv_all_chars_policy(int count, const char *name,
- int len)
-{
- unsigned int c = 0;
-
- while (len > 0)
- c += name[--len];
- c = c % count;
- return c;
-}
-
-static int lmv_nid_policy(struct lmv_obd *lmv)
-{
- struct obd_import *imp;
- __u32 id;
-
- /*
- * XXX: To get nid we assume that underlying obd device is mdc.
- */
- imp = class_exp2cliimp(lmv->tgts[0].ltd_exp);
- id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32);
- return id % lmv->desc.ld_tgt_count;
-}
-
-static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
- enum placement_policy placement)
-{
- switch (placement) {
- case PLACEMENT_CHAR_POLICY:
- return lmv_all_chars_policy(lmv->desc.ld_tgt_count,
- op_data->op_name,
- op_data->op_namelen);
- case PLACEMENT_NID_POLICY:
- return lmv_nid_policy(lmv);
-
- default:
- break;
- }
-
- CERROR("Unsupported placement policy %x\n", placement);
- return -EINVAL;
-}
-#endif
-
-/**
- * This is the _inode_ placement policy function (not the name-based one).
- */
-static int lmv_placement_policy(struct obd_device *obd,
- struct md_op_data *op_data, u32 *mds)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
-
- LASSERT(mds != NULL);
-
- if (lmv->desc.ld_tgt_count == 1) {
- *mds = 0;
- return 0;
- }
-
- /**
- * If stripe_offset is provided during setdirstripe
- * (setdirstripe -i xx), xx MDS will be chosen.
- */
- if (op_data->op_cli_flags & CLI_SET_MEA) {
- struct lmv_user_md *lum;
-
- lum = (struct lmv_user_md *)op_data->op_data;
- if (lum->lum_type == LMV_STRIPE_TYPE &&
- lum->lum_stripe_offset != -1) {
- if (lum->lum_stripe_offset >= lmv->desc.ld_tgt_count) {
- CERROR("%s: Stripe_offset %d > MDT count %d: rc = %d\n",
- obd->obd_name,
- lum->lum_stripe_offset,
- lmv->desc.ld_tgt_count, -ERANGE);
- return -ERANGE;
- }
- *mds = lum->lum_stripe_offset;
- return 0;
- }
- }
-
-	/* Allocate the new fid on a target chosen according to the operation
-	 * type and the parent's home MDS. */
- *mds = op_data->op_mds;
- return 0;
-}
-
-int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
-{
- struct lmv_tgt_desc *tgt;
- int rc;
-
- tgt = lmv_get_target(lmv, mds);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- /*
-	 * New seq allocation and FLD setup should be atomic. Otherwise the
-	 * server may find that the seq in the newly allocated fid is not yet
-	 * known.
- */
- mutex_lock(&tgt->ltd_fid_mutex);
-
- if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL) {
- rc = -ENODEV;
- goto out;
- }
-
- /*
-	 * Ask the underlying tgt layer to allocate a new fid.
- */
- rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
- if (rc > 0) {
- LASSERT(fid_is_sane(fid));
- rc = 0;
- }
-
-out:
- mutex_unlock(&tgt->ltd_fid_mutex);
- return rc;
-}
-
-int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- u32 mds = 0;
- int rc;
-
- LASSERT(op_data != NULL);
- LASSERT(fid != NULL);
-
- rc = lmv_placement_policy(obd, op_data, &mds);
- if (rc) {
- CERROR("Can't get target for allocating fid, rc %d\n",
- rc);
- return rc;
- }
-
-	rc = __lmv_fid_alloc(lmv, fid, mds);
-	if (rc)
-		CERROR("Can't alloc new fid, rc %d\n", rc);
-
-	return rc;
-}
-
-static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lprocfs_static_vars lvars = { NULL };
- struct lmv_desc *desc;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("LMV setup requires a descriptor\n");
- return -EINVAL;
- }
-
- desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
- if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
- CERROR("Lmv descriptor size wrong: %d > %d\n",
- (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
- return -EINVAL;
- }
-
- lmv->tgts = kcalloc(32, sizeof(*lmv->tgts), GFP_NOFS);
- if (lmv->tgts == NULL)
- return -ENOMEM;
- lmv->tgts_size = 32;
-
- obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
- lmv->desc.ld_tgt_count = 0;
- lmv->desc.ld_active_tgt_count = 0;
- lmv->max_cookiesize = 0;
- lmv->max_def_easize = 0;
- lmv->max_easize = 0;
- lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
-
- spin_lock_init(&lmv->lmv_lock);
- mutex_init(&lmv->init_mutex);
-
- lprocfs_lmv_init_vars(&lvars);
-
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
- rc = ldebugfs_seq_create(obd->obd_debugfs_entry, "target_obd",
- 0444, &lmv_proc_target_fops, obd);
- if (rc)
- CWARN("%s: error adding LMV target_obd file: rc = %d\n",
- obd->obd_name, rc);
- rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
- LUSTRE_CLI_FLD_HASH_DHT);
- if (rc) {
- CERROR("Can't init FLD, err %d\n", rc);
- goto out;
- }
-
- return 0;
-
-out:
- return rc;
-}
-
-static int lmv_cleanup(struct obd_device *obd)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
-
- fld_client_fini(&lmv->lmv_fld);
- if (lmv->tgts != NULL) {
- int i;
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL)
- continue;
- lmv_del_target(lmv, i);
- }
- kfree(lmv->tgts);
- lmv->tgts_size = 0;
- }
- return 0;
-}
-
-static int lmv_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- struct lustre_cfg *lcfg = buf;
- struct obd_uuid obd_uuid;
- int gen;
- __u32 index;
- int rc;
-
- switch (lcfg->lcfg_command) {
- case LCFG_ADD_MDC:
- /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID
- * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) {
- rc = -EINVAL;
- goto out;
- }
-
- obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
-
-		if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1) {
- rc = -EINVAL;
- goto out;
- }
- if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", &gen) != 1) {
- rc = -EINVAL;
- goto out;
- }
- rc = lmv_add_target(obd, &obd_uuid, index, gen);
- goto out;
- default:
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- rc = -EINVAL;
- goto out;
- }
-out:
- return rc;
-}
-
-static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct obd_statfs *temp;
- int rc = 0;
- int i;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- temp = kzalloc(sizeof(*temp), GFP_NOFS);
- if (!temp)
- return -ENOMEM;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
- continue;
-
- rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp,
- max_age, flags);
- if (rc) {
- CERROR("can't stat MDS #%d (%s), error %d\n", i,
- lmv->tgts[i]->ltd_exp->exp_obd->obd_name,
- rc);
- goto out_free_temp;
- }
-
- if (i == 0) {
- *osfs = *temp;
-			/* If the statfs is from mount, it only needs to
-			 * retrieve the necessary information from MDT0,
-			 * i.e. mount does not need the merged osfs
-			 * from all of the MDTs.
-			 * This also means clients can be mounted as long
-			 * as MDT0 is in service. */
- if (flags & OBD_STATFS_FOR_MDT0)
- goto out_free_temp;
- } else {
- osfs->os_bavail += temp->os_bavail;
- osfs->os_blocks += temp->os_blocks;
- osfs->os_ffree += temp->os_ffree;
- osfs->os_files += temp->os_files;
- }
- }
-
-out_free_temp:
- kfree(temp);
- return rc;
-}
-
-static int lmv_getstatus(struct obd_export *exp,
- struct lu_fid *fid)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- rc = md_getstatus(lmv->tgts[0]->ltd_exp, fid);
- return rc;
-}
-
-static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size, int output_size,
- int flags, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_getxattr(tgt->ltd_exp, fid, valid, name, input,
- input_size, output_size, flags, request);
-
- return rc;
-}
-
-static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *name,
- const char *input, int input_size, int output_size,
- int flags, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_setxattr(tgt->ltd_exp, fid, valid, name, input,
- input_size, output_size, flags, suppgid,
- request);
-
- return rc;
-}
-
-static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- if (op_data->op_flags & MF_GET_MDT_IDX) {
- op_data->op_mds = tgt->ltd_idx;
- return 0;
- }
-
- rc = md_getattr(tgt->ltd_exp, op_data, request);
-
- return rc;
-}
-
-static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int i;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
-
- /*
- * With DNE every object can have two locks in different namespaces:
-	 * a lookup lock in the namespace of the MDT storing the direntry and an
-	 * update/open lock in the namespace of the MDT storing the inode.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
- continue;
- md_null_inode(lmv->tgts[i]->ltd_exp, fid);
- }
-
- return 0;
-}
-
-static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_iterator_t it, void *data)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int i;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
-
- /*
- * With DNE every object can have two locks in different namespaces:
-	 * a lookup lock in the namespace of the MDT storing the direntry and an
-	 * update/open lock in the namespace of the MDT storing the inode.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
- continue;
- rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
- if (rc)
- return rc;
- }
-
- return rc;
-}
-
-static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
- rc = md_close(tgt->ltd_exp, op_data, mod, request);
- return rc;
-}
-
-struct lmv_tgt_desc
-*lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
- struct lu_fid *fid)
-{
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return tgt;
-
- op_data->op_mds = tgt->ltd_idx;
-
- return tgt;
-}
-
-static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid,
- __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- if (!lmv->desc.ld_active_tgt_count)
- return -EIO;
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc)
- return rc;
-
-	CDEBUG(D_INODE, "CREATE '%.*s' on "DFID" -> mds #%x\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- op_data->op_mds);
-
- op_data->op_flags |= MF_MDC_CANCEL_FID1;
- rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
- cap_effective, rdev, request);
-
- if (rc == 0) {
- if (*request == NULL)
- return rc;
- CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
- }
- return rc;
-}
-
-static int lmv_done_writing(struct obd_export *exp,
- struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_done_writing(tgt->ltd_exp, op_data, mod);
- return rc;
-}
-
-static int
-lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- __u64 extra_lock_flags)
-{
- struct ptlrpc_request *req = it->d.lustre.it_data;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lustre_handle plock;
- struct lmv_tgt_desc *tgt;
- struct md_op_data *rdata;
- struct lu_fid fid1;
- struct mdt_body *body;
- int rc = 0;
- int pmode;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
-
- if (!(body->valid & OBD_MD_MDS))
- return 0;
-
- CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
- LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1));
-
- /*
- * We got LOOKUP lock, but we really need attrs.
- */
- pmode = it->d.lustre.it_lock_mode;
- LASSERT(pmode != 0);
- memcpy(&plock, lockh, sizeof(plock));
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
- fid1 = body->fid1;
-
- ptlrpc_req_finished(req);
-
- tgt = lmv_find_target(lmv, &fid1);
- if (IS_ERR(tgt)) {
- rc = PTR_ERR(tgt);
- goto out;
- }
-
- rdata = kzalloc(sizeof(*rdata), GFP_NOFS);
- if (!rdata) {
- rc = -ENOMEM;
- goto out;
- }
-
- rdata->op_fid1 = fid1;
- rdata->op_bias = MDS_CROSS_REF;
-
- rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
- lmm, lmmsize, NULL, extra_lock_flags);
- kfree(rdata);
-out:
- ldlm_lock_decref(&plock, pmode);
- return rc;
-}
-
-static int
-lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **req, __u64 extra_lock_flags)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
- LL_IT2STR(it), PFID(&op_data->op_fid1));
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
- LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
-
- rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh,
- lmm, lmmsize, req, extra_lock_flags);
-
- if (rc == 0 && it && it->it_op == IT_OPEN) {
- rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
- lmm, lmmsize, extra_lock_flags);
- }
- return rc;
-}
-
-static int
-lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req = NULL;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- struct mdt_body *body;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
-	CDEBUG(D_INODE, "GETATTR_NAME for %.*s on "DFID" -> mds #%d\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- tgt->ltd_idx);
-
- rc = md_getattr_name(tgt->ltd_exp, op_data, request);
- if (rc != 0)
- return rc;
-
- body = req_capsule_server_get(&(*request)->rq_pill,
- &RMF_MDT_BODY);
- LASSERT(body != NULL);
-
- if (body->valid & OBD_MD_MDS) {
- struct lu_fid rid = body->fid1;
- CDEBUG(D_INODE, "Request attrs for "DFID"\n",
- PFID(&rid));
-
- tgt = lmv_find_target(lmv, &rid);
- if (IS_ERR(tgt)) {
- ptlrpc_req_finished(*request);
- return PTR_ERR(tgt);
- }
-
- op_data->op_fid1 = rid;
- op_data->op_valid |= OBD_MD_FLCROSSREF;
- op_data->op_namelen = 0;
- op_data->op_name = NULL;
- rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
- ptlrpc_req_finished(*request);
- *request = req;
- }
-
- return rc;
-}
-
-#define md_op_data_fid(op_data, fl) \
- (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
- fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
- fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
- fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
- NULL)
-
-static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
- int op_tgt, ldlm_mode_t mode, int bits, int flag)
-{
- struct lu_fid *fid = md_op_data_fid(op_data, flag);
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- ldlm_policy_data_t policy = { {0} };
- int rc = 0;
-
- if (!fid_is_sane(fid))
- return 0;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- if (tgt->ltd_idx != op_tgt) {
- CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
- policy.l_inodebits.bits = bits;
- rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
- mode, LCF_ASYNC, NULL);
- } else {
- CDEBUG(D_INODE,
- "EARLY_CANCEL skip operation target %d on "DFID"\n",
- op_tgt, PFID(fid));
- op_data->op_flags |= flag;
- rc = 0;
- }
-
- return rc;
-}
-
-/*
- * llite passes the fid of the target inode in op_data->op_fid1 and the fid
- * of the directory in op_data->op_fid2.
- */
-static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- LASSERT(op_data->op_namelen != 0);
-
-	CDEBUG(D_INODE, "LINK "DFID":%.*s to "DFID"\n",
- PFID(&op_data->op_fid2), op_data->op_namelen,
- op_data->op_name, PFID(&op_data->op_fid1));
-
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- /*
- * Cancel UPDATE lock on child (fid1).
- */
- op_data->op_flags |= MF_MDC_CANCEL_FID2;
- rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
- MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
- if (rc != 0)
- return rc;
-
- rc = md_link(tgt->ltd_exp, op_data, request);
-
- return rc;
-}
-
-static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *src_tgt;
- struct lmv_tgt_desc *tgt_tgt;
- int rc;
-
- LASSERT(oldlen != 0);
-
-	CDEBUG(D_INODE, "RENAME %.*s in "DFID" to %.*s in "DFID"\n",
- oldlen, old, PFID(&op_data->op_fid1),
- newlen, new, PFID(&op_data->op_fid2));
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
- src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(src_tgt))
- return PTR_ERR(src_tgt);
-
- tgt_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
- if (IS_ERR(tgt_tgt))
- return PTR_ERR(tgt_tgt);
- /*
- * LOOKUP lock on src child (fid3) should also be cancelled for
- * src_tgt in mdc_rename.
- */
- op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
-
- /*
- * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
- * own target.
- */
- rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_UPDATE,
- MF_MDC_CANCEL_FID2);
-
- /*
- * Cancel LOOKUP locks on tgt child (fid4) for parent tgt_tgt.
- */
- if (rc == 0) {
- rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_LOOKUP,
- MF_MDC_CANCEL_FID4);
- }
-
- /*
- * Cancel all the locks on tgt child (fid4).
- */
- if (rc == 0)
- rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
- LCK_EX, MDS_INODELOCK_FULL,
- MF_MDC_CANCEL_FID4);
-
- if (rc == 0)
- rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
- new, newlen, request);
- return rc;
-}
-
-static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
- struct ptlrpc_request **request,
- struct md_open_data **mod)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x\n",
- PFID(&op_data->op_fid1), op_data->op_attr.ia_valid);
-
- op_data->op_flags |= MF_MDC_CANCEL_FID1;
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
- ea2len, request, mod);
-
- return rc;
-}
-
-static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_sync(tgt->ltd_exp, fid, request);
- return rc;
-}
-
-/*
- * Adjust a set of pages, each page containing an array of lu_dirpages,
- * so that each page can be used as a single logical lu_dirpage.
- *
- * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
- * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
- * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
- * value is used as a cookie to request the next lu_dirpage in a
- * directory listing that spans multiple pages (two in this example):
- * ________
- * | |
- * .|--------v------- -----.
- * |s|e|f|p|ent|ent| ... |ent|
- * '--|-------------- -----' Each CFS_PAGE contains a single
- * '------. lu_dirpage.
- * .---------v------- -----.
- * |s|e|f|p|ent| 0 | ... | 0 |
- * '----------------- -----'
- *
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
- * larger than LU_PAGE_SIZE, a single host page may contain multiple
- * lu_dirpages. After reading the lu_dirpages from the MDS, the
- * ldp_hash_end of the first lu_dirpage refers to the one immediately
- * after it in the same CFS_PAGE (arrows simplified for brevity, but
- * in general e0==s1, e1==s2, etc.):
- *
- * .-------------------- -----.
- * |s0|e0|f0|p|ent|ent| ... |ent|
- * |---v---------------- -----|
- * |s1|e1|f1|p|ent|ent| ... |ent|
- * |---v---------------- -----| Here, each CFS_PAGE contains
- * ... multiple lu_dirpages.
- * |---v---------------- -----|
- * |s'|e'|f'|p|ent|ent| ... |ent|
- * '---|---------------- -----'
- * v
- * .----------------------------.
- * | next CFS_PAGE |
- *
- * This structure is transformed into a single logical lu_dirpage as follows:
- *
- * - Replace e0 with e' so the request for the next lu_dirpage gets the page
- * labeled 'next CFS_PAGE'.
- *
- * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
- * a hash collision with the next page exists.
- *
- * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
- * to the first entry of the next lu_dirpage.
- */
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
-static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
-{
- int i;
-
- for (i = 0; i < ncfspgs; i++) {
- struct lu_dirpage *dp = kmap(pages[i]);
- struct lu_dirpage *first = dp;
- struct lu_dirent *end_dirent = NULL;
- struct lu_dirent *ent;
- __u64 hash_end = dp->ldp_hash_end;
- __u32 flags = dp->ldp_flags;
-
- while (--nlupgs > 0) {
- ent = lu_dirent_start(dp);
- for (end_dirent = ent; ent != NULL;
- end_dirent = ent, ent = lu_dirent_next(ent))
- ;
-
- /* Advance dp to next lu_dirpage. */
- dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
-
- /* Check if we've reached the end of the CFS_PAGE. */
- if (!((unsigned long)dp & ~CFS_PAGE_MASK))
- break;
-
- /* Save the hash and flags of this lu_dirpage. */
- hash_end = dp->ldp_hash_end;
- flags = dp->ldp_flags;
-
- /* Check if lu_dirpage contains no entries. */
- if (!end_dirent)
- break;
-
-			/* Enlarge the end entry's lde_reclen from 0 so it
-			 * spans to the first entry of the next lu_dirpage. */
- LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
- end_dirent->lde_reclen =
- cpu_to_le16((char *)(dp->ldp_entries) -
- (char *)end_dirent);
- }
-
- first->ldp_hash_end = hash_end;
- first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
- first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
-
- kunmap(pages[i]);
- }
- LASSERTF(nlupgs == 0, "left = %d", nlupgs);
-}
-#else
-#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
-
-static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
- struct page **pages, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- __u64 offset = op_data->op_offset;
- int rc;
-	int ncfspgs; /* number of pages read, in PAGE_CACHE_SIZE units */
-	int nlupgs; /* number of pages read, in LU_PAGE_SIZE units */
- struct lmv_tgt_desc *tgt;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- CDEBUG(D_INODE, "READPAGE at %#llx from "DFID"\n",
- offset, PFID(&op_data->op_fid1));
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
- if (rc != 0)
- return rc;
-
- ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
- >> PAGE_CACHE_SHIFT;
- nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
- LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
- LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
-
- CDEBUG(D_INODE, "read %d(%d)/%d pages\n", ncfspgs, nlupgs,
- op_data->op_npages);
-
- lmv_adjust_dirpages(pages, ncfspgs, nlupgs);
-
- return rc;
-}
-
-static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt = NULL;
- struct mdt_body *body;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-retry:
- /* Send unlink requests to the MDT where the child is located */
- if (likely(!fid_is_zero(&op_data->op_fid2)))
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
- else
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
- op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
- op_data->op_cap = cfs_curproc_cap_pack();
-
- /*
-	 * If the child's fid is given, cancel unused locks for it if it lives
-	 * on a different export than the parent.
- *
- * LOOKUP lock for child (fid3) should also be cancelled on parent
- * tgt_tgt in mdc_unlink().
- */
- op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
-
- /*
- * Cancel FULL locks on child (fid3).
- */
- rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
- MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
-
- if (rc != 0)
- return rc;
-
- CDEBUG(D_INODE, "unlink with fid="DFID"/"DFID" -> mds #%d\n",
- PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx);
-
- rc = md_unlink(tgt->ltd_exp, op_data, request);
- if (rc != 0 && rc != -EREMOTE)
- return rc;
-
- body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
- return -EPROTO;
-
- /* Not cross-ref case, just get out of here. */
- if (likely(!(body->valid & OBD_MD_MDS)))
- return 0;
-
- CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
- exp->exp_obd->obd_name, PFID(&body->fid1));
-
-	/* This is a remote object; try the remote MDT. Note: it may
-	 * retry more than once here. Consider the following case, where
-	 * /mnt/lustre is the root on MDT0 and remote1 is on MDT1:
-	 * 1. Initially client A does not know where remote1 is; it sends
-	 *    the unlink RPC to MDT0. MDT0 returns -EREMOTE, so A
-	 *    resends the unlink RPC to MDT1 (1st retry).
-	 *
-	 * 2. While that unlink RPC is in flight, client B does
-	 *    mv /mnt/lustre/remote1 /mnt/lustre/remote2
-	 *    and creates a new remote1, but on MDT0.
-	 *
-	 * 3. MDT1 gets the unlink RPC (from A), takes the remote lock on
-	 *    /mnt/lustre, looks up the fid of remote1, finds that it is
-	 *    a remote dir again, and replies -EREMOTE again.
-	 *
-	 * 4. A then resends the unlink RPC to MDT0 (2nd retry).
-	 *
-	 * In theory, this could retry an unlimited number of times, but
-	 * it should be a very rare case. */
- op_data->op_fid2 = body->fid1;
- ptlrpc_req_finished(*request);
- *request = NULL;
-
- goto retry;
-}
-
-static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
- struct lmv_obd *lmv = &obd->u.lmv;
-
- switch (stage) {
- case OBD_CLEANUP_EARLY:
-		/* XXX: obd_precleanup() should be called down the
-		 * stack here. */
- break;
- case OBD_CLEANUP_EXPORTS:
- fld_client_debugfs_fini(&lmv->lmv_fld);
- lprocfs_obd_cleanup(obd);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
-{
- struct obd_device *obd;
- struct lmv_obd *lmv;
- int rc = 0;
-
- obd = class_exp2obd(exp);
- if (obd == NULL) {
- CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
- exp->exp_handle.h_cookie);
- return -EINVAL;
- }
-
- lmv = &obd->u.lmv;
- if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
- struct lmv_tgt_desc *tgt;
- int i;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- LASSERT(*vallen == sizeof(__u32));
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
- /*
- * All tgts should be connected when this gets called.
- */
- if (tgt == NULL || tgt->ltd_exp == NULL)
- continue;
-
- if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
- vallen, val, NULL))
- return 0;
- }
- return -EINVAL;
- } else if (KEY_IS(KEY_MAX_EASIZE) ||
- KEY_IS(KEY_DEFAULT_EASIZE) ||
- KEY_IS(KEY_MAX_COOKIESIZE) ||
- KEY_IS(KEY_DEFAULT_COOKIESIZE) ||
- KEY_IS(KEY_CONN_DATA)) {
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- /*
-		 * Forward this request to the first MDS; it should know the
-		 * LOV desc.
- */
- rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
- vallen, val, NULL);
- if (!rc && KEY_IS(KEY_CONN_DATA))
- exp->exp_connect_data = *(struct obd_connect_data *)val;
- return rc;
- } else if (KEY_IS(KEY_TGT_COUNT)) {
- *((int *)val) = lmv->desc.ld_tgt_count;
- return 0;
- }
-
- CDEBUG(D_IOCTL, "Invalid key\n");
- return -EINVAL;
-}
-
-static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- struct lmv_tgt_desc *tgt;
- struct obd_device *obd;
- struct lmv_obd *lmv;
- int rc = 0;
-
- obd = class_exp2obd(exp);
- if (obd == NULL) {
- CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
- exp->exp_handle.h_cookie);
- return -EINVAL;
- }
- lmv = &obd->u.lmv;
-
- if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) {
- int i, err = 0;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
-
- if (tgt == NULL || tgt->ltd_exp == NULL)
- continue;
-
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, vallen, val, set);
- if (err && rc == 0)
- rc = err;
- }
-
- return rc;
- }
-
- return -EINVAL;
-}
-
-static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_stripe_md *meap;
- struct lmv_stripe_md *lsmp;
- int mea_size;
- int i;
-
- mea_size = lmv_get_easize(lmv);
- if (!lmmp)
- return mea_size;
-
- if (*lmmp && !lsm) {
- kvfree(*lmmp);
- *lmmp = NULL;
- return 0;
- }
-
- if (*lmmp == NULL) {
- *lmmp = libcfs_kvzalloc(mea_size, GFP_NOFS);
- if (*lmmp == NULL)
- return -ENOMEM;
- }
-
- if (!lsm)
- return mea_size;
-
- lsmp = (struct lmv_stripe_md *)lsm;
- meap = (struct lmv_stripe_md *)*lmmp;
-
- if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR &&
- lsmp->mea_magic != MEA_MAGIC_ALL_CHARS)
- return -EINVAL;
-
- meap->mea_magic = cpu_to_le32(lsmp->mea_magic);
- meap->mea_count = cpu_to_le32(lsmp->mea_count);
- meap->mea_master = cpu_to_le32(lsmp->mea_master);
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- meap->mea_ids[i] = lsmp->mea_ids[i];
- fid_cpu_to_le(&meap->mea_ids[i], &lsmp->mea_ids[i]);
- }
-
- return mea_size;
-}
-
-static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_size)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp;
- struct lmv_stripe_md *mea = (struct lmv_stripe_md *)lmm;
- struct lmv_obd *lmv = &obd->u.lmv;
- int mea_size;
- int i;
- __u32 magic;
-
- mea_size = lmv_get_easize(lmv);
- if (lsmp == NULL)
- return mea_size;
-
- if (*lsmp != NULL && lmm == NULL) {
- kvfree(*tmea);
- *lsmp = NULL;
- return 0;
- }
-
- LASSERT(mea_size == lmm_size);
-
- *tmea = libcfs_kvzalloc(mea_size, GFP_NOFS);
- if (*tmea == NULL)
- return -ENOMEM;
-
- if (!lmm)
- return mea_size;
-
- if (mea->mea_magic == MEA_MAGIC_LAST_CHAR ||
- mea->mea_magic == MEA_MAGIC_ALL_CHARS ||
- mea->mea_magic == MEA_MAGIC_HASH_SEGMENT) {
- magic = le32_to_cpu(mea->mea_magic);
- } else {
- /*
- * Old mea is not handled here.
- */
-		CERROR("Unsupported old EA format found\n");
- LBUG();
- }
-
- (*tmea)->mea_magic = magic;
- (*tmea)->mea_count = le32_to_cpu(mea->mea_count);
- (*tmea)->mea_master = le32_to_cpu(mea->mea_master);
-
- for (i = 0; i < (*tmea)->mea_count; i++) {
- (*tmea)->mea_ids[i] = mea->mea_ids[i];
- fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]);
- }
- return mea_size;
-}
-
-static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- ldlm_cancel_flags_t flags, void *opaque)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int rc = 0;
- int err;
- int i;
-
- LASSERT(fid != NULL);
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL ||
- lmv->tgts[i]->ltd_active == 0)
- continue;
-
- err = md_cancel_unused(lmv->tgts[i]->ltd_exp, fid,
- policy, mode, flags, opaque);
- if (!rc)
- rc = err;
- }
- return rc;
-}
-
-static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
- __u64 *bits)
-{
- struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
- int rc;
-
- rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits);
- return rc;
-}
-
-static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_mode_t rc;
- int i;
-
- CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
-
- /*
- * With CMD every object can have two locks in different namespaces:
-	 * a lookup lock in the namespace of the MDS storing the direntry and an
-	 * update/open lock in the namespace of the MDS storing the inode. Thus
-	 * we check all targets, not only the one the fid was created on.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL ||
- lmv->tgts[i]->ltd_exp == NULL ||
- lmv->tgts[i]->ltd_active == 0)
- continue;
-
- rc = md_lock_match(lmv->tgts[i]->ltd_exp, flags, fid,
- type, policy, mode, lockh);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
-static int lmv_get_lustre_md(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct obd_export *dt_exp,
- struct obd_export *md_exp,
- struct lustre_md *md)
-{
- struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
-
- return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
-}
-
-static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
-
- if (md->mea)
- obd_free_memmd(exp, (void *)&md->mea);
- return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md);
-}
-
-static int lmv_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &och->och_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_set_open_replay_data(tgt->ltd_exp, och, it);
-}
-
-static int lmv_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
-
- tgt = lmv_find_target(lmv, &och->och_fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- return md_clear_open_replay_data(tgt->ltd_exp, och);
-}
-
-static int lmv_get_remote_perm(struct obd_export *exp,
- const struct lu_fid *fid,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_get_remote_perm(tgt->ltd_exp, fid, suppgid, request);
- return rc;
-}
-
-static int lmv_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo,
- struct ldlm_enqueue_info *einfo)
-{
- struct md_op_data *op_data = &minfo->mi_data;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt = NULL;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo);
- return rc;
-}
-
-static int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
- return rc;
-}
-
-/**
- * For lmv, we only need to send the request to the master MDT, and the master
- * MDT will handle forwarding to the other slave MDTs. The only exception is
- * Q_GETOQUOTA, for which we fetch data directly from the slave MDTs.
- */
-static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt = lmv->tgts[0];
- int rc = 0, i;
- __u64 curspace, curinodes;
-
- if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
- CERROR("master lmv inactive\n");
- return -EIO;
- }
-
- if (oqctl->qc_cmd != Q_GETOQUOTA) {
- rc = obd_quotactl(tgt->ltd_exp, oqctl);
- return rc;
- }
-
- curspace = curinodes = 0;
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- int err;
- tgt = lmv->tgts[i];
-
-		if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
-			CDEBUG(D_HA, "mdt %d is inactive.\n", i);
-			continue;
-		}
-
- err = obd_quotactl(tgt->ltd_exp, oqctl);
- if (err) {
- CERROR("getquota on mdt %d failed. %d\n", i, err);
- if (!rc)
- rc = err;
- } else {
- curspace += oqctl->qc_dqblk.dqb_curspace;
- curinodes += oqctl->qc_dqblk.dqb_curinodes;
- }
- }
- oqctl->qc_dqblk.dqb_curspace = curspace;
- oqctl->qc_dqblk.dqb_curinodes = curinodes;
-
- return rc;
-}
-
-static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int i, rc = 0;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- int err;
- tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
- CERROR("lmv idx %d inactive\n", i);
- return -EIO;
- }
-
- err = obd_quotacheck(tgt->ltd_exp, oqctl);
- if (err && !rc)
- rc = err;
- }
-
- return rc;
-}
-
-static struct obd_ops lmv_obd_ops = {
- .o_owner = THIS_MODULE,
- .o_setup = lmv_setup,
- .o_cleanup = lmv_cleanup,
- .o_precleanup = lmv_precleanup,
- .o_process_config = lmv_process_config,
- .o_connect = lmv_connect,
- .o_disconnect = lmv_disconnect,
- .o_statfs = lmv_statfs,
- .o_get_info = lmv_get_info,
- .o_set_info_async = lmv_set_info_async,
- .o_packmd = lmv_packmd,
- .o_unpackmd = lmv_unpackmd,
- .o_notify = lmv_notify,
- .o_get_uuid = lmv_get_uuid,
- .o_iocontrol = lmv_iocontrol,
- .o_quotacheck = lmv_quotacheck,
- .o_quotactl = lmv_quotactl
-};
-
-static struct md_ops lmv_md_ops = {
- .m_getstatus = lmv_getstatus,
- .m_null_inode = lmv_null_inode,
- .m_find_cbdata = lmv_find_cbdata,
- .m_close = lmv_close,
- .m_create = lmv_create,
- .m_done_writing = lmv_done_writing,
- .m_enqueue = lmv_enqueue,
- .m_getattr = lmv_getattr,
- .m_getxattr = lmv_getxattr,
- .m_getattr_name = lmv_getattr_name,
- .m_intent_lock = lmv_intent_lock,
- .m_link = lmv_link,
- .m_rename = lmv_rename,
- .m_setattr = lmv_setattr,
- .m_setxattr = lmv_setxattr,
- .m_sync = lmv_sync,
- .m_readpage = lmv_readpage,
- .m_unlink = lmv_unlink,
- .m_init_ea_size = lmv_init_ea_size,
- .m_cancel_unused = lmv_cancel_unused,
- .m_set_lock_data = lmv_set_lock_data,
- .m_lock_match = lmv_lock_match,
- .m_get_lustre_md = lmv_get_lustre_md,
- .m_free_lustre_md = lmv_free_lustre_md,
- .m_set_open_replay_data = lmv_set_open_replay_data,
- .m_clear_open_replay_data = lmv_clear_open_replay_data,
- .m_get_remote_perm = lmv_get_remote_perm,
- .m_intent_getattr_async = lmv_intent_getattr_async,
- .m_revalidate_lock = lmv_revalidate_lock
-};
-
-static int __init lmv_init(void)
-{
- struct lprocfs_static_vars lvars;
- int rc;
-
- lprocfs_lmv_init_vars(&lvars);
-
- rc = class_register_type(&lmv_obd_ops, &lmv_md_ops,
- LUSTRE_LMV_NAME, NULL);
- return rc;
-}
-
-static void lmv_exit(void)
-{
- class_unregister_type(LUSTRE_LMV_NAME);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver");
-MODULE_LICENSE("GPL");
-
-module_init(lmv_init);
-module_exit(lmv_exit);
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
deleted file mode 100644
index 311fc1b70c4d..000000000000
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/seq_file.h>
-#include <linux/statfs.h>
-#include "../include/lprocfs_status.h"
-#include "../include/obd_class.h"
-#include "lmv_internal.h"
-
-static ssize_t numobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lmv_desc *desc;
-
- desc = &dev->u.lmv.desc;
- return sprintf(buf, "%u\n", desc->ld_tgt_count);
-}
-LUSTRE_RO_ATTR(numobd);
-
-static const char *placement_name[] = {
- [PLACEMENT_CHAR_POLICY] = "CHAR",
- [PLACEMENT_NID_POLICY] = "NID",
- [PLACEMENT_INVAL_POLICY] = "INVAL"
-};
-
-static enum placement_policy placement_name2policy(char *name, int len)
-{
- int i;
-
- for (i = 0; i < PLACEMENT_MAX_POLICY; i++) {
- if (!strncmp(placement_name[i], name, len))
- return i;
- }
- return PLACEMENT_INVAL_POLICY;
-}
-
-static const char *placement_policy2name(enum placement_policy placement)
-{
- LASSERT(placement < PLACEMENT_MAX_POLICY);
- return placement_name[placement];
-}
-
-static ssize_t placement_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lmv_obd *lmv;
-
- lmv = &dev->u.lmv;
- return sprintf(buf, "%s\n", placement_policy2name(lmv->lmv_placement));
-}
-
-#define MAX_POLICY_STRING_SIZE 64
-
-static ssize_t placement_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- char dummy[MAX_POLICY_STRING_SIZE + 1];
- enum placement_policy policy;
- struct lmv_obd *lmv = &dev->u.lmv;
-
-	if (count > MAX_POLICY_STRING_SIZE)
-		count = MAX_POLICY_STRING_SIZE;
-
-	memcpy(dummy, buffer, count);
-
-	if (dummy[count - 1] == '\n')
- count--;
- dummy[count] = '\0';
-
- policy = placement_name2policy(dummy, count);
- if (policy != PLACEMENT_INVAL_POLICY) {
- spin_lock(&lmv->lmv_lock);
- lmv->lmv_placement = policy;
- spin_unlock(&lmv->lmv_lock);
- } else {
- return -EINVAL;
- }
- return count;
-}
-LUSTRE_RW_ATTR(placement);
-
-static ssize_t activeobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lmv_desc *desc;
-
- desc = &dev->u.lmv.desc;
- return sprintf(buf, "%u\n", desc->ld_active_tgt_count);
-}
-LUSTRE_RO_ATTR(activeobd);
-
-static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lmv_obd *lmv;
-
- LASSERT(dev != NULL);
- lmv = &dev->u.lmv;
- seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid);
- return 0;
-}
-LPROC_SEQ_FOPS_RO(lmv_desc_uuid);
-
-static void *lmv_tgt_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lmv_obd *lmv = &dev->u.lmv;
- return (*pos >= lmv->desc.ld_tgt_count) ? NULL : lmv->tgts[*pos];
-}
-
-static void lmv_tgt_seq_stop(struct seq_file *p, void *v)
-{
- return;
-}
-
-static void *lmv_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lmv_obd *lmv = &dev->u.lmv;
- ++*pos;
- return (*pos >= lmv->desc.ld_tgt_count) ? NULL : lmv->tgts[*pos];
-}
-
-static int lmv_tgt_seq_show(struct seq_file *p, void *v)
-{
- struct lmv_tgt_desc *tgt = v;
-
- if (tgt == NULL)
- return 0;
- seq_printf(p, "%d: %s %sACTIVE\n",
- tgt->ltd_idx, tgt->ltd_uuid.uuid,
- tgt->ltd_active ? "" : "IN");
- return 0;
-}
-
-static const struct seq_operations lmv_tgt_sops = {
- .start = lmv_tgt_seq_start,
- .stop = lmv_tgt_seq_stop,
- .next = lmv_tgt_seq_next,
- .show = lmv_tgt_seq_show,
-};
-
-static int lmv_target_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(file, &lmv_tgt_sops);
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static struct lprocfs_vars lprocfs_lmv_obd_vars[] = {
- { "desc_uuid", &lmv_desc_uuid_fops, NULL, 0 },
- { NULL }
-};
-
-struct file_operations lmv_proc_target_fops = {
- .owner = THIS_MODULE,
- .open = lmv_target_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static struct attribute *lmv_attrs[] = {
- &lustre_attr_activeobd.attr,
- &lustre_attr_numobd.attr,
- &lustre_attr_placement.attr,
- NULL,
-};
-
-static struct attribute_group lmv_attr_group = {
- .attrs = lmv_attrs,
-};
-
-void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &lmv_attr_group;
- lvars->obd_vars = lprocfs_lmv_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile
deleted file mode 100644
index e4cc0db21014..000000000000
--- a/drivers/staging/lustre/lustre/lov/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += lov.o
-lov-y := lov_obd.o lov_pack.o lov_offset.o lov_merge.o \
- lov_request.o lov_ea.o lov_dev.o lov_object.o lov_page.o \
- lov_lock.o lov_io.o lovsub_dev.o lovsub_object.o lovsub_page.o \
- lovsub_lock.o lovsub_io.o lov_pool.o lproc_lov.o
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
deleted file mode 100644
index 314ce8525aed..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ /dev/null
@@ -1,839 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal interfaces of LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@intel.com>
- */
-
-#ifndef LOV_CL_INTERNAL_H
-#define LOV_CL_INTERNAL_H
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd.h"
-#include "../include/cl_object.h"
-#include "lov_internal.h"
-
-/** \defgroup lov lov
- * Logical object volume layer. This layer implements data striping (raid0).
- *
- * At the lov layer top-entity (object, page, lock, io) is connected to one or
- * more sub-entities: top-object, representing a file is connected to a set of
- * sub-objects, each representing a stripe, file-level top-lock is connected
- * to a set of per-stripe sub-locks, top-page is connected to a (single)
- * sub-page, and a top-level IO is connected to a set of (potentially
- * concurrent) sub-IO's.
- *
- * Sub-object, sub-page, and sub-io have well-defined top-object and top-page
- * respectively, while a single sub-lock can be part of multiple top-locks.
- *
- * Reference counting models are different for different types of entities:
- *
- * - top-object keeps a reference to its sub-objects, and destroys them
- * when it is destroyed.
- *
- * - top-page keeps a reference to its sub-page, and destroys it when it
- * is destroyed.
- *
- * - sub-lock keeps a reference to its top-locks. Top-lock keeps a
- * reference (and a hold, see cl_lock_hold()) on its sub-locks when it
- * is actively using them (that is, in cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED, cl_lock_state::CLS_HELD states). When
- * moving into cl_lock_state::CLS_CACHED state, top-lock releases a
- * hold. From this moment top-lock has only a 'weak' reference to its
- * sub-locks. This reference is protected by top-lock
- * cl_lock::cll_guard, and will be automatically cleared by the sub-lock
- * when the latter is destroyed. When a sub-lock is canceled, a
- * reference to it is removed from the top-lock array, and top-lock is
- * moved into CLS_NEW state. It is guaranteed that all sub-locks exist
- * while their top-lock is in CLS_HELD or CLS_CACHED states.
- *
- * - IO's are not reference counted.
- *
- * To implement a connection between top and sub entities, lov layer is split
- * into two pieces: lov ("upper half"), and lovsub ("bottom half"), both
- * implementing full set of cl-interfaces. For example, top-object has vvp and
- * lov layers, and its sub-object has lovsub and osc layers. lovsub layer is
- * used to track child-parent relationship.
- *
- * @{
- */
-
-struct lovsub_device;
-struct lovsub_object;
-struct lovsub_lock;
-
-enum lov_device_flags {
- LOV_DEV_INITIALIZED = 1 << 0
-};
-
-/*
- * Upper half.
- */
-
-/**
- * Resources that are used in the memory-cleaning path, and whose allocation
- * cannot fail even when memory is tight. They are preallocated in sufficient
- * quantities in lov_device::ld_emrg[], and access to them is serialized by
- * lov_device::ld_mutex.
- */
-struct lov_device_emerg {
- /**
-	 * Page list used to submit IO when memory is under pressure.
- */
- struct cl_page_list emrg_page_list;
- /**
- * sub-io's shared by all threads accessing this device when memory is
- * too low to allocate sub-io's dynamically.
- */
- struct cl_io emrg_subio;
- /**
- * Environments used by sub-io's in
- * lov_device_emerg::emrg_subio.
- */
- struct lu_env *emrg_env;
- /**
- * Refchecks for lov_device_emerg::emrg_env.
- *
- * \see cl_env_get()
- */
- int emrg_refcheck;
-};
-
-struct lov_device {
- /*
- * XXX Locking of lov-private data is missing.
- */
- struct cl_device ld_cl;
- struct lov_obd *ld_lov;
- /** size of lov_device::ld_target[] array */
- __u32 ld_target_nr;
- struct lovsub_device **ld_target;
- __u32 ld_flags;
-
- /** Emergency resources used in memory-cleansing paths. */
- struct lov_device_emerg **ld_emrg;
- /**
- * Serializes access to lov_device::ld_emrg in low-memory
- * conditions.
- */
- struct mutex ld_mutex;
-};
-
-/**
- * Layout type.
- */
-enum lov_layout_type {
- LLT_EMPTY, /** empty file without body (mknod + truncate) */
- LLT_RAID0, /** striped file */
- LLT_RELEASED, /** file with no objects (data in HSM) */
- LLT_NR
-};
-
-static inline char *llt2str(enum lov_layout_type llt)
-{
- switch (llt) {
- case LLT_EMPTY:
- return "EMPTY";
- case LLT_RAID0:
- return "RAID0";
- case LLT_RELEASED:
- return "RELEASED";
- case LLT_NR:
- LBUG();
- }
- LBUG();
- return "";
-}
-
-/**
- * lov-specific file state.
- *
- * lov object has particular layout type, determining how top-object is built
- * on top of sub-objects. Layout type can change dynamically. When this
- * happens, lov_object::lo_type_guard semaphore is taken in exclusive mode,
- * all state pertaining to the old layout type is destroyed, and new state is
- * constructed. All object methods take said semaphore in the shared mode,
- * providing serialization against transition between layout types.
- *
- * To avoid multiple `if' or `switch' statements, selecting behavior for the
- * current layout type, object methods perform double-dispatch, invoking
- * function corresponding to the current layout type.
- */
-struct lov_object {
- struct cl_object lo_cl;
- /**
- * Serializes object operations with transitions between layout types.
- *
- * This semaphore is taken in shared mode by all object methods, and
- * is taken in exclusive mode when object type is changed.
- *
- * \see lov_object::lo_type
- */
- struct rw_semaphore lo_type_guard;
- /**
- * Type of an object. Protected by lov_object::lo_type_guard.
- */
- enum lov_layout_type lo_type;
- /**
- * True if layout is invalid. This bit is cleared when layout lock
- * is lost.
- */
- bool lo_layout_invalid;
- /**
-	 * How many IOs are ongoing on this object. Layout can be changed
- * only if there is no active IO.
- */
- atomic_t lo_active_ios;
- /**
-	 * Waitq - wait until no one else is using lo_lsm
- */
- wait_queue_head_t lo_waitq;
- /**
- * Layout metadata. NULL if empty layout.
- */
- struct lov_stripe_md *lo_lsm;
-
- union lov_layout_state {
- struct lov_layout_raid0 {
- unsigned lo_nr;
- /**
- * When this is true, lov_object::lo_attr contains
- * valid up to date attributes for a top-level
- * object. This field is reset to 0 when attributes of
- * any sub-object change.
- */
- int lo_attr_valid;
- /**
- * Array of sub-objects. Allocated when top-object is
- * created (lov_init_raid0()).
- *
- * Top-object is a strict master of its sub-objects:
- * it is created before them, and outlives its
-	 * children (the latter is necessary so that basic
- * functions like cl_object_top() always
- * work). Top-object keeps a reference on every
- * sub-object.
- *
- * When top-object is destroyed (lov_delete_raid0())
- * it releases its reference to a sub-object and waits
- * until the latter is finally destroyed.
- */
- struct lovsub_object **lo_sub;
- /**
- * protect lo_sub
- */
- spinlock_t lo_sub_lock;
- /**
- * Cached object attribute, built from sub-object
- * attributes.
- */
- struct cl_attr lo_attr;
- } raid0;
- struct lov_layout_state_empty {
- } empty;
- struct lov_layout_state_released {
- } released;
- } u;
- /**
- * Thread that acquired lov_object::lo_type_guard in an exclusive
- * mode.
- */
- struct task_struct *lo_owner;
-};
-
-/**
- * Flags that top-lock can set on each of its sub-locks.
- */
-enum lov_sub_flags {
- /** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
- LSF_HELD = 1 << 0
-};
-
-/**
- * State lov_lock keeps for each sub-lock.
- */
-struct lov_lock_sub {
- /** sub-lock itself */
- struct lovsub_lock *sub_lock;
- /** An array of per-sub-lock flags, taken from enum lov_sub_flags */
- unsigned sub_flags;
- int sub_stripe;
- struct cl_lock_descr sub_descr;
- struct cl_lock_descr sub_got;
-};
-
-/**
- * lov-specific lock state.
- */
-struct lov_lock {
- struct cl_lock_slice lls_cl;
- /** Number of sub-locks in this lock */
- int lls_nr;
- /**
- * Number of existing sub-locks.
- */
- unsigned lls_nr_filled;
- /**
- * Set when sub-lock was canceled, while top-lock was being
- * used, or unused.
- */
- unsigned int lls_cancel_race:1;
- /**
- * An array of sub-locks
- *
- * There are two issues with managing sub-locks:
- *
- * - sub-locks are concurrently canceled, and
- *
- * - sub-locks are shared with other top-locks.
- *
- * To manage cancellation, top-lock acquires a hold on a sublock
- * (lov_sublock_adopt()) when the latter is inserted into
- * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
- * when top-lock is going into CLS_CACHED state or destroyed. Hold
- * prevents sub-lock from cancellation.
- *
- * Sub-lock sharing means, among other things, that top-lock that is
- * in the process of creation (i.e., not yet inserted into lock list)
- * is already accessible to other threads once at least one of its
- * sub-locks is created, see lov_lock_sub_init().
- *
- * Sub-lock can be in one of the following states:
- *
- * - doesn't exist, lov_lock::lls_sub[]::sub_lock == NULL. Such
- * sub-lock was either never created (top-lock is in CLS_NEW
- * state), or it was created, then canceled, then destroyed
- * (lov_lock_unlink() cleared sub-lock pointer in the top-lock).
- *
- * - sub-lock exists and is on
- * hold. (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is a
- * normal state of a sub-lock in CLS_HELD and CLS_CACHED states
- * of a top-lock.
- *
- * - sub-lock exists, but is not held by the top-lock. This
- * happens after top-lock released a hold on sub-locks before
- * going into cache (lov_lock_unuse()).
- *
- * \todo To support wide-striping, array has to be replaced with a set
- * of queues to avoid scanning.
- */
- struct lov_lock_sub *lls_sub;
- /**
- * Original description with which lock was enqueued.
- */
- struct cl_lock_descr lls_orig;
-};
-
-struct lov_page {
- struct cl_page_slice lps_cl;
- int lps_invalid;
-};
-
-/*
- * Bottom half.
- */
-
-struct lovsub_device {
- struct cl_device acid_cl;
- struct lov_device *acid_super;
- int acid_idx;
- struct cl_device *acid_next;
-};
-
-struct lovsub_object {
- struct cl_object_header lso_header;
- struct cl_object lso_cl;
- struct lov_object *lso_super;
- int lso_index;
-};
-
-/**
- * A link between a top-lock and a sub-lock. Separate data-structure is
- * necessary, because top-locks and sub-locks are in M:N relationship.
- *
- * \todo This can be optimized for a (by far) most frequent case of a single
- * top-lock per sub-lock.
- */
-struct lov_lock_link {
- struct lov_lock *lll_super;
- /** An index within parent lock. */
- int lll_idx;
- /**
- * A linkage into per sub-lock list of all corresponding top-locks,
- * hanging off lovsub_lock::lss_parents.
- */
- struct list_head lll_list;
-};
-
-/**
- * Lock state at lovsub layer.
- */
-struct lovsub_lock {
- struct cl_lock_slice lss_cl;
- /**
- * List of top-locks that have given sub-lock as their part. Protected
- * by cl_lock::cll_guard mutex.
- */
- struct list_head lss_parents;
- /**
- * Top-lock that initiated current operation on this sub-lock. This is
- * only set during top-to-bottom lock operations like enqueue, and is
- * used to optimize state change notification. Protected by
- * cl_lock::cll_guard mutex.
- *
- * \see lovsub_lock_state_one().
- */
- struct cl_lock *lss_active;
-};
-
-/**
- * Describe the environment settings for sublocks.
- */
-struct lov_sublock_env {
- const struct lu_env *lse_env;
- struct cl_io *lse_io;
- struct lov_io_sub *lse_sub;
-};
-
-struct lovsub_page {
- struct cl_page_slice lsb_cl;
-};
-
-
-struct lov_thread_info {
- struct cl_object_conf lti_stripe_conf;
- struct lu_fid lti_fid;
- struct cl_lock_descr lti_ldescr;
- struct ost_lvb lti_lvb;
- struct cl_2queue lti_cl2q;
- struct cl_lock_closure lti_closure;
- wait_queue_t lti_waiter;
-};
-
-/**
- * State that lov_io maintains for every sub-io.
- */
-struct lov_io_sub {
- int sub_stripe;
- /**
- * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
- * independently, with lov acting as a scheduler to maximize overall
- * throughput.
- */
- struct cl_io *sub_io;
- /**
- * Linkage into a list (hanging off lov_io::lis_active) of all
- * sub-io's active for the current IO iteration.
- */
- struct list_head sub_linkage;
- /**
- * true, iff cl_io_init() was successfully executed against
- * lov_io_sub::sub_io.
- */
- int sub_io_initialized;
- /**
- * True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
- * allocated, but borrowed from a per-device emergency pool.
- */
- int sub_borrowed;
- /**
- * environment, in which sub-io executes.
- */
- struct lu_env *sub_env;
- /**
- * environment's refcheck.
- *
- * \see cl_env_get()
- */
- int sub_refcheck;
- int sub_refcheck2;
- int sub_reenter;
- void *sub_cookie;
-};
-
-/**
- * IO state private for LOV.
- */
-struct lov_io {
- /** super-class */
- struct cl_io_slice lis_cl;
- /**
- * Pointer to the object slice. This is a duplicate of
- * lov_io::lis_cl::cis_object.
- */
- struct lov_object *lis_object;
- /**
- * Original end-of-io position for this IO, set by the upper layer as
- * cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this,
- * changes pos and count to fit IO into a single stripe and uses saved
- * value to determine when IO iterations have to stop.
- *
- * This is used only for CIT_READ and CIT_WRITE io's.
- */
- loff_t lis_io_endpos;
-
- /**
- * starting position within a file, for the current io loop iteration
- * (stripe), used by ci_io_loop().
- */
- u64 lis_pos;
- /**
-	 * end position within a file, for the current stripe io. This is
- * exclusive (i.e., next offset after last byte affected by io).
- */
- u64 lis_endpos;
-
- int lis_mem_frozen;
- int lis_stripe_count;
- int lis_active_subios;
-
- /**
- * the index of ls_single_subio in ls_subios array
- */
- int lis_single_subio_index;
- struct cl_io lis_single_subio;
-
- /**
- * size of ls_subios array, actually the highest stripe #
- */
- int lis_nr_subios;
- struct lov_io_sub *lis_subs;
- /**
- * List of active sub-io's.
- */
- struct list_head lis_active;
-};
-
-struct lov_session {
- struct lov_io ls_io;
- struct lov_sublock_env ls_subenv;
-};
-
-/**
- * State of transfer for lov.
- */
-struct lov_req {
- struct cl_req_slice lr_cl;
-};
-
-/**
- * State of transfer for lovsub.
- */
-struct lovsub_req {
- struct cl_req_slice lsrq_cl;
-};
-
-extern struct lu_device_type lov_device_type;
-extern struct lu_device_type lovsub_device_type;
-
-extern struct lu_context_key lov_key;
-extern struct lu_context_key lov_session_key;
-
-extern struct kmem_cache *lov_lock_kmem;
-extern struct kmem_cache *lov_object_kmem;
-extern struct kmem_cache *lov_thread_kmem;
-extern struct kmem_cache *lov_session_kmem;
-extern struct kmem_cache *lov_req_kmem;
-
-extern struct kmem_cache *lovsub_lock_kmem;
-extern struct kmem_cache *lovsub_object_kmem;
-extern struct kmem_cache *lovsub_req_kmem;
-
-extern struct kmem_cache *lov_lock_link_kmem;
-
-int lov_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-
-int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link,
- struct lovsub_lock *sub);
-
-struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
- int stripe);
-void lov_sub_put(struct lov_io_sub *sub);
-int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
- struct lovsub_lock *sublock,
- const struct cl_lock_descr *d, int idx);
-
-
-int lov_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-
-int lov_page_init_empty(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
-int lov_page_init_raid0(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
-struct lu_object *lov_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-struct lu_object *lovsub_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-
-struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
- struct lov_lock *lck,
- struct lovsub_lock *sub);
-struct lov_io_sub *lov_page_subio(const struct lu_env *env,
- struct lov_io *lio,
- const struct cl_page_slice *slice);
-
-void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm);
-struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
-
-#define lov_foreach_target(lov, var) \
- for (var = 0; var < lov_targets_nr(lov); ++var)
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- * Accessors.
- *
- */
-
-static inline struct lov_session *lov_env_session(const struct lu_env *env)
-{
- struct lov_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &lov_session_key);
- LASSERT(ses != NULL);
- return ses;
-}
-
-static inline struct lov_io *lov_env_io(const struct lu_env *env)
-{
- return &lov_env_session(env)->ls_io;
-}
-
-static inline int lov_is_object(const struct lu_object *obj)
-{
- return obj->lo_dev->ld_type == &lov_device_type;
-}
-
-static inline int lovsub_is_object(const struct lu_object *obj)
-{
- return obj->lo_dev->ld_type == &lovsub_device_type;
-}
-
-static inline struct lu_device *lov2lu_dev(struct lov_device *lov)
-{
- return &lov->ld_cl.cd_lu_dev;
-}
-
-static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
-{
- LINVRNT(d->ld_type == &lov_device_type);
- return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
-}
-
-static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
-{
- return &lovsub->acid_cl;
-}
-
-static inline struct lu_device *lovsub2lu_dev(struct lovsub_device *lovsub)
-{
- return &lovsub2cl_dev(lovsub)->cd_lu_dev;
-}
-
-static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
-{
- LINVRNT(d->ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
-}
-
-static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
-{
- LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl);
-}
-
-static inline struct lu_object *lov2lu(struct lov_object *lov)
-{
- return &lov->lo_cl.co_lu;
-}
-
-static inline struct cl_object *lov2cl(struct lov_object *lov)
-{
- return &lov->lo_cl;
-}
-
-static inline struct lov_object *lu2lov(const struct lu_object *obj)
-{
- LINVRNT(lov_is_object(obj));
- return container_of0(obj, struct lov_object, lo_cl.co_lu);
-}
-
-static inline struct lov_object *cl2lov(const struct cl_object *obj)
-{
- LINVRNT(lov_is_object(&obj->co_lu));
- return container_of0(obj, struct lov_object, lo_cl);
-}
-
-static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
-{
- return &los->lso_cl.co_lu;
-}
-
-static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
-{
- return &los->lso_cl;
-}
-
-static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
-{
- LINVRNT(lovsub_is_object(&obj->co_lu));
- return container_of0(obj, struct lovsub_object, lso_cl);
-}
-
-static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
-{
- LINVRNT(lovsub_is_object(obj));
- return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
-}
-
-static inline struct lovsub_lock *
-cl2lovsub_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lovsub_lock, lss_cl);
-}
-
-static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
-
- slice = cl_lock_at(lock, &lovsub_device_type);
- LASSERT(slice != NULL);
- return cl2lovsub_lock(slice);
-}
-
-static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lov_lock, lls_cl);
-}
-
-static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
-{
- LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct lov_page, lps_cl);
-}
-
-static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lov_req, lr_cl);
-}
-
-static inline struct lovsub_page *
-cl2lovsub_page(const struct cl_page_slice *slice)
-{
- LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct lovsub_page, lsb_cl);
-}
-
-static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lovsub_req, lsrq_cl);
-}
-
-static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
-{
- return slice->cpl_page->cp_child;
-}
-
-static inline struct lov_io *cl2lov_io(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio;
-
- lio = container_of(ios, struct lov_io, lis_cl);
- LASSERT(lio == lov_env_io(env));
- return lio;
-}
-
-static inline int lov_targets_nr(const struct lov_device *lov)
-{
- return lov->ld_lov->desc.ld_tgt_count;
-}
-
-static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
-{
- struct lov_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &lov_key);
- LASSERT(info != NULL);
- return info;
-}
-
-static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
-{
- LASSERT(lov->lo_type == LLT_RAID0);
- LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
- lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
- return &lov->u.raid0;
-}
-
-/** @} lov */
-
-#endif
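
The lov_object documentation in the header removed above explains that object methods take lo_type_guard in shared mode and double-dispatch on the current layout type, while a layout change takes the semaphore exclusively before switching state. A minimal userspace sketch of that pattern, offered only as illustration (pthreads stand in for the kernel rw_semaphore; every name below is invented and none comes from the Lustre tree):

#include <pthread.h>
#include <stdio.h>

enum layout_type { LT_EMPTY, LT_RAID0, LT_RELEASED, LT_NR };

struct obj_ops {
	int (*print)(void);		/* one slot per object method */
};

static int empty_print(void)    { return printf("EMPTY\n"); }
static int raid0_print(void)    { return printf("RAID0\n"); }
static int released_print(void) { return printf("RELEASED\n"); }

/* One ops table per layout type: the table half of the double dispatch. */
static const struct obj_ops layout_dispatch[LT_NR] = {
	[LT_EMPTY]    = { .print = empty_print },
	[LT_RAID0]    = { .print = raid0_print },
	[LT_RELEASED] = { .print = released_print },
};

struct obj {
	pthread_rwlock_t type_guard;	/* analogue of lo_type_guard */
	enum layout_type type;		/* analogue of lo_type */
};

/* Object methods take the guard shared and dispatch on the current type. */
static int obj_print(struct obj *o)
{
	int rc;

	pthread_rwlock_rdlock(&o->type_guard);
	rc = layout_dispatch[o->type].print();
	pthread_rwlock_unlock(&o->type_guard);
	return rc;
}

/* A layout change takes the guard exclusively before switching the type. */
static void obj_set_type(struct obj *o, enum layout_type t)
{
	pthread_rwlock_wrlock(&o->type_guard);
	o->type = t;
	pthread_rwlock_unlock(&o->type_guard);
}

int main(void)
{
	struct obj o = { .type = LT_EMPTY };

	pthread_rwlock_init(&o.type_guard, NULL);
	obj_print(&o);
	obj_set_type(&o, LT_RAID0);
	obj_print(&o);
	pthread_rwlock_destroy(&o.type_guard);
	return 0;
}

The removed code additionally tears down and rebuilds per-type state on each transition and treats LLT_NR as a bug (llt2str() calls LBUG()); the sketch only shows the locking and table-dispatch shape.
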
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
deleted file mode 100644
index 10317e0c3150..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_device and cl_device_type for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-/* class_name2obd() */
-#include "../include/obd_class.h"
-
-#include "lov_cl_internal.h"
-#include "lov_internal.h"
-
-
-struct kmem_cache *lov_lock_kmem;
-struct kmem_cache *lov_object_kmem;
-struct kmem_cache *lov_thread_kmem;
-struct kmem_cache *lov_session_kmem;
-struct kmem_cache *lov_req_kmem;
-
-struct kmem_cache *lovsub_lock_kmem;
-struct kmem_cache *lovsub_object_kmem;
-struct kmem_cache *lovsub_req_kmem;
-
-struct kmem_cache *lov_lock_link_kmem;
-
-/** Lock class of lov_device::ld_mutex. */
-static struct lock_class_key cl_lov_device_mutex_class;
-
-struct lu_kmem_descr lov_caches[] = {
- {
- .ckd_cache = &lov_lock_kmem,
- .ckd_name = "lov_lock_kmem",
- .ckd_size = sizeof(struct lov_lock)
- },
- {
- .ckd_cache = &lov_object_kmem,
- .ckd_name = "lov_object_kmem",
- .ckd_size = sizeof(struct lov_object)
- },
- {
- .ckd_cache = &lov_thread_kmem,
- .ckd_name = "lov_thread_kmem",
- .ckd_size = sizeof(struct lov_thread_info)
- },
- {
- .ckd_cache = &lov_session_kmem,
- .ckd_name = "lov_session_kmem",
- .ckd_size = sizeof(struct lov_session)
- },
- {
- .ckd_cache = &lov_req_kmem,
- .ckd_name = "lov_req_kmem",
- .ckd_size = sizeof(struct lov_req)
- },
- {
- .ckd_cache = &lovsub_lock_kmem,
- .ckd_name = "lovsub_lock_kmem",
- .ckd_size = sizeof(struct lovsub_lock)
- },
- {
- .ckd_cache = &lovsub_object_kmem,
- .ckd_name = "lovsub_object_kmem",
- .ckd_size = sizeof(struct lovsub_object)
- },
- {
- .ckd_cache = &lovsub_req_kmem,
- .ckd_name = "lovsub_req_kmem",
- .ckd_size = sizeof(struct lovsub_req)
- },
- {
- .ckd_cache = &lov_lock_link_kmem,
- .ckd_name = "lov_lock_link_kmem",
- .ckd_size = sizeof(struct lov_lock_link)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Lov transfer operations.
- *
- */
-
-static void lov_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct lov_req *lr;
-
- lr = cl2lov_req(slice);
- OBD_SLAB_FREE_PTR(lr, lov_req_kmem);
-}
-
-static const struct cl_req_operations lov_req_ops = {
- .cro_completion = lov_req_completion
-};
-
-/*****************************************************************************
- *
- * Lov device and device type functions.
- *
- */
-
-static void *lov_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct lov_thread_info *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, GFP_NOFS);
- if (info != NULL)
- INIT_LIST_HEAD(&info->lti_closure.clc_list);
- else
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void lov_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct lov_thread_info *info = data;
-
- LINVRNT(list_empty(&info->lti_closure.clc_list));
- OBD_SLAB_FREE_PTR(info, lov_thread_kmem);
-}
-
-struct lu_context_key lov_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = lov_key_init,
- .lct_fini = lov_key_fini
-};
-
-static void *lov_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct lov_session *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, GFP_NOFS);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void lov_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct lov_session *info = data;
-
- OBD_SLAB_FREE_PTR(info, lov_session_kmem);
-}
-
-struct lu_context_key lov_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = lov_session_key_init,
- .lct_fini = lov_session_key_fini
-};
-
-/* type constructor/destructor: lov_type_{init,fini,start,stop}() */
-LU_TYPE_INIT_FINI(lov, &lov_key, &lov_session_key);
-
-static struct lu_device *lov_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- int i;
- struct lov_device *ld = lu2lov_dev(d);
-
- LASSERT(ld->ld_lov != NULL);
- if (ld->ld_target == NULL)
- return NULL;
-
- lov_foreach_target(ld, i) {
- struct lovsub_device *lsd;
-
- lsd = ld->ld_target[i];
- if (lsd != NULL) {
- cl_stack_fini(env, lovsub2cl_dev(lsd));
- ld->ld_target[i] = NULL;
- }
- }
- return NULL;
-}
-
-static int lov_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct lov_device *ld = lu2lov_dev(d);
- int i;
- int rc = 0;
-
- LASSERT(d->ld_site != NULL);
- if (ld->ld_target == NULL)
- return rc;
-
- lov_foreach_target(ld, i) {
- struct lovsub_device *lsd;
- struct cl_device *cl;
- struct lov_tgt_desc *desc;
-
- desc = ld->ld_lov->lov_tgts[i];
- if (desc == NULL)
- continue;
-
- cl = cl_type_setup(env, d->ld_site, &lovsub_device_type,
- desc->ltd_obd->obd_lu_dev);
- if (IS_ERR(cl)) {
- rc = PTR_ERR(cl);
- break;
- }
- lsd = cl2lovsub_dev(cl);
- lsd->acid_idx = i;
- lsd->acid_super = ld;
- ld->ld_target[i] = lsd;
- }
-
- if (rc)
- lov_device_fini(env, d);
- else
- ld->ld_flags |= LOV_DEV_INITIALIZED;
-
- return rc;
-}
-
-static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct lov_req *lr;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, GFP_NOFS);
- if (lr != NULL) {
- cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-static const struct cl_device_operations lov_cl_ops = {
- .cdo_req_init = lov_req_init
-};
-
-static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
-{
- int i;
-
- for (i = 0; i < nr; ++i) {
- struct lov_device_emerg *em;
-
- em = emrg[i];
- if (em != NULL) {
- LASSERT(em->emrg_page_list.pl_nr == 0);
- if (em->emrg_env != NULL)
- cl_env_put(em->emrg_env, &em->emrg_refcheck);
- kfree(em);
- }
- }
- kfree(emrg);
-}
-
-static struct lu_device *lov_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct lov_device *ld = lu2lov_dev(d);
- const int nr = ld->ld_target_nr;
-
- cl_device_fini(lu2cl_dev(d));
- kfree(ld->ld_target);
- if (ld->ld_emrg != NULL)
- lov_emerg_free(ld->ld_emrg, nr);
- kfree(ld);
- return NULL;
-}
-
-static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev,
- __u32 index)
-{
- struct lov_device *ld = lu2lov_dev(dev);
-
- if (ld->ld_target[index] != NULL) {
- cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index]));
- ld->ld_target[index] = NULL;
- }
-}
-
-static struct lov_device_emerg **lov_emerg_alloc(int nr)
-{
- struct lov_device_emerg **emerg;
- int i;
- int result;
-
- emerg = kcalloc(nr, sizeof(emerg[0]), GFP_NOFS);
- if (emerg == NULL)
- return ERR_PTR(-ENOMEM);
- for (result = i = 0; i < nr && result == 0; i++) {
- struct lov_device_emerg *em;
-
- em = kzalloc(sizeof(*em), GFP_NOFS);
- if (em != NULL) {
- emerg[i] = em;
- cl_page_list_init(&em->emrg_page_list);
- em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
- LCT_REMEMBER|LCT_NOREF);
- if (!IS_ERR(em->emrg_env))
- em->emrg_env->le_ctx.lc_cookie = 0x2;
- else {
- result = PTR_ERR(em->emrg_env);
- em->emrg_env = NULL;
- }
- } else
- result = -ENOMEM;
- }
- if (result != 0) {
- lov_emerg_free(emerg, nr);
- emerg = ERR_PTR(result);
- }
- return emerg;
-}
-
-static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
-{
- int result;
- __u32 tgt_size;
- __u32 sub_size;
-
- result = 0;
- tgt_size = dev->ld_lov->lov_tgt_size;
- sub_size = dev->ld_target_nr;
- if (sub_size < tgt_size) {
- struct lovsub_device **newd;
- struct lov_device_emerg **emerg;
- const size_t sz = sizeof(newd[0]);
-
- emerg = lov_emerg_alloc(tgt_size);
- if (IS_ERR(emerg))
- return PTR_ERR(emerg);
-
- newd = kcalloc(tgt_size, sz, GFP_NOFS);
- if (newd != NULL) {
- mutex_lock(&dev->ld_mutex);
- if (sub_size > 0) {
- memcpy(newd, dev->ld_target, sub_size * sz);
- kfree(dev->ld_target);
- }
- dev->ld_target = newd;
- dev->ld_target_nr = tgt_size;
-
- if (dev->ld_emrg != NULL)
- lov_emerg_free(dev->ld_emrg, sub_size);
- dev->ld_emrg = emerg;
- mutex_unlock(&dev->ld_mutex);
- } else {
- lov_emerg_free(emerg, tgt_size);
- result = -ENOMEM;
- }
- }
- return result;
-}
-
-static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
- __u32 index)
-{
- struct obd_device *obd = dev->ld_obd;
- struct lov_device *ld = lu2lov_dev(dev);
- struct lov_tgt_desc *tgt;
- struct lovsub_device *lsd;
- struct cl_device *cl;
- int rc;
-
- obd_getref(obd);
-
- tgt = obd->u.lov.lov_tgts[index];
- LASSERT(tgt != NULL);
- LASSERT(tgt->ltd_obd != NULL);
-
- if (!tgt->ltd_obd->obd_set_up) {
- CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid));
- return -EINVAL;
- }
-
- rc = lov_expand_targets(env, ld);
- if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) {
- LASSERT(dev->ld_site != NULL);
-
- cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type,
- tgt->ltd_obd->obd_lu_dev);
- if (!IS_ERR(cl)) {
- lsd = cl2lovsub_dev(cl);
- lsd->acid_idx = index;
- lsd->acid_super = ld;
- ld->ld_target[index] = lsd;
- } else {
- CERROR("add failed (%d), deleting %s\n", rc,
- obd_uuid2str(&tgt->ltd_uuid));
- lov_cl_del_target(env, dev, index);
- rc = PTR_ERR(cl);
- }
- }
- obd_putref(obd);
- return rc;
-}
-
-static int lov_process_config(const struct lu_env *env,
- struct lu_device *d, struct lustre_cfg *cfg)
-{
- struct obd_device *obd = d->ld_obd;
- int cmd;
- int rc;
- int gen;
- __u32 index;
-
- obd_getref(obd);
-
- cmd = cfg->lcfg_command;
- rc = lov_process_config_base(d->ld_obd, cfg, &index, &gen);
- if (rc == 0) {
- switch (cmd) {
- case LCFG_LOV_ADD_OBD:
- case LCFG_LOV_ADD_INA:
- rc = lov_cl_add_target(env, d, index);
- if (rc != 0)
- lov_del_target(d->ld_obd, index, NULL, 0);
- break;
- case LCFG_LOV_DEL_OBD:
- lov_cl_del_target(env, d, index);
- break;
- }
- }
- obd_putref(obd);
- return rc;
-}
-
-static const struct lu_device_operations lov_lu_ops = {
- .ldo_object_alloc = lov_object_alloc,
- .ldo_process_config = lov_process_config,
-};
-
-static struct lu_device *lov_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *d;
- struct lov_device *ld;
- struct obd_device *obd;
- int rc;
-
- ld = kzalloc(sizeof(*ld), GFP_NOFS);
- if (!ld)
- return ERR_PTR(-ENOMEM);
-
- cl_device_init(&ld->ld_cl, t);
- d = lov2lu_dev(ld);
- d->ld_ops = &lov_lu_ops;
- ld->ld_cl.cd_ops = &lov_cl_ops;
-
- mutex_init(&ld->ld_mutex);
- lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
-
- /* setup the LOV OBD */
- obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd != NULL);
- rc = lov_setup(obd, cfg);
- if (rc) {
- lov_device_free(env, d);
- return ERR_PTR(rc);
- }
-
- ld->ld_lov = &obd->u.lov;
- return d;
-}
-
-static const struct lu_device_type_operations lov_device_type_ops = {
- .ldto_init = lov_type_init,
- .ldto_fini = lov_type_fini,
-
- .ldto_start = lov_type_start,
- .ldto_stop = lov_type_stop,
-
- .ldto_device_alloc = lov_device_alloc,
- .ldto_device_free = lov_device_free,
-
- .ldto_device_init = lov_device_init,
- .ldto_device_fini = lov_device_fini
-};
-
-struct lu_device_type lov_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_LOV_NAME,
- .ldt_ops = &lov_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-EXPORT_SYMBOL(lov_device_type);
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
deleted file mode 100644
index 3f51b573e1fb..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_ea.c
- *
- * Author: Wang Di <wangdi@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include <asm/div64.h>
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_class.h"
-#include "../include/lustre/lustre_idl.h"
-
-#include "lov_internal.h"
-
-struct lovea_unpack_args {
- struct lov_stripe_md *lsm;
- int cursor;
-};
-
-static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
- __u16 stripe_count)
-{
- if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
- CERROR("bad stripe count %d\n", stripe_count);
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- if (lmm_oi_id(&lmm->lmm_oi) == 0) {
- CERROR("zero object id\n");
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- if (lov_pattern(le32_to_cpu(lmm->lmm_pattern)) != LOV_PATTERN_RAID0) {
- CERROR("bad striping pattern\n");
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- if (lmm->lmm_stripe_size == 0 ||
- (le32_to_cpu(lmm->lmm_stripe_size)&(LOV_MIN_STRIPE_SIZE-1)) != 0) {
- CERROR("bad stripe size %u\n",
- le32_to_cpu(lmm->lmm_stripe_size));
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
- return 0;
-}
-
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
-{
- struct lov_stripe_md *lsm;
- struct lov_oinfo *loi;
- int i, oinfo_ptrs_size;
-
- LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
-
- oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
- *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;
-
- lsm = libcfs_kvzalloc(*size, GFP_NOFS);
- if (!lsm)
- return NULL;
-
- for (i = 0; i < stripe_count; i++) {
- OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
- if (loi == NULL)
- goto err;
- lsm->lsm_oinfo[i] = loi;
- }
- lsm->lsm_stripe_count = stripe_count;
- return lsm;
-
-err:
- while (--i >= 0)
- OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab, sizeof(*loi));
- kvfree(lsm);
- return NULL;
-}
-
-void lsm_free_plain(struct lov_stripe_md *lsm)
-{
- __u16 stripe_count = lsm->lsm_stripe_count;
- int i;
-
- for (i = 0; i < stripe_count; i++)
- OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
- sizeof(struct lov_oinfo));
- kvfree(lsm);
-}
-
-static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm)
-{
- /*
-	 * This supposes that the first fields of lov_mds_md_v1/v3
-	 * are the same
- */
- lmm_oi_le_to_cpu(&lsm->lsm_oi, &lmm->lmm_oi);
- lsm->lsm_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
- lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
- lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
- lsm->lsm_pool_name[0] = '\0';
-}
-
-static void
-lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
- u64 *lov_off, u64 *swidth)
-{
- if (swidth)
- *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
-}
-
-static void
-lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
- u64 *lov_off, u64 *swidth)
-{
- if (swidth)
- *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
-}
-
-static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa,
- struct obd_export *md_exp)
-{
- return 0;
-}
-
-/* Find minimum stripe maxbytes value. For inactive or
- * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */
-static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
-{
- struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
-
- if (imp == NULL || !tgt->ltd_active) {
- *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
- return;
- }
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
- imp->imp_connect_data.ocd_maxbytes > 0) {
- if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
- *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
- } else {
- *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
- }
- spin_unlock(&imp->imp_lock);
-}
-
-static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
- __u16 *stripe_count)
-{
- if (lmm_bytes < sizeof(*lmm)) {
- CERROR("lov_mds_md_v1 too small: %d, need at least %d\n",
- lmm_bytes, (int)sizeof(*lmm));
- return -EINVAL;
- }
-
- *stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- *stripe_count = 0;
-
- if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V1)) {
- CERROR("LOV EA V1 too small: %d, need %d\n",
- lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V1));
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- return lsm_lmm_verify_common(lmm, lmm_bytes, *stripe_count);
-}
-
-static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md_v1 *lmm)
-{
- struct lov_oinfo *loi;
- int i;
- int stripe_count;
- __u64 stripe_maxbytes = OBD_OBJECT_EOF;
-
- lsm_unpackmd_common(lsm, lmm);
-
- stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
- for (i = 0; i < stripe_count; i++) {
- /* XXX LOV STACKING call down to osc_unpackmd() */
- loi = lsm->lsm_oinfo[i];
- ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
- loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
- loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
- CERROR("OST index %d more than OST count %d\n",
- loi->loi_ost_idx, lov->desc.ld_tgt_count);
- lov_dump_lmm_v1(D_WARNING, lmm);
- return -EINVAL;
- }
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CERROR("OST index %d missing\n", loi->loi_ost_idx);
- lov_dump_lmm_v1(D_WARNING, lmm);
- return -EINVAL;
- }
- /* calculate the minimum stripe max bytes */
- lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
- &stripe_maxbytes);
- }
-
- lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
- return 0;
-}
-
-const struct lsm_operations lsm_v1_ops = {
- .lsm_free = lsm_free_plain,
- .lsm_destroy = lsm_destroy_plain,
- .lsm_stripe_by_index = lsm_stripe_by_index_plain,
- .lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
- .lsm_lmm_verify = lsm_lmm_verify_v1,
- .lsm_unpackmd = lsm_unpackmd_v1,
-};
-
-static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
- __u16 *stripe_count)
-{
- struct lov_mds_md_v3 *lmm;
-
- lmm = (struct lov_mds_md_v3 *)lmmv1;
-
- if (lmm_bytes < sizeof(*lmm)) {
- CERROR("lov_mds_md_v3 too small: %d, need at least %d\n",
- lmm_bytes, (int)sizeof(*lmm));
- return -EINVAL;
- }
-
- *stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- *stripe_count = 0;
-
- if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V3)) {
- CERROR("LOV EA V3 too small: %d, need %d\n",
- lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V3));
- lov_dump_lmm_common(D_WARNING, lmm);
- return -EINVAL;
- }
-
- return lsm_lmm_verify_common((struct lov_mds_md_v1 *)lmm, lmm_bytes,
- *stripe_count);
-}
-
-static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmmv1)
-{
- struct lov_mds_md_v3 *lmm;
- struct lov_oinfo *loi;
- int i;
- int stripe_count;
- __u64 stripe_maxbytes = OBD_OBJECT_EOF;
- int cplen = 0;
-
- lmm = (struct lov_mds_md_v3 *)lmmv1;
-
- lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm);
-
- stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
- cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name,
- sizeof(lsm->lsm_pool_name));
- if (cplen >= sizeof(lsm->lsm_pool_name))
- return -E2BIG;
-
- for (i = 0; i < stripe_count; i++) {
- /* XXX LOV STACKING call down to osc_unpackmd() */
- loi = lsm->lsm_oinfo[i];
- ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
- loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
- loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
- CERROR("OST index %d more than OST count %d\n",
- loi->loi_ost_idx, lov->desc.ld_tgt_count);
- lov_dump_lmm_v3(D_WARNING, lmm);
- return -EINVAL;
- }
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CERROR("OST index %d missing\n", loi->loi_ost_idx);
- lov_dump_lmm_v3(D_WARNING, lmm);
- return -EINVAL;
- }
- /* calculate the minimum stripe max bytes */
- lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
- &stripe_maxbytes);
- }
-
- lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
- return 0;
-}
-
-const struct lsm_operations lsm_v3_ops = {
- .lsm_free = lsm_free_plain,
- .lsm_destroy = lsm_destroy_plain,
- .lsm_stripe_by_index = lsm_stripe_by_index_plain,
- .lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
- .lsm_lmm_verify = lsm_lmm_verify_v3,
- .lsm_unpackmd = lsm_unpackmd_v3,
-};
-
-void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm)
-{
- CDEBUG(level, "lsm %p, objid " DOSTID ", maxbytes %#llx, magic 0x%08X, stripe_size %u, stripe_count %u, refc: %d, layout_gen %u, pool [" LOV_POOLNAMEF "]\n",
- lsm,
- POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
- lsm->lsm_stripe_size, lsm->lsm_stripe_count,
- atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen,
- lsm->lsm_pool_name);
-}
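
The lsm_unpackmd_v1()/lsm_unpackmd_v3() paths removed above compute the per-file size limit as the smallest per-target maxbytes (falling back to LUSTRE_STRIPE_MAXBYTES for inactive or reconnecting targets) multiplied by the stripe count. A toy illustration of that arithmetic; the values and names are invented for the example, not taken from the Lustre tree:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Per-OST maxbytes as reported at connect time (invented values). */
	uint64_t ocd_maxbytes[] = { 16ULL << 40, 8ULL << 40, 12ULL << 40 };
	unsigned int stripe_count = 3;
	uint64_t stripe_maxbytes = UINT64_MAX;	/* plays the role of OBD_OBJECT_EOF */
	unsigned int i;

	/* Minimum over the stripes' targets, as lov_tgt_maxbytes() does. */
	for (i = 0; i < stripe_count; i++)
		if (ocd_maxbytes[i] < stripe_maxbytes)
			stripe_maxbytes = ocd_maxbytes[i];

	/* lsm_maxbytes = smallest per-stripe limit * number of stripes. */
	printf("lsm_maxbytes = %llu\n",
	       (unsigned long long)(stripe_maxbytes * stripe_count));
	return 0;
}
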
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
deleted file mode 100644
index dde9656d4dd6..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef LOV_INTERNAL_H
-#define LOV_INTERNAL_H
-
-#include "../include/obd_class.h"
-#include "../include/lustre/lustre_user.h"
-
-/* lov_do_div64(a, b) returns a % b, and a = a / b.
- * The 32-bit code is LOV-specific due to knowing about stripe limits in
- * order to reduce the divisor to a 32-bit number. If the divisor is
- * already a 32-bit value the compiler handles this directly. */
-#if BITS_PER_LONG == 64
-# define lov_do_div64(n, base) ({ \
- uint64_t __base = (base); \
- uint64_t __rem; \
- __rem = ((uint64_t)(n)) % __base; \
- (n) = ((uint64_t)(n)) / __base; \
- __rem; \
-})
-#elif BITS_PER_LONG == 32
-# define lov_do_div64(n, base) ({ \
- uint64_t __rem; \
- if ((sizeof(base) > 4) && (((base) & 0xffffffff00000000ULL) != 0)) { \
- int __remainder; \
- LASSERTF(!((base) & (LOV_MIN_STRIPE_SIZE - 1)), "64 bit lov " \
- "division %llu / %llu\n", (n), (uint64_t)(base)); \
- __remainder = (n) & (LOV_MIN_STRIPE_SIZE - 1); \
- (n) >>= LOV_MIN_STRIPE_BITS; \
- __rem = do_div(n, (base) >> LOV_MIN_STRIPE_BITS); \
- __rem <<= LOV_MIN_STRIPE_BITS; \
- __rem += __remainder; \
- } else { \
- __rem = do_div(n, base); \
- } \
- __rem; \
-})
-#endif
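
As the comment above states, lov_do_div64(n, base) evaluates to n % base and leaves n / base in n; the 32-bit branch merely pre-shifts by LOV_MIN_STRIPE_BITS so that do_div() can work with a 32-bit divisor. A small standalone sketch of those semantics in plain userspace C (no kernel headers; the helper name is invented and only mirrors the 64-bit branch):

#include <stdint.h>
#include <stdio.h>

/* Same contract as lov_do_div64(): return n % base, leave n / base in *n. */
static uint64_t div64_demo(uint64_t *n, uint64_t base)
{
	uint64_t rem = *n % base;

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t off = (5ULL << 20) + 4096;	/* file offset: 5 MiB + 4 KiB */
	uint64_t stripe_size = 1ULL << 20;	/* 1 MiB stripes */
	uint64_t in_stripe = div64_demo(&off, stripe_size);

	/* off now holds the stripe index, in_stripe the offset inside it. */
	printf("stripe %llu, offset-in-stripe %llu\n",
	       (unsigned long long)off, (unsigned long long)in_stripe);
	return 0;
}
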
-
-struct lov_request {
- struct obd_info rq_oi;
- struct lov_request_set *rq_rqset;
-
- struct list_head rq_link;
-
- int rq_idx; /* index in lov->tgts array */
- int rq_stripe; /* stripe number */
- int rq_complete;
- int rq_rc;
-
- u32 rq_oabufs;
- u32 rq_pgaidx;
-};
-
-struct lov_request_set {
- struct ldlm_enqueue_info *set_ei;
- struct obd_info *set_oi;
- atomic_t set_refcount;
- struct obd_export *set_exp;
- /* XXX: There is @set_exp already, however obd_statfs gets obd_device
- only. */
- struct obd_device *set_obd;
- int set_count;
- atomic_t set_completes;
- atomic_t set_success;
- atomic_t set_finish_checked;
- struct llog_cookie *set_cookies;
- int set_cookie_sent;
- struct obd_trans_info *set_oti;
- struct list_head set_list;
- wait_queue_head_t set_waitq;
- spinlock_t set_lock;
-};
-
-extern struct kmem_cache *lov_oinfo_slab;
-
-extern struct lu_kmem_descr lov_caches[];
-
-void lov_finish_set(struct lov_request_set *set);
-
-static inline void lov_get_reqset(struct lov_request_set *set)
-{
- LASSERT(set != NULL);
- LASSERT(atomic_read(&set->set_refcount) > 0);
- atomic_inc(&set->set_refcount);
-}
-
-static inline void lov_put_reqset(struct lov_request_set *set)
-{
- if (atomic_dec_and_test(&set->set_refcount))
- lov_finish_set(set);
-}
-
-#define lov_uuid2str(lv, index) \
- (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid)
-
-/* lov_merge.c */
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
- struct lov_stripe_md *lsm, int stripeno, int *set);
-int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
- u64 size, int shrink);
-int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
- struct ost_lvb *lvb, __u64 *kms_place);
-
-/* lov_offset.c */
-u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size,
- int stripeno);
-int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
- int stripeno, u64 *u64);
-u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size,
- int stripeno);
-int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
- u64 start, u64 end,
- u64 *obd_start, u64 *obd_end);
-int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off);
-
-/* lov_qos.c */
-#define LOV_USES_ASSIGNED_STRIPE 0
-#define LOV_USES_DEFAULT_STRIPE 1
-int qos_add_tgt(struct obd_device *obd, __u32 index);
-int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt);
-void qos_shrink_lsm(struct lov_request_set *set);
-int qos_prep_create(struct obd_export *exp, struct lov_request_set *set);
-void qos_update(struct lov_obd *lov);
-void qos_statfs_done(struct lov_obd *lov);
-void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait);
-int qos_remedy_create(struct lov_request_set *set, struct lov_request *req);
-
-/* lov_request.c */
-void lov_set_add_req(struct lov_request *req, struct lov_request_set *set);
-int lov_set_finished(struct lov_request_set *set, int idempotent);
-void lov_update_set(struct lov_request_set *set,
- struct lov_request *req, int rc);
-int lov_update_common_set(struct lov_request_set *set,
- struct lov_request *req, int rc);
-int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx);
-int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct lov_request_set **reqset);
-int lov_fini_getattr_set(struct lov_request_set *set);
-int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obdo *src_oa, struct lov_stripe_md *lsm,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset);
-int lov_fini_destroy_set(struct lov_request_set *set);
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset);
-int lov_update_setattr_set(struct lov_request_set *set,
- struct lov_request *req, int rc);
-int lov_fini_setattr_set(struct lov_request_set *set);
-int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
- struct lov_request_set **reqset);
-void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
- int success);
-int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- int success);
-int lov_fini_statfs_set(struct lov_request_set *set);
-int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc);
-
-/* lov_obd.c */
-void lov_fix_desc(struct lov_desc *desc);
-void lov_fix_desc_stripe_size(__u64 *val);
-void lov_fix_desc_stripe_count(__u32 *val);
-void lov_fix_desc_pattern(__u32 *val);
-void lov_fix_desc_qos_maxage(__u32 *val);
-__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count);
-int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
- struct obd_connect_data *data);
-int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
-int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
- __u32 *indexp, int *genp);
-int lov_del_target(struct obd_device *obd, __u32 index,
- struct obd_uuid *uuidp, int gen);
-
-/* lov_pack.c */
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm,
- struct lov_stripe_md *lsm);
-int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes);
-int lov_getstripe(struct obd_export *exp,
- struct lov_stripe_md *lsm, struct lov_user_md *lump);
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
- int pattern, int magic);
-int lov_free_memmd(struct lov_stripe_md **lsmp);
-
-void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm);
-void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm);
-void lov_dump_lmm_common(int level, void *lmmp);
-void lov_dump_lmm(int level, void *lmm);
-
-/* lov_ea.c */
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size);
-void lsm_free_plain(struct lov_stripe_md *lsm);
-void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm);
-
-/* lproc_lov.c */
-extern const struct file_operations lov_proc_target_fops;
-void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars);
-
-/* lov_cl.c */
-extern struct lu_device_type lov_device_type;
-
-/* pools */
-extern cfs_hash_ops_t pool_hash_operations;
-/* ost_pool methods */
-int lov_ost_pool_init(struct ost_pool *op, unsigned int count);
-int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count);
-int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count);
-int lov_ost_pool_remove(struct ost_pool *op, __u32 idx);
-int lov_ost_pool_free(struct ost_pool *op);
-
-/* high level pool methods */
-int lov_pool_new(struct obd_device *obd, char *poolname);
-int lov_pool_del(struct obd_device *obd, char *poolname);
-int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname);
-int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname);
-void lov_dump_pool(int level, struct pool_desc *pool);
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname);
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
-void lov_pool_putref(struct pool_desc *pool);
-
-static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
-{
- LASSERT(atomic_read(&lsm->lsm_refc) > 0);
- atomic_inc(&lsm->lsm_refc);
- return lsm;
-}
-
-static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi)
-{
- if (unlikely(loi->loi_oi.oi.oi_id == 0 &&
- loi->loi_oi.oi.oi_seq == 0 &&
- loi->loi_ost_idx == 0 &&
- loi->loi_ost_gen == 0))
- return true;
-
- return false;
-}
-
-
-#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
deleted file mode 100644
index 489227ae1256..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ /dev/null
@@ -1,993 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_io for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-static inline void lov_sub_enter(struct lov_io_sub *sub)
-{
- sub->sub_reenter++;
-}
-static inline void lov_sub_exit(struct lov_io_sub *sub)
-{
- sub->sub_reenter--;
-}
-
-static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
- struct lov_io_sub *sub)
-{
- if (sub->sub_io != NULL) {
- if (sub->sub_io_initialized) {
- lov_sub_enter(sub);
- cl_io_fini(sub->sub_env, sub->sub_io);
- lov_sub_exit(sub);
- sub->sub_io_initialized = 0;
- lio->lis_active_subios--;
- }
- if (sub->sub_stripe == lio->lis_single_subio_index)
- lio->lis_single_subio_index = -1;
- else if (!sub->sub_borrowed)
- kfree(sub->sub_io);
- sub->sub_io = NULL;
- }
- if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
- if (!sub->sub_borrowed)
- cl_env_put(sub->sub_env, &sub->sub_refcheck);
- sub->sub_env = NULL;
- }
-}
-
-static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
- int stripe, loff_t start, loff_t end)
-{
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- struct cl_io *parent = lio->lis_cl.cis_io;
-
- switch (io->ci_type) {
- case CIT_SETATTR: {
- io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
- io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
- if (cl_io_is_trunc(io)) {
- loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;
-
- new_size = lov_size_to_stripe(lsm, new_size, stripe);
- io->u.ci_setattr.sa_attr.lvb_size = new_size;
- }
- break;
- }
- case CIT_FAULT: {
- struct cl_object *obj = parent->ci_obj;
- loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
-
- io->u.ci_fault = parent->u.ci_fault;
- off = lov_size_to_stripe(lsm, off, stripe);
- io->u.ci_fault.ft_index = cl_index(obj, off);
- break;
- }
- case CIT_FSYNC: {
- io->u.ci_fsync.fi_start = start;
- io->u.ci_fsync.fi_end = end;
- io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
- io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
- break;
- }
- case CIT_READ:
- case CIT_WRITE: {
- io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent);
- if (cl_io_is_append(parent)) {
- io->u.ci_wr.wr_append = 1;
- } else {
- io->u.ci_rw.crw_pos = start;
- io->u.ci_rw.crw_count = end - start;
- }
- break;
- }
- default:
- break;
- }
-}
-
-static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
- struct lov_io_sub *sub)
-{
- struct lov_object *lov = lio->lis_object;
- struct lov_device *ld = lu2lov_dev(lov2cl(lov)->co_lu.lo_dev);
- struct cl_io *sub_io;
- struct cl_object *sub_obj;
- struct cl_io *io = lio->lis_cl.cis_io;
-
- int stripe = sub->sub_stripe;
- int result;
-
- LASSERT(sub->sub_io == NULL);
- LASSERT(sub->sub_env == NULL);
- LASSERT(sub->sub_stripe < lio->lis_stripe_count);
-
- if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL))
- return -EIO;
-
- result = 0;
- sub->sub_io_initialized = 0;
- sub->sub_borrowed = 0;
-
- if (lio->lis_mem_frozen) {
- LASSERT(mutex_is_locked(&ld->ld_mutex));
- sub->sub_io = &ld->ld_emrg[stripe]->emrg_subio;
- sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
- sub->sub_borrowed = 1;
- } else {
- void *cookie;
-
- /* obtain new environment */
- cookie = cl_env_reenter();
- sub->sub_env = cl_env_get(&sub->sub_refcheck);
- cl_env_reexit(cookie);
- if (IS_ERR(sub->sub_env))
- result = PTR_ERR(sub->sub_env);
-
- if (result == 0) {
- /*
- * First sub-io. Use ->lis_single_subio to
- * avoid dynamic allocation.
- */
- if (lio->lis_active_subios == 0) {
- sub->sub_io = &lio->lis_single_subio;
- lio->lis_single_subio_index = stripe;
- } else {
- sub->sub_io = kzalloc(sizeof(*sub->sub_io),
- GFP_NOFS);
- if (!sub->sub_io)
- result = -ENOMEM;
- }
- }
- }
-
- if (result == 0) {
- sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]);
- sub_io = sub->sub_io;
-
- sub_io->ci_obj = sub_obj;
- sub_io->ci_result = 0;
-
- sub_io->ci_parent = io;
- sub_io->ci_lockreq = io->ci_lockreq;
- sub_io->ci_type = io->ci_type;
- sub_io->ci_no_srvlock = io->ci_no_srvlock;
- sub_io->ci_noatime = io->ci_noatime;
-
- lov_sub_enter(sub);
- result = cl_io_sub_init(sub->sub_env, sub_io,
- io->ci_type, sub_obj);
- lov_sub_exit(sub);
- if (result >= 0) {
- lio->lis_active_subios++;
- sub->sub_io_initialized = 1;
- result = 0;
- }
- }
- if (result != 0)
- lov_io_sub_fini(env, lio, sub);
- return result;
-}
-
-struct lov_io_sub *lov_sub_get(const struct lu_env *env,
- struct lov_io *lio, int stripe)
-{
- int rc;
- struct lov_io_sub *sub = &lio->lis_subs[stripe];
-
- LASSERT(stripe < lio->lis_stripe_count);
-
- if (!sub->sub_io_initialized) {
- sub->sub_stripe = stripe;
- rc = lov_io_sub_init(env, lio, sub);
- } else
- rc = 0;
- if (rc == 0)
- lov_sub_enter(sub);
- else
- sub = ERR_PTR(rc);
- return sub;
-}
-
-void lov_sub_put(struct lov_io_sub *sub)
-{
- lov_sub_exit(sub);
-}
-
-/*****************************************************************************
- *
- * Lov io operations.
- *
- */
-
-static int lov_page_stripe(const struct cl_page *page)
-{
- struct lovsub_object *subobj;
-
- subobj = lu2lovsub(
- lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
- &lovsub_device_type));
- LASSERT(subobj != NULL);
- return subobj->lso_index;
-}
-
-struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
- const struct cl_page_slice *slice)
-{
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- struct cl_page *page = slice->cpl_page;
- int stripe;
-
- LASSERT(lio->lis_cl.cis_io != NULL);
- LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
- LASSERT(lsm != NULL);
- LASSERT(lio->lis_nr_subios > 0);
-
- stripe = lov_page_stripe(page);
- return lov_sub_get(env, lio, stripe);
-}
-
-
-static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
- struct cl_io *io)
-{
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- int result;
-
- LASSERT(lio->lis_object != NULL);
-
-	/*
-	 * This needs to be optimized: we can't afford to allocate memory
-	 * when writing a page. -jay
-	 */
- lio->lis_subs =
- libcfs_kvzalloc(lsm->lsm_stripe_count *
- sizeof(lio->lis_subs[0]),
- GFP_NOFS);
- if (lio->lis_subs != NULL) {
- lio->lis_nr_subios = lio->lis_stripe_count;
- lio->lis_single_subio_index = -1;
- lio->lis_active_subios = 0;
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-static void lov_io_slice_init(struct lov_io *lio,
- struct lov_object *obj, struct cl_io *io)
-{
- io->ci_result = 0;
- lio->lis_object = obj;
-
- LASSERT(obj->lo_lsm != NULL);
- lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count;
-
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- lio->lis_pos = io->u.ci_rw.crw_pos;
- lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
- lio->lis_io_endpos = lio->lis_endpos;
- if (cl_io_is_append(io)) {
- LASSERT(io->ci_type == CIT_WRITE);
- lio->lis_pos = 0;
- lio->lis_endpos = OBD_OBJECT_EOF;
- }
- break;
-
- case CIT_SETATTR:
- if (cl_io_is_trunc(io))
- lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
- else
- lio->lis_pos = 0;
- lio->lis_endpos = OBD_OBJECT_EOF;
- break;
-
- case CIT_FAULT: {
- pgoff_t index = io->u.ci_fault.ft_index;
- lio->lis_pos = cl_offset(io->ci_obj, index);
- lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
- break;
- }
-
- case CIT_FSYNC: {
- lio->lis_pos = io->u.ci_fsync.fi_start;
- lio->lis_endpos = io->u.ci_fsync.fi_end;
- break;
- }
-
- case CIT_MISC:
- lio->lis_pos = 0;
- lio->lis_endpos = OBD_OBJECT_EOF;
- break;
-
- default:
- LBUG();
- }
-}
-
-static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_object *lov = cl2lov(ios->cis_obj);
- int i;
-
- if (lio->lis_subs != NULL) {
- for (i = 0; i < lio->lis_nr_subios; i++)
- lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
- kvfree(lio->lis_subs);
- lio->lis_nr_subios = 0;
- }
-
- LASSERT(atomic_read(&lov->lo_active_ios) > 0);
- if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
-}
-
-static u64 lov_offset_mod(u64 val, int delta)
-{
- if (val != OBD_OBJECT_EOF)
- val += delta;
- return val;
-}
-
-static int lov_io_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- struct lov_io_sub *sub;
- u64 endpos;
- u64 start;
- u64 end;
- int stripe;
- int rc = 0;
-
- endpos = lov_offset_mod(lio->lis_endpos, -1);
- for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) {
- if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos,
- endpos, &start, &end))
- continue;
-
- if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) {
- if (ios->cis_io->ci_type == CIT_READ ||
- ios->cis_io->ci_type == CIT_WRITE ||
- ios->cis_io->ci_type == CIT_FAULT)
- return -EIO;
-
- continue;
- }
-
- end = lov_offset_mod(end, 1);
- sub = lov_sub_get(env, lio, stripe);
- if (!IS_ERR(sub)) {
- lov_io_sub_inherit(sub->sub_io, lio, stripe,
- start, end);
- rc = cl_io_iter_init(sub->sub_env, sub->sub_io);
- lov_sub_put(sub);
- CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
- stripe, start, end);
- } else
- rc = PTR_ERR(sub);
-
- if (!rc)
- list_add_tail(&sub->sub_linkage, &lio->lis_active);
- else
- break;
- }
- return rc;
-}
-
-static int lov_io_rw_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- __u64 start = io->u.ci_rw.crw_pos;
- loff_t next;
- unsigned long ssize = lsm->lsm_stripe_size;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- /* fast path for common case. */
- if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
-
- lov_do_div64(start, ssize);
- next = (start + 1) * ssize;
- if (next <= start * ssize)
- next = ~0ull;
-
- io->ci_continue = next < lio->lis_io_endpos;
- io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos,
- next) - io->u.ci_rw.crw_pos;
- lio->lis_pos = io->u.ci_rw.crw_pos;
- lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
- CDEBUG(D_VFSTRACE, "stripe: %llu chunk: [%llu, %llu) %llu\n",
- (__u64)start, lio->lis_pos, lio->lis_endpos,
- (__u64)lio->lis_io_endpos);
- }
-	/*
-	 * XXX The following call should be optimized: we know that
-	 * [lio->lis_pos, lio->lis_endpos) intersects exactly one stripe.
-	 */
- return lov_io_iter_init(env, ios);
-}
-
-static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
- int (*iofunc)(const struct lu_env *, struct cl_io *))
-{
- struct cl_io *parent = lio->lis_cl.cis_io;
- struct lov_io_sub *sub;
- int rc = 0;
-
- list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
- lov_sub_enter(sub);
- rc = iofunc(sub->sub_env, sub->sub_io);
- lov_sub_exit(sub);
- if (rc)
- break;
-
- if (parent->ci_result == 0)
- parent->ci_result = sub->sub_io->ci_result;
- }
- return rc;
-}
-
-static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- return lov_io_call(env, cl2lov_io(env, ios), cl_io_lock);
-}
-
-static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- return lov_io_call(env, cl2lov_io(env, ios), cl_io_start);
-}
-
-static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
-{
-	/*
-	 * It's possible that lov_io_start() wasn't called against this
-	 * sub-io, either because a previous sub-io failed or because the
-	 * upper layer completed the IO.
-	 */
- if (io->ci_state == CIS_IO_GOING)
- cl_io_end(env, io);
- else
- io->ci_state = CIS_IO_FINISHED;
- return 0;
-}
-
-static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
-{
- cl_io_iter_fini(env, io);
- return 0;
-}
-
-static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
-{
- cl_io_unlock(env, io);
- return 0;
-}
-
-static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- int rc;
-
- rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
- LASSERT(rc == 0);
-}
-
-static void lov_io_iter_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- int rc;
-
- rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
- LASSERT(rc == 0);
- while (!list_empty(&lio->lis_active))
- list_del_init(lio->lis_active.next);
-}
-
-static void lov_io_unlock(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- int rc;
-
- rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
- LASSERT(rc == 0);
-}
-
-
-static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
- struct cl_page_list *qin,
- int idx, int alloc)
-{
- return alloc ? &qin[idx] : &ld->ld_emrg[idx]->emrg_page_list;
-}
-
-/**
- * lov implementation of cl_operations::cio_submit() method. It takes a list
- * of pages in \a queue, splits it into per-stripe sub-lists, invokes
- * cl_io_submit() on underlying devices to submit sub-lists, and then splices
- * everything back.
- *
- * Major complication of this function is a need to handle memory cleansing:
- * cl_io_submit() is called to write out pages as a part of VM memory
- * reclamation, and hence it may not fail due to memory shortages (system
- * dead-locks otherwise). To deal with this, some resources (sub-lists,
- * sub-environment, etc.) are allocated per-device on "startup" (i.e., in a
- * not-memory cleansing context), and in case of memory shortage, these
- * pre-allocated resources are used by lov_io_submit() under
- * lov_device::ld_mutex mutex.
- */
-static int lov_io_submit(const struct lu_env *env,
- const struct cl_io_slice *ios,
- enum cl_req_type crt, struct cl_2queue *queue)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_object *obj = lio->lis_object;
- struct lov_device *ld = lu2lov_dev(lov2cl(obj)->co_lu.lo_dev);
- struct cl_page_list *qin = &queue->c2_qin;
- struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
- struct cl_page_list *stripes_qin = NULL;
- struct cl_page *page;
- struct cl_page *tmp;
- int stripe;
-
-#define QIN(stripe) lov_io_submit_qin(ld, stripes_qin, stripe, alloc)
-
- int rc = 0;
- int alloc =
- !(current->flags & PF_MEMALLOC);
-
- if (lio->lis_active_subios == 1) {
- int idx = lio->lis_single_subio_index;
- struct lov_io_sub *sub;
-
- LASSERT(idx < lio->lis_nr_subios);
- sub = lov_sub_get(env, lio, idx);
- LASSERT(!IS_ERR(sub));
- LASSERT(sub->sub_io == &lio->lis_single_subio);
- rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
- crt, queue);
- lov_sub_put(sub);
- return rc;
- }
-
- LASSERT(lio->lis_subs != NULL);
- if (alloc) {
- stripes_qin =
- libcfs_kvzalloc(sizeof(*stripes_qin) *
- lio->lis_nr_subios,
- GFP_NOFS);
- if (stripes_qin == NULL)
- return -ENOMEM;
-
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
- cl_page_list_init(&stripes_qin[stripe]);
- } else {
-		/*
-		 * If we get here, pageout & swap didn't help.
-		 * To avoid making things worse, don't even try to
-		 * allocate the memory with __GFP_NOWARN. -jay
-		 */
- mutex_lock(&ld->ld_mutex);
- lio->lis_mem_frozen = 1;
- }
-
- cl_2queue_init(cl2q);
- cl_page_list_for_each_safe(page, tmp, qin) {
- stripe = lov_page_stripe(page);
- cl_page_list_move(QIN(stripe), qin, page);
- }
-
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct lov_io_sub *sub;
- struct cl_page_list *sub_qin = QIN(stripe);
-
- if (list_empty(&sub_qin->pl_pages))
- continue;
-
- cl_page_list_splice(sub_qin, &cl2q->c2_qin);
- sub = lov_sub_get(env, lio, stripe);
- if (!IS_ERR(sub)) {
- rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
- crt, cl2q);
- lov_sub_put(sub);
- } else
- rc = PTR_ERR(sub);
- cl_page_list_splice(&cl2q->c2_qin, &queue->c2_qin);
- cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
- if (rc != 0)
- break;
- }
-
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct cl_page_list *sub_qin = QIN(stripe);
-
- if (list_empty(&sub_qin->pl_pages))
- continue;
-
- cl_page_list_splice(sub_qin, qin);
- }
-
- if (alloc) {
- kvfree(stripes_qin);
- } else {
- int i;
-
- for (i = 0; i < lio->lis_nr_subios; i++) {
- struct cl_io *cio = lio->lis_subs[i].sub_io;
-
- if (cio && cio == &ld->ld_emrg[i]->emrg_subio)
- lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
- }
- lio->lis_mem_frozen = 0;
- mutex_unlock(&ld->ld_mutex);
- }
-
- return rc;
-#undef QIN
-}
-
-static int lov_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
-
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
-}
-
-static int lov_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
-
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_commit_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
-}
-
-static int lov_io_fault_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_fault_io *fio;
- struct lov_io *lio;
- struct lov_io_sub *sub;
-
- fio = &ios->cis_io->u.ci_fault;
- lio = cl2lov_io(env, ios);
- sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page));
- if (IS_ERR(sub))
- return PTR_ERR(sub);
- sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob;
- lov_sub_put(sub);
- return lov_io_start(env, ios);
-}
-
-static void lov_io_fsync_end(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_io_sub *sub;
- unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
-
- *written = 0;
- list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
- struct cl_io *subio = sub->sub_io;
-
- lov_sub_enter(sub);
- lov_io_end_wrapper(sub->sub_env, subio);
- lov_sub_exit(sub);
-
- if (subio->ci_result == 0)
- *written += subio->u.ci_fsync.fi_nr_written;
- }
-}
-
-static const struct cl_io_operations lov_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_rw_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_end
- },
- [CIT_WRITE] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_rw_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_end
- },
- [CIT_SETATTR] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_end
- },
- [CIT_FAULT] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_fault_start,
- .cio_end = lov_io_end
- },
- [CIT_FSYNC] = {
- .cio_fini = lov_io_fini,
- .cio_iter_init = lov_io_iter_init,
- .cio_iter_fini = lov_io_iter_fini,
- .cio_lock = lov_io_lock,
- .cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
- .cio_end = lov_io_fsync_end
- },
- [CIT_MISC] = {
- .cio_fini = lov_io_fini
- }
- },
- .req_op = {
- [CRT_READ] = {
- .cio_submit = lov_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = lov_io_submit
- }
- },
- .cio_prepare_write = lov_io_prepare_write,
- .cio_commit_write = lov_io_commit_write
-};
-
-/*****************************************************************************
- *
- * Empty lov io operations.
- *
- */
-
-static void lov_empty_io_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct lov_object *lov = cl2lov(ios->cis_obj);
-
- if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
-}
-
-static void lov_empty_impossible(const struct lu_env *env,
- struct cl_io_slice *ios)
-{
- LBUG();
-}
-
-#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)
-
-/**
- * An io operation vector for files without stripes.
- */
-static const struct cl_io_operations lov_empty_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = lov_empty_io_fini,
-#if 0
- .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
- .cio_lock = LOV_EMPTY_IMPOSSIBLE,
- .cio_start = LOV_EMPTY_IMPOSSIBLE,
- .cio_end = LOV_EMPTY_IMPOSSIBLE
-#endif
- },
- [CIT_WRITE] = {
- .cio_fini = lov_empty_io_fini,
- .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
- .cio_lock = LOV_EMPTY_IMPOSSIBLE,
- .cio_start = LOV_EMPTY_IMPOSSIBLE,
- .cio_end = LOV_EMPTY_IMPOSSIBLE
- },
- [CIT_SETATTR] = {
- .cio_fini = lov_empty_io_fini,
- .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
- .cio_lock = LOV_EMPTY_IMPOSSIBLE,
- .cio_start = LOV_EMPTY_IMPOSSIBLE,
- .cio_end = LOV_EMPTY_IMPOSSIBLE
- },
- [CIT_FAULT] = {
- .cio_fini = lov_empty_io_fini,
- .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
- .cio_lock = LOV_EMPTY_IMPOSSIBLE,
- .cio_start = LOV_EMPTY_IMPOSSIBLE,
- .cio_end = LOV_EMPTY_IMPOSSIBLE
- },
- [CIT_FSYNC] = {
- .cio_fini = lov_empty_io_fini
- },
- [CIT_MISC] = {
- .cio_fini = lov_empty_io_fini
- }
- },
- .req_op = {
- [CRT_READ] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- },
- [CRT_WRITE] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- }
- },
- .cio_commit_write = LOV_EMPTY_IMPOSSIBLE
-};
-
-int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_object *lov = cl2lov(obj);
-
- INIT_LIST_HEAD(&lio->lis_active);
- lov_io_slice_init(lio, lov, io);
- if (io->ci_result == 0) {
- io->ci_result = lov_io_subio_init(env, lio, io);
- if (io->ci_result == 0) {
- cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
- atomic_inc(&lov->lo_active_ios);
- }
- }
- return io->ci_result;
-}
-
-int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_io *lio = lov_env_io(env);
- int result;
-
- lio->lis_object = lov;
- switch (io->ci_type) {
- default:
- LBUG();
- case CIT_MISC:
- case CIT_READ:
- result = 0;
- break;
- case CIT_FSYNC:
- case CIT_SETATTR:
- result = 1;
- break;
- case CIT_WRITE:
- result = -EBADF;
- break;
- case CIT_FAULT:
- result = -EFAULT;
- CERROR("Page fault on a file without stripes: "DFID"\n",
- PFID(lu_object_fid(&obj->co_lu)));
- break;
- }
- if (result == 0) {
- cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
- atomic_inc(&lov->lo_active_ios);
- }
-
- io->ci_result = result < 0 ? result : 0;
- return result != 0;
-}
-
-int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_io *lio = lov_env_io(env);
- int result;
-
- LASSERT(lov->lo_lsm != NULL);
- lio->lis_object = lov;
-
- switch (io->ci_type) {
- default:
- LASSERTF(0, "invalid type %d\n", io->ci_type);
- case CIT_MISC:
- case CIT_FSYNC:
- result = 1;
- break;
- case CIT_SETATTR:
-	/* A truncate to 0 is handled by the MDT:
-	 * - in open, for open with O_TRUNC
-	 * - in setattr, for truncate
-	 */
-	/* A truncate here is for size > 0, so it triggers a restore. */
- if (cl_io_is_trunc(io))
- io->ci_restore_needed = 1;
- result = -ENODATA;
- break;
- case CIT_READ:
- case CIT_WRITE:
- case CIT_FAULT:
- io->ci_restore_needed = 1;
- result = -ENODATA;
- break;
- }
- if (result == 0) {
- cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
- atomic_inc(&lov->lo_active_ios);
- }
-
- io->ci_result = result < 0 ? result : 0;
- return result != 0;
-}
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
deleted file mode 100644
index a6938085ff24..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ /dev/null
@@ -1,1197 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent);
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice);
-/*****************************************************************************
- *
- * Lov lock operations.
- *
- */
-
-static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
- struct cl_lock *parent,
- struct lov_lock_sub *lls)
-{
- struct lov_sublock_env *subenv;
- struct lov_io *lio = lov_env_io(env);
- struct cl_io *io = lio->lis_cl.cis_io;
- struct lov_io_sub *sub;
-
- subenv = &lov_env_session(env)->ls_subenv;
-
-	/*
-	 * FIXME: We tend to use the sub-io's env & io to call the sublock
-	 * lock operations because the osc lock sometimes stores control
-	 * variables in the thread's IO information (currently only lockless
-	 * information). However, if the lock's host (object) differs from the
-	 * object of the current IO, we have no way to get the subenv and
-	 * sub-io because they are not initialized at all. As a temporary fix,
-	 * in this case we still borrow the parent's env to call sublock
-	 * operations.
-	 */
- if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
- subenv->lse_env = env;
- subenv->lse_io = io;
- subenv->lse_sub = NULL;
- } else {
- sub = lov_sub_get(env, lio, lls->sub_stripe);
- if (!IS_ERR(sub)) {
- subenv->lse_env = sub->sub_env;
- subenv->lse_io = sub->sub_io;
- subenv->lse_sub = sub;
- } else {
- subenv = (void *)sub;
- }
- }
- return subenv;
-}
-
-static void lov_sublock_env_put(struct lov_sublock_env *subenv)
-{
- if (subenv && subenv->lse_sub)
- lov_sub_put(subenv->lse_sub);
-}
-
-static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock, int idx,
- struct lov_lock_link *link)
-{
- struct lovsub_lock *lsl;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
- int rc;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sublock));
-
- lsl = cl2sub_lock(sublock);
- /*
- * check that sub-lock doesn't have lock link to this top-lock.
- */
- LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
- LASSERT(idx < lck->lls_nr);
-
- lck->lls_sub[idx].sub_lock = lsl;
- lck->lls_nr_filled++;
- LASSERT(lck->lls_nr_filled <= lck->lls_nr);
- list_add_tail(&link->lll_list, &lsl->lss_parents);
- link->lll_idx = idx;
- link->lll_super = lck;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lov-child", sublock);
- lck->lls_sub[idx].sub_flags |= LSF_HELD;
- cl_lock_user_add(env, sublock);
-
- rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
- LASSERT(rc == 0); /* there is no way this can fail, currently */
-}
-
-static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
- const struct cl_io *io,
- struct lov_lock *lck,
- int idx, struct lov_lock_link **out)
-{
- struct cl_lock *sublock;
- struct cl_lock *parent;
- struct lov_lock_link *link;
-
- LASSERT(idx < lck->lls_nr);
-
- OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, GFP_NOFS);
- if (link != NULL) {
- struct lov_sublock_env *subenv;
- struct lov_lock_sub *lls;
- struct cl_lock_descr *descr;
-
- parent = lck->lls_cl.cls_lock;
- lls = &lck->lls_sub[idx];
- descr = &lls->sub_got;
-
- subenv = lov_sublock_env_get(env, parent, lls);
- if (!IS_ERR(subenv)) {
-			/* CAVEAT: Don't try to add a field to lov_lock_sub
-			 * to remember the sub-io, because a lock can be
-			 * cached while an IO cannot. This further means a
-			 * sublock might be referenced from a different IO
-			 * context. -jay */
-
- sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
- descr, "lov-parent", parent);
- lov_sublock_env_put(subenv);
- } else {
- /* error occurs. */
- sublock = (void *)subenv;
- }
-
- if (!IS_ERR(sublock))
- *out = link;
- else
- OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
- } else
- sublock = ERR_PTR(-ENOMEM);
- return sublock;
-}
-
-static void lov_sublock_unlock(const struct lu_env *env,
- struct lovsub_lock *lsl,
- struct cl_lock_closure *closure,
- struct lov_sublock_env *subenv)
-{
- lov_sublock_env_put(subenv);
- lsl->lss_active = NULL;
- cl_lock_disclosure(env, closure);
-}
-
-static int lov_sublock_lock(const struct lu_env *env,
- struct lov_lock *lck,
- struct lov_lock_sub *lls,
- struct cl_lock_closure *closure,
- struct lov_sublock_env **lsep)
-{
- struct lovsub_lock *sublock;
- struct cl_lock *child;
- int result = 0;
-
- LASSERT(list_empty(&closure->clc_list));
-
- sublock = lls->sub_lock;
- child = sublock->lss_cl.cls_lock;
- result = cl_lock_closure_build(env, child, closure);
- if (result == 0) {
- struct cl_lock *parent = closure->clc_origin;
-
- LASSERT(cl_lock_is_mutexed(child));
- sublock->lss_active = parent;
-
- if (unlikely((child->cll_state == CLS_FREEING) ||
- (child->cll_flags & CLF_CANCELLED))) {
- struct lov_lock_link *link;
- /*
- * we could race with lock deletion which temporarily
- * put the lock in freeing state, bug 19080.
- */
- LASSERT(!(lls->sub_flags & LSF_HELD));
-
- link = lov_lock_link_find(env, lck, sublock);
- LASSERT(link != NULL);
- lov_lock_unlink(env, link, sublock);
- lov_sublock_unlock(env, sublock, closure, NULL);
- lck->lls_cancel_race = 1;
- result = CLO_REPEAT;
- } else if (lsep) {
- struct lov_sublock_env *subenv;
- subenv = lov_sublock_env_get(env, parent, lls);
- if (IS_ERR(subenv)) {
- lov_sublock_unlock(env, sublock,
- closure, NULL);
- result = PTR_ERR(subenv);
- } else {
- *lsep = subenv;
- }
- }
- }
- return result;
-}
-
-/**
- * Updates the result of a top-lock operation from a result of sub-lock
- * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
- * over sub-locks and lov_subresult() is used to calculate return value of a
- * top-operation. To this end, possible return values of sub-operations are
- * ordered as
- *
- * - 0 success
- * - CLO_WAIT wait for event
- * - CLO_REPEAT repeat top-operation
- * - -ne fundamental error
- *
- * Top-level return code can only go down through this list. CLO_REPEAT
- * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
- * has to be rechecked by the upper layer.
- */
-static int lov_subresult(int result, int rc)
-{
- int result_rank;
- int rc_rank;
-
- LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
- "result = %d", result);
- LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
- "rc = %d\n", rc);
- CLASSERT(CLO_WAIT < CLO_REPEAT);
-
- /* calculate ranks in the ordering above */
- result_rank = result < 0 ? 1 + CLO_REPEAT : result;
- rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
-
- if (result_rank < rc_rank)
- result = rc;
- return result;
-}
-
-/**
- * Creates sub-locks for a given lov_lock for the first time.
- *
- * Goes through all sub-objects of top-object, and creates sub-locks on every
- * sub-object intersecting with top-lock extent. This is complicated by the
- * fact that top-lock (that is being created) can be accessed concurrently
- * through already created sub-locks (possibly shared with other top-locks).
- */
-static int lov_lock_sub_init(const struct lu_env *env,
- struct lov_lock *lck, const struct cl_io *io)
-{
- int result = 0;
- int i;
- int nr;
- u64 start;
- u64 end;
- u64 file_start;
- u64 file_end;
-
- struct lov_object *loo = cl2lov(lck->lls_cl.cls_obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- lck->lls_orig = parent->cll_descr;
- file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
- file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
-
- for (i = 0, nr = 0; i < r0->lo_nr; i++) {
-		/*
-		 * XXX For wide striping a smarter algorithm that breaks out
-		 * of the loop early would be desirable.
-		 */
- if (likely(r0->lo_sub[i] != NULL) &&
- lov_stripe_intersects(loo->lo_lsm, i,
- file_start, file_end, &start, &end))
- nr++;
- }
- LASSERT(nr > 0);
- lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS);
- if (lck->lls_sub == NULL)
- return -ENOMEM;
-
- lck->lls_nr = nr;
- /*
- * First, fill in sub-lock descriptions in
- * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
- * (called below in this function, and by lov_lock_enqueue()) to
- * create sub-locks. At this moment, no other thread can access
- * top-lock.
- */
- for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
- if (likely(r0->lo_sub[i] != NULL) &&
- lov_stripe_intersects(loo->lo_lsm, i,
- file_start, file_end, &start, &end)) {
- struct cl_lock_descr *descr;
-
- descr = &lck->lls_sub[nr].sub_descr;
-
- LASSERT(descr->cld_obj == NULL);
- descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
- descr->cld_start = cl_index(descr->cld_obj, start);
- descr->cld_end = cl_index(descr->cld_obj, end);
- descr->cld_mode = parent->cll_descr.cld_mode;
- descr->cld_gid = parent->cll_descr.cld_gid;
- descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
- /* XXX has no effect */
- lck->lls_sub[nr].sub_got = *descr;
- lck->lls_sub[nr].sub_stripe = i;
- nr++;
- }
- }
- LASSERT(nr == lck->lls_nr);
-
- /*
- * Some sub-locks can be missing at this point. This is not a problem,
- * because enqueue will create them anyway. Main duty of this function
- * is to fill in sub-lock descriptions in a race free manner.
- */
- return result;
-}
-
-static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
- int i, int deluser, int rc)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (lck->lls_sub[i].sub_flags & LSF_HELD) {
- struct cl_lock *sublock;
- int dying;
-
- LASSERT(lck->lls_sub[i].sub_lock != NULL);
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
-
- lck->lls_sub[i].sub_flags &= ~LSF_HELD;
- if (deluser)
- cl_lock_user_del(env, sublock);
- /*
- * If the last hold is released, and cancellation is pending
- * for a sub-lock, release parent mutex, to avoid keeping it
- * while sub-lock is being paged out.
- */
- dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
- sublock->cll_descr.cld_mode == CLM_GROUP ||
- (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
- sublock->cll_holds == 1;
- if (dying)
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- if (dying) {
- cl_lock_mutex_get(env, parent);
- rc = lov_subresult(rc, CLO_REPEAT);
- }
-		/*
-		 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
-		 * not backed by a reference on a sub-lock.
-		 * lovsub_lock_delete() will clear lck->lls_sub[i].sub_lock
-		 * under semaphores, just before sub-lock is destroyed.
-		 */
- }
- return rc;
-}
-
-static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
- int i)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
- struct cl_lock *sublock;
-
- LASSERT(lck->lls_sub[i].sub_lock != NULL);
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
- LASSERT(sublock->cll_state != CLS_FREEING);
-
- lck->lls_sub[i].sub_flags |= LSF_HELD;
-
- cl_lock_get_trust(sublock);
- cl_lock_hold_add(env, sublock, "lov-parent", parent);
- cl_lock_user_add(env, sublock);
- cl_lock_put(env, sublock);
- }
-}
-
-static void lov_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct lov_lock *lck;
- int i;
-
- lck = cl2lov_lock(slice);
- LASSERT(lck->lls_nr_filled == 0);
- if (lck->lls_sub != NULL) {
- for (i = 0; i < lck->lls_nr; ++i)
- /*
- * No sub-locks exists at this point, as sub-lock has
- * a reference on its parent.
- */
- LASSERT(lck->lls_sub[i].sub_lock == NULL);
- kvfree(lck->lls_sub);
- }
- OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
-}
-
-static int lov_lock_enqueue_wait(const struct lu_env *env,
- struct lov_lock *lck,
- struct cl_lock *sublock)
-{
- struct cl_lock *lock = lck->lls_cl.cls_lock;
- int result;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
- cl_lock_mutex_put(env, lock);
- result = cl_lock_enqueue_wait(env, sublock, 0);
- cl_lock_mutex_get(env, lock);
- return result ?: CLO_REPEAT;
-}
-
-/**
- * Tries to advance a state machine of a given sub-lock toward enqueuing of
- * the top-lock.
- *
- * \retval 0 if state-transition can proceed
- * \retval -ve otherwise.
- */
-static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock,
- struct cl_io *io, __u32 enqflags, int last)
-{
- int result;
-
- /* first, try to enqueue a sub-lock ... */
- result = cl_enqueue_try(env, sublock, io, enqflags);
- if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
- /* if it is enqueued, try to `wait' on it---maybe it's already
- * granted */
- result = cl_wait_try(env, sublock);
- if (result == CLO_REENQUEUED)
- result = CLO_WAIT;
- }
- /*
- * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
- * parallel, otherwise---enqueue has to wait until sub-lock is granted
- * before proceeding to the next one.
- */
- if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
- (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
- result = 0;
- return result;
-}
-
-/**
- * Helper function for lov_lock_enqueue() that creates missing sub-lock.
- */
-static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
- struct cl_io *io, struct lov_lock *lck, int idx)
-{
- struct lov_lock_link *link = NULL;
- struct cl_lock *sublock;
- int result;
-
- LASSERT(parent->cll_depth == 1);
- cl_lock_mutex_put(env, parent);
- sublock = lov_sublock_alloc(env, io, lck, idx, &link);
- if (!IS_ERR(sublock))
- cl_lock_mutex_get(env, sublock);
- cl_lock_mutex_get(env, parent);
-
- if (!IS_ERR(sublock)) {
- cl_lock_get_trust(sublock);
- if (parent->cll_state == CLS_QUEUING &&
- lck->lls_sub[idx].sub_lock == NULL) {
- lov_sublock_adopt(env, lck, sublock, idx, link);
- } else {
- OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
-			/* another thread allocated the sub-lock, or the
-			 * enqueue is no longer in progress */
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- cl_lock_mutex_get(env, parent);
- }
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- result = CLO_REPEAT;
- } else
- result = PTR_ERR(sublock);
- return result;
-}
-
-/**
- * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
- * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
- * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
- * state machines in the face of sub-locks sharing (by multiple top-locks),
- * and concurrent sub-lock cancellations.
- */
-static int lov_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags)
-{
- struct cl_lock *lock = slice->cls_lock;
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, lock);
- int i;
- int result;
- enum cl_lock_state minstate;
-
- for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct lov_lock_sub *lls;
- struct cl_lock *sublock;
- struct lov_sublock_env *subenv;
-
- if (lock->cll_state != CLS_QUEUING) {
-			/*
-			 * The lock might have left the QUEUING state if a
-			 * previous iteration released its mutex. Stop
-			 * enqueuing in this case and let the upper layer
-			 * decide what to do.
-			 */
- LASSERT(i > 0 && result != 0);
- break;
- }
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- if (sub == NULL) {
- result = lov_sublock_fill(env, lock, io, lck, i);
- /* lov_sublock_fill() released @lock mutex,
- * restart. */
- break;
- }
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- lov_sublock_hold(env, lck, i);
- rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
- subenv->lse_io, enqflags,
- i == lck->lls_nr - 1);
- minstate = min(minstate, sublock->cll_state);
- if (rc == CLO_WAIT) {
- switch (sublock->cll_state) {
- case CLS_QUEUING:
- /* take recursive mutex, the lock is
- * released in lov_lock_enqueue_wait.
- */
- cl_lock_mutex_get(env, sublock);
- lov_sublock_unlock(env, sub, closure,
- subenv);
- rc = lov_lock_enqueue_wait(env, lck,
- sublock);
- break;
- case CLS_CACHED:
- cl_lock_get(sublock);
- /* take recursive mutex of sublock */
- cl_lock_mutex_get(env, sublock);
-				/* We need to release all locks in the closure,
-				 * otherwise it may deadlock. LU-2683. */
- lov_sublock_unlock(env, sub, closure,
- subenv);
- /* sublock and parent are held. */
- rc = lov_sublock_release(env, lck, i,
- 1, rc);
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- break;
- default:
- lov_sublock_unlock(env, sub, closure,
- subenv);
- break;
- }
- } else {
- LASSERT(sublock->cll_conflict == NULL);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int i;
- int result;
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
-		/* The top-lock state cannot change concurrently, because a
-		 * single thread (the one that released the last hold)
-		 * carries unlocking through to completion. */
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (sub == NULL)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (lls->sub_flags & LSF_HELD) {
- LASSERT(sublock->cll_state == CLS_HELD ||
- sublock->cll_state == CLS_ENQUEUED);
- rc = cl_unuse_try(subenv->lse_env, sublock);
- rc = lov_sublock_release(env, lck, i, 0, rc);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
- }
-
- if (result == 0 && lck->lls_cancel_race) {
- lck->lls_cancel_race = 0;
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
-}
-
-
-static void lov_lock_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int i;
- int result;
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
-		/* The top-lock state cannot change concurrently, because a
-		 * single thread (the one that released the last hold)
-		 * carries unlocking through to completion. */
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (sub == NULL)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (!(lls->sub_flags & LSF_HELD)) {
- lov_sublock_unlock(env, sub, closure, subenv);
- continue;
- }
-
- switch (sublock->cll_state) {
- case CLS_HELD:
- rc = cl_unuse_try(subenv->lse_env, sublock);
- lov_sublock_release(env, lck, i, 0, 0);
- break;
- default:
- lov_sublock_release(env, lck, i, 1, 0);
- break;
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
-
- if (rc == CLO_REPEAT) {
- --i;
- continue;
- }
-
- result = lov_subresult(result, rc);
- }
-
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
- "lov_lock_cancel fails with %d.\n", result);
-
- cl_lock_closure_fini(closure);
-}
-
-static int lov_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- enum cl_lock_state minstate;
- int reenqueued;
- int result;
- int i;
-
-again:
- for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
- i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- LASSERT(sub != NULL);
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state >= CLS_ENQUEUED);
- if (sublock->cll_state < CLS_HELD)
- rc = cl_wait_try(env, sublock);
-
- minstate = min(minstate, sublock->cll_state);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- if (rc == CLO_REENQUEUED) {
- reenqueued++;
- rc = 0;
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
-	/* Each sublock can only be re-enqueued once, so this will not loop
-	 * forever. */
- if (result == 0 && reenqueued != 0)
- goto again;
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int result;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (sub == NULL) {
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- result = -ESTALE;
- break;
- }
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state != CLS_FREEING);
- lov_sublock_hold(env, lck, i);
- if (sublock->cll_state == CLS_CACHED) {
- rc = cl_use_try(subenv->lse_env, sublock, 0);
- if (rc != 0)
- rc = lov_sublock_release(env, lck,
- i, 1, rc);
- } else if (sublock->cll_state == CLS_NEW) {
- /* Sub-lock might have been canceled, while
- * top-lock was cached. */
- result = -ESTALE;
- lov_sublock_release(env, lck, i, 1, result);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
-
- if (lck->lls_cancel_race) {
-		/*
-		 * If unlocking happened at the same time, the sub-lock state
-		 * should be FREEING and lov_sublock_lock() should return
-		 * CLO_REPEAT. In this case, return -ESTALE so that the upper
-		 * layer resets the lock state to NEW.
-		 */
- lck->lls_cancel_race = 0;
- LASSERT(result != 0);
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
-}
-
-#if 0
-static int lock_lock_multi_match()
-{
- struct cl_lock *lock = slice->cls_lock;
- struct cl_lock_descr *subneed = &lov_env_info(env)->lti_ldescr;
- struct lov_object *loo = cl2lov(lov->lls_cl.cls_obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct lov_lock_sub *sub;
- struct cl_object *subobj;
- u64 fstart;
- u64 fend;
- u64 start;
- u64 end;
- int i;
-
- fstart = cl_offset(need->cld_obj, need->cld_start);
- fend = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
- subneed->cld_mode = need->cld_mode;
- cl_lock_mutex_get(env, lock);
- for (i = 0; i < lov->lls_nr; ++i) {
- sub = &lov->lls_sub[i];
- if (sub->sub_lock == NULL)
- continue;
- subobj = sub->sub_descr.cld_obj;
- if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
- fstart, fend, &start, &end))
- continue;
- subneed->cld_start = cl_index(subobj, start);
- subneed->cld_end = cl_index(subobj, end);
- subneed->cld_obj = subobj;
- if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
- result = 0;
- break;
- }
- }
- cl_lock_mutex_put(env, lock);
-}
-#endif
-
-/**
- * Check if the extent region \a descr is covered by \a child against the
- * specific \a stripe.
- */
-static int lov_lock_stripe_is_matching(const struct lu_env *env,
- struct lov_object *lov, int stripe,
- const struct cl_lock_descr *child,
- const struct cl_lock_descr *descr)
-{
- struct lov_stripe_md *lsm = lov->lo_lsm;
- u64 start;
- u64 end;
- int result;
-
- if (lov_r0(lov)->lo_nr == 1)
- return cl_lock_ext_match(child, descr);
-
- /*
- * For a multi-stripes object:
- * - make sure the descr only covers child's stripe, and
- * - check if extent is matching.
- */
- start = cl_offset(&lov->lo_cl, descr->cld_start);
- end = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
- result = 0;
- /* glimpse should work on the object with LOV EA hole. */
- if (end - start <= lsm->lsm_stripe_size) {
- int idx;
-
- idx = lov_stripe_number(lsm, start);
- if (idx == stripe ||
- unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) {
- idx = lov_stripe_number(lsm, end);
- if (idx == stripe ||
- unlikely(lov_r0(lov)->lo_sub[idx] == NULL))
- result = 1;
- }
- }
-
- if (result != 0) {
- struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
- u64 sub_start;
- u64 sub_end;
-
- subd->cld_obj = NULL; /* don't need sub object at all */
- subd->cld_mode = descr->cld_mode;
- subd->cld_gid = descr->cld_gid;
- result = lov_stripe_intersects(lsm, stripe, start, end,
- &sub_start, &sub_end);
- LASSERT(result);
- subd->cld_start = cl_index(child->cld_obj, sub_start);
- subd->cld_end = cl_index(child->cld_obj, sub_end);
- result = cl_lock_ext_match(child, subd);
- }
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_fits_into() method.
- *
- * Checks whether a lock (given by \a slice) is suitable for \a
- * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
- * O_APPEND write.
- *
- * \see ccc_lock_fits_into().
- */
-static int lov_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct lov_lock *lov = cl2lov_lock(slice);
- struct lov_object *obj = cl2lov(slice->cls_obj);
- int result;
-
- LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
- LASSERT(lov->lls_nr > 0);
-
-	/* For the top lock it is necessary to match the enqueue flags;
-	 * otherwise we run into problems if a sublock is missing and is
-	 * re-enqueued. */
- if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
- return 0;
-
- if (need->cld_mode == CLM_GROUP)
- /*
-		 * Always allow matching a group lock.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- else if (lov->lls_nr == 1) {
- struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
- result = lov_lock_stripe_is_matching(env,
- cl2lov(slice->cls_obj),
- lov->lls_sub[0].sub_stripe,
- got, need);
- } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
- !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
- /*
- * Multi-stripe locks are only suitable for `quick' IO and for
- * glimpse.
- */
- result = 0;
- else
- /*
- * Most general case: multi-stripe existing lock, and
- * (potentially) multi-stripe @need lock. Check that @need is
- * covered by @lov's sub-locks.
- *
- * For now, ignore lock expansions made by the server, and
- * match against original lock extent.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
- PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
- lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
- result);
- return result;
-}
-
-void lov_lock_unlink(const struct lu_env *env,
- struct lov_lock_link *link, struct lovsub_lock *sub)
-{
- struct lov_lock *lck = link->lll_super;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_del_init(&link->lll_list);
- LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
- /* yank this sub-lock from parent's array */
- lck->lls_sub[link->lll_idx].sub_lock = NULL;
- LASSERT(lck->lls_nr_filled > 0);
- lck->lls_nr_filled--;
- lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
- cl_lock_put(env, parent);
- OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
-}
-
-struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
- struct lov_lock *lck,
- struct lovsub_lock *sub)
-{
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- if (scan->lll_super == lck)
- return scan;
- }
- return NULL;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked for "top-to-bottom" delete, when lock destruction starts from the
- * top-lock, e.g., as a result of inode destruction.
- *
- * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
- * this is done separately elsewhere:
- *
- * - for inode destruction, lov_object_delete() calls cl_object_kill() for
- * each sub-object, purging its locks;
- *
- * - in other cases (e.g., a fatal error with a top-lock) sub-locks are
- * left in the cache.
- */
-static void lov_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- struct lov_lock_link *link;
- int rc;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
-
- for (i = 0; i < lck->lls_nr; ++i) {
- struct lov_lock_sub *lls = &lck->lls_sub[i];
- struct lovsub_lock *lsl = lls->sub_lock;
-
- if (lsl == NULL) /* already removed */
- continue;
-
- rc = lov_sublock_lock(env, lck, lls, closure, NULL);
- if (rc == CLO_REPEAT) {
- --i;
- continue;
- }
-
- LASSERT(rc == 0);
- LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
-
- if (lls->sub_flags & LSF_HELD)
- lov_sublock_release(env, lck, i, 1, 0);
-
- link = lov_lock_link_find(env, lck, lsl);
- LASSERT(link != NULL);
- lov_lock_unlink(env, link, lsl);
- LASSERT(lck->lls_sub[i].sub_lock == NULL);
-
- lov_sublock_unlock(env, lsl, closure, NULL);
- }
-
- cl_lock_closure_fini(closure);
-}
-
-static int lov_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- int i;
-
- (*p)(env, cookie, "%d\n", lck->lls_nr);
- for (i = 0; i < lck->lls_nr; ++i) {
- struct lov_lock_sub *sub;
-
- sub = &lck->lls_sub[i];
- (*p)(env, cookie, " %d %x: ", i, sub->sub_flags);
- if (sub->sub_lock != NULL)
- cl_lock_print(env, cookie, p,
- sub->sub_lock->lss_cl.cls_lock);
- else
- (*p)(env, cookie, "---\n");
- }
- return 0;
-}
-
-static const struct cl_lock_operations lov_lock_ops = {
- .clo_fini = lov_lock_fini,
- .clo_enqueue = lov_lock_enqueue,
- .clo_wait = lov_lock_wait,
- .clo_use = lov_lock_use,
- .clo_unuse = lov_lock_unuse,
- .clo_cancel = lov_lock_cancel,
- .clo_fits_into = lov_lock_fits_into,
- .clo_delete = lov_lock_delete,
- .clo_print = lov_lock_print
-};
-
-int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- struct lov_lock *lck;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
- if (lck != NULL) {
- cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
- result = lov_lock_sub_init(env, lck, io);
- } else
- result = -ENOMEM;
- return result;
-}
-
-static void lov_empty_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
-}
-
-static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- (*p)(env, cookie, "empty\n");
- return 0;
-}
-
-/* XXX: more methods will be added later. */
-static const struct cl_lock_operations lov_empty_lock_ops = {
- .clo_fini = lov_empty_lock_fini,
- .clo_print = lov_empty_lock_print
-};
-
-int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- struct lov_lock *lck;
- int result = -ENOMEM;
-
- OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
- if (lck != NULL) {
- cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
- lck->lls_orig = lock->cll_descr;
- result = 0;
- }
- return result;
-}
-
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent)
-{
- struct cl_lock_closure *closure;
-
- closure = &lov_env_info(env)->lti_closure;
- LASSERT(list_empty(&closure->clc_list));
- cl_lock_closure_init(env, closure, parent, 1);
- return closure;
-}
-
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
deleted file mode 100644
index dd1cf3d2d039..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_class.h"
-#include "lov_internal.h"
-
-/** Merge the lock value block (lvb) attributes and KMS from each of the
- * stripes in a file into a single lvb. It is expected that the caller
- * initializes the current atime, mtime and ctime to avoid regressing a more
- * up-to-date time on the local client.
- */
-int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
- struct ost_lvb *lvb, __u64 *kms_place)
-{
- __u64 size = 0;
- __u64 kms = 0;
- __u64 blocks = 0;
- s64 current_mtime = lvb->lvb_mtime;
- s64 current_atime = lvb->lvb_atime;
- s64 current_ctime = lvb->lvb_ctime;
- int i;
- int rc = 0;
-
- assert_spin_locked(&lsm->lsm_lock);
- LASSERT(lsm->lsm_lock_owner == current_pid());
-
- CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
- POSTID(&lsm->lsm_oi), lvb->lvb_size, lvb->lvb_mtime,
- lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks);
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
- u64 lov_size, tmpsize;
-
- if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks)) {
- rc = OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
- continue;
- }
-
- tmpsize = loi->loi_kms;
- lov_size = lov_stripe_size(lsm, tmpsize, i);
- if (lov_size > kms)
- kms = lov_size;
-
- if (loi->loi_lvb.lvb_size > tmpsize)
- tmpsize = loi->loi_lvb.lvb_size;
-
- lov_size = lov_stripe_size(lsm, tmpsize, i);
- if (lov_size > size)
- size = lov_size;
- /* merge blocks, mtime, atime */
- blocks += loi->loi_lvb.lvb_blocks;
- if (loi->loi_lvb.lvb_mtime > current_mtime)
- current_mtime = loi->loi_lvb.lvb_mtime;
- if (loi->loi_lvb.lvb_atime > current_atime)
- current_atime = loi->loi_lvb.lvb_atime;
- if (loi->loi_lvb.lvb_ctime > current_ctime)
- current_ctime = loi->loi_lvb.lvb_ctime;
-
- CDEBUG(D_INODE, "MDT ID "DOSTID" on OST[%u]: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
- POSTID(&lsm->lsm_oi), loi->loi_ost_idx,
- loi->loi_lvb.lvb_size, loi->loi_lvb.lvb_mtime,
- loi->loi_lvb.lvb_atime, loi->loi_lvb.lvb_ctime,
- loi->loi_lvb.lvb_blocks);
- }
-
- *kms_place = kms;
- lvb->lvb_size = size;
- lvb->lvb_blocks = blocks;
- lvb->lvb_mtime = current_mtime;
- lvb->lvb_atime = current_atime;
- lvb->lvb_ctime = current_ctime;
- return rc;
-}
-
-/* Must be called under the lov_stripe_lock() */
-int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
- u64 size, int shrink)
-{
- struct lov_oinfo *loi;
- int stripe = 0;
- __u64 kms;
-
- assert_spin_locked(&lsm->lsm_lock);
- LASSERT(lsm->lsm_lock_owner == current_pid());
-
- if (shrink) {
- for (; stripe < lsm->lsm_stripe_count; stripe++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];
-
- kms = lov_size_to_stripe(lsm, size, stripe);
- CDEBUG(D_INODE,
- "stripe %d KMS %sing %llu->%llu\n",
- stripe, kms > loi->loi_kms ? "increase":"shrink",
- loi->loi_kms, kms);
- loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
- }
- return 0;
- }
-
- if (size > 0)
- stripe = lov_stripe_number(lsm, size - 1);
- kms = lov_size_to_stripe(lsm, size, stripe);
- loi = lsm->lsm_oinfo[stripe];
-
- CDEBUG(D_INODE, "stripe %d KMS %sincreasing %llu->%llu\n",
- stripe, kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms);
- if (kms > loi->loi_kms)
- loi_kms_set(loi, kms);
-
- return 0;
-}
-
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
- struct lov_stripe_md *lsm, int stripeno, int *set)
-{
- valid &= src->o_valid;
-
- if (*set) {
- if (valid & OBD_MD_FLSIZE) {
- /* this handles sparse files properly */
- u64 lov_size;
-
- lov_size = lov_stripe_size(lsm, src->o_size, stripeno);
- if (lov_size > tgt->o_size)
- tgt->o_size = lov_size;
- }
- if (valid & OBD_MD_FLBLOCKS)
- tgt->o_blocks += src->o_blocks;
- if (valid & OBD_MD_FLBLKSZ)
- tgt->o_blksize += src->o_blksize;
- if (valid & OBD_MD_FLCTIME && tgt->o_ctime < src->o_ctime)
- tgt->o_ctime = src->o_ctime;
- if (valid & OBD_MD_FLMTIME && tgt->o_mtime < src->o_mtime)
- tgt->o_mtime = src->o_mtime;
- if (valid & OBD_MD_FLDATAVERSION)
- tgt->o_data_version += src->o_data_version;
- } else {
- memcpy(tgt, src, sizeof(*tgt));
- tgt->o_oi = lsm->lsm_oi;
- if (valid & OBD_MD_FLSIZE)
- tgt->o_size = lov_stripe_size(lsm, src->o_size,
- stripeno);
- }
-
- /* data_version needs to be valid on all stripes to be correct! */
- if (!(valid & OBD_MD_FLDATAVERSION))
- tgt->o_valid &= ~OBD_MD_FLDATAVERSION;
-
- *set += 1;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
deleted file mode 100644
index 5c69ce9b9402..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ /dev/null
@@ -1,2361 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_obd.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Mike Shaver <shaver@clusterfs.com>
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_support.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_mds.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_param.h"
-#include "../include/cl_object.h"
-#include "../include/lclient.h" /* for cl_client_lru */
-#include "../include/lustre/ll_fiemap.h"
-#include "../include/lustre_fid.h"
-
-#include "lov_internal.h"
-
-/* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion.
- Any function that expects lov_tgts to remain stationary must take a ref. */
-static void lov_getref(struct obd_device *obd)
-{
- struct lov_obd *lov = &obd->u.lov;
-
- /* nobody gets through here until lov_putref is done */
- mutex_lock(&lov->lov_lock);
- atomic_inc(&lov->lov_refcount);
- mutex_unlock(&lov->lov_lock);
- return;
-}
-
-static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt);
-
-static void lov_putref(struct obd_device *obd)
-{
- struct lov_obd *lov = &obd->u.lov;
-
- mutex_lock(&lov->lov_lock);
- /* ok to dec to 0 more than once -- ltd_exp's will be null */
- if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
- LIST_HEAD(kill);
- int i;
- struct lov_tgt_desc *tgt, *n;
- CDEBUG(D_CONFIG, "destroying %d lov targets\n",
- lov->lov_death_row);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- tgt = lov->lov_tgts[i];
-
- if (!tgt || !tgt->ltd_reap)
- continue;
- list_add(&tgt->ltd_kill, &kill);
- /* XXX - right now there is a dependency on ld_tgt_count
- * being the maximum tgt index for computing the
- * mds_max_easize. So we can't shrink it. */
- lov_ost_pool_remove(&lov->lov_packed, i);
- lov->lov_tgts[i] = NULL;
- lov->lov_death_row--;
- }
- mutex_unlock(&lov->lov_lock);
-
- list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
- list_del(&tgt->ltd_kill);
- /* Disconnect */
- __lov_del_obd(obd, tgt);
- }
-
- if (lov->lov_tgts_kobj)
- kobject_put(lov->lov_tgts_kobj);
-
- } else {
- mutex_unlock(&lov->lov_lock);
- }
-}
-
-static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
- enum obd_notify_event ev);
-static int lov_notify(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data);
-
-
-#define MAX_STRING_SIZE 128
-int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
- struct obd_connect_data *data)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct obd_uuid *tgt_uuid;
- struct obd_device *tgt_obd;
- static struct obd_uuid lov_osc_uuid = { "LOV_OSC_UUID" };
- struct obd_import *imp;
- int rc;
-
- if (!lov->lov_tgts[index])
- return -EINVAL;
-
- tgt_uuid = &lov->lov_tgts[index]->ltd_uuid;
- tgt_obd = lov->lov_tgts[index]->ltd_obd;
-
- if (!tgt_obd->obd_set_up) {
- CERROR("Target %s not set up\n", obd_uuid2str(tgt_uuid));
- return -EINVAL;
- }
-
- /* override the sp_me from lov */
- tgt_obd->u.cli.cl_sp_me = lov->lov_sp_me;
-
- if (data && (data->ocd_connect_flags & OBD_CONNECT_INDEX))
- data->ocd_index = index;
-
- /*
- * Divine LOV knows that OBDs under it are OSCs.
- */
- imp = tgt_obd->u.cli.cl_import;
-
- if (activate) {
- tgt_obd->obd_no_recov = 0;
- /* FIXME this is probably supposed to be
- ptlrpc_set_import_active. Horrible naming. */
- ptlrpc_activate_import(imp);
- }
-
- rc = obd_register_observer(tgt_obd, obd);
- if (rc) {
- CERROR("Target %s register_observer error %d\n",
- obd_uuid2str(tgt_uuid), rc);
- return rc;
- }
-
-
- if (imp->imp_invalid) {
- CDEBUG(D_CONFIG, "not connecting OSC %s; administratively disabled\n",
- obd_uuid2str(tgt_uuid));
- return 0;
- }
-
- rc = obd_connect(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd,
- &lov_osc_uuid, data, NULL);
- if (rc || !lov->lov_tgts[index]->ltd_exp) {
- CERROR("Target %s connect error %d\n",
- obd_uuid2str(tgt_uuid), rc);
- return -ENODEV;
- }
-
- lov->lov_tgts[index]->ltd_reap = 0;
-
- CDEBUG(D_CONFIG, "Connected tgt idx %d %s (%s) %sactive\n", index,
- obd_uuid2str(tgt_uuid), tgt_obd->obd_name, activate ? "":"in");
-
- if (lov->lov_tgts_kobj)
- /* Even if we failed, that's ok */
- rc = sysfs_create_link(lov->lov_tgts_kobj, &tgt_obd->obd_kobj,
- tgt_obd->obd_name);
-
- return 0;
-}
-
-static int lov_connect(const struct lu_env *env,
- struct obd_export **exp, struct obd_device *obd,
- struct obd_uuid *cluuid, struct obd_connect_data *data,
- void *localdata)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- struct lustre_handle conn;
- int i, rc;
-
- CDEBUG(D_CONFIG, "connect #%d\n", lov->lov_connects);
-
- rc = class_connect(&conn, obd, cluuid);
- if (rc)
- return rc;
-
- *exp = class_conn2export(&conn);
-
- /* Why should there ever be more than 1 connect? */
- lov->lov_connects++;
- LASSERT(lov->lov_connects == 1);
-
- memset(&lov->lov_ocd, 0, sizeof(lov->lov_ocd));
- if (data)
- lov->lov_ocd = *data;
-
- obd_getref(obd);
-
- lov->lov_tgts_kobj = kobject_create_and_add("target_obds",
- &obd->obd_kobj);
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- tgt = lov->lov_tgts[i];
- if (!tgt || obd_uuid_empty(&tgt->ltd_uuid))
- continue;
- /* Flags will be lowest common denominator */
- rc = lov_connect_obd(obd, i, tgt->ltd_activate, &lov->lov_ocd);
- if (rc) {
- CERROR("%s: lov connect tgt %d failed: %d\n",
- obd->obd_name, i, rc);
- continue;
- }
-		/* connect to administratively disabled OST */
- if (!lov->lov_tgts[i]->ltd_exp)
- continue;
-
- rc = lov_notify(obd, lov->lov_tgts[i]->ltd_exp->exp_obd,
- OBD_NOTIFY_CONNECT, (void *)&i);
- if (rc) {
- CERROR("%s error sending notify %d\n",
- obd->obd_name, rc);
- }
- }
- obd_putref(obd);
-
- return 0;
-}
-
-static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct obd_device *osc_obd;
- int rc;
-
- osc_obd = class_exp2obd(tgt->ltd_exp);
- CDEBUG(D_CONFIG, "%s: disconnecting target %s\n",
- obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL");
-
- if (tgt->ltd_active) {
- tgt->ltd_active = 0;
- lov->desc.ld_active_tgt_count--;
- tgt->ltd_exp->exp_obd->obd_inactive = 1;
- }
-
- if (osc_obd) {
- if (lov->lov_tgts_kobj)
- sysfs_remove_link(lov->lov_tgts_kobj,
- osc_obd->obd_name);
-
- /* Pass it on to our clients.
- * XXX This should be an argument to disconnect,
- * XXX not a back-door flag on the OBD. Ah well.
- */
- osc_obd->obd_force = obd->obd_force;
- osc_obd->obd_fail = obd->obd_fail;
- osc_obd->obd_no_recov = obd->obd_no_recov;
- }
-
- obd_register_observer(osc_obd, NULL);
-
- rc = obd_disconnect(tgt->ltd_exp);
- if (rc) {
- CERROR("Target %s disconnect error %d\n",
- tgt->ltd_uuid.uuid, rc);
- rc = 0;
- }
-
- tgt->ltd_exp = NULL;
- return 0;
-}
-
-static int lov_disconnect(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
- int i, rc;
-
- if (!lov->lov_tgts)
- goto out;
-
- /* Only disconnect the underlying layers on the final disconnect. */
- lov->lov_connects--;
- if (lov->lov_connects != 0) {
- /* why should there be more than 1 connect? */
- CERROR("disconnect #%d\n", lov->lov_connects);
- goto out;
- }
-
- /* Let's hold another reference so lov_del_obd doesn't spin through
- putref every time */
- obd_getref(obd);
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (lov->lov_tgts[i] && lov->lov_tgts[i]->ltd_exp) {
- /* Disconnection is the last we know about an obd */
- lov_del_target(obd, i, NULL, lov->lov_tgts[i]->ltd_gen);
- }
- }
-
- obd_putref(obd);
-
-out:
- rc = class_disconnect(exp); /* bz 9811 */
- return rc;
-}
-
-/* Error codes:
- *
- * -EINVAL : UUID can't be found in the LOV's target list
- * -ENOTCONN: The UUID is found, but the target connection is bad (!)
- * -EBADF : The UUID is found, but the OBD is the wrong type (!)
- * any >= 0 : is the LOV target index
- */
-static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
- enum obd_notify_event ev)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- int index, activate, active;
-
- CDEBUG(D_INFO, "Searching in lov %p for uuid %s event(%d)\n",
- lov, uuid->uuid, ev);
-
- obd_getref(obd);
- for (index = 0; index < lov->desc.ld_tgt_count; index++) {
- tgt = lov->lov_tgts[index];
- if (!tgt)
- continue;
- /*
- * LU-642, initially inactive OSC could miss the obd_connect,
- * we make up for it here.
- */
- if (ev == OBD_NOTIFY_ACTIVATE && tgt->ltd_exp == NULL &&
- obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
- struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"};
-
- obd_connect(NULL, &tgt->ltd_exp, tgt->ltd_obd,
- &lov_osc_uuid, &lov->lov_ocd, NULL);
- }
- if (!tgt->ltd_exp)
- continue;
-
- CDEBUG(D_INFO, "lov idx %d is %s conn %#llx\n",
- index, obd_uuid2str(&tgt->ltd_uuid),
- tgt->ltd_exp->exp_handle.h_cookie);
- if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
- break;
- }
-
- if (index == lov->desc.ld_tgt_count) {
- index = -EINVAL;
- goto out;
- }
-
- if (ev == OBD_NOTIFY_DEACTIVATE || ev == OBD_NOTIFY_ACTIVATE) {
- activate = (ev == OBD_NOTIFY_ACTIVATE) ? 1 : 0;
-
- if (lov->lov_tgts[index]->ltd_activate == activate) {
- CDEBUG(D_INFO, "OSC %s already %sactivate!\n",
- uuid->uuid, activate ? "" : "de");
- } else {
- lov->lov_tgts[index]->ltd_activate = activate;
- CDEBUG(D_CONFIG, "%sactivate OSC %s\n",
- activate ? "" : "de", obd_uuid2str(uuid));
- }
-
- } else if (ev == OBD_NOTIFY_INACTIVE || ev == OBD_NOTIFY_ACTIVE) {
- active = (ev == OBD_NOTIFY_ACTIVE) ? 1 : 0;
-
- if (lov->lov_tgts[index]->ltd_active == active) {
- CDEBUG(D_INFO, "OSC %s already %sactive!\n",
- uuid->uuid, active ? "" : "in");
- goto out;
- } else {
- CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n",
- obd_uuid2str(uuid), active ? "" : "in");
- }
-
- lov->lov_tgts[index]->ltd_active = active;
- if (active) {
- lov->desc.ld_active_tgt_count++;
- lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 0;
- } else {
- lov->desc.ld_active_tgt_count--;
- lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 1;
- }
- } else {
- CERROR("Unknown event(%d) for uuid %s", ev, uuid->uuid);
- }
-
- out:
- obd_putref(obd);
- return index;
-}
-
-static int lov_notify(struct obd_device *obd, struct obd_device *watched,
- enum obd_notify_event ev, void *data)
-{
- int rc = 0;
- struct lov_obd *lov = &obd->u.lov;
-
- down_read(&lov->lov_notify_lock);
- if (!lov->lov_connects) {
- up_read(&lov->lov_notify_lock);
- return rc;
- }
-
- if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE ||
- ev == OBD_NOTIFY_ACTIVATE || ev == OBD_NOTIFY_DEACTIVATE) {
- struct obd_uuid *uuid;
-
- LASSERT(watched);
-
- if (strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
- up_read(&lov->lov_notify_lock);
- CERROR("unexpected notification of %s %s!\n",
- watched->obd_type->typ_name,
- watched->obd_name);
- return -EINVAL;
- }
- uuid = &watched->u.cli.cl_target_uuid;
-
- /* Set OSC as active before notifying the observer, so the
- * observer can use the OSC normally.
- */
- rc = lov_set_osc_active(obd, uuid, ev);
- if (rc < 0) {
- up_read(&lov->lov_notify_lock);
- CERROR("event(%d) of %s failed: %d\n", ev,
- obd_uuid2str(uuid), rc);
- return rc;
- }
-		/* active events should pass the lov target index as data */
- data = &rc;
- }
-
- /* Pass the notification up the chain. */
- if (watched) {
- rc = obd_notify_observer(obd, watched, ev, data);
- } else {
-		/* NULL watched means all OSCs in the lov (only for syncs) */
-		/* sync events should send the lov idx as data */
- struct lov_obd *lov = &obd->u.lov;
- int i, is_sync;
-
- data = &i;
- is_sync = (ev == OBD_NOTIFY_SYNC) ||
- (ev == OBD_NOTIFY_SYNC_NONBLOCK);
-
- obd_getref(obd);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i])
- continue;
-
- /* don't send sync event if target not
- * connected/activated */
- if (is_sync && !lov->lov_tgts[i]->ltd_active)
- continue;
-
- rc = obd_notify_observer(obd, lov->lov_tgts[i]->ltd_obd,
- ev, data);
- if (rc) {
- CERROR("%s: notify %s of %s failed %d\n",
- obd->obd_name,
- obd->obd_observer->obd_name,
- lov->lov_tgts[i]->ltd_obd->obd_name,
- rc);
- }
- }
- obd_putref(obd);
- }
-
- up_read(&lov->lov_notify_lock);
- return rc;
-}
-
-static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
- __u32 index, int gen, int active)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- struct obd_device *tgt_obd;
- int rc;
-
- CDEBUG(D_CONFIG, "uuid:%s idx:%d gen:%d active:%d\n",
- uuidp->uuid, index, gen, active);
-
- if (gen <= 0) {
- CERROR("request to add OBD %s with invalid generation: %d\n",
- uuidp->uuid, gen);
- return -EINVAL;
- }
-
- tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME,
- &obd->obd_uuid);
- if (tgt_obd == NULL)
- return -EINVAL;
-
- mutex_lock(&lov->lov_lock);
-
- if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
- tgt = lov->lov_tgts[index];
- CERROR("UUID %s already assigned at LOV target index %d\n",
- obd_uuid2str(&tgt->ltd_uuid), index);
- mutex_unlock(&lov->lov_lock);
- return -EEXIST;
- }
-
- if (index >= lov->lov_tgt_size) {
- /* We need to reallocate the lov target array. */
- struct lov_tgt_desc **newtgts, **old = NULL;
- __u32 newsize, oldsize = 0;
-
- newsize = max_t(__u32, lov->lov_tgt_size, 2);
- while (newsize < index + 1)
- newsize <<= 1;
- newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
- if (newtgts == NULL) {
- mutex_unlock(&lov->lov_lock);
- return -ENOMEM;
- }
-
- if (lov->lov_tgt_size) {
- memcpy(newtgts, lov->lov_tgts, sizeof(*newtgts) *
- lov->lov_tgt_size);
- old = lov->lov_tgts;
- oldsize = lov->lov_tgt_size;
- }
-
- lov->lov_tgts = newtgts;
- lov->lov_tgt_size = newsize;
- smp_rmb();
- kfree(old);
-
- CDEBUG(D_CONFIG, "tgts: %p size: %d\n",
- lov->lov_tgts, lov->lov_tgt_size);
- }
-
- tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
- if (!tgt) {
- mutex_unlock(&lov->lov_lock);
- return -ENOMEM;
- }
-
- rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
- if (rc) {
- mutex_unlock(&lov->lov_lock);
- kfree(tgt);
- return rc;
- }
-
- tgt->ltd_uuid = *uuidp;
- tgt->ltd_obd = tgt_obd;
- /* XXX - add a sanity check on the generation number. */
- tgt->ltd_gen = gen;
- tgt->ltd_index = index;
- tgt->ltd_activate = active;
- lov->lov_tgts[index] = tgt;
- if (index >= lov->desc.ld_tgt_count)
- lov->desc.ld_tgt_count = index + 1;
-
- mutex_unlock(&lov->lov_lock);
-
- CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
- index, tgt->ltd_gen, lov->desc.ld_tgt_count);
-
- rc = obd_notify(obd, tgt_obd, OBD_NOTIFY_CREATE, &index);
-
- if (lov->lov_connects == 0) {
- /* lov_connect hasn't been called yet. We'll do the
- lov_connect_obd on this target when that fn first runs,
- because we don't know the connect flags yet. */
- return 0;
- }
-
- obd_getref(obd);
-
- rc = lov_connect_obd(obd, index, active, &lov->lov_ocd);
- if (rc)
- goto out;
-
-	/* connect to administratively disabled OST */
- if (!tgt->ltd_exp) {
- rc = 0;
- goto out;
- }
-
- if (lov->lov_cache != NULL) {
- rc = obd_set_info_async(NULL, tgt->ltd_exp,
- sizeof(KEY_CACHE_SET), KEY_CACHE_SET,
- sizeof(struct cl_client_cache), lov->lov_cache,
- NULL);
- if (rc < 0)
- goto out;
- }
-
- rc = lov_notify(obd, tgt->ltd_exp->exp_obd,
- active ? OBD_NOTIFY_CONNECT : OBD_NOTIFY_INACTIVE,
- (void *)&index);
-
-out:
- if (rc) {
- CERROR("add failed (%d), deleting %s\n", rc,
- obd_uuid2str(&tgt->ltd_uuid));
- lov_del_target(obd, index, NULL, 0);
- }
- obd_putref(obd);
- return rc;
-}
-
-/* Schedule a target for deletion */
-int lov_del_target(struct obd_device *obd, __u32 index,
- struct obd_uuid *uuidp, int gen)
-{
- struct lov_obd *lov = &obd->u.lov;
- int count = lov->desc.ld_tgt_count;
- int rc = 0;
-
- if (index >= count) {
- CERROR("LOV target index %d >= number of LOV OBDs %d.\n",
- index, count);
- return -EINVAL;
- }
-
- /* to make sure there's no ongoing lov_notify() now */
- down_write(&lov->lov_notify_lock);
- obd_getref(obd);
-
- if (!lov->lov_tgts[index]) {
- CERROR("LOV target at index %d is not setup.\n", index);
- rc = -EINVAL;
- goto out;
- }
-
- if (uuidp && !obd_uuid_equals(uuidp, &lov->lov_tgts[index]->ltd_uuid)) {
- CERROR("LOV target UUID %s at index %d doesn't match %s.\n",
- lov_uuid2str(lov, index), index,
- obd_uuid2str(uuidp));
- rc = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_CONFIG, "uuid: %s idx: %d gen: %d exp: %p active: %d\n",
- lov_uuid2str(lov, index), index,
- lov->lov_tgts[index]->ltd_gen, lov->lov_tgts[index]->ltd_exp,
- lov->lov_tgts[index]->ltd_active);
-
- lov->lov_tgts[index]->ltd_reap = 1;
- lov->lov_death_row++;
-	/* the target is actually deleted from obd_putref() */
-out:
- obd_putref(obd);
- up_write(&lov->lov_notify_lock);
-
- return rc;
-}
-
-static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
-{
- struct obd_device *osc_obd;
-
- LASSERT(tgt);
- LASSERT(tgt->ltd_reap);
-
- osc_obd = class_exp2obd(tgt->ltd_exp);
-
- CDEBUG(D_CONFIG, "Removing tgt %s : %s\n",
- tgt->ltd_uuid.uuid,
- osc_obd ? osc_obd->obd_name : "<no obd>");
-
- if (tgt->ltd_exp)
- lov_disconnect_obd(obd, tgt);
-
- kfree(tgt);
-
- /* Manual cleanup - no cleanup logs to clean up the osc's. We must
- do it ourselves. And we can't do it from lov_cleanup,
- because we just lost our only reference to it. */
- if (osc_obd)
- class_manual_cleanup(osc_obd);
-}
-
-void lov_fix_desc_stripe_size(__u64 *val)
-{
- if (*val < LOV_MIN_STRIPE_SIZE) {
- if (*val != 0)
- LCONSOLE_INFO("Increasing default stripe size to minimum %u\n",
- LOV_DESC_STRIPE_SIZE_DEFAULT);
- *val = LOV_DESC_STRIPE_SIZE_DEFAULT;
- } else if (*val & (LOV_MIN_STRIPE_SIZE - 1)) {
- *val &= ~(LOV_MIN_STRIPE_SIZE - 1);
- LCONSOLE_WARN("Changing default stripe size to %llu (a multiple of %u)\n",
- *val, LOV_MIN_STRIPE_SIZE);
- }
-}
-
-void lov_fix_desc_stripe_count(__u32 *val)
-{
- if (*val == 0)
- *val = 1;
-}
-
-void lov_fix_desc_pattern(__u32 *val)
-{
- /* from lov_setstripe */
- if ((*val != 0) && (*val != LOV_PATTERN_RAID0)) {
- LCONSOLE_WARN("Unknown stripe pattern: %#x\n", *val);
- *val = 0;
- }
-}
-
-void lov_fix_desc_qos_maxage(__u32 *val)
-{
- if (*val == 0)
- *val = LOV_DESC_QOS_MAXAGE_DEFAULT;
-}
-
-void lov_fix_desc(struct lov_desc *desc)
-{
- lov_fix_desc_stripe_size(&desc->ld_default_stripe_size);
- lov_fix_desc_stripe_count(&desc->ld_default_stripe_count);
- lov_fix_desc_pattern(&desc->ld_pattern);
- lov_fix_desc_qos_maxage(&desc->ld_qos_maxage);
-}
-
-int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- struct lov_desc *desc;
- struct lov_obd *lov = &obd->u.lov;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("LOV setup requires a descriptor\n");
- return -EINVAL;
- }
-
- desc = (struct lov_desc *)lustre_cfg_buf(lcfg, 1);
-
- if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
- CERROR("descriptor size wrong: %d > %d\n",
- (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
- return -EINVAL;
- }
-
- if (desc->ld_magic != LOV_DESC_MAGIC) {
- if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) {
- CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
- obd->obd_name, desc);
- lustre_swab_lov_desc(desc);
- } else {
- CERROR("%s: Bad lov desc magic: %#x\n",
- obd->obd_name, desc->ld_magic);
- return -EINVAL;
- }
- }
-
- lov_fix_desc(desc);
-
- desc->ld_active_tgt_count = 0;
- lov->desc = *desc;
- lov->lov_tgt_size = 0;
-
- mutex_init(&lov->lov_lock);
- atomic_set(&lov->lov_refcount, 0);
- lov->lov_sp_me = LUSTRE_SP_CLI;
-
- init_rwsem(&lov->lov_notify_lock);
-
- lov->lov_pools_hash_body = cfs_hash_create("POOLS", HASH_POOLS_CUR_BITS,
- HASH_POOLS_MAX_BITS,
- HASH_POOLS_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &pool_hash_operations,
- CFS_HASH_DEFAULT);
- INIT_LIST_HEAD(&lov->lov_pool_list);
- lov->lov_pool_count = 0;
- rc = lov_ost_pool_init(&lov->lov_packed, 0);
- if (rc)
- goto out;
-
- lprocfs_lov_init_vars(&lvars);
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
-
- rc = ldebugfs_seq_create(obd->obd_debugfs_entry, "target_obd",
- 0444, &lov_proc_target_fops, obd);
- if (rc)
- CWARN("Error adding the target_obd file\n");
-
- lov->lov_pool_debugfs_entry = ldebugfs_register("pools",
- obd->obd_debugfs_entry,
- NULL, NULL);
- return 0;
-
-out:
- return rc;
-}
-
-static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
- struct lov_obd *lov = &obd->u.lov;
-
- switch (stage) {
- case OBD_CLEANUP_EARLY: {
- int i;
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
- continue;
- obd_precleanup(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
- OBD_CLEANUP_EARLY);
- }
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int lov_cleanup(struct obd_device *obd)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct list_head *pos, *tmp;
- struct pool_desc *pool;
-
- list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
- pool = list_entry(pos, struct pool_desc, pool_list);
- /* free pool structs */
- CDEBUG(D_INFO, "delete pool %p\n", pool);
- /* In the function below, .hs_keycmp resolves to
- * pool_hashkey_keycmp() */
- /* coverity[overrun-buffer-val] */
- lov_pool_del(obd, pool->pool_name);
- }
- cfs_hash_putref(lov->lov_pools_hash_body);
- lov_ost_pool_free(&lov->lov_packed);
-
- lprocfs_obd_cleanup(obd);
- if (lov->lov_tgts) {
- int i;
- obd_getref(obd);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i])
- continue;
-
- /* Inactive targets may never have connected */
- if (lov->lov_tgts[i]->ltd_active ||
- atomic_read(&lov->lov_refcount))
- /* We should never get here - these
- should have been removed in the
- disconnect. */
- CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
- i, lov->lov_death_row,
- atomic_read(&lov->lov_refcount));
- lov_del_target(obd, i, NULL, 0);
- }
- obd_putref(obd);
- kfree(lov->lov_tgts);
- lov->lov_tgt_size = 0;
- }
- return 0;
-}
-
-int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
- __u32 *indexp, int *genp)
-{
- struct obd_uuid obd_uuid;
- int cmd;
- int rc = 0;
-
- switch (cmd = lcfg->lcfg_command) {
- case LCFG_LOV_ADD_OBD:
- case LCFG_LOV_ADD_INA:
- case LCFG_LOV_DEL_OBD: {
- __u32 index;
- int gen;
- /* lov_modify_tgts add 0:lov_mdsA 1:ost1_UUID 2:0 3:1 */
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) {
- rc = -EINVAL;
- goto out;
- }
-
- obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
-
- if (sscanf(lustre_cfg_buf(lcfg, 2), "%d", indexp) != 1) {
- rc = -EINVAL;
- goto out;
- }
- if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", genp) != 1) {
- rc = -EINVAL;
- goto out;
- }
- index = *indexp;
- gen = *genp;
- if (cmd == LCFG_LOV_ADD_OBD)
- rc = lov_add_target(obd, &obd_uuid, index, gen, 1);
- else if (cmd == LCFG_LOV_ADD_INA)
- rc = lov_add_target(obd, &obd_uuid, index, gen, 0);
- else
- rc = lov_del_target(obd, index, &obd_uuid, gen);
- goto out;
- }
- case LCFG_PARAM: {
- struct lprocfs_static_vars lvars = { NULL };
- struct lov_desc *desc = &(obd->u.lov.desc);
-
- if (!desc) {
- rc = -EINVAL;
- goto out;
- }
-
- lprocfs_lov_init_vars(&lvars);
-
- rc = class_process_proc_param(PARAM_LOV, lvars.obd_vars,
- lcfg, obd);
- if (rc > 0)
- rc = 0;
- goto out;
- }
- case LCFG_POOL_NEW:
- case LCFG_POOL_ADD:
- case LCFG_POOL_DEL:
- case LCFG_POOL_REM:
- goto out;
-
- default: {
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- rc = -EINVAL;
- goto out;
-
- }
- }
-out:
- return rc;
-}
-
-static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
- struct lov_stripe_md **ea, struct obd_trans_info *oti)
-{
- struct lov_stripe_md *obj_mdp, *lsm;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- unsigned ost_idx;
- int rc, i;
-
- LASSERT(src_oa->o_valid & OBD_MD_FLFLAGS &&
- src_oa->o_flags & OBD_FL_RECREATE_OBJS);
-
- obj_mdp = kzalloc(sizeof(*obj_mdp), GFP_NOFS);
- if (!obj_mdp)
- return -ENOMEM;
-
- ost_idx = src_oa->o_nlink;
- lsm = *ea;
- if (lsm == NULL) {
- rc = -EINVAL;
- goto out;
- }
- if (ost_idx >= lov->desc.ld_tgt_count ||
- !lov->lov_tgts[ost_idx]) {
- rc = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx == ost_idx) {
- if (ostid_id(&loi->loi_oi) != ostid_id(&src_oa->o_oi)) {
- rc = -EINVAL;
- goto out;
- }
- break;
- }
- }
- if (i == lsm->lsm_stripe_count) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = obd_create(NULL, lov->lov_tgts[ost_idx]->ltd_exp,
- src_oa, &obj_mdp, oti);
-out:
- kfree(obj_mdp);
- return rc;
-}
-
-/* the LOV expects oa->o_id to be set to the LOV object id */
-static int lov_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *src_oa, struct lov_stripe_md **ea,
- struct obd_trans_info *oti)
-{
- struct lov_obd *lov;
- int rc = 0;
-
- LASSERT(ea != NULL);
- if (exp == NULL)
- return -EINVAL;
-
- if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
- src_oa->o_flags == OBD_FL_DELORPHAN) {
-		/* should not be used with LOV anymore */
- LBUG();
- }
-
- lov = &exp->exp_obd->u.lov;
- if (!lov->desc.ld_active_tgt_count)
- return -EIO;
-
- obd_getref(exp->exp_obd);
- /* Recreate a specific object id at the given OST index */
- if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
- (src_oa->o_flags & OBD_FL_RECREATE_OBJS)) {
- rc = lov_recreate(exp, src_oa, ea, oti);
- }
-
- obd_putref(exp->exp_obd);
- return rc;
-}
-
-#define ASSERT_LSM_MAGIC(lsmp) \
-do { \
- LASSERT((lsmp) != NULL); \
- LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \
- (lsmp)->lsm_magic == LOV_MAGIC_V3), \
- "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \
-} while (0)
-
-static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md *lsm,
- struct obd_trans_info *oti, struct obd_export *md_exp)
-{
- struct lov_request_set *set;
- struct obd_info oinfo;
- struct lov_request *req;
- struct list_head *pos;
- struct lov_obd *lov;
- int rc = 0, err = 0;
-
- ASSERT_LSM_MAGIC(lsm);
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- if (oa->o_valid & OBD_MD_FLCOOKIE) {
- LASSERT(oti);
- LASSERT(oti->oti_logcookies);
- }
-
- lov = &exp->exp_obd->u.lov;
- obd_getref(exp->exp_obd);
- rc = lov_prep_destroy_set(exp, &oinfo, oa, lsm, oti, &set);
- if (rc)
- goto out;
-
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
- if (oa->o_valid & OBD_MD_FLCOOKIE)
- oti->oti_logcookies = set->set_cookies + req->rq_stripe;
-
- err = obd_destroy(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
- req->rq_oi.oi_oa, NULL, oti, NULL);
- err = lov_update_common_set(set, req, err);
- if (err) {
- CERROR("%s: destroying objid "DOSTID" subobj "
- DOSTID" on OST idx %d: rc = %d\n",
- exp->exp_obd->obd_name, POSTID(&oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, err);
- if (!rc)
- rc = err;
- }
- }
-
- if (rc == 0) {
- LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
- rc = lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp);
- }
- err = lov_fini_destroy_set(set);
-out:
- obd_putref(exp->exp_obd);
- return rc ? rc : err;
-}
-
-static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
- void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- /* don't do attribute merge if this async op failed */
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_getattr_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *rqset)
-{
- struct lov_request_set *lovset;
- struct lov_obd *lov;
- struct list_head *pos;
- struct lov_request *req;
- int rc = 0, err;
-
- LASSERT(oinfo);
- ASSERT_LSM_MAGIC(oinfo->oi_md);
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
-
- rc = lov_prep_getattr_set(exp, oinfo, &lovset);
- if (rc)
- return rc;
-
- CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
- POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
- oinfo->oi_md->lsm_stripe_size);
-
- list_for_each(pos, &lovset->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
- CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
- POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
- POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
- rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, rqset);
- if (rc) {
- CERROR("%s: getattr objid "DOSTID" subobj"
- DOSTID" on OST idx %d: rc = %d\n",
- exp->exp_obd->obd_name,
- POSTID(&oinfo->oi_oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, rc);
- goto out;
- }
- }
-
- if (!list_empty(&rqset->set_requests)) {
- LASSERT(rc == 0);
- LASSERT(rqset->set_interpret == NULL);
- rqset->set_interpret = lov_getattr_interpret;
- rqset->set_arg = (void *)lovset;
- return rc;
- }
-out:
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_getattr_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
- void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- if (rc)
- atomic_set(&lovset->set_completes, 0);
- err = lov_fini_setattr_set(lovset);
- return rc ? rc : err;
-}
-
-/* If @oti is given, the request comes from the MDS and responses from the OSTs
-   are not needed. Otherwise, a client is waiting for the responses. */
-static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
-{
- struct lov_request_set *set;
- struct lov_request *req;
- struct list_head *pos;
- struct lov_obd *lov;
- int rc = 0;
-
- LASSERT(oinfo);
- ASSERT_LSM_MAGIC(oinfo->oi_md);
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
- LASSERT(oti);
- LASSERT(oti->oti_logcookies);
- }
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
- rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
- if (rc)
- return rc;
-
- CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
- POSTID(&oinfo->oi_md->lsm_oi),
- oinfo->oi_md->lsm_stripe_count,
- oinfo->oi_md->lsm_stripe_size);
-
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
- if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- oti->oti_logcookies = set->set_cookies + req->rq_stripe;
-
- CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
- POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
- POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
-
- rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, oti, rqset);
- if (rc) {
- CERROR("error: setattr objid "DOSTID" subobj"
- DOSTID" on OST idx %d: rc = %d\n",
- POSTID(&set->set_oi->oi_oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, rc);
- break;
- }
- }
-
- /* If we are not waiting for responses on async requests, return. */
- if (rc || !rqset || list_empty(&rqset->set_requests)) {
- int err;
- if (rc)
- atomic_set(&set->set_completes, 0);
- err = lov_fini_setattr_set(set);
- return rc ? rc : err;
- }
-
- LASSERT(rqset->set_interpret == NULL);
- rqset->set_interpret = lov_setattr_interpret;
- rqset->set_arg = (void *)set;
-
- return 0;
-}
-
-/* Find any LDLM lock of the inode in the lov.
- * return 0 if no lock is found
- *        1 if one is found
- *      < 0 on error */
-static int lov_find_cbdata(struct obd_export *exp,
- struct lov_stripe_md *lsm, ldlm_iterator_t it,
- void *data)
-{
- struct lov_obd *lov;
- int rc = 0, i;
-
- ASSERT_LSM_MAGIC(lsm);
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_stripe_md submd;
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CDEBUG(D_HA, "lov idx %d NULL\n", loi->loi_ost_idx);
- continue;
- }
-
- submd.lsm_oi = loi->loi_oi;
- submd.lsm_stripe_count = 0;
- rc = obd_find_cbdata(lov->lov_tgts[loi->loi_ost_idx]->ltd_exp,
- &submd, it, data);
- if (rc != 0)
- return rc;
- }
- return rc;
-}
-
-int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
-{
- struct lov_request_set *lovset = (struct lov_request_set *)data;
- int err;
-
- if (rc)
- atomic_set(&lovset->set_completes, 0);
-
- err = lov_fini_statfs_set(lovset);
- return rc ? rc : err;
-}
-
-static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
- __u64 max_age, struct ptlrpc_request_set *rqset)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_request_set *set;
- struct lov_request *req;
- struct list_head *pos;
- struct lov_obd *lov;
- int rc = 0;
-
- LASSERT(oinfo != NULL);
- LASSERT(oinfo->oi_osfs != NULL);
-
- lov = &obd->u.lov;
- rc = lov_prep_statfs_set(obd, oinfo, &set);
- if (rc)
- return rc;
-
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
- rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
- &req->rq_oi, max_age, rqset);
- if (rc)
- break;
- }
-
- if (rc || list_empty(&rqset->set_requests)) {
- int err;
- if (rc)
- atomic_set(&set->set_completes, 0);
- err = lov_fini_statfs_set(set);
- return rc ? rc : err;
- }
-
- LASSERT(rqset->set_interpret == NULL);
- rqset->set_interpret = lov_statfs_interpret;
- rqset->set_arg = (void *)set;
- return 0;
-}
-
-static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags)
-{
- struct ptlrpc_request_set *set = NULL;
- struct obd_info oinfo = { };
- int rc = 0;
-
- /* for obdclass we forbid using obd_statfs_rqset, but prefer using async
- * statfs requests */
- set = ptlrpc_prep_set();
- if (set == NULL)
- return -ENOMEM;
-
- oinfo.oi_osfs = osfs;
- oinfo.oi_flags = flags;
- rc = lov_statfs_async(exp, &oinfo, max_age, set);
- if (rc == 0)
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
-
- return rc;
-}
-
-static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lov_obd *lov = &obddev->u.lov;
- int i = 0, rc = 0, count = lov->desc.ld_tgt_count;
- struct obd_uuid *uuidp;
-
- switch (cmd) {
- case IOC_OBD_STATFS: {
- struct obd_ioctl_data *data = karg;
- struct obd_device *osc_obd;
- struct obd_statfs stat_buf = {0};
- __u32 index;
- __u32 flags;
-
- memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
- if (index >= count)
- return -ENODEV;
-
- if (!lov->lov_tgts[index])
- /* Try again with the next index */
- return -EAGAIN;
- if (!lov->lov_tgts[index]->ltd_active)
- return -ENODATA;
-
- osc_obd = class_exp2obd(lov->lov_tgts[index]->ltd_exp);
- if (!osc_obd)
- return -EINVAL;
-
- /* copy UUID */
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
- min((int) data->ioc_plen2,
- (int) sizeof(struct obd_uuid))))
- return -EFAULT;
-
- flags = uarg ? *(__u32 *)uarg : 0;
- /* got statfs data */
- rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- flags);
- if (rc)
- return rc;
- if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int) data->ioc_plen1,
- (int) sizeof(stat_buf))))
- return -EFAULT;
- break;
- }
- case OBD_IOC_LOV_GET_CONFIG: {
- struct obd_ioctl_data *data;
- struct lov_desc *desc;
- char *buf = NULL;
- __u32 *genp;
-
- len = 0;
- if (obd_ioctl_getdata(&buf, &len, uarg))
- return -EINVAL;
-
- data = (struct obd_ioctl_data *)buf;
-
- if (sizeof(*desc) > data->ioc_inllen1) {
- obd_ioctl_freedata(buf, len);
- return -EINVAL;
- }
-
- if (sizeof(uuidp->uuid) * count > data->ioc_inllen2) {
- obd_ioctl_freedata(buf, len);
- return -EINVAL;
- }
-
- if (sizeof(__u32) * count > data->ioc_inllen3) {
- obd_ioctl_freedata(buf, len);
- return -EINVAL;
- }
-
- desc = (struct lov_desc *)data->ioc_inlbuf1;
- memcpy(desc, &(lov->desc), sizeof(*desc));
-
- uuidp = (struct obd_uuid *)data->ioc_inlbuf2;
- genp = (__u32 *)data->ioc_inlbuf3;
- /* the uuid will be empty for deleted OSTs */
- for (i = 0; i < count; i++, uuidp++, genp++) {
- if (!lov->lov_tgts[i])
- continue;
- *uuidp = lov->lov_tgts[i]->ltd_uuid;
- *genp = lov->lov_tgts[i]->ltd_gen;
- }
-
- if (copy_to_user(uarg, buf, len))
- rc = -EFAULT;
- obd_ioctl_freedata(buf, len);
- break;
- }
- case LL_IOC_LOV_GETSTRIPE:
- rc = lov_getstripe(exp, karg, uarg);
- break;
- case OBD_IOC_QUOTACTL: {
- struct if_quotactl *qctl = karg;
- struct lov_tgt_desc *tgt = NULL;
- struct obd_quotactl *oqctl;
-
- if (qctl->qc_valid == QC_OSTIDX) {
- if (count <= qctl->qc_idx)
- return -EINVAL;
-
- tgt = lov->lov_tgts[qctl->qc_idx];
- if (!tgt || !tgt->ltd_exp)
- return -EINVAL;
- } else if (qctl->qc_valid == QC_UUID) {
- for (i = 0; i < count; i++) {
- tgt = lov->lov_tgts[i];
- if (!tgt ||
- !obd_uuid_equals(&tgt->ltd_uuid,
- &qctl->obd_uuid))
- continue;
-
- if (tgt->ltd_exp == NULL)
- return -EINVAL;
-
- break;
- }
- } else {
- return -EINVAL;
- }
-
- if (i >= count)
- return -EAGAIN;
-
- LASSERT(tgt && tgt->ltd_exp);
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl)
- return -ENOMEM;
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(tgt->ltd_exp, oqctl);
- if (rc == 0) {
- QCTL_COPY(qctl, oqctl);
- qctl->qc_valid = QC_OSTIDX;
- qctl->obd_uuid = tgt->ltd_uuid;
- }
- kfree(oqctl);
- break;
- }
- default: {
- int set = 0;
-
- if (count == 0)
- return -ENOTTY;
-
- for (i = 0; i < count; i++) {
- int err;
- struct obd_device *osc_obd;
-
- /* OST was disconnected */
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_exp)
- continue;
-
- /* ll_umount_begin() sets force flag but for lov, not
- * osc. Let's pass it through */
- osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp);
- osc_obd->obd_force = obddev->obd_force;
- err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
- len, karg, uarg);
- if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
- return err;
- } else if (err) {
- if (lov->lov_tgts[i]->ltd_active) {
- CDEBUG(err == -ENOTTY ?
- D_IOCTL : D_WARNING,
- "iocontrol OSC %s on OST idx %d cmd %x: err = %d\n",
- lov_uuid2str(lov, i),
- i, cmd, err);
- if (!rc)
- rc = err;
- }
- } else {
- set = 1;
- }
- }
- if (!set && !rc)
- rc = -EIO;
- }
- }
-
- return rc;
-}
-
-#define FIEMAP_BUFFER_SIZE 4096
-
-/**
- * Non-zero fe_logical indicates that this is a continuation FIEMAP
- * call. The local end offset and the device are sent in the first
- * fm_extent. This function calculates the stripe number from the saved
- * device index and returns, via \a start_stripe, the stripe at which
- * mapping is to be restarted.
- *
- * The return value is fm_end_offset, the in-OST offset at which mapping
- * should be restarted. If fm_end_offset=0 is returned, the caller will
- * re-calculate the proper offset in the next stripe.
- * Note that the first extent is passed to lov_get_info via the value field.
- *
- * \param fiemap fiemap request header
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe will be returned in this
- */
-static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
- struct lov_stripe_md *lsm, u64 fm_start,
- u64 fm_end, int *start_stripe)
-{
- u64 local_end = fiemap->fm_extents[0].fe_logical;
- u64 lun_start, lun_end;
- u64 fm_end_offset;
- int stripe_no = -1, i;
-
- if (fiemap->fm_extent_count == 0 ||
- fiemap->fm_extents[0].fe_logical == 0)
- return 0;
-
- /* Find out stripe_no from ost_index saved in the fe_device */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(oinfo))
- continue;
-
- if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
- stripe_no = i;
- break;
- }
- }
- if (stripe_no == -1)
- return -EINVAL;
-
- /* If we have finished mapping on previous device, shift logical
- * offset to start of next device */
- if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
- &lun_start, &lun_end)) != 0 &&
- local_end < lun_end) {
- fm_end_offset = local_end;
- *start_stripe = stripe_no;
- } else {
- /* This is a special value to indicate that caller should
- * calculate offset in next stripe. */
- fm_end_offset = 0;
- *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
- }
-
- return fm_end_offset;
-}
-
-/**
- * We calculate on which OST the mapping will end. If the length of mapping
- * is greater than (stripe_size * stripe_count) then the last_stripe will
- * will be one just before start_stripe. Else we check if the mapping
- * intersects each OST and find last_stripe.
- * This function returns the last_stripe and also sets the stripe_count
- * over which the mapping is spread
- *
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe of the mapping
- * \param stripe_count the number of stripes across which to map is returned
- *
- * \retval last_stripe return the last stripe of the mapping
- */
-static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, u64 fm_start,
- u64 fm_end, int start_stripe,
- int *stripe_count)
-{
- int last_stripe;
- u64 obd_start, obd_end;
- int i, j;
-
- if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
- last_stripe = start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
- start_stripe - 1;
- *stripe_count = lsm->lsm_stripe_count;
- } else {
- for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
- i = (i + 1) % lsm->lsm_stripe_count, j++) {
- if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
- &obd_start, &obd_end)) == 0)
- break;
- }
- *stripe_count = j;
- last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
- }
-
- return last_stripe;
-}
-
-/**
- * Set fe_device and copy extents from local buffer into main return buffer.
- *
- * \param fiemap fiemap request header
- * \param lcl_fm_ext array of local fiemap extents to be copied
- * \param ost_index OST index to be written into the fe_device field for each
- *                  extent
- * \param ext_count number of extents to be copied
- * \param current_extent where to start copying in main extent array
- */
-static void fiemap_prepare_and_copy_exts(struct ll_user_fiemap *fiemap,
- struct ll_fiemap_extent *lcl_fm_ext,
- int ost_index, unsigned int ext_count,
- int current_extent)
-{
- char *to;
- int ext;
-
- for (ext = 0; ext < ext_count; ext++) {
- lcl_fm_ext[ext].fe_device = ost_index;
- lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
- }
-
- /* Copy fm_extent's from fm_local to return buffer */
- to = (char *)fiemap + fiemap_count_to_size(current_extent);
- memcpy(to, lcl_fm_ext, ext_count * sizeof(struct ll_fiemap_extent));
-}
-
-/**
- * Break down the FIEMAP request and send appropriate calls to individual OSTs.
- * This also handles the restarting of FIEMAP calls in case mapping overflows
- * the available number of extents in single call.
- */
-static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
- __u32 *vallen, void *val, struct lov_stripe_md *lsm)
-{
- struct ll_fiemap_info_key *fm_key = key;
- struct ll_user_fiemap *fiemap = val;
- struct ll_user_fiemap *fm_local = NULL;
- struct ll_fiemap_extent *lcl_fm_ext;
- int count_local;
- unsigned int get_num_extents = 0;
- int ost_index = 0, actual_start_stripe, start_stripe;
- u64 fm_start, fm_end, fm_length, fm_end_offset;
- u64 curr_loc;
- int current_extent = 0, rc = 0, i;
- int ost_eof = 0; /* EOF for object */
- int ost_done = 0; /* done with required mapping for this OST? */
- int last_stripe;
- int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count;
- unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
-
- if (!lsm_has_objects(lsm)) {
- rc = 0;
- goto out;
- }
-
- if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size)
- buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
-
- fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
- if (fm_local == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- lcl_fm_ext = &fm_local->fm_extents[0];
-
- count_local = fiemap_size_to_count(buffer_size);
-
- memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
- fm_start = fiemap->fm_start;
- fm_length = fiemap->fm_length;
- /* Calculate start stripe, last stripe and length of mapping */
- actual_start_stripe = start_stripe = lov_stripe_number(lsm, fm_start);
- fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
- fm_start + fm_length - 1);
- /* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
- if (fm_end > fm_key->oa.o_size)
- fm_end = fm_key->oa.o_size;
-
- last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
- actual_start_stripe, &stripe_count);
-
- fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start,
- fm_end, &start_stripe);
- if (fm_end_offset == -EINVAL) {
- rc = -EINVAL;
- goto out;
- }
-
- if (fiemap_count_to_size(fiemap->fm_extent_count) > *vallen)
- fiemap->fm_extent_count = fiemap_size_to_count(*vallen);
- if (fiemap->fm_extent_count == 0) {
- get_num_extents = 1;
- count_local = 0;
- }
- /* Check each stripe */
- for (cur_stripe = start_stripe, i = 0; i < stripe_count;
- i++, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
- u64 req_fm_len; /* Stores length of required mapping */
- u64 len_mapped_single_call;
- u64 lun_start, lun_end, obd_object_end;
- unsigned int ext_count;
-
- cur_stripe_wrap = cur_stripe;
-
- /* Find out range of mapping on this stripe */
- if ((lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
- &lun_start, &obd_object_end)) == 0)
- continue;
-
- if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
- rc = -EIO;
- goto out;
- }
-
- /* If this is a continuation FIEMAP call and we are on
- * starting stripe then lun_start needs to be set to
- * fm_end_offset */
- if (fm_end_offset != 0 && cur_stripe == start_stripe)
- lun_start = fm_end_offset;
-
- if (fm_length != ~0ULL) {
- /* Handle fm_start + fm_length overflow */
- if (fm_start + fm_length < fm_start)
- fm_length = ~0ULL - fm_start;
- lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
- cur_stripe);
- } else {
- lun_end = ~0ULL;
- }
-
- if (lun_start == lun_end)
- continue;
-
- req_fm_len = obd_object_end - lun_start;
- fm_local->fm_length = 0;
- len_mapped_single_call = 0;
-
- /* If the output buffer is very large and the objects have many
- * extents we may need to loop on a single OST repeatedly */
- ost_eof = 0;
- ost_done = 0;
- do {
- if (get_num_extents == 0) {
- /* Don't get too many extents. */
- if (current_extent + count_local >
- fiemap->fm_extent_count)
- count_local = fiemap->fm_extent_count -
- current_extent;
- }
-
- lun_start += len_mapped_single_call;
- fm_local->fm_length = req_fm_len - len_mapped_single_call;
- req_fm_len = fm_local->fm_length;
- fm_local->fm_extent_count = count_local;
- fm_local->fm_mapped_extents = 0;
- fm_local->fm_flags = fiemap->fm_flags;
-
- fm_key->oa.o_oi = lsm->lsm_oinfo[cur_stripe]->loi_oi;
- ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
-
- if (ost_index < 0 ||
- ost_index >= lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out;
- }
-
- /* If OST is inactive, return extent with UNKNOWN flag */
- if (!lov->lov_tgts[ost_index]->ltd_active) {
- fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
- fm_local->fm_mapped_extents = 1;
-
- lcl_fm_ext[0].fe_logical = lun_start;
- lcl_fm_ext[0].fe_length = obd_object_end -
- lun_start;
- lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
-
- goto inactive_tgt;
- }
-
- fm_local->fm_start = lun_start;
- fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
- memcpy(&fm_key->fiemap, fm_local, sizeof(*fm_local));
- *vallen = fiemap_count_to_size(fm_local->fm_extent_count);
- rc = obd_get_info(NULL,
- lov->lov_tgts[ost_index]->ltd_exp,
- keylen, key, vallen, fm_local, lsm);
- if (rc != 0)
- goto out;
-
-inactive_tgt:
- ext_count = fm_local->fm_mapped_extents;
- if (ext_count == 0) {
- ost_done = 1;
- /* If last stripe has hole at the end,
- * then we need to return */
- if (cur_stripe_wrap == last_stripe) {
- fiemap->fm_mapped_extents = 0;
- goto finish;
- }
- break;
- }
-
- /* If we just need num of extents then go to next device */
- if (get_num_extents) {
- current_extent += ext_count;
- break;
- }
-
- len_mapped_single_call = lcl_fm_ext[ext_count-1].fe_logical -
- lun_start + lcl_fm_ext[ext_count - 1].fe_length;
-
- /* Have we finished mapping on this device? */
- if (req_fm_len <= len_mapped_single_call)
- ost_done = 1;
-
- /* Clear the EXTENT_LAST flag which can be present on
- * last extent */
- if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
- lcl_fm_ext[ext_count - 1].fe_flags &=
- ~FIEMAP_EXTENT_LAST;
-
- curr_loc = lov_stripe_size(lsm,
- lcl_fm_ext[ext_count - 1].fe_logical+
- lcl_fm_ext[ext_count - 1].fe_length,
- cur_stripe);
- if (curr_loc >= fm_key->oa.o_size)
- ost_eof = 1;
-
- fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
- ost_index, ext_count,
- current_extent);
-
- current_extent += ext_count;
-
- /* Ran out of available extents? */
- if (current_extent >= fiemap->fm_extent_count)
- goto finish;
- } while (ost_done == 0 && ost_eof == 0);
-
- if (cur_stripe_wrap == last_stripe)
- goto finish;
- }
-
-finish:
- /* Indicate that we are returning device offsets unless file just has
- * single stripe */
- if (lsm->lsm_stripe_count > 1)
- fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
-
- if (get_num_extents)
- goto skip_last_device_calc;
-
- /* Check if we have reached the last stripe and whether mapping for that
- * stripe is done. */
- if (cur_stripe_wrap == last_stripe) {
- if (ost_done || ost_eof)
- fiemap->fm_extents[current_extent - 1].fe_flags |=
- FIEMAP_EXTENT_LAST;
- }
-
-skip_last_device_calc:
- fiemap->fm_mapped_extents = current_extent;
-
-out:
- kvfree(fm_local);
- return rc;
-}
-
-static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lov_obd *lov = &obddev->u.lov;
- int i, rc;
-
- if (!vallen || !val)
- return -EFAULT;
-
- obd_getref(obddev);
-
- if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
- struct {
- char name[16];
- struct ldlm_lock *lock;
- } *data = key;
- struct ldlm_res_id *res_id = &data->lock->l_resource->lr_name;
- struct lov_oinfo *loi;
- __u32 *stripe = val;
-
- if (*vallen < sizeof(*stripe)) {
- rc = -EFAULT;
- goto out;
- }
- *vallen = sizeof(*stripe);
-
- /* XXX This is another one of those bits that will need to
- * change if we ever actually support nested LOVs. It uses
- * the lock's export to find out which stripe it is. */
- /* XXX - it's assumed all the locks for deleted OSTs have
- * been cancelled. Also, the export for deleted OSTs will
- * be NULL and won't match the lock's export. */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- loi = lsm->lsm_oinfo[i];
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov->lov_tgts[loi->loi_ost_idx])
- continue;
- if (lov->lov_tgts[loi->loi_ost_idx]->ltd_exp ==
- data->lock->l_conn_export &&
- ostid_res_name_eq(&loi->loi_oi, res_id)) {
- *stripe = i;
- rc = 0;
- goto out;
- }
- }
- LDLM_ERROR(data->lock, "lock on inode without such object");
- dump_lsm(D_ERROR, lsm);
- rc = -ENXIO;
- goto out;
- } else if (KEY_IS(KEY_LAST_ID)) {
- struct obd_id_info *info = val;
- __u32 size = sizeof(u64);
- struct lov_tgt_desc *tgt;
-
- LASSERT(*vallen == sizeof(struct obd_id_info));
- tgt = lov->lov_tgts[info->idx];
-
- if (!tgt || !tgt->ltd_active) {
- rc = -ESRCH;
- goto out;
- }
-
- rc = obd_get_info(env, tgt->ltd_exp, keylen, key,
- &size, info->data, NULL);
- rc = 0;
- goto out;
- } else if (KEY_IS(KEY_LOVDESC)) {
- struct lov_desc *desc_ret = val;
- *desc_ret = lov->desc;
-
- rc = 0;
- goto out;
- } else if (KEY_IS(KEY_FIEMAP)) {
- rc = lov_fiemap(lov, keylen, key, vallen, val, lsm);
- goto out;
- } else if (KEY_IS(KEY_CONNECT_FLAG)) {
- struct lov_tgt_desc *tgt;
- __u64 ost_idx = *((__u64 *)val);
-
- LASSERT(*vallen == sizeof(__u64));
- LASSERT(ost_idx < lov->desc.ld_tgt_count);
- tgt = lov->lov_tgts[ost_idx];
-
- if (!tgt || !tgt->ltd_exp) {
- rc = -ESRCH;
- goto out;
- }
-
- *((__u64 *)val) = exp_connect_flags(tgt->ltd_exp);
- rc = 0;
- goto out;
- } else if (KEY_IS(KEY_TGT_COUNT)) {
- *((int *)val) = lov->desc.ld_tgt_count;
- rc = 0;
- goto out;
- }
-
- rc = -EINVAL;
-
-out:
- obd_putref(obddev);
- return rc;
-}
-
-static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- struct obd_device *obddev = class_exp2obd(exp);
- struct lov_obd *lov = &obddev->u.lov;
- u32 count;
- int i, rc = 0, err;
- struct lov_tgt_desc *tgt;
- unsigned incr, check_uuid,
- do_inactive, no_set;
- unsigned next_id = 0, mds_con = 0;
-
- incr = check_uuid = do_inactive = no_set = 0;
- if (set == NULL) {
- no_set = 1;
- set = ptlrpc_prep_set();
- if (!set)
- return -ENOMEM;
- }
-
- obd_getref(obddev);
- count = lov->desc.ld_tgt_count;
-
- if (KEY_IS(KEY_NEXT_ID)) {
- count = vallen / sizeof(struct obd_id_info);
- vallen = sizeof(u64);
- incr = sizeof(struct obd_id_info);
- do_inactive = 1;
- next_id = 1;
- } else if (KEY_IS(KEY_CHECKSUM)) {
- do_inactive = 1;
- } else if (KEY_IS(KEY_EVICT_BY_NID)) {
- /* use defaults: do_inactive = incr = 0; */
- } else if (KEY_IS(KEY_MDS_CONN)) {
- mds_con = 1;
- } else if (KEY_IS(KEY_CACHE_SET)) {
- LASSERT(lov->lov_cache == NULL);
- lov->lov_cache = val;
- do_inactive = 1;
- }
-
- for (i = 0; i < count; i++, val = (char *)val + incr) {
- if (next_id) {
- tgt = lov->lov_tgts[((struct obd_id_info *)val)->idx];
- } else {
- tgt = lov->lov_tgts[i];
- }
- /* OST was disconnected */
- if (!tgt || !tgt->ltd_exp)
- continue;
-
- /* OST is inactive and we don't want inactive OSCs */
- if (!tgt->ltd_active && !do_inactive)
- continue;
-
- if (mds_con) {
- struct mds_group_info *mgi;
-
- LASSERT(vallen == sizeof(*mgi));
- mgi = (struct mds_group_info *)val;
-
- /* Only want a specific OSC */
- if (mgi->uuid && !obd_uuid_equals(mgi->uuid,
- &tgt->ltd_uuid))
- continue;
-
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, sizeof(int),
- &mgi->group, set);
- } else if (next_id) {
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, vallen,
- ((struct obd_id_info *)val)->data, set);
- } else {
- /* Only want a specific OSC */
- if (check_uuid &&
- !obd_uuid_equals(val, &tgt->ltd_uuid))
- continue;
-
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, vallen, val, set);
- }
-
- if (!rc)
- rc = err;
- }
-
- obd_putref(obddev);
- if (no_set) {
- err = ptlrpc_set_wait(set);
- if (!rc)
- rc = err;
- ptlrpc_set_destroy(set);
- }
- return rc;
-}
-
-void lov_stripe_lock(struct lov_stripe_md *md)
- __acquires(&md->lsm_lock)
-{
- LASSERT(md->lsm_lock_owner != current_pid());
- spin_lock(&md->lsm_lock);
- LASSERT(md->lsm_lock_owner == 0);
- md->lsm_lock_owner = current_pid();
-}
-EXPORT_SYMBOL(lov_stripe_lock);
-
-void lov_stripe_unlock(struct lov_stripe_md *md)
- __releases(&md->lsm_lock)
-{
- LASSERT(md->lsm_lock_owner == current_pid());
- md->lsm_lock_owner = 0;
- spin_unlock(&md->lsm_lock);
-}
-EXPORT_SYMBOL(lov_stripe_unlock);
-
-static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct lov_obd *lov = &obd->u.lov;
- struct lov_tgt_desc *tgt;
- __u64 curspace = 0;
- __u64 bhardlimit = 0;
- int i, rc = 0;
-
- if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON &&
- oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF &&
- oqctl->qc_cmd != Q_GETOQUOTA &&
- oqctl->qc_cmd != Q_INITQUOTA &&
- oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
- oqctl->qc_cmd != Q_FINVALIDATE) {
- CERROR("bad quota opc %x for lov obd", oqctl->qc_cmd);
- return -EFAULT;
- }
-
- /* for lov tgt */
- obd_getref(obd);
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- int err;
-
- tgt = lov->lov_tgts[i];
-
- if (!tgt)
- continue;
-
- if (!tgt->ltd_active || tgt->ltd_reap) {
- if (oqctl->qc_cmd == Q_GETOQUOTA &&
- lov->lov_tgts[i]->ltd_activate) {
- rc = -EREMOTEIO;
- CERROR("ost %d is inactive\n", i);
- } else {
- CDEBUG(D_HA, "ost %d is inactive\n", i);
- }
- continue;
- }
-
- err = obd_quotactl(tgt->ltd_exp, oqctl);
- if (err) {
- if (tgt->ltd_active && !rc)
- rc = err;
- continue;
- }
-
- if (oqctl->qc_cmd == Q_GETOQUOTA) {
- curspace += oqctl->qc_dqblk.dqb_curspace;
- bhardlimit += oqctl->qc_dqblk.dqb_bhardlimit;
- }
- }
- obd_putref(obd);
-
- if (oqctl->qc_cmd == Q_GETOQUOTA) {
- oqctl->qc_dqblk.dqb_curspace = curspace;
- oqctl->qc_dqblk.dqb_bhardlimit = bhardlimit;
- }
- return rc;
-}
-
-static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct lov_obd *lov = &obd->u.lov;
- int i, rc = 0;
-
- obd_getref(obd);
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- if (!lov->lov_tgts[i])
- continue;
-
- /* Skip quota check on the administratively disabled OSTs. */
- if (!lov->lov_tgts[i]->ltd_activate) {
- CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n",
- i);
- continue;
- }
-
- if (!lov->lov_tgts[i]->ltd_active) {
- CERROR("lov idx %d inactive\n", i);
- rc = -EIO;
- goto out;
- }
- }
-
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- int err;
-
- if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_activate)
- continue;
-
- err = obd_quotacheck(lov->lov_tgts[i]->ltd_exp, oqctl);
- if (err && !rc)
- rc = err;
- }
-
-out:
- obd_putref(obd);
-
- return rc;
-}
-
-static struct obd_ops lov_obd_ops = {
- .o_owner = THIS_MODULE,
- .o_setup = lov_setup,
- .o_precleanup = lov_precleanup,
- .o_cleanup = lov_cleanup,
- /*.o_process_config = lov_process_config,*/
- .o_connect = lov_connect,
- .o_disconnect = lov_disconnect,
- .o_statfs = lov_statfs,
- .o_statfs_async = lov_statfs_async,
- .o_packmd = lov_packmd,
- .o_unpackmd = lov_unpackmd,
- .o_create = lov_create,
- .o_destroy = lov_destroy,
- .o_getattr_async = lov_getattr_async,
- .o_setattr_async = lov_setattr_async,
- .o_adjust_kms = lov_adjust_kms,
- .o_find_cbdata = lov_find_cbdata,
- .o_iocontrol = lov_iocontrol,
- .o_get_info = lov_get_info,
- .o_set_info_async = lov_set_info_async,
- .o_notify = lov_notify,
- .o_pool_new = lov_pool_new,
- .o_pool_rem = lov_pool_remove,
- .o_pool_add = lov_pool_add,
- .o_pool_del = lov_pool_del,
- .o_getref = lov_getref,
- .o_putref = lov_putref,
- .o_quotactl = lov_quotactl,
- .o_quotacheck = lov_quotacheck,
-};
-
-struct kmem_cache *lov_oinfo_slab;
-
-static int __init lov_init(void)
-{
- struct lprocfs_static_vars lvars = { NULL };
- int rc;
-
- /* print an address of _any_ initialized kernel symbol from this
- * module, to allow debugging with gdb that doesn't support data
- * symbols from modules.*/
- CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches);
-
- rc = lu_kmem_init(lov_caches);
- if (rc)
- return rc;
-
- lov_oinfo_slab = kmem_cache_create("lov_oinfo",
- sizeof(struct lov_oinfo),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (lov_oinfo_slab == NULL) {
- lu_kmem_fini(lov_caches);
- return -ENOMEM;
- }
- lprocfs_lov_init_vars(&lvars);
-
- rc = class_register_type(&lov_obd_ops, NULL,
- LUSTRE_LOV_NAME, &lov_device_type);
-
- if (rc) {
- kmem_cache_destroy(lov_oinfo_slab);
- lu_kmem_fini(lov_caches);
- }
-
- return rc;
-}
-
-static void /*__exit*/ lov_exit(void)
-{
- class_unregister_type(LUSTRE_LOV_NAME);
- kmem_cache_destroy(lov_oinfo_slab);
-
- lu_kmem_fini(lov_caches);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-
-module_init(lov_init);
-module_exit(lov_exit);
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
deleted file mode 100644
index 4d7cd924a27e..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ /dev/null
@@ -1,1002 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_object for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-#include "../include/lclient.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Layout operations.
- *
- */
-
-struct lov_layout_operations {
- int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
- struct lov_object *lov,
- const struct cl_object_conf *conf,
- union lov_layout_state *state);
- int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state);
- void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state);
- void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state);
- int (*llo_print)(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o);
- int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
- int (*llo_lock_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
- int (*llo_io_init)(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
- int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
-};
-
-static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
-
-/*****************************************************************************
- *
- * Lov object layout operations.
- *
- */
-
-static void lov_install_empty(const struct lu_env *env,
- struct lov_object *lov,
- union lov_layout_state *state)
-{
- /*
- * File without objects.
- */
-}
-
-static int lov_init_empty(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
- const struct cl_object_conf *conf,
- union lov_layout_state *state)
-{
- return 0;
-}
-
-static void lov_install_raid0(const struct lu_env *env,
- struct lov_object *lov,
- union lov_layout_state *state)
-{
-}
-
-static struct cl_object *lov_sub_find(const struct lu_env *env,
- struct cl_device *dev,
- const struct lu_fid *fid,
- const struct cl_object_conf *conf)
-{
- struct lu_object *o;
-
- o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
- LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
- return lu2cl(o);
-}
-
-static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
- struct cl_object *stripe, struct lov_layout_raid0 *r0,
- int idx)
-{
- struct cl_object_header *hdr;
- struct cl_object_header *subhdr;
- struct cl_object_header *parent;
- struct lov_oinfo *oinfo;
- int result;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
- /* For sanity:test_206.
- * Do not leave the object in cache to avoid accessing
- * freed memory. This is because osc_object is referring to
- * lov_oinfo of lsm_stripe_data which will be freed due to
- * this failure. */
- cl_object_kill(env, stripe);
- cl_object_put(env, stripe);
- return -EIO;
- }
-
- hdr = cl_object_header(lov2cl(lov));
- subhdr = cl_object_header(stripe);
-
- oinfo = lov->lo_lsm->lsm_oinfo[idx];
- CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
- " idx: %d gen: %d\n",
- PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
- PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
- oinfo->loi_ost_idx, oinfo->loi_ost_gen);
-
- /* reuse ->coh_attr_guard to protect coh_parent change */
- spin_lock(&subhdr->coh_attr_guard);
- parent = subhdr->coh_parent;
- if (parent == NULL) {
- subhdr->coh_parent = hdr;
- spin_unlock(&subhdr->coh_attr_guard);
- subhdr->coh_nesting = hdr->coh_nesting + 1;
- lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
- r0->lo_sub[idx] = cl2lovsub(stripe);
- r0->lo_sub[idx]->lso_super = lov;
- r0->lo_sub[idx]->lso_index = idx;
- result = 0;
- } else {
- struct lu_object *old_obj;
- struct lov_object *old_lov;
- unsigned int mask = D_INODE;
-
- spin_unlock(&subhdr->coh_attr_guard);
- old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
- LASSERT(old_obj != NULL);
- old_lov = cl2lov(lu2cl(old_obj));
- if (old_lov->lo_layout_invalid) {
- /* the object's layout has already changed but isn't
- * refreshed */
- lu_object_unhash(env, &stripe->co_lu);
- result = -EAGAIN;
- } else {
- mask = D_ERROR;
- result = -EIO;
- }
-
- LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
- "stripe %d is already owned.\n", idx);
- LU_OBJECT_DEBUG(mask, env, old_obj, "owned.\n");
- LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
- cl_object_put(env, stripe);
- }
- return result;
-}
-
-static int lov_init_raid0(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
- const struct cl_object_conf *conf,
- union lov_layout_state *state)
-{
- int result;
- int i;
-
- struct cl_object *stripe;
- struct lov_thread_info *lti = lov_env_info(env);
- struct cl_object_conf *subconf = &lti->lti_stripe_conf;
- struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
- struct lu_fid *ofid = &lti->lti_fid;
- struct lov_layout_raid0 *r0 = &state->raid0;
-
- if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
- dump_lsm(D_ERROR, lsm);
- LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
- LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
- }
-
- LASSERT(lov->lo_lsm == NULL);
- lov->lo_lsm = lsm_addref(lsm);
- r0->lo_nr = lsm->lsm_stripe_count;
- LASSERT(r0->lo_nr <= lov_targets_nr(dev));
-
- r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
- GFP_NOFS);
- if (r0->lo_sub != NULL) {
- result = 0;
- subconf->coc_inode = conf->coc_inode;
- spin_lock_init(&r0->lo_sub_lock);
- /*
- * Create stripe cl_objects.
- */
- for (i = 0; i < r0->lo_nr && result == 0; ++i) {
- struct cl_device *subdev;
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
- int ost_idx = oinfo->loi_ost_idx;
-
- if (lov_oinfo_is_dummy(oinfo))
- continue;
-
- result = ostid_to_fid(ofid, &oinfo->loi_oi,
- oinfo->loi_ost_idx);
- if (result != 0)
- goto out;
-
- subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
- subconf->u.coc_oinfo = oinfo;
- LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
- /* In the function below, .hs_keycmp resolves to
- * lu_obj_hop_keycmp() */
- /* coverity[overrun-buffer-val] */
- stripe = lov_sub_find(env, subdev, ofid, subconf);
- if (!IS_ERR(stripe)) {
- result = lov_init_sub(env, lov, stripe, r0, i);
- if (result == -EAGAIN) { /* try again */
- --i;
- result = 0;
- }
- } else {
- result = PTR_ERR(stripe);
- }
- }
- } else
- result = -ENOMEM;
-out:
- return result;
-}
-
-static int lov_init_released(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
- const struct cl_object_conf *conf,
- union lov_layout_state *state)
-{
- struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
-
- LASSERT(lsm != NULL);
- LASSERT(lsm_is_released(lsm));
- LASSERT(lov->lo_lsm == NULL);
-
- lov->lo_lsm = lsm_addref(lsm);
- return 0;
-}
-
-static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
-
- lov_layout_wait(env, lov);
-
- cl_object_prune(env, &lov->lo_cl);
- return 0;
-}
-
-static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
- struct lovsub_object *los, int idx)
-{
- struct cl_object *sub;
- struct lov_layout_raid0 *r0;
- struct lu_site *site;
- struct lu_site_bkt_data *bkt;
- wait_queue_t *waiter;
-
- r0 = &lov->u.raid0;
- LASSERT(r0->lo_sub[idx] == los);
-
- sub = lovsub2cl(los);
- site = sub->co_lu.lo_dev->ld_site;
- bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
-
- cl_object_kill(env, sub);
- /* release a reference to the sub-object and ... */
- lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
- cl_object_put(env, sub);
-
- /* ... wait until it is actually destroyed---sub-object clears its
- * ->lo_sub[] slot in lovsub_object_fini() */
- if (r0->lo_sub[idx] == los) {
- waiter = &lov_env_info(env)->lti_waiter;
- init_waitqueue_entry(waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, waiter);
- set_current_state(TASK_UNINTERRUPTIBLE);
- while (1) {
- /* this wait-queue is signaled at the end of
- * lu_object_free(). */
- set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock(&r0->lo_sub_lock);
- if (r0->lo_sub[idx] == los) {
- spin_unlock(&r0->lo_sub_lock);
- schedule();
- } else {
- spin_unlock(&r0->lo_sub_lock);
- set_current_state(TASK_RUNNING);
- break;
- }
- }
- remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
- }
- LASSERT(r0->lo_sub[idx] == NULL);
-}
-
-static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- struct lov_layout_raid0 *r0 = &state->raid0;
- struct lov_stripe_md *lsm = lov->lo_lsm;
- int i;
-
- dump_lsm(D_INODE, lsm);
-
- lov_layout_wait(env, lov);
- if (r0->lo_sub != NULL) {
- for (i = 0; i < r0->lo_nr; ++i) {
- struct lovsub_object *los = r0->lo_sub[i];
-
- if (los != NULL) {
- cl_locks_prune(env, &los->lso_cl, 1);
- /*
- * If top-level object is to be evicted from
- * the cache, so are its sub-objects.
- */
- lov_subobject_kill(env, lov, los, i);
- }
- }
- }
- cl_object_prune(env, &lov->lo_cl);
- return 0;
-}
-
-static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
-}
-
-static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- struct lov_layout_raid0 *r0 = &state->raid0;
-
- if (r0->lo_sub != NULL) {
- kvfree(r0->lo_sub);
- r0->lo_sub = NULL;
- }
-
- dump_lsm(D_INODE, lov->lo_lsm);
- lov_free_memmd(&lov->lo_lsm);
-}
-
-static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
-{
- dump_lsm(D_INODE, lov->lo_lsm);
- lov_free_memmd(&lov->lo_lsm);
-}
-
-static int lov_print_empty(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
- return 0;
-}
-
-static int lov_print_raid0(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct lov_object *lov = lu2lov(o);
- struct lov_layout_raid0 *r0 = lov_r0(lov);
- struct lov_stripe_md *lsm = lov->lo_lsm;
- int i;
-
- (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
- r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
- lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
- lsm->lsm_stripe_count, lsm->lsm_layout_gen);
- for (i = 0; i < r0->lo_nr; ++i) {
- struct lu_object *sub;
-
- if (r0->lo_sub[i] != NULL) {
- sub = lovsub2lu(r0->lo_sub[i]);
- lu_object_print(env, cookie, p, sub);
- } else {
- (*p)(env, cookie, "sub %d absent\n", i);
- }
- }
- return 0;
-}
-
-static int lov_print_released(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct lov_object *lov = lu2lov(o);
- struct lov_stripe_md *lsm = lov->lo_lsm;
-
- (*p)(env, cookie,
- "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
- lov->lo_layout_invalid ? "invalid" : "valid", lsm,
- lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
- lsm->lsm_stripe_count, lsm->lsm_layout_gen);
- return 0;
-}
-
-/**
- * Implements cl_object_operations::coo_attr_get() method for an object
- * without stripes (LLT_EMPTY layout type).
- *
- * The only attribute this layer is authoritative for in this case is
- * cl_attr::cat_blocks---it's 0.
- */
-static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- attr->cat_blocks = 0;
- return 0;
-}
-
-static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct lov_object *lov = cl2lov(obj);
- struct lov_layout_raid0 *r0 = lov_r0(lov);
- struct cl_attr *lov_attr = &r0->lo_attr;
- int result = 0;
-
- /* this is called w/o holding type guard mutex, so it must be inside
- * an ongoing IO, otherwise lsm may be replaced.
- * LU-2117: it turns out there exists one exception. For mmaped files,
- * the lock of those files may be requested in the other file's IO
- * context, and this function is called in ccc_lock_state(), it will
- * hit this assertion.
- * Anyway, it's still okay to call attr_get w/o type guard as layout
- * can't go if locks exist. */
- /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
-
- if (!r0->lo_attr_valid) {
- struct lov_stripe_md *lsm = lov->lo_lsm;
- struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
- __u64 kms = 0;
-
- memset(lvb, 0, sizeof(*lvb));
- /* XXX: timestamps can be negative by sanity:test_39m,
- * how can it be? */
- lvb->lvb_atime = LLONG_MIN;
- lvb->lvb_ctime = LLONG_MIN;
- lvb->lvb_mtime = LLONG_MIN;
-
- /*
- * XXX that should be replaced with a loop over sub-objects,
- * doing cl_object_attr_get() on them. But for now, let's
- * reuse old lov code.
- */
-
- /*
- * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
- * happy. It's not needed, because new code uses
- * ->coh_attr_guard spin-lock to protect consistency of
- * sub-object attributes.
- */
- lov_stripe_lock(lsm);
- result = lov_merge_lvb_kms(lsm, lvb, &kms);
- lov_stripe_unlock(lsm);
- if (result == 0) {
- cl_lvb2attr(lov_attr, lvb);
- lov_attr->cat_kms = kms;
- r0->lo_attr_valid = 1;
- }
- }
- if (result == 0) { /* merge results */
- attr->cat_blocks = lov_attr->cat_blocks;
- attr->cat_size = lov_attr->cat_size;
- attr->cat_kms = lov_attr->cat_kms;
- if (attr->cat_atime < lov_attr->cat_atime)
- attr->cat_atime = lov_attr->cat_atime;
- if (attr->cat_ctime < lov_attr->cat_ctime)
- attr->cat_ctime = lov_attr->cat_ctime;
- if (attr->cat_mtime < lov_attr->cat_mtime)
- attr->cat_mtime = lov_attr->cat_mtime;
- }
- return result;
-}
-
-static const struct lov_layout_operations lov_dispatch[] = {
- [LLT_EMPTY] = {
- .llo_init = lov_init_empty,
- .llo_delete = lov_delete_empty,
- .llo_fini = lov_fini_empty,
- .llo_install = lov_install_empty,
- .llo_print = lov_print_empty,
- .llo_page_init = lov_page_init_empty,
- .llo_lock_init = lov_lock_init_empty,
- .llo_io_init = lov_io_init_empty,
- .llo_getattr = lov_attr_get_empty
- },
- [LLT_RAID0] = {
- .llo_init = lov_init_raid0,
- .llo_delete = lov_delete_raid0,
- .llo_fini = lov_fini_raid0,
- .llo_install = lov_install_raid0,
- .llo_print = lov_print_raid0,
- .llo_page_init = lov_page_init_raid0,
- .llo_lock_init = lov_lock_init_raid0,
- .llo_io_init = lov_io_init_raid0,
- .llo_getattr = lov_attr_get_raid0
- },
- [LLT_RELEASED] = {
- .llo_init = lov_init_released,
- .llo_delete = lov_delete_empty,
- .llo_fini = lov_fini_released,
- .llo_install = lov_install_empty,
- .llo_print = lov_print_released,
- .llo_page_init = lov_page_init_empty,
- .llo_lock_init = lov_lock_init_empty,
- .llo_io_init = lov_io_init_released,
- .llo_getattr = lov_attr_get_empty
- }
-};
-
-/**
- * Performs a double-dispatch based on the layout type of an object.
- */
-#define LOV_2DISPATCH_NOLOCK(obj, op, ...) \
-({ \
- struct lov_object *__obj = (obj); \
- enum lov_layout_type __llt; \
- \
- __llt = __obj->lo_type; \
- LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
- lov_dispatch[__llt].op(__VA_ARGS__); \
-})
-
-/**
- * Return lov_layout_type associated with a given lsm
- */
-static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
-{
- if (lsm == NULL)
- return LLT_EMPTY;
- if (lsm_is_released(lsm))
- return LLT_RELEASED;
- return LLT_RAID0;
-}
-
-static inline void lov_conf_freeze(struct lov_object *lov)
-{
- if (lov->lo_owner != current)
- down_read(&lov->lo_type_guard);
-}
-
-static inline void lov_conf_thaw(struct lov_object *lov)
-{
- if (lov->lo_owner != current)
- up_read(&lov->lo_type_guard);
-}
-
-#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
-({ \
- struct lov_object *__obj = (obj); \
- int __lock = !!(lock); \
- typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \
- \
- if (__lock) \
- lov_conf_freeze(__obj); \
- __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
- if (__lock) \
- lov_conf_thaw(__obj); \
- __result; \
-})
-
-/**
- * Performs a locked double-dispatch based on the layout type of an object.
- */
-#define LOV_2DISPATCH(obj, op, ...) \
- LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
-
-#define LOV_2DISPATCH_VOID(obj, op, ...) \
-do { \
- struct lov_object *__obj = (obj); \
- enum lov_layout_type __llt; \
- \
- lov_conf_freeze(__obj); \
- __llt = __obj->lo_type; \
- LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
- lov_dispatch[__llt].op(__VA_ARGS__); \
- lov_conf_thaw(__obj); \
-} while (0)
-
-static void lov_conf_lock(struct lov_object *lov)
-{
- LASSERT(lov->lo_owner != current);
- down_write(&lov->lo_type_guard);
- LASSERT(lov->lo_owner == NULL);
- lov->lo_owner = current;
-}
-
-static void lov_conf_unlock(struct lov_object *lov)
-{
- lov->lo_owner = NULL;
- up_write(&lov->lo_type_guard);
-}
-
-static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
-{
- struct l_wait_info lwi = { 0 };
-
- while (atomic_read(&lov->lo_active_ios) > 0) {
- CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
- PFID(lu_object_fid(lov2lu(lov))),
- atomic_read(&lov->lo_active_ios));
-
- l_wait_event(lov->lo_waitq,
- atomic_read(&lov->lo_active_ios) == 0, &lwi);
- }
- return 0;
-}
-
-static int lov_layout_change(const struct lu_env *unused,
- struct lov_object *lov,
- const struct cl_object_conf *conf)
-{
- int result;
- enum lov_layout_type llt = LLT_EMPTY;
- union lov_layout_state *state = &lov->u;
- const struct lov_layout_operations *old_ops;
- const struct lov_layout_operations *new_ops;
-
- struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
- void *cookie;
- struct lu_env *env;
- int refcheck;
-
- LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
-
- if (conf->u.coc_md != NULL)
- llt = lov_type(conf->u.coc_md->lsm);
- LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
-
- cookie = cl_env_reenter();
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- cl_env_reexit(cookie);
- return PTR_ERR(env);
- }
-
- CDEBUG(D_INODE, DFID" from %s to %s\n",
- PFID(lu_object_fid(lov2lu(lov))),
- llt2str(lov->lo_type), llt2str(llt));
-
- old_ops = &lov_dispatch[lov->lo_type];
- new_ops = &lov_dispatch[llt];
-
- result = old_ops->llo_delete(env, lov, &lov->u);
- if (result == 0) {
- old_ops->llo_fini(env, lov, &lov->u);
-
- LASSERT(atomic_read(&lov->lo_active_ios) == 0);
- LASSERT(hdr->coh_tree.rnode == NULL);
- LASSERT(hdr->coh_pages == 0);
-
- lov->lo_type = LLT_EMPTY;
- result = new_ops->llo_init(env,
- lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
- lov, conf, state);
- if (result == 0) {
- new_ops->llo_install(env, lov, state);
- lov->lo_type = llt;
- } else {
- new_ops->llo_delete(env, lov, state);
- new_ops->llo_fini(env, lov, state);
- /* this file becomes an EMPTY file. */
- }
- }
-
- cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
- return result;
-}
-
-/*****************************************************************************
- *
- * Lov object operations.
- *
- */
-int lov_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct lov_device *dev = lu2lov_dev(obj->lo_dev);
- struct lov_object *lov = lu2lov(obj);
- const struct cl_object_conf *cconf = lu2cl_conf(conf);
- union lov_layout_state *set = &lov->u;
- const struct lov_layout_operations *ops;
- int result;
-
- init_rwsem(&lov->lo_type_guard);
- atomic_set(&lov->lo_active_ios, 0);
- init_waitqueue_head(&lov->lo_waitq);
-
- cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
-
- /* no locking is necessary, as object is being created */
- lov->lo_type = lov_type(cconf->u.coc_md->lsm);
- ops = &lov_dispatch[lov->lo_type];
- result = ops->llo_init(env, dev, lov, cconf, set);
- if (result == 0)
- ops->llo_install(env, lov, set);
- return result;
-}
-
-static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- struct lov_stripe_md *lsm = NULL;
- struct lov_object *lov = cl2lov(obj);
- int result = 0;
-
- lov_conf_lock(lov);
- if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
- lov->lo_layout_invalid = true;
- result = 0;
- goto out;
- }
-
- if (conf->coc_opc == OBJECT_CONF_WAIT) {
- if (lov->lo_layout_invalid &&
- atomic_read(&lov->lo_active_ios) > 0) {
- lov_conf_unlock(lov);
- result = lov_layout_wait(env, lov);
- lov_conf_lock(lov);
- }
- goto out;
- }
-
- LASSERT(conf->coc_opc == OBJECT_CONF_SET);
-
- if (conf->u.coc_md != NULL)
- lsm = conf->u.coc_md->lsm;
- if ((lsm == NULL && lov->lo_lsm == NULL) ||
- ((lsm != NULL && lov->lo_lsm != NULL) &&
- (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
- (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
- /* same version of layout */
- lov->lo_layout_invalid = false;
- result = 0;
- goto out;
- }
-
- /* will change layout - check if there still exists active IO. */
- if (atomic_read(&lov->lo_active_ios) > 0) {
- lov->lo_layout_invalid = true;
- result = -EBUSY;
- goto out;
- }
-
- lov->lo_layout_invalid = lov_layout_change(env, lov, conf);
-
-out:
- lov_conf_unlock(lov);
- CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
- PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
- return result;
-}
-
-static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
-{
- struct lov_object *lov = lu2lov(obj);
-
- LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
-}
-
-static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct lov_object *lov = lu2lov(obj);
-
- LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
- lu_object_fini(obj);
- OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
-}
-
-static int lov_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
-}
-
-int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
-{
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
- llo_page_init, env, obj, page, vmpage);
-}
-
-/**
- * Implements cl_object_operations::clo_io_init() method for lov
- * layer. Dispatches to the appropriate layout io initialization method.
- */
-int lov_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
- return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
- !io->ci_ignore_layout, env, obj, io);
-}
-
-/**
- * An implementation of cl_object_operations::clo_attr_get() method for lov
- * layer. For raid0 layout this collects and merges attributes of all
- * sub-objects.
- */
-static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- /* do not take lock, as this function is called under a
- * spin-lock. Layout is protected from changing by ongoing IO. */
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
-}
-
-static int lov_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
-{
- /*
- * No dispatch is required here, as no layout implements this.
- */
- return 0;
-}
-
-int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- /* No need to lock because we've taken one refcount of layout. */
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
- io);
-}
-
-static const struct cl_object_operations lov_ops = {
- .coo_page_init = lov_page_init,
- .coo_lock_init = lov_lock_init,
- .coo_io_init = lov_io_init,
- .coo_attr_get = lov_attr_get,
- .coo_attr_set = lov_attr_set,
- .coo_conf_set = lov_conf_set
-};
-
-static const struct lu_object_operations lov_lu_obj_ops = {
- .loo_object_init = lov_object_init,
- .loo_object_delete = lov_object_delete,
- .loo_object_release = NULL,
- .loo_object_free = lov_object_free,
- .loo_object_print = lov_object_print,
- .loo_object_invariant = NULL
-};
-
-struct lu_object *lov_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
-{
- struct lov_object *lov;
- struct lu_object *obj;
-
- OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, GFP_NOFS);
- if (lov != NULL) {
- obj = lov2lu(lov);
- lu_object_init(obj, NULL, dev);
- lov->lo_cl.co_ops = &lov_ops;
- lov->lo_type = -1; /* invalid, to catch uninitialized type */
- /*
- * object io operation vector (cl_object::co_iop) is installed
- * later in lov_object_init(), as different vectors are used
- * for object with different layouts.
- */
- obj->lo_ops = &lov_lu_obj_ops;
- } else
- obj = NULL;
- return obj;
-}
-
-struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
-{
- struct lov_stripe_md *lsm = NULL;
-
- lov_conf_freeze(lov);
- if (lov->lo_lsm != NULL) {
- lsm = lsm_addref(lov->lo_lsm);
- CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
- lsm, atomic_read(&lsm->lsm_refc),
- lov->lo_layout_invalid, current);
- }
- lov_conf_thaw(lov);
- return lsm;
-}
-
-void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm)
-{
- if (lsm == NULL)
- return;
-
- CDEBUG(D_INODE, "lsm %p decref %d by %p.\n",
- lsm, atomic_read(&lsm->lsm_refc), current);
-
- lov_free_memmd(&lsm);
-}
-
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
-{
- struct lu_object *luobj;
- struct lov_stripe_md *lsm = NULL;
-
- if (clobj == NULL)
- return NULL;
-
- luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
- &lov_device_type);
- if (luobj != NULL)
- lsm = lov_lsm_addref(lu2lov(luobj));
- return lsm;
-}
-EXPORT_SYMBOL(lov_lsm_get);
-
-void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
-{
- if (lsm != NULL)
- lov_free_memmd(&lsm);
-}
-EXPORT_SYMBOL(lov_lsm_put);
-
-int lov_read_and_clear_async_rc(struct cl_object *clob)
-{
- struct lu_object *luobj;
- int rc = 0;
-
- luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
- &lov_device_type);
- if (luobj != NULL) {
- struct lov_object *lov = lu2lov(luobj);
-
- lov_conf_freeze(lov);
- switch (lov->lo_type) {
- case LLT_RAID0: {
- struct lov_stripe_md *lsm;
- int i;
-
- lsm = lov->lo_lsm;
- LASSERT(lsm != NULL);
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ar.ar_rc && !rc)
- rc = loi->loi_ar.ar_rc;
- loi->loi_ar.ar_rc = 0;
- }
- }
- case LLT_RELEASED:
- case LLT_EMPTY:
- break;
- default:
- LBUG();
- }
- lov_conf_thaw(lov);
- }
- return rc;
-}
-EXPORT_SYMBOL(lov_read_and_clear_async_rc);
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
deleted file mode 100644
index 9c8c77c05a8a..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_class.h"
-
-#include "lov_internal.h"
-
-/* compute object size given "stripeno" and the ost size */
-u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size,
- int stripeno)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- unsigned long stripe_size;
- u64 swidth;
- u64 lov_size;
- int magic = lsm->lsm_magic;
-
- if (ost_size == 0)
- return 0;
-
- LASSERT(lsm_op_find(magic) != NULL);
- lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
-
- /* lov_do_div64(a, b) returns a % b, and a = a / b */
- stripe_size = lov_do_div64(ost_size, ssize);
- if (stripe_size)
- lov_size = ost_size * swidth + stripeno * ssize + stripe_size;
- else
- lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize;
-
- return lov_size;
-}
-
-/* we have an offset in file backed by an lov and want to find out where
- * that offset lands in our given stripe of the file. for the easy
- * case where the offset is within the stripe, we just have to scale the
- * offset down to make it relative to the stripe instead of the lov.
- *
- * the harder case is what to do when the offset doesn't intersect the
- * stripe. callers will want start offsets clamped ahead to the start
- * of the nearest stripe in the file. end offsets similarly clamped to the
- * nearest ending byte of a stripe in the file:
- *
- * all this function does is move offsets to the nearest region of the
- * stripe, and it does its work "mod" the full length of all the stripes.
- * consider a file with 3 stripes:
- *
- * S E
- * ---------------------------------------------------------------------
- * | 0 | 1 | 2 | 0 | 1 | 2 |
- * ---------------------------------------------------------------------
- *
- * to find stripe 1's offsets for S and E, it divides by the full stripe
- * width and does its math in the context of a single set of stripes:
- *
- * S E
- * -----------------------------------
- * | 0 | 1 | 2 |
- * -----------------------------------
- *
- * it'll notice that E is outside stripe 1 and clamp it to the end of the
- * stripe, then multiply it back out by lov_off to give the real offsets in
- * the stripe:
- *
- * S E
- * ---------------------------------------------------------------------
- * | 1 | 1 | 1 | 1 | 1 | 1 |
- * ---------------------------------------------------------------------
- *
- * it would have done similarly and pulled S forward to the start of a 1
- * stripe if, say, S had landed in a 0 stripe.
- *
- * this rounding isn't always correct. consider an E lov offset that lands
- * on a 0 stripe, the "mod stripe width" math will pull it forward to the
- * start of a 1 stripe, when in fact it wanted to be rounded back to the end
- * of a previous 1 stripe. this logic is handled by callers and this is why:
- *
- * this function returns < 0 when the offset was "before" the stripe and
- * was moved forward to the start of the stripe in question; 0 when it
- * falls in the stripe and no shifting was done; > 0 when the offset
- * was outside the stripe and was pulled back to its final byte. */
-int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
- int stripeno, u64 *obdoff)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- u64 stripe_off, this_stripe, swidth;
- int magic = lsm->lsm_magic;
- int ret = 0;
-
- if (lov_off == OBD_OBJECT_EOF) {
- *obdoff = OBD_OBJECT_EOF;
- return 0;
- }
-
- LASSERT(lsm_op_find(magic) != NULL);
-
- lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off,
- &swidth);
-
- /* lov_do_div64(a, b) returns a % b, and a = a / b */
- stripe_off = lov_do_div64(lov_off, swidth);
-
- this_stripe = (u64)stripeno * ssize;
- if (stripe_off < this_stripe) {
- stripe_off = 0;
- ret = -1;
- } else {
- stripe_off -= this_stripe;
-
- if (stripe_off >= ssize) {
- stripe_off = ssize;
- ret = 1;
- }
- }
-
- *obdoff = lov_off * ssize + stripe_off;
- return ret;
-}
-
-/* Given a whole-file size and a stripe number, give the file size which
- * corresponds to the individual object of that stripe.
- *
- * This behaves basically in the same way as lov_stripe_offset, except that
- * file sizes falling before the beginning of a stripe are clamped to the end
- * of the previous stripe, not the beginning of the next:
- *
- * S
- * ---------------------------------------------------------------------
- * | 0 | 1 | 2 | 0 | 1 | 2 |
- * ---------------------------------------------------------------------
- *
- * if clamped to stripe 2 becomes:
- *
- * S
- * ---------------------------------------------------------------------
- * | 0 | 1 | 2 | 0 | 1 | 2 |
- * ---------------------------------------------------------------------
- */
-u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size,
- int stripeno)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- u64 stripe_off, this_stripe, swidth;
- int magic = lsm->lsm_magic;
-
- if (file_size == OBD_OBJECT_EOF)
- return OBD_OBJECT_EOF;
-
- LASSERT(lsm_op_find(magic) != NULL);
- lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size,
- &swidth);
-
- /* lov_do_div64(a, b) returns a % b, and a = a / b */
- stripe_off = lov_do_div64(file_size, swidth);
-
- this_stripe = (u64)stripeno * ssize;
- if (stripe_off < this_stripe) {
- /* Move to end of previous stripe, or zero */
- if (file_size > 0) {
- file_size--;
- stripe_off = ssize;
- } else {
- stripe_off = 0;
- }
- } else {
- stripe_off -= this_stripe;
-
- if (stripe_off >= ssize) {
- /* Clamp to end of this stripe */
- stripe_off = ssize;
- }
- }
-
- return (file_size * ssize + stripe_off);
-}
-
-/* given an extent in an lov and a stripe, calculate the extent of the stripe
- * that is contained within the lov extent. this returns true if the given
- * stripe does intersect with the lov extent. */
-int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
- u64 start, u64 end, u64 *obd_start, u64 *obd_end)
-{
- int start_side, end_side;
-
- start_side = lov_stripe_offset(lsm, start, stripeno, obd_start);
- end_side = lov_stripe_offset(lsm, end, stripeno, obd_end);
-
- CDEBUG(D_INODE, "[%llu->%llu] -> [(%d) %llu->%llu (%d)]\n",
- start, end, start_side, *obd_start, *obd_end, end_side);
-
- /* this stripe doesn't intersect the file extent when neither
- * the start nor the end intersected the stripe and obd_start and
- * obd_end got rounded up to the same value. */
- if (start_side != 0 && end_side != 0 && *obd_start == *obd_end)
- return 0;
-
- /* as mentioned in the lov_stripe_offset commentary, end
- * might have been shifted in the wrong direction. This
- * happens when an end offset is before the stripe when viewed
- * through the "mod stripe size" math. we detect it being shifted
- * in the wrong direction and touch it up.
- * interestingly, this can't underflow since end must be > start
- * if we passed through the previous check.
- * (should we assert for that somewhere?) */
- if (end_side != 0)
- (*obd_end)--;
-
- return 1;
-}
-
-/* compute which stripe number "lov_off" will be written into */
-int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off)
-{
- unsigned long ssize = lsm->lsm_stripe_size;
- u64 stripe_off, swidth;
- int magic = lsm->lsm_magic;
-
- LASSERT(lsm_op_find(magic) != NULL);
- lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
-
- stripe_off = lov_do_div64(lov_off, swidth);
-
- /* Puts stripe_off/ssize result into stripe_off */
- lov_do_div64(stripe_off, ssize);
-
- return stripe_off;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
deleted file mode 100644
index 6b1c135c9ab0..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_pack.c
- *
- * (Un)packing of OST/MDS requests
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "../include/lustre_net.h"
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre/lustre_user.h"
-
-#include "lov_internal.h"
-
-void lov_dump_lmm_common(int level, void *lmmp)
-{
- struct lov_mds_md *lmm = lmmp;
- struct ost_id oi;
-
- lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
- CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
- POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
- le32_to_cpu(lmm->lmm_pattern));
- CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
- le32_to_cpu(lmm->lmm_stripe_size),
- le16_to_cpu(lmm->lmm_stripe_count),
- le16_to_cpu(lmm->lmm_layout_gen));
-}
-
-static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
- int stripe_count)
-{
- int i;
-
- if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
- CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
- stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
- return;
- }
-
- for (i = 0; i < stripe_count; ++i, ++lod) {
- struct ost_id oi;
-
- ostid_le_to_cpu(&lod->l_ost_oi, &oi);
- CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n", i,
- le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
- }
-}
-
-void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
-{
- lov_dump_lmm_common(level, lmm);
- lov_dump_lmm_objects(level, lmm->lmm_objects,
- le16_to_cpu(lmm->lmm_stripe_count));
-}
-
-void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
-{
- lov_dump_lmm_common(level, lmm);
- CDEBUG(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
- lov_dump_lmm_objects(level, lmm->lmm_objects,
- le16_to_cpu(lmm->lmm_stripe_count));
-}
-
-void lov_dump_lmm(int level, void *lmm)
-{
- int magic;
-
- magic = le32_to_cpu(((struct lov_mds_md *)lmm)->lmm_magic);
- switch (magic) {
- case LOV_MAGIC_V1:
- lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)lmm);
- break;
- case LOV_MAGIC_V3:
- lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)lmm);
- break;
- default:
- CDEBUG(level, "unrecognized lmm_magic %x, assuming %x\n",
- magic, LOV_MAGIC_V1);
- lov_dump_lmm_common(level, lmm);
- break;
- }
-}
-
-/* Pack LOV object metadata for disk storage. It is packed in LE byte
- * order and is opaque to the networking layer.
- *
- * XXX In the future, this will be enhanced to get the EA sizes from the
- * underlying OSC device(s) so we can stack LOVs properly. For now
- * lov_mds_md_size() just assumes one u64 per stripe.
- */
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
- struct lov_mds_md_v1 *lmmv1;
- struct lov_mds_md_v3 *lmmv3;
- __u16 stripe_count;
- struct lov_ost_data_v1 *lmm_objects;
- int lmm_size, lmm_magic;
- int i;
- int cplen = 0;
-
- if (lsm) {
- lmm_magic = lsm->lsm_magic;
- } else {
- if (lmmp && *lmmp)
- lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
- else
- /* lsm == NULL and lmmp == NULL */
- lmm_magic = LOV_MAGIC;
- }
-
- if ((lmm_magic != LOV_MAGIC_V1) &&
- (lmm_magic != LOV_MAGIC_V3)) {
- CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
- lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
- return -EINVAL;
-
- }
-
- if (lsm) {
- /* If we are just sizing the EA, limit the stripe count
- * to the actual number of OSTs in this filesystem. */
- if (!lmmp) {
- stripe_count = lov_get_stripecnt(lov, lmm_magic,
- lsm->lsm_stripe_count);
- lsm->lsm_stripe_count = stripe_count;
- } else if (!lsm_is_released(lsm)) {
- stripe_count = lsm->lsm_stripe_count;
- } else {
- stripe_count = 0;
- }
- } else {
- /* No need to allocate more than the maximum number of supported
- * stripes. Anyway, this is pretty inaccurate since ld_tgt_count
- * now represents the max index, and we should rely on the actual
- * number of OSTs instead. */
- stripe_count = lov_mds_md_max_stripe_count(
- lov->lov_ocd.ocd_max_easize, lmm_magic);
-
- if (stripe_count > lov->desc.ld_tgt_count)
- stripe_count = lov->desc.ld_tgt_count;
- }
-
- /* XXX LOV STACKING call into osc for sizes */
- lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
-
- if (!lmmp)
- return lmm_size;
-
- if (*lmmp && !lsm) {
- stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
- lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
- kvfree(*lmmp);
- *lmmp = NULL;
- return 0;
- }
-
- if (!*lmmp) {
- *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- }
-
- CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
- lmm_magic, lmm_size);
-
- lmmv1 = *lmmp;
- lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
- if (lmm_magic == LOV_MAGIC_V3)
- lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
- else
- lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
-
- if (!lsm)
- return lmm_size;
-
- /* lmmv1 and lmmv3 point to the same struct and have the
- * same first fields
- */
- lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
- lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
- lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
- lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
- lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
- if (lsm->lsm_magic == LOV_MAGIC_V3) {
- cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
- sizeof(lmmv3->lmm_pool_name));
- if (cplen >= sizeof(lmmv3->lmm_pool_name))
- return -E2BIG;
- lmm_objects = lmmv3->lmm_objects;
- } else {
- lmm_objects = lmmv1->lmm_objects;
- }
-
- for (i = 0; i < stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
- /* XXX LOV STACKING call down to osc_packmd() to do packing */
- LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
- " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
- i, stripe_count, loi->loi_ost_idx);
- ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
- lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
- lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
- }
-
- return lmm_size;
-}
-
-/* Find the maximum stripe count we should use */
-__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
-{
- __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
-
- if (!stripe_count)
- stripe_count = lov->desc.ld_default_stripe_count;
- if (stripe_count > lov->desc.ld_active_tgt_count)
- stripe_count = lov->desc.ld_active_tgt_count;
- if (!stripe_count)
- stripe_count = 1;
-
- /* stripe count is based on whether ldiskfs can handle
- * larger EA sizes */
- if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
- lov->lov_ocd.ocd_max_easize)
- max_stripes = lov_mds_md_max_stripe_count(
- lov->lov_ocd.ocd_max_easize, magic);
-
- if (stripe_count > max_stripes)
- stripe_count = max_stripes;
-
- return stripe_count;
-}
-
-
-static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
-{
- int rc;
-
- if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) {
- char *buffer;
- int sz;
-
- CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
- le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
- sz = lmm_bytes * 2 + 1;
- buffer = libcfs_kvzalloc(sz, GFP_NOFS);
- if (buffer != NULL) {
- int i;
-
- for (i = 0; i < lmm_bytes; i++)
- sprintf(buffer+2*i, "%.2X", ((char *)lmm)[i]);
- buffer[sz - 1] = '\0';
- CERROR("%s\n", buffer);
- kvfree(buffer);
- }
- return -EINVAL;
- }
- rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
- lmm_bytes, stripe_count);
- return rc;
-}
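
lov_verify_lmm() above dumps the rejected buffer as hex, two digits per byte, before returning -EINVAL. One caution for anyone reimplementing that dump outside the kernel: the bytes should be read through unsigned char, since a plain signed char sign-extends values >= 0x80 and "%.2X" then prints eight digits instead of two. A small hedged sketch of such a helper:

#include <stdio.h>
#include <stdlib.h>

/* Dump 'len' bytes of 'buf' as two hex digits each into a freshly
 * allocated string; the caller frees it. Reading through unsigned char
 * avoids sign-extension of bytes >= 0x80. */
static char *hex_dump(const void *buf, size_t len)
{
        const unsigned char *p = buf;
        char *out = malloc(2 * len + 1);
        size_t i;

        if (!out)
                return NULL;
        for (i = 0; i < len; i++)
                sprintf(out + 2 * i, "%.2X", p[i]);
        out[2 * len] = '\0';
        return out;
}

int main(void)
{
        const unsigned char bad_magic[4] = { 0xde, 0xad, 0xbe, 0xef };
        char *s = hex_dump(bad_magic, sizeof(bad_magic));

        if (s) {
                printf("bad LOV magic, dump: %s\n", s);
                free(s);
        }
        return 0;
}
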
-
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
- int pattern, int magic)
-{
- int i, lsm_size;
-
- CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
-
- *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
- if (!*lsmp) {
- CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
- return -ENOMEM;
- }
-
- atomic_set(&(*lsmp)->lsm_refc, 1);
- spin_lock_init(&(*lsmp)->lsm_lock);
- (*lsmp)->lsm_magic = magic;
- (*lsmp)->lsm_stripe_count = stripe_count;
- (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
- (*lsmp)->lsm_pattern = pattern;
- (*lsmp)->lsm_pool_name[0] = '\0';
- (*lsmp)->lsm_layout_gen = 0;
- if (stripe_count > 0)
- (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
-
- for (i = 0; i < stripe_count; i++)
- loi_init((*lsmp)->lsm_oinfo[i]);
-
- return lsm_size;
-}
-
-int lov_free_memmd(struct lov_stripe_md **lsmp)
-{
- struct lov_stripe_md *lsm = *lsmp;
- int refc;
-
- *lsmp = NULL;
- LASSERT(atomic_read(&lsm->lsm_refc) > 0);
- refc = atomic_dec_return(&lsm->lsm_refc);
- if (refc == 0) {
- LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
- lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
- }
- return refc;
-}
-
-
-/* Unpack LOV object metadata from disk storage. It is packed in LE byte
- * order and is opaque to the networking layer.
- */
-int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
- int rc = 0, lsm_size;
- __u16 stripe_count;
- __u32 magic;
- __u32 pattern;
-
- /* If passed an MDS struct, use values from there; otherwise use defaults */
- if (lmm) {
- rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
- if (rc)
- return rc;
- magic = le32_to_cpu(lmm->lmm_magic);
- pattern = le32_to_cpu(lmm->lmm_pattern);
- } else {
- magic = LOV_MAGIC;
- stripe_count = lov_get_stripecnt(lov, magic, 0);
- pattern = LOV_PATTERN_RAID0;
- }
-
- /* If we aren't passed an lsmp struct, we just want the size */
- if (!lsmp) {
- /* XXX LOV STACKING call into osc for sizes */
- LBUG();
- return lov_stripe_md_size(stripe_count);
- }
- /* If we are passed an allocated struct but nothing to unpack, free */
- if (*lsmp && !lmm) {
- lov_free_memmd(lsmp);
- return 0;
- }
-
- lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
- if (lsm_size < 0)
- return lsm_size;
-
- /* If we are passed a pointer but nothing to unpack, we only alloc */
- if (!lmm)
- return lsm_size;
-
- LASSERT(lsm_op_find(magic) != NULL);
- rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
- if (rc) {
- lov_free_memmd(lsmp);
- return rc;
- }
-
- return lsm_size;
-}
-
-/* Retrieve object striping information.
- *
- * @lump is a pointer to an in-core struct with lmm_ost_count indicating
- * the maximum number of OST indices which will fit in the user buffer.
- * lmm_magic must be LOV_USER_MAGIC.
- */
-int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_user_md *lump)
-{
- /*
- * XXX huge struct allocated on stack.
- */
- /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
- struct lov_user_md_v3 lum;
- struct lov_mds_md *lmmk = NULL;
- int rc, lmm_size;
- int lum_size;
- mm_segment_t seg;
-
- if (!lsm)
- return -ENODATA;
-
- /*
- * "Switch to kernel segment" to allow copying from kernel space by
- * copy_{to,from}_user().
- */
- seg = get_fs();
- set_fs(KERNEL_DS);
-
- /* we only need the header part from user space to get lmm_magic and
- * lmm_stripe_count (the header part is common to v1 and v3) */
- lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(&lum, lump, lum_size)) {
- rc = -EFAULT;
- goto out_set;
- } else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
- (lum.lmm_magic != LOV_USER_MAGIC_V3)) {
- rc = -EINVAL;
- goto out_set;
- }
-
- if (lum.lmm_stripe_count &&
- (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
- /* Return the right stripe count to the user */
- lum.lmm_stripe_count = lsm->lsm_stripe_count;
- rc = copy_to_user(lump, &lum, lum_size);
- rc = -EOVERFLOW;
- goto out_set;
- }
- rc = lov_packmd(exp, &lmmk, lsm);
- if (rc < 0)
- goto out_set;
- lmm_size = rc;
- rc = 0;
-
- /* FIXME: Bug 1185 - copy fields properly when structs change */
- /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
- CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
- CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
-
- if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
- ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
- (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
- lustre_swab_lov_mds_md(lmmk);
- lustre_swab_lov_user_md_objects(
- (struct lov_user_ost_data *)lmmk->lmm_objects,
- lmmk->lmm_stripe_count);
- }
- if (lum.lmm_magic == LOV_USER_MAGIC) {
- /* User requested v1, so we need to skip lmm_pool_name */
- if (lmmk->lmm_magic == LOV_MAGIC_V3) {
- memmove((char *)(&lmmk->lmm_stripe_count) +
- sizeof(lmmk->lmm_stripe_count),
- ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
- lmmk->lmm_stripe_count *
- sizeof(struct lov_ost_data_v1));
- lmm_size -= LOV_MAXPOOLNAME;
- }
- } else {
- /* if v3 we just have to update the lum_size */
- lum_size = sizeof(struct lov_user_md_v3);
- }
-
- /* User wasn't expecting this many OST entries */
- if (lum.lmm_stripe_count == 0)
- lmm_size = lum_size;
- else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
- rc = -EOVERFLOW;
- goto out_set;
- }
- /*
- * There is a difference between lov_mds_md and lov_user_md,
- * so we have to re-order the data before copying it to user space.
- */
- lum.lmm_stripe_count = lmmk->lmm_stripe_count;
- lum.lmm_layout_gen = lmmk->lmm_layout_gen;
- ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
- ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
- if (copy_to_user(lump, lmmk, lmm_size))
- rc = -EFAULT;
-
- obd_free_diskmd(exp, &lmmk);
-out_set:
- set_fs(seg);
- return rc;
-}
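
lov_packmd() and lov_unpackmd() above size the on-disk EA as a fixed per-magic header plus one lov_ost_data entry per stripe (lov_mds_md_size()). A rough standalone sketch of that sizing arithmetic; the byte counts here are made-up stand-ins, not the real wire-format sizes from lustre_idl.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed, illustrative sizes only; the real structs live in lustre_idl.h
 * and LOV_MAGIC_V3 additionally carries a pool name in its header. */
#define FAKE_LMM_V1_HDR  32u   /* assumed lov_mds_md_v1 header size */
#define FAKE_LMM_V3_HDR  48u   /* assumed lov_mds_md_v3 header size */
#define FAKE_OST_DATA    24u   /* assumed per-stripe lov_ost_data_v1 size */

enum { MAGIC_V1 = 1, MAGIC_V3 = 3 };

static unsigned int fake_lov_mds_md_size(unsigned int stripe_count, int magic)
{
        unsigned int hdr = (magic == MAGIC_V3) ? FAKE_LMM_V3_HDR
                                               : FAKE_LMM_V1_HDR;

        return hdr + stripe_count * FAKE_OST_DATA;
}

int main(void)
{
        printf("v1, 4 stripes: %u bytes\n", fake_lov_mds_md_size(4, MAGIC_V1));
        printf("v3, 4 stripes: %u bytes\n", fake_lov_mds_md_size(4, MAGIC_V3));
        return 0;
}

Sizing first and allocating second is also why lov_packmd() can be called with lmmp == NULL purely to learn how big a buffer the caller must provide.
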
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
deleted file mode 100644
index c4596e8e5783..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for LOV layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lov page operations.
- *
- */
-
-static int lov_page_invariant(const struct cl_page_slice *slice)
-{
- const struct cl_page *page = slice->cpl_page;
- const struct cl_page *sub = lov_sub_page(slice);
-
- return ergo(sub != NULL,
- page->cp_child == sub &&
- sub->cp_parent == page &&
- page->cp_state == sub->cp_state);
-}
-
-static void lov_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct cl_page *sub = lov_sub_page(slice);
-
- LINVRNT(lov_page_invariant(slice));
-
- if (sub != NULL) {
- LASSERT(sub->cp_state == CPS_FREEING);
- lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
- sub->cp_parent = NULL;
- slice->cpl_page->cp_child = NULL;
- cl_page_put(env, sub);
- }
-}
-
-static int lov_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
-
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
-
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- lov_sub_page(slice)->cp_owner = sub->sub_io;
- lov_sub_put(sub);
- } else
- LBUG(); /* Arrgh */
- return 0;
-}
-
-static void lov_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- lov_page_own(env, slice, io, 0);
-}
-
-static int lov_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
- int rc = 0;
-
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
-
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
- slice->cpl_page->cp_child, CRT_WRITE);
- lov_sub_put(sub);
- } else {
- rc = PTR_ERR(sub);
- CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
- }
- return rc;
-}
-
-static int lov_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct lov_page *lp = cl2lov_page(slice);
-
- return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
-}
-
-static const struct cl_page_operations lov_page_ops = {
- .cpo_fini = lov_page_fini,
- .cpo_own = lov_page_own,
- .cpo_assume = lov_page_assume,
- .io = {
- [CRT_WRITE] = {
- .cpo_cache_add = lov_page_cache_add
- }
- },
- .cpo_print = lov_page_print
-};
-
-static void lov_empty_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- LASSERT(slice->cpl_page->cp_child == NULL);
-}
-
-int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
-{
- struct lov_object *loo = cl2lov(obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct lov_io *lio = lov_env_io(env);
- struct cl_page *subpage;
- struct cl_object *subobj;
- struct lov_io_sub *sub;
- struct lov_page *lpg = cl_object_page_slice(obj, page);
- loff_t offset;
- u64 suboff;
- int stripe;
- int rc;
-
- offset = cl_offset(obj, page->cp_index);
- stripe = lov_stripe_number(loo->lo_lsm, offset);
- LASSERT(stripe < r0->lo_nr);
- rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
- &suboff);
- LASSERT(rc == 0);
-
- lpg->lps_invalid = 1;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
-
- sub = lov_sub_get(env, lio, stripe);
- if (IS_ERR(sub)) {
- rc = PTR_ERR(sub);
- goto out;
- }
-
- subobj = lovsub2cl(r0->lo_sub[stripe]);
- subpage = cl_page_find_sub(sub->sub_env, subobj,
- cl_index(subobj, suboff), vmpage, page);
- lov_sub_put(sub);
- if (IS_ERR(subpage)) {
- rc = PTR_ERR(subpage);
- goto out;
- }
-
- if (likely(subpage->cp_parent == page)) {
- lu_ref_add(&subpage->cp_reference, "lov", page);
- lpg->lps_invalid = 0;
- rc = 0;
- } else {
- CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
- CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
- LASSERT(0);
- }
-
-out:
- return rc;
-}
-
-
-static const struct cl_page_operations lov_empty_page_ops = {
- .cpo_fini = lov_empty_page_fini,
- .cpo_print = lov_page_print
-};
-
-int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
-{
- struct lov_page *lpg = cl_object_page_slice(obj, page);
- void *addr;
-
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
- addr = kmap(vmpage);
- memset(addr, 0, cl_page_size(obj));
- kunmap(vmpage);
- cl_page_export(env, page, 1);
- return 0;
-}
-
-
-/** @} lov */
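
lov_page_init_raid0() above pairs the stripe-number lookup with a sub-offset: where inside the chosen stripe's backing object the file offset lands. A self-contained sketch of that RAID0 decomposition, using plain 64-bit arithmetic instead of the per-layout lsm_ops behind lov_stripe_offset():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical RAID0 decomposition: a file offset maps to a stripe index
 * plus an offset inside that stripe's backing object. */
static void raid0_decompose(uint64_t file_off, uint64_t stripe_size,
                            unsigned int stripe_count,
                            unsigned int *stripe, uint64_t *sub_off)
{
        uint64_t swidth = stripe_size * stripe_count;
        uint64_t in_width = file_off % swidth;  /* position inside one width */
        uint64_t widths = file_off / swidth;    /* full widths before it */

        *stripe = (unsigned int)(in_width / stripe_size);
        *sub_off = widths * stripe_size + (in_width % stripe_size);
}

int main(void)
{
        unsigned int stripe;
        uint64_t sub_off;

        /* 1 MiB stripes over 4 OSTs, file offset 9.5 MiB:
         * lands on stripe 1 at sub-offset 2.5 MiB. */
        raid0_decompose((9ULL << 20) + (1ULL << 19), 1ULL << 20, 4,
                        &stripe, &sub_off);
        printf("stripe %u, sub-offset %llu\n",
               stripe, (unsigned long long)sub_off);
        return 0;
}
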
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
deleted file mode 100644
index c59b1402616e..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ /dev/null
@@ -1,672 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lov/lov_pool.c
- *
- * OST pool methods
- *
- * Author: Jacques-Charles LAFOUCRIERE <jc.lafoucriere@cea.fr>
- * Author: Alex Lyashkov <Alexey.Lyashkov@Sun.COM>
- * Author: Nathaniel Rutman <Nathan.Rutman@Sun.COM>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd.h"
-#include "lov_internal.h"
-
-#define pool_tgt(_p, _i) \
- _p->pool_lobd->u.lov.lov_tgts[_p->pool_obds.op_array[_i]]
-
-static void lov_pool_getref(struct pool_desc *pool)
-{
- CDEBUG(D_INFO, "pool %p\n", pool);
- atomic_inc(&pool->pool_refcount);
-}
-
-void lov_pool_putref(struct pool_desc *pool)
-{
- CDEBUG(D_INFO, "pool %p\n", pool);
- if (atomic_dec_and_test(&pool->pool_refcount)) {
- LASSERT(hlist_unhashed(&pool->pool_hash));
- LASSERT(list_empty(&pool->pool_list));
- LASSERT(pool->pool_debugfs_entry == NULL);
- lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
- lov_ost_pool_free(&(pool->pool_obds));
- kfree(pool);
- }
-}
-
-static void lov_pool_putref_locked(struct pool_desc *pool)
-{
- CDEBUG(D_INFO, "pool %p\n", pool);
- LASSERT(atomic_read(&pool->pool_refcount) > 1);
-
- atomic_dec(&pool->pool_refcount);
-}
-
-/*
- * hash function using a Rotating Hash algorithm
- * Knuth, D. The Art of Computer Programming,
- * Volume 3: Sorting and Searching,
- * Chapter 6.4.
- * Addison Wesley, 1973
- */
-static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
-{
- int i;
- __u32 result;
- char *poolname;
-
- result = 0;
- poolname = (char *)key;
- for (i = 0; i < LOV_MAXPOOLNAME; i++) {
- if (poolname[i] == '\0')
- break;
- result = (result << 4)^(result >> 28) ^ poolname[i];
- }
- return (result % mask);
-}
-
-static void *pool_key(struct hlist_node *hnode)
-{
- struct pool_desc *pool;
-
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
- return pool->pool_name;
-}
-
-static int pool_hashkey_keycmp(const void *key, struct hlist_node *compared_hnode)
-{
- char *pool_name;
- struct pool_desc *pool;
-
- pool_name = (char *)key;
- pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
- return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
-}
-
-static void *pool_hashobject(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct pool_desc, pool_hash);
-}
-
-static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct pool_desc *pool;
-
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
- lov_pool_getref(pool);
-}
-
-static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
- struct hlist_node *hnode)
-{
- struct pool_desc *pool;
-
- pool = hlist_entry(hnode, struct pool_desc, pool_hash);
- lov_pool_putref_locked(pool);
-}
-
-cfs_hash_ops_t pool_hash_operations = {
- .hs_hash = pool_hashfn,
- .hs_key = pool_key,
- .hs_keycmp = pool_hashkey_keycmp,
- .hs_object = pool_hashobject,
- .hs_get = pool_hashrefcount_get,
- .hs_put_locked = pool_hashrefcount_put_locked,
-
-};
-
-/* ifdef needed for liblustre support */
-/*
- * pool /proc seq_file methods
- */
-/*
- * The iterator is used to go through the target pool entries.
- * 'idx' is the current entry index in the lp_array[] array and is
- * always >= the 'pos' returned to the seq_file interface;
- * pos runs from 0 to (pool->pool_obds.op_count - 1).
- */
-#define POOL_IT_MAGIC 0xB001CEA0
-struct pool_iterator {
- int magic;
- struct pool_desc *pool;
- int idx; /* from 0 to pool_tgt_size - 1 */
-};
-
-static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct pool_iterator *iter = (struct pool_iterator *)s->private;
- int prev_idx;
-
- LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);
-
- /* test if end of file */
- if (*pos >= pool_tgt_count(iter->pool))
- return NULL;
-
- /* iterate to find a non-empty entry */
- prev_idx = iter->idx;
- down_read(&pool_tgt_rw_sem(iter->pool));
- iter->idx++;
- if (iter->idx == pool_tgt_count(iter->pool)) {
- iter->idx = prev_idx; /* we stay on the last entry */
- up_read(&pool_tgt_rw_sem(iter->pool));
- return NULL;
- }
- up_read(&pool_tgt_rw_sem(iter->pool));
- (*pos)++;
- /* return != NULL to continue */
- return iter;
-}
-
-static void *pool_proc_start(struct seq_file *s, loff_t *pos)
-{
- struct pool_desc *pool = (struct pool_desc *)s->private;
- struct pool_iterator *iter;
-
- lov_pool_getref(pool);
- if ((pool_tgt_count(pool) == 0) ||
- (*pos >= pool_tgt_count(pool))) {
- /* iter is not created, so stop() has no way to
- * find the pool to drop the reference */
- lov_pool_putref(pool);
- return NULL;
- }
-
- iter = kzalloc(sizeof(*iter), GFP_NOFS);
- if (!iter)
- return ERR_PTR(-ENOMEM);
- iter->magic = POOL_IT_MAGIC;
- iter->pool = pool;
- iter->idx = 0;
-
- /* we use the seq_file private field to memorize the iterator so
- * we can free it at stop() */
- /* /!\ do not forget to restore it to the pool before freeing it */
- s->private = iter;
- if (*pos > 0) {
- loff_t i;
- void *ptr;
-
- i = 0;
- do {
- ptr = pool_proc_next(s, &iter, &i);
- } while ((i < *pos) && (ptr != NULL));
- return ptr;
- }
- return iter;
-}
-
-static void pool_proc_stop(struct seq_file *s, void *v)
-{
- struct pool_iterator *iter = (struct pool_iterator *)s->private;
-
- /* in some cases the stop() method is called twice, without
- * the start() method being called (see seq_read() in fs/seq_file.c);
- * we have to free only if s->private is an iterator */
- if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
- /* we restore s->private so next call to pool_proc_start()
- * will work */
- s->private = iter->pool;
- lov_pool_putref(iter->pool);
- kfree(iter);
- }
- return;
-}
-
-static int pool_proc_show(struct seq_file *s, void *v)
-{
- struct pool_iterator *iter = (struct pool_iterator *)v;
- struct lov_tgt_desc *tgt;
-
- LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);
- LASSERT(iter->pool != NULL);
- LASSERT(iter->idx <= pool_tgt_count(iter->pool));
-
- down_read(&pool_tgt_rw_sem(iter->pool));
- tgt = pool_tgt(iter->pool, iter->idx);
- up_read(&pool_tgt_rw_sem(iter->pool));
- if (tgt)
- seq_printf(s, "%s\n", obd_uuid2str(&(tgt->ltd_uuid)));
-
- return 0;
-}
-
-static const struct seq_operations pool_proc_ops = {
- .start = pool_proc_start,
- .next = pool_proc_next,
- .stop = pool_proc_stop,
- .show = pool_proc_show,
-};
-
-static int pool_proc_open(struct inode *inode, struct file *file)
-{
- int rc;
-
- rc = seq_open(file, &pool_proc_ops);
- if (!rc) {
- struct seq_file *s = file->private_data;
- s->private = inode->i_private;
- }
- return rc;
-}
-
-static struct file_operations pool_proc_operations = {
- .open = pool_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-void lov_dump_pool(int level, struct pool_desc *pool)
-{
- int i;
-
- lov_pool_getref(pool);
-
- CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
- pool->pool_name, pool->pool_obds.op_count);
- down_read(&pool_tgt_rw_sem(pool));
-
- for (i = 0; i < pool_tgt_count(pool) ; i++) {
- if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
- continue;
- CDEBUG(level, "pool "LOV_POOLNAMEF"[%d] = %s\n",
- pool->pool_name, i,
- obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
- }
-
- up_read(&pool_tgt_rw_sem(pool));
- lov_pool_putref(pool);
-}
-
-#define LOV_POOL_INIT_COUNT 2
-int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
-{
- if (count == 0)
- count = LOV_POOL_INIT_COUNT;
- op->op_array = NULL;
- op->op_count = 0;
- init_rwsem(&op->op_rw_sem);
- op->op_size = count;
- op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS);
- if (op->op_array == NULL) {
- op->op_size = 0;
- return -ENOMEM;
- }
- return 0;
-}
-
-/* Caller must hold write op_rwlock */
-int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count)
-{
- __u32 *new;
- int new_size;
-
- LASSERT(min_count != 0);
-
- if (op->op_count < op->op_size)
- return 0;
-
- new_size = max(min_count, 2 * op->op_size);
- new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS);
- if (new == NULL)
- return -ENOMEM;
-
- /* copy old array to new one */
- memcpy(new, op->op_array, op->op_size * sizeof(op->op_array[0]));
- kfree(op->op_array);
- op->op_array = new;
- op->op_size = new_size;
- return 0;
-}
-
-int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
-{
- int rc = 0, i;
-
- down_write(&op->op_rw_sem);
-
- rc = lov_ost_pool_extend(op, min_count);
- if (rc)
- goto out;
-
- /* search ost in pool array */
- for (i = 0; i < op->op_count; i++) {
- if (op->op_array[i] == idx) {
- rc = -EEXIST;
- goto out;
- }
- }
- /* OST not found, so add it */
- op->op_array[op->op_count] = idx;
- op->op_count++;
-out:
- up_write(&op->op_rw_sem);
- return rc;
-}
-
-int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
-{
- int i;
-
- down_write(&op->op_rw_sem);
-
- for (i = 0; i < op->op_count; i++) {
- if (op->op_array[i] == idx) {
- memmove(&op->op_array[i], &op->op_array[i + 1],
- (op->op_count - i - 1) * sizeof(op->op_array[0]));
- op->op_count--;
- up_write(&op->op_rw_sem);
- return 0;
- }
- }
-
- up_write(&op->op_rw_sem);
- return -EINVAL;
-}
-
-int lov_ost_pool_free(struct ost_pool *op)
-{
- if (op->op_size == 0)
- return 0;
-
- down_write(&op->op_rw_sem);
-
- kfree(op->op_array);
- op->op_array = NULL;
- op->op_count = 0;
- op->op_size = 0;
-
- up_write(&op->op_rw_sem);
- return 0;
-}
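
The ost_pool helpers above keep a doubling array of OST indices behind op_rw_sem. A minimal userspace sketch of the same grow-then-append pattern, with standard C allocation standing in for kcalloc() and no locking:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical userspace analogue of struct ost_pool. */
struct idx_pool {
        uint32_t *array;
        unsigned int count;
        unsigned int size;
};

/* Grow the backing array to at least min_count slots, doubling by default,
 * in the spirit of lov_ost_pool_extend(). */
static int idx_pool_extend(struct idx_pool *p, unsigned int min_count)
{
        unsigned int new_size;
        uint32_t *tmp;

        if (p->count < p->size)
                return 0;                   /* still room, nothing to do */

        new_size = 2 * p->size > min_count ? 2 * p->size : min_count;
        tmp = calloc(new_size, sizeof(*tmp));
        if (!tmp)
                return -1;
        if (p->array)
                memcpy(tmp, p->array, p->size * sizeof(*tmp));
        free(p->array);
        p->array = tmp;
        p->size = new_size;
        return 0;
}

/* Append an index, refusing duplicates, like lov_ost_pool_add(). */
static int idx_pool_add(struct idx_pool *p, uint32_t idx)
{
        unsigned int i;

        if (idx_pool_extend(p, 1))
                return -1;
        for (i = 0; i < p->count; i++)
                if (p->array[i] == idx)
                        return -1;          /* already a member */
        p->array[p->count++] = idx;
        return 0;
}

int main(void)
{
        struct idx_pool pool = { NULL, 0, 0 };

        idx_pool_add(&pool, 3);
        idx_pool_add(&pool, 7);
        printf("pool has %u members\n", pool.count);
        free(pool.array);
        return 0;
}
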
-
-
-int lov_pool_new(struct obd_device *obd, char *poolname)
-{
- struct lov_obd *lov;
- struct pool_desc *new_pool;
- int rc;
-
- lov = &(obd->u.lov);
-
- if (strlen(poolname) > LOV_MAXPOOLNAME)
- return -ENAMETOOLONG;
-
- new_pool = kzalloc(sizeof(*new_pool), GFP_NOFS);
- if (!new_pool)
- return -ENOMEM;
-
- strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
- new_pool->pool_name[LOV_MAXPOOLNAME] = '\0';
- new_pool->pool_lobd = obd;
- /* ref count is initialized to 1 because, once created, a pool is
- * always in use until it is deleted
- */
- atomic_set(&new_pool->pool_refcount, 1);
- rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
- if (rc)
- goto out_err;
-
- memset(&(new_pool->pool_rr), 0, sizeof(struct lov_qos_rr));
- rc = lov_ost_pool_init(&new_pool->pool_rr.lqr_pool, 0);
- if (rc)
- goto out_free_pool_obds;
-
- INIT_HLIST_NODE(&new_pool->pool_hash);
-
- /* we need this assert seq_file is not implemented for liblustre */
- /* get ref for /proc file */
- lov_pool_getref(new_pool);
- new_pool->pool_debugfs_entry = ldebugfs_add_simple(
- lov->lov_pool_debugfs_entry,
- poolname, new_pool,
- &pool_proc_operations);
- if (IS_ERR_OR_NULL(new_pool->pool_debugfs_entry)) {
- CWARN("Cannot add debugfs pool entry "LOV_POOLNAMEF"\n",
- poolname);
- new_pool->pool_debugfs_entry = NULL;
- lov_pool_putref(new_pool);
- }
- CDEBUG(D_INFO, "pool %p - proc %p\n",
- new_pool, new_pool->pool_debugfs_entry);
-
- spin_lock(&obd->obd_dev_lock);
- list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
- lov->lov_pool_count++;
- spin_unlock(&obd->obd_dev_lock);
-
- /* add it to the hash for lookup only when it is fully ready */
- rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
- &new_pool->pool_hash);
- if (rc) {
- rc = -EEXIST;
- goto out_err;
- }
-
- CDEBUG(D_CONFIG, LOV_POOLNAMEF" is pool #%d\n",
- poolname, lov->lov_pool_count);
-
- return 0;
-
-out_err:
- spin_lock(&obd->obd_dev_lock);
- list_del_init(&new_pool->pool_list);
- lov->lov_pool_count--;
- spin_unlock(&obd->obd_dev_lock);
-
- ldebugfs_remove(&new_pool->pool_debugfs_entry);
-
- lov_ost_pool_free(&new_pool->pool_rr.lqr_pool);
-out_free_pool_obds:
- lov_ost_pool_free(&new_pool->pool_obds);
- kfree(new_pool);
- return rc;
-}
-
-int lov_pool_del(struct obd_device *obd, char *poolname)
-{
- struct lov_obd *lov;
- struct pool_desc *pool;
-
- lov = &(obd->u.lov);
-
- /* lookup and kill hash reference */
- pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
- return -ENOENT;
-
- if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) {
- CDEBUG(D_INFO, "proc entry %p\n", pool->pool_debugfs_entry);
- ldebugfs_remove(&pool->pool_debugfs_entry);
- lov_pool_putref(pool);
- }
-
- spin_lock(&obd->obd_dev_lock);
- list_del_init(&pool->pool_list);
- lov->lov_pool_count--;
- spin_unlock(&obd->obd_dev_lock);
-
- /* release last reference */
- lov_pool_putref(pool);
-
- return 0;
-}
-
-
-int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
-{
- struct obd_uuid ost_uuid;
- struct lov_obd *lov;
- struct pool_desc *pool;
- unsigned int lov_idx;
- int rc;
-
- lov = &(obd->u.lov);
-
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
- return -ENOENT;
-
- obd_str2uuid(&ost_uuid, ostname);
-
-
- /* search ost in lov array */
- obd_getref(obd);
- for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
- if (!lov->lov_tgts[lov_idx])
- continue;
- if (obd_uuid_equals(&ost_uuid,
- &(lov->lov_tgts[lov_idx]->ltd_uuid)))
- break;
- }
- /* test if ost found in lov */
- if (lov_idx == lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = lov_ost_pool_add(&pool->pool_obds, lov_idx, lov->lov_tgt_size);
- if (rc)
- goto out;
-
- pool->pool_rr.lqr_dirty = 1;
-
- CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
- ostname, poolname, pool_tgt_count(pool));
-
-out:
- obd_putref(obd);
- lov_pool_putref(pool);
- return rc;
-}
-
-int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
-{
- struct obd_uuid ost_uuid;
- struct lov_obd *lov;
- struct pool_desc *pool;
- unsigned int lov_idx;
- int rc = 0;
-
- lov = &(obd->u.lov);
-
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
- return -ENOENT;
-
- obd_str2uuid(&ost_uuid, ostname);
-
- obd_getref(obd);
- /* search ost in lov array, to get index */
- for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
- if (!lov->lov_tgts[lov_idx])
- continue;
-
- if (obd_uuid_equals(&ost_uuid,
- &(lov->lov_tgts[lov_idx]->ltd_uuid)))
- break;
- }
-
- /* test if ost found in lov */
- if (lov_idx == lov->desc.ld_tgt_count) {
- rc = -EINVAL;
- goto out;
- }
-
- lov_ost_pool_remove(&pool->pool_obds, lov_idx);
-
- pool->pool_rr.lqr_dirty = 1;
-
- CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
- poolname);
-
-out:
- obd_putref(obd);
- lov_pool_putref(pool);
- return rc;
-}
-
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
-{
- int i, rc;
-
- /* the caller may not have a ref on the pool if it got the pool
- * without calling lov_find_pool() (e.g. by walking the lov pool
- * list)
- */
- lov_pool_getref(pool);
-
- down_read(&pool_tgt_rw_sem(pool));
-
- for (i = 0; i < pool_tgt_count(pool); i++) {
- if (pool_tgt_array(pool)[i] == idx) {
- rc = 0;
- goto out;
- }
- }
- rc = -ENOENT;
-out:
- up_read(&pool_tgt_rw_sem(pool));
-
- lov_pool_putref(pool);
- return rc;
-}
-
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname)
-{
- struct pool_desc *pool;
-
- pool = NULL;
- if (poolname[0] != '\0') {
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
- CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
- poolname);
- if ((pool != NULL) && (pool_tgt_count(pool) == 0)) {
- CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
- poolname);
- /* pool is ignored, so we remove ref on it */
- lov_pool_putref(pool);
- pool = NULL;
- }
- }
- return pool;
-}
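
Pool names in the removed code are hashed with a Knuth-style rotating hash (pool_hashfn() above): rotate the accumulator by four bits and XOR in the next character. The same idea as a standalone C function; MAX_POOLNAME is an assumed stand-in for LOV_MAXPOOLNAME, and the final '% mask' mirrors the original rather than the more common '& mask':

#include <stdint.h>
#include <stdio.h>

#define MAX_POOLNAME 15   /* assumed stand-in for LOV_MAXPOOLNAME */

/* Rotating hash (Knuth, TAOCP vol. 3, section 6.4) over a pool name,
 * reduced to a bucket number by the caller-supplied mask. */
static uint32_t poolname_hash(const char *name, uint32_t mask)
{
        uint32_t result = 0;
        int i;

        for (i = 0; i < MAX_POOLNAME && name[i] != '\0'; i++)
                result = (result << 4) ^ (result >> 28) ^ (uint32_t)name[i];

        return result % mask;
}

int main(void)
{
        printf("%u\n", poolname_hash("ssd_pool", 128));
        return 0;
}
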
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
deleted file mode 100644
index 104c92ace682..000000000000
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ /dev/null
@@ -1,759 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_class.h"
-#include "../include/lustre/lustre_idl.h"
-#include "lov_internal.h"
-
-static void lov_init_set(struct lov_request_set *set)
-{
- set->set_count = 0;
- atomic_set(&set->set_completes, 0);
- atomic_set(&set->set_success, 0);
- atomic_set(&set->set_finish_checked, 0);
- set->set_cookies = NULL;
- INIT_LIST_HEAD(&set->set_list);
- atomic_set(&set->set_refcount, 1);
- init_waitqueue_head(&set->set_waitq);
- spin_lock_init(&set->set_lock);
-}
-
-void lov_finish_set(struct lov_request_set *set)
-{
- struct list_head *pos, *n;
-
- LASSERT(set);
- list_for_each_safe(pos, n, &set->set_list) {
- struct lov_request *req = list_entry(pos,
- struct lov_request,
- rq_link);
- list_del_init(&req->rq_link);
-
- if (req->rq_oi.oi_oa)
- OBDO_FREE(req->rq_oi.oi_oa);
- kfree(req->rq_oi.oi_osfs);
- kfree(req);
- }
- kfree(set);
-}
-
-int lov_set_finished(struct lov_request_set *set, int idempotent)
-{
- int completes = atomic_read(&set->set_completes);
-
- CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count);
-
- if (completes == set->set_count) {
- if (idempotent)
- return 1;
- if (atomic_inc_return(&set->set_finish_checked) == 1)
- return 1;
- }
- return 0;
-}
-
-void lov_update_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- req->rq_complete = 1;
- req->rq_rc = rc;
-
- atomic_inc(&set->set_completes);
- if (rc == 0)
- atomic_inc(&set->set_success);
-
- wake_up(&set->set_waitq);
-}
-
-int lov_update_common_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
-
- lov_update_set(set, req, rc);
-
- /* grace error on inactive ost */
- if (rc && !(lov->lov_tgts[req->rq_idx] &&
- lov->lov_tgts[req->rq_idx]->ltd_active))
- rc = 0;
-
- /* FIXME in raid1 regime, should return 0 */
- return rc;
-}
-
-void lov_set_add_req(struct lov_request *req, struct lov_request_set *set)
-{
- list_add_tail(&req->rq_link, &set->set_list);
- set->set_count++;
- req->rq_rqset = set;
-}
-
-static int lov_check_set(struct lov_obd *lov, int idx)
-{
- int rc;
- struct lov_tgt_desc *tgt;
-
- mutex_lock(&lov->lov_lock);
- tgt = lov->lov_tgts[idx];
- rc = !tgt || tgt->ltd_active ||
- (tgt->ltd_exp &&
- class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried);
- mutex_unlock(&lov->lov_lock);
-
- return rc;
-}
-
-/* Check if the OSC connection exists and is active.
- * If the OSC has not yet had a chance to connect to the OST the first time,
- * wait once for it to connect instead of returning an error.
- */
-int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
-{
- wait_queue_head_t waitq;
- struct l_wait_info lwi;
- struct lov_tgt_desc *tgt;
- int rc = 0;
-
- mutex_lock(&lov->lov_lock);
-
- tgt = lov->lov_tgts[ost_idx];
-
- if (unlikely(tgt == NULL)) {
- rc = 0;
- goto out;
- }
-
- if (likely(tgt->ltd_active)) {
- rc = 1;
- goto out;
- }
-
- if (tgt->ltd_exp && class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried) {
- rc = 0;
- goto out;
- }
-
- mutex_unlock(&lov->lov_lock);
-
- init_waitqueue_head(&waitq);
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout),
- cfs_time_seconds(1), NULL, NULL);
-
- rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi);
- if (tgt != NULL && tgt->ltd_active)
- return 1;
-
- return 0;
-
-out:
- mutex_unlock(&lov->lov_lock);
- return rc;
-}
-
-static int common_attr_done(struct lov_request_set *set)
-{
- struct list_head *pos;
- struct lov_request *req;
- struct obdo *tmp_oa;
- int rc = 0, attrset = 0;
-
- LASSERT(set->set_oi != NULL);
-
- if (set->set_oi->oi_oa == NULL)
- return 0;
-
- if (!atomic_read(&set->set_success))
- return -EIO;
-
- OBDO_ALLOC(tmp_oa);
- if (tmp_oa == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
- if (!req->rq_complete || req->rq_rc)
- continue;
- if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */
- continue;
- lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa,
- req->rq_oi.oi_oa->o_valid,
- set->set_oi->oi_md, req->rq_stripe, &attrset);
- }
- if (!attrset) {
- CERROR("No stripes had valid attrs\n");
- rc = -EIO;
- }
- if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
- (set->set_oi->oi_md->lsm_stripe_count != attrset)) {
- /* When we take attributes of some epoch, we require all the
- * OSTs to be active. */
- CERROR("Not all the stripes had valid attrs\n");
- rc = -EIO;
- goto out;
- }
-
- tmp_oa->o_oi = set->set_oi->oi_oa->o_oi;
- memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa));
-out:
- if (tmp_oa)
- OBDO_FREE(tmp_oa);
- return rc;
-
-}
-
-int lov_fini_getattr_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (set == NULL)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes))
- rc = common_attr_done(set);
-
- lov_put_reqset(set);
-
- return rc;
-}
-
-/* The callback for osc_getattr_async that finalizes a request info when a
- * response is received. */
-static int cb_getattr_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- return lov_update_common_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oi = oinfo;
-
- for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
- struct lov_oinfo *loi;
- struct lov_request *req;
-
- loi = oinfo->oi_md->lsm_oinfo[i];
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH) {
- /* SOM requires all the OSTs to be active. */
- rc = -EIO;
- goto out_set;
- }
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- OBDO_ALLOC(req->rq_oi.oi_oa);
- if (req->rq_oi.oi_oa == NULL) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
- sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- req->rq_oi.oi_cb_up = cb_getattr_update;
-
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_getattr_set(set);
- return rc;
-}
-
-int lov_fini_destroy_set(struct lov_request_set *set)
-{
- if (set == NULL)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes)) {
- /* FIXME update qos data here */
- }
-
- lov_put_reqset(set);
-
- return 0;
-}
-
-int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obdo *src_oa, struct lov_stripe_md *lsm,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oi = oinfo;
- set->set_oi->oi_md = lsm;
- set->set_oi->oi_oa = src_oa;
- set->set_oti = oti;
- if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE)
- set->set_cookies = oti->oti_logcookies;
-
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi;
- struct lov_request *req;
-
- loi = lsm->lsm_oinfo[i];
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- OBDO_ALLOC(req->rq_oi.oi_oa);
- if (req->rq_oi.oi_oa == NULL) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_destroy_set(set);
- return rc;
-}
-
-int lov_fini_setattr_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (set == NULL)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes)) {
- rc = common_attr_done(set);
- /* FIXME update qos data here */
- }
-
- lov_put_reqset(set);
- return rc;
-}
-
-int lov_update_setattr_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
-{
- struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
- struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
-
- lov_update_set(set, req, rc);
-
- /* grace error on inactive ost */
- if (rc && !(lov->lov_tgts[req->rq_idx] &&
- lov->lov_tgts[req->rq_idx]->ltd_active))
- rc = 0;
-
- if (rc == 0) {
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime =
- req->rq_oi.oi_oa->o_ctime;
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime =
- req->rq_oi.oi_oa->o_mtime;
- if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME)
- lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime =
- req->rq_oi.oi_oa->o_atime;
- }
-
- return rc;
-}
-
-/* The callback for osc_setattr_async that finalizes a request info when a
- * response is received. */
-static int cb_setattr_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oti = oti;
- set->set_oi = oinfo;
- if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- set->set_cookies = oti->oti_logcookies;
-
- for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
- struct lov_request *req;
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- OBDO_ALLOC(req->rq_oi.oi_oa);
- if (req->rq_oi.oi_oa == NULL) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
- sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- req->rq_oi.oi_oa->o_stripe_idx = i;
- req->rq_oi.oi_cb_up = cb_setattr_update;
-
- if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) {
- int off = lov_stripe_offset(oinfo->oi_md,
- oinfo->oi_oa->o_size, i,
- &req->rq_oi.oi_oa->o_size);
-
- if (off < 0 && req->rq_oi.oi_oa->o_size)
- req->rq_oi.oi_oa->o_size--;
-
- CDEBUG(D_INODE, "stripe %d has size %llu/%llu\n",
- i, req->rq_oi.oi_oa->o_size,
- oinfo->oi_oa->o_size);
- }
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_setattr_set(set);
- return rc;
-}
-
-#define LOV_U64_MAX ((__u64)~0ULL)
-#define LOV_SUM_MAX(tot, add) \
- do { \
- if ((tot) + (add) < (tot)) \
- (tot) = LOV_U64_MAX; \
- else \
- (tot) += (add); \
- } while (0)
-
-int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- int success)
-{
- if (success) {
- __u32 expected_stripes = lov_get_stripecnt(&obd->u.lov,
- LOV_MAGIC, 0);
- if (osfs->os_files != LOV_U64_MAX)
- lov_do_div64(osfs->os_files, expected_stripes);
- if (osfs->os_ffree != LOV_U64_MAX)
- lov_do_div64(osfs->os_ffree, expected_stripes);
-
- spin_lock(&obd->obd_osfs_lock);
- memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
- obd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&obd->obd_osfs_lock);
- return 0;
- }
-
- return -EIO;
-}
-
-int lov_fini_statfs_set(struct lov_request_set *set)
-{
- int rc = 0;
-
- if (set == NULL)
- return 0;
-
- if (atomic_read(&set->set_completes)) {
- rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs,
- atomic_read(&set->set_success));
- }
- lov_put_reqset(set);
- return rc;
-}
-
-void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
- int success)
-{
- int shift = 0, quit = 0;
- __u64 tmp;
-
- if (success == 0) {
- memcpy(osfs, lov_sfs, sizeof(*lov_sfs));
- } else {
- if (osfs->os_bsize != lov_sfs->os_bsize) {
- /* assume all block sizes are always powers of 2 */
- /* get the bits difference */
- tmp = osfs->os_bsize | lov_sfs->os_bsize;
- for (shift = 0; shift <= 64; ++shift) {
- if (tmp & 1) {
- if (quit)
- break;
- quit = 1;
- shift = 0;
- }
- tmp >>= 1;
- }
- }
-
- if (osfs->os_bsize < lov_sfs->os_bsize) {
- osfs->os_bsize = lov_sfs->os_bsize;
-
- osfs->os_bfree >>= shift;
- osfs->os_bavail >>= shift;
- osfs->os_blocks >>= shift;
- } else if (shift != 0) {
- lov_sfs->os_bfree >>= shift;
- lov_sfs->os_bavail >>= shift;
- lov_sfs->os_blocks >>= shift;
- }
- osfs->os_bfree += lov_sfs->os_bfree;
- osfs->os_bavail += lov_sfs->os_bavail;
- osfs->os_blocks += lov_sfs->os_blocks;
- /* XXX not sure about this one - depends on policy.
- * - could be minimum if we always stripe on all OBDs
- * (but that would be wrong for any other policy,
- * if one of the OBDs has no more objects left)
- * - could be sum if we stripe whole objects
- * - could be average, just to give a nice number
- *
- * To give a "reasonable" (if not wholly accurate)
- * number, we divide the total number of free objects
- * by expected stripe count (watch out for overflow).
- */
- LOV_SUM_MAX(osfs->os_files, lov_sfs->os_files);
- LOV_SUM_MAX(osfs->os_ffree, lov_sfs->os_ffree);
- }
-}
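
lov_update_statfs() above first normalizes the two statfs results to the larger of their (power-of-two) block sizes by finding the bit-shift between them, and only then adds the block counters. A compact standalone sketch of that normalization, assuming both sizes really are powers of two:

#include <stdint.h>
#include <stdio.h>

struct mini_statfs {
        uint64_t bsize;     /* block size, assumed to be a power of two */
        uint64_t blocks;    /* total blocks */
        uint64_t bfree;     /* free blocks */
};

/* Rescale the entry with the smaller block size so both use the larger
 * one; afterwards the block counters can simply be added, which is what
 * lov_update_statfs() does with each OST reply. */
static void normalize_bsize(struct mini_statfs *a, struct mini_statfs *b)
{
        struct mini_statfs *small = a->bsize < b->bsize ? a : b;
        struct mini_statfs *big = a->bsize < b->bsize ? b : a;
        unsigned int shift = 0;

        while ((small->bsize << shift) < big->bsize)
                shift++;                /* bits of difference between sizes */

        small->bsize <<= shift;
        small->blocks >>= shift;        /* fewer, larger blocks */
        small->bfree >>= shift;
}

int main(void)
{
        struct mini_statfs a = { 4096, 1000, 500 };
        struct mini_statfs b = { 1024, 8000, 4000 };

        normalize_bsize(&a, &b);
        printf("common bsize %llu, total blocks %llu\n",
               (unsigned long long)a.bsize,
               (unsigned long long)(a.blocks + b.blocks));
        return 0;
}
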
-
-/* The callback for osc_statfs_async that finalizes a request info when a
- * response is received. */
-static int cb_statfs_update(void *cookie, int rc)
-{
- struct obd_info *oinfo = cookie;
- struct lov_request *lovreq;
- struct lov_request_set *set;
- struct obd_statfs *osfs, *lov_sfs;
- struct lov_obd *lov;
- struct lov_tgt_desc *tgt;
- struct obd_device *lovobd, *tgtobd;
- int success;
-
- lovreq = container_of(oinfo, struct lov_request, rq_oi);
- set = lovreq->rq_rqset;
- lovobd = set->set_obd;
- lov = &lovobd->u.lov;
- osfs = set->set_oi->oi_osfs;
- lov_sfs = oinfo->oi_osfs;
- success = atomic_read(&set->set_success);
- /* XXX: the same is done in lov_update_common_set, however
- * lovset->set_exp is not initialized. */
- lov_update_set(set, lovreq, rc);
- if (rc)
- goto out;
-
- obd_getref(lovobd);
- tgt = lov->lov_tgts[lovreq->rq_idx];
- if (!tgt || !tgt->ltd_active)
- goto out_update;
-
- tgtobd = class_exp2obd(tgt->ltd_exp);
- spin_lock(&tgtobd->obd_osfs_lock);
- memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
- if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
- tgtobd->obd_osfs_age = cfs_time_current_64();
- spin_unlock(&tgtobd->obd_osfs_lock);
-
-out_update:
- lov_update_statfs(osfs, lov_sfs, success);
- obd_putref(lovobd);
-
-out:
- if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD &&
- lov_set_finished(set, 0)) {
- lov_statfs_interpret(NULL, set, set->set_count !=
- atomic_read(&set->set_success));
- }
-
- return 0;
-}
-
-int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_obd = obd;
- set->set_oi = oinfo;
-
- /* We only get block data from the OBD */
- for (i = 0; i < lov->desc.ld_tgt_count; i++) {
- struct lov_request *req;
-
- if (lov->lov_tgts[i] == NULL ||
- (!lov_check_and_wait_active(lov, i) &&
- (oinfo->oi_flags & OBD_STATFS_NODELAY))) {
- CDEBUG(D_HA, "lov idx %d inactive\n", i);
- continue;
- }
-
- /* skip targets that have been explicitly disabled by the
- * administrator */
- if (!lov->lov_tgts[i]->ltd_exp) {
- CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_oi.oi_osfs = kzalloc(sizeof(*req->rq_oi.oi_osfs),
- GFP_NOFS);
- if (!req->rq_oi.oi_osfs) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_idx = i;
- req->rq_oi.oi_cb_up = cb_statfs_update;
- req->rq_oi.oi_flags = oinfo->oi_flags;
-
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_statfs_set(set);
- return rc;
-}
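
One last detail from the removed lov_request.c: the object counters (os_files, os_ffree) are merged with a saturating add (the LOV_SUM_MAX() macro) so that an overflow clamps at the maximum instead of wrapping. A standalone sketch of that accumulation:

#include <stdint.h>
#include <stdio.h>

/* Add 'add' into '*tot', clamping at UINT64_MAX on overflow, mirroring
 * the LOV_SUM_MAX() macro used when merging os_files/os_ffree. */
static void sum_saturating(uint64_t *tot, uint64_t add)
{
        if (*tot + add < *tot)          /* unsigned wrap-around detected */
                *tot = UINT64_MAX;
        else
                *tot += add;
}

int main(void)
{
        uint64_t files = UINT64_MAX - 5;

        sum_saturating(&files, 10);     /* would overflow, so it clamps */
        printf("files = %llu\n", (unsigned long long)files);
        return 0;
}
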
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
deleted file mode 100644
index 90d9ec386a1a..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_device and cl_device_type for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub transfer operations.
- *
- */
-
-static void lovsub_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct lovsub_req *lsr;
-
- lsr = cl2lovsub_req(slice);
- OBD_SLAB_FREE_PTR(lsr, lovsub_req_kmem);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for lovsub
- * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
- * field, which is filled there.
- */
-static void lovsub_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct lovsub_object *subobj;
-
- subobj = cl2lovsub(obj);
- /*
- * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
- * unconditionally. It never changes anyway.
- */
- attr->cra_oa->o_stripe_idx = subobj->lso_index;
-}
-
-static const struct cl_req_operations lovsub_req_ops = {
- .cro_attr_set = lovsub_req_attr_set,
- .cro_completion = lovsub_req_completion
-};
-
-/*****************************************************************************
- *
- * Lov-sub device and device type functions.
- *
- */
-
-static int lovsub_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct lovsub_device *lsd = lu2lovsub_dev(d);
- struct lu_device_type *ldt;
- int rc;
-
- next->ld_site = d->ld_site;
- ldt = next->ld_type;
- LASSERT(ldt != NULL);
- rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL);
- if (rc) {
- next->ld_site = NULL;
- return rc;
- }
-
- lu_device_get(next);
- lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
- lsd->acid_next = lu2cl_dev(next);
- return rc;
-}
-
-static struct lu_device *lovsub_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- struct lu_device *next;
- struct lovsub_device *lsd;
-
- lsd = lu2lovsub_dev(d);
- next = cl2lu_dev(lsd->acid_next);
- lsd->acid_super = NULL;
- lsd->acid_next = NULL;
- return next;
-}
-
-static struct lu_device *lovsub_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct lovsub_device *lsd = lu2lovsub_dev(d);
- struct lu_device *next = cl2lu_dev(lsd->acid_next);
-
- if (atomic_read(&d->ld_ref) && d->ld_site) {
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
- lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
- }
- cl_device_fini(lu2cl_dev(d));
- kfree(lsd);
- return next;
-}
-
-static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct lovsub_req *lsr;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, GFP_NOFS);
- if (lsr != NULL) {
- cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-static const struct lu_device_operations lovsub_lu_ops = {
- .ldo_object_alloc = lovsub_object_alloc,
- .ldo_process_config = NULL,
- .ldo_recovery_complete = NULL
-};
-
-static const struct cl_device_operations lovsub_cl_ops = {
- .cdo_req_init = lovsub_req_init
-};
-
-static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *d;
- struct lovsub_device *lsd;
-
- lsd = kzalloc(sizeof(*lsd), GFP_NOFS);
- if (lsd != NULL) {
- int result;
-
- result = cl_device_init(&lsd->acid_cl, t);
- if (result == 0) {
- d = lovsub2lu_dev(lsd);
- d->ld_ops = &lovsub_lu_ops;
- lsd->acid_cl.cd_ops = &lovsub_cl_ops;
- } else
- d = ERR_PTR(result);
- } else
- d = ERR_PTR(-ENOMEM);
- return d;
-}
-
-static const struct lu_device_type_operations lovsub_device_type_ops = {
- .ldto_device_alloc = lovsub_device_alloc,
- .ldto_device_free = lovsub_device_free,
-
- .ldto_device_init = lovsub_device_init,
- .ldto_device_fini = lovsub_device_fini
-};
-
-#define LUSTRE_LOVSUB_NAME "lovsub"
-
-struct lu_device_type lovsub_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_LOVSUB_NAME,
- .ldt_ops = &lovsub_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_io.c b/drivers/staging/lustre/lustre/lov/lovsub_io.c
deleted file mode 100644
index 783ec687a4e7..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_io.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_io for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub io operations.
- *
- */
-
-/* All trivial */
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
deleted file mode 100644
index 62b696d25d81..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub lock operations.
- *
- */
-
-static void lovsub_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct lovsub_lock *lsl;
-
- lsl = cl2lovsub_lock(slice);
- LASSERT(list_empty(&lsl->lss_parents));
- OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
-}
-
-static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_mutex_get(env, parent);
-}
-
-static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
- lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_put(env, parent);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for lovsub layer, which
- * method is called whenever sub-lock state changes. Propagates state change
- * to the top-locks.
- */
-static void lovsub_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- struct lov_lock *lov = scan->lll_super;
- struct cl_lock *parent = lov->lls_cl.cls_lock;
-
- if (sub->lss_active != parent) {
- lovsub_parent_lock(env, lov);
- cl_lock_signal(env, parent);
- lovsub_parent_unlock(env, lov);
- }
- }
-}
-
-/**
- * Implementation of cl_lock_operation::clo_weigh() estimating lock weight by
- * asking parent lock.
- */
-static unsigned long lovsub_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- unsigned long dumbbell;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- if (!list_empty(&lock->lss_parents)) {
- /*
- * It is not clear whether all parents have to be asked and
- * their estimations summed, or it is enough to ask one. For
- * the current usages, one is always enough.
- */
- lov = container_of(lock->lss_parents.next,
- struct lov_lock_link, lll_list)->lll_super;
-
- lovsub_parent_lock(env, lov);
- dumbbell = cl_lock_weigh(env, lov->lls_cl.cls_lock);
- lovsub_parent_unlock(env, lov);
- } else
- dumbbell = 0;
-
- return dumbbell;
-}
-
-/**
- * Maps start/end offsets within a stripe, to offsets within a file.
- */
-static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
- struct lov_object *lov,
- int stripe, struct cl_lock_descr *out)
-{
- pgoff_t size; /* stripe size in pages */
- pgoff_t skip; /* how many pages in every stripe are occupied by
- * "other" stripes */
- pgoff_t start;
- pgoff_t end;
-
- start = in->cld_start;
- end = in->cld_end;
-
- if (lov->lo_lsm->lsm_stripe_count > 1) {
- size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
- skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
-
- /* XXX overflow check here? */
- start += start/size * skip + stripe * size;
-
- if (end != CL_PAGE_EOF) {
- end += end/size * skip + stripe * size;
- /*
- * And check for overflow...
- */
- if (end < in->cld_end)
- end = CL_PAGE_EOF;
- }
- }
- out->cld_start = start;
- out->cld_end = end;
-}
-
-/**
- * Adjusts parent lock extent when a sub-lock is attached to a parent. This is
- * called in two ways:
- *
- * - as part of receive call-back, when server returns granted extent to
- * the client, and
- *
- * - when top-lock finds existing sub-lock in the cache.
- *
- * Note, that lock mode is not propagated to the parent: i.e., if CLM_READ
- * top-lock matches CLM_WRITE sub-lock, top-lock is still CLM_READ.
- */
-int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
- struct lovsub_lock *sublock,
- const struct cl_lock_descr *d, int idx)
-{
- struct cl_lock *parent;
- struct lovsub_object *subobj;
- struct cl_lock_descr *pd;
- struct cl_lock_descr *parent_descr;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- parent_descr = &parent->cll_descr;
- LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
-
- subobj = cl2lovsub(sublock->lss_cl.cls_obj);
- pd = &lov_env_info(env)->lti_ldescr;
-
- pd->cld_obj = parent_descr->cld_obj;
- pd->cld_mode = parent_descr->cld_mode;
- pd->cld_gid = parent_descr->cld_gid;
- lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
- lov->lls_sub[idx].sub_got = *d;
- /*
- * Notify top-lock about modification, if lock description changes
- * materially.
- */
- if (!cl_lock_ext_match(parent_descr, pd))
- result = cl_lock_modify(env, parent, pd);
- else
- result = 0;
- return result;
-}
-
-static int lovsub_lock_modify(const struct lu_env *env,
- const struct cl_lock_slice *s,
- const struct cl_lock_descr *d)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(s);
- struct lov_lock_link *scan;
- struct lov_lock *lov;
- int result = 0;
-
- LASSERT(cl_lock_mode_match(d->cld_mode,
- s->cls_lock->cll_descr.cld_mode));
- list_for_each_entry(scan, &lock->lss_parents, lll_list) {
- int rc;
-
- lov = scan->lll_super;
- lovsub_parent_lock(env, lov);
- rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx);
- lovsub_parent_unlock(env, lov);
- result = result ?: rc;
- }
- return result;
-}
-
-static int lovsub_lock_closure(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure)
-{
- struct lovsub_lock *sub;
- struct cl_lock *parent;
- struct lov_lock_link *scan;
- int result;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- sub = cl2lovsub_lock(slice);
- result = 0;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- parent = scan->lll_super->lls_cl.cls_lock;
- result = cl_lock_closure_build(env, parent, closure);
- if (result != 0)
- break;
- }
- return result;
-}
-
-/**
- * A helper function for lovsub_lock_delete() that deals with a given parent
- * top-lock.
- */
-static int lovsub_lock_delete_one(const struct lu_env *env,
- struct cl_lock *child, struct lov_lock *lov)
-{
- struct cl_lock *parent;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- if (parent->cll_error)
- return 0;
-
- result = 0;
- switch (parent->cll_state) {
- case CLS_ENQUEUED:
- /* See LU-1355 for the case that a glimpse lock is
- * interrupted by signal */
- LASSERT(parent->cll_flags & CLF_CANCELLED);
- break;
- case CLS_QUEUING:
- case CLS_FREEING:
- cl_lock_signal(env, parent);
- break;
- case CLS_INTRANSIT:
- /*
- * Here lies a problem: a sub-lock is canceled while top-lock
- * is being unlocked. Top-lock cannot be moved into CLS_NEW
- * state, because unlocking has to succeed eventually by
- * placing lock into CLS_CACHED (or failing it), see
- * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED
- * state, because lov maintains an invariant that all
- * sub-locks exist in CLS_CACHED (this allows cached top-lock
- * to be reused immediately). Nor can we wait for top-lock
- * state to change, because this can be synchronous to the
- * current thread.
- *
- * We know for sure that lov_lock_unuse() will be called at
- * least one more time to finish un-using, so leave a mark on
- * the top-lock, that will be seen by the next call to
- * lov_lock_unuse().
- */
- if (cl_lock_is_intransit(parent))
- lov->lls_cancel_race = 1;
- break;
- case CLS_CACHED:
- /*
- * if a sub-lock is canceled move its top-lock into CLS_NEW
- * state to preserve an invariant that a top-lock in
- * CLS_CACHED is immediately ready for re-use (i.e., has all
- * sub-locks), and so that next attempt to re-use the top-lock
- * enqueues missing sub-lock.
- */
- cl_lock_state_set(env, parent, CLS_NEW);
- /* fall through */
- case CLS_NEW:
- /*
- * if last sub-lock is canceled, destroy the top-lock (which
- * is now `empty') proactively.
- */
- if (lov->lls_nr_filled == 0) {
- /* ... but unfortunately, this cannot be done easily,
- * as cancellation of a top-lock might acquire mutices
- * of its other sub-locks, violating lock ordering,
- * see cl_lock_{cancel,delete}() preconditions.
- *
- * To work around this, the mutex of this sub-lock is
- * released, top-lock is destroyed, and sub-lock mutex
- * acquired again. The list of parents has to be
- * re-scanned from the beginning after this.
- *
- * Only do this if no mutices other than on @child and
- * @parent are held by the current thread.
- *
- * TODO: The lock modal here is too complex, because
- * the lock may be canceled and deleted by voluntarily:
- * cl_lock_request
- * -> osc_lock_enqueue_wait
- * -> osc_lock_cancel_wait
- * -> cl_lock_delete
- * -> lovsub_lock_delete
- * -> cl_lock_cancel/delete
- * -> ...
- *
- * The better choice is to spawn a kernel thread for
- * this purpose. -jay
- */
- if (cl_lock_nr_mutexed(env) == 2) {
- cl_lock_mutex_put(env, child);
- cl_lock_cancel(env, parent);
- cl_lock_delete(env, parent);
- result = 1;
- }
- }
- break;
- case CLS_HELD:
- CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
- default:
- CERROR("Impossible state: %d\n", parent->cll_state);
- LBUG();
- break;
- }
-
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked in "bottom-to-top" delete, when lock destruction starts from the
- * sub-lock (e.g, as a result of ldlm lock LRU policy).
- */
-static void lovsub_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct cl_lock *child = slice->cls_lock;
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- int restart;
-
- LASSERT(cl_lock_is_mutexed(child));
-
- /*
- * Destruction of a sub-lock might take multiple iterations, because
- * when the last sub-lock of a given top-lock is deleted, top-lock is
- * canceled proactively, and this requires to release sub-lock
- * mutex. Once sub-lock mutex has been released, list of its parents
- * has to be re-scanned from the beginning.
- */
- do {
- struct lov_lock *lov;
- struct lov_lock_link *scan;
- struct lov_lock_link *temp;
- struct lov_lock_sub *subdata;
-
- restart = 0;
- list_for_each_entry_safe(scan, temp,
- &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- subdata = &lov->lls_sub[scan->lll_idx];
- lovsub_parent_lock(env, lov);
- subdata->sub_got = subdata->sub_descr;
- lov_lock_unlink(env, scan, sub);
- restart = lovsub_lock_delete_one(env, child, lov);
- lovsub_parent_unlock(env, lov);
-
- if (restart) {
- cl_lock_mutex_get(env, child);
- break;
- }
- }
- } while (restart);
-}
-
-static int lovsub_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- struct lov_lock_link *scan;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
- if (lov != NULL)
- cl_lock_descr_print(env, cookie, p,
- &lov->lls_cl.cls_lock->cll_descr);
- (*p)(env, cookie, "] ");
- }
- return 0;
-}
-
-static const struct cl_lock_operations lovsub_lock_ops = {
- .clo_fini = lovsub_lock_fini,
- .clo_state = lovsub_lock_state,
- .clo_delete = lovsub_lock_delete,
- .clo_modify = lovsub_lock_modify,
- .clo_closure = lovsub_lock_closure,
- .clo_weigh = lovsub_lock_weigh,
- .clo_print = lovsub_lock_print
-};
-
-int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- struct lovsub_lock *lsk;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, GFP_NOFS);
- if (lsk != NULL) {
- INIT_LIST_HEAD(&lsk->lss_parents);
- cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
deleted file mode 100644
index 57e3629fccee..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_object for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub object operations.
- *
- */
-
-int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct lovsub_device *dev = lu2lovsub_dev(obj->lo_dev);
- struct lu_object *below;
- struct lu_device *under;
-
- int result;
-
- under = &dev->acid_next->cd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below != NULL) {
- lu_object_add(obj, below);
- cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-
-}
-
-static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct lovsub_object *los = lu2lovsub(obj);
- struct lov_object *lov = los->lso_super;
-
- /* We can't assume lov was assigned here, because of the shadow
- * object handling in lu_object_find.
- */
- if (lov) {
- LASSERT(lov->lo_type == LLT_RAID0);
- LASSERT(lov->u.raid0.lo_sub[los->lso_index] == los);
- spin_lock(&lov->u.raid0.lo_sub_lock);
- lov->u.raid0.lo_sub[los->lso_index] = NULL;
- spin_unlock(&lov->u.raid0.lo_sub_lock);
- }
-
- lu_object_fini(obj);
- lu_object_header_fini(&los->lso_header.coh_lu);
- OBD_SLAB_FREE_PTR(los, lovsub_object_kmem);
-}
-
-static int lovsub_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *obj)
-{
- struct lovsub_object *los = lu2lovsub(obj);
-
- return (*p)(env, cookie, "[%d]", los->lso_index);
-}
-
-static int lovsub_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
-{
- struct lov_object *lov = cl2lovsub(obj)->lso_super;
-
- lov_r0(lov)->lo_attr_valid = 0;
- return 0;
-}
-
-static int lovsub_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj,
- struct ost_lvb *lvb)
-{
- struct lovsub_object *los = cl2lovsub(obj);
-
- return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
-}
-
-
-
-static const struct cl_object_operations lovsub_ops = {
- .coo_page_init = lovsub_page_init,
- .coo_lock_init = lovsub_lock_init,
- .coo_attr_set = lovsub_attr_set,
- .coo_glimpse = lovsub_object_glimpse
-};
-
-static const struct lu_object_operations lovsub_lu_obj_ops = {
- .loo_object_init = lovsub_object_init,
- .loo_object_delete = NULL,
- .loo_object_release = NULL,
- .loo_object_free = lovsub_object_free,
- .loo_object_print = lovsub_object_print,
- .loo_object_invariant = NULL
-};
-
-struct lu_object *lovsub_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
-{
- struct lovsub_object *los;
- struct lu_object *obj;
-
- OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, GFP_NOFS);
- if (los != NULL) {
- struct cl_object_header *hdr;
-
- obj = lovsub2lu(los);
- hdr = &los->lso_header;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
- los->lso_cl.co_ops = &lovsub_ops;
- obj->lo_ops = &lovsub_lu_obj_ops;
- } else
- obj = NULL;
- return obj;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
deleted file mode 100644
index 3f00ce9677b7..000000000000
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub page operations.
- *
- */
-
-static void lovsub_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
-}
-
-static const struct cl_page_operations lovsub_page_ops = {
- .cpo_fini = lovsub_page_fini
-};
-
-int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *unused)
-{
- struct lovsub_page *lsb = cl_object_page_slice(obj, page);
-
- cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
- return 0;
-}
-
-/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
deleted file mode 100644
index 380b8271bf24..000000000000
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/statfs.h>
-#include "../include/lprocfs_status.h"
-#include "../include/obd_class.h"
-#include <linux/seq_file.h>
-#include "lov_internal.h"
-
-static int lov_stripesize_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%llu\n", desc->ld_default_stripe_size);
- return 0;
-}
-
-static ssize_t lov_stripesize_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- __u64 val;
- int rc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_u64_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- lov_fix_desc_stripe_size(&val);
- desc->ld_default_stripe_size = val;
- return count;
-}
-LPROC_SEQ_FOPS(lov_stripesize);
-
-static int lov_stripeoffset_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%llu\n", desc->ld_default_stripe_offset);
- return 0;
-}
-
-static ssize_t lov_stripeoffset_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- __u64 val;
- int rc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_u64_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- desc->ld_default_stripe_offset = val;
- return count;
-}
-LPROC_SEQ_FOPS(lov_stripeoffset);
-
-static int lov_stripetype_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%u\n", desc->ld_pattern);
- return 0;
-}
-
-static ssize_t lov_stripetype_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- int val, rc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- lov_fix_desc_pattern(&val);
- desc->ld_pattern = val;
- return count;
-}
-LPROC_SEQ_FOPS(lov_stripetype);
-
-static int lov_stripecount_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_desc *desc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1);
- return 0;
-}
-
-static ssize_t lov_stripecount_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct lov_desc *desc;
- int val, rc;
-
- LASSERT(dev != NULL);
- desc = &dev->u.lov.desc;
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- lov_fix_desc_stripe_count(&val);
- desc->ld_default_stripe_count = val;
- return count;
-}
-LPROC_SEQ_FOPS(lov_stripecount);
-
-static ssize_t numobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lov_desc *desc;
-
- desc = &dev->u.lov.desc;
- return sprintf(buf, "%u\n", desc->ld_tgt_count);
-}
-LUSTRE_RO_ATTR(numobd);
-
-static ssize_t activeobd_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct lov_desc *desc;
-
- desc = &dev->u.lov.desc;
- return sprintf(buf, "%u\n", desc->ld_active_tgt_count);
-}
-LUSTRE_RO_ATTR(activeobd);
-
-static int lov_desc_uuid_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = (struct obd_device *)m->private;
- struct lov_obd *lov;
-
- LASSERT(dev != NULL);
- lov = &dev->u.lov;
- seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid);
- return 0;
-}
-LPROC_SEQ_FOPS_RO(lov_desc_uuid);
-
-static void *lov_tgt_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lov_obd *lov = &dev->u.lov;
-
- while (*pos < lov->desc.ld_tgt_count) {
- if (lov->lov_tgts[*pos])
- return lov->lov_tgts[*pos];
- ++*pos;
- }
- return NULL;
-}
-
-static void lov_tgt_seq_stop(struct seq_file *p, void *v)
-{
-}
-
-static void *lov_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- struct obd_device *dev = p->private;
- struct lov_obd *lov = &dev->u.lov;
-
- while (++*pos < lov->desc.ld_tgt_count) {
- if (lov->lov_tgts[*pos])
- return lov->lov_tgts[*pos];
- }
- return NULL;
-}
-
-static int lov_tgt_seq_show(struct seq_file *p, void *v)
-{
- struct lov_tgt_desc *tgt = v;
-
- seq_printf(p, "%d: %s %sACTIVE\n",
- tgt->ltd_index, obd_uuid2str(&tgt->ltd_uuid),
- tgt->ltd_active ? "" : "IN");
- return 0;
-}
-
-static const struct seq_operations lov_tgt_sops = {
- .start = lov_tgt_seq_start,
- .stop = lov_tgt_seq_stop,
- .next = lov_tgt_seq_next,
- .show = lov_tgt_seq_show,
-};
-
-static int lov_target_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(file, &lov_tgt_sops);
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
- return 0;
-}
-
-static struct lprocfs_vars lprocfs_lov_obd_vars[] = {
- { "stripesize", &lov_stripesize_fops, NULL },
- { "stripeoffset", &lov_stripeoffset_fops, NULL },
- { "stripecount", &lov_stripecount_fops, NULL },
- { "stripetype", &lov_stripetype_fops, NULL },
- /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
- { "desc_uuid", &lov_desc_uuid_fops, NULL, 0 },
- { NULL }
-};
-
-static struct attribute *lov_attrs[] = {
- &lustre_attr_activeobd.attr,
- &lustre_attr_numobd.attr,
- NULL,
-};
-
-static struct attribute_group lov_attr_group = {
- .attrs = lov_attrs,
-};
-
-void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &lov_attr_group;
- lvars->obd_vars = lprocfs_lov_obd_vars;
-}
-
-const struct file_operations lov_proc_target_fops = {
- .owner = THIS_MODULE,
- .open = lov_target_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = lprocfs_seq_release,
-};
diff --git a/drivers/staging/lustre/lustre/mdc/Makefile b/drivers/staging/lustre/lustre/mdc/Makefile
deleted file mode 100644
index 99ba9ff0d83a..000000000000
--- a/drivers/staging/lustre/lustre/mdc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += mdc.o
-mdc-y := mdc_request.o mdc_reint.o mdc_lib.o mdc_locks.o lproc_mdc.o
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
deleted file mode 100644
index 1c95f87a0e2a..000000000000
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/vfs.h>
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "mdc_internal.h"
-
-static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- int len;
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return len;
-}
-
-static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val < 1 || val > MDC_MAX_RIF_MAX)
- return -ERANGE;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_rpcs_in_flight);
-
-static int mdc_kuc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, NULL, inode->i_private);
-}
-
-/* temporary for testing */
-static ssize_t mdc_kuc_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd =
- ((struct seq_file *)file->private_data)->private;
- struct kuc_hdr *lh;
- struct hsm_action_list *hal;
- struct hsm_action_item *hai;
- int len;
- int fd, rc;
-
- rc = lprocfs_write_helper(buffer, count, &fd);
- if (rc)
- return rc;
-
- if (fd < 0)
- return -ERANGE;
- CWARN("message to fd %d\n", fd);
-
- len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
- /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
-
- lh = kzalloc(len, GFP_NOFS);
- if (!lh)
- return -ENOMEM;
-
- lh->kuc_magic = KUC_MAGIC;
- lh->kuc_transport = KUC_TRANSPORT_HSM;
- lh->kuc_msgtype = HMT_ACTION_LIST;
- lh->kuc_msglen = len;
-
- hal = (struct hsm_action_list *)(lh + 1);
- hal->hal_version = HAL_VERSION;
- hal->hal_archive_id = 1;
- hal->hal_flags = 0;
- obd_uuid2fsname(hal->hal_fsname, obd->obd_name, MTI_NAME_MAXLEN);
-
- /* mock up an action list */
- hal->hal_count = 2;
- hai = hai_zero(hal);
- hai->hai_action = HSMA_ARCHIVE;
- hai->hai_fid.f_oid = 5;
- hai->hai_len = sizeof(*hai);
- hai = hai_next(hai);
- hai->hai_action = HSMA_RESTORE;
- hai->hai_fid.f_oid = 10;
- hai->hai_len = sizeof(*hai);
-
- /* This works for either broadcast or unicast to a single fd */
- if (fd == 0) {
- rc = libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
- } else {
- struct file *fp = fget(fd);
-
- rc = libcfs_kkuc_msg_put(fp, lh);
- fput(fp);
- }
- kfree(lh);
- if (rc < 0)
- return rc;
- return count;
-}
-
-static struct file_operations mdc_kuc_fops = {
- .open = mdc_kuc_open,
- .write = mdc_kuc_write,
- .release = single_release,
-};
-
-LPROC_SEQ_FOPS_WR_ONLY(mdc, ping);
-
-LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, server_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, conn_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, timeouts);
-LPROC_SEQ_FOPS_RO_TYPE(mdc, state);
-
-/*
- * Note: below sysfs entry is provided, but not currently in use, instead
- * sbi->sb_md_brw_size is used, the per obd variable should be used
- * when DNE is enabled, and dir pages are managed in MDC layer.
- * Don't forget to enable sysfs store function then.
- */
-static ssize_t max_pages_per_rpc_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
-
- return sprintf(buf, "%d\n", cli->cl_max_pages_per_rpc);
-}
-LUSTRE_RO_ATTR(max_pages_per_rpc);
-
-LPROC_SEQ_FOPS_RW_TYPE(mdc, import);
-LPROC_SEQ_FOPS_RW_TYPE(mdc, pinger_recov);
-
-static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
- { "ping", &mdc_ping_fops, NULL, 0222 },
- { "connect_flags", &mdc_connect_flags_fops, NULL, 0 },
- /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
- { "mds_server_uuid", &mdc_server_uuid_fops, NULL, 0 },
- { "mds_conn_uuid", &mdc_conn_uuid_fops, NULL, 0 },
- { "timeouts", &mdc_timeouts_fops, NULL, 0 },
- { "import", &mdc_import_fops, NULL, 0 },
- { "state", &mdc_state_fops, NULL, 0 },
- { "hsm_nl", &mdc_kuc_fops, NULL, 0200 },
- { "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 },
- { NULL }
-};
-
-static struct attribute *mdc_attrs[] = {
- &lustre_attr_max_rpcs_in_flight.attr,
- &lustre_attr_max_pages_per_rpc.attr,
- NULL,
-};
-
-static struct attribute_group mdc_attr_group = {
- .attrs = mdc_attrs,
-};
-
-void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &mdc_attr_group;
- lvars->obd_vars = lprocfs_mdc_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
deleted file mode 100644
index 29b46f754726..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _MDC_INTERNAL_H
-#define _MDC_INTERNAL_H
-
-#include "../include/lustre_mdc.h"
-#include "../include/lustre_mds.h"
-
-void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars);
-
-void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
- __u64 valid, int ea_size, __u32 suppgid, int flags);
-int mdc_pack_req(struct ptlrpc_request *req, int version, int opc);
-void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
- const struct lu_fid *cfid, int flags);
-void mdc_swap_layouts_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data);
-void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size,
- const struct lu_fid *fid);
-void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
- struct md_op_data *data, int ea_size);
-void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len);
-void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const void *data, int datalen, __u32 mode, __u32 uid,
- __u32 gid, cfs_cap_t capability, __u64 rdev);
-void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u64 flags, const void *data,
- int datalen);
-void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-void mdc_getxattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen);
-void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-int mdc_enter_request(struct client_obd *cli);
-void mdc_exit_request(struct client_obd *cli);
-
-/* mdc/mdc_locks.c */
-int mdc_set_lock_data(struct obd_export *exp,
- __u64 *lockh, void *data, __u64 *bits);
-
-int mdc_null_inode(struct obd_export *exp, const struct lu_fid *fid);
-
-int mdc_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_iterator_t it, void *data);
-
-int mdc_intent_lock(struct obd_export *exp,
- struct md_op_data *,
- void *lmm, int lmmsize,
- struct lookup_intent *, int,
- struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags);
-int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **req, __u64 extra_lock_flags);
-
-int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
- __u64 bits);
-/* mdc/mdc_request.c */
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data);
-
-int mdc_open(struct obd_export *exp, u64 ino, int type, int flags,
- struct lov_mds_md *lmm, int lmm_size, struct lustre_handle *fh,
- struct ptlrpc_request **);
-
-struct obd_client_handle;
-
-int mdc_get_lustre_md(struct obd_export *md_exp, struct ptlrpc_request *req,
- struct obd_export *dt_exp, struct obd_export *lmv_exp,
- struct lustre_md *md);
-
-int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md);
-
-int mdc_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it);
-
-int mdc_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och);
-void mdc_commit_open(struct ptlrpc_request *req);
-void mdc_replay_open(struct ptlrpc_request *req);
-
-int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid, __u32 gid,
- cfs_cap_t capability, __u64 rdev,
- struct ptlrpc_request **request);
-int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request);
-int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen,
- struct ptlrpc_request **request);
-int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
- struct ptlrpc_request **request, struct md_open_data **mod);
-int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request);
-int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- ldlm_cancel_flags_t flags, void *opaque);
-
-int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits);
-
-int mdc_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo,
- struct ldlm_enqueue_info *einfo);
-
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh);
-
-static inline int mdc_prep_elc_req(struct obd_export *exp,
- struct ptlrpc_request *req, int opc,
- struct list_head *cancels, int count)
-{
- return ldlm_prep_elc_req(exp, req, LUSTRE_MDS_VERSION, opc, 0, cancels,
- count);
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
deleted file mode 100644
index ce0bcdfacb0e..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-#include "../include/lustre_net.h"
-#include "../include/lustre/lustre_idl.h"
-#include "mdc_internal.h"
-
-
-static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
-{
- LASSERT(b != NULL);
-
- b->suppgid = suppgid;
- b->uid = from_kuid(&init_user_ns, current_uid());
- b->gid = from_kgid(&init_user_ns, current_gid());
- b->fsuid = from_kuid(&init_user_ns, current_fsuid());
- b->fsgid = from_kgid(&init_user_ns, current_fsgid());
- b->capability = cfs_curproc_cap_pack();
-}
-
-void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
- const struct lu_fid *cfid, int flags)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
-
- if (pfid) {
- b->fid1 = *pfid;
- b->valid = OBD_MD_FLID;
- }
- if (cfid)
- b->fid2 = *cfid;
- b->flags = flags;
-}
-
-void mdc_swap_layouts_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
-
- __mdc_pack_body(b, op_data->op_suppgids[0]);
- b->fid1 = op_data->op_fid1;
- b->fid2 = op_data->op_fid2;
- b->valid |= OBD_MD_FLID;
-}
-
-void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
- __u64 valid, int ea_size, __u32 suppgid, int flags)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
- LASSERT(b != NULL);
- b->valid = valid;
- b->eadatasize = ea_size;
- b->flags = flags;
- __mdc_pack_body(b, suppgid);
- if (fid) {
- b->fid1 = *fid;
- b->valid |= OBD_MD_FLID;
- }
-}
-
-void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff,
- __u32 size, const struct lu_fid *fid)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
- b->fid1 = *fid;
- b->valid |= OBD_MD_FLID;
- b->size = pgoff; /* !! */
- b->nlink = size; /* !! */
- __mdc_pack_body(b, -1);
- b->mode = LUDA_FID | LUDA_TYPE;
-}
-
-/* packing of MDS records */
-void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const void *data, int datalen, __u32 mode,
- __u32 uid, __u32 gid, cfs_cap_t cap_effective, __u64 rdev)
-{
- struct mdt_rec_create *rec;
- char *tmp;
- __u64 flags;
-
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
-
- rec->cr_opcode = REINT_CREATE;
- rec->cr_fsuid = uid;
- rec->cr_fsgid = gid;
- rec->cr_cap = cap_effective;
- rec->cr_fid1 = op_data->op_fid1;
- rec->cr_fid2 = op_data->op_fid2;
- rec->cr_mode = mode;
- rec->cr_rdev = rdev;
- rec->cr_time = op_data->op_mod_time;
- rec->cr_suppgid1 = op_data->op_suppgids[0];
- rec->cr_suppgid2 = op_data->op_suppgids[1];
- flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
- if (op_data->op_bias & MDS_CREATE_VOLATILE)
- flags |= MDS_OPEN_VOLATILE;
- set_mrc_cr_flags(rec, flags);
- rec->cr_bias = op_data->op_bias;
- rec->cr_umask = current_umask();
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-
- if (data) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- memcpy(tmp, data, datalen);
- }
-}
-
-static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
-{
- __u64 cr_flags = (flags & (FMODE_READ | FMODE_WRITE |
- MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS |
- MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK |
- MDS_OPEN_BY_FID | MDS_OPEN_LEASE |
- MDS_OPEN_RELEASE));
- if (flags & O_CREAT)
- cr_flags |= MDS_OPEN_CREAT;
- if (flags & O_EXCL)
- cr_flags |= MDS_OPEN_EXCL;
- if (flags & O_TRUNC)
- cr_flags |= MDS_OPEN_TRUNC;
- if (flags & O_APPEND)
- cr_flags |= MDS_OPEN_APPEND;
- if (flags & O_SYNC)
- cr_flags |= MDS_OPEN_SYNC;
- if (flags & O_DIRECTORY)
- cr_flags |= MDS_OPEN_DIRECTORY;
- if (flags & __FMODE_EXEC)
- cr_flags |= MDS_FMODE_EXEC;
- if (cl_is_lov_delay_create(flags))
- cr_flags |= MDS_OPEN_DELAY_CREATE;
-
- if (flags & O_NONBLOCK)
- cr_flags |= MDS_OPEN_NORESTORE;
-
- return cr_flags;
-}
-
-/* packing of MDS records */
-void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u64 flags, const void *lmm,
- int lmmlen)
-{
- struct mdt_rec_create *rec;
- char *tmp;
- __u64 cr_flags;
-
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- /* XXX do something about time, uid, gid */
- rec->cr_opcode = REINT_OPEN;
- rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
- rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
- rec->cr_cap = cfs_curproc_cap_pack();
- rec->cr_fid1 = op_data->op_fid1;
- rec->cr_fid2 = op_data->op_fid2;
-
- rec->cr_mode = mode;
- cr_flags = mds_pack_open_flags(flags, mode);
- rec->cr_rdev = rdev;
- rec->cr_time = op_data->op_mod_time;
- rec->cr_suppgid1 = op_data->op_suppgids[0];
- rec->cr_suppgid2 = op_data->op_suppgids[1];
- rec->cr_bias = op_data->op_bias;
- rec->cr_umask = current_umask();
- rec->cr_old_handle = op_data->op_handle;
-
- if (op_data->op_name) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
- if (op_data->op_bias & MDS_CREATE_VOLATILE)
- cr_flags |= MDS_OPEN_VOLATILE;
- }
-
- if (lmm) {
- cr_flags |= MDS_OPEN_HAS_EA;
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- memcpy(tmp, lmm, lmmlen);
- }
- set_mrc_cr_flags(rec, cr_flags);
-}
-
-static inline __u64 attr_pack(unsigned int ia_valid)
-{
- __u64 sa_valid = 0;
-
- if (ia_valid & ATTR_MODE)
- sa_valid |= MDS_ATTR_MODE;
- if (ia_valid & ATTR_UID)
- sa_valid |= MDS_ATTR_UID;
- if (ia_valid & ATTR_GID)
- sa_valid |= MDS_ATTR_GID;
- if (ia_valid & ATTR_SIZE)
- sa_valid |= MDS_ATTR_SIZE;
- if (ia_valid & ATTR_ATIME)
- sa_valid |= MDS_ATTR_ATIME;
- if (ia_valid & ATTR_MTIME)
- sa_valid |= MDS_ATTR_MTIME;
- if (ia_valid & ATTR_CTIME)
- sa_valid |= MDS_ATTR_CTIME;
- if (ia_valid & ATTR_ATIME_SET)
- sa_valid |= MDS_ATTR_ATIME_SET;
- if (ia_valid & ATTR_MTIME_SET)
- sa_valid |= MDS_ATTR_MTIME_SET;
- if (ia_valid & ATTR_FORCE)
- sa_valid |= MDS_ATTR_FORCE;
- if (ia_valid & ATTR_ATTR_FLAG)
- sa_valid |= MDS_ATTR_ATTR_FLAG;
- if (ia_valid & ATTR_KILL_SUID)
- sa_valid |= MDS_ATTR_KILL_SUID;
- if (ia_valid & ATTR_KILL_SGID)
- sa_valid |= MDS_ATTR_KILL_SGID;
- if (ia_valid & ATTR_CTIME_SET)
- sa_valid |= MDS_ATTR_CTIME_SET;
- if (ia_valid & ATTR_OPEN)
- sa_valid |= MDS_ATTR_FROM_OPEN;
- if (ia_valid & ATTR_BLOCKS)
- sa_valid |= MDS_ATTR_BLOCKS;
- if (ia_valid & MDS_OPEN_OWNEROVERRIDE)
- /* NFSD hack (see bug 5781) */
- sa_valid |= MDS_OPEN_OWNEROVERRIDE;
- return sa_valid;
-}
-
-static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
- struct md_op_data *op_data)
-{
- rec->sa_opcode = REINT_SETATTR;
- rec->sa_fsuid = from_kuid(&init_user_ns, current_fsuid());
- rec->sa_fsgid = from_kgid(&init_user_ns, current_fsgid());
- rec->sa_cap = cfs_curproc_cap_pack();
- rec->sa_suppgid = -1;
-
- rec->sa_fid = op_data->op_fid1;
- rec->sa_valid = attr_pack(op_data->op_attr.ia_valid);
- rec->sa_mode = op_data->op_attr.ia_mode;
- rec->sa_uid = from_kuid(&init_user_ns, op_data->op_attr.ia_uid);
- rec->sa_gid = from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
- rec->sa_size = op_data->op_attr.ia_size;
- rec->sa_blocks = op_data->op_attr_blocks;
- rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
- rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
- rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
- rec->sa_attr_flags =
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
- if ((op_data->op_attr.ia_valid & ATTR_GID) &&
- in_group_p(op_data->op_attr.ia_gid))
- rec->sa_suppgid =
- from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
- else
- rec->sa_suppgid = op_data->op_suppgids[0];
-
- rec->sa_bias = op_data->op_bias;
-}
-
-static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
- struct md_op_data *op_data)
-{
- memcpy(&epoch->handle, &op_data->op_handle, sizeof(epoch->handle));
- epoch->ioepoch = op_data->op_ioepoch;
- epoch->flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
-}
-
-void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len)
-{
- struct mdt_rec_setattr *rec;
- struct mdt_ioepoch *epoch;
- struct lov_user_md *lum = NULL;
-
- CLASSERT(sizeof(struct mdt_rec_reint) ==
- sizeof(struct mdt_rec_setattr));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- mdc_setattr_pack_rec(rec, op_data);
-
- if (op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) {
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- mdc_ioepoch_pack(epoch, op_data);
- }
-
- if (ealen == 0)
- return;
-
- lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- if (ea == NULL) { /* Remove LOV EA */
- lum->lmm_magic = LOV_USER_MAGIC_V1;
- lum->lmm_stripe_size = 0;
- lum->lmm_stripe_count = 0;
- lum->lmm_stripe_offset = (typeof(lum->lmm_stripe_offset))(-1);
- } else {
- memcpy(lum, ea, ealen);
- }
-
- if (ea2len == 0)
- return;
-
- memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2,
- ea2len);
-}
-
-void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
-{
- struct mdt_rec_unlink *rec;
- char *tmp;
-
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- LASSERT(rec != NULL);
-
- rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ?
- REINT_RMENTRY : REINT_UNLINK;
- rec->ul_fsuid = op_data->op_fsuid;
- rec->ul_fsgid = op_data->op_fsgid;
- rec->ul_cap = op_data->op_cap;
- rec->ul_mode = op_data->op_mode;
- rec->ul_suppgid1 = op_data->op_suppgids[0];
- rec->ul_suppgid2 = -1;
- rec->ul_fid1 = op_data->op_fid1;
- rec->ul_fid2 = op_data->op_fid2;
- rec->ul_time = op_data->op_mod_time;
- rec->ul_bias = op_data->op_bias;
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LASSERT(tmp != NULL);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-}
-
-void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
-{
- struct mdt_rec_link *rec;
- char *tmp;
-
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- LASSERT(rec != NULL);
-
- rec->lk_opcode = REINT_LINK;
- rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */
- rec->lk_fsgid = op_data->op_fsgid; /* current->fsgid; */
- rec->lk_cap = op_data->op_cap; /* current->cap_effective; */
- rec->lk_suppgid1 = op_data->op_suppgids[0];
- rec->lk_suppgid2 = op_data->op_suppgids[1];
- rec->lk_fid1 = op_data->op_fid1;
- rec->lk_fid2 = op_data->op_fid2;
- rec->lk_time = op_data->op_mod_time;
- rec->lk_bias = op_data->op_bias;
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-}
-
-void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen)
-{
- struct mdt_rec_rename *rec;
- char *tmp;
-
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_rename));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- /* XXX do something about time, uid, gid */
- rec->rn_opcode = REINT_RENAME;
- rec->rn_fsuid = op_data->op_fsuid;
- rec->rn_fsgid = op_data->op_fsgid;
- rec->rn_cap = op_data->op_cap;
- rec->rn_suppgid1 = op_data->op_suppgids[0];
- rec->rn_suppgid2 = op_data->op_suppgids[1];
- rec->rn_fid1 = op_data->op_fid1;
- rec->rn_fid2 = op_data->op_fid2;
- rec->rn_time = op_data->op_mod_time;
- rec->rn_mode = op_data->op_mode;
- rec->rn_bias = op_data->op_bias;
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(old, oldlen, tmp);
-
- if (new) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SYMTGT);
- LOGL0(new, newlen, tmp);
- }
-}
-
-void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
- struct md_op_data *op_data, int ea_size)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
-
- b->valid = valid;
- if (op_data->op_bias & MDS_CHECK_SPLIT)
- b->valid |= OBD_MD_FLCKSPLIT;
- if (op_data->op_bias & MDS_CROSS_REF)
- b->valid |= OBD_MD_FLCROSSREF;
- b->eadatasize = ea_size;
- b->flags = flags;
- __mdc_pack_body(b, op_data->op_suppgids[0]);
-
- b->fid1 = op_data->op_fid1;
- b->fid2 = op_data->op_fid2;
- b->valid |= OBD_MD_FLID;
-
- if (op_data->op_name) {
- char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
-
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-
- }
-}
-
-static void mdc_hsm_release_pack(struct ptlrpc_request *req,
- struct md_op_data *op_data)
-{
- if (op_data->op_bias & MDS_HSM_RELEASE) {
- struct close_data *data;
- struct ldlm_lock *lock;
-
- data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
- LASSERT(data != NULL);
-
- lock = ldlm_handle2lock(&op_data->op_lease_handle);
- if (lock != NULL) {
- data->cd_handle = lock->l_remote_handle;
- ldlm_lock_put(lock);
- }
- ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
-
- data->cd_data_version = op_data->op_data_version;
- data->cd_fid = op_data->op_fid2;
- }
-}
-
-void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
-{
- struct mdt_ioepoch *epoch;
- struct mdt_rec_setattr *rec;
-
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
-
- mdc_setattr_pack_rec(rec, op_data);
- mdc_ioepoch_pack(epoch, op_data);
- mdc_hsm_release_pack(req, op_data);
-}
-
-static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
-{
- int rc;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- return rc;
-};
-
-/* We record requests in flight in cli->cl_r_in_flight here.
- * There is only one write rpc possible in mdc anyway. If this is to change
- * in the future, the code may need to be revisited. */
-int mdc_enter_request(struct client_obd *cli)
-{
- int rc = 0;
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw),
- &lwi);
- if (rc) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (list_empty(&mcw.mcw_entry))
- cli->cl_r_in_flight--;
- list_del_init(&mcw.mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
- } else {
- cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
- return rc;
-}
-
-void mdc_exit_request(struct client_obd *cli)
-{
- struct list_head *l, *tmp;
- struct mdc_cache_waiter *mcw;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
- list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- wake_up(&mcw->mcw_waitq);
- }
- /* Empty waiting list? Decrease reqs in-flight number */
-
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
deleted file mode 100644
index 5c34130ab69a..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ /dev/null
@@ -1,1282 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-
-# include <linux/module.h>
-
-#include "../include/lustre_intent.h"
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_fid.h" /* fid_res_name_eq() */
-#include "../include/lustre_mdc.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_req_layout.h"
-#include "mdc_internal.h"
-
-struct mdc_getattr_args {
- struct obd_export *ga_exp;
- struct md_enqueue_info *ga_minfo;
- struct ldlm_enqueue_info *ga_einfo;
-};
-
-int it_disposition(struct lookup_intent *it, int flag)
-{
- return it->d.lustre.it_disposition & flag;
-}
-EXPORT_SYMBOL(it_disposition);
-
-void it_set_disposition(struct lookup_intent *it, int flag)
-{
- it->d.lustre.it_disposition |= flag;
-}
-EXPORT_SYMBOL(it_set_disposition);
-
-void it_clear_disposition(struct lookup_intent *it, int flag)
-{
- it->d.lustre.it_disposition &= ~flag;
-}
-EXPORT_SYMBOL(it_clear_disposition);
-
-int it_open_error(int phase, struct lookup_intent *it)
-{
- if (it_disposition(it, DISP_OPEN_LEASE)) {
- if (phase >= DISP_OPEN_LEASE)
- return it->d.lustre.it_status;
- else
- return 0;
- }
- if (it_disposition(it, DISP_OPEN_OPEN)) {
- if (phase >= DISP_OPEN_OPEN)
- return it->d.lustre.it_status;
- else
- return 0;
- }
-
- if (it_disposition(it, DISP_OPEN_CREATE)) {
- if (phase >= DISP_OPEN_CREATE)
- return it->d.lustre.it_status;
- else
- return 0;
- }
-
- if (it_disposition(it, DISP_LOOKUP_EXECD)) {
- if (phase >= DISP_LOOKUP_EXECD)
- return it->d.lustre.it_status;
- else
- return 0;
- }
-
- if (it_disposition(it, DISP_IT_EXECD)) {
- if (phase >= DISP_IT_EXECD)
- return it->d.lustre.it_status;
- else
- return 0;
- }
- CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
- it->d.lustre.it_status);
- LBUG();
- return 0;
-}
-EXPORT_SYMBOL(it_open_error);
-
-/* this must be called on a lockh that is known to have a referenced lock */
-int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
- __u64 *bits)
-{
- struct ldlm_lock *lock;
- struct inode *new_inode = data;
-
- if (bits)
- *bits = 0;
-
- if (!*lockh)
- return 0;
-
- lock = ldlm_handle2lock((struct lustre_handle *)lockh);
-
- LASSERT(lock != NULL);
- lock_res_and_lock(lock);
- if (lock->l_resource->lr_lvb_inode &&
- lock->l_resource->lr_lvb_inode != data) {
- struct inode *old_inode = lock->l_resource->lr_lvb_inode;
-
- LASSERTF(old_inode->i_state & I_FREEING,
- "Found existing inode %p/%lu/%u state %lu in lock: setting data to %p/%lu/%u\n",
- old_inode, old_inode->i_ino, old_inode->i_generation,
- old_inode->i_state, new_inode, new_inode->i_ino,
- new_inode->i_generation);
- }
- lock->l_resource->lr_lvb_inode = new_inode;
- if (bits)
- *bits = lock->l_policy_data.l_inodebits.bits;
-
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
-
- return 0;
-}
-
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh)
-{
- struct ldlm_res_id res_id;
- ldlm_mode_t rc;
-
- fid_build_reg_res_name(fid, &res_id);
- /* LU-4405: Clear bits not supported by server */
- policy->l_inodebits.bits &= exp_connect_ibits(exp);
- rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
- &res_id, type, policy, mode, lockh, 0);
- return rc;
-}
-
-int mdc_cancel_unused(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
-{
- struct ldlm_res_id res_id;
- struct obd_device *obd = class_exp2obd(exp);
- int rc;
-
- fid_build_reg_res_name(fid, &res_id);
- rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
- policy, mode, flags, opaque);
- return rc;
-}
-
-int mdc_null_inode(struct obd_export *exp,
- const struct lu_fid *fid)
-{
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace;
-
- LASSERTF(ns != NULL, "no namespace passed\n");
-
- fid_build_reg_res_name(fid, &res_id);
-
- res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (res == NULL)
- return 0;
-
- lock_res(res);
- res->lr_lvb_inode = NULL;
- unlock_res(res);
-
- ldlm_resource_putref(res);
- return 0;
-}
-
-/* Find any ldlm lock of the inode in mdc.
- * Returns 0 if no lock is found,
- *         1 if one is found,
- *         < 0 on error. */
-int mdc_find_cbdata(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_iterator_t it, void *data)
-{
- struct ldlm_res_id res_id;
- int rc = 0;
-
- fid_build_reg_res_name((struct lu_fid *)fid, &res_id);
- rc = ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace, &res_id,
- it, data);
- if (rc == LDLM_ITER_STOP)
- return 1;
- else if (rc == LDLM_ITER_CONTINUE)
- return 0;
- return rc;
-}
-
-static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
-{
- /* Don't hold error requests for replay. */
- if (req->rq_replay) {
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
- }
- if (rc && req->rq_transno != 0) {
- DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
- LBUG();
- }
-}
-
-/* Save a large LOV EA into the request buffer so that it is available
- * for replay. We don't do this in the initial request because the
- * original request doesn't need this buffer (at most it sends just the
- * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty
- * buffer and may also be difficult to allocate and save a very large
- * request buffer for each open. (bug 5707)
- *
- * OOM here may cause recovery failure if lmm is needed (only for the
- * original open if the MDS crashed just when this client also OOM'd),
- * but this is incredibly unlikely, and it is questionable whether the
- * client could do MDS recovery under OOM anyway... */
-static void mdc_realloc_openmsg(struct ptlrpc_request *req,
- struct mdt_body *body)
-{
- int rc;
-
- /* FIXME: remove this explicit offset. */
- rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
- body->eadatasize);
- if (rc) {
- CERROR("Can't enlarge segment %d size to %d\n",
- DLM_INTENT_REC_OFF + 4, body->eadatasize);
- body->valid &= ~OBD_MD_FLEASIZE;
- body->eadatasize = 0;
- }
-}
-
-static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data,
- void *lmm, int lmmsize,
- void *cb_data)
-{
- struct ptlrpc_request *req;
- struct obd_device *obddev = class_exp2obd(exp);
- struct ldlm_intent *lit;
- LIST_HEAD(cancels);
- int count = 0;
- int mode;
- int rc;
-
- it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
-
- /* XXX: openlock is not cancelled for cross-refs. */
- /* If inode is known, cancel conflicting OPEN locks. */
- if (fid_is_sane(&op_data->op_fid2)) {
- if (it->it_flags & MDS_OPEN_LEASE) { /* try to get lease */
- if (it->it_flags & FMODE_WRITE)
- mode = LCK_EX;
- else
- mode = LCK_PR;
- } else {
- if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
- mode = LCK_CW;
- else if (it->it_flags & __FMODE_EXEC)
- mode = LCK_PR;
- else
- mode = LCK_CR;
- }
- count = mdc_resource_get_unused(exp, &op_data->op_fid2,
- &cancels, mode,
- MDS_INODELOCK_OPEN);
- }
-
- /* If CREATE, cancel parent's UPDATE lock. */
- if (it->it_op & IT_CREAT)
- mode = LCK_EX;
- else
- mode = LCK_CR;
- count += mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, mode,
- MDS_INODELOCK_UPDATE);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_OPEN);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return ERR_PTR(-ENOMEM);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
- max(lmmsize, obddev->u.cli.cl_default_mds_easize));
-
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
- if (rc < 0) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- spin_lock(&req->rq_lock);
- req->rq_replay = req->rq_import->imp_replayable;
- spin_unlock(&req->rq_lock);
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- /* pack the intended request */
- mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
- lmmsize);
-
- /* for remote client, fetch remote perm for current user */
- if (client_is_remote(exp))
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *
-mdc_intent_getxattr_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data)
-{
- struct ptlrpc_request *req;
- struct ldlm_intent *lit;
- int rc, count = 0, maxdata;
- LIST_HEAD(cancels);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_GETXATTR);
- if (req == NULL)
- return ERR_PTR(-ENOMEM);
-
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = IT_GETXATTR;
-
- maxdata = class_exp2cliimp(exp)->imp_connect_data.ocd_max_easize;
-
- /* pack the intended request */
- mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, maxdata, -1,
- 0);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
- RCL_SERVER, maxdata);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EAVALS,
- RCL_SERVER, maxdata);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS,
- RCL_SERVER, maxdata);
-
- ptlrpc_request_set_replen(req);
-
- return req;
-}
-
-static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data)
-{
- struct ptlrpc_request *req;
- struct obd_device *obddev = class_exp2obd(exp);
- struct ldlm_intent *lit;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_UNLINK);
- if (req == NULL)
- return ERR_PTR(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- /* pack the intended request */
- mdc_unlink_pack(req, op_data);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obddev->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- obddev->u.cli.cl_default_mds_cookiesize);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data)
-{
- struct ptlrpc_request *req;
- struct obd_device *obddev = class_exp2obd(exp);
- u64 valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
- OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
- OBD_MD_MEA |
- (client_is_remote(exp) ?
- OBD_MD_FLRMTPERM : OBD_MD_FLACL);
- struct ldlm_intent *lit;
- int rc;
- int easize;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_GETATTR);
- if (req == NULL)
- return ERR_PTR(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- if (obddev->u.cli.cl_default_mds_easize > 0)
- easize = obddev->u.cli.cl_default_mds_easize;
- else
- easize = obddev->u.cli.cl_max_mds_easize;
-
- /* pack the intended request */
- mdc_getattr_pack(req, valid, it->it_flags, op_data, easize);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize);
- if (client_is_remote(exp))
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *unused)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct ldlm_intent *lit;
- struct layout_intent *layout;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_LAYOUT);
- if (req == NULL)
- return ERR_PTR(-ENOMEM);
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0);
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- /* pack the intent */
- lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
- lit->opc = (__u64)it->it_op;
-
- /* pack the layout intent request */
- layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT);
-	/* LAYOUT_INTENT_ACCESS is generic; the specific operation will be
-	 * set for replication */
- layout->li_opc = LAYOUT_INTENT_ACCESS;
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static struct ptlrpc_request *
-mdc_enqueue_pack(struct obd_export *exp, int lvb_len)
-{
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
- if (req == NULL)
- return ERR_PTR(-ENOMEM);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return ERR_PTR(rc);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
- ptlrpc_request_set_replen(req);
- return req;
-}
-
-static int mdc_finish_enqueue(struct obd_export *exp,
- struct ptlrpc_request *req,
- struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it,
- struct lustre_handle *lockh,
- int rc)
-{
- struct req_capsule *pill = &req->rq_pill;
- struct ldlm_request *lockreq;
- struct ldlm_reply *lockrep;
- struct lustre_intent_data *intent = &it->d.lustre;
- struct ldlm_lock *lock;
- void *lvb_data = NULL;
- int lvb_len = 0;
-
- LASSERT(rc >= 0);
- /* Similarly, if we're going to replay this request, we don't want to
- * actually get a lock, just perform the intent. */
- if (req->rq_transno || req->rq_replay) {
- lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
- lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY);
- }
-
- if (rc == ELDLM_LOCK_ABORTED) {
- einfo->ei_mode = 0;
- memset(lockh, 0, sizeof(*lockh));
- rc = 0;
- } else { /* rc = 0 */
- lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
-
- /* If the server gave us back a different lock mode, we should
- * fix up our variables. */
- if (lock->l_req_mode != einfo->ei_mode) {
- ldlm_lock_addref(lockh, lock->l_req_mode);
- ldlm_lock_decref(lockh, einfo->ei_mode);
- einfo->ei_mode = lock->l_req_mode;
- }
- LDLM_LOCK_PUT(lock);
- }
-
- lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
- LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */
-
- intent->it_disposition = (int)lockrep->lock_policy_res1;
- intent->it_status = (int)lockrep->lock_policy_res2;
- intent->it_lock_mode = einfo->ei_mode;
- intent->it_lock_handle = lockh->cookie;
- intent->it_data = req;
-
- /* Technically speaking rq_transno must already be zero if
- * it_status is in error, so the check is a bit redundant */
- if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay)
- mdc_clear_replay_flag(req, intent->it_status);
-
- /* If we're doing an IT_OPEN which did not result in an actual
- * successful open, then we need to remove the bit which saves
- * this request for unconditional replay.
- *
- * It's important that we do this first! Otherwise we might exit the
- * function without doing so, and try to replay a failed create
- * (bug 3440) */
- if (it->it_op & IT_OPEN && req->rq_replay &&
- (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0))
- mdc_clear_replay_flag(req, intent->it_status);
-
- DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
- it->it_op, intent->it_disposition, intent->it_status);
-
- /* We know what to expect, so we do any byte flipping required here */
- if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
- struct mdt_body *body;
-
- body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- if (body == NULL) {
- CERROR("Can't swab mdt_body\n");
- return -EPROTO;
- }
-
- if (it_disposition(it, DISP_OPEN_OPEN) &&
- !it_open_error(DISP_OPEN_OPEN, it)) {
- /*
- * If this is a successful OPEN request, we need to set
- * replay handler and data early, so that if replay
- * happens immediately after swabbing below, new reply
- * is swabbed by that handler correctly.
- */
- mdc_set_open_replay_data(NULL, NULL, it);
- }
-
- if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
- void *eadata;
-
- mdc_update_max_ea_from_body(exp, body);
-
- /*
- * The eadata is opaque; just check that it is there.
- * Eventually, obd_unpackmd() will check the contents.
- */
- eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- body->eadatasize);
- if (eadata == NULL)
- return -EPROTO;
-
- /* save lvb data and length in case this is for layout
- * lock */
- lvb_data = eadata;
- lvb_len = body->eadatasize;
-
- /*
- * We save the reply LOV EA in case we have to replay a
- * create for recovery. If we didn't allocate a large
- * enough request buffer above we need to reallocate it
- * here to hold the actual LOV EA.
- *
-			 * Do not save the LOV EA if the request is not going to be
-			 * replayed (for example, an error one).
- */
- if ((it->it_op & IT_OPEN) && req->rq_replay) {
- void *lmm;
-
- if (req_capsule_get_size(pill, &RMF_EADATA,
- RCL_CLIENT) <
- body->eadatasize)
- mdc_realloc_openmsg(req, body);
- else
- req_capsule_shrink(pill, &RMF_EADATA,
- body->eadatasize,
- RCL_CLIENT);
-
- req_capsule_set_size(pill, &RMF_EADATA,
- RCL_CLIENT,
- body->eadatasize);
-
- lmm = req_capsule_client_get(pill, &RMF_EADATA);
- if (lmm)
- memcpy(lmm, eadata, body->eadatasize);
- }
- }
-
- if (body->valid & OBD_MD_FLRMTPERM) {
- struct mdt_remote_perm *perm;
-
- LASSERT(client_is_remote(exp));
- perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (perm == NULL)
- return -EPROTO;
- }
- } else if (it->it_op & IT_LAYOUT) {
- /* maybe the lock was granted right away and layout
- * is packed into RMF_DLM_LVB of req */
- lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER);
- if (lvb_len > 0) {
- lvb_data = req_capsule_server_sized_get(pill,
- &RMF_DLM_LVB, lvb_len);
- if (lvb_data == NULL)
- return -EPROTO;
- }
- }
-
- /* fill in stripe data for layout lock */
- lock = ldlm_handle2lock(lockh);
- if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) {
- void *lmm;
-
- LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n",
- ldlm_it2str(it->it_op), lvb_len);
-
- lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS);
- if (lmm == NULL) {
- LDLM_LOCK_PUT(lock);
- return -ENOMEM;
- }
- memcpy(lmm, lvb_data, lvb_len);
-
- /* install lvb_data */
- lock_res_and_lock(lock);
- if (lock->l_lvb_data == NULL) {
- lock->l_lvb_type = LVB_T_LAYOUT;
- lock->l_lvb_data = lmm;
- lock->l_lvb_len = lvb_len;
- lmm = NULL;
- }
- unlock_res_and_lock(lock);
- if (lmm != NULL)
- kvfree(lmm);
- }
- if (lock != NULL)
- LDLM_LOCK_PUT(lock);
-
- return rc;
-}
-
-/* We always reserve enough space in the reply packet for a stripe MD, because
- * we don't know in advance the file type. */
-int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **reqp, u64 extra_lock_flags)
-{
- static const ldlm_policy_data_t lookup_policy = {
- .l_inodebits = { MDS_INODELOCK_LOOKUP }
- };
- static const ldlm_policy_data_t update_policy = {
- .l_inodebits = { MDS_INODELOCK_UPDATE }
- };
- static const ldlm_policy_data_t layout_policy = {
- .l_inodebits = { MDS_INODELOCK_LAYOUT }
- };
- static const ldlm_policy_data_t getxattr_policy = {
- .l_inodebits = { MDS_INODELOCK_XATTR }
- };
- ldlm_policy_data_t const *policy = &lookup_policy;
- struct obd_device *obddev = class_exp2obd(exp);
- struct ptlrpc_request *req;
- u64 flags, saved_flags = extra_lock_flags;
- struct ldlm_res_id res_id;
- int generation, resends = 0;
- struct ldlm_reply *lockrep;
- enum lvb_type lvb_type = LVB_T_NONE;
- int rc;
-
- LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
- einfo->ei_type);
-
- fid_build_reg_res_name(&op_data->op_fid1, &res_id);
-
- if (it) {
- saved_flags |= LDLM_FL_HAS_INTENT;
- if (it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR))
- policy = &update_policy;
- else if (it->it_op & IT_LAYOUT)
- policy = &layout_policy;
- else if (it->it_op & (IT_GETXATTR | IT_SETXATTR))
- policy = &getxattr_policy;
- }
-
- LASSERT(reqp == NULL);
-
- generation = obddev->u.cli.cl_import->imp_generation;
-resend:
- flags = saved_flags;
- if (!it) {
- /* The only way right now is FLOCK, in this case we hide flock
- policy as lmm, but lmmsize is 0 */
- LASSERT(lmm && lmmsize == 0);
- LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n",
- einfo->ei_type);
- policy = lmm;
- res_id.name[3] = LDLM_FLOCK;
- req = NULL;
- } else if (it->it_op & IT_OPEN) {
- req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize,
- einfo->ei_cbdata);
- policy = &update_policy;
- einfo->ei_cbdata = NULL;
- lmm = NULL;
- } else if (it->it_op & IT_UNLINK) {
- req = mdc_intent_unlink_pack(exp, it, op_data);
- } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
- req = mdc_intent_getattr_pack(exp, it, op_data);
- } else if (it->it_op & IT_READDIR) {
- req = mdc_enqueue_pack(exp, 0);
- } else if (it->it_op & IT_LAYOUT) {
- if (!imp_connect_lvb_type(class_exp2cliimp(exp)))
- return -EOPNOTSUPP;
- req = mdc_intent_layout_pack(exp, it, op_data);
- lvb_type = LVB_T_LAYOUT;
- } else if (it->it_op & IT_GETXATTR) {
- req = mdc_intent_getxattr_pack(exp, it, op_data);
- } else {
- LBUG();
- return -EINVAL;
- }
-
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- if (req != NULL && it && it->it_op & IT_CREAT)
- /* ask ptlrpc not to resend on EINPROGRESS since we have our own
- * retry logic */
- req->rq_no_retry_einprogress = 1;
-
- if (resends) {
- req->rq_generation_set = 1;
- req->rq_import_generation = generation;
- req->rq_sent = ktime_get_real_seconds() + resends;
- }
-
- /* It is important to obtain rpc_lock first (if applicable), so that
- * threads that are serialised with rpc_lock are not polluting our
-	 * rpcs in flight counter. We do not do flock request limiting, though */
- if (it) {
- mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
- rc = mdc_enter_request(&obddev->u.cli);
- if (rc != 0) {
- mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
- mdc_clear_replay_flag(req, 0);
- ptlrpc_req_finished(req);
- return rc;
- }
- }
-
- rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL,
- 0, lvb_type, lockh, 0);
- if (!it) {
-		/* For flock requests we immediately return without further
-		   delay and let the caller deal with the rest, since the rest
-		   of this function's metadata processing makes no sense for
-		   flock requests anyway. But in case of a problem during comms
-		   with the server (ETIMEDOUT) or any signal/kill attempt
-		   (EINTR), we cannot rely on the caller; this is mainly for
-		   F_UNLCKs (explicit or automatically generated by the kernel
-		   to clean current flocks upon exit) that can't be trashed */
- if ((rc == -EINTR) || (rc == -ETIMEDOUT))
- goto resend;
- return rc;
- }
-
- mdc_exit_request(&obddev->u.cli);
- mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
-
- if (rc < 0) {
- CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
- "%s: ldlm_cli_enqueue failed: rc = %d\n",
- obddev->obd_name, rc);
-
- mdc_clear_replay_flag(req, rc);
- ptlrpc_req_finished(req);
- return rc;
- }
-
- lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- LASSERT(lockrep != NULL);
-
- lockrep->lock_policy_res2 =
- ptlrpc_status_ntoh(lockrep->lock_policy_res2);
-
- /* Retry the create infinitely when we get -EINPROGRESS from
- * server. This is required by the new quota design. */
- if (it && it->it_op & IT_CREAT &&
- (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
- mdc_clear_replay_flag(req, rc);
- ptlrpc_req_finished(req);
- resends++;
-
- CDEBUG(D_HA, "%s: resend:%d op:%d "DFID"/"DFID"\n",
- obddev->obd_name, resends, it->it_op,
- PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
-
- if (generation == obddev->u.cli.cl_import->imp_generation) {
- goto resend;
- } else {
- CDEBUG(D_HA, "resend cross eviction\n");
- return -EIO;
- }
- }
-
- rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
- if (rc < 0) {
- if (lustre_handle_is_used(lockh)) {
- ldlm_lock_decref(lockh, einfo->ei_mode);
- memset(lockh, 0, sizeof(*lockh));
- }
- ptlrpc_req_finished(req);
-
- it->d.lustre.it_lock_handle = 0;
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
- }
-
- return rc;
-}
-
-static int mdc_finish_intent_lock(struct obd_export *exp,
- struct ptlrpc_request *request,
- struct md_op_data *op_data,
- struct lookup_intent *it,
- struct lustre_handle *lockh)
-{
- struct lustre_handle old_lock;
- struct mdt_body *mdt_body;
- struct ldlm_lock *lock;
- int rc;
-
- LASSERT(request != NULL);
- LASSERT(request != LP_POISON);
- LASSERT(request->rq_repmsg != LP_POISON);
-
- if (!it_disposition(it, DISP_IT_EXECD)) {
- /* The server failed before it even started executing the
- * intent, i.e. because it couldn't unpack the request. */
- LASSERT(it->d.lustre.it_status != 0);
- return it->d.lustre.it_status;
- }
- rc = it_open_error(DISP_IT_EXECD, it);
- if (rc)
- return rc;
-
- mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- LASSERT(mdt_body != NULL); /* mdc_enqueue checked */
-
- /* If we were revalidating a fid/name pair, mark the intent in
- * case we fail and get called again from lookup */
- if (fid_is_sane(&op_data->op_fid2) &&
- it->it_create_mode & M_CHECK_STALE &&
- it->it_op != IT_GETATTR) {
-
- /* Also: did we find the same inode? */
-		/* server can return one of two fids:
- * op_fid2 - new allocated fid - if file is created.
- * op_fid3 - existent fid - if file only open.
- * op_fid3 is saved in lmv_intent_open */
- if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) &&
- (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) {
- CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID
- "\n", PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid2), PFID(&mdt_body->fid1));
- return -ESTALE;
- }
- }
-
- rc = it_open_error(DISP_LOOKUP_EXECD, it);
- if (rc)
- return rc;
-
- /* keep requests around for the multiple phases of the call
- * this shows the DISP_XX must guarantee we make it into the call
- */
- if (!it_disposition(it, DISP_ENQ_CREATE_REF) &&
- it_disposition(it, DISP_OPEN_CREATE) &&
- !it_open_error(DISP_OPEN_CREATE, it)) {
- it_set_disposition(it, DISP_ENQ_CREATE_REF);
- ptlrpc_request_addref(request); /* balanced in ll_create_node */
- }
- if (!it_disposition(it, DISP_ENQ_OPEN_REF) &&
- it_disposition(it, DISP_OPEN_OPEN) &&
- !it_open_error(DISP_OPEN_OPEN, it)) {
- it_set_disposition(it, DISP_ENQ_OPEN_REF);
- ptlrpc_request_addref(request); /* balanced in ll_file_open */
- /* BUG 11546 - eviction in the middle of open rpc processing */
- OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
- }
-
- if (it->it_op & IT_CREAT) {
- /* XXX this belongs in ll_create_it */
- } else if (it->it_op == IT_OPEN) {
- LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
- } else {
- LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP | IT_LAYOUT));
- }
-
- /* If we already have a matching lock, then cancel the new
- * one. We have to set the data here instead of in
- * mdc_enqueue, because we need to use the child's inode as
- * the l_ast_data to match, and that's not available until
-	 * intent_finish has performed the iget(). */
- lock = ldlm_handle2lock(lockh);
- if (lock) {
- ldlm_policy_data_t policy = lock->l_policy_data;
-
- LDLM_DEBUG(lock, "matching against this");
-
- LASSERTF(fid_res_name_eq(&mdt_body->fid1,
- &lock->l_resource->lr_name),
- "Lock res_id: "DLDLMRES", fid: "DFID"\n",
- PLDLMRES(lock->l_resource), PFID(&mdt_body->fid1));
- LDLM_LOCK_PUT(lock);
-
- memcpy(&old_lock, lockh, sizeof(*lockh));
- if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
- LDLM_IBITS, &policy, LCK_NL,
- &old_lock, 0)) {
- ldlm_lock_decref_and_cancel(lockh,
- it->d.lustre.it_lock_mode);
- memcpy(lockh, &old_lock, sizeof(old_lock));
- it->d.lustre.it_lock_handle = lockh->cookie;
- }
- }
- CDEBUG(D_DENTRY,
- "D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
- op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
- it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
- return rc;
-}
-
-int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
-{
- /* We could just return 1 immediately, but since we should only
- * be called in revalidate_it if we already have a lock, let's
- * verify that. */
- struct ldlm_res_id res_id;
- struct lustre_handle lockh;
- ldlm_policy_data_t policy;
- ldlm_mode_t mode;
-
- if (it->d.lustre.it_lock_handle) {
- lockh.cookie = it->d.lustre.it_lock_handle;
- mode = ldlm_revalidate_lock_handle(&lockh, bits);
- } else {
- fid_build_reg_res_name(fid, &res_id);
- switch (it->it_op) {
- case IT_GETATTR:
- /* File attributes are held under multiple bits:
- * nlink is under lookup lock, size and times are
- * under UPDATE lock and recently we've also got
- * a separate permissions lock for owner/group/acl that
- * were protected by lookup lock before.
- * Getattr must provide all of that information,
- * so we need to ensure we have all of those locks.
- * Unfortunately, if the bits are split across multiple
- * locks, there's no easy way to match all of them here,
- * so an extra RPC would be performed to fetch all
- * of those bits at once for now. */
- /* For new MDTs(> 2.4), UPDATE|PERM should be enough,
- * but for old MDTs (< 2.4), permission is covered
- * by LOOKUP lock, so it needs to match all bits here.*/
- policy.l_inodebits.bits = MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LOOKUP |
- MDS_INODELOCK_PERM;
- break;
- case IT_LAYOUT:
- policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT;
- break;
- default:
- policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP;
- break;
- }
-
- mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid,
- LDLM_IBITS, &policy,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW,
- &lockh);
- }
-
- if (mode) {
- it->d.lustre.it_lock_handle = lockh.cookie;
- it->d.lustre.it_lock_mode = mode;
- } else {
- it->d.lustre.it_lock_handle = 0;
- it->d.lustre.it_lock_mode = 0;
- }
-
- return !!mode;
-}
-
-/*
- * This long block is all about fixing up the lock and request state
- * so that it is correct as of the moment _before_ the operation was
- * applied; that way, the VFS will think that everything is normal and
- * call Lustre's regular VFS methods.
- *
- * If we're performing a creation, that means that unless the creation
- * failed with EEXIST, we should fake up a negative dentry.
- *
- * For everything else, we want to lookup to succeed.
- *
- * One additional note: if CREATE or OPEN succeeded, we add an extra
- * reference to the request because we need to keep it around until
- * ll_create/ll_open gets called.
- *
- * The server will return to us, in it_disposition, an indication of
- * exactly what d.lustre.it_status refers to.
- *
- * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
- * otherwise if DISP_OPEN_CREATE is set, then it status is the
- * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
- * DISP_LOOKUP_POS will be set, indicating whether the child lookup
- * was successful.
- *
- * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
- * child lookup.
- */
-int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int lookup_flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
-{
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = it_to_lock_mode(it),
- .ei_cb_bl = cb_blocking,
- .ei_cb_cp = ldlm_completion_ast,
- };
- struct lustre_handle lockh;
- int rc = 0;
-
- LASSERT(it);
-
- CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
- ", intent: %s flags %#Lo\n", op_data->op_namelen,
- op_data->op_name, PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
- it->it_flags);
-
- lockh.cookie = 0;
- if (fid_is_sane(&op_data->op_fid2) &&
- (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
- /* We could just return 1 immediately, but since we should only
- * be called in revalidate_it if we already have a lock, let's
- * verify that. */
- it->d.lustre.it_lock_handle = 0;
- rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
- /* Only return failure if it was not GETATTR by cfid
- (from inode_revalidate) */
- if (rc || op_data->op_namelen != 0)
- return rc;
- }
-
- /* For case if upper layer did not alloc fid, do it now. */
- if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("Can't alloc new fid, rc %d\n", rc);
- return rc;
- }
- }
- rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh, lmm, lmmsize, NULL,
- extra_lock_flags);
- if (rc < 0)
- return rc;
-
- *reqp = it->d.lustre.it_data;
- rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
- return rc;
-}
-
-static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *args, int rc)
-{
- struct mdc_getattr_args *ga = args;
- struct obd_export *exp = ga->ga_exp;
- struct md_enqueue_info *minfo = ga->ga_minfo;
- struct ldlm_enqueue_info *einfo = ga->ga_einfo;
- struct lookup_intent *it;
- struct lustre_handle *lockh;
- struct obd_device *obddev;
- struct ldlm_reply *lockrep;
- __u64 flags = LDLM_FL_HAS_INTENT;
-
- it = &minfo->mi_it;
- lockh = &minfo->mi_lockh;
-
- obddev = class_exp2obd(exp);
-
- mdc_exit_request(&obddev->u.cli);
- if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE))
- rc = -ETIMEDOUT;
-
- rc = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, 1, einfo->ei_mode,
- &flags, NULL, 0, lockh, rc);
- if (rc < 0) {
- CERROR("ldlm_cli_enqueue_fini: %d\n", rc);
- mdc_clear_replay_flag(req, rc);
- goto out;
- }
-
- lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- LASSERT(lockrep != NULL);
-
- lockrep->lock_policy_res2 =
- ptlrpc_status_ntoh(lockrep->lock_policy_res2);
-
- rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
- if (rc)
- goto out;
-
- rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh);
-
-out:
- kfree(einfo);
- minfo->mi_cb(req, minfo, rc);
- return 0;
-}
-
-int mdc_intent_getattr_async(struct obd_export *exp,
- struct md_enqueue_info *minfo,
- struct ldlm_enqueue_info *einfo)
-{
- struct md_op_data *op_data = &minfo->mi_data;
- struct lookup_intent *it = &minfo->mi_it;
- struct ptlrpc_request *req;
- struct mdc_getattr_args *ga;
- struct obd_device *obddev = class_exp2obd(exp);
- struct ldlm_res_id res_id;
-	/* XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed
-	 * for statahead currently. With CMD in the future, these two bits
-	 * may be managed by different MDSs and should be adjusted then. */
- ldlm_policy_data_t policy = {
- .l_inodebits = { MDS_INODELOCK_LOOKUP |
- MDS_INODELOCK_UPDATE }
- };
- int rc = 0;
- __u64 flags = LDLM_FL_HAS_INTENT;
-
- CDEBUG(D_DLMTRACE,
- "name: %.*s in inode "DFID", intent: %s flags %#Lo\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- ldlm_it2str(it->it_op), it->it_flags);
-
- fid_build_reg_res_name(&op_data->op_fid1, &res_id);
- req = mdc_intent_getattr_pack(exp, it, op_data);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- rc = mdc_enter_request(&obddev->u.cli);
- if (rc != 0) {
- ptlrpc_req_finished(req);
- return rc;
- }
-
- rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
- 0, LVB_T_NONE, &minfo->mi_lockh, 1);
- if (rc < 0) {
- mdc_exit_request(&obddev->u.cli);
- ptlrpc_req_finished(req);
- return rc;
- }
-
- CLASSERT(sizeof(*ga) <= sizeof(req->rq_async_args));
- ga = ptlrpc_req_async_args(req);
- ga->ga_exp = exp;
- ga->ga_minfo = minfo;
- ga->ga_einfo = einfo;
-
- req->rq_interpret_reply = mdc_intent_getattr_async_interpret;
- ptlrpcd_add_req(req);
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
deleted file mode 100644
index c87c7d8efa07..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-
-# include <linux/module.h>
-# include <linux/kernel.h>
-
-#include "../include/obd_class.h"
-#include "mdc_internal.h"
-#include "../include/lustre_fid.h"
-
-/* mdc_setattr does its own semaphore handling */
-static int mdc_reint(struct ptlrpc_request *request,
- struct mdc_rpc_lock *rpc_lock,
- int level)
-{
- int rc;
-
- request->rq_send_state = level;
-
- mdc_get_rpc_lock(rpc_lock, NULL);
- rc = ptlrpc_queue_wait(request);
- mdc_put_rpc_lock(rpc_lock, NULL);
- if (rc)
- CDEBUG(D_INFO, "error in handling %d\n", rc);
- else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY))
- rc = -EPROTO;
-
- return rc;
-}
-
-/* Find and cancel locally locks matched by inode @bits & @mode in the resource
- * found by @fid. Found locks are added into @cancel list. Returns the amount of
- * locks added to @cancels list. */
-int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
- __u64 bits)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- ldlm_policy_data_t policy = {};
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- int count;
-
- /* Return, i.e. cancel nothing, only if ELC is supported (flag in
- * export) but disabled through procfs (flag in NS).
- *
- * This distinguishes from a case when ELC is not supported originally,
- * when we still want to cancel locks in advance and just cancel them
- * locally, without sending any RPC. */
- if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
- return 0;
-
- fid_build_reg_res_name(fid, &res_id);
- res = ldlm_resource_get(exp->exp_obd->obd_namespace,
- NULL, &res_id, 0, 0);
- if (res == NULL)
- return 0;
- LDLM_RESOURCE_ADDREF(res);
- /* Initialize ibits lock policy. */
- policy.l_inodebits.bits = bits;
- count = ldlm_cancel_resource_local(res, cancels, &policy,
- mode, 0, 0, NULL);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return count;
-}
-
-int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
- struct ptlrpc_request **request, struct md_open_data **mod)
-{
- LIST_HEAD(cancels);
- struct ptlrpc_request *req;
- struct mdc_rpc_lock *rpc_lock;
- struct obd_device *obd = exp->exp_obd;
- int count = 0, rc;
- __u64 bits;
-
- LASSERT(op_data != NULL);
-
- bits = MDS_INODELOCK_UPDATE;
- if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
- bits |= MDS_INODELOCK_LOOKUP;
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)) &&
- !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX, bits);
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_SETATTR);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
- 0);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,
- ea2len);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- rpc_lock = obd->u.cli.cl_rpc_lock;
-
- if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
- CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
- LTIME_S(op_data->op_attr.ia_mtime),
- LTIME_S(op_data->op_attr.ia_ctime));
- mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);
-
- ptlrpc_request_set_replen(req);
- if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
- req->rq_import->imp_replayable) {
- LASSERT(*mod == NULL);
-
- *mod = obd_mod_alloc();
- if (*mod == NULL) {
- DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data");
- } else {
- req->rq_replay = 1;
- req->rq_cb_data = *mod;
- (*mod)->mod_open_req = req;
- req->rq_commit_cb = mdc_commit_open;
- (*mod)->mod_is_create = true;
- /**
- * Take an extra reference on \var mod, it protects \var
- * mod from being freed on eviction (commit callback is
- * called despite rq_replay flag).
- * Will be put on mdc_done_writing().
- */
- obd_mod_get(*mod);
- }
- }
-
- rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
-
- /* Save the obtained info in the original RPC for the replay case. */
- if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
- struct mdt_ioepoch *epoch;
- struct mdt_body *body;
-
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(epoch != NULL);
- LASSERT(body != NULL);
- epoch->handle = body->handle;
- epoch->ioepoch = body->ioepoch;
- req->rq_replay_cb = mdc_replay_open;
-	/** bug 3633: open may be committed, and an ESTALE answer is not an error */
- } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
- rc = 0;
- } else if (rc == -ERESTARTSYS) {
- rc = 0;
- }
- *request = req;
- if (rc && req->rq_commit_cb) {
- /* Put an extra reference on \var mod on error case. */
- if (mod != NULL && *mod != NULL)
- obd_mod_put(*mod);
- req->rq_commit_cb(req);
- }
- return rc;
-}
-
-int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid, __u32 gid,
- cfs_cap_t cap_effective, __u64 rdev,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int level, rc;
- int count, resends = 0;
- struct obd_import *import = exp->exp_obd->u.cli.cl_import;
- int generation = import->imp_generation;
- LIST_HEAD(cancels);
-
- /* For case if upper layer did not alloc fid, do it now. */
- if (!fid_is_sane(&op_data->op_fid2)) {
- /*
- * mdc_fid_alloc() may return errno 1 in case of switch to new
- * sequence, handle this.
- */
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("Can't alloc new fid, rc %d\n", rc);
- return rc;
- }
- }
-
-rebuild:
- count = 0;
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_CREATE_RMT_ACL);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
- data && datalen ? datalen : 0);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- /*
- * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
- * tgt, for symlinks or lov MD data.
- */
- mdc_create_pack(req, op_data, data, datalen, mode, uid,
- gid, cap_effective, rdev);
-
- ptlrpc_request_set_replen(req);
-
- /* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
- * logic here */
- req->rq_no_retry_einprogress = 1;
-
- if (resends) {
- req->rq_generation_set = 1;
- req->rq_import_generation = generation;
- req->rq_sent = ktime_get_real_seconds() + resends;
- }
- level = LUSTRE_IMP_FULL;
- resend:
- rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);
-
- /* Resend if we were told to. */
- if (rc == -ERESTARTSYS) {
- level = LUSTRE_IMP_RECOVER;
- goto resend;
- } else if (rc == -EINPROGRESS) {
- /* Retry create infinitely until succeed or get other
- * error code. */
- ptlrpc_req_finished(req);
- resends++;
-
- CDEBUG(D_HA, "%s: resend:%d create on "DFID"/"DFID"\n",
- exp->exp_obd->obd_name, resends,
- PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
-
- if (generation == import->imp_generation) {
- goto rebuild;
- } else {
- CDEBUG(D_HA, "resend cross eviction\n");
- return -EIO;
- }
- }
-
- *request = req;
- return rc;
-}
-
-int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- LIST_HEAD(cancels);
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req = *request;
- int count = 0, rc;
-
- LASSERT(req == NULL);
-
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)) &&
- !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
- (fid_is_sane(&op_data->op_fid3)) &&
- !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
- count += mdc_resource_get_unused(exp, &op_data->op_fid3,
- &cancels, LCK_EX,
- MDS_INODELOCK_FULL);
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_UNLINK);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_unlink_pack(req, op_data);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
- ptlrpc_request_set_replen(req);
-
- *request = req;
-
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
- if (rc == -ERESTARTSYS)
- rc = 0;
- return rc;
-}
-
-int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- LIST_HEAD(cancels);
- struct obd_device *obd = exp->exp_obd;
- struct ptlrpc_request *req;
- int count = 0, rc;
-
- if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
- (fid_is_sane(&op_data->op_fid2)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid2,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_link_pack(req, op_data);
- ptlrpc_request_set_replen(req);
-
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
- *request = req;
- if (rc == -ERESTARTSYS)
- rc = 0;
-
- return rc;
-}
-
-int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen,
- struct ptlrpc_request **request)
-{
- LIST_HEAD(cancels);
- struct obd_device *obd = exp->exp_obd;
- struct ptlrpc_request *req;
- int count = 0, rc;
-
- if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)))
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
- (fid_is_sane(&op_data->op_fid2)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid2,
- &cancels, LCK_EX,
- MDS_INODELOCK_UPDATE);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
- (fid_is_sane(&op_data->op_fid3)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid3,
- &cancels, LCK_EX,
- MDS_INODELOCK_LOOKUP);
- if ((op_data->op_flags & MF_MDC_CANCEL_FID4) &&
- (fid_is_sane(&op_data->op_fid4)))
- count += mdc_resource_get_unused(exp, &op_data->op_fid4,
- &cancels, LCK_EX,
- MDS_INODELOCK_FULL);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_RENAME);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, oldlen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT, newlen+1);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- if (exp_connect_cancelset(exp) && req)
- ldlm_cli_cancel_list(&cancels, count, req, 0);
-
- mdc_rename_pack(req, op_data, old, oldlen, new, newlen);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
- ptlrpc_request_set_replen(req);
-
- rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
- *request = req;
- if (rc == -ERESTARTSYS)
- rc = 0;
-
- return rc;
-}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
deleted file mode 100644
index 87261766a018..000000000000
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ /dev/null
@@ -1,2561 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_MDC
-
-# include <linux/module.h>
-# include <linux/pagemap.h>
-# include <linux/miscdevice.h>
-# include <linux/init.h>
-# include <linux/utsname.h>
-
-#include "../include/lustre_acl.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_fid.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_param.h"
-#include "../include/lustre_log.h"
-
-#include "mdc_internal.h"
-
-#define REQUEST_MINOR 244
-
-static int mdc_cleanup(struct obd_device *obd);
-
-static inline int mdc_queue_wait(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- int rc;
-
-	/* mdc_enter_request() ensures that this client has no more
-	 * than cl_max_rpcs_in_flight RPCs simultaneously in flight
-	 * against an MDT. */
- rc = mdc_enter_request(cli);
- if (rc != 0)
- return rc;
-
- rc = ptlrpc_queue_wait(req);
- mdc_exit_request(cli);
-
- return rc;
-}
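
The enter/exit pair above caps the number of metadata RPCs this client keeps in flight against a single MDT. Below is a minimal userspace sketch of the same gating pattern; the rpc_gate type and the pthread-based waiting are assumptions for illustration only, not the Lustre implementation (which tracks cl_max_rpcs_in_flight inside client_obd).

#include <pthread.h>

/* Illustrative in-flight limiter modelled on the enter/exit pattern above. */
struct rpc_gate {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             in_flight;
	int             max_in_flight;	/* analogous to cl_max_rpcs_in_flight */
};

#define RPC_GATE_INIT(max) \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, (max) }

static void gate_enter(struct rpc_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->in_flight >= g->max_in_flight)
		pthread_cond_wait(&g->cond, &g->lock);
	g->in_flight++;
	pthread_mutex_unlock(&g->lock);
}

static void gate_exit(struct rpc_gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->in_flight--;
	pthread_cond_signal(&g->cond);
	pthread_mutex_unlock(&g->lock);
}
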
-
-static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid)
-{
- struct ptlrpc_request *req;
- struct mdt_body *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_GETSTATUS,
- LUSTRE_MDS_VERSION, MDS_GETSTATUS);
- if (req == NULL)
- return -ENOMEM;
-
- mdc_pack_body(req, NULL, 0, 0, -1, 0);
- req->rq_send_state = LUSTRE_IMP_FULL;
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *rootfid = body->fid1;
- CDEBUG(D_NET,
- "root fid="DFID", last_committed=%llu\n",
- PFID(rootfid),
- lustre_msg_get_last_committed(req->rq_repmsg));
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-/*
- * This function is known to always say that it will receive 4 buffers from
- * the server. Even when acl_size and md_size are zero, the RPC header
- * contains 4 fields and the RPC itself contains zero-size fields. This is
- * because mdt_getattr*() _always_ returns 4 fields; if the ACL is not needed
- * it is shrunk to zero size, and the same applies to md_size. This causes
- * problems when the client expects a smaller number of fields. The issue
- * will be fixed later, when the client becomes aware of RPC
- * layouts. --umka
- */
-static int mdc_getattr_common(struct obd_export *exp,
- struct ptlrpc_request *req)
-{
- struct req_capsule *pill = &req->rq_pill;
- struct mdt_body *body;
- void *eadata;
- int rc;
-
- /* Request message already built. */
- rc = ptlrpc_queue_wait(req);
- if (rc != 0)
- return rc;
-
- /* sanity check for the reply */
- body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- if (body == NULL)
- return -EPROTO;
-
- CDEBUG(D_NET, "mode: %o\n", body->mode);
-
- if (body->eadatasize != 0) {
- mdc_update_max_ea_from_body(exp, body);
-
- eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- body->eadatasize);
- if (eadata == NULL)
- return -EPROTO;
- }
-
- if (body->valid & OBD_MD_FLRMTPERM) {
- struct mdt_remote_perm *perm;
-
- LASSERT(client_is_remote(exp));
- perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (perm == NULL)
- return -EPROTO;
- }
-
- return 0;
-}
-
-static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- /* Single MDS without an LMV case */
- if (op_data->op_flags & MF_GET_MDT_IDX) {
- op_data->op_mds = 0;
- return 0;
- }
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
- op_data->op_mode, -1, 0);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- op_data->op_mode);
- if (op_data->op_valid & OBD_MD_FLRMTPERM) {
- LASSERT(client_is_remote(exp));
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
- }
- ptlrpc_request_set_replen(req);
-
- rc = mdc_getattr_common(exp, req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_GETATTR_NAME);
- if (req == NULL)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- op_data->op_namelen + 1);
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
- op_data->op_mode, op_data->op_suppgids[0], 0);
-
- if (op_data->op_name) {
- char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
-
- LASSERT(strnlen(op_data->op_name, op_data->op_namelen) ==
- op_data->op_namelen);
- memcpy(name, op_data->op_name, op_data->op_namelen);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- op_data->op_mode);
- ptlrpc_request_set_replen(req);
-
- rc = mdc_getattr_common(exp, req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_is_subdir(struct obd_export *exp,
- const struct lu_fid *pfid,
- const struct lu_fid *cfid,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
- MDS_IS_SUBDIR);
- if (req == NULL)
- return -ENOMEM;
-
- mdc_is_subdir_pack(req, pfid, cfid, 0);
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc && rc != -EREMOTE)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_xattr_common(struct obd_export *exp,
- const struct req_format *fmt,
- const struct lu_fid *fid,
- int opcode, u64 valid,
- const char *xattr_name, const char *input,
- int input_size, int output_size, int flags,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int xattr_namelen = 0;
- char *tmp;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
- if (req == NULL)
- return -ENOMEM;
-
- if (xattr_name) {
- xattr_namelen = strlen(xattr_name) + 1;
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- xattr_namelen);
- }
- if (input_size) {
- LASSERT(input);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
- input_size);
- }
-
- /* Flush local XATTR locks to get rid of a possible cancel RPC */
- if (opcode == MDS_REINT && fid_is_sane(fid) &&
- exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
- LIST_HEAD(cancels);
- int count;
-
-		/* Without this, packing would fail */
- if (input_size == 0)
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
- RCL_CLIENT, 0);
-
- count = mdc_resource_get_unused(exp, fid,
- &cancels, LCK_EX,
- MDS_INODELOCK_XATTR);
-
- rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- } else {
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- }
-
- if (opcode == MDS_REINT) {
- struct mdt_rec_setxattr *rec;
-
- CLASSERT(sizeof(struct mdt_rec_setxattr) ==
- sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- rec->sx_opcode = REINT_SETXATTR;
- rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
- rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
- rec->sx_cap = cfs_curproc_cap_pack();
- rec->sx_suppgid1 = suppgid;
- rec->sx_suppgid2 = -1;
- rec->sx_fid = *fid;
- rec->sx_valid = valid | OBD_MD_FLCTIME;
- rec->sx_time = ktime_get_real_seconds();
- rec->sx_size = output_size;
- rec->sx_flags = flags;
-
- } else {
- mdc_pack_body(req, fid, valid, output_size, suppgid, flags);
- }
-
- if (xattr_name) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- memcpy(tmp, xattr_name, xattr_namelen);
- }
- if (input_size) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- memcpy(tmp, input, input_size);
- }
-
- if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER))
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
- RCL_SERVER, output_size);
- ptlrpc_request_set_replen(req);
-
- /* make rpc */
- if (opcode == MDS_REINT)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
-
- rc = ptlrpc_queue_wait(req);
-
- if (opcode == MDS_REINT)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
-
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *xattr_name,
- const char *input, int input_size, int output_size,
- int flags, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
- fid, MDS_REINT, valid, xattr_name,
- input, input_size, output_size, flags,
- suppgid, request);
-}
-
-static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
- u64 valid, const char *xattr_name,
- const char *input, int input_size, int output_size,
- int flags, struct ptlrpc_request **request)
-{
- return mdc_xattr_common(exp, &RQF_MDS_GETXATTR,
- fid, MDS_GETXATTR, valid, xattr_name,
- input, input_size, output_size, flags,
- -1, request);
-}
-
-#ifdef CONFIG_FS_POSIX_ACL
-static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
-{
- struct req_capsule *pill = &req->rq_pill;
- struct mdt_body *body = md->body;
- struct posix_acl *acl;
- void *buf;
- int rc;
-
- if (!body->aclsize)
- return 0;
-
- buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->aclsize);
-
- if (!buf)
- return -EPROTO;
-
- acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
- if (acl == NULL)
- return 0;
-
- if (IS_ERR(acl)) {
- rc = PTR_ERR(acl);
- CERROR("convert xattr to acl: %d\n", rc);
- return rc;
- }
-
- rc = posix_acl_valid(acl);
- if (rc) {
- CERROR("validate acl: %d\n", rc);
- posix_acl_release(acl);
- return rc;
- }
-
- md->posix_acl = acl;
- return 0;
-}
-#else
-#define mdc_unpack_acl(req, md) 0
-#endif
-
-int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
- struct obd_export *dt_exp, struct obd_export *md_exp,
- struct lustre_md *md)
-{
- struct req_capsule *pill = &req->rq_pill;
- int rc;
-
- LASSERT(md);
- memset(md, 0, sizeof(*md));
-
- md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- LASSERT(md->body != NULL);
-
- if (md->body->valid & OBD_MD_FLEASIZE) {
- int lmmsize;
- struct lov_mds_md *lmm;
-
- if (!S_ISREG(md->body->mode)) {
- CDEBUG(D_INFO,
- "OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
- rc = -EPROTO;
- goto out;
- }
-
- if (md->body->eadatasize == 0) {
- CDEBUG(D_INFO,
- "OBD_MD_FLEASIZE set, but eadatasize 0\n");
- rc = -EPROTO;
- goto out;
- }
- lmmsize = md->body->eadatasize;
- lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
- if (!lmm) {
- rc = -EPROTO;
- goto out;
- }
-
- rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize);
- if (rc < 0)
- goto out;
-
- if (rc < sizeof(*md->lsm)) {
- CDEBUG(D_INFO,
- "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n",
- rc, (int)sizeof(*md->lsm));
- rc = -EPROTO;
- goto out;
- }
-
- } else if (md->body->valid & OBD_MD_FLDIREA) {
- int lmvsize;
- struct lov_mds_md *lmv;
-
- if (!S_ISDIR(md->body->mode)) {
- CDEBUG(D_INFO,
- "OBD_MD_FLDIREA set, should be a directory, but is not\n");
- rc = -EPROTO;
- goto out;
- }
-
- if (md->body->eadatasize == 0) {
- CDEBUG(D_INFO,
- "OBD_MD_FLDIREA is set, but eadatasize 0\n");
- return -EPROTO;
- }
- if (md->body->valid & OBD_MD_MEA) {
- lmvsize = md->body->eadatasize;
- lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- lmvsize);
- if (!lmv) {
- rc = -EPROTO;
- goto out;
- }
-
- rc = obd_unpackmd(md_exp, (void *)&md->mea, lmv,
- lmvsize);
- if (rc < 0)
- goto out;
-
- if (rc < sizeof(*md->mea)) {
- CDEBUG(D_INFO,
- "size too small: rc < sizeof(*md->mea) (%d < %d)\n",
- rc, (int)sizeof(*md->mea));
- rc = -EPROTO;
- goto out;
- }
- }
- }
- rc = 0;
-
- if (md->body->valid & OBD_MD_FLRMTPERM) {
- /* remote permission */
- LASSERT(client_is_remote(exp));
- md->remote_perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (!md->remote_perm) {
- rc = -EPROTO;
- goto out;
- }
- } else if (md->body->valid & OBD_MD_FLACL) {
-		/* For ACLs, it is possible that FLACL is set but aclsize is zero;
-		 * only when aclsize != 0 is there an actual ACL segment in the
-		 * reply buffer.
- */
- if (md->body->aclsize) {
- rc = mdc_unpack_acl(req, md);
- if (rc)
- goto out;
-#ifdef CONFIG_FS_POSIX_ACL
- } else {
- md->posix_acl = NULL;
-#endif
- }
- }
-
-out:
- if (rc) {
-#ifdef CONFIG_FS_POSIX_ACL
- posix_acl_release(md->posix_acl);
-#endif
- if (md->lsm)
- obd_free_memmd(dt_exp, &md->lsm);
- }
- return rc;
-}
-
-int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
-{
- return 0;
-}
-
-/**
- * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING
- * RPC chains.
- */
-void mdc_replay_open(struct ptlrpc_request *req)
-{
- struct md_open_data *mod = req->rq_cb_data;
- struct ptlrpc_request *close_req;
- struct obd_client_handle *och;
- struct lustre_handle old;
- struct mdt_body *body;
-
- if (mod == NULL) {
- DEBUG_REQ(D_ERROR, req,
- "Can't properly replay without open data.");
- return;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
-
- och = mod->mod_och;
- if (och != NULL) {
- struct lustre_handle *file_fh;
-
- LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
-
- file_fh = &och->och_fh;
- CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
- file_fh->cookie, body->handle.cookie);
- old = *file_fh;
- *file_fh = body->handle;
- }
- close_req = mod->mod_close_req;
- if (close_req != NULL) {
- __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
- struct mdt_ioepoch *epoch;
-
- LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING);
- epoch = req_capsule_client_get(&close_req->rq_pill,
- &RMF_MDT_EPOCH);
- LASSERT(epoch);
-
- if (och != NULL)
- LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
- DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
- epoch->handle = body->handle;
- }
-}
-
-void mdc_commit_open(struct ptlrpc_request *req)
-{
- struct md_open_data *mod = req->rq_cb_data;
-
- if (mod == NULL)
- return;
-
- /**
-	 * No need to touch md_open_data::mod_och: it holds a reference on
-	 * \var mod, the two will drop their references to each other, and
-	 * \var mod will be freed once md_open_data::mod_och puts its reference.
- */
-
- /**
-	 * Do not let the open request disappear, as it may still be needed
-	 * for the close RPC (this can only happen on eviction; otherwise
-	 * ptlrpc_request::rq_replay keeps mdc_commit_open() from being
-	 * called). Just mark this RPC as committed to distinguish the two
-	 * cases; see mdc_close() for details. The open request reference
-	 * will be put when \var mod is freed.
- */
- ptlrpc_request_addref(req);
- spin_lock(&req->rq_lock);
- req->rq_committed = 1;
- spin_unlock(&req->rq_lock);
- req->rq_cb_data = NULL;
- obd_mod_put(mod);
-}
-
-int mdc_set_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och,
- struct lookup_intent *it)
-{
- struct md_open_data *mod;
- struct mdt_rec_create *rec;
- struct mdt_body *body;
- struct ptlrpc_request *open_req = it->d.lustre.it_data;
- struct obd_import *imp = open_req->rq_import;
-
- if (!open_req->rq_replay)
- return 0;
-
- rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
- body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
- LASSERT(rec != NULL);
- /* Incoming message in my byte order (it's been swabbed). */
- /* Outgoing messages always in my byte order. */
- LASSERT(body != NULL);
-
- /* Only if the import is replayable, we set replay_open data */
- if (och && imp->imp_replayable) {
- mod = obd_mod_alloc();
- if (mod == NULL) {
- DEBUG_REQ(D_ERROR, open_req,
- "Can't allocate md_open_data");
- return 0;
- }
-
- /**
- * Take a reference on \var mod, to be freed on mdc_close().
- * It protects \var mod from being freed on eviction (commit
- * callback is called despite rq_replay flag).
- * Another reference for \var och.
- */
- obd_mod_get(mod);
- obd_mod_get(mod);
-
- spin_lock(&open_req->rq_lock);
- och->och_mod = mod;
- mod->mod_och = och;
- mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
- it_disposition(it, DISP_OPEN_STRIPE);
- mod->mod_open_req = open_req;
- open_req->rq_cb_data = mod;
- open_req->rq_commit_cb = mdc_commit_open;
- spin_unlock(&open_req->rq_lock);
- }
-
- rec->cr_fid2 = body->fid1;
- rec->cr_ioepoch = body->ioepoch;
- rec->cr_old_handle.cookie = body->handle.cookie;
- open_req->rq_replay_cb = mdc_replay_open;
- if (!fid_is_sane(&body->fid1)) {
- DEBUG_REQ(D_ERROR, open_req,
- "Saving replay request with insane fid");
- LBUG();
- }
-
- DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
- return 0;
-}
-
-static void mdc_free_open(struct md_open_data *mod)
-{
- int committed = 0;
-
- if (mod->mod_is_create == 0 &&
- imp_connect_disp_stripe(mod->mod_open_req->rq_import))
- committed = 1;
-
- LASSERT(mod->mod_open_req->rq_replay == 0);
-
- DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "free open request\n");
-
- ptlrpc_request_committed(mod->mod_open_req, committed);
- if (mod->mod_close_req)
- ptlrpc_request_committed(mod->mod_close_req, committed);
-}
-
-int mdc_clear_open_replay_data(struct obd_export *exp,
- struct obd_client_handle *och)
-{
- struct md_open_data *mod = och->och_mod;
-
- /**
-	 * It is possible to not have \var mod in case of an eviction between
- * lookup and ll_file_open().
- **/
- if (mod == NULL)
- return 0;
-
- LASSERT(mod != LP_POISON);
- LASSERT(mod->mod_open_req != NULL);
- mdc_free_open(mod);
-
- mod->mod_och = NULL;
- och->och_mod = NULL;
- obd_mod_put(mod);
-
- return 0;
-}
-
-/* Prepares the request for the replay by the given reply */
-static void mdc_close_handle_reply(struct ptlrpc_request *req,
- struct md_op_data *op_data, int rc) {
- struct mdt_body *repbody;
- struct mdt_ioepoch *epoch;
-
- if (req && rc == -EAGAIN) {
- repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
-
- epoch->flags |= MF_SOM_AU;
- if (repbody->valid & OBD_MD_FLGETATTRLOCK)
- op_data->op_flags |= MF_GETATTR_LOCK;
- }
-}
-
-static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod, struct ptlrpc_request **request)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct req_format *req_fmt;
- int rc;
- int saved_rc = 0;
-
-
- req_fmt = &RQF_MDS_CLOSE;
- if (op_data->op_bias & MDS_HSM_RELEASE) {
- req_fmt = &RQF_MDS_RELEASE_CLOSE;
-
- /* allocate a FID for volatile file */
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("%s: "DFID" failed to allocate FID: %d\n",
- obd->obd_name, PFID(&op_data->op_fid1), rc);
- /* save the errcode and proceed to close */
- saved_rc = rc;
- }
- }
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
- * portal whose threads are not taking any DLM locks and are therefore
- * always progressing */
- req->rq_request_portal = MDS_READPAGE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- /* Ensure that this close's handle is fixed up during replay. */
- if (likely(mod != NULL)) {
- LASSERTF(mod->mod_open_req != NULL &&
- mod->mod_open_req->rq_type != LI_POISON,
- "POISONED open %p!\n", mod->mod_open_req);
-
- mod->mod_close_req = req;
-
- DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
- /* We no longer want to preserve this open for replay even
- * though the open was committed. b=3632, b=3633 */
- spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
- } else {
- CDEBUG(D_HA,
- "couldn't find open req; expecting close error\n");
- }
-
- mdc_close_pack(req, op_data);
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
- obd->u.cli.cl_default_mds_easize);
- req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
- obd->u.cli.cl_default_mds_cookiesize);
-
- ptlrpc_request_set_replen(req);
-
- mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
-
- if (req->rq_repmsg == NULL) {
- CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
- req->rq_status);
- if (rc == 0)
- rc = req->rq_status ?: -EIO;
- } else if (rc == 0 || rc == -EAGAIN) {
- struct mdt_body *body;
-
- rc = lustre_msg_get_status(req->rq_repmsg);
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
- DEBUG_REQ(D_ERROR, req,
- "type == PTL_RPC_MSG_ERR, err = %d", rc);
- if (rc > 0)
- rc = -rc;
- }
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
- rc = -EPROTO;
- } else if (rc == -ESTALE) {
- /**
-		 * ESTALE can be an allowed error after bug 3633 if the open was
-		 * committed and the server failed before the close was sent.
-		 * Check whether mod exists and return no error in that case.
- */
- if (mod) {
- DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
- LASSERT(mod->mod_open_req != NULL);
- if (mod->mod_open_req->rq_committed)
- rc = 0;
- }
- }
-
- if (mod) {
- if (rc != 0)
- mod->mod_close_req = NULL;
-		/* From now on, mod is accessed through open_req only,
-		 * so the close request no longer keeps a reference on mod. */
- obd_mod_put(mod);
- }
- *request = req;
- mdc_close_handle_reply(req, op_data, rc);
- return rc < 0 ? rc : saved_rc;
-}
-
-static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
- struct md_open_data *mod)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_DONE_WRITING);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- if (mod != NULL) {
- LASSERTF(mod->mod_open_req != NULL &&
- mod->mod_open_req->rq_type != LI_POISON,
- "POISONED setattr %p!\n", mod->mod_open_req);
-
- mod->mod_close_req = req;
- DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
- /* We no longer want to preserve this setattr for replay even
- * though the open was committed. b=3632, b=3633 */
- spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- spin_unlock(&mod->mod_open_req->rq_lock);
- }
-
- mdc_close_pack(req, op_data);
- ptlrpc_request_set_replen(req);
-
- mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
- rc = ptlrpc_queue_wait(req);
- mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
-
- if (rc == -ESTALE) {
- /**
-		 * ESTALE can be an allowed error after bug 3633 if the open or
-		 * setattr was committed and the server failed before the close
-		 * was sent. Check whether mod exists and return no error in that case.
- */
- if (mod) {
- LASSERT(mod->mod_open_req != NULL);
- if (mod->mod_open_req->rq_committed)
- rc = 0;
- }
- }
-
- if (mod) {
- if (rc != 0)
- mod->mod_close_req = NULL;
- LASSERT(mod->mod_open_req != NULL);
- mdc_free_open(mod);
-
-		/* From now on, mod is accessed through the setattr request only,
-		 * so the DONE_WRITING request no longer keeps a reference on mod. */
- obd_mod_put(mod);
- }
-
- mdc_close_handle_reply(req, op_data, rc);
- ptlrpc_req_finished(req);
- return rc;
-}
-
-
-static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
- struct page **pages, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- struct ptlrpc_bulk_desc *desc;
- int i;
- wait_queue_head_t waitq;
- int resends = 0;
- struct l_wait_info lwi;
- int rc;
-
- *request = NULL;
- init_waitqueue_head(&waitq);
-
-restart_bulk:
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- req->rq_request_portal = MDS_READPAGE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK,
- MDS_BULK_PORTAL);
- if (desc == NULL) {
- ptlrpc_request_free(req);
- return -ENOMEM;
- }
-
- /* NB req now owns desc and will free it when it gets freed */
- for (i = 0; i < op_data->op_npages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
-
- mdc_readdir_pack(req, op_data->op_offset,
- PAGE_CACHE_SIZE * op_data->op_npages,
- &op_data->op_fid1);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc) {
- ptlrpc_req_finished(req);
- if (rc != -ETIMEDOUT)
- return rc;
-
- resends++;
- if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
- CERROR("too many resend retries, returning error\n");
- return -EIO;
- }
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends),
- NULL, NULL, NULL);
- l_wait_event(waitq, 0, &lwi);
-
- goto restart_bulk;
- }
-
- rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
- req->rq_bulk->bd_nob_transferred);
- if (rc < 0) {
- ptlrpc_req_finished(req);
- return rc;
- }
-
- if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
- CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
- req->rq_bulk->bd_nob_transferred,
- PAGE_CACHE_SIZE * op_data->op_npages);
- ptlrpc_req_finished(req);
- return -EPROTO;
- }
-
- *request = req;
- return 0;
-}
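
mdc_readpage() above retries the bulk transfer only on -ETIMEDOUT, sleeping for 'resends' seconds between attempts and giving up once client_should_resend() refuses further resends. Below is a minimal sketch of that retry shape; issue_request() and the fixed max_resends cap are hypothetical stand-ins for the real request path and policy.

#include <errno.h>
#include <unistd.h>

/* Bounded retry with linear backoff, modelled on the restart_bulk loop above.
 * Negative-errno return values mimic the kernel convention used in the code. */
int issue_with_resend(int (*issue_request)(void), int max_resends)
{
	int resends = 0;
	int rc;

	for (;;) {
		rc = issue_request();
		if (rc != -ETIMEDOUT)
			return rc;		/* success or a hard error */

		if (++resends > max_resends)
			return -EIO;		/* too many resend retries */

		sleep(resends);			/* wait 'resends' seconds, as above */
	}
}
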
-
-static int mdc_statfs(const struct lu_env *env,
- struct obd_export *exp, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct obd_statfs *msfs;
- struct obd_import *imp = NULL;
- int rc;
-
- /*
-	 * Since the request might also come from lprocfs, we need to
-	 * sync this with client_disconnect_export (bug 15684).
- */
- down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import)
- imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
- if (!imp)
- return -ENODEV;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
- LUSTRE_MDS_VERSION, MDS_STATFS);
- if (req == NULL) {
- rc = -ENOMEM;
- goto output;
- }
-
- ptlrpc_request_set_replen(req);
-
- if (flags & OBD_STATFS_NODELAY) {
-		/* procfs requests should not wait, to avoid a deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc) {
- /* check connection error first */
- if (imp->imp_connect_error)
- rc = imp->imp_connect_error;
- goto out;
- }
-
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *osfs = *msfs;
-out:
- ptlrpc_req_finished(req);
-output:
- class_import_put(imp);
- return rc;
-}
-
-static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
-{
- __u32 keylen, vallen;
- void *key;
- int rc;
-
- if (gf->gf_pathlen > PATH_MAX)
- return -ENAMETOOLONG;
- if (gf->gf_pathlen < 2)
- return -EOVERFLOW;
-
- /* Key is KEY_FID2PATH + getinfo_fid2path description */
- keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
- key = kzalloc(keylen, GFP_NOFS);
- if (!key)
- return -ENOMEM;
- memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
- memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
-
- CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n",
- PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
-
- if (!fid_is_sane(&gf->gf_fid)) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Val is struct getinfo_fid2path result plus path */
- vallen = sizeof(*gf) + gf->gf_pathlen;
-
- rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL);
- if (rc != 0 && rc != -EREMOTE)
- goto out;
-
- if (vallen <= sizeof(*gf)) {
- rc = -EPROTO;
- goto out;
- } else if (vallen > sizeof(*gf) + gf->gf_pathlen) {
- rc = -EOVERFLOW;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n%s\n",
- PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path);
-
-out:
- kfree(key);
- return rc;
-}
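
The key built above is the KEY_FID2PATH name rounded up with cfs_size_round() and followed by the getinfo_fid2path descriptor. A userspace sketch of that layout follows; it assumes 8-byte rounding and uses a simplified stand-in descriptor (the real struct getinfo_fid2path also carries the FID and the path buffer).

#include <string.h>
#include <stdlib.h>

#define SIZE_ROUND(v)	(((v) + 7) & ~7UL)	/* assumed 8-byte rounding */

struct fid2path_desc {		/* illustrative stand-in, not the real struct */
	unsigned long long recno;
	unsigned int linkno;
	unsigned int pathlen;
};

static void *build_fid2path_key(const char *keyname,
				const struct fid2path_desc *gf,
				size_t *keylen)
{
	size_t namelen = SIZE_ROUND(strlen(keyname) + 1);
	void *key;

	*keylen = namelen + sizeof(*gf);
	key = calloc(1, *keylen);
	if (!key)
		return NULL;

	memcpy(key, keyname, strlen(keyname) + 1);	/* key name, NUL included */
	memcpy((char *)key + namelen, gf, sizeof(*gf));	/* descriptor after padding */
	return key;
}
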
-
-static int mdc_ioc_hsm_progress(struct obd_export *exp,
- struct hsm_progress_kernel *hpk)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- struct hsm_progress_kernel *req_hpk;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
- LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
-
- /* Copy hsm_progress struct */
- req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
- if (req_hpk == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *req_hpk = *hpk;
- req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- goto out;
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
-{
- __u32 *archive_mask;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
- LUSTRE_MDS_VERSION,
- MDS_HSM_CT_REGISTER);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
-
-	/* Copy the archive mask */
- archive_mask = req_capsule_client_get(&req->rq_pill,
- &RMF_MDS_HSM_ARCHIVE);
- if (archive_mask == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *archive_mask = archives;
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- goto out;
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_current_action(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- struct hsm_current_action *hca = op_data->op_data;
- struct hsm_current_action *req_hca;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_HSM_ACTION);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
- op_data->op_suppgids[0], 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- if (rc)
- goto out;
-
- req_hca = req_capsule_server_get(&req->rq_pill,
- &RMF_MDS_HSM_CURRENT_ACTION);
- if (req_hca == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *hca = *req_hca;
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
- LUSTRE_MDS_VERSION,
- MDS_HSM_CT_UNREGISTER);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- goto out;
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_state_get(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- struct hsm_user_state *hus = op_data->op_data;
- struct hsm_user_state *req_hus;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_HSM_STATE_GET);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
- if (rc != 0) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
- op_data->op_suppgids[0], 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- if (rc)
- goto out;
-
- req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
- if (req_hus == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *hus = *req_hus;
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_state_set(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- struct hsm_state_set *hss = op_data->op_data;
- struct hsm_state_set *req_hss;
- struct ptlrpc_request *req;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_HSM_STATE_SET);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
- op_data->op_suppgids[0], 0);
-
- /* Copy states */
- req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
- if (req_hss == NULL) {
- rc = -EPROTO;
- goto out;
- }
- *req_hss = *hss;
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- goto out;
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_ioc_hsm_request(struct obd_export *exp,
- struct hsm_user_request *hur)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- struct ptlrpc_request *req;
- struct hsm_request *req_hr;
- struct hsm_user_item *req_hui;
- char *req_opaque;
- int rc;
-
- req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT,
- hur->hur_request.hr_itemcount
- * sizeof(struct hsm_user_item));
- req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT,
- hur->hur_request.hr_data_len);
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
-
- /* Copy hsm_request struct */
- req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
- if (req_hr == NULL) {
- rc = -EPROTO;
- goto out;
- }
- *req_hr = hur->hur_request;
-
- /* Copy hsm_user_item structs */
- req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
- if (req_hui == NULL) {
- rc = -EPROTO;
- goto out;
- }
- memcpy(req_hui, hur->hur_user_item,
- hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item));
-
- /* Copy opaque field */
- req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
- if (req_opaque == NULL) {
- rc = -EPROTO;
- goto out;
- }
- memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len);
-
- ptlrpc_request_set_replen(req);
-
- rc = mdc_queue_wait(req);
- goto out;
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
-{
- struct kuc_hdr *lh = (struct kuc_hdr *)buf;
-
- LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
-
- lh->kuc_magic = KUC_MAGIC;
- lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
- lh->kuc_flags = flags;
- lh->kuc_msgtype = CL_RECORD;
- lh->kuc_msglen = len;
- return lh;
-}
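
changelog_kuc_hdr() above stamps a kuc_hdr at the start of the buffer, and the caller then copies the changelog record immediately after it (the memcpy(lh + 1, ...) in changelog_kkuc_cb()). Here is a userspace model of that header-plus-payload packing; the struct layout and the constants are placeholders for this sketch, not the real kuc_hdr definition.

#include <string.h>

struct msg_hdr {			/* illustrative stand-in for kuc_hdr */
	unsigned short magic;
	unsigned char  transport;
	unsigned char  flags;
	unsigned short msgtype;
	unsigned short msglen;		/* header + payload, in bytes */
};

#define SKETCH_MAGIC	0xABCD		/* placeholder, not KUC_MAGIC */
#define SKETCH_RECORD	1		/* placeholder, not CL_RECORD */

static size_t pack_record(char *buf, size_t buflen,
			  const void *rec, size_t reclen)
{
	struct msg_hdr *h = (struct msg_hdr *)buf;
	size_t total = sizeof(*h) + reclen;

	if (total > buflen)
		return 0;

	h->magic = SKETCH_MAGIC;
	h->transport = 0;
	h->flags = 0;
	h->msgtype = SKETCH_RECORD;
	h->msglen = (unsigned short)total;
	memcpy(buf + sizeof(*h), rec, reclen);	/* payload follows the header */
	return total;
}
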
-
-#define D_CHANGELOG 0
-
-struct changelog_show {
- __u64 cs_startrec;
- __u32 cs_flags;
- struct file *cs_fp;
- char *cs_buf;
- struct obd_device *cs_obd;
-};
-
-static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
- struct llog_rec_hdr *hdr, void *data)
-{
- struct changelog_show *cs = data;
- struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
- struct kuc_hdr *lh;
- int len, rc;
-
- if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
- rc = -EINVAL;
- CERROR("%s: not a changelog rec %x/%d: rc = %d\n",
- cs->cs_obd->obd_name, rec->cr_hdr.lrh_type,
- rec->cr.cr_type, rc);
- return rc;
- }
-
- if (rec->cr.cr_index < cs->cs_startrec) {
- /* Skip entries earlier than what we are interested in */
- CDEBUG(D_CHANGELOG, "rec=%llu start=%llu\n",
- rec->cr.cr_index, cs->cs_startrec);
- return 0;
- }
-
- CDEBUG(D_CHANGELOG, "%llu %02d%-5s %llu 0x%x t="DFID" p="DFID
- " %.*s\n", rec->cr.cr_index, rec->cr.cr_type,
- changelog_type2str(rec->cr.cr_type), rec->cr.cr_time,
- rec->cr.cr_flags & CLF_FLAGMASK,
- PFID(&rec->cr.cr_tfid), PFID(&rec->cr.cr_pfid),
- rec->cr.cr_namelen, changelog_rec_name(&rec->cr));
-
- len = sizeof(*lh) + changelog_rec_size(&rec->cr) + rec->cr.cr_namelen;
-
- /* Set up the message */
- lh = changelog_kuc_hdr(cs->cs_buf, len, cs->cs_flags);
- memcpy(lh + 1, &rec->cr, len - sizeof(*lh));
-
- rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
- CDEBUG(D_CHANGELOG, "kucmsg fp %p len %d rc %d\n", cs->cs_fp, len, rc);
-
- return rc;
-}
-
-static int mdc_changelog_send_thread(void *csdata)
-{
- struct changelog_show *cs = csdata;
- struct llog_ctxt *ctxt = NULL;
- struct llog_handle *llh = NULL;
- struct kuc_hdr *kuch;
- int rc;
-
- CDEBUG(D_CHANGELOG, "changelog to fp=%p start %llu\n",
- cs->cs_fp, cs->cs_startrec);
-
- cs->cs_buf = kzalloc(KUC_CHANGELOG_MSG_MAXSIZE, GFP_NOFS);
- if (!cs->cs_buf) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Set up the remote catalog handle */
- ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
- if (ctxt == NULL) {
- rc = -ENOENT;
- goto out;
- }
- rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG,
- LLOG_OPEN_EXISTS);
- if (rc) {
- CERROR("%s: fail to open changelog catalog: rc = %d\n",
- cs->cs_obd->obd_name, rc);
- goto out;
- }
- rc = llog_init_handle(NULL, llh, LLOG_F_IS_CAT, NULL);
- if (rc) {
- CERROR("llog_init_handle failed %d\n", rc);
- goto out;
- }
-
- rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0);
-
- /* Send EOF no matter what our result */
- kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
- if (kuch) {
- kuch->kuc_msgtype = CL_EOF;
- libcfs_kkuc_msg_put(cs->cs_fp, kuch);
- }
-
-out:
- fput(cs->cs_fp);
- if (llh)
- llog_cat_close(NULL, llh);
- if (ctxt)
- llog_ctxt_put(ctxt);
- kfree(cs->cs_buf);
- kfree(cs);
- return rc;
-}
-
-static int mdc_ioc_changelog_send(struct obd_device *obd,
- struct ioc_changelog *icc)
-{
- struct changelog_show *cs;
- int rc;
-
- /* Freed in mdc_changelog_send_thread */
- cs = kzalloc(sizeof(*cs), GFP_NOFS);
- if (!cs)
- return -ENOMEM;
-
- cs->cs_obd = obd;
- cs->cs_startrec = icc->icc_recno;
- /* matching fput in mdc_changelog_send_thread */
- cs->cs_fp = fget(icc->icc_id);
- cs->cs_flags = icc->icc_flags;
-
- /*
- * New thread because we should return to user app before
- * writing into our pipe
- */
- rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs,
- "mdc_clg_send_thread"));
- if (!IS_ERR_VALUE(rc)) {
- CDEBUG(D_CHANGELOG, "start changelog thread\n");
- return 0;
- }
-
- CERROR("Failed to start changelog thread: %d\n", rc);
- kfree(cs);
- return rc;
-}
-
-static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
- struct lustre_kernelcomm *lk);
-
-static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct obd_quotactl *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
- MDS_QUOTACHECK);
- if (req == NULL)
- return -ENOMEM;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *body = *oqctl;
-
- ptlrpc_request_set_replen(req);
-
-	/* the next poll will find -ENODATA, which means quotacheck is
-	 * still in progress */
- cli->cl_qchk_stat = -ENODATA;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- cli->cl_qchk_stat = rc;
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_quota_poll_check(struct obd_export *exp,
- struct if_quotacheck *qchk)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc;
-
- qchk->obd_uuid = cli->cl_target_uuid;
- memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
-
- rc = cli->cl_qchk_stat;
- /* the client is not the previous one */
- if (rc == CL_NOT_QUOTACHECKED)
- rc = -EINTR;
- return rc;
-}
-
-static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct ptlrpc_request *req;
- struct obd_quotactl *oqc;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
- MDS_QUOTACTL);
- if (req == NULL)
- return -ENOMEM;
-
- oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *oqc = *oqctl;
-
- ptlrpc_request_set_replen(req);
- ptlrpc_at_set_req_timeout(req);
- req->rq_no_resend = 1;
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
-
- if (req->rq_repmsg) {
- oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (oqc) {
- *oqctl = *oqc;
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-static int mdc_ioc_swap_layouts(struct obd_export *exp,
- struct md_op_data *op_data)
-{
- LIST_HEAD(cancels);
- struct ptlrpc_request *req;
- int rc, count;
- struct mdc_swap_layouts *msl, *payload;
-
- msl = op_data->op_data;
-
-	/* When the MDT receives the MDS_SWAP_LAYOUTS RPC, the
-	 * first thing it does is cancel the two layout locks
-	 * held by this client.
-	 * So the client must cancel its layout locks on the two FIDs
-	 * along with the request RPC to avoid extra RPC round trips.
- */
- count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
- LCK_CR, MDS_INODELOCK_LAYOUT);
- count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
- LCK_CR, MDS_INODELOCK_LAYOUT);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_SWAP_LAYOUTS);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
-
- rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_swap_layouts_pack(req, op_data);
-
- payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS);
- LASSERT(payload);
-
- *payload = *msl;
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
-
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
-{
- struct obd_device *obd = exp->exp_obd;
- struct obd_ioctl_data *data = karg;
- struct obd_import *imp = obd->u.cli.cl_import;
- int rc;
-
- if (!try_module_get(THIS_MODULE)) {
- CERROR("Can't get module. Is it alive?");
- return -EINVAL;
- }
- switch (cmd) {
- case OBD_IOC_CHANGELOG_SEND:
- rc = mdc_ioc_changelog_send(obd, karg);
- goto out;
- case OBD_IOC_CHANGELOG_CLEAR: {
- struct ioc_changelog *icc = karg;
- struct changelog_setinfo cs = {
- .cs_recno = icc->icc_recno,
- .cs_id = icc->icc_id
- };
-
- rc = obd_set_info_async(NULL, exp, strlen(KEY_CHANGELOG_CLEAR),
- KEY_CHANGELOG_CLEAR, sizeof(cs), &cs,
- NULL);
- goto out;
- }
- case OBD_IOC_FID2PATH:
- rc = mdc_ioc_fid2path(exp, karg);
- goto out;
- case LL_IOC_HSM_CT_START:
- rc = mdc_ioc_hsm_ct_start(exp, karg);
- /* ignore if it was already registered on this MDS. */
- if (rc == -EEXIST)
- rc = 0;
- goto out;
- case LL_IOC_HSM_PROGRESS:
- rc = mdc_ioc_hsm_progress(exp, karg);
- goto out;
- case LL_IOC_HSM_STATE_GET:
- rc = mdc_ioc_hsm_state_get(exp, karg);
- goto out;
- case LL_IOC_HSM_STATE_SET:
- rc = mdc_ioc_hsm_state_set(exp, karg);
- goto out;
- case LL_IOC_HSM_ACTION:
- rc = mdc_ioc_hsm_current_action(exp, karg);
- goto out;
- case LL_IOC_HSM_REQUEST:
- rc = mdc_ioc_hsm_request(exp, karg);
- goto out;
- case OBD_IOC_CLIENT_RECOVER:
- rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);
- if (rc < 0)
- goto out;
- rc = 0;
- goto out;
- case IOC_OSC_SET_ACTIVE:
- rc = ptlrpc_set_import_active(imp, data->ioc_offset);
- goto out;
- case OBD_IOC_POLL_QUOTACHECK:
- rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
- goto out;
- case OBD_IOC_PING_TARGET:
- rc = ptlrpc_obd_ping(obd);
- goto out;
- /*
- * Normally IOC_OBD_STATFS, OBD_IOC_QUOTACTL iocontrol are handled by
- * LMV instead of MDC. But when the cluster is upgraded from 1.8,
- * there'd be no LMV layer thus we might be called here. Eventually
- * this code should be removed.
- * bz20731, LU-592.
- */
- case IOC_OBD_STATFS: {
- struct obd_statfs stat_buf = {0};
-
- if (*((__u32 *) data->ioc_inlbuf2) != 0) {
- rc = -ENODEV;
- goto out;
- }
-
- /* copy UUID */
- if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
- min_t(size_t, data->ioc_plen2,
- sizeof(struct obd_uuid)))) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- 0);
- if (rc != 0)
- goto out;
-
- if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min_t(size_t, data->ioc_plen1,
- sizeof(stat_buf)))) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = 0;
- goto out;
- }
- case OBD_IOC_QUOTACTL: {
- struct if_quotactl *qctl = karg;
- struct obd_quotactl *oqctl;
-
- oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (!oqctl) {
- rc = -ENOMEM;
- goto out;
- }
-
- QCTL_COPY(oqctl, qctl);
- rc = obd_quotactl(exp, oqctl);
- if (rc == 0) {
- QCTL_COPY(qctl, oqctl);
- qctl->qc_valid = QC_MDTIDX;
- qctl->obd_uuid = obd->u.cli.cl_target_uuid;
- }
-
- kfree(oqctl);
- goto out;
- }
- case LL_IOC_GET_CONNECT_FLAGS:
- if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
- sizeof(*exp_connect_flags_ptr(exp)))) {
- rc = -EFAULT;
- goto out;
- }
-
- rc = 0;
- goto out;
- case LL_IOC_LOV_SWAP_LAYOUTS:
- rc = mdc_ioc_swap_layouts(exp, karg);
- goto out;
- default:
- CERROR("unrecognised ioctl: cmd = %#x\n", cmd);
- rc = -ENOTTY;
- goto out;
- }
-out:
- module_put(THIS_MODULE);
-
- return rc;
-}
-
-static int mdc_get_info_rpc(struct obd_export *exp,
- u32 keylen, void *key,
- int vallen, void *val)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- struct ptlrpc_request *req;
- char *tmp;
- int rc = -EINVAL;
-
- req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
- if (req == NULL)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN,
- RCL_CLIENT, sizeof(__u32));
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN);
- memcpy(tmp, &vallen, sizeof(__u32));
-
- req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL,
- RCL_SERVER, vallen);
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- /* -EREMOTE means the get_info result is partial, and it needs to
- * continue on another MDT, see fid2path part in lmv_iocontrol */
- if (rc == 0 || rc == -EREMOTE) {
- tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
- memcpy(val, tmp, vallen);
- if (ptlrpc_rep_need_swab(req)) {
- if (KEY_IS(KEY_FID2PATH))
- lustre_swab_fid2path(val);
- }
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-static void lustre_swab_hai(struct hsm_action_item *h)
-{
- __swab32s(&h->hai_len);
- __swab32s(&h->hai_action);
- lustre_swab_lu_fid(&h->hai_fid);
- lustre_swab_lu_fid(&h->hai_dfid);
- __swab64s(&h->hai_cookie);
- __swab64s(&h->hai_extent.offset);
- __swab64s(&h->hai_extent.length);
- __swab64s(&h->hai_gid);
-}
-
-static void lustre_swab_hal(struct hsm_action_list *h)
-{
- struct hsm_action_item *hai;
- int i;
-
- __swab32s(&h->hal_version);
- __swab32s(&h->hal_count);
- __swab32s(&h->hal_archive_id);
- __swab64s(&h->hal_flags);
- hai = hai_zero(h);
- for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
- lustre_swab_hai(hai);
-}
-
-static void lustre_swab_kuch(struct kuc_hdr *l)
-{
- __swab16s(&l->kuc_magic);
- /* __u8 l->kuc_transport */
- __swab16s(&l->kuc_msgtype);
- __swab16s(&l->kuc_msglen);
-}
-
-static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
- struct lustre_kernelcomm *lk)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- __u32 archive = lk->lk_data;
- int rc = 0;
-
- if (lk->lk_group != KUC_GRP_HSM) {
- CERROR("Bad copytool group %d\n", lk->lk_group);
- return -EINVAL;
- }
-
- CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
- lk->lk_uid, lk->lk_group, lk->lk_flags);
-
- if (lk->lk_flags & LK_FLG_STOP) {
- /* Unregister with the coordinator */
- rc = mdc_ioc_hsm_ct_unregister(imp);
- } else {
- rc = mdc_ioc_hsm_ct_register(imp, archive);
- }
-
- return rc;
-}
-
-/**
- * Send a message to any listening copytools
- * @param val KUC message (kuc_hdr + hsm_action_list)
- * @param len total length of message
- */
-static int mdc_hsm_copytool_send(int len, void *val)
-{
- struct kuc_hdr *lh = (struct kuc_hdr *)val;
- struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
-
- if (len < sizeof(*lh) + sizeof(*hal)) {
- CERROR("Short HSM message %d < %d\n", len,
- (int) (sizeof(*lh) + sizeof(*hal)));
- return -EPROTO;
- }
- if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
- lustre_swab_kuch(lh);
- lustre_swab_hal(hal);
- } else if (lh->kuc_magic != KUC_MAGIC) {
- CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
- return -EPROTO;
- }
-
- CDEBUG(D_HSM,
- "Received message mg=%x t=%d m=%d l=%d actions=%d on %s\n",
- lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
- lh->kuc_msglen, hal->hal_count, hal->hal_fsname);
-
- /* Broadcast to HSM listeners */
- return libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
-}
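
mdc_hsm_copytool_send() above detects a byte-swapped sender by comparing the message magic against __swab16(KUC_MAGIC) and swabs the whole message when it matches, rejecting anything else. A minimal sketch of that magic-based endianness check on an illustrative struct follows; the MAGIC value and the fields are assumptions, and __builtin_bswap*() stands in for the kernel swab helpers.

#include <stdint.h>

#define SKETCH_MAGIC	0x1234		/* placeholder magic value */

struct example_msg {
	uint16_t magic;
	uint16_t msglen;
	uint32_t count;
};

static int fixup_endianness(struct example_msg *m)
{
	if (m->magic == __builtin_bswap16(SKETCH_MAGIC)) {
		/* sender was opposite-endian: swab every field */
		m->magic  = __builtin_bswap16(m->magic);
		m->msglen = __builtin_bswap16(m->msglen);
		m->count  = __builtin_bswap32(m->count);
	} else if (m->magic != SKETCH_MAGIC) {
		return -1;	/* neither native nor swapped: bad message */
	}
	return 0;
}
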
-
-/**
- * callback function passed to kuc for re-registering each HSM copytool
- * running on MDC, after MDT shutdown/recovery.
- * @param data archive id served by the copytool
- * @param cb_arg callback argument (obd_import)
- */
-static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg)
-{
- struct obd_import *imp = (struct obd_import *)cb_arg;
- __u32 archive = data;
- int rc;
-
- CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n",
- archive);
- rc = mdc_ioc_hsm_ct_register(imp, archive);
-
- /* ignore error if the copytool is already registered */
- return ((rc != 0) && (rc != -EEXIST)) ? rc : 0;
-}
-
-/**
- * Re-establish all kuc contexts with MDT
- * after MDT shutdown/recovery.
- */
-static int mdc_kuc_reregister(struct obd_import *imp)
-{
- /* re-register HSM agents */
- return libcfs_kkuc_group_foreach(KUC_GRP_HSM, mdc_hsm_ct_reregister,
- (void *)imp);
-}
-
-static int mdc_set_info_async(const struct lu_env *env,
- struct obd_export *exp,
- u32 keylen, void *key,
- u32 vallen, void *val,
- struct ptlrpc_request_set *set)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
- int rc;
-
- if (KEY_IS(KEY_READ_ONLY)) {
- if (vallen != sizeof(int))
- return -EINVAL;
-
- spin_lock(&imp->imp_lock);
- if (*((int *)val)) {
- imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
- imp->imp_connect_data.ocd_connect_flags |=
- OBD_CONNECT_RDONLY;
- } else {
- imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
- imp->imp_connect_data.ocd_connect_flags &=
- ~OBD_CONNECT_RDONLY;
- }
- spin_unlock(&imp->imp_lock);
-
- rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
- keylen, key, vallen, val, set);
- return rc;
- }
- if (KEY_IS(KEY_SPTLRPC_CONF)) {
- sptlrpc_conf_client_adapt(exp->exp_obd);
- return 0;
- }
- if (KEY_IS(KEY_FLUSH_CTX)) {
- sptlrpc_import_flush_my_ctx(imp);
- return 0;
- }
- if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
- rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
- keylen, key, vallen, val, set);
- return rc;
- }
- if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
- rc = mdc_hsm_copytool_send(vallen, val);
- return rc;
- }
-
- CERROR("Unknown key %s\n", (char *)key);
- return -EINVAL;
-}
-
-static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
-{
- int rc = -EINVAL;
-
- if (KEY_IS(KEY_MAX_EASIZE)) {
- int mdsize, *max_easize;
-
- if (*vallen != sizeof(int))
- return -EINVAL;
- mdsize = *(int *)val;
- if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
- exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
- max_easize = val;
- *max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
- return 0;
- } else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
- int *default_easize;
-
- if (*vallen != sizeof(int))
- return -EINVAL;
- default_easize = val;
- *default_easize = exp->exp_obd->u.cli.cl_default_mds_easize;
- return 0;
- } else if (KEY_IS(KEY_MAX_COOKIESIZE)) {
- int mdsize, *max_cookiesize;
-
- if (*vallen != sizeof(int))
- return -EINVAL;
- mdsize = *(int *)val;
- if (mdsize > exp->exp_obd->u.cli.cl_max_mds_cookiesize)
- exp->exp_obd->u.cli.cl_max_mds_cookiesize = mdsize;
- max_cookiesize = val;
- *max_cookiesize = exp->exp_obd->u.cli.cl_max_mds_cookiesize;
- return 0;
- } else if (KEY_IS(KEY_DEFAULT_COOKIESIZE)) {
- int *default_cookiesize;
-
- if (*vallen != sizeof(int))
- return -EINVAL;
- default_cookiesize = val;
- *default_cookiesize =
- exp->exp_obd->u.cli.cl_default_mds_cookiesize;
- return 0;
- } else if (KEY_IS(KEY_CONN_DATA)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- struct obd_connect_data *data = val;
-
- if (*vallen != sizeof(*data))
- return -EINVAL;
-
- *data = imp->imp_connect_data;
- return 0;
- } else if (KEY_IS(KEY_TGT_COUNT)) {
- *((int *)val) = 1;
- return 0;
- }
-
- rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);
-
- return rc;
-}
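
The get_info/set_info_async handlers above dispatch on a key string and validate the caller-supplied buffer length before casting val. The following is a minimal userspace sketch of that pattern, not part of the removed code; the key names and the key_is() helper are made up for illustration.

/*
 * Illustrative sketch only: a userspace analogue of the KEY_IS()-style
 * dispatch used by mdc_get_info()/mdc_set_info_async(). Key names and
 * cached values below are hypothetical.
 */
#include <stdio.h>
#include <string.h>

static int key_is(const char *key, size_t keylen, const char *name)
{
	return keylen == strlen(name) + 1 && strcmp(key, name) == 0;
}

static int get_info(const char *key, size_t keylen, size_t *vallen, void *val)
{
	if (key_is(key, keylen, "max_easize")) {
		if (*vallen != sizeof(int))
			return -1;		/* -EINVAL in the kernel */
		*(int *)val = 128;		/* made-up cached value */
		return 0;
	}
	if (key_is(key, keylen, "tgt_count")) {
		if (*vallen != sizeof(int))
			return -1;
		*(int *)val = 1;		/* an MDC always has one target */
		return 0;
	}
	return -1;				/* unknown key */
}

int main(void)
{
	int v = 0;
	size_t len = sizeof(v);

	if (get_info("max_easize", sizeof("max_easize"), &len, &v) == 0)
		printf("max_easize = %d\n", v);
	return 0;
}
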
-
-static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, fid, 0, 0, -1, 0);
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
- enum obd_import_event event)
-{
- int rc = 0;
-
- LASSERT(imp->imp_obd == obd);
-
- switch (event) {
- case IMP_EVENT_DISCON: {
-#if 0
- /* XXX Pass event up to OBDs stack. used only for FLD now */
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DISCON, NULL);
-#endif
- break;
- }
- case IMP_EVENT_INACTIVE: {
- struct client_obd *cli = &obd->u.cli;
- /*
- * Flush the current sequence so the client obtains a new one
- * from the server in case of disconnect/reconnect.
- */
- if (cli->cl_seq != NULL)
- seq_client_flush(cli->cl_seq);
-
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
- break;
- }
- case IMP_EVENT_INVALIDATE: {
- struct ldlm_namespace *ns = obd->obd_namespace;
-
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
-
- break;
- }
- case IMP_EVENT_ACTIVE:
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
- /* redo the kuc registration after reconnecting */
- if (rc == 0)
- rc = mdc_kuc_reregister(imp);
- break;
- case IMP_EVENT_OCD:
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
- break;
- case IMP_EVENT_DEACTIVATE:
- case IMP_EVENT_ACTIVATE:
- break;
- default:
- CERROR("Unknown import event %x\n", event);
- LBUG();
- }
- return rc;
-}
-
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct lu_client_seq *seq = cli->cl_seq;
-
- return seq_client_alloc_fid(NULL, seq, fid);
-}
-
-static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
-
- return &cli->cl_target_uuid;
-}
-
-/**
- * Determine whether the lock can be canceled before replaying it during
- * recovery; a non-zero value is returned if the lock can be canceled,
- * or zero if it cannot
- */
-static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
-{
- if (lock->l_resource->lr_type != LDLM_IBITS)
- return 0;
-
- /* FIXME: if we ever get into a situation where there are too many
- * open files with open locks on a single node, then we really
- * should replay these open locks to re-acquire them */
- if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
- return 0;
-
- return 1;
-}
-
-static int mdc_resource_inode_free(struct ldlm_resource *res)
-{
- if (res->lr_lvb_inode)
- res->lr_lvb_inode = NULL;
-
- return 0;
-}
-
-static struct ldlm_valblock_ops inode_lvbo = {
- .lvbo_free = mdc_resource_inode_free,
-};
-
-static int mdc_llog_init(struct obd_device *obd)
-{
- struct obd_llog_group *olg = &obd->obd_olg;
- struct llog_ctxt *ctxt;
- int rc;
-
- rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, obd,
- &llog_client_ops);
- if (rc)
- return rc;
-
- ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
- llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
-
- return 0;
-}
-
-static void mdc_llog_finish(struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
- if (ctxt)
- llog_cleanup(NULL, ctxt);
-}
-
-static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
-{
- struct client_obd *cli = &obd->u.cli;
- struct lprocfs_static_vars lvars = { NULL };
- int rc;
-
- cli->cl_rpc_lock = kzalloc(sizeof(*cli->cl_rpc_lock), GFP_NOFS);
- if (!cli->cl_rpc_lock)
- return -ENOMEM;
- mdc_init_rpc_lock(cli->cl_rpc_lock);
-
- ptlrpcd_addref();
-
- cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
- if (!cli->cl_close_lock) {
- rc = -ENOMEM;
- goto err_rpc_lock;
- }
- mdc_init_rpc_lock(cli->cl_close_lock);
-
- rc = client_obd_setup(obd, cfg);
- if (rc)
- goto err_close_lock;
- lprocfs_mdc_init_vars(&lvars);
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
- sptlrpc_lprocfs_cliobd_attach(obd);
- ptlrpc_lprocfs_register_obd(obd);
-
- ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
-
- obd->obd_namespace->ns_lvbo = &inode_lvbo;
-
- rc = mdc_llog_init(obd);
- if (rc) {
- mdc_cleanup(obd);
- CERROR("failed to setup llogging subsystems\n");
- }
-
- return rc;
-
-err_close_lock:
- kfree(cli->cl_close_lock);
-err_rpc_lock:
- kfree(cli->cl_rpc_lock);
- ptlrpcd_decref();
- return rc;
-}
-
-/* Initialize the default and maximum LOV EA and cookie sizes. This allows
- * us to make MDS RPCs with large enough reply buffers to hold a default
- * sized EA and cookie without having to calculate this (via a call into the
- * LOV + OSCs) each time we make an RPC. The maximum size is also tracked
- * but not used to avoid wastefully vmalloc()'ing large reply buffers when
- * a large number of stripes is possible. If a larger reply buffer is
- * required it will be reallocated in the ptlrpc layer due to overflow.
- */
-static int mdc_init_ea_size(struct obd_export *exp, int easize,
- int def_easize, int cookiesize, int def_cookiesize)
-{
- struct obd_device *obd = exp->exp_obd;
- struct client_obd *cli = &obd->u.cli;
-
- if (cli->cl_max_mds_easize < easize)
- cli->cl_max_mds_easize = easize;
-
- if (cli->cl_default_mds_easize < def_easize)
- cli->cl_default_mds_easize = def_easize;
-
- if (cli->cl_max_mds_cookiesize < cookiesize)
- cli->cl_max_mds_cookiesize = cookiesize;
-
- if (cli->cl_default_mds_cookiesize < def_cookiesize)
- cli->cl_default_mds_cookiesize = def_cookiesize;
-
- return 0;
-}
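
The comment above mdc_init_ea_size() describes a grow-only cache of EA and cookie sizes, so reply buffers sized from the cache never shrink below what an earlier caller needed. A small standalone sketch of that update rule, with a hypothetical struct and sample values:

/*
 * Illustrative sketch (not the removed kernel code): the grow-only
 * update pattern mdc_init_ea_size() applies to its cached sizes.
 */
#include <stdio.h>

struct ea_cache {
	int max_easize;
	int default_easize;
};

static void init_ea_size(struct ea_cache *c, int easize, int def_easize)
{
	/* sizes only ever grow, so cached reply buffers stay large enough */
	if (c->max_easize < easize)
		c->max_easize = easize;
	if (c->default_easize < def_easize)
		c->default_easize = def_easize;
}

int main(void)
{
	struct ea_cache c = { 0, 0 };

	init_ea_size(&c, 4096, 1024);
	init_ea_size(&c, 2048, 512);	/* smaller values are ignored */
	printf("max=%d default=%d\n", c.max_easize, c.default_easize);
	return 0;
}
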
-
-static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- break;
- case OBD_CLEANUP_EXPORTS:
- /* Failsafe, ok if racy */
- if (obd->obd_type->typ_refcnt <= 1)
- libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
-
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
-
- mdc_llog_finish(obd);
- break;
- }
- return 0;
-}
-
-static int mdc_cleanup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
-
- kfree(cli->cl_rpc_lock);
- kfree(cli->cl_close_lock);
-
- ptlrpcd_decref();
-
- return client_obd_cleanup(obd);
-}
-
-static int mdc_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- struct lustre_cfg *lcfg = buf;
- struct lprocfs_static_vars lvars = { NULL };
- int rc = 0;
-
- lprocfs_mdc_init_vars(&lvars);
- switch (lcfg->lcfg_command) {
- default:
- rc = class_process_proc_param(PARAM_MDC, lvars.obd_vars,
- lcfg, obd);
- if (rc > 0)
- rc = 0;
- break;
- }
- return rc;
-}
-
-
-/* get remote permission for current user on fid */
-static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- LASSERT(client_is_remote(exp));
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, fid, OBD_MD_FLRMTPERM, 0, suppgid, 0);
-
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
-static struct obd_ops mdc_obd_ops = {
- .o_owner = THIS_MODULE,
- .o_setup = mdc_setup,
- .o_precleanup = mdc_precleanup,
- .o_cleanup = mdc_cleanup,
- .o_add_conn = client_import_add_conn,
- .o_del_conn = client_import_del_conn,
- .o_connect = client_connect_import,
- .o_disconnect = client_disconnect_export,
- .o_iocontrol = mdc_iocontrol,
- .o_set_info_async = mdc_set_info_async,
- .o_statfs = mdc_statfs,
- .o_fid_init = client_fid_init,
- .o_fid_fini = client_fid_fini,
- .o_fid_alloc = mdc_fid_alloc,
- .o_import_event = mdc_import_event,
- .o_get_info = mdc_get_info,
- .o_process_config = mdc_process_config,
- .o_get_uuid = mdc_get_uuid,
- .o_quotactl = mdc_quotactl,
- .o_quotacheck = mdc_quotacheck
-};
-
-static struct md_ops mdc_md_ops = {
- .m_getstatus = mdc_getstatus,
- .m_null_inode = mdc_null_inode,
- .m_find_cbdata = mdc_find_cbdata,
- .m_close = mdc_close,
- .m_create = mdc_create,
- .m_done_writing = mdc_done_writing,
- .m_enqueue = mdc_enqueue,
- .m_getattr = mdc_getattr,
- .m_getattr_name = mdc_getattr_name,
- .m_intent_lock = mdc_intent_lock,
- .m_link = mdc_link,
- .m_is_subdir = mdc_is_subdir,
- .m_rename = mdc_rename,
- .m_setattr = mdc_setattr,
- .m_setxattr = mdc_setxattr,
- .m_getxattr = mdc_getxattr,
- .m_sync = mdc_sync,
- .m_readpage = mdc_readpage,
- .m_unlink = mdc_unlink,
- .m_cancel_unused = mdc_cancel_unused,
- .m_init_ea_size = mdc_init_ea_size,
- .m_set_lock_data = mdc_set_lock_data,
- .m_lock_match = mdc_lock_match,
- .m_get_lustre_md = mdc_get_lustre_md,
- .m_free_lustre_md = mdc_free_lustre_md,
- .m_set_open_replay_data = mdc_set_open_replay_data,
- .m_clear_open_replay_data = mdc_clear_open_replay_data,
- .m_get_remote_perm = mdc_get_remote_perm,
- .m_intent_getattr_async = mdc_intent_getattr_async,
- .m_revalidate_lock = mdc_revalidate_lock
-};
-
-static int __init mdc_init(void)
-{
- struct lprocfs_static_vars lvars = { NULL };
-
- lprocfs_mdc_init_vars(&lvars);
-
- return class_register_type(&mdc_obd_ops, &mdc_md_ops,
- LUSTRE_MDC_NAME, NULL);
-}
-
-static void /*__exit*/ mdc_exit(void)
-{
- class_unregister_type(LUSTRE_MDC_NAME);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Metadata Client");
-MODULE_LICENSE("GPL");
-
-module_init(mdc_init);
-module_exit(mdc_exit);
diff --git a/drivers/staging/lustre/lustre/mgc/Makefile b/drivers/staging/lustre/lustre/mgc/Makefile
deleted file mode 100644
index 8ea29a89cf50..000000000000
--- a/drivers/staging/lustre/lustre/mgc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += mgc.o
-mgc-y := mgc_request.o lproc_mgc.o
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
deleted file mode 100644
index 34a9317d6d63..000000000000
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/vfs.h>
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "mgc_internal.h"
-
-LPROC_SEQ_FOPS_RO_TYPE(mgc, connect_flags);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, server_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, conn_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, import);
-LPROC_SEQ_FOPS_RO_TYPE(mgc, state);
-
-LPROC_SEQ_FOPS_WR_ONLY(mgc, ping);
-
-static int mgc_ir_state_seq_show(struct seq_file *m, void *v)
-{
- return lprocfs_mgc_rd_ir_state(m, m->private);
-}
-LPROC_SEQ_FOPS_RO(mgc_ir_state);
-
-static struct lprocfs_vars lprocfs_mgc_obd_vars[] = {
- { "ping", &mgc_ping_fops, NULL, 0222 },
- { "connect_flags", &mgc_connect_flags_fops, NULL, 0 },
- { "mgs_server_uuid", &mgc_server_uuid_fops, NULL, 0 },
- { "mgs_conn_uuid", &mgc_conn_uuid_fops, NULL, 0 },
- { "import", &mgc_import_fops, NULL, 0 },
- { "state", &mgc_state_fops, NULL, 0 },
- { "ir_state", &mgc_ir_state_fops, NULL, 0 },
- { NULL }
-};
-
-void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->obd_vars = lprocfs_mgc_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
deleted file mode 100644
index 82fb8f46e037..000000000000
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _MGC_INTERNAL_H
-#define _MGC_INTERNAL_H
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_log.h"
-#include "../include/lustre_export.h"
-
-void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars);
-int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data);
-
-int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld);
-
-static inline int cld_is_sptlrpc(struct config_llog_data *cld)
-{
- return cld->cld_type == CONFIG_T_SPTLRPC;
-}
-
-static inline int cld_is_recover(struct config_llog_data *cld)
-{
- return cld->cld_type == CONFIG_T_RECOVER;
-}
-
-#endif /* _MGC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
deleted file mode 100644
index 3e9cca026306..000000000000
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ /dev/null
@@ -1,1728 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/mgc/mgc_request.c
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_MGC
-#define D_MGC D_CONFIG /*|D_WARNING*/
-
-#include <linux/module.h>
-#include "../include/obd_class.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_log.h"
-#include "../include/lustre_disk.h"
-
-#include "mgc_internal.h"
-
-static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
- int type)
-{
- __u64 resname = 0;
-
- if (len > sizeof(resname)) {
- CERROR("name too long: %s\n", name);
- return -EINVAL;
- }
- if (len <= 0) {
- CERROR("missing name: %s\n", name);
- return -EINVAL;
- }
- memcpy(&resname, name, len);
-
- /* Always use the same endianness for the resid */
- memset(res_id, 0, sizeof(*res_id));
- res_id->name[0] = cpu_to_le64(resname);
- /* XXX: unfortunately, sptlprc and config llog share one lock */
- switch (type) {
- case CONFIG_T_CONFIG:
- case CONFIG_T_SPTLRPC:
- resname = 0;
- break;
- case CONFIG_T_RECOVER:
- case CONFIG_T_PARAMS:
- resname = type;
- break;
- default:
- LBUG();
- }
- res_id->name[1] = cpu_to_le64(resname);
- CDEBUG(D_MGC, "log %s to resid %#llx/%#llx (%.8s)\n", name,
- res_id->name[0], res_id->name[1], (char *)&res_id->name[0]);
- return 0;
-}
-
-int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
-{
- /* fsname is at most 8 chars long and may contain "-",
- * e.g. "lustre", "SUN-000" */
- return mgc_name2resid(fsname, strlen(fsname), res_id, type);
-}
-EXPORT_SYMBOL(mgc_fsname2resid);
-
-static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
-{
- char *name_end;
- int len;
-
- /* logname consists of "fsname-nodetype",
- * e.g. "lustre-MDT0001", "SUN-000-client";
- * the one exception is the llog "params" */
- name_end = strrchr(logname, '-');
- if (!name_end)
- len = strlen(logname);
- else
- len = name_end - logname;
- return mgc_name2resid(logname, len, res_id, type);
-}
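
mgc_name2resid() packs the (at most eight byte) fsname into the first 64-bit word of the ldlm resource id and uses the second word to separate config/sptlrpc locks from recover/params locks. Below is a standalone sketch of the packing, not the kernel code itself; endian conversion is omitted, whereas the kernel applies cpu_to_le64() so the resid is identical on all architectures.

/*
 * Illustrative sketch: how a short fsname fits into one 64-bit
 * resource-id word, as mgc_name2resid() does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int name2resid(const char *name, uint64_t res[2], uint64_t type)
{
	size_t len = strlen(name);
	uint64_t packed = 0;

	if (len == 0 || len > sizeof(packed))
		return -1;		/* name must fit in 8 bytes */

	memcpy(&packed, name, len);	/* remaining bytes stay zero */
	res[0] = packed;		/* the name itself */
	res[1] = type;			/* 0 for config/sptlrpc, else the type */
	return 0;
}

int main(void)
{
	uint64_t res[2];

	if (name2resid("lustre", res, 0) == 0)
		printf("resid %#llx/%#llx\n",
		       (unsigned long long)res[0],
		       (unsigned long long)res[1]);
	return 0;
}
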
-
-/********************** config llog list **********************/
-static LIST_HEAD(config_llog_list);
-static DEFINE_SPINLOCK(config_list_lock);
-
-/* Take a reference to a config log */
-static int config_log_get(struct config_llog_data *cld)
-{
- atomic_inc(&cld->cld_refcount);
- CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
- return 0;
-}
-
-/* Drop a reference to a config log. When no longer referenced,
- we can free the config log data */
-static void config_log_put(struct config_llog_data *cld)
-{
- CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
- atomic_read(&cld->cld_refcount));
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- /* hold the spinlock so no item with a zero refcount stays on the list */
- if (atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
- list_del(&cld->cld_list_chain);
- spin_unlock(&config_list_lock);
-
- CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
-
- if (cld->cld_recover)
- config_log_put(cld->cld_recover);
- if (cld->cld_sptlrpc)
- config_log_put(cld->cld_sptlrpc);
- if (cld->cld_params)
- config_log_put(cld->cld_params);
- if (cld_is_sptlrpc(cld))
- sptlrpc_conf_log_stop(cld->cld_logname);
-
- class_export_put(cld->cld_mgcexp);
- kfree(cld);
- }
-}
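
config_log_put() drops a reference and, only when the count reaches zero, unlinks the entry while still holding config_list_lock, so no zero-refcount item is ever visible on the list. The plain userspace sketch below shows that pattern; it always takes the lock instead of using atomic_dec_and_lock(), and the list and types are made up for the example.

/*
 * Illustrative sketch: drop a reference and unlink from the list only
 * when it reaches zero, under the list lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct log_entry {
	int refcount;
	struct log_entry *next;	/* singly linked list for brevity */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct log_entry *log_list;

static void log_put(struct log_entry *e)
{
	int free_it = 0;

	pthread_mutex_lock(&list_lock);
	if (--e->refcount == 0) {
		/* unlink while still holding the lock, so no zero-ref
		 * entry is ever visible on the list */
		struct log_entry **pp = &log_list;

		while (*pp && *pp != e)
			pp = &(*pp)->next;
		if (*pp)
			*pp = e->next;
		free_it = 1;
	}
	pthread_mutex_unlock(&list_lock);

	if (free_it)
		free(e);
}

int main(void)
{
	struct log_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->refcount = 1;
	e->next = log_list;
	log_list = e;
	log_put(e);		/* drops the last reference and frees it */
	printf("list empty: %s\n", log_list == NULL ? "yes" : "no");
	return 0;
}
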
-
-/* Find a config log by name */
-static
-struct config_llog_data *config_log_find(char *logname,
- struct config_llog_instance *cfg)
-{
- struct config_llog_data *cld;
- struct config_llog_data *found = NULL;
- void *instance;
-
- LASSERT(logname != NULL);
-
- instance = cfg ? cfg->cfg_instance : NULL;
- spin_lock(&config_list_lock);
- list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- /* check if instance equals */
- if (instance != cld->cld_cfg.cfg_instance)
- continue;
-
- /* instance may be NULL, should check name */
- if (strcmp(logname, cld->cld_logname) == 0) {
- found = cld;
- break;
- }
- }
- if (found) {
- atomic_inc(&found->cld_refcount);
- LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
- }
- spin_unlock(&config_list_lock);
- return found;
-}
-
-static
-struct config_llog_data *do_config_log_add(struct obd_device *obd,
- char *logname,
- int type,
- struct config_llog_instance *cfg,
- struct super_block *sb)
-{
- struct config_llog_data *cld;
- int rc;
-
- CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
- cfg ? cfg->cfg_instance : NULL);
-
- cld = kzalloc(sizeof(*cld) + strlen(logname) + 1, GFP_NOFS);
- if (!cld)
- return ERR_PTR(-ENOMEM);
-
- strcpy(cld->cld_logname, logname);
- if (cfg)
- cld->cld_cfg = *cfg;
- else
- cld->cld_cfg.cfg_callback = class_config_llog_handler;
- mutex_init(&cld->cld_lock);
- cld->cld_cfg.cfg_last_idx = 0;
- cld->cld_cfg.cfg_flags = 0;
- cld->cld_cfg.cfg_sb = sb;
- cld->cld_type = type;
- atomic_set(&cld->cld_refcount, 1);
-
- /* Keep the mgc around until we are done */
- cld->cld_mgcexp = class_export_get(obd->obd_self_export);
-
- if (cld_is_sptlrpc(cld)) {
- sptlrpc_conf_log_start(logname);
- cld->cld_cfg.cfg_obdname = obd->obd_name;
- }
-
- rc = mgc_logname2resid(logname, &cld->cld_resid, type);
-
- spin_lock(&config_list_lock);
- list_add(&cld->cld_list_chain, &config_llog_list);
- spin_unlock(&config_list_lock);
-
- if (rc) {
- config_log_put(cld);
- return ERR_PTR(rc);
- }
-
- if (cld_is_sptlrpc(cld)) {
- rc = mgc_process_log(obd, cld);
- if (rc && rc != -ENOENT)
- CERROR("failed processing sptlrpc log: %d\n", rc);
- }
-
- return cld;
-}
-
-static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
- char *fsname,
- struct config_llog_instance *cfg,
- struct super_block *sb)
-{
- struct config_llog_instance lcfg = *cfg;
- struct config_llog_data *cld;
- char logname[32];
-
- /* we have to use a different llog for clients and MDTs for CMD,
- * where only clients are notified if one of the CMD servers restarts */
- LASSERT(strlen(fsname) < sizeof(logname) / 2);
- strcpy(logname, fsname);
- LASSERT(lcfg.cfg_instance);
- strcat(logname, "-cliir");
-
- cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
- return cld;
-}
-
-static struct config_llog_data *config_params_log_add(struct obd_device *obd,
- struct config_llog_instance *cfg, struct super_block *sb)
-{
- struct config_llog_instance lcfg = *cfg;
- struct config_llog_data *cld;
-
- lcfg.cfg_instance = sb;
-
- cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS,
- &lcfg, sb);
-
- return cld;
-}
-
-/** Add this log to the list of active logs watched by an MGC.
- * Active means we're watching for updates.
- * We have one active log per "mount" - client instance or servername.
- * Each instance may be at a different point in the log.
- */
-static int config_log_add(struct obd_device *obd, char *logname,
- struct config_llog_instance *cfg,
- struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct config_llog_data *cld;
- struct config_llog_data *sptlrpc_cld;
- struct config_llog_data *params_cld;
- char seclogname[32];
- char *ptr;
- int rc;
-
- CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
-
- /*
- * for each regular log, the sptlrpc log it depends on is named
- * <fsname>-sptlrpc. Multiple regular logs may share one sptlrpc log.
- */
- ptr = strrchr(logname, '-');
- if (ptr == NULL || ptr - logname > 8) {
- CERROR("logname %s is too long\n", logname);
- return -EINVAL;
- }
-
- memcpy(seclogname, logname, ptr - logname);
- strcpy(seclogname + (ptr - logname), "-sptlrpc");
-
- sptlrpc_cld = config_log_find(seclogname, NULL);
- if (sptlrpc_cld == NULL) {
- sptlrpc_cld = do_config_log_add(obd, seclogname,
- CONFIG_T_SPTLRPC, NULL, NULL);
- if (IS_ERR(sptlrpc_cld)) {
- CERROR("can't create sptlrpc log: %s\n", seclogname);
- rc = PTR_ERR(sptlrpc_cld);
- goto out_err;
- }
- }
- params_cld = config_params_log_add(obd, cfg, sb);
- if (IS_ERR(params_cld)) {
- rc = PTR_ERR(params_cld);
- CERROR("%s: can't create params log: rc = %d\n",
- obd->obd_name, rc);
- goto out_err1;
- }
-
- cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
- if (IS_ERR(cld)) {
- CERROR("can't create log: %s\n", logname);
- rc = PTR_ERR(cld);
- goto out_err2;
- }
-
- cld->cld_sptlrpc = sptlrpc_cld;
- cld->cld_params = params_cld;
-
- LASSERT(lsi->lsi_lmd);
- if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
- struct config_llog_data *recover_cld;
- *strrchr(seclogname, '-') = 0;
- recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
- if (IS_ERR(recover_cld)) {
- rc = PTR_ERR(recover_cld);
- goto out_err3;
- }
- cld->cld_recover = recover_cld;
- }
-
- return 0;
-
-out_err3:
- config_log_put(cld);
-
-out_err2:
- config_log_put(params_cld);
-
-out_err1:
- config_log_put(sptlrpc_cld);
-
-out_err:
- return rc;
-}
-
-DEFINE_MUTEX(llog_process_lock);
-
-/** Stop watching for updates on this log.
- */
-static int config_log_end(char *logname, struct config_llog_instance *cfg)
-{
- struct config_llog_data *cld;
- struct config_llog_data *cld_sptlrpc = NULL;
- struct config_llog_data *cld_params = NULL;
- struct config_llog_data *cld_recover = NULL;
- int rc = 0;
-
- cld = config_log_find(logname, cfg);
- if (cld == NULL)
- return -ENOENT;
-
- mutex_lock(&cld->cld_lock);
- /*
- * if cld_stopping is set, we never started the log and therefore do
- * not own the start ref. This can happen after a previous umount:
- * the cld is still hanging around waiting for a lock cancel, and a
- * later remount fails in the middle and calls log_end without
- * having called start_log.
- */
- if (unlikely(cld->cld_stopping)) {
- mutex_unlock(&cld->cld_lock);
- /* drop the ref from the find */
- config_log_put(cld);
- return rc;
- }
-
- cld->cld_stopping = 1;
-
- cld_recover = cld->cld_recover;
- cld->cld_recover = NULL;
- mutex_unlock(&cld->cld_lock);
-
- if (cld_recover) {
- mutex_lock(&cld_recover->cld_lock);
- cld_recover->cld_stopping = 1;
- mutex_unlock(&cld_recover->cld_lock);
- config_log_put(cld_recover);
- }
-
- spin_lock(&config_list_lock);
- cld_sptlrpc = cld->cld_sptlrpc;
- cld->cld_sptlrpc = NULL;
- cld_params = cld->cld_params;
- cld->cld_params = NULL;
- spin_unlock(&config_list_lock);
-
- if (cld_sptlrpc)
- config_log_put(cld_sptlrpc);
-
- if (cld_params) {
- mutex_lock(&cld_params->cld_lock);
- cld_params->cld_stopping = 1;
- mutex_unlock(&cld_params->cld_lock);
- config_log_put(cld_params);
- }
-
- /* drop the ref from the find */
- config_log_put(cld);
- /* drop the start ref */
- config_log_put(cld);
-
- CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client",
- rc);
- return rc;
-}
-
-int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct obd_import *imp;
- struct obd_connect_data *ocd;
- struct config_llog_data *cld;
-
- LPROCFS_CLIMP_CHECK(obd);
- imp = obd->u.cli.cl_import;
- ocd = &imp->imp_connect_data;
-
- seq_printf(m, "imperative_recovery: %s\n",
- OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
- seq_printf(m, "client_state:\n");
-
- spin_lock(&config_list_lock);
- list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- if (cld->cld_recover == NULL)
- continue;
- seq_printf(m, " - { client: %s, nidtbl_version: %u }\n",
- cld->cld_logname,
- cld->cld_recover->cld_cfg.cfg_last_idx);
- }
- spin_unlock(&config_list_lock);
-
- LPROCFS_CLIMP_EXIT(obd);
- return 0;
-}
-
-/* reenqueue any lost locks */
-#define RQ_RUNNING 0x1
-#define RQ_NOW 0x2
-#define RQ_LATER 0x4
-#define RQ_STOP 0x8
-#define RQ_PRECLEANUP 0x10
-static int rq_state;
-static wait_queue_head_t rq_waitq;
-static DECLARE_COMPLETION(rq_exit);
-static DECLARE_COMPLETION(rq_start);
-
-static void do_requeue(struct config_llog_data *cld)
-{
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- /* Do not run mgc_process_log on a disconnected export or an
- export which is being disconnected. Take the client
- semaphore to make the check non-racy. */
- down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
- if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
- CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
- mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
- } else {
- CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
- cld->cld_logname);
- }
- up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
-}
-
-/* this timeout represents how many seconds the MGC should wait before
- * re-enqueueing the config and recovery locks to the MGS. We need to
- * randomize this in order not to flood the MGS.
- */
-#define MGC_TIMEOUT_MIN_SECONDS 5
-#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */
-
-static int mgc_requeue_thread(void *data)
-{
- bool first = true;
-
- CDEBUG(D_MGC, "Starting requeue thread\n");
-
- /* Keep trying failed locks periodically */
- spin_lock(&config_list_lock);
- rq_state |= RQ_RUNNING;
- while (1) {
- struct l_wait_info lwi;
- struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
- int stopped = !!(rq_state & RQ_STOP);
- int to;
-
- /* Any new or requeued lostlocks will change the state */
- rq_state &= ~(RQ_NOW | RQ_LATER);
- spin_unlock(&config_list_lock);
-
- if (first) {
- first = false;
- complete(&rq_start);
- }
-
- /* Always wait a few seconds to allow the server that
- caused the lock revocation to finish its setup, plus a
- random delay so everyone doesn't try to reconnect at once. */
- to = MGC_TIMEOUT_MIN_SECONDS * HZ;
- to += rand * HZ / 100; /* rand is centi-seconds */
- lwi = LWI_TIMEOUT(to, NULL, NULL);
- l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
- &lwi);
-
- /*
- * iterate through the list; for each cld, process the sptlrpc
- * cld it depends on first (if any) and then the cld itself.
- *
- * any item in the list is guaranteed to hold a
- * reference > 0; and if cld_lostlock is set, at
- * least one reference is taken by the previous enqueue.
- */
- cld_prev = NULL;
-
- spin_lock(&config_list_lock);
- rq_state &= ~RQ_PRECLEANUP;
- list_for_each_entry(cld, &config_llog_list,
- cld_list_chain) {
- if (!cld->cld_lostlock)
- continue;
-
- spin_unlock(&config_list_lock);
-
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- /* Whether we enqueued again or not in mgc_process_log,
- * we're done with the ref from the old enqueue */
- if (cld_prev)
- config_log_put(cld_prev);
- cld_prev = cld;
-
- cld->cld_lostlock = 0;
- if (likely(!stopped))
- do_requeue(cld);
-
- spin_lock(&config_list_lock);
- }
- spin_unlock(&config_list_lock);
- if (cld_prev)
- config_log_put(cld_prev);
-
- /* break after scanning the list so that we can drop the
- * refcount on clds that lost their lock */
- if (unlikely(stopped)) {
- spin_lock(&config_list_lock);
- break;
- }
-
- /* Wait a bit to see if anyone else needs a requeue */
- lwi = (struct l_wait_info) { 0 };
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi);
- spin_lock(&config_list_lock);
- }
- /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
- rq_state &= ~RQ_RUNNING;
- spin_unlock(&config_list_lock);
-
- complete(&rq_exit);
-
- CDEBUG(D_MGC, "Ending requeue thread\n");
- return 0;
-}
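
The requeue thread sleeps for a base interval plus a random number of centiseconds before re-enqueueing, so clients do not all hit the MGS at the same moment. A standalone sketch of the delay computation follows; HZ is assumed to be 100 purely for the example, and rand() stands in for the kernel's cfs_rand().

/*
 * Illustrative sketch: the randomized requeue delay computed by
 * mgc_requeue_thread().
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MGC_TIMEOUT_MIN_SECONDS   5
#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff	/* ~500, up to ~5 extra seconds */
#define HZ                        100	/* assumed jiffies per second */

static long requeue_delay_jiffies(void)
{
	long r = rand() & MGC_TIMEOUT_RAND_CENTISEC;	/* centiseconds */

	/* base delay plus random jitter so clients do not reconnect at once */
	return MGC_TIMEOUT_MIN_SECONDS * HZ + r * HZ / 100;
}

int main(void)
{
	long d;

	srand((unsigned int)time(NULL));
	d = requeue_delay_jiffies();
	printf("delay: %ld jiffies (~%.2f s)\n", d, (double)d / HZ);
	return 0;
}
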
-
-/* Add a cld to the list to requeue. Start the requeue thread if needed.
- We are responsible for dropping the config log reference from here on out. */
-static void mgc_requeue_add(struct config_llog_data *cld)
-{
- CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
- cld->cld_logname, atomic_read(&cld->cld_refcount),
- cld->cld_stopping, rq_state);
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
-
- mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping || cld->cld_lostlock) {
- mutex_unlock(&cld->cld_lock);
- return;
- }
- /* this refcount will be released in mgc_requeue_thread. */
- config_log_get(cld);
- cld->cld_lostlock = 1;
- mutex_unlock(&cld->cld_lock);
-
- /* Hold lock for rq_state */
- spin_lock(&config_list_lock);
- if (rq_state & RQ_STOP) {
- spin_unlock(&config_list_lock);
- cld->cld_lostlock = 0;
- config_log_put(cld);
- } else {
- rq_state |= RQ_NOW;
- spin_unlock(&config_list_lock);
- wake_up(&rq_waitq);
- }
-}
-
-static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
- int rc;
-
- /* set up only the remote ctxt; the local disk context is switched per
- * filesystem during mgc_fs_setup() */
- rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd,
- &llog_client_ops);
- if (rc)
- return rc;
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- LASSERT(ctxt);
-
- llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
-
- return 0;
-}
-
-static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- if (ctxt)
- llog_cleanup(env, ctxt);
-
- return 0;
-}
-
-static atomic_t mgc_count = ATOMIC_INIT(0);
-static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
- int rc = 0;
- int temp;
-
- switch (stage) {
- case OBD_CLEANUP_EARLY:
- break;
- case OBD_CLEANUP_EXPORTS:
- if (atomic_dec_and_test(&mgc_count)) {
- LASSERT(rq_state & RQ_RUNNING);
- /* stop requeue thread */
- temp = RQ_STOP;
- } else {
- /* wakeup requeue thread to clean our cld */
- temp = RQ_NOW | RQ_PRECLEANUP;
- }
- spin_lock(&config_list_lock);
- rq_state |= temp;
- spin_unlock(&config_list_lock);
- wake_up(&rq_waitq);
- if (temp & RQ_STOP)
- wait_for_completion(&rq_exit);
- obd_cleanup_client_import(obd);
- rc = mgc_llog_fini(NULL, obd);
- if (rc != 0)
- CERROR("failed to cleanup llogging subsystems\n");
- break;
- }
- return rc;
-}
-
-static int mgc_cleanup(struct obd_device *obd)
-{
- /* COMPAT_146 - old config logs may have added profiles we don't
- know about */
- if (obd->obd_type->typ_refcnt <= 1)
- /* Only for the last mgc */
- class_del_profiles();
-
- lprocfs_obd_cleanup(obd);
- ptlrpcd_decref();
-
- return client_obd_cleanup(obd);
-}
-
-static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- int rc;
-
- ptlrpcd_addref();
-
- rc = client_obd_setup(obd, lcfg);
- if (rc)
- goto err_decref;
-
- rc = mgc_llog_init(NULL, obd);
- if (rc) {
- CERROR("failed to setup llogging subsystems\n");
- goto err_cleanup;
- }
-
- lprocfs_mgc_init_vars(&lvars);
- lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
- sptlrpc_lprocfs_cliobd_attach(obd);
-
- if (atomic_inc_return(&mgc_count) == 1) {
- rq_state = 0;
- init_waitqueue_head(&rq_waitq);
-
- /* start requeue thread */
- rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
- "ll_cfg_requeue"));
- if (IS_ERR_VALUE(rc)) {
- CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n",
- obd->obd_name, rc);
- goto err_cleanup;
- }
- /* rc is the task_struct pointer of mgc_requeue_thread. */
- rc = 0;
- wait_for_completion(&rq_start);
- }
-
- return rc;
-
-err_cleanup:
- client_obd_cleanup(obd);
-err_decref:
- ptlrpcd_decref();
- return rc;
-}
-
-/* based on ll_mdc_blocking_ast */
-static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
-{
- struct lustre_handle lockh;
- struct config_llog_data *cld = (struct config_llog_data *)data;
- int rc = 0;
-
- switch (flag) {
- case LDLM_CB_BLOCKING:
- /* mgs wants the lock, give it up... */
- LDLM_DEBUG(lock, "MGC blocking CB");
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- break;
- case LDLM_CB_CANCELING:
- /* We've given up the lock, prepare ourselves to update. */
- LDLM_DEBUG(lock, "MGC cancel CB");
-
- CDEBUG(D_MGC, "Lock res "DLDLMRES" (%.8s)\n",
- PLDLMRES(lock->l_resource),
- (char *)&lock->l_resource->lr_name.name[0]);
-
- if (!cld) {
- CDEBUG(D_INFO, "missing data, won't requeue\n");
- break;
- }
-
- /* held at mgc_process_log(). */
- LASSERT(atomic_read(&cld->cld_refcount) > 0);
- /* Are we done with this log? */
- if (cld->cld_stopping) {
- CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
- cld->cld_logname);
- config_log_put(cld);
- break;
- }
- /* Make sure not to re-enqueue when the mgc is stopping
- (we get called from client_disconnect_export) */
- if (!lock->l_conn_export ||
- !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
- CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
- cld->cld_logname);
- config_log_put(cld);
- break;
- }
-
- /* Re-enqueue now */
- mgc_requeue_add(cld);
- config_log_put(cld);
- break;
- default:
- LBUG();
- }
-
- return rc;
-}
-
-/* Not sure where this should go... */
-/* This is the timeout value for MGS_CONNECT request plus a ping interval, such
- * that we can have a chance to try the secondary MGS if any. */
-#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
- + PING_INTERVAL)
-#define MGC_TARGET_REG_LIMIT 10
-#define MGC_SEND_PARAM_LIMIT 10
-
-/* Send parameter to MGS*/
-static int mgc_set_mgs_param(struct obd_export *exp,
- struct mgs_send_param *msp)
-{
- struct ptlrpc_request *req;
- struct mgs_send_param *req_msp, *rep_msp;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MGS_SET_INFO, LUSTRE_MGS_VERSION,
- MGS_SET_INFO);
- if (!req)
- return -ENOMEM;
-
- req_msp = req_capsule_client_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
- if (!req_msp) {
- ptlrpc_req_finished(req);
- return -ENOMEM;
- }
-
- memcpy(req_msp, msp, sizeof(*req_msp));
- ptlrpc_request_set_replen(req);
-
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = MGC_SEND_PARAM_LIMIT;
- rc = ptlrpc_queue_wait(req);
- if (!rc) {
- rep_msp = req_capsule_server_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
- memcpy(msp, rep_msp, sizeof(*rep_msp));
- }
-
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-/* Take a config lock so we can get cancel notifications */
-static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
- __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
- void *data, __u32 lvb_len, void *lvb_swabber,
- struct lustre_handle *lockh)
-{
- struct config_llog_data *cld = (struct config_llog_data *)data;
- struct ldlm_enqueue_info einfo = {
- .ei_type = type,
- .ei_mode = mode,
- .ei_cb_bl = mgc_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
- };
- struct ptlrpc_request *req;
- int short_limit = cld_is_sptlrpc(cld);
- int rc;
-
- CDEBUG(D_MGC, "Enqueue for %s (res %#llx)\n", cld->cld_logname,
- cld->cld_resid.name[0]);
-
- /* We need a callback for every lockholder, so don't try to
- ldlm_lock_match (see rev 1.1.2.11.2.47) */
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
- LDLM_ENQUEUE);
- if (req == NULL)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
- ptlrpc_request_set_replen(req);
-
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT;
- rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
- NULL, 0, LVB_T_NONE, lockh, 0);
- /* A failed enqueue should still call the mgc_blocking_ast,
- where it will be requeued if needed ("grant failed"). */
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static void mgc_notify_active(struct obd_device *unused)
-{
- /* wakeup mgc_requeue_thread to requeue mgc lock */
- spin_lock(&config_list_lock);
- rq_state |= RQ_NOW;
- spin_unlock(&config_list_lock);
- wake_up(&rq_waitq);
-
- /* TODO: Help the MGS rebuild nidtbl. -jay */
-}
-
-/* Send target_reg message to MGS */
-static int mgc_target_register(struct obd_export *exp,
- struct mgs_target_info *mti)
-{
- struct ptlrpc_request *req;
- struct mgs_target_info *req_mti, *rep_mti;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
- MGS_TARGET_REG);
- if (req == NULL)
- return -ENOMEM;
-
- req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO);
- if (!req_mti) {
- ptlrpc_req_finished(req);
- return -ENOMEM;
- }
-
- memcpy(req_mti, mti, sizeof(*req_mti));
- ptlrpc_request_set_replen(req);
- CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
- /* Limit how long we will wait for the enqueue to complete */
- req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
-
- rc = ptlrpc_queue_wait(req);
- if (!rc) {
- rep_mti = req_capsule_server_get(&req->rq_pill,
- &RMF_MGS_TARGET_INFO);
- memcpy(mti, rep_mti, sizeof(*rep_mti));
- CDEBUG(D_MGC, "register %s got index = %d\n",
- mti->mti_svname, mti->mti_stripe_index);
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- int rc = -EINVAL;
-
- /* Turn off initial_recov after we try all backup servers once */
- if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- int value;
- if (vallen != sizeof(int))
- return -EINVAL;
- value = *(int *)val;
- CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
- imp->imp_obd->obd_name, value,
- imp->imp_deactive, imp->imp_invalid,
- imp->imp_replayable, imp->imp_obd->obd_replayable,
- ptlrpc_import_state_name(imp->imp_state));
- /* Resurrect if we previously died */
- if ((imp->imp_state != LUSTRE_IMP_FULL &&
- imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
- ptlrpc_reconnect_import(imp);
- return 0;
- }
- if (KEY_IS(KEY_SET_INFO)) {
- struct mgs_send_param *msp;
-
- msp = (struct mgs_send_param *)val;
- rc = mgc_set_mgs_param(exp, msp);
- return rc;
- }
- if (KEY_IS(KEY_MGSSEC)) {
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct sptlrpc_flavor flvr;
-
- /*
- * an empty string means use the current flavor; if none has
- * been set yet, set it to null.
- *
- * if a flavor has been set previously, the requested flavor
- * must match the existing one.
- */
- if (vallen == 0) {
- if (cli->cl_flvr_mgc.sf_rpc != SPTLRPC_FLVR_INVALID)
- return 0;
- val = "null";
- vallen = 4;
- }
-
- rc = sptlrpc_parse_flavor(val, &flvr);
- if (rc) {
- CERROR("invalid sptlrpc flavor %s to MGS\n",
- (char *) val);
- return rc;
- }
-
- /*
- * the caller already holds a mutex
- */
- if (cli->cl_flvr_mgc.sf_rpc == SPTLRPC_FLVR_INVALID) {
- cli->cl_flvr_mgc = flvr;
- } else if (memcmp(&cli->cl_flvr_mgc, &flvr,
- sizeof(flvr)) != 0) {
- char str[20];
-
- sptlrpc_flavor2name(&cli->cl_flvr_mgc,
- str, sizeof(str));
- LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
- (char *) val, str);
- rc = -EPERM;
- }
- return rc;
- }
-
- return rc;
-}
-
-static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
- __u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *unused)
-{
- int rc = -EINVAL;
-
- if (KEY_IS(KEY_CONN_DATA)) {
- struct obd_import *imp = class_exp2cliimp(exp);
- struct obd_connect_data *data = val;
-
- if (*vallen == sizeof(*data)) {
- *data = imp->imp_connect_data;
- rc = 0;
- }
- }
-
- return rc;
-}
-
-static int mgc_import_event(struct obd_device *obd,
- struct obd_import *imp,
- enum obd_import_event event)
-{
- LASSERT(imp->imp_obd == obd);
- CDEBUG(D_MGC, "import event %#x\n", event);
-
- switch (event) {
- case IMP_EVENT_DISCON:
- /* MGC imports should not wait for recovery */
- if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
- ptlrpc_pinger_ir_down();
- break;
- case IMP_EVENT_INACTIVE:
- break;
- case IMP_EVENT_INVALIDATE: {
- struct ldlm_namespace *ns = obd->obd_namespace;
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
- break;
- }
- case IMP_EVENT_ACTIVE:
- CDEBUG(D_INFO, "%s: Reactivating import\n", obd->obd_name);
- /* Clearing obd_no_recov allows us to continue pinging */
- obd->obd_no_recov = 0;
- mgc_notify_active(obd);
- if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
- ptlrpc_pinger_ir_up();
- break;
- case IMP_EVENT_OCD:
- break;
- case IMP_EVENT_DEACTIVATE:
- case IMP_EVENT_ACTIVATE:
- break;
- default:
- CERROR("Unknown import event %#x\n", event);
- LBUG();
- }
- return 0;
-}
-
-enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
- CONFIG_READ_NRPAGES = 4
-};
-
-static int mgc_apply_recover_logs(struct obd_device *mgc,
- struct config_llog_data *cld,
- __u64 max_version,
- void *data, int datalen, bool mne_swab)
-{
- struct config_llog_instance *cfg = &cld->cld_cfg;
- struct mgs_nidtbl_entry *entry;
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- u64 prev_version = 0;
- char *inst;
- char *buf;
- int bufsz;
- int pos;
- int rc = 0;
- int off = 0;
-
- LASSERT(cfg->cfg_instance != NULL);
- LASSERT(cfg->cfg_sb == cfg->cfg_instance);
-
- inst = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
- if (!inst)
- return -ENOMEM;
-
- pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= PAGE_CACHE_SIZE) {
- kfree(inst);
- return -E2BIG;
- }
-
- ++pos;
- buf = inst + pos;
- bufsz = PAGE_CACHE_SIZE - pos;
-
- while (datalen > 0) {
- int entry_len = sizeof(*entry);
- int is_ost;
- struct obd_device *obd;
- char *obdname;
- char *cname;
- char *params;
- char *uuid;
-
- rc = -EINVAL;
- if (datalen < sizeof(*entry))
- break;
-
- entry = (typeof(entry))(data + off);
-
- /* sanity check */
- if (entry->mne_nid_type != 0) /* only support type 0 for ipv4 */
- break;
- if (entry->mne_nid_count == 0) /* at least one nid entry */
- break;
- if (entry->mne_nid_size != sizeof(lnet_nid_t))
- break;
-
- entry_len += entry->mne_nid_count * entry->mne_nid_size;
- if (datalen < entry_len) /* must have entry_len at least */
- break;
-
- /* Keep this swab for normal mixed endian handling. LU-1644 */
- if (mne_swab)
- lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > PAGE_CACHE_SIZE) {
- CERROR("MNE too large (%u)\n", entry->mne_length);
- break;
- }
-
- if (entry->mne_length < entry_len)
- break;
-
- off += entry->mne_length;
- datalen -= entry->mne_length;
- if (datalen < 0)
- break;
-
- if (entry->mne_version > max_version) {
- CERROR("entry index(%lld) is over max_index(%lld)\n",
- entry->mne_version, max_version);
- break;
- }
-
- if (prev_version >= entry->mne_version) {
- CERROR("index unsorted, prev %lld, now %lld\n",
- prev_version, entry->mne_version);
- break;
- }
- prev_version = entry->mne_version;
-
- /*
- * Write a string with format "nid::instance" to
- * lustre/<osc|mdc>/<target>-<osc|mdc>-<instance>/import.
- */
-
- is_ost = entry->mne_type == LDD_F_SV_TYPE_OST;
- memset(buf, 0, bufsz);
- obdname = buf;
- pos = 0;
-
- /* lustre-OST0001-osc-<instance #> */
- strcpy(obdname, cld->cld_logname);
- cname = strrchr(obdname, '-');
- if (cname == NULL) {
- CERROR("mgc %s: invalid logname %s\n",
- mgc->obd_name, obdname);
- break;
- }
-
- pos = cname - obdname;
- obdname[pos] = 0;
- pos += sprintf(obdname + pos, "-%s%04x",
- is_ost ? "OST" : "MDT", entry->mne_index);
-
- cname = is_ost ? "osc" : "mdc";
- pos += sprintf(obdname + pos, "-%s-%s", cname, inst);
- lustre_cfg_bufs_reset(&bufs, obdname);
-
- /* find the obd by obdname */
- obd = class_name2obd(obdname);
- if (obd == NULL) {
- CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n",
- mgc->obd_name, obdname);
- rc = 0;
- /* this is a safe race, when the ost is starting up...*/
- continue;
- }
-
- /* osc.import = "connection=<Conn UUID>::<target instance>" */
- ++pos;
- params = buf + pos;
- pos += sprintf(params, "%s.import=%s", cname, "connection=");
- uuid = buf + pos;
-
- down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import == NULL) {
- /* client does not connect to the OST yet */
- up_read(&obd->u.cli.cl_sem);
- rc = 0;
- continue;
- }
-
- /* TODO: iterate all nids to find one */
- /* find uuid by nid */
- rc = client_import_find_conn(obd->u.cli.cl_import,
- entry->u.nids[0],
- (struct obd_uuid *)uuid);
- up_read(&obd->u.cli.cl_sem);
- if (rc < 0) {
- CERROR("mgc: cannot find uuid by nid %s\n",
- libcfs_nid2str(entry->u.nids[0]));
- break;
- }
-
- CDEBUG(D_INFO, "Find uuid %s by nid %s\n",
- uuid, libcfs_nid2str(entry->u.nids[0]));
-
- pos += strlen(uuid);
- pos += sprintf(buf + pos, "::%u", entry->mne_instance);
- LASSERT(pos < bufsz);
-
- lustre_cfg_bufs_set_string(&bufs, 1, params);
-
- rc = -ENOMEM;
- lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
- if (lcfg == NULL) {
- CERROR("mgc: cannot allocate memory\n");
- break;
- }
-
- CDEBUG(D_INFO, "ir apply logs %lld/%lld for %s -> %s\n",
- prev_version, max_version, obdname, params);
-
- rc = class_process_config(lcfg);
- lustre_cfg_free(lcfg);
- if (rc)
- CDEBUG(D_INFO, "process config for %s error %d\n",
- obdname, rc);
-
- /* continue even if one entry fails */
- }
-
- kfree(inst);
- return rc;
-}
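
mgc_apply_recover_logs() rebuilds the target obd name ("<fsname>-OST<index>-osc-<instance>") and an "import=connection=<uuid>::<instance#>" parameter string for each nidtbl entry. Here is a standalone sketch of that string construction; every name, UUID and number below is a made-up example, not data from the removed code.

/*
 * Illustrative sketch: composing the obd name and import parameter
 * strings as mgc_apply_recover_logs() does.
 */
#include <stdio.h>

int main(void)
{
	char obdname[128];
	char params[160];
	const char *fsname   = "lustre";
	const char *cname    = "osc";			/* "mdc" for an MDT entry */
	const char *instance = "ffff880077a15000";	/* cfg_instance pointer */
	const char *uuid     = "192.168.1.10@tcp";	/* connection UUID */
	unsigned int index    = 1;			/* mne_index */
	unsigned int inst_num = 7;			/* mne_instance */

	/* lustre-OST0001-osc-ffff880077a15000 */
	snprintf(obdname, sizeof(obdname), "%s-OST%04x-%s-%s",
		 fsname, index, cname, instance);

	/* osc.import=connection=192.168.1.10@tcp::7 */
	snprintf(params, sizeof(params), "%s.import=connection=%s::%u",
		 cname, uuid, inst_num);

	printf("%s\n%s\n", obdname, params);
	return 0;
}
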
-
-/**
- * This function is called when the client is notified by the MGS that a
- * target has restarted. A CONFIG_READ RPC is sent to fetch the recovery logs.
- */
-static int mgc_process_recover_log(struct obd_device *obd,
- struct config_llog_data *cld)
-{
- struct ptlrpc_request *req = NULL;
- struct config_llog_instance *cfg = &cld->cld_cfg;
- struct mgs_config_body *body;
- struct mgs_config_res *res;
- struct ptlrpc_bulk_desc *desc;
- struct page **pages;
- int nrpages;
- bool eof = true;
- bool mne_swab = false;
- int i;
- int ealen;
- int rc;
-
- /* allocate buffer for bulk transfer.
- * if this is the first time the logs are read,
- * CONFIG_READ_NRPAGES_INIT will be used since all logs are read at
- * once; otherwise only the increment of the logs is read, which should
- * be small, so CONFIG_READ_NRPAGES will be used.
- */
- nrpages = CONFIG_READ_NRPAGES;
- if (cfg->cfg_last_idx == 0) /* the first time */
- nrpages = CONFIG_READ_NRPAGES_INIT;
-
- pages = kcalloc(nrpages, sizeof(*pages), GFP_NOFS);
- if (pages == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < nrpages; i++) {
- pages[i] = alloc_page(GFP_IOFS);
- if (pages[i] == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- }
-
-again:
- LASSERT(cld_is_recover(cld));
- LASSERT(mutex_is_locked(&cld->cld_lock));
- req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
- &RQF_MGS_CONFIG_READ);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ);
- if (rc)
- goto out;
-
- /* pack request */
- body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
- LASSERT(body != NULL);
- LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
- if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
- >= sizeof(body->mcb_name)) {
- rc = -E2BIG;
- goto out;
- }
- body->mcb_offset = cfg->cfg_last_idx + 1;
- body->mcb_type = cld->cld_type;
- body->mcb_bits = PAGE_CACHE_SHIFT;
- body->mcb_units = nrpages;
-
- /* allocate bulk transfer descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
- MGS_BULK_PORTAL);
- if (desc == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
- if (res->mcr_size < res->mcr_offset) {
- rc = -EINVAL;
- goto out;
- }
-
- /* always update the index, even if errors occurred while
- * handling the recovery logs */
- cfg->cfg_last_idx = res->mcr_offset;
- eof = res->mcr_offset == res->mcr_size;
-
- CDEBUG(D_INFO, "Latest version %lld, more %d.\n",
- res->mcr_offset, eof == false);
-
- ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0);
- if (ealen < 0) {
- rc = ealen;
- goto out;
- }
-
- if (ealen > nrpages << PAGE_CACHE_SHIFT) {
- rc = -EINVAL;
- goto out;
- }
-
- if (ealen == 0) { /* no logs transferred */
- if (!eof)
- rc = -EINVAL;
- goto out;
- }
-
- mne_swab = !!ptlrpc_rep_need_swab(req);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
- /* This import flag means the server did an extra swab of IR MNE
- * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
- if (unlikely(req->rq_import->imp_need_mne_swab))
- mne_swab = !mne_swab;
-#else
-#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
-#endif
-
- for (i = 0; i < nrpages && ealen > 0; i++) {
- int rc2;
- void *ptr;
-
- ptr = kmap(pages[i]);
- rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, PAGE_CACHE_SIZE),
- mne_swab);
- kunmap(pages[i]);
- if (rc2 < 0) {
- CWARN("Process recover log %s error %d\n",
- cld->cld_logname, rc2);
- break;
- }
-
- ealen -= PAGE_CACHE_SIZE;
- }
-
-out:
- if (req)
- ptlrpc_req_finished(req);
-
- if (rc == 0 && !eof)
- goto again;
-
- if (pages) {
- for (i = 0; i < nrpages; i++) {
- if (pages[i] == NULL)
- break;
- __free_page(pages[i]);
- }
- kfree(pages);
- }
- return rc;
-}
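
The tail of mgc_process_recover_log() walks the bulk reply one page at a time, handing each mapped page (or the shorter final chunk) to mgc_apply_recover_logs(). A standalone sketch of that consumption loop follows, with an assumed 4 KiB page size and a stub in place of the record parser; the kernel maps real bulk pages with kmap() instead.

/*
 * Illustrative sketch: page-by-page consumption of a bulk reply.
 */
#include <stdio.h>

#define EX_PAGE_SIZE 4096	/* assumed page size for the example */

static int apply_chunk(const char *buf, int len)
{
	(void)buf;		/* the real code parses nidtbl records here */
	printf("processing %d bytes\n", len);
	return 0;
}

int main(void)
{
	static char bulk[3 * EX_PAGE_SIZE];	/* pretend bulk reply */
	int ealen = (int)sizeof(bulk) - 100;	/* valid bytes returned */
	int nrpages = 3;
	int i;

	for (i = 0; i < nrpages && ealen > 0; i++) {
		int len = ealen < EX_PAGE_SIZE ? ealen : EX_PAGE_SIZE;

		if (apply_chunk(bulk + i * EX_PAGE_SIZE, len) < 0)
			break;
		ealen -= EX_PAGE_SIZE;	/* may go negative on the last page */
	}
	return 0;
}
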
-
-/* local_only means it cannot get remote llogs */
-static int mgc_process_cfg_log(struct obd_device *mgc,
- struct config_llog_data *cld, int local_only)
-{
- struct llog_ctxt *ctxt;
- struct lustre_sb_info *lsi = NULL;
- int rc = 0;
- bool sptlrpc_started = false;
- struct lu_env *env;
-
- LASSERT(cld);
- LASSERT(mutex_is_locked(&cld->cld_lock));
-
- /*
- * the local copy of the sptlrpc log is controlled elsewhere; don't try
- * to read it here.
- */
- if (cld_is_sptlrpc(cld) && local_only)
- return 0;
-
- if (cld->cld_cfg.cfg_sb)
- lsi = s2lsi(cld->cld_cfg.cfg_sb);
-
- env = kzalloc(sizeof(*env), GFP_NOFS);
- if (!env)
- return -ENOMEM;
-
- rc = lu_env_init(env, LCT_MG_THREAD);
- if (rc)
- goto out_free;
-
- ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
- LASSERT(ctxt);
-
- if (local_only) /* no local log at client side */ {
- rc = -EIO;
- goto out_pop;
- }
-
- if (cld_is_sptlrpc(cld)) {
- sptlrpc_conf_log_update_begin(cld->cld_logname);
- sptlrpc_started = true;
- }
-
- /* logname and instance info should be the same, so use our
- * copy of the instance for the update. The cfg_last_idx will
- * be updated here. */
- rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
- &cld->cld_cfg);
-
-out_pop:
- __llog_ctxt_put(env, ctxt);
-
- /*
- * update settings on existing OBDs. doing it inside
- * of llog_process_lock so no device is attaching/detaching
- * in parallel.
- * the logname must be <fsname>-sptlrpc
- */
- if (sptlrpc_started) {
- LASSERT(cld_is_sptlrpc(cld));
- sptlrpc_conf_log_update_end(cld->cld_logname);
- class_notify_sptlrpc_conf(cld->cld_logname,
- strlen(cld->cld_logname) -
- strlen("-sptlrpc"));
- }
-
- lu_env_fini(env);
-out_free:
- kfree(env);
- return rc;
-}
-
-/** Get a config log from the MGS and process it.
- * This function is called for both clients and servers.
- * Copy the log locally before parsing it if appropriate (non-MGS server)
- */
-int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
-{
- struct lustre_handle lockh = { 0 };
- __u64 flags = LDLM_FL_NO_LRU;
- int rc = 0, rcl;
-
- LASSERT(cld);
-
- /* I don't want multiple processes running process_log at once --
- sounds like badness. It actually might be fine, as long as
- we're not trying to update from the same log
- simultaneously (in which case we should use a per-log sem.) */
- mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping) {
- mutex_unlock(&cld->cld_lock);
- return 0;
- }
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
-
- CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
- cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
-
- /* Get the cfg lock on the llog */
- rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
- LCK_CR, &flags, NULL, NULL, NULL,
- cld, 0, NULL, &lockh);
- if (rcl == 0) {
- /* Get the cld, it will be released in mgc_blocking_ast. */
- config_log_get(cld);
- rc = ldlm_lock_set_data(&lockh, (void *)cld);
- LASSERT(rc == 0);
- } else {
- CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
-
- /* mark cld_lostlock so that it will requeue
- * after MGC becomes available. */
- cld->cld_lostlock = 1;
- /* Get extra reference, it will be put in requeue thread */
- config_log_get(cld);
- }
-
- if (cld_is_recover(cld)) {
- rc = 0; /* this is not a fatal error for recover log */
- if (rcl == 0)
- rc = mgc_process_recover_log(mgc, cld);
- } else {
- rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
- }
-
- CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
- mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
-
- mutex_unlock(&cld->cld_lock);
-
- /* Now drop the lock so MGS can revoke it */
- if (!rcl)
- ldlm_lock_decref(&lockh, LCK_CR);
-
- return rc;
-}
-
-/** Called from lustre_process_log.
- * LCFG_LOG_START gets the config log from the MGS, processes it to start
- * any services, and adds it to the list logs to watch (follow).
- */
-static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- struct lustre_cfg *lcfg = buf;
- struct config_llog_instance *cfg = NULL;
- char *logname;
- int rc = 0;
-
- switch (lcfg->lcfg_command) {
- case LCFG_LOV_ADD_OBD: {
- /* Overloading this cfg command: register a new target */
- struct mgs_target_info *mti;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) !=
- sizeof(struct mgs_target_info)) {
- rc = -EINVAL;
- goto out;
- }
-
- mti = (struct mgs_target_info *)lustre_cfg_buf(lcfg, 1);
- CDEBUG(D_MGC, "add_target %s %#x\n",
- mti->mti_svname, mti->mti_flags);
- rc = mgc_target_register(obd->u.cli.cl_mgc_mgsexp, mti);
- break;
- }
- case LCFG_LOV_DEL_OBD:
- /* Unregister has no meaning at the moment. */
- CERROR("lov_del_obd unimplemented\n");
- rc = -ENOSYS;
- break;
- case LCFG_SPTLRPC_CONF: {
- rc = sptlrpc_process_config(lcfg);
- break;
- }
- case LCFG_LOG_START: {
- struct config_llog_data *cld;
- struct super_block *sb;
-
- logname = lustre_cfg_string(lcfg, 1);
- cfg = (struct config_llog_instance *)lustre_cfg_buf(lcfg, 2);
- sb = *(struct super_block **)lustre_cfg_buf(lcfg, 3);
-
- CDEBUG(D_MGC, "parse_log %s from %d\n", logname,
- cfg->cfg_last_idx);
-
- /* We're only called through here on the initial mount */
- rc = config_log_add(obd, logname, cfg, sb);
- if (rc)
- break;
- cld = config_log_find(logname, cfg);
- if (cld == NULL) {
- rc = -ENOENT;
- break;
- }
-
- /* COMPAT_146 */
- /* FIXME only set this for old logs! Right now this forces
- us to always skip the "inside markers" check */
- cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
-
- rc = mgc_process_log(obd, cld);
- if (rc == 0 && cld->cld_recover != NULL) {
- if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
- imp_connect_data, IMP_RECOV)) {
- rc = mgc_process_log(obd, cld->cld_recover);
- } else {
- struct config_llog_data *cir = cld->cld_recover;
- cld->cld_recover = NULL;
- config_log_put(cir);
- }
- if (rc)
- CERROR("Cannot process recover llog %d\n", rc);
- }
-
- if (rc == 0 && cld->cld_params != NULL) {
- rc = mgc_process_log(obd, cld->cld_params);
- if (rc == -ENOENT) {
- CDEBUG(D_MGC,
- "There is no params config file yet\n");
- rc = 0;
- }
- /* params log is optional */
- if (rc)
-				CERROR("%s: can't process params llog: rc = %d\n",
-				       obd->obd_name, rc);
- }
- config_log_put(cld);
-
- break;
- }
- case LCFG_LOG_END: {
- logname = lustre_cfg_string(lcfg, 1);
-
- if (lcfg->lcfg_bufcount >= 2)
- cfg = (struct config_llog_instance *)lustre_cfg_buf(
- lcfg, 2);
- rc = config_log_end(logname, cfg);
- break;
- }
- default: {
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- rc = -EINVAL;
- goto out;
-
- }
- }
-out:
- return rc;
-}
-
-static struct obd_ops mgc_obd_ops = {
- .o_owner = THIS_MODULE,
- .o_setup = mgc_setup,
- .o_precleanup = mgc_precleanup,
- .o_cleanup = mgc_cleanup,
- .o_add_conn = client_import_add_conn,
- .o_del_conn = client_import_del_conn,
- .o_connect = client_connect_import,
- .o_disconnect = client_disconnect_export,
- /* .o_enqueue = mgc_enqueue, */
- /* .o_iocontrol = mgc_iocontrol, */
- .o_set_info_async = mgc_set_info_async,
- .o_get_info = mgc_get_info,
- .o_import_event = mgc_import_event,
- .o_process_config = mgc_process_config,
-};
-
-static int __init mgc_init(void)
-{
- return class_register_type(&mgc_obd_ops, NULL,
- LUSTRE_MGC_NAME, NULL);
-}
-
-static void /*__exit*/ mgc_exit(void)
-{
- class_unregister_type(LUSTRE_MGC_NAME);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Management Client");
-MODULE_LICENSE("GPL");
-
-module_init(mgc_init);
-module_exit(mgc_exit);
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
deleted file mode 100644
index acc685712ce9..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += obdclass.o
-
-obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
- llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
- genops.o uuid.o lprocfs_status.o \
- lustre_handles.o lustre_peer.o \
- statfs_pack.o obdo.o obd_config.o obd_mount.o \
- lu_object.o cl_object.o \
- cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o lprocfs_counters.o
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
deleted file mode 100644
index 1dace1419530..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/acl.c
- *
- * Lustre Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-#include "../include/lu_object.h"
-#include "../include/lustre_acl.h"
-#include "../include/lustre_eacl.h"
-#include "../include/obd_support.h"
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#define CFS_ACL_XATTR_VERSION POSIX_ACL_XATTR_VERSION
-
-enum {
- ES_UNK = 0, /* unknown stat */
- ES_UNC = 1, /* ACL entry is not changed */
- ES_MOD = 2, /* ACL entry is modified */
- ES_ADD = 3, /* ACL entry is added */
- ES_DEL = 4 /* ACL entry is deleted */
-};
-
-static inline void lustre_ext_acl_le_to_cpu(ext_acl_xattr_entry *d,
- ext_acl_xattr_entry *s)
-{
- d->e_tag = le16_to_cpu(s->e_tag);
- d->e_perm = le16_to_cpu(s->e_perm);
- d->e_id = le32_to_cpu(s->e_id);
- d->e_stat = le32_to_cpu(s->e_stat);
-}
-
-static inline void lustre_ext_acl_cpu_to_le(ext_acl_xattr_entry *d,
- ext_acl_xattr_entry *s)
-{
- d->e_tag = cpu_to_le16(s->e_tag);
- d->e_perm = cpu_to_le16(s->e_perm);
- d->e_id = cpu_to_le32(s->e_id);
- d->e_stat = cpu_to_le32(s->e_stat);
-}
-
-static inline void lustre_posix_acl_le_to_cpu(posix_acl_xattr_entry *d,
- posix_acl_xattr_entry *s)
-{
- d->e_tag = le16_to_cpu(s->e_tag);
- d->e_perm = le16_to_cpu(s->e_perm);
- d->e_id = le32_to_cpu(s->e_id);
-}
-
-static inline void lustre_posix_acl_cpu_to_le(posix_acl_xattr_entry *d,
- posix_acl_xattr_entry *s)
-{
- d->e_tag = cpu_to_le16(s->e_tag);
- d->e_perm = cpu_to_le16(s->e_perm);
- d->e_id = cpu_to_le32(s->e_id);
-}
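
These helpers convert ACL entries between the little-endian on-disk layout and host byte order, field by field. A minimal userspace analogue, using glibc's le16toh()/htole16() family in place of the kernel's le16_to_cpu()/cpu_to_le16() and invented struct names, might look like:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct disk_entry { uint16_t tag, perm; uint32_t id; }; /* little-endian */
struct cpu_entry  { uint16_t tag, perm; uint32_t id; }; /* host order    */

static void entry_le_to_cpu(struct cpu_entry *d, const struct disk_entry *s)
{
	d->tag  = le16toh(s->tag);
	d->perm = le16toh(s->perm);
	d->id   = le32toh(s->id);
}

static void entry_cpu_to_le(struct disk_entry *d, const struct cpu_entry *s)
{
	d->tag  = htole16(s->tag);
	d->perm = htole16(s->perm);
	d->id   = htole32(s->id);
}

int main(void)
{
	struct cpu_entry c = { 1, 6, 1000 }, back;
	struct disk_entry on_disk;

	entry_cpu_to_le(&on_disk, &c);
	entry_le_to_cpu(&back, &on_disk);
	printf("%u %u %u\n", (unsigned)back.tag, (unsigned)back.perm,
	       (unsigned)back.id);              /* prints: 1 6 1000 */
	return 0;
}
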
-
-
-/* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */
-static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
- int old_count, int new_count)
-{
- int old_size = CFS_ACL_XATTR_SIZE(old_count, posix_acl_xattr);
- int new_size = CFS_ACL_XATTR_SIZE(new_count, posix_acl_xattr);
- posix_acl_xattr_header *new;
-
- if (unlikely(old_count <= new_count))
- return old_size;
-
- new = kmemdup(*header, new_size, GFP_NOFS);
- if (unlikely(new == NULL))
- return -ENOMEM;
-
- kfree(*header);
- *header = new;
- return new_size;
-}
-
-/* if "new_count == 0", then "new = {0, NULL}", NOT NULL. */
-static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
- int old_count)
-{
- int ext_count = le32_to_cpu((*header)->a_count);
- int ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
- ext_acl_xattr_header *new;
-
- if (unlikely(old_count <= ext_count))
- return 0;
-
- new = kmemdup(*header, ext_size, GFP_NOFS);
- if (unlikely(new == NULL))
- return -ENOMEM;
-
- kfree(*header);
- *header = new;
- return 0;
-}
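
Both reduce_space helpers reclaim the tail of an over-allocated xattr buffer by duplicating it into a right-sized allocation (kmemdup) and freeing the original. A rough userspace sketch of the same idea, with hypothetical struct header/shrink_header names and a keep-the-old-buffer fallback instead of -ENOMEM, is shown below:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { unsigned short tag, perm; unsigned int id; };
struct header { unsigned int count; struct entry entries[]; };

static struct header *shrink_header(struct header *h, unsigned int new_count)
{
	size_t new_size = sizeof(*h) + new_count * sizeof(h->entries[0]);
	struct header *copy;

	if (new_count >= h->count)
		return h;               /* nothing to reclaim */

	copy = malloc(new_size);
	if (!copy)
		return h;               /* keep the larger buffer on failure */

	memcpy(copy, h, new_size);
	copy->count = new_count;
	free(h);
	return copy;
}

int main(void)
{
	struct header *h = calloc(1, sizeof(*h) + 8 * sizeof(struct entry));

	if (!h)
		return 1;
	h->count = 8;
	h = shrink_header(h, 3);        /* only 3 entries were actually used */
	printf("entries kept: %u\n", h->count);
	free(h);
	return 0;
}
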
-
-/*
- * Generate new extended ACL based on the posix ACL.
- */
-ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
-{
- int count, i, esize;
- ext_acl_xattr_header *new;
-
- if (unlikely(size < 0))
- return ERR_PTR(-EINVAL);
- else if (!size)
- count = 0;
- else
- count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
- new = kzalloc(esize, GFP_NOFS);
- if (unlikely(new == NULL))
- return ERR_PTR(-ENOMEM);
-
- new->a_count = cpu_to_le32(count);
- for (i = 0; i < count; i++) {
- new->a_entries[i].e_tag = header->a_entries[i].e_tag;
- new->a_entries[i].e_perm = header->a_entries[i].e_perm;
- new->a_entries[i].e_id = header->a_entries[i].e_id;
- new->a_entries[i].e_stat = cpu_to_le32(ES_UNK);
- }
-
- return new;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
-
-/*
- * Filter out the "nobody" entries in the posix ACL.
- */
-int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
- posix_acl_xattr_header **out)
-{
- int count, i, j, rc = 0;
- __u32 id;
- posix_acl_xattr_header *new;
-
- if (!size)
- return 0;
- if (size < sizeof(*new))
- return -EINVAL;
-
- new = kzalloc(size, GFP_NOFS);
- if (unlikely(new == NULL))
- return -ENOMEM;
-
- new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
- count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- for (i = 0, j = 0; i < count; i++) {
- id = le32_to_cpu(header->a_entries[i].e_id);
- switch (le16_to_cpu(header->a_entries[i].e_tag)) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- if (id != ACL_UNDEFINED_ID) {
- rc = -EIO;
- goto _out;
- }
-
- memcpy(&new->a_entries[j++], &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- case ACL_USER:
- if (id != NOBODY_UID)
- memcpy(&new->a_entries[j++],
- &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- case ACL_GROUP:
- if (id != NOBODY_GID)
- memcpy(&new->a_entries[j++],
- &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- default:
- rc = -EIO;
- goto _out;
- }
- }
-
- /* free unused space. */
- rc = lustre_posix_acl_xattr_reduce_space(&new, count, j);
- if (rc >= 0) {
- size = rc;
- *out = new;
- rc = 0;
- }
-
-_out:
- if (rc) {
- kfree(new);
- size = rc;
- }
- return size;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_filter);
-
-/*
- * Release the posix ACL space.
- */
-void lustre_posix_acl_xattr_free(posix_acl_xattr_header *header, int size)
-{
- kfree(header);
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_free);
-
-/*
- * Release the extended ACL space.
- */
-void lustre_ext_acl_xattr_free(ext_acl_xattr_header *header)
-{
- kfree(header);
-}
-EXPORT_SYMBOL(lustre_ext_acl_xattr_free);
-
-static ext_acl_xattr_entry *
-lustre_ext_acl_xattr_search(ext_acl_xattr_header *header,
- posix_acl_xattr_entry *entry, int *pos)
-{
- int once, start, end, i, j, count = le32_to_cpu(header->a_count);
-
- once = 0;
- start = *pos;
- end = count;
-
-again:
- for (i = start; i < end; i++) {
- if (header->a_entries[i].e_tag == entry->e_tag &&
- header->a_entries[i].e_id == entry->e_id) {
- j = i;
- if (++i >= count)
- i = 0;
- *pos = i;
- return &header->a_entries[j];
- }
- }
-
- if (!once) {
- once = 1;
- start = 0;
- end = *pos;
- goto again;
- }
-
- return NULL;
-}
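
The search above remembers where the previous match was found and scans forward from there, wrapping around once, so lookups over mostly in-order entries stay cheap. A standalone illustration of that wrap-around scan over a plain integer array (names invented) could be:

#include <stdio.h>

static int circular_find(const int *arr, int count, int key, int *pos)
{
	int start = *pos, once = 0, i, end = count;

again:
	for (i = start; i < end; i++) {
		if (arr[i] == key) {
			*pos = (i + 1 < count) ? i + 1 : 0;
			return i;
		}
	}
	if (!once) {            /* second pass: wrap to the beginning */
		once = 1;
		start = 0;
		end = *pos;
		goto again;
	}
	return -1;
}

int main(void)
{
	int arr[] = { 10, 20, 30, 40 }, pos = 0;

	printf("%d\n", circular_find(arr, 4, 30, &pos)); /* 2, pos -> 3   */
	printf("%d\n", circular_find(arr, 4, 10, &pos)); /* 0, after wrap */
	return 0;
}
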
-
-/*
- * Merge the posix ACL and the extended ACL into new extended ACL.
- */
-ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
- ext_acl_xattr_header *ext_header)
-{
- int ori_ext_count, posix_count, ext_count, ext_size;
- int i, j, pos = 0, rc = 0;
- posix_acl_xattr_entry pae;
- ext_acl_xattr_header *new;
- ext_acl_xattr_entry *ee, eae;
-
- if (unlikely(size < 0))
- return ERR_PTR(-EINVAL);
- else if (!size)
- posix_count = 0;
- else
- posix_count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- ori_ext_count = le32_to_cpu(ext_header->a_count);
- ext_count = posix_count + ori_ext_count;
- ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
-
- new = kzalloc(ext_size, GFP_NOFS);
- if (unlikely(new == NULL))
- return ERR_PTR(-ENOMEM);
-
- for (i = 0, j = 0; i < posix_count; i++) {
- lustre_posix_acl_le_to_cpu(&pae, &posix_header->a_entries[i]);
- switch (pae.e_tag) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- if (pae.e_id != ACL_UNDEFINED_ID) {
- rc = -EIO;
- goto out;
-			}
-			/* fall through: OBJ/MASK/OTHER entries are merged
-			 * the same way as ACL_USER entries below */
-		case ACL_USER:
- /* ignore "nobody" entry. */
- if (pae.e_id == NOBODY_UID)
- break;
-
- new->a_entries[j].e_tag =
- posix_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- posix_header->a_entries[i].e_perm;
- new->a_entries[j].e_id =
- posix_header->a_entries[i].e_id;
- ee = lustre_ext_acl_xattr_search(ext_header,
- &posix_header->a_entries[i], &pos);
- if (ee) {
- if (posix_header->a_entries[i].e_perm !=
- ee->e_perm)
- /* entry modified. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_MOD);
- else
- /* entry unchanged. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_UNC);
- } else {
- /* new entry. */
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_ADD);
- }
- break;
- case ACL_GROUP:
- /* ignore "nobody" entry. */
- if (pae.e_id == NOBODY_GID)
- break;
- new->a_entries[j].e_tag =
- posix_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- posix_header->a_entries[i].e_perm;
- new->a_entries[j].e_id =
- posix_header->a_entries[i].e_id;
- ee = lustre_ext_acl_xattr_search(ext_header,
- &posix_header->a_entries[i], &pos);
- if (ee) {
- if (posix_header->a_entries[i].e_perm !=
- ee->e_perm)
- /* entry modified. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_MOD);
- else
- /* entry unchanged. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_UNC);
- } else {
- /* new entry. */
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_ADD);
- }
- break;
- default:
- rc = -EIO;
- goto out;
- }
- }
-
- /* process deleted entries. */
- for (i = 0; i < ori_ext_count; i++) {
- lustre_ext_acl_le_to_cpu(&eae, &ext_header->a_entries[i]);
- if (eae.e_stat == ES_UNK) {
- /* ignore "nobody" entry. */
- if ((eae.e_tag == ACL_USER && eae.e_id == NOBODY_UID) ||
- (eae.e_tag == ACL_GROUP && eae.e_id == NOBODY_GID))
- continue;
-
- new->a_entries[j].e_tag =
- ext_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- ext_header->a_entries[i].e_perm;
- new->a_entries[j].e_id = ext_header->a_entries[i].e_id;
- new->a_entries[j++].e_stat = cpu_to_le32(ES_DEL);
- }
- }
-
- new->a_count = cpu_to_le32(j);
- /* free unused space. */
- rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count);
-
-out:
- if (rc) {
- kfree(new);
- new = ERR_PTR(rc);
- }
- return new;
-}
-EXPORT_SYMBOL(lustre_acl_xattr_merge2ext);
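
The merge walks the new POSIX ACL, tags each entry as added, modified, or unchanged relative to the extended ACL, and then marks extended entries that were never matched as deleted. A compact, self-contained sketch of that classification over invented (id, perm) lists:

#include <stdio.h>

enum state { ST_ADD, ST_MOD, ST_UNC, ST_DEL };

struct ent { int id; int perm; };

static int find(const struct ent *v, int n, int id)
{
	int i;

	for (i = 0; i < n; i++)
		if (v[i].id == id)
			return i;
	return -1;
}

static void classify(const struct ent *oldv, int on,
		     const struct ent *newv, int nn)
{
	static const char *name[] = { "ADD", "MOD", "UNC", "DEL" };
	int i, j;

	for (i = 0; i < nn; i++) {
		j = find(oldv, on, newv[i].id);
		if (j < 0)
			printf("id %d: %s\n", newv[i].id, name[ST_ADD]);
		else if (oldv[j].perm != newv[i].perm)
			printf("id %d: %s\n", newv[i].id, name[ST_MOD]);
		else
			printf("id %d: %s\n", newv[i].id, name[ST_UNC]);
	}
	for (j = 0; j < on; j++)        /* anything unmatched was deleted */
		if (find(newv, nn, oldv[j].id) < 0)
			printf("id %d: %s\n", oldv[j].id, name[ST_DEL]);
}

int main(void)
{
	struct ent oldv[] = { { 1, 4 }, { 2, 6 } };
	struct ent newv[] = { { 2, 7 }, { 3, 4 } };

	classify(oldv, 2, newv, 2);     /* 2: MOD, 3: ADD, 1: DEL */
	return 0;
}
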
-
-#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
deleted file mode 100644
index 7eb0ad7b3644..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal cl interfaces.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-#ifndef _CL_INTERNAL_H
-#define _CL_INTERNAL_H
-
-#define CLT_PVEC_SIZE (14)
-
-/**
- * Possible levels of the nesting. Currently this is 2: there are "top"
- * entities (files, extent locks), and "sub" entities (stripes and stripe
- * locks). This is used only for debugging counters right now.
- */
-enum clt_nesting_level {
- CNL_TOP,
- CNL_SUB,
- CNL_NR
-};
-
-/**
- * Counters used to check correctness of cl_lock interface usage.
- */
-struct cl_thread_counters {
- /**
- * Number of outstanding calls to cl_lock_mutex_get() made by the
- * current thread. For debugging.
- */
- int ctc_nr_locks_locked;
- /** List of locked locks. */
- struct lu_ref ctc_locks_locked;
- /** Number of outstanding holds on locks. */
- int ctc_nr_held;
- /** Number of outstanding uses on locks. */
- int ctc_nr_used;
- /** Number of held extent locks. */
- int ctc_nr_locks_acquired;
-};
-
-/**
- * Thread local state internal for generic cl-code.
- */
-struct cl_thread_info {
- /*
- * Common fields.
- */
- struct cl_io clt_io;
- struct cl_2queue clt_queue;
-
- /*
- * Fields used by cl_lock.c
- */
- struct cl_lock_descr clt_descr;
- struct cl_page_list clt_list;
- /**
- * Counters for every level of lock nesting.
- */
- struct cl_thread_counters clt_counters[CNL_NR];
- /** @} debugging */
-
- /*
- * Fields used by cl_page.c
- */
- struct cl_page *clt_pvec[CLT_PVEC_SIZE];
-
- /*
- * Fields used by cl_io.c
- */
- /**
- * Pointer to the topmost ongoing IO in this thread.
- */
- struct cl_io *clt_current_io;
- /**
- * Used for submitting a sync io.
- */
- struct cl_sync_io clt_anchor;
- /**
- * Fields used by cl_lock_discard_pages().
- */
- pgoff_t clt_next_index;
- pgoff_t clt_fn_index; /* first non-overlapped index */
-};
-
-struct cl_thread_info *cl_env_info(const struct lu_env *env);
-
-#endif /* _CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
deleted file mode 100644
index a822a0b3464e..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ /dev/null
@@ -1,1605 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client IO.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include <linux/list.h>
-#include "../include/cl_object.h"
-#include "cl_internal.h"
-
-/*****************************************************************************
- *
- * cl_io interface.
- *
- */
-
-#define cl_io_for_each(slice, io) \
- list_for_each_entry((slice), &io->ci_layers, cis_linkage)
-#define cl_io_for_each_reverse(slice, io) \
- list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
-
-static inline int cl_io_type_is_valid(enum cl_io_type type)
-{
- return CIT_READ <= type && type < CIT_OP_NR;
-}
-
-static inline int cl_io_is_loopable(const struct cl_io *io)
-{
- return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
-}
-
-/**
- * Returns true iff there is an IO ongoing in the given environment.
- */
-int cl_io_is_going(const struct lu_env *env)
-{
- return cl_env_info(env)->clt_current_io != NULL;
-}
-EXPORT_SYMBOL(cl_io_is_going);
-
-/**
- * cl_io invariant that holds at all times when exported cl_io_*() functions
- * are entered and left.
- */
-static int cl_io_invariant(const struct cl_io *io)
-{
- struct cl_io *up;
-
- up = io->ci_parent;
- return
- /*
- * io can own pages only when it is ongoing. Sub-io might
- * still be in CIS_LOCKED state when top-io is in
- * CIS_IO_GOING.
- */
- ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
- (io->ci_state == CIS_LOCKED && up != NULL));
-}
-
-/**
- * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
- */
-void cl_io_fini(const struct lu_env *env, struct cl_io *io)
-{
- struct cl_io_slice *slice;
- struct cl_thread_info *info;
-
- LINVRNT(cl_io_type_is_valid(io->ci_type));
- LINVRNT(cl_io_invariant(io));
-
- while (!list_empty(&io->ci_layers)) {
- slice = container_of(io->ci_layers.prev, struct cl_io_slice,
- cis_linkage);
- list_del_init(&slice->cis_linkage);
- if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
- slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
- /*
- * Invalidate slice to catch use after free. This assumes that
- * slices are allocated within session and can be touched
- * after ->cio_fini() returns.
- */
- slice->cis_io = NULL;
- }
- io->ci_state = CIS_FINI;
- info = cl_env_info(env);
- if (info->clt_current_io == io)
- info->clt_current_io = NULL;
-
- /* sanity check for layout change */
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- break;
- case CIT_FAULT:
- case CIT_FSYNC:
- LASSERT(!io->ci_need_restart);
- break;
- case CIT_SETATTR:
- case CIT_MISC:
- /* Check ignore layout change conf */
- LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
- !io->ci_need_restart));
- break;
- default:
- LBUG();
- }
-}
-EXPORT_SYMBOL(cl_io_fini);
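
cl_io is built as a stack of per-layer slices: init hooks run top-to-bottom when the io is set up, and fini hooks are popped off bottom-to-top here. A toy standalone sketch of that ordering convention, with invented layer names and hooks, might be:

#include <stdio.h>

struct layer {
	const char *name;
	int  (*init)(void);
	void (*fini)(void);
};

static int  gen_init(void) { return 0; }
static void gen_fini(void) { }

static const struct layer layers[] = {
	{ "vvp", gen_init, gen_fini },  /* topmost layer */
	{ "lov", gen_init, gen_fini },
	{ "osc", gen_init, gen_fini },  /* bottom layer  */
};

static int stack_init(void)
{
	int i, rc = 0;

	for (i = 0; i < 3; i++) {       /* top-to-bottom */
		rc = layers[i].init();
		if (rc)
			goto undo;
		printf("init %s\n", layers[i].name);
	}
	return 0;
undo:
	while (--i >= 0)                /* tear down whatever succeeded */
		layers[i].fini();
	return rc;
}

static void stack_fini(void)
{
	int i;

	for (i = 2; i >= 0; i--) {      /* bottom-to-top */
		layers[i].fini();
		printf("fini %s\n", layers[i].name);
	}
}

int main(void)
{
	if (stack_init() == 0)
		stack_fini();
	return 0;
}
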
-
-static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
-{
- struct cl_object *scan;
- int result;
-
- LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
- LINVRNT(cl_io_type_is_valid(iot));
- LINVRNT(cl_io_invariant(io));
-
- io->ci_type = iot;
- INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
- INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
- INIT_LIST_HEAD(&io->ci_lockset.cls_done);
- INIT_LIST_HEAD(&io->ci_layers);
-
- result = 0;
- cl_object_for_each(scan, obj) {
- if (scan->co_ops->coo_io_init != NULL) {
- result = scan->co_ops->coo_io_init(env, scan, io);
- if (result != 0)
- break;
- }
- }
- if (result == 0)
- io->ci_state = CIS_INIT;
- return result;
-}
-
-/**
- * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
- *
- * \pre obj != cl_object_top(obj)
- */
-int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
-{
- struct cl_thread_info *info = cl_env_info(env);
-
- LASSERT(obj != cl_object_top(obj));
- if (info->clt_current_io == NULL)
- info->clt_current_io = io;
- return cl_io_init0(env, io, iot, obj);
-}
-EXPORT_SYMBOL(cl_io_sub_init);
-
-/**
- * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
- *
- * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
- * what the latter returned.
- *
- * \pre obj == cl_object_top(obj)
- * \pre cl_io_type_is_valid(iot)
- * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
- */
-int cl_io_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
-{
- struct cl_thread_info *info = cl_env_info(env);
-
- LASSERT(obj == cl_object_top(obj));
- LASSERT(info->clt_current_io == NULL);
-
- info->clt_current_io = io;
- return cl_io_init0(env, io, iot, obj);
-}
-EXPORT_SYMBOL(cl_io_init);
-
-/**
- * Initialize read or write io.
- *
- * \pre iot == CIT_READ || iot == CIT_WRITE
- */
-int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count)
-{
- LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
- LINVRNT(io->ci_obj != NULL);
-
- LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
- "io range: %u [%llu, %llu) %u %u\n",
- iot, (__u64)pos, (__u64)pos + count,
- io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
- io->u.ci_rw.crw_pos = pos;
- io->u.ci_rw.crw_count = count;
- return cl_io_init(env, io, iot, io->ci_obj);
-}
-EXPORT_SYMBOL(cl_io_rw_init);
-
-static inline const struct lu_fid *
-cl_lock_descr_fid(const struct cl_lock_descr *descr)
-{
- return lu_object_fid(&descr->cld_obj->co_lu);
-}
-
-static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
- __diff_normalize(d0->cld_start, d1->cld_start);
-}
-
-static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- int ret;
-
- ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
- if (ret)
- return ret;
- if (d0->cld_end < d1->cld_start)
- return -1;
-	if (d0->cld_start > d1->cld_end)
- return 1;
- return 0;
-}
-
-static void cl_lock_descr_merge(struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- d0->cld_start = min(d0->cld_start, d1->cld_start);
- d0->cld_end = max(d0->cld_end, d1->cld_end);
-
- if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
- d0->cld_mode = CLM_WRITE;
-
- if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
- d0->cld_mode = CLM_GROUP;
-}
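
The comparison and merge above treat each lock descriptor as a range on an object: disjoint ranges order strictly before or after each other, while overlapping ones are merged and the lock mode widened. A minimal sketch of the same range logic with an invented struct range:

#include <stdio.h>

struct range { long start, end; int write; };

static int range_cmp(const struct range *a, const struct range *b)
{
	if (a->end < b->start)
		return -1;              /* a entirely before b    */
	if (a->start > b->end)
		return 1;               /* a entirely after b     */
	return 0;                       /* the two ranges overlap */
}

static void range_merge(struct range *dst, const struct range *src)
{
	if (src->start < dst->start)
		dst->start = src->start;
	if (src->end > dst->end)
		dst->end = src->end;
	dst->write |= src->write;       /* widen the lock mode    */
}

int main(void)
{
	struct range a = { 0, 9, 0 }, b = { 5, 20, 1 };

	if (range_cmp(&a, &b) == 0) {
		range_merge(&a, &b);
		printf("[%ld, %ld] write=%d\n", a.start, a.end, a.write);
	}
	return 0;
}
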
-
-/*
- * Sort locks in lexicographical order of their (fid, start-offset) pairs.
- */
-static void cl_io_locks_sort(struct cl_io *io)
-{
- int done = 0;
-
- /* hidden treasure: bubble sort for now. */
- do {
- struct cl_io_lock_link *curr;
- struct cl_io_lock_link *prev;
- struct cl_io_lock_link *temp;
-
- done = 1;
- prev = NULL;
-
- list_for_each_entry_safe(curr, temp,
- &io->ci_lockset.cls_todo,
- cill_linkage) {
- if (prev != NULL) {
- switch (cl_lock_descr_sort(&prev->cill_descr,
- &curr->cill_descr)) {
- case 0:
- /*
- * IMPOSSIBLE: Identical locks are
- * already removed at
- * this point.
- */
- default:
- LBUG();
- case 1:
- list_move_tail(&curr->cill_linkage,
- &prev->cill_linkage);
- done = 0;
- continue; /* don't change prev: it's
- * still "previous" */
- case -1: /* already in order */
- break;
- }
- }
- prev = curr;
- }
- } while (!done);
-}
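
Sorting the to-do locks into a single canonical (fid, start-offset) order means every io acquires its locks in the same global order, which rules out ABBA deadlocks between concurrent ios. A tiny illustrative bubble sort over invented (obj_id, start) keys, echoing the list-based sort above:

#include <stdio.h>

struct key { int obj_id; long start; };

static int key_before(const struct key *a, const struct key *b)
{
	if (a->obj_id != b->obj_id)
		return a->obj_id < b->obj_id;
	return a->start < b->start;
}

static void sort_keys(struct key *v, int n)
{
	struct key tmp;
	int done, i;

	do {                            /* bubble sort, as in the original */
		done = 1;
		for (i = 1; i < n; i++) {
			if (key_before(&v[i], &v[i - 1])) {
				tmp = v[i];
				v[i] = v[i - 1];
				v[i - 1] = tmp;
				done = 0;
			}
		}
	} while (!done);
}

int main(void)
{
	struct key v[] = { { 2, 0 }, { 1, 4096 }, { 1, 0 } };
	int i;

	sort_keys(v, 3);
	for (i = 0; i < 3; i++)
		printf("(%d, %ld)\n", v[i].obj_id, v[i].start);
	return 0;
}
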
-
-/**
- * Check whether \a queue contains locks matching \a need.
- *
- * \retval +ve there is a matching lock in the \a queue
- * \retval 0 there are no matching locks in the \a queue
- */
-int cl_queue_match(const struct list_head *queue,
- const struct cl_lock_descr *need)
-{
- struct cl_io_lock_link *scan;
-
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_match(&scan->cill_descr, need))
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(cl_queue_match);
-
-static int cl_queue_merge(const struct list_head *queue,
- const struct cl_lock_descr *need)
-{
- struct cl_io_lock_link *scan;
-
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_cmp(&scan->cill_descr, need))
- continue;
- cl_lock_descr_merge(&scan->cill_descr, need);
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
- scan->cill_descr.cld_end);
- return 1;
- }
- return 0;
-
-}
-
-static int cl_lockset_match(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_match(&set->cls_curr, need) ||
- cl_queue_match(&set->cls_done, need);
-}
-
-static int cl_lockset_merge(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_merge(&set->cls_todo, need) ||
- cl_lockset_match(set, need);
-}
-
-static int cl_lockset_lock_one(const struct lu_env *env,
- struct cl_io *io, struct cl_lockset *set,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock;
- int result;
-
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
-
- if (!IS_ERR(lock)) {
- link->cill_lock = lock;
- list_move(&link->cill_linkage, &set->cls_curr);
- if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage,
- &set->cls_done);
- } else
- result = 0;
- } else
- result = PTR_ERR(lock);
- return result;
-}
-
-static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock = link->cill_lock;
-
- list_del_init(&link->cill_linkage);
- if (lock != NULL) {
- cl_lock_release(env, lock, "io", io);
- link->cill_lock = NULL;
- }
- if (link->cill_fini != NULL)
- link->cill_fini(env, link);
-}
-
-static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_lockset *set)
-{
- struct cl_io_lock_link *link;
- struct cl_io_lock_link *temp;
- struct cl_lock *lock;
- int result;
-
- result = 0;
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- if (!cl_lockset_match(set, &link->cill_descr)) {
- /* XXX some locking to guarantee that locks aren't
- * expanded in between. */
- result = cl_lockset_lock_one(env, io, set, link);
- if (result != 0)
- break;
- } else
- cl_lock_link_fini(env, io, link);
- }
- if (result == 0) {
- list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
- lock = link->cill_lock;
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage,
- &set->cls_done);
- else
- break;
- }
- }
- return result;
-}
-
-/**
- * Takes locks necessary for the current iteration of io.
- *
- * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
- * by layers for the current iteration. Then sort locks (to avoid dead-locks),
- * and acquire them.
- */
-int cl_io_lock(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_IT_STARTED);
- LINVRNT(cl_io_invariant(io));
-
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
- if (result != 0)
- break;
- }
- if (result == 0) {
- cl_io_locks_sort(io);
- result = cl_lockset_lock(env, io, &io->ci_lockset);
- }
- if (result != 0)
- cl_io_unlock(env, io);
- else
- io->ci_state = CIS_LOCKED;
- return result;
-}
-EXPORT_SYMBOL(cl_io_lock);
-
-/**
- * Release locks taken by io.
- */
-void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
-{
- struct cl_lockset *set;
- struct cl_io_lock_link *link;
- struct cl_io_lock_link *temp;
- const struct cl_io_slice *scan;
-
- LASSERT(cl_io_is_loopable(io));
- LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
- LINVRNT(cl_io_invariant(io));
-
- set = &io->ci_lockset;
-
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
- cl_lock_link_fini(env, io, link);
-
- list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
- cl_lock_link_fini(env, io, link);
-
- list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
- cl_unuse(env, link->cill_lock);
- cl_lock_link_fini(env, io, link);
- }
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
- scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
- }
- io->ci_state = CIS_UNLOCKED;
- LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
-}
-EXPORT_SYMBOL(cl_io_unlock);
-
-/**
- * Prepares next iteration of io.
- *
- * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
- * layers a chance to modify io parameters, e.g., so that lov can restrict io
- * to a single stripe.
- */
-int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
- int result;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
- LINVRNT(cl_io_invariant(io));
-
- result = 0;
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
- scan);
- if (result != 0)
- break;
- }
- if (result == 0)
- io->ci_state = CIS_IT_STARTED;
- return result;
-}
-EXPORT_SYMBOL(cl_io_iter_init);
-
-/**
- * Finalizes io iteration.
- *
- * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
- */
-void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_UNLOCKED);
- LINVRNT(cl_io_invariant(io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
- scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
- }
- io->ci_state = CIS_IT_ENDED;
-}
-EXPORT_SYMBOL(cl_io_iter_fini);
-
-/**
- * Records that read or write io progressed \a nob bytes forward.
- */
-void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
-{
- const struct cl_io_slice *scan;
-
- LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- nob == 0);
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(cl_io_invariant(io));
-
- io->u.ci_rw.crw_pos += nob;
- io->u.ci_rw.crw_count -= nob;
-
- /* layers have to be notified. */
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
- scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
- nob);
- }
-}
-EXPORT_SYMBOL(cl_io_rw_advance);
-
-/**
- * Adds a lock to a lockset.
- */
-int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link)
-{
- int result;
-
- if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
- result = 1;
- else {
- list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
- result = 0;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_lock_add);
-
-static void cl_free_io_lock_link(const struct lu_env *env,
- struct cl_io_lock_link *link)
-{
- kfree(link);
-}
-
-/**
- * Allocates new lock link, and uses it to add a lock to a lockset.
- */
-int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr)
-{
- struct cl_io_lock_link *link;
- int result;
-
- link = kzalloc(sizeof(*link), GFP_NOFS);
- if (link != NULL) {
- link->cill_descr = *descr;
- link->cill_fini = cl_free_io_lock_link;
- result = cl_io_lock_add(env, io, link);
- if (result) /* lock match */
- link->cill_fini(env, link);
- } else
- result = -ENOMEM;
-
- return result;
-}
-EXPORT_SYMBOL(cl_io_lock_alloc_add);
-
-/**
- * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
- */
-int cl_io_start(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
-
- io->ci_state = CIS_IO_GOING;
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
- if (result != 0)
- break;
- }
- if (result >= 0)
- result = 0;
- return result;
-}
-EXPORT_SYMBOL(cl_io_start);
-
-/**
- * Wait until current io iteration is finished by calling
- * cl_io_operations::cio_end() bottom-to-top.
- */
-void cl_io_end(const struct lu_env *env, struct cl_io *io)
-{
- const struct cl_io_slice *scan;
-
- LINVRNT(cl_io_is_loopable(io));
- LINVRNT(io->ci_state == CIS_IO_GOING);
- LINVRNT(cl_io_invariant(io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
- scan->cis_iop->op[io->ci_type].cio_end(env, scan);
- /* TODO: error handling. */
- }
- io->ci_state = CIS_IO_FINISHED;
-}
-EXPORT_SYMBOL(cl_io_end);
-
-static const struct cl_page_slice *
-cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
- LINVRNT(slice != NULL);
- return slice;
-}
-
-/**
- * True iff \a page is within \a io range.
- */
-static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
-{
- int result = 1;
- loff_t start;
- loff_t end;
- pgoff_t idx;
-
- idx = page->cp_index;
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- /*
- * check that [start, end) and [pos, pos + count) extents
- * overlap.
- */
- if (!cl_io_is_append(io)) {
- const struct cl_io_rw_common *crw = &(io->u.ci_rw);
- start = cl_offset(page->cp_obj, idx);
- end = cl_offset(page->cp_obj, idx + 1);
- result = crw->crw_pos < end &&
- start < crw->crw_pos + crw->crw_count;
- }
- break;
- case CIT_FAULT:
- result = io->u.ci_fault.ft_index == idx;
- break;
- default:
- LBUG();
- }
- return result;
-}
-
-/**
- * Called by read io, when page has to be read from the server.
- *
- * \see cl_io_operations::cio_read_page()
- */
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- const struct cl_io_slice *scan;
- struct cl_2queue *queue;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
- LINVRNT(cl_page_is_owned(page, io));
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_page_in_io(page, io));
- LINVRNT(cl_io_invariant(io));
-
- queue = &io->ci_queue;
-
- cl_2queue_init(queue);
- /*
- * ->cio_read_page() methods called in the loop below are supposed to
- * never block waiting for network (the only subtle point is the
- * creation of new pages for read-ahead that might result in cache
- * shrinking, but currently only clean pages are shrunk and this
- * requires no network io).
- *
- * Should this ever start blocking, a retry loop would be needed for
- * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
- */
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_read_page != NULL) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- LINVRNT(slice != NULL);
- result = scan->cis_iop->cio_read_page(env, scan, slice);
- if (result != 0)
- break;
- }
- }
- if (result == 0)
- result = cl_io_submit_rw(env, io, CRT_READ, queue);
- /*
- * Unlock unsent pages in case of error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
- return result;
-}
-EXPORT_SYMBOL(cl_io_read_page);
-
-/**
- * Called by write io to prepare page to receive data from user buffer.
- *
- * \see cl_io_operations::cio_prepare_write()
- */
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(cl_page_is_owned(page, io));
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- LASSERT(cl_page_in_io(page, io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->cio_prepare_write != NULL) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_prepare_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_prepare_write);
-
-/**
- * Called by write io after user data were copied into a page.
- *
- * \see cl_io_operations::cio_commit_write()
- */
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- /*
- * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
- * already called cl_page_cache_add(), moving page into CPS_CACHED
-	 * state. A better (and more general) way of dealing with this situation
- * is needed.
- */
- LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
- LASSERT(cl_page_in_io(page, io));
-
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_commit_write != NULL) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_commit_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
- }
- LINVRNT(result <= 0);
- return result;
-}
-EXPORT_SYMBOL(cl_io_commit_write);
-
-/**
- * Submits a list of pages for immediate io.
- *
- * After the function returns, the submitted pages are moved to the
- * queue->c2_qout queue, while queue->c2_qin contains both the pages that
- * did not need to be submitted and the pages that failed to submit.
- *
- * \returns 0 if at least one page was submitted, error code otherwise.
- * \see cl_io_operations::cio_submit()
- */
-int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type crt, struct cl_2queue *queue)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
-
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->req_op[crt].cio_submit == NULL)
- continue;
- result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
- queue);
- if (result != 0)
- break;
- }
- /*
- * If ->cio_submit() failed, no pages were sent.
- */
- LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
- return result;
-}
-EXPORT_SYMBOL(cl_io_submit_rw);
-
-/**
- * Submit a sync_io and wait for the IO to be finished, or error happens.
- * If \a timeout is zero, it means to wait for the IO unconditionally.
- */
-int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue,
- long timeout)
-{
- struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
- struct cl_page *pg;
- int rc;
-
- cl_page_list_for_each(pg, &queue->c2_qin) {
- LASSERT(pg->cp_sync_io == NULL);
- pg->cp_sync_io = anchor;
- }
-
- cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
- rc = cl_io_submit_rw(env, io, iot, queue);
- if (rc == 0) {
- /*
- * If some pages weren't sent for any reason (e.g.,
- * read found up-to-date pages in the cache, or write found
- * clean pages), count them as completed to avoid infinite
- * wait.
- */
- cl_page_list_for_each(pg, &queue->c2_qin) {
- pg->cp_sync_io = NULL;
- cl_sync_io_note(anchor, 1);
- }
-
- /* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, io, &queue->c2_qout,
- anchor, timeout);
- } else {
- LASSERT(list_empty(&queue->c2_qout.pl_pages));
- cl_page_list_for_each(pg, &queue->c2_qin)
- pg->cp_sync_io = NULL;
- }
- return rc;
-}
-EXPORT_SYMBOL(cl_io_submit_sync);
-
-/**
- * Cancel an IO which has been submitted by cl_io_submit_rw.
- */
-int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue)
-{
- struct cl_page *page;
- int result = 0;
-
- CERROR("Canceling ongoing page transmission\n");
- cl_page_list_for_each(page, queue) {
- int rc;
-
- LINVRNT(cl_page_in_io(page, io));
- rc = cl_page_cancel(env, page);
- result = result ?: rc;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_cancel);
-
-/**
- * Main io loop.
- *
- * Pumps io through iterations calling
- *
- * - cl_io_iter_init()
- *
- * - cl_io_lock()
- *
- * - cl_io_start()
- *
- * - cl_io_end()
- *
- * - cl_io_unlock()
- *
- * - cl_io_iter_fini()
- *
- * repeatedly until there is no more io to do.
- */
-int cl_io_loop(const struct lu_env *env, struct cl_io *io)
-{
- int result = 0;
-
- LINVRNT(cl_io_is_loopable(io));
-
- do {
- size_t nob;
-
- io->ci_continue = 0;
- result = cl_io_iter_init(env, io);
- if (result == 0) {
- nob = io->ci_nob;
- result = cl_io_lock(env, io);
- if (result == 0) {
- /*
-				 * Notify layers that locks have been taken,
- * and do actual i/o.
- *
- * - llite: kms, short read;
- * - llite: generic_file_read();
- */
- result = cl_io_start(env, io);
- /*
- * Send any remaining pending
- * io, etc.
- *
- * - llite: ll_rw_stats_tally.
- */
- cl_io_end(env, io);
- cl_io_unlock(env, io);
- cl_io_rw_advance(env, io, io->ci_nob - nob);
- }
- }
- cl_io_iter_fini(env, io);
- } while (result == 0 && io->ci_continue);
- if (result == 0)
- result = io->ci_result;
- return result < 0 ? result : 0;
-}
-EXPORT_SYMBOL(cl_io_loop);
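
cl_io_loop() pumps the io through repeated iterations: set up the iteration, take locks, start the io, then unwind in reverse order and go around again while the io asks to continue. A condensed standalone sketch of that loop, with stand-in functions and a hypothetical 4096-byte per-pass limit:

#include <stdio.h>

struct io { long pos, count; int cont; };

static int  iter_init(struct io *io)  { io->cont = 0; return 0; }
static int  io_lock(struct io *io)    { (void)io; return 0; }
static void io_unlock(struct io *io)  { (void)io; }
static void iter_fini(struct io *io)  { (void)io; }

static long io_start(struct io *io)
{
	long nob = io->count > 4096 ? 4096 : io->count; /* one chunk per pass */

	io->cont = io->count > nob;     /* more work left for a next pass */
	return nob;
}

static int io_loop(struct io *io)
{
	int rc;

	do {
		rc = iter_init(io);
		if (rc == 0) {
			rc = io_lock(io);
			if (rc == 0) {
				long nob = io_start(io);

				io_unlock(io);
				io->pos += nob; /* cf. cl_io_rw_advance() */
				io->count -= nob;
			}
		}
		iter_fini(io);
	} while (rc == 0 && io->cont);
	return rc;
}

int main(void)
{
	struct io io = { 0, 10000, 0 };

	io_loop(&io);
	printf("pos=%ld left=%ld\n", io.pos, io.count); /* pos=10000 left=0 */
	return 0;
}
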
-
-/**
- * Adds io slice to the cl_io.
- *
- * This is called by cl_object_operations::coo_io_init() methods to add a
- * per-layer state to the io. New state is added at the end of
- * cl_io::ci_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
- */
-void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
- struct cl_object *obj,
- const struct cl_io_operations *ops)
-{
- struct list_head *linkage = &slice->cis_linkage;
-
- LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
- list_empty(linkage));
-
- list_add_tail(linkage, &io->ci_layers);
- slice->cis_io = io;
- slice->cis_obj = obj;
- slice->cis_iop = ops;
-}
-EXPORT_SYMBOL(cl_io_slice_add);
-
-
-/**
- * Initializes page list.
- */
-void cl_page_list_init(struct cl_page_list *plist)
-{
- plist->pl_nr = 0;
- INIT_LIST_HEAD(&plist->pl_pages);
- plist->pl_owner = current;
-}
-EXPORT_SYMBOL(cl_page_list_init);
-
-/**
- * Adds a page to a page list.
- */
-void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
-{
- /* it would be better to check that page is owned by "current" io, but
- * it is not passed here. */
- LASSERT(page->cp_owner != NULL);
- LINVRNT(plist->pl_owner == current);
-
- lockdep_off();
- mutex_lock(&page->cp_mutex);
- lockdep_on();
- LASSERT(list_empty(&page->cp_batch));
- list_add_tail(&page->cp_batch, &plist->pl_pages);
- ++plist->pl_nr;
- lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
- cl_page_get(page);
-}
-EXPORT_SYMBOL(cl_page_list_add);
-
-/**
- * Removes a page from a page list.
- */
-void cl_page_list_del(const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page)
-{
- LASSERT(plist->pl_nr > 0);
- LINVRNT(plist->pl_owner == current);
-
- list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
- --plist->pl_nr;
- lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
- cl_page_put(env, page);
-}
-EXPORT_SYMBOL(cl_page_list_del);
-
-/**
- * Moves a page from one page list to another.
- */
-void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page)
-{
- LASSERT(src->pl_nr > 0);
- LINVRNT(dst->pl_owner == current);
- LINVRNT(src->pl_owner == current);
-
- list_move_tail(&page->cp_batch, &dst->pl_pages);
- --src->pl_nr;
- ++dst->pl_nr;
- lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
- src, dst);
-}
-EXPORT_SYMBOL(cl_page_list_move);
-
-/**
- * Splice one cl_page_list onto another, just as list_head splicing does.
- */
-void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
-{
- struct cl_page *page;
- struct cl_page *tmp;
-
- LINVRNT(list->pl_owner == current);
- LINVRNT(head->pl_owner == current);
-
- cl_page_list_for_each_safe(page, tmp, list)
- cl_page_list_move(head, list, page);
-}
-EXPORT_SYMBOL(cl_page_list_splice);
-
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-
-/**
- * Disowns pages in a queue.
- */
-void cl_page_list_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
-{
- struct cl_page *page;
- struct cl_page *temp;
-
- LINVRNT(plist->pl_owner == current);
-
- cl_page_list_for_each_safe(page, temp, plist) {
- LASSERT(plist->pl_nr > 0);
-
- list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
- --plist->pl_nr;
- /*
- * cl_page_disown0 rather than usual cl_page_disown() is used,
- * because pages are possibly in CPS_FREEING state already due
- * to the call to cl_page_list_discard().
- */
- /*
- * XXX cl_page_disown0() will fail if page is not locked.
- */
- cl_page_disown0(env, io, page);
- lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
- plist);
- cl_page_put(env, page);
- }
-}
-EXPORT_SYMBOL(cl_page_list_disown);
-
-/**
- * Releases pages from queue.
- */
-void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
-{
- struct cl_page *page;
- struct cl_page *temp;
-
- LINVRNT(plist->pl_owner == current);
-
- cl_page_list_for_each_safe(page, temp, plist)
- cl_page_list_del(env, plist, page);
- LASSERT(plist->pl_nr == 0);
-}
-EXPORT_SYMBOL(cl_page_list_fini);
-
-/**
- * Assumes (takes ownership of) all pages in a queue.
- */
-void cl_page_list_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
-{
- struct cl_page *page;
-
- LINVRNT(plist->pl_owner == current);
-
- cl_page_list_for_each(page, plist)
- cl_page_assume(env, io, page);
-}
-EXPORT_SYMBOL(cl_page_list_assume);
-
-/**
- * Discards all pages in a queue.
- */
-void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *plist)
-{
- struct cl_page *page;
-
- LINVRNT(plist->pl_owner == current);
- cl_page_list_for_each(page, plist)
- cl_page_discard(env, io, page);
-}
-EXPORT_SYMBOL(cl_page_list_discard);
-
-/**
- * Initialize dual page queue.
- */
-void cl_2queue_init(struct cl_2queue *queue)
-{
- cl_page_list_init(&queue->c2_qin);
- cl_page_list_init(&queue->c2_qout);
-}
-EXPORT_SYMBOL(cl_2queue_init);
-
-/**
- * Add a page to the incoming page list of 2-queue.
- */
-void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
-{
- cl_page_list_add(&queue->c2_qin, page);
-}
-EXPORT_SYMBOL(cl_2queue_add);
-
-/**
- * Disown pages in both lists of a 2-queue.
- */
-void cl_2queue_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue)
-{
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_page_list_disown(env, io, &queue->c2_qout);
-}
-EXPORT_SYMBOL(cl_2queue_disown);
-
-/**
- * Discard (truncate) pages in both lists of a 2-queue.
- */
-void cl_2queue_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue)
-{
- cl_page_list_discard(env, io, &queue->c2_qin);
- cl_page_list_discard(env, io, &queue->c2_qout);
-}
-EXPORT_SYMBOL(cl_2queue_discard);
-
-/**
- * Finalize both page lists of a 2-queue.
- */
-void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
-{
- cl_page_list_fini(env, &queue->c2_qout);
- cl_page_list_fini(env, &queue->c2_qin);
-}
-EXPORT_SYMBOL(cl_2queue_fini);
-
-/**
- * Initialize a 2-queue to contain \a page in its incoming page list.
- */
-void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
-{
- cl_2queue_init(queue);
- cl_2queue_add(queue, page);
-}
-EXPORT_SYMBOL(cl_2queue_init_page);
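
The 2-queue keeps pages to be sent on c2_qin and moves whatever the transfer layer actually accepted onto c2_qout, so the caller can tell submitted pages from leftovers. A rough sketch of that in/out convention over a plain singly linked list (not cl_page_list); the odd/even "acceptance" rule is purely for the demo:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };
struct queue { struct node *head; int nr; };

static void push(struct queue *q, int id)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		exit(1);
	n->id = id;
	n->next = q->head;
	q->head = n;
	q->nr++;
}

/* move every accepted node from qin to qout; reject odd ids for the demo */
static void submit(struct queue *qin, struct queue *qout)
{
	struct node **link = &qin->head;

	while (*link) {
		struct node *n = *link;

		if (n->id % 2 == 0) {           /* "accepted" by the demo */
			*link = n->next;
			qin->nr--;
			n->next = qout->head;
			qout->head = n;
			qout->nr++;
		} else {
			link = &n->next;
		}
	}
}

int main(void)
{
	struct queue qin = { NULL, 0 }, qout = { NULL, 0 };
	int i;

	for (i = 0; i < 6; i++)
		push(&qin, i);
	submit(&qin, &qout);
	printf("in=%d out=%d\n", qin.nr, qout.nr);      /* in=3 out=3 */
	return 0;
}
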
-
-/**
- * Returns top-level io.
- *
- * \see cl_object_top(), cl_page_top().
- */
-struct cl_io *cl_io_top(struct cl_io *io)
-{
- while (io->ci_parent != NULL)
- io = io->ci_parent;
- return io;
-}
-EXPORT_SYMBOL(cl_io_top);
-
-/**
- * Adds request slice to the compound request.
- *
- * This is called by cl_device_operations::cdo_req_init() methods to add a
- * per-layer state to the request. New state is added at the end of
- * cl_req::crq_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
- */
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
- struct cl_device *dev,
- const struct cl_req_operations *ops)
-{
- list_add_tail(&slice->crs_linkage, &req->crq_layers);
- slice->crs_dev = dev;
- slice->crs_ops = ops;
- slice->crs_req = req;
-}
-EXPORT_SYMBOL(cl_req_slice_add);
-
-static void cl_req_free(const struct lu_env *env, struct cl_req *req)
-{
- unsigned i;
-
- LASSERT(list_empty(&req->crq_pages));
- LASSERT(req->crq_nrpages == 0);
- LINVRNT(list_empty(&req->crq_layers));
- LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
-
- if (req->crq_o != NULL) {
- for (i = 0; i < req->crq_nrobjs; ++i) {
- struct cl_object *obj = req->crq_o[i].ro_obj;
- if (obj != NULL) {
- lu_object_ref_del_at(&obj->co_lu,
- &req->crq_o[i].ro_obj_ref,
- "cl_req", req);
- cl_object_put(env, obj);
- }
- }
- kfree(req->crq_o);
- }
- kfree(req);
-}
-
-static int cl_req_init(const struct lu_env *env, struct cl_req *req,
- struct cl_page *page)
-{
- struct cl_device *dev;
- struct cl_page_slice *slice;
- int result;
-
- result = 0;
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init != NULL) {
- result = dev->cd_ops->cdo_req_init(env,
- dev, req);
- if (result != 0)
- break;
- }
- }
- page = page->cp_child;
- } while (page != NULL && result == 0);
- return result;
-}
-
-/**
- * Invokes per-request transfer completion call-backs
- * (cl_req_operations::cro_completion()) bottom-to-top.
- */
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
-{
- struct cl_req_slice *slice;
-
- /*
- * for the lack of list_for_each_entry_reverse_safe()...
- */
- while (!list_empty(&req->crq_layers)) {
- slice = list_entry(req->crq_layers.prev,
- struct cl_req_slice, crs_linkage);
- list_del_init(&slice->crs_linkage);
- if (slice->crs_ops->cro_completion != NULL)
- slice->crs_ops->cro_completion(env, slice, rc);
- }
- cl_req_free(env, req);
-}
-EXPORT_SYMBOL(cl_req_completion);
-
-/**
- * Allocates new transfer request.
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
- enum cl_req_type crt, int nr_objects)
-{
- struct cl_req *req;
-
- LINVRNT(nr_objects > 0);
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req != NULL) {
- int result;
-
- req->crq_type = crt;
- INIT_LIST_HEAD(&req->crq_pages);
- INIT_LIST_HEAD(&req->crq_layers);
-
- req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
- GFP_NOFS);
- if (req->crq_o != NULL) {
- req->crq_nrobjs = nr_objects;
- result = cl_req_init(env, req, page);
- } else
- result = -ENOMEM;
- if (result != 0) {
- cl_req_completion(env, req, result);
- req = ERR_PTR(result);
- }
- } else
- req = ERR_PTR(-ENOMEM);
- return req;
-}
-EXPORT_SYMBOL(cl_req_alloc);
-
-/**
- * Adds a page to a request.
- */
-void cl_req_page_add(const struct lu_env *env,
- struct cl_req *req, struct cl_page *page)
-{
- struct cl_object *obj;
- struct cl_req_obj *rqo;
- int i;
-
- page = cl_page_top(page);
-
- LASSERT(list_empty(&page->cp_flight));
- LASSERT(page->cp_req == NULL);
-
- CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
- req, req->crq_type, req->crq_nrpages);
-
- list_add_tail(&page->cp_flight, &req->crq_pages);
- ++req->crq_nrpages;
- page->cp_req = req;
- obj = cl_object_top(page->cp_obj);
- for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
- if (rqo->ro_obj == NULL) {
- rqo->ro_obj = obj;
- cl_object_get(obj);
- lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
- "cl_req", req);
- break;
- }
- }
- LASSERT(i < req->crq_nrobjs);
-}
-EXPORT_SYMBOL(cl_req_page_add);
-
-/**
- * Removes a page from a request.
- */
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
-{
- struct cl_req *req = page->cp_req;
-
- page = cl_page_top(page);
-
- LASSERT(!list_empty(&page->cp_flight));
- LASSERT(req->crq_nrpages > 0);
-
- list_del_init(&page->cp_flight);
- --req->crq_nrpages;
- page->cp_req = NULL;
-}
-EXPORT_SYMBOL(cl_req_page_done);
-
-/**
- * Notifies layers that request is about to depart by calling
- * cl_req_operations::cro_prep() top-to-bottom.
- */
-int cl_req_prep(const struct lu_env *env, struct cl_req *req)
-{
- int i;
- int result;
- const struct cl_req_slice *slice;
-
- /*
- * Check that the caller of cl_req_alloc() didn't lie about the number
- * of objects.
- */
- for (i = 0; i < req->crq_nrobjs; ++i)
- LASSERT(req->crq_o[i].ro_obj != NULL);
-
- result = 0;
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
- if (slice->crs_ops->cro_prep != NULL) {
- result = slice->crs_ops->cro_prep(env, slice);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_req_prep);
-
-/**
- * Fills in attributes that are passed to server together with transfer. Only
- * attributes from \a flags may be touched. This can be called multiple times
- * for the same request.
- */
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags)
-{
- const struct cl_req_slice *slice;
- struct cl_page *page;
- int i;
-
- LASSERT(!list_empty(&req->crq_pages));
-
- /* Take any page to use as a model. */
- page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
-
- for (i = 0; i < req->crq_nrobjs; ++i) {
- list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
- const struct cl_page_slice *scan;
- const struct cl_object *obj;
-
- scan = cl_page_at(page,
- slice->crs_dev->cd_lu_dev.ld_type);
- LASSERT(scan != NULL);
- obj = scan->cpl_obj;
- if (slice->crs_ops->cro_attr_set != NULL)
- slice->crs_ops->cro_attr_set(env, slice, obj,
- attr + i, flags);
- }
- }
-}
-EXPORT_SYMBOL(cl_req_attr_set);
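/*
 * Illustrative sketch of the cl_req lifecycle implemented above, as a
 * hypothetical transfer path might drive it. Not part of the original file:
 * the function name is invented, the attr/flags values are placeholders, and
 * the sketch assumes the Lustre client headers that declare the cl_req_* API
 * with the signatures shown in this file.
 */
static int my_submit_one_page(const struct lu_env *env, struct cl_page *page,
			      struct cl_req_attr *attr)
{
	struct cl_req *req;
	int rc;

	req = cl_req_alloc(env, page, CRT_WRITE, 1);	/* one object */
	if (IS_ERR(req))
		return PTR_ERR(req);

	cl_req_page_add(env, req, page);	/* attach page(s) to the request */
	rc = cl_req_prep(env, req);		/* notify layers, top-to-bottom */
	if (rc == 0)
		cl_req_attr_set(env, req, attr, 0 /* OBD_MD_* mask */);

	/* ... hand the request to the transport here ... */

	cl_req_page_done(env, page);		/* per-page completion */
	cl_req_completion(env, req, rc);	/* bottom-to-top, frees req */
	return rc;
}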
-
-/* XXX complete(), init_completion(), and wait_for_completion(), until they are
- * implemented in libcfs. */
-# include <linux/sched.h>
-
-/**
- * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
- */
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
-{
- init_waitqueue_head(&anchor->csi_waitq);
- atomic_set(&anchor->csi_sync_nr, nrpages);
- atomic_set(&anchor->csi_barrier, nrpages > 0);
- anchor->csi_sync_rc = 0;
-}
-EXPORT_SYMBOL(cl_sync_io_init);
-
-/**
- * Wait until all transfers complete. The transfer completion routine has to call
- * cl_sync_io_note() for every page.
- */
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
- long timeout)
-{
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
- NULL, NULL, NULL);
- int rc;
-
- LASSERT(timeout >= 0);
-
- rc = l_wait_event(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0,
- &lwi);
- if (rc < 0) {
- CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n",
- rc, atomic_read(&anchor->csi_sync_nr));
-
- (void)cl_io_cancel(env, io, queue);
-
- lwi = (struct l_wait_info) { 0 };
- (void)l_wait_event(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0,
- &lwi);
- } else {
- rc = anchor->csi_sync_rc;
- }
- LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
- cl_page_list_assume(env, io, queue);
-
- /* wait until cl_sync_io_note() has done wakeup */
- while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
- cpu_relax();
- }
-
- POISON(anchor, 0x5a, sizeof(*anchor));
- return rc;
-}
-EXPORT_SYMBOL(cl_sync_io_wait);
-
-/**
- * Indicate that transfer of a single page completed.
- */
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
-{
- if (anchor->csi_sync_rc == 0 && ioret < 0)
- anchor->csi_sync_rc = ioret;
- /*
- * Synchronous IO done without releasing page lock (e.g., as a part of
- * ->{prepare,commit}_write()). Completion is used to signal the end of
- * IO.
- */
- LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
- if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
- wake_up_all(&anchor->csi_waitq);
- /* it's safe to nuke or reuse anchor now */
- atomic_set(&anchor->csi_barrier, 0);
- }
-}
-EXPORT_SYMBOL(cl_sync_io_note);
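/*
 * Usage sketch for the cl_sync_io anchor above (hypothetical caller; the
 * page submission step is elided, and the 600 second timeout is an arbitrary
 * example value):
 */
static int my_sync_transfer(const struct lu_env *env, struct cl_io *io,
			    struct cl_page_list *queue, int nrpages)
{
	struct cl_sync_io anchor;

	cl_sync_io_init(&anchor, nrpages);

	/*
	 * ... submit the pages; the completion path of every page must
	 * eventually call cl_sync_io_note(&anchor, ioret) exactly once ...
	 */

	return cl_sync_io_wait(env, io, queue, &anchor, 600);
}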
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
deleted file mode 100644
index 4294ca62a9af..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ /dev/null
@@ -1,2213 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client Extent Lock.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include <linux/list.h>
-#include "../include/cl_object.h"
-#include "cl_internal.h"
-
-/** Lock class of cl_lock::cll_guard */
-static struct lock_class_key cl_lock_guard_class;
-static struct kmem_cache *cl_lock_kmem;
-
-static struct lu_kmem_descr cl_lock_caches[] = {
- {
- .ckd_cache = &cl_lock_kmem,
- .ckd_name = "cl_lock_kmem",
- .ckd_size = sizeof(struct cl_lock)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-#define CS_LOCK_INC(o, item)
-#define CS_LOCK_DEC(o, item)
-#define CS_LOCKSTATE_INC(o, state)
-#define CS_LOCKSTATE_DEC(o, state)
-
-/**
- * Basic lock invariant that is maintained at all times. Caller either has a
- * reference to \a lock, or somehow assures that \a lock cannot be freed.
- *
- * \see cl_lock_invariant()
- */
-static int cl_lock_invariant_trusted(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
- atomic_read(&lock->cll_ref) >= lock->cll_holds &&
- lock->cll_holds >= lock->cll_users &&
- lock->cll_holds >= 0 &&
- lock->cll_users >= 0 &&
- lock->cll_depth >= 0;
-}
-
-/**
- * Stronger lock invariant, checking that caller has a reference on a lock.
- *
- * \see cl_lock_invariant_trusted()
- */
-static int cl_lock_invariant(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- int result;
-
- result = atomic_read(&lock->cll_ref) > 0 &&
- cl_lock_invariant_trusted(env, lock);
- if (!result && env != NULL)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
- return result;
-}
-
-/**
- * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
- */
-static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
-{
- return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
-}
-
-/**
- * Returns a set of counters for this lock, depending on a lock nesting.
- */
-static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- struct cl_thread_info *info;
- enum clt_nesting_level nesting;
-
- info = cl_env_info(env);
- nesting = cl_lock_nesting(lock);
- LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
- return &info->clt_counters[nesting];
-}
-
-static void cl_lock_trace0(int level, const struct lu_env *env,
- const char *prefix, const struct cl_lock *lock,
- const char *func, const int line)
-{
- struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
- CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
- prefix, lock, atomic_read(&lock->cll_ref),
- lock->cll_guarder, lock->cll_depth,
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags,
- env, h->coh_nesting, cl_lock_nr_mutexed(env),
- func, line);
-}
-#define cl_lock_trace(level, env, prefix, lock) \
- cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
-
-#define RETIP ((unsigned long)__builtin_return_address(0))
-
-#ifdef CONFIG_LOCKDEP
-static struct lock_class_key cl_lock_key;
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{
- lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
-}
-
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
- lock_map_acquire(&lock->dep_map);
-}
-
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
- lock_release(&lock->dep_map, 0, RETIP);
-}
-
-#else /* !CONFIG_LOCKDEP */
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{}
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{}
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{}
-
-#endif /* !CONFIG_LOCKDEP */
-
-/**
- * Adds lock slice to the compound lock.
- *
- * This is called by cl_object_operations::coo_lock_init() methods to add a
- * per-layer state to the lock. New state is added at the end of
- * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
- */
-void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
- struct cl_object *obj,
- const struct cl_lock_operations *ops)
-{
- slice->cls_lock = lock;
- list_add_tail(&slice->cls_linkage, &lock->cll_layers);
- slice->cls_obj = obj;
- slice->cls_ops = ops;
-}
-EXPORT_SYMBOL(cl_lock_slice_add);
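/*
 * Sketch of how a layer's cl_object_operations::coo_lock_init() method is
 * expected to use cl_lock_slice_add(). The "mylayer" names, the slice
 * embedding and the (empty) operations table are invented for illustration;
 * a real layer would fill in its clo_* methods.
 */
struct mylayer_lock {
	struct cl_lock_slice	mls_cl;		/* compound-lock slice */
	/* layer-private lock state would follow */
};

static const struct cl_lock_operations mylayer_lock_ops = {
	/* clo_* methods of the hypothetical layer */
};

static int mylayer_lock_init(const struct lu_env *env, struct cl_object *obj,
			     struct cl_lock *lock, const struct cl_io *io)
{
	struct mylayer_lock *mls;

	mls = kzalloc(sizeof(*mls), GFP_NOFS);
	if (mls == NULL)
		return -ENOMEM;

	/* appended at the bottom of lock->cll_layers */
	cl_lock_slice_add(lock, &mls->mls_cl, obj, &mylayer_lock_ops);
	return 0;
}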
-
-/**
- * Returns true iff a lock with the mode \a has provides at least the same
- * guarantees as a lock with the mode \a need.
- */
-int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
-{
- LINVRNT(need == CLM_READ || need == CLM_WRITE ||
- need == CLM_PHANTOM || need == CLM_GROUP);
- LINVRNT(has == CLM_READ || has == CLM_WRITE ||
- has == CLM_PHANTOM || has == CLM_GROUP);
- CLASSERT(CLM_PHANTOM < CLM_READ);
- CLASSERT(CLM_READ < CLM_WRITE);
- CLASSERT(CLM_WRITE < CLM_GROUP);
-
- if (has != CLM_GROUP)
- return need <= has;
- else
- return need == has;
-}
-EXPORT_SYMBOL(cl_lock_mode_match);
-
-/**
- * Returns true iff extent portions of lock descriptions match.
- */
-int cl_lock_ext_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
-{
- return
- has->cld_start <= need->cld_start &&
- has->cld_end >= need->cld_end &&
- cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
- (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
-}
-EXPORT_SYMBOL(cl_lock_ext_match);
-
-/**
- * Returns true iff a lock with the description \a has provides at least the
- * same guarantees as a lock with the description \a need.
- */
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
-{
- return
- cl_object_same(has->cld_obj, need->cld_obj) &&
- cl_lock_ext_match(has, need);
-}
-EXPORT_SYMBOL(cl_lock_descr_match);
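/*
 * Worked example for the matching helpers above (values invented; the
 * argument order is (has, need)):
 *
 *	cl_lock_mode_match(CLM_WRITE, CLM_READ)  returns 1 (write covers read)
 *	cl_lock_mode_match(CLM_READ,  CLM_WRITE) returns 0
 *	cl_lock_mode_match(CLM_GROUP, CLM_READ)  returns 0 (group matches group only)
 *
 * For extents, a lock with descriptor [0, 1023]@CLM_WRITE satisfies a request
 * for [256, 511]@CLM_READ on the same object: the needed extent is contained
 * in the held one and the held mode is at least as strong, so both
 * cl_lock_ext_match() and cl_lock_descr_match() return 1.
 */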
-
-static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj = lock->cll_descr.cld_obj;
-
- LINVRNT(!cl_lock_is_mutexed(lock));
-
- cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- might_sleep();
- while (!list_empty(&lock->cll_layers)) {
- struct cl_lock_slice *slice;
-
- slice = list_entry(lock->cll_layers.next,
- struct cl_lock_slice, cls_linkage);
- list_del_init(lock->cll_layers.next);
- slice->cls_ops->clo_fini(env, slice);
- }
- CS_LOCK_DEC(obj, total);
- CS_LOCKSTATE_DEC(obj, lock->cll_state);
- lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
- cl_object_put(env, obj);
- lu_ref_fini(&lock->cll_reference);
- lu_ref_fini(&lock->cll_holders);
- mutex_destroy(&lock->cll_guard);
- OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
-}
-
-/**
- * Releases a reference on a lock.
- *
- * When last reference is released, lock is returned to the cache, unless it
- * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
- * immediately.
- *
- * \see cl_object_put(), cl_page_put()
- */
-void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj;
-
- LINVRNT(cl_lock_invariant(env, lock));
- obj = lock->cll_descr.cld_obj;
- LINVRNT(obj != NULL);
-
- CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
-
- if (atomic_dec_and_test(&lock->cll_ref)) {
- if (lock->cll_state == CLS_FREEING) {
- LASSERT(list_empty(&lock->cll_linkage));
- cl_lock_free(env, lock);
- }
- CS_LOCK_DEC(obj, busy);
- }
-}
-EXPORT_SYMBOL(cl_lock_put);
-
-/**
- * Acquires an additional reference to a lock.
- *
- * This can be called only by caller already possessing a reference to \a
- * lock.
- *
- * \see cl_object_get(), cl_page_get()
- */
-void cl_lock_get(struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(NULL, lock));
- CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- atomic_inc(&lock->cll_ref);
-}
-EXPORT_SYMBOL(cl_lock_get);
-
-/**
- * Acquires a reference to a lock.
- *
- * This is much like cl_lock_get(), except that this function can be used to
- * acquire initial reference to the cached lock. Caller has to deal with all
- * possible races. Use with care!
- *
- * \see cl_page_get_trust()
- */
-void cl_lock_get_trust(struct cl_lock *lock)
-{
- CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- if (atomic_inc_return(&lock->cll_ref) == 1)
- CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
-}
-EXPORT_SYMBOL(cl_lock_get_trust);
-
-/**
- * Helper function destroying the lock that wasn't completely initialized.
- *
- * Other threads can acquire references to the top-lock through its
- * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
- */
-static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
-}
-
-static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *descr)
-{
- struct cl_lock *lock;
- struct lu_object_header *head;
-
- OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS);
- if (lock != NULL) {
- atomic_set(&lock->cll_ref, 1);
- lock->cll_descr = *descr;
- lock->cll_state = CLS_NEW;
- cl_object_get(obj);
- lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
- lock);
- INIT_LIST_HEAD(&lock->cll_layers);
- INIT_LIST_HEAD(&lock->cll_linkage);
- INIT_LIST_HEAD(&lock->cll_inclosure);
- lu_ref_init(&lock->cll_reference);
- lu_ref_init(&lock->cll_holders);
- mutex_init(&lock->cll_guard);
- lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
- init_waitqueue_head(&lock->cll_wq);
- head = obj->co_lu.lo_header;
- CS_LOCKSTATE_INC(obj, CLS_NEW);
- CS_LOCK_INC(obj, total);
- CS_LOCK_INC(obj, create);
- cl_lock_lockdep_init(lock);
- list_for_each_entry(obj, &head->loh_layers,
- co_lu.lo_linkage) {
- int err;
-
- err = obj->co_ops->coo_lock_init(env, obj, lock, io);
- if (err != 0) {
- cl_lock_finish(env, lock);
- lock = ERR_PTR(err);
- break;
- }
- }
- } else
- lock = ERR_PTR(-ENOMEM);
- return lock;
-}
-
-/**
- * Transfer the lock into INTRANSIT state and return the original state.
- *
- * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
- * \post state: CLS_INTRANSIT
- * \see CLS_INTRANSIT
- */
-enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
- struct cl_lock *lock)
-{
- enum cl_lock_state state = lock->cll_state;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(state != CLS_INTRANSIT);
- LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
- "Malformed lock state %d.\n", state);
-
- cl_lock_state_set(env, lock, CLS_INTRANSIT);
- lock->cll_intransit_owner = current;
- cl_lock_hold_add(env, lock, "intransit", current);
- return state;
-}
-EXPORT_SYMBOL(cl_lock_intransit);
-
-/**
- * Exit the intransit state and restore the lock state to the original state
- */
-void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(state != CLS_INTRANSIT);
- LASSERT(lock->cll_intransit_owner == current);
-
- lock->cll_intransit_owner = NULL;
- cl_lock_state_set(env, lock, state);
- cl_lock_unhold(env, lock, "intransit", current);
-}
-EXPORT_SYMBOL(cl_lock_extransit);
-
-/**
- * Checks whether the lock is in the CLS_INTRANSIT state
- */
-int cl_lock_is_intransit(struct cl_lock *lock)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- return lock->cll_state == CLS_INTRANSIT &&
- lock->cll_intransit_owner != current;
-}
-EXPORT_SYMBOL(cl_lock_is_intransit);
-/**
- * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
- * truncate and O_APPEND cannot be reused for read/non-append-write, as they
- * cover multiple stripes and can trigger cascading timeouts.
- */
-static int cl_lock_fits_into(const struct lu_env *env,
- const struct cl_lock *lock,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_fits_into != NULL &&
- !slice->cls_ops->clo_fits_into(env, slice, need, io))
- return 0;
- }
- return 1;
-}
-
-static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
-{
- struct cl_lock *lock;
- struct cl_object_header *head;
-
- head = cl_object_header(obj);
- assert_spin_locked(&head->coh_lock_guard);
- CS_LOCK_INC(obj, lookup);
- list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
- int matched;
-
- matched = cl_lock_ext_match(&lock->cll_descr, need) &&
- lock->cll_state < CLS_FREEING &&
- lock->cll_error == 0 &&
- !(lock->cll_flags & CLF_CANCELLED) &&
- cl_lock_fits_into(env, lock, need, io);
- CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
- PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
- matched);
- if (matched) {
- cl_lock_get_trust(lock);
- CS_LOCK_INC(obj, hit);
- return lock;
- }
- }
- return NULL;
-}
-
-/**
- * Returns a lock matching description \a need.
- *
- * This is the main entry point into the cl_lock caching interface. First, a
- * cache (implemented as a per-object linked list) is consulted. If lock is
- * found there, it is returned immediately. Otherwise new lock is allocated
- * and returned. In any case, additional reference to lock is acquired.
- *
- * \see cl_object_find(), cl_page_find()
- */
-static struct cl_lock *cl_lock_find(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
-
- if (lock == NULL) {
- lock = cl_lock_alloc(env, obj, io, need);
- if (!IS_ERR(lock)) {
- struct cl_lock *ghost;
-
- spin_lock(&head->coh_lock_guard);
- ghost = cl_lock_lookup(env, obj, io, need);
- if (ghost == NULL) {
- cl_lock_get_trust(lock);
- list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
- CS_LOCK_INC(obj, busy);
- } else {
- spin_unlock(&head->coh_lock_guard);
- /*
- * Other threads can acquire references to the
- * top-lock through its sub-locks. Hence, it
- * cannot be cl_lock_free()-ed immediately.
- */
- cl_lock_finish(env, lock);
- lock = ghost;
- }
- }
- }
- return lock;
-}
-
-/**
- * Returns existing lock matching given description. This is similar to
- * cl_lock_find() except that no new lock is created, and returned lock is
- * guaranteed to be in enum cl_lock_state::CLS_HELD state.
- */
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- do {
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
- if (lock == NULL)
- return NULL;
-
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state == CLS_INTRANSIT)
- /* Don't care about the return value. */
- cl_lock_state_wait(env, lock);
- if (lock->cll_state == CLS_FREEING) {
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
- } while (lock == NULL);
-
- cl_lock_hold_add(env, lock, scope, source);
- cl_lock_user_add(env, lock);
- if (lock->cll_state == CLS_CACHED)
- cl_use_try(env, lock, 1);
- if (lock->cll_state == CLS_HELD) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock, 0);
- cl_lock_put(env, lock);
- } else {
- cl_unuse_try(env, lock);
- cl_lock_unhold(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
-
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_peek);
-
-/**
- * Returns a slice within a lock, corresponding to the given layer in the
- * device stack.
- *
- * \see cl_page_at()
- */
-const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
- const struct lu_device_type *dtype)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_invariant_trusted(NULL, lock));
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
- return slice;
- }
- return NULL;
-}
-EXPORT_SYMBOL(cl_lock_at);
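/*
 * Typical pattern for a layer retrieving its own slice from a compound lock
 * (hypothetical "mylayer" device type and slice container, as in the sketch
 * after cl_lock_slice_add() above):
 *
 *	const struct cl_lock_slice *slice;
 *
 *	slice = cl_lock_at(lock, &mylayer_device_type);
 *	if (slice != NULL)
 *		mls = container_of(slice, struct mylayer_lock, mls_cl);
 */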
-
-static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- counters = cl_lock_counters(env, lock);
- lock->cll_depth++;
- counters->ctc_nr_locks_locked++;
- lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
- cl_lock_trace(D_TRACE, env, "got mutex", lock);
-}
-
-/**
- * Locks cl_lock object.
- *
- * This is used to manipulate cl_lock fields, and to serialize state
- * transitions in the lock state machine.
- *
- * \post cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_put()
- */
-void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_guarder == current) {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_depth > 0);
- } else {
- struct cl_object_header *hdr;
- struct cl_thread_info *info;
- int i;
-
- LINVRNT(lock->cll_guarder != current);
- hdr = cl_object_header(lock->cll_descr.cld_obj);
- /*
- * Check that mutices are taken in the bottom-to-top order.
- */
- info = cl_env_info(env);
- for (i = 0; i < hdr->coh_nesting; ++i)
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
- lock->cll_guarder = current;
- LINVRNT(lock->cll_depth == 0);
- }
- cl_lock_mutex_tail(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_mutex_get);
-
-/**
- * Try-locks cl_lock object.
- *
- * \retval 0 \a lock was successfully locked
- *
- * \retval -EBUSY \a lock cannot be locked right now
- *
- * \post ergo(result == 0, cl_lock_is_mutexed(lock))
- *
- * \see cl_lock_mutex_get()
- */
-int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
-
- result = 0;
- if (lock->cll_guarder == current) {
- LINVRNT(lock->cll_depth > 0);
- cl_lock_mutex_tail(env, lock);
- } else if (mutex_trylock(&lock->cll_guard)) {
- LINVRNT(lock->cll_depth == 0);
- lock->cll_guarder = current;
- cl_lock_mutex_tail(env, lock);
- } else
- result = -EBUSY;
- return result;
-}
-EXPORT_SYMBOL(cl_lock_mutex_try);
-
-/**
- * Unlocks cl_lock object.
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_get()
- */
-void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_guarder == current);
- LINVRNT(lock->cll_depth > 0);
-
- counters = cl_lock_counters(env, lock);
- LINVRNT(counters->ctc_nr_locks_locked > 0);
-
- cl_lock_trace(D_TRACE, env, "put mutex", lock);
- lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
- counters->ctc_nr_locks_locked--;
- if (--lock->cll_depth == 0) {
- lock->cll_guarder = NULL;
- mutex_unlock(&lock->cll_guard);
- }
-}
-EXPORT_SYMBOL(cl_lock_mutex_put);
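/*
 * Usage sketch for the lock mutex primitives above. The mutex is recursive
 * for the owning thread, so nested get/put pairs from the same task are
 * legal (hypothetical caller):
 */
static void my_inspect_lock(const struct lu_env *env, struct cl_lock *lock)
{
	cl_lock_mutex_get(env, lock);		/* nests if already the guarder */
	LASSERT(cl_lock_is_mutexed(lock));

	/* ... examine or change lock state under the mutex ... */

	cl_lock_mutex_put(env, lock);		/* unlocks on the last nested put */
}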
-
-/**
- * Returns true iff lock's mutex is owned by the current thread.
- */
-int cl_lock_is_mutexed(struct cl_lock *lock)
-{
- return lock->cll_guarder == current;
-}
-EXPORT_SYMBOL(cl_lock_is_mutexed);
-
-/**
- * Returns number of cl_lock mutices held by the current thread (environment).
- */
-int cl_lock_nr_mutexed(const struct lu_env *env)
-{
- struct cl_thread_info *info;
- int i;
- int locked;
-
- /*
- * NOTE: if summation across all nesting levels (currently 2) proves
- * too expensive, a summary counter can be added to
- * struct cl_thread_info.
- */
- info = cl_env_info(env);
- for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- locked += info->clt_counters[i].ctc_nr_locks_locked;
- return locked;
-}
-EXPORT_SYMBOL(cl_lock_nr_mutexed);
-
-static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- if (!(lock->cll_flags & CLF_CANCELLED)) {
- const struct cl_lock_slice *slice;
-
- lock->cll_flags |= CLF_CANCELLED;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_cancel != NULL)
- slice->cls_ops->clo_cancel(env, slice);
- }
- }
-}
-
-static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object_header *head;
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_state < CLS_FREEING) {
- bool in_cache;
-
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_FREEING);
-
- head = cl_object_header(lock->cll_descr.cld_obj);
-
- spin_lock(&head->coh_lock_guard);
- in_cache = !list_empty(&lock->cll_linkage);
- if (in_cache)
- list_del_init(&lock->cll_linkage);
- spin_unlock(&head->coh_lock_guard);
-
- if (in_cache) /* coh_locks cache holds a refcount. */
- cl_lock_put(env, lock);
-
- /*
- * From now on, no new references to this lock can be acquired
- * by cl_lock_lookup().
- */
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete != NULL)
- slice->cls_ops->clo_delete(env, slice);
- }
- /*
- * From now on, no new references to this lock can be acquired
- * by layer-specific means (like a pointer from struct
- * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
- * lov).
- *
- * Lock will be finally freed in cl_lock_put() when last of
- * existing references goes away.
- */
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
- * top-lock (nesting == 0) accounts for this modification in the per-thread
- * debugging counters. Sub-lock holds can be released by a thread different
- * from one that acquired it.
- */
-static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_holds += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_held += delta;
- LASSERT(counters->ctc_nr_held >= 0);
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
- * cl_lock_hold_mod() for the explanation of the debugging code.
- */
-static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_users += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_used += delta;
- LASSERT(counters->ctc_nr_used >= 0);
- }
-}
-
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
- lu_ref_del(&lock->cll_holders, scope, source);
- cl_lock_hold_mod(env, lock, -1);
- if (lock->cll_holds == 0) {
- CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
- if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
- lock->cll_descr.cld_mode == CLM_GROUP ||
- lock->cll_state != CLS_CACHED)
- /*
- * If the lock is still a phantom or group lock when the
- * user is done with it, destroy the lock.
- */
- lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
- if (lock->cll_flags & CLF_CANCELPEND) {
- lock->cll_flags &= ~CLF_CANCELPEND;
- cl_lock_cancel0(env, lock);
- }
- if (lock->cll_flags & CLF_DOOMED) {
- /* no longer doomed: it's dead... Jim. */
- lock->cll_flags &= ~CLF_DOOMED;
- cl_lock_delete0(env, lock);
- }
- }
-}
-EXPORT_SYMBOL(cl_lock_hold_release);
-
-/**
- * Waits until lock state is changed.
- *
- * This function is called with cl_lock mutex locked, atomically releases
- * mutex and goes to sleep, waiting for a lock state change (signaled by
- * cl_lock_signal()), and re-acquires the mutex before return.
- *
- * This function is used to wait until lock state machine makes some progress
- * and to emulate synchronous operations on top of asynchronous lock
- * interface.
- *
- * \retval -EINTR wait was interrupted
- *
- * \retval 0 wait wasn't interrupted
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_signal()
- */
-int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- wait_queue_t waiter;
- sigset_t blocked;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_depth == 1);
- LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
-
- cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
- result = lock->cll_error;
- if (result == 0) {
- /* To avoid being interrupted by the 'non-fatal' signals
- * (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305 */
- blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&lock->cll_wq, &waiter);
- set_current_state(TASK_INTERRUPTIBLE);
- cl_lock_mutex_put(env, lock);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- /* Returning ERESTARTSYS instead of EINTR so syscalls
- * can be restarted if signals are pending here */
- result = -ERESTARTSYS;
- if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- schedule();
- if (!cfs_signal_pending())
- result = 0;
- }
-
- cl_lock_mutex_get(env, lock);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&lock->cll_wq, &waiter);
-
- /* Restore old blocked signals */
- cfs_restore_sigs(blocked);
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_state_wait);
-
-static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state != NULL)
- slice->cls_ops->clo_state(env, slice, state);
- wake_up_all(&lock->cll_wq);
-}
-
-/**
- * Notifies waiters that lock state changed.
- *
- * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
- * layers about state change by calling cl_lock_operations::clo_state()
- * top-to-bottom.
- */
-void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
- cl_lock_state_signal(env, lock, lock->cll_state);
-}
-EXPORT_SYMBOL(cl_lock_signal);
-
-/**
- * Changes lock state.
- *
- * This function is invoked to notify layers that lock state changed, possible
- * as a result of an asynchronous event such as call-back reception.
- *
- * \post lock->cll_state == state
- *
- * \see cl_lock_operations::clo_state()
- */
-void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(lock->cll_state <= state ||
- (lock->cll_state == CLS_CACHED &&
- (state == CLS_HELD || /* lock found in cache */
- state == CLS_NEW || /* sub-lock canceled */
- state == CLS_INTRANSIT)) ||
- /* lock is in transit state */
- lock->cll_state == CLS_INTRANSIT);
-
- if (lock->cll_state != state) {
- CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
- CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
-
- cl_lock_state_signal(env, lock, state);
- lock->cll_state = state;
- }
-}
-EXPORT_SYMBOL(cl_lock_state_set);
-
-static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- do {
- result = 0;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
-
- result = -ENOSYS;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse != NULL) {
- result = slice->cls_ops->clo_unuse(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- } while (result == CLO_REPEAT);
-
- return result;
-}
-
-/**
- * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
- * cl_lock_operations::clo_use() top-to-bottom to notify layers.
- * If @atomic is 1 and use fails, the lock is unused again to restore its
- * previous state, so that the whole use operation stays atomic.
- */
-int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
-{
- const struct cl_lock_slice *slice;
- int result;
- enum cl_lock_state state;
-
- cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
-
- LASSERT(lock->cll_state == CLS_CACHED);
- if (lock->cll_error)
- return lock->cll_error;
-
- result = -ENOSYS;
- state = cl_lock_intransit(env, lock);
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_use != NULL) {
- result = slice->cls_ops->clo_use(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
-
- LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
- lock->cll_state);
-
- if (result == 0) {
- state = CLS_HELD;
- } else {
- if (result == -ESTALE) {
- /*
- * -ESTALE means a sublock is being cancelled right
- * now; reset the lock state to NEW and ask the
- * caller to repeat.
- */
- state = CLS_NEW;
- result = CLO_REPEAT;
- }
-
- /* @atomic means back-off-on-failure. */
- if (atomic) {
- int rc;
- rc = cl_unuse_try_internal(env, lock);
- /* Vet the results. */
- if (rc < 0 && result > 0)
- result = rc;
- }
-
- }
- cl_lock_extransit(env, lock, state);
- return result;
-}
-EXPORT_SYMBOL(cl_use_try);
-
-/**
- * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
- * top-to-bottom.
- */
-static int cl_enqueue_kick(const struct lu_env *env,
- struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
-{
- int result;
- const struct cl_lock_slice *slice;
-
- result = -ENOSYS;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_enqueue != NULL) {
- result = slice->cls_ops->clo_enqueue(env,
- slice, io, flags);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- return result;
-}
-
-/**
- * Tries to enqueue a lock.
- *
- * This function is called repeatedly by cl_enqueue() until either lock is
- * enqueued, or error occurs. This function does not block waiting for
- * networking communication to complete.
- *
- * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \see cl_enqueue() cl_lock_operations::clo_enqueue()
- * \see cl_lock_state::CLS_ENQUEUED
- */
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
-{
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- switch (lock->cll_state) {
- case CLS_NEW:
- cl_lock_state_set(env, lock, CLS_QUEUING);
- /* fall-through */
- case CLS_QUEUING:
- /* kick layers. */
- result = cl_enqueue_kick(env, lock, io, flags);
- /* In the AGL case, cl_lock::cll_state may already
- * have become CLS_HELD. */
- if (result == 0 && lock->cll_state == CLS_QUEUING)
- cl_lock_state_set(env, lock, CLS_ENQUEUED);
- break;
- case CLS_INTRANSIT:
- LASSERT(cl_lock_is_intransit(lock));
- result = CLO_WAIT;
- break;
- case CLS_CACHED:
- /* yank lock from the cache. */
- result = cl_use_try(env, lock, 0);
- break;
- case CLS_ENQUEUED:
- case CLS_HELD:
- result = 0;
- break;
- default:
- case CLS_FREEING:
- /*
- * impossible, only held locks with increased
- * ->cll_holds can be enqueued, and they cannot be
- * freed.
- */
- LBUG();
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_enqueue_try);
-
-/**
- * Cancel the conflicting lock found during previous enqueue.
- *
- * \retval 0 conflicting lock has been canceled.
- * \retval -ve error code.
- */
-int cl_lock_enqueue_wait(const struct lu_env *env,
- struct cl_lock *lock,
- int keep_mutex)
-{
- struct cl_lock *conflict;
- int rc = 0;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(lock->cll_conflict != NULL);
-
- conflict = lock->cll_conflict;
- lock->cll_conflict = NULL;
-
- cl_lock_mutex_put(env, lock);
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- cl_lock_mutex_get(env, conflict);
- cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
- cl_lock_cancel(env, conflict);
- cl_lock_delete(env, conflict);
-
- while (conflict->cll_state != CLS_FREEING) {
- rc = cl_lock_state_wait(env, conflict);
- if (rc != 0)
- break;
- }
- cl_lock_mutex_put(env, conflict);
- lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
- cl_lock_put(env, conflict);
-
- if (keep_mutex)
- cl_lock_mutex_get(env, lock);
-
- LASSERT(rc <= 0);
- return rc;
-}
-EXPORT_SYMBOL(cl_lock_enqueue_wait);
-
-static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 enqflags)
-{
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_user_add(env, lock);
- do {
- result = cl_enqueue_try(env, lock, io, enqflags);
- if (result == CLO_WAIT) {
- if (lock->cll_conflict != NULL)
- result = cl_lock_enqueue_wait(env, lock, 1);
- else
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result != 0)
- cl_unuse_try(env, lock);
- LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD));
- return result;
-}
-
-/**
- * Tries to unlock a lock.
- *
- * This function is called to release the underlying resource:
- * 1. for a top lock, the resource is the sublocks it holds;
- * 2. for a sublock, the resource is its reference to the dlmlock.
- *
- * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see cl_unuse() cl_lock_operations::clo_unuse()
- * \see cl_lock_state::CLS_CACHED
- */
-int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
- enum cl_lock_state state = CLS_NEW;
-
- cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
-
- if (lock->cll_users > 1) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /* Only a lock in CLS_HELD or CLS_ENQUEUED state can hold
- * underlying resources. */
- if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /*
- * New lock users (->cll_users) do not prevent unlocking from
- * proceeding. From this point, the lock eventually reaches
- * CLS_CACHED, is reinitialized to CLS_NEW or fails into
- * CLS_FREEING.
- */
- state = cl_lock_intransit(env, lock);
-
- result = cl_unuse_try_internal(env, lock);
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(result != CLO_WAIT);
- cl_lock_user_del(env, lock);
- if (result == 0 || result == -ESTALE) {
- /*
- * Return lock back to the cache. This is the only
- * place where lock is moved into CLS_CACHED state.
- *
- * If one of ->clo_unuse() methods returned -ESTALE, lock
- * cannot be placed into cache and has to be
- * re-initialized. This happens e.g., when a sub-lock was
- * canceled while unlocking was in progress.
- */
- if (state == CLS_HELD && result == 0)
- state = CLS_CACHED;
- else
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
-
- /*
- * Hide the -ESTALE error.
- * Suppose the lock is a glimpse lock covering multiple stripes,
- * one of its sublocks returned -ENAVAIL, and the other sublocks
- * matched write locks. In that case we can't mark this lock as
- * failed, because then some of its sublocks might never be
- * canceled and the dirty pages they cover would never be written
- * to the OSTs. -jay
- */
- result = 0;
- } else {
- CERROR("result = %d, this is unlikely!\n", result);
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
- }
- return result ?: lock->cll_error;
-}
-EXPORT_SYMBOL(cl_unuse_try);
-
-static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- result = cl_unuse_try(env, lock);
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
-}
-
-/**
- * Unlocks a lock.
- */
-void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_unuse_locked(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_release(env, lock);
-}
-EXPORT_SYMBOL(cl_unuse);
-
-/**
- * Tries to wait for a lock.
- *
- * This function is called repeatedly by cl_wait() until either lock is
- * granted, or error occurs. This function does not block waiting for network
- * communication to complete.
- *
- * \see cl_wait() cl_lock_operations::clo_wait()
- * \see cl_lock_state::CLS_HELD
- */
-int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_QUEUING ||
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD ||
- lock->cll_state == CLS_INTRANSIT,
- "lock state: %d\n", lock->cll_state);
- LASSERT(lock->cll_users > 0);
- LASSERT(lock->cll_holds > 0);
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- if (cl_lock_is_intransit(lock)) {
- result = CLO_WAIT;
- break;
- }
-
- if (lock->cll_state == CLS_HELD)
- /* nothing to do */
- break;
-
- result = -ENOSYS;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_wait != NULL) {
- result = slice->cls_ops->clo_wait(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- if (result == 0) {
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_HELD);
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_wait_try);
-
-/**
- * Waits until enqueued lock is granted.
- *
- * \pre current thread or io owns a hold on the lock
- * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \post ergo(result == 0, lock->cll_state == CLS_HELD)
- */
-int cl_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- cl_lock_mutex_get(env, lock);
-
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
- "Wrong state %d \n", lock->cll_state);
- LASSERT(lock->cll_holds > 0);
-
- do {
- result = cl_wait_try(env, lock);
- if (result == CLO_WAIT) {
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result < 0) {
- cl_unuse_try(env, lock);
- cl_lock_lockdep_release(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
- cl_lock_mutex_put(env, lock);
- LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
- return result;
-}
-EXPORT_SYMBOL(cl_wait);
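/*
 * Condensed sketch of the enqueue/wait/unuse sequence described above, as a
 * hypothetical top-level user would drive it once a hold has been taken on
 * the lock. Error handling and the enqueue retry loop are trimmed; see
 * cl_enqueue_locked() above for the full loop.
 *
 *	cl_lock_mutex_get(env, lock);
 *	rc = cl_enqueue_try(env, lock, io, enqflags);	// repeat while CLO_WAIT
 *	cl_lock_mutex_put(env, lock);
 *
 *	if (rc == 0)
 *		rc = cl_wait(env, lock);		// block until CLS_HELD
 *	if (rc == 0) {
 *		// the extent is now protected; perform the IO
 *		cl_unuse(env, lock);			// back towards CLS_CACHED
 *	}
 */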
-
-/**
- * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
- * value.
- */
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- unsigned long pound;
- unsigned long ounce;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- pound = 0;
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_weigh != NULL) {
- ounce = slice->cls_ops->clo_weigh(env, slice);
- pound += ounce;
- if (pound < ounce) /* over-weight^Wflow */
- pound = ~0UL;
- }
- }
- return pound;
-}
-EXPORT_SYMBOL(cl_lock_weigh);
-
-/**
- * Notifies layers that lock description changed.
- *
- * The server can grant client a lock different from one that was requested
- * (e.g., larger in extent). This method is called when actually granted lock
- * description becomes known to let layers to accommodate for changed lock
- * description.
- *
- * \see cl_lock_operations::clo_modify()
- */
-int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc)
-{
- const struct cl_lock_slice *slice;
- struct cl_object *obj = lock->cll_descr.cld_obj;
- struct cl_object_header *hdr = cl_object_header(obj);
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
- /* don't allow object to change */
- LASSERT(obj == desc->cld_obj);
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_modify != NULL) {
- result = slice->cls_ops->clo_modify(env, slice, desc);
- if (result != 0)
- return result;
- }
- }
- CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
- PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
- /*
- * Just replace description in place. Nothing more is needed for
- * now. If locks were indexed according to their extent and/or mode,
- * that index would have to be updated here.
- */
- spin_lock(&hdr->coh_lock_guard);
- lock->cll_descr = *desc;
- spin_unlock(&hdr->coh_lock_guard);
- return 0;
-}
-EXPORT_SYMBOL(cl_lock_modify);
-
-/**
- * Initializes lock closure with a given origin.
- *
- * \see cl_lock_closure
- */
-void cl_lock_closure_init(const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait)
-{
- LINVRNT(cl_lock_is_mutexed(origin));
- LINVRNT(cl_lock_invariant(env, origin));
-
- INIT_LIST_HEAD(&closure->clc_list);
- closure->clc_origin = origin;
- closure->clc_wait = wait;
- closure->clc_nr = 0;
-}
-EXPORT_SYMBOL(cl_lock_closure_init);
-
-/**
- * Builds a closure of \a lock.
- *
- * Building of a closure consists of adding initial lock (\a lock) into it,
- * and calling cl_lock_operations::clo_closure() methods of \a lock. These
- * methods might call cl_lock_closure_build() recursively again, adding more
- * locks to the closure, etc.
- *
- * \see cl_lock_closure
- */
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
- LINVRNT(cl_lock_invariant(env, closure->clc_origin));
-
- result = cl_lock_enclosure(env, lock, closure);
- if (result == 0) {
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_closure != NULL) {
- result = slice->cls_ops->clo_closure(env, slice,
- closure);
- if (result != 0)
- break;
- }
- }
- }
- if (result != 0)
- cl_lock_disclosure(env, closure);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_closure_build);
-
-/**
- * Adds new lock to a closure.
- *
- * Try-locks \a lock and if succeeded, adds it to the closure (never more than
- * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
- * until next try-lock is likely to succeed.
- */
-int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- int result = 0;
-
- cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
- if (!cl_lock_mutex_try(env, lock)) {
- /*
- * If lock->cll_inclosure is not empty, lock is already in
- * this closure.
- */
- if (list_empty(&lock->cll_inclosure)) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure", closure);
- list_add(&lock->cll_inclosure, &closure->clc_list);
- closure->clc_nr++;
- } else
- cl_lock_mutex_put(env, lock);
- result = 0;
- } else {
- cl_lock_disclosure(env, closure);
- if (closure->clc_wait) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure-w", closure);
- cl_lock_mutex_put(env, closure->clc_origin);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
- cl_lock_mutex_get(env, lock);
- cl_lock_mutex_put(env, lock);
-
- cl_lock_mutex_get(env, closure->clc_origin);
- lu_ref_del(&lock->cll_reference, "closure-w", closure);
- cl_lock_put(env, lock);
- }
- result = CLO_REPEAT;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_enclosure);
-
-/** Releases mutices of enclosed locks. */
-void cl_lock_disclosure(const struct lu_env *env,
- struct cl_lock_closure *closure)
-{
- struct cl_lock *scan;
- struct cl_lock *temp;
-
- cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
- list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure) {
- list_del_init(&scan->cll_inclosure);
- cl_lock_mutex_put(env, scan);
- lu_ref_del(&scan->cll_reference, "closure", closure);
- cl_lock_put(env, scan);
- closure->clc_nr--;
- }
- LASSERT(closure->clc_nr == 0);
-}
-EXPORT_SYMBOL(cl_lock_disclosure);
-
-/** Finalizes a closure. */
-void cl_lock_closure_fini(struct cl_lock_closure *closure)
-{
- LASSERT(closure->clc_nr == 0);
- LASSERT(list_empty(&closure->clc_list));
-}
-EXPORT_SYMBOL(cl_lock_closure_fini);
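/*
 * Lifecycle sketch for lock closures, matching the helpers above
 * (hypothetical caller; the work done while the closure is held is elided):
 */
static int my_with_closure(const struct lu_env *env, struct cl_lock *origin,
			   struct cl_lock *other)
{
	struct cl_lock_closure closure;
	int rc;

	/* origin must already be mutexed; wait == 1 allows blocking retries */
	cl_lock_closure_init(env, &closure, origin, 1);

	rc = cl_lock_closure_build(env, other, &closure);
	if (rc == 0) {
		/* ... every enclosed lock is mutexed here; operate on them ... */
		cl_lock_disclosure(env, &closure);	/* drop their mutices */
	}
	/* on a non-zero result cl_lock_closure_build() has already disclosed */
	cl_lock_closure_fini(&closure);
	return rc;
}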
-
-/**
- * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
- * destroyed, then destroy the lock. If there are holds on the lock, postpone
- * destruction until all holds are released. This is called when a decision is
- * made to destroy the lock in the future. E.g., when a blocking AST is
- * received on it, or fatal communication error happens.
- *
- * Caller must have a reference on this lock to prevent a situation, when
- * deleted lock lingers in memory for indefinite time, because nobody calls
- * cl_lock_put() to finish it.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
- * cl_lock_nr_mutexed(env) == 1)
- * [i.e., if a top-lock is deleted, mutices of no other locks can be
- * held, as deletion of sub-locks might require releasing a top-lock
- * mutex]
- *
- * \see cl_lock_operations::clo_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
- cl_lock_nr_mutexed(env) == 1));
-
- cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_delete0(env, lock);
- else
- lock->cll_flags |= CLF_DOOMED;
-}
-EXPORT_SYMBOL(cl_lock_delete);
-
-/**
- * Mark lock as irrecoverably failed, and mark it for destruction. This
- * happens when, e.g., server fails to grant a lock to us, or networking
- * time-out happens.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- *
- * \see clo_lock_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_error == 0 && error != 0) {
- cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
- lock->cll_error = error;
- cl_lock_signal(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
-}
-EXPORT_SYMBOL(cl_lock_error);
-
-/**
- * Cancels this lock. Notifies layers
- * (bottom-to-top) that lock is being cancelled, then destroy the lock. If
- * there are holds on the lock, postpone cancellation until
- * all holds are released.
- *
- * Cancellation notification is delivered to layers at most once.
- *
- * \see cl_lock_operations::clo_cancel()
- * \see cl_lock::cll_holds
- */
-void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_cancel0(env, lock);
- else
- lock->cll_flags |= CLF_CANCELPEND;
-}
-EXPORT_SYMBOL(cl_lock_cancel);
-
-/**
- * Finds an existing lock covering given index and optionally different from a
- * given \a except lock.
- */
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except,
- int pending, int canceld)
-{
- struct cl_object_header *head;
- struct cl_lock *scan;
- struct cl_lock *lock;
- struct cl_lock_descr *need;
-
- head = cl_object_header(obj);
- need = &cl_env_info(env)->clt_descr;
- lock = NULL;
-
- need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
- * not PHANTOM */
- need->cld_start = need->cld_end = index;
- need->cld_enq_flags = 0;
-
- spin_lock(&head->coh_lock_guard);
- /* It is fine to match any group lock since there could be only one
- * with a unique gid, and it conflicts with all other lock modes too. */
- list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
- if (scan != except &&
- (scan->cll_descr.cld_mode == CLM_GROUP ||
- cl_lock_ext_match(&scan->cll_descr, need)) &&
- scan->cll_state >= CLS_HELD &&
- scan->cll_state < CLS_FREEING &&
- /*
- * This check is racy as the lock can be canceled right
- * after it is done, but this is fine, because page exists
- * already.
- */
- (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
- (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
- /* Don't increase cs_hit here since this
- * is just a helper function. */
- cl_lock_get_trust(scan);
- lock = scan;
- break;
- }
- }
- spin_unlock(&head->coh_lock_guard);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_at_pgoff);
-
-/**
- * Calculate the page offset at the layer of @lock.
- * At the time of this writing, @page is top page and @lock is sub lock.
- */
-static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
-{
- struct lu_device_type *dtype;
- const struct cl_page_slice *slice;
-
- dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
- slice = cl_page_at(page, dtype);
- LASSERT(slice != NULL);
- return slice->cpl_page->cp_index;
-}
-
-/**
- * Check if page @page is covered by an extra lock or discard it.
- */
-static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
- pgoff_t index = pgoff_at_lock(page, lock);
-
- if (index >= info->clt_fn_index) {
- struct cl_lock *tmp;
-
- /* refresh non-overlapped index */
- tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
- lock, 1, 0);
- if (tmp != NULL) {
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, clt_fn_index). This
- * is safe because if tmp lock is canceled, it will
- * discard these pages. */
- info->clt_fn_index = tmp->cll_descr.cld_end + 1;
- if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
- info->clt_fn_index = CL_PAGE_EOF;
- cl_lock_put(env, tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
- }
-
- info->clt_next_index = index + 1;
- return CLP_GANG_OKAY;
-}
-
-static int discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
-
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageWriteback(cl_page_vmpage(env, page))));
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageDirty(cl_page_vmpage(env, page))));
-
- info->clt_next_index = pgoff_at_lock(page, lock) + 1;
- if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
-
- return CLP_GANG_OKAY;
-}
-
-/**
- * Discard pages protected by the given lock. This function traverses radix
- * tree to find all covering pages and discard them. If a page is being covered
- * by other locks, it should remain in cache.
- *
- * If error happens on any step, the process continues anyway (the reasoning
- * behind this being that lock cancellation cannot be delayed indefinitely).
- */
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_io *io = &info->clt_io;
- struct cl_lock_descr *descr = &lock->cll_descr;
- cl_page_gang_cb_t cb;
- int res;
- int result;
-
- LINVRNT(cl_lock_invariant(env, lock));
-
- io->ci_obj = cl_object_top(descr->cld_obj);
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result != 0)
- goto out;
-
- cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->clt_fn_index = info->clt_next_index = descr->cld_start;
- do {
- res = cl_page_gang_lookup(env, descr->cld_obj, io,
- info->clt_next_index, descr->cld_end,
- cb, (void *)lock);
- if (info->clt_next_index > descr->cld_end)
- break;
-
- if (res == CLP_GANG_RESCHED)
- cond_resched();
- } while (res != CLP_GANG_OKAY);
-out:
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_discard_pages);
-
-/**
- * Eliminate all locks for a given object.
- *
- * Caller has to guarantee that no lock is in active use.
- *
- * \param cancel when this is set, cl_locks_prune() cancels locks before
- * destroying.
- */
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
-{
- struct cl_object_header *head;
- struct cl_lock *lock;
-
- head = cl_object_header(obj);
- /*
- * If locks are destroyed without cancellation, all pages must be
- * already destroyed (as otherwise they will be left unprotected).
- */
- LASSERT(ergo(!cancel,
- head->coh_tree.rnode == NULL && head->coh_pages == 0));
-
- spin_lock(&head->coh_lock_guard);
- while (!list_empty(&head->coh_locks)) {
- lock = container_of(head->coh_locks.next,
- struct cl_lock, cll_linkage);
- cl_lock_get_trust(lock);
- spin_unlock(&head->coh_lock_guard);
- lu_ref_add(&lock->cll_reference, "prune", current);
-
-again:
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_users <= 1);
- if (unlikely(lock->cll_users == 1)) {
- struct l_wait_info lwi = { 0 };
-
- cl_lock_mutex_put(env, lock);
- l_wait_event(lock->cll_wq,
- lock->cll_users == 0,
- &lwi);
- goto again;
- }
-
- if (cancel)
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, "prune", current);
- cl_lock_put(env, lock);
- spin_lock(&head->coh_lock_guard);
- }
- spin_unlock(&head->coh_lock_guard);
-}
-EXPORT_SYMBOL(cl_locks_prune);
-
-static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- while (1) {
- lock = cl_lock_find(env, io, need);
- if (IS_ERR(lock))
- break;
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING &&
- !(lock->cll_flags & CLF_CANCELLED)) {
- cl_lock_hold_mod(env, lock, 1);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
- break;
- }
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- }
- return lock;
-}
-
-/**
- * Returns a lock matching \a need description with a reference and a hold on
- * it.
- *
- * This is much like cl_lock_find(), except that cl_lock_hold() additionally
- * guarantees that lock is not in the CLS_FREEING state on return.
- */
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (!IS_ERR(lock))
- cl_lock_mutex_put(env, lock);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_hold);
-
-/**
- * Main high-level entry point of cl_lock interface that finds existing or
- * enqueues new lock matching given description.
- */
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
- int rc;
- __u32 enqflags = need->cld_enq_flags;
-
- do {
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (IS_ERR(lock))
- break;
-
- rc = cl_enqueue_locked(env, lock, io, enqflags);
- if (rc == 0) {
- if (cl_lock_fits_into(env, lock, need, io)) {
- if (!(enqflags & CEF_AGL)) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock,
- enqflags);
- break;
- }
- rc = 1;
- }
- cl_unuse_locked(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env,
- rc <= 0 ? "enqueue failed" : "agl succeed", lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
- if (rc > 0) {
- LASSERT(enqflags & CEF_AGL);
- lock = NULL;
- } else if (rc != 0) {
- lock = ERR_PTR(rc);
- }
- } while (rc == 0);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_request);
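A minimal sketch of how a caller might drive the entry point above, assuming an already initialized io and env; the function name, the stack-allocated descriptor and the "example" scope label are illustrative only, not taken from the removed file:

static int example_lock_whole_file(const struct lu_env *env, struct cl_io *io,
				   struct cl_object *obj)
{
	/* Describe the extent to lock: a whole-file read lock. */
	struct cl_lock_descr descr = {
		.cld_obj       = obj,
		.cld_mode      = CLM_READ,
		.cld_start     = 0,
		.cld_end       = CL_PAGE_EOF,
		.cld_enq_flags = 0,
	};
	struct cl_lock *lock;

	lock = cl_lock_request(env, io, &descr, "example", current);
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	/* ... access pages covered by the lock ... */

	cl_lock_release(env, lock, "example", current);
	return 0;
}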
-
-/**
- * Adds a hold to a known lock.
- */
-void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state != CLS_FREEING);
-
- cl_lock_hold_mod(env, lock, 1);
- cl_lock_get(lock);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
-}
-EXPORT_SYMBOL(cl_lock_hold_add);
-
-/**
- * Releases a hold and a reference on a lock, on which caller acquired a
- * mutex.
- */
-void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_invariant(env, lock));
- cl_lock_hold_release(env, lock, scope, source);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_unhold);
-
-/**
- * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
- */
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_invariant(env, lock));
- cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
- cl_lock_mutex_get(env, lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_release);
-
-void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_used_mod(env, lock, 1);
-}
-EXPORT_SYMBOL(cl_lock_user_add);
-
-void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_users > 0);
-
- cl_lock_used_mod(env, lock, -1);
- if (lock->cll_users == 0)
- wake_up_all(&lock->cll_wq);
-}
-EXPORT_SYMBOL(cl_lock_user_del);
-
-const char *cl_lock_mode_name(const enum cl_lock_mode mode)
-{
- static const char *names[] = {
- [CLM_PHANTOM] = "P",
- [CLM_READ] = "R",
- [CLM_WRITE] = "W",
- [CLM_GROUP] = "G"
- };
- if (0 <= mode && mode < ARRAY_SIZE(names))
- return names[mode];
- else
- return "U";
-}
-EXPORT_SYMBOL(cl_lock_mode_name);
-
-/**
- * Prints human readable representation of a lock description.
- */
-void cl_lock_descr_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_lock_descr *descr)
-{
- const struct lu_fid *fid;
-
- fid = lu_object_fid(&descr->cld_obj->co_lu);
- (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
-}
-EXPORT_SYMBOL(cl_lock_descr_print);
-
-/**
- * Prints human readable representation of \a lock to the \a f.
- */
-void cl_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
- lock, atomic_read(&lock->cll_ref),
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags);
- cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
- (*printer)(env, cookie, " {\n");
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- (*printer)(env, cookie, " %s@%p: ",
- slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
- slice);
- if (slice->cls_ops->clo_print != NULL)
- slice->cls_ops->clo_print(env, cookie, printer, slice);
- (*printer)(env, cookie, "\n");
- }
- (*printer)(env, cookie, "} lock@%p\n", lock);
-}
-EXPORT_SYMBOL(cl_lock_print);
-
-int cl_lock_init(void)
-{
- return lu_kmem_init(cl_lock_caches);
-}
-
-void cl_lock_fini(void)
-{
- lu_kmem_fini(cl_lock_caches);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
deleted file mode 100644
index 89ff7f1fb0fc..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ /dev/null
@@ -1,1097 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client Lustre Object.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-/*
- * Locking.
- *
- * i_mutex
- * PG_locked
- * ->coh_page_guard
- * ->coh_lock_guard
- * ->coh_attr_guard
- * ->ls_guard
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../../include/linux/libcfs/libcfs.h"
-/* class_put_type() */
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include <linux/list.h>
-#include "../../include/linux/libcfs/libcfs_hash.h" /* for cfs_hash stuff */
-#include "../include/cl_object.h"
-#include "cl_internal.h"
-
-static struct kmem_cache *cl_env_kmem;
-
-/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
-/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
-/** Lock class of cl_object_header::coh_attr_guard */
-static struct lock_class_key cl_attr_guard_class;
-
-extern __u32 lu_context_tags_default;
-extern __u32 lu_session_tags_default;
-/**
- * Initialize cl_object_header.
- */
-int cl_object_header_init(struct cl_object_header *h)
-{
- int result;
-
- result = lu_object_header_init(&h->coh_lu);
- if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
- spin_lock_init(&h->coh_lock_guard);
- spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
- lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
- lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- h->coh_pages = 0;
- /* XXX hard coded GFP_* mask. */
- INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
- INIT_LIST_HEAD(&h->coh_locks);
- h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_header_init);
-
-/**
- * Returns a cl_object with a given \a fid.
- *
- * Returns either cached or newly created object. Additional reference on the
- * returned object is acquired.
- *
- * \see lu_object_find(), cl_page_find(), cl_lock_find()
- */
-struct cl_object *cl_object_find(const struct lu_env *env,
- struct cl_device *cd, const struct lu_fid *fid,
- const struct cl_object_conf *c)
-{
- might_sleep();
- return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
-}
-EXPORT_SYMBOL(cl_object_find);
-
-/**
- * Releases a reference on \a o.
- *
- * When last reference is released object is returned to the cache, unless
- * lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header.
- *
- * \see cl_page_put(), cl_lock_put().
- */
-void cl_object_put(const struct lu_env *env, struct cl_object *o)
-{
- lu_object_put(env, &o->co_lu);
-}
-EXPORT_SYMBOL(cl_object_put);
-
-/**
- * Acquire an additional reference to the object \a o.
- *
- * This can only be used to acquire _additional_ reference, i.e., caller
- * already has to possess at least one reference to \a o before calling this.
- *
- * \see cl_page_get(), cl_lock_get().
- */
-void cl_object_get(struct cl_object *o)
-{
- lu_object_get(&o->co_lu);
-}
-EXPORT_SYMBOL(cl_object_get);
-
-/**
- * Returns the top-object for a given \a o.
- *
- * \see cl_page_top(), cl_io_top()
- */
-struct cl_object *cl_object_top(struct cl_object *o)
-{
- struct cl_object_header *hdr = cl_object_header(o);
- struct cl_object *top;
-
- while (hdr->coh_parent != NULL)
- hdr = hdr->coh_parent;
-
- top = lu2cl(lu_object_top(&hdr->coh_lu));
- CDEBUG(D_TRACE, "%p -> %p\n", o, top);
- return top;
-}
-EXPORT_SYMBOL(cl_object_top);
-
-/**
- * Returns pointer to the lock protecting data-attributes for the given object
- * \a o.
- *
- * Data-attributes are protected by the cl_object_header::coh_attr_guard
- * spin-lock in the top-object.
- *
- * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
- */
-static spinlock_t *cl_object_attr_guard(struct cl_object *o)
-{
- return &cl_object_header(cl_object_top(o))->coh_attr_guard;
-}
-
-/**
- * Locks data-attributes.
- *
- * Prevents data-attributes from changing, until lock is released by
- * cl_object_attr_unlock(). This has to be called before calls to
- * cl_object_attr_get(), cl_object_attr_set().
- */
-void cl_object_attr_lock(struct cl_object *o)
- __acquires(cl_object_attr_guard(o))
-{
- spin_lock(cl_object_attr_guard(o));
-}
-EXPORT_SYMBOL(cl_object_attr_lock);
-
-/**
- * Releases data-attributes lock, acquired by cl_object_attr_lock().
- */
-void cl_object_attr_unlock(struct cl_object *o)
- __releases(cl_object_attr_guard(o))
-{
- spin_unlock(cl_object_attr_guard(o));
-}
-EXPORT_SYMBOL(cl_object_attr_unlock);
-
-/**
- * Returns data-attributes of an object \a obj.
- *
- * Every layer is asked (by calling cl_object_operations::coo_attr_get())
- * top-to-bottom to fill in parts of \a attr that this layer is responsible
- * for.
- */
-int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct lu_object_header *top;
- int result;
-
- assert_spin_locked(cl_object_attr_guard(obj));
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_get != NULL) {
- result = obj->co_ops->coo_attr_get(env, obj, attr);
- if (result != 0) {
- if (result > 0)
- result = 0;
- break;
- }
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_attr_get);
-
-/**
- * Updates data-attributes of an object \a obj.
- *
- * Only attributes, mentioned in a validness bit-mask \a v are
- * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom
- * to top.
- */
-int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned v)
-{
- struct lu_object_header *top;
- int result;
-
- assert_spin_locked(cl_object_attr_guard(obj));
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_set != NULL) {
- result = obj->co_ops->coo_attr_set(env, obj, attr, v);
- if (result != 0) {
- if (result > 0)
- result = 0;
- break;
- }
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_object_attr_set);
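A hedged illustration of the protocol documented above (take the attr guard, query or update, release); the helper name and the stack-allocated cl_attr are assumptions, not code from the removed file:

static int example_read_attr(const struct lu_env *env, struct cl_object *obj,
			     __u64 *size)
{
	struct cl_attr attr;
	int rc;

	cl_object_attr_lock(obj);	/* takes coh_attr_guard of the top object */
	rc = cl_object_attr_get(env, obj, &attr);
	cl_object_attr_unlock(obj);

	if (rc == 0)
		*size = attr.cat_size;	/* \see cl_lvb2attr() for the attr fields */
	return rc;
}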
-
-/**
- * Notifies layers (bottom-to-top) that glimpse AST was received.
- *
- * Layers have to fill \a lvb fields with information that will be shipped
- * back to glimpse issuer.
- *
- * \see cl_lock_operations::clo_glimpse()
- */
-int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb)
-{
- struct lu_object_header *top;
- int result;
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
- if (obj->co_ops->coo_glimpse != NULL) {
- result = obj->co_ops->coo_glimpse(env, obj, lvb);
- if (result != 0)
- break;
- }
- }
- LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
- "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu\n",
- lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
- lvb->lvb_ctime, lvb->lvb_blocks);
- return result;
-}
-EXPORT_SYMBOL(cl_object_glimpse);
-
-/**
- * Updates a configuration of an object \a obj.
- */
-int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- struct lu_object_header *top;
- int result;
-
- top = obj->co_lu.lo_header;
- result = 0;
- list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_conf_set != NULL) {
- result = obj->co_ops->coo_conf_set(env, obj, conf);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_conf_set);
-
-/**
- * Helper function removing all object locks, and marking object for
- * deletion. All object pages must have been deleted at this point.
- *
- * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
- * and sub- objects respectively.
- */
-void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
-{
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(obj);
- LASSERT(hdr->coh_tree.rnode == NULL);
- LASSERT(hdr->coh_pages == 0);
-
- set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
- /*
- * Destroy all locks. Object destruction (including cl_inode_fini())
- * cannot cancel the locks, because in the case of a local client,
- * where client and server share the same thread running
- * prune_icache(), this can dead-lock with ldlm_cancel_handler()
- * waiting on __wait_on_freeing_inode().
- */
- cl_locks_prune(env, obj, 0);
-}
-EXPORT_SYMBOL(cl_object_kill);
-
-/**
- * Prunes caches of pages and locks for this object.
- */
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
-{
- cl_pages_prune(env, obj);
- cl_locks_prune(env, obj, 1);
-}
-EXPORT_SYMBOL(cl_object_prune);
-
-void cache_stats_init(struct cache_stats *cs, const char *name)
-{
- int i;
-
- cs->cs_name = name;
- for (i = 0; i < CS_NR; i++)
- atomic_set(&cs->cs_stats[i], 0);
-}
-
-int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h)
-{
- int i;
- /*
- * lookup hit total cached create
- * env: ...... ...... ...... ...... ......
- */
- if (h) {
- const char *names[CS_NR] = CS_NAMES;
-
- seq_printf(m, "%6s", " ");
- for (i = 0; i < CS_NR; i++)
- seq_printf(m, "%8s", names[i]);
- seq_printf(m, "\n");
- }
-
- seq_printf(m, "%5.5s:", cs->cs_name);
- for (i = 0; i < CS_NR; i++)
- seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
- return 0;
-}
-
-/**
- * Initialize client site.
- *
- * Perform common initialization (lu_site_init()), and initialize statistical
- * counters. Also perform global initializations on the first call.
- */
-int cl_site_init(struct cl_site *s, struct cl_device *d)
-{
- int i;
- int result;
-
- result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
- if (result == 0) {
- cache_stats_init(&s->cs_pages, "pages");
- cache_stats_init(&s->cs_locks, "locks");
- for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
-			atomic_set(&s->cs_pages_state[i], 0);
- for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- atomic_set(&s->cs_locks_state[i], 0);
- }
- return result;
-}
-EXPORT_SYMBOL(cl_site_init);
-
-/**
- * Finalize client site. Dual to cl_site_init().
- */
-void cl_site_fini(struct cl_site *s)
-{
- lu_site_fini(&s->cs_lu);
-}
-EXPORT_SYMBOL(cl_site_fini);
-
-static struct cache_stats cl_env_stats = {
- .cs_name = "envs",
- .cs_stats = { ATOMIC_INIT(0), }
-};
-
-/**
- * Outputs client site statistical counters into a buffer. Suitable for
- * ll_rd_*()-style functions.
- */
-int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
-{
- int i;
- static const char *pstate[] = {
- [CPS_CACHED] = "c",
- [CPS_OWNED] = "o",
- [CPS_PAGEOUT] = "w",
- [CPS_PAGEIN] = "r",
- [CPS_FREEING] = "f"
- };
- static const char *lstate[] = {
- [CLS_NEW] = "n",
- [CLS_QUEUING] = "q",
- [CLS_ENQUEUED] = "e",
- [CLS_HELD] = "h",
- [CLS_INTRANSIT] = "t",
- [CLS_CACHED] = "c",
- [CLS_FREEING] = "f"
- };
-/*
- lookup hit total busy create
-pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
-locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
- env: ...... ...... ...... ...... ......
- */
- lu_site_stats_print(&site->cs_lu, m);
- cache_stats_print(&site->cs_pages, m, 1);
- seq_printf(m, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
- seq_printf(m, "%s: %u ", pstate[i],
- atomic_read(&site->cs_pages_state[i]));
- seq_printf(m, "]\n");
- cache_stats_print(&site->cs_locks, m, 0);
- seq_printf(m, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
- seq_printf(m, "%s: %u ", lstate[i],
- atomic_read(&site->cs_locks_state[i]));
- seq_printf(m, "]\n");
- cache_stats_print(&cl_env_stats, m, 0);
- seq_printf(m, "\n");
- return 0;
-}
-EXPORT_SYMBOL(cl_site_stats_print);
-
-/*****************************************************************************
- *
- * lu_env handling on client.
- *
- */
-
-/**
- * The most efficient way is to store the cl_env pointer in task-specific
- * structures. On Linux, it won't be easy to use task_struct->journal_info,
- * because Lustre code may call into other filesystems which have their own
- * assumptions about journal_info. Currently the following fields in
- * task_struct have been identified as usable for this purpose:
- *  - cl_env: for liblustre.
- *  - tux_info: only on RedHat kernels.
- *  - ...
- * \note As long as we use task_struct to store cl_env, we assume that once
- * called into Lustre, we'll never call into another part of the kernel
- * which uses those fields in task_struct without explicitly exiting
- * Lustre.
- *
- * If no space in task_struct is available, a hash will be used.
- * bz20044, bz22683.
- */
-
-struct cl_env {
- void *ce_magic;
- struct lu_env ce_lu;
- struct lu_context ce_ses;
-
- /**
- * This allows cl_env to be entered into cl_env_hash which implements
- * the current thread -> client environment lookup.
- */
- struct hlist_node ce_node;
- /**
- * Owner for the current cl_env.
- *
-	 * If LL_TASK_CL_ENV is defined, this points to the owning current,
-	 * for debugging purposes only;
-	 * otherwise a hash is used, and this is the key for cfs_hash.
-	 * Currently the thread pid is stored. Note that using the thread
-	 * pointer would lead to an unbalanced hash because of its specific
-	 * allocation locality, which can vary across platforms and OSes,
-	 * and even across OS versions.
- */
- void *ce_owner;
-
- /*
- * Linkage into global list of all client environments. Used for
- * garbage collection.
- */
- struct list_head ce_linkage;
-	/*
-	 * Reference counter.
-	 */
- int ce_ref;
- /*
- * Debugging field: address of the caller who made original
- * allocation.
- */
- void *ce_debug;
-};
-
-#define CL_ENV_INC(counter)
-#define CL_ENV_DEC(counter)
-
-static void cl_env_init0(struct cl_env *cle, void *debug)
-{
- LASSERT(cle->ce_ref == 0);
- LASSERT(cle->ce_magic == &cl_env_init0);
- LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
-
- cle->ce_ref = 1;
- cle->ce_debug = debug;
- CL_ENV_INC(busy);
-}
-
-
-/*
- * The implementation of using hash table to connect cl_env and thread
- */
-
-static struct cfs_hash *cl_env_hash;
-
-static unsigned cl_env_hops_hash(struct cfs_hash *lh,
- const void *key, unsigned mask)
-{
-#if BITS_PER_LONG == 64
- return cfs_hash_u64_hash((__u64)key, mask);
-#else
- return cfs_hash_u32_hash((__u32)key, mask);
-#endif
-}
-
-static void *cl_env_hops_obj(struct hlist_node *hn)
-{
- struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
- LASSERT(cle->ce_magic == &cl_env_init0);
- return (void *)cle;
-}
-
-static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
-{
- struct cl_env *cle = cl_env_hops_obj(hn);
-
- LASSERT(cle->ce_owner != NULL);
- return (key == cle->ce_owner);
-}
-
-static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
-{
- struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
- LASSERT(cle->ce_magic == &cl_env_init0);
-}
-
-static cfs_hash_ops_t cl_env_hops = {
- .hs_hash = cl_env_hops_hash,
- .hs_key = cl_env_hops_obj,
- .hs_keycmp = cl_env_hops_keycmp,
- .hs_object = cl_env_hops_obj,
- .hs_get = cl_env_hops_noop,
- .hs_put_locked = cl_env_hops_noop,
-};
-
-static inline struct cl_env *cl_env_fetch(void)
-{
- struct cl_env *cle;
-
- cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
- LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
- return cle;
-}
-
-static inline void cl_env_attach(struct cl_env *cle)
-{
- if (cle) {
- int rc;
-
- LASSERT(cle->ce_owner == NULL);
- cle->ce_owner = (void *) (long) current->pid;
- rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(rc == 0);
- }
-}
-
-static inline void cl_env_do_detach(struct cl_env *cle)
-{
- void *cookie;
-
- LASSERT(cle->ce_owner == (void *) (long) current->pid);
- cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(cookie == cle);
- cle->ce_owner = NULL;
-}
-
-static int cl_env_store_init(void)
-{
- cl_env_hash = cfs_hash_create("cl_env",
- HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
- HASH_CL_ENV_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &cl_env_hops,
- CFS_HASH_RW_BKTLOCK);
- return cl_env_hash != NULL ? 0 : -ENOMEM;
-}
-
-static void cl_env_store_fini(void)
-{
- cfs_hash_putref(cl_env_hash);
-}
-
-
-static inline struct cl_env *cl_env_detach(struct cl_env *cle)
-{
- if (cle == NULL)
- cle = cl_env_fetch();
-
- if (cle && cle->ce_owner)
- cl_env_do_detach(cle);
-
- return cle;
-}
-
-static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
-{
- struct lu_env *env;
- struct cl_env *cle;
-
- OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, GFP_NOFS);
- if (cle != NULL) {
- int rc;
-
- INIT_LIST_HEAD(&cle->ce_linkage);
- cle->ce_magic = &cl_env_init0;
- env = &cle->ce_lu;
- rc = lu_env_init(env, ctx_tags | LCT_CL_THREAD);
- if (rc == 0) {
- rc = lu_context_init(&cle->ce_ses,
- ses_tags | LCT_SESSION);
- if (rc == 0) {
- lu_context_enter(&cle->ce_ses);
- env->le_ses = &cle->ce_ses;
- cl_env_init0(cle, debug);
- } else
- lu_env_fini(env);
- }
- if (rc != 0) {
- OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
- env = ERR_PTR(rc);
- } else {
- CL_ENV_INC(create);
- CL_ENV_INC(total);
- }
- } else
- env = ERR_PTR(-ENOMEM);
- return env;
-}
-
-static void cl_env_fini(struct cl_env *cle)
-{
- CL_ENV_DEC(total);
- lu_context_fini(&cle->ce_lu.le_ctx);
- lu_context_fini(&cle->ce_ses);
- OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
-}
-
-static inline struct cl_env *cl_env_container(struct lu_env *env)
-{
- return container_of(env, struct cl_env, ce_lu);
-}
-
-struct lu_env *cl_env_peek(int *refcheck)
-{
- struct lu_env *env;
- struct cl_env *cle;
-
- CL_ENV_INC(lookup);
-
- /* check that we don't go far from untrusted pointer */
- CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
-
- env = NULL;
- cle = cl_env_fetch();
- if (cle != NULL) {
- CL_ENV_INC(hit);
- env = &cle->ce_lu;
- *refcheck = ++cle->ce_ref;
- }
- CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
- return env;
-}
-EXPORT_SYMBOL(cl_env_peek);
-
-/**
- * Returns an lu_env: if there already is an environment associated with the
- * current thread, it is returned; otherwise, a new environment is allocated.
- *
- * \param refcheck pointer to a counter used to detect environment leaks. In
- * the usual case cl_env_get() and cl_env_put() are called in the same lexical
- * scope and pointer to the same integer is passed as \a refcheck. This is
- * used to detect missed cl_env_put().
- *
- * \see cl_env_put()
- */
-struct lu_env *cl_env_get(int *refcheck)
-{
- struct lu_env *env;
-
- env = cl_env_peek(refcheck);
- if (env == NULL) {
- env = cl_env_new(lu_context_tags_default,
- lu_session_tags_default,
- __builtin_return_address(0));
-
- if (!IS_ERR(env)) {
- struct cl_env *cle;
-
- cle = cl_env_container(env);
- cl_env_attach(cle);
- *refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- }
- }
- return env;
-}
-EXPORT_SYMBOL(cl_env_get);
-
-/**
- * Forces an allocation of a fresh environment with given tags.
- *
- * \see cl_env_get()
- */
-struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
-{
- struct lu_env *env;
-
- LASSERT(cl_env_peek(refcheck) == NULL);
- env = cl_env_new(tags, tags, __builtin_return_address(0));
- if (!IS_ERR(env)) {
- struct cl_env *cle;
-
- cle = cl_env_container(env);
- *refcheck = cle->ce_ref;
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- }
- return env;
-}
-EXPORT_SYMBOL(cl_env_alloc);
-
-static void cl_env_exit(struct cl_env *cle)
-{
- LASSERT(cle->ce_owner == NULL);
- lu_context_exit(&cle->ce_lu.le_ctx);
- lu_context_exit(&cle->ce_ses);
-}
-
-/**
- * Release an environment.
- *
- * Decrement \a env reference counter. When counter drops to 0, nothing in
- * this thread is using environment and it is returned to the allocation
- * cache, or freed straight away, if cache is large enough.
- */
-void cl_env_put(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle;
-
- cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 0);
- LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
-
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
- if (--cle->ce_ref == 0) {
- CL_ENV_DEC(busy);
- cl_env_detach(cle);
- cle->ce_debug = NULL;
- cl_env_exit(cle);
- cl_env_fini(cle);
- }
-}
-EXPORT_SYMBOL(cl_env_put);
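A small sketch of the refcheck discipline described above, with cl_env_get() and cl_env_put() paired in one lexical scope around the same counter; the function name and body are assumed:

static int example_with_env(void)
{
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	/* ... work that needs a client environment ... */

	cl_env_put(env, &refcheck);
	return 0;
}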
-
-/**
- * Declares a point of re-entrancy.
- *
- * \see cl_env_reexit()
- */
-void *cl_env_reenter(void)
-{
- return cl_env_detach(NULL);
-}
-EXPORT_SYMBOL(cl_env_reenter);
-
-/**
- * Exits re-entrancy.
- */
-void cl_env_reexit(void *cookie)
-{
- cl_env_detach(NULL);
- cl_env_attach(cookie);
-}
-EXPORT_SYMBOL(cl_env_reexit);
-
-/**
- * Set up the user-supplied \a env as the current environment. This is to be
- * used to guarantee that an environment exists even when cl_env_get() fails.
- * It is up to the user to ensure proper concurrency control.
- *
- * \see cl_env_unplant()
- */
-void cl_env_implant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 0);
-
- cl_env_attach(cle);
- cl_env_get(refcheck);
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-}
-EXPORT_SYMBOL(cl_env_implant);
-
-/**
- * Detach environment installed earlier by cl_env_implant().
- */
-void cl_env_unplant(struct lu_env *env, int *refcheck)
-{
- struct cl_env *cle = cl_env_container(env);
-
- LASSERT(cle->ce_ref > 1);
-
- CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-
- cl_env_detach(cle);
- cl_env_put(env, refcheck);
-}
-EXPORT_SYMBOL(cl_env_unplant);
-
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
-{
- struct lu_env *env;
-
- nest->cen_cookie = NULL;
- env = cl_env_peek(&nest->cen_refcheck);
- if (env != NULL) {
- if (!cl_io_is_going(env))
- return env;
- cl_env_put(env, &nest->cen_refcheck);
- nest->cen_cookie = cl_env_reenter();
- }
- env = cl_env_get(&nest->cen_refcheck);
- if (IS_ERR(env)) {
- cl_env_reexit(nest->cen_cookie);
- return env;
- }
-
- LASSERT(!cl_io_is_going(env));
- return env;
-}
-EXPORT_SYMBOL(cl_env_nested_get);
-
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
-{
- cl_env_put(env, &nest->cen_refcheck);
- cl_env_reexit(nest->cen_cookie);
-}
-EXPORT_SYMBOL(cl_env_nested_put);
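A sketch of how the nested helpers above appear to be meant to pair, avoiding re-use of an environment whose IO is still in flight; the caller and its nested work are assumed:

static void example_nested_env(void)
{
	struct cl_env_nest nest;
	struct lu_env *env;

	env = cl_env_nested_get(&nest);
	if (IS_ERR(env))
		return;

	/* ... re-entrant work using env ... */

	cl_env_nested_put(&nest, env);
}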
-
-/**
- * Converts struct ost_lvb to struct cl_attr.
- *
- * \see cl_attr2lvb
- */
-void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
-{
- attr->cat_size = lvb->lvb_size;
- attr->cat_mtime = lvb->lvb_mtime;
- attr->cat_atime = lvb->lvb_atime;
- attr->cat_ctime = lvb->lvb_ctime;
- attr->cat_blocks = lvb->lvb_blocks;
-}
-EXPORT_SYMBOL(cl_lvb2attr);
-
-/*****************************************************************************
- *
- * Temporary prototype thing: mirror obd-devices into cl devices.
- *
- */
-
-struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
- struct lu_device_type *ldt,
- struct lu_device *next)
-{
- const char *typename;
- struct lu_device *d;
-
- LASSERT(ldt != NULL);
-
- typename = ldt->ldt_name;
- d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
- if (!IS_ERR(d)) {
- int rc;
-
- if (site != NULL)
- d->ld_site = site;
- rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
- if (rc == 0) {
- lu_device_get(d);
- lu_ref_add(&d->ld_reference,
- "lu-stack", &lu_site_init);
- } else {
- ldt->ldt_ops->ldto_device_free(env, d);
- CERROR("can't init device '%s', %d\n", typename, rc);
- d = ERR_PTR(rc);
- }
- } else
- CERROR("Cannot allocate device: '%s'\n", typename);
- return lu2cl_dev(d);
-}
-EXPORT_SYMBOL(cl_type_setup);
-
-/**
- * Finalize device stack by calling lu_stack_fini().
- */
-void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
-{
- lu_stack_fini(env, cl2lu_dev(cl));
-}
-EXPORT_SYMBOL(cl_stack_fini);
-
-int cl_lock_init(void);
-void cl_lock_fini(void);
-
-int cl_page_init(void);
-void cl_page_fini(void);
-
-static struct lu_context_key cl_key;
-
-struct cl_thread_info *cl_env_info(const struct lu_env *env)
-{
- return lu_context_key_get(&env->le_ctx, &cl_key);
-}
-
-/* defines cl0_key_{init,fini}() */
-LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
-
-static void *cl_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct cl_thread_info *info;
-
- info = cl0_key_init(ctx, key);
- if (!IS_ERR(info)) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
- return info;
-}
-
-static void cl_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct cl_thread_info *info;
- int i;
-
- info = data;
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
- cl0_key_fini(ctx, key, data);
-}
-
-static void cl_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct cl_thread_info *info = data;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
- LASSERT(info->clt_counters[i].ctc_nr_held == 0);
- LASSERT(info->clt_counters[i].ctc_nr_used == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
- lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
- }
-}
-
-static struct lu_context_key cl_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = cl_key_init,
- .lct_fini = cl_key_fini,
- .lct_exit = cl_key_exit
-};
-
-static struct lu_kmem_descr cl_object_caches[] = {
- {
- .ckd_cache = &cl_env_kmem,
- .ckd_name = "cl_env_kmem",
- .ckd_size = sizeof(struct cl_env)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/**
- * Global initialization of cl-data. Create kmem caches, register
- * lu_context_key's, etc.
- *
- * \see cl_global_fini()
- */
-int cl_global_init(void)
-{
- int result;
-
- result = cl_env_store_init();
- if (result)
- return result;
-
- result = lu_kmem_init(cl_object_caches);
- if (result)
- goto out_store;
-
- LU_CONTEXT_KEY_INIT(&cl_key);
- result = lu_context_key_register(&cl_key);
- if (result)
- goto out_kmem;
-
- result = cl_lock_init();
- if (result)
- goto out_context;
-
- result = cl_page_init();
- if (result)
- goto out_lock;
-
- return 0;
-out_lock:
- cl_lock_fini();
-out_context:
- lu_context_key_degister(&cl_key);
-out_kmem:
- lu_kmem_fini(cl_object_caches);
-out_store:
- cl_env_store_fini();
- return result;
-}
-
-/**
- * Finalization of global cl-data. Dual to cl_global_init().
- */
-void cl_global_fini(void)
-{
- cl_lock_fini();
- cl_page_fini();
- lu_context_key_degister(&cl_key);
- lu_kmem_fini(cl_object_caches);
- cl_env_store_fini();
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
deleted file mode 100644
index e27062d38a45..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ /dev/null
@@ -1,1534 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Client Lustre Page.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include <linux/list.h>
-
-#include "../include/cl_object.h"
-#include "cl_internal.h"
-
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix);
-
-# define PASSERT(env, page, expr) \
- do { \
- if (unlikely(!(expr))) { \
- CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
- LASSERT(0); \
- } \
- } while (0)
-
-# define PINVRNT(env, page, exp) \
- ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
-
-/**
- * Internal version of cl_page_top(); it should be called only if the page is
- * known not to be freed, e.g., with the page referenced, the radix tree lock
- * held, or the page owned.
- */
-static struct cl_page *cl_page_top_trusted(struct cl_page *page)
-{
- while (page->cp_parent != NULL)
- page = page->cp_parent;
- return page;
-}
-
-/**
- * Internal version of cl_page_get().
- *
- * This function can be used to obtain initial reference to previously
- * unreferenced cached object. It can be called only if concurrent page
- * reclamation is somehow prevented, e.g., by locking page radix-tree
- * (cl_object_header::coh_page_guard), or by keeping a lock on a VM page,
- * associated with \a page.
- *
- * Use with care! Not exported.
- */
-static void cl_page_get_trust(struct cl_page *page)
-{
- LASSERT(atomic_read(&page->cp_ref) > 0);
- atomic_inc(&page->cp_ref);
-}
-
-/**
- * Returns a slice within a page, corresponding to the given layer in the
- * device stack.
- *
- * \see cl_lock_at()
- */
-static const struct cl_page_slice *
-cl_page_at_trusted(const struct cl_page *page,
- const struct lu_device_type *dtype)
-{
- const struct cl_page_slice *slice;
-
- page = cl_page_top_trusted((struct cl_page *)page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- return slice;
- }
- page = page->cp_child;
- } while (page != NULL);
- return NULL;
-}
-
-/**
- * Returns a page with given index in the given object, or NULL if no page is
- * found. Acquires a reference on \a page.
- *
- * Locking: called under cl_object_header::coh_page_guard spin-lock.
- */
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
-{
- struct cl_page *page;
-
- assert_spin_locked(&hdr->coh_page_guard);
-
- page = radix_tree_lookup(&hdr->coh_tree, index);
- if (page != NULL)
- cl_page_get_trust(page);
- return page;
-}
-EXPORT_SYMBOL(cl_page_lookup);
-
-/**
- * Returns a list of pages by a given [start, end] of \a obj.
- *
- * If the lookup has to give up before hogging the CPU for too long, it
- * returns CLP_GANG_RESCHED; in that case the caller should reschedule and
- * retry (\see cl_lock_discard_pages()).
- *
- * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
- * crucial in the face of [offset, EOF] locks.
- *
- * Return at least one page in @queue unless there is no covered page.
- */
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata)
-{
- struct cl_object_header *hdr;
- struct cl_page *page;
- struct cl_page **pvec;
- const struct cl_page_slice *slice;
- const struct lu_device_type *dtype;
- pgoff_t idx;
- unsigned int nr;
- unsigned int i;
- unsigned int j;
- int res = CLP_GANG_OKAY;
- int tree_lock = 1;
-
- idx = start;
- hdr = cl_object_header(obj);
- pvec = cl_env_info(env)->clt_pvec;
- dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
- spin_lock(&hdr->coh_page_guard);
- while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
- idx, CLT_PVEC_SIZE)) > 0) {
- int end_of_region = 0;
-
- idx = pvec[nr - 1]->cp_index + 1;
- for (i = 0, j = 0; i < nr; ++i) {
- page = pvec[i];
- pvec[i] = NULL;
-
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (page->cp_index > end) {
- end_of_region = 1;
- break;
- }
- if (page->cp_state == CPS_FREEING)
- continue;
-
- slice = cl_page_at_trusted(page, dtype);
- /*
-			 * Pages of an lsm-less file have no underlying
-			 * sub-page for osc, in case of ...
- */
- PASSERT(env, page, slice != NULL);
-
- page = slice->cpl_page;
- /*
- * Can safely call cl_page_get_trust() under
- * radix-tree spin-lock.
- *
- * XXX not true, because @page is from object another
- * than @hdr and protected by different tree lock.
- */
- cl_page_get_trust(page);
- lu_ref_add_atomic(&page->cp_reference,
- "gang_lookup", current);
- pvec[j++] = page;
- }
-
- /*
- * Here a delicate locking dance is performed. Current thread
- * holds a reference to a page, but has to own it before it
- * can be placed into queue. Owning implies waiting, so
- * radix-tree lock is to be released. After a wait one has to
- * check that pages weren't truncated (cl_page_own() returns
- * error in the latter case).
- */
- spin_unlock(&hdr->coh_page_guard);
- tree_lock = 0;
-
- for (i = 0; i < j; ++i) {
- page = pvec[i];
- if (res == CLP_GANG_OKAY)
- res = (*cb)(env, io, page, cbdata);
- lu_ref_del(&page->cp_reference,
- "gang_lookup", current);
- cl_page_put(env, page);
- }
- if (nr < CLT_PVEC_SIZE || end_of_region)
- break;
-
- if (res == CLP_GANG_OKAY && need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
- break;
-
- spin_lock(&hdr->coh_page_guard);
- tree_lock = 1;
- }
- if (tree_lock)
- spin_unlock(&hdr->coh_page_guard);
- return res;
-}
-EXPORT_SYMBOL(cl_page_gang_lookup);
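The callback type itself is not shown in this hunk, but its shape follows from check_and_discard_cb()/discard_cb() earlier in the diff. A minimal, assumed callback that merely counts covered pages could look like the sketch below, with the caller retrying on CLP_GANG_RESCHED after cond_resched() the way cl_lock_discard_pages() does:

static int example_count_cb(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, void *cbdata)
{
	unsigned long *nr = cbdata;

	(*nr)++;			/* page reference is held by the caller */
	return CLP_GANG_OKAY;		/* keep scanning */
}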
-
-static void cl_page_free(const struct lu_env *env, struct cl_page *page)
-{
- struct cl_object *obj = page->cp_obj;
-
- PASSERT(env, page, list_empty(&page->cp_batch));
- PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, page->cp_req == NULL);
- PASSERT(env, page, page->cp_parent == NULL);
- PASSERT(env, page, page->cp_state == CPS_FREEING);
-
- might_sleep();
- while (!list_empty(&page->cp_layers)) {
- struct cl_page_slice *slice;
-
- slice = list_entry(page->cp_layers.next,
- struct cl_page_slice, cpl_linkage);
- list_del_init(page->cp_layers.next);
- slice->cpl_ops->cpo_fini(env, slice);
- }
- lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
- cl_object_put(env, obj);
- lu_ref_fini(&page->cp_reference);
- kfree(page);
-}
-
-/**
- * Helper function updating page state. This is the only place in the code
- * where cl_page::cp_state field is mutated.
- */
-static inline void cl_page_state_set_trust(struct cl_page *page,
- enum cl_page_state state)
-{
- /* bypass const. */
- *(enum cl_page_state *)&page->cp_state = state;
-}
-
-static struct cl_page *cl_page_alloc(const struct lu_env *env,
- struct cl_object *o, pgoff_t ind, struct page *vmpage,
- enum cl_page_type type)
-{
- struct cl_page *page;
- struct lu_object_header *head;
-
- page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
- if (page != NULL) {
- int result = 0;
-
- atomic_set(&page->cp_ref, 1);
- if (type == CPT_CACHEABLE) /* for radix tree */
- atomic_inc(&page->cp_ref);
- page->cp_obj = o;
- cl_object_get(o);
- lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
- page);
- page->cp_index = ind;
- cl_page_state_set_trust(page, CPS_CACHED);
- page->cp_type = type;
- INIT_LIST_HEAD(&page->cp_layers);
- INIT_LIST_HEAD(&page->cp_batch);
- INIT_LIST_HEAD(&page->cp_flight);
- mutex_init(&page->cp_mutex);
- lu_ref_init(&page->cp_reference);
- head = o->co_lu.lo_header;
- list_for_each_entry(o, &head->loh_layers,
- co_lu.lo_linkage) {
- if (o->co_ops->coo_page_init != NULL) {
- result = o->co_ops->coo_page_init(env, o,
- page, vmpage);
- if (result != 0) {
- cl_page_delete0(env, page, 0);
- cl_page_free(env, page);
- page = ERR_PTR(result);
- break;
- }
- }
- }
- } else {
- page = ERR_PTR(-ENOMEM);
- }
- return page;
-}
-
-/**
- * Returns a cl_page with index \a idx at the object \a o, and associated with
- * the VM page \a vmpage.
- *
- * This is the main entry point into the cl_page caching interface. First, a
- * cache (implemented as a per-object radix tree) is consulted. If page is
- * found there, it is returned immediately. Otherwise new page is allocated
- * and returned. In any case, additional reference to page is acquired.
- *
- * \see cl_object_find(), cl_lock_find()
- */
-static struct cl_page *cl_page_find0(const struct lu_env *env,
- struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type,
- struct cl_page *parent)
-{
- struct cl_page *page = NULL;
- struct cl_page *ghost = NULL;
- struct cl_object_header *hdr;
- int err;
-
- LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
- might_sleep();
-
- hdr = cl_object_header(o);
-
- CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
- idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
- /* fast path. */
- if (type == CPT_CACHEABLE) {
- /*
- * vmpage lock is used to protect the child/parent
- * relationship
- */
- KLASSERT(PageLocked(vmpage));
- /*
- * cl_vmpage_page() can be called here without any locks as
- *
- * - "vmpage" is locked (which prevents ->private from
- * concurrent updates), and
- *
- * - "o" cannot be destroyed while current thread holds a
- * reference on it.
- */
- page = cl_vmpage_page(vmpage, o);
- PINVRNT(env, page,
- ergo(page != NULL,
- cl_page_vmpage(env, page) == vmpage &&
- (void *)radix_tree_lookup(&hdr->coh_tree,
- idx) == page));
- }
-
- if (page != NULL)
- return page;
-
- /* allocate and initialize cl_page */
- page = cl_page_alloc(env, o, idx, vmpage, type);
- if (IS_ERR(page))
- return page;
-
- if (type == CPT_TRANSIENT) {
- if (parent) {
- LASSERT(page->cp_parent == NULL);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- return page;
- }
-
- /*
- * XXX optimization: use radix_tree_preload() here, and change tree
- * gfp mask to GFP_KERNEL in cl_object_header_init().
- */
- spin_lock(&hdr->coh_page_guard);
- err = radix_tree_insert(&hdr->coh_tree, idx, page);
- if (err != 0) {
- ghost = page;
- /*
- * Noted by Jay: a lock on \a vmpage protects cl_page_find()
- * from this race, but
- *
- * 0. it's better to have cl_page interface "locally
- * consistent" so that its correctness can be reasoned
- * about without appealing to the (obscure world of) VM
- * locking.
- *
- * 1. handling this race allows ->coh_tree to remain
- * consistent even when VM locking is somehow busted,
- * which is very useful during diagnosing and debugging.
- */
- page = ERR_PTR(err);
- CL_PAGE_DEBUG(D_ERROR, env, ghost,
- "fail to insert into radix tree: %d\n", err);
- } else {
- if (parent) {
- LASSERT(page->cp_parent == NULL);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- hdr->coh_pages++;
- }
- spin_unlock(&hdr->coh_page_guard);
-
- if (unlikely(ghost != NULL)) {
- cl_page_delete0(env, ghost, 0);
- cl_page_free(env, ghost);
- }
- return page;
-}
-
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
-{
- return cl_page_find0(env, o, idx, vmpage, type, NULL);
-}
-EXPORT_SYMBOL(cl_page_find);
-
-
-struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent)
-{
- return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
-}
-EXPORT_SYMBOL(cl_page_find_sub);
-
-static inline int cl_page_invariant(const struct cl_page *pg)
-{
- struct cl_object_header *header;
- struct cl_page *parent;
- struct cl_page *child;
- struct cl_io *owner;
-
- /*
- * Page invariant is protected by a VM lock.
- */
- LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
- header = cl_object_header(pg->cp_obj);
- parent = pg->cp_parent;
- child = pg->cp_child;
- owner = pg->cp_owner;
-
- return cl_page_in_use(pg) &&
- ergo(parent != NULL, parent->cp_child == pg) &&
- ergo(child != NULL, child->cp_parent == pg) &&
- ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
- ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
- ergo(owner != NULL && parent != NULL,
- parent->cp_owner == pg->cp_owner->ci_parent) &&
- ergo(owner != NULL && child != NULL,
- child->cp_owner->ci_parent == owner) &&
- /*
- * Either page is early in initialization (has neither child
- * nor parent yet), or it is in the object radix tree.
- */
- ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
- (void *)radix_tree_lookup(&header->coh_tree,
- pg->cp_index) == pg ||
- (child == NULL && parent == NULL));
-}
-
-static void cl_page_state_set0(const struct lu_env *env,
- struct cl_page *page, enum cl_page_state state)
-{
- enum cl_page_state old;
-
- /*
- * Matrix of allowed state transitions [old][new], for sanity
- * checking.
- */
- static const int allowed_transitions[CPS_NR][CPS_NR] = {
- [CPS_CACHED] = {
- [CPS_CACHED] = 0,
- [CPS_OWNED] = 1, /* io finds existing cached page */
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 1, /* write-out from the cache */
- [CPS_FREEING] = 1, /* eviction on the memory pressure */
- },
- [CPS_OWNED] = {
- [CPS_CACHED] = 1, /* release to the cache */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 1, /* start read immediately */
- [CPS_PAGEOUT] = 1, /* start write immediately */
- [CPS_FREEING] = 1, /* lock invalidation or truncate */
- },
- [CPS_PAGEIN] = {
- [CPS_CACHED] = 1, /* io completion */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- },
- [CPS_PAGEOUT] = {
- [CPS_CACHED] = 1, /* io completion */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- },
- [CPS_FREEING] = {
- [CPS_CACHED] = 0,
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- }
- };
-
- old = page->cp_state;
- PASSERT(env, page, allowed_transitions[old][state]);
- CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
- for (; page != NULL; page = page->cp_child) {
- PASSERT(env, page, page->cp_state == old);
- PASSERT(env, page,
- equi(state == CPS_OWNED, page->cp_owner != NULL));
-
- cl_page_state_set_trust(page, state);
- }
-}
-
-static void cl_page_state_set(const struct lu_env *env,
- struct cl_page *page, enum cl_page_state state)
-{
- cl_page_state_set0(env, page, state);
-}
-
-/**
- * Acquires an additional reference to a page.
- *
- * This can be called only by caller already possessing a reference to \a
- * page.
- *
- * \see cl_object_get(), cl_lock_get().
- */
-void cl_page_get(struct cl_page *page)
-{
- cl_page_get_trust(page);
-}
-EXPORT_SYMBOL(cl_page_get);
-
-/**
- * Releases a reference to a page.
- *
- * When last reference is released, page is returned to the cache, unless it
- * is in cl_page_state::CPS_FREEING state, in which case it is immediately
- * destroyed.
- *
- * \see cl_object_put(), cl_lock_put().
- */
-void cl_page_put(const struct lu_env *env, struct cl_page *page)
-{
- PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
-
- CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
- atomic_read(&page->cp_ref));
-
- if (atomic_dec_and_test(&page->cp_ref)) {
- LASSERT(page->cp_state == CPS_FREEING);
-
- LASSERT(atomic_read(&page->cp_ref) == 0);
- PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, list_empty(&page->cp_batch));
- /*
- * Page is no longer reachable by other threads. Tear
- * it down.
- */
- cl_page_free(env, page);
- }
-}
-EXPORT_SYMBOL(cl_page_put);
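A sketch of the reference pattern implied by cl_page_find() and cl_page_put(); the helper name is assumed, and the vmpage is expected to be locked by the caller, as the KLASSERT() in cl_page_find0() requires:

static int example_touch_page(const struct lu_env *env, struct cl_object *obj,
			      pgoff_t index, struct page *vmpage)
{
	struct cl_page *page;

	page = cl_page_find(env, obj, index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* ... operate on the cl_page ... */

	cl_page_put(env, page);	/* drop the reference cl_page_find() took */
	return 0;
}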
-
-/**
- * Returns a VM page associated with a given cl_page.
- */
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- /*
- * Find uppermost layer with ->cpo_vmpage() method, and return its
- * result.
- */
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_ops->cpo_vmpage != NULL)
- return slice->cpl_ops->cpo_vmpage(env, slice);
- }
- page = page->cp_child;
- } while (page != NULL);
- LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
-}
-EXPORT_SYMBOL(cl_page_vmpage);
-
-/**
- * Returns a cl_page associated with a VM page, and given cl_object.
- */
-struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
-{
- struct cl_page *top;
- struct cl_page *page;
-
- KLASSERT(PageLocked(vmpage));
-
- /*
- * NOTE: absence of races and liveness of data are guaranteed by page
- * lock on a "vmpage". That works because object destruction has
- * bottom-to-top pass.
- */
-
- /*
- * This loop assumes that ->private points to the top-most page. This
- * can be rectified easily.
- */
- top = (struct cl_page *)vmpage->private;
- if (top == NULL)
- return NULL;
-
- for (page = top; page != NULL; page = page->cp_child) {
- if (cl_object_same(page->cp_obj, obj)) {
- cl_page_get_trust(page);
- break;
- }
- }
- LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
- return page;
-}
-EXPORT_SYMBOL(cl_vmpage_page);
-
-/**
- * Returns the top-page for a given page.
- *
- * \see cl_object_top(), cl_io_top()
- */
-struct cl_page *cl_page_top(struct cl_page *page)
-{
- return cl_page_top_trusted(page);
-}
-EXPORT_SYMBOL(cl_page_top);
-
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
- const struct lu_device_type *dtype)
-{
- return cl_page_at_trusted(page, dtype);
-}
-EXPORT_SYMBOL(cl_page_at);
-
-#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
-
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
- \
- __result = 0; \
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method != NULL) { \
- __result = (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- __page = __page->cp_child; \
- } while (__page != NULL && __result == 0); \
- if (__result > 0) \
- __result = 0; \
- __result; \
-})
-
-#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method != NULL) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_child; \
- } while (__page != NULL); \
-} while (0)
-
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- /* get to the bottom page. */ \
- while (__page->cp_child != NULL) \
- __page = __page->cp_child; \
- do { \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method != NULL) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_parent; \
- } while (__page != NULL); \
-} while (0)
-
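[Editor's note] The CL_PAGE_* macros above address a per-layer method by its byte offset inside struct cl_page_operations and walk the page's layer list for whichever operation that offset names. The following standalone userspace sketch (not Lustre code; all names are illustrative) shows the same offsetof-based dispatch over a small, array-backed layer stack:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	struct layer_ops {
		int (*prep)(const char *layer);
		int (*flush)(const char *layer);
	};

	struct layer {
		const char *name;
		const struct layer_ops *ops;
	};

	/* Invoke the method stored at byte offset 'op' of each layer's ops
	 * table, skipping layers that do not implement it and stopping on
	 * the first error, as CL_PAGE_INVOKE does. */
	static int invoke_all(const struct layer *layers, int nr, ptrdiff_t op)
	{
		int (*method)(const char *);
		int i, rc = 0;

		for (i = 0; i < nr && rc == 0; i++) {
			memcpy(&method, (const char *)layers[i].ops + op,
			       sizeof(method));
			if (method != NULL)
				rc = method(layers[i].name);
		}
		return rc > 0 ? 0 : rc;
	}

	static int vvp_prep(const char *l)  { printf("%s: prep\n", l);  return 0; }
	static int osc_flush(const char *l) { printf("%s: flush\n", l); return 0; }

	static const struct layer_ops vvp_ops = { .prep = vvp_prep };
	static const struct layer_ops osc_ops = { .flush = osc_flush };

	int main(void)
	{
		const struct layer stack[] = {
			{ "vvp", &vvp_ops }, { "osc", &osc_ops },
		};

		invoke_all(stack, 2, offsetof(struct layer_ops, prep));
		invoke_all(stack, 2, offsetof(struct layer_ops, flush));
		return 0;
	}
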
-static int cl_page_invoke(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- return CL_PAGE_INVOKE(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
-}
-
-static void cl_page_invoid(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- CL_PAGE_INVOID(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *), io);
-}
-
-static void cl_page_owner_clear(struct cl_page *page)
-{
- for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
- if (page->cp_owner != NULL) {
- LASSERT(page->cp_owner->ci_owned_nr > 0);
- page->cp_owner->ci_owned_nr--;
- page->cp_owner = NULL;
- page->cp_task = NULL;
- }
- }
-}
-
-static void cl_page_owner_set(struct cl_page *page)
-{
- for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
- LASSERT(page->cp_owner != NULL);
- page->cp_owner->ci_owned_nr++;
- }
-}
-
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- enum cl_page_state state;
-
- state = pg->cp_state;
- PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
- PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_owner_clear(pg);
-
- if (state == CPS_OWNED)
- cl_page_state_set(env, pg, CPS_CACHED);
- /*
-	 * Completion call-backs are executed in bottom-up order, so that the
-	 * uppermost layer (llite), responsible for VFS/VM interaction, runs
-	 * last and can release locks safely.
- */
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
-}
-
-/**
- * Returns true iff the page is owned by the given io.
- */
-int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
-{
- LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- return pg->cp_state == CPS_OWNED && pg->cp_owner == io;
-}
-EXPORT_SYMBOL(cl_page_is_owned);
-
-/**
- * Try to own a page by IO.
- *
- * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
- * into cl_page_state::CPS_OWNED state.
- *
- * \pre !cl_page_is_owned(pg, io)
- * \post result == 0 iff cl_page_is_owned(pg, io)
- *
- * \retval 0 success
- *
- * \retval -ve failure, e.g., page was destroyed (and landed in
- * cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED).
- * or, page was owned by another thread, or in IO.
- *
- * \see cl_page_disown()
- * \see cl_page_operations::cpo_own()
- * \see cl_page_own_try()
- * \see cl_page_own
- */
-static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, int nonblock)
-{
- int result;
-
- PINVRNT(env, pg, !cl_page_is_owned(pg, io));
-
- pg = cl_page_top(pg);
- io = cl_io_top(io);
-
- if (pg->cp_state == CPS_FREEING) {
- result = -ENOENT;
- } else {
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
- (const struct lu_env *,
- const struct cl_page_slice *,
- struct cl_io *, int),
- io, nonblock);
- if (result == 0) {
- PASSERT(env, pg, pg->cp_owner == NULL);
- PASSERT(env, pg, pg->cp_req == NULL);
- pg->cp_owner = io;
- pg->cp_task = current;
- cl_page_owner_set(pg);
- if (pg->cp_state != CPS_FREEING) {
- cl_page_state_set(env, pg, CPS_OWNED);
- } else {
- cl_page_disown0(env, io, pg);
- result = -ENOENT;
- }
- }
- }
- PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
- return result;
-}
-
-/**
- * Own a page, might be blocked.
- *
- * \see cl_page_own0()
- */
-int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
-{
- return cl_page_own0(env, io, pg, 0);
-}
-EXPORT_SYMBOL(cl_page_own);
-
-/**
- * Nonblock version of cl_page_own().
- *
- * \see cl_page_own0()
- */
-int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg)
-{
- return cl_page_own0(env, io, pg, 1);
-}
-EXPORT_SYMBOL(cl_page_own_try);
-
-
-/**
- * Assume page ownership.
- *
- * Called when page is already locked by the hosting VM.
- *
- * \pre !cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_assume()
- */
-void cl_page_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
-
- pg = cl_page_top(pg);
- io = cl_io_top(io);
-
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
- PASSERT(env, pg, pg->cp_owner == NULL);
- pg->cp_owner = io;
- pg->cp_task = current;
- cl_page_owner_set(pg);
- cl_page_state_set(env, pg, CPS_OWNED);
-}
-EXPORT_SYMBOL(cl_page_assume);
-
-/**
- * Releases page ownership without unlocking the page.
- *
- * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
- * underlying VM page (as VM is supposed to do this itself).
- *
- * \pre cl_page_is_owned(pg, io)
- * \post !cl_page_is_owned(pg, io)
- *
- * \see cl_page_assume()
- */
-void cl_page_unassume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- pg = cl_page_top(pg);
- io = cl_io_top(io);
- cl_page_owner_clear(pg);
- cl_page_state_set(env, pg, CPS_CACHED);
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
-}
-EXPORT_SYMBOL(cl_page_unassume);
-
-/**
- * Releases page ownership.
- *
- * Moves page into cl_page_state::CPS_CACHED.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post !cl_page_is_owned(pg, io)
- *
- * \see cl_page_own()
- * \see cl_page_operations::cpo_disown()
- */
-void cl_page_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
-
- pg = cl_page_top(pg);
- io = cl_io_top(io);
- cl_page_disown0(env, io, pg);
-}
-EXPORT_SYMBOL(cl_page_disown);
-
-/**
- * Called when page is to be removed from the object, e.g., as a result of
- * truncate.
- *
- * Calls cl_page_operations::cpo_discard() top-to-bottom.
- *
- * \pre cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_discard()
- */
-void cl_page_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
-}
-EXPORT_SYMBOL(cl_page_discard);
-
-/**
- * Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g., in an error-handling cl_page_find()->cl_page_delete0()
- * path. Doesn't check page invariant.
- */
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix)
-{
- struct cl_page *tmp = pg;
-
- PASSERT(env, pg, pg == cl_page_top(pg));
- PASSERT(env, pg, pg->cp_state != CPS_FREEING);
-
- /*
-	 * Sever all ways to obtain new pointers to @pg.
- */
- cl_page_owner_clear(pg);
-
- /*
-	 * Unexport the page before freeing it, so that its content is
-	 * considered invalid.
-	 * We have to do this because a CPS_FREEING cl_page may NOT be
-	 * under the protection of a cl_lock.
-	 * If this page is later found by other threads, it will be
-	 * forced to be re-read.
- */
- cl_page_export(env, pg, 0);
- cl_page_state_set0(env, pg, CPS_FREEING);
-
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
- (const struct lu_env *, const struct cl_page_slice *));
-
- if (tmp->cp_type == CPT_CACHEABLE) {
- if (!radix)
- /* !radix means that @pg is not yet in the radix tree,
- * skip removing it.
- */
- tmp = pg->cp_child;
- for (; tmp != NULL; tmp = tmp->cp_child) {
- void *value;
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(tmp->cp_obj);
- spin_lock(&hdr->coh_page_guard);
- value = radix_tree_delete(&hdr->coh_tree,
- tmp->cp_index);
- PASSERT(env, tmp, value == tmp);
- PASSERT(env, tmp, hdr->coh_pages > 0);
- hdr->coh_pages--;
- spin_unlock(&hdr->coh_page_guard);
- cl_page_put(env, tmp);
- }
- }
-}
-
-/**
- * Called when a decision is made to throw page out of memory.
- *
- * Notifies all layers about page destruction by calling
- * cl_page_operations::cpo_delete() method top-to-bottom.
- *
- * Moves page into cl_page_state::CPS_FREEING state (this is the only place
- * where transition to this state happens).
- *
- * Eliminates all venues through which new references to the page can be
- * obtained:
- *
- * - removes page from the radix trees,
- *
- * - breaks linkage from VM page to cl_page.
- *
- * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
- * drain after some time, at which point page will be recycled.
- *
- * \pre pg == cl_page_top(pg)
- * \pre VM page is locked
- * \post pg->cp_state == CPS_FREEING
- *
- * \see cl_page_operations::cpo_delete()
- */
-void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_delete0(env, pg, 1);
-}
-EXPORT_SYMBOL(cl_page_delete);
-
-/**
- * Unmaps page from user virtual memory.
- *
- * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to unmap page from user space
- * virtual memory.
- *
- * \see cl_page_operations::cpo_unmap()
- */
-int cl_page_unmap(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
-}
-EXPORT_SYMBOL(cl_page_unmap);
-
-/**
- * Marks page up-to-date.
- *
- * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the \a uptodate argument.
- *
- * \see cl_page_operations::cpo_export()
- */
-void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
-{
- PINVRNT(env, pg, cl_page_invariant(pg));
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
- (const struct lu_env *,
- const struct cl_page_slice *, int), uptodate);
-}
-EXPORT_SYMBOL(cl_page_export);
-
-/**
- * Returns true iff \a pg is VM locked in a suitable sense by the calling
- * thread.
- */
-int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
-{
- int result;
- const struct cl_page_slice *slice;
-
- pg = cl_page_top_trusted((struct cl_page *)pg);
- slice = container_of(pg->cp_layers.next,
- const struct cl_page_slice, cpl_linkage);
- PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
- /*
- * Call ->cpo_is_vmlocked() directly instead of going through
- * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
- * cl_page_invariant().
- */
- result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
- PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
- return result == -EBUSY;
-}
-EXPORT_SYMBOL(cl_page_is_vmlocked);
-
-static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
-{
- return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
-}
-
-static void cl_page_io_start(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt)
-{
- /*
- * Page is queued for IO, change its state.
- */
- cl_page_owner_clear(pg);
- cl_page_state_set(env, pg, cl_req_type_state(crt));
-}
-
-/**
- * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
- * called top-to-bottom. Every layer either agrees to submit this page (by
- * returning 0), or requests to omit this page (by returning -EALREADY). Layer
- * handling interactions with the VM also has to inform VM that page is under
- * transfer now.
- */
-int cl_page_prep(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
-{
- int result;
-
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
- PINVRNT(env, pg, crt < CRT_NR);
-
- /*
- * XXX this has to be called bottom-to-top, so that llite can set up
- * PG_writeback without risking other layers deciding to skip this
- * page.
- */
- if (crt >= CRT_NR)
- return -EINVAL;
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
- if (result == 0)
- cl_page_io_start(env, pg, crt);
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_prep);
-
-/**
- * Notify layers about transfer completion.
- *
- * Invoked by transfer sub-system (which is a part of osc) to notify layers
- * that a transfer, of which this page is a part, has completed.
- *
- * Completion call-backs are executed in bottom-up order, so that the
- * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
- * and can release locks safely.
- *
- * \pre pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
- * \post pg->cp_state == CPS_CACHED
- *
- * \see cl_page_operations::cpo_completion()
- */
-void cl_page_completion(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret)
-{
- struct cl_sync_io *anchor = pg->cp_sync_io;
-
- PASSERT(env, pg, crt < CRT_NR);
- /* cl_page::cp_req already cleared by the caller (osc_completion()) */
- PASSERT(env, pg, pg->cp_req == NULL);
- PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
- if (crt == CRT_READ && ioret == 0) {
- PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
- pg->cp_flags |= CPF_READ_COMPLETED;
- }
-
- cl_page_state_set(env, pg, CPS_CACHED);
- if (crt >= CRT_NR)
- return;
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
- (const struct lu_env *,
- const struct cl_page_slice *, int), ioret);
- if (anchor) {
- LASSERT(cl_page_is_vmlocked(env, pg));
- LASSERT(pg->cp_sync_io == anchor);
- pg->cp_sync_io = NULL;
- }
- /*
- * As page->cp_obj is pinned by a reference from page->cp_req, it is
- * safe to call cl_page_put() without risking object destruction in a
- * non-blocking context.
- */
- cl_page_put(env, pg);
-
- if (anchor)
- cl_sync_io_note(anchor, ioret);
-}
-EXPORT_SYMBOL(cl_page_completion);
-
-/**
- * Notify layers that transfer formation engine decided to yank this page from
- * the cache and to make it a part of a transfer.
- *
- * \pre pg->cp_state == CPS_CACHED
- * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
- *
- * \see cl_page_operations::cpo_make_ready()
- */
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt)
-{
- int result;
-
- PINVRNT(env, pg, crt < CRT_NR);
-
- if (crt >= CRT_NR)
- return -EINVAL;
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
- (const struct lu_env *,
- const struct cl_page_slice *));
- if (result == 0) {
- PASSERT(env, pg, pg->cp_state == CPS_CACHED);
- cl_page_io_start(env, pg, crt);
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_make_ready);
-
-/**
- * Notify layers that high level io decided to place this page into a cache
- * for future transfer.
- *
- * The layer implementing transfer engine (osc) has to register this page in
- * its queues.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_cache_add()
- */
-int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
-{
- const struct cl_page_slice *scan;
- int result = 0;
-
- PINVRNT(env, pg, crt < CRT_NR);
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- if (crt >= CRT_NR)
- return -EINVAL;
-
- list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
- if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
- continue;
-
- result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
- if (result != 0)
- break;
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_cache_add);
-
-/**
- * Called when a page is being written back at the kernel's initiative.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
- *
- * \see cl_page_operations::cpo_flush()
- */
-int cl_page_flush(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg)
-{
- int result;
-
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_flush);
-
-/**
- * Checks whether the page is protected by an extent lock of at least the
- * required mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- int rc;
-
- PINVRNT(env, page, cl_page_invariant(page));
-
- rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- PASSERT(env, page, rc != 0);
- return rc;
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- cl_page_own(env, io, page);
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- return CLP_GANG_OKAY;
-}
-
-/**
- * Purges all cached pages belonging to the object \a obj.
- */
-int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
-{
- struct cl_thread_info *info;
- struct cl_object *obj = cl_object_top(clobj);
- struct cl_io *io;
- int result;
-
- info = cl_env_info(env);
- io = &info->clt_io;
-
- /*
-	 * Initialize the io. This is ugly since we never do IO in this
-	 * function; we just make the cl_page_list functions happy. -jay
- */
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, obj);
- if (result != 0) {
- cl_io_fini(env, io);
- return io->ci_result;
- }
-
- do {
- result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
- page_prune_cb, NULL);
- if (result == CLP_GANG_RESCHED)
- cond_resched();
- } while (result != CLP_GANG_OKAY);
-
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_pages_prune);
-
-/**
- * Tells transfer engine that only part of a page is to be transmitted.
- *
- * \see cl_page_operations::cpo_clip()
- */
-void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
- int from, int to)
-{
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
- (const struct lu_env *,
- const struct cl_page_slice *, int, int),
- from, to);
-}
-EXPORT_SYMBOL(cl_page_clip);
-
-/**
- * Prints a human-readable representation of \a pg via \a printer.
- */
-void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *pg)
-{
- (*printer)(env, cookie,
- "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
- pg, atomic_read(&pg->cp_ref), pg->cp_obj,
- pg->cp_index, pg->cp_parent, pg->cp_child,
- pg->cp_state, pg->cp_error, pg->cp_type,
- pg->cp_owner, pg->cp_req, pg->cp_flags);
-}
-EXPORT_SYMBOL(cl_page_header_print);
-
-/**
- * Prints a human-readable representation of \a pg, including its slices, via \a printer.
- */
-void cl_page_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *pg)
-{
- struct cl_page *scan;
-
- for (scan = cl_page_top((struct cl_page *)pg);
- scan != NULL; scan = scan->cp_child)
- cl_page_header_print(env, cookie, printer, scan);
- CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
- (const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t p), cookie, printer);
- (*printer)(env, cookie, "end page@%p\n", pg);
-}
-EXPORT_SYMBOL(cl_page_print);
-
-/**
- * Cancel a page which is still in a transfer.
- */
-int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
-{
- return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
- (const struct lu_env *,
- const struct cl_page_slice *));
-}
-EXPORT_SYMBOL(cl_page_cancel);
-
-/**
- * Converts a page index within object \a obj into a byte offset.
- */
-loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
-{
- /*
- * XXX for now.
- */
- return (loff_t)idx << PAGE_CACHE_SHIFT;
-}
-EXPORT_SYMBOL(cl_offset);
-
-/**
- * Converts a byte offset within object \a obj into a page index.
- */
-pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
-{
- /*
- * XXX for now.
- */
- return offset >> PAGE_CACHE_SHIFT;
-}
-EXPORT_SYMBOL(cl_index);
-
-int cl_page_size(const struct cl_object *obj)
-{
- return 1 << PAGE_CACHE_SHIFT;
-}
-EXPORT_SYMBOL(cl_page_size);
-
-/**
- * Adds page slice to the compound page.
- *
- * This is called by cl_object_operations::coo_page_init() methods to add a
- * per-layer state to the page. New state is added at the end of
- * cl_page::cp_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
- */
-void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
- const struct cl_page_operations *ops)
-{
- list_add_tail(&slice->cpl_linkage, &page->cp_layers);
- slice->cpl_obj = obj;
- slice->cpl_ops = ops;
- slice->cpl_page = page;
-}
-EXPORT_SYMBOL(cl_page_slice_add);
-
-int cl_page_init(void)
-{
- return 0;
-}
-
-void cl_page_fini(void)
-{
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
deleted file mode 100644
index 058b859de156..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-# include <linux/atomic.h>
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../../include/linux/lnet/lnetctl.h"
-#include "../include/lustre_debug.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre/lustre_build_version.h"
-#include <linux/list.h>
-#include "../include/cl_object.h"
-#include "llog_internal.h"
-
-
-struct obd_device *obd_devs[MAX_OBD_DEVICES];
-EXPORT_SYMBOL(obd_devs);
-struct list_head obd_types;
-DEFINE_RWLOCK(obd_dev_lock);
-
-/* The following are visible and mutable through /proc/sys/lustre/. */
-unsigned int obd_debug_peer_on_timeout;
-EXPORT_SYMBOL(obd_debug_peer_on_timeout);
-unsigned int obd_dump_on_timeout;
-EXPORT_SYMBOL(obd_dump_on_timeout);
-unsigned int obd_dump_on_eviction;
-EXPORT_SYMBOL(obd_dump_on_eviction);
-unsigned int obd_max_dirty_pages = 256;
-EXPORT_SYMBOL(obd_max_dirty_pages);
-atomic_t obd_dirty_pages;
-EXPORT_SYMBOL(obd_dirty_pages);
-unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
-EXPORT_SYMBOL(obd_timeout);
-unsigned int obd_timeout_set;
-EXPORT_SYMBOL(obd_timeout_set);
-/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */
-unsigned int at_min;
-EXPORT_SYMBOL(at_min);
-unsigned int at_max = 600;
-EXPORT_SYMBOL(at_max);
-unsigned int at_history = 600;
-EXPORT_SYMBOL(at_history);
-int at_early_margin = 5;
-EXPORT_SYMBOL(at_early_margin);
-int at_extra = 30;
-EXPORT_SYMBOL(at_extra);
-
-atomic_t obd_dirty_transit_pages;
-EXPORT_SYMBOL(obd_dirty_transit_pages);
-
-char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 1] = JOBSTATS_DISABLE;
-EXPORT_SYMBOL(obd_jobid_var);
-
-char obd_jobid_node[JOBSTATS_JOBID_SIZE + 1];
-
-/* Get the jobid of the current process from the stored variable, or build
- * it from the process name and user ID.
- *
- * Historically this was also done by reading the environment variable
- * stored in between the "env_start" & "env_end" of task struct.
- * This is now deprecated.
- */
-int lustre_get_jobid(char *jobid)
-{
- memset(jobid, 0, JOBSTATS_JOBID_SIZE);
- /* Jobstats isn't enabled */
- if (strcmp(obd_jobid_var, JOBSTATS_DISABLE) == 0)
- return 0;
-
- /* Use process name + fsuid as jobid */
- if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) {
- snprintf(jobid, JOBSTATS_JOBID_SIZE, "%s.%u",
- current_comm(),
- from_kuid(&init_user_ns, current_fsuid()));
- return 0;
- }
-
- /* Whole node dedicated to single job */
- if (strcmp(obd_jobid_var, JOBSTATS_NODELOCAL) == 0) {
- strcpy(jobid, obd_jobid_node);
- return 0;
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL(lustre_get_jobid);
-
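[Editor's note] For the JOBSTATS_PROCNAME_UID case above, the jobid is simply the command name joined to the fsuid with a dot. A rough userspace approximation (not kernel code; the buffer size is assumed and geteuid() stands in for the kernel's current_fsuid()):

	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	#define JOBID_SIZE 32	/* stand-in for JOBSTATS_JOBID_SIZE */

	/* Build "<command name>.<uid>", mirroring the kernel's "%s.%u" format;
	 * truncation is silently accepted, as it is there. */
	static void build_jobid(char *jobid, size_t len, const char *comm)
	{
		snprintf(jobid, len, "%s.%u", comm, (unsigned int)geteuid());
	}

	int main(void)
	{
		char jobid[JOBID_SIZE];

		build_jobid(jobid, sizeof(jobid), "dd");
		printf("jobid: %s\n", jobid);
		return 0;
	}
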
-static inline void obd_data2conn(struct lustre_handle *conn,
- struct obd_ioctl_data *data)
-{
- memset(conn, 0, sizeof(*conn));
- conn->cookie = data->ioc_cookie;
-}
-
-static inline void obd_conn2data(struct obd_ioctl_data *data,
- struct lustre_handle *conn)
-{
- data->ioc_cookie = conn->cookie;
-}
-
-int class_resolve_dev_name(__u32 len, const char *name)
-{
- int rc;
- int dev;
-
- if (!len || !name) {
-		CERROR("No name passed!\n");
- rc = -EINVAL;
- goto out;
- }
- if (name[len - 1] != 0) {
- CERROR("Name not nul terminated!\n");
- rc = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s\n", name);
- dev = class_name2dev(name);
- if (dev == -1) {
- CDEBUG(D_IOCTL, "No device for name %s!\n", name);
- rc = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s, dev %d\n", name, dev);
- rc = dev;
-
-out:
- return rc;
-}
-
-int class_handle_ioctl(unsigned int cmd, unsigned long arg)
-{
- char *buf = NULL;
- struct obd_ioctl_data *data;
- struct libcfs_debug_ioctl_data *debug_data;
- struct obd_device *obd = NULL;
- int err = 0, len = 0;
-
- /* only for debugging */
- if (cmd == LIBCFS_IOC_DEBUG_MASK) {
- debug_data = (struct libcfs_debug_ioctl_data *)arg;
- libcfs_subsystem_debug = debug_data->subs;
- libcfs_debug = debug_data->debug;
- return 0;
- }
-
- CDEBUG(D_IOCTL, "cmd = %x\n", cmd);
- if (obd_ioctl_getdata(&buf, &len, (void *)arg)) {
- CERROR("OBD ioctl: data error\n");
- return -EINVAL;
- }
- data = (struct obd_ioctl_data *)buf;
-
- switch (cmd) {
- case OBD_IOC_PROCESS_CFG: {
- struct lustre_cfg *lcfg;
-
- if (!data->ioc_plen1 || !data->ioc_pbuf1) {
- CERROR("No config buffer passed!\n");
- err = -EINVAL;
- goto out;
- }
- lcfg = kzalloc(data->ioc_plen1, GFP_NOFS);
- if (!lcfg) {
- err = -ENOMEM;
- goto out;
- }
- err = copy_from_user(lcfg, data->ioc_pbuf1,
- data->ioc_plen1);
- if (!err)
- err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1);
- if (!err)
- err = class_process_config(lcfg);
-
- kfree(lcfg);
- goto out;
- }
-
- case OBD_GET_VERSION:
- if (!data->ioc_inlbuf1) {
- CERROR("No buffer passed in ioctl\n");
- err = -EINVAL;
- goto out;
- }
-
- if (strlen(BUILD_VERSION) + 1 > data->ioc_inllen1) {
- CERROR("ioctl buffer too small to hold version\n");
- err = -EINVAL;
- goto out;
- }
-
- memcpy(data->ioc_bulk, BUILD_VERSION,
- strlen(BUILD_VERSION) + 1);
-
- err = obd_ioctl_popdata((void *)arg, data, len);
- if (err)
- err = -EFAULT;
- goto out;
-
- case OBD_IOC_NAME2DEV: {
- /* Resolve a device name. This does not change the
- * currently selected device.
- */
- int dev;
-
- dev = class_resolve_dev_name(data->ioc_inllen1,
- data->ioc_inlbuf1);
- data->ioc_dev = dev;
- if (dev < 0) {
- err = -EINVAL;
- goto out;
- }
-
- err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
- if (err)
- err = -EFAULT;
- goto out;
- }
-
- case OBD_IOC_UUID2DEV: {
- /* Resolve a device uuid. This does not change the
- * currently selected device.
- */
- int dev;
- struct obd_uuid uuid;
-
- if (!data->ioc_inllen1 || !data->ioc_inlbuf1) {
- CERROR("No UUID passed!\n");
- err = -EINVAL;
- goto out;
- }
- if (data->ioc_inlbuf1[data->ioc_inllen1 - 1] != 0) {
- CERROR("UUID not NUL terminated!\n");
- err = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s\n", data->ioc_inlbuf1);
- obd_str2uuid(&uuid, data->ioc_inlbuf1);
- dev = class_uuid2dev(&uuid);
- data->ioc_dev = dev;
- if (dev == -1) {
- CDEBUG(D_IOCTL, "No device for UUID %s!\n",
- data->ioc_inlbuf1);
- err = -EINVAL;
- goto out;
- }
-
- CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1,
- dev);
- err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
- if (err)
- err = -EFAULT;
- goto out;
- }
-
- case OBD_IOC_CLOSE_UUID: {
- CDEBUG(D_IOCTL, "closing all connections to uuid %s (NOOP)\n",
- data->ioc_inlbuf1);
- err = 0;
- goto out;
- }
-
- case OBD_IOC_GETDEVICE: {
- int index = data->ioc_count;
- char *status, *str;
-
- if (!data->ioc_inlbuf1) {
- CERROR("No buffer passed in ioctl\n");
- err = -EINVAL;
- goto out;
- }
- if (data->ioc_inllen1 < 128) {
-			CERROR("ioctl buffer too small to hold device info\n");
- err = -EINVAL;
- goto out;
- }
-
- obd = class_num2obd(index);
- if (!obd) {
- err = -ENOENT;
- goto out;
- }
-
- if (obd->obd_stopping)
- status = "ST";
- else if (obd->obd_set_up)
- status = "UP";
- else if (obd->obd_attached)
- status = "AT";
- else
- status = "--";
- str = (char *)data->ioc_bulk;
- snprintf(str, len - sizeof(*data), "%3d %s %s %s %s %d",
- (int)index, status, obd->obd_type->typ_name,
- obd->obd_name, obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
- err = obd_ioctl_popdata((void *)arg, data, len);
-
- err = 0;
- goto out;
- }
-
- }
-
- if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
- if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL) {
- err = -EINVAL;
- goto out;
- }
- if (strnlen(data->ioc_inlbuf4, MAX_OBD_NAME) >= MAX_OBD_NAME) {
- err = -EINVAL;
- goto out;
- }
- obd = class_name2obd(data->ioc_inlbuf4);
- } else if (data->ioc_dev < class_devno_max()) {
- obd = class_num2obd(data->ioc_dev);
- } else {
- CERROR("OBD ioctl: No device\n");
- err = -EINVAL;
- goto out;
- }
-
- if (obd == NULL) {
- CERROR("OBD ioctl : No Device %d\n", data->ioc_dev);
- err = -EINVAL;
- goto out;
- }
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
-
- if (!obd->obd_set_up || obd->obd_stopping) {
- CERROR("OBD ioctl: device not setup %d\n", data->ioc_dev);
- err = -EINVAL;
- goto out;
- }
-
- switch (cmd) {
- case OBD_IOC_NO_TRANSNO: {
- if (!obd->obd_attached) {
- CERROR("Device %d not attached\n", obd->obd_minor);
- err = -ENODEV;
- goto out;
- }
- CDEBUG(D_HA, "%s: disabling committed-transno notification\n",
- obd->obd_name);
- obd->obd_no_transno = 1;
- err = 0;
- goto out;
- }
-
- default: {
- err = obd_iocontrol(cmd, obd->obd_self_export, len, data, NULL);
- if (err)
- goto out;
-
- err = obd_ioctl_popdata((void *)arg, data, len);
- if (err)
- err = -EFAULT;
- goto out;
- }
- }
-
- out:
- if (buf)
- obd_ioctl_freedata(buf, len);
- return err;
-} /* class_handle_ioctl */
-
-#define OBD_INIT_CHECK
-int obd_init_checks(void)
-{
- __u64 u64val, div64val;
- char buf[64];
- int len, ret = 0;
-
- CDEBUG(D_INFO, "LPU64=%s, LPD64=%s, LPX64=%s\n", "%llu", "%lld", "%#llx");
-
- CDEBUG(D_INFO, "OBD_OBJECT_EOF = %#llx\n", (__u64)OBD_OBJECT_EOF);
-
- u64val = OBD_OBJECT_EOF;
- CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = %#llx\n", u64val);
- if (u64val != OBD_OBJECT_EOF) {
- CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n",
- u64val, (int)sizeof(u64val));
- ret = -EINVAL;
- }
- len = snprintf(buf, sizeof(buf), "%#llx", u64val);
- if (len != 18) {
- CWARN("LPX64 wrong length! strlen(%s)=%d != 18\n", buf, len);
- ret = -EINVAL;
- }
-
- div64val = OBD_OBJECT_EOF;
- CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = %#llx\n", u64val);
- if (u64val != OBD_OBJECT_EOF) {
- CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n",
- u64val, (int)sizeof(u64val));
- ret = -EOVERFLOW;
- }
- if (u64val >> 8 != OBD_OBJECT_EOF >> 8) {
- CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n",
- u64val, (int)sizeof(u64val));
- return -EOVERFLOW;
- }
- if (do_div(div64val, 256) != (u64val & 255)) {
- CERROR("do_div(%#llx,256) != %llu\n", u64val, u64val & 255);
- return -EOVERFLOW;
- }
- if (u64val >> 8 != div64val) {
- CERROR("do_div(%#llx,256) %llu != %llu\n",
- u64val, div64val, u64val >> 8);
- return -EOVERFLOW;
- }
- len = snprintf(buf, sizeof(buf), "%#llx", u64val);
- if (len != 18) {
- CWARN("LPX64 wrong length! strlen(%s)=%d != 18\n", buf, len);
- ret = -EINVAL;
- }
- len = snprintf(buf, sizeof(buf), "%llu", u64val);
- if (len != 20) {
- CWARN("LPU64 wrong length! strlen(%s)=%d != 20\n", buf, len);
- ret = -EINVAL;
- }
- len = snprintf(buf, sizeof(buf), "%lld", u64val);
- if (len != 2) {
- CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
- ret = -EINVAL;
- }
- if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
- CWARN("mask failed: u64val %llu >= %llu\n", u64val,
- (__u64)PAGE_CACHE_SIZE);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
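[Editor's note] obd_init_checks() above is essentially a set of printf-width and division sanity checks on a 64-bit all-ones value. A userspace rendering of the same checks (not kernel code, plain C99 types instead of the kernel's __u64/do_div):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t v = UINT64_MAX;	/* same bit pattern as OBD_OBJECT_EOF */
		char buf[64];

		/* "%#llx" of 0xffffffffffffffff is "0x" plus 16 hex digits = 18 chars */
		printf("hex len 18? %d\n",
		       snprintf(buf, sizeof(buf), "%#" PRIx64, v) == 18);
		/* unsigned decimal is 20 digits */
		printf("dec len 20? %d\n",
		       snprintf(buf, sizeof(buf), "%" PRIu64, v) == 20);
		/* signed decimal prints "-1", i.e. 2 chars */
		printf("sgn len 2?  %d\n",
		       snprintf(buf, sizeof(buf), "%" PRId64, (int64_t)v) == 2);
		/* dividing by 256 must agree with shifting right by 8 */
		printf("div ok?     %d\n", v / 256 == v >> 8);
		return 0;
	}
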
-extern int class_procfs_init(void);
-extern int class_procfs_clean(void);
-
-static int __init init_obdclass(void)
-{
- int i, err;
- int lustre_register_fs(void);
-
- LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n");
-
- spin_lock_init(&obd_types_lock);
- obd_zombie_impexp_init();
-
- err = obd_init_checks();
- if (err == -EOVERFLOW)
- return err;
-
- class_init_uuidlist();
- err = class_handle_init();
- if (err)
- return err;
-
- INIT_LIST_HEAD(&obd_types);
-
- err = misc_register(&obd_psdev);
- if (err) {
- CERROR("cannot register %d err %d\n", OBD_DEV_MINOR, err);
- return err;
- }
-
- /* This struct is already zeroed for us (static global) */
- for (i = 0; i < class_devno_max(); i++)
- obd_devs[i] = NULL;
-
- /* Default the dirty page cache cap to 1/2 of system memory.
- * For clients with less memory, a larger fraction is needed
- * for other purposes (mostly for BGL). */
- if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
- obd_max_dirty_pages = totalram_pages / 4;
- else
- obd_max_dirty_pages = totalram_pages / 2;
-
- err = obd_init_caches();
- if (err)
- return err;
-
- err = class_procfs_init();
- if (err)
- return err;
-
- err = obd_sysctl_init();
- if (err)
- return err;
-
- err = lu_global_init();
- if (err)
- return err;
-
- err = cl_global_init();
- if (err != 0)
- return err;
-
-
- err = llog_info_init();
- if (err)
- return err;
-
- err = lustre_register_fs();
-
- return err;
-}
-
-/* liblustre doesn't call cleanup_obdclass, apparently; the rest of the file
- * covers module and versioning goo. */
-static void cleanup_obdclass(void)
-{
- int i;
- int lustre_unregister_fs(void);
-
- lustre_unregister_fs();
-
- misc_deregister(&obd_psdev);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
- if (obd && obd->obd_set_up &&
- OBT(obd) && OBP(obd, detach)) {
- /* XXX should this call generic detach otherwise? */
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- OBP(obd, detach)(obd);
- }
- }
- llog_info_fini();
- cl_global_fini();
- lu_global_fini();
-
- obd_cleanup_caches();
-
- class_procfs_clean();
-
- class_handle_cleanup();
- class_exit_uuidlist();
- obd_zombie_impexp_stop();
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-
-module_init(init_obdclass);
-module_exit(cleanup_obdclass);
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
deleted file mode 100644
index 43a7f7a79b35..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/debug.c
- *
- * Helper routines for dumping data structs for debugging.
- */
-
-#define DEBUG_SUBSYSTEM D_OTHER
-
-#include <asm/unaligned.h>
-
-#include "../include/obd_support.h"
-#include "../include/lustre_debug.h"
-#include "../include/lustre_net.h"
-
-#define LPDS sizeof(__u64)
-int block_debug_setup(void *addr, int len, __u64 off, __u64 id)
-{
- LASSERT(addr);
-
- put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr+LPDS);
- addr += len - LPDS - LPDS;
- put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr+LPDS);
-
- return 0;
-}
-EXPORT_SYMBOL(block_debug_setup);
-
-int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id)
-{
- __u64 ne_off;
- int err = 0;
-
- LASSERT(addr);
-
- ne_off = le64_to_cpu (off);
- id = le64_to_cpu (id);
- if (memcmp(addr, (char *)&ne_off, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n",
- who, id, off, *(__u64 *)addr, ne_off);
- err = -EINVAL;
- }
- if (memcmp(addr + LPDS, (char *)&id, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu id: %#llx != %#llx\n",
- who, id, off, *(__u64 *)(addr + LPDS), id);
- err = -EINVAL;
- }
-
- addr += end - LPDS - LPDS;
- if (memcmp(addr, (char *)&ne_off, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu end off: %#llx != %#llx\n",
- who, id, off, *(__u64 *)addr, ne_off);
- err = -EINVAL;
- }
- if (memcmp(addr + LPDS, (char *)&id, LPDS)) {
- CDEBUG(D_ERROR, "%s: id %#llx offset %llu end id: %#llx != %#llx\n",
- who, id, off, *(__u64 *)(addr + LPDS), id);
- err = -EINVAL;
- }
-
- return err;
-}
-EXPORT_SYMBOL(block_debug_check);
-#undef LPDS
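[Editor's note] block_debug_setup()/block_debug_check() above stamp an (offset, id) pair at both ends of a data block and verify both copies after the block has travelled through the I/O path. A self-contained userspace sketch of the same guard-stamp pattern (illustrative names; host byte order is used here, whereas the kernel helpers force little-endian):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define STAMP (2 * sizeof(uint64_t))	/* off + id, at each end */

	/* Write the (off, id) pair at the start and at the end of the buffer. */
	static void stamp_setup(unsigned char *buf, size_t len,
				uint64_t off, uint64_t id)
	{
		memcpy(buf, &off, sizeof(off));
		memcpy(buf + sizeof(off), &id, sizeof(id));
		memcpy(buf + len - STAMP, &off, sizeof(off));
		memcpy(buf + len - STAMP + sizeof(off), &id, sizeof(id));
	}

	/* Return 0 when both copies still match, -1 when the block is corrupted. */
	static int stamp_check(const unsigned char *buf, size_t len,
			       uint64_t off, uint64_t id)
	{
		uint64_t v[4];

		memcpy(&v[0], buf, sizeof(uint64_t));
		memcpy(&v[1], buf + sizeof(uint64_t), sizeof(uint64_t));
		memcpy(&v[2], buf + len - STAMP, sizeof(uint64_t));
		memcpy(&v[3], buf + len - STAMP + sizeof(uint64_t), sizeof(uint64_t));

		return (v[0] == off && v[1] == id &&
			v[2] == off && v[3] == id) ? 0 : -1;
	}

	int main(void)
	{
		unsigned char block[4096] = { 0 };

		stamp_setup(block, sizeof(block), 0x1000, 42);
		printf("intact:  %d\n", stamp_check(block, sizeof(block), 0x1000, 42));

		block[0] ^= 0xff;	/* simulate corruption */
		printf("corrupt: %d\n", stamp_check(block, sizeof(block), 0x1000, 42));
		return 0;
	}
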
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
deleted file mode 100644
index 4467baa73ae2..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ /dev/null
@@ -1,1438 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/genops.c
- *
- * These are the only exported functions; they provide generic
- * infrastructure for managing object devices.
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-
-spinlock_t obd_types_lock;
-
-static struct kmem_cache *obd_device_cachep;
-struct kmem_cache *obdo_cachep;
-EXPORT_SYMBOL(obdo_cachep);
-static struct kmem_cache *import_cachep;
-
-static struct list_head obd_zombie_imports;
-static struct list_head obd_zombie_exports;
-static spinlock_t obd_zombie_impexp_lock;
-static void obd_zombie_impexp_notify(void);
-static void obd_zombie_export_add(struct obd_export *exp);
-static void obd_zombie_import_add(struct obd_import *imp);
-static void print_export_data(struct obd_export *exp,
- const char *status, int locks);
-
-int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
-EXPORT_SYMBOL(ptlrpc_put_connection_superhack);
-
-/*
- * Support functions: we could use inter-module communication, but this
- * is more portable to other OSes.
- */
-static struct obd_device *obd_device_alloc(void)
-{
- struct obd_device *obd;
-
- OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, GFP_NOFS);
- if (obd != NULL)
- obd->obd_magic = OBD_DEVICE_MAGIC;
- return obd;
-}
-
-static void obd_device_free(struct obd_device *obd)
-{
- LASSERT(obd != NULL);
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- if (obd->obd_namespace != NULL) {
- CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n",
- obd, obd->obd_namespace, obd->obd_force);
- LBUG();
- }
- lu_ref_fini(&obd->obd_reference);
- OBD_SLAB_FREE_PTR(obd, obd_device_cachep);
-}
-
-struct obd_type *class_search_type(const char *name)
-{
- struct list_head *tmp;
- struct obd_type *type;
-
- spin_lock(&obd_types_lock);
- list_for_each(tmp, &obd_types) {
- type = list_entry(tmp, struct obd_type, typ_chain);
- if (strcmp(type->typ_name, name) == 0) {
- spin_unlock(&obd_types_lock);
- return type;
- }
- }
- spin_unlock(&obd_types_lock);
- return NULL;
-}
-EXPORT_SYMBOL(class_search_type);
-
-struct obd_type *class_get_type(const char *name)
-{
- struct obd_type *type = class_search_type(name);
-
- if (!type) {
- const char *modname = name;
-
- if (strcmp(modname, "obdfilter") == 0)
- modname = "ofd";
-
- if (strcmp(modname, LUSTRE_LWP_NAME) == 0)
- modname = LUSTRE_OSP_NAME;
-
- if (!strncmp(modname, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME)))
- modname = LUSTRE_MDT_NAME;
-
- if (!request_module("%s", modname)) {
- CDEBUG(D_INFO, "Loaded module '%s'\n", modname);
- type = class_search_type(name);
- } else {
- LCONSOLE_ERROR_MSG(0x158, "Can't load module '%s'\n",
- modname);
- }
- }
- if (type) {
- spin_lock(&type->obd_type_lock);
- type->typ_refcnt++;
- try_module_get(type->typ_dt_ops->o_owner);
- spin_unlock(&type->obd_type_lock);
- }
- return type;
-}
-EXPORT_SYMBOL(class_get_type);
-
-void class_put_type(struct obd_type *type)
-{
- LASSERT(type);
- spin_lock(&type->obd_type_lock);
- type->typ_refcnt--;
- module_put(type->typ_dt_ops->o_owner);
- spin_unlock(&type->obd_type_lock);
-}
-EXPORT_SYMBOL(class_put_type);
-
-#define CLASS_MAX_NAME 1024
-
-int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
- const char *name,
- struct lu_device_type *ldt)
-{
- struct obd_type *type;
- int rc = 0;
-
- /* sanity check */
- LASSERT(strnlen(name, CLASS_MAX_NAME) < CLASS_MAX_NAME);
-
- if (class_search_type(name)) {
- CDEBUG(D_IOCTL, "Type %s already registered\n", name);
- return -EEXIST;
- }
-
- rc = -ENOMEM;
- type = kzalloc(sizeof(*type), GFP_NOFS);
- if (!type)
- return rc;
-
- type->typ_dt_ops = kzalloc(sizeof(*type->typ_dt_ops), GFP_NOFS);
- type->typ_md_ops = kzalloc(sizeof(*type->typ_md_ops), GFP_NOFS);
- type->typ_name = kzalloc(strlen(name) + 1, GFP_NOFS);
-
- if (!type->typ_dt_ops ||
- !type->typ_md_ops ||
- !type->typ_name)
- goto failed;
-
- *(type->typ_dt_ops) = *dt_ops;
- /* md_ops is optional */
- if (md_ops)
- *(type->typ_md_ops) = *md_ops;
- strcpy(type->typ_name, name);
- spin_lock_init(&type->obd_type_lock);
-
- type->typ_debugfs_entry = ldebugfs_register(type->typ_name,
- debugfs_lustre_root,
- NULL, type);
- if (IS_ERR_OR_NULL(type->typ_debugfs_entry)) {
- rc = type->typ_debugfs_entry ? PTR_ERR(type->typ_debugfs_entry)
- : -ENOMEM;
- type->typ_debugfs_entry = NULL;
- goto failed;
- }
-
- type->typ_kobj = kobject_create_and_add(type->typ_name, lustre_kobj);
- if (!type->typ_kobj) {
- rc = -ENOMEM;
- goto failed;
- }
-
- if (ldt != NULL) {
- type->typ_lu = ldt;
- rc = lu_device_type_init(ldt);
- if (rc != 0)
- goto failed;
- }
-
- spin_lock(&obd_types_lock);
- list_add(&type->typ_chain, &obd_types);
- spin_unlock(&obd_types_lock);
-
- return 0;
-
- failed:
- if (type->typ_kobj)
- kobject_put(type->typ_kobj);
- kfree(type->typ_name);
- kfree(type->typ_md_ops);
- kfree(type->typ_dt_ops);
- kfree(type);
- return rc;
-}
-EXPORT_SYMBOL(class_register_type);
-
-int class_unregister_type(const char *name)
-{
- struct obd_type *type = class_search_type(name);
-
- if (!type) {
- CERROR("unknown obd type\n");
- return -EINVAL;
- }
-
- if (type->typ_refcnt) {
- CERROR("type %s has refcount (%d)\n", name, type->typ_refcnt);
- /* This is a bad situation, let's make the best of it */
- /* Remove ops, but leave the name for debugging */
- kfree(type->typ_dt_ops);
- kfree(type->typ_md_ops);
- return -EBUSY;
- }
-
- if (type->typ_kobj)
- kobject_put(type->typ_kobj);
-
- if (!IS_ERR_OR_NULL(type->typ_debugfs_entry))
- ldebugfs_remove(&type->typ_debugfs_entry);
-
- if (type->typ_lu)
- lu_device_type_fini(type->typ_lu);
-
- spin_lock(&obd_types_lock);
- list_del(&type->typ_chain);
- spin_unlock(&obd_types_lock);
- kfree(type->typ_name);
- kfree(type->typ_dt_ops);
- kfree(type->typ_md_ops);
- kfree(type);
- return 0;
-} /* class_unregister_type */
-EXPORT_SYMBOL(class_unregister_type);
-
-/**
- * Create a new obd device.
- *
- * Find an empty slot in ::obd_devs[], create a new obd device in it.
- *
- * \param[in] type_name obd device type string.
- * \param[in] name obd device name.
- *
- * \retval ERR_PTR on failure, otherwise return the pointer to the
- * newly created obd device.
- */
-struct obd_device *class_newdev(const char *type_name, const char *name)
-{
- struct obd_device *result = NULL;
- struct obd_device *newdev;
- struct obd_type *type = NULL;
- int i;
- int new_obd_minor = 0;
-
- if (strlen(name) >= MAX_OBD_NAME) {
- CERROR("name/uuid must be < %u bytes long\n", MAX_OBD_NAME);
- return ERR_PTR(-EINVAL);
- }
-
- type = class_get_type(type_name);
- if (!type) {
- CERROR("OBD: unknown type: %s\n", type_name);
- return ERR_PTR(-ENODEV);
- }
-
- newdev = obd_device_alloc();
- if (!newdev) {
- result = ERR_PTR(-ENOMEM);
- goto out_type;
- }
-
- LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC);
-
- write_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (obd && (strcmp(name, obd->obd_name) == 0)) {
- CERROR("Device %s already exists at %d, won't add\n",
- name, i);
- if (result) {
- LASSERTF(result->obd_magic == OBD_DEVICE_MAGIC,
- "%p obd_magic %08x != %08x\n", result,
- result->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(result->obd_minor == new_obd_minor,
- "%p obd_minor %d != %d\n", result,
- result->obd_minor, new_obd_minor);
-
- obd_devs[result->obd_minor] = NULL;
- result->obd_name[0] = '\0';
- }
- result = ERR_PTR(-EEXIST);
- break;
- }
- if (!result && !obd) {
- result = newdev;
- result->obd_minor = i;
- new_obd_minor = i;
- result->obd_type = type;
- strncpy(result->obd_name, name,
- sizeof(result->obd_name) - 1);
- obd_devs[i] = result;
- }
- }
- write_unlock(&obd_dev_lock);
-
- if (!result && i >= class_devno_max()) {
- CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n",
- class_devno_max());
- result = ERR_PTR(-EOVERFLOW);
- goto out;
- }
-
- if (IS_ERR(result))
- goto out;
-
- CDEBUG(D_IOCTL, "Adding new device %s (%p)\n",
- result->obd_name, result);
-
- return result;
-out:
- obd_device_free(newdev);
-out_type:
- class_put_type(type);
- return result;
-}
-
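[Editor's note] class_newdev() above fills the fixed obd_devs[] table: it scans for a name clash and for the first free minor, and only keeps the slot when no duplicate exists. A simplified standalone sketch of that allocation pattern (not Lustre code; locking is omitted, and unlike the kernel version, which claims a slot tentatively and rolls back, this variant checks for duplicates before claiming):

	#include <stdio.h>
	#include <string.h>

	#define MAX_DEVS 8

	struct dev {
		char name[32];
		int  minor;
	};

	static struct dev *devs[MAX_DEVS];

	/* Register 'candidate' under 'name'; return it on success, NULL on
	 * a duplicate name or a full table. */
	static struct dev *newdev(struct dev *candidate, const char *name)
	{
		int i, free_slot = -1;

		for (i = 0; i < MAX_DEVS; i++) {
			if (devs[i] && strcmp(devs[i]->name, name) == 0)
				return NULL;		/* duplicate name */
			if (!devs[i] && free_slot < 0)
				free_slot = i;		/* remember first hole */
		}
		if (free_slot < 0)
			return NULL;			/* table full */

		snprintf(candidate->name, sizeof(candidate->name), "%s", name);
		candidate->minor = free_slot;
		devs[free_slot] = candidate;
		return candidate;
	}

	int main(void)
	{
		static struct dev a, b;

		printf("%p\n", (void *)newdev(&a, "lustre-OST0000"));
		printf("%p\n", (void *)newdev(&b, "lustre-OST0000"));	/* NULL */
		return 0;
	}
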
-void class_release_dev(struct obd_device *obd)
-{
- struct obd_type *obd_type = obd->obd_type;
-
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "%p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n",
- obd, obd->obd_minor, obd_devs[obd->obd_minor]);
- LASSERT(obd_type != NULL);
-
- CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
- obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
-
- write_lock(&obd_dev_lock);
- obd_devs[obd->obd_minor] = NULL;
- write_unlock(&obd_dev_lock);
- obd_device_free(obd);
-
- class_put_type(obd_type);
-}
-
-int class_name2dev(const char *name)
-{
- int i;
-
- if (!name)
- return -1;
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (obd && strcmp(name, obd->obd_name) == 0) {
- /* Make sure we finished attaching before we give
- out any references */
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- if (obd->obd_attached) {
- read_unlock(&obd_dev_lock);
- return i;
- }
- break;
- }
- }
- read_unlock(&obd_dev_lock);
-
- return -1;
-}
-EXPORT_SYMBOL(class_name2dev);
-
-struct obd_device *class_name2obd(const char *name)
-{
- int dev = class_name2dev(name);
-
- if (dev < 0 || dev > class_devno_max())
- return NULL;
- return class_num2obd(dev);
-}
-EXPORT_SYMBOL(class_name2obd);
-
-int class_uuid2dev(struct obd_uuid *uuid)
-{
- int i;
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) {
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- read_unlock(&obd_dev_lock);
- return i;
- }
- }
- read_unlock(&obd_dev_lock);
-
- return -1;
-}
-EXPORT_SYMBOL(class_uuid2dev);
-
-/**
- * Get obd device from ::obd_devs[]
- *
- * \param num [in] array index
- *
- * \retval NULL if ::obd_devs[\a num] does not contain an obd device,
- * otherwise return the obd device found there.
- */
-struct obd_device *class_num2obd(int num)
-{
- struct obd_device *obd = NULL;
-
- if (num < class_devno_max()) {
- obd = obd_devs[num];
- if (!obd)
- return NULL;
-
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
- "%p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(obd->obd_minor == num,
- "%p obd_minor %0d != %0d\n",
- obd, obd->obd_minor, num);
- }
-
- return obd;
-}
-EXPORT_SYMBOL(class_num2obd);
-
-/* Search for a client OBD connected to tgt_uuid. If grp_uuid is
- specified, then only the client with that uuid is returned,
- otherwise any client connected to the tgt is returned. */
-struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char *typ_name,
- struct obd_uuid *grp_uuid)
-{
- int i;
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (!obd)
- continue;
- if ((strncmp(obd->obd_type->typ_name, typ_name,
- strlen(typ_name)) == 0)) {
- if (obd_uuid_equals(tgt_uuid,
- &obd->u.cli.cl_target_uuid) &&
- ((grp_uuid) ? obd_uuid_equals(grp_uuid,
- &obd->obd_uuid) : 1)) {
- read_unlock(&obd_dev_lock);
- return obd;
- }
- }
- }
- read_unlock(&obd_dev_lock);
-
- return NULL;
-}
-EXPORT_SYMBOL(class_find_client_obd);
-
-/* Iterate the obd_device list looking for devices that have grp_uuid. Start
- searching at *next, and if a device is found, the next index to look
- at is saved in *next. If next is NULL, then the first matching device
- will always be returned. */
-struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
-{
- int i;
-
- if (!next)
- i = 0;
- else if (*next >= 0 && *next < class_devno_max())
- i = *next;
- else
- return NULL;
-
- read_lock(&obd_dev_lock);
- for (; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (!obd)
- continue;
- if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
- if (next)
- *next = i+1;
- read_unlock(&obd_dev_lock);
- return obd;
- }
- }
- read_unlock(&obd_dev_lock);
-
- return NULL;
-}
-EXPORT_SYMBOL(class_devices_in_group);
-
-/**
- * Notify every relevant OBD that the sptlrpc log for \a fsname has changed,
- * so that it can adjust its sptlrpc settings accordingly.
- */
-int class_notify_sptlrpc_conf(const char *fsname, int namelen)
-{
- struct obd_device *obd;
- const char *type;
- int i, rc = 0, rc2;
-
- LASSERT(namelen > 0);
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- obd = class_num2obd(i);
-
- if (!obd || obd->obd_set_up == 0 || obd->obd_stopping)
- continue;
-
- /* only notify mdc, osc, mdt, ost */
- type = obd->obd_type->typ_name;
- if (strcmp(type, LUSTRE_MDC_NAME) != 0 &&
- strcmp(type, LUSTRE_OSC_NAME) != 0 &&
- strcmp(type, LUSTRE_MDT_NAME) != 0 &&
- strcmp(type, LUSTRE_OST_NAME) != 0)
- continue;
-
- if (strncmp(obd->obd_name, fsname, namelen))
- continue;
-
- class_incref(obd, __func__, obd);
- read_unlock(&obd_dev_lock);
- rc2 = obd_set_info_async(NULL, obd->obd_self_export,
- sizeof(KEY_SPTLRPC_CONF),
- KEY_SPTLRPC_CONF, 0, NULL, NULL);
- rc = rc ? rc : rc2;
- class_decref(obd, __func__, obd);
- read_lock(&obd_dev_lock);
- }
- read_unlock(&obd_dev_lock);
- return rc;
-}
-EXPORT_SYMBOL(class_notify_sptlrpc_conf);
-
-void obd_cleanup_caches(void)
-{
- kmem_cache_destroy(obd_device_cachep);
- obd_device_cachep = NULL;
- kmem_cache_destroy(obdo_cachep);
- obdo_cachep = NULL;
- kmem_cache_destroy(import_cachep);
- import_cachep = NULL;
-}
-
-int obd_init_caches(void)
-{
- LASSERT(!obd_device_cachep);
- obd_device_cachep = kmem_cache_create("ll_obd_dev_cache",
- sizeof(struct obd_device),
- 0, 0, NULL);
- if (!obd_device_cachep)
- goto out;
-
- LASSERT(!obdo_cachep);
- obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo),
- 0, 0, NULL);
- if (!obdo_cachep)
- goto out;
-
- LASSERT(!import_cachep);
- import_cachep = kmem_cache_create("ll_import_cache",
- sizeof(struct obd_import),
- 0, 0, NULL);
- if (!import_cachep)
- goto out;
-
- return 0;
- out:
- obd_cleanup_caches();
- return -ENOMEM;
-
-}
-
-/* map connection to client */
-struct obd_export *class_conn2export(struct lustre_handle *conn)
-{
- struct obd_export *export;
-
- if (!conn) {
- CDEBUG(D_CACHE, "looking for null handle\n");
- return NULL;
- }
-
- if (conn->cookie == -1) { /* this means assign a new connection */
- CDEBUG(D_CACHE, "want a new connection\n");
- return NULL;
- }
-
- CDEBUG(D_INFO, "looking for export cookie %#llx\n", conn->cookie);
- export = class_handle2object(conn->cookie);
- return export;
-}
-EXPORT_SYMBOL(class_conn2export);
-
-struct obd_device *class_exp2obd(struct obd_export *exp)
-{
- if (exp)
- return exp->exp_obd;
- return NULL;
-}
-EXPORT_SYMBOL(class_exp2obd);
-
-struct obd_import *class_exp2cliimp(struct obd_export *exp)
-{
- struct obd_device *obd = exp->exp_obd;
-
- if (!obd)
- return NULL;
- return obd->u.cli.cl_import;
-}
-EXPORT_SYMBOL(class_exp2cliimp);
-
-/* Export management functions */
-static void class_export_destroy(struct obd_export *exp)
-{
- struct obd_device *obd = exp->exp_obd;
-
- LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
- LASSERT(obd != NULL);
-
- CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
- exp->exp_client_uuid.uuid, obd->obd_name);
-
- /* "Local" exports (lctl, LOV->{mdc,osc}) have no connection. */
- if (exp->exp_connection)
- ptlrpc_put_connection_superhack(exp->exp_connection);
-
- LASSERT(list_empty(&exp->exp_outstanding_replies));
- LASSERT(list_empty(&exp->exp_uncommitted_replies));
- LASSERT(list_empty(&exp->exp_req_replay_queue));
- LASSERT(list_empty(&exp->exp_hp_rpcs));
- obd_destroy_export(exp);
- class_decref(obd, "export", exp);
-
- OBD_FREE_RCU(exp, sizeof(*exp), &exp->exp_handle);
-}
-
-static void export_handle_addref(void *export)
-{
- class_export_get(export);
-}
-
-static struct portals_handle_ops export_handle_ops = {
- .hop_addref = export_handle_addref,
- .hop_free = NULL,
-};
-
-struct obd_export *class_export_get(struct obd_export *exp)
-{
- atomic_inc(&exp->exp_refcount);
- CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp,
- atomic_read(&exp->exp_refcount));
- return exp;
-}
-EXPORT_SYMBOL(class_export_get);
-
-void class_export_put(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
- CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
- atomic_read(&exp->exp_refcount) - 1);
-
- if (atomic_dec_and_test(&exp->exp_refcount)) {
- LASSERT(!list_empty(&exp->exp_obd_chain));
- CDEBUG(D_IOCTL, "final put %p/%s\n",
- exp, exp->exp_client_uuid.uuid);
-
- /* release nid stat reference */
- lprocfs_exp_cleanup(exp);
-
- obd_zombie_export_add(exp);
- }
-}
-EXPORT_SYMBOL(class_export_put);
-
-/* Creates a new export, adds it to the hash table, and returns a
- * pointer to it. The refcount is 2: one for the hash reference, and
- * one for the pointer returned by this function. */
-struct obd_export *class_new_export(struct obd_device *obd,
- struct obd_uuid *cluuid)
-{
- struct obd_export *export;
- struct cfs_hash *hash = NULL;
- int rc = 0;
-
- export = kzalloc(sizeof(*export), GFP_NOFS);
- if (!export)
- return ERR_PTR(-ENOMEM);
-
- export->exp_conn_cnt = 0;
- export->exp_lock_hash = NULL;
- export->exp_flock_hash = NULL;
- atomic_set(&export->exp_refcount, 2);
- atomic_set(&export->exp_rpc_count, 0);
- atomic_set(&export->exp_cb_count, 0);
- atomic_set(&export->exp_locks_count, 0);
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- INIT_LIST_HEAD(&export->exp_locks_list);
- spin_lock_init(&export->exp_locks_list_guard);
-#endif
- atomic_set(&export->exp_replay_count, 0);
- export->exp_obd = obd;
- INIT_LIST_HEAD(&export->exp_outstanding_replies);
- spin_lock_init(&export->exp_uncommitted_replies_lock);
- INIT_LIST_HEAD(&export->exp_uncommitted_replies);
- INIT_LIST_HEAD(&export->exp_req_replay_queue);
- INIT_LIST_HEAD(&export->exp_handle.h_link);
- INIT_LIST_HEAD(&export->exp_hp_rpcs);
- class_handle_hash(&export->exp_handle, &export_handle_ops);
- spin_lock_init(&export->exp_lock);
- spin_lock_init(&export->exp_rpc_lock);
- INIT_HLIST_NODE(&export->exp_uuid_hash);
- INIT_HLIST_NODE(&export->exp_nid_hash);
- spin_lock_init(&export->exp_bl_list_lock);
- INIT_LIST_HEAD(&export->exp_bl_list);
-
- export->exp_sp_peer = LUSTRE_SP_ANY;
- export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
- export->exp_client_uuid = *cluuid;
- obd_init_export(export);
-
- spin_lock(&obd->obd_dev_lock);
- /* shouldn't happen, but might race */
- if (obd->obd_stopping) {
- rc = -ENODEV;
- goto exit_unlock;
- }
-
- hash = cfs_hash_getref(obd->obd_uuid_hash);
- if (!hash) {
- rc = -ENODEV;
- goto exit_unlock;
- }
- spin_unlock(&obd->obd_dev_lock);
-
- if (!obd_uuid_equals(cluuid, &obd->obd_uuid)) {
- rc = cfs_hash_add_unique(hash, cluuid, &export->exp_uuid_hash);
- if (rc != 0) {
- LCONSOLE_WARN("%s: denying duplicate export for %s, %d\n",
- obd->obd_name, cluuid->uuid, rc);
- rc = -EALREADY;
- goto exit_err;
- }
- }
-
- spin_lock(&obd->obd_dev_lock);
- if (obd->obd_stopping) {
- cfs_hash_del(hash, cluuid, &export->exp_uuid_hash);
- rc = -ENODEV;
- goto exit_unlock;
- }
-
- class_incref(obd, "export", export);
- list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
- export->exp_obd->obd_num_exports++;
- spin_unlock(&obd->obd_dev_lock);
- cfs_hash_putref(hash);
- return export;
-
-exit_unlock:
- spin_unlock(&obd->obd_dev_lock);
-exit_err:
- if (hash)
- cfs_hash_putref(hash);
- class_handle_unhash(&export->exp_handle);
- LASSERT(hlist_unhashed(&export->exp_uuid_hash));
- obd_destroy_export(export);
- kfree(export);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL(class_new_export);
-
-void class_unlink_export(struct obd_export *exp)
-{
- class_handle_unhash(&exp->exp_handle);
-
- spin_lock(&exp->exp_obd->obd_dev_lock);
- /* delete the uuid-export hash item from the hash tables */
- if (!hlist_unhashed(&exp->exp_uuid_hash))
- cfs_hash_del(exp->exp_obd->obd_uuid_hash,
- &exp->exp_client_uuid,
- &exp->exp_uuid_hash);
-
- list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
- exp->exp_obd->obd_num_exports--;
- spin_unlock(&exp->exp_obd->obd_dev_lock);
- class_export_put(exp);
-}
-EXPORT_SYMBOL(class_unlink_export);
-
-/* Import management functions */
-static void class_import_destroy(struct obd_import *imp)
-{
- CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
- imp->imp_obd->obd_name);
-
- LASSERT_ATOMIC_ZERO(&imp->imp_refcount);
-
- ptlrpc_put_connection_superhack(imp->imp_connection);
-
- while (!list_empty(&imp->imp_conn_list)) {
- struct obd_import_conn *imp_conn;
-
- imp_conn = list_entry(imp->imp_conn_list.next,
- struct obd_import_conn, oic_item);
- list_del_init(&imp_conn->oic_item);
- ptlrpc_put_connection_superhack(imp_conn->oic_conn);
- kfree(imp_conn);
- }
-
- LASSERT(!imp->imp_sec);
- class_decref(imp->imp_obd, "import", imp);
- OBD_FREE_RCU(imp, sizeof(*imp), &imp->imp_handle);
-}
-
-static void import_handle_addref(void *import)
-{
- class_import_get(import);
-}
-
-static struct portals_handle_ops import_handle_ops = {
- .hop_addref = import_handle_addref,
- .hop_free = NULL,
-};
-
-struct obd_import *class_import_get(struct obd_import *import)
-{
- atomic_inc(&import->imp_refcount);
- CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import,
- atomic_read(&import->imp_refcount),
- import->imp_obd->obd_name);
- return import;
-}
-EXPORT_SYMBOL(class_import_get);
-
-void class_import_put(struct obd_import *imp)
-{
- LASSERT(list_empty(&imp->imp_zombie_chain));
- LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON);
-
- CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
- atomic_read(&imp->imp_refcount) - 1,
- imp->imp_obd->obd_name);
-
- if (atomic_dec_and_test(&imp->imp_refcount)) {
- CDEBUG(D_INFO, "final put import %p\n", imp);
- obd_zombie_import_add(imp);
- }
-
- /* catch possible import put race */
- LASSERT_ATOMIC_GE_LT(&imp->imp_refcount, 0, LI_POISON);
-}
-EXPORT_SYMBOL(class_import_put);
-
-static void init_imp_at(struct imp_at *at)
-{
- int i;
-
- at_init(&at->iat_net_latency, 0, 0);
- for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
- /* max service estimates are tracked on the server side, so
- don't use the AT history here; just use the last reported
- value (but keep the history for the proc histogram and worst_ever) */
- at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT,
- AT_FLG_NOHIST);
- }
-}
-
-struct obd_import *class_new_import(struct obd_device *obd)
-{
- struct obd_import *imp;
-
- imp = kzalloc(sizeof(*imp), GFP_NOFS);
- if (!imp)
- return NULL;
-
- INIT_LIST_HEAD(&imp->imp_pinger_chain);
- INIT_LIST_HEAD(&imp->imp_zombie_chain);
- INIT_LIST_HEAD(&imp->imp_replay_list);
- INIT_LIST_HEAD(&imp->imp_sending_list);
- INIT_LIST_HEAD(&imp->imp_delayed_list);
- INIT_LIST_HEAD(&imp->imp_committed_list);
- imp->imp_replay_cursor = &imp->imp_committed_list;
- spin_lock_init(&imp->imp_lock);
- imp->imp_last_success_conn = 0;
- imp->imp_state = LUSTRE_IMP_NEW;
- imp->imp_obd = class_incref(obd, "import", imp);
- mutex_init(&imp->imp_sec_mutex);
- init_waitqueue_head(&imp->imp_recovery_waitq);
-
- atomic_set(&imp->imp_refcount, 2);
- atomic_set(&imp->imp_unregistering, 0);
- atomic_set(&imp->imp_inflight, 0);
- atomic_set(&imp->imp_replay_inflight, 0);
- atomic_set(&imp->imp_inval_count, 0);
- INIT_LIST_HEAD(&imp->imp_conn_list);
- INIT_LIST_HEAD(&imp->imp_handle.h_link);
- class_handle_hash(&imp->imp_handle, &import_handle_ops);
- init_imp_at(&imp->imp_at);
-
- /* the default magic is V2; it will be used in the connect RPC and
- * then adjusted according to the flags in the request/reply. */
- imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
-
- return imp;
-}
-EXPORT_SYMBOL(class_new_import);
-
-void class_destroy_import(struct obd_import *import)
-{
- LASSERT(import != NULL);
- LASSERT(import != LP_POISON);
-
- class_handle_unhash(&import->imp_handle);
-
- spin_lock(&import->imp_lock);
- import->imp_generation++;
- spin_unlock(&import->imp_lock);
- class_import_put(import);
-}
-EXPORT_SYMBOL(class_destroy_import);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-
-void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
-{
- spin_lock(&exp->exp_locks_list_guard);
-
- LASSERT(lock->l_exp_refs_nr >= 0);
-
- if (lock->l_exp_refs_target != NULL &&
- lock->l_exp_refs_target != exp) {
- LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n",
- exp, lock, lock->l_exp_refs_target);
- }
- if ((lock->l_exp_refs_nr++) == 0) {
- list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
- lock->l_exp_refs_target = exp;
- }
- CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
- lock, exp, lock->l_exp_refs_nr);
- spin_unlock(&exp->exp_locks_list_guard);
-}
-EXPORT_SYMBOL(__class_export_add_lock_ref);
-
-void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
-{
- spin_lock(&exp->exp_locks_list_guard);
- LASSERT(lock->l_exp_refs_nr > 0);
- if (lock->l_exp_refs_target != exp) {
- LCONSOLE_WARN("lock %p, mismatching export pointers: %p, %p\n",
- lock, lock->l_exp_refs_target, exp);
- }
- if (--lock->l_exp_refs_nr == 0) {
- list_del_init(&lock->l_exp_refs_link);
- lock->l_exp_refs_target = NULL;
- }
- CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
- lock, exp, lock->l_exp_refs_nr);
- spin_unlock(&exp->exp_locks_list_guard);
-}
-EXPORT_SYMBOL(__class_export_del_lock_ref);
-#endif
-
-/* A connection defines an export context in which preallocation can
- be managed. This releases the export pointer reference, and returns
- the export handle, so the export refcount is 1 when this function
- returns. */
-int class_connect(struct lustre_handle *conn, struct obd_device *obd,
- struct obd_uuid *cluuid)
-{
- struct obd_export *export;
-
- LASSERT(conn != NULL);
- LASSERT(obd != NULL);
- LASSERT(cluuid != NULL);
-
- export = class_new_export(obd, cluuid);
- if (IS_ERR(export))
- return PTR_ERR(export);
-
- conn->cookie = export->exp_handle.h_cookie;
- class_export_put(export);
-
- CDEBUG(D_IOCTL, "connect: client %s, cookie %#llx\n",
- cluuid->uuid, conn->cookie);
- return 0;
-}
-EXPORT_SYMBOL(class_connect);
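
A short sketch of how the cookie stored by class_connect() is later resolved back to an export via class_conn2export(), which takes its own reference (obd and cluuid are assumed to be set up already; error handling trimmed):

	struct lustre_handle conn = { 0 };
	struct obd_export *exp;
	int rc;

	rc = class_connect(&conn, obd, &cluuid);	/* stores the export cookie */
	if (rc)
		return rc;

	exp = class_conn2export(&conn);		/* looks up the cookie, grabs a reference */
	if (exp) {
		/* ... use exp ... */
		class_export_put(exp);		/* drop the lookup reference */
	}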
-
-/* if export is involved in recovery then clean up related things */
-static void class_export_recovery_cleanup(struct obd_export *exp)
-{
- struct obd_device *obd = exp->exp_obd;
-
- spin_lock(&obd->obd_recovery_task_lock);
- if (exp->exp_delayed)
- obd->obd_delayed_clients--;
- if (obd->obd_recovering) {
- if (exp->exp_in_recovery) {
- spin_lock(&exp->exp_lock);
- exp->exp_in_recovery = 0;
- spin_unlock(&exp->exp_lock);
- LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
- atomic_dec(&obd->obd_connected_clients);
- }
-
- /* if called during recovery, the obd_stale_clients counter
- * should be updated; lightweight exports are not counted */
- if (exp->exp_failed &&
- (exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT) == 0)
- exp->exp_obd->obd_stale_clients++;
- }
- spin_unlock(&obd->obd_recovery_task_lock);
-
- spin_lock(&exp->exp_lock);
- /** Cleanup req replay fields */
- if (exp->exp_req_replay_needed) {
- exp->exp_req_replay_needed = 0;
-
- LASSERT(atomic_read(&obd->obd_req_replay_clients));
- atomic_dec(&obd->obd_req_replay_clients);
- }
-
- /** Cleanup lock replay data */
- if (exp->exp_lock_replay_needed) {
- exp->exp_lock_replay_needed = 0;
-
- LASSERT(atomic_read(&obd->obd_lock_replay_clients));
- atomic_dec(&obd->obd_lock_replay_clients);
- }
- spin_unlock(&exp->exp_lock);
-}
-
-/* This function removes 1-3 references from the export:
- * 1 - for the export pointer passed in,
- * and, if the disconnect is really needed,
- * 2 - for the removal from the hash,
- * 3 - in class_unlink_export.
- * The export pointer passed to this function may be destroyed. */
-int class_disconnect(struct obd_export *export)
-{
- int already_disconnected;
-
- if (!export) {
- CWARN("attempting to free NULL export %p\n", export);
- return -EINVAL;
- }
-
- spin_lock(&export->exp_lock);
- already_disconnected = export->exp_disconnected;
- export->exp_disconnected = 1;
- spin_unlock(&export->exp_lock);
-
- /* class_cleanup(), abort_recovery(), and class_fail_export()
- * all end up in here, and if any of them race we shouldn't
- * call extra class_export_puts(). */
- if (already_disconnected) {
- LASSERT(hlist_unhashed(&export->exp_nid_hash));
- goto no_disconn;
- }
-
- CDEBUG(D_IOCTL, "disconnect: cookie %#llx\n",
- export->exp_handle.h_cookie);
-
- if (!hlist_unhashed(&export->exp_nid_hash))
- cfs_hash_del(export->exp_obd->obd_nid_hash,
- &export->exp_connection->c_peer.nid,
- &export->exp_nid_hash);
-
- class_export_recovery_cleanup(export);
- class_unlink_export(export);
-no_disconn:
- class_export_put(export);
- return 0;
-}
-EXPORT_SYMBOL(class_disconnect);
-
-void class_fail_export(struct obd_export *exp)
-{
- int rc, already_failed;
-
- spin_lock(&exp->exp_lock);
- already_failed = exp->exp_failed;
- exp->exp_failed = 1;
- spin_unlock(&exp->exp_lock);
-
- if (already_failed) {
- CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n",
- exp, exp->exp_client_uuid.uuid);
- return;
- }
-
- CDEBUG(D_HA, "disconnecting export %p/%s\n",
- exp, exp->exp_client_uuid.uuid);
-
- if (obd_dump_on_timeout)
- libcfs_debug_dumplog();
-
- /* needed so that CDEBUG can safely be called after obd_disconnect */
- class_export_get(exp);
-
- /* Most callers into obd_disconnect are removing their own reference
- * (request, for example) in addition to the one from the hash table.
- * We don't have such a reference here, so make one. */
- class_export_get(exp);
- rc = obd_disconnect(exp);
- if (rc)
- CERROR("disconnecting export %p failed: %d\n", exp, rc);
- else
- CDEBUG(D_HA, "disconnected export %p/%s\n",
- exp, exp->exp_client_uuid.uuid);
- class_export_put(exp);
-}
-EXPORT_SYMBOL(class_fail_export);
-
-char *obd_export_nid2str(struct obd_export *exp)
-{
- if (exp->exp_connection != NULL)
- return libcfs_nid2str(exp->exp_connection->c_peer.nid);
-
- return "(no nid)";
-}
-EXPORT_SYMBOL(obd_export_nid2str);
-
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
-void (*class_export_dump_hook)(struct obd_export *) = NULL;
-EXPORT_SYMBOL(class_export_dump_hook);
-#endif
-
-static void print_export_data(struct obd_export *exp, const char *status,
- int locks)
-{
- struct ptlrpc_reply_state *rs;
- struct ptlrpc_reply_state *first_reply = NULL;
- int nreplies = 0;
-
- spin_lock(&exp->exp_lock);
- list_for_each_entry(rs, &exp->exp_outstanding_replies,
- rs_exp_list) {
- if (nreplies == 0)
- first_reply = rs;
- nreplies++;
- }
- spin_unlock(&exp->exp_lock);
-
- CDEBUG(D_HA, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: %p %s %llu\n",
- exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
- obd_export_nid2str(exp), atomic_read(&exp->exp_refcount),
- atomic_read(&exp->exp_rpc_count),
- atomic_read(&exp->exp_cb_count),
- atomic_read(&exp->exp_locks_count),
- exp->exp_disconnected, exp->exp_delayed, exp->exp_failed,
- nreplies, first_reply, nreplies > 3 ? "..." : "",
- exp->exp_last_committed);
-#if LUSTRE_TRACKS_LOCK_EXP_REFS
- if (locks && class_export_dump_hook != NULL)
- class_export_dump_hook(exp);
-#endif
-}
-
-void dump_exports(struct obd_device *obd, int locks)
-{
- struct obd_export *exp;
-
- spin_lock(&obd->obd_dev_lock);
- list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
- print_export_data(exp, "ACTIVE", locks);
- list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
- print_export_data(exp, "UNLINKED", locks);
- list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
- print_export_data(exp, "DELAYED", locks);
- spin_unlock(&obd->obd_dev_lock);
- spin_lock(&obd_zombie_impexp_lock);
- list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
- print_export_data(exp, "ZOMBIE", locks);
- spin_unlock(&obd_zombie_impexp_lock);
-}
-EXPORT_SYMBOL(dump_exports);
-
-/* Total number of zombies to be destroyed */
-static int zombies_count;
-
-/**
- * kill zombie imports and exports
- */
-void obd_zombie_impexp_cull(void)
-{
- struct obd_import *import;
- struct obd_export *export;
-
- do {
- spin_lock(&obd_zombie_impexp_lock);
-
- import = NULL;
- if (!list_empty(&obd_zombie_imports)) {
- import = list_entry(obd_zombie_imports.next,
- struct obd_import,
- imp_zombie_chain);
- list_del_init(&import->imp_zombie_chain);
- }
-
- export = NULL;
- if (!list_empty(&obd_zombie_exports)) {
- export = list_entry(obd_zombie_exports.next,
- struct obd_export,
- exp_obd_chain);
- list_del_init(&export->exp_obd_chain);
- }
-
- spin_unlock(&obd_zombie_impexp_lock);
-
- if (import != NULL) {
- class_import_destroy(import);
- spin_lock(&obd_zombie_impexp_lock);
- zombies_count--;
- spin_unlock(&obd_zombie_impexp_lock);
- }
-
- if (export != NULL) {
- class_export_destroy(export);
- spin_lock(&obd_zombie_impexp_lock);
- zombies_count--;
- spin_unlock(&obd_zombie_impexp_lock);
- }
-
- cond_resched();
- } while (import != NULL || export != NULL);
-}
-
-static struct completion obd_zombie_start;
-static struct completion obd_zombie_stop;
-static unsigned long obd_zombie_flags;
-static wait_queue_head_t obd_zombie_waitq;
-static pid_t obd_zombie_pid;
-
-enum {
- OBD_ZOMBIE_STOP = 0x0001,
-};
-
-/**
- * check whether there is work for the zombie import/export destroy thread.
- */
-static int obd_zombie_impexp_check(void *arg)
-{
- int rc;
-
- spin_lock(&obd_zombie_impexp_lock);
- rc = (zombies_count == 0) &&
- !test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
- spin_unlock(&obd_zombie_impexp_lock);
-
- return rc;
-}
-
-/**
- * Add export to the obd_zombie thread and notify it.
- */
-static void obd_zombie_export_add(struct obd_export *exp)
-{
- spin_lock(&exp->exp_obd->obd_dev_lock);
- LASSERT(!list_empty(&exp->exp_obd_chain));
- list_del_init(&exp->exp_obd_chain);
- spin_unlock(&exp->exp_obd->obd_dev_lock);
- spin_lock(&obd_zombie_impexp_lock);
- zombies_count++;
- list_add(&exp->exp_obd_chain, &obd_zombie_exports);
- spin_unlock(&obd_zombie_impexp_lock);
-
- obd_zombie_impexp_notify();
-}
-
-/**
- * Add import to the obd_zombie thread and notify it.
- */
-static void obd_zombie_import_add(struct obd_import *imp)
-{
- LASSERT(!imp->imp_sec);
- spin_lock(&obd_zombie_impexp_lock);
- LASSERT(list_empty(&imp->imp_zombie_chain));
- zombies_count++;
- list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
- spin_unlock(&obd_zombie_impexp_lock);
-
- obd_zombie_impexp_notify();
-}
-
-/**
- * notify import/export destroy thread about new zombie.
- */
-static void obd_zombie_impexp_notify(void)
-{
- /*
- * Make sure obd_zombie_impexp_thread gets this notification.
- * It is possible that the signal is only received by obd_zombie_barrier,
- * which gulps the notification and sleeps on, and a hang ensues.
- */
- wake_up_all(&obd_zombie_waitq);
-}
-
-/**
- * check whether obd_zombie is idle
- */
-static int obd_zombie_is_idle(void)
-{
- int rc;
-
- LASSERT(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
- spin_lock(&obd_zombie_impexp_lock);
- rc = (zombies_count == 0);
- spin_unlock(&obd_zombie_impexp_lock);
- return rc;
-}
-
-/**
- * wait until the obd_zombie import/export queues become empty
- */
-void obd_zombie_barrier(void)
-{
- struct l_wait_info lwi = { 0 };
-
- if (obd_zombie_pid == current_pid())
- /* don't wait for myself */
- return;
- l_wait_event(obd_zombie_waitq, obd_zombie_is_idle(), &lwi);
-}
-EXPORT_SYMBOL(obd_zombie_barrier);
-
-
-/**
- * the thread that destroys zombie exports/imports.
- */
-static int obd_zombie_impexp_thread(void *unused)
-{
- unshare_fs_struct();
- complete(&obd_zombie_start);
-
- obd_zombie_pid = current_pid();
-
- while (!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
- struct l_wait_info lwi = { 0 };
-
- l_wait_event(obd_zombie_waitq,
- !obd_zombie_impexp_check(NULL), &lwi);
- obd_zombie_impexp_cull();
-
- /*
- * Notify obd_zombie_barrier callers that queues
- * may be empty.
- */
- wake_up(&obd_zombie_waitq);
- }
-
- complete(&obd_zombie_stop);
-
- return 0;
-}
-
-
-/**
- * start the zombie import/export destroy thread
- */
-int obd_zombie_impexp_init(void)
-{
- struct task_struct *task;
-
- INIT_LIST_HEAD(&obd_zombie_imports);
- INIT_LIST_HEAD(&obd_zombie_exports);
- spin_lock_init(&obd_zombie_impexp_lock);
- init_completion(&obd_zombie_start);
- init_completion(&obd_zombie_stop);
- init_waitqueue_head(&obd_zombie_waitq);
- obd_zombie_pid = 0;
-
- task = kthread_run(obd_zombie_impexp_thread, NULL, "obd_zombid");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- wait_for_completion(&obd_zombie_start);
- return 0;
-}
-/**
- * stop the zombie import/export destroy thread
- */
-void obd_zombie_impexp_stop(void)
-{
- set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
- obd_zombie_impexp_notify();
- wait_for_completion(&obd_zombie_stop);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
deleted file mode 100644
index 245ac2e9b2dd..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/linux/linux-module.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/sched.h>
-#include <linux/lp.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <asm/ioctls.h>
-#include <linux/poll.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/seq_file.h>
-#include <linux/kobject.h>
-
-#include "../../../include/linux/libcfs/libcfs.h"
-#include "../../../include/linux/lnet/lnetctl.h"
-#include "../../include/obd_support.h"
-#include "../../include/obd_class.h"
-#include "../../include/lprocfs_status.h"
-#include "../../include/lustre_ver.h"
-#include "../../include/lustre/lustre_build_version.h"
-
-/* buffer MUST be at least the size of obd_ioctl_hdr */
-int obd_ioctl_getdata(char **buf, int *len, void *arg)
-{
- struct obd_ioctl_hdr hdr;
- struct obd_ioctl_data *data;
- int err;
- int offset = 0;
-
- if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
- return -EFAULT;
-
- if (hdr.ioc_version != OBD_IOCTL_VERSION) {
- CERROR("Version mismatch kernel (%x) vs application (%x)\n",
- OBD_IOCTL_VERSION, hdr.ioc_version);
- return -EINVAL;
- }
-
- if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
- CERROR("User buffer len %d exceeds %d max buffer\n",
- hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
- return -EINVAL;
- }
-
- if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
- CERROR("User buffer too small for ioctl (%d)\n", hdr.ioc_len);
- return -EINVAL;
- }
-
- /* When there are many processes calling vmalloc on a multi-core
- * system, the high lock contention hurts performance badly;
- * obdfilter-survey, which relies on ioctl, is one example. So we'd
- * better avoid vmalloc on the ioctl path. LU-66 */
- *buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS);
- if (*buf == NULL) {
- CERROR("Cannot allocate control buffer of len %d\n",
- hdr.ioc_len);
- return -EINVAL;
- }
- *len = hdr.ioc_len;
- data = (struct obd_ioctl_data *)*buf;
-
- if (copy_from_user(*buf, (void *)arg, hdr.ioc_len)) {
- err = -EFAULT;
- goto free_buf;
- }
- if (hdr.ioc_len != data->ioc_len) {
- err = -EINVAL;
- goto free_buf;
- }
-
- if (obd_ioctl_is_invalid(data)) {
- CERROR("ioctl not correctly formatted\n");
- err = -EINVAL;
- goto free_buf;
- }
-
- if (data->ioc_inllen1) {
- data->ioc_inlbuf1 = &data->ioc_bulk[0];
- offset += cfs_size_round(data->ioc_inllen1);
- }
-
- if (data->ioc_inllen2) {
- data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
- offset += cfs_size_round(data->ioc_inllen2);
- }
-
- if (data->ioc_inllen3) {
- data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
- offset += cfs_size_round(data->ioc_inllen3);
- }
-
- if (data->ioc_inllen4) {
- data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
- }
-
- return 0;
-
-free_buf:
- kvfree(*buf);
- return err;
-}
-EXPORT_SYMBOL(obd_ioctl_getdata);
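
A plausible caller-side sketch for the helper above (do_something_with() is a placeholder; the real callers are not shown in this hunk):

	char *buf = NULL;
	int len = 0;
	struct obd_ioctl_data *data;
	int rc;

	rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
	if (rc)
		return rc;

	data = (struct obd_ioctl_data *)buf;	/* ioc_inlbuf1..4 now point into ioc_bulk */
	do_something_with(data);
	kvfree(buf);				/* buffer came from libcfs_kvzalloc() */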
-
-int obd_ioctl_popdata(void *arg, void *data, int len)
-{
- int err;
-
- err = copy_to_user(arg, data, len);
- if (err)
- err = -EFAULT;
- return err;
-}
-EXPORT_SYMBOL(obd_ioctl_popdata);
-
-/* opening /dev/obd */
-static int obd_class_open(struct inode *inode, struct file *file)
-{
- try_module_get(THIS_MODULE);
- return 0;
-}
-
-/* closing /dev/obd */
-static int obd_class_release(struct inode *inode, struct file *file)
-{
- module_put(THIS_MODULE);
- return 0;
-}
-
-/* to control /dev/obd */
-static long obd_class_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- int err = 0;
-
- /* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */
- if (!capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
- return err = -EACCES;
- if ((cmd & 0xffffff00) == ((int)'T') << 8) /* ignore all tty ioctls */
- return err = -ENOTTY;
-
- err = class_handle_ioctl(cmd, (unsigned long)arg);
-
- return err;
-}
-
-/* declare character device */
-static struct file_operations obd_psdev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = obd_class_ioctl, /* unlocked_ioctl */
- .open = obd_class_open, /* open */
- .release = obd_class_release, /* release */
-};
-
-/* modules setup */
-struct miscdevice obd_psdev = {
- .minor = OBD_DEV_MINOR,
- .name = OBD_DEV_NAME,
- .fops = &obd_psdev_fops,
-};
-
-
-static ssize_t version_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n", LUSTRE_VERSION_STRING);
-}
-
-static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%s\n", "on");
-}
-
-static ssize_t health_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- bool healthy = true;
- int i;
- size_t len = 0;
-
- if (libcfs_catastrophe)
- return sprintf(buf, "LBUG\n");
-
- read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd;
-
- obd = class_num2obd(i);
- if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
- continue;
-
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- if (obd->obd_stopping)
- continue;
-
- class_incref(obd, __func__, current);
- read_unlock(&obd_dev_lock);
-
- if (obd_health_check(NULL, obd)) {
- healthy = false;
- }
- class_decref(obd, __func__, current);
- read_lock(&obd_dev_lock);
- }
- read_unlock(&obd_dev_lock);
-
- if (healthy)
- len = sprintf(buf, "healthy\n");
- else
- len = sprintf(buf, "NOT HEALTHY\n");
-
- return len;
-}
-
-static ssize_t jobid_var_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", obd_jobid_var);
-}
-
-static ssize_t jobid_var_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- if (!count || count > JOBSTATS_JOBID_VAR_MAX_LEN)
- return -EINVAL;
-
- memset(obd_jobid_var, 0, JOBSTATS_JOBID_VAR_MAX_LEN + 1);
-
- memcpy(obd_jobid_var, buffer, count);
-
- /* Trim the trailing '\n' if any */
- if (obd_jobid_var[count - 1] == '\n')
- obd_jobid_var[count - 1] = 0;
-
- return count;
-}
-
-static ssize_t jobid_name_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", obd_jobid_node);
-}
-
-static ssize_t jobid_name_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- if (!count || count > JOBSTATS_JOBID_SIZE)
- return -EINVAL;
-
- memcpy(obd_jobid_node, buffer, count);
-
- obd_jobid_node[count] = 0;
-
- /* Trim the trailing '\n' if any */
- if (obd_jobid_node[count - 1] == '\n')
- obd_jobid_node[count - 1] = 0;
-
- return count;
-}
-
-/* Root for /sys/kernel/debug/lustre */
-struct dentry *debugfs_lustre_root;
-EXPORT_SYMBOL_GPL(debugfs_lustre_root);
-
-LUSTRE_RO_ATTR(version);
-LUSTRE_RO_ATTR(pinger);
-LUSTRE_RO_ATTR(health);
-LUSTRE_RW_ATTR(jobid_var);
-LUSTRE_RW_ATTR(jobid_name);
-
-static struct attribute *lustre_attrs[] = {
- &lustre_attr_version.attr,
- &lustre_attr_pinger.attr,
- &lustre_attr_health.attr,
- &lustre_attr_jobid_name.attr,
- &lustre_attr_jobid_var.attr,
- NULL,
-};
-
-static void *obd_device_list_seq_start(struct seq_file *p, loff_t *pos)
-{
- if (*pos >= class_devno_max())
- return NULL;
-
- return pos;
-}
-
-static void obd_device_list_seq_stop(struct seq_file *p, void *v)
-{
-}
-
-static void *obd_device_list_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- ++*pos;
- if (*pos >= class_devno_max())
- return NULL;
-
- return pos;
-}
-
-static int obd_device_list_seq_show(struct seq_file *p, void *v)
-{
- loff_t index = *(loff_t *)v;
- struct obd_device *obd = class_num2obd((int)index);
- char *status;
-
- if (obd == NULL)
- return 0;
-
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- if (obd->obd_stopping)
- status = "ST";
- else if (obd->obd_inactive)
- status = "IN";
- else if (obd->obd_set_up)
- status = "UP";
- else if (obd->obd_attached)
- status = "AT";
- else
- status = "--";
-
- seq_printf(p, "%3d %s %s %s %s %d\n",
- (int)index, status, obd->obd_type->typ_name,
- obd->obd_name, obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
- return 0;
-}
-
-static const struct seq_operations obd_device_list_sops = {
- .start = obd_device_list_seq_start,
- .stop = obd_device_list_seq_stop,
- .next = obd_device_list_seq_next,
- .show = obd_device_list_seq_show,
-};
-
-static int obd_device_list_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc = seq_open(file, &obd_device_list_sops);
-
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations obd_device_list_fops = {
- .owner = THIS_MODULE,
- .open = obd_device_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-struct kobject *lustre_kobj;
-EXPORT_SYMBOL_GPL(lustre_kobj);
-
-static struct attribute_group lustre_attr_group = {
- .attrs = lustre_attrs,
-};
-
-int class_procfs_init(void)
-{
- int rc = -ENOMEM;
- struct dentry *file;
-
- lustre_kobj = kobject_create_and_add("lustre", fs_kobj);
- if (lustre_kobj == NULL)
- goto out;
-
- /* Create the files associated with this kobject */
- rc = sysfs_create_group(lustre_kobj, &lustre_attr_group);
- if (rc) {
- kobject_put(lustre_kobj);
- goto out;
- }
-
- debugfs_lustre_root = debugfs_create_dir("lustre", NULL);
- if (IS_ERR_OR_NULL(debugfs_lustre_root)) {
- rc = debugfs_lustre_root ? PTR_ERR(debugfs_lustre_root)
- : -ENOMEM;
- debugfs_lustre_root = NULL;
- kobject_put(lustre_kobj);
- goto out;
- }
-
- file = debugfs_create_file("devices", 0444, debugfs_lustre_root, NULL,
- &obd_device_list_fops);
- if (IS_ERR_OR_NULL(file)) {
- rc = file ? PTR_ERR(file) : -ENOMEM;
- kobject_put(lustre_kobj);
- goto out;
- }
-out:
- return rc;
-}
-
-int class_procfs_clean(void)
-{
- if (debugfs_lustre_root != NULL)
- debugfs_remove_recursive(debugfs_lustre_root);
-
- debugfs_lustre_root = NULL;
-
- kobject_put(lustre_kobj);
-
- return 0;
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
deleted file mode 100644
index 9496c09b2b69..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/linux/linux-obdo.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/module.h>
-#include "../../include/obd_class.h"
-#include "../../include/lustre/lustre_idl.h"
-
-#include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
-
-void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
-{
- valid &= src->o_valid;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE,
- "valid %#llx, cur time %lu/%lu, new %llu/%llu\n",
- src->o_valid, LTIME_S(dst->i_mtime),
- LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime);
-
- if (valid & OBD_MD_FLATIME && src->o_atime > LTIME_S(dst->i_atime))
- LTIME_S(dst->i_atime) = src->o_atime;
- if (valid & OBD_MD_FLMTIME && src->o_mtime > LTIME_S(dst->i_mtime))
- LTIME_S(dst->i_mtime) = src->o_mtime;
- if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime))
- LTIME_S(dst->i_ctime) = src->o_ctime;
- if (valid & OBD_MD_FLSIZE)
- i_size_write(dst, src->o_size);
- /* optimum IO size */
- if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
- dst->i_blkbits = ffs(src->o_blksize) - 1;
-
- if (dst->i_blkbits < PAGE_CACHE_SHIFT)
- dst->i_blkbits = PAGE_CACHE_SHIFT;
-
- /* allocation of space */
- if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
- /*
- * XXX shouldn't overflow be checked here like in
- * obdo_to_inode()?
- */
- dst->i_blocks = src->o_blocks;
-}
-EXPORT_SYMBOL(obdo_refresh_inode);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
deleted file mode 100644
index 518288df4d53..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/sysctl.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/ctype.h>
-#include <linux/bitops.h>
-#include <linux/uaccess.h>
-#include <linux/utsname.h>
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../../include/obd_support.h"
-#include "../../include/lprocfs_status.h"
-
-struct static_lustre_uintvalue_attr {
- struct {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len);
- } u;
- int *value;
-};
-
-static ssize_t static_uintvalue_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct static_lustre_uintvalue_attr *lattr = (void *)attr;
-
- return sprintf(buf, "%d\n", *lattr->value);
-}
-
-static ssize_t static_uintvalue_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct static_lustre_uintvalue_attr *lattr = (void *)attr;
- int rc;
- unsigned int val;
-
- rc = kstrtouint(buffer, 10, &val);
- if (rc)
- return rc;
-
- *lattr->value = val;
-
- return count;
-}
-
-#define LUSTRE_STATIC_UINT_ATTR(name, value) \
-static struct static_lustre_uintvalue_attr lustre_sattr_##name = \
- {__ATTR(name, 0644, \
- static_uintvalue_show, \
- static_uintvalue_store),\
- value }
-
-LUSTRE_STATIC_UINT_ATTR(timeout, &obd_timeout);
-
-static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%lu\n",
- obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
-}
-
-static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
-
- if (val > ((totalram_pages / 10) * 9)) {
- /* Somebody wants to assign too much memory to dirty pages */
- return -EINVAL;
- }
-
- if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
- /* Less than 4 Mb for dirty cache is also bad */
- return -EINVAL;
- }
-
- obd_max_dirty_pages = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(max_dirty_mb);
-
-LUSTRE_STATIC_UINT_ATTR(debug_peer_on_timeout, &obd_debug_peer_on_timeout);
-LUSTRE_STATIC_UINT_ATTR(dump_on_timeout, &obd_dump_on_timeout);
-LUSTRE_STATIC_UINT_ATTR(dump_on_eviction, &obd_dump_on_eviction);
-LUSTRE_STATIC_UINT_ATTR(at_min, &at_min);
-LUSTRE_STATIC_UINT_ATTR(at_max, &at_max);
-LUSTRE_STATIC_UINT_ATTR(at_extra, &at_extra);
-LUSTRE_STATIC_UINT_ATTR(at_early_margin, &at_early_margin);
-LUSTRE_STATIC_UINT_ATTR(at_history, &at_history);
-
-static struct attribute *lustre_attrs[] = {
- &lustre_sattr_timeout.u.attr,
- &lustre_attr_max_dirty_mb.attr,
- &lustre_sattr_debug_peer_on_timeout.u.attr,
- &lustre_sattr_dump_on_timeout.u.attr,
- &lustre_sattr_dump_on_eviction.u.attr,
- &lustre_sattr_at_min.u.attr,
- &lustre_sattr_at_max.u.attr,
- &lustre_sattr_at_extra.u.attr,
- &lustre_sattr_at_early_margin.u.attr,
- &lustre_sattr_at_history.u.attr,
- NULL,
-};
-
-static struct attribute_group lustre_attr_group = {
- .attrs = lustre_attrs,
-};
-
-int obd_sysctl_init(void)
-{
- return sysfs_create_group(lustre_kobj, &lustre_attr_group);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
deleted file mode 100644
index a941518e463d..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/llog.c
- *
- * OST<->MDS recovery logging infrastructure.
- * Invariants in implementation:
- * - we do not share logs among different OST<->MDS connections, so that
- * if an OST or MDS fails it need only look at log(s) relevant to itself
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- * Author: Alex Zhuravlev <bzzz@whamcloud.com>
- * Author: Mikhail Pershin <tappro@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-
-#include "../include/obd_class.h"
-#include "../include/lustre_log.h"
-#include "llog_internal.h"
-
-/*
- * Allocate a new log or catalog handle
- * Used inside llog_open().
- */
-static struct llog_handle *llog_alloc_handle(void)
-{
- struct llog_handle *loghandle;
-
- loghandle = kzalloc(sizeof(*loghandle), GFP_NOFS);
- if (!loghandle)
- return NULL;
-
- init_rwsem(&loghandle->lgh_lock);
- spin_lock_init(&loghandle->lgh_hdr_lock);
- INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
- atomic_set(&loghandle->lgh_refcount, 1);
-
- return loghandle;
-}
-
-/*
- * Free the llog handle, and the header data if it exists. Used in llog_close() only.
- */
-static void llog_free_handle(struct llog_handle *loghandle)
-{
- LASSERT(loghandle != NULL);
-
- /* failed llog_init_handle */
- if (!loghandle->lgh_hdr)
- goto out;
-
- if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
- LASSERT(list_empty(&loghandle->u.phd.phd_entry));
- else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
- LASSERT(list_empty(&loghandle->u.chd.chd_head));
- LASSERT(sizeof(*(loghandle->lgh_hdr)) == LLOG_CHUNK_SIZE);
- kfree(loghandle->lgh_hdr);
-out:
- kfree(loghandle);
-}
-
-void llog_handle_get(struct llog_handle *loghandle)
-{
- atomic_inc(&loghandle->lgh_refcount);
-}
-
-void llog_handle_put(struct llog_handle *loghandle)
-{
- LASSERT(atomic_read(&loghandle->lgh_refcount) > 0);
- if (atomic_dec_and_test(&loghandle->lgh_refcount))
- llog_free_handle(loghandle);
-}
-
-static int llog_read_header(const struct lu_env *env,
- struct llog_handle *handle,
- struct obd_uuid *uuid)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_handle2ops(handle, &lop);
- if (rc)
- return rc;
-
- if (lop->lop_read_header == NULL)
- return -EOPNOTSUPP;
-
- rc = lop->lop_read_header(env, handle);
- if (rc == LLOG_EEMPTY) {
- struct llog_log_hdr *llh = handle->lgh_hdr;
-
- handle->lgh_last_idx = 0; /* header is record with index 0 */
- llh->llh_count = 1; /* for the header record */
- llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
- llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
- llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
- llh->llh_timestamp = ktime_get_real_seconds();
- if (uuid)
- memcpy(&llh->llh_tgtuuid, uuid,
- sizeof(llh->llh_tgtuuid));
- llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
- ext2_set_bit(0, llh->llh_bitmap);
- rc = 0;
- }
- return rc;
-}
-
-int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
- int flags, struct obd_uuid *uuid)
-{
- struct llog_log_hdr *llh;
- int rc;
-
- LASSERT(handle->lgh_hdr == NULL);
-
- llh = kzalloc(sizeof(*llh), GFP_NOFS);
- if (!llh)
- return -ENOMEM;
- handle->lgh_hdr = llh;
- /* first assign flags to use llog_client_ops */
- llh->llh_flags = flags;
- rc = llog_read_header(env, handle, uuid);
- if (rc == 0) {
- if (unlikely((llh->llh_flags & LLOG_F_IS_PLAIN &&
- flags & LLOG_F_IS_CAT) ||
- (llh->llh_flags & LLOG_F_IS_CAT &&
- flags & LLOG_F_IS_PLAIN))) {
- CERROR("%s: llog type is %s but initializing %s\n",
- handle->lgh_ctxt->loc_obd->obd_name,
- llh->llh_flags & LLOG_F_IS_CAT ?
- "catalog" : "plain",
- flags & LLOG_F_IS_CAT ? "catalog" : "plain");
- rc = -EINVAL;
- goto out;
- } else if (llh->llh_flags &
- (LLOG_F_IS_PLAIN | LLOG_F_IS_CAT)) {
- /*
- * it is possible to open a llog without specifying the
- * llog type, in which case it is taken from llh_flags
- */
- flags = llh->llh_flags;
- } else {
- /* for some reason llh_flags has no type set */
- CERROR("llog type is not specified!\n");
- rc = -EINVAL;
- goto out;
- }
- if (unlikely(uuid &&
- !obd_uuid_equals(uuid, &llh->llh_tgtuuid))) {
- CERROR("%s: llog uuid mismatch: %s/%s\n",
- handle->lgh_ctxt->loc_obd->obd_name,
- (char *)uuid->uuid,
- (char *)llh->llh_tgtuuid.uuid);
- rc = -EEXIST;
- goto out;
- }
- }
- if (flags & LLOG_F_IS_CAT) {
- LASSERT(list_empty(&handle->u.chd.chd_head));
- INIT_LIST_HEAD(&handle->u.chd.chd_head);
- llh->llh_size = sizeof(struct llog_logid_rec);
- } else if (!(flags & LLOG_F_IS_PLAIN)) {
- CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
- handle->lgh_ctxt->loc_obd->obd_name,
- flags, LLOG_F_IS_CAT, LLOG_F_IS_PLAIN);
- rc = -EINVAL;
- }
-out:
- if (rc) {
- kfree(llh);
- handle->lgh_hdr = NULL;
- }
- return rc;
-}
-EXPORT_SYMBOL(llog_init_handle);
-
-static int llog_process_thread(void *arg)
-{
- struct llog_process_info *lpi = arg;
- struct llog_handle *loghandle = lpi->lpi_loghandle;
- struct llog_log_hdr *llh = loghandle->lgh_hdr;
- struct llog_process_cat_data *cd = lpi->lpi_catdata;
- char *buf;
- __u64 cur_offset = LLOG_CHUNK_SIZE;
- __u64 last_offset;
- int rc = 0, index = 1, last_index;
- int saved_index = 0;
- int last_called_index = 0;
-
- LASSERT(llh);
-
- buf = kzalloc(LLOG_CHUNK_SIZE, GFP_NOFS);
- if (!buf) {
- lpi->lpi_rc = -ENOMEM;
- return 0;
- }
-
- if (cd != NULL) {
- last_called_index = cd->lpcd_first_idx;
- index = cd->lpcd_first_idx + 1;
- }
- if (cd != NULL && cd->lpcd_last_idx)
- last_index = cd->lpcd_last_idx;
- else
- last_index = LLOG_BITMAP_BYTES * 8 - 1;
-
- while (rc == 0) {
- struct llog_rec_hdr *rec;
-
- /* skip records not set in bitmap */
- while (index <= last_index &&
- !ext2_test_bit(index, llh->llh_bitmap))
- ++index;
-
- LASSERT(index <= last_index + 1);
- if (index == last_index + 1)
- break;
-repeat:
- CDEBUG(D_OTHER, "index: %d last_index %d\n",
- index, last_index);
-
- /* get the buf with our target record; avoid old garbage */
- memset(buf, 0, LLOG_CHUNK_SIZE);
- last_offset = cur_offset;
- rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
- index, &cur_offset, buf, LLOG_CHUNK_SIZE);
- if (rc)
- goto out;
-
- /* NB: when rec->lrh_len is accessed it is already swabbed
- * since it is used at the "end" of the loop and the rec
- * swabbing is done at the beginning of the loop. */
- for (rec = (struct llog_rec_hdr *)buf;
- (char *)rec < buf + LLOG_CHUNK_SIZE;
- rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
-
- CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
- rec, rec->lrh_type);
-
- if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
- lustre_swab_llog_rec(rec);
-
- CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
- rec->lrh_type, rec->lrh_index);
-
- if (rec->lrh_index == 0) {
- /* probably another rec just got added? */
- rc = 0;
- if (index <= loghandle->lgh_last_idx)
- goto repeat;
- goto out; /* no more records */
- }
- if (rec->lrh_len == 0 ||
- rec->lrh_len > LLOG_CHUNK_SIZE) {
- CWARN("invalid length %d in llog record for index %d/%d\n",
- rec->lrh_len,
- rec->lrh_index, index);
- rc = -EINVAL;
- goto out;
- }
-
- if (rec->lrh_index < index) {
- CDEBUG(D_OTHER, "skipping lrh_index %d\n",
- rec->lrh_index);
- continue;
- }
-
- CDEBUG(D_OTHER,
- "lrh_index: %d lrh_len: %d (%d remains)\n",
- rec->lrh_index, rec->lrh_len,
- (int)(buf + LLOG_CHUNK_SIZE - (char *)rec));
-
- loghandle->lgh_cur_idx = rec->lrh_index;
- loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
- last_offset;
-
- /* if set, process the callback on this record */
- if (ext2_test_bit(index, llh->llh_bitmap)) {
- rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
- lpi->lpi_cbdata);
- last_called_index = index;
- if (rc)
- goto out;
- } else {
- CDEBUG(D_OTHER, "Skipped index %d\n", index);
- }
-
- /* next record, still in buffer? */
- ++index;
- if (index > last_index) {
- rc = 0;
- goto out;
- }
- }
- }
-
-out:
- if (cd != NULL)
- cd->lpcd_last_idx = last_called_index;
-
- kfree(buf);
- lpi->lpi_rc = rc;
- return 0;
-}
-
-static int llog_process_thread_daemonize(void *arg)
-{
- struct llog_process_info *lpi = arg;
- struct lu_env env;
- int rc;
-
- unshare_fs_struct();
-
- /* client env has no keys, tags is just 0 */
- rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
- if (rc)
- goto out;
- lpi->lpi_env = &env;
-
- rc = llog_process_thread(arg);
-
- lu_env_fini(&env);
-out:
- complete(&lpi->lpi_completion);
- return rc;
-}
-
-int llog_process_or_fork(const struct lu_env *env,
- struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata, bool fork)
-{
- struct llog_process_info *lpi;
- int rc;
-
- lpi = kzalloc(sizeof(*lpi), GFP_NOFS);
- if (!lpi) {
- CERROR("cannot alloc pointer\n");
- return -ENOMEM;
- }
- lpi->lpi_loghandle = loghandle;
- lpi->lpi_cb = cb;
- lpi->lpi_cbdata = data;
- lpi->lpi_catdata = catdata;
-
- if (fork) {
- /* The new thread can't use the parent's env;
- * init a new one in llog_process_thread_daemonize. */
- lpi->lpi_env = NULL;
- init_completion(&lpi->lpi_completion);
- rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi,
- "llog_process_thread"));
- if (IS_ERR_VALUE(rc)) {
- CERROR("%s: cannot start thread: rc = %d\n",
- loghandle->lgh_ctxt->loc_obd->obd_name, rc);
- kfree(lpi);
- return rc;
- }
- wait_for_completion(&lpi->lpi_completion);
- } else {
- lpi->lpi_env = env;
- llog_process_thread(lpi);
- }
- rc = lpi->lpi_rc;
- kfree(lpi);
- return rc;
-}
-EXPORT_SYMBOL(llog_process_or_fork);
-
-int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata)
-{
- return llog_process_or_fork(env, loghandle, cb, data, catdata, true);
-}
-EXPORT_SYMBOL(llog_process);
-
-int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
- struct llog_handle **lgh, struct llog_logid *logid,
- char *name, enum llog_open_param open_param)
-{
- int raised;
- int rc;
-
- LASSERT(ctxt);
- LASSERT(ctxt->loc_logops);
-
- if (ctxt->loc_logops->lop_open == NULL) {
- *lgh = NULL;
- return -EOPNOTSUPP;
- }
-
- *lgh = llog_alloc_handle();
- if (*lgh == NULL)
- return -ENOMEM;
- (*lgh)->lgh_ctxt = ctxt;
- (*lgh)->lgh_logops = ctxt->loc_logops;
-
- raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
- if (!raised)
- cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
- rc = ctxt->loc_logops->lop_open(env, *lgh, logid, name, open_param);
- if (!raised)
- cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- if (rc) {
- llog_free_handle(*lgh);
- *lgh = NULL;
- }
- return rc;
-}
-EXPORT_SYMBOL(llog_open);
-
-int llog_close(const struct lu_env *env, struct llog_handle *loghandle)
-{
- struct llog_operations *lop;
- int rc;
-
- rc = llog_handle2ops(loghandle, &lop);
- if (rc)
- goto out;
- if (lop->lop_close == NULL) {
- rc = -EOPNOTSUPP;
- goto out;
- }
- rc = lop->lop_close(env, loghandle);
-out:
- llog_handle_put(loghandle);
- return rc;
-}
-EXPORT_SYMBOL(llog_close);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
deleted file mode 100644
index 3984445f8442..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/llog_cat.c
- *
- * OST<->MDS recovery logging infrastructure.
- *
- * Invariants in implementation:
- * - we do not share logs among different OST<->MDS connections, so that
- * if an OST or MDS fails it need only look at log(s) relevant to itself
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
- * Author: Mikhail Pershin <mike.pershin@intel.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-
-#include "../include/obd_class.h"
-
-#include "llog_internal.h"
-
-/* Open an existing log handle and add it to the open list.
- * This log handle will be closed when all of the records in it are removed.
- *
- * Assumes caller has already pushed us into the kernel context and is locking.
- * We return a lock on the handle to ensure nobody yanks it from us.
- *
- * This takes an extra reference on the llog_handle via llog_handle_get() and
- * requires the caller to put this reference using llog_handle_put().
- */
-int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
- struct llog_handle **res, struct llog_logid *logid)
-{
- struct llog_handle *loghandle;
- int rc = 0;
-
- if (cathandle == NULL)
- return -EBADF;
-
- down_write(&cathandle->lgh_lock);
- list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
- struct llog_logid *cgl = &loghandle->lgh_id;
-
- if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
- ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
- if (cgl->lgl_ogen != logid->lgl_ogen) {
- CERROR("%s: log "DOSTID" generation %x != %x\n",
- loghandle->lgh_ctxt->loc_obd->obd_name,
- POSTID(&logid->lgl_oi), cgl->lgl_ogen,
- logid->lgl_ogen);
- continue;
- }
- loghandle->u.phd.phd_cat_handle = cathandle;
- up_write(&cathandle->lgh_lock);
- rc = 0;
- goto out;
- }
- }
- up_write(&cathandle->lgh_lock);
-
- rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
- LLOG_OPEN_EXISTS);
- if (rc < 0) {
- CERROR("%s: error opening log id "DOSTID":%x: rc = %d\n",
- cathandle->lgh_ctxt->loc_obd->obd_name,
- POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
- return rc;
- }
-
- rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN, NULL);
- if (rc < 0) {
- llog_close(env, loghandle);
- loghandle = NULL;
- return rc;
- }
-
- down_write(&cathandle->lgh_lock);
- list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
- up_write(&cathandle->lgh_lock);
-
- loghandle->u.phd.phd_cat_handle = cathandle;
- loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
- loghandle->u.phd.phd_cookie.lgc_index =
- loghandle->lgh_hdr->llh_cat_idx;
-out:
- llog_handle_get(loghandle);
- *res = loghandle;
- return 0;
-}
-
-int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
-{
- struct llog_handle *loghandle, *n;
- int rc;
-
- list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
- /* unlink open-not-created llogs */
- list_del_init(&loghandle->u.phd.phd_entry);
- llog_close(env, loghandle);
- }
- /* if handle was stored in ctxt, remove it too */
- if (cathandle->lgh_ctxt->loc_handle == cathandle)
- cathandle->lgh_ctxt->loc_handle = NULL;
- rc = llog_close(env, cathandle);
- return rc;
-}
-EXPORT_SYMBOL(llog_cat_close);
-
-static int llog_cat_process_cb(const struct lu_env *env,
- struct llog_handle *cat_llh,
- struct llog_rec_hdr *rec, void *data)
-{
- struct llog_process_data *d = data;
- struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
- struct llog_handle *llh;
- int rc;
-
- if (rec->lrh_type != LLOG_LOGID_MAGIC) {
- CERROR("invalid record in catalog\n");
- return -EINVAL;
- }
- CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
- DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
- rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));
-
- rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
- if (rc) {
- CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
- cat_llh->lgh_ctxt->loc_obd->obd_name,
- POSTID(&lir->lid_id.lgl_oi), rc);
- return rc;
- }
-
- if (rec->lrh_index < d->lpd_startcat)
- /* Skip processing of the logs until startcat */
- rc = 0;
- else if (d->lpd_startidx > 0) {
- struct llog_process_cat_data cd;
-
- cd.lpcd_first_idx = d->lpd_startidx;
- cd.lpcd_last_idx = 0;
- rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
- &cd, false);
- /* Continue processing the next log from idx 0 */
- d->lpd_startidx = 0;
- } else {
- rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
- NULL, false);
- }
-
- llog_handle_put(llh);
-
- return rc;
-}
-
-int llog_cat_process_or_fork(const struct lu_env *env,
- struct llog_handle *cat_llh,
- llog_cb_t cb, void *data, int startcat,
- int startidx, bool fork)
-{
- struct llog_process_data d;
- struct llog_log_hdr *llh = cat_llh->lgh_hdr;
- int rc;
-
- LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
- d.lpd_data = data;
- d.lpd_cb = cb;
- d.lpd_startcat = startcat;
- d.lpd_startidx = startidx;
-
- if (llh->llh_cat_idx > cat_llh->lgh_last_idx) {
- struct llog_process_cat_data cd;
-
- CWARN("catlog "DOSTID" crosses index zero\n",
- POSTID(&cat_llh->lgh_id.lgl_oi));
-
- cd.lpcd_first_idx = llh->llh_cat_idx;
- cd.lpcd_last_idx = 0;
- rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
- &d, &cd, fork);
- if (rc != 0)
- return rc;
-
- cd.lpcd_first_idx = 0;
- cd.lpcd_last_idx = cat_llh->lgh_last_idx;
- rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
- &d, &cd, fork);
- } else {
- rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
- &d, NULL, fork);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(llog_cat_process_or_fork);
-
-int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
- llog_cb_t cb, void *data, int startcat, int startidx)
-{
- return llog_cat_process_or_fork(env, cat_llh, cb, data, startcat,
- startidx, false);
-}
-EXPORT_SYMBOL(llog_cat_process);
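
The subtle part of llog_cat_process_or_fork() above is the wrap-around case: when llh_cat_idx has passed lgh_last_idx, the live range of catalog slots crosses index zero, so the catalog is walked in two passes. Below is a rough, self-contained userspace sketch of that two-pass walk; the struct, field names and slot count are made up for illustration and are not the Lustre API.

#include <stdio.h>

/* Illustrative stand-ins for the catalog bookkeeping that the real code
 * keeps in struct llog_log_hdr / struct llog_handle. */
struct cat_state {
	unsigned int first_idx;	/* like llh_cat_idx: first still-live slot */
	unsigned int last_idx;	/* like lgh_last_idx: last slot written */
	unsigned int nslots;	/* size of the circular index space */
};

static void process_range(unsigned int from, unsigned int to)
{
	unsigned int i;

	for (i = from; i <= to; i++)
		printf("process catalog slot %u\n", i);
}

/* Two-pass walk: when first_idx > last_idx the live range wraps past
 * slot zero, so scan [first_idx, nslots - 1] and then [0, last_idx]. */
static void cat_walk(const struct cat_state *cs)
{
	if (cs->first_idx > cs->last_idx) {
		process_range(cs->first_idx, cs->nslots - 1);
		process_range(0, cs->last_idx);
	} else {
		process_range(cs->first_idx, cs->last_idx);
	}
}

int main(void)
{
	struct cat_state wrapped = { .first_idx = 6, .last_idx = 2, .nslots = 8 };

	cat_walk(&wrapped);
	return 0;
}

Splitting the walk this way keeps each pass a simple ascending scan instead of teaching the record iterator about modular indices.
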
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
deleted file mode 100644
index 533998688fb4..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LLOG_INTERNAL_H__
-#define __LLOG_INTERNAL_H__
-
-#include "../include/lustre_log.h"
-
-struct llog_process_info {
- struct llog_handle *lpi_loghandle;
- llog_cb_t lpi_cb;
- void *lpi_cbdata;
- void *lpi_catdata;
- int lpi_rc;
- struct completion lpi_completion;
- const struct lu_env *lpi_env;
-
-};
-
-struct llog_thread_info {
- struct lu_attr lgi_attr;
- struct lu_fid lgi_fid;
- struct lu_buf lgi_buf;
- loff_t lgi_off;
- struct llog_rec_hdr lgi_lrh;
- struct llog_rec_tail lgi_tail;
-};
-
-extern struct lu_context_key llog_thread_key;
-
-int llog_info_init(void);
-void llog_info_fini(void);
-
-void llog_handle_get(struct llog_handle *loghandle);
-void llog_handle_put(struct llog_handle *loghandle);
-int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
- struct llog_handle **res, struct llog_logid *logid);
-int class_config_dump_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data);
-int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, int size);
-int llog_process_or_fork(const struct lu_env *env,
- struct llog_handle *loghandle,
- llog_cb_t cb, void *data, void *catdata, bool fork);
-int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
- struct llog_handle *loghandle, int index);
-#endif
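
struct llog_process_info above exists so that llog_process_or_fork() can hand the record walk to a helper thread and then block on lpi_completion until the result lands in lpi_rc. The following is a rough userspace analogue of that hand-off, using pthreads in place of the kernel's completion and kthread APIs; all names here are illustrative, not the Lustre interfaces.

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for struct llog_process_info: the caller hands the
 * worker a callback plus private data and waits on a completion for rc. */
struct process_info {
	int (*cb)(void *data);
	void *data;
	int rc;
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	int done;
};

static void *worker(void *arg)
{
	struct process_info *pi = arg;

	pi->rc = pi->cb(pi->data);	/* run the per-record callback */
	pthread_mutex_lock(&pi->lock);
	pi->done = 1;			/* signal the "completion" */
	pthread_cond_signal(&pi->done_cv);
	pthread_mutex_unlock(&pi->lock);
	return NULL;
}

static int demo_cb(void *data)
{
	printf("processing %s\n", (const char *)data);
	return 0;
}

int main(void)
{
	struct process_info pi = {
		.cb = demo_cb, .data = "llog records",
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker, &pi);
	pthread_mutex_lock(&pi.lock);
	while (!pi.done)		/* wait_for_completion() equivalent */
		pthread_cond_wait(&pi.done_cv, &pi.lock);
	pthread_mutex_unlock(&pi.lock);
	pthread_join(tid, NULL);
	return pi.rc;
}

The kernel version relies on struct completion and wait_for_completion(), but the shape is the same: the worker stores its return code, signals, and the caller collects it after the wait.
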
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
deleted file mode 100644
index 2b4a70b6c8d4..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-
-#include "../include/obd_class.h"
-#include "../include/lustre_log.h"
-#include "llog_internal.h"
-
-/* helper functions for calling the llog obd methods */
-static struct llog_ctxt *llog_new_ctxt(struct obd_device *obd)
-{
- struct llog_ctxt *ctxt;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_NOFS);
- if (!ctxt)
- return NULL;
-
- ctxt->loc_obd = obd;
- atomic_set(&ctxt->loc_refcount, 1);
-
- return ctxt;
-}
-
-static void llog_ctxt_destroy(struct llog_ctxt *ctxt)
-{
- if (ctxt->loc_exp) {
- class_export_put(ctxt->loc_exp);
- ctxt->loc_exp = NULL;
- }
- if (ctxt->loc_imp) {
- class_import_put(ctxt->loc_imp);
- ctxt->loc_imp = NULL;
- }
- kfree(ctxt);
-}
-
-int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt)
-{
- struct obd_llog_group *olg = ctxt->loc_olg;
- struct obd_device *obd;
- int rc = 0;
-
- spin_lock(&olg->olg_lock);
- if (!atomic_dec_and_test(&ctxt->loc_refcount)) {
- spin_unlock(&olg->olg_lock);
- return rc;
- }
- olg->olg_ctxts[ctxt->loc_idx] = NULL;
- spin_unlock(&olg->olg_lock);
-
- obd = ctxt->loc_obd;
- spin_lock(&obd->obd_dev_lock);
- /* sync with llog ctxt user thread */
- spin_unlock(&obd->obd_dev_lock);
-
-	/* obd->obd_starting is needed to handle cleanup in the error
-	 * case while the obd is still starting up. */
- LASSERTF(obd->obd_starting == 1 ||
- obd->obd_stopping == 1 || obd->obd_set_up == 0,
- "wrong obd state: %d/%d/%d\n", !!obd->obd_starting,
- !!obd->obd_stopping, !!obd->obd_set_up);
-
- /* cleanup the llog ctxt here */
- if (CTXTP(ctxt, cleanup))
- rc = CTXTP(ctxt, cleanup)(env, ctxt);
-
- llog_ctxt_destroy(ctxt);
- wake_up(&olg->olg_waitq);
- return rc;
-}
-EXPORT_SYMBOL(__llog_ctxt_put);
-
-int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
-{
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- struct obd_llog_group *olg;
- int rc, idx;
-
- LASSERT(ctxt != NULL);
- LASSERT(ctxt != LP_POISON);
-
- olg = ctxt->loc_olg;
- LASSERT(olg != NULL);
- LASSERT(olg != LP_POISON);
-
- idx = ctxt->loc_idx;
-
- /*
-	 * Balance the ctxt get when calling llog_cleanup()
- */
- LASSERT(atomic_read(&ctxt->loc_refcount) < LI_POISON);
- LASSERT(atomic_read(&ctxt->loc_refcount) > 1);
- llog_ctxt_put(ctxt);
-
- /*
- * Try to free the ctxt.
- */
- rc = __llog_ctxt_put(env, ctxt);
- if (rc)
- CERROR("Error %d while cleaning up ctxt %p\n",
- rc, ctxt);
-
- l_wait_event(olg->olg_waitq,
- llog_group_ctxt_null(olg, idx), &lwi);
-
- return rc;
-}
-EXPORT_SYMBOL(llog_cleanup);
-
-int llog_setup(const struct lu_env *env, struct obd_device *obd,
- struct obd_llog_group *olg, int index,
- struct obd_device *disk_obd, struct llog_operations *op)
-{
- struct llog_ctxt *ctxt;
- int rc = 0;
-
- if (index < 0 || index >= LLOG_MAX_CTXTS)
- return -EINVAL;
-
- LASSERT(olg != NULL);
-
- ctxt = llog_new_ctxt(obd);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->loc_obd = obd;
- ctxt->loc_olg = olg;
- ctxt->loc_idx = index;
- ctxt->loc_logops = op;
- mutex_init(&ctxt->loc_mutex);
- ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
- ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
-
- rc = llog_group_set_ctxt(olg, ctxt, index);
- if (rc) {
- llog_ctxt_destroy(ctxt);
- if (rc == -EEXIST) {
- ctxt = llog_group_get_ctxt(olg, index);
- if (ctxt) {
- /*
- * mds_lov_update_desc() might call here multiple
- * times. So if the llog is already set up then
-				 * don't do it again.
- */
- CDEBUG(D_CONFIG, "obd %s ctxt %d already set up\n",
- obd->obd_name, index);
- LASSERT(ctxt->loc_olg == olg);
- LASSERT(ctxt->loc_obd == obd);
- LASSERT(ctxt->loc_exp == disk_obd->obd_self_export);
- LASSERT(ctxt->loc_logops == op);
- llog_ctxt_put(ctxt);
- }
- rc = 0;
- }
- return rc;
- }
-
- if (op->lop_setup) {
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LLOG_SETUP))
- rc = -EOPNOTSUPP;
- else
- rc = op->lop_setup(env, obd, olg, index, disk_obd);
- }
-
- if (rc) {
- CERROR("%s: ctxt %d lop_setup=%p failed: rc = %d\n",
- obd->obd_name, index, op->lop_setup, rc);
- llog_group_clear_ctxt(olg, index);
- llog_ctxt_destroy(ctxt);
- } else {
- CDEBUG(D_CONFIG, "obd %s ctxt %d is initialized\n",
- obd->obd_name, index);
- ctxt->loc_flags &= ~LLOG_CTXT_FLAG_UNINITIALIZED;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(llog_setup);
-
-/* context key constructor/destructor: llog_key_init, llog_key_fini */
-LU_KEY_INIT_FINI(llog, struct llog_thread_info);
-/* context key: llog_thread_key */
-LU_CONTEXT_KEY_DEFINE(llog, LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL);
-LU_KEY_INIT_GENERIC(llog);
-EXPORT_SYMBOL(llog_thread_key);
-
-int llog_info_init(void)
-{
- llog_key_init_generic(&llog_thread_key, NULL);
- lu_context_key_register(&llog_thread_key);
- return 0;
-}
-
-void llog_info_fini(void)
-{
- lu_context_key_degister(&llog_thread_key);
-}
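
The context lifetime managed by llog_setup()/llog_cleanup()/__llog_ctxt_put() above is plain reference counting: the context is created holding one reference, users take and drop references, and the final put tears the context down and wakes anyone waiting for the slot to clear. A minimal userspace sketch of that pattern with C11 atomics follows; the names are invented, and it deliberately leaves out the olg locking and the lop cleanup callback.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the llog_ctxt refcount lifecycle. */
struct ctxt {
	atomic_int refcount;
	int idx;
};

static struct ctxt *ctxt_new(int idx)
{
	struct ctxt *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	atomic_init(&c->refcount, 1);	/* creator's reference */
	c->idx = idx;
	return c;
}

static void ctxt_get(struct ctxt *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

static void ctxt_put(struct ctxt *c)
{
	/* free only when the last reference drops, as __llog_ctxt_put()
	 * does before waking the waiters on olg_waitq */
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		printf("ctxt %d destroyed\n", c->idx);
		free(c);
	}
}

int main(void)
{
	struct ctxt *c = ctxt_new(3);

	ctxt_get(c);	/* a second user */
	ctxt_put(c);	/* the user drops its reference */
	ctxt_put(c);	/* the creator's put frees the context */
	return 0;
}
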
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
deleted file mode 100644
index a2d5aa105d6b..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/llog_swab.c
- *
- * Swabbing of llog datatypes (from disk or over the wire).
- *
- * Author: jacob berkman <jacob@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-
-#include "../include/lustre_log.h"
-
-static void print_llogd_body(struct llogd_body *d)
-{
- CDEBUG(D_OTHER, "llogd body: %p\n", d);
- CDEBUG(D_OTHER, "\tlgd_logid.lgl_oi: "DOSTID"\n",
- POSTID(&d->lgd_logid.lgl_oi));
- CDEBUG(D_OTHER, "\tlgd_logid.lgl_ogen: %#x\n", d->lgd_logid.lgl_ogen);
- CDEBUG(D_OTHER, "\tlgd_ctxt_idx: %#x\n", d->lgd_ctxt_idx);
- CDEBUG(D_OTHER, "\tlgd_llh_flags: %#x\n", d->lgd_llh_flags);
- CDEBUG(D_OTHER, "\tlgd_index: %#x\n", d->lgd_index);
- CDEBUG(D_OTHER, "\tlgd_saved_index: %#x\n", d->lgd_saved_index);
- CDEBUG(D_OTHER, "\tlgd_len: %#x\n", d->lgd_len);
- CDEBUG(D_OTHER, "\tlgd_cur_offset: %#llx\n", d->lgd_cur_offset);
-}
-
-void lustre_swab_lu_fid(struct lu_fid *fid)
-{
- __swab64s(&fid->f_seq);
- __swab32s(&fid->f_oid);
- __swab32s(&fid->f_ver);
-}
-EXPORT_SYMBOL(lustre_swab_lu_fid);
-
-void lustre_swab_ost_id(struct ost_id *oid)
-{
- if (fid_seq_is_mdt0(oid->oi.oi_seq)) {
- __swab64s(&oid->oi.oi_id);
- __swab64s(&oid->oi.oi_seq);
- } else {
- lustre_swab_lu_fid(&oid->oi_fid);
- }
-}
-EXPORT_SYMBOL(lustre_swab_ost_id);
-
-void lustre_swab_llog_id(struct llog_logid *log_id)
-{
- __swab64s(&log_id->lgl_oi.oi.oi_id);
- __swab64s(&log_id->lgl_oi.oi.oi_seq);
- __swab32s(&log_id->lgl_ogen);
-}
-EXPORT_SYMBOL(lustre_swab_llog_id);
-
-void lustre_swab_llogd_body(struct llogd_body *d)
-{
- print_llogd_body(d);
- lustre_swab_llog_id(&d->lgd_logid);
- __swab32s(&d->lgd_ctxt_idx);
- __swab32s(&d->lgd_llh_flags);
- __swab32s(&d->lgd_index);
- __swab32s(&d->lgd_saved_index);
- __swab32s(&d->lgd_len);
- __swab64s(&d->lgd_cur_offset);
- print_llogd_body(d);
-}
-EXPORT_SYMBOL(lustre_swab_llogd_body);
-
-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d)
-{
- __swab64s(&d->lgdc_gen.mnt_cnt);
- __swab64s(&d->lgdc_gen.conn_cnt);
- lustre_swab_llog_id(&d->lgdc_logid);
- __swab32s(&d->lgdc_ctxt_idx);
-}
-EXPORT_SYMBOL(lustre_swab_llogd_conn_body);
-
-void lustre_swab_ll_fid(struct ll_fid *fid)
-{
- __swab64s(&fid->id);
- __swab32s(&fid->generation);
- __swab32s(&fid->f_type);
-}
-EXPORT_SYMBOL(lustre_swab_ll_fid);
-
-void lustre_swab_lu_seq_range(struct lu_seq_range *range)
-{
- __swab64s(&range->lsr_start);
- __swab64s(&range->lsr_end);
- __swab32s(&range->lsr_index);
- __swab32s(&range->lsr_flags);
-}
-EXPORT_SYMBOL(lustre_swab_lu_seq_range);
-
-void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
-{
- struct llog_rec_tail *tail = NULL;
-
- __swab32s(&rec->lrh_len);
- __swab32s(&rec->lrh_index);
- __swab32s(&rec->lrh_type);
- __swab32s(&rec->lrh_id);
-
- switch (rec->lrh_type) {
- case OST_SZ_REC:
- {
- struct llog_size_change_rec *lsc =
- (struct llog_size_change_rec *)rec;
-
- lustre_swab_ll_fid(&lsc->lsc_fid);
- __swab32s(&lsc->lsc_ioepoch);
- tail = &lsc->lsc_tail;
- break;
- }
- case MDS_UNLINK_REC:
- {
- struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec;
-
- __swab64s(&lur->lur_oid);
- __swab32s(&lur->lur_oseq);
- __swab32s(&lur->lur_count);
- tail = &lur->lur_tail;
- break;
- }
- case MDS_UNLINK64_REC:
- {
- struct llog_unlink64_rec *lur =
- (struct llog_unlink64_rec *)rec;
-
- lustre_swab_lu_fid(&lur->lur_fid);
- __swab32s(&lur->lur_count);
- tail = &lur->lur_tail;
- break;
- }
- case CHANGELOG_REC:
- {
- struct llog_changelog_rec *cr =
- (struct llog_changelog_rec *)rec;
-
- __swab16s(&cr->cr.cr_namelen);
- __swab16s(&cr->cr.cr_flags);
- __swab32s(&cr->cr.cr_type);
- __swab64s(&cr->cr.cr_index);
- __swab64s(&cr->cr.cr_prev);
- __swab64s(&cr->cr.cr_time);
- lustre_swab_lu_fid(&cr->cr.cr_tfid);
- lustre_swab_lu_fid(&cr->cr.cr_pfid);
- if (CHANGELOG_REC_EXTENDED(&cr->cr)) {
- struct llog_changelog_ext_rec *ext =
- (struct llog_changelog_ext_rec *)rec;
-
- lustre_swab_lu_fid(&ext->cr.cr_sfid);
- lustre_swab_lu_fid(&ext->cr.cr_spfid);
- tail = &ext->cr_tail;
- } else {
- tail = &cr->cr_tail;
- }
- tail = (struct llog_rec_tail *)((char *)tail +
- cr->cr.cr_namelen);
- break;
- }
- case CHANGELOG_USER_REC:
- {
- struct llog_changelog_user_rec *cur =
- (struct llog_changelog_user_rec *)rec;
-
- __swab32s(&cur->cur_id);
- __swab64s(&cur->cur_endrec);
- tail = &cur->cur_tail;
- break;
- }
-
- case HSM_AGENT_REC: {
- struct llog_agent_req_rec *arr =
- (struct llog_agent_req_rec *)rec;
-
- __swab32s(&arr->arr_hai.hai_len);
- __swab32s(&arr->arr_hai.hai_action);
- lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
- lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
- __swab64s(&arr->arr_hai.hai_cookie);
- __swab64s(&arr->arr_hai.hai_extent.offset);
- __swab64s(&arr->arr_hai.hai_extent.length);
- __swab64s(&arr->arr_hai.hai_gid);
-		/* no swabbing for opaque data */
- /* hai_data[0]; */
- break;
- }
-
- case MDS_SETATTR64_REC:
- {
- struct llog_setattr64_rec *lsr =
- (struct llog_setattr64_rec *)rec;
-
- lustre_swab_ost_id(&lsr->lsr_oi);
- __swab32s(&lsr->lsr_uid);
- __swab32s(&lsr->lsr_uid_h);
- __swab32s(&lsr->lsr_gid);
- __swab32s(&lsr->lsr_gid_h);
- tail = &lsr->lsr_tail;
- break;
- }
- case OBD_CFG_REC:
- /* these are swabbed as they are consumed */
- break;
- case LLOG_HDR_MAGIC:
- {
- struct llog_log_hdr *llh = (struct llog_log_hdr *)rec;
-
- __swab64s(&llh->llh_timestamp);
- __swab32s(&llh->llh_count);
- __swab32s(&llh->llh_bitmap_offset);
- __swab32s(&llh->llh_flags);
- __swab32s(&llh->llh_size);
- __swab32s(&llh->llh_cat_idx);
- tail = &llh->llh_tail;
- break;
- }
- case LLOG_LOGID_MAGIC:
- {
- struct llog_logid_rec *lid = (struct llog_logid_rec *)rec;
-
- lustre_swab_llog_id(&lid->lid_id);
- tail = &lid->lid_tail;
- break;
- }
- case LLOG_GEN_REC:
- {
- struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec;
-
- __swab64s(&lgr->lgr_gen.mnt_cnt);
- __swab64s(&lgr->lgr_gen.conn_cnt);
- tail = &lgr->lgr_tail;
- break;
- }
- case LLOG_PAD_MAGIC:
- break;
- default:
- CERROR("Unknown llog rec type %#x swabbing rec %p\n",
- rec->lrh_type, rec);
- }
-
- if (tail) {
- __swab32s(&tail->lrt_len);
- __swab32s(&tail->lrt_index);
- }
-}
-EXPORT_SYMBOL(lustre_swab_llog_rec);
-
-static void print_llog_hdr(struct llog_log_hdr *h)
-{
- CDEBUG(D_OTHER, "llog header: %p\n", h);
- CDEBUG(D_OTHER, "\tllh_hdr.lrh_index: %#x\n", h->llh_hdr.lrh_index);
- CDEBUG(D_OTHER, "\tllh_hdr.lrh_len: %#x\n", h->llh_hdr.lrh_len);
- CDEBUG(D_OTHER, "\tllh_hdr.lrh_type: %#x\n", h->llh_hdr.lrh_type);
- CDEBUG(D_OTHER, "\tllh_timestamp: %#llx\n", h->llh_timestamp);
- CDEBUG(D_OTHER, "\tllh_count: %#x\n", h->llh_count);
- CDEBUG(D_OTHER, "\tllh_bitmap_offset: %#x\n", h->llh_bitmap_offset);
- CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags);
- CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size);
- CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", h->llh_tail.lrt_index);
- CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len);
-}
-
-void lustre_swab_llog_hdr(struct llog_log_hdr *h)
-{
- print_llog_hdr(h);
-
- lustre_swab_llog_rec(&h->llh_hdr);
-
- print_llog_hdr(h);
-}
-EXPORT_SYMBOL(lustre_swab_llog_hdr);
-
-static void print_lustre_cfg(struct lustre_cfg *lcfg)
-{
- int i;
-
- if (!(libcfs_debug & D_OTHER)) /* don't loop on nothing */
- return;
- CDEBUG(D_OTHER, "lustre_cfg: %p\n", lcfg);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_version: %#x\n", lcfg->lcfg_version);
-
- CDEBUG(D_OTHER, "\tlcfg->lcfg_command: %#x\n", lcfg->lcfg_command);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_num: %#x\n", lcfg->lcfg_num);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_flags: %#x\n", lcfg->lcfg_flags);
- CDEBUG(D_OTHER, "\tlcfg->lcfg_nid: %s\n", libcfs_nid2str(lcfg->lcfg_nid));
-
- CDEBUG(D_OTHER, "\tlcfg->lcfg_bufcount: %d\n", lcfg->lcfg_bufcount);
- if (lcfg->lcfg_bufcount < LUSTRE_CFG_MAX_BUFCOUNT)
- for (i = 0; i < lcfg->lcfg_bufcount; i++)
- CDEBUG(D_OTHER, "\tlcfg->lcfg_buflens[%d]: %d\n",
- i, lcfg->lcfg_buflens[i]);
-}
-
-void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
-{
- int i;
-
- __swab32s(&lcfg->lcfg_version);
-
- if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) {
- CERROR("not swabbing lustre_cfg version %#x (expecting %#x)\n",
- lcfg->lcfg_version, LUSTRE_CFG_VERSION);
- return;
- }
-
- __swab32s(&lcfg->lcfg_command);
- __swab32s(&lcfg->lcfg_num);
- __swab32s(&lcfg->lcfg_flags);
- __swab64s(&lcfg->lcfg_nid);
- __swab32s(&lcfg->lcfg_bufcount);
- for (i = 0; i < lcfg->lcfg_bufcount && i < LUSTRE_CFG_MAX_BUFCOUNT; i++)
- __swab32s(&lcfg->lcfg_buflens[i]);
-
- print_lustre_cfg(lcfg);
- return;
-}
-EXPORT_SYMBOL(lustre_swab_lustre_cfg);
-
-/* used only for compatibility with old on-disk cfg_marker data */
-struct cfg_marker32 {
- __u32 cm_step;
- __u32 cm_flags;
- __u32 cm_vers;
- __u32 padding;
- __u32 cm_createtime;
- __u32 cm_canceltime;
- char cm_tgtname[MTI_NAME_MAXLEN];
- char cm_comment[MTI_NAME_MAXLEN];
-};
-
-#define MTI_NAMELEN32 (MTI_NAME_MAXLEN - \
- (sizeof(struct cfg_marker) - sizeof(struct cfg_marker32)))
-
-void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
-{
- struct cfg_marker32 *cm32 = (struct cfg_marker32 *)marker;
-
- if (swab) {
- __swab32s(&marker->cm_step);
- __swab32s(&marker->cm_flags);
- __swab32s(&marker->cm_vers);
- }
- if (size == sizeof(*cm32)) {
- __u32 createtime, canceltime;
- /* There was a problem with the original declaration of
- * cfg_marker on 32-bit systems because it used time_t as
- * a wire protocol structure, and didn't verify this in
- * wirecheck. We now have to convert the offsets of the
- * later fields in order to work on 32- and 64-bit systems.
- *
- * Fortunately, the cm_comment field has no functional use
- * so can be sacrificed when converting the timestamp size.
- *
- * Overwrite fields from the end first, so they are not
- * clobbered, and use memmove() instead of memcpy() because
- * the source and target buffers overlap. bug 16771 */
- createtime = cm32->cm_createtime;
- canceltime = cm32->cm_canceltime;
- memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32);
- marker->cm_comment[MTI_NAMELEN32 - 1] = '\0';
- memmove(marker->cm_tgtname, cm32->cm_tgtname,
- sizeof(marker->cm_tgtname));
- if (swab) {
- __swab32s(&createtime);
- __swab32s(&canceltime);
- }
- marker->cm_createtime = createtime;
- marker->cm_canceltime = canceltime;
- CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) for target %s, converting\n",
- marker->cm_tgtname);
- } else if (swab) {
- __swab64s(&marker->cm_createtime);
- __swab64s(&marker->cm_canceltime);
- }
-
- return;
-}
-EXPORT_SYMBOL(lustre_swab_cfg_marker);
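
All of the lustre_swab_*() helpers above follow the same recipe: when the stored byte order differs from the host's, each fixed-width field of the record is byte-swapped in place, header first, then the type-specific payload, then the tail. Here is a simplified, self-contained userspace version of that field-by-field swab; the record layout and names are made up, and the GCC/Clang __builtin_bswap32/64 builtins stand in for the kernel's __swabXXs() helpers.

#include <stdint.h>
#include <stdio.h>

/* Simplified analogues of the __swabXXs() helpers: swap a field in place. */
static void swab32s(uint32_t *x) { *x = __builtin_bswap32(*x); }
static void swab64s(uint64_t *x) { *x = __builtin_bswap64(*x); }

/* Made-up record layout standing in for llog_rec_hdr + payload + tail. */
struct demo_rec {
	uint32_t len;
	uint32_t index;
	uint32_t type;
	uint64_t payload;
	uint32_t tail_len;
	uint32_t tail_index;
};

/* Field-by-field swab in the spirit of lustre_swab_llog_rec(): header
 * first, then the type-specific payload, then the record tail. */
static void swab_demo_rec(struct demo_rec *r)
{
	swab32s(&r->len);
	swab32s(&r->index);
	swab32s(&r->type);
	swab64s(&r->payload);
	swab32s(&r->tail_len);
	swab32s(&r->tail_index);
}

int main(void)
{
	struct demo_rec r = { .len = 0x28, .index = 1, .type = 0x1064,
			      .payload = 0x1122334455667788ULL,
			      .tail_len = 0x28, .tail_index = 1 };

	swab_demo_rec(&r);
	printf("type after swab: %#x\n", r.type);
	return 0;
}

Swabbing in place like this lets the same struct definitions serve both byte orders; only the reader has to decide whether a swab is needed.
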
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
deleted file mode 100644
index 6acc4a10fde9..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- *
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lprocfs_counters.c
- *
- * Lustre lprocfs counter routines
- *
- * Author: Andreas Dilger <andreas.dilger@intel.com>
- */
-
-#include <linux/module.h>
-#include "../include/lprocfs_status.h"
-#include "../include/obd_support.h"
-
-void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
-{
- struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *header;
- int smp_id;
- unsigned long flags = 0;
-
- if (stats == NULL)
- return;
-
- LASSERTF(0 <= idx && idx < stats->ls_num,
- "idx %d, ls_num %hu\n", idx, stats->ls_num);
-
-	/* With per-client stats, statistics are allocated only for a
-	 * single CPU area, so smp_id should always be 0. */
- smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
- if (smp_id < 0)
- return;
-
- header = &stats->ls_cnt_header[idx];
- percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
- percpu_cntr->lc_count++;
-
- if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
- /*
- * lprocfs_counter_add() can be called in interrupt context,
-		 * as memory allocation can trigger a memory shrinker call to
- * ldlm_pool_shrink(), which calls lprocfs_counter_add().
- * LU-1727.
- *
- */
- if (in_interrupt() &&
- (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpu_cntr->lc_sum_irq += amount;
- else
- percpu_cntr->lc_sum += amount;
-
- if (header->lc_config & LPROCFS_CNTR_STDDEV)
- percpu_cntr->lc_sumsquare += (__s64)amount * amount;
- if (amount < percpu_cntr->lc_min)
- percpu_cntr->lc_min = amount;
- if (amount > percpu_cntr->lc_max)
- percpu_cntr->lc_max = amount;
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
-}
-EXPORT_SYMBOL(lprocfs_counter_add);
-
-void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
-{
- struct lprocfs_counter *percpu_cntr;
- struct lprocfs_counter_header *header;
- int smp_id;
- unsigned long flags = 0;
-
- if (stats == NULL)
- return;
-
- LASSERTF(0 <= idx && idx < stats->ls_num,
- "idx %d, ls_num %hu\n", idx, stats->ls_num);
-
-	/* With per-client stats, statistics are allocated only for a
-	 * single CPU area, so smp_id should always be 0. */
- smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
- if (smp_id < 0)
- return;
-
- header = &stats->ls_cnt_header[idx];
- percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
- if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
- /*
-		 * Sometimes we use RCU callbacks to free memory, which calls
-		 * lprocfs_counter_sub(), and RCU callbacks may execute in
-		 * softirq context - right now that is the only case where we
-		 * end up in softirq context here, so use a separate counter
-		 * for it. bz20650.
- *
- */
- if (in_interrupt() &&
- (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpu_cntr->lc_sum_irq -= amount;
- else
- percpu_cntr->lc_sum -= amount;
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
-}
-EXPORT_SYMBOL(lprocfs_counter_sub);
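
The add/sub paths above keep, per counter slot, a hit count plus (when AVGMINMAX is configured) a running sum, min, max and sum of squares, which is everything the reporting code later needs to print averages and a standard deviation. A stripped-down userspace sketch of that bookkeeping for a single slot follows; it has no per-CPU array and no IRQ handling, and the names are illustrative rather than the lprocfs API.

#include <limits.h>
#include <stdio.h>

/* Userspace stand-in for one lprocfs counter slot. */
struct counter {
	long long count;
	long long sum;
	long long sumsquare;
	long long min;
	long long max;
};

static void counter_init(struct counter *c)
{
	c->count = 0;
	c->sum = 0;
	c->sumsquare = 0;
	c->min = LLONG_MAX;	/* plays the role of LC_MIN_INIT */
	c->max = 0;
}

static void counter_add(struct counter *c, long long amount)
{
	c->count++;
	c->sum += amount;
	c->sumsquare += amount * amount;
	if (amount < c->min)
		c->min = amount;
	if (amount > c->max)
		c->max = amount;
}

int main(void)
{
	struct counter c;

	counter_init(&c);
	counter_add(&c, 4096);
	counter_add(&c, 1024);
	printf("count=%lld sum=%lld min=%lld max=%lld\n",
	       c.count, c.sum, c.min, c.max);
	return 0;
}
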
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
deleted file mode 100644
index da4afe08ca62..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ /dev/null
@@ -1,1492 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lprocfs_status.c
- *
- * Author: Hariharan Thantry <thantry@users.sourceforge.net>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre/lustre_idl.h"
-#include <linux/seq_file.h>
-#include <linux/ctype.h>
-
-static const char * const obd_connect_names[] = {
- "read_only",
- "lov_index",
- "unused",
- "write_grant",
- "server_lock",
- "version",
- "request_portal",
- "acl",
- "xattr",
- "create_on_write",
- "truncate_lock",
- "initial_transno",
- "inode_bit_locks",
- "join_file(obsolete)",
- "getattr_by_fid",
- "no_oh_for_devices",
- "remote_client",
- "remote_client_by_force",
- "max_byte_per_rpc",
- "64bit_qdata",
- "mds_capability",
- "oss_capability",
- "early_lock_cancel",
- "som",
- "adaptive_timeouts",
- "lru_resize",
- "mds_mds_connection",
- "real_conn",
- "change_qunit_size",
- "alt_checksum_algorithm",
- "fid_is_enabled",
- "version_recovery",
- "pools",
- "grant_shrink",
- "skip_orphan",
- "large_ea",
- "full20",
- "layout_lock",
- "64bithash",
- "object_max_bytes",
- "imp_recov",
- "jobstats",
- "umask",
- "einprogress",
- "grant_param",
- "flock_owner",
- "lvb_type",
- "nanoseconds_times",
- "lightweight_conn",
- "short_io",
- "pingless",
- "flock_deadlock",
- "disp_stripe",
- "unknown",
- NULL
-};
-
-int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
-{
- __u64 mask = 1;
- int i, ret = 0;
-
- for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
- if (flags & mask)
- ret += snprintf(page + ret, count - ret, "%s%s",
- ret ? sep : "", obd_connect_names[i]);
- }
- if (flags & ~(mask - 1))
- ret += snprintf(page + ret, count - ret,
- "%sunknown flags %#llx",
- ret ? sep : "", flags & ~(mask - 1));
- return ret;
-}
-EXPORT_SYMBOL(obd_connect_flags2str);
-
-int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
- int mult)
-{
- long decimal_val, frac_val;
- int prtn;
-
- if (count < 10)
- return -EINVAL;
-
- decimal_val = val / mult;
- prtn = snprintf(buffer, count, "%ld", decimal_val);
- frac_val = val % mult;
-
- if (prtn < (count - 4) && frac_val > 0) {
- long temp_frac;
- int i, temp_mult = 1, frac_bits = 0;
-
- temp_frac = frac_val * 10;
- buffer[prtn++] = '.';
- while (frac_bits < 2 && (temp_frac / mult) < 1) {
-			/* only 2 fractional digits are reserved */
- buffer[prtn++] = '0';
- temp_frac *= 10;
- frac_bits++;
- }
- /*
-		 * Cases to consider:
- * 1. #echo x.00 > /proc/xxx output result : x
- * 2. #echo x.0x > /proc/xxx output result : x.0x
- * 3. #echo x.x0 > /proc/xxx output result : x.x
- * 4. #echo x.xx > /proc/xxx output result : x.xx
-		 * Only 2 fractional digits are reserved.
- */
- for (i = 0; i < (5 - prtn); i++)
- temp_mult *= 10;
-
- frac_bits = min((int)count - prtn, 3 - frac_bits);
- prtn += snprintf(buffer + prtn, frac_bits, "%ld",
- frac_val * temp_mult / mult);
-
- prtn--;
- while (buffer[prtn] < '1' || buffer[prtn] > '9') {
- prtn--;
- if (buffer[prtn] == '.') {
- prtn--;
- break;
- }
- }
- prtn++;
- }
- buffer[prtn++] = '\n';
- return prtn;
-}
-EXPORT_SYMBOL(lprocfs_read_frac_helper);
-
-int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count,
- int *val, int mult)
-{
- char kernbuf[20], *end, *pbuf;
-
- if (count > (sizeof(kernbuf) - 1))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = '\0';
- pbuf = kernbuf;
- if (*pbuf == '-') {
- mult = -mult;
- pbuf++;
- }
-
- *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
- if (pbuf == end)
- return -EINVAL;
-
- if (end != NULL && *end == '.') {
- int temp_val, pow = 1;
- int i;
-
- pbuf = end + 1;
- if (strlen(pbuf) > 5)
-			pbuf[5] = '\0'; /* only allow 5 fractional digits */
-
- temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
-
- if (pbuf < end) {
- for (i = 0; i < (end - pbuf); i++)
- pow *= 10;
-
- *val += temp_val / pow;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_write_frac_helper);
-
-static int lprocfs_no_percpu_stats;
-module_param(lprocfs_no_percpu_stats, int, 0644);
-MODULE_PARM_DESC(lprocfs_no_percpu_stats, "Do not alloc percpu data for lprocfs stats");
-
-#define MAX_STRING_SIZE 128
-
-int lprocfs_single_release(struct inode *inode, struct file *file)
-{
- return single_release(inode, file);
-}
-EXPORT_SYMBOL(lprocfs_single_release);
-
-int lprocfs_seq_release(struct inode *inode, struct file *file)
-{
- return seq_release(inode, file);
-}
-EXPORT_SYMBOL(lprocfs_seq_release);
-
-/* lprocfs API calls */
-
-struct dentry *ldebugfs_add_simple(struct dentry *root,
- char *name, void *data,
- struct file_operations *fops)
-{
- struct dentry *entry;
- umode_t mode = 0;
-
- if (root == NULL || name == NULL || fops == NULL)
- return ERR_PTR(-EINVAL);
-
- if (fops->read)
- mode = 0444;
- if (fops->write)
- mode |= 0200;
- entry = debugfs_create_file(name, mode, root, data, fops);
- if (IS_ERR_OR_NULL(entry)) {
- CERROR("LprocFS: No memory to create <debugfs> entry %s", name);
- return entry ?: ERR_PTR(-ENOMEM);
- }
- return entry;
-}
-EXPORT_SYMBOL(ldebugfs_add_simple);
-
-static struct file_operations lprocfs_generic_fops = { };
-
-int ldebugfs_add_vars(struct dentry *parent,
- struct lprocfs_vars *list,
- void *data)
-{
- if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list))
- return -EINVAL;
-
- while (list->name != NULL) {
- struct dentry *entry;
- umode_t mode = 0;
-
- if (list->proc_mode != 0000) {
- mode = list->proc_mode;
- } else if (list->fops) {
- if (list->fops->read)
- mode = 0444;
- if (list->fops->write)
- mode |= 0200;
- }
- entry = debugfs_create_file(list->name, mode, parent,
- list->data ?: data,
- list->fops ?: &lprocfs_generic_fops
- );
- if (IS_ERR_OR_NULL(entry))
- return entry ? PTR_ERR(entry) : -ENOMEM;
- list++;
- }
- return 0;
-}
-EXPORT_SYMBOL(ldebugfs_add_vars);
-
-void ldebugfs_remove(struct dentry **entryp)
-{
- debugfs_remove_recursive(*entryp);
- *entryp = NULL;
-}
-EXPORT_SYMBOL(ldebugfs_remove);
-
-struct dentry *ldebugfs_register(const char *name,
- struct dentry *parent,
- struct lprocfs_vars *list, void *data)
-{
- struct dentry *entry;
-
- entry = debugfs_create_dir(name, parent);
- if (IS_ERR_OR_NULL(entry)) {
- entry = entry ?: ERR_PTR(-ENOMEM);
- goto out;
- }
-
- if (!IS_ERR_OR_NULL(list)) {
- int rc;
-
- rc = ldebugfs_add_vars(entry, list, data);
- if (rc != 0) {
- debugfs_remove(entry);
- entry = ERR_PTR(rc);
- }
- }
-out:
- return entry;
-}
-EXPORT_SYMBOL(ldebugfs_register);
-
-/* Generic callbacks */
-int lprocfs_rd_uint(struct seq_file *m, void *data)
-{
- seq_printf(m, "%u\n", *(unsigned int *)data);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_uint);
-
-int lprocfs_wr_uint(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- unsigned *p = data;
- char dummy[MAX_STRING_SIZE + 1], *end;
- unsigned long tmp;
-
- dummy[MAX_STRING_SIZE] = '\0';
- if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
- return -EFAULT;
-
- tmp = simple_strtoul(dummy, &end, 0);
- if (dummy == end)
- return -EINVAL;
-
- *p = (unsigned int)tmp;
- return count;
-}
-EXPORT_SYMBOL(lprocfs_wr_uint);
-
-static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%s\n", obd->obd_uuid.uuid);
-}
-LUSTRE_RO_ATTR(uuid);
-
-static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%u\n", osfs.os_bsize);
-
- return rc;
-}
-LUSTRE_RO_ATTR(blocksize);
-
-static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_blocks;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- return sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytestotal);
-
-static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bfree;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- return sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesfree);
-
-static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc) {
- __u32 blk_size = osfs.os_bsize >> 10;
- __u64 result = osfs.os_bavail;
-
- while (blk_size >>= 1)
- result <<= 1;
-
- return sprintf(buf, "%llu\n", result);
- }
-
- return rc;
-}
-LUSTRE_RO_ATTR(kbytesavail);
-
-static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_files);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filestotal);
-
-static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct obd_statfs osfs;
- int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_NODELAY);
- if (!rc)
- return sprintf(buf, "%llu\n", osfs.os_ffree);
-
- return rc;
-}
-LUSTRE_RO_ATTR(filesfree);
-
-int lprocfs_rd_server_uuid(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct obd_import *imp;
- char *imp_state_name = NULL;
-
- LASSERT(obd != NULL);
- LPROCFS_CLIMP_CHECK(obd);
- imp = obd->u.cli.cl_import;
- imp_state_name = ptlrpc_import_state_name(imp->imp_state);
- seq_printf(m, "%s\t%s%s\n",
- obd2cli_tgt(obd), imp_state_name,
- imp->imp_deactive ? "\tDEACTIVATED" : "");
-
- LPROCFS_CLIMP_EXIT(obd);
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_server_uuid);
-
-int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- struct ptlrpc_connection *conn;
-
- LASSERT(obd != NULL);
-
- LPROCFS_CLIMP_CHECK(obd);
- conn = obd->u.cli.cl_import->imp_connection;
- if (conn && obd->u.cli.cl_import)
- seq_printf(m, "%s\n", conn->c_remote_uuid.uuid);
- else
- seq_puts(m, "<none>\n");
-
- LPROCFS_CLIMP_EXIT(obd);
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_conn_uuid);
-
-/** add up per-cpu counters */
-void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
- struct lprocfs_counter *cnt)
-{
- unsigned int num_entry;
- struct lprocfs_counter *percpu_cntr;
- int i;
- unsigned long flags = 0;
-
- memset(cnt, 0, sizeof(*cnt));
-
- if (stats == NULL) {
- /* set count to 1 to avoid divide-by-zero errs in callers */
- cnt->lc_count = 1;
- return;
- }
-
- cnt->lc_min = LC_MIN_INIT;
-
- num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
-
- for (i = 0; i < num_entry; i++) {
- if (stats->ls_percpu[i] == NULL)
- continue;
- percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
-
- cnt->lc_count += percpu_cntr->lc_count;
- cnt->lc_sum += percpu_cntr->lc_sum;
- if (percpu_cntr->lc_min < cnt->lc_min)
- cnt->lc_min = percpu_cntr->lc_min;
- if (percpu_cntr->lc_max > cnt->lc_max)
- cnt->lc_max = percpu_cntr->lc_max;
- cnt->lc_sumsquare += percpu_cntr->lc_sumsquare;
- }
-
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
-}
-EXPORT_SYMBOL(lprocfs_stats_collect);
-
-/**
- * Append a comma-separated list of the currently set import flags to the seq file.
- */
-#define flag2str(flag, first) \
- do { \
- if (imp->imp_##flag) \
- seq_printf(m, "%s" #flag, first ? "" : ", "); \
- } while (0)
-static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
-{
- bool first = true;
-
- if (imp->imp_obd->obd_no_recov) {
- seq_printf(m, "no_recov");
- first = false;
- }
-
- flag2str(invalid, first);
- first = false;
- flag2str(deactive, first);
- flag2str(replayable, first);
- flag2str(pingable, first);
- return 0;
-}
-#undef flag2str
-
-static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep)
-{
- __u64 mask = 1;
- int i;
- bool first = true;
-
- for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
- if (flags & mask) {
- seq_printf(m, "%s%s",
- first ? sep : "", obd_connect_names[i]);
- first = false;
- }
- }
- if (flags & ~(mask - 1))
- seq_printf(m, "%sunknown flags %#llx",
- first ? sep : "", flags & ~(mask - 1));
-}
-
-int lprocfs_rd_import(struct seq_file *m, void *data)
-{
- struct lprocfs_counter ret;
- struct lprocfs_counter_header *header;
- struct obd_device *obd = (struct obd_device *)data;
- struct obd_import *imp;
- struct obd_import_conn *conn;
- int j;
- int k;
- int rw = 0;
-
- LASSERT(obd != NULL);
- LPROCFS_CLIMP_CHECK(obd);
- imp = obd->u.cli.cl_import;
-
- seq_printf(m,
- "import:\n"
- " name: %s\n"
- " target: %s\n"
- " state: %s\n"
- " instance: %u\n"
- " connect_flags: [",
- obd->obd_name,
- obd2cli_tgt(obd),
- ptlrpc_import_state_name(imp->imp_state),
- imp->imp_connect_data.ocd_instance);
- obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, ", ");
- seq_printf(m,
- "]\n"
- " import_flags: [");
- obd_import_flags2str(imp, m);
-
- seq_printf(m,
- "]\n"
- " connection:\n"
- " failover_nids: [");
- spin_lock(&imp->imp_lock);
- j = 0;
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
- seq_printf(m, "%s%s", j ? ", " : "",
- libcfs_nid2str(conn->oic_conn->c_peer.nid));
- j++;
- }
- seq_printf(m,
- "]\n"
- " current_connection: %s\n"
- " connection_attempts: %u\n"
- " generation: %u\n"
- " in-progress_invalidations: %u\n",
- imp->imp_connection == NULL ? "<none>" :
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- imp->imp_conn_cnt,
- imp->imp_generation,
- atomic_read(&imp->imp_inval_count));
- spin_unlock(&imp->imp_lock);
-
- if (obd->obd_svc_stats == NULL)
- goto out_climp;
-
- header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR];
- lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
- if (ret.lc_count != 0) {
- /* first argument to do_div MUST be __u64 */
- __u64 sum = ret.lc_sum;
- do_div(sum, ret.lc_count);
- ret.lc_sum = sum;
- } else
- ret.lc_sum = 0;
- seq_printf(m,
- " rpcs:\n"
- " inflight: %u\n"
- " unregistering: %u\n"
- " timeouts: %u\n"
- " avg_waittime: %llu %s\n",
- atomic_read(&imp->imp_inflight),
- atomic_read(&imp->imp_unregistering),
- atomic_read(&imp->imp_timeouts),
- ret.lc_sum, header->lc_units);
-
- k = 0;
- for (j = 0; j < IMP_AT_MAX_PORTALS; j++) {
- if (imp->imp_at.iat_portal[j] == 0)
- break;
- k = max_t(unsigned int, k,
- at_get(&imp->imp_at.iat_service_estimate[j]));
- }
- seq_printf(m,
- " service_estimates:\n"
- " services: %u sec\n"
- " network: %u sec\n",
- k,
- at_get(&imp->imp_at.iat_net_latency));
-
- seq_printf(m,
- " transactions:\n"
- " last_replay: %llu\n"
- " peer_committed: %llu\n"
- " last_checked: %llu\n",
- imp->imp_last_replay_transno,
- imp->imp_peer_committed_transno,
- imp->imp_last_transno_checked);
-
- /* avg data rates */
- for (rw = 0; rw <= 1; rw++) {
- lprocfs_stats_collect(obd->obd_svc_stats,
- PTLRPC_LAST_CNTR + BRW_READ_BYTES + rw,
- &ret);
- if (ret.lc_sum > 0 && ret.lc_count > 0) {
- /* first argument to do_div MUST be __u64 */
- __u64 sum = ret.lc_sum;
- do_div(sum, ret.lc_count);
- ret.lc_sum = sum;
- seq_printf(m,
- " %s_data_averages:\n"
- " bytes_per_rpc: %llu\n",
- rw ? "write" : "read",
- ret.lc_sum);
- }
- k = (int)ret.lc_sum;
- j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES;
- header = &obd->obd_svc_stats->ls_cnt_header[j];
- lprocfs_stats_collect(obd->obd_svc_stats, j, &ret);
- if (ret.lc_sum > 0 && ret.lc_count != 0) {
- /* first argument to do_div MUST be __u64 */
- __u64 sum = ret.lc_sum;
- do_div(sum, ret.lc_count);
- ret.lc_sum = sum;
- seq_printf(m,
- " %s_per_rpc: %llu\n",
- header->lc_units, ret.lc_sum);
- j = (int)ret.lc_sum;
- if (j > 0)
- seq_printf(m,
- " MB_per_sec: %u.%.02u\n",
- k / j, (100 * k / j) % 100);
- }
- }
-
-out_climp:
- LPROCFS_CLIMP_EXIT(obd);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_import);
-
-int lprocfs_rd_state(struct seq_file *m, void *data)
-{
- struct obd_device *obd = (struct obd_device *)data;
- struct obd_import *imp;
- int j, k;
-
- LASSERT(obd != NULL);
- LPROCFS_CLIMP_CHECK(obd);
- imp = obd->u.cli.cl_import;
-
- seq_printf(m, "current_state: %s\n",
- ptlrpc_import_state_name(imp->imp_state));
- seq_printf(m, "state_history:\n");
- k = imp->imp_state_hist_idx;
- for (j = 0; j < IMP_STATE_HIST_LEN; j++) {
- struct import_state_hist *ish =
- &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN];
- if (ish->ish_state == 0)
- continue;
- seq_printf(m, " - [%lld, %s]\n", (s64)ish->ish_time,
- ptlrpc_import_state_name(ish->ish_state));
- }
-
- LPROCFS_CLIMP_EXIT(obd);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_state);
-
-int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at)
-{
- int i;
- for (i = 0; i < AT_BINS; i++)
- seq_printf(m, "%3u ", at->at_hist[i]);
- seq_printf(m, "\n");
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_at_hist_helper);
-
-/* See also ptlrpc_lprocfs_rd_timeouts */
-int lprocfs_rd_timeouts(struct seq_file *m, void *data)
-{
- struct obd_device *obd = (struct obd_device *)data;
- struct obd_import *imp;
- unsigned int cur, worst;
- time64_t now, worstt;
- struct dhms ts;
- int i;
-
- LASSERT(obd != NULL);
- LPROCFS_CLIMP_CHECK(obd);
- imp = obd->u.cli.cl_import;
-
- now = ktime_get_real_seconds();
-
- /* Some network health info for kicks */
- s2dhms(&ts, now - imp->imp_last_reply_time);
- seq_printf(m, "%-10s : %lld, " DHMS_FMT " ago\n",
- "last reply", (s64)imp->imp_last_reply_time, DHMS_VARS(&ts));
-
- cur = at_get(&imp->imp_at.iat_net_latency);
- worst = imp->imp_at.iat_net_latency.at_worst_ever;
- worstt = imp->imp_at.iat_net_latency.at_worst_time;
- s2dhms(&ts, now - worstt);
- seq_printf(m, "%-10s : cur %3u worst %3u (at %lld, " DHMS_FMT " ago) ",
- "network", cur, worst, (s64)worstt, DHMS_VARS(&ts));
- lprocfs_at_hist_helper(m, &imp->imp_at.iat_net_latency);
-
- for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
- if (imp->imp_at.iat_portal[i] == 0)
- break;
- cur = at_get(&imp->imp_at.iat_service_estimate[i]);
- worst = imp->imp_at.iat_service_estimate[i].at_worst_ever;
- worstt = imp->imp_at.iat_service_estimate[i].at_worst_time;
- s2dhms(&ts, now - worstt);
- seq_printf(m, "portal %-2d : cur %3u worst %3u (at %lld, "
- DHMS_FMT " ago) ", imp->imp_at.iat_portal[i],
- cur, worst, (s64)worstt, DHMS_VARS(&ts));
- lprocfs_at_hist_helper(m, &imp->imp_at.iat_service_estimate[i]);
- }
-
- LPROCFS_CLIMP_EXIT(obd);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_timeouts);
-
-int lprocfs_rd_connect_flags(struct seq_file *m, void *data)
-{
- struct obd_device *obd = data;
- __u64 flags;
-
- LPROCFS_CLIMP_CHECK(obd);
- flags = obd->u.cli.cl_import->imp_connect_data.ocd_connect_flags;
- seq_printf(m, "flags=%#llx\n", flags);
- obd_connect_seq_flags2str(m, flags, "\n");
- seq_printf(m, "\n");
- LPROCFS_CLIMP_EXIT(obd);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_connect_flags);
-
-static struct attribute *obd_def_attrs[] = {
- &lustre_attr_blocksize.attr,
- &lustre_attr_kbytestotal.attr,
- &lustre_attr_kbytesfree.attr,
- &lustre_attr_kbytesavail.attr,
- &lustre_attr_filestotal.attr,
- &lustre_attr_filesfree.attr,
- &lustre_attr_uuid.attr,
- NULL,
-};
-
-static void obd_sysfs_release(struct kobject *kobj)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- complete(&obd->obd_kobj_unregister);
-}
-
-static struct kobj_type obd_ktype = {
- .default_attrs = obd_def_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = obd_sysfs_release,
-};
-
-int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
- struct attribute_group *attrs)
-{
- int rc = 0;
-
- init_completion(&obd->obd_kobj_unregister);
- rc = kobject_init_and_add(&obd->obd_kobj, &obd_ktype,
- obd->obd_type->typ_kobj,
- "%s", obd->obd_name);
- if (rc)
- return rc;
-
- if (attrs) {
- rc = sysfs_create_group(&obd->obd_kobj, attrs);
- if (rc) {
- kobject_put(&obd->obd_kobj);
- return rc;
- }
- }
-
- obd->obd_debugfs_entry = ldebugfs_register(obd->obd_name,
- obd->obd_type->typ_debugfs_entry,
- list, obd);
- if (IS_ERR_OR_NULL(obd->obd_debugfs_entry)) {
- rc = obd->obd_debugfs_entry ? PTR_ERR(obd->obd_debugfs_entry)
- : -ENOMEM;
- CERROR("error %d setting up lprocfs for %s\n",
- rc, obd->obd_name);
- obd->obd_debugfs_entry = NULL;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(lprocfs_obd_setup);
-
-int lprocfs_obd_cleanup(struct obd_device *obd)
-{
- if (!obd)
- return -EINVAL;
-
- if (!IS_ERR_OR_NULL(obd->obd_debugfs_entry))
- ldebugfs_remove(&obd->obd_debugfs_entry);
-
- kobject_put(&obd->obd_kobj);
- wait_for_completion(&obd->obd_kobj_unregister);
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_obd_cleanup);
-
-int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
-{
- struct lprocfs_counter *cntr;
- unsigned int percpusize;
- int rc = -ENOMEM;
- unsigned long flags = 0;
- int i;
-
- LASSERT(stats->ls_percpu[cpuid] == NULL);
- LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
-
- percpusize = lprocfs_stats_counter_size(stats);
- LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
- if (stats->ls_percpu[cpuid] != NULL) {
- rc = 0;
- if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, flags);
- else
- spin_lock(&stats->ls_lock);
- if (stats->ls_biggest_alloc_num <= cpuid)
- stats->ls_biggest_alloc_num = cpuid + 1;
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_unlock_irqrestore(&stats->ls_lock, flags);
- else
- spin_unlock(&stats->ls_lock);
- }
-		/* initialize the ls_percpu[cpuid] counters that need non-zero initial values */
- for (i = 0; i < stats->ls_num; ++i) {
- cntr = lprocfs_stats_counter_get(stats, cpuid, i);
- cntr->lc_min = LC_MIN_INIT;
- }
- }
- return rc;
-}
-EXPORT_SYMBOL(lprocfs_stats_alloc_one);
-
-struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
- enum lprocfs_stats_flags flags)
-{
- struct lprocfs_stats *stats;
- unsigned int num_entry;
- unsigned int percpusize = 0;
- int i;
-
- if (num == 0)
- return NULL;
-
- if (lprocfs_no_percpu_stats != 0)
- flags |= LPROCFS_STATS_FLAG_NOPERCPU;
-
- if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
- num_entry = 1;
- else
- num_entry = num_possible_cpus();
-
- /* alloc percpu pointers for all possible cpu slots */
- LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
- if (stats == NULL)
- return NULL;
-
- stats->ls_num = num;
- stats->ls_flags = flags;
- spin_lock_init(&stats->ls_lock);
-
- /* alloc num of counter headers */
- LIBCFS_ALLOC(stats->ls_cnt_header,
- stats->ls_num * sizeof(struct lprocfs_counter_header));
- if (stats->ls_cnt_header == NULL)
- goto fail;
-
- if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
-		/* contains only one set of counters */
- percpusize = lprocfs_stats_counter_size(stats);
- LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize);
- if (stats->ls_percpu[0] == NULL)
- goto fail;
- stats->ls_biggest_alloc_num = 1;
- } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) {
- /* alloc all percpu data */
- for (i = 0; i < num_entry; ++i)
- if (lprocfs_stats_alloc_one(stats, i) < 0)
- goto fail;
- }
-
- return stats;
-
-fail:
- lprocfs_free_stats(&stats);
- return NULL;
-}
-EXPORT_SYMBOL(lprocfs_alloc_stats);
-
-void lprocfs_free_stats(struct lprocfs_stats **statsh)
-{
- struct lprocfs_stats *stats = *statsh;
- unsigned int num_entry;
- unsigned int percpusize;
- unsigned int i;
-
- if (stats == NULL || stats->ls_num == 0)
- return;
- *statsh = NULL;
-
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
- num_entry = 1;
- else
- num_entry = num_possible_cpus();
-
- percpusize = lprocfs_stats_counter_size(stats);
- for (i = 0; i < num_entry; i++)
- if (stats->ls_percpu[i] != NULL)
- LIBCFS_FREE(stats->ls_percpu[i], percpusize);
- if (stats->ls_cnt_header != NULL)
- LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
- sizeof(struct lprocfs_counter_header));
- LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
-}
-EXPORT_SYMBOL(lprocfs_free_stats);
-
-void lprocfs_clear_stats(struct lprocfs_stats *stats)
-{
- struct lprocfs_counter *percpu_cntr;
- int i;
- int j;
- unsigned int num_entry;
- unsigned long flags = 0;
-
- num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
-
- for (i = 0; i < num_entry; i++) {
- if (stats->ls_percpu[i] == NULL)
- continue;
- for (j = 0; j < stats->ls_num; j++) {
- percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
- percpu_cntr->lc_count = 0;
- percpu_cntr->lc_min = LC_MIN_INIT;
- percpu_cntr->lc_max = 0;
- percpu_cntr->lc_sumsquare = 0;
- percpu_cntr->lc_sum = 0;
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- percpu_cntr->lc_sum_irq = 0;
- }
- }
-
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
-}
-EXPORT_SYMBOL(lprocfs_clear_stats);
-
-static ssize_t lprocfs_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct lprocfs_stats *stats = seq->private;
-
- lprocfs_clear_stats(stats);
-
- return len;
-}
-
-static void *lprocfs_stats_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct lprocfs_stats *stats = p->private;
-
- return (*pos < stats->ls_num) ? pos : NULL;
-}
-
-static void lprocfs_stats_seq_stop(struct seq_file *p, void *v)
-{
-}
-
-static void *lprocfs_stats_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- (*pos)++;
- return lprocfs_stats_seq_start(p, pos);
-}
-
-/* seq file export of one lprocfs counter */
-static int lprocfs_stats_seq_show(struct seq_file *p, void *v)
-{
- struct lprocfs_stats *stats = p->private;
- struct lprocfs_counter_header *hdr;
- struct lprocfs_counter ctr;
- int idx = *(loff_t *)v;
-
- if (idx == 0) {
- struct timespec64 now;
-
- ktime_get_real_ts64(&now);
- seq_printf(p, "%-25s %llu.%9lu secs.usecs\n",
- "snapshot_time",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- }
-
- hdr = &stats->ls_cnt_header[idx];
- lprocfs_stats_collect(stats, idx, &ctr);
-
- if (ctr.lc_count != 0) {
- seq_printf(p, "%-25s %lld samples [%s]",
- hdr->lc_name, ctr.lc_count, hdr->lc_units);
-
- if ((hdr->lc_config & LPROCFS_CNTR_AVGMINMAX) &&
- (ctr.lc_count > 0)) {
- seq_printf(p, " %lld %lld %lld",
- ctr.lc_min, ctr.lc_max, ctr.lc_sum);
- if (hdr->lc_config & LPROCFS_CNTR_STDDEV)
- seq_printf(p, " %lld", ctr.lc_sumsquare);
- }
- seq_putc(p, '\n');
- }
-
- return 0;
-}
-
-static const struct seq_operations lprocfs_stats_seq_sops = {
- .start = lprocfs_stats_seq_start,
- .stop = lprocfs_stats_seq_stop,
- .next = lprocfs_stats_seq_next,
- .show = lprocfs_stats_seq_show,
-};
-
-static int lprocfs_stats_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(file, &lprocfs_stats_seq_sops);
- if (rc)
- return rc;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-struct file_operations lprocfs_stats_seq_fops = {
- .owner = THIS_MODULE,
- .open = lprocfs_stats_seq_open,
- .read = seq_read,
- .write = lprocfs_stats_seq_write,
- .llseek = seq_lseek,
- .release = lprocfs_seq_release,
-};
-
-int ldebugfs_register_stats(struct dentry *parent, const char *name,
- struct lprocfs_stats *stats)
-{
- struct dentry *entry;
-
- LASSERT(!IS_ERR_OR_NULL(parent));
-
- entry = debugfs_create_file(name, 0644, parent, stats,
- &lprocfs_stats_seq_fops);
- if (IS_ERR_OR_NULL(entry))
- return entry ? PTR_ERR(entry) : -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL(ldebugfs_register_stats);
-
-void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned conf, const char *name, const char *units)
-{
- struct lprocfs_counter_header *header;
- struct lprocfs_counter *percpu_cntr;
- unsigned long flags = 0;
- unsigned int i;
- unsigned int num_cpu;
-
- LASSERT(stats != NULL);
-
- header = &stats->ls_cnt_header[index];
- LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n",
- index, name, units);
-
- header->lc_config = conf;
- header->lc_name = name;
- header->lc_units = units;
-
- num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
- for (i = 0; i < num_cpu; ++i) {
- if (stats->ls_percpu[i] == NULL)
- continue;
- percpu_cntr = lprocfs_stats_counter_get(stats, i, index);
- percpu_cntr->lc_count = 0;
- percpu_cntr->lc_min = LC_MIN_INIT;
- percpu_cntr->lc_max = 0;
- percpu_cntr->lc_sumsquare = 0;
- percpu_cntr->lc_sum = 0;
- if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- percpu_cntr->lc_sum_irq = 0;
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
-}
-EXPORT_SYMBOL(lprocfs_counter_init);
-
-int lprocfs_exp_cleanup(struct obd_export *exp)
-{
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_exp_cleanup);
-
-__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
- struct lprocfs_counter_header *header,
- enum lprocfs_stats_flags flags,
- enum lprocfs_fields_flags field)
-{
- __s64 ret = 0;
-
- if (lc == NULL || header == NULL)
- return 0;
-
- switch (field) {
- case LPROCFS_FIELDS_FLAGS_CONFIG:
- ret = header->lc_config;
- break;
- case LPROCFS_FIELDS_FLAGS_SUM:
- ret = lc->lc_sum;
- if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- ret += lc->lc_sum_irq;
- break;
- case LPROCFS_FIELDS_FLAGS_MIN:
- ret = lc->lc_min;
- break;
- case LPROCFS_FIELDS_FLAGS_MAX:
- ret = lc->lc_max;
- break;
- case LPROCFS_FIELDS_FLAGS_AVG:
- ret = (lc->lc_max - lc->lc_min) / 2;
- break;
- case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
- ret = lc->lc_sumsquare;
- break;
- case LPROCFS_FIELDS_FLAGS_COUNT:
- ret = lc->lc_count;
- break;
- default:
- break;
- }
-
-	return ret;
-}
-EXPORT_SYMBOL(lprocfs_read_helper);
-
-int lprocfs_write_helper(const char __user *buffer, unsigned long count,
- int *val)
-{
- return lprocfs_write_frac_helper(buffer, count, val, 1);
-}
-EXPORT_SYMBOL(lprocfs_write_helper);
-
-int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count,
- __u64 *val)
-{
- return lprocfs_write_frac_u64_helper(buffer, count, val, 1);
-}
-EXPORT_SYMBOL(lprocfs_write_u64_helper);
-
-int lprocfs_write_frac_u64_helper(const char __user *buffer, unsigned long count,
- __u64 *val, int mult)
-{
- char kernbuf[22], *end, *pbuf;
- __u64 whole, frac = 0, units;
- unsigned frac_d = 1;
- int sign = 1;
-
- if (count > (sizeof(kernbuf) - 1))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
-
- kernbuf[count] = '\0';
- pbuf = kernbuf;
- if (*pbuf == '-') {
- sign = -1;
- pbuf++;
- }
-
- whole = simple_strtoull(pbuf, &end, 10);
- if (pbuf == end)
- return -EINVAL;
-
- if (*end == '.') {
- int i;
- pbuf = end + 1;
-
- /* need to limit frac_d to a __u32 */
- if (strlen(pbuf) > 10)
- pbuf[10] = '\0';
-
- frac = simple_strtoull(pbuf, &end, 10);
- /* count decimal places */
- for (i = 0; i < (end - pbuf); i++)
- frac_d *= 10;
- }
-
- units = 1;
- switch (tolower(*end)) {
- case 'p':
- units <<= 10;
- case 't':
- units <<= 10;
- case 'g':
- units <<= 10;
- case 'm':
- units <<= 10;
- case 'k':
- units <<= 10;
- }
- /* Specified units override the multiplier */
- if (units > 1)
- mult = units;
-
- frac *= mult;
- do_div(frac, frac_d);
- *val = sign * (whole * mult + frac);
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_write_frac_u64_helper);
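To make the accepted syntax above concrete, here is a minimal userspace sketch of the same "<whole>[.<frac>][k|m|g|t|p]" parsing (an illustration only: standard C with strtoull standing in for simple_strtoull, the __user copy and sign handling omitted, and parse_frac_u64 an assumed name):

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_frac_u64(const char *buf, uint64_t *val, uint64_t mult)
{
	uint64_t whole, frac = 0, frac_d = 1, units = 1;
	char *end;

	whole = strtoull(buf, &end, 10);
	if (end == buf)
		return -1;

	if (*end == '.') {
		const char *p = end + 1;

		frac = strtoull(p, &end, 10);
		while (p++ < end)	/* one decimal place per digit parsed */
			frac_d *= 10;
	}

	switch (tolower((unsigned char)*end)) {	/* deliberate fall-through, as above */
	case 'p': units <<= 10;
	case 't': units <<= 10;
	case 'g': units <<= 10;
	case 'm': units <<= 10;
	case 'k': units <<= 10;
	}
	if (units > 1)
		mult = units;	/* an explicit unit suffix overrides the multiplier */

	*val = whole * mult + frac * mult / frac_d;
	return 0;
}

int main(void)
{
	uint64_t v;

	if (parse_frac_u64("1.5m", &v, 1) == 0)
		printf("%llu\n", (unsigned long long)v);	/* prints 1572864 */
	return 0;
}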
-
-static char *lprocfs_strnstr(const char *s1, const char *s2, size_t len)
-{
- size_t l2;
-
- l2 = strlen(s2);
- if (!l2)
- return (char *)s1;
- while (len >= l2) {
- len--;
- if (!memcmp(s1, s2, l2))
- return (char *)s1;
- s1++;
- }
- return NULL;
-}
-
-/**
- * Find the string \a name in the input \a buffer, and return a pointer to the
- * value immediately following \a name, reducing \a count appropriately.
- * If \a name is not found the original \a buffer is returned.
- */
-char *lprocfs_find_named_value(const char *buffer, const char *name,
- size_t *count)
-{
- char *val;
- size_t buflen = *count;
-
- /* there is no strnstr() in rhel5 and ubuntu kernels */
- val = lprocfs_strnstr(buffer, name, buflen);
- if (val == NULL)
- return (char *)buffer;
-
- val += strlen(name); /* skip prefix */
- while (val < buffer + buflen && isspace(*val)) /* skip separator */
- val++;
-
- *count = 0;
- while (val < buffer + buflen && isalnum(*val)) {
- ++*count;
- ++val;
- }
-
- return val - *count;
-}
-EXPORT_SYMBOL(lprocfs_find_named_value);
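As a usage illustration of the lookup above, a small userspace sketch (assumed names, simplified to NUL-terminated strings so plain strstr() can replace lprocfs_strnstr()):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* same walk as lprocfs_find_named_value(), but on a NUL-terminated string */
static const char *find_named_value(const char *buffer, const char *name,
				    size_t *count)
{
	const char *val = strstr(buffer, name);

	if (!val)
		return buffer;		/* not found: return the original buffer */

	val += strlen(name);		/* skip the name itself */
	while (*val && isspace((unsigned char)*val))
		val++;			/* skip the separator */

	*count = 0;
	while (val[*count] && isalnum((unsigned char)val[*count]))
		(*count)++;		/* measure the value token */

	return val;
}

int main(void)
{
	size_t n = 0;
	const char *v = find_named_value("limit 4096 pages", "limit", &n);

	printf("%.*s\n", (int)n, v);	/* prints "4096" */
	return 0;
}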
-
-int ldebugfs_seq_create(struct dentry *parent,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data)
-{
- struct dentry *entry;
-
- /* Disallow secretly (un)writable entries. */
- LASSERT((seq_fops->write == NULL) == ((mode & 0222) == 0));
-
- entry = debugfs_create_file(name, mode, parent, data, seq_fops);
- if (IS_ERR_OR_NULL(entry))
- return entry ? PTR_ERR(entry) : -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL(ldebugfs_seq_create);
-
-int ldebugfs_obd_seq_create(struct obd_device *dev,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data)
-{
- return ldebugfs_seq_create(dev->obd_debugfs_entry, name,
- mode, seq_fops, data);
-}
-EXPORT_SYMBOL(ldebugfs_obd_seq_create);
-
-void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
-{
- if (value >= OBD_HIST_MAX)
- value = OBD_HIST_MAX - 1;
-
- spin_lock(&oh->oh_lock);
- oh->oh_buckets[value]++;
- spin_unlock(&oh->oh_lock);
-}
-EXPORT_SYMBOL(lprocfs_oh_tally);
-
-void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
-{
- unsigned int val;
-
- for (val = 0; ((1 << val) < value) && (val <= OBD_HIST_MAX); val++)
- ;
-
- lprocfs_oh_tally(oh, val);
-}
-EXPORT_SYMBOL(lprocfs_oh_tally_log2);
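For reference, the bucketing above maps a value v to bucket ceil(log2(v)); a tiny standalone sketch (illustrative only, without the OBD_HIST_MAX clamp applied by lprocfs_oh_tally()):

#include <assert.h>

/* a value v is tallied in bucket ceil(log2(v)): 1 -> 0, 2 -> 1, 3..4 -> 2, 5..8 -> 3, ... */
static unsigned int log2_bucket(unsigned int value)
{
	unsigned int val;

	for (val = 0; (1u << val) < value; val++)
		;
	return val;
}

int main(void)
{
	assert(log2_bucket(1) == 0);
	assert(log2_bucket(3) == 2);
	assert(log2_bucket(1000) == 10);
	return 0;
}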
-
-unsigned long lprocfs_oh_sum(struct obd_histogram *oh)
-{
- unsigned long ret = 0;
- int i;
-
- for (i = 0; i < OBD_HIST_MAX; i++)
- ret += oh->oh_buckets[i];
- return ret;
-}
-EXPORT_SYMBOL(lprocfs_oh_sum);
-
-void lprocfs_oh_clear(struct obd_histogram *oh)
-{
- spin_lock(&oh->oh_lock);
- memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
- spin_unlock(&oh->oh_lock);
-}
-EXPORT_SYMBOL(lprocfs_oh_clear);
-
-ssize_t lustre_attr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct lustre_attr *a = container_of(attr, struct lustre_attr, attr);
-
- return a->show ? a->show(kobj, attr, buf) : 0;
-}
-EXPORT_SYMBOL_GPL(lustre_attr_show);
-
-ssize_t lustre_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
-{
- struct lustre_attr *a = container_of(attr, struct lustre_attr, attr);
-
- return a->store ? a->store(kobj, attr, buf, len) : len;
-}
-EXPORT_SYMBOL_GPL(lustre_attr_store);
-
-const struct sysfs_ops lustre_sysfs_ops = {
- .show = lustre_attr_show,
- .store = lustre_attr_store,
-};
-EXPORT_SYMBOL_GPL(lustre_sysfs_ops);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
deleted file mode 100644
index 01d70f029dc7..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ /dev/null
@@ -1,1954 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lu_object.c
- *
- * Lustre Object.
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-# include <linux/module.h>
-
-/* hash_long() */
-#include "../../include/linux/libcfs/libcfs_hash.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_disk.h"
-#include "../include/lustre_fid.h"
-#include "../include/lu_object.h"
-#include "../include/lu_ref.h"
-#include <linux/list.h>
-
-static void lu_object_free(const struct lu_env *env, struct lu_object *o);
-static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
-
-/**
- * Decrease reference counter on object. If last reference is freed, return
- * object to the cache, unless lu_object_is_dying(o) holds. In the latter
- * case, free object immediately.
- */
-void lu_object_put(const struct lu_env *env, struct lu_object *o)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object_header *top;
- struct lu_site *site;
- struct lu_object *orig;
- struct cfs_hash_bd bd;
- const struct lu_fid *fid;
-
- top = o->lo_header;
- site = o->lo_dev->ld_site;
- orig = o;
-
- /*
-	 * Until fids-on-OST is fully implemented, anonymous objects are
-	 * possible in OSP. Such an object isn't listed in the site, so we
-	 * should not remove it from the site.
- */
- fid = lu_object_fid(o);
- if (fid_is_zero(fid)) {
- LASSERT(top->loh_hash.next == NULL
- && top->loh_hash.pprev == NULL);
- LASSERT(list_empty(&top->loh_lru));
- if (!atomic_dec_and_test(&top->loh_ref))
- return;
- list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
- if (o->lo_ops->loo_object_release != NULL)
- o->lo_ops->loo_object_release(env, o);
- }
- lu_object_free(env, orig);
- return;
- }
-
- cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
- bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
-
- if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
- if (lu_object_is_dying(top)) {
-
- /*
-			 * Somebody may be waiting for this; currently it is
-			 * only used for cl_object, see cl_object_put_last().
- */
- wake_up_all(&bkt->lsb_marche_funebre);
- }
- return;
- }
-
- /*
- * When last reference is released, iterate over object
- * layers, and notify them that object is no longer busy.
- */
- list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
- if (o->lo_ops->loo_object_release != NULL)
- o->lo_ops->loo_object_release(env, o);
- }
-
- if (!lu_object_is_dying(top)) {
- LASSERT(list_empty(&top->loh_lru));
- list_add_tail(&top->loh_lru, &bkt->lsb_lru);
- bkt->lsb_lru_len++;
- lprocfs_counter_incr(site->ls_stats, LU_SS_LRU_LEN);
- CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, lru_len: %ld\n",
- o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
- cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
- return;
- }
-
- /*
-	 * If the object is dying (will not be cached), remove it
-	 * from the hash table and LRU.
- *
- * This is done with hash table and LRU lists locked. As the only
- * way to acquire first reference to previously unreferenced
- * object is through hash-table lookup (lu_object_find()),
- * or LRU scanning (lu_site_purge()), that are done under hash-table
- * and LRU lock, no race with concurrent object lookup is possible
- * and we can safely destroy object below.
- */
- if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
- cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
- cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
- /*
-	 * The object was already removed from the hash table and LRU above,
-	 * so it can be freed now.
- */
- lu_object_free(env, orig);
-}
-EXPORT_SYMBOL(lu_object_put);
-
-/**
- * Kill the object and take it out of LRU cache.
- * Currently used by client code for layout change.
- */
-void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
-{
- struct lu_object_header *top;
-
- top = o->lo_header;
- set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
- if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
- struct lu_site *site = o->lo_dev->ld_site;
- struct cfs_hash *obj_hash = site->ls_obj_hash;
- struct cfs_hash_bd bd;
-
- cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
- if (!list_empty(&top->loh_lru)) {
- struct lu_site_bkt_data *bkt;
-
- list_del_init(&top->loh_lru);
- bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
- bkt->lsb_lru_len--;
- lprocfs_counter_decr(site->ls_stats, LU_SS_LRU_LEN);
- }
- cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
- cfs_hash_bd_unlock(obj_hash, &bd, 1);
- }
-}
-EXPORT_SYMBOL(lu_object_unhash);
-
-/**
- * Allocate new object.
- *
- * This follows object creation protocol, described in the comment within
- * struct lu_device_operations definition.
- */
-static struct lu_object *lu_object_alloc(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_object *scan;
- struct lu_object *top;
- struct list_head *layers;
- unsigned int init_mask = 0;
- unsigned int init_flag;
- int clean;
- int result;
-
- /*
- * Create top-level object slice. This will also create
- * lu_object_header.
- */
- top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
- if (top == NULL)
- return ERR_PTR(-ENOMEM);
- if (IS_ERR(top))
- return top;
- /*
- * This is the only place where object fid is assigned. It's constant
- * after this point.
- */
- top->lo_header->loh_fid = *f;
- layers = &top->lo_header->loh_layers;
-
- do {
- /*
- * Call ->loo_object_init() repeatedly, until no more new
- * object slices are created.
- */
- clean = 1;
- init_flag = 1;
- list_for_each_entry(scan, layers, lo_linkage) {
- if (init_mask & init_flag)
- goto next;
- clean = 0;
- scan->lo_header = top->lo_header;
- result = scan->lo_ops->loo_object_init(env, scan, conf);
- if (result != 0) {
- lu_object_free(env, top);
- return ERR_PTR(result);
- }
- init_mask |= init_flag;
-next:
- init_flag <<= 1;
- }
- } while (!clean);
-
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
- if (scan->lo_ops->loo_object_start != NULL) {
- result = scan->lo_ops->loo_object_start(env, scan);
- if (result != 0) {
- lu_object_free(env, top);
- return ERR_PTR(result);
- }
- }
- }
-
- lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
- return top;
-}
-
-/**
- * Free an object.
- */
-static void lu_object_free(const struct lu_env *env, struct lu_object *o)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_site *site;
- struct lu_object *scan;
- struct list_head *layers;
- struct list_head splice;
-
- site = o->lo_dev->ld_site;
- layers = &o->lo_header->loh_layers;
- bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
- /*
- * First call ->loo_object_delete() method to release all resources.
- */
- list_for_each_entry_reverse(scan, layers, lo_linkage) {
- if (scan->lo_ops->loo_object_delete != NULL)
- scan->lo_ops->loo_object_delete(env, scan);
- }
-
- /*
- * Then, splice object layers into stand-alone list, and call
- * ->loo_object_free() on all layers to free memory. Splice is
- * necessary, because lu_object_header is freed together with the
- * top-level slice.
- */
- INIT_LIST_HEAD(&splice);
- list_splice_init(layers, &splice);
- while (!list_empty(&splice)) {
- /*
- * Free layers in bottom-to-top order, so that object header
- * lives as long as possible and ->loo_object_free() methods
- * can look at its contents.
- */
- o = container_of0(splice.prev, struct lu_object, lo_linkage);
- list_del_init(&o->lo_linkage);
- LASSERT(o->lo_ops->loo_object_free != NULL);
- o->lo_ops->loo_object_free(env, o);
- }
-
- if (waitqueue_active(&bkt->lsb_marche_funebre))
- wake_up_all(&bkt->lsb_marche_funebre);
-}
-
-/**
- * Free \a nr objects from the cold end of the site LRU list.
- */
-int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
-{
- struct lu_object_header *h;
- struct lu_object_header *temp;
- struct lu_site_bkt_data *bkt;
- struct cfs_hash_bd bd;
- struct cfs_hash_bd bd2;
- struct list_head dispose;
- int did_sth;
- int start;
- int count;
- int bnr;
- int i;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
- return 0;
-
- INIT_LIST_HEAD(&dispose);
- /*
- * Under LRU list lock, scan LRU list and move unreferenced objects to
- * the dispose list, removing them from LRU and hash table.
- */
- start = s->ls_purge_start;
- bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
- again:
- did_sth = 0;
- cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
- if (i < start)
- continue;
- count = bnr;
- cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
-
- list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
- LASSERT(atomic_read(&h->loh_ref) == 0);
-
- cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
- LASSERT(bd.bd_bucket == bd2.bd_bucket);
-
- cfs_hash_bd_del_locked(s->ls_obj_hash,
- &bd2, &h->loh_hash);
- list_move(&h->loh_lru, &dispose);
- bkt->lsb_lru_len--;
- lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
- if (did_sth == 0)
- did_sth = 1;
-
- if (nr != ~0 && --nr == 0)
- break;
-
- if (count > 0 && --count == 0)
- break;
-
- }
- cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
- cond_resched();
- /*
- * Free everything on the dispose list. This is safe against
- * races due to the reasons described in lu_object_put().
- */
- while (!list_empty(&dispose)) {
- h = container_of0(dispose.next,
- struct lu_object_header, loh_lru);
- list_del_init(&h->loh_lru);
- lu_object_free(env, lu_object_top(h));
- lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
- }
-
- if (nr == 0)
- break;
- }
-
- if (nr != 0 && did_sth && start != 0) {
- start = 0; /* restart from the first bucket */
- goto again;
- }
- /* race on s->ls_purge_start, but nobody cares */
- s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
-
- return nr;
-}
-EXPORT_SYMBOL(lu_site_purge);
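A much-simplified standalone sketch of the purge pattern used above, with a single mutex-protected LRU list instead of hash buckets (all names are illustrative assumptions): entries are moved to a private dispose list under the lock and freed only after the lock is dropped.

#include <pthread.h>
#include <stdlib.h>

struct entry {
	int refs;
	struct entry *next;
};

static struct entry *lru;		/* cold end at the head; refs == 0 for LRU members */
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* free up to nr unreferenced entries; returns how many were still wanted */
static int lru_purge(int nr)
{
	struct entry *dispose = NULL, *e;

	pthread_mutex_lock(&lru_lock);
	while (nr > 0 && lru && lru->refs == 0) {
		e = lru;
		lru = e->next;
		e->next = dispose;	/* unlink from the LRU, park on dispose */
		dispose = e;
		nr--;
	}
	pthread_mutex_unlock(&lru_lock);

	while (dispose) {		/* actual freeing happens with the lock dropped */
		e = dispose;
		dispose = e->next;
		free(e);
	}
	return nr;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		if (!e)
			return 1;
		e->next = lru;
		lru = e;
	}
	return lru_purge(2) == 0 ? 0 : 1;	/* two freed, nothing left wanted */
}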
-
-/*
- * Object printing.
- *
- * The code below has to jump through certain hoops to output an object
- * description into a libcfs_debug_msg-based log. The problem is that
- * lu_object_print() composes the object description from strings that are
- * parts of _lines_ of output (i.e., strings not terminated by a newline).
- * This doesn't fit very well into the libcfs_debug_msg() interface, which
- * assumes that each message supplied to it is a self-contained output line.
- *
- * To work around this, strings are collected in a temporary buffer
- * (implemented as a value of lu_cdebug_key key), until terminating newline
- * character is detected.
- *
- */
-
-enum {
- /**
- * Maximal line size.
- *
- * XXX overflow is not handled correctly.
- */
- LU_CDEBUG_LINE = 512
-};
-
-struct lu_cdebug_data {
- /**
- * Temporary buffer.
- */
- char lck_area[LU_CDEBUG_LINE];
-};
-
-/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
-LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
-
-/**
- * Key, holding temporary buffer. This key is registered very early by
- * lu_global_init().
- */
-struct lu_context_key lu_global_key = {
- .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
- LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
- .lct_init = lu_global_key_init,
- .lct_fini = lu_global_key_fini
-};
-
-/**
- * Printer function emitting messages through libcfs_debug_msg().
- */
-int lu_cdebug_printer(const struct lu_env *env,
- void *cookie, const char *format, ...)
-{
- struct libcfs_debug_msg_data *msgdata = cookie;
- struct lu_cdebug_data *key;
- int used;
- int complete;
- va_list args;
-
- va_start(args, format);
-
- key = lu_context_key_get(&env->le_ctx, &lu_global_key);
- LASSERT(key != NULL);
-
- used = strlen(key->lck_area);
- complete = format[strlen(format) - 1] == '\n';
- /*
- * Append new chunk to the buffer.
- */
- vsnprintf(key->lck_area + used,
- ARRAY_SIZE(key->lck_area) - used, format, args);
- if (complete) {
- if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
- libcfs_debug_msg(msgdata, "%s", key->lck_area);
- key->lck_area[0] = 0;
- }
- va_end(args);
- return 0;
-}
-EXPORT_SYMBOL(lu_cdebug_printer);
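A standalone userspace sketch of the accumulate-until-newline approach described in the comment block above (assumed stand-ins: stdout instead of libcfs_debug_msg(), a local struct instead of the lu_global_key value):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define LINE_MAX_LEN 512

struct line_buf {
	char area[LINE_MAX_LEN];
};

static void line_printf(struct line_buf *b, const char *fmt, ...)
{
	size_t used = strlen(b->area);
	int complete = fmt[strlen(fmt) - 1] == '\n';
	va_list args;

	va_start(args, fmt);
	vsnprintf(b->area + used, sizeof(b->area) - used, fmt, args);
	va_end(args);

	if (complete) {
		fputs(b->area, stdout);	/* stands in for libcfs_debug_msg() */
		b->area[0] = '\0';
	}
}

int main(void)
{
	struct line_buf b = { .area = "" };

	line_printf(&b, "header@%p[", (void *)&b);
	line_printf(&b, "%d refs", 1);
	line_printf(&b, "]\n");		/* the whole line is emitted only here */
	return 0;
}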
-
-/**
- * Print object header.
- */
-void lu_object_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct lu_object_header *hdr)
-{
- (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
- hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
- PFID(&hdr->loh_fid),
- hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
- list_empty((struct list_head *)&hdr->loh_lru) ? \
- "" : " lru",
- hdr->loh_attr & LOHA_EXISTS ? " exist":"");
-}
-EXPORT_SYMBOL(lu_object_header_print);
-
-/**
- * Print human readable representation of the \a o to the \a printer.
- */
-void lu_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct lu_object *o)
-{
- static const char ruler[] = "........................................";
- struct lu_object_header *top;
- int depth = 4;
-
- top = o->lo_header;
- lu_object_header_print(env, cookie, printer, top);
- (*printer)(env, cookie, "{\n");
-
- list_for_each_entry(o, &top->loh_layers, lo_linkage) {
- /*
- * print `.' \a depth times followed by type name and address
- */
- (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
- o->lo_dev->ld_type->ldt_name, o);
-
- if (o->lo_ops->loo_object_print != NULL)
- (*o->lo_ops->loo_object_print)(env, cookie, printer, o);
-
- (*printer)(env, cookie, "\n");
- }
-
- (*printer)(env, cookie, "} header@%p\n", top);
-}
-EXPORT_SYMBOL(lu_object_print);
-
-static struct lu_object *htable_lookup(struct lu_site *s,
- struct cfs_hash_bd *bd,
- const struct lu_fid *f,
- wait_queue_t *waiter,
- __u64 *version)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object_header *h;
- struct hlist_node *hnode;
- __u64 ver = cfs_hash_bd_version_get(bd);
-
- if (*version == ver)
- return ERR_PTR(-ENOENT);
-
- *version = ver;
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
-	/* cfs_hash_bd_peek_locked is somewhat of an "internal" function
-	 * of cfs_hash; it doesn't take a refcount on the object. */
- hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
- if (hnode == NULL) {
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
- return ERR_PTR(-ENOENT);
- }
-
- h = container_of0(hnode, struct lu_object_header, loh_hash);
- if (likely(!lu_object_is_dying(h))) {
- cfs_hash_get(s->ls_obj_hash, hnode);
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
- if (!list_empty(&h->loh_lru)) {
- list_del_init(&h->loh_lru);
- bkt->lsb_lru_len--;
- lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
- }
- return lu_object_top(h);
- }
-
- /*
-	 * Lookup found an object being destroyed; this object cannot be
-	 * returned (to ensure that references to dying objects are eventually
-	 * drained) and, moreover, the lookup has to wait until the object is
-	 * freed.
- */
-
- init_waitqueue_entry(waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, waiter);
- set_current_state(TASK_UNINTERRUPTIBLE);
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
- return ERR_PTR(-EAGAIN);
-}
-
-/**
- * Search cache for an object with the fid \a f. If such object is found,
- * return it. Otherwise, create new object, insert it into cache and return
- * it. In any case, additional reference is acquired on the returned object.
- */
-struct lu_object *lu_object_find(const struct lu_env *env,
- struct lu_device *dev, const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
-}
-EXPORT_SYMBOL(lu_object_find);
-
-static struct lu_object *lu_object_new(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_object *o;
- struct cfs_hash *hs;
- struct cfs_hash_bd bd;
-
- o = lu_object_alloc(env, dev, f, conf);
- if (IS_ERR(o))
- return o;
-
- hs = dev->ld_site->ls_obj_hash;
- cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
- cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
- cfs_hash_bd_unlock(hs, &bd, 1);
- return o;
-}
-
-/**
- * Core logic of lu_object_find*() functions.
- */
-static struct lu_object *lu_object_find_try(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf,
- wait_queue_t *waiter)
-{
- struct lu_object *o;
- struct lu_object *shadow;
- struct lu_site *s;
- struct cfs_hash *hs;
- struct cfs_hash_bd bd;
- __u64 version = 0;
-
- /*
- * This uses standard index maintenance protocol:
- *
- * - search index under lock, and return object if found;
- * - otherwise, unlock index, allocate new object;
- * - lock index and search again;
- * - if nothing is found (usual case), insert newly created
- * object into index;
- * - otherwise (race: other thread inserted object), free
- * object just allocated.
- * - unlock index;
- * - return object.
- *
-	 * For the "LOC_F_NEW" case, we are sure the object is newly created,
-	 * so it is unnecessary to perform lookup-alloc-lookup-insert; instead,
-	 * just alloc and insert directly.
- *
- * If dying object is found during index search, add @waiter to the
- * site wait-queue and return ERR_PTR(-EAGAIN).
- */
- if (conf != NULL && conf->loc_flags & LOC_F_NEW)
- return lu_object_new(env, dev, f, conf);
-
- s = dev->ld_site;
- hs = s->ls_obj_hash;
- cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
- o = htable_lookup(s, &bd, f, waiter, &version);
- cfs_hash_bd_unlock(hs, &bd, 1);
- if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
- return o;
-
- /*
- * Allocate new object. This may result in rather complicated
- * operations, including fld queries, inode loading, etc.
- */
- o = lu_object_alloc(env, dev, f, conf);
- if (IS_ERR(o))
- return o;
-
- LASSERT(lu_fid_eq(lu_object_fid(o), f));
-
- cfs_hash_bd_lock(hs, &bd, 1);
-
- shadow = htable_lookup(s, &bd, f, waiter, &version);
- if (likely(PTR_ERR(shadow) == -ENOENT)) {
- cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
- cfs_hash_bd_unlock(hs, &bd, 1);
- return o;
- }
-
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
- cfs_hash_bd_unlock(hs, &bd, 1);
- lu_object_free(env, o);
- return shadow;
-}
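The index-maintenance protocol described in the comment above is a generic pattern; here is a compact standalone sketch using a mutex-protected list in place of cfs_hash (every name is an illustrative assumption, and the dying-object wait path is omitted):

#include <pthread.h>
#include <stdlib.h>

struct obj {
	unsigned long key;
	int refs;
	struct obj *next;
};

static struct obj *table;		/* stand-in for the FID hash table */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *lookup_locked(unsigned long key)
{
	struct obj *o;

	for (o = table; o; o = o->next)
		if (o->key == key) {
			o->refs++;
			return o;
		}
	return NULL;
}

static struct obj *obj_find(unsigned long key)
{
	struct obj *o, *shadow;

	/* 1. search the index under lock; return the object if found */
	pthread_mutex_lock(&table_lock);
	o = lookup_locked(key);
	pthread_mutex_unlock(&table_lock);
	if (o)
		return o;

	/* 2. allocate outside the lock (possibly slow and blocking) */
	o = calloc(1, sizeof(*o));
	if (!o)
		return NULL;
	o->key = key;
	o->refs = 1;

	/* 3. search again; insert if still absent, otherwise free ours */
	pthread_mutex_lock(&table_lock);
	shadow = lookup_locked(key);
	if (!shadow) {
		o->next = table;
		table = o;
		shadow = o;
	} else {
		free(o);		/* lost the race: another thread inserted first */
	}
	pthread_mutex_unlock(&table_lock);
	return shadow;
}

int main(void)
{
	struct obj *a = obj_find(42);
	struct obj *b = obj_find(42);

	return (a && a == b) ? 0 : 1;	/* second lookup hits the cached object */
}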
-
-/**
- * Much like lu_object_find(), but top level device of object is specifically
- * \a dev rather than top level device of the site. This interface allows
- * objects of different "stacking" to be created within the same site.
- */
-struct lu_object *lu_object_find_at(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object *obj;
- wait_queue_t wait;
-
- while (1) {
- obj = lu_object_find_try(env, dev, f, conf, &wait);
- if (obj != ERR_PTR(-EAGAIN))
- return obj;
- /*
- * lu_object_find_try() already added waiter into the
- * wait queue.
- */
- schedule();
- bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
- remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
- }
-}
-EXPORT_SYMBOL(lu_object_find_at);
-
-/**
- * Find object with given fid, and return its slice belonging to given device.
- */
-struct lu_object *lu_object_find_slice(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_object *top;
- struct lu_object *obj;
-
- top = lu_object_find(env, dev, f, conf);
- if (!IS_ERR(top)) {
- obj = lu_object_locate(top->lo_header, dev->ld_type);
- if (obj == NULL)
- lu_object_put(env, top);
- } else
- obj = top;
- return obj;
-}
-EXPORT_SYMBOL(lu_object_find_slice);
-
-/**
- * Global list of all device types.
- */
-static LIST_HEAD(lu_device_types);
-
-int lu_device_type_init(struct lu_device_type *ldt)
-{
- int result = 0;
-
- INIT_LIST_HEAD(&ldt->ldt_linkage);
- if (ldt->ldt_ops->ldto_init)
- result = ldt->ldt_ops->ldto_init(ldt);
- if (result == 0)
- list_add(&ldt->ldt_linkage, &lu_device_types);
- return result;
-}
-EXPORT_SYMBOL(lu_device_type_init);
-
-void lu_device_type_fini(struct lu_device_type *ldt)
-{
- list_del_init(&ldt->ldt_linkage);
- if (ldt->ldt_ops->ldto_fini)
- ldt->ldt_ops->ldto_fini(ldt);
-}
-EXPORT_SYMBOL(lu_device_type_fini);
-
-void lu_types_stop(void)
-{
- struct lu_device_type *ldt;
-
- list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
- if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
- ldt->ldt_ops->ldto_stop(ldt);
- }
-}
-EXPORT_SYMBOL(lu_types_stop);
-
-/**
- * Global list of all sites on this node
- */
-static LIST_HEAD(lu_sites);
-static DEFINE_MUTEX(lu_sites_guard);
-
-/**
- * Global environment used by site shrinker.
- */
-static struct lu_env lu_shrink_env;
-
-struct lu_site_print_arg {
- struct lu_env *lsp_env;
- void *lsp_cookie;
- lu_printer_t lsp_printer;
-};
-
-static int
-lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
- struct hlist_node *hnode, void *data)
-{
- struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- if (!list_empty(&h->loh_layers)) {
- const struct lu_object *o;
-
- o = lu_object_top(h);
- lu_object_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, o);
- } else {
- lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
- arg->lsp_printer, h);
- }
- return 0;
-}
-
-/**
- * Print all objects in \a s.
- */
-void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
- lu_printer_t printer)
-{
- struct lu_site_print_arg arg = {
- .lsp_env = (struct lu_env *)env,
- .lsp_cookie = cookie,
- .lsp_printer = printer,
- };
-
- cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
-}
-EXPORT_SYMBOL(lu_site_print);
-
-enum {
- LU_CACHE_PERCENT_MAX = 50,
- LU_CACHE_PERCENT_DEFAULT = 20
-};
-
-static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
-module_param(lu_cache_percent, int, 0644);
-MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
-
-/**
- * Return desired hash table order.
- */
-static int lu_htable_order(void)
-{
- unsigned long cache_size;
- int bits;
-
- /*
- * Calculate hash table size, assuming that we want reasonable
- * performance when 20% of total memory is occupied by cache of
- * lu_objects.
- *
-	 * The size of an lu_object is (arbitrarily) taken as 1K (inode included).
- */
- cache_size = totalram_pages;
-
-#if BITS_PER_LONG == 32
- /* limit hashtable size for lowmem systems to low RAM */
- if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
-		cache_size = (1 << (30 - PAGE_CACHE_SHIFT)) * 3 / 4;
-#endif
-
- /* clear off unreasonable cache setting. */
- if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
- CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
- lu_cache_percent, LU_CACHE_PERCENT_MAX,
- LU_CACHE_PERCENT_DEFAULT);
-
- lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
- }
- cache_size = cache_size / 100 * lu_cache_percent *
- (PAGE_CACHE_SIZE / 1024);
-
- for (bits = 1; (1 << bits) < cache_size; ++bits) {
- ;
- }
- return bits;
-}
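A worked example of the sizing above, under assumed numbers (4 GiB of RAM, 4 KiB pages, lu_cache_percent = 20): the budget comes out to 838800 notional 1K objects, so the loop settles on bits = 20, since 2^20 = 1048576 is the first power of two not below it. A userspace sketch of the same arithmetic (htable_order is an assumed name):

#include <stdio.h>

static int htable_order(unsigned long total_pages, unsigned int page_size,
			unsigned int cache_percent)
{
	/* cache budget, expressed in (roughly) 1K-sized lu_objects */
	unsigned long cache_size =
		total_pages / 100 * cache_percent * (page_size / 1024);
	int bits;

	for (bits = 1; (1UL << bits) < cache_size; bits++)
		;
	return bits;
}

int main(void)
{
	/* 4 GiB of RAM in 4 KiB pages */
	printf("%d\n", htable_order(1048576, 4096, 20));	/* prints 20 */
	return 0;
}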
-
-static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
- const void *key, unsigned mask)
-{
- struct lu_fid *fid = (struct lu_fid *)key;
- __u32 hash;
-
- hash = fid_flatten32(fid);
- hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- hash = hash_long(hash, hs->hs_bkt_bits);
-
- /* give me another random factor */
- hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
-
- hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
- hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
-
- return hash & mask;
-}
-
-static void *lu_obj_hop_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct lu_object_header, loh_hash);
-}
-
-static void *lu_obj_hop_key(struct hlist_node *hnode)
-{
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- return &h->loh_fid;
-}
-
-static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
-}
-
-static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct lu_object_header *h;
-
- h = hlist_entry(hnode, struct lu_object_header, loh_hash);
- atomic_inc(&h->loh_ref);
-}
-
-static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
-	LBUG(); /* this should never be called */
-}
-
-cfs_hash_ops_t lu_site_hash_ops = {
- .hs_hash = lu_obj_hop_hash,
- .hs_key = lu_obj_hop_key,
- .hs_keycmp = lu_obj_hop_keycmp,
- .hs_object = lu_obj_hop_object,
- .hs_get = lu_obj_hop_get,
- .hs_put_locked = lu_obj_hop_put_locked,
-};
-
-void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
-{
- spin_lock(&s->ls_ld_lock);
- if (list_empty(&d->ld_linkage))
- list_add(&d->ld_linkage, &s->ls_ld_linkage);
- spin_unlock(&s->ls_ld_lock);
-}
-EXPORT_SYMBOL(lu_dev_add_linkage);
-
-/**
- * Initialize site \a s, with \a d as the top level device.
- */
-#define LU_SITE_BITS_MIN 12
-#define LU_SITE_BITS_MAX 24
-/**
- * 256 buckets in total; we don't want too many buckets because they:
- * - consume too much memory
- * - lead to unbalanced LRU lists
- */
-#define LU_SITE_BKT_BITS 8
-
-int lu_site_init(struct lu_site *s, struct lu_device *top)
-{
- struct lu_site_bkt_data *bkt;
- struct cfs_hash_bd bd;
- char name[16];
- int bits;
- int i;
-
- memset(s, 0, sizeof(*s));
- bits = lu_htable_order();
- snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
- for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
- bits >= LU_SITE_BITS_MIN; bits--) {
- s->ls_obj_hash = cfs_hash_create(name, bits, bits,
- bits - LU_SITE_BKT_BITS,
- sizeof(*bkt), 0, 0,
- &lu_site_hash_ops,
- CFS_HASH_SPIN_BKTLOCK |
- CFS_HASH_NO_ITEMREF |
- CFS_HASH_DEPTH |
- CFS_HASH_ASSERT_EMPTY);
- if (s->ls_obj_hash != NULL)
- break;
- }
-
- if (s->ls_obj_hash == NULL) {
- CERROR("failed to create lu_site hash with bits: %d\n", bits);
- return -ENOMEM;
- }
-
- cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- INIT_LIST_HEAD(&bkt->lsb_lru);
- init_waitqueue_head(&bkt->lsb_marche_funebre);
- }
-
- s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
- if (s->ls_stats == NULL) {
- cfs_hash_putref(s->ls_obj_hash);
- s->ls_obj_hash = NULL;
- return -ENOMEM;
- }
-
- lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
- 0, "created", "created");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
- 0, "cache_hit", "cache_hit");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
- 0, "cache_miss", "cache_miss");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
- 0, "cache_race", "cache_race");
- lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
- 0, "cache_death_race", "cache_death_race");
- lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
- 0, "lru_purged", "lru_purged");
- /*
-	 * Unlike other counters, lru_len can be decremented, so we
-	 * need lc_sum instead of just lc_count.
- */
- lprocfs_counter_init(s->ls_stats, LU_SS_LRU_LEN,
- LPROCFS_CNTR_AVGMINMAX, "lru_len", "lru_len");
-
- INIT_LIST_HEAD(&s->ls_linkage);
- s->ls_top_dev = top;
- top->ld_site = s;
- lu_device_get(top);
- lu_ref_add(&top->ld_reference, "site-top", s);
-
- INIT_LIST_HEAD(&s->ls_ld_linkage);
- spin_lock_init(&s->ls_ld_lock);
-
- lu_dev_add_linkage(s, top);
-
- return 0;
-}
-EXPORT_SYMBOL(lu_site_init);
-
-/**
- * Finalize \a s and release its resources.
- */
-void lu_site_fini(struct lu_site *s)
-{
- mutex_lock(&lu_sites_guard);
- list_del_init(&s->ls_linkage);
- mutex_unlock(&lu_sites_guard);
-
- if (s->ls_obj_hash != NULL) {
- cfs_hash_putref(s->ls_obj_hash);
- s->ls_obj_hash = NULL;
- }
-
- if (s->ls_top_dev != NULL) {
- s->ls_top_dev->ld_site = NULL;
- lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
- lu_device_put(s->ls_top_dev);
- s->ls_top_dev = NULL;
- }
-
- if (s->ls_stats != NULL)
- lprocfs_free_stats(&s->ls_stats);
-}
-EXPORT_SYMBOL(lu_site_fini);
-
-/**
- * Called when initialization of stack for this site is completed.
- */
-int lu_site_init_finish(struct lu_site *s)
-{
- int result;
- mutex_lock(&lu_sites_guard);
- result = lu_context_refill(&lu_shrink_env.le_ctx);
- if (result == 0)
- list_add(&s->ls_linkage, &lu_sites);
- mutex_unlock(&lu_sites_guard);
- return result;
-}
-EXPORT_SYMBOL(lu_site_init_finish);
-
-/**
- * Acquire additional reference on device \a d
- */
-void lu_device_get(struct lu_device *d)
-{
- atomic_inc(&d->ld_ref);
-}
-EXPORT_SYMBOL(lu_device_get);
-
-/**
- * Release reference on device \a d.
- */
-void lu_device_put(struct lu_device *d)
-{
- LASSERT(atomic_read(&d->ld_ref) > 0);
- atomic_dec(&d->ld_ref);
-}
-EXPORT_SYMBOL(lu_device_put);
-
-/**
- * Initialize device \a d of type \a t.
- */
-int lu_device_init(struct lu_device *d, struct lu_device_type *t)
-{
- if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
- t->ldt_ops->ldto_start(t);
- memset(d, 0, sizeof(*d));
- atomic_set(&d->ld_ref, 0);
- d->ld_type = t;
- lu_ref_init(&d->ld_reference);
- INIT_LIST_HEAD(&d->ld_linkage);
- return 0;
-}
-EXPORT_SYMBOL(lu_device_init);
-
-/**
- * Finalize device \a d.
- */
-void lu_device_fini(struct lu_device *d)
-{
- struct lu_device_type *t;
-
- t = d->ld_type;
- if (d->ld_obd != NULL) {
- d->ld_obd->obd_lu_dev = NULL;
- d->ld_obd = NULL;
- }
-
- lu_ref_fini(&d->ld_reference);
- LASSERTF(atomic_read(&d->ld_ref) == 0,
- "Refcount is %u\n", atomic_read(&d->ld_ref));
- LASSERT(t->ldt_device_nr > 0);
- if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
- t->ldt_ops->ldto_stop(t);
-}
-EXPORT_SYMBOL(lu_device_fini);
-
-/**
- * Initialize object \a o that is part of compound object \a h and was created
- * by device \a d.
- */
-int lu_object_init(struct lu_object *o, struct lu_object_header *h,
- struct lu_device *d)
-{
- memset(o, 0, sizeof(*o));
- o->lo_header = h;
- o->lo_dev = d;
- lu_device_get(d);
- lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
- INIT_LIST_HEAD(&o->lo_linkage);
-
- return 0;
-}
-EXPORT_SYMBOL(lu_object_init);
-
-/**
- * Finalize object and release its resources.
- */
-void lu_object_fini(struct lu_object *o)
-{
- struct lu_device *dev = o->lo_dev;
-
- LASSERT(list_empty(&o->lo_linkage));
-
- if (dev != NULL) {
- lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
- "lu_object", o);
- lu_device_put(dev);
- o->lo_dev = NULL;
- }
-}
-EXPORT_SYMBOL(lu_object_fini);
-
-/**
- * Add object \a o as first layer of compound object \a h
- *
- * This is typically called by the ->ldo_object_alloc() method of top-level
- * device.
- */
-void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
-{
- list_move(&o->lo_linkage, &h->loh_layers);
-}
-EXPORT_SYMBOL(lu_object_add_top);
-
-/**
- * Add object \a o as a layer of compound object, going after \a before.
- *
- * This is typically called by the ->ldo_object_alloc() method of \a
- * before->lo_dev.
- */
-void lu_object_add(struct lu_object *before, struct lu_object *o)
-{
- list_move(&o->lo_linkage, &before->lo_linkage);
-}
-EXPORT_SYMBOL(lu_object_add);
-
-/**
- * Initialize compound object.
- */
-int lu_object_header_init(struct lu_object_header *h)
-{
- memset(h, 0, sizeof(*h));
- atomic_set(&h->loh_ref, 1);
- INIT_HLIST_NODE(&h->loh_hash);
- INIT_LIST_HEAD(&h->loh_lru);
- INIT_LIST_HEAD(&h->loh_layers);
- lu_ref_init(&h->loh_reference);
- return 0;
-}
-EXPORT_SYMBOL(lu_object_header_init);
-
-/**
- * Finalize compound object.
- */
-void lu_object_header_fini(struct lu_object_header *h)
-{
- LASSERT(list_empty(&h->loh_layers));
- LASSERT(list_empty(&h->loh_lru));
- LASSERT(hlist_unhashed(&h->loh_hash));
- lu_ref_fini(&h->loh_reference);
-}
-EXPORT_SYMBOL(lu_object_header_fini);
-
-/**
- * Given a compound object, find its slice, corresponding to the device type
- * \a dtype.
- */
-struct lu_object *lu_object_locate(struct lu_object_header *h,
- const struct lu_device_type *dtype)
-{
- struct lu_object *o;
-
- list_for_each_entry(o, &h->loh_layers, lo_linkage) {
- if (o->lo_dev->ld_type == dtype)
- return o;
- }
- return NULL;
-}
-EXPORT_SYMBOL(lu_object_locate);
-
-
-
-/**
- * Finalize and free devices in the device stack.
- *
- * Finalize device stack by purging object cache, and calling
- * lu_device_type_operations::ldto_device_fini() and
- * lu_device_type_operations::ldto_device_free() on all devices in the stack.
- */
-void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
-{
- struct lu_site *site = top->ld_site;
- struct lu_device *scan;
- struct lu_device *next;
-
- lu_site_purge(env, site, ~0);
- for (scan = top; scan != NULL; scan = next) {
- next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
- lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
- lu_device_put(scan);
- }
-
- /* purge again. */
- lu_site_purge(env, site, ~0);
-
- for (scan = top; scan != NULL; scan = next) {
- const struct lu_device_type *ldt = scan->ld_type;
- struct obd_type *type;
-
- next = ldt->ldt_ops->ldto_device_free(env, scan);
- type = ldt->ldt_obd_type;
- if (type != NULL) {
- type->typ_refcnt--;
- class_put_type(type);
- }
- }
-}
-EXPORT_SYMBOL(lu_stack_fini);
-
-enum {
- /**
- * Maximal number of tld slots.
- */
- LU_CONTEXT_KEY_NR = 40
-};
-
-static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
-
-static DEFINE_SPINLOCK(lu_keys_guard);
-
-/**
- * Global counter incremented whenever key is registered, unregistered,
 - * revived or quiesced. This is used to avoid unnecessary calls to
- * lu_context_refill(). No locking is provided, as initialization and shutdown
- * are supposed to be externally serialized.
- */
-static unsigned key_set_version;
-
-/**
- * Register new key.
- */
-int lu_context_key_register(struct lu_context_key *key)
-{
- int result;
- int i;
-
- LASSERT(key->lct_init != NULL);
- LASSERT(key->lct_fini != NULL);
- LASSERT(key->lct_tags != 0);
-
- result = -ENFILE;
- spin_lock(&lu_keys_guard);
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- if (lu_keys[i] == NULL) {
- key->lct_index = i;
- atomic_set(&key->lct_used, 1);
- lu_keys[i] = key;
- lu_ref_init(&key->lct_reference);
- result = 0;
- ++key_set_version;
- break;
- }
- }
- spin_unlock(&lu_keys_guard);
- return result;
-}
-EXPORT_SYMBOL(lu_context_key_register);
-
-static void key_fini(struct lu_context *ctx, int index)
-{
- if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
- struct lu_context_key *key;
-
- key = lu_keys[index];
- LASSERT(key != NULL);
- LASSERT(key->lct_fini != NULL);
- LASSERT(atomic_read(&key->lct_used) > 1);
-
- key->lct_fini(ctx, key, ctx->lc_value[index]);
- lu_ref_del(&key->lct_reference, "ctx", ctx);
- atomic_dec(&key->lct_used);
-
- if ((ctx->lc_tags & LCT_NOREF) == 0) {
-#ifdef CONFIG_MODULE_UNLOAD
- LINVRNT(module_refcount(key->lct_owner) > 0);
-#endif
- module_put(key->lct_owner);
- }
- ctx->lc_value[index] = NULL;
- }
-}
-
-/**
- * Deregister key.
- */
-void lu_context_key_degister(struct lu_context_key *key)
-{
- LASSERT(atomic_read(&key->lct_used) >= 1);
- LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
-
- lu_context_key_quiesce(key);
-
- ++key_set_version;
- spin_lock(&lu_keys_guard);
- key_fini(&lu_shrink_env.le_ctx, key->lct_index);
- if (lu_keys[key->lct_index]) {
- lu_keys[key->lct_index] = NULL;
- lu_ref_fini(&key->lct_reference);
- }
- spin_unlock(&lu_keys_guard);
-
- LASSERTF(atomic_read(&key->lct_used) == 1,
- "key has instances: %d\n",
- atomic_read(&key->lct_used));
-}
-EXPORT_SYMBOL(lu_context_key_degister);
-
-/**
- * Register a number of keys. This has to be called after all keys have been
- * initialized by a call to LU_CONTEXT_KEY_INIT().
- */
-int lu_context_key_register_many(struct lu_context_key *k, ...)
-{
- struct lu_context_key *key = k;
- va_list args;
- int result;
-
- va_start(args, k);
- do {
- result = lu_context_key_register(key);
- if (result)
- break;
- key = va_arg(args, struct lu_context_key *);
- } while (key != NULL);
- va_end(args);
-
- if (result != 0) {
- va_start(args, k);
- while (k != key) {
- lu_context_key_degister(k);
- k = va_arg(args, struct lu_context_key *);
- }
- va_end(args);
- }
-
- return result;
-}
-EXPORT_SYMBOL(lu_context_key_register_many);
-
-/**
- * De-register a number of keys. This is a dual to
- * lu_context_key_register_many().
- */
-void lu_context_key_degister_many(struct lu_context_key *k, ...)
-{
- va_list args;
-
- va_start(args, k);
- do {
- lu_context_key_degister(k);
- k = va_arg(args, struct lu_context_key*);
- } while (k != NULL);
- va_end(args);
-}
-EXPORT_SYMBOL(lu_context_key_degister_many);
-
-/**
- * Revive a number of keys.
- */
-void lu_context_key_revive_many(struct lu_context_key *k, ...)
-{
- va_list args;
-
- va_start(args, k);
- do {
- lu_context_key_revive(k);
- k = va_arg(args, struct lu_context_key*);
- } while (k != NULL);
- va_end(args);
-}
-EXPORT_SYMBOL(lu_context_key_revive_many);
-
-/**
- * Quiesce a number of keys.
- */
-void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
-{
- va_list args;
-
- va_start(args, k);
- do {
- lu_context_key_quiesce(k);
- k = va_arg(args, struct lu_context_key*);
- } while (k != NULL);
- va_end(args);
-}
-EXPORT_SYMBOL(lu_context_key_quiesce_many);
-
-/**
- * Return value associated with key \a key in context \a ctx.
- */
-void *lu_context_key_get(const struct lu_context *ctx,
- const struct lu_context_key *key)
-{
- LINVRNT(ctx->lc_state == LCS_ENTERED);
- LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
- LASSERT(lu_keys[key->lct_index] == key);
- return ctx->lc_value[key->lct_index];
-}
-EXPORT_SYMBOL(lu_context_key_get);
-
-/**
- * List of remembered contexts. XXX document me.
- */
-static LIST_HEAD(lu_context_remembered);
-
-/**
- * Destroy \a key in all remembered contexts. This is used to destroy key
- * values in "shared" contexts (like service threads), when a module owning
- * the key is about to be unloaded.
- */
-void lu_context_key_quiesce(struct lu_context_key *key)
-{
- struct lu_context *ctx;
-
- if (!(key->lct_tags & LCT_QUIESCENT)) {
- /*
- * XXX layering violation.
- */
- key->lct_tags |= LCT_QUIESCENT;
- /*
- * XXX memory barrier has to go here.
- */
- spin_lock(&lu_keys_guard);
- list_for_each_entry(ctx, &lu_context_remembered,
- lc_remember)
- key_fini(ctx, key->lct_index);
- spin_unlock(&lu_keys_guard);
- ++key_set_version;
- }
-}
-EXPORT_SYMBOL(lu_context_key_quiesce);
-
-void lu_context_key_revive(struct lu_context_key *key)
-{
- key->lct_tags &= ~LCT_QUIESCENT;
- ++key_set_version;
-}
-EXPORT_SYMBOL(lu_context_key_revive);
-
-static void keys_fini(struct lu_context *ctx)
-{
- int i;
-
- if (ctx->lc_value == NULL)
- return;
-
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
- key_fini(ctx, i);
-
- kfree(ctx->lc_value);
- ctx->lc_value = NULL;
-}
-
-static int keys_fill(struct lu_context *ctx)
-{
- int i;
-
- LINVRNT(ctx->lc_value != NULL);
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- struct lu_context_key *key;
-
- key = lu_keys[i];
- if (ctx->lc_value[i] == NULL && key != NULL &&
- (key->lct_tags & ctx->lc_tags) &&
- /*
- * Don't create values for a LCT_QUIESCENT key, as this
- * will pin module owning a key.
- */
- !(key->lct_tags & LCT_QUIESCENT)) {
- void *value;
-
- LINVRNT(key->lct_init != NULL);
- LINVRNT(key->lct_index == i);
-
- value = key->lct_init(ctx, key);
- if (IS_ERR(value))
- return PTR_ERR(value);
-
- if (!(ctx->lc_tags & LCT_NOREF))
- try_module_get(key->lct_owner);
- lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
- atomic_inc(&key->lct_used);
- /*
-			 * This is the only place in the code where an
-			 * element of the ctx->lc_value[] array is set to a
-			 * non-NULL value.
- */
- ctx->lc_value[i] = value;
- if (key->lct_exit != NULL)
- ctx->lc_tags |= LCT_HAS_EXIT;
- }
- ctx->lc_version = key_set_version;
- }
- return 0;
-}
-
-static int keys_init(struct lu_context *ctx)
-{
- ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]),
- GFP_NOFS);
- if (likely(ctx->lc_value != NULL))
- return keys_fill(ctx);
-
- return -ENOMEM;
-}
-
-/**
- * Initialize context data-structure. Create values for all keys.
- */
-int lu_context_init(struct lu_context *ctx, __u32 tags)
-{
- int rc;
-
- memset(ctx, 0, sizeof(*ctx));
- ctx->lc_state = LCS_INITIALIZED;
- ctx->lc_tags = tags;
- if (tags & LCT_REMEMBER) {
- spin_lock(&lu_keys_guard);
- list_add(&ctx->lc_remember, &lu_context_remembered);
- spin_unlock(&lu_keys_guard);
- } else {
- INIT_LIST_HEAD(&ctx->lc_remember);
- }
-
- rc = keys_init(ctx);
- if (rc != 0)
- lu_context_fini(ctx);
-
- return rc;
-}
-EXPORT_SYMBOL(lu_context_init);
-
-/**
- * Finalize context data-structure. Destroy key values.
- */
-void lu_context_fini(struct lu_context *ctx)
-{
- LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
- ctx->lc_state = LCS_FINALIZED;
-
- if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
- LASSERT(list_empty(&ctx->lc_remember));
- keys_fini(ctx);
-
- } else { /* could race with key degister */
- spin_lock(&lu_keys_guard);
- keys_fini(ctx);
- list_del_init(&ctx->lc_remember);
- spin_unlock(&lu_keys_guard);
- }
-}
-EXPORT_SYMBOL(lu_context_fini);
-
-/**
- * Called before entering context.
- */
-void lu_context_enter(struct lu_context *ctx)
-{
- LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
- ctx->lc_state = LCS_ENTERED;
-}
-EXPORT_SYMBOL(lu_context_enter);
-
-/**
- * Called after exiting from \a ctx
- */
-void lu_context_exit(struct lu_context *ctx)
-{
- int i;
-
- LINVRNT(ctx->lc_state == LCS_ENTERED);
- ctx->lc_state = LCS_LEFT;
- if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
- for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- if (ctx->lc_value[i] != NULL) {
- struct lu_context_key *key;
-
- key = lu_keys[i];
- LASSERT(key != NULL);
- if (key->lct_exit != NULL)
- key->lct_exit(ctx,
- key, ctx->lc_value[i]);
- }
- }
- }
-}
-EXPORT_SYMBOL(lu_context_exit);
-
-/**
- * Allocate for context all missing keys that were registered after context
- * creation. key_set_version is only changed in rare cases when modules
- * are loaded and removed.
- */
-int lu_context_refill(struct lu_context *ctx)
-{
- return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
-}
-EXPORT_SYMBOL(lu_context_refill);
-
-/**
- * lu_ctx_tags/lu_ses_tags are updated when new types of obd are added.
- * Currently this is only used on the client side, specifically for the echo
- * device client; for other stacks (like ptlrpc threads), contexts are
- * predefined when the lu_device types are registered, during the module probe
- * phase.
- */
-__u32 lu_context_tags_default;
-__u32 lu_session_tags_default;
-
-int lu_env_init(struct lu_env *env, __u32 tags)
-{
- int result;
-
- env->le_ses = NULL;
- result = lu_context_init(&env->le_ctx, tags);
- if (likely(result == 0))
- lu_context_enter(&env->le_ctx);
- return result;
-}
-EXPORT_SYMBOL(lu_env_init);
-
-void lu_env_fini(struct lu_env *env)
-{
- lu_context_exit(&env->le_ctx);
- lu_context_fini(&env->le_ctx);
- env->le_ses = NULL;
-}
-EXPORT_SYMBOL(lu_env_fini);
-
-int lu_env_refill(struct lu_env *env)
-{
- int result;
-
- result = lu_context_refill(&env->le_ctx);
- if (result == 0 && env->le_ses != NULL)
- result = lu_context_refill(env->le_ses);
- return result;
-}
-EXPORT_SYMBOL(lu_env_refill);
-
-struct lu_site_stats {
- unsigned lss_populated;
- unsigned lss_max_search;
- unsigned lss_total;
- unsigned lss_busy;
-};
-
-static void lu_site_stats_get(struct cfs_hash *hs,
- struct lu_site_stats *stats, int populated)
-{
- struct cfs_hash_bd bd;
- int i;
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
- struct hlist_head *hhead;
-
- cfs_hash_bd_lock(hs, &bd, 1);
- stats->lss_busy +=
- cfs_hash_bd_count_get(&bd) - bkt->lsb_lru_len;
- stats->lss_total += cfs_hash_bd_count_get(&bd);
- stats->lss_max_search = max((int)stats->lss_max_search,
- cfs_hash_bd_depmax_get(&bd));
- if (!populated) {
- cfs_hash_bd_unlock(hs, &bd, 1);
- continue;
- }
-
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- if (!hlist_empty(hhead))
- stats->lss_populated++;
- }
- cfs_hash_bd_unlock(hs, &bd, 1);
- }
-}
-
-
-/*
- * lu_cache_shrink_count returns the number of cached objects that are
- * candidates to be freed by shrink_slab(). A counter, which tracks
- * the number of items in the site's lru, is maintained in the per cpu
- * stats of each site. The counter is incremented when an object is added
- * to a site's lru and decremented when one is removed. The number of
- * free-able objects is the sum of all per cpu counters for all sites.
- *
- * Using a per cpu counter is a compromise solution to concurrent access:
- * lu_object_put() can update the counter without locking the site and
- * lu_cache_shrink_count can sum the counters without locking each
- * ls_obj_hash bucket.
- */
-static unsigned long lu_cache_shrink_count(struct shrinker *sk,
- struct shrink_control *sc)
-{
- struct lu_site *s;
- struct lu_site *tmp;
- unsigned long cached = 0;
-
- if (!(sc->gfp_mask & __GFP_FS))
- return 0;
-
- mutex_lock(&lu_sites_guard);
- list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
- cached += ls_stats_read(s->ls_stats, LU_SS_LRU_LEN);
- }
- mutex_unlock(&lu_sites_guard);
-
- cached = (cached / 100) * sysctl_vfs_cache_pressure;
- CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
- cached, sysctl_vfs_cache_pressure);
-
- return cached;
-}
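
(Worked example with illustrative numbers: with 10,000 objects on the per-site
LRU lists and vfs_cache_pressure at its default of 100, the callback reports
(10,000 / 100) * 100 = 10,000 reclaim candidates; lowering vfs_cache_pressure
to 50 halves that to 5,000, making the lu_object cache look smaller to the VM.)
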
-
-static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
- struct shrink_control *sc)
-{
- struct lu_site *s;
- struct lu_site *tmp;
- unsigned long remain = sc->nr_to_scan, freed = 0;
- LIST_HEAD(splice);
-
- if (!(sc->gfp_mask & __GFP_FS))
- /* We must not take the lu_sites_guard lock when
- * __GFP_FS is *not* set because of the deadlock
- * possibility detailed above. Additionally,
- * since we cannot determine the number of
- * objects in the cache without taking this
- * lock, we're in a particularly tough spot. As
- * a result, we'll just lie and say our cache is
- * empty. This _should_ be ok, as we can't
- * reclaim objects when __GFP_FS is *not* set
- * anyways.
- */
- return SHRINK_STOP;
-
- mutex_lock(&lu_sites_guard);
- list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
- freed = lu_site_purge(&lu_shrink_env, s, remain);
- remain -= freed;
- /*
- * Move just shrunk site to the tail of site list to
- * assure shrinking fairness.
- */
- list_move_tail(&s->ls_linkage, &splice);
- }
- list_splice(&splice, lu_sites.prev);
- mutex_unlock(&lu_sites_guard);
-
- return sc->nr_to_scan - remain;
-}
-
-/**
- * Shrinker for the lu_object/site caches: wires up the count/scan callbacks
- * defined above.
- */
-static struct shrinker lu_site_shrinker = {
- .count_objects = lu_cache_shrink_count,
- .scan_objects = lu_cache_shrink_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-/**
- * Initialization of global lu_* data.
- */
-int lu_global_init(void)
-{
- int result;
-
- CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
-
- result = lu_ref_global_init();
- if (result != 0)
- return result;
-
- LU_CONTEXT_KEY_INIT(&lu_global_key);
- result = lu_context_key_register(&lu_global_key);
- if (result != 0)
- return result;
-
- /*
- * At this level, we don't know what tags are needed, so allocate them
- * conservatively. This should not be too bad, because this
- * environment is global.
- */
- mutex_lock(&lu_sites_guard);
- result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
- mutex_unlock(&lu_sites_guard);
- if (result != 0)
- return result;
-
-	/*
-	 * Seeks estimation: 3 seeks to read a record from the OI, one to read
-	 * the inode, one for the EA. Unfortunately, setting such a high value
-	 * results in the lu_object/inode cache consuming all of memory, so
-	 * DEFAULT_SEEKS is used instead.
-	 */
- register_shrinker(&lu_site_shrinker);
-
- return result;
-}
-
-/**
- * Dual to lu_global_init().
- */
-void lu_global_fini(void)
-{
- unregister_shrinker(&lu_site_shrinker);
- lu_context_key_degister(&lu_global_key);
-
- /*
- * Tear shrinker environment down _after_ de-registering
- * lu_global_key, because the latter has a value in the former.
- */
- mutex_lock(&lu_sites_guard);
- lu_env_fini(&lu_shrink_env);
- mutex_unlock(&lu_sites_guard);
-
- lu_ref_global_fini();
-}
-
-static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
-{
- struct lprocfs_counter ret;
-
- lprocfs_stats_collect(stats, idx, &ret);
- if (idx == LU_SS_LRU_LEN)
- /*
- * protect against counter on cpu A being decremented
- * before counter is incremented on cpu B; unlikely
- */
- return (__u32)((ret.lc_sum > 0) ? ret.lc_sum : 0);
-
- return (__u32)ret.lc_count;
-}
-
-/**
- * Output site statistical counters into a buffer. Suitable for
- * lprocfs_rd_*()-style functions.
- */
-int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
-{
- struct lu_site_stats stats;
-
- memset(&stats, 0, sizeof(stats));
- lu_site_stats_get(s->ls_obj_hash, &stats, 1);
-
- seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
- stats.lss_busy,
- stats.lss_total,
- stats.lss_populated,
- CFS_HASH_NHLIST(s->ls_obj_hash),
- stats.lss_max_search,
- ls_stats_read(s->ls_stats, LU_SS_CREATED),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
- ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
- ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
- ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
- return 0;
-}
-EXPORT_SYMBOL(lu_site_stats_print);
-
-/**
- * Helper function to initialize a number of kmem slab caches at once.
- */
-int lu_kmem_init(struct lu_kmem_descr *caches)
-{
- int result;
- struct lu_kmem_descr *iter = caches;
-
- for (result = 0; iter->ckd_cache != NULL; ++iter) {
- *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
- iter->ckd_size,
- 0, 0, NULL);
- if (*iter->ckd_cache == NULL) {
- result = -ENOMEM;
- /* free all previously allocated caches */
- lu_kmem_fini(caches);
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(lu_kmem_init);
-
-/**
- * Helper function to finalize a number of kmem slab caches at once. Dual to
- * lu_kmem_init().
- */
-void lu_kmem_fini(struct lu_kmem_descr *caches)
-{
- for (; caches->ckd_cache != NULL; ++caches) {
- kmem_cache_destroy(*caches->ckd_cache);
- *caches->ckd_cache = NULL;
- }
-}
-EXPORT_SYMBOL(lu_kmem_fini);
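
(Illustrative sketch, not part of the removed code: the descriptor table that
lu_kmem_init()/lu_kmem_fini() expect. struct foo_object and the cache name are
hypothetical; the array must end with an entry whose ckd_cache is NULL.)

	static struct kmem_cache *foo_object_kmem;

	static struct lu_kmem_descr foo_caches[] = {
		{
			.ckd_cache = &foo_object_kmem,
			.ckd_name  = "foo_object_kmem",
			.ckd_size  = sizeof(struct foo_object)
		},
		{
			.ckd_cache = NULL	/* terminator */
		}
	};

	/* rc = lu_kmem_init(foo_caches); ... ; lu_kmem_fini(foo_caches); */
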
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
deleted file mode 100644
index 993697b660f6..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lu_ref.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lu_ref.c
- *
- * Lustre reference.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lu_ref.h"
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
deleted file mode 100644
index d19ec15cb463..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/lustre_handles.c
- *
- * Author: Phil Schwan <phil@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../include/obd_support.h"
-#include "../include/lustre_handles.h"
-#include "../include/lustre_lib.h"
-
-
-static __u64 handle_base;
-#define HANDLE_INCR 7
-static spinlock_t handle_base_lock;
-
-static struct handle_bucket {
- spinlock_t lock;
- struct list_head head;
-} *handle_hash;
-
-#define HANDLE_HASH_SIZE (1 << 16)
-#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
-
-/*
- * Generate a unique 64-bit cookie (hash) for a handle and insert it into the
- * global (per-node) hash table.
- */
-void class_handle_hash(struct portals_handle *h,
- struct portals_handle_ops *ops)
-{
- struct handle_bucket *bucket;
-
- LASSERT(h != NULL);
- LASSERT(list_empty(&h->h_link));
-
- /*
-	 * This is a fast but simplistic cookie-generation algorithm; it will
-	 * need to be redone at some point in the future for security.
- */
- spin_lock(&handle_base_lock);
- handle_base += HANDLE_INCR;
-
- if (unlikely(handle_base == 0)) {
- /*
- * Cookie of zero is "dangerous", because in many places it's
- * assumed that 0 means "unassigned" handle, not bound to any
- * object.
- */
- CWARN("The universe has been exhausted: cookie wrap-around.\n");
- handle_base += HANDLE_INCR;
- }
- h->h_cookie = handle_base;
- spin_unlock(&handle_base_lock);
-
- h->h_ops = ops;
- spin_lock_init(&h->h_lock);
-
- bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
- spin_lock(&bucket->lock);
- list_add_rcu(&h->h_link, &bucket->head);
- h->h_in = 1;
- spin_unlock(&bucket->lock);
-
- CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
- h, h->h_cookie);
-}
-EXPORT_SYMBOL(class_handle_hash);
-
-static void class_handle_unhash_nolock(struct portals_handle *h)
-{
- if (list_empty(&h->h_link)) {
- CERROR("removing an already-removed handle (%#llx)\n",
- h->h_cookie);
- return;
- }
-
- CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
- h, h->h_cookie);
-
- spin_lock(&h->h_lock);
- if (h->h_in == 0) {
- spin_unlock(&h->h_lock);
- return;
- }
- h->h_in = 0;
- spin_unlock(&h->h_lock);
- list_del_rcu(&h->h_link);
-}
-
-void class_handle_unhash(struct portals_handle *h)
-{
- struct handle_bucket *bucket;
- bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
-
- spin_lock(&bucket->lock);
- class_handle_unhash_nolock(h);
- spin_unlock(&bucket->lock);
-}
-EXPORT_SYMBOL(class_handle_unhash);
-
-void *class_handle2object(__u64 cookie)
-{
- struct handle_bucket *bucket;
- struct portals_handle *h;
- void *retval = NULL;
-
- LASSERT(handle_hash != NULL);
-
-	/* Be careful when you want to change this code. See the
-	 * rcu_read_lock() definition at the top of this file. - jxiong */
- bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
-
- rcu_read_lock();
- list_for_each_entry_rcu(h, &bucket->head, h_link) {
- if (h->h_cookie != cookie)
- continue;
-
- spin_lock(&h->h_lock);
- if (likely(h->h_in != 0)) {
- h->h_ops->hop_addref(h);
- retval = h;
- }
- spin_unlock(&h->h_lock);
- break;
- }
- rcu_read_unlock();
-
- return retval;
-}
-EXPORT_SYMBOL(class_handle2object);
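
(Illustrative sketch, not part of the removed code: the publish/lookup cycle
built from the two functions above. struct foo and foo_handle_addref() are
hypothetical; h_link must be initialized before hashing, per the LASSERT in
class_handle_hash().)

	struct foo {
		struct portals_handle	foo_handle;	/* first member */
		/* ... */
	};

	static struct portals_handle_ops foo_handle_ops = {
		.hop_addref	= foo_handle_addref,	/* hypothetical */
		.hop_free	= NULL,	/* class_handle_free_cb() falls back to kfree() */
	};

	/* publish: assigns a unique cookie and inserts into the global hash */
	INIT_LIST_HEAD(&foo->foo_handle.h_link);
	class_handle_hash(&foo->foo_handle, &foo_handle_ops);

	/* later: resolve the cookie back to the object; takes a reference via
	 * hop_addref() and returns NULL if the handle was already unhashed.
	 * The cast is valid because foo_handle is the first member of foo. */
	struct foo *found = class_handle2object(foo->foo_handle.h_cookie);
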
-
-void class_handle_free_cb(struct rcu_head *rcu)
-{
- struct portals_handle *h = RCU2HANDLE(rcu);
- void *ptr = (void *)(unsigned long)h->h_cookie;
-
- if (h->h_ops->hop_free != NULL)
- h->h_ops->hop_free(ptr, h->h_size);
- else
- kfree(ptr);
-}
-EXPORT_SYMBOL(class_handle_free_cb);
-
-int class_handle_init(void)
-{
- struct handle_bucket *bucket;
- struct timespec64 ts;
- int seed[2];
-
- LASSERT(handle_hash == NULL);
-
- handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE,
- GFP_NOFS);
- if (handle_hash == NULL)
- return -ENOMEM;
-
- spin_lock_init(&handle_base_lock);
- for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
- bucket--) {
- INIT_LIST_HEAD(&bucket->head);
- spin_lock_init(&bucket->lock);
- }
-
- /** bug 21430: add randomness to the initial base */
- cfs_get_random_bytes(seed, sizeof(seed));
- ktime_get_ts64(&ts);
- cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
-
- cfs_get_random_bytes(&handle_base, sizeof(handle_base));
- LASSERT(handle_base != 0ULL);
-
- return 0;
-}
-
-static int cleanup_all_handles(void)
-{
- int rc;
- int i;
-
- for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
- struct portals_handle *h;
-
- spin_lock(&handle_hash[i].lock);
- list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
- CERROR("force clean handle %#llx addr %p ops %p\n",
- h->h_cookie, h, h->h_ops);
-
- class_handle_unhash_nolock(h);
- rc++;
- }
- spin_unlock(&handle_hash[i].lock);
- }
-
- return rc;
-}
-
-void class_handle_cleanup(void)
-{
- int count;
- LASSERT(handle_hash != NULL);
-
- count = cleanup_all_handles();
-
- kvfree(handle_hash);
- handle_hash = NULL;
-
- if (count != 0)
- CERROR("handle_count at cleanup: %d\n", count);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
deleted file mode 100644
index d6184f821cd0..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_net.h"
-#include "../include/lprocfs_status.h"
-
-#define NIDS_MAX 32
-
-struct uuid_nid_data {
- struct list_head un_list;
- struct obd_uuid un_uuid;
- int un_nid_count;
- lnet_nid_t un_nids[NIDS_MAX];
-};
-
-/* FIXME: This should probably become more elegant than a global linked list */
-static struct list_head g_uuid_list;
-static spinlock_t g_uuid_lock;
-
-void class_init_uuidlist(void)
-{
- INIT_LIST_HEAD(&g_uuid_list);
- spin_lock_init(&g_uuid_lock);
-}
-
-void class_exit_uuidlist(void)
-{
- /* delete all */
- class_del_uuid(NULL);
-}
-
-int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
-{
- struct uuid_nid_data *data;
- struct obd_uuid tmp;
- int rc = -ENOENT;
-
- obd_str2uuid(&tmp, uuid);
- spin_lock(&g_uuid_lock);
- list_for_each_entry(data, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&data->un_uuid, &tmp)) {
- if (index >= data->un_nid_count)
- break;
-
- rc = 0;
- *peer_nid = data->un_nids[index];
- break;
- }
- }
- spin_unlock(&g_uuid_lock);
- return rc;
-}
-EXPORT_SYMBOL(lustre_uuid_to_peer);
-
-/* Add a nid to a niduuid. Multiple nids can be added to a single uuid;
- LNET will choose the best one. */
-int class_add_uuid(const char *uuid, __u64 nid)
-{
- struct uuid_nid_data *data, *entry;
- int found = 0;
-
- LASSERT(nid != 0); /* valid newconfig NID is never zero */
-
- if (strlen(uuid) > UUID_MAX - 1)
- return -EOVERFLOW;
-
- data = kzalloc(sizeof(*data), GFP_NOFS);
- if (!data)
- return -ENOMEM;
-
- obd_str2uuid(&data->un_uuid, uuid);
- data->un_nids[0] = nid;
- data->un_nid_count = 1;
-
- spin_lock(&g_uuid_lock);
- list_for_each_entry(entry, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) {
- int i;
-
- found = 1;
- for (i = 0; i < entry->un_nid_count; i++)
- if (nid == entry->un_nids[i])
- break;
-
- if (i == entry->un_nid_count) {
- LASSERT(entry->un_nid_count < NIDS_MAX);
- entry->un_nids[entry->un_nid_count++] = nid;
- }
- break;
- }
- }
- if (!found)
- list_add(&data->un_list, &g_uuid_list);
- spin_unlock(&g_uuid_lock);
-
- if (found) {
- CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid,
- libcfs_nid2str(nid), entry->un_nid_count);
- kfree(data);
- } else {
- CDEBUG(D_INFO, "add uuid %s %s\n", uuid, libcfs_nid2str(nid));
- }
- return 0;
-}
-EXPORT_SYMBOL(class_add_uuid);
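
(Illustrative sketch, not part of the removed code: the add/lookup pairing.
The uuid string and the nid_a/nid_b values are placeholders. class_add_uuid()
appends further NIDs to an existing uuid entry, and lustre_uuid_to_peer()
walks them by index until it returns -ENOENT.)

	lnet_nid_t nid;
	int i;

	class_add_uuid("client-uuid-0001", nid_a);
	class_add_uuid("client-uuid-0001", nid_b);	/* appended to same entry */

	for (i = 0; lustre_uuid_to_peer("client-uuid-0001", &nid, i) == 0; i++)
		CDEBUG(D_INFO, "nid[%d] = %s\n", i, libcfs_nid2str(nid));
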
-
-/* Delete the nids for one uuid if specified, otherwise delete all */
-int class_del_uuid(const char *uuid)
-{
- LIST_HEAD(deathrow);
- struct uuid_nid_data *data;
-
- spin_lock(&g_uuid_lock);
- if (uuid != NULL) {
- struct obd_uuid tmp;
-
- obd_str2uuid(&tmp, uuid);
- list_for_each_entry(data, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&data->un_uuid, &tmp)) {
- list_move(&data->un_list, &deathrow);
- break;
- }
- }
- } else
- list_splice_init(&g_uuid_list, &deathrow);
- spin_unlock(&g_uuid_lock);
-
- if (uuid != NULL && list_empty(&deathrow)) {
- CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
- return -EINVAL;
- }
-
- while (!list_empty(&deathrow)) {
- data = list_entry(deathrow.next, struct uuid_nid_data,
- un_list);
- list_del(&data->un_list);
-
- CDEBUG(D_INFO, "del uuid %s %s/%d\n",
- obd_uuid2str(&data->un_uuid),
- libcfs_nid2str(data->un_nids[0]),
- data->un_nid_count);
-
- kfree(data);
- }
-
- return 0;
-}
-
-/* check if @nid exists in nid list of @uuid */
-int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
-{
- struct uuid_nid_data *entry;
- int found = 0;
-
- CDEBUG(D_INFO, "check if uuid %s has %s.\n",
- obd_uuid2str(uuid), libcfs_nid2str(nid));
-
- spin_lock(&g_uuid_lock);
- list_for_each_entry(entry, &g_uuid_list, un_list) {
- int i;
-
- if (!obd_uuid_equals(&entry->un_uuid, uuid))
- continue;
-
- /* found the uuid, check if it has @nid */
- for (i = 0; i < entry->un_nid_count; i++) {
- if (entry->un_nids[i] == nid) {
- found = 1;
- break;
- }
- }
- break;
- }
- spin_unlock(&g_uuid_lock);
- return found;
-}
-EXPORT_SYMBOL(class_check_uuid);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
deleted file mode 100644
index a8a1cb774fb4..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ /dev/null
@@ -1,1588 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/obd_config.c
- *
- * Config API
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-#include "../include/obd_class.h"
-#include <linux/string.h>
-#include "../include/lustre_log.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_param.h"
-
-#include "llog_internal.h"
-
-static cfs_hash_ops_t uuid_hash_ops;
-static cfs_hash_ops_t nid_hash_ops;
-
-/*********** string parsing utils *********/
-
-/* returns 0 if we find this key in the buffer, else 1 */
-int class_find_param(char *buf, char *key, char **valp)
-{
- char *ptr;
-
- if (!buf)
- return 1;
-
- ptr = strstr(buf, key);
- if (!ptr)
- return 1;
-
- if (valp)
- *valp = ptr + strlen(key);
-
- return 0;
-}
-EXPORT_SYMBOL(class_find_param);
-
-/* Returns 0 if the buffer starts with this key, else 1.
- * On success, *valp points to the first char after the key. */
-int class_match_param(char *buf, char *key, char **valp)
-{
- if (!buf)
- return 1;
-
- if (memcmp(buf, key, strlen(key)) != 0)
- return 1;
-
- if (valp)
- *valp = buf + strlen(key);
-
- return 0;
-}
-EXPORT_SYMBOL(class_match_param);
-
-static int parse_nid(char *buf, void *value, int quiet)
-{
- lnet_nid_t *nid = (lnet_nid_t *)value;
-
- *nid = libcfs_str2nid(buf);
- if (*nid != LNET_NID_ANY)
- return 0;
-
- if (!quiet)
- LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", buf);
- return -EINVAL;
-}
-
-static int parse_net(char *buf, void *value)
-{
- __u32 *net = (__u32 *)value;
-
- *net = libcfs_str2net(buf);
- CDEBUG(D_INFO, "Net %s\n", libcfs_net2str(*net));
- return 0;
-}
-
-enum {
- CLASS_PARSE_NID = 1,
- CLASS_PARSE_NET,
-};
-
-/* 0 is a good NID, 1 means not found, < 0 means error;
- * endh is set to the next separator */
-static int class_parse_value(char *buf, int opc, void *value, char **endh,
- int quiet)
-{
- char *endp;
- char tmp;
- int rc = 0;
-
- if (!buf)
- return 1;
- while (*buf == ',' || *buf == ':')
- buf++;
- if (*buf == ' ' || *buf == '/' || *buf == '\0')
- return 1;
-
- /* nid separators or end of nids */
- endp = strpbrk(buf, ",: /");
- if (!endp)
- endp = buf + strlen(buf);
-
- tmp = *endp;
- *endp = '\0';
- switch (opc) {
- default:
- LBUG();
- case CLASS_PARSE_NID:
- rc = parse_nid(buf, value, quiet);
- break;
- case CLASS_PARSE_NET:
- rc = parse_net(buf, value);
- break;
- }
- *endp = tmp;
- if (rc != 0)
- return rc;
- if (endh)
- *endh = endp;
- return 0;
-}
-
-int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh)
-{
- return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 0);
-}
-EXPORT_SYMBOL(class_parse_nid);
-
-int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh)
-{
- return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 1);
-}
-EXPORT_SYMBOL(class_parse_nid_quiet);
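
(Illustrative sketch, not part of the removed code: walking a
separator-delimited NID list with class_parse_nid(). The buffer contents are
placeholders. Each successful call sets *endh to the next separator, and the
next call skips leading ',' and ':' itself, so the same pointer can be fed
straight back in.)

	char nids[] = "192.168.0.1@tcp,192.168.0.2@tcp";	/* hypothetical */
	char *ptr = nids;
	lnet_nid_t nid;

	while (class_parse_nid(ptr, &nid, &ptr) == 0) {
		/* one NID parsed; ptr now points at the separator after it */
	}
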
-
-/********************** class fns **********************/
-
-/**
- * Create a new obd device and set the type, name and uuid. If successful,
- * the new device can be accessed by either name or uuid.
- */
-int class_attach(struct lustre_cfg *lcfg)
-{
- struct obd_device *obd = NULL;
- char *typename, *name, *uuid;
- int rc, len;
-
- if (!LUSTRE_CFG_BUFLEN(lcfg, 1)) {
- CERROR("No type passed!\n");
- return -EINVAL;
- }
- typename = lustre_cfg_string(lcfg, 1);
-
- if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) {
- CERROR("No name passed!\n");
- return -EINVAL;
- }
- name = lustre_cfg_string(lcfg, 0);
-
- if (!LUSTRE_CFG_BUFLEN(lcfg, 2)) {
- CERROR("No UUID passed!\n");
- return -EINVAL;
- }
- uuid = lustre_cfg_string(lcfg, 2);
-
- CDEBUG(D_IOCTL, "attach type %s name: %s uuid: %s\n",
- MKSTR(typename), MKSTR(name), MKSTR(uuid));
-
- obd = class_newdev(typename, name);
- if (IS_ERR(obd)) {
- /* Already exists or out of obds */
- rc = PTR_ERR(obd);
- obd = NULL;
- CERROR("Cannot create device %s of type %s : %d\n",
- name, typename, rc);
- goto out;
- }
- LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n",
- name, typename);
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
- "obd %p obd_magic %08X != %08X\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0,
- "%p obd_name %s != %s\n", obd, obd->obd_name, name);
-
- rwlock_init(&obd->obd_pool_lock);
- obd->obd_pool_limit = 0;
- obd->obd_pool_slv = 0;
-
- INIT_LIST_HEAD(&obd->obd_exports);
- INIT_LIST_HEAD(&obd->obd_unlinked_exports);
- INIT_LIST_HEAD(&obd->obd_delayed_exports);
- spin_lock_init(&obd->obd_nid_lock);
- spin_lock_init(&obd->obd_dev_lock);
- mutex_init(&obd->obd_dev_mutex);
- spin_lock_init(&obd->obd_osfs_lock);
- /* obd->obd_osfs_age must be set to a value in the distant
- * past to guarantee a fresh statfs is fetched on mount. */
- obd->obd_osfs_age = cfs_time_shift_64(-1000);
-
- /* XXX belongs in setup not attach */
- init_rwsem(&obd->obd_observer_link_sem);
- /* recovery data */
- spin_lock_init(&obd->obd_recovery_task_lock);
- init_waitqueue_head(&obd->obd_next_transno_waitq);
- init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
- INIT_LIST_HEAD(&obd->obd_req_replay_queue);
- INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
- INIT_LIST_HEAD(&obd->obd_final_req_queue);
- INIT_LIST_HEAD(&obd->obd_evict_list);
-
- llog_group_init(&obd->obd_olg, FID_SEQ_LLOG);
-
- obd->obd_conn_inprogress = 0;
-
- len = strlen(uuid);
- if (len >= sizeof(obd->obd_uuid)) {
- CERROR("uuid must be < %d bytes long\n",
- (int)sizeof(obd->obd_uuid));
- rc = -EINVAL;
- goto out;
- }
- memcpy(obd->obd_uuid.uuid, uuid, len);
-
- /* do the attach */
- if (OBP(obd, attach)) {
- rc = OBP(obd, attach)(obd, sizeof(*lcfg), lcfg);
- if (rc) {
- rc = -EINVAL;
- goto out;
- }
- }
-
- /* Detach drops this */
- spin_lock(&obd->obd_dev_lock);
- atomic_set(&obd->obd_refcount, 1);
- spin_unlock(&obd->obd_dev_lock);
- lu_ref_init(&obd->obd_reference);
- lu_ref_add(&obd->obd_reference, "attach", obd);
-
- obd->obd_attached = 1;
- CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n",
- obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
- return 0;
- out:
- if (obd != NULL) {
- class_release_dev(obd);
- }
- return rc;
-}
-EXPORT_SYMBOL(class_attach);
-
-/** Create hashes, self-export, and call type-specific setup.
- * Setup is effectively the "start this obd" call.
- */
-int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- int err = 0;
- struct obd_export *exp;
-
- LASSERT(obd != NULL);
- LASSERTF(obd == class_num2obd(obd->obd_minor),
- "obd %p != obd_devs[%d] %p\n",
- obd, obd->obd_minor, class_num2obd(obd->obd_minor));
- LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
- "obd %p obd_magic %08x != %08x\n",
- obd, obd->obd_magic, OBD_DEVICE_MAGIC);
-
- /* have we attached a type to this device? */
- if (!obd->obd_attached) {
- CERROR("Device %d not attached\n", obd->obd_minor);
- return -ENODEV;
- }
-
- if (obd->obd_set_up) {
- CERROR("Device %d already setup (type %s)\n",
- obd->obd_minor, obd->obd_type->typ_name);
- return -EEXIST;
- }
-
- /* is someone else setting us up right now? (attach inits spinlock) */
- spin_lock(&obd->obd_dev_lock);
- if (obd->obd_starting) {
- spin_unlock(&obd->obd_dev_lock);
- CERROR("Device %d setup in progress (type %s)\n",
- obd->obd_minor, obd->obd_type->typ_name);
- return -EEXIST;
- }
-	/* Just leave this on forever. We can't use obd_set_up here because
-	 * other functions check that status, and we're not actually set up yet. */
- obd->obd_starting = 1;
- obd->obd_uuid_hash = NULL;
- obd->obd_nid_hash = NULL;
- spin_unlock(&obd->obd_dev_lock);
-
-	/* create a uuid-export lustre hash */
- obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
- HASH_UUID_CUR_BITS,
- HASH_UUID_MAX_BITS,
- HASH_UUID_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &uuid_hash_ops, CFS_HASH_DEFAULT);
- if (!obd->obd_uuid_hash) {
- err = -ENOMEM;
- goto err_hash;
- }
-
- /* create a nid-export lustre hash */
- obd->obd_nid_hash = cfs_hash_create("NID_HASH",
- HASH_NID_CUR_BITS,
- HASH_NID_MAX_BITS,
- HASH_NID_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &nid_hash_ops, CFS_HASH_DEFAULT);
- if (!obd->obd_nid_hash) {
- err = -ENOMEM;
- goto err_hash;
- }
-
- exp = class_new_export(obd, &obd->obd_uuid);
- if (IS_ERR(exp)) {
- err = PTR_ERR(exp);
- goto err_hash;
- }
-
- obd->obd_self_export = exp;
- class_export_put(exp);
-
- err = obd_setup(obd, lcfg);
- if (err)
- goto err_exp;
-
- obd->obd_set_up = 1;
-
- spin_lock(&obd->obd_dev_lock);
- /* cleanup drops this */
- class_incref(obd, "setup", obd);
- spin_unlock(&obd->obd_dev_lock);
-
- CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
- obd->obd_name, obd->obd_uuid.uuid);
-
- return 0;
-err_exp:
- if (obd->obd_self_export) {
- class_unlink_export(obd->obd_self_export);
- obd->obd_self_export = NULL;
- }
-err_hash:
- if (obd->obd_uuid_hash) {
- cfs_hash_putref(obd->obd_uuid_hash);
- obd->obd_uuid_hash = NULL;
- }
- if (obd->obd_nid_hash) {
- cfs_hash_putref(obd->obd_nid_hash);
- obd->obd_nid_hash = NULL;
- }
- obd->obd_starting = 0;
- CERROR("setup %s failed (%d)\n", obd->obd_name, err);
- return err;
-}
-EXPORT_SYMBOL(class_setup);
-
-/** We have finished using this obd and are ready to destroy it.
- * There can be no more references to this obd.
- */
-int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- if (obd->obd_set_up) {
- CERROR("OBD device %d still set up\n", obd->obd_minor);
- return -EBUSY;
- }
-
- spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_attached) {
- spin_unlock(&obd->obd_dev_lock);
- CERROR("OBD device %d not attached\n", obd->obd_minor);
- return -ENODEV;
- }
- obd->obd_attached = 0;
- spin_unlock(&obd->obd_dev_lock);
-
- CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n",
- obd->obd_name, obd->obd_uuid.uuid);
-
- class_decref(obd, "attach", obd);
- return 0;
-}
-EXPORT_SYMBOL(class_detach);
-
-/** Start shutting down the obd. There may be in-progress ops when
- * this is called. We tell them to start shutting down with a call
- * to class_disconnect_exports().
- */
-int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- int err = 0;
- char *flag;
-
- OBD_RACE(OBD_FAIL_LDLM_RECOV_CLIENTS);
-
- if (!obd->obd_set_up) {
- CERROR("Device %d not setup\n", obd->obd_minor);
- return -ENODEV;
- }
-
- spin_lock(&obd->obd_dev_lock);
- if (obd->obd_stopping) {
- spin_unlock(&obd->obd_dev_lock);
- CERROR("OBD %d already stopping\n", obd->obd_minor);
- return -ENODEV;
- }
- /* Leave this on forever */
- obd->obd_stopping = 1;
-
- /* wait for already-arrived-connections to finish. */
- while (obd->obd_conn_inprogress > 0) {
- spin_unlock(&obd->obd_dev_lock);
-
- cond_resched();
-
- spin_lock(&obd->obd_dev_lock);
- }
- spin_unlock(&obd->obd_dev_lock);
-
- if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
- for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
- switch (*flag) {
- case 'F':
- obd->obd_force = 1;
- break;
- case 'A':
- LCONSOLE_WARN("Failing over %s\n",
- obd->obd_name);
- obd->obd_fail = 1;
- obd->obd_no_transno = 1;
- obd->obd_no_recov = 1;
- if (OBP(obd, iocontrol)) {
- obd_iocontrol(OBD_IOC_SYNC,
- obd->obd_self_export,
- 0, NULL, NULL);
- }
- break;
- default:
- CERROR("Unrecognised flag '%c'\n", *flag);
- }
- }
-
- LASSERT(obd->obd_self_export);
-
- /* Precleanup, we must make sure all exports get destroyed. */
- err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS);
- if (err)
- CERROR("Precleanup %s returned %d\n",
- obd->obd_name, err);
-
-	/* destroy the uuid-export hash body */
- if (obd->obd_uuid_hash) {
- cfs_hash_putref(obd->obd_uuid_hash);
- obd->obd_uuid_hash = NULL;
- }
-
- /* destroy a nid-export hash body */
- if (obd->obd_nid_hash) {
- cfs_hash_putref(obd->obd_nid_hash);
- obd->obd_nid_hash = NULL;
- }
-
- class_decref(obd, "setup", obd);
- obd->obd_set_up = 0;
-
- return 0;
-}
-EXPORT_SYMBOL(class_cleanup);
-
-struct obd_device *class_incref(struct obd_device *obd,
- const char *scope, const void *source)
-{
- lu_ref_add_atomic(&obd->obd_reference, scope, source);
- atomic_inc(&obd->obd_refcount);
- CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd,
- atomic_read(&obd->obd_refcount));
-
- return obd;
-}
-EXPORT_SYMBOL(class_incref);
-
-void class_decref(struct obd_device *obd, const char *scope, const void *source)
-{
- int err;
- int refs;
-
- spin_lock(&obd->obd_dev_lock);
- atomic_dec(&obd->obd_refcount);
- refs = atomic_read(&obd->obd_refcount);
- spin_unlock(&obd->obd_dev_lock);
- lu_ref_del(&obd->obd_reference, scope, source);
-
- CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
-
- if ((refs == 1) && obd->obd_stopping) {
- /* All exports have been destroyed; there should
- be no more in-progress ops by this point.*/
-
- spin_lock(&obd->obd_self_export->exp_lock);
- obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
- spin_unlock(&obd->obd_self_export->exp_lock);
-
- /* note that we'll recurse into class_decref again */
- class_unlink_export(obd->obd_self_export);
- return;
- }
-
- if (refs == 0) {
- CDEBUG(D_CONFIG, "finishing cleanup of obd %s (%s)\n",
- obd->obd_name, obd->obd_uuid.uuid);
- LASSERT(!obd->obd_attached);
- if (obd->obd_stopping) {
- /* If we're not stopping, we were never set up */
- err = obd_cleanup(obd);
- if (err)
- CERROR("Cleanup %s returned %d\n",
- obd->obd_name, err);
- }
- if (OBP(obd, detach)) {
- err = OBP(obd, detach)(obd);
- if (err)
- CERROR("Detach returned %d\n", err);
- }
- class_release_dev(obd);
- }
-}
-EXPORT_SYMBOL(class_decref);
-
-/** Add a failover nid location.
- * Client obd types contact server obd types using this nid list.
- */
-int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct obd_import *imp;
- struct obd_uuid uuid;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
- LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
- CERROR("invalid conn_uuid\n");
- return -EINVAL;
- }
- if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
- CERROR("can't add connection on non-client dev\n");
- return -EINVAL;
- }
-
- imp = obd->u.cli.cl_import;
- if (!imp) {
- CERROR("try to add conn on immature client dev\n");
- return -EINVAL;
- }
-
- obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
- rc = obd_add_conn(imp, &uuid, lcfg->lcfg_num);
-
- return rc;
-}
-EXPORT_SYMBOL(class_add_conn);
-
-/** Remove a failover nid location.
- */
-int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct obd_import *imp;
- struct obd_uuid uuid;
- int rc;
-
- if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
- LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
- CERROR("invalid conn_uuid\n");
- return -EINVAL;
- }
- if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME)) {
- CERROR("can't del connection on non-client dev\n");
- return -EINVAL;
- }
-
- imp = obd->u.cli.cl_import;
- if (!imp) {
- CERROR("try to del conn on immature client dev\n");
- return -EINVAL;
- }
-
- obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
- rc = obd_del_conn(imp, &uuid);
-
- return rc;
-}
-
-LIST_HEAD(lustre_profile_list);
-
-struct lustre_profile *class_get_profile(const char *prof)
-{
- struct lustre_profile *lprof;
-
- list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
- if (!strcmp(lprof->lp_profile, prof)) {
- return lprof;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(class_get_profile);
-
-/** Create a named "profile".
- * This defines the mdc and osc names to use for a client.
- * This also is used to define the lov to be used by a mdt.
- */
-int class_add_profile(int proflen, char *prof, int osclen, char *osc,
- int mdclen, char *mdc)
-{
- struct lustre_profile *lprof;
- int err = 0;
-
- CDEBUG(D_CONFIG, "Add profile %s\n", prof);
-
- lprof = kzalloc(sizeof(*lprof), GFP_NOFS);
- if (!lprof)
- return -ENOMEM;
- INIT_LIST_HEAD(&lprof->lp_list);
-
- LASSERT(proflen == (strlen(prof) + 1));
- lprof->lp_profile = kmemdup(prof, proflen, GFP_NOFS);
- if (!lprof->lp_profile) {
- err = -ENOMEM;
- goto free_lprof;
- }
-
- LASSERT(osclen == (strlen(osc) + 1));
- lprof->lp_dt = kmemdup(osc, osclen, GFP_NOFS);
- if (!lprof->lp_dt) {
- err = -ENOMEM;
- goto free_lp_profile;
- }
-
- if (mdclen > 0) {
- LASSERT(mdclen == (strlen(mdc) + 1));
- lprof->lp_md = kmemdup(mdc, mdclen, GFP_NOFS);
- if (!lprof->lp_md) {
- err = -ENOMEM;
- goto free_lp_dt;
- }
- }
-
- list_add(&lprof->lp_list, &lustre_profile_list);
- return err;
-
-free_lp_dt:
- kfree(lprof->lp_dt);
-free_lp_profile:
- kfree(lprof->lp_profile);
-free_lprof:
- kfree(lprof);
- return err;
-}
-
-void class_del_profile(const char *prof)
-{
- struct lustre_profile *lprof;
-
- CDEBUG(D_CONFIG, "Del profile %s\n", prof);
-
- lprof = class_get_profile(prof);
- if (lprof) {
- list_del(&lprof->lp_list);
- kfree(lprof->lp_profile);
- kfree(lprof->lp_dt);
- kfree(lprof->lp_md);
- kfree(lprof);
- }
-}
-EXPORT_SYMBOL(class_del_profile);
-
-/* COMPAT_146 */
-void class_del_profiles(void)
-{
- struct lustre_profile *lprof, *n;
-
- list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
- list_del(&lprof->lp_list);
- kfree(lprof->lp_profile);
- kfree(lprof->lp_dt);
- kfree(lprof->lp_md);
- kfree(lprof);
- }
-}
-EXPORT_SYMBOL(class_del_profiles);
-
-static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg)
-{
- if (class_match_param(ptr, PARAM_AT_MIN, NULL) == 0)
- at_min = val;
- else if (class_match_param(ptr, PARAM_AT_MAX, NULL) == 0)
- at_max = val;
- else if (class_match_param(ptr, PARAM_AT_EXTRA, NULL) == 0)
- at_extra = val;
- else if (class_match_param(ptr, PARAM_AT_EARLY_MARGIN, NULL) == 0)
- at_early_margin = val;
- else if (class_match_param(ptr, PARAM_AT_HISTORY, NULL) == 0)
- at_history = val;
- else if (class_match_param(ptr, PARAM_JOBID_VAR, NULL) == 0)
- strlcpy(obd_jobid_var, lustre_cfg_string(lcfg, 2),
- JOBSTATS_JOBID_VAR_MAX_LEN + 1);
- else
- return -EINVAL;
-
- CDEBUG(D_IOCTL, "global %s = %d\n", ptr, val);
- return 0;
-}
-
-
-/* We can't call ll_process_config or lquota_process_config directly because
- * they live in modules that must be loaded after this one. */
-static int (*client_process_config)(struct lustre_cfg *lcfg);
-static int (*quota_process_config)(struct lustre_cfg *lcfg);
-
-void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg))
-{
- client_process_config = cpc;
-}
-EXPORT_SYMBOL(lustre_register_client_process_config);
-
-static int process_param2_config(struct lustre_cfg *lcfg)
-{
- char *param = lustre_cfg_string(lcfg, 1);
- char *upcall = lustre_cfg_string(lcfg, 2);
- char *argv[] = {
- [0] = "/usr/sbin/lctl",
- [1] = "set_param",
- [2] = param,
- [3] = NULL
- };
- ktime_t start;
- ktime_t end;
- int rc;
-
-
- /* Add upcall processing here. Now only lctl is supported */
- if (strcmp(upcall, LCTL_UPCALL) != 0) {
- CERROR("Unsupported upcall %s\n", upcall);
- return -EINVAL;
- }
-
- start = ktime_get();
- rc = call_usermodehelper(argv[0], argv, NULL, 1);
- end = ktime_get();
-
- if (rc < 0) {
- CERROR(
- "lctl: error invoking upcall %s %s %s: rc = %d; time %ldus\n",
- argv[0], argv[1], argv[2], rc,
- (long)ktime_us_delta(end, start));
- } else {
- CDEBUG(D_HA, "lctl: invoked upcall %s %s %s, time %ldus\n",
- argv[0], argv[1], argv[2],
- (long)ktime_us_delta(end, start));
- rc = 0;
- }
-
- return rc;
-}
-
-/** Process configuration commands given in lustre_cfg form.
- * These may come from direct calls (e.g. class_manual_cleanup)
- * or processing the config llog, or ioctl from lctl.
- */
-int class_process_config(struct lustre_cfg *lcfg)
-{
- struct obd_device *obd;
- int err;
-
- LASSERT(lcfg && !IS_ERR(lcfg));
- CDEBUG(D_IOCTL, "processing cmd: %x\n", lcfg->lcfg_command);
-
- /* Commands that don't need a device */
- switch (lcfg->lcfg_command) {
- case LCFG_ATTACH: {
- err = class_attach(lcfg);
- goto out;
- }
- case LCFG_ADD_UUID: {
- CDEBUG(D_IOCTL, "adding mapping from uuid %s to nid %#llx (%s)\n",
- lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid,
- libcfs_nid2str(lcfg->lcfg_nid));
-
- err = class_add_uuid(lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid);
- goto out;
- }
- case LCFG_DEL_UUID: {
- CDEBUG(D_IOCTL, "removing mappings for uuid %s\n",
- (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) == 0)
- ? "<all uuids>" : lustre_cfg_string(lcfg, 1));
-
- err = class_del_uuid(lustre_cfg_string(lcfg, 1));
- goto out;
- }
- case LCFG_MOUNTOPT: {
- CDEBUG(D_IOCTL, "mountopt: profile %s osc %s mdc %s\n",
- lustre_cfg_string(lcfg, 1),
- lustre_cfg_string(lcfg, 2),
- lustre_cfg_string(lcfg, 3));
- /* set these mount options somewhere, so ll_fill_super
- * can find them. */
- err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1),
- lustre_cfg_string(lcfg, 1),
- LUSTRE_CFG_BUFLEN(lcfg, 2),
- lustre_cfg_string(lcfg, 2),
- LUSTRE_CFG_BUFLEN(lcfg, 3),
- lustre_cfg_string(lcfg, 3));
- goto out;
- }
- case LCFG_DEL_MOUNTOPT: {
- CDEBUG(D_IOCTL, "mountopt: profile %s\n",
- lustre_cfg_string(lcfg, 1));
- class_del_profile(lustre_cfg_string(lcfg, 1));
- err = 0;
- goto out;
- }
- case LCFG_SET_TIMEOUT: {
- CDEBUG(D_IOCTL, "changing lustre timeout from %d to %d\n",
- obd_timeout, lcfg->lcfg_num);
- obd_timeout = max(lcfg->lcfg_num, 1U);
- obd_timeout_set = 1;
- err = 0;
- goto out;
- }
- case LCFG_SET_LDLM_TIMEOUT: {
- /* ldlm_timeout is not used on the client */
- err = 0;
- goto out;
- }
- case LCFG_SET_UPCALL: {
- LCONSOLE_ERROR_MSG(0x15a, "recovery upcall is deprecated\n");
- /* COMPAT_146 Don't fail on old configs */
- err = 0;
- goto out;
- }
- case LCFG_MARKER: {
- struct cfg_marker *marker;
- marker = lustre_cfg_buf(lcfg, 1);
- CDEBUG(D_IOCTL, "marker %d (%#x) %.16s %s\n", marker->cm_step,
- marker->cm_flags, marker->cm_tgtname, marker->cm_comment);
- err = 0;
- goto out;
- }
- case LCFG_PARAM: {
- char *tmp;
- /* llite has no obd */
- if ((class_match_param(lustre_cfg_string(lcfg, 1),
- PARAM_LLITE, NULL) == 0) &&
- client_process_config) {
- err = (*client_process_config)(lcfg);
- goto out;
- } else if ((class_match_param(lustre_cfg_string(lcfg, 1),
- PARAM_SYS, &tmp) == 0)) {
- /* Global param settings */
- err = class_set_global(tmp, lcfg->lcfg_num, lcfg);
- /*
- * Client or server should not fail to mount if
- * it hits an unknown configuration parameter.
- */
- if (err != 0)
- CWARN("Ignoring unknown param %s\n", tmp);
-
- err = 0;
- goto out;
- } else if ((class_match_param(lustre_cfg_string(lcfg, 1),
- PARAM_QUOTA, &tmp) == 0) &&
- quota_process_config) {
- err = (*quota_process_config)(lcfg);
- goto out;
- }
-
- break;
- }
- case LCFG_SET_PARAM: {
- err = process_param2_config(lcfg);
- goto out;
- }
- }
- /* Commands that require a device */
- obd = class_name2obd(lustre_cfg_string(lcfg, 0));
- if (!obd) {
- if (!LUSTRE_CFG_BUFLEN(lcfg, 0))
- CERROR("this lcfg command requires a device name\n");
- else
- CERROR("no device for: %s\n",
- lustre_cfg_string(lcfg, 0));
-
- err = -EINVAL;
- goto out;
- }
-
- switch (lcfg->lcfg_command) {
- case LCFG_SETUP: {
- err = class_setup(obd, lcfg);
- goto out;
- }
- case LCFG_DETACH: {
- err = class_detach(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_CLEANUP: {
- err = class_cleanup(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_ADD_CONN: {
- err = class_add_conn(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_DEL_CONN: {
- err = class_del_conn(obd, lcfg);
- err = 0;
- goto out;
- }
- case LCFG_POOL_NEW: {
- err = obd_pool_new(obd, lustre_cfg_string(lcfg, 2));
- err = 0;
- goto out;
- }
- case LCFG_POOL_ADD: {
- err = obd_pool_add(obd, lustre_cfg_string(lcfg, 2),
- lustre_cfg_string(lcfg, 3));
- err = 0;
- goto out;
- }
- case LCFG_POOL_REM: {
- err = obd_pool_rem(obd, lustre_cfg_string(lcfg, 2),
- lustre_cfg_string(lcfg, 3));
- err = 0;
- goto out;
- }
- case LCFG_POOL_DEL: {
- err = obd_pool_del(obd, lustre_cfg_string(lcfg, 2));
- err = 0;
- goto out;
- }
- default: {
- err = obd_process_config(obd, sizeof(*lcfg), lcfg);
- goto out;
-
- }
- }
-out:
- if ((err < 0) && !(lcfg->lcfg_command & LCFG_REQUIRED)) {
- CWARN("Ignoring error %d on optional command %#x\n", err,
- lcfg->lcfg_command);
- err = 0;
- }
- return err;
-}
-EXPORT_SYMBOL(class_process_config);
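
(Illustrative sketch, not part of the removed code: driving the attach half of
a device's life cycle through class_process_config(), mirroring how
class_manual_cleanup() below fakes the cleanup/detach half. The device name
and uuid strings are placeholders.)

	struct lustre_cfg_bufs bufs;
	struct lustre_cfg *lcfg;
	int rc;

	/* LCFG_ATTACH: buf 0 = device name, buf 1 = type, buf 2 = uuid */
	lustre_cfg_bufs_reset(&bufs, "foo-OST0000-osc");	/* hypothetical */
	lustre_cfg_bufs_set_string(&bufs, 1, LUSTRE_OSC_NAME);
	lustre_cfg_bufs_set_string(&bufs, 2, "foo-osc-uuid");	/* hypothetical */
	lcfg = lustre_cfg_new(LCFG_ATTACH, &bufs);
	if (!lcfg)
		return -ENOMEM;
	rc = class_process_config(lcfg);
	lustre_cfg_free(lcfg);

	/* an LCFG_SETUP command on the now-attached device follows the same
	 * pattern and is dispatched to class_setup() above */
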
-
-int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
- struct lustre_cfg *lcfg, void *data)
-{
- struct lprocfs_vars *var;
- struct file fakefile;
- struct seq_file fake_seqfile;
- char *key, *sval;
- int i, keylen, vallen;
- int matched = 0, j = 0;
- int rc = 0;
- int skip = 0;
-
- if (lcfg->lcfg_command != LCFG_PARAM) {
- CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- return -EINVAL;
- }
-
- /* fake a seq file so that var->fops->write can work... */
- fakefile.private_data = &fake_seqfile;
- fake_seqfile.private = data;
- /* e.g. tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt
- or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar
- or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */
- for (i = 1; i < lcfg->lcfg_bufcount; i++) {
- key = lustre_cfg_buf(lcfg, i);
- /* Strip off prefix */
- class_match_param(key, prefix, &key);
- sval = strchr(key, '=');
- if (!sval || (*(sval + 1) == 0)) {
- CERROR("Can't parse param %s (missing '=')\n", key);
- /* rc = -EINVAL; continue parsing other params */
- continue;
- }
- keylen = sval - key;
- sval++;
- vallen = strlen(sval);
- matched = 0;
- j = 0;
- /* Search proc entries */
- while (lvars[j].name) {
- var = &lvars[j];
- if (class_match_param(key, (char *)var->name, NULL) == 0
- && keylen == strlen(var->name)) {
- matched++;
- rc = -EROFS;
- if (var->fops && var->fops->write) {
- mm_segment_t oldfs;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- rc = (var->fops->write)(&fakefile, sval,
- vallen, NULL);
- set_fs(oldfs);
- }
- break;
- }
- j++;
- }
- if (!matched) {
- /* If the prefix doesn't match, return error so we
- can pass it down the stack */
- if (strnchr(key, keylen, '.'))
- return -ENOSYS;
- CERROR("%s: unknown param %s\n",
- (char *)lustre_cfg_string(lcfg, 0), key);
- /* rc = -EINVAL; continue parsing other params */
- skip++;
- } else if (rc < 0) {
- CERROR("writing proc entry %s err %d\n",
- var->name, rc);
- rc = 0;
- } else {
- CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
- lustre_cfg_string(lcfg, 0),
- (int)strlen(prefix) - 1, prefix,
- (int)(sval - key - 1), key, sval);
- }
- }
-
- if (rc > 0)
- rc = 0;
- if (!rc && skip)
- rc = skip;
- return rc;
-}
-EXPORT_SYMBOL(class_process_proc_param);
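
(Illustrative sketch, not part of the removed code: the lvars table that
class_process_proc_param() walks. The entry name, fops and prefix are
hypothetical. Only .name and .fops->write are used here: the key is matched
against .name and the value is written through the fops as if it came from a
/proc write.)

	static struct lprocfs_vars foo_obd_vars[] = {
		{
			.name = "max_dirty_mb",		/* hypothetical tunable */
			.fops = &foo_max_dirty_mb_fops,	/* hypothetical fops */
		},
		{
			.name = NULL	/* table ends at a NULL .name */
		}
	};

	/* rc = class_process_proc_param("osc.", foo_obd_vars, lcfg, dev); */
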
-
-extern int lustre_check_exclusion(struct super_block *sb, char *svname);
-
-/** Parse a configuration llog, doing various manipulations on them
- * applying various manipulations to the records (compatibility fixes,
- * skipping obsolete records, changing uuids, etc.), then pass the resulting
- * records to class_process_config().
-int class_config_llog_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data)
-{
- struct config_llog_instance *clli = data;
- int cfg_len = rec->lrh_len;
- char *cfg_buf = (char *) (rec + 1);
- int rc = 0;
-
-	/* class_config_dump_handler(handle, rec, data); */
-
- switch (rec->lrh_type) {
- case OBD_CFG_REC: {
- struct lustre_cfg *lcfg, *lcfg_new;
- struct lustre_cfg_bufs bufs;
- char *inst_name = NULL;
- int inst_len = 0;
- int inst = 0, swab = 0;
-
- lcfg = (struct lustre_cfg *)cfg_buf;
- if (lcfg->lcfg_version == __swab32(LUSTRE_CFG_VERSION)) {
- lustre_swab_lustre_cfg(lcfg);
- swab = 1;
- }
-
- rc = lustre_cfg_sanity_check(cfg_buf, cfg_len);
- if (rc)
- goto out;
-
- /* Figure out config state info */
- if (lcfg->lcfg_command == LCFG_MARKER) {
- struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
- lustre_swab_cfg_marker(marker, swab,
- LUSTRE_CFG_BUFLEN(lcfg, 1));
- CDEBUG(D_CONFIG, "Marker, inst_flg=%#x mark_flg=%#x\n",
- clli->cfg_flags, marker->cm_flags);
- if (marker->cm_flags & CM_START) {
- /* all previous flags off */
- clli->cfg_flags = CFG_F_MARKER;
- if (marker->cm_flags & CM_SKIP) {
- clli->cfg_flags |= CFG_F_SKIP;
- CDEBUG(D_CONFIG, "SKIP #%d\n",
- marker->cm_step);
- } else if ((marker->cm_flags & CM_EXCLUDE) ||
- (clli->cfg_sb &&
- lustre_check_exclusion(clli->cfg_sb,
- marker->cm_tgtname))) {
- clli->cfg_flags |= CFG_F_EXCLUDE;
- CDEBUG(D_CONFIG, "EXCLUDE %d\n",
- marker->cm_step);
- }
- } else if (marker->cm_flags & CM_END) {
- clli->cfg_flags = 0;
- }
- }
- /* A config command without a start marker before it is
- illegal (post 146) */
- if (!(clli->cfg_flags & CFG_F_COMPAT146) &&
- !(clli->cfg_flags & CFG_F_MARKER) &&
- (lcfg->lcfg_command != LCFG_MARKER)) {
- CWARN("Config not inside markers, ignoring! (inst: %p, uuid: %s, flags: %#x)\n",
- clli->cfg_instance,
- clli->cfg_uuid.uuid, clli->cfg_flags);
- clli->cfg_flags |= CFG_F_SKIP;
- }
- if (clli->cfg_flags & CFG_F_SKIP) {
- CDEBUG(D_CONFIG, "skipping %#x\n",
- clli->cfg_flags);
- rc = 0;
- /* No processing! */
- break;
- }
-
- /*
- * For interoperability between 1.8 and 2.0,
- * rename "mds" obd device type to "mdt".
- */
- {
- char *typename = lustre_cfg_string(lcfg, 1);
- char *index = lustre_cfg_string(lcfg, 2);
-
- if ((lcfg->lcfg_command == LCFG_ATTACH && typename &&
- strcmp(typename, "mds") == 0)) {
- CWARN("For 1.8 interoperability, rename obd type from mds to mdt\n");
- typename[2] = 't';
- }
- if ((lcfg->lcfg_command == LCFG_SETUP && index &&
- strcmp(index, "type") == 0)) {
- CDEBUG(D_INFO, "For 1.8 interoperability, set this index to '0'\n");
- index[0] = '0';
- index[1] = 0;
- }
- }
-
-
- if (clli->cfg_flags & CFG_F_EXCLUDE) {
- CDEBUG(D_CONFIG, "cmd: %x marked EXCLUDED\n",
- lcfg->lcfg_command);
- if (lcfg->lcfg_command == LCFG_LOV_ADD_OBD)
- /* Add inactive instead */
- lcfg->lcfg_command = LCFG_LOV_ADD_INA;
- }
-
- lustre_cfg_bufs_init(&bufs, lcfg);
-
- if (clli && clli->cfg_instance &&
- LUSTRE_CFG_BUFLEN(lcfg, 0) > 0) {
- inst = 1;
- inst_len = LUSTRE_CFG_BUFLEN(lcfg, 0) +
- sizeof(clli->cfg_instance) * 2 + 4;
- inst_name = kasprintf(GFP_NOFS, "%s-%p",
- lustre_cfg_string(lcfg, 0),
- clli->cfg_instance);
- if (!inst_name) {
- rc = -ENOMEM;
- goto out;
- }
- lustre_cfg_bufs_set_string(&bufs, 0, inst_name);
- CDEBUG(D_CONFIG, "cmd %x, instance name: %s\n",
- lcfg->lcfg_command, inst_name);
- }
-
-		/* we override the llog's uuid for clients, to ensure they
-		 * are unique */
- if (clli && clli->cfg_instance != NULL &&
- lcfg->lcfg_command == LCFG_ATTACH) {
- lustre_cfg_bufs_set_string(&bufs, 2,
- clli->cfg_uuid.uuid);
- }
- /*
- * sptlrpc config record, we expect 2 data segments:
- * [0]: fs_name/target_name,
- * [1]: rule string
- * moving them to index [1] and [2], and insert MGC's
- * obdname at index [0].
- */
- if (clli && !clli->cfg_instance &&
- lcfg->lcfg_command == LCFG_SPTLRPC_CONF) {
- lustre_cfg_bufs_set(&bufs, 2, bufs.lcfg_buf[1],
- bufs.lcfg_buflen[1]);
- lustre_cfg_bufs_set(&bufs, 1, bufs.lcfg_buf[0],
- bufs.lcfg_buflen[0]);
- lustre_cfg_bufs_set_string(&bufs, 0,
- clli->cfg_obdname);
- }
-
- lcfg_new = lustre_cfg_new(lcfg->lcfg_command, &bufs);
-
- lcfg_new->lcfg_num = lcfg->lcfg_num;
- lcfg_new->lcfg_flags = lcfg->lcfg_flags;
-
- /* XXX Hack to try to remain binary compatible with
- * pre-newconfig logs */
- if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */
- (lcfg->lcfg_nid >> 32) == 0) {
- __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff);
-
- lcfg_new->lcfg_nid =
- LNET_MKNID(LNET_MKNET(lcfg->lcfg_nal, 0), addr);
- CWARN("Converted pre-newconfig NAL %d NID %x to %s\n",
- lcfg->lcfg_nal, addr,
- libcfs_nid2str(lcfg_new->lcfg_nid));
- } else {
- lcfg_new->lcfg_nid = lcfg->lcfg_nid;
- }
-
- lcfg_new->lcfg_nal = 0; /* illegal value for obsolete field */
-
- rc = class_process_config(lcfg_new);
- lustre_cfg_free(lcfg_new);
-
- if (inst)
- kfree(inst_name);
- break;
- }
- default:
- CERROR("Unknown llog record type %#x encountered\n",
- rec->lrh_type);
- break;
- }
-out:
- if (rc) {
- CERROR("%s: cfg command failed: rc = %d\n",
- handle->lgh_ctxt->loc_obd->obd_name, rc);
- class_config_dump_handler(NULL, handle, rec, data);
- }
- return rc;
-}
-EXPORT_SYMBOL(class_config_llog_handler);
-
-int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
- char *name, struct config_llog_instance *cfg)
-{
- struct llog_process_cat_data cd = {0, 0};
- struct llog_handle *llh;
- llog_cb_t callback;
- int rc;
-
- CDEBUG(D_INFO, "looking up llog %s\n", name);
- rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
- if (rc)
- return rc;
-
- rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
- if (rc)
- goto parse_out;
-
- /* continue processing from where we last stopped to end-of-log */
- if (cfg) {
- cd.lpcd_first_idx = cfg->cfg_last_idx;
- callback = cfg->cfg_callback;
- LASSERT(callback != NULL);
- } else {
- callback = class_config_llog_handler;
- }
-
- cd.lpcd_last_idx = 0;
-
- rc = llog_process(env, llh, callback, cfg, &cd);
-
- CDEBUG(D_CONFIG, "Processed log %s gen %d-%d (rc=%d)\n", name,
- cd.lpcd_first_idx + 1, cd.lpcd_last_idx, rc);
- if (cfg)
- cfg->cfg_last_idx = cd.lpcd_last_idx;
-
-parse_out:
- llog_close(env, llh);
- return rc;
-}
-EXPORT_SYMBOL(class_config_parse_llog);
-
-/**
- * Parse a config record and write a dump of it into the supplied buffer.
- * This is kept separate from class_config_dump_handler() so it can also be
- * used for ioctl needs
- */
-int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, int size)
-{
- struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1);
- char *ptr = buf;
- char *end = buf + size;
- int rc = 0;
-
- LASSERT(rec->lrh_type == OBD_CFG_REC);
- rc = lustre_cfg_sanity_check(lcfg, rec->lrh_len);
- if (rc < 0)
- return rc;
-
- ptr += snprintf(ptr, end-ptr, "cmd=%05x ", lcfg->lcfg_command);
- if (lcfg->lcfg_flags)
- ptr += snprintf(ptr, end-ptr, "flags=%#08x ",
- lcfg->lcfg_flags);
-
- if (lcfg->lcfg_num)
- ptr += snprintf(ptr, end-ptr, "num=%#08x ", lcfg->lcfg_num);
-
- if (lcfg->lcfg_nid)
- ptr += snprintf(ptr, end-ptr, "nid=%s(%#llx)\n ",
- libcfs_nid2str(lcfg->lcfg_nid),
- lcfg->lcfg_nid);
-
- if (lcfg->lcfg_command == LCFG_MARKER) {
- struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
-
- ptr += snprintf(ptr, end-ptr, "marker=%d(%#x)%s '%s'",
- marker->cm_step, marker->cm_flags,
- marker->cm_tgtname, marker->cm_comment);
- } else {
- int i;
-
- for (i = 0; i < lcfg->lcfg_bufcount; i++) {
- ptr += snprintf(ptr, end-ptr, "%d:%s ", i,
- lustre_cfg_string(lcfg, i));
- }
- }
- /* return consumed bytes */
- rc = ptr - buf;
- return rc;
-}
-
-int class_config_dump_handler(const struct lu_env *env,
- struct llog_handle *handle,
- struct llog_rec_hdr *rec, void *data)
-{
- char *outstr;
- int rc = 0;
-
- outstr = kzalloc(256, GFP_NOFS);
- if (!outstr)
- return -ENOMEM;
-
- if (rec->lrh_type == OBD_CFG_REC) {
- class_config_parse_rec(rec, outstr, 256);
- LCONSOLE(D_WARNING, " %s\n", outstr);
- } else {
- LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type);
- rc = -EINVAL;
- }
-
- kfree(outstr);
- return rc;
-}
-
-/** Call class_cleanup and class_detach.
- * "Manual" only in the sense that we're faking lcfg commands.
- */
-int class_manual_cleanup(struct obd_device *obd)
-{
- char flags[3] = "";
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- int rc;
-
- if (!obd) {
- CERROR("empty cleanup\n");
- return -EALREADY;
- }
-
- if (obd->obd_force)
- strcat(flags, "F");
- if (obd->obd_fail)
- strcat(flags, "A");
-
- CDEBUG(D_CONFIG, "Manual cleanup of %s (flags='%s')\n",
- obd->obd_name, flags);
-
- lustre_cfg_bufs_reset(&bufs, obd->obd_name);
- lustre_cfg_bufs_set_string(&bufs, 1, flags);
- lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs);
- if (!lcfg)
- return -ENOMEM;
-
- rc = class_process_config(lcfg);
- if (rc) {
- CERROR("cleanup failed %d: %s\n", rc, obd->obd_name);
- goto out;
- }
-
- /* the lcfg is almost the same for both ops */
- lcfg->lcfg_command = LCFG_DETACH;
- rc = class_process_config(lcfg);
- if (rc)
- CERROR("detach failed %d: %s\n", rc, obd->obd_name);
-out:
- lustre_cfg_free(lcfg);
- return rc;
-}
-EXPORT_SYMBOL(class_manual_cleanup);
-
-/*
- * uuid<->export lustre hash operations
- */
-
-static unsigned
-uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
-{
- return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
- sizeof(((struct obd_uuid *)key)->uuid), mask);
-}
-
-static void *
-uuid_key(struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
-
- return &exp->exp_client_uuid;
-}
-
-/*
- * NOTE: It is impossible to find an export that is in failed
- * state with this function
- */
-static int
-uuid_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- LASSERT(key);
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
-
- return obd_uuid_equals(key, &exp->exp_client_uuid) &&
- !exp->exp_failed;
-}
-
-static void *
-uuid_export_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct obd_export, exp_uuid_hash);
-}
-
-static void
-uuid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
- class_export_get(exp);
-}
-
-static void
-uuid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
- class_export_put(exp);
-}
-
-static cfs_hash_ops_t uuid_hash_ops = {
- .hs_hash = uuid_hash,
- .hs_key = uuid_key,
- .hs_keycmp = uuid_keycmp,
- .hs_object = uuid_export_object,
- .hs_get = uuid_export_get,
- .hs_put_locked = uuid_export_put_locked,
-};
-
-
-/*
- * nid<->export hash operations
- */
-
-static unsigned
-nid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
-{
- return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
-}
-
-static void *
-nid_key(struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
-
- return &exp->exp_connection->c_peer.nid;
-}
-
-/*
- * NOTE: It is impossible to find an export that is in failed
- * state with this function
- */
-static int
-nid_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- LASSERT(key);
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
-
- return exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
- !exp->exp_failed;
-}
-
-static void *
-nid_export_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct obd_export, exp_nid_hash);
-}
-
-static void
-nid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
- class_export_get(exp);
-}
-
-static void
-nid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct obd_export *exp;
-
- exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
- class_export_put(exp);
-}
-
-static cfs_hash_ops_t nid_hash_ops = {
- .hs_hash = nid_hash,
- .hs_key = nid_key,
-	.hs_keycmp      = nid_keycmp,
- .hs_object = nid_export_object,
- .hs_get = nid_export_get,
- .hs_put_locked = nid_export_put_locked,
-};
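The uuid and nid hash callbacks above delegate the actual hashing to cfs_hash_djb2_hash(). A minimal standalone sketch of the classic djb2 string hash masked down to a bucket index is shown below; it assumes the in-kernel helper follows the usual djb2 scheme and is only an illustration, not the original implementation.

#include <stdio.h>
#include <stddef.h>

static unsigned int djb2_hash(const void *key, size_t len, unsigned int mask)
{
        const unsigned char *p = key;
        unsigned int hash = 5381;
        size_t i;

        for (i = 0; i < len; i++)
                hash = (hash << 5) + hash + p[i];       /* hash * 33 + c */

        return hash & mask;     /* mask = 2^n - 1 selects one of 2^n buckets */
}

int main(void)
{
        char uuid[40] = "c81f91c0-5c4a-4f57-ae29-000000000000";

        /* like uuid_hash(): hash the fixed-size uuid buffer into 128 buckets */
        printf("bucket %u\n", djb2_hash(uuid, sizeof(uuid), 127));
        return 0;
}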
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
deleted file mode 100644
index bc15e1546844..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/obd_mount.c
- *
- * Client mount routines
- *
- * Author: Nathan Rutman <nathan@clusterfs.com>
- */
-
-
-#define DEBUG_SUBSYSTEM S_CLASS
-#define D_MOUNT (D_SUPER|D_CONFIG/*|D_WARNING */)
-#define PRINT_CMD CDEBUG
-
-#include "../include/obd.h"
-#include "../include/linux/lustre_compat25.h"
-#include "../include/obd_class.h"
-#include "../include/lustre/lustre_user.h"
-#include "../include/lustre_log.h"
-#include "../include/lustre_disk.h"
-#include "../include/lustre_param.h"
-
-static int (*client_fill_super)(struct super_block *sb,
- struct vfsmount *mnt);
-
-static void (*kill_super_cb)(struct super_block *sb);
-
-/**************** config llog ********************/
-
-/** Get a config log from the MGS and process it.
- * This function is called for both clients and servers.
- * Continue to process new statements appended to the logs
- * (whenever the config lock is revoked) until lustre_end_log
- * is called.
- * @param sb The superblock is used by the MGC to write to the local copy of
- * the config log
- * @param logname The name of the llog to replicate from the MGS
- * @param cfg Since the same mgc may be used to follow multiple config logs
- * (e.g. ost1, ost2, client), the config_llog_instance keeps the state for
- * this log, and is added to the mgc's list of logs to follow.
- */
-int lustre_process_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg)
-{
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs *bufs;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *mgc = lsi->lsi_mgc;
- int rc;
-
- LASSERT(mgc);
- LASSERT(cfg);
-
- bufs = kzalloc(sizeof(*bufs), GFP_NOFS);
- if (!bufs)
- return -ENOMEM;
-
- /* mgc_process_config */
- lustre_cfg_bufs_reset(bufs, mgc->obd_name);
- lustre_cfg_bufs_set_string(bufs, 1, logname);
- lustre_cfg_bufs_set(bufs, 2, cfg, sizeof(*cfg));
- lustre_cfg_bufs_set(bufs, 3, &sb, sizeof(sb));
- lcfg = lustre_cfg_new(LCFG_LOG_START, bufs);
- rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
- lustre_cfg_free(lcfg);
-
- kfree(bufs);
-
- if (rc == -EINVAL)
- LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
- mgc->obd_name, logname, rc);
-
- if (rc)
- LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
- mgc->obd_name, logname,
- rc);
-
- /* class_obd_list(); */
- return rc;
-}
-EXPORT_SYMBOL(lustre_process_log);
-
-/* Stop watching this config log for updates */
-int lustre_end_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg)
-{
- struct lustre_cfg *lcfg;
- struct lustre_cfg_bufs bufs;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *mgc = lsi->lsi_mgc;
- int rc;
-
- if (!mgc)
- return -ENOENT;
-
- /* mgc_process_config */
- lustre_cfg_bufs_reset(&bufs, mgc->obd_name);
- lustre_cfg_bufs_set_string(&bufs, 1, logname);
- if (cfg)
- lustre_cfg_bufs_set(&bufs, 2, cfg, sizeof(*cfg));
- lcfg = lustre_cfg_new(LCFG_LOG_END, &bufs);
- rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
- lustre_cfg_free(lcfg);
- return rc;
-}
-EXPORT_SYMBOL(lustre_end_log);
-
-/**************** obd start *******************/
-
-/** lustre_cfg_bufs are a holdover from 1.4; we can still set these up from
- * lctl (and do for echo cli/srv).
- */
-int do_lcfg(char *cfgname, lnet_nid_t nid, int cmd,
- char *s1, char *s2, char *s3, char *s4)
-{
- struct lustre_cfg_bufs bufs;
- struct lustre_cfg *lcfg = NULL;
- int rc;
-
- CDEBUG(D_TRACE, "lcfg %s %#x %s %s %s %s\n", cfgname,
- cmd, s1, s2, s3, s4);
-
- lustre_cfg_bufs_reset(&bufs, cfgname);
- if (s1)
- lustre_cfg_bufs_set_string(&bufs, 1, s1);
- if (s2)
- lustre_cfg_bufs_set_string(&bufs, 2, s2);
- if (s3)
- lustre_cfg_bufs_set_string(&bufs, 3, s3);
- if (s4)
- lustre_cfg_bufs_set_string(&bufs, 4, s4);
-
- lcfg = lustre_cfg_new(cmd, &bufs);
- lcfg->lcfg_nid = nid;
- rc = class_process_config(lcfg);
- lustre_cfg_free(lcfg);
- return rc;
-}
-EXPORT_SYMBOL(do_lcfg);
-
-/** Call class_attach and class_setup. These methods in turn call
- * obd type-specific methods.
- */
-int lustre_start_simple(char *obdname, char *type, char *uuid,
- char *s1, char *s2, char *s3, char *s4)
-{
- int rc;
- CDEBUG(D_MOUNT, "Starting obd %s (typ=%s)\n", obdname, type);
-
- rc = do_lcfg(obdname, 0, LCFG_ATTACH, type, uuid, NULL, NULL);
- if (rc) {
- CERROR("%s attach error %d\n", obdname, rc);
- return rc;
- }
- rc = do_lcfg(obdname, 0, LCFG_SETUP, s1, s2, s3, s4);
- if (rc) {
- CERROR("%s setup error %d\n", obdname, rc);
- do_lcfg(obdname, 0, LCFG_DETACH, NULL, NULL, NULL, NULL);
- }
- return rc;
-}
-
-DEFINE_MUTEX(mgc_start_lock);
-
-/** Set up a mgc obd to process startup logs
- *
- * \param sb [in] super block of the mgc obd
- *
- * \retval 0 success, otherwise error code
- */
-int lustre_start_mgc(struct super_block *sb)
-{
- struct obd_connect_data *data = NULL;
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *obd;
- struct obd_export *exp;
- struct obd_uuid *uuid;
- class_uuid_t uuidc;
- lnet_nid_t nid;
- char *mgcname = NULL, *niduuid = NULL, *mgssec = NULL;
- char *ptr;
- int rc = 0, i = 0, j, len;
-
- LASSERT(lsi->lsi_lmd);
-
- /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */
- ptr = lsi->lsi_lmd->lmd_dev;
- if (class_parse_nid(ptr, &nid, &ptr) == 0)
- i++;
- if (i == 0) {
- CERROR("No valid MGS nids found.\n");
- return -EINVAL;
- }
-
- mutex_lock(&mgc_start_lock);
-
- len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
- mgcname = kasprintf(GFP_NOFS,
- "%s%s", LUSTRE_MGC_OBDNAME, libcfs_nid2str(nid));
- niduuid = kasprintf(GFP_NOFS, "%s_%x", mgcname, i);
- if (!mgcname || !niduuid) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
-
- data = kzalloc(sizeof(*data), GFP_NOFS);
- if (!data) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- obd = class_name2obd(mgcname);
- if (obd && !obd->obd_stopping) {
- int recov_bk;
-
- rc = obd_set_info_async(NULL, obd->obd_self_export,
- strlen(KEY_MGSSEC), KEY_MGSSEC,
- strlen(mgssec), mgssec, NULL);
- if (rc)
- goto out_free;
-
- /* Re-using an existing MGC */
- atomic_inc(&obd->u.cli.cl_mgc_refcount);
-
- /* IR compatibility check, only for clients */
- if (lmd_is_client(lsi->lsi_lmd)) {
- int has_ir;
- int vallen = sizeof(*data);
- __u32 *flags = &lsi->lsi_lmd->lmd_flags;
-
- rc = obd_get_info(NULL, obd->obd_self_export,
- strlen(KEY_CONN_DATA), KEY_CONN_DATA,
- &vallen, data, NULL);
- LASSERT(rc == 0);
- has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
- if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
- /* LMD_FLG_NOIR is for test purpose only */
-				LCONSOLE_WARN(
-					"Trying to mount a client with an IR setting that is not compatible with the current mgc. Forcing use of the current mgc setting, IR %s.\n",
-					has_ir ? "enabled" : "disabled");
- if (has_ir)
- *flags &= ~LMD_FLG_NOIR;
- else
- *flags |= LMD_FLG_NOIR;
- }
- }
-
- recov_bk = 0;
-
- /* Try all connections, but only once (again).
- We don't want to block another target from starting
- (using its local copy of the log), but we do want to connect
- if at all possible. */
- recov_bk++;
- CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,
- recov_bk);
- rc = obd_set_info_async(NULL, obd->obd_self_export,
- sizeof(KEY_INIT_RECOV_BACKUP),
- KEY_INIT_RECOV_BACKUP,
- sizeof(recov_bk), &recov_bk, NULL);
- rc = 0;
- goto out;
- }
-
- CDEBUG(D_MOUNT, "Start MGC '%s'\n", mgcname);
-
- /* Add the primary nids for the MGS */
- i = 0;
- /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */
- ptr = lsi->lsi_lmd->lmd_dev;
- while (class_parse_nid(ptr, &nid, &ptr) == 0) {
- rc = do_lcfg(mgcname, nid,
- LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
- i++;
- /* Stop at the first failover nid */
- if (*ptr == ':')
- break;
- }
- if (i == 0) {
- CERROR("No valid MGS nids found.\n");
- rc = -EINVAL;
- goto out_free;
- }
- lsi->lsi_lmd->lmd_mgs_failnodes = 1;
-
- /* Random uuid for MGC allows easier reconnects */
- uuid = kzalloc(sizeof(*uuid), GFP_NOFS);
- if (!uuid) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- ll_generate_random_uuid(uuidc);
- class_uuid_unparse(uuidc, uuid);
-
- /* Start the MGC */
- rc = lustre_start_simple(mgcname, LUSTRE_MGC_NAME,
- (char *)uuid->uuid, LUSTRE_MGS_OBDNAME,
- niduuid, NULL, NULL);
- kfree(uuid);
- if (rc)
- goto out_free;
-
- /* Add any failover MGS nids */
- i = 1;
- while (ptr && ((*ptr == ':' ||
- class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) {
- /* New failover node */
- sprintf(niduuid, "%s_%x", mgcname, i);
- j = 0;
- while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) {
- j++;
- rc = do_lcfg(mgcname, nid,
- LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
- if (*ptr == ':')
- break;
- }
- if (j > 0) {
- rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN,
- niduuid, NULL, NULL, NULL);
- i++;
- } else {
- /* at ":/fsname" */
- break;
- }
- }
- lsi->lsi_lmd->lmd_mgs_failnodes = i;
-
- obd = class_name2obd(mgcname);
- if (!obd) {
- CERROR("Can't find mgcobd %s\n", mgcname);
- rc = -ENOTCONN;
- goto out_free;
- }
-
- rc = obd_set_info_async(NULL, obd->obd_self_export,
- strlen(KEY_MGSSEC), KEY_MGSSEC,
- strlen(mgssec), mgssec, NULL);
- if (rc)
- goto out_free;
-
-	/* Keep a refcount of servers/clients that started with "mount",
-	 * so we know when we can get rid of the mgc. */
- atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
-
- /* We connect to the MGS at setup, and don't disconnect until cleanup */
- data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT |
- OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
- OBD_CONNECT_LVB_TYPE;
-
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
- data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
-#else
-#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
-#endif
-
- if (lmd_is_client(lsi->lsi_lmd) &&
- lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)
- data->ocd_connect_flags &= ~OBD_CONNECT_IMP_RECOV;
- data->ocd_version = LUSTRE_VERSION_CODE;
- rc = obd_connect(NULL, &exp, obd, &(obd->obd_uuid), data, NULL);
- if (rc) {
- CERROR("connect failed %d\n", rc);
- goto out;
- }
-
- obd->u.cli.cl_mgc_mgsexp = exp;
-
-out:
- /* Keep the mgc info in the sb. Note that many lsi's can point
- to the same mgc.*/
- lsi->lsi_mgc = obd;
-out_free:
- mutex_unlock(&mgc_start_lock);
-
- kfree(data);
- kfree(mgcname);
- kfree(niduuid);
- return rc;
-}
-
-static int lustre_stop_mgc(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct obd_device *obd;
- char *niduuid = NULL, *ptr = NULL;
- int i, rc = 0, len = 0;
-
- if (!lsi)
- return -ENOENT;
- obd = lsi->lsi_mgc;
- if (!obd)
- return -ENOENT;
- lsi->lsi_mgc = NULL;
-
- mutex_lock(&mgc_start_lock);
- LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
- if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
- /* This is not fatal, every client that stops
- will call in here. */
- CDEBUG(D_MOUNT, "mgc still has %d references.\n",
- atomic_read(&obd->u.cli.cl_mgc_refcount));
- rc = -EBUSY;
- goto out;
- }
-
- /* The MGC has no recoverable data in any case.
- * force shutdown set in umount_begin */
- obd->obd_no_recov = 1;
-
- if (obd->u.cli.cl_mgc_mgsexp) {
-		/* An error is not fatal; if we are unable to send the
-		 * disconnect, the MGS ping evictor cleans up the export. */
- rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp);
- if (rc)
- CDEBUG(D_MOUNT, "disconnect failed %d\n", rc);
- }
-
- /* Save the obdname for cleaning the nid uuids, which are
- obdname_XX */
- len = strlen(obd->obd_name) + 6;
- niduuid = kzalloc(len, GFP_NOFS);
- if (niduuid) {
- strcpy(niduuid, obd->obd_name);
- ptr = niduuid + strlen(niduuid);
- }
-
- rc = class_manual_cleanup(obd);
- if (rc)
- goto out;
-
- /* Clean the nid uuids */
- if (!niduuid) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < lsi->lsi_lmd->lmd_mgs_failnodes; i++) {
- sprintf(ptr, "_%x", i);
- rc = do_lcfg(LUSTRE_MGC_OBDNAME, 0, LCFG_DEL_UUID,
- niduuid, NULL, NULL, NULL);
- if (rc)
-			CERROR("del MGC UUID %s failed: rc = %d\n",
-			       niduuid, rc);
- }
-out:
- kfree(niduuid);
-
- /* class_import_put will get rid of the additional connections */
- mutex_unlock(&mgc_start_lock);
- return rc;
-}
-
-/***************** lustre superblock **************/
-
-struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
-{
- struct lustre_sb_info *lsi;
-
- lsi = kzalloc(sizeof(*lsi), GFP_NOFS);
- if (!lsi)
- return NULL;
- lsi->lsi_lmd = kzalloc(sizeof(*lsi->lsi_lmd), GFP_NOFS);
- if (!lsi->lsi_lmd) {
- kfree(lsi);
- return NULL;
- }
-
- lsi->lsi_lmd->lmd_exclude_count = 0;
- lsi->lsi_lmd->lmd_recovery_time_soft = 0;
- lsi->lsi_lmd->lmd_recovery_time_hard = 0;
- s2lsi_nocast(sb) = lsi;
- /* we take 1 extra ref for our setup */
- atomic_set(&lsi->lsi_mounts, 1);
-
- /* Default umount style */
- lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
-
- return lsi;
-}
-
-static int lustre_free_lsi(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
-
- LASSERT(lsi != NULL);
- CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
-
- /* someone didn't call server_put_mount. */
- LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
-
- if (lsi->lsi_lmd != NULL) {
- kfree(lsi->lsi_lmd->lmd_dev);
- kfree(lsi->lsi_lmd->lmd_profile);
- kfree(lsi->lsi_lmd->lmd_mgssec);
- kfree(lsi->lsi_lmd->lmd_opts);
- if (lsi->lsi_lmd->lmd_exclude_count)
- kfree(lsi->lsi_lmd->lmd_exclude);
- kfree(lsi->lsi_lmd->lmd_mgs);
- kfree(lsi->lsi_lmd->lmd_osd_type);
- kfree(lsi->lsi_lmd->lmd_params);
-
- kfree(lsi->lsi_lmd);
- }
-
- LASSERT(lsi->lsi_llsbi == NULL);
- kfree(lsi);
- s2lsi_nocast(sb) = NULL;
-
- return 0;
-}
-
-/* The lsi has one reference for every server that is using the disk -
- e.g. MDT, MGS, and potentially MGC */
-int lustre_put_lsi(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
-
- LASSERT(lsi != NULL);
-
- CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
- if (atomic_dec_and_test(&lsi->lsi_mounts)) {
- lustre_free_lsi(sb);
- return 1;
- }
- return 0;
-}
-
-/*** SERVER NAME ***
- * <FSNAME><SEPARATOR><TYPE><INDEX>
- * FSNAME is between 1 and 8 characters (inclusive).
- * Excluded characters are '/' and ':'
- * SEPARATOR is either ':' or '-'
- * TYPE: "OST", "MDT", etc.
- * INDEX: Hex representation of the index
- */
-
-/** Get the fsname ("lustre") from the server name ("lustre-OST003F").
- * @param [in] svname server name including type and index
- * @param [out] fsname Buffer to copy filesystem name prefix into.
- * Must have at least 'strlen(fsname) + 1' chars.
- * @param [out] endptr if endptr isn't NULL it is set to the end of fsname
- * @return 0 on success, rc < 0 on error
- */
-int server_name2fsname(const char *svname, char *fsname, const char **endptr)
-{
- const char *dash;
-
- dash = svname + strnlen(svname, 8); /* max fsname length is 8 */
- for (; dash > svname && *dash != '-' && *dash != ':'; dash--)
- ;
- if (dash == svname)
- return -EINVAL;
-
- if (fsname != NULL) {
- strncpy(fsname, svname, dash - svname);
- fsname[dash - svname] = '\0';
- }
-
- if (endptr != NULL)
- *endptr = dash;
-
- return 0;
-}
-EXPORT_SYMBOL(server_name2fsname);
-
-/* Get the index from the obd name.
- * rc = server type, or rc < 0 on error.
- * If endptr isn't NULL it is set to the end of the name. */
-int server_name2index(const char *svname, __u32 *idx, const char **endptr)
-{
- unsigned long index;
- int rc;
- const char *dash;
-
- /* We use server_name2fsname() just for parsing */
- rc = server_name2fsname(svname, NULL, &dash);
- if (rc != 0)
- return rc;
-
- dash++;
-
- if (strncmp(dash, "MDT", 3) == 0)
- rc = LDD_F_SV_TYPE_MDT;
- else if (strncmp(dash, "OST", 3) == 0)
- rc = LDD_F_SV_TYPE_OST;
- else
- return -EINVAL;
-
- dash += 3;
-
- if (strncmp(dash, "all", 3) == 0) {
- if (endptr != NULL)
- *endptr = dash + 3;
- return rc | LDD_F_SV_ALL;
- }
-
- index = simple_strtoul(dash, (char **)endptr, 16);
- if (idx != NULL)
- *idx = index;
-
-	/* Account for a "-mdc" suffix after the index, which is possible
-	 * when specifying an mdt */
- if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
- sizeof(LUSTRE_MDC_NAME)-1) == 0)
- *endptr += sizeof(LUSTRE_MDC_NAME);
-
- return rc;
-}
-EXPORT_SYMBOL(server_name2index);
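server_name2fsname() and server_name2index() above implement the <FSNAME><SEPARATOR><TYPE><INDEX> naming convention documented earlier in this file. The following is a minimal userspace sketch of that parse for a simple name such as "lustre-OST003F"; it is illustrative only, and the real helpers also handle ':' separators, the "all" index, and a trailing "-mdc".

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *svname = "lustre-OST003F";
        const char *dash = strrchr(svname, '-');
        char fsname[9];         /* fsname is at most 8 chars + NUL */

        if (!dash || dash == svname || dash - svname > 8)
                return 1;       /* no separator or fsname too long */
        memcpy(fsname, svname, dash - svname);
        fsname[dash - svname] = '\0';

        /* prints: fsname=lustre type=OST index=0x3f */
        printf("fsname=%s type=%.3s index=%#lx\n",
               fsname, dash + 1, strtoul(dash + 4, NULL, 16));
        return 0;
}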
-
-/*************** mount common between server and client ***************/
-
-/* Common umount */
-int lustre_common_put_super(struct super_block *sb)
-{
- int rc;
-
- CDEBUG(D_MOUNT, "dropping sb %p\n", sb);
-
- /* Drop a ref to the MGC */
- rc = lustre_stop_mgc(sb);
- if (rc && (rc != -ENOENT)) {
- if (rc != -EBUSY) {
- CERROR("Can't stop MGC: %d\n", rc);
- return rc;
- }
-		/* BUSY just means that there's some other obd that
-		 * needs the mgc; let it clean things up. */
- CDEBUG(D_MOUNT, "MGC still in use\n");
- }
- /* Drop a ref to the mounted disk */
- lustre_put_lsi(sb);
- lu_types_stop();
- return rc;
-}
-EXPORT_SYMBOL(lustre_common_put_super);
-
-static void lmd_print(struct lustre_mount_data *lmd)
-{
- int i;
-
- PRINT_CMD(D_MOUNT, " mount data:\n");
- if (lmd_is_client(lmd))
- PRINT_CMD(D_MOUNT, "profile: %s\n", lmd->lmd_profile);
- PRINT_CMD(D_MOUNT, "device: %s\n", lmd->lmd_dev);
- PRINT_CMD(D_MOUNT, "flags: %x\n", lmd->lmd_flags);
-
- if (lmd->lmd_opts)
- PRINT_CMD(D_MOUNT, "options: %s\n", lmd->lmd_opts);
-
- if (lmd->lmd_recovery_time_soft)
- PRINT_CMD(D_MOUNT, "recovery time soft: %d\n",
- lmd->lmd_recovery_time_soft);
-
- if (lmd->lmd_recovery_time_hard)
- PRINT_CMD(D_MOUNT, "recovery time hard: %d\n",
- lmd->lmd_recovery_time_hard);
-
- for (i = 0; i < lmd->lmd_exclude_count; i++) {
- PRINT_CMD(D_MOUNT, "exclude %d: OST%04x\n", i,
- lmd->lmd_exclude[i]);
- }
-}
-
-/* Is this server on the exclusion list? */
-int lustre_check_exclusion(struct super_block *sb, char *svname)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
- struct lustre_mount_data *lmd = lsi->lsi_lmd;
- __u32 index;
- int i, rc;
-
- rc = server_name2index(svname, &index, NULL);
- if (rc != LDD_F_SV_TYPE_OST)
- /* Only exclude OSTs */
- return 0;
-
- CDEBUG(D_MOUNT, "Check exclusion %s (%d) in %d of %s\n", svname,
- index, lmd->lmd_exclude_count, lmd->lmd_dev);
-
- for (i = 0; i < lmd->lmd_exclude_count; i++) {
- if (index == lmd->lmd_exclude[i]) {
- CWARN("Excluding %s (on exclusion list)\n", svname);
- return 1;
- }
- }
- return 0;
-}
-
-/* mount -v -o exclude=lustre-OST0001:lustre-OST0002 -t lustre ... */
-static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
-{
- const char *s1 = ptr, *s2;
- __u32 index, *exclude_list;
- int rc = 0, devmax;
-
- /* The shortest an ost name can be is 8 chars: -OST0000.
- We don't actually know the fsname at this time, so in fact
- a user could specify any fsname. */
- devmax = strlen(ptr) / 8 + 1;
-
- /* temp storage until we figure out how many we have */
- exclude_list = kcalloc(devmax, sizeof(index), GFP_NOFS);
- if (!exclude_list)
- return -ENOMEM;
-
- /* we enter this fn pointing at the '=' */
- while (*s1 && *s1 != ' ' && *s1 != ',') {
- s1++;
- rc = server_name2index(s1, &index, &s2);
- if (rc < 0) {
- CERROR("Can't parse server name '%s': rc = %d\n",
- s1, rc);
- break;
- }
- if (rc == LDD_F_SV_TYPE_OST)
- exclude_list[lmd->lmd_exclude_count++] = index;
- else
- CDEBUG(D_MOUNT, "ignoring exclude %.*s: type = %#x\n",
- (uint)(s2-s1), s1, rc);
- s1 = s2;
- /* now we are pointing at ':' (next exclude)
- or ',' (end of excludes) */
- if (lmd->lmd_exclude_count >= devmax)
- break;
- }
- if (rc >= 0) /* non-err */
- rc = 0;
-
- if (lmd->lmd_exclude_count) {
- /* permanent, freed in lustre_free_lsi */
- lmd->lmd_exclude = kcalloc(lmd->lmd_exclude_count,
- sizeof(index), GFP_NOFS);
- if (lmd->lmd_exclude) {
- memcpy(lmd->lmd_exclude, exclude_list,
- sizeof(index) * lmd->lmd_exclude_count);
- } else {
- rc = -ENOMEM;
- lmd->lmd_exclude_count = 0;
- }
- }
- kfree(exclude_list);
- return rc;
-}
-
-static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
-{
- char *tail;
- int length;
-
- kfree(lmd->lmd_mgssec);
- lmd->lmd_mgssec = NULL;
-
- tail = strchr(ptr, ',');
- if (tail == NULL)
- length = strlen(ptr);
- else
- length = tail - ptr;
-
- lmd->lmd_mgssec = kzalloc(length + 1, GFP_NOFS);
- if (!lmd->lmd_mgssec)
- return -ENOMEM;
-
- memcpy(lmd->lmd_mgssec, ptr, length);
- lmd->lmd_mgssec[length] = '\0';
- return 0;
-}
-
-static int lmd_parse_string(char **handle, char *ptr)
-{
- char *tail;
- int length;
-
- if ((handle == NULL) || (ptr == NULL))
- return -EINVAL;
-
- kfree(*handle);
- *handle = NULL;
-
- tail = strchr(ptr, ',');
- if (tail == NULL)
- length = strlen(ptr);
- else
- length = tail - ptr;
-
- *handle = kzalloc(length + 1, GFP_NOFS);
- if (!*handle)
- return -ENOMEM;
-
- memcpy(*handle, ptr, length);
- (*handle)[length] = '\0';
-
- return 0;
-}
-
-/* Collect multiple values for mgsnid specifiers */
-static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr)
-{
- lnet_nid_t nid;
- char *tail = *ptr;
- char *mgsnid;
- int length;
- int oldlen = 0;
-
- /* Find end of nidlist */
- while (class_parse_nid_quiet(tail, &nid, &tail) == 0)
- ;
- length = tail - *ptr;
- if (length == 0) {
- LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", *ptr);
- return -EINVAL;
- }
-
- if (lmd->lmd_mgs != NULL)
- oldlen = strlen(lmd->lmd_mgs) + 1;
-
- mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS);
- if (!mgsnid)
- return -ENOMEM;
-
- if (lmd->lmd_mgs != NULL) {
- /* Multiple mgsnid= are taken to mean failover locations */
- memcpy(mgsnid, lmd->lmd_mgs, oldlen);
- mgsnid[oldlen - 1] = ':';
- kfree(lmd->lmd_mgs);
- }
- memcpy(mgsnid + oldlen, *ptr, length);
- mgsnid[oldlen + length] = '\0';
- lmd->lmd_mgs = mgsnid;
- *ptr = tail;
-
- return 0;
-}
-
-/** Parse mount line options
- * e.g. mount -v -t lustre -o abort_recov uml1:uml2:/lustre-client /mnt/lustre
- * dev is passed as device=uml1:/lustre by mount.lustre
- */
-static int lmd_parse(char *options, struct lustre_mount_data *lmd)
-{
- char *s1, *s2, *devname = NULL;
- struct lustre_mount_data *raw = (struct lustre_mount_data *)options;
- int rc = 0;
-
- LASSERT(lmd);
- if (!options) {
- LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that /sbin/mount.lustre is installed.\n");
- return -EINVAL;
- }
-
- /* Options should be a string - try to detect old lmd data */
- if ((raw->lmd_magic & 0xffffff00) == (LMD_MAGIC & 0xffffff00)) {
- LCONSOLE_ERROR_MSG(0x163, "You're using an old version of /sbin/mount.lustre. Please install version %s\n",
- LUSTRE_VERSION_STRING);
- return -EINVAL;
- }
- lmd->lmd_magic = LMD_MAGIC;
-
- lmd->lmd_params = kzalloc(4096, GFP_NOFS);
- if (!lmd->lmd_params)
- return -ENOMEM;
- lmd->lmd_params[0] = '\0';
-
- /* Set default flags here */
-
- s1 = options;
- while (*s1) {
- int clear = 0;
- int time_min = OBD_RECOVERY_TIME_MIN;
-
- /* Skip whitespace and extra commas */
- while (*s1 == ' ' || *s1 == ',')
- s1++;
-
-		/* Client options are parsed in ll_options: e.g. flock,
-		 * user_xattr, acl */
-
-		/* Parse non-ldiskfs options here. Rather than modifying
-		 * ldiskfs, we simply zero them out. */
- if (strncmp(s1, "abort_recov", 11) == 0) {
- lmd->lmd_flags |= LMD_FLG_ABORT_RECOV;
- clear++;
- } else if (strncmp(s1, "recovery_time_soft=", 19) == 0) {
- lmd->lmd_recovery_time_soft = max_t(int,
- simple_strtoul(s1 + 19, NULL, 10), time_min);
- clear++;
- } else if (strncmp(s1, "recovery_time_hard=", 19) == 0) {
- lmd->lmd_recovery_time_hard = max_t(int,
- simple_strtoul(s1 + 19, NULL, 10), time_min);
- clear++;
- } else if (strncmp(s1, "noir", 4) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOIR; /* test purpose only. */
- clear++;
- } else if (strncmp(s1, "nosvc", 5) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOSVC;
- clear++;
- } else if (strncmp(s1, "nomgs", 5) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOMGS;
- clear++;
- } else if (strncmp(s1, "noscrub", 7) == 0) {
- lmd->lmd_flags |= LMD_FLG_NOSCRUB;
- clear++;
- } else if (strncmp(s1, PARAM_MGSNODE,
- sizeof(PARAM_MGSNODE) - 1) == 0) {
- s2 = s1 + sizeof(PARAM_MGSNODE) - 1;
- /* Assume the next mount opt is the first
- invalid nid we get to. */
- rc = lmd_parse_mgs(lmd, &s2);
- if (rc)
- goto invalid;
- clear++;
- } else if (strncmp(s1, "writeconf", 9) == 0) {
- lmd->lmd_flags |= LMD_FLG_WRITECONF;
- clear++;
- } else if (strncmp(s1, "update", 6) == 0) {
- lmd->lmd_flags |= LMD_FLG_UPDATE;
- clear++;
- } else if (strncmp(s1, "virgin", 6) == 0) {
- lmd->lmd_flags |= LMD_FLG_VIRGIN;
- clear++;
- } else if (strncmp(s1, "noprimnode", 10) == 0) {
- lmd->lmd_flags |= LMD_FLG_NO_PRIMNODE;
- clear++;
- } else if (strncmp(s1, "mgssec=", 7) == 0) {
- rc = lmd_parse_mgssec(lmd, s1 + 7);
- if (rc)
- goto invalid;
- clear++;
- /* ost exclusion list */
- } else if (strncmp(s1, "exclude=", 8) == 0) {
- rc = lmd_make_exclusion(lmd, s1 + 7);
- if (rc)
- goto invalid;
- clear++;
- } else if (strncmp(s1, "mgs", 3) == 0) {
- /* We are an MGS */
- lmd->lmd_flags |= LMD_FLG_MGS;
- clear++;
- } else if (strncmp(s1, "svname=", 7) == 0) {
- rc = lmd_parse_string(&lmd->lmd_profile, s1 + 7);
- if (rc)
- goto invalid;
- clear++;
- } else if (strncmp(s1, "param=", 6) == 0) {
- int length;
- char *tail = strchr(s1 + 6, ',');
- if (tail == NULL)
- length = strlen(s1);
- else
- length = tail - s1;
- length -= 6;
- strncat(lmd->lmd_params, s1 + 6, length);
- strcat(lmd->lmd_params, " ");
- clear++;
- } else if (strncmp(s1, "osd=", 4) == 0) {
- rc = lmd_parse_string(&lmd->lmd_osd_type, s1 + 4);
- if (rc)
- goto invalid;
- clear++;
- }
- /* Linux 2.4 doesn't pass the device, so we stuck it at the
- end of the options. */
- else if (strncmp(s1, "device=", 7) == 0) {
- devname = s1 + 7;
- /* terminate options right before device. device
- must be the last one. */
- *s1 = '\0';
- break;
- }
-
- /* Find next opt */
- s2 = strchr(s1, ',');
- if (s2 == NULL) {
- if (clear)
- *s1 = '\0';
- break;
- }
- s2++;
- if (clear)
- memmove(s1, s2, strlen(s2) + 1);
- else
- s1 = s2;
- }
-
- if (!devname) {
- LCONSOLE_ERROR_MSG(0x164, "Can't find the device name (need mount option 'device=...')\n");
- goto invalid;
- }
-
- s1 = strstr(devname, ":/");
- if (s1) {
- ++s1;
- lmd->lmd_flags |= LMD_FLG_CLIENT;
- /* Remove leading /s from fsname */
- while (*++s1 == '/')
- ;
- /* Freed in lustre_free_lsi */
- lmd->lmd_profile = kasprintf(GFP_NOFS, "%s-client", s1);
- if (!lmd->lmd_profile)
- return -ENOMEM;
- }
-
- /* Freed in lustre_free_lsi */
- lmd->lmd_dev = kzalloc(strlen(devname) + 1, GFP_NOFS);
- if (!lmd->lmd_dev)
- return -ENOMEM;
- strcpy(lmd->lmd_dev, devname);
-
- /* Save mount options */
- s1 = options + strlen(options) - 1;
- while (s1 >= options && (*s1 == ',' || *s1 == ' '))
- *s1-- = 0;
- if (*options != 0) {
- /* Freed in lustre_free_lsi */
- lmd->lmd_opts = kzalloc(strlen(options) + 1, GFP_NOFS);
- if (!lmd->lmd_opts)
- return -ENOMEM;
- strcpy(lmd->lmd_opts, options);
- }
-
- lmd_print(lmd);
- lmd->lmd_magic = LMD_MAGIC;
-
- return rc;
-
-invalid:
- CERROR("Bad mount options %s\n", options);
- return -EINVAL;
-}
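lmd_parse() above scrubs Lustre-specific options out of the comma-separated mount string in place (via memmove) so that only unhandled options are passed down to the lower layers. Below is a standalone sketch of that scrubbing technique; strip_option() is a hypothetical helper written for illustration, not part of the original file.

#include <stdio.h>
#include <string.h>

/* Remove every option starting with 'name' from a comma-separated list. */
static void strip_option(char *opts, const char *name)
{
        size_t nlen = strlen(name);
        char *s = opts;

        while (*s) {
                char *next = strchr(s, ',');

                if (strncmp(s, name, nlen) == 0) {
                        if (next)       /* shift the remainder over the match */
                                memmove(s, next + 1, strlen(next + 1) + 1);
                        else
                                *s = '\0';
                } else {
                        s = next ? next + 1 : s + strlen(s);
                }
        }
}

int main(void)
{
        char opts[] = "abort_recov,flock,noir,user_xattr";

        strip_option(opts, "abort_recov");
        strip_option(opts, "noir");
        printf("%s\n", opts);   /* prints "flock,user_xattr" */
        return 0;
}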
-
-struct lustre_mount_data2 {
- void *lmd2_data;
- struct vfsmount *lmd2_mnt;
-};
-
-/** This is the entry point for the mount call into Lustre.
- * This is called when a server or client is mounted,
- * and this is where we start setting things up.
- * @param data Mount options (e.g. -o flock,abort_recov)
- */
-int lustre_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct lustre_mount_data *lmd;
- struct lustre_mount_data2 *lmd2 = data;
- struct lustre_sb_info *lsi;
- int rc;
-
- CDEBUG(D_MOUNT|D_VFSTRACE, "VFS Op: sb %p\n", sb);
-
- lsi = lustre_init_lsi(sb);
- if (!lsi)
- return -ENOMEM;
- lmd = lsi->lsi_lmd;
-
- /*
- * Disable lockdep during mount, because mount locking patterns are
- * `special'.
- */
- lockdep_off();
-
- /*
- * LU-639: the obd cleanup of last mount may not finish yet, wait here.
- */
- obd_zombie_barrier();
-
- /* Figure out the lmd from the mount options */
- if (lmd_parse((char *)(lmd2->lmd2_data), lmd)) {
- lustre_put_lsi(sb);
- rc = -EINVAL;
- goto out;
- }
-
- if (lmd_is_client(lmd)) {
- CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile);
- if (client_fill_super == NULL)
- request_module("lustre");
- if (client_fill_super == NULL) {
- LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
- lustre_put_lsi(sb);
- rc = -ENODEV;
- } else {
- rc = lustre_start_mgc(sb);
- if (rc) {
- lustre_put_lsi(sb);
- goto out;
- }
- /* Connect and start */
- /* (should always be ll_fill_super) */
- rc = (*client_fill_super)(sb, lmd2->lmd2_mnt);
- /* c_f_s will call lustre_common_put_super on failure */
- }
- } else {
- CERROR("This is client-side-only module, cannot handle server mount.\n");
- rc = -EINVAL;
- }
-
-	/* If an error happens in the fill_super() call, @lsi will be killed
-	 * there; this is why we do not put it here. */
- goto out;
-out:
- if (rc) {
- CERROR("Unable to mount %s (%d)\n",
- s2lsi(sb) ? lmd->lmd_dev : "", rc);
- } else {
- CDEBUG(D_SUPER, "Mount %s complete\n",
- lmd->lmd_dev);
- }
- lockdep_on();
- return rc;
-}
-
-
-/* We can't call ll_fill_super by name because it lives in a module that
- must be loaded after this one. */
-void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
- struct vfsmount *mnt))
-{
- client_fill_super = cfs;
-}
-EXPORT_SYMBOL(lustre_register_client_fill_super);
-
-void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb))
-{
- kill_super_cb = cfs;
-}
-EXPORT_SYMBOL(lustre_register_kill_super_cb);
-
-/***************** FS registration ******************/
-struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
- const char *devname, void *data)
-{
- struct lustre_mount_data2 lmd2 = {
- .lmd2_data = data,
- .lmd2_mnt = NULL
- };
-
- return mount_nodev(fs_type, flags, &lmd2, lustre_fill_super);
-}
-
-static void lustre_kill_super(struct super_block *sb)
-{
- struct lustre_sb_info *lsi = s2lsi(sb);
-
- if (kill_super_cb && lsi)
- (*kill_super_cb)(sb);
-
- kill_anon_super(sb);
-}
-
-/** Register the "lustre" fs type
- */
-struct file_system_type lustre_fs_type = {
- .owner = THIS_MODULE,
- .name = "lustre",
- .mount = lustre_mount,
- .kill_sb = lustre_kill_super,
- .fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
- FS_RENAME_DOES_D_MOVE,
-};
-MODULE_ALIAS_FS("lustre");
-
-int lustre_register_fs(void)
-{
- return register_filesystem(&lustre_fs_type);
-}
-
-int lustre_unregister_fs(void)
-{
- return unregister_filesystem(&lustre_fs_type);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
deleted file mode 100644
index 1a950fb8e769..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/obdo.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../include/obd_class.h"
-#include "../include/lustre/lustre_idl.h"
-
-void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent)
-{
- dst->o_parent_oid = fid_oid(parent);
- dst->o_parent_seq = fid_seq(parent);
- dst->o_parent_ver = fid_ver(parent);
- dst->o_valid |= OBD_MD_FLGENER | OBD_MD_FLFID;
-}
-EXPORT_SYMBOL(obdo_set_parent_fid);
-
-/* WARNING: the file systems must take care not to tinker with
- attributes they don't manage (such as blocks). */
-void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid)
-{
- u32 newvalid = 0;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE, "valid %x, new time %lu/%lu\n",
- valid, LTIME_S(src->i_mtime),
- LTIME_S(src->i_ctime));
-
- if (valid & OBD_MD_FLATIME) {
- dst->o_atime = LTIME_S(src->i_atime);
- newvalid |= OBD_MD_FLATIME;
- }
- if (valid & OBD_MD_FLMTIME) {
- dst->o_mtime = LTIME_S(src->i_mtime);
- newvalid |= OBD_MD_FLMTIME;
- }
- if (valid & OBD_MD_FLCTIME) {
- dst->o_ctime = LTIME_S(src->i_ctime);
- newvalid |= OBD_MD_FLCTIME;
- }
- if (valid & OBD_MD_FLSIZE) {
- dst->o_size = i_size_read(src);
- newvalid |= OBD_MD_FLSIZE;
- }
- if (valid & OBD_MD_FLBLOCKS) { /* allocation of space (x512 bytes) */
- dst->o_blocks = src->i_blocks;
- newvalid |= OBD_MD_FLBLOCKS;
- }
- if (valid & OBD_MD_FLBLKSZ) { /* optimal block size */
- dst->o_blksize = 1 << src->i_blkbits;
- newvalid |= OBD_MD_FLBLKSZ;
- }
- if (valid & OBD_MD_FLTYPE) {
- dst->o_mode = (dst->o_mode & S_IALLUGO) |
- (src->i_mode & S_IFMT);
- newvalid |= OBD_MD_FLTYPE;
- }
- if (valid & OBD_MD_FLMODE) {
- dst->o_mode = (dst->o_mode & S_IFMT) |
- (src->i_mode & S_IALLUGO);
- newvalid |= OBD_MD_FLMODE;
- }
- if (valid & OBD_MD_FLUID) {
- dst->o_uid = from_kuid(&init_user_ns, src->i_uid);
- newvalid |= OBD_MD_FLUID;
- }
- if (valid & OBD_MD_FLGID) {
- dst->o_gid = from_kgid(&init_user_ns, src->i_gid);
- newvalid |= OBD_MD_FLGID;
- }
- if (valid & OBD_MD_FLFLAGS) {
- dst->o_flags = src->i_flags;
- newvalid |= OBD_MD_FLFLAGS;
- }
- dst->o_valid |= newvalid;
-}
-EXPORT_SYMBOL(obdo_from_inode);
-
-void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj)
-{
- ioobj->ioo_oid = oa->o_oi;
- if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP)))
- ostid_set_seq_mdt0(&ioobj->ioo_oid);
-
- /* Since 2.4 this does not contain o_mode in the low 16 bits.
- * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs */
- ioobj->ioo_max_brw = 0;
-}
-EXPORT_SYMBOL(obdo_to_ioobj);
-
-void iattr_from_obdo(struct iattr *attr, struct obdo *oa, u32 valid)
-{
- valid &= oa->o_valid;
-
- if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
- CDEBUG(D_INODE, "valid %#llx, new time %llu/%llu\n",
- oa->o_valid, oa->o_mtime, oa->o_ctime);
-
- attr->ia_valid = 0;
- if (valid & OBD_MD_FLATIME) {
- LTIME_S(attr->ia_atime) = oa->o_atime;
- attr->ia_valid |= ATTR_ATIME;
- }
- if (valid & OBD_MD_FLMTIME) {
- LTIME_S(attr->ia_mtime) = oa->o_mtime;
- attr->ia_valid |= ATTR_MTIME;
- }
- if (valid & OBD_MD_FLCTIME) {
- LTIME_S(attr->ia_ctime) = oa->o_ctime;
- attr->ia_valid |= ATTR_CTIME;
- }
- if (valid & OBD_MD_FLSIZE) {
- attr->ia_size = oa->o_size;
- attr->ia_valid |= ATTR_SIZE;
- }
-#if 0 /* you shouldn't be able to change a file's type with setattr */
- if (valid & OBD_MD_FLTYPE) {
- attr->ia_mode = (attr->ia_mode & ~S_IFMT)|(oa->o_mode & S_IFMT);
- attr->ia_valid |= ATTR_MODE;
- }
-#endif
- if (valid & OBD_MD_FLMODE) {
- attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
- attr->ia_valid |= ATTR_MODE;
- if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
- !capable(CFS_CAP_FSETID))
- attr->ia_mode &= ~S_ISGID;
- }
- if (valid & OBD_MD_FLUID) {
- attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid);
- attr->ia_valid |= ATTR_UID;
- }
- if (valid & OBD_MD_FLGID) {
- attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid);
- attr->ia_valid |= ATTR_GID;
- }
-}
-EXPORT_SYMBOL(iattr_from_obdo);
-
-void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid)
-{
- iattr_from_obdo(&op_data->op_attr, oa, valid);
- if (valid & OBD_MD_FLBLOCKS) {
- op_data->op_attr_blocks = oa->o_blocks;
- op_data->op_attr.ia_valid |= ATTR_BLOCKS;
- }
- if (valid & OBD_MD_FLFLAGS) {
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- oa->o_flags;
- op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
- }
-}
-EXPORT_SYMBOL(md_from_obdo);
diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
deleted file mode 100644
index fb4e3ae845e0..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/statfs_pack.c
- *
- * (Un)packing of OST/MDS requests
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/statfs.h>
-#include "../include/lustre_export.h"
-#include "../include/lustre_net.h"
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs)
-{
- memset(sfs, 0, sizeof(*sfs));
- sfs->f_type = osfs->os_type;
- sfs->f_blocks = osfs->os_blocks;
- sfs->f_bfree = osfs->os_bfree;
- sfs->f_bavail = osfs->os_bavail;
- sfs->f_files = osfs->os_files;
- sfs->f_ffree = osfs->os_ffree;
- sfs->f_bsize = osfs->os_bsize;
- sfs->f_namelen = osfs->os_namelen;
-}
-EXPORT_SYMBOL(statfs_unpack);
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
deleted file mode 100644
index b0b0157a6334..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/uuid.c
- *
- * Public include file for the UUID library
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-
-void class_uuid_unparse(class_uuid_t uu, struct obd_uuid *out)
-{
- sprintf(out->uuid, "%pU", uu);
-}
-EXPORT_SYMBOL(class_uuid_unparse);
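class_uuid_unparse() above leans on the kernel's "%pU" format specifier. As a rough userspace sketch, that expands to the canonical 8-4-4-4-12 hex rendering of the 16-byte UUID; uuid_unparse_sketch() below is a hypothetical illustration, not the kernel implementation.

#include <stdio.h>

static void uuid_unparse_sketch(const unsigned char u[16], char out[37])
{
        /* canonical 8-4-4-4-12 form, bytes printed in array order */
        sprintf(out,
                "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
                u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
                u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
}

int main(void)
{
        const unsigned char u[16] = { 0 };
        char buf[37];

        uuid_unparse_sketch(u, buf);
        printf("%s\n", buf);    /* 00000000-0000-0000-0000-000000000000 */
        return 0;
}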
diff --git a/drivers/staging/lustre/lustre/obdecho/Makefile b/drivers/staging/lustre/lustre/obdecho/Makefile
deleted file mode 100644
index a659a37a7e93..000000000000
--- a/drivers/staging/lustre/lustre/obdecho/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += obdecho.o
-obdecho-y := echo_client.o
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
deleted file mode 100644
index ec9ccff86648..000000000000
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ /dev/null
@@ -1,2180 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_ECHO
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_debug.h"
-#include "../include/lprocfs_status.h"
-#include "../include/cl_object.h"
-#include "../include/lustre_fid.h"
-#include "../include/lustre_acl.h"
-#include "../include/lustre_net.h"
-
-#include "echo_internal.h"
-
-/** \defgroup echo_client Echo Client
- * @{
- */
-
-struct echo_device {
- struct cl_device ed_cl;
- struct echo_client_obd *ed_ec;
-
- struct cl_site ed_site_myself;
- struct cl_site *ed_site;
- struct lu_device *ed_next;
- int ed_next_islov;
-};
-
-struct echo_object {
- struct cl_object eo_cl;
- struct cl_object_header eo_hdr;
-
- struct echo_device *eo_dev;
- struct list_head eo_obj_chain;
- struct lov_stripe_md *eo_lsm;
- atomic_t eo_npages;
- int eo_deleted;
-};
-
-struct echo_object_conf {
- struct cl_object_conf eoc_cl;
- struct lov_stripe_md **eoc_md;
-};
-
-struct echo_page {
- struct cl_page_slice ep_cl;
- struct mutex ep_lock;
- struct page *ep_vmpage;
-};
-
-struct echo_lock {
- struct cl_lock_slice el_cl;
- struct list_head el_chain;
- struct echo_object *el_object;
- __u64 el_cookie;
- atomic_t el_refcount;
-};
-
-static int echo_client_setup(const struct lu_env *env,
- struct obd_device *obddev,
- struct lustre_cfg *lcfg);
-static int echo_client_cleanup(struct obd_device *obddev);
-
-
-/** \defgroup echo_helpers Helper functions
- * @{
- */
-static inline struct echo_device *cl2echo_dev(const struct cl_device *dev)
-{
- return container_of0(dev, struct echo_device, ed_cl);
-}
-
-static inline struct cl_device *echo_dev2cl(struct echo_device *d)
-{
- return &d->ed_cl;
-}
-
-static inline struct echo_device *obd2echo_dev(const struct obd_device *obd)
-{
- return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev));
-}
-
-static inline struct cl_object *echo_obj2cl(struct echo_object *eco)
-{
- return &eco->eo_cl;
-}
-
-static inline struct echo_object *cl2echo_obj(const struct cl_object *o)
-{
- return container_of(o, struct echo_object, eo_cl);
-}
-
-static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s)
-{
- return container_of(s, struct echo_page, ep_cl);
-}
-
-static inline struct echo_lock *cl2echo_lock(const struct cl_lock_slice *s)
-{
- return container_of(s, struct echo_lock, el_cl);
-}
-
-static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl)
-{
- return ecl->el_cl.cls_lock;
-}
-
-static struct lu_context_key echo_thread_key;
-static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
-{
- struct echo_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
- LASSERT(info != NULL);
- return info;
-}
-
-static inline
-struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
-{
- return container_of(c, struct echo_object_conf, eoc_cl);
-}
-
-/** @} echo_helpers */
-
-static struct echo_object *cl_echo_object_find(struct echo_device *d,
- struct lov_stripe_md **lsm);
-static int cl_echo_object_put(struct echo_object *eco);
-static int cl_echo_enqueue(struct echo_object *eco, u64 start,
- u64 end, int mode, __u64 *cookie);
-static int cl_echo_cancel(struct echo_device *d, __u64 cookie);
-static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
- struct page **pages, int npages, int async);
-
-static struct echo_thread_info *echo_env_info(const struct lu_env *env);
-
-struct echo_thread_info {
- struct echo_object_conf eti_conf;
- struct lustre_md eti_md;
-
- struct cl_2queue eti_queue;
- struct cl_io eti_io;
- struct cl_lock_descr eti_descr;
- struct lu_fid eti_fid;
- struct lu_fid eti_fid2;
-};
-
-/* No session used right now */
-struct echo_session_info {
- unsigned long dummy;
-};
-
-static struct kmem_cache *echo_lock_kmem;
-static struct kmem_cache *echo_object_kmem;
-static struct kmem_cache *echo_thread_kmem;
-static struct kmem_cache *echo_session_kmem;
-
-static struct lu_kmem_descr echo_caches[] = {
- {
- .ckd_cache = &echo_lock_kmem,
- .ckd_name = "echo_lock_kmem",
- .ckd_size = sizeof(struct echo_lock)
- },
- {
- .ckd_cache = &echo_object_kmem,
- .ckd_name = "echo_object_kmem",
- .ckd_size = sizeof(struct echo_object)
- },
- {
- .ckd_cache = &echo_thread_kmem,
- .ckd_name = "echo_thread_kmem",
- .ckd_size = sizeof(struct echo_thread_info)
- },
- {
- .ckd_cache = &echo_session_kmem,
- .ckd_name = "echo_session_kmem",
- .ckd_size = sizeof(struct echo_session_info)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/** \defgroup echo_page Page operations
- *
- * Echo page operations.
- *
- * @{
- */
-static struct page *echo_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2echo_page(slice)->ep_vmpage;
-}
-
-static int echo_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, int nonblock)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- if (!nonblock)
- mutex_lock(&ep->ep_lock);
- else if (!mutex_trylock(&ep->ep_lock))
- return -EAGAIN;
- return 0;
-}
-
-static void echo_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- LASSERT(mutex_is_locked(&ep->ep_lock));
- mutex_unlock(&ep->ep_lock);
-}
-
-static void echo_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- cl_page_delete(env, slice->cpl_page);
-}
-
-static int echo_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
- return -EBUSY;
- return -ENODATA;
-}
-
-static void echo_page_completion(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- LASSERT(slice->cpl_page->cp_sync_io != NULL);
-}
-
-static void echo_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct echo_page *ep = cl2echo_page(slice);
- struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
- struct page *vmpage = ep->ep_vmpage;
-
- atomic_dec(&eco->eo_npages);
- page_cache_release(vmpage);
-}
-
-static int echo_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- return 0;
-}
-
-static int echo_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
- return 0;
-}
-
-static const struct cl_page_operations echo_page_ops = {
- .cpo_own = echo_page_own,
- .cpo_disown = echo_page_disown,
- .cpo_discard = echo_page_discard,
- .cpo_vmpage = echo_page_vmpage,
- .cpo_fini = echo_page_fini,
- .cpo_print = echo_page_print,
- .cpo_is_vmlocked = echo_page_is_vmlocked,
- .io = {
- [CRT_READ] = {
- .cpo_prep = echo_page_prep,
- .cpo_completion = echo_page_completion,
- },
- [CRT_WRITE] = {
- .cpo_prep = echo_page_prep,
- .cpo_completion = echo_page_completion,
- }
- }
-};
-/** @} echo_page */
-
-/** \defgroup echo_lock Locking
- *
- * echo lock operations
- *
- * @{
- */
-static void echo_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct echo_lock *ecl = cl2echo_lock(slice);
-
- LASSERT(list_empty(&ecl->el_chain));
- OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
-}
-
-static void echo_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct echo_lock *ecl = cl2echo_lock(slice);
-
- LASSERT(list_empty(&ecl->el_chain));
-}
-
-static int echo_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *unused)
-{
- return 1;
-}
-
-static struct cl_lock_operations echo_lock_ops = {
- .clo_fini = echo_lock_fini,
- .clo_delete = echo_lock_delete,
- .clo_fits_into = echo_lock_fits_into
-};
-
-/** @} echo_lock */
-
-/** \defgroup echo_cl_ops cl_object operations
- *
- * operations for cl_object
- *
- * @{
- */
-static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
-{
- struct echo_page *ep = cl_object_page_slice(obj, page);
- struct echo_object *eco = cl2echo_obj(obj);
-
- ep->ep_vmpage = vmpage;
- page_cache_get(vmpage);
- mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
- atomic_inc(&eco->eo_npages);
- return 0;
-}
-
-static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- return 0;
-}
-
-static int echo_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
-{
- struct echo_lock *el;
-
- OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, GFP_NOFS);
- if (el != NULL) {
- cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
- el->el_object = cl2echo_obj(obj);
- INIT_LIST_HEAD(&el->el_chain);
- atomic_set(&el->el_refcount, 0);
- }
- return el == NULL ? -ENOMEM : 0;
-}
-
-static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- return 0;
-}
-
-static const struct cl_object_operations echo_cl_obj_ops = {
- .coo_page_init = echo_page_init,
- .coo_lock_init = echo_lock_init,
- .coo_io_init = echo_io_init,
- .coo_conf_set = echo_conf_set
-};
-/** @} echo_cl_ops */
-
-/** \defgroup echo_lu_ops lu_object operations
- *
- * operations for echo lu object.
- *
- * @{
- */
-static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_object *eco = cl2echo_obj(lu2cl(obj));
- const struct cl_object_conf *cconf;
- struct echo_object_conf *econf;
-
- if (ed->ed_next) {
- struct lu_object *below;
- struct lu_device *under;
-
- under = ed->ed_next;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
- under);
- if (below == NULL)
- return -ENOMEM;
- lu_object_add(obj, below);
- }
-
- cconf = lu2cl_conf(conf);
- econf = cl2echo_conf(cconf);
-
- LASSERT(econf->eoc_md);
- eco->eo_lsm = *econf->eoc_md;
- /* clear the lsm pointer so that it won't get freed. */
- *econf->eoc_md = NULL;
-
- eco->eo_dev = ed;
- atomic_set(&eco->eo_npages, 0);
- cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
-
- spin_lock(&ec->ec_lock);
- list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
- spin_unlock(&ec->ec_lock);
-
- return 0;
-}
-
-/* taken from osc_unpackmd() */
-static int echo_alloc_memmd(struct echo_device *ed,
- struct lov_stripe_md **lsmp)
-{
- int lsm_size;
-
- /* If export is lov/osc then use their obd method */
- if (ed->ed_next != NULL)
- return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
- /* OFD has no unpackmd method, do everything here */
- lsm_size = lov_stripe_md_size(1);
-
- LASSERT(*lsmp == NULL);
- *lsmp = kzalloc(lsm_size, GFP_NOFS);
- if (!*lsmp)
- return -ENOMEM;
-
- (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS);
- if (!(*lsmp)->lsm_oinfo[0]) {
- kfree(*lsmp);
- return -ENOMEM;
- }
-
- loi_init((*lsmp)->lsm_oinfo[0]);
- (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
- ostid_set_seq_echo(&(*lsmp)->lsm_oi);
-
- return lsm_size;
-}
-
-static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
-{
- int lsm_size;
-
- /* If export is lov/osc then use their obd method */
- if (ed->ed_next != NULL)
- return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
- /* OFD has no unpackmd method, do everything here */
- lsm_size = lov_stripe_md_size(1);
-
- LASSERT(*lsmp != NULL);
- kfree((*lsmp)->lsm_oinfo[0]);
- kfree(*lsmp);
- *lsmp = NULL;
- return 0;
-}
-
-static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct echo_object *eco = cl2echo_obj(lu2cl(obj));
- struct echo_client_obd *ec = eco->eo_dev->ed_ec;
-
- LASSERT(atomic_read(&eco->eo_npages) == 0);
-
- spin_lock(&ec->ec_lock);
- list_del_init(&eco->eo_obj_chain);
- spin_unlock(&ec->ec_lock);
-
- lu_object_fini(obj);
- lu_object_header_fini(obj->lo_header);
-
- if (eco->eo_lsm)
- echo_free_memmd(eco->eo_dev, &eco->eo_lsm);
- OBD_SLAB_FREE_PTR(eco, echo_object_kmem);
-}
-
-static int echo_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
-{
- struct echo_object *obj = cl2echo_obj(lu2cl(o));
-
- return (*p)(env, cookie, "echoclient-object@%p", obj);
-}
-
-static const struct lu_object_operations echo_lu_obj_ops = {
- .loo_object_init = echo_object_init,
- .loo_object_delete = NULL,
- .loo_object_release = NULL,
- .loo_object_free = echo_object_free,
- .loo_object_print = echo_object_print,
- .loo_object_invariant = NULL
-};
-/** @} echo_lu_ops */
-
-/** \defgroup echo_lu_dev_ops lu_device operations
- *
- * Operations for echo lu device.
- *
- * @{
- */
-static struct lu_object *echo_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev)
-{
- struct echo_object *eco;
- struct lu_object *obj = NULL;
-
- /* we're the top dev. */
- LASSERT(hdr == NULL);
- OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, GFP_NOFS);
- if (eco != NULL) {
- struct cl_object_header *hdr = &eco->eo_hdr;
-
- obj = &echo_obj2cl(eco)->co_lu;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
-
- eco->eo_cl.co_ops = &echo_cl_obj_ops;
- obj->lo_ops = &echo_lu_obj_ops;
- }
- return obj;
-}
-
-static struct lu_device_operations echo_device_lu_ops = {
- .ldo_object_alloc = echo_object_alloc,
-};
-
-/** @} echo_lu_dev_ops */
-
-static struct cl_device_operations echo_device_cl_ops = {
-};
-
-/** \defgroup echo_init Setup and teardown
- *
- * Init and fini functions for echo client.
- *
- * @{
- */
-static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
-{
- struct cl_site *site = &ed->ed_site_myself;
- int rc;
-
- /* initialize site */
- rc = cl_site_init(site, &ed->ed_cl);
- if (rc) {
- CERROR("Cannot initialize site for echo client(%d)\n", rc);
- return rc;
- }
-
- rc = lu_site_init_finish(&site->cs_lu);
- if (rc)
- return rc;
-
- ed->ed_site = site;
- return 0;
-}
-
-static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
-{
- if (ed->ed_site) {
- cl_site_fini(ed->ed_site);
- ed->ed_site = NULL;
- }
-}
-
-static void *echo_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct echo_thread_info *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, GFP_NOFS);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void echo_thread_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct echo_thread_info *info = data;
-
- OBD_SLAB_FREE_PTR(info, echo_thread_kmem);
-}
-
-static void echo_thread_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
-}
-
-static struct lu_context_key echo_thread_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = echo_thread_key_init,
- .lct_fini = echo_thread_key_fini,
- .lct_exit = echo_thread_key_exit
-};
-
-static void *echo_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct echo_session_info *session;
-
- OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, GFP_NOFS);
- if (session == NULL)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-static void echo_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct echo_session_info *session = data;
-
- OBD_SLAB_FREE_PTR(session, echo_session_kmem);
-}
-
-static void echo_session_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
-}
-
-static struct lu_context_key echo_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = echo_session_key_init,
- .lct_fini = echo_session_key_fini,
- .lct_exit = echo_session_key_exit
-};
-
-LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
-
-static struct lu_device *echo_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *next;
- struct echo_device *ed;
- struct cl_device *cd;
- struct obd_device *obd = NULL; /* to keep compiler happy */
- struct obd_device *tgt;
- const char *tgt_type_name;
- int rc;
- int cleanup = 0;
-
- ed = kzalloc(sizeof(*ed), GFP_NOFS);
- if (!ed) {
- rc = -ENOMEM;
- goto out;
- }
-
- cleanup = 1;
- cd = &ed->ed_cl;
- rc = cl_device_init(cd, t);
- if (rc)
- goto out;
-
- cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
- cd->cd_ops = &echo_device_cl_ops;
-
- cleanup = 2;
- obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd != NULL);
- LASSERT(env != NULL);
-
- tgt = class_name2obd(lustre_cfg_string(cfg, 1));
- if (tgt == NULL) {
- CERROR("Can not find tgt device %s\n",
- lustre_cfg_string(cfg, 1));
- rc = -ENODEV;
- goto out;
- }
-
- next = tgt->obd_lu_dev;
- if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
- CERROR("echo MDT client must be run on server\n");
- rc = -EOPNOTSUPP;
- goto out;
- }
-
- rc = echo_site_init(env, ed);
- if (rc)
- goto out;
-
- cleanup = 3;
-
- rc = echo_client_setup(env, obd, cfg);
- if (rc)
- goto out;
-
- ed->ed_ec = &obd->u.echo_client;
- cleanup = 4;
-
-	/* if the echo client is to be stacked upon an OST device, next is
-	 * NULL, since an OST is not a CLIO device yet */
- if (next != NULL && !lu_device_is_cl(next))
- next = NULL;
-
- tgt_type_name = tgt->obd_type->typ_name;
- if (next != NULL) {
- LASSERT(next != NULL);
- if (next->ld_site != NULL) {
- rc = -EBUSY;
- goto out;
- }
-
- next->ld_site = &ed->ed_site->cs_lu;
- rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
- next->ld_type->ldt_name,
- NULL);
- if (rc)
- goto out;
-
-		/* Tricky case: the obd type must be determined here,
-		 * since CLIO uses different parameters to initialize
-		 * objects for lov and osc. */
- if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0)
- ed->ed_next_islov = 1;
- else
- LASSERT(strcmp(tgt_type_name,
- LUSTRE_OSC_NAME) == 0);
- } else {
- LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
- }
-
- ed->ed_next = next;
- return &cd->cd_lu_dev;
-out:
- switch (cleanup) {
- case 4: {
- int rc2;
-
- rc2 = echo_client_cleanup(obd);
- if (rc2)
- CERROR("Cleanup obd device %s error(%d)\n",
- obd->obd_name, rc2);
- }
-
- case 3:
- echo_site_fini(env, ed);
- case 2:
- cl_device_fini(&ed->ed_cl);
- case 1:
- kfree(ed);
- case 0:
- default:
- break;
- }
- return ERR_PTR(rc);
-}
-
-static int echo_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- LBUG();
- return 0;
-}
-
-static struct lu_device *echo_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
- struct lu_device *next = ed->ed_next;
-
- while (next)
- next = next->ld_type->ldt_ops->ldto_device_fini(env, next);
- return NULL;
-}
-
-static void echo_lock_release(const struct lu_env *env,
- struct echo_lock *ecl,
- int still_used)
-{
- struct cl_lock *clk = echo_lock2cl(ecl);
-
- cl_lock_get(clk);
- cl_unuse(env, clk);
- cl_lock_release(env, clk, "ec enqueue", ecl->el_object);
- if (!still_used) {
- cl_lock_mutex_get(env, clk);
- cl_lock_cancel(env, clk);
- cl_lock_delete(env, clk);
- cl_lock_mutex_put(env, clk);
- }
- cl_lock_put(env, clk);
-}
-
-static struct lu_device *echo_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_object *eco;
- struct lu_device *next = ed->ed_next;
-
- CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
- ed, next);
-
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-
-	/* Check whether any objects are still alive.
-	 * There should not be any, because lu_site_purge would have cleaned
-	 * up all cached objects; if some remain, the echo device is probably
-	 * being accessed in parallel.
-	 */
- spin_lock(&ec->ec_lock);
- list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
- eco->eo_deleted = 1;
- spin_unlock(&ec->ec_lock);
-
- /* purge again */
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-
- CDEBUG(D_INFO,
- "Waiting for the reference of echo object to be dropped\n");
-
- /* Wait for the last reference to be dropped. */
- spin_lock(&ec->ec_lock);
- while (!list_empty(&ec->ec_objects)) {
- spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
- spin_lock(&ec->ec_lock);
- }
- spin_unlock(&ec->ec_lock);
-
- LASSERT(list_empty(&ec->ec_locks));
-
- CDEBUG(D_INFO, "No object exists, exiting...\n");
-
- echo_client_cleanup(d->ld_obd);
-
- while (next)
- next = next->ld_type->ldt_ops->ldto_device_free(env, next);
-
- LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
- echo_site_fini(env, ed);
- cl_device_fini(&ed->ed_cl);
- kfree(ed);
-
- return NULL;
-}
-
-static const struct lu_device_type_operations echo_device_type_ops = {
- .ldto_init = echo_type_init,
- .ldto_fini = echo_type_fini,
-
- .ldto_start = echo_type_start,
- .ldto_stop = echo_type_stop,
-
- .ldto_device_alloc = echo_device_alloc,
- .ldto_device_free = echo_device_free,
- .ldto_device_init = echo_device_init,
- .ldto_device_fini = echo_device_fini
-};
-
-static struct lu_device_type echo_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_ECHO_CLIENT_NAME,
- .ldt_ops = &echo_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD,
-};
-/** @} echo_init */
-
-/** \defgroup echo_exports Exported operations
- *
- * exporting functions to echo client
- *
- * @{
- */
-
-/* Interfaces to echo client obd device */
-static struct echo_object *cl_echo_object_find(struct echo_device *d,
- struct lov_stripe_md **lsmp)
-{
- struct lu_env *env;
- struct echo_thread_info *info;
- struct echo_object_conf *conf;
- struct lov_stripe_md *lsm;
- struct echo_object *eco;
- struct cl_object *obj;
- struct lu_fid *fid;
- int refcheck;
- int rc;
-
- LASSERT(lsmp);
- lsm = *lsmp;
- LASSERT(lsm);
- LASSERTF(ostid_id(&lsm->lsm_oi) != 0, DOSTID"\n", POSTID(&lsm->lsm_oi));
- LASSERTF(ostid_seq(&lsm->lsm_oi) == FID_SEQ_ECHO, DOSTID"\n",
- POSTID(&lsm->lsm_oi));
-
- /* Never return an object if the obd is to be freed. */
- if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
- return ERR_PTR(-ENODEV);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return (void *)env;
-
- info = echo_env_info(env);
- conf = &info->eti_conf;
- if (d->ed_next) {
- if (!d->ed_next_islov) {
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
-
- LASSERT(oinfo != NULL);
- oinfo->loi_oi = lsm->lsm_oi;
- conf->eoc_cl.u.coc_oinfo = oinfo;
- } else {
- struct lustre_md *md;
-
- md = &info->eti_md;
- memset(md, 0, sizeof(*md));
- md->lsm = lsm;
- conf->eoc_cl.u.coc_md = md;
- }
- }
- conf->eoc_md = lsmp;
-
- fid = &info->eti_fid;
- rc = ostid_to_fid(fid, &lsm->lsm_oi, 0);
- if (rc != 0) {
- eco = ERR_PTR(rc);
- goto out;
- }
-
- /* In the function below, .hs_keycmp resolves to
- * lu_obj_hop_keycmp() */
- /* coverity[overrun-buffer-val] */
- obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
- if (IS_ERR(obj)) {
- eco = (void *)obj;
- goto out;
- }
-
- eco = cl2echo_obj(obj);
- if (eco->eo_deleted) {
- cl_object_put(env, obj);
- eco = ERR_PTR(-EAGAIN);
- }
-
-out:
- cl_env_put(env, &refcheck);
- return eco;
-}
-
-static int cl_echo_object_put(struct echo_object *eco)
-{
- struct lu_env *env;
- struct cl_object *obj = echo_obj2cl(eco);
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- /* an external function to kill an object? */
- if (eco->eo_deleted) {
- struct lu_object_header *loh = obj->co_lu.lo_header;
-
- LASSERT(&eco->eo_hdr == luh2coh(loh));
- set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
- }
-
- cl_object_put(env, obj);
- cl_env_put(env, &refcheck);
- return 0;
-}
-
-static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
- u64 start, u64 end, int mode,
- __u64 *cookie, __u32 enqflags)
-{
- struct cl_io *io;
- struct cl_lock *lck;
- struct cl_object *obj;
- struct cl_lock_descr *descr;
- struct echo_thread_info *info;
- int rc = -ENOMEM;
-
- info = echo_env_info(env);
- io = &info->eti_io;
- descr = &info->eti_descr;
- obj = echo_obj2cl(eco);
-
- descr->cld_obj = obj;
- descr->cld_start = cl_index(obj, start);
- descr->cld_end = cl_index(obj, end);
- descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
- descr->cld_enq_flags = enqflags;
- io->ci_obj = obj;
-
- lck = cl_lock_request(env, io, descr, "ec enqueue", eco);
- if (lck) {
- struct echo_client_obd *ec = eco->eo_dev->ed_ec;
- struct echo_lock *el;
-
- rc = cl_wait(env, lck);
- if (rc == 0) {
- el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- spin_lock(&ec->ec_lock);
- if (list_empty(&el->el_chain)) {
- list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- spin_unlock(&ec->ec_lock);
- } else {
- cl_lock_release(env, lck, "ec enqueue", current);
- }
- }
- return rc;
-}
-
-static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end,
- int mode, __u64 *cookie)
-{
- struct echo_thread_info *info;
- struct lu_env *env;
- struct cl_io *io;
- int refcheck;
- int result;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- info = echo_env_info(env);
- io = &info->eti_io;
-
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco));
- if (result < 0)
- goto out;
- LASSERT(result == 0);
-
- result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0);
- cl_io_fini(env, io);
-
-out:
- cl_env_put(env, &refcheck);
- return result;
-}
-
-static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
- __u64 cookie)
-{
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_lock *ecl = NULL;
- struct list_head *el;
- int found = 0, still_used = 0;
-
- LASSERT(ec != NULL);
- spin_lock(&ec->ec_lock);
- list_for_each(el, &ec->ec_locks) {
- ecl = list_entry(el, struct echo_lock, el_chain);
- CDEBUG(D_INFO, "ecl: %p, cookie: %#llx\n", ecl, ecl->el_cookie);
- found = (ecl->el_cookie == cookie);
- if (found) {
- if (atomic_dec_and_test(&ecl->el_refcount))
- list_del_init(&ecl->el_chain);
- else
- still_used = 1;
- break;
- }
- }
- spin_unlock(&ec->ec_lock);
-
- if (!found)
- return -ENOENT;
-
- echo_lock_release(env, ecl, still_used);
- return 0;
-}
-
-static int cl_echo_cancel(struct echo_device *ed, __u64 cookie)
-{
- struct lu_env *env;
- int refcheck;
- int rc;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- rc = cl_echo_cancel0(env, ed, cookie);
-
- cl_env_put(env, &refcheck);
- return rc;
-}
-
-static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type unused, struct cl_2queue *queue)
-{
- struct cl_page *clp;
- struct cl_page *temp;
- int result = 0;
-
- cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
- int rc;
-
- rc = cl_page_cache_add(env, io, clp, CRT_WRITE);
- if (rc == 0)
- continue;
- result = result ?: rc;
- }
- return result;
-}
-
-static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
- struct page **pages, int npages, int async)
-{
- struct lu_env *env;
- struct echo_thread_info *info;
- struct cl_object *obj = echo_obj2cl(eco);
- struct echo_device *ed = eco->eo_dev;
- struct cl_2queue *queue;
- struct cl_io *io;
- struct cl_page *clp;
- struct lustre_handle lh = { 0 };
- int page_size = cl_page_size(obj);
- int refcheck;
- int rc;
- int i;
-
- LASSERT((offset & ~CFS_PAGE_MASK) == 0);
- LASSERT(ed->ed_next != NULL);
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- info = echo_env_info(env);
- io = &info->eti_io;
- queue = &info->eti_queue;
-
- cl_2queue_init(queue);
-
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, obj);
- if (rc < 0)
- goto out;
- LASSERT(rc == 0);
-
-
- rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * PAGE_CACHE_SIZE - 1,
- rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
- CEF_NEVER);
- if (rc < 0)
- goto error_lock;
-
- for (i = 0; i < npages; i++) {
- LASSERT(pages[i]);
- clp = cl_page_find(env, obj, cl_index(obj, offset),
- pages[i], CPT_TRANSIENT);
- if (IS_ERR(clp)) {
- rc = PTR_ERR(clp);
- break;
- }
- LASSERT(clp->cp_type == CPT_TRANSIENT);
-
- rc = cl_page_own(env, io, clp);
- if (rc) {
- LASSERT(clp->cp_state == CPS_FREEING);
- cl_page_put(env, clp);
- break;
- }
-
- cl_2queue_add(queue, clp);
-
- /* drop the reference count for cl_page_find, so that the page
- * will be freed in cl_2queue_fini. */
- cl_page_put(env, clp);
- cl_page_clip(env, clp, 0, page_size);
-
- offset += page_size;
- }
-
- if (rc == 0) {
- enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE;
-
- async = async && (typ == CRT_WRITE);
- if (async)
- rc = cl_echo_async_brw(env, io, typ, queue);
- else
- rc = cl_io_submit_sync(env, io, typ, queue, 0);
- CDEBUG(D_INFO, "echo_client %s write returns %d\n",
- async ? "async" : "sync", rc);
- }
-
- cl_echo_cancel0(env, ed, lh.cookie);
-error_lock:
- cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
- cl_2queue_fini(env, queue);
- cl_io_fini(env, io);
-out:
- cl_env_put(env, &refcheck);
- return rc;
-}
-/** @} echo_exports */
-
-
-static u64 last_object_id;
-
-static int
-echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
-{
- struct lov_stripe_md *ulsm = _ulsm;
- int nob, i;
-
- nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
- if (nob > ulsm_nob)
- return -EINVAL;
-
- if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
- return -EFAULT;
-
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
- sizeof(lsm->lsm_oinfo[0])))
- return -EFAULT;
- }
- return 0;
-}
-
-static int
-echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
- void *ulsm, int ulsm_nob)
-{
- struct echo_client_obd *ec = ed->ed_ec;
- int i;
-
- if (ulsm_nob < sizeof(*lsm))
- return -EINVAL;
-
- if (copy_from_user(lsm, ulsm, sizeof(*lsm)))
- return -EFAULT;
-
- if (lsm->lsm_stripe_count > ec->ec_nstripes ||
- lsm->lsm_magic != LOV_MAGIC ||
- (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 ||
- ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
- return -EINVAL;
-
-
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
-		if (copy_from_user(lsm->lsm_oinfo[i],
-				   ((struct lov_stripe_md *)ulsm)->lsm_oinfo[i],
-				   sizeof(lsm->lsm_oinfo[0])))
- return -EFAULT;
- }
- return 0;
-}
-
-static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
- int on_target, struct obdo *oa, void *ulsm,
- int ulsm_nob, struct obd_trans_info *oti)
-{
- struct echo_object *eco;
- struct echo_client_obd *ec = ed->ed_ec;
- struct lov_stripe_md *lsm = NULL;
- int rc;
- int created = 0;
-
- if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */
- (on_target || /* set_stripe */
- ec->ec_nstripes != 0)) { /* LOV */
- CERROR("No valid oid\n");
- return -EINVAL;
- }
-
- rc = echo_alloc_memmd(ed, &lsm);
- if (rc < 0) {
- CERROR("Cannot allocate md: rc = %d\n", rc);
- goto failed;
- }
-
- if (ulsm != NULL) {
- int i, idx;
-
- rc = echo_copyin_lsm(ed, lsm, ulsm, ulsm_nob);
- if (rc != 0)
- goto failed;
-
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_stripe_count = ec->ec_nstripes;
-
- if (lsm->lsm_stripe_size == 0)
- lsm->lsm_stripe_size = PAGE_CACHE_SIZE;
-
- idx = cfs_rand();
-
- /* setup stripes: indices + default ids if required */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0)
- lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi;
-
- lsm->lsm_oinfo[i]->loi_ost_idx =
- (idx + i) % ec->ec_nstripes;
- }
- }
-
- /* setup object ID here for !on_target and LOV hint */
- if (oa->o_valid & OBD_MD_FLID) {
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
- lsm->lsm_oi = oa->o_oi;
- }
-
- if (ostid_id(&lsm->lsm_oi) == 0)
- ostid_set_id(&lsm->lsm_oi, ++last_object_id);
-
- rc = 0;
- if (on_target) {
- /* Only echo objects are allowed to be created */
- LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
- (ostid_seq(&oa->o_oi) == FID_SEQ_ECHO));
- rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
- if (rc != 0) {
- CERROR("Cannot create objects: rc = %d\n", rc);
- goto failed;
- }
- created = 1;
- }
-
- /* See what object ID we were given */
- oa->o_oi = lsm->lsm_oi;
- oa->o_valid |= OBD_MD_FLID;
-
- eco = cl_echo_object_find(ed, &lsm);
- if (IS_ERR(eco)) {
- rc = PTR_ERR(eco);
- goto failed;
- }
- cl_echo_object_put(eco);
-
- CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
-
- failed:
- if (created && rc)
- obd_destroy(env, ec->ec_exp, oa, lsm, oti, NULL);
- if (lsm)
- echo_free_memmd(ed, &lsm);
- if (rc)
- CERROR("create object failed with: rc = %d\n", rc);
- return rc;
-}
-
-static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
- struct obdo *oa)
-{
- struct lov_stripe_md *lsm = NULL;
- struct echo_object *eco;
- int rc;
-
- if ((oa->o_valid & OBD_MD_FLID) == 0 || ostid_id(&oa->o_oi) == 0) {
- /* disallow use of object id 0 */
- CERROR("No valid oid\n");
- return -EINVAL;
- }
-
- rc = echo_alloc_memmd(ed, &lsm);
- if (rc < 0)
- return rc;
-
- lsm->lsm_oi = oa->o_oi;
- if (!(oa->o_valid & OBD_MD_FLGROUP))
- ostid_set_seq_echo(&lsm->lsm_oi);
-
- rc = 0;
- eco = cl_echo_object_find(ed, &lsm);
- if (!IS_ERR(eco))
- *ecop = eco;
- else
- rc = PTR_ERR(eco);
- if (lsm)
- echo_free_memmd(ed, &lsm);
- return rc;
-}
-
-static void echo_put_object(struct echo_object *eco)
-{
- if (cl_echo_object_put(eco))
- CERROR("echo client: drop an object failed");
-}
-
-static void
-echo_get_stripe_off_id(struct lov_stripe_md *lsm, u64 *offp, u64 *idp)
-{
- unsigned long stripe_count;
- unsigned long stripe_size;
- unsigned long width;
- unsigned long woffset;
- int stripe_index;
- u64 offset;
-
- if (lsm->lsm_stripe_count <= 1)
- return;
-
- offset = *offp;
- stripe_size = lsm->lsm_stripe_size;
- stripe_count = lsm->lsm_stripe_count;
-
- /* width = # bytes in all stripes */
- width = stripe_size * stripe_count;
-
- /* woffset = offset within a width; offset = whole number of widths */
- woffset = do_div(offset, width);
-
- stripe_index = woffset / stripe_size;
-
- *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi);
- *offp = offset * stripe_size + woffset % stripe_size;
-}
-
-static void
-echo_client_page_debug_setup(struct lov_stripe_md *lsm,
- struct page *page, int rw, u64 id,
- u64 offset, u64 count)
-{
- char *addr;
- u64 stripe_off;
- u64 stripe_id;
- int delta;
-
- /* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
-
- addr = kmap(page);
-
- for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
- if (rw == OBD_BRW_WRITE) {
- stripe_off = offset + delta;
- stripe_id = id;
- echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
- } else {
- stripe_off = 0xdeadbeef00c0ffeeULL;
- stripe_id = 0xdeadbeef00c0ffeeULL;
- }
- block_debug_setup(addr + delta, OBD_ECHO_BLOCK_SIZE,
- stripe_off, stripe_id);
- }
-
- kunmap(page);
-}
-
-static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
- struct page *page, u64 id,
- u64 offset, u64 count)
-{
- u64 stripe_off;
- u64 stripe_id;
- char *addr;
- int delta;
- int rc;
- int rc2;
-
- /* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
-
- addr = kmap(page);
-
- for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
- stripe_off = offset + delta;
- stripe_id = id;
- echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
-
- rc2 = block_debug_check("test_brw",
- addr + delta, OBD_ECHO_BLOCK_SIZE,
- stripe_off, stripe_id);
- if (rc2 != 0) {
- CERROR("Error in echo object %#llx\n", id);
- rc = rc2;
- }
- }
-
- kunmap(page);
- return rc;
-}
-
-static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
- struct echo_object *eco, u64 offset,
- u64 count, int async,
- struct obd_trans_info *oti)
-{
- struct lov_stripe_md *lsm = eco->eo_lsm;
- u32 npages;
- struct brw_page *pga;
- struct brw_page *pgp;
- struct page **pages;
- u64 off;
- int i;
- int rc;
- int verify;
- gfp_t gfp_mask;
- int brw_flags = 0;
-
- verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
- (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
- (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
-
- gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER;
-
- LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
- LASSERT(lsm != NULL);
- LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi));
-
- if (count <= 0 ||
- (count & (~CFS_PAGE_MASK)) != 0)
- return -EINVAL;
-
- /* XXX think again with misaligned I/O */
- npages = count >> PAGE_CACHE_SHIFT;
-
- if (rw == OBD_BRW_WRITE)
- brw_flags = OBD_BRW_ASYNC;
-
- pga = kcalloc(npages, sizeof(*pga), GFP_NOFS);
- if (pga == NULL)
- return -ENOMEM;
-
- pages = kcalloc(npages, sizeof(*pages), GFP_NOFS);
- if (pages == NULL) {
- kfree(pga);
- return -ENOMEM;
- }
-
- for (i = 0, pgp = pga, off = offset;
- i < npages;
- i++, pgp++, off += PAGE_CACHE_SIZE) {
-
- LASSERT(pgp->pg == NULL); /* for cleanup */
-
- rc = -ENOMEM;
- OBD_PAGE_ALLOC(pgp->pg, gfp_mask);
- if (pgp->pg == NULL)
- goto out;
-
- pages[i] = pgp->pg;
- pgp->count = PAGE_CACHE_SIZE;
- pgp->off = off;
- pgp->flag = brw_flags;
-
- if (verify)
- echo_client_page_debug_setup(lsm, pgp->pg, rw,
- ostid_id(&oa->o_oi), off,
- pgp->count);
- }
-
- /* brw mode can only be used at client */
- LASSERT(ed->ed_next != NULL);
- rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
-
- out:
- if (rc != 0 || rw != OBD_BRW_READ)
- verify = 0;
-
- for (i = 0, pgp = pga; i < npages; i++, pgp++) {
- if (pgp->pg == NULL)
- continue;
-
- if (verify) {
- int vrc;
-
- vrc = echo_client_page_debug_check(lsm, pgp->pg,
- ostid_id(&oa->o_oi),
- pgp->off, pgp->count);
- if (vrc != 0 && rc == 0)
- rc = vrc;
- }
- OBD_PAGE_FREE(pgp->pg);
- }
- kfree(pga);
- kfree(pages);
- return rc;
-}
-
-static int echo_client_prep_commit(const struct lu_env *env,
- struct obd_export *exp, int rw,
- struct obdo *oa, struct echo_object *eco,
- u64 offset, u64 count,
- u64 batch, struct obd_trans_info *oti,
- int async)
-{
- struct lov_stripe_md *lsm = eco->eo_lsm;
- struct obd_ioobj ioo;
- struct niobuf_local *lnb;
- struct niobuf_remote *rnb;
- u64 off;
- u64 npages, tot_pages;
- int i, ret = 0, brw_flags = 0;
-
- if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
- (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
- return -EINVAL;
-
- npages = batch >> PAGE_CACHE_SHIFT;
- tot_pages = count >> PAGE_CACHE_SHIFT;
-
- lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
- rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
-
- if (lnb == NULL || rnb == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (rw == OBD_BRW_WRITE && async)
- brw_flags |= OBD_BRW_ASYNC;
-
- obdo_to_ioobj(oa, &ioo);
-
- off = offset;
-
- for (; tot_pages; tot_pages -= npages) {
- int lpages;
-
- if (tot_pages < npages)
- npages = tot_pages;
-
- for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
- rnb[i].offset = off;
- rnb[i].len = PAGE_CACHE_SIZE;
- rnb[i].flags = brw_flags;
- }
-
- ioo.ioo_bufcnt = npages;
- oti->oti_transno = 0;
-
- lpages = npages;
- ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
- lnb, oti);
- if (ret != 0)
- goto out;
- LASSERT(lpages == npages);
-
- for (i = 0; i < lpages; i++) {
- struct page *page = lnb[i].page;
-
- /* read past eof? */
- if (page == NULL && lnb[i].rc == 0)
- continue;
-
- if (async)
- lnb[i].flags |= OBD_BRW_ASYNC;
-
- if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
- (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
- (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
- continue;
-
- if (rw == OBD_BRW_WRITE)
- echo_client_page_debug_setup(lsm, page, rw,
- ostid_id(&oa->o_oi),
- rnb[i].offset,
- rnb[i].len);
- else
- echo_client_page_debug_check(lsm, page,
- ostid_id(&oa->o_oi),
- rnb[i].offset,
- rnb[i].len);
- }
-
- ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
- rnb, npages, lnb, oti, ret);
- if (ret != 0)
- goto out;
-
- /* Reset oti otherwise it would confuse ldiskfs. */
- memset(oti, 0, sizeof(*oti));
-
- /* Reuse env context. */
- lu_context_exit((struct lu_context *)&env->le_ctx);
- lu_context_enter((struct lu_context *)&env->le_ctx);
- }
-
-out:
- kfree(lnb);
- kfree(rnb);
- return ret;
-}
-
-static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
- struct obd_export *exp,
- struct obd_ioctl_data *data,
- struct obd_trans_info *dummy_oti)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct echo_device *ed = obd2echo_dev(obd);
- struct echo_client_obd *ec = ed->ed_ec;
- struct obdo *oa = &data->ioc_obdo1;
- struct echo_object *eco;
- int rc;
- int async = 1;
- long test_mode;
-
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc)
- return rc;
-
- oa->o_valid &= ~OBD_MD_FLHANDLE;
-
- /* OFD/obdfilter works only via prep/commit */
- test_mode = (long)data->ioc_pbuf1;
- if (test_mode == 1)
- async = 0;
-
- if (ed->ed_next == NULL && test_mode != 3) {
- test_mode = 3;
- data->ioc_plen1 = data->ioc_count;
- }
-
- /* Truncate batch size to maximum */
- if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE)
- data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
-
- switch (test_mode) {
- case 1:
- /* fall through */
- case 2:
- rc = echo_client_kbrw(ed, rw, oa,
- eco, data->ioc_offset,
- data->ioc_count, async, dummy_oti);
- break;
- case 3:
- rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa,
- eco, data->ioc_offset,
- data->ioc_count, data->ioc_plen1,
- dummy_oti, async);
- break;
- default:
- rc = -EINVAL;
- }
- echo_put_object(eco);
- return rc;
-}
-
-static int
-echo_client_enqueue(struct obd_export *exp, struct obdo *oa,
- int mode, u64 offset, u64 nob)
-{
- struct echo_device *ed = obd2echo_dev(exp->exp_obd);
- struct lustre_handle *ulh = &oa->o_handle;
- struct echo_object *eco;
- u64 end;
- int rc;
-
- if (ed->ed_next == NULL)
- return -EOPNOTSUPP;
-
- if (!(mode == LCK_PR || mode == LCK_PW))
- return -EINVAL;
-
- if ((offset & (~CFS_PAGE_MASK)) != 0 ||
- (nob & (~CFS_PAGE_MASK)) != 0)
- return -EINVAL;
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc != 0)
- return rc;
-
- end = (nob == 0) ? ((u64) -1) : (offset + nob - 1);
- rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie);
- if (rc == 0) {
- oa->o_valid |= OBD_MD_FLHANDLE;
- CDEBUG(D_INFO, "Cookie is %#llx\n", ulh->cookie);
- }
- echo_put_object(eco);
- return rc;
-}
-
-static int
-echo_client_cancel(struct obd_export *exp, struct obdo *oa)
-{
- struct echo_device *ed = obd2echo_dev(exp->exp_obd);
- __u64 cookie = oa->o_handle.cookie;
-
- if ((oa->o_valid & OBD_MD_FLHANDLE) == 0)
- return -EINVAL;
-
- CDEBUG(D_INFO, "Cookie is %#llx\n", cookie);
- return cl_echo_cancel(ed, cookie);
-}
-
-static int
-echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
-{
- struct obd_device *obd = exp->exp_obd;
- struct echo_device *ed = obd2echo_dev(obd);
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_object *eco;
- struct obd_ioctl_data *data = karg;
- struct obd_trans_info dummy_oti;
- struct lu_env *env;
- struct oti_req_ack_lock *ack_lock;
- struct obdo *oa;
- struct lu_fid fid;
- int rw = OBD_BRW_READ;
- int rc = 0;
- int i;
-
- memset(&dummy_oti, 0, sizeof(dummy_oti));
-
- oa = &data->ioc_obdo1;
- if (!(oa->o_valid & OBD_MD_FLGROUP)) {
- oa->o_valid |= OBD_MD_FLGROUP;
- ostid_set_seq_echo(&oa->o_oi);
- }
-
- /* This FID is unpacked just for validation at this point */
- rc = ostid_to_fid(&fid, &oa->o_oi, 0);
- if (rc < 0)
- return rc;
-
- env = kzalloc(sizeof(*env), GFP_NOFS);
- if (!env)
- return -ENOMEM;
-
- rc = lu_env_init(env, LCT_DT_THREAD);
- if (rc) {
- rc = -ENOMEM;
- goto out;
- }
-
- switch (cmd) {
- case OBD_IOC_CREATE: /* may create echo object */
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
- data->ioc_plen1, &dummy_oti);
- goto out;
-
- case OBD_IOC_DESTROY:
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm,
- &dummy_oti, NULL);
- if (rc == 0)
- eco->eo_deleted = 1;
- echo_put_object(eco);
- }
- goto out;
-
- case OBD_IOC_GETATTR:
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- struct obd_info oinfo = { };
-
- oinfo.oi_md = eco->eo_lsm;
- oinfo.oi_oa = oa;
- rc = obd_getattr(env, ec->ec_exp, &oinfo);
- echo_put_object(eco);
- }
- goto out;
-
- case OBD_IOC_SETATTR:
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- struct obd_info oinfo = { };
-
- oinfo.oi_oa = oa;
- oinfo.oi_md = eco->eo_lsm;
-
- rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
- echo_put_object(eco);
- }
- goto out;
-
- case OBD_IOC_BRW_WRITE:
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rw = OBD_BRW_WRITE;
- /* fall through */
- case OBD_IOC_BRW_READ:
- rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
- goto out;
-
- case ECHO_IOC_GET_STRIPE:
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- rc = echo_copyout_lsm(eco->eo_lsm, data->ioc_pbuf1,
- data->ioc_plen1);
- echo_put_object(eco);
- }
- goto out;
-
- case ECHO_IOC_SET_STRIPE:
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- if (data->ioc_pbuf1 == NULL) { /* unset */
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- eco->eo_deleted = 1;
- echo_put_object(eco);
- }
- } else {
- rc = echo_create_object(env, ed, 0, oa,
- data->ioc_pbuf1,
- data->ioc_plen1, &dummy_oti);
- }
- goto out;
-
- case ECHO_IOC_ENQUEUE:
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_client_enqueue(exp, oa,
- data->ioc_conn1, /* lock mode */
- data->ioc_offset,
- data->ioc_count);/*extent*/
- goto out;
-
- case ECHO_IOC_CANCEL:
- rc = echo_client_cancel(exp, oa);
- goto out;
-
- default:
- CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
- rc = -ENOTTY;
- goto out;
- }
-
-out:
- lu_env_fini(env);
- kfree(env);
-
- /* XXX this should be in a helper also called by target_send_reply */
- for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
- i++, ack_lock++) {
- if (!ack_lock->mode)
- break;
- ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
- }
-
- return rc;
-}
-
-static int echo_client_setup(const struct lu_env *env,
- struct obd_device *obddev, struct lustre_cfg *lcfg)
-{
- struct echo_client_obd *ec = &obddev->u.echo_client;
- struct obd_device *tgt;
- struct obd_uuid echo_uuid = { "ECHO_UUID" };
- struct obd_connect_data *ocd = NULL;
- int rc;
-
- if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
- CERROR("requires a TARGET OBD name\n");
- return -EINVAL;
- }
-
- tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
- if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
- CERROR("device not attached or not set up (%s)\n",
- lustre_cfg_string(lcfg, 1));
- return -EINVAL;
- }
-
- spin_lock_init(&ec->ec_lock);
- INIT_LIST_HEAD(&ec->ec_objects);
- INIT_LIST_HEAD(&ec->ec_locks);
- ec->ec_unique = 0;
- ec->ec_nstripes = 0;
-
- ocd = kzalloc(sizeof(*ocd), GFP_NOFS);
- if (!ocd) {
- CERROR("Can't alloc ocd connecting to %s\n",
- lustre_cfg_string(lcfg, 1));
- return -ENOMEM;
- }
-
- ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
- OBD_CONNECT_BRW_SIZE |
- OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
- OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE |
- OBD_CONNECT_FID;
- ocd->ocd_brw_size = DT_MAX_BRW_SIZE;
- ocd->ocd_version = LUSTRE_VERSION_CODE;
- ocd->ocd_group = FID_SEQ_ECHO;
-
- rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
-
- kfree(ocd);
-
- if (rc != 0) {
- CERROR("fail to connect to device %s\n",
- lustre_cfg_string(lcfg, 1));
- return rc;
- }
-
- return rc;
-}
-
-static int echo_client_cleanup(struct obd_device *obddev)
-{
- struct echo_client_obd *ec = &obddev->u.echo_client;
- int rc;
-
- if (!list_empty(&obddev->obd_exports)) {
- CERROR("still has clients!\n");
- return -EBUSY;
- }
-
- LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
- rc = obd_disconnect(ec->ec_exp);
- if (rc != 0)
- CERROR("fail to disconnect device: %d\n", rc);
-
- return rc;
-}
-
-static int echo_client_connect(const struct lu_env *env,
- struct obd_export **exp,
- struct obd_device *src, struct obd_uuid *cluuid,
- struct obd_connect_data *data, void *localdata)
-{
- int rc;
- struct lustre_handle conn = { 0 };
-
- rc = class_connect(&conn, src, cluuid);
- if (rc == 0) {
- *exp = class_conn2export(&conn);
- }
-
- return rc;
-}
-
-static int echo_client_disconnect(struct obd_export *exp)
-{
- int rc;
-
- if (exp == NULL) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = class_disconnect(exp);
- goto out;
- out:
- return rc;
-}
-
-static struct obd_ops echo_client_obd_ops = {
- .o_owner = THIS_MODULE,
- .o_iocontrol = echo_client_iocontrol,
- .o_connect = echo_client_connect,
- .o_disconnect = echo_client_disconnect
-};
-
-static int echo_client_init(void)
-{
- int rc;
-
- rc = lu_kmem_init(echo_caches);
- if (rc == 0) {
- rc = class_register_type(&echo_client_obd_ops, NULL,
- LUSTRE_ECHO_CLIENT_NAME,
- &echo_device_type);
- if (rc)
- lu_kmem_fini(echo_caches);
- }
- return rc;
-}
-
-static void echo_client_exit(void)
-{
- class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
- lu_kmem_fini(echo_caches);
-}
-
-static int __init obdecho_init(void)
-{
- LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
-
- LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
-
- return echo_client_init();
-}
-
-static void /*__exit*/ obdecho_exit(void)
-{
- echo_client_exit();
-
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-
-module_init(obdecho_init);
-module_exit(obdecho_exit);
-
-/** @} echo_client */
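
A minimal userspace sketch of the striping arithmetic performed by echo_get_stripe_off_id() above, i.e. mapping a flat object offset onto a stripe index and a stripe-local offset (illustrative only: it assumes non-zero stripe_size and stripe_count, and none of these names belong to the removed driver):

#include <stdint.h>
#include <stdio.h>

/* Map a flat object offset onto (stripe index, stripe-local offset). */
static void stripe_of(uint64_t off, uint64_t stripe_size, uint64_t stripe_count,
		      unsigned int *stripe_index, uint64_t *stripe_off)
{
	uint64_t width = stripe_size * stripe_count;	/* bytes per full round  */
	uint64_t rounds = off / width;			/* completed full rounds */
	uint64_t woff = off % width;			/* offset inside a round */

	*stripe_index = (unsigned int)(woff / stripe_size);
	*stripe_off = rounds * stripe_size + woff % stripe_size;
}

int main(void)
{
	unsigned int idx;
	uint64_t soff;

	/* 5 MiB into a 4-stripe, 1 MiB stripe-size layout: stripe 1, offset 1 MiB */
	stripe_of(5ULL << 20, 1ULL << 20, 4, &idx, &soff);
	printf("stripe %u, stripe offset %llu\n", idx, (unsigned long long)soff);
	return 0;
}

This mirrors the do_div()-based computation in the deleted code, where the resulting stripe index then selects lsm_oinfo[stripe_index]->loi_oi as the per-stripe object id.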
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
deleted file mode 100644
index 8e9dbc2351e7..000000000000
--- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Whamcloud, Inc.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdecho/echo_internal.h
- */
-
-#ifndef _ECHO_INTERNAL_H
-#define _ECHO_INTERNAL_H
-
-/* The persistent object (i.e. actually stores stuff!) */
-#define ECHO_PERSISTENT_OBJID 1ULL
-#define ECHO_PERSISTENT_SIZE ((__u64)(1<<20))
-
-/* block size to use for data verification */
-#define OBD_ECHO_BLOCK_SIZE (4<<10)
-
-
-#endif
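
OBD_ECHO_BLOCK_SIZE above is the granularity at which the deleted echo_client_page_debug_setup()/echo_client_page_debug_check() helpers stamp and verify page contents (with read buffers pre-filled using the 0xdeadbeef00c0ffee sentinel). A rough standalone sketch of that stamp-and-verify idea follows; the helper names and the 16-byte stamp layout are invented for illustration, and the real block_debug_setup()/block_debug_check() encode more per block:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE	(4 << 10)	/* same 4 KiB granularity as OBD_ECHO_BLOCK_SIZE */

struct stamp {
	uint64_t off;	/* expected object offset of this block */
	uint64_t id;	/* expected object id */
};

/* Stamp every block of the buffer with (offset, id). */
static void debug_setup(char *buf, size_t len, uint64_t off, uint64_t id)
{
	size_t delta;

	for (delta = 0; delta + BLOCK_SIZE <= len; delta += BLOCK_SIZE) {
		struct stamp s = { .off = off + delta, .id = id };

		memcpy(buf + delta, &s, sizeof(s));
	}
}

/* Verify each block still carries the stamp written by debug_setup(). */
static int debug_check(const char *buf, size_t len, uint64_t off, uint64_t id)
{
	size_t delta;
	int rc = 0;

	for (delta = 0; delta + BLOCK_SIZE <= len; delta += BLOCK_SIZE) {
		struct stamp s;

		memcpy(&s, buf + delta, sizeof(s));
		if (s.off != off + delta || s.id != id) {
			fprintf(stderr, "corruption at +%zu\n", delta);
			rc = -1;
		}
	}
	return rc;
}

int main(void)
{
	static char pages[16 << 10];

	debug_setup(pages, sizeof(pages), 1ULL << 20, 42);
	return debug_check(pages, sizeof(pages), 1ULL << 20, 42) ? 1 : 0;
}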
diff --git a/drivers/staging/lustre/lustre/osc/Makefile b/drivers/staging/lustre/lustre/osc/Makefile
deleted file mode 100644
index 37cdeea9ac49..000000000000
--- a/drivers/staging/lustre/lustre/osc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += osc.o
-osc-y := osc_request.o osc_dev.o osc_object.o \
- osc_page.o osc_lock.o osc_io.o osc_quota.o osc_cache.o lproc_osc.o
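
Every tunable in the lproc_osc.c removal that follows uses the same show/store shape: the getter formats the current value, the setter parses the input with kstrtoul()/kstrtoull(), range-checks it, and only then applies it under the client lock. A tiny userspace analogue of that pattern, illustrative only (the structure, cap, and function names below are made up; the real handlers clamp against driver limits such as OSC_MAX_RIF_MAX):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct tunables {
	unsigned long max_rpcs_in_flight;
};

/* "show": format the current value into buf. */
static ssize_t max_rpcs_show(const struct tunables *t, char *buf, size_t len)
{
	return snprintf(buf, len, "%lu\n", t->max_rpcs_in_flight);
}

/* "store": parse, range-check, then apply. */
static ssize_t max_rpcs_store(struct tunables *t, const char *buf)
{
	char *end;
	unsigned long val = strtoul(buf, &end, 10);

	if (end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	if (val < 1 || val > 256)	/* arbitrary illustrative cap */
		return -ERANGE;

	t->max_rpcs_in_flight = val;
	return (ssize_t)strlen(buf);
}

int main(void)
{
	struct tunables t = { .max_rpcs_in_flight = 8 };
	char buf[32];

	if (max_rpcs_store(&t, "32\n") < 0)
		return 1;
	max_rpcs_show(&t, buf, sizeof(buf));
	fputs(buf, stdout);	/* prints "32" */
	return 0;
}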
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
deleted file mode 100644
index 2a16046aac20..000000000000
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ /dev/null
@@ -1,783 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/statfs.h>
-#include "../include/obd_cksum.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include <linux/seq_file.h>
-#include "osc_internal.h"
-
-static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%d\n", !dev->u.cli.cl_import->imp_deactive);
-}
-
-static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
- if (val < 0 || val > 1)
- return -ERANGE;
-
- /* opposite senses */
- if (dev->u.cli.cl_import->imp_deactive == val)
- rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
- else
- CDEBUG(D_CONFIG, "activate %ld: ignoring repeat request\n",
- val);
-
- return count;
-}
-LUSTRE_RW_ATTR(active);
-
-static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
-
- return sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
-}
-
-static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int rc;
- unsigned long val;
- int adding, added, req_count;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val < 1 || val > OSC_MAX_RIF_MAX)
- return -ERANGE;
-
- adding = val - cli->cl_max_rpcs_in_flight;
- req_count = atomic_read(&osc_pool_req_count);
- if (adding > 0 && req_count < osc_reqpool_maxreqcount) {
- /*
- * There might be some race which will cause over-limit
- * allocation, but it is fine.
- */
- if (req_count + adding > osc_reqpool_maxreqcount)
- adding = osc_reqpool_maxreqcount - req_count;
-
- added = osc_rq_pool->prp_populate(osc_rq_pool, adding);
- atomic_add(added, &osc_pool_req_count);
- }
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_rpcs_in_flight);
-
-static ssize_t max_dirty_mb_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- long val;
- int mult;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- val = cli->cl_dirty_max;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- mult = 1 << 20;
- return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult);
-}
-
-static ssize_t max_dirty_mb_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int rc;
- unsigned long pages_number;
-
- rc = kstrtoul(buffer, 10, &pages_number);
- if (rc)
- return rc;
-
- pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
-
- if (pages_number <= 0 ||
- pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
- pages_number > totalram_pages / 4) /* 1/4 of RAM */
- return -ERANGE;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
- osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_dirty_mb);
-
-static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *dev = m->private;
- struct client_obd *cli = &dev->u.cli;
- int shift = 20 - PAGE_CACHE_SHIFT;
-
- seq_printf(m,
- "used_mb: %d\n"
- "busy_cnt: %d\n",
- (atomic_read(&cli->cl_lru_in_list) +
- atomic_read(&cli->cl_lru_busy)) >> shift,
- atomic_read(&cli->cl_lru_busy));
-
- return 0;
-}
-
-/* shrink the number of caching pages to a specific number */
-static ssize_t osc_cached_mb_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
- struct client_obd *cli = &dev->u.cli;
- int pages_number, mult, rc;
- char kernbuf[128];
-
- if (count >= sizeof(kernbuf))
- return -EINVAL;
-
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
- kernbuf[count] = 0;
-
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
- buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
- kernbuf;
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
- if (rc)
- return rc;
-
- if (pages_number < 0)
- return -ERANGE;
-
- rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
- if (rc > 0)
- (void)osc_lru_shrink(cli, rc);
-
- return count;
-}
-LPROC_SEQ_FOPS(osc_cached_mb);
-
-static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int len;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_dirty);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return len;
-}
-LUSTRE_RO_ATTR(cur_dirty_bytes);
-
-static ssize_t cur_grant_bytes_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int len;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_avail_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return len;
-}
-
-static ssize_t cur_grant_bytes_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &obd->u.cli;
- int rc;
- unsigned long long val;
-
- rc = kstrtoull(buffer, 10, &val);
- if (rc)
- return rc;
-
- /* this is only for shrinking grant */
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (val >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- return -EINVAL;
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
- rc = osc_shrink_grant_to_target(cli, val);
- if (rc)
- return rc;
- return count;
-}
-LUSTRE_RW_ATTR(cur_grant_bytes);
-
-static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- int len;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_lost_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return len;
-}
-LUSTRE_RO_ATTR(cur_lost_grant_bytes);
-
-static ssize_t grant_shrink_interval_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%d\n", obd->u.cli.cl_grant_shrink_interval);
-}
-
-static ssize_t grant_shrink_interval_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- if (val <= 0)
- return -ERANGE;
-
- obd->u.cli.cl_grant_shrink_interval = val;
-
- return count;
-}
-LUSTRE_RW_ATTR(grant_shrink_interval);
-
-static ssize_t checksums_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%d\n", obd->u.cli.cl_checksum ? 1 : 0);
-}
-
-static ssize_t checksums_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- obd->u.cli.cl_checksum = (val ? 1 : 0);
-
- return count;
-}
-LUSTRE_RW_ATTR(checksums);
-
-static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
-{
- struct obd_device *obd = m->private;
- int i;
- DECLARE_CKSUM_NAME;
-
- if (obd == NULL)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
- if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
- continue;
- if (obd->u.cli.cl_cksum_type == (1 << i))
- seq_printf(m, "[%s] ", cksum_name[i]);
- else
- seq_printf(m, "%s ", cksum_name[i]);
- }
- seq_putc(m, '\n');
- return 0;
-}
-
-static ssize_t osc_checksum_type_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- int i;
- DECLARE_CKSUM_NAME;
- char kernbuf[10];
-
- if (obd == NULL)
- return 0;
-
- if (count > sizeof(kernbuf) - 1)
- return -EINVAL;
- if (copy_from_user(kernbuf, buffer, count))
- return -EFAULT;
- if (count > 0 && kernbuf[count - 1] == '\n')
- kernbuf[count - 1] = '\0';
- else
- kernbuf[count] = '\0';
-
- for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
- if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
- continue;
- if (!strcmp(kernbuf, cksum_name[i])) {
- obd->u.cli.cl_cksum_type = 1 << i;
- return count;
- }
- }
- return -EINVAL;
-}
-LPROC_SEQ_FOPS(osc_checksum_type);
-
-static ssize_t resend_count_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%u\n", atomic_read(&obd->u.cli.cl_resends));
-}
-
-static ssize_t resend_count_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- int rc;
- unsigned long val;
-
- rc = kstrtoul(buffer, 10, &val);
- if (rc)
- return rc;
-
- atomic_set(&obd->u.cli.cl_resends, val);
-
- return count;
-}
-LUSTRE_RW_ATTR(resend_count);
-
-static ssize_t contention_seconds_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
-
- return sprintf(buf, "%u\n", od->od_contention_time);
-}
-
-static ssize_t contention_seconds_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
-
- return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?:
- count;
-}
-LUSTRE_RW_ATTR(contention_seconds);
-
-static ssize_t lockless_truncate_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
-
- return sprintf(buf, "%u\n", od->od_lockless_truncate);
-}
-
-static ssize_t lockless_truncate_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
- struct osc_device *od = obd2osc_dev(obd);
-
- return lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?:
- count;
-}
-LUSTRE_RW_ATTR(lockless_truncate);
-
-static ssize_t destroys_in_flight_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *obd = container_of(kobj, struct obd_device,
- obd_kobj);
-
- return sprintf(buf, "%u\n",
- atomic_read(&obd->u.cli.cl_destroy_in_flight));
-}
-LUSTRE_RO_ATTR(destroys_in_flight);
-
-static ssize_t max_pages_per_rpc_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
-
- return sprintf(buf, "%d\n", cli->cl_max_pages_per_rpc);
-}
-
-static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct obd_device *dev = container_of(kobj, struct obd_device,
- obd_kobj);
- struct client_obd *cli = &dev->u.cli;
- struct obd_connect_data *ocd = &cli->cl_import->imp_connect_data;
- int chunk_mask, rc;
- unsigned long long val;
-
- rc = kstrtoull(buffer, 10, &val);
- if (rc)
- return rc;
-
- /* if the max_pages is specified in bytes, convert to pages */
- if (val >= ONE_MB_BRW_SIZE)
- val >>= PAGE_CACHE_SHIFT;
-
- chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
- /* max_pages_per_rpc must be chunk aligned */
- val = (val + ~chunk_mask) & chunk_mask;
- if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
- return -ERANGE;
- }
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_max_pages_per_rpc = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(max_pages_per_rpc);
-
-LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
-LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
-LPROC_SEQ_FOPS_RO_TYPE(osc, timeouts);
-LPROC_SEQ_FOPS_RO_TYPE(osc, state);
-
-LPROC_SEQ_FOPS_WR_ONLY(osc, ping);
-
-LPROC_SEQ_FOPS_RW_TYPE(osc, import);
-LPROC_SEQ_FOPS_RW_TYPE(osc, pinger_recov);
-
-static struct lprocfs_vars lprocfs_osc_obd_vars[] = {
- { "ping", &osc_ping_fops, NULL, 0222 },
- { "connect_flags", &osc_connect_flags_fops, NULL, 0 },
- /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/
- { "ost_server_uuid", &osc_server_uuid_fops, NULL, 0 },
- { "ost_conn_uuid", &osc_conn_uuid_fops, NULL, 0 },
- { "osc_cached_mb", &osc_cached_mb_fops, NULL },
- { "checksum_type", &osc_checksum_type_fops, NULL },
- { "timeouts", &osc_timeouts_fops, NULL, 0 },
- { "import", &osc_import_fops, NULL },
- { "state", &osc_state_fops, NULL, 0 },
- { "pinger_recov", &osc_pinger_recov_fops, NULL },
- { NULL }
-};
-
-#define pct(a, b) (b ? a * 100 / b : 0)
-
-static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
- unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
- int i;
-
- ktime_get_real_ts64(&now);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "read RPCs in flight: %d\n",
- cli->cl_r_in_flight);
- seq_printf(seq, "write RPCs in flight: %d\n",
- cli->cl_w_in_flight);
- seq_printf(seq, "pending write pages: %d\n",
- atomic_read(&cli->cl_pending_w_pages));
- seq_printf(seq, "pending read pages: %d\n",
- atomic_read(&cli->cl_pending_r_pages));
-
- seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_puts(seq, "pages per rpc rpcs % cum % |");
- seq_puts(seq, " rpcs % cum %\n");
-
- read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
-
- read_cum = 0;
- write_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
- unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
- read_cum += r;
- write_cum += w;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- 1 << i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-
- seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_puts(seq, "rpcs in flight rpcs % cum % |");
- seq_puts(seq, " rpcs % cum %\n");
-
- read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
-
- read_cum = 0;
- write_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long r = cli->cl_read_rpc_hist.oh_buckets[i];
- unsigned long w = cli->cl_write_rpc_hist.oh_buckets[i];
- read_cum += r;
- write_cum += w;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-
- seq_puts(seq, "\n\t\t\tread\t\t\twrite\n");
- seq_puts(seq, "offset rpcs % cum % |");
- seq_puts(seq, " rpcs % cum %\n");
-
- read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist);
- write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist);
-
- read_cum = 0;
- write_cum = 0;
- for (i = 0; i < OBD_HIST_MAX; i++) {
- unsigned long r = cli->cl_read_offset_hist.oh_buckets[i];
- unsigned long w = cli->cl_write_offset_hist.oh_buckets[i];
- read_cum += r;
- write_cum += w;
- seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- (i == 0) ? 0 : 1 << (i - 1),
- r, pct(r, read_tot), pct(read_cum, read_tot),
- w, pct(w, write_tot), pct(write_cum, write_tot));
- if (read_cum == read_tot && write_cum == write_tot)
- break;
- }
-
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return 0;
-}
-#undef pct
-
-static ssize_t osc_rpc_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
-
- lprocfs_oh_clear(&cli->cl_read_rpc_hist);
- lprocfs_oh_clear(&cli->cl_write_rpc_hist);
- lprocfs_oh_clear(&cli->cl_read_page_hist);
- lprocfs_oh_clear(&cli->cl_write_page_hist);
- lprocfs_oh_clear(&cli->cl_read_offset_hist);
- lprocfs_oh_clear(&cli->cl_write_offset_hist);
-
- return len;
-}
-
-LPROC_SEQ_FOPS(osc_rpc_stats);
-
-static int osc_stats_seq_show(struct seq_file *seq, void *v)
-{
- struct timespec64 now;
- struct obd_device *dev = seq->private;
- struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
-
- ktime_get_real_ts64(&now);
-
- seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
- (s64)now.tv_sec, (unsigned long)now.tv_nsec);
- seq_printf(seq, "lockless_write_bytes\t\t%llu\n",
- stats->os_lockless_writes);
- seq_printf(seq, "lockless_read_bytes\t\t%llu\n",
- stats->os_lockless_reads);
- seq_printf(seq, "lockless_truncate\t\t%llu\n",
- stats->os_lockless_truncates);
- return 0;
-}
-
-static ssize_t osc_stats_seq_write(struct file *file,
- const char __user *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct obd_device *dev = seq->private;
- struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
-
- memset(stats, 0, sizeof(*stats));
- return len;
-}
-
-LPROC_SEQ_FOPS(osc_stats);
-
-int lproc_osc_attach_seqstat(struct obd_device *dev)
-{
- int rc;
-
- rc = ldebugfs_seq_create(dev->obd_debugfs_entry, "osc_stats", 0644,
- &osc_stats_fops, dev);
- if (rc == 0)
- rc = ldebugfs_obd_seq_create(dev, "rpc_stats", 0644,
- &osc_rpc_stats_fops, dev);
-
- return rc;
-}
-
-static struct attribute *osc_attrs[] = {
- &lustre_attr_active.attr,
- &lustre_attr_checksums.attr,
- &lustre_attr_contention_seconds.attr,
- &lustre_attr_cur_dirty_bytes.attr,
- &lustre_attr_cur_grant_bytes.attr,
- &lustre_attr_cur_lost_grant_bytes.attr,
- &lustre_attr_destroys_in_flight.attr,
- &lustre_attr_grant_shrink_interval.attr,
- &lustre_attr_lockless_truncate.attr,
- &lustre_attr_max_dirty_mb.attr,
- &lustre_attr_max_pages_per_rpc.attr,
- &lustre_attr_max_rpcs_in_flight.attr,
- &lustre_attr_resend_count.attr,
- NULL,
-};
-
-static struct attribute_group osc_attr_group = {
- .attrs = osc_attrs,
-};
-
-void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars)
-{
- lvars->sysfs_vars = &osc_attr_group;
- lvars->obd_vars = lprocfs_osc_obd_vars;
-}
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
deleted file mode 100644
index 62da06157665..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ /dev/null
@@ -1,2936 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- *
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * osc cache management.
- *
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "osc_cl_internal.h"
-#include "osc_internal.h"
-
-static int extent_debug; /* set it to be true for more debug */
-
-static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
-static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- int state);
-static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
- struct osc_async_page *oap, int sent, int rc);
-static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
- int cmd);
-static int osc_refresh_count(const struct lu_env *env,
- struct osc_async_page *oap, int cmd);
-static int osc_io_unplug_async(const struct lu_env *env,
- struct client_obd *cli, struct osc_object *osc);
-static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
- unsigned int lost_grant);
-
-static void osc_extent_tree_dump0(int level, struct osc_object *obj,
- const char *func, int line);
-#define osc_extent_tree_dump(lvl, obj) \
- osc_extent_tree_dump0(lvl, obj, __func__, __LINE__)
-
-/** \addtogroup osc
- * @{
- */
-
-/* ------------------ osc extent ------------------ */
-static inline char *ext_flags(struct osc_extent *ext, char *flags)
-{
- char *buf = flags;
- *buf++ = ext->oe_rw ? 'r' : 'w';
- if (ext->oe_intree)
- *buf++ = 'i';
- if (ext->oe_srvlock)
- *buf++ = 's';
- if (ext->oe_hp)
- *buf++ = 'h';
- if (ext->oe_urgent)
- *buf++ = 'u';
- if (ext->oe_memalloc)
- *buf++ = 'm';
- if (ext->oe_trunc_pending)
- *buf++ = 't';
- if (ext->oe_fsync_wait)
- *buf++ = 'Y';
- *buf = 0;
- return flags;
-}
-
-static inline char list_empty_marker(struct list_head *list)
-{
- return list_empty(list) ? '-' : '+';
-}
-
-#define EXTSTR "[%lu -> %lu/%lu]"
-#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
-static const char *oes_strings[] = {
- "inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
-
-#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
- struct osc_extent *__ext = (extent); \
- char __buf[16]; \
- \
- CDEBUG(lvl, \
- "extent %p@{" EXTSTR ", " \
- "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
- /* ----- extent part 0 ----- */ \
- __ext, EXTPARA(__ext), \
- /* ----- part 1 ----- */ \
- atomic_read(&__ext->oe_refc), \
- atomic_read(&__ext->oe_users), \
- list_empty_marker(&__ext->oe_link), \
- oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
- __ext->oe_obj, \
- /* ----- part 2 ----- */ \
- __ext->oe_grants, __ext->oe_nr_pages, \
- list_empty_marker(&__ext->oe_pages), \
- waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
- __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
- /* ----- part 4 ----- */ \
- ## __VA_ARGS__); \
-} while (0)
-
-#undef EASSERTF
-#define EASSERTF(expr, ext, fmt, args...) do { \
- if (!(expr)) { \
- OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args); \
- osc_extent_tree_dump(D_ERROR, (ext)->oe_obj); \
- LASSERT(expr); \
- } \
-} while (0)
-
-#undef EASSERT
-#define EASSERT(expr, ext) EASSERTF(expr, ext, "\n")
-
-static inline struct osc_extent *rb_extent(struct rb_node *n)
-{
- if (n == NULL)
- return NULL;
-
- return container_of(n, struct osc_extent, oe_node);
-}
-
-static inline struct osc_extent *next_extent(struct osc_extent *ext)
-{
- if (ext == NULL)
- return NULL;
-
- LASSERT(ext->oe_intree);
- return rb_extent(rb_next(&ext->oe_node));
-}
-
-static inline struct osc_extent *prev_extent(struct osc_extent *ext)
-{
- if (ext == NULL)
- return NULL;
-
- LASSERT(ext->oe_intree);
- return rb_extent(rb_prev(&ext->oe_node));
-}
-
-static inline struct osc_extent *first_extent(struct osc_object *obj)
-{
- return rb_extent(rb_first(&obj->oo_root));
-}
-
-/* object must be locked by caller. */
-static int osc_extent_sanity_check0(struct osc_extent *ext,
- const char *func, const int line)
-{
- struct osc_object *obj = ext->oe_obj;
- struct osc_async_page *oap;
- int page_count;
- int rc = 0;
-
- if (!osc_object_is_locked(obj)) {
- rc = 9;
- goto out;
- }
-
- if (ext->oe_state >= OES_STATE_MAX) {
- rc = 10;
- goto out;
- }
-
- if (atomic_read(&ext->oe_refc) <= 0) {
- rc = 20;
- goto out;
- }
-
- if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) {
- rc = 30;
- goto out;
- }
-
- switch (ext->oe_state) {
- case OES_INV:
- if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
- rc = 35;
- else
- rc = 0;
- goto out;
- case OES_ACTIVE:
- if (atomic_read(&ext->oe_users) == 0) {
- rc = 40;
- goto out;
- }
- if (ext->oe_hp) {
- rc = 50;
- goto out;
- }
- if (ext->oe_fsync_wait && !ext->oe_urgent) {
- rc = 55;
- goto out;
- }
- break;
- case OES_CACHE:
- if (ext->oe_grants == 0) {
- rc = 60;
- goto out;
- }
- if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) {
- rc = 65;
- goto out;
- }
- default:
- if (atomic_read(&ext->oe_users) > 0) {
- rc = 70;
- goto out;
- }
- }
-
- if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) {
- rc = 80;
- goto out;
- }
-
- if (ext->oe_osclock == NULL && ext->oe_grants > 0) {
- rc = 90;
- goto out;
- }
-
- if (ext->oe_osclock) {
- struct cl_lock_descr *descr;
- descr = &ext->oe_osclock->cll_descr;
- if (!(descr->cld_start <= ext->oe_start &&
- descr->cld_end >= ext->oe_max_end)) {
- rc = 100;
- goto out;
- }
- }
-
- if (ext->oe_nr_pages > ext->oe_mppr) {
- rc = 105;
- goto out;
- }
-
- /* Do not verify page list if extent is in RPC. This is because an
- * in-RPC extent is supposed to be exclusively accessible w/o lock. */
- if (ext->oe_state > OES_CACHE) {
- rc = 0;
- goto out;
- }
-
- if (!extent_debug) {
- rc = 0;
- goto out;
- }
-
- page_count = 0;
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- pgoff_t index = oap2cl_page(oap)->cp_index;
- ++page_count;
- if (index > ext->oe_end || index < ext->oe_start) {
- rc = 110;
- goto out;
- }
- }
- if (page_count != ext->oe_nr_pages) {
- rc = 120;
- goto out;
- }
-
-out:
- if (rc != 0)
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "%s:%d sanity check %p failed with rc = %d\n",
- func, line, ext, rc);
- return rc;
-}
-
-#define sanity_check_nolock(ext) \
- osc_extent_sanity_check0(ext, __func__, __LINE__)
-
-#define sanity_check(ext) ({ \
- int __res; \
- osc_object_lock((ext)->oe_obj); \
- __res = sanity_check_nolock(ext); \
- osc_object_unlock((ext)->oe_obj); \
- __res; \
-})
-
-
-/**
- * sanity check - to make sure there is no overlapped extent in the tree.
- */
-static int osc_extent_is_overlapped(struct osc_object *obj,
- struct osc_extent *ext)
-{
- struct osc_extent *tmp;
-
- LASSERT(osc_object_is_locked(obj));
-
- if (!extent_debug)
- return 0;
-
- for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
- if (tmp == ext)
- continue;
- if (tmp->oe_end >= ext->oe_start &&
- tmp->oe_start <= ext->oe_end)
- return 1;
- }
- return 0;
-}
-
-static void osc_extent_state_set(struct osc_extent *ext, int state)
-{
- LASSERT(osc_object_is_locked(ext->oe_obj));
- LASSERT(state >= OES_INV && state < OES_STATE_MAX);
-
- /* Never try to sanity check a state changing extent :-) */
- /* LASSERT(sanity_check_nolock(ext) == 0); */
-
- /* TODO: validate the state machine */
- ext->oe_state = state;
- wake_up_all(&ext->oe_waitq);
-}
-
-static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
-{
- struct osc_extent *ext;
-
- OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_IOFS);
- if (ext == NULL)
- return NULL;
-
- RB_CLEAR_NODE(&ext->oe_node);
- ext->oe_obj = obj;
- atomic_set(&ext->oe_refc, 1);
- atomic_set(&ext->oe_users, 0);
- INIT_LIST_HEAD(&ext->oe_link);
- ext->oe_state = OES_INV;
- INIT_LIST_HEAD(&ext->oe_pages);
- init_waitqueue_head(&ext->oe_waitq);
- ext->oe_osclock = NULL;
-
- return ext;
-}
-
-static void osc_extent_free(struct osc_extent *ext)
-{
- OBD_SLAB_FREE_PTR(ext, osc_extent_kmem);
-}
-
-static struct osc_extent *osc_extent_get(struct osc_extent *ext)
-{
- LASSERT(atomic_read(&ext->oe_refc) >= 0);
- atomic_inc(&ext->oe_refc);
- return ext;
-}
-
-static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
-{
- LASSERT(atomic_read(&ext->oe_refc) > 0);
- if (atomic_dec_and_test(&ext->oe_refc)) {
- LASSERT(list_empty(&ext->oe_link));
- LASSERT(atomic_read(&ext->oe_users) == 0);
- LASSERT(ext->oe_state == OES_INV);
- LASSERT(!ext->oe_intree);
-
- if (ext->oe_osclock) {
- cl_lock_put(env, ext->oe_osclock);
- ext->oe_osclock = NULL;
- }
- osc_extent_free(ext);
- }
-}
-
-/**
- * osc_extent_put_trust() is a special version of osc_extent_put() when
- * it's known that the caller is not the last user. This is to address the
- * problem of lacking a lu_env ;-).
- */
-static void osc_extent_put_trust(struct osc_extent *ext)
-{
- LASSERT(atomic_read(&ext->oe_refc) > 1);
- LASSERT(osc_object_is_locked(ext->oe_obj));
- atomic_dec(&ext->oe_refc);
-}
-
-/**
- * Return the extent which includes pgoff @index, or return the greatest
- * previous extent in the tree.
- */
-static struct osc_extent *osc_extent_search(struct osc_object *obj,
- pgoff_t index)
-{
- struct rb_node *n = obj->oo_root.rb_node;
- struct osc_extent *tmp, *p = NULL;
-
- LASSERT(osc_object_is_locked(obj));
- while (n != NULL) {
- tmp = rb_extent(n);
- if (index < tmp->oe_start) {
- n = n->rb_left;
- } else if (index > tmp->oe_end) {
- p = rb_extent(n);
- n = n->rb_right;
- } else {
- return tmp;
- }
- }
- return p;
-}
-
-/*
- * Return the extent covering @index, otherwise return NULL.
- * caller must have held object lock.
- */
-static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
- pgoff_t index)
-{
- struct osc_extent *ext;
-
- ext = osc_extent_search(obj, index);
- if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end)
- return osc_extent_get(ext);
- return NULL;
-}
-
-/* caller must have held object lock. */
-static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
-{
- struct rb_node **n = &obj->oo_root.rb_node;
- struct rb_node *parent = NULL;
- struct osc_extent *tmp;
-
- LASSERT(ext->oe_intree == 0);
- LASSERT(ext->oe_obj == obj);
- LASSERT(osc_object_is_locked(obj));
- while (*n != NULL) {
- tmp = rb_extent(*n);
- parent = *n;
-
- if (ext->oe_end < tmp->oe_start)
- n = &(*n)->rb_left;
- else if (ext->oe_start > tmp->oe_end)
- n = &(*n)->rb_right;
- else
- EASSERTF(0, tmp, EXTSTR, EXTPARA(ext));
- }
- rb_link_node(&ext->oe_node, parent, n);
- rb_insert_color(&ext->oe_node, &obj->oo_root);
- osc_extent_get(ext);
- ext->oe_intree = 1;
-}
-
-/* caller must have held object lock. */
-static void osc_extent_erase(struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
- LASSERT(osc_object_is_locked(obj));
- if (ext->oe_intree) {
- rb_erase(&ext->oe_node, &obj->oo_root);
- ext->oe_intree = 0;
- /* rbtree held a refcount */
- osc_extent_put_trust(ext);
- }
-}
-
-static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
-
- LASSERT(osc_object_is_locked(obj));
- LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
- if (ext->oe_state == OES_CACHE) {
- osc_extent_state_set(ext, OES_ACTIVE);
- osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
- }
- atomic_inc(&ext->oe_users);
- list_del_init(&ext->oe_link);
- return osc_extent_get(ext);
-}
-
-static void __osc_extent_remove(struct osc_extent *ext)
-{
- LASSERT(osc_object_is_locked(ext->oe_obj));
- LASSERT(list_empty(&ext->oe_pages));
- osc_extent_erase(ext);
- list_del_init(&ext->oe_link);
- osc_extent_state_set(ext, OES_INV);
- OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
-}
-
-static void osc_extent_remove(struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
-
- osc_object_lock(obj);
- __osc_extent_remove(ext);
- osc_object_unlock(obj);
-}
-
-/**
- * This function is used to merge extents to get better performance. It checks
- * if @cur and @victim are contiguous at chunk level.
- */
-static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
- struct osc_extent *victim)
-{
- struct osc_object *obj = cur->oe_obj;
- pgoff_t chunk_start;
- pgoff_t chunk_end;
- int ppc_bits;
-
- LASSERT(cur->oe_state == OES_CACHE);
- LASSERT(osc_object_is_locked(obj));
- if (victim == NULL)
- return -EINVAL;
-
- if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
- return -EBUSY;
-
- if (cur->oe_max_end != victim->oe_max_end)
- return -ERANGE;
-
- LASSERT(cur->oe_osclock == victim->oe_osclock);
- ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
- chunk_start = cur->oe_start >> ppc_bits;
- chunk_end = cur->oe_end >> ppc_bits;
- if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
- chunk_end + 1 != victim->oe_start >> ppc_bits)
- return -ERANGE;
-
- OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);
-
- cur->oe_start = min(cur->oe_start, victim->oe_start);
- cur->oe_end = max(cur->oe_end, victim->oe_end);
- cur->oe_grants += victim->oe_grants;
- cur->oe_nr_pages += victim->oe_nr_pages;
- /* only the following bits are needed to merge */
- cur->oe_urgent |= victim->oe_urgent;
- cur->oe_memalloc |= victim->oe_memalloc;
- list_splice_init(&victim->oe_pages, &cur->oe_pages);
- list_del_init(&victim->oe_link);
- victim->oe_nr_pages = 0;
-
- osc_extent_get(victim);
- __osc_extent_remove(victim);
- osc_extent_put(env, victim);
-
- OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim);
- return 0;
-}
-
-/**
- * Drop user count of osc_extent, and unplug IO asynchronously.
- */
-void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
-{
- struct osc_object *obj = ext->oe_obj;
-
- LASSERT(atomic_read(&ext->oe_users) > 0);
- LASSERT(sanity_check(ext) == 0);
- LASSERT(ext->oe_grants > 0);
-
- if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
- LASSERT(ext->oe_state == OES_ACTIVE);
- if (ext->oe_trunc_pending) {
- /* a truncate process is waiting for this extent.
- * This may happen due to a race, check
- * osc_cache_truncate_start(). */
- osc_extent_state_set(ext, OES_TRUNC);
- ext->oe_trunc_pending = 0;
- } else {
- osc_extent_state_set(ext, OES_CACHE);
- osc_update_pending(obj, OBD_BRW_WRITE,
- ext->oe_nr_pages);
-
- /* try to merge the previous and next extent. */
- osc_extent_merge(env, ext, prev_extent(ext));
- osc_extent_merge(env, ext, next_extent(ext));
-
- if (ext->oe_urgent)
- list_move_tail(&ext->oe_link,
- &obj->oo_urgent_exts);
- }
- osc_object_unlock(obj);
-
- osc_io_unplug_async(env, osc_cli(obj), obj);
- }
- osc_extent_put(env, ext);
-}
-
-static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
-{
- return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
-}
-
-/**
- * Find or create an extent which includes @index, core function to manage
- * extent tree.
- */
-struct osc_extent *osc_extent_find(const struct lu_env *env,
- struct osc_object *obj, pgoff_t index,
- int *grants)
-
-{
- struct client_obd *cli = osc_cli(obj);
- struct cl_lock *lock;
- struct osc_extent *cur;
- struct osc_extent *ext;
- struct osc_extent *conflict = NULL;
- struct osc_extent *found = NULL;
- pgoff_t chunk;
- pgoff_t max_end;
- int max_pages; /* max_pages_per_rpc */
- int chunksize;
- int ppc_bits; /* pages per chunk bits */
- int chunk_mask;
- int rc;
-
- cur = osc_extent_alloc(obj);
- if (cur == NULL)
- return ERR_PTR(-ENOMEM);
-
- lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
- LASSERT(lock != NULL);
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
-
- LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
- ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
- chunk_mask = ~((1 << ppc_bits) - 1);
- chunksize = 1 << cli->cl_chunkbits;
- chunk = index >> ppc_bits;
-
-	/* align end to rpc edge, rpc size may not be a power-of-2 integer. */
- max_pages = cli->cl_max_pages_per_rpc;
- LASSERT((max_pages & ~chunk_mask) == 0);
- max_end = index - (index % max_pages) + max_pages - 1;
- max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);
-
- /* initialize new extent by parameters so far */
- cur->oe_max_end = max_end;
- cur->oe_start = index & chunk_mask;
- cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
- if (cur->oe_start < lock->cll_descr.cld_start)
- cur->oe_start = lock->cll_descr.cld_start;
- if (cur->oe_end > max_end)
- cur->oe_end = max_end;
- cur->oe_osclock = lock;
- cur->oe_grants = 0;
- cur->oe_mppr = max_pages;
-
-	/* grants have been allocated by the caller */
- LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
- "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
- LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur));
-
-restart:
- osc_object_lock(obj);
- ext = osc_extent_search(obj, cur->oe_start);
- if (ext == NULL)
- ext = first_extent(obj);
- while (ext != NULL) {
- loff_t ext_chk_start = ext->oe_start >> ppc_bits;
- loff_t ext_chk_end = ext->oe_end >> ppc_bits;
-
- LASSERT(sanity_check_nolock(ext) == 0);
- if (chunk > ext_chk_end + 1)
- break;
-
- /* if covering by different locks, no chance to match */
- if (lock != ext->oe_osclock) {
- EASSERTF(!overlapped(ext, cur), ext,
- EXTSTR, EXTPARA(cur));
-
- ext = next_extent(ext);
- continue;
- }
-
- /* discontiguous chunks? */
- if (chunk + 1 < ext_chk_start) {
- ext = next_extent(ext);
- continue;
- }
-
- /* ok, from now on, ext and cur have these attrs:
- * 1. covered by the same lock
- * 2. contiguous at chunk level or overlapping. */
-
- if (overlapped(ext, cur)) {
- /* cur is the minimum unit, so overlapping means
- * full contain. */
- EASSERTF((ext->oe_start <= cur->oe_start &&
- ext->oe_end >= cur->oe_end),
- ext, EXTSTR, EXTPARA(cur));
-
- if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
- /* for simplicity, we wait for this extent to
- * finish before going forward. */
- conflict = osc_extent_get(ext);
- break;
- }
-
- found = osc_extent_hold(ext);
- break;
- }
-
- /* non-overlapped extent */
- if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
- /* we can't do anything for a non OES_CACHE extent, or
- * if there is someone waiting for this extent to be
- * flushed, try next one. */
- ext = next_extent(ext);
- continue;
- }
-
- /* check if they belong to the same rpc slot before trying to
- * merge. the extents are not overlapped and contiguous at
- * chunk level to get here. */
- if (ext->oe_max_end != max_end) {
- /* if they don't belong to the same RPC slot or
- * max_pages_per_rpc has ever changed, do not merge. */
- ext = next_extent(ext);
- continue;
- }
-
- /* it's required that an extent must be contiguous at chunk
- * level so that we know the whole extent is covered by grant
- * (the pages in the extent are NOT required to be contiguous).
-	 * Otherwise, it will be too difficult to know which
- * chunks have grants allocated. */
-
- /* try to do front merge - extend ext's start */
- if (chunk + 1 == ext_chk_start) {
- /* ext must be chunk size aligned */
- EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);
-
- /* pull ext's start back to cover cur */
- ext->oe_start = cur->oe_start;
- ext->oe_grants += chunksize;
- *grants -= chunksize;
-
- found = osc_extent_hold(ext);
- } else if (chunk == ext_chk_end + 1) {
- /* rear merge */
- ext->oe_end = cur->oe_end;
- ext->oe_grants += chunksize;
- *grants -= chunksize;
-
- /* try to merge with the next one because we just fill
- * in a gap */
- if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
- /* we can save extent tax from next extent */
- *grants += cli->cl_extent_tax;
-
- found = osc_extent_hold(ext);
- }
- if (found != NULL)
- break;
-
- ext = next_extent(ext);
- }
-
- osc_extent_tree_dump(D_CACHE, obj);
- if (found != NULL) {
- LASSERT(conflict == NULL);
- if (!IS_ERR(found)) {
- LASSERT(found->oe_osclock == cur->oe_osclock);
- OSC_EXTENT_DUMP(D_CACHE, found,
- "found caching ext for %lu.\n", index);
- }
- } else if (conflict == NULL) {
- /* create a new extent */
- EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
- cur->oe_grants = chunksize + cli->cl_extent_tax;
- *grants -= cur->oe_grants;
- LASSERT(*grants >= 0);
-
- cur->oe_state = OES_CACHE;
- found = osc_extent_hold(cur);
- osc_extent_insert(obj, cur);
- OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
- index, lock->cll_descr.cld_end);
- }
- osc_object_unlock(obj);
-
- if (conflict != NULL) {
- LASSERT(found == NULL);
-
- /* waiting for IO to finish. Please notice that it's impossible
- * to be an OES_TRUNC extent. */
- rc = osc_extent_wait(env, conflict, OES_INV);
- osc_extent_put(env, conflict);
- conflict = NULL;
- if (rc < 0) {
- found = ERR_PTR(rc);
- goto out;
- }
-
- goto restart;
- }
-
-out:
- osc_extent_put(env, cur);
- LASSERT(*grants >= 0);
- return found;
-}
-
-/**
- * Called when IO is finished to an extent.
- */
-int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
- int sent, int rc)
-{
- struct client_obd *cli = osc_cli(ext->oe_obj);
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
- int nr_pages = ext->oe_nr_pages;
- int lost_grant = 0;
- int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
- __u64 last_off = 0;
- int last_count = -1;
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");
-
- ext->oe_rc = rc ?: ext->oe_nr_pages;
- EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
- list_del_init(&oap->oap_rpc_item);
- list_del_init(&oap->oap_pending_item);
- if (last_off <= oap->oap_obj_off) {
- last_off = oap->oap_obj_off;
- last_count = oap->oap_count;
- }
-
- --ext->oe_nr_pages;
- osc_ap_completion(env, cli, oap, sent, rc);
- }
- EASSERT(ext->oe_nr_pages == 0, ext);
-
- if (!sent) {
- lost_grant = ext->oe_grants;
- } else if (blocksize < PAGE_CACHE_SIZE &&
- last_count != PAGE_CACHE_SIZE) {
- /* For short writes we shouldn't count parts of pages that
- * span a whole chunk on the OST side, or our accounting goes
- * wrong. Should match the code in filter_grant_check. */
- int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
- int count = oap->oap_count + (offset & (blocksize - 1));
- int end = (offset + oap->oap_count) & (blocksize - 1);
- if (end)
- count += blocksize - end;
-
- lost_grant = PAGE_CACHE_SIZE - count;
- }
- if (ext->oe_grants > 0)
- osc_free_grant(cli, nr_pages, lost_grant);
-
- osc_extent_remove(ext);
- /* put the refcount for RPC */
- osc_extent_put(env, ext);
- return 0;
-}
-
-static int extent_wait_cb(struct osc_extent *ext, int state)
-{
- int ret;
-
- osc_object_lock(ext->oe_obj);
- ret = ext->oe_state == state;
- osc_object_unlock(ext->oe_obj);
-
- return ret;
-}
-
-/**
- * Wait for the extent's state to become @state.
- */
-static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- int state)
-{
- struct osc_object *obj = ext->oe_obj;
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
- int rc = 0;
-
- osc_object_lock(obj);
- LASSERT(sanity_check_nolock(ext) == 0);
- /* `Kick' this extent only if the caller is waiting for it to be
- * written out. */
- if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp &&
- !ext->oe_trunc_pending) {
- if (ext->oe_state == OES_ACTIVE) {
- ext->oe_urgent = 1;
- } else if (ext->oe_state == OES_CACHE) {
- ext->oe_urgent = 1;
- osc_extent_hold(ext);
- rc = 1;
- }
- }
- osc_object_unlock(obj);
- if (rc == 1)
- osc_extent_release(env, ext);
-
- /* wait for the extent until its state becomes @state */
- rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
- if (rc == -ETIMEDOUT) {
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "%s: wait ext to %d timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
-
- lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
- &lwi);
- }
- if (rc == 0 && ext->oe_rc < 0)
- rc = ext->oe_rc;
- return rc;
-}
-
-/**
- * Discard pages with index greater than @size. If @ext is overlapped with
- * @size, then partial truncate happens.
- */
-static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
- bool partial)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct cl_io *io;
- struct osc_object *obj = ext->oe_obj;
- struct client_obd *cli = osc_cli(obj);
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
- int pages_in_chunk = 0;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
- __u64 trunc_chunk = trunc_index >> ppc_bits;
- int grants = 0;
- int nr_pages = 0;
- int rc = 0;
-
- LASSERT(sanity_check(ext) == 0);
- EASSERT(ext->oe_state == OES_TRUNC, ext);
- EASSERT(!ext->oe_urgent, ext);
-
- /* Request new lu_env.
- * We can't use that env from osc_cache_truncate_start() because
- * it's from lov_io_sub and not fully initialized. */
- env = cl_env_nested_get(&nest);
- io = &osc_env_info(env)->oti_io;
- io->ci_obj = cl_object_top(osc2cl(obj));
- rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (rc < 0)
- goto out;
-
-	/* discard all pages with index greater than trunc_index */
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
- struct cl_page *sub = oap2cl_page(oap);
- struct cl_page *page = cl_page_top(sub);
-
- LASSERT(list_empty(&oap->oap_rpc_item));
-
- /* only discard the pages with their index greater than
- * trunc_index, and ... */
- if (sub->cp_index < trunc_index ||
- (sub->cp_index == trunc_index && partial)) {
- /* accounting how many pages remaining in the chunk
- * so that we can calculate grants correctly. */
- if (sub->cp_index >> ppc_bits == trunc_chunk)
- ++pages_in_chunk;
- continue;
- }
-
- list_del_init(&oap->oap_pending_item);
-
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "truncate", current);
-
- if (cl_page_own(env, io, page) == 0) {
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- LASSERT(0);
- }
-
- lu_ref_del(&page->cp_reference, "truncate", current);
- cl_page_put(env, page);
-
- --ext->oe_nr_pages;
- ++nr_pages;
- }
- EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
- ext->oe_nr_pages == 0),
- ext, "trunc_index %lu, partial %d\n", trunc_index, partial);
-
- osc_object_lock(obj);
- if (ext->oe_nr_pages == 0) {
- LASSERT(pages_in_chunk == 0);
- grants = ext->oe_grants;
- ext->oe_grants = 0;
- } else { /* calculate how many grants we can free */
- int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
- pgoff_t last_index;
-
-
-		/* if there are no pages in this chunk, we can also free grants
- * for the last chunk */
- if (pages_in_chunk == 0) {
- /* if this is the 1st chunk and no pages in this chunk,
- * ext->oe_nr_pages must be zero, so we should be in
- * the other if-clause. */
- LASSERT(trunc_chunk > 0);
- --trunc_chunk;
- ++chunks;
- }
-
- /* this is what we can free from this extent */
- grants = chunks << cli->cl_chunkbits;
- ext->oe_grants -= grants;
- last_index = ((trunc_chunk + 1) << ppc_bits) - 1;
- ext->oe_end = min(last_index, ext->oe_max_end);
- LASSERT(ext->oe_end >= ext->oe_start);
- LASSERT(ext->oe_grants > 0);
- }
- osc_object_unlock(obj);
-
- if (grants > 0 || nr_pages > 0)
- osc_free_grant(cli, nr_pages, grants);
-
-out:
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
- return rc;
-}
-
-/**
- * This function is used to make the extent prepared for transfer.
- * A race with flushing page - ll_writepage() has to be handled cautiously.
- */
-static int osc_extent_make_ready(const struct lu_env *env,
- struct osc_extent *ext)
-{
- struct osc_async_page *oap;
- struct osc_async_page *last = NULL;
- struct osc_object *obj = ext->oe_obj;
- int page_count = 0;
- int rc;
-
- /* we're going to grab page lock, so object lock must not be taken. */
- LASSERT(sanity_check(ext) == 0);
- /* in locking state, any process should not touch this extent. */
- EASSERT(ext->oe_state == OES_LOCKING, ext);
- EASSERT(ext->oe_owner != NULL, ext);
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
-
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- ++page_count;
- if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
- last = oap;
-
- /* checking ASYNC_READY is race safe */
- if ((oap->oap_async_flags & ASYNC_READY) != 0)
- continue;
-
- rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
- switch (rc) {
- case 0:
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY;
- spin_unlock(&oap->oap_lock);
- break;
- case -EALREADY:
- LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
- break;
- default:
- LASSERTF(0, "unknown return code: %d\n", rc);
- }
- }
-
- LASSERT(page_count == ext->oe_nr_pages);
- LASSERT(last != NULL);
-	/* the last page is the only one whose count we need to refresh
-	 * against the size of the file. */
- if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
- last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
- LASSERT(last->oap_count > 0);
- LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
- last->oap_async_flags |= ASYNC_COUNT_STABLE;
- }
-
-	/* for the rest of the pages, we don't need to call osc_refresh_count()
-	 * because it's known they are not the last page */
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- }
- }
-
- osc_object_lock(obj);
- osc_extent_state_set(ext, OES_RPC);
- osc_object_unlock(obj);
- /* get a refcount for RPC. */
- osc_extent_get(ext);
-
- return 0;
-}
-
-/**
- * Quick and simple version of osc_extent_find(). This function is frequently
- * called to expand the extent for the same IO. To expand the extent, the
- * page index must be in the same or next chunk of ext->oe_end.
- */
-static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
-{
- struct osc_object *obj = ext->oe_obj;
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *next;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
- pgoff_t chunk = index >> ppc_bits;
- pgoff_t end_chunk;
- pgoff_t end_index;
- int chunksize = 1 << cli->cl_chunkbits;
- int rc = 0;
-
- LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
- osc_object_lock(obj);
- LASSERT(sanity_check_nolock(ext) == 0);
- end_chunk = ext->oe_end >> ppc_bits;
- if (chunk > end_chunk + 1) {
- rc = -ERANGE;
- goto out;
- }
-
- if (end_chunk >= chunk) {
- rc = 0;
- goto out;
- }
-
- LASSERT(end_chunk + 1 == chunk);
- /* try to expand this extent to cover @index */
- end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
-
- next = next_extent(ext);
- if (next != NULL && next->oe_start <= end_index) {
- /* complex mode - overlapped with the next extent,
- * this case will be handled by osc_extent_find() */
- rc = -EAGAIN;
- goto out;
- }
-
- ext->oe_end = end_index;
- ext->oe_grants += chunksize;
- *grants -= chunksize;
- LASSERT(*grants >= 0);
- EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
- "overlapped after expanding for %lu.\n", index);
-
-out:
- osc_object_unlock(obj);
- return rc;
-}
-
-static void osc_extent_tree_dump0(int level, struct osc_object *obj,
- const char *func, int line)
-{
- struct osc_extent *ext;
- int cnt;
-
- CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n",
- obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);
-
- /* osc_object_lock(obj); */
- cnt = 1;
- for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
- OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
-
- cnt = 1;
- list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);
-
- cnt = 1;
- list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);
-
- cnt = 1;
- list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
- /* osc_object_unlock(obj); */
-}
-
-/* ------------------ osc extent end ------------------ */
-
-static inline int osc_is_ready(struct osc_object *osc)
-{
- return !list_empty(&osc->oo_ready_item) ||
- !list_empty(&osc->oo_hp_ready_item);
-}
-
-#define OSC_IO_DEBUG(OSC, STR, args...) \
- CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR, \
- (OSC), osc_is_ready(OSC), \
- list_empty_marker(&(OSC)->oo_hp_ready_item), \
- list_empty_marker(&(OSC)->oo_ready_item), \
- atomic_read(&(OSC)->oo_nr_writes), \
- list_empty_marker(&(OSC)->oo_hp_exts), \
- list_empty_marker(&(OSC)->oo_urgent_exts), \
- atomic_read(&(OSC)->oo_nr_reads), \
- list_empty_marker(&(OSC)->oo_reading_exts), \
- ##args)
-
-static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
- int cmd)
-{
- struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
- int result;
-
- LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
-
- result = cl_page_make_ready(env, page, CRT_WRITE);
- if (result == 0)
- opg->ops_submit_time = cfs_time_current();
- return result;
-}
-
-static int osc_refresh_count(const struct lu_env *env,
- struct osc_async_page *oap, int cmd)
-{
- struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = oap2cl_page(oap);
- struct cl_object *obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
-
- int result;
- loff_t kms;
-
- /* readpage queues with _COUNT_STABLE, shouldn't get here. */
- LASSERT(!(cmd & OBD_BRW_READ));
- LASSERT(opg != NULL);
- obj = opg->ops_cl.cpl_obj;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result < 0)
- return result;
- kms = attr->cat_kms;
- if (cl_offset(obj, page->cp_index) >= kms)
- /* catch race with truncate */
- return 0;
- else if (cl_offset(obj, page->cp_index + 1) > kms)
- /* catch sub-page write at end of file */
- return kms % PAGE_CACHE_SIZE;
- else
- return PAGE_CACHE_SIZE;
-}
-
-static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
- int cmd, int rc)
-{
- struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- enum cl_req_type crt;
- int srvlock;
-
- cmd &= ~OBD_BRW_NOQUOTA;
- LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
- LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
- LASSERT(opg->ops_transfer_pinned);
-
- /*
- * page->cp_req can be NULL if io submission failed before
- * cl_req was allocated.
- */
- if (page->cp_req != NULL)
- cl_req_page_done(env, page);
- LASSERT(page->cp_req == NULL);
-
- crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
- /* Clear opg->ops_transfer_pinned before VM lock is released. */
- opg->ops_transfer_pinned = 0;
-
- spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter != NULL);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- spin_unlock(&obj->oo_seatbelt);
-
- opg->ops_submit_time = 0;
- srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
-
- /* statistic */
- if (rc == 0 && srvlock) {
- struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
- struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
- int bytes = oap->oap_count;
-
- if (crt == CRT_READ)
- stats->os_lockless_reads += bytes;
- else
- stats->os_lockless_writes += bytes;
- }
-
- /*
- * This has to be the last operation with the page, as locks are
- * released in cl_page_completion() and nothing except for the
- * reference counter protects page from concurrent reclaim.
- */
- lu_ref_del(&page->cp_reference, "transfer", page);
-
- cl_page_completion(env, page, crt, rc);
-
- return 0;
-}
-
-#define OSC_DUMP_GRANT(cli, fmt, args...) do { \
- struct client_obd *__tmp = (cli); \
- CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d " \
- "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \
- __tmp->cl_import->imp_obd->obd_name, \
- __tmp->cl_dirty, __tmp->cl_dirty_max, \
- atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
- __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
- __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \
-} while (0)
-
-/* caller must hold loi_list_lock */
-static void osc_consume_write_grant(struct client_obd *cli,
- struct brw_page *pga)
-{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
- LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_inc(&obd_dirty_pages);
- cli->cl_dirty += PAGE_CACHE_SIZE;
- pga->flag |= OBD_BRW_FROM_GRANT;
- CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
- PAGE_CACHE_SIZE, pga, pga->pg);
- osc_update_next_shrink(cli);
-}
-
-/* the companion to osc_consume_write_grant, called when a brw has completed.
- * must be called with the loi lock held. */
-static void osc_release_write_grant(struct client_obd *cli,
- struct brw_page *pga)
-{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
- if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
- return;
- }
-
- pga->flag &= ~OBD_BRW_FROM_GRANT;
- atomic_dec(&obd_dirty_pages);
- cli->cl_dirty -= PAGE_CACHE_SIZE;
- if (pga->flag & OBD_BRW_NOCACHE) {
- pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
- }
-}
-
-/**
- * To avoid sleeping with the object lock held, it's good for us to allocate
- * enough grants before entering the critical section.
- *
- * client_obd_list_lock held by caller
- */
-static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
-{
- int rc = -EDQUOT;
-
- if (cli->cl_avail_grant >= bytes) {
- cli->cl_avail_grant -= bytes;
- cli->cl_reserved_grant += bytes;
- rc = 0;
- }
- return rc;
-}
-
-static void __osc_unreserve_grant(struct client_obd *cli,
- unsigned int reserved, unsigned int unused)
-{
-	/* it's quite normal for us to get more grant than reserved.
-	 * Consider the case where two extents are merged by adding a new
-	 * chunk: we can save one extent tax. If the extent tax is greater
-	 * than one chunk, we can save even more grant by adding a new chunk */
- cli->cl_reserved_grant -= reserved;
- if (unused > reserved) {
- cli->cl_avail_grant += reserved;
- cli->cl_lost_grant += unused - reserved;
- } else {
- cli->cl_avail_grant += unused;
- }
-}
-
-void osc_unreserve_grant(struct client_obd *cli,
- unsigned int reserved, unsigned int unused)
-{
- client_obd_list_lock(&cli->cl_loi_list_lock);
- __osc_unreserve_grant(cli, reserved, unused);
- if (unused > 0)
- osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-}
-
-/**
- * Free grant after IO is finished or canceled.
- *
- * @lost_grant is used to remember how many grants we have allocated but not
- * used, we should return these grants to OST. There're two cases where grants
- * can be lost:
- * 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
- * written. In this case OST may use less chunks to serve this partial
- * write. OSTs don't actually know the page size on the client side. so
- * clients have to calculate lost grant by the blocksize on the OST.
- * See filter_grant_check() for details.
- */
-static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
- unsigned int lost_grant)
-{
- int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- atomic_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
- cli->cl_lost_grant += lost_grant;
- if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
- /* borrow some grant from truncate to avoid the case that
- * truncate uses up all avail grant */
- cli->cl_lost_grant -= grant;
- cli->cl_avail_grant += grant;
- }
- osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
- lost_grant, cli->cl_lost_grant,
- cli->cl_avail_grant, cli->cl_dirty);
-}
-
-/**
- * The companion to osc_enter_cache(), called when @oap is no longer part of
- * the dirty accounting due to error.
- */
-static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
-{
- client_obd_list_lock(&cli->cl_loi_list_lock);
- osc_release_write_grant(cli, &oap->oap_brw_page);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-}
-
-/**
- * Non-blocking version of osc_enter_cache() that consumes grant only when it
- * is available.
- */
-static int osc_enter_cache_try(struct client_obd *cli,
- struct osc_async_page *oap,
- int bytes, int transient)
-{
- int rc;
-
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
-
- rc = osc_reserve_grant(cli, bytes);
- if (rc < 0)
- return 0;
-
- if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
- osc_consume_write_grant(cli, &oap->oap_brw_page);
- if (transient) {
- cli->cl_dirty_transit += PAGE_CACHE_SIZE;
- atomic_inc(&obd_dirty_transit_pages);
- oap->oap_brw_flags |= OBD_BRW_NOCACHE;
- }
- rc = 1;
- } else {
- __osc_unreserve_grant(cli, bytes, bytes);
- rc = 0;
- }
- return rc;
-}
-
-static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
-{
- int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&ocw->ocw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- return rc;
-}
-
-/**
- * The main entry to reserve dirty page accounting. Usually the grant reserved
- * in this function will be freed in bulk in osc_free_grant() unless it fails
- * to add osc cache, in that case, it will be freed in osc_exit_cache().
- *
- * The process will be put into sleep if it's already run out of grant.
- */
-static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
- struct osc_async_page *oap, int bytes)
-{
- struct osc_object *osc = oap->oap_obj;
- struct lov_oinfo *loi = osc->oo_oinfo;
- struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- int rc = -EDQUOT;
-
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- /* force the caller to try sync io. this can jump the list
- * of queued writes and create a discontiguous rpc stream */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < PAGE_CACHE_SIZE ||
- cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
- rc = -EDQUOT;
- goto out;
- }
-
- /* Hopefully normal case - cache space and write credits available */
- if (osc_enter_cache_try(cli, oap, bytes, 0)) {
- rc = 0;
- goto out;
- }
-
-	/* We can get here for two reasons: too many dirty pages in cache, or
-	 * we have run out of grants. In both cases we should write dirty pages
-	 * out. Adding a cache waiter will trigger urgent write-out no matter
-	 * what the RPC size will be.
-	 * The exit condition is no available grant and no dirty pages caching;
-	 * that really means there is no space on the OST. */
- init_waitqueue_head(&ocw.ocw_waitq);
- ocw.ocw_oap = oap;
- ocw.ocw_grant = bytes;
- while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
- list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
- ocw.ocw_rc = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- osc_io_unplug_async(env, cli, NULL);
-
- CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
- cli->cl_import->imp_obd->obd_name, &ocw, oap);
-
- rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
-
- /* l_wait_event is interrupted by signal */
- if (rc < 0) {
- list_del_init(&ocw.ocw_entry);
- goto out;
- }
-
- LASSERT(list_empty(&ocw.ocw_entry));
- rc = ocw.ocw_rc;
-
- if (rc != -EDQUOT)
- goto out;
- if (osc_enter_cache_try(cli, oap, bytes, 0)) {
- rc = 0;
- goto out;
- }
- }
-out:
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
- return rc;
-}
-
-/* caller must hold loi_list_lock */
-void osc_wake_cache_waiters(struct client_obd *cli)
-{
- struct list_head *l, *tmp;
- struct osc_cache_waiter *ocw;
-
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
- list_del_init(&ocw->ocw_entry);
-
- ocw->ocw_rc = -EDQUOT;
- /* we can't dirty more */
- if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_dirty_pages) + 1 >
- obd_max_dirty_pages)) {
- CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
- cli->cl_dirty,
- cli->cl_dirty_max, obd_max_dirty_pages);
- goto wakeup;
- }
-
- ocw->ocw_rc = 0;
- if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
- ocw->ocw_rc = -EDQUOT;
-
-wakeup:
- CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
- ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
-
- wake_up(&ocw->ocw_waitq);
- }
-}
-
-static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
-{
- int hprpc = !!list_empty(&osc->oo_hp_exts);
- return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
-}
-
-/* This maintains the lists of pending pages to read/write for a given object
- * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
- * to quickly find objects that are ready to send an RPC. */
-static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
- int cmd)
-{
- int invalid_import = 0;
-
- /* if we have an invalid import we want to drain the queued pages
- * by forcing them through rpcs that immediately fail and complete
- * the pages. recovery relies on this to empty the queued pages
- * before canceling the locks and evicting down the llite pages */
- if ((cli->cl_import == NULL || cli->cl_import->imp_invalid))
- invalid_import = 1;
-
- if (cmd & OBD_BRW_WRITE) {
- if (atomic_read(&osc->oo_nr_writes) == 0)
- return 0;
- if (invalid_import) {
- CDEBUG(D_CACHE, "invalid import forcing RPC\n");
- return 1;
- }
- if (!list_empty(&osc->oo_hp_exts)) {
- CDEBUG(D_CACHE, "high prio request forcing RPC\n");
- return 1;
- }
- if (!list_empty(&osc->oo_urgent_exts)) {
- CDEBUG(D_CACHE, "urgent request forcing RPC\n");
- return 1;
- }
- /* trigger a write rpc stream as long as there are dirtiers
-		 * waiting for space. As they're waiting, they're not going to
-		 * create more pages to coalesce with what's waiting. */
- if (!list_empty(&cli->cl_cache_waiters)) {
- CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
- return 1;
- }
- if (atomic_read(&osc->oo_nr_writes) >=
- cli->cl_max_pages_per_rpc)
- return 1;
- } else {
- if (atomic_read(&osc->oo_nr_reads) == 0)
- return 0;
- if (invalid_import) {
- CDEBUG(D_CACHE, "invalid import forcing RPC\n");
- return 1;
- }
- /* all read are urgent. */
- if (!list_empty(&osc->oo_reading_exts))
- return 1;
- }
-
- return 0;
-}
-
-static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
-{
- struct client_obd *cli = osc_cli(obj);
- if (cmd & OBD_BRW_WRITE) {
- atomic_add(delta, &obj->oo_nr_writes);
- atomic_add(delta, &cli->cl_pending_w_pages);
- LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
- } else {
- atomic_add(delta, &obj->oo_nr_reads);
- atomic_add(delta, &cli->cl_pending_r_pages);
- LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
- }
- OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
-}
-
-static int osc_makes_hprpc(struct osc_object *obj)
-{
- return !list_empty(&obj->oo_hp_exts);
-}
-
-static void on_list(struct list_head *item, struct list_head *list, int should_be_on)
-{
- if (list_empty(item) && should_be_on)
- list_add_tail(item, list);
- else if (!list_empty(item) && !should_be_on)
- list_del_init(item);
-}
-
-/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
- * can find pages to build into rpcs quickly */
-static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
-{
- if (osc_makes_hprpc(osc)) {
- /* HP rpc */
- on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0);
- on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
- } else {
- on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
- on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list,
- osc_makes_rpc(cli, osc, OBD_BRW_WRITE) ||
- osc_makes_rpc(cli, osc, OBD_BRW_READ));
- }
-
- on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
- atomic_read(&osc->oo_nr_writes) > 0);
-
- on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
- atomic_read(&osc->oo_nr_reads) > 0);
-
- return osc_is_ready(osc);
-}
-
-static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
-{
- int is_ready;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- is_ready = __osc_list_maint(cli, osc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return is_ready;
-}
-
-/* this is trying to propagate async writeback errors back up to the
- * application. As an async write fails we record the error code for later if
- * the app does an fsync. As long as errors persist we force future rpcs to be
- * sync so that the app can get a sync error and break the cycle of queueing
- * pages for which writeback will fail. */
-static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
- int rc)
-{
- if (rc) {
- if (!ar->ar_rc)
- ar->ar_rc = rc;
-
- ar->ar_force_sync = 1;
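-		/* remember the next xid: sync mode is only dropped again once
-		 * a write with an xid at or above this one completes cleanly */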
- ar->ar_min_xid = ptlrpc_sample_next_xid();
- return;
-
- }
-
- if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
- ar->ar_force_sync = 0;
-}
-
-
-/* this must be called holding the loi list lock to give coverage to exit_cache,
- * async_flag maintenance, and oap_request */
-static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
- struct osc_async_page *oap, int sent, int rc)
-{
- struct osc_object *osc = oap->oap_obj;
- struct lov_oinfo *loi = osc->oo_oinfo;
- __u64 xid = 0;
-
- if (oap->oap_request != NULL) {
- xid = ptlrpc_req_xid(oap->oap_request);
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = NULL;
- }
-
- /* As the transfer for this page is being done, clear the flags */
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags = 0;
- spin_unlock(&oap->oap_lock);
- oap->oap_interrupted = 0;
-
- if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
- osc_process_ar(&cli->cl_ar, xid, rc);
- osc_process_ar(&loi->loi_ar, xid, rc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
-
- rc = osc_completion(env, oap, oap->oap_cmd, rc);
- if (rc)
- CERROR("completion on oap %p obj %p returns %d.\n",
- oap, osc, rc);
-}
-
-/**
- * Try to add extent to one RPC. We need to think about the following things:
- * - # of pages must not be over max_pages_per_rpc
- * - extent must be compatible with previous ones
- */
-static int try_to_add_extent_for_io(struct client_obd *cli,
- struct osc_extent *ext, struct list_head *rpclist,
- int *pc, unsigned int *max_pages)
-{
- struct osc_extent *tmp;
- struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
- struct osc_async_page,
- oap_pending_item);
-
- EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
- ext);
-
- *max_pages = max(ext->oe_mppr, *max_pages);
- if (*pc + ext->oe_nr_pages > *max_pages)
- return 0;
-
- list_for_each_entry(tmp, rpclist, oe_link) {
- struct osc_async_page *oap2;
-
- oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
- oap_pending_item);
- EASSERT(tmp->oe_owner == current, tmp);
- if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
-			CDEBUG(D_CACHE,
-			       "Do not permit different types of IO in the same RPC\n");
- return 0;
- }
-
- if (tmp->oe_srvlock != ext->oe_srvlock ||
- !tmp->oe_grants != !ext->oe_grants)
- return 0;
-
-		/* only the first extent in the list is checked; remove this
-		 * break to check every extent strictly */
- break;
- }
-
- *pc += ext->oe_nr_pages;
- list_move_tail(&ext->oe_link, rpclist);
- ext->oe_owner = current;
- return 1;
-}
-
-/**
- * In order to prevent multiple ptlrpcd threads from breaking contiguous extents,
- * get_write_extents() collects all appropriate extents atomically.
- *
- * The following policy is used to collect extents for IO:
- * 1. Add as many HP extents as possible;
- * 2. Add the first urgent extent in urgent extent list and take it out of
- * urgent list;
- * 3. Add subsequent extents of this urgent extent;
- * 4. If urgent list is not empty, goto 2;
- * 5. Traverse the extent tree from the 1st extent;
- * 6. Above steps exit if there is no space in this RPC.
- */
-static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
-{
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- int page_count = 0;
- unsigned int max_pages = cli->cl_max_pages_per_rpc;
-
- LASSERT(osc_object_is_locked(obj));
- while (!list_empty(&obj->oo_hp_exts)) {
- ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
- oe_link);
- LASSERT(ext->oe_state == OES_CACHE);
- if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
- &max_pages))
- return page_count;
- EASSERT(ext->oe_nr_pages <= max_pages, ext);
- }
- if (page_count == max_pages)
- return page_count;
-
- while (!list_empty(&obj->oo_urgent_exts)) {
- ext = list_entry(obj->oo_urgent_exts.next,
- struct osc_extent, oe_link);
- if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
- &max_pages))
- return page_count;
-
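-		/* only extents linked into the red-black tree have
-		 * neighbouring extents to pick up below */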
- if (!ext->oe_intree)
- continue;
-
- while ((ext = next_extent(ext)) != NULL) {
- if ((ext->oe_state != OES_CACHE) ||
- (!list_empty(&ext->oe_link) &&
- ext->oe_owner != NULL))
- continue;
-
- if (!try_to_add_extent_for_io(cli, ext, rpclist,
- &page_count, &max_pages))
- return page_count;
- }
- }
- if (page_count == max_pages)
- return page_count;
-
- ext = first_extent(obj);
- while (ext != NULL) {
- if ((ext->oe_state != OES_CACHE) ||
- /* this extent may be already in current rpclist */
- (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
- ext = next_extent(ext);
- continue;
- }
-
- if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
- &max_pages))
- return page_count;
-
- ext = next_extent(ext);
- }
- return page_count;
-}
-
-static int
-osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc)
-{
- LIST_HEAD(rpclist);
- struct osc_extent *ext;
- struct osc_extent *tmp;
- struct osc_extent *first = NULL;
- u32 page_count = 0;
- int srvlock = 0;
- int rc = 0;
-
- LASSERT(osc_object_is_locked(osc));
-
- page_count = get_write_extents(osc, &rpclist);
- LASSERT(equi(page_count == 0, list_empty(&rpclist)));
-
- if (list_empty(&rpclist))
- return 0;
-
- osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
-
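-	/* extents still in CACHE state need their pages locked first
-	 * (OES_LOCKING); the others have finished locking and go straight
-	 * to OES_RPC */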
- list_for_each_entry(ext, &rpclist, oe_link) {
- LASSERT(ext->oe_state == OES_CACHE ||
- ext->oe_state == OES_LOCK_DONE);
- if (ext->oe_state == OES_CACHE)
- osc_extent_state_set(ext, OES_LOCKING);
- else
- osc_extent_state_set(ext, OES_RPC);
- }
-
- /* we're going to grab page lock, so release object lock because
- * lock order is page lock -> object lock. */
- osc_object_unlock(osc);
-
- list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
- if (ext->oe_state == OES_LOCKING) {
- rc = osc_extent_make_ready(env, ext);
- if (unlikely(rc < 0)) {
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 0, rc);
- continue;
- }
- }
- if (first == NULL) {
- first = ext;
- srvlock = ext->oe_srvlock;
- } else {
- LASSERT(srvlock == ext->oe_srvlock);
- }
- }
-
- if (!list_empty(&rpclist)) {
- LASSERT(page_count > 0);
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE);
- LASSERT(list_empty(&rpclist));
- }
-
- osc_object_lock(osc);
- return rc;
-}
-
-/**
- * prepare pages for ASYNC io and put pages in send queue.
- *
- * \param cmd OBD_BRW_* macros
- * \param lop pending pages
- *
- * \return zero if no page added to send queue.
- * \return 1 if pages successfully added to send queue.
- * \return negative on errors.
- */
-static int
-osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc)
-{
- struct osc_extent *ext;
- struct osc_extent *next;
- LIST_HEAD(rpclist);
- int page_count = 0;
- unsigned int max_pages = cli->cl_max_pages_per_rpc;
- int rc = 0;
-
- LASSERT(osc_object_is_locked(osc));
- list_for_each_entry_safe(ext, next,
- &osc->oo_reading_exts, oe_link) {
- EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
- if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
- &max_pages))
- break;
- osc_extent_state_set(ext, OES_RPC);
- EASSERT(ext->oe_nr_pages <= max_pages, ext);
- }
- LASSERT(page_count <= max_pages);
-
- osc_update_pending(osc, OBD_BRW_READ, -page_count);
-
- if (!list_empty(&rpclist)) {
- osc_object_unlock(osc);
-
- LASSERT(page_count > 0);
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ);
- LASSERT(list_empty(&rpclist));
-
- osc_object_lock(osc);
- }
- return rc;
-}
-
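-/* pop the first object off @list: the item is removed from the list and the
- * enclosing osc_object is returned */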
-#define list_to_obj(list, item) ({ \
- struct list_head *__tmp = (list)->next; \
- list_del_init(__tmp); \
- list_entry(__tmp, struct osc_object, oo_##item); \
-})
-
-/* This is called by osc_check_rpcs() to find which objects have pages that
- * we could be sending. These lists are maintained by osc_makes_rpc(). */
-static struct osc_object *osc_next_obj(struct client_obd *cli)
-{
- /* First return objects that have blocked locks so that they
- * will be flushed quickly and other clients can get the lock,
- * then objects which have pages ready to be stuffed into RPCs */
- if (!list_empty(&cli->cl_loi_hp_ready_list))
- return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
- if (!list_empty(&cli->cl_loi_ready_list))
- return list_to_obj(&cli->cl_loi_ready_list, ready_item);
-
- /* then if we have cache waiters, return all objects with queued
- * writes. This is especially important when many small files
- * have filled up the cache and not been fired into rpcs because
- * they don't pass the nr_pending/object threshold */
- if (!list_empty(&cli->cl_cache_waiters) &&
- !list_empty(&cli->cl_loi_write_list))
- return list_to_obj(&cli->cl_loi_write_list, write_item);
-
- /* then return all queued objects when we have an invalid import
- * so that they get flushed */
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
- if (!list_empty(&cli->cl_loi_write_list))
- return list_to_obj(&cli->cl_loi_write_list, write_item);
- if (!list_empty(&cli->cl_loi_read_list))
- return list_to_obj(&cli->cl_loi_read_list, read_item);
- }
- return NULL;
-}
-
-/* called with the loi list lock held */
-static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
-{
- struct osc_object *osc;
- int rc = 0;
-
- while ((osc = osc_next_obj(cli)) != NULL) {
- struct cl_object *obj = osc2cl(osc);
- struct lu_ref_link link;
-
- OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
-
- if (osc_max_rpc_in_flight(cli, osc)) {
- __osc_list_maint(cli, osc);
- break;
- }
-
- cl_object_get(obj);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- lu_object_ref_add_at(&obj->co_lu, &link, "check",
- current);
-
- /* attempt some read/write balancing by alternating between
- * reads and writes in an object. The makes_rpc checks here
- * would be redundant if we were getting read/write work items
- * instead of objects. we don't want send_oap_rpc to drain a
- * partial read pending queue when we're given this object to
- * do io on writes while there are cache waiters */
- osc_object_lock(osc);
- if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
- rc = osc_send_write_rpc(env, cli, osc);
- if (rc < 0) {
- CERROR("Write request failed with %d\n", rc);
-
-				/* osc_send_write_rpc failed, mostly because of
-				 * memory pressure.
-				 *
-				 * We cannot simply break out here, because if:
-				 *   - a page was submitted by osc_io_submit,
-				 *     so that page is locked;
-				 *   - no request is in flight; and
-				 *   - no further request will follow;
-				 * then the system ends up live-locked: there
-				 * is no longer any chance to call
-				 * osc_io_unplug() or osc_check_rpcs(), and
-				 * pdflush cannot help either, since it may be
-				 * blocked on that same page lock.
-				 *
-				 * So keep draining pages instead. */
- /* break; */
- }
- }
- if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
- rc = osc_send_read_rpc(env, cli, osc);
- if (rc < 0)
- CERROR("Read request failed with %d\n", rc);
- }
- osc_object_unlock(osc);
-
- osc_list_maint(cli, osc);
- lu_object_ref_del_at(&obj->co_lu, &link, "check",
- current);
- cl_object_put(env, obj);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- }
-}
-
-static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, int async)
-{
- int rc = 0;
-
- if (osc != NULL && osc_list_maint(cli, osc) == 0)
- return 0;
-
- if (!async) {
- /* disable osc_lru_shrink() temporarily to avoid
- * potential stack overrun problem. LU-2859 */
- atomic_inc(&cli->cl_lru_shrinkers);
- client_obd_list_lock(&cli->cl_loi_list_lock);
- osc_check_rpcs(env, cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- atomic_dec(&cli->cl_lru_shrinkers);
- } else {
- CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
- LASSERT(cli->cl_writeback_work != NULL);
- rc = ptlrpcd_queue_work(cli->cl_writeback_work);
- }
- return rc;
-}
-
-static int osc_io_unplug_async(const struct lu_env *env,
- struct client_obd *cli, struct osc_object *osc)
-{
- return osc_io_unplug0(env, cli, osc, 1);
-}
-
-void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc)
-{
- (void)osc_io_unplug0(env, cli, osc, 0);
-}
-
-int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
- struct page *page, loff_t offset)
-{
- struct obd_export *exp = osc_export(osc);
- struct osc_async_page *oap = &ops->ops_oap;
-
- if (!page)
- return cfs_size_round(sizeof(*oap));
-
- oap->oap_magic = OAP_MAGIC;
- oap->oap_cli = &exp->exp_obd->u.cli;
- oap->oap_obj = osc;
-
- oap->oap_page = page;
- oap->oap_obj_off = offset;
- LASSERT(!(offset & ~CFS_PAGE_MASK));
-
- if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
- oap->oap_brw_flags = OBD_BRW_NOQUOTA;
-
- INIT_LIST_HEAD(&oap->oap_pending_item);
- INIT_LIST_HEAD(&oap->oap_rpc_item);
-
- spin_lock_init(&oap->oap_lock);
- CDEBUG(D_INFO, "oap %p page %p obj off %llu\n",
- oap, page, oap->oap_obj_off);
- return 0;
-}
-
-int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops)
-{
- struct osc_io *oio = osc_env_io(env);
- struct osc_extent *ext = NULL;
- struct osc_async_page *oap = &ops->ops_oap;
- struct client_obd *cli = oap->oap_cli;
- struct osc_object *osc = oap->oap_obj;
- pgoff_t index;
- int grants = 0;
- int brw_flags = OBD_BRW_ASYNC;
- int cmd = OBD_BRW_WRITE;
- int need_release = 0;
- int rc = 0;
-
- if (oap->oap_magic != OAP_MAGIC)
- return -EINVAL;
-
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
- return -EIO;
-
- if (!list_empty(&oap->oap_pending_item) ||
- !list_empty(&oap->oap_rpc_item))
- return -EBUSY;
-
- /* Set the OBD_BRW_SRVLOCK before the page is queued. */
- brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
- if (!client_is_remote(osc_export(osc)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
- brw_flags |= OBD_BRW_NOQUOTA;
- cmd |= OBD_BRW_NOQUOTA;
- }
-
- /* check if the file's owner/group is over quota */
- if (!(cmd & OBD_BRW_NOQUOTA)) {
- struct cl_object *obj;
- struct cl_attr *attr;
- unsigned int qid[MAXQUOTAS];
-
- obj = cl_object_top(&osc->oo_cl);
- attr = &osc_env_info(env)->oti_attr;
-
- cl_object_attr_lock(obj);
- rc = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
-
- qid[USRQUOTA] = attr->cat_uid;
- qid[GRPQUOTA] = attr->cat_gid;
- if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
- rc = -EDQUOT;
- if (rc)
- return rc;
- }
-
- oap->oap_cmd = cmd;
- oap->oap_page_off = ops->ops_from;
- oap->oap_count = ops->ops_to - ops->ops_from;
- oap->oap_async_flags = 0;
- oap->oap_brw_flags = brw_flags;
-
- OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
- oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
-
- index = oap2cl_page(oap)->cp_index;
-
-	/* Add this page to an extent by the following steps:
-	 * 1. if there is an active extent for this IO, the page can usually
-	 *    be added to it, although sometimes the extent must be expanded
-	 *    to accommodate the page;
-	 * 2. otherwise, a new extent is allocated. */
-
- ext = oio->oi_active;
- if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
- /* one chunk plus extent overhead must be enough to write this
- * page */
- grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
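-		/* if the page already falls inside the extent, no extra
-		 * grant is needed for it */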
- if (ext->oe_end >= index)
- grants = 0;
-
- /* it doesn't need any grant to dirty this page */
- client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = osc_enter_cache_try(cli, oap, grants, 0);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- if (rc == 0) { /* try failed */
- grants = 0;
- need_release = 1;
- } else if (ext->oe_end < index) {
- int tmp = grants;
- /* try to expand this extent */
- rc = osc_extent_expand(ext, index, &tmp);
- if (rc < 0) {
- need_release = 1;
- /* don't free reserved grant */
- } else {
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "expanded for %lu.\n", index);
- osc_unreserve_grant(cli, grants, tmp);
- grants = 0;
- }
- }
- rc = 0;
- } else if (ext != NULL) {
- /* index is located outside of active extent */
- need_release = 1;
- }
- if (need_release) {
- osc_extent_release(env, ext);
- oio->oi_active = NULL;
- ext = NULL;
- }
-
- if (ext == NULL) {
- int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
-
- /* try to find new extent to cover this page */
- LASSERT(oio->oi_active == NULL);
- /* we may have allocated grant for this page if we failed
- * to expand the previous active extent. */
- LASSERT(ergo(grants > 0, grants >= tmp));
-
- rc = 0;
- if (grants == 0) {
- /* we haven't allocated grant for this page. */
- rc = osc_enter_cache(env, cli, oap, tmp);
- if (rc == 0)
- grants = tmp;
- }
-
- tmp = grants;
- if (rc == 0) {
- ext = osc_extent_find(env, osc, index, &tmp);
- if (IS_ERR(ext)) {
- LASSERT(tmp == grants);
- osc_exit_cache(cli, oap);
- rc = PTR_ERR(ext);
- ext = NULL;
- } else {
- oio->oi_active = ext;
- }
- }
- if (grants > 0)
- osc_unreserve_grant(cli, grants, tmp);
- }
-
- LASSERT(ergo(rc == 0, ext != NULL));
- if (ext != NULL) {
- EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
- ext, "index = %lu.\n", index);
- LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
-
- osc_object_lock(osc);
- if (ext->oe_nr_pages == 0)
- ext->oe_srvlock = ops->ops_srvlock;
- else
- LASSERT(ext->oe_srvlock == ops->ops_srvlock);
- ++ext->oe_nr_pages;
- list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
- osc_object_unlock(osc);
- }
- return rc;
-}
-
-int osc_teardown_async_page(const struct lu_env *env,
- struct osc_object *obj, struct osc_page *ops)
-{
- struct osc_async_page *oap = &ops->ops_oap;
- struct osc_extent *ext = NULL;
- int rc = 0;
-
- LASSERT(oap->oap_magic == OAP_MAGIC);
-
- CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
- oap, ops, oap2cl_page(oap)->cp_index);
-
- osc_object_lock(obj);
- if (!list_empty(&oap->oap_rpc_item)) {
- CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
- rc = -EBUSY;
- } else if (!list_empty(&oap->oap_pending_item)) {
- ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
- /* only truncated pages are allowed to be taken out.
- * See osc_extent_truncate() and osc_cache_truncate_start()
- * for details. */
- if (ext != NULL && ext->oe_state != OES_TRUNC) {
- OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
- oap2cl_page(oap)->cp_index);
- rc = -EBUSY;
- }
- }
- osc_object_unlock(obj);
- if (ext != NULL)
- osc_extent_put(env, ext);
- return rc;
-}
-
-/**
- * This is called when a page is picked up by kernel to write out.
- *
- * We should find out the corresponding extent and add the whole extent
- * into urgent list. The extent may be being truncated or used, handle it
- * carefully.
- */
-int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops)
-{
- struct osc_extent *ext = NULL;
- struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
- struct cl_page *cp = ops->ops_cl.cpl_page;
- pgoff_t index = cp->cp_index;
- struct osc_async_page *oap = &ops->ops_oap;
- bool unplug = false;
- int rc = 0;
-
- osc_object_lock(obj);
- ext = osc_extent_lookup(obj, index);
- if (ext == NULL) {
- osc_extent_tree_dump(D_ERROR, obj);
- LASSERTF(0, "page index %lu is NOT covered.\n", index);
- }
-
- switch (ext->oe_state) {
- case OES_RPC:
- case OES_LOCK_DONE:
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
- "flush an in-rpc page?\n");
- LASSERT(0);
- break;
- case OES_LOCKING:
-		/* If we know this extent is being written out, we should abort
-		 * so that the writer can make this page ready. Otherwise there
-		 * is a deadlock: another process may wait for the page
-		 * writeback bit while holding the page lock, and meanwhile
-		 * vvp_page_make_ready() needs to grab the page lock before
-		 * really sending the RPC. */
- case OES_TRUNC:
- /* race with truncate, page will be redirtied */
- case OES_ACTIVE:
- /* The extent is active so we need to abort and let the caller
- * re-dirty the page. If we continued on here, and we were the
- * one making the extent active, we could deadlock waiting for
- * the page writeback to clear but it won't because the extent
- * is active and won't be written out. */
- rc = -EAGAIN;
- goto out;
- default:
- break;
- }
-
- rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
- if (rc)
- goto out;
-
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
- spin_unlock(&oap->oap_lock);
-
- if (memory_pressure_get())
- ext->oe_memalloc = 1;
-
- ext->oe_urgent = 1;
- if (ext->oe_state == OES_CACHE) {
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "flush page %p make it urgent.\n", oap);
- if (list_empty(&ext->oe_link))
- list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
- unplug = true;
- }
- rc = 0;
-
-out:
- osc_object_unlock(obj);
- osc_extent_put(env, ext);
- if (unplug)
- osc_io_unplug_async(env, osc_cli(obj), obj);
- return rc;
-}
-
-/**
- * this is called when a sync waiter receives an interruption. Its job is to
- * get the caller woken as soon as possible. If its page hasn't been put in an
- * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
- * desiring interruption which will forcefully complete the rpc once the rpc
- * has timed out.
- */
-int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
-{
- struct osc_async_page *oap = &ops->ops_oap;
- struct osc_object *obj = oap->oap_obj;
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_extent *found = NULL;
- struct list_head *plist;
- pgoff_t index = oap2cl_page(oap)->cp_index;
- int rc = -EBUSY;
- int cmd;
-
- LASSERT(!oap->oap_interrupted);
- oap->oap_interrupted = 1;
-
- /* Find out the caching extent */
- osc_object_lock(obj);
- if (oap->oap_cmd & OBD_BRW_WRITE) {
- plist = &obj->oo_urgent_exts;
- cmd = OBD_BRW_WRITE;
- } else {
- plist = &obj->oo_reading_exts;
- cmd = OBD_BRW_READ;
- }
- list_for_each_entry(ext, plist, oe_link) {
- if (ext->oe_start <= index && ext->oe_end >= index) {
- LASSERT(ext->oe_state == OES_LOCK_DONE);
- /* For OES_LOCK_DONE state extent, it has already held
- * a refcount for RPC. */
- found = osc_extent_get(ext);
- break;
- }
- }
- if (found != NULL) {
- list_del_init(&found->oe_link);
- osc_update_pending(obj, cmd, -found->oe_nr_pages);
- osc_object_unlock(obj);
-
- osc_extent_finish(env, found, 0, -EINTR);
- osc_extent_put(env, found);
- rc = 0;
- } else {
- osc_object_unlock(obj);
- /* ok, it's been put in an rpc. only one oap gets a request
- * reference */
- if (oap->oap_request != NULL) {
- ptlrpc_mark_interrupted(oap->oap_request);
- ptlrpcd_wake(oap->oap_request);
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = NULL;
- }
- }
-
- osc_list_maint(cli, obj);
- return rc;
-}
-
-int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- struct list_head *list, int cmd, int brw_flags)
-{
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_async_page *oap, *tmp;
- int page_count = 0;
- int mppr = cli->cl_max_pages_per_rpc;
- pgoff_t start = CL_PAGE_EOF;
- pgoff_t end = 0;
-
- list_for_each_entry(oap, list, oap_pending_item) {
- struct cl_page *cp = oap2cl_page(oap);
- if (cp->cp_index > end)
- end = cp->cp_index;
- if (cp->cp_index < start)
- start = cp->cp_index;
- ++page_count;
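-		/* keep doubling mppr until it covers the number of pages
-		 * seen so far */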
- mppr <<= (page_count > mppr);
- }
-
- ext = osc_extent_alloc(obj);
- if (ext == NULL) {
- list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
- list_del_init(&oap->oap_pending_item);
- osc_ap_completion(env, cli, oap, 0, -ENOMEM);
- }
- return -ENOMEM;
- }
-
- ext->oe_rw = !!(cmd & OBD_BRW_READ);
- ext->oe_urgent = 1;
- ext->oe_start = start;
- ext->oe_end = ext->oe_max_end = end;
- ext->oe_obj = obj;
- ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
- ext->oe_nr_pages = page_count;
- ext->oe_mppr = mppr;
- list_splice_init(list, &ext->oe_pages);
-
- osc_object_lock(obj);
- /* Reuse the initial refcount for RPC, don't drop it */
- osc_extent_state_set(ext, OES_LOCK_DONE);
- if (cmd & OBD_BRW_WRITE) {
- list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
- osc_update_pending(obj, OBD_BRW_WRITE, page_count);
- } else {
- list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
- osc_update_pending(obj, OBD_BRW_READ, page_count);
- }
- osc_object_unlock(obj);
-
- osc_io_unplug_async(env, cli, obj);
- return 0;
-}
-
-/**
- * Called by osc_io_setattr_start() to freeze and destroy covering extents.
- */
-int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj, __u64 size)
-{
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_extent *waiting = NULL;
- pgoff_t index;
- LIST_HEAD(list);
- int result = 0;
- bool partial;
-
- /* pages with index greater or equal to index will be truncated. */
- index = cl_index(osc2cl(obj), size);
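-	/* the page at @index is only partially truncated when the new size
-	 * is not page aligned */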
- partial = size > cl_offset(osc2cl(obj), index);
-
-again:
- osc_object_lock(obj);
- ext = osc_extent_search(obj, index);
- if (ext == NULL)
- ext = first_extent(obj);
- else if (ext->oe_end < index)
- ext = next_extent(ext);
- while (ext != NULL) {
- EASSERT(ext->oe_state != OES_TRUNC, ext);
-
- if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
-			/* if ext is in urgent state, a page in it has already
-			 * been flushed by write_page(). We have to wait for
-			 * this extent because we can't truncate that page. */
- LASSERT(!ext->oe_hp);
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "waiting for busy extent\n");
- waiting = osc_extent_get(ext);
- break;
- }
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size);
-
- osc_extent_get(ext);
- if (ext->oe_state == OES_ACTIVE) {
-			/* although we take the inode mutex on the write path,
-			 * we release it before releasing the extent (in
-			 * osc_io_end()), so there is a race window in which an
-			 * extent is still in OES_ACTIVE when truncate starts. */
- LASSERT(!ext->oe_trunc_pending);
- ext->oe_trunc_pending = 1;
- } else {
- EASSERT(ext->oe_state == OES_CACHE, ext);
- osc_extent_state_set(ext, OES_TRUNC);
- osc_update_pending(obj, OBD_BRW_WRITE,
- -ext->oe_nr_pages);
- }
- EASSERT(list_empty(&ext->oe_link), ext);
- list_add_tail(&ext->oe_link, &list);
-
- ext = next_extent(ext);
- }
- osc_object_unlock(obj);
-
- osc_list_maint(cli, obj);
-
- while (!list_empty(&list)) {
- int rc;
-
- ext = list_entry(list.next, struct osc_extent, oe_link);
- list_del_init(&ext->oe_link);
-
- /* extent may be in OES_ACTIVE state because inode mutex
- * is released before osc_io_end() in file write case */
- if (ext->oe_state != OES_TRUNC)
- osc_extent_wait(env, ext, OES_TRUNC);
-
- rc = osc_extent_truncate(ext, index, partial);
- if (rc < 0) {
- if (result == 0)
- result = rc;
-
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "truncate error %d\n", rc);
- } else if (ext->oe_nr_pages == 0) {
- osc_extent_remove(ext);
- } else {
- /* this must be an overlapped extent which means only
- * part of pages in this extent have been truncated.
- */
- EASSERTF(ext->oe_start <= index, ext,
- "trunc index = %lu/%d.\n", index, partial);
- /* fix index to skip this partially truncated extent */
- index = ext->oe_end + 1;
- partial = false;
-
- /* we need to hold this extent in OES_TRUNC state so
- * that no writeback will happen. This is to avoid
- * BUG 17397. */
- LASSERT(oio->oi_trunc == NULL);
- oio->oi_trunc = osc_extent_get(ext);
- OSC_EXTENT_DUMP(D_CACHE, ext,
- "trunc at %llu\n", size);
- }
- osc_extent_put(env, ext);
- }
- if (waiting != NULL) {
- int rc;
-
-		/* ignore the result of osc_extent_wait; the write initiator
-		 * should take care of it. */
- rc = osc_extent_wait(env, waiting, OES_INV);
- if (rc < 0)
- OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
-
- osc_extent_put(env, waiting);
- waiting = NULL;
- goto again;
- }
- return result;
-}
-
-/**
- * Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
- */
-void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj)
-{
- struct osc_extent *ext = oio->oi_trunc;
-
- oio->oi_trunc = NULL;
- if (ext != NULL) {
- bool unplug = false;
-
- EASSERT(ext->oe_nr_pages > 0, ext);
- EASSERT(ext->oe_state == OES_TRUNC, ext);
- EASSERT(!ext->oe_urgent, ext);
-
- OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n");
- osc_object_lock(obj);
- osc_extent_state_set(ext, OES_CACHE);
- if (ext->oe_fsync_wait && !ext->oe_urgent) {
- ext->oe_urgent = 1;
- list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
- unplug = true;
- }
- osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
- osc_object_unlock(obj);
- osc_extent_put(env, ext);
-
- if (unplug)
- osc_io_unplug_async(env, osc_cli(obj), obj);
- }
-}
-
-/**
- * Wait for extents in a specific range to be written out.
- * The caller must have called osc_cache_writeback_range() to issue the IO,
- * otherwise it will take a long time for this function to finish.
- *
- * The caller must hold the inode mutex, or cancel the exclusive DLM lock, so
- * that nobody else can dirty this range of the file while we are waiting for
- * the extents to be written.
- */
-int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end)
-{
- struct osc_extent *ext;
- pgoff_t index = start;
- int result = 0;
-
-again:
- osc_object_lock(obj);
- ext = osc_extent_search(obj, index);
- if (ext == NULL)
- ext = first_extent(obj);
- else if (ext->oe_end < index)
- ext = next_extent(ext);
- while (ext != NULL) {
- int rc;
-
- if (ext->oe_start > end)
- break;
-
- if (!ext->oe_fsync_wait) {
- ext = next_extent(ext);
- continue;
- }
-
- EASSERT(ergo(ext->oe_state == OES_CACHE,
- ext->oe_hp || ext->oe_urgent), ext);
- EASSERT(ergo(ext->oe_state == OES_ACTIVE,
- !ext->oe_hp && ext->oe_urgent), ext);
-
- index = ext->oe_end + 1;
- osc_extent_get(ext);
- osc_object_unlock(obj);
-
- rc = osc_extent_wait(env, ext, OES_INV);
- if (result == 0)
- result = rc;
- osc_extent_put(env, ext);
- goto again;
- }
- osc_object_unlock(obj);
-
- OSC_IO_DEBUG(obj, "sync file range.\n");
- return result;
-}
-
-/**
- * Called to write out a range of osc object.
- *
- * @hp     : should be set if this is caused by lock cancellation;
- * @discard: set if dirty pages should simply be dropped - the file is being
- *	deleted or truncated, which implies no extent is partially discarded.
- *
- * Return how many pages will be issued, or error code if error occurred.
- */
-int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end, int hp, int discard)
-{
- struct osc_extent *ext;
- LIST_HEAD(discard_list);
- bool unplug = false;
- int result = 0;
-
- osc_object_lock(obj);
- ext = osc_extent_search(obj, start);
- if (ext == NULL)
- ext = first_extent(obj);
- else if (ext->oe_end < start)
- ext = next_extent(ext);
- while (ext != NULL) {
- if (ext->oe_start > end)
- break;
-
- ext->oe_fsync_wait = 1;
- switch (ext->oe_state) {
- case OES_CACHE:
- result += ext->oe_nr_pages;
- if (!discard) {
- struct list_head *list = NULL;
- if (hp) {
- EASSERT(!ext->oe_hp, ext);
- ext->oe_hp = 1;
- list = &obj->oo_hp_exts;
- } else if (!ext->oe_urgent) {
- ext->oe_urgent = 1;
- list = &obj->oo_urgent_exts;
- }
- if (list != NULL)
- list_move_tail(&ext->oe_link, list);
- unplug = true;
- } else {
- /* the only discarder is lock cancelling, so
- * [start, end] must contain this extent */
- EASSERT(ext->oe_start >= start &&
- ext->oe_max_end <= end, ext);
- osc_extent_state_set(ext, OES_LOCKING);
- ext->oe_owner = current;
- list_move_tail(&ext->oe_link,
- &discard_list);
- osc_update_pending(obj, OBD_BRW_WRITE,
- -ext->oe_nr_pages);
- }
- break;
- case OES_ACTIVE:
-			/* It's pretty bad to wait for ACTIVE extents, because
-			 * we don't know how long it will take for them to be
-			 * flushed, since they may be blocked waiting for more
-			 * grants. We do this for the correctness of fsync. */
- LASSERT(hp == 0 && discard == 0);
- ext->oe_urgent = 1;
- break;
- case OES_TRUNC:
- /* this extent is being truncated, can't do anything
- * for it now. it will be set to urgent after truncate
- * is finished in osc_cache_truncate_end(). */
- default:
- break;
- }
- ext = next_extent(ext);
- }
- osc_object_unlock(obj);
-
- LASSERT(ergo(!discard, list_empty(&discard_list)));
- if (!list_empty(&discard_list)) {
- struct osc_extent *tmp;
- int rc;
-
- osc_list_maint(osc_cli(obj), obj);
- list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
- list_del_init(&ext->oe_link);
- EASSERT(ext->oe_state == OES_LOCKING, ext);
-
- /* Discard caching pages. We don't actually write this
- * extent out but we complete it as if we did. */
- rc = osc_extent_make_ready(env, ext);
- if (unlikely(rc < 0)) {
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "make_ready returned %d\n", rc);
- if (result >= 0)
- result = rc;
- }
-
- /* finish the extent as if the pages were sent */
- osc_extent_finish(env, ext, 0, 0);
- }
- }
-
- if (unplug)
- osc_io_unplug(env, osc_cli(obj), obj);
-
- if (hp || discard) {
- int rc;
- rc = osc_cache_wait_range(env, obj, start, end);
- if (result >= 0 && rc < 0)
- result = rc;
- }
-
- OSC_IO_DEBUG(obj, "cache page out.\n");
- return result;
-}
-
-/** @} osc */
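A minimal usage sketch of the writeback interfaces removed above, assuming a
valid env/object pair obtained elsewhere (illustrative only, not part of the
deleted code): flush a page range and then wait for it, mirroring what
osc_cache_writeback_range() itself does when hp or discard is set.

static int osc_flush_range_sync(const struct lu_env *env,
				struct osc_object *obj,
				pgoff_t start, pgoff_t end)
{
	int rc;

	/* queue dirty extents in [start, end] for ordinary writeback:
	 * not high priority (hp = 0), do not discard (discard = 0) */
	rc = osc_cache_writeback_range(env, obj, start, end, 0, 0);
	if (rc < 0)
		return rc;

	/* block until every extent covering the range has been written out */
	return osc_cache_wait_range(env, obj, start, end);
}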
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
deleted file mode 100644
index 75bfda6e4ad3..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ /dev/null
@@ -1,685 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Internal interfaces of OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#ifndef OSC_CL_INTERNAL_H
-#define OSC_CL_INTERNAL_H
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd.h"
-/* osc_build_res_name() */
-#include "../include/cl_object.h"
-#include "../include/lclient.h"
-#include "osc_internal.h"
-
-/** \defgroup osc osc
- * @{
- */
-
-struct osc_extent;
-
-/**
- * State maintained by osc layer for each IO context.
- */
-struct osc_io {
- /** super class */
- struct cl_io_slice oi_cl;
- /** true if this io is lockless. */
- int oi_lockless;
-	/** active extent: we know how many bytes are going to be written,
-	 * so having an active extent prevents it from being fragmented */
- struct osc_extent *oi_active;
- /** partially truncated extent, we need to hold this extent to prevent
- * page writeback from happening. */
- struct osc_extent *oi_trunc;
-
- struct obd_info oi_info;
- struct obdo oi_oa;
- struct osc_async_cbargs {
- bool opc_rpc_sent;
- int opc_rc;
- struct completion opc_sync;
- } oi_cbarg;
-};
-
-/**
- * State of transfer for osc.
- */
-struct osc_req {
- struct cl_req_slice or_cl;
-};
-
-/**
- * State maintained by osc layer for the duration of a system call.
- */
-struct osc_session {
- struct osc_io os_io;
-};
-
-#define OTI_PVEC_SIZE 64
-struct osc_thread_info {
- struct ldlm_res_id oti_resname;
- ldlm_policy_data_t oti_policy;
- struct cl_lock_descr oti_descr;
- struct cl_attr oti_attr;
- struct lustre_handle oti_handle;
- struct cl_page_list oti_plist;
- struct cl_io oti_io;
- struct cl_page *oti_pvec[OTI_PVEC_SIZE];
-};
-
-struct osc_object {
- struct cl_object oo_cl;
- struct lov_oinfo *oo_oinfo;
- /**
- * True if locking against this stripe got -EUSERS.
- */
- int oo_contended;
- unsigned long oo_contention_time;
- /**
- * List of pages in transfer.
- */
- struct list_head oo_inflight[CRT_NR];
- /**
- * Lock, protecting ccc_object::cob_inflight, because a seat-belt is
- * locked during take-off and landing.
- */
- spinlock_t oo_seatbelt;
-
- /**
- * used by the osc to keep track of what objects to build into rpcs.
- * Protected by client_obd->cli_loi_list_lock.
- */
- struct list_head oo_ready_item;
- struct list_head oo_hp_ready_item;
- struct list_head oo_write_item;
- struct list_head oo_read_item;
-
- /**
- * extent is a red black tree to manage (async) dirty pages.
- */
- struct rb_root oo_root;
- /**
- * Manage write(dirty) extents.
- */
- struct list_head oo_hp_exts; /* list of hp extents */
- struct list_head oo_urgent_exts; /* list of writeback extents */
- struct list_head oo_rpc_exts;
-
- struct list_head oo_reading_exts;
-
- atomic_t oo_nr_reads;
- atomic_t oo_nr_writes;
-
- /** Protect extent tree. Will be used to protect
- * oo_{read|write}_pages soon. */
- spinlock_t oo_lock;
-};
-
-static inline void osc_object_lock(struct osc_object *obj)
-{
- spin_lock(&obj->oo_lock);
-}
-
-static inline int osc_object_trylock(struct osc_object *obj)
-{
- return spin_trylock(&obj->oo_lock);
-}
-
-static inline void osc_object_unlock(struct osc_object *obj)
-{
- spin_unlock(&obj->oo_lock);
-}
-
-static inline int osc_object_is_locked(struct osc_object *obj)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- return spin_is_locked(&obj->oo_lock);
-#else
- /*
- * It is not perfect to return true all the time.
- * But since this function is only used for assertion
- * and checking, it seems OK.
- */
- return 1;
-#endif
-}
-
-/*
- * Lock "micro-states" for osc layer.
- */
-enum osc_lock_state {
- OLS_NEW,
- OLS_ENQUEUED,
- OLS_UPCALL_RECEIVED,
- OLS_GRANTED,
- OLS_RELEASED,
- OLS_BLOCKED,
- OLS_CANCELLED
-};
-
-/**
- * osc-private state of cl_lock.
- *
- * Interaction with DLM.
- *
- * CLIO enqueues all DLM locks through ptlrpcd (that is, in "async" mode).
- *
- * Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
- * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_lock.
- *
- * This pointer is protected through a reference, acquired by
- * osc_lock_upcall0(). Also, an additional reference is acquired by
- * ldlm_lock_addref() call protecting the lock from cancellation, until
- * osc_lock_unuse() releases it.
- *
- * Below is a description of how lock references are acquired and released
- * inside of DLM.
- *
- * - When new lock is created and enqueued to the server (ldlm_cli_enqueue())
- * - ldlm_lock_create()
- * - ldlm_lock_new(): initializes a lock with 2 references. One for
- * the caller (released when reply from the server is received, or on
- * error), and another for the hash table.
- * - ldlm_lock_addref_internal(): protects the lock from cancellation.
- *
- * - When reply is received from the server (osc_enqueue_interpret())
- * - ldlm_cli_enqueue_fini()
- * - LDLM_LOCK_PUT(): releases caller reference acquired by
- * ldlm_lock_new().
- * - if (rc != 0)
- * ldlm_lock_decref(): error case: matches ldlm_cli_enqueue().
- * - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
- *
- * - When lock is being cancelled (ldlm_lock_cancel())
- * - ldlm_lock_destroy()
- * - LDLM_LOCK_PUT(): releases hash-table reference acquired by
- * ldlm_lock_new().
- *
- * osc_lock is detached from ldlm_lock by osc_lock_detach() that is called
- * either when lock is cancelled (osc_lock_blocking()), or when locks is
- * deleted without cancellation (e.g., from cl_locks_prune()). In the latter
- * case ldlm lock remains in memory, and can be re-attached to osc_lock in the
- * future.
- */
-struct osc_lock {
- struct cl_lock_slice ols_cl;
- /** underlying DLM lock */
- struct ldlm_lock *ols_lock;
- /** lock value block */
- struct ost_lvb ols_lvb;
- /** DLM flags with which osc_lock::ols_lock was enqueued */
- __u64 ols_flags;
- /** osc_lock::ols_lock handle */
- struct lustre_handle ols_handle;
- struct ldlm_enqueue_info ols_einfo;
- enum osc_lock_state ols_state;
-
- /**
- * How many pages are using this lock for io, currently only used by
- * read-ahead. If non-zero, the underlying dlm lock won't be cancelled
- * during recovery to avoid deadlock. see bz16774.
- *
- * \see osc_page::ops_lock
- * \see osc_page_addref_lock(), osc_page_putref_lock()
- */
- atomic_t ols_pageref;
-
- /**
- * true, if ldlm_lock_addref() was called against
- * osc_lock::ols_lock. This is used for sanity checking.
- *
- * \see osc_lock::ols_has_ref
- */
- unsigned ols_hold :1,
- /**
-	 * this is much like osc_lock::ols_hold, except that this bit is
-	 * cleared _after_ the reference is released in osc_lock_unuse(). This
-	 * fine distinction is needed because:
- *
- * - if ldlm lock still has a reference, osc_ast_data_get() needs
- * to return associated cl_lock (so that a flag is needed that is
- * cleared after ldlm_lock_decref() returned), and
- *
- * - ldlm_lock_decref() can invoke blocking ast (for a
- * LDLM_FL_CBPENDING lock), and osc_lock functions like
- * osc_lock_cancel() called from there need to know whether to
- * release lock reference (so that a flag is needed that is
- * cleared before ldlm_lock_decref() is called).
- */
- ols_has_ref:1,
- /**
- * inherit the lockless attribute from top level cl_io.
- * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
- */
- ols_locklessable:1,
- /**
- * set by osc_lock_use() to wait until blocking AST enters into
- * osc_ldlm_blocking_ast0(), so that cl_lock mutex can be used for
- * further synchronization.
- */
- ols_ast_wait:1,
- /**
- * If the data of this lock has been flushed to server side.
- */
- ols_flush:1,
- /**
-	 * if set, the osc_lock is a glimpse lock. For glimpse locks we treat
-	 * the EVAVAIL error as tolerable; this lets the upper logic wait for
-	 * the glimpse locks to all OSTs to complete.
-	 * A glimpse lock converts to a normal lock if the server lock is
-	 * granted.
-	 * A glimpse lock should be destroyed immediately after use.
- */
- ols_glimpse:1,
- /**
- * For async glimpse lock.
- */
- ols_agl:1;
- /**
-	 * IO that owns this lock. This field is used for deadlock
-	 * avoidance by osc_lock_enqueue_wait().
-	 *
-	 * XXX: unfortunately, the owner of an osc_lock is not unique:
-	 * the lock may have multiple users if it is granted and
-	 * then matched.
- */
- struct osc_io *ols_owner;
-};
-
-
-/**
- * Page state private for osc layer.
- */
-struct osc_page {
- struct cl_page_slice ops_cl;
- /**
- * Page queues used by osc to detect when RPC can be formed.
- */
- struct osc_async_page ops_oap;
- /**
- * An offset within page from which next transfer starts. This is used
- * by cl_page_clip() to submit partial page transfers.
- */
- int ops_from;
- /**
- * An offset within page at which next transfer ends.
- *
- * \see osc_page::ops_from.
- */
- int ops_to;
- /**
- * Boolean, true iff page is under transfer. Used for sanity checking.
- */
- unsigned ops_transfer_pinned:1,
- /**
- * True for a `temporary page' created by read-ahead code, probably
- * outside of any DLM lock.
- */
- ops_temp:1,
- /**
- * in LRU?
- */
- ops_in_lru:1,
- /**
- * Set if the page must be transferred with OBD_BRW_SRVLOCK.
- */
- ops_srvlock:1;
- union {
- /**
- * lru page list. ops_inflight and ops_lru are exclusive so
- * that they can share the same data.
- */
- struct list_head ops_lru;
- /**
- * Linkage into a per-osc_object list of pages in flight. For
- * debugging.
- */
- struct list_head ops_inflight;
- };
- /**
- * Thread that submitted this page for transfer. For debugging.
- */
- struct task_struct *ops_submitter;
- /**
- * Submit time - the time when the page is starting RPC. For debugging.
- */
- unsigned long ops_submit_time;
-
- /**
-	 * A lock of which we hold a reference covers this page. Only used by
-	 * read-ahead: for a readahead page, we hold its covering lock to
-	 * prevent it from being canceled during recovery.
- *
- * \see osc_lock::ols_pageref
- * \see osc_page_addref_lock(), osc_page_putref_lock().
- */
- struct cl_lock *ops_lock;
-};
-
-extern struct kmem_cache *osc_lock_kmem;
-extern struct kmem_cache *osc_object_kmem;
-extern struct kmem_cache *osc_thread_kmem;
-extern struct kmem_cache *osc_session_kmem;
-extern struct kmem_cache *osc_req_kmem;
-extern struct kmem_cache *osc_extent_kmem;
-
-extern struct lu_device_type osc_device_type;
-extern struct lu_context_key osc_key;
-extern struct lu_context_key osc_session_key;
-
-#define OSC_FLAGS (ASYNC_URGENT|ASYNC_READY)
-
-int osc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
-int osc_io_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int osc_req_init (const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-struct lu_object *osc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
-int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
-
-void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end);
-int osc_lvb_print (const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb);
-
-void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
- enum cl_req_type crt, int brw_flags);
-int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
-int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
- u32 async_flags);
-int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
- struct page *page, loff_t offset);
-int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops);
-int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *ops);
-int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops);
-int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- struct list_head *list, int cmd, int brw_flags);
-int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj, __u64 size);
-void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj);
-int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end, int hp, int discard);
-int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
- pgoff_t start, pgoff_t end);
-void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc);
-
-void osc_object_set_contended (struct osc_object *obj);
-void osc_object_clear_contended(struct osc_object *obj);
-int osc_object_is_contended (struct osc_object *obj);
-
-int osc_lock_is_lockless (const struct osc_lock *olck);
-
-/*****************************************************************************
- *
- * Accessors.
- *
- */
-
-static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
-{
- struct osc_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &osc_key);
- LASSERT(info != NULL);
- return info;
-}
-
-static inline struct osc_session *osc_env_session(const struct lu_env *env)
-{
- struct osc_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &osc_session_key);
- LASSERT(ses != NULL);
- return ses;
-}
-
-static inline struct osc_io *osc_env_io(const struct lu_env *env)
-{
- return &osc_env_session(env)->os_io;
-}
-
-static inline int osc_is_object(const struct lu_object *obj)
-{
- return obj->lo_dev->ld_type == &osc_device_type;
-}
-
-static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
-{
- LINVRNT(d->ld_type == &osc_device_type);
- return container_of0(d, struct osc_device, od_cl.cd_lu_dev);
-}
-
-static inline struct obd_export *osc_export(const struct osc_object *obj)
-{
- return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->od_exp;
-}
-
-static inline struct client_obd *osc_cli(const struct osc_object *obj)
-{
- return &osc_export(obj)->exp_obd->u.cli;
-}
-
-static inline struct osc_object *cl2osc(const struct cl_object *obj)
-{
- LINVRNT(osc_is_object(&obj->co_lu));
- return container_of0(obj, struct osc_object, oo_cl);
-}
-
-static inline struct cl_object *osc2cl(const struct osc_object *obj)
-{
- return (struct cl_object *)&obj->oo_cl;
-}
-
-static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode)
-{
- LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
- if (mode == CLM_READ)
- return LCK_PR;
- else if (mode == CLM_WRITE)
- return LCK_PW;
- else
- return LCK_GROUP;
-}
-
-static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode)
-{
- LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
- if (mode == LCK_PR)
- return CLM_READ;
- else if (mode == LCK_PW)
- return CLM_WRITE;
- else
- return CLM_GROUP;
-}
-
-static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
-{
- LINVRNT(osc_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct osc_page, ops_cl);
-}
-
-static inline struct osc_page *oap2osc(struct osc_async_page *oap)
-{
- return container_of0(oap, struct osc_page, ops_oap);
-}
-
-static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
-{
- return oap2osc(oap)->ops_cl.cpl_page;
-}
-
-static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
-{
- return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
-}
-
-static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
- return container_of0(slice, struct osc_lock, ols_cl);
-}
-
-static inline struct osc_lock *osc_lock_at(const struct cl_lock *lock)
-{
- return cl2osc_lock(cl_lock_at(lock, &osc_device_type));
-}
-
-static inline int osc_io_srvlock(struct osc_io *oio)
-{
- return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
-}
-
-enum osc_extent_state {
- OES_INV = 0, /** extent is just initialized or destroyed */
- OES_ACTIVE = 1, /** process is using this extent */
- OES_CACHE = 2, /** extent is ready for IO */
- OES_LOCKING = 3, /** locking page to prepare IO */
- OES_LOCK_DONE = 4, /** locking finished, ready to send */
- OES_RPC = 5, /** in RPC */
- OES_TRUNC = 6, /** being truncated */
- OES_STATE_MAX
-};
-
-/**
- * osc_extent data to manage dirty pages.
- * osc_extent has the following attributes:
- * 1. all pages in the same extent must go out in one RPC during write back;
- * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
- * 3. must be covered by only 1 osc_lock;
- * 4. exclusive. It's impossible to have overlapping osc_extents.
- *
- * The lifetime of an extent is from when the 1st page is dirtied to when
- * all pages inside it are written out.
- *
- * LOCKING ORDER
- * =============
- * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
- */
-struct osc_extent {
- /** red-black tree node */
- struct rb_node oe_node;
- /** osc_object of this extent */
- struct osc_object *oe_obj;
- /** refcount, removed from red-black tree if reaches zero. */
- atomic_t oe_refc;
- /** busy if non-zero */
- atomic_t oe_users;
- /** link list of osc_object's oo_{hp|urgent|locking}_exts. */
- struct list_head oe_link;
- /** state of this extent */
- unsigned int oe_state;
- /** flags for this extent. */
- unsigned int oe_intree:1,
- /** 0 is write, 1 is read */
- oe_rw:1,
- oe_srvlock:1,
- oe_memalloc:1,
- /** an ACTIVE extent is going to be truncated, so when this extent
- * is released, it will turn into TRUNC state instead of CACHE. */
- oe_trunc_pending:1,
-	/** this extent should be written asap and someone may wait for the
-	 * write to finish. This bit is usually set along with urgent if
-	 * the extent is in CACHE state.
-	 * An fsync_wait extent can't be merged, because the new extent
-	 * region might exceed the fsync range. */
- oe_fsync_wait:1,
- /** covering lock is being canceled */
- oe_hp:1,
-	/** this extent should be written back asap. Set if one of its pages
-	 * is picked up by the page writeback daemon, or by sync write or
-	 * read requests. */
- oe_urgent:1;
-	/** how much grant is allocated for this extent. There is no grant
-	 * allocated for read extents and sync write extents. */
- unsigned int oe_grants;
- /** # of dirty pages in this extent */
- unsigned int oe_nr_pages;
- /** list of pending oap pages. Pages in this list are NOT sorted. */
- struct list_head oe_pages;
-	/** Since an extent has to be written out atomically, this is used to
-	 * remember the next page that needs to be locked to write this
-	 * extent out. Not used right now.
- */
- struct osc_page *oe_next_page;
-	/** start and end index of this extent, both inclusive.
-	 * Page offset here is the page index of osc_pages.
-	 * oe_start is used as the key for the red-black tree. */
- pgoff_t oe_start;
- pgoff_t oe_end;
-	/** maximum ending index of this extent; this is limited by
-	 * max_pages_per_rpc, the lock extent and the chunk size. */
- pgoff_t oe_max_end;
- /** waitqueue - for those who want to be notified if this extent's
- * state has changed. */
- wait_queue_head_t oe_waitq;
- /** lock covering this extent */
- struct cl_lock *oe_osclock;
-	/** terminator of this extent. Must be set if this extent is in IO. */
- struct task_struct *oe_owner;
-	/** return value of writeback. If somebody is waiting for this extent,
-	 * this value can be seen by the outside world. */
- int oe_rc;
- /** max pages per rpc when this extent was created */
- unsigned int oe_mppr;
-};
-
-int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
- int sent, int rc);
-void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
-
-/** @} osc */
-
-#endif /* OSC_CL_INTERNAL_H */
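A short sketch of the locking pattern the header above implies (illustrative
only; error paths omitted): the per-object spinlock guards the extent tree and
the per-object extent lists, and osc_object_is_locked() backs the
EASSERT/LASSERT checks used throughout the removed code.

static void osc_walk_urgent_extents(struct osc_object *obj)
{
	struct osc_extent *ext;

	osc_object_lock(obj);
	LASSERT(osc_object_is_locked(obj));
	/* oo_urgent_exts is one of the lists protected by oo_lock */
	list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
		;	/* inspect ext under the lock */
	osc_object_unlock(obj);
}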
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
deleted file mode 100644
index 91fdec44792b..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_device, cl_req for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-/* class_name2obd() */
-#include "../include/obd_class.h"
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-struct kmem_cache *osc_lock_kmem;
-struct kmem_cache *osc_object_kmem;
-struct kmem_cache *osc_thread_kmem;
-struct kmem_cache *osc_session_kmem;
-struct kmem_cache *osc_req_kmem;
-struct kmem_cache *osc_extent_kmem;
-struct kmem_cache *osc_quota_kmem;
-
-struct lu_kmem_descr osc_caches[] = {
- {
- .ckd_cache = &osc_lock_kmem,
- .ckd_name = "osc_lock_kmem",
- .ckd_size = sizeof(struct osc_lock)
- },
- {
- .ckd_cache = &osc_object_kmem,
- .ckd_name = "osc_object_kmem",
- .ckd_size = sizeof(struct osc_object)
- },
- {
- .ckd_cache = &osc_thread_kmem,
- .ckd_name = "osc_thread_kmem",
- .ckd_size = sizeof(struct osc_thread_info)
- },
- {
- .ckd_cache = &osc_session_kmem,
- .ckd_name = "osc_session_kmem",
- .ckd_size = sizeof(struct osc_session)
- },
- {
- .ckd_cache = &osc_req_kmem,
- .ckd_name = "osc_req_kmem",
- .ckd_size = sizeof(struct osc_req)
- },
- {
- .ckd_cache = &osc_extent_kmem,
- .ckd_name = "osc_extent_kmem",
- .ckd_size = sizeof(struct osc_extent)
- },
- {
- .ckd_cache = &osc_quota_kmem,
- .ckd_name = "osc_quota_kmem",
- .ckd_size = sizeof(struct osc_quota_info)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-struct lock_class_key osc_ast_guard_class;
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static struct lu_device *osc2lu_dev(struct osc_device *osc)
-{
- return &osc->od_cl.cd_lu_dev;
-}
-
-/*****************************************************************************
- *
- * Osc device and device type functions.
- *
- */
-
-static void *osc_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct osc_thread_info *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, GFP_NOFS);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void osc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct osc_thread_info *info = data;
-
- OBD_SLAB_FREE_PTR(info, osc_thread_kmem);
-}
-
-struct lu_context_key osc_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = osc_key_init,
- .lct_fini = osc_key_fini
-};
-
-static void *osc_session_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct osc_session *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, GFP_NOFS);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-static void osc_session_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct osc_session *info = data;
-
- OBD_SLAB_FREE_PTR(info, osc_session_kmem);
-}
-
-struct lu_context_key osc_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = osc_session_init,
- .lct_fini = osc_session_fini
-};
-
-/* type constructor/destructor: osc_type_{init,fini,start,stop}(). */
-LU_TYPE_INIT_FINI(osc, &osc_key, &osc_session_key);
-
-static int osc_cl_process_config(const struct lu_env *env,
- struct lu_device *d, struct lustre_cfg *cfg)
-{
- return osc_process_config_base(d->ld_obd, cfg);
-}
-
-static const struct lu_device_operations osc_lu_ops = {
- .ldo_object_alloc = osc_object_alloc,
- .ldo_process_config = osc_cl_process_config,
- .ldo_recovery_complete = NULL
-};
-
-static const struct cl_device_operations osc_cl_ops = {
- .cdo_req_init = osc_req_init
-};
-
-static int osc_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- return 0;
-}
-
-static struct lu_device *osc_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- return NULL;
-}
-
-static struct lu_device *osc_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct osc_device *od = lu2osc_dev(d);
-
- cl_device_fini(lu2cl_dev(d));
- kfree(od);
- return NULL;
-}
-
-static struct lu_device *osc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg)
-{
- struct lu_device *d;
- struct osc_device *od;
- struct obd_device *obd;
- int rc;
-
- od = kzalloc(sizeof(*od), GFP_NOFS);
- if (!od)
- return ERR_PTR(-ENOMEM);
-
- cl_device_init(&od->od_cl, t);
- d = osc2lu_dev(od);
- d->ld_ops = &osc_lu_ops;
- od->od_cl.cd_ops = &osc_cl_ops;
-
- /* Setup OSC OBD */
- obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd != NULL);
- rc = osc_setup(obd, cfg);
- if (rc) {
- osc_device_free(env, d);
- return ERR_PTR(rc);
- }
- od->od_exp = obd->obd_self_export;
- return d;
-}
-
-static const struct lu_device_type_operations osc_device_type_ops = {
- .ldto_init = osc_type_init,
- .ldto_fini = osc_type_fini,
-
- .ldto_start = osc_type_start,
- .ldto_stop = osc_type_stop,
-
- .ldto_device_alloc = osc_device_alloc,
- .ldto_device_free = osc_device_free,
-
- .ldto_device_init = osc_device_init,
- .ldto_device_fini = osc_device_fini
-};
-
-struct lu_device_type osc_device_type = {
- .ldt_tags = LU_DEVICE_CL,
- .ldt_name = LUSTRE_OSC_NAME,
- .ldt_ops = &osc_device_type_ops,
- .ldt_ctx_tags = LCT_CL_THREAD
-};
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
deleted file mode 100644
index 448fdf4b8939..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef OSC_INTERNAL_H
-#define OSC_INTERNAL_H
-
-#define OAP_MAGIC 8675309
-
-extern atomic_t osc_pool_req_count;
-extern unsigned int osc_reqpool_maxreqcount;
-extern struct ptlrpc_request_pool *osc_rq_pool;
-
-struct lu_env;
-
-enum async_flags {
- ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
- page is added to an rpc */
- ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
- ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
- to give the caller a chance to update
- or cancel the size of the io */
- ASYNC_HP = 0x10,
-};
-
-struct osc_async_page {
- int oap_magic;
- unsigned short oap_cmd;
- unsigned short oap_interrupted:1;
-
- struct list_head oap_pending_item;
- struct list_head oap_rpc_item;
-
- u64 oap_obj_off;
- unsigned oap_page_off;
- enum async_flags oap_async_flags;
-
- struct brw_page oap_brw_page;
-
- struct ptlrpc_request *oap_request;
- struct client_obd *oap_cli;
- struct osc_object *oap_obj;
-
- struct ldlm_lock *oap_ldlm_lock;
- spinlock_t oap_lock;
-};
-
-#define oap_page oap_brw_page.pg
-#define oap_count oap_brw_page.count
-#define oap_brw_flags oap_brw_page.flag
-
-struct osc_cache_waiter {
- struct list_head ocw_entry;
- wait_queue_head_t ocw_waitq;
- struct osc_async_page *ocw_oap;
- int ocw_grant;
- int ocw_rc;
-};
-
-int osc_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md **ea,
- struct obd_trans_info *oti);
-int osc_real_create(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md **ea, struct obd_trans_info *oti);
-void osc_wake_cache_waiters(struct client_obd *cli);
-int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes);
-void osc_update_next_shrink(struct client_obd *cli);
-
-/*
- * cl integration.
- */
-#include "../include/cl_object.h"
-
-extern struct ptlrpc_request_set *PTLRPCD_SET;
-
-int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
- struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall,
- void *cookie, struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
- struct ptlrpc_request_set *rqset, int async, int agl);
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
-
-int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
- __u64 *flags, void *data, struct lustre_handle *lockh,
- int unref);
-
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset);
-
-int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
-int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
- struct list_head *ext_list, int cmd);
-int osc_lru_shrink(struct client_obd *cli, int target);
-
-extern spinlock_t osc_ast_guard;
-
-int osc_cleanup(struct obd_device *obd);
-int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
-
-int lproc_osc_attach_seqstat(struct obd_device *dev);
-void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars);
-
-extern struct lu_device_type osc_device_type;
-
-static inline int osc_recoverable_error(int rc)
-{
- return (rc == -EIO || rc == -EROFS || rc == -ENOMEM ||
- rc == -EAGAIN || rc == -EINPROGRESS);
-}
-
-static inline unsigned long rpcs_in_flight(struct client_obd *cli)
-{
- return cli->cl_r_in_flight + cli->cl_w_in_flight;
-}
-
-struct osc_device {
- struct cl_device od_cl;
- struct obd_export *od_exp;
-
- /* Write stats is actually protected by client_obd's lock. */
- struct osc_stats {
- uint64_t os_lockless_writes; /* by bytes */
- uint64_t os_lockless_reads; /* by bytes */
- uint64_t os_lockless_truncates; /* by times */
- } od_stats;
-
- /* configuration item(s) */
- int od_contention_time;
- int od_lockless_truncate;
-};
-
-static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
-{
- return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev);
-}
-
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm);
-
-extern struct kmem_cache *osc_quota_kmem;
-struct osc_quota_info {
- /** linkage for quota hash table */
- struct hlist_node oqi_hash;
- u32 oqi_id;
-};
-int osc_quota_setup(struct obd_device *obd);
-int osc_quota_cleanup(struct obd_device *obd);
-int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
- u32 valid, u32 flags);
-int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
-int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl);
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl);
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
-
-#endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
deleted file mode 100644
index 8d7eab0856a0..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_io for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
-{
- LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
- return container_of0(slice, struct osc_req, or_cl);
-}
-
-static struct osc_io *cl2osc_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);
-
- LINVRNT(oio == osc_env_io(env));
- return oio;
-}
-
-static struct osc_page *osc_cl_page_osc(struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice != NULL);
-
- return cl2osc_page(slice);
-}
-
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
-{
-}
-
-/**
- * An implementation of cl_io_operations::cio_io_submit() method for osc
- * layer. Iterates over pages in the in-queue, prepares each for io by calling
- * cl_page_prep() and then either submits them through osc_io_submit_page()
- * or, if page is already submitted, changes osc flags through
- * osc_set_async_flags().
- */
-static int osc_io_submit(const struct lu_env *env,
- const struct cl_io_slice *ios,
- enum cl_req_type crt, struct cl_2queue *queue)
-{
- struct cl_page *page;
- struct cl_page *tmp;
- struct client_obd *cli = NULL;
- struct osc_object *osc = NULL; /* to keep gcc happy */
- struct osc_page *opg;
- struct cl_io *io;
- LIST_HEAD(list);
-
- struct cl_page_list *qin = &queue->c2_qin;
- struct cl_page_list *qout = &queue->c2_qout;
- int queued = 0;
- int result = 0;
- int cmd;
- int brw_flags;
- int max_pages;
-
- LASSERT(qin->pl_nr > 0);
-
- CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);
-
- osc = cl2osc(ios->cis_obj);
- cli = osc_cli(osc);
- max_pages = cli->cl_max_pages_per_rpc;
-
- cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
- brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
-
- /*
- * NOTE: here @page is a top-level page. This is done to avoid
- * creation of sub-page-list.
- */
- cl_page_list_for_each_safe(page, tmp, qin) {
- struct osc_async_page *oap;
-
- /* Top level IO. */
- io = page->cp_owner;
- LASSERT(io != NULL);
-
- opg = osc_cl_page_osc(page);
- oap = &opg->ops_oap;
- LASSERT(osc == oap->oap_obj);
-
- if (!list_empty(&oap->oap_pending_item) ||
- !list_empty(&oap->oap_rpc_item)) {
- CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
- oap, opg);
- result = -EBUSY;
- break;
- }
-
- result = cl_page_prep(env, io, page, crt);
- if (result != 0) {
- LASSERT(result < 0);
- if (result != -EALREADY)
- break;
- /*
- * Handle -EALREADY error: for read case, the page is
- * already in UPTODATE state; for write, the page
- * is not dirty.
- */
- result = 0;
- continue;
- }
-
- cl_page_list_move(qout, qin, page);
- oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
-
- osc_page_submit(env, opg, crt, brw_flags);
- list_add_tail(&oap->oap_pending_item, &list);
- if (++queued == max_pages) {
- queued = 0;
- result = osc_queue_sync_pages(env, osc, &list, cmd,
- brw_flags);
- if (result < 0)
- break;
- }
- }
-
- if (queued > 0)
- result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);
-
- CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
- return qout->pl_nr > 0 ? 0 : result;
-}
-
-static void osc_page_touch_at(const struct lu_env *env,
- struct cl_object *obj, pgoff_t idx, unsigned to)
-{
- struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int valid;
- __u64 kms;
-
- /* offset within stripe */
- kms = cl_offset(obj, idx) + to;
-
- cl_object_attr_lock(obj);
- /*
- * XXX old code used
- *
- * ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
- *
- * here
- */
- CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
- kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
- loi->loi_lvb.lvb_size);
-
- valid = 0;
- if (kms > loi->loi_kms) {
- attr->cat_kms = kms;
- valid |= CAT_KMS;
- }
- if (kms > loi->loi_lvb.lvb_size) {
- attr->cat_size = kms;
- valid |= CAT_SIZE;
- }
- cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
-}
-
-/**
- * This is called when a page is accessed within a file in a way that creates
- * a new page, if one were missing (i.e., if there were a hole at that place in
- * the file, or the accessed page is beyond the current file size). Examples:
- * ->commit_write() and ->nopage() methods.
- *
- * Expand stripe KMS if necessary.
- */
-static void osc_page_touch(const struct lu_env *env,
- struct osc_page *opage, unsigned to)
-{
- struct cl_page *page = opage->ops_cl.cpl_page;
- struct cl_object *obj = opage->ops_cl.cpl_obj;
-
- osc_page_touch_at(env, obj, page->cp_index, to);
-}
-
-/**
- * Implements cl_io_operations::cio_prepare_write() method for osc layer.
- *
- * \retval -EIO transfer initiated against this osc will most likely fail
- * \retval 0 transfer initiated against this osc will most likely succeed.
- *
- * The reason for this check is to immediately return an error to the caller
- * in the case of a deactivated import. Note, that import can be deactivated
- * later, while pages, dirtied by this IO, are still in the cache, but this is
- * irrelevant, because that would still return an error to the application (if
- * it does fsync), but many applications don't do fsync because of performance
- * issues, and we wanted to return an -EIO at write time to notify the
- * application.
- */
-static int osc_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
- struct obd_import *imp = class_exp2cliimp(dev->od_exp);
- struct osc_io *oio = cl2osc_io(env, ios);
- int result = 0;
-
- /*
- * This implements OBD_BRW_CHECK logic from old client.
- */
-
- if (imp == NULL || imp->imp_invalid)
- result = -EIO;
- if (result == 0 && oio->oi_lockless)
- /* this page contains `invalid' data, but who cares?
- * nobody can access the invalid data.
- * in osc_io_commit_write(), we're going to write exact
- * [from, to) bytes of this page to OST. -jay */
- cl_page_export(env, slice->cpl_page, 1);
-
- return result;
-}
-
-static int osc_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct osc_io *oio = cl2osc_io(env, ios);
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- struct osc_async_page *oap = &opg->ops_oap;
-
- LASSERT(to > 0);
- /*
- * XXX instead of calling osc_page_touch() here and in
- * osc_io_fault_start() it might be more logical to introduce
- * cl_page_touch() method, that generic cl_io_commit_write() and page
- * fault code calls.
- */
- osc_page_touch(env, cl2osc_page(slice), to);
- if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE))
- oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
-
- if (oio->oi_lockless)
- /* see osc_io_prepare_write() for lockless io handling. */
- cl_page_clip(env, slice->cpl_page, from, to);
-
- return 0;
-}
-
-static int osc_io_fault_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
-{
- struct cl_io *io;
- struct cl_fault_io *fio;
-
- io = ios->cis_io;
- fio = &io->u.ci_fault;
- CDEBUG(D_INFO, "%lu %d %d\n",
- fio->ft_index, fio->ft_writable, fio->ft_nob);
- /*
- * If mapping is writeable, adjust kms to cover this page,
- * but do not extend kms beyond actual file size.
- * See bug 10919.
- */
- if (fio->ft_writable)
- osc_page_touch_at(env, ios->cis_obj,
- fio->ft_index, fio->ft_nob);
- return 0;
-}
-
-static int osc_async_upcall(void *a, int rc)
-{
- struct osc_async_cbargs *args = a;
-
- args->opc_rc = rc;
- complete(&args->opc_sync);
- return 0;
-}
-
-/**
- * Checks that there are no pages being written in the extent being truncated.
- */
-static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- const struct cl_page_slice *slice;
- struct osc_page *ops;
- struct osc_async_page *oap;
- __u64 start = *(__u64 *)cbdata;
-
- slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice != NULL);
- ops = cl2osc_page(slice);
- oap = &ops->ops_oap;
-
- if (oap->oap_cmd & OBD_BRW_WRITE &&
- !list_empty(&oap->oap_pending_item))
- CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
- start, current->comm);
-
- {
- struct page *vmpage = cl_page_vmpage(env, page);
-
- if (PageLocked(vmpage))
- CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
- ops, page->cp_index,
- (oap->oap_cmd & OBD_BRW_RWMASK));
- }
-
- return CLP_GANG_OKAY;
-}
-
-static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
- struct osc_io *oio, __u64 size)
-{
- struct cl_object *clob;
- int partial;
- pgoff_t start;
-
- clob = oio->oi_cl.cis_obj;
- start = cl_index(clob, size);
- partial = cl_offset(clob, start) < size;
-
- /*
- * Complain if there are pages in the truncated region.
- */
- cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
- trunc_check_cb, (void *)&size);
-}
-
-static int osc_io_setattr_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_io *io = slice->cis_io;
- struct osc_io *oio = cl2osc_io(env, slice);
- struct cl_object *obj = slice->cis_obj;
- struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- struct obdo *oa = &oio->oi_oa;
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
- unsigned int ia_valid = io->u.ci_setattr.sa_valid;
- int result = 0;
- struct obd_info oinfo = { };
-
- /* truncate cache dirty pages first */
- if (cl_io_is_trunc(io))
- result = osc_cache_truncate_start(env, oio, cl2osc(obj), size);
-
- if (result == 0 && oio->oi_lockless == 0) {
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
- unsigned int cl_valid = 0;
-
- if (ia_valid & ATTR_SIZE) {
- attr->cat_size = attr->cat_kms = size;
- cl_valid = CAT_SIZE | CAT_KMS;
- }
- if (ia_valid & ATTR_MTIME_SET) {
- attr->cat_mtime = lvb->lvb_mtime;
- cl_valid |= CAT_MTIME;
- }
- if (ia_valid & ATTR_ATIME_SET) {
- attr->cat_atime = lvb->lvb_atime;
- cl_valid |= CAT_ATIME;
- }
- if (ia_valid & ATTR_CTIME_SET) {
- attr->cat_ctime = lvb->lvb_ctime;
- cl_valid |= CAT_CTIME;
- }
- result = cl_object_attr_set(env, obj, attr, cl_valid);
- }
- cl_object_attr_unlock(obj);
- }
- memset(oa, 0, sizeof(*oa));
- if (result == 0) {
- oa->o_oi = loi->loi_oi;
- oa->o_mtime = attr->cat_mtime;
- oa->o_atime = attr->cat_atime;
- oa->o_ctime = attr->cat_ctime;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
- OBD_MD_FLCTIME | OBD_MD_FLMTIME;
- if (ia_valid & ATTR_SIZE) {
- oa->o_size = size;
- oa->o_blocks = OBD_OBJECT_EOF;
- oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
-
- if (oio->oi_lockless) {
- oa->o_flags = OBD_FL_SRVLOCK;
- oa->o_valid |= OBD_MD_FLFLAGS;
- }
- } else {
- LASSERT(oio->oi_lockless == 0);
- }
-
- oinfo.oi_oa = oa;
- init_completion(&cbargs->opc_sync);
-
- if (ia_valid & ATTR_SIZE)
- result = osc_punch_base(osc_export(cl2osc(obj)),
- &oinfo, osc_async_upcall,
- cbargs, PTLRPCD_SET);
- else
- result = osc_setattr_async_base(osc_export(cl2osc(obj)),
- &oinfo, NULL,
- osc_async_upcall,
- cbargs, PTLRPCD_SET);
- cbargs->opc_rpc_sent = result == 0;
- }
- return result;
-}
-
-static void osc_io_setattr_end(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_io *io = slice->cis_io;
- struct osc_io *oio = cl2osc_io(env, slice);
- struct cl_object *obj = slice->cis_obj;
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- int result = 0;
-
- if (cbargs->opc_rpc_sent) {
- wait_for_completion(&cbargs->opc_sync);
- result = io->ci_result = cbargs->opc_rc;
- }
- if (result == 0) {
- if (oio->oi_lockless) {
- /* lockless truncate */
- struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
-
- LASSERT(cl_io_is_trunc(io));
- /* XXX: Need a lock. */
- osd->od_stats.os_lockless_truncates++;
- }
- }
-
- if (cl_io_is_trunc(io)) {
- __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
-
- osc_trunc_check(env, io, oio, size);
- if (oio->oi_trunc != NULL) {
- osc_cache_truncate_end(env, oio, cl2osc(obj));
- oio->oi_trunc = NULL;
- }
- }
-}
-
-static int osc_io_read_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_object *obj = slice->cis_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int rc = 0;
-
- if (!slice->cis_io->ci_noatime) {
- cl_object_attr_lock(obj);
- attr->cat_atime = ktime_get_real_seconds();
- rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
- cl_object_attr_unlock(obj);
- }
- return rc;
-}
-
-static int osc_io_write_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_object *obj = slice->cis_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int rc = 0;
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
- cl_object_attr_lock(obj);
- attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
- rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
- cl_object_attr_unlock(obj);
-
- return rc;
-}
-
-static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
- struct cl_fsync_io *fio)
-{
- struct osc_io *oio = osc_env_io(env);
- struct obdo *oa = &oio->oi_oa;
- struct obd_info *oinfo = &oio->oi_info;
- struct lov_oinfo *loi = obj->oo_oinfo;
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- int rc = 0;
-
- memset(oa, 0, sizeof(*oa));
- oa->o_oi = loi->loi_oi;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
-
-	/* reload size and blocks for start and end of sync range */
- oa->o_size = fio->fi_start;
- oa->o_blocks = fio->fi_end;
- oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
-
- obdo_set_parent_fid(oa, fio->fi_fid);
-
- memset(oinfo, 0, sizeof(*oinfo));
- oinfo->oi_oa = oa;
- init_completion(&cbargs->opc_sync);
-
- rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
- PTLRPCD_SET);
- return rc;
-}
-
-static int osc_io_fsync_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_io *io = slice->cis_io;
- struct cl_fsync_io *fio = &io->u.ci_fsync;
- struct cl_object *obj = slice->cis_obj;
- struct osc_object *osc = cl2osc(obj);
- pgoff_t start = cl_index(obj, fio->fi_start);
- pgoff_t end = cl_index(obj, fio->fi_end);
- int result = 0;
-
- if (fio->fi_end == OBD_OBJECT_EOF)
- end = CL_PAGE_EOF;
-
- result = osc_cache_writeback_range(env, osc, start, end, 0,
- fio->fi_mode == CL_FSYNC_DISCARD);
- if (result > 0) {
- fio->fi_nr_written += result;
- result = 0;
- }
- if (fio->fi_mode == CL_FSYNC_ALL) {
- int rc;
-
- /* we have to wait for writeback to finish before we can
- * send OST_SYNC RPC. This is bad because it causes extents
- * to be written osc by osc. However, we usually start
- * writeback before CL_FSYNC_ALL so this won't have any real
- * problem. */
- rc = osc_cache_wait_range(env, osc, start, end);
- if (result == 0)
- result = rc;
- rc = osc_fsync_ost(env, osc, fio);
- if (result == 0)
- result = rc;
- }
-
- return result;
-}
-
-static void osc_io_fsync_end(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
- struct cl_object *obj = slice->cis_obj;
- pgoff_t start = cl_index(obj, fio->fi_start);
- pgoff_t end = cl_index(obj, fio->fi_end);
- int result = 0;
-
- if (fio->fi_mode == CL_FSYNC_LOCAL) {
- result = osc_cache_wait_range(env, cl2osc(obj), start, end);
- } else if (fio->fi_mode == CL_FSYNC_ALL) {
- struct osc_io *oio = cl2osc_io(env, slice);
- struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
-
- wait_for_completion(&cbargs->opc_sync);
- if (result == 0)
- result = cbargs->opc_rc;
- }
- slice->cis_io->ci_result = result;
-}
-
-static void osc_io_end(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct osc_io *oio = cl2osc_io(env, slice);
-
- if (oio->oi_active) {
- osc_extent_release(env, oio->oi_active);
- oio->oi_active = NULL;
- }
-}
-
-static const struct cl_io_operations osc_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_start = osc_io_read_start,
- .cio_fini = osc_io_fini
- },
- [CIT_WRITE] = {
- .cio_start = osc_io_write_start,
- .cio_end = osc_io_end,
- .cio_fini = osc_io_fini
- },
- [CIT_SETATTR] = {
- .cio_start = osc_io_setattr_start,
- .cio_end = osc_io_setattr_end
- },
- [CIT_FAULT] = {
- .cio_start = osc_io_fault_start,
- .cio_end = osc_io_end,
- .cio_fini = osc_io_fini
- },
- [CIT_FSYNC] = {
- .cio_start = osc_io_fsync_start,
- .cio_end = osc_io_fsync_end,
- .cio_fini = osc_io_fini
- },
- [CIT_MISC] = {
- .cio_fini = osc_io_fini
- }
- },
- .req_op = {
- [CRT_READ] = {
- .cio_submit = osc_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = osc_io_submit
- }
- },
- .cio_prepare_write = osc_io_prepare_write,
- .cio_commit_write = osc_io_commit_write
-};
-
-/*****************************************************************************
- *
- * Transfer operations.
- *
- */
-
-static int osc_req_prep(const struct lu_env *env,
- const struct cl_req_slice *slice)
-{
- return 0;
-}
-
-static void osc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct osc_req *or;
-
- or = cl2osc_req(slice);
- OBD_SLAB_FREE_PTR(or, osc_req_kmem);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for osc
- * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
- * fields.
- */
-static void osc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct lov_oinfo *oinfo;
- struct cl_req *clerq;
- struct cl_page *apage; /* _some_ page in @clerq */
- struct cl_lock *lock; /* _some_ lock protecting @apage */
- struct osc_lock *olck;
- struct osc_page *opg;
- struct obdo *oa;
- struct ost_lvb *lvb;
-
- oinfo = cl2osc(obj)->oo_oinfo;
- lvb = &oinfo->loi_lvb;
- oa = attr->cra_oa;
-
- if ((flags & OBD_MD_FLMTIME) != 0) {
- oa->o_mtime = lvb->lvb_mtime;
- oa->o_valid |= OBD_MD_FLMTIME;
- }
- if ((flags & OBD_MD_FLATIME) != 0) {
- oa->o_atime = lvb->lvb_atime;
- oa->o_valid |= OBD_MD_FLATIME;
- }
- if ((flags & OBD_MD_FLCTIME) != 0) {
- oa->o_ctime = lvb->lvb_ctime;
- oa->o_valid |= OBD_MD_FLCTIME;
- }
- if (flags & OBD_MD_FLGROUP) {
- ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
- oa->o_valid |= OBD_MD_FLGROUP;
- }
- if (flags & OBD_MD_FLID) {
- ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
- oa->o_valid |= OBD_MD_FLID;
- }
- if (flags & OBD_MD_FLHANDLE) {
- clerq = slice->crs_req;
- LASSERT(!list_empty(&clerq->crq_pages));
- apage = container_of(clerq->crq_pages.next,
- struct cl_page, cp_flight);
- opg = osc_cl_page_osc(apage);
- apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
- lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
- if (lock == NULL) {
- struct cl_object_header *head;
- struct cl_lock *scan;
-
- head = cl_object_header(apage->cp_obj);
- list_for_each_entry(scan, &head->coh_locks,
- cll_linkage)
- CL_LOCK_DEBUG(D_ERROR, env, scan,
- "no cover page!\n");
- CL_PAGE_DEBUG(D_ERROR, env, apage,
- "dump uncover page!\n");
- dump_stack();
- LBUG();
- }
-
- olck = osc_lock_at(lock);
- LASSERT(olck != NULL);
- LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
- /* check for lockless io. */
- if (olck->ols_lock != NULL) {
- oa->o_handle = olck->ols_lock->l_remote_handle;
- oa->o_valid |= OBD_MD_FLHANDLE;
- }
- cl_lock_put(env, lock);
- }
-}
-
-static const struct cl_req_operations osc_req_ops = {
- .cro_prep = osc_req_prep,
- .cro_attr_set = osc_req_attr_set,
- .cro_completion = osc_req_completion
-};
-
-
-int osc_io_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io)
-{
- struct osc_io *oio = osc_env_io(env);
-
- CL_IO_SLICE_CLEAN(oio, oi_cl);
- cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
- return 0;
-}
-
-int osc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct osc_req *or;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, GFP_NOFS);
- if (or != NULL) {
- cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
deleted file mode 100644
index 70b1b43f692b..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ /dev/null
@@ -1,1612 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "../../include/linux/libcfs/libcfs.h"
-/* fid_build_reg_res_name() */
-#include "../include/lustre_fid.h"
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-#define _PAGEREF_MAGIC (-10000000)
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static const struct cl_lock_operations osc_lock_ops;
-static const struct cl_lock_operations osc_lock_lockless_ops;
-static void osc_lock_to_lockless(const struct lu_env *env,
- struct osc_lock *ols, int force);
-static int osc_lock_has_pages(struct osc_lock *olck);
-
-int osc_lock_is_lockless(const struct osc_lock *olck)
-{
- return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
-}
-
-/**
- * Returns a weak pointer to the ldlm lock identified by a handle. Returned
- * pointer cannot be dereferenced, as lock is not protected from concurrent
- * reclaim. This function is a helper for osc_lock_invariant().
- */
-static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
-{
- struct ldlm_lock *lock;
-
- lock = ldlm_handle2lock(handle);
- if (lock != NULL)
- LDLM_LOCK_PUT(lock);
- return lock;
-}
-
-/**
- * Invariant that has to be true all of the time.
- */
-static int osc_lock_invariant(struct osc_lock *ols)
-{
- struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_lock;
- int handle_used = lustre_handle_is_used(&ols->ols_handle);
-
- if (ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && ols->ols_lock == NULL))
- return 1;
-
- /*
- * If all the following "ergo"s are true, return 1, otherwise 0
- */
- if (!ergo(olock != NULL, handle_used))
- return 0;
-
- if (!ergo(olock != NULL,
- olock->l_handle.h_cookie == ols->ols_handle.cookie))
- return 0;
-
- if (!ergo(handle_used,
- ergo(lock != NULL && olock != NULL, lock == olock) &&
- ergo(lock == NULL, olock == NULL)))
- return 0;
- /*
- * Check that ->ols_handle and ->ols_lock are consistent, but
- * take into account that they are set at the different time.
- */
- if (!ergo(ols->ols_state == OLS_CANCELLED,
- olock == NULL && !handle_used))
- return 0;
- /*
- * DLM lock is destroyed only after we have seen cancellation
- * ast.
- */
- if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
- ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
- return 0;
-
- if (!ergo(ols->ols_state == OLS_GRANTED,
- olock != NULL &&
- olock->l_req_mode == olock->l_granted_mode &&
- ols->ols_hold))
- return 0;
- return 1;
-}
-
-/*****************************************************************************
- *
- * Lock operations.
- *
- */
-
-/**
- * Breaks a link between osc_lock and dlm_lock.
- */
-static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
-{
- struct ldlm_lock *dlmlock;
-
- spin_lock(&osc_ast_guard);
- dlmlock = olck->ols_lock;
- if (dlmlock == NULL) {
- spin_unlock(&osc_ast_guard);
- return;
- }
-
- olck->ols_lock = NULL;
- /* wb(); --- for all who checks (ols->ols_lock != NULL) before
- * call to osc_lock_detach() */
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
-
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
- struct cl_object *obj = olck->ols_cl.cls_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms;
-
- cl_object_attr_lock(obj);
- /* Must get the value under the lock to avoid possible races. */
- old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
- /* Update the kms. Need to loop all granted locks.
- * Not a problem for the client */
- attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
-
- cl_object_attr_set(env, obj, attr, CAT_KMS);
- cl_object_attr_unlock(obj);
- }
- unlock_res_and_lock(dlmlock);
-
- /* release a reference taken in osc_lock_upcall0(). */
- LASSERT(olck->ols_has_ref);
- lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
- LDLM_LOCK_RELEASE(dlmlock);
- olck->ols_has_ref = 0;
-}
-
-static int osc_lock_unhold(struct osc_lock *ols)
-{
- int result = 0;
-
- if (ols->ols_hold) {
- ols->ols_hold = 0;
- result = osc_cancel_base(&ols->ols_handle,
- ols->ols_einfo.ei_mode);
- }
- return result;
-}
-
-static int osc_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(ols));
-
- switch (ols->ols_state) {
- case OLS_NEW:
- LASSERT(!ols->ols_hold);
- LASSERT(ols->ols_agl);
- return 0;
- case OLS_UPCALL_RECEIVED:
- osc_lock_unhold(ols);
- case OLS_ENQUEUED:
- LASSERT(!ols->ols_hold);
- osc_lock_detach(env, ols);
- ols->ols_state = OLS_NEW;
- return 0;
- case OLS_GRANTED:
- LASSERT(!ols->ols_glimpse);
- LASSERT(ols->ols_hold);
- /*
- * Move lock into OLS_RELEASED state before calling
- * osc_cancel_base() so that possible synchronous cancellation
- * (that always happens e.g., for liblustre) sees that lock is
- * released.
- */
- ols->ols_state = OLS_RELEASED;
- return osc_lock_unhold(ols);
- default:
- CERROR("Impossible state: %d\n", ols->ols_state);
- LBUG();
- }
-}
-
-static void osc_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(ols));
- /*
- * ->ols_hold can still be true at this point if, for example, a
- * thread that requested a lock was killed (and released a reference
- * to the lock), before reply from a server was received. In this case
- * lock is destroyed immediately after upcall.
- */
- osc_lock_unhold(ols);
- LASSERT(ols->ols_lock == NULL);
- LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
- atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
-
- OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
-}
-
-static void osc_lock_build_policy(const struct lu_env *env,
- const struct cl_lock *lock,
- ldlm_policy_data_t *policy)
-{
- const struct cl_lock_descr *d = &lock->cll_descr;
-
- osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
- policy->l_extent.gid = d->cld_gid;
-}
-
-static __u64 osc_enq2ldlm_flags(__u32 enqflags)
-{
- __u64 result = 0;
-
- LASSERT((enqflags & ~CEF_MASK) == 0);
-
- if (enqflags & CEF_NONBLOCK)
- result |= LDLM_FL_BLOCK_NOWAIT;
- if (enqflags & CEF_ASYNC)
- result |= LDLM_FL_HAS_INTENT;
- if (enqflags & CEF_DISCARD_DATA)
- result |= LDLM_FL_AST_DISCARD_DATA;
- return result;
-}
-
-/**
- * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
- * pointers. Initialized in osc_init().
- */
-spinlock_t osc_ast_guard;
-
-static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
-{
- struct osc_lock *olck;
-
- lock_res_and_lock(dlm_lock);
- spin_lock(&osc_ast_guard);
- olck = dlm_lock->l_ast_data;
- if (olck != NULL) {
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- /*
- * If osc_lock holds a reference on ldlm lock, return it even
- * when cl_lock is in CLS_FREEING state. This way
- *
- * osc_ast_data_get(dlmlock) == NULL
- *
- * guarantees that all osc references on dlmlock were
- * released. osc_dlm_blocking_ast0() relies on that.
- */
- if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
- cl_lock_get_trust(lock);
- lu_ref_add_atomic(&lock->cll_reference,
- "ast", current);
- } else
- olck = NULL;
- }
- spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(dlm_lock);
- return olck;
-}
-
-static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
-{
- struct cl_lock *lock;
-
- lock = olck->ols_cl.cls_lock;
- lu_ref_del(&lock->cll_reference, "ast", current);
- cl_lock_put(env, lock);
-}
-
-/**
- * Updates object attributes from a lock value block (lvb) received together
- * with the DLM lock reply from the server. Copy of osc_update_enqueue()
- * logic.
- *
- * This can be optimized to not update attributes when lock is a result of a
- * local match.
- *
- * Called under lock and resource spin-locks.
- */
-static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
- int rc)
-{
- struct ost_lvb *lvb;
- struct cl_object *obj;
- struct lov_oinfo *oinfo;
- struct cl_attr *attr;
- unsigned valid;
-
- if (!(olck->ols_flags & LDLM_FL_LVB_READY))
- return;
-
- lvb = &olck->ols_lvb;
- obj = olck->ols_cl.cls_obj;
- oinfo = cl2osc(obj)->oo_oinfo;
- attr = &osc_env_info(env)->oti_attr;
- valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
- cl_lvb2attr(attr, lvb);
-
- cl_object_attr_lock(obj);
- if (rc == 0) {
- struct ldlm_lock *dlmlock;
- __u64 size;
-
- dlmlock = olck->ols_lock;
- LASSERT(dlmlock != NULL);
-
- /* re-grab LVB from a dlm lock under DLM spin-locks. */
- *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
- size = lvb->lvb_size;
- /* Extend KMS up to the end of this lock and no further
- * A lock on [x,y] means a KMS of up to y + 1 bytes! */
- if (size > dlmlock->l_policy_data.l_extent.end)
- size = dlmlock->l_policy_data.l_extent.end + 1;
- if (size >= oinfo->loi_kms) {
- LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu, kms=%llu",
- lvb->lvb_size, size);
- valid |= CAT_KMS;
- attr->cat_kms = size;
- } else {
- LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu; leaving kms=%llu, end=%llu",
- lvb->lvb_size, oinfo->loi_kms,
- dlmlock->l_policy_data.l_extent.end);
- }
- ldlm_lock_allow_match_locked(dlmlock);
- } else if (rc == -ENAVAIL && olck->ols_glimpse) {
- CDEBUG(D_INODE, "glimpsed, setting rss=%llu; leaving kms=%llu\n",
- lvb->lvb_size, oinfo->loi_kms);
- } else
- valid = 0;
-
- if (valid != 0)
- cl_object_attr_set(env, obj, attr, valid);
-
- cl_object_attr_unlock(obj);
-}
-
-/**
- * Called when a lock is granted, from an upcall (when server returned a
- * granted lock), or from completion AST, when server returned a blocked lock.
- *
- * Called under lock and resource spin-locks, that are released temporarily
- * here.
- */
-static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
- struct ldlm_lock *dlmlock, int rc)
-{
- struct ldlm_extent *ext;
- struct cl_lock *lock;
- struct cl_lock_descr *descr;
-
- LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
-
- if (olck->ols_state < OLS_GRANTED) {
- lock = olck->ols_cl.cls_lock;
- ext = &dlmlock->l_policy_data.l_extent;
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_obj = lock->cll_descr.cld_obj;
-
- /* XXX check that ->l_granted_mode is valid. */
- descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, ext->start);
- descr->cld_end = cl_index(descr->cld_obj, ext->end);
- descr->cld_gid = ext->gid;
- /*
- * tell upper layers the extent of the lock that was actually
- * granted
- */
- olck->ols_state = OLS_GRANTED;
- osc_lock_lvb_update(env, olck, rc);
-
- /* release DLM spin-locks to allow cl_lock_{modify,signal}()
- * to take a semaphore on a parent lock. This is safe, because
- * spin-locks are needed to protect consistency of
- * dlmlock->l_*_mode and LVB, and we have finished processing
- * them. */
- unlock_res_and_lock(dlmlock);
- cl_lock_modify(env, lock, descr);
- cl_lock_signal(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- lock_res_and_lock(dlmlock);
- }
-}
-
-static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
-
-{
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
- LASSERT(dlmlock != NULL);
-
- lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(dlmlock->l_ast_data == olck);
- LASSERT(olck->ols_lock == NULL);
- olck->ols_lock = dlmlock;
- spin_unlock(&osc_ast_guard);
-
- /*
- * Lock might be not yet granted. In this case, completion ast
- * (osc_ldlm_completion_ast()) comes later and finishes lock
- * granting.
- */
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
- osc_lock_granted(env, olck, dlmlock, 0);
- unlock_res_and_lock(dlmlock);
-
- /*
- * osc_enqueue_interpret() decrefs asynchronous locks, counter
- * this.
- */
- ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
- olck->ols_hold = 1;
-
- /* lock reference taken by ldlm_handle2lock_long() is owned by
- * osc_lock and released in osc_lock_detach() */
- lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
- olck->ols_has_ref = 1;
-}
-
-/**
- * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
- * received from a server, or after osc_enqueue_base() matched a local DLM
- * lock.
- */
-static int osc_lock_upcall(void *cookie, int errcode)
-{
- struct osc_lock *olck = cookie;
- struct cl_lock_slice *slice = &olck->ols_cl;
- struct cl_lock *lock = slice->cls_lock;
- struct lu_env *env;
- struct cl_env_nest nest;
-
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- int rc;
-
- cl_lock_mutex_get(env, lock);
-
- LASSERT(lock->cll_state >= CLS_QUEUING);
- if (olck->ols_state == OLS_ENQUEUED) {
- olck->ols_state = OLS_UPCALL_RECEIVED;
- rc = ldlm_error2errno(errcode);
- } else if (olck->ols_state == OLS_CANCELLED) {
- rc = -EIO;
- } else {
- CERROR("Impossible state: %d\n", olck->ols_state);
- LBUG();
- }
- if (rc) {
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock(&olck->ols_handle);
- if (dlmlock != NULL) {
- lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(olck->ols_lock == NULL);
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
- ldlm_lock_fail_match_locked(dlmlock);
- unlock_res_and_lock(dlmlock);
- LDLM_LOCK_PUT(dlmlock);
- }
- } else {
- if (olck->ols_glimpse)
- olck->ols_glimpse = 0;
- osc_lock_upcall0(env, olck);
- }
-
- /* Error handling, some errors are tolerable. */
- if (olck->ols_locklessable && rc == -EUSERS) {
- /* This is a tolerable error, turn this lock into
- * lockless lock.
- */
- osc_object_set_contended(cl2osc(slice->cls_obj));
- LASSERT(slice->cls_ops == &osc_lock_ops);
-
- /* Change this lock to ldlmlock-less lock. */
- osc_lock_to_lockless(env, olck, 1);
- olck->ols_state = OLS_GRANTED;
- rc = 0;
- } else if (olck->ols_glimpse && rc == -ENAVAIL) {
- osc_lock_lvb_update(env, olck, rc);
- cl_lock_delete(env, lock);
- /* Hide the error. */
- rc = 0;
- }
-
- if (rc == 0) {
-			/* For AGL case, the RPC sponsor may exit the cl_lock
- * processing without wait() called before related OSC
- * lock upcall(). So update the lock status according
- * to the enqueue result inside AGL upcall(). */
- if (olck->ols_agl) {
- lock->cll_flags |= CLF_FROM_UPCALL;
- cl_wait_try(env, lock);
- lock->cll_flags &= ~CLF_FROM_UPCALL;
- if (!olck->ols_glimpse)
- olck->ols_agl = 0;
- }
- cl_lock_signal(env, lock);
- /* del user for lock upcall cookie */
- cl_unuse_try(env, lock);
- } else {
- /* del user for lock upcall cookie */
- cl_lock_user_del(env, lock);
- cl_lock_error(env, lock, rc);
- }
-
- /* release cookie reference, acquired by osc_lock_enqueue() */
- cl_lock_hold_release(env, lock, "upcall", lock);
- cl_lock_mutex_put(env, lock);
-
- lu_ref_del(&lock->cll_reference, "upcall", lock);
- /* This maybe the last reference, so must be called after
- * cl_lock_mutex_put(). */
- cl_lock_put(env, lock);
-
- cl_env_nested_put(&nest, env);
- } else {
- /* should never happen, similar to osc_ldlm_blocking_ast(). */
- LBUG();
- }
- return errcode;
-}
-
-/**
- * Core of osc_dlm_blocking_ast() logic.
- */
-static void osc_lock_blocking(const struct lu_env *env,
- struct ldlm_lock *dlmlock,
- struct osc_lock *olck, int blocking)
-{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
-
- LASSERT(olck->ols_lock == dlmlock);
- CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
- LASSERT(!osc_lock_is_lockless(olck));
-
- /*
- * Lock might be still addref-ed here, if e.g., blocking ast
- * is sent for a failed lock.
- */
- osc_lock_unhold(olck);
-
- if (blocking && olck->ols_state < OLS_BLOCKED)
- /*
- * Move osc_lock into OLS_BLOCKED before canceling the lock,
- * because it recursively re-enters osc_lock_blocking(), with
- * the state set to OLS_CANCELLED.
- */
- olck->ols_state = OLS_BLOCKED;
- /*
- * cancel and destroy lock at least once no matter how blocking ast is
- * entered (see comment above osc_ldlm_blocking_ast() for use
- * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
- */
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
-}
-
-/**
- * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
- * and ldlm_lock caches.
- */
-static int osc_dlm_blocking_ast0(const struct lu_env *env,
- struct ldlm_lock *dlmlock,
- void *data, int flag)
-{
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int cancel;
-
- LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
-
- cancel = 0;
- olck = osc_ast_data_get(dlmlock);
- if (olck != NULL) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- if (olck->ols_ast_wait) {
- /* wake up osc_lock_use() */
- cl_lock_signal(env, lock);
- olck->ols_ast_wait = 0;
- }
- /*
- * Lock might have been canceled while this thread was
- * sleeping for lock mutex, but olck is pinned in memory.
- */
- if (olck == dlmlock->l_ast_data) {
- /*
- * NOTE: DLM sends blocking AST's for failed locks
- * (that are still in pre-OLS_GRANTED state)
- * too, and they have to be canceled otherwise
- * DLM lock is never destroyed and stuck in
- * the memory.
- *
- * Alternatively, ldlm_cli_cancel() can be
- * called here directly for osc_locks with
- * ols_state < OLS_GRANTED to maintain an
- * invariant that ->clo_cancel() is only called
- * for locks that were granted.
- */
- LASSERT(data == olck);
- osc_lock_blocking(env, dlmlock,
- olck, flag == LDLM_CB_BLOCKING);
- } else
- cancel = 1;
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- } else
- /*
- * DLM lock exists, but there is no cl_lock attached to it.
- * This is a `normal' race. cl_object and its cl_lock's can be
- * removed by memory pressure, together with all pages.
- */
- cancel = (flag == LDLM_CB_BLOCKING);
-
- if (cancel) {
- struct lustre_handle *lockh;
-
- lockh = &osc_env_info(env)->oti_handle;
- ldlm_lock2handle(dlmlock, lockh);
- result = ldlm_cli_cancel(lockh, LCF_ASYNC);
- } else
- result = 0;
- return result;
-}
-
-/**
- * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
- * some other lock, or is canceled. This function is installed as a
- * ldlm_lock::l_blocking_ast() for client extent locks.
- *
- * Control flow is tricky, because ldlm uses the same call-back
- * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
- *
- * \param dlmlock lock for which ast occurred.
- *
- * \param new description of a conflicting lock in case of blocking ast.
- *
- * \param data value of dlmlock->l_ast_data
- *
- * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
- * cancellation and blocking ast's.
- *
- * Possible use cases:
- *
- * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
- * lock due to lock lru pressure, or explicit user request to purge
- * locks.
- *
- * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
- * us that dlmlock conflicts with another lock that some client is
- * enqueing. Lock is canceled.
- *
- * - cl_lock_cancel() is called. osc_lock_cancel() calls
- * ldlm_cli_cancel() that calls
- *
- * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
- *
- * recursively entering osc_ldlm_blocking_ast().
- *
- * - client cancels lock voluntary (e.g., as a part of early cancellation):
- *
- * cl_lock_cancel()->
- * osc_lock_cancel()->
- * ldlm_cli_cancel()->
- * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
- *
- */
-static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
- struct ldlm_lock_desc *new, void *data,
- int flag)
-{
- struct lu_env *env;
- struct cl_env_nest nest;
- int result;
-
- /*
- * This can be called in the context of outer IO, e.g.,
- *
- * cl_enqueue()->...
- * ->osc_enqueue_base()->...
- * ->ldlm_prep_elc_req()->...
- * ->ldlm_cancel_callback()->...
- * ->osc_ldlm_blocking_ast()
- *
- * new environment has to be created to not corrupt outer context.
- */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- cl_env_nested_put(&nest, env);
- } else {
- result = PTR_ERR(env);
- /*
- * XXX This should never happen, as cl_lock is
- * stuck. Pre-allocated environment a la vvp_inode_fini_env
- * should be used.
- */
- LBUG();
- }
- if (result != 0) {
- if (result == -ENODATA)
- result = 0;
- else
- CERROR("BAST failed: %d\n", result);
- }
- return result;
-}
-
-static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
- __u64 flags, void *data)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int dlmrc;
-
- /* first, do dlm part of the work */
- dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
- /* then, notify cl_lock */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- olck = osc_ast_data_get(dlmlock);
- if (olck != NULL) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- /*
- * ldlm_handle_cp_callback() copied LVB from request
- * to lock->l_lvb_data, store it in osc_lock.
- */
- LASSERT(dlmlock->l_lvb_data != NULL);
- lock_res_and_lock(dlmlock);
- olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
- if (olck->ols_lock == NULL) {
- /*
- * upcall (osc_lock_upcall()) hasn't yet been
- * called. Do nothing now, upcall will bind
- * olck to dlmlock and signal the waiters.
- *
- * This maintains an invariant that osc_lock
- * and ldlm_lock are always bound when
- * osc_lock is in OLS_GRANTED state.
- */
- } else if (dlmlock->l_granted_mode ==
- dlmlock->l_req_mode) {
- osc_lock_granted(env, olck, dlmlock, dlmrc);
- }
- unlock_res_and_lock(dlmlock);
-
- if (dlmrc != 0) {
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "dlmlock returned %d\n", dlmrc);
- cl_lock_error(env, lock, dlmrc);
- }
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- result = 0;
- } else
- result = -ELDLM_NO_LOCK_DATA;
- cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- return dlmrc ?: result;
-}
-
-static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
-{
- struct ptlrpc_request *req = data;
- struct osc_lock *olck;
- struct cl_lock *lock;
- struct cl_object *obj;
- struct cl_env_nest nest;
- struct lu_env *env;
- struct ost_lvb *lvb;
- struct req_capsule *cap;
- int result;
-
- LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
-
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
-		/* osc_ast_data_get() has to go after the environment is
-		 * allocated, because osc_ast_data() acquires a
-		 * reference to a lock, and that reference can only be
-		 * released with an environment.
- */
- olck = osc_ast_data_get(dlmlock);
- if (olck != NULL) {
- lock = olck->ols_cl.cls_lock;
- /* Do not grab the mutex of cl_lock for glimpse.
- * See LU-1274 for details.
- * BTW, it's okay for cl_lock to be cancelled during
- * this period because server can handle this race.
- * See ldlm_server_glimpse_ast() for details.
- * cl_lock_mutex_get(env, lock); */
- cap = &req->rq_pill;
- req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
- req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- result = req_capsule_server_pack(cap);
- if (result == 0) {
- lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
- obj = lock->cll_descr.cld_obj;
- result = cl_object_glimpse(env, obj, lvb);
- }
- if (!exp_connect_lvb_type(req->rq_export))
- req_capsule_shrink(&req->rq_pill,
- &RMF_DLM_LVB,
- sizeof(struct ost_lvb_v1),
- RCL_SERVER);
- osc_ast_data_put(env, olck);
- } else {
- /*
- * These errors are normal races, so we don't want to
- * fill the console with messages by calling
- * ptlrpc_error()
- */
- lustre_pack_reply(req, 1, NULL, NULL);
- result = -ELDLM_NO_LOCK_DATA;
- }
- cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- req->rq_status = result;
- return result;
-}
-
-static unsigned long osc_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- /*
-	 * don't need to grab coh_page_guard since we don't care about the
-	 * exact number of pages.
- */
- return cl_object_header(slice->cls_obj)->coh_pages;
-}
-
-static void osc_lock_build_einfo(const struct lu_env *env,
- const struct cl_lock *clock,
- struct osc_lock *lock,
- struct ldlm_enqueue_info *einfo)
-{
- enum cl_lock_mode mode;
-
- mode = clock->cll_descr.cld_mode;
- if (mode == CLM_PHANTOM)
- /*
- * For now, enqueue all glimpse locks in read mode. In the
- * future, client might choose to enqueue LCK_PW lock for
- * glimpse on a file opened for write.
- */
- mode = CLM_READ;
-
- einfo->ei_type = LDLM_EXTENT;
- einfo->ei_mode = osc_cl_lock2ldlm(mode);
- einfo->ei_cb_bl = osc_ldlm_blocking_ast;
- einfo->ei_cb_cp = osc_ldlm_completion_ast;
- einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
- einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
-}
-
-/**
- * Determine if the lock should be converted into a lockless lock.
- *
- * Steps to check:
- * - whether the lock has an explicit requirement for a non-lockless lock;
- * - the io lock request type ci_lockreq;
- * - send the enqueue RPC to the OST to make the further decision;
- * - special treatment for a lockless truncate lock.
- *
- * Additional policy can be implemented here, e.g., never do lockless-io
- * for large extents.
- */
-static void osc_lock_to_lockless(const struct lu_env *env,
- struct osc_lock *ols, int force)
-{
- struct cl_lock_slice *slice = &ols->ols_cl;
-
- LASSERT(ols->ols_state == OLS_NEW ||
- ols->ols_state == OLS_UPCALL_RECEIVED);
-
- if (force) {
- ols->ols_locklessable = 1;
- slice->cls_ops = &osc_lock_lockless_ops;
- } else {
- struct osc_io *oio = osc_env_io(env);
- struct cl_io *io = oio->oi_cl.cis_io;
- struct cl_object *obj = slice->cls_obj;
- struct osc_object *oob = cl2osc(obj);
- const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
- struct obd_connect_data *ocd;
-
- LASSERT(io->ci_lockreq == CILR_MANDATORY ||
- io->ci_lockreq == CILR_MAYBE ||
- io->ci_lockreq == CILR_NEVER);
-
- ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
- ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
- (io->ci_lockreq == CILR_MAYBE) &&
- (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
- if (io->ci_lockreq == CILR_NEVER ||
- /* lockless IO */
- (ols->ols_locklessable && osc_object_is_contended(oob)) ||
- /* lockless truncate */
- (cl_io_is_trunc(io) &&
- (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
- osd->od_lockless_truncate)) {
- ols->ols_locklessable = 1;
- slice->cls_ops = &osc_lock_lockless_ops;
- }
- }
- LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
-}
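As a rough illustration of the decision above, here is a userspace sketch of the same predicate. The structures and flag names below are simplified stand-ins for the kernel's (CILR_MAYBE, OBD_CONNECT_SRVLOCK, OBD_CONNECT_TRUNCLOCK and friends), and a few corner cases are omitted; it is only meant to make the branching explicit, not to reproduce osc_lock_to_lockless().

#include <stdio.h>
#include <stdbool.h>

struct demo_req {
	bool force;		/* caller insists on lockless (CEF_NEVER path) */
	bool lockreq_maybe;	/* stand-in for io->ci_lockreq == CILR_MAYBE */
	bool is_setattr;
	bool is_trunc;
};

struct demo_state {
	bool srv_supports_srvlock;	/* stand-in for OBD_CONNECT_SRVLOCK */
	bool srv_supports_trunclock;	/* stand-in for OBD_CONNECT_TRUNCLOCK */
	bool object_contended;
	bool lockless_truncate_enabled;
};

static bool demo_go_lockless(const struct demo_req *r,
			     const struct demo_state *s)
{
	bool locklessable;

	if (r->force)
		return true;

	/* lockless IO: allowed by the request and the server, and the
	 * object is currently contended */
	locklessable = !r->is_setattr && r->lockreq_maybe &&
		       s->srv_supports_srvlock;
	if (locklessable && s->object_contended)
		return true;

	/* lockless truncate: server and client both opted in */
	return r->is_trunc && s->srv_supports_trunclock &&
	       s->lockless_truncate_enabled;
}

int main(void)
{
	struct demo_req io = { false, true, false, false };
	struct demo_state st = { true, true, true, true };

	printf("contended, MAYBE request: %d\n", demo_go_lockless(&io, &st)); /* 1 */
	st.object_contended = false;
	printf("uncontended:              %d\n", demo_go_lockless(&io, &st)); /* 0 */
	return 0;
}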
-
-static int osc_lock_compatible(const struct osc_lock *qing,
- const struct osc_lock *qed)
-{
- enum cl_lock_mode qing_mode;
- enum cl_lock_mode qed_mode;
-
- qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
- if (qed->ols_glimpse &&
- (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
- return 1;
-
- qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
- return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
-}
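A minimal userspace restatement of the compatibility rule above, with simplified names; it only makes the two branches of the predicate explicit (glimpse locks, and read-vs-read) and is not the kernel code.

#include <stdio.h>
#include <stdbool.h>

enum mode { MODE_READ, MODE_WRITE };

struct demo_lock {
	enum mode mode;
	bool glimpse;
	bool upcall_received;	/* stand-in for ols_state >= OLS_UPCALL_RECEIVED */
};

static bool demo_compatible(const struct demo_lock *enqueuing,
			    const struct demo_lock *queued)
{
	/* a queued glimpse lock does not conflict once its upcall has been
	 * received, or when the new enqueue is a read */
	if (queued->glimpse &&
	    (queued->upcall_received || enqueuing->mode == MODE_READ))
		return true;
	/* otherwise only two reads are compatible */
	return enqueuing->mode == MODE_READ && queued->mode == MODE_READ;
}

int main(void)
{
	struct demo_lock rd = { MODE_READ, false, false };
	struct demo_lock wr = { MODE_WRITE, false, false };
	struct demo_lock gl = { MODE_READ, true, true };

	printf("read  vs read:    %d\n", demo_compatible(&rd, &rd)); /* 1 */
	printf("write vs read:    %d\n", demo_compatible(&wr, &rd)); /* 0 */
	printf("write vs glimpse: %d\n", demo_compatible(&wr, &gl)); /* 1 */
	return 0;
}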
-
-/**
- * Cancel all conflicting locks and wait for them to be destroyed.
- *
- * This function is used for two purposes:
- *
- * - early cancel all conflicting locks before starting IO, and
- *
- * - guarantee that pages added to the page cache by lockless IO are never
- * covered by locks other than lockless IO lock, and, hence, are not
- * visible to other threads.
- */
-static int osc_lock_enqueue_wait(const struct lu_env *env,
- const struct osc_lock *olck)
-{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- struct cl_lock_descr *descr = &lock->cll_descr;
- struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
- struct cl_lock *scan;
- struct cl_lock *conflict = NULL;
- int lockless = osc_lock_is_lockless(olck);
- int rc = 0;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
-	/* let a glimpse lock enqueue anyway, because we don't actually
-	 * need to cancel any conflicting locks. */
- if (olck->ols_glimpse)
- return 0;
-
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- struct cl_lock_descr *cld = &scan->cll_descr;
- const struct osc_lock *scan_ols;
-
- if (scan == lock)
- break;
-
- if (scan->cll_state < CLS_QUEUING ||
- scan->cll_state == CLS_FREEING ||
- cld->cld_start > descr->cld_end ||
- cld->cld_end < descr->cld_start)
- continue;
-
- /* overlapped and living locks. */
-
- /* We're not supposed to give up group lock. */
- if (scan->cll_descr.cld_mode == CLM_GROUP) {
- LASSERT(descr->cld_mode != CLM_GROUP ||
- descr->cld_gid != scan->cll_descr.cld_gid);
- continue;
- }
-
- scan_ols = osc_lock_at(scan);
-
- /* We need to cancel the compatible locks if we're enqueuing
- * a lockless lock, for example:
- * imagine that client has PR lock on [0, 1000], and thread T0
- * is doing lockless IO in [500, 1500] region. Concurrent
- * thread T1 can see lockless data in [500, 1000], which is
- * wrong, because these data are possibly stale. */
- if (!lockless && osc_lock_compatible(olck, scan_ols))
- continue;
-
- cl_lock_get_trust(scan);
- conflict = scan;
- break;
- }
- spin_unlock(&hdr->coh_lock_guard);
-
- if (conflict) {
- if (lock->cll_descr.cld_mode == CLM_GROUP) {
- /* we want a group lock but a previous lock request
- * conflicts, we do not wait but return 0 so the
-			 * request is sent to the server
- */
- CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n",
- lock, conflict);
- cl_lock_put(env, conflict);
- rc = 0;
- } else {
- CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
- lock, conflict);
- LASSERT(lock->cll_conflict == NULL);
- lu_ref_add(&conflict->cll_reference, "cancel-wait",
- lock);
- lock->cll_conflict = conflict;
- rc = CLO_WAIT;
- }
- }
- return rc;
-}
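The scan loop above skips locks whose extents do not intersect the one being enqueued. A small sketch of that inclusive-range overlap test follows (plain C, names invented), using the [0, 1000] / [500, 1500] extents from the comment in the loop as the example.

#include <stdio.h>
#include <stdbool.h>

/* Two inclusive ranges [a_start, a_end] and [b_start, b_end] overlap
 * unless one ends before the other starts. */
static bool extents_overlap(unsigned long a_start, unsigned long a_end,
			    unsigned long b_start, unsigned long b_end)
{
	return !(a_start > b_end || a_end < b_start);
}

int main(void)
{
	/* PR lock on [0, 1000] vs lockless IO on [500, 1500]: overlap */
	printf("%d\n", extents_overlap(0, 1000, 500, 1500)); /* 1 */
	/* disjoint extents: nothing to cancel */
	printf("%d\n", extents_overlap(0, 499, 500, 1500));  /* 0 */
	return 0;
}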
-
-/**
- * Implementation of cl_lock_operations::clo_enqueue() method for osc
- * layer. This initiates ldlm enqueue:
- *
- * - cancels conflicting locks early (osc_lock_enqueue_wait());
- *
- * - calls osc_enqueue_base() to do actual enqueue.
- *
- * osc_enqueue_base() is supplied with an upcall function that is executed
- * when lock is received either after a local cached ldlm lock is matched, or
- * when a reply from the server is received.
- *
- * This function does not wait for the network communication to complete.
- */
-static int osc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = ols->ols_cl.cls_lock;
- int result;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERTF(ols->ols_state == OLS_NEW,
- "Impossible state: %d\n", ols->ols_state);
-
- LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, ols);
-
- result = osc_lock_enqueue_wait(env, ols);
- if (result == 0) {
- if (!osc_lock_is_lockless(ols)) {
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
-
-			/* the lock will be passed as the upcall cookie;
-			 * hold a reference to prevent it from being released. */
- cl_lock_hold_add(env, lock, "upcall", lock);
- /* a user for lock also */
- cl_lock_user_add(env, lock);
- ols->ols_state = OLS_ENQUEUED;
-
- /*
- * XXX: this is possible blocking point as
- * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
- * LDLM_CP_CALLBACK.
- */
- ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
- osc_lock_build_policy(env, lock, policy);
- result = osc_enqueue_base(osc_export(obj), resname,
- &ols->ols_flags, policy,
- &ols->ols_lvb,
- obj->oo_oinfo->loi_kms_valid,
- osc_lock_upcall,
- ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1, ols->ols_agl);
- if (result != 0) {
- cl_lock_user_del(env, lock);
- cl_lock_unhold(env, lock, "upcall", lock);
- if (unlikely(result == -ECANCELED)) {
- ols->ols_state = OLS_NEW;
- result = 0;
- }
- }
- } else {
- ols->ols_state = OLS_GRANTED;
- ols->ols_owner = osc_env_io(env);
- }
- }
- LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
- return result;
-}
-
-static int osc_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
-
- LINVRNT(osc_lock_invariant(olck));
-
- if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
- if (olck->ols_flags & LDLM_FL_LVB_READY) {
- return 0;
- } else if (olck->ols_agl) {
- if (lock->cll_flags & CLF_FROM_UPCALL)
- /* It is from enqueue RPC reply upcall for
- * updating state. Do not re-enqueue. */
- return -ENAVAIL;
- olck->ols_state = OLS_NEW;
- } else {
- LASSERT(lock->cll_error);
- return lock->cll_error;
- }
- }
-
- if (olck->ols_state == OLS_NEW) {
- int rc;
-
- LASSERT(olck->ols_agl);
- olck->ols_agl = 0;
- olck->ols_flags &= ~LDLM_FL_BLOCK_NOWAIT;
- rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
- if (rc != 0)
- return rc;
- else
- return CLO_REENQUEUED;
- }
-
- LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
- lock->cll_error == 0, olck->ols_lock != NULL));
-
- return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_use() method that pins cached
- * lock.
- */
-static int osc_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- int rc;
-
- LASSERT(!olck->ols_hold);
-
- /*
- * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
- * flag is not set. This protects us from a concurrent blocking ast.
- */
- rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
- if (rc == 0) {
- olck->ols_hold = 1;
- olck->ols_state = OLS_GRANTED;
- } else {
- struct cl_lock *lock;
-
- /*
- * Lock is being cancelled somewhere within
- * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
- * set, but osc_ldlm_blocking_ast() hasn't yet acquired
- * cl_lock mutex.
- */
- lock = slice->cls_lock;
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(lock->cll_users > 0);
- /* set a flag for osc_dlm_blocking_ast0() to signal the
- * lock.*/
- olck->ols_ast_wait = 1;
- rc = CLO_WAIT;
- }
- return rc;
-}
-
-static int osc_lock_flush(struct osc_lock *ols, int discard)
-{
- struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct cl_env_nest nest;
- struct lu_env *env;
- int result = 0;
-
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
- struct cl_lock_descr *descr = &lock->cll_descr;
- int rc = 0;
-
- if (descr->cld_mode >= CLM_WRITE) {
- result = osc_cache_writeback_range(env, obj,
- descr->cld_start, descr->cld_end,
- 1, discard);
- LDLM_DEBUG(ols->ols_lock,
- "lock %p: %d pages were %s.\n", lock, result,
- discard ? "discarded" : "written");
- if (result > 0)
- result = 0;
- }
-
- rc = cl_lock_discard_pages(env, lock);
- if (result == 0 && rc < 0)
- result = rc;
-
- cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- if (result == 0) {
- ols->ols_flush = 1;
- LINVRNT(!osc_lock_has_pages(ols));
- }
- return result;
-}
-
-/**
- * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
- * called (as part of cl_lock_cancel()) when a lock is canceled either
- * voluntarily (LRU pressure, early cancellation, umount, etc.) or due to a
- * conflict with some other lock somewhere in the cluster. This function does the
- * following:
- *
- * - invalidates all pages protected by this lock (after sending dirty
- * ones to the server, as necessary);
- *
- * - decref's underlying ldlm lock;
- *
- * - cancels ldlm lock (ldlm_cli_cancel()).
- */
-static void osc_lock_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct cl_lock *lock = slice->cls_lock;
- struct osc_lock *olck = cl2osc_lock(slice);
- struct ldlm_lock *dlmlock = olck->ols_lock;
- int result = 0;
- int discard;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LINVRNT(osc_lock_invariant(olck));
-
- if (dlmlock != NULL) {
- int do_cancel;
-
- discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
- if (olck->ols_state >= OLS_GRANTED)
- result = osc_lock_flush(olck, discard);
- osc_lock_unhold(olck);
-
- lock_res_and_lock(dlmlock);
- /* Now that we're the only user of dlm read/write reference,
-		 * normally ->l_readers + ->l_writers should be zero.
- * However, there is a corner case.
- * See bug 18829 for details.*/
- do_cancel = (dlmlock->l_readers == 0 &&
- dlmlock->l_writers == 0);
- dlmlock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(dlmlock);
- if (do_cancel)
- result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
- if (result < 0)
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "lock %p cancel failure with error(%d)\n",
- lock, result);
- }
- olck->ols_state = OLS_CANCELLED;
- olck->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_detach(env, olck);
-}
-
-static int osc_lock_has_pages(struct osc_lock *olck)
-{
- return 0;
-}
-
-static void osc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck;
-
- olck = cl2osc_lock(slice);
- if (olck->ols_glimpse) {
- LASSERT(!olck->ols_hold);
- LASSERT(!olck->ols_lock);
- return;
- }
-
- LINVRNT(osc_lock_invariant(olck));
- LINVRNT(!osc_lock_has_pages(olck));
-
- osc_lock_unhold(olck);
- osc_lock_detach(env, olck);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for osc layer.
- *
- * Maintains osc_lock::ols_owner field.
- *
- * This assumes that lock always enters CLS_HELD (from some other state) in
- * the same IO context as one that requested the lock. This should not be a
- * problem, because context is by definition shared by all activity pertaining
- * to the same high-level IO.
- */
-static void osc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- /*
- * XXX multiple io contexts can use the lock at the same time.
- */
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
-
- LASSERT(lock->ols_owner == NULL);
- lock->ols_owner = oio;
- } else if (state != CLS_HELD)
- lock->ols_owner = NULL;
-}
-
-static int osc_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- /*
- * XXX print ldlm lock and einfo properly.
- */
- (*p)(env, cookie, "%p %#16llx %#llx %d %p ",
- lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
- lock->ols_state, lock->ols_owner);
- osc_lvb_print(env, cookie, p, &lock->ols_lvb);
- return 0;
-}
-
-static int osc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- if (need->cld_enq_flags & CEF_NEVER)
- return 0;
-
- if (ols->ols_state >= OLS_CANCELLED)
- return 0;
-
- if (need->cld_mode == CLM_PHANTOM) {
- if (ols->ols_agl)
- return !(ols->ols_state > OLS_RELEASED);
-
- /*
- * Note: the QUEUED lock can't be matched here, otherwise
-		 * it might cause deadlocks.
-		 * For example, in a read process:
- * P1: enqueued read lock, create sublock1
- * P2: enqueued write lock, create sublock2(conflicted
- * with sublock1).
- * P1: Grant read lock.
- * P1: enqueued glimpse lock(with holding sublock1_read),
- * matched with sublock2, waiting sublock2 to be granted.
- * But sublock2 can not be granted, because P1
- * will not release sublock1. Bang!
- */
- if (ols->ols_state < OLS_GRANTED ||
- ols->ols_state > OLS_RELEASED)
- return 0;
- } else if (need->cld_enq_flags & CEF_MUST) {
- /*
-		 * If the lock has never been enqueued, it can't be matched,
-		 * because the enqueue process brings in information that is
-		 * used to determine things such as lockless, CEF_MUST, etc.
- */
- if (ols->ols_state < OLS_UPCALL_RECEIVED &&
- ols->ols_locklessable)
- return 0;
- }
- return 1;
-}
-
-static const struct cl_lock_operations osc_lock_ops = {
- .clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_wait,
- .clo_unuse = osc_lock_unuse,
- .clo_use = osc_lock_use,
- .clo_delete = osc_lock_delete,
- .clo_state = osc_lock_state,
- .clo_cancel = osc_lock_cancel,
- .clo_weigh = osc_lock_weigh,
- .clo_print = osc_lock_print,
- .clo_fits_into = osc_lock_fits_into,
-};
-
-static int osc_lock_lockless_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = slice->cls_lock;
-
- LASSERT(ols->ols_state == OLS_GRANTED);
- LINVRNT(osc_lock_invariant(ols));
-
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- return 0;
-}
-
-static void osc_lock_lockless_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
- int result;
-
- result = osc_lock_flush(ols, 0);
- if (result)
- CERROR("Pages for lockless lock %p were not purged(%d)\n",
- ols, result);
- ols->ols_state = OLS_CANCELLED;
-}
-
-static int osc_lock_lockless_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
-
- LINVRNT(osc_lock_invariant(olck));
- LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
-
- return lock->cll_error;
-}
-
-static void osc_lock_lockless_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
-
- LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
- lock->ols_owner = oio;
-
- /* set the io to be lockless if this lock is for io's
- * host object */
- if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
- oio->oi_lockless = 1;
- }
-}
-
-static int osc_lock_lockless_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- if (!(need->cld_enq_flags & CEF_NEVER))
- return 0;
-
- /* lockless lock should only be used by its owning io. b22147 */
- return (lock->ols_owner == osc_env_io(env));
-}
-
-static const struct cl_lock_operations osc_lock_lockless_ops = {
- .clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_lockless_wait,
- .clo_unuse = osc_lock_lockless_unuse,
- .clo_state = osc_lock_lockless_state,
- .clo_fits_into = osc_lock_lockless_fits_into,
- .clo_cancel = osc_lock_lockless_cancel,
- .clo_print = osc_lock_print
-};
-
-int osc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
-{
- struct osc_lock *clk;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, GFP_NOFS);
- if (clk != NULL) {
- __u32 enqflags = lock->cll_descr.cld_enq_flags;
-
- osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
- atomic_set(&clk->ols_pageref, 0);
- clk->ols_state = OLS_NEW;
-
- clk->ols_flags = osc_enq2ldlm_flags(enqflags);
- clk->ols_agl = !!(enqflags & CEF_AGL);
- if (clk->ols_agl)
- clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
- if (clk->ols_flags & LDLM_FL_HAS_INTENT)
- clk->ols_glimpse = 1;
-
- cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
-
- if (!(enqflags & CEF_MUST))
- /* try to convert this lock to a lockless lock */
- osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
- if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
- clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
-
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
- lock, clk, clk->ols_flags);
-
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
-{
- struct osc_lock *olock;
- int rc = 0;
-
- spin_lock(&osc_ast_guard);
- olock = dlm->l_ast_data;
- /*
- * there's a very rare race with osc_page_addref_lock(), but that
-	 * doesn't matter: in the worst case we fail to cancel a lock that
-	 * we actually could, which does no harm.
- */
- if (olock != NULL &&
- atomic_add_return(_PAGEREF_MAGIC,
- &olock->ols_pageref) != _PAGEREF_MAGIC) {
- atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
- rc = 1;
- }
- spin_unlock(&osc_ast_guard);
- return rc;
-}
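osc_dlm_lock_pageref() uses a bias trick: it atomically adds a magic constant to ols_pageref and compares the result with the magic itself; if they differ, some page still held a reference, so the bias is backed out and the lock is reported as busy. The userspace sketch below demonstrates only that add-and-compare idea; the magic value and names are made up here, and the interaction with osc_page_addref_lock() is not modelled.

#include <stdio.h>
#include <stdatomic.h>

#define DEMO_PAGEREF_MAGIC 0x10000000

/* Return 1 if any page still holds a reference on the lock, 0 otherwise.
 * The bias is only backed out when the counter turns out to be non-zero. */
static int lock_has_page_users(atomic_int *pageref)
{
	int biased = atomic_fetch_add(pageref, DEMO_PAGEREF_MAGIC)
		     + DEMO_PAGEREF_MAGIC;

	if (biased != DEMO_PAGEREF_MAGIC) {
		atomic_fetch_sub(pageref, DEMO_PAGEREF_MAGIC);
		return 1;	/* counter was non-zero: lock is pinned */
	}
	return 0;		/* counter was zero: safe to cancel */
}

int main(void)
{
	atomic_int idle = 0;
	atomic_int busy = 3;

	printf("idle lock in use?   %d\n", lock_has_page_users(&idle)); /* 0 */
	printf("pinned lock in use? %d\n", lock_has_page_users(&busy)); /* 1 */
	return 0;
}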
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
deleted file mode 100644
index c628a250ebd6..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_object for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "osc_cl_internal.h"
-
-/** \addtogroup osc
- * @{
- */
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static struct lu_object *osc2lu(struct osc_object *osc)
-{
- return &osc->oo_cl.co_lu;
-}
-
-static struct osc_object *lu2osc(const struct lu_object *obj)
-{
- LINVRNT(osc_is_object(obj));
- return container_of0(obj, struct osc_object, oo_cl.co_lu);
-}
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct osc_object *osc = lu2osc(obj);
- const struct cl_object_conf *cconf = lu2cl_conf(conf);
- int i;
-
- osc->oo_oinfo = cconf->u.coc_oinfo;
- spin_lock_init(&osc->oo_seatbelt);
- for (i = 0; i < CRT_NR; ++i)
- INIT_LIST_HEAD(&osc->oo_inflight[i]);
-
- INIT_LIST_HEAD(&osc->oo_ready_item);
- INIT_LIST_HEAD(&osc->oo_hp_ready_item);
- INIT_LIST_HEAD(&osc->oo_write_item);
- INIT_LIST_HEAD(&osc->oo_read_item);
-
- osc->oo_root.rb_node = NULL;
- INIT_LIST_HEAD(&osc->oo_hp_exts);
- INIT_LIST_HEAD(&osc->oo_urgent_exts);
- INIT_LIST_HEAD(&osc->oo_rpc_exts);
- INIT_LIST_HEAD(&osc->oo_reading_exts);
- atomic_set(&osc->oo_nr_reads, 0);
- atomic_set(&osc->oo_nr_writes, 0);
- spin_lock_init(&osc->oo_lock);
-
- cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
-
- return 0;
-}
-
-static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct osc_object *osc = lu2osc(obj);
- int i;
-
- for (i = 0; i < CRT_NR; ++i)
- LASSERT(list_empty(&osc->oo_inflight[i]));
-
- LASSERT(list_empty(&osc->oo_ready_item));
- LASSERT(list_empty(&osc->oo_hp_ready_item));
- LASSERT(list_empty(&osc->oo_write_item));
- LASSERT(list_empty(&osc->oo_read_item));
-
- LASSERT(osc->oo_root.rb_node == NULL);
- LASSERT(list_empty(&osc->oo_hp_exts));
- LASSERT(list_empty(&osc->oo_urgent_exts));
- LASSERT(list_empty(&osc->oo_rpc_exts));
- LASSERT(list_empty(&osc->oo_reading_exts));
- LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
- LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
-
- lu_object_fini(obj);
- OBD_SLAB_FREE_PTR(osc, osc_object_kmem);
-}
-
-int osc_lvb_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb)
-{
- return (*p)(env, cookie, "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu",
- lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
- lvb->lvb_ctime, lvb->lvb_blocks);
-}
-
-static int osc_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *obj)
-{
- struct osc_object *osc = lu2osc(obj);
- struct lov_oinfo *oinfo = osc->oo_oinfo;
- struct osc_async_rc *ar = &oinfo->loi_ar;
-
- (*p)(env, cookie, "id: " DOSTID " idx: %d gen: %d kms_valid: %u kms %llu rc: %d force_sync: %d min_xid: %llu ",
- POSTID(&oinfo->loi_oi), oinfo->loi_ost_idx,
- oinfo->loi_ost_gen, oinfo->loi_kms_valid, oinfo->loi_kms,
- ar->ar_rc, ar->ar_force_sync, ar->ar_min_xid);
- osc_lvb_print(env, cookie, p, &oinfo->loi_lvb);
- return 0;
-}
-
-
-static int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
-{
- struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
-
- cl_lvb2attr(attr, &oinfo->loi_lvb);
- attr->cat_kms = oinfo->loi_kms_valid ? oinfo->loi_kms : 0;
- return 0;
-}
-
-int osc_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
-{
- struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
- struct ost_lvb *lvb = &oinfo->loi_lvb;
-
- if (valid & CAT_SIZE)
- lvb->lvb_size = attr->cat_size;
- if (valid & CAT_MTIME)
- lvb->lvb_mtime = attr->cat_mtime;
- if (valid & CAT_ATIME)
- lvb->lvb_atime = attr->cat_atime;
- if (valid & CAT_CTIME)
- lvb->lvb_ctime = attr->cat_ctime;
- if (valid & CAT_BLOCKS)
- lvb->lvb_blocks = attr->cat_blocks;
- if (valid & CAT_KMS) {
- CDEBUG(D_CACHE, "set kms from %llu to %llu\n",
- oinfo->loi_kms, (__u64)attr->cat_kms);
- loi_kms_set(oinfo, attr->cat_kms);
- }
- return 0;
-}
-
-static int osc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb)
-{
- struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
-
- lvb->lvb_size = oinfo->loi_kms;
- lvb->lvb_blocks = oinfo->loi_lvb.lvb_blocks;
- return 0;
-}
-
-
-void osc_object_set_contended(struct osc_object *obj)
-{
- obj->oo_contention_time = cfs_time_current();
- /* mb(); */
- obj->oo_contended = 1;
-}
-
-void osc_object_clear_contended(struct osc_object *obj)
-{
- obj->oo_contended = 0;
-}
-
-int osc_object_is_contended(struct osc_object *obj)
-{
- struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
- int osc_contention_time = dev->od_contention_time;
- unsigned long cur_time = cfs_time_current();
- unsigned long retry_time;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
- return 1;
-
- if (!obj->oo_contended)
- return 0;
-
- /*
-	 * I like copy-paste. The code is copied from
- * ll_file_is_contended.
- */
- retry_time = cfs_time_add(obj->oo_contention_time,
- cfs_time_seconds(osc_contention_time));
- if (cfs_time_after(cur_time, retry_time)) {
- osc_object_clear_contended(obj);
- return 0;
- }
- return 1;
-}
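The contention check above is a simple time window: an object marked contended stays "contended" only for a configured number of seconds after the last contention event, after which the flag is cleared lazily on the next query. A standalone sketch of the idea, with the window length and helper names invented for illustration:

#include <stdio.h>
#include <time.h>
#include <stdbool.h>

struct demo_obj {
	bool contended;
	time_t contention_time;
};

#define DEMO_CONTENTION_SECONDS 4

static void demo_set_contended(struct demo_obj *o)
{
	o->contention_time = time(NULL);
	o->contended = true;
}

static bool demo_is_contended(struct demo_obj *o)
{
	if (!o->contended)
		return false;
	if (time(NULL) > o->contention_time + DEMO_CONTENTION_SECONDS) {
		o->contended = false;	/* window expired: clear lazily */
		return false;
	}
	return true;
}

int main(void)
{
	struct demo_obj o = { false, 0 };

	demo_set_contended(&o);
	printf("just contended: %d\n", demo_is_contended(&o));	/* 1 */
	o.contention_time -= 10;	/* pretend the event was long ago */
	printf("after window:   %d\n", demo_is_contended(&o));	/* 0 */
	return 0;
}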
-
-static const struct cl_object_operations osc_ops = {
- .coo_page_init = osc_page_init,
- .coo_lock_init = osc_lock_init,
- .coo_io_init = osc_io_init,
- .coo_attr_get = osc_attr_get,
- .coo_attr_set = osc_attr_set,
- .coo_glimpse = osc_object_glimpse
-};
-
-static const struct lu_object_operations osc_lu_obj_ops = {
- .loo_object_init = osc_object_init,
- .loo_object_delete = NULL,
- .loo_object_release = NULL,
- .loo_object_free = osc_object_free,
- .loo_object_print = osc_object_print,
- .loo_object_invariant = NULL
-};
-
-struct lu_object *osc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
-{
- struct osc_object *osc;
- struct lu_object *obj;
-
- OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, GFP_NOFS);
- if (osc != NULL) {
- obj = osc2lu(osc);
- lu_object_init(obj, NULL, dev);
- osc->oo_cl.co_ops = &osc_ops;
- obj->lo_ops = &osc_lu_obj_ops;
- } else
- obj = NULL;
- return obj;
-}
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
deleted file mode 100644
index 2af3232230ec..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ /dev/null
@@ -1,917 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_page for OSC layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "osc_cl_internal.h"
-
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
-static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *opg);
-
-/** \addtogroup osc
- * @{
- */
-
-/*
- * Comment out osc_page_protected because it may sleep inside
- * the client_obd_list_lock.
- * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
- * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
- * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
- */
-#if 0
-static int osc_page_is_dlocked(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int pending, int unref)
-{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- ldlm_policy_data_t *policy;
- ldlm_mode_t dlmmode;
- __u64 flags;
-
- might_sleep();
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
-
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
-
- dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
- osc_lock_build_res(env, obj, resname);
- osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
- return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- dlmmode, &flags, NULL, lockh, unref);
-}
-
-/**
- * Checks an invariant that a page in the cache is covered by a lock, as
- * needed.
- */
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- struct cl_object_header *hdr;
- struct cl_lock *scan;
- struct cl_page *page;
- struct cl_lock_descr *descr;
- int result;
-
- LINVRNT(!opg->ops_temp);
-
- page = opg->ops_cl.cpl_page;
- if (page->cp_owner != NULL &&
- cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
- /*
- * If IO is done without locks (liblustre, or lloop), lock is
- * not required.
- */
- result = 1;
- else
- /* otherwise check for a DLM lock */
- result = osc_page_is_dlocked(env, opg, mode, 1, unref);
- if (result == 0) {
- /* maybe this page is a part of a lockless io? */
- hdr = cl_object_header(opg->ops_cl.cpl_obj);
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_mode = mode;
- descr->cld_start = page->cp_index;
- descr->cld_end = page->cp_index;
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- /*
- * Lock-less sub-lock has to be either in HELD state
- * (when io is actively going on), or in CACHED state,
- * when top-lock is being unlocked:
- * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
- */
- if ((scan->cll_state == CLS_HELD ||
- scan->cll_state == CLS_CACHED) &&
- cl_lock_ext_match(&scan->cll_descr, descr)) {
- struct osc_lock *olck;
-
- olck = osc_lock_at(scan);
- result = osc_lock_is_lockless(olck);
- break;
- }
- }
- spin_unlock(&hdr->coh_lock_guard);
- }
- return result;
-}
-#else
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- return 1;
-}
-#endif
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-static void osc_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
- CDEBUG(D_TRACE, "%p\n", opg);
- LASSERT(opg->ops_lock == NULL);
-}
-
-static void osc_page_transfer_get(struct osc_page *opg, const char *label)
-{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
-
- LASSERT(!opg->ops_transfer_pinned);
- cl_page_get(page);
- lu_ref_add_atomic(&page->cp_reference, label, page);
- opg->ops_transfer_pinned = 1;
-}
-
-static void osc_page_transfer_put(const struct lu_env *env,
- struct osc_page *opg)
-{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
-
- if (opg->ops_transfer_pinned) {
- lu_ref_del(&page->cp_reference, "transfer", page);
- opg->ops_transfer_pinned = 0;
- cl_page_put(env, page);
- }
-}
-
-/**
- * This is called once for every page when it is submitted for a transfer
- * either opportunistic (osc_page_cache_add()), or immediate
- * (osc_page_submit()).
- */
-static void osc_page_transfer_add(const struct lu_env *env,
- struct osc_page *opg, enum cl_req_type crt)
-{
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
-
- /* ops_lru and ops_inflight share the same field, so take it from LRU
- * first and then use it as inflight. */
- osc_lru_del(osc_cli(obj), opg, false);
-
- spin_lock(&obj->oo_seatbelt);
- list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = current;
- spin_unlock(&obj->oo_seatbelt);
-}
-
-static int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct osc_io *oio = osc_env_io(env);
- struct osc_page *opg = cl2osc_page(slice);
- int result;
-
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
-
- osc_page_transfer_get(opg, "transfer\0cache");
- result = osc_queue_async_io(env, io, opg);
- if (result != 0)
- osc_page_transfer_put(env, opg);
- else
- osc_page_transfer_add(env, opg, CRT_WRITE);
-
-	/* for sync write, the kernel will wait for this page to be flushed
-	 * before osc_io_end() is called, so release it earlier.
-	 * for mkwrite(), it's known there are no further pages. */
- if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
- if (oio->oi_active != NULL) {
- osc_extent_release(env, oio->oi_active);
- oio->oi_active = NULL;
- }
- }
-
- return result;
-}
-
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end)
-{
- memset(policy, 0, sizeof(*policy));
- policy->l_extent.start = cl_offset(obj, start);
- policy->l_extent.end = cl_offset(obj, end + 1) - 1;
-}
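osc_index2policy() converts an inclusive page-index range into an inclusive byte extent. A tiny sketch of the arithmetic follows, assuming a fixed 4 KiB page size; the real code derives byte offsets from the object via cl_offset() rather than a hard-coded constant.

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

/* Page indices [start, end] map to bytes [start * P, (end + 1) * P - 1]. */
static void demo_index2extent(unsigned long start, unsigned long end,
			      unsigned long long *byte_start,
			      unsigned long long *byte_end)
{
	*byte_start = (unsigned long long)start * DEMO_PAGE_SIZE;
	*byte_end = (unsigned long long)(end + 1) * DEMO_PAGE_SIZE - 1;
}

int main(void)
{
	unsigned long long s, e;

	demo_index2extent(0, 0, &s, &e);
	printf("page 0     -> [%llu, %llu]\n", s, e);	/* [0, 4095] */
	demo_index2extent(2, 3, &s, &e);
	printf("pages 2..3 -> [%llu, %llu]\n", s, e);	/* [8192, 16383] */
	return 0;
}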
-
-static int osc_page_addref_lock(const struct lu_env *env,
- struct osc_page *opg,
- struct cl_lock *lock)
-{
- struct osc_lock *olock;
- int rc;
-
- LASSERT(opg->ops_lock == NULL);
-
- olock = osc_lock_at(lock);
- if (atomic_inc_return(&olock->ols_pageref) <= 0) {
- atomic_dec(&olock->ols_pageref);
- rc = -ENODATA;
- } else {
- cl_lock_get(lock);
- opg->ops_lock = lock;
- rc = 0;
- }
- return rc;
-}
-
-static void osc_page_putref_lock(const struct lu_env *env,
- struct osc_page *opg)
-{
- struct cl_lock *lock = opg->ops_lock;
- struct osc_lock *olock;
-
- LASSERT(lock != NULL);
- olock = osc_lock_at(lock);
-
- atomic_dec(&olock->ols_pageref);
- opg->ops_lock = NULL;
-
- cl_lock_put(env, lock);
-}
-
-static int osc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct cl_lock *lock;
- int result = -ENODATA;
-
- lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
- NULL, 1, 0);
- if (lock != NULL) {
- if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
- result = -EBUSY;
- cl_lock_put(env, lock);
- }
- return result;
-}
-
-static void osc_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct osc_page *opg = cl2osc_page(slice);
-
- if (unlikely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
-}
-
-static void osc_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
-
- if (likely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static void osc_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(slice->cpl_obj);
-
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static int osc_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
-
-static const char *osc_list(struct list_head *head)
-{
- return list_empty(head) ? "-" : "+";
-}
-
-static inline unsigned long osc_submit_duration(struct osc_page *opg)
-{
- if (opg->ops_submit_time == 0)
- return 0;
-
- return (cfs_time_current() - opg->ops_submit_time);
-}
-
-static int osc_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
- struct osc_object *obj = cl2osc(slice->cpl_obj);
- struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
-
- return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
- opg,
- /* 1 */
- oap->oap_magic, oap->oap_cmd,
- oap->oap_interrupted,
- osc_list(&oap->oap_pending_item),
- osc_list(&oap->oap_rpc_item),
- /* 2 */
- oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
- oap->oap_async_flags, oap->oap_brw_flags,
- oap->oap_request, oap->oap_cli, obj,
- /* 3 */
- osc_list(&opg->ops_inflight),
- opg->ops_submitter, opg->ops_transfer_pinned,
- osc_submit_duration(opg), opg->ops_srvlock,
- /* 4 */
- cli->cl_r_in_flight, cli->cl_w_in_flight,
- cli->cl_max_rpcs_in_flight,
- cli->cl_avail_grant,
- osc_list(&cli->cl_cache_waiters),
- osc_list(&cli->cl_loi_ready_list),
- osc_list(&cli->cl_loi_hp_ready_list),
- osc_list(&cli->cl_loi_write_list),
- osc_list(&cli->cl_loi_read_list),
- /* 5 */
- osc_list(&obj->oo_ready_item),
- osc_list(&obj->oo_hp_ready_item),
- osc_list(&obj->oo_write_item),
- osc_list(&obj->oo_read_item),
- atomic_read(&obj->oo_nr_reads),
- osc_list(&obj->oo_reading_exts),
- atomic_read(&obj->oo_nr_writes),
- osc_list(&obj->oo_hp_exts),
- osc_list(&obj->oo_urgent_exts));
-}
-
-static void osc_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- int rc;
-
- LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
-
- CDEBUG(D_TRACE, "%p\n", opg);
- osc_page_transfer_put(env, opg);
- rc = osc_teardown_async_page(env, obj, opg);
- if (rc) {
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
- "Trying to teardown failed: %d\n", rc);
- LASSERT(0);
- }
-
- spin_lock(&obj->oo_seatbelt);
- if (opg->ops_submitter != NULL) {
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- }
- spin_unlock(&obj->oo_seatbelt);
-
- osc_lru_del(osc_cli(obj), opg, true);
-}
-
-void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
- int from, int to)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
- opg->ops_from = from;
- opg->ops_to = to;
- spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- spin_unlock(&oap->oap_lock);
-}
-
-static int osc_page_cancel(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
- int rc = 0;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
-	/* Check if the transfer of this page has
-	 * completed, or was never even queued. */
- if (opg->ops_transfer_pinned)
- /* FIXME: may not be interrupted.. */
- rc = osc_cancel_async_page(env, opg);
- LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
- return rc;
-}
-
-static int osc_page_flush(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct osc_page *opg = cl2osc_page(slice);
- int rc;
-
- rc = osc_flush_async_page(env, io, opg);
- return rc;
-}
-
-static const struct cl_page_operations osc_page_ops = {
- .cpo_fini = osc_page_fini,
- .cpo_print = osc_page_print,
- .cpo_delete = osc_page_delete,
- .cpo_is_under_lock = osc_page_is_under_lock,
- .cpo_disown = osc_page_disown,
- .io = {
- [CRT_READ] = {
- .cpo_cache_add = osc_page_fail,
- .cpo_completion = osc_page_completion_read
- },
- [CRT_WRITE] = {
- .cpo_cache_add = osc_page_cache_add,
- .cpo_completion = osc_page_completion_write
- }
- },
- .cpo_clip = osc_page_clip,
- .cpo_cancel = osc_page_cancel,
- .cpo_flush = osc_page_flush
-};
-
-int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
-{
- struct osc_object *osc = cl2osc(obj);
- struct osc_page *opg = cl_object_page_slice(obj, page);
- int result;
-
- opg->ops_from = 0;
- opg->ops_to = PAGE_CACHE_SIZE;
-
- result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
- if (result == 0) {
- struct osc_io *oio = osc_env_io(env);
- opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj,
- &osc_page_ops);
- }
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
- /* ops_inflight and ops_lru are the same field, but it doesn't
- * hurt to initialize it twice :-) */
- INIT_LIST_HEAD(&opg->ops_inflight);
- INIT_LIST_HEAD(&opg->ops_lru);
-
- /* reserve an LRU space for this page */
- if (page->cp_type == CPT_CACHEABLE && result == 0)
- result = osc_lru_reserve(env, osc, opg);
-
- return result;
-}
-
-/**
- * Helper function called by osc_io_submit() for every page in an immediate
- * transfer (i.e., transferred synchronously).
- */
-void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
- enum cl_req_type crt, int brw_flags)
-{
- struct osc_async_page *oap = &opg->ops_oap;
- struct osc_object *obj = oap->oap_obj;
-
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
-
- LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
- oap, oap->oap_magic);
- LASSERT(oap->oap_async_flags & ASYNC_READY);
- LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
-
- oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
- oap->oap_page_off = opg->ops_from;
- oap->oap_count = opg->ops_to - opg->ops_from;
- oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
-
- if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
- oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
- oap->oap_cmd |= OBD_BRW_NOQUOTA;
- }
-
- opg->ops_submit_time = cfs_time_current();
- osc_page_transfer_get(opg, "transfer\0imm");
- osc_page_transfer_add(env, opg, crt);
-}
-
-/* --------------- LRU page management ------------------ */
-
-/* OSC is a natural place to manage LRU pages, as applications tend to write
- * to one OSC at a time. Ideally, if one OSC is used more frequently it should
- * occupy more LRU slots. On the other hand, we should avoid using up all LRU
- * slots (client_obd::cl_lru_left); otherwise a process has to be put to sleep
- * waiting for free LRU slots, which would be very bad. The algorithm therefore
- * requires each OSC to free slots voluntarily so that a reasonable number of
- * free slots is maintained at all times.
- */
-
-static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
-/* LRU pages are freed in batch mode. OSC should free at least this
- * number of pages to avoid running out of LRU budget. */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
-/* free at most this number, otherwise it will take too long to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
-
-/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
- * we should free slots aggressively. In this way, slots are freed at a steady
- * pace to maintain fairness among OSCs.
- *
- * Return how many LRU pages should be freed. */
-static int osc_cache_too_much(struct client_obd *cli)
-{
- struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list) >> 1;
-
- if (atomic_read(&osc_lru_waiters) > 0 &&
- atomic_read(cli->cl_lru_left) < lru_shrink_max)
- /* drop lru pages aggressively */
- return min(pages, lru_shrink_max);
-
-	/* if it's about to run out of LRU slots, we should free some, but not
-	 * too many, to maintain fairness among OSCs. */
- if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
- unsigned long tmp;
-
- tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
- if (pages > tmp)
- return min(pages, lru_shrink_max);
-
- return pages > lru_shrink_min ? lru_shrink_min : 0;
- }
-
- return 0;
-}
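The heuristic above is easier to follow with concrete numbers. Below is a userspace sketch of the same decision with all counters passed as plain ints and the batch limits invented; it reproduces only the arithmetic, not the kernel function.

#include <stdio.h>

#define DEMO_SHRINK_MIN 512
#define DEMO_SHRINK_MAX 8192

static int demo_cache_too_much(int lru_in_list, int lru_left,
			       int lru_max, int users, int waiters)
{
	int pages = lru_in_list / 2;

	/* waiters and a nearly empty pool: free aggressively */
	if (waiters > 0 && lru_left < DEMO_SHRINK_MAX)
		return pages < DEMO_SHRINK_MAX ? pages : DEMO_SHRINK_MAX;

	/* less than 1/16 of the cache left: free a fair share or the minimum */
	if (lru_left < lru_max / 16) {
		int fair = lru_max / users;

		if (pages > fair)
			return pages < DEMO_SHRINK_MAX ? pages : DEMO_SHRINK_MAX;
		return pages > DEMO_SHRINK_MIN ? DEMO_SHRINK_MIN : 0;
	}
	return 0;
}

int main(void)
{
	/* plenty of free slots: nothing to free */
	printf("%d\n", demo_cache_too_much(10000, 50000, 100000, 4, 0)); /* 0 */
	/* waiters and a nearly empty pool: free half of what we hold */
	printf("%d\n", demo_cache_too_much(10000, 1000, 100000, 4, 2));  /* 5000 */
	return 0;
}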
-
-/* Return how many pages are not discarded in @pvec. */
-static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
- struct cl_page **pvec, int max_index)
-{
- int count;
- int i;
-
- for (count = 0, i = 0; i < max_index; i++) {
- struct cl_page *page = pvec[i];
- if (cl_page_own_try(env, io, page) == 0) {
-			/* free an LRU page only if nobody is using it.
-			 * This check is necessary to avoid freeing pages
-			 * that have already been removed from the LRU and
-			 * pinned for IO. */
- if (!cl_page_in_use(page)) {
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- ++count;
- }
- cl_page_disown(env, io, page);
- }
- cl_page_put(env, page);
- pvec[i] = NULL;
- }
- return max_index - count;
-}
-
-/**
- * Drop @target of pages from LRU at most.
- */
-int osc_lru_shrink(struct client_obd *cli, int target)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_object *clobj = NULL;
- struct cl_page **pvec;
- struct osc_page *opg;
- int maxscan = 0;
- int count = 0;
- int index = 0;
- int rc = 0;
-
- LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
- if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
- return 0;
-
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- pvec = osc_env_info(env)->oti_pvec;
- io = &osc_env_info(env)->oti_io;
-
- client_obd_list_lock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_shrinkers);
- maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
- while (!list_empty(&cli->cl_lru_list)) {
- struct cl_page *page;
-
- if (--maxscan < 0)
- break;
-
- opg = list_entry(cli->cl_lru_list.next, struct osc_page,
- ops_lru);
- page = cl_page_top(opg->ops_cl.cpl_page);
- if (cl_page_in_use_noref(page)) {
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
- continue;
- }
-
- LASSERT(page->cp_obj != NULL);
- if (clobj != page->cp_obj) {
- struct cl_object *tmp = page->cp_obj;
-
- cl_object_get(tmp);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
-
- if (clobj != NULL) {
- count -= discard_pagevec(env, io, pvec, index);
- index = 0;
-
- cl_io_fini(env, io);
- cl_object_put(env, clobj);
- clobj = NULL;
- }
-
- clobj = tmp;
- io->ci_obj = clobj;
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, clobj);
-
- client_obd_list_lock(&cli->cl_lru_list_lock);
-
- if (rc != 0)
- break;
-
- ++maxscan;
- continue;
- }
-
- /* move this page to the end of list as it will be discarded
- * soon. The page will be finally removed from LRU list in
- * osc_page_delete(). */
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
-
- /* it's okay to grab a refcount here w/o holding lock because
- * it has to grab cl_lru_list_lock to delete the page. */
- cl_page_get(page);
- pvec[index++] = page;
- if (++count >= target)
- break;
-
- if (unlikely(index == OTI_PVEC_SIZE)) {
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- count -= discard_pagevec(env, io, pvec, index);
- index = 0;
-
- client_obd_list_lock(&cli->cl_lru_list_lock);
- }
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
-
- if (clobj != NULL) {
- count -= discard_pagevec(env, io, pvec, index);
-
- cl_io_fini(env, io);
- cl_object_put(env, clobj);
- }
- cl_env_nested_put(&nest, env);
-
- atomic_dec(&cli->cl_lru_shrinkers);
- return count > 0 ? count : rc;
-}
-
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
-{
- bool wakeup = false;
-
- if (!opg->ops_in_lru)
- return;
-
- atomic_dec(&cli->cl_lru_busy);
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (list_empty(&opg->ops_lru)) {
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
- atomic_inc_return(&cli->cl_lru_in_list);
- wakeup = atomic_read(&osc_lru_waiters) > 0;
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
-
- if (wakeup) {
- osc_lru_shrink(cli, osc_cache_too_much(cli));
- wake_up_all(&osc_lru_waitq);
- }
-}
-
-/* delete page from the LRU list. The page can be deleted from the LRU list
- * for two reasons: it was redirtied or deleted from the page cache. */
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
-{
- if (opg->ops_in_lru) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (!list_empty(&opg->ops_lru)) {
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
- list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
- if (!del)
- atomic_inc(&cli->cl_lru_busy);
- } else if (del) {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- if (del) {
- atomic_inc(cli->cl_lru_left);
-			/* this is a great place to release more LRU pages if
-			 * this osc occupies too many LRU pages and the kernel
-			 * is stealing one of them.
-			 * cl_lru_shrinkers avoids a recursive call in case
-			 * we're already in the context of osc_lru_shrink(). */
- if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
- !memory_pressure_get())
- osc_lru_shrink(cli, osc_cache_too_much(cli));
- wake_up(&osc_lru_waitq);
- }
- } else {
- LASSERT(list_empty(&opg->ops_lru));
- }
-}
-
-static inline int max_to_shrink(struct client_obd *cli)
-{
- return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
-}
-
-static int osc_lru_reclaim(struct client_obd *cli)
-{
- struct cl_client_cache *cache = cli->cl_cache;
- int max_scans;
- int rc;
-
- LASSERT(cache != NULL);
-
- rc = osc_lru_shrink(cli, lru_shrink_min);
- if (rc != 0) {
- CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
- cli->cl_import->imp_obd->obd_name, rc, cli);
- return rc;
- }
-
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
-
-	/* Reclaim LRU slots from other client_obds, as this one can't free
-	 * enough of its own. This should rarely happen. */
- spin_lock(&cache->ccc_lru_lock);
- LASSERT(!list_empty(&cache->ccc_lru));
-
- cache->ccc_lru_shrinkers++;
- list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
-
- max_scans = atomic_read(&cache->ccc_users);
- while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
- cli = list_entry(cache->ccc_lru.next, struct client_obd,
- cl_lru_osc);
-
- CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
-
- list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- if (atomic_read(&cli->cl_lru_in_list) > 0) {
- spin_unlock(&cache->ccc_lru_lock);
-
- rc = osc_lru_shrink(cli, max_to_shrink(cli));
- spin_lock(&cache->ccc_lru_lock);
- if (rc != 0)
- break;
- }
- }
- spin_unlock(&cache->ccc_lru_lock);
-
- CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
- cli->cl_import->imp_obd->obd_name, cli, rc);
- return rc;
-}
-
-static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *opg)
-{
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- struct client_obd *cli = osc_cli(obj);
- int rc = 0;
-
- if (cli->cl_cache == NULL) /* shall not be in LRU */
- return 0;
-
- LASSERT(atomic_read(cli->cl_lru_left) >= 0);
- while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
- int gen;
-
-		/* ran out of LRU slots, try to drop some ourselves */
- rc = osc_lru_reclaim(cli);
- if (rc < 0)
- break;
- if (rc > 0)
- continue;
-
- cond_resched();
-
-		/* slowest case: all cached pages are busy; notify
-		 * other OSCs that we are short of LRU slots. */
- atomic_inc(&osc_lru_waiters);
-
- gen = atomic_read(&cli->cl_lru_in_list);
- rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0 ||
- (atomic_read(&cli->cl_lru_in_list) > 0 &&
- gen != atomic_read(&cli->cl_lru_in_list)),
- &lwi);
-
- atomic_dec(&osc_lru_waiters);
- if (rc < 0)
- break;
- }
-
- if (rc >= 0) {
- atomic_inc(&cli->cl_lru_busy);
- opg->ops_in_lru = 1;
- rc = 0;
- }
-
- return rc;
-}
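osc_lru_reserve() is essentially "take one slot from a shared budget, reclaiming and waiting when the budget is empty". A much-simplified, single-threaded sketch of that loop follows; there is no waitqueue here, the reclaim stub is invented, and all names are placeholders.

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

/* decrement the budget only if it is still positive (decrement-unless-zero) */
static bool take_slot(atomic_int *budget)
{
	int old = atomic_load(budget);

	while (old > 0) {
		if (atomic_compare_exchange_weak(budget, &old, old - 1))
			return true;
	}
	return false;
}

/* stand-in for osc_lru_reclaim()/osc_lru_shrink(): pretend we freed 2 slots */
static int demo_reclaim(atomic_int *budget)
{
	atomic_fetch_add(budget, 2);
	return 2;
}

static int demo_reserve(atomic_int *budget)
{
	while (!take_slot(budget)) {
		if (demo_reclaim(budget) <= 0)
			return -1;	/* nothing reclaimable: give up */
	}
	return 0;
}

int main(void)
{
	atomic_int budget = 0;

	printf("reserve: %d, budget left: %d\n",
	       demo_reserve(&budget), atomic_load(&budget)); /* 0, 1 */
	return 0;
}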
-
-/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
deleted file mode 100644
index 2ff253f458f8..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- *
- * Code originally extracted from quota directory
- */
-
-#include "../include/obd_class.h"
-#include "osc_internal.h"
-
-static inline struct osc_quota_info *osc_oqi_alloc(u32 id)
-{
- struct osc_quota_info *oqi;
-
- OBD_SLAB_ALLOC_PTR(oqi, osc_quota_kmem);
- if (oqi != NULL)
- oqi->oqi_id = id;
-
- return oqi;
-}
-
-int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
-{
- int type;
-
- for (type = 0; type < MAXQUOTAS; type++) {
- struct osc_quota_info *oqi;
-
- oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
- if (oqi) {
- /* do not try to access oqi here, it could have been
- * freed by osc_quota_setdq() */
-
- /* the slot is busy, the user is about to run out of
- * quota space on this OST */
- CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
- type == USRQUOTA ? "user" : "group", qid[type]);
- return NO_QUOTA;
- }
- }
-
- return QUOTA_OK;
-}
-
-#define MD_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_MD_FLUSRQUOTA \
- : OBD_MD_FLGRPQUOTA)
-#define FL_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_FL_NO_USRQUOTA \
- : OBD_FL_NO_GRPQUOTA)
-
-int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
- u32 valid, u32 flags)
-{
- int type;
- int rc = 0;
-
- if ((valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) == 0)
- return 0;
-
- for (type = 0; type < MAXQUOTAS; type++) {
- struct osc_quota_info *oqi;
-
- if ((valid & MD_QUOTA_FLAG(type)) == 0)
- continue;
-
- /* lookup the ID in the per-type hash table */
- oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
- if ((flags & FL_QUOTA_FLAG(type)) != 0) {
- /* This ID is getting close to its quota limit, let's
- * switch to sync I/O */
- if (oqi != NULL)
- continue;
-
- oqi = osc_oqi_alloc(qid[type]);
- if (oqi == NULL) {
- rc = -ENOMEM;
- break;
- }
-
- rc = cfs_hash_add_unique(cli->cl_quota_hash[type],
- &qid[type], &oqi->oqi_hash);
- /* race with others? */
- if (rc == -EALREADY) {
- rc = 0;
- OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
- }
-
- CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
- cli->cl_import->imp_obd->obd_name,
- type == USRQUOTA ? "user" : "group",
- qid[type], rc);
- } else {
- /* This ID is now off the hook, let's remove it from
- * the hash table */
- if (oqi == NULL)
- continue;
-
- oqi = cfs_hash_del_key(cli->cl_quota_hash[type],
- &qid[type]);
- if (oqi)
- OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
-
- CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
- cli->cl_import->imp_obd->obd_name,
- type == USRQUOTA ? "user" : "group",
- qid[type], oqi);
- }
- }
-
- return rc;
-}
-
-/*
- * Hash operations for uid/gid <-> osc_quota_info
- */
-static unsigned
-oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
-{
- return cfs_hash_u32_hash(*((__u32 *)key), mask);
-}
-
-static int
-oqi_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct osc_quota_info *oqi;
- u32 uid;
-
- LASSERT(key != NULL);
- uid = *((u32 *)key);
- oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
-
- return uid == oqi->oqi_id;
-}
-
-static void *
-oqi_key(struct hlist_node *hnode)
-{
- struct osc_quota_info *oqi;
- oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
- return &oqi->oqi_id;
-}
-
-static void *
-oqi_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct osc_quota_info, oqi_hash);
-}
-
-static void
-oqi_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
-}
-
-static void
-oqi_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
-}
-
-static void
-oqi_exit(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct osc_quota_info *oqi;
-
- oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
-
- OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
-}
-
-#define HASH_QUOTA_BKT_BITS 5
-#define HASH_QUOTA_CUR_BITS 5
-#define HASH_QUOTA_MAX_BITS 15
-
-static cfs_hash_ops_t quota_hash_ops = {
- .hs_hash = oqi_hashfn,
- .hs_keycmp = oqi_keycmp,
- .hs_key = oqi_key,
- .hs_object = oqi_object,
- .hs_get = oqi_get,
- .hs_put_locked = oqi_put_locked,
- .hs_exit = oqi_exit,
-};
-
-int osc_quota_setup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
- int i, type;
-
- for (type = 0; type < MAXQUOTAS; type++) {
- cli->cl_quota_hash[type] = cfs_hash_create("QUOTA_HASH",
- HASH_QUOTA_CUR_BITS,
- HASH_QUOTA_MAX_BITS,
- HASH_QUOTA_BKT_BITS,
- 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &quota_hash_ops,
- CFS_HASH_DEFAULT);
- if (cli->cl_quota_hash[type] == NULL)
- break;
- }
-
- if (type == MAXQUOTAS)
- return 0;
-
- for (i = 0; i < type; i++)
- cfs_hash_putref(cli->cl_quota_hash[i]);
-
- return -ENOMEM;
-}
-
-int osc_quota_cleanup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
- int type;
-
- for (type = 0; type < MAXQUOTAS; type++)
- cfs_hash_putref(cli->cl_quota_hash[type]);
-
- return 0;
-}
-
-int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct ptlrpc_request *req;
- struct obd_quotactl *oqc;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
- OST_QUOTACTL);
- if (req == NULL)
- return -ENOMEM;
-
- oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *oqc = *oqctl;
-
- ptlrpc_request_set_replen(req);
- ptlrpc_at_set_req_timeout(req);
- req->rq_no_resend = 1;
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
-
- if (req->rq_repmsg) {
- oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (oqc) {
- *oqctl = *oqc;
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- } else if (!rc) {
- CERROR("Can't unpack obd_quotactl\n");
- rc = -EPROTO;
- }
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
- struct obd_quotactl *oqctl)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct obd_quotactl *body;
- int rc;
-
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
- OST_QUOTACHECK);
- if (req == NULL)
- return -ENOMEM;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- *body = *oqctl;
-
- ptlrpc_request_set_replen(req);
-
- /* the next poll will find -ENODATA, which means quotacheck is
- * still in progress */
- cli->cl_qchk_stat = -ENODATA;
- rc = ptlrpc_queue_wait(req);
- if (rc)
- cli->cl_qchk_stat = rc;
- ptlrpc_req_finished(req);
- return rc;
-}
-
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- int rc;
-
- qchk->obd_uuid = cli->cl_target_uuid;
- memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
-
- rc = cli->cl_qchk_stat;
- /* the client is not the previous one */
- if (rc == CL_NOT_QUOTACHECKED)
- rc = -EINTR;
- return rc;
-}
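The osc_quota_chkdq()/osc_quota_setdq() pair removed above keeps a per-type set of IDs that are close to their quota limit: chkdq() reports NO_QUOTA when an ID is in the set, and setdq() inserts or removes IDs according to the flags returned by the OST. The toy C sketch below shows only those semantics; it uses a fixed array instead of cfs_hash, and the names (quota_id, quota_used, chkdq, setdq here) are simplified stand-ins rather than the real interfaces.

#include <stdbool.h>
#include <stdio.h>

#define NUM_TYPES	2	/* user and group, as in the removed code */
#define SET_SIZE	16

static unsigned int quota_id[NUM_TYPES][SET_SIZE];
static bool quota_used[NUM_TYPES][SET_SIZE];

/* true if either the user or the group ID is flagged as near its limit */
static bool chkdq(const unsigned int qid[NUM_TYPES])
{
	for (int type = 0; type < NUM_TYPES; type++)
		for (int i = 0; i < SET_SIZE; i++)
			if (quota_used[type][i] && quota_id[type][i] == qid[type])
				return true;	/* NO_QUOTA */
	return false;				/* QUOTA_OK */
}

/* near_limit stands in for the per-type "no quota left" flag */
static void setdq(int type, unsigned int id, bool near_limit)
{
	int free_slot = -1;

	for (int i = 0; i < SET_SIZE; i++) {
		if (quota_used[type][i] && quota_id[type][i] == id) {
			if (!near_limit)
				quota_used[type][i] = false;	/* remove */
			return;			/* already tracked */
		}
		if (!quota_used[type][i] && free_slot < 0)
			free_slot = i;
	}
	if (near_limit && free_slot >= 0) {
		quota_id[type][free_slot] = id;			/* insert */
		quota_used[type][free_slot] = true;
	}
}

int main(void)
{
	unsigned int qid[NUM_TYPES] = { 1000, 1000 };

	printf("initial: %s\n", chkdq(qid) ? "NO_QUOTA" : "QUOTA_OK");
	setdq(0, 1000, true);	/* user 1000 reported near its limit */
	printf("flagged: %s\n", chkdq(qid) ? "NO_QUOTA" : "QUOTA_OK");
	setdq(0, 1000, false);	/* limit cleared again */
	printf("cleared: %s\n", chkdq(qid) ? "NO_QUOTA" : "QUOTA_OK");
	return 0;
}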
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
deleted file mode 100644
index 225e5ed6714f..000000000000
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ /dev/null
@@ -1,3367 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_OSC
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre/lustre_user.h"
-#include "../include/obd_cksum.h"
-
-#include "../include/lustre_ha.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_debug.h"
-#include "../include/lustre_param.h"
-#include "../include/lustre_fid.h"
-#include "../include/obd_class.h"
-#include "../include/obd.h"
-#include "osc_internal.h"
-#include "osc_cl_internal.h"
-
-atomic_t osc_pool_req_count;
-unsigned int osc_reqpool_maxreqcount;
-struct ptlrpc_request_pool *osc_rq_pool;
-
-/* max memory used for request pool, unit is MB */
-static unsigned int osc_reqpool_mem_max = 5;
-module_param(osc_reqpool_mem_max, uint, 0444);
-
-struct osc_brw_async_args {
- struct obdo *aa_oa;
- int aa_requested_nob;
- int aa_nio_count;
- u32 aa_page_count;
- int aa_resends;
- struct brw_page **aa_ppga;
- struct client_obd *aa_cli;
- struct list_head aa_oaps;
- struct list_head aa_exts;
- struct cl_req *aa_clerq;
-};
-
-struct osc_async_args {
- struct obd_info *aa_oi;
-};
-
-struct osc_setattr_args {
- struct obdo *sa_oa;
- obd_enqueue_update_f sa_upcall;
- void *sa_cookie;
-};
-
-struct osc_fsync_args {
- struct obd_info *fa_oi;
- obd_enqueue_update_f fa_upcall;
- void *fa_cookie;
-};
-
-struct osc_enqueue_args {
- struct obd_export *oa_exp;
- __u64 *oa_flags;
- obd_enqueue_update_f oa_upcall;
- void *oa_cookie;
- struct ost_lvb *oa_lvb;
- struct lustre_handle *oa_lockh;
- struct ldlm_enqueue_info *oa_ei;
- unsigned int oa_agl:1;
-};
-
-static void osc_release_ppga(struct brw_page **ppga, u32 count);
-static int brw_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc);
-int osc_cleanup(struct obd_device *obd);
-
-/* Pack OSC object metadata for disk storage (LE byte order). */
-static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- int lmm_size;
-
- lmm_size = sizeof(**lmmp);
- if (lmmp == NULL)
- return lmm_size;
-
- if (*lmmp != NULL && lsm == NULL) {
- kfree(*lmmp);
- *lmmp = NULL;
- return 0;
- } else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) {
- return -EBADF;
- }
-
- if (*lmmp == NULL) {
- *lmmp = kzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- }
-
- if (lsm)
- ostid_cpu_to_le(&lsm->lsm_oi, &(*lmmp)->lmm_oi);
-
- return lmm_size;
-}
-
-/* Unpack OSC object metadata from disk storage (LE byte order). */
-static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_bytes)
-{
- int lsm_size;
- struct obd_import *imp = class_exp2cliimp(exp);
-
- if (lmm != NULL) {
- if (lmm_bytes < sizeof(*lmm)) {
- CERROR("%s: lov_mds_md too small: %d, need %d\n",
- exp->exp_obd->obd_name, lmm_bytes,
- (int)sizeof(*lmm));
- return -EINVAL;
- }
- /* XXX LOV_MAGIC etc check? */
-
- if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
- CERROR("%s: zero lmm_object_id: rc = %d\n",
- exp->exp_obd->obd_name, -EINVAL);
- return -EINVAL;
- }
- }
-
- lsm_size = lov_stripe_md_size(1);
- if (lsmp == NULL)
- return lsm_size;
-
- if (*lsmp != NULL && lmm == NULL) {
- kfree((*lsmp)->lsm_oinfo[0]);
- kfree(*lsmp);
- *lsmp = NULL;
- return 0;
- }
-
- if (*lsmp == NULL) {
- *lsmp = kzalloc(lsm_size, GFP_NOFS);
- if (unlikely(*lsmp == NULL))
- return -ENOMEM;
- (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
- GFP_NOFS);
- if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
- kfree(*lsmp);
- return -ENOMEM;
- }
- loi_init((*lsmp)->lsm_oinfo[0]);
- } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
- return -EBADF;
- }
-
- if (lmm != NULL)
- /* XXX zero *lsmp? */
- ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
-
- if (imp != NULL &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
- (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
- else
- (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
-
- return lsm_size;
-}
-
-static inline void osc_pack_req_body(struct ptlrpc_request *req,
- struct obd_info *oinfo)
-{
- struct ost_body *body;
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
-
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
-}
-
-static int osc_getattr_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
-{
- struct ost_body *body;
-
- if (rc != 0)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body) {
- CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
- aa->aa_oi->oi_oa, &body->oa);
-
- /* This should really be sent by the OST */
- aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
- aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
- } else {
- CDEBUG(D_INFO, "can't unpack ost_body\n");
- rc = -EPROTO;
- aa->aa_oi->oi_oa->o_valid = 0;
- }
-out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- return rc;
-}
-
-static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- struct osc_async_args *aa;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oinfo);
-
- ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oi = oinfo;
-
- ptlrpc_set_add_req(set, req);
- return 0;
-}
-
-static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo)
-{
- struct ptlrpc_request *req;
- struct ost_body *body;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oinfo);
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
- &body->oa);
-
- oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
- oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
-
- out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
- struct obd_info *oinfo, struct obd_trans_info *oti)
-{
- struct ptlrpc_request *req;
- struct ost_body *body;
- int rc;
-
- LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- osc_pack_req_body(req, oinfo);
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
- &body->oa);
-
-out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int osc_setattr_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_setattr_args *sa, int rc)
-{
- struct ost_body *body;
-
- if (rc != 0)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
- &body->oa);
-out:
- rc = sa->sa_upcall(sa->sa_cookie, rc);
- return rc;
-}
-
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
-{
- struct ptlrpc_request *req;
- struct osc_setattr_args *sa;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
- oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
-
- osc_pack_req_body(req, oinfo);
-
- ptlrpc_request_set_replen(req);
-
- /* do mds to ost setattr asynchronously */
- if (!rqset) {
- /* Do not wait for response. */
- ptlrpcd_add_req(req);
- } else {
- req->rq_interpret_reply =
- (ptlrpc_interpterer_t)osc_setattr_interpret;
-
- CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
- sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oinfo->oi_oa;
- sa->sa_upcall = upcall;
- sa->sa_cookie = cookie;
-
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
- }
-
- return 0;
-}
-
-static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
- struct ptlrpc_request_set *rqset)
-{
- return osc_setattr_async_base(exp, oinfo, oti,
- oinfo->oi_cb_up, oinfo, rqset);
-}
-
-int osc_real_create(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md **ea, struct obd_trans_info *oti)
-{
- struct ptlrpc_request *req;
- struct ost_body *body;
- struct lov_stripe_md *lsm;
- int rc;
-
- LASSERT(oa);
- LASSERT(ea);
-
- lsm = *ea;
- if (!lsm) {
- rc = obd_alloc_memmd(exp, &lsm);
- if (rc < 0)
- return rc;
- }
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
- if (rc) {
- ptlrpc_request_free(req);
- goto out;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
-
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-
- ptlrpc_request_set_replen(req);
-
- if ((oa->o_valid & OBD_MD_FLFLAGS) &&
- oa->o_flags == OBD_FL_DELORPHAN) {
- DEBUG_REQ(D_HA, req,
- "delorphan from OST integration");
- /* Don't resend the delorphan req */
- req->rq_no_resend = req->rq_no_delay = 1;
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out_req;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
- rc = -EPROTO;
- goto out_req;
- }
-
- CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
-
- oa->o_blksize = cli_brw_size(exp->exp_obd);
- oa->o_valid |= OBD_MD_FLBLKSZ;
-
- /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
- * have valid lsm_oinfo data structs, so don't go touching that.
- * This needs to be fixed in a big way.
- */
- lsm->lsm_oi = oa->o_oi;
- *ea = lsm;
-
- if (oti != NULL) {
- oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
-
- if (oa->o_valid & OBD_MD_FLCOOKIE) {
- if (!oti->oti_logcookies)
- oti_alloc_cookies(oti, 1);
- *oti->oti_logcookies = oa->o_lcookie;
- }
- }
-
- CDEBUG(D_HA, "transno: %lld\n",
- lustre_msg_get_transno(req->rq_repmsg));
-out_req:
- ptlrpc_req_finished(req);
-out:
- if (rc && !*ea)
- obd_free_memmd(exp, &lsm);
- return rc;
-}
-
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
-{
- struct ptlrpc_request *req;
- struct osc_setattr_args *sa;
- struct ost_body *body;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
- ptlrpc_at_set_req_timeout(req);
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
-
- ptlrpc_request_set_replen(req);
-
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
- CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
- sa = ptlrpc_req_async_args(req);
- sa->sa_oa = oinfo->oi_oa;
- sa->sa_upcall = upcall;
- sa->sa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
-
- return 0;
-}
-
-static int osc_sync_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *arg, int rc)
-{
- struct osc_fsync_args *fa = arg;
- struct ost_body *body;
-
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
- CERROR("can't unpack ost_body\n");
- rc = -EPROTO;
- goto out;
- }
-
- *fa->fa_oi->oi_oa = body->oa;
-out:
- rc = fa->fa_upcall(fa->fa_cookie, rc);
- return rc;
-}
-
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
- obd_enqueue_update_f upcall, void *cookie,
- struct ptlrpc_request_set *rqset)
-{
- struct ptlrpc_request *req;
- struct ost_body *body;
- struct osc_fsync_args *fa;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- /* overload the size and blocks fields in the oa with start/end */
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
- oinfo->oi_oa);
-
- ptlrpc_request_set_replen(req);
- req->rq_interpret_reply = osc_sync_interpret;
-
- CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
- fa = ptlrpc_req_async_args(req);
- fa->fa_oi = oinfo;
- fa->fa_upcall = upcall;
- fa->fa_cookie = cookie;
-
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
-
- return 0;
-}
-
-/* Find and locally cancel locks matched by @mode in the resource found by
- * @objid. Found locks are added to the @cancels list. Returns the number of
- * locks added to @cancels. */
-static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
- struct list_head *cancels,
- ldlm_mode_t mode, __u64 lock_flags)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- int count;
-
- /* Return, i.e. cancel nothing, only if ELC is supported (flag in
- * export) but disabled through procfs (flag in NS).
- *
- * This is distinct from the case where ELC was never supported, in
- * which we still want to cancel locks in advance and just cancel them
- * locally, without sending any RPC. */
- if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
- return 0;
-
- ostid_build_res_name(&oa->o_oi, &res_id);
- res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (res == NULL)
- return 0;
-
- LDLM_RESOURCE_ADDREF(res);
- count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
- lock_flags, 0, NULL);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- return count;
-}
-
-static int osc_destroy_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data,
- int rc)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
-
- atomic_dec(&cli->cl_destroy_in_flight);
- wake_up(&cli->cl_destroy_waitq);
- return 0;
-}
-
-static int osc_can_send_destroy(struct client_obd *cli)
-{
- if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
- cli->cl_max_rpcs_in_flight) {
- /* The destroy request can be sent */
- return 1;
- }
- if (atomic_dec_return(&cli->cl_destroy_in_flight) <
- cli->cl_max_rpcs_in_flight) {
- /*
- * The counter has been modified between the two atomic
- * operations.
- */
- wake_up(&cli->cl_destroy_waitq);
- }
- return 0;
-}
-
-int osc_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md **ea,
- struct obd_trans_info *oti)
-{
- int rc = 0;
-
- LASSERT(oa);
- LASSERT(ea);
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
-
- if ((oa->o_valid & OBD_MD_FLFLAGS) &&
- oa->o_flags == OBD_FL_RECREATE_OBJS) {
- return osc_real_create(exp, oa, ea, oti);
- }
-
- if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
- return osc_real_create(exp, oa, ea, oti);
-
- /* we should not get here anymore */
- LBUG();
-
- return rc;
-}
-
-/* Destroy requests can be async always on the client, and we don't even really
- * care about the return code since the client cannot do anything at all about
- * a destroy failure.
- * When the MDS is unlinking a filename, it saves the file objects into a
- * recovery llog, and these object records are cancelled when the OST reports
- * they were destroyed and sync'd to disk (i.e. transaction committed).
- * If the client dies, or the OST is down when the object should be destroyed,
- * the records are not cancelled, and when the OST reconnects to the MDS next,
- * it will retrieve the llog unlink logs and then send the log cancellation
- * cookies to the MDS after committing destroy transactions. */
-static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md *ea,
- struct obd_trans_info *oti, struct obd_export *md_export)
-{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct ptlrpc_request *req;
- struct ost_body *body;
- LIST_HEAD(cancels);
- int rc, count;
-
- if (!oa) {
- CDEBUG(D_INFO, "oa NULL\n");
- return -EINVAL;
- }
-
- count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
- LDLM_FL_DISCARD_DATA);
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
- if (req == NULL) {
- ldlm_lock_list_put(&cancels, l_bl_ast, count);
- return -ENOMEM;
- }
-
- rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
- 0, &cancels, count);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
- ptlrpc_at_set_req_timeout(req);
-
- if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
- oa->o_lcookie = *oti->oti_logcookies;
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-
- ptlrpc_request_set_replen(req);
-
- /* If osc_destroy is for destroying the unlink orphan,
- * sent from MDT to OST, which should not be blocked here,
- * because the process might be triggered by ptlrpcd, and
- * it is not good to block ptlrpcd thread (b=16006)*/
- if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
- req->rq_interpret_reply = osc_destroy_interpret;
- if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
- NULL);
-
- /*
- * Wait until the number of on-going destroy RPCs drops
- * under max_rpc_in_flight
- */
- l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
- }
- }
-
- /* Do not wait for response */
- ptlrpcd_add_req(req);
- return 0;
-}
-
-static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
- long writing_bytes)
-{
- u32 bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
-
- LASSERT(!(oa->o_valid & bits));
-
- oa->o_valid |= bits;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- oa->o_dirty = cli->cl_dirty;
- if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
- cli->cl_dirty_max)) {
- CERROR("dirty %lu - %lu > dirty_max %lu\n",
- cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
- oa->o_undirty = 0;
- } else if (unlikely(atomic_read(&obd_dirty_pages) -
- atomic_read(&obd_dirty_transit_pages) >
- (long)(obd_max_dirty_pages + 1))) {
- /* The atomic_read() check and the atomic_inc() are
- * not covered by a single lock, so they may safely race and trip
- * this CERROR() unless we add in a small fudge factor (+1). */
- CERROR("dirty %d - %d > system dirty_max %d\n",
- atomic_read(&obd_dirty_pages),
- atomic_read(&obd_dirty_transit_pages),
- obd_max_dirty_pages);
- oa->o_undirty = 0;
- } else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
- CERROR("dirty %lu - dirty_max %lu too big???\n",
- cli->cl_dirty, cli->cl_dirty_max);
- oa->o_undirty = 0;
- } else {
- long max_in_flight = (cli->cl_max_pages_per_rpc <<
- PAGE_CACHE_SHIFT)*
- (cli->cl_max_rpcs_in_flight + 1);
- oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
- }
- oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
- oa->o_dropped = cli->cl_lost_grant;
- cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
- oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
-
-}
-
-void osc_update_next_shrink(struct client_obd *cli)
-{
- cli->cl_next_shrink_grant =
- cfs_time_shift(cli->cl_grant_shrink_interval);
- CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
- cli->cl_next_shrink_grant);
-}
-
-static void __osc_update_grant(struct client_obd *cli, u64 grant)
-{
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant += grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-}
-
-static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
-{
- if (body->oa.o_valid & OBD_MD_FLGRANT) {
- CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
- __osc_update_grant(cli, body->oa.o_grant);
- }
-}
-
-static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set);
-
-static int osc_shrink_grant_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *aa, int rc)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct obdo *oa = ((struct osc_brw_async_args *)aa)->aa_oa;
- struct ost_body *body;
-
- if (rc != 0) {
- __osc_update_grant(cli, oa->o_grant);
- goto out;
- }
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
- osc_update_grant(cli, body);
-out:
- OBDO_FREE(oa);
- return rc;
-}
-
-static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
-{
- client_obd_list_lock(&cli->cl_loi_list_lock);
- oa->o_grant = cli->cl_avail_grant / 4;
- cli->cl_avail_grant -= oa->o_grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
- oa->o_valid |= OBD_MD_FLFLAGS;
- oa->o_flags = 0;
- }
- oa->o_flags |= OBD_FL_SHRINK_GRANT;
- osc_update_next_shrink(cli);
-}
-
-/* Shrink the current grant, either from some large amount to enough for a
- * full set of in-flight RPCs, or if we have already shrunk to that limit
- * then to enough for a single RPC. This avoids keeping more grant than
- * needed, and avoids shrinking the grant piecemeal. */
-static int osc_shrink_grant(struct client_obd *cli)
-{
- __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
- (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_avail_grant <= target_bytes)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- return osc_shrink_grant_to_target(cli, target_bytes);
-}
-
-int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
-{
- int rc = 0;
- struct ost_body *body;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- /* Don't shrink if we are already above or below the desired limit.
- * We don't want to shrink below a single RPC, as that will negatively
- * impact block allocation and long-term performance. */
- if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
-
- if (target_bytes >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- return 0;
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- body = kzalloc(sizeof(*body), GFP_NOFS);
- if (!body)
- return -ENOMEM;
-
- osc_announce_cached(cli, &body->oa, 0);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- body->oa.o_grant = cli->cl_avail_grant - target_bytes;
- cli->cl_avail_grant = target_bytes;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
- body->oa.o_valid |= OBD_MD_FLFLAGS;
- body->oa.o_flags = 0;
- }
- body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
- osc_update_next_shrink(cli);
-
- rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
- sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
- sizeof(*body), body, NULL);
- if (rc != 0)
- __osc_update_grant(cli, body->oa.o_grant);
- kfree(body);
- return rc;
-}
-
-static int osc_should_shrink_grant(struct client_obd *client)
-{
- unsigned long time = cfs_time_current();
- unsigned long next_shrink = client->cl_next_shrink_grant;
-
- if ((client->cl_import->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_GRANT_SHRINK) == 0)
- return 0;
-
- if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
- /* Get the current RPC size directly, instead of going via:
- * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
- * Keep comment here so that it can be found by searching. */
- int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
-
- if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
- client->cl_avail_grant > brw_size)
- return 1;
-
- osc_update_next_shrink(client);
- }
- return 0;
-}
-
-static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
-{
- struct client_obd *client;
-
- list_for_each_entry(client, &item->ti_obd_list,
- cl_grant_shrink_list) {
- if (osc_should_shrink_grant(client))
- osc_shrink_grant(client);
- }
- return 0;
-}
-
-static int osc_add_shrink_grant(struct client_obd *client)
-{
- int rc;
-
- rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
- TIMEOUT_GRANT,
- osc_grant_shrink_grant_cb, NULL,
- &client->cl_grant_shrink_list);
- if (rc) {
- CERROR("add grant client %s error %d\n",
- client->cl_import->imp_obd->obd_name, rc);
- return rc;
- }
- CDEBUG(D_CACHE, "add grant client %s \n",
- client->cl_import->imp_obd->obd_name);
- osc_update_next_shrink(client);
- return 0;
-}
-
-static int osc_del_shrink_grant(struct client_obd *client)
-{
- return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
- TIMEOUT_GRANT);
-}
-
-static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
-{
- /*
- * ocd_grant is the total grant amount we expect to hold: if we've
- * been evicted, it's the new avail_grant amount, cl_dirty will drop
- * to 0 as inflight RPCs fail out; otherwise, it's avail_grant + dirty.
- *
- * race is tolerable here: if we're evicted, but imp_state already
- * left EVICTED state, then cl_dirty must be 0 already.
- */
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
- cli->cl_avail_grant = ocd->ocd_grant;
- else
- cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
-
- if (cli->cl_avail_grant < 0) {
- CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
- cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
- ocd->ocd_grant, cli->cl_dirty);
- /* workaround for servers which do not have the patch from
- * LU-2679 */
- cli->cl_avail_grant = ocd->ocd_grant;
- }
-
- /* determine the appropriate chunk size used by osc_extent. */
- cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
- cli->cl_import->imp_obd->obd_name,
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
- list_empty(&cli->cl_grant_shrink_list))
- osc_add_shrink_grant(cli);
-}
-
-/* We assume that the reason this OSC got a short read is that it read
- * beyond the end of a stripe file; i.e. lustre is reading a sparse file
- * via the LOV, and it _knows_ it's reading inside the file, it's just that
- * this stripe never got written at or beyond this stripe offset yet. */
-static void handle_short_read(int nob_read, u32 page_count,
- struct brw_page **pga)
-{
- char *ptr;
- int i = 0;
-
- /* skip bytes read OK */
- while (nob_read > 0) {
- LASSERT(page_count > 0);
-
- if (pga[i]->count > nob_read) {
- /* EOF inside this page */
- ptr = kmap(pga[i]->pg) +
- (pga[i]->off & ~CFS_PAGE_MASK);
- memset(ptr + nob_read, 0, pga[i]->count - nob_read);
- kunmap(pga[i]->pg);
- page_count--;
- i++;
- break;
- }
-
- nob_read -= pga[i]->count;
- page_count--;
- i++;
- }
-
- /* zero remaining pages */
- while (page_count-- > 0) {
- ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
- memset(ptr, 0, pga[i]->count);
- kunmap(pga[i]->pg);
- i++;
- }
-}
-
-static int check_write_rcs(struct ptlrpc_request *req,
- int requested_nob, int niocount,
- u32 page_count, struct brw_page **pga)
-{
- int i;
- __u32 *remote_rcs;
-
- remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
- sizeof(*remote_rcs) *
- niocount);
- if (remote_rcs == NULL) {
- CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
- return -EPROTO;
- }
-
- /* return error if any niobuf was in error */
- for (i = 0; i < niocount; i++) {
- if ((int)remote_rcs[i] < 0)
- return remote_rcs[i];
-
- if (remote_rcs[i] != 0) {
- CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
- i, remote_rcs[i], req);
- return -EPROTO;
- }
- }
-
- if (req->rq_bulk->bd_nob_transferred != requested_nob) {
- CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
- req->rq_bulk->bd_nob_transferred, requested_nob);
- return -EPROTO;
- }
-
- return 0;
-}
-
-static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
-{
- if (p1->flag != p2->flag) {
- unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
- OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
-
- /* warn if we try to combine flags that we don't know to be
- * safe to combine */
- if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
- CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
- p1->flag, p2->flag);
- }
- return 0;
- }
-
- return (p1->off + p1->count == p2->off);
-}
-
-static u32 osc_checksum_bulk(int nob, u32 pg_count,
- struct brw_page **pga, int opc,
- cksum_type_t cksum_type)
-{
- __u32 cksum;
- int i = 0;
- struct cfs_crypto_hash_desc *hdesc;
- unsigned int bufsize;
- int err;
- unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
-
- LASSERT(pg_count > 0);
-
- hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
- if (IS_ERR(hdesc)) {
- CERROR("Unable to initialize checksum hash %s\n",
- cfs_crypto_hash_name(cfs_alg));
- return PTR_ERR(hdesc);
- }
-
- while (nob > 0 && pg_count > 0) {
- int count = pga[i]->count > nob ? nob : pga[i]->count;
-
- /* corrupt the data before we compute the checksum, to
- * simulate an OST->client data error */
- if (i == 0 && opc == OST_READ &&
- OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
- unsigned char *ptr = kmap(pga[i]->pg);
- int off = pga[i]->off & ~CFS_PAGE_MASK;
- memcpy(ptr + off, "bad1", min(4, nob));
- kunmap(pga[i]->pg);
- }
- cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~CFS_PAGE_MASK,
- count);
- CDEBUG(D_PAGE,
- "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
- pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
- (long)pga[i]->pg->flags, page_count(pga[i]->pg),
- page_private(pga[i]->pg),
- (int)(pga[i]->off & ~CFS_PAGE_MASK));
-
- nob -= pga[i]->count;
- pg_count--;
- i++;
- }
-
- bufsize = 4;
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
-
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
-
- /* For sending we only compute the wrong checksum instead
- * of corrupting the data so it is still correct on a redo */
- if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
- cksum++;
-
- return cksum;
-}
-
-static int osc_brw_prep_request(int cmd, struct client_obd *cli,
- struct obdo *oa,
- struct lov_stripe_md *lsm, u32 page_count,
- struct brw_page **pga,
- struct ptlrpc_request **reqp,
- int reserve,
- int resend)
-{
- struct ptlrpc_request *req;
- struct ptlrpc_bulk_desc *desc;
- struct ost_body *body;
- struct obd_ioobj *ioobj;
- struct niobuf_remote *niobuf;
- int niocount, i, requested_nob, opc, rc;
- struct osc_brw_async_args *aa;
- struct req_capsule *pill;
- struct brw_page *pg_prev;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
- return -ENOMEM; /* Recoverable */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
- return -EINVAL; /* Fatal */
-
- if ((cmd & OBD_BRW_WRITE) != 0) {
- opc = OST_WRITE;
- req = ptlrpc_request_alloc_pool(cli->cl_import,
- osc_rq_pool,
- &RQF_OST_BRW_WRITE);
- } else {
- opc = OST_READ;
- req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
- }
- if (req == NULL)
- return -ENOMEM;
-
- for (niocount = i = 1; i < page_count; i++) {
- if (!can_merge_pages(pga[i - 1], pga[i]))
- niocount++;
- }
-
- pill = &req->rq_pill;
- req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
- sizeof(*ioobj));
- req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
- niocount * sizeof(*niobuf));
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
- ptlrpc_at_set_req_timeout(req);
- /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
- * retry logic */
- req->rq_no_retry_einprogress = 1;
-
- desc = ptlrpc_prep_bulk_imp(req, page_count,
- cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
- opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
- OST_BULK_PORTAL);
-
- if (desc == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- /* NB request now owns desc and will free it when it gets freed */
-
- body = req_capsule_client_get(pill, &RMF_OST_BODY);
- ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
- niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
- LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
-
- lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
-
- obdo_to_ioobj(oa, ioobj);
- ioobj->ioo_bufcnt = niocount;
- /* The high bits of ioo_max_brw tell the server the _maximum_ number of
- * bulks that might be sent for this request. The actual number is decided
- * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
- * "max - 1" for compatibility with old clients sending "0", and also so
- * the actual maximum is a power-of-two number, not one less. LU-1431 */
- ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
- LASSERT(page_count > 0);
- pg_prev = pga[0];
- for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
- struct brw_page *pg = pga[i];
- int poff = pg->off & ~CFS_PAGE_MASK;
-
- LASSERT(pg->count > 0);
- /* make sure there is no gap in the middle of page array */
- LASSERTF(page_count == 1 ||
- (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
- ergo(i > 0 && i < page_count - 1,
- poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
- ergo(i == page_count - 1, poff == 0)),
- "i: %d/%d pg: %p off: %llu, count: %u\n",
- i, page_count, pg, pg->off, pg->count);
- LASSERTF(i == 0 || pg->off > pg_prev->off,
- "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
- i, page_count,
- pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
- pg_prev->pg, page_private(pg_prev->pg),
- pg_prev->pg->index, pg_prev->off);
- LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
- (pg->flag & OBD_BRW_SRVLOCK));
-
- ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
- requested_nob += pg->count;
-
- if (i > 0 && can_merge_pages(pg_prev, pg)) {
- niobuf--;
- niobuf->len += pg->count;
- } else {
- niobuf->offset = pg->off;
- niobuf->len = pg->count;
- niobuf->flags = pg->flag;
- }
- pg_prev = pg;
- }
-
- LASSERTF((void *)(niobuf - niocount) ==
- req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
- "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
- &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
-
- osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
- if (resend) {
- if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
- body->oa.o_valid |= OBD_MD_FLFLAGS;
- body->oa.o_flags = 0;
- }
- body->oa.o_flags |= OBD_FL_RECOV_RESEND;
- }
-
- if (osc_should_shrink_grant(cli))
- osc_shrink_grant_local(cli, &body->oa);
-
- /* size[REQ_REC_OFF] still sizeof (*body) */
- if (opc == OST_WRITE) {
- if (cli->cl_checksum &&
- !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
- /* store cl_cksum_type in a local variable since
- * it can be changed via lprocfs */
- cksum_type_t cksum_type = cli->cl_cksum_type;
-
- if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
- oa->o_flags &= OBD_FL_LOCAL_MASK;
- body->oa.o_flags = 0;
- }
- body->oa.o_flags |= cksum_type_pack(cksum_type);
- body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- body->oa.o_cksum = osc_checksum_bulk(requested_nob,
- page_count, pga,
- OST_WRITE,
- cksum_type);
- CDEBUG(D_PAGE, "checksum at write origin: %x\n",
- body->oa.o_cksum);
- /* save this in 'oa', too, for later checking */
- oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- oa->o_flags |= cksum_type_pack(cksum_type);
- } else {
- /* clear out the checksum flag, in case this is a
- * resend but cl_checksum is no longer set. b=11238 */
- oa->o_valid &= ~OBD_MD_FLCKSUM;
- }
- oa->o_cksum = body->oa.o_cksum;
- /* 1 RC per niobuf */
- req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
- sizeof(__u32) * niocount);
- } else {
- if (cli->cl_checksum &&
- !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
- if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
- body->oa.o_flags = 0;
- body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
- body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
- }
- }
- ptlrpc_request_set_replen(req);
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oa = oa;
- aa->aa_requested_nob = requested_nob;
- aa->aa_nio_count = niocount;
- aa->aa_page_count = page_count;
- aa->aa_resends = 0;
- aa->aa_ppga = pga;
- aa->aa_cli = cli;
- INIT_LIST_HEAD(&aa->aa_oaps);
-
- *reqp = req;
- return 0;
-
- out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
- __u32 client_cksum, __u32 server_cksum, int nob,
- u32 page_count, struct brw_page **pga,
- cksum_type_t client_cksum_type)
-{
- __u32 new_cksum;
- char *msg;
- cksum_type_t cksum_type;
-
- if (server_cksum == client_cksum) {
- CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
- return 0;
- }
-
- cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
- oa->o_flags : 0);
- new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
- cksum_type);
-
- if (cksum_type != client_cksum_type)
- msg = "the server did not use the checksum type specified in the original request - likely a protocol problem"
- ;
- else if (new_cksum == server_cksum)
- msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)"
- ;
- else if (new_cksum == client_cksum)
- msg = "changed in transit before arrival at OST";
- else
- msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)"
- ;
-
- LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
- " object "DOSTID" extent [%llu-%llu]\n",
- msg, libcfs_nid2str(peer->nid),
- oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
- oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
- oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
- POSTID(&oa->o_oi), pga[0]->off,
- pga[page_count-1]->off + pga[page_count-1]->count - 1);
- CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
- client_cksum, client_cksum_type,
- server_cksum, cksum_type, new_cksum);
- return 1;
-}
-
-/* Note rc enters this function as number of bytes transferred */
-static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
-{
- struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
- const lnet_process_id_t *peer =
- &req->rq_import->imp_connection->c_peer;
- struct client_obd *cli = aa->aa_cli;
- struct ost_body *body;
- __u32 client_cksum = 0;
-
- if (rc < 0 && rc != -EDQUOT) {
- DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
- return rc;
- }
-
- LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
- body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
- DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
- return -EPROTO;
- }
-
- /* set/clear over quota flag for a uid/gid */
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
- body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
- unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
-
- CDEBUG(D_QUOTA, "setdq for [%u %u] with valid %#llx, flags %x\n",
- body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
- body->oa.o_flags);
- osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
- }
-
- osc_update_grant(cli, body);
-
- if (rc < 0)
- return rc;
-
- if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
- client_cksum = aa->aa_oa->o_cksum; /* save for later */
-
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
- if (rc > 0) {
- CERROR("Unexpected +ve rc %d\n", rc);
- return -EPROTO;
- }
- LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
-
- if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
- return -EAGAIN;
-
- if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
- check_write_checksum(&body->oa, peer, client_cksum,
- body->oa.o_cksum, aa->aa_requested_nob,
- aa->aa_page_count, aa->aa_ppga,
- cksum_type_unpack(aa->aa_oa->o_flags)))
- return -EAGAIN;
-
- rc = check_write_rcs(req, aa->aa_requested_nob,
- aa->aa_nio_count,
- aa->aa_page_count, aa->aa_ppga);
- goto out;
- }
-
- /* The rest of this function executes only for OST_READs */
-
- /* if unwrap_bulk failed, return -EAGAIN to retry */
- rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
- if (rc < 0) {
- rc = -EAGAIN;
- goto out;
- }
-
- if (rc > aa->aa_requested_nob) {
- CERROR("Unexpected rc %d (%d requested)\n", rc,
- aa->aa_requested_nob);
- return -EPROTO;
- }
-
- if (rc != req->rq_bulk->bd_nob_transferred) {
- CERROR("Unexpected rc %d (%d transferred)\n",
- rc, req->rq_bulk->bd_nob_transferred);
- return -EPROTO;
- }
-
- if (rc < aa->aa_requested_nob)
- handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
-
- if (body->oa.o_valid & OBD_MD_FLCKSUM) {
- static int cksum_counter;
- __u32 server_cksum = body->oa.o_cksum;
- char *via;
- char *router;
- cksum_type_t cksum_type;
-
- cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ?
- body->oa.o_flags : 0);
- client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
- aa->aa_ppga, OST_READ,
- cksum_type);
-
- if (peer->nid == req->rq_bulk->bd_sender) {
- via = router = "";
- } else {
- via = " via ";
- router = libcfs_nid2str(req->rq_bulk->bd_sender);
- }
-
- if (server_cksum != client_cksum) {
- LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
- req->rq_import->imp_obd->obd_name,
- libcfs_nid2str(peer->nid),
- via, router,
- body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_seq : (__u64)0,
- body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_oid : 0,
- body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_ver : 0,
- POSTID(&body->oa.o_oi),
- aa->aa_ppga[0]->off,
- aa->aa_ppga[aa->aa_page_count-1]->off +
- aa->aa_ppga[aa->aa_page_count-1]->count -
- 1);
- CERROR("client %x, server %x, cksum_type %x\n",
- client_cksum, server_cksum, cksum_type);
- cksum_counter = 0;
- aa->aa_oa->o_cksum = client_cksum;
- rc = -EAGAIN;
- } else {
- cksum_counter++;
- CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
- rc = 0;
- }
- } else if (unlikely(client_cksum)) {
- static int cksum_missed;
-
- cksum_missed++;
- if ((cksum_missed & (-cksum_missed)) == cksum_missed)
- CERROR("Checksum %u requested from %s but not sent\n",
- cksum_missed, libcfs_nid2str(peer->nid));
- } else {
- rc = 0;
- }
-out:
- if (rc >= 0)
- lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
- aa->aa_oa, &body->oa);
-
- return rc;
-}
-
-static int osc_brw_redo_request(struct ptlrpc_request *request,
- struct osc_brw_async_args *aa, int rc)
-{
- struct ptlrpc_request *new_req;
- struct osc_brw_async_args *new_aa;
- struct osc_async_page *oap;
-
- DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
- "redo for recoverable error %d", rc);
-
- rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
- OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
- aa->aa_cli, aa->aa_oa,
- NULL /* lsm unused by osc currently */,
- aa->aa_page_count, aa->aa_ppga,
- &new_req, 0, 1);
- if (rc)
- return rc;
-
- list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request != NULL) {
- LASSERTF(request == oap->oap_request,
- "request %p != oap_request %p\n",
- request, oap->oap_request);
- if (oap->oap_interrupted) {
- ptlrpc_req_finished(new_req);
- return -EINTR;
- }
- }
- }
- /* New request takes over pga and oaps from old request.
- * Note that copying a list_head doesn't work, need to move it... */
- aa->aa_resends++;
- new_req->rq_interpret_reply = request->rq_interpret_reply;
- new_req->rq_async_args = request->rq_async_args;
- /* cap resend delay to the current request timeout, this is similar to
- * what ptlrpc does (see after_reply()) */
- if (aa->aa_resends > new_req->rq_timeout)
- new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
- else
- new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
- new_req->rq_generation_set = 1;
- new_req->rq_import_generation = request->rq_import_generation;
-
- new_aa = ptlrpc_req_async_args(new_req);
-
- INIT_LIST_HEAD(&new_aa->aa_oaps);
- list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
- INIT_LIST_HEAD(&new_aa->aa_exts);
- list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
- new_aa->aa_resends = aa->aa_resends;
-
- list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request) {
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = ptlrpc_request_addref(new_req);
- }
- }
-
- /* XXX: This code will run into problems if we ever want to support
- * adding a series of BRW RPCs into a self-defined ptlrpc_request_set
- * and waiting for all of them to finish. We should inherit the request
- * set from the old request. */
- ptlrpcd_add_req(new_req);
-
- DEBUG_REQ(D_INFO, new_req, "new request");
- return 0;
-}
-
-/*
- * ugh, we want disk allocation on the target to happen in offset order. we'll
- * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
- * fine for our small page arrays and doesn't require allocation. it's an
- * insertion sort that swaps elements that are strides apart, shrinking the
- * stride down until it's '1' and the array is sorted.
- */
-static void sort_brw_pages(struct brw_page **array, int num)
-{
- int stride, i, j;
- struct brw_page *tmp;
-
- if (num == 1)
- return;
- for (stride = 1; stride < num ; stride = (stride * 3) + 1)
- ;
-
- do {
- stride /= 3;
- for (i = stride ; i < num ; i++) {
- tmp = array[i];
- j = i;
- while (j >= stride && array[j - stride]->off > tmp->off) {
- array[j] = array[j - stride];
- j -= stride;
- }
- array[j] = tmp;
- }
- } while (stride > 1);
-}
-
-static void osc_release_ppga(struct brw_page **ppga, u32 count)
-{
- LASSERT(ppga != NULL);
- kfree(ppga);
-}
-
-static int brw_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
-{
- struct osc_brw_async_args *aa = data;
- struct osc_extent *ext;
- struct osc_extent *tmp;
- struct cl_object *obj = NULL;
- struct client_obd *cli = aa->aa_cli;
-
- rc = osc_brw_fini_request(req, rc);
- CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
- /* When the server returns -EINPROGRESS, the client should always retry,
- * regardless of how many times the bulk has already been resent. */
- if (osc_recoverable_error(rc)) {
- if (req->rq_import_generation !=
- req->rq_import->imp_generation) {
- CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n",
- req->rq_import->imp_obd->obd_name,
- POSTID(&aa->aa_oa->o_oi), rc);
- } else if (rc == -EINPROGRESS ||
- client_should_resend(aa->aa_resends, aa->aa_cli)) {
- rc = osc_brw_redo_request(req, aa, rc);
- } else {
- CERROR("%s: too many resent retries for object: %llu:%llu, rc = %d.\n",
- req->rq_import->imp_obd->obd_name,
- POSTID(&aa->aa_oa->o_oi), rc);
- }
-
- if (rc == 0)
- return 0;
- else if (rc == -EAGAIN || rc == -EINPROGRESS)
- rc = -EIO;
- }
-
- list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
- if (obj == NULL && rc == 0) {
- obj = osc2cl(ext->oe_obj);
- cl_object_get(obj);
- }
-
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 1, rc);
- }
- LASSERT(list_empty(&aa->aa_exts));
- LASSERT(list_empty(&aa->aa_oaps));
-
- if (obj != NULL) {
- struct obdo *oa = aa->aa_oa;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- unsigned long valid = 0;
-
- LASSERT(rc == 0);
- if (oa->o_valid & OBD_MD_FLBLOCKS) {
- attr->cat_blocks = oa->o_blocks;
- valid |= CAT_BLOCKS;
- }
- if (oa->o_valid & OBD_MD_FLMTIME) {
- attr->cat_mtime = oa->o_mtime;
- valid |= CAT_MTIME;
- }
- if (oa->o_valid & OBD_MD_FLATIME) {
- attr->cat_atime = oa->o_atime;
- valid |= CAT_ATIME;
- }
- if (oa->o_valid & OBD_MD_FLCTIME) {
- attr->cat_ctime = oa->o_ctime;
- valid |= CAT_CTIME;
- }
- if (valid != 0) {
- cl_object_attr_lock(obj);
- cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
- }
- cl_object_put(env, obj);
- }
- OBDO_FREE(aa->aa_oa);
-
- cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
- req->rq_bulk->bd_nob_transferred);
- osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
- ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
- * is called so we know whether to go to sync BRWs or wait for more
- * RPCs to complete */
- if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
- cli->cl_w_in_flight--;
- else
- cli->cl_r_in_flight--;
- osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- osc_io_unplug(env, cli, NULL);
- return rc;
-}
-
-/**
- * Build an RPC from the list of extents @ext_list. The caller must ensure
- * that the total number of pages in this list does not exceed the maximum
- * pages per RPC. Extents in the list must be in OES_RPC state.
- */
-int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
- struct list_head *ext_list, int cmd)
-{
- struct ptlrpc_request *req = NULL;
- struct osc_extent *ext;
- struct brw_page **pga = NULL;
- struct osc_brw_async_args *aa = NULL;
- struct obdo *oa = NULL;
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
- struct cl_req *clerq = NULL;
- enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
- struct ldlm_lock *lock = NULL;
- struct cl_req_attr *crattr = NULL;
- u64 starting_offset = OBD_OBJECT_EOF;
- u64 ending_offset = 0;
- int mpflag = 0;
- int mem_tight = 0;
- int page_count = 0;
- int i;
- int rc;
- struct ost_body *body;
- LIST_HEAD(rpc_list);
-
- LASSERT(!list_empty(ext_list));
-
- /* add pages into rpc_list to build BRW rpc */
- list_for_each_entry(ext, ext_list, oe_link) {
- LASSERT(ext->oe_state == OES_RPC);
- mem_tight |= ext->oe_memalloc;
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- ++page_count;
- list_add_tail(&oap->oap_rpc_item, &rpc_list);
- if (starting_offset > oap->oap_obj_off)
- starting_offset = oap->oap_obj_off;
- else
- LASSERT(oap->oap_page_off == 0);
- if (ending_offset < oap->oap_obj_off + oap->oap_count)
- ending_offset = oap->oap_obj_off +
- oap->oap_count;
- else
- LASSERT(oap->oap_page_off + oap->oap_count ==
- PAGE_CACHE_SIZE);
- }
- }
-
- if (mem_tight)
- mpflag = cfs_memory_pressure_get_and_set();
-
- crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
- if (!crattr) {
- rc = -ENOMEM;
- goto out;
- }
-
- pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
- if (pga == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- OBDO_ALLOC(oa);
- if (oa == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- i = 0;
- list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
- struct cl_page *page = oap2cl_page(oap);
- if (clerq == NULL) {
- clerq = cl_req_alloc(env, page, crt,
- 1 /* only 1-object rpcs for now */);
- if (IS_ERR(clerq)) {
- rc = PTR_ERR(clerq);
- goto out;
- }
- lock = oap->oap_ldlm_lock;
- }
- if (mem_tight)
- oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
- pga[i] = &oap->oap_brw_page;
- pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
- CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, page_index(oap->oap_page), oap,
- pga[i]->flag);
- i++;
- cl_req_page_add(env, clerq, page);
- }
-
- /* always get the data for the obdo for the rpc */
- LASSERT(clerq != NULL);
- crattr->cra_oa = oa;
- cl_req_attr_set(env, clerq, crattr, ~0ULL);
- if (lock) {
- oa->o_handle = lock->l_remote_handle;
- oa->o_valid |= OBD_MD_FLHANDLE;
- }
-
- rc = cl_req_prep(env, clerq);
- if (rc != 0) {
- CERROR("cl_req_prep failed: %d\n", rc);
- goto out;
- }
-
- sort_brw_pages(pga, page_count);
- rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
- pga, &req, 1, 0);
- if (rc != 0) {
- CERROR("prep_req failed: %d\n", rc);
- goto out;
- }
-
- req->rq_interpret_reply = brw_interpret;
-
- if (mem_tight != 0)
- req->rq_memalloc = 1;
-
-	/* Need to update the timestamps after the request is built in case
-	 * we race with setattr (locally or in the queue at the OST). If the
-	 * OST receives a later setattr before an earlier BRW (as determined
-	 * by the request xid), it will not use the BRW timestamps. Sadly,
-	 * there is no obvious way to do this in a single call. bug 10150 */
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- crattr->cra_oa = &body->oa;
- cl_req_attr_set(env, clerq, crattr,
- OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
-
- lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- INIT_LIST_HEAD(&aa->aa_oaps);
- list_splice_init(&rpc_list, &aa->aa_oaps);
- INIT_LIST_HEAD(&aa->aa_exts);
- list_splice_init(ext_list, &aa->aa_exts);
- aa->aa_clerq = clerq;
-
-	/* Queued sync pages can be torn down while the pages
-	 * are between the pending list and the RPC. */
- tmp = NULL;
- list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- /* only one oap gets a request reference */
- if (tmp == NULL)
- tmp = oap;
- if (oap->oap_interrupted && !req->rq_intr) {
- CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
- oap, req);
- ptlrpc_mark_interrupted(req);
- }
- }
- if (tmp != NULL)
- tmp->oap_request = ptlrpc_request_addref(req);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- starting_offset >>= PAGE_CACHE_SHIFT;
- if (cmd == OBD_BRW_READ) {
- cli->cl_r_in_flight++;
- lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
- lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
- starting_offset + 1);
- } else {
- cli->cl_w_in_flight++;
- lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
- lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
- lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
- starting_offset + 1);
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
- page_count, aa, cli->cl_r_in_flight,
- cli->cl_w_in_flight);
-
- ptlrpcd_add_req(req);
- rc = 0;
-
-out:
- if (mem_tight != 0)
- cfs_memory_pressure_restore(mpflag);
-
- if (crattr != NULL) {
- kfree(crattr);
- }
-
- if (rc != 0) {
- LASSERT(req == NULL);
-
- if (oa)
- OBDO_FREE(oa);
- kfree(pga);
- /* this should happen rarely and is pretty bad, it makes the
- * pending list not follow the dirty order */
- while (!list_empty(ext_list)) {
- ext = list_entry(ext_list->next, struct osc_extent,
- oe_link);
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 0, rc);
- }
- if (clerq && !IS_ERR(clerq))
- cl_req_completion(env, clerq, rc);
- }
- return rc;
-}
-
-static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
- struct ldlm_enqueue_info *einfo)
-{
- void *data = einfo->ei_cbdata;
- int set = 0;
-
- LASSERT(lock != NULL);
- LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
- LASSERT(lock->l_resource->lr_type == einfo->ei_type);
- LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
- LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
-
- lock_res_and_lock(lock);
- spin_lock(&osc_ast_guard);
-
- if (lock->l_ast_data == NULL)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- set = 1;
-
- spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(lock);
-
- return set;
-}
-
-static int osc_set_data_with_check(struct lustre_handle *lockh,
- struct ldlm_enqueue_info *einfo)
-{
- struct ldlm_lock *lock = ldlm_handle2lock(lockh);
- int set = 0;
-
- if (lock != NULL) {
- set = osc_set_lock_data_with_check(lock, einfo);
- LDLM_LOCK_PUT(lock);
- } else
- CERROR("lockh %p, data %p - client evicted?\n",
- lockh, einfo->ei_cbdata);
- return set;
-}
-
-/* Find any ldlm lock of the inode in osc.
- * Return: 0 if no lock is found
- *         1 if one is found
- *         < 0 on error */
-static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
- ldlm_iterator_t replace, void *data)
-{
- struct ldlm_res_id res_id;
- struct obd_device *obd = class_exp2obd(exp);
- int rc = 0;
-
- ostid_build_res_name(&lsm->lsm_oi, &res_id);
- rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
- if (rc == LDLM_ITER_STOP)
- return 1;
- if (rc == LDLM_ITER_CONTINUE)
- return 0;
- return rc;
-}
-
-static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
- obd_enqueue_update_f upcall, void *cookie,
- __u64 *flags, int agl, int rc)
-{
- int intent = *flags & LDLM_FL_HAS_INTENT;
-
- if (intent) {
- /* The request was created before ldlm_cli_enqueue call. */
- if (rc == ELDLM_LOCK_ABORTED) {
- struct ldlm_reply *rep;
- rep = req_capsule_server_get(&req->rq_pill,
- &RMF_DLM_REP);
-
- LASSERT(rep != NULL);
- rep->lock_policy_res1 =
- ptlrpc_status_ntoh(rep->lock_policy_res1);
- if (rep->lock_policy_res1)
- rc = rep->lock_policy_res1;
- }
- }
-
- if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
- (rc == 0)) {
- *flags |= LDLM_FL_LVB_READY;
- CDEBUG(D_INODE, "got kms %llu blocks %llu mtime %llu\n",
- lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
- }
-
- /* Call the update callback. */
- rc = (*upcall)(cookie, rc);
- return rc;
-}
-
-static int osc_enqueue_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_enqueue_args *aa, int rc)
-{
- struct ldlm_lock *lock;
- struct lustre_handle handle;
- __u32 mode;
- struct ost_lvb *lvb;
- __u32 lvb_len;
- __u64 *flags = aa->oa_flags;
-
- /* Make a local copy of a lock handle and a mode, because aa->oa_*
- * might be freed anytime after lock upcall has been called. */
- lustre_handle_copy(&handle, aa->oa_lockh);
- mode = aa->oa_ei->ei_mode;
-
- /* ldlm_cli_enqueue is holding a reference on the lock, so it must
- * be valid. */
- lock = ldlm_handle2lock(&handle);
-
-	/* Take an additional reference so that a blocking AST that
-	 * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
-	 * to arrive after an upcall has been executed by
-	 * osc_enqueue_fini(). */
- ldlm_lock_addref(&handle, mode);
-
- /* Let CP AST to grant the lock first. */
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
-
- if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
- lvb = NULL;
- lvb_len = 0;
- } else {
- lvb = aa->oa_lvb;
- lvb_len = sizeof(*aa->oa_lvb);
- }
-
- /* Complete obtaining the lock procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
- mode, flags, lvb, lvb_len, &handle, rc);
- /* Complete osc stuff. */
- rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
- flags, aa->oa_agl, rc);
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
-
- /* Release the lock for async request. */
- if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
- /*
- * Releases a reference taken by ldlm_cli_enqueue(), if it is
- * not already released by
- * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
- */
- ldlm_lock_decref(&handle, mode);
-
- LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
- aa->oa_lockh, req, aa);
- ldlm_lock_decref(&handle, mode);
- LDLM_LOCK_PUT(lock);
- return rc;
-}
-
-struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
-
-/* When enqueuing asynchronously, locks are not ordered; we can obtain a lock
- * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
- * other synchronous requests. However, keeping some locks while trying to
- * obtain others may take a considerable amount of time in case of an OST
- * failure, and when other sync requests cannot get a lock released by a
- * client, that client is excluded from the cluster -- such scenarios make
- * life difficult, so release locks just after they are obtained. */
-int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
- struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall, void *cookie,
- struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
- struct ptlrpc_request_set *rqset, int async, int agl)
-{
- struct obd_device *obd = exp->exp_obd;
- struct ptlrpc_request *req = NULL;
- int intent = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
- ldlm_mode_t mode;
- int rc;
-
- /* Filesystem lock extents are extended to page boundaries so that
- * dealing with the page cache is a little smoother. */
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
-
- /*
- * kms is not valid when either object is completely fresh (so that no
- * locks are cached), or object was evicted. In the latter case cached
- * lock cannot be used, because it would prime inode state with
- * potentially stale LVB.
- */
- if (!kms_valid)
- goto no_match;
-
- /* Next, search for already existing extent locks that will cover us */
- /* If we're trying to read, we also search for an existing PW lock. The
- * VFS and page cache already protect us locally, so lots of readers/
- * writers can share a single PW lock.
- *
- * There are problems with conversion deadlocks, so instead of
- * converting a read lock to a write lock, we'll just enqueue a new
- * one.
- *
- * At some point we should cancel the read lock instead of making them
- * send us a blocking callback, but there are problems with canceling
- * locks out from other users right now, too. */
- mode = einfo->ei_mode;
- if (einfo->ei_mode == LCK_PR)
- mode |= LCK_PW;
- mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
- einfo->ei_type, policy, mode, lockh, 0);
- if (mode) {
- struct ldlm_lock *matched = ldlm_handle2lock(lockh);
-
- if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
-			/* For AGL, if the enqueue RPC is sent but the lock is
-			 * not granted, then skip processing this stripe.
-			 * Return -ECANCELED to tell the caller. */
- ldlm_lock_decref(lockh, mode);
- LDLM_LOCK_PUT(matched);
- return -ECANCELED;
- }
-
- if (osc_set_lock_data_with_check(matched, einfo)) {
- *flags |= LDLM_FL_LVB_READY;
-			/* addref the lock only for non-async requests when a
-			 * PW lock is matched although we asked for PR. */
- if (!rqset && einfo->ei_mode != mode)
- ldlm_lock_addref(lockh, LCK_PR);
- if (intent) {
- /* I would like to be able to ASSERT here that
- * rss <= kms, but I can't, for reasons which
- * are explained in lov_enqueue() */
- }
-
- /* We already have a lock, and it's referenced.
- *
- * At this point, the cl_lock::cll_state is CLS_QUEUING,
- * AGL upcall may change it to CLS_HELD directly. */
- (*upcall)(cookie, ELDLM_OK);
-
- if (einfo->ei_mode != mode)
- ldlm_lock_decref(lockh, LCK_PW);
- else if (rqset)
- /* For async requests, decref the lock. */
- ldlm_lock_decref(lockh, einfo->ei_mode);
- LDLM_LOCK_PUT(matched);
- return ELDLM_OK;
- }
-
- ldlm_lock_decref(lockh, mode);
- LDLM_LOCK_PUT(matched);
- }
-
- no_match:
- if (intent) {
- LIST_HEAD(cancels);
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE_LVB);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- ptlrpc_request_set_replen(req);
- }
-
- /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
- *flags &= ~LDLM_FL_BLOCK_GRANTED;
-
- rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), LVB_T_OST, lockh, async);
- if (rqset) {
- if (!rc) {
- struct osc_enqueue_args *aa;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->oa_ei = einfo;
- aa->oa_exp = exp;
- aa->oa_flags = flags;
- aa->oa_upcall = upcall;
- aa->oa_cookie = cookie;
- aa->oa_lvb = lvb;
- aa->oa_lockh = lockh;
- aa->oa_agl = !!agl;
-
- req->rq_interpret_reply =
- (ptlrpc_interpterer_t)osc_enqueue_interpret;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
- } else if (intent) {
- ptlrpc_req_finished(req);
- }
- return rc;
- }
-
- rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
- if (intent)
- ptlrpc_req_finished(req);
-
- return rc;
-}
-
-int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
- __u64 *flags, void *data, struct lustre_handle *lockh,
- int unref)
-{
- struct obd_device *obd = exp->exp_obd;
- __u64 lflags = *flags;
- ldlm_mode_t rc;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
- return -EIO;
-
- /* Filesystem lock extents are extended to page boundaries so that
- * dealing with the page cache is a little smoother */
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
-
- /* Next, search for already existing extent locks that will cover us */
- /* If we're trying to read, we also search for an existing PW lock. The
- * VFS and page cache already protect us locally, so lots of readers/
- * writers can share a single PW lock. */
- rc = mode;
- if (mode == LCK_PR)
- rc |= LCK_PW;
- rc = ldlm_lock_match(obd->obd_namespace, lflags,
- res_id, type, policy, rc, lockh, unref);
- if (rc) {
- if (data != NULL) {
- if (!osc_set_data_with_check(lockh, data)) {
- if (!(lflags & LDLM_FL_TEST_LOCK))
- ldlm_lock_decref(lockh, rc);
- return 0;
- }
- }
- if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
- ldlm_lock_addref(lockh, LCK_PR);
- ldlm_lock_decref(lockh, LCK_PW);
- }
- return rc;
- }
- return rc;
-}
-
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
-{
- if (unlikely(mode == LCK_GROUP))
- ldlm_lock_decref_and_cancel(lockh, mode);
- else
- ldlm_lock_decref(lockh, mode);
-
- return 0;
-}
-
-static int osc_statfs_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_async_args *aa, int rc)
-{
- struct obd_statfs *msfs;
-
- if (rc == -EBADR)
- /* The request has in fact never been sent
- * due to issues at a higher level (LOV).
- * Exit immediately since the caller is
- * aware of the problem and takes care
- * of the clean up */
- return rc;
-
- if ((rc == -ENOTCONN || rc == -EAGAIN) &&
- (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
- rc = 0;
- goto out;
- }
-
- if (rc != 0)
- goto out;
-
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *aa->aa_oi->oi_osfs = *msfs;
-out:
- rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- return rc;
-}
-
-static int osc_statfs_async(struct obd_export *exp,
- struct obd_info *oinfo, __u64 max_age,
- struct ptlrpc_request_set *rqset)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct ptlrpc_request *req;
- struct osc_async_args *aa;
- int rc;
-
- /* We could possibly pass max_age in the request (as an absolute
- * timestamp or a "seconds.usec ago") so the target can avoid doing
- * extra calls into the filesystem if that isn't necessary (e.g.
- * during mount that would help a bit). Having relative timestamps
- * is not so great if request processing is slow, while absolute
- * timestamps are not ideal because they need time synchronization. */
- req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
-		/* procfs requests must not wait for statfs, to avoid deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
- req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->aa_oi = oinfo;
-
- ptlrpc_set_add_req(rqset, req);
- return 0;
-}
-
-static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
- struct obd_statfs *osfs, __u64 max_age, __u32 flags)
-{
- struct obd_device *obd = class_exp2obd(exp);
- struct obd_statfs *msfs;
- struct ptlrpc_request *req;
- struct obd_import *imp = NULL;
- int rc;
-
-	/* Since the request might also come from lprocfs, we need to
-	 * synchronize this with client_disconnect_export() (bug 15684). */
- down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import)
- imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
- if (!imp)
- return -ENODEV;
-
- /* We could possibly pass max_age in the request (as an absolute
- * timestamp or a "seconds.usec ago") so the target can avoid doing
- * extra calls into the filesystem if that isn't necessary (e.g.
- * during mount that would help a bit). Having relative timestamps
- * is not so great if request processing is slow, while absolute
- * timestamps are not ideal because they need time synchronization. */
- req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
-
- class_import_put(imp);
-
- if (req == NULL)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
- ptlrpc_request_set_replen(req);
- req->rq_request_portal = OST_CREATE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
-
- if (flags & OBD_STATFS_NODELAY) {
-		/* procfs requests must not wait for statfs, to avoid deadlock */
- req->rq_no_resend = 1;
- req->rq_no_delay = 1;
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *osfs = *msfs;
-
- out:
- ptlrpc_req_finished(req);
- return rc;
-}
-
-/* Retrieve object striping information.
- *
- * @lump is a pointer to an in-core struct with lmm_ost_count indicating
- * the maximum number of OST indices that will fit in the user buffer.
- * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
- */
-static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
-{
- /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
- struct lov_user_md_v3 lum, *lumk;
- struct lov_user_ost_data_v1 *lmm_objects;
- int rc = 0, lum_size;
-
- if (!lsm)
- return -ENODATA;
-
- /* we only need the header part from user space to get lmm_magic and
- * lmm_stripe_count, (the header part is common to v1 and v3) */
- lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(&lum, lump, lum_size))
- return -EFAULT;
-
- if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
- (lum.lmm_magic != LOV_USER_MAGIC_V3))
- return -EINVAL;
-
- /* lov_user_md_vX and lov_mds_md_vX must have the same size */
- LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
- LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
- LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
-
- /* we can use lov_mds_md_size() to compute lum_size
- * because lov_user_md_vX and lov_mds_md_vX have the same size */
- if (lum.lmm_stripe_count > 0) {
- lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
- lumk = kzalloc(lum_size, GFP_NOFS);
- if (!lumk)
- return -ENOMEM;
-
- if (lum.lmm_magic == LOV_USER_MAGIC_V1)
- lmm_objects =
- &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
- else
- lmm_objects = &(lumk->lmm_objects[0]);
- lmm_objects->l_ost_oi = lsm->lsm_oi;
- } else {
- lum_size = lov_mds_md_size(0, lum.lmm_magic);
- lumk = &lum;
- }
-
- lumk->lmm_oi = lsm->lsm_oi;
- lumk->lmm_stripe_count = 1;
-
- if (copy_to_user(lump, lumk, lum_size))
- rc = -EFAULT;
-
- if (lumk != &lum)
- kfree(lumk);
-
- return rc;
-}
-
-
-static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
-{
- struct obd_device *obd = exp->exp_obd;
- struct obd_ioctl_data *data = karg;
- int err = 0;
-
- if (!try_module_get(THIS_MODULE)) {
- CERROR("Can't get module. Is it alive?");
- return -EINVAL;
- }
- switch (cmd) {
- case OBD_IOC_LOV_GET_CONFIG: {
- char *buf;
- struct lov_desc *desc;
- struct obd_uuid uuid;
-
- buf = NULL;
- len = 0;
- if (obd_ioctl_getdata(&buf, &len, uarg)) {
- err = -EINVAL;
- goto out;
- }
-
- data = (struct obd_ioctl_data *)buf;
-
- if (sizeof(*desc) > data->ioc_inllen1) {
- obd_ioctl_freedata(buf, len);
- err = -EINVAL;
- goto out;
- }
-
- if (data->ioc_inllen2 < sizeof(uuid)) {
- obd_ioctl_freedata(buf, len);
- err = -EINVAL;
- goto out;
- }
-
- desc = (struct lov_desc *)data->ioc_inlbuf1;
- desc->ld_tgt_count = 1;
- desc->ld_active_tgt_count = 1;
- desc->ld_default_stripe_count = 1;
- desc->ld_default_stripe_size = 0;
- desc->ld_default_stripe_offset = 0;
- desc->ld_pattern = 0;
- memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
-
- memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
-
- err = copy_to_user(uarg, buf, len);
- if (err)
- err = -EFAULT;
- obd_ioctl_freedata(buf, len);
- goto out;
- }
- case LL_IOC_LOV_SETSTRIPE:
- err = obd_alloc_memmd(exp, karg);
- if (err > 0)
- err = 0;
- goto out;
- case LL_IOC_LOV_GETSTRIPE:
- err = osc_getstripe(karg, uarg);
- goto out;
- case OBD_IOC_CLIENT_RECOVER:
- err = ptlrpc_recover_import(obd->u.cli.cl_import,
- data->ioc_inlbuf1, 0);
- if (err > 0)
- err = 0;
- goto out;
- case IOC_OSC_SET_ACTIVE:
- err = ptlrpc_set_import_active(obd->u.cli.cl_import,
- data->ioc_offset);
- goto out;
- case OBD_IOC_POLL_QUOTACHECK:
- err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
- goto out;
- case OBD_IOC_PING_TARGET:
- err = ptlrpc_obd_ping(obd);
- goto out;
- default:
- CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
- cmd, current_comm());
- err = -ENOTTY;
- goto out;
- }
-out:
- module_put(THIS_MODULE);
- return err;
-}
-
-static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, __u32 *vallen, void *val,
- struct lov_stripe_md *lsm)
-{
- if (!vallen || !val)
- return -EFAULT;
-
- if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
- __u32 *stripe = val;
- *vallen = sizeof(*stripe);
- *stripe = 0;
- return 0;
- } else if (KEY_IS(KEY_LAST_ID)) {
- struct ptlrpc_request *req;
- u64 *reply;
- char *tmp;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_OST_GET_INFO_LAST_ID);
- if (req == NULL)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
-
- req->rq_no_delay = req->rq_no_resend = 1;
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
- if (reply == NULL) {
- rc = -EPROTO;
- goto out;
- }
-
- *((u64 *)val) = *reply;
- out:
- ptlrpc_req_finished(req);
- return rc;
- } else if (KEY_IS(KEY_FIEMAP)) {
- struct ll_fiemap_info_key *fm_key =
- (struct ll_fiemap_info_key *)key;
- struct ldlm_res_id res_id;
- ldlm_policy_data_t policy;
- struct lustre_handle lockh;
- ldlm_mode_t mode = 0;
- struct ptlrpc_request *req;
- struct ll_user_fiemap *reply;
- char *tmp;
- int rc;
-
- if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
- goto skip_locking;
-
- policy.l_extent.start = fm_key->fiemap.fm_start &
- CFS_PAGE_MASK;
-
- if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
- fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
- policy.l_extent.end = OBD_OBJECT_EOF;
- else
- policy.l_extent.end = (fm_key->fiemap.fm_start +
- fm_key->fiemap.fm_length +
- PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
-
- ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
- mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
- LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_LVB_READY,
- &res_id, LDLM_EXTENT, &policy,
- LCK_PR | LCK_PW, &lockh, 0);
- if (mode) { /* lock is cached on client */
- if (mode != LCK_PR) {
- ldlm_lock_addref(&lockh, LCK_PR);
- ldlm_lock_decref(&lockh, LCK_PW);
- }
-		} else { /* no cached lock, need to acquire the lock on the server side */
- fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
- fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
- }
-
-skip_locking:
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_OST_GET_INFO_FIEMAP);
- if (req == NULL) {
- rc = -ENOMEM;
- goto drop_lock;
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
- RCL_CLIENT, *vallen);
- req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
- RCL_SERVER, *vallen);
-
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- goto drop_lock;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- memcpy(tmp, val, *vallen);
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto fini_req;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- if (reply == NULL) {
- rc = -EPROTO;
- goto fini_req;
- }
-
- memcpy(val, reply, *vallen);
-fini_req:
- ptlrpc_req_finished(req);
-drop_lock:
- if (mode)
- ldlm_lock_decref(&lockh, LCK_PR);
- return rc;
- }
-
- return -EINVAL;
-}
-
-static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- struct obd_device *obd = exp->exp_obd;
- struct obd_import *imp = class_exp2cliimp(exp);
- char *tmp;
- int rc;
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
-
- if (KEY_IS(KEY_CHECKSUM)) {
- if (vallen != sizeof(int))
- return -EINVAL;
- exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
- return 0;
- }
-
- if (KEY_IS(KEY_SPTLRPC_CONF)) {
- sptlrpc_conf_client_adapt(obd);
- return 0;
- }
-
- if (KEY_IS(KEY_FLUSH_CTX)) {
- sptlrpc_import_flush_my_ctx(imp);
- return 0;
- }
-
- if (KEY_IS(KEY_CACHE_SET)) {
- struct client_obd *cli = &obd->u.cli;
-
- LASSERT(cli->cl_cache == NULL); /* only once */
- cli->cl_cache = (struct cl_client_cache *)val;
- atomic_inc(&cli->cl_cache->ccc_users);
- cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
-
- /* add this osc into entity list */
- LASSERT(list_empty(&cli->cl_lru_osc));
- spin_lock(&cli->cl_cache->ccc_lru_lock);
- list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
- spin_unlock(&cli->cl_cache->ccc_lru_lock);
-
- return 0;
- }
-
- if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
- struct client_obd *cli = &obd->u.cli;
- int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
- int target = *(int *)val;
-
- nr = osc_lru_shrink(cli, min(nr, target));
- *(int *)val -= nr;
- return 0;
- }
-
- if (!set && !KEY_IS(KEY_GRANT_SHRINK))
- return -EINVAL;
-
-	/* We pass all other commands directly to the OST. Since nobody calls
-	 * osc methods directly and everybody is supposed to go through LOV,
-	 * we assume LOV has checked the values for invalid input for us.
-	 * The only recognised values so far are evict_by_nid and mds_conn.
-	 * Even if something bad gets through, we'd get -EINVAL from the OST
-	 * anyway. */
-
- req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
- &RQF_OST_SET_GRANT_INFO :
- &RQF_OBD_SET_INFO);
- if (req == NULL)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- if (!KEY_IS(KEY_GRANT_SHRINK))
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT, vallen);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
- &RMF_OST_BODY :
- &RMF_SETINFO_VAL);
- memcpy(tmp, val, vallen);
-
- if (KEY_IS(KEY_GRANT_SHRINK)) {
- struct osc_brw_async_args *aa;
- struct obdo *oa;
-
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- OBDO_ALLOC(oa);
- if (!oa) {
- ptlrpc_req_finished(req);
- return -ENOMEM;
- }
- *oa = ((struct ost_body *)val)->oa;
- aa->aa_oa = oa;
- req->rq_interpret_reply = osc_shrink_grant_interpret;
- }
-
- ptlrpc_request_set_replen(req);
- if (!KEY_IS(KEY_GRANT_SHRINK)) {
- LASSERT(set != NULL);
- ptlrpc_set_add_req(set, req);
- ptlrpc_check_set(NULL, set);
- } else {
- ptlrpcd_add_req(req);
- }
-
- return 0;
-}
-
-static int osc_reconnect(const struct lu_env *env,
- struct obd_export *exp, struct obd_device *obd,
- struct obd_uuid *cluuid,
- struct obd_connect_data *data,
- void *localdata)
-{
- struct client_obd *cli = &obd->u.cli;
-
- if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
- long lost_grant;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
- 2 * cli_brw_size(obd);
- lost_grant = cli->cl_lost_grant;
- cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
- data->ocd_connect_flags,
- data->ocd_version, data->ocd_grant, lost_grant);
- }
-
- return 0;
-}
-
-static int osc_disconnect(struct obd_export *exp)
-{
- struct obd_device *obd = class_exp2obd(exp);
- int rc;
-
- rc = client_disconnect_export(exp);
- /**
- * Initially we put del_shrink_grant before disconnect_export, but it
- * causes the following problem if setup (connect) and cleanup
- * (disconnect) are tangled together.
- * connect p1 disconnect p2
- * ptlrpc_connect_import
- * ............... class_manual_cleanup
- * osc_disconnect
- * del_shrink_grant
- * ptlrpc_connect_interrupt
- * init_grant_shrink
- * add this client to shrink list
- * cleanup_osc
-	 * Bang! The pinger triggers the shrink.
-	 * So the osc should be disconnected from the shrink list after we
- * are sure the import has been destroyed. BUG18662
- */
- if (obd->u.cli.cl_import == NULL)
- osc_del_shrink_grant(&obd->u.cli);
- return rc;
-}
-
-static int osc_import_event(struct obd_device *obd,
- struct obd_import *imp,
- enum obd_import_event event)
-{
- struct client_obd *cli;
- int rc = 0;
-
- LASSERT(imp->imp_obd == obd);
-
- switch (event) {
- case IMP_EVENT_DISCON: {
- cli = &obd->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant = 0;
- cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- break;
- }
- case IMP_EVENT_INACTIVE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
- break;
- }
- case IMP_EVENT_INVALIDATE: {
- struct ldlm_namespace *ns = obd->obd_namespace;
- struct lu_env *env;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- /* Reset grants */
- cli = &obd->u.cli;
- /* all pages go to failing rpcs due to the invalid
- * import */
- osc_io_unplug(env, cli, NULL);
-
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
- cl_env_put(env, &refcheck);
- } else
- rc = PTR_ERR(env);
- break;
- }
- case IMP_EVENT_ACTIVE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
- break;
- }
- case IMP_EVENT_OCD: {
- struct obd_connect_data *ocd = &imp->imp_connect_data;
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
- osc_init_grant(&obd->u.cli, ocd);
-
- /* See bug 7198 */
- if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
- imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
-
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
- break;
- }
- case IMP_EVENT_DEACTIVATE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
- break;
- }
- case IMP_EVENT_ACTIVATE: {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
- break;
- }
- default:
- CERROR("Unknown import event %d\n", event);
- LBUG();
- }
- return rc;
-}
-
-/**
- * Determine whether the lock can be canceled before replaying the lock
- * during recovery, see bug16774 for detailed information.
- *
- * \retval zero the lock can't be canceled
- * \retval other ok to cancel
- */
-static int osc_cancel_for_recovery(struct ldlm_lock *lock)
-{
- check_res_locked(lock->l_resource);
-
- /*
-	 * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
-	 *
-	 * XXX as a future improvement, we could also cancel unused write
-	 * locks if they have no dirty data and no active mmaps.
- */
- if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_granted_mode == LCK_PR ||
- lock->l_granted_mode == LCK_CR) &&
- (osc_dlm_lock_pageref(lock) == 0))
- return 1;
-
- return 0;
-}
-
-static int brw_queue_work(const struct lu_env *env, void *data)
-{
- struct client_obd *cli = data;
-
- CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
-
- osc_io_unplug(env, cli, NULL);
- return 0;
-}
-
-int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- struct client_obd *cli = &obd->u.cli;
- void *handler;
- int rc;
- int adding;
- int added;
- int req_count;
-
- rc = ptlrpcd_addref();
- if (rc)
- return rc;
-
- rc = client_obd_setup(obd, lcfg);
- if (rc)
- goto out_ptlrpcd;
-
- handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
- if (IS_ERR(handler)) {
- rc = PTR_ERR(handler);
- goto out_client_setup;
- }
- cli->cl_writeback_work = handler;
-
- rc = osc_quota_setup(obd);
- if (rc)
- goto out_ptlrpcd_work;
-
- cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
- lprocfs_osc_init_vars(&lvars);
- if (lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars) == 0) {
- lproc_osc_attach_seqstat(obd);
- sptlrpc_lprocfs_cliobd_attach(obd);
- ptlrpc_lprocfs_register_obd(obd);
- }
-
- /*
-	 * We try to control the total number of requests with an upper limit,
-	 * osc_reqpool_maxreqcount. There might be races that cause over-limit
-	 * allocation, but that is fine.
- */
- req_count = atomic_read(&osc_pool_req_count);
- if (req_count < osc_reqpool_maxreqcount) {
- adding = cli->cl_max_rpcs_in_flight + 2;
- if (req_count + adding > osc_reqpool_maxreqcount)
- adding = osc_reqpool_maxreqcount - req_count;
-
- added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
- atomic_add(added, &osc_pool_req_count);
- }
-
- INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
- ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
- return rc;
-
-out_ptlrpcd_work:
- ptlrpcd_destroy_work(handler);
-out_client_setup:
- client_obd_cleanup(obd);
-out_ptlrpcd:
- ptlrpcd_decref();
- return rc;
-}
-
-static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
- switch (stage) {
- case OBD_CLEANUP_EARLY: {
- struct obd_import *imp;
- imp = obd->u.cli.cl_import;
- CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
- /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
- ptlrpc_deactivate_import(imp);
- spin_lock(&imp->imp_lock);
- imp->imp_pingable = 0;
- spin_unlock(&imp->imp_lock);
- break;
- }
- case OBD_CLEANUP_EXPORTS: {
- struct client_obd *cli = &obd->u.cli;
- /* LU-464
- * for echo client, export may be on zombie list, wait for
- * zombie thread to cull it, because cli.cl_import will be
- * cleared in client_disconnect_export():
- * class_export_destroy() -> obd_cleanup() ->
- * echo_device_free() -> echo_client_cleanup() ->
- * obd_disconnect() -> osc_disconnect() ->
- * client_disconnect_export()
- */
- obd_zombie_barrier();
- if (cli->cl_writeback_work) {
- ptlrpcd_destroy_work(cli->cl_writeback_work);
- cli->cl_writeback_work = NULL;
- }
- obd_cleanup_client_import(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
- break;
- }
- }
- return 0;
-}
-
-int osc_cleanup(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
- int rc;
-
- /* lru cleanup */
- if (cli->cl_cache != NULL) {
- LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
- spin_lock(&cli->cl_cache->ccc_lru_lock);
- list_del_init(&cli->cl_lru_osc);
- spin_unlock(&cli->cl_cache->ccc_lru_lock);
- cli->cl_lru_left = NULL;
- atomic_dec(&cli->cl_cache->ccc_users);
- cli->cl_cache = NULL;
- }
-
- /* free memory of osc quota cache */
- osc_quota_cleanup(obd);
-
- rc = client_obd_cleanup(obd);
-
- ptlrpcd_decref();
- return rc;
-}
-
-int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
-{
- struct lprocfs_static_vars lvars = { NULL };
- int rc = 0;
-
- lprocfs_osc_init_vars(&lvars);
-
- switch (lcfg->lcfg_command) {
- default:
- rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
- lcfg, obd);
- if (rc > 0)
- rc = 0;
- break;
- }
-
- return rc;
-}
-
-static int osc_process_config(struct obd_device *obd, u32 len, void *buf)
-{
- return osc_process_config_base(obd, buf);
-}
-
-struct obd_ops osc_obd_ops = {
- .o_owner = THIS_MODULE,
- .o_setup = osc_setup,
- .o_precleanup = osc_precleanup,
- .o_cleanup = osc_cleanup,
- .o_add_conn = client_import_add_conn,
- .o_del_conn = client_import_del_conn,
- .o_connect = client_connect_import,
- .o_reconnect = osc_reconnect,
- .o_disconnect = osc_disconnect,
- .o_statfs = osc_statfs,
- .o_statfs_async = osc_statfs_async,
- .o_packmd = osc_packmd,
- .o_unpackmd = osc_unpackmd,
- .o_create = osc_create,
- .o_destroy = osc_destroy,
- .o_getattr = osc_getattr,
- .o_getattr_async = osc_getattr_async,
- .o_setattr = osc_setattr,
- .o_setattr_async = osc_setattr_async,
- .o_find_cbdata = osc_find_cbdata,
- .o_iocontrol = osc_iocontrol,
- .o_get_info = osc_get_info,
- .o_set_info_async = osc_set_info_async,
- .o_import_event = osc_import_event,
- .o_process_config = osc_process_config,
- .o_quotactl = osc_quotactl,
- .o_quotacheck = osc_quotacheck,
-};
-
-extern struct lu_kmem_descr osc_caches[];
-extern spinlock_t osc_ast_guard;
-extern struct lock_class_key osc_ast_guard_class;
-
-static int __init osc_init(void)
-{
- struct lprocfs_static_vars lvars = { NULL };
- unsigned int reqpool_size;
- unsigned int reqsize;
- int rc;
-
-	/* Print the address of _any_ initialized kernel symbol from this
-	 * module, to allow debugging with a gdb that doesn't support data
-	 * symbols from modules. */
- CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
-
- rc = lu_kmem_init(osc_caches);
- if (rc)
- return rc;
-
- lprocfs_osc_init_vars(&lvars);
-
- rc = class_register_type(&osc_obd_ops, NULL,
- LUSTRE_OSC_NAME, &osc_device_type);
- if (rc)
- goto out_kmem;
-
- spin_lock_init(&osc_ast_guard);
- lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
-
-	/* This is obviously too much memory; we only prevent overflow here */
- if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
- rc = -EINVAL;
- goto out_type;
- }
-
- reqpool_size = osc_reqpool_mem_max << 20;
-
- reqsize = 1;
- while (reqsize < OST_MAXREQSIZE)
- reqsize = reqsize << 1;
-
- /*
-	 * We don't enlarge the request count in the OSC pool according to
-	 * cl_max_rpcs_in_flight. Allocation from the pool is only tried after
-	 * a normal allocation has failed, so a small OSC pool won't cause
-	 * much performance degradation in most cases.
- */
- osc_reqpool_maxreqcount = reqpool_size / reqsize;
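	/*
	 * Editorial worked example, not part of the removed file, with
	 * hypothetical numbers: if osc_reqpool_mem_max were 2 (MiB), then
	 * reqpool_size = 2 << 20 bytes; if OST_MAXREQSIZE rounded up to a
	 * 64 KiB power of two, the pool would be capped at
	 * (2 << 20) / (64 << 10) = 32 requests.
	 */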
-
- atomic_set(&osc_pool_req_count, 0);
- osc_rq_pool = ptlrpc_init_rq_pool(0, OST_MAXREQSIZE,
- ptlrpc_add_rqs_to_pool);
-
- if (osc_rq_pool)
- return 0;
-
- rc = -ENOMEM;
-
-out_type:
- class_unregister_type(LUSTRE_OSC_NAME);
-out_kmem:
- lu_kmem_fini(osc_caches);
- return rc;
-}
-
-static void /*__exit*/ osc_exit(void)
-{
- class_unregister_type(LUSTRE_OSC_NAME);
- lu_kmem_fini(osc_caches);
- ptlrpc_free_rq_pool(osc_rq_pool);
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-
-module_init(osc_init);
-module_exit(osc_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile
deleted file mode 100644
index 24bbac19ddd1..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-obj-$(CONFIG_LUSTRE_FS) += ptlrpc.o
-LDLM := ../../lustre/ldlm/
-
-ldlm_objs := $(LDLM)l_lock.o $(LDLM)ldlm_lock.o
-ldlm_objs += $(LDLM)ldlm_resource.o $(LDLM)ldlm_lib.o
-ldlm_objs += $(LDLM)ldlm_plain.o $(LDLM)ldlm_extent.o
-ldlm_objs += $(LDLM)ldlm_request.o $(LDLM)ldlm_lockd.o
-ldlm_objs += $(LDLM)ldlm_flock.o $(LDLM)ldlm_inodebits.o
-ldlm_objs += $(LDLM)ldlm_pool.o
-ldlm_objs += $(LDLM)interval_tree.o
-ptlrpc_objs := client.o recover.o connection.o niobuf.o pack_generic.o
-ptlrpc_objs += events.o ptlrpc_module.o service.o pinger.o
-ptlrpc_objs += llog_net.o llog_client.o import.o ptlrpcd.o
-ptlrpc_objs += pers.o lproc_ptlrpc.o wiretest.o layout.o
-ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o
-ptlrpc_objs += sec_null.o sec_plain.o nrs.o nrs_fifo.o
-
-ptlrpc-y := $(ldlm_objs) $(ptlrpc_objs) sec_lproc.o
-ptlrpc-$(CONFIG_LUSTRE_TRANSLATE_ERRNOS) += errno.o
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
deleted file mode 100644
index 6aaa5dd55a92..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ /dev/null
@@ -1,3051 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/** Implementation of client-side PortalRPC interfaces */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_import.h"
-#include "../include/lustre_req_layout.h"
-
-#include "ptlrpc_internal.h"
-
-static int ptlrpc_send_new_req(struct ptlrpc_request *req);
-static int ptlrpcd_check_work(struct ptlrpc_request *req);
-
-/**
- * Initialize passed in client structure \a cl.
- */
-void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
- struct ptlrpc_client *cl)
-{
- cl->cli_request_portal = req_portal;
- cl->cli_reply_portal = rep_portal;
- cl->cli_name = name;
-}
-EXPORT_SYMBOL(ptlrpc_init_client);
-
-/**
- * Return PortalRPC connection for remote uuid \a uuid
- */
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
-{
- struct ptlrpc_connection *c;
- lnet_nid_t self;
- lnet_process_id_t peer;
- int err;
-
- /* ptlrpc_uuid_to_peer() initializes its 2nd parameter
- * before accessing its values. */
- /* coverity[uninit_use_in_call] */
- err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
- if (err != 0) {
- CNETERR("cannot find peer %s!\n", uuid->uuid);
- return NULL;
- }
-
- c = ptlrpc_connection_get(peer, self, uuid);
- if (c) {
- memcpy(c->c_remote_uuid.uuid,
- uuid->uuid, sizeof(c->c_remote_uuid.uuid));
- }
-
- CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
-
- return c;
-}
-EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
-
-/**
- * Allocate and initialize new bulk descriptor on the sender.
- * Returns pointer to the descriptor or NULL on error.
- */
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
-{
- struct ptlrpc_bulk_desc *desc;
- int i;
-
- desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]),
- GFP_NOFS);
- if (!desc)
- return NULL;
-
- spin_lock_init(&desc->bd_lock);
- init_waitqueue_head(&desc->bd_waitq);
- desc->bd_max_iov = npages;
- desc->bd_iov_count = 0;
- desc->bd_portal = portal;
- desc->bd_type = type;
- desc->bd_md_count = 0;
- LASSERT(max_brw > 0);
- desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
- /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
- * node. Negotiated ocd_brw_size will always be <= this number. */
- for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
- LNetInvalidateHandle(&desc->bd_mds[i]);
-
- return desc;
-}
-
-/**
- * Prepare a bulk descriptor for the specified outgoing request \a req that
- * can fit \a npages pages. \a type is the bulk type. \a portal is where
- * the bulk is to be sent. Used on the client side.
- * Returns pointer to newly allocated initialized bulk descriptor or NULL on
- * error.
- */
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
-{
- struct obd_import *imp = req->rq_import;
- struct ptlrpc_bulk_desc *desc;
-
- LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
- desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
- if (desc == NULL)
- return NULL;
-
- desc->bd_import_generation = req->rq_import_generation;
- desc->bd_import = class_import_get(imp);
- desc->bd_req = req;
-
- desc->bd_cbid.cbid_fn = client_bulk_callback;
- desc->bd_cbid.cbid_arg = desc;
-
-	/* This makes req own desc, freeing it when req itself is freed */
- req->rq_bulk = desc;
-
- return desc;
-}
-EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
-
-/**
- * Add a page \a page to the bulk descriptor \a desc.
- * Data to transfer in the page starts at offset \a pageoffset and
- * amount of data to transfer from the page is \a len
- */
-void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset, int len, int pin)
-{
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(page != NULL);
- LASSERT(pageoffset >= 0);
- LASSERT(len > 0);
- LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
-
- desc->bd_nob += len;
-
- if (pin)
- page_cache_get(page);
-
- ptlrpc_add_bulk_page(desc, page, pageoffset, len);
-}
-EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
-
-/**
- * Uninitialize and free bulk descriptor \a desc.
- * Works on bulk descriptors both from server and client side.
- */
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
-{
- int i;
-
- LASSERT(desc != NULL);
- LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
- LASSERT(desc->bd_md_count == 0); /* network hands off */
- LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
-
- sptlrpc_enc_pool_put_pages(desc);
-
- if (desc->bd_export)
- class_export_put(desc->bd_export);
- else
- class_import_put(desc->bd_import);
-
- if (unpin) {
- for (i = 0; i < desc->bd_iov_count; i++)
- page_cache_release(desc->bd_iov[i].kiov_page);
- }
-
- kfree(desc);
-}
-EXPORT_SYMBOL(__ptlrpc_free_bulk);
-
-/**
- * Set the server time limit for this req, i.e. how long we are willing to
- * wait for a reply before timing out this request.
- */
-void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
-{
- __u32 serv_est;
- int idx;
- struct imp_at *at;
-
- LASSERT(req->rq_import);
-
- if (AT_OFF) {
- /* non-AT settings */
- /**
-		 * \a imp_server_timeout means this is a reverse import and
-		 * we send (currently only) ASTs to the client; we cannot
-		 * afford to wait too long for the reply, otherwise the other
-		 * client (on whose behalf we are sending this request) would
-		 * time out waiting for us.
- */
- req->rq_timeout = req->rq_import->imp_server_timeout ?
- obd_timeout / 2 : obd_timeout;
- } else {
- at = &req->rq_import->imp_at;
- idx = import_at_get_index(req->rq_import,
- req->rq_request_portal);
- serv_est = at_get(&at->iat_service_estimate[idx]);
- req->rq_timeout = at_est2timeout(serv_est);
- }
- /* We could get even fancier here, using history to predict increased
- loading... */
-
- /* Let the server know what this RPC timeout is by putting it in the
- reqmsg*/
- lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
-}
-EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
-
-/* Adjust max service estimate based on server value */
-static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
- unsigned int serv_est)
-{
- int idx;
- unsigned int oldse;
- struct imp_at *at;
-
- LASSERT(req->rq_import);
- at = &req->rq_import->imp_at;
-
- idx = import_at_get_index(req->rq_import, req->rq_request_portal);
- /* max service estimates are tracked on the server side,
- so just keep minimal history here */
- oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
- if (oldse != 0)
- CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
- req->rq_import->imp_obd->obd_name, req->rq_request_portal,
- oldse, at_get(&at->iat_service_estimate[idx]));
-}
-
-/* Expected network latency per remote node (secs) */
-int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
-{
- return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
-}
-
-/* Adjust expected network latency */
-static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
- unsigned int service_time)
-{
- unsigned int nl, oldnl;
- struct imp_at *at;
- time64_t now = ktime_get_real_seconds();
-
- LASSERT(req->rq_import);
-
- if (service_time > now - req->rq_sent + 3) {
-		/* bz16408; however, this can also happen if the early reply
-		 * is lost and the client RPC expires and is resent: the early
-		 * reply or the reply to the original RPC can still fit in the
-		 * reply buffer of the resent RPC, and the client now measures
-		 * time from the resend time while the server sent back the
-		 * service time of the original RPC.
- */
- CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
- D_ADAPTTO : D_WARNING,
- "Reported service time %u > total measured time "
- CFS_DURATION_T"\n", service_time,
- (long)(now - req->rq_sent));
- return;
- }
-
- /* Network latency is total time less server processing time */
- nl = max_t(int, now - req->rq_sent -
- service_time, 0) + 1; /* st rounding */
- at = &req->rq_import->imp_at;
-
- oldnl = at_measured(&at->iat_net_latency, nl);
- if (oldnl != 0)
- CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) has changed from %d to %d\n",
- req->rq_import->imp_obd->obd_name,
- obd_uuid2str(
- &req->rq_import->imp_connection->c_remote_uuid),
- oldnl, at_get(&at->iat_net_latency));
-}
-
-static int unpack_reply(struct ptlrpc_request *req)
-{
- int rc;
-
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
- rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
- if (rc) {
- DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
- return -EPROTO;
- }
- }
-
- rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
- if (rc) {
- DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
- return -EPROTO;
- }
- return 0;
-}
-
-/**
- * Handle an early reply message, called with the rq_lock held.
- * If anything goes wrong just ignore it - same as if it never happened
- */
-static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
-{
- struct ptlrpc_request *early_req;
- time64_t olddl;
- int rc;
-
- req->rq_early = 0;
- spin_unlock(&req->rq_lock);
-
- rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
- if (rc) {
- spin_lock(&req->rq_lock);
- return rc;
- }
-
- rc = unpack_reply(early_req);
- if (rc == 0) {
- /* Expecting to increase the service time estimate here */
- ptlrpc_at_adj_service(req,
- lustre_msg_get_timeout(early_req->rq_repmsg));
- ptlrpc_at_adj_net_latency(req,
- lustre_msg_get_service_time(early_req->rq_repmsg));
- }
-
- sptlrpc_cli_finish_early_reply(early_req);
-
- if (rc != 0) {
- spin_lock(&req->rq_lock);
- return rc;
- }
-
- /* Adjust the local timeout for this req */
- ptlrpc_at_set_req_timeout(req);
-
- spin_lock(&req->rq_lock);
- olddl = req->rq_deadline;
- /* server assumes it now has rq_timeout from when it sent the
- * early reply, so client should give it at least that long. */
- req->rq_deadline = ktime_get_real_seconds() + req->rq_timeout +
- ptlrpc_at_get_net_latency(req);
-
- DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in %lds (%lds)",
- req->rq_early_count,
- (long)(req->rq_deadline - ktime_get_real_seconds()),
- (long)(req->rq_deadline - olddl));
-
- return rc;
-}
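/*
 * Illustrative sketch (not part of the original file) of the deadline
 * extension performed above:
 *
 *         rq_deadline = now + rq_timeout + net_latency
 *
 * e.g. old deadline T+30s, now = T+25s, rq_timeout = 40s,
 * net_latency = 4s gives a new deadline of T+69s, reported by the
 * DEBUG_REQ above as "new deadline in 44s (+39s)".
 */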
-
-static struct kmem_cache *request_cache;
-
-int ptlrpc_request_cache_init(void)
-{
- request_cache = kmem_cache_create("ptlrpc_cache",
- sizeof(struct ptlrpc_request),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- return request_cache == NULL ? -ENOMEM : 0;
-}
-
-void ptlrpc_request_cache_fini(void)
-{
- kmem_cache_destroy(request_cache);
-}
-
-struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
-{
- struct ptlrpc_request *req;
-
- OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
- return req;
-}
-
-void ptlrpc_request_cache_free(struct ptlrpc_request *req)
-{
- OBD_SLAB_FREE_PTR(req, request_cache);
-}
-
-/**
- * Wind down request pool \a pool.
- * Frees all requests from the pool too
- */
-void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
-{
- struct list_head *l, *tmp;
- struct ptlrpc_request *req;
-
- LASSERT(pool != NULL);
-
- spin_lock(&pool->prp_lock);
- list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = list_entry(l, struct ptlrpc_request, rq_list);
- list_del(&req->rq_list);
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
- kvfree(req->rq_reqbuf);
- ptlrpc_request_cache_free(req);
- }
- spin_unlock(&pool->prp_lock);
- kfree(pool);
-}
-EXPORT_SYMBOL(ptlrpc_free_rq_pool);
-
-/**
- * Allocates, initializes and adds \a num_rq requests to the pool \a pool
- */
-int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
-{
- int i;
- int size = 1;
-
- while (size < pool->prp_rq_size)
- size <<= 1;
-
- LASSERTF(list_empty(&pool->prp_req_list) ||
- size == pool->prp_rq_size,
- "Trying to change pool size with nonempty pool from %d to %d bytes\n",
- pool->prp_rq_size, size);
-
- spin_lock(&pool->prp_lock);
- pool->prp_rq_size = size;
- for (i = 0; i < num_rq; i++) {
- struct ptlrpc_request *req;
- struct lustre_msg *msg;
-
- spin_unlock(&pool->prp_lock);
- req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (!req)
- return i;
- msg = libcfs_kvzalloc(size, GFP_NOFS);
- if (!msg) {
- ptlrpc_request_cache_free(req);
- return i;
- }
- req->rq_reqbuf = msg;
- req->rq_reqbuf_len = size;
- req->rq_pool = pool;
- spin_lock(&pool->prp_lock);
- list_add_tail(&req->rq_list, &pool->prp_req_list);
- }
- spin_unlock(&pool->prp_lock);
- return num_rq;
-}
-EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
-
-/**
- * Create and initialize new request pool with given attributes:
- * \a num_rq - initial number of requests to create for the pool
- * \a msgsize - maximum message size possible for requests in this pool
- * \a populate_pool - function to be called when more requests need to be added
- * to the pool
- * Returns pointer to newly created pool or NULL on error.
- */
-struct ptlrpc_request_pool *
-ptlrpc_init_rq_pool(int num_rq, int msgsize,
- int (*populate_pool)(struct ptlrpc_request_pool *, int))
-{
- struct ptlrpc_request_pool *pool;
-
- pool = kzalloc(sizeof(struct ptlrpc_request_pool), GFP_NOFS);
- if (!pool)
- return NULL;
-
- /* Request the next power of two for the allocation, because the
- kernel would internally round it up to that anyway */
-
- spin_lock_init(&pool->prp_lock);
- INIT_LIST_HEAD(&pool->prp_req_list);
- pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
- pool->prp_populate = populate_pool;
-
- populate_pool(pool, num_rq);
-
- return pool;
-}
-EXPORT_SYMBOL(ptlrpc_init_rq_pool);
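/*
 * Minimal usage sketch (hypothetical caller, not part of the original
 * file): pre-allocate a small request pool, typically so the writeout
 * path can keep making progress under memory pressure, and free it on
 * teardown.  "example_pool" and the sizes are made up for illustration.
 */
static struct ptlrpc_request_pool *example_pool;

static int example_pool_setup(void)
{
        /* 4 requests, sized for a hypothetical 1 KiB maximum message */
        example_pool = ptlrpc_init_rq_pool(4, 1024, ptlrpc_add_rqs_to_pool);
        return example_pool ? 0 : -ENOMEM;
}

static void example_pool_teardown(void)
{
        if (example_pool)
                ptlrpc_free_rq_pool(example_pool);
}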
-
-/**
- * Fetches one request from pool \a pool
- */
-static struct ptlrpc_request *
-ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
-{
- struct ptlrpc_request *request;
- struct lustre_msg *reqbuf;
-
- if (!pool)
- return NULL;
-
- spin_lock(&pool->prp_lock);
-
- /* See if we have anything in the pool and bail out if not.
- * In the writeout path, where this matters, that is safe to do:
- * nothing is lost, and when some in-flight requests complete,
- * this code will be called again. */
- if (unlikely(list_empty(&pool->prp_req_list))) {
- spin_unlock(&pool->prp_lock);
- return NULL;
- }
-
- request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- list_del_init(&request->rq_list);
- spin_unlock(&pool->prp_lock);
-
- LASSERT(request->rq_reqbuf);
- LASSERT(request->rq_pool);
-
- reqbuf = request->rq_reqbuf;
- memset(request, 0, sizeof(*request));
- request->rq_reqbuf = reqbuf;
- request->rq_reqbuf_len = pool->prp_rq_size;
- request->rq_pool = pool;
-
- return request;
-}
-
-/**
- * Returns freed \a request to pool.
- */
-static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
-{
- struct ptlrpc_request_pool *pool = request->rq_pool;
-
- spin_lock(&pool->prp_lock);
- LASSERT(list_empty(&request->rq_list));
- LASSERT(!request->rq_receiving_reply);
- list_add_tail(&request->rq_list, &pool->prp_req_list);
- spin_unlock(&pool->prp_lock);
-}
-
-static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode,
- int count, __u32 *lengths, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
-{
- struct obd_import *imp = request->rq_import;
- int rc;
-
- if (unlikely(ctx))
- request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
- else {
- rc = sptlrpc_req_get_ctx(request);
- if (rc)
- goto out_free;
- }
-
- sptlrpc_req_set_flavor(request, opcode);
-
- rc = lustre_pack_request(request, imp->imp_msg_magic, count,
- lengths, bufs);
- if (rc) {
- LASSERT(!request->rq_pool);
- goto out_ctx;
- }
-
- lustre_msg_add_version(request->rq_reqmsg, version);
- request->rq_send_state = LUSTRE_IMP_FULL;
- request->rq_type = PTL_RPC_MSG_REQUEST;
- request->rq_export = NULL;
-
- request->rq_req_cbid.cbid_fn = request_out_callback;
- request->rq_req_cbid.cbid_arg = request;
-
- request->rq_reply_cbid.cbid_fn = reply_in_callback;
- request->rq_reply_cbid.cbid_arg = request;
-
- request->rq_reply_deadline = 0;
- request->rq_phase = RQ_PHASE_NEW;
- request->rq_next_phase = RQ_PHASE_UNDEFINED;
-
- request->rq_request_portal = imp->imp_client->cli_request_portal;
- request->rq_reply_portal = imp->imp_client->cli_reply_portal;
-
- ptlrpc_at_set_req_timeout(request);
-
- spin_lock_init(&request->rq_lock);
- INIT_LIST_HEAD(&request->rq_list);
- INIT_LIST_HEAD(&request->rq_timed_list);
- INIT_LIST_HEAD(&request->rq_replay_list);
- INIT_LIST_HEAD(&request->rq_ctx_chain);
- INIT_LIST_HEAD(&request->rq_set_chain);
- INIT_LIST_HEAD(&request->rq_history_list);
- INIT_LIST_HEAD(&request->rq_exp_list);
- init_waitqueue_head(&request->rq_reply_waitq);
- init_waitqueue_head(&request->rq_set_waitq);
- request->rq_xid = ptlrpc_next_xid();
- atomic_set(&request->rq_refcount, 1);
-
- lustre_msg_set_opc(request->rq_reqmsg, opcode);
-
- return 0;
-out_ctx:
- sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
-out_free:
- class_import_put(imp);
- return rc;
-}
-
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
-{
- int count;
-
- count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
- return __ptlrpc_request_bufs_pack(request, version, opcode, count,
- request->rq_pill.rc_area[RCL_CLIENT],
- bufs, ctx);
-}
-EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
-
-/**
- * Pack request buffers for network transfer, performing necessary encryption
- * steps if necessary.
- */
-int ptlrpc_request_pack(struct ptlrpc_request *request,
- __u32 version, int opcode)
-{
- int rc;
- rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
- if (rc)
- return rc;
-
- /* Some old 1.8 clients (< 1.8.7) LASSERT that the size of the
- * ptlrpc_body sent from the server equals the local ptlrpc_body size,
- * so we have to send the old ptlrpc_body to keep interoperability
- * with these clients.
- *
- * Only three kinds of server->client RPCs so far:
- * - LDLM_BL_CALLBACK
- * - LDLM_CP_CALLBACK
- * - LDLM_GL_CALLBACK
- *
- * XXX This should be removed once we drop interoperability with
- * these old clients.
- */
- if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
- opcode == LDLM_GL_CALLBACK)
- req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
- sizeof(struct ptlrpc_body_v2), RCL_CLIENT);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_request_pack);
-
-/**
- * Helper function to allocate new request on import \a imp
- * and possibly using existing request from pool \a pool if provided.
- * Returns allocated request structure with import field filled or
- * NULL on error.
- */
-static inline
-struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
- struct ptlrpc_request_pool *pool)
-{
- struct ptlrpc_request *request = NULL;
-
- request = ptlrpc_request_cache_alloc(GFP_NOFS);
-
- if (!request && pool)
- request = ptlrpc_prep_req_from_pool(pool);
-
- if (request) {
- LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
- LASSERT(imp != LP_POISON);
- LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
- imp->imp_client);
- LASSERT(imp->imp_client != LP_POISON);
-
- request->rq_import = class_import_get(imp);
- } else {
- CERROR("request allocation out of memory\n");
- }
-
- return request;
-}
-
-/**
- * Helper function for creating a request.
- * Calls __ptlrpc_request_alloc to allocate new request structure and inits
- * buffer structures according to capsule template \a format.
- * Returns allocated request structure pointer or NULL on error.
- */
-static struct ptlrpc_request *
-ptlrpc_request_alloc_internal(struct obd_import *imp,
- struct ptlrpc_request_pool *pool,
- const struct req_format *format)
-{
- struct ptlrpc_request *request;
-
- request = __ptlrpc_request_alloc(imp, pool);
- if (request == NULL)
- return NULL;
-
- req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
- req_capsule_set(&request->rq_pill, format);
- return request;
-}
-
-/**
- * Allocate new request structure for import \a imp and initialize its
- * buffer structure according to capsule template \a format.
- */
-struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
- const struct req_format *format)
-{
- return ptlrpc_request_alloc_internal(imp, NULL, format);
-}
-EXPORT_SYMBOL(ptlrpc_request_alloc);
-
-/**
- * Allocate new request structure for import \a imp from pool \a pool and
- * initialize its buffer structure according to capsule template \a format.
- */
-struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
- struct ptlrpc_request_pool *pool,
- const struct req_format *format)
-{
- return ptlrpc_request_alloc_internal(imp, pool, format);
-}
-EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
-
-/**
- * For requests not from pool, free memory of the request structure.
- * For requests obtained from a pool earlier, return request back to pool.
- */
-void ptlrpc_request_free(struct ptlrpc_request *request)
-{
- if (request->rq_pool)
- __ptlrpc_free_req_to_pool(request);
- else
- ptlrpc_request_cache_free(request);
-}
-EXPORT_SYMBOL(ptlrpc_request_free);
-
-/**
- * Allocate new request for operation \a opcode and immediately pack it for
- * network transfer.
- * Only used for simple requests like OBD_PING where the only important
- * part of the request is the operation itself.
- * Returns allocated request or NULL on error.
- */
-struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
- const struct req_format *format,
- __u32 version, int opcode)
-{
- struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
- int rc;
-
- if (req) {
- rc = ptlrpc_request_pack(req, version, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- req = NULL;
- }
- }
- return req;
-}
-EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
-
-/**
- * Allocate and initialize new request set structure on the current CPT.
- * Returns a pointer to the newly allocated set structure or NULL on error.
- */
-struct ptlrpc_request_set *ptlrpc_prep_set(void)
-{
- struct ptlrpc_request_set *set;
- int cpt;
-
- cpt = cfs_cpt_current(cfs_cpt_table, 0);
- set = kzalloc_node(sizeof(*set), GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table, cpt));
- if (!set)
- return NULL;
- atomic_set(&set->set_refcount, 1);
- INIT_LIST_HEAD(&set->set_requests);
- init_waitqueue_head(&set->set_waitq);
- atomic_set(&set->set_new_count, 0);
- atomic_set(&set->set_remaining, 0);
- spin_lock_init(&set->set_new_req_lock);
- INIT_LIST_HEAD(&set->set_new_requests);
- INIT_LIST_HEAD(&set->set_cblist);
- set->set_max_inflight = UINT_MAX;
- set->set_producer = NULL;
- set->set_producer_arg = NULL;
- set->set_rc = 0;
-
- return set;
-}
-EXPORT_SYMBOL(ptlrpc_prep_set);
-
-/**
- * Allocate and initialize new request set structure with flow control
- * extension. This extension makes it possible to control the number of
- * requests in flight for the whole set. A callback function to generate
- * requests must be provided, and the request set will cap the number of
- * requests sent over the wire at \a max_inflight.
- * Returns a pointer to the newly allocated set structure or NULL on error.
- */
-struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
- void *arg)
-
-{
- struct ptlrpc_request_set *set;
-
- set = ptlrpc_prep_set();
- if (!set)
- return NULL;
-
- set->set_max_inflight = max;
- set->set_producer = func;
- set->set_producer_arg = arg;
-
- return set;
-}
-EXPORT_SYMBOL(ptlrpc_prep_fcset);
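/*
 * Illustrative request-set life cycle (hypothetical caller, not part of
 * the original file).  RQF_OBD_PING, LUSTRE_OBD_VERSION and OBD_PING are
 * assumed to be the usual ping constants; reply-buffer sizing is elided
 * for brevity.
 */
static int example_sync_ping(struct obd_import *imp)
{
        struct ptlrpc_request_set *set;
        struct ptlrpc_request *req;
        int rc;

        req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (!req)
                return -ENOMEM;

        set = ptlrpc_prep_set();
        if (!set) {
                ptlrpc_req_finished(req);
                return -ENOMEM;
        }

        ptlrpc_set_add_req(set, req);   /* the set takes over our reference */
        rc = ptlrpc_set_wait(set);      /* send everything and wait */
        ptlrpc_set_destroy(set);        /* also drops the completed request */

        return rc;
}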
-
-/**
- * Wind down and free request set structure previously allocated with
- * ptlrpc_prep_set.
- * Ensures that all requests on the set have completed and removes
- * all requests from the request list in a set.
- * If any unsent requests happen to be on the list, pretends that they got
- * an error in flight and calls their completion handler.
- */
-void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
-{
- struct list_head *tmp;
- struct list_head *next;
- int expected_phase;
- int n = 0;
-
- /* Requests on the set should either all be completed, or all be new */
- expected_phase = (atomic_read(&set->set_remaining) == 0) ?
- RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
- list_for_each(tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
-
- LASSERT(req->rq_phase == expected_phase);
- n++;
- }
-
- LASSERTF(atomic_read(&set->set_remaining) == 0 ||
- atomic_read(&set->set_remaining) == n, "%d / %d\n",
- atomic_read(&set->set_remaining), n);
-
- list_for_each_safe(tmp, next, &set->set_requests) {
- struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- list_del_init(&req->rq_set_chain);
-
- LASSERT(req->rq_phase == expected_phase);
-
- if (req->rq_phase == RQ_PHASE_NEW) {
- ptlrpc_req_interpret(NULL, req, -EBADR);
- atomic_dec(&set->set_remaining);
- }
-
- spin_lock(&req->rq_lock);
- req->rq_set = NULL;
- req->rq_invalid_rqset = 0;
- spin_unlock(&req->rq_lock);
-
- ptlrpc_req_finished(req);
- }
-
- LASSERT(atomic_read(&set->set_remaining) == 0);
-
- ptlrpc_reqset_put(set);
-}
-EXPORT_SYMBOL(ptlrpc_set_destroy);
-
-/**
- * Add a new request to the general purpose request set.
- * Assumes request reference from the caller.
- */
-void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
- struct ptlrpc_request *req)
-{
- LASSERT(list_empty(&req->rq_set_chain));
-
- /* The set takes over the caller's request reference */
- list_add_tail(&req->rq_set_chain, &set->set_requests);
- req->rq_set = set;
- atomic_inc(&set->set_remaining);
- req->rq_queued_time = cfs_time_current();
-
- if (req->rq_reqmsg != NULL)
- lustre_msg_set_jobid(req->rq_reqmsg, NULL);
-
- if (set->set_producer != NULL)
- /* If the request set has a producer callback, the RPC must be
- * sent straight away */
- ptlrpc_send_new_req(req);
-}
-EXPORT_SYMBOL(ptlrpc_set_add_req);
-
-/**
- * Add a request to a request set with a dedicated server thread
- * and wake the thread for any necessary processing.
- * Currently only used for ptlrpcd.
- */
-void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
- struct ptlrpc_request *req)
-{
- struct ptlrpc_request_set *set = pc->pc_set;
- int count, i;
-
- LASSERT(req->rq_set == NULL);
- LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
-
- spin_lock(&set->set_new_req_lock);
- /*
- * The set takes over the caller's request reference.
- */
- req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
- list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = atomic_inc_return(&set->set_new_count);
- spin_unlock(&set->set_new_req_lock);
-
- /* Only need to call wakeup once for the first entry. */
- if (count == 1) {
- wake_up(&set->set_waitq);
-
- /* XXX: It may be unnecessary to wake up all the partners. But to
- * guarantee the async RPC can be processed ASAP, we have
- * no better choice. It may be fixed in the future. */
- for (i = 0; i < pc->pc_npartners; i++)
- wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
- }
-}
-EXPORT_SYMBOL(ptlrpc_set_add_new_req);
-
-/**
- * Based on the current state of the import, determine if the request
- * can be sent, is an error, or should be delayed.
- *
- * Returns true if this request should be delayed. If false, and
- * *status is set, then the request cannot be sent and *status is the
- * error code. If false and *status is 0, then the request can be sent.
- *
- * The imp->imp_lock must be held.
- */
-static int ptlrpc_import_delay_req(struct obd_import *imp,
- struct ptlrpc_request *req, int *status)
-{
- int delay = 0;
-
- LASSERT(status != NULL);
- *status = 0;
-
- if (req->rq_ctx_init || req->rq_ctx_fini) {
- /* always allow ctx init/fini rpc go through */
- } else if (imp->imp_state == LUSTRE_IMP_NEW) {
- DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
- *status = -EIO;
- } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- /* pings may safely race with umount */
- DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
- D_HA : D_ERROR, req, "IMP_CLOSED ");
- *status = -EIO;
- } else if (ptlrpc_send_limit_expired(req)) {
- /* probably doesn't need to be a D_ERROR after initial testing */
- DEBUG_REQ(D_ERROR, req, "send limit expired ");
- *status = -EIO;
- } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
- imp->imp_state == LUSTRE_IMP_CONNECTING) {
- /* allow CONNECT even if import is invalid */
- if (atomic_read(&imp->imp_inval_count) != 0) {
- DEBUG_REQ(D_ERROR, req, "invalidate in flight");
- *status = -EIO;
- }
- } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
- if (!imp->imp_deactive)
- DEBUG_REQ(D_NET, req, "IMP_INVALID");
- *status = -ESHUTDOWN; /* bz 12940 */
- } else if (req->rq_import_generation != imp->imp_generation) {
- DEBUG_REQ(D_ERROR, req, "req wrong generation:");
- *status = -EIO;
- } else if (req->rq_send_state != imp->imp_state) {
- /* invalidate in progress - any requests should be dropped */
- if (atomic_read(&imp->imp_inval_count) != 0) {
- DEBUG_REQ(D_ERROR, req, "invalidate in flight");
- *status = -EIO;
- } else if (imp->imp_dlm_fake || req->rq_no_delay) {
- *status = -EWOULDBLOCK;
- } else if (req->rq_allow_replay &&
- (imp->imp_state == LUSTRE_IMP_REPLAY ||
- imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
- imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
- imp->imp_state == LUSTRE_IMP_RECOVER)) {
- DEBUG_REQ(D_HA, req, "allow during recovery.\n");
- } else {
- delay = 1;
- }
- }
-
- return delay;
-}
-
-/**
- * Decide if the error message regarding provided request \a req
- * should be printed to the console or not.
- * Makes its decision based on the request status and other properties.
- * Returns 1 to print error on the system console or 0 if not.
- */
-static int ptlrpc_console_allow(struct ptlrpc_request *req)
-{
- __u32 opc;
- int err;
-
- LASSERT(req->rq_reqmsg != NULL);
- opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- /* Suppress particular reconnect errors which are to be expected. No
- * errors are suppressed for the initial connection on an import */
- if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
- (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
-
- /* Suppress timed out reconnect requests */
- if (req->rq_timedout)
- return 0;
-
- /* Suppress unavailable/again reconnect requests */
- err = lustre_msg_get_status(req->rq_repmsg);
- if (err == -ENODEV || err == -EAGAIN)
- return 0;
- }
-
- return 1;
-}
-
-/**
- * Check request processing status.
- * Returns the status.
- */
-static int ptlrpc_check_status(struct ptlrpc_request *req)
-{
- int err;
-
- err = lustre_msg_get_status(req->rq_repmsg);
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
- struct obd_import *imp = req->rq_import;
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
- if (ptlrpc_console_allow(req))
- LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s, operation %s failed with %d.\n",
- imp->imp_obd->obd_name,
- libcfs_nid2str(
- imp->imp_connection->c_peer.nid),
- ll_opcode2str(opc), err);
- return err < 0 ? err : -EINVAL;
- }
-
- if (err < 0)
- DEBUG_REQ(D_INFO, req, "status is %d", err);
- else if (err > 0)
- /* XXX: translate this error from net to host */
- DEBUG_REQ(D_INFO, req, "status is %d", err);
-
- return err;
-}
-
-/**
- * save pre-versions of objects into request for replay.
- * Versions are obtained from server reply.
- * used for VBR.
- */
-static void ptlrpc_save_versions(struct ptlrpc_request *req)
-{
- struct lustre_msg *repmsg = req->rq_repmsg;
- struct lustre_msg *reqmsg = req->rq_reqmsg;
- __u64 *versions = lustre_msg_get_versions(repmsg);
-
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
- return;
-
- LASSERT(versions);
- lustre_msg_set_versions(reqmsg, versions);
- CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
- versions[0], versions[1]);
-}
-
-/**
- * Callback function called when client receives RPC reply for \a req.
- * Returns 0 on success or error code.
- * The return value would be assigned to req->rq_status by the caller
- * as request processing status.
- * This function also decides if the request needs to be saved for later replay.
- */
-static int after_reply(struct ptlrpc_request *req)
-{
- struct obd_import *imp = req->rq_import;
- struct obd_device *obd = req->rq_import->imp_obd;
- int rc;
- struct timespec64 work_start;
- long timediff;
-
- LASSERT(obd != NULL);
- /* repbuf must be unlinked */
- LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);
-
- if (req->rq_reply_truncate) {
- if (ptlrpc_no_resend(req)) {
- DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
- req->rq_nob_received, req->rq_repbuf_len);
- return -EOVERFLOW;
- }
-
- sptlrpc_cli_free_repbuf(req);
- /* Pass the required reply buffer size (including
- * space for the early reply).
- * NB: no need to round up because alloc_repbuf
- * will round it up */
- req->rq_replen = req->rq_nob_received;
- req->rq_nob_received = 0;
- spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- spin_unlock(&req->rq_lock);
- return 0;
- }
-
- /*
- * NB Until this point, the whole of the incoming message,
- * including buflens, status etc is in the sender's byte order.
- */
- rc = sptlrpc_cli_unwrap_reply(req);
- if (rc) {
- DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
- return rc;
- }
-
- /*
- * Security layer unwrap might ask resend this request.
- */
- if (req->rq_resend)
- return 0;
-
- rc = unpack_reply(req);
- if (rc)
- return rc;
-
- /* retry indefinitely on EINPROGRESS */
- if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
- ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
- time64_t now = ktime_get_real_seconds();
-
- DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
- spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- spin_unlock(&req->rq_lock);
- req->rq_nr_resend++;
-
- /* allocate new xid to avoid reply reconstruction */
- if (!req->rq_bulk) {
- /* new xid is already allocated for bulk in
- * ptlrpc_check_set() */
- req->rq_xid = ptlrpc_next_xid();
- DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
- }
-
- /* Readjust the timeout for current conditions */
- ptlrpc_at_set_req_timeout(req);
- /* delay resend to give a chance to the server to get ready.
- * The delay is increased by 1s on every resend and is capped to
- * the current request timeout (i.e. obd_timeout if AT is off,
- * or AT service time x 125% + 5s, see at_est2timeout) */
- if (req->rq_nr_resend > req->rq_timeout)
- req->rq_sent = now + req->rq_timeout;
- else
- req->rq_sent = now + req->rq_nr_resend;
-
- return 0;
- }
-
- ktime_get_real_ts64(&work_start);
- timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
- (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
- if (obd->obd_svc_stats != NULL) {
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
- ptlrpc_lprocfs_rpc_sent(req, timediff);
- }
-
- if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
- lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
- DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
- lustre_msg_get_type(req->rq_repmsg));
- return -EPROTO;
- }
-
- if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
- CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
- ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
- ptlrpc_at_adj_net_latency(req,
- lustre_msg_get_service_time(req->rq_repmsg));
-
- rc = ptlrpc_check_status(req);
- imp->imp_connect_error = rc;
-
- if (rc) {
- /*
- * Either we've been evicted, or the server has failed for
- * some reason. Try to reconnect, and if that fails, punt to
- * the upcall.
- */
- if (ll_rpc_recoverable_error(rc)) {
- if (req->rq_send_state != LUSTRE_IMP_FULL ||
- imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
- return rc;
- }
- ptlrpc_request_handle_notconn(req);
- return rc;
- }
- } else {
- /*
- * Let's look if server sent slv. Do it only for RPC with
- * rc == 0.
- */
- ldlm_cli_update_pool(req);
- }
-
- /*
- * Store transno in reqmsg for replay.
- */
- if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
- req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
- lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
- }
-
- if (imp->imp_replayable) {
- spin_lock(&imp->imp_lock);
- /*
- * No point in adding already-committed requests to the replay
- * list, we will just remove them immediately. b=9829
- */
- if (req->rq_transno != 0 &&
- (req->rq_transno >
- lustre_msg_get_last_committed(req->rq_repmsg) ||
- req->rq_replay)) {
- /** version recovery */
- ptlrpc_save_versions(req);
- ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb != NULL &&
- list_empty(&req->rq_replay_list)) {
- /* NB: don't call rq_commit_cb if it's already on
- * rq_replay_list, ptlrpc_free_committed() will call
- * it later, see LU-3618 for details */
- spin_unlock(&imp->imp_lock);
- req->rq_commit_cb(req);
- spin_lock(&imp->imp_lock);
- }
-
- /*
- * Replay-enabled imports return commit-status information.
- */
- if (lustre_msg_get_last_committed(req->rq_repmsg)) {
- imp->imp_peer_committed_transno =
- lustre_msg_get_last_committed(req->rq_repmsg);
- }
-
- ptlrpc_free_committed(imp);
-
- if (!list_empty(&imp->imp_replay_list)) {
- struct ptlrpc_request *last;
-
- last = list_entry(imp->imp_replay_list.prev,
- struct ptlrpc_request,
- rq_replay_list);
- /*
- * Requests with rq_replay stay on the list even if no
- * commit is expected.
- */
- if (last->rq_transno > imp->imp_peer_committed_transno)
- ptlrpc_pinger_commit_expected(imp);
- }
-
- spin_unlock(&imp->imp_lock);
- }
-
- return rc;
-}
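/*
 * Illustrative sketch (not part of the original file) of the
 * -EINPROGRESS backoff above: the resend delay grows by one second per
 * attempt and is capped at the current request timeout, e.g. with
 * rq_timeout = 30s
 *
 *         resend #1  ->  rq_sent = now + 1s
 *         resend #5  ->  rq_sent = now + 5s
 *         resend #31 ->  rq_sent = now + 30s  (capped)
 */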
-
-/**
- * Helper function to send request \a req over the network for the first time
- * Also adjusts request phase.
- * Returns 0 on success or error code.
- */
-static int ptlrpc_send_new_req(struct ptlrpc_request *req)
-{
- struct obd_import *imp = req->rq_import;
- int rc;
-
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
- if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
- (!req->rq_generation_set ||
- req->rq_import_generation == imp->imp_generation))
- return 0;
-
- ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
-
- spin_lock(&imp->imp_lock);
-
- if (!req->rq_generation_set)
- req->rq_import_generation = imp->imp_generation;
-
- if (ptlrpc_import_delay_req(imp, req, &rc)) {
- spin_lock(&req->rq_lock);
- req->rq_waiting = 1;
- spin_unlock(&req->rq_lock);
-
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)",
- lustre_msg_get_status(req->rq_reqmsg),
- ptlrpc_import_state_name(req->rq_send_state),
- ptlrpc_import_state_name(imp->imp_state));
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- atomic_inc(&req->rq_import->imp_inflight);
- spin_unlock(&imp->imp_lock);
- return 0;
- }
-
- if (rc != 0) {
- spin_unlock(&imp->imp_lock);
- req->rq_status = rc;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- return rc;
- }
-
- LASSERT(list_empty(&req->rq_list));
- list_add_tail(&req->rq_list, &imp->imp_sending_list);
- atomic_inc(&req->rq_import->imp_inflight);
- spin_unlock(&imp->imp_lock);
-
- lustre_msg_set_status(req->rq_reqmsg, current_pid());
-
- rc = sptlrpc_req_refresh_ctx(req, -1);
- if (rc) {
- if (req->rq_err) {
- req->rq_status = rc;
- return 1;
- }
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 1;
- spin_unlock(&req->rq_lock);
- return 0;
- }
-
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
- current_comm(),
- imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
-
- rc = ptl_send_rpc(req, 0);
- if (rc) {
- DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
- spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- spin_unlock(&req->rq_lock);
- return rc;
- }
- return 0;
-}
-
-static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
-{
- int remaining, rc;
-
- LASSERT(set->set_producer != NULL);
-
- remaining = atomic_read(&set->set_remaining);
-
- /* populate the ->set_requests list with requests until we
- * reach the maximum number of RPCs in flight for this set */
- while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
- rc = set->set_producer(set, set->set_producer_arg);
- if (rc == -ENOENT) {
- /* no more RPC to produce */
- set->set_producer = NULL;
- set->set_producer_arg = NULL;
- return 0;
- }
- }
-
- return (atomic_read(&set->set_remaining) - remaining);
-}
-
-/**
- * this sends any unsent RPCs in \a set and returns 1 if all are sent
- * and no more replies are expected.
- * (it is possible to get fewer replies than requests sent, e.g. due to timed-out
- * requests or requests that we had trouble sending out)
- *
- * NOTE: This function contains a potential schedule point (cond_resched()).
- */
-int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
-{
- struct list_head *tmp, *next;
- struct list_head comp_reqs;
- int force_timer_recalc = 0;
-
- if (atomic_read(&set->set_remaining) == 0)
- return 1;
-
- INIT_LIST_HEAD(&comp_reqs);
- list_for_each_safe(tmp, next, &set->set_requests) {
- struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- struct obd_import *imp = req->rq_import;
- int unregistered = 0;
- int rc = 0;
-
- /* This schedule point is mainly for the ptlrpcd caller of this
- * function. Most ptlrpc sets are not long-lived and unbounded
- * in length, but at the least the set used by the ptlrpcd is.
- * Since the processing time is unbounded, we need to insert an
- * explicit schedule point to make the thread well-behaved.
- */
- cond_resched();
-
- if (req->rq_phase == RQ_PHASE_NEW &&
- ptlrpc_send_new_req(req)) {
- force_timer_recalc = 1;
- }
-
- /* delayed send - skip */
- if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
- continue;
-
- /* delayed resend - skip */
- if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
- req->rq_sent > ktime_get_real_seconds())
- continue;
-
- if (!(req->rq_phase == RQ_PHASE_RPC ||
- req->rq_phase == RQ_PHASE_BULK ||
- req->rq_phase == RQ_PHASE_INTERPRET ||
- req->rq_phase == RQ_PHASE_UNREGISTERING ||
- req->rq_phase == RQ_PHASE_COMPLETE)) {
- DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
- LBUG();
- }
-
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
- LASSERT(req->rq_next_phase != req->rq_phase);
- LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
-
- /*
- * Skip processing until reply is unlinked. We
- * can't return to pool before that and we can't
- * call interpret before that. We need to make
- * sure that all rdma transfers finished and will
- * not corrupt any data.
- */
- if (ptlrpc_client_recv_or_unlink(req) ||
- ptlrpc_client_bulk_active(req))
- continue;
-
- /*
- * Turn fail_loc off to prevent it from looping
- * forever.
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
- OBD_FAIL_ONCE);
- }
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
- OBD_FAIL_ONCE);
- }
-
- /*
- * Move to next phase if reply was successfully
- * unlinked.
- */
- ptlrpc_rqphase_move(req, req->rq_next_phase);
- }
-
- if (req->rq_phase == RQ_PHASE_COMPLETE) {
- list_move_tail(&req->rq_set_chain, &comp_reqs);
- continue;
- }
-
- if (req->rq_phase == RQ_PHASE_INTERPRET)
- goto interpret;
-
- /*
- * Note that this also will start async reply unlink.
- */
- if (req->rq_net_err && !req->rq_timedout) {
- ptlrpc_expire_one_request(req, 1);
-
- /*
- * Check if we still need to wait for unlink.
- */
- if (ptlrpc_client_recv_or_unlink(req) ||
- ptlrpc_client_bulk_active(req))
- continue;
- /* If there is no need to resend, fail it now. */
- if (req->rq_no_resend) {
- if (req->rq_status == 0)
- req->rq_status = -EIO;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- } else {
- continue;
- }
- }
-
- if (req->rq_err) {
- spin_lock(&req->rq_lock);
- req->rq_replied = 0;
- spin_unlock(&req->rq_lock);
- if (req->rq_status == 0)
- req->rq_status = -EIO;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- }
-
- /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
- * so it sets rq_intr regardless of individual rpc
- * timeouts. The synchronous IO waiting path sets
- * rq_intr irrespective of whether ptlrpcd
- * has seen a timeout. Our policy is to only interpret
- * interrupted rpcs after they have timed out, so we
- * need to enforce that here.
- */
-
- if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
- req->rq_wait_ctx)) {
- req->rq_status = -EINTR;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- }
-
- if (req->rq_phase == RQ_PHASE_RPC) {
- if (req->rq_timedout || req->rq_resend ||
- req->rq_waiting || req->rq_wait_ctx) {
- int status;
-
- if (!ptlrpc_unregister_reply(req, 1))
- continue;
-
- spin_lock(&imp->imp_lock);
- if (ptlrpc_import_delay_req(imp, req,
- &status)) {
- /* put on the delay list - only while we
- * wait for recovery to finish - before send */
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list,
- &imp->
- imp_delayed_list);
- spin_unlock(&imp->imp_lock);
- continue;
- }
-
- if (status != 0) {
- req->rq_status = status;
- ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
- goto interpret;
- }
- if (ptlrpc_no_resend(req) &&
- !req->rq_wait_ctx) {
- req->rq_status = -ENOTCONN;
- ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
- spin_unlock(&imp->imp_lock);
- goto interpret;
- }
-
- list_del_init(&req->rq_list);
- list_add_tail(&req->rq_list,
- &imp->imp_sending_list);
-
- spin_unlock(&imp->imp_lock);
-
- spin_lock(&req->rq_lock);
- req->rq_waiting = 0;
- spin_unlock(&req->rq_lock);
-
- if (req->rq_timedout || req->rq_resend) {
- /* This is re-sending anyway,
- * let's mark req as resend. */
- spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- spin_unlock(&req->rq_lock);
- if (req->rq_bulk) {
- __u64 old_xid;
-
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
-
- /* ensure previous bulk fails */
- old_xid = req->rq_xid;
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
- old_xid, req->rq_xid);
- }
- }
- /*
- * rq_wait_ctx is only touched by ptlrpcd,
- * so no lock is needed here.
- */
- status = sptlrpc_req_refresh_ctx(req, -1);
- if (status) {
- if (req->rq_err) {
- req->rq_status = status;
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- spin_unlock(&req->rq_lock);
- force_timer_recalc = 1;
- } else {
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 1;
- spin_unlock(&req->rq_lock);
- }
-
- continue;
- } else {
- spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- spin_unlock(&req->rq_lock);
- }
-
- rc = ptl_send_rpc(req, 0);
- if (rc) {
- DEBUG_REQ(D_HA, req,
- "send failed: rc = %d", rc);
- force_timer_recalc = 1;
- spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- spin_unlock(&req->rq_lock);
- continue;
- }
- /* need to reset the timeout */
- force_timer_recalc = 1;
- }
-
- spin_lock(&req->rq_lock);
-
- if (ptlrpc_client_early(req)) {
- ptlrpc_at_recv_early_reply(req);
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- /* Still waiting for a reply? */
- if (ptlrpc_client_recv(req)) {
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- /* Did we actually receive a reply? */
- if (!ptlrpc_client_replied(req)) {
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- spin_unlock(&req->rq_lock);
-
- /* unlink from the net because we are going to
- * swab the reply buffer in place */
- unregistered = ptlrpc_unregister_reply(req, 1);
- if (!unregistered)
- continue;
-
- req->rq_status = after_reply(req);
- if (req->rq_resend)
- continue;
-
- /* If there is no bulk associated with this request,
- * then we're done and should let the interpreter
- * process the reply. Similarly if the RPC returned
- * an error, and therefore the bulk will never arrive.
- */
- if (req->rq_bulk == NULL || req->rq_status < 0) {
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- goto interpret;
- }
-
- ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
- }
-
- LASSERT(req->rq_phase == RQ_PHASE_BULK);
- if (ptlrpc_client_bulk_active(req))
- continue;
-
- if (req->rq_bulk->bd_failure) {
- /* The RPC reply arrived OK, but the bulk screwed
- * up! Dead weird since the server told us the RPC
- * was good after getting the REPLY for her GET or
- * the ACK for her PUT. */
- DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
- req->rq_status = -EIO;
- }
-
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
-
-interpret:
- LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
-
- /* This moves to the "unregistering" phase; we need to wait for
- * the reply unlink. */
- if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
- /* start async bulk unlink too */
- ptlrpc_unregister_bulk(req, 1);
- continue;
- }
-
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
-
- /* By the time we call interpret, receiving should already
- * be finished. */
- LASSERT(!req->rq_receiving_reply);
-
- ptlrpc_req_interpret(env, req, req->rq_status);
-
- if (ptlrpcd_check_work(req)) {
- atomic_dec(&set->set_remaining);
- continue;
- }
- ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
-
- CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
- "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
- current_comm(), imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
-
- spin_lock(&imp->imp_lock);
- /* The request may already be off the sending or delayed list. This
- * can happen when it was marked erroneous because
- * ptlrpc_import_delay_req(req, status) found it impossible to
- * allow sending this rpc and returned *status != 0. */
- if (!list_empty(&req->rq_list)) {
- list_del_init(&req->rq_list);
- atomic_dec(&imp->imp_inflight);
- }
- spin_unlock(&imp->imp_lock);
-
- atomic_dec(&set->set_remaining);
- wake_up_all(&imp->imp_recovery_waitq);
-
- if (set->set_producer) {
- /* produce a new request if possible */
- if (ptlrpc_set_producer(set) > 0)
- force_timer_recalc = 1;
-
- /* free the request that has just been completed
- * in order not to pollute set->set_requests */
- list_del_init(&req->rq_set_chain);
- spin_lock(&req->rq_lock);
- req->rq_set = NULL;
- req->rq_invalid_rqset = 0;
- spin_unlock(&req->rq_lock);
-
- /* record rq_status to compute the final status later */
- if (req->rq_status != 0)
- set->set_rc = req->rq_status;
- ptlrpc_req_finished(req);
- } else {
- list_move_tail(&req->rq_set_chain, &comp_reqs);
- }
- }
-
- /* move completed requests to the head of the list so it's easier
- * for the caller to find them */
- list_splice(&comp_reqs, &set->set_requests);
-
- /* If we hit an error, we want to recover promptly. */
- return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
-}
-EXPORT_SYMBOL(ptlrpc_check_set);
-
-/**
- * Time out request \a req. If \a async_unlink is set, do not wait
- * until LNet actually confirms network buffer unlinking.
- * Return 1 if we should give up further retrying attempts or 0 otherwise.
- */
-int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
-{
- struct obd_import *imp = req->rq_import;
- int rc = 0;
-
- spin_lock(&req->rq_lock);
- req->rq_timedout = 1;
- spin_unlock(&req->rq_lock);
-
- DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
- req->rq_net_err ? "failed due to network error" :
- ((req->rq_real_sent == 0 ||
- req->rq_real_sent < req->rq_sent ||
- req->rq_real_sent >= req->rq_deadline) ?
- "timed out for sent delay" : "timed out for slow reply"),
- (s64)req->rq_sent, (s64)req->rq_real_sent);
-
- if (imp != NULL && obd_debug_peer_on_timeout)
- LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
-
- ptlrpc_unregister_reply(req, async_unlink);
- ptlrpc_unregister_bulk(req, async_unlink);
-
- if (obd_dump_on_timeout)
- libcfs_debug_dumplog();
-
- if (imp == NULL) {
- DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
- return 1;
- }
-
- atomic_inc(&imp->imp_timeouts);
-
- /* The DLM server doesn't want recovery run on its imports. */
- if (imp->imp_dlm_fake)
- return 1;
-
- /* If this request is for recovery or other primordial tasks,
- * then error it out here. */
- if (req->rq_ctx_init || req->rq_ctx_fini ||
- req->rq_send_state != LUSTRE_IMP_FULL ||
- imp->imp_obd->obd_no_recov) {
- DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
- ptlrpc_import_state_name(req->rq_send_state),
- ptlrpc_import_state_name(imp->imp_state));
- spin_lock(&req->rq_lock);
- req->rq_status = -ETIMEDOUT;
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- return 1;
- }
-
- /* if a request can't be resent we can't wait for an answer after
- the timeout */
- if (ptlrpc_no_resend(req)) {
- DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
- rc = 1;
- }
-
- ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
-
- return rc;
-}
-
-/**
- * Time out all uncompleted requests in the request set pointed to by \a data.
- * Callback used when waiting on sets with l_wait_event.
- * Always returns 1.
- */
-int ptlrpc_expired_set(void *data)
-{
- struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
- time64_t now = ktime_get_real_seconds();
-
- LASSERT(set != NULL);
-
- /*
- * A timeout expired. See which reqs it applies to...
- */
- list_for_each(tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
-
- /* don't expire request waiting for context */
- if (req->rq_wait_ctx)
- continue;
-
- /* Request in-flight? */
- if (!((req->rq_phase == RQ_PHASE_RPC &&
- !req->rq_waiting && !req->rq_resend) ||
- (req->rq_phase == RQ_PHASE_BULK)))
- continue;
-
- if (req->rq_timedout || /* already dealt with */
- req->rq_deadline > now) /* not expired */
- continue;
-
- /* Deal with this guy. Do it asynchronously to not block
- * ptlrpcd thread. */
- ptlrpc_expire_one_request(req, 1);
- }
-
- /*
- * When waiting for a whole set, we always break out of the
- * sleep so we can recalculate the timeout, or enable interrupts
- * if everyone's timed out.
- */
- return 1;
-}
-EXPORT_SYMBOL(ptlrpc_expired_set);
-
-/**
- * Sets rq_intr flag in \a req under spinlock.
- */
-void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
-}
-EXPORT_SYMBOL(ptlrpc_mark_interrupted);
-
-/**
- * Interrupts (sets interrupted flag) all uncompleted requests in
- * a set \a data. Callback for l_wait_event for interruptible waits.
- */
-void ptlrpc_interrupted_set(void *data)
-{
- struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
-
- LASSERT(set != NULL);
- CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
-
- list_for_each(tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
-
- if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREGISTERING)
- continue;
-
- ptlrpc_mark_interrupted(req);
- }
-}
-EXPORT_SYMBOL(ptlrpc_interrupted_set);
-
-/**
- * Get the smallest timeout in the set; this does NOT set a timeout.
- */
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
-{
- struct list_head *tmp;
- time64_t now = ktime_get_real_seconds();
- int timeout = 0;
- struct ptlrpc_request *req;
- time64_t deadline;
-
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
-
- /*
- * Request in-flight?
- */
- if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
- (req->rq_phase == RQ_PHASE_BULK) ||
- (req->rq_phase == RQ_PHASE_NEW)))
- continue;
-
- /*
- * Already timed out.
- */
- if (req->rq_timedout)
- continue;
-
- /*
- * Waiting for ctx.
- */
- if (req->rq_wait_ctx)
- continue;
-
- if (req->rq_phase == RQ_PHASE_NEW)
- deadline = req->rq_sent;
- else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
- deadline = req->rq_sent;
- else
- deadline = req->rq_sent + req->rq_timeout;
-
- if (deadline <= now) /* actually expired already */
- timeout = 1; /* ASAP */
- else if (timeout == 0 || timeout > deadline - now)
- timeout = deadline - now;
- }
- return timeout;
-}
-EXPORT_SYMBOL(ptlrpc_set_next_timeout);
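/*
 * Illustrative sketch (not part of the original file): for a set with
 * two in-flight requests,
 *
 *         req A: sent 12s ago, rq_timeout = 30s  ->  deadline in 18s
 *         req B: sent 40s ago, rq_timeout = 30s  ->  already expired
 *
 * the function returns 1 (poll ASAP) because of req B; with only req A
 * present it would return 18.
 */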
-
-/**
- * Send all unsent requests from the set and then wait until all
- * requests in the set complete (either get a reply, timeout, get an
- * error or otherwise be interrupted).
- * Returns 0 on success or error code otherwise.
- */
-int ptlrpc_set_wait(struct ptlrpc_request_set *set)
-{
- struct list_head *tmp;
- struct ptlrpc_request *req;
- struct l_wait_info lwi;
- int rc, timeout;
-
- if (set->set_producer)
- (void)ptlrpc_set_producer(set);
- else
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- if (req->rq_phase == RQ_PHASE_NEW)
- (void)ptlrpc_send_new_req(req);
- }
-
- if (list_empty(&set->set_requests))
- return 0;
-
- do {
- timeout = ptlrpc_set_next_timeout(set);
-
- /* wait until all complete, interrupted, or an in-flight
- * req times out */
- CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
- set, timeout);
-
- if (timeout == 0 && !cfs_signal_pending())
- /*
- * No requests are in-flight (either timed out
- * or delayed), so we can allow interrupts.
- * We still want to block for a limited time,
- * so we allow interrupts during the timeout.
- */
- lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
- ptlrpc_expired_set,
- ptlrpc_interrupted_set, set);
- else
- /*
- * At least one request is in flight, so no
- * interrupts are allowed. Wait until all
- * complete, or an in-flight req times out.
- */
- lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
- ptlrpc_expired_set, set);
-
- rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
-
- /* LU-769 - if we ignored the signal because it was already
- * pending when we started, we need to handle it now or we risk
- * it being ignored forever */
- if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
- sigset_t blocked_sigs =
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- /* In fact we only interrupt for the "fatal" signals
- * like SIGINT or SIGKILL. We still ignore less
- * important signals since ptlrpc set is not easily
- * reentrant from userspace again */
- if (cfs_signal_pending())
- ptlrpc_interrupted_set(set);
- cfs_restore_sigs(blocked_sigs);
- }
-
- LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
-
- /* -EINTR => all requests have been flagged rq_intr so next
- * check completes.
- * -ETIMEDOUT => someone timed out. When all reqs have
- * timed out, signals are enabled allowing completion with
- * EINTR.
- * I don't really care if we go once more round the loop in
- * the error cases -eeb. */
- if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
- spin_lock(&req->rq_lock);
- req->rq_invalid_rqset = 1;
- spin_unlock(&req->rq_lock);
- }
- }
- } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
-
- LASSERT(atomic_read(&set->set_remaining) == 0);
-
- rc = set->set_rc; /* rq_status of already freed requests if any */
- list_for_each(tmp, &set->set_requests) {
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
-
- LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
- if (req->rq_status != 0)
- rc = req->rq_status;
- }
-
- if (set->set_interpret != NULL) {
- int (*interpreter)(struct ptlrpc_request_set *set, void *, int) =
- set->set_interpret;
- rc = interpreter(set, set->set_arg, rc);
- } else {
- struct ptlrpc_set_cbdata *cbdata, *n;
- int err;
-
- list_for_each_entry_safe(cbdata, n,
- &set->set_cblist, psc_item) {
- list_del_init(&cbdata->psc_item);
- err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
- if (err && !rc)
- rc = err;
- kfree(cbdata);
- }
- }
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_set_wait);
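/*
 * Hypothetical sketch (not part of the original file) of a set
 * interpreter as invoked above: it receives the aggregated status once
 * every request in the set has completed, with "arg" being whatever the
 * caller stored in set->set_arg.
 */
static int example_set_interpret(struct ptlrpc_request_set *set,
                                 void *arg, int rc)
{
        /* inspect per-request rq_status here if needed, then pass rc on */
        return rc;
}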
-
-/**
- * Helper function for request freeing.
- * Called when the reference count reaches zero and the request needs to be freed.
- * Removes request from all sorts of sending/replay lists it might be on,
- * frees network buffers if any are present.
- * If \a locked is set, the caller is already holding the import imp_lock
- * and so we do not need to reacquire it (for certain list manipulations).
- */
-static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
-{
- if (request == NULL)
- return;
- LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
- LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */
- LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
- LASSERTF(!request->rq_replay, "req %p\n", request);
-
- req_capsule_fini(&request->rq_pill);
-
- /* We must take it off the imp_replay_list first. Otherwise, we'll set
- * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
- if (request->rq_import != NULL) {
- if (!locked)
- spin_lock(&request->rq_import->imp_lock);
- list_del_init(&request->rq_replay_list);
- if (!locked)
- spin_unlock(&request->rq_import->imp_lock);
- }
- LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
-
- if (atomic_read(&request->rq_refcount) != 0) {
- DEBUG_REQ(D_ERROR, request,
- "freeing request with nonzero refcount");
- LBUG();
- }
-
- if (request->rq_repbuf != NULL)
- sptlrpc_cli_free_repbuf(request);
- if (request->rq_export != NULL) {
- class_export_put(request->rq_export);
- request->rq_export = NULL;
- }
- if (request->rq_import != NULL) {
- class_import_put(request->rq_import);
- request->rq_import = NULL;
- }
- if (request->rq_bulk != NULL)
- ptlrpc_free_bulk_pin(request->rq_bulk);
-
- if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
- sptlrpc_cli_free_reqbuf(request);
-
- if (request->rq_cli_ctx)
- sptlrpc_req_put_ctx(request, !locked);
-
- if (request->rq_pool)
- __ptlrpc_free_req_to_pool(request);
- else
- ptlrpc_request_cache_free(request);
-}
-
-/**
- * Helper function
- * Drops one reference count for request \a request.
- * \a locked set indicates that caller holds import imp_lock.
- * Frees the request when reference count reaches zero.
- */
-static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
-{
- if (request == NULL)
- return 1;
-
- if (request == LP_POISON ||
- request->rq_reqmsg == LP_POISON) {
- CERROR("dereferencing freed request (bug 575)\n");
- LBUG();
- return 1;
- }
-
- DEBUG_REQ(D_INFO, request, "refcount now %u",
- atomic_read(&request->rq_refcount) - 1);
-
- if (atomic_dec_and_test(&request->rq_refcount)) {
- __ptlrpc_free_req(request, locked);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * Drops one reference count for a request.
- */
-void ptlrpc_req_finished(struct ptlrpc_request *request)
-{
- __ptlrpc_req_finished(request, 0);
-}
-EXPORT_SYMBOL(ptlrpc_req_finished);
-
-/**
- * Returns xid of a \a request
- */
-__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
-{
- return request->rq_xid;
-}
-EXPORT_SYMBOL(ptlrpc_req_xid);
-
-/**
- * Disengage the client's reply buffer from the network
- * NB does _NOT_ unregister any client-side bulk.
- * IDEMPOTENT, but _not_ safe against concurrent callers.
- * The request owner (i.e. the thread doing the I/O) must call...
- * Returns 0 on success or 1 if unregistering cannot be made.
- */
-int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
-{
- int rc;
- wait_queue_head_t *wq;
- struct l_wait_info lwi;
-
- /*
- * Might sleep.
- */
- LASSERT(!in_interrupt());
-
- /*
- * Let's setup deadline for reply unlink.
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- async && request->rq_reply_deadline == 0)
- request->rq_reply_deadline = ktime_get_real_seconds()+LONG_UNLINK;
-
- /*
- * Nothing left to do.
- */
- if (!ptlrpc_client_recv_or_unlink(request))
- return 1;
-
- LNetMDUnlink(request->rq_reply_md_h);
-
- /*
- * Let's check it once again.
- */
- if (!ptlrpc_client_recv_or_unlink(request))
- return 1;
-
- /*
- * Move to "Unregistering" phase as reply was not unlinked yet.
- */
- ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
-
- /*
- * Do not wait for unlink to finish.
- */
- if (async)
- return 0;
-
- /*
- * We have to l_wait_event() whatever the result, to give liblustre
- * a chance to run reply_in_callback(), and to make sure we've
- * unlinked before returning a req to the pool.
- */
- if (request->rq_set != NULL)
- wq = &request->rq_set->set_waitq;
- else
- wq = &request->rq_reply_waitq;
-
- for (;;) {
- /* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
- &lwi);
- if (rc == 0) {
- ptlrpc_rqphase_move(request, request->rq_next_phase);
- return 1;
- }
-
- LASSERT(rc == -ETIMEDOUT);
- DEBUG_REQ(D_WARNING, request,
- "Unexpectedly long timeout rvcng=%d unlnk=%d/%d",
- request->rq_receiving_reply,
- request->rq_req_unlink, request->rq_reply_unlink);
- }
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_unregister_reply);
-
-static void ptlrpc_free_request(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
-
- if (req->rq_commit_cb != NULL)
- req->rq_commit_cb(req);
- list_del_init(&req->rq_replay_list);
-
- __ptlrpc_req_finished(req, 1);
-}
-
-/**
- * the request is committed and dropped from the replay list of its import
- */
-void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
-{
- struct obd_import *imp = req->rq_import;
-
- spin_lock(&imp->imp_lock);
- if (list_empty(&req->rq_replay_list)) {
- spin_unlock(&imp->imp_lock);
- return;
- }
-
- if (force || req->rq_transno <= imp->imp_peer_committed_transno)
- ptlrpc_free_request(req);
-
- spin_unlock(&imp->imp_lock);
-}
-EXPORT_SYMBOL(ptlrpc_request_committed);
-
-/**
- * Iterates through the replay_list on the import and prunes
- * all requests that have a transno smaller than last_committed for the
- * import and don't have rq_replay set.
- * Since requests are sorted in transno order, it stops at the first
- * transno bigger than last_committed.
- * The caller must hold imp->imp_lock.
- */
-void ptlrpc_free_committed(struct obd_import *imp)
-{
- struct ptlrpc_request *req, *saved;
- struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
- bool skip_committed_list = true;
-
- LASSERT(imp != NULL);
- assert_spin_locked(&imp->imp_lock);
-
- if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
- imp->imp_generation == imp->imp_last_generation_checked) {
- CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
- imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
- return;
- }
- CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
- imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
- imp->imp_generation);
-
- if (imp->imp_generation != imp->imp_last_generation_checked)
- skip_committed_list = false;
-
- imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
- imp->imp_last_generation_checked = imp->imp_generation;
-
- list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
- rq_replay_list) {
- /* XXX ok to remove when 1357 resolved - rread 05/29/03 */
- LASSERT(req != last_req);
- last_req = req;
-
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_EMERG, req, "zero transno during replay");
- LBUG();
- }
- if (req->rq_import_generation < imp->imp_generation) {
- DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
- goto free_req;
- }
-
- /* not yet committed */
- if (req->rq_transno > imp->imp_peer_committed_transno) {
- DEBUG_REQ(D_RPCTRACE, req, "stopping search");
- break;
- }
-
- if (req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
- list_move_tail(&req->rq_replay_list,
- &imp->imp_committed_list);
- continue;
- }
-
- DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
- imp->imp_peer_committed_transno);
-free_req:
- ptlrpc_free_request(req);
- }
- if (skip_committed_list)
- return;
-
- list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
- rq_replay_list) {
- LASSERT(req->rq_transno != 0);
- if (req->rq_import_generation < imp->imp_generation) {
- DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
- ptlrpc_free_request(req);
- }
- }
-}
-
-/**
- * Schedule previously sent request for resend.
- * For bulk requests we assign new xid (to avoid problems with
- * lost replies and therefore several transfers landing into same buffer
- * from different sending attempts).
- */
-void ptlrpc_resend_req(struct ptlrpc_request *req)
-{
- DEBUG_REQ(D_HA, req, "going to resend");
- spin_lock(&req->rq_lock);
-
- /* Request got a reply but is still linked to the import list.
- * Let ptlrpc_check_set() process it. */
- if (ptlrpc_client_replied(req)) {
- spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_HA, req, "it has reply, so skip it");
- return;
- }
-
- lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
- req->rq_status = -EAGAIN;
-
- req->rq_resend = 1;
- req->rq_net_err = 0;
- req->rq_timedout = 0;
- if (req->rq_bulk) {
- __u64 old_xid = req->rq_xid;
-
- /* ensure previous bulk fails */
- req->rq_xid = ptlrpc_next_xid();
- CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
- old_xid, req->rq_xid);
- }
- ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
-}
-EXPORT_SYMBOL(ptlrpc_resend_req);
-
-/**
- * Grab additional reference on a request \a req
- */
-struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
-{
- atomic_inc(&req->rq_refcount);
- return req;
-}
-EXPORT_SYMBOL(ptlrpc_request_addref);
-
-/**
- * Add a request to import replay_list.
- * Must be called under imp_lock
- */
-void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
- struct obd_import *imp)
-{
- struct list_head *tmp;
-
- assert_spin_locked(&imp->imp_lock);
-
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
- LBUG();
- }
-
- /* clear this for new requests that were resent as well
- as resent replayed requests. */
- lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
-
- /* don't re-add requests that have been replayed */
- if (!list_empty(&req->rq_replay_list))
- return;
-
- lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
-
- LASSERT(imp->imp_replayable);
- /* Balanced in ptlrpc_free_committed, usually. */
- ptlrpc_request_addref(req);
- list_for_each_prev(tmp, &imp->imp_replay_list) {
- struct ptlrpc_request *iter =
- list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
- /* We may have duplicate transnos if we create and then
- * open a file, or for closes retained to match creating
- * opens, so use req->rq_xid as a secondary key.
- * (See bugs 684, 685, and 428.)
- * XXX no longer needed, but all opens need transnos!
- */
- if (iter->rq_transno > req->rq_transno)
- continue;
-
- if (iter->rq_transno == req->rq_transno) {
- LASSERT(iter->rq_xid != req->rq_xid);
- if (iter->rq_xid > req->rq_xid)
- continue;
- }
-
- list_add(&req->rq_replay_list, &iter->rq_replay_list);
- return;
- }
-
- list_add(&req->rq_replay_list, &imp->imp_replay_list);
-}
-EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
-
-/**
- * Send request and wait until it completes.
- * Returns request processing status.
- */
-int ptlrpc_queue_wait(struct ptlrpc_request *req)
-{
- struct ptlrpc_request_set *set;
- int rc;
-
- LASSERT(req->rq_set == NULL);
- LASSERT(!req->rq_receiving_reply);
-
- set = ptlrpc_prep_set();
- if (set == NULL) {
- CERROR("Unable to allocate ptlrpc set.");
- return -ENOMEM;
- }
-
- /* for distributed debugging */
- lustre_msg_set_status(req->rq_reqmsg, current_pid());
-
- /* add a ref for the set (see comment in ptlrpc_set_add_req) */
- ptlrpc_request_addref(req);
- ptlrpc_set_add_req(set, req);
- rc = ptlrpc_set_wait(set);
- ptlrpc_set_destroy(set);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_queue_wait);
-
-struct ptlrpc_replay_async_args {
- int praa_old_state;
- int praa_old_status;
-};
-
-/**
- * Callback used for replayed requests reply processing.
- * In case of successful reply calls registered request replay callback.
- * In case of error restart replay process.
- */
-static int ptlrpc_replay_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *data, int rc)
-{
- struct ptlrpc_replay_async_args *aa = data;
- struct obd_import *imp = req->rq_import;
-
- atomic_dec(&imp->imp_replay_inflight);
-
- if (!ptlrpc_client_replied(req)) {
- CERROR("request replay timed out, restarting recovery\n");
- rc = -ETIMEDOUT;
- goto out;
- }
-
- if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
- (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
- lustre_msg_get_status(req->rq_repmsg) == -ENODEV)) {
- rc = lustre_msg_get_status(req->rq_repmsg);
- goto out;
- }
-
- /** VBR: check version failure */
- if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
- /** replay was failed due to version mismatch */
- DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
- spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 1;
- imp->imp_no_lock_replay = 1;
- spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
- } else {
- /** The transno had better not change over replay. */
- LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
- lustre_msg_get_transno(req->rq_repmsg) ||
- lustre_msg_get_transno(req->rq_repmsg) == 0,
- "%#llx/%#llx\n",
- lustre_msg_get_transno(req->rq_reqmsg),
- lustre_msg_get_transno(req->rq_repmsg));
- }
-
- spin_lock(&imp->imp_lock);
- /** if replaying by version, a gap occurred on the server; don't trust locks */
- if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
- imp->imp_no_lock_replay = 1;
- imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
- spin_unlock(&imp->imp_lock);
- LASSERT(imp->imp_last_replay_transno);
-
- /* transaction number shouldn't be bigger than the latest replayed */
- if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
- DEBUG_REQ(D_ERROR, req,
- "Reported transno %llu is bigger than the replayed one: %llu",
- req->rq_transno,
- lustre_msg_get_transno(req->rq_reqmsg));
- rc = -EINVAL;
- goto out;
- }
-
- DEBUG_REQ(D_HA, req, "got rep");
-
- /* let the callback do fixups, possibly including in the request */
- if (req->rq_replay_cb)
- req->rq_replay_cb(req);
-
- if (ptlrpc_client_replied(req) &&
- lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
- DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
- lustre_msg_get_status(req->rq_repmsg),
- aa->praa_old_status);
- } else {
- /* Put it back for re-replay. */
- lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
- }
-
- /*
- * Errors during replay can set transno to 0, but
- * imp_last_replay_transno shouldn't be set to 0 anyway
- */
- if (req->rq_transno == 0)
- CERROR("Transno is 0 during replay!\n");
-
- /* continue with recovery */
- rc = ptlrpc_import_recovery_state_machine(imp);
- out:
- req->rq_send_state = aa->praa_old_state;
-
- if (rc != 0)
- /* this replay failed, so restart recovery */
- ptlrpc_connect_import(imp);
-
- return rc;
-}
-
-/**
- * Prepares and queues request for replay.
- * Adds it to ptlrpcd queue for actual sending.
- * Returns 0 on success.
- */
-int ptlrpc_replay_req(struct ptlrpc_request *req)
-{
- struct ptlrpc_replay_async_args *aa;
-
- LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
-
- LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- memset(aa, 0, sizeof(*aa));
-
- /* Prepare request to be resent with ptlrpcd */
- aa->praa_old_state = req->rq_send_state;
- req->rq_send_state = LUSTRE_IMP_REPLAY;
- req->rq_phase = RQ_PHASE_NEW;
- req->rq_next_phase = RQ_PHASE_UNDEFINED;
- if (req->rq_repmsg)
- aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
- req->rq_status = 0;
- req->rq_interpret_reply = ptlrpc_replay_interpret;
- /* Readjust the timeout for current conditions */
- ptlrpc_at_set_req_timeout(req);
-
- /* Tell the server the net_latency, so the server can calculate how long
- * it should wait for the next replay */
- lustre_msg_set_service_time(req->rq_reqmsg,
- ptlrpc_at_get_net_latency(req));
- DEBUG_REQ(D_HA, req, "REPLAY");
-
- atomic_inc(&req->rq_import->imp_replay_inflight);
- ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
-
- ptlrpcd_add_req(req);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_replay_req);
-
-/**
- * Aborts all in-flight request on import \a imp sending and delayed lists
- */
-void ptlrpc_abort_inflight(struct obd_import *imp)
-{
- struct list_head *tmp, *n;
-
- /* Make sure that no new requests get processed for this import.
- * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
- * this flag and then putting requests on sending_list or delayed_list.
- */
- spin_lock(&imp->imp_lock);
-
- /* XXX locking? Maybe we should remove each request with the list
- * locked? Also, how do we know if the requests on the list are
- * being freed at this time?
- */
- list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_list);
-
- DEBUG_REQ(D_RPCTRACE, req, "inflight");
-
- spin_lock(&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
- req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- spin_unlock(&req->rq_lock);
- }
-
- list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
- struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request, rq_list);
-
- DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
-
- spin_lock(&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
- req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- spin_unlock(&req->rq_lock);
- }
-
- /* Last chance to free reqs left on the replay list, but we
- * will still leak reqs that haven't committed. */
- if (imp->imp_replayable)
- ptlrpc_free_committed(imp);
-
- spin_unlock(&imp->imp_lock);
-}
-EXPORT_SYMBOL(ptlrpc_abort_inflight);
-
-/**
- * Abort all uncompleted requests in request set \a set
- */
-void ptlrpc_abort_set(struct ptlrpc_request_set *set)
-{
- struct list_head *tmp, *pos;
-
- LASSERT(set != NULL);
-
- list_for_each_safe(pos, tmp, &set->set_requests) {
- struct ptlrpc_request *req =
- list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
-
- spin_lock(&req->rq_lock);
- if (req->rq_phase != RQ_PHASE_RPC) {
- spin_unlock(&req->rq_lock);
- continue;
- }
-
- req->rq_err = 1;
- req->rq_status = -EINTR;
- ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
- }
-}
-
-static __u64 ptlrpc_last_xid;
-static spinlock_t ptlrpc_last_xid_lock;
-
-/**
- * Initialize the XID for the node. This is common among all requests on
- * this node, and only requires the property that it is monotonically
- * increasing. It does not need to be sequential. Since this is also used
- * as the RDMA match bits, it is important that a single client NOT have
- * the same match bits for two different in-flight requests, hence we do
- * NOT want to have an XID per target or similar.
- *
- * To avoid an unlikely collision between match bits after a client reboot
- * (which would deliver old data into the wrong RDMA buffer) initialize
- * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
- * If the time is clearly incorrect, we instead use a 62-bit random number.
- * In the worst case the random number will overflow 1M RPCs per second in
- * 9133 years, or permutations thereof.
- */
-#define YEAR_2004 (1ULL << 30)
-void ptlrpc_init_xid(void)
-{
- time64_t now = ktime_get_real_seconds();
-
- spin_lock_init(&ptlrpc_last_xid_lock);
- if (now < YEAR_2004) {
- cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
- ptlrpc_last_xid >>= 2;
- ptlrpc_last_xid |= (1ULL << 61);
- } else {
- ptlrpc_last_xid = (__u64)now << 20;
- }
-
- /* Always need to be aligned to a power-of-two for multi-bulk BRW */
- CLASSERT(((PTLRPC_BULK_OPS_COUNT - 1) & PTLRPC_BULK_OPS_COUNT) == 0);
- ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
-}
-
-/**
- * Increase xid and returns resulting new value to the caller.
- *
- * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
- * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
- * itself uses the last bulk xid needed, so the server can determine
- * the number of bulk transfers from the RPC XID and a bitmask. The starting
- * xid must align to a power-of-two value.
- *
- * This is assumed to be true due to the initial ptlrpc_last_xid
- * value also being initialized to a power-of-two value. LU-1431
- */
-__u64 ptlrpc_next_xid(void)
-{
- __u64 next;
-
- spin_lock(&ptlrpc_last_xid_lock);
- next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
- ptlrpc_last_xid = next;
- spin_unlock(&ptlrpc_last_xid_lock);
-
- return next;
-}
-EXPORT_SYMBOL(ptlrpc_next_xid);
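The two comments above rely on the initial XID being aligned to PTLRPC_BULK_OPS_COUNT (a power of two) and on every allocation advancing it by that same count, so each RPC owns a contiguous range of match bits. The standalone sketch below illustrates only that arithmetic; the constants and helpers are stand-ins for illustration, not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins for the kernel constants (see LU-1431 above). */
#define BULK_OPS_COUNT 16ULL			/* must be a power of two */
#define BULK_OPS_MASK  (~(BULK_OPS_COUNT - 1))

static uint64_t last_xid;

static void init_xid(void)
{
	/* Seed from wall-clock time (~1M RPC/s headroom), then align so
	 * every later step of BULK_OPS_COUNT stays on the same
	 * power-of-two boundary, as the comment above requires. */
	last_xid = (uint64_t)time(NULL) << 20;
	last_xid &= BULK_OPS_MASK;
}

static uint64_t next_xid(void)
{
	/* The real code holds ptlrpc_last_xid_lock around this update. */
	last_xid += BULK_OPS_COUNT;
	return last_xid;
}

int main(void)
{
	uint64_t xid;

	init_xid();
	xid = next_xid();
	/* A multi-bulk BRW consumes match bits from the returned xid up to
	 * xid + BULK_OPS_COUNT - 1; the server recovers the number of bulk
	 * transfers from the RPC XID and a bitmask. */
	printf("xid %#llx .. %#llx reserved for one RPC\n",
	       (unsigned long long)xid,
	       (unsigned long long)(xid + BULK_OPS_COUNT - 1));
	return 0;
}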
-
-/**
- * Get a glimpse at what next xid value might have been.
- * Returns possible next xid.
- */
-__u64 ptlrpc_sample_next_xid(void)
-{
-#if BITS_PER_LONG == 32
- /* need to avoid possible word tearing on 32-bit systems */
- __u64 next;
-
- spin_lock(&ptlrpc_last_xid_lock);
- next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
- spin_unlock(&ptlrpc_last_xid_lock);
-
- return next;
-#else
- /* No need to lock, since returned value is racy anyways */
- return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
-#endif
-}
-EXPORT_SYMBOL(ptlrpc_sample_next_xid);
-
-/**
- * Functions for operating ptlrpc workers.
- *
- * A ptlrpc work is a function which will run inside ptlrpc context.
- * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
- *
- * 1. after a work is created, it can be used many times, that is:
- * handler = ptlrpcd_alloc_work();
- * ptlrpcd_queue_work();
- *
- * queue it again when necessary:
- * ptlrpcd_queue_work();
- * ptlrpcd_destroy_work();
- * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
- * but the work will only be queued once at any given time. Also, as its name
- * implies, there may be a delay before it is actually run by a ptlrpcd thread.
- */
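To make the lifecycle above concrete, here is an illustrative usage sketch of the ptlrpcd_* entry points defined later in this file; it cannot compile in isolation, and the import pointer and callback are placeholders.

/* Hypothetical callback: must not sleep, it runs inside a ptlrpcd thread. */
static int example_work_cb(const struct lu_env *env, void *cbdata)
{
	return 0;
}

static int example_use_work(struct obd_import *imp)
{
	void *work;

	work = ptlrpcd_alloc_work(imp, example_work_cb, NULL);
	if (IS_ERR(work))
		return PTR_ERR(work);

	ptlrpcd_queue_work(work);	/* run it once */
	/* ... later, the same handler can be queued again ... */
	ptlrpcd_queue_work(work);

	ptlrpcd_destroy_work(work);	/* drop the final reference */
	return 0;
}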
-struct ptlrpc_work_async_args {
- int (*cb)(const struct lu_env *, void *);
- void *cbdata;
-};
-
-static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
-{
- /* re-initialize the req */
- req->rq_timeout = obd_timeout;
- req->rq_sent = ktime_get_real_seconds();
- req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
- req->rq_phase = RQ_PHASE_INTERPRET;
- req->rq_next_phase = RQ_PHASE_COMPLETE;
- req->rq_xid = ptlrpc_next_xid();
- req->rq_import_generation = req->rq_import->imp_generation;
-
- ptlrpcd_add_req(req);
-}
-
-static int work_interpreter(const struct lu_env *env,
- struct ptlrpc_request *req, void *data, int rc)
-{
- struct ptlrpc_work_async_args *arg = data;
-
- LASSERT(ptlrpcd_check_work(req));
- LASSERT(arg->cb != NULL);
-
- rc = arg->cb(env, arg->cbdata);
-
- list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
-
- if (atomic_dec_return(&req->rq_refcount) > 1) {
- atomic_set(&req->rq_refcount, 2);
- ptlrpcd_add_work_req(req);
- }
- return rc;
-}
-
-static int worker_format;
-
-static int ptlrpcd_check_work(struct ptlrpc_request *req)
-{
- return req->rq_pill.rc_fmt == (void *)&worker_format;
-}
-
-/**
- * Create a work for ptlrpc.
- */
-void *ptlrpcd_alloc_work(struct obd_import *imp,
- int (*cb)(const struct lu_env *, void *), void *cbdata)
-{
- struct ptlrpc_request *req = NULL;
- struct ptlrpc_work_async_args *args;
-
- might_sleep();
-
- if (cb == NULL)
- return ERR_PTR(-EINVAL);
-
- /* copy some code from deprecated fakereq. */
- req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (req == NULL) {
- CERROR("ptlrpc: run out of memory!\n");
- return ERR_PTR(-ENOMEM);
- }
-
- req->rq_send_state = LUSTRE_IMP_FULL;
- req->rq_type = PTL_RPC_MSG_REQUEST;
- req->rq_import = class_import_get(imp);
- req->rq_export = NULL;
- req->rq_interpret_reply = work_interpreter;
- /* don't want reply */
- req->rq_receiving_reply = 0;
- req->rq_req_unlink = req->rq_reply_unlink = 0;
- req->rq_no_delay = req->rq_no_resend = 1;
- req->rq_pill.rc_fmt = (void *)&worker_format;
-
- spin_lock_init(&req->rq_lock);
- INIT_LIST_HEAD(&req->rq_list);
- INIT_LIST_HEAD(&req->rq_replay_list);
- INIT_LIST_HEAD(&req->rq_set_chain);
- INIT_LIST_HEAD(&req->rq_history_list);
- INIT_LIST_HEAD(&req->rq_exp_list);
- init_waitqueue_head(&req->rq_reply_waitq);
- init_waitqueue_head(&req->rq_set_waitq);
- atomic_set(&req->rq_refcount, 1);
-
- CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
- args = ptlrpc_req_async_args(req);
- args->cb = cb;
- args->cbdata = cbdata;
-
- return req;
-}
-EXPORT_SYMBOL(ptlrpcd_alloc_work);
-
-void ptlrpcd_destroy_work(void *handler)
-{
- struct ptlrpc_request *req = handler;
-
- if (req)
- ptlrpc_req_finished(req);
-}
-EXPORT_SYMBOL(ptlrpcd_destroy_work);
-
-int ptlrpcd_queue_work(void *handler)
-{
- struct ptlrpc_request *req = handler;
-
- /*
- * Check if the req is already being queued.
- *
- * Here comes a trick: ptlrpc lacks a reliable way of checking whether
- * a req is currently being processed, so I have to use the req's
- * refcount for this purpose. This is okay because the caller should
- * use this req as opaque data. - Jinshan
- */
- LASSERT(atomic_read(&req->rq_refcount) > 0);
- if (atomic_inc_return(&req->rq_refcount) == 2)
- ptlrpcd_add_work_req(req);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpcd_queue_work);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
deleted file mode 100644
index ffe36e22245f..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-
-#include "ptlrpc_internal.h"
-
-static struct cfs_hash *conn_hash;
-static cfs_hash_ops_t conn_hash_ops;
-
-struct ptlrpc_connection *
-ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
- struct obd_uuid *uuid)
-{
- struct ptlrpc_connection *conn, *conn2;
-
- conn = cfs_hash_lookup(conn_hash, &peer);
- if (conn)
- goto out;
-
- conn = kzalloc(sizeof(*conn), GFP_NOFS);
- if (!conn)
- return NULL;
-
- conn->c_peer = peer;
- conn->c_self = self;
- INIT_HLIST_NODE(&conn->c_hash);
- atomic_set(&conn->c_refcount, 1);
- if (uuid)
- obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
-
- /*
- * Add the newly created conn to the hash; on key collision we
- * lost a racing addition and must destroy our newly allocated
- * connection. The object which exists in the hash will be
- * returned and may be compared against our object.
- */
- /* In the function below, .hs_keycmp resolves to
- * conn_keycmp() */
- /* coverity[overrun-buffer-val] */
- conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
- if (conn != conn2) {
- kfree(conn);
- conn = conn2;
- }
-out:
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
- return conn;
-}
-EXPORT_SYMBOL(ptlrpc_connection_get);
-
-int ptlrpc_connection_put(struct ptlrpc_connection *conn)
-{
- int rc = 0;
-
- if (!conn)
- return rc;
-
- LASSERT(atomic_read(&conn->c_refcount) > 1);
-
- /*
- * We do not remove the connection from the hashtable and
- * do not free it even if the last caller released its ref,
- * as we want to keep it cached in case it is
- * needed again.
- *
- * Deallocating it and later creating a new connection
- * again would be wasteful. This way we also avoid
- * expensive locking to protect things from a get/put
- * race when a cached connection found by ptlrpc_connection_get()
- * is freed by ptlrpc_connection_put().
- *
- * It will be freed later, at module unload time,
- * when the ptlrpc_connection_fini()->lh_exit->conn_exit()
- * path is called.
- */
- if (atomic_dec_return(&conn->c_refcount) == 1)
- rc = 1;
-
- CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_connection_put);
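The comment above describes a deliberately long-lived cache: ptlrpc_connection_put() only drops a reference and never frees the object. A hedged usage sketch of the intended get/addref/put balance follows; peer, self and uuid are placeholders, and the sketch assumes the ptlrpc headers this file includes.

static void example_connection_lifecycle(lnet_process_id_t peer,
					 lnet_nid_t self,
					 struct obd_uuid *uuid)
{
	struct ptlrpc_connection *conn;

	conn = ptlrpc_connection_get(peer, self, uuid);
	if (!conn)
		return;				/* allocation failed */

	ptlrpc_connection_addref(conn);		/* a second user appears */

	ptlrpc_connection_put(conn);		/* drops one ref ...        */
	ptlrpc_connection_put(conn);		/* ... struct stays cached; */
						/* conn_exit() frees it at  */
						/* ptlrpc_connection_fini() */
}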
-
-struct ptlrpc_connection *
-ptlrpc_connection_addref(struct ptlrpc_connection *conn)
-{
- atomic_inc(&conn->c_refcount);
- CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
- conn, atomic_read(&conn->c_refcount),
- libcfs_nid2str(conn->c_peer.nid));
-
- return conn;
-}
-EXPORT_SYMBOL(ptlrpc_connection_addref);
-
-int ptlrpc_connection_init(void)
-{
- conn_hash = cfs_hash_create("CONN_HASH",
- HASH_CONN_CUR_BITS,
- HASH_CONN_MAX_BITS,
- HASH_CONN_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &conn_hash_ops, CFS_HASH_DEFAULT);
- if (!conn_hash)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_connection_init);
-
-void ptlrpc_connection_fini(void)
-{
- cfs_hash_putref(conn_hash);
-}
-EXPORT_SYMBOL(ptlrpc_connection_fini);
-
-/*
- * Hash operations for net_peer<->connection
- */
-static unsigned
-conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
-{
- return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
-}
-
-static int
-conn_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
- const lnet_process_id_t *conn_key;
-
- LASSERT(key != NULL);
- conn_key = (lnet_process_id_t *)key;
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
-
- return conn_key->nid == conn->c_peer.nid &&
- conn_key->pid == conn->c_peer.pid;
-}
-
-static void *
-conn_key(struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- return &conn->c_peer;
-}
-
-static void *
-conn_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct ptlrpc_connection, c_hash);
-}
-
-static void
-conn_get(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- atomic_inc(&conn->c_refcount);
-}
-
-static void
-conn_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- atomic_dec(&conn->c_refcount);
-}
-
-static void
-conn_exit(struct cfs_hash *hs, struct hlist_node *hnode)
-{
- struct ptlrpc_connection *conn;
-
- conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
- /*
- * Nothing should be left. Connection user put it and
- * connection also was deleted from table by this time
- * so we should have 0 refs.
- */
- LASSERTF(atomic_read(&conn->c_refcount) == 0,
- "Busy connection with %d refs\n",
- atomic_read(&conn->c_refcount));
- kfree(conn);
-}
-
-static cfs_hash_ops_t conn_hash_ops = {
- .hs_hash = conn_hashfn,
- .hs_keycmp = conn_keycmp,
- .hs_key = conn_key,
- .hs_object = conn_object,
- .hs_get = conn_get,
- .hs_put_locked = conn_put_locked,
- .hs_exit = conn_exit,
-};
diff --git a/drivers/staging/lustre/lustre/ptlrpc/errno.c b/drivers/staging/lustre/lustre/ptlrpc/errno.c
deleted file mode 100644
index 73f8374f190e..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/errno.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.txt
- *
- * GPL HEADER END
- */
-/*
- * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
- *
- * Copyright (c) 2013, Intel Corporation.
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include "../include/lustre/lustre_errno.h"
-
-/*
- * The two translation tables below must define a one-to-one mapping between
- * host and network errnos.
- *
- * EWOULDBLOCK is equal to EAGAIN on all architectures except for parisc, which
- * appears irrelevant. Thus, existing references to EWOULDBLOCK are fine.
- *
- * EDEADLOCK is equal to EDEADLK on x86 but not on sparc, at least. A sparc
- * host has no context-free way to determine if a LUSTRE_EDEADLK represents an
- * EDEADLK or an EDEADLOCK. Therefore, all existing references to EDEADLOCK
- * that need to be transferred on wire have been replaced with EDEADLK.
- */
-static int lustre_errno_hton_mapping[] = {
- [EPERM] = LUSTRE_EPERM,
- [ENOENT] = LUSTRE_ENOENT,
- [ESRCH] = LUSTRE_ESRCH,
- [EINTR] = LUSTRE_EINTR,
- [EIO] = LUSTRE_EIO,
- [ENXIO] = LUSTRE_ENXIO,
- [E2BIG] = LUSTRE_E2BIG,
- [ENOEXEC] = LUSTRE_ENOEXEC,
- [EBADF] = LUSTRE_EBADF,
- [ECHILD] = LUSTRE_ECHILD,
- [EAGAIN] = LUSTRE_EAGAIN,
- [ENOMEM] = LUSTRE_ENOMEM,
- [EACCES] = LUSTRE_EACCES,
- [EFAULT] = LUSTRE_EFAULT,
- [ENOTBLK] = LUSTRE_ENOTBLK,
- [EBUSY] = LUSTRE_EBUSY,
- [EEXIST] = LUSTRE_EEXIST,
- [EXDEV] = LUSTRE_EXDEV,
- [ENODEV] = LUSTRE_ENODEV,
- [ENOTDIR] = LUSTRE_ENOTDIR,
- [EISDIR] = LUSTRE_EISDIR,
- [EINVAL] = LUSTRE_EINVAL,
- [ENFILE] = LUSTRE_ENFILE,
- [EMFILE] = LUSTRE_EMFILE,
- [ENOTTY] = LUSTRE_ENOTTY,
- [ETXTBSY] = LUSTRE_ETXTBSY,
- [EFBIG] = LUSTRE_EFBIG,
- [ENOSPC] = LUSTRE_ENOSPC,
- [ESPIPE] = LUSTRE_ESPIPE,
- [EROFS] = LUSTRE_EROFS,
- [EMLINK] = LUSTRE_EMLINK,
- [EPIPE] = LUSTRE_EPIPE,
- [EDOM] = LUSTRE_EDOM,
- [ERANGE] = LUSTRE_ERANGE,
- [EDEADLK] = LUSTRE_EDEADLK,
- [ENAMETOOLONG] = LUSTRE_ENAMETOOLONG,
- [ENOLCK] = LUSTRE_ENOLCK,
- [ENOSYS] = LUSTRE_ENOSYS,
- [ENOTEMPTY] = LUSTRE_ENOTEMPTY,
- [ELOOP] = LUSTRE_ELOOP,
- [ENOMSG] = LUSTRE_ENOMSG,
- [EIDRM] = LUSTRE_EIDRM,
- [ECHRNG] = LUSTRE_ECHRNG,
- [EL2NSYNC] = LUSTRE_EL2NSYNC,
- [EL3HLT] = LUSTRE_EL3HLT,
- [EL3RST] = LUSTRE_EL3RST,
- [ELNRNG] = LUSTRE_ELNRNG,
- [EUNATCH] = LUSTRE_EUNATCH,
- [ENOCSI] = LUSTRE_ENOCSI,
- [EL2HLT] = LUSTRE_EL2HLT,
- [EBADE] = LUSTRE_EBADE,
- [EBADR] = LUSTRE_EBADR,
- [EXFULL] = LUSTRE_EXFULL,
- [ENOANO] = LUSTRE_ENOANO,
- [EBADRQC] = LUSTRE_EBADRQC,
- [EBADSLT] = LUSTRE_EBADSLT,
- [EBFONT] = LUSTRE_EBFONT,
- [ENOSTR] = LUSTRE_ENOSTR,
- [ENODATA] = LUSTRE_ENODATA,
- [ETIME] = LUSTRE_ETIME,
- [ENOSR] = LUSTRE_ENOSR,
- [ENONET] = LUSTRE_ENONET,
- [ENOPKG] = LUSTRE_ENOPKG,
- [EREMOTE] = LUSTRE_EREMOTE,
- [ENOLINK] = LUSTRE_ENOLINK,
- [EADV] = LUSTRE_EADV,
- [ESRMNT] = LUSTRE_ESRMNT,
- [ECOMM] = LUSTRE_ECOMM,
- [EPROTO] = LUSTRE_EPROTO,
- [EMULTIHOP] = LUSTRE_EMULTIHOP,
- [EDOTDOT] = LUSTRE_EDOTDOT,
- [EBADMSG] = LUSTRE_EBADMSG,
- [EOVERFLOW] = LUSTRE_EOVERFLOW,
- [ENOTUNIQ] = LUSTRE_ENOTUNIQ,
- [EBADFD] = LUSTRE_EBADFD,
- [EREMCHG] = LUSTRE_EREMCHG,
- [ELIBACC] = LUSTRE_ELIBACC,
- [ELIBBAD] = LUSTRE_ELIBBAD,
- [ELIBSCN] = LUSTRE_ELIBSCN,
- [ELIBMAX] = LUSTRE_ELIBMAX,
- [ELIBEXEC] = LUSTRE_ELIBEXEC,
- [EILSEQ] = LUSTRE_EILSEQ,
- [ERESTART] = LUSTRE_ERESTART,
- [ESTRPIPE] = LUSTRE_ESTRPIPE,
- [EUSERS] = LUSTRE_EUSERS,
- [ENOTSOCK] = LUSTRE_ENOTSOCK,
- [EDESTADDRREQ] = LUSTRE_EDESTADDRREQ,
- [EMSGSIZE] = LUSTRE_EMSGSIZE,
- [EPROTOTYPE] = LUSTRE_EPROTOTYPE,
- [ENOPROTOOPT] = LUSTRE_ENOPROTOOPT,
- [EPROTONOSUPPORT] = LUSTRE_EPROTONOSUPPORT,
- [ESOCKTNOSUPPORT] = LUSTRE_ESOCKTNOSUPPORT,
- [EOPNOTSUPP] = LUSTRE_EOPNOTSUPP,
- [EPFNOSUPPORT] = LUSTRE_EPFNOSUPPORT,
- [EAFNOSUPPORT] = LUSTRE_EAFNOSUPPORT,
- [EADDRINUSE] = LUSTRE_EADDRINUSE,
- [EADDRNOTAVAIL] = LUSTRE_EADDRNOTAVAIL,
- [ENETDOWN] = LUSTRE_ENETDOWN,
- [ENETUNREACH] = LUSTRE_ENETUNREACH,
- [ENETRESET] = LUSTRE_ENETRESET,
- [ECONNABORTED] = LUSTRE_ECONNABORTED,
- [ECONNRESET] = LUSTRE_ECONNRESET,
- [ENOBUFS] = LUSTRE_ENOBUFS,
- [EISCONN] = LUSTRE_EISCONN,
- [ENOTCONN] = LUSTRE_ENOTCONN,
- [ESHUTDOWN] = LUSTRE_ESHUTDOWN,
- [ETOOMANYREFS] = LUSTRE_ETOOMANYREFS,
- [ETIMEDOUT] = LUSTRE_ETIMEDOUT,
- [ECONNREFUSED] = LUSTRE_ECONNREFUSED,
- [EHOSTDOWN] = LUSTRE_EHOSTDOWN,
- [EHOSTUNREACH] = LUSTRE_EHOSTUNREACH,
- [EALREADY] = LUSTRE_EALREADY,
- [EINPROGRESS] = LUSTRE_EINPROGRESS,
- [ESTALE] = LUSTRE_ESTALE,
- [EUCLEAN] = LUSTRE_EUCLEAN,
- [ENOTNAM] = LUSTRE_ENOTNAM,
- [ENAVAIL] = LUSTRE_ENAVAIL,
- [EISNAM] = LUSTRE_EISNAM,
- [EREMOTEIO] = LUSTRE_EREMOTEIO,
- [EDQUOT] = LUSTRE_EDQUOT,
- [ENOMEDIUM] = LUSTRE_ENOMEDIUM,
- [EMEDIUMTYPE] = LUSTRE_EMEDIUMTYPE,
- [ECANCELED] = LUSTRE_ECANCELED,
- [ENOKEY] = LUSTRE_ENOKEY,
- [EKEYEXPIRED] = LUSTRE_EKEYEXPIRED,
- [EKEYREVOKED] = LUSTRE_EKEYREVOKED,
- [EKEYREJECTED] = LUSTRE_EKEYREJECTED,
- [EOWNERDEAD] = LUSTRE_EOWNERDEAD,
- [ENOTRECOVERABLE] = LUSTRE_ENOTRECOVERABLE,
- [ERESTARTSYS] = LUSTRE_ERESTARTSYS,
- [ERESTARTNOINTR] = LUSTRE_ERESTARTNOINTR,
- [ERESTARTNOHAND] = LUSTRE_ERESTARTNOHAND,
- [ENOIOCTLCMD] = LUSTRE_ENOIOCTLCMD,
- [ERESTART_RESTARTBLOCK] = LUSTRE_ERESTART_RESTARTBLOCK,
- [EBADHANDLE] = LUSTRE_EBADHANDLE,
- [ENOTSYNC] = LUSTRE_ENOTSYNC,
- [EBADCOOKIE] = LUSTRE_EBADCOOKIE,
- [ENOTSUPP] = LUSTRE_ENOTSUPP,
- [ETOOSMALL] = LUSTRE_ETOOSMALL,
- [ESERVERFAULT] = LUSTRE_ESERVERFAULT,
- [EBADTYPE] = LUSTRE_EBADTYPE,
- [EJUKEBOX] = LUSTRE_EJUKEBOX,
- [EIOCBQUEUED] = LUSTRE_EIOCBQUEUED,
-};
-
-static int lustre_errno_ntoh_mapping[] = {
- [LUSTRE_EPERM] = EPERM,
- [LUSTRE_ENOENT] = ENOENT,
- [LUSTRE_ESRCH] = ESRCH,
- [LUSTRE_EINTR] = EINTR,
- [LUSTRE_EIO] = EIO,
- [LUSTRE_ENXIO] = ENXIO,
- [LUSTRE_E2BIG] = E2BIG,
- [LUSTRE_ENOEXEC] = ENOEXEC,
- [LUSTRE_EBADF] = EBADF,
- [LUSTRE_ECHILD] = ECHILD,
- [LUSTRE_EAGAIN] = EAGAIN,
- [LUSTRE_ENOMEM] = ENOMEM,
- [LUSTRE_EACCES] = EACCES,
- [LUSTRE_EFAULT] = EFAULT,
- [LUSTRE_ENOTBLK] = ENOTBLK,
- [LUSTRE_EBUSY] = EBUSY,
- [LUSTRE_EEXIST] = EEXIST,
- [LUSTRE_EXDEV] = EXDEV,
- [LUSTRE_ENODEV] = ENODEV,
- [LUSTRE_ENOTDIR] = ENOTDIR,
- [LUSTRE_EISDIR] = EISDIR,
- [LUSTRE_EINVAL] = EINVAL,
- [LUSTRE_ENFILE] = ENFILE,
- [LUSTRE_EMFILE] = EMFILE,
- [LUSTRE_ENOTTY] = ENOTTY,
- [LUSTRE_ETXTBSY] = ETXTBSY,
- [LUSTRE_EFBIG] = EFBIG,
- [LUSTRE_ENOSPC] = ENOSPC,
- [LUSTRE_ESPIPE] = ESPIPE,
- [LUSTRE_EROFS] = EROFS,
- [LUSTRE_EMLINK] = EMLINK,
- [LUSTRE_EPIPE] = EPIPE,
- [LUSTRE_EDOM] = EDOM,
- [LUSTRE_ERANGE] = ERANGE,
- [LUSTRE_EDEADLK] = EDEADLK,
- [LUSTRE_ENAMETOOLONG] = ENAMETOOLONG,
- [LUSTRE_ENOLCK] = ENOLCK,
- [LUSTRE_ENOSYS] = ENOSYS,
- [LUSTRE_ENOTEMPTY] = ENOTEMPTY,
- [LUSTRE_ELOOP] = ELOOP,
- [LUSTRE_ENOMSG] = ENOMSG,
- [LUSTRE_EIDRM] = EIDRM,
- [LUSTRE_ECHRNG] = ECHRNG,
- [LUSTRE_EL2NSYNC] = EL2NSYNC,
- [LUSTRE_EL3HLT] = EL3HLT,
- [LUSTRE_EL3RST] = EL3RST,
- [LUSTRE_ELNRNG] = ELNRNG,
- [LUSTRE_EUNATCH] = EUNATCH,
- [LUSTRE_ENOCSI] = ENOCSI,
- [LUSTRE_EL2HLT] = EL2HLT,
- [LUSTRE_EBADE] = EBADE,
- [LUSTRE_EBADR] = EBADR,
- [LUSTRE_EXFULL] = EXFULL,
- [LUSTRE_ENOANO] = ENOANO,
- [LUSTRE_EBADRQC] = EBADRQC,
- [LUSTRE_EBADSLT] = EBADSLT,
- [LUSTRE_EBFONT] = EBFONT,
- [LUSTRE_ENOSTR] = ENOSTR,
- [LUSTRE_ENODATA] = ENODATA,
- [LUSTRE_ETIME] = ETIME,
- [LUSTRE_ENOSR] = ENOSR,
- [LUSTRE_ENONET] = ENONET,
- [LUSTRE_ENOPKG] = ENOPKG,
- [LUSTRE_EREMOTE] = EREMOTE,
- [LUSTRE_ENOLINK] = ENOLINK,
- [LUSTRE_EADV] = EADV,
- [LUSTRE_ESRMNT] = ESRMNT,
- [LUSTRE_ECOMM] = ECOMM,
- [LUSTRE_EPROTO] = EPROTO,
- [LUSTRE_EMULTIHOP] = EMULTIHOP,
- [LUSTRE_EDOTDOT] = EDOTDOT,
- [LUSTRE_EBADMSG] = EBADMSG,
- [LUSTRE_EOVERFLOW] = EOVERFLOW,
- [LUSTRE_ENOTUNIQ] = ENOTUNIQ,
- [LUSTRE_EBADFD] = EBADFD,
- [LUSTRE_EREMCHG] = EREMCHG,
- [LUSTRE_ELIBACC] = ELIBACC,
- [LUSTRE_ELIBBAD] = ELIBBAD,
- [LUSTRE_ELIBSCN] = ELIBSCN,
- [LUSTRE_ELIBMAX] = ELIBMAX,
- [LUSTRE_ELIBEXEC] = ELIBEXEC,
- [LUSTRE_EILSEQ] = EILSEQ,
- [LUSTRE_ERESTART] = ERESTART,
- [LUSTRE_ESTRPIPE] = ESTRPIPE,
- [LUSTRE_EUSERS] = EUSERS,
- [LUSTRE_ENOTSOCK] = ENOTSOCK,
- [LUSTRE_EDESTADDRREQ] = EDESTADDRREQ,
- [LUSTRE_EMSGSIZE] = EMSGSIZE,
- [LUSTRE_EPROTOTYPE] = EPROTOTYPE,
- [LUSTRE_ENOPROTOOPT] = ENOPROTOOPT,
- [LUSTRE_EPROTONOSUPPORT] = EPROTONOSUPPORT,
- [LUSTRE_ESOCKTNOSUPPORT] = ESOCKTNOSUPPORT,
- [LUSTRE_EOPNOTSUPP] = EOPNOTSUPP,
- [LUSTRE_EPFNOSUPPORT] = EPFNOSUPPORT,
- [LUSTRE_EAFNOSUPPORT] = EAFNOSUPPORT,
- [LUSTRE_EADDRINUSE] = EADDRINUSE,
- [LUSTRE_EADDRNOTAVAIL] = EADDRNOTAVAIL,
- [LUSTRE_ENETDOWN] = ENETDOWN,
- [LUSTRE_ENETUNREACH] = ENETUNREACH,
- [LUSTRE_ENETRESET] = ENETRESET,
- [LUSTRE_ECONNABORTED] = ECONNABORTED,
- [LUSTRE_ECONNRESET] = ECONNRESET,
- [LUSTRE_ENOBUFS] = ENOBUFS,
- [LUSTRE_EISCONN] = EISCONN,
- [LUSTRE_ENOTCONN] = ENOTCONN,
- [LUSTRE_ESHUTDOWN] = ESHUTDOWN,
- [LUSTRE_ETOOMANYREFS] = ETOOMANYREFS,
- [LUSTRE_ETIMEDOUT] = ETIMEDOUT,
- [LUSTRE_ECONNREFUSED] = ECONNREFUSED,
- [LUSTRE_EHOSTDOWN] = EHOSTDOWN,
- [LUSTRE_EHOSTUNREACH] = EHOSTUNREACH,
- [LUSTRE_EALREADY] = EALREADY,
- [LUSTRE_EINPROGRESS] = EINPROGRESS,
- [LUSTRE_ESTALE] = ESTALE,
- [LUSTRE_EUCLEAN] = EUCLEAN,
- [LUSTRE_ENOTNAM] = ENOTNAM,
- [LUSTRE_ENAVAIL] = ENAVAIL,
- [LUSTRE_EISNAM] = EISNAM,
- [LUSTRE_EREMOTEIO] = EREMOTEIO,
- [LUSTRE_EDQUOT] = EDQUOT,
- [LUSTRE_ENOMEDIUM] = ENOMEDIUM,
- [LUSTRE_EMEDIUMTYPE] = EMEDIUMTYPE,
- [LUSTRE_ECANCELED] = ECANCELED,
- [LUSTRE_ENOKEY] = ENOKEY,
- [LUSTRE_EKEYEXPIRED] = EKEYEXPIRED,
- [LUSTRE_EKEYREVOKED] = EKEYREVOKED,
- [LUSTRE_EKEYREJECTED] = EKEYREJECTED,
- [LUSTRE_EOWNERDEAD] = EOWNERDEAD,
- [LUSTRE_ENOTRECOVERABLE] = ENOTRECOVERABLE,
- [LUSTRE_ERESTARTSYS] = ERESTARTSYS,
- [LUSTRE_ERESTARTNOINTR] = ERESTARTNOINTR,
- [LUSTRE_ERESTARTNOHAND] = ERESTARTNOHAND,
- [LUSTRE_ENOIOCTLCMD] = ENOIOCTLCMD,
- [LUSTRE_ERESTART_RESTARTBLOCK] = ERESTART_RESTARTBLOCK,
- [LUSTRE_EBADHANDLE] = EBADHANDLE,
- [LUSTRE_ENOTSYNC] = ENOTSYNC,
- [LUSTRE_EBADCOOKIE] = EBADCOOKIE,
- [LUSTRE_ENOTSUPP] = ENOTSUPP,
- [LUSTRE_ETOOSMALL] = ETOOSMALL,
- [LUSTRE_ESERVERFAULT] = ESERVERFAULT,
- [LUSTRE_EBADTYPE] = EBADTYPE,
- [LUSTRE_EJUKEBOX] = EJUKEBOX,
- [LUSTRE_EIOCBQUEUED] = EIOCBQUEUED,
-};
-
-unsigned int lustre_errno_hton(unsigned int h)
-{
- unsigned int n;
-
- if (h == 0) {
- n = 0;
- } else if (h < ARRAY_SIZE(lustre_errno_hton_mapping)) {
- n = lustre_errno_hton_mapping[h];
- if (n == 0)
- goto generic;
- } else {
-generic:
- /*
- * A generic errno is better than the unknown one that could
- * mean anything to a different host.
- */
- n = LUSTRE_EIO;
- }
-
- return n;
-}
-EXPORT_SYMBOL(lustre_errno_hton);
-
-unsigned int lustre_errno_ntoh(unsigned int n)
-{
- unsigned int h;
-
- if (n == 0) {
- h = 0;
- } else if (n < ARRAY_SIZE(lustre_errno_ntoh_mapping)) {
- h = lustre_errno_ntoh_mapping[n];
- if (h == 0)
- goto generic;
- } else {
-generic:
- /*
- * Similar to the situation in lustre_errno_hton(), an unknown
- * network errno could coincide with anything. Hence, it is
- * better to return a generic errno.
- */
- h = EIO;
- }
-
- return h;
-}
-EXPORT_SYMBOL(lustre_errno_ntoh);
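The translation pattern above, a sparse array keyed by errno with unknown values collapsed to a generic I/O error rather than leaking an ambiguous number across the wire, can be reproduced in standalone C. The WIRE_* values below are invented placeholders, not the real constants from lustre_errno.h.

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Placeholder wire values; the real ones live in lustre_errno.h. */
enum { WIRE_EPERM = 1, WIRE_ENOENT = 2, WIRE_EIO = 5 };

static const unsigned int hton_map[] = {
	[EPERM]  = WIRE_EPERM,
	[ENOENT] = WIRE_ENOENT,
	[EIO]    = WIRE_EIO,
};

static unsigned int errno_hton(unsigned int h)
{
	unsigned int n = (h && h < ARRAY_SIZE(hton_map)) ? hton_map[h] : 0;

	/* 0 means "no mapping"; send a generic error instead of a number
	 * the peer could misinterpret. */
	return h ? (n ? n : WIRE_EIO) : 0;
}

int main(void)
{
	printf("EPERM  -> %u\n", errno_hton(EPERM));
	printf("ECHILD -> %u (unmapped, falls back to WIRE_EIO)\n",
	       errno_hton(ECHILD));
	return 0;
}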
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
deleted file mode 100644
index afd869b205a5..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ /dev/null
@@ -1,585 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../../include/linux/libcfs/libcfs.h"
-# ifdef __mips64__
-# include <linux/kernel.h>
-# endif
-
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_sec.h"
-#include "ptlrpc_internal.h"
-
-lnet_handle_eq_t ptlrpc_eq_h;
-
-/*
- * Client's outgoing request callback
- */
-void request_out_callback(lnet_event_t *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_request *req = cbid->cbid_arg;
-
- LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->unlinked);
-
- DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
-
- sptlrpc_request_out_callback(req);
- spin_lock(&req->rq_lock);
- req->rq_real_sent = ktime_get_real_seconds();
- if (ev->unlinked)
- req->rq_req_unlink = 0;
-
- if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
-
- /* Failed send: make it seem like the reply timed out, just
- * like failing sends in client.c does currently... */
-
- req->rq_net_err = 1;
- ptlrpc_client_wake_req(req);
- }
- spin_unlock(&req->rq_lock);
-
- ptlrpc_req_finished(req);
-}
-
-/*
- * Client's incoming reply callback
- */
-void reply_in_callback(lnet_event_t *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_request *req = cbid->cbid_arg;
-
- DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
-
- LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->md.start == req->rq_repbuf);
- LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
- /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
- for adaptive timeouts' early reply. */
- LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
-
- spin_lock(&req->rq_lock);
-
- req->rq_receiving_reply = 0;
- req->rq_early = 0;
- if (ev->unlinked)
- req->rq_reply_unlink = 0;
-
- if (ev->status)
- goto out_wake;
-
- if (ev->type == LNET_EVENT_UNLINK) {
- LASSERT(ev->unlinked);
- DEBUG_REQ(D_NET, req, "unlink");
- goto out_wake;
- }
-
- if (ev->mlength < ev->rlength) {
- CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
- req->rq_replen, ev->rlength, ev->offset);
- req->rq_reply_truncate = 1;
- req->rq_replied = 1;
- req->rq_status = -EOVERFLOW;
- req->rq_nob_received = ev->rlength + ev->offset;
- goto out_wake;
- }
-
- if ((ev->offset == 0) &&
- ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
- /* Early reply */
- DEBUG_REQ(D_ADAPTTO, req,
- "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
- ev->mlength, ev->offset,
- req->rq_replen, req->rq_replied, ev->unlinked);
-
- req->rq_early_count++; /* number received, client side */
-
- if (req->rq_replied) /* already got the real reply */
- goto out_wake;
-
- req->rq_early = 1;
- req->rq_reply_off = ev->offset;
- req->rq_nob_received = ev->mlength;
- /* And we're still receiving */
- req->rq_receiving_reply = 1;
- } else {
- /* Real reply */
- req->rq_rep_swab_mask = 0;
- req->rq_replied = 1;
- /* Got reply, no resend required */
- req->rq_resend = 0;
- req->rq_reply_off = ev->offset;
- req->rq_nob_received = ev->mlength;
- /* LNetMDUnlink can't be called under the LNET_LOCK,
- so we must unlink in ptlrpc_unregister_reply */
- DEBUG_REQ(D_INFO, req,
- "reply in flags=%x mlen=%u offset=%d replen=%d",
- lustre_msg_get_flags(req->rq_reqmsg),
- ev->mlength, ev->offset, req->rq_replen);
- }
-
- req->rq_import->imp_last_reply_time = ktime_get_real_seconds();
-
-out_wake:
- /* NB don't unlock till after wakeup; req can disappear under us
- * since we don't have our own ref */
- ptlrpc_client_wake_req(req);
- spin_unlock(&req->rq_lock);
-}
-
-/*
- * Client's bulk has been written/read
- */
-void client_bulk_callback(lnet_event_t *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
- struct ptlrpc_request *req;
-
- LASSERT((desc->bd_type == BULK_PUT_SINK &&
- ev->type == LNET_EVENT_PUT) ||
- (desc->bd_type == BULK_GET_SOURCE &&
- ev->type == LNET_EVENT_GET) ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT(ev->unlinked);
-
- if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
- ev->status = -EIO;
-
- if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,
- CFS_FAIL_ONCE))
- ev->status = -EIO;
-
- CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
- "event type %d, status %d, desc %p\n",
- ev->type, ev->status, desc);
-
- spin_lock(&desc->bd_lock);
- req = desc->bd_req;
- LASSERT(desc->bd_md_count > 0);
- desc->bd_md_count--;
-
- if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
- desc->bd_nob_transferred += ev->mlength;
- desc->bd_sender = ev->sender;
- } else {
- /* start reconnect and resend if network error hit */
- spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- spin_unlock(&req->rq_lock);
- }
-
- if (ev->status != 0)
- desc->bd_failure = 1;
-
- /* NB don't unlock till after wakeup; desc can disappear under us
- * otherwise */
- if (desc->bd_md_count == 0)
- ptlrpc_client_wake_req(desc->bd_req);
-
- spin_unlock(&desc->bd_lock);
-}
-
-/*
- * We will have a per-CPT request history list for each ptlrpc service in
- * upcoming patches, because we don't want to be serialized by the current
- * per-service history operations. So the history ID must (somehow) reflect
- * arrival order without a global lock, so users can sort entries in userspace.
- *
- * This is how we generate history ID for ptlrpc_request:
- * ----------------------------------------------------
- * | 32 bits | 16 bits | (16 - X)bits | X bits |
- * ----------------------------------------------------
- * | seconds | usec / 16 | sequence | CPT id |
- * ----------------------------------------------------
- *
- * it might not be precise but should be good enough.
- */
-
-#define REQS_CPT_BITS(svcpt) ((svcpt)->scp_service->srv_cpt_bits)
-
-#define REQS_SEC_SHIFT 32
-#define REQS_USEC_SHIFT 16
-#define REQS_SEQ_SHIFT(svcpt) REQS_CPT_BITS(svcpt)
-
-static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- __u64 sec = req->rq_arrival_time.tv_sec;
- __u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
- __u64 new_seq;
-
- /* Set the sequence ID for the request and add it to the history list;
- * must be called with svcpt->scp_lock held. */
-
- new_seq = (sec << REQS_SEC_SHIFT) |
- (usec << REQS_USEC_SHIFT) |
- (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);
-
- if (new_seq > svcpt->scp_hist_seq) {
- /* This handles the initial case of scp_hist_seq == 0 or
- * we just jumped into a new time window */
- svcpt->scp_hist_seq = new_seq;
- } else {
- LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
- /* NB: increase the sequence number within the current usec bucket;
- * however, it's possible that we used up all bits for the
- * sequence and jumped into the next usec bucket (future time).
- * Then we hope there will be fewer RPCs per bucket at some
- * point, and the sequence will catch up again. */
- svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
- new_seq = svcpt->scp_hist_seq;
- }
-
- req->rq_history_seq = new_seq;
-
- list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
-}
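The bit layout documented above is easiest to check with concrete numbers. Below is a standalone sketch of the ID composition; the 4-bit CPT width is an assumed illustrative value (the real width comes from srv_cpt_bits).

#include <stdint.h>
#include <stdio.h>

/* Assumed widths: 32-bit seconds | 16-bit usec/16 | (16 - X)-bit seq | X-bit CPT */
#define SEC_SHIFT  32
#define USEC_SHIFT 16
#define CPT_BITS   4			/* X: illustrative value */
#define SEQ_SHIFT  CPT_BITS		/* sequence sits just above the CPT id */

static uint64_t hist_seq;

static uint64_t req_history_id(uint64_t sec, uint32_t usec, unsigned int cpt)
{
	uint64_t id = (sec << SEC_SHIFT) |
		      ((uint64_t)(usec / 16) << USEC_SHIFT) |
		      cpt;

	if (id > hist_seq)		/* first request or a new time window */
		hist_seq = id;
	else				/* same bucket: bump the sequence field */
		hist_seq += 1ULL << SEQ_SHIFT;

	return hist_seq;
}

int main(void)
{
	/* Two requests arriving in the same second/usec bucket on CPT 3. */
	printf("%#llx\n", (unsigned long long)req_history_id(1700000000, 123456, 3));
	printf("%#llx\n", (unsigned long long)req_history_id(1700000000, 123456, 3));
	return 0;
}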
-
-/*
- * Server's incoming request callback
- */
-void request_in_callback(lnet_event_t *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
- struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
- struct ptlrpc_service *service = svcpt->scp_service;
- struct ptlrpc_request *req;
-
- LASSERT(ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
- LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
- LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
- rqbd->rqbd_buffer + service->srv_buf_size);
-
- CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
- "event type %d, status %d, service %s\n",
- ev->type, ev->status, service->srv_name);
-
- if (ev->unlinked) {
- /* If this is the last request message to fit in the
- * request buffer we can use the request object embedded in
- * rqbd. Note that if we failed to allocate a request,
- * we'd have to re-post the rqbd, which we can't do in this
- * context. */
- req = &rqbd->rqbd_req;
- memset(req, 0, sizeof(*req));
- } else {
- LASSERT(ev->type == LNET_EVENT_PUT);
- if (ev->status != 0) {
- /* We moaned above already... */
- return;
- }
- req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
- if (req == NULL) {
- CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
- service->srv_name,
- libcfs_id2str(ev->initiator));
- return;
- }
- }
-
- /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
- * flags are reset and scalars are zero. We only set the message
- * size to non-zero if this was a successful receive. */
- req->rq_xid = ev->match_bits;
- req->rq_reqbuf = ev->md.start + ev->offset;
- if (ev->type == LNET_EVENT_PUT && ev->status == 0)
- req->rq_reqdata_len = ev->mlength;
- ktime_get_real_ts64(&req->rq_arrival_time);
- req->rq_peer = ev->initiator;
- req->rq_self = ev->target.nid;
- req->rq_rqbd = rqbd;
- req->rq_phase = RQ_PHASE_NEW;
- spin_lock_init(&req->rq_lock);
- INIT_LIST_HEAD(&req->rq_timed_list);
- INIT_LIST_HEAD(&req->rq_exp_list);
- atomic_set(&req->rq_refcount, 1);
- if (ev->type == LNET_EVENT_PUT)
- CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
- req, req->rq_xid, ev->mlength);
-
- CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
-
- spin_lock(&svcpt->scp_lock);
-
- ptlrpc_req_add_history(svcpt, req);
-
- if (ev->unlinked) {
- svcpt->scp_nrqbds_posted--;
- CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
- svcpt->scp_nrqbds_posted);
-
- /* Normally, don't complain about 0 buffers posted; LNET won't
- * drop incoming reqs since we set the portal lazy */
- if (test_req_buffer_pressure &&
- ev->type != LNET_EVENT_UNLINK &&
- svcpt->scp_nrqbds_posted == 0)
- CWARN("All %s request buffers busy\n",
- service->srv_name);
-
- /* req takes over the network's ref on rqbd */
- } else {
- /* req takes a ref on rqbd */
- rqbd->rqbd_refcount++;
- }
-
- list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
- svcpt->scp_nreqs_incoming++;
-
- /* NB everything can disappear under us once the request
- * has been queued and we unlock, so do the wake now... */
- wake_up(&svcpt->scp_waitq);
-
- spin_unlock(&svcpt->scp_lock);
-}
-
-/*
- * Server's outgoing reply callback
- */
-void reply_out_callback(lnet_event_t *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- struct ptlrpc_reply_state *rs = cbid->cbid_arg;
- struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
-
- LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_ACK ||
- ev->type == LNET_EVENT_UNLINK);
-
- if (!rs->rs_difficult) {
- /* 'Easy' replies have no further processing so I drop the
- * net's ref on 'rs' */
- LASSERT(ev->unlinked);
- ptlrpc_rs_decref(rs);
- return;
- }
-
- LASSERT(rs->rs_on_net);
-
- if (ev->unlinked) {
- /* Last network callback. The net's ref on 'rs' stays put
- * until ptlrpc_handle_rs() is done with it */
- spin_lock(&svcpt->scp_rep_lock);
- spin_lock(&rs->rs_lock);
-
- rs->rs_on_net = 0;
- if (!rs->rs_no_ack ||
- rs->rs_transno <=
- rs->rs_export->exp_obd->obd_last_committed)
- ptlrpc_schedule_difficult_reply(rs);
-
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svcpt->scp_rep_lock);
- }
-}
-
-
-static void ptlrpc_master_callback(lnet_event_t *ev)
-{
- struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
-
- /* Honestly, it's best to find out early. */
- LASSERT(cbid->cbid_arg != LP_POISON);
- LASSERT(callback == request_out_callback ||
- callback == reply_in_callback ||
- callback == client_bulk_callback ||
- callback == request_in_callback ||
- callback == reply_out_callback);
-
- callback(ev);
-}
-
-int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- lnet_process_id_t *peer, lnet_nid_t *self)
-{
- int best_dist = 0;
- __u32 best_order = 0;
- int count = 0;
- int rc = -ENOENT;
- int portals_compatibility;
- int dist;
- __u32 order;
- lnet_nid_t dst_nid;
- lnet_nid_t src_nid;
-
- portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);
-
- peer->pid = LUSTRE_SRV_LNET_PID;
-
- /* Choose the matching UUID that's closest */
- while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
- dist = LNetDist(dst_nid, &src_nid, &order);
- if (dist < 0)
- continue;
-
- if (dist == 0) { /* local! use loopback LND */
- peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
- rc = 0;
- break;
- }
-
- if (rc < 0 ||
- dist < best_dist ||
- (dist == best_dist && order < best_order)) {
- best_dist = dist;
- best_order = order;
-
- if (portals_compatibility > 1) {
- /* Strong portals compatibility: Zero the nid's
- * NET, so if I'm reading new config logs, or
- * getting configured by (new) lconf I can
- * still talk to old servers. */
- dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
- src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
- }
- peer->nid = dst_nid;
- *self = src_nid;
- rc = 0;
- }
- }
-
- CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
- return rc;
-}
-
-static void ptlrpc_ni_fini(void)
-{
- wait_queue_head_t waitq;
- struct l_wait_info lwi;
- int rc;
- int retries;
-
- /* Wait for the event queue to become idle since there may still be
- * messages in flight with pending events (i.e. the fire-and-forget
- * messages == client requests and "non-difficult" server
- * replies) */
-
- for (retries = 0;; retries++) {
- rc = LNetEQFree(ptlrpc_eq_h);
- switch (rc) {
- default:
- LBUG();
-
- case 0:
- LNetNIFini();
- return;
-
- case -EBUSY:
- if (retries != 0)
- CWARN("Event queue still busy\n");
-
- /* Wait for a bit */
- init_waitqueue_head(&waitq);
- lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
- l_wait_event(waitq, 0, &lwi);
- break;
- }
- }
- /* notreached */
-}
-
-lnet_pid_t ptl_get_pid(void)
-{
- lnet_pid_t pid;
-
- pid = LUSTRE_SRV_LNET_PID;
- return pid;
-}
-
-static int ptlrpc_ni_init(void)
-{
- int rc;
- lnet_pid_t pid;
-
- pid = ptl_get_pid();
- CDEBUG(D_NET, "My pid is: %x\n", pid);
-
- /* We're not passing any limits yet... */
- rc = LNetNIInit(pid);
- if (rc < 0) {
- CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
- return -ENOENT;
- }
-
- /* CAVEAT EMPTOR: how we process portals events is _radically_
- * different depending on... */
- /* Kernel LNet calls our master callback when there are new events,
- * because we are guaranteed to get every event via callback,
- * so we just set the EQ size to 0 to avoid the overhead of serializing
- * enqueue/dequeue operations in LNet. */
- rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
- if (rc == 0)
- return 0;
-
- CERROR("Failed to allocate event queue: %d\n", rc);
- LNetNIFini();
-
- return -ENOMEM;
-}
-
-
-int ptlrpc_init_portals(void)
-{
- int rc = ptlrpc_ni_init();
-
- if (rc != 0) {
- CERROR("network initialisation failed\n");
- return -EIO;
- }
- rc = ptlrpcd_addref();
- if (rc == 0)
- return 0;
-
- CERROR("rpcd initialisation failed\n");
- ptlrpc_ni_fini();
- return rc;
-}
-
-void ptlrpc_exit_portals(void)
-{
- ptlrpcd_decref();
- ptlrpc_ni_fini();
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
deleted file mode 100644
index 1ac265dfce2f..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ /dev/null
@@ -1,1621 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/import.c
- *
- * Author: Mike Shaver <shaver@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../include/obd_support.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_import.h"
-#include "../include/lustre_export.h"
-#include "../include/obd.h"
-#include "../include/obd_cksum.h"
-#include "../include/obd_class.h"
-
-#include "ptlrpc_internal.h"
-
-struct ptlrpc_connect_async_args {
- __u64 pcaa_peer_committed;
- int pcaa_initial_connect;
-};
-
-/**
- * Updates import \a imp current state to provided \a state value
- * Helper function. Must be called under imp_lock.
- */
-static void __import_set_state(struct obd_import *imp,
- enum lustre_imp_state state)
-{
- switch (state) {
- case LUSTRE_IMP_CLOSED:
- case LUSTRE_IMP_NEW:
- case LUSTRE_IMP_DISCON:
- case LUSTRE_IMP_CONNECTING:
- break;
- case LUSTRE_IMP_REPLAY_WAIT:
- imp->imp_replay_state = LUSTRE_IMP_REPLAY_LOCKS;
- break;
- default:
- imp->imp_replay_state = LUSTRE_IMP_REPLAY;
- }
-
- imp->imp_state = state;
- imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
- imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
- ktime_get_real_seconds();
- imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
- IMP_STATE_HIST_LEN;
-}
-
-/* A CLOSED import should remain so. */
-#define IMPORT_SET_STATE_NOLOCK(imp, state) \
-do { \
- if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
- CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
- imp, obd2cli_tgt(imp->imp_obd), \
- ptlrpc_import_state_name(imp->imp_state), \
- ptlrpc_import_state_name(state)); \
- __import_set_state(imp, state); \
- } \
-} while (0)
-
-#define IMPORT_SET_STATE(imp, state) \
-do { \
- spin_lock(&imp->imp_lock); \
- IMPORT_SET_STATE_NOLOCK(imp, state); \
- spin_unlock(&imp->imp_lock); \
-} while (0)
-
-
-static int ptlrpc_connect_interpret(const struct lu_env *env,
- struct ptlrpc_request *request,
- void *data, int rc);
-int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
-
-/* Only this function is allowed to change the import state when it is
- * CLOSED. I would rather refcount the import and free it after
- * disconnection like we do with exports. To do that, the client_obd
- * will need to save the peer info somewhere other than in the import,
- * though. */
-int ptlrpc_init_import(struct obd_import *imp)
-{
- spin_lock(&imp->imp_lock);
-
- imp->imp_generation++;
- imp->imp_state = LUSTRE_IMP_NEW;
-
- spin_unlock(&imp->imp_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_init_import);
-
-#define UUID_STR "_UUID"
-void deuuidify(char *uuid, const char *prefix, char **uuid_start, int *uuid_len)
-{
- *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
- ? uuid : uuid + strlen(prefix);
-
- *uuid_len = strlen(*uuid_start);
-
- if (*uuid_len < strlen(UUID_STR))
- return;
-
- if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
- UUID_STR, strlen(UUID_STR)))
- *uuid_len -= strlen(UUID_STR);
-}
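
For illustration only, here is a minimal standalone C sketch of the suffix-stripping behaviour of deuuidify() above; the helper name, the sample target string "lustre-OST0001_UUID" and the main() harness are invented for this sketch and are not part of the driver.

/* Standalone sketch (not driver code): strip an optional prefix and a
 * trailing "_UUID" suffix, mirroring deuuidify() above. */
#include <stdio.h>
#include <string.h>

static void deuuidify_sketch(const char *uuid, const char *prefix,
                             const char **start, size_t *len)
{
        *start = (prefix && !strncmp(uuid, prefix, strlen(prefix)))
                 ? uuid + strlen(prefix) : uuid;
        *len = strlen(*start);
        if (*len >= strlen("_UUID") &&
            !strcmp(*start + *len - strlen("_UUID"), "_UUID"))
                *len -= strlen("_UUID");
}

int main(void)
{
        const char *start;
        size_t len;

        /* "lustre-OST0001_UUID" is a made-up example target name. */
        deuuidify_sketch("lustre-OST0001_UUID", NULL, &start, &len);
        printf("%.*s\n", (int)len, start);      /* prints "lustre-OST0001" */
        return 0;
}
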
-
-/**
- * Returns true if import was FULL, false if import was already not
- * connected.
- * @imp - import to be disconnected
- * @conn_cnt - connection count (epoch) of the request that timed out
- * and caused the disconnection. In some cases, multiple
- * inflight requests can fail to a single target (e.g. OST
- * bulk requests) and if one has already caused a reconnection
- * (increasing the import->conn_cnt) the older failure should
- * not also cause a reconnection. If zero it forces a reconnect.
- */
-int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
-{
- int rc = 0;
-
- spin_lock(&imp->imp_lock);
-
- if (imp->imp_state == LUSTRE_IMP_FULL &&
- (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
- char *target_start;
- int target_len;
-
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
-
- if (imp->imp_replayable) {
- LCONSOLE_WARN("%s: Connection to %.*s (at %s) was lost; in progress operations using this service will wait for recovery to complete\n",
- imp->imp_obd->obd_name, target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
- } else {
- LCONSOLE_ERROR_MSG(0x166, "%s: Connection to %.*s (at %s) was lost; in progress operations using this service will fail\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
- }
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- spin_unlock(&imp->imp_lock);
-
- if (obd_dump_on_timeout)
- libcfs_debug_dumplog();
-
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
- rc = 1;
- } else {
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
- imp->imp_client->cli_name, imp,
- (imp->imp_state == LUSTRE_IMP_FULL &&
- imp->imp_conn_cnt > conn_cnt) ?
- "reconnected" : "not connected", imp->imp_conn_cnt,
- conn_cnt, ptlrpc_import_state_name(imp->imp_state));
- }
-
- return rc;
-}
-
-/*
- * This acts as a barrier; all existing requests are rejected, and
- * no new requests will be accepted until the import is valid again.
- */
-void ptlrpc_deactivate_import(struct obd_import *imp)
-{
- CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
-
- spin_lock(&imp->imp_lock);
- imp->imp_invalid = 1;
- imp->imp_generation++;
- spin_unlock(&imp->imp_lock);
-
- ptlrpc_abort_inflight(imp);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
-}
-EXPORT_SYMBOL(ptlrpc_deactivate_import);
-
-static unsigned int
-ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now)
-{
- long dl;
-
- if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
- (req->rq_phase == RQ_PHASE_BULK) ||
- (req->rq_phase == RQ_PHASE_NEW)))
- return 0;
-
- if (req->rq_timedout)
- return 0;
-
- if (req->rq_phase == RQ_PHASE_NEW)
- dl = req->rq_sent;
- else
- dl = req->rq_deadline;
-
- if (dl <= now)
- return 0;
-
- return dl - now;
-}
-
-static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
-{
- time64_t now = ktime_get_real_seconds();
- struct list_head *tmp, *n;
- struct ptlrpc_request *req;
- unsigned int timeout = 0;
-
- spin_lock(&imp->imp_lock);
- list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_list);
- timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
- }
- spin_unlock(&imp->imp_lock);
- return timeout;
-}
-
-/**
- * This function will invalidate the import, if necessary, then block
- * for all the RPC completions, and finally notify the obd to
- * invalidate its state (ie cancel locks, clear pending requests,
- * etc).
- */
-void ptlrpc_invalidate_import(struct obd_import *imp)
-{
- struct list_head *tmp, *n;
- struct ptlrpc_request *req;
- struct l_wait_info lwi;
- unsigned int timeout;
- int rc;
-
- atomic_inc(&imp->imp_inval_count);
-
- if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
- ptlrpc_deactivate_import(imp);
-
- CFS_FAIL_TIMEOUT(OBD_FAIL_MGS_CONNECT_NET, 3 * cfs_fail_val / 2);
- LASSERT(imp->imp_invalid);
-
- /* Wait forever until inflight == 0. We really can't do it another
- * way because in some cases we need to wait for very long reply
- * unlink. We can't do anything before that because there is really
- * no guarantee that some rdma transfer is not in progress right now. */
- do {
- /* Calculate max timeout for waiting on rpcs to error
- * out. Use obd_timeout if calculated value is smaller
- * than it. */
- if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
- timeout = ptlrpc_inflight_timeout(imp);
- timeout += timeout / 3;
-
- if (timeout == 0)
- timeout = obd_timeout;
- } else {
- /* decrease the interval to increase race condition */
- timeout = 1;
- }
-
- CDEBUG(D_RPCTRACE,
- "Sleeping %d sec for inflight to error out\n",
- timeout);
-
- /* Wait for all requests to error out and call completion
- * callbacks. Cap it at obd_timeout -- these should all
- * have been locally cancelled by ptlrpc_abort_inflight. */
- lwi = LWI_TIMEOUT_INTERVAL(
- cfs_timeout_cap(cfs_time_seconds(timeout)),
-			(timeout > 1) ? cfs_time_seconds(1) : cfs_time_seconds(1) / 2,
- NULL, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inflight) == 0),
- &lwi);
- if (rc) {
- const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
-
- CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
- cli_tgt, rc,
- atomic_read(&imp->imp_inflight));
-
- spin_lock(&imp->imp_lock);
- if (atomic_read(&imp->imp_inflight) == 0) {
- int count = atomic_read(&imp->imp_unregistering);
-
-				/* We know that "unregistering" rpcs can only
-				 * survive in the sending or delayed lists
-				 * (they may be waiting for a long reply unlink
-				 * in sluggish nets). Let's check this. If
-				 * there is no inflight and unregistering != 0,
-				 * this is a bug. */
- LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
- count);
-
-				/* Let's save one loop as soon as inflight has
-				 * dropped to zero. No new inflights are
-				 * possible at this point. */
- rc = 0;
- } else {
- list_for_each_safe(tmp, n,
- &imp->imp_sending_list) {
- req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
- DEBUG_REQ(D_ERROR, req,
- "still on sending list");
- }
- list_for_each_safe(tmp, n,
- &imp->imp_delayed_list) {
- req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
- DEBUG_REQ(D_ERROR, req,
- "still on delayed list");
- }
-
-				CERROR("%s: RPCs in \"%s\" phase found (%d). Network may be sluggish; waiting for them to error out.\n",
- cli_tgt,
- ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
-				       atomic_read(&imp->imp_unregistering));
- }
- spin_unlock(&imp->imp_lock);
- }
- } while (rc != 0);
-
- /*
-	 * Let's additionally check that no new rpcs were added to the import
-	 * while in the "invalidate" state.
- */
- LASSERT(atomic_read(&imp->imp_inflight) == 0);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
- sptlrpc_import_flush_all_ctx(imp);
-
- atomic_dec(&imp->imp_inval_count);
- wake_up_all(&imp->imp_recovery_waitq);
-}
-EXPORT_SYMBOL(ptlrpc_invalidate_import);
-
-/* unset imp_invalid */
-void ptlrpc_activate_import(struct obd_import *imp)
-{
- struct obd_device *obd = imp->imp_obd;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_deactive != 0) {
- spin_unlock(&imp->imp_lock);
- return;
- }
-
- imp->imp_invalid = 0;
- spin_unlock(&imp->imp_lock);
- obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
-}
-EXPORT_SYMBOL(ptlrpc_activate_import);
-
-static void ptlrpc_pinger_force(struct obd_import *imp)
-{
- CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(imp->imp_state));
-
- spin_lock(&imp->imp_lock);
- imp->imp_force_verify = 1;
- spin_unlock(&imp->imp_lock);
-
- if (imp->imp_state != LUSTRE_IMP_CONNECTING)
- ptlrpc_pinger_wake_up();
-}
-
-void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
-{
- LASSERT(!imp->imp_dlm_fake);
-
- if (ptlrpc_set_import_discon(imp, conn_cnt)) {
- if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_obd->obd_name);
- ptlrpc_deactivate_import(imp);
- }
-
- ptlrpc_pinger_force(imp);
- }
-}
-EXPORT_SYMBOL(ptlrpc_fail_import);
-
-int ptlrpc_reconnect_import(struct obd_import *imp)
-{
-#ifdef ENABLE_PINGER
- struct l_wait_info lwi;
- int secs = cfs_time_seconds(obd_timeout);
- int rc;
-
- ptlrpc_pinger_force(imp);
-
- CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
- obd2cli_tgt(imp->imp_obd), secs);
-
- lwi = LWI_TIMEOUT(secs, NULL, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp), &lwi);
- CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(imp->imp_state));
- return rc;
-#else
- ptlrpc_set_import_discon(imp, 0);
- /* Force a new connect attempt */
- ptlrpc_invalidate_import(imp);
- /* Do a fresh connect next time by zeroing the handle */
- ptlrpc_disconnect_import(imp, 1);
- /* Wait for all invalidate calls to finish */
- if (atomic_read(&imp->imp_inval_count) > 0) {
- int rc;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inval_count) == 0),
- &lwi);
- if (rc)
- CERROR("Interrupted, inval=%d\n",
- atomic_read(&imp->imp_inval_count));
- }
-
- /* Allow reconnect attempts */
- imp->imp_obd->obd_no_recov = 0;
- /* Remove 'invalid' flag */
- ptlrpc_activate_import(imp);
- /* Attempt a new connect */
- ptlrpc_recover_import(imp, NULL, 0);
- return 0;
-#endif
-}
-EXPORT_SYMBOL(ptlrpc_reconnect_import);
-
-/**
- * Connection on import \a imp is changed to another one (if more than one is
- * present). We typically choose the connection that we have not tried to
- * connect to for the longest time (a small standalone sketch of this policy
- * follows the function).
- */
-static int import_select_connection(struct obd_import *imp)
-{
- struct obd_import_conn *imp_conn = NULL, *conn;
- struct obd_export *dlmexp;
- char *target_start;
- int target_len, tried_all = 1;
-
- spin_lock(&imp->imp_lock);
-
- if (list_empty(&imp->imp_conn_list)) {
- CERROR("%s: no connections available\n",
- imp->imp_obd->obd_name);
- spin_unlock(&imp->imp_lock);
- return -EINVAL;
- }
-
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
- CDEBUG(D_HA, "%s: connect to NID %s last attempt %llu\n",
- imp->imp_obd->obd_name,
- libcfs_nid2str(conn->oic_conn->c_peer.nid),
- conn->oic_last_attempt);
-
- /* If we have not tried this connection since
- the last successful attempt, go with this one */
- if ((conn->oic_last_attempt == 0) ||
- cfs_time_beforeq_64(conn->oic_last_attempt,
- imp->imp_last_success_conn)) {
- imp_conn = conn;
- tried_all = 0;
- break;
- }
-
- /* If all of the connections have already been tried
-		   since the last successful connection, just choose the
-		   least recently used. */
- if (!imp_conn)
- imp_conn = conn;
- else if (cfs_time_before_64(conn->oic_last_attempt,
- imp_conn->oic_last_attempt))
- imp_conn = conn;
- }
-
- /* if not found, simply choose the current one */
- if (!imp_conn || imp->imp_force_reconnect) {
- LASSERT(imp->imp_conn_current);
- imp_conn = imp->imp_conn_current;
- tried_all = 0;
- }
- LASSERT(imp_conn->oic_conn);
-
- /* If we've tried everything, and we're back to the beginning of the
- list, increase our timeout and try again. It will be reset when
- we do finally connect. (FIXME: really we should wait for all network
- state associated with the last connection attempt to drain before
- trying to reconnect on it.) */
- if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
- struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
- if (at_get(at) < CONNECTION_SWITCH_MAX) {
- at_measured(at, at_get(at) + CONNECTION_SWITCH_INC);
- if (at_get(at) > CONNECTION_SWITCH_MAX)
- at_reset(at, CONNECTION_SWITCH_MAX);
- }
- LASSERT(imp_conn->oic_last_attempt);
- CDEBUG(D_HA, "%s: tried all connections, increasing latency to %ds\n",
- imp->imp_obd->obd_name, at_get(at));
- }
-
- imp_conn->oic_last_attempt = cfs_time_current_64();
-
- /* switch connection, don't mind if it's same as the current one */
- ptlrpc_connection_put(imp->imp_connection);
- imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
-
- dlmexp = class_conn2export(&imp->imp_dlm_handle);
- LASSERT(dlmexp != NULL);
- ptlrpc_connection_put(dlmexp->exp_connection);
- dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
- class_export_put(dlmexp);
-
- if (imp->imp_conn_current != imp_conn) {
- if (imp->imp_conn_current) {
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
-
- CDEBUG(D_HA, "%s: Connection changing to %.*s (at %s)\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
- }
-
- imp->imp_conn_current = imp_conn;
- }
-
- CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
- imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
- libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
-
- spin_unlock(&imp->imp_lock);
-
- return 0;
-}
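
For illustration only, a compact standalone sketch of the selection policy documented above: prefer a connection that has not been attempted since the last successful connect, otherwise fall back to the least recently attempted one. The struct name, field names and sample NIDs below are invented and are not taken from the driver.

/* Standalone sketch of the policy in import_select_connection() above;
 * "struct conn_sketch" and the sample data are invented for illustration. */
#include <stdio.h>

struct conn_sketch {
        const char *nid;
        unsigned long long last_attempt;        /* 0 = never tried */
};

static int select_conn(const struct conn_sketch *c, int n,
                       unsigned long long last_success)
{
        int pick = -1, i;

        for (i = 0; i < n; i++) {
                /* Not tried since the last successful connect: use it. */
                if (c[i].last_attempt == 0 ||
                    c[i].last_attempt <= last_success)
                        return i;
                /* Otherwise remember the least recently attempted one. */
                if (pick < 0 || c[i].last_attempt < c[pick].last_attempt)
                        pick = i;
        }
        return pick;
}

int main(void)
{
        struct conn_sketch conns[] = {
                { "192.168.0.1@tcp", 150 },
                { "192.168.0.2@tcp", 120 },
                { "192.168.0.3@tcp", 180 },
        };

        /* Last successful connect at "time" 100: all have been tried since
         * then, so the least recently attempted one (index 1) is chosen. */
        printf("picked %d\n", select_conn(conns, 3, 100));
        return 0;
}
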
-
-/*
- * must be called under imp_lock
- */
-static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
-{
- struct ptlrpc_request *req;
- struct list_head *tmp;
-
- /* The requests in committed_list always have smaller transnos than
- * the requests in replay_list */
- if (!list_empty(&imp->imp_committed_list)) {
- tmp = imp->imp_committed_list.next;
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
- *transno = req->rq_transno;
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_ERROR, req,
- "zero transno in committed_list");
- LBUG();
- }
- return 1;
- }
- if (!list_empty(&imp->imp_replay_list)) {
- tmp = imp->imp_replay_list.next;
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
- *transno = req->rq_transno;
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_ERROR, req, "zero transno in replay_list");
- LBUG();
- }
- return 1;
- }
- return 0;
-}
-
-/**
- * Attempt to (re)connect import \a imp. This includes all preparations,
- * initializing CONNECT RPC request and passing it to ptlrpcd for
- * actual sending.
- * Returns 0 on success or error code.
- */
-int ptlrpc_connect_import(struct obd_import *imp)
-{
- struct obd_device *obd = imp->imp_obd;
- int initial_connect = 0;
- int set_transno = 0;
- __u64 committed_before_reconnect = 0;
- struct ptlrpc_request *request;
- char *bufs[] = { NULL,
- obd2cli_tgt(imp->imp_obd),
- obd->obd_uuid.uuid,
- (char *)&imp->imp_dlm_handle,
- (char *)&imp->imp_connect_data };
- struct ptlrpc_connect_async_args *aa;
- int rc;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- spin_unlock(&imp->imp_lock);
- CERROR("can't connect to a closed import\n");
- return -EINVAL;
- } else if (imp->imp_state == LUSTRE_IMP_FULL) {
- spin_unlock(&imp->imp_lock);
- CERROR("already connected\n");
- return 0;
- } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
- spin_unlock(&imp->imp_lock);
- CERROR("already connecting\n");
- return -EALREADY;
- }
-
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
-
- imp->imp_conn_cnt++;
- imp->imp_resend_replay = 0;
-
- if (!lustre_handle_is_used(&imp->imp_remote_handle))
- initial_connect = 1;
- else
- committed_before_reconnect = imp->imp_peer_committed_transno;
-
- set_transno = ptlrpc_first_transno(imp,
- &imp->imp_connect_data.ocd_transno);
- spin_unlock(&imp->imp_lock);
-
- rc = import_select_connection(imp);
- if (rc)
- goto out;
-
- rc = sptlrpc_import_sec_adapt(imp, NULL, NULL);
- if (rc)
- goto out;
-
-	/* Reset connect flags to the originally requested flags, so that if
-	 * the server is updated on the fly we will get the new features. */
- imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
- /* Reset ocd_version each time so the server knows the exact versions */
- imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
- imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
- imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
-
- rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
- &obd->obd_uuid, &imp->imp_connect_data, NULL);
- if (rc)
- goto out;
-
- request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
- if (request == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION,
- imp->imp_connect_op, bufs, NULL);
- if (rc) {
- ptlrpc_request_free(request);
- goto out;
- }
-
- /* Report the rpc service time to the server so that it knows how long
- * to wait for clients to join recovery */
- lustre_msg_set_service_time(request->rq_reqmsg,
- at_timeout2est(request->rq_timeout));
-
- /* The amount of time we give the server to process the connect req.
- * import_select_connection will increase the net latency on
- * repeated reconnect attempts to cover slow networks.
- * We override/ignore the server rpc completion estimate here,
- * which may be large if this is a reconnect attempt */
- request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
- lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
-
- lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
-
- request->rq_no_resend = request->rq_no_delay = 1;
- request->rq_send_state = LUSTRE_IMP_CONNECTING;
- /* Allow a slightly larger reply for future growth compatibility */
- req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
- sizeof(struct obd_connect_data)+16*sizeof(__u64));
- ptlrpc_request_set_replen(request);
- request->rq_interpret_reply = ptlrpc_connect_interpret;
-
- CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
- aa = ptlrpc_req_async_args(request);
- memset(aa, 0, sizeof(*aa));
-
- aa->pcaa_peer_committed = committed_before_reconnect;
- aa->pcaa_initial_connect = initial_connect;
-
- if (aa->pcaa_initial_connect) {
- spin_lock(&imp->imp_lock);
- imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
- lustre_msg_add_op_flags(request->rq_reqmsg,
- MSG_CONNECT_INITIAL);
- }
-
- if (set_transno)
- lustre_msg_add_op_flags(request->rq_reqmsg,
- MSG_CONNECT_TRANSNO);
-
- DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
- request->rq_timeout);
- ptlrpcd_add_req(request);
- rc = 0;
-out:
- if (rc != 0)
- IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_connect_import);
-
-static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
-{
- int force_verify;
-
- spin_lock(&imp->imp_lock);
- force_verify = imp->imp_force_verify != 0;
- spin_unlock(&imp->imp_lock);
-
- if (force_verify)
- ptlrpc_pinger_wake_up();
-}
-
-static int ptlrpc_busy_reconnect(int rc)
-{
- return (rc == -EBUSY) || (rc == -EAGAIN);
-}
-
-/**
- * interpret_reply callback for connect RPCs.
- * Looks at the returned status of the connect operation and decides
- * what to do with the import - i.e. enter recovery, promote it to
- * full state for normal operation, or disconnect it due to an error.
- */
-static int ptlrpc_connect_interpret(const struct lu_env *env,
- struct ptlrpc_request *request,
- void *data, int rc)
-{
- struct ptlrpc_connect_async_args *aa = data;
- struct obd_import *imp = request->rq_import;
- struct client_obd *cli = &imp->imp_obd->u.cli;
- struct lustre_handle old_hdl;
- __u64 old_connect_flags;
- int msg_flags;
- struct obd_connect_data *ocd;
- struct obd_export *exp;
- int ret;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- imp->imp_connect_tried = 1;
- spin_unlock(&imp->imp_lock);
- return 0;
- }
-
- if (rc) {
-		/* if this is a reconnect to a busy export, there is no need to
-		 * select a new target for connecting */
- imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
- spin_unlock(&imp->imp_lock);
- ptlrpc_maybe_ping_import_soon(imp);
- goto out;
- }
- spin_unlock(&imp->imp_lock);
-
- LASSERT(imp->imp_conn_current);
-
- msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
-
- ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA,
- RCL_SERVER);
- /* server replied obd_connect_data is always bigger */
- ocd = req_capsule_server_sized_get(&request->rq_pill,
- &RMF_CONNECT_DATA, ret);
-
- if (ocd == NULL) {
- CERROR("%s: no connect data from server\n",
- imp->imp_obd->obd_name);
- rc = -EPROTO;
- goto out;
- }
-
- spin_lock(&imp->imp_lock);
-
- /* All imports are pingable */
- imp->imp_pingable = 1;
- imp->imp_force_reconnect = 0;
- imp->imp_force_verify = 0;
-
- imp->imp_connect_data = *ocd;
-
- CDEBUG(D_HA, "%s: connect to target with instance %u\n",
- imp->imp_obd->obd_name, ocd->ocd_instance);
- exp = class_conn2export(&imp->imp_dlm_handle);
-
- spin_unlock(&imp->imp_lock);
-
- /* check that server granted subset of flags we asked for. */
- if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
- ocd->ocd_connect_flags) {
-		CERROR("%s: Server did not grant requested subset of flags: asked=%#llx granted=%#llx\n",
- imp->imp_obd->obd_name, imp->imp_connect_flags_orig,
- ocd->ocd_connect_flags);
- rc = -EPROTO;
- goto out;
- }
-
- if (!exp) {
- /* This could happen if export is cleaned during the
- connect attempt */
- CERROR("%s: missing export after connect\n",
- imp->imp_obd->obd_name);
- rc = -ENODEV;
- goto out;
- }
- old_connect_flags = exp_connect_flags(exp);
- exp->exp_connect_data = *ocd;
- imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
- class_export_put(exp);
-
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
-
- if (aa->pcaa_initial_connect) {
- spin_lock(&imp->imp_lock);
- if (msg_flags & MSG_CONNECT_REPLAYABLE) {
- imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "connected to replayable target: %s\n",
- obd2cli_tgt(imp->imp_obd));
- } else {
- imp->imp_replayable = 0;
- spin_unlock(&imp->imp_lock);
- }
-
-		/* if applicable, adjust the imp->imp_msg_magic here
-		 * according to the reply flags */
-
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
-
- /* Initial connects are allowed for clients with non-random
- * uuids when servers are in recovery. Simply signal the
- * servers replay is complete and wait in REPLAY_WAIT. */
- if (msg_flags & MSG_CONNECT_RECOVERING) {
- CDEBUG(D_HA, "connect to %s during recovery\n",
- obd2cli_tgt(imp->imp_obd));
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
- } else {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
- ptlrpc_activate_import(imp);
- }
-
- rc = 0;
- goto finish;
- }
-
- /* Determine what recovery state to move the import to. */
- if (msg_flags & MSG_CONNECT_RECONNECT) {
- memset(&old_hdl, 0, sizeof(old_hdl));
- if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
- sizeof(old_hdl))) {
-			LCONSOLE_WARN("Reconnect to %s (at @%s) failed due to bad handle %#llx\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_dlm_handle.cookie);
- rc = -ENOTCONN;
- goto out;
- }
-
- if (memcmp(&imp->imp_remote_handle,
- lustre_msg_get_handle(request->rq_repmsg),
- sizeof(imp->imp_remote_handle))) {
- int level = msg_flags & MSG_CONNECT_RECOVERING ?
- D_HA : D_WARNING;
-
-			/* Bug 16611/14775: if the server handle has changed,
-			 * that means some sort of disconnection happened.
-			 * If the server is not in recovery, that also means it
-			 * already erased all of our state because of a previous
-			 * eviction. If it is in recovery, we are safe to
-			 * participate since we can reestablish all of our state
-			 * with the server again. */
- if ((msg_flags & MSG_CONNECT_RECOVERING)) {
- CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_remote_handle.cookie,
- lustre_msg_get_handle(
- request->rq_repmsg)->cookie);
- } else {
- LCONSOLE_WARN("Evicted from %s (at %s) after server handle changed from %#llx to %#llx\n",
- obd2cli_tgt(imp->imp_obd),
-					      imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_remote_handle.cookie,
- lustre_msg_get_handle(
- request->rq_repmsg)->cookie);
- }
-
-
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
-
- if (!(msg_flags & MSG_CONNECT_RECOVERING)) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
- rc = 0;
- goto finish;
- }
-
- } else {
- CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
- }
-
- if (imp->imp_invalid) {
- CDEBUG(D_HA, "%s: reconnected but import is invalid; marking evicted\n",
- imp->imp_obd->obd_name);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
- } else if (msg_flags & MSG_CONNECT_RECOVERING) {
- CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
- imp->imp_obd->obd_name,
- obd2cli_tgt(imp->imp_obd));
-
- spin_lock(&imp->imp_lock);
- imp->imp_resend_replay = 1;
- spin_unlock(&imp->imp_lock);
-
- IMPORT_SET_STATE(imp, imp->imp_replay_state);
- } else {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
- }
- } else if ((msg_flags & MSG_CONNECT_RECOVERING) && !imp->imp_invalid) {
- LASSERT(imp->imp_replayable);
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
- imp->imp_last_replay_transno = 0;
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
- } else {
- DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
- imp->imp_obd->obd_name, msg_flags);
- imp->imp_remote_handle =
- *lustre_msg_get_handle(request->rq_repmsg);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
- }
-
- /* Sanity checks for a reconnected import. */
- if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
- CERROR("imp_replayable flag does not match server after reconnect. We should LBUG right here.\n");
- }
-
- if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
- lustre_msg_get_last_committed(request->rq_repmsg) <
- aa->pcaa_peer_committed) {
- CERROR("%s went back in time (transno %lld was previously committed, server now claims %lld)! See https://bugzilla.lustre.org/show_bug.cgi?id=9646\n",
- obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
- lustre_msg_get_last_committed(request->rq_repmsg));
- }
-
-finish:
- rc = ptlrpc_import_recovery_state_machine(imp);
- if (rc != 0) {
- if (rc == -ENOTCONN) {
- CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
- ptlrpc_connect_import(imp);
- imp->imp_connect_tried = 1;
- return 0;
- }
- } else {
-
- spin_lock(&imp->imp_lock);
- list_del(&imp->imp_conn_current->oic_item);
- list_add(&imp->imp_conn_current->oic_item,
- &imp->imp_conn_list);
- imp->imp_last_success_conn =
- imp->imp_conn_current->oic_last_attempt;
-
- spin_unlock(&imp->imp_lock);
-
- if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) &&
- !(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) {
- LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %llx, replied %llx\n",
- imp->imp_obd->obd_name,
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_connect_flags_orig,
- ocd->ocd_connect_flags);
- rc = -EPROTO;
- goto out;
- }
-
- if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- (ocd->ocd_version > LUSTRE_VERSION_CODE +
- LUSTRE_VERSION_OFFSET_WARN ||
- ocd->ocd_version < LUSTRE_VERSION_CODE -
- LUSTRE_VERSION_OFFSET_WARN)) {
- /* Sigh, some compilers do not like #ifdef in the middle
- of macro arguments */
-			const char *older = "older. Consider upgrading server or downgrading client";
-			const char *newer = "newer than client version. Consider upgrading client";
-
- LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
- obd2cli_tgt(imp->imp_obd),
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version),
- ocd->ocd_version > LUSTRE_VERSION_CODE ?
- newer : older, LUSTRE_VERSION_STRING);
- }
-
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
- /* Check if server has LU-1252 fix applied to not always swab
- * the IR MNE entries. Do this only once per connection. This
- * fixup is version-limited, because we don't want to carry the
- * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
- * need interop with unpatched 2.2 servers. For newer servers,
- * the client will do MNE swabbing only as needed. LU-1644 */
- if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
- OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
- OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
- strcmp(imp->imp_obd->obd_type->typ_name,
- LUSTRE_MGC_NAME) == 0))
- imp->imp_need_mne_swab = 1;
- else /* clear if server was upgraded since last connect */
- imp->imp_need_mne_swab = 0;
-#else
-#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
-#endif
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
- /* We sent to the server ocd_cksum_types with bits set
- * for algorithms we understand. The server masked off
- * the checksum types it doesn't support */
- if ((ocd->ocd_cksum_types &
- cksum_types_supported_client()) == 0) {
- LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
- obd2cli_tgt(imp->imp_obd),
- ocd->ocd_cksum_types,
- cksum_types_supported_client());
- cli->cl_checksum = 0;
- cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
- } else {
- cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
- }
- } else {
- /* The server does not support OBD_CONNECT_CKSUM.
-			 * Enforce ADLER for backward compatibility */
- cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
- }
- cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
- cli->cl_max_pages_per_rpc =
- min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
- cli->cl_max_pages_per_rpc);
- else if (imp->imp_connect_op == MDS_CONNECT ||
- imp->imp_connect_op == MGS_CONNECT)
- cli->cl_max_pages_per_rpc = 1;
-
-		/* Reset ns_connect_flags only for the initial connect. It
-		 * might be changed while the FS is in use, and if we reset it
-		 * on reconnect we lose user settings made before, such as
-		 * disabling lru_resize, etc. */
- if (old_connect_flags != exp_connect_flags(exp) ||
- aa->pcaa_initial_connect) {
- CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
- imp->imp_obd->obd_name, ocd->ocd_connect_flags);
- imp->imp_obd->obd_namespace->ns_connect_flags =
- ocd->ocd_connect_flags;
- imp->imp_obd->obd_namespace->ns_orig_connect_flags =
- ocd->ocd_connect_flags;
- }
-
- if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
- /* We need a per-message support flag, because
- a. we don't know if the incoming connect reply
- supports AT or not (in reply_in_callback)
- until we unpack it.
- b. failovered server means export and flags are gone
- (in ptlrpc_send_reply).
- Can only be set when we know AT is supported at
- both ends */
- imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
- else
- imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
-
- if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
- imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
- else
- imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
-
- LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
- (cli->cl_max_pages_per_rpc > 0));
- }
-
-out:
- imp->imp_connect_tried = 1;
-
- if (rc != 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
- if (rc == -EACCES) {
- /*
- * Give up trying to reconnect
- * EACCES means client has no permission for connection
- */
- imp->imp_obd->obd_no_recov = 1;
- ptlrpc_deactivate_import(imp);
- }
-
- if (rc == -EPROTO) {
- struct obd_connect_data *ocd;
-
- /* reply message might not be ready */
- if (request->rq_repmsg == NULL)
- return -EPROTO;
-
- ocd = req_capsule_server_get(&request->rq_pill,
- &RMF_CONNECT_DATA);
- if (ocd &&
- (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
- /*
- * Actually servers are only supposed to refuse
- * connection from liblustre clients, so we
- * should never see this from VFS context
- */
- LCONSOLE_ERROR_MSG(0x16a, "Server %s version (%d.%d.%d.%d) refused connection from this client with an incompatible version (%s). Client must be recompiled\n",
- obd2cli_tgt(imp->imp_obd),
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version),
- LUSTRE_VERSION_STRING);
- ptlrpc_deactivate_import(imp);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
- }
- return -EPROTO;
- }
-
- ptlrpc_maybe_ping_import_soon(imp);
-
- CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
- obd2cli_tgt(imp->imp_obd),
- (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
- }
-
- wake_up_all(&imp->imp_recovery_waitq);
- return rc;
-}
-
-/**
- * interpret callback for "completed replay" RPCs.
- * \see signal_completed_replay
- */
-static int completed_replay_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void *data, int rc)
-{
- atomic_dec(&req->rq_import->imp_replay_inflight);
- if (req->rq_status == 0 &&
- !req->rq_import->imp_vbr_failed) {
- ptlrpc_import_recovery_state_machine(req->rq_import);
- } else {
- if (req->rq_import->imp_vbr_failed) {
- CDEBUG(D_WARNING,
- "%s: version recovery fails, reconnecting\n",
- req->rq_import->imp_obd->obd_name);
- } else {
- CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, reconnecting\n",
- req->rq_import->imp_obd->obd_name,
- req->rq_status);
- }
- ptlrpc_connect_import(req->rq_import);
- }
-
- return 0;
-}
-
-/**
- * Let server know that we have no requests to replay anymore.
- * Achieved by just sending a PING request
- */
-static int signal_completed_replay(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
- return 0;
-
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
- atomic_inc(&imp->imp_replay_inflight);
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
- OBD_PING);
- if (req == NULL) {
- atomic_dec(&imp->imp_replay_inflight);
- return -ENOMEM;
- }
-
- ptlrpc_request_set_replen(req);
- req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
- lustre_msg_add_flags(req->rq_reqmsg,
- MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
- if (AT_OFF)
- req->rq_timeout *= 3;
- req->rq_interpret_reply = completed_replay_interpret;
-
- ptlrpcd_add_req(req);
- return 0;
-}
-
-/**
- * In kernel code, all import invalidation happens in its own separate
- * thread, so that whatever application happened to encounter the problem
- * can still be killed or can otherwise continue.
- */
-static int ptlrpc_invalidate_import_thread(void *data)
-{
- struct obd_import *imp = data;
-
- unshare_fs_struct();
-
- CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
- imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
-
- ptlrpc_invalidate_import(imp);
-
- if (obd_dump_on_eviction) {
- CERROR("dump the log upon eviction\n");
- libcfs_debug_dumplog();
- }
-
- IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
- ptlrpc_import_recovery_state_machine(imp);
-
- class_import_put(imp);
- return 0;
-}
-
-/**
- * This is the state machine for client-side recovery on import.
- *
- * Typically we have two possible paths. If we connect to a server that is not
- * in recovery, we just enter IMP_EVICTED state, invalidate our import
- * state and reconnect from scratch.
- * If we connect to a server that is in recovery, we enter the IMP_REPLAY
- * import state. We go through our list of requests to replay and send them
- * to the server one by
- * one.
- * After sending all request from the list we change import state to
- * IMP_REPLAY_LOCKS and re-request all the locks we believe we have from server
- * and also all the locks we don't yet have and wait for server to grant us.
- * After that we send a special "replay completed" request and change import
- * state to IMP_REPLAY_WAIT.
- * Upon receiving reply to that "replay completed" RPC we enter IMP_RECOVER
- * state and resend all requests from sending list.
- * After that we promote the import to FULL state and send all delayed
- * requests; the import is then fully operational. (A small illustrative
- * sketch of this state progression follows the function below.)
- *
- */
-int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
-{
- int rc = 0;
- int inflight;
- char *target_start;
- int target_len;
-
- if (imp->imp_state == LUSTRE_IMP_EVICTED) {
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
- /* Don't care about MGC eviction */
- if (strcmp(imp->imp_obd->obd_type->typ_name,
- LUSTRE_MGC_NAME) != 0) {
- LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted by %.*s; in progress operations using this service will fail.\n",
- imp->imp_obd->obd_name, target_len,
- target_start);
- }
- CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
- /* reset vbr_failed flag upon eviction */
- spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 0;
- spin_unlock(&imp->imp_lock);
-
- {
- struct task_struct *task;
-			/* bug 17802: XXX client_disconnect_export vs connect request
-			 * race. If the client is evicted at this time, we would start
-			 * the invalidate thread without a reference to the import, and
-			 * the import could be freed at the same time. */
- class_import_get(imp);
- task = kthread_run(ptlrpc_invalidate_import_thread, imp,
- "ll_imp_inval");
- if (IS_ERR(task)) {
- class_import_put(imp);
-				rc = PTR_ERR(task);
-				CERROR("error starting invalidate thread: %d\n", rc);
- } else {
- rc = 0;
- }
- return rc;
- }
- }
-
- if (imp->imp_state == LUSTRE_IMP_REPLAY) {
- CDEBUG(D_HA, "replay requested by %s\n",
- obd2cli_tgt(imp->imp_obd));
- rc = ptlrpc_replay_next(imp, &inflight);
- if (inflight == 0 &&
- atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
- rc = ldlm_replay_locks(imp);
- if (rc)
- goto out;
- }
- rc = 0;
- }
-
- if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
- if (atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
- rc = signal_completed_replay(imp);
- if (rc)
- goto out;
- }
-
- }
-
- if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
- if (atomic_read(&imp->imp_replay_inflight) == 0) {
- IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
- }
- }
-
- if (imp->imp_state == LUSTRE_IMP_RECOVER) {
- CDEBUG(D_HA, "reconnected to %s@%s\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
-
- rc = ptlrpc_resend(imp);
- if (rc)
- goto out;
- IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
- ptlrpc_activate_import(imp);
-
- deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
- &target_start, &target_len);
- LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
- }
-
- if (imp->imp_state == LUSTRE_IMP_FULL) {
- wake_up_all(&imp->imp_recovery_waitq);
- ptlrpc_wake_delayed(imp);
- }
-
-out:
- return rc;
-}
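
For illustration only, the recovery state progression described in the comment above, rendered as a tiny standalone C sketch; the enum, the names and the output format are invented for this sketch and are not the driver's LUSTRE_IMP_* constants.

/* Standalone sketch of the documented recovery path: replay requests,
 * replay locks, signal completion, resend, then go FULL. */
#include <stdio.h>

enum imp_state_sk {
        SK_EVICTED, SK_REPLAY, SK_REPLAY_LOCKS, SK_REPLAY_WAIT,
        SK_RECOVER, SK_FULL
};

static const char *sk_name[] = {
        "EVICTED", "REPLAY", "REPLAY_LOCKS", "REPLAY_WAIT", "RECOVER", "FULL"
};

int main(void)
{
        /* Path taken when the server is itself in recovery. */
        enum imp_state_sk path[] = {
                SK_REPLAY, SK_REPLAY_LOCKS, SK_REPLAY_WAIT, SK_RECOVER, SK_FULL
        };
        unsigned int i, n = sizeof(path) / sizeof(path[0]);

        for (i = 0; i < n; i++)
                printf("%s%s", sk_name[path[i]], i + 1 < n ? " -> " : "\n");
        return 0;
}
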
-
-int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
-{
- struct ptlrpc_request *req;
- int rq_opc, rc = 0;
-
- if (imp->imp_obd->obd_force)
- goto set_state;
-
- switch (imp->imp_connect_op) {
- case OST_CONNECT:
- rq_opc = OST_DISCONNECT;
- break;
- case MDS_CONNECT:
- rq_opc = MDS_DISCONNECT;
- break;
- case MGS_CONNECT:
- rq_opc = MGS_DISCONNECT;
- break;
- default:
- rc = -EINVAL;
- CERROR("%s: don't know how to disconnect from %s (connect_op %d): rc = %d\n",
- imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
- imp->imp_connect_op, rc);
- return rc;
- }
-
- if (ptlrpc_import_in_recovery(imp)) {
- struct l_wait_info lwi;
- long timeout;
-
- if (AT_OFF) {
- if (imp->imp_server_timeout)
- timeout = cfs_time_seconds(obd_timeout / 2);
- else
- timeout = cfs_time_seconds(obd_timeout);
- } else {
- int idx = import_at_get_index(imp,
- imp->imp_client->cli_request_portal);
- timeout = cfs_time_seconds(
- at_get(&imp->imp_at.iat_service_estimate[idx]));
- }
-
- lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
- back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp), &lwi);
-
- }
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_FULL)
- goto out;
- spin_unlock(&imp->imp_lock);
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
- LUSTRE_OBD_VERSION, rq_opc);
- if (req) {
-		/* We are disconnecting, so do not retry the DISCONNECT rpc if
- * it fails. We can get through the above with a down server
- * if the client doesn't know the server is gone yet. */
- req->rq_no_resend = 1;
-
- /* We want client umounts to happen quickly, no matter the
- server state... */
- req->rq_timeout = min_t(int, req->rq_timeout,
- INITIAL_CONNECT_TIMEOUT);
-
- IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
- req->rq_send_state = LUSTRE_IMP_CONNECTING;
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- ptlrpc_req_finished(req);
- }
-
-set_state:
- spin_lock(&imp->imp_lock);
-out:
- if (noclose)
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- else
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
- memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
- spin_unlock(&imp->imp_lock);
-
- if (rc == -ETIMEDOUT || rc == -ENOTCONN || rc == -ESHUTDOWN)
- rc = 0;
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_disconnect_import);
-
-/* Adaptive Timeout utils */
-extern unsigned int at_min, at_max, at_history;
-
-/* Bin into timeslices using AT_BINS bins.
-   This gives us a maximum over the last binlimit*AT_BINS secs without storing
-   every sample, while still smoothing out the return to normal after a slow
-   response. (E.g. remember the maximum latency in each minute of the last
-   4 minutes.) A small standalone sketch of this binning scheme follows the
-   function below. */
-int at_measured(struct adaptive_timeout *at, unsigned int val)
-{
- unsigned int old = at->at_current;
- time64_t now = ktime_get_real_seconds();
- long binlimit = max_t(long, at_history / AT_BINS, 1);
-
- LASSERT(at);
- CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
- val, at, (long)(now - at->at_binstart), at->at_current,
- at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
-
- if (val == 0)
- /* 0's don't count, because we never want our timeout to
- drop to 0, and because 0 could mean an error */
- return 0;
-
- spin_lock(&at->at_lock);
-
- if (unlikely(at->at_binstart == 0)) {
- /* Special case to remove default from history */
- at->at_current = val;
- at->at_worst_ever = val;
- at->at_worst_time = now;
- at->at_hist[0] = val;
- at->at_binstart = now;
- } else if (now - at->at_binstart < binlimit) {
- /* in bin 0 */
- at->at_hist[0] = max(val, at->at_hist[0]);
- at->at_current = max(val, at->at_current);
- } else {
- int i, shift;
- unsigned int maxv = val;
- /* move bins over */
- shift = (u32)(now - at->at_binstart) / binlimit;
- LASSERT(shift > 0);
- for (i = AT_BINS - 1; i >= 0; i--) {
- if (i >= shift) {
- at->at_hist[i] = at->at_hist[i - shift];
- maxv = max(maxv, at->at_hist[i]);
- } else {
- at->at_hist[i] = 0;
- }
- }
- at->at_hist[0] = val;
- at->at_current = maxv;
- at->at_binstart += shift * binlimit;
- }
-
- if (at->at_current > at->at_worst_ever) {
- at->at_worst_ever = at->at_current;
- at->at_worst_time = now;
- }
-
- if (at->at_flags & AT_FLG_NOHIST)
-		/* Only keep the last reported val; the rest of the history is
-		   kept for proc only */
- at->at_current = val;
-
- if (at_max > 0)
- at->at_current = min(at->at_current, at_max);
- at->at_current = max(at->at_current, at_min);
-
- if (at->at_current != old)
- CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d (val=%u) hist %u %u %u %u\n",
- at,
- old, at->at_current, at->at_current - old, val,
- at->at_hist[0], at->at_hist[1], at->at_hist[2],
- at->at_hist[3]);
-
- /* if we changed, report the old value */
- old = (at->at_current != old) ? old : 0;
-
- spin_unlock(&at->at_lock);
- return old;
-}
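
For illustration only, a stripped-down standalone sketch of the binning scheme described above: AT_BINS history bins of binlimit seconds each, with the current estimate tracking the maximum over the window. AT_BINS_SK, BINLIMIT_SK and the sample inputs are invented for this sketch and are not the driver's tunables.

/* Standalone sketch of the AT_BINS sliding-max idea used by at_measured()
 * above; names and parameters here are invented. */
#include <stdio.h>

#define AT_BINS_SK      4
#define BINLIMIT_SK     60      /* seconds per bin */

struct at_sketch {
        unsigned int hist[AT_BINS_SK];
        long long binstart;
        unsigned int current;
};

static void at_add(struct at_sketch *at, long long now, unsigned int val)
{
        long long shift = (now - at->binstart) / BINLIMIT_SK;
        unsigned int maxv = val;
        int i;

        if (shift > 0) {
                /* Slide the bins over and recompute the window maximum. */
                for (i = AT_BINS_SK - 1; i >= 0; i--) {
                        at->hist[i] = (i >= shift) ? at->hist[i - shift] : 0;
                        if (at->hist[i] > maxv)
                                maxv = at->hist[i];
                }
                at->hist[0] = val;
                at->binstart += shift * BINLIMIT_SK;
                at->current = maxv;
        } else {
                /* Still in bin 0: just track the maximum. */
                if (val > at->hist[0])
                        at->hist[0] = val;
                if (val > at->current)
                        at->current = val;
        }
}

int main(void)
{
        struct at_sketch at = { {0}, 0, 0 };

        at_add(&at, 10, 5);     /* bin 0 */
        at_add(&at, 70, 2);     /* one bin later: window max is still 5 */
        printf("current=%u\n", at.current);
        return 0;
}
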
-
-/* Find the imp_at index for a given portal; assign if space available */
-int import_at_get_index(struct obd_import *imp, int portal)
-{
- struct imp_at *at = &imp->imp_at;
- int i;
-
- for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
- if (at->iat_portal[i] == portal)
- return i;
- if (at->iat_portal[i] == 0)
- /* unused */
- break;
- }
-
- /* Not found in list, add it under a lock */
- spin_lock(&imp->imp_lock);
-
- /* Check unused under lock */
- for (; i < IMP_AT_MAX_PORTALS; i++) {
- if (at->iat_portal[i] == portal)
- goto out;
- if (at->iat_portal[i] == 0)
- /* unused */
- break;
- }
-
- /* Not enough portals? */
- LASSERT(i < IMP_AT_MAX_PORTALS);
-
- at->iat_portal[i] = portal;
-out:
- spin_unlock(&imp->imp_lock);
- return i;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
deleted file mode 100644
index 991fb74c8d08..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ /dev/null
@@ -1,2316 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/layout.c
- *
- * Lustre Metadata Target (mdt) request handler
- *
- * Author: Nikita Danilov <nikita@clusterfs.com>
- */
-/*
- * This file contains the "capsule/pill" abstraction layered above PTLRPC.
- *
- * Every struct ptlrpc_request contains a "pill", which points to a description
- * of the format that the request conforms to.
- */
-
-#if !defined(__REQ_LAYOUT_USER__)
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <linux/module.h>
-
-/* LUSTRE_VERSION_CODE */
-#include "../include/lustre_ver.h"
-
-#include "../include/obd_support.h"
-/* lustre_swab_mdt_body */
-#include "../include/lustre/lustre_idl.h"
-/* obd2cli_tgt() (required by DEBUG_REQ()) */
-#include "../include/obd.h"
-
-/* __REQ_LAYOUT_USER__ */
-#endif
-/* struct ptlrpc_request, lustre_msg* */
-#include "../include/lustre_req_layout.h"
-#include "../include/lustre_acl.h"
-#include "../include/lustre_debug.h"
-
-/*
- * RQFs (see below) refer to two struct req_msg_field arrays describing the
- * client request and server reply, respectively.
- */
-/* empty set of fields... for suitable definition of emptiness. */
-static const struct req_msg_field *empty[] = {
- &RMF_PTLRPC_BODY
-};
-
-static const struct req_msg_field *mgs_target_info_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_TARGET_INFO
-};
-
-static const struct req_msg_field *mgs_set_info[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_SEND_PARAM
-};
-
-static const struct req_msg_field *mgs_config_read_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_CONFIG_BODY
-};
-
-static const struct req_msg_field *mgs_config_read_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MGS_CONFIG_RES
-};
-
-static const struct req_msg_field *log_cancel_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LOGCOOKIES
-};
-
-static const struct req_msg_field *mdt_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY
-};
-
-static const struct req_msg_field *mdt_body_capa[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *quotactl_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OBD_QUOTACTL
-};
-
-static const struct req_msg_field *quota_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_QUOTA_BODY
-};
-
-static const struct req_msg_field *ldlm_intent_quota_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_QUOTA_BODY
-};
-
-static const struct req_msg_field *ldlm_intent_quota_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_DLM_LVB,
- &RMF_QUOTA_BODY
-};
-
-static const struct req_msg_field *mdt_close_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_EPOCH,
- &RMF_REC_REINT,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *mdt_release_close_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_EPOCH,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CLOSE_DATA
-};
-
-static const struct req_msg_field *obd_statfs_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OBD_STATFS
-};
-
-static const struct req_msg_field *seq_query_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SEQ_OPC,
- &RMF_SEQ_RANGE
-};
-
-static const struct req_msg_field *seq_query_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SEQ_RANGE
-};
-
-static const struct req_msg_field *fld_query_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FLD_OPC,
- &RMF_FLD_MDFLD
-};
-
-static const struct req_msg_field *fld_query_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FLD_MDFLD
-};
-
-static const struct req_msg_field *mds_getattr_name_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *mds_reint_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT
-};
-
-static const struct req_msg_field *mds_reint_create_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *mds_reint_create_slave_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_create_rmt_acl_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_create_sym_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_SYMTGT,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_open_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *mds_reint_open_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *mds_reint_unlink_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_link_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_rename_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_SYMTGT,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_last_unlink_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_LOGCOOKIES,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *mds_reint_setattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_MDT_EPOCH,
- &RMF_EADATA,
- &RMF_LOGCOOKIES,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mds_reint_setxattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_REC_REINT,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *mdt_swap_layouts[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_SWAP_LAYOUTS,
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *obd_connect_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_TGTUUID,
- &RMF_CLUUID,
- &RMF_CONN,
- &RMF_CONNECT_DATA
-};
-
-static const struct req_msg_field *obd_connect_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_CONNECT_DATA
-};
-
-static const struct req_msg_field *obd_set_info_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY,
- &RMF_SETINFO_VAL
-};
-
-static const struct req_msg_field *ost_grant_shrink_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY,
- &RMF_OST_BODY
-};
-
-static const struct req_msg_field *mds_getinfo_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GETINFO_KEY,
- &RMF_GETINFO_VALLEN
-};
-
-static const struct req_msg_field *mds_getinfo_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GETINFO_VAL,
-};
-
-static const struct req_msg_field *ldlm_enqueue_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ
-};
-
-static const struct req_msg_field *ldlm_enqueue_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP
-};
-
-static const struct req_msg_field *ldlm_enqueue_lvb_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_DLM_LVB
-};
-
-static const struct req_msg_field *ldlm_cp_callback_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_DLM_LVB
-};
-
-static const struct req_msg_field *ldlm_gl_callback_desc_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_DLM_GL_DESC
-};
-
-static const struct req_msg_field *ldlm_gl_callback_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_LVB
-};
-
-static const struct req_msg_field *ldlm_intent_basic_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
-};
-
-static const struct req_msg_field *ldlm_intent_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT
-};
-
-static const struct req_msg_field *ldlm_intent_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL
-};
-
-static const struct req_msg_field *ldlm_intent_layout_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_LAYOUT_INTENT,
- &RMF_EADATA /* for new layout to be set up */
-};
-static const struct req_msg_field *ldlm_intent_open_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *ldlm_intent_getattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_MDT_BODY, /* coincides with mds_getattr_name_client[] */
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *ldlm_intent_getattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *ldlm_intent_create_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT, /* coincides with mds_reint_create_client[] */
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *ldlm_intent_open_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT, /* coincides with mds_reint_open_client[] */
- &RMF_CAPA1,
- &RMF_CAPA2,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *ldlm_intent_unlink_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_REC_REINT, /* coincides with mds_reint_unlink_client[] */
- &RMF_CAPA1,
- &RMF_NAME
-};
-
-static const struct req_msg_field *ldlm_intent_getxattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
-};
-
-static const struct req_msg_field *ldlm_intent_getxattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL, /* for req_capsule_extend/mdt_intent_policy */
- &RMF_EADATA,
- &RMF_EAVALS,
- &RMF_EAVALS_LENS
-};
-
-static const struct req_msg_field *mds_getxattr_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
- &RMF_NAME,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *mds_getxattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *mds_getattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *mds_setattr_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDT_MD,
- &RMF_ACL,
- &RMF_CAPA1,
- &RMF_CAPA2
-};
-
-static const struct req_msg_field *mds_update_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_UPDATE,
-};
-
-static const struct req_msg_field *mds_update_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_UPDATE_REPLY,
-};
-
-static const struct req_msg_field *llog_origin_handle_create_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_BODY,
- &RMF_NAME
-};
-
-static const struct req_msg_field *llogd_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_BODY
-};
-
-static const struct req_msg_field *llog_log_hdr_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOG_LOG_HDR
-};
-
-static const struct req_msg_field *llogd_conn_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_CONN_BODY
-};
-
-static const struct req_msg_field *llog_origin_handle_next_block_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_LLOGD_BODY,
- &RMF_EADATA
-};
-
-static const struct req_msg_field *obd_idx_read_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_IDX_INFO
-};
-
-static const struct req_msg_field *obd_idx_read_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_IDX_INFO
-};
-
-static const struct req_msg_field *ost_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY
-};
-
-static const struct req_msg_field *ost_body_capa[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *ost_destroy_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_DLM_REQ,
- &RMF_CAPA1
-};
-
-
-static const struct req_msg_field *ost_brw_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_OBD_IOOBJ,
- &RMF_NIOBUF_REMOTE,
- &RMF_CAPA1
-};
-
-static const struct req_msg_field *ost_brw_read_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY
-};
-
-static const struct req_msg_field *ost_brw_write_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OST_BODY,
- &RMF_RCS
-};
-
-static const struct req_msg_field *ost_get_info_generic_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_GENERIC_DATA,
-};
-
-static const struct req_msg_field *ost_get_info_generic_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY
-};
-
-static const struct req_msg_field *ost_get_last_id_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_OBD_ID
-};
-
-static const struct req_msg_field *ost_get_last_fid_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FID,
-};
-
-static const struct req_msg_field *ost_get_fiemap_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FIEMAP_KEY,
- &RMF_FIEMAP_VAL
-};
-
-static const struct req_msg_field *ost_get_fiemap_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_FIEMAP_VAL
-};
-
-static const struct req_msg_field *mdt_hsm_progress[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_PROGRESS,
-};
-
-static const struct req_msg_field *mdt_hsm_ct_register[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_ARCHIVE,
-};
-
-static const struct req_msg_field *mdt_hsm_ct_unregister[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
-};
-
-static const struct req_msg_field *mdt_hsm_action_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_CURRENT_ACTION,
-};
-
-static const struct req_msg_field *mdt_hsm_state_get_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_HSM_USER_STATE,
-};
-
-static const struct req_msg_field *mdt_hsm_state_set[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_CAPA1,
- &RMF_HSM_STATE_SET,
-};
-
-static const struct req_msg_field *mdt_hsm_request[] = {
- &RMF_PTLRPC_BODY,
- &RMF_MDT_BODY,
- &RMF_MDS_HSM_REQUEST,
- &RMF_MDS_HSM_USER_ITEM,
- &RMF_GENERIC_DATA,
-};
-
-static struct req_format *req_formats[] = {
- &RQF_OBD_PING,
- &RQF_OBD_SET_INFO,
- &RQF_OBD_IDX_READ,
- &RQF_SEC_CTX,
- &RQF_MGS_TARGET_REG,
- &RQF_MGS_SET_INFO,
- &RQF_MGS_CONFIG_READ,
- &RQF_SEQ_QUERY,
- &RQF_FLD_QUERY,
- &RQF_MDS_CONNECT,
- &RQF_MDS_DISCONNECT,
- &RQF_MDS_GET_INFO,
- &RQF_MDS_GETSTATUS,
- &RQF_MDS_STATFS,
- &RQF_MDS_GETATTR,
- &RQF_MDS_GETATTR_NAME,
- &RQF_MDS_GETXATTR,
- &RQF_MDS_SYNC,
- &RQF_MDS_CLOSE,
- &RQF_MDS_RELEASE_CLOSE,
- &RQF_MDS_PIN,
- &RQF_MDS_UNPIN,
- &RQF_MDS_READPAGE,
- &RQF_MDS_WRITEPAGE,
- &RQF_MDS_IS_SUBDIR,
- &RQF_MDS_DONE_WRITING,
- &RQF_MDS_REINT,
- &RQF_MDS_REINT_CREATE,
- &RQF_MDS_REINT_CREATE_RMT_ACL,
- &RQF_MDS_REINT_CREATE_SLAVE,
- &RQF_MDS_REINT_CREATE_SYM,
- &RQF_MDS_REINT_OPEN,
- &RQF_MDS_REINT_UNLINK,
- &RQF_MDS_REINT_LINK,
- &RQF_MDS_REINT_RENAME,
- &RQF_MDS_REINT_SETATTR,
- &RQF_MDS_REINT_SETXATTR,
- &RQF_MDS_QUOTACHECK,
- &RQF_MDS_QUOTACTL,
- &RQF_MDS_HSM_PROGRESS,
- &RQF_MDS_HSM_CT_REGISTER,
- &RQF_MDS_HSM_CT_UNREGISTER,
- &RQF_MDS_HSM_STATE_GET,
- &RQF_MDS_HSM_STATE_SET,
- &RQF_MDS_HSM_ACTION,
- &RQF_MDS_HSM_REQUEST,
- &RQF_MDS_SWAP_LAYOUTS,
- &RQF_UPDATE_OBJ,
- &RQF_QC_CALLBACK,
- &RQF_OST_CONNECT,
- &RQF_OST_DISCONNECT,
- &RQF_OST_QUOTACHECK,
- &RQF_OST_QUOTACTL,
- &RQF_OST_GETATTR,
- &RQF_OST_SETATTR,
- &RQF_OST_CREATE,
- &RQF_OST_PUNCH,
- &RQF_OST_SYNC,
- &RQF_OST_DESTROY,
- &RQF_OST_BRW_READ,
- &RQF_OST_BRW_WRITE,
- &RQF_OST_STATFS,
- &RQF_OST_SET_GRANT_INFO,
- &RQF_OST_GET_INFO_GENERIC,
- &RQF_OST_GET_INFO_LAST_ID,
- &RQF_OST_GET_INFO_LAST_FID,
- &RQF_OST_SET_INFO_LAST_FID,
- &RQF_OST_GET_INFO_FIEMAP,
- &RQF_LDLM_ENQUEUE,
- &RQF_LDLM_ENQUEUE_LVB,
- &RQF_LDLM_CONVERT,
- &RQF_LDLM_CANCEL,
- &RQF_LDLM_CALLBACK,
- &RQF_LDLM_CP_CALLBACK,
- &RQF_LDLM_BL_CALLBACK,
- &RQF_LDLM_GL_CALLBACK,
- &RQF_LDLM_GL_DESC_CALLBACK,
- &RQF_LDLM_INTENT,
- &RQF_LDLM_INTENT_BASIC,
- &RQF_LDLM_INTENT_LAYOUT,
- &RQF_LDLM_INTENT_GETATTR,
- &RQF_LDLM_INTENT_OPEN,
- &RQF_LDLM_INTENT_CREATE,
- &RQF_LDLM_INTENT_UNLINK,
- &RQF_LDLM_INTENT_GETXATTR,
- &RQF_LDLM_INTENT_QUOTA,
- &RQF_QUOTA_DQACQ,
- &RQF_LOG_CANCEL,
- &RQF_LLOG_ORIGIN_HANDLE_CREATE,
- &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
- &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
- &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
- &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
- &RQF_LLOG_ORIGIN_CONNECT,
- &RQF_CONNECT,
-};
-
-struct req_msg_field {
- const __u32 rmf_flags;
- const char *rmf_name;
- /**
- * Field length. (-1) means "variable length". If the
- * \a RMF_F_STRUCT_ARRAY flag is set the field is also variable-length,
- * but the actual size must be a whole multiple of \a rmf_size.
- */
- const int rmf_size;
- void (*rmf_swabber)(void *);
- void (*rmf_dumper)(void *);
- int rmf_offset[ARRAY_SIZE(req_formats)][RCL_NR];
-};
-
-enum rmf_flags {
- /**
- * The field is a string, must be NUL-terminated.
- */
- RMF_F_STRING = BIT(0),
- /**
- * The field's buffer size need not match the declared \a rmf_size.
- */
- RMF_F_NO_SIZE_CHECK = BIT(1),
- /**
- * The field's buffer size must be a whole multiple of the declared \a
- * rmf_size and the \a rmf_swabber function must work on the declared \a
- * rmf_size worth of bytes.
- */
- RMF_F_STRUCT_ARRAY = BIT(2)
-};
-
-struct req_capsule;
-
-/*
- * Request fields.
- */
-#define DEFINE_MSGF(name, flags, size, swabber, dumper) { \
- .rmf_name = (name), \
- .rmf_flags = (flags), \
- .rmf_size = (size), \
- .rmf_swabber = (void (*)(void *))(swabber), \
- .rmf_dumper = (void (*)(void *))(dumper) \
-}
-
-struct req_msg_field RMF_GENERIC_DATA =
- DEFINE_MSGF("generic_data", 0,
- -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_GENERIC_DATA);
-
-struct req_msg_field RMF_MGS_TARGET_INFO =
- DEFINE_MSGF("mgs_target_info", 0,
- sizeof(struct mgs_target_info),
- lustre_swab_mgs_target_info, NULL);
-EXPORT_SYMBOL(RMF_MGS_TARGET_INFO);
-
-struct req_msg_field RMF_MGS_SEND_PARAM =
- DEFINE_MSGF("mgs_send_param", 0,
- sizeof(struct mgs_send_param),
- NULL, NULL);
-EXPORT_SYMBOL(RMF_MGS_SEND_PARAM);
-
-struct req_msg_field RMF_MGS_CONFIG_BODY =
- DEFINE_MSGF("mgs_config_read request", 0,
- sizeof(struct mgs_config_body),
- lustre_swab_mgs_config_body, NULL);
-EXPORT_SYMBOL(RMF_MGS_CONFIG_BODY);
-
-struct req_msg_field RMF_MGS_CONFIG_RES =
- DEFINE_MSGF("mgs_config_read reply ", 0,
- sizeof(struct mgs_config_res),
- lustre_swab_mgs_config_res, NULL);
-EXPORT_SYMBOL(RMF_MGS_CONFIG_RES);
-
-struct req_msg_field RMF_U32 =
- DEFINE_MSGF("generic u32", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_U32);
-
-struct req_msg_field RMF_SETINFO_VAL =
- DEFINE_MSGF("setinfo_val", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_SETINFO_VAL);
-
-struct req_msg_field RMF_GETINFO_KEY =
- DEFINE_MSGF("getinfo_key", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_GETINFO_KEY);
-
-struct req_msg_field RMF_GETINFO_VALLEN =
- DEFINE_MSGF("getinfo_vallen", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_GETINFO_VALLEN);
-
-struct req_msg_field RMF_GETINFO_VAL =
- DEFINE_MSGF("getinfo_val", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_GETINFO_VAL);
-
-struct req_msg_field RMF_SEQ_OPC =
- DEFINE_MSGF("seq_query_opc", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_SEQ_OPC);
-
-struct req_msg_field RMF_SEQ_RANGE =
- DEFINE_MSGF("seq_query_range", 0,
- sizeof(struct lu_seq_range),
- lustre_swab_lu_seq_range, NULL);
-EXPORT_SYMBOL(RMF_SEQ_RANGE);
-
-struct req_msg_field RMF_FLD_OPC =
- DEFINE_MSGF("fld_query_opc", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_FLD_OPC);
-
-struct req_msg_field RMF_FLD_MDFLD =
- DEFINE_MSGF("fld_query_mdfld", 0,
- sizeof(struct lu_seq_range),
- lustre_swab_lu_seq_range, NULL);
-EXPORT_SYMBOL(RMF_FLD_MDFLD);
-
-struct req_msg_field RMF_MDT_BODY =
- DEFINE_MSGF("mdt_body", 0,
- sizeof(struct mdt_body), lustre_swab_mdt_body, NULL);
-EXPORT_SYMBOL(RMF_MDT_BODY);
-
-struct req_msg_field RMF_OBD_QUOTACTL =
- DEFINE_MSGF("obd_quotactl", 0,
- sizeof(struct obd_quotactl),
- lustre_swab_obd_quotactl, NULL);
-EXPORT_SYMBOL(RMF_OBD_QUOTACTL);
-
-struct req_msg_field RMF_QUOTA_BODY =
- DEFINE_MSGF("quota_body", 0,
- sizeof(struct quota_body), lustre_swab_quota_body, NULL);
-EXPORT_SYMBOL(RMF_QUOTA_BODY);
-
-struct req_msg_field RMF_MDT_EPOCH =
- DEFINE_MSGF("mdt_ioepoch", 0,
- sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL);
-EXPORT_SYMBOL(RMF_MDT_EPOCH);
-
-struct req_msg_field RMF_PTLRPC_BODY =
- DEFINE_MSGF("ptlrpc_body", 0,
- sizeof(struct ptlrpc_body), lustre_swab_ptlrpc_body, NULL);
-EXPORT_SYMBOL(RMF_PTLRPC_BODY);
-
-struct req_msg_field RMF_CLOSE_DATA =
- DEFINE_MSGF("data_version", 0,
- sizeof(struct close_data), lustre_swab_close_data, NULL);
-EXPORT_SYMBOL(RMF_CLOSE_DATA);
-
-struct req_msg_field RMF_OBD_STATFS =
- DEFINE_MSGF("obd_statfs", 0,
- sizeof(struct obd_statfs), lustre_swab_obd_statfs, NULL);
-EXPORT_SYMBOL(RMF_OBD_STATFS);
-
-struct req_msg_field RMF_SETINFO_KEY =
- DEFINE_MSGF("setinfo_key", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_SETINFO_KEY);
-
-struct req_msg_field RMF_NAME =
- DEFINE_MSGF("name", RMF_F_STRING, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_NAME);
-
-struct req_msg_field RMF_SYMTGT =
- DEFINE_MSGF("symtgt", RMF_F_STRING, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_SYMTGT);
-
-struct req_msg_field RMF_TGTUUID =
- DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
-EXPORT_SYMBOL(RMF_TGTUUID);
-
-struct req_msg_field RMF_CLUUID =
- DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
-EXPORT_SYMBOL(RMF_CLUUID);
-
-struct req_msg_field RMF_STRING =
- DEFINE_MSGF("string", RMF_F_STRING, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_STRING);
-
-struct req_msg_field RMF_LLOGD_BODY =
- DEFINE_MSGF("llogd_body", 0,
- sizeof(struct llogd_body), lustre_swab_llogd_body, NULL);
-EXPORT_SYMBOL(RMF_LLOGD_BODY);
-
-struct req_msg_field RMF_LLOG_LOG_HDR =
- DEFINE_MSGF("llog_log_hdr", 0,
- sizeof(struct llog_log_hdr), lustre_swab_llog_hdr, NULL);
-EXPORT_SYMBOL(RMF_LLOG_LOG_HDR);
-
-struct req_msg_field RMF_LLOGD_CONN_BODY =
- DEFINE_MSGF("llogd_conn_body", 0,
- sizeof(struct llogd_conn_body),
- lustre_swab_llogd_conn_body, NULL);
-EXPORT_SYMBOL(RMF_LLOGD_CONN_BODY);
-
-/*
- * connection handle received in MDS_CONNECT request.
- *
- * No swabbing needed because struct lustre_handle contains only a 64-bit cookie
- * that the client does not interpret at all.
- */
-struct req_msg_field RMF_CONN =
- DEFINE_MSGF("conn", 0, sizeof(struct lustre_handle), NULL, NULL);
-EXPORT_SYMBOL(RMF_CONN);
-
-struct req_msg_field RMF_CONNECT_DATA =
- DEFINE_MSGF("cdata",
- RMF_F_NO_SIZE_CHECK /* we allow extra space for interop */,
- sizeof(struct obd_connect_data),
- lustre_swab_connect, NULL);
-EXPORT_SYMBOL(RMF_CONNECT_DATA);
-
-struct req_msg_field RMF_DLM_REQ =
- DEFINE_MSGF("dlm_req", RMF_F_NO_SIZE_CHECK /* ldlm_request_bufsize */,
- sizeof(struct ldlm_request),
- lustre_swab_ldlm_request, NULL);
-EXPORT_SYMBOL(RMF_DLM_REQ);
-
-struct req_msg_field RMF_DLM_REP =
- DEFINE_MSGF("dlm_rep", 0,
- sizeof(struct ldlm_reply), lustre_swab_ldlm_reply, NULL);
-EXPORT_SYMBOL(RMF_DLM_REP);
-
-struct req_msg_field RMF_LDLM_INTENT =
- DEFINE_MSGF("ldlm_intent", 0,
- sizeof(struct ldlm_intent), lustre_swab_ldlm_intent, NULL);
-EXPORT_SYMBOL(RMF_LDLM_INTENT);
-
-struct req_msg_field RMF_DLM_LVB =
- DEFINE_MSGF("dlm_lvb", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_DLM_LVB);
-
-struct req_msg_field RMF_DLM_GL_DESC =
- DEFINE_MSGF("dlm_gl_desc", 0, sizeof(union ldlm_gl_desc),
- lustre_swab_gl_desc, NULL);
-EXPORT_SYMBOL(RMF_DLM_GL_DESC);
-
-struct req_msg_field RMF_MDT_MD =
- DEFINE_MSGF("mdt_md", RMF_F_NO_SIZE_CHECK, MIN_MD_SIZE, NULL, NULL);
-EXPORT_SYMBOL(RMF_MDT_MD);
-
-struct req_msg_field RMF_REC_REINT =
- DEFINE_MSGF("rec_reint", 0, sizeof(struct mdt_rec_reint),
- lustre_swab_mdt_rec_reint, NULL);
-EXPORT_SYMBOL(RMF_REC_REINT);
-
-/* FIXME: this length should be defined as a macro */
-struct req_msg_field RMF_EADATA = DEFINE_MSGF("eadata", 0, -1,
- NULL, NULL);
-EXPORT_SYMBOL(RMF_EADATA);
-
-struct req_msg_field RMF_EAVALS = DEFINE_MSGF("eavals", 0, -1, NULL, NULL);
-EXPORT_SYMBOL(RMF_EAVALS);
-
-struct req_msg_field RMF_ACL =
- DEFINE_MSGF("acl", RMF_F_NO_SIZE_CHECK,
- LUSTRE_POSIX_ACL_MAX_SIZE, NULL, NULL);
-EXPORT_SYMBOL(RMF_ACL);
-
-/* FIXME: this should be made to use RMF_F_STRUCT_ARRAY */
-struct req_msg_field RMF_LOGCOOKIES =
- DEFINE_MSGF("logcookies", RMF_F_NO_SIZE_CHECK /* multiple cookies */,
- sizeof(struct llog_cookie), NULL, NULL);
-EXPORT_SYMBOL(RMF_LOGCOOKIES);
-
-struct req_msg_field RMF_CAPA1 =
- DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa),
- lustre_swab_lustre_capa, NULL);
-EXPORT_SYMBOL(RMF_CAPA1);
-
-struct req_msg_field RMF_CAPA2 =
- DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa),
- lustre_swab_lustre_capa, NULL);
-EXPORT_SYMBOL(RMF_CAPA2);
-
-struct req_msg_field RMF_LAYOUT_INTENT =
- DEFINE_MSGF("layout_intent", 0,
- sizeof(struct layout_intent), lustre_swab_layout_intent,
- NULL);
-EXPORT_SYMBOL(RMF_LAYOUT_INTENT);
-
-/*
- * OST request field.
- */
-struct req_msg_field RMF_OST_BODY =
- DEFINE_MSGF("ost_body", 0,
- sizeof(struct ost_body), lustre_swab_ost_body, dump_ost_body);
-EXPORT_SYMBOL(RMF_OST_BODY);
-
-struct req_msg_field RMF_OBD_IOOBJ =
- DEFINE_MSGF("obd_ioobj", RMF_F_STRUCT_ARRAY,
- sizeof(struct obd_ioobj), lustre_swab_obd_ioobj, dump_ioo);
-EXPORT_SYMBOL(RMF_OBD_IOOBJ);
-
-struct req_msg_field RMF_NIOBUF_REMOTE =
- DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY,
- sizeof(struct niobuf_remote), lustre_swab_niobuf_remote,
- dump_rniobuf);
-EXPORT_SYMBOL(RMF_NIOBUF_REMOTE);
-
-struct req_msg_field RMF_RCS =
- DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY, sizeof(__u32),
- lustre_swab_generic_32s, dump_rcs);
-EXPORT_SYMBOL(RMF_RCS);
-
-struct req_msg_field RMF_EAVALS_LENS =
- DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32),
- lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_EAVALS_LENS);
-
-struct req_msg_field RMF_OBD_ID =
- DEFINE_MSGF("u64", 0,
- sizeof(u64), lustre_swab_ost_last_id, NULL);
-EXPORT_SYMBOL(RMF_OBD_ID);
-
-struct req_msg_field RMF_FID =
- DEFINE_MSGF("fid", 0,
- sizeof(struct lu_fid), lustre_swab_lu_fid, NULL);
-EXPORT_SYMBOL(RMF_FID);
-
-struct req_msg_field RMF_OST_ID =
- DEFINE_MSGF("ost_id", 0,
- sizeof(struct ost_id), lustre_swab_ost_id, NULL);
-EXPORT_SYMBOL(RMF_OST_ID);
-
-struct req_msg_field RMF_FIEMAP_KEY =
- DEFINE_MSGF("fiemap", 0, sizeof(struct ll_fiemap_info_key),
- lustre_swab_fiemap, NULL);
-EXPORT_SYMBOL(RMF_FIEMAP_KEY);
-
-struct req_msg_field RMF_FIEMAP_VAL =
- DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL);
-EXPORT_SYMBOL(RMF_FIEMAP_VAL);
-
-struct req_msg_field RMF_IDX_INFO =
- DEFINE_MSGF("idx_info", 0, sizeof(struct idx_info),
- lustre_swab_idx_info, NULL);
-EXPORT_SYMBOL(RMF_IDX_INFO);
-struct req_msg_field RMF_HSM_USER_STATE =
- DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state),
- lustre_swab_hsm_user_state, NULL);
-EXPORT_SYMBOL(RMF_HSM_USER_STATE);
-
-struct req_msg_field RMF_HSM_STATE_SET =
- DEFINE_MSGF("hsm_state_set", 0, sizeof(struct hsm_state_set),
- lustre_swab_hsm_state_set, NULL);
-EXPORT_SYMBOL(RMF_HSM_STATE_SET);
-
-struct req_msg_field RMF_MDS_HSM_PROGRESS =
- DEFINE_MSGF("hsm_progress", 0, sizeof(struct hsm_progress_kernel),
- lustre_swab_hsm_progress_kernel, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_PROGRESS);
-
-struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION =
- DEFINE_MSGF("hsm_current_action", 0, sizeof(struct hsm_current_action),
- lustre_swab_hsm_current_action, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_CURRENT_ACTION);
-
-struct req_msg_field RMF_MDS_HSM_USER_ITEM =
- DEFINE_MSGF("hsm_user_item", RMF_F_STRUCT_ARRAY,
- sizeof(struct hsm_user_item), lustre_swab_hsm_user_item,
- NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_USER_ITEM);
-
-struct req_msg_field RMF_MDS_HSM_ARCHIVE =
- DEFINE_MSGF("hsm_archive", 0,
- sizeof(__u32), lustre_swab_generic_32s, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_ARCHIVE);
-
-struct req_msg_field RMF_MDS_HSM_REQUEST =
- DEFINE_MSGF("hsm_request", 0, sizeof(struct hsm_request),
- lustre_swab_hsm_request, NULL);
-EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST);
-
-struct req_msg_field RMF_UPDATE = DEFINE_MSGF("update", 0, -1,
- lustre_swab_update_buf, NULL);
-EXPORT_SYMBOL(RMF_UPDATE);
-
-struct req_msg_field RMF_UPDATE_REPLY = DEFINE_MSGF("update_reply", 0, -1,
- lustre_swab_update_reply_buf,
- NULL);
-EXPORT_SYMBOL(RMF_UPDATE_REPLY);
-
-struct req_msg_field RMF_SWAP_LAYOUTS =
- DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts),
- lustre_swab_swap_layouts, NULL);
-EXPORT_SYMBOL(RMF_SWAP_LAYOUTS);
-/*
- * Request formats.
- */
-
-struct req_format {
- const char *rf_name;
- int rf_idx;
- struct {
- int nr;
- const struct req_msg_field **d;
- } rf_fields[RCL_NR];
-};
-
-#define DEFINE_REQ_FMT(name, client, client_nr, server, server_nr) { \
- .rf_name = name, \
- .rf_fields = { \
- [RCL_CLIENT] = { \
- .nr = client_nr, \
- .d = client \
- }, \
- [RCL_SERVER] = { \
- .nr = server_nr, \
- .d = server \
- } \
- } \
-}
-
-#define DEFINE_REQ_FMT0(name, client, server) \
-DEFINE_REQ_FMT(name, client, ARRAY_SIZE(client), server, ARRAY_SIZE(server))
-
-struct req_format RQF_OBD_PING =
- DEFINE_REQ_FMT0("OBD_PING", empty, empty);
-EXPORT_SYMBOL(RQF_OBD_PING);
-
-struct req_format RQF_OBD_SET_INFO =
- DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty);
-EXPORT_SYMBOL(RQF_OBD_SET_INFO);
-
-/* Read index file through the network */
-struct req_format RQF_OBD_IDX_READ =
- DEFINE_REQ_FMT0("OBD_IDX_READ",
- obd_idx_read_client, obd_idx_read_server);
-EXPORT_SYMBOL(RQF_OBD_IDX_READ);
-
-struct req_format RQF_SEC_CTX =
- DEFINE_REQ_FMT0("SEC_CTX", empty, empty);
-EXPORT_SYMBOL(RQF_SEC_CTX);
-
-struct req_format RQF_MGS_TARGET_REG =
- DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only,
- mgs_target_info_only);
-EXPORT_SYMBOL(RQF_MGS_TARGET_REG);
-
-struct req_format RQF_MGS_SET_INFO =
- DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info,
- mgs_set_info);
-EXPORT_SYMBOL(RQF_MGS_SET_INFO);
-
-struct req_format RQF_MGS_CONFIG_READ =
- DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client,
- mgs_config_read_server);
-EXPORT_SYMBOL(RQF_MGS_CONFIG_READ);
-
-struct req_format RQF_SEQ_QUERY =
- DEFINE_REQ_FMT0("SEQ_QUERY", seq_query_client, seq_query_server);
-EXPORT_SYMBOL(RQF_SEQ_QUERY);
-
-struct req_format RQF_FLD_QUERY =
- DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server);
-EXPORT_SYMBOL(RQF_FLD_QUERY);
-
-struct req_format RQF_LOG_CANCEL =
- DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
-EXPORT_SYMBOL(RQF_LOG_CANCEL);
-
-struct req_format RQF_MDS_QUOTACHECK =
- DEFINE_REQ_FMT0("MDS_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_MDS_QUOTACHECK);
-
-struct req_format RQF_OST_QUOTACHECK =
- DEFINE_REQ_FMT0("OST_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_OST_QUOTACHECK);
-
-struct req_format RQF_MDS_QUOTACTL =
- DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only);
-EXPORT_SYMBOL(RQF_MDS_QUOTACTL);
-
-struct req_format RQF_OST_QUOTACTL =
- DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only);
-EXPORT_SYMBOL(RQF_OST_QUOTACTL);
-
-struct req_format RQF_QC_CALLBACK =
- DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_QC_CALLBACK);
-
-struct req_format RQF_QUOTA_DQACQ =
- DEFINE_REQ_FMT0("QUOTA_DQACQ", quota_body_only, quota_body_only);
-EXPORT_SYMBOL(RQF_QUOTA_DQACQ);
-
-struct req_format RQF_LDLM_INTENT_QUOTA =
- DEFINE_REQ_FMT0("LDLM_INTENT_QUOTA",
- ldlm_intent_quota_client,
- ldlm_intent_quota_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_QUOTA);
-
-struct req_format RQF_MDS_GETSTATUS =
- DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
-
-struct req_format RQF_MDS_STATFS =
- DEFINE_REQ_FMT0("MDS_STATFS", empty, obd_statfs_server);
-EXPORT_SYMBOL(RQF_MDS_STATFS);
-
-struct req_format RQF_MDS_SYNC =
- DEFINE_REQ_FMT0("MDS_SYNC", mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_SYNC);
-
-struct req_format RQF_MDS_GETATTR =
- DEFINE_REQ_FMT0("MDS_GETATTR", mdt_body_capa, mds_getattr_server);
-EXPORT_SYMBOL(RQF_MDS_GETATTR);
-
-struct req_format RQF_MDS_GETXATTR =
- DEFINE_REQ_FMT0("MDS_GETXATTR",
- mds_getxattr_client, mds_getxattr_server);
-EXPORT_SYMBOL(RQF_MDS_GETXATTR);
-
-struct req_format RQF_MDS_GETATTR_NAME =
- DEFINE_REQ_FMT0("MDS_GETATTR_NAME",
- mds_getattr_name_client, mds_getattr_server);
-EXPORT_SYMBOL(RQF_MDS_GETATTR_NAME);
-
-struct req_format RQF_MDS_REINT =
- DEFINE_REQ_FMT0("MDS_REINT", mds_reint_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_REINT);
-
-struct req_format RQF_MDS_REINT_CREATE =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE",
- mds_reint_create_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE);
-
-struct req_format RQF_MDS_REINT_CREATE_RMT_ACL =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_RMT_ACL",
- mds_reint_create_rmt_acl_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_RMT_ACL);
-
-struct req_format RQF_MDS_REINT_CREATE_SLAVE =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_EA",
- mds_reint_create_slave_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SLAVE);
-
-struct req_format RQF_MDS_REINT_CREATE_SYM =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_SYM",
- mds_reint_create_sym_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SYM);
-
-struct req_format RQF_MDS_REINT_OPEN =
- DEFINE_REQ_FMT0("MDS_REINT_OPEN",
- mds_reint_open_client, mds_reint_open_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_OPEN);
-
-struct req_format RQF_MDS_REINT_UNLINK =
- DEFINE_REQ_FMT0("MDS_REINT_UNLINK", mds_reint_unlink_client,
- mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_UNLINK);
-
-struct req_format RQF_MDS_REINT_LINK =
- DEFINE_REQ_FMT0("MDS_REINT_LINK",
- mds_reint_link_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_REINT_LINK);
-
-struct req_format RQF_MDS_REINT_RENAME =
- DEFINE_REQ_FMT0("MDS_REINT_RENAME", mds_reint_rename_client,
- mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_RENAME);
-
-struct req_format RQF_MDS_REINT_SETATTR =
- DEFINE_REQ_FMT0("MDS_REINT_SETATTR",
- mds_reint_setattr_client, mds_setattr_server);
-EXPORT_SYMBOL(RQF_MDS_REINT_SETATTR);
-
-struct req_format RQF_MDS_REINT_SETXATTR =
- DEFINE_REQ_FMT0("MDS_REINT_SETXATTR",
- mds_reint_setxattr_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_REINT_SETXATTR);
-
-struct req_format RQF_MDS_CONNECT =
- DEFINE_REQ_FMT0("MDS_CONNECT",
- obd_connect_client, obd_connect_server);
-EXPORT_SYMBOL(RQF_MDS_CONNECT);
-
-struct req_format RQF_MDS_DISCONNECT =
- DEFINE_REQ_FMT0("MDS_DISCONNECT", empty, empty);
-EXPORT_SYMBOL(RQF_MDS_DISCONNECT);
-
-struct req_format RQF_MDS_GET_INFO =
- DEFINE_REQ_FMT0("MDS_GET_INFO", mds_getinfo_client,
- mds_getinfo_server);
-EXPORT_SYMBOL(RQF_MDS_GET_INFO);
-
-struct req_format RQF_UPDATE_OBJ =
- DEFINE_REQ_FMT0("OBJECT_UPDATE_OBJ", mds_update_client,
- mds_update_server);
-EXPORT_SYMBOL(RQF_UPDATE_OBJ);
-
-struct req_format RQF_LDLM_ENQUEUE =
- DEFINE_REQ_FMT0("LDLM_ENQUEUE",
- ldlm_enqueue_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_ENQUEUE);
-
-struct req_format RQF_LDLM_ENQUEUE_LVB =
- DEFINE_REQ_FMT0("LDLM_ENQUEUE_LVB",
- ldlm_enqueue_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_ENQUEUE_LVB);
-
-struct req_format RQF_LDLM_CONVERT =
- DEFINE_REQ_FMT0("LDLM_CONVERT",
- ldlm_enqueue_client, ldlm_enqueue_server);
-EXPORT_SYMBOL(RQF_LDLM_CONVERT);
-
-struct req_format RQF_LDLM_CANCEL =
- DEFINE_REQ_FMT0("LDLM_CANCEL", ldlm_enqueue_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_CANCEL);
-
-struct req_format RQF_LDLM_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_CALLBACK", ldlm_enqueue_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_CALLBACK);
-
-struct req_format RQF_LDLM_CP_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_CP_CALLBACK", ldlm_cp_callback_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_CP_CALLBACK);
-
-struct req_format RQF_LDLM_BL_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_BL_CALLBACK", ldlm_enqueue_client, empty);
-EXPORT_SYMBOL(RQF_LDLM_BL_CALLBACK);
-
-struct req_format RQF_LDLM_GL_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_enqueue_client,
- ldlm_gl_callback_server);
-EXPORT_SYMBOL(RQF_LDLM_GL_CALLBACK);
-
-struct req_format RQF_LDLM_GL_DESC_CALLBACK =
- DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_gl_callback_desc_client,
- ldlm_gl_callback_server);
-EXPORT_SYMBOL(RQF_LDLM_GL_DESC_CALLBACK);
-
-struct req_format RQF_LDLM_INTENT_BASIC =
- DEFINE_REQ_FMT0("LDLM_INTENT_BASIC",
- ldlm_intent_basic_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_BASIC);
-
-struct req_format RQF_LDLM_INTENT =
- DEFINE_REQ_FMT0("LDLM_INTENT",
- ldlm_intent_client, ldlm_intent_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT);
-
-struct req_format RQF_LDLM_INTENT_LAYOUT =
- DEFINE_REQ_FMT0("LDLM_INTENT_LAYOUT ",
- ldlm_intent_layout_client, ldlm_enqueue_lvb_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_LAYOUT);
-
-struct req_format RQF_LDLM_INTENT_GETATTR =
- DEFINE_REQ_FMT0("LDLM_INTENT_GETATTR",
- ldlm_intent_getattr_client, ldlm_intent_getattr_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_GETATTR);
-
-struct req_format RQF_LDLM_INTENT_OPEN =
- DEFINE_REQ_FMT0("LDLM_INTENT_OPEN",
- ldlm_intent_open_client, ldlm_intent_open_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_OPEN);
-
-struct req_format RQF_LDLM_INTENT_CREATE =
- DEFINE_REQ_FMT0("LDLM_INTENT_CREATE",
- ldlm_intent_create_client, ldlm_intent_getattr_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_CREATE);
-
-struct req_format RQF_LDLM_INTENT_UNLINK =
- DEFINE_REQ_FMT0("LDLM_INTENT_UNLINK",
- ldlm_intent_unlink_client, ldlm_intent_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_UNLINK);
-
-struct req_format RQF_LDLM_INTENT_GETXATTR =
- DEFINE_REQ_FMT0("LDLM_INTENT_GETXATTR",
- ldlm_intent_getxattr_client,
- ldlm_intent_getxattr_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_GETXATTR);
-
-struct req_format RQF_MDS_CLOSE =
- DEFINE_REQ_FMT0("MDS_CLOSE",
- mdt_close_client, mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_CLOSE);
-
-struct req_format RQF_MDS_RELEASE_CLOSE =
- DEFINE_REQ_FMT0("MDS_CLOSE",
- mdt_release_close_client, mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE);
-
-struct req_format RQF_MDS_PIN =
- DEFINE_REQ_FMT0("MDS_PIN",
- mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_PIN);
-
-struct req_format RQF_MDS_UNPIN =
- DEFINE_REQ_FMT0("MDS_UNPIN", mdt_body_only, empty);
-EXPORT_SYMBOL(RQF_MDS_UNPIN);
-
-struct req_format RQF_MDS_DONE_WRITING =
- DEFINE_REQ_FMT0("MDS_DONE_WRITING",
- mdt_close_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_DONE_WRITING);
-
-struct req_format RQF_MDS_READPAGE =
- DEFINE_REQ_FMT0("MDS_READPAGE",
- mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_READPAGE);
-
-struct req_format RQF_MDS_HSM_ACTION =
- DEFINE_REQ_FMT0("MDS_HSM_ACTION", mdt_body_capa, mdt_hsm_action_server);
-EXPORT_SYMBOL(RQF_MDS_HSM_ACTION);
-
-struct req_format RQF_MDS_HSM_PROGRESS =
- DEFINE_REQ_FMT0("MDS_HSM_PROGRESS", mdt_hsm_progress, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_PROGRESS);
-
-struct req_format RQF_MDS_HSM_CT_REGISTER =
- DEFINE_REQ_FMT0("MDS_HSM_CT_REGISTER", mdt_hsm_ct_register, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_CT_REGISTER);
-
-struct req_format RQF_MDS_HSM_CT_UNREGISTER =
- DEFINE_REQ_FMT0("MDS_HSM_CT_UNREGISTER", mdt_hsm_ct_unregister, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_CT_UNREGISTER);
-
-struct req_format RQF_MDS_HSM_STATE_GET =
- DEFINE_REQ_FMT0("MDS_HSM_STATE_GET",
- mdt_body_capa, mdt_hsm_state_get_server);
-EXPORT_SYMBOL(RQF_MDS_HSM_STATE_GET);
-
-struct req_format RQF_MDS_HSM_STATE_SET =
- DEFINE_REQ_FMT0("MDS_HSM_STATE_SET", mdt_hsm_state_set, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_STATE_SET);
-
-struct req_format RQF_MDS_HSM_REQUEST =
- DEFINE_REQ_FMT0("MDS_HSM_REQUEST", mdt_hsm_request, empty);
-EXPORT_SYMBOL(RQF_MDS_HSM_REQUEST);
-
-struct req_format RQF_MDS_SWAP_LAYOUTS =
- DEFINE_REQ_FMT0("MDS_SWAP_LAYOUTS",
- mdt_swap_layouts, empty);
-EXPORT_SYMBOL(RQF_MDS_SWAP_LAYOUTS);
-
-/* This is for split */
-struct req_format RQF_MDS_WRITEPAGE =
- DEFINE_REQ_FMT0("MDS_WRITEPAGE",
- mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_WRITEPAGE);
-
-struct req_format RQF_MDS_IS_SUBDIR =
- DEFINE_REQ_FMT0("MDS_IS_SUBDIR",
- mdt_body_only, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_IS_SUBDIR);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_CREATE",
- llog_origin_handle_create_client, llogd_body_only);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_CREATE);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_DESTROY",
- llogd_body_only, llogd_body_only);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_DESTROY);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_NEXT_BLOCK",
- llogd_body_only, llog_origin_handle_next_block_server);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_PREV_BLOCK",
- llogd_body_only, llog_origin_handle_next_block_server);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
-
-struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_READ_HEADER",
- llogd_body_only, llog_log_hdr_only);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
-
-struct req_format RQF_LLOG_ORIGIN_CONNECT =
- DEFINE_REQ_FMT0("LLOG_ORIGIN_CONNECT", llogd_conn_body_only, empty);
-EXPORT_SYMBOL(RQF_LLOG_ORIGIN_CONNECT);
-
-struct req_format RQF_CONNECT =
- DEFINE_REQ_FMT0("CONNECT", obd_connect_client, obd_connect_server);
-EXPORT_SYMBOL(RQF_CONNECT);
-
-struct req_format RQF_OST_CONNECT =
- DEFINE_REQ_FMT0("OST_CONNECT",
- obd_connect_client, obd_connect_server);
-EXPORT_SYMBOL(RQF_OST_CONNECT);
-
-struct req_format RQF_OST_DISCONNECT =
- DEFINE_REQ_FMT0("OST_DISCONNECT", empty, empty);
-EXPORT_SYMBOL(RQF_OST_DISCONNECT);
-
-struct req_format RQF_OST_GETATTR =
- DEFINE_REQ_FMT0("OST_GETATTR", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_GETATTR);
-
-struct req_format RQF_OST_SETATTR =
- DEFINE_REQ_FMT0("OST_SETATTR", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_SETATTR);
-
-struct req_format RQF_OST_CREATE =
- DEFINE_REQ_FMT0("OST_CREATE", ost_body_only, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_CREATE);
-
-struct req_format RQF_OST_PUNCH =
- DEFINE_REQ_FMT0("OST_PUNCH", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_PUNCH);
-
-struct req_format RQF_OST_SYNC =
- DEFINE_REQ_FMT0("OST_SYNC", ost_body_capa, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_SYNC);
-
-struct req_format RQF_OST_DESTROY =
- DEFINE_REQ_FMT0("OST_DESTROY", ost_destroy_client, ost_body_only);
-EXPORT_SYMBOL(RQF_OST_DESTROY);
-
-struct req_format RQF_OST_BRW_READ =
- DEFINE_REQ_FMT0("OST_BRW_READ", ost_brw_client, ost_brw_read_server);
-EXPORT_SYMBOL(RQF_OST_BRW_READ);
-
-struct req_format RQF_OST_BRW_WRITE =
- DEFINE_REQ_FMT0("OST_BRW_WRITE", ost_brw_client, ost_brw_write_server);
-EXPORT_SYMBOL(RQF_OST_BRW_WRITE);
-
-struct req_format RQF_OST_STATFS =
- DEFINE_REQ_FMT0("OST_STATFS", empty, obd_statfs_server);
-EXPORT_SYMBOL(RQF_OST_STATFS);
-
-struct req_format RQF_OST_SET_GRANT_INFO =
- DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client,
- ost_body_only);
-EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
-
-struct req_format RQF_OST_GET_INFO_GENERIC =
- DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
- ost_get_info_generic_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC);
-
-struct req_format RQF_OST_GET_INFO_LAST_ID =
- DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
- ost_get_last_id_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
-
-struct req_format RQF_OST_GET_INFO_LAST_FID =
- DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client,
- ost_get_last_fid_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
-
-struct req_format RQF_OST_SET_INFO_LAST_FID =
- DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client,
- empty);
-EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID);
-
-struct req_format RQF_OST_GET_INFO_FIEMAP =
- DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client,
- ost_get_fiemap_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP);
-
-#if !defined(__REQ_LAYOUT_USER__)
-
-/* Convenience macro */
-#define FMT_FIELD(fmt, i, j) (fmt)->rf_fields[(i)].d[(j)]
-
-/**
- * Initializes the capsule abstraction by computing and setting the \a rf_idx
- * field of RQFs and the \a rmf_offset field of RMFs.
- */
-int req_layout_init(void)
-{
- int i;
- int j;
- int k;
- struct req_format *rf = NULL;
-
- for (i = 0; i < ARRAY_SIZE(req_formats); ++i) {
- rf = req_formats[i];
- rf->rf_idx = i;
- for (j = 0; j < RCL_NR; ++j) {
- LASSERT(rf->rf_fields[j].nr <= REQ_MAX_FIELD_NR);
- for (k = 0; k < rf->rf_fields[j].nr; ++k) {
- struct req_msg_field *field;
-
- field = (typeof(field))rf->rf_fields[j].d[k];
- LASSERT(!(field->rmf_flags & RMF_F_STRUCT_ARRAY)
- || field->rmf_size > 0);
- LASSERT(field->rmf_offset[i][j] == 0);
- /*
- * k + 1 to detect unused format/field
- * combinations.
- */
- field->rmf_offset[i][j] = k + 1;
- }
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(req_layout_init);
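/*
 * Not from the original file: a minimal sketch of the offset encoding that
 * req_layout_init() sets up above.  rmf_offset stores "lustre_msg buffer
 * index + 1" so that zero can mean "field unused by this format";
 * __req_capsule_offset() further down undoes the +1.  The helper name is
 * made up for illustration.
 */
static int example_field_offset(const struct req_format *fmt,
				const struct req_msg_field *field,
				enum req_location loc)
{
	int stored = field->rmf_offset[fmt->rf_idx][loc];

	/* 0 means the field does not appear in this format at all */
	return stored == 0 ? -ENOENT : stored - 1;
}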
-
-void req_layout_fini(void)
-{
-}
-EXPORT_SYMBOL(req_layout_fini);
-
-/**
- * Initializes the expected sizes of each RMF in a \a pill (\a rc_area) to -1.
- *
- * Actual/expected field sizes are set elsewhere in functions in this file:
- * req_capsule_init(), req_capsule_server_pack(), req_capsule_set_size() and
- * req_capsule_msg_size(). The \a rc_area information is used by
- * ptlrpc_request_set_replen().
- */
-void req_capsule_init_area(struct req_capsule *pill)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pill->rc_area[RCL_CLIENT]); i++) {
- pill->rc_area[RCL_CLIENT][i] = -1;
- pill->rc_area[RCL_SERVER][i] = -1;
- }
-}
-EXPORT_SYMBOL(req_capsule_init_area);
-
-/**
- * Initialize a pill.
- *
- * The \a location indicates whether the caller is executing on the client side
- * (RCL_CLIENT) or server side (RCL_SERVER).
- */
-void req_capsule_init(struct req_capsule *pill,
- struct ptlrpc_request *req,
- enum req_location location)
-{
- LASSERT(location == RCL_SERVER || location == RCL_CLIENT);
-
- /*
- * Today all capsules are embedded in ptlrpc_request structs,
- * but just in case that ever isn't the case, we don't reach
- * into req unless req != NULL and pill is the one embedded in
- * the req.
- *
- * The req->rq_pill_init flag makes it safe to initialize a pill
- * twice, which might happen in the OST paths as a result of the
- * high-priority RPC queue getting peeked at before ost_handle()
- * handles an OST RPC.
- */
- if (req != NULL && pill == &req->rq_pill && req->rq_pill_init)
- return;
-
- memset(pill, 0, sizeof(*pill));
- pill->rc_req = req;
- pill->rc_loc = location;
- req_capsule_init_area(pill);
-
- if (req != NULL && pill == &req->rq_pill)
- req->rq_pill_init = 1;
-}
-EXPORT_SYMBOL(req_capsule_init);
-
-void req_capsule_fini(struct req_capsule *pill)
-{
-}
-EXPORT_SYMBOL(req_capsule_fini);
-
-static int __req_format_is_sane(const struct req_format *fmt)
-{
- return
- 0 <= fmt->rf_idx && fmt->rf_idx < ARRAY_SIZE(req_formats) &&
- req_formats[fmt->rf_idx] == fmt;
-}
-
-static struct lustre_msg *__req_msg(const struct req_capsule *pill,
- enum req_location loc)
-{
- struct ptlrpc_request *req;
-
- req = pill->rc_req;
- return loc == RCL_CLIENT ? req->rq_reqmsg : req->rq_repmsg;
-}
-
-/**
- * Set the format (\a fmt) of a \a pill; format changes are not allowed here
- * (see req_capsule_extend()).
- */
-void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt)
-{
- LASSERT(pill->rc_fmt == NULL || pill->rc_fmt == fmt);
- LASSERT(__req_format_is_sane(fmt));
-
- pill->rc_fmt = fmt;
-}
-EXPORT_SYMBOL(req_capsule_set);
-
-/**
- * Fills in any parts of the \a rc_area of a \a pill that haven't been filled in
- * yet.
- *
- * \a rc_area is an array of REQ_MAX_FIELD_NR elements, used to store sizes of
- * variable-sized fields. The field sizes come from the declared \a rmf_size
- * field of a \a pill's \a rc_fmt's RMF's.
- */
-int req_capsule_filled_sizes(struct req_capsule *pill,
- enum req_location loc)
-{
- const struct req_format *fmt = pill->rc_fmt;
- int i;
-
- LASSERT(fmt != NULL);
-
- for (i = 0; i < fmt->rf_fields[loc].nr; ++i) {
- if (pill->rc_area[loc][i] == -1) {
- pill->rc_area[loc][i] =
- fmt->rf_fields[loc].d[i]->rmf_size;
- if (pill->rc_area[loc][i] == -1) {
- /*
- * Skip the following fields.
- *
- * If this LASSERT() trips then you're missing a
- * call to req_capsule_set_size().
- */
- LASSERT(loc != RCL_SERVER);
- break;
- }
- }
- }
- return i;
-}
-EXPORT_SYMBOL(req_capsule_filled_sizes);
-
-/**
- * Capsule equivalent of lustre_pack_request() and lustre_pack_reply().
- *
- * This function uses the \a pill's \a rc_area as filled in by
- * req_capsule_set_size() or req_capsule_filled_sizes() (the latter is called by
- * this function).
- */
-int req_capsule_server_pack(struct req_capsule *pill)
-{
- const struct req_format *fmt;
- int count;
- int rc;
-
- LASSERT(pill->rc_loc == RCL_SERVER);
- fmt = pill->rc_fmt;
- LASSERT(fmt != NULL);
-
- count = req_capsule_filled_sizes(pill, RCL_SERVER);
- rc = lustre_pack_reply(pill->rc_req, count,
- pill->rc_area[RCL_SERVER], NULL);
- if (rc != 0) {
- DEBUG_REQ(D_ERROR, pill->rc_req,
- "Cannot pack %d fields in format `%s': ",
- count, fmt->rf_name);
- }
- return rc;
-}
-EXPORT_SYMBOL(req_capsule_server_pack);
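/*
 * Not from the original file: a minimal sketch of the server-side reply
 * path built on the two functions above.  It assumes the pill's format was
 * already set by the request dispatch code and that the format's reply side
 * contains the variable-length RMF_EADATA field; the handler name and these
 * choices are assumptions for illustration only.
 */
static int example_reply_pack(struct ptlrpc_request *req, int val_len)
{
	void *val;
	int rc;

	/* variable-sized reply fields must be sized before packing */
	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, val_len);

	rc = req_capsule_server_pack(&req->rq_pill);
	if (rc)
		return rc;

	/* the reply buffers now exist and can be filled in */
	val = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
	if (val == NULL)
		return -EFAULT;

	memset(val, 0, val_len);
	return 0;
}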
-
-/**
- * Returns the PTLRPC request or reply (\a loc) buffer offset of a \a pill
- * corresponding to the given RMF (\a field).
- */
-static int __req_capsule_offset(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- int offset;
-
- offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc];
- LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n",
- pill->rc_fmt->rf_name,
- field->rmf_name, offset, loc);
- offset--;
-
- LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR);
- return offset;
-}
-
-/**
- * Helper for __req_capsule_get(); swabs value / array of values and/or dumps
- * them if desired.
- */
-static
-void
-swabber_dumper_helper(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc,
- int offset,
- void *value, int len, int dump, void (*swabber)(void *))
-{
- void *p;
- int i;
- int n;
- int do_swab;
- int inout = loc == RCL_CLIENT;
-
- swabber = swabber ?: field->rmf_swabber;
-
- if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) &&
- swabber != NULL && value != NULL)
- do_swab = 1;
- else
- do_swab = 0;
-
- if (!field->rmf_dumper)
- dump = 0;
-
- if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY)) {
- if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of %sfield %s follows\n",
- do_swab ? "unswabbed " : "", field->rmf_name);
- field->rmf_dumper(value);
- }
- if (!do_swab)
- return;
- swabber(value);
- ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
- if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n",
- field->rmf_name);
- field->rmf_dumper(value);
- }
-
- return;
- }
-
- /*
- * We're swabbing an array; swabber() swabs a single array element, so
- * swab every element.
- */
- LASSERT((len % field->rmf_size) == 0);
- for (p = value, i = 0, n = len / field->rmf_size;
- i < n;
- i++, p += field->rmf_size) {
- if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, element %d follows\n",
- do_swab ? "unswabbed " : "", field->rmf_name, i);
- field->rmf_dumper(p);
- }
- if (!do_swab)
- continue;
- swabber(p);
- if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, element %d follows\n",
- field->rmf_name, i);
- field->rmf_dumper(value);
- }
- }
- if (do_swab)
- ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
-}
-
-/**
- * Returns the pointer to a PTLRPC request or reply (\a loc) buffer of a \a pill
- * corresponding to the given RMF (\a field).
- *
- * The buffer will be swabbed using the given \a swabber. If \a swabber == NULL
- * then the \a rmf_swabber from the RMF will be used. Soon there will be no
- * calls to __req_capsule_get() with a non-NULL \a swabber; \a swabber will then
- * be removed. Fields with the \a RMF_F_STRUCT_ARRAY flag set will have each
- * element of the array swabbed.
- */
-static void *__req_capsule_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc,
- void (*swabber)(void *),
- int dump)
-{
- const struct req_format *fmt;
- struct lustre_msg *msg;
- void *value;
- int len;
- int offset;
-
- void *(*getter)(struct lustre_msg *m, int n, int minlen);
-
- static const char *rcl_names[RCL_NR] = {
- [RCL_CLIENT] = "client",
- [RCL_SERVER] = "server"
- };
-
- LASSERT(pill != NULL);
- LASSERT(pill != LP_POISON);
- fmt = pill->rc_fmt;
- LASSERT(fmt != NULL);
- LASSERT(fmt != LP_POISON);
- LASSERT(__req_format_is_sane(fmt));
-
- offset = __req_capsule_offset(pill, field, loc);
-
- msg = __req_msg(pill, loc);
- LASSERT(msg != NULL);
-
- getter = (field->rmf_flags & RMF_F_STRING) ?
- (typeof(getter))lustre_msg_string : lustre_msg_buf;
-
- if (field->rmf_flags & RMF_F_STRUCT_ARRAY) {
- /*
- * We've already asserted that field->rmf_size > 0 in
- * req_layout_init().
- */
- len = lustre_msg_buflen(msg, offset);
- if ((len % field->rmf_size) != 0) {
- CERROR("%s: array field size mismatch %d modulo %d != 0 (%d)\n",
- field->rmf_name, len, field->rmf_size, loc);
- return NULL;
- }
- } else if (pill->rc_area[loc][offset] != -1) {
- len = pill->rc_area[loc][offset];
- } else {
- len = max(field->rmf_size, 0);
- }
- value = getter(msg, offset, len);
-
- if (value == NULL) {
- DEBUG_REQ(D_ERROR, pill->rc_req,
- "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. %d (%s)\n",
- field->rmf_name, offset, lustre_msg_bufcount(msg),
- fmt->rf_name, lustre_msg_buflen(msg, offset), len,
- rcl_names[loc]);
- } else {
- swabber_dumper_helper(pill, field, loc, offset, value, len,
- dump, swabber);
- }
-
- return value;
-}
-
-/**
- * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC request
- * buffer corresponding to the given RMF (\a field) of a \a pill.
- */
-void *req_capsule_client_get(struct req_capsule *pill,
- const struct req_msg_field *field)
-{
- return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_client_get);
-
-/**
- * Same as req_capsule_client_get(), but with a \a swabber argument.
- *
- * Currently unused; will be removed when req_capsule_server_swab_get() is
- * unused too.
- */
-void *req_capsule_client_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber)
-{
- return __req_capsule_get(pill, field, RCL_CLIENT, swabber, 0);
-}
-EXPORT_SYMBOL(req_capsule_client_swab_get);
-
-/**
- * Utility that combines req_capsule_set_size() and req_capsule_client_get().
- *
- * First the \a pill's request \a field's size is set (\a rc_area) using
- * req_capsule_set_size() with the given \a len. Then the actual buffer is
- * returned.
- */
-void *req_capsule_client_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- int len)
-{
- req_capsule_set_size(pill, field, RCL_CLIENT, len);
- return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_client_sized_get);
-
-/**
- * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC reply
- * buffer corresponding to the given RMF (\a field) of a \a pill.
- */
-void *req_capsule_server_get(struct req_capsule *pill,
- const struct req_msg_field *field)
-{
- return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_get);
-
-/**
- * Same as req_capsule_server_get(), but with a \a swabber argument.
- *
- * Ideally all swabbing should be done pursuant to RMF definitions, with no
- * swabbing done outside this capsule abstraction.
- */
-void *req_capsule_server_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- void *swabber)
-{
- return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_swab_get);
-
-/**
- * Utility that combines req_capsule_set_size() and req_capsule_server_get().
- *
- * First the \a pill's request \a field's size is set (\a rc_area) using
- * req_capsule_set_size() with the given \a len. Then the actual buffer is
- * returned.
- */
-void *req_capsule_server_sized_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- int len)
-{
- req_capsule_set_size(pill, field, RCL_SERVER, len);
- return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_sized_get);
-
-void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
- const struct req_msg_field *field,
- int len, void *swabber)
-{
- req_capsule_set_size(pill, field, RCL_SERVER, len);
- return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0);
-}
-EXPORT_SYMBOL(req_capsule_server_sized_swab_get);
-
-/**
- * Set the size of the PTLRPC request/reply (\a loc) buffer for the given \a
- * field of the given \a pill.
- *
- * This function must be used when constructing variable sized fields of a
- * request or reply.
- */
-void req_capsule_set_size(struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc, int size)
-{
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
-
- if ((size != field->rmf_size) &&
- (field->rmf_size != -1) &&
- !(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
- (size > 0)) {
- if ((field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
- (size % field->rmf_size != 0)) {
- CERROR("%s: array field size mismatch %d %% %d != 0 (%d)\n",
- field->rmf_name, size, field->rmf_size, loc);
- LBUG();
- } else if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
- size < field->rmf_size) {
- CERROR("%s: field size mismatch %d != %d (%d)\n",
- field->rmf_name, size, field->rmf_size, loc);
- LBUG();
- }
- }
-
- pill->rc_area[loc][__req_capsule_offset(pill, field, loc)] = size;
-}
-EXPORT_SYMBOL(req_capsule_set_size);
-
-/**
- * Return the actual PTLRPC buffer length of a request or reply (\a loc)
- * for the given \a pill's given \a field.
- *
- * NB: this function does not correspond to req_capsule_set_size(), which
- * sets the size in pill.rc_area[loc][offset]; this function returns the
- * message buflen[offset] instead, so perhaps it should have another name.
- */
-int req_capsule_get_size(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
-
- return lustre_msg_buflen(__req_msg(pill, loc),
- __req_capsule_offset(pill, field, loc));
-}
-EXPORT_SYMBOL(req_capsule_get_size);
-
-/**
- * Wrapper around lustre_msg_size() that returns the PTLRPC size needed for the
- * given \a pill's request or reply (\a loc) given the field size recorded in
- * the \a pill's rc_area.
- *
- * See also req_capsule_set_size().
- */
-int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc)
-{
- return lustre_msg_size(pill->rc_req->rq_import->imp_msg_magic,
- pill->rc_fmt->rf_fields[loc].nr,
- pill->rc_area[loc]);
-}
-
-/**
- * While req_capsule_msg_size() computes the size of a PTLRPC request or reply
- * (\a loc) given a \a pill's \a rc_area, this function computes the size of a
- * PTLRPC request or reply given only an RQF (\a fmt).
- *
- * This function should not be used for formats which contain variable size
- * fields.
- */
-int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
- enum req_location loc)
-{
- int size, i = 0;
-
- /*
- * This function should probably LASSERT() that fmt has no fields with
- * RMF_F_STRUCT_ARRAY in rmf_flags, since we can't know here how many
- * elements in the array there will ultimately be, but then, we could
- * assume that there will be at least one element, and that's just what
- * we do.
- */
- size = lustre_msg_hdr_size(magic, fmt->rf_fields[loc].nr);
- if (size < 0)
- return size;
-
- for (; i < fmt->rf_fields[loc].nr; ++i)
- if (fmt->rf_fields[loc].d[i]->rmf_size != -1)
- size += cfs_size_round(fmt->rf_fields[loc].d[i]->
- rmf_size);
- return size;
-}
-
-/**
- * Changes the format of an RPC.
- *
- * The pill must already have been initialized, which means that it already has
- * a request format. The new format \a fmt must be an extension of the pill's
- * old format. Specifically: the new format must have as many request and reply
- * fields as the old one, and all fields shared by the old and new format must
- * be at least as large in the new format.
- *
- * The new format's fields may be of different "type" than the old format, but
- * only for fields that are "opaque" blobs: fields which a) have no
- * \a rmf_swabber, b) \a rmf_flags == 0 or RMF_F_NO_SIZE_CHECK, and c) \a
- * rmf_size == -1 or \a rmf_flags == RMF_F_NO_SIZE_CHECK. For example,
- * OBD_SET_INFO has a key field and an opaque value field that gets interpreted
- * according to the key field. When the value, according to the key, contains a
- * structure (or array thereof) to be swabbed, the format should be changed to
- * one where the value field has \a rmf_size/rmf_flags/rmf_swabber set
- * accordingly.
- */
-void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt)
-{
- int i;
- int j;
-
- const struct req_format *old;
-
- LASSERT(pill->rc_fmt != NULL);
- LASSERT(__req_format_is_sane(fmt));
-
- old = pill->rc_fmt;
- /*
- * Sanity checking...
- */
- for (i = 0; i < RCL_NR; ++i) {
- LASSERT(fmt->rf_fields[i].nr >= old->rf_fields[i].nr);
- for (j = 0; j < old->rf_fields[i].nr - 1; ++j) {
- const struct req_msg_field *ofield = FMT_FIELD(old, i, j);
-
- /* "opaque" fields can be transmogrified */
- if (ofield->rmf_swabber == NULL &&
- (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 &&
- (ofield->rmf_size == -1 ||
- ofield->rmf_flags == RMF_F_NO_SIZE_CHECK))
- continue;
- LASSERT(FMT_FIELD(fmt, i, j) == FMT_FIELD(old, i, j));
- }
- /*
- * Last field in old format can be shorter than in new.
- */
- LASSERT(FMT_FIELD(fmt, i, j)->rmf_size >=
- FMT_FIELD(old, i, j)->rmf_size);
- }
-
- pill->rc_fmt = fmt;
-}
-EXPORT_SYMBOL(req_capsule_extend);
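/*
 * Not from the original file: a minimal sketch of the OBD_SET_INFO example
 * described in the comment above.  The "grant_shrink" key and the choice of
 * RQF_OST_SET_GRANT_INFO as the extended format are assumptions for
 * illustration; both formats are defined earlier in this file, and the new
 * one declares a swabbable ost_body where the old one had an opaque value.
 */
static void example_extend_set_info(struct req_capsule *pill)
{
	char *key;
	int keylen;

	req_capsule_set(pill, &RQF_OBD_SET_INFO);
	key = req_capsule_client_get(pill, &RMF_SETINFO_KEY);
	keylen = req_capsule_get_size(pill, &RMF_SETINFO_KEY, RCL_CLIENT);

	if (key != NULL && keylen > 0 &&
	    strncmp(key, "grant_shrink", keylen) == 0)
		req_capsule_extend(pill, &RQF_OST_SET_GRANT_INFO);
}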
-
-/**
- * This function returns a non-zero value if the given \a field is present in
- * the format (\a rc_fmt) of \a pill's PTLRPC request or reply (\a loc), else it
- * returns 0.
- */
-int req_capsule_has_field(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
-
- return field->rmf_offset[pill->rc_fmt->rf_idx][loc];
-}
-EXPORT_SYMBOL(req_capsule_has_field);
-
-/**
- * Returns a non-zero value if the given \a field is present in the given \a
- * pill's PTLRPC request or reply (\a loc), else it returns 0.
- */
-int req_capsule_field_present(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc)
-{
- int offset;
-
- LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
- LASSERT(req_capsule_has_field(pill, field, loc));
-
- offset = __req_capsule_offset(pill, field, loc);
- return lustre_msg_bufcount(__req_msg(pill, loc)) > offset;
-}
-EXPORT_SYMBOL(req_capsule_field_present);
-
-/**
- * This function shrinks the size of the _buffer_ of the \a pill's PTLRPC
- * request or reply (\a loc).
- *
- * This is not the opposite of req_capsule_extend().
- */
-void req_capsule_shrink(struct req_capsule *pill,
- const struct req_msg_field *field,
- unsigned int newlen,
- enum req_location loc)
-{
- const struct req_format *fmt;
- struct lustre_msg *msg;
- int len;
- int offset;
-
- fmt = pill->rc_fmt;
- LASSERT(fmt != NULL);
- LASSERT(__req_format_is_sane(fmt));
- LASSERT(req_capsule_has_field(pill, field, loc));
- LASSERT(req_capsule_field_present(pill, field, loc));
-
- offset = __req_capsule_offset(pill, field, loc);
-
- msg = __req_msg(pill, loc);
- len = lustre_msg_buflen(msg, offset);
- LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n",
- fmt->rf_name, field->rmf_name, len, newlen);
-
- if (loc == RCL_CLIENT)
- pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen,
- 1);
- else
- pill->rc_req->rq_replen = lustre_shrink_msg(msg, offset, newlen,
- 1);
-}
-EXPORT_SYMBOL(req_capsule_shrink);
-
-/* __REQ_LAYOUT_USER__ */
-#endif
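Taken together, the capsule calls deleted above follow a fixed client-side
pattern: allocate a request against one of the RQF_* formats, pack it, fill
the request fields through req_capsule_client_get(), size the reply from the
rc_area with ptlrpc_request_set_replen(), send, and finally read the reply
fields through req_capsule_server_get().  The sketch below condenses that
pattern; it is illustrative only (the READ_HEADER format/opcode pairing and
the function name are assumptions based on the formats defined above), and
llog_client.c, deleted next, shows the same pattern in full.

static int example_client_rpc(struct obd_import *imp)
{
	struct ptlrpc_request *req;
	struct llogd_body *body;
	int rc;

	/* 1. allocate the request against a format declared above */
	req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
	if (req == NULL)
		return -ENOMEM;

	/* 2. pack it; this format's client side has only fixed-size fields */
	rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION,
				 LLOG_ORIGIN_HANDLE_READ_HEADER);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	/* 3. fill in the request fields through the capsule */
	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	memset(body, 0, sizeof(*body));

	/* 4. size the reply from rc_area and send */
	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);

	/* 5. read the reply fields through the same capsule */
	if (rc == 0 &&
	    req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR) == NULL)
		rc = -EFAULT;

	ptlrpc_req_finished(req);
	return rc;
}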
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
deleted file mode 100644
index fbb9ce8cc31a..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/llog_client.c
- *
- * remote api for llog - client side
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_class.h"
-#include "../include/lustre_log.h"
-#include "../include/lustre_net.h"
-#include <linux/list.h>
-
-#define LLOG_CLIENT_ENTRY(ctxt, imp) do { \
- mutex_lock(&ctxt->loc_mutex); \
- if (ctxt->loc_imp) { \
- imp = class_import_get(ctxt->loc_imp); \
- } else { \
- CERROR("ctxt->loc_imp == NULL for context idx %d." \
- "Unable to complete MDS/OSS recovery," \
- "but I'll try again next time. Not fatal.\n", \
- ctxt->loc_idx); \
- imp = NULL; \
- mutex_unlock(&ctxt->loc_mutex); \
- return (-EINVAL); \
- } \
- mutex_unlock(&ctxt->loc_mutex); \
-} while (0)
-
-#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
- mutex_lock(&ctxt->loc_mutex); \
- if (ctxt->loc_imp != imp) \
- CWARN("loc_imp has changed from %p to %p\n", \
- ctxt->loc_imp, imp); \
- class_import_put(imp); \
- mutex_unlock(&ctxt->loc_mutex); \
-} while (0)
-
-/* This is a callback from the llog_* functions.
- * Assumes caller has already pushed us into the kernel context. */
-static int llog_client_open(const struct lu_env *env,
- struct llog_handle *lgh, struct llog_logid *logid,
- char *name, enum llog_open_param open_param)
-{
- struct obd_import *imp;
- struct llogd_body *body;
- struct llog_ctxt *ctxt = lgh->lgh_ctxt;
- struct ptlrpc_request *req = NULL;
- int rc;
-
- LLOG_CLIENT_ENTRY(ctxt, imp);
-
- /* client cannot create llog */
- LASSERTF(open_param != LLOG_OPEN_NEW, "%#x\n", open_param);
- LASSERT(lgh);
-
- req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
- if (req == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (name)
- req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
- strlen(name) + 1);
-
- rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_CREATE);
- if (rc) {
- ptlrpc_request_free(req);
- req = NULL;
- goto out;
- }
- ptlrpc_request_set_replen(req);
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (logid)
- body->lgd_logid = *logid;
- body->lgd_ctxt_idx = ctxt->loc_idx - 1;
-
- if (name) {
- char *tmp;
- tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME,
- strlen(name) + 1);
- LASSERT(tmp);
- strcpy(tmp, name);
- }
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- lgh->lgh_id = body->lgd_logid;
- lgh->lgh_ctxt = ctxt;
-out:
- LLOG_CLIENT_EXIT(ctxt, imp);
- ptlrpc_req_finished(req);
- return rc;
-}
-
-static int llog_client_next_block(const struct lu_env *env,
- struct llog_handle *loghandle,
- int *cur_idx, int next_idx,
- __u64 *cur_offset, void *buf, int len)
-{
- struct obd_import *imp;
- struct ptlrpc_request *req = NULL;
- struct llogd_body *body;
- void *ptr;
- int rc;
-
- LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
- LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
- if (req == NULL) {
- rc = -ENOMEM;
- goto err_exit;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = loghandle->lgh_id;
- body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
- body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
- body->lgd_index = next_idx;
- body->lgd_saved_index = *cur_idx;
- body->lgd_len = len;
- body->lgd_cur_offset = *cur_offset;
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- /* The log records are swabbed as they are processed */
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- if (ptr == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- *cur_idx = body->lgd_saved_index;
- *cur_offset = body->lgd_cur_offset;
-
- memcpy(buf, ptr, len);
-out:
- ptlrpc_req_finished(req);
-err_exit:
- LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
- return rc;
-}
-
-static int llog_client_prev_block(const struct lu_env *env,
- struct llog_handle *loghandle,
- int prev_idx, void *buf, int len)
-{
- struct obd_import *imp;
- struct ptlrpc_request *req = NULL;
- struct llogd_body *body;
- void *ptr;
- int rc;
-
- LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
- LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_PREV_BLOCK);
- if (req == NULL) {
- rc = -ENOMEM;
- goto err_exit;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = loghandle->lgh_id;
- body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
- body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
- body->lgd_index = prev_idx;
- body->lgd_len = len;
-
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- if (ptr == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(buf, ptr, len);
-out:
- ptlrpc_req_finished(req);
-err_exit:
- LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
- return rc;
-}
-
-static int llog_client_read_header(const struct lu_env *env,
- struct llog_handle *handle)
-{
- struct obd_import *imp;
- struct ptlrpc_request *req = NULL;
- struct llogd_body *body;
- struct llog_log_hdr *hdr;
- struct llog_rec_hdr *llh_hdr;
- int rc;
-
- LLOG_CLIENT_ENTRY(handle->lgh_ctxt, imp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
- LUSTRE_LOG_VERSION,
- LLOG_ORIGIN_HANDLE_READ_HEADER);
- if (req == NULL) {
- rc = -ENOMEM;
- goto err_exit;
- }
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
- body->lgd_logid = handle->lgh_id;
- body->lgd_ctxt_idx = handle->lgh_ctxt->loc_idx - 1;
- body->lgd_llh_flags = handle->lgh_hdr->llh_flags;
-
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
- if (hdr == NULL) {
- rc = -EFAULT;
- goto out;
- }
-
- memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
- handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
-
- /* sanity checks */
- llh_hdr = &handle->lgh_hdr->llh_hdr;
- if (llh_hdr->lrh_type != LLOG_HDR_MAGIC) {
- CERROR("bad log header magic: %#x (expecting %#x)\n",
- llh_hdr->lrh_type, LLOG_HDR_MAGIC);
- rc = -EIO;
- } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
- CERROR("incorrectly sized log header: %#x (expecting %#x)\n",
- llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
- CERROR("you may need to re-run lconf --write_conf.\n");
- rc = -EIO;
- }
-out:
- ptlrpc_req_finished(req);
-err_exit:
- LLOG_CLIENT_EXIT(handle->lgh_ctxt, imp);
- return rc;
-}
-
-static int llog_client_close(const struct lu_env *env,
- struct llog_handle *handle)
-{
- /* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because
- the servers all close the file at the end of every
- other LLOG_ RPC. */
- return 0;
-}
-
-struct llog_operations llog_client_ops = {
- .lop_next_block = llog_client_next_block,
- .lop_prev_block = llog_client_prev_block,
- .lop_read_header = llog_client_read_header,
- .lop_open = llog_client_open,
- .lop_close = llog_client_close,
-};
-EXPORT_SYMBOL(llog_client_ops);
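llog_client_next_block() above is a cursor-style protocol: the client sends its current (index, offset) pair plus a buffer length, and the server replies with the next chunk and an updated cursor, so walking a whole log is just a loop that feeds the returned cursor back in. A small stand-alone model of that iteration pattern; fetch_next_block() and its fake data are hypothetical stand-ins for the LLOG_ORIGIN_HANDLE_NEXT_BLOCK RPC:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CHUNK 8192			/* stand-in for the per-RPC buffer length */

struct llog_cursor {			/* models lgd_saved_index / lgd_cur_offset */
	int	 cur_idx;
	uint64_t cur_offset;
};

/* Hypothetical stand-in for the next-block RPC: fill the buffer, advance
 * the cursor, and return 0, or -1 once the log is drained. */
static int fetch_next_block(struct llog_cursor *c, char *buf, size_t len)
{
	static const int total_blocks = 4;

	if (c->cur_idx >= total_blocks)
		return -1;
	memset(buf, 'A' + c->cur_idx, len);
	c->cur_idx++;
	c->cur_offset += len;
	return 0;
}

int main(void)
{
	struct llog_cursor c = { 0, 0 };
	char buf[CHUNK];

	/* Same shape as the client loop: keep feeding the returned cursor
	 * back until nothing is left to read. */
	while (fetch_next_block(&c, buf, sizeof(buf)) == 0)
		printf("read block %d, offset now %llu\n", c.cur_idx,
		       (unsigned long long)c.cur_offset);
	return 0;
}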
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
deleted file mode 100644
index dac66f5b39da..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/llog_net.c
- *
- * OST<->MDS recovery logging infrastructure.
- *
- * Invariants in implementation:
- * - we do not share logs among different OST<->MDS connections, so that
- * if an OST or MDS fails it need only look at log(s) relevant to itself
- *
- * Author: Andreas Dilger <adilger@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOG
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_class.h"
-#include "../include/lustre_log.h"
-#include <linux/list.h>
-
-int llog_initiator_connect(struct llog_ctxt *ctxt)
-{
- struct obd_import *new_imp;
-
- LASSERT(ctxt);
- new_imp = ctxt->loc_obd->u.cli.cl_import;
- LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
- "%p - %p\n", ctxt->loc_imp, new_imp);
- mutex_lock(&ctxt->loc_mutex);
- if (ctxt->loc_imp != new_imp) {
- if (ctxt->loc_imp)
- class_import_put(ctxt->loc_imp);
- ctxt->loc_imp = class_import_get(new_imp);
- }
- mutex_unlock(&ctxt->loc_mutex);
- return 0;
-}
-EXPORT_SYMBOL(llog_initiator_connect);
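LLOG_CLIENT_ENTRY/EXIT in llog_client.c pin a counted reference to ctxt->loc_imp under loc_mutex (bailing out with -EINVAL if it is gone, warning if it changed by the time the RPC finishes), while llog_initiator_connect() above is the other half of the handshake: it swaps the pointer under the same mutex, putting the old reference and taking a new one. A single-threaded, user-space model of that ownership pattern; struct obj, the slot_* helpers and main() are hypothetical stand-ins, not the kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {			/* toy stand-in for struct obd_import */
	int refs;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *slot;	/* models ctxt->loc_imp */

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

/* Models LLOG_CLIENT_ENTRY: pin whatever currently sits in the slot,
 * or return NULL if nothing is connected yet. */
static struct obj *slot_pin(void)
{
	struct obj *o = NULL;

	pthread_mutex_lock(&slot_lock);
	if (slot)
		o = obj_get(slot);
	pthread_mutex_unlock(&slot_lock);
	return o;
}

/* Models LLOG_CLIENT_EXIT: drop the pinned reference, warning if the
 * slot was swapped while we were using it. */
static void slot_unpin(struct obj *o)
{
	pthread_mutex_lock(&slot_lock);
	if (slot != o)
		fprintf(stderr, "slot changed while pinned\n");
	obj_put(o);
	pthread_mutex_unlock(&slot_lock);
}

/* Models llog_initiator_connect(): install a new object, dropping the
 * slot's reference to the old one. */
static void slot_replace(struct obj *new_obj)
{
	pthread_mutex_lock(&slot_lock);
	if (slot != new_obj) {
		if (slot)
			obj_put(slot);
		slot = obj_get(new_obj);
	}
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	struct obj *imp = calloc(1, sizeof(*imp));
	struct obj *pinned;

	imp->refs = 1;			/* creator's reference */
	slot_replace(imp);
	obj_put(imp);			/* the slot now owns the only long-lived ref */

	pinned = slot_pin();		/* LLOG_CLIENT_ENTRY */
	if (pinned) {
		/* ... issue the RPC against the pinned object here ... */
		slot_unpin(pinned);	/* LLOG_CLIENT_EXIT */
	}
	return 0;
}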
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
deleted file mode 100644
index 3a212b4be9a1..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ /dev/null
@@ -1,1311 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_CLASS
-
-
-#include "../include/obd_support.h"
-#include "../include/obd.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_net.h"
-#include "../include/obd_class.h"
-#include "ptlrpc_internal.h"
-
-
-static struct ll_rpc_opcode {
- __u32 opcode;
- const char *opname;
-} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
- { OST_REPLY, "ost_reply" },
- { OST_GETATTR, "ost_getattr" },
- { OST_SETATTR, "ost_setattr" },
- { OST_READ, "ost_read" },
- { OST_WRITE, "ost_write" },
- { OST_CREATE, "ost_create" },
- { OST_DESTROY, "ost_destroy" },
- { OST_GET_INFO, "ost_get_info" },
- { OST_CONNECT, "ost_connect" },
- { OST_DISCONNECT, "ost_disconnect" },
- { OST_PUNCH, "ost_punch" },
- { OST_OPEN, "ost_open" },
- { OST_CLOSE, "ost_close" },
- { OST_STATFS, "ost_statfs" },
- { 14, NULL }, /* formerly OST_SAN_READ */
- { 15, NULL }, /* formerly OST_SAN_WRITE */
- { OST_SYNC, "ost_sync" },
- { OST_SET_INFO, "ost_set_info" },
- { OST_QUOTACHECK, "ost_quotacheck" },
- { OST_QUOTACTL, "ost_quotactl" },
- { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
- { MDS_GETATTR, "mds_getattr" },
- { MDS_GETATTR_NAME, "mds_getattr_lock" },
- { MDS_CLOSE, "mds_close" },
- { MDS_REINT, "mds_reint" },
- { MDS_READPAGE, "mds_readpage" },
- { MDS_CONNECT, "mds_connect" },
- { MDS_DISCONNECT, "mds_disconnect" },
- { MDS_GETSTATUS, "mds_getstatus" },
- { MDS_STATFS, "mds_statfs" },
- { MDS_PIN, "mds_pin" },
- { MDS_UNPIN, "mds_unpin" },
- { MDS_SYNC, "mds_sync" },
- { MDS_DONE_WRITING, "mds_done_writing" },
- { MDS_SET_INFO, "mds_set_info" },
- { MDS_QUOTACHECK, "mds_quotacheck" },
- { MDS_QUOTACTL, "mds_quotactl" },
- { MDS_GETXATTR, "mds_getxattr" },
- { MDS_SETXATTR, "mds_setxattr" },
- { MDS_WRITEPAGE, "mds_writepage" },
- { MDS_IS_SUBDIR, "mds_is_subdir" },
- { MDS_GET_INFO, "mds_get_info" },
- { MDS_HSM_STATE_GET, "mds_hsm_state_get" },
- { MDS_HSM_STATE_SET, "mds_hsm_state_set" },
- { MDS_HSM_ACTION, "mds_hsm_action" },
- { MDS_HSM_PROGRESS, "mds_hsm_progress" },
- { MDS_HSM_REQUEST, "mds_hsm_request" },
- { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" },
- { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" },
- { MDS_SWAP_LAYOUTS, "mds_swap_layouts" },
- { LDLM_ENQUEUE, "ldlm_enqueue" },
- { LDLM_CONVERT, "ldlm_convert" },
- { LDLM_CANCEL, "ldlm_cancel" },
- { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
- { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
- { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
- { LDLM_SET_INFO, "ldlm_set_info" },
- { MGS_CONNECT, "mgs_connect" },
- { MGS_DISCONNECT, "mgs_disconnect" },
- { MGS_EXCEPTION, "mgs_exception" },
- { MGS_TARGET_REG, "mgs_target_reg" },
- { MGS_TARGET_DEL, "mgs_target_del" },
- { MGS_SET_INFO, "mgs_set_info" },
- { MGS_CONFIG_READ, "mgs_config_read" },
- { OBD_PING, "obd_ping" },
- { OBD_LOG_CANCEL, "llog_cancel" },
- { OBD_QC_CALLBACK, "obd_quota_callback" },
- { OBD_IDX_READ, "dt_index_read" },
- { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_open" },
- { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
- { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
- { LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
- { LLOG_ORIGIN_HANDLE_CLOSE, "llog_origin_handle_close" },
- { LLOG_ORIGIN_CONNECT, "llog_origin_connect" },
- { LLOG_CATINFO, "llog_catinfo" },
- { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
- { LLOG_ORIGIN_HANDLE_DESTROY, "llog_origin_handle_destroy" },
- { QUOTA_DQACQ, "quota_acquire" },
- { QUOTA_DQREL, "quota_release" },
- { SEQ_QUERY, "seq_query" },
- { SEC_CTX_INIT, "sec_ctx_init" },
- { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
- { SEC_CTX_FINI, "sec_ctx_fini" },
- { FLD_QUERY, "fld_query" },
- { UPDATE_OBJ, "update_obj" },
-};
-
-static struct ll_eopcode {
- __u32 opcode;
- const char *opname;
-} ll_eopcode_table[EXTRA_LAST_OPC] = {
- { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
- { LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
- { LDLM_EXTENT_ENQUEUE, "ldlm_extent_enqueue" },
- { LDLM_FLOCK_ENQUEUE, "ldlm_flock_enqueue" },
- { LDLM_IBITS_ENQUEUE, "ldlm_ibits_enqueue" },
- { MDS_REINT_SETATTR, "mds_reint_setattr" },
- { MDS_REINT_CREATE, "mds_reint_create" },
- { MDS_REINT_LINK, "mds_reint_link" },
- { MDS_REINT_UNLINK, "mds_reint_unlink" },
- { MDS_REINT_RENAME, "mds_reint_rename" },
- { MDS_REINT_OPEN, "mds_reint_open" },
- { MDS_REINT_SETXATTR, "mds_reint_setxattr" },
- { BRW_READ_BYTES, "read_bytes" },
- { BRW_WRITE_BYTES, "write_bytes" },
-};
-
-const char *ll_opcode2str(__u32 opcode)
-{
- /* When one of the assertions below fail, chances are that:
- * 1) A new opcode was added in include/lustre/lustre_idl.h,
- * but is missing from the table above.
- * or 2) The opcode space was renumbered or rearranged,
- * and the opcode_offset() function in
- * ptlrpc_internal.h needs to be modified.
- */
- __u32 offset = opcode_offset(opcode);
- LASSERTF(offset < LUSTRE_MAX_OPCODES,
- "offset %u >= LUSTRE_MAX_OPCODES %u\n",
- offset, LUSTRE_MAX_OPCODES);
- LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
- "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
- offset, ll_rpc_opcode_table[offset].opcode, opcode);
- return ll_rpc_opcode_table[offset].opname;
-}
-
-static const char *ll_eopcode2str(__u32 opcode)
-{
- LASSERT(ll_eopcode_table[opcode].opcode == opcode);
- return ll_eopcode_table[opcode].opname;
-}
-
-static void
-ptlrpc_ldebugfs_register(struct dentry *root, char *dir,
- char *name,
- struct dentry **debugfs_root_ret,
- struct lprocfs_stats **stats_ret)
-{
- struct dentry *svc_debugfs_entry;
- struct lprocfs_stats *svc_stats;
- int i, rc;
- unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
- LPROCFS_CNTR_STDDEV;
-
- LASSERT(*debugfs_root_ret == NULL);
- LASSERT(*stats_ret == NULL);
-
- svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,
- 0);
- if (svc_stats == NULL)
- return;
-
- if (dir != NULL) {
- svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL);
- if (IS_ERR(svc_debugfs_entry)) {
- lprocfs_free_stats(&svc_stats);
- return;
- }
- } else {
- svc_debugfs_entry = root;
- }
-
- lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
- svc_counter_config, "req_waittime", "usec");
- lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
- svc_counter_config, "req_qdepth", "reqs");
- lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
- svc_counter_config, "req_active", "reqs");
- lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
- svc_counter_config, "req_timeout", "sec");
- lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
- svc_counter_config, "reqbuf_avail", "bufs");
- for (i = 0; i < EXTRA_LAST_OPC; i++) {
- char *units;
-
- switch (i) {
- case BRW_WRITE_BYTES:
- case BRW_READ_BYTES:
- units = "bytes";
- break;
- default:
- units = "reqs";
- break;
- }
- lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
- svc_counter_config,
- ll_eopcode2str(i), units);
- }
- for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
- __u32 opcode = ll_rpc_opcode_table[i].opcode;
- lprocfs_counter_init(svc_stats,
- EXTRA_MAX_OPCODES + i, svc_counter_config,
- ll_opcode2str(opcode), "usec");
- }
-
- rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats);
- if (rc < 0) {
- if (dir != NULL)
- ldebugfs_remove(&svc_debugfs_entry);
- lprocfs_free_stats(&svc_stats);
- } else {
- if (dir != NULL)
- *debugfs_root_ret = svc_debugfs_entry;
- *stats_ret = svc_stats;
- }
-}
-
-static int
-ptlrpc_lprocfs_req_history_len_seq_show(struct seq_file *m, void *v)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- int total = 0;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- total += svcpt->scp_hist_nrqbds;
-
- seq_printf(m, "%d\n", total);
- return 0;
-}
-LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_req_history_len);
-
-static int
-ptlrpc_lprocfs_req_history_max_seq_show(struct seq_file *m, void *n)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- int total = 0;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- total += svc->srv_hist_nrqbds_cpt_max;
-
- seq_printf(m, "%d\n", total);
- return 0;
-}
-
-static ssize_t
-ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
- int bufpages;
- int val;
- int rc;
-
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc < 0)
- return rc;
-
- if (val < 0)
- return -ERANGE;
-
- /* This sanity check is more of an insanity check; we can still
- * hose a kernel by allowing the request history to grow too
- * far. */
- bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (val > totalram_pages / (2 * bufpages))
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
-
- if (val == 0)
- svc->srv_hist_nrqbds_cpt_max = 0;
- else
- svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));
-
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-LPROC_SEQ_FOPS(ptlrpc_lprocfs_req_history_max);
-
-
-static ssize_t threads_min_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
-
- return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_init * svc->srv_ncpts);
-}
-
-static ssize_t threads_min_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- unsigned long val;
- int rc = kstrtoul(buffer, 10, &val);
-
- if (rc < 0)
- return rc;
-
- if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
- if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
- spin_unlock(&svc->srv_lock);
- return -ERANGE;
- }
-
- svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;
-
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(threads_min);
-
-static ssize_t threads_started_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- struct ptlrpc_service_part *svcpt;
- int total = 0;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- total += svcpt->scp_nthrs_running;
-
- return sprintf(buf, "%d\n", total);
-}
-LUSTRE_RO_ATTR(threads_started);
-
-static ssize_t threads_max_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
-
- return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_limit * svc->srv_ncpts);
-}
-
-static ssize_t threads_max_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- unsigned long val;
- int rc = kstrtoul(buffer, 10, &val);
-
- if (rc < 0)
- return rc;
-
- if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
- if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
- spin_unlock(&svc->srv_lock);
- return -ERANGE;
- }
-
- svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;
-
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(threads_max);
-
-/**
- * \addtogroup nrs
- * @{
- */
-
-/**
- * Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
- *
- * \param[in] state The policy state
- */
-static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state)
-{
- switch (state) {
- default:
- LBUG();
- case NRS_POL_STATE_INVALID:
- return "invalid";
- case NRS_POL_STATE_STOPPED:
- return "stopped";
- case NRS_POL_STATE_STOPPING:
- return "stopping";
- case NRS_POL_STATE_STARTING:
- return "starting";
- case NRS_POL_STATE_STARTED:
- return "started";
- }
-}
-
-/**
- * Obtains status information for \a policy.
- *
- * Information is copied in \a info.
- *
- * \param[in] policy The policy
- * \param[out] info Holds returned status information
- */
-void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_pol_info *info)
-{
- LASSERT(policy != NULL);
- LASSERT(info != NULL);
- assert_spin_locked(&policy->pol_nrs->nrs_lock);
-
- memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
-
- info->pi_fallback = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK);
- info->pi_state = policy->pol_state;
- /**
- * XXX: These are accessed without holding
- * ptlrpc_service_part::scp_req_lock.
- */
- info->pi_req_queued = policy->pol_req_queued;
- info->pi_req_started = policy->pol_req_started;
-}
-
-/**
- * Reads and prints policy status information for all policies of a PTLRPC
- * service.
- */
-static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_nrs *nrs;
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_pol_info *infos;
- struct ptlrpc_nrs_pol_info tmp;
- unsigned num_pols;
- unsigned pol_idx = 0;
- bool hp = false;
- int i;
- int rc = 0;
-
- /**
- * Serialize NRS core lprocfs operations with policy registration/
- * unregistration.
- */
- mutex_lock(&nrs_core.nrs_mutex);
-
- /**
- * Use the first service partition's regular NRS head in order to obtain
- * the number of policies registered with NRS heads of this service. All
- * service partitions will have the same number of policies.
- */
- nrs = nrs_svcpt2nrs(svc->srv_parts[0], false);
-
- spin_lock(&nrs->nrs_lock);
- num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols;
- spin_unlock(&nrs->nrs_lock);
-
- infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS);
- if (infos == NULL) {
- rc = -ENOMEM;
- goto unlock;
- }
-again:
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- nrs = nrs_svcpt2nrs(svcpt, hp);
- spin_lock(&nrs->nrs_lock);
-
- pol_idx = 0;
-
- list_for_each_entry(policy, &nrs->nrs_policy_list,
- pol_list) {
- LASSERT(pol_idx < num_pols);
-
- nrs_policy_get_info_locked(policy, &tmp);
- /**
- * Copy values when handling the first service
- * partition.
- */
- if (i == 0) {
- memcpy(infos[pol_idx].pi_name, tmp.pi_name,
- NRS_POL_NAME_MAX);
- memcpy(&infos[pol_idx].pi_state, &tmp.pi_state,
- sizeof(tmp.pi_state));
- infos[pol_idx].pi_fallback = tmp.pi_fallback;
- /**
- * For the rest of the service partitions
- * sanity-check the values we get.
- */
- } else {
- LASSERT(strncmp(infos[pol_idx].pi_name,
- tmp.pi_name,
- NRS_POL_NAME_MAX) == 0);
- /**
- * Not asserting ptlrpc_nrs_pol_info::pi_state,
- * because it may be different between
- * instances of the same policy in different
- * service partitions.
- */
- LASSERT(infos[pol_idx].pi_fallback ==
- tmp.pi_fallback);
- }
-
- infos[pol_idx].pi_req_queued += tmp.pi_req_queued;
- infos[pol_idx].pi_req_started += tmp.pi_req_started;
-
- pol_idx++;
- }
- spin_unlock(&nrs->nrs_lock);
- }
-
- /**
- * Policy status information output is in YAML format.
- * For example:
- *
- * regular_requests:
- * - name: fifo
- * state: started
- * fallback: yes
- * queued: 0
- * active: 0
- *
- * - name: crrn
- * state: started
- * fallback: no
- * queued: 2015
- * active: 384
- *
- * high_priority_requests:
- * - name: fifo
- * state: started
- * fallback: yes
- * queued: 0
- * active: 2
- *
- * - name: crrn
- * state: stopped
- * fallback: no
- * queued: 0
- * active: 0
- */
- seq_printf(m, "%s\n",
- !hp ? "\nregular_requests:" : "high_priority_requests:");
-
- for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
- seq_printf(m, " - name: %s\n"
- " state: %s\n"
- " fallback: %s\n"
- " queued: %-20d\n"
- " active: %-20d\n\n",
- infos[pol_idx].pi_name,
- nrs_state2str(infos[pol_idx].pi_state),
- infos[pol_idx].pi_fallback ? "yes" : "no",
- (int)infos[pol_idx].pi_req_queued,
- (int)infos[pol_idx].pi_req_started);
- }
-
- if (!hp && nrs_svc_has_hp(svc)) {
- memset(infos, 0, num_pols * sizeof(*infos));
-
- /**
- * Redo the processing for the service's HP NRS heads' policies.
- */
- hp = true;
- goto again;
- }
-
- kfree(infos);
-unlock:
- mutex_unlock(&nrs_core.nrs_mutex);
-
- return rc;
-}
-
-/**
- * The longest valid command string is the maximum policy name size, plus the
- * length of the " reg" substring
- */
-#define LPROCFS_NRS_WR_MAX_CMD (NRS_POL_NAME_MAX + sizeof(" reg") - 1)
-
-/**
- * Starts and stops a given policy on a PTLRPC service.
- *
- * Commands consist of the policy name, followed by an optional [reg|hp] token;
- * if the optional token is omitted, the operation is performed on both the
- * regular and high-priority (if the service has one) NRS head.
- */
-static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
- enum ptlrpc_nrs_queue_type queue = PTLRPC_NRS_QUEUE_BOTH;
- char *cmd;
- char *cmd_copy = NULL;
- char *token;
- int rc = 0;
-
- if (count >= LPROCFS_NRS_WR_MAX_CMD)
- return -EINVAL;
-
- cmd = kzalloc(LPROCFS_NRS_WR_MAX_CMD, GFP_NOFS);
- if (!cmd)
- return -ENOMEM;
- /**
- * strsep() modifies its argument, so keep a copy
- */
- cmd_copy = cmd;
-
- if (copy_from_user(cmd, buffer, count)) {
- rc = -EFAULT;
- goto out;
- }
-
- cmd[count] = '\0';
-
- token = strsep(&cmd, " ");
-
- if (strlen(token) > NRS_POL_NAME_MAX - 1) {
- rc = -EINVAL;
- goto out;
- }
-
- /**
- * No [reg|hp] token has been specified
- */
- if (cmd == NULL)
- goto default_queue;
-
- /**
- * The second token is either NULL, or an optional [reg|hp] string
- */
- if (strcmp(cmd, "reg") == 0)
- queue = PTLRPC_NRS_QUEUE_REG;
- else if (strcmp(cmd, "hp") == 0)
- queue = PTLRPC_NRS_QUEUE_HP;
- else {
- rc = -EINVAL;
- goto out;
- }
-
-default_queue:
-
- if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
- rc = -ENODEV;
- goto out;
- } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
- queue = PTLRPC_NRS_QUEUE_REG;
-
- /**
- * Serialize NRS core lprocfs operations with policy registration/
- * unregistration.
- */
- mutex_lock(&nrs_core.nrs_mutex);
-
- rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START,
- false, NULL);
-
- mutex_unlock(&nrs_core.nrs_mutex);
-out:
- kfree(cmd_copy);
-
- return rc < 0 ? rc : count;
-}
-LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs);
-
-/** @} nrs */
-
-struct ptlrpc_srh_iterator {
- int srhi_idx;
- __u64 srhi_seq;
- struct ptlrpc_request *srhi_req;
-};
-
-static int
-ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_srh_iterator *srhi,
- __u64 seq)
-{
- struct list_head *e;
- struct ptlrpc_request *req;
-
- if (srhi->srhi_req != NULL &&
- srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
- srhi->srhi_seq <= seq) {
- /* If srhi_req was set previously, hasn't been culled and
- * we're searching for a seq on or after it (i.e. more
- * recent), search from it onwards.
- * Since the service history is LRU (i.e. culled reqs will
- * be near the head), we shouldn't have to do long
- * re-scans */
- LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
- "%s:%d: seek seq %llu, request seq %llu\n",
- svcpt->scp_service->srv_name, svcpt->scp_cpt,
- srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
- LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
- "%s:%d: seek offset %llu, request seq %llu, last culled %llu\n",
- svcpt->scp_service->srv_name, svcpt->scp_cpt,
- seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
- e = &srhi->srhi_req->rq_history_list;
- } else {
- /* search from start */
- e = svcpt->scp_hist_reqs.next;
- }
-
- while (e != &svcpt->scp_hist_reqs) {
- req = list_entry(e, struct ptlrpc_request, rq_history_list);
-
- if (req->rq_history_seq >= seq) {
- srhi->srhi_seq = req->rq_history_seq;
- srhi->srhi_req = req;
- return 0;
- }
- e = e->next;
- }
-
- return -ENOENT;
-}
-
-/*
- * ptlrpc history sequence is used as "position" of seq_file, in some case,
- * seq_read() will increase "position" to indicate reading the next
- * element, however, low bits of history sequence are reserved for CPT id
- * (check the details from comments before ptlrpc_req_add_history), which
- * means seq_read() might change CPT id of history sequence and never
- * finish reading of requests on a CPT. To make it work, we have to shift
- * CPT id to high bits and timestamp to low bits, so seq_read() will only
- * increase timestamp which can correctly indicate the next position.
- */
-
-/* convert seq_file pos to cpt */
-#define PTLRPC_REQ_POS2CPT(svc, pos) \
- ((svc)->srv_cpt_bits == 0 ? 0 : \
- (__u64)(pos) >> (64 - (svc)->srv_cpt_bits))
-
-/* make up seq_file pos from cpt */
-#define PTLRPC_REQ_CPT2POS(svc, cpt) \
- ((svc)->srv_cpt_bits == 0 ? 0 : \
- (cpt) << (64 - (svc)->srv_cpt_bits))
-
-/* convert sequence to position */
-#define PTLRPC_REQ_SEQ2POS(svc, seq) \
- ((svc)->srv_cpt_bits == 0 ? (seq) : \
- ((seq) >> (svc)->srv_cpt_bits) | \
- ((seq) << (64 - (svc)->srv_cpt_bits)))
-
-/* convert position to sequence */
-#define PTLRPC_REQ_POS2SEQ(svc, pos) \
- ((svc)->srv_cpt_bits == 0 ? (pos) : \
- ((__u64)(pos) << (svc)->srv_cpt_bits) | \
- ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits)))
-
-static void *
-ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
-{
- struct ptlrpc_service *svc = s->private;
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_srh_iterator *srhi;
- unsigned int cpt;
- int rc;
- int i;
-
- if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
- CWARN("Failed to read request history because size of loff_t %d can't match size of u64\n",
- (int)sizeof(loff_t));
- return NULL;
- }
-
- srhi = kzalloc(sizeof(*srhi), GFP_NOFS);
- if (!srhi)
- return NULL;
-
- srhi->srhi_seq = 0;
- srhi->srhi_req = NULL;
-
- cpt = PTLRPC_REQ_POS2CPT(svc, *pos);
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (i < cpt) /* skip */
- continue;
- if (i > cpt) /* make up the lowest position for this CPT */
- *pos = PTLRPC_REQ_CPT2POS(svc, i);
-
- spin_lock(&svcpt->scp_lock);
- rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi,
- PTLRPC_REQ_POS2SEQ(svc, *pos));
- spin_unlock(&svcpt->scp_lock);
- if (rc == 0) {
- *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
- srhi->srhi_idx = i;
- return srhi;
- }
- }
-
- kfree(srhi);
- return NULL;
-}
-
-static void
-ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
-{
- struct ptlrpc_srh_iterator *srhi = iter;
-
- kfree(srhi);
-}
-
-static void *
-ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
- void *iter, loff_t *pos)
-{
- struct ptlrpc_service *svc = s->private;
- struct ptlrpc_srh_iterator *srhi = iter;
- struct ptlrpc_service_part *svcpt;
- __u64 seq;
- int rc;
- int i;
-
- for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) {
- svcpt = svc->srv_parts[i];
-
- if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
- srhi->srhi_req = NULL;
- seq = srhi->srhi_seq = 0;
- } else { /* the next sequence */
- seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
- }
-
- spin_lock(&svcpt->scp_lock);
- rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq);
- spin_unlock(&svcpt->scp_lock);
- if (rc == 0) {
- *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
- srhi->srhi_idx = i;
- return srhi;
- }
- }
-
- kfree(srhi);
- return NULL;
-}
-
-static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
-{
- struct ptlrpc_service *svc = s->private;
- struct ptlrpc_srh_iterator *srhi = iter;
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_request *req;
- int rc;
-
- LASSERT(srhi->srhi_idx < svc->srv_ncpts);
-
- svcpt = svc->srv_parts[srhi->srhi_idx];
-
- spin_lock(&svcpt->scp_lock);
-
- rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
-
- if (rc == 0) {
- req = srhi->srhi_req;
-
- /* Print common req fields.
- * CAVEAT EMPTOR: we're racing with the service handler
- * here. The request could contain any old crap, so you
- * must be just as careful as the service's request
- * parser. Currently I only print stuff here I know is OK
- * to look at coz it was set up in request_in_callback()!!! */
- seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
- req->rq_history_seq, libcfs_nid2str(req->rq_self),
- libcfs_id2str(req->rq_peer), req->rq_xid,
- req->rq_reqlen, ptlrpc_rqphase2str(req),
- (s64)req->rq_arrival_time.tv_sec,
- (long)(req->rq_sent - req->rq_arrival_time.tv_sec),
- (long)(req->rq_sent - req->rq_deadline));
- if (svc->srv_ops.so_req_printer == NULL)
- seq_printf(s, "\n");
- else
- svc->srv_ops.so_req_printer(s, srhi->srhi_req);
- }
-
- spin_unlock(&svcpt->scp_lock);
- return rc;
-}
-
-static int
-ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
-{
- static struct seq_operations sops = {
- .start = ptlrpc_lprocfs_svc_req_history_start,
- .stop = ptlrpc_lprocfs_svc_req_history_stop,
- .next = ptlrpc_lprocfs_svc_req_history_next,
- .show = ptlrpc_lprocfs_svc_req_history_show,
- };
- struct seq_file *seqf;
- int rc;
-
- rc = seq_open(file, &sops);
- if (rc)
- return rc;
-
- seqf = file->private_data;
- seqf->private = inode->i_private;
- return 0;
-}
-
-/* See also lprocfs_rd_timeouts */
-static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n)
-{
- struct ptlrpc_service *svc = m->private;
- struct ptlrpc_service_part *svcpt;
- struct dhms ts;
- time64_t worstt;
- unsigned int cur;
- unsigned int worst;
- int i;
-
- if (AT_OFF) {
- seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n",
- obd_timeout);
- return 0;
- }
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- cur = at_get(&svcpt->scp_at_estimate);
- worst = svcpt->scp_at_estimate.at_worst_ever;
- worstt = svcpt->scp_at_estimate.at_worst_time;
- s2dhms(&ts, ktime_get_real_seconds() - worstt);
-
- seq_printf(m, "%10s : cur %3u worst %3u (at %lld, "
- DHMS_FMT" ago) ", "service",
- cur, worst, (s64)worstt, DHMS_VARS(&ts));
-
- lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate);
- }
-
- return 0;
-}
-LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_timeouts);
-
-static ssize_t high_priority_ratio_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- return sprintf(buf, "%d\n", svc->srv_hpreq_ratio);
-}
-
-static ssize_t high_priority_ratio_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
- int rc;
- int val;
-
- rc = kstrtoint(buffer, 10, &val);
- if (rc < 0)
- return rc;
-
- if (val < 0)
- return -ERANGE;
-
- spin_lock(&svc->srv_lock);
- svc->srv_hpreq_ratio = val;
- spin_unlock(&svc->srv_lock);
-
- return count;
-}
-LUSTRE_RW_ATTR(high_priority_ratio);
-
-static struct attribute *ptlrpc_svc_attrs[] = {
- &lustre_attr_threads_min.attr,
- &lustre_attr_threads_started.attr,
- &lustre_attr_threads_max.attr,
- &lustre_attr_high_priority_ratio.attr,
- NULL,
-};
-
-static void ptlrpc_sysfs_svc_release(struct kobject *kobj)
-{
- struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
- srv_kobj);
-
- complete(&svc->srv_kobj_unregister);
-}
-
-static struct kobj_type ptlrpc_svc_ktype = {
- .default_attrs = ptlrpc_svc_attrs,
- .sysfs_ops = &lustre_sysfs_ops,
- .release = ptlrpc_sysfs_svc_release,
-};
-
-void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc)
-{
- /* Let's see if we had a chance at initialization first */
- if (svc->srv_kobj.kset) {
- kobject_put(&svc->srv_kobj);
- wait_for_completion(&svc->srv_kobj_unregister);
- }
-}
-
-int ptlrpc_sysfs_register_service(struct kset *parent,
- struct ptlrpc_service *svc)
-{
- int rc;
-
- svc->srv_kobj.kset = parent;
- init_completion(&svc->srv_kobj_unregister);
- rc = kobject_init_and_add(&svc->srv_kobj, &ptlrpc_svc_ktype, NULL,
- "%s", svc->srv_name);
-
- return rc;
-}
-
-void ptlrpc_ldebugfs_register_service(struct dentry *entry,
- struct ptlrpc_service *svc)
-{
- struct lprocfs_vars lproc_vars[] = {
- {.name = "req_buffer_history_len",
- .fops = &ptlrpc_lprocfs_req_history_len_fops,
- .data = svc},
- {.name = "req_buffer_history_max",
- .fops = &ptlrpc_lprocfs_req_history_max_fops,
- .data = svc},
- {.name = "timeouts",
- .fops = &ptlrpc_lprocfs_timeouts_fops,
- .data = svc},
- {.name = "nrs_policies",
- .fops = &ptlrpc_lprocfs_nrs_fops,
- .data = svc},
- {NULL}
- };
- static const struct file_operations req_history_fops = {
- .owner = THIS_MODULE,
- .open = ptlrpc_lprocfs_svc_req_history_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = lprocfs_seq_release,
- };
-
- int rc;
-
- ptlrpc_ldebugfs_register(entry, svc->srv_name,
- "stats", &svc->srv_debugfs_entry,
- &svc->srv_stats);
-
- if (svc->srv_debugfs_entry == NULL)
- return;
-
- ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL);
-
- rc = ldebugfs_seq_create(svc->srv_debugfs_entry, "req_history",
- 0400, &req_history_fops, svc);
- if (rc)
- CWARN("Error adding the req_history file\n");
-}
-
-void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
-{
- ptlrpc_ldebugfs_register(obddev->obd_debugfs_entry, NULL, "stats",
- &obddev->obd_svc_debugfs_entry,
- &obddev->obd_svc_stats);
-}
-EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);
-
-void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
-{
- struct lprocfs_stats *svc_stats;
- __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
- int opc = opcode_offset(op);
-
- svc_stats = req->rq_import->imp_obd->obd_svc_stats;
- if (svc_stats == NULL || opc <= 0)
- return;
- LASSERT(opc < LUSTRE_MAX_OPCODES);
- if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
- lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
-}
-
-void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
-{
- struct lprocfs_stats *svc_stats;
- int idx;
-
- if (!req->rq_import)
- return;
- svc_stats = req->rq_import->imp_obd->obd_svc_stats;
- if (!svc_stats)
- return;
- idx = lustre_msg_get_opc(req->rq_reqmsg);
- switch (idx) {
- case OST_READ:
- idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
- break;
- case OST_WRITE:
- idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
- break;
- default:
- LASSERTF(0, "unsupported opcode %u\n", idx);
- break;
- }
-
- lprocfs_counter_add(svc_stats, idx, bytes);
-}
-
-EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
-
-void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
-{
- if (svc->srv_debugfs_entry != NULL)
- ldebugfs_remove(&svc->srv_debugfs_entry);
-
- if (svc->srv_stats)
- lprocfs_free_stats(&svc->srv_stats);
-}
-
-void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
-{
- if (!IS_ERR_OR_NULL(obd->obd_svc_debugfs_entry))
- ldebugfs_remove(&obd->obd_svc_debugfs_entry);
-
- if (obd->obd_svc_stats)
- lprocfs_free_stats(&obd->obd_svc_stats);
-}
-EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
-
-#undef BUFLEN
-
-int lprocfs_wr_ping(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- struct ptlrpc_request *req;
- int rc;
-
- LPROCFS_CLIMP_CHECK(obd);
- req = ptlrpc_prep_ping(obd->u.cli.cl_import);
- LPROCFS_CLIMP_EXIT(obd);
- if (req == NULL)
- return -ENOMEM;
-
- req->rq_send_state = LUSTRE_IMP_FULL;
-
- rc = ptlrpc_queue_wait(req);
-
- ptlrpc_req_finished(req);
- if (rc >= 0)
- return count;
- return rc;
-}
-EXPORT_SYMBOL(lprocfs_wr_ping);
-
-/* Write the connection UUID to this file to attempt to connect to that node.
- * The connection UUID is a node's primary NID. For example,
- * "echo connection=192.168.0.1@tcp0::instance > .../import".
- */
-int lprocfs_wr_import(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- struct obd_import *imp = obd->u.cli.cl_import;
- char *kbuf = NULL;
- char *uuid;
- char *ptr;
- int do_reconn = 1;
- const char prefix[] = "connection=";
- const int prefix_len = sizeof(prefix) - 1;
-
- if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
- return -EINVAL;
-
- kbuf = kzalloc(count + 1, GFP_NOFS);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, buffer, count)) {
- count = -EFAULT;
- goto out;
- }
-
- kbuf[count] = 0;
-
- /* only support connection=uuid::instance now */
- if (strncmp(prefix, kbuf, prefix_len) != 0) {
- count = -EINVAL;
- goto out;
- }
-
- uuid = kbuf + prefix_len;
- ptr = strstr(uuid, "::");
- if (ptr) {
- __u32 inst;
- char *endptr;
-
- *ptr = 0;
- do_reconn = 0;
- ptr += strlen("::");
- inst = simple_strtol(ptr, &endptr, 10);
- if (*endptr) {
- CERROR("config: wrong instance # %s\n", ptr);
- } else if (inst != imp->imp_connect_data.ocd_instance) {
- CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted target(%u/%u), reconnecting...\n",
- imp->imp_obd->obd_name,
- imp->imp_connect_data.ocd_instance, inst);
- do_reconn = 1;
- } else {
- CDEBUG(D_INFO, "IR: %s has already been connecting to new target(%u)\n",
- imp->imp_obd->obd_name, inst);
- }
- }
-
- if (do_reconn)
- ptlrpc_recover_import(imp, uuid, 1);
-
-out:
- kfree(kbuf);
- return count;
-}
-EXPORT_SYMBOL(lprocfs_wr_import);
-
-int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
-{
- struct obd_device *obd = m->private;
- struct obd_import *imp = obd->u.cli.cl_import;
-
- LPROCFS_CLIMP_CHECK(obd);
- seq_printf(m, "%d\n", !imp->imp_no_pinger_recover);
- LPROCFS_CLIMP_EXIT(obd);
-
- return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_pinger_recov);
-
-int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
-{
- struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
- struct client_obd *cli = &obd->u.cli;
- struct obd_import *imp = cli->cl_import;
- int rc, val;
-
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc < 0)
- return rc;
-
- if (val != 0 && val != 1)
- return -ERANGE;
-
- LPROCFS_CLIMP_CHECK(obd);
- spin_lock(&imp->imp_lock);
- imp->imp_no_pinger_recover = !val;
- spin_unlock(&imp->imp_lock);
- LPROCFS_CLIMP_EXIT(obd);
-
- return count;
-
-}
-EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
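The PTLRPC_REQ_SEQ2POS()/POS2SEQ() macros earlier in this file rotate the CPT id out of the low bits of a request-history sequence and into the high bits of the seq_file position, so that seq_read()'s position increments only advance the timestamp part and never wander onto another CPT. A stand-alone round-trip check of that encoding; this is a plain-C restatement of the macros with made-up cpt_bits and timestamp values:

#include <stdio.h>
#include <stdint.h>

/* Same shape as PTLRPC_REQ_SEQ2POS(): rotate the sequence right by cpt_bits,
 * moving the CPT id from the low bits into the high bits of the position. */
static uint64_t seq2pos(uint64_t seq, unsigned cpt_bits)
{
	return cpt_bits == 0 ? seq :
	       (seq >> cpt_bits) | (seq << (64 - cpt_bits));
}

/* Same shape as PTLRPC_REQ_POS2SEQ(): rotate back. */
static uint64_t pos2seq(uint64_t pos, unsigned cpt_bits)
{
	return cpt_bits == 0 ? pos :
	       (pos << cpt_bits) | (pos >> (64 - cpt_bits));
}

/* Same shape as PTLRPC_REQ_POS2CPT(): the CPT id sits in the high bits. */
static uint64_t pos2cpt(uint64_t pos, unsigned cpt_bits)
{
	return cpt_bits == 0 ? 0 : pos >> (64 - cpt_bits);
}

int main(void)
{
	unsigned cpt_bits = 2;				/* e.g. 4 CPTs */
	uint64_t cpt = 3, stamp = 12345;
	uint64_t seq = (stamp << cpt_bits) | cpt;	/* CPT id in the low bits */
	uint64_t pos = seq2pos(seq, cpt_bits);

	printf("cpt from pos  : %llu\n", (unsigned long long)pos2cpt(pos, cpt_bits));
	printf("round trip ok : %d\n", pos2seq(pos, cpt_bits) == seq);
	/* Incrementing the position only bumps the timestamp part. */
	printf("pos+1 same cpt: %d\n", pos2cpt(pos + 1, cpt_bits) == cpt);
	return 0;
}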
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
deleted file mode 100644
index 8db62d00437c..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ /dev/null
@@ -1,730 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include "../include/obd_support.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_lib.h"
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "ptlrpc_internal.h"
-
-/**
- * Helper function. Sends \a len bytes from \a base at offset \a offset
- * over \a conn connection to portal \a portal.
- * Returns 0 on success or error code.
- */
-static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
- lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
- struct ptlrpc_connection *conn, int portal, __u64 xid,
- unsigned int offset)
-{
- int rc;
- lnet_md_t md;
-
- LASSERT(portal != 0);
- LASSERT(conn != NULL);
- CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
- md.start = base;
- md.length = len;
- md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
- md.options = PTLRPC_MD_OPTIONS;
- md.user_ptr = cbid;
- md.eq_handle = ptlrpc_eq_h;
-
- if (unlikely(ack == LNET_ACK_REQ &&
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK,
- OBD_FAIL_ONCE))) {
- /* don't ask for the ack to simulate failing client */
- ack = LNET_NOACK_REQ;
- }
-
- rc = LNetMDBind(md, LNET_UNLINK, mdh);
- if (unlikely(rc != 0)) {
- CERROR("LNetMDBind failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- return -ENOMEM;
- }
-
- CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
- len, portal, xid, offset);
-
- rc = LNetPut(conn->c_self, *mdh, ack,
- conn->c_peer, portal, xid, offset, 0);
- if (unlikely(rc != 0)) {
- int rc2;
- /* We're going to get an UNLINK event when I unlink below,
- * which will complete just like any other failed send, so
- * I fall through and return success here! */
- CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
- libcfs_id2str(conn->c_peer), portal, xid, rc);
- rc2 = LNetMDUnlink(*mdh);
- LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
- }
-
- return 0;
-}
-
-static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- LNetMDUnlink(bd_mds[i]);
-}
-
-
-/**
- * Register bulk at the sender for later transfer.
- * Returns 0 on success or error code.
- */
-int ptlrpc_register_bulk(struct ptlrpc_request *req)
-{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- lnet_process_id_t peer;
- int rc = 0;
- int rc2;
- int posted_md;
- int total_md;
- __u64 xid;
- lnet_handle_me_t me_h;
- lnet_md_t md;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
- return 0;
-
- /* NB no locking required until desc is on the network */
- LASSERT(desc->bd_nob > 0);
- LASSERT(desc->bd_md_count == 0);
- LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
- LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
- LASSERT(desc->bd_req != NULL);
- LASSERT(desc->bd_type == BULK_PUT_SINK ||
- desc->bd_type == BULK_GET_SOURCE);
-
- /* cleanup the state of the bulk for it will be reused */
- if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
- desc->bd_nob_transferred = 0;
- else
- LASSERT(desc->bd_nob_transferred == 0);
-
- desc->bd_failure = 0;
-
- peer = desc->bd_import->imp_connection->c_peer;
-
- LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
- LASSERT(desc->bd_cbid.cbid_arg == desc);
-
- /* An XID is only used for a single request from the client.
- * For retried bulk transfers, a new XID will be allocated in
- * in ptlrpc_check_set() if it needs to be resent, so it is not
- * using the same RDMA match bits after an error.
- *
- * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
- * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
- xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
- LASSERTF(!(desc->bd_registered &&
- req->rq_send_state != LUSTRE_IMP_REPLAY) ||
- xid != desc->bd_last_xid,
- "registered: %d rq_xid: %llu bd_last_xid: %llu\n",
- desc->bd_registered, xid, desc->bd_last_xid);
-
- total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
- desc->bd_registered = 1;
- desc->bd_last_xid = xid;
- desc->bd_md_count = total_md;
- md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq_h;
- md.threshold = 1; /* PUT or GET */
-
- for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
- md.options = PTLRPC_MD_OPTIONS |
- ((desc->bd_type == BULK_GET_SOURCE) ?
- LNET_MD_OP_GET : LNET_MD_OP_PUT);
- ptlrpc_fill_bulk_md(&md, desc, posted_md);
-
- rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
- LNET_UNLINK, LNET_INS_AFTER, &me_h);
- if (rc != 0) {
- CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
- posted_md, rc);
- break;
- }
-
- /* About to let the network at it... */
- rc = LNetMDAttach(me_h, md, LNET_UNLINK,
- &desc->bd_mds[posted_md]);
- if (rc != 0) {
- CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
- posted_md, rc);
- rc2 = LNetMEUnlink(me_h);
- LASSERT(rc2 == 0);
- break;
- }
- }
-
- if (rc != 0) {
- LASSERT(rc == -ENOMEM);
- spin_lock(&desc->bd_lock);
- desc->bd_md_count -= total_md - posted_md;
- spin_unlock(&desc->bd_lock);
- LASSERT(desc->bd_md_count >= 0);
- mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
- req->rq_status = -ENOMEM;
- return -ENOMEM;
- }
-
- /* Set rq_xid to matchbits of the final bulk so that server can
- * infer the number of bulks that were prepared */
- req->rq_xid = --xid;
- LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
- "bd_last_xid = x%llu, rq_xid = x%llu\n",
- desc->bd_last_xid, req->rq_xid);
-
- spin_lock(&desc->bd_lock);
- /* Holler if peer manages to touch buffers before he knows the xid */
- if (desc->bd_md_count != total_md)
- CWARN("%s: Peer %s touched %d buffers while I registered\n",
- desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
- total_md - desc->bd_md_count);
- spin_unlock(&desc->bd_lock);
-
- CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n",
- desc->bd_md_count,
- desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
- desc->bd_iov_count, desc->bd_nob,
- desc->bd_last_xid, req->rq_xid, desc->bd_portal);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_register_bulk);
-
-/**
- * Disconnect a bulk desc from the network. Idempotent. Not
- * thread-safe (i.e. only interlocks with completion callback).
- * Returns 1 on success or 0 if network unregistration failed for whatever
- * reason.
- */
-int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
-{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- wait_queue_head_t *wq;
- struct l_wait_info lwi;
- int rc;
-
- LASSERT(!in_interrupt()); /* might sleep */
-
- /* Let's setup deadline for reply unlink. */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- async && req->rq_bulk_deadline == 0)
- req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
-
- if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
- return 1; /* never registered */
-
- LASSERT(desc->bd_req == req); /* bd_req NULL until registered */
-
- /* the unlink ensures the callback happens ASAP and is the last
- * one. If it fails, it must be because completion just happened,
- * but we must still l_wait_event() in this case to give liblustre
- * a chance to run client_bulk_callback() */
- mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
-
- if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
- return 1; /* never registered */
-
- /* Move to "Unregistering" phase as bulk was not unlinked yet. */
- ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
-
- /* Do not wait for unlink to finish. */
- if (async)
- return 0;
-
- if (req->rq_set != NULL)
- wq = &req->rq_set->set_waitq;
- else
- wq = &req->rq_reply_waitq;
-
- for (;;) {
- /* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
- if (rc == 0) {
- ptlrpc_rqphase_move(req, req->rq_next_phase);
- return 1;
- }
-
- LASSERT(rc == -ETIMEDOUT);
- DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
- desc);
- }
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_unregister_bulk);
-
-static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
-{
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, ktime_get_real_seconds() -
- req->rq_arrival_time.tv_sec, 1);
-
- if (!(flags & PTLRPC_REPLY_EARLY) &&
- (req->rq_type != PTL_RPC_MSG_ERR) &&
- (req->rq_reqmsg != NULL) &&
- !(lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_RESENT | MSG_REPLAY |
- MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
- /* early replies, errors and recovery requests don't count
- * toward our service time estimate */
- int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
-
- if (oldse != 0) {
- DEBUG_REQ(D_ADAPTTO, req,
- "svc %s changed estimate from %d to %d",
- svc->srv_name, oldse,
- at_get(&svcpt->scp_at_estimate));
- }
- }
- /* Report actual service time for client latency calc */
- lustre_msg_set_service_time(req->rq_repmsg, service_time);
- /* Report service time estimate for future client reqs, but report 0
- * (to be ignored by client) if it's a error reply during recovery.
- * (bz15815) */
- if (req->rq_type == PTL_RPC_MSG_ERR &&
- (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
- lustre_msg_set_timeout(req->rq_repmsg, 0);
- else
- lustre_msg_set_timeout(req->rq_repmsg,
- at_get(&svcpt->scp_at_estimate));
-
- if (req->rq_reqmsg &&
- !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%x/%x len=%d\n",
- flags, lustre_msg_get_flags(req->rq_reqmsg),
- lustre_msg_get_magic(req->rq_reqmsg),
- lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
- }
-}
-
-/**
- * Send request reply from request \a req reply buffer.
- * \a flags defines reply types
- * Returns 0 on success or error code
- */
-int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct ptlrpc_connection *conn;
- int rc;
-
- /* We must already have a reply buffer (only ptlrpc_error() may be
- * called without one). The reply generated by sptlrpc layer (e.g.
- * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
- * have a request buffer which is either the actual (swabbed) incoming
- * request, or a saved copy if this is a req saved in
- * target_queue_final_reply().
- */
- LASSERT(req->rq_no_reply == 0);
- LASSERT(req->rq_reqbuf != NULL);
- LASSERT(rs != NULL);
- LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
- LASSERT(req->rq_repmsg != NULL);
- LASSERT(req->rq_repmsg == rs->rs_msg);
- LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
- LASSERT(rs->rs_cb_id.cbid_arg == rs);
-
- /* There may be no rq_export during failover */
-
- if (unlikely(req->rq_export && req->rq_export->exp_obd &&
- req->rq_export->exp_obd->obd_fail)) {
- /* Failed obd's only send ENODEV */
- req->rq_type = PTL_RPC_MSG_ERR;
- req->rq_status = -ENODEV;
- CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
- req->rq_export->exp_obd->obd_minor);
- }
-
-	/* In order to keep interoperability with the client (< 2.3) which
-	 * doesn't have pb_jobid in ptlrpc_body, we have to shrink the
-	 * ptlrpc_body in the reply buffer to ptlrpc_body_v2; otherwise, the
-	 * reply buffer on the client will overflow.
- *
- * XXX Remove this whenever we drop the interoperability with
- * such client.
- */
- req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
- sizeof(struct ptlrpc_body_v2), 1);
-
- if (req->rq_type != PTL_RPC_MSG_ERR)
- req->rq_type = PTL_RPC_MSG_REPLY;
-
- lustre_msg_set_type(req->rq_repmsg, req->rq_type);
- lustre_msg_set_status(req->rq_repmsg,
- ptlrpc_status_hton(req->rq_status));
- lustre_msg_set_opc(req->rq_repmsg,
- req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
-
- target_pack_pool_reply(req);
-
- ptlrpc_at_set_reply(req, flags);
-
- if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
- conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
- else
- conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
-
- if (unlikely(conn == NULL)) {
- CERROR("not replying on NULL connection\n"); /* bug 9635 */
- return -ENOTCONN;
- }
- ptlrpc_rs_addref(rs); /* +1 ref for the network */
-
- rc = sptlrpc_svc_wrap_reply(req);
- if (unlikely(rc))
- goto out;
-
- req->rq_sent = ktime_get_real_seconds();
-
- rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
- (rs->rs_difficult && !rs->rs_no_ack) ?
- LNET_ACK_REQ : LNET_NOACK_REQ,
- &rs->rs_cb_id, conn,
- ptlrpc_req2svc(req)->srv_rep_portal,
- req->rq_xid, req->rq_reply_off);
-out:
- if (unlikely(rc != 0))
- ptlrpc_req_drop_rs(req);
- ptlrpc_connection_put(conn);
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_send_reply);
-
-int ptlrpc_reply(struct ptlrpc_request *req)
-{
- if (req->rq_no_reply)
- return 0;
- return ptlrpc_send_reply(req, 0);
-}
-EXPORT_SYMBOL(ptlrpc_reply);
-
-/**
- * For request \a req send an error reply back. Create empty
- * reply buffers if necessary.
- */
-int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
-{
- int rc;
-
- if (req->rq_no_reply)
- return 0;
-
- if (!req->rq_repmsg) {
- rc = lustre_pack_reply(req, 1, NULL, NULL);
- if (rc)
- return rc;
- }
-
- if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
- req->rq_status != -EPERM && req->rq_status != -ENOENT &&
- req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
- req->rq_type = PTL_RPC_MSG_ERR;
-
- rc = ptlrpc_send_reply(req, may_be_difficult);
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_send_error);
-
-int ptlrpc_error(struct ptlrpc_request *req)
-{
- return ptlrpc_send_error(req, 0);
-}
-EXPORT_SYMBOL(ptlrpc_error);
-
-/**
- * Send request \a request.
- * If \a noreply is set, don't expect any reply back and don't set up
- * reply buffers.
- * Returns 0 on success or error code.
- */
-int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
-{
- int rc;
- int rc2;
- int mpflag = 0;
- struct ptlrpc_connection *connection;
- lnet_handle_me_t reply_me_h;
- lnet_md_t reply_md;
- struct obd_device *obd = request->rq_import->imp_obd;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
- return 0;
-
- LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
- LASSERT(request->rq_wait_ctx == 0);
-
- /* If this is a re-transmit, we're required to have disengaged
- * cleanly from the previous attempt */
- LASSERT(!request->rq_receiving_reply);
- LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
- (request->rq_import->imp_state == LUSTRE_IMP_FULL)));
-
- if (unlikely(obd != NULL && obd->obd_fail)) {
- CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
- obd->obd_name);
- /* this prevents us from waiting in ptlrpc_queue_wait */
- spin_lock(&request->rq_lock);
- request->rq_err = 1;
- spin_unlock(&request->rq_lock);
- request->rq_status = -ENODEV;
- return -ENODEV;
- }
-
- connection = request->rq_import->imp_connection;
-
- lustre_msg_set_handle(request->rq_reqmsg,
- &request->rq_import->imp_remote_handle);
- lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
- lustre_msg_set_conn_cnt(request->rq_reqmsg,
- request->rq_import->imp_conn_cnt);
- lustre_msghdr_set_flags(request->rq_reqmsg,
- request->rq_import->imp_msghdr_flags);
-
- if (request->rq_resend)
- lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
-
- if (request->rq_memalloc)
- mpflag = cfs_memory_pressure_get_and_set();
-
- rc = sptlrpc_cli_wrap_request(request);
- if (rc)
- goto out;
-
- /* bulk register should be done after wrap_request() */
- if (request->rq_bulk != NULL) {
- rc = ptlrpc_register_bulk(request);
- if (rc != 0)
- goto out;
- }
-
- if (!noreply) {
- LASSERT(request->rq_replen != 0);
- if (request->rq_repbuf == NULL) {
- LASSERT(request->rq_repdata == NULL);
- LASSERT(request->rq_repmsg == NULL);
- rc = sptlrpc_cli_alloc_repbuf(request,
- request->rq_replen);
- if (rc) {
- /* this prevents us from looping in
- * ptlrpc_queue_wait */
- spin_lock(&request->rq_lock);
- request->rq_err = 1;
- spin_unlock(&request->rq_lock);
- request->rq_status = rc;
- goto cleanup_bulk;
- }
- } else {
- request->rq_repdata = NULL;
- request->rq_repmsg = NULL;
- }
-
- rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
- connection->c_peer, request->rq_xid, 0,
- LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- rc = -ENOMEM;
- goto cleanup_bulk;
- }
- }
-
- spin_lock(&request->rq_lock);
- /* If the MD attach succeeds, there _will_ be a reply_in callback */
- request->rq_receiving_reply = !noreply;
- request->rq_req_unlink = 1;
- /* We are responsible for unlinking the reply buffer */
- request->rq_reply_unlink = !noreply;
- /* Clear any flags that may be present from previous sends. */
- request->rq_replied = 0;
- request->rq_err = 0;
- request->rq_timedout = 0;
- request->rq_net_err = 0;
- request->rq_resend = 0;
- request->rq_restart = 0;
- request->rq_reply_truncate = 0;
- spin_unlock(&request->rq_lock);
-
- if (!noreply) {
- reply_md.start = request->rq_repbuf;
- reply_md.length = request->rq_repbuf_len;
- /* Allow multiple early replies */
- reply_md.threshold = LNET_MD_THRESH_INF;
- /* Manage remote for early replies */
- reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
- LNET_MD_MANAGE_REMOTE |
- LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */
- reply_md.user_ptr = &request->rq_reply_cbid;
- reply_md.eq_handle = ptlrpc_eq_h;
-
- /* We must see the unlink callback to unset rq_reply_unlink,
- so we can't auto-unlink */
- rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
- &request->rq_reply_md_h);
- if (rc != 0) {
- CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT(rc == -ENOMEM);
- spin_lock(&request->rq_lock);
- /* ...but the MD attach didn't succeed... */
- request->rq_receiving_reply = 0;
- spin_unlock(&request->rq_lock);
- rc = -ENOMEM;
- goto cleanup_me;
- }
-
- CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
- request->rq_repbuf_len, request->rq_xid,
- request->rq_reply_portal);
- }
-
- /* add references on request for request_out_callback */
- ptlrpc_request_addref(request);
- if (obd != NULL && obd->obd_svc_stats != NULL)
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- atomic_read(&request->rq_import->imp_inflight));
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
-
- ktime_get_real_ts64(&request->rq_arrival_time);
- request->rq_sent = ktime_get_real_seconds();
- /* We give the server rq_timeout secs to process the req, and
- add the network latency for our local timeout. */
- request->rq_deadline = request->rq_sent + request->rq_timeout +
- ptlrpc_at_get_net_latency(request);
-
- ptlrpc_pinger_sending_on_import(request->rq_import);
-
- DEBUG_REQ(D_INFO, request, "send flg=%x",
- lustre_msg_get_flags(request->rq_reqmsg));
- rc = ptl_send_buf(&request->rq_req_md_h,
- request->rq_reqbuf, request->rq_reqdata_len,
- LNET_NOACK_REQ, &request->rq_req_cbid,
- connection,
- request->rq_request_portal,
- request->rq_xid, 0);
- if (rc == 0)
- goto out;
-
- ptlrpc_req_finished(request);
- if (noreply)
- goto out;
-
- cleanup_me:
- /* MEUnlink is safe; the PUT didn't even get off the ground, and
- * nobody apart from the PUT's target has the right nid+XID to
- * access the reply buffer. */
- rc2 = LNetMEUnlink(reply_me_h);
- LASSERT(rc2 == 0);
- /* UNLINKED callback called synchronously */
- LASSERT(!request->rq_receiving_reply);
-
- cleanup_bulk:
-	/* We do a sync unlink here as there was no real transfer, so the
-	 * chance of a long unlink due to a sluggish net is smaller. */
- ptlrpc_unregister_bulk(request, 0);
- out:
- if (request->rq_memalloc)
- cfs_memory_pressure_restore(mpflag);
- return rc;
-}
-EXPORT_SYMBOL(ptl_send_rpc);
-
-/**
- * Registers a request buffer descriptor for receiving requests.
- */
-int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
-{
- struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
- static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
- int rc;
- lnet_md_t md;
- lnet_handle_me_t me_h;
-
- CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
- service->srv_req_portal);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
- return -ENOMEM;
-
- /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
-	 * which means the buffer can only be attached on the local CPT, and LND
- * threads can find it by grabbing a local lock */
- rc = LNetMEAttach(service->srv_req_portal,
- match_id, 0, ~0, LNET_UNLINK,
- rqbd->rqbd_svcpt->scp_cpt >= 0 ?
- LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- return -ENOMEM;
- }
-
- LASSERT(rqbd->rqbd_refcount == 0);
- rqbd->rqbd_refcount = 1;
-
- md.start = rqbd->rqbd_buffer;
- md.length = service->srv_buf_size;
- md.max_size = service->srv_max_req_size;
- md.threshold = LNET_MD_THRESH_INF;
- md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
- md.user_ptr = &rqbd->rqbd_cbid;
- md.eq_handle = ptlrpc_eq_h;
-
- rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
- if (rc == 0)
- return 0;
-
- CERROR("LNetMDAttach failed: %d;\n", rc);
- LASSERT(rc == -ENOMEM);
- rc = LNetMEUnlink(me_h);
- LASSERT(rc == 0);
- rqbd->rqbd_refcount = 0;
-
- return -ENOMEM;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
deleted file mode 100644
index 2476cf22c184..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ /dev/null
@@ -1,1632 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details. A copy is
- * included in the COPYING file that accompanied this code.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2011 Intel Corporation
- *
- * Copyright 2012 Xyratex Technology Limited
- */
-/*
- * lustre/ptlrpc/nrs.c
- *
- * Network Request Scheduler (NRS)
- *
- * Allows reordering of RPC handling at servers.
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
- */
-/**
- * \addtogroup nrs
- * @{
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lprocfs_status.h"
-#include "../../include/linux/libcfs/libcfs.h"
-#include "ptlrpc_internal.h"
-
-/* XXX: This is just for liblustre. Remove the #if defined directive when the
- * "cfs_" prefix is dropped from cfs_list_head. */
-
-/**
- * NRS core object.
- */
-struct nrs_core nrs_core;
-
-static int nrs_policy_init(struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_desc->pd_ops->op_policy_init != NULL ?
- policy->pol_desc->pd_ops->op_policy_init(policy) : 0;
-}
-
-static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy)
-{
- LASSERT(policy->pol_ref == 0);
- LASSERT(policy->pol_req_queued == 0);
-
- if (policy->pol_desc->pd_ops->op_policy_fini != NULL)
- policy->pol_desc->pd_ops->op_policy_fini(policy);
-}
-
-static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg)
-{
- /**
- * The policy may be stopped, but the lprocfs files and
- * ptlrpc_nrs_policy instances remain present until unregistration time.
- * Do not perform the ctl operation if the policy is stopped, as
- * policy->pol_private will be NULL in such a case.
- */
- if (policy->pol_state == NRS_POL_STATE_STOPPED)
- return -ENODEV;
-
- return policy->pol_desc->pd_ops->op_policy_ctl != NULL ?
- policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) :
- -ENOSYS;
-}
-
-static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
-{
- struct ptlrpc_nrs *nrs = policy->pol_nrs;
-
- if (policy->pol_desc->pd_ops->op_policy_stop != NULL) {
- spin_unlock(&nrs->nrs_lock);
-
- policy->pol_desc->pd_ops->op_policy_stop(policy);
-
- spin_lock(&nrs->nrs_lock);
- }
-
- LASSERT(list_empty(&policy->pol_list_queued));
- LASSERT(policy->pol_req_queued == 0 &&
- policy->pol_req_started == 0);
-
- policy->pol_private = NULL;
-
- policy->pol_state = NRS_POL_STATE_STOPPED;
-
- if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
- module_put(policy->pol_desc->pd_owner);
-}
-
-static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy)
-{
- struct ptlrpc_nrs *nrs = policy->pol_nrs;
-
- if (nrs->nrs_policy_fallback == policy && !nrs->nrs_stopping)
- return -EPERM;
-
- if (policy->pol_state == NRS_POL_STATE_STARTING)
- return -EAGAIN;
-
- /* In progress or already stopped */
- if (policy->pol_state != NRS_POL_STATE_STARTED)
- return 0;
-
- policy->pol_state = NRS_POL_STATE_STOPPING;
-
- /* Immediately make it invisible */
- if (nrs->nrs_policy_primary == policy) {
- nrs->nrs_policy_primary = NULL;
-
- } else {
- LASSERT(nrs->nrs_policy_fallback == policy);
- nrs->nrs_policy_fallback = NULL;
- }
-
- /* I have the only refcount */
- if (policy->pol_ref == 1)
- nrs_policy_stop0(policy);
-
- return 0;
-}
-
-/**
- * Transitions the \a nrs NRS head's primary policy to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING and if the policy has no
- * pending usage references, to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED.
- *
- * \param[in] nrs the NRS head to carry out this operation on
- */
-static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs)
-{
- struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary;
-
- if (tmp == NULL)
- return;
-
- nrs->nrs_policy_primary = NULL;
-
- LASSERT(tmp->pol_state == NRS_POL_STATE_STARTED);
- tmp->pol_state = NRS_POL_STATE_STOPPING;
-
- if (tmp->pol_ref == 0)
- nrs_policy_stop0(tmp);
-}
-
-/**
- * Transitions a policy across the ptlrpc_nrs_pol_state range of values, in
- * response to an lprocfs command to start a policy.
- *
- * If a primary policy different to the current one is specified, this function
- * will transition the new policy to the
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTING and then to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED, and will then transition
- * the old primary policy (if there is one) to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding
- * references on the policy to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED.
- *
- * If the fallback policy is specified, this is taken to indicate an instruction
- * to stop the current primary policy, without substituting it with another
- * primary policy, so the primary policy (if any) is transitioned to
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding
- * references on the policy to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED. In
- * this case, only the fallback policy is left active in the NRS head.
- */
-static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
-{
- struct ptlrpc_nrs *nrs = policy->pol_nrs;
- int rc = 0;
-
- /**
-	 * Don't allow multiple starts at once; this is too complex and has no
-	 * real benefit.
- */
- if (nrs->nrs_policy_starting)
- return -EAGAIN;
-
- LASSERT(policy->pol_state != NRS_POL_STATE_STARTING);
-
- if (policy->pol_state == NRS_POL_STATE_STOPPING)
- return -EAGAIN;
-
- if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) {
- /**
- * This is for cases in which the user sets the policy to the
- * fallback policy (currently fifo for all services); i.e. the
- * user is resetting the policy to the default; so we stop the
- * primary policy, if any.
- */
- if (policy == nrs->nrs_policy_fallback) {
- nrs_policy_stop_primary(nrs);
- return 0;
- }
-
- /**
- * If we reach here, we must be setting up the fallback policy
- * at service startup time, and only a single policy with the
- * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can
- * register with NRS core.
- */
- LASSERT(nrs->nrs_policy_fallback == NULL);
- } else {
- /**
-	 * Shouldn't start a primary policy without a fallback policy.
- */
- if (nrs->nrs_policy_fallback == NULL)
- return -EPERM;
-
- if (policy->pol_state == NRS_POL_STATE_STARTED)
- return 0;
- }
-
- /**
- * Increase the module usage count for policies registering from other
- * modules.
- */
- if (atomic_inc_return(&policy->pol_desc->pd_refs) == 1 &&
- !try_module_get(policy->pol_desc->pd_owner)) {
- atomic_dec(&policy->pol_desc->pd_refs);
- CERROR("NRS: cannot get module for policy %s; is it alive?\n",
- policy->pol_desc->pd_name);
- return -ENODEV;
- }
-
- /**
- * Serialize policy starting across the NRS head
- */
- nrs->nrs_policy_starting = 1;
-
- policy->pol_state = NRS_POL_STATE_STARTING;
-
- if (policy->pol_desc->pd_ops->op_policy_start) {
- spin_unlock(&nrs->nrs_lock);
-
- rc = policy->pol_desc->pd_ops->op_policy_start(policy);
-
- spin_lock(&nrs->nrs_lock);
- if (rc != 0) {
- if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
- module_put(policy->pol_desc->pd_owner);
-
- policy->pol_state = NRS_POL_STATE_STOPPED;
- goto out;
- }
- }
-
- policy->pol_state = NRS_POL_STATE_STARTED;
-
- if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) {
- /**
- * This path is only used at PTLRPC service setup time.
- */
- nrs->nrs_policy_fallback = policy;
- } else {
- /*
- * Try to stop the current primary policy if there is one.
- */
- nrs_policy_stop_primary(nrs);
-
- /**
- * And set the newly-started policy as the primary one.
- */
- nrs->nrs_policy_primary = policy;
- }
-
-out:
- nrs->nrs_policy_starting = 0;
-
- return rc;
-}
-
-/**
- * Increases the policy's usage reference count.
- */
-static inline void nrs_policy_get_locked(struct ptlrpc_nrs_policy *policy)
-{
- policy->pol_ref++;
-}
-
-/**
- * Decreases the policy's usage reference count, and stops the policy if it
- * was already stopping and has no more outstanding usage references (which
- * indicates it has no more queued or started requests, and can be safely
- * stopped).
- */
-static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy)
-{
- LASSERT(policy->pol_ref > 0);
-
- policy->pol_ref--;
- if (unlikely(policy->pol_ref == 0 &&
- policy->pol_state == NRS_POL_STATE_STOPPING))
- nrs_policy_stop0(policy);
-}
-
-static void nrs_policy_put(struct ptlrpc_nrs_policy *policy)
-{
- spin_lock(&policy->pol_nrs->nrs_lock);
- nrs_policy_put_locked(policy);
- spin_unlock(&policy->pol_nrs->nrs_lock);
-}
-
-/**
- * Find and return a policy by name.
- */
-static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs,
- char *name)
-{
- struct ptlrpc_nrs_policy *tmp;
-
- list_for_each_entry(tmp, &nrs->nrs_policy_list, pol_list) {
- if (strncmp(tmp->pol_desc->pd_name, name,
- NRS_POL_NAME_MAX) == 0) {
- nrs_policy_get_locked(tmp);
- return tmp;
- }
- }
- return NULL;
-}
-
-/**
- * Release references for the resource hierarchy moving upwards towards the
- * policy instance resource.
- */
-static void nrs_resource_put(struct ptlrpc_nrs_resource *res)
-{
- struct ptlrpc_nrs_policy *policy = res->res_policy;
-
- if (policy->pol_desc->pd_ops->op_res_put != NULL) {
- struct ptlrpc_nrs_resource *parent;
-
- for (; res != NULL; res = parent) {
- parent = res->res_parent;
- policy->pol_desc->pd_ops->op_res_put(policy, res);
- }
- }
-}
-
-/**
- * Obtains references for each resource in the resource hierarchy for request
- * \a nrq if it is to be handled by \a policy.
- *
- * \param[in] policy the policy
- * \param[in] nrq the request
- * \param[in] moving_req denotes whether this is a call to the function by
- * ldlm_lock_reorder_req(), in order to move \a nrq to
- * the high-priority NRS head; we should not sleep when
- * set.
- *
- * \retval NULL resource hierarchy references not obtained
- * \retval valid-pointer the bottom level of the resource hierarchy
- *
- * \see ptlrpc_nrs_pol_ops::op_res_get()
- */
-static
-struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- bool moving_req)
-{
- /**
- * Set to NULL to traverse the resource hierarchy from the top.
- */
- struct ptlrpc_nrs_resource *res = NULL;
- struct ptlrpc_nrs_resource *tmp = NULL;
- int rc;
-
- while (1) {
- rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res,
- &tmp, moving_req);
- if (rc < 0) {
- if (res != NULL)
- nrs_resource_put(res);
- return NULL;
- }
-
- LASSERT(tmp != NULL);
- tmp->res_parent = res;
- tmp->res_policy = policy;
- res = tmp;
- tmp = NULL;
- /**
- * Return once we have obtained a reference to the bottom level
- * of the resource hierarchy.
- */
- if (rc > 0)
- return res;
- }
-}
-
-/**
- * Obtains resources for the resource hierarchies and policy references for
- * the fallback and current primary policy (if any), that will later be used
- * to handle request \a nrq.
- *
- * \param[in] nrs the NRS head instance that will be handling request \a nrq.
- * \param[in] nrq the request that is being handled.
- * \param[out] resp the array where references to the resource hierarchy are
- * stored.
- * \param[in] moving_req is set when obtaining resources while moving a
- * request from a policy on the regular NRS head to a
- * policy on the HP NRS head (via
- * ldlm_lock_reorder_req()). It signifies that
- * allocations to get resources should be atomic; for
- * a full explanation, see comment in
- * ptlrpc_nrs_pol_ops::op_res_get().
- */
-static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs,
- struct ptlrpc_nrs_request *nrq,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req)
-{
- struct ptlrpc_nrs_policy *primary = NULL;
- struct ptlrpc_nrs_policy *fallback = NULL;
-
- memset(resp, 0, sizeof(resp[0]) * NRS_RES_MAX);
-
- /**
- * Obtain policy references.
- */
- spin_lock(&nrs->nrs_lock);
-
- fallback = nrs->nrs_policy_fallback;
- nrs_policy_get_locked(fallback);
-
- primary = nrs->nrs_policy_primary;
- if (primary != NULL)
- nrs_policy_get_locked(primary);
-
- spin_unlock(&nrs->nrs_lock);
-
- /**
- * Obtain resource hierarchy references.
- */
- resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req);
- LASSERT(resp[NRS_RES_FALLBACK] != NULL);
-
- if (primary != NULL) {
- resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq,
- moving_req);
- /**
- * A primary policy may exist which may not wish to serve a
- * particular request for different reasons; release the
- * reference on the policy as it will not be used for this
- * request.
- */
- if (resp[NRS_RES_PRIMARY] == NULL)
- nrs_policy_put(primary);
- }
-}
-
-/**
- * Releases references to resource hierarchies and policies, because they are no
- * longer required; used when request handling has been completed, or the
- * request is moving to the high priority NRS head.
- *
- * \param resp the resource hierarchy that is being released
- *
- * \see ptlrpc_nrs_req_finalize()
- */
-static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp)
-{
- struct ptlrpc_nrs_policy *pols[NRS_RES_MAX];
- struct ptlrpc_nrs *nrs = NULL;
- int i;
-
- for (i = 0; i < NRS_RES_MAX; i++) {
- if (resp[i] != NULL) {
- pols[i] = resp[i]->res_policy;
- nrs_resource_put(resp[i]);
- resp[i] = NULL;
- } else {
- pols[i] = NULL;
- }
- }
-
- for (i = 0; i < NRS_RES_MAX; i++) {
- if (pols[i] == NULL)
- continue;
-
- if (nrs == NULL) {
- nrs = pols[i]->pol_nrs;
- spin_lock(&nrs->nrs_lock);
- }
- nrs_policy_put_locked(pols[i]);
- }
-
- if (nrs != NULL)
- spin_unlock(&nrs->nrs_lock);
-}
-
-/**
- * Obtains an NRS request from \a policy for handling or examination; the
- * request should be removed in the 'handling' case.
- *
- * Calling into this function implies we already know the policy has a request
- * waiting to be handled.
- *
- * \param[in] policy the policy from which a request is obtained
- * \param[in] peek when set, signifies that we just want to examine the
- * request, and not handle it, so the request is not removed
- * from the policy.
- * \param[in] force when set, it will force a policy to return a request if it
- * has one pending
- *
- * \retval the NRS request to be handled
- */
-static inline
-struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy,
- bool peek, bool force)
-{
- struct ptlrpc_nrs_request *nrq;
-
- LASSERT(policy->pol_req_queued > 0);
-
- nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force);
-
- LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy));
-
- return nrq;
-}
-
-/**
- * Enqueues request \a nrq for later handling, via one of the policies for
- * which resources were earlier obtained via nrs_resource_get_safe(). The
- * function attempts to enqueue the request first on the primary policy
- * (if any), since this is the preferred choice.
- *
- * \param nrq the request being enqueued
- *
- * \see nrs_resource_get_safe()
- */
-static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq)
-{
- struct ptlrpc_nrs_policy *policy;
- int rc;
- int i;
-
- /**
- * Try in descending order, because the primary policy (if any) is
- * the preferred choice.
- */
- for (i = NRS_RES_MAX - 1; i >= 0; i--) {
- if (nrq->nr_res_ptrs[i] == NULL)
- continue;
-
- nrq->nr_res_idx = i;
- policy = nrq->nr_res_ptrs[i]->res_policy;
-
- rc = policy->pol_desc->pd_ops->op_req_enqueue(policy, nrq);
- if (rc == 0) {
- policy->pol_nrs->nrs_req_queued++;
- policy->pol_req_queued++;
- return;
- }
- }
- /**
- * Should never get here, as at least the primary policy's
- * ptlrpc_nrs_pol_ops::op_req_enqueue() implementation should always
- * succeed.
- */
- LBUG();
-}
-
-/**
- * Called when a request has been handled
- *
- * \param[in] nrq the request that has been handled; can be used for
- * job/resource control.
- *
- * \see ptlrpc_nrs_req_stop_nolock()
- */
-static inline void nrs_request_stop(struct ptlrpc_nrs_request *nrq)
-{
- struct ptlrpc_nrs_policy *policy = nrs_request_policy(nrq);
-
- if (policy->pol_desc->pd_ops->op_req_stop)
- policy->pol_desc->pd_ops->op_req_stop(policy, nrq);
-
- LASSERT(policy->pol_nrs->nrs_req_started > 0);
- LASSERT(policy->pol_req_started > 0);
-
- policy->pol_nrs->nrs_req_started--;
- policy->pol_req_started--;
-}
-
-/**
- * Handler for operations that can be carried out on policies.
- *
- * Handles opcodes that are common to all policy types within NRS core, and
- * passes any unknown opcodes to the policy-specific control function.
- *
- * \param[in] nrs the NRS head this policy belongs to.
- * \param[in] name the human-readable policy name; should be the same as
- * ptlrpc_nrs_pol_desc::pd_name.
- * \param[in] opc the opcode of the operation being carried out.
- * \param[in,out] arg can be used to pass information in and out when
- *		     carrying out an operation; usually data that is private to
- * the policy at some level, or generic policy status
- * information.
- *
- * \retval -ve error condition
- * \retval 0 operation was carried out successfully
- */
-static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
- enum ptlrpc_nrs_ctl opc, void *arg)
-{
- struct ptlrpc_nrs_policy *policy;
- int rc = 0;
-
- spin_lock(&nrs->nrs_lock);
-
- policy = nrs_policy_find_locked(nrs, name);
- if (policy == NULL) {
- rc = -ENOENT;
- goto out;
- }
-
- switch (opc) {
- /**
- * Unknown opcode, pass it down to the policy-specific control
- * function for handling.
- */
- default:
- rc = nrs_policy_ctl_locked(policy, opc, arg);
- break;
-
- /**
- * Start \e policy
- */
- case PTLRPC_NRS_CTL_START:
- rc = nrs_policy_start_locked(policy);
- break;
- }
-out:
- if (policy != NULL)
- nrs_policy_put_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
-
- return rc;
-}
-
-/**
- * Unregisters a policy by name.
- *
- * \param[in] nrs the NRS head this policy belongs to.
- * \param[in] name the human-readable policy name; should be the same as
- * ptlrpc_nrs_pol_desc::pd_name
- *
- * \retval -ve error
- * \retval 0 success
- */
-static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
-{
- struct ptlrpc_nrs_policy *policy = NULL;
-
- spin_lock(&nrs->nrs_lock);
-
- policy = nrs_policy_find_locked(nrs, name);
- if (policy == NULL) {
- spin_unlock(&nrs->nrs_lock);
-
- CERROR("Can't find NRS policy %s\n", name);
- return -ENOENT;
- }
-
- if (policy->pol_ref > 1) {
- CERROR("Policy %s is busy with %d references\n", name,
- (int)policy->pol_ref);
- nrs_policy_put_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
- return -EBUSY;
- }
-
- LASSERT(policy->pol_req_queued == 0);
- LASSERT(policy->pol_req_started == 0);
-
- if (policy->pol_state != NRS_POL_STATE_STOPPED) {
- nrs_policy_stop_locked(policy);
- LASSERT(policy->pol_state == NRS_POL_STATE_STOPPED);
- }
-
- list_del(&policy->pol_list);
- nrs->nrs_num_pols--;
-
- nrs_policy_put_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
-
- nrs_policy_fini(policy);
-
- LASSERT(policy->pol_private == NULL);
- kfree(policy);
-
- return 0;
-}
-
-/**
- * Registers a policy from policy descriptor \a desc with NRS head \a nrs.
- *
- * \param[in] nrs the NRS head on which the policy will be registered.
- * \param[in] desc the policy descriptor from which the information will be
- * obtained to register the policy.
- *
- * \retval -ve error
- * \retval 0 success
- */
-static int nrs_policy_register(struct ptlrpc_nrs *nrs,
- struct ptlrpc_nrs_pol_desc *desc)
-{
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_policy *tmp;
- struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
- int rc;
-
- LASSERT(svcpt != NULL);
- LASSERT(desc->pd_ops != NULL);
- LASSERT(desc->pd_ops->op_res_get != NULL);
- LASSERT(desc->pd_ops->op_req_get != NULL);
- LASSERT(desc->pd_ops->op_req_enqueue != NULL);
- LASSERT(desc->pd_ops->op_req_dequeue != NULL);
- LASSERT(desc->pd_compat != NULL);
-
- policy = kzalloc_node(sizeof(*policy), GFP_NOFS,
- cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
- svcpt->scp_cpt));
- if (policy == NULL)
- return -ENOMEM;
-
- policy->pol_nrs = nrs;
- policy->pol_desc = desc;
- policy->pol_state = NRS_POL_STATE_STOPPED;
- policy->pol_flags = desc->pd_flags;
-
- INIT_LIST_HEAD(&policy->pol_list);
- INIT_LIST_HEAD(&policy->pol_list_queued);
-
- rc = nrs_policy_init(policy);
- if (rc != 0) {
- kfree(policy);
- return rc;
- }
-
- spin_lock(&nrs->nrs_lock);
-
- tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name);
- if (tmp != NULL) {
- CERROR("NRS policy %s has been registered, can't register it for %s\n",
- policy->pol_desc->pd_name,
- svcpt->scp_service->srv_name);
- nrs_policy_put_locked(tmp);
-
- spin_unlock(&nrs->nrs_lock);
- nrs_policy_fini(policy);
- kfree(policy);
-
- return -EEXIST;
- }
-
- list_add_tail(&policy->pol_list, &nrs->nrs_policy_list);
- nrs->nrs_num_pols++;
-
- if (policy->pol_flags & PTLRPC_NRS_FL_REG_START)
- rc = nrs_policy_start_locked(policy);
-
- spin_unlock(&nrs->nrs_lock);
-
- if (rc != 0)
- (void) nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
-
- return rc;
-}
-
-/**
- * Enqueue request \a req using one of the policies its resources are referring
- * to.
- *
- * \param[in] req the request to enqueue.
- */
-static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req)
-{
- struct ptlrpc_nrs_policy *policy;
-
- LASSERT(req->rq_nrq.nr_initialized);
- LASSERT(!req->rq_nrq.nr_enqueued);
-
- nrs_request_enqueue(&req->rq_nrq);
- req->rq_nrq.nr_enqueued = 1;
-
- policy = nrs_request_policy(&req->rq_nrq);
- /**
- * Add the policy to the NRS head's list of policies with enqueued
- * requests, if it has not been added there.
- */
- if (unlikely(list_empty(&policy->pol_list_queued)))
- list_add_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
-}
-
-/**
- * Enqueue a request on the high priority NRS head.
- *
- * \param req the request to enqueue.
- */
-static void ptlrpc_nrs_hpreq_add_nolock(struct ptlrpc_request *req)
-{
- int opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- spin_lock(&req->rq_lock);
- req->rq_hp = 1;
- ptlrpc_nrs_req_add_nolock(req);
- if (opc != OBD_PING)
- DEBUG_REQ(D_NET, req, "high priority req");
- spin_unlock(&req->rq_lock);
-}
-
-/**
- * Returns a boolean predicate indicating whether the policy described by
- * \a desc is adequate for use with service \a svc.
- *
- * \param[in] svc the service
- * \param[in] desc the policy descriptor
- *
- * \retval false the policy is not compatible with the service
- * \retval true the policy is compatible with the service
- */
-static inline bool nrs_policy_compatible(const struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
-{
- return desc->pd_compat(svc, desc);
-}
-
-/**
- * Registers all compatible policies in nrs_core.nrs_policies, for NRS head
- * \a nrs.
- *
- * \param[in] nrs the NRS head
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- *
- * \see ptlrpc_service_nrs_setup()
- */
-static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs)
-{
- struct ptlrpc_nrs_pol_desc *desc;
- /* for convenience */
- struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- int rc = -EINVAL;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
- list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
- if (nrs_policy_compatible(svc, desc)) {
- rc = nrs_policy_register(nrs, desc);
- if (rc != 0) {
- CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
- desc->pd_name, svcpt->scp_cpt,
- svc->srv_name, rc);
- /**
- * Fail registration if any of the policies'
- * registration fails.
- */
- break;
- }
- }
- }
-
- return rc;
-}
-
-/**
- * Initializes NRS head \a nrs of service partition \a svcpt, and registers all
- * compatible policies in NRS core, with the NRS head.
- *
- * \param[in] nrs the NRS head
- * \param[in] svcpt the PTLRPC service partition to setup
- *
- * \retval -ve error
- * \retval 0 success
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- */
-static int nrs_svcpt_setup_locked0(struct ptlrpc_nrs *nrs,
- struct ptlrpc_service_part *svcpt)
-{
- enum ptlrpc_nrs_queue_type queue;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
- if (nrs == &svcpt->scp_nrs_reg)
- queue = PTLRPC_NRS_QUEUE_REG;
- else if (nrs == svcpt->scp_nrs_hp)
- queue = PTLRPC_NRS_QUEUE_HP;
- else
- LBUG();
-
- nrs->nrs_svcpt = svcpt;
- nrs->nrs_queue_type = queue;
- spin_lock_init(&nrs->nrs_lock);
- INIT_LIST_HEAD(&nrs->nrs_policy_list);
- INIT_LIST_HEAD(&nrs->nrs_policy_queued);
-
- return nrs_register_policies_locked(nrs);
-}
-
-/**
- * Allocates a regular and optionally a high-priority NRS head (if the service
- * handles high-priority RPCs), and then registers all available compatible
- * policies on those NRS heads.
- *
- * \param[in,out] svcpt the PTLRPC service partition to setup
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- */
-static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_nrs *nrs;
- int rc;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
- /**
- * Initialize the regular NRS head.
- */
- nrs = nrs_svcpt2nrs(svcpt, false);
- rc = nrs_svcpt_setup_locked0(nrs, svcpt);
- if (rc < 0)
- goto out;
-
- /**
- * Optionally allocate a high-priority NRS head.
- */
- if (svcpt->scp_service->srv_ops.so_hpreq_handler == NULL)
- goto out;
-
- svcpt->scp_nrs_hp =
- kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS,
- cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
- svcpt->scp_cpt));
- if (svcpt->scp_nrs_hp == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- nrs = nrs_svcpt2nrs(svcpt, true);
- rc = nrs_svcpt_setup_locked0(nrs, svcpt);
-
-out:
- return rc;
-}
-
-/**
- * Unregisters all policies on all available NRS heads in a service partition;
- * called at PTLRPC service unregistration time.
- *
- * \param[in] svcpt the PTLRPC service partition
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- */
-static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_nrs *nrs;
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_policy *tmp;
- int rc;
- bool hp = false;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
-
-again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
- nrs->nrs_stopping = 1;
-
- list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list,
- pol_list) {
- rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
- LASSERT(rc == 0);
- }
-
- /**
- * If the service partition has an HP NRS head, clean that up as well.
- */
- if (!hp && nrs_svcpt_has_hp(svcpt)) {
- hp = true;
- goto again;
- }
-
- if (hp)
- kfree(nrs);
-}
-
-/**
- * Returns the descriptor for a policy identified by \a name.
- *
- * \param[in] name the policy name
- *
- * \retval the policy descriptor
- * \retval NULL
- */
-static struct ptlrpc_nrs_pol_desc *nrs_policy_find_desc_locked(const char *name)
-{
- struct ptlrpc_nrs_pol_desc *tmp;
-
- list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) {
- if (strncmp(tmp->pd_name, name, NRS_POL_NAME_MAX) == 0)
- return tmp;
- }
- return NULL;
-}
-
-/**
- * Removes the policy from all supported NRS heads of all partitions of all
- * PTLRPC services.
- *
- * \param[in] desc the policy descriptor to unregister
- *
- * \retval -ve error
- * \retval 0 successfully unregistered policy on all supported NRS heads
- *
- * \pre mutex_is_locked(&nrs_core.nrs_mutex)
- * \pre mutex_is_locked(&ptlrpc_all_services_mutex)
- */
-static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
-{
- struct ptlrpc_nrs *nrs;
- struct ptlrpc_service *svc;
- struct ptlrpc_service_part *svcpt;
- int i;
- int rc = 0;
-
- LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
- LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
-
- list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
-
- if (!nrs_policy_compatible(svc, desc) ||
- unlikely(svc->srv_is_stopping))
- continue;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- bool hp = false;
-
-again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
- rc = nrs_policy_unregister(nrs, desc->pd_name);
- /**
- * Ignore -ENOENT as the policy may not have registered
- * successfully on all service partitions.
- */
- if (rc == -ENOENT) {
- rc = 0;
- } else if (rc != 0) {
- CERROR("Failed to unregister NRS policy %s for partition %d of service %s: %d\n",
- desc->pd_name, svcpt->scp_cpt,
- svcpt->scp_service->srv_name, rc);
- return rc;
- }
-
- if (!hp && nrs_svc_has_hp(svc)) {
- hp = true;
- goto again;
- }
- }
-
- if (desc->pd_ops->op_lprocfs_fini != NULL)
- desc->pd_ops->op_lprocfs_fini(svc);
- }
-
- return rc;
-}
-
-/**
- * Registers a new policy with NRS core.
- *
- * The function will only succeed if policy registration with all compatible
- * service partitions (if any) is successful.
- *
- * N.B. This function should be called either at ptlrpc module initialization
- * time when registering a policy that ships with NRS core, or in a
- * module's init() function for policies registering from other modules.
- *
- * \param[in] conf configuration information for the new policy to register
- *
- * \retval -ve error
- * \retval 0 success
- */
-int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
-{
- struct ptlrpc_service *svc;
- struct ptlrpc_nrs_pol_desc *desc;
- int rc = 0;
-
- LASSERT(conf != NULL);
- LASSERT(conf->nc_ops != NULL);
- LASSERT(conf->nc_compat != NULL);
- LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one,
- conf->nc_compat_svc_name != NULL));
- LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0,
- conf->nc_owner != NULL));
-
- conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0';
-
- /**
- * External policies are not allowed to start immediately upon
- * registration, as there is a relatively higher chance that their
- * registration might fail. In such a case, some policy instances may
-	 * already have requests queued when unregistration needs to happen as
-	 * part of cleanup; since there is currently no way to drain requests
- * from a policy unless the service is unregistering, we just disallow
- * this.
- */
- if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) &&
- (conf->nc_flags & (PTLRPC_NRS_FL_FALLBACK |
- PTLRPC_NRS_FL_REG_START))) {
- CERROR("NRS: failing to register policy %s. Please check policy flags; external policies cannot act as fallback policies, or be started immediately upon registration without interaction with lprocfs\n",
- conf->nc_name);
- return -EINVAL;
- }
-
- mutex_lock(&nrs_core.nrs_mutex);
-
- if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) {
- CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n",
- conf->nc_name);
- rc = -EEXIST;
- goto fail;
- }
-
- desc = kzalloc(sizeof(*desc), GFP_NOFS);
- if (!desc) {
- rc = -ENOMEM;
- goto fail;
- }
-
- strncpy(desc->pd_name, conf->nc_name, NRS_POL_NAME_MAX);
- desc->pd_ops = conf->nc_ops;
- desc->pd_compat = conf->nc_compat;
- desc->pd_compat_svc_name = conf->nc_compat_svc_name;
- if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0)
- desc->pd_owner = conf->nc_owner;
- desc->pd_flags = conf->nc_flags;
- atomic_set(&desc->pd_refs, 0);
-
- /**
- * For policies that are held in the same module as NRS (currently
- * ptlrpc), do not register the policy with all compatible services,
- * as the services will not have started at this point, since we are
- * calling from ptlrpc module initialization code. In such cases each
- * service will register all compatible policies later, via
- * ptlrpc_service_nrs_setup().
- */
- if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) == 0)
- goto internal;
-
- /**
- * Register the new policy on all compatible services
- */
- mutex_lock(&ptlrpc_all_services_mutex);
-
- list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
- struct ptlrpc_service_part *svcpt;
- int i;
- int rc2;
-
- if (!nrs_policy_compatible(svc, desc) ||
- unlikely(svc->srv_is_stopping))
- continue;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- struct ptlrpc_nrs *nrs;
- bool hp = false;
-again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
- rc = nrs_policy_register(nrs, desc);
- if (rc != 0) {
- CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
- desc->pd_name, svcpt->scp_cpt,
- svcpt->scp_service->srv_name, rc);
-
- rc2 = nrs_policy_unregister_locked(desc);
- /**
- * Should not fail at this point
- */
- LASSERT(rc2 == 0);
- mutex_unlock(&ptlrpc_all_services_mutex);
- kfree(desc);
- goto fail;
- }
-
- if (!hp && nrs_svc_has_hp(svc)) {
- hp = true;
- goto again;
- }
- }
-
- /**
- * No need to take a reference to other modules here, as we
- * will be calling from the module's init() function.
- */
- if (desc->pd_ops->op_lprocfs_init != NULL) {
- rc = desc->pd_ops->op_lprocfs_init(svc);
- if (rc != 0) {
- rc2 = nrs_policy_unregister_locked(desc);
- /**
- * Should not fail at this point
- */
- LASSERT(rc2 == 0);
- mutex_unlock(&ptlrpc_all_services_mutex);
- kfree(desc);
- goto fail;
- }
- }
- }
-
- mutex_unlock(&ptlrpc_all_services_mutex);
-internal:
- list_add_tail(&desc->pd_list, &nrs_core.nrs_policies);
-fail:
- mutex_unlock(&nrs_core.nrs_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_nrs_policy_register);
-
-/**
- * Setup NRS heads on all service partitions of service \a svc, and register
- * all compatible policies on those NRS heads.
- *
- * To be called from within ptlrpc_register_service().
- * \param[in] svc the service to setup
- *
- * \retval -ve error, the calling logic should eventually call
- * ptlrpc_service_nrs_cleanup() to undo any work performed
- * by this function.
- *
- * \see ptlrpc_register_service()
- * \see ptlrpc_service_nrs_cleanup()
- */
-int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- const struct ptlrpc_nrs_pol_desc *desc;
- int i;
- int rc = 0;
-
- mutex_lock(&nrs_core.nrs_mutex);
-
- /**
- * Initialize NRS heads on all service CPTs.
- */
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- rc = nrs_svcpt_setup_locked(svcpt);
- if (rc != 0)
- goto failed;
- }
-
- /**
- * Set up lprocfs interfaces for all supported policies for the
- * service.
- */
- list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
- if (!nrs_policy_compatible(svc, desc))
- continue;
-
- if (desc->pd_ops->op_lprocfs_init != NULL) {
- rc = desc->pd_ops->op_lprocfs_init(svc);
- if (rc != 0)
- goto failed;
- }
- }
-
-failed:
-
- mutex_unlock(&nrs_core.nrs_mutex);
-
- return rc;
-}
-
-/**
- * Unregisters all policies on all service partitions of service \a svc.
- *
- * \param[in] svc the PTLRPC service to unregister
- */
-void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- const struct ptlrpc_nrs_pol_desc *desc;
- int i;
-
- mutex_lock(&nrs_core.nrs_mutex);
-
- /**
- * Clean up NRS heads on all service partitions
- */
- ptlrpc_service_for_each_part(svcpt, i, svc)
- nrs_svcpt_cleanup_locked(svcpt);
-
- /**
- * Clean up lprocfs interfaces for all supported policies for the
- * service.
- */
- list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
- if (!nrs_policy_compatible(svc, desc))
- continue;
-
- if (desc->pd_ops->op_lprocfs_fini != NULL)
- desc->pd_ops->op_lprocfs_fini(svc);
- }
-
- mutex_unlock(&nrs_core.nrs_mutex);
-}
-
-/**
- * Obtains NRS head resources for request \a req.
- *
- * These could be either on the regular or HP NRS head of \a svcpt; resources
- * taken on the regular head can later be swapped for HP head resources by
- * ldlm_lock_reorder_req().
- *
- * \param[in] svcpt the service partition
- * \param[in] req the request
- * \param[in] hp which NRS head of \a svcpt to use
- */
-void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp)
-{
- struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
-
- memset(&req->rq_nrq, 0, sizeof(req->rq_nrq));
- nrs_resource_get_safe(nrs, &req->rq_nrq, req->rq_nrq.nr_res_ptrs,
- false);
-
- /**
- * It is fine to access \e nr_initialized without locking as there is
- * no contention at this early stage.
- */
- req->rq_nrq.nr_initialized = 1;
-}
-
-/**
- * Releases resources for a request; is called after the request has been
- * handled.
- *
- * \param[in] req the request
- *
- * \see ptlrpc_server_finish_request()
- */
-void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req)
-{
- if (req->rq_nrq.nr_initialized) {
- nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs);
- /* no protection on bit nr_initialized because no
- * contention at this late stage */
- req->rq_nrq.nr_finalized = 1;
- }
-}
-
-void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req)
-{
- if (req->rq_nrq.nr_started)
- nrs_request_stop(&req->rq_nrq);
-}
-
-/**
- * Enqueues request \a req on either the regular or high-priority NRS head
- * of service partition \a svcpt.
- *
- * \param[in] svcpt the service partition
- * \param[in] req the request to be enqueued
- * \param[in] hp whether to enqueue the request on the regular or
- * high-priority NRS head.
- */
-void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp)
-{
- spin_lock(&svcpt->scp_req_lock);
-
- if (hp)
- ptlrpc_nrs_hpreq_add_nolock(req);
- else
- ptlrpc_nrs_req_add_nolock(req);
-
- spin_unlock(&svcpt->scp_req_lock);
-}
-
-static void nrs_request_removed(struct ptlrpc_nrs_policy *policy)
-{
- LASSERT(policy->pol_nrs->nrs_req_queued > 0);
- LASSERT(policy->pol_req_queued > 0);
-
- policy->pol_nrs->nrs_req_queued--;
- policy->pol_req_queued--;
-
- /**
- * If the policy has no more requests queued, remove it from
- * ptlrpc_nrs::nrs_policy_queued.
- */
- if (unlikely(policy->pol_req_queued == 0)) {
- list_del_init(&policy->pol_list_queued);
-
- /**
- * If there are other policies with queued requests, move the
- * current policy to the end so that we can round robin over
- * all policies and drain the requests.
- */
- } else if (policy->pol_req_queued != policy->pol_nrs->nrs_req_queued) {
- LASSERT(policy->pol_req_queued <
- policy->pol_nrs->nrs_req_queued);
-
- list_move_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
- }
-}
-
-/**
- * Obtains a request for handling from an NRS head of service partition
- * \a svcpt.
- *
- * \param[in] svcpt the service partition
- * \param[in] hp whether to obtain a request from the regular or
- * high-priority NRS head.
- * \param[in] peek when set, signifies that we just want to examine the
- * request, and not handle it, so the request is not removed
- * from the policy.
- * \param[in] force when set, it will force a policy to return a request if it
- * has one pending
- *
- * \retval the request to be handled
- * \retval NULL the head has no requests to serve
- */
-struct ptlrpc_request *
-ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp,
- bool peek, bool force)
-{
- struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
- struct ptlrpc_nrs_policy *policy;
- struct ptlrpc_nrs_request *nrq;
-
-	 * Always try to drain requests from all NRS policies even if they are
- * Always try to drain requests from all NRS polices even if they are
- * inactive, because the user can change policy status at runtime.
- */
- list_for_each_entry(policy, &nrs->nrs_policy_queued,
- pol_list_queued) {
- nrq = nrs_request_get(policy, peek, force);
- if (nrq != NULL) {
- if (likely(!peek)) {
- nrq->nr_started = 1;
-
- policy->pol_req_started++;
- policy->pol_nrs->nrs_req_started++;
-
- nrs_request_removed(policy);
- }
-
- return container_of(nrq, struct ptlrpc_request, rq_nrq);
- }
- }
-
- return NULL;
-}
-
-/**
- * Returns whether there are any requests currently enqueued on any of the
- * policies of service partition \a svcpt's NRS head specified by \a hp. Should
- * be called while holding ptlrpc_service_part::scp_req_lock to get a reliable
- * result.
- *
- * \param[in] svcpt the service partition to enquire.
- * \param[in] hp whether the regular or high-priority NRS head is to be
- * enquired.
- *
- * \retval false the indicated NRS head has no enqueued requests.
- * \retval true the indicated NRS head has some enqueued requests.
- */
-bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp)
-{
- struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
-
- return nrs->nrs_req_queued > 0;
-};
-
-/**
- * Carries out a control operation \a opc on the policy identified by the
- * human-readable \a name, on either all partitions, or only on the first
- * partition of service \a svc.
- *
- * \param[in] svc the service the policy belongs to.
- * \param[in] queue whether to carry out the command on the policy which
- * belongs to the regular, high-priority, or both NRS
- * heads of service partitions of \a svc.
- * \param[in] name the policy to act upon, by human-readable name
- * \param[in] opc the opcode of the operation to carry out
- * \param[in] single when set, the operation will only be carried out on the
- * NRS heads of the first service partition of \a svc.
- * This is useful for some policies which e.g. share
- * identical values on the same parameters of different
- * service partitions; when reading these parameters via
- * lprocfs, these policies may just want to obtain and
- * print out the values from the first service partition.
- * Storing these values centrally elsewhere then could be
- * another solution for this.
- * \param[in,out] arg can be used as a generic in/out buffer between control
- * operations and the user environment.
- *
- * \retval -ve error condition
- * \retval 0 operation was carried out successfully
- */
-int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
- enum ptlrpc_nrs_queue_type queue, char *name,
- enum ptlrpc_nrs_ctl opc, bool single, void *arg)
-{
- struct ptlrpc_service_part *svcpt;
- int i;
- int rc = 0;
-
- LASSERT(opc != PTLRPC_NRS_CTL_INVALID);
-
- if ((queue & PTLRPC_NRS_QUEUE_BOTH) == 0)
- return -EINVAL;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
- rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, false), name,
- opc, arg);
- if (rc != 0 || (queue == PTLRPC_NRS_QUEUE_REG &&
- single))
- goto out;
- }
-
- if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
- /**
- * XXX: We could optionally check for
- * nrs_svc_has_hp(svc) here, and return an error if it
- * is false. Right now we rely on the policies' lprocfs
- * handlers that call the present function to make this
- * check; if they fail to do so, they might hit the
- * assertion inside nrs_svcpt2nrs() below.
- */
- rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, true), name,
- opc, arg);
- if (rc != 0 || single)
- goto out;
- }
- }
-out:
- return rc;
-}
-
-
-/* ptlrpc/nrs_fifo.c */
-extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
-
-/**
- * Adds all policies that ship with the ptlrpc module, to NRS core's list of
- * policies \e nrs_core.nrs_policies.
- *
- * \retval 0 all policies have been registered successfully
- * \retval -ve error
- */
-int ptlrpc_nrs_init(void)
-{
- int rc;
-
- mutex_init(&nrs_core.nrs_mutex);
- INIT_LIST_HEAD(&nrs_core.nrs_policies);
-
- rc = ptlrpc_nrs_policy_register(&nrs_conf_fifo);
- if (rc != 0)
- goto fail;
-
-
- return rc;
-fail:
- /**
- * Since no PTLRPC services have been started at this point, all we need
- * to do for cleanup is to free the descriptors.
- */
- ptlrpc_nrs_fini();
-
- return rc;
-}
-
-/**
- * Removes all policy descriptors from nrs_core::nrs_policies, and frees the
- * policy descriptors.
- *
- * Since all PTLRPC services are stopped at this point, there are no more
- * instances of any policies, because each service will have stopped its policy
- * instances in ptlrpc_service_nrs_cleanup(), so we just need to free the
- * descriptors here.
- */
-void ptlrpc_nrs_fini(void)
-{
- struct ptlrpc_nrs_pol_desc *desc;
- struct ptlrpc_nrs_pol_desc *tmp;
-
- list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies,
- pd_list) {
- list_del_init(&desc->pd_list);
- kfree(desc);
- }
-}
-
-/** @} nrs */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
deleted file mode 100644
index 8e21f0cdc8f8..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details. A copy is
- * included in the COPYING file that accompanied this code.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2011 Intel Corporation
- *
- * Copyright 2012 Xyratex Technology Limited
- */
-/*
- * lustre/ptlrpc/nrs_fifo.c
- *
- * Network Request Scheduler (NRS) FIFO policy
- *
- * Handles RPCs in a FIFO manner, as received from the network. This policy is
- * a logical wrapper around previous, non-NRS functionality. It is used as the
- * default and fallback policy for all types of RPCs on all PTLRPC service
- * partitions, for both regular and high-priority NRS heads. Default here means
- * the policy is the one enabled at PTLRPC service partition startup time, and
- * fallback means the policy is used to handle RPCs that are not handled
- * successfully or are not handled at all by any primary policy that may be
- * enabled on a given NRS head.
- *
- * Author: Liang Zhen <liang@whamcloud.com>
- * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
- */
-/**
- * \addtogroup nrs
- * @{
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../../include/linux/libcfs/libcfs.h"
-#include "ptlrpc_internal.h"
-
-/**
- * \name fifo
- *
- * The FIFO policy is a logical wrapper around previous, non-NRS functionality.
- * It schedules RPCs in the same order as they are queued from LNet.
- *
- * @{
- */
-
-#define NRS_POL_NAME_FIFO "fifo"
-
-/**
- * Is called before the policy transitions into
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED; allocates and initializes a
- * policy-specific private data structure.
- *
- * \param[in] policy The policy to start
- *
- * \retval -ENOMEM OOM error
- * \retval 0 success
- *
- * \see nrs_policy_register()
- * \see nrs_policy_ctl()
- */
-static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy)
-{
- struct nrs_fifo_head *head;
-
- head = kzalloc_node(sizeof(*head), GFP_NOFS,
- cfs_cpt_spread_node(nrs_pol2cptab(policy),
- nrs_pol2cptid(policy)));
- if (head == NULL)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&head->fh_list);
- policy->pol_private = head;
- return 0;
-}
-
-/**
- * Is called before the policy transitions into
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED; deallocates the policy-specific
- * private data structure.
- *
- * \param[in] policy The policy to stop
- *
- * \see nrs_policy_stop0()
- */
-static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy)
-{
- struct nrs_fifo_head *head = policy->pol_private;
-
- LASSERT(head != NULL);
- LASSERT(list_empty(&head->fh_list));
-
- kfree(head);
-}
-
-/**
- * Is called for obtaining a FIFO policy resource.
- *
- * \param[in] policy The policy on which the request is being asked for
- * \param[in] nrq The request for which resources are being taken
- * \param[in] parent Parent resource, unused in this policy
- * \param[out] resp Resources references are placed in this array
- * \param[in] moving_req Signifies limited caller context; unused in this
- * policy
- *
- * \retval 1 The FIFO policy only has a one-level resource hierarchy; because
- * it implements a simple scheduling algorithm in which request
- *	     priority is determined by the request arrival order, it does not
- * need to maintain a set of resources that would otherwise be used
- * to calculate a request's priority.
- *
- * \see nrs_resource_get_safe()
- */
-static int nrs_fifo_res_get(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp, bool moving_req)
-{
- /**
- * Just return the resource embedded inside nrs_fifo_head, and end this
- * resource hierarchy reference request.
- */
- *resp = &((struct nrs_fifo_head *)policy->pol_private)->fh_res;
- return 1;
-}
-
-/**
- * Called when getting a request from the FIFO policy for handling, or just
- * peeking; removes the request from the policy when it is to be handled.
- *
- * \param[in] policy The policy
- * \param[in] peek When set, signifies that we just want to examine the
- * request, and not handle it, so the request is not removed
- * from the policy.
- * \param[in] force Force the policy to return a request; unused in this
- * policy
- *
- * \retval The request to be handled; this is the next request in the FIFO
- * queue
- *
- * \see ptlrpc_nrs_req_get_nolock()
- * \see nrs_request_get()
- */
-static
-struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy,
- bool peek, bool force)
-{
- struct nrs_fifo_head *head = policy->pol_private;
- struct ptlrpc_nrs_request *nrq;
-
- nrq = unlikely(list_empty(&head->fh_list)) ? NULL :
- list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
- nr_u.fifo.fr_list);
-
- if (likely(!peek && nrq != NULL)) {
- struct ptlrpc_request *req = container_of(nrq,
- struct ptlrpc_request,
- rq_nrq);
-
- list_del_init(&nrq->nr_u.fifo.fr_list);
-
- CDEBUG(D_RPCTRACE, "NRS start %s request from %s, seq: %llu\n",
- policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer),
- nrq->nr_u.fifo.fr_sequence);
- }
-
- return nrq;
-}
-
-/**
- * Adds request \a nrq to \a policy's list of queued requests
- *
- * \param[in] policy The policy
- * \param[in] nrq The request to add
- *
- * \retval 0 success; nrs_request_enqueue() assumes this function will always
- * succeed
- */
-static int nrs_fifo_req_add(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq)
-{
- struct nrs_fifo_head *head;
-
- head = container_of(nrs_request_resource(nrq), struct nrs_fifo_head,
- fh_res);
- /**
- * Only used for debugging
- */
- nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
- list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
-
- return 0;
-}
-
-/**
- * Removes request \a nrq from \a policy's list of queued requests.
- *
- * \param[in] policy The policy
- * \param[in] nrq The request to remove
- */
-static void nrs_fifo_req_del(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq)
-{
- LASSERT(!list_empty(&nrq->nr_u.fifo.fr_list));
- list_del_init(&nrq->nr_u.fifo.fr_list);
-}
-
-/**
- * Prints a debug statement right before the request \a nrq stops being
- * handled.
- *
- * \param[in] policy The policy handling the request
- * \param[in] nrq The request being handled
- *
- * \see ptlrpc_server_finish_request()
- * \see ptlrpc_nrs_req_stop_nolock()
- */
-static void nrs_fifo_req_stop(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq)
-{
- struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request,
- rq_nrq);
-
- CDEBUG(D_RPCTRACE, "NRS stop %s request from %s, seq: %llu\n",
- policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer),
- nrq->nr_u.fifo.fr_sequence);
-}
-
-/**
- * FIFO policy operations
- */
-static const struct ptlrpc_nrs_pol_ops nrs_fifo_ops = {
- .op_policy_start = nrs_fifo_start,
- .op_policy_stop = nrs_fifo_stop,
- .op_res_get = nrs_fifo_res_get,
- .op_req_get = nrs_fifo_req_get,
- .op_req_enqueue = nrs_fifo_req_add,
- .op_req_dequeue = nrs_fifo_req_del,
- .op_req_stop = nrs_fifo_req_stop,
-};
-
-/**
- * FIFO policy configuration
- */
-struct ptlrpc_nrs_pol_conf nrs_conf_fifo = {
- .nc_name = NRS_POL_NAME_FIFO,
- .nc_ops = &nrs_fifo_ops,
- .nc_compat = nrs_policy_compat_all,
- .nc_flags = PTLRPC_NRS_FL_FALLBACK |
- PTLRPC_NRS_FL_REG_START
-};
-
-/** @} fifo */
-
-/** @} nrs */
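
The FIFO policy above reduces to an intrusive queue with a per-head sequence counter: nrs_fifo_req_add() tags each request and appends it at the tail, and nrs_fifo_req_get() pops from the head. The user-space sketch below mirrors that enqueue/dequeue pattern with hypothetical stand-in types (struct req, struct fifo_head); it only illustrates the scheduling idea and is not part of the deleted kernel code.

#include <stdio.h>

struct req {
	struct req *next;
	unsigned long long seq;		/* mirrors nr_u.fifo.fr_sequence */
};

struct fifo_head {
	struct req *first, *last;
	unsigned long long sequence;	/* mirrors fh_sequence */
};

/* mirrors nrs_fifo_req_add(): tag with the next sequence, append at the tail */
static void fifo_req_add(struct fifo_head *h, struct req *r)
{
	r->seq = h->sequence++;
	r->next = NULL;
	if (h->last)
		h->last->next = r;
	else
		h->first = r;
	h->last = r;
}

/* mirrors nrs_fifo_req_get() with peek=false: remove and return the head */
static struct req *fifo_req_get(struct fifo_head *h)
{
	struct req *r = h->first;

	if (r) {
		h->first = r->next;
		if (!h->first)
			h->last = NULL;
	}
	return r;
}

int main(void)
{
	struct fifo_head head = { NULL, NULL, 0 };
	struct req a, b;
	struct req *r;

	fifo_req_add(&head, &a);
	fifo_req_add(&head, &b);
	while ((r = fifo_req_get(&head)) != NULL)
		printf("handling request seq %llu\n", r->seq);
	return 0;
}
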
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
deleted file mode 100644
index 3241b1f89c50..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ /dev/null
@@ -1,2291 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/pack_generic.c
- *
- * (Un)packing of OST requests
- *
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eeb@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/obd_cksum.h"
-#include "../include/lustre/ll_fiemap.h"
-
-#include "ptlrpc_internal.h"
-
-static inline int lustre_msg_hdr_size_v2(int count)
-{
- return cfs_size_round(offsetof(struct lustre_msg_v2,
- lm_buflens[count]));
-}
-
-int lustre_msg_hdr_size(__u32 magic, int count)
-{
- switch (magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_hdr_size_v2(count);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_hdr_size);
-
-void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- int index)
-{
- if (inout)
- lustre_set_req_swabbed(req, index);
- else
- lustre_set_rep_swabbed(req, index);
-}
-EXPORT_SYMBOL(ptlrpc_buf_set_swabbed);
-
-int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- int index)
-{
- if (inout)
- return (ptlrpc_req_need_swab(req) &&
- !lustre_req_swabbed(req, index));
- else
- return (ptlrpc_rep_need_swab(req) &&
- !lustre_rep_swabbed(req, index));
-}
-EXPORT_SYMBOL(ptlrpc_buf_need_swab);
-
-/* early reply size */
-int lustre_msg_early_size(void)
-{
- static int size;
- if (!size) {
- /* Always reply old ptlrpc_body_v2 to keep interoperability
- * with the old client (< 2.3) which doesn't have pb_jobid
- * in the ptlrpc_body.
- *
- * XXX Remove this whenever we drop interoperability with such
- * client.
- */
- __u32 pblen = sizeof(struct ptlrpc_body_v2);
- size = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
- }
- return size;
-}
-EXPORT_SYMBOL(lustre_msg_early_size);
-
-int lustre_msg_size_v2(int count, __u32 *lengths)
-{
- int size;
- int i;
-
- size = lustre_msg_hdr_size_v2(count);
- for (i = 0; i < count; i++)
- size += cfs_size_round(lengths[i]);
-
- return size;
-}
-EXPORT_SYMBOL(lustre_msg_size_v2);
-
-/* This returns the size of the buffer that is required to hold a lustre_msg
- * with the given sub-buffer lengths.
- * NOTE: this should only be used for NEW requests, and should always be
- * in the form of a v2 request. If this is a connection to a v1
- * target then the first buffer will be stripped because the ptlrpc
- * data is part of the lustre_msg_v1 header. b=14043 */
-int lustre_msg_size(__u32 magic, int count, __u32 *lens)
-{
- __u32 size[] = { sizeof(struct ptlrpc_body) };
-
- if (!lens) {
- LASSERT(count == 1);
- lens = size;
- }
-
- LASSERT(count > 0);
- LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
-
- switch (magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_size_v2(count, lens);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_size);
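
A worked example of the size arithmetic used by lustre_msg_hdr_size_v2() and lustre_msg_size_v2() above: the packed size is the 8-byte-rounded header (fixed fields plus one 32-bit length per sub-buffer) plus each sub-buffer length rounded up to 8 bytes, matching cfs_size_round(). The self-contained sketch below uses a simplified stand-in for struct lustre_msg_v2 and hypothetical buffer lengths; it is an illustration, not the kernel definition.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* same rounding rule as cfs_size_round(): round up to an 8-byte boundary */
static size_t size_round8(size_t v)
{
	return (v + 7) & ~(size_t)7;
}

/* simplified stand-in: fixed header words followed by per-buffer lengths */
struct msg_v2_standin {
	uint32_t lm_header[8];	/* bufcount, secflvr, magic, ... (assumed layout) */
	uint32_t lm_buflens[];
};

static size_t msg_hdr_size(int count)
{
	return size_round8(offsetof(struct msg_v2_standin, lm_buflens) +
			   (size_t)count * sizeof(uint32_t));
}

static size_t msg_size(int count, const uint32_t *lens)
{
	size_t size = msg_hdr_size(count);
	int i;

	for (i = 0; i < count; i++)
		size += size_round8(lens[i]);
	return size;
}

int main(void)
{
	/* hypothetical lengths: a ptlrpc body buffer plus a short name buffer */
	uint32_t lens[] = { 152, 13 };

	printf("header: %zu bytes, packed message: %zu bytes\n",
	       msg_hdr_size(2), msg_size(2, lens));
	return 0;
}
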
-
-/* This is used to determine the size of a buffer that was already packed
- * and will correctly handle the different message formats. */
-int lustre_packed_msg_size(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_packed_msg_size);
-
-void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
- char **bufs)
-{
- char *ptr;
- int i;
-
- msg->lm_bufcount = count;
- /* XXX: lm_secflvr uninitialized here */
- msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
-
- for (i = 0; i < count; i++)
- msg->lm_buflens[i] = lens[i];
-
- if (bufs == NULL)
- return;
-
- ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
- for (i = 0; i < count; i++) {
- char *tmp = bufs[i];
- LOGL(tmp, lens[i], ptr);
- }
-}
-EXPORT_SYMBOL(lustre_init_msg_v2);
-
-static int lustre_pack_request_v2(struct ptlrpc_request *req,
- int count, __u32 *lens, char **bufs)
-{
- int reqlen, rc;
-
- reqlen = lustre_msg_size_v2(count, lens);
-
- rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
- if (rc)
- return rc;
-
- req->rq_reqlen = reqlen;
-
- lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
- lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
- return 0;
-}
-
-int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
- __u32 *lens, char **bufs)
-{
- __u32 size[] = { sizeof(struct ptlrpc_body) };
-
- if (!lens) {
- LASSERT(count == 1);
- lens = size;
- }
-
- LASSERT(count > 0);
- LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
-
- /* only use new format, we don't need to be compatible with 1.4 */
- return lustre_pack_request_v2(req, count, lens, bufs);
-}
-EXPORT_SYMBOL(lustre_pack_request);
-
-#if RS_DEBUG
-LIST_HEAD(ptlrpc_rs_debug_lru);
-spinlock_t ptlrpc_rs_debug_lock;
-
-#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
-do { \
- spin_lock(&ptlrpc_rs_debug_lock); \
- list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
- spin_unlock(&ptlrpc_rs_debug_lock); \
-} while (0)
-
-#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
-do { \
- spin_lock(&ptlrpc_rs_debug_lock); \
- list_del(&(rs)->rs_debug_list); \
- spin_unlock(&ptlrpc_rs_debug_lock); \
-} while (0)
-#else
-# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0)
-# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0)
-#endif
-
-struct ptlrpc_reply_state *
-lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_reply_state *rs = NULL;
-
- spin_lock(&svcpt->scp_rep_lock);
-
- /* See if we have anything in a pool, and wait if nothing */
- while (list_empty(&svcpt->scp_rep_idle)) {
- struct l_wait_info lwi;
- int rc;
-
- spin_unlock(&svcpt->scp_rep_lock);
-		/* If we cannot get anything for a long time, we had better
-		 * bail out instead of waiting indefinitely */
- lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
- rc = l_wait_event(svcpt->scp_rep_waitq,
- !list_empty(&svcpt->scp_rep_idle), &lwi);
- if (rc != 0)
- goto out;
- spin_lock(&svcpt->scp_rep_lock);
- }
-
- rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state, rs_list);
- list_del(&rs->rs_list);
-
- spin_unlock(&svcpt->scp_rep_lock);
-
- memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
- rs->rs_size = svcpt->scp_service->srv_max_reply_size;
- rs->rs_svcpt = svcpt;
- rs->rs_prealloc = 1;
-out:
- return rs;
-}
-
-void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
-
- spin_lock(&svcpt->scp_rep_lock);
- list_add(&rs->rs_list, &svcpt->scp_rep_idle);
- spin_unlock(&svcpt->scp_rep_lock);
- wake_up(&svcpt->scp_rep_waitq);
-}
-
-int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
- __u32 *lens, char **bufs, int flags)
-{
- struct ptlrpc_reply_state *rs;
- int msg_len, rc;
-
- LASSERT(req->rq_reply_state == NULL);
-
- if ((flags & LPRFL_EARLY_REPLY) == 0) {
- spin_lock(&req->rq_lock);
- req->rq_packed_final = 1;
- spin_unlock(&req->rq_lock);
- }
-
- msg_len = lustre_msg_size_v2(count, lens);
- rc = sptlrpc_svc_alloc_rs(req, msg_len);
- if (rc)
- return rc;
-
- rs = req->rq_reply_state;
- atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
- rs->rs_cb_id.cbid_fn = reply_out_callback;
- rs->rs_cb_id.cbid_arg = rs;
- rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
- INIT_LIST_HEAD(&rs->rs_exp_list);
- INIT_LIST_HEAD(&rs->rs_obd_list);
- INIT_LIST_HEAD(&rs->rs_list);
- spin_lock_init(&rs->rs_lock);
-
- req->rq_replen = msg_len;
- req->rq_reply_state = rs;
- req->rq_repmsg = rs->rs_msg;
-
- lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
- lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
-
- PTLRPC_RS_DEBUG_LRU_ADD(rs);
-
- return 0;
-}
-EXPORT_SYMBOL(lustre_pack_reply_v2);
-
-int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
- char **bufs, int flags)
-{
- int rc = 0;
- __u32 size[] = { sizeof(struct ptlrpc_body) };
-
- if (!lens) {
- LASSERT(count == 1);
- lens = size;
- }
-
- LASSERT(count > 0);
- LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
-
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
- break;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n",
- req->rq_reqmsg->lm_magic);
- rc = -EINVAL;
- }
- if (rc != 0)
- CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
- lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
- return rc;
-}
-EXPORT_SYMBOL(lustre_pack_reply_flags);
-
-int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
- char **bufs)
-{
- return lustre_pack_reply_flags(req, count, lens, bufs, 0);
-}
-EXPORT_SYMBOL(lustre_pack_reply);
-
-void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
-{
- int i, offset, buflen, bufcount;
-
- LASSERT(m != NULL);
- LASSERT(n >= 0);
-
- bufcount = m->lm_bufcount;
- if (unlikely(n >= bufcount)) {
- CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
- m, n, bufcount);
- return NULL;
- }
-
- buflen = m->lm_buflens[n];
- if (unlikely(buflen < min_size)) {
- CERROR("msg %p buffer[%d] size %d too small (required %d, opc=%d)\n",
- m, n, buflen, min_size,
- n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
- return NULL;
- }
-
- offset = lustre_msg_hdr_size_v2(bufcount);
- for (i = 0; i < n; i++)
- offset += cfs_size_round(m->lm_buflens[i]);
-
- return (char *)m + offset;
-}
-
-void *lustre_msg_buf(struct lustre_msg *m, int n, int min_size)
-{
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_buf_v2(m, n, min_size);
- default:
- LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n",
- m->lm_magic, m);
- return NULL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_buf);
-
-static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, int segment,
- unsigned int newlen, int move_data)
-{
- char *tail = NULL, *newpos;
- int tail_len = 0, n;
-
- LASSERT(msg);
- LASSERT(msg->lm_bufcount > segment);
- LASSERT(msg->lm_buflens[segment] >= newlen);
-
- if (msg->lm_buflens[segment] == newlen)
- goto out;
-
- if (move_data && msg->lm_bufcount > segment + 1) {
- tail = lustre_msg_buf_v2(msg, segment + 1, 0);
- for (n = segment + 1; n < msg->lm_bufcount; n++)
- tail_len += cfs_size_round(msg->lm_buflens[n]);
- }
-
- msg->lm_buflens[segment] = newlen;
-
- if (tail && tail_len) {
- newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
- LASSERT(newpos <= tail);
- if (newpos != tail)
- memmove(newpos, tail, tail_len);
- }
-out:
- return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
-}
-
-/*
- * For @msg, shrink @segment to size @newlen. If @move_data is non-zero,
- * we also move data forward from @segment + 1.
- *
- * If @newlen == 0, we remove the segment completely, but we still keep the
- * total bufcount the same to avoid possible data moving. This leaves an
- * unused segment of size 0 at the tail, but that's OK.
- *
- * Returns the new msg size after shrinking.
- *
- * CAUTION:
- * + if any buffer higher than @segment has been filled in, shrink must be
- *   called with non-zero @move_data.
- * + the caller should NOT keep pointers to msg buffers higher than @segment
- *   after calling shrink.
- */
-int lustre_shrink_msg(struct lustre_msg *msg, int segment,
- unsigned int newlen, int move_data)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_shrink_msg);
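
To make the move_data case of lustre_shrink_msg_v2() above concrete, the sketch below shrinks the first of two packed segments and slides the 8-byte-rounded tail forward with memmove(), which is the same step the kernel code performs on the message buffers. The flat byte buffer and segment lengths are hypothetical; only the rounding-and-memmove step is being demonstrated.

#include <stdio.h>
#include <string.h>

static size_t round8(size_t v)
{
	return (v + 7) & ~(size_t)7;
}

int main(void)
{
	/* two packed segments: 16 bytes followed by 8 bytes */
	char buf[24] = "segment-zero....tail-b..";
	size_t seg0_len = 16, seg1_len = 8;
	size_t newlen = 8;	/* shrink segment 0 from 16 to 8 bytes */

	/* the tail currently starts after the rounded old segment 0 ... */
	char *tail = buf + round8(seg0_len);
	/* ... and must move to just after the rounded new length */
	char *newpos = buf + round8(newlen);

	memmove(newpos, tail, round8(seg1_len));
	seg0_len = newlen;

	printf("new packed size: %zu bytes\n",
	       round8(seg0_len) + round8(seg1_len));
	return 0;
}
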
-
-void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
-{
- PTLRPC_RS_DEBUG_LRU_DEL(rs);
-
- LASSERT(atomic_read(&rs->rs_refcount) == 0);
- LASSERT(!rs->rs_difficult || rs->rs_handled);
- LASSERT(!rs->rs_on_net);
- LASSERT(!rs->rs_scheduled);
- LASSERT(rs->rs_export == NULL);
- LASSERT(rs->rs_nlocks == 0);
- LASSERT(list_empty(&rs->rs_exp_list));
- LASSERT(list_empty(&rs->rs_obd_list));
-
- sptlrpc_svc_free_rs(rs);
-}
-EXPORT_SYMBOL(lustre_free_reply_state);
-
-static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
-{
- int swabbed, required_len, i;
-
- /* Now we know the sender speaks my language. */
- required_len = lustre_msg_hdr_size_v2(0);
- if (len < required_len) {
- /* can't even look inside the message */
- CERROR("message length %d too small for lustre_msg\n", len);
- return -EINVAL;
- }
-
- swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
-
- if (swabbed) {
- __swab32s(&m->lm_magic);
- __swab32s(&m->lm_bufcount);
- __swab32s(&m->lm_secflvr);
- __swab32s(&m->lm_repsize);
- __swab32s(&m->lm_cksum);
- __swab32s(&m->lm_flags);
- CLASSERT(offsetof(typeof(*m), lm_padding_2) != 0);
- CLASSERT(offsetof(typeof(*m), lm_padding_3) != 0);
- }
-
- required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
- if (len < required_len) {
- /* didn't receive all the buffer lengths */
- CERROR("message length %d too small for %d buflens\n",
- len, m->lm_bufcount);
- return -EINVAL;
- }
-
- for (i = 0; i < m->lm_bufcount; i++) {
- if (swabbed)
- __swab32s(&m->lm_buflens[i]);
- required_len += cfs_size_round(m->lm_buflens[i]);
- }
-
- if (len < required_len) {
- CERROR("len: %d, required_len %d\n", len, required_len);
- CERROR("bufcount: %d\n", m->lm_bufcount);
- for (i = 0; i < m->lm_bufcount; i++)
- CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
- return -EINVAL;
- }
-
- return swabbed;
-}
-
-int __lustre_unpack_msg(struct lustre_msg *m, int len)
-{
- int required_len, rc;
-
-	/* We can provide a slightly better error log if we check the
-	 * message magic and version first. In the future, struct
-	 * lustre_msg may grow, and we'd like to log a version mismatch
-	 * rather than a short message.
-	 */
- required_len = offsetof(struct lustre_msg, lm_magic) +
- sizeof(m->lm_magic);
- if (len < required_len) {
- /* can't even look inside the message */
- CERROR("message length %d too small for magic/version check\n",
- len);
- return -EINVAL;
- }
-
- rc = lustre_unpack_msg_v2(m, len);
-
- return rc;
-}
-EXPORT_SYMBOL(__lustre_unpack_msg);
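
The unpack path above detects a peer of opposite endianness by comparing lm_magic against the byte-swapped form of LUSTRE_MSG_MAGIC_V2 and, if it matches, swabs the header fields in place before using them. The minimal user-space sketch below shows that detect-then-swab idea; the magic constant is quoted from memory for illustration only, and the swap helper stands in for the kernel's __swab32s().

#include <stdio.h>
#include <stdint.h>

#define MSG_MAGIC_V2 0x0BD00BD3u	/* assumed LUSTRE_MSG_MAGIC_V2 value */

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

int main(void)
{
	/* pretend this header arrived from a peer with opposite endianness */
	uint32_t lm_magic = swab32(MSG_MAGIC_V2);
	uint32_t lm_bufcount = swab32(2);
	int swabbed = (lm_magic == swab32(MSG_MAGIC_V2));

	if (swabbed) {		/* mirrors the LUSTRE_MSG_MAGIC_V2_SWABBED check */
		lm_magic = swab32(lm_magic);
		lm_bufcount = swab32(lm_bufcount);
	}
	printf("magic %#x, bufcount %u, swabbed %d\n",
	       lm_magic, lm_bufcount, swabbed);
	return 0;
}
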
-
-int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
-{
- int rc;
- rc = __lustre_unpack_msg(req->rq_reqmsg, len);
- if (rc == 1) {
- lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- rc = 0;
- }
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_unpack_req_msg);
-
-int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
-{
- int rc;
- rc = __lustre_unpack_msg(req->rq_repmsg, len);
- if (rc == 1) {
- lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- rc = 0;
- }
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_unpack_rep_msg);
-
-static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
- const int inout, int offset)
-{
- struct ptlrpc_body *pb;
- struct lustre_msg_v2 *m = inout ? req->rq_reqmsg : req->rq_repmsg;
-
- pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
- if (!pb) {
- CERROR("error unpacking ptlrpc body\n");
- return -EFAULT;
- }
- if (ptlrpc_buf_need_swab(req, inout, offset)) {
- lustre_swab_ptlrpc_body(pb);
- ptlrpc_buf_set_swabbed(req, inout, offset);
- }
-
- if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
- CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
- return -EINVAL;
- }
-
- if (!inout)
- pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
-
- return 0;
-}
-
-int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
-{
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_unpack_ptlrpc_body_v2(req, 1, offset);
- default:
- CERROR("bad lustre msg magic: %08x\n",
- req->rq_reqmsg->lm_magic);
- return -EINVAL;
- }
-}
-
-int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
-{
- switch (req->rq_repmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_unpack_ptlrpc_body_v2(req, 0, offset);
- default:
- CERROR("bad lustre msg magic: %08x\n",
- req->rq_repmsg->lm_magic);
- return -EINVAL;
- }
-}
-
-static inline int lustre_msg_buflen_v2(struct lustre_msg_v2 *m, int n)
-{
- if (n >= m->lm_bufcount)
- return 0;
-
- return m->lm_buflens[n];
-}
-
-/**
- * lustre_msg_buflen - return the length of buffer \a n in message \a m
- * \param m lustre_msg (request or reply) to look at
- * \param n message index (base 0)
- *
- * returns zero for non-existent message indices
- */
-int lustre_msg_buflen(struct lustre_msg *m, int n)
-{
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_msg_buflen_v2(m, n);
- default:
- CERROR("incorrect message magic: %08x\n", m->lm_magic);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_buflen);
-
-/* NB: returns the bufcount for the lustre_msg_v2 format, so if the message is
- * packed in V1 format the result is one bigger (it adds struct ptlrpc_body). */
-int lustre_msg_bufcount(struct lustre_msg *m)
-{
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return m->lm_bufcount;
- default:
- CERROR("incorrect message magic: %08x\n", m->lm_magic);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_bufcount);
-
-char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
-{
- /* max_len == 0 means the string should fill the buffer */
- char *str;
- int slen, blen;
-
- switch (m->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- str = lustre_msg_buf_v2(m, index, 0);
- blen = lustre_msg_buflen_v2(m, index);
- break;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
- }
-
- if (str == NULL) {
- CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
- return NULL;
- }
-
- slen = strnlen(str, blen);
-
- if (slen == blen) { /* not NULL terminated */
- CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n",
- m, index, blen);
- return NULL;
- }
-
- if (max_len == 0) {
- if (slen != blen - 1) {
- CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n",
- m, index, blen, slen);
- return NULL;
- }
- } else if (slen > max_len) {
- CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n",
- m, index, blen, slen, max_len);
- return NULL;
- }
-
- return str;
-}
-EXPORT_SYMBOL(lustre_msg_string);
-
-/* Wrap up the normal fixed length cases */
-static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index,
- int min_size, void *swabber)
-{
- void *ptr = NULL;
-
- LASSERT(msg != NULL);
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- ptr = lustre_msg_buf_v2(msg, index, min_size);
- break;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- }
-
- if (ptr && swabber)
- ((void (*)(void *))swabber)(ptr);
-
- return ptr;
-}
-
-static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
-{
- return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
- sizeof(struct ptlrpc_body_v2));
-}
-
-__u32 lustre_msghdr_get_flags(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- /* already in host endian */
- return msg->lm_flags;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msghdr_get_flags);
-
-void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- msg->lm_flags = flags;
- return;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-__u32 lustre_msg_get_flags(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (pb)
- return pb->pb_flags;
-
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- }
- /* no break */
- default:
- /* flags might be printed in debug code while message
- * uninitialized */
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_flags);
-
-void lustre_msg_add_flags(struct lustre_msg *msg, int flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_flags |= flags;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_add_flags);
-
-void lustre_msg_set_flags(struct lustre_msg *msg, int flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_flags = flags;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_flags);
-
-void lustre_msg_clear_flags(struct lustre_msg *msg, int flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_flags &= ~(flags & MSG_GEN_FLAG_MASK);
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_clear_flags);
-
-__u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (pb)
- return pb->pb_op_flags;
-
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- }
- /* no break */
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_op_flags);
-
-void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_op_flags |= flags;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_add_op_flags);
-
-struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return NULL;
- }
- return &pb->pb_handle;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return NULL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_handle);
-
-__u32 lustre_msg_get_type(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return PTL_RPC_MSG_ERR;
- }
- return pb->pb_type;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return PTL_RPC_MSG_ERR;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_type);
-
-void lustre_msg_add_version(struct lustre_msg *msg, int version)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_version |= version;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_add_version);
-
-__u32 lustre_msg_get_opc(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_opc;
- }
- default:
- CERROR("incorrect message magic: %08x (msg:%p)\n",
- msg->lm_magic, msg);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_opc);
-
-__u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_last_committed;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_last_committed);
-
-__u64 *lustre_msg_get_versions(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return NULL;
- }
- return pb->pb_pre_versions;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return NULL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_versions);
-
-__u64 lustre_msg_get_transno(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_transno;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_transno);
-
-int lustre_msg_get_status(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (pb)
- return pb->pb_status;
-
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- }
- /* no break */
- default:
- /* status might be printed in debug code while message
- * uninitialized */
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_status);
-
-__u64 lustre_msg_get_slv(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return -EINVAL;
- }
- return pb->pb_slv;
- }
- default:
- CERROR("invalid msg magic %08x\n", msg->lm_magic);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_slv);
-
-
-void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return;
- }
- pb->pb_slv = slv;
- return;
- }
- default:
- CERROR("invalid msg magic %x\n", msg->lm_magic);
- return;
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_slv);
-
-__u32 lustre_msg_get_limit(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return -EINVAL;
- }
- return pb->pb_limit;
- }
- default:
- CERROR("invalid msg magic %x\n", msg->lm_magic);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_limit);
-
-
-void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return;
- }
- pb->pb_limit = limit;
- return;
- }
- default:
- CERROR("invalid msg magic %08x\n", msg->lm_magic);
- return;
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_limit);
-
-__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
- }
- return pb->pb_conn_cnt;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
-
-__u32 lustre_msg_get_magic(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return msg->lm_magic;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-EXPORT_SYMBOL(lustre_msg_get_magic);
-
-__u32 lustre_msg_get_timeout(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
-
- }
- return pb->pb_timeout;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return -EPROTO;
- }
-}
-
-__u32 lustre_msg_get_service_time(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- if (!pb) {
- CERROR("invalid msg %p: no ptlrpc body!\n", msg);
- return 0;
-
- }
- return pb->pb_service_time;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-__u32 lustre_msg_get_cksum(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return msg->lm_cksum;
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- __u32 crc;
- unsigned int hsize = 4;
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
- lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF),
- NULL, 0, (unsigned char *)&crc, &hsize);
- return crc;
- }
- default:
- CERROR("incorrect message magic: %08x\n", msg->lm_magic);
- return 0;
- }
-}
-
-void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_handle = *handle;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_handle);
-
-void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_type = type;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_type);
-
-void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_opc = opc;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_opc);
-
-void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_pre_versions[0] = versions[0];
- pb->pb_pre_versions[1] = versions[1];
- pb->pb_pre_versions[2] = versions[2];
- pb->pb_pre_versions[3] = versions[3];
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_versions);
-
-void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_transno = transno;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_transno);
-
-void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_status = status;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_status);
-
-void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_conn_cnt = conn_cnt;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_conn_cnt);
-
-void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_timeout = timeout;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- pb->pb_service_time = service_time;
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2: {
- __u32 opc = lustre_msg_get_opc(msg);
- struct ptlrpc_body *pb;
-
-		/* Don't set jobid for ldlm ast RPCs; they've been shrunk.
- * See the comment in ptlrpc_request_pack(). */
- if (!opc || opc == LDLM_BL_CALLBACK ||
- opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
- return;
-
- pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
- sizeof(struct ptlrpc_body));
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
-
- if (jobid != NULL)
- memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE);
- else if (pb->pb_jobid[0] == '\0')
- lustre_get_jobid(pb->pb_jobid);
- return;
- }
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-EXPORT_SYMBOL(lustre_msg_set_jobid);
-
-void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
-{
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- msg->lm_cksum = cksum;
- return;
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
- }
-}
-
-
-void ptlrpc_request_set_replen(struct ptlrpc_request *req)
-{
- int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
-
- req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
- req->rq_pill.rc_area[RCL_SERVER]);
- if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
- req->rq_reqmsg->lm_repsize = req->rq_replen;
-}
-EXPORT_SYMBOL(ptlrpc_request_set_replen);
-
-/**
- * Send a remote set_info_async.
- *
- * This may go from client to server or server to client.
- */
-int do_set_info_async(struct obd_import *imp,
- int opcode, int version,
- u32 keylen, void *key,
- u32 vallen, void *val,
- struct ptlrpc_request_set *set)
-{
- struct ptlrpc_request *req;
- char *tmp;
- int rc;
-
- req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
- if (req == NULL)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
- RCL_CLIENT, vallen);
- rc = ptlrpc_request_pack(req, version, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
- memcpy(tmp, val, vallen);
-
- ptlrpc_request_set_replen(req);
-
- if (set) {
- ptlrpc_set_add_req(set, req);
- ptlrpc_check_set(NULL, set);
- } else {
- rc = ptlrpc_queue_wait(req);
- ptlrpc_req_finished(req);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(do_set_info_async);
-
-/* byte flipping routines for all wire types declared in
- * lustre_idl.h implemented here.
- */
-void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
-{
- __swab32s(&b->pb_type);
- __swab32s(&b->pb_version);
- __swab32s(&b->pb_opc);
- __swab32s(&b->pb_status);
- __swab64s(&b->pb_last_xid);
- __swab64s(&b->pb_last_seen);
- __swab64s(&b->pb_last_committed);
- __swab64s(&b->pb_transno);
- __swab32s(&b->pb_flags);
- __swab32s(&b->pb_op_flags);
- __swab32s(&b->pb_conn_cnt);
- __swab32s(&b->pb_timeout);
- __swab32s(&b->pb_service_time);
- __swab32s(&b->pb_limit);
- __swab64s(&b->pb_slv);
- __swab64s(&b->pb_pre_versions[0]);
- __swab64s(&b->pb_pre_versions[1]);
- __swab64s(&b->pb_pre_versions[2]);
- __swab64s(&b->pb_pre_versions[3]);
- CLASSERT(offsetof(typeof(*b), pb_padding) != 0);
- /* While we need to maintain compatibility between
-	 * clients and servers without ptlrpc_body_v2 (< 2.3),
- * do not swab any fields beyond pb_jobid, as we are
- * using this swab function for both ptlrpc_body
- * and ptlrpc_body_v2. */
- CLASSERT(offsetof(typeof(*b), pb_jobid) != 0);
-}
-EXPORT_SYMBOL(lustre_swab_ptlrpc_body);
-
-void lustre_swab_connect(struct obd_connect_data *ocd)
-{
- __swab64s(&ocd->ocd_connect_flags);
- __swab32s(&ocd->ocd_version);
- __swab32s(&ocd->ocd_grant);
- __swab64s(&ocd->ocd_ibits_known);
- __swab32s(&ocd->ocd_index);
- __swab32s(&ocd->ocd_brw_size);
- /* ocd_blocksize and ocd_inodespace don't need to be swabbed because
-	 * they are single-byte values */
- __swab16s(&ocd->ocd_grant_extent);
- __swab32s(&ocd->ocd_unused);
- __swab64s(&ocd->ocd_transno);
- __swab32s(&ocd->ocd_group);
- __swab32s(&ocd->ocd_cksum_types);
- __swab32s(&ocd->ocd_instance);
- /* Fields after ocd_cksum_types are only accessible by the receiver
- * if the corresponding flag in ocd_connect_flags is set. Accessing
- * any field after ocd_maxbytes on the receiver without a valid flag
-	 * may result in an out-of-bounds memory access and a kernel oops. */
- if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
- __swab32s(&ocd->ocd_max_easize);
- if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
- __swab64s(&ocd->ocd_maxbytes);
- CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding2) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding6) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding7) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding8) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding9) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingA) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingB) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingC) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingD) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingE) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
-}
-
-void lustre_swab_obdo(struct obdo *o)
-{
- __swab64s(&o->o_valid);
- lustre_swab_ost_id(&o->o_oi);
- __swab64s(&o->o_parent_seq);
- __swab64s(&o->o_size);
- __swab64s(&o->o_mtime);
- __swab64s(&o->o_atime);
- __swab64s(&o->o_ctime);
- __swab64s(&o->o_blocks);
- __swab64s(&o->o_grant);
- __swab32s(&o->o_blksize);
- __swab32s(&o->o_mode);
- __swab32s(&o->o_uid);
- __swab32s(&o->o_gid);
- __swab32s(&o->o_flags);
- __swab32s(&o->o_nlink);
- __swab32s(&o->o_parent_oid);
- __swab32s(&o->o_misc);
- __swab64s(&o->o_ioepoch);
- __swab32s(&o->o_stripe_idx);
- __swab32s(&o->o_parent_ver);
- /* o_handle is opaque */
- /* o_lcookie is swabbed elsewhere */
- __swab32s(&o->o_uid_h);
- __swab32s(&o->o_gid_h);
- __swab64s(&o->o_data_version);
- CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
- CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
- CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
-
-}
-EXPORT_SYMBOL(lustre_swab_obdo);
-
-void lustre_swab_obd_statfs(struct obd_statfs *os)
-{
- __swab64s(&os->os_type);
- __swab64s(&os->os_blocks);
- __swab64s(&os->os_bfree);
- __swab64s(&os->os_bavail);
- __swab64s(&os->os_files);
- __swab64s(&os->os_ffree);
- /* no need to swab os_fsid */
- __swab32s(&os->os_bsize);
- __swab32s(&os->os_namelen);
- __swab64s(&os->os_maxbytes);
- __swab32s(&os->os_state);
- CLASSERT(offsetof(typeof(*os), os_fprecreated) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare2) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare4) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare5) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare6) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare7) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare8) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare9) != 0);
-}
-EXPORT_SYMBOL(lustre_swab_obd_statfs);
-
-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
-{
- lustre_swab_ost_id(&ioo->ioo_oid);
- __swab32s(&ioo->ioo_max_brw);
- __swab32s(&ioo->ioo_bufcnt);
-}
-EXPORT_SYMBOL(lustre_swab_obd_ioobj);
-
-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
-{
- __swab64s(&nbr->offset);
- __swab32s(&nbr->len);
- __swab32s(&nbr->flags);
-}
-EXPORT_SYMBOL(lustre_swab_niobuf_remote);
-
-void lustre_swab_ost_body(struct ost_body *b)
-{
- lustre_swab_obdo(&b->oa);
-}
-EXPORT_SYMBOL(lustre_swab_ost_body);
-
-void lustre_swab_ost_last_id(u64 *id)
-{
- __swab64s(id);
-}
-EXPORT_SYMBOL(lustre_swab_ost_last_id);
-
-void lustre_swab_generic_32s(__u32 *val)
-{
- __swab32s(val);
-}
-EXPORT_SYMBOL(lustre_swab_generic_32s);
-
-void lustre_swab_gl_desc(union ldlm_gl_desc *desc)
-{
- lustre_swab_lu_fid(&desc->lquota_desc.gl_id.qid_fid);
- __swab64s(&desc->lquota_desc.gl_flags);
- __swab64s(&desc->lquota_desc.gl_ver);
- __swab64s(&desc->lquota_desc.gl_hardlimit);
- __swab64s(&desc->lquota_desc.gl_softlimit);
- __swab64s(&desc->lquota_desc.gl_time);
- CLASSERT(offsetof(typeof(desc->lquota_desc), gl_pad2) != 0);
-}
-
-void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
-{
- __swab64s(&lvb->lvb_size);
- __swab64s(&lvb->lvb_mtime);
- __swab64s(&lvb->lvb_atime);
- __swab64s(&lvb->lvb_ctime);
- __swab64s(&lvb->lvb_blocks);
-}
-EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
-
-void lustre_swab_ost_lvb(struct ost_lvb *lvb)
-{
- __swab64s(&lvb->lvb_size);
- __swab64s(&lvb->lvb_mtime);
- __swab64s(&lvb->lvb_atime);
- __swab64s(&lvb->lvb_ctime);
- __swab64s(&lvb->lvb_blocks);
- __swab32s(&lvb->lvb_mtime_ns);
- __swab32s(&lvb->lvb_atime_ns);
- __swab32s(&lvb->lvb_ctime_ns);
- __swab32s(&lvb->lvb_padding);
-}
-EXPORT_SYMBOL(lustre_swab_ost_lvb);
-
-void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
-{
- __swab64s(&lvb->lvb_flags);
- __swab64s(&lvb->lvb_id_may_rel);
- __swab64s(&lvb->lvb_id_rel);
- __swab64s(&lvb->lvb_id_qunit);
- __swab64s(&lvb->lvb_pad1);
-}
-EXPORT_SYMBOL(lustre_swab_lquota_lvb);
-
-void lustre_swab_mdt_body(struct mdt_body *b)
-{
- lustre_swab_lu_fid(&b->fid1);
- lustre_swab_lu_fid(&b->fid2);
- /* handle is opaque */
- __swab64s(&b->valid);
- __swab64s(&b->size);
- __swab64s(&b->mtime);
- __swab64s(&b->atime);
- __swab64s(&b->ctime);
- __swab64s(&b->blocks);
- __swab64s(&b->ioepoch);
- __swab64s(&b->t_state);
- __swab32s(&b->fsuid);
- __swab32s(&b->fsgid);
- __swab32s(&b->capability);
- __swab32s(&b->mode);
- __swab32s(&b->uid);
- __swab32s(&b->gid);
- __swab32s(&b->flags);
- __swab32s(&b->rdev);
- __swab32s(&b->nlink);
- CLASSERT(offsetof(typeof(*b), unused2) != 0);
- __swab32s(&b->suppgid);
- __swab32s(&b->eadatasize);
- __swab32s(&b->aclsize);
- __swab32s(&b->max_mdsize);
- __swab32s(&b->max_cookiesize);
- __swab32s(&b->uid_h);
- __swab32s(&b->gid_h);
- CLASSERT(offsetof(typeof(*b), padding_5) != 0);
-}
-EXPORT_SYMBOL(lustre_swab_mdt_body);
-
-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
-{
- /* handle is opaque */
- __swab64s(&b->ioepoch);
- __swab32s(&b->flags);
- CLASSERT(offsetof(typeof(*b), padding) != 0);
-}
-EXPORT_SYMBOL(lustre_swab_mdt_ioepoch);
-
-void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
-{
- int i;
- __swab32s(&mti->mti_lustre_ver);
- __swab32s(&mti->mti_stripe_index);
- __swab32s(&mti->mti_config_ver);
- __swab32s(&mti->mti_flags);
- __swab32s(&mti->mti_instance);
- __swab32s(&mti->mti_nid_count);
- CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
- for (i = 0; i < MTI_NIDS_MAX; i++)
- __swab64s(&mti->mti_nids[i]);
-}
-EXPORT_SYMBOL(lustre_swab_mgs_target_info);
-
-void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
-{
- int i;
-
- __swab64s(&entry->mne_version);
- __swab32s(&entry->mne_instance);
- __swab32s(&entry->mne_index);
- __swab32s(&entry->mne_length);
-
-	/* mne_nid_(count|type) must be one byte in size because we
-	 * access them without swapping. */
- CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
- CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
-
- /* remove this assertion if ipv6 is supported. */
- LASSERT(entry->mne_nid_type == 0);
- for (i = 0; i < entry->mne_nid_count; i++) {
- CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
- __swab64s(&entry->u.nids[i]);
- }
-}
-EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
-
-void lustre_swab_mgs_config_body(struct mgs_config_body *body)
-{
- __swab64s(&body->mcb_offset);
- __swab32s(&body->mcb_units);
- __swab16s(&body->mcb_type);
-}
-EXPORT_SYMBOL(lustre_swab_mgs_config_body);
-
-void lustre_swab_mgs_config_res(struct mgs_config_res *body)
-{
- __swab64s(&body->mcr_offset);
- __swab64s(&body->mcr_size);
-}
-EXPORT_SYMBOL(lustre_swab_mgs_config_res);
-
-static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
-{
- __swab64s(&i->dqi_bgrace);
- __swab64s(&i->dqi_igrace);
- __swab32s(&i->dqi_flags);
- __swab32s(&i->dqi_valid);
-}
-
-static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
-{
- __swab64s(&b->dqb_ihardlimit);
- __swab64s(&b->dqb_isoftlimit);
- __swab64s(&b->dqb_curinodes);
- __swab64s(&b->dqb_bhardlimit);
- __swab64s(&b->dqb_bsoftlimit);
- __swab64s(&b->dqb_curspace);
- __swab64s(&b->dqb_btime);
- __swab64s(&b->dqb_itime);
- __swab32s(&b->dqb_valid);
- CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
-}
-
-void lustre_swab_obd_quotactl(struct obd_quotactl *q)
-{
- __swab32s(&q->qc_cmd);
- __swab32s(&q->qc_type);
- __swab32s(&q->qc_id);
- __swab32s(&q->qc_stat);
- lustre_swab_obd_dqinfo(&q->qc_dqinfo);
- lustre_swab_obd_dqblk(&q->qc_dqblk);
-}
-EXPORT_SYMBOL(lustre_swab_obd_quotactl);
-
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p)
-{
- __swab32s(&p->rp_uid);
- __swab32s(&p->rp_gid);
- __swab32s(&p->rp_fsuid);
- __swab32s(&p->rp_fsuid_h);
- __swab32s(&p->rp_fsgid);
- __swab32s(&p->rp_fsgid_h);
- __swab32s(&p->rp_access_perm);
- __swab32s(&p->rp_padding);
-};
-EXPORT_SYMBOL(lustre_swab_mdt_remote_perm);
-
-void lustre_swab_fid2path(struct getinfo_fid2path *gf)
-{
- lustre_swab_lu_fid(&gf->gf_fid);
- __swab64s(&gf->gf_recno);
- __swab32s(&gf->gf_linkno);
- __swab32s(&gf->gf_pathlen);
-}
-EXPORT_SYMBOL(lustre_swab_fid2path);
-
-static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
-{
- __swab64s(&fm_extent->fe_logical);
- __swab64s(&fm_extent->fe_physical);
- __swab64s(&fm_extent->fe_length);
- __swab32s(&fm_extent->fe_flags);
- __swab32s(&fm_extent->fe_device);
-}
-
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
-{
- int i;
-
- __swab64s(&fiemap->fm_start);
- __swab64s(&fiemap->fm_length);
- __swab32s(&fiemap->fm_flags);
- __swab32s(&fiemap->fm_mapped_extents);
- __swab32s(&fiemap->fm_extent_count);
- __swab32s(&fiemap->fm_reserved);
-
- for (i = 0; i < fiemap->fm_mapped_extents; i++)
- lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
-}
-EXPORT_SYMBOL(lustre_swab_fiemap);
-
-void lustre_swab_idx_info(struct idx_info *ii)
-{
- __swab32s(&ii->ii_magic);
- __swab32s(&ii->ii_flags);
- __swab16s(&ii->ii_count);
- __swab32s(&ii->ii_attrs);
- lustre_swab_lu_fid(&ii->ii_fid);
- __swab64s(&ii->ii_version);
- __swab64s(&ii->ii_hash_start);
- __swab64s(&ii->ii_hash_end);
- __swab16s(&ii->ii_keysize);
- __swab16s(&ii->ii_recsize);
-}
-
-void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
-{
- __swab32s(&rr->rr_opcode);
- __swab32s(&rr->rr_cap);
- __swab32s(&rr->rr_fsuid);
- /* rr_fsuid_h is unused */
- __swab32s(&rr->rr_fsgid);
- /* rr_fsgid_h is unused */
- __swab32s(&rr->rr_suppgid1);
- /* rr_suppgid1_h is unused */
- __swab32s(&rr->rr_suppgid2);
- /* rr_suppgid2_h is unused */
- lustre_swab_lu_fid(&rr->rr_fid1);
- lustre_swab_lu_fid(&rr->rr_fid2);
- __swab64s(&rr->rr_mtime);
- __swab64s(&rr->rr_atime);
- __swab64s(&rr->rr_ctime);
- __swab64s(&rr->rr_size);
- __swab64s(&rr->rr_blocks);
- __swab32s(&rr->rr_bias);
- __swab32s(&rr->rr_mode);
- __swab32s(&rr->rr_flags);
- __swab32s(&rr->rr_flags_h);
- __swab32s(&rr->rr_umask);
-
- CLASSERT(offsetof(typeof(*rr), rr_padding_4) != 0);
-};
-EXPORT_SYMBOL(lustre_swab_mdt_rec_reint);
-
-void lustre_swab_lov_desc(struct lov_desc *ld)
-{
- __swab32s(&ld->ld_tgt_count);
- __swab32s(&ld->ld_active_tgt_count);
- __swab32s(&ld->ld_default_stripe_count);
- __swab32s(&ld->ld_pattern);
- __swab64s(&ld->ld_default_stripe_size);
- __swab64s(&ld->ld_default_stripe_offset);
- __swab32s(&ld->ld_qos_maxage);
- /* uuid endian insensitive */
-}
-EXPORT_SYMBOL(lustre_swab_lov_desc);
-
-static void print_lum(struct lov_user_md *lum)
-{
- CDEBUG(D_OTHER, "lov_user_md %p:\n", lum);
- CDEBUG(D_OTHER, "\tlmm_magic: %#x\n", lum->lmm_magic);
- CDEBUG(D_OTHER, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
- CDEBUG(D_OTHER, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
- CDEBUG(D_OTHER, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
- CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
- CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
- CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
- lum->lmm_stripe_offset);
-}
-
-static void lustre_swab_lmm_oi(struct ost_id *oi)
-{
- __swab64s(&oi->oi.oi_id);
- __swab64s(&oi->oi.oi_seq);
-}
-
-static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
-{
- __swab32s(&lum->lmm_magic);
- __swab32s(&lum->lmm_pattern);
- lustre_swab_lmm_oi(&lum->lmm_oi);
- __swab32s(&lum->lmm_stripe_size);
- __swab16s(&lum->lmm_stripe_count);
- __swab16s(&lum->lmm_stripe_offset);
- print_lum(lum);
-}
-
-void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
-{
- CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
- lustre_swab_lov_user_md_common(lum);
-}
-EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
-
-void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
-{
- CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
- lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
- /* lmm_pool_name is a char array; no byte swapping needed */
-}
-EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
-
-void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
-{
- CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
- __swab32s(&lmm->lmm_magic);
- __swab32s(&lmm->lmm_pattern);
- lustre_swab_lmm_oi(&lmm->lmm_oi);
- __swab32s(&lmm->lmm_stripe_size);
- __swab16s(&lmm->lmm_stripe_count);
- __swab16s(&lmm->lmm_layout_gen);
-}
-EXPORT_SYMBOL(lustre_swab_lov_mds_md);
-
-void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
- int stripe_count)
-{
- int i;
-
- for (i = 0; i < stripe_count; i++) {
- lustre_swab_ost_id(&(lod[i].l_ost_oi));
- __swab32s(&(lod[i].l_ost_gen));
- __swab32s(&(lod[i].l_ost_idx));
- }
-}
-EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
-
-void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
-{
- int i;
-
- for (i = 0; i < RES_NAME_SIZE; i++)
- __swab64s(&id->name[i]);
-}
-EXPORT_SYMBOL(lustre_swab_ldlm_res_id);
-
-void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
-{
- /* the lock data is a union and the first two fields are always an
-  * extent, so it is OK to process LDLM_EXTENT and LDLM_FLOCK lock
-  * data the same way. */
- __swab64s(&d->l_extent.start);
- __swab64s(&d->l_extent.end);
- __swab64s(&d->l_extent.gid);
- __swab64s(&d->l_flock.lfw_owner);
- __swab32s(&d->l_flock.lfw_pid);
-}
-EXPORT_SYMBOL(lustre_swab_ldlm_policy_data);
-
-void lustre_swab_ldlm_intent(struct ldlm_intent *i)
-{
- __swab64s(&i->opc);
-}
-EXPORT_SYMBOL(lustre_swab_ldlm_intent);
-
-void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
-{
- __swab32s(&r->lr_type);
- CLASSERT(offsetof(typeof(*r), lr_padding) != 0);
- lustre_swab_ldlm_res_id(&r->lr_name);
-}
-EXPORT_SYMBOL(lustre_swab_ldlm_resource_desc);
-
-void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
-{
- lustre_swab_ldlm_resource_desc(&l->l_resource);
- __swab32s(&l->l_req_mode);
- __swab32s(&l->l_granted_mode);
- lustre_swab_ldlm_policy_data(&l->l_policy_data);
-}
-EXPORT_SYMBOL(lustre_swab_ldlm_lock_desc);
-
-void lustre_swab_ldlm_request(struct ldlm_request *rq)
-{
- __swab32s(&rq->lock_flags);
- lustre_swab_ldlm_lock_desc(&rq->lock_desc);
- __swab32s(&rq->lock_count);
- /* lock_handle[] opaque */
-}
-EXPORT_SYMBOL(lustre_swab_ldlm_request);
-
-void lustre_swab_ldlm_reply(struct ldlm_reply *r)
-{
- __swab32s(&r->lock_flags);
- CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
- lustre_swab_ldlm_lock_desc(&r->lock_desc);
- /* lock_handle opaque */
- __swab64s(&r->lock_policy_res1);
- __swab64s(&r->lock_policy_res2);
-}
-EXPORT_SYMBOL(lustre_swab_ldlm_reply);
-
-void lustre_swab_quota_body(struct quota_body *b)
-{
- lustre_swab_lu_fid(&b->qb_fid);
- lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
- __swab32s(&b->qb_flags);
- __swab64s(&b->qb_count);
- __swab64s(&b->qb_usage);
- __swab64s(&b->qb_slv_ver);
-}
-
-/* Dump functions */
-void dump_ioo(struct obd_ioobj *ioo)
-{
- CDEBUG(D_RPCTRACE,
- "obd_ioobj: ioo_oid=" DOSTID ", ioo_max_brw=%#x, ioo_bufct=%d\n",
- POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
- ioo->ioo_bufcnt);
-}
-EXPORT_SYMBOL(dump_ioo);
-
-void dump_rniobuf(struct niobuf_remote *nb)
-{
- CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
- nb->offset, nb->len, nb->flags);
-}
-EXPORT_SYMBOL(dump_rniobuf);
-
-void dump_obdo(struct obdo *oa)
-{
- __u32 valid = oa->o_valid;
-
- CDEBUG(D_RPCTRACE, "obdo: o_valid = %08x\n", valid);
- if (valid & OBD_MD_FLID)
- CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
- if (valid & OBD_MD_FLFID)
- CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
- oa->o_parent_seq);
- if (valid & OBD_MD_FLSIZE)
- CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
- if (valid & OBD_MD_FLMTIME)
- CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
- if (valid & OBD_MD_FLATIME)
- CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
- if (valid & OBD_MD_FLCTIME)
- CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
- if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
- CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
- if (valid & OBD_MD_FLGRANT)
- CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
- if (valid & OBD_MD_FLBLKSZ)
- CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
- if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
- CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
- oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
- (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
- if (valid & OBD_MD_FLUID)
- CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
- if (valid & OBD_MD_FLUID)
- CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
- if (valid & OBD_MD_FLGID)
- CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
- if (valid & OBD_MD_FLGID)
- CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
- if (valid & OBD_MD_FLFLAGS)
- CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
- if (valid & OBD_MD_FLNLINK)
- CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
- else if (valid & OBD_MD_FLCKSUM)
- CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
- oa->o_nlink);
- if (valid & OBD_MD_FLGENER)
- CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
- oa->o_parent_oid);
- if (valid & OBD_MD_FLEPOCH)
- CDEBUG(D_RPCTRACE, "obdo: o_ioepoch = %lld\n",
- oa->o_ioepoch);
- if (valid & OBD_MD_FLFID) {
- CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
- oa->o_stripe_idx);
- CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
- oa->o_parent_ver);
- }
- if (valid & OBD_MD_FLHANDLE)
- CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
- oa->o_handle.cookie);
- if (valid & OBD_MD_FLCOOKIE)
- CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
-}
-
-void dump_ost_body(struct ost_body *ob)
-{
- dump_obdo(&ob->oa);
-}
-EXPORT_SYMBOL(dump_ost_body);
-
-void dump_rcs(__u32 *rc)
-{
- CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
-}
-EXPORT_SYMBOL(dump_rcs);
-
-static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
-{
- LASSERT(req->rq_reqmsg);
-
- switch (req->rq_reqmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
- default:
- CERROR("bad lustre msg magic: %#08X\n",
- req->rq_reqmsg->lm_magic);
- }
- return 0;
-}
-
-static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
-{
- LASSERT(req->rq_repmsg);
-
- switch (req->rq_repmsg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF);
- default:
- /* uninitialized yet */
- return 0;
- }
-}
-
-void _debug_req(struct ptlrpc_request *req,
- struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ...)
-{
- int req_ok = req->rq_reqmsg != NULL;
- int rep_ok = req->rq_repmsg != NULL;
- lnet_nid_t nid = LNET_NID_ANY;
- va_list args;
-
- if (ptlrpc_req_need_swab(req)) {
- req_ok = req_ok && req_ptlrpc_body_swabbed(req);
- rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
- }
-
- if (req->rq_import && req->rq_import->imp_connection)
- nid = req->rq_import->imp_connection->c_peer.nid;
- else if (req->rq_export && req->rq_export->exp_connection)
- nid = req->rq_export->exp_connection->c_peer.nid;
-
- va_start(args, fmt);
- libcfs_debug_vmsg2(msgdata, fmt, args,
- " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
- req, req->rq_xid, req->rq_transno,
- req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
- req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
- req->rq_import ?
- req->rq_import->imp_obd->obd_name :
- req->rq_export ?
- req->rq_export->exp_client_uuid.uuid :
- "<?>",
- libcfs_nid2str(nid),
- req->rq_request_portal, req->rq_reply_portal,
- req->rq_reqlen, req->rq_replen,
- req->rq_early_count, (s64)req->rq_timedout,
- (s64)req->rq_deadline,
- atomic_read(&req->rq_refcount),
- DEBUG_REQ_FLAGS(req),
- req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
- rep_ok ? lustre_msg_get_flags(req->rq_repmsg) : -1,
- req->rq_status,
- rep_ok ? lustre_msg_get_status(req->rq_repmsg) : -1);
- va_end(args);
-}
-EXPORT_SYMBOL(_debug_req);
-
-void lustre_swab_lustre_capa(struct lustre_capa *c)
-{
- lustre_swab_lu_fid(&c->lc_fid);
- __swab64s(&c->lc_opc);
- __swab64s(&c->lc_uid);
- __swab64s(&c->lc_gid);
- __swab32s(&c->lc_flags);
- __swab32s(&c->lc_keyid);
- __swab32s(&c->lc_timeout);
- __swab32s(&c->lc_expiry);
-}
-EXPORT_SYMBOL(lustre_swab_lustre_capa);
-
-void lustre_swab_hsm_user_state(struct hsm_user_state *state)
-{
- __swab32s(&state->hus_states);
- __swab32s(&state->hus_archive_id);
-}
-EXPORT_SYMBOL(lustre_swab_hsm_user_state);
-
-void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
-{
- __swab32s(&hss->hss_valid);
- __swab64s(&hss->hss_setmask);
- __swab64s(&hss->hss_clearmask);
- __swab32s(&hss->hss_archive_id);
-}
-EXPORT_SYMBOL(lustre_swab_hsm_state_set);
-
-static void lustre_swab_hsm_extent(struct hsm_extent *extent)
-{
- __swab64s(&extent->offset);
- __swab64s(&extent->length);
-}
-
-void lustre_swab_hsm_current_action(struct hsm_current_action *action)
-{
- __swab32s(&action->hca_state);
- __swab32s(&action->hca_action);
- lustre_swab_hsm_extent(&action->hca_location);
-}
-EXPORT_SYMBOL(lustre_swab_hsm_current_action);
-
-void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
-{
- lustre_swab_lu_fid(&hui->hui_fid);
- lustre_swab_hsm_extent(&hui->hui_extent);
-}
-EXPORT_SYMBOL(lustre_swab_hsm_user_item);
-
-void lustre_swab_layout_intent(struct layout_intent *li)
-{
- __swab32s(&li->li_opc);
- __swab32s(&li->li_flags);
- __swab64s(&li->li_start);
- __swab64s(&li->li_end);
-}
-EXPORT_SYMBOL(lustre_swab_layout_intent);
-
-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
-{
- lustre_swab_lu_fid(&hpk->hpk_fid);
- __swab64s(&hpk->hpk_cookie);
- __swab64s(&hpk->hpk_extent.offset);
- __swab64s(&hpk->hpk_extent.length);
- __swab16s(&hpk->hpk_flags);
- __swab16s(&hpk->hpk_errval);
-}
-EXPORT_SYMBOL(lustre_swab_hsm_progress_kernel);
-
-void lustre_swab_hsm_request(struct hsm_request *hr)
-{
- __swab32s(&hr->hr_action);
- __swab32s(&hr->hr_archive_id);
- __swab64s(&hr->hr_flags);
- __swab32s(&hr->hr_itemcount);
- __swab32s(&hr->hr_data_len);
-}
-EXPORT_SYMBOL(lustre_swab_hsm_request);
-
-void lustre_swab_update_buf(struct update_buf *ub)
-{
- __swab32s(&ub->ub_magic);
- __swab32s(&ub->ub_count);
-}
-EXPORT_SYMBOL(lustre_swab_update_buf);
-
-void lustre_swab_update_reply_buf(struct update_reply *ur)
-{
- int i;
-
- __swab32s(&ur->ur_version);
- __swab32s(&ur->ur_count);
- for (i = 0; i < ur->ur_count; i++)
- __swab32s(&ur->ur_lens[i]);
-}
-EXPORT_SYMBOL(lustre_swab_update_reply_buf);
-
-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
-{
- __swab64s(&msl->msl_flags);
-}
-EXPORT_SYMBOL(lustre_swab_swap_layouts);
-
-void lustre_swab_close_data(struct close_data *cd)
-{
- lustre_swab_lu_fid(&cd->cd_fid);
- __swab64s(&cd->cd_data_version);
-}
-EXPORT_SYMBOL(lustre_swab_close_data);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
deleted file mode 100644
index e1334c24ebe3..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_import.h"
-
-#include "ptlrpc_internal.h"
-
-
-void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
- int mdidx)
-{
- CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
-
- LASSERT(mdidx < desc->bd_md_max_brw);
- LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
- LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
- LNET_MD_PHYS)));
-
- md->options |= LNET_MD_KIOV;
- md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
- md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
- if (desc->bd_enc_iov)
- md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
- else
- md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
-}
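ptlrpc_fill_bulk_md() above slices the descriptor's page count into per-MD chunks of at most LNET_MAX_IOV entries: chunk mdidx starts at page mdidx * LNET_MAX_IOV and carries whatever remains, capped at LNET_MAX_IOV. A standalone sketch of just that length computation follows; the constant and names are stand-ins, not LNet definitions:

#include <stdio.h>

#define MAX_IOV_PER_MD 256	/* stand-in for LNET_MAX_IOV */

/* Number of iov entries carried by chunk 'mdidx' of a 'total'-entry bulk. */
static int chunk_len(int total, int mdidx)
{
	int len = total - mdidx * MAX_IOV_PER_MD;

	if (len < 0)
		len = 0;
	if (len > MAX_IOV_PER_MD)
		len = MAX_IOV_PER_MD;
	return len;
}

int main(void)
{
	int i;

	/* 600 pages split as 256 + 256 + 88, then 0 for later chunks. */
	for (i = 0; i < 4; i++)
		printf("chunk %d: %d\n", i, chunk_len(600, i));
	return 0;
}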
-
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
- int pageoffset, int len)
-{
- lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
-
- kiov->kiov_page = page;
- kiov->kiov_offset = pageoffset;
- kiov->kiov_len = len;
-
- desc->bd_iov_count++;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
deleted file mode 100644
index 74fab7e21926..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/pinger.c
- *
- * Portal-RPC reconnection and replay operations, for use in recovery.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "ptlrpc_internal.h"
-
-struct mutex pinger_mutex;
-static LIST_HEAD(pinger_imports);
-static struct list_head timeout_list = LIST_HEAD_INIT(timeout_list);
-
-struct ptlrpc_request *
-ptlrpc_prep_ping(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
- LUSTRE_OBD_VERSION, OBD_PING);
- if (req) {
- ptlrpc_request_set_replen(req);
- req->rq_no_resend = req->rq_no_delay = 1;
- }
- return req;
-}
-
-int ptlrpc_obd_ping(struct obd_device *obd)
-{
- int rc;
- struct ptlrpc_request *req;
-
- req = ptlrpc_prep_ping(obd->u.cli.cl_import);
- if (req == NULL)
- return -ENOMEM;
-
- req->rq_send_state = LUSTRE_IMP_FULL;
-
- rc = ptlrpc_queue_wait(req);
-
- ptlrpc_req_finished(req);
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_obd_ping);
-
-static int ptlrpc_ping(struct obd_import *imp)
-{
- struct ptlrpc_request *req;
-
- req = ptlrpc_prep_ping(imp);
- if (req == NULL) {
- CERROR("OOM trying to ping %s->%s\n",
- imp->imp_obd->obd_uuid.uuid,
- obd2cli_tgt(imp->imp_obd));
- return -ENOMEM;
- }
-
- DEBUG_REQ(D_INFO, req, "pinging %s->%s",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- ptlrpcd_add_req(req);
-
- return 0;
-}
-
-static void ptlrpc_update_next_ping(struct obd_import *imp, int soon)
-{
- int time = soon ? PING_INTERVAL_SHORT : PING_INTERVAL;
- if (imp->imp_state == LUSTRE_IMP_DISCON) {
- int dtime = max_t(int, CONNECTION_SWITCH_MIN,
- AT_OFF ? 0 :
- at_get(&imp->imp_at.iat_net_latency));
- time = min(time, dtime);
- }
- imp->imp_next_ping = cfs_time_shift(time);
-}
-
-static inline int imp_is_deactive(struct obd_import *imp)
-{
- return (imp->imp_deactive ||
- OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_IMP_DEACTIVE));
-}
-
-static inline int ptlrpc_next_reconnect(struct obd_import *imp)
-{
- if (imp->imp_server_timeout)
- return cfs_time_shift(obd_timeout / 2);
- else
- return cfs_time_shift(obd_timeout);
-}
-
-static long pinger_check_timeout(unsigned long time)
-{
- struct timeout_item *item;
- unsigned long timeout = PING_INTERVAL;
-
- /* The timeout list is sorted in ascending timeout order */
- mutex_lock(&pinger_mutex);
- list_for_each_entry(item, &timeout_list, ti_chain) {
- int ti_timeout = item->ti_timeout;
- if (timeout > ti_timeout)
- timeout = ti_timeout;
- break;
- }
- mutex_unlock(&pinger_mutex);
-
- return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
- cfs_time_current());
-}
-
-static bool ir_up;
-
-void ptlrpc_pinger_ir_up(void)
-{
- CDEBUG(D_HA, "IR up\n");
- ir_up = true;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_ir_up);
-
-void ptlrpc_pinger_ir_down(void)
-{
- CDEBUG(D_HA, "IR down\n");
- ir_up = false;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_ir_down);
-
-static void ptlrpc_pinger_process_import(struct obd_import *imp,
- unsigned long this_ping)
-{
- int level;
- int force;
- int force_next;
- int suppress;
-
- spin_lock(&imp->imp_lock);
-
- level = imp->imp_state;
- force = imp->imp_force_verify;
- force_next = imp->imp_force_next_verify;
- /*
- * This will be used below only if the import is "FULL".
- */
- suppress = ir_up && OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS);
-
- imp->imp_force_verify = 0;
-
- if (cfs_time_aftereq(imp->imp_next_ping - 5 * CFS_TICK, this_ping) &&
- !force) {
- spin_unlock(&imp->imp_lock);
- return;
- }
-
- imp->imp_force_next_verify = 0;
-
- spin_unlock(&imp->imp_lock);
-
- CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u force %u force_next %u deactive %u pingable %u suppress %u\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(level), level, force, force_next,
- imp->imp_deactive, imp->imp_pingable, suppress);
-
- if (level == LUSTRE_IMP_DISCON && !imp_is_deactive(imp)) {
- /* wait for a while before trying recovery again */
- imp->imp_next_ping = ptlrpc_next_reconnect(imp);
- if (!imp->imp_no_pinger_recover)
- ptlrpc_initiate_recovery(imp);
- } else if (level != LUSTRE_IMP_FULL ||
- imp->imp_obd->obd_no_recov ||
- imp_is_deactive(imp)) {
- CDEBUG(D_HA, "%s->%s: not pinging (in recovery or recovery disabled: %s)\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
- ptlrpc_import_state_name(level));
- if (force) {
- spin_lock(&imp->imp_lock);
- imp->imp_force_verify = 1;
- spin_unlock(&imp->imp_lock);
- }
- } else if ((imp->imp_pingable && !suppress) || force_next || force) {
- ptlrpc_ping(imp);
- }
-}
-
-static int ptlrpc_pinger_main(void *arg)
-{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
-
- /* Record that the thread is running */
- thread_set_flags(thread, SVC_RUNNING);
- wake_up(&thread->t_ctl_waitq);
-
- /* And now, loop forever, pinging as needed. */
- while (1) {
- unsigned long this_ping = cfs_time_current();
- struct l_wait_info lwi;
- long time_to_next_wake;
- struct timeout_item *item;
- struct list_head *iter;
-
- mutex_lock(&pinger_mutex);
- list_for_each_entry(item, &timeout_list, ti_chain) {
- item->ti_cb(item, item->ti_cb_data);
- }
- list_for_each(iter, &pinger_imports) {
- struct obd_import *imp =
- list_entry(iter, struct obd_import,
- imp_pinger_chain);
-
- ptlrpc_pinger_process_import(imp, this_ping);
- /* obd_timeout might have changed */
- if (imp->imp_pingable && imp->imp_next_ping &&
- cfs_time_after(imp->imp_next_ping,
- cfs_time_add(this_ping,
- cfs_time_seconds(PING_INTERVAL))))
- ptlrpc_update_next_ping(imp, 0);
- }
- mutex_unlock(&pinger_mutex);
-
- /* Wait until the next ping time, or until we're stopped. */
- time_to_next_wake = pinger_check_timeout(this_ping);
- /* The ping sent by ptlrpc_send_rpc may get sent out
-  * say .01 second after this.
-  * ptlrpc_pinger_sending_on_import will then set the
-  * next ping time to next_ping + .01 sec, which means
-  * we will SKIP the next ping at next_ping, and the
-  * ping will get sent 2 timeouts from now! Beware. */
- CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n",
- time_to_next_wake,
- cfs_time_add(this_ping,
- cfs_time_seconds(PING_INTERVAL)));
- if (time_to_next_wake > 0) {
- lwi = LWI_TIMEOUT(max_t(long, time_to_next_wake,
- cfs_time_seconds(1)),
- NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopping(thread) ||
- thread_is_event(thread),
- &lwi);
- if (thread_test_and_clear_flags(thread, SVC_STOPPING))
- break;
- /* woken after adding import to reset timer */
- thread_test_and_clear_flags(thread, SVC_EVENT);
- }
- }
-
- thread_set_flags(thread, SVC_STOPPED);
- wake_up(&thread->t_ctl_waitq);
-
- CDEBUG(D_NET, "pinger thread exiting, process %d\n", current_pid());
- return 0;
-}
-
-static struct ptlrpc_thread pinger_thread;
-
-int ptlrpc_start_pinger(void)
-{
- struct l_wait_info lwi = { 0 };
- int rc;
-
- if (!thread_is_init(&pinger_thread) &&
- !thread_is_stopped(&pinger_thread))
- return -EALREADY;
-
- init_waitqueue_head(&pinger_thread.t_ctl_waitq);
-
- strcpy(pinger_thread.t_name, "ll_ping");
-
- rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
- "%s", pinger_thread.t_name));
- if (IS_ERR_VALUE(rc)) {
- CERROR("cannot start thread: %d\n", rc);
- return rc;
- }
- l_wait_event(pinger_thread.t_ctl_waitq,
- thread_is_running(&pinger_thread), &lwi);
-
- return 0;
-}
-
-int ptlrpc_pinger_remove_timeouts(void);
-
-int ptlrpc_stop_pinger(void)
-{
- struct l_wait_info lwi = { 0 };
- int rc = 0;
-
- if (thread_is_init(&pinger_thread) ||
- thread_is_stopped(&pinger_thread))
- return -EALREADY;
-
- ptlrpc_pinger_remove_timeouts();
- thread_set_flags(&pinger_thread, SVC_STOPPING);
- wake_up(&pinger_thread.t_ctl_waitq);
-
- l_wait_event(pinger_thread.t_ctl_waitq,
- thread_is_stopped(&pinger_thread), &lwi);
-
- return rc;
-}
-
-void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
-{
- ptlrpc_update_next_ping(imp, 0);
-}
-EXPORT_SYMBOL(ptlrpc_pinger_sending_on_import);
-
-void ptlrpc_pinger_commit_expected(struct obd_import *imp)
-{
- ptlrpc_update_next_ping(imp, 1);
- assert_spin_locked(&imp->imp_lock);
- /*
- * Avoid reading stale imp_connect_data. When not sure if pings are
- * expected or not on next connection, we assume they are not and force
- * one anyway to guarantee the chance of updating
- * imp_peer_committed_transno.
- */
- if (imp->imp_state != LUSTRE_IMP_FULL ||
- OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS))
- imp->imp_force_next_verify = 1;
-}
-
-int ptlrpc_pinger_add_import(struct obd_import *imp)
-{
- if (!list_empty(&imp->imp_pinger_chain))
- return -EALREADY;
-
- mutex_lock(&pinger_mutex);
- CDEBUG(D_HA, "adding pingable import %s->%s\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- /* if we add to pinger we want recovery on this import */
- imp->imp_obd->obd_no_recov = 0;
- ptlrpc_update_next_ping(imp, 0);
- /* XXX sort, blah blah */
- list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
- class_import_get(imp);
-
- ptlrpc_pinger_wake_up();
- mutex_unlock(&pinger_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_add_import);
-
-int ptlrpc_pinger_del_import(struct obd_import *imp)
-{
- if (list_empty(&imp->imp_pinger_chain))
- return -ENOENT;
-
- mutex_lock(&pinger_mutex);
- list_del_init(&imp->imp_pinger_chain);
- CDEBUG(D_HA, "removing pingable import %s->%s\n",
- imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
- /* if we remove from pinger we don't want recovery on this import */
- imp->imp_obd->obd_no_recov = 1;
- class_import_put(imp);
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_pinger_del_import);
-
-/**
- * Register a timeout callback on the pinger's timeout list; the callback
- * will be called when the timeout expires.
- */
-static struct timeout_item *ptlrpc_new_timeout(int time,
- enum timeout_event event, timeout_cb_t cb, void *data)
-{
- struct timeout_item *ti;
-
- ti = kzalloc(sizeof(*ti), GFP_NOFS);
- if (!ti)
- return NULL;
-
- INIT_LIST_HEAD(&ti->ti_obd_list);
- INIT_LIST_HEAD(&ti->ti_chain);
- ti->ti_timeout = time;
- ti->ti_event = event;
- ti->ti_cb = cb;
- ti->ti_cb_data = data;
-
- return ti;
-}
-
-/**
- * Register timeout event on the pinger thread.
- * Note: the timeout list is kept sorted by increasing timeout value.
- */
-static struct timeout_item*
-ptlrpc_pinger_register_timeout(int time, enum timeout_event event,
- timeout_cb_t cb, void *data)
-{
- struct timeout_item *item, *tmp;
-
- LASSERT(mutex_is_locked(&pinger_mutex));
-
- list_for_each_entry(item, &timeout_list, ti_chain)
- if (item->ti_event == event)
- goto out;
-
- item = ptlrpc_new_timeout(time, event, cb, data);
- if (item) {
- list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
- if (tmp->ti_timeout < time) {
- list_add(&item->ti_chain, &tmp->ti_chain);
- goto out;
- }
- }
- list_add(&item->ti_chain, &timeout_list);
- }
-out:
- return item;
-}
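Both pinger_check_timeout() and ptlrpc_pinger_register_timeout() above rely on timeout_list staying sorted by ascending ti_timeout, so the head entry always yields the shortest wait. The following is a minimal userspace sketch of that sorted-insert invariant with made-up names, using plain pointers rather than the kernel list_head API:

#include <stdio.h>
#include <stdlib.h>

struct timeout_entry {
	int timeout;
	struct timeout_entry *next;
};

/* Insert so that the list stays sorted by increasing timeout. */
static void timeout_insert_sorted(struct timeout_entry **head,
				  struct timeout_entry *item)
{
	struct timeout_entry **pos = head;

	while (*pos && (*pos)->timeout < item->timeout)
		pos = &(*pos)->next;
	item->next = *pos;
	*pos = item;
}

int main(void)
{
	struct timeout_entry *head = NULL;
	int values[] = { 30, 10, 20 };
	unsigned int i;

	for (i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
		struct timeout_entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->timeout = values[i];
		timeout_insert_sorted(&head, e);
	}
	/* Prints 10 20 30: the head always holds the shortest timeout. */
	for (; head; head = head->next)
		printf("%d ", head->timeout);
	printf("\n");
	return 0;
}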
-
-/* Add a client_obd to the timeout event list; when the timeout (@time)
- * expires, the callback (@cb) will be called.
- */
-int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- struct list_head *obd_list)
-{
- struct timeout_item *ti;
-
- mutex_lock(&pinger_mutex);
- ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
- if (!ti) {
- mutex_unlock(&pinger_mutex);
- return -EINVAL;
- }
- list_add(obd_list, &ti->ti_obd_list);
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_add_timeout_client);
-
-int ptlrpc_del_timeout_client(struct list_head *obd_list,
- enum timeout_event event)
-{
- struct timeout_item *ti = NULL, *item;
-
- if (list_empty(obd_list))
- return 0;
- mutex_lock(&pinger_mutex);
- list_del_init(obd_list);
- /*
- * If no obd remains attached to this timeout event,
- * remove the event from the pinger's timeout list.
- */
- list_for_each_entry(item, &timeout_list, ti_chain) {
- if (item->ti_event == event) {
- ti = item;
- break;
- }
- }
- LASSERTF(ti != NULL, "ti is NULL !\n");
- if (list_empty(&ti->ti_obd_list)) {
- list_del(&ti->ti_chain);
- kfree(ti);
- }
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_del_timeout_client);
-
-int ptlrpc_pinger_remove_timeouts(void)
-{
- struct timeout_item *item, *tmp;
-
- mutex_lock(&pinger_mutex);
- list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
- LASSERT(list_empty(&item->ti_obd_list));
- list_del(&item->ti_chain);
- kfree(item);
- }
- mutex_unlock(&pinger_mutex);
- return 0;
-}
-
-void ptlrpc_pinger_wake_up(void)
-{
- thread_add_flags(&pinger_thread, SVC_EVENT);
- wake_up(&pinger_thread.t_ctl_waitq);
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
deleted file mode 100644
index ab6c4580f91c..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/* Intramodule declarations for ptlrpc. */
-
-#ifndef PTLRPC_INTERNAL_H
-#define PTLRPC_INTERNAL_H
-
-#include "../ldlm/ldlm_internal.h"
-
-struct ldlm_namespace;
-struct obd_import;
-struct ldlm_res_id;
-struct ptlrpc_request_set;
-extern int test_req_buffer_pressure;
-extern struct mutex ptlrpc_all_services_mutex;
-extern struct list_head ptlrpc_all_services;
-
-extern struct mutex ptlrpcd_mutex;
-extern struct mutex pinger_mutex;
-
-int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);
-/* ptlrpcd.c */
-int ptlrpcd_start(struct ptlrpcd_ctl *pc);
-
-/* client.c */
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal);
-int ptlrpc_request_cache_init(void);
-void ptlrpc_request_cache_fini(void);
-struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
-void ptlrpc_request_cache_free(struct ptlrpc_request *req);
-void ptlrpc_init_xid(void);
-
-/* events.c */
-int ptlrpc_init_portals(void);
-void ptlrpc_exit_portals(void);
-
-void ptlrpc_request_handle_notconn(struct ptlrpc_request *);
-void lustre_assert_wire_constants(void);
-int ptlrpc_import_in_recovery(struct obd_import *imp);
-int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt);
-void ptlrpc_handle_failed_import(struct obd_import *imp);
-int ptlrpc_replay_next(struct obd_import *imp, int *inflight);
-void ptlrpc_initiate_recovery(struct obd_import *imp);
-
-int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset);
-int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset);
-
-int ptlrpc_sysfs_register_service(struct kset *parent,
- struct ptlrpc_service *svc);
-void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc);
-
-void ptlrpc_ldebugfs_register_service(struct dentry *debugfs_entry,
- struct ptlrpc_service *svc);
-void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc);
-void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount);
-void ptlrpc_lprocfs_do_request_stat(struct ptlrpc_request *req,
- long q_usec, long work_usec);
-
-/* NRS */
-
-/**
- * NRS core object.
- *
- * Holds NRS core fields.
- */
-struct nrs_core {
- /**
- * Protects nrs_core::nrs_policies, serializes external policy
- * registration/unregistration, and NRS core lprocfs operations.
- */
- struct mutex nrs_mutex;
- /* XXX: This is just for liblustre. Remove the #if defined directive
- * when the "cfs_" prefix is dropped from cfs_list_head. */
- /**
- * List of all policy descriptors registered with NRS core; protected
- * by nrs_core::nrs_mutex.
- */
- struct list_head nrs_policies;
-
-};
-
-extern struct nrs_core nrs_core;
-
-int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc);
-void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc);
-
-void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp);
-void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req);
-void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req);
-void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req, bool hp);
-
-struct ptlrpc_request *
-ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp,
- bool peek, bool force);
-
-static inline struct ptlrpc_request *
-ptlrpc_nrs_req_get_nolock(struct ptlrpc_service_part *svcpt, bool hp,
- bool force)
-{
- return ptlrpc_nrs_req_get_nolock0(svcpt, hp, false, force);
-}
-
-bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp);
-
-int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
- enum ptlrpc_nrs_queue_type queue, char *name,
- enum ptlrpc_nrs_ctl opc, bool single, void *arg);
-
-int ptlrpc_nrs_init(void);
-void ptlrpc_nrs_fini(void);
-
-static inline bool nrs_svcpt_has_hp(const struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_nrs_hp != NULL;
-}
-
-static inline bool nrs_svc_has_hp(const struct ptlrpc_service *svc)
-{
- /**
- * If the first service partition has an HP NRS head, all service
- * partitions will.
- */
- return nrs_svcpt_has_hp(svc->srv_parts[0]);
-}
-
-static inline
-struct ptlrpc_nrs *nrs_svcpt2nrs(struct ptlrpc_service_part *svcpt, bool hp)
-{
- LASSERT(ergo(hp, nrs_svcpt_has_hp(svcpt)));
- return hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
-}
-
-static inline int nrs_pol2cptid(const struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_nrs->nrs_svcpt->scp_cpt;
-}
-
-static inline
-struct ptlrpc_service *nrs_pol2svc(struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_nrs->nrs_svcpt->scp_service;
-}
-
-static inline
-struct ptlrpc_service_part *nrs_pol2svcpt(struct ptlrpc_nrs_policy *policy)
-{
- return policy->pol_nrs->nrs_svcpt;
-}
-
-static inline
-struct cfs_cpt_table *nrs_pol2cptab(struct ptlrpc_nrs_policy *policy)
-{
- return nrs_pol2svc(policy)->srv_cptable;
-}
-
-static inline struct ptlrpc_nrs_resource *
-nrs_request_resource(struct ptlrpc_nrs_request *nrq)
-{
- LASSERT(nrq->nr_initialized);
- LASSERT(!nrq->nr_finalized);
-
- return nrq->nr_res_ptrs[nrq->nr_res_idx];
-}
-
-static inline
-struct ptlrpc_nrs_policy *nrs_request_policy(struct ptlrpc_nrs_request *nrq)
-{
- return nrs_request_resource(nrq)->res_policy;
-}
-
-#define NRS_LPROCFS_QUANTUM_NAME_REG "reg_quantum:"
-#define NRS_LPROCFS_QUANTUM_NAME_HP "hp_quantum:"
-
-/**
- * the maximum size of nrs_crrn_client::cc_quantum and nrs_orr_data::od_quantum.
- */
-#define LPROCFS_NRS_QUANTUM_MAX 65535
-
-/**
- * Max valid command string is the size of the labels, plus "65535" twice, plus
- * a separating space character.
- */
-#define LPROCFS_NRS_WR_QUANTUM_MAX_CMD \
- sizeof(NRS_LPROCFS_QUANTUM_NAME_REG __stringify(LPROCFS_NRS_QUANTUM_MAX) " " \
- NRS_LPROCFS_QUANTUM_NAME_HP __stringify(LPROCFS_NRS_QUANTUM_MAX))
-
-/* recovd_thread.c */
-
-int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
-
-/* pers.c */
-void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
- int mdcnt);
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
- int pageoffset, int len);
-
-/* pack_generic.c */
-struct ptlrpc_reply_state *
-lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt);
-void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs);
-
-/* pinger.c */
-int ptlrpc_start_pinger(void);
-int ptlrpc_stop_pinger(void);
-void ptlrpc_pinger_sending_on_import(struct obd_import *imp);
-void ptlrpc_pinger_commit_expected(struct obd_import *imp);
-void ptlrpc_pinger_wake_up(void);
-
-/* sec_null.c */
-int sptlrpc_null_init(void);
-void sptlrpc_null_fini(void);
-
-/* sec_plain.c */
-int sptlrpc_plain_init(void);
-void sptlrpc_plain_fini(void);
-
-/* sec_bulk.c */
-int sptlrpc_enc_pool_init(void);
-void sptlrpc_enc_pool_fini(void);
-int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v);
-
-/* sec_lproc.c */
-int sptlrpc_lproc_init(void);
-void sptlrpc_lproc_fini(void);
-
-/* sec_gc.c */
-int sptlrpc_gc_init(void);
-void sptlrpc_gc_fini(void);
-
-/* sec_config.c */
-void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
- enum lustre_sec_part to,
- struct obd_uuid *target,
- lnet_nid_t nid,
- struct sptlrpc_flavor *sf);
-int sptlrpc_conf_init(void);
-void sptlrpc_conf_fini(void);
-
-/* sec.c */
-int sptlrpc_init(void);
-void sptlrpc_fini(void);
-
-static inline int ll_rpc_recoverable_error(int rc)
-{
- return (rc == -ENOTCONN || rc == -ENODEV);
-}
-
-static inline int tgt_mod_init(void)
-{
- return 0;
-}
-
-static inline void tgt_mod_exit(void)
-{
- return;
-}
-
-static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set)
-{
- if (atomic_dec_and_test(&set->set_refcount))
- kfree(set);
-}
-#endif /* PTLRPC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
deleted file mode 100644
index d12e95bc9dce..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_req_layout.h"
-
-#include "ptlrpc_internal.h"
-
-extern spinlock_t ptlrpc_last_xid_lock;
-#if RS_DEBUG
-extern spinlock_t ptlrpc_rs_debug_lock;
-#endif
-
-static int __init ptlrpc_init(void)
-{
- int rc, cleanup_phase = 0;
-
- lustre_assert_wire_constants();
-#if RS_DEBUG
- spin_lock_init(&ptlrpc_rs_debug_lock);
-#endif
- mutex_init(&ptlrpc_all_services_mutex);
- mutex_init(&pinger_mutex);
- mutex_init(&ptlrpcd_mutex);
- ptlrpc_init_xid();
-
- rc = req_layout_init();
- if (rc)
- return rc;
-
- rc = ptlrpc_hr_init();
- if (rc)
- return rc;
-
- cleanup_phase = 1;
- rc = ptlrpc_request_cache_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 2;
- rc = ptlrpc_init_portals();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 3;
-
- rc = ptlrpc_connection_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 4;
- ptlrpc_put_connection_superhack = ptlrpc_connection_put;
-
- rc = ptlrpc_start_pinger();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 5;
- rc = ldlm_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 6;
- rc = sptlrpc_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 7;
- rc = ptlrpc_nrs_init();
- if (rc)
- goto cleanup;
-
- cleanup_phase = 8;
- rc = tgt_mod_init();
- if (rc)
- goto cleanup;
- return 0;
-
-cleanup:
- switch (cleanup_phase) {
- case 8:
- ptlrpc_nrs_fini();
- /* Fall through */
- case 7:
- sptlrpc_fini();
- /* Fall through */
- case 6:
- ldlm_exit();
- /* Fall through */
- case 5:
- ptlrpc_stop_pinger();
- /* Fall through */
- case 4:
- ptlrpc_connection_fini();
- /* Fall through */
- case 3:
- ptlrpc_exit_portals();
- /* Fall through */
- case 2:
- ptlrpc_request_cache_fini();
- /* Fall through */
- case 1:
- ptlrpc_hr_fini();
- req_layout_fini();
- /* Fall through */
- default:
- ;
- }
-
- return rc;
-}
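ptlrpc_init() above tracks a cleanup_phase counter and unwinds with a fall-through switch, so a failure at any stage tears down exactly the stages that already succeeded, in reverse order. Here is a reduced sketch of that idiom with hypothetical init_a/init_b/init_c stages, not real ptlrpc entry points:

#include <stdio.h>

/* Placeholder stages; in ptlrpc_init() these are req_layout_init(),
 * ptlrpc_hr_init(), ptlrpc_request_cache_init(), and so on. */
static int init_a(void) { return 0; }
static void fini_a(void) { }
static int init_b(void) { return 0; }
static void fini_b(void) { }
static int init_c(void) { return -1; }	/* pretend the third stage fails */

static int module_setup(void)
{
	int rc, phase = 0;

	rc = init_a();
	if (rc)
		goto cleanup;

	phase = 1;
	rc = init_b();
	if (rc)
		goto cleanup;

	phase = 2;
	rc = init_c();
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	/* Unwind only what succeeded, most recent stage first. */
	switch (phase) {
	case 2:
		fini_b();
		/* Fall through */
	case 1:
		fini_a();
		/* Fall through */
	default:
		break;
	}
	return rc;
}

int main(void)
{
	printf("module_setup() = %d\n", module_setup());	/* prints -1 */
	return 0;
}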
-
-static void __exit ptlrpc_exit(void)
-{
- tgt_mod_exit();
- ptlrpc_nrs_fini();
- sptlrpc_fini();
- ldlm_exit();
- ptlrpc_stop_pinger();
- ptlrpc_exit_portals();
- ptlrpc_request_cache_fini();
- ptlrpc_hr_fini();
- ptlrpc_connection_fini();
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Request Processor and Lock Management");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.0");
-
-module_init(ptlrpc_init);
-module_exit(ptlrpc_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
deleted file mode 100644
index ce036a1ac466..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ /dev/null
@@ -1,917 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/ptlrpcd.c
- */
-
-/** \defgroup ptlrpcd PortalRPC daemon
- *
- * ptlrpcd is a special thread with its own request set, to which other
- * users may add requests when they do not want to wait for completion.
- * ptlrpcd takes care of sending such requests and then processing their
- * replies and calling completion callbacks as necessary.
- * The callbacks are called directly from ptlrpcd context.
- * It is important never to block significantly (especially on RPCs!) within
- * such a completion handler, or a deadlock may occur: ptlrpcd enters a
- * callback that sends another RPC and waits for it to return, during which
- * time ptlrpcd is completely blocked. If an import fails, for example,
- * recovery cannot progress because connection requests are also
- * sent by ptlrpcd.
- *
- * @{
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/lustre_net.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_ha.h"
-#include "../include/obd_class.h" /* for obd_zombie */
-#include "../include/obd_support.h" /* for OBD_FAIL_CHECK */
-#include "../include/cl_object.h" /* cl_env_{get,put}() */
-#include "../include/lprocfs_status.h"
-
-#include "ptlrpc_internal.h"
-
-/* One of these per CPT. */
-struct ptlrpcd {
- int pd_size;
- int pd_index;
- int pd_cpt;
- int pd_cursor;
- int pd_nthreads;
- int pd_groupsize;
- struct ptlrpcd_ctl pd_threads[0];
-};
-
-/*
- * max_ptlrpcds is obsolete, but retained to ensure that the kernel
- * module will load on a system where it has been tuned.
- * A value other than 0 implies it was tuned, in which case the value
- * is used to derive a setting for ptlrpcd_per_cpt_max.
- */
-static int max_ptlrpcds;
-module_param(max_ptlrpcds, int, 0644);
-MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");
-
-/*
- * ptlrpcd_bind_policy is obsolete, but retained to ensure that
- * the kernel module will load on a system where it has been tuned.
- * A value other than 0 implies it was tuned, in which case the value
- * is used to derive a setting for ptlrpcd_partner_group_size.
- */
-static int ptlrpcd_bind_policy;
-module_param(ptlrpcd_bind_policy, int, 0644);
-MODULE_PARM_DESC(ptlrpcd_bind_policy,
- "Ptlrpcd threads binding mode (obsolete).");
-
-/*
- * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
- * in a CPT.
- */
-static int ptlrpcd_per_cpt_max;
-module_param(ptlrpcd_per_cpt_max, int, 0644);
-MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
- "Max ptlrpcd thread count to be started per cpt.");
-
-/*
- * ptlrpcd_partner_group_size: The desired number of threads in each
- * ptlrpcd partner thread group. Default is 2, corresponding to the
- * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in
- * a CPT partners of each other.
- */
-static int ptlrpcd_partner_group_size;
-module_param(ptlrpcd_partner_group_size, int, 0644);
-MODULE_PARM_DESC(ptlrpcd_partner_group_size,
- "Number of ptlrpcd threads in a partner group.");
-
-/*
- * ptlrpcd_cpts: A CPT string describing the CPU partitions that
- * ptlrpcd threads should run on. Used to make ptlrpcd threads run on
- * a subset of all CPTs.
- *
- * ptlrpcd_cpts=2
- * ptlrpcd_cpts=[2]
- * run ptlrpcd threads only on CPT 2.
- *
- * ptlrpcd_cpts=0-3
- * ptlrpcd_cpts=[0-3]
- * run ptlrpcd threads on CPTs 0, 1, 2, and 3.
- *
- * ptlrpcd_cpts=[0-3,5,7]
- * run ptlrpcd threads on CPTS 0, 1, 2, 3, 5, and 7.
- */
-static char *ptlrpcd_cpts;
-module_param(ptlrpcd_cpts, charp, 0644);
-MODULE_PARM_DESC(ptlrpcd_cpts,
- "CPU partitions ptlrpcd threads should run in");
-
-/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
-static int *ptlrpcds_cpt_idx;
-
-/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
-static int ptlrpcds_num;
-static struct ptlrpcd **ptlrpcds;
-
-/*
- * In addition to the regular thread pool above, there is a single
- * global recovery thread. Recovery isn't critical for performance,
- * and doesn't block, but must always be able to proceed, and it is
- * possible that all normal ptlrpcd threads are blocked. Hence the
- * need for a dedicated thread.
- */
-static struct ptlrpcd_ctl ptlrpcd_rcv;
-
-struct mutex ptlrpcd_mutex;
-static int ptlrpcd_users;
-
-void ptlrpcd_wake(struct ptlrpc_request *req)
-{
- struct ptlrpc_request_set *rq_set = req->rq_set;
-
- LASSERT(rq_set != NULL);
-
- wake_up(&rq_set->set_waitq);
-}
-EXPORT_SYMBOL(ptlrpcd_wake);
-
-static struct ptlrpcd_ctl *
-ptlrpcd_select_pc(struct ptlrpc_request *req)
-{
- struct ptlrpcd *pd;
- int cpt;
- int idx;
-
- if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
- return &ptlrpcd_rcv;
-
- cpt = cfs_cpt_current(cfs_cpt_table, 1);
- if (!ptlrpcds_cpt_idx)
- idx = cpt;
- else
- idx = ptlrpcds_cpt_idx[cpt];
- pd = ptlrpcds[idx];
-
- /* Strict load balancing is not required; a rough round-robin is enough. */
- idx = pd->pd_cursor;
- if (++idx == pd->pd_nthreads)
- idx = 0;
- pd->pd_cursor = idx;
-
- return &pd->pd_threads[idx];
-}
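ptlrpcd_select_pc() above first maps the current CPT to a ptlrpcd group and then advances a per-group cursor, wrapping at pd_nthreads; as noted in the comment, only rough spreading is needed, not strict balance. The cursor idiom in isolation, with invented names and a fixed thread count, might look like:

#include <stdio.h>

struct rr_group {
	int cursor;		/* last slot handed out */
	int nthreads;		/* number of worker slots in the group */
};

/* Advance the cursor and return the next slot, wrapping at nthreads. */
static int pick_round_robin(struct rr_group *g)
{
	int idx = g->cursor;

	if (++idx == g->nthreads)
		idx = 0;
	g->cursor = idx;
	return idx;
}

int main(void)
{
	struct rr_group g = { .cursor = 0, .nthreads = 4 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%d ", pick_round_robin(&g));	/* 1 2 3 0 1 2 */
	printf("\n");
	return 0;
}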
-
-/**
- * Return transferred RPCs count.
- */
-static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
- struct ptlrpc_request_set *src)
-{
- struct list_head *tmp, *pos;
- struct ptlrpc_request *req;
- int rc = 0;
-
- spin_lock(&src->set_new_req_lock);
- if (likely(!list_empty(&src->set_new_requests))) {
- list_for_each_safe(pos, tmp, &src->set_new_requests) {
- req = list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
- req->rq_set = des;
- }
- list_splice_init(&src->set_new_requests,
- &des->set_requests);
- rc = atomic_read(&src->set_new_count);
- atomic_add(rc, &des->set_remaining);
- atomic_set(&src->set_new_count, 0);
- }
- spin_unlock(&src->set_new_req_lock);
- return rc;
-}
-
-/**
- * Requests that are added to the ptlrpcd queue are sent via
- * ptlrpcd_check->ptlrpc_check_set().
- */
-void ptlrpcd_add_req(struct ptlrpc_request *req)
-{
- struct ptlrpcd_ctl *pc;
-
- if (req->rq_reqmsg)
- lustre_msg_set_jobid(req->rq_reqmsg, NULL);
-
- spin_lock(&req->rq_lock);
- if (req->rq_invalid_rqset) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
- back_to_sleep, NULL);
-
- req->rq_invalid_rqset = 0;
- spin_unlock(&req->rq_lock);
- l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
- } else if (req->rq_set) {
- /* If we have a valid "rq_set", just reuse it to avoid double
- * linked. */
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
- LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
-
- /* ptlrpc_check_set will decrease the count */
- atomic_inc(&req->rq_set->set_remaining);
- spin_unlock(&req->rq_lock);
- wake_up(&req->rq_set->set_waitq);
- return;
- } else {
- spin_unlock(&req->rq_lock);
- }
-
- pc = ptlrpcd_select_pc(req);
-
- DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
- req, pc->pc_name, pc->pc_index);
-
- ptlrpc_set_add_new_req(pc, req);
-}
-EXPORT_SYMBOL(ptlrpcd_add_req);
-
-static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
-{
- atomic_inc(&set->set_refcount);
-}
-
-/**
- * Check if there is more work to do on ptlrpcd set.
- * Returns 1 if yes.
- */
-static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
-{
- struct list_head *tmp, *pos;
- struct ptlrpc_request *req;
- struct ptlrpc_request_set *set = pc->pc_set;
- int rc = 0;
- int rc2;
-
- if (atomic_read(&set->set_new_count)) {
- spin_lock(&set->set_new_req_lock);
- if (likely(!list_empty(&set->set_new_requests))) {
- list_splice_init(&set->set_new_requests,
- &set->set_requests);
- atomic_add(atomic_read(&set->set_new_count),
- &set->set_remaining);
- atomic_set(&set->set_new_count, 0);
- /*
- * Need to calculate its timeout.
- */
- rc = 1;
- }
- spin_unlock(&set->set_new_req_lock);
- }
-
- /* We should call lu_env_refill() before handling new requests to make
- * sure that the env keys the requests depend on really exist.
- */
- rc2 = lu_env_refill(env);
- if (rc2 != 0) {
- /*
- * XXX This is a very awkward situation, because
- * execution can neither continue (request
- * interpreters assume that env is set up), nor repeat
- * the loop (as this potentially results in a tight
- * loop of -ENOMEM's).
- *
- * Fortunately, refill only ever does something when
- * new modules are loaded, i.e., early during boot up.
- */
- CERROR("Failure to refill session: %d\n", rc2);
- return rc;
- }
-
- if (atomic_read(&set->set_remaining))
- rc |= ptlrpc_check_set(env, set);
-
- /* NB: ptlrpc_check_set has already moved the completed requests to
- * the head of set::set_requests */
- list_for_each_safe(pos, tmp, &set->set_requests) {
- req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
- if (req->rq_phase != RQ_PHASE_COMPLETE)
- break;
-
- list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
- ptlrpc_req_finished(req);
- }
-
- if (rc == 0) {
- /*
- * If new requests have been added, make sure to wake up.
- */
- rc = atomic_read(&set->set_new_count);
-
- /* If we have nothing to do, check whether we can take some
- * work from our partner threads. */
- if (rc == 0 && pc->pc_npartners > 0) {
- struct ptlrpcd_ctl *partner;
- struct ptlrpc_request_set *ps;
- int first = pc->pc_cursor;
-
- do {
- partner = pc->pc_partners[pc->pc_cursor++];
- if (pc->pc_cursor >= pc->pc_npartners)
- pc->pc_cursor = 0;
- if (partner == NULL)
- continue;
-
- spin_lock(&partner->pc_lock);
- ps = partner->pc_set;
- if (ps == NULL) {
- spin_unlock(&partner->pc_lock);
- continue;
- }
-
- ptlrpc_reqset_get(ps);
- spin_unlock(&partner->pc_lock);
-
- if (atomic_read(&ps->set_new_count)) {
- rc = ptlrpcd_steal_rqset(set, ps);
- if (rc > 0)
- CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
- rc, partner->pc_index,
- pc->pc_index);
- }
- ptlrpc_reqset_put(ps);
- } while (rc == 0 && pc->pc_cursor != first);
- }
- }
-
- return rc;
-}
-
-/**
- * Main ptlrpcd thread.
- * ptlrpc's code paths like to execute in process context, so we have this
- * thread, which spins on a request set, sending the RPCs it contains.
- *
- */
-static int ptlrpcd(void *arg)
-{
- struct ptlrpcd_ctl *pc = arg;
- struct ptlrpc_request_set *set;
- struct lu_env env = { .le_ses = NULL };
- int rc = 0;
- int exit = 0;
-
- unshare_fs_struct();
- if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
- CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);
-
- /*
- * Allocate the request set after the thread has been bound
- * above. This is safe because no requests will be queued
- * until all ptlrpcd threads have confirmed that they have
- * successfully started.
- */
- set = ptlrpc_prep_set();
- if (!set) {
- rc = -ENOMEM;
- goto failed;
- }
- spin_lock(&pc->pc_lock);
- pc->pc_set = set;
- spin_unlock(&pc->pc_lock);
- /*
- * XXX So far only "client" ptlrpcd uses an environment. In
- * the future, the ptlrpcd thread (or a thread-set) has to be given
- * an argument, describing its "scope".
- */
- rc = lu_context_init(&env.le_ctx,
- LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
- if (rc != 0)
- goto failed;
-
- complete(&pc->pc_starting);
-
- /*
- * This mainloop strongly resembles ptlrpc_set_wait() except that our
- * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
- * there are requests in the set. New requests come in on the set's
- * new_req_list and ptlrpcd_check() moves them into the set.
- */
- do {
- struct l_wait_info lwi;
- int timeout;
-
- timeout = ptlrpc_set_next_timeout(set);
- lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
- ptlrpc_expired_set, set);
-
- lu_context_enter(&env.le_ctx);
- l_wait_event(set->set_waitq,
- ptlrpcd_check(&env, pc), &lwi);
- lu_context_exit(&env.le_ctx);
-
- /*
- * Abort inflight rpcs for forced stop case.
- */
- if (test_bit(LIOD_STOP, &pc->pc_flags)) {
- if (test_bit(LIOD_FORCE, &pc->pc_flags))
- ptlrpc_abort_set(set);
- exit++;
- }
-
- /*
- * Let's make one more loop to make sure that ptlrpcd_check()
- * copied all raced new rpcs into the set so we can kill them.
- */
- } while (exit < 2);
-
- /*
- * Wait for inflight requests to drain.
- */
- if (!list_empty(&set->set_requests))
- ptlrpc_set_wait(set);
- lu_context_fini(&env.le_ctx);
-
- complete(&pc->pc_finishing);
-
- return 0;
-failed:
- pc->pc_error = rc;
- complete(&pc->pc_starting);
- return rc;
-}
-
-static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
-{
- pc->pc_index = index;
- pc->pc_cpt = cpt;
- init_completion(&pc->pc_starting);
- init_completion(&pc->pc_finishing);
- spin_lock_init(&pc->pc_lock);
-
- if (index < 0) {
- /* Recovery thread. */
- snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
- } else {
- /* Regular thread. */
- snprintf(pc->pc_name, sizeof(pc->pc_name),
- "ptlrpcd_%02d_%02d", cpt, index);
- }
-}
-
-/* XXX: We want multiple CPU cores to share the async RPC load. So we
- * start many ptlrpcd threads. We also want to reduce the ptlrpcd
- * overhead caused by data transfer cross-CPU cores. So we bind
- * all ptlrpcd threads to a CPT, in the expectation that CPTs
- * will be defined in a way that matches these boundaries. Within
- * a CPT a ptlrpcd thread can be scheduled on any available core.
- *
- * Each ptlrpcd thread has its own request queue. This can cause
- * response delay if the thread is already busy. To help with
- * this we define partner threads: these are other threads bound
- * to the same CPT which will check for work in each other's
- * request queues if they have no work to do.
- *
- * The desired number of partner threads can be tuned by setting
- * ptlrpcd_partner_group_size. The default is to create pairs of
- * partner threads.
- */
-static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
-{
- struct ptlrpcd_ctl *pc;
- struct ptlrpcd_ctl **ppc;
- int first;
- int i;
- int rc = 0;
- int size;
-
- LASSERT(index >= 0 && index < pd->pd_nthreads);
- pc = &pd->pd_threads[index];
- pc->pc_npartners = pd->pd_groupsize - 1;
-
- if (pc->pc_npartners <= 0)
- goto out;
-
- size = sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners;
- pc->pc_partners = kzalloc_node(size, GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table,
- pc->pc_cpt));
- if (!pc->pc_partners) {
- pc->pc_npartners = 0;
- rc = -ENOMEM;
- goto out;
- }
-
- first = index - index % pd->pd_groupsize;
- ppc = pc->pc_partners;
- for (i = first; i < first + pd->pd_groupsize; i++) {
- if (i != index)
- *ppc++ = &pd->pd_threads[i];
- }
-out:
- return rc;
-}
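The partner bookkeeping above reduces to block arithmetic over thread indices: threads are grouped in runs of pd_groupsize, and each thread's partners are the other members of its run. A minimal standalone sketch of that arithmetic, in plain userspace C with illustrative names (not the deleted kernel code):

#include <stdio.h>

/* Print the partner indices for `index`, given `nthreads` threads
 * grouped into blocks of `groupsize` (illustrative sketch only). */
static void print_partners(int index, int nthreads, int groupsize)
{
	int first = index - index % groupsize;
	int i;

	printf("thread %d partners:", index);
	for (i = first; i < first + groupsize && i < nthreads; i++)
		if (i != index)
			printf(" %d", i);
	printf("\n");
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		print_partners(i, 6, 2);	/* pairs, as in the default */
	return 0;
}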
-
-int ptlrpcd_start(struct ptlrpcd_ctl *pc)
-{
- struct task_struct *task;
- int rc = 0;
-
- /*
-	 * Do not allow starting a second thread for the same pc.
- */
- if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
- CWARN("Starting second thread (%s) for same pc %p\n",
- pc->pc_name, pc);
- return 0;
- }
-
- /*
- * So far only "client" ptlrpcd uses an environment. In the future,
- * ptlrpcd thread (or a thread-set) has to be given an argument,
- * describing its "scope".
- */
- rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
- if (rc != 0)
- goto out;
-
- task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
- if (IS_ERR(task)) {
- rc = PTR_ERR(task);
- goto out_set;
- }
-
- wait_for_completion(&pc->pc_starting);
- rc = pc->pc_error;
- if (rc != 0)
- goto out_set;
-
- return 0;
-
-out_set:
- if (pc->pc_set != NULL) {
- struct ptlrpc_request_set *set = pc->pc_set;
-
- spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
- }
- lu_context_fini(&pc->pc_env.le_ctx);
-
-out:
- clear_bit(LIOD_START, &pc->pc_flags);
- return rc;
-}
-
-void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
-{
- if (!test_bit(LIOD_START, &pc->pc_flags)) {
- CWARN("Thread for pc %p was not started\n", pc);
- return;
- }
-
- set_bit(LIOD_STOP, &pc->pc_flags);
- if (force)
- set_bit(LIOD_FORCE, &pc->pc_flags);
- wake_up(&pc->pc_set->set_waitq);
-}
-
-void ptlrpcd_free(struct ptlrpcd_ctl *pc)
-{
- struct ptlrpc_request_set *set = pc->pc_set;
-
- if (!test_bit(LIOD_START, &pc->pc_flags)) {
- CWARN("Thread for pc %p was not started\n", pc);
- goto out;
- }
-
- wait_for_completion(&pc->pc_finishing);
- lu_context_fini(&pc->pc_env.le_ctx);
-
- spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
-
- clear_bit(LIOD_START, &pc->pc_flags);
- clear_bit(LIOD_STOP, &pc->pc_flags);
- clear_bit(LIOD_FORCE, &pc->pc_flags);
-
-out:
- if (pc->pc_npartners > 0) {
- LASSERT(pc->pc_partners != NULL);
-
- kfree(pc->pc_partners);
- pc->pc_partners = NULL;
- }
- pc->pc_npartners = 0;
- pc->pc_error = 0;
-}
-
-static void ptlrpcd_fini(void)
-{
- int i;
- int j;
-
- if (ptlrpcds != NULL) {
- for (i = 0; i < ptlrpcds_num; i++) {
- if (!ptlrpcds[i])
- break;
- for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
- ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
- for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
- ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
- kfree(ptlrpcds[i]);
- ptlrpcds[i] = NULL;
- }
- kfree(ptlrpcds);
- }
- ptlrpcds_num = 0;
-
- ptlrpcd_stop(&ptlrpcd_rcv, 0);
- ptlrpcd_free(&ptlrpcd_rcv);
-
- kfree(ptlrpcds_cpt_idx);
- ptlrpcds_cpt_idx = NULL;
-}
-
-static int ptlrpcd_init(void)
-{
- int nthreads;
- int groupsize;
- int size;
- int i;
- int j;
- int rc = 0;
- struct cfs_cpt_table *cptable;
- __u32 *cpts = NULL;
- int ncpts;
- int cpt;
- struct ptlrpcd *pd;
-
- /*
- * Determine the CPTs that ptlrpcd threads will run on.
- */
- cptable = cfs_cpt_table;
- ncpts = cfs_cpt_number(cptable);
- if (ptlrpcd_cpts) {
- struct cfs_expr_list *el;
-
- size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
- ptlrpcds_cpt_idx = kzalloc(size, GFP_KERNEL);
- if (!ptlrpcds_cpt_idx) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = cfs_expr_list_parse(ptlrpcd_cpts,
- strlen(ptlrpcd_cpts),
- 0, ncpts - 1, &el);
-
- if (rc != 0) {
- CERROR("ptlrpcd_cpts: invalid CPT pattern string: %s",
- ptlrpcd_cpts);
- rc = -EINVAL;
- goto out;
- }
-
- rc = cfs_expr_list_values(el, ncpts, &cpts);
- cfs_expr_list_free(el);
- if (rc <= 0) {
- CERROR("ptlrpcd_cpts: failed to parse CPT array %s: %d\n",
- ptlrpcd_cpts, rc);
- if (rc == 0)
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Create the cpt-to-index map. When there is no match
-		 * in the cpt table, fall back to the cpt modulo the
-		 * number of parsed entries. This could be changed to
-		 * take the topology of the system into account.
- */
- for (cpt = 0; cpt < ncpts; cpt++) {
- for (i = 0; i < rc; i++)
- if (cpts[i] == cpt)
- break;
- if (i >= rc)
- i = cpt % rc;
- ptlrpcds_cpt_idx[cpt] = i;
- }
-
- cfs_expr_list_values_free(cpts, rc);
- ncpts = rc;
- }
- ptlrpcds_num = ncpts;
-
- size = ncpts * sizeof(ptlrpcds[0]);
- ptlrpcds = kzalloc(size, GFP_KERNEL);
- if (!ptlrpcds) {
- rc = -ENOMEM;
- goto out;
- }
-
- /*
- * The max_ptlrpcds parameter is obsolete, but do something
- * sane if it has been tuned, and complain if
- * ptlrpcd_per_cpt_max has also been tuned.
- */
- if (max_ptlrpcds != 0) {
- CWARN("max_ptlrpcds is obsolete.\n");
- if (ptlrpcd_per_cpt_max == 0) {
- ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
- /* Round up if there is a remainder. */
- if (max_ptlrpcds % ncpts != 0)
- ptlrpcd_per_cpt_max++;
- CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
- ptlrpcd_per_cpt_max);
- } else {
-			CWARN("ptlrpcd_per_cpt_max is also set!\n");
- }
- }
-
- /*
- * The ptlrpcd_bind_policy parameter is obsolete, but do
- * something sane if it has been tuned, and complain if
- * ptlrpcd_partner_group_size is also tuned.
- */
- if (ptlrpcd_bind_policy != 0) {
- CWARN("ptlrpcd_bind_policy is obsolete.\n");
- if (ptlrpcd_partner_group_size == 0) {
- switch (ptlrpcd_bind_policy) {
- case 1: /* PDB_POLICY_NONE */
- case 2: /* PDB_POLICY_FULL */
- ptlrpcd_partner_group_size = 1;
- break;
- case 3: /* PDB_POLICY_PAIR */
- ptlrpcd_partner_group_size = 2;
- break;
- case 4: /* PDB_POLICY_NEIGHBOR */
-#ifdef CONFIG_NUMA
- ptlrpcd_partner_group_size = -1; /* CPT */
-#else
- ptlrpcd_partner_group_size = 3; /* Triplets */
-#endif
- break;
- default: /* Illegal value, use the default. */
- ptlrpcd_partner_group_size = 2;
- break;
- }
- CWARN("Setting ptlrpcd_partner_group_size = %d\n",
- ptlrpcd_partner_group_size);
- } else {
- CWARN("ptlrpcd_partner_group_size is also set!\n");
- }
- }
-
- if (ptlrpcd_partner_group_size == 0)
- ptlrpcd_partner_group_size = 2;
- else if (ptlrpcd_partner_group_size < 0)
- ptlrpcd_partner_group_size = -1;
- else if (ptlrpcd_per_cpt_max > 0 &&
- ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
- ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;
-
- /*
- * Start the recovery thread first.
- */
- set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
- ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
- rc = ptlrpcd_start(&ptlrpcd_rcv);
- if (rc < 0)
- goto out;
-
- for (i = 0; i < ncpts; i++) {
- if (!cpts)
- cpt = i;
- else
- cpt = cpts[i];
-
- nthreads = cfs_cpt_weight(cptable, cpt);
- if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
- nthreads = ptlrpcd_per_cpt_max;
- if (nthreads < 2)
- nthreads = 2;
-
- if (ptlrpcd_partner_group_size <= 0) {
- groupsize = nthreads;
- } else if (nthreads <= ptlrpcd_partner_group_size) {
- groupsize = nthreads;
- } else {
- groupsize = ptlrpcd_partner_group_size;
- if (nthreads % groupsize != 0)
- nthreads += groupsize - (nthreads % groupsize);
- }
-
- size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
- pd = kzalloc_node(size, GFP_NOFS,
- cfs_cpt_spread_node(cfs_cpt_table, cpt));
- if (!pd) {
- rc = -ENOMEM;
- goto out;
- }
- pd->pd_size = size;
- pd->pd_index = i;
- pd->pd_cpt = cpt;
- pd->pd_cursor = 0;
- pd->pd_nthreads = nthreads;
- pd->pd_groupsize = groupsize;
- ptlrpcds[i] = pd;
-
- /*
- * The ptlrpcd threads in a partner group can access
- * each other's struct ptlrpcd_ctl, so these must be
- * initialized before any thread is started.
- */
- for (j = 0; j < nthreads; j++) {
- ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
- rc = ptlrpcd_partners(pd, j);
- if (rc < 0)
- goto out;
- }
-
- /* XXX: We start nthreads ptlrpc daemons.
- * Each of them can process any non-recovery
- * async RPC to improve overall async RPC
- * efficiency.
- *
- * But there are some issues with async I/O RPCs
- * and async non-I/O RPCs processed in the same
-		 * set in some cases: the ptlrpcd may be blocked
-		 * by some async I/O RPC(s), which then prevents
-		 * other async non-I/O RPC(s) from being
-		 * processed in time.
- *
- * Maybe we should distinguish blocked async RPCs
- * from non-blocked async RPCs, and process them
- * in different ptlrpcd sets to avoid unnecessary
- * dependency. But how to distribute async RPCs
- * load among all the ptlrpc daemons becomes
- * another trouble.
- */
- for (j = 0; j < nthreads; j++) {
- rc = ptlrpcd_start(&pd->pd_threads[j]);
- if (rc < 0)
- goto out;
- }
- }
-out:
- if (rc != 0)
- ptlrpcd_fini();
-
- return rc;
-}
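The per-CPT sizing in ptlrpcd_init() above clamps the thread count to ptlrpcd_per_cpt_max, enforces a minimum of two threads, and rounds up to a multiple of the partner group size. A hedged standalone sketch of just that sizing rule, with an illustrative function name:

#include <assert.h>

/* Mirror of the sizing logic above: `weight` is the CPT's CPU count,
 * `per_cpt_max` limits threads per CPT (0 = no limit), `group` is the
 * partner group size (<= 0 means one group spanning the whole CPT). */
static int ptlrpcd_nthreads_for(int weight, int per_cpt_max, int group)
{
	int nthreads = weight;

	if (per_cpt_max > 0 && per_cpt_max < nthreads)
		nthreads = per_cpt_max;
	if (nthreads < 2)
		nthreads = 2;
	if (group > 0 && nthreads > group && nthreads % group != 0)
		nthreads += group - (nthreads % group);
	return nthreads;
}

int main(void)
{
	assert(ptlrpcd_nthreads_for(1, 0, 2) == 2);	/* minimum of two */
	assert(ptlrpcd_nthreads_for(5, 0, 2) == 6);	/* rounded up to pairs */
	assert(ptlrpcd_nthreads_for(16, 4, 3) == 6);	/* clamped, then rounded */
	return 0;
}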
-
-int ptlrpcd_addref(void)
-{
- int rc = 0;
-
- mutex_lock(&ptlrpcd_mutex);
- if (++ptlrpcd_users == 1)
- rc = ptlrpcd_init();
- mutex_unlock(&ptlrpcd_mutex);
- return rc;
-}
-EXPORT_SYMBOL(ptlrpcd_addref);
-
-void ptlrpcd_decref(void)
-{
- mutex_lock(&ptlrpcd_mutex);
- if (--ptlrpcd_users == 0)
- ptlrpcd_fini();
- mutex_unlock(&ptlrpcd_mutex);
-}
-EXPORT_SYMBOL(ptlrpcd_decref);
-/** @} ptlrpcd */
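ptlrpcd_addref()/ptlrpcd_decref() above implement a mutex-protected use count: the first user initializes the ptlrpcd machinery and the last one tears it down. A userspace sketch of the same pattern, with pthreads standing in for the kernel mutex and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t users_lock = PTHREAD_MUTEX_INITIALIZER;
static int users;

static int subsystem_init(void)  { printf("init\n"); return 0; }
static void subsystem_fini(void) { printf("fini\n"); }

/* First caller pays for initialization. */
static int subsystem_addref(void)
{
	int rc = 0;

	pthread_mutex_lock(&users_lock);
	if (++users == 1)
		rc = subsystem_init();
	pthread_mutex_unlock(&users_lock);
	return rc;
}

/* Last caller tears the subsystem down. */
static void subsystem_decref(void)
{
	pthread_mutex_lock(&users_lock);
	if (--users == 0)
		subsystem_fini();
	pthread_mutex_unlock(&users_lock);
}

int main(void)
{
	subsystem_addref();
	subsystem_addref();
	subsystem_decref();
	subsystem_decref();	/* "fini" printed here */
	return 0;
}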
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
deleted file mode 100644
index 7b1d72947330..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/recover.c
- *
- * Author: Mike Shaver <shaver@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_support.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_import.h"
-#include "../include/lustre_export.h"
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include <linux/list.h>
-
-#include "ptlrpc_internal.h"
-
-/**
- * Start recovery on disconnected import.
- * This is done by just attempting a connect
- */
-void ptlrpc_initiate_recovery(struct obd_import *imp)
-{
- CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
- ptlrpc_connect_import(imp);
-}
-
-/**
- * Identify what request from replay list needs to be replayed next
- * (based on what we have already replayed) and send it to server.
- */
-int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
-{
- int rc = 0;
- struct list_head *tmp, *pos;
- struct ptlrpc_request *req = NULL;
- __u64 last_transno;
-
- *inflight = 0;
-
-	/* The server might have committed some requests after we last spoke,
-	 * so make sure we get rid of them now.
- */
- spin_lock(&imp->imp_lock);
- imp->imp_last_transno_checked = 0;
- ptlrpc_free_committed(imp);
- last_transno = imp->imp_last_replay_transno;
- spin_unlock(&imp->imp_lock);
-
- CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
- imp, obd2cli_tgt(imp->imp_obd),
- imp->imp_peer_committed_transno, last_transno);
-
- /* Do I need to hold a lock across this iteration? We shouldn't be
- * racing with any additions to the list, because we're in recovery
- * and are therefore not processing additional requests to add. Calls
- * to ptlrpc_free_committed might commit requests, but nothing "newer"
- * than the one we're replaying (it can't be committed until it's
- * replayed, and we're doing that here). l_f_e_safe protects against
- * problems with the current request being committed, in the unlikely
- * event of that race. So, in conclusion, I think that it's safe to
- * perform this list-walk without the imp_lock held.
- *
- * But, the {mdc,osc}_replay_open callbacks both iterate
- * request lists, and have comments saying they assume the
- * imp_lock is being held by ptlrpc_replay, but it's not. it's
- * just a little race...
- */
-
- /* Replay all the committed open requests on committed_list first */
- if (!list_empty(&imp->imp_committed_list)) {
- tmp = imp->imp_committed_list.prev;
- req = list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
- /* The last request on committed_list hasn't been replayed */
- if (req->rq_transno > last_transno) {
-			/* Since the imp_committed_list is immutable until
-			 * all of its requests have been replayed, it's safe
-			 * to use a cursor to accelerate the search.
-			 */
- imp->imp_replay_cursor = imp->imp_replay_cursor->next;
-
- while (imp->imp_replay_cursor !=
- &imp->imp_committed_list) {
- req = list_entry(imp->imp_replay_cursor,
- struct ptlrpc_request,
- rq_replay_list);
- if (req->rq_transno > last_transno)
- break;
-
- req = NULL;
- imp->imp_replay_cursor =
- imp->imp_replay_cursor->next;
- }
- } else {
- /* All requests on committed_list have been replayed */
- imp->imp_replay_cursor = &imp->imp_committed_list;
- req = NULL;
- }
- }
-
- /* All the requests in committed list have been replayed, let's replay
- * the imp_replay_list */
- if (req == NULL) {
- list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
- req = list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
- if (req->rq_transno > last_transno)
- break;
- req = NULL;
- }
- }
-
-	/* If we need to resend the last sent transno (because a reconnect
- * has occurred), then stop on the matching req and send it again.
- * If, however, the last sent transno has been committed then we
- * continue replay from the next request. */
- if (req != NULL && imp->imp_resend_replay)
- lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
-
- spin_lock(&imp->imp_lock);
- imp->imp_resend_replay = 0;
- spin_unlock(&imp->imp_lock);
-
- if (req != NULL) {
- rc = ptlrpc_replay_req(req);
- if (rc) {
- CERROR("recovery replay error %d for req %llu\n",
- rc, req->rq_xid);
- return rc;
- }
- *inflight = 1;
- }
- return rc;
-}
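The committed-list walk above depends on the list being sorted by transaction number and on imp_replay_cursor remembering how far earlier passes got, so each call resumes the scan instead of restarting from the head. A standalone sketch of that cursor idea over a plain sorted array (illustrative only, no Lustre list types):

#include <stdio.h>

/* Sorted transaction numbers standing in for imp_committed_list. */
static const unsigned long long committed[] = { 10, 20, 30, 40, 50 };
static const int ncommitted = sizeof(committed) / sizeof(committed[0]);
static int cursor;	/* index of the next candidate to replay */

/* Return the next transno greater than `last`, or 0 when done.
 * The cursor only ever moves forward, as in the code above. */
static unsigned long long replay_next(unsigned long long last)
{
	while (cursor < ncommitted) {
		if (committed[cursor] > last)
			return committed[cursor];
		cursor++;
	}
	return 0;
}

int main(void)
{
	unsigned long long last = 0, next;

	while ((next = replay_next(last)) != 0) {
		printf("replaying %llu\n", next);
		last = next;	/* pretend the replay succeeded */
	}
	return 0;
}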
-
-/**
- * Schedule resending of request on sending_list. This is done after
- * we completed replaying of requests and locks.
- */
-int ptlrpc_resend(struct obd_import *imp)
-{
- struct ptlrpc_request *req, *next;
-
- /* As long as we're in recovery, nothing should be added to the sending
- * list, so we don't need to hold the lock during this iteration and
- * resend process.
- */
- /* Well... what if lctl recover is called twice at the same time?
- */
- spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_RECOVER) {
- spin_unlock(&imp->imp_lock);
- return -1;
- }
-
- list_for_each_entry_safe(req, next, &imp->imp_sending_list,
- rq_list) {
- LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
- "req %p bad\n", req);
- LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
- if (!ptlrpc_no_resend(req))
- ptlrpc_resend_req(req);
- }
- spin_unlock(&imp->imp_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_resend);
-
-/**
- * Go through all requests in delayed list and wake their threads
- * for resending
- */
-void ptlrpc_wake_delayed(struct obd_import *imp)
-{
- struct list_head *tmp, *pos;
- struct ptlrpc_request *req;
-
- spin_lock(&imp->imp_lock);
- list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_list);
-
- DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
- ptlrpc_client_wake_req(req);
- }
- spin_unlock(&imp->imp_lock);
-}
-EXPORT_SYMBOL(ptlrpc_wake_delayed);
-
-void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
-{
- struct obd_import *imp = failed_req->rq_import;
-
- CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
- imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
-
- if (ptlrpc_set_import_discon(imp,
- lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
- if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_obd->obd_name);
- ptlrpc_deactivate_import(imp);
- }
- /* to control recovery via lctl {disable|enable}_recovery */
- if (imp->imp_deactive == 0)
- ptlrpc_connect_import(imp);
- }
-
-	/* Wait for recovery to complete and resend. If evicted, then
-	 * this request will be errored out later.
-	 */
- spin_lock(&failed_req->rq_lock);
- if (!failed_req->rq_no_resend)
- failed_req->rq_resend = 1;
- spin_unlock(&failed_req->rq_lock);
-}
-
-/**
- * Administratively activate/deactivate a client.
- * This should only be called by the ioctl interface, currently
- * - the lctl deactivate and activate commands
- * - echo 0/1 >> /proc/osc/XXX/active
- * - client umount -f (ll_umount_begin)
- */
-int ptlrpc_set_import_active(struct obd_import *imp, int active)
-{
- struct obd_device *obd = imp->imp_obd;
- int rc = 0;
-
- LASSERT(obd);
-
- /* When deactivating, mark import invalid, and abort in-flight
- * requests. */
- if (!active) {
- LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
- obd2cli_tgt(imp->imp_obd));
-
- /* set before invalidate to avoid messages about imp_inval
- * set without imp_deactive in ptlrpc_import_delay_req */
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- spin_unlock(&imp->imp_lock);
-
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
-
- ptlrpc_invalidate_import(imp);
- }
-
- /* When activating, mark import valid, and attempt recovery */
- if (active) {
- CDEBUG(D_HA, "setting import %s VALID\n",
- obd2cli_tgt(imp->imp_obd));
-
- spin_lock(&imp->imp_lock);
- imp->imp_deactive = 0;
- spin_unlock(&imp->imp_lock);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
-
- rc = ptlrpc_recover_import(imp, NULL, 0);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_set_import_active);
-
-/* Attempt to reconnect an import */
-int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
-{
- int rc = 0;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
- atomic_read(&imp->imp_inval_count))
- rc = -EINVAL;
- spin_unlock(&imp->imp_lock);
- if (rc)
- goto out;
-
- /* force import to be disconnected. */
- ptlrpc_set_import_discon(imp, 0);
-
- if (new_uuid) {
- struct obd_uuid uuid;
-
-		/* instruct the import to use the new uuid */
- obd_str2uuid(&uuid, new_uuid);
- rc = import_set_conn_priority(imp, &uuid);
- if (rc)
- goto out;
- }
-
- /* Check if reconnect is already in progress */
- spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_DISCON) {
- imp->imp_force_verify = 1;
- rc = -EALREADY;
- }
- spin_unlock(&imp->imp_lock);
- if (rc)
- goto out;
-
- rc = ptlrpc_connect_import(imp);
- if (rc)
- goto out;
-
- if (!async) {
- struct l_wait_info lwi;
- int secs = cfs_time_seconds(obd_timeout);
-
- CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
- obd2cli_tgt(imp->imp_obd), secs);
-
- lwi = LWI_TIMEOUT(secs, NULL, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp), &lwi);
- CDEBUG(D_HA, "%s: recovery finished\n",
- obd2cli_tgt(imp->imp_obd));
- }
-
-out:
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_recover_import);
-
-int ptlrpc_import_in_recovery(struct obd_import *imp)
-{
- int in_recovery = 1;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL ||
- imp->imp_state == LUSTRE_IMP_CLOSED ||
- imp->imp_state == LUSTRE_IMP_DISCON ||
- imp->imp_obd->obd_no_recov)
- in_recovery = 0;
- spin_unlock(&imp->imp_lock);
-
- return in_recovery;
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
deleted file mode 100644
index 67604b560670..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ /dev/null
@@ -1,2341 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/crypto.h>
-#include <linux/key.h>
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_import.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_sec.h"
-
-#include "ptlrpc_internal.h"
-
-/***********************************************
- * policy registers *
- ***********************************************/
-
-static rwlock_t policy_lock;
-static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
- NULL,
-};
-
-int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
-{
- __u16 number = policy->sp_policy;
-
- LASSERT(policy->sp_name);
- LASSERT(policy->sp_cops);
- LASSERT(policy->sp_sops);
-
- if (number >= SPTLRPC_POLICY_MAX)
- return -EINVAL;
-
- write_lock(&policy_lock);
- if (unlikely(policies[number])) {
- write_unlock(&policy_lock);
- return -EALREADY;
- }
- policies[number] = policy;
- write_unlock(&policy_lock);
-
- CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_register_policy);
-
-int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
-{
- __u16 number = policy->sp_policy;
-
- LASSERT(number < SPTLRPC_POLICY_MAX);
-
- write_lock(&policy_lock);
- if (unlikely(policies[number] == NULL)) {
- write_unlock(&policy_lock);
- CERROR("%s: already unregistered\n", policy->sp_name);
- return -EINVAL;
- }
-
- LASSERT(policies[number] == policy);
- policies[number] = NULL;
- write_unlock(&policy_lock);
-
- CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_unregister_policy);
-
-static
-struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
-{
- static DEFINE_MUTEX(load_mutex);
- static atomic_t loaded = ATOMIC_INIT(0);
- struct ptlrpc_sec_policy *policy;
- __u16 number = SPTLRPC_FLVR_POLICY(flavor);
- __u16 flag = 0;
-
- if (number >= SPTLRPC_POLICY_MAX)
- return NULL;
-
- while (1) {
- read_lock(&policy_lock);
- policy = policies[number];
- if (policy && !try_module_get(policy->sp_owner))
- policy = NULL;
- if (policy == NULL)
- flag = atomic_read(&loaded);
- read_unlock(&policy_lock);
-
- if (policy != NULL || flag != 0 ||
- number != SPTLRPC_POLICY_GSS)
- break;
-
- /* try to load gss module, once */
- mutex_lock(&load_mutex);
- if (atomic_read(&loaded) == 0) {
- if (request_module("ptlrpc_gss") == 0)
- CDEBUG(D_SEC,
- "module ptlrpc_gss loaded on demand\n");
- else
- CERROR("Unable to load module ptlrpc_gss\n");
-
- atomic_set(&loaded, 1);
- }
- mutex_unlock(&load_mutex);
- }
-
- return policy;
-}
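sptlrpc_wireflavor2policy() above uses a try-once-under-a-mutex shape for loading the GSS policy on demand: a read-locked fast-path lookup, and a single serialized load attempt when the lookup misses. A simplified userspace sketch of the same shape, with pthreads in place of the kernel primitives and illustrative names (a real multithreaded version would also use atomics for the flag):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t load_mutex = PTHREAD_MUTEX_INITIALIZER;
static int load_attempted;		/* mirrors the atomic `loaded` flag */
static void *registered_provider;	/* mirrors policies[number] */

static void try_load_provider(void)
{
	/* stand-in for loading a module that registers itself */
	static int provider = 42;

	registered_provider = &provider;
}

static void *lookup_provider(void)
{
	void *p;

	for (;;) {
		p = registered_provider;	/* fast path: already there */
		if (p || load_attempted)
			return p;

		pthread_mutex_lock(&load_mutex);
		if (!load_attempted) {
			try_load_provider();	/* attempted at most once */
			load_attempted = 1;
		}
		pthread_mutex_unlock(&load_mutex);
	}
}

int main(void)
{
	printf("provider %p\n", lookup_provider());
	return 0;
}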
-
-__u32 sptlrpc_name2flavor_base(const char *name)
-{
- if (!strcmp(name, "null"))
- return SPTLRPC_FLVR_NULL;
- if (!strcmp(name, "plain"))
- return SPTLRPC_FLVR_PLAIN;
- if (!strcmp(name, "krb5n"))
- return SPTLRPC_FLVR_KRB5N;
- if (!strcmp(name, "krb5a"))
- return SPTLRPC_FLVR_KRB5A;
- if (!strcmp(name, "krb5i"))
- return SPTLRPC_FLVR_KRB5I;
- if (!strcmp(name, "krb5p"))
- return SPTLRPC_FLVR_KRB5P;
-
- return SPTLRPC_FLVR_INVALID;
-}
-EXPORT_SYMBOL(sptlrpc_name2flavor_base);
-
-const char *sptlrpc_flavor2name_base(__u32 flvr)
-{
- __u32 base = SPTLRPC_FLVR_BASE(flvr);
-
- if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
- return "null";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
- return "plain";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
- return "krb5n";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
- return "krb5a";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
- return "krb5i";
- else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
- return "krb5p";
-
- CERROR("invalid wire flavor 0x%x\n", flvr);
- return "invalid";
-}
-EXPORT_SYMBOL(sptlrpc_flavor2name_base);
-
-char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
- char *buf, int bufsize)
-{
- if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
- snprintf(buf, bufsize, "hash:%s",
- sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
- else
- snprintf(buf, bufsize, "%s",
- sptlrpc_flavor2name_base(sf->sf_rpc));
-
- buf[bufsize - 1] = '\0';
- return buf;
-}
-EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
-
-char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
-{
- strlcpy(buf, sptlrpc_flavor2name_base(sf->sf_rpc), bufsize);
-
- /*
- * currently we don't support customized bulk specification for
- * flavors other than plain
- */
- if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
- char bspec[16];
-
- bspec[0] = '-';
- sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
- strlcat(buf, bspec, bufsize);
- }
-
- return buf;
-}
-EXPORT_SYMBOL(sptlrpc_flavor2name);
-
-char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
-{
- buf[0] = '\0';
-
- if (flags & PTLRPC_SEC_FL_REVERSE)
- strlcat(buf, "reverse,", bufsize);
- if (flags & PTLRPC_SEC_FL_ROOTONLY)
- strlcat(buf, "rootonly,", bufsize);
- if (flags & PTLRPC_SEC_FL_UDESC)
- strlcat(buf, "udesc,", bufsize);
- if (flags & PTLRPC_SEC_FL_BULK)
- strlcat(buf, "bulk,", bufsize);
- if (buf[0] == '\0')
- strlcat(buf, "-,", bufsize);
-
- return buf;
-}
-EXPORT_SYMBOL(sptlrpc_secflags2str);
-
-/**************************************************
- * client context APIs *
- **************************************************/
-
-static
-struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
-{
- struct vfs_cred vcred;
- int create = 1, remove_dead = 1;
-
- LASSERT(sec);
- LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
-
- if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
- PTLRPC_SEC_FL_ROOTONLY)) {
- vcred.vc_uid = 0;
- vcred.vc_gid = 0;
- if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
- create = 0;
- remove_dead = 0;
- }
- } else {
- vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
- vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
- }
-
- return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
- create, remove_dead);
-}
-
-struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
-{
- atomic_inc(&ctx->cc_refcount);
- return ctx;
-}
-EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
-
-void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
-{
- struct ptlrpc_sec *sec = ctx->cc_sec;
-
- LASSERT(sec);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- if (!atomic_dec_and_test(&ctx->cc_refcount))
- return;
-
- sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
-}
-EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
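sptlrpc_cli_ctx_get()/sptlrpc_cli_ctx_put() above are a plain acquire/release pair: get is an unconditional increment, and only the caller that drops the last reference releases the context. A minimal C11 sketch of that ownership rule, using illustrative names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refcount;
};

static struct ctx *ctx_get(struct ctx *c)
{
	atomic_fetch_add(&c->refcount, 1);
	return c;
}

static void ctx_put(struct ctx *c)
{
	/* Only the caller that drops the last reference releases it. */
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		printf("releasing ctx %p\n", (void *)c);
		free(c);
	}
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	atomic_init(&c->refcount, 1);	/* creation reference */
	ctx_get(c);			/* a borrower takes a reference */
	ctx_put(c);			/* borrower done */
	ctx_put(c);			/* creation reference dropped: freed */
	return 0;
}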
-
-static int import_sec_check_expire(struct obd_import *imp)
-{
- int adapt = 0;
-
- spin_lock(&imp->imp_lock);
- if (imp->imp_sec_expire &&
- imp->imp_sec_expire < ktime_get_real_seconds()) {
- adapt = 1;
- imp->imp_sec_expire = 0;
- }
- spin_unlock(&imp->imp_lock);
-
- if (!adapt)
- return 0;
-
- CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
- return sptlrpc_import_sec_adapt(imp, NULL, NULL);
-}
-
-static int import_sec_validate_get(struct obd_import *imp,
- struct ptlrpc_sec **sec)
-{
- int rc;
-
- if (unlikely(imp->imp_sec_expire)) {
- rc = import_sec_check_expire(imp);
- if (rc)
- return rc;
- }
-
- *sec = sptlrpc_import_sec_ref(imp);
- if (*sec == NULL) {
- CERROR("import %p (%s) with no sec\n",
- imp, ptlrpc_import_state_name(imp->imp_state));
- return -EACCES;
- }
-
- if (unlikely((*sec)->ps_dying)) {
- CERROR("attempt to use dying sec %p\n", sec);
- sptlrpc_sec_put(*sec);
- return -EACCES;
- }
-
- return 0;
-}
-
-/**
- * Given a \a req, find or allocate an appropriate context for it.
- * \pre req->rq_cli_ctx == NULL.
- *
- * \retval 0 succeed, and req->rq_cli_ctx is set.
- * \retval -ev error number, and req->rq_cli_ctx == NULL.
- */
-int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
-{
- struct obd_import *imp = req->rq_import;
- struct ptlrpc_sec *sec;
- int rc;
-
- LASSERT(!req->rq_cli_ctx);
- LASSERT(imp);
-
- rc = import_sec_validate_get(imp, &sec);
- if (rc)
- return rc;
-
- req->rq_cli_ctx = get_my_ctx(sec);
-
- sptlrpc_sec_put(sec);
-
- if (!req->rq_cli_ctx) {
- CERROR("req %p: fail to get context\n", req);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * Drop the context for \a req.
- * \pre req->rq_cli_ctx != NULL.
- * \post req->rq_cli_ctx == NULL.
- *
- * If \a sync == 0, this function should return quickly without sleep;
- * otherwise it might trigger and wait for the whole process of sending
- * a context-destroying rpc to the server.
- */
-void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
-{
- LASSERT(req);
- LASSERT(req->rq_cli_ctx);
-
-	/* The request might be asked to release its context early while
-	 * it is still on the context waiting list.
- */
- if (!list_empty(&req->rq_ctx_chain)) {
- spin_lock(&req->rq_cli_ctx->cc_lock);
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&req->rq_cli_ctx->cc_lock);
- }
-
- sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
- req->rq_cli_ctx = NULL;
-}
-
-static
-int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
- struct ptlrpc_cli_ctx *oldctx,
- struct ptlrpc_cli_ctx *newctx)
-{
- struct sptlrpc_flavor old_flvr;
- char *reqmsg = NULL; /* to workaround old gcc */
- int reqmsg_size;
- int rc = 0;
-
- LASSERT(req->rq_reqmsg);
- LASSERT(req->rq_reqlen);
- LASSERT(req->rq_replen);
-
- CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
- req,
- oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
- newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
- oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
- newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
-
- /* save flavor */
- old_flvr = req->rq_flvr;
-
- /* save request message */
- reqmsg_size = req->rq_reqlen;
- if (reqmsg_size != 0) {
- reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS);
- if (reqmsg == NULL)
- return -ENOMEM;
- memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
- }
-
- /* release old req/rep buf */
- req->rq_cli_ctx = oldctx;
- sptlrpc_cli_free_reqbuf(req);
- sptlrpc_cli_free_repbuf(req);
- req->rq_cli_ctx = newctx;
-
- /* recalculate the flavor */
- sptlrpc_req_set_flavor(req, 0);
-
-	/* Allocate a new request buffer. We don't need to allocate the
-	 * reply buffer here; that is left to the rest of the ptlrpc path.
-	 */
- if (reqmsg_size != 0) {
- rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
- if (!rc) {
- LASSERT(req->rq_reqmsg);
- memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
- } else {
- CWARN("failed to alloc reqbuf: %d\n", rc);
- req->rq_flvr = old_flvr;
- }
-
- kvfree(reqmsg);
- }
- return rc;
-}
-
-/**
- * If the current context of \a req is dead somehow, e.g. we just switched
- * flavor and thus marked the original contexts dead, find a new context for
- * it. If no switch is needed, \a req will end up with the same context.
- *
- * \note a request must have a context, to keep other parts of code happy.
- * In any case of failure during the switching, we must restore the old one.
- */
-int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
- struct ptlrpc_cli_ctx *newctx;
- int rc;
-
- LASSERT(oldctx);
-
- sptlrpc_cli_ctx_get(oldctx);
- sptlrpc_req_put_ctx(req, 0);
-
- rc = sptlrpc_req_get_ctx(req);
- if (unlikely(rc)) {
- LASSERT(!req->rq_cli_ctx);
-
- /* restore old ctx */
- req->rq_cli_ctx = oldctx;
- return rc;
- }
-
- newctx = req->rq_cli_ctx;
- LASSERT(newctx);
-
- if (unlikely(newctx == oldctx &&
- test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
- /*
-		 * We still got the old dead ctx; usually the system is too busy.
- */
- CDEBUG(D_SEC,
- "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
- newctx, newctx->cc_flags);
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- } else {
- /*
- * it's possible newctx == oldctx if we're switching
- * subflavor with the same sec.
- */
- rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
- if (rc) {
- /* restore old ctx */
- sptlrpc_req_put_ctx(req, 0);
- req->rq_cli_ctx = oldctx;
- return rc;
- }
-
- LASSERT(req->rq_cli_ctx == newctx);
- }
-
- sptlrpc_cli_ctx_put(oldctx, 1);
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
-
-static
-int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
-{
- if (cli_ctx_is_refreshed(ctx))
- return 1;
- return 0;
-}
-
-static
-int ctx_refresh_timeout(void *data)
-{
- struct ptlrpc_request *req = data;
- int rc;
-
- /* conn_cnt is needed in expire_one_request */
- lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
-
- rc = ptlrpc_expire_one_request(req, 1);
-	/* If we started recovery, we should mark this ctx dead; otherwise,
-	 * if lgssd died, nobody would retire this ctx, and subsequent
-	 * connection attempts would still find the same ctx, causing a
-	 * deadlock. This assumes that the expiry time of the request is
-	 * later than the context refresh expiry time.
- */
- if (rc == 0)
- req->rq_cli_ctx->cc_ops->force_die(req->rq_cli_ctx, 0);
- return rc;
-}
-
-static
-void ctx_refresh_interrupt(void *data)
-{
- struct ptlrpc_request *req = data;
-
- spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
-}
-
-static
-void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
-{
- spin_lock(&ctx->cc_lock);
- if (!list_empty(&req->rq_ctx_chain))
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&ctx->cc_lock);
-}
-
-/**
- * Refresh the context of \a req, if it's not up to date.
- * \param timeout
- * - < 0: don't wait
- * - = 0: wait until success or a fatal error occurs
- * - > 0: timeout value (in seconds)
- *
- * The status of the context may be changed by other threads at any time.
- * We allow this race, but once we return 0 the caller may assume the
- * context is up to date and keep using it until the owning rpc is done.
- *
- * \retval 0 only if the context is up to date.
- * \retval -ev error number.
- */
-int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec *sec;
- struct l_wait_info lwi;
- int rc;
-
- LASSERT(ctx);
-
- if (req->rq_ctx_init || req->rq_ctx_fini)
- return 0;
-
- /*
-	 * During the process a request's context might even change type
-	 * (e.g. from a gss ctx to a null ctx), so on each loop we need to
-	 * re-check everything.
- */
-again:
- rc = import_sec_validate_get(req->rq_import, &sec);
- if (rc)
- return rc;
-
- if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
- CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
- req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
- req_off_ctx_list(req, ctx);
- sptlrpc_req_replace_dead_ctx(req);
- ctx = req->rq_cli_ctx;
- }
- sptlrpc_sec_put(sec);
-
- if (cli_ctx_is_eternal(ctx))
- return 0;
-
- if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
- LASSERT(ctx->cc_ops->refresh);
- ctx->cc_ops->refresh(ctx);
- }
- LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
-
- LASSERT(ctx->cc_ops->validate);
- if (ctx->cc_ops->validate(ctx) == 0) {
- req_off_ctx_list(req, ctx);
- return 0;
- }
-
- if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
- spin_lock(&req->rq_lock);
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- req_off_ctx_list(req, ctx);
- return -EPERM;
- }
-
- /*
- * There's a subtle issue for resending RPCs, suppose following
- * situation:
- * 1. the request was sent to server.
- * 2. recovery was kicked start, after finished the request was
- * marked as resent.
- * 3. resend the request.
- * 4. old reply from server received, we accept and verify the reply.
- * this has to be success, otherwise the error will be aware
- * by application.
- * 5. new reply from server received, dropped by LNet.
- *
- * Note the xid of old & new request is the same. We can't simply
- * change xid for the resent request because the server replies on
- * it for reply reconstruction.
- *
- *    Commonly the original context should be up to date because we
- *    have a nice expiry time; the server will keep its context because
- *    we at least hold a ref on the old context, which prevents the
- *    context-destroying RPC from being sent. So the server can still
- *    accept the request and finish the RPC. But if that's not the case:
- * 1. If server side context has been trimmed, a NO_CONTEXT will
- * be returned, gss_cli_ctx_verify/unseal will switch to new
- * context by force.
- * 2. If the current context has never been refreshed, then we are
- *    fine: we never really sent a request with the old context before.
- */
- if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
- unlikely(req->rq_reqmsg) &&
- lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
- req_off_ctx_list(req, ctx);
- return 0;
- }
-
- if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
- req_off_ctx_list(req, ctx);
- /*
- * don't switch ctx if import was deactivated
- */
- if (req->rq_import->imp_deactive) {
- spin_lock(&req->rq_lock);
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- return -EINTR;
- }
-
- rc = sptlrpc_req_replace_dead_ctx(req);
- if (rc) {
- LASSERT(ctx == req->rq_cli_ctx);
- CERROR("req %p: failed to replace dead ctx %p: %d\n",
- req, ctx, rc);
- spin_lock(&req->rq_lock);
- req->rq_err = 1;
- spin_unlock(&req->rq_lock);
- return rc;
- }
-
- ctx = req->rq_cli_ctx;
- goto again;
- }
-
- /*
- * Now we're sure this context is during upcall, add myself into
- * waiting list
- */
- spin_lock(&ctx->cc_lock);
- if (list_empty(&req->rq_ctx_chain))
- list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
- spin_unlock(&ctx->cc_lock);
-
- if (timeout < 0)
- return -EWOULDBLOCK;
-
- /* Clear any flags that may be present from previous sends */
- LASSERT(req->rq_receiving_reply == 0);
- spin_lock(&req->rq_lock);
- req->rq_err = 0;
- req->rq_timedout = 0;
- req->rq_resend = 0;
- req->rq_restart = 0;
- spin_unlock(&req->rq_lock);
-
- lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
- ctx_refresh_interrupt, req);
- rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
-
- /*
-	 * The following cases could lead us here:
-	 * - successfully refreshed;
-	 * - interrupted;
-	 * - timed out, and we don't want to recover from the failure;
-	 * - timed out, and woken up when recovery finished;
-	 * - someone else marked this ctx dead by force;
-	 * - someone invalidated the req and called ptlrpc_client_wake_req(),
- * e.g. ptlrpc_abort_inflight();
- */
- if (!cli_ctx_is_refreshed(ctx)) {
- /* timed out or interrupted */
- req_off_ctx_list(req, ctx);
-
- LASSERT(rc != 0);
- return rc;
- }
-
- goto again;
-}
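The timeout convention documented above (< 0: don't wait, == 0: wait until success or fatal error, > 0: wait that many seconds) is easy to get wrong when re-implemented. A hedged userspace sketch of a wait helper following the same convention, built on a pthread condition variable; the predicate and names are illustrative, not the ptlrpc wait machinery:

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int refreshed;	/* stands in for cli_ctx_is_refreshed() */

/* timeout < 0: don't wait; == 0: wait forever; > 0: wait `timeout` seconds. */
static int wait_refreshed(long timeout)
{
	struct timespec deadline;
	int rc = 0;

	if (timeout > 0) {
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += timeout;	/* single absolute deadline */
	}

	pthread_mutex_lock(&lock);
	while (!refreshed && rc == 0) {
		if (timeout < 0)
			rc = -EWOULDBLOCK;	/* caller said: don't wait */
		else if (timeout == 0)
			pthread_cond_wait(&cond, &lock);
		else if (pthread_cond_timedwait(&cond, &lock, &deadline) ==
			 ETIMEDOUT)
			rc = -ETIMEDOUT;
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

A producer would set refreshed under the lock and broadcast cond; spurious wakeups are handled by re-checking the predicate against the single precomputed deadline.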
-
-/**
- * Initialize flavor settings for \a req, according to \a opcode.
- *
- * \note this could be called in two situations:
- * - new request from ptlrpc_pre_req(), with proper @opcode
- * - old request which changed ctx in the middle, with @opcode == 0
- */
-void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
-{
- struct ptlrpc_sec *sec;
-
- LASSERT(req->rq_import);
- LASSERT(req->rq_cli_ctx);
- LASSERT(req->rq_cli_ctx->cc_sec);
- LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
-
- /* special security flags according to opcode */
- switch (opcode) {
- case OST_READ:
- case MDS_READPAGE:
- case MGS_CONFIG_READ:
- case OBD_IDX_READ:
- req->rq_bulk_read = 1;
- break;
- case OST_WRITE:
- case MDS_WRITEPAGE:
- req->rq_bulk_write = 1;
- break;
- case SEC_CTX_INIT:
- req->rq_ctx_init = 1;
- break;
- case SEC_CTX_FINI:
- req->rq_ctx_fini = 1;
- break;
- case 0:
-		/* init/fini rpcs won't be resent, so they can't be here */
- LASSERT(req->rq_ctx_init == 0);
- LASSERT(req->rq_ctx_fini == 0);
-
- /* cleanup flags, which should be recalculated */
- req->rq_pack_udesc = 0;
- req->rq_pack_bulk = 0;
- break;
- }
-
- sec = req->rq_cli_ctx->cc_sec;
-
- spin_lock(&sec->ps_lock);
- req->rq_flvr = sec->ps_flvr;
- spin_unlock(&sec->ps_lock);
-
- /* force SVC_NULL for context initiation rpc, SVC_INTG for context
- * destruction rpc */
- if (unlikely(req->rq_ctx_init))
- flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
- else if (unlikely(req->rq_ctx_fini))
- flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
-
- /* user descriptor flag, null security can't do it anyway */
- if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
- (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
- req->rq_pack_udesc = 1;
-
- /* bulk security flag */
- if ((req->rq_bulk_read || req->rq_bulk_write) &&
- sptlrpc_flavor_has_bulk(&req->rq_flvr))
- req->rq_pack_bulk = 1;
-}
-
-void sptlrpc_request_out_callback(struct ptlrpc_request *req)
-{
- if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
- return;
-
- LASSERT(req->rq_clrbuf);
- if (req->rq_pool || !req->rq_reqbuf)
- return;
-
- kfree(req->rq_reqbuf);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
-}
-
-/**
- * Given an import \a imp, check whether the current user has a valid
- * context or not. We may create a new context and try to refresh it,
- * retrying repeatedly in case of non-fatal errors. Returning 0 means success.
- */
-int sptlrpc_import_check_ctx(struct obd_import *imp)
-{
- struct ptlrpc_sec *sec;
- struct ptlrpc_cli_ctx *ctx;
- struct ptlrpc_request *req = NULL;
- int rc;
-
- might_sleep();
-
- sec = sptlrpc_import_sec_ref(imp);
- ctx = get_my_ctx(sec);
- sptlrpc_sec_put(sec);
-
- if (!ctx)
- return -ENOMEM;
-
- if (cli_ctx_is_eternal(ctx) ||
- ctx->cc_ops->validate(ctx) == 0) {
- sptlrpc_cli_ctx_put(ctx, 1);
- return 0;
- }
-
- if (cli_ctx_is_error(ctx)) {
- sptlrpc_cli_ctx_put(ctx, 1);
- return -EACCES;
- }
-
- req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (!req)
- return -ENOMEM;
-
- spin_lock_init(&req->rq_lock);
- atomic_set(&req->rq_refcount, 10000);
- INIT_LIST_HEAD(&req->rq_ctx_chain);
- init_waitqueue_head(&req->rq_reply_waitq);
- init_waitqueue_head(&req->rq_set_waitq);
- req->rq_import = imp;
- req->rq_flvr = sec->ps_flvr;
- req->rq_cli_ctx = ctx;
-
- rc = sptlrpc_req_refresh_ctx(req, 0);
- LASSERT(list_empty(&req->rq_ctx_chain));
- sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
- ptlrpc_request_cache_free(req);
-
- return rc;
-}
-
-/**
- * Used by ptlrpc client, to perform the pre-defined security transformation
- * upon the request message of \a req. After this function called,
- * req->rq_reqmsg is still accessible as clear text.
- */
-int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- int rc = 0;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(req->rq_reqbuf || req->rq_clrbuf);
-
- /* we wrap bulk request here because now we can be sure
- * the context is uptodate.
- */
- if (req->rq_bulk) {
- rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
- if (rc)
- return rc;
- }
-
- switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
- case SPTLRPC_SVC_NULL:
- case SPTLRPC_SVC_AUTH:
- case SPTLRPC_SVC_INTG:
- LASSERT(ctx->cc_ops->sign);
- rc = ctx->cc_ops->sign(ctx, req);
- break;
- case SPTLRPC_SVC_PRIV:
- LASSERT(ctx->cc_ops->seal);
- rc = ctx->cc_ops->seal(ctx, req);
- break;
- default:
- LBUG();
- }
-
- if (rc == 0) {
- LASSERT(req->rq_reqdata_len);
- LASSERT(req->rq_reqdata_len % 8 == 0);
- LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
- }
-
- return rc;
-}
-
-static int do_cli_unwrap_reply(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- int rc;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(req->rq_repbuf);
- LASSERT(req->rq_repdata);
- LASSERT(req->rq_repmsg == NULL);
-
- req->rq_rep_swab_mask = 0;
-
- rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
- switch (rc) {
- case 1:
- lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- case 0:
- break;
- default:
- CERROR("failed unpack reply: x%llu\n", req->rq_xid);
- return -EPROTO;
- }
-
- if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
- CERROR("replied data length %d too small\n",
- req->rq_repdata_len);
- return -EPROTO;
- }
-
- if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
- SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
- CERROR("reply policy %u doesn't match request policy %u\n",
- SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
- SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
- return -EPROTO;
- }
-
- switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
- case SPTLRPC_SVC_NULL:
- case SPTLRPC_SVC_AUTH:
- case SPTLRPC_SVC_INTG:
- LASSERT(ctx->cc_ops->verify);
- rc = ctx->cc_ops->verify(ctx, req);
- break;
- case SPTLRPC_SVC_PRIV:
- LASSERT(ctx->cc_ops->unseal);
- rc = ctx->cc_ops->unseal(ctx, req);
- break;
- default:
- LBUG();
- }
- LASSERT(rc || req->rq_repmsg || req->rq_resend);
-
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
- !req->rq_ctx_init)
- req->rq_rep_swab_mask = 0;
- return rc;
-}
-
-/**
- * Used by ptlrpc client, to perform security transformation upon the reply
- * message of \a req. After return successfully, req->rq_repmsg points to
- * the reply message in clear text.
- *
- * \pre the reply buffer should have been un-posted from LNet, so nothing is
- * going to change.
- */
-int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
-{
- LASSERT(req->rq_repbuf);
- LASSERT(req->rq_repdata == NULL);
- LASSERT(req->rq_repmsg == NULL);
- LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
-
- if (req->rq_reply_off == 0 &&
- (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- CERROR("real reply with offset 0\n");
- return -EPROTO;
- }
-
- if (req->rq_reply_off % 8 != 0) {
- CERROR("reply at odd offset %u\n", req->rq_reply_off);
- return -EPROTO;
- }
-
- req->rq_repdata = (struct lustre_msg *)
- (req->rq_repbuf + req->rq_reply_off);
- req->rq_repdata_len = req->rq_nob_received;
-
- return do_cli_unwrap_reply(req);
-}
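The checks above amount to simple buffer-geometry validation before parsing: the reply must be 8-byte aligned, a real reply should not sit at offset 0 when adaptive timeouts are in use, and offset plus received length must fit inside the reply buffer. A standalone sketch of that validation with illustrative names (the original treats the bounds check as an assertion rather than an error return):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Validate that a reply of `nob_received` bytes at `reply_off` fits in a
 * `repbuf_len`-byte buffer and is 8-byte aligned (sketch only). */
static int check_reply_geometry(uint32_t reply_off, uint32_t nob_received,
				uint32_t repbuf_len, bool expect_offset)
{
	if ((uint64_t)reply_off + nob_received > repbuf_len)
		return -EOVERFLOW;
	if (reply_off % 8 != 0)
		return -EPROTO;
	if (expect_offset && reply_off == 0)
		return -EPROTO;	/* a real reply should not sit at offset 0 */
	return 0;
}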
-
-/**
- * Used by ptlrpc client, to perform security transformation upon the early
- * reply message of \a req. We expect rq_reply_off to be 0, and
- * rq_nob_received is the early reply size.
- *
- * Because the receive buffer might still be posted, the reply data might be
- * changed at any time, whether or not we're holding rq_lock. For this reason
- * we allocate a separate ptlrpc_request and reply buffer for early reply
- * processing.
- *
- * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
- * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
- * \a *req_ret to release it.
- * \retval -ev error number, and \a req_ret will not be set.
- */
-int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
- struct ptlrpc_request **req_ret)
-{
- struct ptlrpc_request *early_req;
- char *early_buf;
- int early_bufsz, early_size;
- int rc;
-
- early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (early_req == NULL)
- return -ENOMEM;
-
- early_size = req->rq_nob_received;
- early_bufsz = size_roundup_power2(early_size);
- early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
- if (early_buf == NULL) {
- rc = -ENOMEM;
- goto err_req;
- }
-
-	/* sanity checks and data copy-out, done inside the spinlock */
- spin_lock(&req->rq_lock);
-
- if (req->rq_replied) {
- spin_unlock(&req->rq_lock);
- rc = -EALREADY;
- goto err_buf;
- }
-
- LASSERT(req->rq_repbuf);
- LASSERT(req->rq_repdata == NULL);
- LASSERT(req->rq_repmsg == NULL);
-
- if (req->rq_reply_off != 0) {
- CERROR("early reply with offset %u\n", req->rq_reply_off);
- spin_unlock(&req->rq_lock);
- rc = -EPROTO;
- goto err_buf;
- }
-
- if (req->rq_nob_received != early_size) {
-		/* even if another early reply arrived, the size should be the same */
- CERROR("data size has changed from %u to %u\n",
- early_size, req->rq_nob_received);
- spin_unlock(&req->rq_lock);
- rc = -EINVAL;
- goto err_buf;
- }
-
- if (req->rq_nob_received < sizeof(struct lustre_msg)) {
- CERROR("early reply length %d too small\n",
- req->rq_nob_received);
- spin_unlock(&req->rq_lock);
- rc = -EALREADY;
- goto err_buf;
- }
-
- memcpy(early_buf, req->rq_repbuf, early_size);
- spin_unlock(&req->rq_lock);
-
- spin_lock_init(&early_req->rq_lock);
- early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
- early_req->rq_flvr = req->rq_flvr;
- early_req->rq_repbuf = early_buf;
- early_req->rq_repbuf_len = early_bufsz;
- early_req->rq_repdata = (struct lustre_msg *) early_buf;
- early_req->rq_repdata_len = early_size;
- early_req->rq_early = 1;
- early_req->rq_reqmsg = req->rq_reqmsg;
-
- rc = do_cli_unwrap_reply(early_req);
- if (rc) {
- DEBUG_REQ(D_ADAPTTO, early_req,
- "error %d unwrap early reply", rc);
- goto err_ctx;
- }
-
- LASSERT(early_req->rq_repmsg);
- *req_ret = early_req;
- return 0;
-
-err_ctx:
- sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
-err_buf:
- kvfree(early_buf);
-err_req:
- ptlrpc_request_cache_free(early_req);
- return rc;
-}
-
-/**
- * Used by ptlrpc client, to release a processed early reply \a early_req.
- *
- * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
- */
-void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
-{
- LASSERT(early_req->rq_repbuf);
- LASSERT(early_req->rq_repdata);
- LASSERT(early_req->rq_repmsg);
-
- sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
- kvfree(early_req->rq_repbuf);
- ptlrpc_request_cache_free(early_req);
-}
-
-/**************************************************
- * sec ID *
- **************************************************/
-
-/*
- * "fixed" secs (e.g. null) use sec_id < 0
- */
-static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
-
-int sptlrpc_get_next_secid(void)
-{
- return atomic_inc_return(&sptlrpc_sec_id);
-}
-EXPORT_SYMBOL(sptlrpc_get_next_secid);
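The sec ID allocator above is a monotonically increasing atomic counter, with negative IDs reserved for "fixed" policies such as null. A C11 equivalent for illustration (names are not from the deleted code):

#include <stdatomic.h>

static atomic_int next_secid = 1;	/* < 0 is reserved for "fixed" secs */

static int get_next_secid(void)
{
	/* returns 2, 3, 4, ... like atomic_inc_return() on an initial 1 */
	return atomic_fetch_add(&next_secid, 1) + 1;
}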
-
-/**************************************************
- * client side high-level security APIs *
- **************************************************/
-
-static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
- int grace, int force)
-{
- struct ptlrpc_sec_policy *policy = sec->ps_policy;
-
- LASSERT(policy->sp_cops);
- LASSERT(policy->sp_cops->flush_ctx_cache);
-
- return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
-}
-
-static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
-{
- struct ptlrpc_sec_policy *policy = sec->ps_policy;
-
- LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
- LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
- LASSERT(policy->sp_cops->destroy_sec);
-
- CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
-
- policy->sp_cops->destroy_sec(sec);
- sptlrpc_policy_put(policy);
-}
-
-static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
-{
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
-
- if (sec->ps_policy->sp_cops->kill_sec) {
- sec->ps_policy->sp_cops->kill_sec(sec);
-
- sec_cop_flush_ctx_cache(sec, -1, 1, 1);
- }
-}
-
-struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
-{
- if (sec)
- atomic_inc(&sec->ps_refcount);
-
- return sec;
-}
-EXPORT_SYMBOL(sptlrpc_sec_get);
-
-void sptlrpc_sec_put(struct ptlrpc_sec *sec)
-{
- if (sec) {
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
-
- if (atomic_dec_and_test(&sec->ps_refcount)) {
- sptlrpc_gc_del_sec(sec);
- sec_cop_destroy_sec(sec);
- }
- }
-}
-EXPORT_SYMBOL(sptlrpc_sec_put);
-
-/*
- * policy module is responsible for taking reference of import
- */
-static
-struct ptlrpc_sec *sptlrpc_sec_create(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *sf,
- enum lustre_sec_part sp)
-{
- struct ptlrpc_sec_policy *policy;
- struct ptlrpc_sec *sec;
- char str[32];
-
- if (svc_ctx) {
- LASSERT(imp->imp_dlm_fake == 1);
-
- CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
- imp->imp_obd->obd_type->typ_name,
- imp->imp_obd->obd_name,
- sptlrpc_flavor2name(sf, str, sizeof(str)));
-
- policy = sptlrpc_policy_get(svc_ctx->sc_policy);
- sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
- } else {
- LASSERT(imp->imp_dlm_fake == 0);
-
- CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
- imp->imp_obd->obd_type->typ_name,
- imp->imp_obd->obd_name,
- sptlrpc_flavor2name(sf, str, sizeof(str)));
-
- policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
- if (!policy) {
- CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
- return NULL;
- }
- }
-
- sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
- if (sec) {
- atomic_inc(&sec->ps_refcount);
-
- sec->ps_part = sp;
-
- if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
- sptlrpc_gc_add_sec(sec);
- } else {
- sptlrpc_policy_put(policy);
- }
-
- return sec;
-}
-
-struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
-{
- struct ptlrpc_sec *sec;
-
- spin_lock(&imp->imp_lock);
- sec = sptlrpc_sec_get(imp->imp_sec);
- spin_unlock(&imp->imp_lock);
-
- return sec;
-}
-EXPORT_SYMBOL(sptlrpc_import_sec_ref);
-
-static void sptlrpc_import_sec_install(struct obd_import *imp,
- struct ptlrpc_sec *sec)
-{
- struct ptlrpc_sec *old_sec;
-
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
-
- spin_lock(&imp->imp_lock);
- old_sec = imp->imp_sec;
- imp->imp_sec = sec;
- spin_unlock(&imp->imp_lock);
-
- if (old_sec) {
- sptlrpc_sec_kill(old_sec);
-
- /* balance the ref taken by this import */
- sptlrpc_sec_put(old_sec);
- }
-}
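sptlrpc_import_sec_install() above is a replace-under-lock pattern: publish the new sec while holding imp_lock, then kill and drop the reference on the old one outside the lock. A minimal userspace sketch of the same shape, with illustrative names and print statements standing in for the real kill/put operations:

#include <pthread.h>
#include <stdio.h>

struct sec {
	const char *name;
};

static pthread_mutex_t imp_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sec *imp_sec;

static void sec_kill(struct sec *s) { printf("kill %s\n", s->name); }
static void sec_put(struct sec *s)  { printf("put %s\n", s->name); }

/* Swap in the new sec under the lock; retire the old one outside it. */
static void sec_install(struct sec *newsec)
{
	struct sec *old;

	pthread_mutex_lock(&imp_lock);
	old = imp_sec;
	imp_sec = newsec;
	pthread_mutex_unlock(&imp_lock);

	if (old) {
		sec_kill(old);
		sec_put(old);	/* drop the reference the import held */
	}
}

int main(void)
{
	static struct sec a = { "flavor-null" }, b = { "flavor-krb5i" };

	sec_install(&a);
	sec_install(&b);	/* prints: kill flavor-null, put flavor-null */
	return 0;
}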
-
-static inline
-int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
-{
- return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
-}
-
-static inline
-void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
-{
- *dst = *src;
-}
-
-static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
- struct ptlrpc_sec *sec,
- struct sptlrpc_flavor *sf)
-{
- char str1[32], str2[32];
-
- if (sec->ps_flvr.sf_flags != sf->sf_flags)
- CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
- sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
- str1, sizeof(str1)),
- sptlrpc_secflags2str(sf->sf_flags,
- str2, sizeof(str2)));
-
- spin_lock(&sec->ps_lock);
- flavor_copy(&sec->ps_flvr, sf);
- spin_unlock(&sec->ps_lock);
-}
-
-/**
- * To get an appropriate ptlrpc_sec for the \a imp, according to the current
- * configuration. Upon called, imp->imp_sec may or may not be NULL.
- *
- * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
- * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
- */
-int sptlrpc_import_sec_adapt(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *flvr)
-{
- struct ptlrpc_connection *conn;
- struct sptlrpc_flavor sf;
- struct ptlrpc_sec *sec, *newsec;
- enum lustre_sec_part sp;
- char str[24];
- int rc = 0;
-
- might_sleep();
-
- if (imp == NULL)
- return 0;
-
- conn = imp->imp_connection;
-
- if (svc_ctx == NULL) {
- struct client_obd *cliobd = &imp->imp_obd->u.cli;
- /*
-		 * Normal import: determine the flavor from the rule set,
-		 * except for the MGC, whose flavor is predetermined.
- */
- if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
- sf = cliobd->cl_flvr_mgc;
- else
- sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
- cliobd->cl_sp_to,
- &cliobd->cl_target_uuid,
- conn->c_self, &sf);
-
- sp = imp->imp_obd->u.cli.cl_sp_me;
- } else {
- /* reverse import, determine flavor from incoming request */
- sf = *flvr;
-
- if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
- sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
- PTLRPC_SEC_FL_ROOTONLY;
-
- sp = sptlrpc_target_sec_part(imp->imp_obd);
- }
-
- sec = sptlrpc_import_sec_ref(imp);
- if (sec) {
- char str2[24];
-
- if (flavor_equal(&sf, &sec->ps_flvr))
- goto out;
-
- CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
- imp->imp_obd->obd_name,
- obd_uuid2str(&conn->c_remote_uuid),
- sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
- sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
-
- if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
- SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
- SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
- SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
- sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
- goto out;
- }
- } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
- SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
- CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
- imp->imp_obd->obd_name,
- obd_uuid2str(&conn->c_remote_uuid),
- LNET_NIDNET(conn->c_self),
- sptlrpc_flavor2name(&sf, str, sizeof(str)));
- }
-
- mutex_lock(&imp->imp_sec_mutex);
-
- newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
- if (newsec) {
- sptlrpc_import_sec_install(imp, newsec);
- } else {
- CERROR("import %s->%s: failed to create new sec\n",
- imp->imp_obd->obd_name,
- obd_uuid2str(&conn->c_remote_uuid));
- rc = -EPERM;
- }
-
- mutex_unlock(&imp->imp_sec_mutex);
-out:
- sptlrpc_sec_put(sec);
- return rc;
-}
-
-void sptlrpc_import_sec_put(struct obd_import *imp)
-{
- if (imp->imp_sec) {
- sptlrpc_sec_kill(imp->imp_sec);
-
- sptlrpc_sec_put(imp->imp_sec);
- imp->imp_sec = NULL;
- }
-}
-
-static void import_flush_ctx_common(struct obd_import *imp,
- uid_t uid, int grace, int force)
-{
- struct ptlrpc_sec *sec;
-
- if (imp == NULL)
- return;
-
- sec = sptlrpc_import_sec_ref(imp);
- if (sec == NULL)
- return;
-
- sec_cop_flush_ctx_cache(sec, uid, grace, force);
- sptlrpc_sec_put(sec);
-}
-
-void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
-{
- import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
- 1, 1);
-}
-EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
-
-void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
-{
- import_flush_ctx_common(imp, -1, 1, 1);
-}
-EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
-
-/**
- * Used by ptlrpc client to allocate the request buffer of \a req. On
- * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
- */
-int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
- int rc;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
- LASSERT(req->rq_reqmsg == NULL);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- policy = ctx->cc_sec->ps_policy;
- rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
- if (!rc) {
- LASSERT(req->rq_reqmsg);
- LASSERT(req->rq_reqbuf || req->rq_clrbuf);
-
- /* zeroing preallocated buffer */
- if (req->rq_pool)
- memset(req->rq_reqmsg, 0, msgsize);
- }
-
- return rc;
-}
-
-/**
- * Used by ptlrpc client to free request buffer of \a req. After this
- * req->rq_reqmsg is set to NULL and should not be accessed anymore.
- */
-void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
- return;
-
- policy = ctx->cc_sec->ps_policy;
- policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
- req->rq_reqmsg = NULL;
-}
-
-/*
- * NOTE caller must guarantee the buffer size is enough for the enlargement
- */
-void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
- int segment, int newsize)
-{
- void *src, *dst;
- int oldsize, oldmsg_size, movesize;
-
- LASSERT(segment < msg->lm_bufcount);
- LASSERT(msg->lm_buflens[segment] <= newsize);
-
- if (msg->lm_buflens[segment] == newsize)
- return;
-
- /* nothing to do if we are enlarging the last segment */
- if (segment == msg->lm_bufcount - 1) {
- msg->lm_buflens[segment] = newsize;
- return;
- }
-
- oldsize = msg->lm_buflens[segment];
-
- src = lustre_msg_buf(msg, segment + 1, 0);
- msg->lm_buflens[segment] = newsize;
- dst = lustre_msg_buf(msg, segment + 1, 0);
- msg->lm_buflens[segment] = oldsize;
-
- /* move from segment + 1 to end segment */
- LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
- oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
- movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
- LASSERT(movesize >= 0);
-
- if (movesize)
- memmove(dst, src, movesize);
-
-	/* note we don't clear the areas where the old data lived; it's not secret */
-
- /* finally set new segment size */
- msg->lm_buflens[segment] = newsize;
-}
-EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
-
-/**
- * Used by ptlrpc client to enlarge the \a segment of the request message
- * pointed to by req->rq_reqmsg to size \a newsize; all previously filled-in
- * data is preserved across the enlargement. This must be called after the
- * original request buffer has been allocated.
- *
- * \note After this is called, rq_reqmsg and rq_reqlen might have changed,
- * so the caller should refresh its local pointers if needed.
- */
-int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
- int segment, int newsize)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_cops *cops;
- struct lustre_msg *msg = req->rq_reqmsg;
-
- LASSERT(ctx);
- LASSERT(msg);
- LASSERT(msg->lm_bufcount > segment);
- LASSERT(msg->lm_buflens[segment] <= newsize);
-
- if (msg->lm_buflens[segment] == newsize)
- return 0;
-
- cops = ctx->cc_sec->ps_policy->sp_cops;
- LASSERT(cops->enlarge_reqbuf);
- return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
-}
-EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
-
-/**
- * Used by ptlrpc client to allocate reply buffer of \a req.
- *
- * \note After this, req->rq_repmsg is still not accessible.
- */
-int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
-
- if (req->rq_repbuf)
- return 0;
-
- policy = ctx->cc_sec->ps_policy;
- return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize);
-}
-
-/**
- * Used by ptlrpc client to free reply buffer of \a req. After this
- * req->rq_repmsg is set to NULL and should not be accessed anymore.
- */
-void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
-{
- struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
- struct ptlrpc_sec_policy *policy;
-
- LASSERT(ctx);
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_policy);
- LASSERT_ATOMIC_POS(&ctx->cc_refcount);
-
- if (req->rq_repbuf == NULL)
- return;
- LASSERT(req->rq_repbuf_len);
-
- policy = ctx->cc_sec->ps_policy;
- policy->sp_cops->free_repbuf(ctx->cc_sec, req);
- req->rq_repmsg = NULL;
-}
-
-int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx)
-{
- struct ptlrpc_sec_policy *policy = ctx->sc_policy;
-
- if (!policy->sp_sops->install_rctx)
- return 0;
- return policy->sp_sops->install_rctx(imp, ctx);
-}
-
-/****************************************
- * server side security *
- ****************************************/
-
-static int flavor_allowed(struct sptlrpc_flavor *exp,
- struct ptlrpc_request *req)
-{
- struct sptlrpc_flavor *flvr = &req->rq_flvr;
-
- if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
- return 1;
-
- if ((req->rq_ctx_init || req->rq_ctx_fini) &&
- SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
- SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
- SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
- return 1;
-
- return 0;
-}
-
-#define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
-
-/**
- * Given an export \a exp, check whether the flavor of the incoming \a req
- * is allowed by \a exp. The main logic deals with handling changing
- * configurations. Returns 0 on success.
- */
-int sptlrpc_target_export_check(struct obd_export *exp,
- struct ptlrpc_request *req)
-{
- struct sptlrpc_flavor flavor;
-
- if (exp == NULL)
- return 0;
-
- /* client side export has no imp_reverse, skip
-	 * FIXME maybe we should check the flavor here as well??? */
- if (exp->exp_imp_reverse == NULL)
- return 0;
-
- /* don't care about ctx fini rpc */
- if (req->rq_ctx_fini)
- return 0;
-
- spin_lock(&exp->exp_lock);
-
-	/* if the flavor just changed (exp->exp_flvr_changed != 0), we wait for
-	 * the first req with the new flavor, then treat it as the current
-	 * flavor and adapt the reverse sec according to it.
-	 * note the first rpc with the new flavor might not carry a root ctx,
-	 * in which case the sec_adapt is delayed by leaving exp_flvr_adapt == 1. */
- if (unlikely(exp->exp_flvr_changed) &&
- flavor_allowed(&exp->exp_flvr_old[1], req)) {
-		/* make the new flavor the "current" one, and the old ones
-		 * about-to-expire */
- CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
- exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
- flavor = exp->exp_flvr_old[1];
- exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
- exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
- exp->exp_flvr_old[0] = exp->exp_flvr;
- exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
- EXP_FLVR_UPDATE_EXPIRE;
- exp->exp_flvr = flavor;
-
- /* flavor change finished */
- exp->exp_flvr_changed = 0;
- LASSERT(exp->exp_flvr_adapt == 1);
-
-		/* if it's gss, we are only interested in root ctx init */
- if (req->rq_auth_gss &&
- !(req->rq_ctx_init &&
- (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
- req->rq_auth_usr_ost))) {
- spin_unlock(&exp->exp_lock);
- CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
- req->rq_auth_gss, req->rq_ctx_init,
- req->rq_auth_usr_root, req->rq_auth_usr_mdt,
- req->rq_auth_usr_ost);
- return 0;
- }
-
- exp->exp_flvr_adapt = 0;
- spin_unlock(&exp->exp_lock);
-
- return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
- req->rq_svc_ctx, &flavor);
- }
-
-	/* if it equals the current flavor, we accept it, but still need to
-	 * deal with the reverse sec/ctx */
- if (likely(flavor_allowed(&exp->exp_flvr, req))) {
-		/* most cases should return here; we are only interested in
-		 * gss root ctx init */
- if (!req->rq_auth_gss || !req->rq_ctx_init ||
- (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
- !req->rq_auth_usr_ost)) {
- spin_unlock(&exp->exp_lock);
- return 0;
- }
-
-		/* if the flavor just changed, we should not proceed; just
-		 * leave it, the current flavor will be discovered and
-		 * replaced shortly, and let _this_ rpc pass through */
- if (exp->exp_flvr_changed) {
- LASSERT(exp->exp_flvr_adapt);
- spin_unlock(&exp->exp_lock);
- return 0;
- }
-
- if (exp->exp_flvr_adapt) {
- exp->exp_flvr_adapt = 0;
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
- exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- flavor = exp->exp_flvr;
- spin_unlock(&exp->exp_lock);
-
- return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
- req->rq_svc_ctx,
- &flavor);
- } else {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
- exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- spin_unlock(&exp->exp_lock);
-
- return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
- req->rq_svc_ctx);
- }
- }
-
- if (exp->exp_flvr_expire[0]) {
- if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
- if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (%lld)\n", exp,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc,
- (s64)(exp->exp_flvr_expire[0] -
- ktime_get_real_seconds()));
- spin_unlock(&exp->exp_lock);
- return 0;
- }
- } else {
- CDEBUG(D_SEC, "mark middle expired\n");
- exp->exp_flvr_expire[0] = 0;
- }
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
- req->rq_flvr.sf_rpc);
- }
-
-	/* it doesn't match the current flavor; the only way we can still
-	 * accept it is to match one of the old flavors that has not expired. */
- if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
- if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
- if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
- exp,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc,
- (s64)(exp->exp_flvr_expire[1] -
- ktime_get_real_seconds()));
- spin_unlock(&exp->exp_lock);
- return 0;
- }
- } else {
- CDEBUG(D_SEC, "mark oldest expired\n");
- exp->exp_flvr_expire[1] = 0;
- }
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
- exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
- req->rq_flvr.sf_rpc);
- } else {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
- exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- }
-
- spin_unlock(&exp->exp_lock);
-
- CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
- exp, exp->exp_obd->obd_name,
- req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
- req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
- req->rq_flvr.sf_rpc,
- exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_expire[0] ?
- (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
- exp->exp_flvr_old[1].sf_rpc,
- exp->exp_flvr_expire[1] ?
- (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
- return -EACCES;
-}
-EXPORT_SYMBOL(sptlrpc_target_export_check);
-
-static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
-{
- /* peer's claim is unreliable unless gss is being used */
- if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
- return svc_rc;
-
- switch (req->rq_sp_from) {
- case LUSTRE_SP_CLI:
- if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
- DEBUG_REQ(D_ERROR, req, "faked source CLI");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_MDT:
- if (!req->rq_auth_usr_mdt) {
- DEBUG_REQ(D_ERROR, req, "faked source MDT");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_OST:
- if (!req->rq_auth_usr_ost) {
- DEBUG_REQ(D_ERROR, req, "faked source OST");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_MGS:
- case LUSTRE_SP_MGC:
- if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
- !req->rq_auth_usr_ost) {
- DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
- svc_rc = SECSVC_DROP;
- }
- break;
- case LUSTRE_SP_ANY:
- default:
- DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
- svc_rc = SECSVC_DROP;
- }
-
- return svc_rc;
-}
-
-/**
- * Used by ptlrpc server to perform transformation upon the request message of
- * an incoming \a req. This must be the first thing done with an incoming
- * request in the ptlrpc layer.
- *
- * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
- * in clear text, of size req->rq_reqlen; also req->rq_svc_ctx is set.
- * \retval SECSVC_COMPLETE success, the request has been fully processed, and
- * reply message has been prepared.
- * \retval SECSVC_DROP failed, this request should be dropped.
- */
-int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
-{
- struct ptlrpc_sec_policy *policy;
- struct lustre_msg *msg = req->rq_reqbuf;
- int rc;
-
- LASSERT(msg);
- LASSERT(req->rq_reqmsg == NULL);
- LASSERT(req->rq_repmsg == NULL);
- LASSERT(req->rq_svc_ctx == NULL);
-
- req->rq_req_swab_mask = 0;
-
- rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
- switch (rc) {
- case 1:
- lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
- case 0:
- break;
- default:
- CERROR("error unpacking request from %s x%llu\n",
- libcfs_id2str(req->rq_peer), req->rq_xid);
- return SECSVC_DROP;
- }
-
- req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
- req->rq_sp_from = LUSTRE_SP_ANY;
- req->rq_auth_uid = -1;
- req->rq_auth_mapped_uid = -1;
-
- policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
- if (!policy) {
- CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
- return SECSVC_DROP;
- }
-
- LASSERT(policy->sp_sops->accept);
- rc = policy->sp_sops->accept(req);
- sptlrpc_policy_put(policy);
- LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
- LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
-
- /*
- * if it's not null flavor (which means embedded packing msg),
- * reset the swab mask for the coming inner msg unpacking.
- */
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
- req->rq_req_swab_mask = 0;
-
- /* sanity check for the request source */
- rc = sptlrpc_svc_check_from(req, rc);
- return rc;
-}
-
-/**
- * Used by ptlrpc server to allocate the reply buffer for \a req. On success,
- * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
- * a buffer of size \a msglen.
- */
-int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
-{
- struct ptlrpc_sec_policy *policy;
- struct ptlrpc_reply_state *rs;
- int rc;
-
- LASSERT(req->rq_svc_ctx);
- LASSERT(req->rq_svc_ctx->sc_policy);
-
- policy = req->rq_svc_ctx->sc_policy;
- LASSERT(policy->sp_sops->alloc_rs);
-
- rc = policy->sp_sops->alloc_rs(req, msglen);
- if (unlikely(rc == -ENOMEM)) {
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- if (svcpt->scp_service->srv_max_reply_size <
- msglen + sizeof(struct ptlrpc_reply_state)) {
- /* Just return failure if the size is too big */
- CERROR("size of message is too big (%zd), %d allowed",
- msglen + sizeof(struct ptlrpc_reply_state),
- svcpt->scp_service->srv_max_reply_size);
- return -ENOMEM;
- }
-
- /* failed alloc, try emergency pool */
- rs = lustre_get_emerg_rs(svcpt);
- if (rs == NULL)
- return -ENOMEM;
-
- req->rq_reply_state = rs;
- rc = policy->sp_sops->alloc_rs(req, msglen);
- if (rc) {
- lustre_put_emerg_rs(rs);
- req->rq_reply_state = NULL;
- }
- }
-
- LASSERT(rc != 0 ||
- (req->rq_reply_state && req->rq_reply_state->rs_msg));
-
- return rc;
-}
-
-/**
- * Used by ptlrpc server to perform transformation upon the reply message.
- *
- * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
- * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
- */
-int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
-{
- struct ptlrpc_sec_policy *policy;
- int rc;
-
- LASSERT(req->rq_svc_ctx);
- LASSERT(req->rq_svc_ctx->sc_policy);
-
- policy = req->rq_svc_ctx->sc_policy;
- LASSERT(policy->sp_sops->authorize);
-
- rc = policy->sp_sops->authorize(req);
- LASSERT(rc || req->rq_reply_state->rs_repdata_len);
-
- return rc;
-}
-
-/**
- * Used by ptlrpc server, to free reply_state.
- */
-void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_sec_policy *policy;
- unsigned int prealloc;
-
- LASSERT(rs->rs_svc_ctx);
- LASSERT(rs->rs_svc_ctx->sc_policy);
-
- policy = rs->rs_svc_ctx->sc_policy;
- LASSERT(policy->sp_sops->free_rs);
-
- prealloc = rs->rs_prealloc;
- policy->sp_sops->free_rs(rs);
-
- if (prealloc)
- lustre_put_emerg_rs(rs);
-}
-
-void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
-{
- struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
-
- if (ctx != NULL)
- atomic_inc(&ctx->sc_refcount);
-}
-
-void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
-{
- struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
-
- if (ctx == NULL)
- return;
-
- LASSERT_ATOMIC_POS(&ctx->sc_refcount);
- if (atomic_dec_and_test(&ctx->sc_refcount)) {
- if (ctx->sc_policy->sp_sops->free_ctx)
- ctx->sc_policy->sp_sops->free_ctx(ctx);
- }
- req->rq_svc_ctx = NULL;
-}
-
-/****************************************
- * bulk security *
- ****************************************/
-
-/**
- * Perform transformation upon the bulk data pointed to by \a desc. This is
- * called before transforming the request message.
- */
-int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_cli_ctx *ctx;
-
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
- if (!req->rq_pack_bulk)
- return 0;
-
- ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->wrap_bulk)
- return ctx->cc_ops->wrap_bulk(ctx, req, desc);
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
-
-/**
- * This is called after unwrapping the reply message.
- * Returns the number of bytes of plain text actually received, or an error code.
- */
-int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc,
- int nob)
-{
- struct ptlrpc_cli_ctx *ctx;
- int rc;
-
- LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
-
- if (!req->rq_pack_bulk)
- return desc->bd_nob_transferred;
-
- ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->unwrap_bulk) {
- rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
- if (rc < 0)
- return rc;
- }
- return desc->bd_nob_transferred;
-}
-EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
-
-/**
- * This is called after unwrapping the reply message.
- * Returns 0 on success or an error code.
- */
-int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_cli_ctx *ctx;
- int rc;
-
- LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
-
- if (!req->rq_pack_bulk)
- return 0;
-
- ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->unwrap_bulk) {
- rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
- if (rc < 0)
- return rc;
- }
-
- /*
-	 * if everything went right, nob should equal nob_transferred.
-	 * in privacy mode, nob_transferred needs to be adjusted.
- */
- if (desc->bd_nob != desc->bd_nob_transferred) {
- CERROR("nob %d doesn't match transferred nob %d",
- desc->bd_nob, desc->bd_nob_transferred);
- return -EPROTO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
-
-
-/****************************************
- * user descriptor helpers *
- ****************************************/
-
-int sptlrpc_current_user_desc_size(void)
-{
- int ngroups;
-
- ngroups = current_ngroups;
-
- if (ngroups > LUSTRE_MAX_GROUPS)
- ngroups = LUSTRE_MAX_GROUPS;
- return sptlrpc_user_desc_size(ngroups);
-}
-EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
-
-int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
-{
- struct ptlrpc_user_desc *pud;
-
- pud = lustre_msg_buf(msg, offset, 0);
-
- pud->pud_uid = from_kuid(&init_user_ns, current_uid());
- pud->pud_gid = from_kgid(&init_user_ns, current_gid());
- pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
- pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
- pud->pud_cap = cfs_curproc_cap_pack();
- pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
-
- task_lock(current);
- if (pud->pud_ngroups > current_ngroups)
- pud->pud_ngroups = current_ngroups;
- memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
- pud->pud_ngroups * sizeof(__u32));
- task_unlock(current);
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_pack_user_desc);
-
-int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
-{
- struct ptlrpc_user_desc *pud;
- int i;
-
- pud = lustre_msg_buf(msg, offset, sizeof(*pud));
- if (!pud)
- return -EINVAL;
-
- if (swabbed) {
- __swab32s(&pud->pud_uid);
- __swab32s(&pud->pud_gid);
- __swab32s(&pud->pud_fsuid);
- __swab32s(&pud->pud_fsgid);
- __swab32s(&pud->pud_cap);
- __swab32s(&pud->pud_ngroups);
- }
-
- if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
- CERROR("%u groups is too large\n", pud->pud_ngroups);
- return -EINVAL;
- }
-
- if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
- msg->lm_buflens[offset]) {
- CERROR("%u groups are claimed but bufsize only %u\n",
- pud->pud_ngroups, msg->lm_buflens[offset]);
- return -EINVAL;
- }
-
- if (swabbed) {
- for (i = 0; i < pud->pud_ngroups; i++)
- __swab32s(&pud->pud_groups[i]);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
-
-/****************************************
- * misc helpers *
- ****************************************/
-
-const char *sec2target_str(struct ptlrpc_sec *sec)
-{
- if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
- return "*";
- if (sec_is_reverse(sec))
- return "c";
- return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
-}
-EXPORT_SYMBOL(sec2target_str);
-
-/*
- * return true if the bulk data is protected
- */
-int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
-{
- switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
- case SPTLRPC_BULK_SVC_INTG:
- case SPTLRPC_BULK_SVC_PRIV:
- return 1;
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
-
-/****************************************
- * crypto API helper/alloc blkciper *
- ****************************************/
-
-/****************************************
- * initialize/finalize *
- ****************************************/
-
-int sptlrpc_init(void)
-{
- int rc;
-
- rwlock_init(&policy_lock);
-
- rc = sptlrpc_gc_init();
- if (rc)
- goto out;
-
- rc = sptlrpc_conf_init();
- if (rc)
- goto out_gc;
-
- rc = sptlrpc_enc_pool_init();
- if (rc)
- goto out_conf;
-
- rc = sptlrpc_null_init();
- if (rc)
- goto out_pool;
-
- rc = sptlrpc_plain_init();
- if (rc)
- goto out_null;
-
- rc = sptlrpc_lproc_init();
- if (rc)
- goto out_plain;
-
- return 0;
-
-out_plain:
- sptlrpc_plain_fini();
-out_null:
- sptlrpc_null_fini();
-out_pool:
- sptlrpc_enc_pool_fini();
-out_conf:
- sptlrpc_conf_fini();
-out_gc:
- sptlrpc_gc_fini();
-out:
- return rc;
-}
-
-void sptlrpc_fini(void)
-{
- sptlrpc_lproc_fini();
- sptlrpc_plain_fini();
- sptlrpc_null_fini();
- sptlrpc_enc_pool_fini();
- sptlrpc_conf_fini();
- sptlrpc_gc_fini();
-}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
deleted file mode 100644
index c89973c0e3a2..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_bulk.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/crypto.h>
-
-#include "../include/obd.h"
-#include "../include/obd_cksum.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_import.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_sec.h"
-
-#include "ptlrpc_internal.h"
-
-/****************************************
- * bulk encryption page pools *
- ****************************************/
-
-
-#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
-#define PAGES_PER_POOL (POINTERS_PER_PAGE)
-
-#define IDLE_IDX_MAX (100)
-#define IDLE_IDX_WEIGHT (3)
-
-#define CACHE_QUIESCENT_PERIOD (20)
-
-static struct ptlrpc_enc_page_pool {
- /*
- * constants
- */
- unsigned long epp_max_pages; /* maximum pages can hold, const */
- unsigned int epp_max_pools; /* number of pools, const */
-
- /*
- * wait queue in case of not enough free pages.
- */
- wait_queue_head_t epp_waitq; /* waiting threads */
- unsigned int epp_waitqlen; /* wait queue length */
- unsigned long epp_pages_short; /* # of pages wanted of in-q users */
- unsigned int epp_growing:1; /* during adding pages */
-
- /*
-	 * indicating how idle the pools are, from 0 to IDLE_IDX_MAX.
-	 * this is updated each time pages are taken from the pools rather
-	 * than based on wall-clock time, which means the idle_idx may still
-	 * be low after the system has been idle for a while, if there was
-	 * no activity in the pools.
- */
- unsigned long epp_idle_idx;
-
- /* last shrink time due to mem tight */
- time64_t epp_last_shrink;
- time64_t epp_last_access;
-
- /*
- * in-pool pages bookkeeping
- */
- spinlock_t epp_lock; /* protect following fields */
- unsigned long epp_total_pages; /* total pages in pools */
- unsigned long epp_free_pages; /* current pages available */
-
- /*
- * statistics
- */
- unsigned long epp_st_max_pages; /* # of pages ever reached */
- unsigned int epp_st_grows; /* # of grows */
- unsigned int epp_st_grow_fails; /* # of add pages failures */
- unsigned int epp_st_shrinks; /* # of shrinks */
- unsigned long epp_st_access; /* # of access */
- unsigned long epp_st_missings; /* # of cache missing */
- unsigned long epp_st_lowfree; /* lowest free pages reached */
- unsigned int epp_st_max_wqlen; /* highest waitqueue length */
- unsigned long epp_st_max_wait; /* in jiffies */
- /*
- * pointers to pools
- */
- struct page ***epp_pools;
-} page_pools;
-
-/*
- * /proc/fs/lustre/sptlrpc/encrypt_page_pools
- */
-int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
-{
- spin_lock(&page_pools.epp_lock);
-
- seq_printf(m,
- "physical pages: %lu\n"
- "pages per pool: %lu\n"
- "max pages: %lu\n"
- "max pools: %u\n"
- "total pages: %lu\n"
- "total free: %lu\n"
- "idle index: %lu/100\n"
- "last shrink: %lds\n"
- "last access: %lds\n"
- "max pages reached: %lu\n"
- "grows: %u\n"
- "grows failure: %u\n"
- "shrinks: %u\n"
- "cache access: %lu\n"
- "cache missing: %lu\n"
- "low free mark: %lu\n"
- "max waitqueue depth: %u\n"
- "max wait time: %ld/%u\n",
- totalram_pages,
- PAGES_PER_POOL,
- page_pools.epp_max_pages,
- page_pools.epp_max_pools,
- page_pools.epp_total_pages,
- page_pools.epp_free_pages,
- page_pools.epp_idle_idx,
- (long)(ktime_get_seconds() - page_pools.epp_last_shrink),
- (long)(ktime_get_seconds() - page_pools.epp_last_access),
- page_pools.epp_st_max_pages,
- page_pools.epp_st_grows,
- page_pools.epp_st_grow_fails,
- page_pools.epp_st_shrinks,
- page_pools.epp_st_access,
- page_pools.epp_st_missings,
- page_pools.epp_st_lowfree,
- page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait,
- HZ);
-
- spin_unlock(&page_pools.epp_lock);
-
- return 0;
-}
-
-static void enc_pools_release_free_pages(long npages)
-{
- int p_idx, g_idx;
- int p_idx_max1, p_idx_max2;
-
- LASSERT(npages > 0);
- LASSERT(npages <= page_pools.epp_free_pages);
- LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);
-
- /* max pool index before the release */
- p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;
-
- page_pools.epp_free_pages -= npages;
- page_pools.epp_total_pages -= npages;
-
- /* max pool index after the release */
- p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
- ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);
-
- p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
- g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
- LASSERT(page_pools.epp_pools[p_idx]);
-
- while (npages--) {
- LASSERT(page_pools.epp_pools[p_idx]);
- LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
-
- __free_page(page_pools.epp_pools[p_idx][g_idx]);
- page_pools.epp_pools[p_idx][g_idx] = NULL;
-
- if (++g_idx == PAGES_PER_POOL) {
- p_idx++;
- g_idx = 0;
- }
- }
-
- /* free unused pools */
- while (p_idx_max1 < p_idx_max2) {
- LASSERT(page_pools.epp_pools[p_idx_max2]);
- kfree(page_pools.epp_pools[p_idx_max2]);
- page_pools.epp_pools[p_idx_max2] = NULL;
- p_idx_max2--;
- }
-}
-
-/*
- * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
- */
-static unsigned long enc_pools_shrink_count(struct shrinker *s,
- struct shrink_control *sc)
-{
- /*
-	 * if there has been no pool access for a long time, we consider
-	 * the pools fully idle. a little race here is fine.
- */
- if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
- CACHE_QUIESCENT_PERIOD)) {
- spin_lock(&page_pools.epp_lock);
- page_pools.epp_idle_idx = IDLE_IDX_MAX;
- spin_unlock(&page_pools.epp_lock);
- }
-
- LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
-}
-
-/*
- * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
- */
-static unsigned long enc_pools_shrink_scan(struct shrinker *s,
- struct shrink_control *sc)
-{
- spin_lock(&page_pools.epp_lock);
- sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
- page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
- if (sc->nr_to_scan > 0) {
- enc_pools_release_free_pages(sc->nr_to_scan);
- CDEBUG(D_SEC, "released %ld pages, %ld left\n",
- (long)sc->nr_to_scan, page_pools.epp_free_pages);
-
- page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = ktime_get_seconds();
- }
- spin_unlock(&page_pools.epp_lock);
-
- /*
-	 * if there has been no pool access for a long time, we consider
-	 * the pools fully idle. a little race here is fine.
- */
- if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
- CACHE_QUIESCENT_PERIOD)) {
- spin_lock(&page_pools.epp_lock);
- page_pools.epp_idle_idx = IDLE_IDX_MAX;
- spin_unlock(&page_pools.epp_lock);
- }
-
- LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return sc->nr_to_scan;
-}
-
-static inline
-int npages_to_npools(unsigned long npages)
-{
- return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
-}
-
-/*
- * return how many pages were cleaned up.
- */
-static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
-{
- unsigned long cleaned = 0;
- int i, j;
-
- for (i = 0; i < npools; i++) {
- if (pools[i]) {
- for (j = 0; j < PAGES_PER_POOL; j++) {
- if (pools[i][j]) {
- __free_page(pools[i][j]);
- cleaned++;
- }
- }
- kfree(pools[i]);
- pools[i] = NULL;
- }
- }
-
- return cleaned;
-}
-
-static inline void enc_pools_wakeup(void)
-{
- assert_spin_locked(&page_pools.epp_lock);
- LASSERT(page_pools.epp_waitqlen >= 0);
-
- if (unlikely(page_pools.epp_waitqlen)) {
- LASSERT(waitqueue_active(&page_pools.epp_waitq));
- wake_up_all(&page_pools.epp_waitq);
- }
-}
-
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
-{
- int p_idx, g_idx;
- int i;
-
- if (desc->bd_enc_iov == NULL)
- return;
-
- LASSERT(desc->bd_iov_count > 0);
-
- spin_lock(&page_pools.epp_lock);
-
- p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
- g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
-
- LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
- page_pools.epp_total_pages);
- LASSERT(page_pools.epp_pools[p_idx]);
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
- LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
- LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
-
- page_pools.epp_pools[p_idx][g_idx] =
- desc->bd_enc_iov[i].kiov_page;
-
- if (++g_idx == PAGES_PER_POOL) {
- p_idx++;
- g_idx = 0;
- }
- }
-
- page_pools.epp_free_pages += desc->bd_iov_count;
-
- enc_pools_wakeup();
-
- spin_unlock(&page_pools.epp_lock);
-
- kfree(desc->bd_enc_iov);
- desc->bd_enc_iov = NULL;
-}
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
-
-static inline void enc_pools_alloc(void)
-{
- LASSERT(page_pools.epp_max_pools);
- page_pools.epp_pools =
- libcfs_kvzalloc(page_pools.epp_max_pools *
- sizeof(*page_pools.epp_pools),
- GFP_NOFS);
-}
-
-static inline void enc_pools_free(void)
-{
- LASSERT(page_pools.epp_max_pools);
- LASSERT(page_pools.epp_pools);
-
- kvfree(page_pools.epp_pools);
-}
-
-static struct shrinker pools_shrinker = {
- .count_objects = enc_pools_shrink_count,
- .scan_objects = enc_pools_shrink_scan,
- .seeks = DEFAULT_SEEKS,
-};
-
-int sptlrpc_enc_pool_init(void)
-{
- /*
- * maximum capacity is 1/8 of total physical memory.
- * is the 1/8 a good number?
- */
- page_pools.epp_max_pages = totalram_pages / 8;
- page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
-
- init_waitqueue_head(&page_pools.epp_waitq);
- page_pools.epp_waitqlen = 0;
- page_pools.epp_pages_short = 0;
-
- page_pools.epp_growing = 0;
-
- page_pools.epp_idle_idx = 0;
- page_pools.epp_last_shrink = ktime_get_seconds();
- page_pools.epp_last_access = ktime_get_seconds();
-
- spin_lock_init(&page_pools.epp_lock);
- page_pools.epp_total_pages = 0;
- page_pools.epp_free_pages = 0;
-
- page_pools.epp_st_max_pages = 0;
- page_pools.epp_st_grows = 0;
- page_pools.epp_st_grow_fails = 0;
- page_pools.epp_st_shrinks = 0;
- page_pools.epp_st_access = 0;
- page_pools.epp_st_missings = 0;
- page_pools.epp_st_lowfree = 0;
- page_pools.epp_st_max_wqlen = 0;
- page_pools.epp_st_max_wait = 0;
-
- enc_pools_alloc();
- if (page_pools.epp_pools == NULL)
- return -ENOMEM;
-
- register_shrinker(&pools_shrinker);
-
- return 0;
-}
-
-void sptlrpc_enc_pool_fini(void)
-{
- unsigned long cleaned, npools;
-
- LASSERT(page_pools.epp_pools);
- LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
-
- unregister_shrinker(&pools_shrinker);
-
- npools = npages_to_npools(page_pools.epp_total_pages);
- cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
- LASSERT(cleaned == page_pools.epp_total_pages);
-
- enc_pools_free();
-
- if (page_pools.epp_st_access > 0) {
- CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%d\n",
- page_pools.epp_st_max_pages, page_pools.epp_st_grows,
- page_pools.epp_st_grow_fails,
- page_pools.epp_st_shrinks, page_pools.epp_st_access,
- page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, HZ);
- }
-}
-
-
-static int cfs_hash_alg_id[] = {
- [BULK_HASH_ALG_NULL] = CFS_HASH_ALG_NULL,
- [BULK_HASH_ALG_ADLER32] = CFS_HASH_ALG_ADLER32,
- [BULK_HASH_ALG_CRC32] = CFS_HASH_ALG_CRC32,
- [BULK_HASH_ALG_MD5] = CFS_HASH_ALG_MD5,
- [BULK_HASH_ALG_SHA1] = CFS_HASH_ALG_SHA1,
- [BULK_HASH_ALG_SHA256] = CFS_HASH_ALG_SHA256,
- [BULK_HASH_ALG_SHA384] = CFS_HASH_ALG_SHA384,
- [BULK_HASH_ALG_SHA512] = CFS_HASH_ALG_SHA512,
-};
-const char *sptlrpc_get_hash_name(__u8 hash_alg)
-{
- return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
-}
-EXPORT_SYMBOL(sptlrpc_get_hash_name);
-
-__u8 sptlrpc_get_hash_alg(const char *algname)
-{
- return cfs_crypto_hash_alg(algname);
-}
-EXPORT_SYMBOL(sptlrpc_get_hash_alg);
-
-int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
-{
- struct ptlrpc_bulk_sec_desc *bsd;
- int size = msg->lm_buflens[offset];
-
- bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
- if (bsd == NULL) {
- CERROR("Invalid bulk sec desc: size %d\n", size);
- return -EINVAL;
- }
-
- if (swabbed)
- __swab32s(&bsd->bsd_nob);
-
- if (unlikely(bsd->bsd_version != 0)) {
- CERROR("Unexpected version %u\n", bsd->bsd_version);
- return -EPROTO;
- }
-
- if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
- CERROR("Invalid type %u\n", bsd->bsd_type);
- return -EPROTO;
- }
-
- /* FIXME more sanity check here */
-
- if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
- bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
- bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
- CERROR("Invalid svc %u\n", bsd->bsd_svc);
- return -EPROTO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bulk_sec_desc_unpack);
-
-int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
- void *buf, int buflen)
-{
- struct cfs_crypto_hash_desc *hdesc;
- int hashsize;
- char hashbuf[64];
- unsigned int bufsize;
- int i, err;
-
- LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
- LASSERT(buflen >= 4);
-
- hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
- if (IS_ERR(hdesc)) {
- CERROR("Unable to initialize checksum hash %s\n",
- cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
- return PTR_ERR(hdesc);
- }
-
- hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
- desc->bd_iov[i].kiov_len);
- }
- if (hashsize > buflen) {
- bufsize = sizeof(hashbuf);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
- &bufsize);
- memcpy(buf, hashbuf, buflen);
- } else {
- bufsize = buflen;
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
- &bufsize);
- }
-
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
- return err;
-}
-EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
deleted file mode 100644
index 0d29b8734a53..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/crypto.h>
-#include <linux/key.h>
-
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_import.h"
-#include "../include/lustre_param.h"
-#include "../include/lustre_sec.h"
-
-#include "ptlrpc_internal.h"
-
-enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd)
-{
- const char *type = obd->obd_type->typ_name;
-
- if (!strcmp(type, LUSTRE_MDT_NAME))
- return LUSTRE_SP_MDT;
- if (!strcmp(type, LUSTRE_OST_NAME))
- return LUSTRE_SP_OST;
- if (!strcmp(type, LUSTRE_MGS_NAME))
- return LUSTRE_SP_MGS;
-
- CERROR("unknown target %p(%s)\n", obd, type);
- return LUSTRE_SP_ANY;
-}
-EXPORT_SYMBOL(sptlrpc_target_sec_part);
-
-/****************************************
- * user supplied flavor string parsing *
- ****************************************/
-
-/*
- * format: <base_flavor>[-<bulk_type:alg_spec>]
- */
-int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr)
-{
- char buf[32];
- char *bulk, *alg;
-
- memset(flvr, 0, sizeof(*flvr));
-
- if (str == NULL || str[0] == '\0') {
- flvr->sf_rpc = SPTLRPC_FLVR_INVALID;
- return 0;
- }
-
- strncpy(buf, str, sizeof(buf));
- buf[sizeof(buf) - 1] = '\0';
-
- bulk = strchr(buf, '-');
- if (bulk)
- *bulk++ = '\0';
-
- flvr->sf_rpc = sptlrpc_name2flavor_base(buf);
- if (flvr->sf_rpc == SPTLRPC_FLVR_INVALID)
- goto err_out;
-
- /*
- * currently only the base flavor "plain" can have a bulk specification.
- */
- if (flvr->sf_rpc == SPTLRPC_FLVR_PLAIN) {
- flvr->u_bulk.hash.hash_alg = BULK_HASH_ALG_ADLER32;
- if (bulk) {
- /*
- * format: plain-hash:<hash_alg>
- */
- alg = strchr(bulk, ':');
- if (alg == NULL)
- goto err_out;
- *alg++ = '\0';
-
- if (strcmp(bulk, "hash"))
- goto err_out;
-
- flvr->u_bulk.hash.hash_alg = sptlrpc_get_hash_alg(alg);
- if (flvr->u_bulk.hash.hash_alg >= BULK_HASH_ALG_MAX)
- goto err_out;
- }
-
- if (flvr->u_bulk.hash.hash_alg == BULK_HASH_ALG_NULL)
- flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_NULL);
- else
- flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_INTG);
- } else {
- if (bulk)
- goto err_out;
- }
-
- flvr->sf_flags = 0;
- return 0;
-
-err_out:
- CERROR("invalid flavor string: %s\n", str);
- return -EINVAL;
-}
-EXPORT_SYMBOL(sptlrpc_parse_flavor);
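
For illustration only, a minimal sketch of how the flavor-string format parsed above could be exercised; the example_parse_flavor() wrapper and the sample string "plain-hash:sha1" are hypothetical, while sptlrpc_parse_flavor() and the fields it fills come from the code above.

static void example_parse_flavor(void)
{
	struct sptlrpc_flavor flvr;

	/* hypothetical sample string in <base_flavor>[-<bulk_type:alg_spec>] form */
	if (sptlrpc_parse_flavor("plain-hash:sha1", &flvr) == 0)
		CDEBUG(D_SEC, "rpc flavor %x, bulk hash alg %u\n",
		       flvr.sf_rpc, flvr.u_bulk.hash.hash_alg);
}
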
-
-/****************************************
- * configure rules *
- ****************************************/
-
-static void get_default_flavor(struct sptlrpc_flavor *sf)
-{
- memset(sf, 0, sizeof(*sf));
-
- sf->sf_rpc = SPTLRPC_FLVR_NULL;
- sf->sf_flags = 0;
-}
-
-static void sptlrpc_rule_init(struct sptlrpc_rule *rule)
-{
- rule->sr_netid = LNET_NIDNET(LNET_NID_ANY);
- rule->sr_from = LUSTRE_SP_ANY;
- rule->sr_to = LUSTRE_SP_ANY;
- rule->sr_padding = 0;
-
- get_default_flavor(&rule->sr_flvr);
-}
-
-/*
- * format: network[.direction]=flavor
- */
-int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
-{
- char *flavor, *dir;
- int rc;
-
- sptlrpc_rule_init(rule);
-
- flavor = strchr(param, '=');
- if (flavor == NULL) {
- CERROR("invalid param, no '='\n");
- return -EINVAL;
- }
- *flavor++ = '\0';
-
- dir = strchr(param, '.');
- if (dir)
- *dir++ = '\0';
-
- /* 1.1 network */
- if (strcmp(param, "default")) {
- rule->sr_netid = libcfs_str2net(param);
- if (rule->sr_netid == LNET_NIDNET(LNET_NID_ANY)) {
- CERROR("invalid network name: %s\n", param);
- return -EINVAL;
- }
- }
-
- /* 1.2 direction */
- if (dir) {
- if (!strcmp(dir, "mdt2ost")) {
- rule->sr_from = LUSTRE_SP_MDT;
- rule->sr_to = LUSTRE_SP_OST;
- } else if (!strcmp(dir, "mdt2mdt")) {
- rule->sr_from = LUSTRE_SP_MDT;
- rule->sr_to = LUSTRE_SP_MDT;
- } else if (!strcmp(dir, "cli2ost")) {
- rule->sr_from = LUSTRE_SP_CLI;
- rule->sr_to = LUSTRE_SP_OST;
- } else if (!strcmp(dir, "cli2mdt")) {
- rule->sr_from = LUSTRE_SP_CLI;
- rule->sr_to = LUSTRE_SP_MDT;
- } else {
- CERROR("invalid rule dir segment: %s\n", dir);
- return -EINVAL;
- }
- }
-
- /* 2.1 flavor */
- rc = sptlrpc_parse_flavor(flavor, &rule->sr_flvr);
- if (rc)
- return -EINVAL;
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_parse_rule);
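
A similarly hedged sketch of parsing one rule in the "network[.direction]=flavor" form documented above; the wrapper and the sample rule are hypothetical. Note the buffer must be writable because sptlrpc_parse_rule() splits it in place.

static void example_parse_rule(void)
{
	struct sptlrpc_rule rule;
	char buf[] = "tcp0.cli2mdt=plain";	/* hypothetical: net tcp0, CLI->MDT, flavor plain */

	if (sptlrpc_parse_rule(buf, &rule) == 0)
		CDEBUG(D_SEC, "netid %x, rpc flavor %x\n",
		       rule.sr_netid, rule.sr_flvr.sf_rpc);
}
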
-
-void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset)
-{
- LASSERT(rset->srs_nslot ||
- (rset->srs_nrule == 0 && rset->srs_rules == NULL));
-
- if (rset->srs_nslot) {
- kfree(rset->srs_rules);
- sptlrpc_rule_set_init(rset);
- }
-}
-EXPORT_SYMBOL(sptlrpc_rule_set_free);
-
-/*
- * return 0 if the rule set could accommodate one more rule.
- */
-int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
-{
- struct sptlrpc_rule *rules;
- int nslot;
-
- might_sleep();
-
- if (rset->srs_nrule < rset->srs_nslot)
- return 0;
-
- nslot = rset->srs_nslot + 8;
-
- /* better use realloc() if available */
- rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS);
- if (rules == NULL)
- return -ENOMEM;
-
- if (rset->srs_nrule) {
- LASSERT(rset->srs_nslot && rset->srs_rules);
- memcpy(rules, rset->srs_rules,
- rset->srs_nrule * sizeof(*rset->srs_rules));
-
- kfree(rset->srs_rules);
- }
-
- rset->srs_rules = rules;
- rset->srs_nslot = nslot;
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_rule_set_expand);
-
-static inline int rule_spec_dir(struct sptlrpc_rule *rule)
-{
- return (rule->sr_from != LUSTRE_SP_ANY ||
- rule->sr_to != LUSTRE_SP_ANY);
-}
-static inline int rule_spec_net(struct sptlrpc_rule *rule)
-{
- return (rule->sr_netid != LNET_NIDNET(LNET_NID_ANY));
-}
-static inline int rule_match_dir(struct sptlrpc_rule *r1,
- struct sptlrpc_rule *r2)
-{
- return (r1->sr_from == r2->sr_from && r1->sr_to == r2->sr_to);
-}
-static inline int rule_match_net(struct sptlrpc_rule *r1,
- struct sptlrpc_rule *r2)
-{
- return (r1->sr_netid == r2->sr_netid);
-}
-
-/*
- * merge @rule into @rset.
- * the @rset slots might be expanded.
- */
-int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *rset,
- struct sptlrpc_rule *rule)
-{
- struct sptlrpc_rule *p = rset->srs_rules;
- int spec_dir, spec_net;
- int rc, n, match = 0;
-
- might_sleep();
-
- spec_net = rule_spec_net(rule);
- spec_dir = rule_spec_dir(rule);
-
- for (n = 0; n < rset->srs_nrule; n++) {
- p = &rset->srs_rules[n];
-
-		/* test network match; if it fails:
-		 * - spec rule: skip rules which are also network-specific,
-		 *   until we hit a wild rule, which means no more chance
-		 * - wild rule: skip until we reach one which is also wild
-		 *   and matches
- */
- if (!rule_match_net(p, rule)) {
- if (spec_net) {
- if (rule_spec_net(p))
- continue;
- else
- break;
- } else {
- continue;
- }
- }
-
- /* test dir match, same logic as net matching */
- if (!rule_match_dir(p, rule)) {
- if (spec_dir) {
- if (rule_spec_dir(p))
- continue;
- else
- break;
- } else {
- continue;
- }
- }
-
-		/* found a match */
- match = 1;
- break;
- }
-
- if (match) {
- LASSERT(n >= 0 && n < rset->srs_nrule);
-
- if (rule->sr_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
- /* remove this rule */
- if (n < rset->srs_nrule - 1)
- memmove(&rset->srs_rules[n],
- &rset->srs_rules[n + 1],
- (rset->srs_nrule - n - 1) *
- sizeof(*rule));
- rset->srs_nrule--;
- } else {
- /* override the rule */
- memcpy(&rset->srs_rules[n], rule, sizeof(*rule));
- }
- } else {
- LASSERT(n >= 0 && n <= rset->srs_nrule);
-
- if (rule->sr_flvr.sf_rpc != SPTLRPC_FLVR_INVALID) {
- rc = sptlrpc_rule_set_expand(rset);
- if (rc)
- return rc;
-
- if (n < rset->srs_nrule)
- memmove(&rset->srs_rules[n + 1],
- &rset->srs_rules[n],
- (rset->srs_nrule - n) * sizeof(*rule));
- memcpy(&rset->srs_rules[n], rule, sizeof(*rule));
- rset->srs_nrule++;
- } else {
- CDEBUG(D_CONFIG, "ignore the unmatched deletion\n");
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_rule_set_merge);
-
-/**
- * given from/to/nid, determine a matching flavor in the ruleset.
- * return 1 if a match is found, otherwise return 0.
- */
-int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
- enum lustre_sec_part from,
- enum lustre_sec_part to,
- lnet_nid_t nid,
- struct sptlrpc_flavor *sf)
-{
- struct sptlrpc_rule *r;
- int n;
-
- for (n = 0; n < rset->srs_nrule; n++) {
- r = &rset->srs_rules[n];
-
- if (LNET_NIDNET(nid) != LNET_NIDNET(LNET_NID_ANY) &&
- r->sr_netid != LNET_NIDNET(LNET_NID_ANY) &&
- LNET_NIDNET(nid) != r->sr_netid)
- continue;
-
- if (from != LUSTRE_SP_ANY && r->sr_from != LUSTRE_SP_ANY &&
- from != r->sr_from)
- continue;
-
- if (to != LUSTRE_SP_ANY && r->sr_to != LUSTRE_SP_ANY &&
- to != r->sr_to)
- continue;
-
- *sf = r->sr_flvr;
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_rule_set_choose);
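
One more hypothetical sketch tying the ruleset helpers together: merge a parsed rule into a set, then let sptlrpc_rule_set_choose() pick the flavor for a CLI->MDT pair (it returns 1 on a match, as noted above); only the helper names from the code above are assumed to exist.

static void example_rule_set(lnet_nid_t peer_nid)
{
	struct sptlrpc_rule_set rset;
	struct sptlrpc_rule rule;
	struct sptlrpc_flavor flvr;
	char buf[] = "default=plain";	/* hypothetical fs-wide default rule */

	sptlrpc_rule_set_init(&rset);

	if (sptlrpc_parse_rule(buf, &rule) == 0 &&
	    sptlrpc_rule_set_merge(&rset, &rule) == 0 &&
	    sptlrpc_rule_set_choose(&rset, LUSTRE_SP_CLI, LUSTRE_SP_MDT,
				    peer_nid, &flvr))
		CDEBUG(D_SEC, "chose rpc flavor %x\n", flvr.sf_rpc);

	sptlrpc_rule_set_free(&rset);
}
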
-
-/**********************************
- * sptlrpc configuration support *
- **********************************/
-
-struct sptlrpc_conf_tgt {
- struct list_head sct_list;
- char sct_name[MAX_OBD_NAME];
- struct sptlrpc_rule_set sct_rset;
-};
-
-struct sptlrpc_conf {
- struct list_head sc_list;
- char sc_fsname[MTI_NAME_MAXLEN];
- unsigned int sc_modified; /* modified during updating */
- unsigned int sc_updated:1, /* updated copy from MGS */
- sc_local:1; /* local copy from target */
- struct sptlrpc_rule_set sc_rset; /* fs general rules */
- struct list_head sc_tgts; /* target-specific rules */
-};
-
-static struct mutex sptlrpc_conf_lock;
-static LIST_HEAD(sptlrpc_confs);
-
-static inline int is_hex(char c)
-{
- return ((c >= '0' && c <= '9') ||
- (c >= 'a' && c <= 'f'));
-}
-
-static void target2fsname(const char *tgt, char *fsname, int buflen)
-{
- const char *ptr;
- int len;
-
- ptr = strrchr(tgt, '-');
- if (ptr) {
- if ((strncmp(ptr, "-MDT", 4) != 0 &&
- strncmp(ptr, "-OST", 4) != 0) ||
- !is_hex(ptr[4]) || !is_hex(ptr[5]) ||
- !is_hex(ptr[6]) || !is_hex(ptr[7]))
- ptr = NULL;
- }
-
- /* if we didn't find the pattern, treat the whole string as fsname */
- if (ptr == NULL)
- len = strlen(tgt);
- else
- len = ptr - tgt;
-
- len = min(len, buflen - 1);
- memcpy(fsname, tgt, len);
- fsname[len] = '\0';
-}
-
-static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
-{
- struct sptlrpc_conf_tgt *conf_tgt, *conf_tgt_next;
-
- sptlrpc_rule_set_free(&conf->sc_rset);
-
- list_for_each_entry_safe(conf_tgt, conf_tgt_next,
- &conf->sc_tgts, sct_list) {
- sptlrpc_rule_set_free(&conf_tgt->sct_rset);
- list_del(&conf_tgt->sct_list);
- kfree(conf_tgt);
- }
- LASSERT(list_empty(&conf->sc_tgts));
-
- conf->sc_updated = 0;
- conf->sc_local = 0;
-}
-
-static void sptlrpc_conf_free(struct sptlrpc_conf *conf)
-{
- CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname);
-
- sptlrpc_conf_free_rsets(conf);
- list_del(&conf->sc_list);
- kfree(conf);
-}
-
-static
-struct sptlrpc_conf_tgt *sptlrpc_conf_get_tgt(struct sptlrpc_conf *conf,
- const char *name,
- int create)
-{
- struct sptlrpc_conf_tgt *conf_tgt;
-
- list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
- if (strcmp(conf_tgt->sct_name, name) == 0)
- return conf_tgt;
- }
-
- if (!create)
- return NULL;
-
- conf_tgt = kzalloc(sizeof(*conf_tgt), GFP_NOFS);
- if (conf_tgt) {
- strlcpy(conf_tgt->sct_name, name, sizeof(conf_tgt->sct_name));
- sptlrpc_rule_set_init(&conf_tgt->sct_rset);
- list_add(&conf_tgt->sct_list, &conf->sc_tgts);
- }
-
- return conf_tgt;
-}
-
-static
-struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
- int create)
-{
- struct sptlrpc_conf *conf;
-
- list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
- if (strcmp(conf->sc_fsname, fsname) == 0)
- return conf;
- }
-
- if (!create)
- return NULL;
-
- conf = kzalloc(sizeof(*conf), GFP_NOFS);
- if (!conf)
- return NULL;
-
- strcpy(conf->sc_fsname, fsname);
- sptlrpc_rule_set_init(&conf->sc_rset);
- INIT_LIST_HEAD(&conf->sc_tgts);
- list_add(&conf->sc_list, &sptlrpc_confs);
-
- CDEBUG(D_SEC, "create sptlrpc conf %s\n", conf->sc_fsname);
- return conf;
-}
-
-/**
- * caller must hold conf_lock already.
- */
-static int sptlrpc_conf_merge_rule(struct sptlrpc_conf *conf,
- const char *target,
- struct sptlrpc_rule *rule)
-{
- struct sptlrpc_conf_tgt *conf_tgt;
- struct sptlrpc_rule_set *rule_set;
-
- /* fsname == target means general rules for the whole fs */
- if (strcmp(conf->sc_fsname, target) == 0) {
- rule_set = &conf->sc_rset;
- } else {
- conf_tgt = sptlrpc_conf_get_tgt(conf, target, 1);
- if (conf_tgt) {
- rule_set = &conf_tgt->sct_rset;
- } else {
- CERROR("out of memory, can't merge rule!\n");
- return -ENOMEM;
- }
- }
-
- return sptlrpc_rule_set_merge(rule_set, rule);
-}
-
-/**
- * process one LCFG_SPTLRPC_CONF record. if \a conf is NULL, we
- * look one up by the target name in the record, taking conf_lock
- * ourselves; otherwise the caller already holds conf_lock.
- */
-static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
- struct sptlrpc_conf *conf)
-{
- char *target, *param;
- char fsname[MTI_NAME_MAXLEN];
- struct sptlrpc_rule rule;
- int rc;
-
- target = lustre_cfg_string(lcfg, 1);
- if (target == NULL) {
- CERROR("missing target name\n");
- return -EINVAL;
- }
-
- param = lustre_cfg_string(lcfg, 2);
- if (param == NULL) {
- CERROR("missing parameter\n");
- return -EINVAL;
- }
-
- CDEBUG(D_SEC, "processing rule: %s.%s\n", target, param);
-
- /* parse rule to make sure the format is correct */
- if (strncmp(param, PARAM_SRPC_FLVR, sizeof(PARAM_SRPC_FLVR) - 1) != 0) {
- CERROR("Invalid sptlrpc parameter: %s\n", param);
- return -EINVAL;
- }
- param += sizeof(PARAM_SRPC_FLVR) - 1;
-
- rc = sptlrpc_parse_rule(param, &rule);
- if (rc)
- return -EINVAL;
-
- if (conf == NULL) {
- target2fsname(target, fsname, sizeof(fsname));
-
- mutex_lock(&sptlrpc_conf_lock);
- conf = sptlrpc_conf_get(fsname, 0);
- if (conf == NULL) {
- CERROR("can't find conf\n");
- rc = -ENOMEM;
- } else {
- rc = sptlrpc_conf_merge_rule(conf, target, &rule);
- }
- mutex_unlock(&sptlrpc_conf_lock);
- } else {
- LASSERT(mutex_is_locked(&sptlrpc_conf_lock));
- rc = sptlrpc_conf_merge_rule(conf, target, &rule);
- }
-
- if (rc == 0)
- conf->sc_modified++;
-
- return rc;
-}
-
-int sptlrpc_process_config(struct lustre_cfg *lcfg)
-{
- return __sptlrpc_process_config(lcfg, NULL);
-}
-EXPORT_SYMBOL(sptlrpc_process_config);
-
-static int logname2fsname(const char *logname, char *buf, int buflen)
-{
- char *ptr;
- int len;
-
- ptr = strrchr(logname, '-');
- if (ptr == NULL || strcmp(ptr, "-sptlrpc")) {
- CERROR("%s is not a sptlrpc config log\n", logname);
- return -EINVAL;
- }
-
- len = min((int) (ptr - logname), buflen - 1);
-
- memcpy(buf, logname, len);
- buf[len] = '\0';
- return 0;
-}
-
-void sptlrpc_conf_log_update_begin(const char *logname)
-{
- struct sptlrpc_conf *conf;
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
-
- conf = sptlrpc_conf_get(fsname, 0);
- if (conf) {
- if (conf->sc_local) {
- LASSERT(conf->sc_updated == 0);
- sptlrpc_conf_free_rsets(conf);
- }
- conf->sc_modified = 0;
- }
-
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_update_begin);
-
-/**
- * mark a config log has been updated
- */
-void sptlrpc_conf_log_update_end(const char *logname)
-{
- struct sptlrpc_conf *conf;
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
-
- conf = sptlrpc_conf_get(fsname, 0);
- if (conf) {
- /*
-		 * if the original state was not updated, make sure the
-		 * modified counter is > 0 to force an update of the local copy.
- */
- if (conf->sc_updated == 0)
- conf->sc_modified++;
-
- conf->sc_updated = 1;
- }
-
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_update_end);
-
-void sptlrpc_conf_log_start(const char *logname)
-{
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
- sptlrpc_conf_get(fsname, 1);
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_start);
-
-void sptlrpc_conf_log_stop(const char *logname)
-{
- struct sptlrpc_conf *conf;
- char fsname[16];
-
- if (logname2fsname(logname, fsname, sizeof(fsname)))
- return;
-
- mutex_lock(&sptlrpc_conf_lock);
- conf = sptlrpc_conf_get(fsname, 0);
- if (conf)
- sptlrpc_conf_free(conf);
- mutex_unlock(&sptlrpc_conf_lock);
-}
-EXPORT_SYMBOL(sptlrpc_conf_log_stop);
-
-static inline void flavor_set_flags(struct sptlrpc_flavor *sf,
- enum lustre_sec_part from,
- enum lustre_sec_part to,
- unsigned int fl_udesc)
-{
- /*
-	 * the null flavor doesn't need any flags set, and in fact we'd
-	 * better not set any because everybody shares a single sec.
- */
- if (sf->sf_rpc == SPTLRPC_FLVR_NULL)
- return;
-
- if (from == LUSTRE_SP_MDT) {
- /* MDT->MDT; MDT->OST */
- sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY;
- } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_OST) {
- /* CLI->OST */
- sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_BULK;
- } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_MDT) {
- /* CLI->MDT */
- if (fl_udesc && sf->sf_rpc != SPTLRPC_FLVR_NULL)
- sf->sf_flags |= PTLRPC_SEC_FL_UDESC;
- }
-}
-
-void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
- enum lustre_sec_part to,
- struct obd_uuid *target,
- lnet_nid_t nid,
- struct sptlrpc_flavor *sf)
-{
- struct sptlrpc_conf *conf;
- struct sptlrpc_conf_tgt *conf_tgt;
- char name[MTI_NAME_MAXLEN];
- int len, rc = 0;
-
- target2fsname(target->uuid, name, sizeof(name));
-
- mutex_lock(&sptlrpc_conf_lock);
-
- conf = sptlrpc_conf_get(name, 0);
- if (conf == NULL)
- goto out;
-
-	/* convert uuid name (supposed to end with _UUID) to target name */
- len = strlen(target->uuid);
- LASSERT(len > 5);
- memcpy(name, target->uuid, len - 5);
- name[len - 5] = '\0';
-
- conf_tgt = sptlrpc_conf_get_tgt(conf, name, 0);
- if (conf_tgt) {
- rc = sptlrpc_rule_set_choose(&conf_tgt->sct_rset,
- from, to, nid, sf);
- if (rc)
- goto out;
- }
-
- rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf);
-out:
- mutex_unlock(&sptlrpc_conf_lock);
-
- if (rc == 0)
- get_default_flavor(sf);
-
- flavor_set_flags(sf, from, to, 1);
-}
-
-#define SEC_ADAPT_DELAY (10)
-
-/**
- * called by client devices when the sptlrpc config has changed;
- * import_sec_adapt will be done later.
- */
-void sptlrpc_conf_client_adapt(struct obd_device *obd)
-{
- struct obd_import *imp;
-
- LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
- strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0);
- CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
-
- /* serialize with connect/disconnect import */
- down_read(&obd->u.cli.cl_sem);
-
- imp = obd->u.cli.cl_import;
- if (imp) {
- spin_lock(&imp->imp_lock);
- if (imp->imp_sec)
- imp->imp_sec_expire = ktime_get_real_seconds() +
- SEC_ADAPT_DELAY;
- spin_unlock(&imp->imp_lock);
- }
-
- up_read(&obd->u.cli.cl_sem);
-}
-EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
-
-int sptlrpc_conf_init(void)
-{
- mutex_init(&sptlrpc_conf_lock);
- return 0;
-}
-
-void sptlrpc_conf_fini(void)
-{
- struct sptlrpc_conf *conf, *conf_next;
-
- mutex_lock(&sptlrpc_conf_lock);
- list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
- sptlrpc_conf_free(conf);
- }
- LASSERT(list_empty(&sptlrpc_confs));
- mutex_unlock(&sptlrpc_conf_lock);
-}
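
The deleted sec_config.c above keeps one sptlrpc_conf per filesystem on a global list protected by sptlrpc_conf_lock, and sptlrpc_conf_get() either finds the entry or creates it on demand. The following standalone userspace sketch shows the same lookup-or-create idiom under a single lock; the demo_* names are invented for illustration and are not part of the removed driver.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_conf {
	char name[64];
	struct demo_conf *next;
};

static struct demo_conf *demo_confs;
static pthread_mutex_t demo_conf_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold demo_conf_lock, mirroring the conf_lock rule above */
static struct demo_conf *demo_conf_get(const char *name, int create)
{
	struct demo_conf *conf;

	/* walk the global list first */
	for (conf = demo_confs; conf; conf = conf->next)
		if (strcmp(conf->name, name) == 0)
			return conf;

	if (!create)
		return NULL;

	/* miss: allocate and link a new entry */
	conf = calloc(1, sizeof(*conf));
	if (!conf)
		return NULL;

	snprintf(conf->name, sizeof(conf->name), "%s", name);
	conf->next = demo_confs;
	demo_confs = conf;
	return conf;
}

int main(void)
{
	pthread_mutex_lock(&demo_conf_lock);
	printf("created: %p\n", (void *)demo_conf_get("lustre", 1));
	printf("found:   %p\n", (void *)demo_conf_get("lustre", 0));
	pthread_mutex_unlock(&demo_conf_lock);
	return 0;
}
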
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
deleted file mode 100644
index 520329f88465..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_gc.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_sec.h"
-
-#include "ptlrpc_internal.h"
-
-#define SEC_GC_INTERVAL (30 * 60)
-
-
-static struct mutex sec_gc_mutex;
-static LIST_HEAD(sec_gc_list);
-static spinlock_t sec_gc_list_lock;
-
-static LIST_HEAD(sec_gc_ctx_list);
-static spinlock_t sec_gc_ctx_list_lock;
-
-static struct ptlrpc_thread sec_gc_thread;
-static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
-
-
-void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
-{
- LASSERT(sec->ps_policy->sp_cops->gc_ctx);
- LASSERT(sec->ps_gc_interval > 0);
- LASSERT(list_empty(&sec->ps_gc_list));
-
- sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
-
- spin_lock(&sec_gc_list_lock);
-	list_add_tail(&sec->ps_gc_list, &sec_gc_list);
- spin_unlock(&sec_gc_list_lock);
-
- CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
-}
-EXPORT_SYMBOL(sptlrpc_gc_add_sec);
-
-void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
-{
- if (list_empty(&sec->ps_gc_list))
- return;
-
- might_sleep();
-
- /* signal before list_del to make iteration in gc thread safe */
- atomic_inc(&sec_gc_wait_del);
-
- spin_lock(&sec_gc_list_lock);
- list_del_init(&sec->ps_gc_list);
- spin_unlock(&sec_gc_list_lock);
-
- /* barrier */
- mutex_lock(&sec_gc_mutex);
- mutex_unlock(&sec_gc_mutex);
-
- atomic_dec(&sec_gc_wait_del);
-
- CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
-}
-EXPORT_SYMBOL(sptlrpc_gc_del_sec);
-
-static void sec_process_ctx_list(void)
-{
- struct ptlrpc_cli_ctx *ctx;
-
- spin_lock(&sec_gc_ctx_list_lock);
-
- while (!list_empty(&sec_gc_ctx_list)) {
- ctx = list_entry(sec_gc_ctx_list.next,
- struct ptlrpc_cli_ctx, cc_gc_chain);
- list_del_init(&ctx->cc_gc_chain);
- spin_unlock(&sec_gc_ctx_list_lock);
-
- LASSERT(ctx->cc_sec);
- LASSERT(atomic_read(&ctx->cc_refcount) == 1);
- CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- sptlrpc_cli_ctx_put(ctx, 1);
-
- spin_lock(&sec_gc_ctx_list_lock);
- }
-
- spin_unlock(&sec_gc_ctx_list_lock);
-}
-
-static void sec_do_gc(struct ptlrpc_sec *sec)
-{
- LASSERT(sec->ps_policy->sp_cops->gc_ctx);
-
- if (unlikely(sec->ps_gc_next == 0)) {
- CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
- sec, sec->ps_policy->sp_name);
- return;
- }
-
- CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
-
- if (sec->ps_gc_next > ktime_get_real_seconds())
- return;
-
- sec->ps_policy->sp_cops->gc_ctx(sec);
- sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
-}
-
-static int sec_gc_main(void *arg)
-{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
- struct l_wait_info lwi;
-
- unshare_fs_struct();
-
- /* Record that the thread is running */
- thread_set_flags(thread, SVC_RUNNING);
- wake_up(&thread->t_ctl_waitq);
-
- while (1) {
- struct ptlrpc_sec *sec;
-
- thread_clear_flags(thread, SVC_SIGNAL);
- sec_process_ctx_list();
-again:
-		/* go through the sec list and do GC.
-		 * FIXME: we iterate through the whole list each time, which
-		 * is not optimal; we may want a balanced binary tree to track
-		 * each sec in order of expiry time.
-		 * Another issue is that we wake up at a fixed interval instead
-		 * of according to each sec's expiry time. */
- mutex_lock(&sec_gc_mutex);
- list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
- /* if someone is waiting to be deleted, let it
- * proceed as soon as possible. */
- if (atomic_read(&sec_gc_wait_del)) {
- CDEBUG(D_SEC, "deletion pending, start over\n");
- mutex_unlock(&sec_gc_mutex);
- goto again;
- }
-
- sec_do_gc(sec);
- }
- mutex_unlock(&sec_gc_mutex);
-
- /* check ctx list again before sleep */
- sec_process_ctx_list();
-
- lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopping(thread) ||
- thread_is_signal(thread),
- &lwi);
-
- if (thread_test_and_clear_flags(thread, SVC_STOPPING))
- break;
- }
-
- thread_set_flags(thread, SVC_STOPPED);
- wake_up(&thread->t_ctl_waitq);
- return 0;
-}
-
-int sptlrpc_gc_init(void)
-{
- struct l_wait_info lwi = { 0 };
- struct task_struct *task;
-
- mutex_init(&sec_gc_mutex);
- spin_lock_init(&sec_gc_list_lock);
- spin_lock_init(&sec_gc_ctx_list_lock);
-
- /* initialize thread control */
- memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
- init_waitqueue_head(&sec_gc_thread.t_ctl_waitq);
-
- task = kthread_run(sec_gc_main, &sec_gc_thread, "sptlrpc_gc");
- if (IS_ERR(task)) {
- CERROR("can't start gc thread: %ld\n", PTR_ERR(task));
- return PTR_ERR(task);
- }
-
- l_wait_event(sec_gc_thread.t_ctl_waitq,
- thread_is_running(&sec_gc_thread), &lwi);
- return 0;
-}
-
-void sptlrpc_gc_fini(void)
-{
- struct l_wait_info lwi = { 0 };
-
- thread_set_flags(&sec_gc_thread, SVC_STOPPING);
- wake_up(&sec_gc_thread.t_ctl_waitq);
-
- l_wait_event(sec_gc_thread.t_ctl_waitq,
- thread_is_stopped(&sec_gc_thread), &lwi);
-}
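
sec_do_gc() in the deleted sec_gc.c above only runs the policy's gc_ctx callback once a sec's deadline (ps_gc_next) has passed, then pushes the deadline out by ps_gc_interval. Below is a minimal standalone sketch of that expiry check; the demo_* names are invented and a trivial callback stands in for the real context collector.

#include <stdio.h>
#include <time.h>

struct demo_gc_state {
	time_t gc_next;		/* next time a GC pass is due; 0 = never armed */
	long gc_interval;	/* seconds between passes */
};

static void demo_gc_tick(struct demo_gc_state *st, void (*collect)(void))
{
	time_t now = time(NULL);

	if (st->gc_next == 0)
		return;			/* mirrors the "0 gc time" early return */
	if (st->gc_next > now)
		return;			/* not due yet */

	collect();
	st->gc_next = now + st->gc_interval;
}

static void demo_collect(void)
{
	puts("collecting expired contexts");
}

int main(void)
{
	struct demo_gc_state st = {
		.gc_next = time(NULL),		/* due immediately */
		.gc_interval = 30 * 60,		/* same 30-minute cadence as above */
	};

	demo_gc_tick(&st, demo_collect);	/* runs the collector */
	demo_gc_tick(&st, demo_collect);	/* skipped: next pass ~30 min away */
	return 0;
}
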
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
deleted file mode 100644
index a74e2f01e5be..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_lproc.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/crypto.h>
-
-#include "../include/obd.h"
-#include "../include/obd_class.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_import.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_sec.h"
-
-#include "ptlrpc_internal.h"
-
-static char *sec_flags2str(unsigned long flags, char *buf, int bufsize)
-{
- buf[0] = '\0';
-
- if (flags & PTLRPC_SEC_FL_REVERSE)
- strlcat(buf, "reverse,", bufsize);
- if (flags & PTLRPC_SEC_FL_ROOTONLY)
- strlcat(buf, "rootonly,", bufsize);
- if (flags & PTLRPC_SEC_FL_UDESC)
- strlcat(buf, "udesc,", bufsize);
- if (flags & PTLRPC_SEC_FL_BULK)
- strlcat(buf, "bulk,", bufsize);
- if (buf[0] == '\0')
- strlcat(buf, "-,", bufsize);
-
- return buf;
-}
-
-static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
-{
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
- struct ptlrpc_sec *sec = NULL;
- char str[32];
-
- LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);
-
- if (cli->cl_import)
- sec = sptlrpc_import_sec_ref(cli->cl_import);
- if (sec == NULL)
- goto out;
-
- sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str));
-
- seq_printf(seq, "rpc flavor: %s\n",
- sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
- seq_printf(seq, "bulk flavor: %s\n",
- sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
- seq_printf(seq, "flags: %s\n",
- sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
- seq_printf(seq, "id: %d\n", sec->ps_id);
- seq_printf(seq, "refcount: %d\n",
- atomic_read(&sec->ps_refcount));
- seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx));
-	seq_printf(seq, "gc interval %ld\n", sec->ps_gc_interval);
- seq_printf(seq, "gc next %lld\n",
- sec->ps_gc_interval ?
- (s64)(sec->ps_gc_next - ktime_get_real_seconds()) : 0ll);
-
- sptlrpc_sec_put(sec);
-out:
- return 0;
-}
-LPROC_SEQ_FOPS_RO(sptlrpc_info_lprocfs);
-
-static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v)
-{
- struct obd_device *dev = seq->private;
- struct client_obd *cli = &dev->u.cli;
- struct ptlrpc_sec *sec = NULL;
-
- LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
- strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);
-
- if (cli->cl_import)
- sec = sptlrpc_import_sec_ref(cli->cl_import);
- if (sec == NULL)
- goto out;
-
- if (sec->ps_policy->sp_cops->display)
- sec->ps_policy->sp_cops->display(sec, seq);
-
- sptlrpc_sec_put(sec);
-out:
- return 0;
-}
-LPROC_SEQ_FOPS_RO(sptlrpc_ctxs_lprocfs);
-
-int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev)
-{
- int rc;
-
- if (strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) != 0 &&
- strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) != 0 &&
- strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) != 0) {
- CERROR("can't register lproc for obd type %s\n",
- dev->obd_type->typ_name);
- return -EINVAL;
- }
-
- rc = ldebugfs_obd_seq_create(dev, "srpc_info", 0444,
- &sptlrpc_info_lprocfs_fops, dev);
- if (rc) {
- CERROR("create proc entry srpc_info for %s: %d\n",
- dev->obd_name, rc);
- return rc;
- }
-
- rc = ldebugfs_obd_seq_create(dev, "srpc_contexts", 0444,
- &sptlrpc_ctxs_lprocfs_fops, dev);
- if (rc) {
- CERROR("create proc entry srpc_contexts for %s: %d\n",
- dev->obd_name, rc);
- return rc;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(sptlrpc_lprocfs_cliobd_attach);
-
-LPROC_SEQ_FOPS_RO(sptlrpc_proc_enc_pool);
-static struct lprocfs_vars sptlrpc_lprocfs_vars[] = {
- { "encrypt_page_pools", &sptlrpc_proc_enc_pool_fops },
- { NULL }
-};
-
-static struct dentry *sptlrpc_debugfs_dir;
-
-int sptlrpc_lproc_init(void)
-{
- int rc;
-
- LASSERT(sptlrpc_debugfs_dir == NULL);
-
- sptlrpc_debugfs_dir = ldebugfs_register("sptlrpc", debugfs_lustre_root,
- sptlrpc_lprocfs_vars, NULL);
- if (IS_ERR_OR_NULL(sptlrpc_debugfs_dir)) {
- rc = sptlrpc_debugfs_dir ? PTR_ERR(sptlrpc_debugfs_dir)
- : -ENOMEM;
- sptlrpc_debugfs_dir = NULL;
- return rc;
- }
- return 0;
-}
-
-void sptlrpc_lproc_fini(void)
-{
- if (!IS_ERR_OR_NULL(sptlrpc_debugfs_dir))
- ldebugfs_remove(&sptlrpc_debugfs_dir);
-}
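
sec_flags2str() in the deleted sec_lproc.c above builds a human-readable flag string by appending one token per set bit and falling back to "-" when nothing is set. A standalone sketch of the same helper style follows; the DEMO_FL_* values are invented for illustration and do not reuse the driver's flag definitions.

#include <stdio.h>
#include <string.h>

#define DEMO_FL_REVERSE		0x01
#define DEMO_FL_ROOTONLY	0x02
#define DEMO_FL_BULK		0x04

static char *demo_flags2str(unsigned long flags, char *buf, size_t bufsize)
{
	buf[0] = '\0';

	/* append one token per set bit, always leaving room for the NUL */
	if (flags & DEMO_FL_REVERSE)
		strncat(buf, "reverse,", bufsize - strlen(buf) - 1);
	if (flags & DEMO_FL_ROOTONLY)
		strncat(buf, "rootonly,", bufsize - strlen(buf) - 1);
	if (flags & DEMO_FL_BULK)
		strncat(buf, "bulk,", bufsize - strlen(buf) - 1);
	if (buf[0] == '\0')
		strncat(buf, "-,", bufsize - strlen(buf) - 1);

	return buf;
}

int main(void)
{
	char str[32];

	printf("flags: %s\n",
	       demo_flags2str(DEMO_FL_ROOTONLY | DEMO_FL_BULK, str, sizeof(str)));
	return 0;
}
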
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
deleted file mode 100644
index 0f3c693dd217..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_null.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-
-#include "../include/obd_support.h"
-#include "../include/obd_cksum.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_sec.h"
-
-#include "ptlrpc_internal.h"
-
-static struct ptlrpc_sec_policy null_policy;
-static struct ptlrpc_sec null_sec;
-static struct ptlrpc_cli_ctx null_cli_ctx;
-static struct ptlrpc_svc_ctx null_svc_ctx;
-
-/*
- * we can temporarily use the topmost 8 bits of lm_secflvr to identify
- * the source sec part.
- */
-static inline
-void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
-{
- msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 24;
-}
-
-static inline
-enum lustre_sec_part null_decode_sec_part(struct lustre_msg *msg)
-{
- return (msg->lm_secflvr >> 24) & 0xFF;
-}
-
-static int null_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
-{
- /* should never reach here */
- LBUG();
- return 0;
-}
-
-static
-int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
-
- if (!req->rq_import->imp_dlm_fake) {
- struct obd_device *obd = req->rq_import->imp_obd;
- null_encode_sec_part(req->rq_reqbuf,
- obd->u.cli.cl_sp_me);
- }
- req->rq_reqdata_len = req->rq_reqlen;
- return 0;
-}
-
-static
-int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- __u32 cksums, cksumc;
-
- LASSERT(req->rq_repdata);
-
- req->rq_repmsg = req->rq_repdata;
- req->rq_replen = req->rq_repdata_len;
-
- if (req->rq_early) {
- cksums = lustre_msg_get_cksum(req->rq_repdata);
- cksumc = lustre_msg_calc_cksum(req->rq_repmsg);
- if (cksumc != cksums) {
- CDEBUG(D_SEC,
- "early reply checksum mismatch: %08x != %08x\n",
- cksumc, cksums);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static
-struct ptlrpc_sec *null_create_sec(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *sf)
-{
- LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL);
-
-	/* the generic layer has taken a module reference for us; since we
-	 * never really destroy the sec, simply release the reference here.
- */
- sptlrpc_policy_put(&null_policy);
- return &null_sec;
-}
-
-static
-void null_destroy_sec(struct ptlrpc_sec *sec)
-{
- LASSERT(sec == &null_sec);
-}
-
-static
-struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec,
- struct vfs_cred *vcred,
- int create, int remove_dead)
-{
- atomic_inc(&null_cli_ctx.cc_refcount);
- return &null_cli_ctx;
-}
-
-static
-int null_flush_ctx_cache(struct ptlrpc_sec *sec,
- uid_t uid,
- int grace, int force)
-{
- return 0;
-}
-
-static
-int null_alloc_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
- if (!req->rq_reqbuf) {
- int alloc_size = size_roundup_power2(msgsize);
-
- LASSERT(!req->rq_pool);
- req->rq_reqbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS);
- if (!req->rq_reqbuf)
- return -ENOMEM;
-
- req->rq_reqbuf_len = alloc_size;
- } else {
- LASSERT(req->rq_pool);
- LASSERT(req->rq_reqbuf_len >= msgsize);
- memset(req->rq_reqbuf, 0, msgsize);
- }
-
- req->rq_reqmsg = req->rq_reqbuf;
- return 0;
-}
-
-static
-void null_free_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- if (!req->rq_pool) {
- LASSERTF(req->rq_reqmsg == req->rq_reqbuf,
- "req %p: reqmsg %p is not reqbuf %p in null sec\n",
- req, req->rq_reqmsg, req->rq_reqbuf);
- LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen,
-			 "req %p: reqlen %d should not be larger than buflen %d\n",
- req, req->rq_reqlen, req->rq_reqbuf_len);
-
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
-}
-
-static
-int null_alloc_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
-	/* add space for early reply */
- msgsize += lustre_msg_early_size();
-
- msgsize = size_roundup_power2(msgsize);
-
- req->rq_repbuf = libcfs_kvzalloc(msgsize, GFP_NOFS);
- if (!req->rq_repbuf)
- return -ENOMEM;
-
- req->rq_repbuf_len = msgsize;
- return 0;
-}
-
-static
-void null_free_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- LASSERT(req->rq_repbuf);
-
- kvfree(req->rq_repbuf);
- req->rq_repbuf = NULL;
- req->rq_repbuf_len = 0;
-}
-
-static
-int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int segment, int newsize)
-{
- struct lustre_msg *newbuf;
- struct lustre_msg *oldbuf = req->rq_reqmsg;
- int oldsize, newmsg_size, alloc_size;
-
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf == req->rq_reqmsg);
- LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
- LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf));
-
- /* compute new message size */
- oldsize = req->rq_reqbuf->lm_buflens[segment];
- req->rq_reqbuf->lm_buflens[segment] = newsize;
- newmsg_size = lustre_packed_msg_size(oldbuf);
- req->rq_reqbuf->lm_buflens[segment] = oldsize;
-
- /* request from pool should always have enough buffer */
- LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size);
-
- if (req->rq_reqbuf_len < newmsg_size) {
- alloc_size = size_roundup_power2(newmsg_size);
-
- newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS);
- if (newbuf == NULL)
- return -ENOMEM;
-
-		/* Must lock this, so that an otherwise unprotected change of
-		 * rq_reqmsg does not race with the threads traversing
-		 * imp_replay_list in parallel. See LU-3333.
-		 * This is a band-aid at best; we really need to deal with this
-		 * in the request-enlarging code, before the unpacking that is
-		 * already there. */
- if (req->rq_import)
- spin_lock(&req->rq_import->imp_lock);
- memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
-
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = req->rq_reqmsg = newbuf;
- req->rq_reqbuf_len = alloc_size;
-
- if (req->rq_import)
- spin_unlock(&req->rq_import->imp_lock);
- }
-
- _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
- req->rq_reqlen = newmsg_size;
-
- return 0;
-}
-
-static struct ptlrpc_svc_ctx null_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
- .sc_policy = &null_policy,
-};
-
-static
-int null_accept(struct ptlrpc_request *req)
-{
- LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
- SPTLRPC_POLICY_NULL);
-
- if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
- CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
- return SECSVC_DROP;
- }
-
- req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
-
- req->rq_reqmsg = req->rq_reqbuf;
- req->rq_reqlen = req->rq_reqdata_len;
-
- req->rq_svc_ctx = &null_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
-
- return SECSVC_OK;
-}
-
-static
-int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_reply_state *rs;
- int rs_size = sizeof(*rs) + msgsize;
-
- LASSERT(msgsize % 8 == 0);
-
- rs = req->rq_reply_state;
-
- if (rs) {
- /* pre-allocated */
- LASSERT(rs->rs_size >= rs_size);
- } else {
- rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
- if (rs == NULL)
- return -ENOMEM;
-
- rs->rs_size = rs_size;
- }
-
- rs->rs_svc_ctx = req->rq_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
-
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
- rs->rs_repbuf_len = rs_size - sizeof(*rs);
- rs->rs_msg = rs->rs_repbuf;
-
- req->rq_reply_state = rs;
- return 0;
-}
-
-static
-void null_free_rs(struct ptlrpc_reply_state *rs)
-{
- LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1);
- atomic_dec(&rs->rs_svc_ctx->sc_refcount);
-
- if (!rs->rs_prealloc)
- kvfree(rs);
-}
-
-static
-int null_authorize(struct ptlrpc_request *req)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
-
- LASSERT(rs);
-
- rs->rs_repbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
- rs->rs_repdata_len = req->rq_replen;
-
- if (likely(req->rq_packed_final)) {
- if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
- req->rq_reply_off = lustre_msg_early_size();
- else
- req->rq_reply_off = 0;
- } else {
- __u32 cksum;
-
- cksum = lustre_msg_calc_cksum(rs->rs_repbuf);
- lustre_msg_set_cksum(rs->rs_repbuf, cksum);
- req->rq_reply_off = 0;
- }
-
- return 0;
-}
-
-static struct ptlrpc_ctx_ops null_ctx_ops = {
- .refresh = null_ctx_refresh,
- .sign = null_ctx_sign,
- .verify = null_ctx_verify,
-};
-
-static struct ptlrpc_sec_cops null_sec_cops = {
- .create_sec = null_create_sec,
- .destroy_sec = null_destroy_sec,
- .lookup_ctx = null_lookup_ctx,
- .flush_ctx_cache = null_flush_ctx_cache,
- .alloc_reqbuf = null_alloc_reqbuf,
- .alloc_repbuf = null_alloc_repbuf,
- .free_reqbuf = null_free_reqbuf,
- .free_repbuf = null_free_repbuf,
- .enlarge_reqbuf = null_enlarge_reqbuf,
-};
-
-static struct ptlrpc_sec_sops null_sec_sops = {
- .accept = null_accept,
- .alloc_rs = null_alloc_rs,
- .authorize = null_authorize,
- .free_rs = null_free_rs,
-};
-
-static struct ptlrpc_sec_policy null_policy = {
- .sp_owner = THIS_MODULE,
- .sp_name = "sec.null",
- .sp_policy = SPTLRPC_POLICY_NULL,
- .sp_cops = &null_sec_cops,
- .sp_sops = &null_sec_sops,
-};
-
-static void null_init_internal(void)
-{
- static HLIST_HEAD(__list);
-
- null_sec.ps_policy = &null_policy;
- atomic_set(&null_sec.ps_refcount, 1); /* always busy */
- null_sec.ps_id = -1;
- null_sec.ps_import = NULL;
- null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
- null_sec.ps_flvr.sf_flags = 0;
- null_sec.ps_part = LUSTRE_SP_ANY;
- null_sec.ps_dying = 0;
- spin_lock_init(&null_sec.ps_lock);
- atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
- INIT_LIST_HEAD(&null_sec.ps_gc_list);
- null_sec.ps_gc_interval = 0;
- null_sec.ps_gc_next = 0;
-
- hlist_add_head(&null_cli_ctx.cc_cache, &__list);
- atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
- null_cli_ctx.cc_sec = &null_sec;
- null_cli_ctx.cc_ops = &null_ctx_ops;
- null_cli_ctx.cc_expire = 0;
- null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
- PTLRPC_CTX_UPTODATE;
- null_cli_ctx.cc_vcred.vc_uid = 0;
- spin_lock_init(&null_cli_ctx.cc_lock);
- INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
- INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
-}
-
-int sptlrpc_null_init(void)
-{
- int rc;
-
- null_init_internal();
-
- rc = sptlrpc_register_policy(&null_policy);
- if (rc)
- CERROR("failed to register %s: %d\n", null_policy.sp_name, rc);
-
- return rc;
-}
-
-void sptlrpc_null_fini(void)
-{
- int rc;
-
- rc = sptlrpc_unregister_policy(&null_policy);
- if (rc)
- CERROR("failed to unregister %s: %d\n",
- null_policy.sp_name, rc);
-}
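
The deleted sec_null.c above carries the source sec part in the topmost 8 bits of the 32-bit lm_secflvr word (null_encode_sec_part()/null_decode_sec_part()). The standalone sketch below shows that bit packing in isolation; the demo_* names and the example value are invented.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* pack an 8-bit "sec part" tag into the top byte of a 32-bit flavor word */
static inline uint32_t demo_encode_sp(uint32_t secflvr, uint8_t sp)
{
	return secflvr | ((uint32_t)sp << 24);
}

static inline uint8_t demo_decode_sp(uint32_t secflvr)
{
	return (secflvr >> 24) & 0xFF;
}

int main(void)
{
	uint32_t flvr = demo_encode_sp(0, 3);	/* 3 is an arbitrary demo value */

	assert(demo_decode_sp(flvr) == 3);
	printf("flavor 0x%08x carries sec part %u\n",
	       (unsigned int)flvr, demo_decode_sp(flvr));
	return 0;
}
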
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
deleted file mode 100644
index d905b77b758b..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ /dev/null
@@ -1,1013 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/ptlrpc/sec_plain.c
- *
- * Author: Eric Mei <ericm@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-
-
-#include "../include/obd_support.h"
-#include "../include/obd_cksum.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_sec.h"
-#include "ptlrpc_internal.h"
-
-struct plain_sec {
- struct ptlrpc_sec pls_base;
- rwlock_t pls_lock;
- struct ptlrpc_cli_ctx *pls_ctx;
-};
-
-static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
-{
- return container_of(sec, struct plain_sec, pls_base);
-}
-
-static struct ptlrpc_sec_policy plain_policy;
-static struct ptlrpc_ctx_ops plain_ctx_ops;
-static struct ptlrpc_svc_ctx plain_svc_ctx;
-
-static unsigned int plain_at_offset;
-
-/*
- * for simplicity, plain policy rpc use fixed layout.
- */
-#define PLAIN_PACK_SEGMENTS (4)
-
-#define PLAIN_PACK_HDR_OFF (0)
-#define PLAIN_PACK_MSG_OFF (1)
-#define PLAIN_PACK_USER_OFF (2)
-#define PLAIN_PACK_BULK_OFF (3)
-
-#define PLAIN_FL_USER (0x01)
-#define PLAIN_FL_BULK (0x02)
-
-struct plain_header {
- __u8 ph_ver; /* 0 */
- __u8 ph_flags;
- __u8 ph_sp; /* source */
- __u8 ph_bulk_hash_alg; /* complete flavor desc */
- __u8 ph_pad[4];
-};
-
-struct plain_bulk_token {
- __u8 pbt_hash[8];
-};
-
-#define PLAIN_BSD_SIZE \
- (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
-
-/****************************************
- * bulk checksum helpers *
- ****************************************/
-
-static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
-{
- struct ptlrpc_bulk_sec_desc *bsd;
-
- if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
- return -EPROTO;
-
- bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
- if (bsd == NULL) {
- CERROR("bulk sec desc has short size %d\n",
- lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
- return -EPROTO;
- }
-
- if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
- bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
- CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
- return -EPROTO;
- }
-
- return 0;
-}
-
-static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
- __u8 hash_alg,
- struct plain_bulk_token *token)
-{
- if (hash_alg == BULK_HASH_ALG_NULL)
- return 0;
-
- memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
- return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
- sizeof(token->pbt_hash));
-}
-
-static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
- __u8 hash_alg,
- struct plain_bulk_token *tokenr)
-{
- struct plain_bulk_token tokenv;
- int rc;
-
- if (hash_alg == BULK_HASH_ALG_NULL)
- return 0;
-
- memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
- rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
- sizeof(tokenv.pbt_hash));
- if (rc)
- return rc;
-
- if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
- return -EACCES;
- return 0;
-}
-
-static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
-{
- char *ptr;
- unsigned int off, i;
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len == 0)
- continue;
-
- ptr = kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
- ptr[off] ^= 0x1;
- kunmap(desc->bd_iov[i].kiov_page);
- return;
- }
-}
-
-/****************************************
- * cli_ctx apis *
- ****************************************/
-
-static
-int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
-{
- /* should never reach here */
- LBUG();
- return 0;
-}
-
-static
-int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
-{
- return 0;
-}
-
-static
-int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- struct lustre_msg *msg = req->rq_reqbuf;
- struct plain_header *phdr;
-
- msg->lm_secflvr = req->rq_flvr.sf_rpc;
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
- phdr->ph_ver = 0;
- phdr->ph_flags = 0;
- phdr->ph_sp = ctx->cc_sec->ps_part;
- phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
-
- if (req->rq_pack_udesc)
- phdr->ph_flags |= PLAIN_FL_USER;
- if (req->rq_pack_bulk)
- phdr->ph_flags |= PLAIN_FL_BULK;
-
- req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
- msg->lm_buflens);
- return 0;
-}
-
-static
-int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
-{
- struct lustre_msg *msg = req->rq_repdata;
- struct plain_header *phdr;
- __u32 cksum;
- int swabbed;
-
- if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
- CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
- return -EPROTO;
- }
-
- swabbed = ptlrpc_rep_need_swab(req);
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
- if (phdr == NULL) {
- CERROR("missing plain header\n");
- return -EPROTO;
- }
-
- if (phdr->ph_ver != 0) {
- CERROR("Invalid header version\n");
- return -EPROTO;
- }
-
- /* expect no user desc in reply */
- if (phdr->ph_flags & PLAIN_FL_USER) {
- CERROR("Unexpected udesc flag in reply\n");
- return -EPROTO;
- }
-
- if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
- CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
- req->rq_flvr.u_bulk.hash.hash_alg);
- return -EPROTO;
- }
-
- if (unlikely(req->rq_early)) {
- unsigned int hsize = 4;
-
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
- lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
- NULL, 0, (unsigned char *)&cksum, &hsize);
- if (cksum != msg->lm_cksum) {
- CDEBUG(D_SEC,
- "early reply checksum mismatch: %08x != %08x\n",
- cpu_to_le32(cksum), msg->lm_cksum);
- return -EINVAL;
- }
- } else {
-		/* whether or not we sent with bulk, we expect the same in the
-		 * reply, except for early replies */
- if (!req->rq_early &&
- !equi(req->rq_pack_bulk == 1,
- phdr->ph_flags & PLAIN_FL_BULK)) {
- CERROR("%s bulk checksum in reply\n",
- req->rq_pack_bulk ? "Missing" : "Unexpected");
- return -EPROTO;
- }
-
- if (phdr->ph_flags & PLAIN_FL_BULK) {
- if (plain_unpack_bsd(msg, swabbed))
- return -EPROTO;
- }
- }
-
- req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
- req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
- return 0;
-}
-
-static
-int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_bulk_sec_desc *bsd;
- struct plain_bulk_token *token;
- int rc;
-
- LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
-
- bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- token = (struct plain_bulk_token *) bsd->bsd_data;
-
- bsd->bsd_version = 0;
- bsd->bsd_flags = 0;
- bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
- bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
-
- if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- return 0;
-
- if (req->rq_bulk_read)
- return 0;
-
- rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- token);
- if (rc) {
- CERROR("bulk write: failed to compute checksum: %d\n", rc);
- } else {
- /*
-		 * for sending we only compute a wrong checksum instead of
-		 * corrupting the data, so the data is still correct on a resend
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
- req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
- token->pbt_hash[0] ^= 0x1;
- }
-
- return rc;
-}
-
-static
-int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_bulk_sec_desc *bsdv;
- struct plain_bulk_token *tokenv;
- int rc;
- int i, nob;
-
- LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
- LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
-
- bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
-
- if (req->rq_bulk_write) {
- if (bsdv->bsd_flags & BSD_FL_ERR)
- return -EIO;
- return 0;
- }
-
- /* fix the actual data size */
- for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
- desc->bd_iov[i].kiov_len =
- desc->bd_nob_transferred - nob;
- }
- nob += desc->bd_iov[i].kiov_len;
- }
-
- rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- tokenv);
- if (rc)
- CERROR("bulk read: client verify failed: %d\n", rc);
-
- return rc;
-}
-
-/****************************************
- * sec apis *
- ****************************************/
-
-static
-struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
-{
- struct ptlrpc_cli_ctx *ctx, *ctx_new;
-
- ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS);
-
- write_lock(&plsec->pls_lock);
-
- ctx = plsec->pls_ctx;
- if (ctx) {
- atomic_inc(&ctx->cc_refcount);
-
- kfree(ctx_new);
- } else if (ctx_new) {
- ctx = ctx_new;
-
- atomic_set(&ctx->cc_refcount, 1); /* for cache */
- ctx->cc_sec = &plsec->pls_base;
- ctx->cc_ops = &plain_ctx_ops;
- ctx->cc_expire = 0;
- ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
- ctx->cc_vcred.vc_uid = 0;
- spin_lock_init(&ctx->cc_lock);
- INIT_LIST_HEAD(&ctx->cc_req_list);
- INIT_LIST_HEAD(&ctx->cc_gc_chain);
-
- plsec->pls_ctx = ctx;
- atomic_inc(&plsec->pls_base.ps_nctx);
- atomic_inc(&plsec->pls_base.ps_refcount);
-
- atomic_inc(&ctx->cc_refcount); /* for caller */
- }
-
- write_unlock(&plsec->pls_lock);
-
- return ctx;
-}
-
-static
-void plain_destroy_sec(struct ptlrpc_sec *sec)
-{
- struct plain_sec *plsec = sec2plsec(sec);
-
- LASSERT(sec->ps_policy == &plain_policy);
- LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
- LASSERT(plsec->pls_ctx == NULL);
-
- class_import_put(sec->ps_import);
-
- kfree(plsec);
-}
-
-static
-void plain_kill_sec(struct ptlrpc_sec *sec)
-{
- sec->ps_dying = 1;
-}
-
-static
-struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
- struct ptlrpc_svc_ctx *svc_ctx,
- struct sptlrpc_flavor *sf)
-{
- struct plain_sec *plsec;
- struct ptlrpc_sec *sec;
- struct ptlrpc_cli_ctx *ctx;
-
- LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
-
- plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
- if (!plsec)
- return NULL;
-
- /*
- * initialize plain_sec
- */
- rwlock_init(&plsec->pls_lock);
- plsec->pls_ctx = NULL;
-
- sec = &plsec->pls_base;
- sec->ps_policy = &plain_policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
- sec->ps_id = sptlrpc_get_next_secid();
- sec->ps_import = class_import_get(imp);
- sec->ps_flvr = *sf;
- spin_lock_init(&sec->ps_lock);
- INIT_LIST_HEAD(&sec->ps_gc_list);
- sec->ps_gc_interval = 0;
- sec->ps_gc_next = 0;
-
- /* install ctx immediately if this is a reverse sec */
- if (svc_ctx) {
- ctx = plain_sec_install_ctx(plsec);
- if (ctx == NULL) {
- plain_destroy_sec(sec);
- return NULL;
- }
- sptlrpc_cli_ctx_put(ctx, 1);
- }
-
- return sec;
-}
-
-static
-struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
- struct vfs_cred *vcred,
- int create, int remove_dead)
-{
- struct plain_sec *plsec = sec2plsec(sec);
- struct ptlrpc_cli_ctx *ctx;
-
- read_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- if (ctx)
- atomic_inc(&ctx->cc_refcount);
- read_unlock(&plsec->pls_lock);
-
- if (unlikely(ctx == NULL))
- ctx = plain_sec_install_ctx(plsec);
-
- return ctx;
-}
-
-static
-void plain_release_ctx(struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *ctx, int sync)
-{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(ctx->cc_sec == sec);
-
- kfree(ctx);
-
- atomic_dec(&sec->ps_nctx);
- sptlrpc_sec_put(sec);
-}
-
-static
-int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
- uid_t uid, int grace, int force)
-{
- struct plain_sec *plsec = sec2plsec(sec);
- struct ptlrpc_cli_ctx *ctx;
-
-	/* do nothing unless the caller wants to flush for 'all' */
- if (uid != -1)
- return 0;
-
- write_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- plsec->pls_ctx = NULL;
- write_unlock(&plsec->pls_lock);
-
- if (ctx)
- sptlrpc_cli_ctx_put(ctx, 1);
- return 0;
-}
-
-static
-int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int alloc_len;
-
- buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
- buflens[PLAIN_PACK_MSG_OFF] = msgsize;
-
- if (req->rq_pack_udesc)
- buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();
-
- if (req->rq_pack_bulk) {
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
- }
-
- alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- if (!req->rq_reqbuf) {
- LASSERT(!req->rq_pool);
-
- alloc_len = size_roundup_power2(alloc_len);
- req->rq_reqbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
- if (!req->rq_reqbuf)
- return -ENOMEM;
-
- req->rq_reqbuf_len = alloc_len;
- } else {
- LASSERT(req->rq_pool);
- LASSERT(req->rq_reqbuf_len >= alloc_len);
- memset(req->rq_reqbuf, 0, alloc_len);
- }
-
- lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
-
- if (req->rq_pack_udesc)
- sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
-
- return 0;
-}
-
-static
-void plain_free_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- if (!req->rq_pool) {
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = NULL;
- req->rq_reqbuf_len = 0;
- }
-}
-
-static
-int plain_alloc_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int msgsize)
-{
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int alloc_len;
-
- buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
- buflens[PLAIN_PACK_MSG_OFF] = msgsize;
-
- if (req->rq_pack_bulk) {
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
- }
-
- alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- /* add space for early reply */
- alloc_len += plain_at_offset;
-
- alloc_len = size_roundup_power2(alloc_len);
-
- req->rq_repbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
- if (!req->rq_repbuf)
- return -ENOMEM;
-
- req->rq_repbuf_len = alloc_len;
- return 0;
-}
-
-static
-void plain_free_repbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req)
-{
- kvfree(req->rq_repbuf);
- req->rq_repbuf = NULL;
- req->rq_repbuf_len = 0;
-}
-
-static
-int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int segment, int newsize)
-{
- struct lustre_msg *newbuf;
- int oldsize;
- int newmsg_size, newbuf_size;
-
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
- LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
- req->rq_reqmsg);
-
- /* compute new embedded msg size. */
- oldsize = req->rq_reqmsg->lm_buflens[segment];
- req->rq_reqmsg->lm_buflens[segment] = newsize;
- newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
- req->rq_reqmsg->lm_buflens);
- req->rq_reqmsg->lm_buflens[segment] = oldsize;
-
- /* compute new wrapper msg size. */
- oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
- req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
- newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
- req->rq_reqbuf->lm_buflens);
- req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;
-
- /* request from pool should always have enough buffer */
- LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
-
- if (req->rq_reqbuf_len < newbuf_size) {
- newbuf_size = size_roundup_power2(newbuf_size);
-
- newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
- if (newbuf == NULL)
- return -ENOMEM;
-
-		/* Must lock this, so that an otherwise unprotected change of
-		 * rq_reqmsg does not race with the threads traversing
-		 * imp_replay_list in parallel. See LU-3333.
-		 * This is a band-aid at best; we really need to deal with this
-		 * in the request-enlarging code, before the unpacking that is
-		 * already there. */
- if (req->rq_import)
- spin_lock(&req->rq_import->imp_lock);
-
- memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
-
- kvfree(req->rq_reqbuf);
- req->rq_reqbuf = newbuf;
- req->rq_reqbuf_len = newbuf_size;
- req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_MSG_OFF, 0);
-
- if (req->rq_import)
- spin_unlock(&req->rq_import->imp_lock);
- }
-
- _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
- newmsg_size);
- _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
-
- req->rq_reqlen = newmsg_size;
- return 0;
-}
-
-/****************************************
- * service apis *
- ****************************************/
-
-static struct ptlrpc_svc_ctx plain_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
- .sc_policy = &plain_policy,
-};
-
-static
-int plain_accept(struct ptlrpc_request *req)
-{
- struct lustre_msg *msg = req->rq_reqbuf;
- struct plain_header *phdr;
- int swabbed;
-
- LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
- SPTLRPC_POLICY_PLAIN);
-
- if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
- SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
- SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
- SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
- CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
- return SECSVC_DROP;
- }
-
- if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
- CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
- return SECSVC_DROP;
- }
-
- swabbed = ptlrpc_req_need_swab(req);
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
- if (phdr == NULL) {
- CERROR("missing plain header\n");
- return -EPROTO;
- }
-
- if (phdr->ph_ver != 0) {
- CERROR("Invalid header version\n");
- return -EPROTO;
- }
-
- if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
- CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
- return -EPROTO;
- }
-
- req->rq_sp_from = phdr->ph_sp;
- req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;
-
- if (phdr->ph_flags & PLAIN_FL_USER) {
- if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
- swabbed)) {
- CERROR("Mal-formed user descriptor\n");
- return SECSVC_DROP;
- }
-
- req->rq_pack_udesc = 1;
- req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
- }
-
- if (phdr->ph_flags & PLAIN_FL_BULK) {
- if (plain_unpack_bsd(msg, swabbed))
- return SECSVC_DROP;
-
- req->rq_pack_bulk = 1;
- }
-
- req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
- req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
-
- req->rq_svc_ctx = &plain_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
-
- return SECSVC_OK;
-}
-
-static
-int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
-{
- struct ptlrpc_reply_state *rs;
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int rs_size = sizeof(*rs);
-
- LASSERT(msgsize % 8 == 0);
-
- buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
- buflens[PLAIN_PACK_MSG_OFF] = msgsize;
-
- if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
- buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
-
- rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- rs = req->rq_reply_state;
-
- if (rs) {
- /* pre-allocated */
- LASSERT(rs->rs_size >= rs_size);
- } else {
- rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
- if (rs == NULL)
- return -ENOMEM;
-
- rs->rs_size = rs_size;
- }
-
- rs->rs_svc_ctx = req->rq_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
- rs->rs_repbuf_len = rs_size - sizeof(*rs);
-
- lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
-
- req->rq_reply_state = rs;
- return 0;
-}
-
-static
-void plain_free_rs(struct ptlrpc_reply_state *rs)
-{
- LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
- atomic_dec(&rs->rs_svc_ctx->sc_refcount);
-
- if (!rs->rs_prealloc)
- kvfree(rs);
-}
-
-static
-int plain_authorize(struct ptlrpc_request *req)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct lustre_msg_v2 *msg = rs->rs_repbuf;
- struct plain_header *phdr;
- int len;
-
- LASSERT(rs);
- LASSERT(msg);
-
- if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
- len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
- req->rq_replen, 1);
- else
- len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
-
- msg->lm_secflvr = req->rq_flvr.sf_rpc;
-
- phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
- phdr->ph_ver = 0;
- phdr->ph_flags = 0;
- phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
-
- if (req->rq_pack_bulk)
- phdr->ph_flags |= PLAIN_FL_BULK;
-
- rs->rs_repdata_len = len;
-
- if (likely(req->rq_packed_final)) {
- if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
- req->rq_reply_off = plain_at_offset;
- else
- req->rq_reply_off = 0;
- } else {
- unsigned int hsize = 4;
-
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
- lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
- NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
- req->rq_reply_off = 0;
- }
-
- return 0;
-}
-
-static
-int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
- struct plain_bulk_token *tokenr;
- int rc;
-
- LASSERT(req->rq_bulk_write);
- LASSERT(req->rq_pack_bulk);
-
- bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
- bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
-
- bsdv->bsd_version = 0;
- bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
- bsdv->bsd_svc = bsdr->bsd_svc;
- bsdv->bsd_flags = 0;
-
- if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- return 0;
-
- rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- tokenr);
- if (rc) {
- bsdv->bsd_flags |= BSD_FL_ERR;
- CERROR("bulk write: server verify failed: %d\n", rc);
- }
-
- return rc;
-}
-
-static
-int plain_svc_wrap_bulk(struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc)
-{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
- struct plain_bulk_token *tokenv;
- int rc;
-
- LASSERT(req->rq_bulk_read);
- LASSERT(req->rq_pack_bulk);
-
- bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
-
- bsdv->bsd_version = 0;
- bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
- bsdv->bsd_svc = bsdr->bsd_svc;
- bsdv->bsd_flags = 0;
-
- if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- return 0;
-
- rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
- tokenv);
- if (rc) {
- CERROR("bulk read: server failed to compute checksum: %d\n",
- rc);
- } else {
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
- corrupt_bulk_data(desc);
- }
-
- return rc;
-}
-
-static struct ptlrpc_ctx_ops plain_ctx_ops = {
- .refresh = plain_ctx_refresh,
- .validate = plain_ctx_validate,
- .sign = plain_ctx_sign,
- .verify = plain_ctx_verify,
- .wrap_bulk = plain_cli_wrap_bulk,
- .unwrap_bulk = plain_cli_unwrap_bulk,
-};
-
-static struct ptlrpc_sec_cops plain_sec_cops = {
- .create_sec = plain_create_sec,
- .destroy_sec = plain_destroy_sec,
- .kill_sec = plain_kill_sec,
- .lookup_ctx = plain_lookup_ctx,
- .release_ctx = plain_release_ctx,
- .flush_ctx_cache = plain_flush_ctx_cache,
- .alloc_reqbuf = plain_alloc_reqbuf,
- .free_reqbuf = plain_free_reqbuf,
- .alloc_repbuf = plain_alloc_repbuf,
- .free_repbuf = plain_free_repbuf,
- .enlarge_reqbuf = plain_enlarge_reqbuf,
-};
-
-static struct ptlrpc_sec_sops plain_sec_sops = {
- .accept = plain_accept,
- .alloc_rs = plain_alloc_rs,
- .authorize = plain_authorize,
- .free_rs = plain_free_rs,
- .unwrap_bulk = plain_svc_unwrap_bulk,
- .wrap_bulk = plain_svc_wrap_bulk,
-};
-
-static struct ptlrpc_sec_policy plain_policy = {
- .sp_owner = THIS_MODULE,
- .sp_name = "plain",
- .sp_policy = SPTLRPC_POLICY_PLAIN,
- .sp_cops = &plain_sec_cops,
- .sp_sops = &plain_sec_sops,
-};
-
-int sptlrpc_plain_init(void)
-{
- __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int rc;
-
- buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
- plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
-
- rc = sptlrpc_register_policy(&plain_policy);
- if (rc)
- CERROR("failed to register: %d\n", rc);
-
- return rc;
-}
-
-void sptlrpc_plain_fini(void)
-{
- int rc;
-
- rc = sptlrpc_unregister_policy(&plain_policy);
- if (rc)
- CERROR("cannot unregister: %d\n", rc);
-}
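
plain_verify_bulk_csum() in the deleted sec_plain.c above verifies bulk data by recomputing the checksum and memcmp()ing it against the received token, and the OBD_FAIL hooks flip a single bit to exercise the failure path. The standalone sketch below follows the same verify-by-recompute pattern; the names are invented and a trivial XOR digest stands in for the real hash algorithms.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy 8-byte XOR digest -- a stand-in for the real bulk hash */
static void demo_digest(const void *data, size_t len, uint8_t out[8])
{
	const uint8_t *p = data;
	size_t i;

	memset(out, 0, 8);
	for (i = 0; i < len; i++)
		out[i % 8] ^= p[i];
}

/* return 0 if the received token matches a freshly computed digest */
static int demo_verify(const void *data, size_t len, const uint8_t token[8])
{
	uint8_t fresh[8];

	demo_digest(data, len, fresh);
	return memcmp(token, fresh, sizeof(fresh)) ? -1 : 0;
}

int main(void)
{
	const char payload[] = "bulk payload";
	uint8_t token[8];

	demo_digest(payload, sizeof(payload), token);
	printf("verify clean:     %d\n", demo_verify(payload, sizeof(payload), token));

	token[0] ^= 0x1;	/* single-bit corruption, like the failure hooks above */
	printf("verify corrupted: %d\n", demo_verify(payload, sizeof(payload), token));
	return 0;
}
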
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
deleted file mode 100644
index 353ac22296fa..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ /dev/null
@@ -1,2781 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2010, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lu_object.h"
-#include "../../include/linux/lnet/types.h"
-#include "ptlrpc_internal.h"
-
-/* The following are visible and mutable through /sys/module/ptlrpc */
-int test_req_buffer_pressure;
-module_param(test_req_buffer_pressure, int, 0444);
-MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
-module_param(at_min, int, 0644);
-MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
-module_param(at_max, int, 0644);
-MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
-module_param(at_history, int, 0644);
-MODULE_PARM_DESC(at_history,
- "Adaptive timeouts remember the slowest event that took place within this period (sec)");
-module_param(at_early_margin, int, 0644);
-MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
-module_param(at_extra, int, 0644);
-MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
-
-
-/* forward ref */
-static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
-static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
-static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
-
-/** Holds a list of all PTLRPC services */
-LIST_HEAD(ptlrpc_all_services);
-/** Used to protect the \e ptlrpc_all_services list */
-struct mutex ptlrpc_all_services_mutex;
-
-static struct ptlrpc_request_buffer_desc *
-ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request_buffer_desc *rqbd;
-
- rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable,
- svcpt->scp_cpt));
- if (rqbd == NULL)
- return NULL;
-
- rqbd->rqbd_svcpt = svcpt;
- rqbd->rqbd_refcount = 0;
- rqbd->rqbd_cbid.cbid_fn = request_in_callback;
- rqbd->rqbd_cbid.cbid_arg = rqbd;
- INIT_LIST_HEAD(&rqbd->rqbd_reqs);
- rqbd->rqbd_buffer = libcfs_kvzalloc_cpt(svc->srv_cptable,
- svcpt->scp_cpt,
- svc->srv_buf_size,
- GFP_KERNEL);
- if (rqbd->rqbd_buffer == NULL) {
- kfree(rqbd);
- return NULL;
- }
-
- spin_lock(&svcpt->scp_lock);
- list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
- svcpt->scp_nrqbds_total++;
- spin_unlock(&svcpt->scp_lock);
-
- return rqbd;
-}
-
-static void
-ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
-{
- struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
-
- LASSERT(rqbd->rqbd_refcount == 0);
- LASSERT(list_empty(&rqbd->rqbd_reqs));
-
- spin_lock(&svcpt->scp_lock);
- list_del(&rqbd->rqbd_list);
- svcpt->scp_nrqbds_total--;
- spin_unlock(&svcpt->scp_lock);
-
- kvfree(rqbd->rqbd_buffer);
- kfree(rqbd);
-}
-
-static int
-ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request_buffer_desc *rqbd;
- int rc = 0;
- int i;
-
- if (svcpt->scp_rqbd_allocating)
- goto try_post;
-
- spin_lock(&svcpt->scp_lock);
- /* check again with lock */
- if (svcpt->scp_rqbd_allocating) {
- /* NB: we might allow more than one thread in the future */
- LASSERT(svcpt->scp_rqbd_allocating == 1);
- spin_unlock(&svcpt->scp_lock);
- goto try_post;
- }
-
- svcpt->scp_rqbd_allocating++;
- spin_unlock(&svcpt->scp_lock);
-
-
- for (i = 0; i < svc->srv_nbuf_per_group; i++) {
- /* NB: another thread might have recycled enough rqbds, we
- * need to make sure it wouldn't over-allocate, see LU-1212. */
- if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
- break;
-
- rqbd = ptlrpc_alloc_rqbd(svcpt);
-
- if (rqbd == NULL) {
- CERROR("%s: Can't allocate request buffer\n",
- svc->srv_name);
- rc = -ENOMEM;
- break;
- }
- }
-
- spin_lock(&svcpt->scp_lock);
-
- LASSERT(svcpt->scp_rqbd_allocating == 1);
- svcpt->scp_rqbd_allocating--;
-
- spin_unlock(&svcpt->scp_lock);
-
- CDEBUG(D_RPCTRACE,
- "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
- svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
- svcpt->scp_nrqbds_total, rc);
-
- try_post:
- if (post && rc == 0)
- rc = ptlrpc_server_post_idle_rqbds(svcpt);
-
- return rc;
-}
-
-struct ptlrpc_hr_partition;
-
-struct ptlrpc_hr_thread {
- int hrt_id; /* thread ID */
- spinlock_t hrt_lock;
- wait_queue_head_t hrt_waitq;
- struct list_head hrt_queue; /* RS queue */
- struct ptlrpc_hr_partition *hrt_partition;
-};
-
-struct ptlrpc_hr_partition {
- /* # of started threads */
- atomic_t hrp_nstarted;
- /* # of stopped threads */
- atomic_t hrp_nstopped;
- /* cpu partition id */
- int hrp_cpt;
- /* round-robin rotor for choosing thread */
- int hrp_rotor;
- /* total number of threads on this partition */
- int hrp_nthrs;
- /* threads table */
- struct ptlrpc_hr_thread *hrp_thrs;
-};
-
-#define HRT_RUNNING 0
-#define HRT_STOPPING 1
-
-struct ptlrpc_hr_service {
- /* CPU partition table, it's just cfs_cpt_table for now */
- struct cfs_cpt_table *hr_cpt_table;
- /** controller sleep waitq */
- wait_queue_head_t hr_waitq;
- unsigned int hr_stopping;
- /** roundrobin rotor for non-affinity service */
- unsigned int hr_rotor;
- /* partition data */
- struct ptlrpc_hr_partition **hr_partitions;
-};
-
-/** reply handling service. */
-static struct ptlrpc_hr_service ptlrpc_hr;
-
-/**
- * Choose an hr thread to dispatch requests to.
- */
-static struct ptlrpc_hr_thread *
-ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_hr_partition *hrp;
- unsigned int rotor;
-
- if (svcpt->scp_cpt >= 0 &&
- svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
- /* directly match partition */
- hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];
-
- } else {
- rotor = ptlrpc_hr.hr_rotor++;
- rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);
-
- hrp = ptlrpc_hr.hr_partitions[rotor];
- }
-
- rotor = hrp->hrp_rotor++;
- return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
-}
-
-/**
- * Put the reply state into a queue for processing because we received
- * an ACK from the client
- */
-void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_hr_thread *hrt;
-
- LASSERT(list_empty(&rs->rs_list));
-
- hrt = ptlrpc_hr_select(rs->rs_svcpt);
-
- spin_lock(&hrt->hrt_lock);
- list_add_tail(&rs->rs_list, &hrt->hrt_queue);
- spin_unlock(&hrt->hrt_lock);
-
- wake_up(&hrt->hrt_waitq);
-}
-
-void
-ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
-{
- assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
- assert_spin_locked(&rs->rs_lock);
- LASSERT(rs->rs_difficult);
- rs->rs_scheduled_ever = 1; /* flag any notification attempt */
-
- if (rs->rs_scheduled) { /* being set up or already notified */
- return;
- }
-
- rs->rs_scheduled = 1;
- list_del_init(&rs->rs_list);
- ptlrpc_dispatch_difficult_reply(rs);
-}
-EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);
-
-static int
-ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_request_buffer_desc *rqbd;
- int rc;
- int posted = 0;
-
- for (;;) {
- spin_lock(&svcpt->scp_lock);
-
- if (list_empty(&svcpt->scp_rqbd_idle)) {
- spin_unlock(&svcpt->scp_lock);
- return posted;
- }
-
- rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
- list_del(&rqbd->rqbd_list);
-
- /* assume we will post successfully */
- svcpt->scp_nrqbds_posted++;
- list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
-
- spin_unlock(&svcpt->scp_lock);
-
- rc = ptlrpc_register_rqbd(rqbd);
- if (rc != 0)
- break;
-
- posted = 1;
- }
-
- spin_lock(&svcpt->scp_lock);
-
- svcpt->scp_nrqbds_posted--;
- list_del(&rqbd->rqbd_list);
- list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
-
- /* Don't complain if no request buffers are posted right now; LNET
- * won't drop requests because we set the portal lazy! */
-
- spin_unlock(&svcpt->scp_lock);
-
- return -1;
-}
-
-static void ptlrpc_at_timer(unsigned long castmeharder)
-{
- struct ptlrpc_service_part *svcpt;
-
- svcpt = (struct ptlrpc_service_part *)castmeharder;
-
- svcpt->scp_at_check = 1;
- svcpt->scp_at_checktime = cfs_time_current();
- wake_up(&svcpt->scp_waitq);
-}
-
-static void
-ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
- struct ptlrpc_service_conf *conf)
-{
- struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
- unsigned init;
- unsigned total;
- unsigned nthrs;
- int weight;
-
- /*
- * Common code for estimating & validating threads number.
-	 * A CPT-affinity service could have a per-CPT thread pool instead
-	 * of a global thread pool, which means users might not always
-	 * get the number of threads they requested in conf::tc_nthrs_user
-	 * even if they did set it. This is because we need to validate the
-	 * thread count for each CPT to guarantee each pool will have enough
-	 * threads to keep the service healthy.
- */
- init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
- init = max_t(int, init, tc->tc_nthrs_init);
-
- /* NB: please see comments in lustre_lnet.h for definition
- * details of these members */
- LASSERT(tc->tc_nthrs_max != 0);
-
- if (tc->tc_nthrs_user != 0) {
- /* In case there is a reason to test a service with many
-		 * threads, we use a less strict check here; it can
- * be up to 8 * nthrs_max */
- total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
- nthrs = total / svc->srv_ncpts;
- init = max(init, nthrs);
- goto out;
- }
-
- total = tc->tc_nthrs_max;
- if (tc->tc_nthrs_base == 0) {
-		/* don't care about the base number of threads per partition;
-		 * this is mostly for non-affinity services */
- nthrs = total / svc->srv_ncpts;
- goto out;
- }
-
- nthrs = tc->tc_nthrs_base;
- if (svc->srv_ncpts == 1) {
- int i;
-
-		/* NB: Increase the base number if this is a single partition
-		 * and the total number of cores/HTs is at least 4.
-		 * The result will always be < 2 * nthrs_base */
- weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
- for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
- (tc->tc_nthrs_base >> i) != 0; i++)
- nthrs += tc->tc_nthrs_base >> i;
- }
-
- if (tc->tc_thr_factor != 0) {
- int factor = tc->tc_thr_factor;
- const int fade = 4;
-
- /*
-		 * The user wants to increase the number of threads for
-		 * each CPU core/HT; most likely the factor is larger than
-		 * one thread/core because service threads are expected to
-		 * be blocked on locks or waiting for IO.
- */
- /*
-		 * Amdahl's law says that adding processors doesn't give
-		 * a linear increase in parallelism, so it's nonsense to
- * have too many threads no matter how many cores/HTs
- * there are.
- */
- /* weight is # of HTs */
- if (cpumask_weight(topology_sibling_cpumask(0)) > 1) {
- /* depress thread factor for hyper-thread */
- factor = factor - (factor >> 1) + (factor >> 3);
- }
-
- weight = cfs_cpt_weight(svc->srv_cptable, 0);
- LASSERT(weight > 0);
-
- for (; factor > 0 && weight > 0; factor--, weight -= fade)
- nthrs += min(weight, fade) * factor;
- }
-
- if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
- nthrs = max(tc->tc_nthrs_base,
- tc->tc_nthrs_max / svc->srv_ncpts);
- }
- out:
- nthrs = max(nthrs, tc->tc_nthrs_init);
- svc->srv_nthrs_cpt_limit = nthrs;
- svc->srv_nthrs_cpt_init = init;
-
- if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
- CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n",
- svc->srv_name, nthrs * svc->srv_ncpts,
- tc->tc_nthrs_max);
- }
-}
-
-/**
- * Initialize percpt data for a service
- */
-static int
-ptlrpc_service_part_init(struct ptlrpc_service *svc,
- struct ptlrpc_service_part *svcpt, int cpt)
-{
- struct ptlrpc_at_array *array;
- int size;
- int index;
- int rc;
-
- svcpt->scp_cpt = cpt;
- INIT_LIST_HEAD(&svcpt->scp_threads);
-
- /* rqbd and incoming request queue */
- spin_lock_init(&svcpt->scp_lock);
- INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
- INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
- INIT_LIST_HEAD(&svcpt->scp_req_incoming);
- init_waitqueue_head(&svcpt->scp_waitq);
- /* history request & rqbd list */
- INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
- INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
-
- /* active requests and hp requests */
- spin_lock_init(&svcpt->scp_req_lock);
-
- /* reply states */
- spin_lock_init(&svcpt->scp_rep_lock);
- INIT_LIST_HEAD(&svcpt->scp_rep_active);
- INIT_LIST_HEAD(&svcpt->scp_rep_idle);
- init_waitqueue_head(&svcpt->scp_rep_waitq);
- atomic_set(&svcpt->scp_nreps_difficult, 0);
-
- /* adaptive timeout */
- spin_lock_init(&svcpt->scp_at_lock);
- array = &svcpt->scp_at_array;
-
- size = at_est2timeout(at_max);
- array->paa_size = size;
- array->paa_count = 0;
- array->paa_deadline = -1;
-
- /* allocate memory for scp_at_array (ptlrpc_at_array) */
- array->paa_reqs_array =
- kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable, cpt));
- if (array->paa_reqs_array == NULL)
- return -ENOMEM;
-
- for (index = 0; index < size; index++)
- INIT_LIST_HEAD(&array->paa_reqs_array[index]);
-
- array->paa_reqs_count =
- kzalloc_node(sizeof(__u32) * size, GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable, cpt));
- if (array->paa_reqs_count == NULL)
- goto free_reqs_array;
-
- setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
- (unsigned long)svcpt);
-
- /* At SOW, service time should be quick; 10s seems generous. If client
- * timeout is less than this, we'll be sending an early reply. */
- at_init(&svcpt->scp_at_estimate, 10, 0);
-
- /* assign this before call ptlrpc_grow_req_bufs */
- svcpt->scp_service = svc;
- /* Now allocate the request buffers, but don't post them now */
- rc = ptlrpc_grow_req_bufs(svcpt, 0);
- /* We shouldn't be under memory pressure at startup, so
- * fail if we can't allocate all our buffers at this time. */
- if (rc != 0)
- goto free_reqs_count;
-
- return 0;
-
-free_reqs_count:
- kfree(array->paa_reqs_count);
- array->paa_reqs_count = NULL;
-free_reqs_array:
- kfree(array->paa_reqs_array);
- array->paa_reqs_array = NULL;
-
- return -ENOMEM;
-}
-
-/**
- * Initialize service on a given portal.
- * This includes starting service threads, allocating and posting rqbds, and
- * so on.
- */
-struct ptlrpc_service *
-ptlrpc_register_service(struct ptlrpc_service_conf *conf,
- struct kset *parent,
- struct dentry *debugfs_entry)
-{
- struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
- struct ptlrpc_service *service;
- struct ptlrpc_service_part *svcpt;
- struct cfs_cpt_table *cptable;
- __u32 *cpts = NULL;
- int ncpts;
- int cpt;
- int rc;
- int i;
-
- LASSERT(conf->psc_buf.bc_nbufs > 0);
- LASSERT(conf->psc_buf.bc_buf_size >=
- conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
- LASSERT(conf->psc_thr.tc_ctx_tags != 0);
-
- cptable = cconf->cc_cptable;
- if (cptable == NULL)
- cptable = cfs_cpt_table;
-
- if (!conf->psc_thr.tc_cpu_affinity) {
- ncpts = 1;
- } else {
- ncpts = cfs_cpt_number(cptable);
- if (cconf->cc_pattern != NULL) {
- struct cfs_expr_list *el;
-
- rc = cfs_expr_list_parse(cconf->cc_pattern,
- strlen(cconf->cc_pattern),
- 0, ncpts - 1, &el);
- if (rc != 0) {
- CERROR("%s: invalid CPT pattern string: %s",
- conf->psc_name, cconf->cc_pattern);
- return ERR_PTR(-EINVAL);
- }
-
- rc = cfs_expr_list_values(el, ncpts, &cpts);
- cfs_expr_list_free(el);
- if (rc <= 0) {
- CERROR("%s: failed to parse CPT array %s: %d\n",
- conf->psc_name, cconf->cc_pattern, rc);
- kfree(cpts);
- return ERR_PTR(rc < 0 ? rc : -EINVAL);
- }
- ncpts = rc;
- }
- }
-
- service = kzalloc(offsetof(struct ptlrpc_service, srv_parts[ncpts]),
- GFP_NOFS);
- if (!service) {
- kfree(cpts);
- return ERR_PTR(-ENOMEM);
- }
-
- service->srv_cptable = cptable;
- service->srv_cpts = cpts;
- service->srv_ncpts = ncpts;
-
- service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
- while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
- service->srv_cpt_bits++;
-
- /* public members */
- spin_lock_init(&service->srv_lock);
- service->srv_name = conf->psc_name;
- service->srv_watchdog_factor = conf->psc_watchdog_factor;
- INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
-
- /* buffer configuration */
- service->srv_nbuf_per_group = test_req_buffer_pressure ?
- 1 : conf->psc_buf.bc_nbufs;
- service->srv_max_req_size = conf->psc_buf.bc_req_max_size +
- SPTLRPC_MAX_PAYLOAD;
- service->srv_buf_size = conf->psc_buf.bc_buf_size;
- service->srv_rep_portal = conf->psc_buf.bc_rep_portal;
- service->srv_req_portal = conf->psc_buf.bc_req_portal;
-
- /* Increase max reply size to next power of two */
- service->srv_max_reply_size = 1;
- while (service->srv_max_reply_size <
- conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
- service->srv_max_reply_size <<= 1;
-
- service->srv_thread_name = conf->psc_thr.tc_thr_name;
- service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags;
- service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
- service->srv_ops = conf->psc_ops;
-
- for (i = 0; i < ncpts; i++) {
- if (!conf->psc_thr.tc_cpu_affinity)
- cpt = CFS_CPT_ANY;
- else
- cpt = cpts != NULL ? cpts[i] : i;
-
- svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS,
- cfs_cpt_spread_node(cptable, cpt));
- if (svcpt == NULL) {
- rc = -ENOMEM;
- goto failed;
- }
-
- service->srv_parts[i] = svcpt;
- rc = ptlrpc_service_part_init(service, svcpt, cpt);
- if (rc != 0)
- goto failed;
- }
-
- ptlrpc_server_nthreads_check(service, conf);
-
- rc = LNetSetLazyPortal(service->srv_req_portal);
- LASSERT(rc == 0);
-
- mutex_lock(&ptlrpc_all_services_mutex);
- list_add(&service->srv_list, &ptlrpc_all_services);
- mutex_unlock(&ptlrpc_all_services_mutex);
-
- if (parent) {
- rc = ptlrpc_sysfs_register_service(parent, service);
- if (rc)
- goto failed;
- }
-
- if (!IS_ERR_OR_NULL(debugfs_entry))
- ptlrpc_ldebugfs_register_service(debugfs_entry, service);
-
- rc = ptlrpc_service_nrs_setup(service);
- if (rc != 0)
- goto failed;
-
- CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
- service->srv_name, service->srv_req_portal);
-
- rc = ptlrpc_start_threads(service);
- if (rc != 0) {
- CERROR("Failed to start threads for service %s: %d\n",
- service->srv_name, rc);
- goto failed;
- }
-
- return service;
-failed:
- ptlrpc_unregister_service(service);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL(ptlrpc_register_service);
-
-/**
- * To actually free the request; must be called without holding svc_lock.
- * Note it is the caller's responsibility to unlink req->rq_list.
- */
-static void ptlrpc_server_free_request(struct ptlrpc_request *req)
-{
- LASSERT(atomic_read(&req->rq_refcount) == 0);
- LASSERT(list_empty(&req->rq_timed_list));
-
- /* DEBUG_REQ() assumes the reply state of a request with a valid
- * ref will not be destroyed until that reference is dropped. */
- ptlrpc_req_drop_rs(req);
-
- sptlrpc_svc_ctx_decref(req);
-
- if (req != &req->rq_rqbd->rqbd_req) {
- /* NB request buffers use an embedded
- * req if the incoming req unlinked the
- * MD; this isn't one of them! */
- ptlrpc_request_cache_free(req);
- }
-}
-
-/**
- * Drop a reference count of the request. If it reaches 0, we either
- * put it into the history list or free it immediately.
- */
-void ptlrpc_server_drop_request(struct ptlrpc_request *req)
-{
- struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
- struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- int refcount;
- struct list_head *tmp;
- struct list_head *nxt;
-
- if (!atomic_dec_and_test(&req->rq_refcount))
- return;
-
- if (req->rq_at_linked) {
- spin_lock(&svcpt->scp_at_lock);
- /* recheck with lock, in case it's unlinked by
- * ptlrpc_at_check_timed() */
- if (likely(req->rq_at_linked))
- ptlrpc_at_remove_timed(req);
- spin_unlock(&svcpt->scp_at_lock);
- }
-
- LASSERT(list_empty(&req->rq_timed_list));
-
- /* finalize request */
- if (req->rq_export) {
- class_export_put(req->rq_export);
- req->rq_export = NULL;
- }
-
- spin_lock(&svcpt->scp_lock);
-
- list_add(&req->rq_list, &rqbd->rqbd_reqs);
-
- refcount = --(rqbd->rqbd_refcount);
- if (refcount == 0) {
- /* request buffer is now idle: add to history */
- list_del(&rqbd->rqbd_list);
-
- list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
- svcpt->scp_hist_nrqbds++;
-
- /* cull some history?
- * I expect only about 1 or 2 rqbds need to be recycled here */
- while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
- rqbd = list_entry(svcpt->scp_hist_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
-
- list_del(&rqbd->rqbd_list);
- svcpt->scp_hist_nrqbds--;
-
- /* remove rqbd's reqs from svc's req history while
- * I've got the service lock */
- list_for_each(tmp, &rqbd->rqbd_reqs) {
- req = list_entry(tmp, struct ptlrpc_request,
- rq_list);
- /* Track the highest culled req seq */
- if (req->rq_history_seq >
- svcpt->scp_hist_seq_culled) {
- svcpt->scp_hist_seq_culled =
- req->rq_history_seq;
- }
- list_del(&req->rq_history_list);
- }
-
- spin_unlock(&svcpt->scp_lock);
-
- list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
- req = list_entry(rqbd->rqbd_reqs.next,
- struct ptlrpc_request,
- rq_list);
- list_del(&req->rq_list);
- ptlrpc_server_free_request(req);
- }
-
- spin_lock(&svcpt->scp_lock);
- /*
-			 * now all reqs, including the embedded req, have been
-			 * disposed of; schedule the request buffer for re-use.
- */
- LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
- 0);
- list_add_tail(&rqbd->rqbd_list,
- &svcpt->scp_rqbd_idle);
- }
-
- spin_unlock(&svcpt->scp_lock);
- } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
- /* If we are low on memory, we are not interested in history */
- list_del(&req->rq_list);
- list_del_init(&req->rq_history_list);
-
- /* Track the highest culled req seq */
- if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
- svcpt->scp_hist_seq_culled = req->rq_history_seq;
-
- spin_unlock(&svcpt->scp_lock);
-
- ptlrpc_server_free_request(req);
- } else {
- spin_unlock(&svcpt->scp_lock);
- }
-}
-
-/**
- * To finish a request: stop sending more early replies, and release
- * the request.
- */
-static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- ptlrpc_server_hpreq_fini(req);
-
- ptlrpc_server_drop_request(req);
-}
-
-/**
- * To finish an active request: stop sending more early replies, and release
- * the request. Should be called after we have finished handling the request.
- */
-static void ptlrpc_server_finish_active_request(
- struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- spin_lock(&svcpt->scp_req_lock);
- ptlrpc_nrs_req_stop_nolock(req);
- svcpt->scp_nreqs_active--;
- if (req->rq_hp)
- svcpt->scp_nhreqs_active--;
- spin_unlock(&svcpt->scp_req_lock);
-
- ptlrpc_nrs_req_finalize(req);
-
- if (req->rq_export != NULL)
- class_export_rpc_dec(req->rq_export);
-
- ptlrpc_server_finish_request(svcpt, req);
-}
-
-/**
- * Sanity check request \a req.
- * Return 0 if all is ok, error code otherwise.
- */
-static int ptlrpc_check_req(struct ptlrpc_request *req)
-{
- struct obd_device *obd = req->rq_export->exp_obd;
- int rc = 0;
-
- if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
- req->rq_export->exp_conn_cnt)) {
- DEBUG_REQ(D_RPCTRACE, req,
- "DROPPING req from old connection %d < %d",
- lustre_msg_get_conn_cnt(req->rq_reqmsg),
- req->rq_export->exp_conn_cnt);
- return -EEXIST;
- }
- if (unlikely(obd == NULL || obd->obd_fail)) {
- /*
- * Failing over, don't handle any more reqs, send
- * error response instead.
- */
- CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
- req, (obd != NULL) ? obd->obd_name : "unknown");
- rc = -ENODEV;
- } else if (lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
- !obd->obd_recovering) {
- DEBUG_REQ(D_ERROR, req,
- "Invalid replay without recovery");
- class_fail_export(req->rq_export);
- rc = -ENODEV;
- } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
- !obd->obd_recovering) {
- DEBUG_REQ(D_ERROR, req, "Invalid req with transno %llu without recovery",
- lustre_msg_get_transno(req->rq_reqmsg));
- class_fail_export(req->rq_export);
- rc = -ENODEV;
- }
-
- if (unlikely(rc < 0)) {
- req->rq_status = rc;
- ptlrpc_error(req);
- }
- return rc;
-}
-
-static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_at_array *array = &svcpt->scp_at_array;
- __s32 next;
-
- if (array->paa_count == 0) {
- del_timer(&svcpt->scp_at_timer);
- return;
- }
-
- /* Set timer for closest deadline */
- next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
- at_early_margin);
- if (next <= 0) {
- ptlrpc_at_timer((unsigned long)svcpt);
- } else {
- mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
- CDEBUG(D_INFO, "armed %s at %+ds\n",
- svcpt->scp_service->srv_name, next);
- }
-}
-
-/* Add rpc to early reply check list */
-static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
-{
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- struct ptlrpc_at_array *array = &svcpt->scp_at_array;
- struct ptlrpc_request *rq = NULL;
- __u32 index;
-
- if (AT_OFF)
- return 0;
-
- if (req->rq_no_reply)
- return 0;
-
- if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
- return -ENOSYS;
-
- spin_lock(&svcpt->scp_at_lock);
- LASSERT(list_empty(&req->rq_timed_list));
-
- div_u64_rem(req->rq_deadline, array->paa_size, &index);
- if (array->paa_reqs_count[index] > 0) {
- /* latest rpcs will have the latest deadlines in the list,
- * so search backward. */
- list_for_each_entry_reverse(rq,
- &array->paa_reqs_array[index],
- rq_timed_list) {
- if (req->rq_deadline >= rq->rq_deadline) {
- list_add(&req->rq_timed_list,
- &rq->rq_timed_list);
- break;
- }
- }
- }
-
- /* Add the request at the head of the list */
- if (list_empty(&req->rq_timed_list))
- list_add(&req->rq_timed_list,
- &array->paa_reqs_array[index]);
-
- spin_lock(&req->rq_lock);
- req->rq_at_linked = 1;
- spin_unlock(&req->rq_lock);
- req->rq_at_index = index;
- array->paa_reqs_count[index]++;
- array->paa_count++;
- if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
- array->paa_deadline = req->rq_deadline;
- ptlrpc_at_set_timer(svcpt);
- }
- spin_unlock(&svcpt->scp_at_lock);
-
- return 0;
-}
-
-static void
-ptlrpc_at_remove_timed(struct ptlrpc_request *req)
-{
- struct ptlrpc_at_array *array;
-
- array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;
-
-	/* NB: must be called while holding svcpt::scp_at_lock */
- LASSERT(!list_empty(&req->rq_timed_list));
- list_del_init(&req->rq_timed_list);
-
- spin_lock(&req->rq_lock);
- req->rq_at_linked = 0;
- spin_unlock(&req->rq_lock);
-
- array->paa_reqs_count[req->rq_at_index]--;
- array->paa_count--;
-}
-
-static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
-{
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- struct ptlrpc_request *reqcopy;
- struct lustre_msg *reqmsg;
- long olddl = req->rq_deadline - ktime_get_real_seconds();
- time64_t newdl;
- int rc;
-
- /* deadline is when the client expects us to reply, margin is the
- difference between clients' and servers' expectations */
- DEBUG_REQ(D_ADAPTTO, req,
- "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
- AT_OFF ? "AT off - not " : "",
- olddl, olddl - at_get(&svcpt->scp_at_estimate),
- at_get(&svcpt->scp_at_estimate), at_extra);
-
- if (AT_OFF)
- return 0;
-
- if (olddl < 0) {
- DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?",
- olddl, at_early_margin);
-
- /* Return an error so we're not re-added to the timed list. */
- return -ETIMEDOUT;
- }
-
- if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support");
- return -ENOSYS;
- }
-
- if (req->rq_export &&
- lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
- /* During recovery, we don't want to send too many early
- * replies, but on the other hand we want to make sure the
- * client has enough time to resend if the rpc is lost. So
- * during the recovery period send at least 4 early replies,
- * spacing them every at_extra if we can. at_estimate should
- * always equal this fixed value during recovery. */
- at_measured(&svcpt->scp_at_estimate, min(at_extra,
- req->rq_export->exp_obd->obd_recovery_timeout / 4));
- } else {
- /* Fake our processing time into the future to ask the clients
- * for some extra amount of time */
- at_measured(&svcpt->scp_at_estimate, at_extra +
- ktime_get_real_seconds() -
- req->rq_arrival_time.tv_sec);
-
- /* Check to see if we've actually increased the deadline -
- * we may be past adaptive_max */
- if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
- at_get(&svcpt->scp_at_estimate)) {
- DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
- olddl, req->rq_arrival_time.tv_sec +
- at_get(&svcpt->scp_at_estimate) -
- ktime_get_real_seconds());
- return -ETIMEDOUT;
- }
- }
- newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate);
-
- reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (reqcopy == NULL)
- return -ENOMEM;
- reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS);
- if (!reqmsg) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- *reqcopy = *req;
- reqcopy->rq_reply_state = NULL;
- reqcopy->rq_rep_swab_mask = 0;
- reqcopy->rq_pack_bulk = 0;
- reqcopy->rq_pack_udesc = 0;
- reqcopy->rq_packed_final = 0;
- sptlrpc_svc_ctx_addref(reqcopy);
- /* We only need the reqmsg for the magic */
- reqcopy->rq_reqmsg = reqmsg;
- memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
-
- LASSERT(atomic_read(&req->rq_refcount));
- /** if it is last refcount then early reply isn't needed */
- if (atomic_read(&req->rq_refcount) == 1) {
- DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n");
- rc = -EINVAL;
- goto out;
- }
-
- /* Connection ref */
- reqcopy->rq_export = class_conn2export(
- lustre_msg_get_handle(reqcopy->rq_reqmsg));
- if (reqcopy->rq_export == NULL) {
- rc = -ENODEV;
- goto out;
- }
-
- /* RPC ref */
- class_export_rpc_inc(reqcopy->rq_export);
- if (reqcopy->rq_export->exp_obd &&
- reqcopy->rq_export->exp_obd->obd_fail) {
- rc = -ENODEV;
- goto out_put;
- }
-
- rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
- if (rc)
- goto out_put;
-
- rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
-
- if (!rc) {
- /* Adjust our own deadline to what we told the client */
- req->rq_deadline = newdl;
- req->rq_early_count++; /* number sent, server side */
- } else {
- DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
- }
-
- /* Free the (early) reply state from lustre_pack_reply.
-	   (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
- ptlrpc_req_drop_rs(reqcopy);
-
-out_put:
- class_export_rpc_dec(reqcopy->rq_export);
- class_export_put(reqcopy->rq_export);
-out:
- sptlrpc_svc_ctx_decref(reqcopy);
- kvfree(reqmsg);
-out_free:
- ptlrpc_request_cache_free(reqcopy);
- return rc;
-}
-
-/* Send early replies to everybody expiring within at_early_margin
- asking for at_extra time */
-static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_at_array *array = &svcpt->scp_at_array;
- struct ptlrpc_request *rq, *n;
- struct list_head work_list;
- __u32 index, count;
- time64_t deadline;
- time64_t now = ktime_get_real_seconds();
- long delay;
- int first, counter = 0;
-
- spin_lock(&svcpt->scp_at_lock);
- if (svcpt->scp_at_check == 0) {
- spin_unlock(&svcpt->scp_at_lock);
- return 0;
- }
- delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
- svcpt->scp_at_check = 0;
-
- if (array->paa_count == 0) {
- spin_unlock(&svcpt->scp_at_lock);
- return 0;
- }
-
- /* The timer went off, but maybe the nearest rpc already completed. */
- first = array->paa_deadline - now;
- if (first > at_early_margin) {
- /* We've still got plenty of time. Reset the timer. */
- ptlrpc_at_set_timer(svcpt);
- spin_unlock(&svcpt->scp_at_lock);
- return 0;
- }
-
- /* We're close to a timeout, and we don't know how much longer the
- server will take. Send early replies to everyone expiring soon. */
- INIT_LIST_HEAD(&work_list);
- deadline = -1;
- div_u64_rem(array->paa_deadline, array->paa_size, &index);
- count = array->paa_count;
- while (count > 0) {
- count -= array->paa_reqs_count[index];
- list_for_each_entry_safe(rq, n,
- &array->paa_reqs_array[index],
- rq_timed_list) {
- if (rq->rq_deadline > now + at_early_margin) {
- /* update the earliest deadline */
- if (deadline == -1 ||
- rq->rq_deadline < deadline)
- deadline = rq->rq_deadline;
- break;
- }
-
- ptlrpc_at_remove_timed(rq);
- /**
- * ptlrpc_server_drop_request() may drop
- * refcount to 0 already. Let's check this and
- * don't add entry to work_list
- */
- if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
- list_add(&rq->rq_timed_list, &work_list);
- counter++;
- }
-
- if (++index >= array->paa_size)
- index = 0;
- }
- array->paa_deadline = deadline;
- /* we have a new earliest deadline, restart the timer */
- ptlrpc_at_set_timer(svcpt);
-
- spin_unlock(&svcpt->scp_at_lock);
-
- CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early replies\n",
- first, at_extra, counter);
- if (first < 0) {
- /* We're already past request deadlines before we even get a
- chance to send early replies */
- LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
- svcpt->scp_service->srv_name);
- CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
- counter, svcpt->scp_nreqs_incoming,
- svcpt->scp_nreqs_active,
- at_get(&svcpt->scp_at_estimate), delay);
- }
-
-	/* we took an additional refcount so entries can't be deleted from the
-	 * list; no locking is needed */
- while (!list_empty(&work_list)) {
- rq = list_entry(work_list.next, struct ptlrpc_request,
- rq_timed_list);
- list_del_init(&rq->rq_timed_list);
-
- if (ptlrpc_at_send_early_reply(rq) == 0)
- ptlrpc_at_add_timed(rq);
-
- ptlrpc_server_drop_request(rq);
- }
-
- return 1; /* return "did_something" for liblustre */
-}
-
-/**
- * Put the request to the export list if the request may become
- * a high priority one.
- */
-static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- int rc = 0;
-
- if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
- rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
- if (rc < 0)
- return rc;
- LASSERT(rc == 0);
- }
- if (req->rq_export && req->rq_ops) {
-		/* Perform a request-specific check. We should do this check
-		 * before the request is added to the exp_hp_rpcs list, otherwise
-		 * it may hit the swab race described in LU-1044. */
- if (req->rq_ops->hpreq_check) {
- rc = req->rq_ops->hpreq_check(req);
- /**
- * XXX: Out of all current
- * ptlrpc_hpreq_ops::hpreq_check(), only
- * ldlm_cancel_hpreq_check() can return an error code;
- * other functions assert in similar places, which seems
- * odd. What also does not seem right is that handlers
- * for those RPCs do not assert on the same checks, but
- * rather handle the error cases. e.g. see
- * ost_rw_hpreq_check(), and ost_brw_read(),
- * ost_brw_write().
- */
- if (rc < 0)
- return rc;
- LASSERT(rc == 0 || rc == 1);
- }
-
- spin_lock_bh(&req->rq_export->exp_rpc_lock);
- list_add(&req->rq_exp_list,
- &req->rq_export->exp_hp_rpcs);
- spin_unlock_bh(&req->rq_export->exp_rpc_lock);
- }
-
- ptlrpc_nrs_req_initialize(svcpt, req, rc);
-
- return rc;
-}
-
-/** Remove the request from the export list. */
-static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
-{
- if (req->rq_export && req->rq_ops) {
-		/* refresh the lock timeout again so that the client has more
-		 * room to send a lock cancel RPC. */
- if (req->rq_ops->hpreq_fini)
- req->rq_ops->hpreq_fini(req);
-
- spin_lock_bh(&req->rq_export->exp_rpc_lock);
- list_del_init(&req->rq_exp_list);
- spin_unlock_bh(&req->rq_export->exp_rpc_lock);
- }
-}
-
-static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_request *req)
-{
- int rc;
-
- rc = ptlrpc_server_hpreq_init(svcpt, req);
- if (rc < 0)
- return rc;
-
- ptlrpc_nrs_req_add(svcpt, req, !!rc);
-
- return 0;
-}
-
-/**
- * Whether handling a high priority request is allowed.
- * The caller may call this without any lock, but needs to hold
- * ptlrpc_service_part::scp_req_lock to get a reliable result
- */
-static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- int running = svcpt->scp_nthrs_running;
-
- if (!nrs_svcpt_has_hp(svcpt))
- return false;
-
- if (force)
- return true;
-
- if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
- CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
- /* leave just 1 thread for normal RPCs */
- running = PTLRPC_NTHRS_INIT;
- if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
- running += 1;
- }
-
- if (svcpt->scp_nreqs_active >= running - 1)
- return false;
-
- if (svcpt->scp_nhreqs_active == 0)
- return true;
-
- return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
- svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
-}
-
-static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- return ptlrpc_server_allow_high(svcpt, force) &&
- ptlrpc_nrs_req_pending_nolock(svcpt, true);
-}
-
-/**
- * Only allow normal priority requests on a service that has a high-priority
- * queue if forced (i.e. cleanup), if there are other high priority requests
- * already being processed (i.e. those threads can service more high-priority
- * requests), or if there are enough idle threads that a later thread can do
- * a high priority request.
- * The caller may call this without any lock, but needs to hold
- * ptlrpc_service_part::scp_req_lock to get a reliable result
- */
-static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- int running = svcpt->scp_nthrs_running;
- if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
- CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
- /* leave just 1 thread for normal RPCs */
- running = PTLRPC_NTHRS_INIT;
- if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
- running += 1;
- }
-
- if (force ||
- svcpt->scp_nreqs_active < running - 2)
- return true;
-
- if (svcpt->scp_nreqs_active >= running - 1)
- return false;
-
- return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
-}
-
-static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
- bool force)
-{
- return ptlrpc_server_allow_normal(svcpt, force) &&
- ptlrpc_nrs_req_pending_nolock(svcpt, false);
-}
-
-/**
- * Returns true if there are requests available in incoming
- * request queue for processing and it is allowed to fetch them.
- * The caller may call this without any lock, but needs to hold
- * ptlrpc_service_part::scp_req_lock to get a reliable result
- * \see ptlrpc_server_allow_normal
- * \see ptlrpc_server_allow_high
- */
-static inline bool
-ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
-{
- return ptlrpc_server_high_pending(svcpt, force) ||
- ptlrpc_server_normal_pending(svcpt, force);
-}
-
-/**
- * Fetch a request for processing from queue of unprocessed requests.
- * Favors high-priority requests.
- * Returns a pointer to fetched request.
- */
-static struct ptlrpc_request *
-ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
-{
- struct ptlrpc_request *req = NULL;
-
- spin_lock(&svcpt->scp_req_lock);
-
- if (ptlrpc_server_high_pending(svcpt, force)) {
- req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
- if (req != NULL) {
- svcpt->scp_hreq_count++;
- goto got_request;
- }
- }
-
- if (ptlrpc_server_normal_pending(svcpt, force)) {
- req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
- if (req != NULL) {
- svcpt->scp_hreq_count = 0;
- goto got_request;
- }
- }
-
- spin_unlock(&svcpt->scp_req_lock);
- return NULL;
-
-got_request:
- svcpt->scp_nreqs_active++;
- if (req->rq_hp)
- svcpt->scp_nhreqs_active++;
-
- spin_unlock(&svcpt->scp_req_lock);
-
- if (likely(req->rq_export))
- class_export_rpc_inc(req->rq_export);
-
- return req;
-}
-
-/**
- * Handle freshly incoming reqs, add to timed early reply list,
- * pass on to regular request queue.
- * All incoming requests pass through here before getting into
- * ptlrpc_server_handle_req later on.
- */
-static int
-ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_thread *thread)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request *req;
- __u32 deadline;
- int rc;
-
- spin_lock(&svcpt->scp_lock);
- if (list_empty(&svcpt->scp_req_incoming)) {
- spin_unlock(&svcpt->scp_lock);
- return 0;
- }
-
- req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
- list_del_init(&req->rq_list);
- svcpt->scp_nreqs_incoming--;
- /* Consider this still a "queued" request as far as stats are
- * concerned */
- spin_unlock(&svcpt->scp_lock);
-
- /* go through security check/transform */
- rc = sptlrpc_svc_unwrap_request(req);
- switch (rc) {
- case SECSVC_OK:
- break;
- case SECSVC_COMPLETE:
- target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
- goto err_req;
- case SECSVC_DROP:
- goto err_req;
- default:
- LBUG();
- }
-
- /*
-	 * for a null-flavored rpc, the msg has been unpacked by sptlrpc,
-	 * although redoing it wouldn't be harmful.
- */
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
- rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
- if (rc != 0) {
- CERROR("error unpacking request: ptl %d from %s x%llu\n",
- svc->srv_req_portal, libcfs_id2str(req->rq_peer),
- req->rq_xid);
- goto err_req;
- }
- }
-
- rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
- if (rc) {
- CERROR("error unpacking ptlrpc body: ptl %d from %s x%llu\n",
- svc->srv_req_portal, libcfs_id2str(req->rq_peer),
- req->rq_xid);
- goto err_req;
- }
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
- lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
- CERROR("drop incoming rpc opc %u, x%llu\n",
- cfs_fail_val, req->rq_xid);
- goto err_req;
- }
-
- rc = -EINVAL;
- if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
- CERROR("wrong packet type received (type=%u) from %s\n",
- lustre_msg_get_type(req->rq_reqmsg),
- libcfs_id2str(req->rq_peer));
- goto err_req;
- }
-
- switch (lustre_msg_get_opc(req->rq_reqmsg)) {
- case MDS_WRITEPAGE:
- case OST_WRITE:
- req->rq_bulk_write = 1;
- break;
- case MDS_READPAGE:
- case OST_READ:
- case MGS_CONFIG_READ:
- req->rq_bulk_read = 1;
- break;
- }
-
- CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid);
-
- req->rq_export = class_conn2export(
- lustre_msg_get_handle(req->rq_reqmsg));
- if (req->rq_export) {
- rc = ptlrpc_check_req(req);
- if (rc == 0) {
- rc = sptlrpc_target_export_check(req->rq_export, req);
- if (rc)
- DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,");
- }
-
- if (rc)
- goto err_req;
- }
-
- /* req_in handling should/must be fast */
- if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
- DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
- (long)(ktime_get_real_seconds() -
- req->rq_arrival_time.tv_sec));
-
- /* Set rpc server deadline and add it to the timed list */
- deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
- MSGHDR_AT_SUPPORT) ?
- /* The max time the client expects us to take */
- lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
- req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
- if (unlikely(deadline == 0)) {
- DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
- goto err_req;
- }
-
- req->rq_svc_thread = thread;
-
- ptlrpc_at_add_timed(req);
-
- /* Move it over to the request processing queue */
- rc = ptlrpc_server_request_add(svcpt, req);
- if (rc)
- goto err_req;
-
- wake_up(&svcpt->scp_waitq);
- return 1;
-
-err_req:
- ptlrpc_server_finish_request(svcpt, req);
-
- return 1;
-}
-
-/**
- * Main incoming request handling logic.
- * Calls handler function from service to do actual processing.
- */
-static int
-ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_thread *thread)
-{
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request *request;
- struct timespec64 work_start;
- struct timespec64 work_end;
- struct timespec64 timediff;
- struct timespec64 arrived;
- unsigned long timediff_usecs;
- unsigned long arrived_usecs;
- int rc;
- int fail_opc = 0;
-
- request = ptlrpc_server_request_get(svcpt, false);
- if (request == NULL)
- return 0;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
- fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
- else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
- fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
-
- if (unlikely(fail_opc)) {
- if (request->rq_export && request->rq_ops)
- OBD_FAIL_TIMEOUT(fail_opc, 4);
- }
-
- ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
- libcfs_debug_dumplog();
-
- ktime_get_real_ts64(&work_start);
- timediff = timespec64_sub(work_start, request->rq_arrival_time);
- timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
- timediff.tv_nsec / NSEC_PER_USEC;
- if (likely(svc->srv_stats != NULL)) {
- lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
- timediff_usecs);
- lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
- svcpt->scp_nreqs_incoming);
- lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
- svcpt->scp_nreqs_active);
- lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
- at_get(&svcpt->scp_at_estimate));
- }
-
- rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
- if (rc) {
- CERROR("Failure to initialize session: %d\n", rc);
- goto out_req;
- }
- request->rq_session.lc_thread = thread;
- request->rq_session.lc_cookie = 0x5;
- lu_context_enter(&request->rq_session);
-
- CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
-
- request->rq_svc_thread = thread;
- if (thread)
- request->rq_svc_thread->t_env->le_ses = &request->rq_session;
-
- if (likely(request->rq_export)) {
- if (unlikely(ptlrpc_check_req(request)))
- goto put_conn;
- }
-
- /* Discard requests queued for longer than the deadline.
- The deadline is increased if we send an early reply. */
- if (ktime_get_real_seconds() > request->rq_deadline) {
- DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n",
- libcfs_id2str(request->rq_peer),
- (long)(request->rq_deadline -
- request->rq_arrival_time.tv_sec),
- (long)(ktime_get_real_seconds() -
- request->rq_deadline));
- goto put_conn;
- }
-
- CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d\n",
- current_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg));
-
- if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
- CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
-
- rc = svc->srv_ops.so_req_handler(request);
-
- ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
-
-put_conn:
- lu_context_exit(&request->rq_session);
- lu_context_fini(&request->rq_session);
-
- if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
- DEBUG_REQ(D_WARNING, request,
- "Request took longer than estimated (%lld:%llds); "
- "client may timeout.",
- (s64)request->rq_deadline -
- request->rq_arrival_time.tv_sec,
- (s64)ktime_get_real_seconds() - request->rq_deadline);
- }
-
- ktime_get_real_ts64(&work_end);
- timediff = timespec64_sub(work_end, work_start);
- timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
- timediff.tv_nsec / NSEC_PER_USEC;
- arrived = timespec64_sub(work_end, request->rq_arrival_time);
- arrived_usecs = arrived.tv_sec * USEC_PER_SEC +
- arrived.tv_nsec / NSEC_PER_USEC;
- CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
- current_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg),
- request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg),
- timediff_usecs,
- arrived_usecs,
- (request->rq_repmsg ?
- lustre_msg_get_transno(request->rq_repmsg) :
- request->rq_transno),
- request->rq_status,
- (request->rq_repmsg ?
- lustre_msg_get_status(request->rq_repmsg) : -999));
- if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
- __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
- int opc = opcode_offset(op);
- if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
- LASSERT(opc < LUSTRE_MAX_OPCODES);
- lprocfs_counter_add(svc->srv_stats,
- opc + EXTRA_MAX_OPCODES,
- timediff_usecs);
- }
- }
- if (unlikely(request->rq_early_count)) {
- DEBUG_REQ(D_ADAPTTO, request,
- "sent %d early replies before finishing in %llds",
- request->rq_early_count,
- (s64)work_end.tv_sec -
- request->rq_arrival_time.tv_sec);
- }
-
-out_req:
- ptlrpc_server_finish_active_request(svcpt, request);
-
- return 1;
-}
-
-/**
- * An internal function to process a single reply state object.
- */
-static int
-ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
-{
- struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct obd_export *exp;
- int nlocks;
- int been_handled;
-
- exp = rs->rs_export;
-
- LASSERT(rs->rs_difficult);
- LASSERT(rs->rs_scheduled);
- LASSERT(list_empty(&rs->rs_list));
-
- spin_lock(&exp->exp_lock);
- /* Noop if removed already */
- list_del_init(&rs->rs_exp_list);
- spin_unlock(&exp->exp_lock);
-
- /* The disk commit callback holds exp_uncommitted_replies_lock while it
- * iterates over newly committed replies, removing them from
- * exp_uncommitted_replies. It then drops this lock and schedules the
- * replies it found for handling here.
- *
- * We can avoid contention for exp_uncommitted_replies_lock between the
- * HRT threads and further commit callbacks by checking rs_committed
- * which is set in the commit callback while it holds both
-	 * rs_lock and exp_uncommitted_replies.
- *
- * If we see rs_committed clear, the commit callback _may_ not have
- * handled this reply yet and we race with it to grab
- * exp_uncommitted_replies_lock before removing the reply from
- * exp_uncommitted_replies. Note that if we lose the race and the
- * reply has already been removed, list_del_init() is a noop.
- *
- * If we see rs_committed set, we know the commit callback is handling,
- * or has handled this reply since store reordering might allow us to
- * see rs_committed set out of sequence. But since this is done
- * holding rs_lock, we can be sure it has all completed once we hold
- * rs_lock, which we do right next.
- */
- if (!rs->rs_committed) {
- spin_lock(&exp->exp_uncommitted_replies_lock);
- list_del_init(&rs->rs_obd_list);
- spin_unlock(&exp->exp_uncommitted_replies_lock);
- }
-
- spin_lock(&rs->rs_lock);
-
- been_handled = rs->rs_handled;
- rs->rs_handled = 1;
-
- nlocks = rs->rs_nlocks; /* atomic "steal", but */
- rs->rs_nlocks = 0; /* locks still on rs_locks! */
-
- if (nlocks == 0 && !been_handled) {
- /* If we see this, we should already have seen the warning
- * in mds_steal_ack_locks() */
- CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n",
- rs,
- rs->rs_xid, rs->rs_transno, rs->rs_opc,
- libcfs_nid2str(exp->exp_connection->c_peer.nid));
- }
-
- if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
- spin_unlock(&rs->rs_lock);
-
- if (!been_handled && rs->rs_on_net) {
- LNetMDUnlink(rs->rs_md_h);
- /* Ignore return code; we're racing with completion */
- }
-
- while (nlocks-- > 0)
- ldlm_lock_decref(&rs->rs_locks[nlocks],
- rs->rs_modes[nlocks]);
-
- spin_lock(&rs->rs_lock);
- }
-
- rs->rs_scheduled = 0;
-
- if (!rs->rs_on_net) {
- /* Off the net */
- spin_unlock(&rs->rs_lock);
-
- class_export_put(exp);
- rs->rs_export = NULL;
- ptlrpc_rs_decref(rs);
- if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
- svc->srv_is_stopping)
- wake_up_all(&svcpt->scp_waitq);
- return 1;
- }
-
- /* still on the net; callback will schedule */
- spin_unlock(&rs->rs_lock);
- return 1;
-}
-
-
-static void
-ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
-{
- int avail = svcpt->scp_nrqbds_posted;
- int low_water = test_req_buffer_pressure ? 0 :
- svcpt->scp_service->srv_nbuf_per_group / 2;
-
- /* NB I'm not locking; just looking. */
-
- /* CAVEAT EMPTOR: We might be allocating buffers here because we've
- * allowed the request history to grow out of control. We could put a
- * sanity check on that here and cull some history if we need the
- * space. */
-
- if (avail <= low_water)
- ptlrpc_grow_req_bufs(svcpt, 1);
-
- if (svcpt->scp_service->srv_stats) {
- lprocfs_counter_add(svcpt->scp_service->srv_stats,
- PTLRPC_REQBUF_AVAIL_CNTR, avail);
- }
-}
-
-static int
-ptlrpc_retry_rqbds(void *arg)
-{
- struct ptlrpc_service_part *svcpt = (struct ptlrpc_service_part *)arg;
-
- svcpt->scp_rqbd_timeout = 0;
- return -ETIMEDOUT;
-}
-
-static inline int
-ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_nreqs_active <
- svcpt->scp_nthrs_running - 1 -
- (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
-}
-
-/**
- * Whether we are allowed to create more threads.
- * The caller may call this without any lock, but needs to hold
- * ptlrpc_service_part::scp_lock to get a reliable result
- */
-static inline int
-ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_nthrs_running +
- svcpt->scp_nthrs_starting <
- svcpt->scp_service->srv_nthrs_cpt_limit;
-}
-
-/**
- * too many requests and allowed to create more threads
- */
-static inline int
-ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
-{
- return !ptlrpc_threads_enough(svcpt) &&
- ptlrpc_threads_increasable(svcpt);
-}
-
-static inline int
-ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
-{
- return thread_is_stopping(thread) ||
- thread->t_svcpt->scp_service->srv_is_stopping;
-}
-
-static inline int
-ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
-{
- return !list_empty(&svcpt->scp_rqbd_idle) &&
- svcpt->scp_rqbd_timeout == 0;
-}
-
-static inline int
-ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
-{
- return svcpt->scp_at_check;
-}
-
-/**
- * Whether requests are waiting for preprocessing.
- * The caller may call this without any lock, but needs to hold
- * ptlrpc_service_part::scp_lock to get a reliable result
- */
-static inline int
-ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
-{
- return !list_empty(&svcpt->scp_req_incoming);
-}
-
-static __attribute__((__noinline__)) int
-ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
- struct ptlrpc_thread *thread)
-{
- /* Don't exit while there are replies to be handled */
- struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
- ptlrpc_retry_rqbds, svcpt);
-
- /* XXX: Add this back when libcfs watchdog is merged upstream
- lc_watchdog_disable(thread->t_watchdog);
- */
-
- cond_resched();
-
- l_wait_event_exclusive_head(svcpt->scp_waitq,
- ptlrpc_thread_stopping(thread) ||
- ptlrpc_server_request_incoming(svcpt) ||
- ptlrpc_server_request_pending(svcpt, false) ||
- ptlrpc_rqbd_pending(svcpt) ||
- ptlrpc_at_check(svcpt), &lwi);
-
- if (ptlrpc_thread_stopping(thread))
- return -EINTR;
-
- /*
- lc_watchdog_touch(thread->t_watchdog,
- ptlrpc_server_get_timeout(svcpt));
- */
- return 0;
-}
-
-/**
- * Main thread body for service threads.
- * Waits in a loop for new requests to process to appear.
- * Every time an incoming request is added to its queue, a waitq
- * is woken up and one of the threads will handle it.
- */
-static int ptlrpc_main(void *arg)
-{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
- struct ptlrpc_service_part *svcpt = thread->t_svcpt;
- struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_reply_state *rs;
- struct group_info *ginfo = NULL;
- struct lu_env *env;
- int counter = 0, rc = 0;
-
- thread->t_pid = current_pid();
- unshare_fs_struct();
-
- /* NB: we will call cfs_cpt_bind() for all threads, because we
-	 * might want to run the lustre server only on a subset of system CPUs;
-	 * in that case ->scp_cpt is CFS_CPT_ANY */
- rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
- if (rc != 0) {
- CWARN("%s: failed to bind %s on CPT %d\n",
- svc->srv_name, thread->t_name, svcpt->scp_cpt);
- }
-
- ginfo = groups_alloc(0);
- if (!ginfo) {
- rc = -ENOMEM;
- goto out;
- }
-
- set_current_groups(ginfo);
- put_group_info(ginfo);
-
- if (svc->srv_ops.so_thr_init != NULL) {
- rc = svc->srv_ops.so_thr_init(thread);
- if (rc)
- goto out;
- }
-
- env = kzalloc(sizeof(*env), GFP_NOFS);
- if (!env) {
- rc = -ENOMEM;
- goto out_srv_fini;
- }
-
- rc = lu_context_init(&env->le_ctx,
- svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
- if (rc)
- goto out_srv_fini;
-
- thread->t_env = env;
- env->le_ctx.lc_thread = thread;
- env->le_ctx.lc_cookie = 0x6;
-
- while (!list_empty(&svcpt->scp_rqbd_idle)) {
- rc = ptlrpc_server_post_idle_rqbds(svcpt);
- if (rc >= 0)
- continue;
-
- CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
- svc->srv_name, svcpt->scp_cpt, rc);
- goto out_srv_fini;
- }
-
- /* Alloc reply state structure for this one */
- rs = libcfs_kvzalloc(svc->srv_max_reply_size, GFP_NOFS);
- if (!rs) {
- rc = -ENOMEM;
- goto out_srv_fini;
- }
-
- spin_lock(&svcpt->scp_lock);
-
- LASSERT(thread_is_starting(thread));
- thread_clear_flags(thread, SVC_STARTING);
-
- LASSERT(svcpt->scp_nthrs_starting == 1);
- svcpt->scp_nthrs_starting--;
-
- /* SVC_STOPPING may already be set here if someone else is trying
- * to stop the service while this new thread has been dynamically
- * forked. We still set SVC_RUNNING to let our creator know that
- * we are now running, however we will exit as soon as possible */
- thread_add_flags(thread, SVC_RUNNING);
- svcpt->scp_nthrs_running++;
- spin_unlock(&svcpt->scp_lock);
-
- /* wake up our creator in case he's still waiting. */
- wake_up(&thread->t_ctl_waitq);
-
- /*
- thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
- NULL, NULL);
- */
-
- spin_lock(&svcpt->scp_rep_lock);
- list_add(&rs->rs_list, &svcpt->scp_rep_idle);
- wake_up(&svcpt->scp_rep_waitq);
- spin_unlock(&svcpt->scp_rep_lock);
-
- CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
- svcpt->scp_nthrs_running);
-
- /* XXX maintain a list of all managed devices: insert here */
- while (!ptlrpc_thread_stopping(thread)) {
- if (ptlrpc_wait_event(svcpt, thread))
- break;
-
- ptlrpc_check_rqbd_pool(svcpt);
-
- if (ptlrpc_threads_need_create(svcpt)) {
- /* Ignore return code - we tried... */
- ptlrpc_start_thread(svcpt, 0);
- }
-
- /* Process all incoming reqs before handling any */
- if (ptlrpc_server_request_incoming(svcpt)) {
- lu_context_enter(&env->le_ctx);
- env->le_ses = NULL;
- ptlrpc_server_handle_req_in(svcpt, thread);
- lu_context_exit(&env->le_ctx);
-
- /* but limit ourselves in case of flood */
- if (counter++ < 100)
- continue;
- counter = 0;
- }
-
- if (ptlrpc_at_check(svcpt))
- ptlrpc_at_check_timed(svcpt);
-
- if (ptlrpc_server_request_pending(svcpt, false)) {
- lu_context_enter(&env->le_ctx);
- ptlrpc_server_handle_request(svcpt, thread);
- lu_context_exit(&env->le_ctx);
- }
-
- if (ptlrpc_rqbd_pending(svcpt) &&
- ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
- /* I just failed to repost request buffers.
- * Wait for a timeout (unless something else
- * happens) before I try again */
- svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
- CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
- svcpt->scp_nrqbds_posted);
- }
- }
-
- /*
- lc_watchdog_delete(thread->t_watchdog);
- thread->t_watchdog = NULL;
- */
-
-out_srv_fini:
- /*
- * deconstruct service specific state created by ptlrpc_start_thread()
- */
- if (svc->srv_ops.so_thr_done != NULL)
- svc->srv_ops.so_thr_done(thread);
-
- if (env != NULL) {
- lu_context_fini(&env->le_ctx);
- kfree(env);
- }
-out:
- CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
- thread, thread->t_pid, thread->t_id, rc);
-
- spin_lock(&svcpt->scp_lock);
- if (thread_test_and_clear_flags(thread, SVC_STARTING))
- svcpt->scp_nthrs_starting--;
-
- if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
- /* must know immediately */
- svcpt->scp_nthrs_running--;
- }
-
- thread->t_id = rc;
- thread_add_flags(thread, SVC_STOPPED);
-
- wake_up(&thread->t_ctl_waitq);
- spin_unlock(&svcpt->scp_lock);
-
- return rc;
-}
-
-static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
- struct list_head *replies)
-{
- int result;
-
- spin_lock(&hrt->hrt_lock);
-
- list_splice_init(&hrt->hrt_queue, replies);
- result = ptlrpc_hr.hr_stopping || !list_empty(replies);
-
- spin_unlock(&hrt->hrt_lock);
- return result;
-}
-
-/**
- * Main body of the "handle reply" function.
- * It processes acked reply states.
- */
-static int ptlrpc_hr_main(void *arg)
-{
- struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg;
- struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
- LIST_HEAD (replies);
- char threadname[20];
- int rc;
-
- snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt, hrt->hrt_id);
- unshare_fs_struct();
-
- rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
- if (rc != 0) {
- CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
- threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
- }
-
- atomic_inc(&hrp->hrp_nstarted);
- wake_up(&ptlrpc_hr.hr_waitq);
-
- while (!ptlrpc_hr.hr_stopping) {
- l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
-
- while (!list_empty(&replies)) {
- struct ptlrpc_reply_state *rs;
-
- rs = list_entry(replies.prev,
- struct ptlrpc_reply_state,
- rs_list);
- list_del_init(&rs->rs_list);
- ptlrpc_handle_rs(rs);
- }
- }
-
- atomic_inc(&hrp->hrp_nstopped);
- wake_up(&ptlrpc_hr.hr_waitq);
-
- return 0;
-}
-
-static void ptlrpc_stop_hr_threads(void)
-{
- struct ptlrpc_hr_partition *hrp;
- int i;
- int j;
-
- ptlrpc_hr.hr_stopping = 1;
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- if (hrp->hrp_thrs == NULL)
- continue; /* uninitialized */
- for (j = 0; j < hrp->hrp_nthrs; j++)
- wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
- }
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- if (hrp->hrp_thrs == NULL)
- continue; /* uninitialized */
- wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstopped) ==
- atomic_read(&hrp->hrp_nstarted));
- }
-}
-
-static int ptlrpc_start_hr_threads(void)
-{
- struct ptlrpc_hr_partition *hrp;
- int i;
- int j;
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- int rc = 0;
-
- for (j = 0; j < hrp->hrp_nthrs; j++) {
- struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
- rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
- &hrp->hrp_thrs[j],
- "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt,
- hrt->hrt_id));
- if (IS_ERR_VALUE(rc))
- break;
- }
- wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstarted) == j);
- if (!IS_ERR_VALUE(rc))
- continue;
-
- CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n",
- i, j, rc);
- ptlrpc_stop_hr_threads();
- return rc;
- }
- return 0;
-}
-
-static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
-{
- struct l_wait_info lwi = { 0 };
- struct ptlrpc_thread *thread;
- LIST_HEAD (zombie);
-
- CDEBUG(D_INFO, "Stopping threads for service %s\n",
- svcpt->scp_service->srv_name);
-
- spin_lock(&svcpt->scp_lock);
- /* let the thread know that we would like it to stop asap */
- list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
- CDEBUG(D_INFO, "Stopping thread %s #%u\n",
- svcpt->scp_service->srv_thread_name, thread->t_id);
- thread_add_flags(thread, SVC_STOPPING);
- }
-
- wake_up_all(&svcpt->scp_waitq);
-
- while (!list_empty(&svcpt->scp_threads)) {
- thread = list_entry(svcpt->scp_threads.next,
- struct ptlrpc_thread, t_link);
- if (thread_is_stopped(thread)) {
- list_del(&thread->t_link);
- list_add(&thread->t_link, &zombie);
- continue;
- }
- spin_unlock(&svcpt->scp_lock);
-
- CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
- svcpt->scp_service->srv_thread_name, thread->t_id);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread), &lwi);
-
- spin_lock(&svcpt->scp_lock);
- }
-
- spin_unlock(&svcpt->scp_lock);
-
- while (!list_empty(&zombie)) {
- thread = list_entry(zombie.next,
- struct ptlrpc_thread, t_link);
- list_del(&thread->t_link);
- kfree(thread);
- }
-}
-
-/**
- * Stops all threads of a particular service \a svc
- */
-void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service != NULL)
- ptlrpc_svcpt_stop_threads(svcpt);
- }
-}
-EXPORT_SYMBOL(ptlrpc_stop_all_threads);
-
-int ptlrpc_start_threads(struct ptlrpc_service *svc)
-{
- int rc = 0;
- int i;
- int j;
-
- /* We require 2 threads min, see note in ptlrpc_server_handle_request */
- LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
-
- for (i = 0; i < svc->srv_ncpts; i++) {
- for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
- rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
- if (rc == 0)
- continue;
-
- if (rc != -EMFILE)
- goto failed;
- /* We have enough threads, don't start more. b=15759 */
- break;
- }
- }
-
- return 0;
- failed:
- CERROR("cannot start %s thread #%d_%d: rc %d\n",
- svc->srv_thread_name, i, j, rc);
- ptlrpc_stop_all_threads(svc);
- return rc;
-}
-EXPORT_SYMBOL(ptlrpc_start_threads);
-
-int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
-{
- struct l_wait_info lwi = { 0 };
- struct ptlrpc_thread *thread;
- struct ptlrpc_service *svc;
- int rc;
-
- LASSERT(svcpt != NULL);
-
- svc = svcpt->scp_service;
-
- CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
- svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
- svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);
-
- again:
- if (unlikely(svc->srv_is_stopping))
- return -ESRCH;
-
- if (!ptlrpc_threads_increasable(svcpt) ||
- (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
- svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
- return -EMFILE;
-
- thread = kzalloc_node(sizeof(*thread), GFP_NOFS,
- cfs_cpt_spread_node(svc->srv_cptable,
- svcpt->scp_cpt));
- if (thread == NULL)
- return -ENOMEM;
- init_waitqueue_head(&thread->t_ctl_waitq);
-
- spin_lock(&svcpt->scp_lock);
- if (!ptlrpc_threads_increasable(svcpt)) {
- spin_unlock(&svcpt->scp_lock);
- kfree(thread);
- return -EMFILE;
- }
-
- if (svcpt->scp_nthrs_starting != 0) {
- /* serialize starting because some modules (obdfilter)
- * might require unique and contiguous t_id */
- LASSERT(svcpt->scp_nthrs_starting == 1);
- spin_unlock(&svcpt->scp_lock);
- kfree(thread);
- if (wait) {
- CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
- svc->srv_thread_name, svcpt->scp_thr_nextid);
- schedule();
- goto again;
- }
-
- CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n",
- svc->srv_thread_name, svcpt->scp_thr_nextid);
- return -EAGAIN;
- }
-
- svcpt->scp_nthrs_starting++;
- thread->t_id = svcpt->scp_thr_nextid++;
- thread_add_flags(thread, SVC_STARTING);
- thread->t_svcpt = svcpt;
-
- list_add(&thread->t_link, &svcpt->scp_threads);
- spin_unlock(&svcpt->scp_lock);
-
- if (svcpt->scp_cpt >= 0) {
- snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
- svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
- } else {
- snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
- svc->srv_thread_name, thread->t_id);
- }
-
- CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
- rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
- if (IS_ERR_VALUE(rc)) {
- CERROR("cannot start thread '%s': rc %d\n",
- thread->t_name, rc);
- spin_lock(&svcpt->scp_lock);
- --svcpt->scp_nthrs_starting;
- if (thread_is_stopping(thread)) {
- /* this ptlrpc_thread is being handled
- * by ptlrpc_svcpt_stop_threads now
- */
- thread_add_flags(thread, SVC_STOPPED);
- wake_up(&thread->t_ctl_waitq);
- spin_unlock(&svcpt->scp_lock);
- } else {
- list_del(&thread->t_link);
- spin_unlock(&svcpt->scp_lock);
- kfree(thread);
- }
- return rc;
- }
-
- if (!wait)
- return 0;
-
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
-
- rc = thread_is_stopped(thread) ? thread->t_id : 0;
- return rc;
-}
-
-int ptlrpc_hr_init(void)
-{
- struct ptlrpc_hr_partition *hrp;
- struct ptlrpc_hr_thread *hrt;
- int rc;
- int i;
- int j;
- int weight;
-
- memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
- ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
-
- ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
- sizeof(*hrp));
- if (ptlrpc_hr.hr_partitions == NULL)
- return -ENOMEM;
-
- init_waitqueue_head(&ptlrpc_hr.hr_waitq);
-
- weight = cpumask_weight(topology_sibling_cpumask(0));
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- hrp->hrp_cpt = i;
-
- atomic_set(&hrp->hrp_nstarted, 0);
- atomic_set(&hrp->hrp_nstopped, 0);
-
- hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
- hrp->hrp_nthrs /= weight;
-
- LASSERT(hrp->hrp_nthrs > 0);
- hrp->hrp_thrs =
- kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
- cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
- i));
- if (hrp->hrp_thrs == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- for (j = 0; j < hrp->hrp_nthrs; j++) {
- hrt = &hrp->hrp_thrs[j];
-
- hrt->hrt_id = j;
- hrt->hrt_partition = hrp;
- init_waitqueue_head(&hrt->hrt_waitq);
- spin_lock_init(&hrt->hrt_lock);
- INIT_LIST_HEAD(&hrt->hrt_queue);
- }
- }
-
- rc = ptlrpc_start_hr_threads();
-out:
- if (rc != 0)
- ptlrpc_hr_fini();
- return rc;
-}
-
-void ptlrpc_hr_fini(void)
-{
- struct ptlrpc_hr_partition *hrp;
- int i;
-
- if (ptlrpc_hr.hr_partitions == NULL)
- return;
-
- ptlrpc_stop_hr_threads();
-
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- kfree(hrp->hrp_thrs);
- }
-
- cfs_percpt_free(ptlrpc_hr.hr_partitions);
- ptlrpc_hr.hr_partitions = NULL;
-}
-
-
-/**
- * Wait until all already scheduled replies are processed.
- */
-static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
-{
- while (1) {
- int rc;
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
- NULL, NULL);
-
- rc = l_wait_event(svcpt->scp_waitq,
- atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
- if (rc == 0)
- break;
- CWARN("Unexpectedly long timeout %s %p\n",
- svcpt->scp_service->srv_name, svcpt->scp_service);
- }
-}
-
-static void
-ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- int i;
-
- /* early disarm AT timer... */
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service != NULL)
- del_timer(&svcpt->scp_at_timer);
- }
-}
-
-static void
-ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_request_buffer_desc *rqbd;
- struct l_wait_info lwi;
- int rc;
- int i;
-
- /* All history will be culled when the next request buffer is
- * freed in ptlrpc_service_purge_all() */
- svc->srv_hist_nrqbds_cpt_max = 0;
-
- rc = LNetClearLazyPortal(svc->srv_req_portal);
- LASSERT(rc == 0);
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
- break;
-
- /* Unlink all the request buffers. This forces a 'final'
- * event with its 'unlink' flag set for each posted rqbd */
- list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
- rqbd_list) {
- rc = LNetMDUnlink(rqbd->rqbd_md_h);
- LASSERT(rc == 0 || rc == -ENOENT);
- }
- }
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
- break;
-
- /* Wait for the network to release any buffers
- * it's currently filling */
- spin_lock(&svcpt->scp_lock);
- while (svcpt->scp_nrqbds_posted != 0) {
- spin_unlock(&svcpt->scp_lock);
- /* Network access will complete in finite time but
- * the HUGE timeout lets us CWARN for visibility
- * of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(
- cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(svcpt->scp_waitq,
- svcpt->scp_nrqbds_posted == 0, &lwi);
- if (rc == -ETIMEDOUT) {
- CWARN("Service %s waiting for request buffers\n",
- svcpt->scp_service->srv_name);
- }
- spin_lock(&svcpt->scp_lock);
- }
- spin_unlock(&svcpt->scp_lock);
- }
-}
-
-static void
-ptlrpc_service_purge_all(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_request_buffer_desc *rqbd;
- struct ptlrpc_request *req;
- struct ptlrpc_reply_state *rs;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
- break;
-
- spin_lock(&svcpt->scp_rep_lock);
- while (!list_empty(&svcpt->scp_rep_active)) {
- rs = list_entry(svcpt->scp_rep_active.next,
- struct ptlrpc_reply_state, rs_list);
- spin_lock(&rs->rs_lock);
- ptlrpc_schedule_difficult_reply(rs);
- spin_unlock(&rs->rs_lock);
- }
- spin_unlock(&svcpt->scp_rep_lock);
-
- /* purge the request queue. NB No new replies (rqbds
- * all unlinked) and no service threads, so I'm the only
- * thread noodling the request queue now */
- while (!list_empty(&svcpt->scp_req_incoming)) {
- req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
-
- list_del(&req->rq_list);
- svcpt->scp_nreqs_incoming--;
- ptlrpc_server_finish_request(svcpt, req);
- }
-
- while (ptlrpc_server_request_pending(svcpt, true)) {
- req = ptlrpc_server_request_get(svcpt, true);
- ptlrpc_server_finish_active_request(svcpt, req);
- }
-
- LASSERT(list_empty(&svcpt->scp_rqbd_posted));
- LASSERT(svcpt->scp_nreqs_incoming == 0);
- LASSERT(svcpt->scp_nreqs_active == 0);
- /* history should have been culled by
- * ptlrpc_server_finish_request */
- LASSERT(svcpt->scp_hist_nrqbds == 0);
-
- /* Now free all the request buffers since nothing
- * references them any more... */
-
- while (!list_empty(&svcpt->scp_rqbd_idle)) {
- rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
- ptlrpc_free_rqbd(rqbd);
- }
- ptlrpc_wait_replies(svcpt);
-
- while (!list_empty(&svcpt->scp_rep_idle)) {
- rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state,
- rs_list);
- list_del(&rs->rs_list);
- kvfree(rs);
- }
- }
-}
-
-static void
-ptlrpc_service_free(struct ptlrpc_service *svc)
-{
- struct ptlrpc_service_part *svcpt;
- struct ptlrpc_at_array *array;
- int i;
-
- ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
- break;
-
- /* In case somebody rearmed this in the meantime */
- del_timer(&svcpt->scp_at_timer);
- array = &svcpt->scp_at_array;
-
- kfree(array->paa_reqs_array);
- array->paa_reqs_array = NULL;
- kfree(array->paa_reqs_count);
- array->paa_reqs_count = NULL;
- }
-
- ptlrpc_service_for_each_part(svcpt, i, svc)
- kfree(svcpt);
-
- if (svc->srv_cpts != NULL)
- cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);
-
- kfree(svc);
-}
-
-int ptlrpc_unregister_service(struct ptlrpc_service *service)
-{
- CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
-
- service->srv_is_stopping = 1;
-
- mutex_lock(&ptlrpc_all_services_mutex);
- list_del_init(&service->srv_list);
- mutex_unlock(&ptlrpc_all_services_mutex);
-
- ptlrpc_service_del_atimer(service);
- ptlrpc_stop_all_threads(service);
-
- ptlrpc_service_unlink_rqbd(service);
- ptlrpc_service_purge_all(service);
- ptlrpc_service_nrs_cleanup(service);
-
- ptlrpc_lprocfs_unregister_service(service);
- ptlrpc_sysfs_unregister_service(service);
-
- ptlrpc_service_free(service);
-
- return 0;
-}
-EXPORT_SYMBOL(ptlrpc_unregister_service);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
deleted file mode 100644
index 40f720ca3b14..000000000000
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ /dev/null
@@ -1,4490 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_RPC
-
-#include <linux/fs.h>
-#include <linux/posix_acl_xattr.h>
-
-#include "../include/obd_support.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/lustre_disk.h"
-#include "ptlrpc_internal.h"
-
-void lustre_assert_wire_constants(void)
-{
- /* Wire protocol assertions generated by 'wirecheck'
- * (make -C lustre/utils newwiretest)
- * running on Linux centos6-bis 2.6.32-358.0.1.el6-head
- * #3 SMP Wed Apr 17 17:37:43 CEST 2013
- * with gcc version 4.4.6 20110731 (Red Hat 4.4.6-3) (GCC)
- */
-
- /* Constants... */
- LASSERTF(PTL_RPC_MSG_REQUEST == 4711, "found %lld\n",
- (long long)PTL_RPC_MSG_REQUEST);
- LASSERTF(PTL_RPC_MSG_ERR == 4712, "found %lld\n",
- (long long)PTL_RPC_MSG_ERR);
- LASSERTF(PTL_RPC_MSG_REPLY == 4713, "found %lld\n",
- (long long)PTL_RPC_MSG_REPLY);
- LASSERTF(MDS_DIR_END_OFF == 0xfffffffffffffffeULL, "found 0x%.16llxULL\n",
- MDS_DIR_END_OFF);
- LASSERTF(DEAD_HANDLE_MAGIC == 0xdeadbeefcafebabeULL, "found 0x%.16llxULL\n",
- DEAD_HANDLE_MAGIC);
- CLASSERT(MTI_NAME_MAXLEN == 64);
- LASSERTF(OST_REPLY == 0, "found %lld\n",
- (long long)OST_REPLY);
- LASSERTF(OST_GETATTR == 1, "found %lld\n",
- (long long)OST_GETATTR);
- LASSERTF(OST_SETATTR == 2, "found %lld\n",
- (long long)OST_SETATTR);
- LASSERTF(OST_READ == 3, "found %lld\n",
- (long long)OST_READ);
- LASSERTF(OST_WRITE == 4, "found %lld\n",
- (long long)OST_WRITE);
- LASSERTF(OST_CREATE == 5, "found %lld\n",
- (long long)OST_CREATE);
- LASSERTF(OST_DESTROY == 6, "found %lld\n",
- (long long)OST_DESTROY);
- LASSERTF(OST_GET_INFO == 7, "found %lld\n",
- (long long)OST_GET_INFO);
- LASSERTF(OST_CONNECT == 8, "found %lld\n",
- (long long)OST_CONNECT);
- LASSERTF(OST_DISCONNECT == 9, "found %lld\n",
- (long long)OST_DISCONNECT);
- LASSERTF(OST_PUNCH == 10, "found %lld\n",
- (long long)OST_PUNCH);
- LASSERTF(OST_OPEN == 11, "found %lld\n",
- (long long)OST_OPEN);
- LASSERTF(OST_CLOSE == 12, "found %lld\n",
- (long long)OST_CLOSE);
- LASSERTF(OST_STATFS == 13, "found %lld\n",
- (long long)OST_STATFS);
- LASSERTF(OST_SYNC == 16, "found %lld\n",
- (long long)OST_SYNC);
- LASSERTF(OST_SET_INFO == 17, "found %lld\n",
- (long long)OST_SET_INFO);
- LASSERTF(OST_QUOTACHECK == 18, "found %lld\n",
- (long long)OST_QUOTACHECK);
- LASSERTF(OST_QUOTACTL == 19, "found %lld\n",
- (long long)OST_QUOTACTL);
- LASSERTF(OST_QUOTA_ADJUST_QUNIT == 20, "found %lld\n",
- (long long)OST_QUOTA_ADJUST_QUNIT);
- LASSERTF(OST_LAST_OPC == 21, "found %lld\n",
- (long long)OST_LAST_OPC);
- LASSERTF(OBD_OBJECT_EOF == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
- OBD_OBJECT_EOF);
- LASSERTF(OST_MIN_PRECREATE == 32, "found %lld\n",
- (long long)OST_MIN_PRECREATE);
- LASSERTF(OST_MAX_PRECREATE == 20000, "found %lld\n",
- (long long)OST_MAX_PRECREATE);
- LASSERTF(OST_LVB_ERR_INIT == 0xffbadbad80000000ULL, "found 0x%.16llxULL\n",
- OST_LVB_ERR_INIT);
- LASSERTF(OST_LVB_ERR_MASK == 0xffbadbad00000000ULL, "found 0x%.16llxULL\n",
- OST_LVB_ERR_MASK);
- LASSERTF(MDS_FIRST_OPC == 33, "found %lld\n",
- (long long)MDS_FIRST_OPC);
- LASSERTF(MDS_GETATTR == 33, "found %lld\n",
- (long long)MDS_GETATTR);
- LASSERTF(MDS_GETATTR_NAME == 34, "found %lld\n",
- (long long)MDS_GETATTR_NAME);
- LASSERTF(MDS_CLOSE == 35, "found %lld\n",
- (long long)MDS_CLOSE);
- LASSERTF(MDS_REINT == 36, "found %lld\n",
- (long long)MDS_REINT);
- LASSERTF(MDS_READPAGE == 37, "found %lld\n",
- (long long)MDS_READPAGE);
- LASSERTF(MDS_CONNECT == 38, "found %lld\n",
- (long long)MDS_CONNECT);
- LASSERTF(MDS_DISCONNECT == 39, "found %lld\n",
- (long long)MDS_DISCONNECT);
- LASSERTF(MDS_GETSTATUS == 40, "found %lld\n",
- (long long)MDS_GETSTATUS);
- LASSERTF(MDS_STATFS == 41, "found %lld\n",
- (long long)MDS_STATFS);
- LASSERTF(MDS_PIN == 42, "found %lld\n",
- (long long)MDS_PIN);
- LASSERTF(MDS_UNPIN == 43, "found %lld\n",
- (long long)MDS_UNPIN);
- LASSERTF(MDS_SYNC == 44, "found %lld\n",
- (long long)MDS_SYNC);
- LASSERTF(MDS_DONE_WRITING == 45, "found %lld\n",
- (long long)MDS_DONE_WRITING);
- LASSERTF(MDS_SET_INFO == 46, "found %lld\n",
- (long long)MDS_SET_INFO);
- LASSERTF(MDS_QUOTACHECK == 47, "found %lld\n",
- (long long)MDS_QUOTACHECK);
- LASSERTF(MDS_QUOTACTL == 48, "found %lld\n",
- (long long)MDS_QUOTACTL);
- LASSERTF(MDS_GETXATTR == 49, "found %lld\n",
- (long long)MDS_GETXATTR);
- LASSERTF(MDS_SETXATTR == 50, "found %lld\n",
- (long long)MDS_SETXATTR);
- LASSERTF(MDS_WRITEPAGE == 51, "found %lld\n",
- (long long)MDS_WRITEPAGE);
- LASSERTF(MDS_IS_SUBDIR == 52, "found %lld\n",
- (long long)MDS_IS_SUBDIR);
- LASSERTF(MDS_GET_INFO == 53, "found %lld\n",
- (long long)MDS_GET_INFO);
- LASSERTF(MDS_HSM_STATE_GET == 54, "found %lld\n",
- (long long)MDS_HSM_STATE_GET);
- LASSERTF(MDS_HSM_STATE_SET == 55, "found %lld\n",
- (long long)MDS_HSM_STATE_SET);
- LASSERTF(MDS_HSM_ACTION == 56, "found %lld\n",
- (long long)MDS_HSM_ACTION);
- LASSERTF(MDS_HSM_PROGRESS == 57, "found %lld\n",
- (long long)MDS_HSM_PROGRESS);
- LASSERTF(MDS_HSM_REQUEST == 58, "found %lld\n",
- (long long)MDS_HSM_REQUEST);
- LASSERTF(MDS_HSM_CT_REGISTER == 59, "found %lld\n",
- (long long)MDS_HSM_CT_REGISTER);
- LASSERTF(MDS_HSM_CT_UNREGISTER == 60, "found %lld\n",
- (long long)MDS_HSM_CT_UNREGISTER);
- LASSERTF(MDS_SWAP_LAYOUTS == 61, "found %lld\n",
- (long long)MDS_SWAP_LAYOUTS);
- LASSERTF(MDS_LAST_OPC == 62, "found %lld\n",
- (long long)MDS_LAST_OPC);
- LASSERTF(REINT_SETATTR == 1, "found %lld\n",
- (long long)REINT_SETATTR);
- LASSERTF(REINT_CREATE == 2, "found %lld\n",
- (long long)REINT_CREATE);
- LASSERTF(REINT_LINK == 3, "found %lld\n",
- (long long)REINT_LINK);
- LASSERTF(REINT_UNLINK == 4, "found %lld\n",
- (long long)REINT_UNLINK);
- LASSERTF(REINT_RENAME == 5, "found %lld\n",
- (long long)REINT_RENAME);
- LASSERTF(REINT_OPEN == 6, "found %lld\n",
- (long long)REINT_OPEN);
- LASSERTF(REINT_SETXATTR == 7, "found %lld\n",
- (long long)REINT_SETXATTR);
- LASSERTF(REINT_RMENTRY == 8, "found %lld\n",
- (long long)REINT_RMENTRY);
- LASSERTF(REINT_MAX == 9, "found %lld\n",
- (long long)REINT_MAX);
- LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)DISP_IT_EXECD);
- LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_EXECD);
- LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_NEG);
- LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_POS);
- LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_CREATE);
- LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_OPEN);
- LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_COMPLETE);
- LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_OPEN_REF);
- LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_CREATE_REF);
- LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_LOCK);
- LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
- (long long)MDS_STATUS_CONN);
- LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
- (long long)MDS_STATUS_LOV);
- LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n",
- (long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES);
- LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_CHANGE);
- LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_OPEN);
- LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_CLOSE);
- LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID1);
- LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID2);
- LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID3);
- LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID4);
- LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_AU);
- LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MF_GETATTR_LOCK);
- LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MODE);
- LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_UID);
- LASSERTF(MDS_ATTR_GID == 0x0000000000000004ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_GID);
- LASSERTF(MDS_ATTR_SIZE == 0x0000000000000008ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_SIZE);
- LASSERTF(MDS_ATTR_ATIME == 0x0000000000000010ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATIME);
- LASSERTF(MDS_ATTR_MTIME == 0x0000000000000020ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MTIME);
- LASSERTF(MDS_ATTR_CTIME == 0x0000000000000040ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_CTIME);
- LASSERTF(MDS_ATTR_ATIME_SET == 0x0000000000000080ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATIME_SET);
- LASSERTF(MDS_ATTR_MTIME_SET == 0x0000000000000100ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MTIME_SET);
- LASSERTF(MDS_ATTR_FORCE == 0x0000000000000200ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_FORCE);
- LASSERTF(MDS_ATTR_ATTR_FLAG == 0x0000000000000400ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATTR_FLAG);
- LASSERTF(MDS_ATTR_KILL_SUID == 0x0000000000000800ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_KILL_SUID);
- LASSERTF(MDS_ATTR_KILL_SGID == 0x0000000000001000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_KILL_SGID);
- LASSERTF(MDS_ATTR_CTIME_SET == 0x0000000000002000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_CTIME_SET);
- LASSERTF(MDS_ATTR_FROM_OPEN == 0x0000000000004000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_FROM_OPEN);
- LASSERTF(MDS_ATTR_BLOCKS == 0x0000000000008000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_BLOCKS);
- LASSERTF(FLD_QUERY == 900, "found %lld\n",
- (long long)FLD_QUERY);
- LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
- (long long)FLD_FIRST_OPC);
- LASSERTF(FLD_LAST_OPC == 901, "found %lld\n",
- (long long)FLD_LAST_OPC);
- LASSERTF(SEQ_QUERY == 700, "found %lld\n",
- (long long)SEQ_QUERY);
- LASSERTF(SEQ_FIRST_OPC == 700, "found %lld\n",
- (long long)SEQ_FIRST_OPC);
- LASSERTF(SEQ_LAST_OPC == 701, "found %lld\n",
- (long long)SEQ_LAST_OPC);
- LASSERTF(SEQ_ALLOC_SUPER == 0, "found %lld\n",
- (long long)SEQ_ALLOC_SUPER);
- LASSERTF(SEQ_ALLOC_META == 1, "found %lld\n",
- (long long)SEQ_ALLOC_META);
- LASSERTF(LDLM_ENQUEUE == 101, "found %lld\n",
- (long long)LDLM_ENQUEUE);
- LASSERTF(LDLM_CONVERT == 102, "found %lld\n",
- (long long)LDLM_CONVERT);
- LASSERTF(LDLM_CANCEL == 103, "found %lld\n",
- (long long)LDLM_CANCEL);
- LASSERTF(LDLM_BL_CALLBACK == 104, "found %lld\n",
- (long long)LDLM_BL_CALLBACK);
- LASSERTF(LDLM_CP_CALLBACK == 105, "found %lld\n",
- (long long)LDLM_CP_CALLBACK);
- LASSERTF(LDLM_GL_CALLBACK == 106, "found %lld\n",
- (long long)LDLM_GL_CALLBACK);
- LASSERTF(LDLM_SET_INFO == 107, "found %lld\n",
- (long long)LDLM_SET_INFO);
- LASSERTF(LDLM_LAST_OPC == 108, "found %lld\n",
- (long long)LDLM_LAST_OPC);
- LASSERTF(LCK_MINMODE == 0, "found %lld\n",
- (long long)LCK_MINMODE);
- LASSERTF(LCK_EX == 1, "found %lld\n",
- (long long)LCK_EX);
- LASSERTF(LCK_PW == 2, "found %lld\n",
- (long long)LCK_PW);
- LASSERTF(LCK_PR == 4, "found %lld\n",
- (long long)LCK_PR);
- LASSERTF(LCK_CW == 8, "found %lld\n",
- (long long)LCK_CW);
- LASSERTF(LCK_CR == 16, "found %lld\n",
- (long long)LCK_CR);
- LASSERTF(LCK_NL == 32, "found %lld\n",
- (long long)LCK_NL);
- LASSERTF(LCK_GROUP == 64, "found %lld\n",
- (long long)LCK_GROUP);
- LASSERTF(LCK_COS == 128, "found %lld\n",
- (long long)LCK_COS);
- LASSERTF(LCK_MAXMODE == 129, "found %lld\n",
- (long long)LCK_MAXMODE);
- LASSERTF(LCK_MODE_NUM == 8, "found %lld\n",
- (long long)LCK_MODE_NUM);
- CLASSERT(LDLM_PLAIN == 10);
- CLASSERT(LDLM_EXTENT == 11);
- CLASSERT(LDLM_FLOCK == 12);
- CLASSERT(LDLM_IBITS == 13);
- CLASSERT(LDLM_MAX_TYPE == 14);
- CLASSERT(LUSTRE_RES_ID_SEQ_OFF == 0);
- CLASSERT(LUSTRE_RES_ID_VER_OID_OFF == 1);
- LASSERTF(UPDATE_OBJ == 1000, "found %lld\n",
- (long long)UPDATE_OBJ);
- LASSERTF(UPDATE_LAST_OPC == 1001, "found %lld\n",
- (long long)UPDATE_LAST_OPC);
- CLASSERT(LUSTRE_RES_ID_QUOTA_SEQ_OFF == 2);
- CLASSERT(LUSTRE_RES_ID_QUOTA_VER_OID_OFF == 3);
- CLASSERT(LUSTRE_RES_ID_HSH_OFF == 3);
- CLASSERT(LQUOTA_TYPE_USR == 0);
- CLASSERT(LQUOTA_TYPE_GRP == 1);
- CLASSERT(LQUOTA_RES_MD == 1);
- CLASSERT(LQUOTA_RES_DT == 2);
- LASSERTF(OBD_PING == 400, "found %lld\n",
- (long long)OBD_PING);
- LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n",
- (long long)OBD_LOG_CANCEL);
- LASSERTF(OBD_QC_CALLBACK == 402, "found %lld\n",
- (long long)OBD_QC_CALLBACK);
- LASSERTF(OBD_IDX_READ == 403, "found %lld\n",
- (long long)OBD_IDX_READ);
- LASSERTF(OBD_LAST_OPC == 404, "found %lld\n",
- (long long)OBD_LAST_OPC);
- LASSERTF(QUOTA_DQACQ == 601, "found %lld\n",
- (long long)QUOTA_DQACQ);
- LASSERTF(QUOTA_DQREL == 602, "found %lld\n",
- (long long)QUOTA_DQREL);
- LASSERTF(QUOTA_LAST_OPC == 603, "found %lld\n",
- (long long)QUOTA_LAST_OPC);
- LASSERTF(MGS_CONNECT == 250, "found %lld\n",
- (long long)MGS_CONNECT);
- LASSERTF(MGS_DISCONNECT == 251, "found %lld\n",
- (long long)MGS_DISCONNECT);
- LASSERTF(MGS_EXCEPTION == 252, "found %lld\n",
- (long long)MGS_EXCEPTION);
- LASSERTF(MGS_TARGET_REG == 253, "found %lld\n",
- (long long)MGS_TARGET_REG);
- LASSERTF(MGS_TARGET_DEL == 254, "found %lld\n",
- (long long)MGS_TARGET_DEL);
- LASSERTF(MGS_SET_INFO == 255, "found %lld\n",
- (long long)MGS_SET_INFO);
- LASSERTF(MGS_LAST_OPC == 257, "found %lld\n",
- (long long)MGS_LAST_OPC);
- LASSERTF(SEC_CTX_INIT == 801, "found %lld\n",
- (long long)SEC_CTX_INIT);
- LASSERTF(SEC_CTX_INIT_CONT == 802, "found %lld\n",
- (long long)SEC_CTX_INIT_CONT);
- LASSERTF(SEC_CTX_FINI == 803, "found %lld\n",
- (long long)SEC_CTX_FINI);
- LASSERTF(SEC_LAST_OPC == 804, "found %lld\n",
- (long long)SEC_LAST_OPC);
- /* Sizes and Offsets */
-
- /* Checks for struct obd_uuid */
- LASSERTF((int)sizeof(struct obd_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(struct obd_uuid));
-
- /* Checks for struct lu_seq_range */
- LASSERTF((int)sizeof(struct lu_seq_range) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lu_seq_range));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_start));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_start));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_end));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_end));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_index) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_index));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_index));
- LASSERTF((int)offsetof(struct lu_seq_range, lsr_flags) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lu_seq_range, lsr_flags));
- LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_flags));
- LASSERTF(LU_SEQ_RANGE_MDT == 0, "found %lld\n",
- (long long)LU_SEQ_RANGE_MDT);
- LASSERTF(LU_SEQ_RANGE_OST == 1, "found %lld\n",
- (long long)LU_SEQ_RANGE_OST);
-
- /* Checks for struct lustre_mdt_attrs */
- LASSERTF((int)sizeof(struct lustre_mdt_attrs) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lustre_mdt_attrs));
- LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_compat) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_mdt_attrs, lma_compat));
- LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat));
- LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_incompat) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lustre_mdt_attrs, lma_incompat));
- LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat));
- LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_self_fid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lustre_mdt_attrs, lma_self_fid));
- LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
- LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAI_RELEASED);
- LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_HSM);
- LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_SOM);
- LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_NOT_IN_OI);
- LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_FID_ON_OST);
- LASSERTF(OBJ_CREATE == 1, "found %lld\n",
- (long long)OBJ_CREATE);
- LASSERTF(OBJ_DESTROY == 2, "found %lld\n",
- (long long)OBJ_DESTROY);
- LASSERTF(OBJ_REF_ADD == 3, "found %lld\n",
- (long long)OBJ_REF_ADD);
- LASSERTF(OBJ_REF_DEL == 4, "found %lld\n",
- (long long)OBJ_REF_DEL);
- LASSERTF(OBJ_ATTR_SET == 5, "found %lld\n",
- (long long)OBJ_ATTR_SET);
- LASSERTF(OBJ_ATTR_GET == 6, "found %lld\n",
- (long long)OBJ_ATTR_GET);
- LASSERTF(OBJ_XATTR_SET == 7, "found %lld\n",
- (long long)OBJ_XATTR_SET);
- LASSERTF(OBJ_XATTR_GET == 8, "found %lld\n",
- (long long)OBJ_XATTR_GET);
- LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n",
- (long long)OBJ_INDEX_LOOKUP);
- LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n",
- (long long)OBJ_INDEX_LOOKUP);
- LASSERTF(OBJ_INDEX_INSERT == 10, "found %lld\n",
- (long long)OBJ_INDEX_INSERT);
- LASSERTF(OBJ_INDEX_DELETE == 11, "found %lld\n",
- (long long)OBJ_INDEX_DELETE);
-
- /* Checks for struct ost_id */
- LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
- (long long)(int)sizeof(struct ost_id));
- LASSERTF((int)offsetof(struct ost_id, oi) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_id, oi));
- LASSERTF((int)sizeof(((struct ost_id *)0)->oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct ost_id *)0)->oi));
- LASSERTF(LUSTRE_FID_INIT_OID == 1, "found %lld\n",
- (long long)LUSTRE_FID_INIT_OID);
- LASSERTF(FID_SEQ_OST_MDT0 == 0, "found %lld\n",
- (long long)FID_SEQ_OST_MDT0);
- LASSERTF(FID_SEQ_LLOG == 1, "found %lld\n",
- (long long)FID_SEQ_LLOG);
- LASSERTF(FID_SEQ_ECHO == 2, "found %lld\n",
- (long long)FID_SEQ_ECHO);
- LASSERTF(FID_SEQ_OST_MDT1 == 3, "found %lld\n",
- (long long)FID_SEQ_OST_MDT1);
- LASSERTF(FID_SEQ_OST_MAX == 9, "found %lld\n",
- (long long)FID_SEQ_OST_MAX);
- LASSERTF(FID_SEQ_RSVD == 11, "found %lld\n",
- (long long)FID_SEQ_RSVD);
- LASSERTF(FID_SEQ_IGIF == 12, "found %lld\n",
- (long long)FID_SEQ_IGIF);
- LASSERTF(FID_SEQ_IGIF_MAX == 0x00000000ffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IGIF_MAX);
- LASSERTF(FID_SEQ_IDIF == 0x0000000100000000ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IDIF);
- LASSERTF(FID_SEQ_IDIF_MAX == 0x00000001ffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IDIF_MAX);
- LASSERTF(FID_SEQ_START == 0x0000000200000000ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_START);
- LASSERTF(FID_SEQ_LOCAL_FILE == 0x0000000200000001ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_LOCAL_FILE);
- LASSERTF(FID_SEQ_DOT_LUSTRE == 0x0000000200000002ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_DOT_LUSTRE);
- LASSERTF(FID_SEQ_SPECIAL == 0x0000000200000004ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_SPECIAL);
- LASSERTF(FID_SEQ_QUOTA == 0x0000000200000005ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_QUOTA);
- LASSERTF(FID_SEQ_QUOTA_GLB == 0x0000000200000006ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_QUOTA_GLB);
- LASSERTF(FID_SEQ_ROOT == 0x0000000200000007ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_ROOT);
- LASSERTF(FID_SEQ_NORMAL == 0x0000000200000400ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_NORMAL);
- LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_LOV_DEFAULT);
- LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_SPECIAL_BFL);
- LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE);
- LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE_OBF);
-
- /* Checks for struct lu_dirent */
- LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lu_dirent));
- LASSERTF((int)offsetof(struct lu_dirent, lde_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_fid));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_fid));
- LASSERTF((int)offsetof(struct lu_dirent, lde_hash) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_hash));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_hash) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_hash));
- LASSERTF((int)offsetof(struct lu_dirent, lde_reclen) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_reclen));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_reclen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_reclen));
- LASSERTF((int)offsetof(struct lu_dirent, lde_namelen) == 26, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_namelen));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_namelen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_namelen));
- LASSERTF((int)offsetof(struct lu_dirent, lde_attrs) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_attrs));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_attrs) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_attrs));
- LASSERTF((int)offsetof(struct lu_dirent, lde_name[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirent, lde_name[0]));
- LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
- LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_FID);
- LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_TYPE);
- LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_64BITHASH);
-
- /* Checks for struct luda_type */
- LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
- (long long)(int)sizeof(struct luda_type));
- LASSERTF((int)offsetof(struct luda_type, lt_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct luda_type, lt_type));
- LASSERTF((int)sizeof(((struct luda_type *)0)->lt_type) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct luda_type *)0)->lt_type));
-
- /* Checks for struct lu_dirpage */
- LASSERTF((int)sizeof(struct lu_dirpage) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lu_dirpage));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_hash_start));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_hash_end));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_hash_end));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_flags));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_flags));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_pad0) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_pad0));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_pad0) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_pad0));
- LASSERTF((int)offsetof(struct lu_dirpage, ldp_entries[0]) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lu_dirpage, ldp_entries[0]));
- LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0]) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0]));
- LASSERTF(LDF_EMPTY == 1, "found %lld\n",
- (long long)LDF_EMPTY);
- LASSERTF(LDF_COLLIDE == 2, "found %lld\n",
- (long long)LDF_COLLIDE);
- LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n",
- (long long)LU_PAGE_SIZE);
- /* Checks for union lu_page */
- LASSERTF((int)sizeof(union lu_page) == 4096, "found %lld\n",
- (long long)(int)sizeof(union lu_page));
-
- /* Checks for struct lustre_handle */
- LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(struct lustre_handle));
- LASSERTF((int)offsetof(struct lustre_handle, cookie) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_handle, cookie));
- LASSERTF((int)sizeof(((struct lustre_handle *)0)->cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_handle *)0)->cookie));
-
- /* Checks for struct lustre_msg_v2 */
- LASSERTF((int)sizeof(struct lustre_msg_v2) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lustre_msg_v2));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_bufcount) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_bufcount));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_bufcount) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_bufcount));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_secflvr) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_secflvr));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_magic) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_magic));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_repsize) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_repsize));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_cksum) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_cksum));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_flags) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_flags));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_2) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_2));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_3) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_3));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_3));
- LASSERTF((int)offsetof(struct lustre_msg_v2, lm_buflens[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lustre_msg_v2, lm_buflens[0]));
- LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]));
- LASSERTF(LUSTRE_MSG_MAGIC_V2 == 0x0BD00BD3, "found 0x%.8x\n",
- LUSTRE_MSG_MAGIC_V2);
- LASSERTF(LUSTRE_MSG_MAGIC_V2_SWABBED == 0xD30BD00B, "found 0x%.8x\n",
- LUSTRE_MSG_MAGIC_V2_SWABBED);
-
- /* Checks for struct ptlrpc_body */
- LASSERTF((int)sizeof(struct ptlrpc_body_v3) == 184, "found %lld\n",
- (long long)(int)sizeof(struct ptlrpc_body_v3));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_handle));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_type));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == 12, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_version));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_opc));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_status) == 20, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_status));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_seen));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_transno));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == 56, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == 60, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_op_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == 64, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == 68, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_timeout));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_service_time));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == 76, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_limit));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == 80, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_slv));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv));
- CLASSERT(PTLRPC_NUM_VERSIONS == 4);
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == 88, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == 120, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding));
- CLASSERT(JOBSTATS_JOBID_SIZE == 32);
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
- (long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == (int)offsetof(struct ptlrpc_body_v2, pb_handle), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_handle), (int)offsetof(struct ptlrpc_body_v2, pb_handle));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == (int)offsetof(struct ptlrpc_body_v2, pb_type), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_type), (int)offsetof(struct ptlrpc_body_v2, pb_type));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == (int)offsetof(struct ptlrpc_body_v2, pb_version), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_version), (int)offsetof(struct ptlrpc_body_v2, pb_version));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == (int)offsetof(struct ptlrpc_body_v2, pb_opc), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_opc), (int)offsetof(struct ptlrpc_body_v2, pb_opc));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_status) == (int)offsetof(struct ptlrpc_body_v2, pb_status), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_status), (int)offsetof(struct ptlrpc_body_v2, pb_status));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == (int)offsetof(struct ptlrpc_body_v2, pb_last_xid), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == (int)offsetof(struct ptlrpc_body_v2, pb_last_seen), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_last_seen), (int)offsetof(struct ptlrpc_body_v2, pb_last_seen));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == (int)offsetof(struct ptlrpc_body_v2, pb_transno), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_transno), (int)offsetof(struct ptlrpc_body_v2, pb_transno));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == (int)offsetof(struct ptlrpc_body_v2, pb_flags), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_flags), (int)offsetof(struct ptlrpc_body_v2, pb_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == (int)offsetof(struct ptlrpc_body_v2, pb_op_flags), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_op_flags), (int)offsetof(struct ptlrpc_body_v2, pb_op_flags));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt), (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == (int)offsetof(struct ptlrpc_body_v2, pb_timeout), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_timeout), (int)offsetof(struct ptlrpc_body_v2, pb_timeout));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == (int)offsetof(struct ptlrpc_body_v2, pb_service_time), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_service_time), (int)offsetof(struct ptlrpc_body_v2, pb_service_time));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == (int)offsetof(struct ptlrpc_body_v2, pb_limit), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_limit), (int)offsetof(struct ptlrpc_body_v2, pb_limit));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == (int)offsetof(struct ptlrpc_body_v2, pb_slv), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_slv), (int)offsetof(struct ptlrpc_body_v2, pb_slv));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions));
- LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == (int)offsetof(struct ptlrpc_body_v2, pb_padding), "%d != %d\n",
- (int)offsetof(struct ptlrpc_body_v3, pb_padding), (int)offsetof(struct ptlrpc_body_v2, pb_padding));
- LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding), "%d != %d\n",
- (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding));
- LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n",
- (long long)MSG_PTLRPC_BODY_OFF);
- LASSERTF(REQ_REC_OFF == 1, "found %lld\n",
- (long long)REQ_REC_OFF);
- LASSERTF(REPLY_REC_OFF == 1, "found %lld\n",
- (long long)REPLY_REC_OFF);
- LASSERTF(DLM_LOCKREQ_OFF == 1, "found %lld\n",
- (long long)DLM_LOCKREQ_OFF);
- LASSERTF(DLM_REQ_REC_OFF == 2, "found %lld\n",
- (long long)DLM_REQ_REC_OFF);
- LASSERTF(DLM_INTENT_IT_OFF == 2, "found %lld\n",
- (long long)DLM_INTENT_IT_OFF);
- LASSERTF(DLM_INTENT_REC_OFF == 3, "found %lld\n",
- (long long)DLM_INTENT_REC_OFF);
- LASSERTF(DLM_LOCKREPLY_OFF == 1, "found %lld\n",
- (long long)DLM_LOCKREPLY_OFF);
- LASSERTF(DLM_REPLY_REC_OFF == 2, "found %lld\n",
- (long long)DLM_REPLY_REC_OFF);
- LASSERTF(MSG_PTLRPC_HEADER_OFF == 31, "found %lld\n",
- (long long)MSG_PTLRPC_HEADER_OFF);
- LASSERTF(PTLRPC_MSG_VERSION == 0x00000003, "found 0x%.8x\n",
- PTLRPC_MSG_VERSION);
- LASSERTF(LUSTRE_VERSION_MASK == 0xffff0000, "found 0x%.8x\n",
- LUSTRE_VERSION_MASK);
- LASSERTF(LUSTRE_OBD_VERSION == 0x00010000, "found 0x%.8x\n",
- LUSTRE_OBD_VERSION);
- LASSERTF(LUSTRE_MDS_VERSION == 0x00020000, "found 0x%.8x\n",
- LUSTRE_MDS_VERSION);
- LASSERTF(LUSTRE_OST_VERSION == 0x00030000, "found 0x%.8x\n",
- LUSTRE_OST_VERSION);
- LASSERTF(LUSTRE_DLM_VERSION == 0x00040000, "found 0x%.8x\n",
- LUSTRE_DLM_VERSION);
- LASSERTF(LUSTRE_LOG_VERSION == 0x00050000, "found 0x%.8x\n",
- LUSTRE_LOG_VERSION);
- LASSERTF(LUSTRE_MGS_VERSION == 0x00060000, "found 0x%.8x\n",
- LUSTRE_MGS_VERSION);
- LASSERTF(MSGHDR_AT_SUPPORT == 1, "found %lld\n",
- (long long)MSGHDR_AT_SUPPORT);
- LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
- (long long)MSGHDR_CKSUM_INCOMPAT18);
- LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
- (unsigned)MSG_OP_FLAG_MASK);
- LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
- (long long)MSG_OP_FLAG_SHIFT);
- LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
- (unsigned)MSG_GEN_FLAG_MASK);
- LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LAST_REPLAY);
- LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_RESENT);
- LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REPLAY);
- LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_DELAY_REPLAY);
- LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_VERSION_REPLAY);
- LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REQ_REPLAY_DONE);
- LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LOCK_REPLAY_DONE);
- LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECOVERING);
- LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECONNECT);
- LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_REPLAYABLE);
- LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_LIBCLIENT);
- LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_INITIAL);
- LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_ASYNC);
- LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_NEXT_VER);
- LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_TRANSNO);
-
- /* Checks for struct obd_connect_data */
- LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
- (long long)(int)sizeof(struct obd_connect_data));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_version) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_version));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_version));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant) == 12, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_grant));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_grant));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_index) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_index));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_index));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_brw_size) == 20, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_brw_size));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_ibits_known) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_ibits_known));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_blocksize) == 32, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_blocksize));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_inodespace) == 33, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_inodespace));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant_extent) == 34, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_grant_extent));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant_extent) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_grant_extent));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_unused) == 36, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_unused));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_unused) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_unused));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_transno) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_transno));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_transno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_transno));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_group) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_group));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_group) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_group));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_cksum_types) == 52, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_cksum_types));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_max_easize) == 56, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_max_easize));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_instance) == 60, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_instance));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_instance) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_instance));
- LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxbytes) == 64, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes));
- LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 72, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding1));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1));
- LASSERTF((int)offsetof(struct obd_connect_data, padding2) == 80, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding2));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding2));
- LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding3));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding3));
- LASSERTF((int)offsetof(struct obd_connect_data, padding4) == 96, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding4));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding4));
- LASSERTF((int)offsetof(struct obd_connect_data, padding5) == 104, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding5));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding5));
- LASSERTF((int)offsetof(struct obd_connect_data, padding6) == 112, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding6));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding6));
- LASSERTF((int)offsetof(struct obd_connect_data, padding7) == 120, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding7));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding7) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding7));
- LASSERTF((int)offsetof(struct obd_connect_data, padding8) == 128, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding8));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding8) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding8));
- LASSERTF((int)offsetof(struct obd_connect_data, padding9) == 136, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, padding9));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding9) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->padding9));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingA) == 144, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingA));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingA) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingA));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingB) == 152, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingB));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingB) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingB));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingC) == 160, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingC));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingC) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingC));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingD) == 168, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingD));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingD) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingD));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingE) == 176, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingE));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingE) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingE));
- LASSERTF((int)offsetof(struct obd_connect_data, paddingF) == 184, "found %lld\n",
- (long long)(int)offsetof(struct obd_connect_data, paddingF));
- LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingF) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingF));
- LASSERTF(OBD_CONNECT_RDONLY == 0x1ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_RDONLY);
- LASSERTF(OBD_CONNECT_INDEX == 0x2ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_INDEX);
- LASSERTF(OBD_CONNECT_MDS == 0x4ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MDS);
- LASSERTF(OBD_CONNECT_GRANT == 0x8ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_GRANT);
- LASSERTF(OBD_CONNECT_SRVLOCK == 0x10ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SRVLOCK);
- LASSERTF(OBD_CONNECT_VERSION == 0x20ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_VERSION);
- LASSERTF(OBD_CONNECT_REQPORTAL == 0x40ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_REQPORTAL);
- LASSERTF(OBD_CONNECT_ACL == 0x80ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_ACL);
- LASSERTF(OBD_CONNECT_XATTR == 0x100ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_XATTR);
- LASSERTF(OBD_CONNECT_CROW == 0x200ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CROW);
- LASSERTF(OBD_CONNECT_TRUNCLOCK == 0x400ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_TRUNCLOCK);
- LASSERTF(OBD_CONNECT_TRANSNO == 0x800ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_TRANSNO);
- LASSERTF(OBD_CONNECT_IBITS == 0x1000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_IBITS);
- LASSERTF(OBD_CONNECT_JOIN == 0x2000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_JOIN);
- LASSERTF(OBD_CONNECT_ATTRFID == 0x4000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_ATTRFID);
- LASSERTF(OBD_CONNECT_NODEVOH == 0x8000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_NODEVOH);
- LASSERTF(OBD_CONNECT_RMT_CLIENT == 0x10000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_RMT_CLIENT);
- LASSERTF(OBD_CONNECT_RMT_CLIENT_FORCE == 0x20000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_RMT_CLIENT_FORCE);
- LASSERTF(OBD_CONNECT_BRW_SIZE == 0x40000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_BRW_SIZE);
- LASSERTF(OBD_CONNECT_QUOTA64 == 0x80000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_QUOTA64);
- LASSERTF(OBD_CONNECT_MDS_CAPA == 0x100000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MDS_CAPA);
- LASSERTF(OBD_CONNECT_OSS_CAPA == 0x200000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_OSS_CAPA);
- LASSERTF(OBD_CONNECT_CANCELSET == 0x400000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CANCELSET);
- LASSERTF(OBD_CONNECT_SOM == 0x800000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SOM);
- LASSERTF(OBD_CONNECT_AT == 0x1000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_AT);
- LASSERTF(OBD_CONNECT_LRU_RESIZE == 0x2000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LRU_RESIZE);
- LASSERTF(OBD_CONNECT_MDS_MDS == 0x4000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MDS_MDS);
- LASSERTF(OBD_CONNECT_REAL == 0x8000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_REAL);
- LASSERTF(OBD_CONNECT_CHANGE_QS == 0x10000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CHANGE_QS);
- LASSERTF(OBD_CONNECT_CKSUM == 0x20000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_CKSUM);
- LASSERTF(OBD_CONNECT_FID == 0x40000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_FID);
- LASSERTF(OBD_CONNECT_VBR == 0x80000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_VBR);
- LASSERTF(OBD_CONNECT_LOV_V3 == 0x100000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LOV_V3);
- LASSERTF(OBD_CONNECT_GRANT_SHRINK == 0x200000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_GRANT_SHRINK);
- LASSERTF(OBD_CONNECT_SKIP_ORPHAN == 0x400000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SKIP_ORPHAN);
- LASSERTF(OBD_CONNECT_MAX_EASIZE == 0x800000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MAX_EASIZE);
- LASSERTF(OBD_CONNECT_FULL20 == 0x1000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_FULL20);
- LASSERTF(OBD_CONNECT_LAYOUTLOCK == 0x2000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LAYOUTLOCK);
- LASSERTF(OBD_CONNECT_64BITHASH == 0x4000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_64BITHASH);
- LASSERTF(OBD_CONNECT_MAXBYTES == 0x8000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_MAXBYTES);
- LASSERTF(OBD_CONNECT_IMP_RECOV == 0x10000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_IMP_RECOV);
- LASSERTF(OBD_CONNECT_JOBSTATS == 0x20000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_JOBSTATS);
- LASSERTF(OBD_CONNECT_UMASK == 0x40000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_UMASK);
- LASSERTF(OBD_CONNECT_EINPROGRESS == 0x80000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_EINPROGRESS);
- LASSERTF(OBD_CONNECT_GRANT_PARAM == 0x100000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_GRANT_PARAM);
- LASSERTF(OBD_CONNECT_FLOCK_OWNER == 0x200000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_FLOCK_OWNER);
- LASSERTF(OBD_CONNECT_LVB_TYPE == 0x400000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LVB_TYPE);
- LASSERTF(OBD_CONNECT_NANOSEC_TIME == 0x800000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_NANOSEC_TIME);
- LASSERTF(OBD_CONNECT_LIGHTWEIGHT == 0x1000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_LIGHTWEIGHT);
- LASSERTF(OBD_CONNECT_SHORTIO == 0x2000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_SHORTIO);
- LASSERTF(OBD_CONNECT_PINGLESS == 0x4000000000000ULL, "found 0x%.16llxULL\n",
- OBD_CONNECT_PINGLESS);
- LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
- "found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
- LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32);
- LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_ADLER);
- LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32C);
-
- /* Checks for struct obdo */
- LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
- (long long)(int)sizeof(struct obdo));
- LASSERTF((int)offsetof(struct obdo, o_valid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_valid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_valid));
- LASSERTF((int)offsetof(struct obdo, o_oi) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_oi));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_oi));
- LASSERTF((int)offsetof(struct obdo, o_parent_seq) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_parent_seq));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_seq) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_parent_seq));
- LASSERTF((int)offsetof(struct obdo, o_size) == 32, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_size));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_size));
- LASSERTF((int)offsetof(struct obdo, o_mtime) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_mtime));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_mtime));
- LASSERTF((int)offsetof(struct obdo, o_atime) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_atime));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_atime));
- LASSERTF((int)offsetof(struct obdo, o_ctime) == 56, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_ctime));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_ctime));
- LASSERTF((int)offsetof(struct obdo, o_blocks) == 64, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_blocks));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_blocks));
- LASSERTF((int)offsetof(struct obdo, o_grant) == 72, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_grant));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_grant) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_grant));
- LASSERTF((int)offsetof(struct obdo, o_blksize) == 80, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_blksize));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_blksize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_blksize));
- LASSERTF((int)offsetof(struct obdo, o_mode) == 84, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_mode));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_mode));
- LASSERTF((int)offsetof(struct obdo, o_uid) == 88, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_uid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_uid));
- LASSERTF((int)offsetof(struct obdo, o_gid) == 92, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_gid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_gid));
- LASSERTF((int)offsetof(struct obdo, o_flags) == 96, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_flags));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_flags));
- LASSERTF((int)offsetof(struct obdo, o_nlink) == 100, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_nlink));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_nlink) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_nlink));
- LASSERTF((int)offsetof(struct obdo, o_parent_oid) == 104, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_parent_oid));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_oid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_parent_oid));
- LASSERTF((int)offsetof(struct obdo, o_misc) == 108, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_misc));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_misc) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_misc));
- LASSERTF((int)offsetof(struct obdo, o_ioepoch) == 112, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_ioepoch));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_ioepoch) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_ioepoch));
- LASSERTF((int)offsetof(struct obdo, o_stripe_idx) == 120, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_stripe_idx));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_stripe_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_stripe_idx));
- LASSERTF((int)offsetof(struct obdo, o_parent_ver) == 124, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_parent_ver));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_ver) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_parent_ver));
- LASSERTF((int)offsetof(struct obdo, o_handle) == 128, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_handle));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_handle));
- LASSERTF((int)offsetof(struct obdo, o_lcookie) == 136, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_lcookie));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_lcookie) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_lcookie));
- LASSERTF((int)offsetof(struct obdo, o_uid_h) == 168, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_uid_h));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_uid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_uid_h));
- LASSERTF((int)offsetof(struct obdo, o_gid_h) == 172, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_gid_h));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_gid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_gid_h));
- LASSERTF((int)offsetof(struct obdo, o_data_version) == 176, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_data_version));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_data_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_data_version));
- LASSERTF((int)offsetof(struct obdo, o_padding_4) == 184, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_padding_4));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_padding_4));
- LASSERTF((int)offsetof(struct obdo, o_padding_5) == 192, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_padding_5));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_padding_5));
- LASSERTF((int)offsetof(struct obdo, o_padding_6) == 200, "found %lld\n",
- (long long)(int)offsetof(struct obdo, o_padding_6));
- LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obdo *)0)->o_padding_6));
- LASSERTF(OBD_MD_FLID == (0x00000001ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLID);
- LASSERTF(OBD_MD_FLATIME == (0x00000002ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLATIME);
- LASSERTF(OBD_MD_FLMTIME == (0x00000004ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMTIME);
- LASSERTF(OBD_MD_FLCTIME == (0x00000008ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCTIME);
- LASSERTF(OBD_MD_FLSIZE == (0x00000010ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLSIZE);
- LASSERTF(OBD_MD_FLBLOCKS == (0x00000020ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLBLOCKS);
- LASSERTF(OBD_MD_FLBLKSZ == (0x00000040ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLBLKSZ);
- LASSERTF(OBD_MD_FLMODE == (0x00000080ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMODE);
- LASSERTF(OBD_MD_FLTYPE == (0x00000100ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLTYPE);
- LASSERTF(OBD_MD_FLUID == (0x00000200ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLUID);
- LASSERTF(OBD_MD_FLGID == (0x00000400ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGID);
- LASSERTF(OBD_MD_FLFLAGS == (0x00000800ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLFLAGS);
- LASSERTF(OBD_MD_FLNLINK == (0x00002000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLNLINK);
- LASSERTF(OBD_MD_FLGENER == (0x00004000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGENER);
- LASSERTF(OBD_MD_FLRDEV == (0x00010000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRDEV);
- LASSERTF(OBD_MD_FLEASIZE == (0x00020000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLEASIZE);
- LASSERTF(OBD_MD_LINKNAME == (0x00040000ULL), "found 0x%.16llxULL\n",
- OBD_MD_LINKNAME);
- LASSERTF(OBD_MD_FLHANDLE == (0x00080000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLHANDLE);
- LASSERTF(OBD_MD_FLCKSUM == (0x00100000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCKSUM);
- LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLQOS);
- LASSERTF(OBD_MD_FLCOOKIE == (0x00800000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCOOKIE);
- LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGROUP);
- LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLFID);
- LASSERTF(OBD_MD_FLEPOCH == (0x04000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLEPOCH);
- LASSERTF(OBD_MD_FLGRANT == (0x08000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGRANT);
- LASSERTF(OBD_MD_FLDIREA == (0x10000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLDIREA);
- LASSERTF(OBD_MD_FLUSRQUOTA == (0x20000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLUSRQUOTA);
- LASSERTF(OBD_MD_FLGRPQUOTA == (0x40000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGRPQUOTA);
- LASSERTF(OBD_MD_FLMODEASIZE == (0x80000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMODEASIZE);
- LASSERTF(OBD_MD_MDS == (0x0000000100000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_MDS);
- LASSERTF(OBD_MD_REINT == (0x0000000200000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_REINT);
- LASSERTF(OBD_MD_MEA == (0x0000000400000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_MEA);
- LASSERTF(OBD_MD_TSTATE == (0x0000000800000000ULL),
- "found 0x%.16llxULL\n", OBD_MD_TSTATE);
- LASSERTF(OBD_MD_FLXATTR == (0x0000001000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLXATTR);
- LASSERTF(OBD_MD_FLXATTRLS == (0x0000002000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLXATTRLS);
- LASSERTF(OBD_MD_FLXATTRRM == (0x0000004000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLXATTRRM);
- LASSERTF(OBD_MD_FLACL == (0x0000008000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLACL);
- LASSERTF(OBD_MD_FLRMTPERM == (0x0000010000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTPERM);
- LASSERTF(OBD_MD_FLMDSCAPA == (0x0000020000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLMDSCAPA);
- LASSERTF(OBD_MD_FLOSSCAPA == (0x0000040000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLOSSCAPA);
- LASSERTF(OBD_MD_FLCKSPLIT == (0x0000080000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCKSPLIT);
- LASSERTF(OBD_MD_FLCROSSREF == (0x0000100000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLCROSSREF);
- LASSERTF(OBD_MD_FLGETATTRLOCK == (0x0000200000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLGETATTRLOCK);
- LASSERTF(OBD_MD_FLRMTLSETFACL == (0x0001000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTLSETFACL);
- LASSERTF(OBD_MD_FLRMTLGETFACL == (0x0002000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTLGETFACL);
- LASSERTF(OBD_MD_FLRMTRSETFACL == (0x0004000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTRSETFACL);
- LASSERTF(OBD_MD_FLRMTRGETFACL == (0x0008000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTRGETFACL);
- LASSERTF(OBD_MD_FLDATAVERSION == (0x0010000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLDATAVERSION);
- CLASSERT(OBD_FL_INLINEDATA == 0x00000001);
- CLASSERT(OBD_FL_OBDMDEXISTS == 0x00000002);
- CLASSERT(OBD_FL_DELORPHAN == 0x00000004);
- CLASSERT(OBD_FL_NORPC == 0x00000008);
- CLASSERT(OBD_FL_IDONLY == 0x00000010);
- CLASSERT(OBD_FL_RECREATE_OBJS == 0x00000020);
- CLASSERT(OBD_FL_DEBUG_CHECK == 0x00000040);
- CLASSERT(OBD_FL_NO_USRQUOTA == 0x00000100);
- CLASSERT(OBD_FL_NO_GRPQUOTA == 0x00000200);
- CLASSERT(OBD_FL_CREATE_CROW == 0x00000400);
- CLASSERT(OBD_FL_SRVLOCK == 0x00000800);
- CLASSERT(OBD_FL_CKSUM_CRC32 == 0x00001000);
- CLASSERT(OBD_FL_CKSUM_ADLER == 0x00002000);
- CLASSERT(OBD_FL_CKSUM_CRC32C == 0x00004000);
- CLASSERT(OBD_FL_CKSUM_RSVD2 == 0x00008000);
- CLASSERT(OBD_FL_CKSUM_RSVD3 == 0x00010000);
- CLASSERT(OBD_FL_SHRINK_GRANT == 0x00020000);
- CLASSERT(OBD_FL_MMAP == 0x00040000);
- CLASSERT(OBD_FL_RECOV_RESEND == 0x00080000);
- CLASSERT(OBD_FL_NOSPC_BLK == 0x00100000);
- CLASSERT(OBD_FL_LOCAL_MASK == 0xf0000000);
-
- /* Checks for struct lov_ost_data_v1 */
- LASSERTF((int)sizeof(struct lov_ost_data_v1) == 24, "found %lld\n",
- (long long)(int)sizeof(struct lov_ost_data_v1));
- LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_oi) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_oi));
- LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi));
- LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_gen) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_gen));
- LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen));
- LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_idx) == 20, "found %lld\n",
- (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_idx));
- LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx));
-
- /* Checks for struct lov_mds_md_v1 */
- LASSERTF((int)sizeof(struct lov_mds_md_v1) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lov_mds_md_v1));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_magic));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_pattern) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_pattern));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_oi) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_oi));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_size) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_size));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_count) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_count));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_layout_gen) == 30, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_layout_gen));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen));
- LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_objects[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v1, lmm_objects[0]));
- LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]));
- CLASSERT(LOV_MAGIC_V1 == 0x0BD10BD0);
-
- /* Checks for struct lov_mds_md_v3 */
- LASSERTF((int)sizeof(struct lov_mds_md_v3) == 48, "found %lld\n",
- (long long)(int)sizeof(struct lov_mds_md_v3));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_magic));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pattern) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pattern));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_oi) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_oi));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_size) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_size));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_count) == 28, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_count));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_layout_gen) == 30, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_layout_gen));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen));
- CLASSERT(LOV_MAXPOOLNAME == 16);
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]));
- LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_objects[0]) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lov_mds_md_v3, lmm_objects[0]));
- LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
- CLASSERT(LOV_MAGIC_V3 == 0x0BD30BD0);
- LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID0);
- LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID1);
- LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_FIRST);
- LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_CMOBD);
-
- /* Checks for struct obd_statfs */
- LASSERTF((int)sizeof(struct obd_statfs) == 144, "found %lld\n",
- (long long)(int)sizeof(struct obd_statfs));
- LASSERTF((int)offsetof(struct obd_statfs, os_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_type));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_type) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_type));
- LASSERTF((int)offsetof(struct obd_statfs, os_blocks) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_blocks));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_blocks));
- LASSERTF((int)offsetof(struct obd_statfs, os_bfree) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_bfree));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bfree) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_bfree));
- LASSERTF((int)offsetof(struct obd_statfs, os_bavail) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_bavail));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bavail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_bavail));
- LASSERTF((int)offsetof(struct obd_statfs, os_ffree) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_ffree));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_ffree) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_ffree));
- LASSERTF((int)offsetof(struct obd_statfs, os_fsid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_fsid));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fsid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_fsid));
- LASSERTF((int)offsetof(struct obd_statfs, os_bsize) == 88, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_bsize));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_bsize));
- LASSERTF((int)offsetof(struct obd_statfs, os_namelen) == 92, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_namelen));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_namelen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_namelen));
- LASSERTF((int)offsetof(struct obd_statfs, os_state) == 104, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_state));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_state) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_state));
- LASSERTF((int)offsetof(struct obd_statfs, os_fprecreated) == 108, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_fprecreated));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fprecreated) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_fprecreated));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare2) == 112, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare2));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare2));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare3) == 116, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare3));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare3));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare4) == 120, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare4));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare4));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare5) == 124, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare5));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare5));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare6) == 128, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare6));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare6));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare7) == 132, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare7));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare7));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare8) == 136, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare8));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare8));
- LASSERTF((int)offsetof(struct obd_statfs, os_spare9) == 140, "found %lld\n",
- (long long)(int)offsetof(struct obd_statfs, os_spare9));
- LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare9));
-
- /* Checks for struct obd_ioobj */
- LASSERTF((int)sizeof(struct obd_ioobj) == 24, "found %lld\n",
- (long long)(int)sizeof(struct obd_ioobj));
- LASSERTF((int)offsetof(struct obd_ioobj, ioo_oid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_ioobj, ioo_oid));
- LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_oid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_oid));
- LASSERTF((int)offsetof(struct obd_ioobj, ioo_max_brw) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_ioobj, ioo_max_brw));
- LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw));
- LASSERTF((int)offsetof(struct obd_ioobj, ioo_bufcnt) == 20, "found %lld\n",
- (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt));
- LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt));
-
- /* Checks for union lquota_id */
- LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
- (long long)(int)sizeof(union lquota_id));
-
- LASSERTF(QUOTABLOCK_BITS == 10, "found %lld\n",
- (long long)QUOTABLOCK_BITS);
- LASSERTF(QUOTABLOCK_SIZE == 1024, "found %lld\n",
- (long long)QUOTABLOCK_SIZE);
-
- /* Checks for struct obd_quotactl */
- LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n",
- (long long)(int)sizeof(struct obd_quotactl));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_cmd) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_cmd));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_cmd) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_cmd));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_type) == 4, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_type));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_type));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_id) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_id));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_id));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_stat) == 12, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_stat));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_stat) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_stat));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_dqinfo) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_dqinfo));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo));
- LASSERTF((int)offsetof(struct obd_quotactl, qc_dqblk) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_quotactl, qc_dqblk));
- LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqblk) == 72, "found %lld\n",
- (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqblk));
-
- /* Checks for struct obd_dqinfo */
- LASSERTF((int)sizeof(struct obd_dqinfo) == 24, "found %lld\n",
- (long long)(int)sizeof(struct obd_dqinfo));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_bgrace) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_bgrace));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_igrace) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_igrace));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_flags));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_flags));
- LASSERTF((int)offsetof(struct obd_dqinfo, dqi_valid) == 20, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqinfo, dqi_valid));
- LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_valid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_valid));
-
- /* Checks for struct obd_dqblk */
- LASSERTF((int)sizeof(struct obd_dqblk) == 72, "found %lld\n",
- (long long)(int)sizeof(struct obd_dqblk));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_bhardlimit) == 0, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_bhardlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_bsoftlimit) == 8, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_bsoftlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_curspace) == 16, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_curspace));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curspace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curspace));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_ihardlimit) == 24, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_ihardlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_isoftlimit) == 32, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_isoftlimit));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_curinodes) == 40, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_curinodes));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_btime) == 48, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_btime));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_btime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_btime));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_itime) == 56, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_itime));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_itime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_itime));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_valid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_valid));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_valid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_valid));
- LASSERTF((int)offsetof(struct obd_dqblk, dqb_padding) == 68, "found %lld\n",
- (long long)(int)offsetof(struct obd_dqblk, dqb_padding));
- LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_padding));
- LASSERTF(Q_QUOTACHECK == 0x800100, "found 0x%.8x\n",
- Q_QUOTACHECK);
- LASSERTF(Q_INITQUOTA == 0x800101, "found 0x%.8x\n",
- Q_INITQUOTA);
- LASSERTF(Q_GETOINFO == 0x800102, "found 0x%.8x\n",
- Q_GETOINFO);
- LASSERTF(Q_GETOQUOTA == 0x800103, "found 0x%.8x\n",
- Q_GETOQUOTA);
- LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n",
- Q_FINVALIDATE);
-
- /* Checks for struct lquota_acct_rec */
- LASSERTF((int)sizeof(struct lquota_acct_rec) == 16, "found %lld\n",
- (long long)(int)sizeof(struct lquota_acct_rec));
- LASSERTF((int)offsetof(struct lquota_acct_rec, bspace) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_acct_rec, bspace));
- LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->bspace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_acct_rec *)0)->bspace));
- LASSERTF((int)offsetof(struct lquota_acct_rec, ispace) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lquota_acct_rec, ispace));
- LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->ispace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_acct_rec *)0)->ispace));
-
- /* Checks for struct lquota_glb_rec */
- LASSERTF((int)sizeof(struct lquota_glb_rec) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lquota_glb_rec));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_hardlimit) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_hardlimit));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_softlimit) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_softlimit));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_time) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_time));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_time));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_granted) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_granted));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted));
-
- /* Checks for struct lquota_slv_rec */
- LASSERTF((int)sizeof(struct lquota_slv_rec) == 8, "found %lld\n",
- (long long)(int)sizeof(struct lquota_slv_rec));
- LASSERTF((int)offsetof(struct lquota_slv_rec, qsr_granted) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_slv_rec, qsr_granted));
- LASSERTF((int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted));
-
- /* Checks for struct idx_info */
- LASSERTF((int)sizeof(struct idx_info) == 80, "found %lld\n",
- (long long)(int)sizeof(struct idx_info));
- LASSERTF((int)offsetof(struct idx_info, ii_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_magic));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_magic));
- LASSERTF((int)offsetof(struct idx_info, ii_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_flags));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_flags));
- LASSERTF((int)offsetof(struct idx_info, ii_count) == 8, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_count));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_count) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_count));
- LASSERTF((int)offsetof(struct idx_info, ii_pad0) == 10, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad0));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad0) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad0));
- LASSERTF((int)offsetof(struct idx_info, ii_attrs) == 12, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_attrs));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_attrs) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_attrs));
- LASSERTF((int)offsetof(struct idx_info, ii_fid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_fid));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_fid));
- LASSERTF((int)offsetof(struct idx_info, ii_version) == 32, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_version));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_version));
- LASSERTF((int)offsetof(struct idx_info, ii_hash_start) == 40, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_hash_start));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_start));
- LASSERTF((int)offsetof(struct idx_info, ii_hash_end) == 48, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_hash_end));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_end));
- LASSERTF((int)offsetof(struct idx_info, ii_keysize) == 56, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_keysize));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_keysize) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_keysize));
- LASSERTF((int)offsetof(struct idx_info, ii_recsize) == 58, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_recsize));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_recsize) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_recsize));
- LASSERTF((int)offsetof(struct idx_info, ii_pad1) == 60, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad1));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad1));
- LASSERTF((int)offsetof(struct idx_info, ii_pad2) == 64, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad2));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad2));
- LASSERTF((int)offsetof(struct idx_info, ii_pad3) == 72, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad3));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad3));
- CLASSERT(IDX_INFO_MAGIC == 0x3D37CC37);
-
- /* Checks for struct lu_idxpage */
- LASSERTF((int)sizeof(struct lu_idxpage) == 16, "found %lld\n",
- (long long)(int)sizeof(struct lu_idxpage));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_magic));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_magic));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_flags));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_flags));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_nr) == 6, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_nr));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_nr) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_nr));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_pad0) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_pad0));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_pad0) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_pad0));
- CLASSERT(LIP_MAGIC == 0x8A6D6B6C);
- LASSERTF(LIP_HDR_SIZE == 16, "found %lld\n",
- (long long)LIP_HDR_SIZE);
- LASSERTF(II_FL_NOHASH == 1, "found %lld\n",
- (long long)II_FL_NOHASH);
- LASSERTF(II_FL_VARKEY == 2, "found %lld\n",
- (long long)II_FL_VARKEY);
- LASSERTF(II_FL_VARREC == 4, "found %lld\n",
- (long long)II_FL_VARREC);
- LASSERTF(II_FL_NONUNQ == 8, "found %lld\n",
- (long long)II_FL_NONUNQ);
-
- /* Checks for struct niobuf_remote */
- LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n",
- (long long)(int)sizeof(struct niobuf_remote));
- LASSERTF((int)offsetof(struct niobuf_remote, offset) == 0, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, offset));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->offset) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->offset));
- LASSERTF((int)offsetof(struct niobuf_remote, len) == 8, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, len));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->len));
- LASSERTF((int)offsetof(struct niobuf_remote, flags) == 12, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, flags));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->flags));
- LASSERTF(OBD_BRW_READ == 0x01, "found 0x%.8x\n",
- OBD_BRW_READ);
- LASSERTF(OBD_BRW_WRITE == 0x02, "found 0x%.8x\n",
- OBD_BRW_WRITE);
- LASSERTF(OBD_BRW_SYNC == 0x08, "found 0x%.8x\n",
- OBD_BRW_SYNC);
- LASSERTF(OBD_BRW_CHECK == 0x10, "found 0x%.8x\n",
- OBD_BRW_CHECK);
- LASSERTF(OBD_BRW_FROM_GRANT == 0x20, "found 0x%.8x\n",
- OBD_BRW_FROM_GRANT);
- LASSERTF(OBD_BRW_GRANTED == 0x40, "found 0x%.8x\n",
- OBD_BRW_GRANTED);
- LASSERTF(OBD_BRW_NOCACHE == 0x80, "found 0x%.8x\n",
- OBD_BRW_NOCACHE);
- LASSERTF(OBD_BRW_NOQUOTA == 0x100, "found 0x%.8x\n",
- OBD_BRW_NOQUOTA);
- LASSERTF(OBD_BRW_SRVLOCK == 0x200, "found 0x%.8x\n",
- OBD_BRW_SRVLOCK);
- LASSERTF(OBD_BRW_ASYNC == 0x400, "found 0x%.8x\n",
- OBD_BRW_ASYNC);
- LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
- OBD_BRW_MEMALLOC);
-
- /* Checks for struct ost_body */
- LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n",
- (long long)(int)sizeof(struct ost_body));
- LASSERTF((int)offsetof(struct ost_body, oa) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_body, oa));
- LASSERTF((int)sizeof(((struct ost_body *)0)->oa) == 208, "found %lld\n",
- (long long)(int)sizeof(((struct ost_body *)0)->oa));
-
- /* Checks for struct ll_fid */
- LASSERTF((int)sizeof(struct ll_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(struct ll_fid));
- LASSERTF((int)offsetof(struct ll_fid, id) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_fid, id));
- LASSERTF((int)sizeof(((struct ll_fid *)0)->id) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fid *)0)->id));
- LASSERTF((int)offsetof(struct ll_fid, generation) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fid, generation));
- LASSERTF((int)sizeof(((struct ll_fid *)0)->generation) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fid *)0)->generation));
- LASSERTF((int)offsetof(struct ll_fid, f_type) == 12, "found %lld\n",
- (long long)(int)offsetof(struct ll_fid, f_type));
- LASSERTF((int)sizeof(((struct ll_fid *)0)->f_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fid *)0)->f_type));
-
- /* Checks for struct mdt_body */
- LASSERTF((int)sizeof(struct mdt_body) == 216, "found %lld\n",
- (long long)(int)sizeof(struct mdt_body));
- LASSERTF((int)offsetof(struct mdt_body, fid1) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fid1));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fid1));
- LASSERTF((int)offsetof(struct mdt_body, fid2) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fid2));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fid2));
- LASSERTF((int)offsetof(struct mdt_body, handle) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, handle));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->handle));
- LASSERTF((int)offsetof(struct mdt_body, valid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, valid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->valid));
- LASSERTF((int)offsetof(struct mdt_body, size) == 48, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, size));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->size));
- LASSERTF((int)offsetof(struct mdt_body, mtime) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mtime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mtime));
- LASSERTF((int)offsetof(struct mdt_body, atime) == 64, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, atime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->atime));
- LASSERTF((int)offsetof(struct mdt_body, ctime) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, ctime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->ctime));
- LASSERTF((int)offsetof(struct mdt_body, blocks) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, blocks));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->blocks));
- LASSERTF((int)offsetof(struct mdt_body, t_state) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, t_state));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->t_state) == 8,
- "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->t_state));
- LASSERTF((int)offsetof(struct mdt_body, fsuid) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fsuid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fsuid));
- LASSERTF((int)offsetof(struct mdt_body, fsgid) == 108, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fsgid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fsgid));
- LASSERTF((int)offsetof(struct mdt_body, capability) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, capability));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->capability) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->capability));
- LASSERTF((int)offsetof(struct mdt_body, mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mode));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mode));
- LASSERTF((int)offsetof(struct mdt_body, uid) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, uid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->uid));
- LASSERTF((int)offsetof(struct mdt_body, gid) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, gid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->gid));
- LASSERTF((int)offsetof(struct mdt_body, flags) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, flags));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->flags));
- LASSERTF((int)offsetof(struct mdt_body, rdev) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, rdev));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->rdev) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->rdev));
- LASSERTF((int)offsetof(struct mdt_body, nlink) == 136, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, nlink));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->nlink) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->nlink));
- LASSERTF((int)offsetof(struct mdt_body, unused2) == 140, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, unused2));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->unused2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->unused2));
- LASSERTF((int)offsetof(struct mdt_body, suppgid) == 144, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, suppgid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->suppgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->suppgid));
- LASSERTF((int)offsetof(struct mdt_body, eadatasize) == 148, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, eadatasize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->eadatasize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->eadatasize));
- LASSERTF((int)offsetof(struct mdt_body, aclsize) == 152, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, aclsize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->aclsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->aclsize));
- LASSERTF((int)offsetof(struct mdt_body, max_mdsize) == 156, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, max_mdsize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->max_mdsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->max_mdsize));
- LASSERTF((int)offsetof(struct mdt_body, max_cookiesize) == 160, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, max_cookiesize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->max_cookiesize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->max_cookiesize));
- LASSERTF((int)offsetof(struct mdt_body, uid_h) == 164, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, uid_h));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->uid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->uid_h));
- LASSERTF((int)offsetof(struct mdt_body, gid_h) == 168, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, gid_h));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->gid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->gid_h));
- LASSERTF((int)offsetof(struct mdt_body, padding_5) == 172, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_5));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_5));
- LASSERTF((int)offsetof(struct mdt_body, padding_6) == 176, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_6));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_6));
- LASSERTF((int)offsetof(struct mdt_body, padding_7) == 184, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_7));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_7) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_7));
- LASSERTF((int)offsetof(struct mdt_body, padding_8) == 192, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_8));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_8) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_8));
- LASSERTF((int)offsetof(struct mdt_body, padding_9) == 200, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_9));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_9) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_9));
- LASSERTF((int)offsetof(struct mdt_body, padding_10) == 208, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_10));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_10) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_10));
- LASSERTF(MDS_FMODE_CLOSED == 000000000000UL, "found 0%.11oUL\n",
- MDS_FMODE_CLOSED);
- LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
- MDS_FMODE_EXEC);
- LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n",
- MDS_FMODE_EPOCH);
- LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n",
- MDS_FMODE_TRUNC);
- LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n",
- MDS_FMODE_SOM);
- LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
- MDS_OPEN_CREATED);
- LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
- MDS_OPEN_CROSS);
- LASSERTF(MDS_OPEN_CREAT == 000000000100UL, "found 0%.11oUL\n",
- MDS_OPEN_CREAT);
- LASSERTF(MDS_OPEN_EXCL == 000000000200UL, "found 0%.11oUL\n",
- MDS_OPEN_EXCL);
- LASSERTF(MDS_OPEN_TRUNC == 000000001000UL, "found 0%.11oUL\n",
- MDS_OPEN_TRUNC);
- LASSERTF(MDS_OPEN_APPEND == 000000002000UL, "found 0%.11oUL\n",
- MDS_OPEN_APPEND);
- LASSERTF(MDS_OPEN_SYNC == 000000010000UL, "found 0%.11oUL\n",
- MDS_OPEN_SYNC);
- LASSERTF(MDS_OPEN_DIRECTORY == 000000200000UL, "found 0%.11oUL\n",
- MDS_OPEN_DIRECTORY);
- LASSERTF(MDS_OPEN_BY_FID == 000040000000UL, "found 0%.11oUL\n",
- MDS_OPEN_BY_FID);
- LASSERTF(MDS_OPEN_DELAY_CREATE == 000100000000UL, "found 0%.11oUL\n",
- MDS_OPEN_DELAY_CREATE);
- LASSERTF(MDS_OPEN_OWNEROVERRIDE == 000200000000UL, "found 0%.11oUL\n",
- MDS_OPEN_OWNEROVERRIDE);
- LASSERTF(MDS_OPEN_JOIN_FILE == 000400000000UL, "found 0%.11oUL\n",
- MDS_OPEN_JOIN_FILE);
- LASSERTF(MDS_OPEN_LOCK == 004000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_LOCK);
- LASSERTF(MDS_OPEN_HAS_EA == 010000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_HAS_EA);
- LASSERTF(MDS_OPEN_HAS_OBJS == 020000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_HAS_OBJS);
- LASSERTF(MDS_OPEN_NORESTORE == 00000000000100000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_NORESTORE);
- LASSERTF(MDS_OPEN_NEWSTRIPE == 00000000000200000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_NEWSTRIPE);
- LASSERTF(MDS_OPEN_VOLATILE == 00000000000400000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_VOLATILE);
- LASSERTF(LUSTRE_SYNC_FL == 0x00000008, "found 0x%.8x\n",
- LUSTRE_SYNC_FL);
- LASSERTF(LUSTRE_IMMUTABLE_FL == 0x00000010, "found 0x%.8x\n",
- LUSTRE_IMMUTABLE_FL);
- LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
- LUSTRE_APPEND_FL);
- LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
- LUSTRE_NOATIME_FL);
- LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
- LUSTRE_DIRSYNC_FL);
- LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
- MDS_INODELOCK_LOOKUP);
- LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
- MDS_INODELOCK_UPDATE);
- LASSERTF(MDS_INODELOCK_OPEN == 0x000004, "found 0x%.8x\n",
- MDS_INODELOCK_OPEN);
- LASSERTF(MDS_INODELOCK_LAYOUT == 0x000008, "found 0x%.8x\n",
- MDS_INODELOCK_LAYOUT);
-
- /* Checks for struct mdt_ioepoch */
- LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
- (long long)(int)sizeof(struct mdt_ioepoch));
- LASSERTF((int)offsetof(struct mdt_ioepoch, handle) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, handle));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->handle));
- LASSERTF((int)offsetof(struct mdt_ioepoch, ioepoch) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, ioepoch));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->ioepoch) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->ioepoch));
- LASSERTF((int)offsetof(struct mdt_ioepoch, flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, flags));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->flags));
- LASSERTF((int)offsetof(struct mdt_ioepoch, padding) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_ioepoch, padding));
- LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
-
- /* Checks for struct mdt_remote_perm */
- LASSERTF((int)sizeof(struct mdt_remote_perm) == 32, "found %lld\n",
- (long long)(int)sizeof(struct mdt_remote_perm));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_uid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_uid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_uid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_gid) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_gid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_gid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_fsuid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_fsgid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_access_perm) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_access_perm));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_padding) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_padding));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_padding));
- LASSERTF(CFS_SETUID_PERM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETUID_PERM);
- LASSERTF(CFS_SETGID_PERM == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETGID_PERM);
- LASSERTF(CFS_SETGRP_PERM == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETGRP_PERM);
- LASSERTF(CFS_RMTACL_PERM == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)CFS_RMTACL_PERM);
- LASSERTF(CFS_RMTOWN_PERM == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)CFS_RMTOWN_PERM);
-
- /* Checks for struct mdt_rec_setattr */
- LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_setattr));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_fid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_valid) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_valid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_uid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_uid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_gid) == 68, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_gid));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_size) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_size));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_size));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_blocks) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_blocks));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mtime) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_mtime));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_atime) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_atime));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_ctime) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_ctime));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_attr_flags) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_attr_flags));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_bias) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_3) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_4) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_5) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5));
-
- /* Checks for struct mdt_rec_create */
- LASSERTF((int)sizeof(struct mdt_rec_create) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_create));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_cap));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_old_handle) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_old_handle));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_time) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_time));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_time));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_rdev) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_rdev));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_rdev) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_rdev));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_ioepoch) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_ioepoch));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_1) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_mode) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_mode));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_bias) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_bias));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_l) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_flags_l));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_h) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_flags_h));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_umask) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_umask));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_umask) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_umask));
- LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_4) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_create, cr_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4));
-
- /* Checks for struct mdt_rec_link */
- LASSERTF((int)sizeof(struct mdt_rec_link) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_link));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_cap));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_time));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_time));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_1) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_2) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_3) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_4) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_bias));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_5) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_6) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_7) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_8) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8));
- LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_9) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_link, lk_padding_9));
- LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9));
-
- /* Checks for struct mdt_rec_unlink */
- LASSERTF((int)sizeof(struct mdt_rec_unlink) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_unlink));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_time));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_time));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_2) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_3) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_4) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_5) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_6) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_7) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_8) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8));
- LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_9) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_9));
- LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9));
-
- /* Checks for struct mdt_rec_rename */
- LASSERTF((int)sizeof(struct mdt_rec_rename) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_rename));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_cap));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_time) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_time));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_time));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_1) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_2) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_3) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_4) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_bias));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_mode));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_5) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_6) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_7) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_8) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8));
-
- /* Checks for struct mdt_rec_setxattr */
- LASSERTF((int)sizeof(struct mdt_rec_setxattr) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_setxattr));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_1) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_1));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_2) == 64, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_2));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_3) == 68, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_3));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_valid) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_valid));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_time) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_time));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_5) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_5));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_6) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_6));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_7) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_7));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_size) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_size));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_flags) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_flags));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_8) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_8));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_9) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_9));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_10) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_10));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10));
- LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_11) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_11));
- LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11));
-
- /* Checks for struct mdt_rec_reint */
- LASSERTF((int)sizeof(struct mdt_rec_reint) == 136, "found %lld\n",
- (long long)(int)sizeof(struct mdt_rec_reint));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_opcode) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_opcode));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_cap) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_cap));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_cap) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_cap));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsuid_h) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid_h) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1_h) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fid1));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_fid2));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mtime) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_mtime));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_atime) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_atime));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_atime));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_ctime) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_ctime));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_size) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_size));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_size));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_blocks) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_blocks));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_blocks));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_bias) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_bias));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_bias) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_bias));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_mode));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mode));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_flags));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags_h) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_flags_h));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_umask) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_umask));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_umask) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_umask));
- LASSERTF((int)offsetof(struct mdt_rec_reint, rr_padding_4) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_rec_reint, rr_padding_4));
- LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4));
-
- /* Checks for struct lmv_desc */
- LASSERTF((int)sizeof(struct lmv_desc) == 88, "found %lld\n",
- (long long)(int)sizeof(struct lmv_desc));
- LASSERTF((int)offsetof(struct lmv_desc, ld_tgt_count) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_tgt_count));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_tgt_count));
- LASSERTF((int)offsetof(struct lmv_desc, ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_active_tgt_count));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count));
- LASSERTF((int)offsetof(struct lmv_desc, ld_default_stripe_count) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_default_stripe_count));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count));
- LASSERTF((int)offsetof(struct lmv_desc, ld_pattern) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_pattern));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_pattern));
- LASSERTF((int)offsetof(struct lmv_desc, ld_default_hash_size) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_default_hash_size));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_1) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_1));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_1));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_2) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_2));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_2));
- LASSERTF((int)offsetof(struct lmv_desc, ld_qos_maxage) == 36, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_qos_maxage));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_3) == 40, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_3));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_3));
- LASSERTF((int)offsetof(struct lmv_desc, ld_padding_4) == 44, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_padding_4));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_4) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_4));
- LASSERTF((int)offsetof(struct lmv_desc, ld_uuid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lmv_desc, ld_uuid));
- LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_desc *)0)->ld_uuid));
-
- /* Checks for struct lmv_stripe_md */
- LASSERTF((int)sizeof(struct lmv_stripe_md) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lmv_stripe_md));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_magic));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_magic));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_count));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_count));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_master) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_master));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_master) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_master));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_padding));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_padding));
- CLASSERT(LOV_MAXPOOLNAME == 16);
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_pool_name[16]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_pool_name[16]));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_pool_name[16]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_pool_name[16]));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_ids[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_ids[0]));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_ids[0]) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_ids[0]));
-
- /* Checks for struct lov_desc */
- LASSERTF((int)sizeof(struct lov_desc) == 88, "found %lld\n",
- (long long)(int)sizeof(struct lov_desc));
- LASSERTF((int)offsetof(struct lov_desc, ld_tgt_count) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_tgt_count));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_tgt_count));
- LASSERTF((int)offsetof(struct lov_desc, ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_active_tgt_count));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_active_tgt_count));
- LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_count) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_default_stripe_count));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count));
- LASSERTF((int)offsetof(struct lov_desc, ld_pattern) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_pattern));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_pattern) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_pattern));
- LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_size) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_default_stripe_size));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size));
- LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_offset) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_default_stripe_offset));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset));
- LASSERTF((int)offsetof(struct lov_desc, ld_padding_0) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_padding_0));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_0) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_0));
- LASSERTF((int)offsetof(struct lov_desc, ld_qos_maxage) == 36, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_qos_maxage));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_qos_maxage) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_qos_maxage));
- LASSERTF((int)offsetof(struct lov_desc, ld_padding_1) == 40, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_padding_1));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_1));
- LASSERTF((int)offsetof(struct lov_desc, ld_padding_2) == 44, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_padding_2));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_2));
- LASSERTF((int)offsetof(struct lov_desc, ld_uuid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lov_desc, ld_uuid));
- LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct lov_desc *)0)->ld_uuid));
- CLASSERT(LOV_DESC_MAGIC == 0xB0CCDE5C);
-
- /* Checks for struct ldlm_res_id */
- LASSERTF((int)sizeof(struct ldlm_res_id) == 32, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_res_id));
- CLASSERT(RES_NAME_SIZE == 4);
- LASSERTF((int)offsetof(struct ldlm_res_id, name[4]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_res_id, name[4]));
- LASSERTF((int)sizeof(((struct ldlm_res_id *)0)->name[4]) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_res_id *)0)->name[4]));
-
- /* Checks for struct ldlm_extent */
- LASSERTF((int)sizeof(struct ldlm_extent) == 24, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_extent));
- LASSERTF((int)offsetof(struct ldlm_extent, start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_extent, start));
- LASSERTF((int)sizeof(((struct ldlm_extent *)0)->start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_extent *)0)->start));
- LASSERTF((int)offsetof(struct ldlm_extent, end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_extent, end));
- LASSERTF((int)sizeof(((struct ldlm_extent *)0)->end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_extent *)0)->end));
- LASSERTF((int)offsetof(struct ldlm_extent, gid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_extent, gid));
- LASSERTF((int)sizeof(((struct ldlm_extent *)0)->gid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_extent *)0)->gid));
-
- /* Checks for struct ldlm_inodebits */
- LASSERTF((int)sizeof(struct ldlm_inodebits) == 8, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_inodebits));
- LASSERTF((int)offsetof(struct ldlm_inodebits, bits) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_inodebits, bits));
- LASSERTF((int)sizeof(((struct ldlm_inodebits *)0)->bits) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_inodebits *)0)->bits));
-
- /* Checks for struct ldlm_flock_wire */
- LASSERTF((int)sizeof(struct ldlm_flock_wire) == 32, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_flock_wire));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_start));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_end) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_end));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_owner) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_owner));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_padding) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_padding));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_padding));
- LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_pid) == 28, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_flock_wire, lfw_pid));
- LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid));
-
- /* Checks for struct ldlm_intent */
- LASSERTF((int)sizeof(struct ldlm_intent) == 8, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_intent));
- LASSERTF((int)offsetof(struct ldlm_intent, opc) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_intent, opc));
- LASSERTF((int)sizeof(((struct ldlm_intent *)0)->opc) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_intent *)0)->opc));
-
- /* Checks for struct ldlm_resource_desc */
- LASSERTF((int)sizeof(struct ldlm_resource_desc) == 40, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_resource_desc));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_type));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_type));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_padding) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_padding));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_name) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_name));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_name) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_name));
-
- /* Checks for struct ldlm_lock_desc */
- LASSERTF((int)sizeof(struct ldlm_lock_desc) == 80, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_lock_desc));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_resource) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_resource));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_resource) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_resource));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_req_mode) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_req_mode));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_granted_mode) == 44, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_granted_mode));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode));
- LASSERTF((int)offsetof(struct ldlm_lock_desc, l_policy_data) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_lock_desc, l_policy_data));
- LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data));
-
- /* Checks for struct ldlm_request */
- LASSERTF((int)sizeof(struct ldlm_request) == 104, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_request));
- LASSERTF((int)offsetof(struct ldlm_request, lock_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_flags));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_flags));
- LASSERTF((int)offsetof(struct ldlm_request, lock_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_count));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_count));
- LASSERTF((int)offsetof(struct ldlm_request, lock_desc) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_desc));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_desc) == 80, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_desc));
- LASSERTF((int)offsetof(struct ldlm_request, lock_handle) == 88, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_request, lock_handle));
- LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_handle) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_request *)0)->lock_handle));
-
- /* Checks for struct ldlm_reply */
- LASSERTF((int)sizeof(struct ldlm_reply) == 112, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_reply));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_flags));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_flags));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_padding) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_padding));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_padding));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_desc) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_desc));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_desc) == 80, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_desc));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_handle) == 88, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_handle));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_handle));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res1) == 96, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_policy_res1));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1));
- LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res2) == 104, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_reply, lock_policy_res2));
- LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2));
-
- /* Checks for struct ost_lvb_v1 */
- LASSERTF((int)sizeof(struct ost_lvb_v1) == 40, "found %lld\n",
- (long long)(int)sizeof(struct ost_lvb_v1));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_size) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_size));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_mtime) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_mtime));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_atime) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_atime));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_ctime) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_ctime));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime));
- LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_blocks) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb_v1, lvb_blocks));
- LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_blocks));
-
- /* Checks for struct ost_lvb */
- LASSERTF((int)sizeof(struct ost_lvb) == 56, "found %lld\n",
- (long long)(int)sizeof(struct ost_lvb));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_size) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_size));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_size));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_mtime));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_atime) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_atime));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_ctime));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_blocks) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_blocks));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_blocks));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime_ns) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_mtime_ns));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_atime_ns) == 44, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_atime_ns));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime_ns) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_ctime_ns));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns));
- LASSERTF((int)offsetof(struct ost_lvb, lvb_padding) == 52, "found %lld\n",
- (long long)(int)offsetof(struct ost_lvb, lvb_padding));
- LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_padding));
-
- /* Checks for struct lquota_lvb */
- LASSERTF((int)sizeof(struct lquota_lvb) == 40, "found %lld\n",
- (long long)(int)sizeof(struct lquota_lvb));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_flags) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_flags));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_flags));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_may_rel) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_id_may_rel));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_rel) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_id_rel));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_qunit) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_id_qunit));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit));
- LASSERTF((int)offsetof(struct lquota_lvb, lvb_pad1) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lquota_lvb, lvb_pad1));
- LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_pad1) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_pad1));
- LASSERTF(LQUOTA_FL_EDQUOT == 1, "found %lld\n",
- (long long)LQUOTA_FL_EDQUOT);
-
- /* Checks for struct ldlm_gl_lquota_desc */
- LASSERTF((int)sizeof(struct ldlm_gl_lquota_desc) == 64, "found %lld\n",
- (long long)(int)sizeof(struct ldlm_gl_lquota_desc));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_id) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_id));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_flags));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_ver) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_ver));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_softlimit) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_softlimit));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_time) == 48, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_time));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time));
- LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2));
- LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2));
-
- /* Checks for struct mgs_send_param */
- LASSERTF((int)sizeof(struct mgs_send_param) == 1024, "found %lld\n",
- (long long)(int)sizeof(struct mgs_send_param));
- CLASSERT(MGS_PARAM_MAXLEN == 1024);
- LASSERTF((int)offsetof(struct mgs_send_param, mgs_param[1024]) == 1024, "found %lld\n",
- (long long)(int)offsetof(struct mgs_send_param, mgs_param[1024]));
- LASSERTF((int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024]));
-
- /* Checks for struct cfg_marker */
- LASSERTF((int)sizeof(struct cfg_marker) == 160, "found %lld\n",
- (long long)(int)sizeof(struct cfg_marker));
- LASSERTF((int)offsetof(struct cfg_marker, cm_step) == 0, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_step));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_step) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_step));
- LASSERTF((int)offsetof(struct cfg_marker, cm_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_flags));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_flags));
- LASSERTF((int)offsetof(struct cfg_marker, cm_vers) == 8, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_vers));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_vers) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_vers));
- LASSERTF((int)offsetof(struct cfg_marker, cm_padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_padding));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_padding));
- LASSERTF((int)offsetof(struct cfg_marker, cm_createtime) == 16, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_createtime));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_createtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_createtime));
- LASSERTF((int)offsetof(struct cfg_marker, cm_canceltime) == 24, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_canceltime));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_canceltime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_canceltime));
- LASSERTF((int)offsetof(struct cfg_marker, cm_tgtname) == 32, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_tgtname));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_tgtname) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_tgtname));
- LASSERTF((int)offsetof(struct cfg_marker, cm_comment) == 96, "found %lld\n",
- (long long)(int)offsetof(struct cfg_marker, cm_comment));
- LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_comment) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct cfg_marker *)0)->cm_comment));
-
- /* Checks for struct llog_logid */
- LASSERTF((int)sizeof(struct llog_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(struct llog_logid));
- LASSERTF((int)offsetof(struct llog_logid, lgl_oi) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid, lgl_oi));
- LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid *)0)->lgl_oi));
- LASSERTF((int)offsetof(struct llog_logid, lgl_ogen) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid, lgl_ogen));
- LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_ogen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid *)0)->lgl_ogen));
- CLASSERT(OST_SZ_REC == 274730752);
- CLASSERT(MDS_UNLINK_REC == 274801668);
- CLASSERT(MDS_UNLINK64_REC == 275325956);
- CLASSERT(MDS_SETATTR64_REC == 275325953);
- CLASSERT(OBD_CFG_REC == 274857984);
- CLASSERT(LLOG_GEN_REC == 274989056);
- CLASSERT(CHANGELOG_REC == 275120128);
- CLASSERT(CHANGELOG_USER_REC == 275185664);
- CLASSERT(LLOG_HDR_MAGIC == 275010873);
- CLASSERT(LLOG_LOGID_MAGIC == 275010875);
-
- /* Checks for struct llog_catid */
- LASSERTF((int)sizeof(struct llog_catid) == 32, "found %lld\n",
- (long long)(int)sizeof(struct llog_catid));
- LASSERTF((int)offsetof(struct llog_catid, lci_logid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_logid));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_logid));
- LASSERTF((int)offsetof(struct llog_catid, lci_padding1) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_padding1));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding1));
- LASSERTF((int)offsetof(struct llog_catid, lci_padding2) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_padding2));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding2));
- LASSERTF((int)offsetof(struct llog_catid, lci_padding3) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_catid, lci_padding3));
- LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding3) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding3));
-
- /* Checks for struct llog_rec_hdr */
- LASSERTF((int)sizeof(struct llog_rec_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(struct llog_rec_hdr));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_len) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_len));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_len));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_index) == 4, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_index));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_index));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_type) == 8, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_type));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_type));
- LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_id) == 12, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_hdr, lrh_id));
- LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_id));
-
- /* Checks for struct llog_rec_tail */
- LASSERTF((int)sizeof(struct llog_rec_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(struct llog_rec_tail));
- LASSERTF((int)offsetof(struct llog_rec_tail, lrt_len) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_tail, lrt_len));
- LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_len));
- LASSERTF((int)offsetof(struct llog_rec_tail, lrt_index) == 4, "found %lld\n",
- (long long)(int)offsetof(struct llog_rec_tail, lrt_index));
- LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_index));
-
- /* Checks for struct llog_logid_rec */
- LASSERTF((int)sizeof(struct llog_logid_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_logid_rec));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_hdr));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_hdr));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_id) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_id));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_id) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_id));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding1) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_padding1));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding1));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding2) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_padding2));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding2));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding3) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_padding3));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding3));
- LASSERTF((int)offsetof(struct llog_logid_rec, lid_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_logid_rec, lid_tail));
- LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_tail));
-
- /* Checks for struct llog_unlink_rec */
- LASSERTF((int)sizeof(struct llog_unlink_rec) == 40, "found %lld\n",
- (long long)(int)sizeof(struct llog_unlink_rec));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_hdr));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_oid));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oid));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oseq) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_oseq));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_count) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_count));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_count));
- LASSERTF((int)offsetof(struct llog_unlink_rec, lur_tail) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink_rec, lur_tail));
- LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_tail));
- /* Checks for struct llog_unlink64_rec */
- LASSERTF((int)sizeof(struct llog_unlink64_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_unlink64_rec));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_hdr));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_fid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_fid));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_count) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_count));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_count));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_tail));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding1) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding1));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding2) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding2));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2));
- LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding3) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding3));
- LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3));
-
- /* Checks for struct llog_setattr64_rec */
- LASSERTF((int)sizeof(struct llog_setattr64_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_setattr64_rec));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_hdr));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_oi) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_oi));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid_h) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid_h));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid_h) == 44, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid_h));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_padding) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_padding));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_padding) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_padding));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_tail));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail));
-
- /* Checks for struct llog_size_change_rec */
- LASSERTF((int)sizeof(struct llog_size_change_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_size_change_rec));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_hdr));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_fid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_fid));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_ioepoch) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_ioepoch));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_ioepoch) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_ioepoch));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding1) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding1));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding2) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding2));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding3) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding3));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3));
- LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_size_change_rec, lsc_tail));
- LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail));
-
- /* Checks for struct changelog_rec */
- LASSERTF((int)sizeof(struct changelog_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct changelog_rec));
- LASSERTF((int)offsetof(struct changelog_rec, cr_namelen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_namelen));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_namelen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_namelen));
- LASSERTF((int)offsetof(struct changelog_rec, cr_flags) == 2, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_flags));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_flags));
- LASSERTF((int)offsetof(struct changelog_rec, cr_type) == 4, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_type));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_type));
- LASSERTF((int)offsetof(struct changelog_rec, cr_index) == 8, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_index));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_index) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_index));
- LASSERTF((int)offsetof(struct changelog_rec, cr_prev) == 16, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_prev));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_prev) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_prev));
- LASSERTF((int)offsetof(struct changelog_rec, cr_time) == 24, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_time));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_time));
- LASSERTF((int)offsetof(struct changelog_rec, cr_tfid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_tfid));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_tfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_tfid));
- LASSERTF((int)offsetof(struct changelog_rec, cr_pfid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct changelog_rec, cr_pfid));
- LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_pfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_rec *)0)->cr_pfid));
-
- /* Checks for struct changelog_ext_rec */
- LASSERTF((int)sizeof(struct changelog_ext_rec) == 96, "found %lld\n",
- (long long)(int)sizeof(struct changelog_ext_rec));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_namelen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_namelen));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_namelen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_namelen));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_flags) == 2, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_flags));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_flags));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_type) == 4, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_type));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_type));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_index) == 8, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_index));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_index) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_index));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_prev) == 16, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_prev));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_prev) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_prev));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_time) == 24, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_time));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_time));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_tfid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_tfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_tfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_tfid));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_pfid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_pfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_pfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_pfid));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_sfid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_sfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_sfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_sfid));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_spfid) == 80, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_spfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_spfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_spfid));
-
- /* Checks for struct changelog_setinfo */
- LASSERTF((int)sizeof(struct changelog_setinfo) == 12, "found %lld\n",
- (long long)(int)sizeof(struct changelog_setinfo));
- LASSERTF((int)offsetof(struct changelog_setinfo, cs_recno) == 0, "found %lld\n",
- (long long)(int)offsetof(struct changelog_setinfo, cs_recno));
- LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_recno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_recno));
- LASSERTF((int)offsetof(struct changelog_setinfo, cs_id) == 8, "found %lld\n",
- (long long)(int)offsetof(struct changelog_setinfo, cs_id));
- LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_id));
-
- /* Checks for struct llog_changelog_rec */
- LASSERTF((int)sizeof(struct llog_changelog_rec) == 88, "found %lld\n",
- (long long)(int)sizeof(struct llog_changelog_rec));
- LASSERTF((int)offsetof(struct llog_changelog_rec, cr_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_rec, cr_hdr));
- LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr));
- LASSERTF((int)offsetof(struct llog_changelog_rec, cr) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_rec, cr));
- LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr));
- LASSERTF((int)offsetof(struct llog_changelog_rec, cr_tail) == 80, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_rec, cr_tail));
- LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_tail));
-
- /* Checks for struct llog_changelog_user_rec */
- LASSERTF((int)sizeof(struct llog_changelog_user_rec) == 40, "found %lld\n",
- (long long)(int)sizeof(struct llog_changelog_user_rec));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_hdr));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_id) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_id));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_padding) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_padding));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_endrec) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_endrec));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec));
- LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_tail) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_user_rec, cur_tail));
- LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail));
-
- /* Checks for struct llog_gen */
- LASSERTF((int)sizeof(struct llog_gen) == 16, "found %lld\n",
- (long long)(int)sizeof(struct llog_gen));
- LASSERTF((int)offsetof(struct llog_gen, mnt_cnt) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen, mnt_cnt));
- LASSERTF((int)sizeof(((struct llog_gen *)0)->mnt_cnt) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen *)0)->mnt_cnt));
- LASSERTF((int)offsetof(struct llog_gen, conn_cnt) == 8, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen, conn_cnt));
- LASSERTF((int)sizeof(((struct llog_gen *)0)->conn_cnt) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen *)0)->conn_cnt));
-
- /* Checks for struct llog_gen_rec */
- LASSERTF((int)sizeof(struct llog_gen_rec) == 64, "found %lld\n",
- (long long)(int)sizeof(struct llog_gen_rec));
- LASSERTF((int)offsetof(struct llog_gen_rec, lgr_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen_rec, lgr_hdr));
- LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr));
- LASSERTF((int)offsetof(struct llog_gen_rec, lgr_gen) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen_rec, lgr_gen));
- LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_gen) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_gen));
- LASSERTF((int)offsetof(struct llog_gen_rec, lgr_tail) == 56, "found %lld\n",
- (long long)(int)offsetof(struct llog_gen_rec, lgr_tail));
- LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_tail));
-
- /* Checks for struct llog_log_hdr */
- LASSERTF((int)sizeof(struct llog_log_hdr) == 8192, "found %lld\n",
- (long long)(int)sizeof(struct llog_log_hdr));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_hdr) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_hdr));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_hdr) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_hdr));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_timestamp) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_timestamp));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_count) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_count));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_count));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap_offset) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap_offset));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_size) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_size));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_size) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_size));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_flags) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_flags));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_flags));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_cat_idx) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_cat_idx));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_tgtuuid) == 44, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_tgtuuid));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_reserved) == 84, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_reserved));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_reserved) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_reserved));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap) == 88, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap) == 8096, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap));
- LASSERTF((int)offsetof(struct llog_log_hdr, llh_tail) == 8184, "found %lld\n",
- (long long)(int)offsetof(struct llog_log_hdr, llh_tail));
- LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tail));
-
- /* Checks for struct llog_cookie */
- LASSERTF((int)sizeof(struct llog_cookie) == 32, "found %lld\n",
- (long long)(int)sizeof(struct llog_cookie));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_lgl) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_lgl));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_lgl) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_lgl));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_subsys) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_subsys));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_subsys) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_subsys));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_index) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_index));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_index));
- LASSERTF((int)offsetof(struct llog_cookie, lgc_padding) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llog_cookie, lgc_padding));
- LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_padding));
-
- /* Checks for struct llogd_body */
- LASSERTF((int)sizeof(struct llogd_body) == 48, "found %lld\n",
- (long long)(int)sizeof(struct llogd_body));
- LASSERTF((int)offsetof(struct llogd_body, lgd_logid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_logid));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_logid));
- LASSERTF((int)offsetof(struct llogd_body, lgd_ctxt_idx) == 20, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_ctxt_idx));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx));
- LASSERTF((int)offsetof(struct llogd_body, lgd_llh_flags) == 24, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_llh_flags));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_llh_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_llh_flags));
- LASSERTF((int)offsetof(struct llogd_body, lgd_index) == 28, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_index));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_index));
- LASSERTF((int)offsetof(struct llogd_body, lgd_saved_index) == 32, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_saved_index));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_saved_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_saved_index));
- LASSERTF((int)offsetof(struct llogd_body, lgd_len) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_len));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_len));
- LASSERTF((int)offsetof(struct llogd_body, lgd_cur_offset) == 40, "found %lld\n",
- (long long)(int)offsetof(struct llogd_body, lgd_cur_offset));
- LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_cur_offset) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_body *)0)->lgd_cur_offset));
- CLASSERT(LLOG_ORIGIN_HANDLE_CREATE == 501);
- CLASSERT(LLOG_ORIGIN_HANDLE_NEXT_BLOCK == 502);
- CLASSERT(LLOG_ORIGIN_HANDLE_READ_HEADER == 503);
- CLASSERT(LLOG_ORIGIN_HANDLE_WRITE_REC == 504);
- CLASSERT(LLOG_ORIGIN_HANDLE_CLOSE == 505);
- CLASSERT(LLOG_ORIGIN_CONNECT == 506);
- CLASSERT(LLOG_CATINFO == 507);
- CLASSERT(LLOG_ORIGIN_HANDLE_PREV_BLOCK == 508);
- CLASSERT(LLOG_ORIGIN_HANDLE_DESTROY == 509);
- CLASSERT(LLOG_FIRST_OPC == 501);
- CLASSERT(LLOG_LAST_OPC == 510);
-
- /* Checks for struct llogd_conn_body */
- LASSERTF((int)sizeof(struct llogd_conn_body) == 40, "found %lld\n",
- (long long)(int)sizeof(struct llogd_conn_body));
- LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_gen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct llogd_conn_body, lgdc_gen));
- LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen));
- LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_logid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct llogd_conn_body, lgdc_logid));
- LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid) == 20, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid));
- LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx) == 36, "found %lld\n",
- (long long)(int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx));
- LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx));
-
- /* Checks for struct ll_fiemap_info_key */
- LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n",
- (long long)(int)sizeof(struct ll_fiemap_info_key));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, name[8]) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, name[8]));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, oa) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, oa));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->oa) == 208, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->oa));
- LASSERTF((int)offsetof(struct ll_fiemap_info_key, fiemap) == 216, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_info_key, fiemap));
- LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
-
- /* Checks for struct quota_body */
- LASSERTF((int)sizeof(struct quota_body) == 112, "found %lld\n",
- (long long)(int)sizeof(struct quota_body));
- LASSERTF((int)offsetof(struct quota_body, qb_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_fid));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_fid));
- LASSERTF((int)offsetof(struct quota_body, qb_id) == 16, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_id));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_id) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_id));
- LASSERTF((int)offsetof(struct quota_body, qb_flags) == 32, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_flags));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_flags));
- LASSERTF((int)offsetof(struct quota_body, qb_padding) == 36, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_padding));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_padding));
- LASSERTF((int)offsetof(struct quota_body, qb_count) == 40, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_count));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_count) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_count));
- LASSERTF((int)offsetof(struct quota_body, qb_usage) == 48, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_usage));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_usage) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_usage));
- LASSERTF((int)offsetof(struct quota_body, qb_slv_ver) == 56, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_slv_ver));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_slv_ver) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_slv_ver));
- LASSERTF((int)offsetof(struct quota_body, qb_lockh) == 64, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_lockh));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_lockh) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_lockh));
- LASSERTF((int)offsetof(struct quota_body, qb_glb_lockh) == 72, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_glb_lockh));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_glb_lockh) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_glb_lockh));
- LASSERTF((int)offsetof(struct quota_body, qb_padding1[4]) == 112, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_padding1[4]));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding1[4]) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_padding1[4]));
-
- /* Checks for struct mgs_target_info */
- LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
- (long long)(int)sizeof(struct mgs_target_info));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_lustre_ver) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_lustre_ver));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_stripe_index) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_stripe_index));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_config_ver) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_config_ver));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_config_ver) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_config_ver));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_flags) == 12, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_flags));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_flags));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_nid_count) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_nid_count));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nid_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nid_count));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_instance) == 20, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_instance));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_instance) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_instance));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_fsname) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_fsname));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_fsname) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_fsname));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_svname) == 88, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_svname));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_svname) == 64, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_svname));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_uuid) == 152, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_uuid));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_uuid) == 40, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_uuid));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_nids) == 192, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_nids));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nids) == 256, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nids));
- LASSERTF((int)offsetof(struct mgs_target_info, mti_params) == 448, "found %lld\n",
- (long long)(int)offsetof(struct mgs_target_info, mti_params));
- LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_params) == 4096, "found %lld\n",
- (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_params));
-
- /* Checks for struct lustre_capa */
- LASSERTF((int)sizeof(struct lustre_capa) == 120, "found %lld\n",
- (long long)(int)sizeof(struct lustre_capa));
- LASSERTF((int)offsetof(struct lustre_capa, lc_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_fid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_fid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_opc) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_opc));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_opc) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_opc));
- LASSERTF((int)offsetof(struct lustre_capa, lc_uid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_uid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_uid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_uid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_gid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_gid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_gid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_gid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_flags));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_flags));
- LASSERTF((int)offsetof(struct lustre_capa, lc_keyid) == 44, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_keyid));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_keyid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_keyid));
- LASSERTF((int)offsetof(struct lustre_capa, lc_timeout) == 48, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_timeout));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_timeout) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_timeout));
- LASSERTF((int)offsetof(struct lustre_capa, lc_expiry) == 52, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_expiry));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_expiry) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_expiry));
- CLASSERT(CAPA_HMAC_MAX_LEN == 64);
- LASSERTF((int)offsetof(struct lustre_capa, lc_hmac[64]) == 120, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa, lc_hmac[64]));
- LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_hmac[64]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa *)0)->lc_hmac[64]));
-
- /* Checks for struct lustre_capa_key */
- LASSERTF((int)sizeof(struct lustre_capa_key) == 72, "found %lld\n",
- (long long)(int)sizeof(struct lustre_capa_key));
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_seq) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_seq));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_seq) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_seq));
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_keyid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_keyid));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_keyid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_keyid));
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_padding));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_padding));
- CLASSERT(CAPA_HMAC_KEY_MAX_LEN == 56);
- LASSERTF((int)offsetof(struct lustre_capa_key, lk_key[56]) == 72, "found %lld\n",
- (long long)(int)offsetof(struct lustre_capa_key, lk_key[56]));
- LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_key[56]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_key[56]));
-
- /* Checks for struct getinfo_fid2path */
- LASSERTF((int)sizeof(struct getinfo_fid2path) == 32, "found %lld\n",
- (long long)(int)sizeof(struct getinfo_fid2path));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_fid));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_fid));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_recno) == 16, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_recno));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_recno) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_recno));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_linkno) == 24, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_linkno));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_pathlen) == 28, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_pathlen));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen));
- LASSERTF((int)offsetof(struct getinfo_fid2path, gf_path[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct getinfo_fid2path, gf_path[0]));
- LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]));
-
- /* Checks for struct ll_user_fiemap */
- LASSERTF((int)sizeof(struct ll_user_fiemap) == 32, "found %lld\n",
- (long long)(int)sizeof(struct ll_user_fiemap));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_start) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_start));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_start));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_length) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_length));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_length));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_flags));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_flags));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_mapped_extents) == 20, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_mapped_extents));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extent_count) == 24, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_extent_count));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_reserved) == 28, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_reserved));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved));
- LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extents) == 32, "found %lld\n",
- (long long)(int)offsetof(struct ll_user_fiemap, fm_extents));
- LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extents) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extents));
- CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001);
- CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002);
- CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000);
-
- /* Checks for struct ll_fiemap_extent */
- LASSERTF((int)sizeof(struct ll_fiemap_extent) == 56, "found %lld\n",
- (long long)(int)sizeof(struct ll_fiemap_extent));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_logical) == 0, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_logical));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_physical) == 8, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_physical));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_length) == 16, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_length));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_length));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_flags));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags));
- LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_device) == 44, "found %lld\n",
- (long long)(int)offsetof(struct ll_fiemap_extent, fe_device));
- LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_device) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_device));
- CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001);
- CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002);
- CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004);
- CLASSERT(FIEMAP_EXTENT_ENCODED == 0x00000008);
- CLASSERT(FIEMAP_EXTENT_DATA_ENCRYPTED == 0x00000080);
- CLASSERT(FIEMAP_EXTENT_NOT_ALIGNED == 0x00000100);
- CLASSERT(FIEMAP_EXTENT_DATA_INLINE == 0x00000200);
- CLASSERT(FIEMAP_EXTENT_DATA_TAIL == 0x00000400);
- CLASSERT(FIEMAP_EXTENT_UNWRITTEN == 0x00000800);
- CLASSERT(FIEMAP_EXTENT_MERGED == 0x00001000);
- CLASSERT(FIEMAP_EXTENT_NO_DIRECT == 0x40000000);
- CLASSERT(FIEMAP_EXTENT_NET == 0x80000000);
-
- /* Checks for type posix_acl_xattr_entry */
- LASSERTF((int)sizeof(posix_acl_xattr_entry) == 8, "found %lld\n",
- (long long)(int)sizeof(posix_acl_xattr_entry));
- LASSERTF((int)offsetof(posix_acl_xattr_entry, e_tag) == 0, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_entry, e_tag));
- LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_tag));
- LASSERTF((int)offsetof(posix_acl_xattr_entry, e_perm) == 2, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_entry, e_perm));
- LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_perm));
- LASSERTF((int)offsetof(posix_acl_xattr_entry, e_id) == 4, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_entry, e_id));
- LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_id));
-
- /* Checks for type posix_acl_xattr_header */
- LASSERTF((int)sizeof(posix_acl_xattr_header) == 4, "found %lld\n",
- (long long)(int)sizeof(posix_acl_xattr_header));
- LASSERTF((int)offsetof(posix_acl_xattr_header, a_version) == 0, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_header, a_version));
- LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_version));
- LASSERTF((int)offsetof(posix_acl_xattr_header, a_entries) == 4, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_header, a_entries));
- LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_entries) == 0, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_entries));
-
- /* Checks for struct link_ea_header */
- LASSERTF((int)sizeof(struct link_ea_header) == 24, "found %lld\n",
- (long long)(int)sizeof(struct link_ea_header));
- LASSERTF((int)offsetof(struct link_ea_header, leh_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_magic));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_magic));
- LASSERTF((int)offsetof(struct link_ea_header, leh_reccount) == 4, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_reccount));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_reccount) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_reccount));
- LASSERTF((int)offsetof(struct link_ea_header, leh_len) == 8, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, leh_len));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_len) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->leh_len));
- LASSERTF((int)offsetof(struct link_ea_header, padding1) == 16, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, padding1));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->padding1));
- LASSERTF((int)offsetof(struct link_ea_header, padding2) == 20, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_header, padding2));
- LASSERTF((int)sizeof(((struct link_ea_header *)0)->padding2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_header *)0)->padding2));
- CLASSERT(LINK_EA_MAGIC == 0x11EAF1DFUL);
-
- /* Checks for struct link_ea_entry */
- LASSERTF((int)sizeof(struct link_ea_entry) == 18, "found %lld\n",
- (long long)(int)sizeof(struct link_ea_entry));
- LASSERTF((int)offsetof(struct link_ea_entry, lee_reclen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_entry, lee_reclen));
- LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_reclen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_reclen));
- LASSERTF((int)offsetof(struct link_ea_entry, lee_parent_fid) == 2, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_entry, lee_parent_fid));
- LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid));
- LASSERTF((int)offsetof(struct link_ea_entry, lee_name) == 18, "found %lld\n",
- (long long)(int)offsetof(struct link_ea_entry, lee_name));
- LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_name) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_name));
-
- /* Checks for struct layout_intent */
- LASSERTF((int)sizeof(struct layout_intent) == 24, "found %lld\n",
- (long long)(int)sizeof(struct layout_intent));
- LASSERTF((int)offsetof(struct layout_intent, li_opc) == 0, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_opc));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_opc) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_opc));
- LASSERTF((int)offsetof(struct layout_intent, li_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_flags));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_flags));
- LASSERTF((int)offsetof(struct layout_intent, li_start) == 8, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_start));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_start));
- LASSERTF((int)offsetof(struct layout_intent, li_end) == 16, "found %lld\n",
- (long long)(int)offsetof(struct layout_intent, li_end));
- LASSERTF((int)sizeof(((struct layout_intent *)0)->li_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct layout_intent *)0)->li_end));
- LASSERTF(LAYOUT_INTENT_ACCESS == 0, "found %lld\n",
- (long long)LAYOUT_INTENT_ACCESS);
- LASSERTF(LAYOUT_INTENT_READ == 1, "found %lld\n",
- (long long)LAYOUT_INTENT_READ);
- LASSERTF(LAYOUT_INTENT_WRITE == 2, "found %lld\n",
- (long long)LAYOUT_INTENT_WRITE);
- LASSERTF(LAYOUT_INTENT_GLIMPSE == 3, "found %lld\n",
- (long long)LAYOUT_INTENT_GLIMPSE);
- LASSERTF(LAYOUT_INTENT_TRUNC == 4, "found %lld\n",
- (long long)LAYOUT_INTENT_TRUNC);
- LASSERTF(LAYOUT_INTENT_RELEASE == 5, "found %lld\n",
- (long long)LAYOUT_INTENT_RELEASE);
- LASSERTF(LAYOUT_INTENT_RESTORE == 6, "found %lld\n",
- (long long)LAYOUT_INTENT_RESTORE);
-
- /* Checks for struct hsm_action_item */
- LASSERTF((int)sizeof(struct hsm_action_item) == 72, "found %lld\n",
- (long long)(int)sizeof(struct hsm_action_item));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_len) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_len));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_len));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_action) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_action));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_action));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_fid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_fid));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_fid));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_dfid) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_dfid));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_dfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_dfid));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_extent) == 40, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_extent));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_extent));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_cookie) == 56, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_cookie));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_cookie));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_gid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_gid));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_gid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_gid));
- LASSERTF((int)offsetof(struct hsm_action_item, hai_data) == 72, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_item, hai_data));
- LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_data) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_data));
-
- /* Checks for struct hsm_action_list */
- LASSERTF((int)sizeof(struct hsm_action_list) == 32, "found %lld\n",
- (long long)(int)sizeof(struct hsm_action_list));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_version) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_version));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_version));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_count));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_count));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_compound_id) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_compound_id));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_compound_id) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_compound_id));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_flags) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_flags));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_flags));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_archive_id) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_archive_id));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_archive_id));
- LASSERTF((int)offsetof(struct hsm_action_list, padding1) == 28, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, padding1));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->padding1));
- LASSERTF((int)offsetof(struct hsm_action_list, hal_fsname) == 32, "found %lld\n",
- (long long)(int)offsetof(struct hsm_action_list, hal_fsname));
- LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_fsname) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_fsname));
-
- /* Checks for struct hsm_progress */
- LASSERTF((int)sizeof(struct hsm_progress) == 48, "found %lld\n",
- (long long)(int)sizeof(struct hsm_progress));
- LASSERTF((int)offsetof(struct hsm_progress, hp_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_fid));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_fid));
- LASSERTF((int)offsetof(struct hsm_progress, hp_cookie) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_cookie));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_cookie));
- LASSERTF((int)offsetof(struct hsm_progress, hp_extent) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_extent));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_extent));
- LASSERTF((int)offsetof(struct hsm_progress, hp_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_flags));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_flags));
- LASSERTF((int)offsetof(struct hsm_progress, hp_errval) == 42, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, hp_errval));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_errval) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->hp_errval));
- LASSERTF((int)offsetof(struct hsm_progress, padding) == 44, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress, padding));
- LASSERTF((int)sizeof(((struct hsm_progress *)0)->padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress *)0)->padding));
- LASSERTF(HP_FLAG_COMPLETED == 0x01, "found 0x%.8x\n",
- HP_FLAG_COMPLETED);
- LASSERTF(HP_FLAG_RETRY == 0x02, "found 0x%.8x\n",
- HP_FLAG_RETRY);
-
- LASSERTF((int)offsetof(struct hsm_copy, hc_data_version) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_data_version));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_data_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_data_version));
- LASSERTF((int)offsetof(struct hsm_copy, hc_flags) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_flags));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_flags));
- LASSERTF((int)offsetof(struct hsm_copy, hc_errval) == 10, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_errval));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_errval) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_errval));
- LASSERTF((int)offsetof(struct hsm_copy, padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, padding));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->padding));
- LASSERTF((int)offsetof(struct hsm_copy, hc_hai) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_copy, hc_hai));
- LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_hai) == 72, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_copy *)0)->hc_hai));
-
- /* Checks for struct hsm_progress_kernel */
- LASSERTF((int)sizeof(struct hsm_progress_kernel) == 64, "found %lld\n",
- (long long)(int)sizeof(struct hsm_progress_kernel));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_fid));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_cookie) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_cookie));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_extent) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_extent));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_flags) == 40, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_flags));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_errval) == 42, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_errval));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding1) == 44, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding1));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_data_version) == 48, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_data_version));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version));
- LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding2) == 56, "found %lld\n",
- (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding2));
- LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2));
-
- /* Checks for struct hsm_user_item */
- LASSERTF((int)sizeof(struct hsm_user_item) == 32, "found %lld\n",
- (long long)(int)sizeof(struct hsm_user_item));
- LASSERTF((int)offsetof(struct hsm_user_item, hui_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_item, hui_fid));
- LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_fid));
- LASSERTF((int)offsetof(struct hsm_user_item, hui_extent) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_item, hui_extent));
- LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_extent) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_extent));
-
- /* Checks for struct hsm_user_state */
- LASSERTF((int)sizeof(struct hsm_user_state) == 32, "found %lld\n",
- (long long)(int)sizeof(struct hsm_user_state));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_states) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_states));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_states) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_states));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_archive_id) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_archive_id));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_archive_id));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_state) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_state));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_action) == 12, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_action));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action));
- LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_location) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_location));
- LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location));
-
- /* Checks for struct hsm_state_set */
- LASSERTF((int)sizeof(struct hsm_state_set) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_state_set));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_valid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_valid));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_valid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_valid));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_archive_id) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_archive_id));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_archive_id));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_setmask) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_setmask));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_setmask) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_setmask));
- LASSERTF((int)offsetof(struct hsm_state_set, hss_clearmask) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_state_set, hss_clearmask));
- LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_clearmask) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_clearmask));
-
- /* Checks for struct hsm_current_action */
- LASSERTF((int)sizeof(struct hsm_current_action) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_current_action));
- LASSERTF((int)offsetof(struct hsm_current_action, hca_state) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_current_action, hca_state));
- LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_state) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_state));
- LASSERTF((int)offsetof(struct hsm_current_action, hca_action) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_current_action, hca_action));
- LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_action));
- LASSERTF((int)offsetof(struct hsm_current_action, hca_location) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_current_action, hca_location));
- LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_location) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_location));
-
- /* Checks for struct hsm_request */
- LASSERTF((int)sizeof(struct hsm_request) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_request));
- LASSERTF((int)offsetof(struct hsm_request, hr_action) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_action));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_action) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_action));
- LASSERTF((int)offsetof(struct hsm_request, hr_archive_id) == 4, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_archive_id));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_archive_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_archive_id));
- LASSERTF((int)offsetof(struct hsm_request, hr_flags) == 8, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_flags));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_flags) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_flags));
- LASSERTF((int)offsetof(struct hsm_request, hr_itemcount) == 16, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_itemcount));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_itemcount) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_itemcount));
- LASSERTF((int)offsetof(struct hsm_request, hr_data_len) == 20, "found %lld\n",
- (long long)(int)offsetof(struct hsm_request, hr_data_len));
- LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
- LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)HSM_FORCE_ACTION);
- LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)HSM_GHOST_COPY);
-
- /* Checks for struct hsm_user_request */
- LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
- (long long)(int)sizeof(struct hsm_user_request));
- LASSERTF((int)offsetof(struct hsm_user_request, hur_request) == 0, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_request, hur_request));
- LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_request) == 24, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_request));
- LASSERTF((int)offsetof(struct hsm_user_request, hur_user_item) == 24, "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_request, hur_user_item));
- LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_user_item) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_user_item));
-
- /* Checks for struct hsm_user_import */
- LASSERTF(sizeof(struct hsm_user_import) == 48, "found %lld\n",
- (long long)sizeof(struct hsm_user_import));
- LASSERTF(offsetof(struct hsm_user_import, hui_size) == 0,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_size));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_size) == 8,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_size));
- LASSERTF(offsetof(struct hsm_user_import, hui_uid) == 32,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_uid));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_uid) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_uid));
- LASSERTF(offsetof(struct hsm_user_import, hui_gid) == 36,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_gid));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_gid) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_gid));
- LASSERTF(offsetof(struct hsm_user_import, hui_mode) == 40,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_mode));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mode) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_mode));
- LASSERTF(offsetof(struct hsm_user_import, hui_atime) == 8,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_atime));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime) == 8,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_atime));
- LASSERTF(offsetof(struct hsm_user_import, hui_atime_ns) == 24,
- "found %lld\n",
- (long long)(int)offsetof(struct hsm_user_import, hui_atime_ns));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime_ns) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_atime_ns));
- LASSERTF(offsetof(struct hsm_user_import, hui_mtime) == 16,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_mtime));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime) == 8,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime));
- LASSERTF(offsetof(struct hsm_user_import, hui_mtime_ns) == 28,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_mtime_ns));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime_ns) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime_ns));
- LASSERTF(offsetof(struct hsm_user_import, hui_archive_id) == 44,
- "found %lld\n",
- (long long)offsetof(struct hsm_user_import, hui_archive_id));
- LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4,
- "found %lld\n",
- (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id));
-
- /* Checks for struct update_buf */
- LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n",
- (long long)(int)sizeof(struct update_buf));
- LASSERTF((int)offsetof(struct update_buf, ub_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct update_buf, ub_magic));
- LASSERTF((int)sizeof(((struct update_buf *)0)->ub_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_buf *)0)->ub_magic));
- LASSERTF((int)offsetof(struct update_buf, ub_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct update_buf, ub_count));
- LASSERTF((int)sizeof(((struct update_buf *)0)->ub_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_buf *)0)->ub_count));
- LASSERTF((int)offsetof(struct update_buf, ub_bufs) == 8, "found %lld\n",
- (long long)(int)offsetof(struct update_buf, ub_bufs));
- LASSERTF((int)sizeof(((struct update_buf *)0)->ub_bufs) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct update_buf *)0)->ub_bufs));
-
- /* Checks for struct update_reply */
- LASSERTF((int)sizeof(struct update_reply) == 8, "found %lld\n",
- (long long)(int)sizeof(struct update_reply));
- LASSERTF((int)offsetof(struct update_reply, ur_version) == 0, "found %lld\n",
- (long long)(int)offsetof(struct update_reply, ur_version));
- LASSERTF((int)sizeof(((struct update_reply *)0)->ur_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_reply *)0)->ur_version));
- LASSERTF((int)offsetof(struct update_reply, ur_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct update_reply, ur_count));
- LASSERTF((int)sizeof(((struct update_reply *)0)->ur_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_reply *)0)->ur_count));
- LASSERTF((int)offsetof(struct update_reply, ur_lens) == 8, "found %lld\n",
- (long long)(int)offsetof(struct update_reply, ur_lens));
- LASSERTF((int)sizeof(((struct update_reply *)0)->ur_lens) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct update_reply *)0)->ur_lens));
-
- /* Checks for struct update */
- LASSERTF((int)sizeof(struct update) == 56, "found %lld\n",
- (long long)(int)sizeof(struct update));
- LASSERTF((int)offsetof(struct update, u_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct update, u_type));
- LASSERTF((int)sizeof(((struct update *)0)->u_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_type));
- LASSERTF((int)offsetof(struct update, u_batchid) == 4, "found %lld\n",
- (long long)(int)offsetof(struct update, u_batchid));
- LASSERTF((int)sizeof(((struct update *)0)->u_batchid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_batchid));
- LASSERTF((int)offsetof(struct update, u_fid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct update, u_fid));
- LASSERTF((int)sizeof(((struct update *)0)->u_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_fid));
- LASSERTF((int)offsetof(struct update, u_lens) == 24, "found %lld\n",
- (long long)(int)offsetof(struct update, u_lens));
- LASSERTF((int)sizeof(((struct update *)0)->u_lens) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_lens));
- LASSERTF((int)offsetof(struct update, u_bufs) == 56, "found %lld\n",
- (long long)(int)offsetof(struct update, u_bufs));
- LASSERTF((int)sizeof(((struct update *)0)->u_bufs) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_bufs));
-}
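The block removed above is the tail of Lustre's "wirecheck" routine: for every on-wire struct it pins the total size and each member's offset and size with runtime LASSERTF() calls, so any accidental change to the wire ABI is caught immediately. As a rough illustration only (the struct below is hypothetical and not Lustre code), the same layout-pinning pattern can be written with standard C:

/*
 * Hypothetical struct and checks -- a sketch of the layout-pinning
 * pattern above, not Lustre code.  The stated offsets assume the
 * usual natural-alignment ABI (x86_64 and most other platforms).
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_wire_item {
	uint32_t dwi_len;	/* offset 0,  size 4 */
	uint32_t dwi_action;	/* offset 4,  size 4 */
	uint64_t dwi_cookie;	/* offset 8,  size 8 */
};

/* Compile-time equivalents of the removed LASSERTF() layout checks. */
_Static_assert(sizeof(struct demo_wire_item) == 16, "struct size drifted");
_Static_assert(offsetof(struct demo_wire_item, dwi_action) == 4,
	       "dwi_action moved");
_Static_assert(offsetof(struct demo_wire_item, dwi_cookie) == 8,
	       "dwi_cookie moved");

int main(void)
{
	/* Runtime form, closer in spirit to the removed LASSERTF() calls. */
	assert((long long)(int)sizeof(struct demo_wire_item) == 16);
	printf("demo_wire_item wire layout ok\n");
	return 0;
}

The compile-time form fails the build as soon as a field is reordered or resized, which is the same drift the deleted checks guarded against at module-load time.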
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
deleted file mode 100644
index 873e2cf31217..000000000000
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ /dev/null
@@ -1,646 +0,0 @@
-What: /sys/fs/lustre/version
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows current running lustre version.
-
-What: /sys/fs/lustre/pinger
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows if the lustre module has pinger support.
- "on" means yes and "off" means no.
-
-What: /sys/fs/lustre/health
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Shows whether the current system state is believed to be
-		"healthy" or "NOT HEALTHY", or "LBUG" when lustre has
-		experienced an internal assertion failure.
-
-What: /sys/fs/lustre/jobid_name
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Currently running job "name" for this node to be transferred
- to Lustre servers for purposes of QoS and statistics gathering.
-		Writing into this file will change the name; reading outputs
-		the currently set value.
-
-What: /sys/fs/lustre/jobid_var
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Control file for lustre "jobstats" functionality, write new
- value from the list below to change the mode:
- disable - disable job name reporting to the servers (default)
- procname_uid - form the job name as the current running
- command name and pid with a dot in between
- e.g. dd.1253
- nodelocal - use jobid_name value from above.
-
-What: /sys/fs/lustre/timeout
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls the "lustre timeout" variable, also known as
-		obd_timeout in some older manuals. In the past obd_timeout was
-		of paramount importance as the timeout value used everywhere
-		and from which other timeouts were derived. These days it is
-		much less important, as network timeouts are mostly determined
-		by AT (adaptive timeouts).
- Unit: seconds, default: 100
-
-What: /sys/fs/lustre/max_dirty_mb
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls the total amount of dirty cache (in megabytes) allowed
-		across all mounted lustre filesystems.
-		Since writeout of dirty pages in Lustre is somewhat expensive,
-		allowing too many dirty pages might lead to performance
-		degradation as the kernel desperately tries to find pages to
-		free or write out.
-		Default: 1/2 of RAM. Min value 4, max value 9/10 of RAM.
-
-What: /sys/fs/lustre/debug_peer_on_timeout
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls if lnet debug information should be printed when
- an RPC timeout occurs.
- 0 disabled (default)
- 1 enabled
-
-What: /sys/fs/lustre/dump_on_timeout
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls if Lustre debug log should be dumped when an RPC
-		timeout occurs. This is useful if your debug buffer typically
- rolls over by the time you notice RPC timeouts.
-
-What: /sys/fs/lustre/dump_on_eviction
-Date: June 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls if the Lustre debug log should be dumped when this
-		client is evicted from one of the servers.
-		This is useful if your debug buffer typically rolls over
- by the time you notice the eviction event.
-
-What: /sys/fs/lustre/at_min
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls minimum adaptive timeout in seconds. If you encounter
-		a case where clients time out due to server-reported processing
-		time being too short, you might consider increasing this value.
-		One common case of this is when the underlying network has
-		unpredictably long delays.
- Default: 0
-
-What: /sys/fs/lustre/at_max
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls maximum adaptive timeout in seconds. If at_max timeout
- is reached for an RPC, the RPC will time out.
-		Some genuinely slow network hardware might warrant increasing
-		this value.
-		Setting this value to 0 disables the Adaptive Timeouts
-		functionality, and the old-style obd_timeout value is then used.
- Default: 600
-
-What: /sys/fs/lustre/at_extra
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls how much extra time, in seconds, to request for
-		requests that are still being processed. Normally a server-side
-		parameter, it is also used on the client for responses to
-		various LDLM ASTs that are handled with a special server
-		thread on the client.
-		This is a way for the servers to ask the clients not to time
-		out, just yet, a request that has reached the current servicing
-		time estimate, and to give it some more time.
- Default: 30
-
-What: /sys/fs/lustre/at_early_margin
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls when to send the early reply for requests that are
-		about to time out, as an offset to the estimated service time
-		in seconds.
- Default: 5
-
-What: /sys/fs/lustre/at_history
-Date: July 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls for how many seconds to remember the slowest events
-		encountered by the adaptive timeouts code.
- Default: 600
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/blocksize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Biggest blocksize on object storage server for this filesystem.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/kbytestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of kilobytes of space on this filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/kbytesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of free kilobytes of space on this filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/kbytesavail
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of free kilobytes of space on this filesystem
- actually available for use (taking into account per-client
- grants and filesystem reservations).
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/filestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows total number of inodes on the filesystem.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/filesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows estimated number of free inodes on the filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/client_type
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Shows whether this filesystem considers this client to be
-		compute cluster-local or remote. Remote clients have
-		additional uid/gid converting logic applied.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/fstype
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows filesystem type of the filesystem
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/uuid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows this filesystem superblock uuid
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_read_ahead_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Sets maximum number of megabytes in system memory to be
- given to read-ahead cache.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_read_ahead_per_file_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Sets maximum number of megabytes to read-ahead for a single file
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_read_ahead_whole_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- For small reads, how many megabytes to actually request from
- the server as initial read-ahead.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/checksum_pages
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Enables or disables per-page checksum at llite layer, before
- the pages are actually given to lower level for network transfer
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/stats_track_pid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Limit Lustre vfs operations gathering to just a single pid.
- 0 to track everything.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/stats_track_ppid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Limit Lustre vfs operations gathering to just a single ppid.
- 0 to track everything.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/stats_track_gid
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Limit Lustre vfs operations gathering to just a single gid.
- 0 to track everything.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/statahead_max
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls maximum number of statahead requests to send when
- sequential readdir+stat pattern is detected.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/statahead_agl
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls if AGL (async glimpse ahead - obtain object information
- from OSTs in parallel with MDS during statahead) should be
- enabled or disabled.
- 0 to disable, 1 to enable.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/lazystatfs
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls statfs(2) behaviour in the face of down servers.
-		If 0, always wait for all servers to come online;
-		if 1, ignore inactive servers.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/max_easize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Shows the maximum number of bytes that file striping data can
-		occupy in the current configuration of storage.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/default_easize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Shows the maximum file striping data observed by this
-		filesystem client instance.
-
-What: /sys/fs/lustre/llite/<fsname>-<uuid>/xattr_cache
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls extended attributes client-side cache.
- 1 to enable, 0 to disable.
-
-What: /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls if the client should replay unused locks during
-		recovery.
-		If a client tends to have a lot of unused locks in the LRU,
- recovery times might become prolonged.
- 1 - just locally cancel unused locks (default)
- 0 - replay unused locks.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/resource_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Displays number of lock resources (objects on which individual
- locks are taken) currently allocated in this namespace.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lock_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Displays the number of locks allocated in this namespace.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lru_size
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls and displays LRU size limit for unused locks for this
- namespace.
- 0 - LRU size is unlimited, controlled by server resources
- positive number - number of locks to allow in lock LRU list
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lock_unused_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Displays the number of locks currently sitting in the LRU list
-		of this namespace.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/lru_max_age
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Maximum number of milliseconds a lock can sit in the LRU list
-		before the client voluntarily cancels it as unused.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/early_lock_cancel
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls "early lock cancellation" feature on this namespace
- if supported by the server.
-		When enabled, tries to preemptively cancel locks that would be
-		cancelled by various operations and bundle the cancellation
- requests in the same RPC as the main operation, which results
- in significant speedups due to reduced lock-pingpong RPCs.
- 0 - disabled
- 1 - enabled (default)
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/granted
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Displays number of granted locks in this namespace
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_rate
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of granted locks in this namespace during last
- time interval
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/cancel_rate
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of lock cancellations in this namespace during
- last time interval
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_speed
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Calculated speed of lock granting (grant_rate - cancel_rate)
- in this namespace
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_plan
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Estimated number of locks to be granted in the next time
- interval in this namespace
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/limit
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls the number of allowed locks in this pool.
-		When lru_size is 0, this is the actual limit.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/lock_volume_factor
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Multiplier for all lock volume calculations above.
-		Default is 1. Increase to make the client more aggressively
-		clean its lock LRU list for this namespace.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/server_lock_volume
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Calculated server lock volume.
-
-What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/recalc_period
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls length of time between recalculation of above
- values (in seconds).
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_min
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls minimum number of ldlm callback threads to start.
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_max
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls maximum number of ldlm callback threads to start.
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_started
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows actual number of ldlm callback threads running.
-
-What: /sys/fs/lustre/ldlm/services/ldlm_cbd/high_priority_ratio
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls what percentage of ldlm callback threads is dedicated
- to "high priority" incoming requests.
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/blocksize
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Blocksize on backend filesystem for service behind this obd
- device (or biggest blocksize for compound devices like lov
- and lmv)
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Total number of kilobytes of space on backend filesystem
- for service behind this obd (or total amount for compound
- devices like lov lmv)
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of free kilobytes on backend filesystem for service
- behind this obd (or total amount for compound devices
- like lov lmv)
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytesavail
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of kilobytes of free space on backend filesystem
- for service behind this obd (or total amount for compound
- devices like lov lmv) that is actually available for use
- (taking into account per-client and filesystem reservations).
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/filestotal
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of inodes on backend filesystem for service behind this
- obd.
-
-What: /sys/fs/lustre/{obdtype}/{connection_name}/filesfree
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of free inodes on backend filesystem for service
- behind this obd.
-
-What: /sys/fs/lustre/mdc/{connection_name}/max_pages_per_rpc
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Maximum number of readdir pages to fit into a single readdir
- RPC.
-
-What: /sys/fs/lustre/{mdc,osc}/{connection_name}/max_rpcs_in_flight
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Maximum number of parallel RPCs on the wire to allow on
- this connection. Increasing this number would help on higher
- latency links, but has a chance of overloading a server
- if you have too many clients like this.
- Default: 8
-
-What: /sys/fs/lustre/osc/{connection_name}/max_pages_per_rpc
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Maximum number of pages to fit into a single RPC.
- Typically bigger RPCs allow for better performance.
-		Default: however many pages are needed to form 1M of data
-		(256 pages on platforms with a 4K page size).
-
-What: /sys/fs/lustre/osc/{connection_name}/active
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls accessibility of this connection. If set to 0,
- fail all accesses immediately.
-
-What: /sys/fs/lustre/osc/{connection_name}/checksums
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Controls whether to checksum bulk RPC data over the wire
- to this target.
- 1: enable (default) ; 0: disable
-
-What: /sys/fs/lustre/osc/{connection_name}/contention_seconds
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls for how long to consider a file contended once
- indicated as such by the server.
- When a file is considered contended, all operations switch to
- synchronous lockless mode to avoid cache and lock pingpong.
-
-What: /sys/fs/lustre/osc/{connection_name}/cur_dirty_bytes
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Displays how many dirty bytes are presently in the cache for
-		this target.
-
-What: /sys/fs/lustre/osc/{connection_name}/cur_grant_bytes
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows how many bytes we have as a "dirty cache" grant from the
-		server. Writing a value smaller than the one shown releases
-		some grant back to the server.
-		Dirty cache grant is the way Lustre ensures that cached
-		successful writes on the client do not end up discarded by the
-		server later on due to lack of space.
-
-What: /sys/fs/lustre/osc/{connection_name}/cur_lost_grant_bytes
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Shows how many granted bytes were released to the server due
- to lack of write activity on this client.
-
-What: /sys/fs/lustre/osc/{connection_name}/grant_shrink_interval
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of seconds with no write activity for this target
- to start releasing dirty grant back to the server.
-
-What: /sys/fs/lustre/osc/{connection_name}/destroys_in_flight
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of DESTROY RPCs currently in flight to this target.
-
-What: /sys/fs/lustre/osc/{connection_name}/lockless_truncate
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls whether lockless truncate RPCs are allowed to this
- target.
-		Lockless truncate causes the server to perform the locking,
-		which is beneficial if the truncate is not immediately followed
-		by a write.
- 1: enable ; 0: disable (default)
-
-What: /sys/fs/lustre/osc/{connection_name}/max_dirty_mb
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls how much dirty data this client can accumulate
- for this target. This is orthogonal to dirty grant and is
- a hard limit even if the server would allow a bigger dirty
- cache.
-		While allowing a higher dirty cache is beneficial for write
-		performance, flushing the write cache takes longer and, as
-		such, the node might be more prone to OOMs.
-		Having this value set too low might result in not being able
-		to send enough parallel WRITE RPCs.
- Default: 32
-
-What: /sys/fs/lustre/osc/{connection_name}/resend_count
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Controls how many times to try and resend RPCs to this target
-		that failed with a "recoverable" status, such as EAGAIN or
- ENOMEM.
-
-What: /sys/fs/lustre/lov/{connection_name}/numobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of OSC targets managed by this LOV instance.
-
-What: /sys/fs/lustre/lov/{connection_name}/activeobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of OSC targets managed by this LOV instance that are
- actually active.
-
-What: /sys/fs/lustre/lmv/{connection_name}/numobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of MDC targets managed by this LMV instance.
-
-What: /sys/fs/lustre/lmv/{connection_name}/activeobd
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
- Number of MDC targets managed by this LMV instance that are
- actually active.
-
-What: /sys/fs/lustre/lmv/{connection_name}/placement
-Date: May 2015
-Contact: "Oleg Drokin" <oleg.drokin@intel.com>
-Description:
-		Determines the inode placement policy when multiple metadata
-		servers are present:
- CHAR - based on a hash of the file name used at creation time
- (Default)
-		NID - based on a hash of the creating client's network id.
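The removed sysfs-fs-lustre file documented plain-text attributes under /sys/fs/lustre; each one is read and written with ordinary file I/O, no special tooling required. A minimal userspace sketch under that assumption follows (the paths exist only while the lustre client module is loaded, and the two attributes used, version and at_max, are the ones described in the entries above):

/*
 * Sketch only: read one informational attribute and set one tunable
 * documented above.  The /sys/fs/lustre paths exist only while the
 * (now removed) lustre client module is loaded.
 */
#include <stdio.h>
#include <string.h>

static int read_attr(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f))
		buf[0] = '\0';
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';	/* strip the trailing newline */
	return 0;
}

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);	/* 0 on success, EOF on write failure */
}

int main(void)
{
	char ver[128];

	if (read_attr("/sys/fs/lustre/version", ver, sizeof(ver)) == 0)
		printf("lustre version: %s\n", ver);

	/* Raise the adaptive-timeout ceiling described under at_max. */
	if (write_attr("/sys/fs/lustre/at_max", "1200") != 0)
		perror("/sys/fs/lustre/at_max");

	return 0;
}

As the entries above imply, writable tunables take a plain decimal value, so echoing a number into the attribute from a shell (for example into at_max) is equivalent to the write_attr() helper here.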
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 14697686eea5..ab250c89cd4d 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -1,37 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig STAGING_MEDIA
- bool "Media staging drivers"
- default n
- ---help---
- This option allows you to select a number of media drivers that
+ bool "Media staging drivers"
+ default n
+ help
+ This option allows you to select a number of media drivers that
don't have the "normal" Linux kernel quality level.
Most of them don't follow properly the V4L, DVB and/or RC API's,
so, they won't likely work fine with the existing applications.
That also means that, once fixed, their API's will change to match
the existing ones.
- If you wish to work on these drivers, to help improve them, or
- to report problems you have with them, please use the
+ If you wish to work on these drivers, to help improve them, or
+ to report problems you have with them, please use the
linux-media@vger.kernel.org mailing list.
- If in doubt, say N here.
+ If in doubt, say N here.
-if STAGING_MEDIA
+if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
-source "drivers/staging/media/bcm2048/Kconfig"
+source "drivers/staging/media/atomisp/Kconfig"
-source "drivers/staging/media/cxd2099/Kconfig"
+source "drivers/staging/media/av7110/Kconfig"
-source "drivers/staging/media/davinci_vpfe/Kconfig"
+source "drivers/staging/media/imx/Kconfig"
-source "drivers/staging/media/mn88472/Kconfig"
+source "drivers/staging/media/ipu3/Kconfig"
-source "drivers/staging/media/mn88473/Kconfig"
+source "drivers/staging/media/ipu7/Kconfig"
-source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/max96712/Kconfig"
-# Keep LIRC at the end, as it has sub-menus
-source "drivers/staging/media/lirc/Kconfig"
+source "drivers/staging/media/meson/vdec/Kconfig"
+
+source "drivers/staging/media/starfive/Kconfig"
+
+source "drivers/staging/media/sunxi/Kconfig"
+
+source "drivers/staging/media/tegra-video/Kconfig"
+
+menuconfig STAGING_MEDIA_DEPRECATED
+ bool "Media staging drivers (DEPRECATED)"
+ default n
+ help
+ This option enables deprecated media drivers that are
+ scheduled for future removal from the kernel.
+
+ If you wish to work on these drivers to prevent their removal,
+ then contact the linux-media@vger.kernel.org mailing list.
+
+ If in doubt, say N here.
+
+if STAGING_MEDIA_DEPRECATED
+source "drivers/staging/media/deprecated/atmel/Kconfig"
+endif
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 34c557b4c6d6..4a073938b2b2 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,7 +1,12 @@
-obj-$(CONFIG_I2C_BCM2048) += bcm2048/
-obj-$(CONFIG_DVB_CXD2099) += cxd2099/
-obj-$(CONFIG_LIRC_STAGING) += lirc/
-obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
-obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
-obj-$(CONFIG_DVB_MN88472) += mn88472/
-obj-$(CONFIG_DVB_MN88473) += mn88473/
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += deprecated/atmel/
+obj-$(CONFIG_INTEL_ATOMISP) += atomisp/
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
+obj-$(CONFIG_VIDEO_MAX96712) += max96712/
+obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
+obj-$(CONFIG_VIDEO_STARFIVE_CAMSS) += starfive/
+obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
+obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
+obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
+obj-$(CONFIG_VIDEO_INTEL_IPU7) += ipu7/
+obj-$(CONFIG_DVB_AV7110) += av7110/
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
new file mode 100644
index 000000000000..fbe507ae6ec6
--- /dev/null
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig INTEL_ATOMISP
+ bool "Enable support to Intel Atom ISP camera drivers"
+ depends on X86 && EFI && PCI && ACPI
+ depends on COMMON_CLK
+ select IOSF_MBI
+ select MEDIA_CONTROLLER
+ help
+ Enable support for the Intel ISP2 camera interfaces and MIPI
+ sensor drivers.
+
+config VIDEO_ATOMISP
+ tristate "Intel Atom Image Signal Processor Driver"
+ depends on VIDEO_DEV && INTEL_ATOMISP
+ depends on INTEL_SKL_INT3472
+ depends on IPU_BRIDGE
+ depends on MEDIA_PCI_SUPPORT
+ depends on PMIC_OPREGION
+ depends on I2C
+ select V4L2_FWNODE
+ select IOSF_MBI
+ select VIDEOBUF2_VMALLOC
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Say Y here if your platform supports Intel Atom SoC
+ camera imaging subsystem.
+ To compile this driver as a module, choose M here: the
+ module will be called atomisp
+
+if VIDEO_ATOMISP
+source "drivers/staging/media/atomisp/i2c/Kconfig"
+endif
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
new file mode 100644
index 000000000000..1d0fbe22036b
--- /dev/null
+++ b/drivers/staging/media/atomisp/Makefile
@@ -0,0 +1,302 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for camera drivers.
+#
+obj-$(CONFIG_INTEL_ATOMISP) += i2c/
+obj-$(CONFIG_VIDEO_ATOMISP) += atomisp.o
+obj-$(CONFIG_VIDEO_ATOMISP) += pci/atomisp_gmin_platform.o
+
+# While on staging, keep debug enabled
+DEFINES += -DDEBUG
+
+atomisp = $(srctree)/drivers/staging/media/atomisp/
+
+# SPDX-License-Identifier: GPL-2.0
+atomisp-objs += \
+ pci/atomisp_cmd.o \
+ pci/atomisp_compat_css20.o \
+ pci/atomisp_csi2.o \
+ pci/atomisp_csi2_bridge.o \
+ pci/atomisp_fops.o \
+ pci/atomisp_ioctl.o \
+ pci/atomisp_subdev.o \
+ pci/atomisp_v4l2.o \
+ pci/sh_css_firmware.o \
+ pci/sh_css_host_data.o \
+ pci/sh_css_hrt.o \
+ pci/sh_css_metrics.o \
+ pci/sh_css_mipi.o \
+ pci/sh_css_mmu.o \
+ pci/sh_css.o \
+ pci/sh_css_param_dvs.o \
+ pci/sh_css_param_shading.o \
+ pci/sh_css_params.o \
+ pci/sh_css_properties.o \
+ pci/sh_css_sp.o \
+ pci/sh_css_stream_format.o \
+ pci/sh_css_version.o \
+ pci/base/circbuf/src/circbuf.o \
+ pci/base/refcount/src/refcount.o \
+ pci/camera/pipe/src/pipe_binarydesc.o \
+ pci/camera/pipe/src/pipe_stagedesc.o \
+ pci/camera/pipe/src/pipe_util.o \
+ pci/camera/util/src/util.o \
+ pci/hmm/hmm_bo.o \
+ pci/hmm/hmm.o \
+ pci/ia_css_device_access.o \
+ pci/ia_css_isp_configs.o \
+ pci/ia_css_isp_states.o \
+ pci/ia_css_isp_params.o \
+ pci/isp/kernels/aa/aa_2/ia_css_aa2.host.o \
+ pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.o \
+ pci/isp/kernels/anr/anr_2/ia_css_anr2.host.o \
+ pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.o \
+ pci/isp/kernels/bh/bh_2/ia_css_bh.host.o \
+ pci/isp/kernels/bnlm/ia_css_bnlm.host.o \
+ pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.o \
+ pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.o \
+ pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.o \
+ pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.o \
+ pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.o \
+ pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.o \
+ pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.o \
+ pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.o \
+ pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.o \
+ pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.o \
+ pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.o \
+ pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.o \
+ pci/isp/kernels/de/de_1.0/ia_css_de.host.o \
+ pci/isp/kernels/de/de_2/ia_css_de2.host.o \
+ pci/isp/kernels/dpc2/ia_css_dpc2.host.o \
+ pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.o \
+ pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.o \
+ pci/isp/kernels/eed1_8/ia_css_eed1_8.host.o \
+ pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.o \
+ pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.o \
+ pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.o \
+ pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.o \
+ pci/isp/kernels/gc/gc_2/ia_css_gc2.host.o \
+ pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.o \
+ pci/isp/kernels/hdr/ia_css_hdr.host.o \
+ pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.o \
+ pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.o \
+ pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.o \
+ pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.o \
+ pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.o \
+ pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.o \
+ pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.o \
+ pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.o \
+ pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.o \
+ pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.o \
+ pci/isp/kernels/ob/ob2/ia_css_ob2.host.o \
+ pci/isp/kernels/output/output_1.0/ia_css_output.host.o \
+ pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.o \
+ pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.o \
+ pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.o \
+ pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.o \
+ pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.o \
+ pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.o \
+ pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.o \
+ pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.o \
+ pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.o \
+ pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.o \
+ pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.o \
+ pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.o \
+ pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.o \
+ pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.o \
+ pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.o \
+ pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.o \
+ pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.o \
+ pci/mmu/isp_mmu.o \
+ pci/mmu/sh_mmu_mrfld.o \
+ pci/runtime/binary/src/binary.o \
+ pci/runtime/bufq/src/bufq.o \
+ pci/runtime/debug/src/ia_css_debug.o \
+ pci/runtime/eventq/src/eventq.o \
+ pci/runtime/event/src/event.o \
+ pci/runtime/frame/src/frame.o \
+ pci/runtime/ifmtr/src/ifmtr.o \
+ pci/runtime/inputfifo/src/inputfifo.o \
+ pci/runtime/isp_param/src/isp_param.o \
+ pci/runtime/isys/src/csi_rx_rmgr.o \
+ pci/runtime/isys/src/isys_dma_rmgr.o \
+ pci/runtime/isys/src/isys_init.o \
+ pci/runtime/isys/src/isys_stream2mmio_rmgr.o \
+ pci/runtime/isys/src/rx.o \
+ pci/runtime/isys/src/virtual_isys.o \
+ pci/runtime/pipeline/src/pipeline.o \
+ pci/runtime/queue/src/queue_access.o \
+ pci/runtime/queue/src/queue.o \
+ pci/runtime/rmgr/src/rmgr.o \
+ pci/runtime/rmgr/src/rmgr_vbuf.o \
+ pci/runtime/spctrl/src/spctrl.o \
+ pci/runtime/timer/src/timer.o \
+ pci/hive_isp_css_common/host/debug.o \
+ pci/hive_isp_css_common/host/dma.o \
+ pci/hive_isp_css_common/host/event_fifo.o \
+ pci/hive_isp_css_common/host/fifo_monitor.o \
+ pci/hive_isp_css_common/host/gdc.o \
+ pci/hive_isp_css_common/host/gp_device.o \
+ pci/hive_isp_css_common/host/gp_timer.o \
+ pci/hive_isp_css_common/host/hmem.o \
+ pci/hive_isp_css_common/host/input_formatter.o \
+ pci/hive_isp_css_common/host/input_system.o \
+ pci/hive_isp_css_common/host/irq.o \
+ pci/hive_isp_css_common/host/isp.o \
+ pci/hive_isp_css_common/host/mmu.o \
+ pci/hive_isp_css_common/host/sp.o \
+ pci/hive_isp_css_common/host/timed_ctrl.o \
+ pci/hive_isp_css_common/host/vmem.o \
+ pci/hive_isp_css_shared/host/tag.o \
+ pci/system_local.o \
+ pci/runtime/isys/src/ibuf_ctrl_rmgr.o \
+ pci/css_2401_system/host/csi_rx.o \
+ pci/css_2401_system/host/ibuf_ctrl.o \
+ pci/css_2401_system/host/isys_dma.o \
+ pci/css_2401_system/host/isys_irq.o \
+ pci/css_2401_system/host/isys_stream2mmio.o
+
+INCLUDES += \
+ -I$(atomisp)/ \
+ -I$(atomisp)/include/ \
+ -I$(atomisp)/include/hmm/ \
+ -I$(atomisp)/include/mmu/ \
+ -I$(atomisp)/pci/ \
+ -I$(atomisp)/pci/base/circbuf/interface/ \
+ -I$(atomisp)/pci/base/refcount/interface/ \
+ -I$(atomisp)/pci/camera/pipe/interface/ \
+ -I$(atomisp)/pci/camera/util/interface/ \
+ -I$(atomisp)/pci/hive_isp_css_common/ \
+ -I$(atomisp)/pci/hive_isp_css_common/host/ \
+ -I$(atomisp)/pci/hive_isp_css_include/ \
+ -I$(atomisp)/pci/hive_isp_css_include/device_access/ \
+ -I$(atomisp)/pci/hive_isp_css_include/host/ \
+ -I$(atomisp)/pci/hive_isp_css_shared/ \
+ -I$(atomisp)/pci/hive_isp_css_shared/host/ \
+ -I$(atomisp)/pci/isp/kernels/ \
+ -I$(atomisp)/pci/isp/kernels/aa/aa_2/ \
+ -I$(atomisp)/pci/isp/kernels/anr/anr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/anr/anr_2/ \
+ -I$(atomisp)/pci/isp/kernels/bh/bh_2/ \
+ -I$(atomisp)/pci/isp/kernels/bnlm/ \
+ -I$(atomisp)/pci/isp/kernels/bnr/ \
+ -I$(atomisp)/pci/isp/kernels/bnr/bnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/bnr/bnr2_2/ \
+ -I$(atomisp)/pci/isp/kernels/cnr/ \
+ -I$(atomisp)/pci/isp/kernels/cnr/cnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/cnr/cnr_2/ \
+ -I$(atomisp)/pci/isp/kernels/conversion/ \
+ -I$(atomisp)/pci/isp/kernels/conversion/conversion_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/copy_output/ \
+ -I$(atomisp)/pci/isp/kernels/copy_output/copy_output_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/crop/ \
+ -I$(atomisp)/pci/isp/kernels/crop/crop_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/csc/ \
+ -I$(atomisp)/pci/isp/kernels/csc/csc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ctc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ctc1_5/ \
+ -I$(atomisp)/pci/isp/kernels/ctc/ctc2/ \
+ -I$(atomisp)/pci/isp/kernels/de/ \
+ -I$(atomisp)/pci/isp/kernels/de/de_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/de/de_2/ \
+ -I$(atomisp)/pci/isp/kernels/dp/ \
+ -I$(atomisp)/pci/isp/kernels/dpc2/ \
+ -I$(atomisp)/pci/isp/kernels/dp/dp_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/dvs/ \
+ -I$(atomisp)/pci/isp/kernels/dvs/dvs_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/eed1_8/ \
+ -I$(atomisp)/pci/isp/kernels/fc/ \
+ -I$(atomisp)/pci/isp/kernels/fc/fc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/fixedbds/ \
+ -I$(atomisp)/pci/isp/kernels/fixedbds/fixedbds_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/fpn/ \
+ -I$(atomisp)/pci/isp/kernels/fpn/fpn_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/gc/ \
+ -I$(atomisp)/pci/isp/kernels/gc/gc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/gc/gc_2/ \
+ -I$(atomisp)/pci/isp/kernels/hdr/ \
+	-I$(atomisp)/pci/isp/kernels/ipu2_io_ls/ \
+	-I$(atomisp)/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ \
+	-I$(atomisp)/pci/isp/kernels/ipu2_io_ls/common/ \
+	-I$(atomisp)/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ \
+ -I$(atomisp)/pci/isp/kernels/iterator/ \
+ -I$(atomisp)/pci/isp/kernels/iterator/iterator_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/macc/ \
+ -I$(atomisp)/pci/isp/kernels/macc/macc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/macc/macc1_5/ \
+ -I$(atomisp)/pci/isp/kernels/norm/ \
+ -I$(atomisp)/pci/isp/kernels/norm/norm_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ob/ \
+ -I$(atomisp)/pci/isp/kernels/ob/ob_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ob/ob2/ \
+ -I$(atomisp)/pci/isp/kernels/output/ \
+ -I$(atomisp)/pci/isp/kernels/output/output_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/qplane/ \
+ -I$(atomisp)/pci/isp/kernels/qplane/qplane_2/ \
+ -I$(atomisp)/pci/isp/kernels/raw/ \
+ -I$(atomisp)/pci/isp/kernels/raw_aa_binning/ \
+ -I$(atomisp)/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/raw/raw_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ref/ \
+ -I$(atomisp)/pci/isp/kernels/ref/ref_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/s3a/ \
+ -I$(atomisp)/pci/isp/kernels/s3a/s3a_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/sc/ \
+ -I$(atomisp)/pci/isp/kernels/sc/sc_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/common/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/sdis_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/sdis/sdis_2/ \
+ -I$(atomisp)/pci/isp/kernels/tdf/ \
+ -I$(atomisp)/pci/isp/kernels/tdf/tdf_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/tnr/ \
+ -I$(atomisp)/pci/isp/kernels/tnr/tnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/tnr/tnr3/ \
+ -I$(atomisp)/pci/isp/kernels/uds/ \
+ -I$(atomisp)/pci/isp/kernels/uds/uds_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/vf/ \
+ -I$(atomisp)/pci/isp/kernels/vf/vf_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/wb/ \
+ -I$(atomisp)/pci/isp/kernels/wb/wb_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/xnr/ \
+ -I$(atomisp)/pci/isp/kernels/xnr/xnr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/xnr/xnr_3.0/ \
+ -I$(atomisp)/pci/isp/kernels/ynr/ \
+ -I$(atomisp)/pci/isp/kernels/ynr/ynr_1.0/ \
+ -I$(atomisp)/pci/isp/kernels/ynr/ynr_2/ \
+ -I$(atomisp)/pci/isp/modes/interface/ \
+ -I$(atomisp)/pci/runtime/binary/interface/ \
+ -I$(atomisp)/pci/runtime/bufq/interface/ \
+ -I$(atomisp)/pci/runtime/debug/interface/ \
+ -I$(atomisp)/pci/runtime/event/interface/ \
+ -I$(atomisp)/pci/runtime/eventq/interface/ \
+ -I$(atomisp)/pci/runtime/frame/interface/ \
+ -I$(atomisp)/pci/runtime/ifmtr/interface/ \
+ -I$(atomisp)/pci/runtime/inputfifo/interface/ \
+ -I$(atomisp)/pci/runtime/isp_param/interface/ \
+ -I$(atomisp)/pci/runtime/isys/interface/ \
+ -I$(atomisp)/pci/runtime/isys/src/ \
+ -I$(atomisp)/pci/runtime/pipeline/interface/ \
+ -I$(atomisp)/pci/runtime/queue/interface/ \
+ -I$(atomisp)/pci/runtime/queue/src/ \
+ -I$(atomisp)/pci/runtime/rmgr/interface/ \
+ -I$(atomisp)/pci/runtime/spctrl/interface/ \
+ -I$(atomisp)/pci/runtime/tagger/interface/ \
+ -I$(atomisp)/pci/css_2401_system/ \
+ -I$(atomisp)/pci/css_2401_system/host/ \
+ -I$(atomisp)/pci/css_2401_system/hrt/
+
+DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
+#DEFINES += -DUSE_DYNAMIC_BIN
+#DEFINES += -DISP_POWER_GATING
+#DEFINES += -DUSE_INTERRUPTS
+#DEFINES += -DUSE_SSSE3
+#DEFINES += -DPUNIT_CAMERA_BUSY
+#DEFINES += -DUSE_KMEM_CACHE
+
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
new file mode 100644
index 000000000000..82be275b4a0a
--- /dev/null
+++ b/drivers/staging/media/atomisp/TODO
@@ -0,0 +1,71 @@
+TODO
+====
+
+1. Items which MUST be fixed before the driver can be moved out of staging:
+
+* Remove/disable private IOCTLs
+
+* Remove/disable custom v4l2-ctrls
+
+* Remove unnecessary/unwanted module parameters
+
+* Remove abuse of priv field in various v4l2 userspace API structs
+
+* Without a 3A library the capture behaviour is not very good. To take a good
+ picture, the exposure/gain needs to be tuned using v4l2-ctl on the sensor
+ subdev. To fix this, support for the atomisp needs to be added to libcamera.
+
+ This MUST be done before moving the driver out of staging so that we can
+ still make changes to e.g. the mediactl topology if necessary for
+ libcamera integration. Since this would be a userspace API break, this
+ means that at least proof-of-concept libcamera integration needs to be
+ ready before moving the driver out of staging.
+
+
+2. Items which SHOULD also be fixed eventually:
+
+* The driver is intended to drive the PCI-exposed versions of the device.
+  It will not detect those devices enumerated via ACPI as part of the
+  i915 GPU driver (only a problem on BYT).
+
+  There are some patches adding i915 GPU support floating around in Yocto's
+  Aero repository (so far untested upstream).
+
+* Ensure that the driver will pass v4l2-compliance tests
+
+* Fix the v4l2 apps that do not work yet, e.g. cheese does not work
+
+* The atomisp code still has a lot of cruft which needs cleaning up
+
+
+Testing
+=======
+
+Since libcamera support is not available yet, the easiest way to test for
+now is using v4l2-ctl to select the input and gstreamer for streaming.
+
+To select the input run:
+
+v4l2-ctl -i <input>
+
+Where <input> is 0 (front cam) or 1 (back cam).
+
+The simplest gstreamer pipeline for testing, which runs the sensor
+at its maximum resolution, is:
+
+gst-launch-1.0 v4l2src ! videoconvert ! xvimagesink sync=false
+
+To select e.g. 640x480 as the resolution use:
+
+gst-launch-1.0 v4l2src ! video/x-raw,format=YV12,width=640,height=480 ! \
+ videoconvert ! xvimagesink sync=false
+
+And to show fps use:
+
+gst-launch-1.0 v4l2src ! video/x-raw,format=YV12,width=640,height=480 ! \
+ videoconvert ! fpsdisplaysink video-sink=xvimagesink sync=false
+
+Often the image will be over- or under-exposed. This can be fixed by using
+v4l2-ctl on the sensor subdev to tweak the exposure control (an example
+follows below), or by using a GUI app for v4l2 controls which also supports
+subdevs, such as the Fedora-patched gtk-v4l tool.
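+
+For example, with the subdev node number below being just an illustration
+(list the available controls with "v4l2-ctl -d /dev/v4l-subdevN -l"):
+
+v4l2-ctl -d /dev/v4l-subdev1 --set-ctrl=exposure=500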
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
new file mode 100644
index 000000000000..7d447ba7a7e1
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Kconfig for sensor drivers
+#
+
+config VIDEO_ATOMISP_OV2722
+ tristate "OVT ov2722 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_DEV
+ help
+ This is a Video4Linux2 sensor-level driver for the OVT
+ OV2722 raw camera.
+
+	  The OV2722 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_ATOMISP_GC2235
+	tristate "GalaxyCore gc2235 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_DEV
+ help
+	  This is a Video4Linux2 sensor-level driver for the GalaxyCore
+	  GC2235 raw camera.
+
+ GC2235 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
new file mode 100644
index 000000000000..161aa542ef34
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for sensor drivers
+#
+
+obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
new file mode 100644
index 000000000000..6050637a0def
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for GalaxyCore GC2235 2M camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "gc2235.h"
+
+/* i2c read/write stuff */
+static int gc2235_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != GC2235_8BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = data;
+
+	/* the GC2235 uses 8-bit register addresses */
+ data[0] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+	/* only 8-bit reads are supported */
+ if (data_length == GC2235_8BIT)
+ *val = (u8)data[0];
+
+ return 0;
+}
+
+static int gc2235_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int gc2235_write_reg(struct i2c_client *client, u16 data_length,
+ u8 reg, u8 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+	const u16 len = data_length + sizeof(u8); /* 8-bit address + data */
+
+ if (data_length != GC2235_8BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+	/* 8-bit register address, followed by the value */
+ data[0] = reg;
+ data[1] = val;
+
+ ret = gc2235_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+static int __gc2235_flush_reg_array(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u8) + ctrl->index; /* 8-bit address + data */
+ ctrl->index = 0;
+
+ return gc2235_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __gc2235_buf_reg_array(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl,
+ const struct gc2235_reg *next)
+{
+ int size;
+
+ if (next->type != GC2235_8BIT)
+ return -EINVAL;
+
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+	/*
+	 * If the buffer cannot guarantee room for the next entry, flush it
+	 * now to avoid running out of space.
+	 */
+ if (ctrl->index + sizeof(u8) >= GC2235_MAX_WRITE_BUF_SIZE)
+ return __gc2235_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __gc2235_write_reg_is_consecutive(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl,
+ const struct gc2235_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int gc2235_write_reg_array(struct i2c_client *client,
+ const struct gc2235_reg *reglist)
+{
+ const struct gc2235_reg *next = reglist;
+ struct gc2235_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != GC2235_TOK_TERM; next++) {
+ switch (next->type & GC2235_TOK_MASK) {
+ case GC2235_TOK_DELAY:
+ err = __gc2235_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+			/*
+			 * If the next address is not consecutive, the buffered
+			 * data needs to be flushed before proceeding.
+			 */
+ if (!__gc2235_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __gc2235_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __gc2235_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __gc2235_flush_reg_array(client, &ctrl);
+}
+
+static long __gc2235_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 coarse_integration = (u16)coarse_itg;
+ int ret = 0;
+ u16 expo_coarse_h, expo_coarse_l, gain_val = 0xF0, gain_val2 = 0xF0;
+
+ expo_coarse_h = coarse_integration >> 8;
+ expo_coarse_l = coarse_integration & 0xff;
+
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_H, expo_coarse_h);
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_L, expo_coarse_l);
+
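+	/*
+	 * Split the requested gain over GC2235_GLOBAL_GAIN and
+	 * GC2235_PRE_GAIN: requests up to 0x58 are clamped to the 0x40/0x58
+	 * floor, requests below 256 only raise the pre-gain, and from 256
+	 * upwards the global gain saturates at 0xff while the pre-gain
+	 * carries 64 * gain / 256 (i.e. gain / 4, e.g. gain = 512 gives a
+	 * pre-gain of 128).
+	 */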
+ if (gain <= 0x58) {
+ gain_val = 0x40;
+ gain_val2 = 0x58;
+ } else if (gain < 256) {
+ gain_val = 0x40;
+ gain_val2 = gain;
+ } else {
+ gain_val2 = 64 * gain / 256;
+ gain_val = 0xff;
+ }
+
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_GLOBAL_GAIN, (u8)gain_val);
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_PRE_GAIN, (u8)gain_val2);
+
+ return ret;
+}
+
+static int gc2235_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __gc2235_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long gc2235_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* we should not accept the invalid value below. */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return gc2235_set_exposure(sd, exp, gain, digitgain);
+}
+
+static long gc2235_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return gc2235_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int gc2235_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+
+ *value = reg_v;
+err:
+ return ret;
+}
+
+static int gc2235_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc2235_device *dev =
+ container_of(ctrl->handler, struct gc2235_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = gc2235_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .g_volatile_ctrl = gc2235_g_volatile_ctrl
+};
+
+static struct v4l2_ctrl_config gc2235_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+};
+
+static int __gc2235_init(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* restore settings */
+ gc2235_res = gc2235_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ return gc2235_write_reg_array(client, gc2235_init_settings);
+}
+
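+/*
+ * Set by gc2235_s_power() after powering the sensor up, so that the first
+ * gc2235_startup() call after a power up skips the extra power cycle and
+ * re-init below; cleared again at the end of gc2235_startup().
+ */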
+static int is_init;
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = -1;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ if (flag) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ usleep_range(60, 90);
+ if (ret == 0)
+ ret |= dev->platform_data->v2p8_ctrl(sd, 1);
+ } else {
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ int ret;
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ ret = dev->platform_data->gpio1_ctrl(sd, !flag);
+ usleep_range(60, 90);
+ ret |= dev->platform_data->gpio0_ctrl(sd, flag);
+
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to DS, at least 5ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+ }
+
+ msleep(5);
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int gc2235_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0) {
+ ret = power_down(sd);
+ } else {
+ ret = power_up(sd);
+ if (!ret)
+ ret = __gc2235_init(sd);
+ is_init = 1;
+ }
+ return ret;
+}
+
+static int gc2235_startup(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (is_init == 0) {
+		/*
+		 * Force gc2235 to do a reset on resolution changes, otherwise
+		 * it cannot output normally after switching resolutions. This
+		 * is not necessary on the first run after power on, so skip it
+		 * there for the sake of performance.
+		 */
+ power_down(sd);
+ power_up(sd);
+ gc2235_write_reg_array(client, gc2235_init_settings);
+ }
+
+ ret = gc2235_write_reg_array(client, dev->res->regs);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 write register err.\n");
+ return ret;
+ }
+ is_init = 0;
+
+ return ret;
+}
+
+static int gc2235_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *gc2235_info = NULL;
+ struct gc2235_resolution *res;
+ int ret = 0;
+
+ gc2235_info = v4l2_get_subdev_hostdata(sd);
+ if (!gc2235_info)
+ return -EINVAL;
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ res = v4l2_find_nearest_size(gc2235_res_preview,
+ ARRAY_SIZE(gc2235_res_preview), width,
+ height, fmt->width, fmt->height);
+ if (!res)
+ res = &gc2235_res_preview[N_RES - 1];
+
+ fmt->width = res->width;
+ fmt->height = res->height;
+ dev->res = res;
+
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ ret = gc2235_startup(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 startup err\n");
+ goto err;
+ }
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = dev->res->width;
+ fmt->height = dev->res->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int gc2235_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high = 0, low = 0;
+ u16 id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ gc2235_read_reg(client, GC2235_8BIT, GC2235_SENSOR_ID_H, &high);
+ gc2235_read_reg(client, GC2235_8BIT, GC2235_SENSOR_ID_L, &low);
+ id = ((high << 8) | low);
+
+ if (id != GC2235_ID) {
+ dev_err(&client->dev, "sensor ID error, 0x%x\n", id);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "detect gc2235 success\n");
+ return 0;
+}
+
+static int gc2235_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ if (enable)
+ ret = gc2235_write_reg_array(client, gc2235_stream_on);
+ else
+ ret = gc2235_write_reg_array(client, gc2235_stream_off);
+
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+	/*
+	 * Power off the module, then power it on again later, as the first
+	 * power on done by the board may not fulfill the power-on sequence
+	 * needed by the module.
+	 */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = gc2235_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "gc2235_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = dev->res->fps;
+
+ return 0;
+}
+
+static int gc2235_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int gc2235_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = gc2235_res[index].width;
+ fse->min_height = gc2235_res[index].height;
+ fse->max_width = gc2235_res[index].width;
+ fse->max_height = gc2235_res[index].height;
+
+ return 0;
+}
+
+static int gc2235_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = dev->res->skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops gc2235_sensor_ops = {
+ .g_skip_frames = gc2235_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops gc2235_video_ops = {
+ .s_stream = gc2235_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops gc2235_core_ops = {
+ .s_power = gc2235_s_power,
+ .ioctl = gc2235_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops gc2235_pad_ops = {
+ .enum_mbus_code = gc2235_enum_mbus_code,
+ .enum_frame_size = gc2235_enum_frame_size,
+ .get_fmt = gc2235_get_fmt,
+ .set_fmt = gc2235_set_fmt,
+ .get_frame_interval = gc2235_get_frame_interval,
+};
+
+static const struct v4l2_subdev_ops gc2235_ops = {
+ .core = &gc2235_core_ops,
+ .video = &gc2235_video_ops,
+ .pad = &gc2235_pad_ops,
+ .sensor = &gc2235_sensor_ops,
+};
+
+static void gc2235_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+}
+
+static int gc2235_probe(struct i2c_client *client)
+{
+ struct gc2235_device *dev;
+ void *gcpdev;
+ int ret;
+ unsigned int i;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->res = &gc2235_res_preview[0];
+ v4l2_i2c_subdev_init(&dev->sd, client, &gc2235_ops);
+
+ gcpdev = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+
+ ret = gc2235_s_config(&dev->sd, client->irq, gcpdev);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+	ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+				     ARRAY_SIZE(gc2235_controls));
+ if (ret) {
+ gc2235_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gc2235_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc2235_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ gc2235_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ gc2235_remove(client);
+
+ return atomisp_register_i2c_module(&dev->sd, gcpdev);
+
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+
+ return ret;
+}
+
+static const struct acpi_device_id gc2235_acpi_match[] = {
+ { "INT33F8" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match);
+
+static struct i2c_driver gc2235_driver = {
+ .driver = {
+ .name = "gc2235",
+ .acpi_match_table = gc2235_acpi_match,
+ },
+ .probe = gc2235_probe,
+ .remove = gc2235_remove,
+};
+module_i2c_driver(gc2235_driver);
+
+MODULE_AUTHOR("Shuguang Gong <Shuguang.Gong@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for GC2235 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
new file mode 100644
index 000000000000..a4519babf37d
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -0,0 +1,1015 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for OmniVision OV2722 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "ov2722.h"
+
+/* i2c read/write stuff */
+static int ov2722_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT &&
+ data_length != OV2722_32BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(reg >> 8);
+ data[1] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == OV2722_8BIT)
+ *val = (u8)data[0];
+ else if (data_length == OV2722_16BIT)
+ *val = be16_to_cpu(*(__be16 *)&data[0]);
+ else
+ *val = be32_to_cpu(*(__be32 *)&data[0]);
+
+ return 0;
+}
+
+static int ov2722_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int ov2722_write_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ __be16 *wreg = (__be16 *)data;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == OV2722_8BIT) {
+ data[2] = (u8)(val);
+ } else {
+ /* OV2722_16BIT */
+ __be16 *wdata = (__be16 *)&data[2];
+
+ *wdata = cpu_to_be16(val);
+ }
+
+ ret = ov2722_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * ov2722_write_reg_array - Initializes a list of OV2722 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __ov2722_flush_reg_array(), __ov2722_buf_reg_array() and
+ * __ov2722_write_reg_is_consecutive() are internal functions to
+ * ov2722_write_reg_array() and should not be used anywhere else.
+ */
+
+static int __ov2722_flush_reg_array(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl)
+{
+ u16 size;
+ __be16 *data16 = (void *)&ctrl->buffer.addr;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+ *data16 = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return ov2722_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __ov2722_buf_reg_array(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl,
+ const struct ov2722_reg *next)
+{
+ int size;
+ __be16 *data16;
+
+ switch (next->type) {
+ case OV2722_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case OV2722_16BIT:
+ size = 2;
+ data16 = (void *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+	/*
+	 * If the buffer cannot guarantee room for the next entry, flush it
+	 * now to avoid running out of space.
+	 */
+ if (ctrl->index + sizeof(u16) >= OV2722_MAX_WRITE_BUF_SIZE)
+ return __ov2722_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __ov2722_write_reg_is_consecutive(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl,
+ const struct ov2722_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int ov2722_write_reg_array(struct i2c_client *client,
+ const struct ov2722_reg *reglist)
+{
+ const struct ov2722_reg *next = reglist;
+ struct ov2722_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != OV2722_TOK_TERM; next++) {
+ switch (next->type & OV2722_TOK_MASK) {
+ case OV2722_TOK_DELAY:
+ err = __ov2722_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+			/*
+			 * If the next address is not consecutive, the buffered
+			 * data needs to be flushed before proceeding.
+			 */
+ if (!__ov2722_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __ov2722_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __ov2722_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __ov2722_flush_reg_array(client, &ctrl);
+}
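+
+/*
+ * Illustration (hypothetical register values, not taken from any mode table):
+ * for a list such as
+ *
+ *	{ OV2722_8BIT, 0x3800, 0x00 },
+ *	{ OV2722_8BIT, 0x3801, 0xa2 },
+ *	{ OV2722_8BIT, 0x3802, 0x00 },
+ *	{ OV2722_TOK_TERM, 0, 0 },
+ *
+ * the three consecutive 8-bit writes are buffered by __ov2722_buf_reg_array()
+ * and sent as a single i2c_transfer() by __ov2722_flush_reg_array() once the
+ * terminating token is reached, instead of three separate transactions.
+ */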
+
+static long __ov2722_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ u16 hts, vts;
+ int ret;
+
+ dev_dbg(&client->dev, "set_exposure without group hold\n");
+
+ /* clear VTS_DIFF on manual mode */
+ ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_VTS_DIFF_H, 0);
+ if (ret)
+ return ret;
+
+ hts = dev->pixels_per_line;
+ vts = dev->lines_per_frame;
+
+ if ((coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN) > vts)
+ vts = coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN;
+
+ coarse_itg <<= 4;
+ digitgain <<= 2;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_VTS_H, vts);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_HTS_H, hts);
+ if (ret)
+ return ret;
+
+ /* set exposure */
+ ret = ov2722_write_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_L,
+ coarse_itg & 0xff);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_AEC_PK_EXPO_H,
+ (coarse_itg >> 8) & 0xfff);
+ if (ret)
+ return ret;
+
+ /* set analog gain */
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_AGC_ADJ_H, gain);
+ if (ret)
+ return ret;
+
+ /* set digital gain */
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_R_H, digitgain);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_G_H, digitgain);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_B_H, digitgain);
+
+ return ret;
+}
+
+static int ov2722_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov2722_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long ov2722_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* we should not accept the invalid value below. */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return ov2722_set_exposure(sd, exp, gain, digitgain);
+}
+
+static long ov2722_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov2722_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int ov2722_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_M,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ *value = reg_v + (((u32)reg_v2 << 16));
+err:
+ return ret;
+}
+
+static int ov2722_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2722_device *dev =
+ container_of(ctrl->handler, struct ov2722_device, ctrl_handler);
+ int ret = 0;
+ unsigned int val;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = ov2722_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_LINK_FREQ:
+ val = dev->res->mipi_freq;
+ if (val == 0)
+ return -EINVAL;
+
+ ctrl->val = val * 1000; /* To Hz */
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .g_volatile_ctrl = ov2722_g_volatile_ctrl
+};
+
+static const struct v4l2_ctrl_config ov2722_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_LINK_FREQ,
+ .name = "Link Frequency",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1500000 * 1000,
+ .step = 1,
+ .def = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ },
+};
+
+static int ov2722_init(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+
+ /* restore settings */
+ ov2722_res = ov2722_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = -1;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ if (flag) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ if (ret == 0) {
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ if (ret)
+ dev->platform_data->v1p8_ctrl(sd, 0);
+ }
+ } else {
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ int ret = -1;
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Note: the GPIO order is asymmetric: always RESET#
+ * before PWDN# when turning it on or off.
+ */
+ ret = dev->platform_data->gpio0_ctrl(sd, flag);
+ ret |= dev->platform_data->gpio1_ctrl(sd, flag);
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to DS, at least 5ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ goto fail_power;
+ }
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* according to DS, 20ms is needed between PWDN and i2c access */
+ msleep(20);
+
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int ov2722_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0)
+ return power_down(sd);
+
+ ret = power_up(sd);
+ if (!ret)
+ return ov2722_init(sd);
+
+ return ret;
+}
+
+/* TODO: remove it. */
+static int ov2722_startup(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ ret = ov2722_write_reg(client, OV2722_8BIT,
+ OV2722_SW_RESET, 0x01);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 reset err.\n");
+ return ret;
+ }
+
+ ret = ov2722_write_reg_array(client, dev->res->regs);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 write register err.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ov2722_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2722_resolution *res;
+ struct camera_mipi_info *ov2722_info = NULL;
+ int ret = 0;
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ ov2722_info = v4l2_get_subdev_hostdata(sd);
+ if (!ov2722_info)
+ return -EINVAL;
+
+ res = v4l2_find_nearest_size(ov2722_res_preview,
+ ARRAY_SIZE(ov2722_res_preview), width,
+ height, fmt->width, fmt->height);
+ if (!res)
+ res = &ov2722_res_preview[N_RES - 1];
+
+ fmt->width = res->width;
+ fmt->height = res->height;
+ dev->res = res;
+
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
+ return 0;
+ }
+
+ mutex_lock(&dev->input_lock);
+
+ dev->pixels_per_line = dev->res->pixels_per_line;
+ dev->lines_per_frame = dev->res->lines_per_frame;
+
+ ret = ov2722_startup(sd);
+ if (ret) {
+ int i = 0;
+
+ dev_err(&client->dev, "ov2722 startup err, retry to power up\n");
+ for (i = 0; i < OV2722_POWER_UP_RETRY_NUM; i++) {
+ dev_err(&client->dev,
+ "ov2722 retry to power up %d/%d times, result: ",
+ i + 1, OV2722_POWER_UP_RETRY_NUM);
+ power_down(sd);
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "power up failed, continue\n");
+ continue;
+ }
+ ret = ov2722_startup(sd);
+ if (ret) {
+ dev_err(&client->dev, " startup FAILED!\n");
+ } else {
+ dev_err(&client->dev, " startup SUCCESS!\n");
+ break;
+ }
+ }
+ if (ret) {
+ dev_err(&client->dev, "ov2722 startup err\n");
+ goto err;
+ }
+ }
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = dev->res->width;
+ fmt->height = dev->res->height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov2722_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high = 0, low = 0;
+ u16 id;
+ u8 revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_H, &high);
+ ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_L, &low);
+ id = (high << 8) | low;
+
+ if ((id != OV2722_ID) && (id != OV2720_ID)) {
+ dev_err(&client->dev, "sensor ID error\n");
+ return -ENODEV;
+ }
+
+ high = 0;
+ ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_SUB_ID, &high);
+ revision = (u8)high & 0x0f;
+
+ dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
+ dev_dbg(&client->dev, "detect ov2722 success\n");
+ return 0;
+}
+
+static int ov2722_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ ret = ov2722_write_reg(client, OV2722_8BIT, OV2722_SW_STREAM,
+ enable ? OV2722_START_STREAMING :
+ OV2722_STOP_STREAMING);
+
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+
+	/*
+	 * Power off the module, then power it on again later, as the first
+	 * power on done by the board may not fulfill the power-on sequence
+	 * needed by the module.
+	 */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov2722_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "ov2722_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = dev->res->fps;
+
+ return 0;
+}
+
+static int ov2722_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov2722_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = ov2722_res[index].width;
+ fse->min_height = ov2722_res[index].height;
+ fse->max_width = ov2722_res[index].width;
+ fse->max_height = ov2722_res[index].height;
+
+ return 0;
+}
+
+static int ov2722_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = dev->res->skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops ov2722_sensor_ops = {
+ .g_skip_frames = ov2722_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops ov2722_video_ops = {
+ .s_stream = ov2722_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops ov2722_core_ops = {
+ .s_power = ov2722_s_power,
+ .ioctl = ov2722_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops ov2722_pad_ops = {
+ .enum_mbus_code = ov2722_enum_mbus_code,
+ .enum_frame_size = ov2722_enum_frame_size,
+ .get_fmt = ov2722_get_fmt,
+ .set_fmt = ov2722_set_fmt,
+ .get_frame_interval = ov2722_get_frame_interval,
+};
+
+static const struct v4l2_subdev_ops ov2722_ops = {
+ .core = &ov2722_core_ops,
+ .video = &ov2722_video_ops,
+ .pad = &ov2722_pad_ops,
+ .sensor = &ov2722_sensor_ops,
+};
+
+static void ov2722_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ dev->platform_data->csi_cfg(sd, 0);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister_subdev(sd);
+
+ atomisp_gmin_remove_subdev(sd);
+
+ media_entity_cleanup(&dev->sd.entity);
+ kfree(dev);
+}
+
+static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
+{
+ struct v4l2_ctrl_handler *hdl;
+ unsigned int i;
+
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ov2722_controls));
+ for (i = 0; i < ARRAY_SIZE(ov2722_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2722_controls[i],
+ NULL);
+
+ dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_LINK_FREQ);
+
+ if (dev->ctrl_handler.error || !dev->link_freq)
+ return dev->ctrl_handler.error;
+
+ dev->sd.ctrl_handler = hdl;
+
+ return 0;
+}
+
+static int ov2722_probe(struct i2c_client *client)
+{
+ struct ov2722_device *dev;
+ void *ovpdev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->res = &ov2722_res_preview[0];
+ v4l2_i2c_subdev_init(&dev->sd, client, &ov2722_ops);
+
+ ovpdev = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+
+ ret = ov2722_s_config(&dev->sd, client->irq, ovpdev);
+ if (ret)
+ goto out_free;
+
+ ret = __ov2722_init_ctrl_handler(dev);
+ if (ret)
+ goto out_ctrl_handler_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ ov2722_remove(client);
+
+ return atomisp_register_i2c_module(&dev->sd, ovpdev);
+
+out_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+
+out_free:
+ atomisp_gmin_remove_subdev(&dev->sd);
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct acpi_device_id ov2722_acpi_match[] = {
+ { "INT33FB" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match);
+
+static struct i2c_driver ov2722_driver = {
+ .driver = {
+ .name = "ov2722",
+ .acpi_match_table = ov2722_acpi_match,
+ },
+ .probe = ov2722_probe,
+ .remove = ov2722_remove,
+};
+module_i2c_driver(ov2722_driver);
+
+MODULE_AUTHOR("Wei Liu <wei.liu@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
new file mode 100644
index 000000000000..7dd9a676fb98
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -0,0 +1,633 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for GalaxyCore GC2235 2M camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ */
+
+#ifndef __GC2235_H__
+#define __GC2235_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+/*
+ * FIXME: non-preview resolutions are currently broken
+ */
+#define ENABLE_NON_PREVIEW 0
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define GC2235_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC2235_FOCAL_LENGTH_DEFAULT 0x1160064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC2235_F_NUMBER_DEFAULT 0x1a000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define GC2235_F_NUMBER_RANGE 0x1a0a1a0a
+#define GC2235_ID 0x2235
+
+#define GC2235_FINE_INTG_TIME_MIN 0
+#define GC2235_FINE_INTG_TIME_MAX_MARGIN 0
+#define GC2235_COARSE_INTG_TIME_MIN 1
+#define GC2235_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/*
+ * GC2235 System control registers
+ */
+#define GC2235_SENSOR_ID_H 0xF0
+#define GC2235_SENSOR_ID_L 0xF1
+#define GC2235_RESET_RELATED 0xFE
+#define GC2235_SW_RESET 0x8
+#define GC2235_MIPI_RESET 0x3
+#define GC2235_RESET_BIT 0x4
+#define GC2235_REGISTER_PAGE_0 0x0
+#define GC2235_REGISTER_PAGE_3 0x3
+
+#define GC2235_V_CROP_START_H 0x91
+#define GC2235_V_CROP_START_L 0x92
+#define GC2235_H_CROP_START_H 0x93
+#define GC2235_H_CROP_START_L 0x94
+#define GC2235_V_OUTSIZE_H 0x95
+#define GC2235_V_OUTSIZE_L 0x96
+#define GC2235_H_OUTSIZE_H 0x97
+#define GC2235_H_OUTSIZE_L 0x98
+
+#define GC2235_HB_H 0x5
+#define GC2235_HB_L 0x6
+#define GC2235_VB_H 0x7
+#define GC2235_VB_L 0x8
+#define GC2235_SH_DELAY_H 0x11
+#define GC2235_SH_DELAY_L 0x12
+
+#define GC2235_CSI2_MODE 0x10
+
+#define GC2235_EXPOSURE_H 0x3
+#define GC2235_EXPOSURE_L 0x4
+#define GC2235_GLOBAL_GAIN 0xB0
+#define GC2235_PRE_GAIN 0xB1
+#define GC2235_AWB_R_GAIN 0xB3
+#define GC2235_AWB_G_GAIN 0xB4
+#define GC2235_AWB_B_GAIN 0xB5
+
+#define GC2235_START_STREAMING 0x91
+#define GC2235_STOP_STREAMING 0x0
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct gc2235_resolution {
+ u8 *desc;
+ const struct gc2235_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ bool used;
+};
+
+struct gc2235_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct gc2235_reg *regs;
+};
+
+/*
+ * gc2235 device structure.
+ */
+struct gc2235_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct gc2235_resolution *res;
+
+ struct camera_sensor_platform_data *platform_data;
+ u8 type;
+};
+
+enum gc2235_tok_type {
+ GC2235_8BIT = 0x0001,
+ GC2235_16BIT = 0x0002,
+ GC2235_32BIT = 0x0004,
+ GC2235_TOK_TERM = 0xf000, /* terminating token for reg list */
+ GC2235_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ GC2235_TOK_MASK = 0xfff0
+};
+
+/*
+ * struct gc2235_reg - GC2235 sensor register format
+ * @type: type of the register
+ * @reg: 8-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct gc2235_reg {
+ enum gc2235_tok_type type;
+ u8 reg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+#define to_gc2235_sensor(x) container_of(x, struct gc2235_device, sd)
+
+#define GC2235_MAX_WRITE_BUF_SIZE 30
+
+struct gc2235_write_buffer {
+ u8 addr;
+ u8 data[GC2235_MAX_WRITE_BUF_SIZE];
+};
+
+struct gc2235_write_ctrl {
+ int index;
+ struct gc2235_write_buffer buffer;
+};
+
+static const struct gc2235_reg gc2235_stream_on[] = {
+ { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
+ { GC2235_8BIT, 0x10, 0x91}, /* start mipi */
+ { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static const struct gc2235_reg gc2235_stream_off[] = {
+ { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
+ { GC2235_8BIT, 0x10, 0x01}, /* stop mipi */
+ { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static const struct gc2235_reg gc2235_init_settings[] = {
+ /* System */
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xf2, 0x00 },
+ { GC2235_8BIT, 0xf6, 0x00 },
+ { GC2235_8BIT, 0xfc, 0x06 },
+ { GC2235_8BIT, 0xf7, 0x15 },
+ { GC2235_8BIT, 0xf8, 0x84 },
+ { GC2235_8BIT, 0xf9, 0xfe },
+ { GC2235_8BIT, 0xfa, 0x00 },
+ { GC2235_8BIT, 0xfe, 0x00 },
+ /* Analog & cisctl */
+ { GC2235_8BIT, 0x03, 0x04 },
+ { GC2235_8BIT, 0x04, 0x9E },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x06, 0xfd },
+ { GC2235_8BIT, 0x07, 0x00 },
+ { GC2235_8BIT, 0x08, 0x14 },
+ { GC2235_8BIT, 0x0a, 0x02 }, /* row start */
+ { GC2235_8BIT, 0x0c, 0x00 }, /* col start */
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x60 },
+ { GC2235_8BIT, 0x17, 0x15 }, /* mirror flip */
+ { GC2235_8BIT, 0x18, 0x1a },
+ { GC2235_8BIT, 0x19, 0x06 },
+ { GC2235_8BIT, 0x1a, 0x01 },
+ { GC2235_8BIT, 0x1b, 0x4d },
+ { GC2235_8BIT, 0x1e, 0x88 },
+ { GC2235_8BIT, 0x1f, 0x48 },
+ { GC2235_8BIT, 0x20, 0x03 },
+ { GC2235_8BIT, 0x21, 0x7f },
+ { GC2235_8BIT, 0x22, 0x83 },
+ { GC2235_8BIT, 0x23, 0x42 },
+ { GC2235_8BIT, 0x24, 0x16 },
+ { GC2235_8BIT, 0x26, 0x01 }, /*analog gain*/
+ { GC2235_8BIT, 0x27, 0x30 },
+ { GC2235_8BIT, 0x3f, 0x00 }, /* PRC */
+ /* blk */
+ { GC2235_8BIT, 0x40, 0xa3 },
+ { GC2235_8BIT, 0x41, 0x82 },
+ { GC2235_8BIT, 0x43, 0x20 },
+ { GC2235_8BIT, 0x5e, 0x18 },
+ { GC2235_8BIT, 0x5f, 0x18 },
+ { GC2235_8BIT, 0x60, 0x18 },
+ { GC2235_8BIT, 0x61, 0x18 },
+ { GC2235_8BIT, 0x62, 0x18 },
+ { GC2235_8BIT, 0x63, 0x18 },
+ { GC2235_8BIT, 0x64, 0x18 },
+ { GC2235_8BIT, 0x65, 0x18 },
+ { GC2235_8BIT, 0x66, 0x20 },
+ { GC2235_8BIT, 0x67, 0x20 },
+ { GC2235_8BIT, 0x68, 0x20 },
+ { GC2235_8BIT, 0x69, 0x20 },
+ /* Gain */
+ { GC2235_8BIT, 0xb2, 0x00 },
+ { GC2235_8BIT, 0xb3, 0x40 },
+ { GC2235_8BIT, 0xb4, 0x40 },
+ { GC2235_8BIT, 0xb5, 0x40 },
+ /* Dark sun */
+ { GC2235_8BIT, 0xbc, 0x00 },
+
+ { GC2235_8BIT, 0xfe, 0x03 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+/*
+ * Register settings for various resolutions
+ */
+#if ENABLE_NON_PREVIEW
+static const struct gc2235_reg gc2235_1296_736_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x07, 0x01 }, /* VBI */
+ { GC2235_8BIT, 0x08, 0x44 },
+ { GC2235_8BIT, 0x09, 0x00 }, /* row start */
+ { GC2235_8BIT, 0x0a, 0xf0 },
+ { GC2235_8BIT, 0x0b, 0x00 }, /* col start */
+ { GC2235_8BIT, 0x0c, 0xa0 },
+ { GC2235_8BIT, 0x0d, 0x02 }, /* win height 736 */
+ { GC2235_8BIT, 0x0e, 0xf0 },
+ { GC2235_8BIT, 0x0f, 0x05 }, /* win width: 1296 */
+ { GC2235_8BIT, 0x10, 0x20 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x08 },
+ { GC2235_8BIT, 0x94, 0x08 },
+ { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 736 */
+ { GC2235_8BIT, 0x96, 0xe0 },
+ { GC2235_8BIT, 0x97, 0x05 }, /* crop win width 1296 */
+ { GC2235_8BIT, 0x98, 0x10 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0x54 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x06 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static const struct gc2235_reg gc2235_960_640_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x07, 0x02 }, /* VBI */
+ { GC2235_8BIT, 0x08, 0xA4 },
+ { GC2235_8BIT, 0x09, 0x01 }, /* row start */
+ { GC2235_8BIT, 0x0a, 0x18 },
+ { GC2235_8BIT, 0x0b, 0x01 }, /* col start */
+ { GC2235_8BIT, 0x0c, 0x40 },
+ { GC2235_8BIT, 0x0d, 0x02 }, /* win height 656 */
+ { GC2235_8BIT, 0x0e, 0x90 },
+ { GC2235_8BIT, 0x0f, 0x03 }, /* win width: 976 */
+ { GC2235_8BIT, 0x10, 0xd0 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x06 },
+ { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 640 */
+ { GC2235_8BIT, 0x96, 0x80 },
+ { GC2235_8BIT, 0x97, 0x03 }, /* crop win width 960 */
+ { GC2235_8BIT, 0x98, 0xc0 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xb0 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x04 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+#endif
+
+static const struct gc2235_reg gc2235_1600_900_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x03 }, /* win height 932 */
+ { GC2235_8BIT, 0x0e, 0xa4 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1632 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x06 },
+ { GC2235_8BIT, 0x95, 0x03 }, /* crop win height 900 */
+ { GC2235_8BIT, 0x96, 0x84 },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1600 */
+ { GC2235_8BIT, 0x98, 0x40 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xd0 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
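As a sanity check of the val_low/val_high comments in the table above: for 10-bit RAW data the per-line MIPI buffer size is width * 10 / 8 bytes, so for this 1600-pixel-wide mode 1600 * 10 / 8 = 2000 = 0x07d0, which matches the 0xd0/0x07 pair written to registers 0x12/0x13.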
+
+static const struct gc2235_reg gc2235_1616_1082_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x4a },
+ { GC2235_8BIT, 0x94, 0x00 },
+ { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1082 */
+ { GC2235_8BIT, 0x96, 0x3a },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */
+ { GC2235_8BIT, 0x98, 0x50 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static const struct gc2235_reg gc2235_1616_1216_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x00 },
+ { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1216 */
+ { GC2235_8BIT, 0x96, 0xc0 },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */
+ { GC2235_8BIT, 0x98, 0x50 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_resolution gc2235_res_preview[] = {
+ {
+ .desc = "gc2235_1600_900_30fps",
+ .width = 1600,
+ .height = 900,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1068,
+ .skip_frames = 3,
+ .regs = gc2235_1600_900_30fps,
+ },
+
+ {
+ .desc = "gc2235_1600_1066_30fps",
+ .width = 1616,
+ .height = 1082,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1082_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1200_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1216_30fps,
+ },
+
+};
+
+#define N_RES_PREVIEW (ARRAY_SIZE(gc2235_res_preview))
+
+/*
+ * Disable non-preview configurations until the configuration selection is
+ * improved.
+ */
+#if ENABLE_NON_PREVIEW
+static struct gc2235_resolution gc2235_res_still[] = {
+ {
+ .desc = "gc2235_1600_900_30fps",
+ .width = 1600,
+ .height = 900,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1068,
+ .skip_frames = 3,
+ .regs = gc2235_1600_900_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1066_30fps",
+ .width = 1616,
+ .height = 1082,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1082_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1200_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1216_30fps,
+ },
+
+};
+
+#define N_RES_STILL (ARRAY_SIZE(gc2235_res_still))
+
+static struct gc2235_resolution gc2235_res_video[] = {
+ {
+ .desc = "gc2235_1296_736_30fps",
+ .width = 1296,
+ .height = 736,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1828,
+ .lines_per_frame = 888,
+ .skip_frames = 3,
+ .regs = gc2235_1296_736_30fps,
+ },
+ {
+ .desc = "gc2235_960_640_30fps",
+ .width = 960,
+ .height = 640,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1492,
+ .lines_per_frame = 792,
+ .skip_frames = 3,
+ .regs = gc2235_960_640_30fps,
+ },
+
+};
+
+#define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video))
+#endif
+
+static struct gc2235_resolution *gc2235_res = gc2235_res_preview;
+static unsigned long N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
new file mode 100644
index 000000000000..00317d105305
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for OmniVision OV2722 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ */
+
+#ifndef __OV2722_H__
+#define __OV2722_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+#define OV2722_POWER_UP_RETRY_NUM 5
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define OV2722_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2722_FOCAL_LENGTH_DEFAULT 0x1160064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2722_F_NUMBER_DEFAULT 0x1a000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define OV2722_F_NUMBER_RANGE 0x1a0a1a0a
+#define OV2720_ID 0x2720
+#define OV2722_ID 0x2722
+
+#define OV2722_FINE_INTG_TIME_MIN 0
+#define OV2722_FINE_INTG_TIME_MAX_MARGIN 0
+#define OV2722_COARSE_INTG_TIME_MIN 1
+#define OV2722_COARSE_INTG_TIME_MAX_MARGIN 4
+
+/*
+ * OV2722 System control registers
+ */
+#define OV2722_SW_SLEEP 0x0100
+#define OV2722_SW_RESET 0x0103
+#define OV2722_SW_STREAM 0x0100
+
+#define OV2722_SC_CMMN_CHIP_ID_H 0x300A
+#define OV2722_SC_CMMN_CHIP_ID_L 0x300B
+#define OV2722_SC_CMMN_SCCB_ID 0x300C
+#define OV2722_SC_CMMN_SUB_ID 0x302A /* process, version*/
+
+#define OV2722_SC_CMMN_PAD_OEN0 0x3000
+#define OV2722_SC_CMMN_PAD_OEN1 0x3001
+#define OV2722_SC_CMMN_PAD_OEN2 0x3002
+#define OV2722_SC_CMMN_PAD_OUT0 0x3008
+#define OV2722_SC_CMMN_PAD_OUT1 0x3009
+#define OV2722_SC_CMMN_PAD_OUT2 0x300D
+#define OV2722_SC_CMMN_PAD_SEL0 0x300E
+#define OV2722_SC_CMMN_PAD_SEL1 0x300F
+#define OV2722_SC_CMMN_PAD_SEL2 0x3010
+
+#define OV2722_SC_CMMN_PAD_PK 0x3011
+#define OV2722_SC_CMMN_A_PWC_PK_O_13 0x3013
+#define OV2722_SC_CMMN_A_PWC_PK_O_14 0x3014
+
+#define OV2722_SC_CMMN_CLKRST0 0x301A
+#define OV2722_SC_CMMN_CLKRST1 0x301B
+#define OV2722_SC_CMMN_CLKRST2 0x301C
+#define OV2722_SC_CMMN_CLKRST3 0x301D
+#define OV2722_SC_CMMN_CLKRST4 0x301E
+#define OV2722_SC_CMMN_CLKRST5 0x3005
+#define OV2722_SC_CMMN_PCLK_DIV_CTRL 0x3007
+#define OV2722_SC_CMMN_CLOCK_SEL 0x3020
+#define OV2722_SC_SOC_CLKRST5 0x3040
+
+#define OV2722_SC_CMMN_PLL_CTRL0 0x3034
+#define OV2722_SC_CMMN_PLL_CTRL1 0x3035
+#define OV2722_SC_CMMN_PLL_CTRL2 0x3039
+#define OV2722_SC_CMMN_PLL_CTRL3 0x3037
+#define OV2722_SC_CMMN_PLL_MULTIPLIER 0x3036
+#define OV2722_SC_CMMN_PLL_DEBUG_OPT 0x3038
+#define OV2722_SC_CMMN_PLLS_CTRL0 0x303A
+#define OV2722_SC_CMMN_PLLS_CTRL1 0x303B
+#define OV2722_SC_CMMN_PLLS_CTRL2 0x303C
+#define OV2722_SC_CMMN_PLLS_CTRL3 0x303D
+
+#define OV2722_SC_CMMN_MIPI_PHY_16 0x3016
+#define OV2722_SC_CMMN_MIPI_PHY_17 0x3017
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_18 0x3018
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_19 0x3019
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_21 0x3021
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_22 0x3022
+
+#define OV2722_AEC_PK_EXPO_H 0x3500
+#define OV2722_AEC_PK_EXPO_M 0x3501
+#define OV2722_AEC_PK_EXPO_L 0x3502
+#define OV2722_AEC_MANUAL_CTRL 0x3503
+#define OV2722_AGC_ADJ_H 0x3508
+#define OV2722_AGC_ADJ_L 0x3509
+#define OV2722_VTS_DIFF_H 0x350c
+#define OV2722_VTS_DIFF_L 0x350d
+#define OV2722_GROUP_ACCESS 0x3208
+#define OV2722_HTS_H 0x380c
+#define OV2722_HTS_L 0x380d
+#define OV2722_VTS_H 0x380e
+#define OV2722_VTS_L 0x380f
+
+#define OV2722_MWB_GAIN_R_H 0x5186
+#define OV2722_MWB_GAIN_R_L 0x5187
+#define OV2722_MWB_GAIN_G_H 0x5188
+#define OV2722_MWB_GAIN_G_L 0x5189
+#define OV2722_MWB_GAIN_B_H 0x518a
+#define OV2722_MWB_GAIN_B_L 0x518b
+
+#define OV2722_H_CROP_START_H 0x3800
+#define OV2722_H_CROP_START_L 0x3801
+#define OV2722_V_CROP_START_H 0x3802
+#define OV2722_V_CROP_START_L 0x3803
+#define OV2722_H_CROP_END_H 0x3804
+#define OV2722_H_CROP_END_L 0x3805
+#define OV2722_V_CROP_END_H 0x3806
+#define OV2722_V_CROP_END_L 0x3807
+#define OV2722_H_OUTSIZE_H 0x3808
+#define OV2722_H_OUTSIZE_L 0x3809
+#define OV2722_V_OUTSIZE_H 0x380a
+#define OV2722_V_OUTSIZE_L 0x380b
+
+#define OV2722_START_STREAMING 0x01
+#define OV2722_STOP_STREAMING 0x00
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct ov2722_resolution {
+ u8 *desc;
+ const struct ov2722_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ bool used;
+ int mipi_freq;
+};
+
+struct ov2722_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct ov2722_reg *regs;
+};
+
+/*
+ * ov2722 device structure.
+ */
+struct ov2722_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct ov2722_resolution *res;
+
+ struct camera_sensor_platform_data *platform_data;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 type;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *link_freq;
+};
+
+enum ov2722_tok_type {
+ OV2722_8BIT = 0x0001,
+ OV2722_16BIT = 0x0002,
+ OV2722_32BIT = 0x0004,
+ OV2722_TOK_TERM = 0xf000, /* terminating token for reg list */
+ OV2722_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ OV2722_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct ov2722_reg - MI sensor register format
+ * @type: type of the register
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov2722_reg {
+ enum ov2722_tok_type type;
+ u16 reg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+#define to_ov2722_sensor(x) container_of(x, struct ov2722_device, sd)
+
+#define OV2722_MAX_WRITE_BUF_SIZE 30
+
+struct ov2722_write_buffer {
+ u16 addr;
+ u8 data[OV2722_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov2722_write_ctrl {
+ int index;
+ struct ov2722_write_buffer buffer;
+};
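ov2722_write_ctrl above is sized so that writes to consecutive 16-bit register addresses can be coalesced into a single I2C transfer. A rough sketch of such a batcher is shown below; ov2722_flush_ctrl() is a hypothetical helper that would actually send the queued bytes as one message and is assumed to reset ctrl->index to 0.

/* Illustrative batching sketch; ov2722_flush_ctrl() is assumed elsewhere. */
static int ov2722_queue_reg(struct i2c_client *client,
			    struct ov2722_write_ctrl *ctrl,
			    u16 reg, u8 val)
{
	int ret;

	/* Flush when the buffer is full or the register is not consecutive. */
	if (ctrl->index == OV2722_MAX_WRITE_BUF_SIZE ||
	    (ctrl->index && ctrl->buffer.addr + ctrl->index != reg)) {
		ret = ov2722_flush_ctrl(client, ctrl);
		if (ret)
			return ret;
	}

	if (ctrl->index == 0)
		ctrl->buffer.addr = reg;	/* first register of the burst */
	ctrl->buffer.data[ctrl->index++] = val;
	return 0;
}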
+
+/*
+ * Register settings for various resolutions
+ */
+#if 0
+static const struct ov2722_reg ov2722_QVGA_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x0c},
+ {OV2722_8BIT, 0x373a, 0x1c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x0c},
+ {OV2722_8BIT, 0x3705, 0x06},
+ {OV2722_8BIT, 0x3730, 0x0e},
+ {OV2722_8BIT, 0x3704, 0x1c},
+ {OV2722_8BIT, 0x3f06, 0x00},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x46},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0x63},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x95}, /* H crop end: 1685 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x27}, /* V crop end: 1063 */
+ {OV2722_8BIT, 0x3808, 0x01},
+ {OV2722_8BIT, 0x3809, 0x50}, /* H output size: 336 */
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0x00}, /* V output size: 256 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0xc0},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x71},
+ {OV2722_8BIT, 0x3815, 0x71},
+ {OV2722_8BIT, 0x3612, 0x49},
+ {OV2722_8BIT, 0x3618, 0x00},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0xc3},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x77},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x09},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+
+};
+
+static const struct ov2722_reg ov2722_480P_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x18},
+ {OV2722_8BIT, 0x373a, 0x3c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x1d},
+ {OV2722_8BIT, 0x3705, 0x12},
+ {OV2722_8BIT, 0x3730, 0x1f},
+ {OV2722_8BIT, 0x3704, 0x3f},
+ {OV2722_8BIT, 0x3f06, 0x1d},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x83},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0xbd},
+ {OV2722_8BIT, 0x3708, 0x63},
+ {OV2722_8BIT, 0x3709, 0x52},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0xf2}, /* H crop start: 322 - 80 = 242*/
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xBB}, /* H crop end: 1643 + 80 = 1723*/
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/
+ {OV2722_8BIT, 0x3808, 0x02},
+ {OV2722_8BIT, 0x3809, 0xE0}, /* H output size: 656 +80 = 736*/
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x31},
+ {OV2722_8BIT, 0x3815, 0x31},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x02},
+ {OV2722_8BIT, 0x3a09, 0x67},
+ {OV2722_8BIT, 0x3a0a, 0x02},
+ {OV2722_8BIT, 0x3a0b, 0x00},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x0a},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+
+static const struct ov2722_reg ov2722_VGA_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x18},
+ {OV2722_8BIT, 0x373a, 0x3c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x1d},
+ {OV2722_8BIT, 0x3705, 0x12},
+ {OV2722_8BIT, 0x3730, 0x1f},
+ {OV2722_8BIT, 0x3704, 0x3f},
+ {OV2722_8BIT, 0x3f06, 0x1d},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x83},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0xbd},
+ {OV2722_8BIT, 0x3708, 0x63},
+ {OV2722_8BIT, 0x3709, 0x52},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x6B}, /* H crop end: 1643*/
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/
+ {OV2722_8BIT, 0x3808, 0x02},
+ {OV2722_8BIT, 0x3809, 0x90}, /* H output size: 656 */
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x31},
+ {OV2722_8BIT, 0x3815, 0x31},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x02},
+ {OV2722_8BIT, 0x3a09, 0x67},
+ {OV2722_8BIT, 0x3a0a, 0x02},
+ {OV2722_8BIT, 0x3a0b, 0x00},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x0a},
+ {OV2722_8BIT, 0x4837, 0x29},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+#endif
+
+static const struct ov2722_reg ov2722_1632_1092_30fps[] = {
+	{OV2722_8BIT, 0x3021, 0x03}, /* standby: wait for a whole
+						frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0x9E}, /* H crop start: 158 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x07},
+ {OV2722_8BIT, 0x3805, 0x05}, /* H crop end: 1797 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+
+ {OV2722_8BIT, 0x3808, 0x06},
+ {OV2722_8BIT, 0x3809, 0x60}, /* H output size: 1632 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+static const struct ov2722_reg ov2722_1452_1092_30fps[] = {
+	{OV2722_8BIT, 0x3021, 0x03}, /* standby: wait for a whole
+						frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0xF8}, /* H crop start: 248 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xab}, /* H crop end: 1707 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+ {OV2722_8BIT, 0x3808, 0x05},
+ {OV2722_8BIT, 0x3809, 0xac}, /* H output size: 1452 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+#if 0
+static const struct ov2722_reg ov2722_1M3_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x4a}, /* H crop start: 330 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x03}, /* V crop start: 3 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xe1}, /* H crop end: 1761 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x47}, /* V crop end: 1095 */
+ {OV2722_8BIT, 0x3808, 0x05},
+ {OV2722_8BIT, 0x3809, 0x88}, /* H output size: 1416 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x0a}, /* V output size: 1034 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x05}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+#endif
+
+static const struct ov2722_reg ov2722_1080p_30fps[] = {
+	{OV2722_8BIT, 0x3021, 0x03}, /* standby: wait for a whole
+					frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x2b},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x28},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0x08}, /* H crop start: 8 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x07},
+ {OV2722_8BIT, 0x3805, 0x9b}, /* H crop end: 1947 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+ {OV2722_8BIT, 0x3808, 0x07},
+ {OV2722_8BIT, 0x3809, 0x8c}, /* H output size: 1932 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x14}, /* H timing: 2068 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0x5a}, /* V timing: 1114 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcd}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x24}, /* 345.6 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+#if 0 /* Currently unused */
+static const struct ov2722_reg ov2722_720p_30fps[] = {
+ {OV2722_8BIT, 0x3021, 0x03},
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x40}, /* H crop start: 320 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0xb1}, /* V crop start: 177 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x55}, /* H crop end: 1621 */
+ {OV2722_8BIT, 0x3806, 0x03},
+ {OV2722_8BIT, 0x3807, 0x95}, /* V crop end: 918 */
+ {OV2722_8BIT, 0x3808, 0x05},
+	{OV2722_8BIT, 0x3809, 0x10}, /* H output size: 0x0510 == 1296 */
+ {OV2722_8BIT, 0x380a, 0x02},
+	{OV2722_8BIT, 0x380b, 0xe0}, /* V output size: 0x02e0 == 736 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H timing: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa3}, /* V timing: 1187 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26}, /* {0x3036, 0x2c}, //422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0},
+};
+#endif
+
+static struct ov2722_resolution ov2722_res_preview[] = {
+ {
+ .desc = "ov2722_1632_1092_30fps",
+ .width = 1632,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .skip_frames = 3,
+ .regs = ov2722_1632_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1452_1092_30fps",
+ .width = 1452,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .skip_frames = 3,
+ .regs = ov2722_1452_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+
+#define N_RES_PREVIEW (ARRAY_SIZE(ov2722_res_preview))
+
+/*
+ * Disable non-preview configurations until the configuration selection is
+ * improved.
+ */
+#if 0
+struct ov2722_resolution ov2722_res_still[] = {
+ {
+		.desc = "ov2722_1632_1092_30fps",
+ .width = 1632,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .skip_frames = 3,
+ .regs = ov2722_1632_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1452_1092_30fps",
+ .width = 1452,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .skip_frames = 3,
+ .regs = ov2722_1452_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+
+#define N_RES_STILL (ARRAY_SIZE(ov2722_res_still))
+
+struct ov2722_resolution ov2722_res_video[] = {
+ {
+ .desc = "ov2722_QVGA_30fps",
+ .width = 336,
+ .height = 256,
+ .fps = 30,
+ .pix_clk_freq = 73,
+ .used = 0,
+ .pixels_per_line = 2048,
+ .lines_per_frame = 1184,
+ .skip_frames = 3,
+ .regs = ov2722_QVGA_30fps,
+ .mipi_freq = 364800,
+ },
+ {
+ .desc = "ov2722_480P_30fps",
+ .width = 736,
+ .height = 496,
+ .fps = 30,
+ .pix_clk_freq = 73,
+ .used = 0,
+ .pixels_per_line = 2048,
+ .lines_per_frame = 1184,
+ .skip_frames = 3,
+ .regs = ov2722_480P_30fps,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+
+#define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video))
+#endif
+
+static struct ov2722_resolution *ov2722_res = ov2722_res_preview;
+static unsigned long N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm.h b/drivers/staging/media/atomisp/include/hmm/hmm.h
new file mode 100644
index 000000000000..a7aef27f54de
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __HMM_H__
+#define __HMM_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include "hmm_common.h"
+#include "hmm/hmm_bo.h"
+#include "ia_css_types.h"
+
+#define mmgr_NULL ((ia_css_ptr)0)
+#define mmgr_EXCEPTION ((ia_css_ptr) - 1)
+
+int hmm_init(void);
+void hmm_cleanup(void);
+
+ia_css_ptr hmm_alloc(size_t bytes);
+ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr);
+
+void hmm_free(ia_css_ptr ptr);
+int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes);
+int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes);
+int hmm_set(ia_css_ptr virt, int c, unsigned int bytes);
+int hmm_flush(ia_css_ptr virt, unsigned int bytes);
+
+/*
+ * Get the kernel memory physical address for an ISP virtual address.
+ */
+phys_addr_t hmm_virt_to_phys(ia_css_ptr virt);
+
+/*
+ * Map the ISP memory starting at virt to a kernel virtual address
+ * using vmap. Returns NULL on failure.
+ *
+ * virt must be the start address of ISP memory (as returned by
+ * hmm_alloc); do not pass any other address.
+ */
+void *hmm_vmap(ia_css_ptr virt, bool cached);
+void hmm_vunmap(ia_css_ptr virt);
+
+/*
+ * Flush the cache for the vmapped buffer.
+ * If the buffer has not been vmapped, this is a no-op.
+ */
+void hmm_flush_vmap(ia_css_ptr virt);
+
+/*
+ * Map the ISP memory starting at virt into a specific vma.
+ *
+ * Used for the mmap operation.
+ *
+ * virt must be the start address of ISP memory (as returned by
+ * hmm_alloc); do not pass any other address.
+ */
+int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt);
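A minimal usage sketch of the helpers declared above (error handling trimmed; it assumes hmm_init() has already succeeded):

/* Illustrative only: allocate ISP memory, copy data in, read it back. */
static int hmm_roundtrip_example(const void *src, void *dst, size_t bytes)
{
	ia_css_ptr isp_addr = hmm_alloc(bytes);

	if (isp_addr == mmgr_NULL)
		return -ENOMEM;

	hmm_store(isp_addr, src, bytes);	/* host -> ISP memory */
	hmm_load(isp_addr, dst, bytes);		/* ISP memory -> host */
	hmm_free(isp_addr);
	return 0;
}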
+
+extern struct hmm_bo_device bo_device;
+
+#endif
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
new file mode 100644
index 000000000000..e09ac29ac43d
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __HMM_BO_H__
+#define __HMM_BO_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include "mmu/isp_mmu.h"
+#include "hmm/hmm_common.h"
+#include "ia_css_types.h"
+
+#define check_bodev_null_return(bdev, exp) \
+ check_null_return(bdev, exp, \
+ "NULL hmm_bo_device.\n")
+
+#define check_bodev_null_return_void(bdev) \
+ check_null_return_void(bdev, \
+ "NULL hmm_bo_device.\n")
+
+#define check_bo_status_yes_goto(bo, _status, label) \
+ var_not_equal_goto((bo->status & (_status)), (_status), \
+ label, \
+ "HMM buffer status not contain %s.\n", \
+ #_status)
+
+#define check_bo_status_no_goto(bo, _status, label) \
+ var_equal_goto((bo->status & (_status)), (_status), \
+ label, \
+ "HMM buffer status contains %s.\n", \
+ #_status)
+
+#define rbtree_node_to_hmm_bo(root_node) \
+ container_of((root_node), struct hmm_buffer_object, node)
+
+#define list_to_hmm_bo(list_ptr) \
+ list_entry((list_ptr), struct hmm_buffer_object, list)
+
+#define kref_to_hmm_bo(kref_ptr) \
+ list_entry((kref_ptr), struct hmm_buffer_object, kref)
+
+#define check_bo_null_return(bo, exp) \
+ check_null_return(bo, exp, "NULL hmm buffer object.\n")
+
+#define check_bo_null_return_void(bo) \
+ check_null_return_void(bo, "NULL hmm buffer object.\n")
+
+#define ISP_VM_START 0x0
+#define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */
+#define ISP_PTR_NULL NULL
+
+#define HMM_BO_DEVICE_INITED 0x1
+
+enum hmm_bo_type {
+ HMM_BO_PRIVATE,
+ HMM_BO_VMALLOC,
+ HMM_BO_LAST,
+};
+
+#define HMM_BO_MASK 0x1
+#define HMM_BO_FREE 0x0
+#define HMM_BO_ALLOCED 0x1
+#define HMM_BO_PAGE_ALLOCED 0x2
+#define HMM_BO_BINDED 0x4
+#define HMM_BO_MMAPED 0x8
+#define HMM_BO_VMAPED 0x10
+#define HMM_BO_VMAPED_CACHED 0x20
+#define HMM_BO_ACTIVE 0x1000
+
+struct hmm_bo_device {
+ struct isp_mmu mmu;
+
+ /* start/pgnr/size is used to record the virtual memory of this bo */
+ unsigned int start;
+ unsigned int pgnr;
+ unsigned int size;
+
+ /* list lock is used to protect the entire_bo_list */
+ spinlock_t list_lock;
+ int flag;
+
+ /* linked list for entire buffer object */
+ struct list_head entire_bo_list;
+ /* rbtree for maintain entire allocated vm */
+ struct rb_root allocated_rbtree;
+ /* rbtree for maintain entire free vm */
+ struct rb_root free_rbtree;
+ struct mutex rbtree_mutex;
+ struct kmem_cache *bo_cache;
+};
+
+struct hmm_buffer_object {
+ struct hmm_bo_device *bdev;
+ struct list_head list;
+ struct kref kref;
+
+ struct page **pages;
+
+ /* mutex protecting this BO */
+ struct mutex mutex;
+ enum hmm_bo_type type;
+ int mmap_count;
+ int status;
+ void *vmap_addr; /* kernel virtual address by vmap */
+
+ struct rb_node node;
+ unsigned int start;
+ unsigned int end;
+ unsigned int pgnr;
+ /*
+	 * When inserting a bo whose pgnr equals that of a node already in
+	 * the free_rbtree, the "prev & next" pointers are used to chain it
+	 * into a linked list off that node instead of inserting it into the
+	 * free_rbtree directly. This guarantees that each node in the
+	 * free_rbtree has a different pgnr.
+	 * "prev & next" default to NULL.
+ */
+ struct hmm_buffer_object *prev;
+ struct hmm_buffer_object *next;
+};
+
+struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
+ unsigned int pgnr);
+
+void hmm_bo_release(struct hmm_buffer_object *bo);
+
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start, unsigned int size);
+
+/*
+ * clean up all hmm_bo_device related things.
+ */
+void hmm_bo_device_exit(struct hmm_bo_device *bdev);
+
+/*
+ * whether the bo device is inited or not.
+ */
+int hmm_bo_device_inited(struct hmm_bo_device *bdev);
+
+/*
+ * increase buffer object reference.
+ */
+void hmm_bo_ref(struct hmm_buffer_object *bo);
+
+/*
+ * decrease the buffer object reference count. when the count
+ * reaches 0, the release function of the buffer object is called.
+ *
+ * this call is also used to release the hmm_buffer_object, or the
+ * upper-level object it is embedded in. call this function when
+ * the object is no longer used.
+ *
+ * Note:
+ *
+ * the user does not need to care about releasing the internal
+ * resources of the buffer object in the release callback; that is
+ * handled internally.
+ *
+ * this call only releases the internal resources of the buffer
+ * object but does not free the buffer object itself, as the
+ * buffer object can be either pre-allocated statically or
+ * allocated dynamically. the user therefore has to release the
+ * buffer object itself manually. the example below shows the
+ * normal way of using a buffer object.
+ *
+ * struct hmm_buffer_object *bo = hmm_bo_alloc(bdev, pgnr);
+ * ......
+ * hmm_bo_unref(bo);
+ *
+ * or, for a statically embedded buffer object:
+ *
+ * struct hmm_buffer_object bo;
+ *
+ * hmm_bo_init(bdev, &bo, pgnr, NULL);
+ * ...
+ * hmm_bo_unref(&bo);
+ */
+void hmm_bo_unref(struct hmm_buffer_object *bo);
+
+int hmm_bo_allocated(struct hmm_buffer_object *bo);
+
+/*
+ * Allocate/Free physical pages for the bo. Type indicates whether the
+ * pages will be allocated by the video driver (for a shared buffer)
+ * or by the ISP driver itself.
+ */
+int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
+ enum hmm_bo_type type,
+ void *vmalloc_addr);
+void hmm_bo_free_pages(struct hmm_buffer_object *bo);
+int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
+
+/*
+ * bind/unbind the physical pages to a virtual address space.
+ */
+int hmm_bo_bind(struct hmm_buffer_object *bo);
+void hmm_bo_unbind(struct hmm_buffer_object *bo);
+int hmm_bo_binded(struct hmm_buffer_object *bo);
+
+/*
+ * vmap the buffer object's pages to a contiguous kernel virtual address.
+ * if the buffer has already been vmapped, return the virtual address directly.
+ */
+void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);
+
+/*
+ * flush the cache for the vmapped buffer object's pages.
+ * if the buffer has not been vmapped, return directly.
+ */
+void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);
+
+/*
+ * vunmap buffer object's kernel virtual address.
+ */
+void hmm_bo_vunmap(struct hmm_buffer_object *bo);
+
+/*
+ * mmap the bo's physical pages to specific vma.
+ *
+ * vma's address space size must be the same as bo's size,
+ * otherwise it will return -EINVAL.
+ *
+ * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
+ */
+int hmm_bo_mmap(struct vm_area_struct *vma,
+ struct hmm_buffer_object *bo);
+
+/*
+ * find the buffer object by its virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object by its virtual address.
+ * it does not need to be the start address of one bo,
+ * it can be an address within the range of one bo.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object with kernel virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr);
+
+#endif
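
A rough lifecycle sketch built only from the declarations above (reserve ISP virtual space, back it with pages, bind it into the ISP MMU); the wrapper name alloc_and_map_isp_buffer() and its error-handling structure are illustrative, not taken from the driver.

/* Illustrative only: typical hmm_buffer_object lifecycle using the API
 * declared above. The wrapper name is made up for this sketch.
 */
static struct hmm_buffer_object *alloc_and_map_isp_buffer(
		struct hmm_bo_device *bdev, unsigned int pgnr)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_alloc(bdev, pgnr);		/* reserve ISP virtual space */
	if (!bo)
		return NULL;

	if (hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, NULL))	/* back it with pages */
		goto err_unref;
	if (hmm_bo_bind(bo))			/* program the ISP MMU */
		goto err_free_pages;

	return bo;

err_free_pages:
	hmm_bo_free_pages(bo);
err_unref:
	hmm_bo_unref(bo);
	return NULL;
}
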
diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/include/hmm/hmm_common.h
new file mode 100644
index 000000000000..b251e96cc19d
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/hmm/hmm_common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __HMM_BO_COMMON_H__
+#define __HMM_BO_COMMON_H__
+
+#define HMM_BO_NAME "HMM"
+
+/*
+ * some commonly used macros
+ */
+#define var_equal_return(var1, var2, exp, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ return exp;\
+ } \
+ } while (0)
+
+#define var_equal_return_void(var1, var2, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ return;\
+ } \
+ } while (0)
+
+#define var_equal_goto(var1, var2, label, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ goto label;\
+ } \
+ } while (0)
+
+#define var_not_equal_goto(var1, var2, label, fmt, arg ...) \
+ do { \
+ if ((var1) != (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ goto label;\
+ } \
+ } while (0)
+
+#define check_null_return(ptr, exp, fmt, arg ...) \
+ var_equal_return(ptr, NULL, exp, fmt, ## arg)
+
+#define check_null_return_void(ptr, fmt, arg ...) \
+ var_equal_return_void(ptr, NULL, fmt, ## arg)
+
+#endif
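
For reference, a hypothetical caller showing how the check_null_return() helper above is meant to expand; do_something_with() is a made-up name, and atomisp_dev is assumed to be the driver's device pointer declared elsewhere.

/* Illustrative only: check_null_return() logs via dev_err(atomisp_dev, ...)
 * and returns the given value when the pointer is NULL.
 */
static int do_something_with(void *ptr)
{
	check_null_return(ptr, -EINVAL, "NULL pointer passed\n");

	/* ... normal path ... */
	return 0;
}
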
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
new file mode 100644
index 000000000000..3c8fa3f5808d
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -0,0 +1,897 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ */
+
+#ifndef _ATOM_ISP_H
+#define _ATOM_ISP_H
+
+#include <linux/types.h>
+
+/* struct media_device_info.hw_revision */
+#define ATOMISP_HW_REVISION_MASK 0x0000ff00
+#define ATOMISP_HW_REVISION_SHIFT 8
+#define ATOMISP_HW_REVISION_ISP2300 0x00
+#define ATOMISP_HW_REVISION_ISP2400 0x10
+#define ATOMISP_HW_REVISION_ISP2401_LEGACY 0x11
+#define ATOMISP_HW_REVISION_ISP2401 0x20
+
+#define ATOMISP_HW_STEPPING_MASK 0x000000ff
+#define ATOMISP_HW_STEPPING_A0 0x00
+#define ATOMISP_HW_STEPPING_B0 0x10
+
+/*ISP binary running mode*/
+#define CI_MODE_PREVIEW 0x8000
+#define CI_MODE_VIDEO 0x4000
+#define CI_MODE_STILL_CAPTURE 0x2000
+#define CI_MODE_NONE 0x0000
+
+#define OUTPUT_MODE_FILE 0x0100
+#define OUTPUT_MODE_TEXT 0x0200
+
+/*
+ * Camera HAL sets this flag in v4l2_buffer reserved2 to indicate this
+ * buffer has a per-frame parameter.
+ */
+#define ATOMISP_BUFFER_HAS_PER_FRAME_SETTING 0x80000000
+
+/* Custom format for RAW capture from M10MO 0x3130314d */
+#define V4L2_PIX_FMT_CUSTOM_M10MO_RAW v4l2_fourcc('M', '1', '0', '1')
+
+/* Custom media bus formats being used in atomisp */
+#define V4L2_MBUS_FMT_CUSTOM_YUV420 0x8001
+#define V4L2_MBUS_FMT_CUSTOM_YVU420 0x8002
+#define V4L2_MBUS_FMT_CUSTOM_YUV422P 0x8003
+#define V4L2_MBUS_FMT_CUSTOM_YUV444 0x8004
+#define V4L2_MBUS_FMT_CUSTOM_NV12 0x8005
+#define V4L2_MBUS_FMT_CUSTOM_NV21 0x8006
+#define V4L2_MBUS_FMT_CUSTOM_NV16 0x8007
+#define V4L2_MBUS_FMT_CUSTOM_YUYV 0x8008
+#define V4L2_MBUS_FMT_CUSTOM_SBGGR16 0x8009
+#define V4L2_MBUS_FMT_CUSTOM_RGB32 0x800a
+
+/* Custom media bus format for M10MO RAW capture */
+#if 0
+#define V4L2_MBUS_FMT_CUSTOM_M10MO_RAW 0x800b
+#endif
+
+/* Configuration used by Bayer noise reduction and YCC noise reduction */
+struct atomisp_nr_config {
+ /* [gain] Strength of noise reduction for Bayer NR (Used by Bayer NR) */
+ unsigned int bnr_gain;
+ /* [gain] Strength of noise reduction for YCC NR (Used by YCC NR) */
+ unsigned int ynr_gain;
+ /* [intensity] Sensitivity of Edge (Used by Bayer NR) */
+ unsigned int direction;
+ /* [intensity] coring threshold for Cb (Used by YCC NR) */
+ unsigned int threshold_cb;
+ /* [intensity] coring threshold for Cr (Used by YCC NR) */
+ unsigned int threshold_cr;
+};
+
+/* Temporal noise reduction configuration */
+struct atomisp_tnr_config {
+ unsigned int gain; /* [gain] Strength of NR */
+ unsigned int threshold_y;/* [intensity] Motion sensitivity for Y */
+ unsigned int threshold_uv;/* [intensity] Motion sensitivity for U/V */
+};
+
+/* Histogram. This contains num_elements values of type unsigned int.
+ * The data pointer is a DDR pointer (virtual address).
+ */
+struct atomisp_histogram {
+ unsigned int num_elements;
+ void __user *data;
+};
+
+enum atomisp_ob_mode {
+ atomisp_ob_mode_none,
+ atomisp_ob_mode_fixed,
+ atomisp_ob_mode_raster
+};
+
+/* Optical black level configuration */
+struct atomisp_ob_config {
+	/* Optical black level mode (Fixed / Raster) */
+ enum atomisp_ob_mode mode;
+ /* [intensity] optical black level for GR (relevant for fixed mode) */
+ unsigned int level_gr;
+ /* [intensity] optical black level for R (relevant for fixed mode) */
+ unsigned int level_r;
+ /* [intensity] optical black level for B (relevant for fixed mode) */
+ unsigned int level_b;
+ /* [intensity] optical black level for GB (relevant for fixed mode) */
+ unsigned int level_gb;
+ /* [BQ] 0..63 start position of OB area (relevant for raster mode) */
+ unsigned short start_position;
+ /* [BQ] start..63 end position of OB area (relevant for raster mode) */
+ unsigned short end_position;
+};
+
+/* Edge enhancement (sharpen) configuration */
+struct atomisp_ee_config {
+ /* [gain] The strength of sharpness. u5_11 */
+ unsigned int gain;
+ /* [intensity] The threshold that divides noises from edge. u8_8 */
+ unsigned int threshold;
+ /* [gain] The strength of sharpness in pell-mell area. u5_11 */
+ unsigned int detail_gain;
+};
+
+struct atomisp_3a_output {
+ int ae_y;
+ int awb_cnt;
+ int awb_gr;
+ int awb_r;
+ int awb_b;
+ int awb_gb;
+ int af_hpf1;
+ int af_hpf2;
+};
+
+enum atomisp_calibration_type {
+ calibration_type1,
+ calibration_type2,
+ calibration_type3
+};
+
+struct atomisp_gc_config {
+ __u16 gain_k1;
+ __u16 gain_k2;
+};
+
+struct atomisp_3a_config {
+ unsigned int ae_y_coef_r; /* [gain] Weight of R for Y */
+ unsigned int ae_y_coef_g; /* [gain] Weight of G for Y */
+ unsigned int ae_y_coef_b; /* [gain] Weight of B for Y */
+ unsigned int awb_lg_high_raw; /* [intensity]
+ AWB level gate high for raw */
+ unsigned int awb_lg_low; /* [intensity] AWB level gate low */
+ unsigned int awb_lg_high; /* [intensity] AWB level gate high */
+ int af_fir1_coef[7]; /* [factor] AF FIR coefficients of fir1 */
+ int af_fir2_coef[7]; /* [factor] AF FIR coefficients of fir2 */
+};
+
+struct atomisp_dvs_grid_info {
+ u32 enable;
+ u32 width;
+ u32 aligned_width;
+ u32 height;
+ u32 aligned_height;
+ u32 bqs_per_grid_cell;
+ u32 num_hor_coefs;
+ u32 num_ver_coefs;
+};
+
+struct atomisp_dvs_envelop {
+ unsigned int width;
+ unsigned int height;
+};
+
+struct atomisp_grid_info {
+ u32 enable;
+ u32 use_dmem;
+ u32 has_histogram;
+ u32 s3a_width;
+ u32 s3a_height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 s3a_bqs_per_grid_cell;
+ u32 deci_factor_log2;
+ u32 elem_bit_depth;
+};
+
+struct atomisp_dis_vector {
+ int x;
+ int y;
+};
+
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
+ * arrays that contain the coefficients for each type.
+ */
+struct atomisp_dvs2_coef_types {
+ short __user *odd_real; /** real part of the odd coefficients*/
+ short __user *odd_imag; /** imaginary part of the odd coefficients*/
+ short __user *even_real;/** real part of the even coefficients*/
+ short __user *even_imag;/** imaginary part of the even coefficients*/
+};
+
+/*
+ * DVS 2.0 Statistic types. This structure contains 4 pointers to
+ * arrays that contain the statistics for each type.
+ */
+struct atomisp_dvs2_stat_types {
+ int __user *odd_real; /** real part of the odd statistics*/
+ int __user *odd_imag; /** imaginary part of the odd statistics*/
+ int __user *even_real;/** real part of the even statistics*/
+ int __user *even_imag;/** imaginary part of the even statistics*/
+};
+
+struct atomisp_dis_coefficients {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_coef_types hor_coefs;
+ struct atomisp_dvs2_coef_types ver_coefs;
+};
+
+struct atomisp_dvs2_statistics {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_stat_types hor_prod;
+ struct atomisp_dvs2_stat_types ver_prod;
+};
+
+struct atomisp_dis_statistics {
+ struct atomisp_dvs2_statistics dvs2_stat;
+ u32 exp_id;
+};
+
+struct atomisp_3a_rgby_output {
+ u32 r;
+ u32 g;
+ u32 b;
+ u32 y;
+};
+
+/*
+ * Because at most two pipes output metadata, the driver uses
+ * ATOMISP_MAIN_METADATA for the metadata from the pipe which is always
+ * streaming, and ATOMISP_SEC_METADATA for the metadata from the pipe
+ * which streams on request (e.g. the capture pipe in ZSL or SDV mode)
+ * as secondary metadata. For use cases with only one streaming pipe,
+ * such as online capture, ATOMISP_MAIN_METADATA is used.
+ */
+enum atomisp_metadata_type {
+ ATOMISP_MAIN_METADATA = 0,
+ ATOMISP_SEC_METADATA,
+ ATOMISP_METADATA_TYPE_NUM,
+};
+
+struct atomisp_ext_isp_ctrl {
+ u32 id;
+ u32 data;
+};
+
+struct atomisp_3a_statistics {
+ struct atomisp_grid_info grid_info;
+ struct atomisp_3a_output __user *data;
+ struct atomisp_3a_rgby_output __user *rgby_data;
+ u32 exp_id; /* exposure ID */
+ u32 isp_config_id; /* isp config ID */
+};
+
+/* White Balance (Gain Adjust) */
+struct atomisp_wb_config {
+ unsigned int integer_bits;
+ unsigned int gr; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int r; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int b; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int gb; /* unsigned <integer_bits>.<16-integer_bits> */
+};
+
+/* Color Space Conversion settings */
+struct atomisp_cc_config {
+ unsigned int fraction_bits;
+ int matrix[3 * 3]; /* RGB2YUV Color matrix, signed
+ <13-fraction_bits>.<fraction_bits> */
+};
+
+/* De pixel noise configuration */
+struct atomisp_de_config {
+ unsigned int pixelnoise;
+ unsigned int c1_coring_threshold;
+ unsigned int c2_coring_threshold;
+};
+
+/* Chroma enhancement */
+struct atomisp_ce_config {
+ unsigned char uv_level_min;
+ unsigned char uv_level_max;
+};
+
+/* Defect pixel correction configuration */
+struct atomisp_dp_config {
+ /* [intensity] The threshold of defect Pixel Correction, representing
+ * the permissible difference of intensity between one pixel and its
+ * surrounding pixels. Smaller values result in more frequent pixel
+ * corrections. u0_16
+ */
+ unsigned int threshold;
+ /* [gain] The sensitivity of mis-correction. ISP will miss a lot of
+ * defects if the value is set too large. u8_8
+ */
+ unsigned int gain;
+ unsigned int gr;
+ unsigned int r;
+ unsigned int b;
+ unsigned int gb;
+};
+
+/* XNR threshold */
+struct atomisp_xnr_config {
+ __u16 threshold;
+};
+
+/* metadata config */
+struct atomisp_metadata_config {
+ u32 metadata_height;
+ u32 metadata_stride;
+};
+
+/*
+ * Generic resolution structure.
+ */
+struct atomisp_resolution {
+ u32 width; /** Width */
+ u32 height; /** Height */
+};
+
+/*
+ * This specifies the coordinates (x,y)
+ */
+struct atomisp_zoom_point {
+ s32 x; /** x coordinate */
+ s32 y; /** y coordinate */
+};
+
+/*
+ * This specifies the region
+ */
+struct atomisp_zoom_region {
+ struct atomisp_zoom_point
+ origin; /* Starting point coordinates for the region */
+ struct atomisp_resolution resolution; /* Region resolution */
+};
+
+struct atomisp_dz_config {
+ u32 dx; /** Horizontal zoom factor */
+ u32 dy; /** Vertical zoom factor */
+ struct atomisp_zoom_region zoom_region; /** region for zoom */
+};
+
+struct atomisp_parm {
+ struct atomisp_grid_info info;
+ struct atomisp_dvs_grid_info dvs_grid;
+ struct atomisp_dvs_envelop dvs_envelop;
+ struct atomisp_wb_config wb_config;
+ struct atomisp_cc_config cc_config;
+ struct atomisp_ob_config ob_config;
+ struct atomisp_de_config de_config;
+ struct atomisp_dz_config dz_config;
+ struct atomisp_ce_config ce_config;
+ struct atomisp_dp_config dp_config;
+ struct atomisp_nr_config nr_config;
+ struct atomisp_ee_config ee_config;
+ struct atomisp_tnr_config tnr_config;
+ struct atomisp_metadata_config metadata_config;
+};
+
+struct dvs2_bq_resolution {
+ int width_bq; /* width [BQ] */
+ int height_bq; /* height [BQ] */
+};
+
+struct atomisp_dvs2_bq_resolutions {
+ /* GDC source image size [BQ] */
+ struct dvs2_bq_resolution source_bq;
+ /* GDC output image size [BQ] */
+ struct dvs2_bq_resolution output_bq;
+ /* GDC effective envelope size [BQ] */
+ struct dvs2_bq_resolution envelope_bq;
+ /* isp pipe filter size [BQ] */
+ struct dvs2_bq_resolution ispfilter_bq;
+	/* GDC shift size [BQ] */
+ struct dvs2_bq_resolution gdc_shift_bq;
+};
+
+struct atomisp_dvs_6axis_config {
+ u32 exp_id;
+ u32 width_y;
+ u32 height_y;
+ u32 width_uv;
+ u32 height_uv;
+ u32 *xcoords_y;
+ u32 *ycoords_y;
+ u32 *xcoords_uv;
+ u32 *ycoords_uv;
+};
+
+struct atomisp_formats_config {
+ u32 video_full_range_flag;
+};
+
+struct atomisp_parameters {
+ struct atomisp_wb_config *wb_config; /* White Balance config */
+ struct atomisp_cc_config *cc_config; /* Color Correction config */
+ struct atomisp_tnr_config *tnr_config; /* Temporal Noise Reduction */
+ struct atomisp_ecd_config *ecd_config; /* Eigen Color Demosaicing */
+ struct atomisp_ynr_config *ynr_config; /* Y(Luma) Noise Reduction */
+ struct atomisp_fc_config *fc_config; /* Fringe Control */
+ struct atomisp_formats_config *formats_config; /* Formats Control */
+ struct atomisp_cnr_config *cnr_config; /* Chroma Noise Reduction */
+ struct atomisp_macc_config *macc_config; /* MACC */
+ struct atomisp_ctc_config *ctc_config; /* Chroma Tone Control */
+ struct atomisp_aa_config *aa_config; /* Anti-Aliasing */
+ struct atomisp_aa_config *baa_config; /* Anti-Aliasing */
+ struct atomisp_ce_config *ce_config;
+ struct atomisp_dvs_6axis_config *dvs_6axis_config;
+	struct atomisp_ob_config *ob_config;  /* Optical Black config */
+ struct atomisp_dp_config *dp_config; /* Dead Pixel config */
+ struct atomisp_nr_config *nr_config; /* Noise Reduction config */
+ struct atomisp_ee_config *ee_config; /* Edge Enhancement config */
+ struct atomisp_de_config *de_config; /* Demosaic config */
+ struct atomisp_gc_config *gc_config; /* Gamma Correction config */
+ struct atomisp_anr_config *anr_config; /* Advanced Noise Reduction */
+ struct atomisp_3a_config *a3a_config; /* 3A Statistics config */
+ struct atomisp_xnr_config *xnr_config; /* eXtra Noise Reduction */
+ struct atomisp_dz_config *dz_config; /* Digital Zoom */
+ struct atomisp_cc_config *yuv2rgb_cc_config; /* Color
+ Correction config */
+ struct atomisp_cc_config *rgb2yuv_cc_config; /* Color
+ Correction config */
+ struct atomisp_macc_table *macc_table;
+ struct atomisp_gamma_table *gamma_table;
+ struct atomisp_ctc_table *ctc_table;
+ struct atomisp_xnr_table *xnr_table;
+ struct atomisp_rgb_gamma_table *r_gamma_table;
+ struct atomisp_rgb_gamma_table *g_gamma_table;
+ struct atomisp_rgb_gamma_table *b_gamma_table;
+ struct atomisp_vector *motion_vector; /* For 2-axis DVS */
+ struct atomisp_shading_table *shading_table;
+ struct atomisp_morph_table *morph_table;
+ struct atomisp_dvs_coefficients *dvs_coefs; /* DVS 1.0 coefficients */
+ struct atomisp_dis_coefficients *dvs2_coefs; /* DVS 2.0 coefficients */
+ struct atomisp_capture_config *capture_config;
+ struct atomisp_anr_thres *anr_thres;
+
+ void *lin_2500_config; /* Skylake: Linearization config */
+ void *obgrid_2500_config; /* Skylake: OBGRID config */
+ void *bnr_2500_config; /* Skylake: bayer denoise config */
+ void *shd_2500_config; /* Skylake: shading config */
+ void *dm_2500_config; /* Skylake: demosaic config */
+ void *rgbpp_2500_config; /* Skylake: RGBPP config */
+ void *dvs_stat_2500_config; /* Skylake: DVS STAT config */
+ void *lace_stat_2500_config; /* Skylake: LACE STAT config */
+ void *yuvp1_2500_config; /* Skylake: yuvp1 config */
+ void *yuvp2_2500_config; /* Skylake: yuvp2 config */
+ void *tnr_2500_config; /* Skylake: TNR config */
+ void *dpc_2500_config; /* Skylake: DPC config */
+ void *awb_2500_config; /* Skylake: auto white balance config */
+ void *awb_fr_2500_config; /* Skylake: auto white balance filter response config */
+ void *anr_2500_config; /* Skylake: ANR config */
+ void *af_2500_config; /* Skylake: auto focus config */
+ void *ae_2500_config; /* Skylake: auto exposure config */
+ void *bds_2500_config; /* Skylake: bayer downscaler config */
+ void *dvs_2500_config; /* Skylake: digital video stabilization config */
+ void *res_mgr_2500_config;
+
+ /*
+	 * Output frame pointer the config is to be applied to (optional);
+	 * set to NULL to apply this config globally.
+ */
+ void *output_frame;
+ /*
+ * Unique ID to track which config was actually applied to a particular
+	 * frame; the driver will send this ID back together with the output frame.
+ */
+ u32 isp_config_id;
+
+ /*
+ * Switch to control per_frame setting:
+ * 0: this is a global setting
+ * 1: this is a per_frame setting
+ * PLEASE KEEP THIS AT THE END OF THE STRUCTURE!!
+ */
+ u32 per_frame_setting;
+};
+
+#define ATOMISP_GAMMA_TABLE_SIZE 1024
+struct atomisp_gamma_table {
+ unsigned short data[ATOMISP_GAMMA_TABLE_SIZE];
+};
+
+/* Morphing table for advanced ISP.
+ * Each line of width elements takes up COORD_TABLE_EXT_WIDTH elements
+ * in memory.
+ */
+#define ATOMISP_MORPH_TABLE_NUM_PLANES 6
+struct atomisp_morph_table {
+ unsigned int enabled;
+
+ unsigned int height;
+ unsigned int width; /* number of valid elements per line */
+ unsigned short __user *coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES];
+ unsigned short __user *coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES];
+};
+
+#define ATOMISP_NUM_SC_COLORS 4
+#define ATOMISP_SC_FLAG_QUERY BIT(0)
+
+struct atomisp_shading_table {
+ __u32 enable;
+
+ __u32 sensor_width;
+ __u32 sensor_height;
+ __u32 width;
+ __u32 height;
+ __u32 fraction_bits;
+
+ __u16 *data[ATOMISP_NUM_SC_COLORS];
+};
+
+/* parameter for MACC */
+#define ATOMISP_NUM_MACC_AXES 16
+struct atomisp_macc_table {
+ short data[4 * ATOMISP_NUM_MACC_AXES];
+};
+
+struct atomisp_macc_config {
+ int color_effect;
+ struct atomisp_macc_table table;
+};
+
+/* Parameter for ctc parameter control */
+#define ATOMISP_CTC_TABLE_SIZE 1024
+struct atomisp_ctc_table {
+ unsigned short data[ATOMISP_CTC_TABLE_SIZE];
+};
+
+/* Parameter for overlay image loading */
+struct atomisp_overlay {
+	/* the frame containing the overlay data. The overlay frame width
+	 * should be a multiple of 2*ISP_VEC_NELEMS. The overlay frame height
+	 * should be a multiple of 2.
+ */
+ struct v4l2_framebuffer *frame;
+ /* Y value of overlay background */
+ unsigned char bg_y;
+ /* U value of overlay background */
+ char bg_u;
+ /* V value of overlay background */
+ char bg_v;
+ /* the blending percent of input data for Y subpixels */
+ unsigned char blend_input_perc_y;
+ /* the blending percent of input data for U subpixels */
+ unsigned char blend_input_perc_u;
+ /* the blending percent of input data for V subpixels */
+ unsigned char blend_input_perc_v;
+ /* the blending percent of overlay data for Y subpixels */
+ unsigned char blend_overlay_perc_y;
+ /* the blending percent of overlay data for U subpixels */
+ unsigned char blend_overlay_perc_u;
+ /* the blending percent of overlay data for V subpixels */
+ unsigned char blend_overlay_perc_v;
+	/* the overlay start x pixel position on the output frame. It should
+	   be a multiple of 2*ISP_VEC_NELEMS. */
+	unsigned int overlay_start_x;
+	/* the overlay start y pixel position on the output frame. It should
+	   be a multiple of 2. */
+ unsigned int overlay_start_y;
+};
+
+struct atomisp_exposure {
+ unsigned int integration_time[8];
+ unsigned int shutter_speed[8];
+ unsigned int gain[4];
+ unsigned int aperture;
+};
+
+/* For texture streaming. */
+struct atomisp_bc_video_package {
+ int ioctl_cmd;
+ int device_id;
+ int inputparam;
+ int outputparam;
+};
+
+enum atomisp_focus_hp {
+ ATOMISP_FOCUS_HP_IN_PROGRESS = (1U << 2),
+ ATOMISP_FOCUS_HP_COMPLETE = (2U << 2),
+ ATOMISP_FOCUS_HP_FAILED = (3U << 2)
+};
+
+/* Masks */
+#define ATOMISP_FOCUS_STATUS_MOVING BIT(0)
+#define ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE BIT(1)
+#define ATOMISP_FOCUS_STATUS_HOME_POSITION (3U << 2)
+
+enum atomisp_camera_port {
+ ATOMISP_CAMERA_PORT_SECONDARY,
+ ATOMISP_CAMERA_PORT_PRIMARY,
+ ATOMISP_CAMERA_PORT_TERTIARY,
+ ATOMISP_CAMERA_NR_PORTS
+};
+
+enum atomisp_ext_isp_id {
+ EXT_ISP_CID_ISO = 0,
+ EXT_ISP_CID_CAPTURE_HDR,
+ EXT_ISP_CID_CAPTURE_LLS,
+ EXT_ISP_CID_FOCUS_MODE,
+ EXT_ISP_CID_FOCUS_EXECUTION,
+ EXT_ISP_CID_TOUCH_POSX,
+ EXT_ISP_CID_TOUCH_POSY,
+ EXT_ISP_CID_CAF_STATUS,
+ EXT_ISP_CID_AF_STATUS,
+ EXT_ISP_CID_GET_AF_MODE,
+ EXT_ISP_CID_CAPTURE_BURST,
+ EXT_ISP_CID_ZOOM,
+ EXT_ISP_CID_SHOT_MODE
+};
+
+#define EXT_ISP_FOCUS_MODE_NORMAL 0
+#define EXT_ISP_FOCUS_MODE_MACRO 1
+#define EXT_ISP_FOCUS_MODE_TOUCH_AF 2
+#define EXT_ISP_FOCUS_MODE_PREVIEW_CAF 3
+#define EXT_ISP_FOCUS_MODE_MOVIE_CAF 4
+#define EXT_ISP_FOCUS_MODE_FACE_CAF 5
+#define EXT_ISP_FOCUS_MODE_TOUCH_MACRO 6
+#define EXT_ISP_FOCUS_MODE_TOUCH_CAF 7
+
+#define EXT_ISP_FOCUS_STOP 0
+#define EXT_ISP_FOCUS_SEARCH 1
+#define EXT_ISP_PAN_FOCUSING 2
+
+#define EXT_ISP_CAF_RESTART_CHECK 1
+#define EXT_ISP_CAF_STATUS_FOCUSING 2
+#define EXT_ISP_CAF_STATUS_SUCCESS 3
+#define EXT_ISP_CAF_STATUS_FAIL 4
+
+#define EXT_ISP_AF_STATUS_INVALID 1
+#define EXT_ISP_AF_STATUS_FOCUSING 2
+#define EXT_ISP_AF_STATUS_SUCCESS 3
+#define EXT_ISP_AF_STATUS_FAIL 4
+
+enum atomisp_burst_capture_options {
+ EXT_ISP_BURST_CAPTURE_CTRL_START = 0,
+ EXT_ISP_BURST_CAPTURE_CTRL_STOP
+};
+
+#define EXT_ISP_SHOT_MODE_AUTO 0
+#define EXT_ISP_SHOT_MODE_BEAUTY_FACE 1
+#define EXT_ISP_SHOT_MODE_BEST_PHOTO 2
+#define EXT_ISP_SHOT_MODE_DRAMA 3
+#define EXT_ISP_SHOT_MODE_BEST_FACE 4
+#define EXT_ISP_SHOT_MODE_ERASER 5
+#define EXT_ISP_SHOT_MODE_PANORAMA 6
+#define EXT_ISP_SHOT_MODE_RICH_TONE_HDR 7
+#define EXT_ISP_SHOT_MODE_NIGHT 8
+#define EXT_ISP_SHOT_MODE_SOUND_SHOT 9
+#define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO 10
+#define EXT_ISP_SHOT_MODE_SPORTS 11
+
+/*Private IOCTLs for ISP */
+#define ATOMISP_IOC_G_XNR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_S_XNR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_G_NR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_S_NR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_G_TNR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_S_TNR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_G_HISTOGRAM \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_S_HISTOGRAM \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_G_BLACK_LEVEL_COMP \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_S_BLACK_LEVEL_COMP \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_G_EE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+#define ATOMISP_IOC_S_EE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+/* Digital Image Stabilization:
+ * 1. get dis statistics: reads DIS statistics from ISP (every frame)
+ * 2. set dis coefficients: set DIS filter coefficients (one time)
+ * 3. set dis motion vector: set motion vector (result of DIS, every frame)
+ */
+#define ATOMISP_IOC_G_DIS_STAT \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics)
+
+#define ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs2_bq_resolutions)
+
+#define ATOMISP_IOC_S_DIS_COEFS \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients)
+
+#define ATOMISP_IOC_S_DIS_VECTOR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config)
+
+#define ATOMISP_IOC_G_3A_STAT \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics)
+#define ATOMISP_IOC_G_ISP_PARM \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_S_ISP_PARM \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_G_ISP_GAMMA \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_S_ISP_GAMMA \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_G_ISP_GDC_TAB \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+#define ATOMISP_IOC_S_ISP_GDC_TAB \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+
+/* macc parameter control*/
+#define ATOMISP_IOC_G_ISP_MACC \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+#define ATOMISP_IOC_S_ISP_MACC \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+
+/* Defect pixel detection & Correction */
+#define ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+#define ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+
+/* False Color Correction */
+#define ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+#define ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+
+/* ctc parameter control */
+#define ATOMISP_IOC_G_ISP_CTC \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+#define ATOMISP_IOC_S_ISP_CTC \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+
+/* white balance Correction */
+#define ATOMISP_IOC_G_ISP_WHITE_BALANCE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+#define ATOMISP_IOC_S_ISP_WHITE_BALANCE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+
+/* fpn table loading */
+#define ATOMISP_IOC_S_ISP_FPN_TABLE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer)
+
+/* overlay image loading */
+#define ATOMISP_IOC_G_ISP_OVERLAY \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+#define ATOMISP_IOC_S_ISP_OVERLAY \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+
+/* bcd driver bridge */
+#define ATOMISP_IOC_CAMERA_BRIDGE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 19, struct atomisp_bc_video_package)
+
+#define ATOMISP_IOC_S_EXPOSURE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 21, struct atomisp_exposure)
+
+/* white balance Correction */
+#define ATOMISP_IOC_G_3A_CONFIG \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+#define ATOMISP_IOC_S_3A_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+
+/* LCS (shading) table write */
+#define ATOMISP_IOC_S_ISP_SHD_TAB \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table)
+
+/* Gamma Correction */
+#define ATOMISP_IOC_G_ISP_GAMMA_CORRECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+#define ATOMISP_IOC_S_ISP_GAMMA_CORRECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+#define ATOMISP_IOC_S_PARAMETERS \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters)
+
+#define ATOMISP_IOC_EXP_ID_UNLOCK \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 36, int)
+
+#define ATOMISP_IOC_EXP_ID_CAPTURE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 37, int)
+
+#define ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 38, unsigned int)
+
+#define ATOMISP_IOC_G_FORMATS_CONFIG \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config)
+
+#define ATOMISP_IOC_S_FORMATS_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config)
+
+#define ATOMISP_IOC_INJECT_A_FAKE_EVENT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 42, int)
+
+#define ATOMISP_IOC_S_ARRAY_RESOLUTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 45, struct atomisp_resolution)
+
+/* for depth mode sensor frame sync compensation */
+#define ATOMISP_IOC_G_DEPTH_SYNC_COMP \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 46, unsigned int)
+
+#define ATOMISP_IOC_S_SENSOR_EE_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 47, unsigned int)
+
+/*
+ * Reserved ioctls. A customer implements them internally, so we
+ * cannot reuse these numbers without causing an ABI conflict.
+ * Anyway, those ioctls are hacks and not implemented by us:
+ *
+ * #define ATOMISP_IOC_G_SENSOR_REG \
+ * _IOW('v', BASE_VIDIOC_PRIVATE + 55, struct atomisp_sensor_regs)
+ * #define ATOMISP_IOC_S_SENSOR_REG \
+ * _IOW('v', BASE_VIDIOC_PRIVATE + 56, struct atomisp_sensor_regs)
+ */
+
+/* ISP Private control IDs */
+#define V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION \
+ (V4L2_CID_PRIVATE_BASE + 0)
+#define V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC \
+ (V4L2_CID_PRIVATE_BASE + 1)
+#define V4L2_CID_ATOMISP_VIDEO_STABLIZATION \
+ (V4L2_CID_PRIVATE_BASE + 2)
+#define V4L2_CID_ATOMISP_FIXED_PATTERN_NR \
+ (V4L2_CID_PRIVATE_BASE + 3)
+#define V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION \
+ (V4L2_CID_PRIVATE_BASE + 4)
+#define V4L2_CID_ATOMISP_LOW_LIGHT \
+ (V4L2_CID_PRIVATE_BASE + 5)
+
+/* Camera class:
+ * Exposure, Flash and privacy (indicator) light controls, to be upstreamed */
+#define V4L2_CID_CAMERA_LASTP1 (V4L2_CID_CAMERA_CLASS_BASE + 1024)
+
+#define V4L2_CID_RUN_MODE (V4L2_CID_CAMERA_LASTP1 + 20)
+#define ATOMISP_RUN_MODE_VIDEO 1
+#define ATOMISP_RUN_MODE_STILL_CAPTURE 2
+#define ATOMISP_RUN_MODE_PREVIEW 3
+#define ATOMISP_RUN_MODE_MIN 1
+#define ATOMISP_RUN_MODE_MAX 3
+
+#define V4L2_CID_ENABLE_VFPP (V4L2_CID_CAMERA_LASTP1 + 21)
+#define V4L2_CID_ATOMISP_CONTINUOUS_MODE (V4L2_CID_CAMERA_LASTP1 + 22)
+#define V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE \
+ (V4L2_CID_CAMERA_LASTP1 + 23)
+#define V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER \
+ (V4L2_CID_CAMERA_LASTP1 + 24)
+
+#define V4L2_CID_VFPP (V4L2_CID_CAMERA_LASTP1 + 25)
+#define ATOMISP_VFPP_ENABLE 0
+#define ATOMISP_VFPP_DISABLE_SCALER 1
+#define ATOMISP_VFPP_DISABLE_LOWLAT 2
+
+#define V4L2_CID_START_ZSL_CAPTURE (V4L2_CID_CAMERA_LASTP1 + 28)
+/* Lock and unlock raw buffer */
+#define V4L2_CID_ENABLE_RAW_BUFFER_LOCK (V4L2_CID_CAMERA_LASTP1 + 29)
+
+#define V4L2_CID_EXPOSURE_ZONE_NUM (V4L2_CID_CAMERA_LASTP1 + 31)
+/* Disable digital zoom */
+#define V4L2_CID_DISABLE_DZ (V4L2_CID_CAMERA_LASTP1 + 32)
+
+#define V4L2_CID_ATOMISP_SELECT_ISP_VERSION (V4L2_CID_CAMERA_LASTP1 + 38)
+
+#define V4L2_BUF_FLAG_BUFFER_INVALID 0x0400
+#define V4L2_BUF_FLAG_BUFFER_VALID 0x0800
+
+#define V4L2_BUF_TYPE_VIDEO_CAPTURE_ION (V4L2_BUF_TYPE_PRIVATE + 1024)
+
+#define V4L2_EVENT_ATOMISP_3A_STATS_READY (V4L2_EVENT_PRIVATE_START + 1)
+#define V4L2_EVENT_ATOMISP_METADATA_READY (V4L2_EVENT_PRIVATE_START + 2)
+#define V4L2_EVENT_ATOMISP_ACC_COMPLETE (V4L2_EVENT_PRIVATE_START + 4)
+#define V4L2_EVENT_ATOMISP_PAUSE_BUFFER (V4L2_EVENT_PRIVATE_START + 5)
+#define V4L2_EVENT_ATOMISP_CSS_RESET (V4L2_EVENT_PRIVATE_START + 6)
+/* Nonstandard color effects for V4L2_CID_COLORFX */
+enum {
+ V4L2_COLORFX_SKIN_WHITEN_LOW = 1001,
+ V4L2_COLORFX_SKIN_WHITEN_HIGH = 1002,
+ V4L2_COLORFX_WARM = 1003,
+ V4L2_COLORFX_COLD = 1004,
+ V4L2_COLORFX_WASHED = 1005,
+ V4L2_COLORFX_RED = 1006,
+ V4L2_COLORFX_GREEN = 1007,
+ V4L2_COLORFX_BLUE = 1008,
+ V4L2_COLORFX_PINK = 1009,
+ V4L2_COLORFX_YELLOW = 1010,
+ V4L2_COLORFX_PURPLE = 1011,
+};
+
+#endif /* _ATOM_ISP_H */
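
A hedged user-space sketch of how the private ioctls above would be invoked; the /dev/video0 node and the <linux/atomisp.h> install path are assumptions, and <linux/videodev2.h> is included for BASE_VIDIOC_PRIVATE.

/* User-space sketch, not part of the patch: reading the current ISP
 * parameters with ATOMISP_IOC_G_ISP_PARM. The device node and the
 * header install path are assumptions; adjust them for the system.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>	/* BASE_VIDIOC_PRIVATE */
#include <linux/atomisp.h>	/* assumed install path of this header */

int main(void)
{
	struct atomisp_parm parm;
	int fd = open("/dev/video0", O_RDWR);	/* assumed atomisp node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, ATOMISP_IOC_G_ISP_PARM, &parm) == 0)
		printf("3A grid: %ux%u\n", parm.info.s3a_width,
		       parm.info.s3a_height);
	close(fd);
	return 0;
}
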
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
new file mode 100644
index 000000000000..426c5ee4ec18
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel MID SoC Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ */
+#ifndef ATOMISP_GMIN_PLATFORM_H_
+#define ATOMISP_GMIN_PLATFORM_H_
+
+#include "atomisp_platform.h"
+
+int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
+ struct camera_sensor_platform_data *plat_data);
+int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd);
+int gmin_get_var_int(struct device *dev, bool is_gmin,
+ const char *var, int def);
+struct camera_sensor_platform_data *
+gmin_camera_platform_data(
+ struct v4l2_subdev *subdev,
+ enum atomisp_input_format csi_format,
+ enum atomisp_bayer_order csi_bayer);
+#endif
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
new file mode 100644
index 000000000000..6146555fe9cf
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ */
+#ifndef ATOMISP_PLATFORM_H_
+#define ATOMISP_PLATFORM_H_
+
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+
+#include <linux/i2c.h>
+#include <media/v4l2-subdev.h>
+#include "atomisp.h"
+
+#define MAX_SENSORS_PER_PORT 4
+#define MAX_STREAMS_PER_CHANNEL 2
+
+#define CAMERA_MODULE_ID_LEN 64
+
+enum atomisp_bayer_order {
+ atomisp_bayer_order_grbg,
+ atomisp_bayer_order_rggb,
+ atomisp_bayer_order_bggr,
+ atomisp_bayer_order_gbrg
+};
+
+enum atomisp_input_stream_id {
+ ATOMISP_INPUT_STREAM_GENERAL = 0,
+ ATOMISP_INPUT_STREAM_CAPTURE = 0,
+ ATOMISP_INPUT_STREAM_POSTVIEW,
+ ATOMISP_INPUT_STREAM_PREVIEW,
+ ATOMISP_INPUT_STREAM_VIDEO,
+ ATOMISP_INPUT_STREAM_NUM
+};
+
+enum atomisp_input_format {
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY,/* 8 bits per subpixel (legacy) */
+ ATOMISP_INPUT_FORMAT_YUV420_8, /* 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV420_10,/* 10 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV420_16,/* 16 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_8, /* UYVY..UVYV, 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_10,/* UYVY..UVYV, 10 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_16,/* UYVY..UVYV, 16 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_444, /* BGR..BGR, 4 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_555, /* BGR..BGR, 5 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_565, /* BGR..BGR, 5 bits B and R, 6 bits G */
+ ATOMISP_INPUT_FORMAT_RGB_666, /* BGR..BGR, 6 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_888, /* BGR..BGR, 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RAW_6, /* RAW data, 6 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_7, /* RAW data, 7 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_8, /* RAW data, 8 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_10, /* RAW data, 10 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_12, /* RAW data, 12 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_14, /* RAW data, 14 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_16, /* RAW data, 16 bits per pixel */
+ ATOMISP_INPUT_FORMAT_BINARY_8, /* Binary byte stream. */
+
+ /* CSI2-MIPI specific format: Generic short packet data. It is used to
+ * keep the timing information for the opening/closing of shutters,
+	 * triggering of flashes, etc.
+ */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT1, /* Generic Short Packet Code 1 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT2, /* Generic Short Packet Code 2 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT3, /* Generic Short Packet Code 3 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT4, /* Generic Short Packet Code 4 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT5, /* Generic Short Packet Code 5 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT6, /* Generic Short Packet Code 6 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT7, /* Generic Short Packet Code 7 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT8, /* Generic Short Packet Code 8 */
+
+ /* CSI2-MIPI specific format: YUV data.
+ */
+ ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT, /* YUV420 8-bit (Chroma Shifted
+ Pixel Sampling) */
+	ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT, /* YUV420 10-bit (Chroma Shifted
+ Pixel Sampling) */
+
+ /* CSI2-MIPI specific format: Generic long packet data
+ */
+ ATOMISP_INPUT_FORMAT_EMBEDDED, /* Embedded 8-bit non Image Data */
+
+ /* CSI2-MIPI specific format: User defined byte-based data. For example,
+ * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
+ * the User Defined Data Type 4 and the MPEG data as the
+ * User Defined Data Type 7.
+ */
+ ATOMISP_INPUT_FORMAT_USER_DEF1, /* User defined 8-bit data type 1 */
+ ATOMISP_INPUT_FORMAT_USER_DEF2, /* User defined 8-bit data type 2 */
+ ATOMISP_INPUT_FORMAT_USER_DEF3, /* User defined 8-bit data type 3 */
+ ATOMISP_INPUT_FORMAT_USER_DEF4, /* User defined 8-bit data type 4 */
+ ATOMISP_INPUT_FORMAT_USER_DEF5, /* User defined 8-bit data type 5 */
+ ATOMISP_INPUT_FORMAT_USER_DEF6, /* User defined 8-bit data type 6 */
+ ATOMISP_INPUT_FORMAT_USER_DEF7, /* User defined 8-bit data type 7 */
+ ATOMISP_INPUT_FORMAT_USER_DEF8, /* User defined 8-bit data type 8 */
+};
+
+#define N_ATOMISP_INPUT_FORMAT (ATOMISP_INPUT_FORMAT_USER_DEF8 + 1)
+
+struct intel_v4l2_subdev_table {
+ enum atomisp_camera_port port;
+ unsigned int lanes;
+ struct v4l2_subdev *subdev;
+};
+
+/*
+ * A sensor or external ISP can send multiple streams with different MIPI data
+ * types in the same virtual channel. This information needs to come from the
+ * sensor or external ISP.
+ */
+struct atomisp_isys_config_info {
+ u8 input_format;
+ u16 width;
+ u16 height;
+};
+
+struct atomisp_input_stream_info {
+ enum atomisp_input_stream_id stream;
+ u8 enable;
+ /* Sensor driver fills ch_id with the id
+ of the virtual channel. */
+ u8 ch_id;
+	/* Tells how many streams are in this virtual channel. If 0, ignore the
+	 * rest and take the input format from mipi_info. */
+ u8 isys_configs;
+ /*
+	 * if isys_configs is more than 0, the sensor needs to configure the
+	 * input format differently. width and height can be 0. If width and
+	 * height are not zero, then the corresponding data needs to be set.
+ */
+ struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
+};
+
+struct camera_sensor_platform_data {
+ int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag);
+ int (*csi_cfg)(struct v4l2_subdev *subdev, int flag);
+
+ /*
+ * New G-Min power and GPIO interface to control individual
+ * lines as implemented on all known camera modules.
+ */
+ int (*gpio0_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*gpio1_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v1p8_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v2p8_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v1p2_ctrl)(struct v4l2_subdev *subdev, int on);
+};
+
+struct camera_mipi_info {
+ enum atomisp_camera_port port;
+ unsigned int num_lanes;
+ enum atomisp_input_format input_format;
+ enum atomisp_bayer_order raw_bayer_order;
+ enum atomisp_input_format metadata_format;
+ u32 metadata_width;
+ u32 metadata_height;
+ const u32 *metadata_effective_width;
+};
+
+const struct intel_v4l2_subdev_table *atomisp_platform_get_subdevs(void);
+int atomisp_register_sensor_no_gmin(struct v4l2_subdev *subdev, u32 lanes,
+ enum atomisp_input_format format,
+ enum atomisp_bayer_order bayer_order);
+void atomisp_unregister_subdev(struct v4l2_subdev *subdev);
+
+/* API from old platform_camera.h, new CPUID implementation */
+#define __IS_SOC(x) (boot_cpu_data.x86_vfm == x)
+#define __IS_SOCS(x, y) (boot_cpu_data.x86_vfm == x || boot_cpu_data.x86_vfm == y)
+
+#define IS_MFLD __IS_SOC(INTEL_ATOM_SALTWELL_MID)
+#define IS_BYT __IS_SOC(INTEL_ATOM_SILVERMONT)
+#define IS_CHT __IS_SOC(INTEL_ATOM_AIRMONT)
+#define IS_MRFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID)
+#define IS_MOFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID2)
+
+/* Both CHT and MOFD come with ISP2401 */
+#define IS_ISP2401 __IS_SOCS(INTEL_ATOM_AIRMONT, \
+ INTEL_ATOM_SILVERMONT_MID2)
+
+#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/drivers/staging/media/atomisp/include/mmu/isp_mmu.h b/drivers/staging/media/atomisp/include/mmu/isp_mmu.h
new file mode 100644
index 000000000000..ee9839b080a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/mmu/isp_mmu.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+/*
+ * ISP MMU driver for classic two-level page tables
+ */
+#ifndef __ISP_MMU_H__
+#define __ISP_MMU_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+/*
+ * do not change these values; the ISP page size must be the
+ * same as the kernel's page size.
+ */
+#define ISP_PAGE_OFFSET 12
+#define ISP_PAGE_SIZE BIT(ISP_PAGE_OFFSET)
+#define ISP_PAGE_MASK (~(phys_addr_t)(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_OFFSET 22
+#define ISP_L1PT_MASK (~((1U << ISP_L1PT_OFFSET) - 1))
+
+#define ISP_L2PT_OFFSET 12
+#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES 1024
+#define ISP_L2PT_PTES 1024
+
+#define ISP_PTR_TO_L1_IDX(x) ((((x) & ISP_L1PT_MASK)) \
+ >> ISP_L1PT_OFFSET)
+
+#define ISP_PTR_TO_L2_IDX(x) ((((x) & ISP_L2PT_MASK)) \
+ >> ISP_L2PT_OFFSET)
+
+#define ISP_PAGE_ALIGN(x) (((x) + (ISP_PAGE_SIZE - 1)) \
+ & ISP_PAGE_MASK)
+
+#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset) \
+	(((l1_idx) << ISP_L1PT_OFFSET) | \
+	 ((l2_idx) << ISP_L2PT_OFFSET) | \
+	 (offset))
+
+#define pgnr_to_size(pgnr) ((pgnr) << ISP_PAGE_OFFSET)
+#define size_to_pgnr_ceil(size) (((size) + (1 << ISP_PAGE_OFFSET) - 1)\
+ >> ISP_PAGE_OFFSET)
+#define size_to_pgnr_bottom(size) ((size) >> ISP_PAGE_OFFSET)
+
+struct isp_mmu;
+
+struct isp_mmu_client {
+ /*
+ * const value
+ *
+ * @name:
+ * driver name
+ * @pte_valid_mask:
+	 *	should contain a single valid bit, i.e. the value
+	 *	must be a power of 2.
+ */
+ char *name;
+ unsigned int pte_valid_mask;
+ unsigned int null_pte;
+
+ /*
+ * get page directory base address (physical address).
+ *
+ * must be provided.
+ */
+ unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
+ /*
+ * callback to flush tlb.
+ *
+ * tlb_flush_range will at least flush TLBs containing
+ * address mapping from addr to addr + size.
+ *
+ * tlb_flush_all will flush all TLBs.
+ *
+	 * tlb_flush_all must be provided. if tlb_flush_range is
+	 * not valid, it will be set to tlb_flush_all by default.
+ */
+ void (*tlb_flush_range)(struct isp_mmu *mmu,
+ unsigned int addr, unsigned int size);
+ void (*tlb_flush_all)(struct isp_mmu *mmu);
+ unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
+ phys_addr_t phys);
+ phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
+ unsigned int pte);
+
+};
+
+struct isp_mmu {
+ struct isp_mmu_client *driver;
+ unsigned int l1_pte;
+ int l2_pgt_refcount[ISP_L1PT_PTES];
+ phys_addr_t base_address;
+
+ struct mutex pt_mutex;
+};
+
+/* flags for PDE and PTE */
+#define ISP_PTE_VALID_MASK(mmu) \
+ ((mmu)->driver->pte_valid_mask)
+
+#define ISP_PTE_VALID(mmu, pte) \
+ ((pte) & ISP_PTE_VALID_MASK(mmu))
+
+#define NULL_PAGE ((phys_addr_t)(-1) & ISP_PAGE_MASK)
+#define PAGE_VALID(page) ((page) != NULL_PAGE)
+
+/*
+ * init mmu with specific mmu driver.
+ */
+int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
+/*
+ * cleanup all mmu related things.
+ */
+void isp_mmu_exit(struct isp_mmu *mmu);
+
+/*
+ * setup/remove the address mapping between pgnr contiguous physical
+ * pages and isp_virt.
+ *
+ * map/unmap are protected by a mutex, so the caller does not have
+ * to do any lock/unlock operation.
+ *
+ * map/unmap will not flush the TLB; the caller needs to deal with
+ * this itself.
+ */
+int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr);
+
+void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr);
+
+static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
+{
+ if (mmu->driver && mmu->driver->tlb_flush_all)
+ mmu->driver->tlb_flush_all(mmu);
+}
+
+#define isp_mmu_flush_tlb isp_mmu_flush_tlb_all
+
+static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
+ unsigned int start, unsigned int size)
+{
+ if (mmu->driver && mmu->driver->tlb_flush_range)
+ mmu->driver->tlb_flush_range(mmu, start, size);
+}
+
+#endif /* __ISP_MMU_H__ */
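
A small illustration of how an ISP virtual address decomposes under the two-level page-table layout defined above; the helper isp_mmu_dump_addr() is hypothetical and not part of the driver.

/* Illustrative only: split an ISP virtual address with the macros above. */
static void isp_mmu_dump_addr(unsigned int isp_virt)
{
	unsigned int l1  = ISP_PTR_TO_L1_IDX(isp_virt);		/* bits 31..22 */
	unsigned int l2  = ISP_PTR_TO_L2_IDX(isp_virt);		/* bits 21..12 */
	unsigned int off = isp_virt & (ISP_PAGE_SIZE - 1);	/* bits 11..0 */

	pr_info("isp_virt 0x%x -> L1 index %u, L2 index %u, offset 0x%x\n",
		isp_virt, l1, l2, off);
}
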
diff --git a/drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h b/drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h
new file mode 100644
index 000000000000..bdec30ff8309
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Merrifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __SH_MMU_MRFLD_H__
+#define __SH_MMU_MRFLD_H__
+
+extern struct isp_mmu_client sh_mmu_mrfld;
+#endif
diff --git a/drivers/staging/media/atomisp/notes.txt b/drivers/staging/media/atomisp/notes.txt
new file mode 100644
index 000000000000..c04c283ff438
--- /dev/null
+++ b/drivers/staging/media/atomisp/notes.txt
@@ -0,0 +1,43 @@
+Some notes about the working of the atomisp drivers (learned while working
+on cleaning it up).
+
+The atomisp seems to be a generic DSP(ISP) like processor without a fixed
+pipeline. It does not have its own memory, but instead uses main memory.
+The ISP has its own address-space and main memory needs to be mapped into
+its address space through the ISP's MMU.
+
+Memory is allocated by the hmm code. hmm_alloc() returns an ISP virtual
+address. The hmm code keeps a list of all allocations and when necessary
+the hmm code finds the backing hmm-buffer-object (hmm_bo) by looking
+up the hmm_bo based on the ISP virtual address.
+
+The actual processing pipeline is made by loading one or more programs,
+called binaries. The shisp_240??0_v21.bin firmware file contains many
+different binaries. Binaries are picked by filling a ia_css_binary_descr
+struct with various input and output parameters and then calling
+ia_css_binary_find(). Some binaries support creating multiple outputs
+(preview + video frame?) at the same time.
+
+For example, for the /dev/video0 preview node, load_preview_binaries()
+from atomisp/pci/sh_css.c is called, which loads a preview and
+optionally a scaler binary. Note that when digital zoom is disabled
+(it is enabled by default) only the preview binary is loaded,
+so in that case a single binary handles the entire pipeline.
+
+Since getting a picture requires multiple processing steps, this
+means that, unlike fixed pipelines, the soft pipelines on the ISP
+can do multiple processing steps in a single pipeline element
+(in a single binary).
+
+###
+
+The sensor drivers make use of v4l2_get_subdev_hostdata(), which returns
+a camera_mipi_info struct. This struct is allocated/managed by
+the core atomisp code. The most important parts of the struct
+are filled by the atomisp core itself, like e.g. the port number.
+
+Other members of camera_mipi_info which are set by some drivers are:
+-metadata_width, metadata_height, metadata_effective_width, set by
+ the ov5693 driver (and used by the atomisp core)
+-raw_bayer_order, adjusted by the ov2680 driver when flipping since
+ flipping can change the bayer order
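
A sketch of the pattern described above, as a sensor driver might implement it; the function name and the particular bayer-order mapping are illustrative and are not copied from the ov2680 driver.

/* Illustrative sketch: a sensor driver fetching the atomisp-managed
 * camera_mipi_info and updating the bayer order when the image is
 * flipped. Names and the chosen orders are made up for this example.
 */
static void example_update_bayer_order(struct v4l2_subdev *sd, bool vflip)
{
	struct camera_mipi_info *info = v4l2_get_subdev_hostdata(sd);

	if (!info)
		return;

	/* Vertical flip swaps the row order, which changes the bayer pattern */
	info->raw_bayer_order = vflip ? atomisp_bayer_order_gbrg
				      : atomisp_bayer_order_grbg;
}
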
diff --git a/drivers/staging/media/atomisp/pci/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp-regs.h
new file mode 100644
index 000000000000..15e61098a675
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp-regs.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ */
+
+#ifndef ATOMISP_REGS_H
+#define ATOMISP_REGS_H
+
+/* common register definitions */
+#define PCICMDSTS 0x01
+#define INTR 0x0f
+#define MSI_CAPID 0x24
+#define MSI_ADDRESS 0x25
+#define MSI_DATA 0x26
+#define INTR_CTL 0x27
+
+#define PCI_MSI_CAPID 0x90
+#define PCI_MSI_ADDR 0x94
+#define PCI_MSI_DATA 0x98
+#define PCI_INTERRUPT_CTRL 0x9C
+#define PCI_I_CONTROL 0xfc
+
+/* MRFLD specific register definitions */
+#define MRFLD_CSI_AFE 0x39
+#define MRFLD_CSI_CONTROL 0x3a
+#define MRFLD_CSI_RCOMP 0x3d
+
+#define MRFLD_PCI_PMCS 0x84
+#define MRFLD_PCI_CSI_ACCESS_CTRL_VIOL 0xd4
+#define MRFLD_PCI_CSI_AFE_HS_CONTROL 0xdc
+#define MRFLD_PCI_CSI_AFE_RCOMP_CONTROL 0xe0
+#define MRFLD_PCI_CSI_CONTROL 0xe8
+#define MRFLD_PCI_CSI_AFE_TRIM_CONTROL 0xe4
+#define MRFLD_PCI_CSI_DEADLINE_CONTROL 0xec
+#define MRFLD_PCI_CSI_RCOMP_CONTROL 0xf4
+
+/* Select Arasan (legacy)/Intel input system */
+#define MRFLD_PCI_CSI_CONTROL_PARPATHEN BIT(24)
+/* Enable CSI interface (ANN B0/K0) */
+#define MRFLD_PCI_CSI_CONTROL_CSI_READY BIT(25)
+
+/*
+ * Enables the combining of adjacent 32-byte read requests to the same
+ * cache line. When cleared, each 32-byte read request is sent as a
+ * separate request on the IB interface.
+ */
+#define MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING 0x1
+
+/*
+ * Register: MRFLD_PCI_CSI_RCOMP_CONTROL
+ * If cleared, the high speed clock going to the digital logic is gated when
+ * RCOMP update is happening. The clock is gated for a minimum of 100 nsec.
+ * If this bit is set, then the high speed clock is not gated during the
+ * update cycle.
+ */
+#define MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE 0x800000
+
+/*
+ * Enables the combining of adjacent 32-byte write requests to the same
+ * cache line. When cleared, each 32-byte write request is sent as a
+ * separate request on the IB interface.
+ */
+#define MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING 0x2
+
+#define MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK 0xc
+
+#define MRFLD_PCI_CSI1_HSRXCLKTRIM 0x2
+#define MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT 16
+#define MRFLD_PCI_CSI2_HSRXCLKTRIM 0x3
+#define MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT 24
+#define MRFLD_PCI_CSI3_HSRXCLKTRIM 0x2
+#define MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT 28
+#define MRFLD_PCI_CSI_HSRXCLKTRIM_MASK 0xf
+
+/*
+ * This register is an IUNIT MMIO register; it is used to select the CSI
+ * receiver backend.
+ * 1: SH CSI backend
+ * 0: Arasan CSI backend
+ */
+#define MRFLD_CSI_RECEIVER_SELECTION_REG 0x8081c
+
+#define MRFLD_INTR_CLEAR_REG 0x50c
+#define MRFLD_INTR_STATUS_REG 0x508
+#define MRFLD_INTR_ENABLE_REG 0x510
+
+#define MRFLD_MAX_ZOOM_FACTOR 1024
+
+/* MRFLD ISP POWER related */
+#define MRFLD_ISPSSPM0 0x39
+#define MRFLD_ISPSSPM0_ISPSSC_OFFSET 0
+#define MRFLD_ISPSSPM0_ISPSSS_OFFSET 24
+#define MRFLD_ISPSSPM0_ISPSSC_MASK 0x3
+#define MRFLD_ISPSSPM0_IUNIT_POWER_ON 0
+#define MRFLD_ISPSSPM0_IUNIT_POWER_OFF 0x3
+#define MRFLD_ISPSSDVFS 0x13F
+#define MRFLD_BIT0 0x0001
+#define MRFLD_BIT1 0x0002
+
+/* MRFLD CSI lane configuration related */
+#define MRFLD_PORT_CONFIG_NUM 8
+#define MRFLD_PORT1_ENABLE_SHIFT 0
+#define MRFLD_PORT2_ENABLE_SHIFT 1
+#define MRFLD_PORT3_ENABLE_SHIFT 2
+#define MRFLD_PORT1_LANES_SHIFT 3
+#define MRFLD_PORT2_LANES_SHIFT 7
+#define MRFLD_PORT3_LANES_SHIFT 8
+#define MRFLD_PORT_CONFIG_MASK 0x000f03ff
+#define MRFLD_PORT_CONFIGCODE_SHIFT 16
+#define MRFLD_ALL_CSI_PORTS_OFF_MASK 0x7
+
+#define CHV_PORT3_LANES_SHIFT 9
+#define CHV_PORT_CONFIG_MASK 0x1f07ff
+
+#define ISPSSPM1 0x3a
+#define ISP_FREQ_STAT_MASK (0x1f << ISP_FREQ_STAT_OFFSET)
+#define ISP_REQ_FREQ_MASK 0x1f
+#define ISP_FREQ_VALID_MASK (0x1 << ISP_FREQ_VALID_OFFSET)
+#define ISP_FREQ_STAT_OFFSET 0x18
+#define ISP_REQ_GUAR_FREQ_OFFSET 0x8
+#define ISP_REQ_FREQ_OFFSET 0x0
+#define ISP_FREQ_VALID_OFFSET 0x7
+#define ISP_FREQ_RULE_ANY 0x0
+
+#define ISP_FREQ_457MHZ 0x1C9
+#define ISP_FREQ_400MHZ 0x190
+#define ISP_FREQ_356MHZ 0x164
+#define ISP_FREQ_320MHZ 0x140
+#define ISP_FREQ_266MHZ 0x10a
+#define ISP_FREQ_200MHZ 0xc8
+#define ISP_FREQ_100MHZ 0x64
+
+#define HPLL_FREQ_800MHZ 0x320
+#define HPLL_FREQ_1600MHZ 0x640
+#define HPLL_FREQ_2000MHZ 0x7D0
+
+#define CCK_FUSE_REG_0 0x08
+#define CCK_FUSE_HPLL_FREQ_MASK 0x03
+
+/* ISP2401 CSI2+ receiver delay settings */
+#define CSI2_PORT_A_BASE 0xC0000
+#define CSI2_PORT_B_BASE 0xC2000
+#define CSI2_PORT_C_BASE 0xC4000
+
+#define CSI2_LANE_CL_BASE 0x418
+#define CSI2_LANE_D0_BASE 0x420
+#define CSI2_LANE_D1_BASE 0x428
+#define CSI2_LANE_D2_BASE 0x430
+#define CSI2_LANE_D3_BASE 0x438
+
+#define CSI2_REG_RX_CSI_DLY_CNT_TERMEN 0
+#define CSI2_REG_RX_CSI_DLY_CNT_SETTLE 0x4
+
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC0418
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC041C
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC0420
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC0424
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC0428
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC042C
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE2 0xC0430
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE2 0xC0434
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE3 0xC0438
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE3 0xC043C
+
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC2418
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC241C
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC2420
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC2424
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC2428
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC242C
+
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC4418
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC441C
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC4420
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC4424
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC4428
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC442C
+
+#define DMA_BURST_SIZE_REG 0xCD408
+
+#define ISP_DFS_TRY_TIMES 2
+
+#endif /* ATOMISP_REGS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
new file mode 100644
index 000000000000..3a4eb4f6d3be
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -0,0 +1,4663 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+
+#include <asm/iosf_mbi.h>
+
+#include <media/v4l2-event.h>
+
+#define CREATE_TRACE_POINTS
+#include "atomisp_trace_event.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp-regs.h"
+#include "atomisp_tables.h"
+#include "atomisp_compat.h"
+#include "atomisp_subdev.h"
+#include "atomisp_dfs_tables.h"
+
+#include <hmm/hmm.h>
+
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "system_global.h"
+#include "sh_css_internal.h"
+#include "sh_css_sp.h"
+#include "gp_device.h"
+#include "device_access.h"
+#include "irq.h"
+
+#include "ia_css_types.h"
+#include "ia_css_stream.h"
+#include "ia_css_debug.h"
+#include "bits.h"
+
+union host {
+ struct {
+ void *kernel_ptr;
+ void __user *user_ptr;
+ int size;
+ } scalar;
+ struct {
+ void *hmm_ptr;
+ } ptr;
+};
+
+/*
+ * Get sensor (dis71430/ov2720) related info from the v4l2_subdev->priv
+ * data field. subdev->priv is set in mrst.c.
+ */
+struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd)
+{
+ return (struct camera_mipi_info *)v4l2_get_subdev_hostdata(sd);
+}
+
+/*
+ * get struct atomisp_video_pipe from v4l2 video_device
+ */
+struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev)
+{
+ return (struct atomisp_video_pipe *)
+ container_of(dev, struct atomisp_video_pipe, vdev);
+}
+
+static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
+{
+ struct v4l2_subdev_frame_interval fi = { 0 };
+ struct atomisp_device *isp = asd->isp;
+
+ unsigned short fps = 0;
+ int ret;
+
+ ret = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].sensor,
+ pad, get_frame_interval, &fi);
+
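+	/*
+	 * The reported frame interval is numerator/denominator seconds, so
+	 * the FPS is its inverse (e.g. an interval of 1/30 s gives 30 fps);
+	 * the integer division truncates any fractional part.
+	 */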
+ if (!ret && fi.interval.numerator)
+ fps = fi.interval.denominator / fi.interval.numerator;
+
+ return fps;
+}
+
+/*
+ * The DFS procedure is as follows:
+ * 1. The target frequency is calculated according to FPS/resolution/ISP
+ *    running mode.
+ * 2. The ratio is calculated using the formula 2 * HPLL / target frequency - 1
+ *    with proper rounding.
+ * 3. Set the ratio in ISPFREQ40, set FREQVALID to 1 and set ISPFREQGUAR40
+ *    to 200MHz in ISPSSPM1.
+ * 4. Wait for FREQVALID to be cleared by the P-Unit.
+ * 5. Wait for the ISPFREQSTAT40 field in ISPSSPM1 to change to the ratio set
+ *    in step 3.
+ */
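+/*
+ * For example, with illustrative numbers: an HPLL of 1600 MHz and a 400 MHz
+ * target give ratio = (2 * 1600 + 400 / 2) / 400 - 1 = 7, i.e. the rounded
+ * form of "2 * HPLL / target - 1" as implemented in write_target_freq_to_hw()
+ * below.
+ */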
+static int write_target_freq_to_hw(struct atomisp_device *isp,
+ unsigned int new_freq)
+{
+ unsigned int ratio, timeout, guar_ratio;
+ u32 isp_sspm1 = 0;
+ int i;
+
+ if (!isp->hpll_freq) {
+ dev_err(isp->dev, "failed to get hpll_freq. no change to freq\n");
+ return -EINVAL;
+ }
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ if (isp_sspm1 & ISP_FREQ_VALID_MASK) {
+ dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n");
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
+ isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET));
+ }
+
+ ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1;
+ guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1;
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET);
+
+ for (i = 0; i < ISP_DFS_TRY_TIMES; i++) {
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
+ isp_sspm1
+ | ratio << ISP_REQ_FREQ_OFFSET
+ | 1 << ISP_FREQ_VALID_OFFSET
+ | guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET);
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ timeout = 20;
+ while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n");
+ udelay(100);
+ timeout--;
+ }
+
+ if (timeout != 0)
+ break;
+ }
+
+ if (timeout == 0) {
+ dev_err(isp->dev, "DFS failed due to HW error.\n");
+ return -EINVAL;
+ }
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ timeout = 10;
+ while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
+ dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n",
+ new_freq);
+ udelay(100);
+ timeout--;
+ }
+ if (timeout == 0) {
+ dev_err(isp->dev, "DFS target freq is rejected by HW.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_freq_scaling(struct atomisp_device *isp,
+ enum atomisp_dfs_mode mode,
+ bool force)
+{
+ const struct atomisp_dfs_config *dfs;
+ unsigned int new_freq;
+ struct atomisp_freq_scaling_rule curr_rules;
+ int i, ret;
+ unsigned short fps = 0;
+
+ dfs = isp->dfs;
+
+ if (dfs->lowest_freq == 0 || dfs->max_freq_at_vmin == 0 ||
+ dfs->highest_freq == 0 || dfs->dfs_table_size == 0 ||
+ !dfs->dfs_table) {
+ dev_err(isp->dev, "DFS configuration is invalid.\n");
+ return -EINVAL;
+ }
+
+ if (mode == ATOMISP_DFS_MODE_LOW) {
+ new_freq = dfs->lowest_freq;
+ goto done;
+ }
+
+ if (mode == ATOMISP_DFS_MODE_MAX) {
+ new_freq = dfs->highest_freq;
+ goto done;
+ }
+
+ fps = atomisp_get_sensor_fps(&isp->asd);
+ if (fps == 0) {
+ dev_info(isp->dev,
+ "Sensor didn't report FPS. Using DFS max mode.\n");
+ new_freq = dfs->highest_freq;
+ goto done;
+ }
+
+ curr_rules.width = isp->asd.fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.width;
+ curr_rules.height = isp->asd.fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.height;
+ curr_rules.fps = fps;
+ curr_rules.run_mode = isp->asd.run_mode->val;
+
+	/* Search for the target frequency by looping over the freq rules */
+ for (i = 0; i < dfs->dfs_table_size; i++) {
+ if (curr_rules.width != dfs->dfs_table[i].width &&
+ dfs->dfs_table[i].width != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.height != dfs->dfs_table[i].height &&
+ dfs->dfs_table[i].height != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.fps != dfs->dfs_table[i].fps &&
+ dfs->dfs_table[i].fps != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.run_mode != dfs->dfs_table[i].run_mode &&
+ dfs->dfs_table[i].run_mode != ISP_FREQ_RULE_ANY)
+ continue;
+ break;
+ }
+
+ if (i == dfs->dfs_table_size)
+ new_freq = dfs->max_freq_at_vmin;
+ else
+ new_freq = dfs->dfs_table[i].isp_freq;
+
+done:
+ dev_dbg(isp->dev, "DFS target frequency=%d.\n", new_freq);
+
+ if ((new_freq == isp->running_freq) && !force)
+ return 0;
+
+ dev_dbg(isp->dev, "Programming DFS frequency to %d\n", new_freq);
+
+ ret = write_target_freq_to_hw(isp, new_freq);
+ if (!ret) {
+ isp->running_freq = new_freq;
+ trace_ipu_pstate(new_freq, -1);
+ }
+ return ret;
+}
+
+/*
+ * reset and restore ISP
+ */
+int atomisp_reset(struct atomisp_device *isp)
+{
+ /* Reset ISP by power-cycling it */
+ int ret = 0;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ ret = atomisp_power_off(isp->dev);
+ if (ret < 0)
+ dev_err(isp->dev, "atomisp_power_off failed, %d\n", ret);
+
+ ret = atomisp_power_on(isp->dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "atomisp_power_on failed, %d\n", ret);
+ isp->isp_fatal_error = true;
+ }
+
+ return ret;
+}
+
+/*
+ * interrupt disable functions
+ */
+static void disable_isp_irq(enum hrt_isp_css_irq irq)
+{
+ irq_disable_channel(IRQ0_ID, irq);
+
+ if (irq != hrt_isp_css_irq_sp)
+ return;
+
+ cnd_sp_irq_enable(SP0_ID, false);
+}
+
+/*
+ * interrupt clear function
+ */
+static void clear_isp_irq(enum hrt_isp_css_irq irq)
+{
+ irq_clear_all(IRQ0_ID);
+}
+
+void atomisp_msi_irq_init(struct atomisp_device *isp)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ u32 msg32;
+ u16 msg16;
+
+ pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32);
+ msg32 |= 1 << MSI_ENABLE_BIT;
+ pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32);
+
+ msg32 = (1 << INTR_IER) | (1 << INTR_IIR);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &msg16);
+ msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE);
+ pci_write_config_word(pdev, PCI_COMMAND, msg16);
+}
+
+void atomisp_msi_irq_uninit(struct atomisp_device *isp)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ u32 msg32;
+ u16 msg16;
+
+ pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32);
+ msg32 &= ~(1 << MSI_ENABLE_BIT);
+ pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32);
+
+ msg32 = 0x0;
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &msg16);
+ msg16 &= ~(PCI_COMMAND_MASTER);
+ pci_write_config_word(pdev, PCI_COMMAND, msg16);
+}
+
+static void atomisp_sof_event(struct atomisp_sub_device *asd)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_FRAME_SYNC;
+ event.u.frame_sync.frame_sequence = atomic_read(&asd->sof_count);
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_FRAME_END;
+ event.u.frame_sync.frame_sequence = exp_id;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_3a_stats_ready_event(struct atomisp_sub_device *asd,
+ uint8_t exp_id)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_3A_STATS_READY;
+ event.u.frame_sync.frame_sequence = exp_id;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_metadata_ready_event(struct atomisp_sub_device *asd,
+ enum atomisp_metadata_type md_type)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_METADATA_READY;
+ event.u.data[0] = md_type;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_reset_event(struct atomisp_sub_device *asd)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_CSS_RESET;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void print_csi_rx_errors(enum mipi_port_id port,
+ struct atomisp_device *isp)
+{
+ u32 infos = 0;
+
+ atomisp_css_rx_get_irq_info(port, &infos);
+
+ dev_err(isp->dev, "CSI Receiver port %d errors:\n", port);
+ if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ dev_err(isp->dev, " buffer overrun");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
+ dev_err(isp->dev, " start-of-transmission error");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ dev_err(isp->dev, " start-of-transmission sync error");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
+ dev_err(isp->dev, " control error");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ dev_err(isp->dev, " 2 or more ECC errors");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
+ dev_err(isp->dev, " CRC mismatch");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ dev_err(isp->dev, " unknown error");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ dev_err(isp->dev, " frame sync error");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ dev_err(isp->dev, " frame data error");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ dev_err(isp->dev, " data timeout");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ dev_err(isp->dev, " unknown escape command entry");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ dev_err(isp->dev, " line sync error");
+}
+
+/* Clear irq reg */
+static void clear_irq_reg(struct atomisp_device *isp)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ u32 msg_ret;
+
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &msg_ret);
+ msg_ret |= 1 << INTR_IIR;
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg_ret);
+}
+
+/* interrupt handling function*/
+irqreturn_t atomisp_isr(int irq, void *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)dev;
+ struct atomisp_css_event eof_event;
+ unsigned int irq_infos = 0;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&isp->lock, flags);
+
+ if (!isp->css_initialized) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_HANDLED;
+ }
+ err = atomisp_css_irq_translate(isp, &irq_infos);
+ if (err) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_NONE;
+ }
+
+ clear_irq_reg(isp);
+
+ if (!isp->asd.streaming)
+ goto out_nowake;
+
+ if (irq_infos & IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF) {
+ atomic_inc(&isp->asd.sof_count);
+ atomisp_sof_event(&isp->asd);
+
+		/*
+		 * If sequence_temp and sequence are the same, no frames were
+		 * lost, so we can advance sequence_temp.
+		 * If not, processing of a frame is still in progress and the
+		 * driver needs to keep the old sequence_temp value.
+		 * NOTE: The assumption here is that the ISP will not start
+		 * processing the next frame from the sensor before the old
+		 * one is completely done.
+		 */
+ if (atomic_read(&isp->asd.sequence) == atomic_read(&isp->asd.sequence_temp))
+ atomic_set(&isp->asd.sequence_temp, atomic_read(&isp->asd.sof_count));
+
+ dev_dbg_ratelimited(isp->dev, "irq:0x%x (SOF)\n", irq_infos);
+ irq_infos &= ~IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
+ }
+
+ if (irq_infos & IA_CSS_IRQ_INFO_EVENTS_READY)
+ atomic_set(&isp->asd.sequence, atomic_read(&isp->asd.sequence_temp));
+
+ if ((irq_infos & IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR) ||
+ (irq_infos & IA_CSS_IRQ_INFO_IF_ERROR)) {
+ /* handle mipi receiver error */
+ u32 rx_infos;
+ enum mipi_port_id port;
+
+ for (port = MIPI_PORT0_ID; port <= MIPI_PORT2_ID;
+ port++) {
+ print_csi_rx_errors(port, isp);
+ atomisp_css_rx_get_irq_info(port, &rx_infos);
+ atomisp_css_rx_clear_irq_info(port, rx_infos);
+ }
+ }
+
+ if (irq_infos & IA_CSS_IRQ_INFO_ISYS_EVENTS_READY) {
+ while (ia_css_dequeue_isys_event(&eof_event.event) == 0) {
+ atomisp_eof_event(&isp->asd, eof_event.event.exp_id);
+ dev_dbg_ratelimited(isp->dev, "ISYS event: EOF exp_id %d\n",
+ eof_event.event.exp_id);
+ }
+
+ irq_infos &= ~IA_CSS_IRQ_INFO_ISYS_EVENTS_READY;
+ if (irq_infos == 0)
+ goto out_nowake;
+ }
+
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ dev_dbg_ratelimited(isp->dev, "irq:0x%x (unhandled)\n", irq_infos);
+
+ return IRQ_WAKE_THREAD;
+
+out_nowake:
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ if (irq_infos)
+ dev_dbg_ratelimited(isp->dev, "irq:0x%x (ignored, as not streaming anymore)\n",
+ irq_infos);
+
+ return IRQ_HANDLED;
+}
+
+void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd)
+{
+ int i;
+
+ memset(asd->s3a_bufs_in_css, 0, sizeof(asd->s3a_bufs_in_css));
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
+ memset(asd->metadata_bufs_in_css[i], 0,
+ sizeof(asd->metadata_bufs_in_css[i]));
+ asd->dis_bufs_in_css = 0;
+}
+
+/* 0x100000 is the start of dmem inside SP */
+#define SP_DMEM_BASE 0x100000
+
+void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
+ unsigned int size)
+{
+ unsigned int data = 0;
+ unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32));
+
+ dev_dbg(isp->dev, "atomisp mmio base: %p\n", isp->base);
+ dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__,
+ addr, size, size32);
+ if (size32 * 4 + addr > 0x4000) {
+ dev_err(isp->dev, "illegal size (%d) or addr (0x%x)\n",
+ size32, addr);
+ return;
+ }
+ addr += SP_DMEM_BASE;
+ addr &= 0x003FFFFF;
+ do {
+ data = readl(isp->base + addr);
+ dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data);
+ addr += sizeof(u32);
+ } while (--size32);
+}
+
+int atomisp_buffers_in_css(struct atomisp_video_pipe *pipe)
+{
+ unsigned long irqflags;
+ struct list_head *pos;
+ int buffers_in_css = 0;
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+
+ list_for_each(pos, &pipe->buffers_in_css)
+ buffers_in_css++;
+
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ return buffers_in_css;
+}
+
+void atomisp_buffer_done(struct ia_css_frame *frame, enum vb2_buffer_state state)
+{
+ struct atomisp_video_pipe *pipe = vb_to_pipe(&frame->vb.vb2_buf);
+
+ lockdep_assert_held(&pipe->irq_lock);
+
+ frame->vb.vb2_buf.timestamp = ktime_get_ns();
+ frame->vb.field = pipe->pix.field;
+ frame->vb.sequence = atomic_read(&pipe->asd->sequence);
+ list_del(&frame->queue);
+ if (state == VB2_BUF_STATE_DONE)
+ vb2_set_plane_payload(&frame->vb.vb2_buf, 0, pipe->pix.sizeimage);
+ vb2_buffer_done(&frame->vb.vb2_buf, state);
+}
+
+void atomisp_flush_video_pipe(struct atomisp_video_pipe *pipe, enum vb2_buffer_state state,
+ bool warn_on_css_frames)
+{
+ struct ia_css_frame *frame, *_frame;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+
+ list_for_each_entry_safe(frame, _frame, &pipe->buffers_in_css, queue) {
+ if (warn_on_css_frames)
+ dev_warn(pipe->isp->dev, "Warning: CSS frames queued on flush\n");
+ atomisp_buffer_done(frame, state);
+ }
+
+ list_for_each_entry_safe(frame, _frame, &pipe->activeq, queue)
+ atomisp_buffer_done(frame, state);
+
+ list_for_each_entry_safe(frame, _frame, &pipe->buffers_waiting_for_param, queue) {
+ pipe->frame_request_config_id[frame->vb.vb2_buf.index] = 0;
+ atomisp_buffer_done(frame, state);
+ }
+
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+}
+
+/* Clean out the parameters that were not applied */
+void atomisp_flush_params_queue(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_css_params_with_list *param;
+
+ while (!list_empty(&pipe->per_frame_params)) {
+ param = list_entry(pipe->per_frame_params.next,
+ struct atomisp_css_params_with_list, list);
+ list_del(&param->list);
+ atomisp_free_css_parameters(&param->params);
+ kvfree(param);
+ }
+}
+
+/* Re-queue per-frame parameters */
+static void atomisp_recover_params_queue(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_css_params_with_list *param;
+ int i;
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ param = pipe->frame_params[i];
+ if (param)
+ list_add_tail(&param->list, &pipe->per_frame_params);
+ pipe->frame_params[i] = NULL;
+ }
+ atomisp_handle_parameter_and_buffer(pipe);
+}
+
+void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
+ enum ia_css_buffer_type buf_type,
+ enum ia_css_pipe_id css_pipe_id,
+ bool q_buffers, enum atomisp_input_stream_id stream_id)
+{
+ struct atomisp_video_pipe *pipe = NULL;
+ struct atomisp_css_buffer buffer;
+ bool requeue = false;
+ unsigned long irqflags;
+ struct ia_css_frame *frame = NULL;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp, *s3a_iter;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp, *dis_iter;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp, *md_iter;
+ enum atomisp_metadata_type md_type;
+ struct atomisp_device *isp = asd->isp;
+ int i, err;
+
+ lockdep_assert_held(&isp->mutex);
+
+	if (buf_type != IA_CSS_BUFFER_TYPE_METADATA &&
+ buf_type != IA_CSS_BUFFER_TYPE_3A_STATISTICS &&
+ buf_type != IA_CSS_BUFFER_TYPE_DIS_STATISTICS &&
+ buf_type != IA_CSS_BUFFER_TYPE_OUTPUT_FRAME &&
+ buf_type != IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME &&
+ buf_type != IA_CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME &&
+ buf_type != IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME &&
+ buf_type != IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
+ dev_err(isp->dev, "%s, unsupported buffer type: %d\n",
+ __func__, buf_type);
+ return;
+ }
+
+ memset(&buffer, 0, sizeof(struct atomisp_css_buffer));
+ buffer.css_buffer.type = buf_type;
+ err = atomisp_css_dequeue_buffer(asd, stream_id, css_pipe_id,
+ buf_type, &buffer);
+ if (err) {
+ dev_err(isp->dev,
+ "atomisp_css_dequeue_buffer failed: 0x%x\n", err);
+ return;
+ }
+
+ switch (buf_type) {
+ case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
+ list_for_each_entry_safe(s3a_iter, _s3a_buf_tmp,
+ &asd->s3a_stats_in_css, list) {
+ if (s3a_iter->s3a_data ==
+ buffer.css_buffer.data.stats_3a) {
+ list_del_init(&s3a_iter->list);
+ list_add_tail(&s3a_iter->list,
+ &asd->s3a_stats_ready);
+ s3a_buf = s3a_iter;
+ break;
+ }
+ }
+
+ asd->s3a_bufs_in_css[css_pipe_id]--;
+ atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id);
+ if (s3a_buf)
+ dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
+ __func__, s3a_buf->s3a_data->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: s3a stat is ready with no exp_id found\n",
+ __func__);
+ break;
+ case IA_CSS_BUFFER_TYPE_METADATA:
+ if (error)
+ break;
+
+ md_type = ATOMISP_MAIN_METADATA;
+ list_for_each_entry_safe(md_iter, _md_buf_tmp,
+ &asd->metadata_in_css[md_type], list) {
+ if (md_iter->metadata ==
+ buffer.css_buffer.data.metadata) {
+ list_del_init(&md_iter->list);
+ list_add_tail(&md_iter->list,
+ &asd->metadata_ready[md_type]);
+ md_buf = md_iter;
+ break;
+ }
+ }
+ asd->metadata_bufs_in_css[stream_id][css_pipe_id]--;
+ atomisp_metadata_ready_event(asd, md_type);
+ if (md_buf)
+ dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
+ __func__, md_buf->metadata->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: metadata is ready with no exp_id found\n",
+ __func__);
+ break;
+ case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
+ list_for_each_entry_safe(dis_iter, _dis_buf_tmp,
+ &asd->dis_stats_in_css, list) {
+ if (dis_iter->dis_data ==
+ buffer.css_buffer.data.stats_dvs) {
+ spin_lock_irqsave(&asd->dis_stats_lock,
+ irqflags);
+ list_del_init(&dis_iter->list);
+ list_add(&dis_iter->list, &asd->dis_stats);
+ asd->params.dis_proj_data_valid = true;
+ spin_unlock_irqrestore(&asd->dis_stats_lock,
+ irqflags);
+ dis_buf = dis_iter;
+ break;
+ }
+ }
+ asd->dis_bufs_in_css--;
+ if (dis_buf)
+ dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
+ __func__, dis_buf->dis_data->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: dis stat is ready with no exp_id found\n",
+ __func__);
+ break;
+ case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+ frame = buffer.css_buffer.data.frame;
+ if (!frame) {
+ WARN_ON(1);
+ break;
+ }
+ if (!frame->valid)
+ error = true;
+
+ pipe = vb_to_pipe(&frame->vb.vb2_buf);
+
+ dev_dbg(isp->dev, "%s: vf frame with exp_id %d is ready\n",
+ __func__, frame->exp_id);
+ pipe->frame_config_id[frame->vb.vb2_buf.index] = frame->isp_config_id;
+ break;
+ case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ frame = buffer.css_buffer.data.frame;
+ if (!frame) {
+ WARN_ON(1);
+ break;
+ }
+
+ if (!frame->valid)
+ error = true;
+
+ pipe = vb_to_pipe(&frame->vb.vb2_buf);
+
+ dev_dbg(isp->dev, "%s: main frame with exp_id %d is ready\n",
+ __func__, frame->exp_id);
+
+ i = frame->vb.vb2_buf.index;
+
+ /* free the parameters */
+ if (pipe->frame_params[i]) {
+ if (asd->params.dvs_6axis == pipe->frame_params[i]->params.dvs_6axis)
+ asd->params.dvs_6axis = NULL;
+ atomisp_free_css_parameters(&pipe->frame_params[i]->params);
+ kvfree(pipe->frame_params[i]);
+ pipe->frame_params[i] = NULL;
+ }
+
+ pipe->frame_config_id[i] = frame->isp_config_id;
+
+ if (asd->params.css_update_params_needed) {
+ atomisp_apply_css_parameters(asd,
+ &asd->params.css_param);
+ if (asd->params.css_param.update_flag.dz_config)
+ asd->params.config.dz_config = &asd->params.css_param.dz_config;
+			/*
+			 * A new global dvs 6axis config should be blocked
+			 * here if there is a buffer with per-frame parameters
+			 * pending in the CSS frame buffer queue.
+			 * This is to avoid zooming vibration, since global
+			 * parameters take effect immediately while per-frame
+			 * parameters only take effect after the previous
+			 * buffers in CSS have been processed.
+			 */
+ if (asd->params.dvs_6axis)
+ atomisp_css_set_dvs_6axis(asd,
+ asd->params.dvs_6axis);
+ else
+ asd->params.css_update_params_needed = false;
+			/*
+			 * The update flag should not be cleared here, since
+			 * it is still going to be used to build the following
+			 * per-frame parameters.
+			 * This introduces more copy work, since each time the
+			 * global parameters are updated, the whole parameter
+			 * set is applied.
+			 * FIXME: A new set of parameter copy functions could
+			 * be added to build per-frame parameters based on the
+			 * solid structures stored in asd->params.css_param
+			 * instead of using shadow pointers in the update flag.
+			 */
+ atomisp_css_update_isp_params(asd);
+ }
+ break;
+ default:
+ break;
+ }
+ if (frame) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ atomisp_buffer_done(frame, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ }
+
+ /*
+ * Requeue should only be done for 3a and dis buffers.
+	 * Queue/dequeue order will change if the driver recycles image buffers.
+ */
+ if (requeue) {
+ err = atomisp_css_queue_buffer(asd,
+ stream_id, css_pipe_id,
+ buf_type, &buffer);
+ if (err)
+ dev_err(isp->dev, "%s, q to css fails: %d\n",
+ __func__, err);
+ return;
+ }
+ if (!error && q_buffers)
+ atomisp_qbuffers_to_css(asd);
+}
+
+void atomisp_assert_recovery_work(struct work_struct *work)
+{
+ struct atomisp_device *isp = container_of(work, struct atomisp_device,
+ assert_recovery_work);
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ unsigned long flags;
+ int ret;
+
+ mutex_lock(&isp->mutex);
+
+ if (!isp->asd.streaming)
+ goto out_unlock;
+
+ atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
+
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->asd.streaming = false;
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ /* stream off sensor */
+ ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].csi_remote_source,
+ video, s_stream, 0);
+ if (ret)
+ dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);
+
+ atomisp_clear_css_buffer_counters(&isp->asd);
+
+ atomisp_css_stop(&isp->asd, true);
+
+ isp->asd.preview_exp_id = 1;
+ isp->asd.postview_exp_id = 1;
+ /* notify HAL the CSS reset */
+ dev_dbg(isp->dev, "send reset event to %s\n", isp->asd.subdev.devnode->name);
+ atomisp_reset_event(&isp->asd);
+
+ /* clear irq */
+ disable_isp_irq(hrt_isp_css_irq_sp);
+ clear_isp_irq(hrt_isp_css_irq_sp);
+
+ /* Set the SRSE to 3 before resetting */
+ pci_write_config_dword(pdev, PCI_I_CONTROL,
+ isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+
+ /* reset ISP and restore its state */
+ atomisp_reset(isp);
+
+ atomisp_css_input_set_mode(&isp->asd, IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+
+ /* Recreate streams destroyed by atomisp_css_stop() */
+ atomisp_create_pipes_stream(&isp->asd);
+
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
+
+ if (atomisp_css_start(&isp->asd)) {
+		dev_warn(isp->dev, "Starting the SP failed, so streaming is not enabled!\n");
+ } else {
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->asd.streaming = true;
+ spin_unlock_irqrestore(&isp->lock, flags);
+ }
+
+ atomisp_csi2_configure(&isp->asd);
+
+ atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ atomisp_css_valid_sof(isp));
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0)
+ dev_dbg(isp->dev, "DFS auto failed while recovering!\n");
+
+ /* Dequeueing buffers is not needed, CSS will recycle buffers that it has */
+ atomisp_flush_video_pipe(&isp->asd.video_out, VB2_BUF_STATE_ERROR, false);
+
+ /* Requeue unprocessed per-frame parameters. */
+ atomisp_recover_params_queue(&isp->asd.video_out);
+
+ ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].csi_remote_source,
+ video, s_stream, 1);
+ if (ret)
+ dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);
+
+out_unlock:
+ mutex_unlock(&isp->mutex);
+}
+
+irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
+{
+ struct atomisp_device *isp = isp_ptr;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ streaming = isp->asd.streaming;
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ if (!streaming)
+ return IRQ_HANDLED;
+
+	/*
+	 * The standard CSS2.0 API prescribes the following calling sequence
+	 * for dequeuing ready buffers:
+	 * while (ia_css_dequeue_psys_event(...)) {
+	 *	switch (event.type) {
+	 *	...
+	 *	ia_css_pipe_dequeue_buffer()
+	 *	}
+	 * }
+	 * That is, events and buffers are dequeued one after another.
+	 *
+	 * An implementation which first dequeues all the events into a FIFO
+	 * and then processes them from the FIFO works fine in single stream
+	 * mode, but has issues in the multiple stream case: because
+	 * ia_css_pipe_dequeue_buffer() does not take an ia_css_pipe
+	 * parameter, it will not return the correct buffer for a specific
+	 * pipe.
+	 *
+	 * So for CSS2.0 we do not dequeue all the events at once; instead we
+	 * dequeue one event and process it before dequeuing the next one.
+	 */
+ mutex_lock(&isp->mutex);
+ atomisp_css_isr_thread(isp);
+ mutex_unlock(&isp->mutex);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Get internal fmt according to V4L2 fmt
+ */
+static enum ia_css_frame_format
+v4l2_fmt_to_sh_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_YUV420:
+ return IA_CSS_FRAME_FORMAT_YUV420;
+ case V4L2_PIX_FMT_YVU420:
+ return IA_CSS_FRAME_FORMAT_YV12;
+ case V4L2_PIX_FMT_YUV422P:
+ return IA_CSS_FRAME_FORMAT_YUV422;
+ case V4L2_PIX_FMT_YUV444:
+ return IA_CSS_FRAME_FORMAT_YUV444;
+ case V4L2_PIX_FMT_NV12:
+ return IA_CSS_FRAME_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV21:
+ return IA_CSS_FRAME_FORMAT_NV21;
+ case V4L2_PIX_FMT_NV16:
+ return IA_CSS_FRAME_FORMAT_NV16;
+ case V4L2_PIX_FMT_NV61:
+ return IA_CSS_FRAME_FORMAT_NV61;
+ case V4L2_PIX_FMT_UYVY:
+ return IA_CSS_FRAME_FORMAT_UYVY;
+ case V4L2_PIX_FMT_YUYV:
+ return IA_CSS_FRAME_FORMAT_YUYV;
+ case V4L2_PIX_FMT_RGB24:
+ return IA_CSS_FRAME_FORMAT_PLANAR_RGB888;
+ case V4L2_PIX_FMT_RGBX32:
+ return IA_CSS_FRAME_FORMAT_RGBA888;
+ case V4L2_PIX_FMT_RGB565:
+ return IA_CSS_FRAME_FORMAT_RGB565;
+#if 0
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
+ return IA_CSS_FRAME_FORMAT_BINARY_8;
+#endif
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return IA_CSS_FRAME_FORMAT_RAW;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * raw format match between SH format and V4L2 format
+ */
+static int raw_output_format_match_input(u32 input, u32 output)
+{
+ if ((input == ATOMISP_INPUT_FORMAT_RAW_12) &&
+ ((output == V4L2_PIX_FMT_SRGGB12) ||
+ (output == V4L2_PIX_FMT_SGRBG12) ||
+ (output == V4L2_PIX_FMT_SBGGR12) ||
+ (output == V4L2_PIX_FMT_SGBRG12)))
+ return 0;
+
+ if ((input == ATOMISP_INPUT_FORMAT_RAW_10) &&
+ ((output == V4L2_PIX_FMT_SRGGB10) ||
+ (output == V4L2_PIX_FMT_SGRBG10) ||
+ (output == V4L2_PIX_FMT_SBGGR10) ||
+ (output == V4L2_PIX_FMT_SGBRG10)))
+ return 0;
+
+ if ((input == ATOMISP_INPUT_FORMAT_RAW_8) &&
+ ((output == V4L2_PIX_FMT_SRGGB8) ||
+ (output == V4L2_PIX_FMT_SGRBG8) ||
+ (output == V4L2_PIX_FMT_SBGGR8) ||
+ (output == V4L2_PIX_FMT_SGBRG8)))
+ return 0;
+
+ if ((input == ATOMISP_INPUT_FORMAT_RAW_16) && (output == V4L2_PIX_FMT_SBGGR16))
+ return 0;
+
+ return -EINVAL;
+}
+
+u32 atomisp_get_pixel_depth(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_YVU420:
+ return 12;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ return 16;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_YUV444:
+ return 24;
+ case V4L2_PIX_FMT_RGBX32:
+ return 32;
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return 8;
+ default:
+ return 8 * 2; /* raw type now */
+ }
+}
+
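+/*
+ * Raw Bayer media bus formats are allocated in the 0x3000 range in
+ * include/uapi/linux/media-bus-format.h; the range check below relies on
+ * that allocation scheme.
+ */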
+bool atomisp_is_mbuscode_raw(uint32_t code)
+{
+ return code >= 0x3000 && code < 0x4000;
+}
+
+/*
+ * ISP features control function
+ */
+
+/*
+ * Set ISP capture mode based on current settings
+ */
+static void atomisp_update_capture_mode(struct atomisp_sub_device *asd)
+{
+ if (asd->params.gdc_cac_en)
+ atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_ADVANCED);
+ else if (asd->params.low_light)
+ atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_LOW_LIGHT);
+ else if (asd->video_out.sh_fmt == IA_CSS_FRAME_FORMAT_RAW)
+ atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_RAW);
+ else
+ atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_PRIMARY);
+}
+
+/*
+ * Function to enable/disable lens geometry distortion correction (GDC) and
+ * chromatic aberration correction (CAC)
+ */
+int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.gdc_cac_en;
+ return 0;
+ }
+
+ asd->params.gdc_cac_en = !!*value;
+ if (asd->params.gdc_cac_en)
+ asd->params.config.morph_table = asd->params.css_param.morph_table;
+ else
+ asd->params.config.morph_table = NULL;
+
+ asd->params.css_update_params_needed = true;
+ atomisp_update_capture_mode(asd);
+ return 0;
+}
+
+/*
+ * Function to enable/disable low light mode including ANR
+ */
+int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.low_light;
+ return 0;
+ }
+
+ asd->params.low_light = (*value != 0);
+ atomisp_update_capture_mode(asd);
+ return 0;
+}
+
+/*
+ * Function to enable/disable extra noise reduction (XNR) in low light
+ * condition
+ */
+int atomisp_xnr(struct atomisp_sub_device *asd, int flag,
+ int *xnr_enable)
+{
+ if (flag == 0) {
+ *xnr_enable = asd->params.xnr_en;
+ return 0;
+ }
+
+ atomisp_css_capture_enable_xnr(asd, !!*xnr_enable);
+
+ return 0;
+}
+
+/*
+ * Function to configure bayer noise reduction
+ */
+int atomisp_nr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_nr_config *arg)
+{
+ if (flag == 0) {
+ /* Get nr config from current setup */
+ if (atomisp_css_get_nr_config(asd, arg))
+ return -EINVAL;
+ } else {
+ /* Set nr config to isp parameters */
+ memcpy(&asd->params.css_param.nr_config, arg,
+ sizeof(struct ia_css_nr_config));
+ asd->params.config.nr_config = &asd->params.css_param.nr_config;
+ asd->params.css_update_params_needed = true;
+ }
+ return 0;
+}
+
+/*
+ * Function to configure temporal noise reduction (TNR)
+ */
+int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_tnr_config *config)
+{
+ if (flag == 0) {
+ /* Get tnr config from current setup */
+ if (atomisp_css_get_tnr_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set tnr config to isp parameters */
+ memcpy(&asd->params.css_param.tnr_config, config,
+ sizeof(struct ia_css_tnr_config));
+ asd->params.config.tnr_config = &asd->params.css_param.tnr_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure black level compensation
+ */
+int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ob_config *config)
+{
+ if (flag == 0) {
+ /* Get ob config from current setup */
+ if (atomisp_css_get_ob_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ob config to isp parameters */
+ memcpy(&asd->params.css_param.ob_config, config,
+ sizeof(struct ia_css_ob_config));
+ asd->params.config.ob_config = &asd->params.css_param.ob_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure edge enhancement
+ */
+int atomisp_ee(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ee_config *config)
+{
+ if (flag == 0) {
+ /* Get ee config from current setup */
+ if (atomisp_css_get_ee_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ee config to isp parameters */
+ memcpy(&asd->params.css_param.ee_config, config,
+ sizeof(asd->params.css_param.ee_config));
+ asd->params.config.ee_config = &asd->params.css_param.ee_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update Gamma table for gamma, brightness and contrast config
+ */
+int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gamma_table *config)
+{
+ if (flag == 0) {
+ /* Get gamma table from current setup */
+ if (atomisp_css_get_gamma_table(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set gamma table to isp parameters */
+ memcpy(&asd->params.css_param.gamma_table, config,
+ sizeof(asd->params.css_param.gamma_table));
+ asd->params.config.gamma_table = &asd->params.css_param.gamma_table;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update Ctc table for Chroma Enhancement
+ */
+int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ctc_table *config)
+{
+ if (flag == 0) {
+ /* Get ctc table from current setup */
+ if (atomisp_css_get_ctc_table(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ctc table to isp parameters */
+ memcpy(&asd->params.css_param.ctc_table, config,
+ sizeof(asd->params.css_param.ctc_table));
+ atomisp_css_set_ctc_table(asd, &asd->params.css_param.ctc_table);
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update gamma correction parameters
+ */
+int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gc_config *config)
+{
+ if (flag == 0) {
+ /* Get gamma correction params from current setup */
+ if (atomisp_css_get_gc_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set gamma correction params to isp parameters */
+ memcpy(&asd->params.css_param.gc_config, config,
+ sizeof(asd->params.css_param.gc_config));
+ asd->params.config.gc_config = &asd->params.css_param.gc_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update narrow gamma flag
+ */
+int atomisp_formats(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_formats_config *config)
+{
+ if (flag == 0) {
+ /* Get narrow gamma flag from current setup */
+ if (atomisp_css_get_formats_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set narrow gamma flag to isp parameters */
+ memcpy(&asd->params.css_param.formats_config, config,
+ sizeof(asd->params.css_param.formats_config));
+ asd->params.config.formats_config = &asd->params.css_param.formats_config;
+ }
+
+ return 0;
+}
+
+void atomisp_free_internal_buffers(struct atomisp_sub_device *asd)
+{
+ atomisp_free_css_parameters(&asd->params.css_param);
+}
+
+static void atomisp_update_grid_info(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int err;
+
+ if (atomisp_css_get_grid_info(asd, pipe_id))
+ return;
+
+	/*
+	 * We must free all buffers because they no longer match
+	 * the grid size.
+	 */
+ atomisp_css_free_stat_buffers(asd);
+
+ err = atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL);
+ if (err) {
+ dev_err(isp->dev, "stat_buf allocate error\n");
+ goto err;
+ }
+
+ if (atomisp_alloc_3a_output_buf(asd)) {
+ /* Failure for 3A buffers does not influence DIS buffers */
+ if (asd->params.s3a_output_bytes != 0) {
+			/*
+			 * For SoC sensors s3a_output_bytes can be 0; this if
+			 * condition avoids a false error log in that case.
+			 */
+ dev_err(isp->dev, "Failed to allocate memory for 3A statistics\n");
+ }
+ goto err;
+ }
+
+ if (atomisp_alloc_dis_coef_buf(asd)) {
+ dev_err(isp->dev,
+ "Failed to allocate memory for DIS statistics\n");
+ goto err;
+ }
+
+ if (atomisp_alloc_metadata_output_buf(asd)) {
+ dev_err(isp->dev, "Failed to allocate memory for metadata\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ atomisp_css_free_stat_buffers(asd);
+ return;
+}
+
+static void atomisp_curr_user_grid_info(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *info)
+{
+ memcpy(info, &asd->params.curr_grid_info.s3a_grid,
+ sizeof(struct ia_css_3a_grid_info));
+}
+
+int atomisp_compare_grid(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *atomgrid)
+{
+ struct atomisp_grid_info tmp = {0};
+
+ atomisp_curr_user_grid_info(asd, &tmp);
+ return memcmp(atomgrid, &tmp, sizeof(tmp));
+}
+
+/*
+ * Function to update Gdc table for gdc
+ */
+int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_morph_table *config)
+{
+ int ret;
+ int i;
+ struct atomisp_device *isp = asd->isp;
+
+ if (flag == 0) {
+ /* Get gdc table from current setup */
+ struct ia_css_morph_table tab = {0};
+
+ atomisp_css_get_morph_table(asd, &tab);
+
+ config->width = tab.width;
+ config->height = tab.height;
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ ret = copy_to_user(config->coordinates_x[i],
+ tab.coordinates_x[i], tab.height *
+ tab.width * sizeof(*tab.coordinates_x[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy to User for x\n");
+ return -EFAULT;
+ }
+ ret = copy_to_user(config->coordinates_y[i],
+ tab.coordinates_y[i], tab.height *
+ tab.width * sizeof(*tab.coordinates_y[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy to User for y\n");
+ return -EFAULT;
+ }
+ }
+ } else {
+ struct ia_css_morph_table *tab =
+ asd->params.css_param.morph_table;
+
+ /* free first if we have one */
+ if (tab) {
+ atomisp_css_morph_table_free(tab);
+ asd->params.css_param.morph_table = NULL;
+ }
+
+ /* allocate new one */
+ tab = atomisp_css_morph_table_allocate(config->width,
+ config->height);
+
+ if (!tab) {
+ dev_err(isp->dev, "out of memory\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ ret = copy_from_user(tab->coordinates_x[i],
+ config->coordinates_x[i],
+ config->height * config->width *
+ sizeof(*config->coordinates_x[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy from User for x, ret %d\n",
+ ret);
+ atomisp_css_morph_table_free(tab);
+ return -EFAULT;
+ }
+ ret = copy_from_user(tab->coordinates_y[i],
+ config->coordinates_y[i],
+ config->height * config->width *
+ sizeof(*config->coordinates_y[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy from User for y, ret is %d\n",
+ ret);
+ atomisp_css_morph_table_free(tab);
+ return -EFAULT;
+ }
+ }
+ asd->params.css_param.morph_table = tab;
+ if (asd->params.gdc_cac_en)
+ asd->params.config.morph_table = tab;
+ }
+
+ return 0;
+}
+
+int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_macc_config *config)
+{
+ struct ia_css_macc_table *macc_table;
+
+ switch (config->color_effect) {
+ case V4L2_COLORFX_NONE:
+ macc_table = &asd->params.css_param.macc_table;
+ break;
+ case V4L2_COLORFX_SKY_BLUE:
+ macc_table = &blue_macc_table;
+ break;
+ case V4L2_COLORFX_GRASS_GREEN:
+ macc_table = &green_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_LOW:
+ macc_table = &skin_low_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN:
+ macc_table = &skin_medium_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_HIGH:
+ macc_table = &skin_high_macc_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (flag == 0) {
+ /* Get macc table from current setup */
+ memcpy(&config->table, macc_table,
+ sizeof(struct ia_css_macc_table));
+ } else {
+ memcpy(macc_table, &config->table,
+ sizeof(struct ia_css_macc_table));
+ if (config->color_effect == asd->params.color_effect)
+ asd->params.config.macc_table = macc_table;
+ }
+
+ return 0;
+}
+
+int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector)
+{
+ atomisp_css_video_set_dis_vector(asd, vector);
+
+ asd->params.dis_proj_data_valid = false;
+ asd->params.css_update_params_needed = true;
+ return 0;
+}
+
+/*
+ * Function to set/get image stabilization statistics
+ */
+int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats)
+{
+ return atomisp_css_get_dis_stat(asd, stats);
+}
+
+/*
+ * Function to set the current sensor pixel array size (camera_profiles.xml)
+ */
+int atomisp_set_array_res(struct atomisp_sub_device *asd,
+ struct atomisp_resolution *config)
+{
+ dev_dbg(asd->isp->dev, ">%s start\n", __func__);
+ if (!config) {
+ dev_err(asd->isp->dev, "Set sensor array size is not valid\n");
+ return -EINVAL;
+ }
+
+ asd->sensor_array_res.width = config->width;
+ asd->sensor_array_res.height = config->height;
+ return 0;
+}
+
+/*
+ * Function to get DVS2 BQ resolution settings
+ */
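+/*
+ * The BQ values used by the CSS here are in units of 2x2 pixel blocks
+ * (presumably one bayer quad per BQ), hence the divide-by-two applied to
+ * the pixel dimensions below.
+ */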
+int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
+ struct atomisp_dvs2_bq_resolutions *bq_res)
+{
+ struct ia_css_pipe_config *pipe_cfg = NULL;
+
+ struct ia_css_stream *stream =
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;
+ if (!stream) {
+ dev_warn(asd->isp->dev, "stream is not created");
+ return -EAGAIN;
+ }
+
+ pipe_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_VIDEO];
+
+ if (!bq_res)
+ return -EINVAL;
+
+ /* the GDC output resolution */
+ bq_res->output_bq.width_bq = pipe_cfg->output_info[0].res.width / 2;
+ bq_res->output_bq.height_bq = pipe_cfg->output_info[0].res.height / 2;
+
+ bq_res->envelope_bq.width_bq = 0;
+ bq_res->envelope_bq.height_bq = 0;
+ /* the GDC input resolution */
+ bq_res->source_bq.width_bq = bq_res->output_bq.width_bq +
+ pipe_cfg->dvs_envelope.width / 2;
+ bq_res->source_bq.height_bq = bq_res->output_bq.height_bq +
+ pipe_cfg->dvs_envelope.height / 2;
+	/*
+	 * Bad pixels caused by spatial filter processing:
+	 * the ISP filter resolution should be given by CSS/FW, but for now
+	 * there is no such API to query it and it is a fixed value, so it is
+	 * hardcoded here.
+	 */
+ bq_res->ispfilter_bq.width_bq = 12 / 2;
+ bq_res->ispfilter_bq.height_bq = 12 / 2;
+ /* spatial filter shift, always 4 pixels */
+ bq_res->gdc_shift_bq.width_bq = 4 / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+
+ if (asd->params.video_dis_en) {
+ bq_res->envelope_bq.width_bq = pipe_cfg->dvs_envelope.width / 2 -
+ bq_res->ispfilter_bq.width_bq;
+ bq_res->envelope_bq.height_bq = pipe_cfg->dvs_envelope.height / 2 -
+ bq_res->ispfilter_bq.height_bq;
+ }
+
+ dev_dbg(asd->isp->dev,
+ "source_bq.width_bq %d, source_bq.height_bq %d,\nispfilter_bq.width_bq %d, ispfilter_bq.height_bq %d,\ngdc_shift_bq.width_bq %d, gdc_shift_bq.height_bq %d,\nenvelope_bq.width_bq %d, envelope_bq.height_bq %d,\noutput_bq.width_bq %d, output_bq.height_bq %d\n",
+ bq_res->source_bq.width_bq, bq_res->source_bq.height_bq,
+ bq_res->ispfilter_bq.width_bq, bq_res->ispfilter_bq.height_bq,
+ bq_res->gdc_shift_bq.width_bq, bq_res->gdc_shift_bq.height_bq,
+ bq_res->envelope_bq.width_bq, bq_res->envelope_bq.height_bq,
+ bq_res->output_bq.width_bq, bq_res->output_bq.height_bq);
+
+ return 0;
+}
+
+int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs)
+{
+ return atomisp_css_set_dis_coefs(asd, coefs);
+}
+
+/*
+ * Function to set/get 3A stat from isp
+ */
+int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_statistics *config)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf;
+ unsigned long ret;
+
+ if (flag != 0)
+ return -EINVAL;
+
+ /* sanity check to avoid writing into unallocated memory. */
+ if (asd->params.s3a_output_bytes == 0)
+ return -EINVAL;
+
+ if (atomisp_compare_grid(asd, &config->grid_info) != 0) {
+		/*
+		 * If the grid info in the argument differs from the current
+		 * grid info, we tell the caller to reset the grid size and
+		 * try again.
+		 */
+ return -EAGAIN;
+ }
+
+ if (list_empty(&asd->s3a_stats_ready)) {
+ dev_err(isp->dev, "3a statistics is not valid.\n");
+ return -EAGAIN;
+ }
+
+ s3a_buf = list_entry(asd->s3a_stats_ready.next,
+ struct atomisp_s3a_buf, list);
+ if (s3a_buf->s3a_map)
+ ia_css_translate_3a_statistics(
+ asd->params.s3a_user_stat, s3a_buf->s3a_map);
+ else
+ ia_css_get_3a_statistics(asd->params.s3a_user_stat,
+ s3a_buf->s3a_data);
+
+ config->exp_id = s3a_buf->s3a_data->exp_id;
+ config->isp_config_id = s3a_buf->s3a_data->isp_config_id;
+
+ ret = copy_to_user(config->data, asd->params.s3a_user_stat->data,
+ asd->params.s3a_output_bytes);
+ if (ret) {
+ dev_err(isp->dev, "copy to user failed: copied %lu bytes\n",
+ ret);
+ return -EFAULT;
+ }
+
+ /* Move to free buffer list */
+ list_del_init(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ dev_dbg(isp->dev, "%s: finish getting exp_id %d 3a stat, isp_config_id %d\n",
+ __func__,
+ config->exp_id, config->isp_config_id);
+ return 0;
+}
+
+/*
+ * Function to calculate real zoom region for every pipe
+ */
+int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
+ struct ia_css_dz_config *dz_config,
+ enum ia_css_pipe_id css_pipe_id)
+
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct atomisp_resolution eff_res, out_res;
+ int w_offset, h_offset;
+
+ memset(&eff_res, 0, sizeof(eff_res));
+ memset(&out_res, 0, sizeof(out_res));
+
+ if (dz_config->dx || dz_config->dy)
+ return 0;
+
+ if (css_pipe_id != IA_CSS_PIPE_ID_PREVIEW
+ && css_pipe_id != IA_CSS_PIPE_ID_CAPTURE) {
+		dev_err(asd->isp->dev, "%s: the selected pipe does not support a crop region\n",
+			__func__);
+ return -EINVAL;
+ }
+
+ eff_res.width =
+ stream_env->stream_config.input_config.effective_res.width;
+ eff_res.height =
+ stream_env->stream_config.input_config.effective_res.height;
+ if (eff_res.width == 0 || eff_res.height == 0) {
+		dev_err(asd->isp->dev, "%s: invalid effective resolution\n",
+			__func__);
+ return -EINVAL;
+ }
+
+ if (dz_config->zoom_region.resolution.width
+ == asd->sensor_array_res.width
+ || dz_config->zoom_region.resolution.height
+ == asd->sensor_array_res.height) {
+		/* no crop region needed */
+ dz_config->zoom_region.origin.x = 0;
+ dz_config->zoom_region.origin.y = 0;
+ dz_config->zoom_region.resolution.width = eff_res.width;
+ dz_config->zoom_region.resolution.height = eff_res.height;
+ return 0;
+ }
+
+	/*
+	 * FIXME:
+	 * This is not the correct implementation per Google's definition,
+	 * due to a firmware limitation. Map the real crop region based on
+	 * the maximum crop region calculated above.
+	 */
+
+ if (!IS_ISP2401) {
+ dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x
+ * eff_res.width
+ / asd->sensor_array_res.width;
+ dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y
+ * eff_res.height
+ / asd->sensor_array_res.height;
+ dz_config->zoom_region.resolution.width = dz_config->zoom_region.resolution.width
+ * eff_res.width
+ / asd->sensor_array_res.width;
+ dz_config->zoom_region.resolution.height = dz_config->zoom_region.resolution.height
+ * eff_res.height
+ / asd->sensor_array_res.height;
+ /*
+ * Set same ratio of crop region resolution and current pipe output
+ * resolution
+ */
+ out_res.width = stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
+ out_res.height = stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
+ if (out_res.width == 0 || out_res.height == 0) {
+			dev_err(asd->isp->dev, "%s: invalid current pipe output resolution\n",
+				__func__);
+ return -EINVAL;
+ }
+ } else {
+ out_res.width = stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
+ out_res.height = stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
+ if (out_res.width == 0 || out_res.height == 0) {
+			dev_err(asd->isp->dev, "%s: invalid current pipe output resolution\n",
+				__func__);
+ return -EINVAL;
+ }
+
+ if (asd->sensor_array_res.width * out_res.height
+ < out_res.width * asd->sensor_array_res.height) {
+ h_offset = asd->sensor_array_res.height
+ - asd->sensor_array_res.width
+ * out_res.height / out_res.width;
+ h_offset = h_offset / 2;
+ if (dz_config->zoom_region.origin.y < h_offset)
+ dz_config->zoom_region.origin.y = 0;
+ else
+ dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y - h_offset;
+ w_offset = 0;
+ } else {
+ w_offset = asd->sensor_array_res.width
+ - asd->sensor_array_res.height
+ * out_res.width / out_res.height;
+ w_offset = w_offset / 2;
+ if (dz_config->zoom_region.origin.x < w_offset)
+ dz_config->zoom_region.origin.x = 0;
+ else
+ dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x - w_offset;
+ h_offset = 0;
+ }
+ dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x
+ * eff_res.width
+ / (asd->sensor_array_res.width - 2 * w_offset);
+ dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y
+ * eff_res.height
+ / (asd->sensor_array_res.height - 2 * h_offset);
+ dz_config->zoom_region.resolution.width = dz_config->zoom_region.resolution.width
+ * eff_res.width
+ / (asd->sensor_array_res.width - 2 * w_offset);
+ dz_config->zoom_region.resolution.height = dz_config->zoom_region.resolution.height
+ * eff_res.height
+ / (asd->sensor_array_res.height - 2 * h_offset);
+ }
+
+ if (out_res.width * dz_config->zoom_region.resolution.height
+ > dz_config->zoom_region.resolution.width * out_res.height) {
+ dz_config->zoom_region.resolution.height =
+ dz_config->zoom_region.resolution.width
+ * out_res.height / out_res.width;
+ } else {
+ dz_config->zoom_region.resolution.width =
+ dz_config->zoom_region.resolution.height
+ * out_res.width / out_res.height;
+ }
+ dev_dbg(asd->isp->dev,
+ "%s crop region:(%d,%d),(%d,%d) eff_res(%d, %d) array_size(%d,%d) out_res(%d, %d)\n",
+ __func__, dz_config->zoom_region.origin.x,
+ dz_config->zoom_region.origin.y,
+ dz_config->zoom_region.resolution.width,
+ dz_config->zoom_region.resolution.height,
+ eff_res.width, eff_res.height,
+ asd->sensor_array_res.width,
+ asd->sensor_array_res.height,
+ out_res.width, out_res.height);
+
+ if ((dz_config->zoom_region.origin.x +
+ dz_config->zoom_region.resolution.width
+ > eff_res.width) ||
+ (dz_config->zoom_region.origin.y +
+ dz_config->zoom_region.resolution.height
+ > eff_res.height))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Function to check whether the zoom region is valid
+ */
+static bool atomisp_check_zoom_region(
+ struct atomisp_sub_device *asd,
+ struct ia_css_dz_config *dz_config)
+{
+ struct atomisp_resolution config;
+ bool flag = false;
+ unsigned int w, h;
+
+ memset(&config, 0, sizeof(struct atomisp_resolution));
+
+ if (dz_config->dx && dz_config->dy)
+ return true;
+
+ config.width = asd->sensor_array_res.width;
+ config.height = asd->sensor_array_res.height;
+ w = dz_config->zoom_region.origin.x +
+ dz_config->zoom_region.resolution.width;
+ h = dz_config->zoom_region.origin.y +
+ dz_config->zoom_region.resolution.height;
+
+ if ((w <= config.width) && (h <= config.height) && w > 0 && h > 0)
+ flag = true;
+ else
+		/* an invalid zoom region was set */
+ dev_err(asd->isp->dev,
+ "%s zoom region ERROR:dz_config:(%d,%d),(%d,%d)array_res(%d, %d)\n",
+ __func__, dz_config->zoom_region.origin.x,
+ dz_config->zoom_region.origin.y,
+ dz_config->zoom_region.resolution.width,
+ dz_config->zoom_region.resolution.height,
+ config.width, config.height);
+
+ return flag;
+}
+
+void atomisp_apply_css_parameters(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_params *css_param)
+{
+ if (css_param->update_flag.wb_config)
+ asd->params.config.wb_config = &css_param->wb_config;
+
+ if (css_param->update_flag.ob_config)
+ asd->params.config.ob_config = &css_param->ob_config;
+
+ if (css_param->update_flag.dp_config)
+ asd->params.config.dp_config = &css_param->dp_config;
+
+ if (css_param->update_flag.nr_config)
+ asd->params.config.nr_config = &css_param->nr_config;
+
+ if (css_param->update_flag.ee_config)
+ asd->params.config.ee_config = &css_param->ee_config;
+
+ if (css_param->update_flag.tnr_config)
+ asd->params.config.tnr_config = &css_param->tnr_config;
+
+ if (css_param->update_flag.a3a_config)
+ asd->params.config.s3a_config = &css_param->s3a_config;
+
+ if (css_param->update_flag.ctc_config)
+ asd->params.config.ctc_config = &css_param->ctc_config;
+
+ if (css_param->update_flag.cnr_config)
+ asd->params.config.cnr_config = &css_param->cnr_config;
+
+ if (css_param->update_flag.ecd_config)
+ asd->params.config.ecd_config = &css_param->ecd_config;
+
+ if (css_param->update_flag.ynr_config)
+ asd->params.config.ynr_config = &css_param->ynr_config;
+
+ if (css_param->update_flag.fc_config)
+ asd->params.config.fc_config = &css_param->fc_config;
+
+ if (css_param->update_flag.macc_config)
+ asd->params.config.macc_config = &css_param->macc_config;
+
+ if (css_param->update_flag.aa_config)
+ asd->params.config.aa_config = &css_param->aa_config;
+
+ if (css_param->update_flag.anr_config)
+ asd->params.config.anr_config = &css_param->anr_config;
+
+ if (css_param->update_flag.xnr_config)
+ asd->params.config.xnr_config = &css_param->xnr_config;
+
+ if (css_param->update_flag.yuv2rgb_cc_config)
+ asd->params.config.yuv2rgb_cc_config = &css_param->yuv2rgb_cc_config;
+
+ if (css_param->update_flag.rgb2yuv_cc_config)
+ asd->params.config.rgb2yuv_cc_config = &css_param->rgb2yuv_cc_config;
+
+ if (css_param->update_flag.macc_table)
+ asd->params.config.macc_table = &css_param->macc_table;
+
+ if (css_param->update_flag.xnr_table)
+ asd->params.config.xnr_table = &css_param->xnr_table;
+
+ if (css_param->update_flag.r_gamma_table)
+ asd->params.config.r_gamma_table = &css_param->r_gamma_table;
+
+ if (css_param->update_flag.g_gamma_table)
+ asd->params.config.g_gamma_table = &css_param->g_gamma_table;
+
+ if (css_param->update_flag.b_gamma_table)
+ asd->params.config.b_gamma_table = &css_param->b_gamma_table;
+
+ if (css_param->update_flag.anr_thres)
+ atomisp_css_set_anr_thres(asd, &css_param->anr_thres);
+
+ if (css_param->update_flag.shading_table)
+ asd->params.config.shading_table = css_param->shading_table;
+
+ if (css_param->update_flag.morph_table && asd->params.gdc_cac_en)
+ asd->params.config.morph_table = css_param->morph_table;
+
+ if (css_param->update_flag.dvs2_coefs) {
+ struct ia_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(
+ &asd->params.curr_grid_info);
+
+ if (dvs_grid_info && dvs_grid_info->enable)
+ atomisp_css_set_dvs2_coefs(asd, css_param->dvs2_coeff);
+ }
+
+ if (css_param->update_flag.dvs_6axis_config)
+ atomisp_css_set_dvs_6axis(asd, css_param->dvs_6axis);
+
+ atomisp_css_set_isp_config_id(asd, css_param->isp_config_id);
+ /*
+	 * These configurations are only used by ISP1.x, not by ISP2.x,
+	 * so do not handle them. See the comments of ia_css_isp_config.
+ * 1 cc_config
+ * 2 ce_config
+ * 3 de_config
+ * 4 gc_config
+ * 5 gamma_table
+ * 6 ctc_table
+ * 7 dvs_coefs
+ */
+}
+
+static unsigned long copy_from_compatible(void *to, const void *from,
+					   unsigned long n, bool from_user)
+{
+ if (from_user)
+ return copy_from_user(to, (void __user *)from, n);
+ else
+ memcpy(to, from, n);
+ return 0;
+}
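+
+/*
+ * copy_from_compatible() lets the parameter-copy helpers below run on both
+ * input paths: the ioctl path passes from_user == true and __user source
+ * pointers, while atomisp_makeup_css_parameters() replays a kernel-side
+ * copy of the parameters with from_user == false, in which case a plain
+ * memcpy() is sufficient.
+ */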
+
+int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct atomisp_parameters *cur_config = &css_param->update_flag;
+
+ if (!arg || !asd || !css_param)
+ return -EINVAL;
+
+ if (arg->wb_config && (from_user || !cur_config->wb_config)) {
+ if (copy_from_compatible(&css_param->wb_config, arg->wb_config,
+ sizeof(struct ia_css_wb_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.wb_config =
+ (struct atomisp_wb_config *)&css_param->wb_config;
+ }
+
+ if (arg->ob_config && (from_user || !cur_config->ob_config)) {
+ if (copy_from_compatible(&css_param->ob_config, arg->ob_config,
+ sizeof(struct ia_css_ob_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ob_config =
+ (struct atomisp_ob_config *)&css_param->ob_config;
+ }
+
+ if (arg->dp_config && (from_user || !cur_config->dp_config)) {
+ if (copy_from_compatible(&css_param->dp_config, arg->dp_config,
+ sizeof(struct ia_css_dp_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.dp_config =
+ (struct atomisp_dp_config *)&css_param->dp_config;
+ }
+
+ if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ if (arg->dz_config && (from_user || !cur_config->dz_config)) {
+ if (copy_from_compatible(&css_param->dz_config,
+ arg->dz_config,
+ sizeof(struct ia_css_dz_config),
+ from_user))
+ return -EFAULT;
+ if (!atomisp_check_zoom_region(asd,
+ &css_param->dz_config)) {
+ dev_err(asd->isp->dev, "crop region error!");
+ return -EINVAL;
+ }
+ css_param->update_flag.dz_config =
+ (struct atomisp_dz_config *)
+ &css_param->dz_config;
+ }
+ }
+
+ if (arg->nr_config && (from_user || !cur_config->nr_config)) {
+ if (copy_from_compatible(&css_param->nr_config, arg->nr_config,
+ sizeof(struct ia_css_nr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.nr_config =
+ (struct atomisp_nr_config *)&css_param->nr_config;
+ }
+
+ if (arg->ee_config && (from_user || !cur_config->ee_config)) {
+ if (copy_from_compatible(&css_param->ee_config, arg->ee_config,
+ sizeof(struct ia_css_ee_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ee_config =
+ (struct atomisp_ee_config *)&css_param->ee_config;
+ }
+
+ if (arg->tnr_config && (from_user || !cur_config->tnr_config)) {
+ if (copy_from_compatible(&css_param->tnr_config,
+ arg->tnr_config,
+ sizeof(struct ia_css_tnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.tnr_config =
+ (struct atomisp_tnr_config *)
+ &css_param->tnr_config;
+ }
+
+ if (arg->a3a_config && (from_user || !cur_config->a3a_config)) {
+ if (copy_from_compatible(&css_param->s3a_config,
+ arg->a3a_config,
+ sizeof(struct ia_css_3a_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.a3a_config =
+ (struct atomisp_3a_config *)&css_param->s3a_config;
+ }
+
+ if (arg->ctc_config && (from_user || !cur_config->ctc_config)) {
+ if (copy_from_compatible(&css_param->ctc_config,
+ arg->ctc_config,
+ sizeof(struct ia_css_ctc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ctc_config =
+ (struct atomisp_ctc_config *)
+ &css_param->ctc_config;
+ }
+
+ if (arg->cnr_config && (from_user || !cur_config->cnr_config)) {
+ if (copy_from_compatible(&css_param->cnr_config,
+ arg->cnr_config,
+ sizeof(struct ia_css_cnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.cnr_config =
+ (struct atomisp_cnr_config *)
+ &css_param->cnr_config;
+ }
+
+ if (arg->ecd_config && (from_user || !cur_config->ecd_config)) {
+ if (copy_from_compatible(&css_param->ecd_config,
+ arg->ecd_config,
+ sizeof(struct ia_css_ecd_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ecd_config =
+ (struct atomisp_ecd_config *)
+ &css_param->ecd_config;
+ }
+
+ if (arg->ynr_config && (from_user || !cur_config->ynr_config)) {
+ if (copy_from_compatible(&css_param->ynr_config,
+ arg->ynr_config,
+ sizeof(struct ia_css_ynr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ynr_config =
+ (struct atomisp_ynr_config *)
+ &css_param->ynr_config;
+ }
+
+ if (arg->fc_config && (from_user || !cur_config->fc_config)) {
+ if (copy_from_compatible(&css_param->fc_config,
+ arg->fc_config,
+ sizeof(struct ia_css_fc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.fc_config =
+ (struct atomisp_fc_config *)&css_param->fc_config;
+ }
+
+ if (arg->macc_config && (from_user || !cur_config->macc_config)) {
+ if (copy_from_compatible(&css_param->macc_config,
+ arg->macc_config,
+ sizeof(struct ia_css_macc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.macc_config =
+ (struct atomisp_macc_config *)
+ &css_param->macc_config;
+ }
+
+ if (arg->aa_config && (from_user || !cur_config->aa_config)) {
+ if (copy_from_compatible(&css_param->aa_config, arg->aa_config,
+ sizeof(struct ia_css_aa_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.aa_config =
+ (struct atomisp_aa_config *)&css_param->aa_config;
+ }
+
+ if (arg->anr_config && (from_user || !cur_config->anr_config)) {
+ if (copy_from_compatible(&css_param->anr_config,
+ arg->anr_config,
+ sizeof(struct ia_css_anr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.anr_config =
+ (struct atomisp_anr_config *)
+ &css_param->anr_config;
+ }
+
+ if (arg->xnr_config && (from_user || !cur_config->xnr_config)) {
+ if (copy_from_compatible(&css_param->xnr_config,
+ arg->xnr_config,
+ sizeof(struct ia_css_xnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.xnr_config =
+ (struct atomisp_xnr_config *)
+ &css_param->xnr_config;
+ }
+
+ if (arg->yuv2rgb_cc_config &&
+ (from_user || !cur_config->yuv2rgb_cc_config)) {
+ if (copy_from_compatible(&css_param->yuv2rgb_cc_config,
+ arg->yuv2rgb_cc_config,
+ sizeof(struct ia_css_cc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.yuv2rgb_cc_config =
+ (struct atomisp_cc_config *)
+ &css_param->yuv2rgb_cc_config;
+ }
+
+ if (arg->rgb2yuv_cc_config &&
+ (from_user || !cur_config->rgb2yuv_cc_config)) {
+ if (copy_from_compatible(&css_param->rgb2yuv_cc_config,
+ arg->rgb2yuv_cc_config,
+ sizeof(struct ia_css_cc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.rgb2yuv_cc_config =
+ (struct atomisp_cc_config *)
+ &css_param->rgb2yuv_cc_config;
+ }
+
+ if (arg->macc_table && (from_user || !cur_config->macc_table)) {
+ if (copy_from_compatible(&css_param->macc_table,
+ arg->macc_table,
+ sizeof(struct ia_css_macc_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.macc_table =
+ (struct atomisp_macc_table *)
+ &css_param->macc_table;
+ }
+
+ if (arg->xnr_table && (from_user || !cur_config->xnr_table)) {
+ if (copy_from_compatible(&css_param->xnr_table,
+ arg->xnr_table,
+ sizeof(struct ia_css_xnr_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.xnr_table =
+ (struct atomisp_xnr_table *)&css_param->xnr_table;
+ }
+
+ if (arg->r_gamma_table && (from_user || !cur_config->r_gamma_table)) {
+ if (copy_from_compatible(&css_param->r_gamma_table,
+ arg->r_gamma_table,
+ sizeof(struct ia_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.r_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->r_gamma_table;
+ }
+
+ if (arg->g_gamma_table && (from_user || !cur_config->g_gamma_table)) {
+ if (copy_from_compatible(&css_param->g_gamma_table,
+ arg->g_gamma_table,
+ sizeof(struct ia_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.g_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->g_gamma_table;
+ }
+
+ if (arg->b_gamma_table && (from_user || !cur_config->b_gamma_table)) {
+ if (copy_from_compatible(&css_param->b_gamma_table,
+ arg->b_gamma_table,
+ sizeof(struct ia_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.b_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->b_gamma_table;
+ }
+
+ if (arg->anr_thres && (from_user || !cur_config->anr_thres)) {
+ if (copy_from_compatible(&css_param->anr_thres, arg->anr_thres,
+ sizeof(struct ia_css_anr_thres),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.anr_thres =
+ (struct atomisp_anr_thres *)&css_param->anr_thres;
+ }
+
+ if (from_user)
+ css_param->isp_config_id = arg->isp_config_id;
+ /*
+	 * These configurations are only used by ISP1.x, not by ISP2.x,
+	 * so do not handle them. See the comments of ia_css_isp_config.
+ * 1 cc_config
+ * 2 ce_config
+ * 3 de_config
+ * 4 gc_config
+ * 5 gamma_table
+ * 6 ctc_table
+ * 7 dvs_coefs
+ */
+ return 0;
+}
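+
+/*
+ * Note that each "arg->X && (from_user || !cur_config->X)" guard above means
+ * that, when replaying a kernel-side copy (from_user == false), a field is
+ * only copied if its update_flag has not been set yet, so settings that were
+ * already applied are not overwritten.
+ */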
+
+int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *source_st,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ unsigned int i;
+ unsigned int len_table;
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_shading_table *old_table;
+ struct atomisp_shading_table *st, dest_st;
+
+ if (!source_st)
+ return 0;
+
+ if (!css_param)
+ return -EINVAL;
+
+ if (!from_user && css_param->update_flag.shading_table)
+ return 0;
+
+ if (IS_ISP2401) {
+ if (copy_from_compatible(&dest_st, source_st,
+ sizeof(struct atomisp_shading_table),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy shading table failed!");
+ return -EFAULT;
+ }
+ st = &dest_st;
+ } else {
+ st = source_st;
+ }
+
+ old_table = css_param->shading_table;
+
+ /* user config is to disable the shading table. */
+ if (!st->enable) {
+ /* Generate a minimum table with enable = 0. */
+ shading_table = atomisp_css_shading_table_alloc(1, 1);
+ if (!shading_table)
+ return -ENOMEM;
+ shading_table->enable = 0;
+ goto set_lsc;
+ }
+
+ /* Setting a new table. Validate first - all tables must be set */
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (!st->data[i]) {
+ dev_err(asd->isp->dev, "shading table validate failed");
+ return -EINVAL;
+ }
+ }
+
+ /* Shading table size per color */
+ if (st->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ st->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
+ dev_err(asd->isp->dev, "shading table w/h validate failed!");
+ return -EINVAL;
+ }
+
+ shading_table = atomisp_css_shading_table_alloc(st->width, st->height);
+ if (!shading_table)
+ return -ENOMEM;
+
+ len_table = st->width * st->height * ATOMISP_SC_TYPE_SIZE;
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (copy_from_compatible(shading_table->data[i],
+ st->data[i], len_table, from_user)) {
+ atomisp_css_shading_table_free(shading_table);
+ return -EFAULT;
+ }
+ }
+ shading_table->sensor_width = st->sensor_width;
+ shading_table->sensor_height = st->sensor_height;
+ shading_table->fraction_bits = st->fraction_bits;
+ shading_table->enable = st->enable;
+
+ /* No need to update shading table if it is the same */
+ if (old_table &&
+ old_table->sensor_width == shading_table->sensor_width &&
+ old_table->sensor_height == shading_table->sensor_height &&
+ old_table->width == shading_table->width &&
+ old_table->height == shading_table->height &&
+ old_table->fraction_bits == shading_table->fraction_bits &&
+ old_table->enable == shading_table->enable) {
+ bool data_is_same = true;
+
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (memcmp(shading_table->data[i], old_table->data[i],
+ len_table) != 0) {
+ data_is_same = false;
+ break;
+ }
+ }
+
+ if (data_is_same) {
+ atomisp_css_shading_table_free(shading_table);
+ return 0;
+ }
+ }
+
+set_lsc:
+ /* set LSC to CSS */
+ css_param->shading_table = shading_table;
+ css_param->update_flag.shading_table = (struct atomisp_shading_table *)shading_table;
+ asd->params.sc_en = shading_table;
+
+ if (old_table)
+ atomisp_css_shading_table_free(old_table);
+
+ return 0;
+}
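+
+/*
+ * Summary of the LSC handling above: a table with enable == 0 is replaced by
+ * a minimal 1x1 disabled table rather than a NULL pointer, all
+ * ATOMISP_NUM_SC_COLORS planes must be present and within the maximum
+ * width/height, and a table identical to the currently installed one is
+ * freed again so no needless CSS update is triggered.
+ */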
+
+int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct ia_css_dvs_grid_info *cur =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ int dvs_hor_coef_bytes, dvs_ver_coef_bytes;
+ struct ia_css_dvs2_coefficients dvs2_coefs;
+
+ if (!coefs || !cur)
+ return 0;
+
+ if (!from_user && css_param->update_flag.dvs2_coefs)
+ return 0;
+
+ if (!IS_ISP2401) {
+ if (sizeof(*cur) != sizeof(coefs->grid) ||
+ memcmp(&coefs->grid, cur, sizeof(coefs->grid))) {
+ dev_err(asd->isp->dev, "dvs grid mismatch!\n");
+ /* If the grid info in the argument differs from the current
+ grid info, we tell the caller to reset the grid size and
+ try again. */
+ return -EAGAIN;
+ }
+
+ if (!coefs->hor_coefs.odd_real ||
+ !coefs->hor_coefs.odd_imag ||
+ !coefs->hor_coefs.even_real ||
+ !coefs->hor_coefs.even_imag ||
+ !coefs->ver_coefs.odd_real ||
+ !coefs->ver_coefs.odd_imag ||
+ !coefs->ver_coefs.even_real ||
+ !coefs->ver_coefs.even_imag)
+ return -EINVAL;
+
+ if (!css_param->dvs2_coeff) {
+ /* DIS coefficients. */
+ css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
+ if (!css_param->dvs2_coeff)
+ return -ENOMEM;
+ }
+
+ dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes;
+ dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes;
+ if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real,
+ coefs->hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag,
+ coefs->hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real,
+ coefs->hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag,
+ coefs->hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real,
+ coefs->ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag,
+ coefs->ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real,
+ coefs->ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag,
+ coefs->ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ return -EFAULT;
+ }
+ } else {
+ if (copy_from_compatible(&dvs2_coefs, coefs,
+ sizeof(struct ia_css_dvs2_coefficients),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy dvs2 coef failed");
+ return -EFAULT;
+ }
+
+ if (sizeof(*cur) != sizeof(dvs2_coefs.grid) ||
+ memcmp(&dvs2_coefs.grid, cur, sizeof(dvs2_coefs.grid))) {
+ dev_err(asd->isp->dev, "dvs grid mismatch!\n");
+ /* If the grid info in the argument differs from the current
+ grid info, we tell the caller to reset the grid size and
+ try again. */
+ return -EAGAIN;
+ }
+
+ if (!dvs2_coefs.hor_coefs.odd_real ||
+ !dvs2_coefs.hor_coefs.odd_imag ||
+ !dvs2_coefs.hor_coefs.even_real ||
+ !dvs2_coefs.hor_coefs.even_imag ||
+ !dvs2_coefs.ver_coefs.odd_real ||
+ !dvs2_coefs.ver_coefs.odd_imag ||
+ !dvs2_coefs.ver_coefs.even_real ||
+ !dvs2_coefs.ver_coefs.even_imag)
+ return -EINVAL;
+
+ if (!css_param->dvs2_coeff) {
+ /* DIS coefficients. */
+ css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
+ if (!css_param->dvs2_coeff)
+ return -ENOMEM;
+ }
+
+ dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes;
+ dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes;
+ if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real,
+ dvs2_coefs.hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag,
+ dvs2_coefs.hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real,
+ dvs2_coefs.hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag,
+ dvs2_coefs.hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real,
+ dvs2_coefs.ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag,
+ dvs2_coefs.ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real,
+ dvs2_coefs.ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag,
+ dvs2_coefs.ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ return -EFAULT;
+ }
+ }
+
+ css_param->update_flag.dvs2_coefs =
+ (struct atomisp_dis_coefficients *)css_param->dvs2_coeff;
+ return 0;
+}
+
+int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_6axis_config *source_6axis_config,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct ia_css_dvs_6axis_config *dvs_6axis_config;
+ struct ia_css_dvs_6axis_config *old_6axis_config;
+ struct ia_css_stream *stream =
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;
+ struct ia_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ int ret = -EFAULT;
+
+ if (!stream) {
+ dev_err(asd->isp->dev, "%s: internal error!", __func__);
+ return -EINVAL;
+ }
+
+ if (!source_6axis_config || !dvs_grid_info)
+ return 0;
+
+ if (!dvs_grid_info->enable)
+ return 0;
+
+ if (!from_user && css_param->update_flag.dvs_6axis_config)
+ return 0;
+
+	/* check whether the 6 axis config needs to be reallocated */
+ old_6axis_config = css_param->dvs_6axis;
+ dvs_6axis_config = old_6axis_config;
+
+ if (IS_ISP2401) {
+ struct ia_css_dvs_6axis_config t_6axis_config;
+
+ if (copy_from_compatible(&t_6axis_config, source_6axis_config,
+ sizeof(struct atomisp_dvs_6axis_config),
+ from_user)) {
+			dev_err(asd->isp->dev, "copy dvs 6axis config failed!");
+ return -EFAULT;
+ }
+
+ if (old_6axis_config &&
+ (old_6axis_config->width_y != t_6axis_config.width_y ||
+ old_6axis_config->height_y != t_6axis_config.height_y ||
+ old_6axis_config->width_uv != t_6axis_config.width_uv ||
+ old_6axis_config->height_uv != t_6axis_config.height_uv)) {
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ } else if (!dvs_6axis_config) {
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ }
+
+ dvs_6axis_config->exp_id = t_6axis_config.exp_id;
+
+ if (copy_from_compatible(dvs_6axis_config->xcoords_y,
+ t_6axis_config.xcoords_y,
+ t_6axis_config.width_y *
+ t_6axis_config.height_y *
+ sizeof(*dvs_6axis_config->xcoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_y,
+ t_6axis_config.ycoords_y,
+ t_6axis_config.width_y *
+ t_6axis_config.height_y *
+ sizeof(*dvs_6axis_config->ycoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->xcoords_uv,
+ t_6axis_config.xcoords_uv,
+ t_6axis_config.width_uv *
+ t_6axis_config.height_uv *
+ sizeof(*dvs_6axis_config->xcoords_uv),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_uv,
+ t_6axis_config.ycoords_uv,
+ t_6axis_config.width_uv *
+ t_6axis_config.height_uv *
+ sizeof(*dvs_6axis_config->ycoords_uv),
+ from_user))
+ goto error;
+ } else {
+ if (old_6axis_config &&
+ (old_6axis_config->width_y != source_6axis_config->width_y ||
+ old_6axis_config->height_y != source_6axis_config->height_y ||
+ old_6axis_config->width_uv != source_6axis_config->width_uv ||
+ old_6axis_config->height_uv != source_6axis_config->height_uv)) {
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ } else if (!dvs_6axis_config) {
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ dvs_6axis_config->exp_id = source_6axis_config->exp_id;
+
+ if (copy_from_compatible(dvs_6axis_config->xcoords_y,
+ source_6axis_config->xcoords_y,
+ source_6axis_config->width_y *
+ source_6axis_config->height_y *
+ sizeof(*source_6axis_config->xcoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_y,
+ source_6axis_config->ycoords_y,
+ source_6axis_config->width_y *
+ source_6axis_config->height_y *
+ sizeof(*source_6axis_config->ycoords_y),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->xcoords_uv,
+ source_6axis_config->xcoords_uv,
+ source_6axis_config->width_uv *
+ source_6axis_config->height_uv *
+ sizeof(*source_6axis_config->xcoords_uv),
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_uv,
+ source_6axis_config->ycoords_uv,
+ source_6axis_config->width_uv *
+ source_6axis_config->height_uv *
+ sizeof(*source_6axis_config->ycoords_uv),
+ from_user))
+ goto error;
+ }
+ css_param->dvs_6axis = dvs_6axis_config;
+ css_param->update_flag.dvs_6axis_config =
+ (struct atomisp_dvs_6axis_config *)dvs_6axis_config;
+ return 0;
+
+error:
+ if (dvs_6axis_config)
+ ia_css_dvs2_6axis_config_free(dvs_6axis_config);
+ return ret;
+}
+
+int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_morph_table *source_morph_table,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ int ret = -EFAULT;
+ unsigned int i;
+ struct ia_css_morph_table *morph_table;
+ struct ia_css_morph_table *old_morph_table;
+
+ if (!source_morph_table)
+ return 0;
+
+ if (!from_user && css_param->update_flag.morph_table)
+ return 0;
+
+ old_morph_table = css_param->morph_table;
+
+ if (IS_ISP2401) {
+ struct ia_css_morph_table mtbl;
+
+ if (copy_from_compatible(&mtbl, source_morph_table,
+ sizeof(struct atomisp_morph_table),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy morph table failed!");
+ return -EFAULT;
+ }
+
+ morph_table = atomisp_css_morph_table_allocate(
+ mtbl.width,
+ mtbl.height);
+ if (!morph_table)
+ return -ENOMEM;
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (copy_from_compatible(morph_table->coordinates_x[i],
+ (__force void *)source_morph_table->coordinates_x[i],
+ mtbl.height * mtbl.width *
+ sizeof(*morph_table->coordinates_x[i]),
+ from_user))
+ goto error;
+
+ if (copy_from_compatible(morph_table->coordinates_y[i],
+ (__force void *)source_morph_table->coordinates_y[i],
+ mtbl.height * mtbl.width *
+ sizeof(*morph_table->coordinates_y[i]),
+ from_user))
+ goto error;
+ }
+ } else {
+ morph_table = atomisp_css_morph_table_allocate(
+ source_morph_table->width,
+ source_morph_table->height);
+ if (!morph_table) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (copy_from_compatible(morph_table->coordinates_x[i],
+ (__force void *)source_morph_table->coordinates_x[i],
+ source_morph_table->height * source_morph_table->width *
+ sizeof(*source_morph_table->coordinates_x[i]),
+ from_user))
+ goto error;
+
+ if (copy_from_compatible(morph_table->coordinates_y[i],
+ (__force void *)source_morph_table->coordinates_y[i],
+ source_morph_table->height * source_morph_table->width *
+ sizeof(*source_morph_table->coordinates_y[i]),
+ from_user))
+ goto error;
+ }
+ }
+
+ css_param->morph_table = morph_table;
+ if (old_morph_table)
+ atomisp_css_morph_table_free(old_morph_table);
+ css_param->update_flag.morph_table =
+ (struct atomisp_morph_table *)morph_table;
+ return 0;
+
+error:
+ if (morph_table)
+ atomisp_css_morph_table_free(morph_table);
+ return ret;
+}
+
+int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param)
+{
+ int ret;
+
+ ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_css_cp_dvs2_coefs(asd,
+ (struct ia_css_dvs2_coefficients *)arg->dvs2_coefs,
+ css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
+ css_param, false);
+ return ret;
+}
+
+void atomisp_free_css_parameters(struct atomisp_css_params *css_param)
+{
+ if (css_param->dvs_6axis) {
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+ }
+ if (css_param->dvs2_coeff) {
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ }
+ if (css_param->shading_table) {
+ ia_css_shading_table_free(css_param->shading_table);
+ css_param->shading_table = NULL;
+ }
+ if (css_param->morph_table) {
+ ia_css_morph_table_free(css_param->morph_table);
+ css_param->morph_table = NULL;
+ }
+}
+
+static void atomisp_move_frame_to_activeq(struct ia_css_frame *frame,
+ struct atomisp_css_params_with_list *param)
+{
+ struct atomisp_video_pipe *pipe = vb_to_pipe(&frame->vb.vb2_buf);
+ unsigned long irqflags;
+
+ pipe->frame_params[frame->vb.vb2_buf.index] = param;
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_move_tail(&frame->queue, &pipe->activeq);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+}
+
+/*
+ * Check the parameter queue list and the buffer queue list for matching
+ * items, then set the parameters to CSS and enqueue the buffers to CSS.
+ * A buffer in the waiting list that is not bound to a per-frame parameter
+ * is enqueued into CSS as soon as the per-frame setting buffers before it
+ * have been enqueued.
+ */
+void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct ia_css_frame *frame = NULL, *frame_tmp;
+ struct atomisp_css_params_with_list *param = NULL, *param_tmp;
+ bool need_to_enqueue_buffer = false;
+ int i;
+
+ lockdep_assert_held(&asd->isp->mutex);
+
+ /*
+	 * CSS/FW requires that setting parameters and enqueuing buffers
+	 * happen only after the ISP has started streaming.
+ */
+ if (!asd->streaming)
+ return;
+
+ if (list_empty(&pipe->per_frame_params) ||
+ list_empty(&pipe->buffers_waiting_for_param))
+ return;
+
+ list_for_each_entry_safe(frame, frame_tmp,
+ &pipe->buffers_waiting_for_param, queue) {
+ i = frame->vb.vb2_buf.index;
+ if (pipe->frame_request_config_id[i]) {
+ list_for_each_entry_safe(param, param_tmp,
+ &pipe->per_frame_params, list) {
+ if (pipe->frame_request_config_id[i] != param->params.isp_config_id)
+ continue;
+
+ list_del(&param->list);
+
+ /*
+ * clear the request config id as the buffer
+ * will be handled and enqueued into CSS soon
+ */
+ pipe->frame_request_config_id[i] = 0;
+ atomisp_move_frame_to_activeq(frame, param);
+ need_to_enqueue_buffer = true;
+ break;
+ }
+
+			/* Stop if the parameter list was exhausted without a match */
+ if (list_entry_is_head(param, &pipe->per_frame_params, list))
+ break;
+ } else {
+ atomisp_move_frame_to_activeq(frame, NULL);
+ need_to_enqueue_buffer = true;
+ }
+ }
+
+ if (!need_to_enqueue_buffer)
+ return;
+
+ atomisp_qbuffers_to_css(asd);
+}
+
+/*
+ * Function to configure ISP parameters
+ */
+int atomisp_set_parameters(struct video_device *vdev,
+ struct atomisp_parameters *arg)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_css_params_with_list *param = NULL;
+ struct atomisp_css_params *css_param = &asd->params.css_param;
+ int ret;
+
+ lockdep_assert_held(&asd->isp->mutex);
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(asd->isp->dev, "%s: internal error!\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(asd->isp->dev, "set parameter(per_frame_setting %d) isp_config_id %d of %s\n",
+ arg->per_frame_setting, arg->isp_config_id, vdev->name);
+
+ if (arg->per_frame_setting) {
+ /*
+		 * Per-frame setting is enabled: allocate a new parameter
+		 * buffer to cache the parameters; they are only set to CSS
+		 * once the frame buffers are ready.
+		 * Per-frame setting only works for the main output frame.
+ */
+ param = kvzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ dev_err(asd->isp->dev, "%s: failed to alloc params buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+ css_param = &param->params;
+ }
+
+ ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_css_cp_dvs2_coefs(asd,
+ (struct ia_css_dvs2_coefficients *)arg->dvs2_coefs,
+ css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
+ css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ if (!arg->per_frame_setting) {
+ /* indicate to CSS that we have parameters to be updated */
+ asd->params.css_update_params_needed = true;
+ } else {
+ list_add_tail(&param->list, &pipe->per_frame_params);
+ atomisp_handle_parameter_and_buffer(pipe);
+ }
+
+ return 0;
+
+apply_parameter_failed:
+ if (css_param)
+ atomisp_free_css_parameters(css_param);
+ kvfree(param);
+
+ return ret;
+}
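+
+/*
+ * In other words, atomisp_set_parameters() has two paths: without
+ * per_frame_setting the parameters are copied into asd->params.css_param and
+ * css_update_params_needed is set so they are applied on the next frame;
+ * with per_frame_setting a separate atomisp_css_params_with_list is queued
+ * on pipe->per_frame_params and only applied once the buffer with the
+ * matching isp_config_id is handled in atomisp_handle_parameter_and_buffer().
+ */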
+
+/*
+ * Function to get or set ISP parameters
+ */
+int atomisp_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_parm *config)
+{
+ struct ia_css_pipe_config *vp_cfg =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ pipe_configs[IA_CSS_PIPE_ID_VIDEO];
+
+ /* Read parameter for 3A binary info */
+ if (flag == 0) {
+ struct ia_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(
+ &asd->params.curr_grid_info);
+
+ atomisp_curr_user_grid_info(asd, &config->info);
+
+ /* We always return the resolution and stride even if there is
+ * no valid metadata. This allows the caller to get the
+ * information needed to allocate user-space buffers. */
+ config->metadata_config.metadata_height = asd->
+ stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.resolution.height;
+ config->metadata_config.metadata_stride = asd->
+ stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.stride;
+
+ /* update dvs grid info */
+ if (dvs_grid_info)
+ memcpy(&config->dvs_grid,
+ dvs_grid_info,
+ sizeof(struct ia_css_dvs_grid_info));
+
+ if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ config->dvs_envelop.width = 0;
+ config->dvs_envelop.height = 0;
+ return 0;
+ }
+
+		/* update dvs envelope info */
+ config->dvs_envelop.width = vp_cfg->dvs_envelope.width;
+ config->dvs_envelop.height = vp_cfg->dvs_envelope.height;
+ return 0;
+ }
+
+ memcpy(&asd->params.css_param.wb_config, &config->wb_config,
+ sizeof(struct ia_css_wb_config));
+ memcpy(&asd->params.css_param.ob_config, &config->ob_config,
+ sizeof(struct ia_css_ob_config));
+ memcpy(&asd->params.css_param.dp_config, &config->dp_config,
+ sizeof(struct ia_css_dp_config));
+ memcpy(&asd->params.css_param.de_config, &config->de_config,
+ sizeof(struct ia_css_de_config));
+ memcpy(&asd->params.css_param.dz_config, &config->dz_config,
+ sizeof(struct ia_css_dz_config));
+ memcpy(&asd->params.css_param.ce_config, &config->ce_config,
+ sizeof(struct ia_css_ce_config));
+ memcpy(&asd->params.css_param.nr_config, &config->nr_config,
+ sizeof(struct ia_css_nr_config));
+ memcpy(&asd->params.css_param.ee_config, &config->ee_config,
+ sizeof(struct ia_css_ee_config));
+ memcpy(&asd->params.css_param.tnr_config, &config->tnr_config,
+ sizeof(struct ia_css_tnr_config));
+
+ if (asd->params.color_effect == V4L2_COLORFX_NEGATIVE) {
+ asd->params.css_param.cc_config.matrix[3] = -config->cc_config.matrix[3];
+ asd->params.css_param.cc_config.matrix[4] = -config->cc_config.matrix[4];
+ asd->params.css_param.cc_config.matrix[5] = -config->cc_config.matrix[5];
+ asd->params.css_param.cc_config.matrix[6] = -config->cc_config.matrix[6];
+ asd->params.css_param.cc_config.matrix[7] = -config->cc_config.matrix[7];
+ asd->params.css_param.cc_config.matrix[8] = -config->cc_config.matrix[8];
+ }
+
+ if (asd->params.color_effect != V4L2_COLORFX_SEPIA &&
+ asd->params.color_effect != V4L2_COLORFX_BW) {
+ memcpy(&asd->params.css_param.cc_config, &config->cc_config,
+ sizeof(struct ia_css_cc_config));
+ asd->params.config.cc_config = &asd->params.css_param.cc_config;
+ }
+
+ asd->params.config.wb_config = &asd->params.css_param.wb_config;
+ asd->params.config.ob_config = &asd->params.css_param.ob_config;
+ asd->params.config.de_config = &asd->params.css_param.de_config;
+ asd->params.config.dz_config = &asd->params.css_param.dz_config;
+ asd->params.config.ce_config = &asd->params.css_param.ce_config;
+ asd->params.config.dp_config = &asd->params.css_param.dp_config;
+ asd->params.config.nr_config = &asd->params.css_param.nr_config;
+ asd->params.config.ee_config = &asd->params.css_param.ee_config;
+ asd->params.config.tnr_config = &asd->params.css_param.tnr_config;
+ asd->params.css_update_params_needed = true;
+
+ return 0;
+}
+
+/*
+ * Function to configure color effect of the image
+ */
+int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
+ __s32 *effect)
+{
+ struct ia_css_cc_config *cc_config = NULL;
+ struct ia_css_macc_table *macc_table = NULL;
+ struct ia_css_ctc_table *ctc_table = NULL;
+ int ret = 0;
+ struct v4l2_control control;
+ struct atomisp_device *isp = asd->isp;
+
+ if (flag == 0) {
+ *effect = asd->params.color_effect;
+ return 0;
+ }
+
+ control.id = V4L2_CID_COLORFX;
+ control.value = *effect;
+ ret =
+ v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].sensor->ctrl_handler,
+ &control);
+ /*
+	 * If the color effect was set on the sensor successfully, return
+	 * 0 directly.
+ */
+ if (!ret) {
+ asd->params.color_effect = (u32)*effect;
+ return 0;
+ }
+
+ if (*effect == asd->params.color_effect)
+ return 0;
+
+ /*
+ * isp_subdev->params.macc_en should be set to false.
+ */
+ asd->params.macc_en = false;
+
+ switch (*effect) {
+ case V4L2_COLORFX_NONE:
+ macc_table = &asd->params.css_param.macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ cc_config = &sepia_cc_config;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ cc_config = &nega_cc_config;
+ break;
+ case V4L2_COLORFX_BW:
+ cc_config = &mono_cc_config;
+ break;
+ case V4L2_COLORFX_SKY_BLUE:
+ macc_table = &blue_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_GRASS_GREEN:
+ macc_table = &green_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_LOW:
+ macc_table = &skin_low_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN:
+ macc_table = &skin_medium_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_HIGH:
+ macc_table = &skin_high_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_VIVID:
+ ctc_table = &vivid_ctc_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+ atomisp_update_capture_mode(asd);
+
+ if (cc_config)
+ asd->params.config.cc_config = cc_config;
+ if (macc_table)
+ asd->params.config.macc_table = macc_table;
+ if (ctc_table)
+ atomisp_css_set_ctc_table(asd, ctc_table);
+ asd->params.color_effect = (u32)*effect;
+ asd->params.css_update_params_needed = true;
+ return 0;
+}
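+
+/*
+ * The color effect handling above prefers the sensor: V4L2_CID_COLORFX is
+ * first offered to the sensor's control handler and, only if that fails,
+ * the effect is emulated in the ISP with the cc/macc/ctc tables selected in
+ * the switch statement.
+ */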
+
+/*
+ * Function to configure bad pixel correction
+ */
+int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.bad_pixel_en;
+ return 0;
+ }
+ asd->params.bad_pixel_en = !!*value;
+
+ return 0;
+}
+
+/*
+ * Function to configure bad pixel correction params
+ */
+int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_dp_config *config)
+{
+ if (flag == 0) {
+ /* Get bad pixel from current setup */
+ if (atomisp_css_get_dp_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set bad pixel to isp parameters */
+ memcpy(&asd->params.css_param.dp_config, config,
+ sizeof(asd->params.css_param.dp_config));
+ asd->params.config.dp_config = &asd->params.css_param.dp_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to enable/disable video image stabilization
+ */
+int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0)
+ *value = asd->params.video_dis_en;
+ else
+ asd->params.video_dis_en = !!*value;
+
+ return 0;
+}
+
+/*
+ * Function to configure fixed pattern noise
+ */
+int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.fpn_en;
+ return 0;
+ }
+
+ if (*value == 0) {
+ asd->params.fpn_en = false;
+ return 0;
+ }
+
+ /* Add function to get black from sensor with shutter off */
+ return 0;
+}
+
+static unsigned int
+atomisp_bytesperline_to_padded_width(unsigned int bytesperline,
+ enum ia_css_frame_format format)
+{
+ switch (format) {
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ return bytesperline / 2;
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ return bytesperline / 4;
+ /* The following cases could be removed, but we leave them
+ in to document the formats that are included. */
+ case IA_CSS_FRAME_FORMAT_NV11:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ default:
+ return bytesperline;
+ }
+}
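+
+/*
+ * For example (bytesperline value assumed for illustration): a UYVY frame
+ * with bytesperline = 2560 has a padded width of 2560 / 2 = 1280 elements,
+ * while an RGBA888 frame with the same bytesperline has 2560 / 4 = 640
+ * elements; the planar and other listed formats simply return bytesperline
+ * unchanged.
+ */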
+
+static int
+atomisp_v4l2_framebuffer_to_css_frame(const struct v4l2_framebuffer *arg,
+ struct ia_css_frame **result)
+{
+ struct ia_css_frame *res = NULL;
+ unsigned int padded_width;
+ enum ia_css_frame_format sh_format;
+ char *tmp_buf = NULL;
+ int ret = 0;
+
+ sh_format = v4l2_fmt_to_sh_fmt(arg->fmt.pixelformat);
+ padded_width = atomisp_bytesperline_to_padded_width(
+ arg->fmt.bytesperline, sh_format);
+
+	/* Note: the padded width on an ia_css_frame is in elements, not in
+	   bytes. The RAW frame we use here should always be a 16bit RAW
+	   frame. This is why bytesperline / 2 equals the padded width. */
+ if (ia_css_frame_allocate(&res, arg->fmt.width, arg->fmt.height,
+ sh_format, padded_width, 0)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tmp_buf = vmalloc(arg->fmt.sizeimage);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ if (copy_from_user(tmp_buf, (void __user __force *)arg->base,
+ arg->fmt.sizeimage)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (hmm_store(res->data, tmp_buf, arg->fmt.sizeimage)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ if (ret && res)
+ ia_css_frame_free(res);
+ vfree(tmp_buf);
+ if (ret == 0)
+ *result = res;
+ return ret;
+}
+
+/*
+ * Function to configure fixed pattern noise table
+ */
+int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
+ struct v4l2_framebuffer *arg)
+{
+ struct ia_css_frame *raw_black_frame = NULL;
+ int ret;
+
+ if (!arg)
+ return -EINVAL;
+
+ ret = atomisp_v4l2_framebuffer_to_css_frame(arg, &raw_black_frame);
+ if (ret)
+ return ret;
+
+ if (sh_css_set_black_frame(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ raw_black_frame) != 0)
+ return -ENOMEM;
+
+ ia_css_frame_free(raw_black_frame);
+ return ret;
+}
+
+/*
+ * Function to configure false color correction
+ */
+int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+	/* Get the false color setting from the current setup */
+ if (flag == 0) {
+ *value = asd->params.false_color;
+ return 0;
+ }
+
+	/* Set the de config in the isp parameters */
+ if (*value) {
+ asd->params.config.de_config = NULL;
+ } else {
+ asd->params.css_param.de_config.pixelnoise = 0;
+ asd->params.config.de_config = &asd->params.css_param.de_config;
+ }
+ asd->params.css_update_params_needed = true;
+ asd->params.false_color = *value;
+ return 0;
+}
+
+/*
+ * Function to configure false color correction params
+ */
+int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_de_config *config)
+{
+ if (flag == 0) {
+ /* Get false color from current setup */
+ if (atomisp_css_get_de_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set false color to isp parameters */
+ memcpy(&asd->params.css_param.de_config, config,
+ sizeof(asd->params.css_param.de_config));
+ asd->params.config.de_config = &asd->params.css_param.de_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure white balance params
+ */
+int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_wb_config *config)
+{
+ if (flag == 0) {
+ /* Get white balance from current setup */
+ if (atomisp_css_get_wb_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set white balance to isp parameters */
+ memcpy(&asd->params.css_param.wb_config, config,
+ sizeof(asd->params.css_param.wb_config));
+ asd->params.config.wb_config = &asd->params.css_param.wb_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_config *config)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ dev_dbg(isp->dev, ">%s %d\n", __func__, flag);
+
+ if (flag == 0) {
+		/* Get the 3A config from the current setup */
+ if (atomisp_css_get_3a_config(asd, config))
+ return -EINVAL;
+ } else {
+		/* Set the 3A config in the isp parameters */
+ memcpy(&asd->params.css_param.s3a_config, config,
+ sizeof(asd->params.css_param.s3a_config));
+ asd->params.config.s3a_config = &asd->params.css_param.s3a_config;
+ asd->params.css_update_params_needed = true;
+ }
+
+ dev_dbg(isp->dev, "<%s %d\n", __func__, flag);
+ return 0;
+}
+
+/*
+ * Function to setup digital zoom
+ */
+int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ u32 zoom;
+ struct atomisp_device *isp = asd->isp;
+
+ unsigned int max_zoom = MRFLD_MAX_ZOOM_FACTOR;
+
+ if (flag == 0) {
+ atomisp_css_get_zoom_factor(asd, &zoom);
+ *value = max_zoom - zoom;
+ } else {
+ if (*value < 0)
+ return -EINVAL;
+
+ zoom = max_zoom - min_t(u32, max_zoom - 1, *value);
+ atomisp_css_set_zoom_factor(asd, zoom);
+
+ dev_dbg(isp->dev, "%s, zoom: %d\n", __func__, zoom);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
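+
+/*
+ * The user-visible zoom value is the inverse of the CSS zoom factor: a value
+ * of 0 maps to the largest factor, max_zoom, and larger values map to
+ * smaller factors, clamped via min_t() so the factor never drops below 1.
+ */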
+
+static void __atomisp_update_stream_env(struct atomisp_sub_device *asd,
+ u16 stream_index, struct atomisp_input_stream_info *stream_info)
+{
+ int i;
+
+	/* assign the virtual channel id returned by the sensor driver query */
+ asd->stream_env[stream_index].ch_id = stream_info->ch_id;
+ asd->stream_env[stream_index].isys_configs = stream_info->isys_configs;
+ for (i = 0; i < stream_info->isys_configs; i++) {
+ asd->stream_env[stream_index].isys_info[i].input_format =
+ stream_info->isys_info[i].input_format;
+ asd->stream_env[stream_index].isys_info[i].width =
+ stream_info->isys_info[i].width;
+ asd->stream_env[stream_index].isys_info[i].height =
+ stream_info->isys_info[i].height;
+ }
+}
+
+static void __atomisp_init_stream_info(u16 stream_index,
+ struct atomisp_input_stream_info *stream_info)
+{
+ int i;
+
+ stream_info->enable = 1;
+ stream_info->stream = stream_index;
+ stream_info->ch_id = 0;
+ stream_info->isys_configs = 0;
+ for (i = 0; i < MAX_STREAMS_PER_CHANNEL; i++) {
+ stream_info->isys_info[i].input_format = 0;
+ stream_info->isys_info[i].width = 0;
+ stream_info->isys_info[i].height = 0;
+ }
+}
+
+static void atomisp_fill_pix_format(struct v4l2_pix_format *f,
+ u32 width, u32 height,
+ const struct atomisp_format_bridge *br_fmt)
+{
+ u32 bytes;
+
+ f->width = width;
+ f->height = height;
+ f->pixelformat = br_fmt->pixelformat;
+
+ /* Adding padding to width for bytesperline calculation */
+ width = ia_css_frame_pad_width(width, br_fmt->sh_fmt);
+ bytes = BITS_TO_BYTES(br_fmt->depth * width);
+
+ if (br_fmt->planar)
+ f->bytesperline = width;
+ else
+ f->bytesperline = bytes;
+
+ f->sizeimage = PAGE_ALIGN(height * bytes);
+
+ if (f->field == V4L2_FIELD_ANY)
+ f->field = V4L2_FIELD_NONE;
+
+ /*
+ * FIXME: do we need to set this up differently, depending on the
+ * sensor or the pipeline?
+ */
+ f->colorspace = V4L2_COLORSPACE_REC709;
+ f->ycbcr_enc = V4L2_YCBCR_ENC_709;
+ f->xfer_func = V4L2_XFER_FUNC_709;
+}
+
+/* Get sensor padding values for the non padded width x height resolution */
+void atomisp_get_padding(struct atomisp_device *isp, u32 width, u32 height,
+ u32 *padding_w, u32 *padding_h)
+{
+ struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
+ struct v4l2_rect native_rect = input->native_rect;
+ const struct atomisp_in_fmt_conv *fc = NULL;
+ u32 min_pad_w = ISP2400_MIN_PAD_W;
+ u32 min_pad_h = ISP2400_MIN_PAD_H;
+ struct v4l2_mbus_framefmt *sink;
+
+ if (!input->crop_support) {
+ *padding_w = pad_w;
+ *padding_h = pad_h;
+ return;
+ }
+
+ width = min(width, input->active_rect.width);
+ height = min(height, input->active_rect.height);
+
+ if (input->binning_support && width <= (input->active_rect.width / 2) &&
+ height <= (input->active_rect.height / 2)) {
+ native_rect.width /= 2;
+ native_rect.height /= 2;
+ }
+
+ *padding_w = min_t(u32, (native_rect.width - width) & ~1, pad_w);
+ *padding_h = min_t(u32, (native_rect.height - height) & ~1, pad_h);
+
+ /* The below minimum padding requirements are for BYT / ISP2400 only */
+ if (IS_ISP2401)
+ return;
+
+ sink = atomisp_subdev_get_ffmt(&isp->asd.subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ if (sink)
+ fc = atomisp_find_in_fmt_conv(sink->code);
+ if (!fc) {
+ dev_warn(isp->dev, "%s: Could not get sensor format\n", __func__);
+ goto apply_min_padding;
+ }
+
+ /*
+	 * The ISP only supports GRBG; for other bayer orders additional padding
+	 * is used so that the raw sensor data can be cropped to fix the order.
+ */
+ if (fc->bayer_order == IA_CSS_BAYER_ORDER_RGGB ||
+ fc->bayer_order == IA_CSS_BAYER_ORDER_GBRG)
+ min_pad_w += 2;
+
+ if (fc->bayer_order == IA_CSS_BAYER_ORDER_BGGR ||
+ fc->bayer_order == IA_CSS_BAYER_ORDER_GBRG)
+ min_pad_h += 2;
+
+apply_min_padding:
+ *padding_w = max_t(u32, *padding_w, min_pad_w);
+ *padding_h = max_t(u32, *padding_h, min_pad_h);
+}
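+
+/*
+ * Illustration with assumed numbers: for a requested 1920x1080 on a sensor
+ * with a 1936x1096 native rect, padding_w = min((1936 - 1920) & ~1, pad_w)
+ * and padding_h = min((1096 - 1080) & ~1, pad_h); on ISP2400 the result is
+ * then raised to at least ISP2400_MIN_PAD_W x ISP2400_MIN_PAD_H, plus 2
+ * extra columns/lines when bayer-order cropping is needed.
+ */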
+
+int atomisp_s_sensor_power(struct atomisp_device *isp, unsigned int input, bool on)
+{
+ int ret;
+
+ if (isp->inputs[input].sensor_on == on)
+ return 0;
+
+ ret = v4l2_subdev_call(isp->inputs[input].sensor, core, s_power, on);
+ if (ret && ret != -ENOIOCTLCMD) {
+ dev_err(isp->dev, "Error setting sensor power %d: %d\n", on, ret);
+ return ret;
+ }
+
+ isp->inputs[input].sensor_on = on;
+ return 0;
+}
+
+int atomisp_select_input(struct atomisp_device *isp, unsigned int input)
+{
+ unsigned int input_orig = isp->asd.input_curr;
+ int ret;
+
+ /* Power on new sensor */
+ ret = atomisp_s_sensor_power(isp, input, 1);
+ if (ret)
+ return ret;
+
+ isp->asd.input_curr = input;
+
+ /* Power off previous sensor */
+ if (input != input_orig)
+ atomisp_s_sensor_power(isp, input_orig, 0);
+
+ atomisp_setup_input_links(isp);
+ return 0;
+}
+
+/*
+ * Ensure the CSI-receiver -> ISP link for input_curr is marked as enabled and
+ * the other CSI-receiver -> ISP links are disabled.
+ */
+void atomisp_setup_input_links(struct atomisp_device *isp)
+{
+ struct media_link *link;
+
+ lockdep_assert_held(&isp->media_dev.graph_mutex);
+
+ for (int i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ link = media_entity_find_link(
+ &isp->csi2_port[i].subdev.entity.pads[CSI2_PAD_SOURCE],
+ &isp->asd.subdev.entity.pads[ATOMISP_SUBDEV_PAD_SINK]);
+ if (!link) {
+ dev_err(isp->dev, "Error cannot find CSI2-port[%d] -> ISP link\n", i);
+ continue; /* Should never happen */
+ }
+
+ /*
+ * Modify the flags directly, calling media_entity_setup_link()
+ * will end up calling atomisp_link_setup() which calls this
+ * function again leading to endless recursion.
+ */
+ if (isp->sensor_subdevs[i] == isp->inputs[isp->asd.input_curr].csi_remote_source)
+ link->flags |= MEDIA_LNK_FL_ENABLED;
+ else
+ link->flags &= ~MEDIA_LNK_FL_ENABLED;
+
+ link->reverse->flags = link->flags;
+ }
+}
+
+static int atomisp_set_sensor_crop_and_fmt(struct atomisp_device *isp,
+ struct v4l2_mbus_framefmt *ffmt,
+ int which)
+{
+ struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
+ struct v4l2_subdev_selection sel = {
+ .which = which,
+ .target = V4L2_SEL_TGT_CROP,
+ .r.width = ffmt->width,
+ .r.height = ffmt->height,
+ };
+ struct v4l2_subdev_format format = {
+ .which = which,
+ .format = *ffmt,
+ };
+ struct v4l2_subdev_state *sd_state;
+ int ret = 0;
+
+ if (!input->sensor)
+ return -EINVAL;
+
+ /*
+ * Some old sensor drivers already write the registers on set_fmt
+	 * instead of at stream-on, so power on the sensor now (on newer
+ * sensor drivers the s_power op is a no-op).
+ */
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ ret = atomisp_s_sensor_power(isp, isp->asd.input_curr, 1);
+ if (ret)
+ return ret;
+ }
+
+ sd_state = (which == V4L2_SUBDEV_FORMAT_TRY) ? input->try_sd_state :
+ input->sensor->active_state;
+ if (sd_state)
+ v4l2_subdev_lock_state(sd_state);
+
+ if (!input->crop_support)
+ goto set_fmt;
+
+	/* Cropping is done before binning; when binning, double the crop rect */
+ if (input->binning_support && sel.r.width <= (input->native_rect.width / 2) &&
+ sel.r.height <= (input->native_rect.height / 2)) {
+ sel.r.width *= 2;
+ sel.r.height *= 2;
+ }
+
+ /* Clamp to avoid top/left calculations overflowing */
+ sel.r.width = min(sel.r.width, input->native_rect.width);
+ sel.r.height = min(sel.r.height, input->native_rect.height);
+
+ sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
+ sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;
+
+ ret = v4l2_subdev_call(input->sensor, pad, set_selection, sd_state, &sel);
+ if (ret)
+ dev_err(isp->dev, "Error setting crop to (%d,%d)/%ux%u: %d\n",
+ sel.r.left, sel.r.top, sel.r.width, sel.r.height, ret);
+
+set_fmt:
+ if (ret == 0) {
+ ret = v4l2_subdev_call(input->sensor, pad, set_fmt, sd_state, &format);
+ dev_dbg(isp->dev, "Set sensor format ret: %d size %dx%d\n",
+ ret, format.format.width, format.format.height);
+ }
+
+ if (sd_state)
+ v4l2_subdev_unlock_state(sd_state);
+
+ /* Propagate new fmt to sensor ISP */
+ if (ret == 0 && which == V4L2_SUBDEV_FORMAT_ACTIVE && input->sensor_isp) {
+ sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor_isp);
+
+ format.pad = SENSOR_ISP_PAD_SINK;
+ ret = v4l2_subdev_call(input->sensor_isp, pad, set_fmt, sd_state, &format);
+ dev_dbg(isp->dev, "Set sensor ISP sink format ret: %d size %dx%d\n",
+ ret, format.format.width, format.format.height);
+
+ if (ret == 0) {
+ format.pad = SENSOR_ISP_PAD_SOURCE;
+ ret = v4l2_subdev_call(input->sensor_isp, pad, set_fmt, sd_state, &format);
+ dev_dbg(isp->dev, "Set sensor ISP source format ret: %d size %dx%d\n",
+ ret, format.format.width, format.format.height);
+ }
+
+ if (sd_state)
+ v4l2_subdev_unlock_state(sd_state);
+ }
+
+ /* Propagate new fmt to CSI port */
+ if (ret == 0 && which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ format.pad = CSI2_PAD_SINK;
+ ret = v4l2_subdev_call(input->csi_port, pad, set_fmt, NULL, &format);
+ if (ret)
+ return ret;
+ }
+
+ *ffmt = format.format;
+ return ret;
+}
+
+/* This function looks up the closest available resolution. */
+int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
+ const struct atomisp_format_bridge **fmt_ret,
+ const struct atomisp_format_bridge **snr_fmt_ret)
+{
+ const struct atomisp_format_bridge *fmt, *snr_fmt;
+ struct atomisp_sub_device *asd = &isp->asd;
+ struct v4l2_mbus_framefmt ffmt = { };
+ u32 padding_w, padding_h;
+ int ret;
+
+ fmt = atomisp_get_format_bridge(f->pixelformat);
+ /* Currently, raw formats are broken!!! */
+ if (!fmt || fmt->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
+ f->pixelformat = V4L2_PIX_FMT_YUV420;
+
+ fmt = atomisp_get_format_bridge(f->pixelformat);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ /*
+ * The preview pipeline does not support width > 1920. Also limit height
+ * to avoid sensor drivers still picking a too wide resolution.
+ */
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ f->width = min(f->width, 1920U);
+ f->height = min(f->height, 1440U);
+ }
+
+ /*
+ * atomisp_set_fmt() will set the sensor resolution to the requested
+ * resolution + padding. Add padding here and remove it again after
+ * the set_fmt call, like atomisp_set_fmt_to_snr() does.
+ */
+ atomisp_get_padding(isp, f->width, f->height, &padding_w, &padding_h);
+ v4l2_fill_mbus_format(&ffmt, f, fmt->mbus_code);
+ ffmt.width += padding_w;
+ ffmt.height += padding_h;
+
+ dev_dbg(isp->dev, "try_mbus_fmt: try %ux%u\n", ffmt.width, ffmt.height);
+
+ ret = atomisp_set_sensor_crop_and_fmt(isp, &ffmt, V4L2_SUBDEV_FORMAT_TRY);
+ if (ret)
+ return ret;
+
+ dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n", ffmt.width, ffmt.height);
+
+ snr_fmt = atomisp_get_format_bridge_from_mbus(ffmt.code);
+ if (!snr_fmt) {
+ dev_err(isp->dev, "unknown sensor format 0x%8.8x\n",
+ ffmt.code);
+ return -EINVAL;
+ }
+
+ f->width = ffmt.width - padding_w;
+ f->height = ffmt.height - padding_h;
+
+ /*
+	 * If the format is JPEG or custom RAW, the width and height will
+	 * not satisfy the normal atomisp requirements, so there is no need
+	 * to check the conditions below. Just use what the sensor driver
+	 * returns.
+ */
+ if (f->pixelformat == V4L2_PIX_FMT_JPEG ||
+ f->pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW)
+ goto out_fill_pix_format;
+
+	/* clamp and align the app-requested size to the ISP limits */
+ f->width = rounddown(clamp_t(u32, f->width, ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH);
+ f->height = rounddown(clamp_t(u32, f->height, ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT);
+
+out_fill_pix_format:
+ atomisp_fill_pix_format(f, f->width, f->height, fmt);
+
+ if (fmt_ret)
+ *fmt_ret = fmt;
+
+ if (snr_fmt_ret)
+ *snr_fmt_ret = snr_fmt;
+
+ return 0;
+}
+
+enum mipi_port_id atomisp_port_to_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port)
+{
+ switch (port) {
+ case ATOMISP_CAMERA_PORT_PRIMARY:
+ return MIPI_PORT0_ID;
+ case ATOMISP_CAMERA_PORT_SECONDARY:
+ return MIPI_PORT1_ID;
+ case ATOMISP_CAMERA_PORT_TERTIARY:
+ return MIPI_PORT2_ID;
+ default:
+ dev_err(isp->dev, "unsupported port: %d\n", port);
+ return MIPI_PORT0_ID;
+ }
+}
+
+static inline int atomisp_set_sensor_mipi_to_isp(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct camera_mipi_info *mipi_info)
+{
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
+ const struct atomisp_in_fmt_conv *fc;
+ int mipi_freq = 0;
+ unsigned int input_format, bayer_order;
+ enum atomisp_input_format metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+ u32 mipi_port, metadata_width = 0, metadata_height = 0;
+
+ ctrl.id = V4L2_CID_LINK_FREQ;
+ if (v4l2_g_ctrl(input->sensor->ctrl_handler, &ctrl) == 0)
+ mipi_freq = ctrl.value;
+
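+	/*
+	 * Program the input system (ISYS) stream format(s): single-stream or
+	 * dual-stream configuration.
+	 */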
+ if (asd->stream_env[stream_id].isys_configs == 1) {
+ input_format =
+ asd->stream_env[stream_id].isys_info[0].input_format;
+ atomisp_css_isys_set_format(asd, stream_id,
+ input_format, IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ } else if (asd->stream_env[stream_id].isys_configs == 2) {
+ atomisp_css_isys_two_stream_cfg_update_stream1(
+ asd, stream_id,
+ asd->stream_env[stream_id].isys_info[0].input_format,
+ asd->stream_env[stream_id].isys_info[0].width,
+ asd->stream_env[stream_id].isys_info[0].height);
+
+ atomisp_css_isys_two_stream_cfg_update_stream2(
+ asd, stream_id,
+ asd->stream_env[stream_id].isys_info[1].input_format,
+ asd->stream_env[stream_id].isys_info[1].width,
+ asd->stream_env[stream_id].isys_info[1].height);
+ }
+
+	/*
+	 * Compatibility for sensors which neither provide a media bus code
+	 * in s_mbus_framefmt() nor support pad formats.
+	 */
+ if (mipi_info && mipi_info->input_format != -1) {
+ bayer_order = mipi_info->raw_bayer_order;
+
+		/* The input stream config still needs to be configured */
+ /* TODO: Check if this is necessary */
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ mipi_info->input_format);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->atomisp_in_fmt;
+ metadata_format = mipi_info->metadata_format;
+ metadata_width = mipi_info->metadata_width;
+ metadata_height = mipi_info->metadata_height;
+ } else {
+ struct v4l2_mbus_framefmt *sink;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ fc = atomisp_find_in_fmt_conv(sink->code);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->atomisp_in_fmt;
+ bayer_order = fc->bayer_order;
+ }
+
+ atomisp_css_input_set_format(asd, stream_id, input_format);
+ atomisp_css_input_set_bayer_order(asd, stream_id, bayer_order);
+
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(metadata_format);
+ if (!fc)
+ return -EINVAL;
+
+ input_format = fc->atomisp_in_fmt;
+ mipi_port = atomisp_port_to_mipi_port(isp, input->port);
+ atomisp_css_input_configure_port(asd, mipi_port,
+ isp->sensor_lanes[mipi_port],
+ 0xffff4, mipi_freq,
+ input_format,
+ metadata_width, metadata_height);
+ return 0;
+}
+
+static int configure_pp_input_nop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ return 0;
+}
+
+static int configure_output_nop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format sh_fmt)
+{
+ return 0;
+}
+
+static int get_frame_info_nop(struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *finfo)
+{
+ return 0;
+}
+
+/*
+ * Resets CSS parameters that depend on input resolution.
+ *
+ * Update params like CSS RAW binning, 2ppc mode and pp_input
+ * which depend on input size, but are not automatically
+ * handled in CSS when the input resolution is changed.
+ */
+static int css_input_resolution_changed(struct atomisp_sub_device *asd,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
+ unsigned int i;
+
+ dev_dbg(asd->isp->dev, "css_input_resolution_changed to %ux%u\n",
+ ffmt->width, ffmt->height);
+
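+	/* Only the ISP2400 input system runs at two pixels per clock */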
+ if (IS_ISP2401)
+ atomisp_css_input_set_two_pixels_per_clock(asd, false);
+ else
+ atomisp_css_input_set_two_pixels_per_clock(asd, true);
+
+	/*
+	 * If the sensor input changed, the metadata resolution has changed
+	 * with it. Release all metadata buffers here so that they are
+	 * re-allocated the next time in reqbufs.
+	 */
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
+ list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ return 0;
+
+ /*
+ * TODO: atomisp_css_preview_configure_pp_input() not
+ * reset due to CSS bug tracked as PSI BZ 115124
+ */
+}
+
+static int atomisp_set_fmt_to_isp(struct video_device *vdev,
+ struct ia_css_frame_info *output_info,
+ const struct v4l2_pix_format *pix)
+{
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
+ const struct atomisp_format_bridge *format;
+ struct v4l2_rect *isp_sink_crop;
+ enum ia_css_pipe_id pipe_id;
+ int (*configure_output)(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format sh_fmt) =
+ configure_output_nop;
+ int (*get_frame_info)(struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *finfo) =
+ get_frame_info_nop;
+ int (*configure_pp_input)(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height) =
+ configure_pp_input_nop;
+ const struct atomisp_in_fmt_conv *fc = NULL;
+ struct v4l2_mbus_framefmt *ffmt;
+ int ret, i;
+
+ isp_sink_crop = atomisp_subdev_get_rect(
+ &asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, V4L2_SEL_TGT_CROP);
+
+ format = atomisp_get_format_bridge(pix->pixelformat);
+ if (!format)
+ return -EINVAL;
+
+ mipi_info = atomisp_to_sensor_mipi_info(input->sensor);
+
+ if (atomisp_set_sensor_mipi_to_isp(asd, ATOMISP_INPUT_STREAM_GENERAL,
+ mipi_info))
+ return -EINVAL;
+
+ if (mipi_info)
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(mipi_info->input_format);
+ if (!fc) {
+ ffmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ fc = atomisp_find_in_fmt_conv(ffmt->code);
+ }
+ if (!fc)
+ return -EINVAL;
+
+ if (format->sh_fmt == IA_CSS_FRAME_FORMAT_RAW &&
+ raw_output_format_match_input(fc->atomisp_in_fmt, pix->pixelformat))
+ return -EINVAL;
+
+ /*
+ * Configure viewfinder also when vfpp is disabled: the
+ * CSS still requires viewfinder configuration.
+ */
+ {
+ u32 width, height;
+
+ if (pix->width < 640 || pix->height < 480) {
+ width = pix->width;
+ height = pix->height;
+ } else {
+ width = 640;
+ height = 480;
+ }
+
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ atomisp_css_video_configure_viewfinder(asd, width, height, 0,
+ IA_CSS_FRAME_FORMAT_NV12);
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_STILL_CAPTURE ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ atomisp_css_capture_configure_viewfinder(asd, width, height, 0,
+ IA_CSS_FRAME_FORMAT_NV12);
+ }
+ }
+
+ atomisp_css_input_set_mode(asd, IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+
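+	/* vf_pp is only left enabled when vfpp is set to ATOMISP_VFPP_ENABLE */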
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipe_extra_configs[i].disable_vf_pp = asd->vfpp->val != ATOMISP_VFPP_ENABLE;
+
+	/* The ISP2401 new input system needs to use the copy pipe */
+ if (asd->copy_mode) {
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, false);
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+		/* video is the same in continuous capture and online modes */
+ configure_output = atomisp_css_video_configure_output;
+ get_frame_info = atomisp_css_video_get_output_frame_info;
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ configure_output = atomisp_css_video_configure_output;
+ get_frame_info = atomisp_css_video_get_output_frame_info;
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ configure_output = atomisp_css_preview_configure_output;
+ get_frame_info = atomisp_css_preview_get_output_frame_info;
+ configure_pp_input = atomisp_css_preview_configure_pp_input;
+ pipe_id = IA_CSS_PIPE_ID_PREVIEW;
+ } else {
+ if (format->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
+ atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_RAW);
+ atomisp_css_enable_dz(asd, false);
+ } else {
+ atomisp_update_capture_mode(asd);
+ }
+
+ /* in case of ANR, force capture pipe to offline mode */
+ atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL,
+ !asd->params.low_light);
+
+ configure_output = atomisp_css_capture_configure_output;
+ get_frame_info = atomisp_css_capture_get_output_frame_info;
+ configure_pp_input = atomisp_css_capture_configure_pp_input;
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ if (asd->run_mode->val != ATOMISP_RUN_MODE_STILL_CAPTURE) {
+ dev_err(isp->dev,
+ "Need to set the running mode first\n");
+ asd->run_mode->val = ATOMISP_RUN_MODE_STILL_CAPTURE;
+ }
+ }
+
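+	/*
+	 * Pass the line width: bytesperline as-is for planar formats,
+	 * converted from bytes to pixels for packed formats.
+	 */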
+ if (asd->copy_mode)
+ ret = atomisp_css_copy_configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL,
+ pix->width, pix->height,
+ format->planar ? pix->bytesperline :
+ pix->bytesperline * 8 / format->depth,
+ format->sh_fmt);
+ else
+ ret = configure_output(asd, pix->width, pix->height,
+ format->planar ? pix->bytesperline :
+ pix->bytesperline * 8 / format->depth,
+ format->sh_fmt);
+ if (ret) {
+ dev_err(isp->dev, "configure_output %ux%u, format %8.8x\n",
+ pix->width, pix->height, format->sh_fmt);
+ return -EINVAL;
+ }
+
+ ret = configure_pp_input(asd, isp_sink_crop->width, isp_sink_crop->height);
+ if (ret) {
+ dev_err(isp->dev, "configure_pp_input %ux%u\n",
+ isp_sink_crop->width,
+ isp_sink_crop->height);
+ return -EINVAL;
+ }
+ if (asd->copy_mode)
+ ret = atomisp_css_copy_get_output_frame_info(asd,
+ ATOMISP_INPUT_STREAM_GENERAL,
+ output_info);
+ else
+ ret = get_frame_info(asd, output_info);
+ if (ret) {
+ dev_err(isp->dev, "__get_frame_info %ux%u (padded to %u) returned %d\n",
+ pix->width, pix->height, pix->bytesperline, ret);
+ return ret;
+ }
+
+ atomisp_update_grid_info(asd, pipe_id);
+ return 0;
+}
+
+static void atomisp_get_dis_envelop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int *dvs_env_w, unsigned int *dvs_env_h)
+{
+ if (asd->params.video_dis_en &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+		/*
+		 * The envelope is 20% of the output resolution. It must not be
+		 * rounded up, as that would cause ISP timeouts and color
+		 * switch issues.
+		 */
+ *dvs_env_w = rounddown(width / 5, ATOM_ISP_STEP_WIDTH);
+ *dvs_env_h = rounddown(height / 5, ATOM_ISP_STEP_HEIGHT);
+ }
+
+ asd->params.dis_proj_data_valid = false;
+ asd->params.css_update_params_needed = true;
+}
+
+static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
+ const struct v4l2_pix_format *f)
+{
+ struct v4l2_mbus_framefmt *sink, *src;
+
+ if (!IS_ISP2401) {
+ /* Only used for the new input system */
+ asd->copy_mode = false;
+ return;
+ }
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
+ src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SOURCE);
+
+ if (sink->code == src->code && sink->width == f->width && sink->height == f->height)
+ asd->copy_mode = true;
+ else
+ asd->copy_mode = false;
+
+ dev_dbg(asd->isp->dev, "copy_mode: %d\n", asd->copy_mode);
+}
+
+static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_pix_format *f,
+ unsigned int dvs_env_w, unsigned int dvs_env_h)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = asd->isp;
+ const struct atomisp_format_bridge *format;
+ struct v4l2_mbus_framefmt req_ffmt, ffmt = { };
+ struct atomisp_input_stream_info *stream_info =
+ (struct atomisp_input_stream_info *)&ffmt.reserved;
+ int ret;
+
+ format = atomisp_get_format_bridge(f->pixelformat);
+ if (!format)
+ return -EINVAL;
+
+ v4l2_fill_mbus_format(&ffmt, f, format->mbus_code);
+ ffmt.height += asd->sink_pad_padding_h + dvs_env_h;
+ ffmt.width += asd->sink_pad_padding_w + dvs_env_w;
+
+ dev_dbg(isp->dev, "s_mbus_fmt: ask %ux%u (padding %ux%u, dvs %ux%u)\n",
+ ffmt.width, ffmt.height, asd->sink_pad_padding_w, asd->sink_pad_padding_h,
+ dvs_env_w, dvs_env_h);
+
+ __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info);
+
+ req_ffmt = ffmt;
+
+ /* Disable dvs if resolution can't be supported by sensor */
+ if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ ret = atomisp_set_sensor_crop_and_fmt(isp, &ffmt, V4L2_SUBDEV_FORMAT_TRY);
+ if (ret)
+ return ret;
+
+ dev_dbg(isp->dev, "video dis: sensor width: %d, height: %d\n",
+ ffmt.width, ffmt.height);
+
+ if (ffmt.width < req_ffmt.width ||
+ ffmt.height < req_ffmt.height) {
+ req_ffmt.height -= dvs_env_h;
+ req_ffmt.width -= dvs_env_w;
+ ffmt = req_ffmt;
+ dev_warn(isp->dev,
+				 "cannot enable video dis due to sensor limitation\n");
+ asd->params.video_dis_en = false;
+ }
+ }
+
+ ret = atomisp_set_sensor_crop_and_fmt(isp, &ffmt, V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret)
+ return ret;
+
+ __atomisp_update_stream_env(asd, ATOMISP_INPUT_STREAM_GENERAL, stream_info);
+
+ dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
+ ffmt.width, ffmt.height);
+
+ if (ffmt.width < ATOM_ISP_STEP_WIDTH ||
+ ffmt.height < ATOM_ISP_STEP_HEIGHT)
+ return -EINVAL;
+
+ if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ (ffmt.width < req_ffmt.width || ffmt.height < req_ffmt.height)) {
+ dev_warn(isp->dev,
+			 "cannot enable video dis due to sensor limitation\n");
+ asd->params.video_dis_en = false;
+ }
+
+ atomisp_subdev_set_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, &ffmt);
+
+ return css_input_resolution_changed(asd, &ffmt);
+}
+
+int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
+{
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ const struct atomisp_format_bridge *format_bridge;
+ const struct atomisp_format_bridge *snr_format_bridge;
+ struct ia_css_frame_info output_info;
+ unsigned int dvs_env_w = 0, dvs_env_h = 0;
+ struct v4l2_mbus_framefmt isp_source_fmt = {0};
+ struct v4l2_rect isp_sink_crop;
+ int ret;
+
+ ret = atomisp_pipe_check(pipe, true);
+ if (ret)
+ return ret;
+
+ dev_dbg(isp->dev,
+ "setting resolution %ux%u bytesperline %u\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.bytesperline);
+
+	/* Ensure that the resolution is equal to or below the maximum supported */
+ ret = atomisp_try_fmt(isp, &f->fmt.pix, &format_bridge, &snr_format_bridge);
+ if (ret)
+ return ret;
+
+ pipe->sh_fmt = format_bridge->sh_fmt;
+ pipe->pix.pixelformat = format_bridge->pixelformat;
+
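+	/* Propagate the negotiated sensor media bus code to the ISP sink pad */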
+ atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK)->code =
+ snr_format_bridge->mbus_code;
+
+ isp_source_fmt.code = format_bridge->mbus_code;
+ atomisp_subdev_set_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE, &isp_source_fmt);
+
+ if (atomisp_subdev_format_conversion(asd)) {
+ atomisp_get_padding(isp, f->fmt.pix.width, f->fmt.pix.height,
+ &asd->sink_pad_padding_w, &asd->sink_pad_padding_h);
+ } else {
+ asd->sink_pad_padding_w = 0;
+ asd->sink_pad_padding_h = 0;
+ }
+
+ atomisp_get_dis_envelop(asd, f->fmt.pix.width, f->fmt.pix.height,
+ &dvs_env_w, &dvs_env_h);
+
+ ret = atomisp_set_fmt_to_snr(vdev, &f->fmt.pix, dvs_env_w, dvs_env_h);
+ if (ret) {
+ dev_warn(isp->dev,
+ "Set format to sensor failed with %d\n", ret);
+ return -EINVAL;
+ }
+
+ atomisp_csi_lane_config(isp);
+
+ atomisp_check_copy_mode(asd, &f->fmt.pix);
+
+ isp_sink_crop = *atomisp_subdev_get_rect(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK,
+ V4L2_SEL_TGT_CROP);
+
+	/*
+	 * Try to enable YUV downscaling if the ISP input is 10% (either
+	 * width or height) bigger than the desired result.
+	 */
+ if (!IS_MOFD ||
+ isp_sink_crop.width * 9 / 10 < f->fmt.pix.width ||
+ isp_sink_crop.height * 9 / 10 < f->fmt.pix.height ||
+ (atomisp_subdev_format_conversion(asd) &&
+ (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER))) {
+ isp_sink_crop.width = f->fmt.pix.width;
+ isp_sink_crop.height = f->fmt.pix.height;
+
+ atomisp_subdev_set_selection(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE, V4L2_SEL_TGT_COMPOSE,
+ 0, &isp_sink_crop);
+ } else {
+ struct v4l2_rect main_compose = {0};
+
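+		/*
+		 * Compose the largest rectangle with the requested aspect
+		 * ratio that fits inside the sink crop.
+		 */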
+ main_compose.width = isp_sink_crop.width;
+ main_compose.height =
+ DIV_ROUND_UP(main_compose.width * f->fmt.pix.height,
+ f->fmt.pix.width);
+ if (main_compose.height > isp_sink_crop.height) {
+ main_compose.height = isp_sink_crop.height;
+ main_compose.width =
+ DIV_ROUND_UP(main_compose.height *
+ f->fmt.pix.width,
+ f->fmt.pix.height);
+ }
+
+ atomisp_subdev_set_selection(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE,
+ V4L2_SEL_TGT_COMPOSE, 0,
+ &main_compose);
+ }
+
+ ret = atomisp_set_fmt_to_isp(vdev, &output_info, &f->fmt.pix);
+ if (ret) {
+ dev_warn(isp->dev, "Can't set format on ISP. Error %d\n", ret);
+ return -EINVAL;
+ }
+
+ atomisp_fill_pix_format(&pipe->pix, f->fmt.pix.width, f->fmt.pix.height, format_bridge);
+
+ f->fmt.pix = pipe->pix;
+
+ dev_dbg(isp->dev, "%s: %dx%d, image size: %d, %d bytes per line\n",
+ __func__,
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.sizeimage, f->fmt.pix.bytesperline);
+
+ return 0;
+}
+
+int atomisp_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *user_shading_table)
+{
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_shading_table *free_table;
+ unsigned int len_table;
+ int i;
+ int ret = 0;
+
+ if (!user_shading_table)
+ return -EINVAL;
+
+ if (!user_shading_table->enable) {
+ asd->params.config.shading_table = NULL;
+ asd->params.sc_en = false;
+ return 0;
+ }
+
+ /* If enabling, all tables must be set */
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (!user_shading_table->data[i])
+ return -EINVAL;
+ }
+
+ /* Shading table size per color */
+ if (user_shading_table->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ user_shading_table->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
+ return -EINVAL;
+
+ shading_table = atomisp_css_shading_table_alloc(
+ user_shading_table->width, user_shading_table->height);
+ if (!shading_table)
+ return -ENOMEM;
+
+ len_table = user_shading_table->width * user_shading_table->height *
+ ATOMISP_SC_TYPE_SIZE;
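+	/* Copy each color component's shading table from userspace */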
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ ret = copy_from_user(shading_table->data[i],
+ (void __user *)user_shading_table->data[i],
+ len_table);
+ if (ret) {
+ free_table = shading_table;
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ shading_table->sensor_width = user_shading_table->sensor_width;
+ shading_table->sensor_height = user_shading_table->sensor_height;
+ shading_table->fraction_bits = user_shading_table->fraction_bits;
+
+ free_table = asd->params.css_param.shading_table;
+ asd->params.css_param.shading_table = shading_table;
+ asd->params.config.shading_table = shading_table;
+ asd->params.sc_en = true;
+
+out:
+ if (free_table)
+ atomisp_css_shading_table_free(free_table);
+
+ return ret;
+}
+
+static int __checking_exp_id(struct atomisp_sub_device *asd, int exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->enable_raw_buffer_lock->val) {
+		dev_warn(isp->dev, "%s Raw Buffer Lock is disabled.\n", __func__);
+ return -EINVAL;
+ }
+ if (!asd->streaming) {
+ dev_err(isp->dev, "%s streaming %d invalid exp_id %d.\n",
+ __func__, exp_id, asd->streaming);
+ return -EINVAL;
+ }
+ if ((exp_id > ATOMISP_MAX_EXP_ID) || (exp_id <= 0)) {
+ dev_err(isp->dev, "%s exp_id %d invalid.\n", __func__, exp_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ memset(asd->raw_buffer_bitmap, 0, sizeof(asd->raw_buffer_bitmap));
+ asd->raw_buffer_locked_count = 0;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+}
+
+static int __is_raw_buffer_locked(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+ int ret;
+
+ if (__checking_exp_id(asd, exp_id))
+ return -EINVAL;
+
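+	/* One bit per exposure id, 32 ids per bitmap word */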
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ ret = ((*bitmap) & (1 << bit));
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+ return !ret;
+}
+
+static int __clear_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+
+ if (__is_raw_buffer_locked(asd, exp_id))
+ return -EINVAL;
+
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ (*bitmap) &= ~(1 << bit);
+ asd->raw_buffer_locked_count--;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+
+ dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n",
+ __func__, exp_id, asd->raw_buffer_locked_count);
+ return 0;
+}
+
+int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int value = *exp_id;
+ int ret;
+
+ lockdep_assert_held(&isp->mutex);
+
+ ret = __is_raw_buffer_locked(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
+ return -EINVAL;
+ }
+
+ dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value);
+ ret = atomisp_css_exp_id_capture(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d failed.\n", __func__, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int value = *exp_id;
+ int ret;
+
+ lockdep_assert_held(&isp->mutex);
+
+ ret = __clear_raw_buffer_bitmap(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
+ return -EINVAL;
+ }
+
+ dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value);
+ ret = atomisp_css_exp_id_unlock(asd, value);
+ if (ret)
+ dev_err(isp->dev, "%s exp_id %d failed, err %d.\n",
+ __func__, value, ret);
+
+ return ret;
+}
+
+int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
+ unsigned int *enable)
+{
+ bool value;
+
+ if (!enable)
+ return -EINVAL;
+
+ value = *enable > 0;
+
+ atomisp_en_dz_capt_pipe(asd, value);
+
+ return 0;
+}
+
+int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event)
+{
+ if (!event || !asd->streaming)
+ return -EINVAL;
+
+ lockdep_assert_held(&asd->isp->mutex);
+
+ dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n",
+ __func__, *event);
+
+ switch (*event) {
+ case V4L2_EVENT_FRAME_SYNC:
+ atomisp_sof_event(asd);
+ break;
+ case V4L2_EVENT_FRAME_END:
+ atomisp_eof_event(asd, 0);
+ break;
+ case V4L2_EVENT_ATOMISP_3A_STATS_READY:
+ atomisp_3a_stats_ready_event(asd, 0);
+ break;
+ case V4L2_EVENT_ATOMISP_METADATA_READY:
+ atomisp_metadata_ready_event(asd, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
new file mode 100644
index 000000000000..82199dc9284e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __ATOMISP_CMD_H__
+#define __ATOMISP_CMD_H__
+
+#include "../../include/linux/atomisp.h"
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "atomisp_internal.h"
+
+#include "ia_css_types.h"
+#include "ia_css.h"
+
+struct atomisp_device;
+struct ia_css_frame;
+
+#define MSI_ENABLE_BIT 16
+#define INTR_DISABLE_BIT 10
+#define BUS_MASTER_ENABLE 2
+#define MEMORY_SPACE_ENABLE 1
+#define INTR_IER 24
+#define INTR_IIR 16
+
+/* Helper function */
+void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
+ unsigned int size);
+struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd);
+struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev);
+int atomisp_reset(struct atomisp_device *isp);
+int atomisp_buffers_in_css(struct atomisp_video_pipe *pipe);
+void atomisp_buffer_done(struct ia_css_frame *frame, enum vb2_buffer_state state);
+void atomisp_flush_video_pipe(struct atomisp_video_pipe *pipe, enum vb2_buffer_state state,
+ bool warn_on_css_frames);
+void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd);
+
+/* Interrupt functions */
+void atomisp_msi_irq_init(struct atomisp_device *isp);
+void atomisp_msi_irq_uninit(struct atomisp_device *isp);
+void atomisp_assert_recovery_work(struct work_struct *work);
+irqreturn_t atomisp_isr(int irq, void *dev);
+irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
+const struct atomisp_format_bridge *get_atomisp_format_bridge_from_mbus(
+ u32 mbus_code);
+bool atomisp_is_mbuscode_raw(uint32_t code);
+
+/* Get internal fmt according to V4L2 fmt */
+bool atomisp_is_viewfinder_support(struct atomisp_device *isp);
+
+/* ISP features control function */
+
+/*
+ * Function to enable/disable lens geometry distortion correction (GDC) and
+ * chromatic aberration correction (CAC)
+ */
+int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/* Function to enable/disable low light mode (including ANR) */
+int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to enable/disable extra noise reduction (XNR) in low light
+ * condition
+ */
+int atomisp_xnr(struct atomisp_sub_device *asd, int flag, int *arg);
+
+int atomisp_formats(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_formats_config *config);
+
+/* Function to configure noise reduction */
+int atomisp_nr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_nr_config *config);
+
+/* Function to configure temporal noise reduction (TNR) */
+int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_tnr_config *config);
+
+/* Function to configure black level compensation */
+int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ob_config *config);
+
+/* Function to configure edge enhancement */
+int atomisp_ee(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ee_config *config);
+
+/* Function to update Gamma table for gamma, brightness and contrast config */
+int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gamma_table *config);
+
+/* Function to update Ctc table for Chroma Enhancement */
+int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ctc_table *config);
+
+/* Function to update gamma correction parameters */
+int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gc_config *config);
+
+/* Function to update Gdc table for gdc */
+int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_morph_table *config);
+
+/* Function to update table for macc */
+int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_macc_config *config);
+
+/* Function to get DIS statistics. */
+int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats);
+
+/* Function to get DVS2 BQ resolution settings */
+int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
+ struct atomisp_dvs2_bq_resolutions *bq_res);
+
+/* Function to set the DIS coefficients. */
+int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs);
+
+/* Function to set the DIS motion vector. */
+int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector);
+
+/* Function to set/get 3A stat from isp */
+int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_statistics *config);
+
+int atomisp_set_parameters(struct video_device *vdev,
+ struct atomisp_parameters *arg);
+
+/* Function to set/get isp parameters to isp */
+int atomisp_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_parm *config);
+
+/* Function to configure color effect of the image */
+int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
+ __s32 *effect);
+
+/* Function to configure bad pixel correction */
+int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/* Function to configure bad pixel correction params */
+int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_dp_config *config);
+
+/* Function to enable/disable video image stabilization */
+int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/* Function to configure fixed pattern noise */
+int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/* Function to configure fixed pattern noise table */
+int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
+ struct v4l2_framebuffer *config);
+
+/* Function to configure false color correction */
+int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/* Function to configure false color correction params */
+int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_de_config *config);
+
+/* Function to configure white balance params */
+int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_wb_config *config);
+
+int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_config *config);
+
+/* Function to setup digital zoom */
+int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/* Function to set the camera_prefiles.xml current sensor pixel array size */
+int atomisp_set_array_res(struct atomisp_sub_device *asd,
+ struct atomisp_resolution *config);
+
+/* Function to calculate real zoom region for every pipe */
+int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
+ struct ia_css_dz_config *dz_config,
+ enum ia_css_pipe_id css_pipe_id);
+
+int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *source_st,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_morph_table *source_morph_table,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_6axis_config *user_6axis_config,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param);
+
+int atomisp_compare_grid(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *atomgrid);
+
+/* Get sensor padding values for the non-padded width x height resolution */
+void atomisp_get_padding(struct atomisp_device *isp, u32 width, u32 height,
+ u32 *padding_w, u32 *padding_h);
+
+/* Set sensor power (no-op if already on/off) */
+int atomisp_s_sensor_power(struct atomisp_device *isp, unsigned int input, bool on);
+
+/* Select which sensor to use, must be called with a valid input */
+int atomisp_select_input(struct atomisp_device *isp, unsigned int input);
+
+/* Setup media-controller links to reflect input_curr setting */
+void atomisp_setup_input_links(struct atomisp_device *isp);
+
+/* This function looks up the closest available resolution. */
+int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
+ const struct atomisp_format_bridge **fmt_ret,
+ const struct atomisp_format_bridge **snr_fmt_ret);
+
+int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f);
+
+int atomisp_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *shading_table);
+
+void atomisp_free_internal_buffers(struct atomisp_sub_device *asd);
+
+int atomisp_freq_scaling(struct atomisp_device *vdev,
+ enum atomisp_dfs_mode mode,
+ bool force);
+
+void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
+ enum ia_css_buffer_type buf_type,
+ enum ia_css_pipe_id css_pipe_id,
+ bool q_buffers, enum atomisp_input_stream_id stream_id);
+
+/* Events. Only one event has to be exported for now. */
+void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id);
+
+enum mipi_port_id atomisp_port_to_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port);
+
+void atomisp_apply_css_parameters(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_params *css_param);
+void atomisp_free_css_parameters(struct atomisp_css_params *css_param);
+
+void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe);
+
+void atomisp_flush_params_queue(struct atomisp_video_pipe *asd);
+
+/* Functions for raw buffer operations, used after enabling raw buffer lock/unlock */
+int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id);
+int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id);
+
+void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd);
+
+/* Function to enable/disable zoom for capture pipe */
+int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
+ unsigned int *enable);
+
+u32 atomisp_get_pixel_depth(u32 pixelformat);
+
+/* Function for HAL to inject a fake event to wake up poll thread */
+int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event);
+
+/*
+ * Function for the HAL to query how many invalid frames there are at the
+ * beginning of the ISP pipeline output
+ */
+int atomisp_get_invalid_frame_num(struct video_device *vdev,
+ int *invalid_frame_num);
+
+int atomisp_power_off(struct device *dev);
+int atomisp_power_on(struct device *dev);
+#endif /* __ATOMISP_CMD_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp_common.h
new file mode 100644
index 000000000000..a2462fc306fb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_common.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __ATOMISP_COMMON_H__
+#define __ATOMISP_COMMON_H__
+
+#include "../../include/linux/atomisp.h"
+
+#include <linux/v4l2-mediabus.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "atomisp_compat.h"
+
+#include "ia_css.h"
+
+extern int dbg_level;
+extern int dbg_func;
+extern int pad_w;
+extern int pad_h;
+
+/* Minimum padding requirements for ISP2400 (BYT) */
+#define ISP2400_MIN_PAD_W 12
+#define ISP2400_MIN_PAD_H 12
+
+#define CSS_DTRACE_VERBOSITY_LEVEL 5 /* Controls trace verbosity */
+#define CSS_DTRACE_VERBOSITY_TIMEOUT 9 /* Verbosity on ISP timeout */
+#define MRFLD_MAX_ZOOM_FACTOR 1024
+
+/* ISP2401 */
+#define ATOMISP_CSS_ISP_PIPE_VERSION_2_7 1
+
+struct atomisp_format_bridge {
+ unsigned int pixelformat;
+ unsigned int depth;
+ u32 mbus_code;
+ enum ia_css_frame_format sh_fmt;
+ unsigned char description[32]; /* the same as struct v4l2_fmtdesc */
+ bool planar;
+};
+
+struct atomisp_fmt {
+ u32 pixelformat;
+ u32 depth;
+ u32 bytesperline;
+ u32 framesize;
+ u32 imagesize;
+ u32 width;
+ u32 height;
+ u32 bayer_order;
+};
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h
new file mode 100644
index 000000000000..f0704a7e3bff
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ */
+
+#ifndef __ATOMISP_COMPAT_H__
+#define __ATOMISP_COMPAT_H__
+
+#include "atomisp_compat_css20.h"
+
+#include "../../include/linux/atomisp.h"
+
+struct atomisp_device;
+struct atomisp_sub_device;
+struct video_device;
+enum atomisp_input_stream_id;
+
+struct atomisp_metadata_buf {
+ struct ia_css_metadata *metadata;
+ void *md_vptr;
+ struct list_head list;
+};
+
+void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data);
+void atomisp_load_uint32(hrt_address addr, uint32_t *data);
+
+int atomisp_css_init(struct atomisp_device *isp);
+
+void atomisp_css_uninit(struct atomisp_device *isp);
+
+void atomisp_css_init_struct(struct atomisp_sub_device *asd);
+
+int atomisp_css_irq_translate(struct atomisp_device *isp,
+ unsigned int *infos);
+
+void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *infos);
+
+void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int infos);
+
+int atomisp_css_irq_enable(struct atomisp_device *isp,
+ enum ia_css_irq_info info, bool enable);
+
+int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd,
+ struct ia_css_frame *frame,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_buffer_type css_buf_type,
+ enum ia_css_pipe_id css_pipe_id);
+
+int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_s3a_buf *s3a_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id);
+
+int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_metadata_buf *metadata_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id);
+
+int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_dis_buf *dis_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id);
+
+void ia_css_mmu_invalidate_cache(void);
+
+int atomisp_css_start(struct atomisp_sub_device *asd);
+
+void atomisp_css_update_isp_params(struct atomisp_sub_device *asd);
+void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
+ struct ia_css_pipe *pipe);
+
+int atomisp_css_queue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id pipe_id,
+ enum ia_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer);
+
+int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id pipe_id,
+ enum ia_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer);
+
+int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd,
+ u16 stream_id,
+ struct atomisp_s3a_buf *s3a_buf,
+ struct atomisp_dis_buf *dis_buf,
+ struct atomisp_metadata_buf *md_buf);
+
+void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd);
+
+void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf);
+
+void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf);
+
+void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf
+ *metadata_buf);
+
+int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id);
+
+int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd);
+
+int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd);
+
+int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd);
+
+void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd);
+
+void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
+ struct atomisp_css_event *current_event);
+
+int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt,
+ int isys_stream);
+
+void atomisp_css_isys_set_link(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ int link,
+ int isys_stream);
+
+void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ bool valid,
+ int isys_stream);
+
+void atomisp_css_isys_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format,
+ int isys_stream);
+
+int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt);
+
+void atomisp_css_isys_two_stream_cfg_update_stream1(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height);
+
+void atomisp_css_isys_two_stream_cfg_update_stream2(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt);
+
+void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int bin_factor);
+
+void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_bayer_order bayer_order);
+
+void atomisp_css_input_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format);
+
+int atomisp_css_input_set_effective_resolution(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int width,
+ unsigned int height);
+
+void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd,
+ unsigned int dvs_w, unsigned int dvs_h);
+
+void atomisp_css_input_set_two_pixels_per_clock(
+ struct atomisp_sub_device *asd,
+ bool two_ppc);
+
+void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable);
+
+void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd,
+ enum ia_css_capture_mode mode);
+
+void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
+ enum ia_css_input_mode mode);
+
+void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable);
+
+void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable);
+
+int atomisp_css_input_configure_port(struct atomisp_sub_device *asd,
+ enum mipi_port_id port,
+ unsigned int num_lanes,
+ unsigned int timeout,
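+	/* Use the copy pipe when the ISP does no format conversion and no scaling */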
+ unsigned int mipi_freq,
+ enum atomisp_input_format metadata_format,
+ unsigned int metadata_width,
+ unsigned int metadata_height);
+
+int atomisp_create_pipes_stream(struct atomisp_sub_device *asd);
+void atomisp_destroy_pipes_stream(struct atomisp_sub_device *asd);
+
+void atomisp_css_stop(struct atomisp_sub_device *asd, bool in_reset);
+
+void atomisp_css_continuous_set_num_raw_frames(
+ struct atomisp_sub_device *asd,
+ int num_frames);
+
+int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum ia_css_frame_format format);
+
+int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format);
+
+int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format);
+
+int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format);
+
+int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *frame_info);
+
+int atomisp_css_video_configure_viewfinder(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format);
+
+int atomisp_css_capture_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format);
+
+int atomisp_css_video_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info);
+
+int atomisp_css_capture_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info);
+
+int atomisp_css_copy_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct ia_css_frame_info *info);
+
+int atomisp_css_preview_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info);
+
+int atomisp_css_capture_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info);
+
+int atomisp_css_video_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info);
+
+int atomisp_css_preview_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_capture_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_video_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd,
+ int num_captures, unsigned int skip, int offset);
+int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id);
+int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id);
+
+int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd,
+ struct ia_css_ctc_table *ctc_table);
+
+void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector);
+
+void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs);
+
+int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs);
+
+void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int zoom);
+
+int atomisp_css_get_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_wb_config *config);
+
+int atomisp_css_get_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_ob_config *config);
+
+int atomisp_css_get_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_dp_config *config);
+
+int atomisp_css_get_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_de_config *config);
+
+int atomisp_css_get_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_nr_config *config);
+
+int atomisp_css_get_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_ee_config *config);
+
+int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_tnr_config *config);
+
+int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_ctc_table *config);
+
+int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_gamma_table *config);
+
+int atomisp_css_get_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_gc_config *config);
+
+int atomisp_css_get_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_3a_config *config);
+
+int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_formats_config *formats_config);
+
+void atomisp_css_set_formats_config(struct atomisp_sub_device *asd,
+ struct ia_css_formats_config *formats_config);
+
+int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int *zoom);
+
+struct ia_css_shading_table *atomisp_css_shading_table_alloc(
+ unsigned int width, unsigned int height);
+
+void atomisp_css_set_shading_table(struct atomisp_sub_device *asd,
+ struct ia_css_shading_table *table);
+
+void atomisp_css_shading_table_free(struct ia_css_shading_table *table);
+
+struct ia_css_morph_table *atomisp_css_morph_table_allocate(
+ unsigned int width, unsigned int height);
+
+void atomisp_css_set_morph_table(struct atomisp_sub_device *asd,
+ struct ia_css_morph_table *table);
+
+void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
+ struct ia_css_morph_table *table);
+
+void atomisp_css_morph_table_free(struct ia_css_morph_table *table);
+
+int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats);
+
+int atomisp_css_update_stream(struct atomisp_sub_device *asd);
+
+int atomisp_css_isr_thread(struct atomisp_device *isp);
+
+bool atomisp_css_valid_sof(struct atomisp_device *isp);
+
+void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable);
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
new file mode 100644
index 000000000000..be5f37f4a6fd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -0,0 +1,3397 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ */
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+#include "hmm/hmm_bo.h"
+#include "hmm/hmm.h"
+
+#include "atomisp_compat.h"
+#include "atomisp_internal.h"
+#include "atomisp_cmd.h"
+#include "atomisp-regs.h"
+#include "atomisp_fops.h"
+#include "atomisp_ioctl.h"
+
+#include "ia_css_debug.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_hrt.h"
+#include "ia_css_isys.h"
+
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+
+/* Assume max number of ACC stages */
+#define MAX_ACC_STAGES 20
+
+/* Ideally, this should come from CSS headers */
+#define NO_LINK -1
+
+/*
+ * Serialize MMIO access. This is needed because of ISP2400 silicon issue
+ * Sighting #4684168: if concurrent accesses happen, the system may hard hang.
+ */
+static DEFINE_SPINLOCK(mmio_lock);
+
+enum frame_info_type {
+ ATOMISP_CSS_VF_FRAME,
+ ATOMISP_CSS_SECOND_VF_FRAME,
+ ATOMISP_CSS_OUTPUT_FRAME,
+ ATOMISP_CSS_SECOND_OUTPUT_FRAME,
+ ATOMISP_CSS_RAW_FRAME,
+};
+
+struct bayer_ds_factor {
+ unsigned int numerator;
+ unsigned int denominator;
+};
+
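+/*
+ * The MMIO helpers below mask the hrt address into the ISP's 4 MiB MMIO
+ * window (addr & 0x003FFFFF) and take mmio_lock to serialize the access.
+ */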
+static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ writeb(data, isp->base + (addr & 0x003FFFFF));
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ writew(data, isp->base + (addr & 0x003FFFFF));
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ writel(data, isp->base + (addr & 0x003FFFFF));
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static uint8_t atomisp_css2_hw_load_8(hrt_address addr)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+ u8 ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = readb(isp->base + (addr & 0x003FFFFF));
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+static uint16_t atomisp_css2_hw_load_16(hrt_address addr)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+ u16 ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = readw(isp->base + (addr & 0x003FFFFF));
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+static uint32_t atomisp_css2_hw_load_32(hrt_address addr)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+ u32 ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = readl(isp->base + (addr & 0x003FFFFF));
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+static void atomisp_css2_hw_store(hrt_address addr, const void *from, uint32_t n)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+ unsigned int i;
+
+ addr &= 0x003FFFFF;
+ spin_lock_irqsave(&mmio_lock, flags);
+ for (i = 0; i < n; i++, from++)
+ writeb(*(s8 *)from, isp->base + addr + i);
+
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
+{
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
+ unsigned long flags;
+ unsigned int i;
+
+ addr &= 0x003FFFFF;
+ spin_lock_irqsave(&mmio_lock, flags);
+ for (i = 0; i < n; i++, to++)
+ *(s8 *)to = readb(isp->base + addr + i);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static int __printf(1, 0) atomisp_vprintk(const char *fmt, va_list args)
+{
+ vprintk(fmt, args);
+ return 0;
+}
+
+void atomisp_load_uint32(hrt_address addr, uint32_t *data)
+{
+ *data = atomisp_css2_hw_load_32(addr);
+}
+
+static int hmm_get_mmu_base_addr(struct device *dev, unsigned int *mmu_base_addr)
+{
+ if (!sh_mmu_mrfld.get_pd_base) {
+ dev_err(dev, "get mmu base address failed.\n");
+ return -EINVAL;
+ }
+
+ *mmu_base_addr = sh_mmu_mrfld.get_pd_base(&bo_device.mmu,
+ bo_device.mmu.base_address);
+ return 0;
+}
+
+static void __dump_pipe_config(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ unsigned int pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (stream_env->pipes[pipe_id]) {
+ struct ia_css_pipe_config *p_config;
+ struct ia_css_pipe_extra_config *pe_config;
+
+ p_config = &stream_env->pipe_configs[pipe_id];
+ pe_config = &stream_env->pipe_extra_configs[pipe_id];
+ dev_dbg(isp->dev, "dumping pipe[%d] config:\n", pipe_id);
+ dev_dbg(isp->dev,
+ "pipe_config.pipe_mode:%d.\n", p_config->mode);
+ dev_dbg(isp->dev,
+ "pipe_config.output_info[0] w=%d, h=%d.\n",
+ p_config->output_info[0].res.width,
+ p_config->output_info[0].res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.vf_pp_in_res w=%d, h=%d.\n",
+ p_config->vf_pp_in_res.width,
+ p_config->vf_pp_in_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.capt_pp_in_res w=%d, h=%d.\n",
+ p_config->capt_pp_in_res.width,
+ p_config->capt_pp_in_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.output.padded w=%d.\n",
+ p_config->output_info[0].padded_width);
+ dev_dbg(isp->dev,
+ "pipe_config.vf_output_info[0] w=%d, h=%d.\n",
+ p_config->vf_output_info[0].res.width,
+ p_config->vf_output_info[0].res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.bayer_ds_out_res w=%d, h=%d.\n",
+ p_config->bayer_ds_out_res.width,
+ p_config->bayer_ds_out_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.envelope w=%d, h=%d.\n",
+ p_config->dvs_envelope.width,
+ p_config->dvs_envelope.height);
+ dev_dbg(isp->dev,
+ "pipe_config.dvs_frame_delay=%d.\n",
+ p_config->dvs_frame_delay);
+ dev_dbg(isp->dev,
+ "pipe_config.isp_pipe_version:%d.\n",
+ p_config->isp_pipe_version);
+ dev_dbg(isp->dev,
+ "pipe_config.default_capture_config.capture_mode=%d.\n",
+ p_config->default_capture_config.mode);
+ dev_dbg(isp->dev,
+ "pipe_config.enable_dz=%d.\n",
+ p_config->enable_dz);
+ dev_dbg(isp->dev,
+ "pipe_config.default_capture_config.enable_xnr=%d.\n",
+ p_config->default_capture_config.enable_xnr);
+ dev_dbg(isp->dev,
+ "dumping pipe[%d] extra config:\n", pipe_id);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_raw_binning:%d.\n",
+ pe_config->enable_raw_binning);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_yuv_ds:%d.\n",
+ pe_config->enable_yuv_ds);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_high_speed:%d.\n",
+ pe_config->enable_high_speed);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_dvs_6axis:%d.\n",
+ pe_config->enable_dvs_6axis);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_reduced_pipe:%d.\n",
+ pe_config->enable_reduced_pipe);
+ dev_dbg(isp->dev,
+ "pipe_(extra_)config.enable_dz:%d.\n",
+ p_config->enable_dz);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.disable_vf_pp:%d.\n",
+ pe_config->disable_vf_pp);
+ }
+}
+
+static void __dump_stream_config(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_stream_config *s_config;
+ int j;
+ bool valid_stream = false;
+
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ if (stream_env->pipes[j]) {
+ __dump_pipe_config(asd, stream_env, j);
+ valid_stream = true;
+ }
+ }
+ if (!valid_stream)
+ return;
+ s_config = &stream_env->stream_config;
+ dev_dbg(isp->dev, "stream_config.mode=%d.\n", s_config->mode);
+
+ if (s_config->mode == IA_CSS_INPUT_MODE_SENSOR ||
+ s_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ dev_dbg(isp->dev, "stream_config.source.port.port=%d.\n",
+ s_config->source.port.port);
+ dev_dbg(isp->dev, "stream_config.source.port.num_lanes=%d.\n",
+ s_config->source.port.num_lanes);
+ dev_dbg(isp->dev, "stream_config.source.port.timeout=%d.\n",
+ s_config->source.port.timeout);
+ dev_dbg(isp->dev, "stream_config.source.port.rxcount=0x%x.\n",
+ s_config->source.port.rxcount);
+ dev_dbg(isp->dev, "stream_config.source.port.compression.type=%d.\n",
+ s_config->source.port.compression.type);
+ dev_dbg(isp->dev,
+ "stream_config.source.port.compression.compressed_bits_per_pixel=%d.\n",
+ s_config->source.port.compression.compressed_bits_per_pixel);
+ dev_dbg(isp->dev,
+ "stream_config.source.port.compression.uncompressed_bits_per_pixel=%d.\n",
+ s_config->source.port.compression.uncompressed_bits_per_pixel);
+ } else if (s_config->mode == IA_CSS_INPUT_MODE_PRBS) {
+ dev_dbg(isp->dev, "stream_config.source.prbs.id=%d.\n",
+ s_config->source.prbs.id);
+ dev_dbg(isp->dev, "stream_config.source.prbs.h_blank=%d.\n",
+ s_config->source.prbs.h_blank);
+ dev_dbg(isp->dev, "stream_config.source.prbs.v_blank=%d.\n",
+ s_config->source.prbs.v_blank);
+ dev_dbg(isp->dev, "stream_config.source.prbs.seed=%d.\n",
+ s_config->source.prbs.seed);
+ dev_dbg(isp->dev, "stream_config.source.prbs.seed1=%d.\n",
+ s_config->source.prbs.seed1);
+ }
+
+ for (j = 0; j < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; j++) {
+ dev_dbg(isp->dev, "stream_configisys_config[%d].input_res w=%d, h=%d.\n",
+ j,
+ s_config->isys_config[j].input_res.width,
+ s_config->isys_config[j].input_res.height);
+
+ dev_dbg(isp->dev, "stream_configisys_config[%d].linked_isys_stream_id=%d\n",
+ j,
+ s_config->isys_config[j].linked_isys_stream_id);
+
+ dev_dbg(isp->dev, "stream_configisys_config[%d].format=%d\n",
+ j,
+ s_config->isys_config[j].format);
+
+ dev_dbg(isp->dev, "stream_configisys_config[%d].valid=%d.\n",
+ j,
+ s_config->isys_config[j].valid);
+ }
+
+ dev_dbg(isp->dev, "stream_config.input_config.input_res w=%d, h=%d.\n",
+ s_config->input_config.input_res.width,
+ s_config->input_config.input_res.height);
+
+ dev_dbg(isp->dev, "stream_config.input_config.effective_res w=%d, h=%d.\n",
+ s_config->input_config.effective_res.width,
+ s_config->input_config.effective_res.height);
+
+ dev_dbg(isp->dev, "stream_config.input_config.format=%d\n",
+ s_config->input_config.format);
+
+ dev_dbg(isp->dev, "stream_config.input_config.bayer_order=%d.\n",
+ s_config->input_config.bayer_order);
+
+ dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
+ s_config->pixels_per_clock);
+ dev_dbg(isp->dev, "stream_config.online=%d.\n", s_config->online);
+ dev_dbg(isp->dev, "stream_config.continuous=%d.\n",
+ s_config->continuous);
+ dev_dbg(isp->dev, "stream_config.disable_cont_viewfinder=%d.\n",
+ s_config->disable_cont_viewfinder);
+ dev_dbg(isp->dev, "stream_config.channel_id=%d.\n",
+ s_config->channel_id);
+ dev_dbg(isp->dev, "stream_config.init_num_cont_raw_buf=%d.\n",
+ s_config->init_num_cont_raw_buf);
+ dev_dbg(isp->dev, "stream_config.target_num_cont_raw_buf=%d.\n",
+ s_config->target_num_cont_raw_buf);
+ dev_dbg(isp->dev, "stream_config.left_padding=%d.\n",
+ s_config->left_padding);
+ dev_dbg(isp->dev, "stream_config.sensor_binning_factor=%d.\n",
+ s_config->sensor_binning_factor);
+ dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
+ s_config->pixels_per_clock);
+ dev_dbg(isp->dev, "stream_config.pack_raw_pixels=%d.\n",
+ s_config->pack_raw_pixels);
+ dev_dbg(isp->dev, "stream_config.flash_gpio_pin=%d.\n",
+ s_config->flash_gpio_pin);
+ dev_dbg(isp->dev, "stream_config.mipi_buffer_config.size_mem_words=%d.\n",
+ s_config->mipi_buffer_config.size_mem_words);
+ dev_dbg(isp->dev, "stream_config.mipi_buffer_config.contiguous=%d.\n",
+ s_config->mipi_buffer_config.contiguous);
+ dev_dbg(isp->dev, "stream_config.metadata_config.data_type=%d.\n",
+ s_config->metadata_config.data_type);
+ dev_dbg(isp->dev, "stream_config.metadata_config.resolution w=%d, h=%d.\n",
+ s_config->metadata_config.resolution.width,
+ s_config->metadata_config.resolution.height);
+}
+
+static int __destroy_stream(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ struct atomisp_device *isp = asd->isp;
+ unsigned long timeout;
+
+ if (!stream_env->stream)
+ return 0;
+
+ if (stream_env->stream_state == CSS_STREAM_STARTED &&
+ ia_css_stream_stop(stream_env->stream) != 0) {
+ dev_err(isp->dev, "stop stream failed.\n");
+ return -EINVAL;
+ }
+
+ if (stream_env->stream_state == CSS_STREAM_STARTED) {
+ timeout = jiffies + msecs_to_jiffies(200);
+ while (1) {
+ if (ia_css_stream_has_stopped(stream_env->stream))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(isp->dev, "stop stream timeout.\n");
+ break;
+ }
+
+ usleep_range(100, 200);
+ }
+ }
+
+ stream_env->stream_state = CSS_STREAM_STOPPED;
+
+ if (ia_css_stream_destroy(stream_env->stream)) {
+ dev_err(isp->dev, "destroy stream failed.\n");
+ return -EINVAL;
+ }
+ stream_env->stream_state = CSS_STREAM_UNINIT;
+ stream_env->stream = NULL;
+
+ return 0;
+}
+
+static int __destroy_streams(struct atomisp_sub_device *asd)
+{
+ int ret, i;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ ret = __destroy_stream(asd, &asd->stream_env[i]);
+ if (ret)
+ return ret;
+ }
+ asd->stream_prepared = false;
+ return 0;
+}
+
+static int __create_stream(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ int pipe_index = 0, i;
+ struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM];
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (stream_env->pipes[i])
+ multi_pipes[pipe_index++] = stream_env->pipes[i];
+ }
+ if (pipe_index == 0)
+ return 0;
+
+ stream_env->stream_config.target_num_cont_raw_buf =
+ asd->continuous_raw_buffer_size->val;
+ stream_env->stream_config.channel_id = stream_env->ch_id;
+ stream_env->stream_config.ia_css_enable_raw_buffer_locking =
+ asd->enable_raw_buffer_lock->val;
+
+ __dump_stream_config(asd, stream_env);
+ if (ia_css_stream_create(&stream_env->stream_config,
+ pipe_index, multi_pipes, &stream_env->stream) != 0)
+ return -EINVAL;
+ if (ia_css_stream_get_info(stream_env->stream,
+ &stream_env->stream_info) != 0) {
+ ia_css_stream_destroy(stream_env->stream);
+ stream_env->stream = NULL;
+ return -EINVAL;
+ }
+
+ stream_env->stream_state = CSS_STREAM_CREATED;
+ return 0;
+}
+
+static int __create_streams(struct atomisp_sub_device *asd)
+{
+ int ret, i;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ ret = __create_stream(asd, &asd->stream_env[i]);
+ if (ret)
+ goto rollback;
+ }
+ asd->stream_prepared = true;
+ return 0;
+rollback:
+ for (i--; i >= 0; i--)
+ __destroy_stream(asd, &asd->stream_env[i]);
+ return ret;
+}
+
+static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ struct atomisp_device *isp = asd->isp;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (!stream_env->pipes[i])
+ continue;
+ if (ia_css_pipe_destroy(stream_env->pipes[i]) != 0) {
+ dev_err(isp->dev,
+ "destroy pipe[%d] failed, cannot recover.\n", i);
+ ret = -EINVAL;
+ }
+ stream_env->pipes[i] = NULL;
+ stream_env->update_pipe[i] = false;
+ }
+ return ret;
+}
+
+static int __destroy_pipes(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ if (asd->stream_env[i].stream) {
+ dev_err(isp->dev,
+ "cannot destroy css pipes for stream[%d].\n",
+ i);
+ continue;
+ }
+
+ ret = __destroy_stream_pipes(asd, &asd->stream_env[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void atomisp_destroy_pipes_stream(struct atomisp_sub_device *asd)
+{
+ if (__destroy_streams(asd))
+ dev_warn(asd->isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd))
+ dev_warn(asd->isp->dev, "destroy pipe failed.\n");
+}
+
+static void __apply_additional_pipe_config(
+ struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (pipe_id < 0 || pipe_id >= IA_CSS_PIPE_ID_NUM) {
+ dev_err(isp->dev,
+ "wrong pipe_id for additional pipe config.\n");
+ return;
+ }
+
+ /* apply default pipe config */
+ stream_env->pipe_configs[pipe_id].isp_pipe_version = 2;
+ stream_env->pipe_configs[pipe_id].enable_dz = !asd->disable_dz->val;
+ /* apply ISP 2.2 specific config for Baytrail */
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_CAPTURE:
+ /* enable capture pp/dz manually or digital zoom would
+ * fail */
+ if (stream_env->pipe_configs[pipe_id].default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_RAW)
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ /* enable reduced pipe to have binary
+ * video_dz_2_min selected */
+ stream_env->pipe_extra_configs[pipe_id]
+ .enable_reduced_pipe = true;
+ stream_env->pipe_configs[pipe_id]
+ .enable_dz = false;
+
+ if (asd->params.video_dis_en) {
+ stream_env->pipe_extra_configs[pipe_id]
+ .enable_dvs_6axis = true;
+ stream_env->pipe_configs[pipe_id]
+ .dvs_frame_delay =
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ }
+ break;
+ case IA_CSS_PIPE_ID_PREVIEW:
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ case IA_CSS_PIPE_ID_COPY:
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ if (pipe_id == IA_CSS_PIPE_ID_YUVPP)
+ return true;
+
+ if (asd->vfpp) {
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ return true;
+ else
+ return false;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
+ return true;
+ else
+ return false;
+ }
+ }
+
+ if (!asd->run_mode)
+ return false;
+
+ if (asd->copy_mode && pipe_id == IA_CSS_PIPE_ID_COPY)
+ return true;
+
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
+ return true;
+
+ return false;
+ case ATOMISP_RUN_MODE_PREVIEW:
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ return true;
+
+ return false;
+ case ATOMISP_RUN_MODE_VIDEO:
+ if (pipe_id == IA_CSS_PIPE_ID_VIDEO || pipe_id == IA_CSS_PIPE_ID_YUVPP)
+ return true;
+
+ return false;
+ }
+
+ return false;
+}
+
+static int __create_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_pipe_extra_config extra_config;
+ int ret;
+
+ if (pipe_id >= IA_CSS_PIPE_ID_NUM)
+ return -EINVAL;
+
+ if (!stream_env->pipe_configs[pipe_id].output_info[0].res.width)
+ return 0;
+
+ if (!is_pipe_valid_to_current_run_mode(asd, pipe_id))
+ return 0;
+
+ ia_css_pipe_extra_config_defaults(&extra_config);
+
+ __apply_additional_pipe_config(asd, stream_env, pipe_id);
+ if (!memcmp(&extra_config,
+ &stream_env->pipe_extra_configs[pipe_id],
+ sizeof(extra_config)))
+ ret = ia_css_pipe_create(
+ &stream_env->pipe_configs[pipe_id],
+ &stream_env->pipes[pipe_id]);
+ else
+ ret = ia_css_pipe_create_extra(
+ &stream_env->pipe_configs[pipe_id],
+ &stream_env->pipe_extra_configs[pipe_id],
+ &stream_env->pipes[pipe_id]);
+ if (ret)
+ dev_err(isp->dev, "create pipe[%d] error.\n", pipe_id);
+ return ret;
+}
+
+static int __create_pipes(struct atomisp_sub_device *asd)
+{
+ int ret;
+ int i, j;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ ret = __create_pipe(asd, &asd->stream_env[i], j);
+ if (ret)
+ break;
+ }
+ if (j < IA_CSS_PIPE_ID_NUM)
+ goto pipe_err;
+ }
+ return 0;
+pipe_err:
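+ /*
+ * Roll back: destroy every pipe created so far.  For the stream that
+ * just failed, j indexes the pipe that could not be created, so the
+ * j-- below starts at the last successfully created pipe; for the
+ * earlier, fully created streams, j is reset to IA_CSS_PIPE_ID_NUM at
+ * the end of each outer iteration so that all of their pipes are
+ * walked.
+ */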
+ for (; i >= 0; i--) {
+ for (j--; j >= 0; j--) {
+ if (asd->stream_env[i].pipes[j]) {
+ ia_css_pipe_destroy(asd->stream_env[i].pipes[j]);
+ asd->stream_env[i].pipes[j] = NULL;
+ }
+ }
+ j = IA_CSS_PIPE_ID_NUM;
+ }
+ return -EINVAL;
+}
+
+int atomisp_create_pipes_stream(struct atomisp_sub_device *asd)
+{
+ int ret;
+
+ ret = __create_pipes(asd);
+ if (ret) {
+ dev_err(asd->isp->dev, "create pipe failed %d.\n", ret);
+ return ret;
+ }
+
+ ret = __create_streams(asd);
+ if (ret) {
+ dev_warn(asd->isp->dev, "create stream failed %d.\n", ret);
+ __destroy_pipes(asd);
+ return ret;
+ }
+
+ return 0;
+}
+
+int atomisp_css_update_stream(struct atomisp_sub_device *asd)
+{
+ atomisp_destroy_pipes_stream(asd);
+ return atomisp_create_pipes_stream(asd);
+}
+
+int atomisp_css_init(struct atomisp_device *isp)
+{
+ unsigned int mmu_base_addr;
+ int ret;
+ int err;
+
+ ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr);
+ if (ret)
+ return ret;
+
+ /* Init ISP */
+ err = ia_css_init(isp->dev, &isp->css_env.isp_css_env,
+ (uint32_t)mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE);
+ if (err) {
+ dev_err(isp->dev, "css init failed --- bad firmware?\n");
+ return -EINVAL;
+ }
+ ia_css_enable_isys_event_queue(true);
+
+ isp->css_initialized = true;
+ dev_dbg(isp->dev, "sh_css_init success\n");
+
+ return 0;
+}
+
+static inline int __set_css_print_env(struct atomisp_device *isp, int opt)
+{
+ int ret = 0;
+
+ if (opt == 0)
+ isp->css_env.isp_css_env.print_env.debug_print = NULL;
+ else if (opt == 1)
+ isp->css_env.isp_css_env.print_env.debug_print = atomisp_vprintk;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int atomisp_css_load_firmware(struct atomisp_device *isp)
+{
+ int err;
+
+ /* set css env */
+ isp->css_env.isp_css_fw.data = (void *)isp->firmware->data;
+ isp->css_env.isp_css_fw.bytes = isp->firmware->size;
+
+ isp->css_env.isp_css_env.hw_access_env.store_8 =
+ atomisp_css2_hw_store_8;
+ isp->css_env.isp_css_env.hw_access_env.store_16 =
+ atomisp_css2_hw_store_16;
+ isp->css_env.isp_css_env.hw_access_env.store_32 =
+ atomisp_css2_hw_store_32;
+
+ isp->css_env.isp_css_env.hw_access_env.load_8 = atomisp_css2_hw_load_8;
+ isp->css_env.isp_css_env.hw_access_env.load_16 =
+ atomisp_css2_hw_load_16;
+ isp->css_env.isp_css_env.hw_access_env.load_32 =
+ atomisp_css2_hw_load_32;
+
+ isp->css_env.isp_css_env.hw_access_env.load = atomisp_css2_hw_load;
+ isp->css_env.isp_css_env.hw_access_env.store = atomisp_css2_hw_store;
+
+ __set_css_print_env(isp, dbg_func);
+
+ isp->css_env.isp_css_env.print_env.error_print = atomisp_vprintk;
+
+ /* load isp fw into ISP memory */
+ err = ia_css_load_firmware(isp->dev, &isp->css_env.isp_css_env,
+ &isp->css_env.isp_css_fw);
+ if (err) {
+ dev_err(isp->dev, "css load fw failed.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_uninit(struct atomisp_device *isp)
+{
+ isp->css_initialized = false;
+ ia_css_uninit();
+}
+
+int atomisp_css_irq_translate(struct atomisp_device *isp,
+ unsigned int *infos)
+{
+ int err;
+
+ err = ia_css_irq_translate(infos);
+ if (err) {
+ dev_warn(isp->dev,
+ "%s:failed to translate irq (err = %d,infos = %d)\n",
+ __func__, err, *infos);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *infos)
+{
+ if (IS_ISP2401)
+ *infos = 0;
+ else
+ ia_css_isys_rx_get_irq_info(port, infos);
+}
+
+void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int infos)
+{
+ if (!IS_ISP2401)
+ ia_css_isys_rx_clear_irq_info(port, infos);
+}
+
+int atomisp_css_irq_enable(struct atomisp_device *isp,
+ enum ia_css_irq_info info, bool enable)
+{
+ dev_dbg(isp->dev, "%s: css irq info 0x%08x: %s (%d).\n",
+ __func__, info,
+ enable ? "enable" : "disable", enable);
+ if (ia_css_irq_enable(info, enable)) {
+ dev_warn(isp->dev, "%s:Invalid irq info: 0x%08x when %s.\n",
+ __func__, info,
+ enable ? "enabling" : "disabling");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_init_struct(struct atomisp_sub_device *asd)
+{
+ int i, j;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ asd->stream_env[i].stream = NULL;
+ for (j = 0; j < IA_CSS_PIPE_MODE_NUM; j++) {
+ asd->stream_env[i].pipes[j] = NULL;
+ asd->stream_env[i].update_pipe[j] = false;
+ ia_css_pipe_config_defaults(
+ &asd->stream_env[i].pipe_configs[j]);
+ ia_css_pipe_extra_config_defaults(
+ &asd->stream_env[i].pipe_extra_configs[j]);
+ }
+ ia_css_stream_config_defaults(&asd->stream_env[i].stream_config);
+ }
+}
+
+int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd,
+ struct ia_css_frame *frame,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_buffer_type css_buf_type,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer css_buf = {0};
+ int err;
+
+ css_buf.type = css_buf_type;
+ css_buf.data.frame = frame;
+
+ err = ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id], &css_buf);
+ if (err)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_metadata_buf *metadata_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_METADATA;
+ buffer.data.metadata = metadata_buf->metadata;
+ if (ia_css_pipe_enqueue_buffer(stream_env->pipes[css_pipe_id],
+ &buffer)) {
+ dev_err(isp->dev, "failed to q meta data buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_s3a_buf *s3a_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_3A_STATISTICS;
+ buffer.data.stats_3a = s3a_buf->s3a_data;
+ if (ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id],
+ &buffer)) {
+ dev_dbg(isp->dev, "failed to q s3a stat buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_dis_buf *dis_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_DIS_STATISTICS;
+ buffer.data.stats_dvs = dis_buf->dis_data;
+ if (ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id],
+ &buffer)) {
+ dev_dbg(isp->dev, "failed to q dvs stat buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_css_start(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ bool sp_is_started = false;
+ int ret = 0, i = 0;
+
+ if (!sh_css_hrt_system_is_idle())
+ dev_err(isp->dev, "CSS HW not idle before starting SP\n");
+
+ if (ia_css_start_sp()) {
+ dev_err(isp->dev, "start sp error.\n");
+ ret = -EINVAL;
+ goto start_err;
+ }
+
+ sp_is_started = true;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ if (asd->stream_env[i].stream) {
+ if (ia_css_stream_start(asd->stream_env[i].stream) != 0) {
+ dev_err(isp->dev, "stream[%d] start error.\n", i);
+ ret = -EINVAL;
+ goto start_err;
+ } else {
+ asd->stream_env[i].stream_state = CSS_STREAM_STARTED;
+ dev_dbg(isp->dev, "stream[%d] started.\n", i);
+ }
+ }
+ }
+
+ return 0;
+
+start_err:
+ /*
+ * CSS 2.0 API limitation: ia_css_stop_sp() can only be called after
+ * destroying all pipes.
+ */
+ if (sp_is_started) {
+ atomisp_destroy_pipes_stream(asd);
+ ia_css_stop_sp();
+ atomisp_create_pipes_stream(asd);
+ }
+
+ return ret;
+}
+
+void atomisp_css_update_isp_params(struct atomisp_sub_device *asd)
+{
+ /*
+ * FIXME!
+ * for ISP2401 new input system, this api is under development.
+ * Calling it would cause kernel panic.
+ *
+ * VIED BZ: 1458
+ *
+ * Check if it is Cherry Trail and also new input system
+ */
+ if (asd->copy_mode) {
+ dev_warn(asd->isp->dev,
+ "%s: ia_css_stream_set_isp_config() not supported in copy mode!.\n",
+ __func__);
+ return;
+ }
+
+ ia_css_stream_set_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &asd->params.config);
+ memset(&asd->params.config, 0, sizeof(asd->params.config));
+}
+
+void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
+ struct ia_css_pipe *pipe)
+{
+ int ret;
+
+ if (!pipe) {
+ atomisp_css_update_isp_params(asd);
+ return;
+ }
+
+ dev_dbg(asd->isp->dev,
+ "%s: apply parameter for ia_css_frame %p with isp_config_id %d on pipe %p.\n",
+ __func__, asd->params.config.output_frame,
+ asd->params.config.isp_config_id, pipe);
+
+ ret = ia_css_stream_set_isp_config_on_pipe(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &asd->params.config, pipe);
+ if (ret)
+ dev_warn(asd->isp->dev, "%s: ia_css_stream_set_isp_config_on_pipe failed %d\n",
+ __func__, ret);
+ memset(&asd->params.config, 0, sizeof(asd->params.config));
+}
+
+int atomisp_css_queue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id pipe_id,
+ enum ia_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer)
+{
+ if (ia_css_pipe_enqueue_buffer(
+ asd->stream_env[stream_id].pipes[pipe_id],
+ &isp_css_buffer->css_buffer) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id pipe_id,
+ enum ia_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer)
+{
+ struct atomisp_device *isp = asd->isp;
+ int err;
+
+ err = ia_css_pipe_dequeue_buffer(
+ asd->stream_env[stream_id].pipes[pipe_id],
+ &isp_css_buffer->css_buffer);
+ if (err) {
+ dev_err(isp->dev,
+ "ia_css_pipe_dequeue_buffer failed: 0x%x\n", err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd,
+ u16 stream_id,
+ struct atomisp_s3a_buf *s3a_buf,
+ struct atomisp_dis_buf *dis_buf,
+ struct atomisp_metadata_buf *md_buf)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (s3a_buf && asd->params.curr_grid_info.s3a_grid.enable) {
+ void *s3a_ptr;
+
+ s3a_buf->s3a_data = ia_css_isp_3a_statistics_allocate(
+ &asd->params.curr_grid_info.s3a_grid);
+ if (!s3a_buf->s3a_data) {
+ dev_err(isp->dev, "3a buf allocation failed.\n");
+ return -EINVAL;
+ }
+
+ s3a_ptr = hmm_vmap(s3a_buf->s3a_data->data_ptr, true);
+ s3a_buf->s3a_map = ia_css_isp_3a_statistics_map_allocate(
+ s3a_buf->s3a_data, s3a_ptr);
+ }
+
+ if (dis_buf && dvs_grid_info && dvs_grid_info->enable) {
+ void *dvs_ptr;
+
+ dis_buf->dis_data = ia_css_isp_dvs2_statistics_allocate(
+ dvs_grid_info);
+ if (!dis_buf->dis_data) {
+ dev_err(isp->dev, "dvs buf allocation failed.\n");
+ if (s3a_buf)
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+ return -EINVAL;
+ }
+
+ dvs_ptr = hmm_vmap(dis_buf->dis_data->data_ptr, true);
+ dis_buf->dvs_map = ia_css_isp_dvs_statistics_map_allocate(
+ dis_buf->dis_data, dvs_ptr);
+ }
+
+ if (asd->stream_env[stream_id].stream_info.metadata_info.size &&
+ md_buf) {
+ md_buf->metadata = ia_css_metadata_allocate(
+ &asd->stream_env[stream_id].stream_info.metadata_info);
+ if (!md_buf->metadata) {
+ if (s3a_buf)
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+ if (dis_buf)
+ ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
+ dev_err(isp->dev, "metadata buf allocation failed.\n");
+ return -EINVAL;
+ }
+ md_buf->md_vptr = hmm_vmap(md_buf->metadata->address, false);
+ }
+
+ return 0;
+}
+
+void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf)
+{
+ if (s3a_buf->s3a_data)
+ hmm_vunmap(s3a_buf->s3a_data->data_ptr);
+
+ ia_css_isp_3a_statistics_map_free(s3a_buf->s3a_map);
+ s3a_buf->s3a_map = NULL;
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+}
+
+void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf)
+{
+ if (dis_buf->dis_data)
+ hmm_vunmap(dis_buf->dis_data->data_ptr);
+
+ ia_css_isp_dvs_statistics_map_free(dis_buf->dvs_map);
+ dis_buf->dvs_map = NULL;
+ ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
+}
+
+void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf *metadata_buf)
+{
+ if (metadata_buf->md_vptr) {
+ hmm_vunmap(metadata_buf->metadata->address);
+ metadata_buf->md_vptr = NULL;
+ }
+ ia_css_metadata_free(metadata_buf->metadata);
+}
+
+void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd)
+{
+ struct atomisp_s3a_buf *s3a_buf, *_s3a_buf;
+ struct atomisp_dis_buf *dis_buf, *_dis_buf;
+ struct atomisp_metadata_buf *md_buf, *_md_buf;
+ struct ia_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned int i;
+
+ /* 3A statistics use vmalloc, DIS use kmalloc */
+ if (dvs_grid_info && dvs_grid_info->enable) {
+ ia_css_dvs2_coefficients_free(asd->params.css_param.dvs2_coeff);
+ ia_css_dvs2_statistics_free(asd->params.dvs_stat);
+ asd->params.css_param.dvs2_coeff = NULL;
+ asd->params.dvs_stat = NULL;
+ asd->params.dvs_hor_proj_bytes = 0;
+ asd->params.dvs_ver_proj_bytes = 0;
+ asd->params.dvs_hor_coef_bytes = 0;
+ asd->params.dvs_ver_coef_bytes = 0;
+ asd->params.dis_proj_data_valid = false;
+ list_for_each_entry_safe(dis_buf, _dis_buf,
+ &asd->dis_stats, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+ list_for_each_entry_safe(dis_buf, _dis_buf,
+ &asd->dis_stats_in_css, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+ }
+ if (asd->params.curr_grid_info.s3a_grid.enable) {
+ ia_css_3a_statistics_free(asd->params.s3a_user_stat);
+ asd->params.s3a_user_stat = NULL;
+ asd->params.s3a_output_bytes = 0;
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats_in_css, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats_ready, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ }
+
+ if (asd->params.css_param.dvs_6axis) {
+ ia_css_dvs2_6axis_config_free(asd->params.css_param.dvs_6axis);
+ asd->params.css_param.dvs_6axis = NULL;
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata_in_css[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata_ready[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ asd->params.metadata_width_size = 0;
+ atomisp_free_metadata_output_buf(asd);
+}
+
+int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct ia_css_pipe_info p_info;
+ struct ia_css_grid_info old_info;
+ struct atomisp_device *isp = asd->isp;
+ int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_config.metadata_config.resolution.width;
+
+ memset(&p_info, 0, sizeof(struct ia_css_pipe_info));
+ memset(&old_info, 0, sizeof(struct ia_css_grid_info));
+
+ if (ia_css_pipe_get_info(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id],
+ &p_info) != 0) {
+ dev_err(isp->dev, "ia_css_pipe_get_info failed\n");
+ return -EINVAL;
+ }
+
+ memcpy(&old_info, &asd->params.curr_grid_info,
+ sizeof(struct ia_css_grid_info));
+ memcpy(&asd->params.curr_grid_info, &p_info.grid_info,
+ sizeof(struct ia_css_grid_info));
+ /*
+ * Record which css pipe enables s3a_grid.
+ * Currently only one css pipe should need it
+ */
+ if (asd->params.curr_grid_info.s3a_grid.enable) {
+ if (asd->params.s3a_enabled_pipe != IA_CSS_PIPE_ID_NUM)
+ dev_dbg(isp->dev, "css pipe %d enabled s3a grid replaced by: %d.\n",
+ asd->params.s3a_enabled_pipe, pipe_id);
+ asd->params.s3a_enabled_pipe = pipe_id;
+ }
+
+ /* If the grid info has not changed and the 3A and DIS statistics
+ * buffers are already allocated, or the buffer size would be zero,
+ * then there is no need to do anything. */
+ if (((!memcmp(&old_info, &asd->params.curr_grid_info, sizeof(old_info))
+ && asd->params.s3a_user_stat && asd->params.dvs_stat)
+ || asd->params.curr_grid_info.s3a_grid.width == 0
+ || asd->params.curr_grid_info.s3a_grid.height == 0)
+ && asd->params.metadata_width_size == md_width) {
+ dev_dbg(isp->dev,
+ "grid info change escape. memcmp=%d, s3a_user_stat=%d,dvs_stat=%d, s3a.width=%d, s3a.height=%d, metadata width =%d\n",
+ !memcmp(&old_info, &asd->params.curr_grid_info,
+ sizeof(old_info)),
+ !!asd->params.s3a_user_stat, !!asd->params.dvs_stat,
+ asd->params.curr_grid_info.s3a_grid.width,
+ asd->params.curr_grid_info.s3a_grid.height,
+ asd->params.metadata_width_size);
+ return -EINVAL;
+ }
+ asd->params.metadata_width_size = md_width;
+
+ return 0;
+}
+
+int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd)
+{
+ if (!asd->params.curr_grid_info.s3a_grid.width ||
+ !asd->params.curr_grid_info.s3a_grid.height)
+ return 0;
+
+ asd->params.s3a_user_stat = ia_css_3a_statistics_allocate(
+ &asd->params.curr_grid_info.s3a_grid);
+ if (!asd->params.s3a_user_stat)
+ return -ENOMEM;
+ /* 3A statistics. These can be big, so we use vmalloc. */
+ asd->params.s3a_output_bytes =
+ asd->params.curr_grid_info.s3a_grid.width *
+ asd->params.curr_grid_info.s3a_grid.height *
+ sizeof(*asd->params.s3a_user_stat->data);
+
+ return 0;
+}
+
+int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd)
+{
+ struct ia_css_dvs_grid_info *dvs_grid =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (!dvs_grid)
+ return 0;
+
+ if (!dvs_grid->enable) {
+ dev_dbg(asd->isp->dev, "%s: dvs_grid not enabled.\n", __func__);
+ return 0;
+ }
+
+ /* DIS coefficients. */
+ asd->params.css_param.dvs2_coeff = ia_css_dvs2_coefficients_allocate(
+ dvs_grid);
+ if (!asd->params.css_param.dvs2_coeff)
+ return -ENOMEM;
+
+ asd->params.dvs_hor_coef_bytes = dvs_grid->num_hor_coefs *
+ sizeof(*asd->params.css_param.dvs2_coeff->hor_coefs.odd_real);
+
+ asd->params.dvs_ver_coef_bytes = dvs_grid->num_ver_coefs *
+ sizeof(*asd->params.css_param.dvs2_coeff->ver_coefs.odd_real);
+
+ /* DIS projections. */
+ asd->params.dis_proj_data_valid = false;
+ asd->params.dvs_stat = ia_css_dvs2_statistics_allocate(dvs_grid);
+ if (!asd->params.dvs_stat)
+ return -ENOMEM;
+
+ asd->params.dvs_hor_proj_bytes =
+ dvs_grid->aligned_height * dvs_grid->aligned_width *
+ sizeof(*asd->params.dvs_stat->hor_prod.odd_real);
+
+ asd->params.dvs_ver_proj_bytes =
+ dvs_grid->aligned_height * dvs_grid->aligned_width *
+ sizeof(*asd->params.dvs_stat->ver_prod.odd_real);
+
+ return 0;
+}
+
+int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd)
+{
+ int i;
+
+ /* We allocate the cpu-side buffer used for communication with user
+ * space */
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ asd->params.metadata_user[i] = kvmalloc(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_info.metadata_info.size, GFP_KERNEL);
+ if (!asd->params.metadata_user[i]) {
+ while (--i >= 0) {
+ kvfree(asd->params.metadata_user[i]);
+ asd->params.metadata_user[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ if (asd->params.metadata_user[i]) {
+ kvfree(asd->params.metadata_user[i]);
+ asd->params.metadata_user[i] = NULL;
+ }
+ }
+}
+
+void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
+ struct atomisp_css_event *current_event)
+{
+ /*
+ * FIXME!
+ * Pipe ID reported in CSS event is not correct for new system's
+ * copy pipe.
+ * VIED BZ: 1463
+ */
+ ia_css_temp_pipe_to_pipe_id(current_event->event.pipe,
+ &current_event->pipe);
+ if (asd && asd->copy_mode &&
+ current_event->pipe == IA_CSS_PIPE_ID_CAPTURE)
+ current_event->pipe = IA_CSS_PIPE_ID_COPY;
+}
+
+int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ if (isys_stream >= IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH)
+ return -EINVAL;
+
+ s_config->isys_config[isys_stream].input_res.width = ffmt->width;
+ s_config->isys_config[isys_stream].input_res.height = ffmt->height;
+ return 0;
+}
+
+int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->input_config.input_res.width = ffmt->width;
+ s_config->input_config.input_res.height = ffmt->height;
+ return 0;
+}
+
+void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int bin_factor)
+{
+ asd->stream_env[stream_id]
+ .stream_config.sensor_binning_factor = bin_factor;
+}
+
+void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_bayer_order bayer_order)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ s_config->input_config.bayer_order = bayer_order;
+}
+
+void atomisp_css_isys_set_link(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ int link,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].linked_isys_stream_id = link;
+}
+
+void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ bool valid,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].valid = valid;
+}
+
+void atomisp_css_isys_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].format = format;
+}
+
+void atomisp_css_input_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format format)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->input_config.format = format;
+}
+
+int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ int i;
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ /*
+ * Set all isys configs to not valid.
+ * Currently we support only one stream per channel
+ */
+ for (i = IA_CSS_STREAM_ISYS_STREAM_0;
+ i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++)
+ s_config->isys_config[i].valid = false;
+
+ atomisp_css_isys_set_resolution(asd, stream_id, ffmt,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_format(asd, stream_id,
+ s_config->input_config.format,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_link(asd, stream_id, NO_LINK,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_valid(asd, stream_id, true,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+
+ return 0;
+}
+
+void atomisp_css_isys_two_stream_cfg_update_stream1(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width =
+ width;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height =
+ height;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format =
+ input_format;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].valid = true;
+}
+
+void atomisp_css_isys_two_stream_cfg_update_stream2(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_input_format input_format,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width =
+ width;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height =
+ height;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id
+ = IA_CSS_STREAM_ISYS_STREAM_0;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format =
+ input_format;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true;
+}
+
+int atomisp_css_input_set_effective_resolution(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ s_config->input_config.effective_res.width = width;
+ s_config->input_config.effective_res.height = height;
+ return 0;
+}
+
+void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd,
+ unsigned int dvs_w, unsigned int dvs_h)
+{
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.width = dvs_w;
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.height = dvs_h;
+}
+
+void atomisp_css_input_set_two_pixels_per_clock(
+ struct atomisp_sub_device *asd,
+ bool two_ppc)
+{
+ int i;
+
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.pixels_per_clock == (two_ppc ? 2 : 1))
+ return;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.pixels_per_clock = (two_ppc ? 2 : 1);
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[i] = true;
+}
+
+void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable)
+{
+ int i;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[i].enable_dz = enable;
+}
+
+void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd,
+ enum ia_css_capture_mode mode)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ if (stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
+ .default_capture_config.mode == mode)
+ return;
+
+ stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ default_capture_config.mode = mode;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+}
+
+void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
+ enum ia_css_input_mode mode)
+{
+ unsigned int size_mem_words;
+ int i;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
+ asd->stream_env[i].stream_config.mode = mode;
+
+ if (mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ return;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ /*
+ * TODO: sensor needs to export the embedded_data_size_words
+ * information to atomisp for each setting.
+ * Here using a large safe value.
+ */
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[i].stream_config;
+
+ if (s_config->input_config.input_res.width == 0)
+ continue;
+
+ if (ia_css_mipi_frame_calculate_size(
+ s_config->input_config.input_res.width,
+ s_config->input_config.input_res.height,
+ s_config->input_config.format,
+ true,
+ 0x13000,
+ &size_mem_words) != 0) {
+ if (IS_MRFD)
+ size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2;
+ else
+ size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1;
+ dev_warn(asd->isp->dev,
+ "ia_css_mipi_frame_calculate_size failed,applying pre-defined MIPI buffer size %u.\n",
+ size_mem_words);
+ }
+ s_config->mipi_buffer_config.size_mem_words = size_mem_words;
+ s_config->mipi_buffer_config.nof_mipi_buffers = 2;
+ }
+}
+
+void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+
+ if (stream_env->stream_config.online == !!enable)
+ return;
+
+ stream_env->stream_config.online = !!enable;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+}
+
+void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ int i;
+
+ if (stream_env->stream_config.online != !!enable) {
+ stream_env->stream_config.online = !!enable;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+int atomisp_css_input_configure_port(
+ struct atomisp_sub_device *asd,
+ enum mipi_port_id port,
+ unsigned int num_lanes,
+ unsigned int timeout,
+ unsigned int mipi_freq,
+ enum atomisp_input_format metadata_format,
+ unsigned int metadata_width,
+ unsigned int metadata_height)
+{
+ int i;
+ struct atomisp_stream_env *stream_env;
+ /*
+ * Calculate rx_count as follows:
+ * Input: mipi_freq : CSI-2 bus frequency in Hz
+ * UI = 1 / (2 * mipi_freq) : period of one bit on the bus
+ * min = 85e-9 + 6 * UI : Limits for rx_count in seconds
+ * max = 145e-9 + 10 * UI
+ * rxcount0 = min / (4 / mipi_freq) : convert seconds to byte clocks
+ * rxcount = rxcount0 - 2 : adjust for better results
+ * The formula below is a simplified version of the above, using
+ * 10-bit fixed point for improved accuracy.
+ */
+ const unsigned int rxcount =
+ min(((mipi_freq / 46000) - 1280) >> 10, 0xffU) * 0x01010101U;
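+ /*
+ * Worked example (illustrative value only, not taken from a real
+ * sensor configuration): for an assumed mipi_freq of 400 MHz,
+ * 400000000 / 46000 = 8695; 8695 - 1280 = 7415; 7415 >> 10 = 7,
+ * giving rxcount = 0x07070707.  The exact formula gives
+ * (85 ns + 6 * 1.25 ns) / (10 ns per byte clock) - 2 = 7.25,
+ * which truncates to the same 7.
+ */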
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ stream_env = &asd->stream_env[i];
+ stream_env->stream_config.source.port.port = port;
+ stream_env->stream_config.source.port.num_lanes = num_lanes;
+ stream_env->stream_config.source.port.timeout = timeout;
+ if (mipi_freq)
+ stream_env->stream_config.source.port.rxcount = rxcount;
+ stream_env->stream_config.metadata_config.data_type =
+ metadata_format;
+ stream_env->stream_config.metadata_config.resolution.width =
+ metadata_width;
+ stream_env->stream_config.metadata_config.resolution.height =
+ metadata_height;
+ }
+
+ return 0;
+}
+
+void atomisp_css_stop(struct atomisp_sub_device *asd, bool in_reset)
+{
+ unsigned long irqflags;
+ unsigned int i;
+
+ /*
+ * CSS 2.0 API limitation: ia_css_stop_sp() can only be called after
+ * destroying all pipes.
+ */
+ atomisp_destroy_pipes_stream(asd);
+
+ atomisp_init_raw_buffer_bitmap(asd);
+
+ ia_css_stop_sp();
+
+ if (!in_reset) {
+ struct atomisp_stream_env *stream_env;
+ int i, j;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ stream_env = &asd->stream_env[i];
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ ia_css_pipe_config_defaults(
+ &stream_env->pipe_configs[j]);
+ ia_css_pipe_extra_config_defaults(
+ &stream_env->pipe_extra_configs[j]);
+ }
+ ia_css_stream_config_defaults(
+ &stream_env->stream_config);
+ }
+ memset(&asd->params.config, 0, sizeof(asd->params.config));
+ asd->params.css_update_params_needed = false;
+ }
+
+ /* move stats buffers to free queue list */
+ list_splice_init(&asd->s3a_stats_in_css, &asd->s3a_stats);
+ list_splice_init(&asd->s3a_stats_ready, &asd->s3a_stats);
+
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ list_splice_init(&asd->dis_stats_in_css, &asd->dis_stats);
+ asd->params.dis_proj_data_valid = false;
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_splice_init(&asd->metadata_in_css[i], &asd->metadata[i]);
+ list_splice_init(&asd->metadata_ready[i], &asd->metadata[i]);
+ }
+
+ atomisp_flush_params_queue(&asd->video_out);
+ atomisp_free_css_parameters(&asd->params.css_param);
+ memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
+}
+
+void atomisp_css_continuous_set_num_raw_frames(
+ struct atomisp_sub_device *asd,
+ int num_frames)
+{
+ if (asd->enable_raw_buffer_lock->val) {
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ asd->params.video_dis_en)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ } else {
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
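+ /*
+ * Note: when raw buffer locking, video run mode and DIS are all
+ * enabled, ATOMISP_CSS2_NUM_DVS_FRAME_DELAY is added both in the
+ * branch above and again below, i.e. twice in total.
+ */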
+ if (asd->params.video_dis_en)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.target_num_cont_raw_buf = num_frames;
+}
+
+static enum ia_css_pipe_mode __pipe_id_to_pipe_mode(
+ struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct camera_mipi_info *mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].sensor);
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_COPY:
+ /* Currently only YUVPP mode supports YUV420_Legacy format.
+ * Revert this when other pipe modes can support
+ * YUV420_Legacy format.
+ */
+ if (mipi_info && mipi_info->input_format ==
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY)
+ return IA_CSS_PIPE_MODE_YUVPP;
+ return IA_CSS_PIPE_MODE_COPY;
+ case IA_CSS_PIPE_ID_PREVIEW:
+ return IA_CSS_PIPE_MODE_PREVIEW;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ return IA_CSS_PIPE_MODE_CAPTURE;
+ case IA_CSS_PIPE_ID_VIDEO:
+ return IA_CSS_PIPE_MODE_VIDEO;
+ case IA_CSS_PIPE_ID_YUVPP:
+ return IA_CSS_PIPE_MODE_YUVPP;
+ default:
+ WARN_ON(1);
+ return IA_CSS_PIPE_MODE_PREVIEW;
+ }
+}
+
+static void __configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ struct ia_css_stream_config *s_config = &stream_env->stream_config;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].output_info[0].padded_width = min_width;
+
+ /* ISP binary 2.2 specific setting */
+ if (width > s_config->input_config.effective_res.width ||
+ height > s_config->input_config.effective_res.height) {
+ s_config->input_config.effective_res.width = width;
+ s_config->input_config.effective_res.height = height;
+ }
+
+ dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+/*
+ * For CSS2.1, capture pipe uses capture_pp_in_res to configure yuv
+ * downscaling input resolution.
+ */
+static void __configure_capture_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
+
+ if (width == 0 && height == 0)
+ return;
+
+ if (width * 9 / 10 < pipe_configs->output_info[0].res.width ||
+ height * 9 / 10 < pipe_configs->output_info[0].res.height)
+ return;
+ /* here just copy the calculation in css */
+ hor_ds_factor = CEIL_DIV(width >> 1,
+ pipe_configs->output_info[0].res.width);
+ ver_ds_factor = CEIL_DIV(height >> 1,
+ pipe_configs->output_info[0].res.height);
+
+ if ((asd->isp->media_dev.hw_revision <
+ (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) ||
+ IS_CHT) && hor_ds_factor != ver_ds_factor) {
+ dev_warn(asd->isp->dev,
+ "Cropping for capture due to FW limitation");
+ return;
+ }
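+ /*
+ * Worked example (illustrative numbers only): with a 4096x3072 input
+ * and a 2560x1440 capture output, CEIL_DIV(2048, 2560) = 1 but
+ * CEIL_DIV(1536, 1440) = 2, so the factors differ and, on pre-ISP2401
+ * or Cherry Trail hardware, the check above warns and returns without
+ * enabling YUV downscaling.
+ */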
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ pipe_extra_configs->enable_yuv_ds = true;
+
+ pipe_configs->capt_pp_in_res.width =
+ stream_config->input_config.effective_res.width;
+ pipe_configs->capt_pp_in_res.height =
+ stream_config->input_config.effective_res.height;
+
+ dev_dbg(isp->dev, "configuring pipe[%d]capture pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+/*
+ * For CSS2.1, the preview pipe can support bayer downscaling, yuv decimation
+ * and yuv downscaling, which need additional configuration.
+ */
+static void __configure_preview_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int out_width, out_height, yuv_ds_in_width, yuv_ds_in_height;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ struct ia_css_resolution *bayer_ds_out_res =
+ &pipe_configs->bayer_ds_out_res;
+ struct ia_css_resolution *vf_pp_in_res =
+ &pipe_configs->vf_pp_in_res;
+ struct ia_css_resolution *effective_res =
+ &stream_config->input_config.effective_res;
+
+ static const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
+ /*
+ * BZ201033: YUV decimation factor of 4 causes a couple of the rightmost
+ * columns to be shaded. Remove this factor to work around the CSS bug.
+ * const unsigned int yuv_dec_fct[] = {4, 2};
+ */
+ static const unsigned int yuv_dec_fct[] = { 2 };
+ unsigned int i;
+
+ if (width == 0 && height == 0)
+ return;
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ out_width = pipe_configs->output_info[0].res.width;
+ out_height = pipe_configs->output_info[0].res.height;
+
+ /*
+ * The ISP could do bayer downscaling, yuv decimation and yuv
+ * downscaling:
+ * 1: Bayer Downscaling: between effective resolution and
+ * bayer_ds_res_out;
+ * 2: YUV Decimation: between bayer_ds_res_out and vf_pp_in_res;
+ * 3: YUV Downscaling: between vf_pp_in_res and final vf output
+ *
+ * Rule for Bayer Downscaling: support factor 2, 1.5 and 1.25
+ * Rule for YUV Decimation: support factor 2, 4
+ * Rule for YUV Downscaling: arbitrary value below 2
+ *
+ * General rule of factor distribution among these stages:
+ * 1: try to do Bayer downscaling first if not in online mode.
+ * 2: try to do a maximum of 2 for YUV downscaling
+ * 3: use the remaining factor for YUV decimation
+ *
+ * Note:
+ * Do not configure bayer_ds_out_res if:
+ * online == 1 or continuous == 0 or raw_binning == 0
+ */
+ if (stream_config->online || !stream_config->continuous ||
+ !pipe_extra_configs->enable_raw_binning) {
+ bayer_ds_out_res->width = 0;
+ bayer_ds_out_res->height = 0;
+ } else {
+ bayer_ds_out_res->width = effective_res->width;
+ bayer_ds_out_res->height = effective_res->height;
+
+ for (i = 0; i < ARRAY_SIZE(bds_fct); i++) {
+ if (effective_res->width >= out_width *
+ bds_fct[i].numerator / bds_fct[i].denominator &&
+ effective_res->height >= out_height *
+ bds_fct[i].numerator / bds_fct[i].denominator) {
+ bayer_ds_out_res->width =
+ effective_res->width *
+ bds_fct[i].denominator /
+ bds_fct[i].numerator;
+ bayer_ds_out_res->height =
+ effective_res->height *
+ bds_fct[i].denominator /
+ bds_fct[i].numerator;
+ break;
+ }
+ }
+ }
+ /*
+ * calculate YUV decimation and YUV downscaling factors:
+ * YUV Downscaling factor must not exceed 2.
+ * YUV Decimation factor could be 2, 4.
+ */
+ /* first decide the yuv_ds input resolution */
+ if (bayer_ds_out_res->width == 0) {
+ yuv_ds_in_width = effective_res->width;
+ yuv_ds_in_height = effective_res->height;
+ } else {
+ yuv_ds_in_width = bayer_ds_out_res->width;
+ yuv_ds_in_height = bayer_ds_out_res->height;
+ }
+
+ vf_pp_in_res->width = yuv_ds_in_width;
+ vf_pp_in_res->height = yuv_ds_in_height;
+
+ /* find out the yuv decimation factor */
+ for (i = 0; i < ARRAY_SIZE(yuv_dec_fct); i++) {
+ if (yuv_ds_in_width >= out_width * yuv_dec_fct[i] &&
+ yuv_ds_in_height >= out_height * yuv_dec_fct[i]) {
+ vf_pp_in_res->width = yuv_ds_in_width / yuv_dec_fct[i];
+ vf_pp_in_res->height = yuv_ds_in_height / yuv_dec_fct[i];
+ break;
+ }
+ }
+
+ if (vf_pp_in_res->width == out_width &&
+ vf_pp_in_res->height == out_height) {
+ pipe_extra_configs->enable_yuv_ds = false;
+ vf_pp_in_res->width = 0;
+ vf_pp_in_res->height = 0;
+ } else {
+ pipe_extra_configs->enable_yuv_ds = true;
+ }
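+ /*
+ * Worked example (illustrative numbers only, continuous mode with raw
+ * binning enabled assumed): with a 3264x2448 effective resolution and
+ * a 1280x720 preview output, the 2:1 bayer factor applies first
+ * (bayer_ds_out_res = 1632x1224); since 1632 < 2 * 1280 no YUV
+ * decimation is done and vf_pp_in_res stays 1632x1224, leaving a
+ * ~1.28x YUV downscaling step with enable_yuv_ds set above.
+ */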
+
+ dev_dbg(isp->dev, "configuring pipe[%d]preview pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+/*
+ * For CSS2.1, the offline video pipe can support bayer decimation and
+ * yuv downscaling, which need additional configuration.
+ */
+static void __configure_video_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int out_width, out_height;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ struct ia_css_resolution *bayer_ds_out_res =
+ &pipe_configs->bayer_ds_out_res;
+ struct ia_css_resolution *effective_res =
+ &stream_config->input_config.effective_res;
+
+ static const struct bayer_ds_factor bds_factors[] = {
+ {8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2}
+ };
+ unsigned int i;
+
+ if (width == 0 && height == 0)
+ return;
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ pipe_extra_configs->enable_yuv_ds = false;
+
+ /*
+ * If DVS is enabled, the video binary will take care of the dvs
+ * envelope and usually bayer_ds_out_res should be larger than 120% of
+ * the destination resolution; the extra 20% is cropped as the DVS
+ * envelope. If bayer_ds_out_res is less than 120% of the destination,
+ * the ISP can still work, but DVS quality is not good.
+ */
+ /* taking at least 10% as envelope */
+ if (asd->params.video_dis_en) {
+ out_width = pipe_configs->output_info[0].res.width * 110 / 100;
+ out_height = pipe_configs->output_info[0].res.height * 110 / 100;
+ } else {
+ out_width = pipe_configs->output_info[0].res.width;
+ out_height = pipe_configs->output_info[0].res.height;
+ }
+
+ /*
+ * calculate the bayer decimation factor:
+ * 1: only 1.5, 2, 3, 4, 6 and 8 are supported (see bds_factors below)
+ * 2: do not configure bayer_ds_out_res if:
+ * online == 1 or continuous == 0 or raw_binning == 0
+ */
+ if (stream_config->online || !stream_config->continuous) {
+ bayer_ds_out_res->width = 0;
+ bayer_ds_out_res->height = 0;
+ goto done;
+ }
+
+ pipe_extra_configs->enable_raw_binning = true;
+ bayer_ds_out_res->width = effective_res->width;
+ bayer_ds_out_res->height = effective_res->height;
+
+ for (i = 0; i < ARRAY_SIZE(bds_factors); i++) {
+ if (effective_res->width >= out_width *
+ bds_factors[i].numerator / bds_factors[i].denominator &&
+ effective_res->height >= out_height *
+ bds_factors[i].numerator / bds_factors[i].denominator) {
+ bayer_ds_out_res->width = effective_res->width *
+ bds_factors[i].denominator /
+ bds_factors[i].numerator;
+ bayer_ds_out_res->height = effective_res->height *
+ bds_factors[i].denominator /
+ bds_factors[i].numerator;
+ break;
+ }
+ }
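+ /*
+ * Worked example (illustrative numbers only): with a 3264x2448
+ * effective resolution, a 1920x1080 video output and DVS enabled,
+ * out_width/out_height become 2112x1188 (110%); the first factor in
+ * bds_factors that still fits is 3/2, giving a bayer_ds_out_res of
+ * 2176x1632.
+ */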
+
+	/*
+	 * DVS is cropped from the BDS output, so we do not really need to set
+	 * the envelope to 20% of the output resolution here. Always set it to
+	 * 12x12 per the firmware requirement.
+	 */
+ pipe_configs->dvs_envelope.width = 12;
+ pipe_configs->dvs_envelope.height = 12;
+
+done:
+ if (pipe_id == IA_CSS_PIPE_ID_YUVPP)
+ stream_config->left_padding = -1;
+ else
+ stream_config->left_padding = 12;
+ dev_dbg(isp->dev, "configuring pipe[%d]video pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+static void __configure_vf_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width =
+ min_width;
+ dev_dbg(isp->dev,
+ "configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+static int __get_frame_info(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct ia_css_frame_info *info,
+ enum frame_info_type type,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int ret;
+ struct ia_css_pipe_info p_info;
+
+ /* FIXME! No need to destroy/recreate all streams */
+ ret = atomisp_css_update_stream(asd);
+ if (ret)
+ return ret;
+
+ ret = ia_css_pipe_get_info(asd->stream_env[stream_index].pipes[pipe_id],
+ &p_info);
+ if (ret) {
+ dev_err(isp->dev, "can't get info from pipe\n");
+ goto get_info_err;
+ }
+
+ switch (type) {
+ case ATOMISP_CSS_VF_FRAME:
+ *info = p_info.vf_output_info[0];
+ dev_dbg(isp->dev, "getting vf frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_VF_FRAME:
+ *info = p_info.vf_output_info[1];
+ dev_dbg(isp->dev, "getting second vf frame info.\n");
+ break;
+ case ATOMISP_CSS_OUTPUT_FRAME:
+ *info = p_info.output_info[0];
+ dev_dbg(isp->dev, "getting main frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_OUTPUT_FRAME:
+ *info = p_info.output_info[1];
+ dev_dbg(isp->dev, "getting second main frame info.\n");
+ break;
+ default:
+ case ATOMISP_CSS_RAW_FRAME:
+ *info = p_info.raw_output_info;
+ dev_dbg(isp->dev, "getting raw frame info.\n");
+ break;
+ }
+ dev_dbg(isp->dev, "get frame info: w=%d, h=%d, num_invalid_frames %d.\n",
+ info->res.width, info->res.height, p_info.num_invalid_frames);
+
+ return 0;
+
+get_info_err:
+ atomisp_destroy_pipes_stream(asd);
+ return -EINVAL;
+}
+
+static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd)
+{
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_VIDEO:
+ return IA_CSS_PIPE_ID_VIDEO;
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
+ return IA_CSS_PIPE_ID_CAPTURE;
+ case ATOMISP_RUN_MODE_PREVIEW:
+ return IA_CSS_PIPE_ID_PREVIEW;
+ }
+
+	dev_warn(asd->isp->dev, "cannot determine pipe-index, returning default preview pipe\n");
+ return IA_CSS_PIPE_ID_PREVIEW;
+}
+
+int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *frame_info)
+{
+ struct ia_css_pipe_info info;
+ int pipe_index = atomisp_get_pipe_index(asd);
+ int stream_index;
+ struct atomisp_device *isp = asd->isp;
+
+ stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ?
+ ATOMISP_INPUT_STREAM_VIDEO :
+ ATOMISP_INPUT_STREAM_GENERAL;
+
+ if (0 != ia_css_pipe_get_info(asd->stream_env[stream_index]
+ .pipes[pipe_index], &info)) {
+ dev_dbg(isp->dev, "ia_css_pipe_get_info FAILED");
+ return -EINVAL;
+ }
+
+ *frame_info = info.output_info[0];
+ return 0;
+}
+
+int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum ia_css_frame_format format)
+{
+ asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_COPY].
+ default_capture_config.mode =
+ IA_CSS_CAPTURE_MODE_RAW;
+
+ __configure_output(asd, stream_index, width, height, padded_width,
+ format, IA_CSS_PIPE_ID_COPY);
+ return 0;
+}
+
+int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format)
+{
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_PREVIEW);
+ return 0;
+}
+
+int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format)
+{
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format)
+{
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_VIDEO);
+ return 0;
+}
+
+int atomisp_css_video_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format)
+{
+ __configure_vf_output(asd, width, height, min_width, format,
+ IA_CSS_PIPE_ID_VIDEO);
+ return 0;
+}
+
+int atomisp_css_capture_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format)
+{
+ __configure_vf_output(asd, width, height, min_width, format, IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_video_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info)
+{
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_VIDEO);
+}
+
+int atomisp_css_capture_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info)
+{
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_CAPTURE);
+}
+
+int atomisp_css_copy_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct ia_css_frame_info *info)
+{
+ return __get_frame_info(asd, stream_index, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_COPY);
+}
+
+int atomisp_css_preview_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info)
+{
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_PREVIEW);
+}
+
+int atomisp_css_capture_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info)
+{
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_CAPTURE);
+}
+
+int atomisp_css_video_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct ia_css_frame_info *info)
+{
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_VIDEO);
+}
+
+int atomisp_css_preview_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ __configure_preview_pp_input(asd, width, height, IA_CSS_PIPE_ID_PREVIEW);
+
+ if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ capt_pp_in_res.width)
+ __configure_capture_pp_input(asd, width, height, IA_CSS_PIPE_ID_CAPTURE);
+
+ return 0;
+}
+
+int atomisp_css_capture_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ __configure_capture_pp_input(asd, width, height, IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_video_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ __configure_video_pp_input(asd, width, height, IA_CSS_PIPE_ID_VIDEO);
+
+ if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ capt_pp_in_res.width)
+ __configure_capture_pp_input(asd, width, height, IA_CSS_PIPE_ID_CAPTURE);
+
+ return 0;
+}
+
+int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd,
+ int num_captures, unsigned int skip, int offset)
+{
+ int ret;
+
+ dev_dbg(asd->isp->dev, "%s num_capture:%d skip:%d offset:%d\n",
+ __func__, num_captures, skip, offset);
+
+ ret = ia_css_stream_capture(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ num_captures, skip, offset);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id)
+{
+ int ret;
+
+ ret = ia_css_stream_capture_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ exp_id);
+ if (ret == -ENOBUFS) {
+ /* capture cmd queue is full */
+ return -EBUSY;
+ } else if (ret) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id)
+{
+ int ret;
+
+ ret = ia_css_unlock_raw_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ exp_id);
+ if (ret == -ENOBUFS)
+ return -EAGAIN;
+ else if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
+ .default_capture_config.enable_xnr = enable;
+ asd->params.capture_config.enable_xnr = enable;
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+
+ return 0;
+}
+
+void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd,
+ struct ia_css_ctc_table *ctc_table)
+{
+ int i;
+ u16 *vamem_ptr = ctc_table->data.vamem_1;
+ int data_size = IA_CSS_VAMEM_1_CTC_TABLE_SIZE;
+ bool valid = false;
+
+ /* workaround: if ctc_table is all 0, do not apply it */
+ if (ctc_table->vamem_type == IA_CSS_VAMEM_TYPE_2) {
+ vamem_ptr = ctc_table->data.vamem_2;
+ data_size = IA_CSS_VAMEM_2_CTC_TABLE_SIZE;
+ }
+
+ for (i = 0; i < data_size; i++) {
+ if (*(vamem_ptr + i)) {
+ valid = true;
+ break;
+ }
+ }
+
+ if (valid)
+ asd->params.config.ctc_table = ctc_table;
+ else
+ dev_warn(asd->isp->dev, "Bypass the invalid ctc_table.\n");
+}
+
+void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd,
+ struct ia_css_anr_thres *anr_thres)
+{
+ asd->params.config.anr_thres = anr_thres;
+}
+
+void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd,
+ struct ia_css_dvs_6axis_config *dvs_6axis)
+{
+ asd->params.config.dvs_6axis_config = dvs_6axis;
+}
+
+void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector)
+{
+ if (!asd->params.config.motion_vector)
+ asd->params.config.motion_vector = &asd->params.css_param.motion_vector;
+
+ memset(asd->params.config.motion_vector,
+ 0, sizeof(struct ia_css_vector));
+ asd->params.css_param.motion_vector.x = vector->x;
+ asd->params.css_param.motion_vector.y = vector->y;
+}
+
+static int atomisp_compare_dvs_grid(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_grid_info *atomgrid)
+{
+ struct ia_css_dvs_grid_info *cur =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (!cur) {
+ dev_err(asd->isp->dev, "dvs grid not available!\n");
+ return -EINVAL;
+ }
+
+ if (sizeof(*cur) != sizeof(*atomgrid)) {
+ dev_err(asd->isp->dev, "dvs grid mismatch!\n");
+ return -EINVAL;
+ }
+
+ if (!cur->enable) {
+ dev_err(asd->isp->dev, "dvs not enabled!\n");
+ return -EINVAL;
+ }
+
+ return memcmp(atomgrid, cur, sizeof(*cur));
+}
+
+void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs)
+{
+ asd->params.config.dvs2_coefs = coefs;
+}
+
+int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs)
+{
+ if (atomisp_compare_dvs_grid(asd, &coefs->grid_info) != 0)
+		/*
+		 * If the grid info in the argument differs from the current
+		 * grid info, we tell the caller to reset the grid size and
+		 * try again.
+		 */
+ return -EAGAIN;
+
+ if (!coefs->hor_coefs.odd_real ||
+ !coefs->hor_coefs.odd_imag ||
+ !coefs->hor_coefs.even_real ||
+ !coefs->hor_coefs.even_imag ||
+ !coefs->ver_coefs.odd_real ||
+ !coefs->ver_coefs.odd_imag ||
+ !coefs->ver_coefs.even_real ||
+ !coefs->ver_coefs.even_imag ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.odd_real ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.even_real ||
+ !asd->params.css_param.dvs2_coeff->hor_coefs.even_imag ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.odd_real ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.even_real ||
+ !asd->params.css_param.dvs2_coeff->ver_coefs.even_imag)
+ return -EINVAL;
+
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_real,
+ coefs->hor_coefs.odd_real, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag,
+ coefs->hor_coefs.odd_imag, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_real,
+ coefs->hor_coefs.even_real, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_imag,
+ coefs->hor_coefs.even_imag, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_real,
+ coefs->ver_coefs.odd_real, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag,
+ coefs->ver_coefs.odd_imag, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_real,
+ coefs->ver_coefs.even_real, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_imag,
+ coefs->ver_coefs.even_imag, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+
+ asd->params.css_param.update_flag.dvs2_coefs =
+ (struct atomisp_dis_coefficients *)
+ asd->params.css_param.dvs2_coeff;
+ /* FIXME! */
+ /* asd->params.dis_proj_data_valid = false; */
+ asd->params.css_update_params_needed = true;
+
+ return 0;
+}
+
+void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int zoom)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (zoom == asd->params.css_param.dz_config.dx &&
+ zoom == asd->params.css_param.dz_config.dy) {
+ dev_dbg(isp->dev, "same zoom scale. skipped.\n");
+ return;
+ }
+
+ memset(&asd->params.css_param.dz_config, 0,
+ sizeof(struct ia_css_dz_config));
+ asd->params.css_param.dz_config.dx = zoom;
+ asd->params.css_param.dz_config.dy = zoom;
+
+ asd->params.css_param.update_flag.dz_config =
+ (struct atomisp_dz_config *)&asd->params.css_param.dz_config;
+ asd->params.css_update_params_needed = true;
+}
+
+void atomisp_css_set_formats_config(struct atomisp_sub_device *asd,
+ struct ia_css_formats_config *formats_config)
+{
+ asd->params.config.formats_config = formats_config;
+}
+
+int atomisp_css_get_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_wb_config *config)
+{
+ struct ia_css_wb_config wb_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&wb_config, 0, sizeof(struct ia_css_wb_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.wb_config = &wb_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &wb_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_ob_config *config)
+{
+ struct ia_css_ob_config ob_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&ob_config, 0, sizeof(struct ia_css_ob_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ob_config = &ob_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &ob_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_dp_config *config)
+{
+ struct ia_css_dp_config dp_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&dp_config, 0, sizeof(struct ia_css_dp_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.dp_config = &dp_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &dp_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_de_config *config)
+{
+ struct ia_css_de_config de_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&de_config, 0, sizeof(struct ia_css_de_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.de_config = &de_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &de_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_nr_config *config)
+{
+ struct ia_css_nr_config nr_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&nr_config, 0, sizeof(struct ia_css_nr_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+
+ isp_config.nr_config = &nr_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &nr_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_ee_config *config)
+{
+ struct ia_css_ee_config ee_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&ee_config, 0, sizeof(struct ia_css_ee_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ee_config = &ee_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &ee_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_tnr_config *config)
+{
+ struct ia_css_tnr_config tnr_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&tnr_config, 0, sizeof(struct ia_css_tnr_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.tnr_config = &tnr_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &tnr_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_ctc_table *config)
+{
+ struct ia_css_ctc_table *tab;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ tab = vzalloc(sizeof(struct ia_css_ctc_table));
+ if (!tab)
+ return -ENOMEM;
+
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ctc_table = tab;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, tab, sizeof(*tab));
+ vfree(tab);
+
+ return 0;
+}
+
+int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_gamma_table *config)
+{
+ struct ia_css_gamma_table *tab;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ tab = vzalloc(sizeof(struct ia_css_gamma_table));
+ if (!tab)
+ return -ENOMEM;
+
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.gamma_table = tab;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, tab, sizeof(*tab));
+ vfree(tab);
+
+ return 0;
+}
+
+int atomisp_css_get_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_gc_config *config)
+{
+ struct ia_css_gc_config gc_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&gc_config, 0, sizeof(struct ia_css_gc_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.gc_config = &gc_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get gamma correction params from current setup */
+ memcpy(config, &gc_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_3a_config *config)
+{
+ struct ia_css_3a_config s3a_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&s3a_config, 0, sizeof(struct ia_css_3a_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.s3a_config = &s3a_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get white balance from current setup */
+ memcpy(config, &s3a_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_formats_config *config)
+{
+ struct ia_css_formats_config formats_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&formats_config, 0, sizeof(formats_config));
+ memset(&isp_config, 0, sizeof(isp_config));
+ isp_config.formats_config = &formats_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get narrow gamma from current setup */
+ memcpy(config, &formats_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int *zoom)
+{
+ struct ia_css_dz_config dz_config; /** Digital Zoom */
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&dz_config, 0, sizeof(struct ia_css_dz_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.dz_config = &dz_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ *zoom = dz_config.dx;
+
+ return 0;
+}
+
+/*
+ * Function to get image stabilization statistics
+ */
+int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_dis_buf *dis_buf;
+ unsigned long flags;
+
+ lockdep_assert_held(&isp->mutex);
+
+ if (!asd->params.dvs_stat->hor_prod.odd_real ||
+ !asd->params.dvs_stat->hor_prod.odd_imag ||
+ !asd->params.dvs_stat->hor_prod.even_real ||
+ !asd->params.dvs_stat->hor_prod.even_imag ||
+ !asd->params.dvs_stat->ver_prod.odd_real ||
+ !asd->params.dvs_stat->ver_prod.odd_imag ||
+ !asd->params.dvs_stat->ver_prod.even_real ||
+ !asd->params.dvs_stat->ver_prod.even_imag)
+ return -EINVAL;
+
+ /* isp needs to be streaming to get DIS statistics */
+ if (!asd->streaming)
+ return -EINVAL;
+
+ if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0)
+		/*
+		 * If the grid info in the argument differs from the current
+		 * grid info, we tell the caller to reset the grid size and
+		 * try again.
+		 */
+ return -EAGAIN;
+
+ spin_lock_irqsave(&asd->dis_stats_lock, flags);
+ if (!asd->params.dis_proj_data_valid || list_empty(&asd->dis_stats)) {
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+ dev_err(isp->dev, "dis statistics is not valid.\n");
+ return -EAGAIN;
+ }
+
+ dis_buf = list_entry(asd->dis_stats.next,
+ struct atomisp_dis_buf, list);
+ list_del_init(&dis_buf->list);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+
+ if (dis_buf->dvs_map)
+ ia_css_translate_dvs2_statistics(
+ asd->params.dvs_stat, dis_buf->dvs_map);
+ else
+ ia_css_get_dvs2_statistics(asd->params.dvs_stat,
+ dis_buf->dis_data);
+ stats->exp_id = dis_buf->dis_data->exp_id;
+
+ spin_lock_irqsave(&asd->dis_stats_lock, flags);
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+
+ if (copy_to_user(stats->dvs2_stat.ver_prod.odd_real,
+ asd->params.dvs_stat->ver_prod.odd_real,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.odd_imag,
+ asd->params.dvs_stat->ver_prod.odd_imag,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.even_real,
+ asd->params.dvs_stat->ver_prod.even_real,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.even_imag,
+ asd->params.dvs_stat->ver_prod.even_imag,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.odd_real,
+ asd->params.dvs_stat->hor_prod.odd_real,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.odd_imag,
+ asd->params.dvs_stat->hor_prod.odd_imag,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.even_real,
+ asd->params.dvs_stat->hor_prod.even_real,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.even_imag,
+ asd->params.dvs_stat->hor_prod.even_imag,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct ia_css_shading_table *atomisp_css_shading_table_alloc(
+ unsigned int width, unsigned int height)
+{
+ return ia_css_shading_table_alloc(width, height);
+}
+
+void atomisp_css_set_shading_table(struct atomisp_sub_device *asd,
+ struct ia_css_shading_table *table)
+{
+ asd->params.config.shading_table = table;
+}
+
+void atomisp_css_shading_table_free(struct ia_css_shading_table *table)
+{
+ ia_css_shading_table_free(table);
+}
+
+struct ia_css_morph_table *atomisp_css_morph_table_allocate(
+ unsigned int width, unsigned int height)
+{
+ return ia_css_morph_table_allocate(width, height);
+}
+
+void atomisp_css_set_morph_table(struct atomisp_sub_device *asd,
+ struct ia_css_morph_table *table)
+{
+ asd->params.config.morph_table = table;
+}
+
+void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
+ struct ia_css_morph_table *table)
+{
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev,
+ "%s called after streamoff, skipping.\n", __func__);
+ return;
+ }
+ memset(table, 0, sizeof(struct ia_css_morph_table));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.morph_table = table;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+}
+
+void atomisp_css_morph_table_free(struct ia_css_morph_table *table)
+{
+ ia_css_morph_table_free(table);
+}
+
+static bool atomisp_css_isr_get_stream_id(struct ia_css_pipe *css_pipe,
+ struct atomisp_device *isp,
+ enum atomisp_input_stream_id *stream_id)
+{
+ struct atomisp_stream_env *stream_env;
+ int i, j;
+
+ if (!isp->asd.streaming)
+ return false;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ stream_env = &isp->asd.stream_env[i];
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ if (stream_env->pipes[j] && stream_env->pipes[j] == css_pipe) {
+ *stream_id = i;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+int atomisp_css_isr_thread(struct atomisp_device *isp)
+{
+ enum atomisp_input_stream_id stream_id = 0;
+ struct atomisp_css_event current_event;
+
+ lockdep_assert_held(&isp->mutex);
+
+ while (!ia_css_dequeue_psys_event(&current_event.event)) {
+ if (current_event.event.type ==
+ IA_CSS_EVENT_TYPE_FW_ASSERT) {
+ /*
+ * Received FW assertion signal,
+ * trigger WDT to recover
+ */
+ dev_err(isp->dev,
+ "%s: ISP reports FW_ASSERT event! fw_assert_module_id %d fw_assert_line_no %d\n",
+ __func__,
+ current_event.event.fw_assert_module_id,
+ current_event.event.fw_assert_line_no);
+
+ queue_work(system_long_wq, &isp->assert_recovery_work);
+ return -EINVAL;
+ } else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) {
+ dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n",
+ __func__, current_event.event.fw_warning,
+ current_event.event.exp_id);
+ continue;
+ }
+
+ if (!atomisp_css_isr_get_stream_id(current_event.event.pipe, isp, &stream_id)) {
+ if (current_event.event.type == IA_CSS_EVENT_TYPE_TIMER)
+ dev_dbg(isp->dev,
+ "event: Timer event.");
+ else
+ dev_warn(isp->dev, "%s:no subdev.event:%d",
+ __func__,
+ current_event.event.type);
+ continue;
+ }
+
+ atomisp_css_temp_pipe_to_pipe_id(&isp->asd, &current_event);
+ switch (current_event.event.type) {
+ case IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE:
+ dev_dbg(isp->dev, "event: Output frame done");
+ atomisp_buf_done(&isp->asd, 0, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+ break;
+ case IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE:
+ dev_dbg(isp->dev, "event: Second output frame done");
+ atomisp_buf_done(&isp->asd, 0, IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+ break;
+ case IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE:
+ dev_dbg(isp->dev, "event: 3A stats frame done");
+ atomisp_buf_done(&isp->asd, 0,
+ IA_CSS_BUFFER_TYPE_3A_STATISTICS,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case IA_CSS_EVENT_TYPE_METADATA_DONE:
+ dev_dbg(isp->dev, "event: metadata frame done");
+ atomisp_buf_done(&isp->asd, 0,
+ IA_CSS_BUFFER_TYPE_METADATA,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE:
+ dev_dbg(isp->dev, "event: VF output frame done");
+ atomisp_buf_done(&isp->asd, 0,
+ IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+ break;
+ case IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE:
+ dev_dbg(isp->dev, "event: second VF output frame done");
+ atomisp_buf_done(&isp->asd, 0,
+ IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+ break;
+ case IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE:
+ dev_dbg(isp->dev, "event: dis stats frame done");
+ atomisp_buf_done(&isp->asd, 0,
+ IA_CSS_BUFFER_TYPE_DIS_STATISTICS,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case IA_CSS_EVENT_TYPE_PIPELINE_DONE:
+ dev_dbg(isp->dev, "event: pipeline done");
+ break;
+ case IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE:
+ dev_warn(isp->dev, "unexpected event: acc stage done");
+ break;
+ default:
+ dev_dbg(isp->dev, "unhandled css stored event: 0x%x\n",
+ current_event.event.type);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+bool atomisp_css_valid_sof(struct atomisp_device *isp)
+{
+ unsigned int i;
+
+ /* Loop for each css vc stream */
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ if (!isp->asd.stream_env[i].stream)
+ continue;
+
+ dev_dbg(isp->dev, "stream #%d: mode: %d\n",
+ i, isp->asd.stream_env[i].stream_config.mode);
+ if (isp->asd.stream_env[i].stream_config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ return false;
+ }
+
+ return true;
+}
+
+int atomisp_css_debug_dump_isp_binary(void)
+{
+ ia_css_debug_dump_isp_binary();
+ return 0;
+}
+
+int atomisp_css_dump_sp_raw_copy_linecount(bool reduced)
+{
+ sh_css_dump_sp_raw_copy_linecount(reduced);
+ return 0;
+}
+
+static const char * const fw_type_name[] = {
+ [ia_css_sp_firmware] = "SP",
+ [ia_css_isp_firmware] = "ISP",
+ [ia_css_bootloader_firmware] = "BootLoader",
+ [ia_css_acc_firmware] = "accel",
+};
+
+static const char * const fw_acc_type_name[] = {
+ [IA_CSS_ACC_NONE] = "Normal",
+ [IA_CSS_ACC_OUTPUT] = "Accel stage on output",
+ [IA_CSS_ACC_VIEWFINDER] = "Accel stage on viewfinder",
+ [IA_CSS_ACC_STANDALONE] = "Stand-alone acceleration",
+};
+
+int atomisp_css_dump_blob_infor(struct atomisp_device *isp)
+{
+ struct ia_css_blob_descr *bd = sh_css_blob_info;
+ unsigned int i, nm = sh_css_num_binaries;
+
+ if (nm == 0)
+ return -EPERM;
+ if (!bd)
+ return -EPERM;
+
+	/*
+	 * The sh_css_load_firmware function discards the initial
+	 * "SPS" binaries.
+	 */
+ for (i = 0; i < sh_css_num_binaries - NUM_OF_SPS; i++) {
+ switch (bd[i].header.type) {
+ case ia_css_isp_firmware:
+ dev_dbg(isp->dev, "Num%2d type %s (%s), binary id is %2d, name is %s\n",
+ i + NUM_OF_SPS,
+ fw_type_name[bd[i].header.type],
+ fw_acc_type_name[bd[i].header.info.isp.type],
+ bd[i].header.info.isp.sp.id,
+ bd[i].name);
+ break;
+ default:
+ dev_dbg(isp->dev, "Num%2d type %s, name is %s\n",
+ i + NUM_OF_SPS, fw_type_name[bd[i].header.type],
+ bd[i].name);
+ }
+ }
+
+ return 0;
+}
+
+void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
+ uint32_t isp_config_id)
+{
+ asd->params.config.isp_config_id = isp_config_id;
+}
+
+void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd,
+ struct ia_css_frame *output_frame)
+{
+ asd->params.config.output_frame = output_frame;
+}
+
+int atomisp_get_css_dbgfunc(void)
+{
+ return dbg_func;
+}
+
+int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt)
+{
+ int ret;
+
+ ret = __set_css_print_env(isp, opt);
+ if (ret == 0)
+ dbg_func = opt;
+
+ return ret;
+}
+
+void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable)
+{
+ ia_css_en_dz_capt_pipe(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ enable);
+}
+
+struct ia_css_dvs_grid_info *atomisp_css_get_dvs_grid_info(
+ struct ia_css_grid_info *grid_info)
+{
+ if (!grid_info)
+ return NULL;
+
+#ifdef IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
+ return &grid_info->dvs_grid.dvs_grid_info;
+#else
+ return &grid_info->dvs_grid;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
new file mode 100644
index 000000000000..75781807544a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ */
+
+#ifndef __ATOMISP_COMPAT_CSS20_H__
+#define __ATOMISP_COMPAT_CSS20_H__
+
+#include <media/v4l2-mediabus.h>
+
+#include "ia_css.h"
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+#include "sh_css_legacy.h"
+
+#define ATOMISP_CSS2_PIPE_MAX 2
+#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES 3
+#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN 4
+#define ATOMISP_CSS2_NUM_DVS_FRAME_DELAY 2
+
+#define CSS_MIPI_FRAME_BUFFER_SIZE_1 0x60000
+#define CSS_MIPI_FRAME_BUFFER_SIZE_2 0x80000
+
+struct atomisp_device;
+struct atomisp_sub_device;
+
+#define MAX_STREAMS_PER_CHANNEL 2
+
+/*
+ * These indicate the CSS stream state; the corresponding stream handling
+ * can be chosen based on the current state.
+ */
+enum atomisp_css_stream_state {
+ CSS_STREAM_UNINIT,
+ CSS_STREAM_CREATED,
+ CSS_STREAM_STARTED,
+ CSS_STREAM_STOPPED,
+};
+
+/*
+ * A sensor or external ISP can send multiple streams with different MIPI
+ * data types in the same virtual channel. This information needs to come
+ * from the sensor or external ISP.
+ */
+struct atomisp_css_isys_config_info {
+ unsigned int input_format;
+ unsigned int width;
+ unsigned int height;
+};
+
+struct atomisp_stream_env {
+ struct ia_css_stream *stream;
+ struct ia_css_stream_config stream_config;
+ struct ia_css_stream_info stream_info;
+ struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_config pipe_configs[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_extra_config pipe_extra_configs[IA_CSS_PIPE_ID_NUM];
+ bool update_pipe[IA_CSS_PIPE_ID_NUM];
+ enum atomisp_css_stream_state stream_state;
+ struct ia_css_stream *acc_stream;
+ enum atomisp_css_stream_state acc_stream_state;
+ struct ia_css_stream_config acc_stream_config;
+ unsigned int ch_id; /* virtual channel ID */
+ unsigned int isys_configs;
+ struct atomisp_css_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
+};
+
+struct atomisp_css_env {
+ struct ia_css_env isp_css_env;
+ struct ia_css_fw isp_css_fw;
+};
+
+struct atomisp_s3a_buf {
+ struct ia_css_isp_3a_statistics *s3a_data;
+ struct ia_css_isp_3a_statistics_map *s3a_map;
+ struct list_head list;
+};
+
+struct atomisp_dis_buf {
+ struct ia_css_isp_dvs_statistics *dis_data;
+ struct ia_css_isp_dvs_statistics_map *dvs_map;
+ struct list_head list;
+};
+
+struct atomisp_css_buffer {
+ struct ia_css_buffer css_buffer;
+};
+
+struct atomisp_css_event {
+ enum ia_css_pipe_id pipe;
+ struct ia_css_event event;
+};
+
+void atomisp_css_set_macc_config(struct atomisp_sub_device *asd,
+ struct ia_css_macc_config *macc_config);
+
+void atomisp_css_set_ecd_config(struct atomisp_sub_device *asd,
+ struct ia_css_ecd_config *ecd_config);
+
+void atomisp_css_set_ynr_config(struct atomisp_sub_device *asd,
+ struct ia_css_ynr_config *ynr_config);
+
+void atomisp_css_set_fc_config(struct atomisp_sub_device *asd,
+ struct ia_css_fc_config *fc_config);
+
+void atomisp_css_set_aa_config(struct atomisp_sub_device *asd,
+ struct ia_css_aa_config *aa_config);
+
+void atomisp_css_set_baa_config(struct atomisp_sub_device *asd,
+ struct ia_css_aa_config *baa_config);
+
+void atomisp_css_set_anr_config(struct atomisp_sub_device *asd,
+ struct ia_css_anr_config *anr_config);
+
+void atomisp_css_set_xnr_config(struct atomisp_sub_device *asd,
+ struct ia_css_xnr_config *xnr_config);
+
+void atomisp_css_set_cnr_config(struct atomisp_sub_device *asd,
+ struct ia_css_cnr_config *cnr_config);
+
+void atomisp_css_set_ctc_config(struct atomisp_sub_device *asd,
+ struct ia_css_ctc_config *ctc_config);
+
+void atomisp_css_set_yuv2rgb_cc_config(struct atomisp_sub_device *asd,
+ struct ia_css_cc_config *yuv2rgb_cc_config);
+
+void atomisp_css_set_rgb2yuv_cc_config(struct atomisp_sub_device *asd,
+ struct ia_css_cc_config *rgb2yuv_cc_config);
+
+void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd,
+ struct ia_css_anr_thres *anr_thres);
+
+int atomisp_css_load_firmware(struct atomisp_device *isp);
+
+void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd,
+ struct ia_css_dvs_6axis_config *dvs_6axis);
+
+int atomisp_css_debug_dump_isp_binary(void);
+
+int atomisp_css_dump_sp_raw_copy_linecount(bool reduced);
+
+int atomisp_css_dump_blob_infor(struct atomisp_device *isp);
+
+void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
+ uint32_t isp_config_id);
+
+void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd,
+ struct ia_css_frame *output_frame);
+
+int atomisp_get_css_dbgfunc(void);
+
+int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt);
+struct ia_css_dvs_grid_info *atomisp_css_get_dvs_grid_info(
+ struct ia_css_grid_info *grid_info);
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
new file mode 100644
index 000000000000..95b9113d75e9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include "atomisp_cmd.h"
+#include "atomisp_internal.h"
+#include "atomisp-regs.h"
+
+static struct
+v4l2_mbus_framefmt *__csi2_get_format(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which,
+ unsigned int pad)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_format(sd_state, pad);
+ else
+ return &csi2->formats[pad];
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd      : pointer to v4l2 subdev structure
+ * @sd_state: V4L2 subdev state
+ * @code    : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct atomisp_in_fmt_conv *ic = atomisp_in_fmt_conv;
+ unsigned int i = 0;
+
+ while (ic->code) {
+ if (i == code->index) {
+ code->code = ic->code;
+ return 0;
+ }
+ i++, ic++;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd      : pointer to v4l2 subdev structure
+ * @sd_state: V4L2 subdev state
+ * @fmt     : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, sd_state, fmt->which, fmt->pad);
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int which, uint16_t pad,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *actual_ffmt = __csi2_get_format(csi2,
+ sd_state,
+ which, pad);
+
+ if (pad == CSI2_PAD_SINK) {
+ const struct atomisp_in_fmt_conv *ic;
+ struct v4l2_mbus_framefmt tmp_ffmt;
+
+ ic = atomisp_find_in_fmt_conv(ffmt->code);
+ if (ic)
+ actual_ffmt->code = ic->code;
+ else
+ actual_ffmt->code = atomisp_in_fmt_conv[0].code;
+
+ actual_ffmt->width = clamp_t(u32, ffmt->width,
+ ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH);
+ actual_ffmt->height = clamp_t(u32, ffmt->height,
+ ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT);
+ actual_ffmt->field = ffmt->field;
+
+ tmp_ffmt = *ffmt = *actual_ffmt;
+
+ /* Always use V4L2_FIELD_ANY to match the ISP sink pad */
+ tmp_ffmt.field = V4L2_FIELD_ANY;
+ return atomisp_csi2_set_ffmt(sd, sd_state, which,
+ CSI2_PAD_SOURCE,
+ &tmp_ffmt);
+ }
+
+ /* FIXME: DPCM decompression */
+ *actual_ffmt = *ffmt = *__csi2_get_format(csi2, sd_state, which,
+ CSI2_PAD_SINK);
+
+ return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd      : pointer to v4l2 subdev structure
+ * @sd_state: V4L2 subdev state
+ * @fmt     : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return atomisp_csi2_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
+ &fmt->format);
+}
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .enum_mbus_code = csi2_enum_mbus_code,
+ .get_fmt = csi2_get_format,
+ .set_fmt = csi2_set_format,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+ .pad = &csi2_pad_ops,
+};
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * mipi_csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to atomisp_mipi_csi2_device structure.
+ * @port: CSI-2 port number.
+ * return -ENOMEM or zero on success
+ */
+static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
+ int port)
+{
+ struct v4l2_subdev *sd = &csi2->subdev;
+ struct media_pad *pads = csi2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ v4l2_subdev_init(sd, &csi2_ops);
+ snprintf(sd->name, sizeof(sd->name), "ATOM ISP CSI2-port%d", port);
+
+ v4l2_set_subdevdata(sd, csi2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+ me->ops = &csi2_media_ops;
+ me->function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ ret = media_entity_pads_init(me, CSI2_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ csi2->formats[CSI2_PAD_SINK].code = atomisp_in_fmt_conv[0].code;
+ csi2->formats[CSI2_PAD_SOURCE].code = atomisp_in_fmt_conv[0].code;
+
+ return 0;
+}
+
+void
+atomisp_mipi_csi2_unregister_entities(struct atomisp_mipi_csi2_device *csi2)
+{
+ media_entity_cleanup(&csi2->subdev.entity);
+ v4l2_device_unregister_subdev(&csi2->subdev);
+}
+
+int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ atomisp_mipi_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+static const int LIMIT_SHIFT = 6; /* Limit numeric range into 31 bits */
+
+static int
+atomisp_csi2_configure_calc(const short int coeffs[2], int mipi_freq, int def)
+{
+ /* Delay counter accuracy, 1/0.0625 for ANN/CHT, 1/0.125 for BXT */
+ static const int accinv = 16; /* 1 / COUNT_ACC */
+ int r;
+
+ if (mipi_freq >> LIMIT_SHIFT <= 0)
+ return def;
+
+ r = accinv * coeffs[1] * (500000000 >> LIMIT_SHIFT);
+ r /= mipi_freq >> LIMIT_SHIFT;
+ r += accinv * coeffs[0];
+
+ return r;
+}
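+
+/*
+ * Worked example (assumed numbers, for illustration only): with the
+ * clock-lane settle coefficients {95, -8} used below and a 400 MHz MIPI
+ * frequency (UI = 1.25 ns), the helper returns
+ *   16 * -8 * (500000000 >> 6) / (400000000 >> 6) + 16 * 95 = 1360,
+ * i.e. (95 ns - 8 * 1.25 ns) / 0.0625 ns, matching the formula documented
+ * in atomisp_csi2_configure_isp2401() below.
+ */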
+
+static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
+{
+ /*
+ * The ISP2401 new input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ * register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ * UI = 1 / (2 * F) in seconds
+ * COUNT_ACC = counter accuracy in seconds
+ * For ANN and CHV, COUNT_ACC = 0.0625 ns
+ * For BXT, COUNT_ACC = 0.125 ns
+ * A and B are coefficients from the table below,
+	 * depending on whether the register minimum or maximum value is
+	 * calculated.
+ * Minimum Maximum
+ * Clock lane A B A B
+ * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
+ * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
+ *
+ * We use the minimum values in the calculations below.
+ */
+ static const short int coeff_clk_termen[] = { 0, 0 };
+ static const short int coeff_clk_settle[] = { 95, -8 };
+ static const short int coeff_dat_termen[] = { 0, 0 };
+ static const short int coeff_dat_settle[] = { 85, -2 };
+ static const int TERMEN_DEFAULT = 0 * 0;
+ static const int SETTLE_DEFAULT = 0x480;
+
+ static const hrt_address csi2_port_base[] = {
+ [ATOMISP_CAMERA_PORT_PRIMARY] = CSI2_PORT_A_BASE,
+ [ATOMISP_CAMERA_PORT_SECONDARY] = CSI2_PORT_B_BASE,
+ [ATOMISP_CAMERA_PORT_TERTIARY] = CSI2_PORT_C_BASE,
+ };
+ /* Number of lanes on each port, excluding clock lane */
+ static const unsigned char csi2_port_lanes[] = {
+ [ATOMISP_CAMERA_PORT_PRIMARY] = 4,
+ [ATOMISP_CAMERA_PORT_SECONDARY] = 2,
+ [ATOMISP_CAMERA_PORT_TERTIARY] = 2,
+ };
+ static const hrt_address csi2_lane_base[] = {
+ CSI2_LANE_CL_BASE,
+ CSI2_LANE_D0_BASE,
+ CSI2_LANE_D1_BASE,
+ CSI2_LANE_D2_BASE,
+ CSI2_LANE_D3_BASE,
+ };
+
+ int clk_termen;
+ int clk_settle;
+ int dat_termen;
+ int dat_settle;
+
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+ int mipi_freq = 0;
+ enum atomisp_camera_port port;
+ int n;
+
+ port = isp->inputs[asd->input_curr].port;
+
+ ctrl.id = V4L2_CID_LINK_FREQ;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].sensor->ctrl_handler, &ctrl) == 0)
+ mipi_freq = ctrl.value;
+
+ clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen, mipi_freq,
+ TERMEN_DEFAULT);
+ clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle, mipi_freq,
+ SETTLE_DEFAULT);
+ dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen, mipi_freq,
+ TERMEN_DEFAULT);
+ dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle, mipi_freq,
+ SETTLE_DEFAULT);
+
+ for (n = 0; n < csi2_port_lanes[port] + 1; n++) {
+ hrt_address base = csi2_port_base[port] + csi2_lane_base[n];
+
+ atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_TERMEN,
+ n == 0 ? clk_termen : dat_termen);
+ atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_SETTLE,
+ n == 0 ? clk_settle : dat_settle);
+ }
+}
+
+void atomisp_csi2_configure(struct atomisp_sub_device *asd)
+{
+ if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401))
+ atomisp_csi2_configure_isp2401(asd);
+}
+
+/*
+ * atomisp_mipi_csi2_cleanup - Routine for module driver cleanup
+ */
+void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp)
+{
+}
+
+int atomisp_mipi_csi2_init(struct atomisp_device *isp)
+{
+ struct atomisp_mipi_csi2_device *csi2_port;
+ unsigned int i;
+ int ret;
+
+ ret = atomisp_csi2_bridge_init(isp);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ csi2_port = &isp->csi2_port[i];
+ csi2_port->isp = isp;
+ ret = mipi_csi2_init_entities(csi2_port, i);
+ if (ret < 0)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ atomisp_mipi_csi2_cleanup(isp);
+ return ret;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
new file mode 100644
index 000000000000..ec762f8fb922
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ */
+#ifndef __ATOMISP_CSI2_H__
+#define __ATOMISP_CSI2_H__
+
+#include <linux/gpio/consumer.h>
+#include <linux/property.h>
+
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+
+#include "../../include/linux/atomisp.h"
+
+#define CSI2_PAD_SINK 0
+#define CSI2_PAD_SOURCE 1
+#define CSI2_PADS_NUM 2
+
+struct v4l2_device;
+
+struct atomisp_device;
+struct atomisp_sub_device;
+
+struct atomisp_mipi_csi2_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+ struct v4l2_ctrl_handler ctrls;
+ struct atomisp_device *isp;
+};
+
+int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int which, uint16_t pad,
+ struct v4l2_mbus_framefmt *ffmt);
+int atomisp_mipi_csi2_init(struct atomisp_device *isp);
+void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp);
+void atomisp_mipi_csi2_unregister_entities(
+ struct atomisp_mipi_csi2_device *csi2);
+int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_device *vdev);
+int atomisp_csi2_bridge_init(struct atomisp_device *isp);
+int atomisp_csi2_bridge_parse_firmware(struct atomisp_device *isp);
+
+void atomisp_csi2_configure(struct atomisp_sub_device *asd);
+
+#endif /* __ATOMISP_CSI2_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
new file mode 100644
index 000000000000..2a90f86e515f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code to build software firmware node graph for atomisp2 connected sensors
+ * from ACPI tables.
+ *
+ * Copyright (C) 2023 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on drivers/media/pci/intel/ipu3/cio2-bridge.c written by:
+ * Dan Scally <djrscally@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/platform_data/x86/int3472.h>
+#include <linux/property.h>
+
+#include <media/ipu-bridge.h>
+#include <media/v4l2-fwnode.h>
+
+#include "atomisp_cmd.h"
+#include "atomisp_csi2.h"
+#include "atomisp_internal.h"
+
+#define PMC_CLK_RATE_19_2MHZ 19200000
+
+/*
+ * dc2f6c4f-045b-4f1d-97b9-882a6860a4be
+ * This _DSM GUID returns a package with n*2 strings, with each set of 2
+ * strings forming a key/value pair for settings such as "CsiLanes" = "1".
+ */
+static const guid_t atomisp_dsm_guid =
+ GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d,
+ 0x97, 0xb9, 0x88, 0x2a, 0x68, 0x60, 0xa4, 0xbe);
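+
+/*
+ * Illustrative only (not taken from real firmware tables): the package
+ * returned by the _DSM above typically looks like
+ *   Package () { "CsiPort", "1", "CsiLanes", "2" }
+ * and is parsed into key/value pairs by gmin_cfg_get_dsm() below.
+ */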
+
+/*
+ * 75c9a639-5c8a-4a00-9f48-a9c3b5da789f
+ * This _DSM GUID returns a string giving the VCM type e.g. "AD5823".
+ */
+static const guid_t vcm_dsm_guid =
+ GUID_INIT(0x75c9a639, 0x5c8a, 0x4a00,
+ 0x9f, 0x48, 0xa9, 0xc3, 0xb5, 0xda, 0x78, 0x9f);
+
+struct atomisp_sensor_config {
+ int lanes;
+ bool vcm;
+};
+
+#define ATOMISP_SENSOR_CONFIG(_HID, _LANES, _VCM) \
+{ \
+ .id = _HID, \
+ .driver_data = (long)&((const struct atomisp_sensor_config) { \
+ .lanes = _LANES, \
+ .vcm = _VCM, \
+ }) \
+}
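+
+/*
+ * Hypothetical usage sketch (the HID is only an example, not a statement
+ * about supported sensors): entries built with the macro above fit an
+ * acpi_device_id style table, e.g.
+ *   ATOMISP_SENSOR_CONFIG("OVTI2680", 1, false)
+ * for a 1-lane sensor without a VCM.
+ */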
+
+/*
+ * gmin_cfg parsing code. This is a cleaned-up version of the gmin_cfg
+ * parsing code from atomisp_gmin_platform.c.
+ * Once all sensors are moved to v4l2-async probing, atomisp_gmin_platform.c
+ * can be removed and the duplication of this code goes away.
+ */
+struct gmin_cfg_var {
+ const char *acpi_dev_name;
+ const char *key;
+ const char *val;
+};
+
+static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = {
+ /* _DSM contains the wrong CsiPort! */
+ { "OVTI2680:01", "CsiPort", "0" },
+ {}
+};
+
+static struct gmin_cfg_var xiaomi_mipad2_vars[] = {
+ /* _DSM contains the wrong CsiPort for the front facing OV5693 sensor */
+ { "INT33BE:00", "CsiPort", "0" },
+ /* _DSM contains the wrong CsiLanes for the back facing T4KA3 sensor */
+ { "XMCC0003:00", "CsiLanes", "4" },
+ {}
+};
+
+static const struct dmi_system_id gmin_cfg_dmi_overrides[] = {
+ {
+ /* Lenovo Ideapad Miix 310 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10"),
+ },
+ .driver_data = lenovo_ideapad_miix_310_vars,
+ },
+ {
+ /* Xiaomi Mipad2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+ },
+ .driver_data = xiaomi_mipad2_vars,
+ },
+ {}
+};
+
+static char *gmin_cfg_get_dsm(struct acpi_device *adev, const char *key)
+{
+ union acpi_object *obj, *key_el, *val_el;
+ char *val = NULL;
+ int i;
+
+ obj = acpi_evaluate_dsm_typed(adev->handle, &atomisp_dsm_guid, 0, 0,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!obj)
+ return NULL;
+
+ for (i = 0; i < obj->package.count - 1; i += 2) {
+ key_el = &obj->package.elements[i + 0];
+ val_el = &obj->package.elements[i + 1];
+
+ if (key_el->type != ACPI_TYPE_STRING || val_el->type != ACPI_TYPE_STRING)
+ break;
+
+ if (!strcmp(key_el->string.pointer, key)) {
+ val = kstrdup(val_el->string.pointer, GFP_KERNEL);
+ if (!val)
+ break;
+
+ acpi_handle_info(adev->handle, "%s: Using DSM entry %s=%s\n",
+ dev_name(&adev->dev), key, val);
+ break;
+ }
+ }
+
+ ACPI_FREE(obj);
+ return val;
+}
+
+static char *gmin_cfg_get_dmi_override(struct acpi_device *adev, const char *key)
+{
+ const struct dmi_system_id *id;
+ struct gmin_cfg_var *gv;
+
+ id = dmi_first_match(gmin_cfg_dmi_overrides);
+ if (!id)
+ return NULL;
+
+ for (gv = id->driver_data; gv->acpi_dev_name; gv++) {
+ if (strcmp(gv->acpi_dev_name, acpi_dev_name(adev)))
+ continue;
+
+ if (strcmp(key, gv->key))
+ continue;
+
+ acpi_handle_info(adev->handle, "%s: Using DMI entry %s=%s\n",
+ dev_name(&adev->dev), key, gv->val);
+ return kstrdup(gv->val, GFP_KERNEL);
+ }
+
+ return NULL;
+}
+
+static char *gmin_cfg_get(struct acpi_device *adev, const char *key)
+{
+ char *val;
+
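+	/* DMI quirk overrides take precedence over the ACPI _DSM settings */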
+ val = gmin_cfg_get_dmi_override(adev, key);
+ if (val)
+ return val;
+
+ return gmin_cfg_get_dsm(adev, key);
+}
+
+static int gmin_cfg_get_int(struct acpi_device *adev, const char *key, int default_val)
+{
+ char *str_val;
+ long int_val;
+ int ret;
+
+ str_val = gmin_cfg_get(adev, key);
+ if (!str_val)
+ goto out_use_default;
+
+ ret = kstrtoul(str_val, 0, &int_val);
+ kfree(str_val);
+ if (ret)
+ goto out_use_default;
+
+ return int_val;
+
+out_use_default:
+ acpi_handle_info(adev->handle, "%s: Using default %s=%d\n",
+ dev_name(&adev->dev), key, default_val);
+ return default_val;
+}
+
+static int atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(struct acpi_device *adev)
+{
+ /* ACPI_PATH_SEGMENT_LENGTH is guaranteed to be big enough for name + 0 term. */
+ char name[ACPI_PATH_SEGMENT_LENGTH];
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer b_name = { sizeof(name), name };
+ union acpi_object *package, *element;
+ int i, ret = -ENOENT;
+ acpi_handle rhandle;
+ acpi_status status;
+ u8 clock_num;
+
+ status = acpi_evaluate_object_typed(adev->handle, "_PR0", NULL, &buffer, ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ package = buffer.pointer;
+ for (i = 0; i < package->package.count; i++) {
+ element = &package->package.elements[i];
+
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
+ continue;
+
+ rhandle = element->reference.handle;
+ if (!rhandle)
+ continue;
+
+ acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name);
+
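+		/*
+		 * _PR0 power resources named "CLK<n>" identify the PMC
+		 * platform clock "pmc_plt_clk_<n>" used as the sensor clock.
+		 */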
+ if (str_has_prefix(name, "CLK") && !kstrtou8(&name[3], 10, &clock_num) &&
+ clock_num <= 4) {
+ ret = clock_num;
+ break;
+ }
+ }
+
+ ACPI_FREE(buffer.pointer);
+
+ if (ret < 0)
+ acpi_handle_warn(adev->handle, "%s: Could not find PMC clk in _PR0\n",
+ dev_name(&adev->dev));
+
+ return ret;
+}
+
+static int atomisp_csi2_set_pmc_clk_freq(struct acpi_device *adev, int clock_num)
+{
+ struct clk *clk;
+ char name[14];
+ int ret;
+
+ if (clock_num < 0)
+ return 0;
+
+ snprintf(name, sizeof(name), "pmc_plt_clk_%d", clock_num);
+
+ clk = clk_get(NULL, name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ acpi_handle_err(adev->handle, "%s: Error getting clk %s: %d\n",
+ dev_name(&adev->dev), name, ret);
+ return ret;
+ }
+
+ /*
+	 * The firmware might enable the clock at boot; to change
+	 * the rate we must first ensure the clock is disabled.
+ */
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ clk_disable_unprepare(clk);
+ if (!ret)
+ ret = clk_set_rate(clk, PMC_CLK_RATE_19_2MHZ);
+ if (ret)
+ acpi_handle_err(adev->handle, "%s: Error setting clk-rate for %s: %d\n",
+ dev_name(&adev->dev), name, ret);
+
+ clk_put(clk);
+ return ret;
+}
+
+static int atomisp_csi2_get_port(struct acpi_device *adev, int clock_num)
+{
+ int port;
+
+ /*
+ * Compare clock-number to the PMC-clock used for CsiPort 1
+ * in the CHT/BYT reference designs.
+ */
+ if (IS_ISP2401)
+ port = clock_num == 4 ? 1 : 0;
+ else
+ port = clock_num == 0 ? 1 : 0;
+
+ /* Intel DSM or DMI quirk overrides _PR0 CLK derived default */
+ return gmin_cfg_get_int(adev, "CsiPort", port);
+}
+
+/*
+ * Alloc and fill an int3472_discrete_device struct so that we can re-use
+ * the INT3472 sensor GPIO mapping code.
+ *
+ * This gets called from ipu_bridge_init(), which runs only once per boot;
+ * the created faux int3472 device is deliberately leaked to keep the
+ * created GPIO mappings around permanently.
+ */
+static int atomisp_csi2_add_gpio_mappings(struct acpi_device *adev)
+{
+ struct int3472_discrete_device *int3472;
+ int ret;
+
+ /* Max num GPIOs we've seen plus a terminator */
+ int3472 = kzalloc(struct_size(int3472, gpios.table, INT3472_MAX_SENSOR_GPIOS + 1),
+ GFP_KERNEL);
+ if (!int3472)
+ return -ENOMEM;
+
+ /*
+ * On atomisp the _DSM to get the GPIO type must be made on the sensor
+ * adev, rather than on a separate INT3472 adev.
+ */
+ int3472->adev = adev;
+ int3472->dev = &adev->dev;
+ int3472->sensor = adev;
+
+ int3472->sensor_name = kasprintf(GFP_KERNEL, I2C_DEV_NAME_FORMAT, acpi_dev_name(adev));
+ if (!int3472->sensor_name) {
+ kfree(int3472);
+ return -ENOMEM;
+ }
+
+ ret = int3472_discrete_parse_crs(int3472);
+ if (ret) {
+ int3472_discrete_cleanup(int3472);
+ kfree(int3472);
+ }
+
+ return ret;
+}
+
+static char *atomisp_csi2_get_vcm_type(struct acpi_device *adev)
+{
+ union acpi_object *obj;
+ char *vcm_type;
+
+ obj = acpi_evaluate_dsm_typed(adev->handle, &vcm_dsm_guid, 0, 0,
+ NULL, ACPI_TYPE_STRING);
+ if (!obj)
+ return NULL;
+
+ vcm_type = kstrdup(obj->string.pointer, GFP_KERNEL);
+ ACPI_FREE(obj);
+
+ if (!vcm_type)
+ return NULL;
+
+ string_lower(vcm_type, vcm_type);
+ return vcm_type;
+}
+
+static const struct acpi_device_id atomisp_sensor_configs[] = {
+ /*
+ * FIXME ov5693 modules have a VCM, but for unknown reasons
+ * the sensor fails to start streaming when instantiating
+ * an i2c-client for the VCM, so it is disabled for now.
+ */
+ ATOMISP_SENSOR_CONFIG("INT33BE", 2, false), /* OV5693 */
+ {}
+};
+
+static int atomisp_csi2_parse_sensor_fwnode(struct acpi_device *adev,
+ struct ipu_sensor *sensor)
+{
+ const struct acpi_device_id *id;
+ int ret, clock_num;
+ bool vcm = false;
+ int lanes = 1;
+
+ id = acpi_match_acpi_device(atomisp_sensor_configs, adev);
+ if (id) {
+ struct atomisp_sensor_config *cfg =
+ (struct atomisp_sensor_config *)id->driver_data;
+
+ lanes = cfg->lanes;
+ vcm = cfg->vcm;
+ }
+
+ /*
+ * ACPI takes care of turning the PMC clock on and off, but on BYT
+ * the clock defaults to 25 MHz instead of the expected 19.2 MHz.
+	 * Get the PMC-clock number from the ACPI _PR0 method and set it to 19.2 MHz.
+ * The PMC-clock number is also used to determine the default CSI port.
+ */
+ clock_num = atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(adev);
+
+ ret = atomisp_csi2_set_pmc_clk_freq(adev, clock_num);
+ if (ret)
+ return ret;
+
+ sensor->link = atomisp_csi2_get_port(adev, clock_num);
+ if (sensor->link >= ATOMISP_CAMERA_NR_PORTS) {
+ acpi_handle_err(adev->handle, "%s: Invalid port: %u\n",
+ dev_name(&adev->dev), sensor->link);
+ return -EINVAL;
+ }
+
+ sensor->lanes = gmin_cfg_get_int(adev, "CsiLanes", lanes);
+ if (sensor->lanes > IPU_MAX_LANES) {
+ acpi_handle_err(adev->handle, "%s: Invalid lane-count: %d\n",
+ dev_name(&adev->dev), sensor->lanes);
+ return -EINVAL;
+ }
+
+ ret = atomisp_csi2_add_gpio_mappings(adev);
+ if (ret)
+ return ret;
+
+ sensor->mclkspeed = PMC_CLK_RATE_19_2MHZ;
+ sensor->rotation = 0;
+ sensor->orientation = (sensor->link == 1) ?
+ V4L2_FWNODE_ORIENTATION_BACK : V4L2_FWNODE_ORIENTATION_FRONT;
+
+ if (vcm)
+ sensor->vcm_type = atomisp_csi2_get_vcm_type(adev);
+
+ return 0;
+}
+
+int atomisp_csi2_bridge_init(struct atomisp_device *isp)
+{
+ struct device *dev = isp->dev;
+ struct fwnode_handle *fwnode;
+
+ /*
+ * This function is intended to run only once and then leave
+	 * the created nodes attached even after an rmmod, therefore:
+	 * 1. The bridge memory is deliberately leaked on success.
+	 * 2. If a secondary fwnode is already set, exit early.
+ */
+ fwnode = dev_fwnode(dev);
+ if (fwnode && fwnode->secondary)
+ return 0;
+
+ return ipu_bridge_init(dev, atomisp_csi2_parse_sensor_fwnode);
+}
+
+/******* V4L2 sub-device asynchronous registration callbacks ***********/
+
+struct sensor_async_subdev {
+ struct v4l2_async_connection asd;
+ int port;
+};
+
+#define to_sensor_asd(a) container_of(a, struct sensor_async_subdev, asd)
+#define notifier_to_atomisp(n) container_of(n, struct atomisp_device, notifier)
+
+/* .bound() notifier callback when a match is found */
+static int atomisp_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct atomisp_device *isp = notifier_to_atomisp(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
+ int ret;
+
+ if (s_asd->port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "port %d not supported\n", s_asd->port);
+ return -EINVAL;
+ }
+
+ if (isp->sensor_subdevs[s_asd->port]) {
+ dev_err(isp->dev, "port %d already has a sensor attached\n", s_asd->port);
+ return -EBUSY;
+ }
+
+ ret = ipu_bridge_instantiate_vcm(sd->dev);
+ if (ret)
+ return ret;
+
+ isp->sensor_subdevs[s_asd->port] = sd;
+ return 0;
+}
+
+/* The .unbind callback */
+static void atomisp_notifier_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct atomisp_device *isp = notifier_to_atomisp(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
+
+ isp->sensor_subdevs[s_asd->port] = NULL;
+}
+
+/* .complete() is called after all subdevices have been located */
+static int atomisp_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct atomisp_device *isp = notifier_to_atomisp(notifier);
+
+ return atomisp_register_device_nodes(isp);
+}
+
+static const struct v4l2_async_notifier_operations atomisp_async_ops = {
+ .bound = atomisp_notifier_bound,
+ .unbind = atomisp_notifier_unbind,
+ .complete = atomisp_notifier_complete,
+};
+
+int atomisp_csi2_bridge_parse_firmware(struct atomisp_device *isp)
+{
+ int i, mipi_port, ret;
+
+ v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);
+ isp->notifier.ops = &atomisp_async_ops;
+
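+	/*
+	 * Walk the fwnode-graph endpoints (one per CSI port) and register an
+	 * async connection for each sensor found.
+	 */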
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct sensor_async_subdev *s_asd;
+ struct fwnode_handle *ep;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ continue;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ if (vep.base.port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "port %d not supported\n", vep.base.port);
+ ret = -EINVAL;
+ goto err_parse;
+ }
+
+ mipi_port = atomisp_port_to_mipi_port(isp, vep.base.port);
+ isp->sensor_lanes[mipi_port] = vep.bus.mipi_csi2.num_data_lanes;
+
+ s_asd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
+ struct sensor_async_subdev);
+ if (IS_ERR(s_asd)) {
+ ret = PTR_ERR(s_asd);
+ goto err_parse;
+ }
+
+ s_asd->port = vep.base.port;
+
+ fwnode_handle_put(ep);
+ continue;
+
+err_parse:
+ fwnode_handle_put(ep);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h b/drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h
new file mode 100644
index 000000000000..03d7c08b67e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ */
+#ifndef __ATOMISP_DFS_TABLES_H__
+#define __ATOMISP_DFS_TABLES_H__
+
+#include <linux/kernel.h>
+
+struct atomisp_freq_scaling_rule {
+ unsigned int width;
+ unsigned int height;
+ unsigned short fps;
+ unsigned int isp_freq;
+ unsigned int run_mode;
+};
+
+struct atomisp_dfs_config {
+ unsigned int lowest_freq;
+ unsigned int max_freq_at_vmin;
+ unsigned int highest_freq;
+ const struct atomisp_freq_scaling_rule *dfs_table;
+ unsigned int dfs_table_size;
+};
+
+extern const struct atomisp_dfs_config dfs_config_cht_soc;
+
+#endif /* __ATOMISP_DFS_TABLES_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
new file mode 100644
index 000000000000..c7aef066f209
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_compat.h"
+#include "atomisp_subdev.h"
+#include "atomisp_v4l2.h"
+#include "atomisp-regs.h"
+#include "hmm/hmm.h"
+
+#include "ia_css_frame.h"
+#include "type_support.h"
+#include "device_access/device_access.h"
+
+/*
+ * Videobuf2 ops
+ */
+static int atomisp_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct atomisp_video_pipe *pipe = container_of(vq, struct atomisp_video_pipe, vb_queue);
+ int ret;
+
+ mutex_lock(&pipe->asd->isp->mutex); /* for get_css_frame_info() / set_fmt() */
+
+ /*
+	 * If VIDIOC_S_FMT has not been called before VIDIOC_REQBUFS,
+ * this will fail. Call atomisp_set_fmt() ourselves and try again.
+ */
+ ret = atomisp_get_css_frame_info(pipe->asd, &pipe->frame_info);
+ if (ret) {
+ struct v4l2_format f = {
+ .fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420,
+ .fmt.pix.width = 10000,
+ .fmt.pix.height = 10000,
+ };
+
+ ret = atomisp_set_fmt(&pipe->vdev, &f);
+ if (ret)
+ goto out;
+
+ ret = atomisp_get_css_frame_info(pipe->asd, &pipe->frame_info);
+ if (ret)
+ goto out;
+ }
+
+ atomisp_alloc_css_stat_bufs(pipe->asd, ATOMISP_INPUT_STREAM_GENERAL);
+
+ *nplanes = 1;
+ sizes[0] = PAGE_ALIGN(pipe->pix.sizeimage);
+
+out:
+ mutex_unlock(&pipe->asd->isp->mutex);
+ return ret;
+}
+
+static int atomisp_buf_init(struct vb2_buffer *vb)
+{
+ struct atomisp_video_pipe *pipe = vb_to_pipe(vb);
+ struct ia_css_frame *frame = vb_to_frame(vb);
+ int ret;
+
+ ret = ia_css_frame_init_from_info(frame, &pipe->frame_info);
+ if (ret)
+ return ret;
+
+ if (frame->data_bytes > vb2_plane_size(vb, 0)) {
+ dev_err(pipe->asd->isp->dev, "Internal error frame.data_bytes(%u) > vb.length(%lu)\n",
+ frame->data_bytes, vb2_plane_size(vb, 0));
+ return -EIO;
+ }
+
+ frame->data = hmm_create_from_vmalloc_buf(vb2_plane_size(vb, 0),
+ vb2_plane_vaddr(vb, 0));
+ if (frame->data == mmgr_NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int atomisp_q_one_metadata_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_metadata_buf *metadata_buf;
+ enum atomisp_metadata_type md_type = ATOMISP_MAIN_METADATA;
+ struct list_head *metadata_list;
+
+ if (asd->metadata_bufs_in_css[stream_id][css_pipe_id] >=
+ ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ if (!list_empty(&asd->metadata[md_type])) {
+ metadata_list = &asd->metadata[md_type];
+ } else if (!list_empty(&asd->metadata_ready[md_type])) {
+ metadata_list = &asd->metadata_ready[md_type];
+ } else {
+ dev_warn(asd->isp->dev, "%s: No metadata buffers available for type %d!\n",
+ __func__, md_type);
+ return -EINVAL;
+ }
+
+ metadata_buf = list_entry(metadata_list->next,
+ struct atomisp_metadata_buf, list);
+ list_del_init(&metadata_buf->list);
+
+ if (atomisp_q_metadata_buffer_to_css(asd, metadata_buf,
+ stream_id, css_pipe_id)) {
+ list_add(&metadata_buf->list, metadata_list);
+ return -EINVAL;
+ } else {
+ list_add_tail(&metadata_buf->list,
+ &asd->metadata_in_css[md_type]);
+ }
+ asd->metadata_bufs_in_css[stream_id][css_pipe_id]++;
+
+ return 0;
+}
+
+static int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_s3a_buf *s3a_buf;
+ struct list_head *s3a_list;
+ unsigned int exp_id;
+
+ if (asd->s3a_bufs_in_css[css_pipe_id] >= ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
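+	/*
+	 * Take a buffer from the free s3a_stats list; if it is empty, recycle
+	 * the oldest buffer from s3a_stats_ready, dropping its data.
+	 */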
+ if (!list_empty(&asd->s3a_stats)) {
+ s3a_list = &asd->s3a_stats;
+ } else if (!list_empty(&asd->s3a_stats_ready)) {
+ s3a_list = &asd->s3a_stats_ready;
+ } else {
+ dev_warn(asd->isp->dev, "%s: No s3a buffers available!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ s3a_buf = list_entry(s3a_list->next, struct atomisp_s3a_buf, list);
+ list_del_init(&s3a_buf->list);
+ exp_id = s3a_buf->s3a_data->exp_id;
+
+ hmm_flush_vmap(s3a_buf->s3a_data->data_ptr);
+ if (atomisp_q_s3a_buffer_to_css(asd, s3a_buf,
+ stream_id, css_pipe_id)) {
+ /* got from head, so return back to the head */
+ list_add(&s3a_buf->list, s3a_list);
+ return -EINVAL;
+ } else {
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats_in_css);
+ if (s3a_list == &asd->s3a_stats_ready)
+ dev_dbg(asd->isp->dev, "drop one s3a stat with exp_id %d\n", exp_id);
+ }
+
+ asd->s3a_bufs_in_css[css_pipe_id]++;
+ return 0;
+}
+
+static int atomisp_q_one_dis_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_dis_buf *dis_buf;
+ unsigned long irqflags;
+
+ if (asd->dis_bufs_in_css >= ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ if (list_empty(&asd->dis_stats)) {
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ dev_warn(asd->isp->dev, "%s: No dis buffers available!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dis_buf = list_entry(asd->dis_stats.prev,
+ struct atomisp_dis_buf, list);
+ list_del_init(&dis_buf->list);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+
+ hmm_flush_vmap(dis_buf->dis_data->data_ptr);
+ if (atomisp_q_dis_buffer_to_css(asd, dis_buf,
+ stream_id, css_pipe_id)) {
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ /* got from tail, so return back to the tail */
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ return -EINVAL;
+ } else {
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ list_add_tail(&dis_buf->list, &asd->dis_stats_in_css);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ }
+
+ asd->dis_bufs_in_css++;
+
+ return 0;
+}
+
+static int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe,
+ enum atomisp_input_stream_id stream_id,
+ enum ia_css_buffer_type css_buf_type,
+ enum ia_css_pipe_id css_pipe_id)
+{
+ struct atomisp_css_params_with_list *param;
+ struct ia_css_dvs_grid_info *dvs_grid =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned long irqflags;
+ int space, err = 0;
+
+ lockdep_assert_held(&asd->isp->mutex);
+
+ if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
+ return -EINVAL;
+
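+	/*
+	 * Fill the CSS queue with up to ATOMISP_CSS_Q_DEPTH buffers taken
+	 * from the pipe's activeq.
+	 */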
+ space = ATOMISP_CSS_Q_DEPTH - atomisp_buffers_in_css(pipe);
+ while (space--) {
+ struct ia_css_frame *frame;
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ frame = list_first_entry_or_null(&pipe->activeq, struct ia_css_frame, queue);
+ if (frame)
+ list_move_tail(&frame->queue, &pipe->buffers_in_css);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ if (!frame)
+ return -EINVAL;
+
+ /*
+		 * If there is a per-frame setting to apply to the buffer,
+		 * do it before enqueueing the buffer.
+ */
+ param = pipe->frame_params[frame->vb.vb2_buf.index];
+ if (param) {
+ atomisp_makeup_css_parameters(asd,
+ &asd->params.css_param.update_flag,
+ &param->params);
+ atomisp_apply_css_parameters(asd, &param->params);
+
+ if (param->params.update_flag.dz_config &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ err = atomisp_calculate_real_zoom_region(asd,
+ &param->params.dz_config, css_pipe_id);
+ if (!err)
+ asd->params.config.dz_config = &param->params.dz_config;
+ }
+ atomisp_css_set_isp_config_applied_frame(asd, frame);
+ atomisp_css_update_isp_params_on_pipe(asd,
+ asd->stream_env[stream_id].pipes[css_pipe_id]);
+ asd->params.dvs_6axis = (struct ia_css_dvs_6axis_config *)
+ param->params.dvs_6axis;
+
+ /*
+ * WORKAROUND:
+			 * The camera HAL v3 cannot set the zoom region in the
+			 * per-frame setting and the global setting at the same
+			 * time and currently only sets it in the per-frame
+			 * setting, so when the per-frame setting includes a
+			 * zoom region, copy it to the global setting as well.
+ */
+ if (param->params.update_flag.dz_config &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO
+ && !err) {
+ memcpy(&asd->params.css_param.dz_config,
+ &param->params.dz_config,
+ sizeof(struct ia_css_dz_config));
+ asd->params.css_param.update_flag.dz_config =
+ (struct atomisp_dz_config *)
+ &asd->params.css_param.dz_config;
+ asd->params.css_update_params_needed = true;
+ }
+ pipe->frame_params[frame->vb.vb2_buf.index] = NULL;
+ }
+ /* Enqueue buffer */
+ err = atomisp_q_video_buffer_to_css(asd, frame, stream_id,
+ css_buf_type, css_pipe_id);
+ if (err) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_move_tail(&frame->queue, &pipe->activeq);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ dev_err(asd->isp->dev, "%s, css q fails: %d\n",
+ __func__, err);
+ return -EINVAL;
+ }
+
+ /* enqueue 3A/DIS/metadata buffers */
+ if (asd->params.curr_grid_info.s3a_grid.enable &&
+ css_pipe_id == asd->params.s3a_enabled_pipe &&
+ css_buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_s3a_buffer(asd, stream_id,
+ css_pipe_id);
+
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.size &&
+ css_buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_metadata_buffer(asd, stream_id,
+ css_pipe_id);
+
+ if (dvs_grid && dvs_grid->enable &&
+ css_pipe_id == IA_CSS_PIPE_ID_VIDEO &&
+ css_buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_dis_buffer(asd, stream_id,
+ css_pipe_id);
+ }
+
+ return 0;
+}
+
+/* queue all available buffers to css */
+int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
+{
+ enum ia_css_pipe_id pipe_id;
+
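+	/* Select the CSS pipe to queue to based on copy-mode, vf-pp and run-mode */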
+ if (asd->copy_mode) {
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ pipe_id = IA_CSS_PIPE_ID_PREVIEW;
+ } else {
+ /* ATOMISP_RUN_MODE_STILL_CAPTURE */
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ }
+
+ atomisp_q_video_buffers_to_css(asd, &asd->video_out,
+ ATOMISP_INPUT_STREAM_GENERAL,
+ IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, pipe_id);
+ return 0;
+}
+
+static void atomisp_buf_queue(struct vb2_buffer *vb)
+{
+ struct atomisp_video_pipe *pipe = vb_to_pipe(vb);
+ struct ia_css_frame *frame = vb_to_frame(vb);
+ struct atomisp_sub_device *asd = pipe->asd;
+ unsigned long irqflags;
+ int ret;
+
+ mutex_lock(&asd->isp->mutex);
+
+ ret = atomisp_pipe_check(pipe, false);
+ if (ret) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ atomisp_buffer_done(frame, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ goto out_unlock;
+ }
+
+ /* FIXME this ugliness comes from the original atomisp buffer handling */
+ if (!(vb->skip_cache_sync_on_finish && vb->skip_cache_sync_on_prepare))
+ wbinvd();
+
+ pipe->frame_params[vb->index] = NULL;
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ /*
+	 * A frame buffer should be put on the waiting list when it meets one
+	 * of the following conditions:
+	 * 1. It is not a main output frame and it has a per-frame parameter
+	 *    to go with it.
+	 * 2. It is not a main output frame and the waiting buffer list is not
+	 *    empty; to keep the FIFO order of frame buffer processing it is
+	 *    put on the waiting list until the previous per-frame parameter
+	 *    buffers have been enqueued.
+ */
+ if (pipe->frame_request_config_id[vb->index] ||
+ !list_empty(&pipe->buffers_waiting_for_param))
+ list_add_tail(&frame->queue, &pipe->buffers_waiting_for_param);
+ else
+ list_add_tail(&frame->queue, &pipe->activeq);
+
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+	/* TODO: do this better, this is not the best way to queue to the CSS */
+ if (asd->streaming) {
+ if (!list_empty(&pipe->buffers_waiting_for_param))
+ atomisp_handle_parameter_and_buffer(pipe);
+ else
+ atomisp_qbuffers_to_css(asd);
+ }
+
+out_unlock:
+ mutex_unlock(&asd->isp->mutex);
+}
+
+static void atomisp_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct atomisp_video_pipe *pipe = vb_to_pipe(vb);
+ struct ia_css_frame *frame = vb_to_frame(vb);
+ int index = frame->vb.vb2_buf.index;
+
+ pipe->frame_request_config_id[index] = 0;
+ pipe->frame_params[index] = NULL;
+
+ hmm_free(frame->data);
+}
+
+const struct vb2_ops atomisp_vb2_ops = {
+ .queue_setup = atomisp_queue_setup,
+ .buf_init = atomisp_buf_init,
+ .buf_cleanup = atomisp_buf_cleanup,
+ .buf_queue = atomisp_buf_queue,
+ .start_streaming = atomisp_start_streaming,
+ .stop_streaming = atomisp_stop_streaming,
+};
+
+static void atomisp_dev_init_struct(struct atomisp_device *isp)
+{
+ isp->isp_fatal_error = false;
+
+ /*
+ * For Merrifield, frequency is scalable.
+ * After boot-up, the default frequency is 200MHz.
+ */
+ isp->running_freq = ISP_FREQ_200MHZ;
+}
+
+static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
+{
+ memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
+ asd->params.color_effect = V4L2_COLORFX_NONE;
+ asd->params.bad_pixel_en = true;
+ asd->params.gdc_cac_en = false;
+ asd->params.video_dis_en = false;
+ asd->params.sc_en = false;
+ asd->params.fpn_en = false;
+ asd->params.xnr_en = false;
+ asd->params.false_color = 0;
+ asd->params.yuv_ds_en = 0;
+ /* s3a grid not enabled for any pipe */
+ asd->params.s3a_enabled_pipe = IA_CSS_PIPE_ID_NUM;
+
+ asd->copy_mode = false;
+
+ asd->stream_prepared = false;
+ asd->high_speed_mode = false;
+ asd->sensor_array_res.height = 0;
+ asd->sensor_array_res.width = 0;
+ atomisp_css_init_struct(asd);
+}
+
+/*
+ * file operation functions
+ */
+static int atomisp_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ int ret;
+
+ dev_dbg(isp->dev, "open device %s\n", vdev->name);
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
+
+ mutex_lock(&isp->mutex);
+
+ if (!isp->input_cnt) {
+ dev_err(isp->dev, "no camera attached\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * atomisp does not allow multiple open
+ */
+ if (pipe->users) {
+ dev_dbg(isp->dev, "video node already opened\n");
+ ret = -EBUSY;
+ goto error;
+ }
+
+ /* runtime power management, turn on ISP */
+ ret = pm_runtime_resume_and_get(vdev->v4l2_dev->dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "Failed to power on device\n");
+ goto error;
+ }
+
+ atomisp_dev_init_struct(isp);
+ atomisp_subdev_init_struct(asd);
+
+ pipe->users++;
+ mutex_unlock(&isp->mutex);
+ return 0;
+
+error:
+ mutex_unlock(&isp->mutex);
+ v4l2_fh_release(file);
+ return ret;
+}
+
+static int atomisp_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct v4l2_subdev_fh fh;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ dev_dbg(isp->dev, "release device %s\n", vdev->name);
+
+ /* Note file must not be used after this! */
+ vb2_fop_release(file);
+
+ mutex_lock(&isp->mutex);
+
+ pipe->users--;
+
+ atomisp_css_free_stat_buffers(asd);
+ atomisp_free_internal_buffers(asd);
+
+ atomisp_s_sensor_power(isp, asd->input_curr, 0);
+
+ atomisp_destroy_pipes_stream(asd);
+
+ if (pm_runtime_put_sync(vdev->v4l2_dev->dev) < 0)
+ dev_err(isp->dev, "Failed to power off device\n");
+
+ mutex_unlock(&isp->mutex);
+ return 0;
+}
+
+const struct v4l2_file_operations atomisp_fops = {
+ .owner = THIS_MODULE,
+ .open = atomisp_open,
+ .release = atomisp_release,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ /*
+	 * This was removed because of bugs; the interface
+	 * needs to be made safe for compat tasks instead.
+ .compat_ioctl32 = atomisp_compat_ioctl32,
+ */
+#endif
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp_fops.h
new file mode 100644
index 000000000000..a5c84029b025
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __ATOMISP_FOPS_H__
+#define __ATOMISP_FOPS_H__
+#include "atomisp_subdev.h"
+
+/*
+ * Memory help functions for image frame and private parameters
+ */
+
+int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd);
+
+extern const struct vb2_ops atomisp_vb2_ops;
+extern const struct v4l2_file_operations atomisp_fops;
+
+#endif /* __ATOMISP_FOPS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
new file mode 100644
index 000000000000..964cc3bcc0ac
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -0,0 +1,1318 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include "../../include/linux/atomisp_platform.h"
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#define MAX_SUBDEVS 8
+
+enum clock_rate {
+ VLV2_CLK_XTAL_25_0MHz = 0,
+ VLV2_CLK_PLL_19P2MHZ = 1
+};
+
+#define CLK_RATE_19_2MHZ 19200000
+#define CLK_RATE_25_0MHZ 25000000
+
+/* Valid clock numbers range from 0 to 5 */
+#define MAX_CLK_COUNT 5
+
+/* X-Powers AXP288 register set */
+#define ALDO1_SEL_REG 0x28
+#define ALDO1_CTRL3_REG 0x13
+#define ALDO1_2P8V 0x16
+#define ALDO1_CTRL3_SHIFT 0x05
+
+#define ELDO_CTRL_REG 0x12
+
+#define ELDO1_SEL_REG 0x19
+#define ELDO1_1P6V 0x12
+#define ELDO1_CTRL_SHIFT 0x00
+
+#define ELDO2_SEL_REG 0x1a
+#define ELDO2_1P8V 0x16
+#define ELDO2_CTRL_SHIFT 0x01
+
+/* TI SND9039 PMIC register set */
+#define LDO9_REG 0x49
+#define LDO10_REG 0x4a
+#define LDO11_REG 0x4b
+
+#define LDO_2P8V_ON 0x2f /* 0x2e selects 2.85V ... */
+#define LDO_2P8V_OFF 0x2e /* ... bottom bit is "enabled" */
+
+#define LDO_1P8V_ON 0x59 /* 0x58 selects 1.80V ... */
+#define LDO_1P8V_OFF 0x58 /* ... bottom bit is "enabled" */
+
+/* CRYSTAL COVE PMIC register set */
+#define CRYSTAL_BYT_1P8V_REG 0x5d
+#define CRYSTAL_BYT_2P8V_REG 0x66
+
+#define CRYSTAL_CHT_1P8V_REG 0x57
+#define CRYSTAL_CHT_2P8V_REG 0x5d
+
+#define CRYSTAL_ON 0x63
+#define CRYSTAL_OFF 0x62
+
+struct gmin_subdev {
+ struct v4l2_subdev *subdev;
+ enum clock_rate clock_src;
+ struct clk *pmc_clk;
+ struct gpio_desc *gpio0;
+ struct gpio_desc *gpio1;
+ struct regulator *v1p8_reg;
+ struct regulator *v2p8_reg;
+ struct regulator *v1p2_reg;
+ enum atomisp_camera_port csi_port;
+ unsigned int csi_lanes;
+ enum atomisp_input_format csi_fmt;
+ enum atomisp_bayer_order csi_bayer;
+
+ bool clock_on;
+ bool v1p8_on;
+ bool v2p8_on;
+ bool v1p2_on;
+
+ u8 pwm_i2c_addr;
+
+ /* For PMIC AXP */
+ int eldo1_sel_reg, eldo1_1p6v, eldo1_ctrl_shift;
+ int eldo2_sel_reg, eldo2_1p8v, eldo2_ctrl_shift;
+};
+
+static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS];
+
+/* ACPI HIDs for the PMICs that could be used by this driver */
+#define PMIC_ACPI_AXP "INT33F4" /* XPower AXP288 PMIC */
+#define PMIC_ACPI_TI "INT33F5" /* Dollar Cove TI PMIC */
+#define PMIC_ACPI_CRYSTALCOVE "INT33FD" /* Crystal Cove PMIC */
+
+#define PMIC_PLATFORM_TI "intel_soc_pmic_chtdc_ti"
+
+static enum {
+ PMIC_UNSET = 0,
+ PMIC_REGULATOR,
+ PMIC_AXP,
+ PMIC_TI,
+ PMIC_CRYSTALCOVE
+} pmic_id;
+
+static const char *pmic_name[] = {
+ [PMIC_UNSET] = "ACPI device PM",
+ [PMIC_REGULATOR] = "regulator driver",
+ [PMIC_AXP] = "XPower AXP288 PMIC",
+ [PMIC_TI] = "Dollar Cove TI PMIC",
+ [PMIC_CRYSTALCOVE] = "Crystal Cove PMIC",
+};
+
+static DEFINE_MUTEX(gmin_regulator_mutex);
+static int gmin_v1p8_enable_count;
+static int gmin_v2p8_enable_count;
+
+/* The atomisp uses subdev==NULL for the end-of-list marker, so leave space. */
+static struct intel_v4l2_subdev_table pdata_subdevs[MAX_SUBDEVS + 1];
+
+static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev);
+
+const struct intel_v4l2_subdev_table *atomisp_platform_get_subdevs(void)
+{
+ return pdata_subdevs;
+}
+EXPORT_SYMBOL_GPL(atomisp_platform_get_subdevs);
+
+int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
+ struct camera_sensor_platform_data *plat_data)
+{
+ int i;
+ struct gmin_subdev *gs;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+
+ /* The windows driver model (and thus most BIOSes by default)
+ * uses ACPI runtime power management for camera devices, but
+ * we don't. Disable it, or else the rails will be needlessly
+ * tickled during suspend/resume. This has caused power and
+ * performance issues on multiple devices.
+ */
+
+ /*
+ * Turn off the device before disabling ACPI power resources
+ * (the sensor driver has already probed it at this point).
+ * This avoids leaking the reference count of the (possibly shared)
+ * ACPI power resources which were enabled/referenced before probe().
+ */
+ acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+ adev->power.flags.power_resources = 0;
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (!pdata_subdevs[i].subdev)
+ break;
+
+ if (i == MAX_SUBDEVS)
+ return -ENOMEM;
+
+ /* Note subtlety of initialization order: at the point where
+ * this registration API gets called, the platform data
+ * callbacks have probably already been invoked, so the
+ * gmin_subdev struct is already initialized for us.
+ */
+ gs = find_gmin_subdev(subdev);
+ if (!gs)
+ return -ENODEV;
+
+ pdata_subdevs[i].port = gs->csi_port;
+ pdata_subdevs[i].lanes = gs->csi_lanes;
+ pdata_subdevs[i].subdev = subdev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_register_i2c_module);
+
+int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd)
+{
+ int i, j;
+
+ if (!sd)
+ return 0;
+
+ for (i = 0; i < MAX_SUBDEVS; i++) {
+ if (pdata_subdevs[i].subdev == sd) {
+ for (j = i + 1; j <= MAX_SUBDEVS; j++)
+ pdata_subdevs[j - 1] = pdata_subdevs[j];
+ }
+ if (gmin_subdevs[i].subdev == sd) {
+ if (gmin_subdevs[i].gpio0)
+ gpiod_put(gmin_subdevs[i].gpio0);
+ gmin_subdevs[i].gpio0 = NULL;
+ if (gmin_subdevs[i].gpio1)
+ gpiod_put(gmin_subdevs[i].gpio1);
+ gmin_subdevs[i].gpio1 = NULL;
+ if (pmic_id == PMIC_REGULATOR) {
+ regulator_put(gmin_subdevs[i].v1p8_reg);
+ regulator_put(gmin_subdevs[i].v2p8_reg);
+ regulator_put(gmin_subdevs[i].v1p2_reg);
+ }
+ gmin_subdevs[i].subdev = NULL;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_gmin_remove_subdev);
+
+struct gmin_cfg_var {
+ const char *name, *val;
+};
+
+static struct gmin_cfg_var ffrd8_vars[] = {
+ { "INTCF1B:00_ImxId", "0x134" },
+ { "INTCF1B:00_CsiPort", "1" },
+ { "INTCF1B:00_CsiLanes", "4" },
+ { "INTCF1B:00_CamClk", "0" },
+ {},
+};
+
+static struct gmin_cfg_var mrd7_vars[] = {
+ {"INT33F8:00_CamType", "1"},
+ {"INT33F8:00_CsiPort", "1"},
+ {"INT33F8:00_CsiLanes", "2"},
+ {"INT33F8:00_CsiFmt", "13"},
+ {"INT33F8:00_CsiBayer", "0"},
+ {"INT33F8:00_CamClk", "0"},
+
+ {"INT33F9:00_CamType", "1"},
+ {"INT33F9:00_CsiPort", "0"},
+ {"INT33F9:00_CsiLanes", "1"},
+ {"INT33F9:00_CsiFmt", "13"},
+ {"INT33F9:00_CsiBayer", "0"},
+ {"INT33F9:00_CamClk", "1"},
+ {},
+};
+
+static struct gmin_cfg_var i8880_vars[] = {
+ {"XXOV2680:00_CsiPort", "1"},
+ {"XXOV2680:00_CsiLanes", "1"},
+ {"XXOV2680:00_CamClk", "0"},
+
+ {"XXGC0310:00_CsiPort", "0"},
+ {"XXGC0310:00_CsiLanes", "1"},
+ {"XXGC0310:00_CamClk", "1"},
+ {},
+};
+
+/*
+ * The Surface 3 does not describe CsiPort/CsiLanes in either the DSDT or EFI.
+ */
+static struct gmin_cfg_var surface3_vars[] = {
+ {"APTA0330:00_CsiPort", "0"},
+ {"APTA0330:00_CsiLanes", "2"},
+
+ {"OVTI8835:00_CsiPort", "1"},
+ {"OVTI8835:00_CsiLanes", "4"},
+ {},
+};
+
+static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = {
+ /* _DSM contains the wrong CsiPort! */
+ { "OVTI2680:01_CsiPort", "0" },
+ {}
+};
+
+static const struct dmi_system_id gmin_vars[] = {
+ /*
+ * These DMI IDs were present when the atomisp driver was merged into
+ * drivers/staging and it is unclear if they are really necessary.
+ */
+ {
+ .ident = "BYT-T FFD8",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ },
+ .driver_data = ffrd8_vars,
+ },
+ {
+ .ident = "MRD7",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "TABLET"),
+ DMI_MATCH(DMI_BOARD_VERSION, "MRD 7"),
+ },
+ .driver_data = mrd7_vars,
+ },
+ {
+ .ident = "VTA0803",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VTA0803"),
+ },
+ .driver_data = i8880_vars,
+ },
+	/* Later added DMI IDs; these are confirmed to really be necessary! */
+ {
+ .ident = "Surface 3",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Surface 3"),
+ },
+ .driver_data = surface3_vars,
+ },
+ {
+ .ident = "Lenovo Ideapad Miix 310",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10"),
+ },
+ .driver_data = lenovo_ideapad_miix_310_vars,
+ },
+ {}
+};
+
+#define GMIN_CFG_VAR_EFI_GUID EFI_GUID(0xecb54cd9, 0xe5ae, 0x4fdc, \
+ 0xa9, 0x71, 0xe8, 0x77, \
+ 0x75, 0x60, 0x68, 0xf7)
+
+static const guid_t atomisp_dsm_guid = GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d,
+ 0x97, 0xb9, 0x88, 0x2a,
+ 0x68, 0x60, 0xa4, 0xbe);
+
+#define CFG_VAR_NAME_MAX 64
+
+#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
+static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
+
+static struct i2c_client *gmin_i2c_dev_exists(struct device *dev, char *name,
+ struct i2c_client **client)
+{
+ struct acpi_device *adev;
+
+ adev = acpi_dev_get_first_match_dev(name, NULL, -1);
+ if (!adev)
+ return NULL;
+
+ *client = i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
+ acpi_dev_put(adev);
+ if (!*client)
+ return NULL;
+
+ dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n",
+ (*client)->name, (*client)->addr, (*client)->adapter->nr);
+ return *client;
+}
+
+static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg,
+ u32 value, u32 mask)
+{
+ int ret;
+
+ /*
+	 * FIXME: Right now, the intel_pmic driver just writes values
+	 * directly to the regmap, instead of properly implementing the
+	 * i2c_transfer() mechanism. Let's use the same interface here,
+ * as otherwise we may face issues.
+ */
+
+ dev_dbg(dev,
+ "I2C write, addr: 0x%02x, reg: 0x%02x, value: 0x%02x, mask: 0x%02x\n",
+ i2c_addr, reg, value, mask);
+
+ ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, value, mask);
+ if (ret == -EOPNOTSUPP)
+ dev_err(dev,
+			"ACPI didn't map the OpRegion needed to access I2C address 0x%02x.\n"
+ "Need to compile the kernel using CONFIG_*_PMIC_OPREGION settings\n",
+ i2c_addr);
+
+ return ret;
+}
+
+static int atomisp_get_acpi_power(struct device *dev)
+{
+ char name[5];
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer b_name = { sizeof(name), name };
+ union acpi_object *package, *element;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ acpi_handle rhandle;
+ acpi_status status;
+ int clock_num = -1;
+ int i;
+
+ status = acpi_evaluate_object(handle, "_PR0", NULL, &buffer);
+ if (!ACPI_SUCCESS(status))
+ return -1;
+
+ package = buffer.pointer;
+
+ if (!buffer.length || !package
+ || package->type != ACPI_TYPE_PACKAGE
+ || !package->package.count)
+ goto fail;
+
+ for (i = 0; i < package->package.count; i++) {
+ element = &package->package.elements[i];
+
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
+ continue;
+
+ rhandle = element->reference.handle;
+ if (!rhandle)
+ goto fail;
+
+ acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name);
+
+ dev_dbg(dev, "Found PM resource '%s'\n", name);
+ if (strlen(name) == 4 && !strncmp(name, "CLK", 3)) {
+ if (name[3] >= '0' && name[3] <= '4')
+ clock_num = name[3] - '0';
+#if 0
+ /*
+ * We could abort here, but let's parse all resources,
+ * as this is helpful for debugging purposes
+ */
+ if (clock_num >= 0)
+ break;
+#endif
+ }
+ }
+
+fail:
+ ACPI_FREE(buffer.pointer);
+
+ return clock_num;
+}
+
+static u8 gmin_get_pmic_id_and_addr(struct device *dev)
+{
+ struct i2c_client *power = NULL;
+ static u8 pmic_i2c_addr;
+
+ if (pmic_id)
+ return pmic_i2c_addr;
+
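+	/*
+	 * Probe for known PMIC ACPI devices; if none is present fall back to
+	 * the regulator framework.
+	 */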
+ if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power)) {
+ pmic_id = PMIC_TI;
+ } else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power)) {
+ pmic_id = PMIC_AXP;
+ } else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power)) {
+ pmic_id = PMIC_CRYSTALCOVE;
+ } else {
+ pmic_id = PMIC_REGULATOR;
+ return 0;
+ }
+
+ pmic_i2c_addr = power->addr;
+ put_device(&power->dev);
+ return pmic_i2c_addr;
+}
+
+static int gmin_detect_pmic(struct v4l2_subdev *subdev)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct device *dev = &client->dev;
+ u8 pmic_i2c_addr;
+
+ pmic_i2c_addr = gmin_get_pmic_id_and_addr(dev);
+ dev_info(dev, "gmin: power management provided via %s (i2c addr 0x%02x)\n",
+ pmic_name[pmic_id], pmic_i2c_addr);
+ return pmic_i2c_addr;
+}
+
+static int gmin_subdev_add(struct gmin_subdev *gs)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(gs->subdev);
+ struct device *dev = &client->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ int ret, default_val, clock_num = -1;
+
+ dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev));
+
+	/* WA: CHT requires the XTAL clock as the PLL is not stable. */
+ gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc",
+ VLV2_CLK_PLL_19P2MHZ);
+
+ /*
+ * Get ACPI _PR0 derived clock here already because it is used
+ * to determine the csi_port default.
+ */
+ if (acpi_device_power_manageable(adev))
+ clock_num = atomisp_get_acpi_power(dev);
+
+ /* Compare clock to CsiPort 1 pmc-clock used in the CHT/BYT reference designs */
+ if (IS_ISP2401)
+ default_val = clock_num == 4 ? 1 : 0;
+ else
+ default_val = clock_num == 0 ? 1 : 0;
+
+ gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", default_val);
+ gs->csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);
+
+ gs->gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
+ if (IS_ERR(gs->gpio0))
+ gs->gpio0 = NULL;
+ else
+ dev_info(dev, "will handle gpio0 via ACPI\n");
+
+ gs->gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(gs->gpio1))
+ gs->gpio1 = NULL;
+ else
+ dev_info(dev, "will handle gpio1 via ACPI\n");
+
+ /*
+ * FIXME:
+ *
+ * The ACPI handling code checks for the _PR? tables in order to
+ * know what is required to switch the device from power state
+ * D0 (_PR0) up to D3COLD (_PR3).
+ *
+ * The adev->flags.power_manageable is set to true if the device
+ * has a _PR0 table, which can be checked by calling
+ * acpi_device_power_manageable(adev).
+ *
+ * However, this only says that the device can be set to power off
+ * mode.
+ *
+ * At least on the DSDT tables we've seen so far, there's no _PR3,
+ * nor _PS3 (which would have a somewhat similar effect).
+ * So, using ACPI for power management won't work, except if adding
+ * an ACPI override logic somewhere.
+ *
+ * So, at least for the existing devices we know, the check below
+ * will always be false.
+ */
+ if (acpi_device_can_wakeup(adev) &&
+ acpi_device_can_poweroff(adev)) {
+ dev_info(dev,
+ "gmin: power management provided via device PM\n");
+ return 0;
+ }
+
+ /*
+ * The code below is here due to backward compatibility with devices
+ * whose ACPI BIOS may not contain everything that would be needed
+ * in order to set clocks and do power management.
+ */
+
+ /*
+	 * According to:
+ * https://github.com/projectceladon/hardware-intel-kernelflinger/blob/master/doc/fastboot.md
+ *
+ * The "CamClk" EFI var is set via fastboot on some Android devices,
+ * and seems to contain the number of the clock used to feed the
+ * sensor.
+ *
+ * On systems with a proper ACPI table, this is given via the _PR0
+ * power resource table. The logic below should first check if there
+ * is a power resource already, falling back to the EFI vars detection
+ * otherwise.
+ */
+
+ /* If getting the clock from _PR0 above failed, fall-back to EFI and/or DMI match */
+ if (clock_num < 0)
+ clock_num = gmin_get_var_int(dev, false, "CamClk", 0);
+
+ if (clock_num < 0 || clock_num > MAX_CLK_COUNT) {
+ dev_err(dev, "Invalid clock number\n");
+ return -EINVAL;
+ }
+
+ snprintf(gmin_pmc_clk_name, sizeof(gmin_pmc_clk_name),
+ "%s_%d", "pmc_plt_clk", clock_num);
+
+ gs->pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
+ if (IS_ERR(gs->pmc_clk)) {
+ ret = PTR_ERR(gs->pmc_clk);
+ dev_err(dev, "Failed to get clk from %s: %d\n", gmin_pmc_clk_name, ret);
+ return ret;
+ }
+ dev_info(dev, "Will use CLK%d (%s)\n", clock_num, gmin_pmc_clk_name);
+
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+	 * first to cover these cases. Since the common
+	 * clock framework does not allow disabling a
+	 * clock that has not been enabled, we need to
+	 * enable the clock first.
+ */
+ ret = clk_prepare_enable(gs->pmc_clk);
+ if (!ret)
+ clk_disable_unprepare(gs->pmc_clk);
+
+ switch (pmic_id) {
+ case PMIC_REGULATOR:
+ gs->v1p8_reg = regulator_get(dev, "V1P8SX");
+ gs->v2p8_reg = regulator_get(dev, "V2P8SX");
+
+ gs->v1p2_reg = regulator_get(dev, "V1P2A");
+
+ /* Note: ideally we would initialize v[12]p8_on to the
+ * output of regulator_is_enabled(), but sadly that
+ * API is broken with the current drivers, returning
+		 * "1" for a regulator that will then emit an
+		 * "unbalanced disable" WARNing if we try to disable
+ * it.
+ */
+ break;
+
+ case PMIC_AXP:
+ gs->eldo1_1p6v = gmin_get_var_int(dev, false,
+ "eldo1_1p8v",
+ ELDO1_1P6V);
+ gs->eldo1_sel_reg = gmin_get_var_int(dev, false,
+ "eldo1_sel_reg",
+ ELDO1_SEL_REG);
+ gs->eldo1_ctrl_shift = gmin_get_var_int(dev, false,
+ "eldo1_ctrl_shift",
+ ELDO1_CTRL_SHIFT);
+ gs->eldo2_1p8v = gmin_get_var_int(dev, false,
+ "eldo2_1p8v",
+ ELDO2_1P8V);
+ gs->eldo2_sel_reg = gmin_get_var_int(dev, false,
+ "eldo2_sel_reg",
+ ELDO2_SEL_REG);
+ gs->eldo2_ctrl_shift = gmin_get_var_int(dev, false,
+ "eldo2_ctrl_shift",
+ ELDO2_CTRL_SHIFT);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
+{
+ int i;
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (gmin_subdevs[i].subdev == subdev)
+ return &gmin_subdevs[i];
+ return NULL;
+}
+
+static struct gmin_subdev *find_free_gmin_subdev_slot(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (gmin_subdevs[i].subdev == NULL)
+ return &gmin_subdevs[i];
+ return NULL;
+}
+
+static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs,
+ int sel_reg, u8 setting,
+ int ctrl_reg, int shift, bool on)
+{
+ int ret;
+ int val;
+
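+	/*
+	 * Program the voltage-select register first, then toggle the enable
+	 * bit in the control register.
+	 */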
+ ret = gmin_i2c_write(dev, gs->pwm_i2c_addr, sel_reg, setting, 0xff);
+ if (ret)
+ return ret;
+
+ val = on ? 1 << shift : 0;
+
+ ret = gmin_i2c_write(dev, gs->pwm_i2c_addr, ctrl_reg, val, 1 << shift);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Some boards contain a hw-bug where turning eldo2 back on after having turned
+ * it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus
+ * to crash, hanging the bus. Do not turn eldo2 off on these systems.
+ */
+static const struct dmi_system_id axp_leave_eldo2_on_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+ },
+ },
+ { }
+};
+
+static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs)
+{
+ int ret;
+
+ ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
+ ELDO_CTRL_REG, gs->eldo2_ctrl_shift, true);
+ if (ret)
+ return ret;
+
+ /*
+ * This sleep comes out of the gc2235 driver, which is the
+ * only one I currently see that wants to set both 1.8v rails.
+ */
+ usleep_range(110, 150);
+
+ ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p6v,
+ ELDO_CTRL_REG, gs->eldo1_ctrl_shift, true);
+ return ret;
+}
+
+static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs)
+{
+ int ret;
+
+ ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p6v,
+ ELDO_CTRL_REG, gs->eldo1_ctrl_shift, false);
+ if (ret)
+ return ret;
+
+ if (dmi_check_system(axp_leave_eldo2_on_ids))
+ return 0;
+
+ ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
+ ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
+ return ret;
+}
+
+static int gmin_gpio0_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (gs) {
+ gpiod_set_value(gs->gpio0, on);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int gmin_gpio1_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (gs) {
+ gpiod_set_value(gs->gpio1, on);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (!gs || gs->v1p2_on == on)
+ return 0;
+ gs->v1p2_on = on;
+
+ /* use regulator for PMIC */
+ if (gs->v1p2_reg) {
+ if (on)
+ return regulator_enable(gs->v1p2_reg);
+ else
+ return regulator_disable(gs->v1p2_reg);
+ }
+
+ /* TODO:v1p2 may need to extend to other PMICs */
+
+ return -EINVAL;
+}
+
+static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ int ret;
+ int value;
+ int reg;
+
+ if (!gs || gs->v1p8_on == on)
+ return 0;
+
+ gs->v1p8_on = on;
+
+ ret = 0;
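+	/*
+	 * The 1.8V rail may be shared by multiple sensors, so enable/disable
+	 * requests are refcounted under gmin_regulator_mutex.
+	 */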
+ mutex_lock(&gmin_regulator_mutex);
+ if (on) {
+ gmin_v1p8_enable_count++;
+ if (gmin_v1p8_enable_count > 1)
+ goto out; /* Already on */
+ } else {
+ gmin_v1p8_enable_count--;
+ if (gmin_v1p8_enable_count > 0)
+ goto out; /* Still needed */
+ }
+
+ if (gs->v1p8_reg) {
+ regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
+ if (on)
+ ret = regulator_enable(gs->v1p8_reg);
+ else
+ ret = regulator_disable(gs->v1p8_reg);
+
+ goto out;
+ }
+
+ switch (pmic_id) {
+ case PMIC_AXP:
+ if (on)
+ ret = axp_v1p8_on(subdev->dev, gs);
+ else
+ ret = axp_v1p8_off(subdev->dev, gs);
+ break;
+ case PMIC_TI:
+ value = on ? LDO_1P8V_ON : LDO_1P8V_OFF;
+
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ LDO10_REG, value, 0xff);
+ break;
+ case PMIC_CRYSTALCOVE:
+ if (IS_ISP2401)
+ reg = CRYSTAL_CHT_1P8V_REG;
+ else
+ reg = CRYSTAL_BYT_1P8V_REG;
+
+ value = on ? CRYSTAL_ON : CRYSTAL_OFF;
+
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ reg, value, 0xff);
+ break;
+ default:
+ dev_err(subdev->dev, "Couldn't set power mode for v1p8\n");
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&gmin_regulator_mutex);
+ return ret;
+}
+
+static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ int ret;
+ int value;
+ int reg;
+
+ if (WARN_ON(!gs))
+ return -ENODEV;
+
+ if (gs->v2p8_on == on)
+ return 0;
+ gs->v2p8_on = on;
+
+ ret = 0;
+ mutex_lock(&gmin_regulator_mutex);
+ if (on) {
+ gmin_v2p8_enable_count++;
+ if (gmin_v2p8_enable_count > 1)
+ goto out; /* Already on */
+ } else {
+ gmin_v2p8_enable_count--;
+ if (gmin_v2p8_enable_count > 0)
+ goto out; /* Still needed */
+ }
+
+ if (gs->v2p8_reg) {
+ regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
+ if (on)
+ ret = regulator_enable(gs->v2p8_reg);
+ else
+ ret = regulator_disable(gs->v2p8_reg);
+
+ goto out;
+ }
+
+ switch (pmic_id) {
+ case PMIC_AXP:
+ ret = axp_regulator_set(subdev->dev, gs, ALDO1_SEL_REG,
+ ALDO1_2P8V, ALDO1_CTRL3_REG,
+ ALDO1_CTRL3_SHIFT, on);
+ break;
+ case PMIC_TI:
+ value = on ? LDO_2P8V_ON : LDO_2P8V_OFF;
+
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ LDO9_REG, value, 0xff);
+ break;
+ case PMIC_CRYSTALCOVE:
+ if (IS_ISP2401)
+ reg = CRYSTAL_CHT_2P8V_REG;
+ else
+ reg = CRYSTAL_BYT_2P8V_REG;
+
+ value = on ? CRYSTAL_ON : CRYSTAL_OFF;
+
+ ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
+ reg, value, 0xff);
+ break;
+ default:
+ dev_err(subdev->dev, "Couldn't set power mode for v2p8\n");
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&gmin_regulator_mutex);
+ return ret;
+}
+
+static int gmin_acpi_pm_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ int ret = 0;
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+
+ /* Use the ACPI power management to control it */
+ on = !!on;
+ if (gs->clock_on == on)
+ return 0;
+
+ dev_dbg(subdev->dev, "Setting power state to %s\n",
+ on ? "on" : "off");
+
+ if (on)
+ ret = acpi_device_set_power(adev,
+ ACPI_STATE_D0);
+ else
+ ret = acpi_device_set_power(adev,
+ ACPI_STATE_D3_COLD);
+
+ if (!ret)
+ gs->clock_on = on;
+ else
+ dev_err(subdev->dev, "Couldn't set power state to %s\n",
+ on ? "on" : "off");
+
+ return ret;
+}
+
+static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ int ret = 0;
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+
+ if (gs->clock_on == !!on)
+ return 0;
+
+ if (on) {
+ ret = clk_set_rate(gs->pmc_clk,
+ gs->clock_src ? CLK_RATE_19_2MHZ : CLK_RATE_25_0MHZ);
+
+ if (ret)
+ dev_err(&client->dev, "unable to set PMC rate %d\n",
+ gs->clock_src);
+
+ ret = clk_prepare_enable(gs->pmc_clk);
+ if (ret == 0)
+ gs->clock_on = true;
+ } else {
+ clk_disable_unprepare(gs->pmc_clk);
+ gs->clock_on = false;
+ }
+
+ return ret;
+}
+
+static int camera_sensor_csi_alloc(struct v4l2_subdev *sd, u32 port, u32 lanes,
+ u32 format, u32 bayer_order)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *csi;
+
+ csi = kzalloc(sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->port = port;
+ csi->num_lanes = lanes;
+ csi->input_format = format;
+ csi->raw_bayer_order = bayer_order;
+ v4l2_set_subdev_hostdata(sd, csi);
+ csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+ csi->metadata_effective_width = NULL;
+ dev_info(&client->dev,
+ "camera pdata: port: %d lanes: %d order: %8.8x\n",
+ port, lanes, bayer_order);
+
+ return 0;
+}
+
+static void camera_sensor_csi_free(struct v4l2_subdev *sd)
+{
+ struct camera_mipi_info *csi;
+
+ csi = v4l2_get_subdev_hostdata(sd);
+ kfree(csi);
+}
+
+static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct gmin_subdev *gs = find_gmin_subdev(sd);
+
+ if (!client || !gs)
+ return -ENODEV;
+
+ if (flag)
+ return camera_sensor_csi_alloc(sd, gs->csi_port, gs->csi_lanes,
+ gs->csi_fmt, gs->csi_bayer);
+ camera_sensor_csi_free(sd);
+ return 0;
+}
+
+int atomisp_register_sensor_no_gmin(struct v4l2_subdev *subdev, u32 lanes,
+ enum atomisp_input_format format,
+ enum atomisp_bayer_order bayer_order)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ int i, ret, clock_num, port = 0;
+
+ if (adev) {
+ /* Get ACPI _PR0 derived clock to determine the csi_port default */
+ if (acpi_device_power_manageable(adev)) {
+ clock_num = atomisp_get_acpi_power(&client->dev);
+
+ /* Compare clock to CsiPort 1 pmc-clock used in the CHT/BYT reference designs */
+ if (IS_ISP2401)
+ port = clock_num == 4 ? 1 : 0;
+ else
+ port = clock_num == 0 ? 1 : 0;
+ }
+
+ port = gmin_get_var_int(&client->dev, false, "CsiPort", port);
+ lanes = gmin_get_var_int(&client->dev, false, "CsiLanes", lanes);
+ }
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (!pdata_subdevs[i].subdev)
+ break;
+
+ if (i >= MAX_SUBDEVS) {
+ dev_err(&client->dev, "Error too many subdevs already registered\n");
+ return -ENOMEM;
+ }
+
+ ret = camera_sensor_csi_alloc(subdev, port, lanes, format, bayer_order);
+ if (ret)
+ return ret;
+
+ pdata_subdevs[i].port = port;
+ pdata_subdevs[i].lanes = lanes;
+ pdata_subdevs[i].subdev = subdev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_register_sensor_no_gmin);
+
+void atomisp_unregister_subdev(struct v4l2_subdev *subdev)
+{
+ int i;
+
+ for (i = 0; i < MAX_SUBDEVS; i++) {
+ if (pdata_subdevs[i].subdev != subdev)
+ continue;
+
+ camera_sensor_csi_free(subdev);
+ pdata_subdevs[i].subdev = NULL;
+ pdata_subdevs[i].port = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(atomisp_unregister_subdev);
+
+static struct camera_sensor_platform_data pmic_gmin_plat = {
+ .gpio0_ctrl = gmin_gpio0_ctrl,
+ .gpio1_ctrl = gmin_gpio1_ctrl,
+ .v1p8_ctrl = gmin_v1p8_ctrl,
+ .v2p8_ctrl = gmin_v2p8_ctrl,
+ .v1p2_ctrl = gmin_v1p2_ctrl,
+ .flisclk_ctrl = gmin_flisclk_ctrl,
+ .csi_cfg = gmin_csi_cfg,
+};
+
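+/* Fallback hooks deferring power/clock control to ACPI PM when no PMC clock was found */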
+static struct camera_sensor_platform_data acpi_gmin_plat = {
+ .gpio0_ctrl = gmin_gpio0_ctrl,
+ .gpio1_ctrl = gmin_gpio1_ctrl,
+ .v1p8_ctrl = gmin_acpi_pm_ctrl,
+ .v2p8_ctrl = gmin_acpi_pm_ctrl,
+ .v1p2_ctrl = gmin_acpi_pm_ctrl,
+ .flisclk_ctrl = gmin_acpi_pm_ctrl,
+ .csi_cfg = gmin_csi_cfg,
+};
+
+struct camera_sensor_platform_data *
+gmin_camera_platform_data(struct v4l2_subdev *subdev,
+ enum atomisp_input_format csi_format,
+ enum atomisp_bayer_order csi_bayer)
+{
+ u8 pmic_i2c_addr = gmin_detect_pmic(subdev);
+ struct gmin_subdev *gs;
+
+ gs = find_free_gmin_subdev_slot();
+ gs->subdev = subdev;
+ gs->csi_fmt = csi_format;
+ gs->csi_bayer = csi_bayer;
+ gs->pwm_i2c_addr = pmic_i2c_addr;
+
+ gmin_subdev_add(gs);
+ if (gs->pmc_clk)
+ return &pmic_gmin_plat;
+ else
+ return &acpi_gmin_plat;
+}
+EXPORT_SYMBOL_GPL(gmin_camera_platform_data);
+
+static int gmin_get_hardcoded_var(struct device *dev,
+ struct gmin_cfg_var *varlist,
+ const char *var8, char *out, size_t *out_len)
+{
+ struct gmin_cfg_var *gv;
+
+ for (gv = varlist; gv->name; gv++) {
+ size_t vl;
+
+ if (strcmp(var8, gv->name))
+ continue;
+
+ dev_info(dev, "Found DMI entry for '%s'\n", var8);
+
+ vl = strlen(gv->val);
+ if (vl > *out_len - 1)
+ return -ENOSPC;
+
+ strscpy(out, gv->val, *out_len);
+ *out_len = vl;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int gmin_get_config_dsm_var(struct device *dev,
+ const char *var,
+ char *out, size_t *out_len)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ union acpi_object *obj, *cur = NULL;
+ int i;
+
+ /*
+ * The data reported by "CamClk" seems to be either 0 or 1 in the
+ * _DSM table.
+ *
+ * In the ACPI tables seen so far, this is not related to the
+ * actual clock source for the sensor, which is given by the
+ * _PR0 ACPI table. So ignore it, as otherwise this would be
+ * set to a wrong value.
+ */
+ if (!strcmp(var, "CamClk"))
+ return -EINVAL;
+
+ /* Return on unexpected object type */
+ obj = acpi_evaluate_dsm_typed(handle, &atomisp_dsm_guid, 0, 0, NULL,
+ ACPI_TYPE_PACKAGE);
+ if (!obj) {
+ dev_info_once(dev, "Didn't find ACPI _DSM table.\n");
+ return -EINVAL;
+ }
+
+#if 0 /* Just for debugging purposes */
+ for (i = 0; i < obj->package.count; i++) {
+ union acpi_object *cur = &obj->package.elements[i];
+
+ if (cur->type == ACPI_TYPE_INTEGER)
+ dev_info(dev, "object #%d, type %d, value: %lld\n",
+ i, cur->type, cur->integer.value);
+ else if (cur->type == ACPI_TYPE_STRING)
+ dev_info(dev, "object #%d, type %d, string: %s\n",
+ i, cur->type, cur->string.pointer);
+ else
+ dev_info(dev, "object #%d, type %d\n",
+ i, cur->type);
+ }
+#endif
+
+ /* Seek for the desired var */
+ for (i = 0; i < obj->package.count - 1; i += 2) {
+ if (obj->package.elements[i].type == ACPI_TYPE_STRING &&
+ !strcmp(obj->package.elements[i].string.pointer, var)) {
+ /* Next element should be the required value */
+ cur = &obj->package.elements[i + 1];
+ break;
+ }
+ }
+
+ if (!cur) {
+ dev_info(dev, "didn't found _DSM entry for '%s'\n", var);
+ ACPI_FREE(obj);
+ return -EINVAL;
+ }
+
+ /*
+ * While it would be possible to have an ACPI_TYPE_INTEGER and
+ * read the value from cur->integer.value, the tables seen so far
+ * use the string type. So, warn when something other than a
+ * string is found and let the caller fall back to the old code.
+ */
+ if (cur->type != ACPI_TYPE_STRING) {
+ dev_info(dev, "found non-string _DSM entry for '%s'\n", var);
+ ACPI_FREE(obj);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "found _DSM entry for '%s': %s\n", var,
+ cur->string.pointer);
+ strscpy(out, cur->string.pointer, *out_len);
+ *out_len = strlen(out);
+
+ ACPI_FREE(obj);
+ return 0;
+}
+
+/* Retrieves a device-specific configuration variable. The dev
+ * argument should be a device with an ACPI companion, as all
+ * configuration is based on firmware ID.
+ */
+static int gmin_get_config_var(struct device *maindev,
+ bool is_gmin,
+ const char *var,
+ char *out, size_t *out_len)
+{
+ struct acpi_device *adev = ACPI_COMPANION(maindev);
+ efi_char16_t var16[CFG_VAR_NAME_MAX];
+ const struct dmi_system_id *id;
+ char var8[CFG_VAR_NAME_MAX];
+ efi_status_t status;
+ int i, ret;
+
+ if (!is_gmin && adev)
+ ret = snprintf(var8, sizeof(var8), "%s_%s", acpi_dev_name(adev), var);
+ else
+ ret = snprintf(var8, sizeof(var8), "gmin_%s", var);
+
+ if (ret < 0 || ret >= sizeof(var8) - 1)
+ return -EINVAL;
+
+ /* DMI based quirks override both the _DSM table and EFI variables */
+ id = dmi_first_match(gmin_vars);
+ if (id) {
+ ret = gmin_get_hardcoded_var(maindev, id->driver_data, var8,
+ out, out_len);
+ if (!ret)
+ return 0;
+ }
+
+ /* For sensors, try first to use the _DSM table */
+ if (!is_gmin) {
+ ret = gmin_get_config_dsm_var(maindev, var, out, out_len);
+ if (!ret)
+ return 0;
+ }
+
+ /* Our variable names are ASCII by construction, but EFI names
+ * are wide chars. Convert and zero-pad.
+ */
+ memset(var16, 0, sizeof(var16));
+ for (i = 0; i < sizeof(var8) && var8[i]; i++)
+ var16[i] = var8[i];
+
+ status = EFI_UNSUPPORTED;
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ status = efi.get_variable(var16, &GMIN_CFG_VAR_EFI_GUID, NULL,
+ (unsigned long *)out_len, out);
+ if (status == EFI_SUCCESS) {
+ dev_info(maindev, "found EFI entry for '%s'\n", var8);
+ return 0;
+ }
+ if (is_gmin)
+ dev_info(maindev, "Failed to find EFI gmin variable %s\n", var8);
+ else
+ dev_info(maindev, "Failed to find EFI variable %s\n", var8);
+ return -ENOENT;
+}
+
+int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def)
+{
+ char val[CFG_VAR_NAME_MAX + 1];
+ size_t len = CFG_VAR_NAME_MAX;
+ long result;
+ int ret;
+
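+	/* val has one spare byte, so the NUL termination below is always safe */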
+ ret = gmin_get_config_var(dev, is_gmin, var, val, &len);
+ if (!ret) {
+ val[len] = 0;
+ ret = kstrtol(val, 0, &result);
+ } else {
+ dev_info(dev, "%s: using default (%d)\n", var, def);
+ }
+
+ return ret ? def : result;
+}
+EXPORT_SYMBOL_GPL(gmin_get_var_int);
+
+/* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't
+ * work. Disable it so the kernel framework doesn't hang the device
+ * trying to use it. The driver itself makes direct calls to the
+ * PUNIT to manage ISP power.
+ */
+static void isp_pm_cap_fixup(struct pci_dev *pdev)
+{
+ dev_info(&pdev->dev, "Disabling PCI power management on camera ISP\n");
+ pdev->pm_cap = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup);
+
+MODULE_DESCRIPTION("Ancillary routines for binding ACPI devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
new file mode 100644
index 000000000000..5a69580b8251
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+#ifndef __ATOMISP_INTERNAL_H__
+#define __ATOMISP_INTERNAL_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/pm_qos.h>
+#include <linux/idr.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+/* ISP2400*/
+#include "ia_css_types.h"
+#include "sh_css_legacy.h"
+
+#include "atomisp_csi2.h"
+#include "atomisp_subdev.h"
+#include "atomisp_compat.h"
+
+#include "gp_device.h"
+#include "irq.h"
+#include <linux/vmalloc.h>
+
+#define V4L2_EVENT_FRAME_END 5
+
+#define IS_HWREVISION(isp, rev) \
+ (((isp)->media_dev.hw_revision & ATOMISP_HW_REVISION_MASK) == \
+ ((rev) << ATOMISP_HW_REVISION_SHIFT))
+
+#define ATOMISP_PCI_DEVICE_SOC_BYT 0x0f38
+/* MRFLD with 0x1178: ISP freq can burst to 457MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD 0x1178
+/* MRFLD with 0x1179: max ISP freq limited to 400MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD_1179 0x1179
+/* MRFLD with 0x117a: max ISP freq is 400MHz and max freq at Vmin is 200MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD_117A 0x117a
+#define ATOMISP_PCI_DEVICE_SOC_ANN 0x1478
+#define ATOMISP_PCI_DEVICE_SOC_CHT 0x22b8
+
+#define ATOMISP_PCI_REV_MRFLD_A0_MAX 0
+#define ATOMISP_PCI_REV_BYT_A0_MAX 4
+
+#define ATOM_ISP_STEP_WIDTH 2
+#define ATOM_ISP_STEP_HEIGHT 2
+
+#define ATOM_ISP_MIN_WIDTH 4
+#define ATOM_ISP_MIN_HEIGHT 4
+#define ATOM_ISP_MAX_WIDTH UINT_MAX
+#define ATOM_ISP_MAX_HEIGHT UINT_MAX
+
+/* sub-QCIF resolution */
+#define ATOM_RESOLUTION_SUBQCIF_WIDTH 128
+#define ATOM_RESOLUTION_SUBQCIF_HEIGHT 96
+
+#define ATOM_ISP_I2C_BUS_1 4
+#define ATOM_ISP_I2C_BUS_2 5
+
+#define ATOM_ISP_POWER_DOWN 0
+#define ATOM_ISP_POWER_UP 1
+
+#define ATOM_ISP_MAX_INPUTS 3
+
+#define ATOMISP_SC_TYPE_SIZE 2
+
+#define ATOMISP_ISP_TIMEOUT_DURATION (2 * HZ)
+#define ATOMISP_EXT_ISP_TIMEOUT_DURATION (6 * HZ)
+#define ATOMISP_WDT_KEEP_CURRENT_DELAY 0
+#define ATOMISP_ISP_MAX_TIMEOUT_COUNT 2
+#define ATOMISP_CSS_STOP_TIMEOUT_US 200000
+
+#define ATOMISP_CSS_Q_DEPTH 3
+#define ATOMISP_CSS_EVENTS_MAX 16
+#define ATOMISP_CONT_RAW_FRAMES 15
+#define ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL 8
+#define ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL 8
+
+/*
+ * Define how fast the CPU should be able to serve ISP interrupts.
+ * The bigger the value, the higher the risk that the ISP is not
+ * triggered sufficiently fast for it to process the image during
+ * vertical blanking time, increasing the risk of dropped frames.
+ * 1000 us is a reasonable value considering that the processing
+ * time is typically ~2000 us.
+ */
+#define ATOMISP_MAX_ISR_LATENCY 1000
+
+/* Add new YUVPP pipe for SOC sensor. */
+#define ATOMISP_CSS_SUPPORT_YUVPP 1
+
+#define ATOMISP_CSS_OUTPUT_SECOND_INDEX 1
+#define ATOMISP_CSS_OUTPUT_DEFAULT_INDEX 0
+
+/* ISP2401 */
+#define ATOMISP_ION_DEVICE_FD_OFFSET 16
+#define ATOMISP_ION_SHARED_FD_MASK (0xFFFF)
+#define ATOMISP_ION_DEVICE_FD_MASK (~ATOMISP_ION_SHARED_FD_MASK)
+#define ION_FD_UNSET (-1)
+
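+/* Round (n / d) to the nearest multiple of step, rounding halves up */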
+#define DIV_NEAREST_STEP(n, d, step) \
+ round_down((2 * (n) + (d) * (step)) / (2 * (d)), (step))
+
+#define SENSOR_ISP_PAD_SINK 0
+#define SENSOR_ISP_PAD_SOURCE 1
+#define SENSOR_ISP_PADS_NUM 2
+
+struct atomisp_input_subdev {
+ enum atomisp_camera_port port;
+ u32 code; /* MEDIA_BUS_FMT_* */
+ bool binning_support;
+ bool crop_support;
+ bool sensor_on;
+ struct v4l2_subdev *sensor;
+ struct v4l2_subdev *sensor_isp;
+ struct v4l2_subdev *csi_port;
+ struct v4l2_subdev *csi_remote_source;
+ /* Sensor rects for sensors which support crop */
+ struct v4l2_rect native_rect;
+ struct v4l2_rect active_rect;
+ /* Sensor state for which == V4L2_SUBDEV_FORMAT_TRY calls */
+ struct v4l2_subdev_state *try_sd_state;
+};
+
+enum atomisp_dfs_mode {
+ ATOMISP_DFS_MODE_AUTO = 0,
+ ATOMISP_DFS_MODE_LOW,
+ ATOMISP_DFS_MODE_MAX,
+};
+
+struct atomisp_regs {
+ /* PCI config space info */
+ u16 pcicmdsts;
+ u32 ispmmadr;
+ u32 msicap;
+ u32 msi_addr;
+ u16 msi_data;
+ u8 intr;
+ u32 interrupt_control;
+ u32 pmcs;
+ u32 cg_dis;
+ u32 i_control;
+
+ /* I-Unit PHY related info */
+ u32 csi_rcomp_config;
+ u32 csi_afe_dly;
+ u32 csi_control;
+
+ /* New for MRFLD */
+ u32 csi_afe_rcomp_config;
+ u32 csi_afe_hs_control;
+ u32 csi_deadline_control;
+ u32 csi_access_viol;
+};
+
+/*
+ * ci device struct
+ */
+struct atomisp_device {
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct atomisp_sub_device asd;
+ struct v4l2_async_notifier notifier;
+ void *mmu_l1_base;
+ void __iomem *base;
+ const struct firmware *firmware;
+
+ struct dev_pm_domain pm_domain;
+ struct pm_qos_request pm_qos;
+ s32 max_isr_latency;
+ bool pm_only;
+
+ struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS];
+
+ /* Purpose of mutex is to protect and serialize use of isp data
+ * structures and css API calls. */
+ struct mutex mutex;
+
+ /*
+ * Number of lanes used by each sensor per port.
+ * Note this is indexed by mipi_port_id not atomisp_camera_port.
+ */
+ int sensor_lanes[N_MIPI_PORT_ID];
+ struct v4l2_subdev *sensor_subdevs[ATOMISP_CAMERA_NR_PORTS];
+ unsigned int input_cnt;
+ struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
+
+ struct atomisp_regs saved_regs;
+ struct atomisp_css_env css_env;
+
+ bool isp_fatal_error;
+ struct work_struct assert_recovery_work;
+
+ spinlock_t lock; /* Protects asd.streaming */
+
+ const struct atomisp_dfs_config *dfs;
+ unsigned int hpll_freq;
+ unsigned int running_freq;
+
+ bool css_initialized;
+};
+
+#define v4l2_dev_to_atomisp_device(dev) \
+ container_of(dev, struct atomisp_device, v4l2_dev)
+
+extern struct device *atomisp_dev;
+
+#endif /* __ATOMISP_INTERNAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
new file mode 100644
index 000000000000..bb8b2f2213b0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -0,0 +1,1577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp-regs.h"
+#include "atomisp_compat.h"
+
+#include "sh_css_hrt.h"
+
+#include "gp_device.h"
+#include "device_access.h"
+#include "irq.h"
+
+static const char *DRIVER = "atomisp"; /* max size 15 */
+static const char *CARD = "ATOM ISP"; /* max size 31 */
+
+/*
+ * FIXME: ISP should not know beforehand all CIDs supported by sensor.
+ * Instead, it needs to propagate unknown CIDs to the sensor.
+ */
+static struct v4l2_query_ext_ctrl ci_v4l2_controls[] = {
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Automatic White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_GAMMA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gamma",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_COLORFX,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Image Color Effect",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Bad Pixel Correction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "GDC/CAC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_VIDEO_STABLIZATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Video Stabilization",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_FIXED_PATTERN_NR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Fixed Pattern Noise Reduction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "False Color Correction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_LOW_LIGHT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Low light mode",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+};
+
+static const u32 ctrls_num = ARRAY_SIZE(ci_v4l2_controls);
+
+/*
+ * supported V4L2 fmts and resolutions
+ */
+const struct atomisp_format_bridge atomisp_output_fmts[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV420,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_YUV420,
+ .description = "YUV420, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YVU420,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_YV12,
+ .description = "YVU420, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV422P,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_YUV422,
+ .description = "YUV422, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV444,
+ .depth = 24,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV444,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_YUV444,
+ .description = "YUV444"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV12,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_NV12,
+ .description = "NV12, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV21,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_NV21,
+ .description = "NV21, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV16,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_NV16,
+ .description = "NV16, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUYV,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_YUYV,
+ .description = "YUYV, interleaved"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_UYVY,
+ .description = "UYVY, interleaved"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR16,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_SBGGR16,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 16"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RGB565,
+ .description = "16 RGB 5-6-5"
+#if 0
+ }, {
+ /*
+ * Broken, showing vertical columns with random data.
+ * For each 128 pixels in a row the last 28 (32?) or so pixels
+ * contain random data.
+ */
+ .pixelformat = V4L2_PIX_FMT_RGBX32,
+ .depth = 32,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_RGB32,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_RGBA888,
+ .description = "32 RGB 8-8-8-8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_JPEG,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_BINARY_8,
+ .description = "JPEG"
+ }, {
+ /* This is a custom format being used by M10MO to send the RAW data */
+ .pixelformat = V4L2_PIX_FMT_CUSTOM_M10MO_RAW,
+ .depth = 8,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_M10MO_RAW,
+ .sh_fmt = IA_CSS_FRAME_FORMAT_BINARY_8,
+ .description = "Custom RAW for M10MO"
+#endif
+ },
+};
+
+const struct atomisp_format_bridge *
+atomisp_get_format_bridge(unsigned int pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ if (atomisp_output_fmts[i].pixelformat == pixelformat)
+ return &atomisp_output_fmts[i];
+ }
+
+ return NULL;
+}
+
+const struct atomisp_format_bridge *
+atomisp_get_format_bridge_from_mbus(u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ if (mbus_code == atomisp_output_fmts[i].mbus_code)
+ return &atomisp_output_fmts[i];
+ }
+
+ return NULL;
+}
+
+int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool settings_change)
+{
+ lockdep_assert_held(&pipe->isp->mutex);
+
+ if (pipe->isp->isp_fatal_error)
+ return -EIO;
+
+ if (settings_change && vb2_is_busy(&pipe->vb_queue)) {
+ dev_err(pipe->isp->dev, "Set fmt/input IOCTL while streaming\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * v4l2 ioctls
+ * return ISP capabilities
+ */
+static int atomisp_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ strscpy(cap->driver, DRIVER, sizeof(cap->driver));
+ strscpy(cap->card, CARD, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", dev_name(isp->dev));
+
+ return 0;
+}
+
+/*
+ * enum input is used to check the primary/secondary camera
+ */
+static int atomisp_enum_input(struct file *file, void *fh,
+ struct v4l2_input *input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int index = input->index;
+
+ if (index >= isp->input_cnt)
+ return -EINVAL;
+
+ if (!isp->inputs[index].sensor)
+ return -EINVAL;
+
+ memset(input, 0, sizeof(struct v4l2_input));
+ strscpy(input->name, isp->inputs[index].sensor->name,
+ sizeof(input->name));
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->index = index;
+ input->reserved[1] = isp->inputs[index].port;
+
+ return 0;
+}
+
+/*
+ * get input is used to get the current primary/secondary camera
+ */
+static int atomisp_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+
+ *input = asd->input_curr;
+ return 0;
+}
+
+static int atomisp_s_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return atomisp_set_fmt(vdev, f);
+}
+
+/*
+ * set input is used to set the current primary/secondary camera
+ */
+static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ int ret;
+
+ if (input >= isp->input_cnt)
+ return -EINVAL;
+
+ if (!isp->inputs[input].sensor)
+ return -EINVAL;
+
+ ret = atomisp_pipe_check(pipe, true);
+ if (ret)
+ return ret;
+
+ mutex_lock(&isp->media_dev.graph_mutex);
+ ret = atomisp_select_input(isp, input);
+ mutex_unlock(&isp->media_dev.graph_mutex);
+
+ return ret;
+}
+
+/*
+ * With crop, any framesize <= sensor-size can be made; give
+ * userspace a list of sizes to choose from.
+ */
+static int atomisp_enum_framesizes_crop_inner(struct atomisp_device *isp,
+ struct v4l2_frmsizeenum *fsize,
+ const struct v4l2_rect *active,
+ const struct v4l2_rect *native,
+ int *valid_sizes)
+{
+ static const struct v4l2_frmsize_discrete frame_sizes[] = {
+ { 1920, 1440 },
+ { 1920, 1200 },
+ { 1920, 1080 },
+ { 1600, 1200 },
+ { 1600, 1080 },
+ { 1600, 900 },
+ { 1440, 1080 },
+ { 1280, 960 },
+ { 1280, 720 },
+ { 800, 600 },
+ { 640, 480 },
+ };
+ u32 padding_w, padding_h;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(frame_sizes); i++) {
+ atomisp_get_padding(isp, frame_sizes[i].width, frame_sizes[i].height,
+ &padding_w, &padding_h);
+
+ if ((frame_sizes[i].width + padding_w) > native->width ||
+ (frame_sizes[i].height + padding_h) > native->height)
+ continue;
+
+ /*
+ * Skip sizes where width and height are less than 5/8th of the
+ * sensor size to avoid sizes with a too small field of view.
+ */
+ if (frame_sizes[i].width < (active->width * 5 / 8) &&
+ frame_sizes[i].height < (active->height * 5 / 8))
+ continue;
+
+ if (*valid_sizes == fsize->index) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete = frame_sizes[i];
+ return 0;
+ }
+
+ (*valid_sizes)++;
+ }
+
+ return -EINVAL;
+}
+
+static int atomisp_enum_framesizes_crop(struct atomisp_device *isp,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
+ struct v4l2_rect active = input->active_rect;
+ struct v4l2_rect native = input->native_rect;
+ int ret, valid_sizes = 0;
+
+ ret = atomisp_enum_framesizes_crop_inner(isp, fsize, &active, &native, &valid_sizes);
+ if (ret == 0)
+ return 0;
+
+ if (!input->binning_support)
+ return -EINVAL;
+
+ active.width /= 2;
+ active.height /= 2;
+ native.width /= 2;
+ native.height /= 2;
+
+ return atomisp_enum_framesizes_crop_inner(isp, fsize, &active, &native, &valid_sizes);
+}
+
+static int atomisp_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .code = input->code,
+ };
+ struct v4l2_subdev_state *act_sd_state;
+ int ret;
+
+ if (!input->sensor)
+ return -EINVAL;
+
+ if (input->crop_support)
+ return atomisp_enum_framesizes_crop(isp, fsize);
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
+ ret = v4l2_subdev_call(input->sensor, pad, enum_frame_size,
+ act_sd_state, &fse);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width - pad_w;
+ fsize->discrete.height = fse.max_height - pad_h;
+
+ return 0;
+}
+
+static int atomisp_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .code = atomisp_in_fmt_conv[0].code,
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_subdev_state *act_sd_state;
+ int ret;
+
+ if (!input->sensor)
+ return -EINVAL;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
+ ret = v4l2_subdev_call(input->sensor, pad, enum_frame_interval,
+ act_sd_state, &fie);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return ret;
+}
+
+static int atomisp_enum_fmt_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct atomisp_format_bridge *format;
+ struct v4l2_subdev_state *act_sd_state;
+ unsigned int i, fi = 0;
+ int ret;
+
+ if (!input->sensor)
+ return -EINVAL;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
+ ret = v4l2_subdev_call(input->sensor, pad, enum_mbus_code,
+ act_sd_state, &code);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ format = &atomisp_output_fmts[i];
+
+ /*
+ * Is the atomisp-supported format valid for the
+ * sensor (configuration)? If not, skip it.
+ *
+ * FIXME: fix the pipeline to allow sensor format too.
+ */
+ if (format->sh_fmt == IA_CSS_FRAME_FORMAT_RAW)
+ continue;
+
+ /* Found a match. Now let's pick f->index'th one. */
+ if (fi < f->index) {
+ fi++;
+ continue;
+ }
+
+ strscpy(f->description, format->description,
+ sizeof(f->description));
+ f->pixelformat = format->pixelformat;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* This function looks up the closest available resolution. */
+static int atomisp_try_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ return atomisp_try_fmt(isp, &f->fmt.pix, NULL, NULL);
+}
+
+static int atomisp_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe;
+
+ pipe = atomisp_to_video_pipe(vdev);
+
+ f->fmt.pix = pipe->pix;
+
+ /* If s_fmt was issued, just return whatever was previously set */
+ if (f->fmt.pix.sizeimage)
+ return 0;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+ f->fmt.pix.width = 10000;
+ f->fmt.pix.height = 10000;
+
+ return atomisp_try_fmt_cap(file, fh, f);
+}
+
+int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ uint16_t stream_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
+ int count;
+ struct ia_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned int i;
+
+ if (list_empty(&asd->s3a_stats) &&
+ asd->params.curr_grid_info.s3a_grid.enable) {
+ count = ATOMISP_CSS_Q_DEPTH +
+ ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL;
+ dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
+ while (count--) {
+ s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL);
+ if (!s3a_buf)
+ goto error;
+
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, s3a_buf, NULL, NULL)) {
+ kfree(s3a_buf);
+ goto error;
+ }
+
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ }
+ }
+
+ if (list_empty(&asd->dis_stats) && dvs_grid_info &&
+ dvs_grid_info->enable) {
+ count = ATOMISP_CSS_Q_DEPTH + 1;
+ dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
+ while (count--) {
+ dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
+ if (!dis_buf)
+ goto error;
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, NULL, dis_buf, NULL)) {
+ kfree(dis_buf);
+ goto error;
+ }
+
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ }
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ if (list_empty(&asd->metadata[i]) &&
+ list_empty(&asd->metadata_ready[i]) &&
+ list_empty(&asd->metadata_in_css[i])) {
+ count = ATOMISP_CSS_Q_DEPTH +
+ ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL;
+ dev_dbg(isp->dev, "allocating %d metadata buffers for type %d\n",
+ count, i);
+ while (count--) {
+ md_buf = kzalloc(sizeof(struct atomisp_metadata_buf),
+ GFP_KERNEL);
+ if (!md_buf)
+ goto error;
+
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, NULL, NULL, md_buf)) {
+ kfree(md_buf);
+ goto error;
+ }
+ list_add_tail(&md_buf->list, &asd->metadata[i]);
+ }
+ }
+ }
+ return 0;
+
+error:
+ dev_err(isp->dev, "failed to allocate statistics buffers\n");
+
+ list_for_each_entry_safe(dis_buf, _dis_buf, &asd->dis_stats, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+
+ list_for_each_entry_safe(s3a_buf, _s3a_buf, &asd->s3a_stats, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
+ list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ return -ENOMEM;
+}
+
+/*
+ * FIXME the abuse of buf->reserved2 in the qbuf and dqbuf wrappers comes from
+ * the original atomisp buffer handling and should be replaced with proper V4L2
+ * per frame parameters use.
+ *
+ * Once this is fixed these wrappers can be removed, replacing them with direct
+ * calls to vb2_ioctl_[d]qbuf().
+ */
+static int atomisp_qbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ if (buf->index >= vb2_get_num_buffers(vdev->queue))
+ return -EINVAL;
+
+ if (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING) {
+ /* this buffer will have a per-frame parameter */
+ pipe->frame_request_config_id[buf->index] = buf->reserved2 &
+ ~ATOMISP_BUFFER_HAS_PER_FRAME_SETTING;
+ dev_dbg(isp->dev,
+ "This buffer requires per_frame setting which has isp_config_id %d\n",
+ pipe->frame_request_config_id[buf->index]);
+ } else {
+ pipe->frame_request_config_id[buf->index] = 0;
+ }
+
+ return vb2_ioctl_qbuf(file, fh, buf);
+}
+
+static int atomisp_dqbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct ia_css_frame *frame;
+ struct vb2_buffer *vb;
+ int ret;
+
+ ret = vb2_ioctl_dqbuf(file, fh, buf);
+ if (ret)
+ return ret;
+
+ vb = vb2_get_buffer(&pipe->vb_queue, buf->index);
+ frame = vb_to_frame(vb);
+
+ /* reserved bits [31:16] are used for exp_id */
+ buf->reserved = 0;
+ if (!(buf->flags & V4L2_BUF_FLAG_ERROR))
+ buf->reserved |= frame->exp_id;
+ buf->reserved2 = pipe->frame_config_id[buf->index];
+
+ dev_dbg(isp->dev,
+ "dqbuf buffer %d (%s) with exp_id %d, isp_config_id %d\n",
+ buf->index, vdev->name, buf->reserved >> 16, buf->reserved2);
+ return 0;
+}
+
+/*
+ * Input system HW workaround:
+ * Input system address translation corrupts burst during invalidate.
+ * SW workaround for this is to set burst length manually to 128 in
+ * case of 13MPx snapshot and to 1 otherwise.
+ */
+static void atomisp_dma_burst_len_cfg(struct atomisp_sub_device *asd)
+{
+ struct v4l2_mbus_framefmt *sink;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
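+	/* 4096x3072 is roughly the 13MPx threshold from the workaround note above */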
+ if (sink->width * sink->height >= 4096 * 3072)
+ atomisp_css2_hw_store_32(DMA_BURST_SIZE_REG, 0x7F);
+ else
+ atomisp_css2_hw_store_32(DMA_BURST_SIZE_REG, 0x00);
+}
+
+static void atomisp_stop_stream(struct atomisp_video_pipe *pipe,
+ bool stop_sensor, enum vb2_buffer_state state)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = asd->isp;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ asd->streaming = false;
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ atomisp_clear_css_buffer_counters(asd);
+ atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
+
+ atomisp_css_stop(asd, false);
+
+ atomisp_subdev_cleanup_pending_events(asd);
+
+ if (stop_sensor) {
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].csi_remote_source,
+ video, s_stream, 0);
+ if (ret)
+ dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);
+ }
+
+ /* Disable the CSI interface on ANN B0/K0 */
+ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
+ pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control & ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ }
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false))
+ dev_warn(isp->dev, "DFS failed.\n");
+
+ /*
+ * ISP workaround: the ISP must be reset to allow the next stream on to work.
+ * Streams have already been destroyed by atomisp_css_stop().
+ * Disable PUNIT/ISP acknowledge/handshake - SRSE=3 and then reset.
+ */
+ pci_write_config_dword(pdev, PCI_I_CONTROL,
+ isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+ atomisp_reset(isp);
+
+ atomisp_flush_video_pipe(pipe, state, false);
+
+ /* Streams were destroyed by atomisp_css_stop(), recreate them. */
+ ret = atomisp_create_pipes_stream(&isp->asd);
+ if (ret)
+ dev_warn(isp->dev, "Recreating streams failed: %d\n", ret);
+
+ media_pipeline_stop(&asd->video_out.vdev.entity.pads[0]);
+}
+
+int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct atomisp_video_pipe *pipe = vq_to_pipe(vq);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = asd->isp;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ unsigned long irqflags;
+ int ret;
+
+ dev_dbg(isp->dev, "Start stream\n");
+
+ mutex_lock(&isp->mutex);
+
+ ret = atomisp_pipe_check(pipe, false);
+ if (ret) {
+ atomisp_flush_video_pipe(pipe, VB2_BUF_STATE_QUEUED, true);
+ goto out_unlock;
+ }
+
+ /*
+ * When running a classic v4l2 app after a media-controller aware
+ * app, the CSI-receiver -> ISP link for the current sensor may be
+ * disabled. Fix this up before marking the pipeline as started.
+ */
+ mutex_lock(&isp->media_dev.graph_mutex);
+ atomisp_setup_input_links(isp);
+ ret = __media_pipeline_start(&asd->video_out.vdev.entity.pads[0], &asd->video_out.pipe);
+ mutex_unlock(&isp->media_dev.graph_mutex);
+ if (ret) {
+ dev_err(isp->dev, "Error starting mc pipeline: %d\n", ret);
+ atomisp_flush_video_pipe(pipe, VB2_BUF_STATE_QUEUED, true);
+ goto out_unlock;
+ }
+
+ /* Input system HW workaround */
+ atomisp_dma_burst_len_cfg(asd);
+
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
+
+ if (asd->params.css_update_params_needed) {
+ atomisp_apply_css_parameters(asd, &asd->params.css_param);
+ if (asd->params.css_param.update_flag.dz_config)
+ asd->params.config.dz_config = &asd->params.css_param.dz_config;
+ atomisp_css_update_isp_params(asd);
+ asd->params.css_update_params_needed = false;
+ memset(&asd->params.css_param.update_flag, 0,
+ sizeof(struct atomisp_parameters));
+ }
+ asd->params.dvs_6axis = NULL;
+
+ ret = atomisp_css_start(asd);
+ if (ret) {
+ atomisp_flush_video_pipe(pipe, VB2_BUF_STATE_QUEUED, true);
+ media_pipeline_stop(&asd->video_out.vdev.entity.pads[0]);
+ goto out_unlock;
+ }
+
+ spin_lock_irqsave(&isp->lock, irqflags);
+ asd->streaming = true;
+ spin_unlock_irqrestore(&isp->lock, irqflags);
+ atomic_set(&asd->sof_count, 0);
+ atomic_set(&asd->sequence, 0);
+ atomic_set(&asd->sequence_temp, 0);
+
+ asd->params.dis_proj_data_valid = false;
+ asd->latest_preview_exp_id = 0;
+ asd->postview_exp_id = 1;
+ asd->preview_exp_id = 1;
+
+ /* handle per_frame_setting parameter and buffers */
+ atomisp_handle_parameter_and_buffer(pipe);
+
+ atomisp_qbuffers_to_css(asd);
+
+ atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ atomisp_css_valid_sof(isp));
+ atomisp_csi2_configure(asd);
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, false) < 0)
+ dev_dbg(isp->dev, "DFS auto mode failed!\n");
+
+ /* Enable the CSI interface on ANN B0/K0 */
+ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
+ pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control | MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ }
+
+ /* stream on the sensor */
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].csi_remote_source,
+ video, s_stream, 1);
+ if (ret) {
+ dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);
+ atomisp_stop_stream(pipe, false, VB2_BUF_STATE_QUEUED);
+ }
+
+out_unlock:
+ mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+void atomisp_stop_streaming(struct vb2_queue *vq)
+{
+ struct atomisp_video_pipe *pipe = vq_to_pipe(vq);
+ struct atomisp_device *isp = pipe->asd->isp;
+
+ dev_dbg(isp->dev, "Stop stream\n");
+
+ mutex_lock(&isp->mutex);
+ atomisp_stop_stream(pipe, true, VB2_BUF_STATE_ERROR);
+ mutex_unlock(&isp->mutex);
+}
+
+/*
+ * To get the current value of a control, applications initialize the
+ * id field of a struct v4l2_control and call this ioctl with a
+ * pointer to this structure.
+ */
+static int atomisp_g_ctrl(struct file *file, void *fh,
+ struct v4l2_control *control)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == control->id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ switch (control->id) {
+ case V4L2_CID_COLORFX:
+ ret = atomisp_color_effect(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
+ ret = atomisp_bad_pixel(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
+ ret = atomisp_gdc_cac(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
+ ret = atomisp_video_stable(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
+ ret = atomisp_fixed_pattern(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
+ ret = atomisp_false_color(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_LOW_LIGHT:
+ ret = atomisp_low_light(asd, 0, &control->value);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * To change the value of a control, applications initialize the id
+ * and value fields of a struct v4l2_control and call this ioctl.
+ */
+static int atomisp_s_ctrl(struct file *file, void *fh,
+ struct v4l2_control *control)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == control->id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ switch (control->id) {
+ case V4L2_CID_COLORFX:
+ ret = atomisp_color_effect(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
+ ret = atomisp_bad_pixel(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
+ ret = atomisp_gdc_cac(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
+ ret = atomisp_video_stable(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
+ ret = atomisp_fixed_pattern(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
+ ret = atomisp_false_color(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_LOW_LIGHT:
+ ret = atomisp_low_light(asd, 1, &control->value);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * To query the attributes of a control, applications set the id field
+ * of a struct v4l2_query_ext_ctrl and call this ioctl with a pointer
+ * to this structure. The driver fills in the rest of the structure.
+ */
+static int atomisp_query_ext_ctrl(struct file *file, void *fh,
+ struct v4l2_query_ext_ctrl *qc)
+{
+ int i;
+
+ /* TODO: implement V4L2_CTRL_FLAG_NEXT_CTRL */
+ if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
+ return -EINVAL;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == qc->id) {
+ *qc = ci_v4l2_controls[i];
+ qc->elems = 1;
+ qc->elem_size = 4;
+ return 0;
+ }
+ }
+
+ /*
+ * This is probably not needed, but this flag has been set for
+ * many kernel versions. Leave it to avoid breaking any apps.
+ */
+ qc->flags = V4L2_CTRL_FLAG_DISABLED;
+ return -EINVAL;
+}
+
+static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_control ctrl;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ switch (ctrl.id) {
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ ret = atomisp_digital_zoom(asd, 0, &ctrl.value);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ c->controls[i].value = ctrl.value;
+ }
+ return ret;
+}
+
+/* This ioctl allows the application to get multiple controls by class */
+static int atomisp_g_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct v4l2_control ctrl;
+ int i, ret = 0;
+
+ /*
+ * input_lock is not needed for the camera-related IOCTLs;
+ * the input_lock would downgrade the FPS of 3A.
+ */
+ ret = atomisp_camera_g_ext_ctrls(file, fh, c);
+ if (ret != -EINVAL)
+ return ret;
+
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ ret = atomisp_g_ctrl(file, fh, &ctrl);
+ c->controls[i].value = ctrl.value;
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_control ctrl;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < c->count; i++) {
+ struct v4l2_ctrl *ctr;
+
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ switch (ctrl.id) {
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ ret = atomisp_digital_zoom(asd, 1, &ctrl.value);
+ break;
+ default:
+ ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id);
+ if (ctr)
+ ret = v4l2_ctrl_s_ctrl(ctr, ctrl.value);
+ else
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ c->controls[i].value = ctrl.value;
+ }
+ return ret;
+}
+
+/* This ioctl allows the application to set multiple controls by class */
+static int atomisp_s_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct v4l2_control ctrl;
+ int i, ret = 0;
+
+ /*
+ * input_lock is not needed for the camera-related IOCTLs;
+ * the input_lock would downgrade the FPS of 3A.
+ */
+ ret = atomisp_camera_s_ext_ctrls(file, fh, c);
+ if (ret != -EINVAL)
+ return ret;
+
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ ret = atomisp_s_ctrl(file, fh, &ctrl);
+ c->controls[i].value = ctrl.value;
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * vidioc_g/s_param are used to switch isp running mode
+ */
+static int atomisp_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(isp->dev, "unsupported v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ parm->parm.capture.capturemode = asd->run_mode->val;
+
+ return 0;
+}
+
+static int atomisp_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ int mode;
+ int rval;
+ int fps;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(isp->dev, "unsupported v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ asd->high_speed_mode = false;
+ switch (parm->parm.capture.capturemode) {
+ case CI_MODE_NONE: {
+ struct v4l2_subdev_frame_interval fi = {0};
+
+ fi.interval = parm->parm.capture.timeperframe;
+
+ rval = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].csi_remote_source,
+ pad, set_frame_interval, &fi);
+ if (!rval)
+ parm->parm.capture.timeperframe = fi.interval;
+
+ if (fi.interval.numerator != 0) {
+ fps = fi.interval.denominator / fi.interval.numerator;
+ if (fps > 30)
+ asd->high_speed_mode = true;
+ }
+
+ return rval == -ENOIOCTLCMD ? 0 : rval;
+ }
+ case CI_MODE_VIDEO:
+ mode = ATOMISP_RUN_MODE_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ mode = ATOMISP_RUN_MODE_STILL_CAPTURE;
+ break;
+ case CI_MODE_PREVIEW:
+ mode = ATOMISP_RUN_MODE_PREVIEW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode);
+
+ return rval == -ENOIOCTLCMD ? 0 : rval;
+}
+
+static long atomisp_vidioc_default(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ int err;
+
+ switch (cmd) {
+ case ATOMISP_IOC_G_XNR:
+ err = atomisp_xnr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_XNR:
+ err = atomisp_xnr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_NR:
+ err = atomisp_nr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_NR:
+ err = atomisp_nr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_TNR:
+ err = atomisp_tnr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_TNR:
+ err = atomisp_tnr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_BLACK_LEVEL_COMP:
+ err = atomisp_black_level(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_BLACK_LEVEL_COMP:
+ err = atomisp_black_level(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_EE:
+ err = atomisp_ee(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_EE:
+ err = atomisp_ee(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_DIS_STAT:
+ err = atomisp_get_dis_stat(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS:
+ err = atomisp_get_dvs2_bq_resolutions(asd, arg);
+ break;
+
+ case ATOMISP_IOC_S_DIS_COEFS:
+ err = atomisp_css_cp_dvs2_coefs(asd, arg,
+ &asd->params.css_param, true);
+ if (!err && arg)
+ asd->params.css_update_params_needed = true;
+ break;
+
+ case ATOMISP_IOC_S_DIS_VECTOR:
+ err = atomisp_cp_dvs_6axis_config(asd, arg,
+ &asd->params.css_param, true);
+ if (!err && arg)
+ asd->params.css_update_params_needed = true;
+ break;
+
+ case ATOMISP_IOC_G_ISP_PARM:
+ err = atomisp_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_PARM:
+ err = atomisp_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_3A_STAT:
+ err = atomisp_3a_stat(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GAMMA:
+ err = atomisp_gamma(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GAMMA:
+ err = atomisp_gamma(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GDC_TAB:
+ err = atomisp_gdc_cac_table(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GDC_TAB:
+ err = atomisp_gdc_cac_table(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_MACC:
+ err = atomisp_macc_table(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_MACC:
+ err = atomisp_macc_table(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION:
+ err = atomisp_bad_pixel_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION:
+ err = atomisp_bad_pixel_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION:
+ err = atomisp_false_color_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION:
+ err = atomisp_false_color_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_CTC:
+ err = atomisp_ctc(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_CTC:
+ err = atomisp_ctc(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_WHITE_BALANCE:
+ err = atomisp_white_balance_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_WHITE_BALANCE:
+ err = atomisp_white_balance_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_3A_CONFIG:
+ err = atomisp_3a_config_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_3A_CONFIG:
+ err = atomisp_3a_config_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_FPN_TABLE:
+ err = atomisp_fixed_pattern_table(asd, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_SHD_TAB:
+ err = atomisp_set_shading_table(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION:
+ err = atomisp_gamma_correction(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION:
+ err = atomisp_gamma_correction(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_S_PARAMETERS:
+ err = atomisp_set_parameters(vdev, arg);
+ break;
+
+ case ATOMISP_IOC_EXP_ID_UNLOCK:
+ err = atomisp_exp_id_unlock(asd, arg);
+ break;
+ case ATOMISP_IOC_EXP_ID_CAPTURE:
+ err = atomisp_exp_id_capture(asd, arg);
+ break;
+ case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE:
+ err = atomisp_enable_dz_capt_pipe(asd, arg);
+ break;
+ case ATOMISP_IOC_G_FORMATS_CONFIG:
+ err = atomisp_formats(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_FORMATS_CONFIG:
+ err = atomisp_formats(asd, 1, arg);
+ break;
+ case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
+ err = atomisp_inject_a_fake_event(asd, arg);
+ break;
+ case ATOMISP_IOC_S_ARRAY_RESOLUTION:
+ err = atomisp_set_array_res(asd, arg);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
+ .vidioc_querycap = atomisp_querycap,
+ .vidioc_enum_input = atomisp_enum_input,
+ .vidioc_g_input = atomisp_g_input,
+ .vidioc_s_input = atomisp_s_input,
+ .vidioc_query_ext_ctrl = atomisp_query_ext_ctrl,
+ .vidioc_s_ext_ctrls = atomisp_s_ext_ctrls,
+ .vidioc_g_ext_ctrls = atomisp_g_ext_ctrls,
+ .vidioc_enum_framesizes = atomisp_enum_framesizes,
+ .vidioc_enum_frameintervals = atomisp_enum_frameintervals,
+ .vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap,
+ .vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap,
+ .vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap,
+ .vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = atomisp_qbuf_wrapper,
+ .vidioc_dqbuf = atomisp_dqbuf_wrapper,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_default = atomisp_vidioc_default,
+ .vidioc_s_parm = atomisp_s_parm,
+ .vidioc_g_parm = atomisp_g_parm,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
new file mode 100644
index 000000000000..57f608f9db56
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __ATOMISP_IOCTL_H__
+#define __ATOMISP_IOCTL_H__
+
+#include "ia_css.h"
+
+struct atomisp_device;
+struct atomisp_video_pipe;
+
+extern const struct atomisp_format_bridge atomisp_output_fmts[];
+
+const struct
+atomisp_format_bridge *atomisp_get_format_bridge(unsigned int pixelformat);
+
+const struct
+atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(u32 mbus_code);
+
+int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool streaming_ok);
+
+int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ uint16_t stream_id);
+
+int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count);
+void atomisp_stop_streaming(struct vb2_queue *vq);
+
+extern const struct v4l2_ioctl_ops atomisp_ioctl_ops;
+
+#endif /* __ATOMISP_IOCTL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
new file mode 100644
index 000000000000..3d56ca83ecb7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -0,0 +1,919 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-vmalloc.h>
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_compat.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+
+const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[] = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_BGGR },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_GBRG },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_GRBG },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_RGGB },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_BGGR },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_GBRG },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_GRBG },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_RGGB },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_BGGR },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_GBRG },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_GRBG },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_RGGB },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0 },
+#if 0 // disabled due to clang warnings
+ { MEDIA_BUS_FMT_JPEG_1X8, 8, 8, IA_CSS_FRAME_FORMAT_BINARY_8, 0 },
+ { V4L2_MBUS_FMT_CUSTOM_NV12, 12, 12, IA_CSS_FRAME_FORMAT_NV12, 0 },
+ { V4L2_MBUS_FMT_CUSTOM_NV21, 12, 12, IA_CSS_FRAME_FORMAT_NV21, 0 },
+#endif
+ { V4L2_MBUS_FMT_CUSTOM_YUV420, 12, 12, ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, 0 },
+#if 0
+ { V4L2_MBUS_FMT_CUSTOM_M10MO_RAW, 8, 8, IA_CSS_FRAME_FORMAT_BINARY_8, 0 },
+#endif
+ /* no valid V4L2 MBUS code for metadata format, so leave it 0. */
+ { 0, 0, 0, ATOMISP_INPUT_FORMAT_EMBEDDED, 0 },
+ {}
+};
+
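+/* Map the 10-bit Bayer bus codes to their DPCM-compressed 8-bit counterparts. */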
+static const struct {
+ u32 code;
+ u32 compressed;
+} compressed_codes[] = {
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 },
+};
+
+u32 atomisp_subdev_uncompressed_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(compressed_codes); i++)
+ if (code == compressed_codes[i].compressed)
+ return compressed_codes[i].code;
+
+ return code;
+}
+
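+/* A bus code is compressed when its bits-per-pixel differ from its uncompressed depth. */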
+bool atomisp_subdev_is_compressed(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (code == atomisp_in_fmt_conv[i].code)
+ return atomisp_in_fmt_conv[i].bpp !=
+ atomisp_in_fmt_conv[i].depth;
+
+ return false;
+}
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (code == atomisp_in_fmt_conv[i].code)
+ return atomisp_in_fmt_conv + i;
+
+ return NULL;
+}
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ enum atomisp_input_format atomisp_in_fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (atomisp_in_fmt_conv[i].atomisp_in_fmt == atomisp_in_fmt)
+ return atomisp_in_fmt_conv + i;
+
+ return NULL;
+}
+
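+/* True when the sink pad carries raw Bayer data but the source pad does not. */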
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd)
+{
+ struct v4l2_mbus_framefmt *sink, *src;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
+ src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SOURCE);
+
+ return atomisp_is_mbuscode_raw(sink->code)
+ && !atomisp_is_mbuscode_raw(src->code);
+}
+
+/*
+ * V4L2 subdev operations
+ */
+
+/*
+ * isp_subdev_ioctl - CCDC module private ioctls
+ * @sd: ISP V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long isp_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return 0;
+}
+
+static int isp_subdev_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+
+ if (sub->type != V4L2_EVENT_FRAME_SYNC &&
+ sub->type != V4L2_EVENT_FRAME_END &&
+ sub->type != V4L2_EVENT_ATOMISP_3A_STATS_READY &&
+ sub->type != V4L2_EVENT_ATOMISP_METADATA_READY &&
+ sub->type != V4L2_EVENT_ATOMISP_PAUSE_BUFFER &&
+ sub->type != V4L2_EVENT_ATOMISP_CSS_RESET &&
+ sub->type != V4L2_EVENT_ATOMISP_ACC_COMPLETE)
+ return -EINVAL;
+
+ if (sub->type == V4L2_EVENT_FRAME_SYNC &&
+ !atomisp_css_valid_sof(isp))
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 16, NULL);
+}
+
+/*
+ * isp_subdev_enum_mbus_code - Handle pixel format enumeration
+ * @sd: pointer to v4l2 subdev structure
+ * @sd_state: V4L2 subdev state
+ * @code: pointer to v4l2_subdev_pad_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int isp_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(atomisp_in_fmt_conv) - 1)
+ return -EINVAL;
+
+ code->code = atomisp_in_fmt_conv[code->index].code;
+
+ return 0;
+}
+
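+/* The sink pad only accepts the crop target; all other pads only accept compose. */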
+static int isp_subdev_validate_rect(struct v4l2_subdev *sd, uint32_t pad,
+ uint32_t target)
+{
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK:
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return 0;
+ }
+ break;
+ default:
+ switch (target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
+struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ u32 which, uint32_t pad,
+ uint32_t target)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY) {
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return v4l2_subdev_state_get_crop(sd_state, pad);
+ case V4L2_SEL_TGT_COMPOSE:
+ return v4l2_subdev_state_get_compose(sd_state, pad);
+ }
+ }
+
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return &isp_sd->fmt[pad].crop;
+ case V4L2_SEL_TGT_COMPOSE:
+ return &isp_sd->fmt[pad].compose;
+ }
+
+ return NULL;
+}
+
+struct v4l2_mbus_framefmt
+*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state, uint32_t which,
+ uint32_t pad)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_format(sd_state, pad);
+
+ return &isp_sd->fmt[pad].fmt;
+}
+
+static void isp_get_fmt_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
+ struct v4l2_mbus_framefmt **ffmt,
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ struct v4l2_rect *comp[ATOMISP_SUBDEV_PADS_NUM])
+{
+ unsigned int i;
+
+ for (i = 0; i < ATOMISP_SUBDEV_PADS_NUM; i++) {
+ ffmt[i] = atomisp_subdev_get_ffmt(sd, sd_state, which, i);
+ crop[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
+ V4L2_SEL_TGT_CROP);
+ comp[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
+ V4L2_SEL_TGT_COMPOSE);
+ }
+}
+
+static int isp_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_rect *rec;
+ int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
+
+ if (rval)
+ return rval;
+
+ rec = atomisp_subdev_get_rect(sd, sd_state, sel->which, sel->pad,
+ sel->target);
+ if (!rec)
+ return -EINVAL;
+
+ sel->r = *rec;
+ return 0;
+}
+
+static const char *atomisp_pad_str(unsigned int pad)
+{
+ static const char *const pad_str[] = {
+ "ATOMISP_SUBDEV_PAD_SINK",
+ "ATOMISP_SUBDEV_PAD_SOURCE",
+ };
+
+ if (pad >= ARRAY_SIZE(pad_str))
+ return "ATOMISP_INVALID_PAD";
+ return pad_str[pad];
+}
+
+int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ u32 which, uint32_t pad, uint32_t target,
+ u32 flags, struct v4l2_rect *r)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ *comp[ATOMISP_SUBDEV_PADS_NUM];
+
+ if ((pad == ATOMISP_SUBDEV_PAD_SINK && target != V4L2_SEL_TGT_CROP) ||
+ (pad == ATOMISP_SUBDEV_PAD_SOURCE && target != V4L2_SEL_TGT_COMPOSE))
+ return -EINVAL;
+
+ isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);
+
+ dev_dbg(isp->dev,
+ "sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
+ atomisp_pad_str(pad), target == V4L2_SEL_TGT_CROP
+ ? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE",
+ r->left, r->top, r->width, r->height,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ : "V4L2_SUBDEV_FORMAT_ACTIVE", flags);
+
+ r->width = rounddown(r->width, ATOM_ISP_STEP_WIDTH);
+ r->height = rounddown(r->height, ATOM_ISP_STEP_HEIGHT);
+
+ if (pad == ATOMISP_SUBDEV_PAD_SINK) {
+ /* Only crop target supported on sink pad. */
+ unsigned int dvs_w, dvs_h;
+
+ crop[pad]->width = ffmt[pad]->width;
+ crop[pad]->height = ffmt[pad]->height;
+
+ if (atomisp_subdev_format_conversion(isp_sd)
+ && crop[pad]->width && crop[pad]->height) {
+ crop[pad]->width -= isp_sd->sink_pad_padding_w;
+ crop[pad]->height -= isp_sd->sink_pad_padding_h;
+ }
+
+ if (isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /*
+ * This resolution contains 20 % of DVS slack
+ * (of the desired captured image before
+ * scaling, or 1 / 6 of what we get from the
+ * sensor) in both width and height. Remove it.
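+			 * E.g. a 1920x1080 sink crop becomes roughly 1600x900
+			 * (5/6 of each dimension, rounded to the ISP step size).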
+ */
+ crop[pad]->width = roundup(crop[pad]->width * 5 / 6,
+ ATOM_ISP_STEP_WIDTH);
+ crop[pad]->height = roundup(crop[pad]->height * 5 / 6,
+ ATOM_ISP_STEP_HEIGHT);
+ }
+
+ crop[pad]->width = min(crop[pad]->width, r->width);
+ crop[pad]->height = min(crop[pad]->height, r->height);
+
+ if (!(flags & V4L2_SEL_FLAG_KEEP_CONFIG)) {
+ struct v4l2_rect tmp = *crop[pad];
+
+ atomisp_subdev_set_selection(sd, sd_state, which,
+ ATOMISP_SUBDEV_PAD_SOURCE,
+ V4L2_SEL_TGT_COMPOSE, flags, &tmp);
+ }
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ goto get_rect;
+
+ if (isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ dvs_w = rounddown(crop[pad]->width / 5,
+ ATOM_ISP_STEP_WIDTH);
+ dvs_h = rounddown(crop[pad]->height / 5,
+ ATOM_ISP_STEP_HEIGHT);
+ } else if (!isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /*
+			 * For CSS2.0, digital zoom needs the dvs envelope set to 12
+			 * when dvs is disabled.
+ */
+ dvs_w = dvs_h = 12;
+ } else {
+ dvs_w = dvs_h = 0;
+ }
+ atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h);
+ atomisp_css_input_set_effective_resolution(isp_sd,
+ ATOMISP_INPUT_STREAM_GENERAL,
+ crop[pad]->width,
+ crop[pad]->height);
+ } else if (isp_sd->run_mode->val != ATOMISP_RUN_MODE_PREVIEW) {
+ /* Only compose target is supported on source pads. */
+ if (isp_sd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ /* Scaling is disabled in this mode */
+ r->width = crop[ATOMISP_SUBDEV_PAD_SINK]->width;
+ r->height = crop[ATOMISP_SUBDEV_PAD_SINK]->height;
+ }
+
+ if (crop[ATOMISP_SUBDEV_PAD_SINK]->width == r->width
+ && crop[ATOMISP_SUBDEV_PAD_SINK]->height == r->height)
+ isp_sd->params.yuv_ds_en = false;
+ else
+ isp_sd->params.yuv_ds_en = true;
+
+ comp[pad]->width = r->width;
+ comp[pad]->height = r->height;
+
+ if (r->width == 0 || r->height == 0 ||
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width == 0 ||
+ crop[ATOMISP_SUBDEV_PAD_SINK]->height == 0)
+ goto get_rect;
+ /*
+		 * Do cropping on the sensor input if the aspect ratio of the
+		 * required resolution differs from the sensor output ratio:
+ *
+ * ratio = width / height
+ *
+ * if ratio_output < ratio_sensor:
+ * effect_width = sensor_height * out_width / out_height;
+ * effect_height = sensor_height;
+ * else
+ * effect_width = sensor_width;
+ * effect_height = sensor_width * out_height / out_width;
+ *
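+		 * e.g. a 1920x1080 (16:9) sensor crop with a 1440x1080 (4:3)
+		 * output request gives an effective input of 1440x1080: the
+		 * width is cropped while the height passes through unchanged.
+		 *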
+ */
+ if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height <
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height)
+ atomisp_css_input_set_effective_resolution(isp_sd,
+ ATOMISP_INPUT_STREAM_GENERAL,
+ rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
+ height * r->width / r->height,
+ ATOM_ISP_STEP_WIDTH),
+ crop[ATOMISP_SUBDEV_PAD_SINK]->height);
+ else
+ atomisp_css_input_set_effective_resolution(isp_sd,
+ ATOMISP_INPUT_STREAM_GENERAL,
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width,
+ rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
+ width * r->height / r->width,
+ ATOM_ISP_STEP_WIDTH));
+ } else {
+ comp[pad]->width = r->width;
+ comp[pad]->height = r->height;
+ }
+
+get_rect:
+ /* Set format dimensions on non-sink pads as well. */
+ if (pad != ATOMISP_SUBDEV_PAD_SINK) {
+ ffmt[pad]->width = comp[pad]->width;
+ ffmt[pad]->height = comp[pad]->height;
+ }
+
+ if (!atomisp_subdev_get_rect(sd, sd_state, which, pad, target))
+ return -EINVAL;
+ *r = *atomisp_subdev_get_rect(sd, sd_state, which, pad, target);
+
+ dev_dbg(isp->dev, "sel actual: l %d t %d w %d h %d\n",
+ r->left, r->top, r->width, r->height);
+
+ return 0;
+}
+
+static int isp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
+
+ if (rval)
+ return rval;
+
+ return atomisp_subdev_set_selection(sd, sd_state, sel->which,
+ sel->pad,
+ sel->target, sel->flags, &sel->r);
+}
+
+void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
+ u32 pad, struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ struct v4l2_mbus_framefmt *__ffmt =
+ atomisp_subdev_get_ffmt(sd, sd_state, which, pad);
+
+ dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
+ atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ : "V4L2_SUBDEV_FORMAT_ACTIVE");
+
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK: {
+ const struct atomisp_in_fmt_conv *fc =
+ atomisp_find_in_fmt_conv(ffmt->code);
+ struct v4l2_rect r = {};
+
+ if (!fc) {
+ fc = atomisp_in_fmt_conv;
+ ffmt->code = fc->code;
+ dev_dbg(isp->dev, "using 0x%8.8x instead\n",
+ ffmt->code);
+ }
+
+ *__ffmt = *ffmt;
+
+ /* Propagate new ffmt to selection */
+ r.width = ffmt->width;
+ r.height = ffmt->height;
+ /* Only crop target supported on sink pad. */
+ atomisp_subdev_set_selection(sd, sd_state, which, pad,
+ V4L2_SEL_TGT_CROP, 0, &r);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ atomisp_css_input_set_resolution(isp_sd,
+ ATOMISP_INPUT_STREAM_GENERAL, ffmt);
+ atomisp_css_input_set_binning_factor(isp_sd,
+ ATOMISP_INPUT_STREAM_GENERAL,
+ 0);
+ atomisp_css_input_set_bayer_order(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
+ fc->bayer_order);
+ atomisp_css_input_set_format(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
+ fc->atomisp_in_fmt);
+ atomisp_css_set_default_isys_config(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
+ ffmt);
+ }
+
+ break;
+ }
+ case ATOMISP_SUBDEV_PAD_SOURCE:
+ __ffmt->code = ffmt->code;
+ break;
+ }
+}
+
+/*
+ * isp_subdev_get_format - Retrieve the video format on a pad
+ * @sd : ISP V4L2 subdevice
+ * @sd_state: V4L2 subdev state
+ * @fmt: Format (the pad number is carried in fmt->pad)
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int isp_subdev_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format = *atomisp_subdev_get_ffmt(sd, sd_state, fmt->which,
+ fmt->pad);
+
+ return 0;
+}
+
+/*
+ * isp_subdev_set_format - Set the video format on a pad
+ * @sd : ISP subdev V4L2 subdevice
+ * @sd_state: V4L2 subdev state
+ * @fmt: Format (the pad number is carried in fmt->pad)
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int isp_subdev_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ atomisp_subdev_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
+ &fmt->format);
+
+ return 0;
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops isp_subdev_v4l2_core_ops = {
+ .ioctl = isp_subdev_ioctl,
+ .subscribe_event = isp_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops isp_subdev_v4l2_pad_ops = {
+ .enum_mbus_code = isp_subdev_enum_mbus_code,
+ .get_fmt = isp_subdev_get_format,
+ .set_fmt = isp_subdev_set_format,
+ .get_selection = isp_subdev_get_selection,
+ .set_selection = isp_subdev_set_selection,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops isp_subdev_v4l2_ops = {
+ .core = &isp_subdev_v4l2_core_ops,
+ .pad = &isp_subdev_v4l2_pad_ops,
+};
+
+static void isp_subdev_init_params(struct atomisp_sub_device *asd)
+{
+ unsigned int i;
+
+ /* parameters initialization */
+ INIT_LIST_HEAD(&asd->s3a_stats);
+ INIT_LIST_HEAD(&asd->s3a_stats_in_css);
+ INIT_LIST_HEAD(&asd->s3a_stats_ready);
+ INIT_LIST_HEAD(&asd->dis_stats);
+ INIT_LIST_HEAD(&asd->dis_stats_in_css);
+ spin_lock_init(&asd->dis_stats_lock);
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ INIT_LIST_HEAD(&asd->metadata[i]);
+ INIT_LIST_HEAD(&asd->metadata_in_css[i]);
+ INIT_LIST_HEAD(&asd->metadata_ready[i]);
+ }
+}
+
+/* media operations */
+static int atomisp_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = container_of(entity, struct v4l2_subdev,
+ entity);
+ struct atomisp_sub_device *asd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = asd->isp;
+ int i;
+
+ /* ISP's source is immutable */
+ if (local != &asd->pads[ATOMISP_SUBDEV_PAD_SINK]) {
+ v4l2_err(sd, "Error pad %d does not support changing flags\n",
+ local->index);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < isp->input_cnt; i++) {
+ if (&isp->inputs[i].csi_port->entity.pads[CSI2_PAD_SOURCE] == remote)
+ break;
+ }
+
+ if (i == isp->input_cnt) {
+ v4l2_err(sd, "Error no sensor for selected CSI receiver\n");
+ return -EINVAL;
+ }
+
+ /* Turn off the sensor on link disable */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ atomisp_s_sensor_power(isp, i, 0);
+ return 0;
+ }
+
+ return atomisp_select_input(isp, i);
+}
+
+static const struct media_entity_operations isp_subdev_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .link_setup = atomisp_link_setup,
+ /* .set_power = v4l2_subdev_set_power, */
+};
+
+static const char *const ctrl_run_mode_menu[] = {
+ [ATOMISP_RUN_MODE_VIDEO] = "Video",
+ [ATOMISP_RUN_MODE_STILL_CAPTURE] = "Still capture",
+ [ATOMISP_RUN_MODE_PREVIEW] = "Preview",
+};
+
+static const struct v4l2_ctrl_config ctrl_run_mode = {
+ .id = V4L2_CID_RUN_MODE,
+ .name = "Atomisp run mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = ATOMISP_RUN_MODE_MIN,
+ .def = ATOMISP_RUN_MODE_PREVIEW,
+ .max = ATOMISP_RUN_MODE_MAX,
+ .qmenu = ctrl_run_mode_menu,
+};
+
+static const char *const ctrl_vfpp_mode_menu[] = {
+ "Enable", /* vfpp always enabled */
+ "Disable to scaler mode", /* CSS into video mode and disable */
+ "Disable to low latency mode", /* CSS into still mode and disable */
+};
+
+static const struct v4l2_ctrl_config ctrl_vfpp = {
+ .id = V4L2_CID_VFPP,
+ .name = "Atomisp vf postprocess",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .def = 0,
+ .max = 2,
+ .qmenu = ctrl_vfpp_mode_menu,
+};
+
+/*
+ * Control for continuous mode raw buffer size
+ *
+ * The size of the RAW ringbuffer sets a limit on how far
+ * back in time the application can go when requesting capture
+ * frames to be rendered, and on how many frames can be rendered
+ * in a burst at full sensor rate.
+ *
+ * Note: this setting has a big impact on memory consumption of
+ * the CSS subsystem.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_raw_buffer_size = {
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Continuous raw ringbuffer size",
+ .min = 1,
+ .max = 100, /* depends on CSS version, runtime checked */
+ .step = 1,
+ .def = 3,
+};
+
+/*
+ * Control for enabling continuous viewfinder
+ *
+ * When enabled, and the ISP is in continuous mode (see ctrl_continuous_mode),
+ * preview pipeline continues concurrently with capture
+ * processing. When disabled, and continuous mode is used,
+ * preview is paused while captures are processed, but
+ * full pipeline restart is not needed.
+ *
+ * By setting this to disabled, capture processing is
+ * essentially given priority over preview, and the effective
+ * capture output rate may be higher than with continuous
+ * viewfinder enabled.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_viewfinder = {
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Continuous viewfinder",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control for enabling Lock&Unlock Raw Buffer mechanism
+ *
+ * When enabled, raw buffers can be locked and unlocked.
+ * The application can hold the exp_id of a raw buffer
+ * and unlock it when it is no longer needed.
+ * Note: make sure to set this configuration before creating the stream.
+ */
+static const struct v4l2_ctrl_config ctrl_enable_raw_buffer_lock = {
+ .id = V4L2_CID_ENABLE_RAW_BUFFER_LOCK,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Lock Unlock Raw Buffer",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control to disable digital zoom of the whole stream
+ *
+ * When it is true, the pipe configuration enable_dz will be set to false.
+ * This can improve performance by disabling the pp binary.
+ *
+ * Note: make sure to set this configuration before creating the stream.
+ */
+static const struct v4l2_ctrl_config ctrl_disable_dz = {
+ .id = V4L2_CID_DISABLE_DZ,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Disable digital zoom",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+static int atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
+{
+ int ret;
+
+ pipe->type = buf_type;
+ pipe->asd = asd;
+ pipe->isp = asd->isp;
+ spin_lock_init(&pipe->irq_lock);
+ mutex_init(&pipe->vb_queue_mutex);
+
+ /* Init videobuf2 queue structure */
+ pipe->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ pipe->vb_queue.io_modes = VB2_MMAP | VB2_DMABUF;
+ pipe->vb_queue.buf_struct_size = sizeof(struct ia_css_frame);
+ pipe->vb_queue.ops = &atomisp_vb2_ops;
+ pipe->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ pipe->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ pipe->vb_queue.lock = &pipe->vb_queue_mutex;
+ ret = vb2_queue_init(&pipe->vb_queue);
+ if (ret)
+ return ret;
+
+ pipe->vdev.queue = &pipe->vb_queue;
+
+ INIT_LIST_HEAD(&pipe->buffers_in_css);
+ INIT_LIST_HEAD(&pipe->activeq);
+ INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
+ INIT_LIST_HEAD(&pipe->per_frame_params);
+
+ return 0;
+}
+
+/*
+ * isp_subdev_init_entities - Initialize V4L2 subdev and media entity
+ * @asd: ISP CCDC module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
+{
+ struct v4l2_subdev *sd = &asd->subdev;
+ struct media_pad *pads = asd->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ v4l2_subdev_init(sd, &isp_subdev_v4l2_ops);
+ sprintf(sd->name, "Atom ISP");
+ v4l2_set_subdevdata(sd, asd);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[ATOMISP_SUBDEV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ asd->fmt[ATOMISP_SUBDEV_PAD_SINK].fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ me->ops = &isp_subdev_media_ops;
+ me->function = MEDIA_ENT_F_PROC_VIDEO_ISP;
+ ret = media_entity_pads_init(me, ATOMISP_SUBDEV_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_init_subdev_pipe(asd, &asd->video_out, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (ret)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, 1);
+ if (ret)
+ return ret;
+
+ asd->run_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_run_mode, NULL);
+ asd->vfpp = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_vfpp, NULL);
+ asd->continuous_viewfinder = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_viewfinder,
+ NULL);
+ asd->continuous_raw_buffer_size =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_raw_buffer_size,
+ NULL);
+
+ asd->enable_raw_buffer_lock =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_enable_raw_buffer_lock,
+ NULL);
+ asd->disable_dz =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_disable_dz,
+ NULL);
+
+ /* Make controls visible on subdev as well. */
+ asd->subdev.ctrl_handler = &asd->ctrl_handler;
+ spin_lock_init(&asd->raw_buffer_bitmap_lock);
+ return asd->ctrl_handler.error;
+}
+
+static void atomisp_subdev_cleanup_entities(struct atomisp_sub_device *asd)
+{
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+ media_entity_cleanup(&asd->subdev.entity);
+}
+
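+/* Drain any V4L2 events still queued on the subdev's file handles. */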
+void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd)
+{
+ struct v4l2_fh *fh, *fh_tmp;
+ struct v4l2_event event;
+ unsigned int i, pending_event;
+
+ list_for_each_entry_safe(fh, fh_tmp,
+ &asd->subdev.devnode->fh_list, list) {
+ pending_event = v4l2_event_pending(fh);
+ for (i = 0; i < pending_event; i++)
+ v4l2_event_dequeue(fh, &event, 1);
+ }
+}
+
+void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd)
+{
+ atomisp_subdev_cleanup_entities(asd);
+ v4l2_device_unregister_subdev(&asd->subdev);
+ atomisp_video_unregister(&asd->video_out);
+}
+
+int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
+ struct v4l2_device *vdev)
+{
+ return v4l2_device_register_subdev(vdev, &asd->subdev);
+}
+
+/*
+ * atomisp_subdev_init - ISP Subdevice initialization.
+ * @dev: Device pointer specific to the ATOM ISP.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int atomisp_subdev_init(struct atomisp_device *isp)
+{
+ int ret;
+
+ isp->asd.isp = isp;
+ isp_subdev_init_params(&isp->asd);
+ ret = isp_subdev_init_entities(&isp->asd);
+ if (ret < 0)
+ atomisp_subdev_cleanup_entities(&isp->asd);
+
+ return ret;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
new file mode 100644
index 000000000000..e1d0168cb91d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ */
+#ifndef __ATOMISP_SUBDEV_H__
+#define __ATOMISP_SUBDEV_H__
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+#include "atomisp_common.h"
+#include "atomisp_compat.h"
+#include "atomisp_v4l2.h"
+
+#include "ia_css.h"
+
+/* EXP_ID's range is 1 ~ 250 */
+#define ATOMISP_MAX_EXP_ID (250)
+
+#define ATOMISP_SUBDEV_PAD_SINK 0
+#define ATOMISP_SUBDEV_PAD_SOURCE 1
+#define ATOMISP_SUBDEV_PADS_NUM 2
+
+struct atomisp_in_fmt_conv {
+ u32 code;
+ u8 bpp; /* bits per pixel */
+ u8 depth; /* uncompressed */
+ enum atomisp_input_format atomisp_in_fmt;
+ enum ia_css_bayer_order bayer_order;
+};
+
+struct atomisp_sub_device;
+
+struct atomisp_video_pipe {
+ struct video_device vdev;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+ struct media_pipeline pipe;
+ struct vb2_queue vb_queue;
+	/* Lock for vb_queue; when also taking isp->mutex, this must be taken first! */
+ struct mutex vb_queue_mutex;
+ /* List of video-buffers handed over to the CSS */
+ struct list_head buffers_in_css;
+ /* List of video-buffers handed over to the driver, but not yet to the CSS */
+ struct list_head activeq;
+ /*
+	 * Buffers waiting for per-frame parameters; this is only valid
+	 * in per-frame setting mode.
+ */
+ struct list_head buffers_waiting_for_param;
+	/* The linked list used to store per-frame parameters */
+ struct list_head per_frame_params;
+
+ /* Filled through atomisp_get_css_frame_info() on queue setup */
+ struct ia_css_frame_info frame_info;
+
+ /*
+ * irq_lock is used to protect video buffer state change operations and
+ * also to make activeq and capq operations atomic.
+ */
+ spinlock_t irq_lock;
+ unsigned int users;
+
+ struct atomisp_device *isp;
+ struct v4l2_pix_format pix;
+ u32 sh_fmt;
+
+ struct atomisp_sub_device *asd;
+
+ /*
+	 * This frame_config_id is obtained from CSS when dequeueing
+	 * buffers; it indicates which parameters have been applied.
+ */
+ unsigned int frame_config_id[VIDEO_MAX_FRAME];
+ /*
+	 * This config id is set when the camera HAL enqueues a buffer; a
+	 * non-zero value indicates which parameters need to be applied.
+ */
+ unsigned int frame_request_config_id[VIDEO_MAX_FRAME];
+ struct atomisp_css_params_with_list *frame_params[VIDEO_MAX_FRAME];
+};
+
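+/* Map a vb2_queue or vb2 buffer back to its containing atomisp_video_pipe. */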
+#define vq_to_pipe(queue) \
+ container_of(queue, struct atomisp_video_pipe, vb_queue)
+
+#define vb_to_pipe(vb) vq_to_pipe((vb)->vb2_queue)
+
+struct atomisp_pad_format {
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+};
+
+/*
+ * This structure is used to cache the CSS parameters. It mirrors
+ * struct ia_css_isp_config, but without unsupported and deprecated parts.
+ */
+struct atomisp_css_params {
+ struct ia_css_wb_config wb_config;
+ struct ia_css_cc_config cc_config;
+ struct ia_css_tnr_config tnr_config;
+ struct ia_css_ecd_config ecd_config;
+ struct ia_css_ynr_config ynr_config;
+ struct ia_css_fc_config fc_config;
+ struct ia_css_formats_config formats_config;
+ struct ia_css_cnr_config cnr_config;
+ struct ia_css_macc_config macc_config;
+ struct ia_css_ctc_config ctc_config;
+ struct ia_css_aa_config aa_config;
+ struct ia_css_aa_config baa_config;
+ struct ia_css_ce_config ce_config;
+ struct ia_css_ob_config ob_config;
+ struct ia_css_dp_config dp_config;
+ struct ia_css_de_config de_config;
+ struct ia_css_gc_config gc_config;
+ struct ia_css_nr_config nr_config;
+ struct ia_css_ee_config ee_config;
+ struct ia_css_anr_config anr_config;
+ struct ia_css_3a_config s3a_config;
+ struct ia_css_xnr_config xnr_config;
+ struct ia_css_dz_config dz_config;
+ struct ia_css_cc_config yuv2rgb_cc_config;
+ struct ia_css_cc_config rgb2yuv_cc_config;
+ struct ia_css_macc_table macc_table;
+ struct ia_css_gamma_table gamma_table;
+ struct ia_css_ctc_table ctc_table;
+
+ struct ia_css_xnr_table xnr_table;
+ struct ia_css_rgb_gamma_table r_gamma_table;
+ struct ia_css_rgb_gamma_table g_gamma_table;
+ struct ia_css_rgb_gamma_table b_gamma_table;
+
+ struct ia_css_vector motion_vector;
+ struct ia_css_anr_thres anr_thres;
+
+ struct ia_css_dvs_6axis_config *dvs_6axis;
+ struct ia_css_dvs2_coefficients *dvs2_coeff;
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_morph_table *morph_table;
+
+ /*
+	 * Used to store the user-pointer address of the frame. The driver
+	 * needs to translate it to an ia_css_frame pointer before passing it to CSS.
+ */
+ void *output_frame;
+ u32 isp_config_id;
+
+ /* Indicates which parameters need to be updated. */
+ struct atomisp_parameters update_flag;
+};
+
+struct atomisp_subdev_params {
+ int yuv_ds_en;
+ unsigned int color_effect;
+ bool gdc_cac_en;
+ bool macc_en;
+ bool bad_pixel_en;
+ bool video_dis_en;
+ bool sc_en;
+ bool fpn_en;
+ bool xnr_en;
+ bool low_light;
+ int false_color;
+ unsigned int histogram_elenum;
+
+ /* Current grid info */
+ struct ia_css_grid_info curr_grid_info;
+ enum ia_css_pipe_id s3a_enabled_pipe;
+
+ int s3a_output_bytes;
+
+ bool dis_proj_data_valid;
+
+ struct ia_css_dz_config dz_config; /** Digital Zoom */
+ struct ia_css_capture_config capture_config;
+
+ struct ia_css_isp_config config;
+
+ /* current configurations */
+ struct atomisp_css_params css_param;
+
+ /*
+ * Intermediate buffers used to communicate data between
+ * CSS and user space.
+ */
+ struct ia_css_3a_statistics *s3a_user_stat;
+
+ void *metadata_user[ATOMISP_METADATA_TYPE_NUM];
+ u32 metadata_width_size;
+
+ struct ia_css_dvs2_statistics *dvs_stat;
+ struct ia_css_dvs_6axis_config *dvs_6axis;
+ u32 exp_id;
+ int dvs_hor_coef_bytes;
+ int dvs_ver_coef_bytes;
+ int dvs_ver_proj_bytes;
+ int dvs_hor_proj_bytes;
+
+	/* Flag indicating whether the driver needs to push updated params to CSS */
+ bool css_update_params_needed;
+};
+
+struct atomisp_css_params_with_list {
+ /* parameters for CSS */
+ struct atomisp_css_params params;
+ struct list_head list;
+};
+
+struct atomisp_sub_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM];
+ struct atomisp_pad_format fmt[ATOMISP_SUBDEV_PADS_NUM];
+ /* Padding for currently set sink-pad fmt */
+ u32 sink_pad_padding_w;
+ u32 sink_pad_padding_h;
+
+ unsigned int output;
+ struct atomisp_video_pipe video_out;
+ struct atomisp_device *isp;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *run_mode;
+ struct v4l2_ctrl *vfpp;
+ struct v4l2_ctrl *continuous_raw_buffer_size;
+ struct v4l2_ctrl *continuous_viewfinder;
+ struct v4l2_ctrl *enable_raw_buffer_lock;
+
+ /* ISP2401 */
+ struct v4l2_ctrl *ion_dev_fd;
+
+ struct v4l2_ctrl *disable_dz;
+
+ struct atomisp_subdev_params params;
+
+ struct atomisp_stream_env stream_env[ATOMISP_INPUT_STREAM_NUM];
+
+ struct v4l2_pix_format dvs_envelop;
+ unsigned int s3a_bufs_in_css[IA_CSS_PIPE_ID_NUM];
+ unsigned int dis_bufs_in_css;
+
+ unsigned int metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_NUM][IA_CSS_PIPE_ID_NUM];
+ /* The list of free and available metadata buffers for CSS */
+ struct list_head metadata[ATOMISP_METADATA_TYPE_NUM];
+ /* The list of metadata buffers which have been en-queued to CSS */
+ struct list_head metadata_in_css[ATOMISP_METADATA_TYPE_NUM];
+ /* The list of metadata buffers which are ready for userspace to get */
+ struct list_head metadata_ready[ATOMISP_METADATA_TYPE_NUM];
+
+ /* The list of free and available s3a stat buffers for CSS */
+ struct list_head s3a_stats;
+ /* The list of s3a stat buffers which have been en-queued to CSS */
+ struct list_head s3a_stats_in_css;
+ /* The list of s3a stat buffers which are ready for userspace to get */
+ struct list_head s3a_stats_ready;
+
+ struct list_head dis_stats;
+ struct list_head dis_stats_in_css;
+ spinlock_t dis_stats_lock;
+
+ /* This field specifies which camera (v4l2 input) is selected. */
+ int input_curr;
+
+ atomic_t sof_count;
+ atomic_t sequence; /* Sequence value that is assigned to buffer. */
+ atomic_t sequence_temp;
+
+ /*
+ * Writers of streaming must hold both isp->mutex and isp->lock.
+ * Readers of streaming need to hold only one of the two locks.
+ */
+ bool streaming;
+ bool stream_prepared; /* whether css stream is created */
+ bool recreate_streams_on_resume;
+
+ unsigned int latest_preview_exp_id; /* CSS ZSL/SDV raw buffer id */
+
+ bool copy_mode; /* CSI2+ use copy mode */
+
+ int raw_buffer_bitmap[ATOMISP_MAX_EXP_ID / 32 +
+ 1]; /* Record each Raw Buffer lock status */
+ int raw_buffer_locked_count;
+ spinlock_t raw_buffer_bitmap_lock;
+
+ /* ISP2401 */
+ bool re_trigger_capture;
+
+ struct atomisp_resolution sensor_array_res;
+	bool high_speed_mode; /* Indicates whether high speed mode is currently active */
+
+ unsigned int preview_exp_id;
+ unsigned int postview_exp_id;
+};
+
+extern const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[];
+
+u32 atomisp_subdev_uncompressed_code(u32 code);
+bool atomisp_subdev_is_compressed(u32 code);
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code);
+
+/* ISP2400 */
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ enum atomisp_input_format atomisp_in_fmt);
+
+/* ISP2401 */
+const struct atomisp_in_fmt_conv
+*atomisp_find_in_fmt_conv_by_atomisp_in_fmt(enum atomisp_input_format
+ atomisp_in_fmt);
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_compressed(u32 code);
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd);
+
+/* Get pointer to appropriate format */
+struct v4l2_mbus_framefmt
+*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state, uint32_t which,
+ uint32_t pad);
+struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ u32 which, uint32_t pad,
+ uint32_t target);
+int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ u32 which, uint32_t pad, uint32_t target,
+ u32 flags, struct v4l2_rect *r);
+/* Actually set the format */
+void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
+ u32 pad, struct v4l2_mbus_framefmt *ffmt);
+
+void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd);
+
+void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd);
+int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
+ struct v4l2_device *vdev);
+int atomisp_subdev_init(struct atomisp_device *isp);
+void atomisp_subdev_cleanup(struct atomisp_device *isp);
+
+#endif /* __ATOMISP_SUBDEV_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_tables.h b/drivers/staging/media/atomisp/pci/atomisp_tables.h
new file mode 100644
index 000000000000..33e6079aa73b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_tables.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+#ifndef __ATOMISP_TABLES_H__
+#define __ATOMISP_TABLES_H__
+
+#include "sh_css_params.h"
+
+/*Sepia image effect table*/
+static struct ia_css_cc_config sepia_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {141, 18, 68, -40, -5, -19, 35, 4, 16},
+};
+
+/*Negative image effect table*/
+static struct ia_css_cc_config nega_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {255, 29, 120, 0, 374, 342, 0, 672, -301},
+};
+
+/*Mono image effect table*/
+static struct ia_css_cc_config mono_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {255, 29, 120, 0, 0, 0, 0, 0, 0},
+};
+
+/*Skin whiten image effect table*/
+static struct ia_css_macc_table skin_low_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 7168, 0, 2048, 8192,
+ 5120, -1024, 2048, 8192,
+ 8192, 2048, -1024, 5120,
+ 8192, 2048, 0, 7168,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct ia_css_macc_table skin_medium_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 5120, 0, 6144, 8192,
+ 3072, -1024, 2048, 6144,
+ 6144, 2048, -1024, 3072,
+ 8192, 6144, 0, 5120,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct ia_css_macc_table skin_high_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 4096, 0, 8192, 8192,
+ 0, -2048, 4096, 6144,
+ 6144, 4096, -2048, 0,
+ 8192, 8192, 0, 4096,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+/*Blue enhancement image effect table*/
+static struct ia_css_macc_table blue_macc_table = {
+ .data = {
+ 9728, -3072, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 9728, 0, -3072, 8192,
+ 12800, 1536, -3072, 8192,
+ 11264, 0, 0, 11264,
+ 9728, -3072, 0, 11264
+ }
+};
+
+/*Green enhancement image effect table*/
+static struct ia_css_macc_table green_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 10240, 4096, 0, 8192,
+ 10240, 4096, 0, 12288,
+ 12288, 0, 0, 12288,
+ 14336, -2048, 4096, 8192,
+ 10240, 0, 4096, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct ia_css_ctc_table vivid_ctc_table = {
+ .data.vamem_2 = {
+ 0, 384, 837, 957, 1011, 1062, 1083, 1080,
+ 1078, 1077, 1053, 1039, 1012, 992, 969, 951,
+ 929, 906, 886, 866, 845, 823, 809, 790,
+ 772, 758, 741, 726, 711, 701, 688, 675,
+ 666, 656, 648, 639, 633, 626, 618, 612,
+ 603, 594, 582, 572, 557, 545, 529, 516,
+ 504, 491, 480, 467, 459, 447, 438, 429,
+ 419, 412, 404, 397, 389, 382, 376, 368,
+ 363, 357, 351, 345, 340, 336, 330, 326,
+ 321, 318, 312, 308, 304, 300, 297, 294,
+ 291, 286, 284, 281, 278, 275, 271, 268,
+ 261, 257, 251, 245, 240, 235, 232, 225,
+ 223, 218, 213, 209, 206, 204, 199, 197,
+ 193, 189, 186, 185, 183, 179, 177, 175,
+ 172, 170, 169, 167, 164, 164, 162, 160,
+ 158, 157, 156, 154, 154, 152, 151, 150,
+ 149, 148, 146, 147, 146, 144, 143, 143,
+ 142, 141, 140, 141, 139, 138, 138, 138,
+ 137, 136, 136, 135, 134, 134, 134, 133,
+ 132, 132, 131, 130, 131, 130, 129, 128,
+ 129, 127, 127, 127, 127, 125, 125, 125,
+ 123, 123, 122, 120, 118, 115, 114, 111,
+ 110, 108, 106, 105, 103, 102, 100, 99,
+ 97, 97, 96, 95, 94, 93, 93, 91,
+ 91, 91, 90, 90, 89, 89, 88, 88,
+ 89, 88, 88, 87, 87, 87, 87, 86,
+ 87, 87, 86, 87, 86, 86, 84, 84,
+ 82, 80, 78, 76, 74, 72, 70, 68,
+ 67, 65, 62, 60, 58, 56, 55, 54,
+ 53, 51, 49, 49, 47, 45, 45, 45,
+ 41, 40, 39, 39, 34, 33, 34, 32,
+ 25, 23, 24, 20, 13, 9, 12, 0,
+ 0
+ }
+};
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_trace_event.h b/drivers/staging/media/atomisp/pci/atomisp_trace_event.h
new file mode 100644
index 000000000000..4c58a8f5c7e8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_trace_event.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support Camera Imaging tracer core.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM atomisp
+
+#if !defined(ATOMISP_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ATOMISP_TRACE_EVENT_H
+
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+TRACE_EVENT(camera_meminfo,
+
+ TP_PROTO(const char *name, int uptr_size, int counter, int sys_size,
+ int sys_res_size, int cam_sys_use, int cam_dyc_use,
+ int cam_res_use),
+
+ TP_ARGS(name, uptr_size, counter, sys_size, sys_res_size, cam_sys_use,
+ cam_dyc_use, cam_res_use),
+
+ TP_STRUCT__entry(
+ __array(char, name, 24)
+ __field(int, uptr_size)
+ __field(int, counter)
+ __field(int, sys_size)
+ __field(int, sys_res_size)
+ __field(int, cam_res_use)
+ __field(int, cam_dyc_use)
+ __field(int, cam_sys_use)
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->name, name, 24);
+ __entry->uptr_size = uptr_size;
+ __entry->counter = counter;
+ __entry->sys_size = sys_size;
+ __entry->sys_res_size = sys_res_size;
+ __entry->cam_res_use = cam_res_use;
+ __entry->cam_dyc_use = cam_dyc_use;
+ __entry->cam_sys_use = cam_sys_use;
+ ),
+
+ TP_printk(
+ "<%s> User ptr memory:%d pages,\tISP private memory used:%d pages:\tsysFP system size:%d,\treserved size:%d\tcamFP sysUse:%d,\tdycUse:%d,\tresUse:%d.\n",
+ __entry->name, __entry->uptr_size, __entry->counter,
+ __entry->sys_size, __entry->sys_res_size, __entry->cam_sys_use,
+ __entry->cam_dyc_use, __entry->cam_res_use)
+ );
+
+TRACE_EVENT(camera_debug,
+
+ TP_PROTO(const char *name, char *info, const int line),
+
+ TP_ARGS(name, info, line),
+
+ TP_STRUCT__entry(
+ __array(char, name, 24)
+ __array(char, info, 24)
+ __field(int, line)
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->name, name, 24);
+ strscpy(__entry->info, info, 24);
+ __entry->line = line;
+ ),
+
+ TP_printk("<%s>-<%d> %s\n", __entry->name, __entry->line,
+ __entry->info)
+ );
+
+TRACE_EVENT(ipu_cstate,
+
+ TP_PROTO(int cstate),
+
+ TP_ARGS(cstate),
+
+ TP_STRUCT__entry(
+ __field(int, cstate)
+ ),
+
+ TP_fast_assign(
+ __entry->cstate = cstate;
+ ),
+
+ TP_printk("cstate=%d", __entry->cstate)
+ );
+
+TRACE_EVENT(ipu_pstate,
+
+ TP_PROTO(int freq, int util),
+
+ TP_ARGS(freq, util),
+
+ TP_STRUCT__entry(
+ __field(int, freq)
+ __field(int, util)
+ ),
+
+ TP_fast_assign(
+ __entry->freq = freq;
+ __entry->util = util;
+ ),
+
+ TP_printk("freq=%d util=%d", __entry->freq, __entry->util)
+ );
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE atomisp_trace_event
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
new file mode 100644
index 000000000000..900a67552d6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -0,0 +1,1512 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/interrupt.h>
+#include <linux/bits.h>
+#include <media/v4l2-fwnode.h>
+
+#include <asm/iosf_mbi.h>
+
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_internal.h"
+#include "atomisp-regs.h"
+#include "atomisp_dfs_tables.h"
+#include "hmm/hmm.h"
+#include "atomisp_trace_event.h"
+
+#include "sh_css_firmware.h"
+
+#include "device_access.h"
+
+/* Timeouts to wait for all subdevs to be registered */
+#define SUBDEV_WAIT_TIMEOUT 50 /* ms */
+#define SUBDEV_WAIT_TIMEOUT_MAX_COUNT 40 /* up to 2 seconds */
+
+/* G-Min addition: pull this in from intel_mid_pm.h */
+#define CSTATE_EXIT_LATENCY_C1 1
+
+/* cross component debug message flag */
+int dbg_level;
+module_param(dbg_level, int, 0644);
+MODULE_PARM_DESC(dbg_level, "debug message level (default:0)");
+
+/* log function switch */
+int dbg_func = 1;
+module_param(dbg_func, int, 0644);
+MODULE_PARM_DESC(dbg_func,
+ "log function switch non/printk (default:printk)");
+
+/*
+ * Set to 16x16 since this is the number of extra lines and pixels the
+ * sensor exports. If these were kept at the 10x8 they were on, incorrect
+ * resolutions were requested from the sensor driver in YUV downscaling
+ * modes, with strange outcomes as a result. The proper way to do this
+ * would be to have a list of tables specifying the sensor res, MIPI rec,
+ * output res and ISP output res; however, since we do not have this yet,
+ * the chosen solution is the next best thing.
+ */
+int pad_w = 16;
+module_param(pad_w, int, 0644);
+MODULE_PARM_DESC(pad_w, "extra data for ISP processing");
+
+int pad_h = 16;
+module_param(pad_h, int, 0644);
+MODULE_PARM_DESC(pad_h, "extra data for ISP processing");
+
+/*
+ * FIXME: this is a hack to make easier to support ISP2401 variant.
+ * As a given system will either be ISP2401 or not, we can just use
+ * a boolean, in order to replace existing #ifdef ISP2401 everywhere.
+ *
+ * Once this driver gets into a better shape, however, the best would
+ * be to replace this to something stored inside atomisp allocated
+ * structures.
+ */
+
+struct device *atomisp_dev;
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+};
+
+/* Merrifield and Moorefield DFS rules */
+static const struct atomisp_dfs_config dfs_config_merr = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_457MHZ,
+ .dfs_table = dfs_rules_merr,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr_1179[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_merr_1179 = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_merr_1179,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_1179),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr_117a[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 30,
+ .isp_freq = ISP_FREQ_266MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1080,
+ .height = 1920,
+ .fps = 30,
+ .isp_freq = ISP_FREQ_266MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 45,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1080,
+ .height = 1920,
+ .fps = 45,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = 60,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_200MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_200MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+};
+
+static struct atomisp_dfs_config dfs_config_merr_117a = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_200MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_merr_117a,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_117a),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_byt[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_byt = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_byt,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_byt),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_cht[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_cht_soc[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_cht = {
+ .lowest_freq = ISP_FREQ_100MHZ,
+ .max_freq_at_vmin = ISP_FREQ_356MHZ,
+ .highest_freq = ISP_FREQ_356MHZ,
+ .dfs_table = dfs_rules_cht,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_cht),
+};
+
+/* This one should be visible also by atomisp_cmd.c */
+const struct atomisp_dfs_config dfs_config_cht_soc = {
+ .lowest_freq = ISP_FREQ_100MHZ,
+ .max_freq_at_vmin = ISP_FREQ_356MHZ,
+ .highest_freq = ISP_FREQ_356MHZ,
+ .dfs_table = dfs_rules_cht_soc,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc),
+};
+
+int atomisp_video_init(struct atomisp_video_pipe *video)
+{
+ int ret;
+
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&video->vdev.entity, 1, &video->pad);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the video device. */
+ strscpy(video->vdev.name, "ATOMISP video output", sizeof(video->vdev.name));
+ video->vdev.fops = &atomisp_fops;
+ video->vdev.ioctl_ops = &atomisp_ioctl_ops;
+ video->vdev.lock = &video->isp->mutex;
+ video->vdev.release = video_device_release_empty;
+ video_set_drvdata(&video->vdev, video->isp);
+
+ return 0;
+}
+
+void atomisp_video_unregister(struct atomisp_video_pipe *video)
+{
+ if (video_is_registered(&video->vdev)) {
+ media_entity_cleanup(&video->vdev.entity);
+ video_unregister_device(&video->vdev);
+ }
+}
+
+static int atomisp_save_iunit_reg(struct atomisp_device *isp)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
+ /* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */
+ pci_read_config_dword(pdev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
+ pci_read_config_dword(pdev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
+ pci_read_config_word(pdev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
+ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &isp->saved_regs.interrupt_control);
+
+ pci_read_config_dword(pdev, MRFLD_PCI_PMCS, &isp->saved_regs.pmcs);
+ /* Ensure read/write combining is enabled. */
+ pci_read_config_dword(pdev, PCI_I_CONTROL, &isp->saved_regs.i_control);
+ isp->saved_regs.i_control |=
+ MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
+ MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ &isp->saved_regs.csi_access_viol);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ &isp->saved_regs.csi_rcomp_config);
+ /*
+ * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE.
+ * ANN/CHV: RCOMP updates do not happen when using CSI2+ path
+ * and sensor sending "continuous clock".
+ * TNG/ANN/CHV: MIPI packets are lost if the HS entry sequence
+ * is missed, and IUNIT can hang.
+ * For both issues, setting this bit is a workaround.
+ */
+ isp->saved_regs.csi_rcomp_config |= MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ &isp->saved_regs.csi_afe_dly);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
+ &isp->saved_regs.csi_control);
+ if (isp->media_dev.hw_revision >=
+ (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT))
+ isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_PARPATHEN;
+ /*
+	 * On CHT the CSI_READY bit should be enabled before stream-on.
+ */
+ if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)))
+ isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_CSI_READY;
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ &isp->saved_regs.csi_afe_rcomp_config);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ &isp->saved_regs.csi_afe_hs_control);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ &isp->saved_regs.csi_deadline_control);
+ return 0;
+}
+
+static int atomisp_restore_iunit_reg(struct atomisp_device *isp)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ pci_write_config_word(pdev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, isp->saved_regs.ispmmadr);
+ pci_write_config_dword(pdev, PCI_MSI_CAPID, isp->saved_regs.msicap);
+ pci_write_config_dword(pdev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
+ pci_write_config_word(pdev, PCI_MSI_DATA, isp->saved_regs.msi_data);
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, isp->saved_regs.interrupt_control);
+ pci_write_config_dword(pdev, PCI_I_CONTROL, isp->saved_regs.i_control);
+
+ pci_write_config_dword(pdev, MRFLD_PCI_PMCS, isp->saved_regs.pmcs);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ isp->saved_regs.csi_access_viol);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ isp->saved_regs.csi_rcomp_config);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ isp->saved_regs.csi_afe_dly);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ isp->saved_regs.csi_afe_rcomp_config);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ isp->saved_regs.csi_afe_hs_control);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ isp->saved_regs.csi_deadline_control);
+
+ /*
+	 * For MRFLD, software/firmware needs to write a 1 to bit 0 of the
+	 * register at CSI_RECEIVER_SELECTION_REG to enable the SH CSI
+	 * backend. Writing 0 would enable the Arasan CSI backend, which
+	 * has bugs (sightings 4567697 and 4567699) and will be removed
+	 * in B0.
+ */
+ atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
+ return 0;
+}
+
+static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ u32 irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp->lock, flags);
+
+ /*
+	 * MRFLD HAS requirement: the i-unit cannot be powered off while
+	 * the ISP has an unserviced IRQ. So check here whether there is
+	 * any pending IRQ and, if so, wait for it to be serviced.
+ */
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
+ irq &= BIT(INTR_IIR);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
+ if (!(irq & BIT(INTR_IIR)))
+ goto done;
+
+ atomisp_css2_hw_store_32(MRFLD_INTR_CLEAR_REG, 0xFFFFFFFF);
+ atomisp_load_uint32(MRFLD_INTR_STATUS_REG, &irq);
+ if (irq != 0) {
+ dev_err(isp->dev,
+ "%s: fail to clear isp interrupt status reg=0x%x\n",
+ __func__, irq);
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EAGAIN;
+ } else {
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
+ irq &= BIT(INTR_IIR);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
+ if (!(irq & BIT(INTR_IIR))) {
+ atomisp_css2_hw_store_32(MRFLD_INTR_ENABLE_REG, 0x0);
+ goto done;
+ }
+ dev_err(isp->dev,
+ "%s: error in iunit interrupt. status reg=0x%x\n",
+ __func__, irq);
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EAGAIN;
+ }
+done:
+ /*
+ * MRFLD WORKAROUND:
+	 * Before powering off the IUNIT, clear the pending interrupts
+	 * and disable the interrupt. The driver should avoid writing 0
+	 * to IIR, as that could block subsequent interrupt messages.
+	 * HW sighting: 4568410.
+ */
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
+ irq &= ~BIT(INTR_IER);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
+
+ atomisp_msi_irq_uninit(isp);
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return 0;
+}
+
+/*
+ * WA for DDR DVFS enable/disable.
+ * By default, the ISP will force DDR DVFS to 1600 MHz before disabling DVFS.
+ */
+static void punit_ddr_dvfs_enable(bool enable)
+{
+ int reg;
+
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
+ if (enable) {
+ reg &= ~(MRFLD_BIT0 | MRFLD_BIT1);
+ } else {
+ reg |= MRFLD_BIT1;
+ reg &= ~(MRFLD_BIT0);
+ }
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSDVFS, reg);
+}
+
+static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ unsigned long timeout;
+ u32 val = enable ? MRFLD_ISPSSPM0_IUNIT_POWER_ON :
+ MRFLD_ISPSSPM0_IUNIT_POWER_OFF;
+
+ dev_dbg(isp->dev, "IUNIT power-%s.\n", enable ? "on" : "off");
+
+	/* WA for the P-Unit: ISP timeouts have been observed with DVFS enabled */
+ if (IS_CHT && enable && !isp->pm_only) {
+ punit_ddr_dvfs_enable(false);
+ msleep(20);
+ }
+
+ /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
+ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0,
+ val, MRFLD_ISPSSPM0_ISPSSC_MASK);
+
+	/* WA: Enable DVFS */
+ if (IS_CHT && !enable && !isp->pm_only)
+ punit_ddr_dvfs_enable(true);
+
+ /*
+ * There should be no IUNIT access while power-down is
+ * in progress. HW sighting: 4567865.
+ * Wait up to 50 ms for the IUNIT to shut down.
+ * And we do the same for power on.
+ */
+ timeout = jiffies + msecs_to_jiffies(50);
+ do {
+ u32 tmp;
+
+ /* Wait until ISPSSPM0 bit[25:24] shows the right value */
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &tmp);
+ tmp = (tmp >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) & MRFLD_ISPSSPM0_ISPSSC_MASK;
+ if (tmp == val) {
+ trace_ipu_cstate(enable);
+ pdev->current_state = enable ? PCI_D0 : PCI_D3cold;
+ return 0;
+ }
+
+ if (time_after(jiffies, timeout))
+ break;
+
+		/* FIXME: empirically chosen delay value */
+ usleep_range(100, 150);
+ } while (1);
+
+ dev_err(isp->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
+}
+
+int atomisp_power_off(struct device *dev)
+{
+ struct atomisp_device *isp = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+ u32 reg;
+
+ if (isp->pm_only) {
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0);
+ } else {
+ atomisp_css_uninit(isp);
+
+ ret = atomisp_mrfld_pre_power_down(isp);
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * The MRFLD IUNIT DPHY is located in an always-powered-on island.
+	 * The MRFLD HW design requires all CSI ports to be disabled before
+	 * powering down the IUNIT.
+ */
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &reg);
+ reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK;
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, reg);
+
+ cpu_latency_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ pci_save_state(pdev);
+ return atomisp_mrfld_power(isp, false);
+}
+
+int atomisp_power_on(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_mrfld_power(isp, true);
+ if (ret)
+ return ret;
+
+ pci_restore_state(to_pci_dev(dev));
+ cpu_latency_qos_update_request(&isp->pm_qos, isp->max_isr_latency);
+
+ if (isp->pm_only)
+ return 0;
+
+	/* Restore register values for the iUnit and iUnitPHY registers */
+ if (isp->saved_regs.pcicmdsts)
+ atomisp_restore_iunit_reg(isp);
+
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+
+ return atomisp_css_init(isp);
+}
+
+static int atomisp_suspend(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ unsigned long flags;
+
+ /* FIXME: Suspend is not supported by sensors. Abort if streaming. */
+ spin_lock_irqsave(&isp->lock, flags);
+ if (isp->asd.streaming) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ dev_err(isp->dev, "atomisp cannot suspend at this time.\n");
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ pm_runtime_resume(dev);
+
+ isp->asd.recreate_streams_on_resume = isp->asd.stream_prepared;
+ atomisp_destroy_pipes_stream(&isp->asd);
+
+ return atomisp_power_off(dev);
+}
+
+static int atomisp_resume(struct device *dev)
+{
+ struct atomisp_device *isp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_power_on(dev);
+ if (ret)
+ return ret;
+
+ if (isp->asd.recreate_streams_on_resume)
+ ret = atomisp_create_pipes_stream(&isp->asd);
+
+ return ret;
+}
+
+int atomisp_csi_lane_config(struct atomisp_device *isp)
+{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
+ static const struct {
+ u8 code;
+ u8 lanes[N_MIPI_PORT_ID];
+ } portconfigs[] = {
+ /* Tangier/Merrifield available lane configurations */
+ { 0x00, { 4, 1, 0 } }, /* 00000 */
+ { 0x01, { 3, 1, 0 } }, /* 00001 */
+ { 0x02, { 2, 1, 0 } }, /* 00010 */
+ { 0x03, { 1, 1, 0 } }, /* 00011 */
+ { 0x04, { 2, 1, 2 } }, /* 00100 */
+ { 0x08, { 3, 1, 1 } }, /* 01000 */
+ { 0x09, { 2, 1, 1 } }, /* 01001 */
+ { 0x0a, { 1, 1, 1 } }, /* 01010 */
+
+ /* Anniedale/Moorefield only configurations */
+ { 0x10, { 4, 2, 0 } }, /* 10000 */
+ { 0x11, { 3, 2, 0 } }, /* 10001 */
+ { 0x12, { 2, 2, 0 } }, /* 10010 */
+ { 0x13, { 1, 2, 0 } }, /* 10011 */
+ { 0x14, { 2, 2, 2 } }, /* 10100 */
+ { 0x18, { 3, 2, 1 } }, /* 11000 */
+ { 0x19, { 2, 2, 1 } }, /* 11001 */
+ { 0x1a, { 1, 2, 1 } }, /* 11010 */
+ };
+
+ unsigned int i, j;
+ u32 csi_control;
+ int nportconfigs;
+ u32 port_config_mask;
+ int port3_lanes_shift;
+
+ if (isp->media_dev.hw_revision <
+ ATOMISP_HW_REVISION_ISP2401_LEGACY <<
+ ATOMISP_HW_REVISION_SHIFT) {
+ /* Merrifield */
+ port_config_mask = MRFLD_PORT_CONFIG_MASK;
+ port3_lanes_shift = MRFLD_PORT3_LANES_SHIFT;
+ } else {
+ /* Moorefield / Cherryview */
+ port_config_mask = CHV_PORT_CONFIG_MASK;
+ port3_lanes_shift = CHV_PORT3_LANES_SHIFT;
+ }
+
+ if (isp->media_dev.hw_revision <
+ ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) {
+ /* Merrifield / Moorefield legacy input system */
+ nportconfigs = MRFLD_PORT_CONFIG_NUM;
+ } else {
+ /* Moorefield / Cherryview new input system */
+ nportconfigs = ARRAY_SIZE(portconfigs);
+ }
+
+ for (i = 0; i < nportconfigs; i++) {
+ for (j = 0; j < N_MIPI_PORT_ID; j++)
+ if (isp->sensor_lanes[j] &&
+ isp->sensor_lanes[j] != portconfigs[i].lanes[j])
+ break;
+
+ if (j == N_MIPI_PORT_ID)
+ break; /* Found matching setting */
+ }
+
+ if (i >= nportconfigs) {
+ dev_err(isp->dev,
+ "%s: could not find the CSI port setting for %d-%d-%d\n",
+ __func__,
+ isp->sensor_lanes[0], isp->sensor_lanes[1], isp->sensor_lanes[2]);
+ return -EINVAL;
+ }
+
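+	/*
+	 * Illustrative example of the encoding computed below: for the
+	 * Merrifield 4-1-0 configuration (code 0x00), port 3 has no lanes,
+	 * so the bit at MRFLD_PORT3_ENABLE_SHIFT is set and its lane mask
+	 * is 0, while ports 1 and 2 get lane masks of (1 << 4) - 1 = 0xf
+	 * and (1 << 1) - 1 = 0x1 at their respective lane-shift positions.
+	 */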
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
+ csi_control &= ~port_config_mask;
+ csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT)
+ | (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT))
+ | (portconfigs[i].lanes[1] ? 0 : (1 << MRFLD_PORT2_ENABLE_SHIFT))
+ | (portconfigs[i].lanes[2] ? 0 : (1 << MRFLD_PORT3_ENABLE_SHIFT))
+ | (((1 << portconfigs[i].lanes[0]) - 1) << MRFLD_PORT1_LANES_SHIFT)
+ | (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT)
+ | (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift);
+
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
+
+ dev_dbg(isp->dev,
+ "%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n",
+ __func__, portconfigs[i].lanes[0], portconfigs[i].lanes[1],
+ portconfigs[i].lanes[2], csi_control);
+
+ return 0;
+}
+
+static int atomisp_subdev_probe(struct atomisp_device *isp)
+{
+ const struct intel_v4l2_subdev_table *subdevs;
+ int ret, mipi_port;
+
+ ret = atomisp_csi2_bridge_parse_firmware(isp);
+ if (ret)
+ return ret;
+
+ /*
+ * TODO: this is left here for now to allow testing atomisp-sensor
+ * drivers which are still using the atomisp_gmin_platform infra before
+ * converting them to standard v4l2 sensor drivers using runtime-pm +
+ * ACPI for pm and v4l2_async_register_subdev_sensor() registration.
+ */
+ for (subdevs = atomisp_platform_get_subdevs(); subdevs->subdev; subdevs++) {
+ ret = v4l2_device_register_subdev(&isp->v4l2_dev, subdevs->subdev);
+ if (ret)
+ continue;
+
+ if (subdevs->port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "port %d not supported\n", subdevs->port);
+ continue;
+ }
+
+ if (isp->sensor_subdevs[subdevs->port]) {
+ dev_err(isp->dev, "port %d already has a sensor attached\n",
+ subdevs->port);
+ continue;
+ }
+
+ mipi_port = atomisp_port_to_mipi_port(isp, subdevs->port);
+ isp->sensor_lanes[mipi_port] = subdevs->lanes;
+ isp->sensor_subdevs[subdevs->port] = subdevs->subdev;
+ }
+
+ return atomisp_csi_lane_config(isp);
+}
+
+static void atomisp_unregister_entities(struct atomisp_device *isp)
+{
+ unsigned int i;
+ struct v4l2_subdev *sd, *next;
+
+ atomisp_subdev_unregister_entities(&isp->asd);
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
+ atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
+
+ list_for_each_entry_safe(sd, next, &isp->v4l2_dev.subdevs, list)
+ v4l2_device_unregister_subdev(sd);
+
+ v4l2_device_unregister(&isp->v4l2_dev);
+ media_device_unregister(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
+
+ for (i = 0; i < isp->input_cnt; i++)
+ __v4l2_subdev_state_free(isp->inputs[i].try_sd_state);
+}
+
+static int atomisp_register_entities(struct atomisp_device *isp)
+{
+ int ret = 0;
+ unsigned int i;
+
+ isp->media_dev.dev = isp->dev;
+
+ strscpy(isp->media_dev.model, "Intel Atom ISP",
+ sizeof(isp->media_dev.model));
+
+ media_device_init(&isp->media_dev);
+ isp->v4l2_dev.mdev = &isp->media_dev;
+ ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto v4l2_device_failed;
+ }
+
+ ret = atomisp_subdev_probe(isp);
+ if (ret < 0)
+ goto csi_and_subdev_probe_failed;
+
+ /* Register internal entities */
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ ret = atomisp_mipi_csi2_register_entities(&isp->csi2_port[i],
+ &isp->v4l2_dev);
+ if (ret == 0)
+ continue;
+
+ /* error case */
+ dev_err(isp->dev, "failed to register the CSI port: %d\n", i);
+ /* deregister all registered CSI ports */
+ while (i--)
+ atomisp_mipi_csi2_unregister_entities(
+ &isp->csi2_port[i]);
+
+ goto csi_and_subdev_probe_failed;
+ }
+
+ ret = atomisp_subdev_register_subdev(&isp->asd, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "atomisp_subdev_register_subdev fail\n");
+ goto subdev_register_failed;
+ }
+
+ return 0;
+
+subdev_register_failed:
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
+ atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
+csi_and_subdev_probe_failed:
+ v4l2_device_unregister(&isp->v4l2_dev);
+v4l2_device_failed:
+ media_device_unregister(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
+ return ret;
+}
+
+static void atomisp_init_sensor(struct atomisp_input_subdev *input)
+{
+ static struct lock_class_key try_sd_state_key;
+ struct v4l2_subdev_mbus_code_enum mbus_code_enum = { };
+ struct v4l2_subdev_frame_size_enum fse = { };
+ struct v4l2_subdev_selection sel = { };
+ struct v4l2_subdev_state *try_sd_state, *act_sd_state;
+ int i, err;
+
+ /*
+ * FIXME: Drivers are not supposed to use __v4l2_subdev_state_alloc()
+ * but atomisp needs this for try_fmt on its /dev/video# node since
+ * it emulates a normal v4l2 device there, passing through try_fmt /
+ * set_fmt to the sensor.
+ */
+ try_sd_state = __v4l2_subdev_state_alloc(input->sensor,
+ "atomisp:try_sd_state->lock",
+ &try_sd_state_key);
+ if (IS_ERR(try_sd_state))
+ return;
+
+ input->try_sd_state = try_sd_state;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
+
+ mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ err = v4l2_subdev_call(input->sensor, pad, enum_mbus_code,
+ act_sd_state, &mbus_code_enum);
+ if (!err)
+ input->code = mbus_code_enum.code;
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
+ err = v4l2_subdev_call(input->sensor, pad, get_selection,
+ act_sd_state, &sel);
+ if (err)
+ goto unlock_act_sd_state;
+
+ input->native_rect = sel.r;
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
+ err = v4l2_subdev_call(input->sensor, pad, get_selection,
+ act_sd_state, &sel);
+ if (err)
+ goto unlock_act_sd_state;
+
+ input->active_rect = sel.r;
+
+ /*
+ * Check for a framesize with half active_rect width and height,
+ * if found assume the sensor supports binning.
+ * Do this before changing the crop-rect since that may influence
+ * enum_frame_size results.
+ */
+ for (i = 0; ; i++) {
+ fse.index = i;
+ fse.code = input->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ err = v4l2_subdev_call(input->sensor, pad, enum_frame_size,
+ act_sd_state, &fse);
+ if (err)
+ break;
+
+ if (fse.min_width <= (input->active_rect.width / 2) &&
+ fse.min_height <= (input->active_rect.height / 2)) {
+ input->binning_support = true;
+ break;
+ }
+ }
+
+ /*
+ * The ISP also wants the non-active pixels at the border of the sensor
+ * for padding, set the crop rect to cover the entire sensor instead
+ * of only the default active area.
+ *
+ * Do this for both try and active formats since the crop rect in
+ * try_sd_state may influence (clamp size) in calls with which == try.
+ */
+ sel.which = V4L2_SUBDEV_FORMAT_TRY;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = input->native_rect;
+
+ /* Don't lock try_sd_state if the lock is shared with the active state */
+ if (!input->sensor->state_lock)
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ err = v4l2_subdev_call(input->sensor, pad, set_selection,
+ input->try_sd_state, &sel);
+
+ if (!input->sensor->state_lock)
+ v4l2_subdev_unlock_state(input->try_sd_state);
+
+ if (err)
+ goto unlock_act_sd_state;
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = input->native_rect;
+ err = v4l2_subdev_call(input->sensor, pad, set_selection,
+ act_sd_state, &sel);
+ if (err)
+ goto unlock_act_sd_state;
+
+ dev_info(input->sensor->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
+ input->native_rect.width, input->native_rect.height,
+ input->active_rect.width, input->active_rect.height,
+ input->binning_support);
+
+ input->crop_support = true;
+
+unlock_act_sd_state:
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+}
+
+int atomisp_register_device_nodes(struct atomisp_device *isp)
+{
+ struct media_pad *sensor_isp_sink, *sensor_src;
+ struct atomisp_input_subdev *input;
+ int i, err, source_pad;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ err = media_create_pad_link(&isp->csi2_port[i].subdev.entity,
+ CSI2_PAD_SOURCE, &isp->asd.subdev.entity,
+ ATOMISP_SUBDEV_PAD_SINK, 0);
+ if (err)
+ return err;
+
+ if (!isp->sensor_subdevs[i])
+ continue;
+
+ input = &isp->inputs[isp->input_cnt];
+
+ input->port = i;
+ input->csi_port = &isp->csi2_port[i].subdev;
+ input->csi_remote_source = isp->sensor_subdevs[i];
+
+ /*
+		 * Special case for sensors with an ISP in the sensor, modelled
+ * as a separate v4l2-subdev, like the mt9m114.
+ */
+ if (isp->sensor_subdevs[i]->entity.function == MEDIA_ENT_F_PROC_VIDEO_ISP) {
+ input->sensor_isp = isp->sensor_subdevs[i];
+ source_pad = SENSOR_ISP_PAD_SOURCE;
+
+ sensor_isp_sink = &input->sensor_isp->entity.pads[SENSOR_ISP_PAD_SINK];
+ sensor_src = media_pad_remote_pad_first(sensor_isp_sink);
+ if (!sensor_src) {
+ dev_err(isp->dev, "Error could not find remote pad for sensor ISP sink\n");
+ return -ENOENT;
+ }
+
+ input->sensor = media_entity_to_v4l2_subdev(sensor_src->entity);
+ } else {
+ input->sensor = isp->sensor_subdevs[i];
+ source_pad = 0;
+ }
+
+ atomisp_init_sensor(input);
+
+ err = media_create_pad_link(&isp->sensor_subdevs[i]->entity, source_pad,
+ &isp->csi2_port[i].subdev.entity,
+ CSI2_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (err)
+ return err;
+
+ isp->input_cnt++;
+ }
+
+ if (!isp->input_cnt)
+ dev_warn(isp->dev, "no camera attached or fail to detect\n");
+ else
+ dev_info(isp->dev, "detected %d camera sensors\n", isp->input_cnt);
+
+ mutex_lock(&isp->media_dev.graph_mutex);
+ atomisp_setup_input_links(isp);
+ mutex_unlock(&isp->media_dev.graph_mutex);
+
+ isp->asd.video_out.vdev.v4l2_dev = &isp->v4l2_dev;
+ isp->asd.video_out.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ err = video_register_device(&isp->asd.video_out.vdev, VFL_TYPE_VIDEO, -1);
+ if (err)
+ return err;
+
+ err = media_create_pad_link(&isp->asd.subdev.entity, ATOMISP_SUBDEV_PAD_SOURCE,
+ &isp->asd.video_out.vdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (err)
+ return err;
+
+ err = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+ if (err)
+ return err;
+
+ return media_device_register(&isp->media_dev);
+}
+
+static int atomisp_initialize_modules(struct atomisp_device *isp)
+{
+ int ret;
+
+ ret = atomisp_mipi_csi2_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "mipi csi2 initialization failed\n");
+ goto error_mipi_csi2;
+ }
+
+ ret = atomisp_subdev_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "ISP subdev initialization failed\n");
+ goto error_isp_subdev;
+ }
+
+ return 0;
+
+error_isp_subdev:
+error_mipi_csi2:
+ atomisp_mipi_csi2_cleanup(isp);
+ return ret;
+}
+
+static void atomisp_uninitialize_modules(struct atomisp_device *isp)
+{
+ atomisp_mipi_csi2_cleanup(isp);
+}
+
+const struct firmware *
+atomisp_load_firmware(struct atomisp_device *isp)
+{
+ const struct firmware *fw;
+ int rc;
+ char *fw_path = NULL;
+
+ if ((isp->media_dev.hw_revision >> ATOMISP_HW_REVISION_SHIFT) ==
+ ATOMISP_HW_REVISION_ISP2401)
+ fw_path = "intel/ipu/shisp_2401a0_v21.bin";
+
+ if (isp->media_dev.hw_revision ==
+ ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_A0))
+ fw_path = "intel/ipu/shisp_2401a0_legacy_v21.bin";
+
+ if (isp->media_dev.hw_revision ==
+ ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_B0))
+ fw_path = "intel/ipu/shisp_2400b0_v21.bin";
+
+ if (!fw_path) {
+ dev_err(isp->dev, "Unsupported hw_revision 0x%x\n",
+ isp->media_dev.hw_revision);
+ return NULL;
+ }
+
+ rc = request_firmware(&fw, fw_path, isp->dev);
+	/* Fall back to the old fw_path without the "intel/ipu/" prefix */
+ if (rc)
+ rc = request_firmware(&fw, kbasename(fw_path), isp->dev);
+ if (rc) {
+ dev_err(isp->dev,
+ "atomisp: Error %d while requesting firmware %s\n",
+ rc, fw_path);
+ return NULL;
+ }
+
+ return fw;
+}
+
+static void atomisp_pm_init(struct atomisp_device *isp)
+{
+ /*
+ * The atomisp does not use standard PCI power-management through the
+ * PCI config space. Instead this driver directly tells the P-Unit to
+ * disable the ISP over the IOSF. The standard PCI subsystem pm_ops will
+ * try to access the config space before (resume) / after (suspend) this
+ * driver has turned the ISP on / off, resulting in the following errors:
+ *
+ * "Unable to change power state from D0 to D3hot, device inaccessible"
+ * "Unable to change power state from D3cold to D0, device inaccessible"
+ *
+ * To avoid these errors override the pm_domain so that all the PCI
+ * subsys suspend / resume handling is skipped.
+ */
+ isp->pm_domain.ops.runtime_suspend = atomisp_power_off;
+ isp->pm_domain.ops.runtime_resume = atomisp_power_on;
+ isp->pm_domain.ops.suspend = atomisp_suspend;
+ isp->pm_domain.ops.resume = atomisp_resume;
+
+ cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ dev_pm_domain_set(isp->dev, &isp->pm_domain);
+
+ pm_runtime_allow(isp->dev);
+ pm_runtime_put_sync_suspend(isp->dev);
+}
+
+static void atomisp_pm_uninit(struct atomisp_device *isp)
+{
+ pm_runtime_get_sync(isp->dev);
+ pm_runtime_forbid(isp->dev);
+ dev_pm_domain_set(isp->dev, NULL);
+ cpu_latency_qos_remove_request(&isp->pm_qos);
+}
+
+#define ATOM_ISP_PCI_BAR 0
+
+static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct atomisp_device *isp;
+ unsigned int start;
+ u32 val;
+ int err;
+
+ /* Pointer to struct device. */
+ atomisp_dev = &pdev->dev;
+
+ start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR);
+ dev_dbg(&pdev->dev, "start: 0x%x\n", start);
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->dev = &pdev->dev;
+ isp->saved_regs.ispmmadr = start;
+ isp->asd.isp = isp;
+
+ mutex_init(&isp->mutex);
+ spin_lock_init(&isp->lock);
+
+ /* This is not a true PCI device on SoC, so the delay is not needed. */
+ pdev->d3hot_delay = 0;
+
+ pci_set_drvdata(pdev, isp);
+
+ switch (id->device) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD:
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_1179:
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_117A:
+ isp->media_dev.hw_revision =
+ (ATOMISP_HW_REVISION_ISP2400
+ << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_B0;
+
+ switch (id->device) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_1179:
+ isp->dfs = &dfs_config_merr_1179;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_117A:
+ isp->dfs = &dfs_config_merr_117a;
+
+ break;
+ default:
+ isp->dfs = &dfs_config_merr;
+ break;
+ }
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_BYT:
+ isp->media_dev.hw_revision =
+ (ATOMISP_HW_REVISION_ISP2400
+ << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_B0;
+
+ /*
+ * Note: some Intel-based tablets with Android use a different
+		 * DFS table. Based on the comments in the Yocto Aero meta
+		 * version of this driver (in the ssid.h header), they're
+ * identified via a "spid" var:
+ *
+ * androidboot.spid=vend:cust:manu:plat:prod:hard
+ *
+		 * As we don't have this upstream, nor do we know enough details
+ * to use a DMI or PCI match table, the old code was just
+ * removed, but let's keep a note here as a reminder that,
+ * for certain devices, we may need to limit the max DFS
+ * frequency to be below certain values, adjusting the
+ * resolution accordingly.
+ */
+ isp->dfs = &dfs_config_byt;
+
+ /*
+ * HPLL frequency is known to be device-specific, but we don't
+ * have specs yet for exactly how it varies. Default to
+ * BYT-CR but let provisioning set it via EFI variable
+ */
+ isp->hpll_freq = gmin_get_var_int(&pdev->dev, false, "HpllFreq", HPLL_FREQ_2000MHZ);
+
+ /*
+		 * For BYT/CHT we put the ISP into D3cold to avoid PCI register
+		 * accesses while powered off. Set d3cold_delay to 0 since the
+		 * default 100 ms is not
+ * necessary.
+ */
+ pdev->d3cold_delay = 0;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_ANN:
+ isp->media_dev.hw_revision = (ATOMISP_HW_REVISION_ISP2401
+ << ATOMISP_HW_REVISION_SHIFT);
+ isp->media_dev.hw_revision |= pdev->revision < 2 ?
+ ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
+ isp->dfs = &dfs_config_merr;
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_CHT:
+ isp->media_dev.hw_revision = (ATOMISP_HW_REVISION_ISP2401
+ << ATOMISP_HW_REVISION_SHIFT);
+ isp->media_dev.hw_revision |= pdev->revision < 2 ?
+ ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
+
+ isp->dfs = &dfs_config_cht;
+ pdev->d3cold_delay = 0;
+
+ iosf_mbi_read(BT_MBI_UNIT_CCK, MBI_REG_READ, CCK_FUSE_REG_0, &val);
+ switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
+ case 0x00:
+ isp->hpll_freq = HPLL_FREQ_800MHZ;
+ break;
+ case 0x01:
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case 0x02:
+ isp->hpll_freq = HPLL_FREQ_2000MHZ;
+ break;
+ default:
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ dev_warn(&pdev->dev, "read HPLL from cck failed. Default to 1600 MHz.\n");
+ }
+ break;
+ default:
+ dev_err(&pdev->dev, "un-supported IUNIT device\n");
+ return -ENODEV;
+ }
+
+ if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
+ dev_err(&pdev->dev, "revision %d is not supported\n", pdev->revision);
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "ISP HPLL frequency base = %d MHz\n", isp->hpll_freq);
+
+ isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
+
+ /* Load isp firmware from user space */
+ isp->firmware = atomisp_load_firmware(isp);
+ if (!isp->firmware) {
+		/* No firmware; continue in pm-only mode for S0i3 support */
+ dev_info(&pdev->dev, "Continuing in power-management only mode\n");
+ isp->pm_only = true;
+ atomisp_pm_init(isp);
+ return 0;
+ }
+
+ err = sh_css_check_firmware_version(isp->dev, isp->firmware->data);
+ if (err) {
+ dev_dbg(&pdev->dev, "Firmware version check failed\n");
+ goto error_release_firmware;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable ISP PCI device (%d)\n", err);
+ goto error_release_firmware;
+ }
+
+ err = pcim_iomap_regions(pdev, BIT(ATOM_ISP_PCI_BAR), pci_name(pdev));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err);
+ goto error_release_firmware;
+ }
+
+ isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR];
+
+ pci_set_master(pdev);
+
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err);
+ goto error_release_firmware;
+ }
+
+ atomisp_msi_irq_init(isp);
+
+ /*
+	 * For MRFLD, software/firmware needs to write a 1 to bit 0 of
+	 * the register at CSI_RECEIVER_SELECTION_REG to enable the SH CSI
+	 * backend. Writing 0 would enable the Arasan CSI backend, which
+	 * has bugs (sightings 4567697 and 4567699) and will be removed
+	 * in B0.
+ */
+ atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
+
+ switch (id->device) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD:
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_1179:
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_117A:
+ /*
+		 * Workaround for the data eye imbalance issue observed
+ * on TNG B0.
+ */
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, &val);
+ val &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT));
+ val |= (MRFLD_PCI_CSI1_HSRXCLKTRIM << MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI2_HSRXCLKTRIM << MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI3_HSRXCLKTRIM << MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, val);
+ break;
+ default:
+ break;
+ }
+
+ err = atomisp_initialize_modules(isp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err);
+ goto error_irq_uninit;
+ }
+
+ err = atomisp_register_entities(isp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err);
+ goto error_uninitialize_modules;
+ }
+
+ INIT_WORK(&isp->assert_recovery_work, atomisp_assert_recovery_work);
+
+	/* Save the IUNIT context only once, after all the values are initialized. */
+ atomisp_save_iunit_reg(isp);
+
+ /* Init ISP memory management */
+ hmm_init();
+
+ err = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ atomisp_isr, atomisp_isr_thread,
+ IRQF_SHARED, "isp_irq", isp);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq (%d)\n", err);
+ goto error_unregister_entities;
+ }
+
+ /* Load firmware into ISP memory */
+ err = atomisp_css_load_firmware(isp);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init css.\n");
+ goto error_free_irq;
+ }
+ /* Clear FW image from memory */
+ release_firmware(isp->firmware);
+ isp->firmware = NULL;
+ isp->css_env.isp_css_fw.data = NULL;
+
+ atomisp_pm_init(isp);
+
+ err = v4l2_async_nf_register(&isp->notifier);
+ if (err) {
+ dev_err(isp->dev, "failed to register async notifier : %d\n", err);
+ goto error_unload_firmware;
+ }
+
+ return 0;
+
+error_unload_firmware:
+ atomisp_pm_uninit(isp);
+ ia_css_unload_firmware();
+error_free_irq:
+ devm_free_irq(&pdev->dev, pdev->irq, isp);
+error_unregister_entities:
+ hmm_cleanup();
+ atomisp_unregister_entities(isp);
+error_uninitialize_modules:
+ atomisp_uninitialize_modules(isp);
+error_irq_uninit:
+ atomisp_msi_irq_uninit(isp);
+ pci_free_irq_vectors(pdev);
+error_release_firmware:
+ release_firmware(isp->firmware);
+ return err;
+}
+
+static void atomisp_pci_remove(struct pci_dev *pdev)
+{
+ struct atomisp_device *isp = pci_get_drvdata(pdev);
+
+ atomisp_pm_uninit(isp);
+
+ if (isp->pm_only)
+ return;
+
+ /* Undo ia_css_init() from atomisp_power_on() */
+ atomisp_css_uninit(isp);
+ ia_css_unload_firmware();
+ devm_free_irq(&pdev->dev, pdev->irq, isp);
+ hmm_cleanup();
+
+ atomisp_unregister_entities(isp);
+ atomisp_uninitialize_modules(isp);
+ atomisp_msi_irq_uninit(isp);
+ pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id atomisp_pci_tbl[] = {
+ /* Merrifield */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD_1179)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD_117A)},
+ /* Baytrail */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_BYT)},
+ /* Anniedale (Merrifield+ / Moorefield) */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_ANN)},
+ /* Cherrytrail */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_CHT)},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, atomisp_pci_tbl);
+
+static struct pci_driver atomisp_pci_driver = {
+ .name = "atomisp-isp2",
+ .id_table = atomisp_pci_tbl,
+ .probe = atomisp_pci_probe,
+ .remove = atomisp_pci_remove,
+};
+
+module_pci_driver(atomisp_pci_driver);
+
+MODULE_AUTHOR("Wen Wang <wen.w.wang@intel.com>");
+MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ATOM Platform ISP Driver");
+MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
+MODULE_IMPORT_NS("INTEL_INT3472_DISCRETE");
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
new file mode 100644
index 000000000000..f1b740360009
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for the Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#ifndef __ATOMISP_V4L2_H__
+#define __ATOMISP_V4L2_H__
+
+struct atomisp_video_pipe;
+struct v4l2_device;
+struct atomisp_device;
+struct firmware;
+
+int atomisp_video_init(struct atomisp_video_pipe *video);
+void atomisp_video_unregister(struct atomisp_video_pipe *video);
+const struct firmware *atomisp_load_firmware(struct atomisp_device *isp);
+int atomisp_csi_lane_config(struct atomisp_device *isp);
+int atomisp_register_device_nodes(struct atomisp_device *isp);
+
+#endif /* __ATOMISP_V4L2_H__ */
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
new file mode 100644
index 000000000000..86300991d30e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_H
+#define _IA_CSS_CIRCBUF_H
+
+#include <sp.h>
+#include <type_support.h>
+#include <math_support.h>
+#include <assert_support.h>
+#include <platform_support.h>
+#include "ia_css_circbuf_comm.h"
+#include "ia_css_circbuf_desc.h"
+
+/****************************************************************
+ *
+ * Data structures.
+ *
+ ****************************************************************/
+/**
+ * @brief Data structure for the circular buffer.
+ */
+typedef struct ia_css_circbuf_s ia_css_circbuf_t;
+struct ia_css_circbuf_s {
+ ia_css_circbuf_desc_t *desc; /* Pointer to the descriptor of the circbuf */
+ ia_css_circbuf_elem_t *elems; /* an array of elements */
+};
+
+/**
+ * @brief Create the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param elems An array of elements.
+ * @param desc The descriptor, whose size is set using ia_css_circbuf_desc_init().
+ */
+void ia_css_circbuf_create(
+ ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t *elems,
+ ia_css_circbuf_desc_t *desc);
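+
+/*
+ * Illustrative usage sketch (added for documentation purposes; it assumes
+ * the element array is at least as large as the size passed to
+ * ia_css_circbuf_desc_init()):
+ *
+ *	ia_css_circbuf_desc_t desc;
+ *	ia_css_circbuf_elem_t elems[8];
+ *	ia_css_circbuf_t cb;
+ *	uint32_t val;
+ *
+ *	ia_css_circbuf_desc_init(&desc, 8);
+ *	ia_css_circbuf_create(&cb, elems, &desc);
+ *	ia_css_circbuf_push(&cb, 42);
+ *	if (!ia_css_circbuf_is_empty(&cb))
+ *		val = ia_css_circbuf_pop(&cb);
+ */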
+
+/**
+ * @brief Destroy the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ */
+void ia_css_circbuf_destroy(
+ ia_css_circbuf_t *cb);
+
+/**
+ * @brief Pop a value out of the circular buffer.
+ * Get a value at the head of the circular buffer.
+ * The user should call "ia_css_circbuf_is_empty()"
+ * to avoid accessing an empty buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the pop-out value.
+ */
+uint32_t ia_css_circbuf_pop(
+ ia_css_circbuf_t *cb);
+
+/**
+ * @brief Extract a value out of the circular buffer.
+ * Get a value at an arbitrary position in the circular
+ * buffer. The user should call "ia_css_circbuf_is_empty()"
+ * to avoid accessing an empty buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset The offset from "start" to the target position.
+ *
+ * @return the extracted value.
+ */
+uint32_t ia_css_circbuf_extract(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/**
+ * @brief Set the "val" field in the element.
+ *
+ * @param elem The pointer to the element.
+ * @param val The value to be set.
+ */
+static inline void ia_css_circbuf_elem_set_val(
+ ia_css_circbuf_elem_t *elem,
+ uint32_t val)
+{
+ OP___assert(elem);
+
+ elem->val = val;
+}
+
+/**
+ * @brief Initialize the element.
+ *
+ * @param elem The pointer to the element.
+ */
+static inline void ia_css_circbuf_elem_init(
+ ia_css_circbuf_elem_t *elem)
+{
+ OP___assert(elem);
+ ia_css_circbuf_elem_set_val(elem, 0);
+}
+
+/**
+ * @brief Copy an element.
+ *
+ * @param src The element as the copy source.
+ * @param dest The element as the copy destination.
+ */
+static inline void ia_css_circbuf_elem_cpy(
+ ia_css_circbuf_elem_t *src,
+ ia_css_circbuf_elem_t *dest)
+{
+ OP___assert(src);
+ OP___assert(dest);
+
+ ia_css_circbuf_elem_set_val(dest, src->val);
+}
+
+/**
+ * @brief Get position in the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param base The base position.
+ * @param offset The offset.
+ *
+ * @return the position at offset.
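+ *
+ * For example, assuming OP_std_modadd() computes (base + offset) % size,
+ * a buffer of size 5 with base == 4 and offset == 2 yields position 1.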
+ */
+static inline uint8_t ia_css_circbuf_get_pos_at_offset(
+ ia_css_circbuf_t *cb,
+ u32 base,
+ int offset)
+{
+ u8 dest;
+
+ OP___assert(cb);
+ OP___assert(cb->desc);
+ OP___assert(cb->desc->size > 0);
+
+	/* step 1: adjust the offset */
+ while (offset < 0) {
+ offset += cb->desc->size;
+ }
+
+ /* step 2: shift and round by the upper limit */
+ dest = OP_std_modadd(base, offset, cb->desc->size);
+
+ return dest;
+}
+
+/**
+ * @brief Get the offset between two positions in the circular buffer.
+ * Get the offset from the source position to the terminal position,
+ * along the direction in which the new elements come in.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param src_pos The source position.
+ * @param dest_pos The terminal position.
+ *
+ * @return the offset.
+ */
+static inline int ia_css_circbuf_get_offset(
+ ia_css_circbuf_t *cb,
+ u32 src_pos,
+ uint32_t dest_pos)
+{
+ int offset;
+
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ offset = (int)(dest_pos - src_pos);
+ offset += (offset < 0) ? cb->desc->size : 0;
+
+ return offset;
+}
+
+/**
+ * @brief Get the maximum number of elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the maximum number of elements.
+ *
+ * TODO: Test this API.
+ */
+static inline uint32_t ia_css_circbuf_get_size(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return cb->desc->size;
+}
+
+/**
+ * @brief Get the number of available elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the number of available elements.
+ */
+static inline uint32_t ia_css_circbuf_get_num_elems(
+ ia_css_circbuf_t *cb)
+{
+ int num;
+
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ num = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
+
+ return (uint32_t)num;
+}
+
+/**
+ * @brief Test if the circular buffer is empty.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return
+ * - true when it is empty.
+ * - false when it is not empty.
+ */
+static inline bool ia_css_circbuf_is_empty(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return ia_css_circbuf_desc_is_empty(cb->desc);
+}
+
+/**
+ * @brief Test if the circular buffer is full.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return
+ * - true when it is full.
+ * - false when it is not full.
+ */
+static inline bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return ia_css_circbuf_desc_is_full(cb->desc);
+}
+
+/**
+ * @brief Write a new element into the circular buffer.
+ * Write a new element WITHOUT checking whether the
+ * circular buffer is full or not; the caller must ensure there is
+ * room (an assert below guards against writing to a full buffer).
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param elem The new element.
+ */
+static inline void ia_css_circbuf_write(
+ ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t elem)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+	/* Cannot continue as the queue is full */
+ assert(!ia_css_circbuf_is_full(cb));
+
+ ia_css_circbuf_elem_cpy(&elem, &cb->elems[cb->desc->end]);
+
+ cb->desc->end = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, 1);
+}
+
+/**
+ * @brief Push a value in the circular buffer.
+ * Put a new value at the tail of the circular buffer.
+ * The user should call "ia_css_circbuf_is_full()"
+ * to avoid accessing a full buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param val The value to be pushed in.
+ */
+static inline void ia_css_circbuf_push(
+ ia_css_circbuf_t *cb,
+ uint32_t val)
+{
+ ia_css_circbuf_elem_t elem;
+
+ OP___assert(cb);
+
+ /* set up an element */
+ ia_css_circbuf_elem_init(&elem);
+ ia_css_circbuf_elem_set_val(&elem, val);
+
+ /* write the element into the buffer */
+ ia_css_circbuf_write(cb, elem);
+}
+
+/**
+ * @brief Get the number of free elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return: The number of free elements.
+ */
+static inline uint32_t ia_css_circbuf_get_free_elems(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb);
+ OP___assert(cb->desc);
+
+ return ia_css_circbuf_desc_get_free_elems(cb->desc);
+}
+
+/**
+ * @brief Peek an element in Circular Buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset Offset to the element.
+ *
+ * @return the element's value.
+ */
+uint32_t ia_css_circbuf_peek(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/**
+ * @brief Get an element in Circular Buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset Offset to the element.
+ *
+ * @return the element's value.
+ */
+uint32_t ia_css_circbuf_peek_from_start(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/**
+ * @brief Increase the size of a circular buffer.
+ * Use with CAUTION: this was added only to support/fix an issue
+ * with increasing the size for the tagger.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param sz_delta The delta by which to increase the size.
+ * @param elems (optional) Pointers to new additional elements.
+ * The cb element array size will not be increased dynamically;
+ * new elements should be appended to the existing cb element
+ * array, whose max_size must be >= the new size.
+ *
+ * @return true on successfully increasing the size
+ * false on failure
+ */
+bool ia_css_circbuf_increase_size(
+ ia_css_circbuf_t *cb,
+ unsigned int sz_delta,
+ ia_css_circbuf_elem_t *elems);
+
+#endif /*_IA_CSS_CIRCBUF_H */
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h
new file mode 100644
index 000000000000..971e07f2acc5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_COMM_H
+#define _IA_CSS_CIRCBUF_COMM_H
+
+#include <linux/build_bug.h>
+
+#include <type_support.h> /* uint8_t, uint32_t */
+
+#define IA_CSS_CIRCBUF_PADDING 1 /* The circular buffer is implemented in a lock-less manner, wherein
+ * the head and tail can advance independently without any locks.
+ * But to achieve this, an extra buffer element is required to detect
+ * queue full & empty conditions, wherein the tail trails the head for
+ * full and is equal to the head for the empty condition. This leaves
+ * one buffer element unavailable for use.
+ */
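+
+/*
+ * Illustrative example: with size == 4 at most 3 elements can be stored.
+ * The queue is empty when end == start and full when the end index is one
+ * step (modulo size) behind the start index, see
+ * ia_css_circbuf_desc_is_empty() / ia_css_circbuf_desc_is_full().
+ */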
+
+/****************************************************************
+ *
+ * Portable Data structures
+ *
+ ****************************************************************/
+/**
+ * @brief Data structure for the circular descriptor.
+ */
+typedef struct ia_css_circbuf_desc_s ia_css_circbuf_desc_t;
+struct ia_css_circbuf_desc_s {
+ u8 size; /* the maximum number of elements*/
+ u8 step; /* number of bytes per element */
+ u8 start; /* index of the oldest element */
+ u8 end; /* index at which to write the new element */
+};
+
+#define SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT \
+ (4 * sizeof(uint8_t))
+
+static_assert(sizeof(struct ia_css_circbuf_desc_s) == SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT);
+
+/**
+ * @brief Data structure for the circular buffer element.
+ */
+typedef struct ia_css_circbuf_elem_s ia_css_circbuf_elem_t;
+struct ia_css_circbuf_elem_s {
+ u32 val; /* the value stored in the element */
+};
+
+#define SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT \
+ (sizeof(uint32_t))
+
+static_assert(sizeof(struct ia_css_circbuf_elem_s) == SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT);
+
+#endif /*_IA_CSS_CIRCBUF_COMM_H*/
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h
new file mode 100644
index 000000000000..5645a7bf493c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_DESC_H_
+#define _IA_CSS_CIRCBUF_DESC_H_
+
+#include <type_support.h>
+#include <math_support.h>
+#include <platform_support.h>
+#include <sp.h>
+#include "ia_css_circbuf_comm.h"
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/**
+ * @brief Test if the circular buffer is empty.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ *
+ * @return
+ * - true when it is empty.
+ * - false when it is not empty.
+ */
+static inline bool ia_css_circbuf_desc_is_empty(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ OP___assert(cb_desc);
+ return (cb_desc->end == cb_desc->start);
+}
+
+/**
+ * @brief Test if the circular buffer descriptor is full.
+ *
+ * @param cb_desc The pointer to the circular buffer
+ * descriptor.
+ *
+ * @return
+ * - true when it is full.
+ * - false when it is not full.
+ */
+static inline bool ia_css_circbuf_desc_is_full(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ OP___assert(cb_desc);
+ return (OP_std_modadd(cb_desc->end, 1, cb_desc->size) == cb_desc->start);
+}
+
+/**
+ * @brief Initialize the circular buffer descriptor
+ *
+ * @param cb_desc The pointer circular buffer descriptor
+ * @param size The size of the circular buffer
+ */
+static inline void ia_css_circbuf_desc_init(
+ ia_css_circbuf_desc_t *cb_desc,
+ int8_t size)
+{
+ OP___assert(cb_desc);
+ cb_desc->size = size;
+}
+
+/**
+ * @brief Get a position in the circular buffer descriptor.
+ *
+ * @param cb The pointer to the circular buffer descriptor.
+ * @param base The base position.
+ * @param offset The offset.
+ *
+ * @return the position in the circular buffer descriptor.
+ */
+static inline uint8_t ia_css_circbuf_desc_get_pos_at_offset(
+ ia_css_circbuf_desc_t *cb_desc,
+ u32 base,
+ int offset)
+{
+ u8 dest;
+
+ OP___assert(cb_desc);
+ OP___assert(cb_desc->size > 0);
+
+ /* step 1: adjust the offset */
+ while (offset < 0) {
+ offset += cb_desc->size;
+ }
+
+ /* step 2: shift and round by the upper limit */
+ dest = OP_std_modadd(base, offset, cb_desc->size);
+
+ return dest;
+}
+
+/**
+ * @brief Get the offset between two positions in the circular buffer
+ * descriptor.
+ * Get the offset from the source position to the terminal position,
+ * along the direction in which the new elements come in.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ * @param src_pos The source position.
+ * @param dest_pos The terminal position.
+ *
+ * @return the offset.
+ */
+static inline int ia_css_circbuf_desc_get_offset(
+ ia_css_circbuf_desc_t *cb_desc,
+ u32 src_pos,
+ uint32_t dest_pos)
+{
+ int offset;
+
+ OP___assert(cb_desc);
+
+ offset = (int)(dest_pos - src_pos);
+ offset += (offset < 0) ? cb_desc->size : 0;
+
+ return offset;
+}
+
+/**
+ * @brief Get the number of available elements.
+ *
+ * @param cb_desc The pointer to the circular buffer.
+ *
+ * @return The number of available elements.
+ */
+static inline uint32_t ia_css_circbuf_desc_get_num_elems(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ int num;
+
+ OP___assert(cb_desc);
+
+ num = ia_css_circbuf_desc_get_offset(cb_desc,
+ cb_desc->start,
+ cb_desc->end);
+
+ return (uint32_t)num;
+}
+
+/**
+ * @brief Get the number of free elements.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ *
+ * @return: The number of free elements.
+ */
+static inline uint32_t ia_css_circbuf_desc_get_free_elems(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ u32 num;
+
+ OP___assert(cb_desc);
+
+ num = ia_css_circbuf_desc_get_offset(cb_desc,
+ cb_desc->start,
+ cb_desc->end);
+
+ return (cb_desc->size - num);
+}
+#endif /*_IA_CSS_CIRCBUF_DESC_H_ */
diff --git a/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c b/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
new file mode 100644
index 000000000000..cb34d0b5abb5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_circbuf.h"
+
+#include <assert_support.h>
+
+/**********************************************************************
+ *
+ * Forward declarations.
+ *
+ **********************************************************************/
+/*
+ * @brief Read the oldest element from the circular buffer.
+ * Read the oldest element WITHOUT checking whether the
+ * circular buffer is empty or not. The oldest element is
+ * also removed out from the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the oldest element.
+ */
+static inline ia_css_circbuf_elem_t
+ia_css_circbuf_read(ia_css_circbuf_t *cb);
+
+/*
+ * @brief Shift a chunk of elements in the circular buffer.
+ * A chunk of elements (i.e. the ones from the "start" position
+ * to the "chunk_src" position) are shifted in the circular buffer,
+ * along the direction of new elements coming.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param chunk_src The position at which the first element in the chunk is.
+ * @param chunk_dest The position to which the first element in the chunk would be shift.
+ */
+static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
+ u32 chunk_src,
+ uint32_t chunk_dest);
+
+/*
+ * @brief Get the "val" field in the element.
+ *
+ * @param elem The pointer to the element.
+ *
+ * @return the "val" field.
+ */
+static inline uint32_t
+ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem);
+
+/**********************************************************************
+ *
+ * Non-inline functions.
+ *
+ **********************************************************************/
+/*
+ * @brief Create the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+void
+ia_css_circbuf_create(ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t *elems,
+ ia_css_circbuf_desc_t *desc)
+{
+ u32 i;
+
+ OP___assert(desc);
+
+ cb->desc = desc;
+ /* Initialize to defaults */
+ cb->desc->start = 0;
+ cb->desc->end = 0;
+ cb->desc->step = 0;
+
+ for (i = 0; i < cb->desc->size; i++)
+ ia_css_circbuf_elem_init(&elems[i]);
+
+ cb->elems = elems;
+}
+
+/*
+ * @brief Destroy the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
+{
+ cb->desc = NULL;
+
+ cb->elems = NULL;
+}
+
+/*
+ * @brief Pop a value out of the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
+{
+ u32 ret;
+ ia_css_circbuf_elem_t elem;
+
+ assert(!ia_css_circbuf_is_empty(cb));
+
+ /* read an element from the buffer */
+ elem = ia_css_circbuf_read(cb);
+ ret = ia_css_circbuf_elem_get_val(&elem);
+ return ret;
+}
+
+/*
+ * @brief Extract a value out of the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
+{
+ int max_offset;
+ u32 val;
+ u32 pos;
+ u32 src_pos;
+ u32 dest_pos;
+
+ /* get the maximum offset */
+ max_offset = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
+ max_offset--;
+
+ /*
+ * Step 1: When the target element is at the "start" position.
+ */
+ if (offset == 0) {
+ val = ia_css_circbuf_pop(cb);
+ return val;
+ }
+
+ /*
+ * Step 2: When the target element is out of the range.
+ */
+ if (offset > max_offset) {
+ val = 0;
+ return val;
+ }
+
+ /*
+ * Step 3: When the target element is between the "start" and
+ * "end" position.
+ */
+ /* get the position of the target element */
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
+
+ /* get the value from the target element */
+ val = ia_css_circbuf_elem_get_val(&cb->elems[pos]);
+
+ /* shift the elements */
+ src_pos = ia_css_circbuf_get_pos_at_offset(cb, pos, -1);
+ dest_pos = pos;
+ ia_css_circbuf_shift_chunk(cb, src_pos, dest_pos);
+
+ return val;
+}
+
+/*
+ * @brief Peek an element from the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
+{
+ int pos;
+
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, offset);
+
+ /* get the value at the position */
+ return cb->elems[pos].val;
+}
+
+/*
+ * @brief Get the value of an element from the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
+{
+ int pos;
+
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
+
+ /* get the value at the position */
+ return cb->elems[pos].val;
+}
+
+/* @brief Increase the size of a circular buffer.
+ * Use with CAUTION: this was added only to support/fix an issue
+ * with increasing the size for the tagger.
+ * Please refer to "ia_css_circbuf.h" for details.
+ */
+bool ia_css_circbuf_increase_size(
+ ia_css_circbuf_t *cb,
+ unsigned int sz_delta,
+ ia_css_circbuf_elem_t *elems)
+{
+ u8 curr_size;
+ u8 curr_end;
+ unsigned int i;
+
+ if (!cb || sz_delta == 0)
+ return false;
+
+ curr_size = cb->desc->size;
+ curr_end = cb->desc->end;
+	/* We assume cb was predefined as a global to allow
+	 * the increase in size */
+ /* FM: are we sure this cannot cause size to become too big? */
+ if (((uint8_t)(cb->desc->size + (uint8_t)sz_delta) > cb->desc->size) &&
+ ((uint8_t)sz_delta == sz_delta))
+ cb->desc->size += (uint8_t)sz_delta;
+ else
+ return false; /* overflow in size */
+
+	/* If elems are passed, update them; otherwise we assume this has
+	 * been taken care of before calling this function */
+ if (elems) {
+		/* The cb element array size will not be increased dynamically,
+		 * but pointers to new elements can be added at the end
+		 * of the existing predefined cb element array (of
+		 * size >= new size) if not already added */
+ for (i = curr_size; i < cb->desc->size; i++)
+ cb->elems[i] = elems[i - curr_size];
+ }
+ /* Fix Start / End */
+ if (curr_end < cb->desc->start) {
+ if (curr_end == 0) {
+ /* Easily fix End */
+ cb->desc->end = curr_size;
+ } else {
+ /* Move elements and fix Start*/
+ ia_css_circbuf_shift_chunk(cb,
+ curr_size - 1,
+ curr_size + sz_delta - 1);
+ }
+ }
+
+ return true;
+}
+
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/*
+ * @brief Get the "val" field in the element.
+ * Refer to "Forward declarations" for details.
+ */
+static inline uint32_t
+ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem)
+{
+ return elem->val;
+}
+
+/*
+ * @brief Read the oldest element from the circular buffer.
+ * Refer to "Forward declarations" for details.
+ */
+static inline ia_css_circbuf_elem_t
+ia_css_circbuf_read(ia_css_circbuf_t *cb)
+{
+ ia_css_circbuf_elem_t elem;
+
+ /* get the element from the target position */
+ elem = cb->elems[cb->desc->start];
+
+ /* clear the target position */
+ ia_css_circbuf_elem_init(&cb->elems[cb->desc->start]);
+
+ /* adjust the "start" position */
+ cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, 1);
+ return elem;
+}
+
+/*
+ * @brief Shift a chunk of elements in the circular buffer.
+ * Refer to "Forward declarations" for details.
+ */
+static inline void
+ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
+ u32 chunk_src, uint32_t chunk_dest)
+{
+ int chunk_offset;
+ int chunk_sz;
+ int i;
+
+ /* get the chunk offset and size */
+ chunk_offset = ia_css_circbuf_get_offset(cb,
+ chunk_src, chunk_dest);
+ chunk_sz = ia_css_circbuf_get_offset(cb, cb->desc->start, chunk_src) + 1;
+
+ /* shift each element to its terminal position */
+ for (i = 0; i < chunk_sz; i++) {
+ /* copy the element from the source to the destination */
+ ia_css_circbuf_elem_cpy(&cb->elems[chunk_src],
+ &cb->elems[chunk_dest]);
+
+ /* clear the source position */
+ ia_css_circbuf_elem_init(&cb->elems[chunk_src]);
+
+ /* adjust the source/terminal positions */
+ chunk_src = ia_css_circbuf_get_pos_at_offset(cb, chunk_src, -1);
+ chunk_dest = ia_css_circbuf_get_pos_at_offset(cb, chunk_dest, -1);
+ }
+
+ /* adjust the index "start" */
+ cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start,
+ chunk_offset);
+}
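
A minimal caller-side sketch of the read paths implemented above (not part of the patch; it assumes an ia_css_circbuf_t that was created and filled elsewhere):

static void circbuf_read_example(ia_css_circbuf_t *cb)
{
	uint32_t oldest, newest, third;

	oldest = ia_css_circbuf_peek_from_start(cb, 0);	/* element at "start", non-destructive */
	newest = ia_css_circbuf_peek(cb, -1);		/* element just behind "end" */
	third  = ia_css_circbuf_extract(cb, 2);		/* remove the third-oldest element and
							 * shift the remaining ones up */
	(void)oldest; (void)newest; (void)third;
}
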
diff --git a/drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h b/drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h
new file mode 100644
index 000000000000..d78859e3a5b0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_REFCOUNT_H_
+#define _IA_CSS_REFCOUNT_H_
+
+#include <type_support.h>
+#include <system_local.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+
+typedef void (*clear_func)(ia_css_ptr ptr);
+
+/*! \brief Function for initializing refcount list
+ *
+ * \param[in] size Size of the refcount list.
+ * \return 0 on success, or a negative error code on failure.
+ */
+int ia_css_refcount_init(uint32_t size);
+
+/*! \brief Function for de-initializing refcount list
+ *
+ * \return None
+ */
+void ia_css_refcount_uninit(void);
+
+/*! \brief Function for increasing the reference count by 1.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] ptr Data of the object (ptr).
+ * \return ia_css_ptr (saved address)
+ */
+ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr);
+
+/*! \brief Function for decreasing the reference count by 1.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] ptr Data of the object (ptr).
+ *
+ * \return - true, if it is successful.
+ *          - false, otherwise.
+ */
+bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr);
+
+/*! \brief Function to check if reference count is 1.
+ *
+ * \param[in] ptr Data of the object (ptr).
+ *
+ * \return - true, if the reference count is 1.
+ *          - false, otherwise.
+ */
+bool ia_css_refcount_is_single(ia_css_ptr ptr);
+
+/*! \brief Function to clear reference list objects.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] clear_func_ptr Function to be run on each reference object to free it.
+ *
+ * \return None
+ */
+void ia_css_refcount_clear(s32 id,
+ clear_func clear_func_ptr);
+
+/*! \brief Function to verify if object is valid
+ *
+ * \param[in] ptr Data of the object (ptr)
+ *
+ * \return - true, if valid.
+ *          - false, if invalid.
+ */
+bool ia_css_refcount_is_valid(ia_css_ptr ptr);
+
+#endif /* _IA_CSS_REFCOUNT_H_ */
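
A minimal usage sketch of this interface (not part of the patch; MY_OWNER_ID is a placeholder object id, and buf stands for a device address obtained from the hmm allocator elsewhere):

#define MY_OWNER_ID 0x1234	/* placeholder; real callers pass their own id */

static void refcount_example(ia_css_ptr buf)
{
	if (ia_css_refcount_init(32))			/* room to track 32 buffers */
		return;					/* -EINVAL or -ENOMEM */

	ia_css_refcount_increment(MY_OWNER_ID, buf);	/* registers buf, count = 1 */
	ia_css_refcount_increment(MY_OWNER_ID, buf);	/* count = 2 */

	ia_css_refcount_decrement(MY_OWNER_ID, buf);	/* count = 1 */
	if (ia_css_refcount_is_single(buf))
		ia_css_refcount_decrement(MY_OWNER_ID, buf);	/* count 0: buffer freed */

	ia_css_refcount_uninit();
}
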
diff --git a/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
new file mode 100644
index 000000000000..58e4e3173b40
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "ia_css_refcount.h"
+#include "sh_css_defs.h"
+
+#include "platform_support.h"
+
+#include "assert_support.h"
+
+#include "ia_css_debug.h"
+
+/* TODO: enable for other memory as well;
+	now only for ia_css_ptr */
+struct ia_css_refcount_entry {
+ u32 count;
+ ia_css_ptr data;
+ s32 id;
+};
+
+struct ia_css_refcount_list {
+ u32 size;
+ struct ia_css_refcount_entry *items;
+};
+
+static struct ia_css_refcount_list myrefcount;
+
+static struct ia_css_refcount_entry *refcount_find_entry(ia_css_ptr ptr,
+ bool firstfree)
+{
+ u32 i;
+
+ if (ptr == 0)
+ return NULL;
+ if (!myrefcount.items) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "%s(): Ref count not initialized!\n", __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < myrefcount.size; i++) {
+ if ((&myrefcount.items[i])->data == 0) {
+ if (firstfree) {
+ /* for new entry */
+ return &myrefcount.items[i];
+ }
+ }
+ if ((&myrefcount.items[i])->data == ptr) {
+ /* found entry */
+ return &myrefcount.items[i];
+ }
+ }
+ return NULL;
+}
+
+int ia_css_refcount_init(uint32_t size)
+{
+ int err = 0;
+
+ if (size == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s(): Size of 0 for Ref count init!\n", __func__);
+ return -EINVAL;
+ }
+ if (myrefcount.items) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s(): Ref count is already initialized\n", __func__);
+ return -EINVAL;
+ }
+ myrefcount.items =
+ kvmalloc(sizeof(struct ia_css_refcount_entry) * size, GFP_KERNEL);
+ if (!myrefcount.items)
+ err = -ENOMEM;
+ if (!err) {
+ memset(myrefcount.items, 0,
+ sizeof(struct ia_css_refcount_entry) * size);
+ myrefcount.size = size;
+ }
+ return err;
+}
+
+void ia_css_refcount_uninit(void)
+{
+ struct ia_css_refcount_entry *entry;
+ u32 i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s() entry\n", __func__);
+ for (i = 0; i < myrefcount.size; i++) {
+		/* The driver verifier tool has issues with &arr[i] and
+		   prefers arr + i; as these are equivalent, the line
+		   below uses + i
+		*/
+ entry = myrefcount.items + i;
+ if (entry->data != mmgr_NULL) {
+ /* ia_css_debug_dtrace(IA_CSS_DBG_TRACE,
+ "ia_css_refcount_uninit: freeing (%x)\n",
+ entry->data);*/
+ hmm_free(entry->data);
+ entry->data = mmgr_NULL;
+ entry->count = 0;
+ entry->id = 0;
+ }
+ }
+ kvfree(myrefcount.items);
+ myrefcount.items = NULL;
+ myrefcount.size = 0;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s() leave\n", __func__);
+}
+
+ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return ptr;
+
+ entry = refcount_find_entry(ptr, false);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s(%x) 0x%x\n", __func__, id, ptr);
+
+ if (!entry) {
+ entry = refcount_find_entry(ptr, true);
+ assert(entry);
+ if (!entry)
+ return mmgr_NULL;
+ entry->id = id;
+ }
+
+ if (entry->id != id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "%s(): Ref count IDS do not match!\n", __func__);
+ return mmgr_NULL;
+ }
+
+ if (entry->data == ptr)
+ entry->count += 1;
+ else if (entry->data == mmgr_NULL) {
+ entry->data = ptr;
+ entry->count = 1;
+ } else
+ return mmgr_NULL;
+
+ return ptr;
+}
+
+bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s(%x) 0x%x\n", __func__, id, ptr);
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ if (entry) {
+ if (entry->id != id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "%s(): Ref count IDS do not match!\n", __func__);
+ return false;
+ }
+ if (entry->count > 0) {
+ entry->count -= 1;
+ if (entry->count == 0) {
+ /* ia_css_debug_dtrace(IA_CSS_DBEUG_TRACE,
+ "ia_css_refcount_decrement: freeing\n");*/
+ hmm_free(ptr);
+ entry->data = mmgr_NULL;
+ entry->id = 0;
+ }
+ return true;
+ }
+ }
+
+ /* SHOULD NOT HAPPEN: ptr not managed by refcount, or not
+ valid anymore */
+ if (entry)
+ IA_CSS_ERROR("id %x, ptr 0x%x entry %p entry->id %x entry->count %d\n",
+ id, ptr, entry, entry->id, entry->count);
+ else
+ IA_CSS_ERROR("entry NULL\n");
+ assert(false);
+
+ return false;
+}
+
+bool ia_css_refcount_is_single(ia_css_ptr ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ if (entry)
+ return (entry->count == 1);
+
+ return true;
+}
+
+void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
+{
+ struct ia_css_refcount_entry *entry;
+ u32 i;
+ u32 count = 0;
+
+ assert(clear_func_ptr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x)\n",
+ __func__, id);
+
+ for (i = 0; i < myrefcount.size; i++) {
+		/* The driver verifier tool has issues with &arr[i] and
+		   prefers arr + i; as these are equivalent, the line
+		   below uses + i
+		*/
+ entry = myrefcount.items + i;
+ if ((entry->data != mmgr_NULL) && (entry->id == id)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s: %x: 0x%x\n", __func__,
+ id, entry->data);
+ if (clear_func_ptr) {
+ /* clear using provided function */
+ clear_func_ptr(entry->data);
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s: using hmm_free: no clear_func\n", __func__);
+ hmm_free(entry->data);
+ }
+
+ if (entry->count != 0) {
+ IA_CSS_WARNING("Ref count for entry %x is not zero!", entry->id);
+ }
+
+ assert(entry->count == 0);
+
+ entry->data = mmgr_NULL;
+ entry->count = 0;
+ entry->id = 0;
+ count++;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "%s(%x): cleared %d\n", __func__, id,
+ count);
+}
+
+bool ia_css_refcount_is_valid(ia_css_ptr ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ return entry;
+}
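
A short sketch of the bulk-release path (names are placeholders, not part of the patch): ia_css_refcount_clear() walks the list and hands every buffer still registered under the given id to the caller's callback before resetting the entry.

static void my_release(ia_css_ptr data)
{
	hmm_free(data);		/* or hand the buffer back to a private pool */
}

static void teardown_example(void)
{
	/* drop everything still registered under MY_OWNER_ID */
	ia_css_refcount_clear(MY_OWNER_ID, my_release);
}
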
diff --git a/drivers/staging/media/atomisp/pci/bits.h b/drivers/staging/media/atomisp/pci/bits.h
new file mode 100644
index 000000000000..ebd9393d910f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/bits.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _HRT_BITS_H
+#define _HRT_BITS_H
+
+#include <linux/args.h>
+
+#define _hrt_ones(n) CONCATENATE(_hrt_ones_, n)
+#define _hrt_ones_0x0 0x00000000U
+#define _hrt_ones_0x1 0x00000001U
+#define _hrt_ones_0x2 0x00000003U
+#define _hrt_ones_0x3 0x00000007U
+#define _hrt_ones_0x4 0x0000000FU
+#define _hrt_ones_0x5 0x0000001FU
+#define _hrt_ones_0x6 0x0000003FU
+#define _hrt_ones_0x7 0x0000007FU
+#define _hrt_ones_0x8 0x000000FFU
+#define _hrt_ones_0x9 0x000001FFU
+#define _hrt_ones_0xA 0x000003FFU
+#define _hrt_ones_0xB 0x000007FFU
+#define _hrt_ones_0xC 0x00000FFFU
+#define _hrt_ones_0xD 0x00001FFFU
+#define _hrt_ones_0xE 0x00003FFFU
+#define _hrt_ones_0xF 0x00007FFFU
+#define _hrt_ones_0x10 0x0000FFFFU
+#define _hrt_ones_0x11 0x0001FFFFU
+#define _hrt_ones_0x12 0x0003FFFFU
+#define _hrt_ones_0x13 0x0007FFFFU
+#define _hrt_ones_0x14 0x000FFFFFU
+#define _hrt_ones_0x15 0x001FFFFFU
+#define _hrt_ones_0x16 0x003FFFFFU
+#define _hrt_ones_0x17 0x007FFFFFU
+#define _hrt_ones_0x18 0x00FFFFFFU
+#define _hrt_ones_0x19 0x01FFFFFFU
+#define _hrt_ones_0x1A 0x03FFFFFFU
+#define _hrt_ones_0x1B 0x07FFFFFFU
+#define _hrt_ones_0x1C 0x0FFFFFFFU
+#define _hrt_ones_0x1D 0x1FFFFFFFU
+#define _hrt_ones_0x1E 0x3FFFFFFFU
+#define _hrt_ones_0x1F 0x7FFFFFFFU
+#define _hrt_ones_0x20 0xFFFFFFFFU
+
+#define _hrt_ones_0 _hrt_ones_0x0
+#define _hrt_ones_1 _hrt_ones_0x1
+#define _hrt_ones_2 _hrt_ones_0x2
+#define _hrt_ones_3 _hrt_ones_0x3
+#define _hrt_ones_4 _hrt_ones_0x4
+#define _hrt_ones_5 _hrt_ones_0x5
+#define _hrt_ones_6 _hrt_ones_0x6
+#define _hrt_ones_7 _hrt_ones_0x7
+#define _hrt_ones_8 _hrt_ones_0x8
+#define _hrt_ones_9 _hrt_ones_0x9
+#define _hrt_ones_10 _hrt_ones_0xA
+#define _hrt_ones_11 _hrt_ones_0xB
+#define _hrt_ones_12 _hrt_ones_0xC
+#define _hrt_ones_13 _hrt_ones_0xD
+#define _hrt_ones_14 _hrt_ones_0xE
+#define _hrt_ones_15 _hrt_ones_0xF
+#define _hrt_ones_16 _hrt_ones_0x10
+#define _hrt_ones_17 _hrt_ones_0x11
+#define _hrt_ones_18 _hrt_ones_0x12
+#define _hrt_ones_19 _hrt_ones_0x13
+#define _hrt_ones_20 _hrt_ones_0x14
+#define _hrt_ones_21 _hrt_ones_0x15
+#define _hrt_ones_22 _hrt_ones_0x16
+#define _hrt_ones_23 _hrt_ones_0x17
+#define _hrt_ones_24 _hrt_ones_0x18
+#define _hrt_ones_25 _hrt_ones_0x19
+#define _hrt_ones_26 _hrt_ones_0x1A
+#define _hrt_ones_27 _hrt_ones_0x1B
+#define _hrt_ones_28 _hrt_ones_0x1C
+#define _hrt_ones_29 _hrt_ones_0x1D
+#define _hrt_ones_30 _hrt_ones_0x1E
+#define _hrt_ones_31 _hrt_ones_0x1F
+#define _hrt_ones_32 _hrt_ones_0x20
+
+#define _hrt_mask(b, n) \
+ (_hrt_ones(n) << (b))
+#define _hrt_get_bits(w, b, n) \
+ (((w) >> (b)) & _hrt_ones(n))
+#define _hrt_set_bits(w, b, n, v) \
+ (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b)))
+#define _hrt_get_bit(w, b) \
+ (((w) >> (b)) & 1)
+#define _hrt_set_bit(w, b, v) \
+ (((w) & (~(1 << (b)))) | (((v) & 1) << (b)))
+#define _hrt_set_lower_half(w, v) \
+ _hrt_set_bits(w, 0, 16, v)
+#define _hrt_set_upper_half(w, v) \
+ _hrt_set_bits(w, 16, 16, v)
+
+#endif /* _HRT_BITS_H */
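
A worked example of the field accessors above (values chosen for illustration, not part of the patch; u32 is assumed from linux/types.h). Note that the width argument must be one of the literal tokens for which _hrt_ones_<n> is defined:

static u32 bits_example(void)
{
	u32 w = 0xABCD1234;
	u32 fld = _hrt_get_bits(w, 8, 8);	/* (w >> 8) & 0xFF == 0x12 */

	w = _hrt_set_bits(w, 4, 8, 0x5A);	/* replace bits 4..11: 0xABCD15A4 */
	w = _hrt_set_lower_half(w, 0xBEEF);	/* replace bits 0..15: 0xABCDBEEF */
	w = _hrt_set_bit(w, 4, 1);		/* set bit 4:          0xABCDBEFF */
	return w + fld;
}
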
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h
new file mode 100644
index 000000000000..4e687f318c0e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PIPE_BINARYDESC_H__
+#define __IA_CSS_PIPE_BINARYDESC_H__
+
+#include <linux/math.h>
+
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_frame_public.h> /* ia_css_frame_info */
+#include <ia_css_binary.h> /* ia_css_binary_descr */
+
+/* @brief Get a binary descriptor for copy.
+ *
+ * @param[in] pipe
+ * @param[out] copy_desc
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_copy_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *copy_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for vfpp.
+ *
+ * @param[in] pipe
+ * @param[out] vfpp_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_vfpp_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *vf_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get numerator and denominator of bayer downscaling factor.
+ *
+ * @param[in] bds_factor: The bayer downscaling factor.
+ * (= The bds_factor member in the sh_css_bds_factor structure.)
+ * @param[out] bds: The rational fraction of the bayer downscaling factor.
+ * (= The respective member in the sh_css_bds_factor structure.)
+ * @return 0 or error code upon error.
+ *
+ */
+int sh_css_bds_factor_get_fract(unsigned int bds_factor, struct u32_fract *bds);
+
+/* @brief Get a binary descriptor for preview stage.
+ *
+ * @param[in] pipe
+ * @param[out] preview_descr
+ * @param[in/out] in_info
+ * @param[in/out] bds_out_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_pipe_get_preview_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *preview_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for video stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] video_descr
+ * @param[in/out] in_info
+ * @param[in/out] bds_out_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @param[in] stream_config_left_padding
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_pipe_get_video_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *video_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ int stream_config_left_padding);
+
+/* @brief Get a binary descriptor for yuv scaler stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] yuv_scaler_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] internal_out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_yuvscaler_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *yuv_scaler_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *internal_out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for capture pp stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] capture_pp_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_capturepp_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *capture_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for primary capture.
+ *
+ * @param[in] pipe
+ * @param[out] prim_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_primary_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *prim_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int stage_idx);
+
+/* @brief Get a binary descriptor for pre gdc stage.
+ *
+ * @param[in] pipe
+ * @param[out] pre_gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_pre_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for gdc stage.
+ *
+ * @param[in] pipe
+ * @param[out] gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for post gdc.
+ *
+ * @param[in] pipe
+ * @param[out] post_gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_post_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for de.
+ *
+ * @param[in] pipe
+ * @param[out] pre_de_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_pre_de_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_de_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for pre anr stage.
+ *
+ * @param[in] pipe
+ * @param[out] pre_anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_pre_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for ANR stage.
+ *
+ * @param[in] pipe
+ * @param[out] anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Get a binary descriptor for post anr stage.
+ *
+ * @param[in] pipe
+ * @param[out] post_anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_post_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/* @brief Get a binary descriptor for ldc stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] ldc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_ldc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *ldc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/* @brief Calculates the required BDS factor
+ *
+ * @param[in] input_res
+ * @param[in] output_res
+ * @param[in/out] bds_factor
+ * @return 0 or error code upon error.
+ */
+int binarydesc_calculate_bds_factor(
+ struct ia_css_resolution input_res,
+ struct ia_css_resolution output_res,
+ unsigned int *bds_factor);
+
+#endif /* __IA_CSS_PIPE_BINARYDESC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h
new file mode 100644
index 000000000000..61d1bf66fbd6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PIPE_STAGEDESC_H__
+#define __IA_CSS_PIPE_STAGEDESC_H__
+
+#include <ia_css_acc_types.h> /* ia_css_fw_info */
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+#include "ia_css_pipeline.h"
+#include "ia_css_pipeline_common.h"
+
+void ia_css_pipe_get_generic_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame);
+
+void ia_css_pipe_get_firmwares_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame,
+ const struct ia_css_fw_info *fw,
+ unsigned int mode);
+
+void ia_css_pipe_get_sp_func_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_frame *out_frame,
+ enum ia_css_pipeline_stage_sp_func sp_func,
+ unsigned int max_input_width);
+
+#endif /*__IA_CSS_PIPE_STAGEDESC__H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h
new file mode 100644
index 000000000000..a0bdcc37553f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PIPE_UTIL_H__
+#define __IA_CSS_PIPE_UTIL_H__
+
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+
+/* @brief Get Input format bits per pixel based on stream configuration of this
+ * pipe.
+ *
+ * @param[in] pipe
+ * @return bits per pixel for the underlying stream
+ *
+ */
+unsigned int ia_css_pipe_util_pipe_input_format_bpp(
+ const struct ia_css_pipe *const pipe);
+
+void ia_css_pipe_util_create_output_frames(
+ struct ia_css_frame *frames[]);
+
+void ia_css_pipe_util_set_output_frames(
+ struct ia_css_frame *frames[],
+ unsigned int idx,
+ struct ia_css_frame *frame);
+
+#endif /* __IA_CSS_PIPE_UTIL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c
new file mode 100644
index 000000000000..8d7925a7ee0c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/math.h>
+
+#include "ia_css_pipe_binarydesc.h"
+#include "ia_css_frame_format.h"
+#include "ia_css_pipe.h"
+#include "ia_css_pipe_util.h"
+#include "ia_css_util.h"
+#include "ia_css_debug.h"
+#include "sh_css_params.h"
+#include <assert_support.h>
+/* HRT_GDC_N */
+#include "gdc_device.h"
+
+/* This module provides binary descriptions used to find a binary. Since
+ * every stage is associated with a binary, it implicitly helps with stage
+ * description. Apart from providing a binary description, this module also
+ * populates the frame infos when required. */
+
+/* Generic descriptor for offline binaries. Internal function. */
+static void pipe_binarydesc_get_offline(
+ struct ia_css_pipe const *const pipe,
+ const int mode,
+ struct ia_css_binary_descr *descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info[],
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ /* in_info, out_info, vf_info can be NULL */
+ assert(pipe);
+ assert(descr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "pipe_binarydesc_get_offline() enter:\n");
+
+ descr->mode = mode;
+ descr->online = false;
+ descr->continuous = pipe->stream->config.continuous;
+ descr->striped = false;
+ descr->two_ppc = false;
+ descr->enable_yuv_ds = false;
+ descr->enable_high_speed = false;
+ descr->enable_dvs_6axis = false;
+ descr->enable_reduced_pipe = false;
+ descr->enable_dz = true;
+ descr->enable_xnr = false;
+ descr->enable_dpc = false;
+ descr->enable_tnr = false;
+ descr->enable_capture_pp_bli = false;
+ descr->enable_fractional_ds = false;
+ descr->dvs_env.width = 0;
+ descr->dvs_env.height = 0;
+ descr->stream_format = pipe->stream->config.input_config.format;
+ descr->in_info = in_info;
+ descr->bds_out_info = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ descr->out_info[i] = out_info[i];
+ descr->vf_info = vf_info;
+ descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+ descr->stream_config_left_padding = -1;
+}
+
+void ia_css_pipe_get_copy_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *copy_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ unsigned int i;
+ /* out_info can be NULL */
+ assert(pipe);
+ assert(in_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_COPY,
+ copy_descr, in_info, out_infos, vf_info);
+ copy_descr->online = true;
+ copy_descr->continuous = false;
+ copy_descr->two_ppc = (pipe->stream->config.pixels_per_clock == 2);
+ copy_descr->enable_dz = false;
+ copy_descr->isp_pipe_version = IA_CSS_PIPE_VERSION_1;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_vfpp_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *vf_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ unsigned int i;
+ /* out_info can be NULL ??? */
+ assert(pipe);
+ assert(in_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ in_info->raw_bit_depth = 0;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_VF_PP,
+ vf_pp_descr, in_info, out_infos, NULL);
+ vf_pp_descr->enable_fractional_ds = true;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+static struct u32_fract bds_factors_list[] = {
+ [SH_CSS_BDS_FACTOR_1_00] = {1, 1},
+ [SH_CSS_BDS_FACTOR_1_25] = {5, 4},
+ [SH_CSS_BDS_FACTOR_1_50] = {3, 2},
+ [SH_CSS_BDS_FACTOR_2_00] = {2, 1},
+ [SH_CSS_BDS_FACTOR_2_25] = {9, 4},
+ [SH_CSS_BDS_FACTOR_2_50] = {5, 2},
+ [SH_CSS_BDS_FACTOR_3_00] = {3, 1},
+ [SH_CSS_BDS_FACTOR_4_00] = {4, 1},
+ [SH_CSS_BDS_FACTOR_4_50] = {9, 2},
+ [SH_CSS_BDS_FACTOR_5_00] = {5, 1},
+ [SH_CSS_BDS_FACTOR_6_00] = {6, 1},
+ [SH_CSS_BDS_FACTOR_8_00] = {8, 1},
+};
+
+int sh_css_bds_factor_get_fract(unsigned int bds_factor, struct u32_fract *bds)
+{
+	/* Return an error if bds_factor is not a valid index into bds_factors_list */
+ if (bds_factor >= ARRAY_SIZE(bds_factors_list))
+ return -EINVAL;
+
+ *bds = bds_factors_list[bds_factor];
+ return 0;
+}
+
+int binarydesc_calculate_bds_factor(
+ struct ia_css_resolution input_res,
+ struct ia_css_resolution output_res,
+ unsigned int *bds_factor)
+{
+ unsigned int i;
+ unsigned int in_w = input_res.width,
+ in_h = input_res.height,
+ out_w = output_res.width, out_h = output_res.height;
+
+ unsigned int max_bds_factor = 8;
+ unsigned int max_rounding_margin = 2;
+ /* delta in pixels to account for rounding margin in the calculation */
+ unsigned int delta = max_bds_factor * max_rounding_margin;
+
+ /* Assert if the resolutions are not set */
+ assert(in_w != 0 && in_h != 0);
+ assert(out_w != 0 && out_h != 0);
+
+ /* Loop over all bds factors until a match is found */
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
+ unsigned int num = bds_factors_list[i].numerator;
+ unsigned int den = bds_factors_list[i].denominator;
+
+		/* Check width-wise and height-wise whether this bds_factor
+		 * satisfies the condition */
+ bool cond = (out_w * num / den + delta > in_w) &&
+ (out_w * num / den <= in_w) &&
+ (out_h * num / den + delta > in_h) &&
+ (out_h * num / den <= in_h);
+
+ if (cond) {
+ *bds_factor = i;
+ return 0;
+ }
+ }
+
+	/* Return an error since no suitable bds_factor was found */
+ return -EINVAL;
+}
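
As a worked example of the search above (illustrative values, not part of the patch): downscaling a 3264x2448 input to 1632x1224 selects the 2.00 factor, since 1632 * 2/1 == 3264 and 1224 * 2/1 == 2448 both land inside the 16-pixel rounding margin, while the 1.00, 1.25 and 1.50 factors fail the margin check.

static void bds_example(void)
{
	struct ia_css_resolution in  = { .width = 3264, .height = 2448 };
	struct ia_css_resolution out = { .width = 1632, .height = 1224 };
	unsigned int factor;

	if (!binarydesc_calculate_bds_factor(in, out, &factor)) {
		/* factor == SH_CSS_BDS_FACTOR_2_00 */
	}
}
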
+
+int ia_css_pipe_get_preview_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *preview_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ int err;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ int mode = IA_CSS_BINARY_MODE_PREVIEW;
+ unsigned int i;
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ /*
+ * Set up the info of the input frame with
+ * the ISP required resolution
+ */
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+ else
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ preview_descr, in_info, out_infos, vf_info);
+ if (pipe->stream->config.online) {
+ preview_descr->online = pipe->stream->config.online;
+ preview_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ }
+ preview_descr->stream_format = pipe->stream->config.input_config.format;
+
+ /* TODO: Remove this when bds_out_info is available! */
+ *bds_out_info = *in_info;
+
+ if (pipe->extra_config.enable_raw_binning) {
+ if (pipe->config.bayer_ds_out_res.width != 0 &&
+ pipe->config.bayer_ds_out_res.height != 0) {
+ bds_out_info->res.width =
+ pipe->config.bayer_ds_out_res.width;
+ bds_out_info->res.height =
+ pipe->config.bayer_ds_out_res.height;
+ bds_out_info->padded_width =
+ pipe->config.bayer_ds_out_res.width;
+ err =
+ binarydesc_calculate_bds_factor(in_info->res,
+ bds_out_info->res,
+ &preview_descr->required_bds_factor);
+ if (err)
+ return err;
+ } else {
+ bds_out_info->res.width = in_info->res.width / 2;
+ bds_out_info->res.height = in_info->res.height / 2;
+ bds_out_info->padded_width = in_info->padded_width / 2;
+ preview_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_2_00;
+ }
+ } else {
+		/* TODO: Remove this when bds_out_info is available! */
+ bds_out_info->res.width = in_info->res.width;
+ bds_out_info->res.height = in_info->res.height;
+ bds_out_info->padded_width = in_info->padded_width;
+ preview_descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+ }
+ pipe->required_bds_factor = preview_descr->required_bds_factor;
+
+ /* bayer ds and fractional ds cannot be enabled at the same time,
+ so we disable bds_out_info when fractional ds is used */
+ if (!pipe->extra_config.enable_fractional_ds)
+ preview_descr->bds_out_info = bds_out_info;
+ else
+ preview_descr->bds_out_info = NULL;
+ /*
+ ----Preview binary-----
+ --in-->|--out->|vf_veceven|--|--->vf
+ -----------------------
+ * Preview binary normally doesn't have a vf_port but
+ * instead it has an output port. However, the output is
+ * generated by vf_veceven module in which we might have
+ * a downscaling (by 1x, 2x, or 4x). Because the resolution
+ * might change, we need two different info, namely out_info
+ * & vf_info. In fill_binary_info we use out&vf info to
+ * calculate vf decimation factor.
+ */
+ *out_info = *vf_info;
+
+ /* In case of preview_ds binary, we can do any fractional amount
+ * of downscale, so there is no DS needed in vf_veceven. Therefore,
+ * out and vf infos will be the same. Otherwise, we set out resolution
+ * equal to in resolution. */
+ if (!pipe->extra_config.enable_fractional_ds) {
+ /* TODO: Change this when bds_out_info is available! */
+ out_info->res.width = bds_out_info->res.width;
+ out_info->res.height = bds_out_info->res.height;
+ out_info->padded_width = bds_out_info->padded_width;
+ }
+ preview_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+
+ preview_descr->enable_dpc = pipe->config.enable_dpc;
+
+ preview_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+int ia_css_pipe_get_video_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *video_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ int stream_config_left_padding)
+{
+ int mode = IA_CSS_BINARY_MODE_VIDEO;
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ int err = 0;
+ bool stream_dz_config = false;
+
+ /* vf_info can be NULL */
+ assert(pipe);
+ assert(in_info);
+ /* assert(vf_info != NULL); */
+ IA_CSS_ENTER_PRIVATE("");
+
+	/* The solution below is not optimal; we should move to using
+	 * ia_css_pipe_get_copy_binarydesc(). For now this fixes things: this
+	 * code used to be here but was removed with gerrit 8908 because it was
+	 * wrong for Skycam; however, 240x still needs it.
+	 */
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ video_descr, in_info, out_infos, vf_info);
+
+ if (pipe->stream->config.online) {
+ video_descr->online = pipe->stream->config.online;
+ video_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ }
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO) {
+ stream_dz_config =
+ ((pipe->stream->isp_params_configs->dz_config.dx !=
+ HRT_GDC_N)
+ || (pipe->stream->isp_params_configs->dz_config.dy !=
+ HRT_GDC_N));
+
+ video_descr->enable_dz = pipe->config.enable_dz
+ || stream_dz_config;
+ video_descr->dvs_env = pipe->config.dvs_envelope;
+ video_descr->enable_yuv_ds = pipe->extra_config.enable_yuv_ds;
+ video_descr->enable_high_speed =
+ pipe->extra_config.enable_high_speed;
+ video_descr->enable_dvs_6axis =
+ pipe->extra_config.enable_dvs_6axis;
+ video_descr->enable_reduced_pipe =
+ pipe->extra_config.enable_reduced_pipe;
+ video_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ video_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+ video_descr->enable_dpc =
+ pipe->config.enable_dpc;
+ video_descr->enable_tnr =
+ pipe->config.enable_tnr;
+
+ if (pipe->extra_config.enable_raw_binning) {
+ if (pipe->config.bayer_ds_out_res.width != 0 &&
+ pipe->config.bayer_ds_out_res.height != 0) {
+ bds_out_info->res.width =
+ pipe->config.bayer_ds_out_res.width;
+ bds_out_info->res.height =
+ pipe->config.bayer_ds_out_res.height;
+ bds_out_info->padded_width =
+ pipe->config.bayer_ds_out_res.width;
+ err =
+ binarydesc_calculate_bds_factor(
+ in_info->res, bds_out_info->res,
+ &video_descr->required_bds_factor);
+ if (err)
+ return err;
+ } else {
+ bds_out_info->res.width =
+ in_info->res.width / 2;
+ bds_out_info->res.height =
+ in_info->res.height / 2;
+ bds_out_info->padded_width =
+ in_info->padded_width / 2;
+ video_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_2_00;
+ }
+ } else {
+ bds_out_info->res.width = in_info->res.width;
+ bds_out_info->res.height = in_info->res.height;
+ bds_out_info->padded_width = in_info->padded_width;
+ video_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_1_00;
+ }
+
+ pipe->required_bds_factor = video_descr->required_bds_factor;
+
+ /* bayer ds and fractional ds cannot be enabled
+ at the same time, so we disable bds_out_info when
+ fractional ds is used */
+ if (!pipe->extra_config.enable_fractional_ds)
+ video_descr->bds_out_info = bds_out_info;
+ else
+ video_descr->bds_out_info = NULL;
+
+ video_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+ video_descr->stream_config_left_padding = stream_config_left_padding;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+void ia_css_pipe_get_yuvscaler_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *yuv_scaler_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *internal_out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame_info *this_vf_info = NULL;
+
+ assert(pipe);
+ assert(in_info);
+ /* Note: if the following assert fails, the number of ports has been
+ * changed; in that case an additional initializer must be added
+ * a few lines below after which this assert can be updated.
+ */
+ assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == 2);
+ IA_CSS_ENTER_PRIVATE("");
+
+ in_info->padded_width = in_info->res.width;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+ out_infos[0] = out_info;
+ out_infos[1] = internal_out_info;
+ /* add initializers here if
+ * assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == ...);
+ * fails
+ */
+
+ if (vf_info) {
+ this_vf_info = (vf_info->res.width == 0 &&
+ vf_info->res.height == 0) ? NULL : vf_info;
+ }
+
+ pipe_binarydesc_get_offline(pipe,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ yuv_scaler_descr,
+ in_info, out_infos, this_vf_info);
+
+ yuv_scaler_descr->enable_fractional_ds = true;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_capturepp_binarydesc(
+ struct ia_css_pipe *const pipe,
+ struct ia_css_binary_descr *capture_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* the in_info is only used for resolution to enable
+ bayer down scaling. */
+ if (pipe->out_yuv_ds_input_info.res.width)
+ *in_info = pipe->out_yuv_ds_input_info;
+ else
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ capture_pp_descr,
+ in_info, out_infos, vf_info);
+
+ capture_pp_descr->enable_capture_pp_bli =
+ pipe->config.default_capture_config.enable_capture_pp_bli;
+ capture_pp_descr->enable_fractional_ds = true;
+ capture_pp_descr->enable_xnr =
+ pipe->config.default_capture_config.enable_xnr != 0;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+/* lookup table for high quality primary binaries */
+static unsigned int primary_hq_binary_modes[NUM_PRIMARY_HQ_STAGES] = {
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5
+};
+
+void ia_css_pipe_get_primary_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *prim_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int stage_idx)
+{
+ enum ia_css_pipe_version pipe_version = pipe->config.isp_pipe_version;
+ int mode;
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(stage_idx < NUM_PRIMARY_HQ_STAGES);
+ /* vf_info can be NULL - example video_binarydescr */
+ /*assert(vf_info != NULL);*/
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
+ mode = primary_hq_binary_modes[stage_idx];
+ else
+ mode = IA_CSS_BINARY_MODE_PRIMARY;
+
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+
+ if (pipe->stream->config.pack_raw_pixels)
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ else
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ prim_descr, in_info, out_infos, vf_info);
+
+ if (pipe->stream->config.online &&
+ pipe->stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
+ prim_descr->online = true;
+ prim_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ prim_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ if (mode == IA_CSS_BINARY_MODE_PRIMARY) {
+ prim_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ prim_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+		/* We have both striped and non-striped primary binaries;
+		 * if continuous viewfinder is required, we must select a
+		 * striped one. Otherwise we prefer a non-striped one,
+		 * since it has better performance. */
+ if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
+ prim_descr->striped = false;
+ else
+ prim_descr->striped = prim_descr->continuous &&
+ (!pipe->stream->stop_copy_preview || !pipe->stream->disable_cont_vf);
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_gdc_descr, in_info, out_infos, NULL);
+ pre_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_QPLANE6;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_GDC,
+ gdc_descr, in_info, out_infos, NULL);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_post_gdc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420_16;
+ in_info->raw_bit_depth = 16;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
+ post_gdc_descr, in_info, out_infos, vf_info);
+
+ post_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_de_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_de_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_de_descr, in_info, out_infos, NULL);
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2) {
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_DE,
+ pre_de_descr, in_info, out_infos, NULL);
+ }
+
+ if (pipe->stream->config.online) {
+ pre_de_descr->online = true;
+ pre_de_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ pre_de_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ pre_de_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *pre_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_anr_descr, in_info, out_infos, NULL);
+
+ if (pipe->stream->config.online) {
+ pre_anr_descr->online = true;
+ pre_anr_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ pre_anr_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ pre_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ANR_ELEMENT_BITS;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_ANR,
+ anr_descr, in_info, out_infos, NULL);
+
+ anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_post_anr_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *post_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ assert(vf_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ANR_ELEMENT_BITS;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
+ post_anr_descr, in_info, out_infos, vf_info);
+
+ post_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_ldc_binarydesc(
+ struct ia_css_pipe const *const pipe,
+ struct ia_css_binary_descr *ldc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe);
+ assert(in_info);
+ assert(out_info);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ ldc_descr, in_info, out_infos, NULL);
+ ldc_descr->enable_dvs_6axis =
+ pipe->extra_config.enable_dvs_6axis;
+ IA_CSS_LEAVE_PRIVATE("");
+}
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c
new file mode 100644
index 000000000000..a9f736398f50
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_pipe_stagedesc.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+
+void ia_css_pipe_get_generic_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("stage_desc = %p, binary = %p, out_frame = %p, in_frame = %p, vf_frame = %p",
+ stage_desc, binary, out_frame, in_frame, vf_frame);
+
+ assert(stage_desc && binary && binary->info);
+ if (!stage_desc || !binary || !binary->info) {
+ IA_CSS_ERROR("invalid arguments");
+ goto ERR;
+ }
+
+ stage_desc->binary = binary;
+ stage_desc->firmware = NULL;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = binary->info->sp.pipeline.mode;
+ stage_desc->in_frame = in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = out_frame[i];
+ }
+ stage_desc->vf_frame = vf_frame;
+ERR:
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_firmwares_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame,
+ const struct ia_css_fw_info *fw,
+ unsigned int mode)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_get_firmwares_stage_desc() enter:\n");
+ stage_desc->binary = binary;
+ stage_desc->firmware = fw;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = mode;
+ stage_desc->in_frame = in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = out_frame[i];
+ }
+ stage_desc->vf_frame = vf_frame;
+}
+
+void ia_css_pipe_get_sp_func_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_frame *out_frame,
+ enum ia_css_pipeline_stage_sp_func sp_func,
+ unsigned int max_input_width)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_get_sp_func_stage_desc() enter:\n");
+ stage_desc->binary = NULL;
+ stage_desc->firmware = NULL;
+ stage_desc->sp_func = sp_func;
+ stage_desc->max_input_width = max_input_width;
+ stage_desc->mode = (unsigned int)-1;
+ stage_desc->in_frame = NULL;
+ stage_desc->out_frame[0] = out_frame;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = NULL;
+ }
+ stage_desc->vf_frame = NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c
new file mode 100644
index 000000000000..c7c42b472cc7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_pipe_util.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_pipe.h"
+#include "ia_css_util.h"
+#include "assert_support.h"
+
+unsigned int ia_css_pipe_util_pipe_input_format_bpp(
+ const struct ia_css_pipe *const pipe)
+{
+ assert(pipe);
+ assert(pipe->stream);
+
+ return ia_css_util_input_format_bpp(pipe->stream->config.input_config.format,
+ pipe->stream->config.pixels_per_clock == 2);
+}
+
+void ia_css_pipe_util_create_output_frames(
+ struct ia_css_frame *frames[])
+{
+ unsigned int i;
+
+ assert(frames);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ frames[i] = NULL;
+ }
+}
+
+void ia_css_pipe_util_set_output_frames(
+ struct ia_css_frame *frames[],
+ unsigned int idx,
+ struct ia_css_frame *frame)
+{
+ assert(idx < IA_CSS_BINARY_MAX_OUTPUT_PORTS);
+
+ frames[idx] = frame;
+}
diff --git a/drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h b/drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h
new file mode 100644
index 000000000000..24cd99a659ca
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_UTIL_H__
+#define __IA_CSS_UTIL_H__
+
+#include <linux/errno.h>
+
+#include <ia_css_err.h>
+#include <type_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_stream_public.h>
+#include <ia_css_stream_format.h>
+
+/* @brief convert "errno" error code to "ia_css_err" error code
+ *
+ * @param[in] "errno" error code
+ * @return "ia_css_err" error code
+ *
+ */
+int ia_css_convert_errno(
+ int in_err);
+
+/* @brief check vf frame info.
+ *
+ * @param[in] info
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_util_check_vf_info(
+ const struct ia_css_frame_info *const info);
+
+/* @brief check input configuration.
+ *
+ * @param[in] stream_config
+ * @param[in] must_be_raw
+ * @param[in] must_be_yuv
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_util_check_input(
+ const struct ia_css_stream_config *const stream_config,
+ bool must_be_raw,
+ bool must_be_yuv);
+
+/* @brief check vf and out frame info.
+ *
+ * @param[in] out_info
+ * @param[in] vf_info
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_util_check_vf_out_info(
+ const struct ia_css_frame_info *const out_info,
+ const struct ia_css_frame_info *const vf_info);
+
+/* @brief check width and height
+ *
+ * @param[in] width
+ * @param[in] height
+ * @return 0 on success, or a negative error code on failure.
+ *
+ */
+int ia_css_util_check_res(
+ unsigned int width,
+ unsigned int height);
+
+/* ISP2401 */
+/* @brief compare resolutions (less or equal)
+ *
+ * @param[in] a resolution
+ * @param[in] b resolution
+ * @return true if both dimensions of a are less than or
+ *		equal to those of b, false otherwise
+ *
+ */
+bool ia_css_util_res_leq(
+ struct ia_css_resolution a,
+ struct ia_css_resolution b);
+
+/* ISP2401 */
+/**
+ * @brief Check if resolution is zero
+ *
+ * @param[in] resolution The resolution to check
+ *
+ * @returns true if either dimension of the resolution is zero
+ */
+bool ia_css_util_resolution_is_zero(
+ const struct ia_css_resolution resolution);
+
+/* @brief compute bits per pixel for the given input format
+ *
+ * @param[in] stream_format
+ * @param[in] two_ppc
+ * @return bits per pixel based on given parameters.
+ *
+ */
+unsigned int ia_css_util_input_format_bpp(
+ enum atomisp_input_format stream_format,
+ bool two_ppc);
+
+/* @brief check if the input format is raw
+ *
+ * @param[in] stream_format
+ * @return true if the input format is raw or false otherwise
+ *
+ */
+bool ia_css_util_is_input_format_raw(
+ enum atomisp_input_format stream_format);
+
+/* @brief check if the input format is yuv
+ *
+ * @param[in] stream_format
+ * @return true if the input format is yuv or false otherwise
+ *
+ */
+bool ia_css_util_is_input_format_yuv(
+ enum atomisp_input_format stream_format);
+
+#endif /* __IA_CSS_UTIL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/camera/util/src/util.c b/drivers/staging/media/atomisp/pci/camera/util/src/util.c
new file mode 100644
index 000000000000..3a7abdc8f6b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/camera/util/src/util.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_util.h"
+#include <ia_css_frame.h>
+#include <assert_support.h>
+#include <math_support.h>
+
+/* for ia_css_binary_max_vf_width() */
+#include "ia_css_binary.h"
+
+/* MW: Table look-up ??? */
+unsigned int ia_css_util_input_format_bpp(
+ enum atomisp_input_format format,
+ bool two_ppc)
+{
+ unsigned int rval = 0;
+
+ switch (format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ rval = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ rval = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ rval = 16;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ rval = 4;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ rval = 5;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ rval = 65;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ rval = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ rval = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ rval = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ if (two_ppc)
+ rval = 14;
+ else
+ rval = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ if (two_ppc)
+ rval = 16;
+ else
+ rval = 12;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+ return rval;
+}
+
+int ia_css_util_check_vf_info(
+ const struct ia_css_frame_info *const info)
+{
+ int err;
+ unsigned int max_vf_width;
+
+ assert(info);
+ err = ia_css_frame_check_info(info);
+ if (err)
+ return err;
+ max_vf_width = ia_css_binary_max_vf_width();
+ if (max_vf_width != 0 && info->res.width > max_vf_width * 2)
+ return -EINVAL;
+ return 0;
+}
+
+int ia_css_util_check_vf_out_info(
+ const struct ia_css_frame_info *const out_info,
+ const struct ia_css_frame_info *const vf_info)
+{
+ int err;
+
+ assert(out_info);
+ assert(vf_info);
+
+ err = ia_css_frame_check_info(out_info);
+ if (err)
+ return err;
+ err = ia_css_util_check_vf_info(vf_info);
+ if (err)
+ return err;
+ return 0;
+}
+
+/* ISP2401 */
+bool ia_css_util_res_leq(struct ia_css_resolution a, struct ia_css_resolution b)
+{
+ return a.width <= b.width && a.height <= b.height;
+}
+
+/* ISP2401 */
+bool ia_css_util_resolution_is_zero(const struct ia_css_resolution resolution)
+{
+ return (resolution.width == 0) || (resolution.height == 0);
+}
+
+int ia_css_util_check_res(unsigned int width, unsigned int height)
+{
+ const struct ia_css_resolution resolution = { .width = width, .height = height };
+
+ if (ia_css_util_resolution_is_zero(resolution))
+ return -EINVAL;
+
+	/* height can be an odd number for jpeg/embedded data from ISYS2401 */
+ if (width & 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+bool ia_css_util_is_input_format_raw(enum atomisp_input_format format)
+{
+ return ((format == ATOMISP_INPUT_FORMAT_RAW_6) ||
+ (format == ATOMISP_INPUT_FORMAT_RAW_7) ||
+ (format == ATOMISP_INPUT_FORMAT_RAW_8) ||
+ (format == ATOMISP_INPUT_FORMAT_RAW_10) ||
+ (format == ATOMISP_INPUT_FORMAT_RAW_12));
+ /* raw_14 and raw_16 are not supported as input formats to the ISP.
+ * They can only be copied to a frame in memory using the
+ * copy binary.
+ */
+}
+
+bool ia_css_util_is_input_format_yuv(enum atomisp_input_format format)
+{
+ return format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY ||
+ format == ATOMISP_INPUT_FORMAT_YUV420_8 ||
+ format == ATOMISP_INPUT_FORMAT_YUV420_10 ||
+ format == ATOMISP_INPUT_FORMAT_YUV420_16 ||
+ format == ATOMISP_INPUT_FORMAT_YUV422_8 ||
+ format == ATOMISP_INPUT_FORMAT_YUV422_10 ||
+ format == ATOMISP_INPUT_FORMAT_YUV422_16;
+}
+
+int ia_css_util_check_input(
+ const struct ia_css_stream_config *const stream_config,
+ bool must_be_raw,
+ bool must_be_yuv)
+{
+ assert(stream_config);
+
+ if (!stream_config)
+ return -EINVAL;
+
+ if (stream_config->input_config.effective_res.width == 0 ||
+ stream_config->input_config.effective_res.height == 0)
+ return -EINVAL;
+ if (must_be_raw &&
+ !ia_css_util_is_input_format_raw(stream_config->input_config.format))
+ return -EINVAL;
+
+ if (must_be_yuv &&
+ !ia_css_util_is_input_format_yuv(stream_config->input_config.format))
+ return -EINVAL;
+
+ return 0;
+}
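
One non-obvious case in the bits-per-pixel table above is that RAW_14 and RAW_16 report only 12 bpp unless two pixels are transferred per clock. A standalone sketch of just that branch (plain C, mirroring the switch cases rather than calling the driver function):

#include <stdbool.h>
#include <stdio.h>

/* mirrors the RAW_14/RAW_16 branches of ia_css_util_input_format_bpp() */
static unsigned int raw14_16_bpp(unsigned int raw_bits, bool two_ppc)
{
	if (two_ppc)
		return raw_bits;	/* 14 or 16 bits per pixel */
	return 12;			/* single pixel per clock: packed to 12 bpp */
}

int main(void)
{
	printf("RAW16, 1 ppc: %u bpp\n", raw14_16_bpp(16, false));	/* 12 */
	printf("RAW16, 2 ppc: %u bpp\n", raw14_16_bpp(16, true));	/* 16 */
	printf("RAW14, 1 ppc: %u bpp\n", raw14_16_bpp(14, false));	/* 12 */
	return 0;
}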
diff --git a/drivers/staging/media/atomisp/pci/cell_params.h b/drivers/staging/media/atomisp/pci/cell_params.h
new file mode 100644
index 000000000000..a2a07b78f2a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/cell_params.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _cell_params_h
+#define _cell_params_h
+
+#define SP_PMEM_LOG_WIDTH_BITS 6 /*Width of PC, 64 bits, 8 bytes*/
+#define SP_ICACHE_TAG_BITS 4 /*size of tag*/
+#define SP_ICACHE_SET_BITS 8 /* 256 sets*/
+#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2 way associative*/
+#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity*/
+
+#define SP_ICACHE_ADDRESS_BITS \
+ (SP_ICACHE_TAG_BITS + SP_ICACHE_BLOCK_ADDRESS_BITS)
+
+#define SP_PMEM_DEPTH BIT(SP_ICACHE_ADDRESS_BITS)
+
+#define SP_FIFO_0_DEPTH 0
+#define SP_FIFO_1_DEPTH 0
+#define SP_FIFO_2_DEPTH 0
+#define SP_FIFO_3_DEPTH 0
+#define SP_FIFO_4_DEPTH 0
+#define SP_FIFO_5_DEPTH 0
+#define SP_FIFO_6_DEPTH 0
+#define SP_FIFO_7_DEPTH 0
+
+#define SP_SLV_BUS_MAXBURSTSIZE 1
+
+#endif /* _cell_params_h */
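
The instruction-cache geometry above fixes the derived sizes: 4 tag bits plus 11 block-address bits give a 15-bit address, so SP_PMEM_DEPTH is 2^15 = 32768 entries. A throwaway check of that arithmetic (plain C, with BIT() spelled out since linux/bits.h is not pulled in here):

#include <stdio.h>

#define SP_ICACHE_TAG_BITS		4
#define SP_ICACHE_BLOCK_ADDRESS_BITS	11
#define SP_ICACHE_ADDRESS_BITS \
	(SP_ICACHE_TAG_BITS + SP_ICACHE_BLOCK_ADDRESS_BITS)

int main(void)
{
	printf("SP_ICACHE_ADDRESS_BITS = %d\n", SP_ICACHE_ADDRESS_BITS);	/* 15 */
	printf("SP_PMEM_DEPTH = %lu\n", 1UL << SP_ICACHE_ADDRESS_BITS);		/* 32768 */
	return 0;
}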
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h
new file mode 100644
index 000000000000..ab9d091acb4e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __CSI_RX_GLOBAL_H_INCLUDED__
+#define __CSI_RX_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+typedef enum {
+ CSI_MIPI_PACKET_TYPE_UNDEFINED = 0,
+ CSI_MIPI_PACKET_TYPE_LONG,
+ CSI_MIPI_PACKET_TYPE_SHORT,
+ CSI_MIPI_PACKET_TYPE_RESERVED,
+ N_CSI_MIPI_PACKET_TYPE
+} csi_mipi_packet_type_t;
+
+typedef struct csi_rx_backend_lut_entry_s csi_rx_backend_lut_entry_t;
+struct csi_rx_backend_lut_entry_s {
+ u32 long_packet_entry;
+ u32 short_packet_entry;
+};
+
+typedef struct csi_rx_backend_cfg_s csi_rx_backend_cfg_t;
+struct csi_rx_backend_cfg_s {
+ /* LUT entry for the packet */
+ csi_rx_backend_lut_entry_t lut_entry;
+
+ /* can be derived from the Data Type */
+ csi_mipi_packet_type_t csi_mipi_packet_type;
+
+ struct {
+ bool comp_enable;
+ u32 virtual_channel;
+ u32 data_type;
+ u32 comp_scheme;
+ u32 comp_predictor;
+ u32 comp_bit_idx;
+ } csi_mipi_cfg;
+};
+
+typedef struct csi_rx_frontend_cfg_s csi_rx_frontend_cfg_t;
+struct csi_rx_frontend_cfg_s {
+ u32 active_lanes;
+};
+
+extern const u32 N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID];
+extern const u32 N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID];
+extern const u32 N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID];
+/* sid_width for CSI_RX_BACKEND<N>_ID */
+extern const u32 N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID];
+
+#endif /* __CSI_RX_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c
new file mode 100644
index 000000000000..f82522e2da94
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+#include "csi_rx_global.h"
+
+const u32 N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
+ 4, /* 4 entries at CSI_RX_BACKEND0_ID*/
+ 4, /* 4 entries at CSI_RX_BACKEND1_ID*/
+ 4 /* 4 entries at CSI_RX_BACKEND2_ID*/
+};
+
+const u32 N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
+ 8, /* 8 entries at CSI_RX_BACKEND0_ID*/
+ 4, /* 4 entries at CSI_RX_BACKEND1_ID*/
+ 4 /* 4 entries at CSI_RX_BACKEND2_ID*/
+};
+
+const u32 N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID] = {
+	N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FRONTEND0_ID */
+	N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FRONTEND1_ID */
+	N_CSI_RX_DLANE_ID /* 4 dlanes for CSI_RX_FRONTEND2_ID */
+};
+
+/* sid_width for CSI_RX_BACKEND<N>_ID */
+const u32 N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID] = {
+ 3,
+ 2,
+ 2
+};
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h
new file mode 100644
index 000000000000..81e83554d317
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __CSI_RX_LOCAL_H_INCLUDED__
+#define __CSI_RX_LOCAL_H_INCLUDED__
+
+#include "csi_rx_global.h"
+#define N_CSI_RX_BE_MIPI_COMP_FMT_REG 4
+#define N_CSI_RX_BE_MIPI_CUSTOM_PEC 12
+#define N_CSI_RX_BE_SHORT_PKT_LUT 4
+#define N_CSI_RX_BE_LONG_PKT_LUT 8
+typedef struct csi_rx_fe_ctrl_state_s csi_rx_fe_ctrl_state_t;
+typedef struct csi_rx_fe_ctrl_lane_s csi_rx_fe_ctrl_lane_t;
+typedef struct csi_rx_be_ctrl_state_s csi_rx_be_ctrl_state_t;
+/*mipi_backend_custom_mode_pixel_extraction_config*/
+typedef struct csi_rx_be_ctrl_pec_s csi_rx_be_ctrl_pec_t;
+
+struct csi_rx_fe_ctrl_lane_s {
+ hrt_data termen;
+ hrt_data settle;
+};
+
+struct csi_rx_fe_ctrl_state_s {
+ hrt_data enable;
+ hrt_data nof_enable_lanes;
+ hrt_data error_handling;
+ hrt_data status;
+ hrt_data status_dlane_hs;
+ hrt_data status_dlane_lp;
+ csi_rx_fe_ctrl_lane_t clane;
+ csi_rx_fe_ctrl_lane_t dlane[N_CSI_RX_DLANE_ID];
+};
+
+struct csi_rx_be_ctrl_state_s {
+ hrt_data enable;
+ hrt_data status;
+ hrt_data comp_format_reg[N_CSI_RX_BE_MIPI_COMP_FMT_REG];
+ hrt_data raw16;
+ hrt_data raw18;
+ hrt_data force_raw8;
+ hrt_data irq_status;
+ hrt_data custom_mode_enable;
+ hrt_data custom_mode_data_state;
+ hrt_data pec[N_CSI_RX_BE_MIPI_CUSTOM_PEC];
+ hrt_data custom_mode_valid_eop_config;
+ hrt_data global_lut_disregard_reg;
+ hrt_data packet_status_stall;
+ hrt_data short_packet_lut_entry[N_CSI_RX_BE_SHORT_PKT_LUT];
+ hrt_data long_packet_lut_entry[N_CSI_RX_BE_LONG_PKT_LUT];
+};
+#endif /* __CSI_RX_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h
new file mode 100644
index 000000000000..989f55bec519
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __CSI_RX_PRIVATE_H_INCLUDED__
+#define __CSI_RX_PRIVATE_H_INCLUDED__
+
+#include "rx_csi_defs.h"
+#include "mipi_backend_defs.h"
+#include "csi_rx.h"
+
+#include "device_access.h" /* ia_css_device_load_uint32 */
+
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline hrt_data csi_rx_fe_ctrl_reg_load(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_CSI_RX_FRONTEND_ID);
+ assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(CSI_RX_FE_CTRL_BASE[ID] + reg * sizeof(
+ hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_reg_store(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_CSI_RX_FRONTEND_ID);
+ assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(CSI_RX_FE_CTRL_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+}
+
+/**
+ * @brief Load the register value.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline hrt_data csi_rx_be_ctrl_reg_load(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_CSI_RX_BACKEND_ID);
+ assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg * sizeof(
+ hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+static inline void csi_rx_be_ctrl_reg_store(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_CSI_RX_BACKEND_ID);
+ assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+}
+
+/* end of DLI */
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the state of the csi rx fe dlane process.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_get_dlane_state(
+ const csi_rx_frontend_ID_t ID,
+ const u32 lane,
+ csi_rx_fe_ctrl_lane_t *dlane_state)
+{
+ dlane_state->termen =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane));
+ dlane_state->settle =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane));
+}
+
+/**
+ * @brief Get the csi rx fe state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_get_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state)
+{
+ u32 i;
+
+ state->enable =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ENABLE_REG_IDX);
+ state->nof_enable_lanes =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX);
+ state->error_handling =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ERROR_HANDLING_REG_IDX);
+ state->status =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_REG_IDX);
+ state->status_dlane_hs =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX);
+ state->status_dlane_lp =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX);
+ state->clane.termen =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX);
+ state->clane.settle =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX);
+
+ /*
+ * Get the values of the register-set per
+ * dlane.
+ */
+ for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) {
+ csi_rx_fe_ctrl_get_dlane_state(
+ ID,
+ i,
+ &state->dlane[i]);
+ }
+}
+
+/**
+ * @brief dump the csi rx fe state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_fe_ctrl_dump_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state)
+{
+ u32 i;
+
+ ia_css_print("CSI RX FE STATE Controller %d Enable state 0x%x\n", ID,
+ state->enable);
+ ia_css_print("CSI RX FE STATE Controller %d No Of enable lanes 0x%x\n", ID,
+ state->nof_enable_lanes);
+ ia_css_print("CSI RX FE STATE Controller %d Error handling 0x%x\n", ID,
+ state->error_handling);
+ ia_css_print("CSI RX FE STATE Controller %d Status 0x%x\n", ID, state->status);
+ ia_css_print("CSI RX FE STATE Controller %d Status Dlane HS 0x%x\n", ID,
+ state->status_dlane_hs);
+ ia_css_print("CSI RX FE STATE Controller %d Status Dlane LP 0x%x\n", ID,
+ state->status_dlane_lp);
+ ia_css_print("CSI RX FE STATE Controller %d Status term enable LP 0x%x\n", ID,
+ state->clane.termen);
+ ia_css_print("CSI RX FE STATE Controller %d Status term settle LP 0x%x\n", ID,
+ state->clane.settle);
+
+ /*
+	 * Print the values of the register-set per
+ * dlane.
+ */
+ for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) {
+ ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d termen 0x%x\n", ID, i,
+ state->dlane[i].termen);
+ ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d settle 0x%x\n", ID, i,
+ state->dlane[i].settle);
+ }
+}
+
+/**
+ * @brief Get the csi rx be state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_be_ctrl_get_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state)
+{
+ u32 i;
+
+ state->enable =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_ENABLE_REG_IDX);
+
+ state->status =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_STATUS_REG_IDX);
+
+ for (i = 0; i < N_CSI_RX_BE_MIPI_COMP_FMT_REG ; i++) {
+ state->comp_format_reg[i] =
+ csi_rx_be_ctrl_reg_load(ID,
+ _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX + i);
+ }
+
+ state->raw16 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX);
+
+ state->raw18 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX);
+ state->force_raw8 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX);
+ state->irq_status =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX);
+#if 0 /* device access error for these registers */
+ /* ToDo: rootcause this failure */
+ state->custom_mode_enable =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_EN_REG_IDX);
+
+ state->custom_mode_data_state =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX);
+ for (i = 0; i < N_CSI_RX_BE_MIPI_CUSTOM_PEC ; i++) {
+ state->pec[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX + i);
+ }
+ state->custom_mode_valid_eop_config =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX);
+#endif
+ state->global_lut_disregard_reg =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX);
+ state->packet_status_stall =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX);
+ /*
+ * Get the values of the register-set per
+ * lut.
+ */
+ for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) {
+ state->short_packet_lut_entry[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX + i);
+ }
+ for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) {
+ state->long_packet_lut_entry[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX + i);
+ }
+}
+
+/**
+ * @brief Dump the csi rx be state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+static inline void csi_rx_be_ctrl_dump_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state)
+{
+ u32 i;
+
+ ia_css_print("CSI RX BE STATE Controller %d Enable 0x%x\n", ID, state->enable);
+ ia_css_print("CSI RX BE STATE Controller %d Status 0x%x\n", ID, state->status);
+
+ for (i = 0; i < N_CSI_RX_BE_MIPI_COMP_FMT_REG ; i++) {
+		ia_css_print("CSI RX BE STATE Controller %d comp format reg vc%d value 0x%x\n",
+			     ID, i, state->comp_format_reg[i]);
+ }
+ ia_css_print("CSI RX BE STATE Controller %d RAW16 0x%x\n", ID, state->raw16);
+ ia_css_print("CSI RX BE STATE Controller %d RAW18 0x%x\n", ID, state->raw18);
+ ia_css_print("CSI RX BE STATE Controller %d Force RAW8 0x%x\n", ID,
+ state->force_raw8);
+ ia_css_print("CSI RX BE STATE Controller %d IRQ state 0x%x\n", ID,
+ state->irq_status);
+#if 0 /* ToDo:Getting device access error for this register */
+ for (i = 0; i < N_CSI_RX_BE_MIPI_CUSTOM_PEC ; i++) {
+ ia_css_print("CSI RX BE STATE Controller %d PEC ID %d custom pec 0x%x\n", ID, i,
+ state->pec[i]);
+ }
+#endif
+ ia_css_print("CSI RX BE STATE Controller %d Global LUT disregard reg 0x%x\n",
+ ID, state->global_lut_disregard_reg);
+ ia_css_print("CSI RX BE STATE Controller %d packet stall reg 0x%x\n", ID,
+ state->packet_status_stall);
+ /*
+	 * Print the values of the register-set per
+ * lut.
+ */
+ for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) {
+ ia_css_print("CSI RX BE STATE Controller ID %d Short packet entry %d short packet lut id 0x%x\n",
+ ID, i,
+ state->short_packet_lut_entry[i]);
+ }
+ for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) {
+ ia_css_print("CSI RX BE STATE Controller ID %d Long packet entry %d long packet lut id 0x%x\n",
+ ID, i,
+ state->long_packet_lut_entry[i]);
+ }
+}
+
+/* end of NCI */
+
+#endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */
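
All of the register accessors in this header follow the same device-level pattern: a register index is scaled by sizeof(hrt_data) and added to the controller's base address before the 32-bit load or store. A standalone sketch of that mapping (plain C; hrt_data is assumed here to be a 32-bit MMIO word and the base address is made up):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t hrt_data;	/* assumption: one MMIO word is 32 bits */
typedef uint32_t hrt_address;

/* same index-to-byte-address mapping as csi_rx_fe/be_ctrl_reg_load/store() */
static hrt_address reg_byte_addr(hrt_address base, unsigned int reg_idx)
{
	return base + reg_idx * sizeof(hrt_data);
}

int main(void)
{
	hrt_address base = 0x100;	/* hypothetical controller base */

	printf("reg 0 -> 0x%x\n", (unsigned int)reg_byte_addr(base, 0));	/* 0x100 */
	printf("reg 3 -> 0x%x\n", (unsigned int)reg_byte_addr(base, 3));	/* 0x10c */
	return 0;
}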
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c
new file mode 100644
index 000000000000..a99928a13e75
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <type_support.h>
+#include "system_global.h"
+#include "ibuf_ctrl_global.h"
+
+const u32 N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID] = {
+ 8, /* IBUF_CTRL0_ID supports at most 8 processes */
+ 4, /* IBUF_CTRL1_ID supports at most 4 processes */
+ 4 /* IBUF_CTRL2_ID supports at most 4 processes */
+};
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h
new file mode 100644
index 000000000000..814687251ec5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IBUF_CTRL_LOCAL_H_INCLUDED__
+#define __IBUF_CTRL_LOCAL_H_INCLUDED__
+
+#include "ibuf_ctrl_global.h"
+#include "ibuf_ctrl_local.h"
+
+typedef struct ibuf_ctrl_proc_state_s ibuf_ctrl_proc_state_t;
+typedef struct ibuf_ctrl_state_s ibuf_ctrl_state_t;
+
+struct ibuf_ctrl_proc_state_s {
+ hrt_data num_items;
+ hrt_data num_stores;
+ hrt_data dma_channel;
+ hrt_data dma_command;
+ hrt_data ibuf_st_addr;
+ hrt_data ibuf_stride;
+ hrt_data ibuf_end_addr;
+ hrt_data dest_st_addr;
+ hrt_data dest_stride;
+ hrt_data dest_end_addr;
+ hrt_data sync_frame;
+ hrt_data sync_command;
+ hrt_data store_command;
+ hrt_data shift_returned_items;
+ hrt_data elems_ibuf;
+ hrt_data elems_dest;
+ hrt_data cur_stores;
+ hrt_data cur_acks;
+ hrt_data cur_s2m_ibuf_addr;
+ hrt_data cur_dma_ibuf_addr;
+ hrt_data cur_dma_dest_addr;
+ hrt_data cur_isp_dest_addr;
+ hrt_data dma_cmds_send;
+ hrt_data main_cntrl_state;
+ hrt_data dma_sync_state;
+ hrt_data isp_sync_state;
+};
+
+struct ibuf_ctrl_state_s {
+ hrt_data recalc_words;
+ hrt_data arbiters;
+ ibuf_ctrl_proc_state_t proc_state[N_STREAM2MMIO_SID_ID];
+};
+
+#endif /* __IBUF_CTRL_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c
new file mode 100644
index 000000000000..dfa7edefdff8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "system_local.h"
+#include "isys_dma_global.h"
+#include "assert_support.h"
+#include "isys_dma_private.h"
+
+const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID] = {
+ N_ISYS2401_DMA_CHANNEL
+};
+
+void isys2401_dma_set_max_burst_size(
+ const isys2401_dma_ID_t dma_id,
+ uint32_t max_burst_size)
+{
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert((max_burst_size > 0x00) && (max_burst_size <= 0xFF));
+
+ isys2401_dma_reg_store(dma_id,
+ DMA_DEV_INFO_REG_IDX(_DMA_V2_DEV_INTERF_MAX_BURST_IDX, HIVE_DMA_BUS_DDR_CONN),
+ (max_burst_size - 1));
+}
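
The burst-size helper above programs the hardware field as the requested size minus one, so the accepted range 1..255 maps onto register values 0..254. A standalone sketch of that conversion (plain C; the actual register write is left out):

#include <assert.h>
#include <stdio.h>

/* value programmed by isys2401_dma_set_max_burst_size(): size minus one */
static unsigned int burst_size_field(unsigned int max_burst_size)
{
	assert(max_burst_size > 0x00 && max_burst_size <= 0xFF);
	return max_burst_size - 1;
}

int main(void)
{
	printf("burst 1   -> field %u\n", burst_size_field(1));		/* 0 */
	printf("burst 255 -> field %u\n", burst_size_field(255));	/* 254 */
	return 0;
}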
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h
new file mode 100644
index 000000000000..da51b44d48a9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_DMA_PRIVATE_H_INCLUDED__
+#define __ISYS_DMA_PRIVATE_H_INCLUDED__
+
+#include "isys_dma_public.h"
+#include "device_access.h"
+#include "assert_support.h"
+#include "dma.h"
+#include "dma_v2_defs.h"
+#include "print_support.h"
+
+void isys2401_dma_reg_store(const isys2401_dma_ID_t dma_id,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ unsigned int reg_loc;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address) - 1);
+
+ reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data));
+
+ ia_css_device_store_uint32(reg_loc, value);
+}
+
+hrt_data isys2401_dma_reg_load(const isys2401_dma_ID_t dma_id,
+ const unsigned int reg)
+{
+ unsigned int reg_loc;
+ hrt_data value;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address) - 1);
+
+ reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data));
+
+ value = ia_css_device_load_uint32(reg_loc);
+ ia_css_print("isys dma load from addr(0x%x) val(%u)\n", reg_loc,
+ (unsigned int)value);
+
+ return value;
+}
+
+#endif /* __ISYS_DMA_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c
new file mode 100644
index 000000000000..3847884e2dc0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <system_local.h>
+#include "device_access.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "isys_irq.h"
+
+#ifndef __INLINE_ISYS2401_IRQ__
+/*
+ * Include definitions for isys irq private functions. isys_irq.h includes
+ * declarations of these functions by including isys_irq_public.h.
+ */
+#include "isys_irq_private.h"
+#endif
+
+/* Public interface */
+void isys_irqc_status_enable(const isys_irq_ID_t isys_irqc_id)
+{
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Setting irq mask for port %u\n",
+ isys_irqc_id);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX,
+ ISYS_IRQ_MASK_REG_VALUE);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX,
+ ISYS_IRQ_CLEAR_REG_VALUE);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX,
+ ISYS_IRQ_ENABLE_REG_VALUE);
+}
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
new file mode 100644
index 000000000000..0c5f1094f901
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_IRQ_LOCAL_H__
+#define __ISYS_IRQ_LOCAL_H__
+
+#include <type_support.h>
+
+typedef struct isys_irqc_state_s isys_irqc_state_t;
+
+struct isys_irqc_state_s {
+ hrt_data edge;
+ hrt_data mask;
+ hrt_data status;
+ hrt_data enable;
+ hrt_data level_no;
+ /*hrt_data clear; */ /* write-only register */
+};
+
+
+#endif /* __ISYS_IRQ_LOCAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
new file mode 100644
index 000000000000..4bd8209aaa01
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_IRQ_PRIVATE_H__
+#define __ISYS_IRQ_PRIVATE_H__
+
+#include "isys_irq_global.h"
+#include "isys_irq_local.h"
+
+
+/* -------------------------------------------------------+
+ | Native command interface (NCI) |
+ + -------------------------------------------------------*/
+
+/**
+ * @brief Get the isys irq status.
+ * Refer to "isys_irq.h" for details.
+ */
+void isys_irqc_state_get(
+ const isys_irq_ID_t isys_irqc_id,
+ isys_irqc_state_t *state)
+{
+ state->edge = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_EDGE_REG_IDX);
+ state->mask = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX);
+ state->status = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_STATUS_REG_IDX);
+ state->enable = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX);
+ state->level_no = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_LEVEL_NO_REG_IDX);
+ /*
+ ** Invalid to read/load from write-only register 'clear'
+ ** state->clear = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX);
+ */
+}
+
+/**
+ * @brief Dump the isys irq status.
+ * Refer to "isys_irq.h" for details.
+ */
+void isys_irqc_state_dump(
+ const isys_irq_ID_t isys_irqc_id,
+ const isys_irqc_state_t *state)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq controller id %d\n\tstatus:0x%x\n\tedge:0x%x\n\tmask:0x%x\n\tenable:0x%x\n\tlevel_not_pulse:0x%x\n",
+ isys_irqc_id,
+ state->status, state->edge, state->mask, state->enable, state->level_no);
+}
+
+/* end of NCI */
+
+/* -------------------------------------------------------+
+ | Device level interface (DLI) |
+ + -------------------------------------------------------*/
+
+/* Support functions */
+void isys_irqc_reg_store(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx,
+ const hrt_data value)
+{
+ unsigned int reg_addr;
+
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+ assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX);
+
+ reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data));
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq store at addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value);
+
+ ia_css_device_store_uint32(reg_addr, value);
+}
+
+hrt_data isys_irqc_reg_load(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx)
+{
+ unsigned int reg_addr;
+ hrt_data value;
+
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+ assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX);
+
+ reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data));
+ value = ia_css_device_load_uint32(reg_addr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq load from addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value);
+
+ return value;
+}
+
+/* end of DLI */
+
+
+#endif /* __ISYS_IRQ_PRIVATE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c
new file mode 100644
index 000000000000..21b19b4abeaa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "isys_stream2mmio.h"
+
+const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID] = {
+ N_STREAM2MMIO_SID_ID,
+ STREAM2MMIO_SID4_ID,
+ STREAM2MMIO_SID4_ID
+};
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h
new file mode 100644
index 000000000000..de985674bddd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__
+
+#include "isys_stream2mmio_global.h"
+
+typedef struct stream2mmio_state_s stream2mmio_state_t;
+typedef struct stream2mmio_sid_state_s stream2mmio_sid_state_t;
+
+struct stream2mmio_sid_state_s {
+ hrt_data rcv_ack;
+ hrt_data pix_width_id;
+ hrt_data start_addr;
+ hrt_data end_addr;
+ hrt_data strides;
+ hrt_data num_items;
+ hrt_data block_when_no_cmd;
+};
+
+struct stream2mmio_state_s {
+ stream2mmio_sid_state_t sid_state[N_STREAM2MMIO_SID_ID];
+};
+#endif /* __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h
new file mode 100644
index 000000000000..3210dd6bf9ca
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__
+
+#include "isys_stream2mmio_public.h"
+#include "device_access.h" /* ia_css_device_load_uint32 */
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+#define STREAM2MMIO_COMMAND_REG_ID 0
+#define STREAM2MMIO_ACKNOWLEDGE_REG_ID 1
+#define STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2
+#define STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address,NOT Byte */
+#define STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address,NOT Byte */
+#define STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words, increment is per packet for long sids, stride is not used for short sid's*/
+#define STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for store packets cmd, number of words for store_words cmd */
+#define STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, input will be stalled if there is no pending command for this sid */
+#define STREAM2MMIO_REGS_PER_SID 8
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the stream2mmio-controller state.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state)
+{
+ stream2mmio_sid_ID_t i;
+
+ /*
+ * Get the values of the register-set per
+ * stream2mmio-controller sids.
+ */
+ for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) {
+ stream2mmio_get_sid_state(ID, i, &state->sid_state[i]);
+ }
+}
+
+/**
+ * @brief Get the state of the stream2mmio-controller sids.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_sid_state(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ stream2mmio_sid_state_t *state)
+{
+ state->rcv_ack =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_ACKNOWLEDGE_REG_ID);
+
+ state->pix_width_id =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_PIX_WIDTH_ID_REG_ID);
+
+ state->start_addr =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_START_ADDR_REG_ID);
+
+ state->end_addr =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_END_ADDR_REG_ID);
+
+ state->strides =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_STRIDE_REG_ID);
+
+ state->num_items =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_NUM_ITEMS_REG_ID);
+
+ state->block_when_no_cmd =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID);
+}
+
+/**
+ * @brief Dump the state of the stream2mmio-controller sids.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_print_sid_state(
+ stream2mmio_sid_state_t *state)
+{
+ ia_css_print("\t \t Receive acks 0x%x\n", state->rcv_ack);
+ ia_css_print("\t \t Pixel width 0x%x\n", state->pix_width_id);
+ ia_css_print("\t \t Startaddr 0x%x\n", state->start_addr);
+ ia_css_print("\t \t Endaddr 0x%x\n", state->end_addr);
+ ia_css_print("\t \t Strides 0x%x\n", state->strides);
+ ia_css_print("\t \t Num Items 0x%x\n", state->num_items);
+ ia_css_print("\t \t block when no cmd 0x%x\n", state->block_when_no_cmd);
+}
+
+/**
+ * @brief Dump the stream2mmio-controller state.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state)
+{
+ stream2mmio_sid_ID_t i;
+
+ /*
+	 * Print the values of the register-set per
+ * stream2mmio-controller sids.
+ */
+ for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) {
+		ia_css_print("STREAM2MMIO ID %d SID %d\n", ID, i);
+ stream2mmio_print_sid_state(&state->sid_state[i]);
+ }
+}
+
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C hrt_data stream2mmio_reg_load(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ const uint32_t reg_idx)
+{
+ u32 reg_bank_offset;
+
+ assert(ID < N_STREAM2MMIO_ID);
+
+ reg_bank_offset = STREAM2MMIO_REGS_PER_SID * sid_id;
+ return ia_css_device_load_uint32(STREAM2MMIO_CTRL_BASE[ID] +
+ (reg_bank_offset + reg_idx) * sizeof(hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store(
+ const stream2mmio_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_STREAM2MMIO_ID);
+ assert(STREAM2MMIO_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] +
+ reg * sizeof(hrt_data), value);
+}
+
+/* end of DLI */
+
+#endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */
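
Each SID owns a bank of STREAM2MMIO_REGS_PER_SID registers, so stream2mmio_reg_load() turns a (sid, register) pair into a flat word offset before applying the usual base + offset * sizeof(hrt_data) rule. A standalone sketch of the offset calculation (plain C):

#include <stdio.h>

#define STREAM2MMIO_REGS_PER_SID	8
#define STREAM2MMIO_NUM_ITEMS_REG_ID	6

/* word offset used by stream2mmio_reg_load(): one 8-register bank per SID */
static unsigned int s2m_word_offset(unsigned int sid_id, unsigned int reg_idx)
{
	return STREAM2MMIO_REGS_PER_SID * sid_id + reg_idx;
}

int main(void)
{
	/* SID 2, NUM_ITEMS register: 2 * 8 + 6 = word 22 */
	printf("offset = %u\n", s2m_word_offset(2, STREAM2MMIO_NUM_ITEMS_REG_ID));
	return 0;
}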
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h
new file mode 100644
index 000000000000..4e091406efa9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __PIXELGEN_LOCAL_H_INCLUDED__
+#define __PIXELGEN_LOCAL_H_INCLUDED__
+
+#include "pixelgen_global.h"
+
+typedef struct pixelgen_ctrl_state_s pixelgen_ctrl_state_t;
+struct pixelgen_ctrl_state_s {
+ hrt_data com_enable;
+ hrt_data prbs_rstval0;
+ hrt_data prbs_rstval1;
+ hrt_data syng_sid;
+ hrt_data syng_free_run;
+ hrt_data syng_pause;
+ hrt_data syng_nof_frames;
+ hrt_data syng_nof_pixels;
+ hrt_data syng_nof_line;
+ hrt_data syng_hblank_cyc;
+ hrt_data syng_vblank_cyc;
+ hrt_data syng_stat_hcnt;
+ hrt_data syng_stat_vcnt;
+ hrt_data syng_stat_fcnt;
+ hrt_data syng_stat_done;
+ hrt_data tpg_mode;
+ hrt_data tpg_hcnt_mask;
+ hrt_data tpg_vcnt_mask;
+ hrt_data tpg_xycnt_mask;
+ hrt_data tpg_hcnt_delta;
+ hrt_data tpg_vcnt_delta;
+ hrt_data tpg_r1;
+ hrt_data tpg_g1;
+ hrt_data tpg_b1;
+ hrt_data tpg_r2;
+ hrt_data tpg_g2;
+ hrt_data tpg_b2;
+};
+#endif /* __PIXELGEN_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h
new file mode 100644
index 000000000000..b8b98106bd31
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __PIXELGEN_PRIVATE_H_INCLUDED__
+#define __PIXELGEN_PRIVATE_H_INCLUDED__
+#include "pixelgen_public.h"
+#include "PixelGen_SysBlock_defs.h"
+#include "device_access.h" /* ia_css_device_load_uint32 */
+#include "assert_support.h" /* assert */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C hrt_data pixelgen_ctrl_reg_load(
+ const pixelgen_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(
+ hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "pixelgen_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
+ const pixelgen_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+}
+
+/* end of DLI */
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the pixelgen state.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_get_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state)
+{
+ state->com_enable =
+ pixelgen_ctrl_reg_load(ID, _PXG_COM_ENABLE_REG_IDX);
+ state->prbs_rstval0 =
+ pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG0_IDX);
+ state->prbs_rstval1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG1_IDX);
+ state->syng_sid =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_SID_REG_IDX);
+ state->syng_free_run =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_FREE_RUN_REG_IDX);
+ state->syng_pause =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_PAUSE_REG_IDX);
+ state->syng_nof_frames =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_FRAME_REG_IDX);
+ state->syng_nof_pixels =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_PIXEL_REG_IDX);
+ state->syng_nof_line =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_LINE_REG_IDX);
+ state->syng_hblank_cyc =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_HBLANK_CYC_REG_IDX);
+ state->syng_vblank_cyc =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_VBLANK_CYC_REG_IDX);
+ state->syng_stat_hcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_HCNT_REG_IDX);
+ state->syng_stat_vcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_VCNT_REG_IDX);
+ state->syng_stat_fcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_FCNT_REG_IDX);
+ state->syng_stat_done =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_DONE_REG_IDX);
+ state->tpg_mode =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_MODE_REG_IDX);
+ state->tpg_hcnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_MASK_REG_IDX);
+ state->tpg_vcnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_MASK_REG_IDX);
+ state->tpg_xycnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_XYCNT_MASK_REG_IDX);
+ state->tpg_hcnt_delta =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_DELTA_REG_IDX);
+ state->tpg_vcnt_delta =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_DELTA_REG_IDX);
+ state->tpg_r1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_R1_REG_IDX);
+ state->tpg_g1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_G1_REG_IDX);
+ state->tpg_b1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_B1_REG_IDX);
+ state->tpg_r2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_R2_REG_IDX);
+ state->tpg_g2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_G2_REG_IDX);
+ state->tpg_b2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_B2_REG_IDX);
+}
+
+/**
+ * @brief Dump the pixelgen state.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_dump_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state)
+{
+ ia_css_print("Pixel Generator ID %d Enable 0x%x\n", ID, state->com_enable);
+ ia_css_print("Pixel Generator ID %d PRBS reset value 0 0x%x\n", ID,
+ state->prbs_rstval0);
+ ia_css_print("Pixel Generator ID %d PRBS reset value 1 0x%x\n", ID,
+ state->prbs_rstval1);
+ ia_css_print("Pixel Generator ID %d SYNC SID 0x%x\n", ID, state->syng_sid);
+ ia_css_print("Pixel Generator ID %d syng free run 0x%x\n", ID,
+ state->syng_free_run);
+ ia_css_print("Pixel Generator ID %d syng pause 0x%x\n", ID, state->syng_pause);
+ ia_css_print("Pixel Generator ID %d syng no of frames 0x%x\n", ID,
+ state->syng_nof_frames);
+ ia_css_print("Pixel Generator ID %d syng no of pixels 0x%x\n", ID,
+ state->syng_nof_pixels);
+ ia_css_print("Pixel Generator ID %d syng no of line 0x%x\n", ID,
+ state->syng_nof_line);
+ ia_css_print("Pixel Generator ID %d syng hblank cyc 0x%x\n", ID,
+ state->syng_hblank_cyc);
+ ia_css_print("Pixel Generator ID %d syng vblank cyc 0x%x\n", ID,
+ state->syng_vblank_cyc);
+ ia_css_print("Pixel Generator ID %d syng stat hcnt 0x%x\n", ID,
+ state->syng_stat_hcnt);
+ ia_css_print("Pixel Generator ID %d syng stat vcnt 0x%x\n", ID,
+ state->syng_stat_vcnt);
+ ia_css_print("Pixel Generator ID %d syng stat fcnt 0x%x\n", ID,
+ state->syng_stat_fcnt);
+ ia_css_print("Pixel Generator ID %d syng stat done 0x%x\n", ID,
+ state->syng_stat_done);
+ ia_css_print("Pixel Generator ID %d tpg mode 0x%x\n", ID, state->tpg_mode);
+ ia_css_print("Pixel Generator ID %d tpg hcnt mask 0x%x\n", ID,
+ state->tpg_hcnt_mask);
+	ia_css_print("Pixel Generator ID %d tpg vcnt mask 0x%x\n", ID,
+		     state->tpg_vcnt_mask);
+ ia_css_print("Pixel Generator ID %d tpg xycnt mask 0x%x\n", ID,
+ state->tpg_xycnt_mask);
+ ia_css_print("Pixel Generator ID %d tpg hcnt delta 0x%x\n", ID,
+ state->tpg_hcnt_delta);
+ ia_css_print("Pixel Generator ID %d tpg vcnt delta 0x%x\n", ID,
+ state->tpg_vcnt_delta);
+ ia_css_print("Pixel Generator ID %d tpg r1 0x%x\n", ID, state->tpg_r1);
+ ia_css_print("Pixel Generator ID %d tpg g1 0x%x\n", ID, state->tpg_g1);
+ ia_css_print("Pixel Generator ID %d tpg b1 0x%x\n", ID, state->tpg_b1);
+ ia_css_print("Pixel Generator ID %d tpg r2 0x%x\n", ID, state->tpg_r2);
+ ia_css_print("Pixel Generator ID %d tpg g2 0x%x\n", ID, state->tpg_g2);
+ ia_css_print("Pixel Generator ID %d tpg b2 0x%x\n", ID, state->tpg_b2);
+}
+
+/* end of NCI */
+#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h
new file mode 100644
index 000000000000..323925ba2971
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _PixelGen_SysBlock_defs_h
+#define _PixelGen_SysBlock_defs_h
+
+/* Parameters and User_Parameters for HSS */
+#define _PXG_PPC Ppc
+#define _PXG_PIXEL_BITS PixelWidth
+#define _PXG_MAX_NOF_SID MaxNofSids
+#define _PXG_DATA_BITS DataWidth
+#define _PXG_CNT_BITS CntWidth
+#define _PXG_FIFODEPTH FifoDepth
+#define _PXG_DBG Dbg_device_not_included
+
+/* ID's and Address */
+#define _PXG_ADRRESS_ALIGN_REG 4
+
+#define _PXG_COM_ENABLE_REG_IDX 0
+#define _PXG_PRBS_RSTVAL_REG0_IDX 1
+#define _PXG_PRBS_RSTVAL_REG1_IDX 2
+#define _PXG_SYNG_SID_REG_IDX 3
+#define _PXG_SYNG_FREE_RUN_REG_IDX 4
+#define _PXG_SYNG_PAUSE_REG_IDX 5
+#define _PXG_SYNG_NOF_FRAME_REG_IDX 6
+#define _PXG_SYNG_NOF_PIXEL_REG_IDX 7
+#define _PXG_SYNG_NOF_LINE_REG_IDX 8
+#define _PXG_SYNG_HBLANK_CYC_REG_IDX 9
+#define _PXG_SYNG_VBLANK_CYC_REG_IDX 10
+#define _PXG_SYNG_STAT_HCNT_REG_IDX 11
+#define _PXG_SYNG_STAT_VCNT_REG_IDX 12
+#define _PXG_SYNG_STAT_FCNT_REG_IDX 13
+#define _PXG_SYNG_STAT_DONE_REG_IDX 14
+#define _PXG_TPG_MODE_REG_IDX 15
+#define _PXG_TPG_HCNT_MASK_REG_IDX 16
+#define _PXG_TPG_VCNT_MASK_REG_IDX 17
+#define _PXG_TPG_XYCNT_MASK_REG_IDX 18
+#define _PXG_TPG_HCNT_DELTA_REG_IDX 19
+#define _PXG_TPG_VCNT_DELTA_REG_IDX 20
+#define _PXG_TPG_R1_REG_IDX 21
+#define _PXG_TPG_G1_REG_IDX 22
+#define _PXG_TPG_B1_REG_IDX 23
+#define _PXG_TPG_R2_REG_IDX 24
+#define _PXG_TPG_G2_REG_IDX 25
+#define _PXG_TPG_B2_REG_IDX 26
+/* */
+#define _PXG_SYNG_PAUSE_CYCLES 0
+/* Subblock ID's */
+#define _PXG_DISABLE_IDX 0
+#define _PXG_PRBS_IDX 0
+#define _PXG_TPG_IDX 1
+#define _PXG_SYNG_IDX 2
+#define _PXG_SMUX_IDX 3
+/* Register Widths */
+#define _PXG_COM_ENABLE_REG_WIDTH 2
+#define _PXG_COM_SRST_REG_WIDTH 4
+#define _PXG_PRBS_RSTVAL_REG0_WIDTH 31
+#define _PXG_PRBS_RSTVAL_REG1_WIDTH 31
+
+#define _PXG_SYNG_SID_REG_WIDTH 3
+
+#define _PXG_SYNG_FREE_RUN_REG_WIDTH 1
+#define _PXG_SYNG_PAUSE_REG_WIDTH 1
+/*
+#define _PXG_SYNG_NOF_FRAME_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_NOF_PIXEL_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_NOF_LINE_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_HBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_VBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_HCNT_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_VCNT_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_FCNT_REG_WIDTH <sync_gen_cnt_width>
+*/
+#define _PXG_SYNG_STAT_DONE_REG_WIDTH 1
+#define _PXG_TPG_MODE_REG_WIDTH 2
+/*
+#define _PXG_TPG_HCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_TPG_VCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_TPG_XYCNT_MASK_REG_WIDTH    <pixel_width>
+*/
+#define _PXG_TPG_HCNT_DELTA_REG_WIDTH 4
+#define _PXG_TPG_VCNT_DELTA_REG_WIDTH 4
+/*
+#define _PXG_TPG_R1_REG_WIDTH            <pixel_width>
+#define _PXG_TPG_G1_REG_WIDTH            <pixel_width>
+#define _PXG_TPG_B1_REG_WIDTH            <pixel_width>
+#define _PXG_TPG_R2_REG_WIDTH            <pixel_width>
+#define _PXG_TPG_G2_REG_WIDTH            <pixel_width>
+#define _PXG_TPG_B2_REG_WIDTH            <pixel_width>
+*/
+#define _PXG_FIFO_DEPTH 2
+/* MISC */
+#define _PXG_ENABLE_REG_VAL 1
+#define _PXG_PRBS_ENABLE_REG_VAL 1
+#define _PXG_TPG_ENABLE_REG_VAL 2
+#define _PXG_SYNG_ENABLE_REG_VAL 4
+#define _PXG_FIFO_ENABLE_REG_VAL 8
+#define _PXG_PXL_BITS 14
+#define _PXG_INVALID_FLAG 0xDEADBEEF
+#define _PXG_CAFE_FLAG 0xCAFEBABE
+
+#endif /* _PixelGen_SysBlock_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h
new file mode 100644
index 000000000000..a41e2b957bd1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _ibuf_cntrl_defs_h_
+#define _ibuf_cntrl_defs_h_
+
+#include <stream2mmio_defs.h>
+#include <dma_v2_defs.h>
+
+#define _IBUF_CNTRL_REG_ALIGN 4
+/* alignment of register banks; the first bank holds the shared configuration and status registers: */
+#define _IBUF_CNTRL_PROC_REG_ALIGN 32
+
+/* the actual amount of configuration registers per proc: */
+#define _IBUF_CNTRL_CONFIG_REGS_PER_PROC 18
+/* the actual amount of shared configuration registers: */
+#define _IBUF_CNTRL_CONFIG_REGS_NO_PROC 0
+
+/* the actual amount of status registers per proc */
+#define _IBUF_CNTRL_STATUS_REGS_PER_PROC (_IBUF_CNTRL_CONFIG_REGS_PER_PROC + 10)
+/* the actual amount of shared status registers */
+#define _IBUF_CNTRL_STATUS_REGS_NO_PROC (_IBUF_CNTRL_CONFIG_REGS_NO_PROC + 2)
+
+/* time out bits, maximum time out value is 2^_IBUF_CNTRL_TIME_OUT_BITS - 1 */
+#define _IBUF_CNTRL_TIME_OUT_BITS 5
+
+/* command token definition */
+#define _IBUF_CNTRL_CMD_TOKEN_LSB 0
+#define _IBUF_CNTRL_CMD_TOKEN_MSB 1
+
+/* Str2MMIO defines */
+#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_MSB _STREAM2MMIO_CMD_TOKEN_CMD_MSB
+#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_LSB _STREAM2MMIO_CMD_TOKEN_CMD_LSB
+#define _IBUF_CNTRL_STREAM2MMIO_NUM_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _IBUF_CNTRL_STREAM2MMIO_ACK_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT
+#define _IBUF_CNTRL_STREAM2MMIO_ACK_TOKEN_VALID_BIT _STREAM2MMIO_ACK_TOKEN_VALID_BIT
+
+/* acknowledge token definition */
+#define _IBUF_CNTRL_ACK_TOKEN_STORES_IDX 0
+#define _IBUF_CNTRL_ACK_TOKEN_STORES_BITS 15
+#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX (_IBUF_CNTRL_ACK_TOKEN_STORES_BITS + _IBUF_CNTRL_ACK_TOKEN_STORES_IDX)
+#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _IBUF_CNTRL_ACK_TOKEN_LSB _IBUF_CNTRL_ACK_TOKEN_STORES_IDX
+#define _IBUF_CNTRL_ACK_TOKEN_MSB (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX - 1)
+/* bit 31 indicates a valid ack: */
+#define _IBUF_CNTRL_ACK_TOKEN_VALID_BIT (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX)
+
+/*shared registers:*/
+#define _IBUF_CNTRL_RECALC_WORDS_STATUS 0
+#define _IBUF_CNTRL_ARBITERS_STATUS 1
+
+#define _IBUF_CNTRL_SET_CRUN 2 /* NO PHYSICAL REGISTER!! Only used in HSS model */
+
+/*register addresses for each proc: */
+#define _IBUF_CNTRL_CMD 0
+#define _IBUF_CNTRL_ACK 1
+
+/* number of items (packets or words) per store: */
+#define _IBUF_CNTRL_NUM_ITEMS_PER_STORE 2
+
+/* number of stores (of packets or words) per frame/buffer: */
+#define _IBUF_CNTRL_NUM_STORES_PER_FRAME 3
+
+/* the channel and command in the DMA */
+#define _IBUF_CNTRL_DMA_CHANNEL 4
+#define _IBUF_CNTRL_DMA_CMD 5
+
+/* the start address and stride of the buffers */
+#define _IBUF_CNTRL_BUFFER_START_ADDRESS 6
+#define _IBUF_CNTRL_BUFFER_STRIDE 7
+#define _IBUF_CNTRL_BUFFER_END_ADDRESS 8
+
+/* destination start address, stride and end address; should be the same as in the DMA */
+#define _IBUF_CNTRL_DEST_START_ADDRESS 9
+#define _IBUF_CNTRL_DEST_STRIDE 10
+#define _IBUF_CNTRL_DEST_END_ADDRESS 11
+
+/* send a frame sync or not, default 1 */
+#define _IBUF_CNTRL_SYNC_FRAME 12
+
+/* str2mmio cmds */
+#define _IBUF_CNTRL_STR2MMIO_SYNC_CMD 13
+#define _IBUF_CNTRL_STR2MMIO_STORE_CMD 14
+
+/* number of elements per word */
+#define _IBUF_CNTRL_SHIFT_ITEMS 15
+#define _IBUF_CNTRL_ELEMS_P_WORD_IBUF 16
+#define _IBUF_CNTRL_ELEMS_P_WORD_DEST 17
+
+/* STATUS */
+/* current frame and stores in buffer */
+#define _IBUF_CNTRL_CUR_STORES 18
+#define _IBUF_CNTRL_CUR_ACKS 19
+
+/* current buffer and destination address for DMA cmd's */
+#define _IBUF_CNTRL_CUR_S2M_IBUF_ADDR 20
+#define _IBUF_CNTRL_CUR_DMA_IBUF_ADDR 21
+#define _IBUF_CNTRL_CUR_DMA_DEST_ADDR 22
+#define _IBUF_CNTRL_CUR_ISP_DEST_ADDR 23
+
+#define _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND 24
+
+#define _IBUF_CNTRL_MAIN_CNTRL_STATE 25
+#define _IBUF_CNTRL_DMA_SYNC_STATE 26
+#define _IBUF_CNTRL_ISP_SYNC_STATE 27
+
+/*Commands: */
+#define _IBUF_CNTRL_CMD_STORE_FRAME_IDX 0
+#define _IBUF_CNTRL_CMD_ONLINE_IDX 1
+
+/* initialize, copy st_addr to cur_addr etc */
+#define _IBUF_CNTRL_CMD_INITIALIZE 0
+
+/* store an online frame (sync with ISP, use the configured start, stride and end address): */
+#define _IBUF_CNTRL_CMD_STORE_ONLINE_FRAME ((1 << _IBUF_CNTRL_CMD_STORE_FRAME_IDX) | (1 << _IBUF_CNTRL_CMD_ONLINE_IDX))
+
+/* store an offline frame (don't sync with ISP, requires start address as 2nd token, no end address): */
+#define _IBUF_CNTRL_CMD_STORE_OFFLINE_FRAME BIT(_IBUF_CNTRL_CMD_STORE_FRAME_IDX)
+
+/* false command token, must be different from the commands. Use online bit, not store frame: */
+#define _IBUF_CNTRL_FALSE_ACK 2
+
+#endif
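
The acknowledge-token layout above packs a store count into bits 0..14 and an item count directly above it, with the valid flag one bit higher still; with _STREAM2MMIO_PACK_NUM_ITEMS_BITS equal to 16 (an assumption here, taken from the "bit 31" comment) the fields land at bits 0..14, 15..30 and 31. A throwaway check of that arithmetic (plain C, using local copies of the macros):

#include <stdio.h>

#define ACK_STORES_IDX	0
#define ACK_STORES_BITS	15
#define ACK_ITEMS_BITS	16	/* assumed value of _STREAM2MMIO_PACK_NUM_ITEMS_BITS */
#define ACK_ITEMS_IDX	(ACK_STORES_BITS + ACK_STORES_IDX)
#define ACK_VALID_BIT	(ACK_ITEMS_BITS + ACK_ITEMS_IDX)

int main(void)
{
	printf("items field starts at bit %d\n", ACK_ITEMS_IDX);	/* 15 */
	printf("valid flag is bit %d\n", ACK_VALID_BIT);		/* 31 */
	return 0;
}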
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h
new file mode 100644
index 000000000000..bc7639adcd8d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH 32 /* use 32 to be compatible with the streaming monitor; MSBs of the interface are tied to '0' */
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* used reserved mipi positions for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+//_HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 63
+#define _HRT_MIPI_BACKEND_FMT_TYPE_CUSTOM 63
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector*/
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 44
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition for state machine of data FIFO for decode different type of data */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "mipi_backend_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+/*
+#define BE_CUST_EN_IDX 0 // 2bits
+#define BE_CUST_EN_DATAID_IDX 2 // 6bits MIPI DATA ID
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 // Enable Custom Decoding for all DATA IDs
+#define BE_CUST_MODE_ONE 3 // Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID
+
+// Data State config = {get_bits(6bits), valid(1bit)} //
+#define BE_CUST_DATA_STATE_S0_IDX 0 // 7bits
+#define BE_CUST_DATA_STATE_S1_IDX 8 //7 // 7bits
+#define BE_CUST_DATA_STATE_S2_IDX 16//14 // 7bits /
+#define BE_CUST_DATA_STATE_WIDTH 24//21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 // 1bits
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 // 6bits
+
+// Pixel Extractor config
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 // 6bits
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 6//5 // 5bits
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 11//10 // 18bits
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 29 //28 // 1bits
+
+#define BE_CUST_PIX_EXT_WIDTH 30//29
+
+// Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)}
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 // 4bits
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 // Normal (NO less get_bits case) Valid - 1bits
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 // Normal (NO less get_bits case) EoP - 1bits
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 // Especial (less get_bits case) Valid - 1bits
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 // Especial (less get_bits case) EoP - 1bits
+
+*/
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
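
A small, hedged illustration of how the packet-header fields defined above could be inspected; mipi_ph_fmt_id() and mipi_ph_is_frame_start() are made-up helper names operating on the 32-bit packet header word:

#include <linux/bits.h>
#include <linux/types.h>

/* Extract the 6-bit data format ID from a packet header word. */
static inline u32 mipi_ph_fmt_id(u32 ph)
{
	return (ph >> _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX) &
	       GENMASK(_HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS - 1, 0);
}

/* True when the short packet signals a frame start. */
static inline bool mipi_ph_is_frame_start(u32 ph)
{
	return mipi_ph_fmt_id(ph) == _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF;
}
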
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h
new file mode 100644
index 000000000000..91747af997c5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _mipi_backend_defs_h
+#define _mipi_backend_defs_h
+
+#include "mipi_backend_common_defs.h"
+
+#define MIPI_BACKEND_REG_ALIGN 4 // assuming 32 bit control bus width
+
+#define _HRT_MIPI_BACKEND_NOF_IRQS 3 // sid_lut
+
+// SH Backend Register IDs
+#define _HRT_MIPI_BACKEND_ENABLE_REG_IDX 0
+#define _HRT_MIPI_BACKEND_STATUS_REG_IDX 1
+//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_IDX 2
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX 2
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG1_IDX 3
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG2_IDX 4
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG3_IDX 5
+#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX 6
+#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX 7
+#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX 8
+#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX 9
+#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_IDX 10
+////
+#define _HRT_MIPI_BACKEND_CUST_EN_REG_IDX 11
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX 12
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX 13
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P1_REG_IDX 14
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P2_REG_IDX 15
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P3_REG_IDX 16
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P0_REG_IDX 17
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P1_REG_IDX 18
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P2_REG_IDX 19
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P3_REG_IDX 20
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P0_REG_IDX 21
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P1_REG_IDX 22
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P2_REG_IDX 23
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P3_REG_IDX 24
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX 25
+////
+#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX 26
+#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX 27
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_IDX 28
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX 28
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_IDX 29
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_IDX 30
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_IDX 31
+
+#define _HRT_MIPI_BACKEND_NOF_REGISTERS 32 // excluding the LP LUT entries
+
+#define _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX 32
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+#define _HRT_MIPI_BACKEND_ENABLE_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_STATUS_REG_WIDTH 1
+//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG_WIDTH 32
+#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_WIDTH 7
+#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_WIDTH 9
+#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_WIDTH 8
+#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_WIDTH _HRT_MIPI_BACKEND_NOF_IRQS
+#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_WIDTH 0
+#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_WIDTH 1 + 2 + 6
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_WIDTH 1
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_WIDTH 7
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define _HRT_MIPI_BACKEND_NOF_SP_LUT_ENTRIES 4
+
+//#define _HRT_MIPI_BACKEND_MAX_NOF_LP_LUT_ENTRIES 16 // to satisfy hss model static array declaration
+
+#define _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH 2
+#define _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH 6
+#define _HRT_MIPI_BACKEND_PACKET_ID_WIDTH _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH
+
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB 0
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB + (pix_width) - 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) + (pix_width) - 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_WIDTH(pix_width) (_HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) + 1)
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "mipi_backend_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define _HRT_MIPI_BACKEND_CUST_EN_IDX 0 /* 2bits */
+#define _HRT_MIPI_BACKEND_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define _HRT_MIPI_BACKEND_CUST_EN_HIGH_PREC_IDX 8 // 1 bit
+#define _HRT_MIPI_BACKEND_CUST_EN_WIDTH 9
+#define _HRT_MIPI_BACKEND_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define _HRT_MIPI_BACKEND_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
+
+#define _HRT_MIPI_BACKEND_CUST_EN_OPTION_IDX 1
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S1_IDX 8 /* 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S2_IDX 16 /* was 14 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_WIDTH 24 /* was 21*/
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
+
+/* Pixel Extractor config */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 6bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_ALIGN_IDX 6 /* 5bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_MASK_IDX 11 /* was 10 18bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_EN_IDX 29 /* was 28 1bits */
+
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_WIDTH 30 /* was 29 */
+
+/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_WIDTH 16
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */
+
+/*************************************************************************************************/
+/* MIPI backend output streaming interface definition */
+/* These parameters define the fields within the streaming bus. They should also be used by the */
+/* subsequent block, i.e. stream2mmio. */
+/*************************************************************************************************/
+/* The pipe backend - stream2mmio interface should be design-time configurable in: */
+/* PixWidth - number of bits per pixel */
+/* PPC - pixels per clock */
+/* NumSids - max number of source IDs (ifc's) and, derived from that: */
+/* SidWidth - number of bits required for the sid parameter */
+/* To keep this configurability, the macros below take these as parameters. */
+/*************************************************************************************************/
+
+#define HRT_MIPI_BACKEND_STREAM_EOP_BIT 0
+#define HRT_MIPI_BACKEND_STREAM_SOP_BIT 1
+#define HRT_MIPI_BACKEND_STREAM_EOF_BIT 2
+#define HRT_MIPI_BACKEND_STREAM_SOF_BIT 3
+#define HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT 4
+#define HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT + (sid_width) - 1)
+#define HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width, p) (HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width) + 1 + p)
+
+#define HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width, ppc, pix_width, p) (HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width, ppc) + ((pix_width) * p))
+#define HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width, ppc, pix_width, p) (HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width, ppc, pix_width, p) + (pix_width) - 1)
+
+#if 0
+//#define HRT_MIPI_BACKEND_STREAM_PIX_BITS 14
+//#define HRT_MIPI_BACKEND_STREAM_CHID_BITS 4
+//#define HRT_MIPI_BACKEND_STREAM_PPC 4
+#endif
+
+#define HRT_MIPI_BACKEND_STREAM_BITS(sid_width, ppc, pix_width) (HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width, ppc, pix_width, (ppc - 1)) + 1)
+
+/* SP and LP LUT BIT POSITIONS */
+#define HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT 0 // 0
+#define HRT_MIPI_BACKEND_LUT_SID_LS_BIT HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT + 1 // 1
+#define HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_SID_LS_BIT + (sid_width) - 1) // 1 + (4) - 1 = 4
+#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1 // 5
+#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH - 1 // 6
+#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1 // 7
+#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH - 1 // 12
+
+/* #define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1 // 7 */
+
+#define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1
+#define HRT_MIPI_BACKEND_LP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) + 1 // 13
+
+// temp solution
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT + 1 // 8
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT + 1 // 9
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT + 1 // 10
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT + 1 // 11
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT + 1 // 12
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 25
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT + 1 // 26
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 39
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT + 1 // 40
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 53
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT + 1 // 54
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 67
+
+// vc hidden in pixb data (passed as raw12 through the pipe)
+#define HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width, ppc, pix_width) HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width, ppc, pix_width, 1) + 10 //HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + 10 // 36
+#define HRT_MIPI_BACKEND_STREAM_VC_MS_BIT(sid_width, ppc, pix_width) HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width, ppc, pix_width) + 1 // 37
+
+#endif /* _mipi_backend_defs_h */
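
To show how the parameterized streaming-bus macros above expand, here is a sketch for one hypothetical design-time configuration (4-bit SID, 2 pixels per clock, 14-bit pixels); the EX_* values are illustrative only and not taken from this patch:

/* Hypothetical design-time parameters, for this example only. */
#define EX_SID_WIDTH	4
#define EX_PPC		2
#define EX_PIX_WIDTH	14

/* Total width of the backend output streaming word for this configuration. */
static const unsigned int ex_stream_bits =
	HRT_MIPI_BACKEND_STREAM_BITS(EX_SID_WIDTH, EX_PPC, EX_PIX_WIDTH);

/* Bit range occupied by pixel 1 within that streaming word. */
static const unsigned int ex_pix1_lsb =
	HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(EX_SID_WIDTH, EX_PPC, EX_PIX_WIDTH, 1);
static const unsigned int ex_pix1_msb =
	HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(EX_SID_WIDTH, EX_PPC, EX_PIX_WIDTH, 1);
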
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h
new file mode 100644
index 000000000000..12df1ab491a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _csi_rx_defs_h
+#define _csi_rx_defs_h
+
+//#include "rx_csi_common_defs.h"
+
+#define MIPI_PKT_DATA_WIDTH 32
+//#define CLK_CROSSING_FIFO_DEPTH 16
+#define _CSI_RX_REG_ALIGN 4
+
+//define number of IRQ (see below for definition of each IRQ bits)
+#define CSI_RX_NOF_IRQS_BYTE_DOMAIN 11
+#define CSI_RX_NOF_IRQS_ISP_DOMAIN 15 // CSI_RX_NOF_IRQS_BYTE_DOMAIN + remaining from Dphy_rx already on ISP clock domain
+
+// REGISTER DESCRIPTION
+//#define _HRT_CSI_RX_SOFTRESET_REG_IDX 0
+#define _HRT_CSI_RX_ENABLE_REG_IDX 0
+#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX 1
+#define _HRT_CSI_RX_ERROR_HANDLING_REG_IDX 2
+#define _HRT_CSI_RX_STATUS_REG_IDX 3
+#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX 4
+#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX 5
+//#define _HRT_CSI_RX_IRQ_CONFIG_REG_IDX 6
+#define _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX 6
+#define _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX 7
+#define _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane_idx) (8 + (2 * lane_idx))
+#define _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane_idx) (8 + (2 * lane_idx) + 1)
+
+#define _HRT_CSI_RX_NOF_REGISTERS(nof_dlanes) (8 + 2 * (nof_dlanes))
+
+//#define _HRT_CSI_RX_SOFTRESET_REG_WIDTH 1
+#define _HRT_CSI_RX_ENABLE_REG_WIDTH 1
+#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_WIDTH 3
+#define _HRT_CSI_RX_ERROR_HANDLING_REG_WIDTH 4
+#define _HRT_CSI_RX_STATUS_REG_WIDTH 1
+#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_WIDTH 8
+#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_WIDTH 24
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_WIDTH (CSI_RX_NOF_IRQS_ISP_DOMAIN)
+#define _HRT_CSI_RX_DLY_CNT_REG_WIDTH 24
+//#define _HRT_CSI_RX_IRQ_STATUS_REG_WIDTH NOF_IRQS
+//#define _HRT_CSI_RX_IRQ_CLEAR_REG_WIDTH 0
+
+#define ONE_LANE_ENABLED 0
+#define TWO_LANES_ENABLED 1
+#define THREE_LANES_ENABLED 2
+#define FOUR_LANES_ENABLED 3
+
+// Error handling reg bit positions
+#define ERR_DECISION_BIT 0
+#define DISC_RESERVED_SP_BIT 1
+#define DISC_RESERVED_LP_BIT 2
+#define DIS_INCOMP_PKT_CHK_BIT 3
+
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_VAL_POSEDGE 0
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_VAL_ORIGINAL 1
+
+// Interrupt bits
+#define _HRT_RX_CSI_IRQ_SINGLE_PH_ERROR_CORRECTED 0
+#define _HRT_RX_CSI_IRQ_MULTIPLE_PH_ERROR_DETECTED 1
+#define _HRT_RX_CSI_IRQ_PAYLOAD_CHECKSUM_ERROR 2
+#define _HRT_RX_CSI_IRQ_FIFO_FULL_ERROR 3
+#define _HRT_RX_CSI_IRQ_RESERVED_SP_DETECTED 4
+#define _HRT_RX_CSI_IRQ_RESERVED_LP_DETECTED 5
+//#define _HRT_RX_CSI_IRQ_PREMATURE_SOP 6
+#define _HRT_RX_CSI_IRQ_INCOMPLETE_PACKET 6
+#define _HRT_RX_CSI_IRQ_FRAME_SYNC_ERROR 7
+#define _HRT_RX_CSI_IRQ_LINE_SYNC_ERROR 8
+#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_ERROR 9
+#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_SYNC_ERROR 10
+
+#define _HRT_RX_CSI_IRQ_DLANE_ESC_ERROR 11
+#define _HRT_RX_CSI_IRQ_DLANE_TRIGGERESC 12
+#define _HRT_RX_CSI_IRQ_DLANE_ULPSESC 13
+#define _HRT_RX_CSI_IRQ_CLANE_ULPSCLKNOT 14
+
+/* OLD ARASAN FRONTEND IRQs
+#define _HRT_RX_CSI_IRQ_OVERRUN_BIT 0
+#define _HRT_RX_CSI_IRQ_RESERVED_BIT 1
+#define _HRT_RX_CSI_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_RX_CSI_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_RX_CSI_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_RX_CSI_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_RX_CSI_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_RX_CSI_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_RX_CSI_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_RX_CSI_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_RX_CSI_IRQ_ERR_CRC_BIT 10
+#define _HRT_RX_CSI_IRQ_ERR_ID_BIT 11
+#define _HRT_RX_CSI_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_RX_CSI_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_RX_CSI_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_RX_CSI_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_RX_CSI_IRQ_ERR_LINE_SYNC_BIT 16
+*/
+
+////Bit Description for reg _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE0 0
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE1 1
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE2 2
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE3 3
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE0 4
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE1 5
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE2 6
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE3 7
+
+////Bit Description for reg _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE0 0
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE1 1
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE2 2
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE3 3
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE0 4
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE0 5
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE0 6
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE0 7
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE1 8
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE1 9
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE1 10
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE1 11
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE2 12
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE2 13
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE2 14
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE2 15
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE3 16
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE3 17
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE3 18
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE3 19
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE0 20
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE1 21
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE2 22
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE3 23
+
+/*********************************************************/
+/*** Relevant declarations from rx_csi_common_defs.h *****/
+/*********************************************************/
+/* packet bit definition */
+#define _HRT_RX_CSI_PKT_SOP_BITPOS 32
+#define _HRT_RX_CSI_PKT_EOP_BITPOS 33
+#define _HRT_RX_CSI_PKT_PAYLOAD_BITPOS 0
+#define _HRT_RX_CSI_PH_CH_ID_BITPOS 22
+#define _HRT_RX_CSI_PH_FMT_ID_BITPOS 16
+#define _HRT_RX_CSI_PH_DATA_FIELD_BITPOS 0
+
+#define _HRT_RX_CSI_PKT_SOP_BITS 1
+#define _HRT_RX_CSI_PKT_EOP_BITS 1
+#define _HRT_RX_CSI_PKT_PAYLOAD_BITS 32
+#define _HRT_RX_CSI_PH_CH_ID_BITS 2
+#define _HRT_RX_CSI_PH_FMT_ID_BITS 6
+#define _HRT_RX_CSI_PH_DATA_FIELD_BITS 16
+
+/* Definition of data format ID at the interface CSS_receiver units */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+
+#endif /* _csi_rx_defs_h */
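
A sketch of how the per-lane delay register indices above might be walked when programming the receiver; csi_rx_reg_store() is a hypothetical register writer (the real accessor is not part of these headers) and the termen/settle values are placeholders:

#include <linux/types.h>

/* Hypothetical accessor, for this sketch only. */
void csi_rx_reg_store(unsigned int reg_idx, u32 value);

static void ex_csi_rx_program_dlane_delays(unsigned int nof_dlanes,
					   u32 termen, u32 settle)
{
	unsigned int lane;

	for (lane = 0; lane < nof_dlanes; lane++) {
		csi_rx_reg_store(_HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane),
				 termen);
		csi_rx_reg_store(_HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane),
				 settle);
	}
}
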
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h
new file mode 100644
index 000000000000..495db4770e84
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _STREAM2MMMIO_DEFS_H
+#define _STREAM2MMMIO_DEFS_H
+
+#include <mipi_backend_defs.h>
+
+#define _STREAM2MMIO_REG_ALIGN 4
+
+#define _STREAM2MMIO_COMMAND_REG_ID 0
+#define _STREAM2MMIO_ACKNOWLEDGE_REG_ID 1
+#define _STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2
+#define _STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address, NOT a byte address */
+#define _STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address, NOT a byte address */
+#define _STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words; increment is per packet for long SIDs, stride is not used for short SIDs */
+#define _STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for store packets cmd, number of words for store_words cmd */
+#define _STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, input will be stalled if there is no pending command for this sid */
+#define _STREAM2MMIO_REGS_PER_SID 8
+
+#define _STREAM2MMIO_SID_REG_OFFSET 8
+#define _STREAM2MMIO_MAX_NOF_SIDS 64 /* value used in hss model */
+
+/* command token definition */
+#define _STREAM2MMIO_CMD_TOKEN_CMD_LSB 0 /* bits 1-0 is for the command field */
+#define _STREAM2MMIO_CMD_TOKEN_CMD_MSB 1
+
+#define _STREAM2MMIO_CMD_TOKEN_WIDTH (_STREAM2MMIO_CMD_TOKEN_CMD_MSB + 1 - _STREAM2MMIO_CMD_TOKEN_CMD_LSB)
+
+#define _STREAM2MMIO_CMD_TOKEN_STORE_WORDS 0 /* command for storing a number of output words indicated by reg _STREAM2MMIO_NUM_ITEMS */
+#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1 /* command for storing a number of packets indicated by reg _STREAM2MMIO_NUM_ITEMS */
+#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2 /* command for waiting for a frame start */
+
+/* acknowledges from packer module */
+/* fields: eof - indicates whether last (short) packet received was an eof packet */
+/* eop - indicates whether the command ended due to a packet end or because the requested number of words has been received */
+/* count - indicates number of words stored */
+#define _STREAM2MMIO_PACK_NUM_ITEMS_BITS 16
+#define _STREAM2MMIO_PACK_ACK_EOP_BIT _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _STREAM2MMIO_PACK_ACK_EOF_BIT (_STREAM2MMIO_PACK_ACK_EOP_BIT + 1)
+
+/* acknowledge token definition */
+#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_LSB 0 /* bits 15-0 hold the number of items */
+#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_MSB (_STREAM2MMIO_PACK_NUM_ITEMS_BITS - 1)
+#define _STREAM2MMIO_ACK_TOKEN_EOP_BIT _STREAM2MMIO_PACK_ACK_EOP_BIT
+#define _STREAM2MMIO_ACK_TOKEN_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT
+#define _STREAM2MMIO_ACK_TOKEN_VALID_BIT (_STREAM2MMIO_ACK_TOKEN_EOF_BIT + 1) /* this bit indicates a valid ack */
+/* if there is no valid ack, a read */
+/* on the ack register returns 0 */
+#define _STREAM2MMIO_ACK_TOKEN_WIDTH (_STREAM2MMIO_ACK_TOKEN_VALID_BIT + 1)
+
+/* commands for packer module */
+#define _STREAM2MMIO_PACK_CMD_STORE_WORDS 0
+#define _STREAM2MMIO_PACK_CMD_STORE_LONG_PACKET 1
+#define _STREAM2MMIO_PACK_CMD_STORE_SHORT_PACKET 2
+
+#endif /* _STREAM2MMIO_DEFS_H */
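
As an aside, the acknowledge-token bit positions above could be decoded as follows; the helper names are illustrative, not part of this patch:

#include <linux/bits.h>
#include <linux/types.h>

/* Number of items (words or packets) reported in the acknowledge token. */
static inline u32 stream2mmio_ack_num_items(u32 ack)
{
	return (ack & GENMASK(_STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_MSB,
			      _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_LSB))
		>> _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_LSB;
}

/* A read of 0 means no valid acknowledge is pending (see comment above). */
static inline bool stream2mmio_ack_is_valid(u32 ack)
{
	return ack & BIT(_STREAM2MMIO_ACK_TOKEN_VALID_BIT);
}

static inline bool stream2mmio_ack_is_eof(u32 ack)
{
	return ack & BIT(_STREAM2MMIO_ACK_TOKEN_EOF_BIT);
}
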
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h
new file mode 100644
index 000000000000..cab7236dc292
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IBUF_CTRL_GLOBAL_H_INCLUDED__
+#define __IBUF_CTRL_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#include <ibuf_cntrl_defs.h> /* _IBUF_CNTRL_RECALC_WORDS_STATUS,
+ * _IBUF_CNTRL_ARBITERS_STATUS,
+ * _IBUF_CNTRL_PROC_REG_ALIGN,
+ * etc.
+ */
+
+/* The definition of the main controller state register contents is missing
+ * from ibuf_cntrl_defs.h, so define them here:
+ */
+#define _IBUF_CNTRL_MAIN_CNTRL_FSM_MASK 0xf
+#define _IBUF_CNTRL_MAIN_CNTRL_FSM_NEXT_COMMAND_CHECK 0x9
+#define _IBUF_CNTRL_MAIN_CNTRL_MEM_INP_BUF_ALLOC BIT(8)
+#define _IBUF_CNTRL_DMA_SYNC_WAIT_FOR_SYNC 1
+#define _IBUF_CNTRL_DMA_SYNC_FSM_WAIT_FOR_ACK (0x3 << 1)
+
+struct isp2401_ib_buffer_s {
+ u32 start_addr; /* start address of the buffer in the
+ * "input-buffer hardware block"
+ */
+
+ u32 stride; /* stride per buffer line (in bytes) */
+ u32 lines; /* lines in the buffer */
+};
+typedef struct isp2401_ib_buffer_s isp2401_ib_buffer_t;
+
+typedef struct ibuf_ctrl_cfg_s ibuf_ctrl_cfg_t;
+struct ibuf_ctrl_cfg_s {
+ bool online;
+
+ struct {
+ /* DMA configuration */
+ u32 channel;
+ u32 cmd; /* must be _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND */
+
+ /* DMA reconfiguration */
+ u32 shift_returned_items;
+ u32 elems_per_word_in_ibuf;
+ u32 elems_per_word_in_dest;
+ } dma_cfg;
+
+ isp2401_ib_buffer_t ib_buffer;
+
+ struct {
+ u32 stride;
+ u32 start_addr;
+ u32 lines;
+ } dest_buf_cfg;
+
+ u32 items_per_store;
+ u32 stores_per_frame;
+
+ struct {
+ u32 sync_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME */
+ u32 store_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS */
+ } stream2mmio_cfg;
+};
+
+extern const u32 N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID];
+
+#endif /* __IBUF_CTRL_GLOBAL_H_INCLUDED__ */
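
A hedged sketch of filling ibuf_ctrl_cfg_t as the struct comments require; the DMA channel and buffer geometry are placeholders, _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND lives in a DMA header outside this hunk, and the stream2mmio command tokens come from stream2mmio_defs.h:

static void ex_ibuf_ctrl_fill_cfg(ibuf_ctrl_cfg_t *cfg,
				  const isp2401_ib_buffer_t *buf)
{
	cfg->online = true;

	/* Placeholder DMA routing; cfg->dma_cfg.cmd must be set to
	 * _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND per the comment above.
	 */
	cfg->dma_cfg.channel = 0;

	cfg->ib_buffer = *buf;

	/* The stream2mmio commands are fixed by the struct comments above. */
	cfg->stream2mmio_cfg.sync_cmd  = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME;
	cfg->stream2mmio_cfg.store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS;

	cfg->items_per_store  = 1;	/* placeholder geometry */
	cfg->stores_per_frame = 1;
}
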
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h
new file mode 100644
index 000000000000..de89bc4829f0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_DMA_GLOBAL_H_INCLUDED__
+#define __ISYS_DMA_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define HIVE_ISYS2401_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS2401_DMA_IBUF_VMEM_CONN 1
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND
+#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND
+
+/********************************************************
+ *
+ * DMA Port.
+ *
+ * The DMA port definition for the input system
+ * 2401 DMA is the duplication of the DMA port
+ * definition for the CSS system DMA. It is duplicated
+ * here just as a temporary step before the device library
+ * is available. The device library is supposed to provide
+ * the capability of reusing the control interface of the
+ * same device prototypes. The refactor team will work on
+ * this, right?
+ *
+ ********************************************************/
+typedef struct isys2401_dma_port_cfg_s isys2401_dma_port_cfg_t;
+struct isys2401_dma_port_cfg_s {
+ u32 stride;
+ u32 elements;
+ u32 cropping;
+ u32 width;
+};
+
+/* end of DMA Port */
+
+/************************************************
+ *
+ * DMA Device.
+ *
+ * The DMA device definition for the input system
+ * 2401 DMA is the duplication of the DMA device
+ * definition for the CSS system DMA. It is duplicated
+ * here just as a temporary step before the device library
+ * is available. The device library is supposed to provide
+ * the capability of reusing the control interface of the
+ * same device prototypes. The refactor team will work on
+ * this, right?
+ *
+ ************************************************/
+typedef enum {
+ isys2401_dma_ibuf_to_ddr_connection = HIVE_ISYS2401_DMA_IBUF_DDR_CONN,
+ isys2401_dma_ibuf_to_vmem_connection = HIVE_ISYS2401_DMA_IBUF_VMEM_CONN
+} isys2401_dma_connection;
+
+typedef enum {
+ isys2401_dma_zero_extension = _DMA_ZERO_EXTEND,
+ isys2401_dma_sign_extension = _DMA_SIGN_EXTEND
+} isys2401_dma_extension;
+
+typedef struct isys2401_dma_cfg_s isys2401_dma_cfg_t;
+struct isys2401_dma_cfg_s {
+ isys2401_dma_channel channel;
+ isys2401_dma_connection connection;
+ isys2401_dma_extension extension;
+ u32 height;
+};
+
+/* end of DMA Device */
+
+/* isys2401_dma_channel limits per DMA ID */
+extern const isys2401_dma_channel
+N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID];
+
+#endif /* __ISYS_DMA_GLOBAL_H_INCLUDED__ */
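
For illustration only, a configuration for the ibuf-to-DDR path could be filled in like this; the isys2401_dma_channel type is declared in another isys header (not in this hunk), and the DDR/zero-extension choices are just example values:

static void ex_isys_dma_fill_cfg(isys2401_dma_cfg_t *cfg,
				 isys2401_dma_channel channel, u32 height)
{
	cfg->channel    = channel;
	cfg->connection = isys2401_dma_ibuf_to_ddr_connection;
	cfg->extension  = isys2401_dma_zero_extension;	/* no sign extension */
	cfg->height     = height;
}
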
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
new file mode 100644
index 000000000000..bd584ccc1a9a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_IRQ_GLOBAL_H__
+#define __ISYS_IRQ_GLOBAL_H__
+
+
+/* Register offset/index from base location */
+#define ISYS_IRQ_EDGE_REG_IDX (0)
+#define ISYS_IRQ_MASK_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 1)
+#define ISYS_IRQ_STATUS_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 2)
+#define ISYS_IRQ_CLEAR_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 3)
+#define ISYS_IRQ_ENABLE_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 4)
+#define ISYS_IRQ_LEVEL_NO_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 5)
+
+/* Register values */
+#define ISYS_IRQ_MASK_REG_VALUE (0xFFFF)
+#define ISYS_IRQ_CLEAR_REG_VALUE (0xFFFF)
+#define ISYS_IRQ_ENABLE_REG_VALUE (0xFFFF)
+
+
+#endif /* __ISYS_IRQ_GLOBAL_H__ */
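
A minimal sketch of applying these index/value pairs at initialisation time; isys_irqc_reg_store() stands in for the real register accessor, which lives elsewhere in the driver:

#include <linux/types.h>

/* Hypothetical accessor, for this sketch only. */
void isys_irqc_reg_store(unsigned int reg_idx, u32 value);

static void ex_isys_irq_init(void)
{
	isys_irqc_reg_store(ISYS_IRQ_MASK_REG_IDX, ISYS_IRQ_MASK_REG_VALUE);
	isys_irqc_reg_store(ISYS_IRQ_CLEAR_REG_IDX, ISYS_IRQ_CLEAR_REG_VALUE);
	isys_irqc_reg_store(ISYS_IRQ_ENABLE_REG_IDX, ISYS_IRQ_ENABLE_REG_VALUE);
}
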
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h
new file mode 100644
index 000000000000..c6d8d9e72981
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+typedef struct stream2mmio_cfg_s stream2mmio_cfg_t;
+struct stream2mmio_cfg_s {
+ u32 bits_per_pixel;
+ u32 enable_blocking;
+};
+
+/* Stream2MMIO limits per ID*/
+/*
+ * Stream2MMIO 0 has 8 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
+ *
+ * Stream2MMIO 1 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ *
+ * Stream2MMIO 2 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ */
+extern const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID];
+
+#endif /* __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h
new file mode 100644
index 000000000000..59e0b44bfdc3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __PIXELGEN_GLOBAL_H_INCLUDED__
+#define __PIXELGEN_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+/**
+ * Pixel-generator. ("pixelgen_global.h")
+ */
+/*
+ * Duplicates "sync_generator_cfg_t" in "input_system_global.h".
+ */
+typedef struct isp2401_sync_generator_cfg_s isp2401_sync_generator_cfg_t;
+struct isp2401_sync_generator_cfg_s {
+ u32 hblank_cycles;
+ u32 vblank_cycles;
+ u32 pixels_per_clock;
+ u32 nr_of_frames;
+ u32 pixels_per_line;
+ u32 lines_per_frame;
+};
+
+typedef enum {
+ PIXELGEN_TPG_MODE_RAMP = 0,
+ PIXELGEN_TPG_MODE_CHBO,
+ PIXELGEN_TPG_MODE_MONO,
+ N_PIXELGEN_TPG_MODE
+} pixelgen_tpg_mode_t;
+
+/*
+ * "pixelgen_tpg_cfg_t" duplicates parts of
+ * "tpg_cfg_t" in "input_system_global.h".
+ */
+typedef struct pixelgen_tpg_cfg_s pixelgen_tpg_cfg_t;
+struct pixelgen_tpg_cfg_s {
+ pixelgen_tpg_mode_t mode; /* CHBO, MONO */
+
+ struct {
+		/* used by CHBO and MONO */
+ u32 R1;
+ u32 G1;
+ u32 B1;
+
+		/* used by CHBO only */
+ u32 R2;
+ u32 G2;
+ u32 B2;
+ } color_cfg;
+
+ struct {
+ u32 h_mask; /* horizontal mask */
+ u32 v_mask; /* vertical mask */
+ u32 hv_mask; /* horizontal+vertical mask? */
+ } mask_cfg;
+
+ struct {
+ s32 h_delta; /* horizontal delta? */
+ s32 v_delta; /* vertical delta? */
+ } delta_cfg;
+
+ isp2401_sync_generator_cfg_t sync_gen_cfg;
+};
+
+/*
+ * "pixelgen_prbs_cfg_t" duplicates parts of
+ * "prbs_cfg_t" in "input_system_global.h".
+ */
+typedef struct pixelgen_prbs_cfg_s pixelgen_prbs_cfg_t;
+struct pixelgen_prbs_cfg_s {
+ s32 seed0;
+ s32 seed1;
+
+ isp2401_sync_generator_cfg_t sync_gen_cfg;
+};
+
+/* end of Pixel-generator: TPG. ("pixelgen_global.h") */
+#endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */
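
A hedged example of populating pixelgen_tpg_cfg_t for a checkerboard test pattern; every numeric value below (colours, masks, deltas, sync timing) is a placeholder chosen for illustration, not a value taken from this patch:

static void ex_pixelgen_fill_tpg_cfg(pixelgen_tpg_cfg_t *cfg)
{
	cfg->mode = PIXELGEN_TPG_MODE_CHBO;	/* checkerboard pattern */

	/* placeholder colours for the two checkerboard fields */
	cfg->color_cfg.R1 = 51;
	cfg->color_cfg.G1 = 102;
	cfg->color_cfg.B1 = 255;
	cfg->color_cfg.R2 = 0;
	cfg->color_cfg.G2 = 100;
	cfg->color_cfg.B2 = 160;

	/* placeholder masks/deltas controlling the pattern period */
	cfg->mask_cfg.h_mask  = (1 << 4) - 1;
	cfg->mask_cfg.v_mask  = (1 << 4) - 1;
	cfg->mask_cfg.hv_mask = (1 << 8) - 1;
	cfg->delta_cfg.h_delta = -2;
	cfg->delta_cfg.v_delta = 3;

	/* placeholder VGA-like sync generator timing */
	cfg->sync_gen_cfg.hblank_cycles    = 100;
	cfg->sync_gen_cfg.vblank_cycles    = 100;
	cfg->sync_gen_cfg.pixels_per_clock = 1;
	cfg->sync_gen_cfg.nr_of_frames     = 1;
	cfg->sync_gen_cfg.pixels_per_line  = 640;
	cfg->sync_gen_cfg.lines_per_frame  = 480;
}
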
diff --git a/drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h b/drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h
new file mode 100644
index 000000000000..fc733a72d1e3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH 32 /* use 32 to be compatible with the streaming monitor; MSBs of the interface are tied to '0' */
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* used reserved mipi positions for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector*/
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 44
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition for state machine of data FIFO for decode different type of data */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define BE_CUST_EN_IDX 0 /* 2bits */
+#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */
+#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */
+#define BE_CUST_DATA_STATE_WIDTH 21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
+
+/* Pixel Extractor config */
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */
+#define BE_CUST_PIX_EXT_WIDTH 29
+
+/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
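
To make the custom-decoding layout above concrete, a sketch of composing the enable word for a single MIPI data ID; ex_be_cust_en_word() is an illustrative helper and the register write itself happens elsewhere:

#include <linux/bits.h>
#include <linux/types.h>

/* Build the BE custom-decoding enable word for one 6-bit MIPI data ID. */
static inline u32 ex_be_cust_en_word(u32 mipi_data_id)
{
	return (BE_CUST_MODE_ONE << BE_CUST_EN_IDX) |
	       ((mipi_data_id & GENMASK(5, 0)) << BE_CUST_EN_DATAID_IDX);
}

/* e.g. ex_be_cust_en_word(_HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1)
 * would request custom decoding of the JPEG (user-defined type 1) stream.
 */
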
diff --git a/drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h b/drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h
new file mode 100644
index 000000000000..6dd6e7290eb8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _css_receiver_2400_defs_h_
+#define _css_receiver_2400_defs_h_
+
+#include "css_receiver_2400_common_defs.h"
+
+#define CSS_RECEIVER_DATA_WIDTH 8
+#define CSS_RECEIVER_RX_TRIG 4
+#define CSS_RECEIVER_RF_WORD 32
+#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10
+#define CSS_RECEIVER_CSI_RF_ADDR 4
+#define CSS_RECEIVER_DATA_OUT 12
+#define CSS_RECEIVER_CHN_NO 2
+#define CSS_RECEIVER_DWORD_CNT 11
+#define CSS_RECEIVER_FORMAT_TYP 5
+#define CSS_RECEIVER_HRESPONSE 2
+#define CSS_RECEIVER_STATE_WIDTH 3
+#define CSS_RECEIVER_FIFO_DAT 32
+#define CSS_RECEIVER_CNT_VAL 2
+#define CSS_RECEIVER_PRED10_VAL 10
+#define CSS_RECEIVER_PRED12_VAL 12
+#define CSS_RECEIVER_CNT_WIDTH 8
+#define CSS_RECEIVER_WORD_CNT 16
+#define CSS_RECEIVER_PIXEL_LEN 6
+#define CSS_RECEIVER_PIXEL_CNT 5
+#define CSS_RECEIVER_COMP_8_BIT 8
+#define CSS_RECEIVER_COMP_7_BIT 7
+#define CSS_RECEIVER_COMP_6_BIT 6
+
+#define CSI_CONFIG_WIDTH 4
+
+/* division of gen_short data, ch_id and fmt_type over streaming data interface */
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1)
+
+#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4
+#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4
+
+#define hrt_css_receiver_2400_4_lane_port_offset 0x100
+#define hrt_css_receiver_2400_1_lane_port_offset 0x200
+#define hrt_css_receiver_2400_2_lane_port_offset 0x300
+#define hrt_css_receiver_2400_backend_port_offset 0x100
+
+#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3
+#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4
+#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16
+#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25
+#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26
+#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27
+#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28
+
+/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16
+
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun"
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data"
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync"
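The IRQ bit indices and the cause strings above pair up one-to-one, so a status word can be turned into readable messages by testing each bit. A hedged sketch of a host-side decoder; print_rx_irq_status() is hypothetical and <stdio.h> is assumed for printf:

static void print_rx_irq_status(unsigned int status)
{
	if (status & (1U << _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT))
		printf("%s\n", _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_);
	if (status & (1U << _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT))
		printf("%s\n", _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_);
	if (status & (1U << _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT))
		printf("%s\n", _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_);
	/* ...the remaining bits follow the same pattern... */
}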
+
+/* Bits for CSI2_DEVICE_READY register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7
+
+/* Bits for CSI2_FUNC_PROG register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19
+
+/* Bits for INIT_COUNT register */
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16
+
+/* Bits for COUNT registers */
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8
+
+/* Bits for RAW16_18_DATAID register */
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6
+
+/* Bits for COMP_FORMAT register, this selects the compression data format */
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS)
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8
+
+/* Bits for COMP_PREDICT register, this selects the predictor algorithm */
+#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1
+#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2
+
+/* Number of bits used for the delay registers */
+#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8
+
+/* Bits for COMP_SCHEME register, this selects the compression scheme for a VC */
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2
+
+/* BITS for backend RAW16 and RAW 18 registers */
+
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1
+
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1
+
+/* These hsync and vsync values are for HSS simulation only */
+#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL BIT(16)
+#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL BIT(17)
+
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1)
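The backend streaming word chains its fields back to back: two CSS_RECEIVER_DATA_OUT-wide pixels, a valid bit after each, then SOP and EOP, for _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH bits in total. A sketch of packing such a word, assuming a hypothetical helper be_streaming_word():

static inline unsigned int be_streaming_word(unsigned int pix_a, int a_valid,
					     unsigned int pix_b, int b_valid,
					     int sop, int eop)
{
	const unsigned int pix_mask = (1U << CSS_RECEIVER_DATA_OUT) - 1;

	return ((pix_a & pix_mask) << _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB) |
	       ((unsigned int)!!a_valid << _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT) |
	       ((pix_b & pix_mask) << _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB) |
	       ((unsigned int)!!b_valid << _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT) |
	       ((unsigned int)!!sop << _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT) |
	       ((unsigned int)!!eop << _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT);
}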
+
+// SH Backend Register IDs
+#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */
+
+#define _HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28
+
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8
+
+#endif /* _css_receiver_2400_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/css_trace.h b/drivers/staging/media/atomisp/pci/css_trace.h
new file mode 100644
index 000000000000..5b878a6735a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/css_trace.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __CSS_TRACE_H_
+#define __CSS_TRACE_H_
+
+#include <type_support.h>
+#include "sh_css_internal.h" /* for SH_CSS_MAX_SP_THREADS */
+
+/*
+ structs and constants for tracing
+*/
+
+/* one tracer item: major, minor and counter. The counter value can be used for GP data */
+struct trace_item_t {
+ u8 major;
+ u8 minor;
+ u16 counter;
+};
+
+#define MAX_SCRATCH_DATA 4
+#define MAX_CMD_DATA 2
+
+/* trace header: holds the version and the topology of the tracer. */
+struct trace_header_t {
+ /* 1st dword: descriptor */
+ u8 version;
+ u8 max_threads;
+ u16 max_tracer_points;
+ /* 2nd field: command + data */
+ /* 2nd dword */
+ u32 command;
+ /* 3rd & 4th dword */
+ u32 data[MAX_CMD_DATA];
+ /* 3rd field: debug pointer */
+ /* 5th & 6th dword: debug pointer mechanism */
+ u32 debug_ptr_signature;
+ u32 debug_ptr_value;
+ /* Rest of the header: status & scratch data */
+ u8 thr_status_byte[SH_CSS_MAX_SP_THREADS];
+ u16 thr_status_word[SH_CSS_MAX_SP_THREADS];
+ u32 thr_status_dword[SH_CSS_MAX_SP_THREADS];
+ u32 scratch_debug[MAX_SCRATCH_DATA];
+};
+
+/* offsets for master_port read/write */
+#define HDR_HDR_OFFSET 0 /* offset of the header */
+#define HDR_COMMAND_OFFSET offsetof(struct trace_header_t, command)
+#define HDR_DATA_OFFSET offsetof(struct trace_header_t, data)
+#define HDR_DEBUG_SIGNATURE_OFFSET offsetof(struct trace_header_t, debug_ptr_signature)
+#define HDR_DEBUG_POINTER_OFFSET offsetof(struct trace_header_t, debug_ptr_value)
+#define HDR_STATUS_OFFSET offsetof(struct trace_header_t, thr_status_byte)
+#define HDR_STATUS_OFFSET_BYTE offsetof(struct trace_header_t, thr_status_byte)
+#define HDR_STATUS_OFFSET_WORD offsetof(struct trace_header_t, thr_status_word)
+#define HDR_STATUS_OFFSET_DWORD offsetof(struct trace_header_t, thr_status_dword)
+#define HDR_STATUS_OFFSET_SCRATCH offsetof(struct trace_header_t, scratch_debug)
+
+/*
+Trace version history:
+ 1: initial version, hdr = descr, command & ptr.
+ 2: added ISP + 24-bit fields.
+ 3: added thread ID.
+ 4: added status in header.
+*/
+#define TRACER_VER 4
+
+#define TRACE_BUFF_ADDR 0xA000
+#define TRACE_BUFF_SIZE 0x1000 /* 4K allocated */
+
+#define TRACE_ENABLE_SP0 0
+#define TRACE_ENABLE_SP1 0
+#define TRACE_ENABLE_ISP 0
+
+enum TRACE_CORE_ID {
+ TRACE_SP0_ID,
+ TRACE_SP1_ID,
+ TRACE_ISP_ID
+};
+
+/* TODO: add timing format? */
+enum TRACE_DUMP_FORMAT {
+ TRACE_DUMP_FORMAT_POINT_NO_TID,
+ TRACE_DUMP_FORMAT_VALUE24,
+ TRACE_DUMP_FORMAT_VALUE24_TIMING,
+ TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA,
+ TRACE_DUMP_FORMAT_POINT
+};
+
+/* currently divided as follows: */
+#if (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 3)
+/* can be divided as needed */
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE / 4)
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE / 4)
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE / 2)
+#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 2)
+#if TRACE_ENABLE_SP0
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE / 2)
+#else
+#define TRACE_SP0_SIZE (0)
+#endif
+#if TRACE_ENABLE_SP1
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE / 2)
+#else
+#define TRACE_SP1_SIZE (0)
+#endif
+#if TRACE_ENABLE_ISP
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE / 2)
+#else
+#define TRACE_ISP_SIZE (0)
+#endif
+#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 1)
+#if TRACE_ENABLE_SP0
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_SP0_SIZE (0)
+#endif
+#if TRACE_ENABLE_SP1
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_SP1_SIZE (0)
+#endif
+#if TRACE_ENABLE_ISP
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_ISP_SIZE (0)
+#endif
+#else
+#define TRACE_SP0_SIZE (0)
+#define TRACE_SP1_SIZE (0)
+#define TRACE_ISP_SIZE (0)
+#endif
+
+#define TRACE_SP0_ADDR (TRACE_BUFF_ADDR)
+#define TRACE_SP1_ADDR (TRACE_SP0_ADDR + TRACE_SP0_SIZE)
+#define TRACE_ISP_ADDR (TRACE_SP1_ADDR + TRACE_SP1_SIZE)
+
+/* check if it's a legal division */
+#if (TRACE_BUFF_SIZE < TRACE_SP0_SIZE + TRACE_SP1_SIZE + TRACE_ISP_SIZE)
+#error trace sizes are not divided correctly and are above limit
+#endif
+
+#define TRACE_SP0_HEADER_ADDR (TRACE_SP0_ADDR)
+#define TRACE_SP0_HEADER_SIZE (sizeof(struct trace_header_t))
+#define TRACE_SP0_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP0_DATA_ADDR (TRACE_SP0_HEADER_ADDR + TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_DATA_SIZE (TRACE_SP0_SIZE - TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_MAX_POINTS (TRACE_SP0_DATA_SIZE / TRACE_SP0_ITEM_SIZE)
+
+#define TRACE_SP1_HEADER_ADDR (TRACE_SP1_ADDR)
+#define TRACE_SP1_HEADER_SIZE (sizeof(struct trace_header_t))
+#define TRACE_SP1_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP1_DATA_ADDR (TRACE_SP1_HEADER_ADDR + TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_DATA_SIZE (TRACE_SP1_SIZE - TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_MAX_POINTS (TRACE_SP1_DATA_SIZE / TRACE_SP1_ITEM_SIZE)
+
+#define TRACE_ISP_HEADER_ADDR (TRACE_ISP_ADDR)
+#define TRACE_ISP_HEADER_SIZE (sizeof(struct trace_header_t))
+#define TRACE_ISP_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_ISP_DATA_ADDR (TRACE_ISP_HEADER_ADDR + TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_DATA_SIZE (TRACE_ISP_SIZE - TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_MAX_POINTS (TRACE_ISP_DATA_SIZE / TRACE_ISP_ITEM_SIZE)
+
+/* common majors */
+/* SP0 */
+#define MAJOR_MAIN 1
+#define MAJOR_ISP_STAGE_ENTRY 2
+#define MAJOR_DMA_PRXY 3
+#define MAJOR_START_ISP 4
+/* SP1 */
+#define MAJOR_OBSERVER_ISP0_EVENT 21
+#define MAJOR_OBSERVER_OUTPUT_FORM_EVENT 22
+#define MAJOR_OBSERVER_OUTPUT_SCAL_EVENT 23
+#define MAJOR_OBSERVER_IF_ACK 24
+#define MAJOR_OBSERVER_SP0_EVENT 25
+#define MAJOR_OBSERVER_SP_TERMINATE_EVENT 26
+#define MAJOR_OBSERVER_DMA_ACK 27
+#define MAJOR_OBSERVER_ACC_ACK 28
+
+#define DEBUG_PTR_SIGNATURE 0xABCD /* signature for the debug parameter pointer */
+
+/* command codes (1st byte) */
+typedef enum {
+ CMD_SET_ONE_MAJOR = 1, /* mask in one major. 2nd byte in the command is the major code */
+ CMD_UNSET_ONE_MAJOR = 2, /* mask out one major. 2nd byte in the command is the major code */
+ CMD_SET_ALL_MAJORS = 3, /* set the major print mask. the full mask is in the data DWORD */
+ CMD_SET_VERBOSITY = 4 /* set verbosity level */
+} DBG_commands;
+
+/* command signature */
+#define CMD_SIGNATURE 0xAABBCC00
+
+/* shared macros in traces infrastructure */
+/* increment the pointer cyclicly */
+#define DBG_NEXT_ITEM(x, max_items) (((x + 1) >= max_items) ? 0 : x + 1)
+#define DBG_PREV_ITEM(x, max_items) ((x) ? x - 1 : max_items - 1)
+
+#define FIELD_MASK(width) (((1 << (width)) - 1))
+#define FIELD_PACK(value, mask, offset) (((value) & (mask)) << (offset))
+#define FIELD_UNPACK(value, mask, offset) (((value) >> (offset)) & (mask))
+
+#define FIELD_VALUE_OFFSET (0)
+#define FIELD_VALUE_WIDTH (16)
+#define FIELD_VALUE_MASK FIELD_MASK(FIELD_VALUE_WIDTH)
+#define FIELD_VALUE_PACK(f) FIELD_PACK(f, FIELD_VALUE_MASK, FIELD_VALUE_OFFSET)
+#define FIELD_VALUE_UNPACK(f) FIELD_UNPACK(f, FIELD_VALUE_MASK, FIELD_VALUE_OFFSET)
+
+#define FIELD_MINOR_OFFSET (FIELD_VALUE_OFFSET + FIELD_VALUE_WIDTH)
+#define FIELD_MINOR_WIDTH (8)
+#define FIELD_MINOR_MASK FIELD_MASK(FIELD_MINOR_WIDTH)
+#define FIELD_MINOR_PACK(f) FIELD_PACK(f, FIELD_MINOR_MASK, FIELD_MINOR_OFFSET)
+#define FIELD_MINOR_UNPACK(f) FIELD_UNPACK(f, FIELD_MINOR_MASK, FIELD_MINOR_OFFSET)
+
+#define FIELD_MAJOR_OFFSET (FIELD_MINOR_OFFSET + FIELD_MINOR_WIDTH)
+#define FIELD_MAJOR_WIDTH (5)
+#define FIELD_MAJOR_MASK FIELD_MASK(FIELD_MAJOR_WIDTH)
+#define FIELD_MAJOR_PACK(f) FIELD_PACK(f, FIELD_MAJOR_MASK, FIELD_MAJOR_OFFSET)
+#define FIELD_MAJOR_UNPACK(f) FIELD_UNPACK(f, FIELD_MAJOR_MASK, FIELD_MAJOR_OFFSET)
+
+/* for quick traces - only insertion, compatible with the regular point */
+#define FIELD_FULL_MAJOR_WIDTH (8)
+#define FIELD_FULL_MAJOR_MASK FIELD_MASK(FIELD_FULL_MAJOR_WIDTH)
+#define FIELD_FULL_MAJOR_PACK(f) FIELD_PACK(f, FIELD_FULL_MAJOR_MASK, FIELD_MAJOR_OFFSET)
+
+/* The following 2 fields are used only when the FIELD_TID value is 111b.
+ * It means we don't want to use a thread id, but a format. In this case,
+ * the 2 MSB bits of the major field indicate the format.
+ */
+#define FIELD_MAJOR_W_FMT_OFFSET FIELD_MAJOR_OFFSET
+#define FIELD_MAJOR_W_FMT_WIDTH (3)
+#define FIELD_MAJOR_W_FMT_MASK FIELD_MASK(FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_MAJOR_W_FMT_PACK(f) FIELD_PACK(f, FIELD_MAJOR_W_FMT_MASK, FIELD_MAJOR_W_FMT_OFFSET)
+#define FIELD_MAJOR_W_FMT_UNPACK(f) FIELD_UNPACK(f, FIELD_MAJOR_W_FMT_MASK, FIELD_MAJOR_W_FMT_OFFSET)
+
+#define FIELD_FORMAT_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_FORMAT_WIDTH (2)
+#define FIELD_FORMAT_MASK FIELD_MASK(FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_FORMAT_PACK(f) FIELD_PACK(f, FIELD_FORMAT_MASK, FIELD_FORMAT_OFFSET)
+#define FIELD_FORMAT_UNPACK(f) FIELD_UNPACK(f, FIELD_FORMAT_MASK, FIELD_FORMAT_OFFSET)
+
+#define FIELD_TID_SEL_FORMAT_PAT (7)
+
+#define FIELD_TID_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_WIDTH)
+#define FIELD_TID_WIDTH (3)
+#define FIELD_TID_MASK FIELD_MASK(FIELD_TID_WIDTH)
+#define FIELD_TID_PACK(f) FIELD_PACK(f, FIELD_TID_MASK, FIELD_TID_OFFSET)
+#define FIELD_TID_UNPACK(f) FIELD_UNPACK(f, FIELD_TID_MASK, FIELD_TID_OFFSET)
+
+#define FIELD_VALUE_24_OFFSET (0)
+#define FIELD_VALUE_24_WIDTH (24)
+#define FIELD_VALUE_24_MASK FIELD_MASK(FIELD_VALUE_24_WIDTH)
+#define FIELD_VALUE_24_PACK(f) FIELD_PACK(f, FIELD_VALUE_24_MASK, FIELD_VALUE_24_OFFSET)
+#define FIELD_VALUE_24_UNPACK(f) FIELD_UNPACK(f, FIELD_VALUE_24_MASK, FIELD_VALUE_24_OFFSET)
+
+#define PACK_TRACEPOINT(tid, major, minor, value) \
+ (FIELD_TID_PACK(tid) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value))
+
+#define PACK_QUICK_TRACEPOINT(major, minor) \
+ (FIELD_FULL_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor))
+
+#define PACK_FORMATTED_TRACEPOINT(format, major, minor, value) \
+ (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value))
+
+#define PACK_TRACE_VALUE24(major, value) \
+ (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_MAJOR_PACK(major) | FIELD_VALUE_24_PACK(value))
+
+#endif /* __CSS_TRACE_H_ */
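The same FIELD_* masks that build a trace word also recover its fields when the buffer is read back. A minimal sketch of a host-side decoder for a regular (non-formatted) trace point; decode_tracepoint() is a hypothetical helper, not part of the header above:

static inline void decode_tracepoint(unsigned int word, unsigned int *tid,
				     unsigned int *major, unsigned int *minor,
				     unsigned int *value)
{
	*tid   = FIELD_TID_UNPACK(word);
	*major = FIELD_MAJOR_UNPACK(word);
	*minor = FIELD_MINOR_UNPACK(word);
	*value = FIELD_VALUE_UNPACK(word);
}

/* e.g. unsigned int word = PACK_TRACEPOINT(2, MAJOR_MAIN, 7, 0x1234); */
/*      decode_tracepoint(word, &tid, &major, &minor, &value);        */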
diff --git a/drivers/staging/media/atomisp/pci/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/dma_v2_defs.h
new file mode 100644
index 000000000000..79cbdc7242d5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/dma_v2_defs.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _dma_v2_defs_h
+#define _dma_v2_defs_h
+
+#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels
+#define _DMA_V2_CONNECTIONS_ID Connections
+#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths
+#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth
+#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat
+#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass
+#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst
+#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept
+#define _DMA_V2_DEV_SRMD_ID DevSRMD
+#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters
+#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth
+#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth
+#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat
+#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass
+#define _DMA_V2_NO_PACK_ID has_no_pack
+
+#define _DMA_V2_REG_ALIGN 4
+#define _DMA_V2_REG_ADDR_BITS 2
+
+/* Command word */
+#define _DMA_V2_CMD_IDX 0
+#define _DMA_V2_CMD_BITS 6
+#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS)
+#define _DMA_V2_CHANNEL_BITS 5
+
+/* The command to set a parameter contains the PARAM field next */
+#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_PARAM_BITS 4
+
+/* Commands to read, write or init specific blocks contain these
+ three values */
+#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_SPEC_DEV_A_XB_BITS 8
+#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS)
+#define _DMA_V2_SPEC_DEV_B_XB_BITS 8
+#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS)
+#define _DMA_V2_SPEC_YB_BITS (32 - _DMA_V2_SPEC_DEV_B_XB_BITS - _DMA_V2_SPEC_DEV_A_XB_BITS - _DMA_V2_CMD_BITS - _DMA_V2_CHANNEL_BITS)
+
+/* */
+#define _DMA_V2_CMD_CTRL_IDX 4
+#define _DMA_V2_CMD_CTRL_BITS 4
+
+/* Packing setup word */
+#define _DMA_V2_CONNECTION_IDX 0
+#define _DMA_V2_CONNECTION_BITS 4
+#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + _DMA_V2_CONNECTION_BITS)
+#define _DMA_V2_EXTENSION_BITS 1
+
+/* Elements packing word */
+#define _DMA_V2_ELEMENTS_IDX 0
+#define _DMA_V2_ELEMENTS_BITS 8
+#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS)
+#define _DMA_V2_LEFT_CROPPING_BITS 8
+
+#define _DMA_V2_WIDTH_IDX 0
+#define _DMA_V2_WIDTH_BITS 16
+
+#define _DMA_V2_HEIGHT_IDX 0
+#define _DMA_V2_HEIGHT_BITS 16
+
+#define _DMA_V2_STRIDE_IDX 0
+#define _DMA_V2_STRIDE_BITS 32
+
+/* Command IDs */
+#define _DMA_V2_MOVE_B2A_COMMAND 0
+#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1
+#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2
+#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3
+#define _DMA_V2_MOVE_A2B_COMMAND 4
+#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5
+#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6
+#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7
+#define _DMA_V2_INIT_A_COMMAND 8
+#define _DMA_V2_INIT_A_BLOCK_COMMAND 9
+#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10
+#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11
+#define _DMA_V2_INIT_B_COMMAND 12
+#define _DMA_V2_INIT_B_BLOCK_COMMAND 13
+#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14
+#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15
+#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32
+#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33
+#define _DMA_V2_SET_CRUN_COMMAND 62
+
+/* Channel Parameter IDs */
+#define _DMA_V2_PACKING_SETUP_PARAM 0
+#define _DMA_V2_STRIDE_A_PARAM 1
+#define _DMA_V2_ELEM_CROPPING_A_PARAM 2
+#define _DMA_V2_WIDTH_A_PARAM 3
+#define _DMA_V2_STRIDE_B_PARAM 4
+#define _DMA_V2_ELEM_CROPPING_B_PARAM 5
+#define _DMA_V2_WIDTH_B_PARAM 6
+#define _DMA_V2_HEIGHT_PARAM 7
+#define _DMA_V2_QUEUED_CMDS 8
+
+/* Parameter Constants */
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+/* SLAVE address map */
+#define _DMA_V2_SEL_FSM_CMD 0
+#define _DMA_V2_SEL_CH_REG 1
+#define _DMA_V2_SEL_CONN_GROUP 2
+#define _DMA_V2_SEL_DEV_INTERF 3
+
+#define _DMA_V2_ADDR_SEL_COMP_IDX 12
+#define _DMA_V2_ADDR_SEL_COMP_BITS 4
+#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2
+#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6
+#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS + _DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define _DMA_V2_ADDR_SEL_PARAM_BITS 4
+
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4
+
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX + _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4
+
+#define _DMA_V2_FSM_GROUP_CMD_IDX 0
+#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1
+#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2
+#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7
+
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15
+
+#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3
+
+#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4
+
+#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4
+
+#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0
+#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1
+#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2
+#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3
+#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4
+#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5
+
+#endif /* _dma_v2_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/gdc_v2_defs.h
new file mode 100644
index 000000000000..ca2fee141f76
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/gdc_v2_defs.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef HRT_GDC_v2_defs_h_
+#define HRT_GDC_v2_defs_h_
+
+#define HRT_GDC_IS_V2
+
+#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */
+#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */
+
+#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */
+#define HRT_GDC_BLI_COEF_ONE BIT(HRT_GDC_BLI_FRAC_BITS)
+
+#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */
+#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS - 2)) /* We represent signed 10-bit coefficients. */
+/* The supported range is [-256, .., +256] */
+/* in 14-bit signed notation, */
+/* We need all ten bits (MSB must be zero). */
+/* -s is inserted to solve this issue, and */
+/* therefore "1" is equal to +256. */
+#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1)
+
+#define HRT_GDC_LUT_BYTES (HRT_GDC_N * 4 * 2) /* 1024 addresses, 4 coefficients per address, */
+/* 2 bytes per coefficient */
+
+#define _HRT_GDC_REG_ALIGN 4
+
+// 31 30 29 25 24 0
+// |-----|---|--------|------------------------|
+// | CMD | C | Reg_ID | Value |
+
+// There are just two commands possible for the GDC block:
+// 1 - Configure reg
+// 0 - Data token
+
+// C - Reserved bit
+// Used in the protocol to indicate whether it is a C-run or another type of run.
+// In case of a C-run, this bit has a value of 1; for all other runs, it is 0.
+
+// Reg_ID - Address of the register to be configured
+
+// Value - Value to store to the addressed register, maximum of 24 bits
+
+// Configure reg command is not followed by any other token.
+// The address of the register and the data to be filled in is contained in the same token
+
+// When the first data token is received, it must be:
+// 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or,
+// 2. P0'X (device configured in one of the tetragon modes)
+// After the first data token is received, a pre-defined number of tokens follows, with the following meaning:
+// 1. two tokens: SRC address ; DST address
+// 2. nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address
+
+#define HRT_GDC_CONFIG_CMD 1
+#define HRT_GDC_DATA_CMD 0
+
+#define HRT_GDC_CMD_POS 31
+#define HRT_GDC_CMD_BITS 1
+#define HRT_GDC_CRUN_POS 30
+#define HRT_GDC_REG_ID_POS 25
+#define HRT_GDC_REG_ID_BITS 5
+#define HRT_GDC_DATA_POS 0
+#define HRT_GDC_DATA_BITS 25
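With the positions above, a "configure reg" token carries the command bit at bit 31, the register ID in bits 29..25 and the value in the low 25 bits. A sketch of composing one, assuming a hypothetical helper gdc_config_token():

static inline unsigned int gdc_config_token(unsigned int reg_id, unsigned int value)
{
	return ((unsigned int)HRT_GDC_CONFIG_CMD << HRT_GDC_CMD_POS) |
	       ((reg_id & ((1U << HRT_GDC_REG_ID_BITS) - 1)) << HRT_GDC_REG_ID_POS) |
	       (value & ((1U << HRT_GDC_DATA_BITS) - 1));
}

/* e.g. a token programming the output width register: */
/* unsigned int token = gdc_config_token(HRT_GDC_OXDIM_IDX, oxdim); */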
+
+#define HRT_GDC_FRYIPXFRX_BITS 26
+#define HRT_GDC_P0X_BITS 23
+
+#define HRT_GDC_MAX_OXDIM (8192 - 64)
+#define HRT_GDC_MAX_OYDIM 4095
+#define HRT_GDC_MAX_IXDIM (8192 - 64)
+#define HRT_GDC_MAX_IYDIM 4095
+#define HRT_GDC_MAX_DS_FAC 16
+#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC * HRT_GDC_N - 1)
+#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX
+
+/* GDC lookup table entries are 10-bit values, but they're
+ stored two by two as 32-bit values, yielding 16 bits per entry.
+ A GDC lookup table contains 64 * 4 elements */
+
+#define HRT_GDC_PERF_1_1_pix 0
+#define HRT_GDC_PERF_2_1_pix 1
+#define HRT_GDC_PERF_1_2_pix 2
+#define HRT_GDC_PERF_2_2_pix 3
+
+#define HRT_GDC_NND_MODE 0
+#define HRT_GDC_BLI_MODE 1
+#define HRT_GDC_BCI_MODE 2
+#define HRT_GDC_LUT_MODE 3
+
+#define HRT_GDC_SCAN_STB 0
+#define HRT_GDC_SCAN_STR 1
+
+#define HRT_GDC_MODE_SCALING 0
+#define HRT_GDC_MODE_TETRAGON 1
+
+#define HRT_GDC_LUT_COEFF_OFFSET 16
+#define HRT_GDC_FRY_BIT_OFFSET 16
+// FRYIPXFRX is the only register where we store two values in one field,
+// to save one token in the scaling protocol.
+// Like this, we have three tokens in the scaling protocol,
+// Otherwise, we would have had four.
+// The register bit-map is:
+// 31 26 25 16 15 10 9 0
+// |------|----------|------|----------|
+// | XXXX | FRY | IPX | FRX |
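Given the bit-map above (FRY in bits 25..16, IPX in bits 15..10, FRX in bits 9..0), the three scaling parameters can be folded into one register value. The IPX shift of HRT_GDC_FRAC_BITS is read off the drawing; gdc_fryipxfrx() is a hypothetical helper:

static inline unsigned int gdc_fryipxfrx(unsigned int fry, unsigned int ipx,
					 unsigned int frx)
{
	return ((fry & (HRT_GDC_N - 1)) << HRT_GDC_FRY_BIT_OFFSET) |
	       ((ipx & 0x3f) << HRT_GDC_FRAC_BITS) |
	       (frx & (HRT_GDC_N - 1));
}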
+
+#define HRT_GDC_CE_FSM0_POS 0
+#define HRT_GDC_CE_FSM0_LEN 2
+#define HRT_GDC_CE_OPY_POS 2
+#define HRT_GDC_CE_OPY_LEN 14
+#define HRT_GDC_CE_OPX_POS 16
+#define HRT_GDC_CE_OPX_LEN 16
+// CHK_ENGINE register bit-map:
+// 31 16 15 2 1 0
+// |----------------|-----------|----|
+// | OPX | OPY |FSM0|
+// However, for the time being at least,
+// this implementation is meaningless in the HSS model,
+// so we just return 0.
+
+#define HRT_GDC_CHK_ENGINE_IDX 0
+#define HRT_GDC_WOIX_IDX 1
+#define HRT_GDC_WOIY_IDX 2
+#define HRT_GDC_BPP_IDX 3
+#define HRT_GDC_FRYIPXFRX_IDX 4
+#define HRT_GDC_OXDIM_IDX 5
+#define HRT_GDC_OYDIM_IDX 6
+#define HRT_GDC_SRC_ADDR_IDX 7
+#define HRT_GDC_SRC_END_ADDR_IDX 8
+#define HRT_GDC_SRC_WRAP_ADDR_IDX 9
+#define HRT_GDC_SRC_STRIDE_IDX 10
+#define HRT_GDC_DST_ADDR_IDX 11
+#define HRT_GDC_DST_STRIDE_IDX 12
+#define HRT_GDC_DX_IDX 13
+#define HRT_GDC_DY_IDX 14
+#define HRT_GDC_P0X_IDX 15
+#define HRT_GDC_P0Y_IDX 16
+#define HRT_GDC_P1X_IDX 17
+#define HRT_GDC_P1Y_IDX 18
+#define HRT_GDC_P2X_IDX 19
+#define HRT_GDC_P2Y_IDX 20
+#define HRT_GDC_P3X_IDX 21
+#define HRT_GDC_P3Y_IDX 22
+#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc
+#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT
+#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right)
+#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon
+
+#define HRT_GDC_LUT_IDX 32
+
+#endif /* HRT_GDC_v2_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/gp_timer_defs.h
new file mode 100644
index 000000000000..4b1b8ae1c194
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/gp_timer_defs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _gp_timer_defs_h
+#define _gp_timer_defs_h
+
+#define _HRT_GP_TIMER_REG_ALIGN 4
+
+#define HIVE_GP_TIMER_RESET_REG_IDX 0
+#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1
+#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer)
+#define HIVE_GP_TIMER_VALUE_REG_IDX(timer, timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer)
+#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer, timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer, timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq, timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq, timers, irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq, timers, irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq)
+
+#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0
+#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1
+#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define HIVE_GP_TIMER_COUNT_TYPES 4
+
+#endif /* _gp_timer_defs_h */
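The index macros above lay the register file out as reset, overall enable, and then per-timer banks of enable, value, count-type and signal-select registers, each bank offset by the total number of timers. A worked example for a hypothetical block with 8 timers:

	HIVE_GP_TIMER_ENABLE_REG_IDX(3)        == 2 + 3        == 5
	HIVE_GP_TIMER_VALUE_REG_IDX(3, 8)      == (2 + 8) + 3  == 13
	HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(3, 8) == (2 + 16) + 3 == 21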
diff --git a/drivers/staging/media/atomisp/pci/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/gpio_block_defs.h
new file mode 100644
index 000000000000..a4ce5a6dbd93
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/gpio_block_defs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _gpio_block_defs_h_
+#define _gpio_block_defs_h_
+
+/* R/W registers */
+#define _gpio_block_reg_do_e 0
+#define _gpio_block_reg_do_select 1
+#define _gpio_block_reg_do_0 2
+#define _gpio_block_reg_do_1 3
+
+#endif /* _gpio_block_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
new file mode 100644
index 000000000000..15fb0ad4a5f0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __DEBUG_GLOBAL_H_INCLUDED__
+#define __DEBUG_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define DEBUG_BUF_SIZE 1024
+#define DEBUG_BUF_MASK (DEBUG_BUF_SIZE - 1)
+
+#define DEBUG_DATA_ENABLE_ADDR 0x00
+#define DEBUG_DATA_BUF_MODE_ADDR 0x04
+#define DEBUG_DATA_HEAD_ADDR 0x08
+#define DEBUG_DATA_TAIL_ADDR 0x0C
+#define DEBUG_DATA_BUF_ADDR 0x10
+
+#define DEBUG_DATA_ENABLE_DDR_ADDR 0x00
+#define DEBUG_DATA_BUF_MODE_DDR_ADDR HIVE_ISP_DDR_WORD_BYTES
+#define DEBUG_DATA_HEAD_DDR_ADDR (2 * HIVE_ISP_DDR_WORD_BYTES)
+#define DEBUG_DATA_TAIL_DDR_ADDR (3 * HIVE_ISP_DDR_WORD_BYTES)
+#define DEBUG_DATA_BUF_DDR_ADDR (4 * HIVE_ISP_DDR_WORD_BYTES)
+
+#define DEBUG_BUFFER_ISP_DMEM_ADDR 0x0
+
+/*
+ * The linear buffer mode will accept data until the first
+ * overflow and then stop accepting new data.
+ * The circular buffer mode will accept data if there is space
+ * and discard the data if the buffer is full.
+ */
+typedef enum {
+ DEBUG_BUFFER_MODE_LINEAR = 0,
+ DEBUG_BUFFER_MODE_CIRCULAR,
+ N_DEBUG_BUFFER_MODE
+} debug_buf_mode_t;
+
+struct debug_data_s {
+ u32 enable;
+ u32 bufmode;
+ u32 head;
+ u32 tail;
+ u32 buf[DEBUG_BUF_SIZE];
+};
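The head/tail indices wrap with DEBUG_BUF_MASK, which is what makes the two buffer modes above differ only in what happens on overflow. A sketch of a producer-side append, assuming a hypothetical helper debug_push() operating on the struct above:

static inline void debug_push(struct debug_data_s *d, u32 word)
{
	u32 next = (d->head + 1) & DEBUG_BUF_MASK;

	if (next == d->tail) {			/* buffer full: discard the word */
		if (d->bufmode == DEBUG_BUFFER_MODE_LINEAR)
			d->enable = 0;		/* linear mode stops accepting new data */
		return;
	}
	d->buf[d->head] = word;
	d->head = next;
}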
+
+/* thread.sp.c doesn't have a notion of HIVE_ISP_DDR_WORD_BYTES,
+ but one point of control is still needed for debug purposes */
+
+#ifdef HIVE_ISP_DDR_WORD_BYTES
+struct debug_data_ddr_s {
+ u32 enable;
+ s8 padding1[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 bufmode;
+ s8 padding2[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 head;
+ s8 padding3[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 tail;
+ s8 padding4[HIVE_ISP_DDR_WORD_BYTES - sizeof(uint32_t)];
+ u32 buf[DEBUG_BUF_SIZE];
+};
+#endif
+
+#endif /* __DEBUG_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h
new file mode 100644
index 000000000000..05b10e3d19dc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __DMA_GLOBAL_H_INCLUDED__
+#define __DMA_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_DMA_VERSION_2
+
+#define HIVE_ISP_NUM_DMA_CONNS 3
+#define HIVE_ISP_NUM_DMA_CHANNELS 32
+
+#define N_DMA_CHANNEL_ID HIVE_ISP_NUM_DMA_CHANNELS
+
+#include "dma_v2_defs.h"
+
+/*
+ * Command token bit mappings
+ *
+ * transfer / config
+ * param id[4] channel id[5] cmd id[6]
+ * | b14 .. b11 | b10 ... b6 | b5 ... b0 |
+ *
+ * fast transfer:
+ * height[5] width[8] width[8] channel id[5] cmd id[6]
+ * | b31 .. b26 | b25 .. b18 | b17 .. b11 | b10 ... b6 | b5 ... b0 |
+ *
+ */
+
+#define _DMA_PACKING_SETUP_PARAM _DMA_V2_PACKING_SETUP_PARAM
+#define _DMA_HEIGHT_PARAM _DMA_V2_HEIGHT_PARAM
+#define _DMA_STRIDE_A_PARAM _DMA_V2_STRIDE_A_PARAM
+#define _DMA_ELEM_CROPPING_A_PARAM _DMA_V2_ELEM_CROPPING_A_PARAM
+#define _DMA_WIDTH_A_PARAM _DMA_V2_WIDTH_A_PARAM
+#define _DMA_STRIDE_B_PARAM _DMA_V2_STRIDE_B_PARAM
+#define _DMA_ELEM_CROPPING_B_PARAM _DMA_V2_ELEM_CROPPING_B_PARAM
+#define _DMA_WIDTH_B_PARAM _DMA_V2_WIDTH_B_PARAM
+
+#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND
+#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND
+
+typedef unsigned int dma_channel;
+
+typedef enum {
+ dma_isp_to_bus_connection = HIVE_DMA_ISP_BUS_CONN,
+ dma_isp_to_ddr_connection = HIVE_DMA_ISP_DDR_CONN,
+ dma_bus_to_ddr_connection = HIVE_DMA_BUS_DDR_CONN,
+} dma_connection;
+
+typedef enum {
+ dma_zero_extension = _DMA_ZERO_EXTEND,
+ dma_sign_extension = _DMA_SIGN_EXTEND
+} dma_extension;
+
+#define DMA_PROP_SHIFT(val, param) ((val) << _DMA_V2_ ## param ## _IDX)
+#define DMA_PROP_MASK(param) ((1U << _DMA_V2_ ## param ## _BITS) - 1)
+#define DMA_PACK(val, param) DMA_PROP_SHIFT((val) & DMA_PROP_MASK(param), param)
+
+#define DMA_PACK_COMMAND(cmd) DMA_PACK(cmd, CMD)
+#define DMA_PACK_CHANNEL(ch) DMA_PACK(ch, CHANNEL)
+#define DMA_PACK_PARAM(par) DMA_PACK(par, PARAM)
+#define DMA_PACK_EXTENSION(ext) DMA_PACK(ext, EXTENSION)
+#define DMA_PACK_LEFT_CROPPING(lc) DMA_PACK(lc, LEFT_CROPPING)
+#define DMA_PACK_WIDTH_A(w) DMA_PACK(w, SPEC_DEV_A_XB)
+#define DMA_PACK_WIDTH_B(w) DMA_PACK(w, SPEC_DEV_B_XB)
+#define DMA_PACK_HEIGHT(h) DMA_PACK(h, SPEC_YB)
+
+#define DMA_PACK_CMD_CHANNEL(cmd, ch) (DMA_PACK_COMMAND(cmd) | DMA_PACK_CHANNEL(ch))
+#define DMA_PACK_SETUP(conn, ext) ((conn) | DMA_PACK_EXTENSION(ext))
+#define DMA_PACK_CROP_ELEMS(elems, crop) ((elems) | DMA_PACK_LEFT_CROPPING(crop))
+
+#define hive_dma_snd(dma_id, token) OP_std_snd(dma_id, (unsigned int)(token))
+
+#define DMA_PACK_BLOCK_CMD(cmd, ch, width_a, width_b, height) \
+ (DMA_PACK_COMMAND(cmd) | \
+ DMA_PACK_CHANNEL(ch) | \
+ DMA_PACK_WIDTH_A(width_a) | \
+ DMA_PACK_WIDTH_B(width_b) | \
+ DMA_PACK_HEIGHT(height))
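The packing macros above compose the block/fast-transfer token described in the bit-mapping comment near the top of this file: command, channel, the two device widths and the height are shifted into a single word and sent to the DMA. A sketch of their use, where dma_send_block_move_b2a() is a hypothetical wrapper:

static inline void dma_send_block_move_b2a(unsigned int dma_id, unsigned int ch,
					   unsigned int width_a, unsigned int width_b,
					   unsigned int height)
{
	hive_dma_snd(dma_id, DMA_PACK_BLOCK_CMD(_DMA_V2_MOVE_B2A_BLOCK_COMMAND,
						ch, width_a, width_b, height));
}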
+
+#define hive_dma_move_data(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read ? _DMA_V2_MOVE_B2A_COMMAND : _DMA_V2_MOVE_A2B_COMMAND, channel)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_b) : (unsigned int)(addr_a)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_a) : (unsigned int)(addr_b)); \
+ hive_dma_snd(dma_id, to_is_var); \
+ hive_dma_snd(dma_id, from_is_var); \
+}
+
+#define hive_dma_move_data_no_ack(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read ? _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND : _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND, channel)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_b) : (unsigned int)(addr_a)); \
+ hive_dma_snd(dma_id, read ? (unsigned int)(addr_a) : (unsigned int)(addr_b)); \
+ hive_dma_snd(dma_id, to_is_var); \
+ hive_dma_snd(dma_id, from_is_var); \
+}
+
+#define hive_dma_move_b2a_data(dma_id, channel, to_addr, from_addr, to_is_var, from_is_var) \
+{ \
+ hive_dma_move_data(dma_id, true, channel, to_addr, from_addr, to_is_var, from_is_var) \
+}
+
+#define hive_dma_move_a2b_data(dma_id, channel, from_addr, to_addr, from_is_var, to_is_var) \
+{ \
+ hive_dma_move_data(dma_id, false, channel, from_addr, to_addr, from_is_var, to_is_var) \
+}
+
+#define hive_dma_set_data(dma_id, channel, address, value, is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_INIT_A_COMMAND, channel)); \
+ hive_dma_snd(dma_id, value); \
+ hive_dma_snd(dma_id, address); \
+ hive_dma_snd(dma_id, is_var); \
+}
+
+#define hive_dma_clear_data(dma_id, channel, address, is_var) hive_dma_set_data(dma_id, channel, address, 0, is_var)
+
+#define hive_dma_configure(dma_id, channel, connection, extension, height, \
+ stride_A, elems_A, cropping_A, width_A, \
+ stride_B, elems_B, cropping_B, width_B) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \
+ hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \
+ hive_dma_snd(dma_id, stride_A); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, cropping_A)); \
+ hive_dma_snd(dma_id, width_A); \
+ hive_dma_snd(dma_id, stride_B); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, cropping_B)); \
+ hive_dma_snd(dma_id, width_B); \
+ hive_dma_snd(dma_id, height); \
+}
+
+#define hive_dma_execute(dma_id, channel, cmd, to_addr, from_addr_value, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(cmd, channel)); \
+ hive_dma_snd(dma_id, to_addr); \
+ hive_dma_snd(dma_id, from_addr_value); \
+ hive_dma_snd(dma_id, to_is_var); \
+ if ((cmd & DMA_CLEAR_CMDBIT) == 0) { \
+ hive_dma_snd(dma_id, from_is_var); \
+ } \
+}
+
+#define hive_dma_configure_fast(dma_id, channel, connection, extension, elems_A, elems_B) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \
+ hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, 0)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, 0)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, 1); \
+}
+
+#define hive_dma_set_parameter(dma_id, channel, param, value) \
+{ \
+ hive_dma_snd(dma_id, _DMA_V2_SET_CHANNEL_PARAM_COMMAND | DMA_PACK_CHANNEL(channel) | DMA_PACK_PARAM(param)); \
+ hive_dma_snd(dma_id, value); \
+}
+
+#define DMA_SPECIFIC_CMDBIT 0x01
+#define DMA_CHECK_CMDBIT 0x02
+#define DMA_RW_CMDBIT 0x04
+#define DMA_CLEAR_CMDBIT 0x08
+#define DMA_ACK_CMDBIT 0x10
+#define DMA_CFG_CMDBIT 0x20
+#define DMA_PARAM_CMDBIT 0x01
+
+/* Write complete check not necessary if there's no ack */
+#define DMA_NOACK_CMD (DMA_ACK_CMDBIT | DMA_CHECK_CMDBIT)
+#define DMA_CFG_CMD (DMA_CFG_CMDBIT)
+#define DMA_CFGPARAM_CMD (DMA_CFG_CMDBIT | DMA_PARAM_CMDBIT)
+
+#define DMA_CMD_NEEDS_ACK(cmd) ((cmd & DMA_NOACK_CMD) == 0)
+#define DMA_CMD_IS_TRANSFER(cmd) ((cmd & DMA_CFG_CMDBIT) == 0)
+#define DMA_CMD_IS_WR(cmd) ((cmd & DMA_RW_CMDBIT) != 0)
+#define DMA_CMD_IS_RD(cmd) ((cmd & DMA_RW_CMDBIT) == 0)
+#define DMA_CMD_IS_CLR(cmd) ((cmd & DMA_CLEAR_CMDBIT) != 0)
+#define DMA_CMD_IS_CFG(cmd) ((cmd & DMA_CFG_CMDBIT) != 0)
+#define DMA_CMD_IS_PARAMCFG(cmd) ((cmd & DMA_CFGPARAM_CMD) == DMA_CFGPARAM_CMD)
+
+/* As a matter of convention */
+#define DMA_TRANSFER_READ DMA_TRANSFER_B2A
+#define DMA_TRANSFER_WRITE DMA_TRANSFER_A2B
+/* store/load from the PoV of the system (memory) */
+#define DMA_TRANSFER_STORE DMA_TRANSFER_B2A
+#define DMA_TRANSFER_LOAD DMA_TRANSFER_A2B
+#define DMA_TRANSFER_CLEAR DMA_TRANSFER_CLEAR_A
+
+typedef enum {
+ DMA_TRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT, /* 8 */
+ DMA_TRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT, /* 12 */
+ DMA_TRANSFER_A2B = DMA_RW_CMDBIT, /* 4 */
+ DMA_TRANSFER_B2A = 0, /* 0 */
+ DMA_TRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD, /* 26 */
+ DMA_TRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 30 */
+ DMA_TRANSFER_A2B_NOACK = DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 22 */
+ DMA_TRANSFER_B2A_NOACK = DMA_NOACK_CMD, /* 18 */
+ DMA_FASTTRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_A2B = DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_B2A = DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_A2B_NOACK = DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_B2A_NOACK = DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+} dma_transfer_type_t;
+
+typedef enum {
+ DMA_CONFIG_SETUP = _DMA_V2_PACKING_SETUP_PARAM,
+ DMA_CONFIG_HEIGHT = _DMA_V2_HEIGHT_PARAM,
+ DMA_CONFIG_STRIDE_A_ = _DMA_V2_STRIDE_A_PARAM,
+ DMA_CONFIG_CROP_ELEM_A = _DMA_V2_ELEM_CROPPING_A_PARAM,
+ DMA_CONFIG_WIDTH_A = _DMA_V2_WIDTH_A_PARAM,
+ DMA_CONFIG_STRIDE_B_ = _DMA_V2_STRIDE_B_PARAM,
+ DMA_CONFIG_CROP_ELEM_B = _DMA_V2_ELEM_CROPPING_B_PARAM,
+ DMA_CONFIG_WIDTH_B = _DMA_V2_WIDTH_B_PARAM,
+} dma_config_type_t;
+
+struct dma_port_config {
+ u8 crop, elems;
+ u16 width;
+ u32 stride;
+};
+
+/* Descriptor for dma configuration */
+struct dma_channel_config {
+ u8 connection;
+ u8 extension;
+ u8 height;
+ struct dma_port_config a, b;
+};
+
+#endif /* __DMA_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h
new file mode 100644
index 000000000000..f4ec956b6035
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __EVENT_FIFO_GLOBAL_H
+#define __EVENT_FIFO_GLOBAL_H
+
+/*#error "event_global.h: No global event information permitted"*/
+
+#endif /* __EVENT_FIFO_GLOBAL_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h
new file mode 100644
index 000000000000..6ac588579b9d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __FIFO_MONITOR_GLOBAL_H_INCLUDED__
+#define __FIFO_MONITOR_GLOBAL_H_INCLUDED__
+
+#define IS_FIFO_MONITOR_VERSION_2
+
+/*
+#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 0
+#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1
+#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 2
+ *
+ * Actually, "HIVE_ISP_CSS_STREAM_SWITCH_SP = 1", "HIVE_ISP_CSS_STREAM_SWITCH_ISP = 0"
+ * "hive_isp_css_stream_switch_hrt.h"
+ */
+#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 0
+#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1
+#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 2
+
+#endif /* __FIFO_MONITOR_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h
new file mode 100644
index 000000000000..06f88305e18b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GDC_GLOBAL_H_INCLUDED__
+#define __GDC_GLOBAL_H_INCLUDED__
+
+#define IS_GDC_VERSION_2
+
+#include <type_support.h>
+#include "gdc_v2_defs.h"
+
+/*
+ * Storage addresses for packed data transfer
+ */
+#define GDC_PARAM_ICX_LEFT_ROUNDED_IDX 0
+#define GDC_PARAM_OXDIM_FLOORED_IDX 1
+#define GDC_PARAM_OXDIM_LAST_IDX 2
+#define GDC_PARAM_WOIX_LAST_IDX 3
+#define GDC_PARAM_IY_TOPLEFT_IDX 4
+#define GDC_PARAM_CHUNK_CNT_IDX 5
+/*#define GDC_PARAM_ELEMENTS_PER_XMEM_ADDR_IDX 6 */ /* Derived from bpp */
+#define GDC_PARAM_BPP_IDX 6
+#define GDC_PARAM_BLOCK_HEIGHT_IDX 7
+/*#define GDC_PARAM_DMA_CHANNEL_STRIDE_A_IDX 8*/ /* The DMA stride == the GDC buffer stride */
+#define GDC_PARAM_WOIX_IDX 8
+#define GDC_PARAM_DMA_CHANNEL_STRIDE_B_IDX 9
+#define GDC_PARAM_DMA_CHANNEL_WIDTH_A_IDX 10
+#define GDC_PARAM_DMA_CHANNEL_WIDTH_B_IDX 11
+#define GDC_PARAM_VECTORS_PER_LINE_IN_IDX 12
+#define GDC_PARAM_VECTORS_PER_LINE_OUT_IDX 13
+#define GDC_PARAM_VMEM_IN_DIMY_IDX 14
+#define GDC_PARAM_COMMAND_IDX 15
+#define N_GDC_PARAM 16
+
+/* Because of the packed parameter transfer max(params) == max(fragments) */
+#define N_GDC_FRAGMENTS N_GDC_PARAM
+
+/* The GDC is capable of higher internal precision than the parameter data structures */
+#define HRT_GDC_COORD_SCALE_BITS 6
+#define HRT_GDC_COORD_SCALE BIT(HRT_GDC_COORD_SCALE_BITS)
+
+typedef enum {
+ GDC_CH0_ID = 0,
+ N_GDC_CHANNEL_ID
+} gdc_channel_ID_t;
+
+typedef enum {
+ gdc_8_bpp = 8,
+ gdc_10_bpp = 10,
+ gdc_12_bpp = 12,
+ gdc_14_bpp = 14
+} gdc_bits_per_pixel_t;
+
+typedef struct gdc_scale_param_mem_s {
+ u16 params[N_GDC_PARAM];
+ u16 ipx_start_array[N_GDC_PARAM];
+ u16 ibuf_offset[N_GDC_PARAM];
+ u16 obuf_offset[N_GDC_PARAM];
+} gdc_scale_param_mem_t;
+
+typedef struct gdc_warp_param_mem_s {
+ u32 origin_x;
+ u32 origin_y;
+ u32 in_addr_offset;
+ u32 in_block_width;
+ u32 in_block_height;
+ u32 p0_x;
+ u32 p0_y;
+ u32 p1_x;
+ u32 p1_y;
+ u32 p2_x;
+ u32 p2_y;
+ u32 p3_x;
+ u32 p3_y;
+ u32 padding[3];
+} gdc_warp_param_mem_t;
+
+#endif /* __GDC_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h
new file mode 100644
index 000000000000..958837a40feb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GP_DEVICE_GLOBAL_H_INCLUDED__
+#define __GP_DEVICE_GLOBAL_H_INCLUDED__
+
+#define IS_GP_DEVICE_VERSION_2
+
+#define _REG_GP_IRQ_REQ0_ADDR 0x08
+#define _REG_GP_IRQ_REQ1_ADDR 0x0C
+/* The SP sends SW interrupt info to this register */
+#define _REG_GP_IRQ_REQUEST0_ADDR _REG_GP_IRQ_REQ0_ADDR
+#define _REG_GP_IRQ_REQUEST1_ADDR _REG_GP_IRQ_REQ1_ADDR
+
+/* The SP configures FIFO switches in these registers */
+#define _REG_GP_SWITCH_IF_ADDR 0x40
+#define _REG_GP_SWITCH_GDC1_ADDR 0x44
+#define _REG_GP_SWITCH_GDC2_ADDR 0x48
+/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */
+#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800
+#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804
+#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808
+#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C
+#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810
+#define _REG_GP_IFMT_input_switch_lut_reg5 0x00030814
+#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818
+#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C
+#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820
+#define _REG_GP_IFMT_srst 0x00030824
+#define _REG_GP_IFMT_slv_reg_srst 0x00030828
+#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C
+
+/* @ GP_DEVICE_BASE */
+#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000
+#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004
+#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008
+#define _REG_GP_NR_FRAMES_ADDR 0x0009000C
+#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010
+#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014
+#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018
+#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C
+#define _REG_GP_ISEL_SOF_ADDR 0x00090020
+#define _REG_GP_ISEL_EOF_ADDR 0x00090024
+#define _REG_GP_ISEL_SOL_ADDR 0x00090028
+#define _REG_GP_ISEL_EOL_ADDR 0x0009002C
+#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030
+#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034
+#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038
+#define _REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C
+#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040
+#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044
+#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048
+#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C
+#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050
+#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054
+#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058
+#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C
+#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060
+#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064
+#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068
+#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C
+#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070
+#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074
+#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078
+#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C
+#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080
+#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084
+#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088
+#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C
+#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090
+#define _REG_GP_SOFT_RESET_ADDR 0x00090094
+
+#endif /* __GP_DEVICE_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h
new file mode 100644
index 000000000000..1bc698530e4c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GP_TIMER_GLOBAL_H_INCLUDED__
+#define __GP_TIMER_GLOBAL_H_INCLUDED__
+
+#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ */
+
+/* from gp_timer_defs.h */
+#define GP_TIMER_COUNT_TYPE_HIGH 0
+#define GP_TIMER_COUNT_TYPE_LOW 1
+#define GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define GP_TIMER_COUNT_TYPE_TYPES 4
+
+/* timer 3 is selected */
+#define GP_TIMER_SEL 3
+
+/* HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ is selected */
+#define GP_TIMER_SIGNAL_SELECT HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ
+
+#endif /* __GP_TIMER_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h
new file mode 100644
index 000000000000..a756f175362d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GPIO_GLOBAL_H_INCLUDED__
+#define __GPIO_GLOBAL_H_INCLUDED__
+
+#include <gpio_block_defs.h>
+
+#define HIVE_GPIO_STROBE_TRIGGER_PIN 2
+
+#endif /* __GPIO_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h
new file mode 100644
index 000000000000..0cbd06b50bba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __HMEM_GLOBAL_H_INCLUDED__
+#define __HMEM_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_HMEM_VERSION_1
+
+#include "isp.h"
+
+/*
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+*/
+#define ISP_HIST_ALIGNMENT_LOG2 2
+
+#define HMEM_SIZE_LOG2 (ISP_HIST_ADDRESS_BITS - ISP_HIST_ALIGNMENT_LOG2)
+#define HMEM_SIZE ISP_HIST_DEPTH
+
+#define HMEM_UNIT_SIZE (HMEM_SIZE / ISP_HIST_COMPONENTS)
+#define HMEM_UNIT_COUNT ISP_HIST_COMPONENTS
+
+#define HMEM_RANGE_LOG2 ISP_HIST_WIDTH
+#define HMEM_RANGE BIT(HMEM_RANGE_LOG2)
+
+typedef u32 hmem_data_t;
+
+#endif /* __HMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c
new file mode 100644
index 000000000000..8513e78856b2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2016, Intel Corporation.
+ */
+
+#include "debug.h"
+
+#include "hmm.h"
+
+#ifndef __INLINE_DEBUG__
+#include "debug_private.h"
+#endif /* __INLINE_DEBUG__ */
+
+#define __INLINE_SP__
+#include "sp.h"
+
+#include "assert_support.h"
+
+/* The address of the remote copy */
+hrt_address debug_buffer_address = (hrt_address)-1;
+ia_css_ptr debug_buffer_ddr_address = (ia_css_ptr)-1;
+/* The local copy */
+static debug_data_t debug_data;
+debug_data_t *debug_data_ptr = &debug_data;
+
+void debug_buffer_init(const hrt_address addr)
+{
+ debug_buffer_address = addr;
+
+ debug_data.head = 0;
+ debug_data.tail = 0;
+}
+
+void debug_buffer_ddr_init(const ia_css_ptr addr)
+{
+ debug_buf_mode_t mode = DEBUG_BUFFER_MODE_LINEAR;
+ u32 enable = 1;
+ u32 head = 0;
+ u32 tail = 0;
+ /* set the ddr queue */
+ debug_buffer_ddr_address = addr;
+ hmm_store(addr + DEBUG_DATA_BUF_MODE_DDR_ADDR,
+ &mode, sizeof(debug_buf_mode_t));
+ hmm_store(addr + DEBUG_DATA_HEAD_DDR_ADDR,
+ &head, sizeof(uint32_t));
+ hmm_store(addr + DEBUG_DATA_TAIL_DDR_ADDR,
+ &tail, sizeof(uint32_t));
+ hmm_store(addr + DEBUG_DATA_ENABLE_DDR_ADDR,
+ &enable, sizeof(uint32_t));
+
+ /* set the local copy */
+ debug_data.head = 0;
+ debug_data.tail = 0;
+}
+
+void debug_buffer_setmode(const debug_buf_mode_t mode)
+{
+ assert(debug_buffer_address != ((hrt_address)-1));
+
+ sp_dmem_store_uint32(SP0_ID,
+ debug_buffer_address + DEBUG_DATA_BUF_MODE_ADDR, mode);
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h
new file mode 100644
index 000000000000..f80becfb7cab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __DEBUG_LOCAL_H_INCLUDED__
+#define __DEBUG_LOCAL_H_INCLUDED__
+
+#include "debug_global.h"
+
+#endif /* __DEBUG_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h
new file mode 100644
index 000000000000..568f9bf92ad8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __DEBUG_PRIVATE_H_INCLUDED__
+#define __DEBUG_PRIVATE_H_INCLUDED__
+
+#include "debug_public.h"
+
+#include "sp.h"
+
+#define __INLINE_ISP__
+#include "isp.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_DEBUG_C bool is_debug_buffer_empty(void)
+{
+ return (debug_data_ptr->head == debug_data_ptr->tail);
+}
+
+STORAGE_CLASS_DEBUG_C hrt_data debug_dequeue(void)
+{
+ hrt_data value = 0;
+
+ assert(debug_buffer_address != ((hrt_address) - 1));
+
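+ /* Pull any new entries from the SP copy of the queue before reading locally. */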
+ debug_synch_queue();
+
+ if (!is_debug_buffer_empty()) {
+ value = debug_data_ptr->buf[debug_data_ptr->head];
+ debug_data_ptr->head = (debug_data_ptr->head + 1) & DEBUG_BUF_MASK;
+ sp_dmem_store_uint32(SP0_ID, debug_buffer_address + DEBUG_DATA_HEAD_ADDR,
+ debug_data_ptr->head);
+ }
+
+ return value;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue(void)
+{
+ u32 remote_tail = sp_dmem_load_uint32(SP0_ID,
+ debug_buffer_address + DEBUG_DATA_TAIL_ADDR);
+ /* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. This is easier */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+
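+ /* The remote tail has wrapped around: copy the part up to the end
+  * of the buffer first, then the wrapped part from the start.
+  */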
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR,
+ (void *)&debug_data_ptr->buf[0],
+ remote_tail * sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue_isp(void)
+{
+ u32 remote_tail = isp_dmem_load_uint32(ISP0_ID,
+ DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_TAIL_ADDR);
+ /* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. This is easier */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR,
+ (void *)&debug_data_ptr->buf[0],
+ remote_tail * sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue_ddr(void)
+{
+ u32 remote_tail;
+
+ hmm_load(debug_buffer_ddr_address + DEBUG_DATA_TAIL_DDR_ADDR, &remote_tail,
+ sizeof(uint32_t));
+ /* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. This is easier */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+
+ hmm_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+
+ hmm_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR +
+ debug_data_ptr->tail * sizeof(uint32_t),
+ (void *)&debug_data_ptr->buf[debug_data_ptr->tail], delta * sizeof(uint32_t));
+ hmm_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR,
+ (void *)&debug_data_ptr->buf[0],
+ remote_tail * sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+#endif /* __DEBUG_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
new file mode 100644
index 000000000000..f7a8cb38d068
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2016, Intel Corporation.
+ */
+
+#include <linux/kernel.h>
+
+#include "dma.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_DMA__
+#include "dma_private.h"
+#endif /* __INLINE_DMA__ */
+
+void
+dma_set_max_burst_size(const dma_ID_t ID, dma_connection conn,
+ uint32_t max_burst_size)
+{
+ assert(ID < N_DMA_ID);
+ assert(max_burst_size > 0);
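+ /* The device-interface register encodes the maximum burst size as (size - 1) */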
+ dma_reg_store(ID, DMA_DEV_INFO_REG_IDX(_DMA_DEV_INTERF_MAX_BURST_IDX, conn),
+ max_burst_size - 1);
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
new file mode 100644
index 000000000000..7e51dea39b1a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __DMA_LOCAL_H_INCLUDED__
+#define __DMA_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "dma_global.h"
+
+#include <bits.h> /* _hrt_get_bits() */
+#include <hive_isp_css_defs.h> /* HIVE_DMA_NUM_CHANNELS */
+#include <dma_v2_defs.h>
+
+#define _DMA_FSM_GROUP_CMD_IDX _DMA_V2_FSM_GROUP_CMD_IDX
+#define _DMA_FSM_GROUP_ADDR_A_IDX _DMA_V2_FSM_GROUP_ADDR_SRC_IDX
+#define _DMA_FSM_GROUP_ADDR_B_IDX _DMA_V2_FSM_GROUP_ADDR_DEST_IDX
+
+#define _DMA_FSM_GROUP_CMD_CTRL_IDX _DMA_V2_FSM_GROUP_CMD_CTRL_IDX
+
+#define _DMA_FSM_GROUP_FSM_CTRL_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX
+
+#define _DMA_FSM_GROUP_FSM_PACK_IDX _DMA_V2_FSM_GROUP_FSM_PACK_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_STATE_IDX _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX
+
+#define _DMA_FSM_GROUP_FSM_REQ_IDX _DMA_V2_FSM_GROUP_FSM_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_STATE_IDX _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX
+
+#define _DMA_FSM_GROUP_FSM_WR_IDX _DMA_V2_FSM_GROUP_FSM_WR_IDX
+#define _DMA_FSM_GROUP_FSM_WR_STATE_IDX _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX
+
+#define _DMA_DEV_INTERF_MAX_BURST_IDX _DMA_V2_DEV_INTERF_MAX_BURST_IDX
+
+/*
+ * Macros to compute the DMA parameter register indices
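+ * (a selector forms a byte address; shifting it right by two yields the 32-bit register index)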
+ */
+#define DMA_SEL_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_COMP_BITS)) << _DMA_V2_ADDR_SEL_COMP_IDX)
+#define DMA_SEL_CH(ch) (((ch) & _hrt_ones(_DMA_V2_ADDR_SEL_CH_REG_BITS)) << _DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define DMA_SEL_PARAM(param) (((param) & _hrt_ones(_DMA_V2_ADDR_SEL_PARAM_BITS)) << _DMA_V2_ADDR_SEL_PARAM_IDX)
+/* CG = Connection Group */
+#define DMA_SEL_CG_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX)
+#define DMA_SEL_CG_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define DMA_SEL_DEV_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX)
+#define DMA_SEL_DEV_ID(dev) (((dev) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX)
+
+#define DMA_COMMAND_FSM_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_FSM_CMD) >> 2)
+#define DMA_CHANNEL_PARAM_REG_IDX(ch, param) ((DMA_SEL_COMP(_DMA_V2_SEL_CH_REG) | DMA_SEL_CH(ch) | DMA_SEL_PARAM(param)) >> 2)
+#define DMA_CG_INFO_REG_IDX(info_id, comp_id) ((DMA_SEL_COMP(_DMA_V2_SEL_CONN_GROUP) | DMA_SEL_CG_INFO(info_id) | DMA_SEL_CG_COMP(comp_id)) >> 2)
+#define DMA_DEV_INFO_REG_IDX(info_id, dev_id) ((DMA_SEL_COMP(_DMA_V2_SEL_DEV_INTERF) | DMA_SEL_DEV_INFO(info_id) | DMA_SEL_DEV_ID(dev_id)) >> 2)
+#define DMA_RST_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_RESET) >> 2)
+
+#define DMA_GET_CONNECTION(val) _hrt_get_bits(val, _DMA_V2_CONNECTION_IDX, _DMA_V2_CONNECTION_BITS)
+#define DMA_GET_EXTENSION(val) _hrt_get_bits(val, _DMA_V2_EXTENSION_IDX, _DMA_V2_EXTENSION_BITS)
+#define DMA_GET_ELEMENTS(val) _hrt_get_bits(val, _DMA_V2_ELEMENTS_IDX, _DMA_V2_ELEMENTS_BITS)
+#define DMA_GET_CROPPING(val) _hrt_get_bits(val, _DMA_V2_LEFT_CROPPING_IDX, _DMA_V2_LEFT_CROPPING_BITS)
+
+#endif /* __DMA_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h
new file mode 100644
index 000000000000..02553f2d9375
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __DMA_PRIVATE_H_INCLUDED__
+#define __DMA_PRIVATE_H_INCLUDED__
+
+#include "dma_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_DMA_C void dma_reg_store(const dma_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_DMA_ID);
+ assert(DMA_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(DMA_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+STORAGE_CLASS_DMA_C hrt_data dma_reg_load(const dma_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_DMA_ID);
+ assert(DMA_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(DMA_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __DMA_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c
new file mode 100644
index 000000000000..8f61d9054e55
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "event_fifo.h"
+
+#ifndef __INLINE_EVENT__
+#include "event_fifo_private.h"
+#endif /* __INLINE_EVENT__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h
new file mode 100644
index 000000000000..ce1916637a92
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef _EVENT_FIFO_LOCAL_H
+#define _EVENT_FIFO_LOCAL_H
+
+/*
+ * All events come from connections mapped on the system
+ * bus but do not use a global IRQ
+ */
+#include "event_fifo_global.h"
+
+typedef enum {
+ SP0_EVENT_ID,
+ ISP0_EVENT_ID,
+ STR2MIPI_EVENT_ID,
+ N_EVENT_ID
+} event_ID_t;
+
+#define EVENT_QUERY_BIT 0
+
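+/*
+ * An all-ones address marks a port that does not exist for a given
+ * event source or sink; the accessors in event_fifo_private.h assert
+ * against this sentinel before touching the device.
+ */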
+/* Events are read from FIFO */
+static const hrt_address event_source_addr[N_EVENT_ID] = {
+ 0x0000000000380000ULL,
+ 0x0000000000380004ULL,
+ 0xffffffffffffffffULL
+};
+
+/* Read from FIFO are blocking, query data availability */
+static const hrt_address event_source_query_addr[N_EVENT_ID] = {
+ 0x0000000000380010ULL,
+ 0x0000000000380014ULL,
+ 0xffffffffffffffffULL
+};
+
+/* Events are written to FIFO */
+static const hrt_address event_sink_addr[N_EVENT_ID] = {
+ 0x0000000000380008ULL,
+ 0x000000000038000CULL,
+ 0x0000000000090104ULL
+};
+
+/* Writes to FIFO are blocking, query data space */
+static const hrt_address event_sink_query_addr[N_EVENT_ID] = {
+ 0x0000000000380018ULL,
+ 0x000000000038001CULL,
+ 0x000000000009010CULL
+};
+
+#endif /* _EVENT_FIFO_LOCAL_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h
new file mode 100644
index 000000000000..439c69444942
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __EVENT_FIFO_PRIVATE_H
+#define __EVENT_FIFO_PRIVATE_H
+
+#include "event_fifo_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+#include <bits.h> /* _hrt_get_bits() */
+
+STORAGE_CLASS_EVENT_C void event_wait_for(const event_ID_t ID)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_source_addr[ID] != ((hrt_address) - 1));
+ (void)ia_css_device_load_uint32(event_source_addr[ID]);
+ return;
+}
+
+STORAGE_CLASS_EVENT_C void cnd_event_wait_for(const event_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ event_wait_for(ID);
+ }
+}
+
+STORAGE_CLASS_EVENT_C hrt_data event_receive_token(const event_ID_t ID)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_source_addr[ID] != ((hrt_address) - 1));
+ return ia_css_device_load_uint32(event_source_addr[ID]);
+}
+
+STORAGE_CLASS_EVENT_C void event_send_token(const event_ID_t ID,
+ const hrt_data token)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_sink_addr[ID] != ((hrt_address) - 1));
+ ia_css_device_store_uint32(event_sink_addr[ID], token);
+}
+
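+/*
+ * The query registers expose availability in EVENT_QUERY_BIT; a cleared
+ * bit means a token can be received (source) or sent (sink).
+ */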
+STORAGE_CLASS_EVENT_C bool is_event_pending(const event_ID_t ID)
+{
+ hrt_data value;
+
+ assert(ID < N_EVENT_ID);
+ assert(event_source_query_addr[ID] != ((hrt_address) - 1));
+ value = ia_css_device_load_uint32(event_source_query_addr[ID]);
+ return !_hrt_get_bit(value, EVENT_QUERY_BIT);
+}
+
+STORAGE_CLASS_EVENT_C bool can_event_send_token(const event_ID_t ID)
+{
+ hrt_data value;
+
+ assert(ID < N_EVENT_ID);
+ assert(event_sink_query_addr[ID] != ((hrt_address) - 1));
+ value = ia_css_device_load_uint32(event_sink_query_addr[ID]);
+ return !_hrt_get_bit(value, EVENT_QUERY_BIT);
+}
+
+#endif /* __EVENT_FIFO_PRIVATE_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c
new file mode 100644
index 000000000000..f0de78815456
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "fifo_monitor.h"
+
+#include <type_support.h>
+#include "device_access.h"
+
+#include <bits.h>
+
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_FIFO_MONITOR__
+#define STORAGE_CLASS_FIFO_MONITOR_DATA static const
+#else
+#define STORAGE_CLASS_FIFO_MONITOR_DATA const
+#endif /* __INLINE_FIFO_MONITOR__ */
+
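+/* GP device register addresses of the FIFO switches, indexed by fifo_switch_t */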
+STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = {
+ _REG_GP_SWITCH_IF_ADDR,
+ _REG_GP_SWITCH_GDC1_ADDR,
+ _REG_GP_SWITCH_GDC2_ADDR
+};
+
+#ifndef __INLINE_FIFO_MONITOR__
+#include "fifo_monitor_private.h"
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+static inline bool fifo_monitor_status_valid(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id);
+
+static inline bool fifo_monitor_status_accept(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id);
+
+void fifo_channel_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_channel_t channel_id,
+ fifo_channel_state_t *state)
+{
+ assert(channel_id < N_FIFO_CHANNEL);
+ assert(state);
+
+ switch (channel_id) {
+ case FIFO_CHANNEL_ISP0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_SP); /* ISP_STR_MON_PORT_ISP2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_SP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_ISP); /* ISP_STR_MON_PORT_SP2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_ISP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_ISP); /* ISP_STR_MON_PORT_SP2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_ISP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_SP); /* ISP_STR_MON_PORT_ISP2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_SP);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_IF0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_A); /* ISP_STR_MON_PORT_ISP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_IF0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_A); /* ISP_STR_MON_PORT_PIFA2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_IF1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_B); /* ISP_STR_MON_PORT_ISP2PIFB */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_IF1_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B); /* ISP_STR_MON_PORT_PIFB2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_DMA0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_DMA); /* ISP_STR_MON_PORT_ISP2DMA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_DMA);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_ISP); /* MOD_STR_MON_PORT_ISP2DMA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_ISP);
+ break;
+ case FIFO_CHANNEL_DMA0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2ISP); /* MOD_STR_MON_PORT_DMA2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2ISP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_DMA); /* ISP_STR_MON_PORT_DMA2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_DMA);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_GDC0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GDC); /* ISP_STR_MON_PORT_ISP2GDC1 */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GDC);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_GDC); /* MOD_STR_MON_PORT_CELLS2GDC1 */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_GDC);
+ break;
+ case FIFO_CHANNEL_GDC0_TO_ISP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_GDC); /* MOD_STR_MON_PORT_GDC12CELLS */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_GDC);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GDC); /* ISP_STR_MON_PORT_GDC12ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GDC);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_GDC1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_ISP2GDC2);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_ISP2GDC2);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ break;
+ case FIFO_CHANNEL_GDC1_TO_ISP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_GDC22ISP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_GDC22ISP);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_HOST0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GPD); /* ISP_STR_MON_PORT_ISP2GPD */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GPD);
+ {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380014ULL);
+
+ state->fifo_valid = !_hrt_get_bit(value, 0);
+ state->sink_accept = false; /* no monitor connected */
+ }
+ break;
+ case FIFO_CHANNEL_HOST0_TO_ISP0: {
+ hrt_data value = ia_css_device_load_uint32(0x000000000038001CULL);
+
+ state->fifo_valid = false; /* no monitor connected */
+ state->sink_accept = !_hrt_get_bit(value, 0);
+ }
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GPD); /* ISP_STR_MON_PORT_FA2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GPD);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_A); /* SP_STR_MON_PORT_SP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_IF0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_PIF_A); /* SP_STR_MON_PORT_PIFA2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_B); /* SP_STR_MON_PORT_SP2PIFB */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_IF1_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B); /* SP_STR_MON_PORT_PIFB2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF2:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_SIF); /* SP_STR_MON_PORT_SP2SIF */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_SIF);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_SIF); /* MOD_STR_MON_PORT_SP2SIF */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_SIF);
+ break;
+ case FIFO_CHANNEL_IF2_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_SIF); /* MOD_STR_MON_PORT_SIF2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_SIF);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_SIF); /* SP_STR_MON_PORT_SIF2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_SIF);
+ break;
+ case FIFO_CHANNEL_SP0_TO_DMA0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_DMA); /* SP_STR_MON_PORT_SP2DMA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_DMA);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_SP); /* MOD_STR_MON_PORT_SP2DMA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_SP);
+ break;
+ case FIFO_CHANNEL_DMA0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2SP); /* MOD_STR_MON_PORT_DMA2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2SP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_DMA); /* SP_STR_MON_PORT_DMA2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_DMA);
+ break;
+ case FIFO_CHANNEL_SP0_TO_GDC0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC1);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC1);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC1);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC1);
+ break;
+ case FIFO_CHANNEL_GDC0_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC12CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC12CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC12SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC12SP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_GDC1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC2);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC2);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ break;
+ case FIFO_CHANNEL_GDC1_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC22SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC22SP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_HOST0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_GPD); /* SP_STR_MON_PORT_SP2GPD */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_GPD);
+ {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380010ULL);
+
+ state->fifo_valid = !_hrt_get_bit(value, 0);
+ state->sink_accept = false; /* no monitor connected */
+ }
+ break;
+ case FIFO_CHANNEL_HOST0_TO_SP0: {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380018ULL);
+
+ state->fifo_valid = false; /* no monitor connected */
+ state->sink_accept = !_hrt_get_bit(value, 0);
+ }
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_GPD); /* SP_STR_MON_PORT_FA2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_GPD);
+ break;
+ case FIFO_CHANNEL_SP0_TO_STREAM2MEM0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_SP2MC */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_MC);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_SP2MC */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_MC);
+ break;
+ case FIFO_CHANNEL_STREAM2MEM0_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_MC2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_MC);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_MC2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_MC);
+ break;
+ case FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SP2ISYS);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SP2ISYS);
+ state->fifo_valid = false;
+ state->sink_accept = false;
+ break;
+ case FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0:
+ state->fifo_valid = false;
+ state->sink_accept = false;
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_ISYS2SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_ISYS2SP);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ return;
+}
+
+void fifo_switch_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ fifo_switch_state_t *state)
+{
+ hrt_data data = (hrt_data)-1;
+
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(switch_id < N_FIFO_SWITCH);
+ assert(state);
+
+ (void)ID;
+
+ data = gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+
+ state->is_none = (data == HIVE_ISP_CSS_STREAM_SWITCH_NONE);
+ state->is_sp = (data == HIVE_ISP_CSS_STREAM_SWITCH_SP);
+ state->is_isp = (data == HIVE_ISP_CSS_STREAM_SWITCH_ISP);
+
+ return;
+}
+
+void fifo_monitor_get_state(
+ const fifo_monitor_ID_t ID,
+ fifo_monitor_state_t *state)
+{
+ fifo_channel_t ch_id;
+ fifo_switch_t sw_id;
+
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(state);
+
+ for (ch_id = 0; ch_id < N_FIFO_CHANNEL; ch_id++) {
+ fifo_channel_get_state(ID, ch_id,
+ &state->fifo_channels[ch_id]);
+ }
+
+ for (sw_id = 0; sw_id < N_FIFO_SWITCH; sw_id++) {
+ fifo_switch_get_state(ID, sw_id,
+ &state->fifo_switches[sw_id]);
+ }
+ return;
+}
+
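+/*
+ * Each monitored port occupies two consecutive bits in its stream
+ * status register: the valid bit at offset (2 * port_id) and the
+ * accept bit at offset (2 * port_id + 1).
+ */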
+static inline bool fifo_monitor_status_valid(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id)
+{
+ hrt_data data = fifo_monitor_reg_load(ID, reg);
+
+ return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1;
+}
+
+static inline bool fifo_monitor_status_accept(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id)
+{
+ hrt_data data = fifo_monitor_reg_load(ID, reg);
+
+ return (data >> (((port_id * 2) + _hive_str_mon_accept_offset))) & 0x1;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h
new file mode 100644
index 000000000000..e02ccaee34ab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __FIFO_MONITOR_LOCAL_H_INCLUDED__
+#define __FIFO_MONITOR_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "fifo_monitor_global.h"
+
+#include "hive_isp_css_defs.h" /* ISP_STR_MON_PORT_SND_SP, ... */
+
+#define _hive_str_mon_valid_offset 0
+#define _hive_str_mon_accept_offset 1
+
+#define FIFO_CHANNEL_SP_VALID_MASK 0x55555555
+#define FIFO_CHANNEL_SP_VALID_B_MASK 0x00000055
+#define FIFO_CHANNEL_ISP_VALID_MASK 0x15555555
+#define FIFO_CHANNEL_MOD_VALID_MASK 0x55555555
+
+typedef enum fifo_switch {
+ FIFO_SWITCH_IF,
+ FIFO_SWITCH_GDC0,
+ FIFO_SWITCH_GDC1,
+ N_FIFO_SWITCH
+} fifo_switch_t;
+
+typedef enum fifo_channel {
+ FIFO_CHANNEL_ISP0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_IF0,
+ FIFO_CHANNEL_IF0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_IF1,
+ FIFO_CHANNEL_IF1_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_DMA0,
+ FIFO_CHANNEL_DMA0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_GDC0,
+ FIFO_CHANNEL_GDC0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_GDC1,
+ FIFO_CHANNEL_GDC1_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_HOST0,
+ FIFO_CHANNEL_HOST0_TO_ISP0,
+ FIFO_CHANNEL_SP0_TO_IF0,
+ FIFO_CHANNEL_IF0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_IF1,
+ FIFO_CHANNEL_IF1_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_IF2,
+ FIFO_CHANNEL_IF2_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_DMA0,
+ FIFO_CHANNEL_DMA0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_GDC0,
+ FIFO_CHANNEL_GDC0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_GDC1,
+ FIFO_CHANNEL_GDC1_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_HOST0,
+ FIFO_CHANNEL_HOST0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_STREAM2MEM0,
+ FIFO_CHANNEL_STREAM2MEM0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0,
+ FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0,
+ /*
+ * No clue what this is
+ *
+ FIFO_CHANNEL_SP0_TO_IRQ0,
+ FIFO_CHANNEL_IRQ0_TO_SP0,
+ */
+ N_FIFO_CHANNEL
+} fifo_channel_t;
+
+struct fifo_channel_state_s {
+ bool src_valid;
+ bool fifo_accept;
+ bool fifo_valid;
+ bool sink_accept;
+};
+
+/* The switch is tri-state */
+struct fifo_switch_state_s {
+ bool is_none;
+ bool is_isp;
+ bool is_sp;
+};
+
+struct fifo_monitor_state_s {
+ struct fifo_channel_state_s fifo_channels[N_FIFO_CHANNEL];
+ struct fifo_switch_state_s fifo_switches[N_FIFO_SWITCH];
+};
+
+#endif /* __FIFO_MONITOR_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h
new file mode 100644
index 000000000000..53a3fb796aab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __FIFO_MONITOR_PRIVATE_H_INCLUDED__
+#define __FIFO_MONITOR_PRIVATE_H_INCLUDED__
+
+#include "fifo_monitor_public.h"
+
+#define __INLINE_GP_DEVICE__
+#include "gp_device.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+#ifdef __INLINE_FIFO_MONITOR__
+extern const unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH];
+#endif
+
+STORAGE_CLASS_FIFO_MONITOR_C void fifo_switch_set(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ const hrt_data sel)
+{
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ assert(switch_id < N_FIFO_SWITCH);
+ (void)ID;
+
+ gp_device_reg_store(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id], sel);
+
+ return;
+}
+
+STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_switch_get(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id)
+{
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ assert(switch_id < N_FIFO_SWITCH);
+ (void)ID;
+
+ return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+}
+
+STORAGE_CLASS_FIFO_MONITOR_C void fifo_monitor_reg_store(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(FIFO_MONITOR_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+ return;
+}
+
+STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_monitor_reg_load(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] +
+ reg * sizeof(hrt_data));
+}
+
+#endif /* __FIFO_MONITOR_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
new file mode 100644
index 000000000000..8bb78b4d7c67
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+/* The name "gdc.h is already taken" */
+#include "gdc_device.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+/*
+ * Local function declarations
+ */
+static inline void gdc_reg_store(
+ const gdc_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+#ifndef __INLINE_GDC__
+#include "gdc_private.h"
+#endif /* __INLINE_GDC__ */
+
+/*
+ * Exported function implementations
+ */
+void gdc_lut_store(
+ const gdc_ID_t ID,
+ const int data[4][HRT_GDC_N])
+{
+ unsigned int i, lut_offset = HRT_GDC_LUT_IDX;
+
+ assert(ID < N_GDC_ID);
+ assert(HRT_GDC_LUT_COEFF_OFFSET <= (4 * sizeof(hrt_data)));
+
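+ /*
+  * Each LUT entry holds four coefficients; pack them two per register
+  * word and store two consecutive words per entry.
+  */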
+ for (i = 0; i < HRT_GDC_N; i++) {
+ hrt_data entry_0 = data[0][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_1 = data[1][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_2 = data[2][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_3 = data[3][i] & HRT_GDC_BCI_COEF_MASK;
+
+ hrt_data word_0 = entry_0 |
+ (entry_1 << HRT_GDC_LUT_COEFF_OFFSET);
+ hrt_data word_1 = entry_2 |
+ (entry_3 << HRT_GDC_LUT_COEFF_OFFSET);
+
+ gdc_reg_store(ID, lut_offset++, word_0);
+ gdc_reg_store(ID, lut_offset++, word_1);
+ }
+ return;
+}
+
+/*
+ * Input LUT format:
+ * c0[0-1023], c1[0-1023], c2[0-1023], c3[0-1023]
+ *
+ * Output LUT format (interleaved):
+ * c0[0], c1[0], c2[0], c3[0], c0[1], c1[1], c2[1], c3[1], ....
+ * c0[1023], c1[1023], c2[1023], c3[1023]
+ *
+ * The first format needs c0[0] and c1[0] (which are 1024 words apart)
+ * to program the GDC LUT registers. This makes it difficult to do
+ * piecemeal reads in the SP-side gdc_lut_store.
+ *
+ * The interleaved format allows contiguous bytes to be stored into
+ * the GDC LUT registers.
+ *
+ * See gdc_lut_store() definition in host/gdc.c vs sp/gdc_private.h
+ *
+ */
+void gdc_lut_convert_to_isp_format(const int in_lut[4][HRT_GDC_N],
+ int out_lut[4][HRT_GDC_N])
+{
+ unsigned int i;
+ int *out = (int *)out_lut;
+
+ for (i = 0; i < HRT_GDC_N; i++) {
+ out[0] = in_lut[0][i];
+ out[1] = in_lut[1][i];
+ out[2] = in_lut[2][i];
+ out[3] = in_lut[3][i];
+ out += 4;
+ }
+}
+
+int gdc_get_unity(
+ const gdc_ID_t ID)
+{
+ assert(ID < N_GDC_ID);
+ (void)ID;
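+ /* 1.0 expressed in the GDC fixed-point representation */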
+ return (int)(1UL << HRT_GDC_FRAC_BITS);
+}
+
+/*
+ * Local function implementations
+ */
+static inline void gdc_reg_store(
+ const gdc_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ ia_css_device_store_uint32(GDC_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h
new file mode 100644
index 000000000000..bde637b519b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __GDC_LOCAL_H_INCLUDED__
+#define __GDC_LOCAL_H_INCLUDED__
+
+#include "gdc_global.h"
+
+#endif /* __GDC_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h
new file mode 100644
index 000000000000..03ffb16c956d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __GDC_PRIVATE_H_INCLUDED__
+#define __GDC_PRIVATE_H_INCLUDED__
+
+#include "gdc_public.h"
+
+#endif /* __GDC_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c
new file mode 100644
index 000000000000..b934d20c88ea
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "assert_support.h"
+#include "gp_device.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#include "gp_device_private.h"
+#endif /* __INLINE_GP_DEVICE__ */
+
+void gp_device_get_state(
+ const gp_device_ID_t ID,
+ gp_device_state_t *state)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(state);
+
+ state->syncgen_enable = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_ENABLE_ADDR);
+ state->syncgen_free_running = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_FREE_RUNNING_ADDR);
+ state->syncgen_pause = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_PAUSE_ADDR);
+ state->nr_frames = gp_device_reg_load(ID,
+ _REG_GP_NR_FRAMES_ADDR);
+ state->syngen_nr_pix = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_NR_PIX_ADDR);
+ state->syngen_nr_lines = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_NR_LINES_ADDR);
+ state->syngen_hblank_cycles = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR);
+ state->syngen_vblank_cycles = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR);
+ state->isel_sof = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SOF_ADDR);
+ state->isel_eof = gp_device_reg_load(ID,
+ _REG_GP_ISEL_EOF_ADDR);
+ state->isel_sol = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SOL_ADDR);
+ state->isel_eol = gp_device_reg_load(ID,
+ _REG_GP_ISEL_EOL_ADDR);
+ state->isel_lfsr_enable = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_ENABLE_ADDR);
+ state->isel_lfsr_enable_b = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_ENABLE_B_ADDR);
+ state->isel_lfsr_reset_value = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR);
+ state->isel_tpg_enable = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_ENABLE_ADDR);
+ state->isel_tpg_enable_b = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_ENABLE_B_ADDR);
+ state->isel_hor_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_HOR_CNT_MASK_ADDR);
+ state->isel_ver_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_VER_CNT_MASK_ADDR);
+ state->isel_xy_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_XY_CNT_MASK_ADDR);
+ state->isel_hor_cnt_delta = gp_device_reg_load(ID,
+ _REG_GP_ISEL_HOR_CNT_DELTA_ADDR);
+ state->isel_ver_cnt_delta = gp_device_reg_load(ID,
+ _REG_GP_ISEL_VER_CNT_DELTA_ADDR);
+ state->isel_tpg_mode = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_MODE_ADDR);
+ state->isel_tpg_red1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_RED1_ADDR);
+ state->isel_tpg_green1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_GREEN1_ADDR);
+ state->isel_tpg_blue1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_BLUE1_ADDR);
+ state->isel_tpg_red2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_RED2_ADDR);
+ state->isel_tpg_green2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_GREEN2_ADDR);
+ state->isel_tpg_blue2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_BLUE2_ADDR);
+ state->isel_ch_id = gp_device_reg_load(ID,
+ _REG_GP_ISEL_CH_ID_ADDR);
+ state->isel_fmt_type = gp_device_reg_load(ID,
+ _REG_GP_ISEL_FMT_TYPE_ADDR);
+ state->isel_data_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_DATA_SEL_ADDR);
+ state->isel_sband_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SBAND_SEL_ADDR);
+ state->isel_sync_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SYNC_SEL_ADDR);
+ state->syncgen_hor_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_HOR_CNT_ADDR);
+ state->syncgen_ver_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_VER_CNT_ADDR);
+ state->syncgen_frame_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_FRAME_CNT_ADDR);
+ state->soft_reset = gp_device_reg_load(ID,
+ _REG_GP_SOFT_RESET_ADDR);
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h
new file mode 100644
index 000000000000..2fcb95cf1b95
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __GP_DEVICE_LOCAL_H_INCLUDED__
+#define __GP_DEVICE_LOCAL_H_INCLUDED__
+
+#include "gp_device_global.h"
+
+/* @ GP_REGS_BASE -> GP_DEVICE_BASE */
+#define _REG_GP_SDRAM_WAKEUP_ADDR 0x00
+#define _REG_GP_IDLE_ADDR 0x04
+/* #define _REG_GP_IRQ_REQ0_ADDR 0x08 */
+/* #define _REG_GP_IRQ_REQ1_ADDR 0x0C */
+#define _REG_GP_SP_STREAM_STAT_ADDR 0x10
+#define _REG_GP_SP_STREAM_STAT_B_ADDR 0x14
+#define _REG_GP_ISP_STREAM_STAT_ADDR 0x18
+#define _REG_GP_MOD_STREAM_STAT_ADDR 0x1C
+#define _REG_GP_SP_STREAM_STAT_IRQ_COND_ADDR 0x20
+#define _REG_GP_SP_STREAM_STAT_B_IRQ_COND_ADDR 0x24
+#define _REG_GP_ISP_STREAM_STAT_IRQ_COND_ADDR 0x28
+#define _REG_GP_MOD_STREAM_STAT_IRQ_COND_ADDR 0x2C
+#define _REG_GP_SP_STREAM_STAT_IRQ_ENABLE_ADDR 0x30
+#define _REG_GP_SP_STREAM_STAT_B_IRQ_ENABLE_ADDR 0x34
+#define _REG_GP_ISP_STREAM_STAT_IRQ_ENABLE_ADDR 0x38
+#define _REG_GP_MOD_STREAM_STAT_IRQ_ENABLE_ADDR 0x3C
+/*
+#define _REG_GP_SWITCH_IF_ADDR 0x40
+#define _REG_GP_SWITCH_GDC1_ADDR 0x44
+#define _REG_GP_SWITCH_GDC2_ADDR 0x48
+*/
+#define _REG_GP_SLV_REG_RST_ADDR 0x50
+#define _REG_GP_SWITCH_ISYS2401_ADDR 0x54
+
+/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */
+/*
+#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800
+#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804
+#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808
+#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C
+#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810
+#define _REG_GP_IFMT_input_switch_lut_reg5 0x00030814
+#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818
+#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C
+#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820
+#define _REG_GP_IFMT_srst 0x00030824
+#define _REG_GP_IFMT_slv_reg_srst 0x00030828
+#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C
+*/
+/* @ GP_DEVICE_BASE */
+/*
+#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000
+#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004
+#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008
+#define _REG_GP_NR_FRAMES_ADDR 0x0009000C
+#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010
+#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014
+#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018
+#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C
+#define _REG_GP_ISEL_SOF_ADDR 0x00090020
+#define _REG_GP_ISEL_EOF_ADDR 0x00090024
+#define _REG_GP_ISEL_SOL_ADDR 0x00090028
+#define _REG_GP_ISEL_EOL_ADDR 0x0009002C
+#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030
+#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034
+#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038
+#define _REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C
+#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040
+#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044
+#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048
+#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C
+#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050
+#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054
+#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058
+#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C
+#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060
+#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064
+#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068
+#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C
+#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070
+#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074
+#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078
+#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C
+#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080
+#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084
+#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088
+#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C
+#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090
+#define _REG_GP_SOFT_RESET_ADDR 0x00090094
+*/
+
+struct gp_device_state_s {
+ int syncgen_enable;
+ int syncgen_free_running;
+ int syncgen_pause;
+ int nr_frames;
+ int syngen_nr_pix;
+ int syngen_nr_lines;
+ int syngen_hblank_cycles;
+ int syngen_vblank_cycles;
+ int isel_sof;
+ int isel_eof;
+ int isel_sol;
+ int isel_eol;
+ int isel_lfsr_enable;
+ int isel_lfsr_enable_b;
+ int isel_lfsr_reset_value;
+ int isel_tpg_enable;
+ int isel_tpg_enable_b;
+ int isel_hor_cnt_mask;
+ int isel_ver_cnt_mask;
+ int isel_xy_cnt_mask;
+ int isel_hor_cnt_delta;
+ int isel_ver_cnt_delta;
+ int isel_tpg_mode;
+ int isel_tpg_red1;
+ int isel_tpg_green1;
+ int isel_tpg_blue1;
+ int isel_tpg_red2;
+ int isel_tpg_green2;
+ int isel_tpg_blue2;
+ int isel_ch_id;
+ int isel_fmt_type;
+ int isel_data_sel;
+ int isel_sband_sel;
+ int isel_sync_sel;
+ int syncgen_hor_cnt;
+ int syncgen_ver_cnt;
+ int syncgen_frame_cnt;
+ int soft_reset;
+};
+
+#endif /* __GP_DEVICE_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h
new file mode 100644
index 000000000000..71f20992ee98
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __GP_DEVICE_PRIVATE_H_INCLUDED__
+#define __GP_DEVICE_PRIVATE_H_INCLUDED__
+
+#include "gp_device_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_GP_DEVICE_C void gp_device_reg_store(
+ const gp_device_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ ia_css_device_store_uint32(GP_DEVICE_BASE[ID] + reg_addr, value);
+ return;
+}
+
+STORAGE_CLASS_GP_DEVICE_C hrt_data gp_device_reg_load(
+ const gp_device_ID_t ID,
+ const hrt_address reg_addr)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
+}
+
+#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c
new file mode 100644
index 000000000000..d04c179a5ecd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include <type_support.h> /*uint32_t */
+#include "gp_timer.h" /*system_local.h,
+ gp_timer_public.h*/
+
+#ifndef __INLINE_GP_TIMER__
+#include "gp_timer_private.h" /*device_access.h*/
+#endif /* __INLINE_GP_TIMER__ */
+#include "system_local.h"
+
+/* FIXME: not sure if reg_load(), reg_store() should be API.
+ */
+static uint32_t
+gp_timer_reg_load(uint32_t reg);
+
+static void
+gp_timer_reg_store(u32 reg, uint32_t value);
+
+static uint32_t
+gp_timer_reg_load(uint32_t reg)
+{
+ return ia_css_device_load_uint32(
+ GP_TIMER_BASE +
+ (reg * sizeof(uint32_t)));
+}
+
+static void
+gp_timer_reg_store(u32 reg, uint32_t value)
+{
+ ia_css_device_store_uint32((GP_TIMER_BASE +
+ (reg * sizeof(uint32_t))),
+ value);
+}
+
+void gp_timer_init(gp_timer_ID_t ID)
+{
+ /* set_overall_enable*/
+ gp_timer_reg_store(_REG_GP_TIMER_OVERALL_ENABLE, 1);
+
+ /*set enable*/
+ gp_timer_reg_store(_REG_GP_TIMER_ENABLE_ID(ID), 1);
+
+ /* set signal select */
+ gp_timer_reg_store(_REG_GP_TIMER_SIGNAL_SELECT_ID(ID), GP_TIMER_SIGNAL_SELECT);
+
+ /*set count type */
+ gp_timer_reg_store(_REG_GP_TIMER_COUNT_TYPE_ID(ID), GP_TIMER_COUNT_TYPE_LOW);
+
+ /*reset gp timer */
+ gp_timer_reg_store(_REG_GP_TIMER_RESET_REG, 0xFF);
+}
+
+uint32_t
+gp_timer_read(gp_timer_ID_t ID)
+{
+ return gp_timer_reg_load(_REG_GP_TIMER_VALUE_ID(ID));
+}
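+
+/*
+ * Illustrative usage (a sketch, not a definitive API contract): measuring
+ * an interval in timer ticks, where GP_TIMER0_ID stands in for any valid
+ * gp_timer_ID_t value:
+ *
+ *   uint32_t start, elapsed;
+ *
+ *   gp_timer_init(GP_TIMER0_ID);
+ *   start = gp_timer_read(GP_TIMER0_ID);
+ *   ... workload under measurement ...
+ *   elapsed = gp_timer_read(GP_TIMER0_ID) - start;
+ */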
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h
new file mode 100644
index 000000000000..779eeee650d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __GP_TIMER_LOCAL_H_INCLUDED__
+#define __GP_TIMER_LOCAL_H_INCLUDED__
+
+#include "gp_timer_global.h" /*GP_TIMER_SEL
+ GP_TIMER_SIGNAL_SELECT*/
+
+#include "gp_timer_defs.h" /*HIVE_GP_TIMER_xxx registers*/
+#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_NUM_COUNTERS
+ HIVE_GP_TIMER_NUM_IRQS*/
+
+#define _REG_GP_TIMER_RESET_REG HIVE_GP_TIMER_RESET_REG_IDX
+#define _REG_GP_TIMER_OVERALL_ENABLE HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX
+
+/*Register offsets for timers [1,7] can be obtained
+ * by adding (GP_TIMERx_ID * sizeof(uint32_t))*/
+#define _REG_GP_TIMER_ENABLE_ID(timer_id) HIVE_GP_TIMER_ENABLE_REG_IDX(timer_id)
+#define _REG_GP_TIMER_VALUE_ID(timer_id) HIVE_GP_TIMER_VALUE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+#define _REG_GP_TIMER_COUNT_TYPE_ID(timer_id) HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+#define _REG_GP_TIMER_SIGNAL_SELECT_ID(timer_id) HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+
+#define _REG_GP_TIMER_IRQ_TRIGGER_VALUE_ID(irq_id) HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS)
+
+#define _REG_GP_TIMER_IRQ_TIMER_SELECT_ID(irq_id) \
+ HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS)
+
+#define _REG_GP_TIMER_IRQ_ENABLE_ID(irq_id) \
+ HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS)
+
+#endif /*__GP_TIMER_LOCAL_H_INCLUDED__*/
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h
new file mode 100644
index 000000000000..45ea7daaaa1b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __GP_TIMER_PRIVATE_H_INCLUDED__
+#define __GP_TIMER_PRIVATE_H_INCLUDED__
+
+#include "gp_timer_public.h"
+#include "device_access.h"
+#include "assert_support.h"
+
+#endif /* __GP_TIMER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h
new file mode 100644
index 000000000000..bcf6538ac0dc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __GPIO_PRIVATE_H_INCLUDED__
+#define __GPIO_PRIVATE_H_INCLUDED__
+
+#include "assert_support.h"
+#include "device_access.h"
+
+static inline void gpio_reg_store(
+ const gpio_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ OP___assert(ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(GPIO_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+static inline hrt_data gpio_reg_load(
+ const gpio_ID_t ID,
+ const unsigned int reg)
+{
+ OP___assert(ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(GPIO_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __GPIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c
new file mode 100644
index 000000000000..8b999acd6253
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "hmem.h"
+
+#ifndef __INLINE_HMEM__
+#include "hmem_private.h"
+#endif /* __INLINE_HMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h
new file mode 100644
index 000000000000..e9f8024a01b4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __HMEM_LOCAL_H_INCLUDED__
+#define __HMEM_LOCAL_H_INCLUDED__
+
+#include "hmem_global.h"
+
+#endif /* __HMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h
new file mode 100644
index 000000000000..0d58b321552f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __HMEM_PRIVATE_H_INCLUDED__
+#define __HMEM_PRIVATE_H_INCLUDED__
+
+#include "hmem_public.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_HMEM_C size_t sizeof_hmem(
+ const hmem_ID_t ID)
+{
+ assert(ID < N_HMEM_ID);
+ (void)ID;
+ return HMEM_SIZE * sizeof(hmem_data_t);
+}
+
+#endif /* __HMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
new file mode 100644
index 000000000000..40b3f1e48c56
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+
+#include "input_formatter.h"
+#include <type_support.h>
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_INPUT_FORMATTER__
+#include "input_formatter_private.h"
+#endif /* __INLINE_INPUT_FORMATTER__ */
+
+static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = {
+ ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES
+};
+
+const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID] = {
+ INPUT_FORMATTER0_SRST_OFFSET,
+ INPUT_FORMATTER1_SRST_OFFSET,
+ INPUT_FORMATTER2_SRST_OFFSET,
+ INPUT_FORMATTER3_SRST_OFFSET
+};
+
+const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID] = {
+ INPUT_FORMATTER0_SRST_MASK,
+ INPUT_FORMATTER1_SRST_MASK,
+ INPUT_FORMATTER2_SRST_MASK,
+ INPUT_FORMATTER3_SRST_MASK
+};
+
+const u8 HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID] = {
+ HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
+ HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
+ HIVE_INPUT_SWITCH_SELECT_IF_SEC,
+ HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM
+};
+
+/* MW: Should be part of system_global.h, where we have the main enumeration */
+static const bool HIVE_IF_BIN_COPY[N_INPUT_FORMATTER_ID] = {
+ false, false, false, true
+};
+
+void input_formatter_rst(
+ const input_formatter_ID_t ID)
+{
+ hrt_address addr;
+ hrt_data rst;
+
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ addr = HIVE_IF_SRST_ADDRESS[ID];
+ rst = HIVE_IF_SRST_MASK[ID];
+
+ /* TEMPORARY HACK: THIS RESET BREAKS THE METADATA FEATURE
+ * WHICH USES THE STREAM2MEMORY BLOCK.
+ * MUST BE FIXED PROPERLY
+ */
+ if (!HIVE_IF_BIN_COPY[ID]) {
+ input_formatter_reg_store(ID, addr, rst);
+ }
+
+ return;
+}
+
+unsigned int input_formatter_get_alignment(
+ const input_formatter_ID_t ID)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ return input_formatter_alignment[ID];
+}
+
+void input_formatter_set_fifo_blocking_mode(
+ const input_formatter_ID_t ID,
+ const bool enable)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ /* cnd_input_formatter_reg_store() */
+ if (!HIVE_IF_BIN_COPY[ID]) {
+ input_formatter_reg_store(ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS, enable);
+ }
+ return;
+}
+
+void input_formatter_get_switch_state(
+ const input_formatter_ID_t ID,
+ input_formatter_switch_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state);
+
+ /* We'll change this into an intelligent function to get switch info per IF */
+ (void)ID;
+
+ state->if_input_switch_lut_reg[0] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg0);
+ state->if_input_switch_lut_reg[1] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg1);
+ state->if_input_switch_lut_reg[2] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg2);
+ state->if_input_switch_lut_reg[3] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg3);
+ state->if_input_switch_lut_reg[4] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg4);
+ state->if_input_switch_lut_reg[5] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg5);
+ state->if_input_switch_lut_reg[6] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg6);
+ state->if_input_switch_lut_reg[7] = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg7);
+ state->if_input_switch_fsync_lut = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_fsync_lut);
+ state->if_input_switch_ch_id_fmt_type = gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_ch_id_fmt_type);
+
+ return;
+}
+
+void input_formatter_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state);
+ /*
+ state->reset = input_formatter_reg_load(ID,
+ HIVE_IF_RESET_ADDRESS);
+ */
+ state->start_line = input_formatter_reg_load(ID,
+ HIVE_IF_START_LINE_ADDRESS);
+ state->start_column = input_formatter_reg_load(ID,
+ HIVE_IF_START_COLUMN_ADDRESS);
+ state->cropped_height = input_formatter_reg_load(ID,
+ HIVE_IF_CROPPED_HEIGHT_ADDRESS);
+ state->cropped_width = input_formatter_reg_load(ID,
+ HIVE_IF_CROPPED_WIDTH_ADDRESS);
+ state->ver_decimation = input_formatter_reg_load(ID,
+ HIVE_IF_VERTICAL_DECIMATION_ADDRESS);
+ state->hor_decimation = input_formatter_reg_load(ID,
+ HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS);
+ state->hor_deinterleaving = input_formatter_reg_load(ID,
+ HIVE_IF_H_DEINTERLEAVING_ADDRESS);
+ state->left_padding = input_formatter_reg_load(ID,
+ HIVE_IF_LEFTPADDING_WIDTH_ADDRESS);
+ state->eol_offset = input_formatter_reg_load(ID,
+ HIVE_IF_END_OF_LINE_OFFSET_ADDRESS);
+ state->vmem_start_address = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_START_ADDRESS_ADDRESS);
+ state->vmem_end_address = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_END_ADDRESS_ADDRESS);
+ state->vmem_increment = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_INCREMENT_ADDRESS);
+ state->is_yuv420 = input_formatter_reg_load(ID,
+ HIVE_IF_YUV_420_FORMAT_ADDRESS);
+ state->vsync_active_low = input_formatter_reg_load(ID,
+ HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS);
+ state->hsync_active_low = input_formatter_reg_load(ID,
+ HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS);
+ state->allow_fifo_overflow = input_formatter_reg_load(ID,
+ HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS);
+ state->block_fifo_when_no_req = input_formatter_reg_load(ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS);
+ state->ver_deinterleaving = input_formatter_reg_load(ID,
+ HIVE_IF_V_DEINTERLEAVING_ADDRESS);
+ /* FSM */
+ state->fsm_sync_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_SYNC_STATUS);
+ state->fsm_sync_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_SYNC_COUNTER);
+ state->fsm_crop_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_STATUS);
+ state->fsm_crop_line_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_LINE_COUNTER);
+ state->fsm_crop_pixel_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_PIXEL_COUNTER);
+ state->fsm_deinterleaving_index = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DEINTERLEAVING_IDX);
+ state->fsm_dec_h_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_H_COUNTER);
+ state->fsm_dec_v_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_V_COUNTER);
+ state->fsm_dec_block_v_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER);
+ state->fsm_padding_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_PADDING_STATUS);
+ state->fsm_padding_elem_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_PADDING_ELEMENT_COUNTER);
+ state->fsm_vector_support_error = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT_ERROR);
+ state->fsm_vector_buffer_full = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL);
+ state->vector_support = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT);
+ state->sensor_data_lost = input_formatter_reg_load(ID,
+ HIVE_IF_FIFO_SENSOR_STATUS);
+
+ return;
+}
+
+void input_formatter_bin_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_bin_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state);
+
+ state->reset = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS);
+ state->input_endianness = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS);
+ state->output_endianness = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS);
+ state->bitswap = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS);
+ state->block_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS);
+ state->packet_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS);
+ state->readpostwrite_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS);
+ state->is_2ppc = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS);
+ state->en_status_update = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS);
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h
new file mode 100644
index 000000000000..84cd031f8b8d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_FORMATTER_LOCAL_H_INCLUDED__
+#define __INPUT_FORMATTER_LOCAL_H_INCLUDED__
+
+#include "input_formatter_global.h"
+
+#include "isp.h" /* ISP_VEC_ALIGN */
+
+typedef struct input_formatter_switch_state_s input_formatter_switch_state_t;
+typedef struct input_formatter_state_s input_formatter_state_t;
+typedef struct input_formatter_bin_state_s input_formatter_bin_state_t;
+
+#define HIVE_IF_FSM_SYNC_STATUS 0x100
+#define HIVE_IF_FSM_SYNC_COUNTER 0x104
+#define HIVE_IF_FSM_DEINTERLEAVING_IDX 0x114
+#define HIVE_IF_FSM_DECIMATION_H_COUNTER 0x118
+#define HIVE_IF_FSM_DECIMATION_V_COUNTER 0x11C
+#define HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER 0x120
+#define HIVE_IF_FSM_PADDING_STATUS 0x124
+#define HIVE_IF_FSM_PADDING_ELEMENT_COUNTER 0x128
+#define HIVE_IF_FSM_VECTOR_SUPPORT_ERROR 0x12C
+#define HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL 0x130
+#define HIVE_IF_FSM_VECTOR_SUPPORT 0x134
+#define HIVE_IF_FIFO_SENSOR_STATUS 0x138
+
+/*
+ * The switch LUT coding defines a sink for each single
+ * (channel ID, channel format type) combination. Conversely,
+ * a sink (i.e. an input formatter) can be reached from
+ * multiple channel & format type combinations.
+ *
+ * LUT[0,1] channel=0, format type {0,1,...31}
+ * LUT[2,3] channel=1, format type {0,1,...31}
+ * LUT[4,5] channel=2, format type {0,1,...31}
+ * LUT[6,7] channel=3, format type {0,1,...31}
+ *
+ * Each register holds 16 2-bit fields encoding the sink
+ * {0,1,2,3}, "0" means unconnected.
+ *
+ * The single FSYNCH register uses four 3-bit fields of 1-hot
+ * encoded sink information, "0" means unconnected.
+ *
+ * The encoding is redundant. The FSYNCH setting connects a channel to a
+ * sink; the LUTs belonging to that channel can then be directed to another
+ * sink, so the data can end up in a different place than the sync
+ */
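+
+/*
+ * Illustrative sketch (derived from the layout description above, not a
+ * separate hardware definition): routing channel 'ch' with format type
+ * 'fmt' to a 2-bit sink code 'sink' would touch LUT register
+ * (2 * ch + fmt / 16) at bit offset (fmt % 16) * 2, roughly:
+ *
+ *   reg = _REG_GP_IFMT_input_switch_lut_reg0 +
+ *         (2 * ch + fmt / 16) * SIZEOF_HRT_REG;
+ *   shift = (fmt % 16) * 2;
+ *   value = (value & ~(0x3 << shift)) | ((sink & 0x3) << shift);
+ */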
+struct input_formatter_switch_state_s {
+ int if_input_switch_lut_reg[8];
+ int if_input_switch_fsync_lut;
+ int if_input_switch_ch_id_fmt_type;
+ bool if_input_switch_map[HIVE_SWITCH_N_CHANNELS][HIVE_SWITCH_N_FORMATTYPES];
+};
+
+struct input_formatter_state_s {
+ /* int reset; */
+ int start_line;
+ int start_column;
+ int cropped_height;
+ int cropped_width;
+ int ver_decimation;
+ int hor_decimation;
+ int ver_deinterleaving;
+ int hor_deinterleaving;
+ int left_padding;
+ int eol_offset;
+ int vmem_start_address;
+ int vmem_end_address;
+ int vmem_increment;
+ int is_yuv420;
+ int vsync_active_low;
+ int hsync_active_low;
+ int allow_fifo_overflow;
+ int block_fifo_when_no_req;
+ int fsm_sync_status;
+ int fsm_sync_counter;
+ int fsm_crop_status;
+ int fsm_crop_line_counter;
+ int fsm_crop_pixel_counter;
+ int fsm_deinterleaving_index;
+ int fsm_dec_h_counter;
+ int fsm_dec_v_counter;
+ int fsm_dec_block_v_counter;
+ int fsm_padding_status;
+ int fsm_padding_elem_counter;
+ int fsm_vector_support_error;
+ int fsm_vector_buffer_full;
+ int vector_support;
+ int sensor_data_lost;
+};
+
+struct input_formatter_bin_state_s {
+ u32 reset;
+ u32 input_endianness;
+ u32 output_endianness;
+ u32 bitswap;
+ u32 block_synch;
+ u32 packet_synch;
+ u32 readpostwrite_synch;
+ u32 is_2ppc;
+ u32 en_status_update;
+};
+
+#endif /* __INPUT_FORMATTER_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h
new file mode 100644
index 000000000000..6b6ba49656e5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_FORMATTER_PRIVATE_H_INCLUDED__
+#define __INPUT_FORMATTER_PRIVATE_H_INCLUDED__
+
+#include "input_formatter_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_INPUT_FORMATTER_C void input_formatter_reg_store(
+ const input_formatter_ID_t ID,
+ const hrt_address reg_addr,
+ const hrt_data value)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ ia_css_device_store_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr, value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_FORMATTER_C hrt_data input_formatter_reg_load(
+ const input_formatter_ID_t ID,
+ const unsigned int reg_addr)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
+}
+
+#endif /* __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
new file mode 100644
index 000000000000..9f1199c4761c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
@@ -0,0 +1,1297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+
+#include "input_system.h"
+#include <type_support.h>
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#include "input_system_private.h"
+#endif /* __INLINE_INPUT_SYSTEM__ */
+
+#define ZERO (0x0)
+#define ONE (1U)
+
+static const isp2400_ib_buffer_t IB_BUFFER_NULL = {0, 0, 0 };
+
+static input_system_err_t input_system_configure_channel(
+ const channel_cfg_t channel);
+
+static input_system_err_t input_system_configure_channel_sensor(
+ const channel_cfg_t channel);
+
+static input_system_err_t input_buffer_configuration(void);
+
+static input_system_err_t configuration_to_registers(void);
+
+static void receiver_rst(const rx_ID_t ID);
+static void input_system_network_rst(const input_system_ID_t ID);
+
+static void capture_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const isp2400_ib_buffer_t *const cfg);
+
+static void acquisition_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const isp2400_ib_buffer_t *const cfg);
+
+static void ctrl_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ctrl_unit_cfg_t *const cfg);
+
+static void input_system_network_configure(
+ const input_system_ID_t ID,
+ const input_system_network_cfg_t *const cfg);
+
+// MW: CSI was previously named "rx", short for "receiver"
+static input_system_err_t set_csi_cfg(
+ csi_cfg_t *const lhs,
+ const csi_cfg_t *const rhs,
+ input_system_config_flags_t *const flags);
+
+static input_system_err_t set_source_type(
+ input_system_source_t *const lhs,
+ const input_system_source_t rhs,
+ input_system_config_flags_t *const flags);
+
+static input_system_err_t input_system_multiplexer_cfg(
+ input_system_multiplex_t *const lhs,
+ const input_system_multiplex_t rhs,
+ input_system_config_flags_t *const flags);
+
+static void gp_device_rst(const gp_device_ID_t ID);
+
+static void input_selector_cfg_for_sensor(const gp_device_ID_t ID);
+
+static void input_switch_rst(const gp_device_ID_t ID);
+
+static void input_switch_cfg(
+ const gp_device_ID_t ID,
+ const input_switch_cfg_t *const cfg
+);
+
+void receiver_set_compression(
+ const rx_ID_t ID,
+ const unsigned int cfg_ID,
+ const mipi_compressor_t comp,
+ const mipi_predictor_t pred)
+{
+ const unsigned int field_id = cfg_ID % N_MIPI_FORMAT_CUSTOM;
+ const unsigned int ch_id = cfg_ID / N_MIPI_FORMAT_CUSTOM;
+ hrt_data val;
+ hrt_address addr = 0;
+ hrt_data reg;
+
+ assert(ID < N_RX_ID);
+ assert(cfg_ID < N_MIPI_COMPRESSOR_CONTEXT);
+ assert(field_id < N_MIPI_FORMAT_CUSTOM);
+ assert(ch_id < N_RX_CHANNEL_ID);
+ assert(comp < N_MIPI_COMPRESSOR_METHODS);
+ assert(pred < N_MIPI_PREDICTOR_TYPES);
+
+ val = (((uint8_t)pred) << 3) | comp;
+
+ switch (ch_id) {
+ case 0:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
+ break;
+ case 1:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
+ break;
+ case 2:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
+ break;
+ case 3:
+ addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX :
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
+ break;
+ default:
+ /* should not happen */
+ assert(false);
+ return;
+ }
+
+ reg = ((field_id < 6) ? (val << (field_id * 5)) : (val << ((
+ field_id - 6) * 5)));
+ receiver_reg_store(ID, addr, reg);
+}
+
+void receiver_port_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const bool cnd)
+{
+ hrt_data reg = receiver_port_reg_load(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+
+ if (cnd) {
+ reg |= 0x01;
+ } else {
+ reg &= ~0x01;
+ }
+
+ receiver_port_reg_store(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg);
+}
+
+bool is_receiver_port_enabled(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID)
+{
+ hrt_data reg = receiver_port_reg_load(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+ return ((reg & 0x01) != 0);
+}
+
+void receiver_irq_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info)
+{
+ receiver_port_reg_store(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info);
+}
+
+rx_irq_info_t receiver_get_irq_info(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID)
+{
+ return receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+}
+
+void receiver_irq_clear(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info)
+{
+ receiver_port_reg_store(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
+}
+
+// MW: "2400" in the name is not good, but this is to avoid a naming conflict
+static input_system_cfg2400_t config;
+
+static void receiver_rst(
+ const rx_ID_t ID)
+{
+ enum mipi_port_id port_id;
+
+ assert(ID < N_RX_ID);
+
+// Disable all ports.
+ for (port_id = MIPI_PORT0_ID; port_id < N_MIPI_PORT_ID; port_id++) {
+ receiver_port_enable(ID, port_id, false);
+ }
+
+ // AM: Additional actions for stopping receiver?
+}
+
+//Single function to reset all the devices mapped via GP_DEVICE.
+static void gp_device_rst(const gp_device_ID_t ID)
+{
+ assert(ID < N_GP_DEVICE_ID);
+
+ gp_device_reg_store(ID, _REG_GP_SYNCGEN_ENABLE_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FREE_RUNNING_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_PAUSE_ADDR, ONE);
+ // gp_device_reg_store(ID, _REG_GP_NR_FRAMES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_LINES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR, ZERO);
+// AM: The following calls cause strange warnings. Probably these registers should not be initialized here.
+// gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_B_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_B_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_XY_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_DELTA_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_DELTA_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_MODE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED2_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN2_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE2_ADDR, ZERO);
+ //gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
+ //gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_HOR_CNT_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_VER_CNT_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FRAME_CNT_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR,
+ ZERO); // AM: Maybe this soft reset is not safe.
+}
+
+static void input_selector_cfg_for_sensor(const gp_device_ID_t ID)
+{
+ assert(ID < N_GP_DEVICE_ID);
+
+ gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO);
+}
+
+static void input_switch_rst(const gp_device_ID_t ID)
+{
+ int addr;
+
+ assert(ID < N_GP_DEVICE_ID);
+
+ // Initialize the data&hsync LUT.
+ for (addr = _REG_GP_IFMT_input_switch_lut_reg0;
+ addr <= _REG_GP_IFMT_input_switch_lut_reg7; addr += SIZEOF_HRT_REG) {
+ gp_device_reg_store(ID, addr, ZERO);
+ }
+
+ // Initialize the vsync LUT.
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_fsync_lut,
+ ZERO);
+}
+
+static void input_switch_cfg(
+ const gp_device_ID_t ID,
+ const input_switch_cfg_t *const cfg)
+{
+ int addr_offset;
+
+ assert(ID < N_GP_DEVICE_ID);
+ assert(cfg);
+
+ // Initialize the data&hsync LUT.
+ for (addr_offset = 0; addr_offset < N_RX_CHANNEL_ID * 2; addr_offset++) {
+ assert(addr_offset * SIZEOF_HRT_REG + _REG_GP_IFMT_input_switch_lut_reg0 <=
+ _REG_GP_IFMT_input_switch_lut_reg7);
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_lut_reg0 + addr_offset * SIZEOF_HRT_REG,
+ cfg->hsync_data_reg[addr_offset]);
+ }
+
+ // Initialize the vsync LUT.
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_fsync_lut,
+ cfg->vsync_data_reg);
+}
+
+static void input_system_network_rst(const input_system_ID_t ID)
+{
+ unsigned int sub_id;
+
+ // Reset all 3 multicasts.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+
+ // Reset stream mux.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ N_INPUT_SYSTEM_MULTIPLEX);
+
+ // Reset 3 capture units.
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID;
+ sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_INIT_REG_ID,
+ 1U << CAPT_INIT_RST_REG_BIT);
+ }
+
+ // Reset acquisition unit.
+ for (sub_id = ACQUISITION_UNIT0_ID;
+ sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_INIT_REG_ID,
+ 1U << ACQ_INIT_RST_REG_BIT);
+ }
+
+ // DMA unit reset is not needed.
+
+ // Reset controller units.
+ // NB: In future we need to keep part of ctrl_state for split capture and
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
+ sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_INIT_REG_ID,
+ 1U); //AM: Is there any named constant?
+ }
+}
+
+// Function that resets current configuration.
+input_system_err_t input_system_configuration_reset(void)
+{
+ unsigned int i;
+
+ receiver_rst(RX0_ID);
+
+ input_system_network_rst(INPUT_SYSTEM0_ID);
+
+ gp_device_rst(GP_DEVICE0_ID);
+
+ input_switch_rst(GP_DEVICE0_ID);
+
+ //target_rst();
+
+ // Reset IRQ_CTRLs.
+
+ // Reset configuration data structures.
+ for (i = 0; i < N_CHANNELS; i++) {
+ config.ch_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_isp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_sp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_strm2mem_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ }
+
+ for (i = 0; i < N_CSI_PORTS; i++) {
+ config.csi_buffer_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ }
+
+ config.source_type_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.unallocated_ib_mem_words = IB_CAPACITY_IN_WORDS;
+ //config.acq_allocated_ib_mem_words = 0;
+
+ /* Set the start of the session configuration. */
+ config.session_flags = INPUT_SYSTEM_CFG_FLAG_REQUIRED;
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// MW: Comments are good, but doxygen is required; place it at the declaration
+// Function that appends the channel to the current configuration.
+static input_system_err_t input_system_configure_channel(
+ const channel_cfg_t channel)
+{
+ input_system_err_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+ // Check if channel is not already configured.
+ if (config.ch_flags[channel.ch_id] & INPUT_SYSTEM_CFG_FLAG_SET) {
+ return INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET;
+ } else {
+ switch (channel.source_type) {
+ case INPUT_SYSTEM_SOURCE_SENSOR:
+ error = input_system_configure_channel_sensor(channel);
+ break;
+ case INPUT_SYSTEM_SOURCE_PRBS:
+ case INPUT_SYSTEM_SOURCE_FIFO:
+ default:
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) return error;
+ // Input switch channel configurations must be combined in united config.
+ config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2]
+ =
+ channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[0];
+ config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2 +
+ 1] =
+ channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[1];
+ config.input_switch_cfg.vsync_data_reg |=
+ (channel.target_cfg.input_switch_channel_cfg.vsync_data_reg & 0x7) <<
+ (channel.source_cfg.csi_cfg.csi_port * 3);
+
+ // Other targets are just copied and marked as set.
+ config.target_isp[channel.source_cfg.csi_cfg.csi_port] =
+ channel.target_cfg.target_isp_cfg;
+ config.target_sp[channel.source_cfg.csi_cfg.csi_port] =
+ channel.target_cfg.target_sp_cfg;
+ config.target_strm2mem[channel.source_cfg.csi_cfg.csi_port] =
+ channel.target_cfg.target_strm2mem_cfg;
+ config.target_isp_flags[channel.source_cfg.csi_cfg.csi_port] |=
+ INPUT_SYSTEM_CFG_FLAG_SET;
+ config.target_sp_flags[channel.source_cfg.csi_cfg.csi_port] |=
+ INPUT_SYSTEM_CFG_FLAG_SET;
+ config.target_strm2mem_flags[channel.source_cfg.csi_cfg.csi_port] |=
+ INPUT_SYSTEM_CFG_FLAG_SET;
+
+ config.ch_flags[channel.ch_id] = INPUT_SYSTEM_CFG_FLAG_SET;
+ }
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Function that partitions input buffer space with determining addresses.
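+// Illustrative example (sketch): if port A requests 2 regions of 64 words
+// and port B requests 1 region of 128 words, the loop below assigns
+// consecutive base addresses starting at word 0 (csi_buffer[A] at 0,
+// csi_buffer[B] at 128); a shared acquisition buffer, if specified, is
+// placed after them at word 256, provided the total stays within
+// IB_CAPACITY_IN_WORDS.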
+static input_system_err_t input_buffer_configuration(void)
+{
+ u32 current_address = 0;
+ u32 unallocated_memory = IB_CAPACITY_IN_WORDS;
+
+ isp2400_ib_buffer_t candidate_buffer_acq = IB_BUFFER_NULL;
+ u32 size_requested;
+ input_system_config_flags_t acq_already_specified = INPUT_SYSTEM_CFG_FLAG_RESET;
+ input_system_csi_port_t port;
+
+ for (port = INPUT_SYSTEM_PORT_A; port < N_INPUT_SYSTEM_PORTS; port++) {
+ csi_cfg_t source = config.csi_value[port];//.csi_cfg;
+
+ if (config.csi_flags[port] & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check and set csi buffer in input buffer.
+ switch (source.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ config.csi_buffer_flags[port] =
+ INPUT_SYSTEM_CFG_FLAG_BLOCKED; // Well, not used.
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ size_requested = source.csi_buffer.mem_reg_size *
+ source.csi_buffer.nof_mem_regs;
+ if (source.csi_buffer.mem_reg_size > 0
+ && source.csi_buffer.nof_mem_regs > 0
+ && size_requested <= unallocated_memory
+ ) {
+ config.csi_buffer[port].mem_reg_addr = current_address;
+ config.csi_buffer[port].mem_reg_size = source.csi_buffer.mem_reg_size;
+ config.csi_buffer[port].nof_mem_regs = source.csi_buffer.nof_mem_regs;
+ current_address += size_requested;
+ unallocated_memory -= size_requested;
+ config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_SET;
+ } else {
+ config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ break;
+
+ default:
+ config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+
+ // Check whether an acquisition buffer is specified, but set it later since it has to be unique.
+ switch (source.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ // Nothing to do.
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_RESET) {
+ size_requested = source.acquisition_buffer.mem_reg_size
+ * source.acquisition_buffer.nof_mem_regs;
+ if (source.acquisition_buffer.mem_reg_size > 0
+ && source.acquisition_buffer.nof_mem_regs > 0
+ && size_requested <= unallocated_memory
+ ) {
+ candidate_buffer_acq = source.acquisition_buffer;
+ acq_already_specified = INPUT_SYSTEM_CFG_FLAG_SET;
+ }
+ } else {
+ // Check if specified acquisition buffer is the same as specified before.
+ if (source.acquisition_buffer.mem_reg_size != candidate_buffer_acq.mem_reg_size
+ || source.acquisition_buffer.nof_mem_regs != candidate_buffer_acq.nof_mem_regs
+ ) {
+ config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ break;
+
+ default:
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+ } else {
+ config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ }
+ } // end of for ( port )
+
+ // Set the acquisition buffer at the end.
+ size_requested = candidate_buffer_acq.mem_reg_size *
+ candidate_buffer_acq.nof_mem_regs;
+ if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_SET
+ && size_requested <= unallocated_memory) {
+ config.acquisition_buffer_unique.mem_reg_addr = current_address;
+ config.acquisition_buffer_unique.mem_reg_size =
+ candidate_buffer_acq.mem_reg_size;
+ config.acquisition_buffer_unique.nof_mem_regs =
+ candidate_buffer_acq.nof_mem_regs;
+ current_address += size_requested;
+ unallocated_memory -= size_requested;
+ config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_SET;
+
+ assert(current_address <= IB_CAPACITY_IN_WORDS);
+ }
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+static void capture_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const isp2400_ib_buffer_t *const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <=
+ CAPTURE_UNIT2_ID)); // Commented part is always true.
+ assert(cfg);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_START_ADDR_REG_ID,
+ cfg->mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_MEM_REGION_SIZE_REG_ID,
+ cfg->mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_NUM_MEM_REGIONS_REG_ID,
+ cfg->nof_mem_regs);
+}
+
+static void acquisition_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const isp2400_ib_buffer_t *const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_id == ACQUISITION_UNIT0_ID);
+ assert(cfg);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_START_ADDR_REG_ID,
+ cfg->mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_NUM_MEM_REGIONS_REG_ID,
+ cfg->nof_mem_regs);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_MEM_REGION_SIZE_REG_ID,
+ cfg->mem_reg_size);
+}
+
+static void ctrl_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ctrl_unit_cfg_t *const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_id == CTRL_UNIT0_ID);
+ assert(cfg);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_START_ADDR_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].nof_mem_regs);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID,
+ 0);
+}
+
+static void input_system_network_configure(
+ const input_system_ID_t ID,
+ const input_system_network_cfg_t *const cfg)
+{
+ u32 sub_id;
+
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(cfg);
+
+ // Set all 3 multicasts.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT0_ID]);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT1_ID]);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT2_ID]);
+
+ // Set stream mux.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ cfg->mux_cfg);
+
+ // Set capture units.
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID;
+ sub_id++) {
+ capture_unit_configure(ID,
+ sub_id,
+ &cfg->ctrl_unit_cfg[ID].buffer_mipi[sub_id - CAPTURE_UNIT0_ID]);
+ }
+
+ // Set acquisition units.
+ for (sub_id = ACQUISITION_UNIT0_ID;
+ sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ acquisition_unit_configure(ID,
+ sub_id,
+ &cfg->ctrl_unit_cfg[sub_id - ACQUISITION_UNIT0_ID].buffer_acquire[sub_id -
+ ACQUISITION_UNIT0_ID]);
+ }
+
+ // No DMA configuration needed. Ctrl_unit will fully control it.
+
+ // Set controller units.
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
+ sub_id++) {
+ ctrl_unit_configure(ID,
+ sub_id,
+ &cfg->ctrl_unit_cfg[sub_id - CTRL_UNIT0_ID]);
+ }
+}
+
+static input_system_err_t configuration_to_registers(void)
+{
+ input_system_network_cfg_t input_system_network_cfg;
+ int i;
+
+ assert(config.source_type_flags & INPUT_SYSTEM_CFG_FLAG_SET);
+
+ switch (config.source_type) {
+ case INPUT_SYSTEM_SOURCE_SENSOR:
+
+ // Determine stream multicasts setting based on the mode of csi_cfg_t.
+ // AM: This should be moved towards earlier function call, e.g. in
+ // the commit function.
+ for (i = MIPI_PORT0_ID; i < N_MIPI_PORT_ID; i++) {
+ if (config.csi_flags[i] & INPUT_SYSTEM_CFG_FLAG_SET) {
+ switch (config.csi_value[i].buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+ config.multicast[i] = INPUT_SYSTEM_CSI_BACKEND;
+ break;
+
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+ config.multicast[i] = INPUT_SYSTEM_INPUT_BUFFER;
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ config.multicast[i] = INPUT_SYSTEM_MULTICAST;
+ break;
+
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ break;
+
+ default:
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ //break;
+ }
+ } else {
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ }
+
+ input_system_network_cfg.multicast_cfg[i] = config.multicast[i];
+
+ } // for
+
+ input_system_network_cfg.mux_cfg = config.multiplexer;
+
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT0_ID] =
+ config.csi_buffer[MIPI_PORT0_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT1_ID] =
+ config.csi_buffer[MIPI_PORT1_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT2_ID] =
+ config.csi_buffer[MIPI_PORT2_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
+ CTRL_UNIT0_ID].buffer_acquire[ACQUISITION_UNIT0_ID -
+ ACQUISITION_UNIT0_ID] =
+ config.acquisition_buffer_unique;
+
+ // First set input network around CSI receiver.
+ input_system_network_configure(INPUT_SYSTEM0_ID, &input_system_network_cfg);
+
+ // Set the CSI receiver.
+ //...
+ break;
+
+ case INPUT_SYSTEM_SOURCE_PRBS:
+ case INPUT_SYSTEM_SOURCE_FIFO:
+ break;
+
+ default:
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+
+ } // end of switch (source_type)
+
+ // Set input selector.
+ input_selector_cfg_for_sensor(GP_DEVICE0_ID);
+
+ // Set input switch.
+ input_switch_cfg(GP_DEVICE0_ID, &config.input_switch_cfg);
+
+ // Set input formatters.
+ // AM: IF are set dynamically.
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Function that applies the whole configuration.
+input_system_err_t input_system_configuration_commit(void)
+{
+ // The last configuration step is to configure the input buffer.
+ input_system_err_t error = input_buffer_configuration();
+
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
+ return error;
+ }
+
+ // Translate the whole configuration into registers.
+ error = configuration_to_registers();
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
+ return error;
+ }
+
+ // Translate the whole configuration into ctrl commands etc.
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
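+
+/*
+ * Typical call sequence (an illustrative sketch; 'backend_ch' and 'target'
+ * stand for caller-provided backend_channel_cfg_t and target_cfg2400_t
+ * values):
+ *
+ *   input_system_configuration_reset();
+ *   input_system_csi_fifo_channel_cfg(0, INPUT_SYSTEM_PORT_A,
+ *                                     backend_ch, target);
+ *   input_system_configuration_commit();
+ */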
+
+// FIFO
+
+input_system_err_t input_system_csi_fifo_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_FIFO_CAPTURE;
+ channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+input_system_err_t input_system_csi_fifo_channel_with_counting_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode =
+ INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// SRAM
+
+input_system_err_t input_system_csi_sram_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ // uint32_t acq_mem_reg_size,
+ // uint32_t acq_nof_mem_regs,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_SRAM_BUFFERING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// XMEM
+
+// Collects all parameters and puts them in channel_cfg_t.
+input_system_err_t input_system_csi_xmem_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ u32 acq_mem_reg_size,
+ u32 acq_nof_mem_regs,
+ target_cfg2400_t target,
+ uint32_t nof_xmem_buffers
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_BUFFERING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_xmem_buffers;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+input_system_err_t input_system_csi_xmem_acquire_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 acq_mem_reg_size,
+ u32 acq_nof_mem_regs,
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_ACQUIRE;
+ channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+input_system_err_t input_system_csi_xmem_capture_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ u32 csi_mem_reg_size,
+ u32 csi_nof_mem_regs,
+ u32 acq_mem_reg_size,
+ u32 acq_nof_mem_regs,
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ //channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ //channel.source_cfg.csi_cfg.backend_ch = backend_ch;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_CAPTURE;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// Non-CSI sources
+
+input_system_err_t input_system_prbs_channel_cfg(
+ u32 ch_id,
+	u32 nof_frames, // not used yet
+ u32 seed,
+ u32 sync_gen_width,
+ u32 sync_gen_height,
+ u32 sync_gen_hblank_cycles,
+ u32 sync_gen_vblank_cycles,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type = INPUT_SYSTEM_SOURCE_PRBS;
+
+ channel.source_cfg.prbs_cfg.seed = seed;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.width = sync_gen_width;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.height = sync_gen_height;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles;
+
+ channel.target_cfg = target;
+
+ return input_system_configure_channel(channel);
+}
+
+// MW: Don't use system-specific names (even in system-specific files): "cfg2400" -> "cfg".
+input_system_err_t input_system_gpfifo_channel_cfg(
+ u32 ch_id,
+	u32 nof_frames, // not used yet
+	target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type = INPUT_SYSTEM_SOURCE_FIFO;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Private specialized functions for channel setting.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// Fills in the CSI parameters for config.csi_value[port] and claims the
+// multiplexer and multicast resources needed for this port.
+static input_system_err_t input_system_configure_channel_sensor(
+ const channel_cfg_t channel)
+{
+ const u32 port = channel.source_cfg.csi_cfg.csi_port;
+ input_system_err_t status = INPUT_SYSTEM_ERR_NO_ERROR;
+
+ input_system_multiplex_t mux;
+
+ if (port >= N_INPUT_SYSTEM_PORTS)
+ return INPUT_SYSTEM_ERR_GENERIC;
+
+ //check if port > N_INPUT_SYSTEM_MULTIPLEX
+
+ status = set_source_type(&config.source_type, channel.source_type,
+ &config.source_type_flags);
+	if (status != INPUT_SYSTEM_ERR_NO_ERROR)
+		return status;
+
+ // Check for conflicts on source (implicitly on multicast, capture unit and input buffer).
+
+ status = set_csi_cfg(&config.csi_value[port], &channel.source_cfg.csi_cfg,
+ &config.csi_flags[port]);
+	if (status != INPUT_SYSTEM_ERR_NO_ERROR)
+		return status;
+
+ switch (channel.source_cfg.csi_cfg.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_MIPI_PORT0 + port;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux,
+ &config.multiplexer_flags);
+		if (status != INPUT_SYSTEM_ERR_NO_ERROR)
+			return status;
+ config.multicast[port] = INPUT_SYSTEM_CSI_BACKEND;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_ACQUISITION_UNIT;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux,
+ &config.multiplexer_flags);
+		if (status != INPUT_SYSTEM_ERR_NO_ERROR)
+			return status;
+ config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_ACQUISITION_UNIT;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux,
+ &config.multiplexer_flags);
+		if (status != INPUT_SYSTEM_ERR_NO_ERROR)
+			return status;
+ config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ default:
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
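+// The three setters below share one flag protocol: a resource marked BLOCKED
+// may never be (re)configured, a resource marked SET may only be configured
+// again with an identical value, and any violation marks the resource as
+// CONFLICT and returns INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE.
+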
+// Test flags and set structure.
+static input_system_err_t set_source_type(
+ input_system_source_t *const lhs,
+ const input_system_source_t rhs,
+ input_system_config_flags_t *const flags)
+{
+ // MW: Not enough asserts
+ assert(lhs);
+ assert(flags);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check for consistency with already set value.
+ if ((*lhs) == (rhs)) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ } else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ if (rhs >= N_INPUT_SYSTEM_SOURCE) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ // Set the value.
+ *lhs = rhs;
+
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Test flags and set structure.
+static input_system_err_t set_csi_cfg(
+ csi_cfg_t *const lhs,
+ const csi_cfg_t *const rhs,
+ input_system_config_flags_t *const flags)
+{
+ u32 memory_required;
+ u32 acq_memory_required;
+
+ assert(lhs);
+ assert(flags);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if (*flags & INPUT_SYSTEM_CFG_FLAG_SET) {
+		// Check for consistency with already set value.
+ if (/*lhs->backend_ch == rhs.backend_ch
+ &&*/ lhs->buffering_mode == rhs->buffering_mode
+ && lhs->csi_buffer.mem_reg_size == rhs->csi_buffer.mem_reg_size
+ && lhs->csi_buffer.nof_mem_regs == rhs->csi_buffer.nof_mem_regs
+ && lhs->acquisition_buffer.mem_reg_size == rhs->acquisition_buffer.mem_reg_size
+ && lhs->acquisition_buffer.nof_mem_regs == rhs->acquisition_buffer.nof_mem_regs
+ && lhs->nof_xmem_buffers == rhs->nof_xmem_buffers
+ ) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ } else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ // no check for backend_ch
+ // no check for nof_xmem_buffers
+ memory_required = rhs->csi_buffer.mem_reg_size * rhs->csi_buffer.nof_mem_regs;
+ acq_memory_required = rhs->acquisition_buffer.mem_reg_size *
+ rhs->acquisition_buffer.nof_mem_regs;
+ if (rhs->buffering_mode >= N_INPUT_SYSTEM_BUFFERING_MODE
+ ||
+ // Check if required memory is available in input buffer (SRAM).
+ (memory_required + acq_memory_required) > config.unallocated_ib_mem_words
+
+ ) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ // Set the value.
+ //lhs[port]->backend_ch = rhs.backend_ch;
+ lhs->buffering_mode = rhs->buffering_mode;
+ lhs->nof_xmem_buffers = rhs->nof_xmem_buffers;
+
+ lhs->csi_buffer.mem_reg_size = rhs->csi_buffer.mem_reg_size;
+ lhs->csi_buffer.nof_mem_regs = rhs->csi_buffer.nof_mem_regs;
+ lhs->acquisition_buffer.mem_reg_size = rhs->acquisition_buffer.mem_reg_size;
+ lhs->acquisition_buffer.nof_mem_regs = rhs->acquisition_buffer.nof_mem_regs;
+	// ALX: NB: Here we only set the buffer parameters; the buffer is not
+	// allocated yet (no addresses determined). That is done during commit.
+
+ // FIXIT: acq_memory_required is not deducted, since it can be allocated multiple times.
+ config.unallocated_ib_mem_words -= memory_required;
+//assert(config.unallocated_ib_mem_words >=0);
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Test flags and set structure.
+static input_system_err_t input_system_multiplexer_cfg(
+ input_system_multiplex_t *const lhs,
+ const input_system_multiplex_t rhs,
+ input_system_config_flags_t *const flags)
+{
+ assert(lhs);
+ assert(flags);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check for consistency with already set value.
+ if ((*lhs) == (rhs)) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ } else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ if (rhs >= N_INPUT_SYSTEM_MULTIPLEX) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+ // Set the value.
+ *lhs = rhs;
+
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
new file mode 100644
index 000000000000..b66560bca625
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "assert_support.h"
+#include "irq.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#define __INLINE_GP_DEVICE__
+#endif
+#include "gp_device.h" /* _REG_GP_IRQ_REQUEST_ADDR */
+
+static inline void irq_wait_for_write_complete(
+ const irq_ID_t ID);
+
+static inline bool any_irq_channel_enabled(
+ const irq_ID_t ID);
+
+static inline irq_ID_t virq_get_irq_id(const enum virq_id irq_ID,
+ unsigned int *channel_ID);
+
+#ifndef __INLINE_IRQ__
+#include "irq_private.h"
+#endif /* __INLINE_IRQ__ */
+
+static unsigned short IRQ_N_CHANNEL[N_IRQ_ID] = {
+ IRQ0_ID_N_CHANNEL,
+ IRQ1_ID_N_CHANNEL,
+ IRQ2_ID_N_CHANNEL,
+ IRQ3_ID_N_CHANNEL
+};
+
+static unsigned short IRQ_N_ID_OFFSET[N_IRQ_ID + 1] = {
+ IRQ0_ID_OFFSET,
+ IRQ1_ID_OFFSET,
+ IRQ2_ID_OFFSET,
+ IRQ3_ID_OFFSET,
+ IRQ_END_OFFSET
+};
+
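+/*
+ * IRQ1..IRQ3 (IFMT, ISYS and ISEL) are nested controllers: their output is
+ * wired to a pin of IRQ0. IRQ_NESTING_ID[] records which IRQ0 virq each
+ * nested controller raises; N_virq_id means "not nested" (IRQ0 itself).
+ */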
+static enum virq_id IRQ_NESTING_ID[N_IRQ_ID] = {
+ N_virq_id,
+ virq_ifmt,
+ virq_isys,
+ virq_isel
+};
+
+void irq_clear_all(
+ const irq_ID_t ID)
+{
+ hrt_data mask = 0xFFFFFFFF;
+
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_N_CHANNEL[ID] <= HRT_DATA_WIDTH);
+
+ if (IRQ_N_CHANNEL[ID] < HRT_DATA_WIDTH) {
+ mask = ~((~(hrt_data)0) >> IRQ_N_CHANNEL[ID]);
+ }
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask);
+ return;
+}
+
+/*
+ * Do we want the user to be able to set the signalling method?
+ */
+void irq_enable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_id)
+{
+ unsigned int mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ unsigned int enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ unsigned int edge_in = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
+ unsigned int me = 1U << irq_id;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id < IRQ_N_CHANNEL[ID]);
+
+ mask |= me;
+ enable |= me;
+ edge_in |= me; /* rising edge */
+
+	/* To avoid mishaps, the configuration must be applied in the following order */
+
+ /* mask this interrupt */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask & ~me);
+ /* rising edge at input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX, edge_in);
+ /* enable interrupt to output */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
+ /* clear current irq only */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);
+ /* unmask interrupt from input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);
+
+ irq_wait_for_write_complete(ID);
+
+ return;
+}
+
+void irq_enable_pulse(
+ const irq_ID_t ID,
+ bool pulse)
+{
+ unsigned int edge_out = 0x0;
+
+ if (pulse) {
+ edge_out = 0xffffffff;
+ }
+ /* output is given as edge, not pulse */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out);
+ return;
+}
+
+void irq_disable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_id)
+{
+ unsigned int mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ unsigned int enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ unsigned int me = 1U << irq_id;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id < IRQ_N_CHANNEL[ID]);
+
+ mask &= ~me;
+ enable &= ~me;
+
+ /* enable interrupt to output */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
+ /* unmask interrupt from input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);
+ /* clear current irq only */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);
+
+ irq_wait_for_write_complete(ID);
+
+ return;
+}
+
+enum hrt_isp_css_irq_status irq_get_channel_id(
+ const irq_ID_t ID,
+ unsigned int *irq_id)
+{
+ unsigned int irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ unsigned int idx;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id);
+
+ /* find the first irq bit */
+ for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+ if (idx == IRQ_N_CHANNEL[ID])
+ return hrt_isp_css_irq_status_error;
+
+ /* now check whether there are more bits set */
+ if (irq_status != (1U << idx))
+ status = hrt_isp_css_irq_status_more_irqs;
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);
+
+ irq_wait_for_write_complete(ID);
+
+ if (irq_id)
+ *irq_id = (unsigned int)idx;
+
+ return status;
+}
+
+static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = {
+ _REG_GP_IRQ_REQUEST0_ADDR,
+ _REG_GP_IRQ_REQUEST1_ADDR
+};
+
+void irq_raise(
+ const irq_ID_t ID,
+ const irq_sw_channel_id_t irq_id)
+{
+ hrt_address addr;
+
+ OP___assert(ID == IRQ0_ID);
+ OP___assert(IRQ_BASE[ID] != (hrt_address)-1);
+ OP___assert(irq_id < N_IRQ_SW_CHANNEL_ID);
+
+ (void)ID;
+
+ addr = IRQ_REQUEST_ADDR[irq_id];
+ /* The SW IRQ pins are remapped to offset zero */
+ gp_device_reg_store(GP_DEVICE0_ID,
+ (unsigned int)addr, 1);
+ gp_device_reg_store(GP_DEVICE0_ID,
+ (unsigned int)addr, 0);
+ return;
+}
+
+bool any_virq_signal(void)
+{
+ unsigned int irq_status = irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+
+ return (irq_status != 0);
+}
+
+void cnd_virq_enable_channel(
+ const enum virq_id irq_ID,
+ const bool en)
+{
+ irq_ID_t i;
+ unsigned int channel_ID;
+ irq_ID_t ID = virq_get_irq_id(irq_ID, &channel_ID);
+
+ assert(ID < N_IRQ_ID);
+
+ for (i = IRQ1_ID; i < N_IRQ_ID; i++) {
+ /* It is not allowed to enable the pin of a nested IRQ directly */
+ assert(irq_ID != IRQ_NESTING_ID[i]);
+ }
+
+ if (en) {
+ irq_enable_channel(ID, channel_ID);
+ if (IRQ_NESTING_ID[ID] != N_virq_id) {
+ /* Single level nesting, otherwise we'd need to recurse */
+ irq_enable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
+ }
+ } else {
+ irq_disable_channel(ID, channel_ID);
+ if ((IRQ_NESTING_ID[ID] != N_virq_id) && !any_irq_channel_enabled(ID)) {
+ /* Only disable the top if the nested ones are empty */
+ irq_disable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
+ }
+ }
+ return;
+}
+
+void virq_clear_all(void)
+{
+ irq_ID_t irq_id;
+
+ for (irq_id = (irq_ID_t)0; irq_id < N_IRQ_ID; irq_id++) {
+ irq_clear_all(irq_id);
+ }
+ return;
+}
+
+enum hrt_isp_css_irq_status
+virq_get_channel_signals(struct virq_info *irq_info)
+{
+ enum hrt_isp_css_irq_status irq_status = hrt_isp_css_irq_status_error;
+ irq_ID_t ID;
+
+ assert(irq_info);
+
+	for (ID = (irq_ID_t)0; ID < N_IRQ_ID; ID++) {
+ if (any_irq_channel_enabled(ID)) {
+ hrt_data irq_data = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+
+ if (irq_data != 0) {
+ /* The error condition is an IRQ pulse received with no IRQ status written */
+ irq_status = hrt_isp_css_irq_status_success;
+ }
+
+ irq_info->irq_status_reg[ID] |= irq_data;
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, irq_data);
+
+ irq_wait_for_write_complete(ID);
+ }
+ }
+
+ return irq_status;
+}
+
+void virq_clear_info(struct virq_info *irq_info)
+{
+ irq_ID_t ID;
+
+ assert(irq_info);
+
+	for (ID = (irq_ID_t)0; ID < N_IRQ_ID; ID++) {
+ irq_info->irq_status_reg[ID] = 0;
+ }
+ return;
+}
+
+enum hrt_isp_css_irq_status virq_get_channel_id(
+ enum virq_id *irq_id)
+{
+ unsigned int irq_status = irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ unsigned int idx;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;
+ irq_ID_t ID;
+
+ assert(irq_id);
+
+ /* find the first irq bit on device 0 */
+ for (idx = 0; idx < IRQ_N_CHANNEL[IRQ0_ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+
+ if (idx == IRQ_N_CHANNEL[IRQ0_ID]) {
+ return hrt_isp_css_irq_status_error;
+ }
+
+ /* Check whether there are more bits set on device 0 */
+ if (irq_status != (1U << idx)) {
+ status = hrt_isp_css_irq_status_more_irqs;
+ }
+
+ /* Check whether we have an IRQ on one of the nested devices */
+	for (ID = N_IRQ_ID - 1; ID > (irq_ID_t)0; ID--) {
+ if (IRQ_NESTING_ID[ID] == (enum virq_id)idx) {
+ break;
+ }
+ }
+
+ /* If we have a nested IRQ, load that state, discard the device 0 state */
+ if (ID != IRQ0_ID) {
+ irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ /* find the first irq bit on device "id" */
+ for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+
+ if (idx == IRQ_N_CHANNEL[ID]) {
+ return hrt_isp_css_irq_status_error;
+ }
+
+		/* Likewise, check whether there are more bits set on this device */
+ if (irq_status != (1U << idx)) {
+ status = hrt_isp_css_irq_status_more_irqs;
+ } else {
+ /* If this device is empty, clear the state on device 0 */
+ irq_reg_store(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << IRQ_NESTING_ID[ID]);
+ }
+ } /* if (ID != IRQ0_ID) */
+
+	/* Clear the IRQ on the detected device; if there is no nested IRQ, this is device 0 */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);
+
+ irq_wait_for_write_complete(ID);
+
+ idx += IRQ_N_ID_OFFSET[ID];
+ if (irq_id)
+ *irq_id = (enum virq_id)idx;
+
+ return status;
+}
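+
+/*
+ * Note: virq_get_channel_id() reports and clears one interrupt per call and
+ * returns hrt_isp_css_irq_status_more_irqs while further bits remain pending,
+ * so a caller would typically invoke it in a loop until it stops reporting
+ * more_irqs (usage pattern inferred from the return codes, not mandated here).
+ */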
+
+static inline void irq_wait_for_write_complete(
+ const irq_ID_t ID)
+{
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ (void)ia_css_device_load_uint32(IRQ_BASE[ID] +
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX * sizeof(hrt_data));
+}
+
+static inline bool any_irq_channel_enabled(
+ const irq_ID_t ID)
+{
+ hrt_data en_reg;
+
+ assert(ID < N_IRQ_ID);
+
+ en_reg = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+
+ return (en_reg != 0);
+}
+
+static inline irq_ID_t virq_get_irq_id(
+ const enum virq_id irq_ID,
+ unsigned int *channel_ID)
+{
+ irq_ID_t ID;
+
+ assert(channel_ID);
+
+	for (ID = (irq_ID_t)0; ID < N_IRQ_ID; ID++) {
+ if (irq_ID < IRQ_N_ID_OFFSET[ID + 1]) {
+ break;
+ }
+ }
+
+ *channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID];
+
+ return ID;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
new file mode 100644
index 000000000000..c74cd18c7aec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __IRQ_LOCAL_H_INCLUDED__
+#define __IRQ_LOCAL_H_INCLUDED__
+
+#include "irq_global.h"
+
+#include <irq_controller_defs.h>
+
+/* IRQ0_ID */
+#include "hive_isp_css_defs.h"
+#define HIVE_GP_DEV_IRQ_NUM_IRQS 32
+/* IRQ1_ID */
+#include "input_formatter_subsystem_defs.h"
+#define HIVE_IFMT_IRQ_NUM_IRQS 5
+/* IRQ2_ID */
+#include "input_system_defs.h"
+/* IRQ3_ID */
+#include "input_selector_defs.h"
+
+#define IRQ_ID_OFFSET 32
+#define IRQ0_ID_OFFSET 0
+#define IRQ1_ID_OFFSET IRQ_ID_OFFSET
+#define IRQ2_ID_OFFSET (2 * IRQ_ID_OFFSET)
+#define IRQ3_ID_OFFSET (3 * IRQ_ID_OFFSET)
+#define IRQ_END_OFFSET (4 * IRQ_ID_OFFSET)
+
+#define IRQ0_ID_N_CHANNEL HIVE_GP_DEV_IRQ_NUM_IRQS
+#define IRQ1_ID_N_CHANNEL HIVE_IFMT_IRQ_NUM_IRQS
+#define IRQ2_ID_N_CHANNEL HIVE_ISYS_IRQ_NUM_BITS
+#define IRQ3_ID_N_CHANNEL HIVE_ISEL_IRQ_NUM_IRQS
+
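+/*
+ * A virq id is a flat number: the owning controller's IRQ*_ID_OFFSET plus the
+ * bit index within that controller, so every interrupt bit in the system has
+ * a unique identifier.
+ */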
+enum virq_id {
+ virq_gpio_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID,
+ virq_gpio_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID,
+ virq_gpio_pin_2 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID,
+ virq_gpio_pin_3 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID,
+ virq_gpio_pin_4 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID,
+ virq_gpio_pin_5 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID,
+ virq_gpio_pin_6 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID,
+ virq_gpio_pin_7 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID,
+ virq_gpio_pin_8 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID,
+ virq_gpio_pin_9 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID,
+ virq_gpio_pin_10 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID,
+ virq_gpio_pin_11 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID,
+ virq_sp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_BIT_ID,
+ virq_isp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BIT_ID,
+ virq_isys = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISYS_BIT_ID,
+ virq_isel = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISEL_BIT_ID,
+ virq_ifmt = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_IFMT_BIT_ID,
+ virq_sp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID,
+ virq_isp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID,
+ virq_mod_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID,
+ virq_isp_pmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID,
+ virq_isp_bamem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID,
+ virq_isp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID,
+ virq_sp_icache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID,
+ virq_sp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID,
+ virq_mmu_cache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID,
+ virq_gp_timer_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID,
+ virq_gp_timer_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID,
+ virq_sw_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID,
+ virq_sw_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID,
+ virq_dma = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_DMA_BIT_ID,
+ virq_sp_stream_mon_b = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID,
+
+ virq_ifmt0_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID,
+ virq_ifmt1_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID,
+ virq_ifmt2_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_SEC_BIT_ID,
+ virq_ifmt3_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_MEM_CPY_BIT_ID,
+ virq_ifmt_sideband_changed = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID,
+
+ virq_isys_sof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOF_BIT_ID,
+ virq_isys_eof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOF_BIT_ID,
+ virq_isys_sol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOL_BIT_ID,
+ virq_isys_eol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOL_BIT_ID,
+ virq_isys_csi = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID,
+ virq_isys_csi_be = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID,
+ virq_isys_capt0_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP,
+ virq_isys_capt0_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP,
+ virq_isys_capt1_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP,
+ virq_isys_capt1_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP,
+ virq_isys_capt2_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP,
+ virq_isys_capt2_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP,
+ virq_isys_acq_sop_mismatch = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH,
+ virq_isys_ctrl_capt0 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPA,
+ virq_isys_ctrl_capt1 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPB,
+ virq_isys_ctrl_capt2 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPC,
+ virq_isys_cio_to_ahb = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CIO2AHB,
+ virq_isys_dma = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_DMA_BIT_ID,
+ virq_isys_fifo_monitor = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_STREAM_MON_BIT_ID,
+
+ virq_isel_sof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID,
+ virq_isel_eof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID,
+ virq_isel_sol = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID,
+ virq_isel_eol = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID,
+
+ N_virq_id = IRQ_END_OFFSET
+};
+
+struct virq_info {
+ hrt_data irq_status_reg[N_IRQ_ID];
+};
+
+#endif /* __IRQ_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h
new file mode 100644
index 000000000000..ae0a8466a70a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __IRQ_PRIVATE_H_INCLUDED__
+#define __IRQ_PRIVATE_H_INCLUDED__
+
+#include "irq_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_IRQ_C void irq_reg_store(
+ const irq_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(IRQ_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_IRQ_C hrt_data irq_reg_load(
+ const irq_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(IRQ_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __IRQ_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
new file mode 100644
index 000000000000..39cccbfa3fca
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include <linux/delay.h>
+
+#include <system_global.h>
+#include "isp.h"
+
+#ifndef __INLINE_ISP__
+#include "isp_private.h"
+#endif /* __INLINE_ISP__ */
+
+#include "assert_support.h"
+
+void cnd_isp_irq_enable(
+ const isp_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ isp_ctrl_setbit(ID, ISP_IRQ_READY_REG, ISP_IRQ_READY_BIT);
+ /* Enabling the IRQ immediately triggers an interrupt, clear it */
+ isp_ctrl_setbit(ID, ISP_IRQ_CLEAR_REG, ISP_IRQ_CLEAR_BIT);
+ } else {
+ isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG,
+ ISP_IRQ_READY_BIT);
+ }
+ return;
+}
+
+/* ISP functions to control the ISP state from the host, even in crun. */
+
+/* Inspect readiness of an ISP indexed by ID */
+unsigned int isp_is_ready(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+ return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT);
+}
+
+/* Inspect sleeping of an ISP indexed by ID */
+unsigned int isp_is_sleeping(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+ return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT);
+}
+
+/* To be called by the host immediately before starting ISP ID. */
+void isp_start(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+}
+
+/* Wake up ISP ID. */
+void isp_wake(isp_ID_t ID)
+{
+ assert(ID < N_ISP_ID);
+ isp_ctrl_setbit(ID, ISP_SC_REG, ISP_START_BIT);
+ udelay(1);
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
new file mode 100644
index 000000000000..b5c1ba55c991
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __ISP_LOCAL_H_INCLUDED__
+#define __ISP_LOCAL_H_INCLUDED__
+
+#include "isp_global.h"
+
+#include <isp2400_support.h>
+
+#define HIVE_ISP_VMEM_MASK ((1U << ISP_VMEM_ELEMBITS) - 1)
+
+#endif /* __ISP_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h
new file mode 100644
index 000000000000..177770a9bc1d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISP_PRIVATE_H_INCLUDED__
+#define __ISP_PRIVATE_H_INCLUDED__
+
+#ifdef HRT_MEMORY_ACCESS
+#include <hrt/api.h>
+#endif
+
+#include "isp_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+#include "type_support.h"
+
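+/*
+ * Each accessor below comes in two variants: the ia_css_device_*() path used
+ * by the regular driver build, and an hrt_master_port_*() path selected when
+ * HRT_MEMORY_ACCESS is defined (presumably HRT/simulation builds).
+ */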
+STORAGE_CLASS_ISP_C void isp_ctrl_store(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_CTRL_BASE[ID] != (hrt_address) - 1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store_uint32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+#else
+ hrt_master_port_store_32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C hrt_data isp_ctrl_load(
+ const isp_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_CTRL_BASE[ID] != (hrt_address) - 1);
+#if !defined(HRT_MEMORY_ACCESS)
+ return ia_css_device_load_uint32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+#else
+ return hrt_master_port_uload_32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+#endif
+}
+
+STORAGE_CLASS_ISP_C bool isp_ctrl_getbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data val = isp_ctrl_load(ID, reg);
+
+ return (val & (1UL << bit)) != 0;
+}
+
+STORAGE_CLASS_ISP_C void isp_ctrl_setbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data data = isp_ctrl_load(ID, reg);
+
+ isp_ctrl_store(ID, reg, (data | (1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_ctrl_clearbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data data = isp_ctrl_load(ID, reg);
+
+ isp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_store(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const void *data,
+ const size_t size)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store(ISP_DMEM_BASE[ID] + addr, data, size);
+#else
+ hrt_master_port_store(ISP_DMEM_BASE[ID] + addr, data, size);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_load(
+ const isp_ID_t ID,
+ const unsigned int addr,
+ void *data,
+ const size_t size)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_load(ISP_DMEM_BASE[ID] + addr, data, size);
+#else
+ hrt_master_port_load(ISP_DMEM_BASE[ID] + addr, data, size);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_store_uint32(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const uint32_t data)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1);
+ (void)ID;
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store_uint32(ISP_DMEM_BASE[ID] + addr, data);
+#else
+ hrt_master_port_store_32(ISP_DMEM_BASE[ID] + addr, data);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C uint32_t isp_dmem_load_uint32(
+ const isp_ID_t ID,
+ const unsigned int addr)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1);
+ (void)ID;
+#if !defined(HRT_MEMORY_ACCESS)
+ return ia_css_device_load_uint32(ISP_DMEM_BASE[ID] + addr);
+#else
+ return hrt_master_port_uload_32(ISP_DMEM_BASE[ID] + addr);
+#endif
+}
+
+STORAGE_CLASS_ISP_C uint32_t isp_2w_cat_1w(
+ const u16 x0,
+ const uint16_t x1)
+{
+ u32 out = ((uint32_t)(x1 & HIVE_ISP_VMEM_MASK) << ISP_VMEM_ELEMBITS)
+ | (x0 & HIVE_ISP_VMEM_MASK);
+ return out;
+}
+
+#endif /* __ISP_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c
new file mode 100644
index 000000000000..064e88a5e064
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+/* The name "mmu.h" is already taken */
+#include "mmu_device.h"
+
+void mmu_set_page_table_base_index(
+ const mmu_ID_t ID,
+ const hrt_data base_index)
+{
+ mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index);
+ return;
+}
+
+hrt_data mmu_get_page_table_base_index(
+ const mmu_ID_t ID)
+{
+ return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
+}
+
+void mmu_invalidate_cache(
+ const mmu_ID_t ID)
+{
+ mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1);
+ return;
+}
+
+void mmu_invalidate_cache_all(void)
+{
+ mmu_ID_t mmu_id;
+
+ for (mmu_id = (mmu_ID_t)0; mmu_id < N_MMU_ID; mmu_id++) {
+ mmu_invalidate_cache(mmu_id);
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h
new file mode 100644
index 000000000000..f62651953873
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __MMU_LOCAL_H_INCLUDED__
+#define __MMU_LOCAL_H_INCLUDED__
+
+#include "mmu_global.h"
+
+#endif /* __MMU_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
new file mode 100644
index 000000000000..0fb8a675439e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "sp.h"
+
+#ifndef __INLINE_SP__
+#include "sp_private.h"
+#endif /* __INLINE_SP__ */
+
+#include "assert_support.h"
+
+void cnd_sp_irq_enable(
+ const sp_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ sp_ctrl_setbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
+ /* Enabling the IRQ immediately triggers an interrupt, clear it */
+ sp_ctrl_setbit(ID, SP_IRQ_CLEAR_REG, SP_IRQ_CLEAR_BIT);
+ } else {
+ sp_ctrl_clearbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
new file mode 100644
index 000000000000..48546491eb83
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __SP_LOCAL_H_INCLUDED__
+#define __SP_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "sp_global.h"
+
+#define sp_address_of(var) (HIVE_ADDR_ ## var)
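+
+/*
+ * sp_address_of() relies on a HIVE_ADDR_<name> constant being generated for
+ * every SP DMEM variable; e.g. (hypothetical variable name) sp_address_of(foo)
+ * expands to HIVE_ADDR_foo. The load/store macros below build on it.
+ */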
+
+/*
+ * deprecated
+ */
+#define store_sp_int(var, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned int)sp_address_of(var), \
+ (uint32_t)(value))
+
+#define store_sp_ptr(var, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned int)sp_address_of(var), \
+ (uint32_t)(value))
+
+#define load_sp_uint(var) \
+ sp_dmem_load_uint32(SP0_ID, (unsigned int)sp_address_of(var))
+
+#define load_sp_array_uint8(array_name, index) \
+ sp_dmem_load_uint8(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint8_t))
+
+#define load_sp_array_uint16(array_name, index) \
+ sp_dmem_load_uint16(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint16_t))
+
+#define load_sp_array_uint(array_name, index) \
+ sp_dmem_load_uint32(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint32_t))
+
+#define store_sp_var(var, data, bytes) \
+ sp_dmem_store(SP0_ID, (unsigned int)sp_address_of(var), data, bytes)
+
+#define store_sp_array_uint8(array_name, index, value) \
+ sp_dmem_store_uint8(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint8_t), value)
+
+#define store_sp_array_uint16(array_name, index, value) \
+ sp_dmem_store_uint16(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint16_t), value)
+
+#define store_sp_array_uint(array_name, index, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned int)sp_address_of(array_name) + \
+ (index) * sizeof(uint32_t), value)
+
+#define store_sp_var_with_offset(var, offset, data, bytes) \
+ sp_dmem_store(SP0_ID, (unsigned int)sp_address_of(var) + \
+ offset, data, bytes)
+
+#define load_sp_var(var, data, bytes) \
+ sp_dmem_load(SP0_ID, (unsigned int)sp_address_of(var), data, bytes)
+
+#define load_sp_var_with_offset(var, offset, data, bytes) \
+ sp_dmem_load(SP0_ID, (unsigned int)sp_address_of(var) + offset, \
+ data, bytes)
+
+#endif /* __SP_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h
new file mode 100644
index 000000000000..c69778411f2f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __SP_PRIVATE_H_INCLUDED__
+#define __SP_PRIVATE_H_INCLUDED__
+
+#include "sp_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_SP_C void sp_ctrl_store(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(SP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_SP_C hrt_data sp_ctrl_load(
+ const sp_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data val = sp_ctrl_load(ID, reg);
+
+ return (val & (1UL << bit)) != 0;
+}
+
+STORAGE_CLASS_SP_C void sp_ctrl_setbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data data = sp_ctrl_load(ID, reg);
+
+ sp_ctrl_store(ID, reg, (data | (1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data data = sp_ctrl_load(ID, reg);
+
+ sp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const void *data,
+ const size_t size)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store(SP_DMEM_BASE[ID] + addr, data, size);
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_load(
+ const sp_ID_t ID,
+ const hrt_address addr,
+ void *data,
+ const size_t size)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ ia_css_device_load(SP_DMEM_BASE[ID] + addr, data, size);
+ return;
+}
+
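+/*
+ * Note: the sized accessors below ignore 'ID' after the assert and always
+ * address SP0 DMEM, presumably because only a single SP instance exists.
+ */
+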
+STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint8_t data)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ ia_css_device_store_uint8(SP_DMEM_BASE[SP0_ID] + addr, data);
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint16_t data)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ ia_css_device_store_uint16(SP_DMEM_BASE[SP0_ID] + addr, data);
+ return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint32_t data)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ ia_css_device_store_uint32(SP_DMEM_BASE[SP0_ID] + addr, data);
+ return;
+}
+
+STORAGE_CLASS_SP_C uint8_t sp_dmem_load_uint8(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint8(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+STORAGE_CLASS_SP_C uint16_t sp_dmem_load_uint16(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint16(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+STORAGE_CLASS_SP_C uint32_t sp_dmem_load_uint32(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint32(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+#endif /* __SP_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c
new file mode 100644
index 000000000000..948b0b0a0272
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "timed_ctrl.h"
+
+#ifndef __INLINE_TIMED_CTRL__
+#include "timed_ctrl_private.h"
+#endif /* __INLINE_TIMED_CTRL__ */
+
+#include "assert_support.h"
+
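+/*
+ * A timed-controller command is issued as five words written back-to-back to
+ * the single command register (mask, condition, counter, address, value);
+ * the controller presumably queues them as one command.
+ */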
+void timed_ctrl_snd_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ hrt_address addr,
+ hrt_data value)
+{
+ OP___assert(ID == TIMED_CTRL0_ID);
+ OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1);
+
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, mask);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, condition);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, counter);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, (hrt_data)addr);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, value);
+}
+
+/*
+ * pqiao TODO: make sure the following commands get the correct BASE address
+ * both for csim and android
+ */
+
+void timed_ctrl_snd_sp_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const sp_ID_t SP_ID,
+ hrt_address offset,
+ hrt_data value)
+{
+ OP___assert(SP_ID < N_SP_ID);
+ OP___assert(SP_DMEM_BASE[SP_ID] != (hrt_address)-1);
+
+ timed_ctrl_snd_commnd(ID, mask, condition, counter,
+ SP_DMEM_BASE[SP_ID] + offset, value);
+}
+
+void timed_ctrl_snd_gpio_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const gpio_ID_t GPIO_ID,
+ hrt_address offset,
+ hrt_data value)
+{
+ OP___assert(GPIO_ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[GPIO_ID] != (hrt_address)-1);
+
+ timed_ctrl_snd_commnd(ID, mask, condition, counter,
+ GPIO_BASE[GPIO_ID] + offset, value);
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h
new file mode 100644
index 000000000000..bf97f272adba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __TIMED_CTRL_LOCAL_H_INCLUDED__
+#define __TIMED_CTRL_LOCAL_H_INCLUDED__
+
+#include "timed_ctrl_global.h"
+
+#endif /* __TIMED_CTRL_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h
new file mode 100644
index 000000000000..7b90b4cf0402
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __TIMED_CTRL_PRIVATE_H_INCLUDED__
+#define __TIMED_CTRL_PRIVATE_H_INCLUDED__
+
+#include "timed_ctrl_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_TIMED_CTRL_C void timed_ctrl_reg_store(
+ const timed_ctrl_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ OP___assert(ID < N_TIMED_CTRL_ID);
+ OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(TIMED_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+#endif /* __TIMED_CTRL_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h
new file mode 100644
index 000000000000..2882849660ce
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __VAMEM_LOCAL_H_INCLUDED__
+#define __VAMEM_LOCAL_H_INCLUDED__
+
+#include "vamem_global.h"
+
+#endif /* __VAMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c
new file mode 100644
index 000000000000..722b684fbc37
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2016, Intel Corporation.
+ */
+
+#include "isp.h"
+#include "vmem.h"
+#include "vmem_local.h"
+
+#if !defined(HRT_MEMORY_ACCESS)
+#include "ia_css_device_access.h"
+#endif
+#include "assert_support.h"
+
+typedef unsigned long long hive_uedge;
+typedef hive_uedge *hive_wide;
+
+/* Copied from SDK: sim_semantics.c */
+
+/* subword bits move like this: MSB[____xxxx____]LSB -> MSB[00000000xxxx]LSB */
+static inline hive_uedge
+subword(hive_uedge w, unsigned int start, unsigned int end)
+{
+ return (w & (((1ULL << (end - 1)) - 1) << 1 | 1)) >> start;
+}
+
+/* inverse subword bits move like this: MSB[xxxx____xxxx]LSB -> MSB[xxxx0000xxxx]LSB */
+static inline hive_uedge
+inv_subword(hive_uedge w, unsigned int start, unsigned int end)
+{
+ return w & (~(((1ULL << (end - 1)) - 1) << 1 | 1) | ((1ULL << start) - 1));
+}
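+
+/*
+ * Worked example: subword(0x00F0, 4, 8) == 0xF (bits [4,8) shifted down to
+ * bit 0), while inv_subword(0x12F0, 4, 8) == 0x1200 (bits [4,8) cleared,
+ * everything else preserved).
+ */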
+
+#define uedge_bits (8 * sizeof(hive_uedge))
+#define move_lower_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, 0, src_bit)
+#define move_upper_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, src_bit, uedge_bits)
+#define move_word(target, target_bit, src) move_subword(target, target_bit, src, 0, uedge_bits)
+
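+/*
+ * move_subword() writes bits [src_start, src_end) of 'src' into the wide word
+ * array 'target' starting at bit 'target_bit'. hive_sim_wide_unpack() and
+ * hive_sim_wide_pack() use it to extract or insert element 'index' (which is
+ * 'elem_bits' bits wide) of a vector stored as an array of 64-bit words,
+ * including elements that straddle a 64-bit boundary.
+ */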
+static void
+move_subword(
+ hive_uedge *target,
+ unsigned int target_bit,
+ hive_uedge src,
+ unsigned int src_start,
+ unsigned int src_end)
+{
+ unsigned int start_elem = target_bit / uedge_bits;
+ unsigned int start_bit = target_bit % uedge_bits;
+ unsigned int subword_width = src_end - src_start;
+
+ hive_uedge src_subword = subword(src, src_start, src_end);
+
+ if (subword_width + start_bit > uedge_bits) { /* overlap */
+ hive_uedge old_val1;
+ hive_uedge old_val0 = inv_subword(target[start_elem], start_bit, uedge_bits);
+
+ target[start_elem] = old_val0 | (src_subword << start_bit);
+ old_val1 = inv_subword(target[start_elem + 1], 0,
+ subword_width + start_bit - uedge_bits);
+ target[start_elem + 1] = old_val1 | (src_subword >> (uedge_bits - start_bit));
+ } else {
+ hive_uedge old_val = inv_subword(target[start_elem], start_bit,
+ start_bit + subword_width);
+
+ target[start_elem] = old_val | (src_subword << start_bit);
+ }
+}
+
+static void
+hive_sim_wide_unpack(
+ hive_wide vector,
+ hive_wide elem,
+ hive_uint elem_bits,
+ hive_uint index)
+{
+ /* pointers into wide_type: */
+ unsigned int start_elem = (elem_bits * index) / uedge_bits;
+ unsigned int start_bit = (elem_bits * index) % uedge_bits;
+ unsigned int end_elem = (elem_bits * (index + 1) - 1) / uedge_bits;
+ unsigned int end_bit = ((elem_bits * (index + 1) - 1) % uedge_bits) + 1;
+
+ if (elem_bits == uedge_bits) {
+ /* easy case for speedup: */
+ elem[0] = vector[index];
+ } else if (start_elem == end_elem) {
+ /* only one (<=64 bits) element needs to be (partly) copied: */
+ move_subword(elem, 0, vector[start_elem], start_bit, end_bit);
+ } else {
+ /* general case: handles edge spanning cases (includes >64bit elements) */
+ unsigned int bits_written = 0;
+ unsigned int i;
+
+ move_upper_bits(elem, bits_written, vector[start_elem], start_bit);
+ bits_written += (64 - start_bit);
+ for (i = start_elem + 1; i < end_elem; i++) {
+ move_word(elem, bits_written, vector[i]);
+ bits_written += uedge_bits;
+ }
+ move_lower_bits(elem, bits_written, vector[end_elem], end_bit);
+ }
+}
+
+static void
+hive_sim_wide_pack(
+ hive_wide vector,
+ hive_wide elem,
+ hive_uint elem_bits,
+ hive_uint index)
+{
+ /* pointers into wide_type: */
+ unsigned int start_elem = (elem_bits * index) / uedge_bits;
+
+ /* easy case for speedup: */
+ if (elem_bits == uedge_bits) {
+ vector[start_elem] = elem[0];
+ } else if (elem_bits > uedge_bits) {
+ unsigned int bits_to_write = elem_bits;
+ unsigned int start_bit = elem_bits * index;
+ unsigned int i = 0;
+
+ for (; bits_to_write > uedge_bits;
+ bits_to_write -= uedge_bits, i++, start_bit += uedge_bits) {
+ move_word(vector, start_bit, elem[i]);
+ }
+ move_lower_bits(vector, start_bit, elem[i], bits_to_write);
+ } else {
+ /* only one element needs to be (partly) copied: */
+ move_lower_bits(vector, elem_bits * index, elem[0], elem_bits);
+ }
+}
+
+static void load_vector(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from)
+{
+ unsigned int i;
+ hive_uedge *data;
+ unsigned int size = sizeof(short) * ISP_NWAY;
+
+ VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
+ assert(ISP_BAMEM_BASE[ID] != (hrt_address) - 1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
+#else
+ hrt_master_port_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
+#endif
+ data = (hive_uedge *)v;
+ for (i = 0; i < ISP_NWAY; i++) {
+ hive_uedge elem = 0;
+
+ hive_sim_wide_unpack(data, &elem, ISP_VEC_ELEMBITS, i);
+ to[i] = elem;
+ }
+	udelay(1); /* Spend at least 1 cycle per vector */
+}
+
+static void store_vector(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from)
+{
+ unsigned int i;
+ unsigned int size = sizeof(short) * ISP_NWAY;
+
+ VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
+ //load_vector (&v[1][0], &to[ISP_NWAY]); /* Fetch the next vector, since it will be overwritten. */
+ hive_uedge *data = (hive_uedge *)v;
+
+ for (i = 0; i < ISP_NWAY; i++) {
+ hive_sim_wide_pack(data, (hive_wide)&from[i], ISP_VEC_ELEMBITS, i);
+ }
+ assert(ISP_BAMEM_BASE[ID] != (hrt_address) - 1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
+#else
+ //hrt_mem_store (ISP, VMEM, (unsigned)to, &v, siz); /* This will overwrite the next vector as well */
+ hrt_master_port_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
+#endif
+	udelay(1); /* Spend at least 1 cycle per vector */
+}
+
+void isp_vmem_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int elems) /* In t_vmem_elem */
+{
+ unsigned int c;
+ const t_vmem_elem *vp = from;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)from % ISP_VEC_ALIGN == 0);
+ assert(elems % ISP_NWAY == 0);
+ for (c = 0; c < elems; c += ISP_NWAY) {
+ load_vector(ID, &to[c], vp);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int elems) /* In t_vmem_elem */
+{
+ unsigned int c;
+ t_vmem_elem *vp = to;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)to % ISP_VEC_ALIGN == 0);
+ assert(elems % ISP_NWAY == 0);
+ for (c = 0; c < elems; c += ISP_NWAY) {
+ store_vector(ID, vp, &from[c]);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_2d_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int height,
+ unsigned int width,
+	unsigned int stride_to, /* In t_vmem_elem */
+	unsigned int stride_from /* In t_vmem_elem */)
+{
+ unsigned int h;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)from % ISP_VEC_ALIGN == 0);
+ assert(width % ISP_NWAY == 0);
+ assert(stride_from % ISP_NWAY == 0);
+ for (h = 0; h < height; h++) {
+ unsigned int c;
+ const t_vmem_elem *vp = from;
+
+ for (c = 0; c < width; c += ISP_NWAY) {
+ load_vector(ID, &to[stride_to * h + c], vp);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+ from = (const t_vmem_elem *)((const char *)from + stride_from / ISP_NWAY *
+ ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_2d_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int height,
+ unsigned int width,
+	unsigned int stride_to, /* In t_vmem_elem */
+	unsigned int stride_from /* In t_vmem_elem */)
+{
+ unsigned int h;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)to % ISP_VEC_ALIGN == 0);
+ assert(width % ISP_NWAY == 0);
+ assert(stride_to % ISP_NWAY == 0);
+ for (h = 0; h < height; h++) {
+ unsigned int c;
+ t_vmem_elem *vp = to;
+
+ for (c = 0; c < width; c += ISP_NWAY) {
+ store_vector(ID, vp, &from[stride_from * h + c]);
+ vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN);
+ }
+ to = (t_vmem_elem *)((char *)to + stride_to / ISP_NWAY * ISP_VEC_ALIGN);
+ }
+}
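+
+/*
+ * Usage sketch (illustrative only; ISP0_ID, the VMEM offset and the buffer
+ * size below are assumptions, not values defined in this file):
+ *
+ *	t_vmem_elem buf[2 * ISP_NWAY];
+ *	const t_vmem_elem *vmem = (const t_vmem_elem *)0x1000;
+ *
+ *	isp_vmem_load(ISP0_ID, vmem, buf, 2 * ISP_NWAY);
+ *	// ... modify buf on the host ...
+ *	isp_vmem_store(ISP0_ID, (t_vmem_elem *)vmem, buf, 2 * ISP_NWAY);
+ *
+ * Both the VMEM address and the element count must satisfy the asserts above
+ * (ISP_VEC_ALIGN alignment and a multiple of ISP_NWAY, respectively).
+ */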
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
new file mode 100644
index 000000000000..0163d31cd169
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __VMEM_LOCAL_H_INCLUDED__
+#define __VMEM_LOCAL_H_INCLUDED__
+
+#include "type_support.h"
+#include "vmem_global.h"
+
+typedef u16 t_vmem_elem;
+typedef s16 t_svmem_elem;
+
+#define VMEM_ARRAY(x, s) t_vmem_elem x[(s) / ISP_NWAY][ISP_NWAY]
+#define SVMEM_ARRAY(x, s) t_svmem_elem x[(s) / ISP_NWAY][ISP_NWAY]
+
+void isp_vmem_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int elems); /* In t_vmem_elem */
+
+void isp_vmem_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int elems); /* In t_vmem_elem */
+
+void isp_vmem_2d_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned int height,
+ unsigned int width,
+	unsigned int stride_to, /* In t_vmem_elem */
+	unsigned int stride_from /* In t_vmem_elem */);
+
+void isp_vmem_2d_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned int height,
+ unsigned int width,
+	unsigned int stride_to, /* In t_vmem_elem */
+	unsigned int stride_from /* In t_vmem_elem */);
+
+#endif /* __VMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h
new file mode 100644
index 000000000000..ca685cf15676
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __VMEM_PRIVATE_H_INCLUDED__
+#define __VMEM_PRIVATE_H_INCLUDED__
+
+#include "vmem_public.h"
+
+#endif /* __VMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h
new file mode 100644
index 000000000000..b8ed8964c7b8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_FORMATTER_GLOBAL_H_INCLUDED__
+#define __INPUT_FORMATTER_GLOBAL_H_INCLUDED__
+
+#define IS_INPUT_FORMATTER_VERSION2
+#define IS_INPUT_SWITCH_VERSION2
+
+#include <type_support.h>
+#include <system_local.h>
+#include "if_defs.h"
+#include "str2mem_defs.h"
+#include "input_switch_2400_defs.h"
+
+#define _HIVE_INPUT_SWITCH_GET_FSYNC_REG_LSB(ch_id) ((ch_id) * 3)
+
+#define HIVE_SWITCH_N_CHANNELS 4
+#define HIVE_SWITCH_N_FORMATTYPES 32
+#define HIVE_SWITCH_N_SWITCH_CODE 4
+#define HIVE_SWITCH_M_CHANNELS 0x00000003
+#define HIVE_SWITCH_M_FORMATTYPES 0x0000001f
+#define HIVE_SWITCH_M_SWITCH_CODE 0x00000003
+#define HIVE_SWITCH_M_FSYNC 0x00000007
+
+#define HIVE_SWITCH_ENCODE_FSYNC(x) \
+ (1U << (((x) - 1) & HIVE_SWITCH_M_CHANNELS))
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_FIELD(reg, bit_index) \
+ (((reg) >> (bit_index)) & HIVE_SWITCH_M_SWITCH_CODE)
+#define _HIVE_INPUT_SWITCH_SET_LUT_FIELD(reg, bit_index, val) \
+ (((reg) & ~(HIVE_SWITCH_M_SWITCH_CODE << (bit_index))) | (((hrt_data)(val) & HIVE_SWITCH_M_SWITCH_CODE) << (bit_index)))
+#define _HIVE_INPUT_SWITCH_GET_FSYNC_FIELD(reg, bit_index) \
+ (((reg) >> (bit_index)) & HIVE_SWITCH_M_FSYNC)
+#define _HIVE_INPUT_SWITCH_SET_FSYNC_FIELD(reg, bit_index, val) \
+ (((reg) & ~(HIVE_SWITCH_M_FSYNC << (bit_index))) | (((hrt_data)(val) & HIVE_SWITCH_M_FSYNC) << (bit_index)))
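+
+/*
+ * Example (illustrative only): storing switch code 2 at bit index 6 of a
+ * cleared LUT register and reading it back:
+ *
+ *	hrt_data lut = 0;
+ *
+ *	lut = _HIVE_INPUT_SWITCH_SET_LUT_FIELD(lut, 6, 2);	// lut == 0x80
+ *	// _HIVE_INPUT_SWITCH_GET_LUT_FIELD(lut, 6) now yields 2
+ */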
+
+typedef struct input_formatter_cfg_s input_formatter_cfg_t;
+
+/* Hardware registers */
+/*#define HIVE_IF_RESET_ADDRESS 0x000*/ /* deprecated */
+#define HIVE_IF_START_LINE_ADDRESS 0x004
+#define HIVE_IF_START_COLUMN_ADDRESS 0x008
+#define HIVE_IF_CROPPED_HEIGHT_ADDRESS 0x00C
+#define HIVE_IF_CROPPED_WIDTH_ADDRESS 0x010
+#define HIVE_IF_VERTICAL_DECIMATION_ADDRESS 0x014
+#define HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS 0x018
+#define HIVE_IF_H_DEINTERLEAVING_ADDRESS 0x01C
+#define HIVE_IF_LEFTPADDING_WIDTH_ADDRESS 0x020
+#define HIVE_IF_END_OF_LINE_OFFSET_ADDRESS 0x024
+#define HIVE_IF_VMEM_START_ADDRESS_ADDRESS 0x028
+#define HIVE_IF_VMEM_END_ADDRESS_ADDRESS 0x02C
+#define HIVE_IF_VMEM_INCREMENT_ADDRESS 0x030
+#define HIVE_IF_YUV_420_FORMAT_ADDRESS 0x034
+#define HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS 0x038
+#define HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS 0x03C
+#define HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS 0x040
+#define HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS 0x044
+#define HIVE_IF_V_DEINTERLEAVING_ADDRESS 0x048
+#define HIVE_IF_FSM_CROP_PIXEL_COUNTER 0x110
+#define HIVE_IF_FSM_CROP_LINE_COUNTER 0x10C
+#define HIVE_IF_FSM_CROP_STATUS 0x108
+
+/* Registers only for simulation */
+#define HIVE_IF_CRUN_MODE_ADDRESS 0x04C
+#define HIVE_IF_DUMP_OUTPUT_ADDRESS 0x050
+
+/* Follow the DMA syntax, "cmd" last */
+#define IF_PACK(val, cmd) ((val & 0x0fff) | (cmd /*& 0xf000*/))
+
+#define HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS (_STR2MEM_SOFT_RESET_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_INPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_OUTPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS (_STR2MEM_BIT_SWAPPING_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_BLOCK_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_PACKET_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS (_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS (_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS (_STR2MEM_EN_STAT_UPDATE_ID * _STR2MEM_REG_ALIGN)
+
+/*
+ * This data structure is shared between host and SP
+ */
+struct input_formatter_cfg_s {
+ u32 start_line;
+ u32 start_column;
+ u32 left_padding;
+ u32 cropped_height;
+ u32 cropped_width;
+ u32 deinterleaving;
+ u32 buf_vecs;
+ u32 buf_start_index;
+ u32 buf_increment;
+ u32 buf_eol_offset;
+ u32 is_yuv420_format;
+ u32 block_no_reqs;
+};
+
+extern const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID];
+extern const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID];
+extern const u8 HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID];
+
+#endif /* __INPUT_FORMATTER_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h
new file mode 100644
index 000000000000..69e68ecd6bc3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IRQ_GLOBAL_H_INCLUDED__
+#define __IRQ_GLOBAL_H_INCLUDED__
+
+#include <system_local.h>
+
+#define IS_IRQ_VERSION_2
+#define IS_IRQ_MAP_VERSION_2
+
+/* We cannot include the (hrt host ID) file defining the "CSS_RECEIVER" property without side effects */
+#ifndef HAS_NO_RX
+#include "irq_types_hrt.h"
+#endif
+
+/* The IRQ is not mapped uniformly on its related interfaces */
+#define IRQ_SW_CHANNEL_OFFSET HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID
+
+typedef enum {
+ IRQ_SW_CHANNEL0_ID = hrt_isp_css_irq_sw_pin_0 - IRQ_SW_CHANNEL_OFFSET,
+ IRQ_SW_CHANNEL1_ID = hrt_isp_css_irq_sw_pin_1 - IRQ_SW_CHANNEL_OFFSET,
+ N_IRQ_SW_CHANNEL_ID
+} irq_sw_channel_id_t;
+
+#endif /* __IRQ_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h
new file mode 100644
index 000000000000..2df468bd76d6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISP_GLOBAL_H_INCLUDED__
+#define __ISP_GLOBAL_H_INCLUDED__
+
+#include <system_local.h>
+
+#include "mamoiada_params.h"
+
+#define ISP_PMEM_WIDTH_LOG2 ISP_LOG2_PMEM_WIDTH
+#define ISP_PMEM_SIZE ISP_PMEM_DEPTH
+
+#define ISP_NWAY_LOG2 6
+#define ISP_VEC_NELEMS_LOG2 ISP_NWAY_LOG2
+
+#ifdef PIPE_GENERATION
+#define PIPEMEM(x) MEM(x)
+#define ISP_NWAY BIT(ISP_NWAY_LOG2)
+#else
+#define PIPEMEM(x)
+#endif
+
+/* The number of data bytes in a vector disregarding the reduced precision */
+#define ISP_VEC_BYTES (ISP_VEC_NELEMS * sizeof(uint16_t))
+
+/* ISP SC Registers */
+#define ISP_SC_REG 0x00
+#define ISP_PC_REG 0x07
+#define ISP_IRQ_READY_REG 0x00
+#define ISP_IRQ_CLEAR_REG 0x00
+
+/* ISP SC Register bits */
+#define ISP_RST_BIT 0x00
+#define ISP_START_BIT 0x01
+#define ISP_BREAK_BIT 0x02
+#define ISP_RUN_BIT 0x03
+#define ISP_BROKEN_BIT 0x04
+#define ISP_IDLE_BIT 0x05 /* READY */
+#define ISP_SLEEPING_BIT 0x06
+#define ISP_STALLING_BIT 0x07
+#define ISP_IRQ_CLEAR_BIT 0x08
+#define ISP_IRQ_READY_BIT 0x0A
+#define ISP_IRQ_SLEEPING_BIT 0x0B
+
+/* ISP Register bits */
+#define ISP_CTRL_SINK_BIT 0x00
+#define ISP_PMEM_SINK_BIT 0x01
+#define ISP_DMEM_SINK_BIT 0x02
+#define ISP_FIFO0_SINK_BIT 0x03
+#define ISP_FIFO1_SINK_BIT 0x04
+#define ISP_FIFO2_SINK_BIT 0x05
+#define ISP_FIFO3_SINK_BIT 0x06
+#define ISP_FIFO4_SINK_BIT 0x07
+#define ISP_FIFO5_SINK_BIT 0x08
+#define ISP_FIFO6_SINK_BIT 0x09
+#define ISP_VMEM_SINK_BIT 0x0A
+#define ISP_VAMEM1_SINK_BIT 0x0B
+#define ISP_VAMEM2_SINK_BIT 0x0C
+#define ISP_VAMEM3_SINK_BIT 0x0D
+#define ISP_HMEM_SINK_BIT 0x0E
+
+#define ISP_CTRL_SINK_REG 0x08
+#define ISP_PMEM_SINK_REG 0x08
+#define ISP_DMEM_SINK_REG 0x08
+#define ISP_FIFO0_SINK_REG 0x08
+#define ISP_FIFO1_SINK_REG 0x08
+#define ISP_FIFO2_SINK_REG 0x08
+#define ISP_FIFO3_SINK_REG 0x08
+#define ISP_FIFO4_SINK_REG 0x08
+#define ISP_FIFO5_SINK_REG 0x08
+#define ISP_FIFO6_SINK_REG 0x08
+#define ISP_VMEM_SINK_REG 0x08
+#define ISP_VAMEM1_SINK_REG 0x08
+#define ISP_VAMEM2_SINK_REG 0x08
+#define ISP_VAMEM3_SINK_REG 0x08
+#define ISP_HMEM_SINK_REG 0x08
+
+/* ISP2401 */
+#define BAMEM VMEM
+#define XNR3_DOWN_BAMEM_BASE_ADDRESS (0x16880)
+#define XNR3_UP_BAMEM_BASE_ADDRESS (0x12880)
+#define bmem_ldrow(fu, pid, offset, data) bmem_ldrow_s(fu, pid, offset, data)
+#define bmem_strow(fu, pid, offset, data) bmem_strow_s(fu, pid, offset, data)
+#define bmem_ldblk(fu, pid, offset, data) bmem_ldblk_s(fu, pid, offset, data)
+#define bmem_stblk(fu, pid, offset, data) bmem_stblk_s(fu, pid, offset, data)
+
+#endif /* __ISP_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h
new file mode 100644
index 000000000000..be8977f6233d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __MMU_GLOBAL_H_INCLUDED__
+#define __MMU_GLOBAL_H_INCLUDED__
+
+#define IS_MMU_VERSION_2
+
+#include <mmu_defs.h>
+
+#endif /* __MMU_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h
new file mode 100644
index 000000000000..db72d7b29f73
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SP_GLOBAL_H_INCLUDED__
+#define __SP_GLOBAL_H_INCLUDED__
+
+#include <system_local.h>
+
+#include <scalar_processor_2400_params.h>
+
+#define SP_PMEM_WIDTH_LOG2 SP_PMEM_LOG_WIDTH_BITS
+#define SP_PMEM_SIZE SP_PMEM_DEPTH
+
+#define SP_DMEM_SIZE 0x4000
+
+/* SP Registers */
+#define SP_PC_REG 0x09
+#define SP_SC_REG 0x00
+#define SP_START_ADDR_REG 0x01
+#define SP_ICACHE_ADDR_REG 0x05
+#define SP_IRQ_READY_REG 0x00
+#define SP_IRQ_CLEAR_REG 0x00
+#define SP_ICACHE_INV_REG 0x00
+#define SP_CTRL_SINK_REG 0x0A
+
+/* SP Register bits */
+#define SP_RST_BIT 0x00
+#define SP_START_BIT 0x01
+#define SP_BREAK_BIT 0x02
+#define SP_RUN_BIT 0x03
+#define SP_BROKEN_BIT 0x04
+#define SP_IDLE_BIT 0x05 /* READY */
+#define SP_SLEEPING_BIT 0x06
+#define SP_STALLING_BIT 0x07
+#define SP_IRQ_CLEAR_BIT 0x08
+#define SP_IRQ_READY_BIT 0x0A
+#define SP_IRQ_SLEEPING_BIT 0x0B
+
+#define SP_ICACHE_INV_BIT 0x0C
+#define SP_IPREFETCH_EN_BIT 0x0D
+
+#define SP_FIFO0_SINK_BIT 0x00
+#define SP_FIFO1_SINK_BIT 0x01
+#define SP_FIFO2_SINK_BIT 0x02
+#define SP_FIFO3_SINK_BIT 0x03
+#define SP_FIFO4_SINK_BIT 0x04
+#define SP_FIFO5_SINK_BIT 0x05
+#define SP_FIFO6_SINK_BIT 0x06
+#define SP_FIFO7_SINK_BIT 0x07
+#define SP_FIFO8_SINK_BIT 0x08
+#define SP_FIFO9_SINK_BIT 0x09
+#define SP_FIFOA_SINK_BIT 0x0A
+#define SP_DMEM_SINK_BIT 0x0B
+#define SP_CTRL_MT_SINK_BIT 0x0C
+#define SP_ICACHE_MT_SINK_BIT 0x0D
+
+#define SP_FIFO0_SINK_REG 0x0A
+#define SP_FIFO1_SINK_REG 0x0A
+#define SP_FIFO2_SINK_REG 0x0A
+#define SP_FIFO3_SINK_REG 0x0A
+#define SP_FIFO4_SINK_REG 0x0A
+#define SP_FIFO5_SINK_REG 0x0A
+#define SP_FIFO6_SINK_REG 0x0A
+#define SP_FIFO7_SINK_REG 0x0A
+#define SP_FIFO8_SINK_REG 0x0A
+#define SP_FIFO9_SINK_REG 0x0A
+#define SP_FIFOA_SINK_REG 0x0A
+#define SP_DMEM_SINK_REG 0x0A
+#define SP_CTRL_MT_SINK_REG 0x0A
+#define SP_ICACHE_MT_SINK_REG 0x0A
+
+#endif /* __SP_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h
new file mode 100644
index 000000000000..2dc0fb88399f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TIMED_CTRL_GLOBAL_H_INCLUDED__
+#define __TIMED_CTRL_GLOBAL_H_INCLUDED__
+
+#define IS_TIMED_CTRL_VERSION_1
+
+#include "timed_controller_defs.h"
+
+/**
+ * Order of the input bits for the timed controller taken from
+ * ISP_CSS_2401 System Architecture Description valid for
+ * 2400, 2401.
+ *
+ * Check for other systems.
+ */
+#define HIVE_TIMED_CTRL_GPIO_PIN_0_BIT_ID 0
+#define HIVE_TIMED_CTRL_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TIMED_CTRL_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TIMED_CTRL_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TIMED_CTRL_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TIMED_CTRL_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TIMED_CTRL_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TIMED_CTRL_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TIMED_CTRL_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TIMED_CTRL_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TIMED_CTRL_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TIMED_CTRL_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TIMED_CTRL_IRQ_SP_BIT_ID 12
+#define HIVE_TIMED_CTRL_IRQ_ISP_BIT_ID 13
+#define HIVE_TIMED_CTRL_IRQ_INPUT_SYSTEM_BIT_ID 14
+#define HIVE_TIMED_CTRL_IRQ_INPUT_SELECTOR_BIT_ID 15
+#define HIVE_TIMED_CTRL_IRQ_IF_BLOCK_BIT_ID 16
+#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_0_BIT_ID 17
+#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_1_BIT_ID 18
+#define HIVE_TIMED_CTRL_CSI_SOL_BIT_ID 19
+#define HIVE_TIMED_CTRL_CSI_EOL_BIT_ID 20
+#define HIVE_TIMED_CTRL_CSI_SOF_BIT_ID 21
+#define HIVE_TIMED_CTRL_CSI_EOF_BIT_ID 22
+#define HIVE_TIMED_CTRL_IRQ_IS_STREAMING_MONITOR_BIT_ID 23
+
+#endif /* __TIMED_CTRL_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h
new file mode 100644
index 000000000000..0042925f1d2b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __VAMEM_GLOBAL_H_INCLUDED__
+#define __VAMEM_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_VAMEM_VERSION_2
+
+/* (log) stepsize of linear interpolation */
+#define VAMEM_INTERP_STEP_LOG2 4
+#define VAMEM_INTERP_STEP BIT(VAMEM_INTERP_STEP_LOG2)
+/* (physical) size of the tables */
+#define VAMEM_TABLE_UNIT_SIZE ((1 << (ISP_VAMEM_ADDRESS_BITS - VAMEM_INTERP_STEP_LOG2)) + 1)
+/* (logical) size of the tables */
+#define VAMEM_TABLE_UNIT_STEP ((VAMEM_TABLE_UNIT_SIZE - 1) << 1)
+/* Number of tables */
+#define VAMEM_TABLE_UNIT_COUNT (ISP_VAMEM_DEPTH / VAMEM_TABLE_UNIT_STEP)
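+
+/*
+ * Worked example (illustrative only; the concrete values of
+ * ISP_VAMEM_ADDRESS_BITS and ISP_VAMEM_DEPTH come from the ISP parameter
+ * headers, the numbers below are just an assumption): with
+ * ISP_VAMEM_ADDRESS_BITS == 10 and an interpolation step of 2^4 = 16,
+ * VAMEM_TABLE_UNIT_SIZE = (1 << 6) + 1 = 65 physical entries per table and
+ * VAMEM_TABLE_UNIT_STEP = 64 << 1 = 128 logical entries, so a VAMEM with
+ * ISP_VAMEM_DEPTH == 1024 holds VAMEM_TABLE_UNIT_COUNT = 1024 / 128 = 8
+ * tables.
+ */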
+
+typedef u16 vamem_data_t;
+
+#endif /* __VAMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h
new file mode 100644
index 000000000000..6d8bde959b52
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __VMEM_GLOBAL_H_INCLUDED__
+#define __VMEM_GLOBAL_H_INCLUDED__
+
+#include "isp.h"
+
+#define VMEM_SIZE ISP_VMEM_DEPTH
+#define VMEM_ELEMBITS ISP_VMEM_ELEMBITS
+#define VMEM_ALIGN ISP_VMEM_ALIGN
+
+#ifndef PIPE_GENERATION
+typedef tvector *pvector;
+#endif
+
+#endif /* __VMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/hive_isp_css_defs.h
new file mode 100644
index 000000000000..0e2c8ec534a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_defs.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _hive_isp_css_defs_h__
+#define _hive_isp_css_defs_h__
+
+#define HIVE_ISP_CTRL_DATA_WIDTH 32
+#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32
+#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1
+#define HIVE_ISP_DDR_ADDRESS_WIDTH 36
+
+#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */
+#define HIVE_ISP_NUM_GPIO_PINS 12
+
+/* This list of vector num_elems/elem_bits pairs is valid both in C as an initializer
+   and in the DMA parameter list */
+#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}}
+#define HIVE_ISP_DDR_WORD_BITS 256
+#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS / 8)
+#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024) /* hss only */
+#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024) /* RTL only */
+#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8)
+#define HIVE_ISP_PAGE_SHIFT 12
+#define HIVE_ISP_PAGE_SIZE BIT(HIVE_ISP_PAGE_SHIFT)
+
+#define CSS_DDR_WORD_BITS HIVE_ISP_DDR_WORD_BITS
+#define CSS_DDR_WORD_BYTES HIVE_ISP_DDR_WORD_BYTES
+
+/* If HIVE_ISP_DDR_BASE_OFFSET is set to a non-zero value, the wide bus just before the DDRAM gets an extra dummy port */
+/* onto which address range 0 .. HIVE_ISP_DDR_BASE_OFFSET-1 maps. This effectively creates an offset for the DDRAM from the system perspective */
+#define HIVE_ISP_DDR_BASE_OFFSET 0x120000000 /* 0x200000 */
+
+#define HIVE_DMA_ISP_BUS_CONN 0
+#define HIVE_DMA_ISP_DDR_CONN 1
+#define HIVE_DMA_BUS_DDR_CONN 2
+#define HIVE_DMA_ISP_MASTER master_port0
+#define HIVE_DMA_BUS_MASTER master_port1
+#define HIVE_DMA_DDR_MASTER master_port2
+
+#define HIVE_DMA_NUM_CHANNELS 32 /* old value was 8 */
+#define HIVE_DMA_CMD_FIFO_DEPTH 24 /* old value was 12 */
+
+#define HIVE_IF_PIXEL_WIDTH 12
+
+#define HIVE_MMU_TLB_SETS 8
+#define HIVE_MMU_TLB_SET_BLOCKS 8
+#define HIVE_MMU_TLB_BLOCK_ELEMENTS 8
+#define HIVE_MMU_PAGE_TABLE_LEVELS 2
+#define HIVE_MMU_PAGE_BYTES HIVE_ISP_PAGE_SIZE
+
+#define HIVE_ISP_CH_ID_BITS 2
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#define HIVE_ISP_ISEL_SEL_BITS 2
+
+#define HIVE_GP_REGS_SDRAM_WAKEUP_IDX 0
+#define HIVE_GP_REGS_IDLE_IDX 1
+#define HIVE_GP_REGS_IRQ_0_IDX 2
+#define HIVE_GP_REGS_IRQ_1_IDX 3
+#define HIVE_GP_REGS_SP_STREAM_STAT_IDX 4
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IDX 5
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IDX 6
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IDX 7
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_COND_IDX 8
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_COND_IDX 9
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_COND_IDX 10
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_COND_IDX 11
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_ENABLE_IDX 12
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_ENABLE_IDX 13
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_ENABLE_IDX 14
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_ENABLE_IDX 15
+#define HIVE_GP_REGS_SWITCH_PRIM_IF_IDX 16
+#define HIVE_GP_REGS_SWITCH_GDC1_IDX 17
+#define HIVE_GP_REGS_SWITCH_GDC2_IDX 18
+#define HIVE_GP_REGS_SRST_IDX 19
+#define HIVE_GP_REGS_SLV_REG_SRST_IDX 20
+
+/* Bit numbers of the soft reset register */
+#define HIVE_GP_REGS_SRST_ISYS_CBUS 0
+#define HIVE_GP_REGS_SRST_ISEL_CBUS 1
+#define HIVE_GP_REGS_SRST_IFMT_CBUS 2
+#define HIVE_GP_REGS_SRST_GPDEV_CBUS 3
+#define HIVE_GP_REGS_SRST_GPIO 4
+#define HIVE_GP_REGS_SRST_TC 5
+#define HIVE_GP_REGS_SRST_GPTIMER 6
+#define HIVE_GP_REGS_SRST_FACELLFIFOS 7
+#define HIVE_GP_REGS_SRST_D_OSYS 8
+#define HIVE_GP_REGS_SRST_IFT_SEC_PIPE 9
+#define HIVE_GP_REGS_SRST_GDC1 10
+#define HIVE_GP_REGS_SRST_GDC2 11
+#define HIVE_GP_REGS_SRST_VEC_BUS 12
+#define HIVE_GP_REGS_SRST_ISP 13
+#define HIVE_GP_REGS_SRST_SLV_GRP_BUS 14
+#define HIVE_GP_REGS_SRST_DMA 15
+#define HIVE_GP_REGS_SRST_SF_ISP_SP 16
+#define HIVE_GP_REGS_SRST_SF_PIF_CELLS 17
+#define HIVE_GP_REGS_SRST_SF_SIF_SP 18
+#define HIVE_GP_REGS_SRST_SF_MC_SP 19
+#define HIVE_GP_REGS_SRST_SF_ISYS_SP 20
+#define HIVE_GP_REGS_SRST_SF_DMA_CELLS 21
+#define HIVE_GP_REGS_SRST_SF_GDC1_CELLS 22
+#define HIVE_GP_REGS_SRST_SF_GDC2_CELLS 23
+#define HIVE_GP_REGS_SRST_SP 24
+#define HIVE_GP_REGS_SRST_OCP2CIO 25
+#define HIVE_GP_REGS_SRST_NBUS 26
+#define HIVE_GP_REGS_SRST_HOST12BUS 27
+#define HIVE_GP_REGS_SRST_WBUS 28
+#define HIVE_GP_REGS_SRST_IC_OSYS 29
+#define HIVE_GP_REGS_SRST_WBUS_IC 30
+
+/* Bit numbers of the slave register soft reset register */
+#define HIVE_GP_REGS_SLV_REG_SRST_DMA 0
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC1 1
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC2 2
+
+/* order of the input bits for the irq controller */
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_IRQ_SP_BIT_ID 12
+#define HIVE_GP_DEV_IRQ_ISP_BIT_ID 13
+#define HIVE_GP_DEV_IRQ_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_IRQ_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_IRQ_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID 17
+#define HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID 18
+#define HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID 19
+#define HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID 20
+#define HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID 21
+#define HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID 22
+#define HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID 23
+#define HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID 24
+#define HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID 25
+#define HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID 26
+#define HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID 27
+#define HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID 28
+#define HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID 29
+#define HIVE_GP_DEV_IRQ_DMA_BIT_ID 30
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID 31
+
+#define HIVE_GP_REGS_NUM_SW_IRQ_REGS 2
+
+/* order of the input bits for the timed controller */
+#define HIVE_GP_DEV_TC_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_TC_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_TC_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_TC_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_TC_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_TC_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_TC_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_TC_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_TC_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_TC_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_TC_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_TC_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_TC_SP_BIT_ID 12
+#define HIVE_GP_DEV_TC_ISP_BIT_ID 13
+#define HIVE_GP_DEV_TC_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_TC_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_TC_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_TC_GP_TIMER_0_BIT_ID 17
+#define HIVE_GP_DEV_TC_GP_TIMER_1_BIT_ID 18
+#define HIVE_GP_DEV_TC_MIPI_SOL_BIT_ID 19
+#define HIVE_GP_DEV_TC_MIPI_EOL_BIT_ID 20
+#define HIVE_GP_DEV_TC_MIPI_SOF_BIT_ID 21
+#define HIVE_GP_DEV_TC_MIPI_EOF_BIT_ID 22
+#define HIVE_GP_DEV_TC_INPSYS_SM 23
+
+/* definitions for the gp_timer block */
+#define HIVE_GP_TIMER_0 0
+#define HIVE_GP_TIMER_1 1
+#define HIVE_GP_TIMER_2 2
+#define HIVE_GP_TIMER_3 3
+#define HIVE_GP_TIMER_4 4
+#define HIVE_GP_TIMER_5 5
+#define HIVE_GP_TIMER_6 6
+#define HIVE_GP_TIMER_7 7
+#define HIVE_GP_TIMER_NUM_COUNTERS 8
+
+#define HIVE_GP_TIMER_IRQ_0 0
+#define HIVE_GP_TIMER_IRQ_1 1
+#define HIVE_GP_TIMER_NUM_IRQS 2
+
+#define HIVE_GP_TIMER_GPIO_0_BIT_ID 0
+#define HIVE_GP_TIMER_GPIO_1_BIT_ID 1
+#define HIVE_GP_TIMER_GPIO_2_BIT_ID 2
+#define HIVE_GP_TIMER_GPIO_3_BIT_ID 3
+#define HIVE_GP_TIMER_GPIO_4_BIT_ID 4
+#define HIVE_GP_TIMER_GPIO_5_BIT_ID 5
+#define HIVE_GP_TIMER_GPIO_6_BIT_ID 6
+#define HIVE_GP_TIMER_GPIO_7_BIT_ID 7
+#define HIVE_GP_TIMER_GPIO_8_BIT_ID 8
+#define HIVE_GP_TIMER_GPIO_9_BIT_ID 9
+#define HIVE_GP_TIMER_GPIO_10_BIT_ID 10
+#define HIVE_GP_TIMER_GPIO_11_BIT_ID 11
+#define HIVE_GP_TIMER_INP_SYS_IRQ 12
+#define HIVE_GP_TIMER_ISEL_IRQ 13
+#define HIVE_GP_TIMER_IFMT_IRQ 14
+#define HIVE_GP_TIMER_SP_STRMON_IRQ 15
+#define HIVE_GP_TIMER_SP_B_STRMON_IRQ 16
+#define HIVE_GP_TIMER_ISP_STRMON_IRQ 17
+#define HIVE_GP_TIMER_MOD_STRMON_IRQ 18
+#define HIVE_GP_TIMER_ISP_BAMEM_ERROR_IRQ 20
+#define HIVE_GP_TIMER_ISP_DMEM_ERROR_IRQ 21
+#define HIVE_GP_TIMER_SP_ICACHE_MEM_ERROR_IRQ 22
+#define HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ 23
+#define HIVE_GP_TIMER_SP_OUT_RUN_DP 24
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 25
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 26
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I2 27
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I3 28
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I4 29
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I5 30
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I6 31
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I7 32
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I8 33
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I9 34
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I10 35
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 36
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 37
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 38
+#define HIVE_GP_TIMER_ISP_OUT_RUN_DP 39
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 40
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 41
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 42
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 43
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I1 44
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I2 45
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I3 46
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I4 47
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I5 48
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I6 49
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 50
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I4_I0 51
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I5_I0 52
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I6_I0 53
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I7_I0 54
+#define HIVE_GP_TIMER_MIPI_SOL_BIT_ID 55
+#define HIVE_GP_TIMER_MIPI_EOL_BIT_ID 56
+#define HIVE_GP_TIMER_MIPI_SOF_BIT_ID 57
+#define HIVE_GP_TIMER_MIPI_EOF_BIT_ID 58
+#define HIVE_GP_TIMER_INPSYS_SM 59
+
+/* port definitions for the streaming monitors */
+/* port definitions SP streaming monitor, monitors the status of streaming ports at the SP side of the streaming FIFOs */
+#define SP_STR_MON_PORT_SP2SIF 0
+#define SP_STR_MON_PORT_SIF2SP 1
+#define SP_STR_MON_PORT_SP2MC 2
+#define SP_STR_MON_PORT_MC2SP 3
+#define SP_STR_MON_PORT_SP2DMA 4
+#define SP_STR_MON_PORT_DMA2SP 5
+#define SP_STR_MON_PORT_SP2ISP 6
+#define SP_STR_MON_PORT_ISP2SP 7
+#define SP_STR_MON_PORT_SP2GPD 8
+#define SP_STR_MON_PORT_FA2SP 9
+#define SP_STR_MON_PORT_SP2ISYS 10
+#define SP_STR_MON_PORT_ISYS2SP 11
+#define SP_STR_MON_PORT_SP2PIFA 12
+#define SP_STR_MON_PORT_PIFA2SP 13
+#define SP_STR_MON_PORT_SP2PIFB 14
+#define SP_STR_MON_PORT_PIFB2SP 15
+
+#define SP_STR_MON_PORT_B_SP2GDC1 0
+#define SP_STR_MON_PORT_B_GDC12SP 1
+#define SP_STR_MON_PORT_B_SP2GDC2 2
+#define SP_STR_MON_PORT_B_GDC22SP 3
+
+/* previously used SP streaming monitor port identifiers, kept for backward compatibility */
+#define SP_STR_MON_PORT_SND_SIF SP_STR_MON_PORT_SP2SIF
+#define SP_STR_MON_PORT_RCV_SIF SP_STR_MON_PORT_SIF2SP
+#define SP_STR_MON_PORT_SND_MC SP_STR_MON_PORT_SP2MC
+#define SP_STR_MON_PORT_RCV_MC SP_STR_MON_PORT_MC2SP
+#define SP_STR_MON_PORT_SND_DMA SP_STR_MON_PORT_SP2DMA
+#define SP_STR_MON_PORT_RCV_DMA SP_STR_MON_PORT_DMA2SP
+#define SP_STR_MON_PORT_SND_ISP SP_STR_MON_PORT_SP2ISP
+#define SP_STR_MON_PORT_RCV_ISP SP_STR_MON_PORT_ISP2SP
+#define SP_STR_MON_PORT_SND_GPD SP_STR_MON_PORT_SP2GPD
+#define SP_STR_MON_PORT_RCV_GPD SP_STR_MON_PORT_FA2SP
+/* Deprecated */
+#define SP_STR_MON_PORT_SND_PIF SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIFB SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIFB SP_STR_MON_PORT_PIFB2SP
+
+#define SP_STR_MON_PORT_SND_PIF_A SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF_A SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIF_B SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIF_B SP_STR_MON_PORT_PIFB2SP
+
+/* port definitions ISP streaming monitor, monitors the status of streaming ports at the ISP side of the streaming FIFOs */
+#define ISP_STR_MON_PORT_ISP2PIFA 0
+#define ISP_STR_MON_PORT_PIFA2ISP 1
+#define ISP_STR_MON_PORT_ISP2PIFB 2
+#define ISP_STR_MON_PORT_PIFB2ISP 3
+#define ISP_STR_MON_PORT_ISP2DMA 4
+#define ISP_STR_MON_PORT_DMA2ISP 5
+#define ISP_STR_MON_PORT_ISP2GDC1 6
+#define ISP_STR_MON_PORT_GDC12ISP 7
+#define ISP_STR_MON_PORT_ISP2GDC2 8
+#define ISP_STR_MON_PORT_GDC22ISP 9
+#define ISP_STR_MON_PORT_ISP2GPD 10
+#define ISP_STR_MON_PORT_FA2ISP 11
+#define ISP_STR_MON_PORT_ISP2SP 12
+#define ISP_STR_MON_PORT_SP2ISP 13
+
+/* previously used ISP streaming monitor port identifiers, kept for backward compatibility */
+#define ISP_STR_MON_PORT_SND_PIF_A ISP_STR_MON_PORT_ISP2PIFA
+#define ISP_STR_MON_PORT_RCV_PIF_A ISP_STR_MON_PORT_PIFA2ISP
+#define ISP_STR_MON_PORT_SND_PIF_B ISP_STR_MON_PORT_ISP2PIFB
+#define ISP_STR_MON_PORT_RCV_PIF_B ISP_STR_MON_PORT_PIFB2ISP
+#define ISP_STR_MON_PORT_SND_DMA ISP_STR_MON_PORT_ISP2DMA
+#define ISP_STR_MON_PORT_RCV_DMA ISP_STR_MON_PORT_DMA2ISP
+#define ISP_STR_MON_PORT_SND_GDC ISP_STR_MON_PORT_ISP2GDC1
+#define ISP_STR_MON_PORT_RCV_GDC ISP_STR_MON_PORT_GDC12ISP
+#define ISP_STR_MON_PORT_SND_GPD ISP_STR_MON_PORT_ISP2GPD
+#define ISP_STR_MON_PORT_RCV_GPD ISP_STR_MON_PORT_FA2ISP
+#define ISP_STR_MON_PORT_SND_SP ISP_STR_MON_PORT_ISP2SP
+#define ISP_STR_MON_PORT_RCV_SP ISP_STR_MON_PORT_SP2ISP
+
+/* port definitions MOD streaming monitor, monitors the status of streaming ports at the module side of the streaming FIFOs */
+
+#define MOD_STR_MON_PORT_PIFA2CELLS 0
+#define MOD_STR_MON_PORT_CELLS2PIFA 1
+#define MOD_STR_MON_PORT_PIFB2CELLS 2
+#define MOD_STR_MON_PORT_CELLS2PIFB 3
+#define MOD_STR_MON_PORT_SIF2SP 4
+#define MOD_STR_MON_PORT_SP2SIF 5
+#define MOD_STR_MON_PORT_MC2SP 6
+#define MOD_STR_MON_PORT_SP2MC 7
+#define MOD_STR_MON_PORT_DMA2ISP 8
+#define MOD_STR_MON_PORT_ISP2DMA 9
+#define MOD_STR_MON_PORT_DMA2SP 10
+#define MOD_STR_MON_PORT_SP2DMA 11
+#define MOD_STR_MON_PORT_GDC12CELLS 12
+#define MOD_STR_MON_PORT_CELLS2GDC1 13
+#define MOD_STR_MON_PORT_GDC22CELLS 14
+#define MOD_STR_MON_PORT_CELLS2GDC2 15
+
+#define MOD_STR_MON_PORT_SND_PIF_A 0
+#define MOD_STR_MON_PORT_RCV_PIF_A 1
+#define MOD_STR_MON_PORT_SND_PIF_B 2
+#define MOD_STR_MON_PORT_RCV_PIF_B 3
+#define MOD_STR_MON_PORT_SND_SIF 4
+#define MOD_STR_MON_PORT_RCV_SIF 5
+#define MOD_STR_MON_PORT_SND_MC 6
+#define MOD_STR_MON_PORT_RCV_MC 7
+#define MOD_STR_MON_PORT_SND_DMA2ISP 8
+#define MOD_STR_MON_PORT_RCV_DMA_FR_ISP 9
+#define MOD_STR_MON_PORT_SND_DMA2SP 10
+#define MOD_STR_MON_PORT_RCV_DMA_FR_SP 11
+#define MOD_STR_MON_PORT_SND_GDC 12
+#define MOD_STR_MON_PORT_RCV_GDC 13
+
+/* testbench signals: */
+
+/* testbench GP adapter register ids */
+#define HIVE_TESTBENCH_GPIO_DATA_OUT_REG_IDX 0
+#define HIVE_TESTBENCH_GPIO_DIR_OUT_REG_IDX 1
+#define HIVE_TESTBENCH_IRQ_REG_IDX 2
+#define HIVE_TESTBENCH_SDRAM_WAKEUP_REG_IDX 3
+#define HIVE_TESTBENCH_IDLE_REG_IDX 4
+#define HIVE_TESTBENCH_GPIO_DATA_IN_REG_IDX 5
+#define HIVE_TESTBENCH_MIPI_BFM_EN_REG_IDX 6
+#define HIVE_TESTBENCH_CSI_CONFIG_REG_IDX 7
+#define HIVE_TESTBENCH_DDR_STALL_EN_REG_IDX 8
+
+#define HIVE_TESTBENCH_ISP_PMEM_ERROR_IRQ_REG_IDX 9
+#define HIVE_TESTBENCH_ISP_BAMEM_ERROR_IRQ_REG_IDX 10
+#define HIVE_TESTBENCH_ISP_DMEM_ERROR_IRQ_REG_IDX 11
+#define HIVE_TESTBENCH_SP_ICACHE_MEM_ERROR_IRQ_REG_IDX 12
+#define HIVE_TESTBENCH_SP_DMEM_ERROR_IRQ_REG_IDX 13
+
+/* Signal monitor input bit ids */
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_O_BIT_ID 0
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TESTBENCH_SIG_MON_IRQ_PIN_BIT_ID 12
+#define HIVE_TESTBENCH_SIG_MON_SDRAM_WAKEUP_PIN_BIT_ID 13
+#define HIVE_TESTBENCH_SIG_MON_IDLE_PIN_BIT_ID 14
+
+#define ISP2400_DEBUG_NETWORK 1
+
+#endif /* _hive_isp_css_defs_h__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
new file mode 100644
index 000000000000..4010976eb4f3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ASSERT_SUPPORT_H_INCLUDED__
+#define __ASSERT_SUPPORT_H_INCLUDED__
+
+/* Compile time assertion */
+#ifndef CT_ASSERT
+#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd) ? 1 : -1]))
+#endif /* CT_ASSERT */
+
+#include <linux/bug.h>
+
+/* TODO: it would be cleaner to use this:
+ * #define assert(cnd) BUG_ON(!(cnd))
+ * but that causes many compiler warnings (==errors) under Android
+ * because it seems that the BUG_ON() macro is not seen as a check by
+ * gcc like the BUG() macro is.
+ */
+#define assert(cnd) \
+ do { \
+ if (!(cnd)) \
+ BUG(); \
+ } while (0)
+
+#ifndef PIPE_GENERATION
+/* Deprecated OP___assert; it is still used in ~1000 places
+ * in the code and will be removed over time.
+ * The implementation for the pipe generation tool is in support.isp.h.
+ */
+#define OP___assert(cnd) assert(cnd)
+
+static inline void compile_time_assert(unsigned int cond)
+{
+ /* Call undefined function if cond is false */
+ void _compile_time_assert(void);
+ if (!cond) _compile_time_assert();
+}
+#endif /* PIPE_GENERATION */
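+
+/*
+ * Usage sketch (illustrative only): CT_ASSERT() turns a false condition into
+ * a negative array size, so the check fails at compile time rather than at
+ * run time:
+ *
+ *	CT_ASSERT(sizeof(u32) == 4);	// compiles
+ *	CT_ASSERT(sizeof(u32) == 8);	// build error: negative array size
+ */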
+
+#endif /* __ASSERT_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h
new file mode 100644
index 000000000000..fda1adf6c601
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __BITOP_SUPPORT_H_INCLUDED__
+#define __BITOP_SUPPORT_H_INCLUDED__
+
+#define bitop_setbit(a, b) ((a) |= (1UL << (b)))
+
+#define bitop_getbit(a, b) (((a) & (1UL << (b))) != 0)
+
+#define bitop_clearbit(a, b) ((a) &= ~(1UL << (b)))
+
+#endif /* __BITOP_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h
new file mode 100644
index 000000000000..8fd1b846d827
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __CSI_RX_H_INCLUDED__
+#define __CSI_RX_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "csi_rx_local.h"
+
+#ifndef __INLINE_CSI_RX__
+#include "csi_rx_public.h"
+#else /* __INLINE_CSI_RX__ */
+#include "csi_rx_private.h"
+#endif /* __INLINE_CSI_RX__ */
+
+#endif /* __CSI_RX_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h
new file mode 100644
index 000000000000..fc6de151ac64
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __DEBUG_H_INCLUDED__
+#define __DEBUG_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the debug device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "system_local.h"
+#include "debug_local.h"
+
+#ifndef __INLINE_DEBUG__
+#define STORAGE_CLASS_DEBUG_H extern
+#define STORAGE_CLASS_DEBUG_C
+#include "debug_public.h"
+#else /* __INLINE_DEBUG__ */
+#define STORAGE_CLASS_DEBUG_H static inline
+#define STORAGE_CLASS_DEBUG_C static inline
+#include "debug_private.h"
+#endif /* __INLINE_DEBUG__ */
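+
+/*
+ * Illustrative declaration pattern (debug_example_fn is a hypothetical name;
+ * the real prototypes live in debug_public.h and debug_private.h):
+ *
+ *	STORAGE_CLASS_DEBUG_H void debug_example_fn(void);
+ *
+ * expands to "extern void debug_example_fn(void);" for the out-of-line build
+ * and to "static inline void debug_example_fn(void);" when __INLINE_DEBUG__
+ * selects the inlined implementation.
+ */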
+
+#endif /* __DEBUG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h
new file mode 100644
index 000000000000..ca33a1e03d8d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __DEVICE_ACCESS_H_INCLUDED__
+#define __DEVICE_ACCESS_H_INCLUDED__
+
+/*!
+ * \brief
+ * Define the public interface for physical system
+ * access functions to SRAM and registers. Access
+ * types are limited to those defined in <stdint.h>.
+ * All accesses are aligned.
+ *
+ * The address representation is private to the system
+ * and represented as/stored in "hrt_address".
+ *
+ * The system global address can differ from the local address by an
+ * offset: the device base address. This offset must be added by the
+ * implementation of the access functions.
+ *
+ * "store" is a transfer to the device
+ * "load" is a transfer from the device
+ */
+
+#include <type_support.h>
+
+/*
+ * User provided file that defines the system address types:
+ * - hrt_address a type that can hold the (sub)system address range
+ */
+#include "system_local.h"
+/*
+ * We cannot assume that the global system address size is the size of
+ * a pointer because a (say) 64-bit host can be simulated in a 32-bit
+ * environment. Only if the host environment is modelled exactly as on the
+ * target could we use a pointer. Even then, prototyping may need to be done
+ * before the target environment is available. As we cannot wait for that,
+ * we are stuck with integer addresses.
+ */
+
+/*typedef char *sys_address;*/
+typedef hrt_address sys_address;
+
+/*! Set the (sub)system base address
+
+ \param base_addr[in] The offset at which the (sub)system is located
+ in the global address map
+
+ \return none,
+ */
+void device_set_base_address(
+ const sys_address base_addr);
+
+/*! Get the (sub)system base address
+
+ \return base_address,
+ */
+sys_address device_get_base_address(void);
+
+/*! Read an 8-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint8_t ia_css_device_load_uint8(
+ const hrt_address addr);
+
+/*! Read a 16-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint16_t ia_css_device_load_uint16(
+ const hrt_address addr);
+
+/*! Read a 32-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint32_t ia_css_device_load_uint32(
+ const hrt_address addr);
+
+/*! Read a 64-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+uint64_t ia_css_device_load_uint64(
+ const hrt_address addr);
+
+/*! Write an 8-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint8(
+ const hrt_address addr,
+ const uint8_t data);
+
+/*! Write a 16-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint16(
+ const hrt_address addr,
+ const uint16_t data);
+
+/*! Write a 32-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint32(
+ const hrt_address addr,
+ const uint32_t data);
+
+/*! Write a 64-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+void ia_css_device_store_uint64(
+ const hrt_address addr,
+ const uint64_t data);
+
+/*! Read an array of bytes from device registers or memory in the device
+
+ \param addr[in] Local address
+ \param data[out] pointer to the destination array
+ \param size[in] number of bytes to read
+
+ \return none
+ */
+void ia_css_device_load(
+ const hrt_address addr,
+ void *data,
+ const size_t size);
+
+/*! Write an array of bytes to device registers or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] pointer to the source array
+ \param size[in] number of bytes to write
+
+ \return none
+ */
+void ia_css_device_store(
+ const hrt_address addr,
+ const void *data,
+ const size_t size);
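+
+/*
+ * Usage sketch (illustrative only; the base address and register offset
+ * below are assumptions, not values defined by this interface):
+ *
+ *	uint32_t status;
+ *
+ *	device_set_base_address((sys_address)0xC0000000);
+ *	status = ia_css_device_load_uint32(0x0030);
+ *	ia_css_device_store_uint32(0x0030, status | 1);
+ */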
+
+#endif /* __DEVICE_ACCESS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h
new file mode 100644
index 000000000000..1e0731391ad8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __DMA_H_INCLUDED__
+#define __DMA_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the DMA device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "system_local.h"
+#include "dma_local.h"
+
+#ifndef __INLINE_DMA__
+#define STORAGE_CLASS_DMA_H extern
+#define STORAGE_CLASS_DMA_C
+#include "dma_public.h"
+#else /* __INLINE_DMA__ */
+#define STORAGE_CLASS_DMA_H static inline
+#define STORAGE_CLASS_DMA_C static inline
+#include "dma_private.h"
+#endif /* __INLINE_DMA__ */
+
+#endif /* __DMA_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h
new file mode 100644
index 000000000000..cd1a0fb57267
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __EVENT_FIFO_H
+#define __EVENT_FIFO_H
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the IRQ device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "event_fifo_local.h"
+
+#ifndef __INLINE_EVENT__
+#define STORAGE_CLASS_EVENT_H extern
+#define STORAGE_CLASS_EVENT_C
+#include "event_fifo_public.h"
+#else /* __INLINE_EVENT__ */
+#define STORAGE_CLASS_EVENT_H static inline
+#define STORAGE_CLASS_EVENT_C static inline
+#include "event_fifo_private.h"
+#endif /* __INLINE_EVENT__ */
+
+#endif /* __EVENT_FIFO_H */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h
new file mode 100644
index 000000000000..e85b50d78003
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __FIFO_MONITOR_H_INCLUDED__
+#define __FIFO_MONITOR_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "fifo_monitor_local.h"
+
+#ifndef __INLINE_FIFO_MONITOR__
+#define STORAGE_CLASS_FIFO_MONITOR_H extern
+#define STORAGE_CLASS_FIFO_MONITOR_C
+#include "fifo_monitor_public.h"
+#else /* __INLINE_FIFO_MONITOR__ */
+#define STORAGE_CLASS_FIFO_MONITOR_H static inline
+#define STORAGE_CLASS_FIFO_MONITOR_C static inline
+#include "fifo_monitor_private.h"
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+#endif /* __FIFO_MONITOR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h
new file mode 100644
index 000000000000..d8633859c393
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GDC_DEVICE_H_INCLUDED__
+#define __GDC_DEVICE_H_INCLUDED__
+
+/* The file gdc.h already exists */
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the GDC device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "gdc_local.h"
+
+#ifndef __INLINE_GDC__
+#define STORAGE_CLASS_GDC_H extern
+#define STORAGE_CLASS_GDC_C
+#include "gdc_public.h"
+#else /* __INLINE_GDC__ */
+#define STORAGE_CLASS_GDC_H static inline
+#define STORAGE_CLASS_GDC_C static inline
+#include "gdc_private.h"
+#endif /* __INLINE_GDC__ */
+
+#endif /* __GDC_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h
new file mode 100644
index 000000000000..33ab0642a931
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GP_DEVICE_H_INCLUDED__
+#define __GP_DEVICE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "gp_device_local.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#define STORAGE_CLASS_GP_DEVICE_H extern
+#define STORAGE_CLASS_GP_DEVICE_C
+#include "gp_device_public.h"
+#else /* __INLINE_GP_DEVICE__ */
+#define STORAGE_CLASS_GP_DEVICE_H static inline
+#define STORAGE_CLASS_GP_DEVICE_C static inline
+#include "gp_device_private.h"
+#endif /* __INLINE_GP_DEVICE__ */
+
+#endif /* __GP_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h
new file mode 100644
index 000000000000..94f81af70007
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GP_TIMER_H_INCLUDED__
+#define __GP_TIMER_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h" /*GP_TIMER_BASE address */
+#include "gp_timer_local.h" /*GP_TIMER register offsets */
+
+#ifndef __INLINE_GP_TIMER__
+#define STORAGE_CLASS_GP_TIMER_H extern
+#define STORAGE_CLASS_GP_TIMER_C
+#include "gp_timer_public.h" /* functions*/
+#else /* __INLINE_GP_TIMER__ */
+#define STORAGE_CLASS_GP_TIMER_H static inline
+#define STORAGE_CLASS_GP_TIMER_C static inline
+#include "gp_timer_private.h" /* inline functions*/
+#endif /* __INLINE_GP_TIMER__ */
+
+#endif /* __GP_TIMER_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h
new file mode 100644
index 000000000000..c916618a0fb9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __HMEM_H_INCLUDED__
+#define __HMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the HMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - . system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "hmem_local.h"
+
+#ifndef __INLINE_HMEM__
+#define STORAGE_CLASS_HMEM_H extern
+#define STORAGE_CLASS_HMEM_C
+#include "hmem_public.h"
+#else /* __INLINE_HMEM__ */
+#define STORAGE_CLASS_HMEM_H static inline
+#define STORAGE_CLASS_HMEM_C static inline
+#include "hmem_private.h"
+#endif /* __INLINE_HMEM__ */
+
+#endif /* __HMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
new file mode 100644
index 000000000000..2002960f078e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __CSI_RX_PUBLIC_H_INCLUDED__
+#define __CSI_RX_PUBLIC_H_INCLUDED__
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the csi rx frontend state.
+ * Get the state of the csi rx frontend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx fe controller.
+ * @param[out] state Pointer to the register-state.
+ */
+void csi_rx_fe_ctrl_get_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state);
+/**
+ * @brief Dump the csi rx frontend state.
+ * Dump the state of the csi rx frontend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx fe controller.
+ * @param[in] state Pointer to the register-state.
+ */
+void csi_rx_fe_ctrl_dump_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state);
+/**
+ * @brief Get the state of the csi rx fe dlane.
+ * Get the state of the register set per dlane process.
+ *
+ * @param[in] id The global unique ID of the input-buffer controller.
+ * @param[in] lane The lane ID.
+ * @param[out] state Pointer to the dlane state.
+ */
+void csi_rx_fe_ctrl_get_dlane_state(
+ const csi_rx_frontend_ID_t ID,
+ const u32 lane,
+ csi_rx_fe_ctrl_lane_t *dlane_state);
+/**
+ * @brief Get the csi rx backend state.
+ * Get the state of the csi rx backend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx be controller.
+ * @param[out] state Pointer to the register-state.
+ */
+void csi_rx_be_ctrl_get_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state);
+/**
+ * @brief Dump the csi rx backend state.
+ * Dump the state of the csi rx backend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx be controller.
+ * @param[in] state Pointer to the register-state.
+ */
+void csi_rx_be_ctrl_dump_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state);
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the csi rx fe.
+ *
+ * @param[in] ID The global unique ID for the csi rx frontend instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+hrt_data csi_rx_fe_ctrl_reg_load(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the csi rx fe.
+ *
+ * @param[in] ID The global unique ID for the csi rx frontend instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+void csi_rx_fe_ctrl_reg_store(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the csi rx be.
+ *
+ * @param[in] ID The global unique ID for the csi rx backend instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+hrt_data csi_rx_be_ctrl_reg_load(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the csi rx be.
+ *
+ * @param[in] ID The global unique ID for the csi rx backend instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+void csi_rx_be_ctrl_reg_store(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/* end of DLI */
+#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
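A minimal host-side sketch of how the NCI calls above combine; the csi_rx.h wrapper include and the CSI_RX_FRONTEND0_ID enumerator are assumed names, not taken from this hunk.

#include "csi_rx.h"	/* assumed wrapper that pulls in csi_rx_public.h */

static void csi_rx_fe_debug_dump(void)
{
	csi_rx_fe_ctrl_state_t state;

	/* snapshot the frontend registers, then pretty-print the snapshot */
	csi_rx_fe_ctrl_get_state(CSI_RX_FRONTEND0_ID, &state);
	csi_rx_fe_ctrl_dump_state(CSI_RX_FRONTEND0_ID, &state);
}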
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h
new file mode 100644
index 000000000000..947381e5b8a8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __DEBUG_PUBLIC_H_INCLUDED__
+#define __DEBUG_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include <ia_css_types.h>
+#include "system_local.h"
+
+/*! \brief
+ *
+ * Simple queuing trace buffer for debug data
+ * instantiable in SP DMEM
+ *
+ * The buffer has a remote and a local store
+ * which contain duplicate data (when in sync).
+ * The buffers are automatically synched when the
+ * user dequeues, or manually using the synch function
+ *
+ * An alternative (storage efficient) implementation
+ * could manage the buffers to contain unique data
+ *
+ * The buffer empty status is computed from local
+ * state which does not reflect the presence of data
+ * in the remote buffer (unless the alternative
+ * implementation is followed)
+ */
+
+typedef struct debug_data_s debug_data_t;
+typedef struct debug_data_ddr_s debug_data_ddr_t;
+
+extern debug_data_t *debug_data_ptr;
+extern hrt_address debug_buffer_address;
+extern ia_css_ptr debug_buffer_ddr_address;
+
+/*! Check the empty state of the local debug data buffer
+
+ \return isEmpty(buffer)
+ */
+STORAGE_CLASS_DEBUG_H bool is_debug_buffer_empty(void);
+
+/*! Dequeue a token from the debug data buffer
+
+ \return isEmpty(buffer)?0:buffer[head]
+ */
+STORAGE_CLASS_DEBUG_H hrt_data debug_dequeue(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue_isp(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue_ddr(void);
+
+/*! Set the offset/address of the (remote) debug buffer
+
+ \return none
+ */
+void debug_buffer_init(
+ const hrt_address addr);
+
+/*! Set the offset/address of the (remote) debug buffer in DDR
+
+ \return none
+ */
+void debug_buffer_ddr_init(
+ const ia_css_ptr addr);
+
+/*! Set the (remote) operating mode of the debug buffer
+
+ \return none
+ */
+void debug_buffer_setmode(
+ const debug_buf_mode_t mode);
+
+#endif /* __DEBUG_PUBLIC_H_INCLUDED__ */
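A host-side sketch of draining the trace buffer with the calls declared above; the pr_info() reporting and the function name are illustrative, not part of the driver.

#include <linux/printk.h>

static void drain_debug_trace(void)
{
	/* optional: sync the remote buffer up front; debug_dequeue()
	 * also keeps the local copy in sync on its own
	 */
	debug_synch_queue();

	while (!is_debug_buffer_empty())
		pr_info("css debug token: 0x%x\n",
			(unsigned int)debug_dequeue());
}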
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
new file mode 100644
index 000000000000..fe3b1b8ecc50
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __DMA_PUBLIC_H_INCLUDED__
+#define __DMA_PUBLIC_H_INCLUDED__
+
+#include "system_local.h"
+
+/*! Write to a control register of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, DMA[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_DMA_H void dma_reg_store(
+ const dma_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param reg[in] register index
+
+ \return DMA[ID].ctrl[reg]
+ */
+STORAGE_CLASS_DMA_H hrt_data dma_reg_load(
+ const dma_ID_t ID,
+ const unsigned int reg);
+
+/*! Set maximum burst size of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param conn[in] Connection to set max burst size for
+ \param max_burst_size[in] Maximum burst size in words
+
+ \return none
+*/
+void
+dma_set_max_burst_size(
+ dma_ID_t ID,
+ dma_connection conn,
+ uint32_t max_burst_size);
+
+#endif /* __DMA_PUBLIC_H_INCLUDED__ */
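A sketch of a read-modify-write through the two register accessors; DMA0_ID, HYP_DMA_CTRL_REG and HYP_DMA_ENABLE_BIT are placeholders for this illustration, not definitions from the driver.

#define HYP_DMA_CTRL_REG	0
#define HYP_DMA_ENABLE_BIT	0x1

static void dma_ctrl_set_enable_bit(void)
{
	/* read the current control word, set one bit, write it back */
	hrt_data ctrl = dma_reg_load(DMA0_ID, HYP_DMA_CTRL_REG);

	dma_reg_store(DMA0_ID, HYP_DMA_CTRL_REG, ctrl | HYP_DMA_ENABLE_BIT);
}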
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h
new file mode 100644
index 000000000000..9c2365e59b63
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __EVENT_FIFO_PUBLIC_H
+#define __EVENT_FIFO_PUBLIC_H
+
+#include <type_support.h>
+#include "system_local.h"
+
+/*! Blocking read from an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return none, dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void event_wait_for(
+ const event_ID_t ID);
+
+/*! Conditional blocking wait for an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void cnd_event_wait_for(
+ const event_ID_t ID,
+ const bool cnd);
+
+/*! Blocking read from an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H hrt_data event_receive_token(
+ const event_ID_t ID);
+
+/*! Blocking write to an event sink EVENT[ID]
+
+ \param ID[in] EVENT identifier
+ \param token[in] token to be written on the event
+
+ \return none, enqueue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void event_send_token(
+ const event_ID_t ID,
+ const hrt_data token);
+
+/*! Query an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return !isempty(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H bool is_event_pending(
+ const event_ID_t ID);
+
+/*! Query an event sink EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return !isfull(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H bool can_event_send_token(
+ const event_ID_t ID);
+
+#endif /* __EVENT_FIFO_PUBLIC_H */
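A non-blocking sketch built from the queries above, so neither FIFO access can stall; EVENT0_ID is an assumed identifier.

static bool try_forward_event_token(void)
{
	hrt_data token;

	/* only read when a token is pending, so the read cannot block */
	if (!is_event_pending(EVENT0_ID))
		return false;

	token = event_receive_token(EVENT0_ID);

	/* echo the token back only if the sink still has room */
	if (can_event_send_token(EVENT0_ID))
		event_send_token(EVENT0_ID, token);

	return true;
}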
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h
new file mode 100644
index 000000000000..37cb9eb13919
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __FIFO_MONITOR_PUBLIC_H_INCLUDED__
+#define __FIFO_MONITOR_PUBLIC_H_INCLUDED__
+
+#include "system_local.h"
+
+typedef struct fifo_channel_state_s fifo_channel_state_t;
+typedef struct fifo_switch_state_s fifo_switch_state_t;
+typedef struct fifo_monitor_state_s fifo_monitor_state_t;
+
+/*! Set a fifo switch multiplex
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+ \param sel[in] fifo switch selector
+
+ \return none, fifo_switch[switch_id].sel = sel
+ */
+STORAGE_CLASS_FIFO_MONITOR_H void fifo_switch_set(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ const hrt_data sel);
+
+/*! Get a fifo switch multiplex
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+
+ \return fifo_switch[switch_id].sel
+ */
+STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_switch_get(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id);
+
+/*! Read the state of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param state[out] fifo monitor state structure
+
+ \return none, state = FIFO_MONITOR[ID].state
+ */
+void fifo_monitor_get_state(
+ const fifo_monitor_ID_t ID,
+ fifo_monitor_state_t *state);
+
+/*! Read the state of a fifo channel
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param channel_id[in] fifo channel identifier
+ \param state[out] fifo channel state structure
+
+ \return none, state = fifo_channel[channel_id].state
+ */
+void fifo_channel_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_channel_t channel_id,
+ fifo_channel_state_t *state);
+
+/*! Read the state of a fifo switch
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+ \param state[out] fifo switch state structure
+
+ \return none, state = fifo_switch[switch_id].state
+ */
+void fifo_switch_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ fifo_switch_state_t *state);
+
+/*! Write to a control register of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, FIFO_MONITOR[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_FIFO_MONITOR_H void fifo_monitor_reg_store(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param reg[in] register index
+
+ \return FIFO_MONITOR[ID].ctrl[reg]
+ */
+STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_monitor_reg_load(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg);
+
+#endif /* __FIFO_MONITOR_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h
new file mode 100644
index 000000000000..77654b163598
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GDC_PUBLIC_H_INCLUDED__
+#define __GDC_PUBLIC_H_INCLUDED__
+
+/*! Write the bicubic interpolation table of GDC[ID]
+
+ \param ID[in] GDC identifier
+ \param data[in] The data matrix to be written
+
+ \pre
+ - data must point to a matrix[4][HRT_GDC_N]
+
+ \implementation dependent
+ - The value of "HRT_GDC_N" is device specific
+ - The LUT should not be partially written
+ - The LUT format is a quadri-phase interpolation
+ table. The layout is device specific
+ - The range of the values data[n][m] is device
+ specific
+
+ \return none, GDC[ID].lut[0...3][0...HRT_GDC_N-1] = data
+ */
+void gdc_lut_store(
+ const gdc_ID_t ID,
+ const int data[4][HRT_GDC_N]);
+
+/*! Convert the bicubic interpolation table of GDC[ID] to the ISP-specific format
+
+ \param ID[in] GDC identifier
+ \param in_lut[in] The data matrix to be converted
+ \param out_lut[out] The data matrix as the output of conversion
+ */
+void gdc_lut_convert_to_isp_format(
+ const int in_lut[4][HRT_GDC_N],
+ int out_lut[4][HRT_GDC_N]);
+
+/*! Return the integer representation of 1.0 of GDC[ID]
+
+ \param ID[in] GDC identifier
+
+ \return unity
+ */
+int gdc_get_unity(
+ const gdc_ID_t ID);
+
+#endif /* __GDC_PUBLIC_H_INCLUDED__ */
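A sketch of the convert-then-store order implied by the two LUT helpers; GDC0_ID is an assumed identifier.

static void program_gdc_lut(const int host_lut[4][HRT_GDC_N])
{
	int isp_lut[4][HRT_GDC_N];

	/* convert to the device layout first, then write the whole LUT in
	 * one go, since partial writes are not allowed (see note above)
	 */
	gdc_lut_convert_to_isp_format(host_lut, isp_lut);
	gdc_lut_store(GDC0_ID, isp_lut);
}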
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h
new file mode 100644
index 000000000000..b93cd00a0b1d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GP_DEVICE_PUBLIC_H_INCLUDED__
+#define __GP_DEVICE_PUBLIC_H_INCLUDED__
+
+#include "system_local.h"
+
+typedef struct gp_device_state_s gp_device_state_t;
+
+/*! Read the state of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param state[out] gp device state structure
+
+ \return none, state = GP_DEVICE[ID].state
+ */
+void gp_device_get_state(
+ const gp_device_ID_t ID,
+ gp_device_state_t *state);
+
+/*! Write to a control register of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, GP_DEVICE[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_GP_DEVICE_H void gp_device_reg_store(
+ const gp_device_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param reg_addr[in] register byte address
+
+ \return GP_DEVICE[ID].ctrl[reg]
+ */
+STORAGE_CLASS_GP_DEVICE_H hrt_data gp_device_reg_load(
+ const gp_device_ID_t ID,
+ const hrt_address reg_addr);
+
+#endif /* __GP_DEVICE_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h
new file mode 100644
index 000000000000..cc016eb9e84d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __GP_TIMER_PUBLIC_H_INCLUDED__
+#define __GP_TIMER_PUBLIC_H_INCLUDED__
+
+#include "system_local.h"
+
+/*! Initialize the selected timer
+
+ \param ID[in] timer identifier
+ */
+extern void
+gp_timer_init(gp_timer_ID_t ID);
+
+/*! Read the current value of the (platform-selected) timer.
+
+ \param ID[in] timer identifier
+
+ \return 32-bit timer value
+ */
+extern uint32_t
+gp_timer_read(gp_timer_ID_t ID);
+
+#endif /* __GP_TIMER_PUBLIC_H_INCLUDED__ */
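A sketch of measuring elapsed ticks around a code section; GP_TIMER0_ID is an assumed identifier and only the delta is meaningful here.

static uint32_t measure_section_ticks(void (*section)(void))
{
	uint32_t start, stop;

	gp_timer_init(GP_TIMER0_ID);
	start = gp_timer_read(GP_TIMER0_ID);
	section();
	stop = gp_timer_read(GP_TIMER0_ID);

	return stop - start;
}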
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h
new file mode 100644
index 000000000000..7cfc2228c0b8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __HMEM_PUBLIC_H_INCLUDED__
+#define __HMEM_PUBLIC_H_INCLUDED__
+
+#include <linux/types.h> /* size_t */
+
+/*! Return the size of HMEM[ID]
+
+ \param ID[in] HMEM identifier
+
+ \Note: The size is the byte size of the area it occupies
+ in the address map. I.e. disregarding internal structure
+
+ \return sizeof(HMEM[ID])
+ */
+STORAGE_CLASS_HMEM_H size_t sizeof_hmem(
+ const hmem_ID_t ID);
+
+#endif /* __HMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h
new file mode 100644
index 000000000000..e67d252aa43f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_FORMATTER_PUBLIC_H_INCLUDED__
+#define __INPUT_FORMATTER_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_local.h"
+
+/*! Reset INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+
+ \return none, reset(INPUT_FORMATTER[ID])
+ */
+void input_formatter_rst(
+ const input_formatter_ID_t ID);
+
+/*! Set the blocking mode of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param enable[in] blocking enable flag
+
+ \use
+ - In HW, the capture unit will deliver an infinite stream of frames,
+ the input formatter will synchronise on the first SOF. In simulation
+ there are only a fixed number of frames, presented only once. By
+ enabling blocking the input formatter will wait on the first presented
+ frame, thus avoiding a race in the simulation setup.
+
+ \return none, INPUT_FORMATTER[ID].blocking_mode = enable
+ */
+void input_formatter_set_fifo_blocking_mode(
+ const input_formatter_ID_t ID,
+ const bool enable);
+
+/*! Return the data alignment of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+
+ \return alignment(INPUT_FORMATTER[ID].data)
+ */
+unsigned int input_formatter_get_alignment(
+ const input_formatter_ID_t ID);
+
+/*! Read the source switch state into INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter switch state structure
+
+ \return none, state = INPUT_FORMATTER[ID].switch_state
+ */
+void input_formatter_get_switch_state(
+ const input_formatter_ID_t ID,
+ input_formatter_switch_state_t *state);
+
+/*! Read the control registers of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter state structure
+
+ \return none, state = INPUT_FORMATTER[ID].state
+ */
+void input_formatter_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_state_t *state);
+
+/*! Read the control registers of bin copy INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter state structure
+
+ \return none, state = INPUT_FORMATTER[ID].state
+ */
+void input_formatter_bin_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_bin_state_t *state);
+
+/*! Write to a control register of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, INPUT_FORMATTER[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_FORMATTER_H void input_formatter_reg_store(
+ const input_formatter_ID_t ID,
+ const hrt_address reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param reg_addr[in] register byte address
+
+ \return INPUT_FORMATTER[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_FORMATTER_H hrt_data input_formatter_reg_load(
+ const input_formatter_ID_t ID,
+ const unsigned int reg_addr);
+
+#endif /* __INPUT_FORMATTER_PUBLIC_H_INCLUDED__ */
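A sketch of querying one formatter instance; INPUT_FORMATTER0_ID is an assumed identifier and the state type comes from input_formatter_local.h.

static unsigned int if_alignment_after_dump(void)
{
	input_formatter_state_t state;

	/* snapshot the control registers (state could be traced here),
	 * then report the data alignment the formatter expects
	 */
	input_formatter_get_state(INPUT_FORMATTER0_ID, &state);
	return input_formatter_get_alignment(INPUT_FORMATTER0_ID);
}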
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
new file mode 100644
index 000000000000..901e10f8743c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IRQ_PUBLIC_H_INCLUDED__
+#define __IRQ_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_local.h"
+
+/*! Write to a control register of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, IRQ[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_IRQ_H void irq_reg_store(
+ const irq_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param reg[in] register index
+
+ \return IRQ[ID].ctrl[reg]
+ */
+STORAGE_CLASS_IRQ_H hrt_data irq_reg_load(
+ const irq_ID_t ID,
+ const unsigned int reg);
+
+/*! Enable an IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq[in] IRQ (channel) identifier
+
+ \return none, enable(IRQ[ID].channel[irq_ID])
+ */
+void irq_enable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_ID);
+
+/*! Enable or disable pulse interrupts for IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param pulse[in] enable/disable pulse interrupts
+
+ \return none
+ */
+void irq_enable_pulse(
+ const irq_ID_t ID,
+ bool pulse);
+
+/*! Disable an IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq[in] IRQ (channel) identifier
+
+ \return none, disable(IRQ[ID].channel[irq_ID])
+ */
+void irq_disable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq);
+
+/*! Clear the state of all IRQ channels of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+
+ \return none, clear(IRQ[ID].channel[])
+ */
+void irq_clear_all(
+ const irq_ID_t ID);
+
+/*! Return the ID of a signalling IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq_id[out] active IRQ (channel) identifier
+
+ \Note: This function operates like strtok(): based on the return
+ state, the user is informed whether there are additional signalling
+ channels
+
+ \return state(IRQ[ID])
+ */
+enum hrt_isp_css_irq_status irq_get_channel_id(
+ const irq_ID_t ID,
+ unsigned int *irq_id);
+
+/*! Raise an interrupt on channel irq_id of device IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq_id[in] IRQ (channel) identifier
+
+ \return none, signal(IRQ[ID].channel[irq_id])
+ */
+void irq_raise(
+ const irq_ID_t ID,
+ const irq_sw_channel_id_t irq_id);
+
+/*! Test if any IRQ channel of the virtual super IRQ has raised a signal
+
+ \return any(VIRQ.channel[irq_ID] != 0)
+ */
+bool any_virq_signal(void);
+
+/*! Enable an IRQ channel of the virtual super IRQ
+
+ \param irq[in] IRQ (channel) identifier
+ \param en[in] predicate channel enable
+
+ \return none, VIRQ.channel[irq_ID].enable = en
+ */
+void cnd_virq_enable_channel(
+ const enum virq_id irq_ID,
+ const bool en);
+
+/*! Clear the state of all IRQ channels of the virtual super IRQ
+
+ \return none, clear(VIRQ.channel[])
+ */
+void virq_clear_all(void);
+
+/*! Clear the IRQ info state of the virtual super IRQ
+
+ \param irq_info[in/out] The IRQ (channel) state
+
+ \return none
+ */
+void virq_clear_info(struct virq_info *irq_info);
+
+/*! Return the ID of a signalling IRQ channel of the virtual super IRQ
+
+ \param irq_id[out] active IRQ (channel) identifier
+
+ \Note: This function operates like strtok(): based on the return
+ state, the user is informed whether there are additional signalling
+ channels
+
+ \return state(IRQ[...])
+ */
+enum hrt_isp_css_irq_status virq_get_channel_id(
+ enum virq_id *irq_id);
+
+/*! Return the IDs of all signaling IRQ channels of the virtual super IRQ
+
+ \param irq_info[out] all active IRQ (channel) identifiers
+
+ \Note: Unlike "irq_get_channel_id()" this function returns all
+ channel signaling info. The new info is OR'd with the current
+ info state. N.B. this is the same as repeatedly calling the function
+ "irq_get_channel_id()" in a (non-blocked) handler routine
+
+ \return error(state(IRQ[...]))
+ */
+enum hrt_isp_css_irq_status
+virq_get_channel_signals(struct virq_info *irq_info);
+
+#endif /* __IRQ_PUBLIC_H_INCLUDED__ */
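A sketch of the strtok()-style iteration described above: keep asking for the next signalling channel until the status reports no more. The hrt_isp_css_irq_status enumerator names used here are assumptions.

static void handle_all_virq_channels(void)
{
	enum virq_id irq_id;
	enum hrt_isp_css_irq_status status;

	do {
		status = virq_get_channel_id(&irq_id);
		if (status == hrt_isp_css_irq_status_error)
			break;

		/* ... dispatch on irq_id here ... */

	} while (status == hrt_isp_css_irq_status_more_irqs);
}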
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
new file mode 100644
index 000000000000..78abd77c5bdb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISP_PUBLIC_H_INCLUDED__
+#define __ISP_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_local.h"
+
+/*! Enable or disable the program complete irq signal of ISP[ID]
+
+ \param ID[in] SP identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) enable(ISP[ID].irq) else disable(ISP[ID].irq)
+ */
+void cnd_isp_irq_enable(
+ const isp_ID_t ID,
+ const bool cnd);
+
+/*! Write to the status and control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, ISP[ID].sc[reg] = value
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_store(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from the status and control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+
+ \return ISP[ID].sc[reg]
+ */
+STORAGE_CLASS_ISP_H hrt_data isp_ctrl_load(
+ const isp_ID_t ID,
+ const unsigned int reg);
+
+/*! Get the status of a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be checked
+
+ \return (ISP[ID].sc[reg] & (1<<bit)) != 0
+ */
+STORAGE_CLASS_ISP_H bool isp_ctrl_getbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Set a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, ISP[ID].sc[reg] |= (1<<bit)
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_setbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Clear a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, ISP[ID].sc[reg] &= ~(1<<bit)
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_clearbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Write to the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+ \param size[in] The size(in bytes) of the data to be written
+
+ \return none, ISP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_store(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const void *data,
+ const size_t size);
+
+/*! Read from the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[out] The data to be read
+ \param size[in] The size(in bytes) of the data to be read
+
+ \return none, data = ISP[ID].dmem[addr...addr+size-1]
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_load(
+ const isp_ID_t ID,
+ const unsigned int addr,
+ void *data,
+ const size_t size);
+
+/*! Write a 32-bit datum to the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, ISP[ID].dmem[addr] = data
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_store_uint32(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const uint32_t data);
+
+/*! Load a 32-bit datum from the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+
+ \return ISP[ID].dmem[addr]
+ */
+STORAGE_CLASS_ISP_H uint32_t isp_dmem_load_uint32(
+ const isp_ID_t ID,
+ const unsigned int addr);
+
+/*! Concatenate the LSW and MSW into a double precision word
+
+ \param x0[in] Integer containing the LSW
+ \param x1[in] Integer containing the MSW
+
+ \return x0 | (x1 << bits_per_vector_element)
+ */
+STORAGE_CLASS_ISP_H uint32_t isp_2w_cat_1w(
+ const u16 x0,
+ const uint16_t x1);
+
+unsigned int isp_is_ready(isp_ID_t ID);
+
+unsigned int isp_is_sleeping(isp_ID_t ID);
+
+void isp_start(isp_ID_t ID);
+
+void isp_wake(isp_ID_t ID);
+
+#endif /* __ISP_PUBLIC_H_INCLUDED__ */
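A sketch of exchanging one 32-bit word with ISP DMEM; ISP0_ID and the DMEM offset are placeholders for this illustration.

#define HYP_ISP_DMEM_FLAG_OFFSET	0x100

static uint32_t isp_dmem_flag_roundtrip(uint32_t value)
{
	isp_dmem_store_uint32(ISP0_ID, HYP_ISP_DMEM_FLAG_OFFSET, value);
	return isp_dmem_load_uint32(ISP0_ID, HYP_ISP_DMEM_FLAG_OFFSET);
}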
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
new file mode 100644
index 000000000000..fceab6dcbddb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_DMA_PUBLIC_H_INCLUDED__
+#define __ISYS_DMA_PUBLIC_H_INCLUDED__
+
+#include "system_local.h"
+#include "type_support.h"
+
+extern void isys2401_dma_reg_store(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg,
+ const hrt_data value);
+
+extern hrt_data isys2401_dma_reg_load(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg);
+
+void isys2401_dma_set_max_burst_size(
+ const isys2401_dma_ID_t dma_id,
+ uint32_t max_burst_size);
+
+#endif /* __ISYS_DMA_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
new file mode 100644
index 000000000000..a728b9b9fd52
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_IRQ_PUBLIC_H__
+#define __ISYS_IRQ_PUBLIC_H__
+
+#include "isys_irq_global.h"
+#include "isys_irq_local.h"
+
+void isys_irqc_state_get(const isys_irq_ID_t isys_irqc_id,
+ isys_irqc_state_t *state);
+
+void isys_irqc_state_dump(const isys_irq_ID_t isys_irqc_id,
+ const isys_irqc_state_t *state);
+
+void isys_irqc_reg_store(const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx,
+ const hrt_data value);
+
+hrt_data isys_irqc_reg_load(const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx);
+
+void isys_irqc_status_enable(const isys_irq_ID_t isys_irqc_id);
+
+#endif /* __ISYS_IRQ_PUBLIC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h
new file mode 100644
index 000000000000..7ed55d427cf6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the stream2mmio-controller state.
+ * Get the state of the stream2mmio-controller register-set.
+ *
+ * @param[in] id The global unique ID of the stream2mmio controller.
+ * @param[out] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state);
+
+/**
+ * @brief Get the state of a stream2mmio-controller SID.
+ * Get the state of the register set of the selected SID processor.
+ *
+ * @param[in] id The global unique ID of the stream2mmio controller.
+ * @param[in] sid_id The sid ID.
+ * @param[out] state Pointer to the sid state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_sid_state(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ stream2mmio_sid_state_t *state);
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the stream2mmio-controller.
+ *
+ * @param[in] ID The global unique ID for the stream2mmio-controller instance.
+ * @param[in] sid_id The SID in question.
+ * @param[in] reg_idx The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_STREAM2MMIO_H hrt_data stream2mmio_reg_load(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ const uint32_t reg_idx);
+
+/**
+ * @brief Dump the SID processor state.
+ * Dump the state of the sid register-set.
+ *
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_print_sid_state(
+ stream2mmio_sid_state_t *state);
+/**
+ * @brief Dump the stream2mmio state.
+ * Dump the state of the stream2mmio register-set.
+ *
+ * @param[in] id The global unique ID of the st2mmio
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_dump_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the stream2mmio-controller.
+ *
+ * @param[in] ID The global unique ID for the stream2mmio-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_reg_store(
+ const stream2mmio_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/* end of DLI */
+
+#endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h
new file mode 100644
index 000000000000..1a435a348318
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __MMU_PUBLIC_H_INCLUDED__
+#define __MMU_PUBLIC_H_INCLUDED__
+
+#include "system_local.h"
+#include "device_access.h"
+#include "assert_support.h"
+
+/*! Set the page table base index of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param base_index[in] page table base index
+
+ \return none, MMU[ID].page_table_base_index = base_index
+ */
+void mmu_set_page_table_base_index(
+ const mmu_ID_t ID,
+ const hrt_data base_index);
+
+/*! Get the page table base index of MMU[ID]
+
+ \param ID[in] MMU identifier
+
+ \return MMU[ID].page_table_base_index
+ */
+hrt_data mmu_get_page_table_base_index(
+ const mmu_ID_t ID);
+
+/*! Invalidate the page table cache of MMU[ID]
+
+ \param ID[in] MMU identifier
+
+ \return none
+ */
+void mmu_invalidate_cache(
+ const mmu_ID_t ID);
+
+/*! Invalidate the page table cache of all MMUs
+
+ \return none
+ */
+void mmu_invalidate_cache_all(void);
+
+/*! Write to a control register of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, MMU[ID].ctrl[reg] = value
+ */
+static inline void mmu_reg_store(
+ const mmu_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address) - 1);
+ ia_css_device_store_uint32(MMU_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+/*! Read from a control register of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param reg[in] register index
+
+ \return MMU[ID].ctrl[reg]
+ */
+static inline hrt_data mmu_reg_load(
+ const mmu_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(MMU_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __MMU_PUBLIC_H_INCLUDED__ */
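A sketch of the usual re-mapping sequence: point the MMU at a new L1 page table and drop any stale translations; MMU0_ID is an assumed identifier.

static void mmu_switch_page_table(hrt_data new_base_index)
{
	mmu_set_page_table_base_index(MMU0_ID, new_base_index);
	mmu_invalidate_cache(MMU0_ID);
}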
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
new file mode 100644
index 000000000000..dc31ce3cd741
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __PIXELGEN_PUBLIC_H_INCLUDED__
+#define __PIXELGEN_PUBLIC_H_INCLUDED__
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the pixelgen state.
+ * Get the state of the pixelgen register-set.
+ *
+ * @param[in] id The global unique ID of the pixelgen controller.
+ * @param[out] state Pointer to the register-state.
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_get_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state);
+/**
+ * @brief Dump the pixelgen state.
+ * Dump the state of the pixelgen register-set.
+ *
+ * @param[in] id The global unique ID of the pixelgen controller.
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state);
+/* end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the pixelgen
+ *
+ * @param[in] ID The global unique ID for the pixelgen instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_PIXELGEN_H hrt_data pixelgen_ctrl_reg_load(
+ const pixelgen_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the pixelgen
+ *
+ * @param[in] ID The global unique ID for the pixelgen.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
+ const pixelgen_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/* end of DLI */
+
+#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
new file mode 100644
index 000000000000..15faa52d4ab6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SP_PUBLIC_H_INCLUDED__
+#define __SP_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_local.h"
+
+/*! Enable or disable the program complete irq signal of SP[ID]
+
+ \param ID[in] SP identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) enable(SP[ID].irq) else disable(SP[ID].irq)
+ */
+void cnd_sp_irq_enable(
+ const sp_ID_t ID,
+ const bool cnd);
+
+/*! Write to the status and control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, SP[ID].sc[reg] = value
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_store(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from the status and control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+
+ \return SP[ID].sc[reg]
+ */
+STORAGE_CLASS_SP_H hrt_data sp_ctrl_load(
+ const sp_ID_t ID,
+ const hrt_address reg);
+
+/*! Get the status of a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be checked
+
+ \return (SP[ID].sc[reg] & (1<<bit)) != 0
+ */
+STORAGE_CLASS_SP_H bool sp_ctrl_getbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Set a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, SP[ID].sc[reg] |= (1<<bit)
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_setbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Clear a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, SP[ID].sc[reg] &= ~(1<<bit)
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_clearbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Write to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+ \param size[in] The size(in bytes) of the data to be written
+
+ \return none, SP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const void *data,
+ const size_t size);
+
+/*! Read from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[out] The data to be read
+ \param size[in] The size(in bytes) of the data to be read
+
+ \return none, data = SP[ID].dmem[addr...addr+size-1]
+ */
+STORAGE_CLASS_SP_H void sp_dmem_load(
+ const sp_ID_t ID,
+ const hrt_address addr,
+ void *data,
+ const size_t size);
+
+/*! Write a 8-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint8(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint8_t data);
+
+/*! Write a 16-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint16(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint16_t data);
+
+/*! Write a 32-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint32(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint32_t data);
+
+/*! Load a 8-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr]
+ */
+STORAGE_CLASS_SP_H uint8_t sp_dmem_load_uint8(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+/*! Load a 16-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr]
+ */
+STORAGE_CLASS_SP_H uint16_t sp_dmem_load_uint16(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+/*! Load a 32-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr]
+ */
+STORAGE_CLASS_SP_H uint32_t sp_dmem_load_uint32(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+#endif /* __SP_PUBLIC_H_INCLUDED__ */
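A sketch of polling a 32-bit flag that SP firmware writes into its DMEM; SP0_ID and the flag offset are placeholders, and a real caller would bound the loop with a timeout.

#define HYP_SP_READY_FLAG_OFFSET	0x40

static void wait_for_sp_ready_flag(void)
{
	while (sp_dmem_load_uint32(SP0_ID, HYP_SP_READY_FLAG_OFFSET) == 0)
		; /* spin; see the timeout caveat above */
}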
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h
new file mode 100644
index 000000000000..ad83ff97bbd6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TAG_PUBLIC_H_INCLUDED__
+#define __TAG_PUBLIC_H_INCLUDED__
+
+/**
+ * @brief Creates the tag description from the given parameters.
+ * @param[in] num_captures
+ * @param[in] skip
+ * @param[in] offset
+ * @param[out] tag_descr
+ */
+void
+sh_css_create_tag_descr(int num_captures,
+ unsigned int skip,
+ int offset,
+ unsigned int exp_id,
+ struct sh_css_tag_descr *tag_descr);
+
+/**
+ * @brief Encodes the members of tag description into a 32-bit value.
+ * @param[in] tag Pointer to the tag description
+ * @return (unsigned int) Encoded 32-bit tag-info
+ */
+unsigned int
+sh_css_encode_tag_descr(struct sh_css_tag_descr *tag);
+
+#endif /* __TAG_PUBLIC_H_INCLUDED__ */
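A sketch of building a capture-tag descriptor and packing it into the 32-bit token form; the capture parameters are example values and struct sh_css_tag_descr is assumed to be defined elsewhere in the driver.

static unsigned int make_example_tag_token(unsigned int exp_id)
{
	struct sh_css_tag_descr descr;

	/* one capture, skip no frames, no offset */
	sh_css_create_tag_descr(1, 0, 0, exp_id, &descr);
	return sh_css_encode_tag_descr(&descr);
}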
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h
new file mode 100644
index 000000000000..51e0bc0d0f97
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TIMED_CTRL_PUBLIC_H_INCLUDED__
+#define __TIMED_CTRL_PUBLIC_H_INCLUDED__
+
+#include "system_local.h"
+
+/*! Write to a control register of TIMED_CTRL[ID]
+
+ \param ID[in] TIMED_CTRL identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, TIMED_CTRL[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_TIMED_CTRL_H void timed_ctrl_reg_store(
+ const timed_ctrl_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+void timed_ctrl_snd_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ hrt_address addr,
+ hrt_data value);
+
+void timed_ctrl_snd_sp_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const sp_ID_t SP_ID,
+ hrt_address offset,
+ hrt_data value);
+
+void timed_ctrl_snd_gpio_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const gpio_ID_t GPIO_ID,
+ hrt_address offset,
+ hrt_data value);
+
+#endif /* __TIMED_CTRL_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h
new file mode 100644
index 000000000000..7aede5b00dee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __VAMEM_PUBLIC_H_INCLUDED__
+#define __VAMEM_PUBLIC_H_INCLUDED__
+
+#endif /* __VAMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h
new file mode 100644
index 000000000000..0257079437be
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __VMEM_PUBLIC_H_INCLUDED__
+#define __VMEM_PUBLIC_H_INCLUDED__
+
+#include "isp.h" /* tmemvectoru */
+
+#endif /* __VMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h
new file mode 100644
index 000000000000..21d9ea26f929
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_FORMATTER_H_INCLUDED__
+#define __INPUT_FORMATTER_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - . system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "input_formatter_local.h"
+
+#ifndef __INLINE_INPUT_FORMATTER__
+#define STORAGE_CLASS_INPUT_FORMATTER_H extern
+#define STORAGE_CLASS_INPUT_FORMATTER_C
+#include "input_formatter_public.h"
+#else /* __INLINE_INPUT_FORMATTER__ */
+#define STORAGE_CLASS_INPUT_FORMATTER_H static inline
+#define STORAGE_CLASS_INPUT_FORMATTER_C static inline
+#include "input_formatter_private.h"
+#endif /* __INLINE_INPUT_FORMATTER__ */
+
+#endif /* __INPUT_FORMATTER_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h
new file mode 100644
index 000000000000..97a698ce83d9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_SYSTEM_H_INCLUDED__
+#define __INPUT_SYSTEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - . system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "input_system_local.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#define STORAGE_CLASS_INPUT_SYSTEM_H extern
+#define STORAGE_CLASS_INPUT_SYSTEM_C
+#include "input_system_public.h"
+#else /* __INLINE_INPUT_SYSTEM__ */
+#define STORAGE_CLASS_INPUT_SYSTEM_H static inline
+#define STORAGE_CLASS_INPUT_SYSTEM_C static inline
+#include "input_system_private.h"
+#endif /* __INLINE_INPUT_SYSTEM__ */
+
+#endif /* __INPUT_SYSTEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h
new file mode 100644
index 000000000000..0f6c8f081a07
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IRQ_H_INCLUDED__
+#define __IRQ_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the IRQ device. It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - . system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "irq_local.h"
+
+#ifndef __INLINE_IRQ__
+#define STORAGE_CLASS_IRQ_H extern
+#define STORAGE_CLASS_IRQ_C
+#include "irq_public.h"
+#else /* __INLINE_IRQ__ */
+#define STORAGE_CLASS_IRQ_H static inline
+#define STORAGE_CLASS_IRQ_C static inline
+#include "irq_private.h"
+#endif /* __INLINE_IRQ__ */
+
+#endif /* __IRQ_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h
new file mode 100644
index 000000000000..0ffea40ff001
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISP_H_INCLUDED__
+#define __ISP_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the ISP cell. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "isp_local.h"
+
+#ifndef __INLINE_ISP__
+#define STORAGE_CLASS_ISP_H extern
+#define STORAGE_CLASS_ISP_C
+#include "isp_public.h"
+#else /* __INLINE_ISP__ */
+#define STORAGE_CLASS_ISP_H static inline
+#define STORAGE_CLASS_ISP_C static inline
+#include "isp_private.h"
+#endif /* __INLINE_ISP__ */
+
+#endif /* __ISP_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
new file mode 100644
index 000000000000..a4a8e92df592
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ISYS_IRQ_H__
+#define __IA_CSS_ISYS_IRQ_H__
+
+#include <type_support.h>
+#include <system_local.h>
+
+
+#include "isys_irq_public.h"
+
+
+#endif /* __IA_CSS_ISYS_IRQ_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h
new file mode 100644
index 000000000000..e88710c65cb9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "isys_stream2mmio_local.h"
+
+#ifndef __INLINE_STREAM2MMIO__
+#define STORAGE_CLASS_STREAM2MMIO_H extern
+#define STORAGE_CLASS_STREAM2MMIO_C
+#include "isys_stream2mmio_public.h"
+#else /* __INLINE_STREAM2MMIO__ */
+#define STORAGE_CLASS_STREAM2MMIO_H static inline
+#define STORAGE_CLASS_STREAM2MMIO_C static inline
+#include "isys_stream2mmio_private.h"
+#endif /* __INLINE_STREAM2MMIO__ */
+
+#endif /* __ISYS_STREAM2MMIO_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
new file mode 100644
index 000000000000..2cb5c986790a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __MATH_SUPPORT_H
+#define __MATH_SUPPORT_H
+
+/* Override the definition of max/min from Linux kernel */
+#include <linux/minmax.h>
+
+#define CEIL_DIV(a, b) (((b) != 0) ? ((a) + (b) - 1) / (b) : 0)
+#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b))
+#define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))
+
+/*
+ * For SP and ISP, SDK provides the definition of OP_std_modadd.
+ * We need it only for host
+ */
+#define OP_std_modadd(base, offset, size) ((base + offset) % (size))
+
+#endif /* __MATH_SUPPORT_H */
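
For reference, the rounding helpers above produce the following concrete values; the assertions are an illustration only and assume this header plus <linux/build_bug.h> are available.

#include <linux/build_bug.h>

static inline void math_support_example(void)
{
	BUILD_BUG_ON(CEIL_DIV(10, 4) != 3);		/* 10 / 4, rounded up */
	BUILD_BUG_ON(CEIL_MUL(10, 4) != 12);		/* 10 rounded up to a multiple of 4 */
	BUILD_BUG_ON(CEIL_SHIFT(10, 2) != 3);		/* 10 / (1 << 2), rounded up */
	BUILD_BUG_ON(OP_std_modadd(6, 3, 8) != 1);	/* (6 + 3) % 8 */
}
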
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h
new file mode 100644
index 000000000000..f8c5a88ae1ad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __MISC_SUPPORT_H_INCLUDED__
+#define __MISC_SUPPORT_H_INCLUDED__
+
+/* suppress compiler warnings on unused variables */
+#ifndef NOT_USED
+#define NOT_USED(a) ((void)(a))
+#endif
+
+/* Calculate the total bytes for pow(2) byte alignment */
+#define tot_bytes_for_pow2_align(pow2, cur_bytes) ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1))
+
+#endif /* __MISC_SUPPORT_H_INCLUDED__ */
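
tot_bytes_for_pow2_align() rounds cur_bytes up to the next multiple of pow2, which must be a power of two. A short usage sketch, assuming this header is included:

static inline void misc_support_example(void)
{
	/* 100 bytes padded up to a 64-byte boundary -> 128 */
	unsigned int padded = tot_bytes_for_pow2_align(64, 100);

	NOT_USED(padded);
}
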
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h
new file mode 100644
index 000000000000..761909efac44
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __MMU_DEVICE_H_INCLUDED__
+#define __MMU_DEVICE_H_INCLUDED__
+
+/* The file mmu.h already exists */
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the MMU device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "mmu_local.h"
+
+#include "mmu_public.h"
+
+#endif /* __MMU_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h
new file mode 100644
index 000000000000..3215098ee60a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __PIXELGEN_H_INCLUDED__
+#define __PIXELGEN_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "pixelgen_local.h"
+
+#ifndef __INLINE_PIXELGEN__
+#define STORAGE_CLASS_PIXELGEN_H extern
+#define STORAGE_CLASS_PIXELGEN_C
+#include "pixelgen_public.h"
+#else /* __INLINE_PIXELGEN__ */
+#define STORAGE_CLASS_PIXELGEN_H static inline
+#define STORAGE_CLASS_PIXELGEN_C static inline
+#include "pixelgen_private.h"
+#endif /* __INLINE_PIXELGEN__ */
+
+#endif /* __PIXELGEN_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h
new file mode 100644
index 000000000000..473d8d4fb9ba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __PLATFORM_SUPPORT_H_INCLUDED__
+#define __PLATFORM_SUPPORT_H_INCLUDED__
+
+/**
+* @file
+* Platform specific includes and functionality.
+*/
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#define UINT16_MAX USHRT_MAX
+#define UINT32_MAX UINT_MAX
+#define UCHAR_MAX (255)
+
+#define CSS_ALIGN(d, a) d __attribute__((aligned(a)))
+
+#endif /* __PLATFORM_SUPPORT_H_INCLUDED__ */
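
CSS_ALIGN() wraps a declarator with an alignment attribute. A small sketch; the structure name is made up purely for illustration:

struct example_dma_block {
	CSS_ALIGN(unsigned char data[64], 32);	/* unsigned char data[64] __attribute__((aligned(32))) */
	unsigned int len;
};
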
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
new file mode 100644
index 000000000000..e6ce7b51d341
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __PRINT_SUPPORT_H_INCLUDED__
+#define __PRINT_SUPPORT_H_INCLUDED__
+
+#include <linux/stdarg.h>
+
+extern int (*sh_css_printf)(const char *fmt, va_list args);
+/* depends on host supplied print function in ia_css_init() */
+static inline __printf(1, 2) void ia_css_print(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (sh_css_printf) {
+ va_start(ap, fmt);
+ sh_css_printf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+/* Start adding support for bxt tracing functions for poc. From
+ * bxt_sandbox/support/print_support.h. */
+/* TODO: support these macros in userspace. */
+#define PWARN(format, ...) ia_css_print("warning: " format, ##__VA_ARGS__)
+#define PRINT(format, ...) ia_css_print(format, ##__VA_ARGS__)
+#define PERROR(format, ...) ia_css_print("error: " format, ##__VA_ARGS__)
+#define PDEBUG(format, ...) ia_css_print("debug: " format, ##__VA_ARGS__)
+
+#endif /* __PRINT_SUPPORT_H_INCLUDED__ */
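
ia_css_print() only produces output once a vprintf-style callback has been installed in sh_css_printf; in the driver this normally happens via the host environment passed to ia_css_init(). As a sketch of the wiring only, the kernel's vprintk() already has the required signature, so assigning it directly is enough to route CSS prints to the kernel log:

#include <linux/printk.h>

static void example_enable_css_prints(void)
{
	sh_css_printf = vprintk;	/* int vprintk(const char *fmt, va_list args) */

	PDEBUG("CSS prints now reach the kernel log (%d)\n", 1);
}
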
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h
new file mode 100644
index 000000000000..d778bcb56646
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __QUEUE_H_INCLUDED__
+#define __QUEUE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and is system agnostic
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: cell specific interfaces
+ * - private: cell specific inline implementations
+ * - global: inter cell constants and identifiers
+ * - local: cell specific constants and identifiers
+ *
+ */
+
+#include "queue_local.h"
+
+#ifndef __INLINE_QUEUE__
+#define STORAGE_CLASS_QUEUE_H extern
+#define STORAGE_CLASS_QUEUE_C
+/* #include "queue_public.h" */
+#include "ia_css_queue.h"
+#else /* __INLINE_QUEUE__ */
+#define STORAGE_CLASS_QUEUE_H static inline
+#define STORAGE_CLASS_QUEUE_C static inline
+#include "queue_private.h"
+#endif /* __INLINE_QUEUE__ */
+
+#endif /* __QUEUE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h
new file mode 100644
index 000000000000..c7a92b530f89
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __RESOURCE_H_INCLUDED__
+#define __RESOURCE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses a RESOURCE manager. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "system_local.h"
+#include "resource_local.h"
+
+#ifndef __INLINE_RESOURCE__
+#define STORAGE_CLASS_RESOURCE_H extern
+#define STORAGE_CLASS_RESOURCE_C
+#include "resource_public.h"
+#else /* __INLINE_RESOURCE__ */
+#define STORAGE_CLASS_RESOURCE_H static inline
+#define STORAGE_CLASS_RESOURCE_C static inline
+#include "resource_private.h"
+#endif /* __INLINE_RESOURCE__ */
+
+#endif /* __RESOURCE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h
new file mode 100644
index 000000000000..92724ae59e6b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SP_H_INCLUDED__
+#define __SP_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the SP cell. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "sp_local.h"
+
+#ifndef __INLINE_SP__
+#define STORAGE_CLASS_SP_H extern
+#define STORAGE_CLASS_SP_C
+#include "sp_public.h"
+#else /* __INLINE_SP__ */
+#define STORAGE_CLASS_SP_H static inline
+#define STORAGE_CLASS_SP_C static inline
+#include "sp_private.h"
+#endif /* __INLINE_SP__ */
+
+#endif /* __SP_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h
new file mode 100644
index 000000000000..b335222c7334
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TAG_H_INCLUDED__
+#define __TAG_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and is system agnostic
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: cell specific interfaces
+ * - private: cell specific inline implementations
+ * - global: inter cell constants and identifiers
+ * - local: cell specific constants and identifiers
+ *
+ */
+
+#include "tag_local.h"
+
+#ifndef __INLINE_TAG__
+#define STORAGE_CLASS_TAG_H extern
+#define STORAGE_CLASS_TAG_C
+#include "tag_public.h"
+#else /* __INLINE_TAG__ */
+#define STORAGE_CLASS_TAG_H static inline
+#define STORAGE_CLASS_TAG_C static inline
+#include "tag_private.h"
+#endif /* __INLINE_TAG__ */
+
+#endif /* __TAG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h
new file mode 100644
index 000000000000..b8f48b3c9f1e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TIMED_CTRL_H_INCLUDED__
+#define __TIMED_CTRL_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "timed_ctrl_local.h"
+
+#ifndef __INLINE_TIMED_CTRL__
+#define STORAGE_CLASS_TIMED_CTRL_H extern
+#define STORAGE_CLASS_TIMED_CTRL_C
+#include "timed_ctrl_public.h"
+#else /* __INLINE_TIMED_CTRL__ */
+#define STORAGE_CLASS_TIMED_CTRL_H static inline
+#define STORAGE_CLASS_TIMED_CTRL_C static inline
+#include "timed_ctrl_private.h"
+#endif /* __INLINE_TIMED_CTRL__ */
+
+#endif /* __TIMED_CTRL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h
new file mode 100644
index 000000000000..097be6bd3cb5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TYPE_SUPPORT_H_INCLUDED__
+#define __TYPE_SUPPORT_H_INCLUDED__
+
+/**
+* @file
+* Platform specific types.
+*
+* Per the DLI spec, types are in "type_support.h" and
+* "platform_support.h" is for unclassified/to be refactored
+* platform specific definitions.
+*/
+
+#define IA_CSS_UINT8_T_BITS 8
+#define IA_CSS_UINT16_T_BITS 16
+#define IA_CSS_UINT32_T_BITS 32
+#define IA_CSS_INT32_T_BITS 32
+#define IA_CSS_UINT64_T_BITS 64
+
+#define CHAR_BIT (8)
+
+#include <linux/errno.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#endif /* __TYPE_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h
new file mode 100644
index 000000000000..0c3328074571
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __VAMEM_H_INCLUDED__
+#define __VAMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the VAMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "vamem_local.h"
+#include "vamem_public.h"
+
+#endif /* __VAMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h
new file mode 100644
index 000000000000..8e8d187d1221
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __VMEM_H_INCLUDED__
+#define __VMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the VMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "system_local.h"
+#include "vmem_local.h"
+
+#ifndef __INLINE_VMEM__
+#define STORAGE_CLASS_VMEM_H extern
+#define STORAGE_CLASS_VMEM_C
+#include "vmem_public.h"
+#else /* __INLINE_VMEM__ */
+#define STORAGE_CLASS_VMEM_H static inline
+#define STORAGE_CLASS_VMEM_C static inline
+#include "vmem_private.h"
+#endif /* __INLINE_VMEM__ */
+
+#endif /* __VMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h
new file mode 100644
index 000000000000..fe10cad60f5c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __QUEUE_LOCAL_H_INCLUDED__
+#define __QUEUE_LOCAL_H_INCLUDED__
+
+#include "queue_global.h"
+
+#endif /* __QUEUE_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h
new file mode 100644
index 000000000000..e0a8d8493d45
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __QUEUE_PRIVATE_H_INCLUDED__
+#define __QUEUE_PRIVATE_H_INCLUDED__
+
+#endif /* __QUEUE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c
new file mode 100644
index 000000000000..c68b096444de
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "tag.h"
+#include <platform_support.h> /* NULL */
+#include <assert_support.h>
+#include "tag_local.h"
+
+/*
+ * @brief Creates the tag description from the given parameters.
+ * @param[in] num_captures
+ * @param[in] skip
+ * @param[in] offset
+ * @param[out] tag_descr
+ */
+void
+sh_css_create_tag_descr(int num_captures,
+ unsigned int skip,
+ int offset,
+ unsigned int exp_id,
+ struct sh_css_tag_descr *tag_descr)
+{
+ assert(tag_descr);
+
+ tag_descr->num_captures = num_captures;
+ tag_descr->skip = skip;
+ tag_descr->offset = offset;
+ tag_descr->exp_id = exp_id;
+}
+
+/*
+ * @brief Encodes the members of tag description into a 32-bit value.
+ * @param[in] tag Pointer to the tag description
+ * @return (unsigned int) Encoded 32-bit tag-info
+ */
+unsigned int
+sh_css_encode_tag_descr(struct sh_css_tag_descr *tag)
+{
+ int num_captures;
+ unsigned int num_captures_sign;
+ unsigned int skip;
+ int offset;
+ unsigned int offset_sign;
+ unsigned int exp_id;
+ unsigned int encoded_tag;
+
+ assert(tag);
+
+ if (tag->num_captures < 0) {
+ num_captures = -tag->num_captures;
+ num_captures_sign = 1;
+ } else {
+ num_captures = tag->num_captures;
+ num_captures_sign = 0;
+ }
+ skip = tag->skip;
+ if (tag->offset < 0) {
+ offset = -tag->offset;
+ offset_sign = 1;
+ } else {
+ offset = tag->offset;
+ offset_sign = 0;
+ }
+ exp_id = tag->exp_id;
+
+ if (exp_id != 0) {
+ /* we encode either an exp_id or capture data */
+ assert((num_captures == 0) && (skip == 0) && (offset == 0));
+
+ encoded_tag = TAG_EXP | (exp_id & 0xFF) << TAG_EXP_ID_SHIFT;
+ } else {
+ encoded_tag = TAG_CAP
+ | ((num_captures_sign & 0x00000001) << TAG_NUM_CAPTURES_SIGN_SHIFT)
+ | ((offset_sign & 0x00000001) << TAG_OFFSET_SIGN_SHIFT)
+ | ((num_captures & 0x000000FF) << TAG_NUM_CAPTURES_SHIFT)
+ | ((skip & 0x000000FF) << TAG_OFFSET_SHIFT)
+ | ((offset & 0x000000FF) << TAG_SKIP_SHIFT);
+ }
+ return encoded_tag;
+}
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h
new file mode 100644
index 000000000000..138b55be5d9d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TAG_LOCAL_H_INCLUDED__
+#define __TAG_LOCAL_H_INCLUDED__
+
+#include "tag_global.h"
+
+#define SH_CSS_MINIMUM_TAG_ID (-1)
+
+#endif /* __TAG_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h
new file mode 100644
index 000000000000..bc98fcab9a55
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TAG_PRIVATE_H_INCLUDED__
+#define __TAG_PRIVATE_H_INCLUDED__
+
+#endif /* __TAG_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h
new file mode 100644
index 000000000000..f133348f4c04
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __QUEUE_GLOBAL_H_INCLUDED__
+#define __QUEUE_GLOBAL_H_INCLUDED__
+
+#endif /* __QUEUE_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h
new file mode 100644
index 000000000000..c9ee03d60498
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SW_EVENT_GLOBAL_H_INCLUDED__
+#define __SW_EVENT_GLOBAL_H_INCLUDED__
+
+#define MAX_NR_OF_PAYLOADS_PER_SW_EVENT 4
+
+enum ia_css_psys_sw_event {
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED, /* from host to SP */
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED, /* from SP to host */
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, /* from SP to host, one way only */
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ IA_CSS_PSYS_SW_EVENT_STOP_STREAM,
+ IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY,
+ IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER,
+ IA_CSS_PSYS_SW_EVENT_STAGE_ENABLE_DISABLE /* for extension state change enable/disable */
+};
+
+enum ia_css_isys_sw_event {
+ IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED
+};
+
+#endif /* __SW_EVENT_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h
new file mode 100644
index 000000000000..7aef49429f98
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __TAG_GLOBAL_H_INCLUDED__
+#define __TAG_GLOBAL_H_INCLUDED__
+
+/* offsets for encoding/decoding the tag into an uint32_t */
+
+#define TAG_CAP 1
+#define TAG_EXP 2
+
+#define TAG_NUM_CAPTURES_SIGN_SHIFT 6
+#define TAG_OFFSET_SIGN_SHIFT 7
+#define TAG_NUM_CAPTURES_SHIFT 8
+#define TAG_OFFSET_SHIFT 16
+#define TAG_SKIP_SHIFT 24
+
+#define TAG_EXP_ID_SHIFT 8
+
+/* Data structure containing the tagging information which is used in
+ * continuous mode to specify which frames should be captured.
+ * num_captures The number of RAW frames to be processed to
+ * YUV. Setting this to -1 will make continuous
+ * capture run until it is stopped.
+ * skip Skip N frames in between captures. This can be
+ * used to select a slower capture frame rate than
+ * the sensor output frame rate.
+ * offset Start the RAW-to-YUV processing at RAW buffer
+ * with this offset. This allows the user to
+ * process RAW frames that were captured in the
+ * past or future.
+ * exp_id Exposure id of the RAW frame to tag.
+ *
+ * NOTE: Either exp_id = 0 or all other fields are 0
+ * (so yeah, this could be a union)
+ */
+
+struct sh_css_tag_descr {
+ int num_captures;
+ unsigned int skip;
+ int offset;
+ unsigned int exp_id;
+};
+
+#endif /* __TAG_GLOBAL_H_INCLUDED__ */
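
These shifts are consumed by sh_css_encode_tag_descr() in hive_isp_css_shared/host/tag.c. A hedged decode sketch for a capture tag follows; it simply mirrors the encoder as written there (where 'skip' is placed at TAG_OFFSET_SHIFT and 'offset' at TAG_SKIP_SHIFT) and is for illustration only:

static void example_decode_capture_tag(unsigned int encoded,
				       struct sh_css_tag_descr *tag)
{
	unsigned int num_captures = (encoded >> TAG_NUM_CAPTURES_SHIFT) & 0xFF;
	unsigned int skip = (encoded >> TAG_OFFSET_SHIFT) & 0xFF;
	unsigned int offset = (encoded >> TAG_SKIP_SHIFT) & 0xFF;

	tag->num_captures = ((encoded >> TAG_NUM_CAPTURES_SIGN_SHIFT) & 1) ?
			    -(int)num_captures : (int)num_captures;
	tag->skip = skip;
	tag->offset = ((encoded >> TAG_OFFSET_SIGN_SHIFT) & 1) ?
		      -(int)offset : (int)offset;
	tag->exp_id = 0;	/* TAG_CAP tags never carry an exp_id */
}
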
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h b/drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h
new file mode 100644
index 000000000000..7579b1ec2596
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _hive_isp_css_streaming_to_mipi_types_hrt_h_
+#define _hive_isp_css_streaming_to_mipi_types_hrt_h_
+
+#include <streaming_to_mipi_defs.h>
+
+#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS) - 1)
+#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS) - 1)
+
+#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS)
+#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH)
+
+#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */
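
The two masks and derived LSB positions above describe how a channel id and format type are packed alongside the pixel data sent over the stream-to-MIPI path. A sketch of that packing, under the assumption that each field is simply masked and shifted to its LSB position; the bit widths and HIVE_STR_TO_MIPI_CH_ID_LSB come from streaming_to_mipi_defs.h, which is not part of this file:

static inline unsigned int example_pack_str_to_mipi(unsigned int ch_id,
						    unsigned int fmt_type)
{
	return ((ch_id & _HIVE_ISP_CH_ID_MASK) << HIVE_STR_TO_MIPI_CH_ID_LSB) |
	       ((fmt_type & _HIVE_ISP_FMT_TYPE_MASK) << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
}
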
diff --git a/drivers/staging/media/atomisp/pci/hive_types.h b/drivers/staging/media/atomisp/pci/hive_types.h
new file mode 100644
index 000000000000..c5c5ce3f2228
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hive_types.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _HRT_HIVE_TYPES_H
+#define _HRT_HIVE_TYPES_H
+
+#include "version.h"
+
+/* boolean data type */
+typedef unsigned int hive_bool;
+#define hive_false 0
+#define hive_true 1
+
+typedef signed char hive_int8;
+typedef short hive_int16;
+typedef int hive_int32;
+typedef long long hive_int64;
+
+typedef unsigned char hive_uint8;
+typedef unsigned short hive_uint16;
+typedef unsigned int hive_uint32;
+typedef unsigned long long hive_uint64;
+
+#define HRT_DATA_WIDTH 32
+#define HRT_ADDRESS_WIDTH 64
+#define HRT_DATA_BYTES (HRT_DATA_WIDTH / 8)
+#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH / 8)
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
+
+typedef hive_uint32 hrt_data;
+typedef hive_uint64 hrt_address;
+
+/* use 64 bit addresses in simulation, where possible */
+typedef hive_uint64 hive_sim_address;
+
+/* below is for csim, not for hrt, rename and move this elsewhere */
+
+typedef unsigned int hive_uint;
+typedef hive_uint32 hive_address;
+typedef hive_address hive_slave_address;
+typedef hive_address hive_mem_address;
+
+/* MMIO devices */
+typedef hive_uint hive_mmio_id;
+typedef hive_mmio_id hive_slave_id;
+typedef hive_mmio_id hive_port_id;
+typedef hive_mmio_id hive_master_id;
+typedef hive_mmio_id hive_mem_id;
+typedef hive_mmio_id hive_dev_id;
+typedef hive_mmio_id hive_fifo_id;
+
+typedef hive_uint hive_hier_id;
+typedef hive_hier_id hive_device_id;
+typedef hive_device_id hive_proc_id;
+typedef hive_device_id hive_cell_id;
+typedef hive_device_id hive_host_id;
+typedef hive_device_id hive_bus_id;
+typedef hive_device_id hive_bridge_id;
+typedef hive_device_id hive_fifo_adapter_id;
+typedef hive_device_id hive_custom_device_id;
+
+typedef hive_uint hive_slot_id;
+typedef hive_uint hive_fu_id;
+typedef hive_uint hive_reg_file_id;
+typedef hive_uint hive_reg_id;
+
+/* Streaming devices */
+typedef hive_uint hive_outport_id;
+typedef hive_uint hive_inport_id;
+
+typedef hive_uint hive_msink_id;
+
+/* HRT specific */
+typedef char *hive_program;
+typedef char *hive_function;
+
+#endif /* _HRT_HIVE_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
new file mode 100644
index 000000000000..f998b57f90c4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+/*
+ * This file contains the entry functions for memory management of the ISP driver
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/highmem.h> /* for kmap */
+#include <linux/io.h> /* for page_to_phys */
+#include <linux/sysfs.h>
+
+#include "hmm/hmm.h"
+#include "hmm/hmm_bo.h"
+
+#include "atomisp_internal.h"
+#include "asm/cacheflush.h"
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+
+struct hmm_bo_device bo_device;
+static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
+static bool hmm_initialized;
+
+int hmm_init(void)
+{
+ int ret;
+
+ ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
+ ISP_VM_START, ISP_VM_SIZE);
+ if (ret)
+ dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");
+
+ hmm_initialized = true;
+
+ /*
+ * hmm uses 0 (NULL) to indicate an invalid ISP virtual address,
+ * and ISP_VM_START is also defined as 0, so allocate one piece
+ * of dummy memory up front. This placeholder consumes address 0,
+ * which guarantees that no later hmm_alloc() call can ever
+ * return 0 for a real allocation.
+ */
+ dummy_ptr = hmm_alloc(1);
+
+ return ret;
+}
+
+void hmm_cleanup(void)
+{
+ if (dummy_ptr == mmgr_EXCEPTION)
+ return;
+
+ /* free dummy memory first */
+ hmm_free(dummy_ptr);
+ dummy_ptr = 0;
+
+ hmm_bo_device_exit(&bo_device);
+ hmm_initialized = false;
+}
+
+static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ void *vmalloc_addr)
+{
+ unsigned int pgnr;
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ /*
+ * Check if we are initialized. In the ideal world we wouldn't need
+ * this but we can tackle it once the driver is a lot cleaner
+ */
+
+ if (!hmm_initialized)
+ hmm_init();
+ /* Get page number from size */
+ pgnr = size_to_pgnr_ceil(bytes);
+
+ /* Buffer object structure init */
+ bo = hmm_bo_alloc(&bo_device, pgnr);
+ if (!bo) {
+ dev_err(atomisp_dev, "hmm_bo_create failed.\n");
+ goto create_bo_err;
+ }
+
+ /* Allocate pages for memory */
+ ret = hmm_bo_alloc_pages(bo, type, vmalloc_addr);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
+ goto alloc_page_err;
+ }
+
+ /* Combine the virtual address and pages together */
+ ret = hmm_bo_bind(bo);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
+ goto bind_err;
+ }
+
+ return bo->start;
+
+bind_err:
+ hmm_bo_free_pages(bo);
+alloc_page_err:
+ hmm_bo_unref(bo);
+create_bo_err:
+ return 0;
+}
+
+ia_css_ptr hmm_alloc(size_t bytes)
+{
+ return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
+}
+
+ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr)
+{
+ return __hmm_alloc(bytes, HMM_BO_VMALLOC, vmalloc_addr);
+}
+
+void hmm_free(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ if (WARN_ON(virt == mmgr_EXCEPTION))
+ return;
+
+ bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
+
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object start with address 0x%x\n",
+ (unsigned int)virt);
+ return;
+ }
+
+ hmm_bo_unbind(bo);
+ hmm_bo_free_pages(bo);
+ hmm_bo_unref(bo);
+}
+
+static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
+{
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ ptr);
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_page_allocated(bo)) {
+ dev_err(atomisp_dev,
+ "buffer object has no page allocated.\n");
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_allocated(bo)) {
+ dev_err(atomisp_dev,
+ "buffer object has no virtual address space allocated.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Read function in ISP memory management */
+static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
+ unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ des = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ src = (char *)kmap_local_page(bo->pages[idx]) + offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len; /* update virt for next loop */
+
+ if (des) {
+ memcpy(des, src, len);
+ des += len;
+ }
+
+ clflush_cache_range(src, len);
+
+ kunmap_local(src);
+ }
+
+ return 0;
+}
+
+/* Read function in ISP memory management */
+static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *src = bo->vmap_addr;
+
+ src += (virt - bo->start);
+ memcpy(data, src, bytes);
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(src, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (!vptr)
+ return load_and_flush_by_kmap(virt, data, bytes);
+ else
+ vptr = vptr + (virt - bo->start);
+
+ memcpy(data, vptr, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ }
+
+ return 0;
+}
+
+/* Read function in ISP memory management */
+int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ if (!virt) {
+ dev_warn(atomisp_dev,
+ "hmm_store: address is NULL\n");
+ return -EINVAL;
+ }
+ if (!data) {
+ dev_err(atomisp_dev,
+ "hmm_store: data is a NULL argument\n");
+ return -EINVAL;
+ }
+ return load_and_flush(virt, data, bytes);
+}
+
+/* Flush hmm data from the data cache */
+int hmm_flush(ia_css_ptr virt, unsigned int bytes)
+{
+ return load_and_flush(virt, NULL, bytes);
+}
+
+/* Write function in ISP memory management */
+int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ if (!virt) {
+ dev_warn(atomisp_dev,
+ "hmm_store: address is NULL\n");
+ return -EINVAL;
+ }
+ if (!data) {
+ dev_err(atomisp_dev,
+ "hmm_store: data is a NULL argument\n");
+ return -EINVAL;
+ }
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+ memcpy(dst, data, bytes);
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+
+ memcpy(vptr, data, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ src = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ des = (char *)kmap_local_page(bo->pages[idx]);
+
+ if (!des) {
+ dev_err(atomisp_dev,
+ "kmap buffer object page failed: pg_idx = %d\n",
+ idx);
+ return -EINVAL;
+ }
+
+ des += offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+ memcpy(des, src, len);
+
+ src += len;
+
+ clflush_cache_range(des, len);
+
+ kunmap_local(des);
+ }
+
+ return 0;
+}
+
+/* memset function in ISP memory management */
+int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+ memset(dst, c, bytes);
+
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+ memset(vptr, c, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ des = (char *)kmap_local_page(bo->pages[idx]) + offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+ memset(des, c, len);
+
+ clflush_cache_range(des, len);
+
+ kunmap_local(des);
+ }
+
+ return 0;
+}
+
+/* Virtual address to physical address convert */
+phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
+{
+ unsigned int idx, offset;
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return -1;
+ }
+
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ return page_to_phys(bo->pages[idx]) + offset;
+}
+
+int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_start(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object start with address 0x%x\n",
+ virt);
+ return -EINVAL;
+ }
+
+ return hmm_bo_mmap(vma, bo);
+}
+
+/* Map ISP virtual address into IA virtual address */
+void *hmm_vmap(ia_css_ptr virt, bool cached)
+{
+ struct hmm_buffer_object *bo;
+ void *ptr;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return NULL;
+ }
+
+ ptr = hmm_bo_vmap(bo, cached);
+ if (ptr)
+ return ptr + (virt - bo->start);
+ else
+ return NULL;
+}
+
+/* Flush the memory which is mapped as cached memory through hmm_vmap */
+void hmm_flush_vmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return;
+ }
+
+ hmm_bo_flush_vmap(bo);
+}
+
+void hmm_vunmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+ "can not find buffer object contains address 0x%x\n",
+ virt);
+ return;
+ }
+
+ hmm_bo_vunmap(bo);
+}
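
A hedged sketch of a typical round trip through the hmm entry points defined above: allocate ISP virtual memory, copy a buffer into it with hmm_store(), read it back with hmm_load(), then release it with hmm_free(). The function below is illustrative only and not part of the driver:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include "hmm/hmm.h"

static int example_hmm_roundtrip(void)
{
	u8 pattern[64], readback[64];
	ia_css_ptr isp_addr;
	int ret;

	memset(pattern, 0xA5, sizeof(pattern));

	isp_addr = hmm_alloc(sizeof(pattern));	/* returns 0 on failure */
	if (!isp_addr)
		return -ENOMEM;

	ret = hmm_store(isp_addr, pattern, sizeof(pattern));
	if (!ret)
		ret = hmm_load(isp_addr, readback, sizeof(readback));

	hmm_free(isp_addr);
	return ret;
}
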
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
new file mode 100644
index 000000000000..5d0cd5260d3a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -0,0 +1,1075 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+/*
+ * This file contains functions for buffer object structure management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h> /* for GFP_ATOMIC */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/hugetlb.h>
+#include <linux/highmem.h>
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/current.h>
+#include <linux/sched/signal.h>
+#include <linux/file.h>
+
+#include <asm/set_memory.h>
+
+#include "atomisp_internal.h"
+#include "hmm/hmm_common.h"
+#include "hmm/hmm_bo.h"
+
+static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+ /* prevent zero size buffer object */
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return -EINVAL;
+ }
+
+ memset(bo, 0, sizeof(*bo));
+ mutex_init(&bo->mutex);
+
+ /* init the bo->list HEAD as an element of entire_bo_list */
+ INIT_LIST_HEAD(&bo->list);
+
+ bo->bdev = bdev;
+ bo->vmap_addr = NULL;
+ bo->status = HMM_BO_FREE;
+ bo->start = bdev->start;
+ bo->pgnr = pgnr;
+ bo->end = bo->start + pgnr_to_size(pgnr);
+ bo->prev = NULL;
+ bo->next = NULL;
+
+ return 0;
+}
+
+static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
+ struct rb_node *node, unsigned int pgnr)
+{
+ struct hmm_buffer_object *this, *ret_bo, *temp_bo;
+
+ this = rb_entry(node, struct hmm_buffer_object, node);
+ if (this->pgnr == pgnr ||
+ (this->pgnr > pgnr && !this->node.rb_left)) {
+ goto remove_bo_and_return;
+ } else {
+ if (this->pgnr < pgnr) {
+ if (!this->node.rb_right)
+ return NULL;
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_right, pgnr);
+ } else {
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_left, pgnr);
+ }
+ if (!ret_bo) {
+ if (this->pgnr > pgnr)
+ goto remove_bo_and_return;
+ else
+ return NULL;
+ }
+ return ret_bo;
+ }
+
+remove_bo_and_return:
+ /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
+ * 1. check if 'this->next' is NULL:
+ * yes: erase 'this' node and rebalance rbtree, return 'this'.
+ */
+ if (!this->next) {
+ rb_erase(&this->node, &this->bdev->free_rbtree);
+ return this;
+ }
+ /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
+ * 2. check if 'this->next->next' is NULL:
+ * yes: change the related 'next/prev' pointer,
+ * return 'this->next' but the rbtree stays unchanged.
+ */
+ temp_bo = this->next;
+ this->next = temp_bo->next;
+ if (temp_bo->next)
+ temp_bo->next->prev = this;
+ temp_bo->next = NULL;
+ temp_bo->prev = NULL;
+ return temp_bo;
+}
+
+static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
+ ia_css_ptr start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (!n->rb_left)
+ return NULL;
+ n = n->rb_left;
+ } else if (bo->start < start) {
+ if (!n->rb_right)
+ return NULL;
+ n = n->rb_right;
+ } else {
+ return bo;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+static struct hmm_buffer_object *__bo_search_by_addr_in_range(
+ struct rb_root *root, unsigned int start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (!n->rb_left)
+ return NULL;
+ n = n->rb_left;
+ } else {
+ if (bo->end > start)
+ return bo;
+ if (!n->rb_right)
+ return NULL;
+ n = n->rb_right;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+static void __bo_insert_to_free_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int pgnr = bo->pgnr;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (pgnr < this->pgnr) {
+ new = &((*new)->rb_left);
+ } else if (pgnr > this->pgnr) {
+ new = &((*new)->rb_right);
+ } else {
+ bo->prev = this;
+ bo->next = this->next;
+ if (this->next)
+ this->next->prev = bo;
+ this->next = bo;
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+ return;
+ }
+ }
+
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int start = bo->start;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (start < this->start)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ kref_init(&bo->kref);
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
+ struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *new_bo;
+ unsigned long flags;
+ int ret;
+
+ new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
+ if (!new_bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ return NULL;
+ }
+ ret = __bo_init(bdev, new_bo, pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, new_bo);
+ return NULL;
+ }
+
+ new_bo->start = bo->start;
+ new_bo->end = new_bo->start + pgnr_to_size(pgnr);
+ bo->start = new_bo->end;
+ bo->pgnr = bo->pgnr - pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&new_bo->list, &bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ return new_bo;
+}
+
+static void __bo_take_off_handling(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+ /* There are 4 situations when we take a known bo off the free rbtree:
+ * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
+ * with no linked list hanging off it; to take off this bo,
+ * we just need to erase bo directly and rebalance the free rbtree
+ */
+ if (!bo->prev && !bo->next) {
+ rb_erase(&bo->node, &bdev->free_rbtree);
+ /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
+ * and has a linked list,to take off this bo we need erase bo
+ * first, then, insert bo->next into free rbtree and rebalance
+ * the free rbtree
+ */
+ } else if (!bo->prev && bo->next) {
+ bo->next->prev = NULL;
+ rb_erase(&bo->node, &bdev->free_rbtree);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
+ bo->next = NULL;
+ /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
+ * node, bo is the last element of the linked list after rbtree
+ * node, to take off this bo, we just need set the "prev/next"
+ * pointers to NULL, the free rbtree stays unchanged
+ */
+ } else if (bo->prev && !bo->next) {
+ bo->prev->next = NULL;
+ bo->prev = NULL;
+ /* 4. when bo->prev != NULL && bo->next != NULL ,bo is not a rbtree
+ * node, bo is in the middle of the linked list after rbtree node,
+ * to take off this bo, we just set the "prev/next" pointers
+ * to NULL, the free rbtree stays unchanged
+ */
+ } else if (bo->prev && bo->next) {
+ bo->next->prev = bo->prev;
+ bo->prev->next = bo->next;
+ bo->next = NULL;
+ bo->prev = NULL;
+ }
+}
+
+static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
+ struct hmm_buffer_object *next_bo)
+{
+ struct hmm_bo_device *bdev;
+ unsigned long flags;
+
+ bdev = bo->bdev;
+ next_bo->start = bo->start;
+ next_bo->pgnr = next_bo->pgnr + bo->pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bo->bdev->bo_cache, bo);
+
+ return next_bo;
+}
+
+/*
+ * hmm_bo_device functions.
+ */
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start,
+ unsigned int size)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int ret;
+
+ check_bodev_null_return(bdev, -EINVAL);
+
+ ret = isp_mmu_init(&bdev->mmu, mmu_driver);
+ if (ret) {
+ dev_err(atomisp_dev, "isp_mmu_init failed.\n");
+ return ret;
+ }
+
+ bdev->start = vaddr_start;
+ bdev->pgnr = size_to_pgnr_ceil(size);
+ bdev->size = pgnr_to_size(bdev->pgnr);
+
+ spin_lock_init(&bdev->list_lock);
+ mutex_init(&bdev->rbtree_mutex);
+
+
+ INIT_LIST_HEAD(&bdev->entire_bo_list);
+ bdev->allocated_rbtree = RB_ROOT;
+ bdev->free_rbtree = RB_ROOT;
+
+ bdev->bo_cache = kmem_cache_create("bo_cache",
+ sizeof(struct hmm_buffer_object), 0, 0, NULL);
+ if (!bdev->bo_cache) {
+ dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
+ if (!bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ ret = __bo_init(bdev, bo, bdev->pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, bo);
+ isp_mmu_exit(&bdev->mmu);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&bo->list, &bdev->entire_bo_list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ bdev->flag = HMM_BO_DEVICE_INITED;
+
+ return 0;
+}
+
+struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *bo, *new_bo;
+ struct rb_root *root = &bdev->free_rbtree;
+
+ check_bodev_null_return(bdev, NULL);
+ var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
+ "hmm_bo_device not inited yet.\n");
+
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return NULL;
+ }
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
+ __func__);
+ return NULL;
+ }
+
+ if (bo->pgnr > pgnr) {
+ new_bo = __bo_break_up(bdev, bo, pgnr);
+ if (!new_bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
+ __func__);
+ return NULL;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return new_bo;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return bo;
+}
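/*
 * Illustration only: hmm_bo_alloc() above does a best-fit search of the free
 * rbtree (keyed by page count) and splits a larger free bo via __bo_break_up()
 * when no exact-size match exists.  A minimal caller pairs it with
 * hmm_bo_release(); error handling is elided.
 */
#if 0	/* example, not compiled */
static void example_bo_alloc(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo = hmm_bo_alloc(bdev, 16);	/* 16 pages of ISP address space */

	if (bo)
		hmm_bo_release(bo);	/* returns the range to the free rbtree */
}
#endif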
+
+void hmm_bo_release(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+ struct hmm_buffer_object *next_bo, *prev_bo;
+
+ mutex_lock(&bdev->rbtree_mutex);
+
+ /*
+ * FIX ME:
+ *
+ * how do we destroy the bo while it is still MMAPED?
+ *
+ * Ideally this should never happen, as hmm_bo_release()
+ * is only called when the kref reaches 0, and the mmap
+ * operation eventually takes a reference via hmm_bo_ref().
+ * So if we get here, something has gone wrong.
+ */
+ if (bo->status & HMM_BO_MMAPED) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
+ return;
+ }
+
+ if (bo->status & HMM_BO_BINDED) {
+ dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n");
+ hmm_bo_unbind(bo);
+ }
+
+ if (bo->status & HMM_BO_PAGE_ALLOCED) {
+ dev_warn(atomisp_dev, "the pages is not freed, free pages first\n");
+ hmm_bo_free_pages(bo);
+ }
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
+ hmm_bo_vunmap(bo);
+ }
+
+ rb_erase(&bo->node, &bdev->allocated_rbtree);
+
+ prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
+ next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
+
+ if (bo->list.prev != &bdev->entire_bo_list &&
+ prev_bo->end == bo->start &&
+ (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(prev_bo);
+ bo = __bo_merge(prev_bo, bo);
+ }
+
+ if (bo->list.next != &bdev->entire_bo_list &&
+ next_bo->start == bo->end &&
+ (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(next_bo);
+ bo = __bo_merge(bo, next_bo);
+ }
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return;
+}
+
+void hmm_bo_device_exit(struct hmm_bo_device *bdev)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
+
+ check_bodev_null_return_void(bdev);
+
+ /*
+ * release all allocated bos even if they are in use;
+ * all bos will be merged into one big bo
+ */
+ while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
+ hmm_bo_release(
+ rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
+
+ dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
+ __func__);
+
+ /* free all bos to release all ISP virtual memory */
+ while (!list_empty(&bdev->entire_bo_list)) {
+ bo = list_to_hmm_bo(bdev->entire_bo_list.next);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bdev->bo_cache, bo);
+ }
+
+ dev_dbg(atomisp_dev, "%s: finished to free all bos!\n", __func__);
+
+ kmem_cache_destroy(bdev->bo_cache);
+
+ isp_mmu_exit(&bdev->mmu);
+}
+
+int hmm_bo_device_inited(struct hmm_bo_device *bdev)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+
+ return bdev->flag == HMM_BO_DEVICE_INITED;
+}
+
+int hmm_bo_allocated(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return(bo, 0);
+
+ return bo->status & HMM_BO_ALLOCED;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, unsigned int vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
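+
+/*
+ * Example (illustrative sketch): looking up a buffer object from an ISP
+ * virtual address. hmm_bo_device_search_start() expects the exact start
+ * address of a bo, while hmm_bo_device_search_in_range() accepts any address
+ * inside a bo. "isp_vaddr" is a hypothetical address obtained from an
+ * earlier hmm_bo_alloc().
+ *
+ *	struct hmm_buffer_object *bo;
+ *
+ *	bo = hmm_bo_device_search_in_range(bdev, isp_vaddr);
+ *	if (!bo)
+ *		return -EINVAL;
+ *	dev_dbg(atomisp_dev, "bo covers [0x%x, 0x%x)\n", bo->start, bo->end);
+ */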
+
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->entire_bo_list) {
+ bo = list_to_hmm_bo(pos);
+ /* skip bos which have no vm_node allocated */
+ if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
+ continue;
+ if (bo->vmap_addr == vaddr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return bo;
+}
+
+static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; i++)
+ __free_pages(page_array[i], 0);
+}
+
+static void free_private_bo_pages(struct hmm_buffer_object *bo)
+{
+ set_pages_array_wb(bo->pages, bo->pgnr);
+ free_pages_bulk_array(bo->pgnr, bo->pages);
+}
+
+/* Allocate pages which will be used only by ISP */
+static int alloc_private_pages(struct hmm_buffer_object *bo)
+{
+ const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
+ int ret;
+
+ ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
+ if (ret != bo->pgnr) {
+ free_pages_bulk_array(ret, bo->pages);
+ dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
+ return -ENOMEM;
+ }
+
+ ret = set_pages_array_uc(bo->pages, bo->pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "set pages uncacheable failed.\n");
+ free_pages_bulk_array(bo->pgnr, bo->pages);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alloc_vmalloc_pages(struct hmm_buffer_object *bo, void *vmalloc_addr)
+{
+ void *vaddr = vmalloc_addr;
+ int i;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ bo->pages[i] = vmalloc_to_page(vaddr);
+ if (!bo->pages[i]) {
+ dev_err(atomisp_dev, "Error could not get page %d of vmalloc buf\n", i);
+ return -ENOMEM;
+ }
+ vaddr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/*
+ * allocate/free physical pages for the bo.
+ *
+ * type indicates where the pages come from. Currently we support two
+ * types of memory: HMM_BO_PRIVATE and HMM_BO_VMALLOC.
+ *
+ * vmalloc_addr is only valid when type is HMM_BO_VMALLOC.
+ */
+int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
+ enum hmm_bo_type type,
+ void *vmalloc_addr)
+{
+ int ret = -EINVAL;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+ check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(!bo->pages)) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+
+ if (type == HMM_BO_PRIVATE) {
+ ret = alloc_private_pages(bo);
+ } else if (type == HMM_BO_VMALLOC) {
+ ret = alloc_vmalloc_pages(bo, vmalloc_addr);
+ } else {
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto alloc_err;
+
+ bo->type = type;
+
+ bo->status |= HMM_BO_PAGE_ALLOCED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+alloc_err:
+ kfree(bo->pages);
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "alloc pages err...\n");
+ return ret;
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object has already page allocated.\n");
+ return -EINVAL;
+}
+
+/*
+ * free physical pages of the bo.
+ */
+void hmm_bo_free_pages(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
+
+ /* clear the flag anyway. */
+ bo->status &= (~HMM_BO_PAGE_ALLOCED);
+
+ if (bo->type == HMM_BO_PRIVATE)
+ free_private_bo_pages(bo);
+ else if (bo->type == HMM_BO_VMALLOC)
+ ; /* No-op, nothing to do */
+ else
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+
+ kfree(bo->pages);
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object not page allocated yet.\n");
+}
+
+int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return(bo, 0);
+
+ return bo->status & HMM_BO_PAGE_ALLOCED;
+}
+
+/*
+ * bind the physical pages to a virtual address space.
+ */
+int hmm_bo_bind(struct hmm_buffer_object *bo)
+{
+ int ret;
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
+ status_err1);
+
+ check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ ret =
+ isp_mmu_map(&bdev->mmu, virt,
+ page_to_phys(bo->pages[i]), 1);
+ if (ret)
+ goto map_err;
+ virt += (1 << PAGE_SHIFT);
+ }
+
+ /*
+ * Flush the TLB here.
+ *
+ * Theoretically, we do not need to flush the TLB as we did not change
+ * any existing address mappings, but the Silicon Hive MMU makes it
+ * necessary: when fetching PTEs (page table entries) into the TLB, the
+ * MMU prefetches additional, possibly INVALID, PTEs for performance
+ * reasons. E.g. when we set up a single page mapping (updating 1 PTE),
+ * the MMU fetches 4 PTEs at a time, so the additional 3 PTEs may be
+ * invalid.
+ */
+ if (bo->start != 0x0)
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status |= HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+map_err:
+ /* unbind the physical pages with related virtual address space */
+ virt = bo->start;
+ for ( ; i > 0; i--) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "setup MMU address mapping failed.\n");
+ return ret;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "buffer object already binded.\n");
+ return -EINVAL;
+status_err1:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object vm_node or page not allocated.\n");
+ return -EINVAL;
+}
+
+/*
+ * unbind the physical pages with related virtual address space.
+ */
+void hmm_bo_unbind(struct hmm_buffer_object *bo)
+{
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED |
+ HMM_BO_ALLOCED |
+ HMM_BO_BINDED, status_err);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ /*
+ * flush TLB as the address mapping has been removed and
+ * related TLBs should be invalidated.
+ */
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status &= (~HMM_BO_BINDED);
+
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer vm or page not allocated or not binded yet.\n");
+}
+
+int hmm_bo_binded(struct hmm_buffer_object *bo)
+{
+ int ret;
+
+ check_bo_null_return(bo, 0);
+
+ mutex_lock(&bo->mutex);
+
+ ret = bo->status & HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return ret;
+}
+
+void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
+{
+ check_bo_null_return(bo, NULL);
+
+ mutex_lock(&bo->mutex);
+ if (((bo->status & HMM_BO_VMAPED) && !cached) ||
+ ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+ }
+
+ /* the cached status needs to change, so vunmap first */
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
+ cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
+ if (unlikely(!bo->vmap_addr)) {
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "vmap failed...\n");
+ return NULL;
+ }
+ bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
+
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+}
+
+void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
+ mutex_unlock(&bo->mutex);
+ return;
+ }
+
+ clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
+ mutex_unlock(&bo->mutex);
+}
+
+void hmm_bo_vunmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ mutex_unlock(&bo->mutex);
+ return;
+}
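+
+/*
+ * Example (illustrative sketch): mapping a bo into the kernel virtual
+ * address space with a cached mapping, filling it from the CPU and flushing
+ * the CPU cache so the ISP sees the data. Assumes "bo" already has pages
+ * allocated via hmm_bo_alloc_pages().
+ *
+ *	void *vaddr = hmm_bo_vmap(bo, true);	// cached kernel mapping
+ *
+ *	if (!vaddr)
+ *		return -ENOMEM;
+ *	memset(vaddr, 0, bo->pgnr << PAGE_SHIFT);
+ *	hmm_bo_flush_vmap(bo);			// clflush the cached range
+ *	hmm_bo_vunmap(bo);
+ */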
+
+void hmm_bo_ref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_get(&bo->kref);
+}
+
+static void kref_hmm_bo_release(struct kref *kref)
+{
+ if (!kref)
+ return;
+
+ hmm_bo_release(kref_to_hmm_bo(kref));
+}
+
+void hmm_bo_unref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_put(&bo->kref, kref_hmm_bo_release);
+}
+
+static void hmm_bo_vm_open(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_ref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->status |= HMM_BO_MMAPED;
+
+ bo->mmap_count++;
+
+ mutex_unlock(&bo->mutex);
+}
+
+static void hmm_bo_vm_close(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_unref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->mmap_count--;
+
+ if (!bo->mmap_count) {
+ bo->status &= (~HMM_BO_MMAPED);
+ vma->vm_private_data = NULL;
+ }
+
+ mutex_unlock(&bo->mutex);
+}
+
+static const struct vm_operations_struct hmm_bo_vm_ops = {
+ .open = hmm_bo_vm_open,
+ .close = hmm_bo_vm_close,
+};
+
+/*
+ * mmap the bo to user space.
+ */
+int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
+{
+ unsigned int start, end;
+ unsigned int virt;
+ unsigned int pgnr, i;
+ unsigned int pfn;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ pgnr = bo->pgnr;
+ start = vma->vm_start;
+ end = vma->vm_end;
+
+ /*
+ * check vma's virtual address space size and buffer object's size.
+ * must be the same.
+ */
+ if ((start + pgnr_to_size(pgnr)) != end) {
+ dev_warn(atomisp_dev,
+ "vma's address space size not equal to buffer object's size");
+ return -EINVAL;
+ }
+
+ virt = vma->vm_start;
+ for (i = 0; i < pgnr; i++) {
+ pfn = page_to_pfn(bo->pages[i]);
+ if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
+ dev_warn(atomisp_dev,
+ "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
+ virt, pfn, 1);
+ return -EINVAL;
+ }
+ virt += PAGE_SIZE;
+ }
+
+ vma->vm_private_data = bo;
+
+ vma->vm_ops = &hmm_bo_vm_ops;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+
+ /*
+ * call hmm_bo_vm_open explicitly.
+ */
+ hmm_bo_vm_open(vma);
+
+ return 0;
+
+status_err:
+ dev_err(atomisp_dev, "buffer page not allocated yet.\n");
+ return -EINVAL;
+}
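+
+/*
+ * Example (illustrative sketch): typical life cycle of a buffer object
+ * backed by ISP-private pages. Error handling is reduced to a minimum and
+ * "bdev" is assumed to be initialized (hmm_bo_device_inited(bdev) is true).
+ *
+ *	struct hmm_buffer_object *bo;
+ *	int ret;
+ *
+ *	bo = hmm_bo_alloc(bdev, pgnr);		// reserve ISP virtual space
+ *	if (!bo)
+ *		return -ENOMEM;
+ *
+ *	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, NULL);
+ *	if (ret)
+ *		goto out_release;
+ *
+ *	ret = hmm_bo_bind(bo);			// program the ISP MMU
+ *	if (ret)
+ *		goto out_free_pages;
+ *
+ *	// ... use bo->start as the ISP virtual address ...
+ *
+ *	hmm_bo_unbind(bo);
+ * out_free_pages:
+ *	hmm_bo_free_pages(bo);
+ * out_release:
+ *	hmm_bo_release(bo);
+ *	return ret;
+ */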
diff --git a/drivers/staging/media/atomisp/pci/ia_css.h b/drivers/staging/media/atomisp/pci/ia_css.h
new file mode 100644
index 000000000000..0186f7e5ce06
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_H_
+#define _IA_CSS_H_
+
+/* @file
+ * This file is the starting point of the CSS-API. It includes all CSS-API
+ * header files.
+ */
+
+#include "ia_css_3a.h"
+#include "ia_css_acc_types.h"
+#include "ia_css_buffer.h"
+#include "ia_css_control.h"
+#include "ia_css_device_access.h"
+#include "ia_css_dvs.h"
+#include "ia_css_env.h"
+#include "ia_css_err.h"
+#include "ia_css_event_public.h"
+#include "ia_css_firmware.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_input_port.h"
+#include "ia_css_irq.h"
+#include "ia_css_metadata.h"
+#include "ia_css_mipi.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_prbs.h"
+#include "ia_css_properties.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_version.h"
+#include "ia_css_mmu.h"
+#include "ia_css_morph.h"
+#include "ia_css_shading.h"
+#include "ia_css_timer.h"
+
+/*
+ Please do not add code to this file. Public functionality is to be
+ exposed in a function/data type specific header file.
+ Please add to the appropriate header file or create a new one.
+ */
+
+#endif /* _IA_CSS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_3a.h b/drivers/staging/media/atomisp/pci/ia_css_3a.h
new file mode 100644
index 000000000000..1a10f91a77d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_3a.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_3A_H
+#define __IA_CSS_3A_H
+
+/* @file
+ * This file contains types used for 3A statistics
+ */
+
+#include <linux/build_bug.h>
+
+#include <math_support.h>
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "system_global.h"
+
+enum ia_css_3a_tables {
+ IA_CSS_S3A_TBL_HI,
+ IA_CSS_S3A_TBL_LO,
+ IA_CSS_RGBY_TBL,
+ IA_CSS_NUM_3A_TABLES
+};
+
+/* Structure that holds 3A statistics in the ISP internal
+ * format. Use ia_css_get_3a_statistics() to translate
+ * this to the format used on the host (3A library).
+ * */
+struct ia_css_isp_3a_statistics {
+ union {
+ struct {
+ ia_css_ptr s3a_tbl;
+ } dmem;
+ struct {
+ ia_css_ptr s3a_tbl_hi;
+ ia_css_ptr s3a_tbl_lo;
+ } vmem;
+ } data;
+ struct {
+ ia_css_ptr rgby_tbl;
+ } data_hmem;
+ u32 exp_id; /** exposure id, to match statistics to a frame,
+ see ia_css_event_public.h for more detail. */
+ u32 isp_config_id;/** Unique ID to track which config was actually applied to a particular frame */
+ ia_css_ptr data_ptr; /** pointer to base of all data */
+ u32 size; /** total size of all data */
+ u32 dmem_size;
+ u32 vmem_size; /** both lo and hi have this size */
+ u32 hmem_size;
+};
+
+#define SIZE_OF_DMEM_STRUCT \
+ (SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_VMEM_STRUCT \
+ (2 * SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_DATA_UNION \
+ (MAX(SIZE_OF_DMEM_STRUCT, SIZE_OF_VMEM_STRUCT))
+
+#define SIZE_OF_DATA_HMEM_STRUCT \
+ (SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT \
+ (SIZE_OF_DATA_UNION + \
+ SIZE_OF_DATA_HMEM_STRUCT + \
+ sizeof(uint32_t) + \
+ sizeof(uint32_t) + \
+ SIZE_OF_IA_CSS_PTR + \
+ 4 * sizeof(uint32_t))
+
+static_assert(sizeof(struct ia_css_isp_3a_statistics) == SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT);
+
+/* Map with host-side pointers to ISP-format statistics.
+ * These pointers can either be copies of ISP data or memory mapped
+ * ISP pointers.
+ * All of the data behind these pointers is allocated contiguously, the
+ * allocated pointer is stored in the data_ptr field. The other fields
+ * point into this one block of data.
+ */
+struct ia_css_isp_3a_statistics_map {
+ void *data_ptr; /** Pointer to start of memory */
+ struct ia_css_3a_output *dmem_stats;
+ u16 *vmem_stats_hi;
+ u16 *vmem_stats_lo;
+ struct ia_css_bh_table *hmem_stats;
+ u32 size; /** total size in bytes of data_ptr */
+ u32 data_allocated; /** indicates whether data_ptr
+ was allocated or not. */
+};
+
+/* @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
+ * @param[out] host_stats Host buffer.
+ * @param[in] isp_stats ISP buffer.
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This copies 3a statistics from an ISP pointer to a host pointer and then
+ * translates some of the statistics, details depend on which ISP binary is
+ * used.
+ * Always use this function, never copy the buffer directly.
+ */
+int
+ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics *isp_stats);
+
+/* @brief Translate 3A statistics from ISP format to host format.
+ * @param[out] host_stats host-format statistics
+ * @param[in] isp_stats ISP-format statistics
+ * @return None
+ *
+ * This function translates statistics from the internal ISP-format to
+ * the host-format. This function does not include an additional copy
+ * step.
+ * */
+void
+ia_css_translate_3a_statistics(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics_map *isp_stats);
+
+/* Convenience functions for alloc/free of certain datatypes */
+
+/* @brief Allocate memory for the 3a statistics on the ISP
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated 3a statistics buffer on the ISP
+*/
+struct ia_css_isp_3a_statistics *
+ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
+
+/* @brief Free the 3a statistics memory on the isp
+ * @param[in] me Pointer to the 3a statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me);
+
+/* @brief Allocate memory for the 3a statistics on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated 3a statistics buffer on the host
+*/
+struct ia_css_3a_statistics *
+ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
+
+/* @brief Free the 3a statistics memory on the host
+ * @param[in] me Pointer to the 3a statistics buffer on the host.
+ * @return None
+ */
+void
+ia_css_3a_statistics_free(struct ia_css_3a_statistics *me);
+
+/* @brief Allocate a 3a statistics map structure
+ * @param[in] isp_stats pointer to the ISP 3a statistics struct
+ * @param[in] data_ptr host-side pointer to ISP 3a statistics.
+ * @return Pointer to the allocated 3a statistics map
+ *
+ * This function allocates the ISP 3a statistics map structure
+ * and uses the data_ptr as base pointer to set the appropriate
+ * pointers to all relevant subsets of the 3a statistics (dmem,
+ * vmem, hmem).
+ * If the data_ptr is NULL, this function will allocate the host-side
+ * memory. This information is stored in the struct and used in the
+ * ia_css_isp_3a_statistics_map_free() function to determine whether
+ * the memory should be freed or not.
+ * Note that this function does not allocate or map any ISP
+ * memory.
+*/
+struct ia_css_isp_3a_statistics_map *
+ia_css_isp_3a_statistics_map_allocate(
+ const struct ia_css_isp_3a_statistics *isp_stats,
+ void *data_ptr);
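+
+/*
+ * Example (illustrative sketch): translating ISP 3A statistics through a
+ * host-side map. "grid" and "isp_stats" are assumed to exist already;
+ * passing a NULL data_ptr makes the map allocate its own host copy of the
+ * data.
+ *
+ *	struct ia_css_3a_statistics *host_stats;
+ *	struct ia_css_isp_3a_statistics_map *map;
+ *
+ *	host_stats = ia_css_3a_statistics_allocate(grid);
+ *	map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL);
+ *	if (host_stats && map) {
+ *		// ... copy or memory-map the ISP data into map->data_ptr ...
+ *		ia_css_translate_3a_statistics(host_stats, map);
+ *	}
+ *	ia_css_isp_3a_statistics_map_free(map);
+ *	ia_css_3a_statistics_free(host_stats);
+ */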
+
+/* @brief Free the 3a statistics map
+ * @param[in] me Pointer to the 3a statistics map
+ * @return None
+ *
+ * This function frees the map struct. If the data_ptr inside it
+ * was allocated inside ia_css_isp_3a_statistics_map_allocate(), it
+ * will be freed in this function. Otherwise it will not be freed.
+ */
+void
+ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me);
+
+#endif /* __IA_CSS_3A_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
new file mode 100644
index 000000000000..e13ca0d84847
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_ACC_TYPES_H
+#define _IA_CSS_ACC_TYPES_H
+
+/* @file
+ * This file contains types used for acceleration
+ */
+
+#include <system_local.h> /* HAS_IRQ_MAP_VERSION_# */
+#include <type_support.h>
+#include <platform_support.h>
+#include <debug_global.h>
+#include <linux/bits.h>
+
+#include "ia_css_types.h"
+#include "ia_css_frame_format.h"
+
+/* Should be included without the path.
+ However, that requires adding the path to numerous makefiles
+ that have nothing to do with isp parameters.
+ */
+#include "runtime/isp_param/interface/ia_css_isp_param_types.h"
+
+/* Types for the acceleration API.
+ * These should be moved to sh_css_internal.h once the old acceleration
+ * argument handling has been completed.
+ * After that, interpretation of these structures is no longer needed
+ * in the kernel and HAL.
+*/
+
+/* Type of acceleration.
+ */
+enum ia_css_acc_type {
+ IA_CSS_ACC_NONE, /** Normal binary */
+ IA_CSS_ACC_OUTPUT, /** Accelerator stage on output frame */
+ IA_CSS_ACC_VIEWFINDER, /** Accelerator stage on viewfinder frame */
+ IA_CSS_ACC_STANDALONE, /** Stand-alone acceleration */
+};
+
+/* Cells types
+ */
+enum ia_css_cell_type {
+ IA_CSS_SP0 = 0,
+ IA_CSS_SP1,
+ IA_CSS_ISP,
+ MAX_NUM_OF_CELLS
+};
+
+/* Firmware types.
+ */
+enum ia_css_fw_type {
+ ia_css_sp_firmware, /** Firmware for the SP */
+ ia_css_isp_firmware, /** Firmware for the ISP */
+ ia_css_bootloader_firmware, /** Firmware for the BootLoader */
+ ia_css_acc_firmware /** Firmware for accelerations */
+};
+
+struct ia_css_blob_descr;
+
+/* Blob descriptor.
+ * This structure describes an SP or ISP blob.
+ * It describes the text, data and bss sections as well as the position in a
+ * firmware file.
+ * For convenience, it contains dynamic data after loading.
+ */
+struct ia_css_blob_info {
+ /** Static blob data */
+ u32 offset; /** Blob offset in fw file */
+ struct ia_css_isp_param_memory_offsets
+ memory_offsets; /** offset wrt hdr in bytes */
+ u32 prog_name_offset; /** offset wrt hdr in bytes */
+ u32 size; /** Size of blob */
+ u32 padding_size; /** total accumulation of bytes added due to section alignment */
+ u32 icache_source; /** Position of icache in blob */
+ u32 icache_size; /** Size of icache section */
+ u32 icache_padding;/** bytes added due to icache section alignment */
+ u32 text_source; /** Position of text in blob */
+ u32 text_size; /** Size of text section */
+ u32 text_padding; /** bytes added due to text section alignment */
+ u32 data_source; /** Position of data in blob */
+ u32 data_target; /** Start of data in SP dmem */
+ u32 data_size; /** Size of data section */
+ u32 data_padding; /** bytes added due to data section alignment */
+ u32 bss_target; /** Start position of bss in SP dmem */
+ u32 bss_size; /** Size of bss section */
+ /** Dynamic data filled by loader */
+ CSS_ALIGN(const void *code,
+ 8); /** Code section absolute pointer within fw, code = icache + text */
+ CSS_ALIGN(const void *data,
+ 8); /** Data section absolute pointer within fw, data = data + bss */
+};
+
+struct ia_css_binary_input_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 source; /* memory, sensor, variable */
+};
+
+struct ia_css_binary_output_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 num_chunks;
+ u32 variable_format;
+};
+
+struct ia_css_binary_internal_info {
+ u32 max_width;
+ u32 max_height;
+};
+
+struct ia_css_binary_bds_info {
+ u32 supported_bds_factors;
+};
+
+struct ia_css_binary_dvs_info {
+ u32 max_envelope_width;
+ u32 max_envelope_height;
+};
+
+struct ia_css_binary_vf_dec_info {
+ u32 is_variable;
+ u32 max_log_downscale;
+};
+
+struct ia_css_binary_s3a_info {
+ u32 s3atbl_use_dmem;
+ u32 fixed_s3a_deci_log;
+};
+
+/* DPC related binary info */
+struct ia_css_binary_dpc_info {
+ u32 bnr_lite; /** bnr lite enable flag */
+};
+
+struct ia_css_binary_iterator_info {
+ u32 num_stripes;
+ u32 row_stripes_height;
+ u32 row_stripes_overlap_lines;
+};
+
+struct ia_css_binary_address_info {
+ u32 isp_addresses; /* Address in ISP dmem */
+ u32 main_entry; /* Address of entry fct */
+ u32 in_frame; /* Address in ISP dmem */
+ u32 out_frame; /* Address in ISP dmem */
+ u32 in_data; /* Address in ISP dmem */
+ u32 out_data; /* Address in ISP dmem */
+ u32 sh_dma_cmd_ptr; /* In ISP dmem */
+};
+
+struct ia_css_binary_uds_info {
+ u16 bpp;
+ u16 use_bci;
+ u16 use_str;
+ u16 woix;
+ u16 woiy;
+ u16 extra_out_vecs;
+ u16 vectors_per_line_in;
+ u16 vectors_per_line_out;
+ u16 vectors_c_per_line_in;
+ u16 vectors_c_per_line_out;
+ u16 vmem_gdc_in_block_height_y;
+ u16 vmem_gdc_in_block_height_c;
+ /* uint16_t padding; */
+};
+
+struct ia_css_binary_pipeline_info {
+ u32 mode;
+ u32 isp_pipe_version;
+ u32 pipelining;
+ u32 c_subsampling;
+ u32 top_cropping;
+ u32 left_cropping;
+ u32 variable_resolution;
+};
+
+struct ia_css_binary_block_info {
+ u32 block_width;
+ u32 block_height;
+ u32 output_block_height;
+};
+
+/* Structure describing an ISP binary.
+ * It describes the capabilities of a binary, like the maximum resolution,
+ * support features, dma channels, uds features, etc.
+ * This part is to be used by the SP.
+ * Future refactoring should move binary properties to ia_css_binary_xinfo,
+ * thereby making the SP code more binary independent.
+ */
+struct ia_css_binary_info {
+ CSS_ALIGN(u32 id, 8); /* IA_CSS_BINARY_ID_* */
+ struct ia_css_binary_pipeline_info pipeline;
+ struct ia_css_binary_input_info input;
+ struct ia_css_binary_output_info output;
+ struct ia_css_binary_internal_info internal;
+ struct ia_css_binary_bds_info bds;
+ struct ia_css_binary_dvs_info dvs;
+ struct ia_css_binary_vf_dec_info vf_dec;
+ struct ia_css_binary_s3a_info s3a;
+ struct ia_css_binary_dpc_info dpc_bnr; /** DPC related binary info */
+ struct ia_css_binary_iterator_info iterator;
+ struct ia_css_binary_address_info addresses;
+ struct ia_css_binary_uds_info uds;
+ struct ia_css_binary_block_info block;
+ struct ia_css_isp_param_isp_segments mem_initializers;
+ /* MW: Packing (related) bools in an integer ?? */
+ struct {
+ u8 reduced_pipe;
+ u8 vf_veceven;
+ u8 dis;
+ u8 dvs_envelope;
+ u8 uds;
+ u8 dvs_6axis;
+ u8 block_output;
+ u8 streaming_dma;
+ u8 ds;
+ u8 bayer_fir_6db;
+ u8 raw_binning;
+ u8 continuous;
+ u8 s3a;
+ u8 fpnr;
+ u8 sc;
+ u8 macc;
+ u8 output;
+ u8 ref_frame;
+ u8 tnr;
+ u8 xnr;
+ u8 params;
+ u8 ca_gdc;
+ u8 isp_addresses;
+ u8 in_frame;
+ u8 out_frame;
+ u8 high_speed;
+ u8 dpc;
+ u8 padding[2];
+ } enable;
+ struct {
+ /* DMA channel ID: [0,...,HIVE_ISP_NUM_DMA_CHANNELS> */
+ u8 ref_y_channel;
+ u8 ref_c_channel;
+ u8 tnr_channel;
+ u8 tnr_out_channel;
+ u8 dvs_coords_channel;
+ u8 output_channel;
+ u8 c_channel;
+ u8 vfout_channel;
+ u8 vfout_c_channel;
+ u8 vfdec_bits_per_pixel;
+ u8 claimed_by_isp;
+ u8 padding[2];
+ } dma;
+};
+
+/* Structure describing an ISP binary.
+ * It describes the capabilities of a binary, like the maximum resolution,
+ * support features, dma channels, uds features, etc.
+ */
+struct ia_css_binary_xinfo {
+ /* Part that is of interest to the SP. */
+ struct ia_css_binary_info sp;
+
+ /* Rest of the binary info, only interesting to the host. */
+ enum ia_css_acc_type type;
+
+ CSS_ALIGN(s32 num_output_formats, 8);
+ enum ia_css_frame_format output_formats[IA_CSS_FRAME_FORMAT_NUM];
+
+ CSS_ALIGN(s32 num_vf_formats, 8); /** number of supported vf formats */
+ enum ia_css_frame_format
+ vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /** types of supported vf formats */
+ u8 num_output_pins;
+ ia_css_ptr xmem_addr;
+
+ CSS_ALIGN(const struct ia_css_blob_descr *blob, 8);
+ CSS_ALIGN(u32 blob_index, 8);
+ CSS_ALIGN(union ia_css_all_memory_offsets mem_offsets, 8);
+ CSS_ALIGN(struct ia_css_binary_xinfo *next, 8);
+};
+
+/* Structure describing the Bootloader (an ISP binary).
+ * It contains several addresses, either in DDR, in isp_dmem or
+ * of the entry function in icache.
+ */
+struct ia_css_bl_info {
+ u32 num_dma_cmds; /** Number of cmds sent by CSS */
+ u32 dma_cmd_list; /** Dma command list sent by CSS */
+ u32 sw_state; /** Polled from css */
+ /* Entry functions */
+ u32 bl_entry; /** The SP entry function */
+};
+
+/* Structure describing the SP binary.
+ * It contains several addresses, either in DDR, in sp_dmem or
+ * of the entry function in pmem.
+ */
+struct ia_css_sp_info {
+ u32 init_dmem_data; /** data sect config, stored to dmem */
+ u32 per_frame_data; /** Per frame data, stored to dmem */
+ u32 group; /** Per pipeline data, loaded by dma */
+ u32 output; /** SP output data, loaded by dmem */
+ u32 host_sp_queue; /** Host <-> SP queues */
+ u32 host_sp_com;/** Host <-> SP commands */
+ u32 isp_started; /** Polled from sensor thread, csim only */
+ u32 sw_state; /** Polled from css */
+ u32 host_sp_queues_initialized; /** Polled from the SP */
+ u32 sleep_mode; /** different mode to halt SP */
+ u32 invalidate_tlb; /** inform SP to invalidate mmu TLB */
+
+ /* ISP2400 */
+ u32 stop_copy_preview; /** suspend copy and preview pipe when capture */
+
+ u32 debug_buffer_ddr_address; /** inform SP the address
+ of DDR debug queue */
+ u32 perf_counter_input_system_error; /** input system perf
+ counter array */
+
+ u32 threads_stack; /** sp thread's stack pointers */
+ u32 threads_stack_size; /** sp thread's stack sizes */
+ u32 curr_binary_id; /** current binary id */
+ u32 raw_copy_line_count; /** raw copy line counter */
+ u32 ddr_parameter_address; /** acc param ddrptr, sp dmem */
+ u32 ddr_parameter_size; /** acc param size, sp dmem */
+ /* Entry functions */
+ u32 sp_entry; /** The SP entry function */
+ u32 tagger_frames_addr; /** Base address of tagger state */
+};
+
+/* The following #if is there because this header file is also included
+ by SP and ISP code but they do not need this data and HIVECC has alignment
+ issue with the firmware struct/union's.
+ More permanent solution will be to refactor this include.
+*/
+
+/* Accelerator firmware information.
+ */
+struct ia_css_acc_info {
+ u32 per_frame_data; /** Dummy for now */
+};
+
+/* Firmware information.
+ */
+union ia_css_fw_union {
+ struct ia_css_binary_xinfo isp; /** ISP info */
+ struct ia_css_sp_info sp; /** SP info */
+ struct ia_css_bl_info bl; /** Bootloader info */
+ struct ia_css_acc_info acc; /** Accelerator info */
+};
+
+/* Firmware information.
+ */
+struct ia_css_fw_info {
+ size_t header_size; /** size of fw header */
+
+ CSS_ALIGN(u32 type, 8);
+ union ia_css_fw_union info; /** Binary info */
+ struct ia_css_blob_info blob; /** Blob info */
+ /* Dynamic part */
+ struct ia_css_fw_info *next;
+
+ CSS_ALIGN(u32 loaded, 8); /** Firmware has been loaded */
+ CSS_ALIGN(const u8 *isp_code, 8); /** ISP pointer to code */
+ /** Firmware handle between user space and kernel */
+ CSS_ALIGN(u32 handle, 8);
+ /** Sections to copy from/to ISP */
+ struct ia_css_isp_param_css_segments mem_initializers;
+ /** Initializer for local ISP memories */
+};
+
+struct ia_css_blob_descr {
+ const unsigned char *blob;
+ struct ia_css_fw_info header;
+ const char *name;
+ union ia_css_all_memory_offsets mem_offsets;
+};
+
+struct ia_css_acc_fw;
+
+/* Structure describing the SP binary of a stand-alone accelerator.
+ */
+struct ia_css_acc_sp {
+ void (*init)(struct ia_css_acc_fw *); /** init for crun */
+ u32 sp_prog_name_offset; /** program name offset wrt hdr in bytes */
+ u32 sp_blob_offset; /** blob offset wrt hdr in bytes */
+ void *entry; /** Address of sp entry point */
+ u32 *css_abort; /** SP dmem abort flag */
+ void *isp_code; /** SP dmem address holding xmem
+ address of isp code */
+ struct ia_css_fw_info fw; /** SP fw descriptor */
+ const u8 *code; /** ISP pointer of allocated SP code */
+};
+
+/* Acceleration firmware descriptor.
+ * This descriptor describes either SP code (stand-alone), or
+ * ISP code (a separate pipeline stage).
+ */
+struct ia_css_acc_fw_hdr {
+ enum ia_css_acc_type type; /** Type of accelerator */
+ u32 isp_prog_name_offset; /** program name offset wrt
+ header in bytes */
+ u32 isp_blob_offset; /** blob offset wrt header
+ in bytes */
+ u32 isp_size; /** Size of isp blob */
+ const u8 *isp_code; /** ISP pointer to code */
+ struct ia_css_acc_sp sp; /** Standalone sp code */
+ /** Firmware handle between user space and kernel */
+ u32 handle;
+ struct ia_css_data parameters; /** Current SP parameters */
+};
+
+/* Firmware structure.
+ * This contains the header and actual blobs.
+ * For standalone, it contains SP and ISP blob.
+ * For a pipeline stage accelerator, it contains ISP code only.
+ * Since its members are variable size, their offsets are described in the
+ * header and computed using the access macros below.
+ */
+struct ia_css_acc_fw {
+ struct ia_css_acc_fw_hdr header; /** firmware header */
+ /*
+ int8_t isp_progname[]; **< ISP program name
+ int8_t sp_progname[]; **< SP program name, stand-alone only
+ uint8_t sp_code[]; **< SP blob, stand-alone only
+ uint8_t isp_code[]; **< ISP blob
+ */
+};
+
+/* Access macros for firmware */
+#define IA_CSS_ACC_OFFSET(t, f, n) ((t)((uint8_t *)(f) + (f->header.n)))
+#define IA_CSS_ACC_SP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \
+ sp.sp_prog_name_offset)
+#define IA_CSS_ACC_ISP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \
+ isp_prog_name_offset)
+#define IA_CSS_ACC_SP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t *, f, \
+ sp.sp_blob_offset)
+#define IA_CSS_ACC_SP_DATA(f) (IA_CSS_ACC_SP_CODE(f) + \
+ (f)->header.sp.fw.blob.data_source)
+#define IA_CSS_ACC_ISP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t*, f,\
+ isp_blob_offset)
+#define IA_CSS_ACC_ISP_SIZE(f) ((f)->header.isp_size)
+
+/* Binary name follows header immediately */
+#define IA_CSS_EXT_ISP_PROG_NAME(f) ((const char *)(f) + (f)->blob.prog_name_offset)
+#define IA_CSS_EXT_ISP_MEM_OFFSETS(f) \
+ ((const struct ia_css_memory_offsets *)((const char *)(f) + (f)->blob.mem_offsets))
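+
+/*
+ * Example (illustrative sketch): reading fields of a loaded accelerator
+ * firmware image through the access macros above. "fw" is a hypothetical
+ * pointer to a firmware blob laid out as struct ia_css_acc_fw.
+ *
+ *	const char *isp_name = IA_CSS_ACC_ISP_PROG_NAME(fw);
+ *	const uint8_t *isp_blob = IA_CSS_ACC_ISP_CODE(fw);
+ *	uint32_t isp_size = IA_CSS_ACC_ISP_SIZE(fw);
+ *
+ *	if (fw->header.type == IA_CSS_ACC_STANDALONE) {
+ *		// stand-alone accelerators also carry an SP blob
+ *		const uint8_t *sp_blob = IA_CSS_ACC_SP_CODE(fw);
+ *		const char *sp_name = IA_CSS_ACC_SP_PROG_NAME(fw);
+ *	}
+ */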
+
+enum ia_css_sp_sleep_mode {
+ SP_DISABLE_SLEEP_MODE = 0,
+ SP_SLEEP_AFTER_FRAME = BIT(0),
+ SP_SLEEP_AFTER_IRQ = BIT(1),
+};
+#endif /* _IA_CSS_ACC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_buffer.h b/drivers/staging/media/atomisp/pci/ia_css_buffer.h
new file mode 100644
index 000000000000..7c00dd1d33fd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_buffer.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BUFFER_H
+#define __IA_CSS_BUFFER_H
+
+/* @file
+ * This file contains datastructures and types for buffers used in CSS
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_timer.h"
+
+/* Enumeration of buffer types. Buffers can be queued and de-queued
+ * to hand them over between IA and ISP.
+ */
+enum ia_css_buffer_type {
+ IA_CSS_BUFFER_TYPE_INVALID = -1,
+ IA_CSS_BUFFER_TYPE_3A_STATISTICS = 0,
+ IA_CSS_BUFFER_TYPE_DIS_STATISTICS,
+ IA_CSS_BUFFER_TYPE_LACE_STATISTICS,
+ IA_CSS_BUFFER_TYPE_INPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_CUSTOM_INPUT,
+ IA_CSS_BUFFER_TYPE_CUSTOM_OUTPUT,
+ IA_CSS_BUFFER_TYPE_METADATA,
+ IA_CSS_BUFFER_TYPE_PARAMETER_SET,
+ IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET,
+ IA_CSS_NUM_DYNAMIC_BUFFER_TYPE,
+ IA_CSS_NUM_BUFFER_TYPE
+};
+
+/* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */
+
+/* Buffer structure. This is a container structure that enables content
+ * independent buffer queues and access functions.
+ */
+struct ia_css_buffer {
+ enum ia_css_buffer_type type; /** Buffer type. */
+ unsigned int exp_id;
+ /** exposure id for this buffer; 0 = not available
+ see ia_css_event_public.h for more detail. */
+ union {
+ struct ia_css_isp_3a_statistics
+ *stats_3a; /** 3A statistics & optionally RGBY statistics. */
+ struct ia_css_isp_dvs_statistics *stats_dvs; /** DVS statistics. */
+ struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /** SKC DVS statistics. */
+ struct ia_css_frame *frame; /** Frame buffer. */
+ struct ia_css_acc_param *custom_data; /** Custom buffer. */
+ struct ia_css_metadata *metadata; /** Sensor metadata. */
+ } data; /** Buffer data pointer. */
+ u64 driver_cookie; /** cookie for the driver */
+ struct ia_css_time_meas
+ timing_data; /** timing data (readings from the timer) */
+ struct ia_css_clock_tick
+ isys_eof_clock_tick; /** ISYS's end of frame timer tick*/
+};
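+
+/*
+ * Example (illustrative sketch): wrapping a frame in an ia_css_buffer before
+ * handing it to the ISP. "frame" and "my_cookie" are hypothetical; the
+ * actual enqueue/dequeue calls live in the pipe API (see
+ * ia_css_pipe_public.h).
+ *
+ *	struct ia_css_buffer buf = { 0 };
+ *
+ *	buf.type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ *	buf.data.frame = frame;
+ *	buf.driver_cookie = my_cookie;	// opaque value kept for the driver
+ */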
+
+/* @brief Dequeue param buffers from sp2host_queue
+ *
+ * @return None
+ *
+ * This function must be called from every driver interrupt handler to prevent
+ * overflow of sp2host_queue.
+ */
+void
+ia_css_dequeue_param_buffers(void);
+
+#endif /* __IA_CSS_BUFFER_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_control.h b/drivers/staging/media/atomisp/pci/ia_css_control.h
new file mode 100644
index 000000000000..d374ceaf7574
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_control.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CONTROL_H
+#define __IA_CSS_CONTROL_H
+
+/* @file
+ * This file contains functionality for starting and controlling CSS
+ */
+
+#include <type_support.h>
+#include <ia_css_env.h>
+#include <ia_css_firmware.h>
+#include <ia_css_irq.h>
+
+/* @brief Initialize the CSS API.
+ * @param[in] env Environment, provides functions to access the
+ * environment in which the CSS code runs. This is
+ * used for host side memory access and message
+ * printing. May not be NULL.
+ * @param[in] l1_base Base index (isp2400)
+ * of the L1 page table. This is a physical
+ * address or index.
+ * @param[in] irq_type The type of interrupt to be used (edge or level)
+ * @return Returns -EINVAL in case of any
+ * errors and 0 otherwise.
+ *
+ * This function initializes the API which includes allocating and initializing
+ * internal data structures.
+ * ia_css_load_firmware() must be called to load the firmware before calling
+ * this function.
+ */
+int ia_css_init(struct device *dev,
+ const struct ia_css_env *env,
+ u32 l1_base,
+ enum ia_css_irq_type irq_type);
+
+/* @brief Un-initialize the CSS API.
+ * @return None
+ *
+ * This function deallocates all memory that has been allocated by the CSS API.
+ * After this function is called, no other CSS functions should be called.
+ */
+void
+ia_css_uninit(void);
+
+/* @brief Enable use of a separate queue for ISYS events.
+ *
+ * @param[in] enable: enable or disable use of separate ISYS event queues.
+ * @return error if called when SP is running.
+ *
+ * @deprecated{This is a temporary function that allows drivers to migrate to
+ * the use of the separate ISYS event queue. Once all drivers supports this, it
+ * will be made the default and this function will be removed.
+ * This function should only be called when the SP is not running, calling it
+ * when the SP is running will result in an error value being returned. }
+ */
+int
+ia_css_enable_isys_event_queue(bool enable);
+
+/* @brief Test whether the ISP has started.
+ *
+ * @return Boolean flag true if the ISP has started or false otherwise.
+ *
+ * Temporary function to poll whether the ISP has been started. Once it has,
+ * the sensor can also be started. */
+bool
+ia_css_isp_has_started(void);
+
+/* @brief Test whether the SP has initialized.
+ *
+ * @return Boolean flag true if the SP has initialized or false otherwise.
+ *
+ * Temporary function to poll whether the SP has been initialized. Once it has,
+ * we can enqueue buffers. */
+bool
+ia_css_sp_has_initialized(void);
+
+/* @brief Test whether the SP has terminated.
+ *
+ * @return Boolean flag true if the SP has terminated or false otherwise.
+ *
+ * Temporary function to poll whether the SP has been terminated. Once it has,
+ * we can switch mode. */
+bool
+ia_css_sp_has_terminated(void);
+
+/* @brief start SP hardware
+ *
+ * @return 0 or error code upon error.
+ *
+ * It will boot the SP hardware and start multi-threading infrastructure.
+ * All threads will be started and blocked by semaphore. This function should
+ * be called before any ia_css_stream_start().
+ */
+int
+ia_css_start_sp(void);
+
+/* @brief stop SP hardware
+ *
+ * @return 0 or error code upon error.
+ *
+ * This function will terminate all threads and shut down SP. It should be
+ * called after all ia_css_stream_stop().
+ */
+int
+ia_css_stop_sp(void);
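+
+/*
+ * Example (illustrative sketch): bringing the CSS up and tearing it down
+ * again. "dev", "env", "l1_base" and "irq_type" are assumed to come from the
+ * driver's probe code, and ia_css_load_firmware() must have been called
+ * before ia_css_init().
+ *
+ *	int ret;
+ *
+ *	ret = ia_css_init(dev, env, l1_base, irq_type);
+ *	if (ret)
+ *		return ret;
+ *	ret = ia_css_start_sp();
+ *	if (ret) {
+ *		ia_css_uninit();
+ *		return ret;
+ *	}
+ *	// ... create and start streams ...
+ *	ia_css_stop_sp();
+ *	ia_css_uninit();
+ */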
+
+#endif /* __IA_CSS_CONTROL_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_device_access.c b/drivers/staging/media/atomisp/pci/ia_css_device_access.c
new file mode 100644
index 000000000000..8ee7656f614b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_device_access.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_device_access.h"
+#include <type_support.h> /* for uint*, size_t */
+#include <system_local.h> /* for hrt_address */
+#include <ia_css_env.h> /* for ia_css_hw_access_env */
+#include <assert_support.h> /* for assert */
+
+static struct ia_css_hw_access_env my_env;
+
+void
+ia_css_device_access_init(const struct ia_css_hw_access_env *env)
+{
+ assert(env);
+
+ my_env = *env;
+}
+
+uint8_t
+ia_css_device_load_uint8(const hrt_address addr)
+{
+ return my_env.load_8(addr);
+}
+
+uint16_t
+ia_css_device_load_uint16(const hrt_address addr)
+{
+ return my_env.load_16(addr);
+}
+
+uint32_t
+ia_css_device_load_uint32(const hrt_address addr)
+{
+ return my_env.load_32(addr);
+}
+
+uint64_t
+ia_css_device_load_uint64(const hrt_address addr)
+{
+ assert(0);
+
+ (void)addr;
+ return 0;
+}
+
+void
+ia_css_device_store_uint8(const hrt_address addr, const uint8_t data)
+{
+ my_env.store_8(addr, data);
+}
+
+void
+ia_css_device_store_uint16(const hrt_address addr, const uint16_t data)
+{
+ my_env.store_16(addr, data);
+}
+
+void
+ia_css_device_store_uint32(const hrt_address addr, const uint32_t data)
+{
+ my_env.store_32(addr, data);
+}
+
+void
+ia_css_device_store_uint64(const hrt_address addr, const uint64_t data)
+{
+ assert(0);
+
+ (void)addr;
+ (void)data;
+}
+
+void
+ia_css_device_load(const hrt_address addr, void *data, const size_t size)
+{
+ my_env.load(addr, data, (uint32_t)size);
+}
+
+void
+ia_css_device_store(const hrt_address addr, const void *data, const size_t size)
+{
+ my_env.store(addr, data, (uint32_t)size);
+}
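+
+/*
+ * Example (illustrative sketch): a read-modify-write of a 32-bit CSS
+ * register through this access layer. "hw_env" is the ia_css_hw_access_env
+ * provided by the driver and "REG_ADDR" is a hypothetical register address
+ * of type hrt_address.
+ *
+ *	uint32_t val;
+ *
+ *	ia_css_device_access_init(hw_env);
+ *	val = ia_css_device_load_uint32(REG_ADDR);
+ *	val |= BIT(0);
+ *	ia_css_device_store_uint32(REG_ADDR, val);
+ */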
diff --git a/drivers/staging/media/atomisp/pci/ia_css_device_access.h b/drivers/staging/media/atomisp/pci/ia_css_device_access.h
new file mode 100644
index 000000000000..f2ea16c093b6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_device_access.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_DEVICE_ACCESS_H
+#define _IA_CSS_DEVICE_ACCESS_H
+
+/* @file
+ * File containing internal functions for the CSS-API to access the CSS device.
+ */
+
+#include <type_support.h> /* for uint*, size_t */
+#include <system_local.h> /* for hrt_address */
+#include <ia_css_env.h> /* for ia_css_hw_access_env */
+
+void
+ia_css_device_access_init(const struct ia_css_hw_access_env *env);
+
+uint8_t
+ia_css_device_load_uint8(const hrt_address addr);
+
+uint16_t
+ia_css_device_load_uint16(const hrt_address addr);
+
+uint32_t
+ia_css_device_load_uint32(const hrt_address addr);
+
+uint64_t
+ia_css_device_load_uint64(const hrt_address addr);
+
+void
+ia_css_device_store_uint8(const hrt_address addr, const uint8_t data);
+
+void
+ia_css_device_store_uint16(const hrt_address addr, const uint16_t data);
+
+void
+ia_css_device_store_uint32(const hrt_address addr, const uint32_t data);
+
+void
+ia_css_device_store_uint64(const hrt_address addr, const uint64_t data);
+
+void
+ia_css_device_load(const hrt_address addr, void *data, const size_t size);
+
+void
+ia_css_device_store(const hrt_address addr, const void *data,
+ const size_t size);
+
+#endif /* _IA_CSS_DEVICE_ACCESS_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_dvs.h b/drivers/staging/media/atomisp/pci/ia_css_dvs.h
new file mode 100644
index 000000000000..6930f1ec3aee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_dvs.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DVS_H
+#define __IA_CSS_DVS_H
+
+/* @file
+ * This file contains types for DVS statistics
+ */
+
+#include <linux/build_bug.h>
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "ia_css_stream_public.h"
+
+enum dvs_statistics_type {
+ DVS_STATISTICS,
+ DVS2_STATISTICS,
+ SKC_DVS_STATISTICS
+};
+
+/* Structure that holds DVS statistics in the ISP internal
+ * format. Use ia_css_get_dvs_statistics() to translate
+ * this to the format used on the host (DVS engine).
+ * */
+struct ia_css_isp_dvs_statistics {
+ ia_css_ptr hor_proj;
+ ia_css_ptr ver_proj;
+ u32 hor_size;
+ u32 ver_size;
+ u32 exp_id; /** see ia_css_event_public.h for more detail */
+ ia_css_ptr data_ptr; /* base pointer containing all memory */
+ u32 size; /* size of allocated memory in data_ptr */
+};
+
+/* Structure that holds SKC DVS statistics in the ISP internal
+ * format. Use ia_css_dvs_statistics_get() to translate this to
+ * the format used on the host.
+ * */
+struct ia_css_isp_skc_dvs_statistics;
+
+#define SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT \
+ ((3 * SIZE_OF_IA_CSS_PTR) + \
+ (4 * sizeof(uint32_t)))
+
+static_assert(sizeof(struct ia_css_isp_dvs_statistics) == SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT);
+
+/* Map with host-side pointers to ISP-format statistics.
+ * These pointers can either be copies of ISP data or memory mapped
+ * ISP pointers.
+ * All of the data behind these pointers is allocated contiguously; the
+ * allocated pointer is stored in the data_ptr field. The other fields
+ * point into this one block of data.
+ */
+struct ia_css_isp_dvs_statistics_map {
+ void *data_ptr;
+ s32 *hor_proj;
+ s32 *ver_proj;
+ u32 size; /* total size in bytes */
+ u32 data_allocated; /* indicates whether data was allocated */
+};
+
+union ia_css_dvs_statistics_isp {
+ struct ia_css_isp_dvs_statistics *p_dvs_statistics_isp;
+ struct ia_css_isp_skc_dvs_statistics *p_skc_dvs_statistics_isp;
+};
+
+union ia_css_dvs_statistics_host {
+ struct ia_css_dvs_statistics *p_dvs_statistics_host;
+ struct ia_css_dvs2_statistics *p_dvs2_statistics_host;
+ struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host;
+};
+
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This may include a translation step as well depending
+ * on the ISP version.
+ * Always use this function, never copy the buffer directly.
+ * Note that this function uses the mem_load function from the CSS
+ * environment struct.
+ * In certain environments this may be slow. In those cases it is
+ * advised to map the ISP memory into a host-side pointer and use
+ * the ia_css_translate_dvs_statistics() function instead.
+ */
+int
+ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+/* @brief Translate DVS statistics from ISP format to host format
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ *
+ * This function translates the dvs statistics from the ISP-internal
+ * format to the format used by the DVS library on the CPU.
+ * This function takes a host-side pointer as input. This can either
+ * point to a copy of the data or be a memory mapped pointer to the
+ * ISP memory pages.
+ */
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+/* @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This may include a translation step as well depending
+ * on the ISP version.
+ * Always use this function, never copy the buffer directly.
+ * Note that this function uses the mem_load function from the CSS
+ * environment struct.
+ * In certain environments this may be slow. In those cases it is
+ * advised to map the ISP memory into a host-side pointer and use
+ * the ia_css_translate_dvs2_statistics() function instead.
+ */
+int
+ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+/* @brief Translate DVS2 statistics from ISP format to host format
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ *
+ * This function translates the dvs2 statistics from the ISP-internal
+ * format to the format used by the DVS2 library on the CPU.
+ * This function takes a host-side pointer as input. This can either
+ * point to a copy of the data or be a memory mapped pointer to the
+ * ISP memory pages.
+ */
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
+ * @param[in] type - DVS statistics type
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ */
+void
+ia_css_dvs_statistics_get(enum dvs_statistics_type type,
+ union ia_css_dvs_statistics_host *host_stats,
+ const union ia_css_dvs_statistics_isp *isp_stats);
+
+/* @brief Allocate the DVS statistics memory on the ISP
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS statistics memory on the ISP
+ * @param[in] me Pointer to the DVS statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me);
+
+/* @brief Allocate the DVS 2.0 statistics memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS 2.0 statistics memory
+ * @param[in] me Pointer to the DVS statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me);
+
+/* @brief Allocate the DVS statistics memory on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the host
+*/
+struct ia_css_dvs_statistics *
+ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS statistics memory on the host
+ * @param[in] me Pointer to the DVS statistics buffer on the host.
+ * @return None
+*/
+void
+ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me);
+
+/* @brief Allocate the DVS coefficients memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS coefficients buffer
+*/
+struct ia_css_dvs_coefficients *
+ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS coefficients memory
+ * @param[in] me Pointer to the DVS coefficients buffer.
+ * @return None
+ */
+void
+ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me);
+
+/* @brief Allocate the DVS 2.0 statistics memory on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS 2.0 statistics buffer on the host
+ */
+struct ia_css_dvs2_statistics *
+ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS 2.0 statistics memory
+ * @param[in] me Pointer to the DVS 2.0 statistics buffer on the host.
+ * @return None
+*/
+void
+ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me);
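+
+/*
+ * Example (illustrative sketch): fetching DVS 2.0 statistics. "grid" is a
+ * hypothetical struct ia_css_dvs_grid_info for the running binary; in a real
+ * driver the ISP-side buffer usually comes from a dequeued statistics buffer
+ * rather than being freshly allocated here.
+ *
+ *	struct ia_css_isp_dvs_statistics *isp_stats;
+ *	struct ia_css_dvs2_statistics *host_stats;
+ *
+ *	isp_stats = ia_css_isp_dvs2_statistics_allocate(grid);
+ *	host_stats = ia_css_dvs2_statistics_allocate(grid);
+ *	if (isp_stats && host_stats)
+ *		ia_css_get_dvs2_statistics(host_stats, isp_stats);
+ *	ia_css_dvs2_statistics_free(host_stats);
+ *	ia_css_isp_dvs2_statistics_free(isp_stats);
+ */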
+
+/* @brief Allocate the DVS 2.0 coefficients memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS 2.0 coefficients buffer
+*/
+struct ia_css_dvs2_coefficients *
+ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/* @brief Free the DVS 2.0 coefficients memory
+ * @param[in] me Pointer to the DVS 2.0 coefficients buffer.
+ * @return None
+*/
+void
+ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me);
+
+/* @brief Allocate the DVS 2.0 6-axis config memory
+ * @param[in] stream The stream.
+ * @return Pointer to the allocated DVS 6axis configuration buffer
+*/
+struct ia_css_dvs_6axis_config *
+ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream);
+
+/* @brief Free the DVS 2.0 6-axis config memory
+ * @param[in] dvs_6axis_config Pointer to the DVS 6axis configuration buffer
+ * @return None
+ */
+void
+ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config);
+
+/* @brief Allocate a dvs statistics map structure
+ * @param[in] isp_stats pointer to the ISP dvs statistics struct
+ * @param[in] data_ptr host-side pointer to ISP dvs statistics.
+ * @return Pointer to the allocated dvs statistics map
+ *
+ * This function allocates the ISP dvs statistics map structure
+ * and uses the data_ptr as base pointer to set the appropriate
+ * pointers to all relevant subsets of the dvs statistics (dmem,
+ * vmem, hmem).
+ * If the data_ptr is NULL, this function will allocate the host-side
+ * memory. This information is stored in the struct and used in the
+ * ia_css_isp_dvs_statistics_map_free() function to determine whether
+ * the memory should be freed or not.
+ * Note that this function does not allocate or map any ISP
+ * memory.
+*/
+struct ia_css_isp_dvs_statistics_map *
+ia_css_isp_dvs_statistics_map_allocate(
+ const struct ia_css_isp_dvs_statistics *isp_stats,
+ void *data_ptr);
+
+/* @brief Free the dvs statistics map
+ * @param[in] me Pointer to the dvs statistics map
+ * @return None
+ *
+ * This function frees the map struct. If the data_ptr inside it
+ * was allocated inside ia_css_isp_dvs_statistics_map_allocate(), it
+ * will be freed in this function. Otherwise it will not be freed.
+ */
+void
+ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me);
+
+/* @brief Allocate memory for the SKC DVS statistics on the ISP
+ * @return Pointer to the allocated ACC DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void);
+
+#endif /* __IA_CSS_DVS_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_env.h b/drivers/staging/media/atomisp/pci/ia_css_env.h
new file mode 100644
index 000000000000..42bf739c51f5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_env.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ENV_H
+#define __IA_CSS_ENV_H
+
+#include <type_support.h>
+#include <linux/stdarg.h> /* va_list */
+#include <linux/bits.h>
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+
+/* @file
+ * This file contains prototypes for functions that need to be provided to the
+ * CSS-API host-code by the environment in which the CSS-API code runs.
+ */
+
+/* Memory allocation attributes, for use in ia_css_css_mem_env. */
+enum ia_css_mem_attr {
+ IA_CSS_MEM_ATTR_CACHED = BIT(0),
+ IA_CSS_MEM_ATTR_ZEROED = BIT(1),
+ IA_CSS_MEM_ATTR_PAGEALIGN = BIT(2),
+ IA_CSS_MEM_ATTR_CONTIGUOUS = BIT(3),
+};
+
+/* Environment with function pointers for local IA memory allocation.
+ * This provides the CSS code with environment specific functionality
+ * for memory allocation of small local buffers such as local data structures.
+ * This is never expected to allocate more than one page of memory (4K bytes).
+ */
+struct ia_css_cpu_mem_env {
+ void (*flush)(struct ia_css_acc_fw *fw);
+ /** Flush function to flush the cache for given accelerator. */
+};
+
+/* Environment with function pointers to access the CSS hardware. This includes
+ * registers and local memories.
+ */
+struct ia_css_hw_access_env {
+ void (*store_8)(hrt_address addr, uint8_t data);
+ /** Store an 8 bit value into an address in the CSS HW address space.
+ The address must be an 8 bit aligned address. */
+ void (*store_16)(hrt_address addr, uint16_t data);
+ /** Store a 16 bit value into an address in the CSS HW address space.
+ The address must be a 16 bit aligned address. */
+ void (*store_32)(hrt_address addr, uint32_t data);
+ /** Store a 32 bit value into an address in the CSS HW address space.
+ The address must be a 32 bit aligned address. */
+ uint8_t (*load_8)(hrt_address addr);
+ /** Load an 8 bit value from an address in the CSS HW address
+ space. The address must be an 8 bit aligned address. */
+ uint16_t (*load_16)(hrt_address addr);
+ /** Load a 16 bit value from an address in the CSS HW address
+ space. The address must be a 16 bit aligned address. */
+ uint32_t (*load_32)(hrt_address addr);
+ /** Load a 32 bit value from an address in the CSS HW address
+ space. The address must be a 32 bit aligned address. */
+ void (*store)(hrt_address addr, const void *data, uint32_t bytes);
+ /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */
+ void (*load)(hrt_address addr, void *data, uint32_t bytes);
+ /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
+};
+
+/* Environment with function pointers to print error and debug messages.
+ */
+struct ia_css_print_env {
+ int __printf(1, 0) (*debug_print)(const char *fmt, va_list args);
+ /** Print a debug message. */
+ int __printf(1, 0) (*error_print)(const char *fmt, va_list args);
+ /** Print an error message.*/
+};
+
+/* Environment structure. This includes function pointers to access several
+ * features provided by the environment in which the CSS API is used.
+ * This is used to run the camera IP in multiple platforms such as Linux,
+ * Windows and several simulation environments.
+ */
+struct ia_css_env {
+ struct ia_css_cpu_mem_env cpu_mem_env; /** local flush. */
+ struct ia_css_hw_access_env hw_access_env; /** CSS HW access functions */
+ struct ia_css_print_env print_env; /** Message printing env. */
+};
+
+#endif /* __IA_CSS_ENV_H */
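To make the callback contract concrete, the following hedged sketch (not part of the patch) shows how a host platform might populate struct ia_css_env; the MMIO base, the printk wrappers and the subset of hooks wired up here are illustrative assumptions, a real driver has to fill in all of the accessors:

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/string.h>
#include "ia_css_env.h"

/* Sketch: wire up a subset of the environment callbacks.
 * css_mmio_base is assumed to have been ioremap()ed elsewhere.
 */
static void __iomem *css_mmio_base;

static void css_store_32(hrt_address addr, uint32_t data)
{
	writel(data, css_mmio_base + addr);
}

static uint32_t css_load_32(hrt_address addr)
{
	return readl(css_mmio_base + addr);
}

static int css_print(const char *fmt, va_list args)
{
	return vprintk(fmt, args);
}

static void css_fill_env(struct ia_css_env *env)
{
	memset(env, 0, sizeof(*env));
	env->hw_access_env.store_32 = css_store_32;
	env->hw_access_env.load_32 = css_load_32;
	env->print_env.debug_print = css_print;
	env->print_env.error_print = css_print;
	/* the 8/16-bit accessors and bulk load/store need similar wiring */
}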
diff --git a/drivers/staging/media/atomisp/pci/ia_css_err.h b/drivers/staging/media/atomisp/pci/ia_css_err.h
new file mode 100644
index 000000000000..5d3ffed9e7fb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_err.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ERR_H
+#define __IA_CSS_ERR_H
+
+/* @file
+ * This file contains possible return values for most
+ * functions in the CSS-API.
+ */
+
+/* FW warnings. This enum contains a value for each warning that
+ * the SP FW can issue to indicate a potential performance issue.
+ */
+enum ia_css_fw_warning {
+ IA_CSS_FW_WARNING_NONE,
+ IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the ISys queue.
+ This warning can be avoided by de-queuing ISYS buffers more timely. */
+ IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the PSys queue.
+ This warning can be avoided by de-queuing PSYS buffers more timely. */
+ IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /* < CSS system delayed because of insufficient available buffers.
+ This warning can be avoided by unlocking locked frame-buffers more timely. */
+	IA_CSS_FW_WARNING_EXP_ID_LOCKED, /* < Exposure ID skipped because the frame associated with it was still locked.
+ This warning can be avoided by unlocking locked frame-buffers more timely. */
+ IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /* < Exposure ID cannot be found on the circular buffer.
+ This warning can be avoided by unlocking locked frame-buffers more timely. */
+ IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /* < Frame and param pair mismatched in tagger.
+ This warning can be avoided by providing a param set for each frame. */
+};
+
+#endif /* __IA_CSS_ERR_H */
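Since the firmware warnings above are typically only logged by the host, a small mapping helper is usually all that is needed; the sketch below is editorial and not part of the patch:

#include "ia_css_err.h"

/* Sketch: translate an SP firmware warning into a log-friendly string. */
static const char *fw_warning_name(enum ia_css_fw_warning w)
{
	switch (w) {
	case IA_CSS_FW_WARNING_NONE:			return "none";
	case IA_CSS_FW_WARNING_ISYS_QUEUE_FULL:		return "ISYS queue full";
	case IA_CSS_FW_WARNING_PSYS_QUEUE_FULL:		return "PSYS queue full";
	case IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED:	return "all circular buffers locked";
	case IA_CSS_FW_WARNING_EXP_ID_LOCKED:		return "exposure ID locked";
	case IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED:	return "exposure ID not found";
	case IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH:	return "frame/param mismatch";
	default:					return "unknown";
	}
}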
diff --git a/drivers/staging/media/atomisp/pci/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
new file mode 100644
index 000000000000..f7215dd96739
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_EVENT_PUBLIC_H
+#define __IA_CSS_EVENT_PUBLIC_H
+
+/* @file
+ * This file contains CSS-API events functionality
+ */
+
+#include <type_support.h> /* uint8_t */
+#include <ia_css_err.h> /* ia_css_err */
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_timer.h> /* ia_css_timer */
+#include <linux/bits.h>
+
+/* The event type, distinguishes the kind of events that
+ * can are generated by the CSS system.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+enum ia_css_event_type {
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = BIT(0),
+ /** Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = BIT(1),
+ /** Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = BIT(2),
+ /** Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = BIT(3),
+ /** Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = BIT(4),
+ /** Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = BIT(5),
+ /** Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE = BIT(6),
+ /** Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED = BIT(7),
+ /** Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = BIT(8),
+ /** Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE = BIT(9),
+ /** Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = BIT(10),
+ /** Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = BIT(11),
+ /** Extension stage complete. */
+ IA_CSS_EVENT_TYPE_TIMER = BIT(12),
+ /** Timer event for measuring the SP side latencies. It contains the
+ 32-bit timer value from the SP */
+ IA_CSS_EVENT_TYPE_PORT_EOF = BIT(13),
+ /** End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING = BIT(14),
+	/** Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT = BIT(15),
+ /** Assertion hit by FW */
+};
+
+#define IA_CSS_EVENT_TYPE_NONE 0
+
+/* IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
+ * The other events (such as PORT_EOF) cannot be enabled/disabled
+ * and are hence excluded from this macro.
+ */
+#define IA_CSS_EVENT_TYPE_ALL \
+ (IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE | \
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED | \
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_METADATA_DONE | \
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE)
+
+/* The event struct, container for the event type and its related values.
+ * Depending on the event type, either pipe or port will be filled.
+ * Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle.
+ * For non-pipeline-related events (i.e. stream-specific events, such as the EOF event),
+ * the port will be filled.
+ */
+struct ia_css_event {
+ struct ia_css_pipe *pipe;
+ /** Pipe handle on which event happened, NULL for non pipe related
+ events. */
+ enum ia_css_event_type type;
+ /** Type of Event, always valid/filled. */
+ u8 port;
+ /** Port number for EOF event (not valid for other events). */
+ u8 exp_id;
+ /** Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
+ The exposure ID is unique only within a logical stream and it is
+ only generated on systems that have an input system (such as 2400
+ and 2401).
+ Most outputs produced by the CSS are tagged with an exposure ID.
+ This allows users of the CSS API to keep track of which buffer
+ was generated from which sensor output frame. This includes:
+ EOF event, output frames, 3A statistics, DVS statistics and
+ sensor metadata.
+ Exposure IDs start at IA_CSS_MIN_EXPOSURE_ID, increment by one
+ until IA_CSS_MAX_EXPOSURE_ID is reached, after that they wrap
+ around to IA_CSS_MIN_EXPOSURE_ID again.
+ Note that in case frames are dropped, this will not be reflected
+	 in the exposure IDs. Therefore applications should not use this
+ to detect frame drops. */
+ u32 fw_handle;
+ /** Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
+ events). */
+ enum ia_css_fw_warning fw_warning;
+ /** Firmware warning code, only for WARNING events. */
+ u8 fw_assert_module_id;
+ /** Firmware module id, only for ASSERT events, should be logged by driver. */
+ u16 fw_assert_line_no;
+ /** Firmware line number, only for ASSERT events, should be logged by driver. */
+ clock_value_t timer_data;
+ /** For storing the full 32-bit of the timer value. Valid only for TIMER
+ event */
+ u8 timer_code;
+ /** For storing the code of the TIMER event. Valid only for
+ TIMER event */
+ u8 timer_subcode;
+ /** For storing the subcode of the TIMER event. Valid only
+ for TIMER event */
+};
+
+/* @brief Dequeue a PSYS event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return -ENODATA if no events are
+ * available or
+ * 0 otherwise.
+ *
+ * This function dequeues an event from the PSYS event queue. The queue is
+ * between the Host CPU and the CSS system. This function can be
+ * called after an interrupt has been generated that signalled that a new event
+ * was available and can be used in a polling-like situation where the -ENODATA
+ * return value is used to determine whether an event was available or not.
+ */
+int
+ia_css_dequeue_psys_event(struct ia_css_event *event);
+
+/* @brief Dequeue an ISYS event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return -ENODATA if no events are
+ * available or
+ * 0 otherwise.
+ *
+ * This function dequeues an event from the ISYS event queue. The queue is
+ * between the host and the CSS system.
+ * Unlike the ia_css_dequeue_psys_event() function, this function can be called
+ * directly from an interrupt service routine (ISR) and it is safe to call
+ * this function in parallel with other CSS API functions (but only one
+ * call to this function should be in flight at any point in time).
+ *
+ * The reason for having the ISYS events separate is to prevent them from
+ * incurring additional latency due to locks being held by other CSS API
+ * functions.
+ */
+int
+ia_css_dequeue_isys_event(struct ia_css_event *event);
+
+#endif /* __IA_CSS_EVENT_PUBLIC_H */
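The event API above is meant to be drained in a loop once an EVENTS_READY interrupt fires; the sketch below (editorial, not from the patch) only uses the prototypes and struct fields declared in this header, and what is done per event type is driver policy, left here as placeholders:

#include "ia_css_event_public.h"

/* Sketch: drain the PSYS event queue after an EVENTS_READY interrupt. */
static void example_drain_psys_events(void)
{
	struct ia_css_event event;

	while (ia_css_dequeue_psys_event(&event) == 0) {
		switch (event.type) {
		case IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE:
			/* event.pipe identifies the pipe that produced it */
			break;
		case IA_CSS_EVENT_TYPE_PORT_EOF:
			/* event.port and event.exp_id are valid here */
			break;
		case IA_CSS_EVENT_TYPE_FW_WARNING:
			/* event.fw_warning carries the warning code */
			break;
		default:
			break;
		}
	}
	/* the loop ends when the queue returns -ENODATA */
}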
diff --git a/drivers/staging/media/atomisp/pci/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
new file mode 100644
index 000000000000..fcfa400cfdd1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_firmware.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FIRMWARE_H
+#define __IA_CSS_FIRMWARE_H
+
+/* @file
+ * This file contains firmware loading/unloading support functionality
+ */
+
+#include <linux/device.h>
+#include "ia_css_err.h"
+#include "ia_css_env.h"
+
+/* CSS firmware package structure.
+ */
+struct ia_css_fw {
+ void *data; /** pointer to the firmware data */
+ unsigned int bytes; /** length in bytes of firmware data */
+};
+
+struct device;
+
+/* @brief Loads the firmware
+ * @param[in] env Environment, provides functions to access the
+ * environment in which the CSS code runs. This is
+ * used for host side memory access and message
+ * printing.
+ * @param[in] fw Firmware package containing the firmware for all
+ * predefined ISP binaries.
+ * @return Returns -EINVAL in case of any
+ * errors and 0 otherwise.
+ *
+ * This function interprets the firmware package. All
+ * contents of this firmware package are copied into local data structures, so
+ * the fw pointer can be freed after this function completes.
+ */
+int
+ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
+ const struct ia_css_fw *fw);
+
+/* @brief Unloads the firmware
+ * @return None
+ *
+ * This function unloads the firmware loaded by ia_css_load_firmware.
+ * Calling this function when no firmware is loaded is pointless but harmless.
+ * Use it to deallocate all memory associated with the firmware.
+ * This function may only be called when the CSS API is in the uninitialized state
+ * (e.g. after calling ia_css_uninit()).
+ */
+void
+ia_css_unload_firmware(void);
+
+#endif /* __IA_CSS_FIRMWARE_H */
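The expected load sequence pairs the kernel's request_firmware() with ia_css_load_firmware(); the sketch below is illustrative, and the firmware file name is a placeholder rather than the real image name used by the driver:

#include <linux/firmware.h>
#include "ia_css_firmware.h"

/* Sketch: fetch the firmware image and hand it to the CSS layer. */
static int example_load_css_fw(struct device *dev, const struct ia_css_env *env)
{
	const struct firmware *fw;
	struct ia_css_fw css_fw;
	int ret;

	ret = request_firmware(&fw, "shisp_example.bin", dev);
	if (ret)
		return ret;

	css_fw.data = (void *)fw->data;
	css_fw.bytes = fw->size;

	ret = ia_css_load_firmware(dev, env, &css_fw);

	/* the contents are copied, so the blob can be released right away */
	release_firmware(fw);
	return ret;
}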
diff --git a/drivers/staging/media/atomisp/pci/ia_css_frac.h b/drivers/staging/media/atomisp/pci/ia_css_frac.h
new file mode 100644
index 000000000000..f3f92da6a3f0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_frac.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_FRAC_H
+#define _IA_CSS_FRAC_H
+
+/* @file
+ * This file contains typedefs used for fractional numbers
+ */
+
+#include <type_support.h>
+
+/* Fixed point types.
+ * NOTE: the 16 bit fixed point types actually occupy 32 bits
+ * to save on extension operations in the ISP code.
+ */
+/* Unsigned fixed point value, 0 integer bits, 16 fractional bits */
+typedef u32 ia_css_u0_16;
+/* Unsigned fixed point value, 5 integer bits, 11 fractional bits */
+typedef u32 ia_css_u5_11;
+/* Unsigned fixed point value, 8 integer bits, 8 fractional bits */
+typedef u32 ia_css_u8_8;
+/* Signed fixed point value, 0 integer bits, 15 fractional bits */
+typedef s32 ia_css_s0_15;
+
+#endif /* _IA_CSS_FRAC_H */
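For illustration (not part of the patch), producing these fixed-point encodings from integer ratios is just a shift and a divide; the helper names below are made up:

#include <linux/types.h>
#include "ia_css_frac.h"

/* Sketch: build fixed-point values from an integer numerator/denominator. */
static inline ia_css_u8_8 example_to_u8_8(unsigned int num, unsigned int den)
{
	/* 8 fractional bits: num/den scaled by 2^8 */
	return (ia_css_u8_8)(((u64)num << 8) / den);
}

static inline ia_css_u0_16 example_to_u0_16(unsigned int num, unsigned int den)
{
	/* 16 fractional bits, for values in [0, 1) */
	return (ia_css_u0_16)(((u64)num << 16) / den);
}

/* e.g. a gain of 1.5 becomes example_to_u8_8(3, 2) == 384 (0x180) */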
diff --git a/drivers/staging/media/atomisp/pci/ia_css_frame_format.h b/drivers/staging/media/atomisp/pci/ia_css_frame_format.h
new file mode 100644
index 000000000000..0cb9c0fbe88c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_frame_format.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FRAME_FORMAT_H
+#define __IA_CSS_FRAME_FORMAT_H
+
+/* @file
+ * This file contains information about formats supported in the ISP
+ */
+
+/* Frame formats, some of these come from fourcc.org, others are
+ better explained by video4linux2. The NV11 seems to be described only
+ on MSDN pages, but even those seem to be gone now.
+ Frames can come in many forms, the main categories are RAW, RGB and YUV
+ (or YCbCr). The YUV frames come in 4 flavors, determined by how the U and V
+ values are subsampled:
+ 1. YUV420: hor = 2, ver = 2
+ 2. YUV411: hor = 4, ver = 1
+ 3. YUV422: hor = 2, ver = 1
+ 4. YUV444: hor = 1, ver = 1
+
+ Warning: not all frame formats are supported as input or output to/from ISP.
+ Some of these formats are therefore not defined in the output table module.
+ Modifications to the frame format enum below can require modifications in the
+ output table module.
+
+ Warning 2: Throughout the CSS code, assumptions are made about the order
+ of formats in this enumeration type, or some sort of copy is maintained.
+ The following files are identified:
+ - FileSupport.h
+ - css/isp/kernels/fc/fc_1.0/formats.isp.c
+ - css/isp/kernels/output/output_1.0/output_table.isp.c
+ - css/isp/kernels/output/sc_output_1.0/formats.hive.c
+ - css/isp/modes/interface/isp_formats.isp.h
+ - css/bxt_sandbox/psyspoc/interface/ia_css_pg_info.h
+ - css/bxt_sandbox/psysapi/data/interface/ia_css_program_group_data.h
+ - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h
+*/
+enum ia_css_frame_format {
+ IA_CSS_FRAME_FORMAT_NV11 = 0, /** 12 bit YUV 411, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12, /** 12 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_16, /** 16 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_TILEY, /** 12 bit YUV 420, Intel proprietary tiled format, TileY */
+ IA_CSS_FRAME_FORMAT_NV16, /** 16 bit YUV 422, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV21, /** 12 bit YUV 420, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_NV61, /** 16 bit YUV 422, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_YV12, /** 12 bit YUV 420, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YV16, /** 16 bit YUV 422, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YUV420, /** 12 bit YUV 420, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV420_16, /** yuv420, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_YUV422, /** 16 bit YUV 422, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV422_16, /** yuv422, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_UYVY, /** 16 bit YUV 422, UYVY interleaved */
+ IA_CSS_FRAME_FORMAT_YUYV, /** 16 bit YUV 422, YUYV interleaved */
+ IA_CSS_FRAME_FORMAT_YUV444, /** 24 bit YUV 444, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV_LINE, /** Internal format, 2 y lines followed
+					by a UV-interleaved line */
+ IA_CSS_FRAME_FORMAT_RAW, /** RAW, 1 plane */
+ IA_CSS_FRAME_FORMAT_RGB565, /** 16 bit RGB, 1 plane. Each 3 sub
+ pixels are packed into one 16 bit
+ value, 5 bits for R, 6 bits for G
+ and 5 bits for B. */
+ IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /** 24 bit RGB, 3 planes */
+ IA_CSS_FRAME_FORMAT_RGBA888, /** 32 bit RGBA, 1 plane, A=Alpha
+ (alpha is unused) */
+ IA_CSS_FRAME_FORMAT_QPLANE6, /** Internal, for advanced ISP */
+ IA_CSS_FRAME_FORMAT_BINARY_8, /** byte stream, used for jpeg. For
+ frames of this type, we set the
+ height to 1 and the width to the
+ number of allocated bytes. */
+ IA_CSS_FRAME_FORMAT_MIPI, /** MIPI frame, 1 plane */
+ IA_CSS_FRAME_FORMAT_RAW_PACKED, /** RAW, 1 plane, packed */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /** 8 bit per Y/U/V.
+ Y odd line; UYVY
+ interleaved even line */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /** Legacy YUV420. UY odd
+ line; VY even line */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /** 10 bit per Y/U/V. Y odd
+ line; UYVY interleaved
+ even line */
+};
+
+/* NOTE: IA_CSS_FRAME_FORMAT_NUM was purposely defined outside of enum type ia_css_frame_format, */
+/* because of issues this would cause with the Clockwork code checking tool. */
+#define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1)
+
+/* Number of valid output frame formats for ISP */
+#define IA_CSS_FRAME_OUT_FORMAT_NUM (IA_CSS_FRAME_FORMAT_RGBA888 + 1)
+
+#endif /* __IA_CSS_FRAME_FORMAT_H */
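A coarse classification helper, derived only from the per-value descriptions in the enum above, can be useful on the host side; this is an editorial sketch, not driver code:

#include <linux/types.h>
#include "ia_css_frame_format.h"

/* Sketch: coarse format classification based on the enum comments. */
static bool example_format_is_raw(enum ia_css_frame_format fmt)
{
	return fmt == IA_CSS_FRAME_FORMAT_RAW ||
	       fmt == IA_CSS_FRAME_FORMAT_RAW_PACKED;
}

static bool example_format_is_rgb(enum ia_css_frame_format fmt)
{
	return fmt == IA_CSS_FRAME_FORMAT_RGB565 ||
	       fmt == IA_CSS_FRAME_FORMAT_PLANAR_RGB888 ||
	       fmt == IA_CSS_FRAME_FORMAT_RGBA888;
}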
diff --git a/drivers/staging/media/atomisp/pci/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h
new file mode 100644
index 000000000000..7acfedb541d8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_frame_public.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FRAME_PUBLIC_H
+#define __IA_CSS_FRAME_PUBLIC_H
+
+/* @file
+ * This file contains structs to describe various frame-formats supported by the ISP.
+ */
+
+#include <media/videobuf2-v4l2.h>
+#include <type_support.h>
+#include "ia_css_err.h"
+#include "ia_css_types.h"
+#include "ia_css_frame_format.h"
+#include "ia_css_buffer.h"
+
+/* For RAW input, the bayer order needs to be specified separately. There
+ * are 4 possible orders. The name is constructed by taking the first two
+ * colors on the first line and the first two colors from the second line.
+ */
+enum ia_css_bayer_order {
+ IA_CSS_BAYER_ORDER_GRBG, /** GRGRGRGRGR .. BGBGBGBGBG */
+ IA_CSS_BAYER_ORDER_RGGB, /** RGRGRGRGRG .. GBGBGBGBGB */
+ IA_CSS_BAYER_ORDER_BGGR, /** BGBGBGBGBG .. GRGRGRGRGR */
+ IA_CSS_BAYER_ORDER_GBRG, /** GBGBGBGBGB .. RGRGRGRGRG */
+};
+
+#define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1)
+
+/* Frame plane structure. This describes one plane in an image
+ * frame buffer.
+ */
+struct ia_css_frame_plane {
+ unsigned int height; /** height of a plane in lines */
+ unsigned int width; /** width of a line, in DMA elements, note that
+ for RGB565 the three subpixels are stored in
+ one element. For all other formats this is
+ the number of subpixels per line. */
+ unsigned int stride; /** stride of a line in bytes */
+ unsigned int offset; /** offset in bytes to start of frame data.
+ offset is wrt data field in ia_css_frame */
+};
+
+/* Binary "plane". This is used to store binary streams such as JPEG
+ * images. This is not actually a real plane.
+ */
+struct ia_css_frame_binary_plane {
+ unsigned int size; /** number of bytes in the stream */
+ struct ia_css_frame_plane data; /** plane */
+};
+
+/* Container for planar YUV frames. This contains 3 planes.
+ */
+struct ia_css_frame_yuv_planes {
+ struct ia_css_frame_plane y; /** Y plane */
+ struct ia_css_frame_plane u; /** U plane */
+ struct ia_css_frame_plane v; /** V plane */
+};
+
+/* Container for semi-planar YUV frames.
+ */
+struct ia_css_frame_nv_planes {
+ struct ia_css_frame_plane y; /** Y plane */
+ struct ia_css_frame_plane uv; /** UV plane */
+};
+
+/* Container for planar RGB frames. Each color has its own plane.
+ */
+struct ia_css_frame_rgb_planes {
+ struct ia_css_frame_plane r; /** Red plane */
+ struct ia_css_frame_plane g; /** Green plane */
+ struct ia_css_frame_plane b; /** Blue plane */
+};
+
+/* Container for 6-plane frames. These frames are used internally
+ * in the advanced ISP only.
+ */
+struct ia_css_frame_plane6_planes {
+ struct ia_css_frame_plane r; /** Red plane */
+ struct ia_css_frame_plane r_at_b; /** Red at blue plane */
+ struct ia_css_frame_plane gr; /** Red-green plane */
+ struct ia_css_frame_plane gb; /** Blue-green plane */
+ struct ia_css_frame_plane b; /** Blue plane */
+ struct ia_css_frame_plane b_at_r; /** Blue at red plane */
+};
+
+/* Crop info struct - stores the lines to be cropped in isp */
+struct ia_css_crop_info {
+ /* the final start column and start line
+ * sum of lines to be cropped + bayer offset
+ */
+ unsigned int start_column;
+ unsigned int start_line;
+};
+
+/* Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct ia_css_frame_info {
+ struct ia_css_resolution res; /** Frame resolution (valid data) */
+ unsigned int padded_width; /** stride of line in memory (in pixels) */
+ enum ia_css_frame_format format; /** format of the frame data */
+ unsigned int raw_bit_depth; /** number of valid bits per pixel,
+ only valid for RAW bayer frames */
+ enum ia_css_bayer_order raw_bayer_order; /** bayer order, only valid
+ for RAW bayer frames */
+ /* the params below are computed based on bayer_order
+ * we can remove the raw_bayer_order if it is redundant
+ * keeping it for now as bxt and fpn code seem to use it
+ */
+ struct ia_css_crop_info crop_info;
+};
+
+#define IA_CSS_BINARY_DEFAULT_FRAME_INFO { \
+ .format = IA_CSS_FRAME_FORMAT_NUM, \
+ .raw_bayer_order = IA_CSS_BAYER_ORDER_NUM, \
+}
+
+/**
+ * Specifies the DVS loop delay in "frame periods"
+ */
+enum ia_css_frame_delay {
+ IA_CSS_FRAME_DELAY_0, /** Frame delay = 0 */
+ IA_CSS_FRAME_DELAY_1, /** Frame delay = 1 */
+ IA_CSS_FRAME_DELAY_2 /** Frame delay = 2 */
+};
+
+/* Frame structure. This structure describes an image buffer or frame.
+ * This is the main structure used for all input and output images.
+ */
+struct ia_css_frame {
+ /*
+ * The videobuf2 core will allocate buffers including room for private
+ * data (the rest of struct ia_css_frame). The vb2_v4l2_buffer must
+ * be the first member for this to work!
+ * Note the atomisp code also uses ia_css_frame-s which are not used
+ * as v4l2-buffers in some places. In this case the vb2 member will
+ * be unused.
+ */
+ struct vb2_v4l2_buffer vb;
+ /* List-head for linking into the activeq or buffers_waiting_for_param list */
+ struct list_head queue;
+ struct ia_css_frame_info frame_info; /** info struct describing the frame */
+ ia_css_ptr data; /** pointer to start of image data */
+ unsigned int data_bytes; /** size of image data in bytes */
+ /* LA: move this to ia_css_buffer */
+ /*
+ * -1 if data address is static during life time of pipeline
+ * >=0 if data address can change per pipeline/frame iteration
+ * index to dynamic data: ia_css_frame_in, ia_css_frame_out
+ * ia_css_frame_out_vf
+ * index to host-sp queue id: queue_0, queue_1 etc.
+ */
+ int dynamic_queue_id;
+ /*
+ * if it is dynamic frame, buf_type indicates which buffer type it
+ * should use for event generation. we have this because in vf_pp
+ * binary, we use output port, but we expect VF_OUTPUT_DONE event
+ */
+ enum ia_css_buffer_type buf_type;
+ unsigned int exp_id;
+ /** exposure id, see ia_css_event_public.h for more detail */
+ u32 isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
+ bool valid; /** First video output frame is not valid */
+ union {
+ unsigned int _initialisation_dummy;
+ struct ia_css_frame_plane raw;
+ struct ia_css_frame_plane rgb;
+ struct ia_css_frame_rgb_planes planar_rgb;
+ struct ia_css_frame_plane yuyv;
+ struct ia_css_frame_yuv_planes yuv;
+ struct ia_css_frame_nv_planes nv;
+ struct ia_css_frame_plane6_planes plane6;
+ struct ia_css_frame_binary_plane binary;
+ } planes; /** frame planes, select the right one based on
+ info.format */
+};
+
+#define vb_to_frame(vb2) \
+ container_of(to_vb2_v4l2_buffer(vb2), struct ia_css_frame, vb)
+
+#define DEFAULT_FRAME { \
+ .frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .dynamic_queue_id = SH_CSS_INVALID_QUEUE_ID, \
+ .buf_type = IA_CSS_BUFFER_TYPE_INVALID, \
+}
+
+/* @brief Allocate a CSS frame structure
+ *
+ * @param frame The allocated frame.
+ * @param width The width (in pixels) of the frame.
+ * @param height The height (in lines) of the frame.
+ * @param format The frame format.
+ * @param stride The padded stride, in pixels.
+ * @param raw_bit_depth The raw bit depth, in bits.
+ * @return The error code.
+ *
+ * Allocate a CSS frame structure. The memory for the frame data will be
+ * allocated in the CSS address space.
+ */
+int
+ia_css_frame_allocate(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int stride,
+ unsigned int raw_bit_depth);
+
+/* @brief Initialize a CSS frame structure using a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Initialize a frame using the resolution and format from a frame info struct.
+ */
+int ia_css_frame_init_from_info(struct ia_css_frame *frame,
+ const struct ia_css_frame_info *info);
+
+/* @brief Allocate a CSS frame structure using a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Allocate a frame using the resolution and format from a frame info struct.
+ * This is a convenience function, implemented on top of
+ * ia_css_frame_allocate().
+ */
+int
+ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info);
+/* @brief Free a CSS frame structure.
+ *
+ * @param[in] frame Pointer to the frame.
+ * @return None
+ *
+ * Free a CSS frame structure. This will free both the frame structure
+ * and the pixel data pointer contained within the frame structure.
+ */
+void
+ia_css_frame_free(struct ia_css_frame *frame);
+
+static inline const struct ia_css_frame_info *
+ia_css_frame_get_info(const struct ia_css_frame *frame)
+{
+ return frame ? &frame->frame_info : NULL;
+}
+
+#endif /* __IA_CSS_FRAME_PUBLIC_H */
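A minimal round trip through the frame allocation API above, as a hedged sketch; the info struct is assumed to have been filled elsewhere (e.g. from a binary's output description):

#include "ia_css_frame_public.h"

/* Sketch: allocate a frame matching an existing frame info and free it. */
static int example_frame_roundtrip(const struct ia_css_frame_info *info)
{
	struct ia_css_frame *frame;
	int ret;

	ret = ia_css_frame_allocate_from_info(&frame, info);
	if (ret)
		return ret;

	/* pick the planes union member matching info->format, e.g. for
	 * IA_CSS_FRAME_FORMAT_NV12 the frame->planes.nv member applies
	 */

	ia_css_frame_free(frame);
	return 0;
}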
diff --git a/drivers/staging/media/atomisp/pci/ia_css_host_data.h b/drivers/staging/media/atomisp/pci/ia_css_host_data.h
new file mode 100644
index 000000000000..0e45650cc1ab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_host_data.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SH_CSS_HOST_DATA_H
+#define __SH_CSS_HOST_DATA_H
+
+#include <ia_css_types.h> /* ia_css_pipe */
+
+/**
+ * @brief Allocate structure ia_css_host_data.
+ *
+ * @param[in] size Size of the requested host data
+ *
+ * @return
+ * - NULL, can't allocate requested size
+ * - pointer to structure, field address points to host data with size bytes
+ */
+struct ia_css_host_data *
+ia_css_host_data_allocate(size_t size);
+
+/**
+ * @brief Free structure ia_css_host_data.
+ *
+ * @param[in] me Pointer to structure, if a NULL is passed functions
+ * returns without error. Otherwise a valid pointer to
+ * structure must be passed and a related memory
+ * is freed.
+ *
+ * @return
+ */
+void ia_css_host_data_free(struct ia_css_host_data *me);
+
+#endif /* __SH_CSS_HOST_DATA_H */
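The two calls above are meant to be paired; a minimal sketch follows (editorial, assuming the address field documented above is what the caller fills in):

#include <linux/errno.h>
#include "ia_css_host_data.h"

/* Sketch: allocate, use and release a host data buffer. */
static int example_host_data(size_t size)
{
	struct ia_css_host_data *data = ia_css_host_data_allocate(size);

	if (!data)
		return -ENOMEM;

	/* ... fill the buffer behind the address field with size bytes ... */

	ia_css_host_data_free(data);	/* passing NULL would also be accepted */
	return 0;
}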
diff --git a/drivers/staging/media/atomisp/pci/ia_css_input_port.h b/drivers/staging/media/atomisp/pci/ia_css_input_port.h
new file mode 100644
index 000000000000..f138dfa8f6b2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_input_port.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+/* For MIPI_PORT0_ID to MIPI_PORT2_ID */
+#include "system_global.h"
+
+#ifndef __IA_CSS_INPUT_PORT_H
+#define __IA_CSS_INPUT_PORT_H
+
+/* @file
+ * This file contains information about the possible input ports for CSS
+ */
+
+/* Backward compatible for CSS API 2.0 only
+ * TO BE REMOVED when all drivers move to CSS API 2.1
+ */
+#define IA_CSS_CSI2_PORT_4LANE MIPI_PORT0_ID
+#define IA_CSS_CSI2_PORT_1LANE MIPI_PORT1_ID
+#define IA_CSS_CSI2_PORT_2LANE MIPI_PORT2_ID
+
+/* The CSI2 interface supports 2 types of compression or can
+ * be run without compression.
+ */
+enum ia_css_csi2_compression_type {
+ IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /** No compression */
+ IA_CSS_CSI2_COMPRESSION_TYPE_1, /** Compression scheme 1 */
+ IA_CSS_CSI2_COMPRESSION_TYPE_2 /** Compression scheme 2 */
+};
+
+struct ia_css_csi2_compression {
+ enum ia_css_csi2_compression_type type;
+ /** Compression used */
+ unsigned int compressed_bits_per_pixel;
+ /** Compressed bits per pixel (only when compression is enabled) */
+ unsigned int uncompressed_bits_per_pixel;
+ /** Uncompressed bits per pixel (only when compression is enabled) */
+};
+
+/* Input port structure.
+ */
+struct ia_css_input_port {
+ enum mipi_port_id port; /** Physical CSI-2 port */
+ unsigned int num_lanes; /** Number of lanes used (4-lane port only) */
+ unsigned int timeout; /** Timeout value */
+ unsigned int rxcount; /** Register value, should include all lanes */
+ struct ia_css_csi2_compression compression; /** Compression used */
+};
+
+#endif /* __IA_CSS_INPUT_PORT_H */
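As an illustration of the structure above, a 4-lane, uncompressed port description might look as follows; the timeout and rxcount values are placeholders, not recommendations:

#include "ia_css_input_port.h"

/* Sketch: describe a 4-lane CSI-2 port without compression. */
static const struct ia_css_input_port example_port = {
	.port = MIPI_PORT0_ID,		/* a.k.a. IA_CSS_CSI2_PORT_4LANE */
	.num_lanes = 4,
	.timeout = 0x1000,		/* placeholder register value */
	.rxcount = 0x04040404,		/* placeholder, covers all lanes */
	.compression = {
		.type = IA_CSS_CSI2_COMPRESSION_TYPE_NONE,
	},
};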
diff --git a/drivers/staging/media/atomisp/pci/ia_css_irq.h b/drivers/staging/media/atomisp/pci/ia_css_irq.h
new file mode 100644
index 000000000000..2a4f11f9d785
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_irq.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_IRQ_H
+#define __IA_CSS_IRQ_H
+
+/* @file
+ * This file contains information for Interrupts/IRQs from CSS
+ */
+
+#include "ia_css_err.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_input_port.h"
+#include <linux/bits.h>
+
+/* Interrupt types, these enumerate all supported interrupt types.
+ */
+enum ia_css_irq_type {
+ IA_CSS_IRQ_TYPE_EDGE, /** Edge (level) sensitive interrupt */
+ IA_CSS_IRQ_TYPE_PULSE /** Pulse-shaped interrupt */
+};
+
+/* Interrupt request type.
+ * When the CSS hardware generates an interrupt, a function in this API
+ * needs to be called to retrieve information about the interrupt.
+ * This interrupt type is part of this information and indicates what
+ * type of information the interrupt signals.
+ *
+ * Note that one interrupt can carry multiple interrupt types. For
+ * example: the online video ISP will generate only 2 interrupts, one to
+ * signal that the statistics (3a and DIS) are ready and one to signal
+ * that all output frames are done (output and viewfinder).
+ *
+ * DEPRECATED, this interface is not portable it should only define user
+ * (SW) interrupts
+ */
+enum ia_css_irq_info {
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = BIT(0),
+ /** the css receiver has encountered an error */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = BIT(1),
+	/** the FIFO in the CSI receiver has overflowed */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = BIT(2),
+ /** the css receiver received the start of frame */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = BIT(3),
+ /** the css receiver received the end of frame */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = BIT(4),
+ /** the css receiver received the start of line */
+ IA_CSS_IRQ_INFO_EVENTS_READY = BIT(5),
+ /** One or more events are available in the PSYS event queue */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = BIT(6),
+ /** the css receiver received the end of line */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = BIT(7),
+ /** the css receiver received a change in side band signals */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = BIT(8),
+ /** generic short packets (0) */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = BIT(9),
+ /** generic short packets (1) */
+ IA_CSS_IRQ_INFO_IF_PRIM_ERROR = BIT(10),
+ /** the primary input formatter (A) has encountered an error */
+ IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = BIT(11),
+ /** the primary input formatter (B) has encountered an error */
+ IA_CSS_IRQ_INFO_IF_SEC_ERROR = BIT(12),
+ /** the secondary input formatter has encountered an error */
+ IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = BIT(13),
+ /** the stream-to-memory device has encountered an error */
+ IA_CSS_IRQ_INFO_SW_0 = BIT(14),
+ /** software interrupt 0 */
+ IA_CSS_IRQ_INFO_SW_1 = BIT(15),
+ /** software interrupt 1 */
+ IA_CSS_IRQ_INFO_SW_2 = BIT(16),
+ /** software interrupt 2 */
+ IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = BIT(17),
+ /** ISP binary statistics are ready */
+ IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = BIT(18),
+ /** the input system is in error */
+ IA_CSS_IRQ_INFO_IF_ERROR = BIT(19),
+ /** the input formatter is in error */
+ IA_CSS_IRQ_INFO_DMA_ERROR = BIT(20),
+ /** the dma is in error */
+ IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = BIT(21),
+ /** end-of-frame events are ready in the isys_event queue */
+};
+
+/* CSS receiver error types. Whenever the CSS receiver has encountered
+ * an error, this enumeration is used to indicate which errors have occurred.
+ *
+ * Note that multiple error flags can be enabled at once and that this is in
+ * fact common (whenever an error occurs, it usually results in multiple
+ * errors).
+ *
+ * DEPRECATED: This interface is not portable, different systems have
+ * different receiver types, or possibly none in the case of test systems.
+ */
+enum ia_css_rx_irq_info {
+ IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = BIT(0), /** buffer overrun */
+ IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = BIT(1), /** entering sleep mode */
+ IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = BIT(2), /** exited sleep mode */
+ IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = BIT(3), /** ECC corrected */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT = BIT(4),
+ /** Start of transmission */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = BIT(5), /** SOT sync (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_CONTROL = BIT(6), /** Control (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = BIT(7), /** Double ECC */
+ IA_CSS_RX_IRQ_INFO_ERR_CRC = BIT(8), /** CRC error */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = BIT(9), /** Unknown ID */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = BIT(10), /** Frame sync error */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = BIT(11), /** Frame data error */
+ IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = BIT(12), /** Timeout occurred */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = BIT(13), /** Unknown escape seq. */
+ IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = BIT(14), /** Line Sync error */
+ IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = BIT(15),
+};
+
+/* Interrupt info structure. This structure contains information about an
+ * interrupt. This needs to be used after an interrupt is received on the IA
+ * to perform the correct action.
+ */
+struct ia_css_irq {
+ enum ia_css_irq_info type; /** Interrupt type. */
+ unsigned int sw_irq_0_val; /** In case of SW interrupt 0, value. */
+ unsigned int sw_irq_1_val; /** In case of SW interrupt 1, value. */
+ unsigned int sw_irq_2_val; /** In case of SW interrupt 2, value. */
+ struct ia_css_pipe *pipe;
+ /** The image pipe that generated the interrupt. */
+};
+
+/* @brief Obtain interrupt information.
+ *
+ * @param[out] info Pointer to the interrupt info. The interrupt
+ *			information will be written to this struct.
+ * @return	If an error is encountered while retrieving the interrupt
+ *			info and no interrupt could be translated successfully,
+ *			this will return IA_CSS_INTERNAL_ERROR. Otherwise 0.
+ *
+ * This function is expected to be executed after an interrupt has been sent
+ * to the IA from the CSS. This function returns information about the interrupt
+ * which is needed by the IA code to properly handle the interrupt. This
+ * information includes the image pipe, buffer type etc.
+ */
+int
+ia_css_irq_translate(unsigned int *info);
+
+/* @brief Get CSI receiver error info.
+ *
+ * @param[out] irq_bits	Pointer to the interrupt bits. The interrupt
+ *			bits will be written to this location.
+ *			These are the error bits that are enabled in the CSI
+ *			receiver error register.
+ * @return None
+ *
+ * This function should be used whenever a CSI receiver error interrupt is
+ * generated. It provides the detailed information (bits) on the exact error
+ * that occurred.
+ *
+ * @deprecated This function is DEPRECATED since it only works on CSI port 1.
+ * Use the function below instead and specify the appropriate port.
+ */
+void
+ia_css_rx_get_irq_info(unsigned int *irq_bits);
+
+/* @brief Get CSI receiver error info.
+ *
+ * @param[in] port Input port identifier.
+ * @param[out] irq_bits	Pointer to the interrupt bits. The interrupt
+ *			bits will be written to this location.
+ *			These are the error bits that are enabled in the CSI
+ *			receiver error register.
+ * @return None
+ *
+ * This function should be used whenever a CSI receiver error interrupt is
+ * generated. It provides the detailed information (bits) on the exact error
+ * that occurred.
+ */
+void
+ia_css_rx_port_get_irq_info(enum mipi_port_id port, unsigned int *irq_bits);
+
+/* @brief Clear CSI receiver error info.
+ *
+ * @param[in] irq_bits The bits that should be cleared from the CSI receiver
+ * interrupt bits register.
+ * @return None
+ *
+ * This function should be called after ia_css_rx_get_irq_info has been called
+ * and the error bits have been interpreted. It is advised to use the return
+ * value of that function as the argument to this function to make sure no new
+ * error bits get overwritten.
+ *
+ * @deprecated This function is DEPRECATED since it only works on CSI port 1.
+ * Use the function below instead and specify the appropriate port.
+ */
+void
+ia_css_rx_clear_irq_info(unsigned int irq_bits);
+
+/* @brief Clear CSI receiver error info.
+ *
+ * @param[in] port Input port identifier.
+ * @param[in] irq_bits The bits that should be cleared from the CSI receiver
+ * interrupt bits register.
+ * @return None
+ *
+ * This function should be called after ia_css_rx_port_get_irq_info() has been
+ * called and the error bits have been interpreted. It is advised to use the return
+ * value of that function as the argument to this function to make sure no new
+ * error bits get overwritten.
+ */
+void
+ia_css_rx_port_clear_irq_info(enum mipi_port_id port, unsigned int irq_bits);
+
+/* @brief Enable or disable specific interrupts.
+ *
+ * @param[in] type The interrupt type that will be enabled/disabled.
+ * @param[in] enable enable or disable.
+ * @return Returns IA_CSS_INTERNAL_ERROR if this interrupt
+ *			type cannot be enabled/disabled, which is true for
+ * CSS internal interrupts. Otherwise returns
+ * 0.
+ */
+int
+ia_css_irq_enable(enum ia_css_irq_info type, bool enable);
+
+#endif /* __IA_CSS_IRQ_H */
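Put together, the interrupt API above is used in a translate-inspect-clear sequence; the sketch below is editorial and restricts itself to the declarations in this header:

#include "ia_css_irq.h"

/* Sketch: typical handling flow after the CSS raised an interrupt. */
static void example_handle_css_irq(void)
{
	unsigned int info = 0;
	unsigned int rx_bits = 0;

	if (ia_css_irq_translate(&info))
		return;		/* nothing could be translated */

	if (info & IA_CSS_IRQ_INFO_EVENTS_READY) {
		/* drain the PSYS queue with ia_css_dequeue_psys_event() */
	}

	if (info & IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR) {
		ia_css_rx_port_get_irq_info(MIPI_PORT0_ID, &rx_bits);
		/* log rx_bits, then clear exactly what was read */
		ia_css_rx_port_clear_irq_info(MIPI_PORT0_ID, rx_bits);
	}
}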
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.c
new file mode 100644
index 000000000000..38c9c62366d6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+int ia_css_configure_iterator(const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
+
+ ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_copy_output(const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
+
+ ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+int ia_css_configure_crop(const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
+
+ ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_fpn(const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
+ ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_dvs(const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
+ ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_qplane(const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
+ ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+
+ return 0;
+}
+
+int ia_css_configure_output0(const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
+
+ ia_css_output0_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_output1(const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
+
+ ia_css_output1_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_output(const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.output.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
+
+ ia_css_output_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_raw(const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
+
+ ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_tnr(const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
+
+ ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_ref(const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
+
+ ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_vf(const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
+
+ ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
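All of the generated ia_css_configure_*() functions above share one shape: look up the kernel's size and offset in the binary's memory-offset table, bail out if the kernel is not present in this binary, and otherwise convert the configuration straight into the binary's DMEM CONFIG parameter area. The macro below is purely illustrative (it is not in the source) and only restates that shared shape:

/* Illustrative only: the pattern common to the generated functions.
 * KERNEL is the dmem member name (crop, fpn, dvs, ...) and CONVERT the
 * matching ia_css_<kernel>_config() converter.
 */
#define EXAMPLE_CONFIGURE(binary, config_dmem, KERNEL, CONVERT, CFG_TYPE)	\
do {										\
	unsigned int sz, off;							\
	if (!(binary)->info->mem_offsets.offsets.config)			\
		break;								\
	sz = (binary)->info->mem_offsets.offsets.config->dmem.KERNEL.size;	\
	if (!sz)								\
		break;								\
	off = (binary)->info->mem_offsets.offsets.config->dmem.KERNEL.offset;	\
	CONVERT((CFG_TYPE *)							\
		&(binary)->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[off], \
		(config_dmem), sz);						\
} while (0)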
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h
new file mode 100644
index 000000000000..226902d2100b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifdef IA_CSS_INCLUDE_CONFIGURATIONS
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h"
+#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h"
+#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h"
+#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h"
+#endif
+
+#ifndef _IA_CSS_ISP_CONFIG_H
+#define _IA_CSS_ISP_CONFIG_H
+
+enum ia_css_configuration_ids {
+ IA_CSS_ITERATOR_CONFIG_ID,
+ IA_CSS_COPY_OUTPUT_CONFIG_ID,
+ IA_CSS_CROP_CONFIG_ID,
+ IA_CSS_FPN_CONFIG_ID,
+ IA_CSS_DVS_CONFIG_ID,
+ IA_CSS_QPLANE_CONFIG_ID,
+ IA_CSS_OUTPUT0_CONFIG_ID,
+ IA_CSS_OUTPUT1_CONFIG_ID,
+ IA_CSS_OUTPUT_CONFIG_ID,
+ IA_CSS_RAW_CONFIG_ID,
+ IA_CSS_TNR_CONFIG_ID,
+ IA_CSS_REF_CONFIG_ID,
+ IA_CSS_VF_CONFIG_ID,
+
+ /* ISP 2401 */
+ IA_CSS_SC_CONFIG_ID,
+
+ IA_CSS_NUM_CONFIGURATION_IDS
+};
+
+struct ia_css_config_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter iterator;
+ struct ia_css_isp_parameter copy_output;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter dvs;
+ struct ia_css_isp_parameter qplane;
+ struct ia_css_isp_parameter output0;
+ struct ia_css_isp_parameter output1;
+ struct ia_css_isp_parameter output;
+ struct ia_css_isp_parameter raw;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ struct ia_css_isp_parameter vf;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_CONFIGURATIONS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+
+int ia_css_configure_iterator(const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem);
+
+int ia_css_configure_copy_output(const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem);
+
+int ia_css_configure_crop(const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem);
+
+int ia_css_configure_fpn(const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem);
+
+int ia_css_configure_dvs(const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem);
+
+int ia_css_configure_qplane(const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem);
+int ia_css_configure_output0(const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem);
+
+int ia_css_configure_output1(const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem);
+
+int ia_css_configure_output(const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem);
+
+int ia_css_configure_raw(const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem);
+
+int ia_css_configure_tnr(const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem);
+
+int ia_css_configure_ref(const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem);
+
+int ia_css_configure_vf(const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem);
+
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+
+#endif /* _IA_CSS_ISP_CONFIG_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/ia_css_isp_params.c
new file mode 100644
index 000000000000..1cd3322b0da0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_params.c
@@ -0,0 +1,3335 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#define IA_CSS_INCLUDE_PARAMETERS
+#include "sh_css_params.h"
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
+#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
+#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/de/de_2/ia_css_de2.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
+#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
+#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
+#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_aa(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;
+
+ if (size) {
+ struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ t->strength = params->aa_config.strength;
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr() enter:\n");
+
+ ia_css_anr_encode((struct sh_css_isp_anr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->anr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr2(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr2() enter:\n");
+
+ ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->anr_thres,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_anr2() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bh(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_cnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_cnr() enter:\n");
+
+ ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_cnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_crop(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_crop() enter:\n");
+
+ ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->crop_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_crop() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_csc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_csc() enter:\n");
+
+ ia_css_csc_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_csc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_dp(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
+
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dp_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bnr() enter:\n");
+
+ ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_de(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
+
+ ia_css_de_encode((struct sh_css_isp_de_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->de_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ecd(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ecd() enter:\n");
+
+ ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ecd_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ecd() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_formats(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_formats() enter:\n");
+
+ ia_css_formats_encode((struct sh_css_isp_formats_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->formats_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_formats() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fpn(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_fpn() enter:\n");
+
+ ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fpn_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_fpn() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_gc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_encode((struct sh_css_isp_gc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->gc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->gc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ce(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
+
+ ia_css_ce_encode((struct sh_css_isp_ce_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ce_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yuv2rgb(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yuv2rgb() enter:\n");
+
+ ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yuv2rgb_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yuv2rgb() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_rgb2yuv(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_rgb2yuv() enter:\n");
+
+ ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->rgb2yuv_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_rgb2yuv() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_r_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_r_gamma() enter:\n");
+
+ ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->r_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_r_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_g_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_g_gamma() enter:\n");
+
+ ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->g_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_g_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_b_gamma(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_b_gamma() enter:\n");
+
+ ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
+ &params->b_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_b_gamma() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_uds(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
+
+ if (size) {
+ struct sh_css_sp_uds_params *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_uds() enter:\n");
+
+ p = (struct sh_css_sp_uds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->crop_pos = params->uds_config.crop_pos;
+ p->uds = params->uds_config.uds;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_uds() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_raa(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_raa() enter:\n");
+
+ ia_css_raa_encode((struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->raa_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_raa() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_s3a(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_s3a() enter:\n");
+
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_s3a() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ob(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_encode((struct sh_css_isp_ob_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_output(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_output() enter:\n");
+
+ ia_css_output_encode((struct sh_css_isp_output_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->output_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_output() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
+
+ ia_css_sc_encode((struct sh_css_isp_sc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->sc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bds(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
+
+ if (size) {
+ struct sh_css_isp_bds_params *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bds() enter:\n");
+
+ p = (struct sh_css_isp_bds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->baf_strength = params->bds_config.strength;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_bds() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_tnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_tnr() enter:\n");
+
+ ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->tnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_tnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_macc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_macc() enter:\n");
+
+ ia_css_macc_encode((struct sh_css_isp_macc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->macc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_macc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horicoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horicoef() enter:\n");
+
+ ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horicoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertcoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertcoef() enter:\n");
+
+ ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertcoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horiproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horiproj() enter:\n");
+
+ ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_horiproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertproj() enter:\n");
+
+ ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis_vertproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horicoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horicoef() enter:\n");
+
+ ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horicoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertcoef(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertcoef() enter:\n");
+
+ ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertcoef() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horiproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horiproj() enter:\n");
+
+ ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_horiproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertproj(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertproj() enter:\n");
+
+ ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_sdis2_vertproj() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_wb(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
+
+ ia_css_wb_encode((struct sh_css_isp_wb_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->wb_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_nr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
+
+ ia_css_nr_encode((struct sh_css_isp_ynr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yee(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yee() enter:\n");
+
+ ia_css_yee_encode((struct sh_css_isp_yee_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yee_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_yee() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ynr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ynr() enter:\n");
+
+ ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ynr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ynr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
+
+ ia_css_fc_encode((struct sh_css_isp_fc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ctc(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ctc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() leave:\n");
+ }
+ }
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->ctc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_ctc() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr_table(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr_table() enter:\n");
+
+ ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->xnr_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr_table() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr() enter:\n");
+
+ ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr3(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ {
+ unsigned int size =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
+
+ unsigned int offset =
+ stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
+ true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_process_xnr3() leave:\n");
+ }
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params) = {
+ ia_css_process_aa,
+ ia_css_process_anr,
+ ia_css_process_anr2,
+ ia_css_process_bh,
+ ia_css_process_cnr,
+ ia_css_process_crop,
+ ia_css_process_csc,
+ ia_css_process_dp,
+ ia_css_process_bnr,
+ ia_css_process_de,
+ ia_css_process_ecd,
+ ia_css_process_formats,
+ ia_css_process_fpn,
+ ia_css_process_gc,
+ ia_css_process_ce,
+ ia_css_process_yuv2rgb,
+ ia_css_process_rgb2yuv,
+ ia_css_process_r_gamma,
+ ia_css_process_g_gamma,
+ ia_css_process_b_gamma,
+ ia_css_process_uds,
+ ia_css_process_raa,
+ ia_css_process_s3a,
+ ia_css_process_ob,
+ ia_css_process_output,
+ ia_css_process_sc,
+ ia_css_process_bds,
+ ia_css_process_tnr,
+ ia_css_process_macc,
+ ia_css_process_sdis_horicoef,
+ ia_css_process_sdis_vertcoef,
+ ia_css_process_sdis_horiproj,
+ ia_css_process_sdis_vertproj,
+ ia_css_process_sdis2_horicoef,
+ ia_css_process_sdis2_vertcoef,
+ ia_css_process_sdis2_horiproj,
+ ia_css_process_sdis2_vertproj,
+ ia_css_process_wb,
+ ia_css_process_nr,
+ ia_css_process_yee,
+ ia_css_process_ynr,
+ ia_css_process_fc,
+ ia_css_process_ctc,
+ ia_css_process_xnr_table,
+ ia_css_process_xnr,
+ ia_css_process_xnr3,
+};
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_dp_config() enter: config=%p\n",
+ config);
+
+ *config = params->dp_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_dp_config() leave\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dp_config = *config;
+ params->config_changed[IA_CSS_DP_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_dp_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_wb_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_wb_config() enter: config=%p\n",
+ config);
+
+ *config = params->wb_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_wb_config() leave\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->wb_config = *config;
+ params->config_changed[IA_CSS_WB_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_wb_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_tnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_tnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->tnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_tnr_config() leave\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->tnr_config = *config;
+ params->config_changed[IA_CSS_TNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_tnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ob_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ob_config() enter: config=%p\n",
+ config);
+
+ *config = params->ob_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ob_config() leave\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ob_config = *config;
+ params->config_changed[IA_CSS_OB_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ob_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_de_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_de_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_de_config() enter: config=%p\n",
+ config);
+
+ *config = params->de_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_de_config() leave\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->de_config = *config;
+ params->config_changed[IA_CSS_DE_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_de_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr_config() enter: config=%p\n",
+ config);
+
+ *config = params->anr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr_config() leave\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_config = *config;
+ params->config_changed[IA_CSS_ANR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_anr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_thres *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr2_config() enter: config=%p\n",
+ config);
+
+ *config = params->anr_thres;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_anr2_config() leave\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_thres = *config;
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_anr2_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ce_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ce_config() enter: config=%p\n",
+ config);
+
+ *config = params->ce_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ce_config() leave\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ce_config = *config;
+ params->config_changed[IA_CSS_CE_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ce_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ecd_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ecd_config() enter: config=%p\n",
+ config);
+
+ *config = params->ecd_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ecd_config() leave\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ecd_config = *config;
+ params->config_changed[IA_CSS_ECD_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ecd_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ynr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ynr_config() enter: config=%p\n",
+ config);
+
+ *config = params->ynr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ynr_config() leave\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ynr_config = *config;
+ params->config_changed[IA_CSS_YNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ynr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_fc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_fc_config() enter: config=%p\n",
+ config);
+
+ *config = params->fc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_fc_config() leave\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->fc_config = *config;
+ params->config_changed[IA_CSS_FC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_fc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_cnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->cnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_cnr_config() leave\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cnr_config = *config;
+ params->config_changed[IA_CSS_CNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_cnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_macc_config() enter: config=%p\n",
+ config);
+
+ *config = params->macc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_macc_config() leave\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->macc_config = *config;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_macc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ctc_config() enter: config=%p\n",
+ config);
+
+ *config = params->ctc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_ctc_config() leave\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ctc_config = *config;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_ctc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_aa_config() enter: config=%p\n",
+ config);
+
+ *config = params->aa_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_aa_config() leave\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
+ params->aa_config = *config;
+ params->config_changed[IA_CSS_AA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_aa_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_yuv2rgb_config() enter: config=%p\n",
+ config);
+
+ *config = params->yuv2rgb_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_yuv2rgb_config() leave\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->yuv2rgb_cc_config = *config;
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_yuv2rgb_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_rgb2yuv_config() enter: config=%p\n",
+ config);
+
+ *config = params->rgb2yuv_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_rgb2yuv_config() leave\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->rgb2yuv_cc_config = *config;
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_rgb2yuv_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_csc_config() enter: config=%p\n",
+ config);
+
+ *config = params->cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_csc_config() leave\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cc_config = *config;
+ params->config_changed[IA_CSS_CSC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_csc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_nr_config() enter: config=%p\n",
+ config);
+
+ *config = params->nr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_nr_config() leave\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->nr_config = *config;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+ params->config_changed[IA_CSS_NR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_nr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_gc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_gc_config() enter: config=%p\n",
+ config);
+
+ *config = params->gc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_gc_config() leave\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->gc_config = *config;
+ params->config_changed[IA_CSS_GC_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_gc_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horicoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horicoef_config() leave\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_horicoef_config() enter:\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_horicoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertcoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertcoef_config() leave\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_vertcoef_config() enter:\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_vertcoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horiproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_horiproj_config() leave\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_horiproj_config() enter:\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_horiproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis_vertproj_config() leave\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis_vertproj_config() enter:\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis_vertproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horicoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horicoef_config() leave\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_horicoef_config() enter:\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_horicoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertcoef_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertcoef_config() leave\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_vertcoef_config() enter:\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_vertcoef_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horiproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_horiproj_config() leave\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_horiproj_config() enter:\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_horiproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertproj_config() enter: config=%p\n",
+ config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_sdis2_vertproj_config() leave\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_sdis2_vertproj_config() enter:\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_sdis2_vertproj_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_r_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->r_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_r_gamma_config() leave\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->r_gamma_table = *config;
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_r_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_g_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->g_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_g_gamma_config() leave\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->g_gamma_table = *config;
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_g_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_b_gamma_config() enter: config=%p\n",
+ config);
+
+ *config = params->b_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_b_gamma_config() leave\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->b_gamma_table = *config;
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_b_gamma_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_table_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_table_config() leave\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_xnr_table_config() enter:\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_table = *config;
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr_table_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_formats_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_formats_config() enter: config=%p\n",
+ config);
+
+ *config = params->formats_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_formats_config() leave\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->formats_config = *config;
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_formats_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr_config() leave\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_config = *config;
+ params->config_changed[IA_CSS_XNR_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr3_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr3_config() enter: config=%p\n",
+ config);
+
+ *config = params->xnr3_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_xnr3_config() leave\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr3_config = *config;
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_xnr3_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_3a_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_s3a_config() enter: config=%p\n",
+ config);
+
+ *config = params->s3a_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_s3a_config() leave\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->s3a_config = *config;
+ params->config_changed[IA_CSS_BH_ID] = true;
+ params->config_changed[IA_CSS_S3A_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_s3a_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_output_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_output_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_output_config() enter: config=%p\n",
+ config);
+
+ *config = params->output_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_get_output_config() leave\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config)
+{
+ if (!config)
+ return;
+
+ assert(params);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->output_config = *config;
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_set_output_config() leave: return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_get_dp_config(params, config->dp_config);
+ ia_css_get_wb_config(params, config->wb_config);
+ ia_css_get_tnr_config(params, config->tnr_config);
+ ia_css_get_ob_config(params, config->ob_config);
+ ia_css_get_de_config(params, config->de_config);
+ ia_css_get_anr_config(params, config->anr_config);
+ ia_css_get_anr2_config(params, config->anr_thres);
+ ia_css_get_ce_config(params, config->ce_config);
+ ia_css_get_ecd_config(params, config->ecd_config);
+ ia_css_get_ynr_config(params, config->ynr_config);
+ ia_css_get_fc_config(params, config->fc_config);
+ ia_css_get_cnr_config(params, config->cnr_config);
+ ia_css_get_macc_config(params, config->macc_config);
+ ia_css_get_ctc_config(params, config->ctc_config);
+ ia_css_get_aa_config(params, config->aa_config);
+ ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_get_csc_config(params, config->cc_config);
+ ia_css_get_nr_config(params, config->nr_config);
+ ia_css_get_gc_config(params, config->gc_config);
+ ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_get_r_gamma_config(params, config->r_gamma_table);
+ ia_css_get_g_gamma_config(params, config->g_gamma_table);
+ ia_css_get_b_gamma_config(params, config->b_gamma_table);
+ ia_css_get_xnr_table_config(params, config->xnr_table);
+ ia_css_get_formats_config(params, config->formats_config);
+ ia_css_get_xnr_config(params, config->xnr_config);
+ ia_css_get_xnr3_config(params, config->xnr3_config);
+ ia_css_get_s3a_config(params, config->s3a_config);
+ ia_css_get_output_config(params, config->output_config);
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_set_dp_config(params, config->dp_config);
+ ia_css_set_wb_config(params, config->wb_config);
+ ia_css_set_tnr_config(params, config->tnr_config);
+ ia_css_set_ob_config(params, config->ob_config);
+ ia_css_set_de_config(params, config->de_config);
+ ia_css_set_anr_config(params, config->anr_config);
+ ia_css_set_anr2_config(params, config->anr_thres);
+ ia_css_set_ce_config(params, config->ce_config);
+ ia_css_set_ecd_config(params, config->ecd_config);
+ ia_css_set_ynr_config(params, config->ynr_config);
+ ia_css_set_fc_config(params, config->fc_config);
+ ia_css_set_cnr_config(params, config->cnr_config);
+ ia_css_set_macc_config(params, config->macc_config);
+ ia_css_set_ctc_config(params, config->ctc_config);
+ ia_css_set_aa_config(params, config->aa_config);
+ ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_set_csc_config(params, config->cc_config);
+ ia_css_set_nr_config(params, config->nr_config);
+ ia_css_set_gc_config(params, config->gc_config);
+ ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_set_r_gamma_config(params, config->r_gamma_table);
+ ia_css_set_g_gamma_config(params, config->g_gamma_table);
+ ia_css_set_b_gamma_config(params, config->b_gamma_table);
+ ia_css_set_xnr_table_config(params, config->xnr_table);
+ ia_css_set_formats_config(params, config->formats_config);
+ ia_css_set_xnr_config(params, config->xnr_config);
+ ia_css_set_xnr3_config(params, config->xnr3_config);
+ ia_css_set_s3a_config(params, config->s3a_config);
+ ia_css_set_output_config(params, config->output_config);
+}
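
The generated accessors above all follow one pattern: each setter copies the caller's struct into params and flags the kernel's config_changed slot, each getter copies it back out, and ia_css_set_configs()/ia_css_get_configs() simply fan out over every kernel, with each per-kernel function returning early on a NULL pointer. A minimal usage sketch, assuming struct ia_css_isp_config carries plain (non-const) pointers to the per-kernel structs; the wrapper name is illustrative and not part of this patch:

#include "ia_css_isp_params.h"	/* setters generated in this patch */

/* Illustrative sketch: push only the NR kernel's parameters through the
 * aggregate setter. Every other member of cfg stays NULL, so the other
 * generated setters return early and leave params untouched. */
static void example_apply_nr_only(struct ia_css_isp_parameters *params,
				  struct ia_css_nr_config *nr)
{
	struct ia_css_isp_config cfg = { 0 };	/* all kernel pointers NULL */

	cfg.nr_config = nr;
	/* ia_css_set_nr_config() marks both IA_CSS_BNR_ID and IA_CSS_NR_ID
	 * as changed, exactly as in the generated code above. */
	ia_css_set_configs(params, &cfg);
}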
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/ia_css_isp_params.h
new file mode 100644
index 000000000000..a542f8979905
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_params.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_PARAM_H
+#define _IA_CSS_ISP_PARAM_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_parameter_ids {
+ IA_CSS_AA_ID,
+ IA_CSS_ANR_ID,
+ IA_CSS_ANR2_ID,
+ IA_CSS_BH_ID,
+ IA_CSS_CNR_ID,
+ IA_CSS_CROP_ID,
+ IA_CSS_CSC_ID,
+ IA_CSS_DP_ID,
+ IA_CSS_BNR_ID,
+ IA_CSS_DE_ID,
+ IA_CSS_ECD_ID,
+ IA_CSS_FORMATS_ID,
+ IA_CSS_FPN_ID,
+ IA_CSS_GC_ID,
+ IA_CSS_CE_ID,
+ IA_CSS_YUV2RGB_ID,
+ IA_CSS_RGB2YUV_ID,
+ IA_CSS_R_GAMMA_ID,
+ IA_CSS_G_GAMMA_ID,
+ IA_CSS_B_GAMMA_ID,
+ IA_CSS_UDS_ID,
+ IA_CSS_RAA_ID,
+ IA_CSS_S3A_ID,
+ IA_CSS_OB_ID,
+ IA_CSS_OUTPUT_ID,
+ IA_CSS_SC_ID,
+ IA_CSS_BDS_ID,
+ IA_CSS_TNR_ID,
+ IA_CSS_MACC_ID,
+ IA_CSS_SDIS_HORICOEF_ID,
+ IA_CSS_SDIS_VERTCOEF_ID,
+ IA_CSS_SDIS_HORIPROJ_ID,
+ IA_CSS_SDIS_VERTPROJ_ID,
+ IA_CSS_SDIS2_HORICOEF_ID,
+ IA_CSS_SDIS2_VERTCOEF_ID,
+ IA_CSS_SDIS2_HORIPROJ_ID,
+ IA_CSS_SDIS2_VERTPROJ_ID,
+ IA_CSS_WB_ID,
+ IA_CSS_NR_ID,
+ IA_CSS_YEE_ID,
+ IA_CSS_YNR_ID,
+ IA_CSS_FC_ID,
+ IA_CSS_CTC_ID,
+ IA_CSS_XNR_TABLE_ID,
+ IA_CSS_XNR_ID,
+ IA_CSS_XNR3_ID,
+ IA_CSS_NUM_PARAMETER_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter anr;
+ struct ia_css_isp_parameter bh;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter csc;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter bnr;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ecd;
+ struct ia_css_isp_parameter formats;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter ce;
+ struct ia_css_isp_parameter yuv2rgb;
+ struct ia_css_isp_parameter rgb2yuv;
+ struct ia_css_isp_parameter uds;
+ struct ia_css_isp_parameter raa;
+ struct ia_css_isp_parameter s3a;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter output;
+ struct ia_css_isp_parameter sc;
+ struct ia_css_isp_parameter bds;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter macc;
+ struct ia_css_isp_parameter sdis_horiproj;
+ struct ia_css_isp_parameter sdis_vertproj;
+ struct ia_css_isp_parameter sdis2_horiproj;
+ struct ia_css_isp_parameter sdis2_vertproj;
+ struct ia_css_isp_parameter wb;
+ struct ia_css_isp_parameter nr;
+ struct ia_css_isp_parameter yee;
+ struct ia_css_isp_parameter ynr;
+ struct ia_css_isp_parameter fc;
+ struct ia_css_isp_parameter ctc;
+ struct ia_css_isp_parameter xnr;
+ struct ia_css_isp_parameter xnr3;
+ struct ia_css_isp_parameter get;
+ struct ia_css_isp_parameter put;
+ } dmem;
+ struct {
+ struct ia_css_isp_parameter anr2;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter sdis_horicoef;
+ struct ia_css_isp_parameter sdis_vertcoef;
+ struct ia_css_isp_parameter sdis2_horicoef;
+ struct ia_css_isp_parameter sdis2_vertcoef;
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter bh;
+ } hmem0;
+ struct {
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter g_gamma;
+ struct ia_css_isp_parameter xnr_table;
+ } vamem1;
+ struct {
+ struct ia_css_isp_parameter r_gamma;
+ struct ia_css_isp_parameter ctc;
+ } vamem0;
+ struct {
+ struct ia_css_isp_parameter b_gamma;
+ } vamem2;
+};
+
+#if defined(IA_CSS_INCLUDE_PARAMETERS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+struct ia_css_pipeline_stage; /* forward declaration */
+
+extern void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config);
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+;
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+;
+
+#endif /* IA_CSS_INCLUDE_PARAMETERS */
+#endif /* _IA_CSS_ISP_PARAM_H */
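
Beyond the setter prototypes, the header exposes the generated dispatch table ia_css_kernel_process_param[], indexed by enum ia_css_parameter_ids and only visible when IA_CSS_INCLUDE_PARAMETERS is defined. A hedged sketch of walking that table; the wrapper name is hypothetical, and the NULL check assumes entries may be left unset for kernels a binary does not use:

#define IA_CSS_INCLUDE_PARAMETERS
#include "ia_css_isp_params.h"

/* Illustrative sketch: run every registered per-kernel parameter hook
 * for one pipeline stage. */
static void example_process_all_kernels(unsigned int pipe_id,
					const struct ia_css_pipeline_stage *stage,
					struct ia_css_isp_parameters *params)
{
	unsigned int id;

	for (id = 0; id < IA_CSS_NUM_PARAMETER_IDS; id++)
		if (ia_css_kernel_process_param[id])
			ia_css_kernel_process_param[id](pipe_id, stage, params);
}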
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/ia_css_isp_states.c
new file mode 100644
index 000000000000..af1765040464
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_states.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_states.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_aa_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_aa_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;
+
+ if (size)
+ memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ 0, size);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_aa_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;
+
+ if (size) {
+ ia_css_init_cnr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr2_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr2_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;
+
+ if (size) {
+ ia_css_init_cnr2_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_cnr2_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_dp_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_dp_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;
+
+ if (size) {
+ ia_css_init_dp_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_dp_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_de_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_de_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.de.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;
+
+ if (size) {
+ ia_css_init_de_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_de_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_tnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_tnr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_tnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ref_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ref_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;
+
+ if (size) {
+ ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ref_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ynr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ynr_state() enter:\n");
+
+ {
+ unsigned int size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
+
+ unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;
+
+ if (size) {
+ ia_css_init_ynr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_initialize_ynr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+void (*ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(
+ const struct ia_css_binary *binary) = {
+ ia_css_initialize_aa_state,
+ ia_css_initialize_cnr_state,
+ ia_css_initialize_cnr2_state,
+ ia_css_initialize_dp_state,
+ ia_css_initialize_de_state,
+ ia_css_initialize_tnr_state,
+ ia_css_initialize_ref_state,
+ ia_css_initialize_ynr_state,
+};
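
The table above pairs one initializer with each enum ia_css_state_ids entry (declared in ia_css_isp_states.h below). A short sketch of invoking the whole table for one binary; the wrapper name is illustrative only, and it assumes binary->info->mem_offsets has already been populated, since every initializer dereferences it:

#include "ia_css_isp_states.h"

/* Illustrative sketch: initialize every per-kernel ISP state buffer of
 * a binary via the generated table. */
static void example_init_all_states(const struct ia_css_binary *binary)
{
	unsigned int i;

	for (i = 0; i < IA_CSS_NUM_STATE_IDS; i++)
		ia_css_kernel_init_state[i](binary);
}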
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/ia_css_isp_states.h
new file mode 100644
index 000000000000..d637ea1d13f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_states.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#define IA_CSS_INCLUDE_STATES
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_STATE_H
+#define _IA_CSS_ISP_STATE_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_state_ids {
+ IA_CSS_AA_STATE_ID,
+ IA_CSS_CNR_STATE_ID,
+ IA_CSS_CNR2_STATE_ID,
+ IA_CSS_DP_STATE_ID,
+ IA_CSS_DE_STATE_ID,
+ IA_CSS_TNR_STATE_ID,
+ IA_CSS_REF_STATE_ID,
+ IA_CSS_YNR_STATE_ID,
+ IA_CSS_NUM_STATE_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_state_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter cnr2;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ynr;
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_STATES)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+extern void (*ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(
+ const struct ia_css_binary *binary);
+
+#endif /* IA_CSS_INCLUDE_STATES */
+
+#endif /* _IA_CSS_ISP_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_metadata.h b/drivers/staging/media/atomisp/pci/ia_css_metadata.h
new file mode 100644
index 000000000000..348836175770
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_metadata.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_METADATA_H
+#define __IA_CSS_METADATA_H
+
+/* @file
+ * This file contains the structures for processing sensor metadata.
+ */
+
+#include <linux/build_bug.h>
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_stream_format.h"
+
+/* Metadata configuration. This data structure contains necessary info
+ * to process sensor metadata.
+ */
+struct ia_css_metadata_config {
+ enum atomisp_input_format data_type; /** Data type of CSI-2 embedded
+ data. The default value is ATOMISP_INPUT_FORMAT_EMBEDDED. For
+ certain sensors, user can choose non-default data type for embedded
+ data. */
+ struct ia_css_resolution resolution; /** Resolution */
+};
+
+struct ia_css_metadata_info {
+ struct ia_css_resolution resolution; /** Resolution */
+ u32 stride; /** Stride in bytes */
+ u32 size; /** Total size in bytes */
+};
+
+struct ia_css_metadata {
+ struct ia_css_metadata_info info; /** Layout info */
+ ia_css_ptr address; /** CSS virtual address */
+ u32 exp_id;
+ /** Exposure ID, see ia_css_event_public.h for more detail */
+};
+
+#define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata)
+
+static_assert(sizeof(struct ia_css_metadata) == SIZE_OF_IA_CSS_METADATA_STRUCT);
+
+/* @brief Allocate a metadata buffer.
+ * @param[in] metadata_info Metadata info struct, contains details on metadata buffers.
+ * @return Pointer to the metadata buffer, or NULL on error.
+ *
+ * This function allocates a metadata buffer according to the properties
+ * specified in the metadata_info struct.
+ */
+struct ia_css_metadata *
+ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info);
+
+/* @brief Free a metadata buffer.
+ *
+ * @param[in]	metadata	Pointer to the metadata buffer.
+ * @return None
+ *
+ * This function frees a metadata buffer.
+ */
+void
+ia_css_metadata_free(struct ia_css_metadata *metadata);
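+
+/* Illustrative allocate/free pairing (sketch only; md_info is assumed to be a
+ * struct ia_css_metadata_info obtained elsewhere, e.g. from stream info):
+@code
+	struct ia_css_metadata *md = ia_css_metadata_allocate(&md_info);
+
+	if (md) {
+		// ... hand md->address to the pipe and consume the buffer ...
+		ia_css_metadata_free(md);
+	}
+@endcode
+ */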
+
+#endif /* __IA_CSS_METADATA_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
new file mode 100644
index 000000000000..9fb178c8f3a5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MIPI_H
+#define __IA_CSS_MIPI_H
+
+/* @file
+ * This file contains MIPI support functionality
+ */
+
+#include <type_support.h>
+#include "ia_css_err.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_input_port.h"
+
+/* @brief Calculate the size of a mipi frame.
+ *
+ * @param[in] width The width (in pixels) of the frame.
+ * @param[in] height The height (in lines) of the frame.
+ * @param[in] format The frame (MIPI) format.
+ * @param[in]	hasSOLandEOL	Whether frame (MIPI) contains (optional) SOL and EOL packets.
+ * @param[in] embedded_data_size_words Embedded data size in memory words.
+ * @param[out]	size_mem_words	The mipi frame size in memory words (32B).
+ * @return The error code.
+ *
+ * Calculate the size of a mipi frame, based on the resolution and format.
+ */
+int
+ia_css_mipi_frame_calculate_size(const unsigned int width,
+ const unsigned int height,
+ const enum atomisp_input_format format,
+ const bool hasSOLandEOL,
+ const unsigned int embedded_data_size_words,
+ unsigned int *size_mem_words);
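+
+/* Illustrative call (sketch only; the resolution and the format constant from
+ * atomisp_platform.h are placeholder assumptions, not driver defaults):
+@code
+	unsigned int words = 0;
+	int err;
+
+	err = ia_css_mipi_frame_calculate_size(1920, 1080,
+					       ATOMISP_INPUT_FORMAT_RAW_10,
+					       false, 0, &words);
+	// on success (err == 0), words holds the size in 32-byte memory words
+@endcode
+ */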
+
+#endif /* __IA_CSS_MIPI_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mmu.h b/drivers/staging/media/atomisp/pci/ia_css_mmu.h
new file mode 100644
index 000000000000..8dc02c8f60a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_mmu.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MMU_H
+#define __IA_CSS_MMU_H
+
+/* @file
+ * This file contains one support function for invalidating the CSS MMU cache
+ */
+
+/* @brief Invalidate the MMU internal cache.
+ * @return None
+ *
+ * This function triggers an invalidation of the translation look-aside
+ * buffer (TLB) inside the CSS MMU. This function should be called
+ * every time the page tables used by the MMU change.
+ */
+void
+ia_css_mmu_invalidate_cache(void);
+
+#endif /* __IA_CSS_MMU_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mmu_private.h b/drivers/staging/media/atomisp/pci/ia_css_mmu_private.h
new file mode 100644
index 000000000000..4b5e09e051b5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_mmu_private.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MMU_PRIVATE_H
+#define __IA_CSS_MMU_PRIVATE_H
+
+#include "system_local.h"
+
+/*
+ * This function sets the L1 pagetable address.
+ * After power-up of the ISP the L1 pagetable can be set.
+ * Once being set the L1 pagetable is protected against
+ * further modifications.
+ */
+void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index);
+
+#endif /* __IA_CSS_MMU_PRIVATE_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_morph.h b/drivers/staging/media/atomisp/pci/ia_css_morph.h
new file mode 100644
index 000000000000..68997b26c70a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_morph.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MORPH_H
+#define __IA_CSS_MORPH_H
+
+/* @file
+ * This file contains support for the morphing table
+ */
+
+#include <ia_css_types.h>
+
+/* @brief Allocate a morphing table
+ * @param[in] width Width of the morphing table.
+ * @param[in] height Height of the morphing table.
+ * @return Pointer to the morphing table
+*/
+struct ia_css_morph_table *
+ia_css_morph_table_allocate(unsigned int width, unsigned int height);
+
+/* @brief Free the morph table
+ * @param[in] me Pointer to the morph table.
+ * @return None
+*/
+void
+ia_css_morph_table_free(struct ia_css_morph_table *me);
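+
+/* Illustrative pairing (sketch only; width and height are placeholders):
+@code
+	struct ia_css_morph_table *tab = ia_css_morph_table_allocate(width, height);
+
+	if (tab) {
+		// ... fill the table and pass it via the ISP configuration ...
+		ia_css_morph_table_free(tab);
+	}
+@endcode
+ */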
+
+#endif /* __IA_CSS_MORPH_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_pipe.h b/drivers/staging/media/atomisp/pci/ia_css_pipe.h
new file mode 100644
index 000000000000..77072694eb42
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_pipe.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PIPE_H__
+#define __IA_CSS_PIPE_H__
+
+#include <type_support.h>
+#include "ia_css_stream.h"
+#include "ia_css_frame.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_binary.h"
+#include "sh_css_legacy.h"
+
+#define PIPE_ENTRY_EMPTY_TOKEN (~0U)
+#define PIPE_ENTRY_RESERVED_TOKEN (0x1)
+
+struct ia_css_preview_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary preview_binary;
+ struct ia_css_binary vf_pp_binary;
+
+ /* 2401 only for these two - do we in fact use them for anything real */
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
+
+ struct ia_css_pipe *copy_pipe;
+ struct ia_css_pipe *capture_pipe;
+};
+
+#define IA_CSS_DEFAULT_PREVIEW_SETTINGS { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .preview_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct ia_css_capture_settings {
+ struct ia_css_binary copy_binary;
+ /* we extend primary binary to multiple stages because in ISP2.6.1
+ * the computation load is too high to fit in one single binary. */
+ struct ia_css_binary primary_binary[MAX_NUM_PRIMARY_STAGES];
+ unsigned int num_primary_stage;
+ struct ia_css_binary pre_isp_binary;
+ struct ia_css_binary anr_gdc_binary;
+ struct ia_css_binary post_isp_binary;
+ struct ia_css_binary capture_pp_binary;
+ struct ia_css_binary vf_pp_binary;
+ struct ia_css_binary capture_ldc_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+};
+
+#define IA_CSS_DEFAULT_CAPTURE_SETTINGS { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .primary_binary = {IA_CSS_BINARY_DEFAULT_SETTINGS}, \
+ .pre_isp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .anr_gdc_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .post_isp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .capture_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .capture_ldc_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct ia_css_video_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary video_binary;
+ struct ia_css_binary vf_pp_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
+ struct ia_css_frame *vf_pp_in_frame;
+ struct ia_css_pipe *copy_pipe;
+ struct ia_css_pipe *capture_pipe;
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+};
+
+#define IA_CSS_DEFAULT_VIDEO_SETTINGS { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .video_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+ .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct ia_css_yuvpp_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_binary *vf_pp_binary;
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+ unsigned int num_vf_pp;
+ unsigned int num_output;
+};
+
+#define IA_CSS_DEFAULT_YUVPP_SETTINGS { \
+ .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \
+}
+
+struct osys_object;
+
+struct ia_css_pipe {
+ struct ia_css_pipe_config config;
+ struct ia_css_pipe_extra_config extra_config;
+ struct ia_css_pipe_info info;
+ enum ia_css_pipe_id mode;
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_pipeline pipeline;
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info bds_output_info;
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info out_yuv_ds_input_info;
+ struct ia_css_frame_info vf_yuv_ds_input_info;
+ struct ia_css_fw_info *output_stage; /* extra output stage */
+ struct ia_css_fw_info *vf_stage; /* extra vf_stage */
+ unsigned int required_bds_factor;
+ unsigned int dvs_frame_delay;
+ int num_invalid_frames;
+ bool enable_viewfinder[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_stream *stream;
+ struct ia_css_frame in_frame_struct;
+ struct ia_css_frame out_frame_struct;
+ struct ia_css_frame vf_frame_struct;
+ struct ia_css_frame *continuous_frames[NUM_CONTINUOUS_FRAMES];
+ struct ia_css_metadata *cont_md_buffers[NUM_CONTINUOUS_FRAMES];
+ union {
+ struct ia_css_preview_settings preview;
+ struct ia_css_video_settings video;
+ struct ia_css_capture_settings capture;
+ struct ia_css_yuvpp_settings yuvpp;
+ } pipe_settings;
+ ia_css_ptr scaler_pp_lut;
+ struct osys_object *osys_obj;
+
+	/* This number is unique per pipe for each instance of the CSS. It is
+	 * also reused as the pipeline number. There is a 1-1 mapping between
+	 * pipe_num and the SP thread id. Current logic limits pipe_num to
+	 * SH_CSS_MAX_SP_THREADS. */
+ unsigned int pipe_num;
+};
+
+#define IA_CSS_DEFAULT_PIPE { \
+ .config = DEFAULT_PIPE_CONFIG, \
+ .info = DEFAULT_PIPE_INFO, \
+ .mode = IA_CSS_PIPE_ID_VIDEO, /* (pipe_id) */ \
+ .pipeline = DEFAULT_PIPELINE, \
+ .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .bds_output_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .out_yuv_ds_input_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .vf_yuv_ds_input_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .required_bds_factor = SH_CSS_BDS_FACTOR_1_00, \
+ .dvs_frame_delay = 1, \
+ .enable_viewfinder = {true}, \
+ .in_frame_struct = DEFAULT_FRAME, \
+ .out_frame_struct = DEFAULT_FRAME, \
+ .vf_frame_struct = DEFAULT_FRAME, \
+ .pipe_settings = { \
+ .preview = IA_CSS_DEFAULT_PREVIEW_SETTINGS \
+ }, \
+ .pipe_num = PIPE_ENTRY_EMPTY_TOKEN, \
+}
+
+void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map);
+
+int
+sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ struct ia_css_isp_parameters *params,
+ bool commit, struct ia_css_pipe *pipe);
+
+#endif /* __IA_CSS_PIPE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h b/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h
new file mode 100644
index 000000000000..2bb06b0ff5db
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PIPE_PUBLIC_H
+#define __IA_CSS_PIPE_PUBLIC_H
+
+/* @file
+ * This file contains the public interface for CSS pipes.
+ */
+
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_buffer.h>
+/* ISP2401 */
+#include <ia_css_acc_types.h>
+
+enum {
+ IA_CSS_PIPE_OUTPUT_STAGE_0 = 0,
+ IA_CSS_PIPE_OUTPUT_STAGE_1,
+ IA_CSS_PIPE_MAX_OUTPUT_STAGE,
+};
+
+/* Enumeration of pipe modes. A mode is used to create
+ * an image pipe for that mode. These pipes can be combined
+ * to configure and run streams on the ISP.
+ *
+ * For example, one can create a preview and capture pipe to
+ * create a continuous capture stream.
+ */
+enum ia_css_pipe_mode {
+ IA_CSS_PIPE_MODE_PREVIEW, /** Preview pipe */
+ IA_CSS_PIPE_MODE_VIDEO, /** Video pipe */
+ IA_CSS_PIPE_MODE_CAPTURE, /** Still capture pipe */
+ IA_CSS_PIPE_MODE_COPY, /** Copy pipe, only used for embedded/image data copying */
+ IA_CSS_PIPE_MODE_YUVPP, /** YUV post processing pipe, used for all use cases with YUV input,
+ for SoC sensor and external ISP */
+};
+
+/* Temporary define */
+#define IA_CSS_PIPE_MODE_NUM (IA_CSS_PIPE_MODE_YUVPP + 1)
+
+/**
+ * Enumeration of pipe versions.
+ * the order should match with definition in sh_css_defs.h
+ */
+enum ia_css_pipe_version {
+ IA_CSS_PIPE_VERSION_1 = 1, /** ISP1.0 pipe */
+ IA_CSS_PIPE_VERSION_2_2 = 2, /** ISP2.2 pipe */
+ IA_CSS_PIPE_VERSION_2_6_1 = 3, /** ISP2.6.1 pipe */
+ IA_CSS_PIPE_VERSION_2_7 = 4 /** ISP2.7 pipe */
+};
+
+/**
+ * Pipe configuration structure.
+ * Resolution properties are filled in by the driver; kernel configurations are
+ * set by the AIC.
+ */
+struct ia_css_pipe_config {
+ enum ia_css_pipe_mode mode;
+ /** mode, indicates which mode the pipe should use. */
+ enum ia_css_pipe_version isp_pipe_version;
+ /** pipe version, indicates which imaging pipeline the pipe should use. */
+ struct ia_css_resolution input_effective_res;
+ /** input effective resolution */
+ struct ia_css_resolution bayer_ds_out_res;
+ /** bayer down scaling */
+ struct ia_css_resolution capt_pp_in_res;
+ /** capture post processing input resolution */
+ struct ia_css_resolution vf_pp_in_res;
+
+ /** ISP2401: view finder post processing input resolution */
+ struct ia_css_resolution output_system_in_res;
+	/** For IPU3 only: use output_system_in_res to specify the input resolution
+	     OSYS will receive; this resolution is equal to the output resolution of the GDC.
+	     If not set, CSS will set output_system_in_res to the main OSYS output pin resolution.
+	     All other IPUs may ignore this property. */
+ struct ia_css_resolution dvs_crop_out_res;
+ /** dvs crop, video only, not in use yet. Use dvs_envelope below. */
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** output of YUV scaling */
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** output of VF YUV scaling */
+ struct ia_css_capture_config default_capture_config;
+ /** Default capture config for initial capture pipe configuration. */
+ struct ia_css_resolution dvs_envelope; /** temporary */
+ enum ia_css_frame_delay dvs_frame_delay;
+ /** indicates the DVS loop delay in frame periods */
+ bool enable_dz;
+	/** Enable/disable digital zoom for a pipeline. If this is set to false,
+	     setting a zoom factor will have no effect.
+	     In some use cases this provides better performance. */
+ bool enable_dpc;
+	/** Enable/disable "Defect Pixel Correction" for a pipeline. DPC is disabled
+	     when this is set to false. In some use cases this provides better performance. */
+ bool enable_vfpp_bci;
+ /** Enabling BCI mode will cause yuv_scale binary to be picked up
+ instead of vf_pp. This only applies to viewfinder post
+ processing stages. */
+
+/* ISP2401 */
+ bool enable_tnr;
+ /** Enabling of TNR (temporal noise reduction). This is only applicable to video
+ pipes. Non video-pipes should always set this parameter to false. */
+
+ struct ia_css_isp_config *p_isp_config;
+ /** Pointer to ISP configuration */
+ struct ia_css_resolution gdc_in_buffer_res;
+ /** GDC in buffer resolution. */
+ struct ia_css_point gdc_in_buffer_offset;
+ /** GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
+
+/* ISP2401 */
+ struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl;
+ /** Origin of internal frame positioned on shading table at shading correction in ISP.
+ NOTE: Shading table is larger than or equal to internal frame.
+ Shading table has shading gains and internal frame has bayer data.
+ The origin of internal frame is used in shading correction in ISP
+ to retrieve shading gains which correspond to bayer data. */
+};
+
+/**
+ * Default settings for newly created pipe configurations.
+ */
+#define DEFAULT_PIPE_CONFIG { \
+ .mode = IA_CSS_PIPE_MODE_PREVIEW, \
+ .isp_pipe_version = 1, \
+ .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .default_capture_config = DEFAULT_CAPTURE_CONFIG, \
+ .dvs_frame_delay = IA_CSS_FRAME_DELAY_1, \
+}
+
+/* Pipe info, this struct describes properties of a pipe after its stream has
+ * been created.
+ * ~~~** DO NOT ADD NEW FIELD **~~~ This structure will be deprecated.
+ * - On the Behalf of CSS-API Committee.
+ */
+struct ia_css_pipe_info {
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** Info about output resolution. This contains the stride which
+ should be used for memory allocation. */
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /** Info about viewfinder output resolution (optional). This contains
+ the stride that should be used for memory allocation. */
+ struct ia_css_frame_info raw_output_info;
+ /** Raw output resolution. This indicates the resolution of the
+ RAW bayer output for pipes that support this. Currently, only the
+ still capture pipes support this feature. When this resolution is
+ smaller than the input resolution, cropping will be performed by
+ the ISP. The first cropping that will be performed is on the upper
+ left corner where we crop 8 lines and 8 columns to remove the
+ pixels normally used to initialize the ISP filters.
+ This is why the raw output resolution should normally be set to
+ the input resolution - 8x8. */
+ /* ISP2401 */
+ struct ia_css_resolution output_system_in_res_info;
+ /** For IPU3 only. Info about output system in resolution which is considered
+ as gdc out resolution. */
+ struct ia_css_shading_info shading_info;
+ /** After an image pipe is created, this field will contain the info
+ for the shading correction. */
+ struct ia_css_grid_info grid_info;
+ /** After an image pipe is created, this field will contain the grid
+ info for 3A and DVS. */
+ int num_invalid_frames;
+ /** The very first frames in a started stream do not contain valid data.
+ In this field, the CSS-firmware communicates to the host-driver how
+ many initial frames will contain invalid data; this allows the
+	    host-driver to discard those initial invalid frames and start its
+ output at the first valid frame. */
+};
+
+/**
+ * Defaults for ia_css_pipe_info structs.
+ */
+#define DEFAULT_PIPE_INFO {\
+ .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .raw_output_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .shading_info = DEFAULT_SHADING_INFO, \
+ .grid_info = DEFAULT_GRID_INFO, \
+}
+
+/* @brief Load default pipe configuration
+ * @param[out] pipe_config The pipe configuration.
+ * @return None
+ *
+ * This function will load the default pipe configuration:
+@code
+ struct ia_css_pipe_config def_config = {
+ IA_CSS_PIPE_MODE_PREVIEW, // mode
+ 1, // isp_pipe_version
+ {0, 0}, // bayer_ds_out_res
+ {0, 0}, // capt_pp_in_res
+ {0, 0}, // vf_pp_in_res
+ {0, 0}, // dvs_crop_out_res
+ {{0, 0}, 0, 0, 0, 0}, // output_info
+ {{0, 0}, 0, 0, 0, 0}, // second_output_info
+ {{0, 0}, 0, 0, 0, 0}, // vf_output_info
+ {{0, 0}, 0, 0, 0, 0}, // second_vf_output_info
+ {
+ IA_CSS_CAPTURE_MODE_RAW, // mode
+ false, // enable_xnr
+ false // enable_raw_output
+ }, // default_capture_config
+ {0, 0}, // dvs_envelope
+ 1, // dvs_frame_delay
+ true, // enable_dz
+ NULL, // p_isp_config
+ };
+@endcode
+ */
+void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config);
+
+/* @brief Create a pipe
+ * @param[in] config The pipe configuration.
+ * @param[out] pipe The pipe.
+ * @return 0 or the error code.
+ *
+ * This function will create a pipe with the given
+ * configuration.
+ */
+int
+ia_css_pipe_create(const struct ia_css_pipe_config *config,
+ struct ia_css_pipe **pipe);
+
+/* @brief Destroy a pipe
+ * @param[in] pipe The pipe.
+ * @return 0 or the error code.
+ *
+ * This function will destroy a given pipe.
+ */
+int
+ia_css_pipe_destroy(struct ia_css_pipe *pipe);
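+
+/* Illustrative create/destroy sequence (sketch only):
+@code
+	struct ia_css_pipe_config cfg;
+	struct ia_css_pipe *pipe = NULL;
+
+	ia_css_pipe_config_defaults(&cfg);
+	cfg.mode = IA_CSS_PIPE_MODE_PREVIEW;
+	if (!ia_css_pipe_create(&cfg, &pipe)) {
+		// ... add the pipe to a stream and run it ...
+		ia_css_pipe_destroy(pipe);
+	}
+@endcode
+ */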
+
+/* @brief Provides information about a pipe
+ * @param[in] pipe The pipe.
+ * @param[out] pipe_info The pipe information.
+ * @return 0 or -EINVAL.
+ *
+ * This function will provide information about a given pipe.
+ */
+int
+ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
+ struct ia_css_pipe_info *pipe_info);
+
+/* @brief Configure a pipe with filter coefficients.
+ * @param[in] pipe The pipe.
+ * @param[in] config The pointer to ISP configuration.
+ * @return 0 or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * pipe.
+ */
+int
+ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+/* @brief Controls when the Event generator raises an IRQ to the Host.
+ *
+ * @param[in] pipe The pipe.
+ * @param[in] or_mask Binary or of enum ia_css_event_irq_mask_type. Each pipe
+ related event that is part of this mask will directly
+ raise an IRQ to the Host when the event occurs in the
+ CSS.
+ * @param[in] and_mask Binary or of enum ia_css_event_irq_mask_type. An event
+ IRQ for the Host is only raised after all pipe related
+ events have occurred at least once for all the active
+			pipes. Events are remembered and don't need to occur
+ at the same moment in time. There is no control over
+ the order of these events. Once an IRQ has been raised
+ all remembered events are reset.
+ * @return 0.
+ *
+ Controls when the Event generator in the CSS raises an IRQ to the Host.
+ The main purpose of this function is to reduce the amount of interrupts
+ between the CSS and the Host. This helps save power as it wakes up the
+ Host less often. In case both or_mask and and_mask are
+ IA_CSS_EVENT_TYPE_NONE for all pipes, no event IRQs will be raised. An
+ exception holds for IA_CSS_EVENT_TYPE_PORT_EOF, for this event an IRQ is always
+ raised.
+ Note that events are still queued and the Host can poll for them. The
+ or_mask and and_mask may be active at the same time\n
+ \n
+ Default values, for all pipe id's, after ia_css_init:\n
+ or_mask = IA_CSS_EVENT_TYPE_ALL\n
+ and_mask = IA_CSS_EVENT_TYPE_NONE\n
+ \n
+ Examples\n
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe,
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE |
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE ,
+ IA_CSS_EVENT_TYPE_NONE);
+ \endcode
+ The event generator will only raise an interrupt to the Host when there are
+ 3A or DIS statistics available from the preview pipe. It will not generate
+ an interrupt for any other event of the preview pipe e.g when there is an
+ output frame available.
+
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe_preview,
+ IA_CSS_EVENT_TYPE_NONE,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE |
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE );
+
+ ia_css_pipe_set_irq_mask(h_pipe_capture,
+ IA_CSS_EVENT_TYPE_NONE,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE );
+ \endcode
+ The event generator will only raise an interrupt to the Host when there is
+ both a frame done and 3A event available from the preview pipe AND when there
+ is a frame done available from the capture pipe. Note that these events
+ may occur at different moments in time. Also the order of the events is not
+ relevant.
+
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe_preview,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,
+ IA_CSS_EVENT_TYPE_ALL );
+
+ ia_css_pipe_set_irq_mask(h_pipe_capture,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,
+ IA_CSS_EVENT_TYPE_ALL );
+ \endcode
+ The event generator will only raise an interrupt to the Host when there is an
+ output frame from the preview pipe OR an output frame from the capture pipe.
+ All other events (3A, VF output, pipeline done) will not raise an interrupt
+ to the Host. These events are not lost but always stored in the event queue.
+ */
+int
+ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ unsigned int or_mask,
+ unsigned int and_mask);
+
+/* @brief Reads the current event IRQ mask from the CSS.
+ *
+ * @param[in] pipe The pipe.
+ * @param[out] or_mask Current or_mask. The bits in this mask are a binary or
+ of enum ia_css_event_irq_mask_type. Pointer may be NULL.
+ * @param[out] and_mask Current and_mask.The bits in this mask are a binary or
+ of enum ia_css_event_irq_mask_type. Pointer may be NULL.
+ * @return 0.
+ *
+ Reads the current event IRQ mask from the CSS. Reading returns the actual
+ values as used by the SP and not any mirrored values stored at the Host.\n
+\n
+Precondition:\n
+SP must be running.\n
+
+*/
+int
+ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
+ unsigned int *or_mask,
+ unsigned int *and_mask);
+
+/* @brief Queue a buffer for an image pipe.
+ *
+ * @param[in] pipe The pipe that will own the buffer.
+ * @param[in] buffer Pointer to the buffer.
+ * Note that the caller remains owner of the buffer
+ * structure. Only the data pointer within it will
+ * be passed into the internal queues.
+ * @return IA_CSS_INTERNAL_ERROR in case of unexpected errors,
+ * 0 otherwise.
+ *
+ * This function adds a buffer (which has a certain buffer type) to the queue
+ * for this type. This queue is owned by the image pipe. After this function
+ * completes successfully, the buffer is now owned by the image pipe and should
+ * no longer be accessed by any other code until it gets dequeued. The image
+ * pipe will dequeue buffers from this queue, use them and return them to the
+ * host code via an interrupt. Buffers will be consumed in the same order they
+ * get queued, but may be returned to the host out of order.
+ */
+int
+ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
+ const struct ia_css_buffer *buffer);
+
+/* @brief Dequeue a buffer from an image pipe.
+ *
+ * @param[in] pipe The pipeline that the buffer queue belongs to.
+ * @param[in,out] buffer The buffer is used to lookup the type which determines
+ * which internal queue to use.
+ *                     The resulting buffer pointer is written into the data
+ * field.
+ * @return IA_CSS_ERR_NO_BUFFER if the queue is empty or
+ * 0 otherwise.
+ *
+ * This function dequeues a buffer from a buffer queue. The queue is indicated
+ * by the buffer type argument. This function can be called after an interrupt
+ * has been generated that signalled that a new buffer was available and can
+ * be used in a polling-like situation where the NO_BUFFER return value is used
+ * to determine whether a buffer was available or not.
+ */
+int
+ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
+ struct ia_css_buffer *buffer);
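+
+/* Illustrative queue/dequeue cycle (sketch only; pipe and buf are assumed to be
+ * set up already and the layout of struct ia_css_buffer is not repeated here):
+@code
+	int err;
+
+	// hand a prepared buffer of a given type to the pipe
+	err = ia_css_pipe_enqueue_buffer(pipe, &buf);
+
+	// later, typically after a buffer-done interrupt/event:
+	err = ia_css_pipe_dequeue_buffer(pipe, &buf);
+@endcode
+ */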
+
+/* @brief Get selected configuration settings
+ * @param[in] pipe The pipe.
+ * @param[out] config Configuration settings.
+ * @return None
+ */
+void
+ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+/* @brief Set the scaler LUT on this pipe. A copy of the LUT is made internally,
+ * so the LUT can be freed by the caller.
+ * @param[in] pipe Pipe handle.
+ * @param[in]  lut         Look-up table
+ *
+ * @return
+ * 0 : Success
+ * -EINVAL : Invalid Parameters
+ *
+ * Note:
+ * 1) Note that both GDC's are programmed with the same table.
+ * 2) Current implementation ignores the pipe and overrides the
+ * global lut. This will be fixed in the future
+ * 3) This function must be called before stream start
+ *
+ */
+int
+ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
+ const void *lut);
+/* @brief Check whether DVS statistics are supported
+ * @param[in] pipe_info The pipe info.
+ * @return true - has DVS statistics ability
+ * false - otherwise
+ */
+bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info);
+
+/* ISP2401 */
+/* @brief Override the frameformat set on the output pins.
+ * @param[in] pipe Pipe handle.
+ * @param[in] output_pin Pin index to set the format on
+ * 0 - main output pin
+ * 1 - display output pin
+ * @param[in] format Format to set
+ *
+ * @return
+ * 0 : Success
+ * -EINVAL : Invalid Parameters
+ * -EINVAL : Pipe misses binary info
+ *
+ * Note:
+ * 1) This is an optional function to override the formats set in the pipe.
+ * 2) Only overriding with IA_CSS_FRAME_FORMAT_NV12_TILEY is currently allowed.
+ * 3) This function is only to be used on pipes that use the output system.
+ * 4) If this function is used, it MUST be called after ia_css_pipe_create.
+ * 5) If this function is used, this function MUST be called before ia_css_stream_start.
+ */
+int
+ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
+ int output_pin,
+ enum ia_css_frame_format format);
+
+#endif /* __IA_CSS_PIPE_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_prbs.h b/drivers/staging/media/atomisp/pci/ia_css_prbs.h
new file mode 100644
index 000000000000..abdbcb8fda53
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_prbs.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PRBS_H
+#define __IA_CSS_PRBS_H
+
+/* @file
+ * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs
+ */
+
+/* Enumerate the PRBS IDs.
+ */
+enum ia_css_prbs_id {
+ IA_CSS_PRBS_ID0,
+ IA_CSS_PRBS_ID1,
+ IA_CSS_PRBS_ID2
+};
+
+/**
+ * Maximum number of PRBS IDs.
+ *
+ * Make sure the value of this define gets changed to reflect the correct
+ * number of ia_css_prbs_id enum if you add/delete an item in the enum.
+ */
+#define N_CSS_PRBS_IDS (IA_CSS_PRBS_ID2 + 1)
+
+/**
+ * PRBS configuration structure.
+ *
+ * Seed for the Pseudo Random Bit Sequence.
+ *
+ * @deprecated{This interface is deprecated, it is not portable -> move to input system API}
+ */
+struct ia_css_prbs_config {
+ enum ia_css_prbs_id id;
+ unsigned int h_blank; /** horizontal blank */
+ unsigned int v_blank; /** vertical blank */
+ int seed; /** random seed for the 1st 2-pixel-components/clock */
+ int seed1; /** random seed for the 2nd 2-pixel-components/clock */
+};
+
+#endif /* __IA_CSS_PRBS_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_properties.h b/drivers/staging/media/atomisp/pci/ia_css_properties.h
new file mode 100644
index 000000000000..3f087e2df99a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_properties.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PROPERTIES_H
+#define __IA_CSS_PROPERTIES_H
+
+/* @file
+ * This file contains support for retrieving properties of some hardware in the CSS system
+ */
+
+#include <type_support.h> /* bool */
+#include <ia_css_types.h> /* ia_css_vamem_type */
+
+struct ia_css_properties {
+ int gdc_coord_one;
+ bool l1_base_is_index; /** Indicate whether the L1 page base
+ is a page index or a byte address. */
+ enum ia_css_vamem_type vamem_type;
+};
+
+/* @brief Get hardware properties
+ * @param[in,out] properties The hardware properties
+ * @return None
+ *
+ * This function returns a number of hardware properties.
+ */
+void
+ia_css_get_properties(struct ia_css_properties *properties);
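+
+/* Illustrative use (sketch only):
+@code
+	struct ia_css_properties props;
+
+	ia_css_get_properties(&props);
+	// props.gdc_coord_one, props.l1_base_is_index and props.vamem_type
+	// now describe the hardware
+@endcode
+ */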
+
+#endif /* __IA_CSS_PROPERTIES_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_shading.h b/drivers/staging/media/atomisp/pci/ia_css_shading.h
new file mode 100644
index 000000000000..99ad21c4bd68
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_shading.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SHADING_H
+#define __IA_CSS_SHADING_H
+
+/* @file
+ * This file contains support for setting the shading table for CSS
+ */
+
+#include <ia_css_types.h>
+
+/* @brief Allocate a shading table
+ * @param[in] width Width of the shading table.
+ * @param[in] height Height of the shading table.
+ * @return Pointer to the shading table
+*/
+struct ia_css_shading_table *
+ia_css_shading_table_alloc(unsigned int width,
+ unsigned int height);
+
+/* @brief Free shading table
+ * @param[in] table Pointer to the shading table.
+ * @return None
+*/
+void
+ia_css_shading_table_free(struct ia_css_shading_table *table);
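+
+/* Illustrative alloc/free pairing (sketch only; width and height are placeholders):
+@code
+	struct ia_css_shading_table *tab =
+		ia_css_shading_table_alloc(width, height);
+
+	if (tab) {
+		// ... fill the shading gains and attach the table to the stream ...
+		ia_css_shading_table_free(tab);
+	}
+@endcode
+ */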
+
+#endif /* __IA_CSS_SHADING_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream.h b/drivers/staging/media/atomisp/pci/ia_css_stream.h
new file mode 100644
index 000000000000..c8de632a8e12
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_STREAM_H_
+#define _IA_CSS_STREAM_H_
+
+#include <type_support.h>
+#include <system_local.h>
+#include <input_system.h>
+#include "ia_css_types.h"
+#include "ia_css_stream_public.h"
+
+/**
+ * structure to hold all internal stream related information
+ */
+struct ia_css_stream {
+ struct ia_css_stream_config config;
+ struct ia_css_stream_info info;
+ rx_cfg_t csi_rx_config;
+ bool reconfigure_css_rx;
+ struct ia_css_pipe *last_pipe;
+ int num_pipes;
+ struct ia_css_pipe **pipes;
+ struct ia_css_pipe *continuous_pipe;
+ struct ia_css_isp_parameters *isp_params_configs;
+ struct ia_css_isp_parameters *per_frame_isp_params_configs;
+
+ bool cont_capt;
+ bool disable_cont_vf;
+
+ /* ISP2401 */
+ bool stop_copy_preview;
+ bool started;
+};
+
+/* @brief Get the binary in the stream that has the shading correction.
+ *
+ * @param[in] stream: The stream.
+ * @return The binary which has the shading correction.
+ *
+ */
+struct ia_css_binary *
+ia_css_stream_get_shading_correction_binary(const struct ia_css_stream *stream);
+
+struct ia_css_binary *
+ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream);
+
+struct ia_css_binary *
+ia_css_stream_get_3a_binary(const struct ia_css_stream *stream);
+
+unsigned int
+ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream);
+
+bool
+sh_css_params_set_binning_factor(struct ia_css_stream *stream,
+ unsigned int sensor_binning);
+
+void
+sh_css_invalidate_params(struct ia_css_stream *stream);
+
+/* The following functions are used for testing purposes only */
+const struct ia_css_fpn_table *
+ia_css_get_fpn_table(struct ia_css_stream *stream);
+
+/* @brief Get a pointer to the shading table.
+ *
+ * @param[in] stream: The stream.
+ * @return The pointer to the shading table.
+ *
+ */
+struct ia_css_shading_table *
+ia_css_get_shading_table(struct ia_css_stream *stream);
+
+void
+ia_css_get_isp_dis_coefficients(struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients);
+
+void
+ia_css_get_isp_dvs2_coefficients(struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag);
+
+int
+ia_css_stream_isp_parameters_init(struct ia_css_stream *stream);
+
+void
+ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream);
+
+#endif /*_IA_CSS_STREAM_H_*/
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream_format.h b/drivers/staging/media/atomisp/pci/ia_css_stream_format.h
new file mode 100644
index 000000000000..6188e281189b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream_format.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_STREAM_FORMAT_H
+#define __IA_CSS_STREAM_FORMAT_H
+
+/* @file
+ * This file contains formats usable for ISP streaming input
+ */
+
+#include <type_support.h> /* bool */
+#include "../../../include/linux/atomisp_platform.h"
+
+unsigned int ia_css_util_input_format_bpp(
+ enum atomisp_input_format format,
+ bool two_ppc);
+
+#endif /* __IA_CSS_STREAM_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
new file mode 100644
index 000000000000..a505f3797962
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_STREAM_PUBLIC_H
+#define __IA_CSS_STREAM_PUBLIC_H
+
+/* @file
+ * This file contains support for configuring and controlling streams
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_metadata.h"
+#include "ia_css_prbs.h"
+#include "ia_css_input_port.h"
+
+/*
+ * Input modes, these enumerate all supported input modes.
+ * This enum is part of the atomisp firmware ABI and must
+ * NOT be changed!
+ * Note that not all ISP modes support all input modes.
+ */
+enum ia_css_input_mode {
+ IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */
+ IA_CSS_INPUT_MODE_FIFO, /** data from input-fifo */
+ IA_CSS_INPUT_MODE_TPG, /** data from test-pattern generator */
+ IA_CSS_INPUT_MODE_PRBS, /** data from pseudo-random bit stream */
+ IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */
+ IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
+};
+
+/* Structure of the MIPI buffer configuration
+ */
+struct ia_css_mipi_buffer_config {
+ unsigned int size_mem_words; /** The frame size in the system memory
+ words (32B) */
+ bool contiguous; /** Allocated memory physically
+ contiguously or not. \deprecated{Will be false always.}*/
+ unsigned int nof_mipi_buffers; /** The number of MIPI buffers required for this
+ stream */
+};
+
+enum {
+ IA_CSS_STREAM_ISYS_STREAM_0 = 0,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX = IA_CSS_STREAM_ISYS_STREAM_0,
+ IA_CSS_STREAM_ISYS_STREAM_1,
+ IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH
+};
+
+/* This is input data configuration for one MIPI data type. We can have
+ * multiple of this in one virtual channel.
+ */
+struct ia_css_stream_isys_stream_config {
+ struct ia_css_resolution input_res; /** Resolution of input data */
+ enum atomisp_input_format format; /** Format of input stream. This data
+ format will be mapped to MIPI data
+ type internally. */
+ int linked_isys_stream_id; /** default value is -1, other value means
+ current isys_stream shares the same buffer with
+ indicated isys_stream*/
+ bool valid; /** indicate whether other fields have valid value */
+};
+
+struct ia_css_stream_input_config {
+ struct ia_css_resolution input_res; /** Resolution of input data */
+ struct ia_css_resolution effective_res; /** Resolution of input data.
+ Used for CSS 2400/1 System and deprecated for other
+ systems (replaced by input_effective_res in
+ ia_css_pipe_config) */
+ enum atomisp_input_format format; /** Format of input stream. This data
+ format will be mapped to MIPI data
+ type internally. */
+ enum ia_css_bayer_order bayer_order; /** Bayer order for RAW streams */
+};
+
+/* Input stream description. This describes how input will flow into the
+ * CSS. This is used to program the CSS hardware.
+ */
+struct ia_css_stream_config {
+ enum ia_css_input_mode mode; /** Input mode */
+ union {
+ struct ia_css_input_port port; /** Port, for sensor only. */
+ struct ia_css_prbs_config prbs; /** PRBS configuration */
+ } source; /** Source of input data */
+ unsigned int channel_id; /** Channel on which input data
+ will arrive. Use this field
+ to specify virtual channel id.
+ Valid values are: 0, 1, 2, 3 */
+ struct ia_css_stream_isys_stream_config
+ isys_config[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ struct ia_css_stream_input_config input_config;
+
+ /*
+ * Currently, Linux and Windows platforms interpret the binning_factor
+ * parameter differently. In Linux, the binning factor is expressed
+ * in the form 2^N * 2^N
+ */
+ /* ISP2401 */
+ unsigned int sensor_binning_factor; /** Binning factor used by sensor
+ to produce image data. This is
+ used for shading correction. */
+ unsigned int pixels_per_clock; /** Number of pixels per clock, which can be
+ 1, 2 or 4. */
+ bool online; /** offline will activate RAW copy on SP, use this for
+ continuous capture. */
+ /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */
+ unsigned int init_num_cont_raw_buf; /** initial number of raw buffers to
+ allocate */
+ unsigned int target_num_cont_raw_buf; /** total number of raw buffers to
+ allocate */
+ bool pack_raw_pixels; /** Pack pixels in the raw buffers */
+ bool continuous; /** Use SP copy feature to continuously capture frames
+ to system memory and run pipes in offline mode */
+ bool disable_cont_viewfinder; /** disable continuous viewfinder for ZSL use case */
+ s32 flash_gpio_pin; /** pin on which the flash is connected, -1 for no flash */
+ int left_padding; /** The number of input-formatter left-paddings, -1 for default from binary.*/
+ struct ia_css_mipi_buffer_config
+ mipi_buffer_config; /** mipi buffer configuration */
+ struct ia_css_metadata_config
+ metadata_config; /** Metadata configuration. */
+ bool ia_css_enable_raw_buffer_locking; /** Enable Raw Buffer Locking for HALv3 Support */
+ bool lock_all;
+ /** Lock all RAW buffers (true) or lock only buffers processed by
+ video or preview pipe (false).
+ This setting needs to be enabled to allow raw buffer locking
+ without continuous viewfinder. */
+};
+
+struct ia_css_stream;
+
+/* Stream info, this struct describes properties of a stream after it has been
+ * created.
+ */
+struct ia_css_stream_info {
+ struct ia_css_metadata_info metadata_info;
+ /** Info about the metadata layout, this contains the stride. */
+};
+
+/* @brief Load default stream configuration
+ * @param[in,out] stream_config The stream configuration.
+ * @return None
+ *
+ * This function will reset the stream configuration to the default state:
+@code
+ memset(stream_config, 0, sizeof(*stream_config));
+ stream_config->online = true;
+ stream_config->left_padding = -1;
+@endcode
+ */
+void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config);
+
+/*
+ * create the internal structures and fill in the configuration data and pipes
+ */
+
+/* @brief Creates a stream
+* @param[in] stream_config The stream configuration.
+* @param[in] num_pipes The number of pipes to incorporate in the stream.
+* @param[in] pipes The pipes.
+* @param[out] stream The stream.
+* @return 0 or the error code.
+*
+* This function will create a stream with a given configuration and given pipes.
+*/
+int
+ia_css_stream_create(const struct ia_css_stream_config *stream_config,
+ int num_pipes,
+ struct ia_css_pipe *pipes[],
+ struct ia_css_stream **stream);
+
+/* @brief Destroys a stream
+ * @param[in] stream The stream.
+ * @return 0 or the error code.
+ *
+ * This function will destroy a given stream.
+ */
+int
+ia_css_stream_destroy(struct ia_css_stream *stream);
+
+/* @brief Provides information about a stream
+ * @param[in] stream The stream.
+ * @param[out] stream_info The information about the stream.
+ * @return 0 or the error code.
+ *
+ * This function will return information about a given stream.
+ */
+int
+ia_css_stream_get_info(const struct ia_css_stream *stream,
+ struct ia_css_stream_info *stream_info);
+
+
+/* @brief Starts the stream.
+ * @param[in] stream The stream.
+ * @return 0 or the error code.
+ *
+ * The dynamic data in
+ * the buffers is not used and needs to be queued with a separate call
+ * to ia_css_pipe_enqueue_buffer.
+ * NOTE: this function will only send a start event to the corresponding
+ * thread; it will not start the SP anymore.
+ */
+int
+ia_css_stream_start(struct ia_css_stream *stream);
+
+/* @brief Stop the stream.
+ * @param[in] stream The stream.
+ * @return 0 or the error code.
+ *
+ * NOTE: this function will send a stop event to the pipes belonging to this
+ * stream but will not terminate threads.
+ */
+int
+ia_css_stream_stop(struct ia_css_stream *stream);
+
+/* @brief Check if a stream has stopped
+ * @param[in] stream The stream.
+ * @return boolean flag
+ *
+ * This function will check if the stream has stopped and return the corresponding boolean flag.
+ */
+bool
+ia_css_stream_has_stopped(struct ia_css_stream *stream);
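+
+/* Illustrative stream lifecycle (sketch only; num_pipes and pipes[] are assumed
+ * to hold pipes created with ia_css_pipe_create()):
+@code
+	struct ia_css_stream_config cfg;
+	struct ia_css_stream *stream = NULL;
+
+	ia_css_stream_config_defaults(&cfg);
+	// ... fill cfg.mode, cfg.input_config, ...
+	if (!ia_css_stream_create(&cfg, num_pipes, pipes, &stream)) {
+		ia_css_stream_start(stream);
+		// ... enqueue/dequeue buffers via the pipes ...
+		ia_css_stream_stop(stream);
+		while (!ia_css_stream_has_stopped(stream))
+			;	// poll, or wait for the stop event
+		ia_css_stream_destroy(stream);
+	}
+@endcode
+ */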
+
+/* @brief Destroy a stream according to the stream seed previously saved in the seed array.
+ * @param[in] stream The stream.
+ * @return 0 (no other errors are generated now)
+ *
+ * Destroy the stream and all the pipes related to it.
+ */
+int
+ia_css_stream_unload(struct ia_css_stream *stream);
+
+/* @brief Returns stream format
+ * @param[in] stream The stream.
+ * @return The format of the stream.
+ *
+ * This function will return the stream format.
+ */
+enum atomisp_input_format
+ia_css_stream_get_format(const struct ia_css_stream *stream);
+
+/* @brief Check if the stream is configured for 2 pixels per clock
+ * @param[in] stream The stream.
+ * @return boolean flag
+ *
+ * This function will check if the stream is configured for 2 pixels per clock and
+ * return the corresponding boolean flag.
+ */
+bool
+ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
+
+/* @brief Sets the output frame stride (at the last pipe)
+ * @param[in] stream The stream
+ * @param[in] output_padded_width - the output buffer stride.
+ * @return ia_css_err
+ *
+ * This function will set the output frame stride (at the last pipe).
+ */
+int
+ia_css_stream_set_output_padded_width(struct ia_css_stream *stream,
+ unsigned int output_padded_width);
+
+/* @brief Return max number of continuous RAW frames.
+ * @param[in] stream The stream.
+ * @param[out] buffer_depth The maximum number of continuous RAW frames.
+ * @return 0 or -EINVAL
+ *
+ * This function will return the maximum number of continuous RAW frames
+ * the system can support.
+ */
+int
+ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream,
+ int *buffer_depth);
+
+/* @brief Set the number of continuous RAW frames to use.
+ *
+ * @param[in] stream The stream.
+ * @param[in] buffer_depth Number of frames to set.
+ * @return 0 or error code upon error.
+ *
+ * Set the number of continuous frames to use during continuous modes.
+ */
+int
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth);
+
+/* @brief Get number of continuous RAW frames to use.
+ * @param[in] stream The stream.
+ * @param[out] buffer_depth The number of frames to use
+ * @return 0 or -EINVAL
+ *
+ * Get the currently set number of continuous frames
+ * to use during continuous modes.
+ */
+int
+ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
+
+/* ===== CAPTURE ===== */
+
+/* @brief Configure the continuous capture
+ *
+ * @param[in] stream The stream.
+ * @param[in] num_captures The number of RAW frames to be processed to
+ * YUV. Setting this to -1 will make continuous
+ * capture run until it is stopped.
+ * This number will also be used to allocate RAW
+ * buffers. To allow the viewfinder to also
+ * keep operating, 2 extra buffers will always be
+ * allocated.
+ * If the offset is negative and the skip setting
+ * is greater than 0, additional buffers may be
+ * needed.
+ * @param[in] skip Skip N frames in between captures. This can be
+ * used to select a slower capture frame rate than
+ * the sensor output frame rate.
+ * @param[in] offset Start the RAW-to-YUV processing at RAW buffer
+ * with this offset. This allows the user to
+ * process RAW frames that were captured in the
+ * past or future.
+ * @return 0 or error code upon error.
+ *
+ * For example, to capture the current frame plus the 2 previous
+ * frames and 2 subsequent frames, you would call
+ * ia_css_stream_capture(5, 0, -2).
+ */
+int
+ia_css_stream_capture(struct ia_css_stream *stream,
+ int num_captures,
+ unsigned int skip,
+ int offset);
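+
+/* Illustrative call for the example described above (sketch only; stream and
+ * err are placeholders):
+@code
+	// process the current frame plus the 2 previous and 2 subsequent frames
+	int err = ia_css_stream_capture(stream, 5, 0, -2);
+@endcode
+ */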
+
+/* @brief Specify which raw frame to tag based on exp_id found in frame info
+ *
+ * @param[in] stream The stream.
+ * @param[in] exp_id The exposure id of the raw frame to tag.
+ *
+ * @return 0 or error code upon error.
+ *
+ * This function allows the user to tag a raw frame based on the exposure id
+ * found in the viewfinder frames' frame info.
+ */
+int
+ia_css_stream_capture_frame(struct ia_css_stream *stream,
+ unsigned int exp_id);
+
+/* ===== VIDEO ===== */
+
+/* @brief Send streaming data into the css input FIFO
+ *
+ * @param[in] stream The stream.
+ * @param[in]	data	Pointer to the pixels to be sent.
+ * @param[in] width Width of the input frame.
+ * @param[in] height Height of the input frame.
+ * @return None
+ *
+ * Send streaming data into the css input FIFO. This is for testing purposes
+ * only. This uses the channel ID and input format as set by the user with
+ * the regular functions for this.
+ * This function blocks until the entire frame has been written into the
+ * input FIFO.
+ *
+ * Note:
+ * For higher flexibility the ia_css_stream_send_input_frame is replaced by
+ * three separate functions:
+ * 1) ia_css_stream_start_input_frame
+ * 2) ia_css_stream_send_input_line
+ * 3) ia_css_stream_end_input_frame
+ * In this way it is possible to stream multiple frames on different
+ * channel ID's on a line basis. It will be possible to simulate
+ * line-interleaved Stereo 3D muxed on 1 mipi port.
+ * These 3 functions are for testing purposes only and can be used in
+ * conjunction with ia_css_stream_send_input_frame
+ */
+void
+ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height);
+
+/* @brief Start an input frame on the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * Starts streaming a MIPI frame by sending an SoF for channel channel_id.
+ * It will use the input_format and two_pixels_per_clock as provided by
+ * the user.
+ * For the "correct" use-case, input_format and two_pixels_per_clock must match
+ * with the values as set by the user with the regular functions.
+ * To simulate an error, the user can provide "incorrect" values for
+ * input_format and/or two_pixels_per_clock.
+ */
+void
+ia_css_stream_start_input_frame(const struct ia_css_stream *stream);
+
+/* @brief Send a line of input data into the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @param[in] data Array of the first line of image data.
+ * @param width The width (in pixels) of the first line.
+ * @param[in] data2 Array of the second line of image data.
+ * @param width2 The width (in pixels) of the second line.
+ * @return None
+ *
+ * Sends 1 frame line. Starts with an SoL followed by width bytes of data, followed
+ * by width2 bytes of data2, followed by an EoL.
+ * It will use the input_format and two_pixels_per_clock settings as provided
+ * with the ia_css_stream_start_input_frame function call.
+ *
+ * This function blocks until the entire line has been written into the
+ * input FIFO.
+ */
+void
+ia_css_stream_send_input_line(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2);
+
+/* @brief Send a line of input embedded data into the CSS input FIFO.
+ *
+ * @param[in] stream Pointer of the stream.
+ * @param[in] format Format of the embedded data.
+ * @param[in] data Pointer of the embedded data line.
+ * @param[in] width The width (in pixels) of the line.
+ * @return None
+ *
+ * Sends one embedded data line to the input fifo. Starts with an SoL followed by
+ * width bytes of data, followed by an EoL.
+ * It will use the two_pixels_per_clock settings as provided with the
+ * ia_css_stream_start_input_frame function call.
+ *
+ * This function blocks until the entire line has been written into the
+ * input FIFO.
+ */
+void
+ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
+ enum atomisp_input_format format,
+ const unsigned short *data,
+ unsigned int width);
+
+/* @brief End an input frame on the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * Send the end-of-frame signal into the CSS input FIFO.
+ */
+void
+ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
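+
+/* Illustrative line-based input sequence (sketch only; stream, line[], width and
+ * height are placeholders, and lines are sent here as even/odd pairs):
+@code
+	unsigned int i;
+
+	ia_css_stream_start_input_frame(stream);
+	for (i = 0; i < height; i += 2)
+		ia_css_stream_send_input_line(stream, line[i], width,
+					      line[i + 1], width);
+	ia_css_stream_end_input_frame(stream);
+@endcode
+ */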
+
+/* @brief Configure a stream with filter coefficients.
+ * @deprecated {Replaced by
+ * ia_css_pipe_set_isp_config_on_pipe()}
+ *
+ * @param[in] stream The stream.
+ * @param[in] config The set of filter coefficients.
+ * @param[in] pipe Pipe to be updated when set isp config, NULL means to
+ * update all pipes in the stream.
+ * @return 0 or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * stream. For image pipes that do not execute any ISP filters, this
+ * function will have no effect.
+ * It is safe to call this function while the image stream is running,
+ * in fact this is the expected behavior most of the time. Proper
+ * resource locking and double buffering is in place to allow for this.
+ */
+int
+ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+
+/* @brief Configure a stream with filter coefficients.
+ * @deprecated {Replaced by
+ * ia_css_pipe_set_isp_config()}
+ * @param[in] stream The stream.
+ * @param[in] config The set of filter coefficients.
+ * @return 0 or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * stream. For image pipes that do not execute any ISP filters, this
+ * function will have no effect. All pipes of a stream will be updated.
+ * See ::ia_css_stream_set_isp_config_on_pipe() for the per-pipe alternative.
+ * It is safe to call this function while the image stream is running,
+ * in fact this is the expected behavior most of the time. Proper
+ * resource locking and double buffering are in place to allow for this.
+ */
+int
+ia_css_stream_set_isp_config(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config);
+
+/* @brief Get selected configuration settings
+ * @param[in] stream The stream.
+ * @param[out] config Configuration settings.
+ * @return None
+ */
+void
+ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
+ struct ia_css_isp_config *config);
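Since ia_css_stream_get_isp_config() fills a caller-provided struct and ia_css_stream_set_isp_config() applies one, a read-modify-write pattern is possible. The sketch below is only an assumed usage; `my_dz` is a hypothetical, caller-owned ia_css_dz_config (the type is declared further down in ia_css_types.h).

/*
 * Hypothetical sketch: read-modify-write of the stream ISP config.
 * `my_dz` is a caller-owned ia_css_dz_config and is an assumption here.
 */
static int apply_digital_zoom(struct ia_css_stream *stream,
			      struct ia_css_dz_config *my_dz)
{
	struct ia_css_isp_config cfg = { 0 };

	ia_css_stream_get_isp_config(stream, &cfg);
	cfg.dz_config = my_dz;	/* applied to all pipes of the stream */
	return ia_css_stream_set_isp_config(stream, &cfg);
}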
+
+/* @brief Allocate continuous raw frames for continuous capture
+ * @param[in] stream The stream.
+ * @return 0 or error code.
+ *
+ * Because this allocation takes a long time (around 120 ms per frame),
+ * the allocation and update parts are separated so that the driver can
+ * call this function without locking. This function is the allocation
+ * part; the next one is the update part.
+ */
+int
+ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
+
+/* @brief Update continuous raw frames for continuous capture
+ * @param[in] stream The stream.
+ * @return 0 or error code.
+ *
+ * Because this allocation takes a long time (around 120 ms per frame),
+ * the allocation and update parts are separated so that the driver can
+ * call this function without locking. This function is the update part.
+ */
+int
+ia_css_update_continuous_frames(struct ia_css_stream *stream);
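The comments above describe a deliberate split: the slow allocation can run without the driver's lock held, and only the quick update needs it. A hedged sketch of that pattern follows; `struct my_driver` and its mutex are hypothetical driver-side names.

/*
 * Hypothetical sketch of the two-phase pattern described above.
 * `struct my_driver` and its lock are assumptions; only the fast
 * update step runs under the lock.
 */
static int refresh_continuous_raw_frames(struct my_driver *drv,
					 struct ia_css_stream *stream)
{
	int ret;

	ret = ia_css_alloc_continuous_frame_remain(stream); /* ~120 ms/frame */
	if (ret)
		return ret;

	mutex_lock(&drv->lock);
	ret = ia_css_update_continuous_frames(stream);
	mutex_unlock(&drv->lock);
	return ret;
}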
+
+/* @brief Unlock a raw frame (HALv3 support)
+ * @param[in] stream The stream.
+ * @param[in] exp_id Exposure ID that uniquely identifies the locked raw frame buffer.
+ * @return 0 or error code
+ *
+ * As part of the HALv3 feature requirement, the SP locks a raw buffer until the
+ * application releases its reference to it (raw buffers are managed by the SP).
+ * This function allows the application to explicitly unlock that buffer in the SP.
+ */
+int
+ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id);
+
+/* @brief Enable/disable digital zoom for the capture pipe
+ * @param[in] stream The stream.
+ * @param[in] enable Enable digital zoom if true, disable it if false.
+ * @return None
+ *
+ * Enables or disables digital zoom for the capture pipe in the provided stream,
+ * if a capture pipe exists. This function sets the enable_zoom flag in the
+ * CAPTURE_PP stage of the capture pipe. In process_zoom_and_motion(), the
+ * decision to enable or disable zoom for every stage depends on this flag.
+ */
+void
+ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable);
+#endif /* __IA_CSS_STREAM_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_timer.h b/drivers/staging/media/atomisp/pci/ia_css_timer.h
new file mode 100644
index 000000000000..da752834adf4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_timer.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TIMER_H
+#define __IA_CSS_TIMER_H
+
+/* @file
+ * Timer interface definitions
+ */
+#include <type_support.h> /* for uint32_t */
+#include "ia_css_err.h"
+
+/* @brief timer reading definition */
+typedef u32 clock_value_t;
+
+/* @brief 32-bit clock tick (timestamp based on the timer value of the CSS-internal timer) */
+struct ia_css_clock_tick {
+ clock_value_t ticks; /** measured time in ticks.*/
+};
+
+/* @brief TIMER event codes */
+enum ia_css_tm_event {
+ IA_CSS_TM_EVENT_AFTER_INIT,
+ /** Timer Event after Initialization */
+ IA_CSS_TM_EVENT_MAIN_END,
+ /** Timer Event after end of Main */
+ IA_CSS_TM_EVENT_THREAD_START,
+ /** Timer Event after thread start */
+ IA_CSS_TM_EVENT_FRAME_PROC_START,
+ /** Timer Event after Frame Process Start */
+ IA_CSS_TM_EVENT_FRAME_PROC_END
+ /** Timer Event after Frame Process End */
+};
+
+/* @brief code measurement common struct */
+struct ia_css_time_meas {
+ clock_value_t start_timer_value; /** measured time in ticks */
+ clock_value_t end_timer_value; /** measured time in ticks */
+};
+
+/**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */
+#define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t)
+/* @brief checks to ensure correct alignment for ia_css_time_meas. */
+#define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \
+ + sizeof(clock_value_t))
+
+/* @brief API to fetch timer count directly
+*
+* @param curr_ts [out] measured count value
+* @return 0 on success
+*
+*/
+int
+ia_css_timer_get_current_tick(
+ struct ia_css_clock_tick *curr_ts);
+
+#endif /* __IA_CSS_TIMER_H */
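Taken together with the ia_css_time_meas layout above, the tick API allows simple interval measurements. The sketch below is an assumed usage; it ignores wrap-around of the 32-bit counter and treats any non-zero return as failure.

/*
 * Hypothetical sketch: measure a code section in CSS timer ticks.
 * Wrap-around of the 32-bit counter is ignored for brevity.
 */
static u32 measure_ticks(void (*section)(void))
{
	struct ia_css_clock_tick t0, t1;
	struct ia_css_time_meas m = { 0, 0 };

	if (ia_css_timer_get_current_tick(&t0))
		return 0;
	section();
	if (ia_css_timer_get_current_tick(&t1))
		return 0;
	m.start_timer_value = t0.ticks;
	m.end_timer_value = t1.ticks;
	return m.end_timer_value - m.start_timer_value;
}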
diff --git a/drivers/staging/media/atomisp/pci/ia_css_types.h b/drivers/staging/media/atomisp/pci/ia_css_types.h
new file mode 100644
index 000000000000..676d7e20b282
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_types.h
@@ -0,0 +1,596 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_TYPES_H
+#define _IA_CSS_TYPES_H
+
+/* @file
+ * This file contains types used for the ia_css parameters.
+ * These types are in a separate file because they are expected
+ * to be used in software layers that do not access the CSS API
+ * directly but still need to forward parameters for it.
+ */
+
+#include <type_support.h>
+
+#include "ia_css_frac.h"
+
+#include "isp/kernels/aa/aa_2/ia_css_aa2_types.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr_types.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2_types.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc_types.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp_types.h"
+#include "isp/kernels/de/de_1.0/ia_css_de_types.h"
+#include "isp/kernels/de/de_2/ia_css_de2_types.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats_types.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc_types.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2_types.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc_types.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob_types.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc_types.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb_types.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h"
+
+/* ISP2401 */
+#include "isp/kernels/tnr/tnr3/ia_css_tnr3_types.h"
+
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h"
+#include "isp/kernels/output/output_1.0/ia_css_output_types.h"
+
+#define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
+/** Should be removed after driver adaptation is done. */
+
+#define IA_CSS_VERSION_MAJOR 2
+#define IA_CSS_VERSION_MINOR 0
+#define IA_CSS_VERSION_REVISION 2
+
+#define IA_CSS_MORPH_TABLE_NUM_PLANES 6
+
+/* Min and max exposure IDs. These macros are here to allow
+ * the drivers to get this information. Changing these macros
+ * constitutes a CSS API change. */
+#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /** Minimum exposure ID */
+#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /** Maximum exposure ID */
+
+/* opaque types */
+struct ia_css_isp_parameters;
+struct ia_css_pipe;
+struct ia_css_memory_offsets;
+struct ia_css_config_memory_offsets;
+struct ia_css_state_memory_offsets;
+
+/* Virtual address within the CSS address space. */
+typedef u32 ia_css_ptr;
+
+#define SIZE_OF_IA_CSS_PTR sizeof(uint32_t)
+
+/* Generic resolution structure.
+ */
+struct ia_css_resolution {
+ u32 width; /** Width */
+ u32 height; /** Height */
+};
+
+/* Generic coordinate structure.
+ */
+struct ia_css_coordinate {
+ s32 x; /** Value of a coordinate on the horizontal axis */
+ s32 y; /** Value of a coordinate on the vertical axis */
+};
+
+/* Vector with signed values. This is used to indicate motion for
+ * Digital Image Stabilization.
+ */
+struct ia_css_vector {
+ s32 x; /** horizontal motion (in pixels) */
+ s32 y; /** vertical motion (in pixels) */
+};
+
+/* Short hands */
+#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
+#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
+
+/* CSS data descriptor */
+struct ia_css_data {
+ ia_css_ptr address; /** CSS virtual address */
+ u32 size; /** Disabled if 0 */
+};
+
+/* Host data descriptor */
+struct ia_css_host_data {
+ char *address; /** Host address */
+ u32 size; /** Disabled if 0 */
+};
+
+/* ISP data descriptor */
+struct ia_css_isp_data {
+ u32 address; /** ISP address */
+ u32 size; /** Disabled if 0 */
+};
+
+/* Shading Correction types. */
+enum ia_css_shading_correction_type {
+ IA_CSS_SHADING_CORRECTION_NONE, /** Shading Correction is not processed in the pipe. */
+ IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
+
+ /** More shading correction types can be added in the future. */
+};
+
+/* Shading Correction information. */
+struct ia_css_shading_info {
+ enum ia_css_shading_correction_type type; /** Shading Correction type. */
+
+ union { /* Shading Correction information of each Shading Correction types. */
+
+ /* Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
+ *
+ * This structure contains the information necessary to generate
+ * the shading table required in the isp.
+ * This structure is filled in the css,
+ * and the driver needs to get it to generate the shading table.
+ *
+ * Before the shading correction is applied, NxN-filter and/or scaling
+ * are applied in the isp, depending on the isp binaries.
+ * Then, these should be considered in generating the shading table.
+ * - Bad pixels on left/top sides generated by NxN-filter
+ * (Bad pixels are NOT considered currently,
+ * because they are subtle.)
+ * - Down-scaling/Up-scaling factor
+ *
+ * Shading correction is applied to the area
+ * which has real sensor data and margin.
+ * Then, the shading table should cover the area including margin.
+ * This structure has this information.
+ * - Origin coordinate of bayer (real sensor data)
+ * on the shading table
+ *
+ * ------------------------ISP 2401-----------------------
+ *
	 * This part of the structure describes the shading table directly required by the ISP.
	 * It is filled in by the CSS, and the driver needs to read it to generate the shading table.
+ *
+ * The shading correction is applied to the bayer area which contains sensor data and padding data.
+ * The shading table should cover this bayer area.
+ *
+ * The shading table size directly required from ISP is expressed by these parameters.
+ * 1. uint32_t num_hor_grids;
+ * 2. uint32_t num_ver_grids;
+ * 3. uint32_t bqs_per_grid_cell;
+ *
+ * In some isp binaries, the bayer scaling is applied before the shading correction is applied.
+ * Then, this scaling factor should be considered in generating the shading table.
+ * The scaling factor is expressed by these parameters.
+ * 4. uint32_t bayer_scale_hor_ratio_in;
+ * 5. uint32_t bayer_scale_hor_ratio_out;
+ * 6. uint32_t bayer_scale_ver_ratio_in;
+ * 7. uint32_t bayer_scale_ver_ratio_out;
+ *
+ * The sensor data size inputted to ISP is expressed by this parameter.
+ * This is the size BEFORE the bayer scaling is applied.
+ * 8. struct ia_css_resolution isp_input_sensor_data_res_bqs;
+ *
+ * The origin of the sensor data area positioned on the shading table at the shading correction
+ * is expressed by this parameter.
+ * The size of this area assumes the size AFTER the bayer scaling is applied
	 * to the isp_input_sensor_data_res_bqs.
+ * 9. struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
+ *
+ * ****** Definitions of the shading table and the sensor data at the shading correction ******
+ *
+ * (0,0)--------------------- TW -------------------------------
+ * | shading table |
+ * | (ox,oy)---------- W -------------------------- |
+ * | | sensor data | |
+ * | | | |
+ * TH H sensor data center | |
+ * | | (cx,cy) | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | ------------------------------------------- |
+ * | |
+ * ----------------------------------------------------------
+ *
+ * Example of still mode for output 1080p:
+ *
+ * num_hor_grids = 66
+ * num_ver_grids = 37
+ * bqs_per_grid_cell = 16
+ * bayer_scale_hor_ratio_in = 1
+ * bayer_scale_hor_ratio_out = 1
+ * bayer_scale_ver_ratio_in = 1
+ * bayer_scale_ver_ratio_out = 1
	 * isp_input_sensor_data_res_bqs = {966, 546}
+ * sensor_data_origin_bqs_on_sctbl = {61, 15}
+ *
+ * TW, TH [bqs]: width and height of shading table
+ * TW = (num_hor_grids - 1) * bqs_per_grid_cell = (66 - 1) * 16 = 1040
+ * TH = (num_ver_grids - 1) * bqs_per_grid_cell = (37 - 1) * 16 = 576
+ *
+ * W, H [bqs]: width and height of sensor data at shading correction
+ * W = sensor_data_res_bqs.width
+ * = isp_input_sensor_data_res_bqs.width
+ * * bayer_scale_hor_ratio_out / bayer_scale_hor_ratio_in + 0.5 = 966
+ * H = sensor_data_res_bqs.height
+ * = isp_input_sensor_data_res_bqs.height
+ * * bayer_scale_ver_ratio_out / bayer_scale_ver_ratio_in + 0.5 = 546
+ *
+ * (ox, oy) [bqs]: origin of sensor data positioned on shading table at shading correction
+ * ox = sensor_data_origin_bqs_on_sctbl.x = 61
+ * oy = sensor_data_origin_bqs_on_sctbl.y = 15
+ *
+ * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction
+ * cx = ox + W/2 = 61 + 966/2 = 544
+ * cy = oy + H/2 = 15 + 546/2 = 288
+ *
+ * ****** Relation between the shading table and the sensor data ******
+ *
+ * The origin of the sensor data should be on the shading table.
+ * 0 <= ox < TW, 0 <= oy < TH
+ *
+ * ****** How to center the shading table on the sensor data ******
+ *
+ * To center the shading table on the sensor data,
+ * CSS decides the shading table size so that a certain grid point is positioned
+ * on the center of the sensor data at the shading correction.
+ * CSS expects the shading center is set on this grid point
+ * when the shading table data is calculated in AIC.
+ *
+ * W, H [bqs]: width and height of sensor data at shading correction
+ * W = sensor_data_res_bqs.width
+ * H = sensor_data_res_bqs.height
+ *
+ * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction
+ * cx = sensor_data_origin_bqs_on_sctbl.x + W/2
+ * cy = sensor_data_origin_bqs_on_sctbl.y + H/2
+ *
+ * CSS decides the shading table size and the sensor data position
+ * so that the (cx, cy) satisfies this condition.
+ * mod(cx, bqs_per_grid_cell) = 0
+ * mod(cy, bqs_per_grid_cell) = 0
+ *
+ * ****** How to change the sensor data size by processes in the driver and ISP ******
+ *
+ * 1. sensor data size: Physical sensor size
+ * (The struct ia_css_shading_info does not have this information.)
+ * 2. process: Driver applies the sensor cropping/binning/scaling to physical sensor size.
+ * 3. sensor data size: ISP input size (== shading_info.isp_input_sensor_data_res_bqs)
+ * (ISP assumes the ISP input sensor data is centered on the physical sensor.)
+ * 4. process: ISP applies the bayer scaling by the factor of shading_info.bayer_scale_*.
+ * 5. sensor data size: Scaling factor * ISP input size (== shading_info.sensor_data_res_bqs)
+ * 6. process: ISP applies the shading correction.
+ *
+ * ISP block: SC1
+ * ISP1: SC1 is used.
+ * ISP2: SC1 is used.
+ */
+ struct {
+ /* ISP2400 */
+ u32 enable; /** Shading correction enabled.
+ 0:disabled, 1:enabled */
+
+ /* ISP2401 */
+ u32 num_hor_grids; /** Number of data points per line per color on shading table. */
+ u32 num_ver_grids; /** Number of lines of data points per color on shading table. */
+ u32 bqs_per_grid_cell; /** Grid cell size in BQ unit.
+ NOTE: bqs = size in BQ(Bayer Quad) unit.
+ 1BQ means {Gr,R,B,Gb} (2x2 pixels).
+ Horizontal 1 bqs corresponds to horizontal 2 pixels.
+ Vertical 1 bqs corresponds to vertical 2 pixels. */
+ u32 bayer_scale_hor_ratio_in;
+ u32 bayer_scale_hor_ratio_out;
+
+ /** Horizontal ratio of bayer scaling between input width and output width,
+ for the scaling which should be done before shading correction.
+ output_width = input_width * bayer_scale_hor_ratio_out
+ / bayer_scale_hor_ratio_in + 0.5 */
+ u32 bayer_scale_ver_ratio_in;
+ u32 bayer_scale_ver_ratio_out;
+
+ /** Vertical ratio of bayer scaling
+ between input height and output height, for the scaling
+ which should be done before shading correction.
+ output_height = input_height * bayer_scale_ver_ratio_out
+ / bayer_scale_ver_ratio_in */
+ /* ISP2400 */
+ u32 sc_bayer_origin_x_bqs_on_shading_table;
+ /** X coordinate (in bqs) of bayer origin on shading table.
+ This indicates the left-most pixel of bayer
+ (not include margin) inputted to the shading correction.
+ This corresponds to the left-most pixel of bayer
+ inputted to isp from sensor. */
+ /* ISP2400 */
+ u32 sc_bayer_origin_y_bqs_on_shading_table;
+ /** Y coordinate (in bqs) of bayer origin on shading table.
+ This indicates the top pixel of bayer
+ (not include margin) inputted to the shading correction.
+ This corresponds to the top pixel of bayer
+ inputted to isp from sensor. */
+
+ /** Vertical ratio of bayer scaling between input height and output height,
+ for the scaling which should be done before shading correction.
+ output_height = input_height * bayer_scale_ver_ratio_out
+ / bayer_scale_ver_ratio_in + 0.5 */
+ /* ISP2401 */
+ struct ia_css_resolution isp_input_sensor_data_res_bqs;
+ /** Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
+ NOTE: This is NOT the size of the physical sensor size.
+ CSS requests the driver that ISP inputs sensor data
+ by the size of isp_input_sensor_data_res_bqs.
+ The driver sends the sensor data to ISP,
+ after the adequate cropping/binning/scaling
+ are applied to the physical sensor data area.
+ ISP assumes the area of isp_input_sensor_data_res_bqs
+ is centered on the physical sensor. */
+ /* ISP2401 */
+ struct ia_css_resolution sensor_data_res_bqs;
+ /** Sensor data size (in bqs) at shading correction.
+ This is the size AFTER bayer scaling. */
+ /* ISP2401 */
+ struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
+ /** Origin of sensor data area positioned on shading table at shading correction.
+ The coordinate x,y should be positive values. */
+ } type_1;
+
+ /** More structures can be added here when more shading correction types will be added
+ in the future. */
+ } info;
+};
+
+/* Default Shading Correction information of Shading Correction Type 1. */
+#define DEFAULT_SHADING_INFO_TYPE_1 \
+(struct ia_css_shading_info) { \
+ .type = IA_CSS_SHADING_CORRECTION_TYPE_1, \
+ .info = { \
+ .type_1 = { \
+ .bayer_scale_hor_ratio_in = 1, \
+ .bayer_scale_hor_ratio_out = 1, \
+ .bayer_scale_ver_ratio_in = 1, \
+ .bayer_scale_ver_ratio_out = 1, \
+ } \
+ } \
+}
+
+/* Default Shading Correction information. */
+#define DEFAULT_SHADING_INFO DEFAULT_SHADING_INFO_TYPE_1
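The ISP2401 block comment above gives the table-size and centering formulas together with a worked 1080p still-mode example. The sketch below recomputes those numbers in integer arithmetic, making the "* out / in + 0.5" rounding explicit; all values come from the example above and no new data is introduced.

/*
 * Hypothetical sketch: recompute the ISP2401 still-mode example from the
 * comment above (66x37 grid, 16 bqs cells, 1:1 bayer scaling, 966x546).
 * The "+ 0.5" rounding is done as (2 * x * out + in) / (2 * in).
 */
static void shading_table_example(void)
{
	u32 num_hor_grids = 66, num_ver_grids = 37, bqs_per_grid_cell = 16;
	u32 in_w = 966, in_h = 546, hin = 1, hout = 1, vin = 1, vout = 1;

	u32 tw = (num_hor_grids - 1) * bqs_per_grid_cell;	/* 1040 */
	u32 th = (num_ver_grids - 1) * bqs_per_grid_cell;	/* 576  */
	u32 w  = (2 * in_w * hout + hin) / (2 * hin);		/* 966  */
	u32 h  = (2 * in_h * vout + vin) / (2 * vin);		/* 546  */
	u32 cx = 61 + w / 2;	/* 544, and 544 % 16 == 0 as required */
	u32 cy = 15 + h / 2;	/* 288, and 288 % 16 == 0 as required */

	(void)tw; (void)th; (void)cx; (void)cy;
}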
+
+/* structure that describes the 3A and DIS grids */
+struct ia_css_grid_info {
+ /* \name ISP input size
+ * that is visible for user
+ * @{
+ */
+ u32 isp_in_width;
+ u32 isp_in_height;
+ /* @}*/
+
+ struct ia_css_3a_grid_info s3a_grid; /** 3A grid info */
+ union ia_css_dvs_grid_u dvs_grid;
+ /** All types of DVS statistics grid info union */
+
+ enum ia_css_vamem_type vamem_type;
+};
+
+/* defaults for ia_css_grid_info structs */
+#define DEFAULT_GRID_INFO { \
+ .dvs_grid = DEFAULT_DVS_GRID_INFO, \
+ .vamem_type = IA_CSS_VAMEM_TYPE_1 \
+}
+
+/* Morphing table, used for geometric distortion and chromatic aberration
+ * correction (GDCAC, also called GDC).
+ * This table describes the imperfections introduced by the lens, the
+ * advanced ISP can correct for these imperfections using this table.
+ */
+struct ia_css_morph_table {
+ u32 enable; /** To disable GDC, set this field to false. The
+ coordinates fields can be set to NULL in this case. */
+ u32 height; /** Table height */
+ u32 width; /** Table width */
+ u16 *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ /** X coordinates that describe the sensor imperfection */
+ u16 *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ /** Y coordinates that describe the sensor imperfection */
+};
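Each morph-table plane holds width * height 16-bit coordinates, and there are IA_CSS_MORPH_TABLE_NUM_PLANES planes for x and the same number for y. A small sketch of the total backing size, assuming every plane is populated, follows.

/*
 * Hypothetical sketch: bytes needed to back a fully populated
 * ia_css_morph_table (all x and y coordinate planes).
 */
static size_t morph_table_backing_bytes(u32 width, u32 height)
{
	size_t per_plane = (size_t)width * height * sizeof(u16);

	return 2 * IA_CSS_MORPH_TABLE_NUM_PLANES * per_plane;
}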
+
+struct ia_css_dvs_6axis_config {
+ unsigned int exp_id;
+ /** Exposure ID, see ia_css_event_public.h for more detail */
+ u32 width_y;
+ u32 height_y;
+ u32 width_uv;
+ u32 height_uv;
+ u32 *xcoords_y;
+ u32 *ycoords_y;
+ u32 *xcoords_uv;
+ u32 *ycoords_uv;
+};
+
+/**
+ * This specifies the coordinates (x,y)
+ */
+struct ia_css_point {
+ s32 x; /** x coordinate */
+ s32 y; /** y coordinate */
+};
+
+/**
+ * This specifies the region
+ */
+struct ia_css_region {
+ struct ia_css_point origin; /** Starting point coordinates for the region */
+ struct ia_css_resolution resolution; /** Region resolution */
+};
+
+/**
+ * Digital zoom:
+ * This feature is currently available only for video, but will become
+ * available for preview and capture as well.
+ * Set the digital zoom factor, this is a logarithmic scale. The actual zoom
+ * factor will be 64/x.
+ * Setting dx or dy to 0 disables digital zoom for that direction.
+ * New API change for Digital zoom:(added struct ia_css_region zoom_region)
+ * zoom_region specifies the origin of the zoom region and width and
+ * height of that region.
+ * origin : This is the coordinate (x,y) within the effective input resolution
+ * of the stream, where x >= 0 and y >= 0. (0,0) maps to the upper left of the
+ * effective input resolution.
+ * resolution : This is the resolution of the zoom region, where
+ *              x + width <= effective input width and
+ *              y + height <= effective input height.
+ */
+struct ia_css_dz_config {
+ u32 dx; /** Horizontal zoom factor */
+ u32 dy; /** Vertical zoom factor */
+ struct ia_css_region zoom_region; /** region for zoom */
+};
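Per the comment above, the effective zoom factor is 64/dx horizontally (and 64/dy vertically), with 0 disabling zoom in that direction. The sketch below expresses the horizontal factor in thousandths to avoid floating point; it is only an illustration of the documented relation.

/*
 * Hypothetical sketch: effective horizontal zoom factor, in thousandths,
 * implied by a dz_config (dx == 32 -> 2000, i.e. 2.000x zoom).
 */
static u32 dz_effective_zoom_x1000(const struct ia_css_dz_config *dz)
{
	if (!dz->dx)
		return 1000;		/* digital zoom disabled */
	return (64 * 1000) / dz->dx;
}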
+
+/* The still capture mode, this can be RAW (simply copy sensor input to DDR),
+ * Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR).
+ */
+enum ia_css_capture_mode {
+ IA_CSS_CAPTURE_MODE_RAW, /** no processing, copy data only */
+ IA_CSS_CAPTURE_MODE_BAYER, /** bayer processing, up to demosaic */
+ IA_CSS_CAPTURE_MODE_PRIMARY, /** primary ISP */
+ IA_CSS_CAPTURE_MODE_ADVANCED, /** advanced ISP (GDC) */
+ IA_CSS_CAPTURE_MODE_LOW_LIGHT /** low light ISP (ANR) */
+};
+
+struct ia_css_capture_config {
+ enum ia_css_capture_mode mode; /** Still capture mode */
+ u32 enable_xnr; /** Enable/disable XNR */
+ u32 enable_raw_output;
+ bool enable_capture_pp_bli; /** Enable capture_pp_bli mode */
+};
+
+/* default settings for ia_css_capture_config structs */
+#define DEFAULT_CAPTURE_CONFIG { \
+ .mode = IA_CSS_CAPTURE_MODE_PRIMARY, \
+}
+
+/* ISP filter configuration. This is a collection of configurations
+ * for each of the ISP filters (modules).
+ *
+ * NOTE! The contents of all pointers are copied when getting or setting, with the
+ * exception of the shading and morph tables. For these we only copy the
+ * pointer, so the caller must make sure the memory contents of these pointers
+ * remain valid as long as they are used by the CSS. This will be fixed in the
+ * future by copying the contents instead of just the pointer.
+ *
+ * Comment:
+ * ["ISP block", 1&2] : ISP block is used both for ISP1 and ISP2.
+ * ["ISP block", 1only] : ISP block is used only for ISP1.
+ * ["ISP block", 2only] : ISP block is used only for ISP2.
+ */
+struct ia_css_isp_config {
+ struct ia_css_wb_config *wb_config; /** White Balance
+ [WB1, 1&2] */
+ struct ia_css_cc_config *cc_config; /** Color Correction
+ [CSC1, 1only] */
+ struct ia_css_tnr_config *tnr_config; /** Temporal Noise Reduction
+ [TNR1, 1&2] */
+ struct ia_css_ecd_config *ecd_config; /** Eigen Color Demosaicing
+ [DE2, 2only] */
+ struct ia_css_ynr_config *ynr_config; /** Y(Luma) Noise Reduction
+ [YNR2&YEE2, 2only] */
+ struct ia_css_fc_config *fc_config; /** Fringe Control
+ [FC2, 2only] */
+ struct ia_css_formats_config
+ *formats_config; /** Formats Control for main output
+ [FORMATS, 1&2] */
+ struct ia_css_cnr_config *cnr_config; /** Chroma Noise Reduction
+ [CNR2, 2only] */
+ struct ia_css_macc_config *macc_config; /** MACC
+ [MACC2, 2only] */
+ struct ia_css_ctc_config *ctc_config; /** Chroma Tone Control
+ [CTC2, 2only] */
+ struct ia_css_aa_config *aa_config; /** YUV Anti-Aliasing
+ [AA2, 2only]
+ (not used currently) */
+ struct ia_css_aa_config *baa_config; /** Bayer Anti-Aliasing
+ [BAA2, 1&2] */
+ struct ia_css_ce_config *ce_config; /** Chroma Enhancement
+ [CE1, 1only] */
+ struct ia_css_dvs_6axis_config *dvs_6axis_config;
+ struct ia_css_ob_config *ob_config; /** Objective Black
+ [OB1, 1&2] */
+ struct ia_css_dp_config *dp_config; /** Defect Pixel Correction
+ [DPC1/DPC2, 1&2] */
+ struct ia_css_nr_config *nr_config; /** Noise Reduction
+ [BNR1&YNR1&CNR1, 1&2]*/
+ struct ia_css_ee_config *ee_config; /** Edge Enhancement
+ [YEE1, 1&2] */
+ struct ia_css_de_config *de_config; /** Demosaic
+ [DE1, 1only] */
+ struct ia_css_gc_config *gc_config; /** Gamma Correction (for YUV)
+ [GC1, 1only] */
+ struct ia_css_anr_config *anr_config; /** Advanced Noise Reduction */
+ struct ia_css_3a_config *s3a_config; /** 3A Statistics config */
+ struct ia_css_xnr_config *xnr_config; /** eXtra Noise Reduction */
+ struct ia_css_dz_config *dz_config; /** Digital Zoom */
+ struct ia_css_cc_config *yuv2rgb_cc_config; /** Color Correction
+ [CCM2, 2only] */
+ struct ia_css_cc_config *rgb2yuv_cc_config; /** Color Correction
+ [CSC2, 2only] */
+ struct ia_css_macc_table *macc_table; /** MACC
+ [MACC1/MACC2, 1&2]*/
+ struct ia_css_gamma_table *gamma_table; /** Gamma Correction (for YUV)
+ [GC1, 1only] */
+ struct ia_css_ctc_table *ctc_table; /** Chroma Tone Control
+ [CTC1, 1only] */
+
+ /* \deprecated */
+ struct ia_css_xnr_table *xnr_table; /** eXtra Noise Reduction
+ [XNR1, 1&2] */
+ struct ia_css_rgb_gamma_table *r_gamma_table;/** sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_rgb_gamma_table *g_gamma_table;/** sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_rgb_gamma_table *b_gamma_table;/** sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_vector *motion_vector; /** For 2-axis DVS */
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_morph_table *morph_table;
+ struct ia_css_dvs_coefficients *dvs_coefs; /** DVS 1.0 coefficients */
+ struct ia_css_dvs2_coefficients *dvs2_coefs; /** DVS 2.0 coefficients */
+ struct ia_css_capture_config *capture_config;
+ struct ia_css_anr_thres *anr_thres;
+ /* @deprecated{Old shading settings, see bugzilla bz675 for details} */
+ struct ia_css_shading_settings *shading_settings;
+ struct ia_css_xnr3_config *xnr3_config; /** eXtreme Noise Reduction v3 */
+ /* comment from Lasse: Be aware how this feature will affect coordinate
+ * normalization in different parts of the system. (e.g. face detection,
+ * touch focus, 3A statistics and windows of interest, shading correction,
+ * DVS, GDC) from IQ tool level and application level down-to ISP FW level.
+ * the risk for regression is not in the individual blocks, but how they
+ * integrate together. */
+ struct ia_css_output_config
+ *output_config; /** Main Output Mirroring, flipping */
+
+ struct ia_css_scaler_config
+ *scaler_config; /** Skylake: scaler config (optional) */
+ struct ia_css_formats_config
+ *formats_config_display;/** Formats control for viewfinder/display output (optional)
+ [OSYS, n/a] */
+ struct ia_css_output_config
+ *output_config_display; /** Viewfinder/display output mirroring, flipping (optional) */
+
+ struct ia_css_frame
+ *output_frame; /** Output frame the config is to be applied to (optional) */
+ u32 isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
+};
+
+#endif /* _IA_CSS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_version.h b/drivers/staging/media/atomisp/pci/ia_css_version.h
new file mode 100644
index 000000000000..13b192dec8f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_version.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_VERSION_H
+#define __IA_CSS_VERSION_H
+
+/* @file
+ * This file contains functions to retrieve CSS-API version information
+ */
+
+#include <ia_css_err.h>
+
+/* a common size for the version arrays */
+#define MAX_VERSION_SIZE 500
+
+/* @brief Retrieves the current CSS version
+ * @param[out] version A pointer to a buffer in which to store the generated
+ *                     version string. NULL is ignored.
+ * @param[in] max_size Size of the version buffer. If version string
+ * would be larger than max_size, an error is
+ * returned by this function.
+ *
+ * This function generates and returns the version string. If FW is loaded, it
+ * attaches the FW version.
+ */
+int
+ia_css_get_version(char *version, int max_size);
+
+#endif /* __IA_CSS_VERSION_H */
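A buffer of MAX_VERSION_SIZE bytes is large enough by definition, so a typical call looks like the sketch below; pr_info() is the usual kernel logging helper, and placing this in a driver is an assumption.

/*
 * Hypothetical sketch: fetch and log the CSS version string using the
 * MAX_VERSION_SIZE bound defined above.
 */
static void log_css_version(void)
{
	char version[MAX_VERSION_SIZE];

	if (!ia_css_get_version(version, sizeof(version)))
		pr_info("CSS version: %s\n", version);
}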
diff --git a/drivers/staging/media/atomisp/pci/ia_css_version_data.h b/drivers/staging/media/atomisp/pci/ia_css_version_data.h
new file mode 100644
index 000000000000..33fabac99bb7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_version_data.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+//
+// This file contains the version data for the CSS
+//
+// === Do not change - automatically generated ===
+//
+
+#ifndef __IA_CSS_VERSION_DATA_H
+#define __IA_CSS_VERSION_DATA_H
+
+#define ISP2400_CSS_VERSION_STRING "REL:20150521_21.4_0539; API:2.1.15.3; GIT:irci_candrpv_0415_20150504_35b345#35b345be52ac575f8934abb3a88fea26a94e7343; SDK:/nfs/iir/disks/iir_hivepackages_003/iir_hivepkgs_disk017/Css_Mizuchi/packages/Css_Mizuchi/int_css_mizuchi_20140829_1053; USER:viedifw; "
+#define ISP2401_CSS_VERSION_STRING "REL:20150911_37.5_1652; API:2.1.20.9; GIT:irci___#ebf437d53a8951bb7ff6d13fdb7270dab393a92a; SDK:; USER:viedifw; "
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/if_defs.h b/drivers/staging/media/atomisp/pci/if_defs.h
new file mode 100644
index 000000000000..7a6b23748550
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/if_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IF_DEFS_H
+#define _IF_DEFS_H
+
+#define HIVE_IF_FRAME_REQUEST 0xA000
+#define HIVE_IF_LINES_REQUEST 0xB000
+#define HIVE_IF_VECTORS_REQUEST 0xC000
+
+#endif /* _IF_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h
new file mode 100644
index 000000000000..f29a3cab76ee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _if_subsystem_defs_h__
+#define _if_subsystem_defs_h__
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8
+#define HIVE_IFMT_GP_REGS_SRST_IDX 9
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10
+
+#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0
+
+/* order of the input bits for the ifmt irq controller */
+#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0
+#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1
+#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2
+#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3
+#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3
+
+#endif /* _if_subsystem_defs_h__ */
diff --git a/drivers/staging/media/atomisp/pci/input_selector_defs.h b/drivers/staging/media/atomisp/pci/input_selector_defs.h
new file mode 100644
index 000000000000..8d67aabb898e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_selector_defs.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _input_selector_defs_h
+#define _input_selector_defs_h
+
+#ifndef HIVE_ISP_ISEL_SEL_BITS
+#define HIVE_ISP_ISEL_SEL_BITS 2
+#endif
+
+#ifndef HIVE_ISP_CH_ID_BITS
+#define HIVE_ISP_CH_ID_BITS 2
+#endif
+
+#ifndef HIVE_ISP_FMT_TYPE_BITS
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#endif
+
+/* gp_register register id's -- Outputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1
+#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7
+
+#define HIVE_ISEL_GP_REGS_SOF_IDX 8
+#define HIVE_ISEL_GP_REGS_EOF_IDX 9
+#define HIVE_ISEL_GP_REGS_SOL_IDX 10
+#define HIVE_ISEL_GP_REGS_EOL_IDX 11
+
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13
+#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14
+
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18
+#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21
+#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22
+#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23
+#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24
+#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25
+#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26
+#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27
+#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28
+
+#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29
+#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30
+#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31
+#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32
+#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33
+#define HIVE_ISEL_GP_REGS_SRST_IDX 37
+
+#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0
+#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1
+#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2
+#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3
+
+/* gp_register register id's -- Inputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36
+
+/* irq sources isel irq controller */
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3
+#define HIVE_ISEL_IRQ_NUM_IRQS 4
+
+#endif /* _input_selector_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/input_switch_2400_defs.h
new file mode 100644
index 000000000000..c56e00913204
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_switch_2400_defs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _input_switch_2400_defs_h
+#define _input_switch_2400_defs_h
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id) * 2) + ((fmt_type) >= 16))
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type) % 16) * 2)
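The two macros above locate the 2-bit select field for a (channel, format-type) pair: sixteen format types fit per LUT register, with two registers per channel. The sketch below only shows where the field lands for one hypothetical pair; programming the register itself is out of scope.

/*
 * Hypothetical sketch: locate the select field for a (ch_id, fmt_type)
 * pair with the macros above; e.g. ch_id 1, fmt_type 18 -> reg 3, lsb 4.
 */
static void input_switch_lut_field(unsigned int ch_id, unsigned int fmt_type,
				   unsigned int *reg_id, unsigned int *lsb)
{
	*reg_id = _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type);
	*lsb = _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type);
}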
+
+#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3
+#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4
+
+#endif /* _input_switch_2400_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h
new file mode 100644
index 000000000000..e26b9ba3356a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _input_system_ctrl_defs_h
+#define _input_system_ctrl_defs_h
+
+#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */
+
+/* --------------------------------------------------*/
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define ISYS_CTRL_NOF_REGS 23
+
+// Register id's of MMIO slave accessible registers
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8
+#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11
+#define ISYS_CTRL_INIT_REG_ID 12
+#define ISYS_CTRL_LAST_COMMAND_REG_ID 13
+#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16
+#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22
+
+/* register reset value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3
+#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ISYS_CTRL_INIT_REG_RSTVAL 0
+#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0
+
+/* register width value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ISYS_CTRL_INIT_REG_WIDTH 3
+#define ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */
+#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1
+
+/* bit definitions */
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+
+/*
+InpSysCaptFramesAcq 1/0 [3:0] - 'b0000
+[7:4] - CaptPortId,
+ CaptA-'b0000
+ CaptB-'b0001
+ CaptC-'b0010
+[31:16] - NOF_frames
+InpSysCaptFrameExt 2/0 [3:0] - 'b0001'
+[7:4] - CaptPortId,
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+
+ 2/1 [31:0] - external capture address
+InpSysAcqFrame 2/0 [3:0] - 'b0010,
+[31:4] - NOF_ext_mem_words
+ 2/1 [31:0] - external memory read start address
+InpSysOverruleON 1/0 [3:0] - 'b0011,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+InpSysOverruleOFF 1/0 [3:0] - 'b0100,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+InpSysOverruleCmd 2/0 [3:0] - 'b0101,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+ 2/1 [31:0] - command token value for port opid
+
+acknowledge tokens:
+
+InpSysAckCFA 1/0 [3:0] - 'b0000
+ [7:4] - CaptPortId,
+ CaptA-'b0000
+ CaptB- 'b0001
+ CaptC-'b0010
+ [31:16] - NOF_frames
+InpSysAckCFE 1/0 [3:0] - 'b0001'
+[7:4] - CaptPortId,
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+
+InpSysAckAF 1/0 [3:0] - 'b0010
+InpSysAckOverruleON 1/0 [3:0] - 'b0011,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+InpSysAckOverruleOFF 1/0 [3:0] - 'b0100,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+InpSysAckOverrule 2/0 [3:0] - 'b0101,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+ 2/1 [31:0] - acknowledge token value from port opid
+
+*/
+
+/* Command and acknowledge tokens IDs */
+#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */
+#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */
+#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */
+#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */
+#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */
+#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */
+
+#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0
+#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1
+#define ISYS_CTRL_ACK_AF_TOKEN_ID 2
+#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3
+#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4
+#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5
+#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6
+
+#define ISYS_CTRL_TOKEN_ID_MSB 3
+#define ISYS_CTRL_TOKEN_ID_LSB 0
+#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7
+#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4
+#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16
+#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8
+
+#define ISYS_CTRL_TOKEN_ID_IDX 0
+#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1)
+#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS)
+#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB
+#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB
+#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1)
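Following the field positions defined above (token ID in bits [3:0], port ID in [7:4], capture count in [31:16]), the first word of a capture command can be composed as in the sketch below; delivering it to the controller is outside the scope of this illustration.

/*
 * Hypothetical sketch: compose the first word of an InpSysCaptFramesAcq
 * command for capture unit B, using the field positions defined above.
 */
static u32 isys_capt_frames_acq_token(u32 nof_frames)
{
	return (ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID << ISYS_CTRL_TOKEN_ID_IDX) |
	       (ISYS_CTRL_PORT_ID_CAPT_B << ISYS_CTRL_PORT_ID_IDX) |
	       (nof_frames << ISYS_CTRL_NOF_CAPT_IDX);
}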
+
+#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */
+#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */
+#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */
+#define ISYS_CTRL_PORT_ID_ACQUISITION       3 /* device ID for acquisition unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */
+
+#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */
+#define ISYS_CTRL_NO_DMA_ACK 0
+#define ISYS_CTRL_NO_CAPT_ACK 16
+
+#endif /* _input_system_ctrl_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_system_defs.h b/drivers/staging/media/atomisp/pci/input_system_defs.h
new file mode 100644
index 000000000000..ae07d1952146
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_defs.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _input_system_defs_h
+#define _input_system_defs_h
+
+/* csi controller modes */
+#define HIVE_CSI_CONFIG_MAIN 0
+#define HIVE_CSI_CONFIG_STEREO1 4
+#define HIVE_CSI_CONFIG_STEREO2 8
+
+/* general purpose register IDs */
+
+/* Stream Multicast select modes */
+#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0
+#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1
+#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2
+
+/* Stream Mux select modes */
+#define HIVE_ISYS_GPREG_MUX_IDX 3
+
+/* streaming monitor status and control */
+#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4
+#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5
+#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6
+#define HIVE_ISYS_GPREG_SRST_IDX 7
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8
+#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9
+#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10
+
+/* Bit numbers of the soft reset register */
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5
+#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6
+#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7
+#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8
+#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9
+/* For ISYS_CTRL, 5 bits are defined to allow soft-reset per sub-controller and top-ctrl */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /*LSB for 5bit vector */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14
+/* -- */
+#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15
+#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16
+#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17
+#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 // includes CIO conv
+#define HIVE_ISYS_GPREG_SRST_DMA_BIT 19
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23
+#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24
+
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5
+
+/* streaming monitor port id's */
+#define HIVE_ISYS_STR_MON_PORT_CAPA 0
+#define HIVE_ISYS_STR_MON_PORT_CAPB 1
+#define HIVE_ISYS_STR_MON_PORT_CAPC 2
+#define HIVE_ISYS_STR_MON_PORT_ACQ 3
+#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4
+#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5
+#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6
+#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7
+#define HIVE_ISYS_STR_MON_PORT_PIXA 8
+#define HIVE_ISYS_STR_MON_PORT_PIXB 9
+
+/* interrupt bit ID's */
+#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0
+#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1
+#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2
+#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/
+#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12
+/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15
+#define HIVE_ISYS_IRQ_CIO2AHB 16
+#define HIVE_ISYS_IRQ_DMA_BIT_ID 17
+#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18
+#define HIVE_ISYS_IRQ_NUM_BITS 19
+
+/* DMA */
+#define HIVE_ISYS_DMA_CHANNEL 0
+#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS_DMA_HEIGHT 1
+#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */
+#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */
+#define HIVE_ISYS_DMA_CROP 0 /* no cropping */
+#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as elem width is same on both side */
+
+#endif /* _input_system_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/input_system_global.h b/drivers/staging/media/atomisp/pci/input_system_global.h
new file mode 100644
index 000000000000..1450964445f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_global.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+
+#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+typedef enum {
+ INPUT_SYSTEM_ERR_NO_ERROR = 0,
+ /* ISP2401 */
+ INPUT_SYSTEM_ERR_CREATE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_TRANSFER_FAIL,
+ INPUT_SYSTEM_ERR_CREATE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_INPUT_PORT_FAIL,
+ /* ISP2400 */
+ INPUT_SYSTEM_ERR_GENERIC,
+ INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET,
+ INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE,
+ INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED,
+} input_system_err_t;
+
+#include "isp2401_input_system_global.h"
+#include "isp2400_input_system_global.h"
+
+#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/input_system_local.h b/drivers/staging/media/atomisp/pci/input_system_local.h
new file mode 100644
index 000000000000..12f7acfeb79c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_local.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#include "type_support.h"
+#include "input_system_global.h"
+
+typedef enum {
+ INPUT_SYSTEM_PORT_A = 0,
+ INPUT_SYSTEM_PORT_B,
+ INPUT_SYSTEM_PORT_C,
+ N_INPUT_SYSTEM_PORTS
+} input_system_csi_port_t;
+
+typedef struct ctrl_unit_cfg_s ctrl_unit_cfg_t;
+typedef struct input_system_network_cfg_s input_system_network_cfg_t;
+typedef struct target_cfg2400_s target_cfg2400_t;
+typedef struct channel_cfg_s channel_cfg_t;
+typedef struct backend_channel_cfg_s backend_channel_cfg_t;
+typedef struct input_system_cfg2400_s input_system_cfg2400_t;
+typedef struct mipi_port_state_s mipi_port_state_t;
+typedef struct rx_channel_state_s rx_channel_state_t;
+typedef struct input_switch_cfg_channel_s input_switch_cfg_channel_t;
+typedef struct input_switch_cfg_s input_switch_cfg_t;
+
+struct ctrl_unit_cfg_s {
+ isp2400_ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID];
+ isp2400_ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID];
+};
+
+struct input_system_network_cfg_s {
+ input_system_connection_t multicast_cfg[N_CAPTURE_UNIT_ID];
+ input_system_multiplex_t mux_cfg;
+ ctrl_unit_cfg_t ctrl_unit_cfg[N_CTRL_UNIT_ID];
+};
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_isp_cfg_t;
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_sp_cfg_t;
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_strm2mem_cfg_t;
+
+struct input_switch_cfg_channel_s {
+ u32 hsync_data_reg[2];
+ u32 vsync_data_reg;
+};
+
+struct backend_channel_cfg_s {
+ u32 fmt_control_word_1; // Format config.
+ u32 fmt_control_word_2;
+ u32 no_side_band;
+};
+
+typedef union {
+ csi_cfg_t csi_cfg;
+ tpg_cfg_t tpg_cfg;
+ prbs_cfg_t prbs_cfg;
+ gpfifo_cfg_t gpfifo_cfg;
+} source_cfg_t;
+
+struct input_switch_cfg_s {
+ u32 hsync_data_reg[N_RX_CHANNEL_ID * 2];
+ u32 vsync_data_reg;
+};
+
+/*
+ * In 2300 ports can be configured independently and stream
+ * formats need to be specified. In 2400, there are only 8
+ * supported configurations but the HW is fused to support
+ * only a single one.
+ *
+ * In 2300 the compressed format types are programmed by the
+ * user. In 2400 all stream formats are encoded on the stream.
+ *
+ * Use the enum to check validity of a user configuration
+ */
+typedef enum {
+ MONO_4L_1L_0L = 0,
+ MONO_3L_1L_0L,
+ MONO_2L_1L_0L,
+ MONO_1L_1L_0L,
+ STEREO_2L_1L_2L,
+ STEREO_3L_1L_1L,
+ STEREO_2L_1L_1L,
+ STEREO_1L_1L_1L,
+ N_RX_MODE
+} rx_mode_t;
+
+#define UNCOMPRESSED_BITS_PER_PIXEL_10 10
+#define UNCOMPRESSED_BITS_PER_PIXEL_12 12
+#define COMPRESSED_BITS_PER_PIXEL_6 6
+#define COMPRESSED_BITS_PER_PIXEL_7 7
+#define COMPRESSED_BITS_PER_PIXEL_8 8
+enum mipi_compressor {
+ MIPI_COMPRESSOR_NONE = 0,
+ MIPI_COMPRESSOR_10_6_10,
+ MIPI_COMPRESSOR_10_7_10,
+ MIPI_COMPRESSOR_10_8_10,
+ MIPI_COMPRESSOR_12_6_12,
+ MIPI_COMPRESSOR_12_7_12,
+ MIPI_COMPRESSOR_12_8_12,
+ N_MIPI_COMPRESSOR_METHODS
+};
+
+typedef enum mipi_compressor mipi_compressor_t;
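The compressor names encode the uncompressed and compressed bit depths (for example, 10_6_10 means 10-bit data carried as 6 bits on the bus). A small sketch mapping each mode to its compressed bits per pixel, using the defines above, follows; the mapping is inferred from the naming and is an assumption.

/*
 * Hypothetical sketch: compressed bits per pixel implied by the
 * compressor names above (0 means no compression).
 */
static int mipi_compressed_bpp(mipi_compressor_t comp)
{
	switch (comp) {
	case MIPI_COMPRESSOR_10_6_10:
	case MIPI_COMPRESSOR_12_6_12:
		return COMPRESSED_BITS_PER_PIXEL_6;
	case MIPI_COMPRESSOR_10_7_10:
	case MIPI_COMPRESSOR_12_7_12:
		return COMPRESSED_BITS_PER_PIXEL_7;
	case MIPI_COMPRESSOR_10_8_10:
	case MIPI_COMPRESSOR_12_8_12:
		return COMPRESSED_BITS_PER_PIXEL_8;
	default:
		return 0;
	}
}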
+
+typedef enum {
+ MIPI_PREDICTOR_NONE = 0,
+ MIPI_PREDICTOR_TYPE1,
+ MIPI_PREDICTOR_TYPE2,
+ N_MIPI_PREDICTOR_TYPES
+} mipi_predictor_t;
+
+typedef struct rx_cfg_s rx_cfg_t;
+
+/*
+ * Applied per port
+ */
+struct rx_cfg_s {
+ rx_mode_t mode; /* The HW config */
+ enum mipi_port_id port; /* The port ID to apply the control on */
+ unsigned int timeout;
+ unsigned int initcount;
+ unsigned int synccount;
+ unsigned int rxcount;
+ mipi_predictor_t comp; /* Just for backward compatibility */
+ bool is_two_ppc;
+};
+
+#include "isp2401_input_system_local.h"
+#include "isp2400_input_system_local.h"
diff --git a/drivers/staging/media/atomisp/pci/input_system_private.h b/drivers/staging/media/atomisp/pci/input_system_private.h
new file mode 100644
index 000000000000..148ba2ca2290
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_private.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#include "isp2401_input_system_private.h"
+#include "isp2400_input_system_private.h"
diff --git a/drivers/staging/media/atomisp/pci/input_system_public.h b/drivers/staging/media/atomisp/pci/input_system_public.h
new file mode 100644
index 000000000000..06b19434b6c0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/input_system_public.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#include "isp2400_input_system_public.h"
diff --git a/drivers/staging/media/atomisp/pci/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/irq_controller_defs.h
new file mode 100644
index 000000000000..4703d991a8b8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/irq_controller_defs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _irq_controller_defs_h
+#define _irq_controller_defs_h
+
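+/*
+ * Word indices of the IRQ controller registers; consecutive registers are
+ * spaced _HRT_IRQ_CONTROLLER_REG_ALIGN bytes apart.
+ */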
+#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0
+#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1
+#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2
+#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3
+#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4
+#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5
+#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6
+
+#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4
+
+#endif /* _irq_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/irq_types_hrt.h b/drivers/staging/media/atomisp/pci/irq_types_hrt.h
new file mode 100644
index 000000000000..3579fdaee37a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/irq_types_hrt.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_
+#define _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_
+
+/*
+ * These are the indices of each interrupt in the interrupt
+ * controller's registers. These can be used as the irq_id
+ * argument to the hrt functions in irq_controller.h.
+ *
+ * The definitions are taken from <system>_defs.h
+ */
+typedef enum hrt_isp_css_irq {
+ hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID,
+ hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID,
+ hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID,
+ hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID,
+ hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID,
+ hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID,
+ hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID,
+ hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID,
+ hrt_isp_css_irq_isp_pmem_error = HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID,
+ hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID,
+ hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID,
+ hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID,
+ hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID,
+ hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID,
+ hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID,
+	/* this must (obviously) be the last one in the enum */
+ hrt_isp_css_irq_num_irqs
+} hrt_isp_css_irq_t;
+
+typedef enum hrt_isp_css_irq_status {
+ hrt_isp_css_irq_status_error,
+ hrt_isp_css_irq_status_more_irqs,
+ hrt_isp_css_irq_status_success
+} hrt_isp_css_irq_status_t;
+
+#endif /* _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c
new file mode 100644
index 000000000000..968766c1bd2a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_aa2.host.h"
+
+/* YUV Anti-Aliasing configuration. */
+const struct ia_css_aa_config default_aa_config = {
+ 8191 /* default should be 0 */
+};
+
+/* Bayer Anti-Aliasing configuration. */
+const struct ia_css_aa_config default_baa_config = {
+ 8191 /* default should be 0 */
+};
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h
new file mode 100644
index 000000000000..2232d725c5f4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_AA_HOST_H
+#define __IA_CSS_AA_HOST_H
+
+#include "ia_css_aa2_types.h"
+#include "ia_css_aa2_param.h"
+
+/* YUV Anti-Aliasing configuration. */
+extern const struct ia_css_aa_config default_aa_config;
+
+/* Bayer Anti-Aliasing configuration. */
+extern const struct ia_css_aa_config default_baa_config;
+
+#endif /* __IA_CSS_AA_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h
new file mode 100644
index 000000000000..be0db7304946
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_AA_PARAM_H
+#define __IA_CSS_AA_PARAM_H
+
+#include "type_support.h"
+
+struct sh_css_isp_aa_params {
+ s32 strength;
+};
+
+#endif /* __IA_CSS_AA_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h
new file mode 100644
index 000000000000..2f568a7062da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_AA2_TYPES_H
+#define __IA_CSS_AA2_TYPES_H
+
+/* @file
+* CSS-API header file for Anti-Aliasing parameters.
+*/
+
+/* Anti-Aliasing configuration.
+ *
+ * This structure is used both for YUV AA and Bayer AA.
+ *
+ * 1. YUV Anti-Aliasing
+ * struct ia_css_aa_config *aa_config
+ *
+ * ISP block: AA2
+ * (ISP1: AA2 is not used.)
+ * ISP2: AA2 should be used, but it is currently not used.
+ *
+ * 2. Bayer Anti-Aliasing
+ * struct ia_css_aa_config *baa_config
+ *
+ * ISP block: BAA2
+ * ISP1: BAA2 is used.
+ * ISP2: BAA2 is used.
+ */
+struct ia_css_aa_config {
+ u16 strength; /** Strength of the filter.
+ u0.13, [0,8191],
+ default/ineffective 0 */
+};
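+
+/*
+ * Example: strength is a u0.13 fixed-point fraction, so 8191 (0x1FFF)
+ * corresponds to almost 1.0 (maximum filtering), while 0 leaves the
+ * filter ineffective.
+ */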
+
+#endif /* __IA_CSS_AA2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c
new file mode 100644
index 000000000000..899d566234b9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_anr.host.h"
+
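+/*
+ * Default ANR configuration, in field order of struct ia_css_anr_config:
+ * threshold = 10, the 4x4x4 threshold table, and factors = {10, 20, 30}.
+ */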
+const struct ia_css_anr_config default_anr_config = {
+ 10,
+ {
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4
+ },
+ {10, 20, 30}
+};
+
+void
+ia_css_anr_encode(
+ struct sh_css_isp_anr_params *to,
+ const struct ia_css_anr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->threshold = from->threshold;
+}
+
+void
+ia_css_anr_dump(
+ const struct sh_css_isp_anr_params *anr,
+ unsigned int level)
+{
+ if (!anr) return;
+	ia_css_debug_dtrace(level, "Advanced Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "anr_threshold", anr->threshold);
+}
+
+void
+ia_css_anr_debug_dtrace(
+ const struct ia_css_anr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d\n",
+ config->threshold);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h
new file mode 100644
index 000000000000..4f77900871c8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ANR_HOST_H
+#define __IA_CSS_ANR_HOST_H
+
+#include "ia_css_anr_types.h"
+#include "ia_css_anr_param.h"
+
+extern const struct ia_css_anr_config default_anr_config;
+
+void
+ia_css_anr_encode(
+ struct sh_css_isp_anr_params *to,
+ const struct ia_css_anr_config *from,
+ unsigned int size);
+
+void
+ia_css_anr_dump(
+ const struct sh_css_isp_anr_params *anr,
+ unsigned int level);
+
+void
+ia_css_anr_debug_dtrace(
+	const struct ia_css_anr_config *config,
+	unsigned int level);
+
+#endif /* __IA_CSS_ANR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h
new file mode 100644
index 000000000000..39ebcb0efc10
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ANR_PARAM_H
+#define __IA_CSS_ANR_PARAM_H
+
+#include "type_support.h"
+
+/* ANR (Advanced Noise Reduction) */
+struct sh_css_isp_anr_params {
+ s32 threshold;
+};
+
+#endif /* __IA_CSS_ANR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
new file mode 100644
index 000000000000..bc2a78dff004
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ANR_TYPES_H
+#define __IA_CSS_ANR_TYPES_H
+
+/* @file
+ * CSS-API header file for Advanced Noise Reduction kernel v1
+ */
+
+#include <linux/math.h>
+
+/* Application specific DMA settings */
+#define ANR_BPP 10
+#define ANR_ELEMENT_BITS round_up(ANR_BPP, 8)
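+/* round_up(ANR_BPP, 8) == 16: each 10-bit element occupies 16 bits for DMA */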
+
+/* Advanced Noise Reduction configuration.
+ * This is also known as Low-Light.
+ */
+struct ia_css_anr_config {
+ s32 threshold; /** Threshold */
+ s32 thresholds[4 * 4 * 4];
+ s32 factors[3];
+};
+
+#endif /* __IA_CSS_ANR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c
new file mode 100644
index 000000000000..09599884bdae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_anr2.host.h"
+
+void
+ia_css_anr2_vmem_encode(
+ struct ia_css_isp_anr2_params *to,
+ const struct ia_css_anr_thres *from,
+ size_t size)
+{
+ unsigned int i;
+
+ (void)size;
+ for (i = 0; i < ANR_PARAM_SIZE; i++) {
+ unsigned int j;
+
+ for (j = 0; j < ISP_VEC_NELEMS; j++) {
+ to->data[i][j] = from->data[i * ISP_VEC_NELEMS + j];
+ }
+ }
+}
+
+void
+ia_css_anr2_debug_dtrace(
+ const struct ia_css_anr_thres *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h
new file mode 100644
index 000000000000..2b1105f21c1e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ANR2_HOST_H
+#define __IA_CSS_ANR2_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_anr2_types.h"
+#include "ia_css_anr2_param.h"
+#include "ia_css_anr2_table.host.h"
+
+void
+ia_css_anr2_vmem_encode(
+ struct ia_css_isp_anr2_params *to,
+ const struct ia_css_anr_thres *from,
+ size_t size);
+
+void
+ia_css_anr2_debug_dtrace(
+	const struct ia_css_anr_thres *config,
+	unsigned int level);
+
+#endif /* __IA_CSS_ANR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h
new file mode 100644
index 000000000000..f0e0f2a0f30b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ANR2_PARAM_H
+#define __IA_CSS_ANR2_PARAM_H
+
+#include "vmem.h"
+#include "ia_css_anr2_types.h"
+
+/* Advanced Noise Reduction (ANR) thresholds */
+
+struct ia_css_isp_anr2_params {
+ VMEM_ARRAY(data, ANR_PARAM_SIZE * ISP_VEC_NELEMS);
+};
+
+#endif /* __IA_CSS_ANR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c
new file mode 100644
index 000000000000..87cc5cb5dd5d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_anr2_table.host.h"
+
+#if 1
+const struct ia_css_anr_thres default_anr_thres = {
+ {
+ 128, 384, 640, 896, 896, 640, 384, 128, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 128, 384, 640, 896, 896, 640, 384, 128,
+ 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20,
+ 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40,
+ 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60,
+ 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50,
+ 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100,
+ 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150,
+ 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30,
+ 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60,
+ 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90,
+ 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40,
+ 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80,
+ 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120
+ }
+};
+#else
+const struct ia_css_anr_thres default_anr_thres = {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ }
+};
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h
new file mode 100644
index 000000000000..bda174abd54d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ANR2_TABLE_HOST_H
+#define __IA_CSS_ANR2_TABLE_HOST_H
+
+#include "ia_css_anr2_types.h"
+
+extern const struct ia_css_anr_thres default_anr_thres;
+
+#endif /* __IA_CSS_ANR2_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h
new file mode 100644
index 000000000000..2c7c3c3fd8ce
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ANR2_TYPES_H
+#define __IA_CSS_ANR2_TYPES_H
+
+/* @file
+* CSS-API header file for Advanced Noise Reduction kernel v2
+*/
+
+#include "type_support.h"
+
+#define ANR_PARAM_SIZE 13
+
+/* Advanced Noise Reduction (ANR) thresholds */
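+/* data[] holds ANR_PARAM_SIZE vectors of 64 elements; see ia_css_anr2_vmem_encode() */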
+struct ia_css_anr_thres {
+ s16 data[13 * 64];
+};
+
+#endif /* __IA_CSS_ANR2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
new file mode 100644
index 000000000000..69c87e53f3c2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+
+#include "ia_css_types.h"
+#include "sh_css_internal.h"
+#include "assert_support.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_bh.host.h"
+
+void
+ia_css_bh_hmem_decode(
+ struct ia_css_3a_rgby_output *out_ptr,
+ const struct ia_css_bh_table *hmem_buf)
+{
+ int i;
+
+ /*
+ * No weighted histogram, hence no grid definition
+ */
+ if (!hmem_buf)
+ return;
+ assert(sizeof_hmem(HMEM0_ID) == sizeof(*hmem_buf));
+
+ /* Deinterleave */
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ out_ptr[i].r = hmem_buf->hmem[BH_COLOR_R][i];
+ out_ptr[i].g = hmem_buf->hmem[BH_COLOR_G][i];
+ out_ptr[i].b = hmem_buf->hmem[BH_COLOR_B][i];
+ out_ptr[i].y = hmem_buf->hmem[BH_COLOR_Y][i];
+ /* sh_css_print ("hmem[%d] = %d, %d, %d, %d\n",
+ i, out_ptr[i].r, out_ptr[i].g, out_ptr[i].b, out_ptr[i].y); */
+ }
+}
+
+void
+ia_css_bh_encode(
+ struct sh_css_isp_bh_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
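+	/*
+	 * uDIGIT_FITTING() rescales the 16-bit user-supplied coefficients to
+	 * the SH_CSS_AE_YCOEF_SHIFT fixed-point precision used by the ISP.
+	 */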
+ /* coefficients to calculate Y */
+ to->y_coef_r =
+ uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_g =
+ uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_b =
+ uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT);
+}
+
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h
new file mode 100644
index 000000000000..36b360cfe62e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BH_HOST_H
+#define __IA_CSS_BH_HOST_H
+
+#include "ia_css_bh_param.h"
+#include "s3a/s3a_1.0/ia_css_s3a_types.h"
+
+void
+ia_css_bh_hmem_decode(
+ struct ia_css_3a_rgby_output *out_ptr,
+ const struct ia_css_bh_table *hmem_buf);
+
+void
+ia_css_bh_encode(
+ struct sh_css_isp_bh_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_BH_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h
new file mode 100644
index 000000000000..634783fd0b5b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BH_PARAM_H
+#define __IA_CSS_BH_PARAM_H
+
+#include "type_support.h"
+
+#ifndef PIPE_GENERATION
+#define __INLINE_HMEM__
+#include "hmem.h"
+#endif
+
+#include "ia_css_bh_types.h"
+
+/* AE (3A Support) */
+struct sh_css_isp_bh_params {
+ /* coefficients to calculate Y */
+ s32 y_coef_r;
+ s32 y_coef_g;
+ s32 y_coef_b;
+};
+
+/* This should be hmem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_bh_hmem_params {
+ u32 bh[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
+};
+
+#endif /* __IA_CSS_BH_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h
new file mode 100644
index 000000000000..c717c636f666
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BH_TYPES_H
+#define __IA_CSS_BH_TYPES_H
+
+/* Number of elements in the BH table.
+ * Should be consistent with hmem.h
+ */
+#define IA_CSS_HMEM_BH_TABLE_SIZE ISP_HIST_DEPTH
+#define IA_CSS_HMEM_BH_UNIT_SIZE (ISP_HIST_DEPTH / ISP_HIST_COMPONENTS)
+
+#define BH_COLOR_R (0)
+#define BH_COLOR_G (1)
+#define BH_COLOR_B (2)
+#define BH_COLOR_Y (3)
+#define BH_COLOR_NUM (4)
+
+/* BH table */
+struct ia_css_bh_table {
+ u32 hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
+};
+
+#endif /* __IA_CSS_BH_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c
new file mode 100644
index 000000000000..cd867937ee13
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "type_support.h"
+#include "ia_css_bnlm.host.h"
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h" /* ia_css_debug_dtrace() */
+#endif
+#include <assert_support.h>
+
+#define BNLM_DIV_LUT_SIZE (12)
+static const s32 div_lut_nearests[BNLM_DIV_LUT_SIZE] = {
+ 0, 454, 948, 1484, 2070, 2710, 3412, 4184, 5035, 5978, 7025, 8191
+};
+
+static const s32 div_lut_slopes[BNLM_DIV_LUT_SIZE] = {
+ -7760, -6960, -6216, -5536, -4912, -4344, -3832, -3360, -2936, -2552, -2208, -2208
+};
+
+static const s32 div_lut_intercepts[BNLM_DIV_LUT_SIZE] = {
+ 8184, 7752, 7336, 6928, 6536, 6152, 5776, 5416, 5064, 4728, 4408, 4408
+};
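+
+/*
+ * The three tables above form a piecewise-linear division approximation:
+ * div_lut_nearests holds the segment break points, div_lut_slopes and
+ * div_lut_intercepts the per-segment line parameters. They are packed into
+ * VMEM by ia_css_bnlm_vmem_encode() below.
+ */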
+
+/* Encodes a look-up table from BNLM public parameters to VMEM parameters.
+ * Parameters:
+ *	lut	: output; bnlm_lut struct receiving the encoded VMEM LUT
+ *	lut_thr : array with the LUT threshold values (lut_size - 1 entries)
+ *	lut_val : array with the LUT output values (lut_size entries)
+ *	lut_size: size of the lut_val array
+ */
+static inline void
+bnlm_lut_encode(struct bnlm_lut *lut, const int32_t *lut_thr,
+ const s32 *lut_val, const uint32_t lut_size)
+{
+ u32 blk, i;
+ const u32 block_size = 16;
+ const u32 total_blocks = ISP_VEC_NELEMS / block_size;
+
+ /* Create VMEM LUTs from the threshold and value arrays.
+ *
+ * Min size of the LUT is 2 entries.
+ *
+ * Max size of the LUT is 16 entries, so that the LUT can fit into a
+ * single group of 16 elements inside a vector.
+ * Then these elements are copied into other groups inside the same
+ * vector. If the LUT size is less than 16, then remaining elements are
+ * set to 0.
+ */
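+	/*
+	 * E.g. if ISP_VEC_NELEMS is 64 and block_size is 16, the LUT encoded
+	 * below is copied from the first 16-element block into the remaining
+	 * three blocks of the vector.
+	 */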
+ assert((lut_size >= 2) && (lut_size <= block_size));
+ /* array lut_thr has (lut_size-1) entries */
+ for (i = 0; i < lut_size - 2; i++) {
+ /* Check if the lut_thr is monotonically increasing */
+ assert(lut_thr[i] <= lut_thr[i + 1]);
+ }
+
+ /* Initialize */
+ for (i = 0; i < total_blocks * block_size; i++) {
+ lut->thr[0][i] = 0;
+ lut->val[0][i] = 0;
+ }
+
+ /* Copy all data */
+ for (i = 0; i < lut_size - 1; i++) {
+ lut->thr[0][i] = lut_thr[i];
+ lut->val[0][i] = lut_val[i];
+ }
+ lut->val[0][i] = lut_val[i]; /* val has one more element than thr */
+
+ /* Copy data from first block to all blocks */
+ for (blk = 1; blk < total_blocks; blk++) {
+ u32 blk_offset = blk * block_size;
+
+ for (i = 1; i < lut_size; i++) {
+ lut->thr[0][blk_offset + i] = lut->thr[0][i];
+ lut->val[0][blk_offset + i] = lut->val[0][i];
+ }
+ }
+}
+
+/*
+ * - Encodes BNLM public parameters into VMEM parameters
+ * - Generates VMEM parameters which are needed internally by the ISP
+ */
+void
+ia_css_bnlm_vmem_encode(
+ struct bnlm_vmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size)
+{
+ int i;
+ (void)size;
+
+ /* Initialize LUTs in VMEM parameters */
+ bnlm_lut_encode(&to->mu_root_lut, from->mu_root_lut_thr, from->mu_root_lut_val,
+ 16);
+ bnlm_lut_encode(&to->sad_norm_lut, from->sad_norm_lut_thr,
+ from->sad_norm_lut_val, 16);
+ bnlm_lut_encode(&to->sig_detail_lut, from->sig_detail_lut_thr,
+ from->sig_detail_lut_val, 16);
+ bnlm_lut_encode(&to->sig_rad_lut, from->sig_rad_lut_thr, from->sig_rad_lut_val,
+ 16);
+ bnlm_lut_encode(&to->rad_pow_lut, from->rad_pow_lut_thr, from->rad_pow_lut_val,
+ 16);
+ bnlm_lut_encode(&to->nl_0_lut, from->nl_0_lut_thr, from->nl_0_lut_val, 16);
+ bnlm_lut_encode(&to->nl_1_lut, from->nl_1_lut_thr, from->nl_1_lut_val, 16);
+ bnlm_lut_encode(&to->nl_2_lut, from->nl_2_lut_thr, from->nl_2_lut_val, 16);
+ bnlm_lut_encode(&to->nl_3_lut, from->nl_3_lut_thr, from->nl_3_lut_val, 16);
+
+ /* Initialize arrays in VMEM parameters */
+ memset(to->nl_th, 0, sizeof(to->nl_th));
+ to->nl_th[0][0] = from->nl_th[0];
+ to->nl_th[0][1] = from->nl_th[1];
+ to->nl_th[0][2] = from->nl_th[2];
+
+ memset(to->match_quality_max_idx, 0, sizeof(to->match_quality_max_idx));
+ to->match_quality_max_idx[0][0] = from->match_quality_max_idx[0];
+ to->match_quality_max_idx[0][1] = from->match_quality_max_idx[1];
+ to->match_quality_max_idx[0][2] = from->match_quality_max_idx[2];
+ to->match_quality_max_idx[0][3] = from->match_quality_max_idx[3];
+
+ bnlm_lut_encode(&to->div_lut, div_lut_nearests, div_lut_slopes,
+ BNLM_DIV_LUT_SIZE);
+ memset(to->div_lut_intercepts, 0, sizeof(to->div_lut_intercepts));
+ for (i = 0; i < BNLM_DIV_LUT_SIZE; i++) {
+ to->div_lut_intercepts[0][i] = div_lut_intercepts[i];
+ }
+
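+	/*
+	 * Pre-computed powers of two, used to emulate a per-element variable
+	 * left shift by multiplication (see the note in ia_css_bnlm_param.h).
+	 */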
+ memset(to->power_of_2, 0, sizeof(to->power_of_2));
+ for (i = 0; i < (ISP_VEC_ELEMBITS - 1); i++) {
+ to->power_of_2[0][i] = 1 << i;
+ }
+}
+
+/* - Encodes BNLM public parameters into DMEM parameters */
+void
+ia_css_bnlm_encode(
+ struct bnlm_dmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size)
+{
+ (void)size;
+ to->rad_enable = from->rad_enable;
+ to->rad_x_origin = from->rad_x_origin;
+ to->rad_y_origin = from->rad_y_origin;
+ to->avg_min_th = from->avg_min_th;
+ to->max_min_th = from->max_min_th;
+
+ to->exp_coeff_a = from->exp_coeff_a;
+ to->exp_coeff_b = from->exp_coeff_b;
+ to->exp_coeff_c = from->exp_coeff_c;
+ to->exp_exponent = from->exp_exponent;
+}
+
+/* Prints debug traces for BNLM public parameters */
+void
+ia_css_bnlm_debug_trace(
+ const struct ia_css_bnlm_config *config,
+ unsigned int level)
+{
+ if (!config)
+ return;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(level, "BNLM:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_enable", config->rad_enable);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_x_origin",
+ config->rad_x_origin);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_y_origin",
+ config->rad_y_origin);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "avg_min_th", config->avg_min_th);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "max_min_th", config->max_min_th);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_a",
+ config->exp_coeff_a);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_b",
+ config->exp_coeff_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_c",
+ config->exp_coeff_c);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_exponent",
+ config->exp_exponent);
+
+ /* ToDo: print traces for LUTs */
+#endif /* IA_CSS_NO_DEBUG */
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h
new file mode 100644
index 000000000000..2e47ce5e5524
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BNLM_HOST_H
+#define __IA_CSS_BNLM_HOST_H
+
+#include "ia_css_bnlm_types.h"
+#include "ia_css_bnlm_param.h"
+
+void
+ia_css_bnlm_vmem_encode(
+ struct bnlm_vmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size);
+
+void
+ia_css_bnlm_encode(
+ struct bnlm_dmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnlm_debug_trace(
+ const struct ia_css_bnlm_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_BNLM_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h
new file mode 100644
index 000000000000..1d389a60d2bc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BNLM_PARAM_H
+#define __IA_CSS_BNLM_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+struct bnlm_lut {
+ VMEM_ARRAY(thr, ISP_VEC_NELEMS); /* thresholds */
+ VMEM_ARRAY(val, ISP_VEC_NELEMS); /* values */
+};
+
+struct bnlm_vmem_params {
+ VMEM_ARRAY(nl_th, ISP_VEC_NELEMS);
+ VMEM_ARRAY(match_quality_max_idx, ISP_VEC_NELEMS);
+ struct bnlm_lut mu_root_lut;
+ struct bnlm_lut sad_norm_lut;
+ struct bnlm_lut sig_detail_lut;
+ struct bnlm_lut sig_rad_lut;
+ struct bnlm_lut rad_pow_lut;
+ struct bnlm_lut nl_0_lut;
+ struct bnlm_lut nl_1_lut;
+ struct bnlm_lut nl_2_lut;
+ struct bnlm_lut nl_3_lut;
+
+	/* LUTs used for division approximation */
+ struct bnlm_lut div_lut;
+
+ VMEM_ARRAY(div_lut_intercepts, ISP_VEC_NELEMS);
+
+	/* 240x does not have an ISP instruction to left shift each element of a
+	 * vector by a different shift value. Hence this is simulated by
+	 * multiplying the elements by the required power of two (2^shift). */
+ VMEM_ARRAY(power_of_2, ISP_VEC_NELEMS);
+};
+
+/* BNLM ISP parameters */
+struct bnlm_dmem_params {
+ bool rad_enable;
+ s32 rad_x_origin;
+ s32 rad_y_origin;
+ s32 avg_min_th;
+ s32 max_min_th;
+
+ s32 exp_coeff_a;
+ u32 exp_coeff_b;
+ s32 exp_coeff_c;
+ u32 exp_exponent;
+};
+
+#endif /* __IA_CSS_BNLM_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h
new file mode 100644
index 000000000000..150d3960caee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BNLM_TYPES_H
+#define __IA_CSS_BNLM_TYPES_H
+
+/* @file
+* CSS-API header file for Bayer Non-Linear Mean parameters.
+*/
+
+#include "type_support.h" /* int32_t */
+
+/* Bayer Non-Linear Mean configuration
+ *
+ * \brief BNLM public parameters.
+ * \details Struct with all parameters for the BNLM kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: BNLM is used.
+ */
+struct ia_css_bnlm_config {
+ bool rad_enable; /** Enable a radial dependency in a weight calculation */
+ s32 rad_x_origin; /** Initial x coordinate for a radius calculation */
+	s32 rad_y_origin;	/** Initial y coordinate for a radius calculation */
+	/* Threshold on the average of the weights: if the average < this, do not denoise the pixel */
+	s32 avg_min_th;
+	/* Threshold on the maximum weight: if the maximum < this, do not denoise the pixel */
+	s32 max_min_th;
+
+ /**@{*/
+	/* Coefficients for the exp() approximation of the form (1 + x / N)^N,
+	 * fitted to the default exp_lut in the BNLM parameter sheet.
+	 */
+ s32 exp_coeff_a;
+ u32 exp_coeff_b;
+ s32 exp_coeff_c;
+ u32 exp_exponent;
+ /**@}*/
+
+ s32 nl_th[3]; /** Detail thresholds */
+
+ /* Index for n-th maximum candidate weight for each detail group */
+ s32 match_quality_max_idx[4];
+
+ /**@{*/
+ /* A lookup table for 1/sqrt(1+mu) approximation */
+ s32 mu_root_lut_thr[15];
+ s32 mu_root_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table for SAD normalization */
+ s32 sad_norm_lut_thr[15];
+ s32 sad_norm_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table that models a weight's dependency on textures */
+ s32 sig_detail_lut_thr[15];
+ s32 sig_detail_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table that models a weight's dependency on a pixel's radial distance */
+ s32 sig_rad_lut_thr[15];
+ s32 sig_rad_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* A lookup table to control denoise power depending on a pixel's radial distance */
+ s32 rad_pow_lut_thr[15];
+ s32 rad_pow_lut_val[16];
+ /**@}*/
+ /**@{*/
+	/* Non-linear transfer functions to calculate the blending coefficient, depending on the detail group */
+ /* detail group 0 */
+ /**@{*/
+ s32 nl_0_lut_thr[15];
+ s32 nl_0_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* detail group 1 */
+ s32 nl_1_lut_thr[15];
+ s32 nl_1_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* detail group 2 */
+ s32 nl_2_lut_thr[15];
+ s32 nl_2_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /* detail group 3 */
+ s32 nl_3_lut_thr[15];
+ s32 nl_3_lut_val[16];
+ /**@}*/
+ /**@}*/
+};
+
+#endif /* __IA_CSS_BNLM_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c
new file mode 100644
index 000000000000..b3a9b2f794a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "type_support.h"
+#include "ia_css_bnr2_2.host.h"
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h" /* ia_css_debug_dtrace() */
+#endif
+
+/* Default kernel parameters. */
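+/* Positional initializer; values follow the field order of struct ia_css_bnr2_2_config */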
+const struct ia_css_bnr2_2_config default_bnr2_2_config = {
+ 200,
+ 200,
+ 200,
+ 0,
+ 0,
+ 0,
+ 200,
+ 200,
+ 200,
+ 0,
+ 0,
+ 0,
+ 0,
+ 4096,
+ 8191,
+ 128,
+ 1,
+ 0,
+ 0,
+ 0,
+ 8191,
+ 0,
+ 8191
+};
+
+void
+ia_css_bnr2_2_encode(
+ struct sh_css_isp_bnr2_2_params *to,
+ const struct ia_css_bnr2_2_config *from,
+ size_t size)
+{
+ (void)size;
+ to->d_var_gain_r = from->d_var_gain_r;
+ to->d_var_gain_g = from->d_var_gain_g;
+ to->d_var_gain_b = from->d_var_gain_b;
+ to->d_var_gain_slope_r = from->d_var_gain_slope_r;
+ to->d_var_gain_slope_g = from->d_var_gain_slope_g;
+ to->d_var_gain_slope_b = from->d_var_gain_slope_b;
+
+ to->n_var_gain_r = from->n_var_gain_r;
+ to->n_var_gain_g = from->n_var_gain_g;
+ to->n_var_gain_b = from->n_var_gain_b;
+ to->n_var_gain_slope_r = from->n_var_gain_slope_r;
+ to->n_var_gain_slope_g = from->n_var_gain_slope_g;
+ to->n_var_gain_slope_b = from->n_var_gain_slope_b;
+
+ to->dir_thres = from->dir_thres;
+ to->dir_thres_w = from->dir_thres_w;
+ to->var_offset_coef = from->var_offset_coef;
+
+ to->dir_gain = from->dir_gain;
+ to->detail_gain = from->detail_gain;
+ to->detail_gain_divisor = from->detail_gain_divisor;
+ to->detail_level_offset = from->detail_level_offset;
+
+ to->d_var_th_min = from->d_var_th_min;
+ to->d_var_th_max = from->d_var_th_max;
+ to->n_var_th_min = from->n_var_th_min;
+ to->n_var_th_max = from->n_var_th_max;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnr2_2_debug_dtrace(
+ const struct ia_css_bnr2_2_config *bnr,
+ unsigned int level)
+{
+ if (!bnr)
+ return;
+
+ ia_css_debug_dtrace(level, "Bayer Noise Reduction 2.2:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_r", bnr->d_var_gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_g", bnr->d_var_gain_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_b", bnr->d_var_gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_r",
+ bnr->d_var_gain_slope_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_g",
+ bnr->d_var_gain_slope_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_b",
+ bnr->d_var_gain_slope_b);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_r", bnr->n_var_gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_g", bnr->n_var_gain_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_b", bnr->n_var_gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_r",
+ bnr->n_var_gain_slope_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_g",
+ bnr->n_var_gain_slope_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_b",
+ bnr->n_var_gain_slope_b);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres", bnr->dir_thres);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres_w", bnr->dir_thres_w);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "var_offset_coef",
+ bnr->var_offset_coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_gain", bnr->dir_gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain", bnr->detail_gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain_divisor",
+ bnr->detail_gain_divisor);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_level_offset",
+ bnr->detail_level_offset);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_min", bnr->d_var_th_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_max", bnr->d_var_th_max);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_min", bnr->n_var_th_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_max", bnr->n_var_th_max);
+}
+#endif /* IA_CSS_NO_DEBUG */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h
new file mode 100644
index 000000000000..08d2c4c7f493
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+#ifndef __IA_CSS_BNR2_2_HOST_H
+#define __IA_CSS_BNR2_2_HOST_H
+
+#include "ia_css_bnr2_2_types.h"
+#include "ia_css_bnr2_2_param.h"
+
+extern const struct ia_css_bnr2_2_config default_bnr2_2_config;
+
+void
+ia_css_bnr2_2_encode(
+ struct sh_css_isp_bnr2_2_params *to,
+ const struct ia_css_bnr2_2_config *from,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnr2_2_debug_dtrace(
+ const struct ia_css_bnr2_2_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_BNR2_2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h
new file mode 100644
index 000000000000..1b1c1beae667
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BNR2_2_PARAM_H
+#define __IA_CSS_BNR2_2_PARAM_H
+
+#include "type_support.h"
+
+/* BNR (Bayer Noise Reduction) ISP parameters */
+struct sh_css_isp_bnr2_2_params {
+ s32 d_var_gain_r;
+ s32 d_var_gain_g;
+ s32 d_var_gain_b;
+ s32 d_var_gain_slope_r;
+ s32 d_var_gain_slope_g;
+ s32 d_var_gain_slope_b;
+ s32 n_var_gain_r;
+ s32 n_var_gain_g;
+ s32 n_var_gain_b;
+ s32 n_var_gain_slope_r;
+ s32 n_var_gain_slope_g;
+ s32 n_var_gain_slope_b;
+ s32 dir_thres;
+ s32 dir_thres_w;
+ s32 var_offset_coef;
+ s32 dir_gain;
+ s32 detail_gain;
+ s32 detail_gain_divisor;
+ s32 detail_level_offset;
+ s32 d_var_th_min;
+ s32 d_var_th_max;
+ s32 n_var_th_min;
+ s32 n_var_th_max;
+};
+
+#endif /* __IA_CSS_BNR2_2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
new file mode 100644
index 000000000000..387909c35c1a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BNR2_2_TYPES_H
+#define __IA_CSS_BNR2_2_TYPES_H
+
+/* @file
+* CSS-API header file for Bayer Noise Reduction parameters.
+*/
+
+#include "type_support.h" /* int32_t */
+
+/* Bayer Noise Reduction 2.2 configuration
+ *
+ * \brief BNR2_2 public parameters.
+ * \details Struct with all parameters for the BNR2.2 kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: BNR2.2 is used.
+ */
+struct ia_css_bnr2_2_config {
+ /**@{*/
+ /* Directional variance gain for R/G/B components in dark region */
+ s32 d_var_gain_r;
+ s32 d_var_gain_g;
+ s32 d_var_gain_b;
+ /**@}*/
+ /**@{*/
+ /* Slope of Directional variance gain between dark and bright region */
+ s32 d_var_gain_slope_r;
+ s32 d_var_gain_slope_g;
+ s32 d_var_gain_slope_b;
+ /**@}*/
+ /**@{*/
+ /* Non-Directional variance gain for R/G/B components in dark region */
+ s32 n_var_gain_r;
+ s32 n_var_gain_g;
+ s32 n_var_gain_b;
+ /**@}*/
+ /**@{*/
+ /* Slope of Non-Directional variance gain between dark and bright region */
+ s32 n_var_gain_slope_r;
+ s32 n_var_gain_slope_g;
+ s32 n_var_gain_slope_b;
+ /**@}*/
+
+ s32 dir_thres; /** Threshold for directional filtering */
+ s32 dir_thres_w; /** Threshold width for directional filtering */
+ s32 var_offset_coef; /** Variance offset coefficient */
+ s32 dir_gain; /** Gain for directional coefficient */
+ s32 detail_gain; /** Gain for low contrast texture control */
+ s32 detail_gain_divisor; /** Gain divisor for low contrast texture control */
+ s32 detail_level_offset; /** Bias value for low contrast texture control */
+	s32 d_var_th_min;	/** Minimum clipping value for directional variance */
+	s32 d_var_th_max;	/** Maximum clipping value for directional variance */
+	s32 n_var_th_min;	/** Minimum clipping value for non-directional variance */
+	s32 n_var_th_max;	/** Maximum clipping value for non-directional variance */
+};
+
+#endif /* __IA_CSS_BNR2_2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
new file mode 100644
index 000000000000..0c08077c741b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_bnr.host.h"
+
+void
+ia_css_bnr_encode(
+ struct sh_css_isp_bnr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* BNR (Bayer Noise Reduction) */
+ to->threshold_low =
+ uDIGIT_FITTING(from->direction, 16, SH_CSS_BAYER_BITS);
+ to->threshold_width_log2 = uFRACTION_BITS_FITTING(8);
+ to->threshold_width =
+ 1 << to->threshold_width_log2;
+ to->gain_all =
+ uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT);
+ to->gain_dir =
+ uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT);
+ to->clip = uDIGIT_FITTING(16384U, 16, SH_CSS_BAYER_BITS);
+}
+
+void
+ia_css_bnr_dump(
+ const struct sh_css_isp_bnr_params *bnr,
+ unsigned int level)
+{
+ if (!bnr)
+ return;
+ ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_gain_all", bnr->gain_all);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_gain_dir", bnr->gain_dir);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_low",
+ bnr->threshold_low);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_width_log2",
+ bnr->threshold_width_log2);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_width",
+ bnr->threshold_width);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_clip", bnr->clip);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h
new file mode 100644
index 000000000000..a5f0a12f42b1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BNR_HOST_H
+#define __IA_CSS_BNR_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ynr/ynr_1.0/ia_css_ynr_types.h"
+#include "ia_css_bnr_param.h"
+
+void
+ia_css_bnr_encode(
+ struct sh_css_isp_bnr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size);
+
+void
+ia_css_bnr_dump(
+ const struct sh_css_isp_bnr_params *bnr,
+ unsigned int level);
+
+#endif /* __IA_CSS_BNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h
new file mode 100644
index 000000000000..2afda7192171
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BNR_PARAM_H
+#define __IA_CSS_BNR_PARAM_H
+
+#include "type_support.h"
+
+/* BNR (Bayer Noise Reduction) */
+struct sh_css_isp_bnr_params {
+ s32 gain_all;
+ s32 gain_dir;
+ s32 threshold_low;
+ s32 threshold_width_log2;
+ s32 threshold_width;
+ s32 clip;
+};
+
+#endif /* __IA_CSS_BNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c
new file mode 100644
index 000000000000..54789d28a9bc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_cnr.host.h"
+
+/* Keep the interface here; it is not enabled yet because the host does not know the size of the individual state */
+void
+ia_css_init_cnr_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h
new file mode 100644
index 000000000000..d4ffe59ff8a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CNR_HOST_H
+#define __IA_CSS_CNR_HOST_H
+
+#include "ia_css_cnr_param.h"
+
+void
+ia_css_init_cnr_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size);
+
+#endif /* __IA_CSS_CNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h
new file mode 100644
index 000000000000..6a8daab667a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CNR_PARAM_H
+#define __IA_CSS_CNR_PARAM_H
+
+#include "type_support.h"
+
+/* CNR (Chroma Noise Reduction) */
+/* Reuse YNR1 param structure */
+#include "../../ynr/ynr_1.0/ia_css_ynr_param.h"
+
+#endif /* __IA_CSS_CNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c
new file mode 100644
index 000000000000..a333c3aa5709
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_cnr2.host.h"
+
+const struct ia_css_cnr_config default_cnr_config = {
+ 0,
+ 0,
+ 100,
+ 100,
+ 100,
+ 50,
+ 50,
+ 50
+};
+
+void
+ia_css_cnr_encode(
+ struct sh_css_isp_cnr_params *to,
+ const struct ia_css_cnr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->coring_u = from->coring_u;
+ to->coring_v = from->coring_v;
+ to->sense_gain_vy = from->sense_gain_vy;
+ to->sense_gain_vu = from->sense_gain_vu;
+ to->sense_gain_vv = from->sense_gain_vv;
+ to->sense_gain_hy = from->sense_gain_hy;
+ to->sense_gain_hu = from->sense_gain_hu;
+ to->sense_gain_hv = from->sense_gain_hv;
+}
+
+void
+ia_css_cnr_dump(
+ const struct sh_css_isp_cnr_params *cnr,
+ unsigned int level);
+
+void
+ia_css_cnr_debug_dtrace(
+ const struct ia_css_cnr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.coring_u=%d, config.coring_v=%d, config.sense_gain_vy=%d, config.sense_gain_hy=%d, config.sense_gain_vu=%d, config.sense_gain_hu=%d, config.sense_gain_vv=%d, config.sense_gain_hv=%d\n",
+ config->coring_u, config->coring_v,
+ config->sense_gain_vy, config->sense_gain_hy,
+ config->sense_gain_vu, config->sense_gain_hu,
+ config->sense_gain_vv, config->sense_gain_hv);
+}
+
+void
+ia_css_init_cnr2_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h
new file mode 100644
index 000000000000..213009192f09
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CNR2_HOST_H
+#define __IA_CSS_CNR2_HOST_H
+
+#include "ia_css_cnr2_types.h"
+#include "ia_css_cnr2_param.h"
+
+extern const struct ia_css_cnr_config default_cnr_config;
+
+void
+ia_css_cnr_encode(
+ struct sh_css_isp_cnr_params *to,
+ const struct ia_css_cnr_config *from,
+ unsigned int size);
+
+void
+ia_css_cnr_dump(
+ const struct sh_css_isp_cnr_params *cnr,
+ unsigned int level);
+
+void
+ia_css_cnr_debug_dtrace(
+ const struct ia_css_cnr_config *config,
+ unsigned int level);
+
+void
+ia_css_init_cnr2_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ * state,
+ size_t size);
+#endif /* __IA_CSS_CNR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h
new file mode 100644
index 000000000000..5346f98ab8aa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CNR2_PARAM_H
+#define __IA_CSS_CNR2_PARAM_H
+
+#include "type_support.h"
+
+/* CNR (Chroma Noise Reduction) */
+struct sh_css_isp_cnr_params {
+ s32 coring_u;
+ s32 coring_v;
+ s32 sense_gain_vy;
+ s32 sense_gain_vu;
+ s32 sense_gain_vv;
+ s32 sense_gain_hy;
+ s32 sense_gain_hu;
+ s32 sense_gain_hv;
+};
+
+#endif /* __IA_CSS_CNR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
new file mode 100644
index 000000000000..9bb9ca7f7ad2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CNR2_TYPES_H
+#define __IA_CSS_CNR2_TYPES_H
+
+/* @file
+* CSS-API header file for Chroma Noise Reduction (CNR) parameters
+*/
+
+/* Chroma Noise Reduction configuration.
+ *
+ * A small edge sensitivity means stronger smoothing and better NR performance.
+ * If you see blurred color on vertical edges,
+ * set higher values on sense_gain_h*.
+ * If you see blurred color on horizontal edges,
+ * set higher values on sense_gain_v*.
+ *
+ * ISP block: CNR2
+ * (ISP1: CNR1 is used.)
+ * (ISP2: CNR1 is used for Preview/Video.)
+ * ISP2: CNR2 is used for Still.
+ */
+struct ia_css_cnr_config {
+ u16 coring_u; /** Coring level of U.
+ u0.13, [0,8191], default/ineffective 0 */
+ u16 coring_v; /** Coring level of V.
+ u0.13, [0,8191], default/ineffective 0 */
+ u16 sense_gain_vy; /** Sensitivity of horizontal edge of Y.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ u16 sense_gain_vu; /** Sensitivity of horizontal edge of U.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ u16 sense_gain_vv; /** Sensitivity of horizontal edge of V.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ u16 sense_gain_hy; /** Sensitivity of vertical edge of Y.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+ u16 sense_gain_hu; /** Sensitivity of vertical edge of U.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+ u16 sense_gain_hv; /** Sensitivity of vertical edge of V.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+};
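+
+/* For example, to reduce color bleeding across vertical edges one could start
+ * from default_cnr_config (defined in ia_css_cnr2.host.c) and raise only the
+ * horizontal-edge sensitivities, per the note above:
+ *
+ *	struct ia_css_cnr_config cfg = default_cnr_config;
+ *
+ *	cfg.sense_gain_hy = 200;
+ *	cfg.sense_gain_hu = 200;
+ *	cfg.sense_gain_hv = 200;
+ *
+ * (the value 200 is only illustrative; any value in [0,8191] is accepted)
+ */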
+
+#endif /* __IA_CSS_CNR2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c
new file mode 100644
index 000000000000..958120b82e47
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "ia_css_conversion.host.h"
+
+const struct ia_css_conversion_config default_conversion_config = {
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+void
+ia_css_conversion_encode(
+ struct sh_css_isp_conversion_params *to,
+ const struct ia_css_conversion_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->en = from->en;
+ to->dummy0 = from->dummy0;
+ to->dummy1 = from->dummy1;
+ to->dummy2 = from->dummy2;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h
new file mode 100644
index 000000000000..69eadaffa4ce
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CONVERSION_HOST_H
+#define __IA_CSS_CONVERSION_HOST_H
+
+#include "ia_css_conversion_types.h"
+#include "ia_css_conversion_param.h"
+
+extern const struct ia_css_conversion_config default_conversion_config;
+
+void
+ia_css_conversion_encode(
+ struct sh_css_isp_conversion_params *to,
+ const struct ia_css_conversion_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_CONVERSION_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h
new file mode 100644
index 000000000000..f0601e236d73
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CONVERSION_PARAM_H
+#define __IA_CSS_CONVERSION_PARAM_H
+
+#include "type_support.h"
+
+/* CONVERSION */
+struct sh_css_isp_conversion_params {
+ u32 en;
+ u32 dummy0;
+ u32 dummy1;
+ u32 dummy2;
+};
+
+#endif /* __IA_CSS_CONVERSION_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
new file mode 100644
index 000000000000..374261d25520
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CONVERSION_TYPES_H
+#define __IA_CSS_CONVERSION_TYPES_H
+
+/**
+ * Conversion Kernel parameters.
+ * Deinterleave bayer quad into isys format
+ *
+ * ISP block: CONVERSION
+ *
+ */
+struct ia_css_conversion_config {
+ u32 en; /** en parameter */
+ u32 dummy0; /** dummy0 dummy parameter 0 */
+ u32 dummy1; /** dummy1 dummy parameter 1 */
+ u32 dummy2; /** dummy2 dummy parameter 2 */
+};
+
+#endif /* __IA_CSS_CONVERSION_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
new file mode 100644
index 000000000000..a42064dd1c85
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_copy_output.host.h"
+#include "ia_css_binary.h"
+#include "type_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+static const struct ia_css_copy_output_configuration default_config = {
+ .enable = false,
+};
+
+void
+ia_css_copy_output_config(
+ struct sh_css_isp_copy_output_isp_config *to,
+ const struct ia_css_copy_output_configuration *from,
+ unsigned int size)
+{
+ (void)size;
+ to->enable = from->enable;
+}
+
+int ia_css_copy_output_configure(const struct ia_css_binary *binary,
+ bool enable)
+{
+ struct ia_css_copy_output_configuration config = default_config;
+
+ config.enable = enable;
+
+ return ia_css_configure_copy_output(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
new file mode 100644
index 000000000000..9257b99bf8dd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_COPY_OUTPUT_HOST_H
+#define __IA_CSS_COPY_OUTPUT_HOST_H
+
+#include "type_support.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_copy_output_param.h"
+
+void
+ia_css_copy_output_config(
+ struct sh_css_isp_copy_output_isp_config *to,
+ const struct ia_css_copy_output_configuration *from,
+ unsigned int size);
+
+int ia_css_copy_output_configure(const struct ia_css_binary *binary,
+ bool enable);
+
+#endif /* __IA_CSS_COPY_OUTPUT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h
new file mode 100644
index 000000000000..32e526717f59
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_COPY_PARAM_H
+#define __IA_CSS_COPY_PARAM_H
+
+struct ia_css_copy_output_configuration {
+ bool enable;
+};
+
+struct sh_css_isp_copy_output_isp_config {
+ u32 enable;
+};
+
+#endif /* __IA_CSS_COPY_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
new file mode 100644
index 000000000000..7ec230986872
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "ia_css_crop.host.h"
+
+static const struct ia_css_crop_configuration default_config = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_crop_encode(
+ struct sh_css_isp_crop_isp_params *to,
+ const struct ia_css_crop_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->crop_pos = from->crop_pos;
+}
+
+int ia_css_crop_config(struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
+
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+	/* Assume divisibility here; may need to generalize to fixed point. */
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ia_css_crop_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_crop_configuration config = default_config;
+
+ config.info = info;
+
+ return ia_css_configure_crop(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
new file mode 100644
index 000000000000..9f51ecfdad14
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CROP_HOST_H
+#define __IA_CSS_CROP_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#include "ia_css_crop_types.h"
+#include "ia_css_crop_param.h"
+
+void
+ia_css_crop_encode(
+ struct sh_css_isp_crop_isp_params *to,
+ const struct ia_css_crop_config *from,
+ unsigned int size);
+
+int ia_css_crop_config(struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned int size);
+
+int ia_css_crop_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_CROP_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
new file mode 100644
index 000000000000..b766847e5bcb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CROP_PARAM_H
+#define __IA_CSS_CROP_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+#include "sh_css_internal.h" /* sh_css_crop_pos */
+
+/* Crop frame */
+struct sh_css_isp_crop_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+};
+
+struct sh_css_isp_crop_isp_params {
+ struct sh_css_crop_pos crop_pos;
+};
+
+#endif /* __IA_CSS_CROP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
new file mode 100644
index 000000000000..0571302014f8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CROP_TYPES_H
+#define __IA_CSS_CROP_TYPES_H
+
+/* Crop frame
+ *
+ * ISP block: crop frame
+ */
+
+#include <ia_css_frame_public.h>
+#include "sh_css_uds.h" /* sh_css_crop_pos */
+
+struct ia_css_crop_config {
+ struct sh_css_crop_pos crop_pos;
+};
+
+struct ia_css_crop_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_CROP_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c
new file mode 100644
index 000000000000..0f5ea8b3e5b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_csc.host.h"
+
+const struct ia_css_cc_config default_cc_config = {
+ 8,
+ {255, 29, 120, 0, -374, -342, 0, -672, 301},
+};
+
+void
+ia_css_encode_cc(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ (void)size;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() enter:\n");
+#endif
+
+ to->m_shift = (int16_t)from->fraction_bits;
+ to->m00 = (int16_t)from->matrix[0];
+ to->m01 = (int16_t)from->matrix[1];
+ to->m02 = (int16_t)from->matrix[2];
+ to->m10 = (int16_t)from->matrix[3];
+ to->m11 = (int16_t)from->matrix[4];
+ to->m12 = (int16_t)from->matrix[5];
+ to->m20 = (int16_t)from->matrix[6];
+ to->m21 = (int16_t)from->matrix[7];
+ to->m22 = (int16_t)from->matrix[8];
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() leave:\n");
+#endif
+}
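+
+/* Note: ia_css_cc_config stores the matrix entries as s32, but their
+ * documented range of [-8192,8191] (see ia_css_csc_types.h) always fits the
+ * int16_t ISP parameters, so the casts above cannot truncate valid
+ * configurations.
+ */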
+
+void
+ia_css_csc_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_cc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned int level,
+ const char *name)
+{
+	if (!csc)
+		return;
+ ia_css_debug_dtrace(level, "%s\n", name);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m_shift",
+ csc->m_shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m00",
+ csc->m00);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m01",
+ csc->m01);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m02",
+ csc->m02);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m10",
+ csc->m10);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m11",
+ csc->m11);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m12",
+ csc->m12);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m20",
+ csc->m20);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m21",
+ csc->m21);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m22",
+ csc->m22);
+}
+
+void
+ia_css_csc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned int level)
+{
+ ia_css_cc_dump(csc, level, "Color Space Conversion");
+}
+
+void
+ia_css_cc_config_debug_dtrace(
+ const struct ia_css_cc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.m[0]=%d, config.m[1]=%d, config.m[2]=%d, config.m[3]=%d, config.m[4]=%d, config.m[5]=%d, config.m[6]=%d, config.m[7]=%d, config.m[8]=%d\n",
+ config->matrix[0],
+ config->matrix[1], config->matrix[2],
+ config->matrix[3], config->matrix[4],
+ config->matrix[5], config->matrix[6],
+ config->matrix[7], config->matrix[8]);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h
new file mode 100644
index 000000000000..9dcef6693939
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CSC_HOST_H
+#define __IA_CSS_CSC_HOST_H
+
+#include "ia_css_csc_types.h"
+#include "ia_css_csc_param.h"
+
+extern const struct ia_css_cc_config default_cc_config;
+
+void
+ia_css_encode_cc(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+void
+ia_css_csc_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_cc_dump(
+ const struct sh_css_isp_csc_params *csc, unsigned int level,
+ const char *name);
+
+void
+ia_css_csc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned int level);
+
+void
+ia_css_cc_config_debug_dtrace(
+ const struct ia_css_cc_config *config,
+ unsigned int level);
+
+#define ia_css_csc_debug_dtrace ia_css_cc_config_debug_dtrace
+#endif
+
+#endif /* __IA_CSS_CSC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h
new file mode 100644
index 000000000000..f0380b2c571d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CSC_PARAM_H
+#define __IA_CSS_CSC_PARAM_H
+
+#include "type_support.h"
+/* CSC (Color Space Conversion) */
+struct sh_css_isp_csc_params {
+ u16 m_shift;
+ s16 m00;
+ s16 m01;
+ s16 m02;
+ s16 m10;
+ s16 m11;
+ s16 m12;
+ s16 m20;
+ s16 m21;
+ s16 m22;
+};
+
+#endif /* __IA_CSS_CSC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
new file mode 100644
index 000000000000..1e56d78e5ab9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CSC_TYPES_H
+#define __IA_CSS_CSC_TYPES_H
+
+/* @file
+* CSS-API header file for Color Space Conversion parameters.
+*/
+
+/* Color Correction configuration.
+ *
+ * This structure is used for 3 cases.
+ * ("YCgCo" is the output format of Demosaic.)
+ *
+ * 1. Color Space Conversion (YCgCo to YUV) for ISP1.
+ * ISP block: CSC1 (Color Space Conversion)
+ * struct ia_css_cc_config *cc_config
+ *
+ * 2. Color Correction Matrix (YCgCo to RGB) for ISP2.
+ * ISP block: CCM2 (Color Correction Matrix)
+ * struct ia_css_cc_config *yuv2rgb_cc_config
+ *
+ * 3. Color Space Conversion (RGB to YUV) for ISP2.
+ * ISP block: CSC2 (Color Space Conversion)
+ * struct ia_css_cc_config *rgb2yuv_cc_config
+ *
+ * default/ineffective:
+ * 1. YCgCo -> YUV
+ * 1 0.174 0.185
+ * 0 -0.66252 -0.66874
+ * 0 -0.83738 0.58131
+ *
+ * fraction_bits = 12
+ * 4096 713 758
+ * 0 -2714 -2739
+ * 0 -3430 2381
+ *
+ * 2. YCgCo -> RGB
+ * 1 -1 1
+ * 1 1 0
+ * 1 -1 -1
+ *
+ * fraction_bits = 12
+ * 4096 -4096 4096
+ * 4096 4096 0
+ * 4096 -4096 -4096
+ *
+ * 3. RGB -> YUV
+ * 0.299 0.587 0.114
+ * -0.16874 -0.33126 0.5
+ * 0.5 -0.41869 -0.08131
+ *
+ * fraction_bits = 13
+ * 2449 4809 934
+ * -1382 -2714 4096
+ * 4096 -3430 -666
+ */
+struct ia_css_cc_config {
+ u32 fraction_bits;/** Fractional bits of matrix.
+ u8.0, [0,13] */
+ s32 matrix[3 * 3]; /** Conversion matrix.
+ s[13-fraction_bits].[fraction_bits],
+ [-8192,8191] */
+};
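+
+/* As an illustration, case 1 above (YCgCo -> YUV, fraction_bits = 12) can be
+ * written directly as an initializer; each entry is the real coefficient
+ * scaled by 2^fraction_bits, e.g. 0.174 * 4096 ~= 713:
+ *
+ *	static const struct ia_css_cc_config ycgco_to_yuv_example = {
+ *		.fraction_bits = 12,
+ *		.matrix = { 4096,  713,  758,
+ *			       0, -2714, -2739,
+ *			       0, -3430,  2381 },
+ *	};
+ *
+ * (example name only; not an identifier used elsewhere in the driver)
+ */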
+
+#endif /* __IA_CSS_CSC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
new file mode 100644
index 000000000000..0089402bc12d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "ia_css_ctc1_5.host.h"
+
+static void ctc_gradient(
+ int *dydx, int *shift,
+ int y1, int y0, int x1, int x0)
+{
+ int frc_bits = max(IA_CSS_CTC_COEF_SHIFT, 16);
+ int dy = y1 - y0;
+ int dx = x1 - x0;
+ int dydx_int;
+ int dydx_frc;
+ int sft;
+	/* max_dydx = the maximum gradient = the maximum y (gain) */
+ int max_dydx = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;
+
+ if (dx == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ctc_gradient() error, illegal division operation\n");
+ return;
+ } else {
+ dydx_int = dy / dx;
+ dydx_frc = ((dy - dydx_int * dx) << frc_bits) / dx;
+ }
+
+ assert(y0 >= 0 && y0 <= max_dydx);
+ assert(y1 >= 0 && y1 <= max_dydx);
+ assert(x0 < x1);
+ assert(dydx);
+ assert(shift);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() enter:\n");
+
+ /* search "sft" which meets this condition:
+ (1 << (IA_CSS_CTC_COEF_SHIFT - 1))
+ <= (((float)dy / (float)dx) * (1 << sft))
+ <= ((1 << IA_CSS_CTC_COEF_SHIFT) - 1) */
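+	/* E.g. when dy/dx is exactly 0.5 and IA_CSS_CTC_COEF_SHIFT is 13, the
+	 * loop ends up with *shift = 13 and *dydx = 4096, since
+	 * 0.5 * (1 << 13) = 4096 is the only candidate inside
+	 * [1 << 12, (1 << 13) - 1].
+	 */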
+ for (sft = 0; sft <= IA_CSS_CTC_COEF_SHIFT; sft++) {
+ int tmp_dydx = (dydx_int << sft)
+ + (dydx_frc >> (frc_bits - sft));
+ if (tmp_dydx <= max_dydx) {
+ *dydx = tmp_dydx;
+ *shift = sft;
+ }
+ if (tmp_dydx >= max_dydx)
+ break;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() leave:\n");
+}
+
+void
+ia_css_ctc_encode(
+ struct sh_css_isp_ctc_params *to,
+ const struct ia_css_ctc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->y0 = from->y0;
+ to->y1 = from->y1;
+ to->y2 = from->y2;
+ to->y3 = from->y3;
+ to->y4 = from->y4;
+ to->y5 = from->y5;
+
+ to->ce_gain_exp = from->ce_gain_exp;
+
+ to->x1 = from->x1;
+ to->x2 = from->x2;
+ to->x3 = from->x3;
+ to->x4 = from->x4;
+
+ ctc_gradient(&to->dydx0,
+ &to->dydx0_shift,
+ from->y1, from->y0,
+ from->x1, 0);
+
+ ctc_gradient(&to->dydx1,
+ &to->dydx1_shift,
+ from->y2, from->y1,
+ from->x2, from->x1);
+
+ ctc_gradient(&to->dydx2,
+ &to->dydx2_shift,
+ from->y3, from->y2,
+ from->x3, from->x2);
+
+ ctc_gradient(&to->dydx3,
+ &to->dydx3_shift,
+ from->y4, from->y3,
+ from->x4, from->x3);
+
+ ctc_gradient(&to->dydx4,
+ &to->dydx4_shift,
+ from->y5, from->y4,
+ SH_CSS_BAYER_MAXVAL, from->x4);
+}
+
+void
+ia_css_ctc_dump(
+ const struct sh_css_isp_ctc_params *ctc,
+ unsigned int level);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h
new file mode 100644
index 000000000000..6dba55261c08
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC1_5_HOST_H
+#define __IA_CSS_CTC1_5_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_ctc1_5_param.h"
+
+void
+ia_css_ctc_encode(
+ struct sh_css_isp_ctc_params *to,
+ const struct ia_css_ctc_config *from,
+ unsigned int size);
+
+void
+ia_css_ctc_dump(
+ const struct sh_css_isp_ctc_params *ctc,
+ unsigned int level);
+
+#endif /* __IA_CSS_CTC1_5_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h
new file mode 100644
index 000000000000..35ef610ed226
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC1_5_PARAM_H
+#define __IA_CSS_CTC1_5_PARAM_H
+
+#include "type_support.h"
+#include "ctc/ctc_1.0/ia_css_ctc_param.h" /* vamem params */
+
+/* CTC (Color Tone Control) */
+struct sh_css_isp_ctc_params {
+ s32 y0;
+ s32 y1;
+ s32 y2;
+ s32 y3;
+ s32 y4;
+ s32 y5;
+ s32 ce_gain_exp;
+ s32 x1;
+ s32 x2;
+ s32 x3;
+ s32 x4;
+ s32 dydx0;
+ s32 dydx0_shift;
+ s32 dydx1;
+ s32 dydx1_shift;
+ s32 dydx2;
+ s32 dydx2_shift;
+ s32 dydx3;
+ s32 dydx3_shift;
+ s32 dydx4;
+ s32 dydx4_shift;
+};
+
+#endif /* __IA_CSS_CTC1_5_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c
new file mode 100644
index 000000000000..38751b8e9e6a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "assert_support.h"
+
+#include "ia_css_ctc2.host.h"
+
+#define INEFFECTIVE_VAL 4096
+#define BASIC_VAL 819
+
+/*Default configuration of parameters for Ctc2*/
+const struct ia_css_ctc2_config default_ctc2_config = {
+ INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ BASIC_VAL * 2, BASIC_VAL * 4, BASIC_VAL * 6,
+ BASIC_VAL * 8, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ BASIC_VAL >> 1, BASIC_VAL
+};
+
+/* (dydx) = ctc2_slope(y1, y0, x1, x0)
+ * -----------------------------------------------
+ * Calculation of the Slope of a Line = ((y1 - y0) << 8) / (x1 - x0)
+ *
+ * Note: y1, y0, x1 & x0 must lie within the range 0 <-> 8191
+ */
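+/* For example, ctc2_slope(4096, 2048, 4096, 0) gives
+ *	dy = 2048, dx = 4096, dy << 8 = 524288, rounding = 2048,
+ *	slope = (524288 + 2048) / 4096 = 128,
+ * which lies inside [-max_slope - 1, max_slope] and is returned as is.
+ */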
+static int ctc2_slope(int y1, int y0, int x1, int x0)
+{
+ const int shift_val = 8;
+ const int max_slope = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;
+ int dy = y1 - y0;
+ int dx = x1 - x0;
+ int rounding = (dx + 1) >> 1;
+ int dy_shift = dy << shift_val;
+ int slope, dydx;
+
+ /*Protection for parameter values, & avoiding zero divisions*/
+ assert(y0 >= 0 && y0 <= max_slope);
+ assert(y1 >= 0 && y1 <= max_slope);
+ assert(x0 >= 0 && x0 <= max_slope);
+ assert(x1 > 0 && x1 <= max_slope);
+ assert(dx > 0);
+
+ if (dy < 0)
+ rounding = -rounding;
+ slope = (int)(dy_shift + rounding) / dx;
+
+	/* The slope must lie within the range
+	 * (-max_slope - 1) <= dydx <= max_slope
+	 */
+ if (slope <= -max_slope - 1) {
+ dydx = -max_slope - 1;
+ } else if (slope >= max_slope) {
+ dydx = max_slope;
+ } else {
+ dydx = slope;
+ }
+
+ return dydx;
+}
+
+/* (void) = ia_css_ctc2_vmem_encode(*to, *from)
+ * -----------------------------------------------
+ * VMEM Encode Function to translate Y parameters from userspace into ISP space
+ */
+void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to,
+ const struct ia_css_ctc2_config *from,
+ size_t size)
+{
+ unsigned int i, j;
+ const unsigned int shffl_blck = 4;
+ const unsigned int length_zeros = 11;
+ short dydx0, dydx1, dydx2, dydx3, dydx4;
+
+ (void)size;
+ /*
+ * Calculation of slopes of lines interconnecting
+	 * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0
+ */
+ dydx0 = ctc2_slope(from->y_y1, from->y_y0,
+ from->y_x1, 0);
+ dydx1 = ctc2_slope(from->y_y2, from->y_y1,
+ from->y_x2, from->y_x1);
+ dydx2 = ctc2_slope(from->y_y3, from->y_y2,
+ from->y_x3, from->y_x2);
+ dydx3 = ctc2_slope(from->y_y4, from->y_y3,
+ from->y_x4, from->y_x3);
+ dydx4 = ctc2_slope(from->y_y5, from->y_y4,
+ SH_CSS_BAYER_MAXVAL, from->y_x4);
+
+ /*Fill 3 arrays with:
+	 * - Luma input gain values y_y0, y_y1, y_y2, y_y3, y_y4
+	 * - Luma kneepoints 0, y_x1, y_x2, y_x3, y_x4
+	 * - Calculated slopes dydx0, dydx1, dydx2, dydx3, dydx4
+	 *
+	 * - Each 64-element array is divided into blocks of 16 elements:
+ * the 5 parameters + zeros in the remaining 11 positions
+ * - All blocks of the same array will contain the same data
+ */
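+	/* With shffl_blck == 4, (i << shffl_blck) steps through offsets 0, 16,
+	 * 32 and 48, so each destination array ends up as four identical
+	 * 16-element blocks, e.g. { 0, y_x1 .. y_x4, eleven zeros } for y_x,
+	 * and likewise for y_y and e_y_slope.
+	 */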
+ for (i = 0; i < shffl_blck; i++) {
+ to->y_x[0][(i << shffl_blck)] = 0;
+ to->y_x[0][(i << shffl_blck) + 1] = from->y_x1;
+ to->y_x[0][(i << shffl_blck) + 2] = from->y_x2;
+ to->y_x[0][(i << shffl_blck) + 3] = from->y_x3;
+ to->y_x[0][(i << shffl_blck) + 4] = from->y_x4;
+
+ to->y_y[0][(i << shffl_blck)] = from->y_y0;
+ to->y_y[0][(i << shffl_blck) + 1] = from->y_y1;
+ to->y_y[0][(i << shffl_blck) + 2] = from->y_y2;
+ to->y_y[0][(i << shffl_blck) + 3] = from->y_y3;
+ to->y_y[0][(i << shffl_blck) + 4] = from->y_y4;
+
+ to->e_y_slope[0][(i << shffl_blck)] = dydx0;
+ to->e_y_slope[0][(i << shffl_blck) + 1] = dydx1;
+ to->e_y_slope[0][(i << shffl_blck) + 2] = dydx2;
+ to->e_y_slope[0][(i << shffl_blck) + 3] = dydx3;
+ to->e_y_slope[0][(i << shffl_blck) + 4] = dydx4;
+
+ for (j = 0; j < length_zeros; j++) {
+ to->y_x[0][(i << shffl_blck) + 5 + j] = 0;
+ to->y_y[0][(i << shffl_blck) + 5 + j] = 0;
+ to->e_y_slope[0][(i << shffl_blck) + 5 + j] = 0;
+ }
+ }
+}
+
+/* (void) = ia_css_ctc2_encode(*to, *from)
+ * -----------------------------------------------
+ * DMEM Encode Function to translate UV parameters from userspace into ISP space
+ */
+void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to,
+ struct ia_css_ctc2_config *from,
+ size_t size)
+{
+ (void)size;
+
+ to->uv_y0 = from->uv_y0;
+ to->uv_y1 = from->uv_y1;
+ to->uv_x0 = from->uv_x0;
+ to->uv_x1 = from->uv_x1;
+
+ /*Slope Calculation*/
+ to->uv_dydx = ctc2_slope(from->uv_y1, from->uv_y0,
+ from->uv_x1, from->uv_x0);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h
new file mode 100644
index 000000000000..84ab060d6c88
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC2_HOST_H
+#define __IA_CSS_CTC2_HOST_H
+
+#include "ia_css_ctc2_param.h"
+#include "ia_css_ctc2_types.h"
+
+extern const struct ia_css_ctc2_config default_ctc2_config;
+
+/*Encode Functions to translate parameters from userspace into ISP space*/
+
+void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to,
+ const struct ia_css_ctc2_config *from,
+ size_t size);
+
+void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to,
+ struct ia_css_ctc2_config *from,
+ size_t size);
+
+#endif /* __IA_CSS_CTC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
new file mode 100644
index 000000000000..41337538abbb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC2_PARAM_H
+#define __IA_CSS_CTC2_PARAM_H
+
+#define IA_CSS_CTC_COEF_SHIFT 13
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+/* CTC (Chroma Tone Control)ISP Parameters */
+
+/*VMEM Luma params*/
+struct ia_css_isp_ctc2_vmem_params {
+	/** Kneepoints by Y(Luma): 0.0, y_x1, y_x2, y_x3, y_x4 */
+	VMEM_ARRAY(y_x, ISP_VEC_NELEMS);
+	/* Gains by Y(Luma) at Y = 0.0, y_x1, y_x2, y_x3, y_x4 */
+	VMEM_ARRAY(y_y, ISP_VEC_NELEMS);
+	/* Slopes of lines interconnecting
+	 * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0 */
+ VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS);
+};
+
+/*DMEM Chroma params*/
+struct ia_css_isp_ctc2_dmem_params {
+ /* Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
+ s32 uv_y0;
+ s32 uv_y1;
+
+ /* Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
+ s32 uv_x0;
+ s32 uv_x1;
+
+ /* Slope of line interconnecting uv_x0 -> uv_x1*/
+ s32 uv_dydx;
+
+};
+#endif /* __IA_CSS_CTC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
new file mode 100644
index 000000000000..187c22f8da51
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC2_TYPES_H
+#define __IA_CSS_CTC2_TYPES_H
+
+/* Chroma Tone Control configuration.
+*
+* ISP block: CTC2 (CTC by polygonal approximation)
+* (ISP1: CTC1 (CTC by look-up table) is used.)
+* ISP2: CTC2 is used.
+* ISP261: CTC2 (CTC by Fast Approximate Distance)
+*/
+struct ia_css_ctc2_config {
+	/** Gains by Y(Luma) at Y = 0.0, Y_X1, Y_X2, Y_X3, Y_X4 and 1.0
+ * --default/ineffective value: 4096(0.5f)
+ */
+ s32 y_y0;
+ s32 y_y1;
+ s32 y_y2;
+ s32 y_y3;
+ s32 y_y4;
+ s32 y_y5;
+	/* 1st-4th kneepoints by Y(Luma) --default/ineffective value: n/a
+	 * requirement: 0.0 < y_x1 < y_x2 < y_x3 < y_x4 < 1.0
+ */
+ s32 y_x1;
+ s32 y_x2;
+ s32 y_x3;
+ s32 y_x4;
+	/* Gains by UV(Chroma) under thresholds uv_x0 and uv_x1
+ * --default/ineffective value: 4096(0.5f)
+ */
+ s32 uv_y0;
+ s32 uv_y1;
+ /* Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
+ * --default/ineffective value: n/a
+ */
+ s32 uv_x0;
+ s32 uv_x1;
+};
+
+#endif /* __IA_CSS_CTC2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c
new file mode 100644
index 000000000000..f60cbe49ffab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ia_css_ctc.host.h"
+
+const struct ia_css_ctc_config default_ctc_config = {
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ 1,
+ SH_CSS_BAYER_MAXVAL / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 2 / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 3 / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 4 / 5, /* To be implemented */
+};
+
+void
+ia_css_ctc_vamem_encode(
+ struct sh_css_isp_ctc_vamem_params *to,
+ const struct ia_css_ctc_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->ctc, &from->data, sizeof(to->ctc));
+}
+
+void
+ia_css_ctc_debug_dtrace(
+ const struct ia_css_ctc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.ce_gain_exp=%d, config.y0=%d, config.x1=%d, config.y1=%d, config.x2=%d, config.y2=%d, config.x3=%d, config.y3=%d, config.x4=%d, config.y4=%d\n",
+ config->ce_gain_exp, config->y0,
+ config->x1, config->y1,
+ config->x2, config->y2,
+ config->x3, config->y3,
+ config->x4, config->y4);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h
new file mode 100644
index 000000000000..6ed1fe8b0b3c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC_HOST_H
+#define __IA_CSS_CTC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_ctc_param.h"
+#include "ia_css_ctc_table.host.h"
+
+extern const struct ia_css_ctc_config default_ctc_config;
+
+void
+ia_css_ctc_vamem_encode(
+ struct sh_css_isp_ctc_vamem_params *to,
+ const struct ia_css_ctc_table *from,
+ unsigned int size);
+
+void
+ia_css_ctc_debug_dtrace(
+	const struct ia_css_ctc_config *config, unsigned int level);
+
+#endif /* __IA_CSS_CTC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
new file mode 100644
index 000000000000..ae3d183b4142
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC_PARAM_H
+#define __IA_CSS_CTC_PARAM_H
+
+#include "type_support.h"
+#include <system_global.h>
+
+#include "ia_css_ctc_types.h"
+
+#ifndef PIPE_GENERATION
+#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_2_CTC_TABLE_SIZE
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_CTC_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_ctc_vamem_params {
+ u16 ctc[SH_CSS_ISP_CTC_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_CTC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
new file mode 100644
index 000000000000..632807fbbd1d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/string.h> /* for memcpy() */
+
+#include <type_support.h>
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_ctc_table.host.h"
+
+struct ia_css_ctc_table default_ctc_table;
+
+static const uint16_t
+default_ctc_table_data[IA_CSS_VAMEM_2_CTC_TABLE_SIZE] = {
+ 0, 384, 837, 957, 1011, 1062, 1083, 1080,
+ 1078, 1077, 1053, 1039, 1012, 992, 969, 951,
+ 929, 906, 886, 866, 845, 823, 809, 790,
+ 772, 758, 741, 726, 711, 701, 688, 675,
+ 666, 656, 648, 639, 633, 626, 618, 612,
+ 603, 594, 582, 572, 557, 545, 529, 516,
+ 504, 491, 480, 467, 459, 447, 438, 429,
+ 419, 412, 404, 397, 389, 382, 376, 368,
+ 363, 357, 351, 345, 340, 336, 330, 326,
+ 321, 318, 312, 308, 304, 300, 297, 294,
+ 291, 286, 284, 281, 278, 275, 271, 268,
+ 261, 257, 251, 245, 240, 235, 232, 225,
+ 223, 218, 213, 209, 206, 204, 199, 197,
+ 193, 189, 186, 185, 183, 179, 177, 175,
+ 172, 170, 169, 167, 164, 164, 162, 160,
+ 158, 157, 156, 154, 154, 152, 151, 150,
+ 149, 148, 146, 147, 146, 144, 143, 143,
+ 142, 141, 140, 141, 139, 138, 138, 138,
+ 137, 136, 136, 135, 134, 134, 134, 133,
+ 132, 132, 131, 130, 131, 130, 129, 128,
+ 129, 127, 127, 127, 127, 125, 125, 125,
+ 123, 123, 122, 120, 118, 115, 114, 111,
+ 110, 108, 106, 105, 103, 102, 100, 99,
+ 97, 97, 96, 95, 94, 93, 93, 91,
+ 91, 91, 90, 90, 89, 89, 88, 88,
+ 89, 88, 88, 87, 87, 87, 87, 86,
+ 87, 87, 86, 87, 86, 86, 84, 84,
+ 82, 80, 78, 76, 74, 72, 70, 68,
+ 67, 65, 62, 60, 58, 56, 55, 54,
+ 53, 51, 49, 49, 47, 45, 45, 45,
+ 41, 40, 39, 39, 34, 33, 34, 32,
+ 25, 23, 24, 20, 13, 9, 12, 0,
+ 0
+};
+
+void
+ia_css_config_ctc_table(void)
+{
+ memcpy(default_ctc_table.data.vamem_2, default_ctc_table_data,
+ sizeof(default_ctc_table_data));
+ default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h
new file mode 100644
index 000000000000..b78fc46231d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC_TABLE_HOST_H
+#define __IA_CSS_CTC_TABLE_HOST_H
+
+#include "ia_css_ctc_types.h"
+
+extern struct ia_css_ctc_table default_ctc_table;
+
+void ia_css_config_ctc_table(void);
+
+#endif /* __IA_CSS_CTC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
new file mode 100644
index 000000000000..67f4e01a3197
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_CTC_TYPES_H
+#define __IA_CSS_CTC_TYPES_H
+
+#include <linux/bitops.h>
+
+/* @file
+* CSS-API header file for Chroma Tone Control parameters.
+*/
+
+/* Fractional bits for CTC gain (used only for ISP1).
+ *
+ * IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits
+ * of gain(=8), but also the bits(=5) to convert chroma
+ * from 13bit precision to 8bit precision.
+ *
+ * Gain (struct ia_css_ctc_table) : u5.8
+ * Input(Chroma) : s0.12 (13bit precision)
+ * Output(Chroma): s0.7 (8bit precision)
+ * Output = (Input * Gain) >> IA_CSS_CTC_COEF_SHIFT
+ */
+#define IA_CSS_CTC_COEF_SHIFT 13
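+
+/* For example, a gain of 1.0 in u5.8 is 1 << 8 = 256; applied to a chroma
+ * value of 4096 (1.0 in s0.12), the output is
+ * (4096 * 256) >> IA_CSS_CTC_COEF_SHIFT = 128, i.e. 1.0 in s0.7, so a unity
+ * gain only performs the 13-bit to 8-bit precision reduction.
+ */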
+
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 10
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE BIT(IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2)
+
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2 8
+/* Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE ((1U << IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1)
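+/* IA_CSS_VAMEM_2_CTC_TABLE_SIZE evaluates to (1 << 8) + 1 = 257, matching the
+ * 257 entries of default_ctc_table_data in ia_css_ctc_table.host.c;
+ * presumably the extra entry gives the last segment an explicit end point.
+ */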
+
+enum ia_css_vamem_type {
+ IA_CSS_VAMEM_TYPE_1,
+ IA_CSS_VAMEM_TYPE_2
+};
+
+/* Chroma Tone Control configuration.
+ *
+ * ISP block: CTC2 (CTC by polygonal line approximation)
+ * (ISP1: CTC1 (CTC by look-up table) is used.)
+ * ISP2: CTC2 is used.
+ */
+struct ia_css_ctc_config {
+ u16 y0; /** 1st kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y1; /** 2nd kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y2; /** 3rd kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y3; /** 4th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y4; /** 5th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 y5; /** 6th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ u16 ce_gain_exp; /** Common exponent of y-axis gain.
+ u8.0, [0,13],
+ default/ineffective 1 */
+ u16 x1; /** 2nd kneepoint luma.
+ u0.13, [0,8191], constraints: 0<x1<x2,
+ default/ineffective 1024 */
+ u16 x2; /** 3rd kneepoint luma.
+ u0.13, [0,8191], constraints: x1<x2<x3,
+ default/ineffective 2048 */
+ u16 x3; /** 4th kneepoint luma.
+ u0.13, [0,8191], constraints: x2<x3<x4,
+ default/ineffective 6144 */
+	u16 x4;		/** 5th kneepoint luma.
+ u0.13, [0,8191], constraints: x3<x4<8191,
+ default/ineffective 7168 */
+};
+
+union ia_css_ctc_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_CTC_TABLE_SIZE];
+ u16 vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE];
+};
+
+/* CTC table, used for Chroma Tone Control.
+ *
+ * ISP block: CTC1 (CTC by look-up table)
+ * ISP1: CTC1 is used.
+ * (ISP2: CTC2 (CTC by polygonal line approximation) is used.)
+ */
+struct ia_css_ctc_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_ctc_data data;
+};
+
+#endif /* __IA_CSS_CTC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
new file mode 100644
index 000000000000..52a07feb48ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+#include "ia_css_de.host.h"
+
+const struct ia_css_de_config default_de_config = {
+ 0,
+ 0,
+ 0
+};
+
+void
+ia_css_de_encode(
+ struct sh_css_isp_de_params *to,
+ const struct ia_css_de_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->pixelnoise =
+ uDIGIT_FITTING(from->pixelnoise, 16, SH_CSS_BAYER_BITS);
+ to->c1_coring_threshold =
+ uDIGIT_FITTING(from->c1_coring_threshold, 16,
+ SH_CSS_BAYER_BITS);
+ to->c2_coring_threshold =
+ uDIGIT_FITTING(from->c2_coring_threshold, 16,
+ SH_CSS_BAYER_BITS);
+}
+
+void
+ia_css_de_dump(
+ const struct sh_css_isp_de_params *de,
+ unsigned int level)
+{
+ if (!de)
+ return;
+ ia_css_debug_dtrace(level, "Demosaic:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_pixelnoise", de->pixelnoise);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_c1_coring_threshold",
+ de->c1_coring_threshold);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_c2_coring_threshold",
+ de->c2_coring_threshold);
+}
+
+void
+ia_css_de_debug_dtrace(
+ const struct ia_css_de_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.pixelnoise=%d, config.c1_coring_threshold=%d, config.c2_coring_threshold=%d\n",
+ config->pixelnoise,
+ config->c1_coring_threshold, config->c2_coring_threshold);
+}
+
+void
+ia_css_init_de_state(
+ void/*struct sh_css_isp_de_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h
new file mode 100644
index 000000000000..b00d1c0fa249
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DE_HOST_H
+#define __IA_CSS_DE_HOST_H
+
+#include "ia_css_de_types.h"
+#include "ia_css_de_param.h"
+
+extern const struct ia_css_de_config default_de_config;
+
+void
+ia_css_de_encode(
+ struct sh_css_isp_de_params *to,
+ const struct ia_css_de_config *from,
+ unsigned int size);
+
+void
+ia_css_de_dump(
+ const struct sh_css_isp_de_params *de,
+ unsigned int level);
+
+void
+ia_css_de_debug_dtrace(
+ const struct ia_css_de_config *config,
+ unsigned int level);
+
+void
+ia_css_init_de_state(
+ void/*struct sh_css_isp_de_vmem_state*/ * state,
+ size_t size);
+
+#endif /* __IA_CSS_DE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h
new file mode 100644
index 000000000000..07db16340ef2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DE_PARAM_H
+#define __IA_CSS_DE_PARAM_H
+
+#include "type_support.h"
+
+/* DE (Demosaic) */
+struct sh_css_isp_de_params {
+ s32 pixelnoise;
+ s32 c1_coring_threshold;
+ s32 c2_coring_threshold;
+};
+
+#endif /* __IA_CSS_DE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h
new file mode 100644
index 000000000000..e1f025846b83
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DE_TYPES_H
+#define __IA_CSS_DE_TYPES_H
+
+/* @file
+* CSS-API header file for Demosaic (bayer-to-YCgCo) parameters.
+*/
+
+/* Demosaic (bayer-to-YCgCo) configuration.
+ *
+ * ISP block: DE1
+ * ISP1: DE1 is used.
+ * (ISP2: DE2 is used.)
+ */
+struct ia_css_de_config {
+ ia_css_u0_16 pixelnoise; /** Pixel noise used in moire elimination.
+ u0.16, [0,65535],
+ default 0, ineffective 0 */
+ ia_css_u0_16 c1_coring_threshold; /** Coring threshold for C1.
+ This is the same as nr_config.threshold_cb.
+ u0.16, [0,65535],
+ default 128(0.001953125), ineffective 0 */
+ ia_css_u0_16 c2_coring_threshold; /** Coring threshold for C2.
+ This is the same as nr_config.threshold_cr.
+ u0.16, [0,65535],
+ default 128(0.001953125), ineffective 0 */
+};
+
+#endif /* __IA_CSS_DE_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c
new file mode 100644
index 000000000000..16426b2cbfb4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_de2.host.h"
+
+const struct ia_css_ecd_config default_ecd_config = {
+ (1 << (ISP_VEC_ELEMBITS - 1)) * 2 / 3, /* 2/3 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1.0 */
+ 0, /* 0.0 */
+};
+
+void
+ia_css_ecd_encode(
+ struct sh_css_isp_ecd_params *to,
+ const struct ia_css_ecd_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->zip_strength = from->zip_strength;
+ to->fc_strength = from->fc_strength;
+ to->fc_debias = from->fc_debias;
+}
+
+void
+ia_css_ecd_dump(
+ const struct sh_css_isp_ecd_params *ecd,
+ unsigned int level);
+
+void
+ia_css_ecd_debug_dtrace(
+ const struct ia_css_ecd_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.zip_strength=%d, config.fc_strength=%d, config.fc_debias=%d\n",
+ config->zip_strength,
+ config->fc_strength, config->fc_debias);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h
new file mode 100644
index 000000000000..0ce9e363aa2c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DE2_HOST_H
+#define __IA_CSS_DE2_HOST_H
+
+#include "ia_css_de2_types.h"
+#include "ia_css_de2_param.h"
+
+extern const struct ia_css_ecd_config default_ecd_config;
+
+void
+ia_css_ecd_encode(
+ struct sh_css_isp_ecd_params *to,
+ const struct ia_css_ecd_config *from,
+ unsigned int size);
+
+void
+ia_css_ecd_dump(
+ const struct sh_css_isp_ecd_params *ecd,
+ unsigned int level);
+
+void
+ia_css_ecd_debug_dtrace(
+ const struct ia_css_ecd_config *config, unsigned int level);
+
+#endif /* __IA_CSS_DE2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h
new file mode 100644
index 000000000000..ff6c6ed2fc13
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DE2_PARAM_H
+#define __IA_CSS_DE2_PARAM_H
+
+#include "type_support.h"
+
+/* Reuse DE1 params and extend them */
+#include "../de_1.0/ia_css_de_param.h"
+
+/* DE (Demosaic) */
+struct sh_css_isp_ecd_params {
+ s32 zip_strength;
+ s32 fc_strength;
+ s32 fc_debias;
+};
+
+#endif /* __IA_CSS_DE2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h
new file mode 100644
index 000000000000..ca36c001607c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DE2_TYPES_H
+#define __IA_CSS_DE2_TYPES_H
+
+/* @file
+* CSS-API header file for Demosaicing parameters.
+*/
+
+/* Eigen Color Demosaicing configuration.
+ *
+ * ISP block: DE2
+ * (ISP1: DE1 is used.)
+ * ISP2: DE2 is used.
+ */
+struct ia_css_ecd_config {
+ u16 zip_strength; /** Strength of zipper reduction.
+ u0.13, [0,8191],
+ default 5489(0.67), ineffective 0 */
+ u16 fc_strength; /** Strength of false color reduction.
+ u0.13, [0,8191],
+ default 8191(almost 1.0), ineffective 0 */
+ u16 fc_debias; /** Prevent color change
+ on noise or Gr/Gb imbalance.
+ u0.13, [0,8191],
+ default 0, ineffective 0 */
+};
+
+#endif /* __IA_CSS_DE2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c
new file mode 100644
index 000000000000..98144a1a9db4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_dp.host.h"
+
+/* We use a different set of DPC configuration parameters when
+ * DPC is used before OBC and NORM. Currently these parameters
+ * are used in use cases which select both BDS and DPC.
+ */
+const struct ia_css_dp_config default_dp_10bpp_config = {
+ 1024,
+ 2048,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
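+
+/* The 10bpp variant above differs from default_dp_config below only in its
+ * first initializer (1024 rather than 8192), presumably rescaling the 13-bit
+ * default for 10-bit full-scale data (2^10 = 1024 vs 2^13 = 8192).
+ */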
+
+const struct ia_css_dp_config default_dp_config = {
+ 8192,
+ 2048,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+
+void
+ia_css_dp_encode(
+ struct sh_css_isp_dp_params *to,
+ const struct ia_css_dp_config *from,
+ unsigned int size)
+{
+ int gain = from->gain;
+ int gr = from->gr;
+ int r = from->r;
+ int b = from->b;
+ int gb = from->gb;
+
+ (void)size;
+ to->threshold_single =
+ SH_CSS_BAYER_MAXVAL;
+ to->threshold_2adjacent =
+ uDIGIT_FITTING(from->threshold, 16, SH_CSS_BAYER_BITS);
+ to->gain =
+ uDIGIT_FITTING(from->gain, 8, SH_CSS_DP_GAIN_SHIFT);
+
+ to->coef_rr_gr =
+ uDIGIT_FITTING(gain * gr / r, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_rr_gb =
+ uDIGIT_FITTING(gain * gb / r, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_bb_gb =
+ uDIGIT_FITTING(gain * gb / b, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_bb_gr =
+ uDIGIT_FITTING(gain * gr / b, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gr_rr =
+ uDIGIT_FITTING(gain * r / gr, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gr_bb =
+ uDIGIT_FITTING(gain * b / gr, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gb_bb =
+ uDIGIT_FITTING(gain * b / gb, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gb_rr =
+ uDIGIT_FITTING(gain * r / gb, 8, SH_CSS_DP_GAIN_SHIFT);
+}
+
+void
+ia_css_dp_dump(
+ const struct sh_css_isp_dp_params *dp,
+ unsigned int level)
+{
+	if (!dp)
+		return;
+ ia_css_debug_dtrace(level, "Defect Pixel Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_threshold_single_w_2adj_on",
+ dp->threshold_single);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_threshold_2adj_w_2adj_on",
+ dp->threshold_2adjacent);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_gain", dp->gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_rr_gr", dp->coef_rr_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_rr_gb", dp->coef_rr_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_bb_gb", dp->coef_bb_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_bb_gr", dp->coef_bb_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gr_rr", dp->coef_gr_rr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gr_bb", dp->coef_gr_bb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gb_bb", dp->coef_gb_bb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gb_rr", dp->coef_gb_rr);
+}
+
+void
+ia_css_dp_debug_dtrace(
+ const struct ia_css_dp_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d, config.gain=%d\n",
+ config->threshold, config->gain);
+}
+
+void
+ia_css_init_dp_state(
+ void/*struct sh_css_isp_dp_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h
new file mode 100644
index 000000000000..93c48de872cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DP_HOST_H
+#define __IA_CSS_DP_HOST_H
+
+#include "ia_css_dp_types.h"
+#include "ia_css_dp_param.h"
+
+extern const struct ia_css_dp_config default_dp_config;
+
+/* ISP2401 */
+extern const struct ia_css_dp_config default_dp_10bpp_config;
+
+void
+ia_css_dp_encode(
+ struct sh_css_isp_dp_params *to,
+ const struct ia_css_dp_config *from,
+ unsigned int size);
+
+void
+ia_css_dp_dump(
+ const struct sh_css_isp_dp_params *dp,
+ unsigned int level);
+
+void
+ia_css_dp_debug_dtrace(
+ const struct ia_css_dp_config *config,
+ unsigned int level);
+
+void
+ia_css_init_dp_state(
+ void/*struct sh_css_isp_dp_vmem_state*/ * state,
+ size_t size);
+
+#endif /* __IA_CSS_DP_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h
new file mode 100644
index 000000000000..ff8061e820ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DP_PARAM_H
+#define __IA_CSS_DP_PARAM_H
+
+#include "type_support.h"
+#include "bnr/bnr_1.0/ia_css_bnr_param.h"
+
+/* DP (Defect Pixel Correction) */
+struct sh_css_isp_dp_params {
+ s32 threshold_single;
+ s32 threshold_2adjacent;
+ s32 gain;
+ s32 coef_rr_gr;
+ s32 coef_rr_gb;
+ s32 coef_bb_gb;
+ s32 coef_bb_gr;
+ s32 coef_gr_rr;
+ s32 coef_gr_bb;
+ s32 coef_gb_bb;
+ s32 coef_gb_rr;
+};
+
+#endif /* __IA_CSS_DP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
new file mode 100644
index 000000000000..036727f3772d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DP_TYPES_H
+#define __IA_CSS_DP_TYPES_H
+
+/* @file
+* CSS-API header file for Defect Pixel Correction (DPC) parameters.
+*/
+
+/* Defect Pixel Correction configuration.
+ *
+ * ISP block: DPC1 (DPC after WB)
+ * DPC2 (DPC before WB)
+ * ISP1: DPC1 is used.
+ * ISP2: DPC2 is used.
+ */
+struct ia_css_dp_config {
+ ia_css_u0_16 threshold; /** The threshold of defect pixel correction,
+ representing the permissible difference of
+ intensity between one pixel and its
+ surrounding pixels. Smaller values result
+ in more frequent pixel corrections.
+ u0.16, [0,65535],
+ default 8192, ineffective 65535 */
+	ia_css_u8_8 gain;	/** The sensitivity to mis-correction. The ISP
+				will miss many defects if the value is set
+				too large.
+ u8.8, [0,65535],
+ default 4096, ineffective 65535 */
+ u32 gr; /* unsigned <integer_bits>.<16-integer_bits> */
+ u32 r; /* unsigned <integer_bits>.<16-integer_bits> */
+ u32 b; /* unsigned <integer_bits>.<16-integer_bits> */
+ u32 gb; /* unsigned <integer_bits>.<16-integer_bits> */
+};
+
+#endif /* __IA_CSS_DP_TYPES_H */
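
Similarly, threshold is u0.16 and gain is u8.8 fixed point, so the documented defaults of 8192 and 4096 correspond to 0.125 and 16.0 respectively. A small standalone sketch of those conversions (helper names are illustrative only, not CSS API functions):

#include <stdio.h>
#include <stdint.h>

/* u0.16: 16 fractional bits, range [0, 1). */
static double u0_16_to_double(uint32_t v)
{
	return (double)v / 65536.0;
}

/* u8.8: 8 integer bits, 8 fractional bits. */
static double u8_8_to_double(uint32_t v)
{
	return (double)v / 256.0;
}

int main(void)
{
	printf("default threshold = %.3f\n", u0_16_to_double(8192)); /* 0.125 */
	printf("default gain      = %.3f\n", u8_8_to_double(4096));  /* 16.000 */
	return 0;
}
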
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c
new file mode 100644
index 000000000000..82521aed7aff
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_dpc2.host.h"
+#include "assert_support.h"
+
+void
+ia_css_dpc2_encode(
+ struct ia_css_isp_dpc2_params *to,
+ const struct ia_css_dpc2_config *from,
+ size_t size)
+{
+ (void)size;
+
+ assert((from->metric1 >= 0) && (from->metric1 <= METRIC1_ONE_FP));
+ assert((from->metric3 >= 0) && (from->metric3 <= METRIC3_ONE_FP));
+ assert((from->metric2 >= METRIC2_ONE_FP) &&
+ (from->metric2 < 256 * METRIC2_ONE_FP));
+ assert((from->wb_gain_gr > 0) && (from->wb_gain_gr < 16 * WBGAIN_ONE_FP));
+ assert((from->wb_gain_r > 0) && (from->wb_gain_r < 16 * WBGAIN_ONE_FP));
+ assert((from->wb_gain_b > 0) && (from->wb_gain_b < 16 * WBGAIN_ONE_FP));
+ assert((from->wb_gain_gb > 0) && (from->wb_gain_gb < 16 * WBGAIN_ONE_FP));
+
+ to->metric1 = from->metric1;
+ to->metric2 = from->metric2;
+ to->metric3 = from->metric3;
+
+ to->wb_gain_gr = from->wb_gain_gr;
+ to->wb_gain_r = from->wb_gain_r;
+ to->wb_gain_b = from->wb_gain_b;
+ to->wb_gain_gb = from->wb_gain_gb;
+}
+
+/* TODO: AM: This needs a proper implementation. */
+void
+ia_css_init_dpc2_state(
+ void *state,
+ size_t size)
+{
+ (void)state;
+ (void)size;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+/* TODO: AM: This needs a proper implementation. */
+void
+ia_css_dpc2_debug_dtrace(
+ const struct ia_css_dpc2_config *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h
new file mode 100644
index 000000000000..85419182c1f5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DPC2_HOST_H
+#define __IA_CSS_DPC2_HOST_H
+
+#include "ia_css_dpc2_types.h"
+#include "ia_css_dpc2_param.h"
+
+void
+ia_css_dpc2_encode(
+ struct ia_css_isp_dpc2_params *to,
+ const struct ia_css_dpc2_config *from,
+ size_t size);
+
+void
+ia_css_init_dpc2_state(
+ void *state,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_dpc2_debug_dtrace(
+ const struct ia_css_dpc2_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_DPC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h
new file mode 100644
index 000000000000..b1bbc283e367
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DPC2_PARAM_H
+#define __IA_CSS_DPC2_PARAM_H
+
+#include <linux/math.h>
+
+#include "type_support.h"
+#include "vmem.h" /* for VMEM_ARRAY*/
+
+/* 4 planes : GR, R, B, GB */
+#define NUM_PLANES 4
+
+/* ToDo: Move this to testsetup */
+#define MAX_FRAME_SIMDWIDTH 30
+
+/* 3 lines state per color plane input_line_state */
+#define DPC2_STATE_INPUT_BUFFER_HEIGHT (3 * NUM_PLANES)
+/* Each plane has width equal to half frame line */
+#define DPC2_STATE_INPUT_BUFFER_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane for local deviation state*/
+#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_HEIGHT (1 * NUM_PLANES)
+/* Each plane has width equal to half frame line */
+#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* MINMAX state buffer stores 1 full input line (GR-R color line) */
+#define DPC2_STATE_SECOND_MINMAX_BUFFER_HEIGHT 1
+#define DPC2_STATE_SECOND_MINMAX_BUFFER_WIDTH MAX_FRAME_SIMDWIDTH
+
+struct ia_css_isp_dpc2_params {
+ s32 metric1;
+ s32 metric2;
+ s32 metric3;
+ s32 wb_gain_gr;
+ s32 wb_gain_r;
+ s32 wb_gain_b;
+ s32 wb_gain_gb;
+};
+
+#endif /* __IA_CSS_DPC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h
new file mode 100644
index 000000000000..c261899bcaa0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DPC2_TYPES_H
+#define __IA_CSS_DPC2_TYPES_H
+
+/* @file
+* CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters.
+*/
+
+#include "type_support.h"
+
+/**@{*/
+/* Floating point constants for different metrics. */
+#define METRIC1_ONE_FP BIT(12)
+#define METRIC2_ONE_FP BIT(5)
+#define METRIC3_ONE_FP BIT(12)
+#define WBGAIN_ONE_FP BIT(9)
+/**@}*/
+
+/**@{*/
+/* Defect Pixel Correction 2 configuration.
+ *
+ * \brief DPC2 public parameters.
+ * \details Struct with all parameters for the Defect Pixel Correction 2
+ * kernel that can be set from the CSS API.
+ *
+ * ISP block: DPC1 (DPC after WB)
+ * DPC2 (DPC before WB)
+ * ISP1: DPC1 is used.
+ * ISP2: DPC2 is used.
+ *
+ */
+struct ia_css_dpc2_config {
+ /**@{*/
+ s32 metric1;
+ s32 metric2;
+ s32 metric3;
+ s32 wb_gain_gr;
+ s32 wb_gain_r;
+ s32 wb_gain_b;
+ s32 wb_gain_gb;
+ /**@}*/
+};
+
+/**@}*/
+
+#endif /* __IA_CSS_DPC2_TYPES_H */
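
The *_ONE_FP constants above fix the scale of each field: metric1/metric3 carry 12 fractional bits, metric2 carries 5, and the white-balance gains carry 9 (a gain of 1.0 encodes as 512). ia_css_dpc2_encode() asserts, for instance, that 0 < wb_gain_r < 16 * WBGAIN_ONE_FP. A hedged host-side sketch of encoding a couple of example values follows (the numbers are illustrative, not recommended tunings):

#include <stdio.h>

/* Same scale factors as ia_css_dpc2_types.h, spelled out for this sketch. */
#define METRIC1_ONE_FP (1 << 12)
#define METRIC2_ONE_FP (1 << 5)
#define METRIC3_ONE_FP (1 << 12)
#define WBGAIN_ONE_FP  (1 << 9)

/* Convert a floating-point white-balance gain to the s32 fixed-point
 * representation expected by ia_css_dpc2_config (9 fractional bits).
 */
static int wbgain_to_fp(double gain)
{
	return (int)(gain * WBGAIN_ONE_FP + 0.5);
}

int main(void)
{
	/* A gain of 1.5 encodes as 768; it must stay below 16 * 512 = 8192. */
	printf("wb_gain 1.5 -> %d\n", wbgain_to_fp(1.5));
	/* A metric2 of 2.0 encodes as 64 with a 5-bit fraction. */
	printf("metric2 2.0 -> %d\n", (int)(2.0 * METRIC2_ONE_FP));
	return 0;
}
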
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
new file mode 100644
index 000000000000..e9d6dd0bbfe2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "ia_css_frame_public.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "ia_css_types.h"
+#include "ia_css_host_data.h"
+#include "sh_css_param_dvs.h"
+#include "sh_css_params.h"
+#include "ia_css_binary.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ia_css_dvs.host.h"
+
+static const struct ia_css_dvs_configuration default_config = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_dvs_config(
+ struct sh_css_isp_dvs_isp_config *to,
+ const struct ia_css_dvs_configuration *from,
+ unsigned int size)
+{
+ (void)size;
+ to->num_horizontal_blocks =
+ DVS_NUM_BLOCKS_X(from->info->res.width);
+ to->num_vertical_blocks =
+ DVS_NUM_BLOCKS_Y(from->info->res.height);
+}
+
+int ia_css_dvs_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_dvs_configuration config = default_config;
+
+ config.info = info;
+
+ return ia_css_configure_dvs(binary, &config);
+}
+
+static void
+convert_coords_to_ispparams(
+ struct ia_css_host_data *gdc_warp_table,
+ const struct ia_css_dvs_6axis_config *config,
+ unsigned int i_stride,
+ unsigned int o_width,
+ unsigned int o_height,
+ unsigned int uv_flag)
+{
+ unsigned int i, j;
+ gdc_warp_param_mem_t s = { 0 };
+ unsigned int x00, x01, x10, x11,
+ y00, y01, y10, y11;
+
+ unsigned int xmin, ymin, xmax, ymax;
+ unsigned int topleft_x, topleft_y, bottom_x, bottom_y,
+ topleft_x_frac, topleft_y_frac;
+ unsigned int dvs_interp_envelope = (DVS_GDC_INTERP_METHOD == HRT_GDC_BLI_MODE ?
+ DVS_GDC_BLI_INTERP_ENVELOPE : DVS_GDC_BCI_INTERP_ENVELOPE);
+
+	/* Number of blocks per height and width. num_blocks_x is rounded up
+	 * to blockdim_x; for the Y0Y1 block (uv_flag == 0) it is rounded up
+	 * to an even number.
+	 */
+	unsigned int num_blocks_y = (uv_flag ? DVS_NUM_BLOCKS_Y_CHROMA(o_height)
+					     : DVS_NUM_BLOCKS_Y(o_height));
+	unsigned int num_blocks_x = (uv_flag ? DVS_NUM_BLOCKS_X_CHROMA(o_width)
+					     : DVS_NUM_BLOCKS_X(o_width));
+
+ unsigned int in_stride = i_stride * DVS_INPUT_BYTES_PER_PIXEL;
+ unsigned int width, height;
+ unsigned int *xbuff = NULL;
+ unsigned int *ybuff = NULL;
+ struct gdc_warp_param_mem_s *ptr;
+
+ assert(config);
+ assert(gdc_warp_table);
+ assert(gdc_warp_table->address);
+
+ ptr = (struct gdc_warp_param_mem_s *)gdc_warp_table->address;
+
+ ptr += (2 * uv_flag); /* format is Y0 Y1 UV, so UV starts at 3rd position */
+
+ if (uv_flag == 0) {
+ xbuff = config->xcoords_y;
+ ybuff = config->ycoords_y;
+ width = config->width_y;
+ height = config->height_y;
+ } else {
+ xbuff = config->xcoords_uv;
+ ybuff = config->ycoords_uv;
+ width = config->width_uv;
+ height = config->height_uv;
+ }
+
+ IA_CSS_LOG("blockdim_x %d blockdim_y %d",
+ DVS_BLOCKDIM_X, DVS_BLOCKDIM_Y_LUMA >> uv_flag);
+ IA_CSS_LOG("num_blocks_x %d num_blocks_y %d", num_blocks_x, num_blocks_y);
+ IA_CSS_LOG("width %d height %d", width, height);
+
+	/* The width and height of the provided morphing table should be
+	 * one more than the number of blocks.
+	 */
+	assert(width == num_blocks_x + 1);
+	assert(height == num_blocks_y + 1);
+
+ for (j = 0; j < num_blocks_y; j++) {
+ for (i = 0; i < num_blocks_x; i++) {
+ x00 = xbuff[j * width + i];
+ x01 = xbuff[j * width + (i + 1)];
+ x10 = xbuff[(j + 1) * width + i];
+ x11 = xbuff[(j + 1) * width + (i + 1)];
+
+ y00 = ybuff[j * width + i];
+ y01 = ybuff[j * width + (i + 1)];
+ y10 = ybuff[(j + 1) * width + i];
+ y11 = ybuff[(j + 1) * width + (i + 1)];
+
+ xmin = min(x00, x10);
+ xmax = max(x01, x11);
+ ymin = min(y00, y01);
+ ymax = max(y10, y11);
+
+ /* Assert that right column's X is greater */
+ assert(x01 >= xmin);
+ assert(x11 >= xmin);
+ /* Assert that bottom row's Y is greater */
+ assert(y10 >= ymin);
+ assert(y11 >= ymin);
+
+ topleft_y = ymin >> DVS_COORD_FRAC_BITS;
+ topleft_x = ((xmin >> DVS_COORD_FRAC_BITS)
+ >> XMEM_ALIGN_LOG2)
+ << (XMEM_ALIGN_LOG2);
+ s.in_addr_offset = topleft_y * in_stride + topleft_x;
+
+ /* similar to topleft_y calculation, but round up if ymax
+ * has any fraction bits */
+ bottom_y = DIV_ROUND_UP(ymax, BIT(DVS_COORD_FRAC_BITS));
+ s.in_block_height = bottom_y - topleft_y + dvs_interp_envelope;
+
+ bottom_x = DIV_ROUND_UP(xmax, BIT(DVS_COORD_FRAC_BITS));
+ s.in_block_width = bottom_x - topleft_x + dvs_interp_envelope;
+
+ topleft_x_frac = topleft_x << (DVS_COORD_FRAC_BITS);
+ topleft_y_frac = topleft_y << (DVS_COORD_FRAC_BITS);
+
+ s.p0_x = x00 - topleft_x_frac;
+ s.p1_x = x01 - topleft_x_frac;
+ s.p2_x = x10 - topleft_x_frac;
+ s.p3_x = x11 - topleft_x_frac;
+
+ s.p0_y = y00 - topleft_y_frac;
+ s.p1_y = y01 - topleft_y_frac;
+ s.p2_y = y10 - topleft_y_frac;
+ s.p3_y = y11 - topleft_y_frac;
+
+			/* The block should fit within the bounding box. */
+ assert(s.p0_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p1_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p2_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p3_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p0_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p1_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p2_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p3_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+
+			/* The block size should be greater than zero. */
+ assert(s.p0_x < s.p1_x);
+ assert(s.p2_x < s.p3_x);
+ assert(s.p0_y < s.p2_y);
+ assert(s.p1_y < s.p3_y);
+
+#if 0
+ printf("j: %d\ti:%d\n", j, i);
+ printf("offset: %d\n", s.in_addr_offset);
+ printf("p0_x: %d\n", s.p0_x);
+ printf("p0_y: %d\n", s.p0_y);
+ printf("p1_x: %d\n", s.p1_x);
+ printf("p1_y: %d\n", s.p1_y);
+ printf("p2_x: %d\n", s.p2_x);
+ printf("p2_y: %d\n", s.p2_y);
+ printf("p3_x: %d\n", s.p3_x);
+ printf("p3_y: %d\n", s.p3_y);
+
+ printf("p0_x_nofrac[0]: %d\n", s.p0_x >> DVS_COORD_FRAC_BITS);
+ printf("p0_y_nofrac[1]: %d\n", s.p0_y >> DVS_COORD_FRAC_BITS);
+ printf("p1_x_nofrac[2]: %d\n", s.p1_x >> DVS_COORD_FRAC_BITS);
+ printf("p1_y_nofrac[3]: %d\n", s.p1_y >> DVS_COORD_FRAC_BITS);
+ printf("p2_x_nofrac[0]: %d\n", s.p2_x >> DVS_COORD_FRAC_BITS);
+ printf("p2_y_nofrac[1]: %d\n", s.p2_y >> DVS_COORD_FRAC_BITS);
+ printf("p3_x_nofrac[2]: %d\n", s.p3_x >> DVS_COORD_FRAC_BITS);
+ printf("p3_y_nofrac[3]: %d\n", s.p3_y >> DVS_COORD_FRAC_BITS);
+ printf("\n");
+#endif
+
+ *ptr = s;
+
+			/* Storage format: Y0 Y1 UV0 Y2 Y3 UV1.
+			 * For the luma planes (uv_flag == 0), increment by 2 when
+			 * i is odd so that the UV position is skipped; for the UV
+			 * plane, increment by 3 to reach the next UV entry.
+			 */
+ if (uv_flag)
+ ptr += 3;
+ else
+ ptr += (1 + (i & 1));
+ }
+ }
+}
+
+struct ia_css_host_data *
+convert_allocate_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info)
+{
+ unsigned int i_stride;
+ unsigned int o_width;
+ unsigned int o_height;
+ struct ia_css_host_data *me;
+
+ assert(binary);
+ assert(dvs_6axis_config);
+ assert(dvs_in_frame_info);
+
+ me = ia_css_host_data_allocate((size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3));
+
+ if (!me)
+ return NULL;
+
+	/* DVS only supports input frames of YUV420 or NV12; fail for all other cases. */
+ assert((dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_NV12)
+ || (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420));
+
+ i_stride = dvs_in_frame_info->padded_width;
+
+ o_width = binary->out_frame_info[0].res.width;
+ o_height = binary->out_frame_info[0].res.height;
+
+ /* Y plane */
+ convert_coords_to_ispparams(me, dvs_6axis_config,
+ i_stride, o_width, o_height, 0);
+
+ if (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420) {
+		/* YUV420 has half the stride for the U/V plane */
+ i_stride /= 2;
+ }
+
+ /* UV plane (packed inside the y plane) */
+ convert_coords_to_ispparams(me, dvs_6axis_config,
+ i_stride, o_width / 2, o_height / 2, 1);
+
+ return me;
+}
+
+int
+store_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info,
+	ia_css_ptr ddr_addr_y)
+{
+ struct ia_css_host_data *me;
+
+ assert(dvs_6axis_config);
+ assert(ddr_addr_y != mmgr_NULL);
+ assert(dvs_in_frame_info);
+
+ me = convert_allocate_dvs_6axis_config(dvs_6axis_config,
+ binary,
+ dvs_in_frame_info);
+
+	if (!me) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ ia_css_params_store_ia_css_host_data(
+ ddr_addr_y,
+ me);
+ ia_css_host_data_free(me);
+
+ return 0;
+}
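
For each output block, convert_coords_to_ispparams() above takes the four corner coordinates in DVS_COORD_FRAC_BITS fixed point, aligns the top-left x down to the XMEM boundary, and sizes the input block as the rounded-up extent plus the interpolation envelope (+1 for bilinear, +3 for bicubic). A standalone arithmetic sketch of that sizing is shown below; the fractional-bit count, alignment and envelope values are assumptions chosen purely for illustration:

#include <stdio.h>

/* Illustrative values only; the real DVS_COORD_FRAC_BITS, XMEM_ALIGN_LOG2 and
 * interpolation envelope come from the ISP configuration headers.
 */
#define COORD_FRAC_BITS 4
#define XMEM_ALIGN_LOG2 2
#define BLI_ENVELOPE    1	/* bilinear: +1, bicubic would be +3 */

static unsigned int div_round_up(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	/* Corner x coordinates of one block, in fixed point. */
	unsigned int xmin = (100 << COORD_FRAC_BITS) | 7;	/* 100.4375 */
	unsigned int xmax = (163 << COORD_FRAC_BITS) | 9;	/* 163.5625 */

	/* Top-left pixel, aligned down to the XMEM boundary. */
	unsigned int topleft_x = ((xmin >> COORD_FRAC_BITS) >> XMEM_ALIGN_LOG2)
				 << XMEM_ALIGN_LOG2;		/* 100 */
	/* Bottom pixel, rounded up if there are fraction bits. */
	unsigned int bottom_x = div_round_up(xmax, 1 << COORD_FRAC_BITS); /* 164 */

	unsigned int in_block_width = bottom_x - topleft_x + BLI_ENVELOPE; /* 65 */

	printf("topleft_x=%u bottom_x=%u in_block_width=%u\n",
	       topleft_x, bottom_x, in_block_width);
	return 0;
}
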
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
new file mode 100644
index 000000000000..98995c9ed0a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DVS_HOST_H
+#define __IA_CSS_DVS_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "sh_css_params.h"
+
+#include "ia_css_types.h"
+#include "ia_css_dvs_types.h"
+#include "ia_css_dvs_param.h"
+
+/* For bilinear interpolation, we need to add +1 to the input block height
+ * calculation. For bicubic interpolation, we need to add +3 instead. */
+#define DVS_GDC_BLI_INTERP_ENVELOPE 1
+#define DVS_GDC_BCI_INTERP_ENVELOPE 3
+
+void
+ia_css_dvs_config(
+ struct sh_css_isp_dvs_isp_config *to,
+ const struct ia_css_dvs_configuration *from,
+ unsigned int size);
+
+int ia_css_dvs_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+convert_dvs_6axis_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_binary *binary);
+
+struct ia_css_host_data *
+convert_allocate_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info);
+
+int
+store_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info,
+ ia_css_ptr ddr_addr_y);
+
+#endif /* __IA_CSS_DVS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
new file mode 100644
index 000000000000..368f5b2b1ee8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DVS_PARAM_H
+#define __IA_CSS_DVS_PARAM_H
+
+#include <type_support.h>
+
+#if !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) && !defined(PARAMBIN_GENERATION)
+#include "dma.h"
+#endif /* !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) */
+
+#include "uds/uds_1.0/ia_css_uds_param.h"
+
+/* DVS (digital video stabilization) ISP configuration */
+struct sh_css_isp_dvs_isp_config {
+ u32 num_horizontal_blocks;
+ u32 num_vertical_blocks;
+};
+
+#endif /* __IA_CSS_DVS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
new file mode 100644
index 000000000000..7057fac6e293
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_DVS_TYPES_H
+#define __IA_CSS_DVS_TYPES_H
+
+/* DVS frame
+ *
+ * ISP block: dvs frame
+ */
+
+#include "ia_css_frame_public.h"
+
+struct ia_css_dvs_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_DVS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
new file mode 100644
index 000000000000..8e4451fcc8e3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+
+#include "type_support.h"
+#include "assert_support.h"
+#include "math_support.h" /* for min and max */
+
+#include "ia_css_eed1_8.host.h"
+
+/* WARNING1: The number of inv points should be less than or equal to 16,
+ * due to an implementation limitation. See the kernel design document
+ * for more details.
+ * WARNING2: Do not modify the number of inv points without correcting
+ * the EED1_8 kernel implementation assumptions.
+ */
+#define NUMBER_OF_CHGRINV_POINTS 15
+#define NUMBER_OF_TCINV_POINTS 9
+#define NUMBER_OF_FCINV_POINTS 9
+
+static const s16 chgrinv_x[NUMBER_OF_CHGRINV_POINTS] = {
+ 0, 16, 64, 144, 272, 448, 672, 976,
+ 1376, 1888, 2528, 3312, 4256, 5376, 6688
+};
+
+static const s16 chgrinv_a[NUMBER_OF_CHGRINV_POINTS] = {
+ -7171, -256, -29, -3456, -1071, -475, -189, -102,
+ -48, -38, -10, -9, -7, -6, 0
+};
+
+static const s16 chgrinv_b[NUMBER_OF_CHGRINV_POINTS] = {
+ 8191, 1021, 256, 114, 60, 37, 24, 17,
+ 12, 9, 6, 5, 4, 3, 2
+};
+
+static const s16 chgrinv_c[NUMBER_OF_CHGRINV_POINTS] = {
+ 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0
+};
+
+static const s16 tcinv_x[NUMBER_OF_TCINV_POINTS] = {
+ 0, 4, 11, 23, 42, 68, 102, 148, 205
+};
+
+static const s16 tcinv_a[NUMBER_OF_TCINV_POINTS] = {
+ -6364, -631, -126, -34, -13, -6, -4452, -2156, 0
+};
+
+static const s16 tcinv_b[NUMBER_OF_TCINV_POINTS] = {
+ 8191, 1828, 726, 352, 197, 121, 80, 55, 40
+};
+
+static const s16 tcinv_c[NUMBER_OF_TCINV_POINTS] = {
+ 1, 1, 1, 1, 1, 1, 0, 0, 0
+};
+
+static const s16 fcinv_x[NUMBER_OF_FCINV_POINTS] = {
+ 0, 80, 216, 456, 824, 1344, 2040, 2952, 4096
+};
+
+static const s16 fcinv_a[NUMBER_OF_FCINV_POINTS] = {
+ -5244, -486, -86, -2849, -961, -400, -180, -86, 0
+};
+
+static const s16 fcinv_b[NUMBER_OF_FCINV_POINTS] = {
+ 8191, 1637, 607, 287, 159, 98, 64, 44, 32
+};
+
+static const s16 fcinv_c[NUMBER_OF_FCINV_POINTS] = {
+ 1, 1, 1, 0, 0, 0, 0, 0, 0
+};
+
+void
+ia_css_eed1_8_vmem_encode(
+ struct eed1_8_vmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size)
+{
+ unsigned int i, j, base;
+ const unsigned int total_blocks = 4;
+ const unsigned int shuffle_block = 16;
+
+ (void)size;
+
+ /* Init */
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->e_dew_enh_x[0][i] = 0;
+ to->e_dew_enh_y[0][i] = 0;
+ to->e_dew_enh_a[0][i] = 0;
+ to->e_dew_enh_f[0][i] = 0;
+ to->chgrinv_x[0][i] = 0;
+ to->chgrinv_a[0][i] = 0;
+ to->chgrinv_b[0][i] = 0;
+ to->chgrinv_c[0][i] = 0;
+ to->tcinv_x[0][i] = 0;
+ to->tcinv_a[0][i] = 0;
+ to->tcinv_b[0][i] = 0;
+ to->tcinv_c[0][i] = 0;
+ to->fcinv_x[0][i] = 0;
+ to->fcinv_a[0][i] = 0;
+ to->fcinv_b[0][i] = 0;
+ to->fcinv_c[0][i] = 0;
+ }
+
+ /* Constraints on dew_enhance_seg_x and dew_enhance_seg_y:
+	 * - values should be greater than or equal to 0.
+	 * - values should be ascending.
+	 * - the value at index zero must be 0.
+ */
+
+ /* Checking constraints: */
+ /* TODO: investigate if an assert is the right way to report that
+ * the constraints are violated.
+ */
+ for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ assert(from->dew_enhance_seg_x[j] > -1);
+ assert(from->dew_enhance_seg_y[j] > -1);
+ }
+
+ for (j = 1; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ assert(from->dew_enhance_seg_x[j] > from->dew_enhance_seg_x[j - 1]);
+ assert(from->dew_enhance_seg_y[j] > from->dew_enhance_seg_y[j - 1]);
+ }
+
+ assert(from->dew_enhance_seg_x[0] == 0);
+ assert(from->dew_enhance_seg_y[0] == 0);
+
+ /* Constraints on chgrinv_x, tcinv_x and fcinv_x:
+	 * - values should be greater than or equal to 0.
+	 * - values should be ascending.
+	 * - the value at index zero must be 0.
+ */
+ assert(chgrinv_x[0] == 0);
+ assert(tcinv_x[0] == 0);
+ assert(fcinv_x[0] == 0);
+
+ for (j = 1; j < NUMBER_OF_CHGRINV_POINTS; j++) {
+ assert(chgrinv_x[j] > chgrinv_x[j - 1]);
+ }
+
+ for (j = 1; j < NUMBER_OF_TCINV_POINTS; j++) {
+ assert(tcinv_x[j] > tcinv_x[j - 1]);
+ }
+
+ for (j = 1; j < NUMBER_OF_FCINV_POINTS; j++) {
+ assert(fcinv_x[j] > fcinv_x[j - 1]);
+ }
+
+	/* The implementation of the 1/x calculation is based on the availability
+	 * of the OP_vec_shuffle16 operation. A 64-element vector is split up into
+	 * 4 blocks of 16 elements. Each array is copied to a vector 4 times
+	 * (starting at offsets 0, 16, 32 and 48). All array elements are copied or
+	 * initialised as described in the KFS. The remaining elements are set to 0.
+	 */
+ /* TODO: guard this code with above assumptions */
+ for (i = 0; i < total_blocks; i++) {
+ base = shuffle_block * i;
+
+ for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ to->e_dew_enh_x[0][base + j] = clamp(from->dew_enhance_seg_x[j],
+ 0, 8191);
+ to->e_dew_enh_y[0][base + j] = clamp(from->dew_enhance_seg_y[j],
+ -8192, 8191);
+ }
+
+ for (j = 0; j < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) {
+ to->e_dew_enh_a[0][base + j] = clamp(from->dew_enhance_seg_slope[j],
+ -8192, 8191);
+ /* Convert dew_enhance_seg_exp to flag:
+ * 0 -> 0
+ * 1...13 -> 1
+ */
+ to->e_dew_enh_f[0][base + j] = clamp(from->dew_enhance_seg_exp[j],
+ 0, 13) > 0;
+ }
+
+ /* Hard-coded to 0, in order to be able to handle out of
+ * range input in the same way as the other segments.
+ * See KFS for more details.
+ */
+ to->e_dew_enh_a[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;
+ to->e_dew_enh_f[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;
+
+ for (j = 0; j < NUMBER_OF_CHGRINV_POINTS; j++) {
+ to->chgrinv_x[0][base + j] = chgrinv_x[j];
+ to->chgrinv_a[0][base + j] = chgrinv_a[j];
+ to->chgrinv_b[0][base + j] = chgrinv_b[j];
+ to->chgrinv_c[0][base + j] = chgrinv_c[j];
+ }
+
+ for (j = 0; j < NUMBER_OF_TCINV_POINTS; j++) {
+ to->tcinv_x[0][base + j] = tcinv_x[j];
+ to->tcinv_a[0][base + j] = tcinv_a[j];
+ to->tcinv_b[0][base + j] = tcinv_b[j];
+ to->tcinv_c[0][base + j] = tcinv_c[j];
+ }
+
+ for (j = 0; j < NUMBER_OF_FCINV_POINTS; j++) {
+ to->fcinv_x[0][base + j] = fcinv_x[j];
+ to->fcinv_a[0][base + j] = fcinv_a[j];
+ to->fcinv_b[0][base + j] = fcinv_b[j];
+ to->fcinv_c[0][base + j] = fcinv_c[j];
+ }
+ }
+}
+
+void
+ia_css_eed1_8_encode(
+ struct eed1_8_dmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size)
+{
+ int i;
+ int min_exp = 0;
+
+ (void)size;
+
+ to->rbzp_strength = from->rbzp_strength;
+
+ to->fcstrength = from->fcstrength;
+ to->fcthres_0 = from->fcthres_0;
+ to->fc_sat_coef = from->fc_sat_coef;
+ to->fc_coring_prm = from->fc_coring_prm;
+ to->fc_slope = from->fcthres_1 - from->fcthres_0;
+
+ to->aerel_thres0 = from->aerel_thres0;
+ to->aerel_gain0 = from->aerel_gain0;
+ to->aerel_thres_diff = from->aerel_thres1 - from->aerel_thres0;
+ to->aerel_gain_diff = from->aerel_gain1 - from->aerel_gain0;
+
+ to->derel_thres0 = from->derel_thres0;
+ to->derel_gain0 = from->derel_gain0;
+ to->derel_thres_diff = (from->derel_thres1 - from->derel_thres0);
+ to->derel_gain_diff = (from->derel_gain1 - from->derel_gain0);
+
+ to->coring_pos0 = from->coring_pos0;
+ to->coring_pos_diff = (from->coring_pos1 - from->coring_pos0);
+ to->coring_neg0 = from->coring_neg0;
+ to->coring_neg_diff = (from->coring_neg1 - from->coring_neg0);
+
+	/* Note: this should be (ISP_VEC_ELEMBITS - 1).
+	 * TODO: the testbench does not currently support the use of
+	 * ISP_VEC_ELEMBITS. Investigate how to fix this.
+	 */
+ to->gain_exp = (13 - from->gain_exp);
+ to->gain_pos0 = from->gain_pos0;
+ to->gain_pos_diff = (from->gain_pos1 - from->gain_pos0);
+ to->gain_neg0 = from->gain_neg0;
+ to->gain_neg_diff = (from->gain_neg1 - from->gain_neg0);
+
+ to->margin_pos0 = from->pos_margin0;
+ to->margin_pos_diff = (from->pos_margin1 - from->pos_margin0);
+ to->margin_neg0 = from->neg_margin0;
+ to->margin_neg_diff = (from->neg_margin1 - from->neg_margin0);
+
+ /* Encode DEWEnhance exp (e_dew_enh_asr) */
+ for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++) {
+ min_exp = max(min_exp, from->dew_enhance_seg_exp[i]);
+ }
+ to->e_dew_enh_asr = 13 - clamp(min_exp, 0, 13);
+
+ to->dedgew_max = from->dedgew_max;
+}
+
+void
+ia_css_init_eed1_8_state(
+ void *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_eed1_8_debug_dtrace(
+ const struct ia_css_eed1_8_config *eed,
+ unsigned int level)
+{
+ if (!eed)
+ return;
+
+ ia_css_debug_dtrace(level, "Edge Enhancing Demosaic 1.8:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rbzp_strength",
+ eed->rbzp_strength);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcstrength", eed->fcstrength);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_0", eed->fcthres_0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_1", eed->fcthres_1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_sat_coef", eed->fc_sat_coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_coring_prm",
+ eed->fc_coring_prm);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres0", eed->aerel_thres0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain0", eed->aerel_gain0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres1", eed->aerel_thres1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain1", eed->aerel_gain1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres0", eed->derel_thres0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain0", eed->derel_gain0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres1", eed->derel_thres1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain1", eed->derel_gain1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos0", eed->coring_pos0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos1", eed->coring_pos1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg0", eed->coring_neg0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg1", eed->coring_neg1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_exp", eed->gain_exp);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos0", eed->gain_pos0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos1", eed->gain_pos1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg0", eed->gain_neg0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg1", eed->gain_neg1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin0", eed->pos_margin0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin1", eed->pos_margin1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin0", eed->neg_margin0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin1", eed->neg_margin1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dedgew_max", eed->dedgew_max);
+}
+#endif
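
The vmem encoder above relies on the shuffle-block layout described in its comment: the ISP vector is treated as four blocks of 16 elements, each lookup table is replicated once per block, and unused tail elements are zeroed. A host-side sketch of that layout using plain arrays follows; the 64-element vector width is an assumption made only for this example:

#include <stdio.h>
#include <string.h>

#define VEC_NELEMS     64	/* assumed vector width for this sketch */
#define SHUFFLE_BLOCK  16
#define TOTAL_BLOCKS   (VEC_NELEMS / SHUFFLE_BLOCK)
#define NUM_POINTS     15	/* e.g. NUMBER_OF_CHGRINV_POINTS */

int main(void)
{
	short table[NUM_POINTS] = {
		0, 16, 64, 144, 272, 448, 672, 976,
		1376, 1888, 2528, 3312, 4256, 5376, 6688
	};
	short vec[VEC_NELEMS];
	unsigned int i, j, base;

	/* Zero everything first, then copy the table into each 16-element
	 * block; element 15 of every block stays 0, mirroring the encoder.
	 */
	memset(vec, 0, sizeof(vec));
	for (i = 0; i < TOTAL_BLOCKS; i++) {
		base = SHUFFLE_BLOCK * i;
		for (j = 0; j < NUM_POINTS; j++)
			vec[base + j] = table[j];
	}

	for (i = 0; i < VEC_NELEMS; i++)
		printf("%d%c", vec[i], (i % SHUFFLE_BLOCK) == 15 ? '\n' : ' ');
	return 0;
}
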
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h
new file mode 100644
index 000000000000..4af4bae15190
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_EED1_8_HOST_H
+#define __IA_CSS_EED1_8_HOST_H
+
+#include "ia_css_eed1_8_types.h"
+#include "ia_css_eed1_8_param.h"
+
+void
+ia_css_eed1_8_vmem_encode(
+ struct eed1_8_vmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size);
+
+void
+ia_css_eed1_8_encode(
+ struct eed1_8_dmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size);
+
+void
+ia_css_init_eed1_8_state(
+ void *state,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_eed1_8_debug_dtrace(
+ const struct ia_css_eed1_8_config *config,
+ unsigned int level);
+#endif
+
+#endif /* __IA_CSS_EED1_8_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
new file mode 100644
index 000000000000..a24eef965f16
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_EED1_8_PARAM_H
+#define __IA_CSS_EED1_8_PARAM_H
+
+#include <linux/math.h>
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+#include "ia_css_eed1_8_types.h" /* IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS */
+
+/* Configuration parameters: */
+
+/* Enable median for false color correction
+ * 0: Do not use median
+ * 1: Use median
+ * Default: 1
+ */
+#define EED1_8_FC_ENABLE_MEDIAN 1
+
+/* Coring Threshold minima
+ * Used in Tint color suppression.
+ * Default: 1
+ */
+#define EED1_8_CORINGTHMIN 1
+
+/* Define the size of the state. TODO: check if this is the correct place. */
+/* 4 planes : GR, R, B, GB */
+#define NUM_PLANES 4
+
+/* 5 lines state per color plane input_line_state */
+#define EED1_8_STATE_INPUT_BUFFER_HEIGHT (5 * NUM_PLANES)
+
+/* Each plane has width equal to half frame line */
+#define EED1_8_STATE_INPUT_BUFFER_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane LD_H state */
+#define EED1_8_STATE_LD_H_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_LD_H_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane LD_V state */
+#define EED1_8_STATE_LD_V_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_LD_V_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line (single plane) state for D_Hr state */
+#define EED1_8_STATE_D_HR_HEIGHT 1
+#define EED1_8_STATE_D_HR_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line (single plane) state for D_Hb state */
+#define EED1_8_STATE_D_HB_HEIGHT 1
+#define EED1_8_STATE_D_HB_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 lines (single plane) state for D_Vr state */
+#define EED1_8_STATE_D_VR_HEIGHT 2
+#define EED1_8_STATE_D_VR_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 line (single plane) state for D_Vb state */
+#define EED1_8_STATE_D_VB_HEIGHT 2
+#define EED1_8_STATE_D_VB_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 lines state for R and B (= 2 planes) rb_zipped_state */
+#define EED1_8_STATE_RB_ZIPPED_HEIGHT (2 * 2)
+#define EED1_8_STATE_RB_ZIPPED_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+#if EED1_8_FC_ENABLE_MEDIAN
+/* 1 full input line (GR-R color line) for Yc state */
+#define EED1_8_STATE_YC_HEIGHT 1
+#define EED1_8_STATE_YC_WIDTH MAX_FRAME_SIMDWIDTH
+
+/* 1 line state per color plane Cg_state */
+#define EED1_8_STATE_CG_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_CG_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane Co_state */
+#define EED1_8_STATE_CO_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_CO_WIDTH DIV_ROUND_UP(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 full input line (GR-R color line) for AbsK state */
+#define EED1_8_STATE_ABSK_HEIGHT 1
+#define EED1_8_STATE_ABSK_WIDTH MAX_FRAME_SIMDWIDTH
+#endif
+
+struct eed1_8_vmem_params {
+ VMEM_ARRAY(e_dew_enh_x, ISP_VEC_NELEMS);
+ SVMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS);
+ SVMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(e_dew_enh_f, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_c, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_c, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_c, ISP_VEC_NELEMS);
+};
+
+/* EED (Edge Enhancing Demosaic) ISP parameters */
+struct eed1_8_dmem_params {
+ s32 rbzp_strength;
+
+ s32 fcstrength;
+ s32 fcthres_0;
+ s32 fc_sat_coef;
+ s32 fc_coring_prm;
+ s32 fc_slope;
+
+ s32 aerel_thres0;
+ s32 aerel_gain0;
+ s32 aerel_thres_diff;
+ s32 aerel_gain_diff;
+
+ s32 derel_thres0;
+ s32 derel_gain0;
+ s32 derel_thres_diff;
+ s32 derel_gain_diff;
+
+ s32 coring_pos0;
+ s32 coring_pos_diff;
+ s32 coring_neg0;
+ s32 coring_neg_diff;
+
+ s32 gain_exp;
+ s32 gain_pos0;
+ s32 gain_pos_diff;
+ s32 gain_neg0;
+ s32 gain_neg_diff;
+
+ s32 margin_pos0;
+ s32 margin_pos_diff;
+ s32 margin_neg0;
+ s32 margin_neg_diff;
+
+ s32 e_dew_enh_asr;
+ s32 dedgew_max;
+};
+
+#endif /* __IA_CSS_EED1_8_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h
new file mode 100644
index 000000000000..0b977eb7ad71
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_EED1_8_TYPES_H
+#define __IA_CSS_EED1_8_TYPES_H
+
+/* @file
+* CSS-API header file for Edge Enhanced Demosaic parameters.
+*/
+
+#include "type_support.h"
+
+/**
+ * \brief EED1_8 public parameters.
+ * \details Struct with all parameters for the EED1.8 kernel that can be set
+ * from the CSS API.
+ */
+
+/* parameter list is based on ISP261 CSS API public parameter list_all.xlsx from 28-01-2015 */
+
+/* Number of segments + 1 segment used in edge reliability enhancement
+ * Ineffective: N/A
+ * Default: 9
+ */
+#define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS 9
+
+/* Edge Enhanced Demosaic configuration
+ *
+ * ISP2.6.1: EED1_8 is used.
+ */
+
+struct ia_css_eed1_8_config {
+ s32 rbzp_strength; /** Strength of zipper reduction. */
+
+ s32 fcstrength; /** Strength of false color reduction. */
+ s32 fcthres_0; /** Threshold to prevent chroma coring due to noise or green disparity in dark region. */
+ s32 fcthres_1; /** Threshold to prevent chroma coring due to noise or green disparity in bright region. */
+ s32 fc_sat_coef; /** How much color saturation to maintain in high color saturation region. */
+ s32 fc_coring_prm; /** Chroma coring coefficient for tint color suppression. */
+
+ s32 aerel_thres0; /** Threshold for Non-Directional Reliability at dark region. */
+ s32 aerel_gain0; /** Gain for Non-Directional Reliability at dark region. */
+ s32 aerel_thres1; /** Threshold for Non-Directional Reliability at bright region. */
+ s32 aerel_gain1; /** Gain for Non-Directional Reliability at bright region. */
+
+ s32 derel_thres0; /** Threshold for Directional Reliability at dark region. */
+ s32 derel_gain0; /** Gain for Directional Reliability at dark region. */
+ s32 derel_thres1; /** Threshold for Directional Reliability at bright region. */
+ s32 derel_gain1; /** Gain for Directional Reliability at bright region. */
+
+ s32 coring_pos0; /** Positive Edge Coring Threshold in dark region. */
+ s32 coring_pos1; /** Positive Edge Coring Threshold in bright region. */
+ s32 coring_neg0; /** Negative Edge Coring Threshold in dark region. */
+ s32 coring_neg1; /** Negative Edge Coring Threshold in bright region. */
+
+ s32 gain_exp; /** Common Exponent of Gain. */
+ s32 gain_pos0; /** Gain for Positive Edge in dark region. */
+ s32 gain_pos1; /** Gain for Positive Edge in bright region. */
+ s32 gain_neg0; /** Gain for Negative Edge in dark region. */
+ s32 gain_neg1; /** Gain for Negative Edge in bright region. */
+
+ s32 pos_margin0; /** Margin for Positive Edge in dark region. */
+ s32 pos_margin1; /** Margin for Positive Edge in bright region. */
+ s32 neg_margin0; /** Margin for Negative Edge in dark region. */
+ s32 neg_margin1; /** Margin for Negative Edge in bright region. */
+
+ s32 dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: X. */
+ s32 dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: Y. */
+ s32 dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS -
+ 1)]; /** Segment data for directional edge weight: Slope. */
+ s32 dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS -
+ 1)]; /** Segment data for directional edge weight: Exponent. */
+ s32 dedgew_max; /** Max Weight for Directional Edge. */
+};
+
+#endif /* __IA_CSS_EED1_8_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c
new file mode 100644
index 000000000000..6a10d3545278
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_formats.host.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+
+/*#include "sh_css_frac.h"*/
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+const struct ia_css_formats_config default_formats_config = {
+ 1
+};
+
+void
+ia_css_formats_encode(
+ struct sh_css_isp_formats_params *to,
+ const struct ia_css_formats_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->video_full_range_flag = from->video_full_range_flag;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_dump(
+ const struct sh_css_isp_formats_params *formats,
+ unsigned int level)
+{
+	if (!formats)
+		return;
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "video_full_range_flag", formats->video_full_range_flag);
+}
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_debug_dtrace(
+ const struct ia_css_formats_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.video_full_range_flag=%d\n",
+ config->video_full_range_flag);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h
new file mode 100644
index 000000000000..17a57ddaa321
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FORMATS_HOST_H
+#define __IA_CSS_FORMATS_HOST_H
+
+#include "ia_css_formats_types.h"
+#include "ia_css_formats_param.h"
+
+extern const struct ia_css_formats_config default_formats_config;
+
+void
+ia_css_formats_encode(
+ struct sh_css_isp_formats_params *to,
+ const struct ia_css_formats_config *from,
+ unsigned int size);
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_dump(
+ const struct sh_css_isp_formats_params *formats,
+ unsigned int level);
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_debug_dtrace(
+ const struct ia_css_formats_config *formats,
+ unsigned int level);
+#endif /*IA_CSS_NO_DEBUG*/
+
+#endif /* __IA_CSS_FORMATS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h
new file mode 100644
index 000000000000..2a16d93abf82
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FORMATS_PARAM_H
+#define __IA_CSS_FORMATS_PARAM_H
+
+#include "type_support.h"
+
+/* FORMATS (Format conversion) */
+struct sh_css_isp_formats_params {
+ s32 video_full_range_flag;
+};
+
+#endif /* __IA_CSS_FORMATS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
new file mode 100644
index 000000000000..5958e7b2ddfe
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FORMATS_TYPES_H
+#define __IA_CSS_FORMATS_TYPES_H
+
+/* @file
+* CSS-API header file for output format parameters.
+*/
+
+#include "type_support.h"
+
+/* Formats configuration.
+ *
+ * ISP block: FORMATS
+ * ISP1: FORMATS is used.
+ * ISP2: FORMATS is used.
+ */
+struct ia_css_formats_config {
+ u32 video_full_range_flag; /** selects the range of YUV output.
+ u8.0, [0,1],
+ default 1, ineffective n/a\n
+ 1 - full range, luma 0-255, chroma 0-255\n
+ 0 - reduced range, luma 16-235, chroma 16-240 */
+};
+
+#endif /* __IA_CSS_FORMATS_TYPES_H */
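
video_full_range_flag only selects which range the ISP emits; for reference, the reduced range maps luma to 16-235 and chroma to 16-240 (the BT.601 "studio" range). The sketch below is a generic full-to-reduced luma mapping, included only to make the two ranges concrete; it is not a CSS API function:

#include <stdio.h>

/* Map a full-range luma sample (0-255) to the reduced range (16-235). */
static unsigned char luma_full_to_reduced(unsigned char y)
{
	return (unsigned char)(16 + (219 * y + 127) / 255);
}

int main(void)
{
	printf("0 -> %u, 128 -> %u, 255 -> %u\n",
	       luma_full_to_reduced(0),
	       luma_full_to_reduced(128),
	       luma_full_to_reduced(255));
	return 0;
}
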
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h
new file mode 100644
index 000000000000..f7e5669d5125
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FIXEDBDS_PARAM_H
+#define __IA_CSS_FIXEDBDS_PARAM_H
+
+#include "type_support.h"
+
+/* ISP2401 */
+#define BDS_UNIT 8
+#define FRAC_LOG 3
+#define FRAC_ACC BIT(FRAC_LOG)
+#if FRAC_ACC != BDS_UNIT
+#error "FRAC_ACC and BDS_UNIT need to be merged into one define"
+#endif
+
+struct sh_css_isp_bds_params {
+ int baf_strength;
+};
+
+#endif /* __IA_CSS_FIXEDBDS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h
new file mode 100644
index 000000000000..7626dfe82642
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FIXEDBDS_TYPES_H
+#define __IA_CSS_FIXEDBDS_TYPES_H
+
+struct sh_css_bds_factor {
+ unsigned int numerator;
+ unsigned int denominator;
+ unsigned int bds_factor;
+};
+
+#endif /*__IA_CSS_FIXEDBDS_TYPES_H*/
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
new file mode 100644
index 000000000000..e701b7e41ef4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/math.h>
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#include <ia_css_types.h>
+#include <sh_css_defs.h>
+#include <ia_css_debug.h>
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_fpn.host.h"
+
+void
+ia_css_fpn_encode(
+ struct sh_css_isp_fpn_params *to,
+ const struct ia_css_fpn_table *from,
+ unsigned int size)
+{
+ (void)size;
+ to->shift = from->shift;
+ to->enabled = from->data != NULL;
+}
+
+void
+ia_css_fpn_dump(
+ const struct sh_css_isp_fpn_params *fpn,
+ unsigned int level)
+{
+ if (!fpn)
+ return;
+ ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "fpn_shift", fpn->shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "fpn_enabled", fpn->enabled);
+}
+
+int ia_css_fpn_config(struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
+
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+	/* Assume divisibility here; may need to generalize to fixed point. */
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ia_css_fpn_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ const struct ia_css_fpn_configuration config = {
+ &my_info
+ };
+
+ my_info.res.width = DIV_ROUND_UP(info->res.width, 2); /* Packed by 2x */
+ my_info.res.height = info->res.height;
+ my_info.padded_width = DIV_ROUND_UP(info->padded_width, 2); /* Packed by 2x */
+ my_info.format = info->format;
+ my_info.raw_bit_depth = FPN_BITS_PER_PIXEL;
+ my_info.raw_bayer_order = info->raw_bayer_order;
+ my_info.crop_info = info->crop_info;
+
+ return ia_css_configure_fpn(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
new file mode 100644
index 000000000000..cdd4350c082c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FPN_HOST_H
+#define __IA_CSS_FPN_HOST_H
+
+#include "ia_css_binary.h"
+#include "ia_css_fpn_types.h"
+#include "ia_css_fpn_param.h"
+
+void
+ia_css_fpn_encode(
+ struct sh_css_isp_fpn_params *to,
+ const struct ia_css_fpn_table *from,
+ unsigned int size);
+
+void
+ia_css_fpn_dump(
+ const struct sh_css_isp_fpn_params *fpn,
+ unsigned int level);
+
+int ia_css_fpn_config(struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned int size);
+
+int ia_css_fpn_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_FPN_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h
new file mode 100644
index 000000000000..94edd1d21e9a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FPN_PARAM_H
+#define __IA_CSS_FPN_PARAM_H
+
+#include "type_support.h"
+
+#include "dma.h"
+
+#define FPN_BITS_PER_PIXEL 16
+
+/* FPNR (Fixed Pattern Noise Reduction) */
+struct sh_css_isp_fpn_params {
+ s32 shift;
+ s32 enabled;
+};
+
+struct sh_css_isp_fpn_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+};
+
+#endif /* __IA_CSS_FPN_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
new file mode 100644
index 000000000000..8b246c80e105
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FPN_TYPES_H
+#define __IA_CSS_FPN_TYPES_H
+
+/* @file
+* CSS-API header file for Fixed Pattern Noise parameters.
+*/
+
+/* Fixed Pattern Noise table.
+ *
+ * This contains the fixed pattern noise values
+ * obtained from a black frame capture.
+ *
+ * "shift" should be set to the smallest value
+ * which satisfies the requirement that the maximum data value is less than 64.
+ *
+ * ISP block: FPN1
+ * ISP1: FPN1 is used.
+ * ISP2: FPN1 is used.
+ */
+
+struct ia_css_fpn_table {
+	s16 *data;	/** Table content (fixed pattern noise).
+ u0.[13-shift], [0,63] */
+ u32 width; /** Table width (in pixels).
+ This is the input frame width. */
+ u32 height; /** Table height (in pixels).
+ This is the input frame height. */
+ u32 shift; /** Common exponent of table content.
+ u8.0, [0,13] */
+ u32 enabled; /** Fpn is enabled.
+ bool */
+};
+
+struct ia_css_fpn_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_FPN_TYPES_H */
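
Following the comment above, "shift" is the smallest value for which the shifted table content stays below 64 (i.e. fits in 6 bits). A small sketch of deriving that shift from the maximum value seen in a black-frame capture; the helper is hypothetical and only encodes the documented "< 64" constraint:

#include <stdio.h>
#include <stdint.h>

/* Smallest shift such that (max_value >> shift) < 64, as described for
 * ia_css_fpn_table::shift. Clamped to the documented [0, 13] range.
 */
static uint32_t fpn_shift_for_max(uint16_t max_value)
{
	uint32_t shift = 0;

	while ((max_value >> shift) >= 64 && shift < 13)
		shift++;
	return shift;
}

int main(void)
{
	/* e.g. a black-frame maximum of 200 needs shift 2: 200 >> 2 = 50 < 64 */
	printf("max 50   -> shift %u\n", fpn_shift_for_max(50));
	printf("max 200  -> shift %u\n", fpn_shift_for_max(200));
	printf("max 4095 -> shift %u\n", fpn_shift_for_max(4095));
	return 0;
}
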
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c
new file mode 100644
index 000000000000..afef48ebd818
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+#include "vamem.h"
+
+#include "ia_css_gc.host.h"
+
+const struct ia_css_gc_config default_gc_config = {
+ 0,
+ 0
+};
+
+const struct ia_css_ce_config default_ce_config = {
+ 0,
+ 255
+};
+
+void
+ia_css_gc_encode(
+ struct sh_css_isp_gc_params *to,
+ const struct ia_css_gc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_k1 =
+ uDIGIT_FITTING((int)from->gain_k1, 16,
+ IA_CSS_GAMMA_GAIN_K_SHIFT);
+ to->gain_k2 =
+ uDIGIT_FITTING((int)from->gain_k2, 16,
+ IA_CSS_GAMMA_GAIN_K_SHIFT);
+}
+
+void
+ia_css_ce_encode(
+ struct sh_css_isp_ce_params *to,
+ const struct ia_css_ce_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->uv_level_min = from->uv_level_min;
+ to->uv_level_max = from->uv_level_max;
+}
+
+void
+ia_css_gc_vamem_encode(
+ struct sh_css_isp_gc_vamem_params *to,
+ const struct ia_css_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_gc_dump(
+ const struct sh_css_isp_gc_params *gc,
+ unsigned int level)
+{
+ if (!gc) return;
+ ia_css_debug_dtrace(level, "Gamma Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "gamma_gain_k1", gc->gain_k1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "gamma_gain_k2", gc->gain_k2);
+}
+
+void
+ia_css_ce_dump(
+ const struct sh_css_isp_ce_params *ce,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "Chroma Enhancement:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ce_uv_level_min", ce->uv_level_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ce_uv_level_max", ce->uv_level_max);
+}
+
+void
+ia_css_gc_debug_dtrace(
+ const struct ia_css_gc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain_k1=%d, config.gain_k2=%d\n",
+ config->gain_k1, config->gain_k2);
+}
+
+void
+ia_css_ce_debug_dtrace(
+ const struct ia_css_ce_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.uv_level_min=%d, config.uv_level_max=%d\n",
+ config->uv_level_min, config->uv_level_max);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h
new file mode 100644
index 000000000000..785bc5329dd3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC_HOST_H
+#define __IA_CSS_GC_HOST_H
+
+#include "ia_css_gc_param.h"
+#include "ia_css_gc_table.host.h"
+
+extern const struct ia_css_gc_config default_gc_config;
+extern const struct ia_css_ce_config default_ce_config;
+
+void
+ia_css_gc_encode(
+ struct sh_css_isp_gc_params *to,
+ const struct ia_css_gc_config *from,
+ unsigned int size);
+
+void
+ia_css_gc_vamem_encode(
+ struct sh_css_isp_gc_vamem_params *to,
+ const struct ia_css_gamma_table *from,
+ unsigned int size);
+
+void
+ia_css_ce_encode(
+ struct sh_css_isp_ce_params *to,
+ const struct ia_css_ce_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_gc_dump(
+ const struct sh_css_isp_gc_params *gc,
+ unsigned int level);
+
+void
+ia_css_ce_dump(
+ const struct sh_css_isp_ce_params *ce,
+ unsigned int level);
+
+void
+ia_css_gc_debug_dtrace(
+ const struct ia_css_gc_config *config,
+ unsigned int level);
+
+void
+ia_css_ce_debug_dtrace(
+ const struct ia_css_ce_config *config,
+ unsigned int level);
+
+#endif
+
+#endif /* __IA_CSS_GC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h
new file mode 100644
index 000000000000..fcd16196cc5a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC_PARAM_H
+#define __IA_CSS_GC_PARAM_H
+
+#include "type_support.h"
+#ifndef PIPE_GENERATION
+#ifdef __ISP
+#define __INLINE_VAMEM__
+#endif
+#include "vamem.h"
+#include "ia_css_gc_types.h"
+
+#if defined(IS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE
+#elif defined(IS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE
+#else
+#error "Undefined vamem version"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_GC_TABLE_SIZE 0
+#endif
+
+#define GAMMA_OUTPUT_BITS 8
+#define GAMMA_OUTPUT_MAX_VAL ((1 << GAMMA_OUTPUT_BITS) - 1)
+
+/* GC (Gamma Correction) */
+struct sh_css_isp_gc_params {
+ s32 gain_k1;
+ s32 gain_k2;
+};
+
+/* CE (Chroma Enhancement) */
+struct sh_css_isp_ce_params {
+ s32 uv_level_min;
+ s32 uv_level_max;
+};
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_gc_vamem_params {
+ u16 gc[SH_CSS_ISP_GC_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_GC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
new file mode 100644
index 000000000000..b0427a9d6628
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/string.h> /* for memcpy() */
+
+#include <type_support.h>
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_gc_table.host.h"
+
+
+struct ia_css_gamma_table default_gamma_table;
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE] = {
+ 0, 4, 8, 12, 17, 21, 27, 32,
+ 38, 44, 49, 55, 61, 66, 71, 76,
+ 80, 84, 88, 92, 95, 98, 102, 105,
+ 108, 110, 113, 116, 118, 121, 123, 126,
+ 128, 130, 132, 135, 137, 139, 141, 143,
+ 145, 146, 148, 150, 152, 153, 155, 156,
+ 158, 160, 161, 162, 164, 165, 166, 168,
+ 169, 170, 171, 172, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 184,
+ 185, 186, 187, 188, 189, 189, 190, 191,
+ 192, 192, 193, 194, 195, 195, 196, 197,
+ 197, 198, 198, 199, 200, 200, 201, 201,
+ 202, 203, 203, 204, 204, 205, 205, 206,
+ 206, 207, 207, 208, 208, 209, 209, 210,
+ 210, 210, 211, 211, 212, 212, 213, 213,
+ 214, 214, 214, 215, 215, 216, 216, 216,
+ 217, 217, 218, 218, 218, 219, 219, 220,
+ 220, 220, 221, 221, 222, 222, 222, 223,
+ 223, 223, 224, 224, 225, 225, 225, 226,
+ 226, 226, 227, 227, 227, 228, 228, 228,
+ 229, 229, 229, 230, 230, 230, 231, 231,
+ 231, 232, 232, 232, 233, 233, 233, 234,
+ 234, 234, 234, 235, 235, 235, 236, 236,
+ 236, 237, 237, 237, 237, 238, 238, 238,
+ 239, 239, 239, 239, 240, 240, 240, 241,
+ 241, 241, 241, 242, 242, 242, 242, 243,
+ 243, 243, 243, 244, 244, 244, 245, 245,
+ 245, 245, 246, 246, 246, 246, 247, 247,
+ 247, 247, 248, 248, 248, 248, 249, 249,
+ 249, 249, 250, 250, 250, 250, 251, 251,
+ 251, 251, 252, 252, 252, 252, 253, 253,
+ 253, 253, 254, 254, 254, 254, 255, 255,
+ 255
+};
+
+
+void
+ia_css_config_gamma_table(void)
+{
+ memcpy(default_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+}
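Usage sketch (hypothetical caller, assuming the headers added in this patch are on the include path): the default gamma table is filled at run time, so it must be initialised before first use.

ia_css_config_gamma_table();
/* default_gamma_table.vamem_type is now IA_CSS_VAMEM_TYPE_2 and
 * default_gamma_table.data.vamem_2[] holds the 257 u0.8 entries listed
 * above (IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE == (1 << 8) + 1). */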
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h
new file mode 100644
index 000000000000..f24e6d84e40a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC_TABLE_HOST_H
+#define __IA_CSS_GC_TABLE_HOST_H
+
+#include "ia_css_gc_types.h"
+
+extern struct ia_css_gamma_table default_gamma_table;
+
+void ia_css_config_gamma_table(void);
+
+#endif /* __IA_CSS_GC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
new file mode 100644
index 000000000000..09d3fb41b73f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC_TYPES_H
+#define __IA_CSS_GC_TYPES_H
+
+/* @file
+* CSS-API header file for Gamma Correction parameters.
+*/
+
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: Needed for ia_css_vamem_type */
+
+/* Fractional bits for GAMMA gain */
+#define IA_CSS_GAMMA_GAIN_K_SHIFT 13
+
+/* Number of elements in the gamma table. */
+#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 10
+#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE BIT(IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2)
+
+/* Number of elements in the gamma table. */
+#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE ((1U << IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1)
+
+/* Gamma table, used for Y(Luma) Gamma Correction.
+ *
+ * ISP block: GC1 (YUV Gamma Correction)
+ * ISP1: GC1 is used.
+ * (ISP2: GC2(sRGB Gamma Correction) is used.)
+ */
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
+ IA_CSS_VAMEM_TYPE_2(ISP2400) */
+union ia_css_gc_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE];
+ /** Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
+ u16 vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE];
+ /** Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
+};
+
+struct ia_css_gamma_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_gc_data data;
+};
+
+/* Gamma Correction configuration (used only for YUV Gamma Correction).
+ *
+ * ISP block: GC1 (YUV Gamma Correction)
+ * ISP1: GC1 is used.
+ * (ISP2: GC2 (sRGB Gamma Correction) is used.)
+ */
+struct ia_css_gc_config {
+ u16 gain_k1; /** Gain to adjust U after YUV Gamma Correction.
+ u0.16, [0,65535],
+ default/ineffective 19000(0.29) */
+ u16 gain_k2; /** Gain to adjust V after YUV Gamma Correction.
+ u0.16, [0,65535],
+ default/ineffective 19000(0.29) */
+};
+
+/* Chroma Enhancement configuration.
+ *
+ * This parameter specifies the range of the chroma output level.
+ * The standard range is [0,255] or [16,240].
+ *
+ * ISP block: CE1
+ * ISP1: CE1 is used.
+ * (ISP2: CE1 is not used.)
+ */
+struct ia_css_ce_config {
+ u8 uv_level_min; /** Minimum of chroma output level.
+ u0.8, [0,255], default/ineffective 0 */
+ u8 uv_level_max; /** Maximum of chroma output level.
+ u0.8, [0,255], default/ineffective 255 */
+};
+
+/* Multi-Axes Color Correction (MACC) configuration.
+ *
+ * ISP block: MACC2 (MACC by matrix and exponent(ia_css_macc_config))
+ * (ISP1: MACC1 (MACC by only matrix) is used.)
+ * ISP2: MACC2 is used.
+ */
+struct ia_css_macc_config {
+ u8 exp; /** Common exponent of ia_css_macc_table.
+ u8.0, [0,13], default 1, ineffective 1 */
+};
+
+#endif /* __IA_CSS_GC_TYPES_H */
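A small sketch (assuming this header is included) that fills the two configuration structs with the default/ineffective values given in the comments above:

static const struct ia_css_gc_config gc_cfg = {
	.gain_k1 = 19000,	/* u0.16, roughly 0.29 */
	.gain_k2 = 19000,	/* u0.16, roughly 0.29 */
};

static const struct ia_css_ce_config ce_cfg = {
	.uv_level_min = 0,	/* keep the full [0,255] chroma range */
	.uv_level_max = 255,
};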
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c
new file mode 100644
index 000000000000..3a32026a8f60
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "vamem.h"
+
+#include "ia_css_gc2.host.h"
+
+const struct ia_css_cc_config default_yuv2rgb_cc_config = {
+ 12,
+ {4096, -4096, 4096, 4096, 4096, 0, 4096, -4096, -4096}
+};
+
+const struct ia_css_cc_config default_rgb2yuv_cc_config = {
+ 13,
+ {2449, 4809, 934, -1382, -2714, 4096, 4096, -3430, -666}
+};
+
+void
+ia_css_yuv2rgb_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+void
+ia_css_rgb2yuv_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+void
+ia_css_r_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+void
+ia_css_g_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+void
+ia_css_b_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_yuv2rgb_dump(
+ const struct sh_css_isp_csc_params *yuv2rgb,
+ unsigned int level)
+{
+ ia_css_cc_dump(yuv2rgb, level, "YUV to RGB Conversion");
+}
+
+void
+ia_css_rgb2yuv_dump(
+ const struct sh_css_isp_csc_params *rgb2yuv,
+ unsigned int level)
+{
+ ia_css_cc_dump(rgb2yuv, level, "RGB to YUV Conversion");
+}
+
+void
+ia_css_rgb_gamma_table_debug_dtrace(
+ const struct ia_css_rgb_gamma_table *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+#endif
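The default_rgb2yuv_cc_config initialiser above matches the BT.601 RGB-to-YUV coefficients in s.13 fixed point (0.299 * 8192 ≈ 2449, 0.587 * 8192 ≈ 4809, 0.114 * 8192 ≈ 934). A standalone sketch of how such a config would be applied, assuming the first field of ia_css_cc_config is the fractional-bit count and the nine values a row-major 3x3 matrix (inferred from the initialisers, not stated in this patch):

#include <stdio.h>

static void cc_apply(const int m[9], int fraction_bits,
		     int in0, int in1, int in2, int out[3])
{
	out[0] = (m[0] * in0 + m[1] * in1 + m[2] * in2) >> fraction_bits;
	out[1] = (m[3] * in0 + m[4] * in1 + m[5] * in2) >> fraction_bits;
	out[2] = (m[6] * in0 + m[7] * in1 + m[8] * in2) >> fraction_bits;
}

int main(void)
{
	/* Matrix and shift taken from default_rgb2yuv_cc_config above. */
	const int rgb2yuv[9] = { 2449, 4809, 934,
				 -1382, -2714, 4096,
				 4096, -3430, -666 };
	int yuv[3];

	cc_apply(rgb2yuv, 13, 128, 128, 128, yuv);	/* mid-grey RGB */
	printf("Y=%d U=%d V=%d\n", yuv[0], yuv[1], yuv[2]);	/* Y=128 U=0 V=0 */
	return 0;
}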
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h
new file mode 100644
index 000000000000..a7ac7241d477
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC2_HOST_H
+#define __IA_CSS_GC2_HOST_H
+
+#include "ia_css_gc2_types.h"
+#include "ia_css_gc2_param.h"
+#include "ia_css_gc2_table.host.h"
+
+extern const struct ia_css_cc_config default_yuv2rgb_cc_config;
+extern const struct ia_css_cc_config default_rgb2yuv_cc_config;
+
+void
+ia_css_yuv2rgb_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+void
+ia_css_rgb2yuv_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned int size);
+
+void
+ia_css_r_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size);
+
+void
+ia_css_g_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size);
+
+void
+ia_css_b_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_yuv2rgb_dump(
+ const struct sh_css_isp_csc_params *yuv2rgb,
+ unsigned int level);
+
+void
+ia_css_rgb2yuv_dump(
+ const struct sh_css_isp_csc_params *rgb2yuv,
+ unsigned int level);
+
+void
+ia_css_rgb_gamma_table_debug_dtrace(
+ const struct ia_css_rgb_gamma_table *config,
+ unsigned int level);
+
+#define ia_css_yuv2rgb_debug_dtrace ia_css_cc_config_debug_dtrace
+#define ia_css_rgb2yuv_debug_dtrace ia_css_cc_config_debug_dtrace
+#define ia_css_r_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+#define ia_css_g_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+#define ia_css_b_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+
+#endif
+
+#endif /* __IA_CSS_GC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h
new file mode 100644
index 000000000000..76c8521f0e15
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC2_PARAM_H
+#define __IA_CSS_GC2_PARAM_H
+
+#include "type_support.h"
+/* Extend GC1 */
+#include "ia_css_gc2_types.h"
+#include "gc/gc_1.0/ia_css_gc_param.h"
+#include "csc/csc_1.0/ia_css_csc_param.h"
+
+#ifndef PIPE_GENERATION
+#if defined(IS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE
+#elif defined(IS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE
+#else
+#error "Undefined vamem version"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_rgb_gamma_vamem_params {
+ u16 gc[SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_GC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
new file mode 100644
index 000000000000..101f703443ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/string.h> /* for memcpy() */
+
+#include <type_support.h>
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_gc2_table.host.h"
+
+struct ia_css_rgb_gamma_table default_r_gamma_table;
+struct ia_css_rgb_gamma_table default_g_gamma_table;
+struct ia_css_rgb_gamma_table default_b_gamma_table;
+
+/* Identical default gamma table for R, G, and B. */
+
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE] = {
+ 0, 72, 144, 216, 288, 360, 426, 486,
+ 541, 592, 641, 687, 730, 772, 812, 850,
+ 887, 923, 958, 991, 1024, 1055, 1086, 1117,
+ 1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335,
+ 1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525,
+ 1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694,
+ 1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848,
+ 1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990,
+ 2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122,
+ 2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247,
+ 2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364,
+ 2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476,
+ 2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583,
+ 2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685,
+ 2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783,
+ 2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878,
+ 2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970,
+ 2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058,
+ 3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144,
+ 3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228,
+ 3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309,
+ 3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388,
+ 3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465,
+ 3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540,
+ 3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614,
+ 3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686,
+ 3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756,
+ 3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825,
+ 3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893,
+ 3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959,
+ 3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024,
+ 4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088,
+ 4095
+};
+
+void
+ia_css_config_rgb_gamma_tables(void)
+{
+ default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ memcpy(default_r_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_g_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_b_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h
new file mode 100644
index 000000000000..f2ce0cc47f86
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC2_TABLE_HOST_H
+#define __IA_CSS_GC2_TABLE_HOST_H
+
+#include "ia_css_gc2_types.h"
+
+extern struct ia_css_rgb_gamma_table default_r_gamma_table;
+extern struct ia_css_rgb_gamma_table default_g_gamma_table;
+extern struct ia_css_rgb_gamma_table default_b_gamma_table;
+
+void ia_css_config_rgb_gamma_tables(void);
+
+#endif /* __IA_CSS_GC2_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h
new file mode 100644
index 000000000000..abb0d3d871b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_GC2_TYPES_H
+#define __IA_CSS_GC2_TYPES_H
+
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: needed for ia_css_vamem_type */
+
+/* @file
+* CSS-API header file for Gamma Correction parameters.
+*/
+
+/* sRGB Gamma table, used for sRGB Gamma Correction.
+ *
+ * ISP block: GC2 (sRGB Gamma Correction)
+ * (ISP1: GC1(YUV Gamma Correction) is used.)
+ * ISP2: GC2 is used.
+ */
+
+/* Number of elements in the sRGB gamma table. */
+#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE BIT(IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2)
+
+/* Number of elements in the sRGB gamma table. */
+#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE ((1U << IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1)
+
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
+ IA_CSS_VAMEM_TYPE_2(ISP2400) */
+union ia_css_rgb_gamma_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE];
+ /** RGB Gamma table on vamem type1. This table is not used,
+ because sRGB Gamma Correction is not implemented for ISP2300. */
+ u16 vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE];
+ /** RGB Gamma table on vamem type2. u0.12, [0,4095] */
+};
+
+struct ia_css_rgb_gamma_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_rgb_gamma_data data;
+};
+
+#endif /* __IA_CSS_GC2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
new file mode 100644
index 000000000000..7a53928ef26c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_hdr.host.h"
+
+void
+ia_css_hdr_init_config(
+ struct sh_css_isp_hdr_params *to,
+ const struct ia_css_hdr_config *from,
+ unsigned int size)
+{
+ int i;
+ (void)size;
+
+ for (i = 0; i < HDR_NUM_INPUT_FRAMES - 1; i++) {
+ to->irradiance.match_shift[i] = from->irradiance.match_shift[i];
+ to->irradiance.match_mul[i] = from->irradiance.match_mul[i];
+ to->irradiance.thr_low[i] = from->irradiance.thr_low[i];
+ to->irradiance.thr_high[i] = from->irradiance.thr_high[i];
+ to->irradiance.thr_coeff[i] = from->irradiance.thr_coeff[i];
+ to->irradiance.thr_shift[i] = from->irradiance.thr_shift[i];
+ }
+ to->irradiance.test_irr = from->irradiance.test_irr;
+ to->irradiance.weight_bpp = from->irradiance.weight_bpp;
+
+ to->deghost.test_deg = from->deghost.test_deg;
+ to->exclusion.test_excl = from->exclusion.test_excl;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h
new file mode 100644
index 000000000000..916b72b082ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_HDR_HOST_H
+#define __IA_CSS_HDR_HOST_H
+
+#include "ia_css_hdr_param.h"
+#include "ia_css_hdr_types.h"
+
+extern const struct ia_css_hdr_config default_hdr_config;
+
+void
+ia_css_hdr_init_config(
+ struct sh_css_isp_hdr_params *to,
+ const struct ia_css_hdr_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_HDR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h
new file mode 100644
index 000000000000..da787654440c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_HDR_PARAMS_H
+#define __IA_CSS_HDR_PARAMS_H
+
+#include "type_support.h"
+
+#define HDR_NUM_INPUT_FRAMES (3)
+
+/* HDR irradiance map parameters on ISP. */
+struct sh_css_hdr_irradiance_params {
+ s32 test_irr;
+ s32 match_shift[HDR_NUM_INPUT_FRAMES -
+ 1]; /* Histogram matching shift parameter */
+ s32 match_mul[HDR_NUM_INPUT_FRAMES -
+ 1]; /* Histogram matching multiplication parameter */
+ s32 thr_low[HDR_NUM_INPUT_FRAMES -
+ 1]; /* Weight map soft threshold low bound parameter */
+ s32 thr_high[HDR_NUM_INPUT_FRAMES -
+ 1]; /* Weight map soft threshold high bound parameter */
+ s32 thr_coeff[HDR_NUM_INPUT_FRAMES -
+ 1]; /* Soft threshold linear function coefficient */
+ s32 thr_shift[HDR_NUM_INPUT_FRAMES -
+ 1]; /* Soft threshold precision shift parameter */
+ s32 weight_bpp; /* Weight map bits per pixel */
+};
+
+/* HDR deghosting parameters on ISP */
+struct sh_css_hdr_deghost_params {
+ s32 test_deg;
+};
+
+/* HDR exclusion parameters on ISP */
+struct sh_css_hdr_exclusion_params {
+ s32 test_excl;
+};
+
+/* HDR ISP parameters */
+struct sh_css_isp_hdr_params {
+ struct sh_css_hdr_irradiance_params irradiance;
+ struct sh_css_hdr_deghost_params deghost;
+ struct sh_css_hdr_exclusion_params exclusion;
+};
+
+#endif /* __IA_CSS_HDR_PARAMS_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
new file mode 100644
index 000000000000..e32290d1c86b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_HDR_TYPES_H
+#define __IA_CSS_HDR_TYPES_H
+
+#define IA_CSS_HDR_MAX_NUM_INPUT_FRAMES (3)
+
+/**
+ * \brief HDR Irradiance Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_irradiance_params {
+ int test_irr; /** Test parameter */
+ int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES -
+ 1]; /** Histogram matching shift parameter */
+ int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES -
+ 1]; /** Histogram matching multiplication parameter */
+ int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES -
+ 1]; /** Weight map soft threshold low bound parameter */
+ int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES -
+ 1]; /** Weight map soft threshold high bound parameter */
+ int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES -
+ 1]; /** Soft threshold linear function coefficient */
+ int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES -
+ 1]; /** Soft threshold precision shift parameter */
+ int weight_bpp; /** Weight map bits per pixel */
+};
+
+/**
+ * \brief HDR Deghosting Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_deghost_params {
+ int test_deg; /** Test parameter */
+};
+
+/**
+ * \brief HDR Exclusion Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_exclusion_params {
+ int test_excl; /** Test parameter */
+};
+
+/**
+ * \brief HDR public parameters.
+ * \details Struct with all parameters for HDR that can be set from
+ * the CSS API. Currently, only test parameters are defined.
+ */
+struct ia_css_hdr_config {
+ struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance parameters */
+ struct ia_css_hdr_deghost_params deghost; /** HDR deghosting parameters */
+ struct ia_css_hdr_exclusion_params exclusion; /** HDR exclusion parameters */
+};
+
+#endif /* __IA_CSS_HDR_TYPES_H */
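Usage sketch (hypothetical caller, assuming ia_css_hdr.host.h is included): translating an HDR configuration into the ISP parameter layout with the helper added in this patch. The weight_bpp value is only an example, not a documented default.

struct ia_css_hdr_config cfg = { 0 };	/* all test parameters disabled */
struct sh_css_isp_hdr_params isp_params;

cfg.irradiance.weight_bpp = 8;		/* example value */
ia_css_hdr_init_config(&isp_params, &cfg, sizeof(isp_params));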
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
new file mode 100644
index 000000000000..38f371b6d6d5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/math.h>
+
+#include "ia_css_bayer_io.host.h"
+#include "dma.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "ia_css_isp_params.h"
+#include "ia_css_frame.h"
+
+int ia_css_bayer_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
+{
+ const struct ia_css_frame *in_frame = args->in_frame;
+ const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
+ &args->out_frame;
+ const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame);
+ const unsigned int ddr_elems_per_word =
+ DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short));
+ unsigned int size_get = 0, size_put = 0;
+ unsigned int offset = 0;
+ int ret;
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
+ }
+
+ if (size_get) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() get part enter:\n");
+#endif
+
+ ret = ia_css_dma_configure_from_info(&config, in_frame_info);
+ if (ret)
+ return ret;
+ // The base_address of the input frame will be set in the ISP
+ to->width = in_frame_info->res.width;
+ to->height = in_frame_info->res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() get part leave:\n");
+#endif
+ }
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
+ }
+
+ if (size_put) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() put part enter:\n");
+#endif
+
+ ret = ia_css_dma_configure_from_info(&config, &out_frames[0]->frame_info);
+ if (ret)
+ return ret;
+ to->base_address = out_frames[0]->data;
+ to->width = out_frames[0]->frame_info.res.width;
+ to->height = out_frames[0]->frame_info.res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_bayer_io_config() put part leave:\n");
+#endif
+ }
+ return 0;
+}
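A standalone sketch of the ddr_elems_per_word computation used in ia_css_bayer_io_config() (and again in the yuv444 variant below). The real HIVE_ISP_DDR_WORD_BITS comes from the platform headers; 256 is only an assumed value for illustration.

#include <stdio.h>

#define HIVE_ISP_DDR_WORD_BITS	256	/* assumption for this sketch */
#define BITS_PER_TYPE(type)	(sizeof(type) * 8)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ddr_elems_per_word =
		DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short));

	/* 256-bit DDR word / 16-bit elements -> 16 elements per word */
	printf("%u\n", ddr_elems_per_word);
	return 0;
}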
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
new file mode 100644
index 000000000000..3c82bbd79c4c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __BAYER_IO_HOST_H
+#define __BAYER_IO_HOST_H
+
+#include "ia_css_bayer_io_param.h"
+#include "ia_css_bayer_io_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+
+int ia_css_bayer_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
+
+#endif /* __BAYER_IO_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h
new file mode 100644
index 000000000000..1586decb3c1e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BAYER_IO_PARAM
+#define __IA_CSS_BAYER_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif /* __IA_CSS_BAYER_IO_PARAM */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h
new file mode 100644
index 000000000000..ec84c120d4f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_BAYER_IO_TYPES_H
+#define __IA_CSS_BAYER_IO_TYPES_H
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_BAYER_IO_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h
new file mode 100644
index 000000000000..c9a3f7bfaa90
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_COMMON_IO_PARAM
+#define __IA_CSS_COMMON_IO_PARAM
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_COMMON_IO_PARAM */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h
new file mode 100644
index 000000000000..c1c93b2245c5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_COMMON_IO_TYPES
+#define __IA_CSS_COMMON_IO_TYPES
+
+#define MAX_IO_DMA_CHANNELS 3
+
+struct ia_css_common_io_config {
+ unsigned int base_address;
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int ddr_elems_per_word;
+ unsigned int dma_channel[MAX_IO_DMA_CHANNELS];
+};
+
+#endif /* __IA_CSS_COMMON_IO_TYPES */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
new file mode 100644
index 000000000000..13054ffc0b35
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/math.h>
+
+#include "ia_css_yuv444_io.host.h"
+#include "dma.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "ia_css_isp_params.h"
+#include "ia_css_frame.h"
+
+int ia_css_yuv444_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
+{
+ const struct ia_css_frame *in_frame = args->in_frame;
+ const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
+ &args->out_frame;
+ const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame);
+ const unsigned int ddr_elems_per_word =
+ DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short));
+ unsigned int size_get = 0, size_put = 0;
+ unsigned int offset = 0;
+ int ret;
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
+ }
+
+ if (size_get) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() get part enter:\n");
+#endif
+
+ ret = ia_css_dma_configure_from_info(&config, in_frame_info);
+ if (ret)
+ return ret;
+
+ // The base_address of the input frame will be set in the ISP
+ to->width = in_frame_info->res.width;
+ to->height = in_frame_info->res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() get part leave:\n");
+#endif
+ }
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
+ }
+
+ if (size_put) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() put part enter:\n");
+#endif
+
+ ret = ia_css_dma_configure_from_info(&config, &out_frames[0]->frame_info);
+ if (ret)
+ return ret;
+
+ to->base_address = out_frames[0]->data;
+ to->width = out_frames[0]->frame_info.res.width;
+ to->height = out_frames[0]->frame_info.res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_yuv444_io_config() put part leave:\n");
+#endif
+ }
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
new file mode 100644
index 000000000000..e6ce0cba44b9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __YUV444_IO_HOST_H
+#define __YUV444_IO_HOST_H
+
+#include "ia_css_yuv444_io_param.h"
+#include "ia_css_yuv444_io_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+
+int ia_css_yuv444_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
+
+#endif /* __YUV444_IO_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
new file mode 100644
index 000000000000..429fcdb73f60
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_YUV444_IO_PARAM
+#define __IA_CSS_YUV444_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
new file mode 100644
index 000000000000..485ad17cb29b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+*/
+
+#ifndef __IA_CSS_YUV444_IO_TYPES
+#define __IA_CSS_YUV444_IO_TYPES
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
new file mode 100644
index 000000000000..b8ba26f56cfa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_iterator.host.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "ia_css_err.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+static const struct ia_css_iterator_configuration default_config = {
+ .input_info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_iterator_config(
+ struct sh_css_isp_iterator_isp_config *to,
+ const struct ia_css_iterator_configuration *from,
+ unsigned int size)
+{
+ (void)size;
+ ia_css_frame_info_to_frame_sp_info(&to->input_info, from->input_info);
+ ia_css_frame_info_to_frame_sp_info(&to->internal_info, from->internal_info);
+ ia_css_frame_info_to_frame_sp_info(&to->output_info, from->output_info);
+ ia_css_frame_info_to_frame_sp_info(&to->vf_info, from->vf_info);
+ ia_css_resolution_to_sp_resolution(&to->dvs_envelope, from->dvs_envelope);
+}
+
+int ia_css_iterator_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info)
+{
+ struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ struct ia_css_iterator_configuration config = default_config;
+
+ config.input_info = &binary->in_frame_info;
+ config.internal_info = &binary->internal_frame_info;
+ config.output_info = &binary->out_frame_info[0];
+ config.vf_info = &binary->vf_frame_info;
+ config.dvs_envelope = &binary->dvs_envelope;
+
+ /* Use in_info instead of binary->in_frame_info.
+ * They can differ in padded width in case of scaling, e.g. for capture_pp.
+ * Find out why.
+ */
+ if (in_info)
+ config.input_info = in_info;
+ if (binary->out_frame_info[0].res.width == 0)
+ config.output_info = &binary->out_frame_info[1];
+ my_info = *config.output_info;
+ config.output_info = &my_info;
+ /* We do this only for the preview pipe: in fill_binary_info() the
+ * vf_out resolution is assigned to the out resolution, but ISP internal
+ * processing needs the original out resolution. The video pipe has two
+ * output pins (out and vf_out), so it already keeps both resolutions. */
+ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
+ binary->vf_downscale_log2 > 0) {
+ /* TODO: Remove this after preview output decimation is fixed
+ * by configuring out&vf info files properly */
+ my_info.padded_width <<= binary->vf_downscale_log2;
+ my_info.res.width <<= binary->vf_downscale_log2;
+ my_info.res.height <<= binary->vf_downscale_log2;
+ }
+
+ return ia_css_configure_iterator(binary, &config);
+}
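A small numeric sketch of the preview-pipe fix-up above: with vf_downscale_log2 == 1, a viewfinder-decimated 640x360 output info is scaled back to the 1280x720 the ISP processes internally (the resolutions are examples, not values from this patch).

unsigned int vf_downscale_log2 = 1;
unsigned int width = 640 << vf_downscale_log2;	/* 1280 */
unsigned int height = 360 << vf_downscale_log2;	/* 720 */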
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h
new file mode 100644
index 000000000000..01b1dc76651a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ITERATOR_HOST_H
+#define __IA_CSS_ITERATOR_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "ia_css_err.h"
+#include "ia_css_iterator_param.h"
+
+void
+ia_css_iterator_config(
+ struct sh_css_isp_iterator_isp_config *to,
+ const struct ia_css_iterator_configuration *from,
+ unsigned int size);
+
+int
+ia_css_iterator_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info);
+
+#endif /* __IA_CSS_ITERATOR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h
new file mode 100644
index 000000000000..45151d903944
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ITERATOR_PARAM_H
+#define __IA_CSS_ITERATOR_PARAM_H
+
+#include "ia_css_types.h" /* ia_css_resolution */
+#include "ia_css_frame_public.h" /* ia_css_frame_info */
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+
+struct ia_css_iterator_configuration {
+ const struct ia_css_frame_info *input_info;
+ const struct ia_css_frame_info *internal_info;
+ const struct ia_css_frame_info *output_info;
+ const struct ia_css_frame_info *vf_info;
+ const struct ia_css_resolution *dvs_envelope;
+};
+
+struct sh_css_isp_iterator_isp_config {
+ struct ia_css_frame_sp_info input_info;
+ struct ia_css_frame_sp_info internal_info;
+ struct ia_css_frame_sp_info output_info;
+ struct ia_css_frame_sp_info vf_info;
+ struct ia_css_sp_resolution dvs_envelope;
+};
+
+#endif /* __IA_CSS_ITERATOR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
new file mode 100644
index 000000000000..c4f3afb85828
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_macc1_5.host.h"
+
+const struct ia_css_macc1_5_config default_macc1_5_config = {
+ 1
+};
+
+void
+ia_css_macc1_5_encode(
+ struct sh_css_isp_macc1_5_params *to,
+ const struct ia_css_macc1_5_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->exp = from->exp;
+}
+
+void
+ia_css_macc1_5_vmem_encode(
+ struct sh_css_isp_macc1_5_vmem_params *params,
+ const struct ia_css_macc1_5_table *from,
+ unsigned int size)
+{
+ unsigned int i, j, k, idx;
+ static const unsigned int idx_map[] = {
+ 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
+ };
+
+ (void)size;
+
+ for (k = 0; k < 4; k++)
+ for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) {
+ idx = idx_map[i] + (k * IA_CSS_MACC_NUM_AXES);
+ j = 4 * i;
+
+ params->data[0][(idx)] = from->data[j];
+ params->data[1][(idx)] = from->data[j + 1];
+ params->data[2][(idx)] = from->data[j + 2];
+ params->data[3][(idx)] = from->data[j + 3];
+ }
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_macc1_5_debug_dtrace(
+ const struct ia_css_macc1_5_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.exp=%d\n",
+ config->exp);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h
new file mode 100644
index 000000000000..631839aa3c57
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC1_5_HOST_H
+#define __IA_CSS_MACC1_5_HOST_H
+
+#include "ia_css_macc1_5_param.h"
+#include "ia_css_macc1_5_table.host.h"
+
+extern const struct ia_css_macc1_5_config default_macc1_5_config;
+
+void
+ia_css_macc1_5_encode(
+ struct sh_css_isp_macc1_5_params *to,
+ const struct ia_css_macc1_5_config *from,
+ unsigned int size);
+
+void
+ia_css_macc1_5_vmem_encode(
+ struct sh_css_isp_macc1_5_vmem_params *params,
+ const struct ia_css_macc1_5_table *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_macc1_5_debug_dtrace(
+ const struct ia_css_macc1_5_config *config,
+ unsigned int level);
+#endif
+#endif /* __IA_CSS_MACC1_5_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h
new file mode 100644
index 000000000000..0f38cd7ea9b1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC1_5_PARAM_H
+#define __IA_CSS_MACC1_5_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h"
+#include "ia_css_macc1_5_types.h"
+
+/* MACC */
+struct sh_css_isp_macc1_5_params {
+ s32 exp;
+};
+
+struct sh_css_isp_macc1_5_vmem_params {
+ VMEM_ARRAY(data, IA_CSS_MACC_NUM_COEFS * ISP_NWAY);
+};
+
+#endif /* __IA_CSS_MACC1_5_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c
new file mode 100644
index 000000000000..aea74be18fe9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_macc1_5_table.host.h"
+
+/* Multi-Axes Color Correction table for ISP2.
+ * 64 values = 2x2 matrix for 16 areas, [s1.12]
+ * ineffective: 16 of "identity 2x2 matrix" {4096,0,0,4096}
+ */
+const struct ia_css_macc1_5_table default_macc1_5_table = {
+ {
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096
+ }
+};
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h
new file mode 100644
index 000000000000..76a3a5035d10
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC1_5_TABLE_HOST_H
+#define __IA_CSS_MACC1_5_TABLE_HOST_H
+
+#include "macc/macc1_5/ia_css_macc1_5_types.h"
+
+extern const struct ia_css_macc1_5_table default_macc1_5_table;
+
+#endif /* __IA_CSS_MACC1_5_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
new file mode 100644
index 000000000000..b0538808ae56
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC1_5_TYPES_H
+#define __IA_CSS_MACC1_5_TYPES_H
+
+/* @file
+* CSS-API header file for Multi-Axis Color Conversion algorithm parameters.
+*/
+
+/* Multi-Axis Color Conversion configuration
+ *
+ * ISP2.6.1: MACC1_5 is used.
+ */
+
+/* Number of axes in the MACC table. */
+#define IA_CSS_MACC_NUM_AXES 16
+/* Number of coefficients per MACC axes. */
+#define IA_CSS_MACC_NUM_COEFS 4
+
+/* Multi-Axes Color Correction (MACC) table.
+ *
+ * ISP block: MACC (MACC by only matrix)
+ * MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP1: MACC is used.
+ * ISP2: MACC1_5 is used.
+ *
+ * [MACC]
+ * OutU = (data00 * InU + data01 * InV) >> 13
+ * OutV = (data10 * InU + data11 * InV) >> 13
+ *
+ * default/ineffective:
+ * OutU = (8192 * InU + 0 * InV) >> 13
+ * OutV = ( 0 * InU + 8192 * InV) >> 13
+ *
+ * [MACC1_5]
+ * OutU = (data00 * InU + data01 * InV) >> (13 - exp)
+ * OutV = (data10 * InU + data11 * InV) >> (13 - exp)
+ *
+ * default/ineffective: (exp=1)
+ * OutU = (4096 * InU + 0 * InV) >> (13 - 1)
+ * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1)
+ */
+struct ia_css_macc1_5_table {
+ s16 data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
+ /** 16 of 2x2 matrix
+ MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
+ default/ineffective: (s1.12)
+ 16 of "identity 2x2 matrix" {4096,0,0,4096} */
+};
+
+/* Multi-Axes Color Correction (MACC) configuration.
+ *
+ * ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP2: MACC1_5 is used.
+ */
+struct ia_css_macc1_5_config {
+ u8 exp; /** Common exponent of ia_css_macc_table.
+ u8.0, [0,13], default 1, ineffective 1 */
+};
+
+#endif /* __IA_CSS_MACC1_5_TYPES_H */
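A standalone sketch of the MACC1_5 arithmetic documented in the comment above; with the default table entry {4096, 0, 0, 4096} and exp == 1 the transform leaves the chroma pair unchanged. The function name macc1_5_apply is hypothetical.

#include <stdio.h>

static void macc1_5_apply(const short m[4], unsigned int exp,
			  int in_u, int in_v, int *out_u, int *out_v)
{
	*out_u = (m[0] * in_u + m[1] * in_v) >> (13 - exp);
	*out_v = (m[2] * in_u + m[3] * in_v) >> (13 - exp);
}

int main(void)
{
	const short identity[4] = { 4096, 0, 0, 4096 };	/* s1.12 identity */
	int u, v;

	macc1_5_apply(identity, 1, 100, 50, &u, &v);
	printf("U=%d V=%d\n", u, v);	/* U=100 V=50: unchanged */
	return 0;
}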
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c
new file mode 100644
index 000000000000..6e2159f7b395
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_macc.host.h"
+
+const struct ia_css_macc_config default_macc_config = {
+ 1,
+};
+
+void
+ia_css_macc_encode(
+ struct sh_css_isp_macc_params *to,
+ const struct ia_css_macc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->exp = from->exp;
+}
+
+void
+ia_css_macc_dump(
+ const struct sh_css_isp_macc_params *macc,
+ unsigned int level);
+
+void
+ia_css_macc_debug_dtrace(
+ const struct ia_css_macc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.exp=%d\n",
+ config->exp);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h
new file mode 100644
index 000000000000..faba2dc79ed3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC_HOST_H
+#define __IA_CSS_MACC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_macc_param.h"
+#include "ia_css_macc_table.host.h"
+
+extern const struct ia_css_macc_config default_macc_config;
+
+void
+ia_css_macc_encode(
+ struct sh_css_isp_macc_params *to,
+ const struct ia_css_macc_config *from,
+ unsigned int size);
+
+void
+ia_css_macc_dump(
+ const struct sh_css_isp_macc_params *macc,
+ unsigned int level);
+
+void
+ia_css_macc_debug_dtrace(
+ const struct ia_css_macc_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_MACC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h
new file mode 100644
index 000000000000..e8c91a12b944
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC_PARAM_H
+#define __IA_CSS_MACC_PARAM_H
+
+#include "type_support.h"
+
+/* MACC */
+struct sh_css_isp_macc_params {
+ s32 exp;
+};
+
+#endif /* __IA_CSS_MACC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
new file mode 100644
index 000000000000..1720fa9d4f72
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_macc_table.host.h"
+
+/* Multi-Axis Color Correction table for ISP1.
+ * 64 values = one 2x2 matrix for each of 16 areas, [s2.13]
+ * ineffective: 16 "identity 2x2 matrix" entries {8192,0,0,8192}
+ */
+const struct ia_css_macc_table default_macc_table = {
+ {
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192
+ }
+};
+
+/* Multi-Axis Color Correction table for ISP2.
+ * 64 values = one 2x2 matrix for each of 16 areas, [s1.12]
+ * ineffective: 16 "identity 2x2 matrix" entries {4096,0,0,4096}
+ */
+const struct ia_css_macc_table default_macc2_table = {
+ {
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096
+ }
+};
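
Both default tables above encode the real value 1.0 on the matrix diagonal, just at
different fixed-point precisions (s2.13 for the ISP1 table, s1.12 for the ISP2 table).
A standalone sketch, not part of this patch (fixed_to_real() is an invented helper),
makes that correspondence explicit:

    #include <stdio.h>

    /* interpret a raw fixed-point coefficient with the given number of fractional bits */
    static double fixed_to_real(int raw, int frac_bits)
    {
            return (double)raw / (double)(1 << frac_bits);
    }

    int main(void)
    {
            printf("s2.13: 8192 -> %.1f\n", fixed_to_real(8192, 13));  /* prints 1.0 */
            printf("s1.12: 4096 -> %.1f\n", fixed_to_real(4096, 12));  /* prints 1.0 */
            return 0;
    }
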
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h
new file mode 100644
index 000000000000..620a72f7cb1f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC_TABLE_HOST_H
+#define __IA_CSS_MACC_TABLE_HOST_H
+
+#include "ia_css_macc_types.h"
+
+extern const struct ia_css_macc_table default_macc_table;
+extern const struct ia_css_macc_table default_macc2_table;
+
+#endif /* __IA_CSS_MACC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
new file mode 100644
index 000000000000..20e34f53bec2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_MACC_TYPES_H
+#define __IA_CSS_MACC_TYPES_H
+
+/* @file
+ * CSS-API header file for Multi-Axis Color Correction (MACC) parameters.
+ */
+
+/* Number of axes in the MACC table. */
+#define IA_CSS_MACC_NUM_AXES 16
+/* Number of coefficients per MACC axis. */
+#define IA_CSS_MACC_NUM_COEFS 4
+
+/* Multi-Axis Color Correction (MACC) table.
+ *
+ * ISP block: MACC1 (MACC by matrix only)
+ *            MACC2 (MACC by matrix and exponent (ia_css_macc_config))
+ * ISP1: MACC1 is used.
+ * ISP2: MACC2 is used.
+ *
+ * [MACC1]
+ * OutU = (data00 * InU + data01 * InV) >> 13
+ * OutV = (data10 * InU + data11 * InV) >> 13
+ *
+ * default/ineffective:
+ * OutU = (8192 * InU + 0 * InV) >> 13
+ * OutV = ( 0 * InU + 8192 * InV) >> 13
+ *
+ * [MACC2]
+ * OutU = (data00 * InU + data01 * InV) >> (13 - exp)
+ * OutV = (data10 * InU + data11 * InV) >> (13 - exp)
+ *
+ * default/ineffective: (exp=1)
+ * OutU = (4096 * InU + 0 * InV) >> (13 - 1)
+ * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1)
+ */
+
+struct ia_css_macc_table {
+ s16 data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
+	/** 16 2x2 matrices
+	 MACC1: s2.13, [-65536,65535]
+	 default/ineffective:
+	 16 "identity 2x2 matrix" entries {8192,0,0,8192}
+	 MACC2: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
+	 default/ineffective: (s1.12)
+	 16 "identity 2x2 matrix" entries {4096,0,0,4096} */
+};
+
+#endif /* __IA_CSS_MACC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c
new file mode 100644
index 000000000000..16e400b3cdaa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_norm.host.h"
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h
new file mode 100644
index 000000000000..7e1d334273e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_NORM_HOST_H
+#define __IA_CSS_NORM_HOST_H
+
+#include "ia_css_norm_param.h"
+
+#endif /* __IA_CSS_NORM_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h
new file mode 100644
index 000000000000..0481213edf93
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_NORM_PARAM_H
+#define __IA_CSS_NORM_PARAM_H
+
+#endif /* __IA_CSS_NORM_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c
new file mode 100644
index 000000000000..5c9e56a0bd59
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "sh_css_frac.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "isp.h"
+#include "ia_css_ob2.host.h"
+
+const struct ia_css_ob2_config default_ob2_config = {
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+void
+ia_css_ob2_encode(
+ struct sh_css_isp_ob2_params *to,
+ const struct ia_css_ob2_config *from,
+ unsigned int size)
+{
+ (void)size;
+
+	/* Black levels are u0.16 values */
+ to->blacklevel_gr = uDIGIT_FITTING(from->level_gr, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_r = uDIGIT_FITTING(from->level_r, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_b = uDIGIT_FITTING(from->level_b, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_gb = uDIGIT_FITTING(from->level_gb, 16, SH_CSS_BAYER_BITS);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ob2_dump(
+ const struct sh_css_isp_ob2_params *ob2,
+ unsigned int level)
+{
+ if (!ob2)
+ return;
+
+ ia_css_debug_dtrace(level, "Optical Black 2:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_gr", ob2->blacklevel_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_r", ob2->blacklevel_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_b", ob2->blacklevel_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_gb", ob2->blacklevel_gb);
+}
+
+void
+ia_css_ob2_debug_dtrace(
+ const struct ia_css_ob2_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+			    "config.level_gr=%d, config.level_r=%d, config.level_b=%d, config.level_gb=%d\n",
+ config->level_gr, config->level_r,
+ config->level_b, config->level_gb);
+}
+#endif
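
ia_css_ob2_encode() above re-expresses the u0.16 black levels at the ISP's Bayer bit
depth via the driver's uDIGIT_FITTING() macro. A standalone sketch, not part of this
patch, under the assumption that the fitting amounts to keeping the most significant
bits; fit_u16() below is an invented stand-in, not the real macro:

    #include <stdio.h>

    /* invented stand-in: keep the top target_bits bits of a u0.16 value */
    static unsigned int fit_u16(unsigned int v, unsigned int target_bits)
    {
            return v >> (16 - target_bits);
    }

    int main(void)
    {
            unsigned int level_gr = 4096;   /* u0.16: 4096/65536 = 1/16 of full scale */

            /* at an assumed 10-bit Bayer depth this becomes 64, still 1/16 of 1024 */
            printf("blacklevel_gr = %u\n", fit_u16(level_gr, 10));
            return 0;
    }
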
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h
new file mode 100644
index 000000000000..3bb192250d68
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OB2_HOST_H
+#define __IA_CSS_OB2_HOST_H
+
+#include "ia_css_ob2_types.h"
+#include "ia_css_ob2_param.h"
+
+extern const struct ia_css_ob2_config default_ob2_config;
+
+void
+ia_css_ob2_encode(
+ struct sh_css_isp_ob2_params *to,
+ const struct ia_css_ob2_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ob2_dump(
+ const struct sh_css_isp_ob2_params *ob2,
+ unsigned int level);
+
+void
+ia_css_ob2_debug_dtrace(
+ const struct ia_css_ob2_config *config, unsigned int level);
+#endif
+
+#endif /* __IA_CSS_OB2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h
new file mode 100644
index 000000000000..e64fb1a9a26c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OB2_PARAM_H
+#define __IA_CSS_OB2_PARAM_H
+
+#include "type_support.h"
+
+/* OB2 (Optical Black) */
+struct sh_css_isp_ob2_params {
+ s32 blacklevel_gr;
+ s32 blacklevel_r;
+ s32 blacklevel_b;
+ s32 blacklevel_gb;
+};
+
+#endif /* __IA_CSS_OB2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h
new file mode 100644
index 000000000000..9e5cd58df563
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OB2_TYPES_H
+#define __IA_CSS_OB2_TYPES_H
+
+/* @file
+ * CSS-API header file for Optical Black algorithm parameters.
+ */
+
+/* Optical Black configuration
+ *
+ * ISP2.6.1: OB2 is used.
+ */
+
+#include "ia_css_frac.h"
+
+struct ia_css_ob2_config {
+ ia_css_u0_16 level_gr; /** Black level for GR pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_r; /** Black level for R pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_b; /** Black level for B pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_gb; /** Black level for GB pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_OB2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c
new file mode 100644
index 000000000000..6801a5d60513
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "isp.h"
+
+#include "ia_css_ob.host.h"
+
+const struct ia_css_ob_config default_ob_config = {
+ IA_CSS_OB_MODE_NONE,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+/* TODO: include ob.isp.h to get ISP knowledge and
+   add asserts on platform restrictions */
+
+void
+ia_css_ob_configure(
+ struct sh_css_isp_ob_stream_config *config,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth)
+{
+ config->isp_pipe_version = isp_pipe_version;
+ config->raw_bit_depth = raw_bit_depth;
+}
+
+void
+ia_css_ob_encode(
+ struct sh_css_isp_ob_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size)
+{
+ unsigned int ob_bit_depth
+ = config->isp_pipe_version == 2 ? SH_CSS_BAYER_BITS : config->raw_bit_depth;
+ unsigned int scale = 16 - ob_bit_depth;
+
+ (void)size;
+ switch (from->mode) {
+ case IA_CSS_OB_MODE_FIXED:
+ to->blacklevel_gr = from->level_gr >> scale;
+ to->blacklevel_r = from->level_r >> scale;
+ to->blacklevel_b = from->level_b >> scale;
+ to->blacklevel_gb = from->level_gb >> scale;
+ to->area_start_bq = 0;
+ to->area_length_bq = 0;
+ to->area_length_bq_inverse = 0;
+ break;
+ case IA_CSS_OB_MODE_RASTER:
+ to->blacklevel_gr = 0;
+ to->blacklevel_r = 0;
+ to->blacklevel_b = 0;
+ to->blacklevel_gb = 0;
+ to->area_start_bq = from->start_position;
+ to->area_length_bq =
+ (from->end_position - from->start_position) + 1;
+ to->area_length_bq_inverse = AREA_LENGTH_UNIT / to->area_length_bq;
+ break;
+ default:
+ to->blacklevel_gr = 0;
+ to->blacklevel_r = 0;
+ to->blacklevel_b = 0;
+ to->blacklevel_gb = 0;
+ to->area_start_bq = 0;
+ to->area_length_bq = 0;
+ to->area_length_bq_inverse = 0;
+ break;
+ }
+}
+
+void
+ia_css_ob_vmem_encode(
+ struct sh_css_isp_ob_vmem_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size)
+{
+ struct sh_css_isp_ob_params tmp;
+ struct sh_css_isp_ob_params *ob = &tmp;
+
+ (void)size;
+ ia_css_ob_encode(&tmp, from, config, sizeof(tmp));
+
+ {
+ unsigned int i;
+ unsigned int sp_obarea_start_bq = ob->area_start_bq;
+ unsigned int sp_obarea_length_bq = ob->area_length_bq;
+ unsigned int low = sp_obarea_start_bq;
+ unsigned int high = low + sp_obarea_length_bq;
+ u16 all_ones = ~0;
+
+ for (i = 0; i < OBAREA_MASK_SIZE; i++) {
+ if (i >= low && i < high)
+ to->vmask[i / ISP_VEC_NELEMS][i % ISP_VEC_NELEMS] = all_ones;
+ else
+ to->vmask[i / ISP_VEC_NELEMS][i % ISP_VEC_NELEMS] = 0;
+ }
+ }
+}
+
+void
+ia_css_ob_dump(
+ const struct sh_css_isp_ob_params *ob,
+ unsigned int level)
+{
+	if (!ob)
+		return;
+ ia_css_debug_dtrace(level, "Optical Black:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_gr", ob->blacklevel_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_r", ob->blacklevel_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_b", ob->blacklevel_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_gb", ob->blacklevel_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_start_bq", ob->area_start_bq);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_length_bq", ob->area_length_bq);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_length_bq_inverse",
+ ob->area_length_bq_inverse);
+}
+
+void
+ia_css_ob_debug_dtrace(
+ const struct ia_css_ob_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.mode=%d, config.level_gr=%d, config.level_r=%d, config.level_b=%d, config.level_gb=%d, config.start_position=%d, config.end_position=%d\n",
+ config->mode,
+ config->level_gr, config->level_r,
+ config->level_b, config->level_gb,
+ config->start_position, config->end_position);
+}
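
In raster mode, ia_css_ob_encode() above stores the OB window length together with its
reciprocal scaled by AREA_LENGTH_UNIT (a Q12 constant), so the ISP can average the OB
window with a multiply-and-shift instead of a divide. A standalone sketch, not part of
this patch, with invented window coordinates:

    #include <stdio.h>

    #define AREA_LENGTH_UNIT (1 << 12)      /* Q12, mirrors the BIT(12) definition */

    int main(void)
    {
            unsigned int start = 2, end = 9;                /* invented raster OB window */
            unsigned int len = (end - start) + 1;           /* 8 BQ lines */
            unsigned int inv = AREA_LENGTH_UNIT / len;      /* 512, i.e. 0.125 in Q12 */

            /* on the ISP, (sum * inv) >> 12 then approximates sum / len */
            printf("area_length_bq=%u inverse=%u (%.3f)\n",
                   len, inv, (double)inv / AREA_LENGTH_UNIT);
            return 0;
    }
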
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h
new file mode 100644
index 000000000000..636c6d5242f7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OB_HOST_H
+#define __IA_CSS_OB_HOST_H
+
+#include "ia_css_ob_types.h"
+#include "ia_css_ob_param.h"
+
+extern const struct ia_css_ob_config default_ob_config;
+
+void
+ia_css_ob_configure(
+ struct sh_css_isp_ob_stream_config *config,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth);
+
+void
+ia_css_ob_encode(
+ struct sh_css_isp_ob_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size);
+
+void
+ia_css_ob_vmem_encode(
+ struct sh_css_isp_ob_vmem_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned int size);
+
+void
+ia_css_ob_dump(
+ const struct sh_css_isp_ob_params *ob,
+ unsigned int level);
+
+void
+ia_css_ob_debug_dtrace(
+	const struct ia_css_ob_config *config,
+	unsigned int level);
+
+#endif /* __IA_CSS_OB_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h
new file mode 100644
index 000000000000..366b5900fd28
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OB_PARAM_H
+#define __IA_CSS_OB_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h"
+
+#define OBAREA_MASK_SIZE 64
+#define OBAREA_LENGTHBQ_INVERSE_SHIFT 12
+
+/* AREA_LENGTH_UNIT is dependent on NWAY, requires rewrite */
+#define AREA_LENGTH_UNIT BIT(12)
+
+/* OB (Optical Black) */
+struct sh_css_isp_ob_stream_config {
+ unsigned int isp_pipe_version;
+ unsigned int raw_bit_depth;
+};
+
+struct sh_css_isp_ob_params {
+ s32 blacklevel_gr;
+ s32 blacklevel_r;
+ s32 blacklevel_b;
+ s32 blacklevel_gb;
+ s32 area_start_bq;
+ s32 area_length_bq;
+ s32 area_length_bq_inverse;
+};
+
+struct sh_css_isp_ob_vmem_params {
+ VMEM_ARRAY(vmask, OBAREA_MASK_SIZE);
+};
+
+#endif /* __IA_CSS_OB_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
new file mode 100644
index 000000000000..b1470f8b424c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OB_TYPES_H
+#define __IA_CSS_OB_TYPES_H
+
+/* @file
+ * CSS-API header file for Optical Black level parameters.
+ */
+
+#include "ia_css_frac.h"
+
+/* Optical black mode.
+ */
+enum ia_css_ob_mode {
+ IA_CSS_OB_MODE_NONE, /** OB has no effect. */
+ IA_CSS_OB_MODE_FIXED, /** Fixed OB */
+ IA_CSS_OB_MODE_RASTER /** Raster OB */
+};
+
+/* Optical Black level configuration.
+ *
+ * ISP block: OB1
+ * ISP1: OB1 is used.
+ * ISP2: OB1 is used.
+ */
+struct ia_css_ob_config {
+ enum ia_css_ob_mode mode; /** Mode (None / Fixed / Raster).
+ enum, [0,2],
+ default 1, ineffective 0 */
+ ia_css_u0_16 level_gr; /** Black level for GR pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_r; /** Black level for R pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_b; /** Black level for B pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_gb; /** Black level for GB pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ u16 start_position; /** Start position of OB area
+ (used for Raster Mode only).
+ u16.0, [0,63],
+ default/ineffective 0 */
+ u16 end_position; /** End position of OB area
+ (used for Raster Mode only).
+ u16.0, [0,63],
+ default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_OB_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
new file mode 100644
index 000000000000..d09365e0c471
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_debug.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "ia_css_output.host.h"
+#include "isp.h"
+
+#include "assert_support.h"
+
+const struct ia_css_output_config default_output_config = {
+ 0,
+ 0
+};
+
+static const struct ia_css_output_configuration default_output_configuration = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+static const struct ia_css_output0_configuration default_output0_configuration
+ = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+static const struct ia_css_output1_configuration default_output1_configuration
+ = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_output_encode(
+ struct sh_css_isp_output_params *to,
+ const struct ia_css_output_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->enable_hflip = from->enable_hflip;
+ to->enable_vflip = from->enable_vflip;
+}
+
+int ia_css_output_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
+
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->height = from->info ? from->info->res.height : 0;
+ to->enable = from->info != NULL;
+ ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
+
+	/* Assume divisibility here; may need to generalize to fixed point. */
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ia_css_output0_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned int size)
+{
+ return ia_css_output_config(to, (const struct ia_css_output_configuration *)from, size);
+}
+
+int ia_css_output1_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned int size)
+{
+ return ia_css_output_config(to, (const struct ia_css_output_configuration *)from, size);
+}
+
+int ia_css_output_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (info) {
+ struct ia_css_output_configuration config =
+ default_output_configuration;
+
+ config.info = info;
+
+ return ia_css_configure_output(binary, &config);
+ }
+ return 0;
+}
+
+int ia_css_output0_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (info) {
+ struct ia_css_output0_configuration config =
+ default_output0_configuration;
+
+ config.info = info;
+
+ return ia_css_configure_output0(binary, &config);
+ }
+ return 0;
+}
+
+int ia_css_output1_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (info) {
+ struct ia_css_output1_configuration config =
+ default_output1_configuration;
+
+ config.info = info;
+
+ return ia_css_configure_output1(binary, &config);
+ }
+ return 0;
+}
+
+void
+ia_css_output_dump(
+ const struct sh_css_isp_output_params *output,
+ unsigned int level)
+{
+	if (!output)
+		return;
+ ia_css_debug_dtrace(level, "Horizontal Output Flip:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "enable", output->enable_hflip);
+ ia_css_debug_dtrace(level, "Vertical Output Flip:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "enable", output->enable_vflip);
+}
+
+void
+ia_css_output_debug_dtrace(
+ const struct ia_css_output_config *config,
+ unsigned int level)
+{
+	ia_css_debug_dtrace(level,
+			    "config.enable_hflip=%d, ",
+			    config->enable_hflip);
+	ia_css_debug_dtrace(level,
+			    "config.enable_vflip=%d\n",
+			    config->enable_vflip);
+}
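
ia_css_output_config() above derives width_a_over_b as the number of DMA port-B
elements per ISP vector and rejects ratios that are not whole numbers, as its
divisibility comment notes. A standalone sketch, not part of this patch (the element
counts are invented), of that check:

    #include <stdio.h>

    /* returns 0 and sets *ratio only when elems_a is a whole multiple of elems_b */
    static int width_ratio(unsigned int elems_a, unsigned int elems_b,
                           unsigned int *ratio)
    {
            if (elems_b == 0 || elems_a % elems_b != 0)
                    return -1;      /* would need fixed point instead */
            *ratio = elems_a / elems_b;
            return 0;
    }

    int main(void)
    {
            unsigned int ratio;

            if (width_ratio(64, 32, &ratio) == 0)
                    printf("width_a_over_b = %u\n", ratio); /* prints 2 */
            return 0;
    }
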
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h
new file mode 100644
index 000000000000..25408f1aede5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OUTPUT_HOST_H
+#define __IA_CSS_OUTPUT_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_output_types.h"
+#include "ia_css_output_param.h"
+
+extern const struct ia_css_output_config default_output_config;
+
+void
+ia_css_output_encode(
+ struct sh_css_isp_output_params *to,
+ const struct ia_css_output_config *from,
+ unsigned int size);
+
+int ia_css_output_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned int size);
+
+int ia_css_output0_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned int size);
+
+int ia_css_output1_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned int size);
+
+int ia_css_output_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+int ia_css_output0_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+int ia_css_output1_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+ia_css_output_dump(
+ const struct sh_css_isp_output_params *output,
+ unsigned int level);
+
+void
+ia_css_output_debug_dtrace(
+ const struct ia_css_output_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_OUTPUT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h
new file mode 100644
index 000000000000..940d03e909b1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OUTPUT_PARAM_H
+#define __IA_CSS_OUTPUT_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+
+/* output frame */
+struct sh_css_isp_output_isp_config {
+ u32 width_a_over_b;
+ u32 height;
+ u32 enable;
+ struct ia_css_frame_sp_info info;
+ struct dma_port_config port_b;
+};
+
+struct sh_css_isp_output_params {
+ u8 enable_hflip;
+ u8 enable_vflip;
+};
+
+#endif /* __IA_CSS_OUTPUT_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h
new file mode 100644
index 000000000000..7c17ef200571
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_OUTPUT_TYPES_H
+#define __IA_CSS_OUTPUT_TYPES_H
+
+/* @file
+ * CSS-API header file for parameters of output frames.
+ */
+
+/* Output frame
+ *
+ * ISP block: output frame
+ */
+
+//#include "ia_css_frame_public.h"
+struct ia_css_frame_info;
+
+struct ia_css_output_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output0_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output1_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output_config {
+ u8 enable_hflip; /** enable horizontal output mirroring */
+ u8 enable_vflip; /** enable vertical output mirroring */
+};
+
+#endif /* __IA_CSS_OUTPUT_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
new file mode 100644
index 000000000000..6296deb72bd4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_qplane.host.h"
+
+static const struct ia_css_qplane_configuration default_config = {
+ .pipe = (struct sh_css_sp_pipeline *)NULL,
+};
+
+int ia_css_qplane_config(struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
+
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+	/* Assume divisibility here; may need to generalize to fixed point. */
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ to->inout_port_config = from->pipe->inout_port_config;
+ to->format = from->info->format;
+
+ return 0;
+}
+
+int ia_css_qplane_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_qplane_configuration config = default_config;
+
+ config.pipe = pipe;
+ config.info = info;
+
+ return ia_css_configure_qplane(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
new file mode 100644
index 000000000000..2fa1de5e3633
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_QPLANE_HOST_H
+#define __IA_CSS_QPLANE_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#if 0
+/* Cannot be included, since sh_css_internal.h is too generic
+ * e.g. for FW generation.
+*/
+#include "sh_css_internal.h" /* sh_css_sp_pipeline */
+#endif
+
+#include "ia_css_qplane_types.h"
+#include "ia_css_qplane_param.h"
+
+int ia_css_qplane_config(struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned int size);
+
+int ia_css_qplane_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_QPLANE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h
new file mode 100644
index 000000000000..a50c98ea3f30
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_QPLANE_PARAM_H
+#define __IA_CSS_QPLANE_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+
+/* qplane channel */
+struct sh_css_isp_qplane_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ u32 inout_port_config;
+ u32 input_needs_raw_binning;
+ u32 format; /* enum ia_css_frame_format */
+};
+
+#endif /* __IA_CSS_QPLANE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
new file mode 100644
index 000000000000..ae9707e1f761
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_QPLANE_TYPES_H
+#define __IA_CSS_QPLANE_TYPES_H
+
+#include <ia_css_frame_public.h>
+#include "sh_css_internal.h"
+
+/* qplane frame
+ *
+ * ISP block: qplane frame
+ */
+
+struct ia_css_qplane_configuration {
+ const struct sh_css_sp_pipeline *pipe;
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_QPLANE_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
new file mode 100644
index 000000000000..a00f8d049a33
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "isp/modes/interface/isp_types.h"
+
+#include "ia_css_raw.host.h"
+
+static const struct ia_css_raw_configuration default_config = {
+ .pipe = (struct sh_css_sp_pipeline *)NULL,
+};
+
+/* MW: These are MIPI / ISYS properties, not camera function properties */
+static enum sh_stream_format
+css2isp_stream_format(enum atomisp_input_format from)
+{
+	switch (from) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ return sh_stream_format_yuv420_legacy;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ return sh_stream_format_yuv420;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ return sh_stream_format_yuv422;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ return sh_stream_format_rgb;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ return sh_stream_format_raw;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ default:
+ return sh_stream_format_raw;
+ }
+}
+
+int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ const struct ia_css_frame_info *in_info = from->in_info;
+ const struct ia_css_frame_info *internal_info = from->internal_info;
+ int ret;
+
+ if (!IS_ISP2401 || !in_info)
+ in_info = internal_info;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, in_info);
+ if (ret)
+ return ret;
+
+	/* Assume divisibility here; may need to generalize to fixed point. */
+ assert((in_info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) ||
+ (elems_a % to->port_b.elems == 0));
+
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->inout_port_config = from->pipe->inout_port_config;
+ to->format = in_info->format;
+ to->required_bds_factor = from->pipe->required_bds_factor;
+ to->two_ppc = from->two_ppc;
+ to->stream_format = css2isp_stream_format(from->stream_format);
+ to->deinterleaved = from->deinterleaved;
+
+ if (IS_ISP2401) {
+ to->start_column = in_info->crop_info.start_column;
+ to->start_line = in_info->crop_info.start_line;
+ to->enable_left_padding = from->enable_left_padding;
+ }
+
+ return 0;
+}
+
+int ia_css_raw_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved)
+{
+ u8 enable_left_padding = (uint8_t)((binary->left_padding) ? 1 : 0);
+ struct ia_css_raw_configuration config = default_config;
+
+ config.pipe = pipe;
+ config.in_info = in_info;
+ config.internal_info = internal_info;
+ config.two_ppc = two_ppc;
+ config.stream_format = binary->input_format;
+ config.deinterleaved = deinterleaved;
+ config.enable_left_padding = enable_left_padding;
+
+ return ia_css_configure_raw(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
new file mode 100644
index 000000000000..cae9c7fcb7f1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_RAW_HOST_H
+#define __IA_CSS_RAW_HOST_H
+
+#include "ia_css_binary.h"
+
+#include "ia_css_raw_types.h"
+#include "ia_css_raw_param.h"
+
+int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned int size);
+
+int ia_css_raw_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved);
+
+#endif /* __IA_CSS_RAW_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h
new file mode 100644
index 000000000000..24b34083d0e9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_RAW_PARAM_H
+#define __IA_CSS_RAW_PARAM_H
+
+#include "type_support.h"
+
+#include "dma.h"
+
+/* Raw channel */
+struct sh_css_isp_raw_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ u32 inout_port_config;
+ u32 input_needs_raw_binning;
+ u32 format; /* enum ia_css_frame_format */
+ u32 required_bds_factor;
+ u32 two_ppc;
+ u32 stream_format; /* enum sh_stream_format */
+ u32 deinterleaved;
+ u32 start_column; /*left crop offset*/
+ u32 start_line; /*top crop offset*/
+ u8 enable_left_padding; /*need this for multiple binary case*/
+};
+
+#endif /* __IA_CSS_RAW_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
new file mode 100644
index 000000000000..ee25414ea8cd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_RAW_TYPES_H
+#define __IA_CSS_RAW_TYPES_H
+
+#include <ia_css_frame_public.h>
+#include "sh_css_internal.h"
+
+/* Raw frame
+ *
+ * ISP block: Raw frame
+ */
+
+struct ia_css_raw_configuration {
+ const struct sh_css_sp_pipeline *pipe;
+ const struct ia_css_frame_info *in_info;
+ const struct ia_css_frame_info *internal_info;
+ bool two_ppc;
+ enum atomisp_input_format stream_format;
+ bool deinterleaved;
+ u8 enable_left_padding;
+};
+
+#endif /* __IA_CSS_RAW_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
new file mode 100644
index 000000000000..cd7d1c13605a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+
+#include "ia_css_types.h"
+#include "sh_css_internal.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_raa.host.h"
+
+void
+ia_css_raa_encode(
+ struct sh_css_isp_aa_params *to,
+ const struct ia_css_aa_config *from,
+ unsigned int size)
+{
+ (void)size;
+ (void)to;
+ (void)from;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h
new file mode 100644
index 000000000000..f800a05fe1dd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_RAA_HOST_H
+#define __IA_CSS_RAA_HOST_H
+
+#include "aa/aa_2/ia_css_aa2_types.h"
+#include "aa/aa_2/ia_css_aa2_param.h"
+
+void
+ia_css_raa_encode(
+ struct sh_css_isp_aa_params *to,
+ const struct ia_css_aa_config *from,
+ unsigned int size);
+
+#endif /* __IA_CSS_RAA_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
new file mode 100644
index 000000000000..d9b68b08dd3c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "ia_css_ref.host.h"
+
+int ia_css_ref_config(struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS, i;
+ int ret;
+
+ if (from->ref_frames[0]) {
+ ret = ia_css_dma_configure_from_info(&to->port_b, &from->ref_frames[0]->frame_info);
+ if (ret)
+ return ret;
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->dvs_frame_delay = from->dvs_frame_delay;
+ } else {
+ to->width_a_over_b = 1;
+ to->dvs_frame_delay = 0;
+ to->port_b.elems = elems_a;
+ }
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
+ if (from->ref_frames[i]) {
+ to->ref_frame_addr_y[i] = from->ref_frames[i]->data +
+ from->ref_frames[i]->planes.yuv.y.offset;
+ to->ref_frame_addr_c[i] = from->ref_frames[i]->data +
+ from->ref_frames[i]->planes.yuv.u.offset;
+ } else {
+ to->ref_frame_addr_y[i] = 0;
+ to->ref_frame_addr_c[i] = 0;
+ }
+ }
+
+	/* Assume divisibility here; may need to generalize to fixed point. */
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ia_css_ref_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *ref_frames,
+ const uint32_t dvs_frame_delay)
+{
+ struct ia_css_ref_configuration config;
+ unsigned int i;
+
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
+ config.ref_frames[i] = ref_frames[i];
+
+ config.dvs_frame_delay = dvs_frame_delay;
+
+ return ia_css_configure_ref(binary, &config);
+}
+
+void
+ia_css_init_ref_state(
+ struct sh_css_isp_ref_dmem_state *state,
+ unsigned int size)
+{
+ (void)size;
+ assert(MAX_NUM_VIDEO_DELAY_FRAMES >= 2);
+ state->ref_in_buf_idx = 0;
+ state->ref_out_buf_idx = 1;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
new file mode 100644
index 000000000000..f0556bbcc868
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_REF_HOST_H
+#define __IA_CSS_REF_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#include "ia_css_ref_types.h"
+#include "ia_css_ref_param.h"
+#include "ia_css_ref_state.h"
+
+int ia_css_ref_config(struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned int size);
+
+int ia_css_ref_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *ref_frames,
+ const uint32_t dvs_frame_delay);
+
+void
+ia_css_init_ref_state(
+ struct sh_css_isp_ref_dmem_state *state,
+ unsigned int size);
+#endif /* __IA_CSS_REF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
new file mode 100644
index 000000000000..ec1be8b6c65c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_REF_PARAM_H
+#define __IA_CSS_REF_PARAM_H
+
+#include <type_support.h>
+#include "sh_css_defs.h"
+#include "dma.h"
+
+/* Reference frame */
+struct ia_css_ref_configuration {
+ const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ u32 dvs_frame_delay;
+};
+
+struct sh_css_isp_ref_isp_config {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ ia_css_ptr ref_frame_addr_y[MAX_NUM_VIDEO_DELAY_FRAMES];
+ ia_css_ptr ref_frame_addr_c[MAX_NUM_VIDEO_DELAY_FRAMES];
+ u32 dvs_frame_delay;
+};
+
+#endif /* __IA_CSS_REF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h
new file mode 100644
index 000000000000..b057e440a22f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_REF_STATE_H
+#define __IA_CSS_REF_STATE_H
+
+#include "type_support.h"
+
+/* REF (temporal noise reduction) */
+struct sh_css_isp_ref_dmem_state {
+ s32 ref_in_buf_idx;
+ s32 ref_out_buf_idx;
+};
+
+#endif /* __IA_CSS_REF_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
new file mode 100644
index 000000000000..abc6af2da91b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_REF_TYPES_H
+#define __IA_CSS_REF_TYPES_H
+
+/* Reference frame
+ *
+ * ISP block: reference frame
+ */
+
+#include <ia_css_frame_public.h>
+
+#endif /* __IA_CSS_REF_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
new file mode 100644
index 000000000000..13678138c48c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+#include "assert_support.h"
+
+#include "bh/bh_2/ia_css_bh.host.h"
+#include "ia_css_s3a.host.h"
+
+const struct ia_css_3a_config default_3a_config = {
+ 25559,
+ 32768,
+ 7209,
+ 65535,
+ 0,
+ 65535,
+ {-3344, -6104, -19143, 19143, 6104, 3344, 0},
+ {1027, 0, -9219, 16384, -9219, 1027, 0}
+};
+
+static unsigned int s3a_raw_bit_depth;
+
+void
+ia_css_s3a_configure(unsigned int raw_bit_depth)
+{
+ s3a_raw_bit_depth = raw_bit_depth;
+}
+
+static void
+ia_css_ae_encode(
+ struct sh_css_isp_ae_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* coefficients to calculate Y */
+ to->y_coef_r =
+ uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_g =
+ uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_b =
+ uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT);
+}
+
+static void
+ia_css_awb_encode(
+ struct sh_css_isp_awb_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* AWB level gate */
+ to->lg_high_raw =
+ uDIGIT_FITTING(from->awb_lg_high_raw, 16, s3a_raw_bit_depth);
+ to->lg_low =
+ uDIGIT_FITTING(from->awb_lg_low, 16, SH_CSS_BAYER_BITS);
+ to->lg_high =
+ uDIGIT_FITTING(from->awb_lg_high, 16, SH_CSS_BAYER_BITS);
+}
+
+static void
+ia_css_af_encode(
+ struct sh_css_isp_af_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ unsigned int i;
+ (void)size;
+
+ /* af fir coefficients */
+ for (i = 0; i < 7; ++i) {
+ to->fir1[i] =
+ sDIGIT_FITTING(from->af_fir1_coef[i], 15,
+ SH_CSS_AF_FIR_SHIFT);
+ to->fir2[i] =
+ sDIGIT_FITTING(from->af_fir2_coef[i], 15,
+ SH_CSS_AF_FIR_SHIFT);
+ }
+}
+
+void
+ia_css_s3a_encode(
+ struct sh_css_isp_s3a_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size)
+{
+ (void)size;
+
+ ia_css_ae_encode(&to->ae, from, sizeof(to->ae));
+ ia_css_awb_encode(&to->awb, from, sizeof(to->awb));
+ ia_css_af_encode(&to->af, from, sizeof(to->af));
+}
+
+#if 0
+void
+ia_css_process_s3a(
+ unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ short dmem_offset = stage->binary->info->mem_offsets->dmem.s3a;
+
+ assert(params);
+
+ if (dmem_offset >= 0) {
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset],
+ &params->s3a_config);
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset],
+ &params->s3a_config);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM0] =
+ true;
+ }
+
+ params->isp_params_changed = true;
+}
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ae_dump(
+ const struct sh_css_isp_ae_params *ae,
+ unsigned int level)
+{
+	if (!ae)
+		return;
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_r", ae->y_coef_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_g", ae->y_coef_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_b", ae->y_coef_b);
+}
+
+void
+ia_css_awb_dump(
+ const struct sh_css_isp_awb_params *awb,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_high_raw", awb->lg_high_raw);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_low", awb->lg_low);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_high", awb->lg_high);
+}
+
+void
+ia_css_af_dump(
+ const struct sh_css_isp_af_params *af,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[0]", af->fir1[0]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[1]", af->fir1[1]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[2]", af->fir1[2]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[3]", af->fir1[3]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[4]", af->fir1[4]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[5]", af->fir1[5]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[6]", af->fir1[6]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[0]", af->fir2[0]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[1]", af->fir2[1]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[2]", af->fir2[2]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[3]", af->fir2[3]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[4]", af->fir2[4]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[5]", af->fir2[5]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[6]", af->fir2[6]);
+}
+
+void
+ia_css_s3a_dump(
+ const struct sh_css_isp_s3a_params *s3a,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level, "S3A Support:\n");
+ ia_css_ae_dump(&s3a->ae, level);
+ ia_css_awb_dump(&s3a->awb, level);
+ ia_css_af_dump(&s3a->af, level);
+}
+
+void
+ia_css_s3a_debug_dtrace(
+ const struct ia_css_3a_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.ae_y_coef_r=%d, config.ae_y_coef_g=%d, config.ae_y_coef_b=%d, config.awb_lg_high_raw=%d, config.awb_lg_low=%d, config.awb_lg_high=%d\n",
+ config->ae_y_coef_r, config->ae_y_coef_g,
+ config->ae_y_coef_b, config->awb_lg_high_raw,
+ config->awb_lg_low, config->awb_lg_high);
+}
+#endif
+
+void
+ia_css_s3a_hmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_bh_table *hmem_buf)
+{
+ struct ia_css_3a_rgby_output *out_ptr;
+ int i;
+
+	/* pixel count (in BQ) for the 3A area */
+ int count_for_3a;
+ int sum_r, diff;
+
+ assert(host_stats);
+ assert(host_stats->rgby_data);
+ assert(hmem_buf);
+
+ count_for_3a = host_stats->grid.width * host_stats->grid.height
+ * host_stats->grid.bqs_per_grid_cell
+ * host_stats->grid.bqs_per_grid_cell;
+
+ out_ptr = host_stats->rgby_data;
+
+ ia_css_bh_hmem_decode(out_ptr, hmem_buf);
+
+ /* Calculate sum of histogram of R,
+ which should not be less than count_for_3a */
+ sum_r = 0;
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ sum_r += out_ptr[i].r;
+ }
+ if (sum_r < count_for_3a) {
+ /* histogram is invalid */
+ return;
+ }
+
+ /* Verify for sum of histogram of R/G/B/Y */
+#if 0
+ {
+ int sum_g = 0;
+ int sum_b = 0;
+ int sum_y = 0;
+
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ sum_g += out_ptr[i].g;
+ sum_b += out_ptr[i].b;
+ sum_y += out_ptr[i].y;
+ }
+ if (sum_g != sum_r || sum_b != sum_r || sum_y != sum_r) {
+ /* histogram is invalid */
+ return;
+ }
+ }
+#endif
+
+ /*
+ * Limit the histogram area only to 3A area.
+ * In DSP, the histogram of 0 is incremented for pixels
+ * which are outside of 3A area. That amount should be subtracted here.
+ * hist[0] = hist[0] - ((sum of all hist[]) - (pixel count for 3A area))
+ */
+ diff = sum_r - count_for_3a;
+ out_ptr[0].r -= diff;
+ out_ptr[0].g -= diff;
+ out_ptr[0].b -= diff;
+ out_ptr[0].y -= diff;
+}
+
+void
+ia_css_s3a_dmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_3a_output *isp_stats)
+{
+ int isp_width, host_width, height, i;
+ struct ia_css_3a_output *host_ptr;
+
+ assert(host_stats);
+ assert(host_stats->data);
+ assert(isp_stats);
+
+ isp_width = host_stats->grid.aligned_width;
+ host_width = host_stats->grid.width;
+ height = host_stats->grid.height;
+ host_ptr = host_stats->data;
+
+ /* Getting 3A statistics from DMEM does not involve any
+ * transformation (unlike the VMEM version); we just copy the data
+ * using a different output width. */
+ for (i = 0; i < height; i++) {
+ memcpy(host_ptr, isp_stats, host_width * sizeof(*host_ptr));
+ isp_stats += isp_width;
+ host_ptr += host_width;
+ }
+}
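
    ia_css_s3a_dmem_decode() copies row by row because the ISP buffer uses the aligned (padded)
    width while the host table is packed at the grid width. A standalone sketch of the same
    strided copy, using plain ints instead of struct ia_css_3a_output:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* ISP layout: 4 values per row (aligned); host layout: 3 values per row. */
        int isp[2 * 4] = { 1, 2, 3, 0, 4, 5, 6, 0 };
        int host[2 * 3];
        int isp_width = 4, host_width = 3, height = 2, i;
        const int *src = isp;
        int *dst = host;

        for (i = 0; i < height; i++) {
            memcpy(dst, src, host_width * sizeof(*dst));
            src += isp_width;       /* skip the padding column */
            dst += host_width;
        }

        for (i = 0; i < 2 * 3; i++)
            printf("%d ", host[i]); /* 1 2 3 4 5 6 */
        printf("\n");
        return 0;
    }
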
+
+/* MW: this is an ISP function */
+static inline int
+merge_hi_lo_14(unsigned short hi, unsigned short lo)
+{
+ int val = (int)((((unsigned int)hi << 14) & 0xfffc000) |
+ ((unsigned int)lo & 0x3fff));
+ return val;
+}
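
    merge_hi_lo_14() reassembles a statistic whose upper and lower 14 bits are stored in
    separate VMEM planes. The standalone sketch below uses the same shift/mask layout to split
    an arbitrary 28-bit value and check that merging recovers it (the test value and the
    merge14() helper name are illustrative):

    #include <assert.h>
    #include <stdio.h>

    /* Same bit layout as merge_hi_lo_14() above: value = hi[27:14] | lo[13:0]. */
    static int merge14(unsigned short hi, unsigned short lo)
    {
        return (int)((((unsigned int)hi << 14) & 0xfffc000) |
                     ((unsigned int)lo & 0x3fff));
    }

    int main(void)
    {
        unsigned int value = 0x123456;        /* any value below 2^28 */
        unsigned short hi = value >> 14;      /* upper 14 bits */
        unsigned short lo = value & 0x3fff;   /* lower 14 bits */

        assert(merge14(hi, lo) == (int)value);
        printf("0x%x\n", (unsigned int)merge14(hi, lo));
        return 0;
    }
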
+
+void
+ia_css_s3a_vmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const u16 *isp_stats_hi,
+ const uint16_t *isp_stats_lo)
+{
+ int out_width, out_height, chunk, rest, kmax, y, x, k, elm_start, elm, ofs;
+ const u16 *hi, *lo;
+ struct ia_css_3a_output *output;
+
+ assert(host_stats);
+ assert(host_stats->data);
+ assert(isp_stats_hi);
+ assert(isp_stats_lo);
+
+ output = host_stats->data;
+ out_width = host_stats->grid.width;
+ out_height = host_stats->grid.height;
+ hi = isp_stats_hi;
+ lo = isp_stats_lo;
+
+ chunk = ISP_VEC_NELEMS >> host_stats->grid.deci_factor_log2;
+ chunk = max(chunk, 1);
+
+ for (y = 0; y < out_height; y++) {
+ elm_start = y * ISP_S3ATBL_HI_LO_STRIDE;
+ rest = out_width;
+ x = 0;
+ while (x < out_width) {
+ kmax = (rest > chunk) ? chunk : rest;
+ ofs = y * out_width + x;
+ elm = elm_start + x * sizeof(*output) / sizeof(int32_t);
+ for (k = 0; k < kmax; k++, elm++) {
+ output[ofs + k].ae_y = merge_hi_lo_14(
+ hi[elm + chunk * 0], lo[elm + chunk * 0]);
+ output[ofs + k].awb_cnt = merge_hi_lo_14(
+ hi[elm + chunk * 1], lo[elm + chunk * 1]);
+ output[ofs + k].awb_gr = merge_hi_lo_14(
+ hi[elm + chunk * 2], lo[elm + chunk * 2]);
+ output[ofs + k].awb_r = merge_hi_lo_14(
+ hi[elm + chunk * 3], lo[elm + chunk * 3]);
+ output[ofs + k].awb_b = merge_hi_lo_14(
+ hi[elm + chunk * 4], lo[elm + chunk * 4]);
+ output[ofs + k].awb_gb = merge_hi_lo_14(
+ hi[elm + chunk * 5], lo[elm + chunk * 5]);
+ output[ofs + k].af_hpf1 = merge_hi_lo_14(
+ hi[elm + chunk * 6], lo[elm + chunk * 6]);
+ output[ofs + k].af_hpf2 = merge_hi_lo_14(
+ hi[elm + chunk * 7], lo[elm + chunk * 7]);
+ }
+ x += chunk;
+ rest -= chunk;
+ }
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h
new file mode 100644
index 000000000000..f000b4347472
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_S3A_HOST_H
+#define __IA_CSS_S3A_HOST_H
+
+#include "ia_css_s3a_types.h"
+#include "ia_css_s3a_param.h"
+#include "bh/bh_2/ia_css_bh.host.h"
+
+extern const struct ia_css_3a_config default_3a_config;
+
+void
+ia_css_s3a_configure(
+ unsigned int raw_bit_depth);
+
+void
+ia_css_s3a_encode(
+ struct sh_css_isp_s3a_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ae_dump(
+ const struct sh_css_isp_ae_params *ae,
+ unsigned int level);
+
+void
+ia_css_awb_dump(
+ const struct sh_css_isp_awb_params *awb,
+ unsigned int level);
+
+void
+ia_css_af_dump(
+ const struct sh_css_isp_af_params *af,
+ unsigned int level);
+
+void
+ia_css_s3a_dump(
+ const struct sh_css_isp_s3a_params *s3a,
+ unsigned int level);
+
+void
+ia_css_s3a_debug_dtrace(
+ const struct ia_css_3a_config *config,
+ unsigned int level);
+#endif
+
+void
+ia_css_s3a_hmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_bh_table *hmem_buf);
+
+void
+ia_css_s3a_dmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_3a_output *isp_stats);
+
+void
+ia_css_s3a_vmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const u16 *isp_stats_hi,
+ const uint16_t *isp_stats_lo);
+
+#endif /* __IA_CSS_S3A_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h
new file mode 100644
index 000000000000..f88264781a20
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_S3A_PARAM_H
+#define __IA_CSS_S3A_PARAM_H
+
+#include "type_support.h"
+
+/* AE (3A Support) */
+struct sh_css_isp_ae_params {
+ /* coefficients to calculate Y */
+ s32 y_coef_r;
+ s32 y_coef_g;
+ s32 y_coef_b;
+};
+
+/* AWB (3A Support) */
+struct sh_css_isp_awb_params {
+ s32 lg_high_raw;
+ s32 lg_low;
+ s32 lg_high;
+};
+
+/* AF (3A Support) */
+struct sh_css_isp_af_params {
+ s32 fir1[7];
+ s32 fir2[7];
+};
+
+/* S3A (3A Support) */
+struct sh_css_isp_s3a_params {
+ /* coefficients to calculate Y */
+ struct sh_css_isp_ae_params ae;
+
+ /* AWB level gate */
+ struct sh_css_isp_awb_params awb;
+
+ /* af fir coefficients */
+ struct sh_css_isp_af_params af;
+};
+
+#endif /* __IA_CSS_S3A_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
new file mode 100644
index 000000000000..b8206d2f3d31
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_S3A_TYPES_H
+#define __IA_CSS_S3A_TYPES_H
+
+/* @file
+* CSS-API header file for 3A statistics parameters.
+*/
+
+#include <ia_css_frac.h>
+
+#if (defined(SYSTEM_css_skycam_c0_system)) && (!defined(PIPE_GENERATION))
+#include "../../../../components/stats_3a/src/stats_3a_public.h"
+#endif
+
+/* 3A configuration. This configures the 3A statistics collection
+ * module.
+ */
+
+/* 3A statistics grid
+ *
+ * ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE))
+ * S3A2 (3A Support for 3A ver.2 (Histogram is used for AE))
+ * ISP1: S3A1 is used.
+ * ISP2: S3A2 is used.
+ */
+struct ia_css_3a_grid_info {
+#if defined(SYSTEM_css_skycam_c0_system)
+ u32 ae_enable; /** ae enabled in binary,
+ 0:disabled, 1:enabled */
+ struct ae_public_config_grid_config
+ ae_grd_info; /** see description in ae_public.h*/
+
+ u32 awb_enable; /** awb enabled in binary,
+ 0:disabled, 1:enabled */
+ struct awb_public_config_grid_config
+ awb_grd_info; /** see description in awb_public.h*/
+
+ u32 af_enable; /** af enabled in binary,
+ 0:disabled, 1:enabled */
+ struct af_public_grid_config af_grd_info; /** see description in af_public.h*/
+
+ u32 awb_fr_enable; /** awb_fr enabled in binary,
+ 0:disabled, 1:enabled */
+ struct awb_fr_public_grid_config
+ awb_fr_grd_info;/** see description in awb_fr_public.h*/
+
+ u32 elem_bit_depth; /** TODO:Taken from BYT - need input from AIQ
+ if needed for SKC
+ Bit depth of element used
+ to calculate 3A statistics.
+ This is 13, which is the normalized
+ bayer bit depth in DSP. */
+
+#else
+ u32 enable; /** 3A statistics enabled.
+ 0:disabled, 1:enabled */
+ u32 use_dmem; /** DMEM or VMEM determines layout.
+ 0:3A statistics are stored to VMEM,
+ 1:3A statistics are stored to DMEM */
+ u32 has_histogram; /** Statistics include histogram.
+ 0:no histogram, 1:has histogram */
+ u32 width; /** Width of 3A grid table.
+ (= Horizontal number of grid cells
+ in table, which cells have effective
+ statistics.) */
+ u32 height; /** Height of 3A grid table.
+ (= Vertical number of grid cells
+ in table, which cells have effective
+ statistics.) */
+ u32 aligned_width; /** Horizontal stride (for alloc).
+ (= Horizontal number of grid cells
+ in table, which means
+ the allocated width.) */
+ u32 aligned_height; /** Vertical stride (for alloc).
+ (= Vertical number of grid cells
+ in table, which means
+ the allocated height.) */
+ u32 bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
+ (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
+ Valid values are 8,16,32,64. */
+ u32 deci_factor_log2; /** log2 of bqs_per_grid_cell. */
+ u32 elem_bit_depth; /** Bit depth of element used
+ to calculate 3A statistics.
+ This is 13, which is the normalized
+ bayer bit depth in DSP. */
+#endif
+};
+
+/* This struct should be split into 3, for AE, AWB and AF.
+ * However, that will require driver / 3A library modifications.
+ */
+
+/* 3A configuration. This configures the 3A statistics collection
+ * module.
+ *
+ * ae_y_*: Coefficients to calculate luminance from bayer.
+ * awb_lg_*: Thresholds to check the saturated bayer pixels for AWB.
+ * Condition of effective pixel for AWB level gate check:
+ * bayer(sensor) <= awb_lg_high_raw &&
+ * bayer(when AWB statistics are calculated) >= awb_lg_low &&
+ * bayer(when AWB statistics are calculated) <= awb_lg_high
+ * af_fir*: Coefficients of high pass filter to calculate AF statistics.
+ *
+ * ISP block: S3A1(ae_y_* for AE/AF, awb_lg_* for AWB)
+ * S3A2(ae_y_* for AF, awb_lg_* for AWB)
+ * SDVS1(ae_y_*)
+ * SDVS2(ae_y_*)
+ * ISP1: S3A1 and SDVS1 are used.
+ * ISP2: S3A2 and SDVS2 are used.
+ */
+struct ia_css_3a_config {
+ ia_css_u0_16 ae_y_coef_r; /** Weight of R for Y.
+ u0.16, [0,65535],
+ default/ineffective 25559 */
+ ia_css_u0_16 ae_y_coef_g; /** Weight of G for Y.
+ u0.16, [0,65535],
+ default/ineffective 32768 */
+ ia_css_u0_16 ae_y_coef_b; /** Weight of B for Y.
+ u0.16, [0,65535],
+ default/ineffective 7209 */
+ ia_css_u0_16 awb_lg_high_raw; /** AWB level gate high for raw.
+ u0.16, [0,65535],
+ default 65472(=1023*64),
+ ineffective 65535 */
+ ia_css_u0_16 awb_lg_low; /** AWB level gate low.
+ u0.16, [0,65535],
+ default 64(=1*64),
+ ineffective 0 */
+ ia_css_u0_16 awb_lg_high; /** AWB level gate high.
+ u0.16, [0,65535],
+ default 65535,
+ ineffective 65535 */
+ ia_css_s0_15 af_fir1_coef[7]; /** AF FIR coefficients of fir1.
+ s0.15, [-32768,32767],
+ default/ineffective
+ -6689,-12207,-32768,32767,12207,6689,0 */
+ ia_css_s0_15 af_fir2_coef[7]; /** AF FIR coefficients of fir2.
+ s0.15, [-32768,32767],
+ default/ineffective
+ 2053,0,-18437,32767,-18437,2053,0 */
+};
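
    The ae_y_coef_* fields are u0.16 weights, and the three defaults quoted above
    (25559 + 32768 + 7209) sum to exactly 65536, i.e. 1.0 in u0.16. A standalone sketch of how
    such weights would combine per-colour values into a luminance estimate (the pixel values
    and the apply_u0_16() helper are hypothetical, not part of the CSS API):

    #include <stdio.h>
    #include <stdint.h>

    /* u0.16 weight applied to a pixel value: (pix * w) >> 16 */
    static uint32_t apply_u0_16(uint32_t pix, uint32_t w)
    {
        return (pix * w) >> 16;
    }

    int main(void)
    {
        /* Default/ineffective weights from ia_css_3a_config above. */
        uint32_t coef_r = 25559, coef_g = 32768, coef_b = 7209;
        /* One Bayer quad, already reduced to per-colour values (example numbers). */
        uint32_t r = 512, g = 800, b = 300;

        uint32_t y = apply_u0_16(r, coef_r) +
                     apply_u0_16(g, coef_g) +
                     apply_u0_16(b, coef_b);

        printf("Y = %u\n", y);  /* ~0.39*512 + 0.5*800 + 0.11*300 = 632 */
        return 0;
    }
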
+
+/* 3A statistics. This structure describes the data stored
+ * in each 3A grid point.
+ *
+ * ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE)
+ * S3A2 (3A Support for 3A ver.2) (Histogram is used for AE)
+ * - ae_y is used only for S3A1.
+ * - awb_* and af_* are used both for S3A1 and S3A2.
+ * ISP1: S3A1 is used.
+ * ISP2: S3A2 is used.
+ */
+struct ia_css_3a_output {
+ s32 ae_y; /** Sum of Y in a statistics window, for AE.
+ (u19.13) */
+ s32 awb_cnt; /** Number of effective pixels
+ in a statistics window.
+ Pixels passed by the AWB level gate check are
+ judged as "effective". (u32) */
+ s32 awb_gr; /** Sum of Gr in a statistics window, for AWB.
+ All Gr pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 awb_r; /** Sum of R in a statistics window, for AWB.
+ All R pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 awb_b; /** Sum of B in a statistics window, for AWB.
+ All B pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 awb_gb; /** Sum of Gb in a statistics window, for AWB.
+ All Gb pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ s32 af_hpf1; /** Sum of |Y| following high pass filter af_fir1
+ within a statistics window, for AF. (u19.13) */
+ s32 af_hpf2; /** Sum of |Y| following high pass filter af_fir2
+ within a statistics window, for AF. (u19.13) */
+};
+
+/* 3A Statistics. This structure describes the statistics that are generated
+ * using the provided configuration (ia_css_3a_config).
+ */
+struct ia_css_3a_statistics {
+ struct ia_css_3a_grid_info
+ grid; /** grid info contains the dimensions of the 3A grid */
+ struct ia_css_3a_output
+ *data; /** the pointer to 3a_output[grid.width * grid.height]
+ containing the 3A statistics */
+ struct ia_css_3a_rgby_output *rgby_data;/** the pointer to 3a_rgby_output[256]
+ containing the histogram */
+};
+
+/* Histogram (Statistics for AE).
+ *
+ * 4 histograms(r,g,b,y),
+ * 256 bins for each histogram, unsigned 24bit value for each bin.
+ * struct ia_css_3a_rgby_output data[256];
+
+ *
+ * (ISP1: HIST2 is not used.)
+ * ISP2: HIST2 is used.
+ */
+struct ia_css_3a_rgby_output {
+ u32 r; /** Number of R of one bin of the histogram R. (u24) */
+ u32 g; /** Number of G of one bin of the histogram G. (u24) */
+ u32 b; /** Number of B of one bin of the histogram B. (u24) */
+ u32 y; /** Number of Y of one bin of the histogram Y. (u24) */
+};
+
+#endif /* __IA_CSS_S3A_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
new file mode 100644
index 000000000000..16b7d36e1cc2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "ia_css_sc.host.h"
+
+void
+ia_css_sc_encode(
+ struct sh_css_isp_sc_params *to,
+ struct ia_css_shading_table **from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_shift = (*from)->fraction_bits;
+}
+
+void
+ia_css_sc_dump(
+ const struct sh_css_isp_sc_params *sc,
+ unsigned int level)
+{
+ if (!sc)
+         return;
+ ia_css_debug_dtrace(level, "Shading Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "sc_gain_shift", sc->gain_shift);
+}
+
+/* ------ deprecated(bz675) : from ------ */
+/* It looks like @parameter{} (in *.pipe) is used to generate the process/get/set functions
+   for parameters which should be used in the isp kernels.
+   However, the ia_css_shading_settings structure only has a parameter which is used in the css,
+   and has no parameter which is used in the isp kernels.
+   Therefore, @parameter{} was not used to generate the get/set functions
+   for the ia_css_shading_settings structure. (michie) */
+void
+sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
+ struct ia_css_shading_settings *settings)
+{
+ if (!settings)
+ return;
+ assert(params);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_get_shading_settings() enter: settings=%p\n", settings);
+
+ *settings = params->shading_settings;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_get_shading_settings() leave: settings.enable_shading_table_conversion=%d\n",
+ settings->enable_shading_table_conversion);
+}
+
+void
+sh_css_set_shading_settings(struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_settings *settings)
+{
+ if (!settings)
+ return;
+ assert(params);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_shading_settings() enter: settings.enable_shading_table_conversion=%d\n",
+ settings->enable_shading_table_conversion);
+
+ params->shading_settings = *settings;
+ params->shading_settings_changed = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_shading_settings() leave: return_void\n");
+}
+
+/* ------ deprecated(bz675) : to ------ */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
new file mode 100644
index 000000000000..1730b368d191
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SC_HOST_H
+#define __IA_CSS_SC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_sc_types.h"
+#include "ia_css_sc_param.h"
+
+void
+ia_css_sc_encode(
+ struct sh_css_isp_sc_params *to,
+ struct ia_css_shading_table **from,
+ unsigned int size);
+
+void
+ia_css_sc_dump(
+ const struct sh_css_isp_sc_params *sc,
+ unsigned int level);
+
+/* ------ deprecated(bz675) : from ------ */
+void
+sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
+ struct ia_css_shading_settings *settings);
+
+void
+sh_css_set_shading_settings(struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_settings *settings);
+/* ------ deprecated(bz675) : to ------ */
+
+#endif /* __IA_CSS_SC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h
new file mode 100644
index 000000000000..b49761ad39ca
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SC_PARAM_H
+#define __IA_CSS_SC_PARAM_H
+
+#include "type_support.h"
+
+/* SC (Shading Correction) */
+struct sh_css_isp_sc_params {
+ s32 gain_shift;
+};
+
+/* Number of horizontal slice times for interpolated gain:
+ *
+ * The start position of the internal frame does not match the start position of the shading table.
+ * To get a vector of shading gains (interpolated horizontally and vertically)
+ * which matches a vector on the internal frame,
+ * vec_slice is used for 2 adjacent vectors of shading gains.
+ * The number of shift times by vec_slice is 8.
+ * Max grid cell bqs to support the shading table centering: N = 32
+ * DIV_ROUND_UP(N-1, ISP_SLICE_NELEMS) = DIV_ROUND_UP(31, 4) = 8
+ */
+#define SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES 8
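
    The value 8 follows from the DIV_ROUND_UP(N-1, ISP_SLICE_NELEMS) expression in the comment
    above. A standalone check of that arithmetic, with DIV_ROUND_UP defined locally (as in the
    kernel) and ISP_SLICE_NELEMS assumed to be 4 as the comment implies:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        int N = 32;                 /* max grid cell size in BQ, per the comment above */
        int ISP_SLICE_NELEMS = 4;   /* assumed slice width */

        printf("%d\n", DIV_ROUND_UP(N - 1, ISP_SLICE_NELEMS));  /* 8 */
        return 0;
    }
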
+
+struct sh_css_isp_sc_isp_config {
+ u32 interped_gain_hor_slice_bqs[SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES];
+ u32 internal_frame_origin_y_bqs_on_sctbl;
+};
+
+#endif /* __IA_CSS_SC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
new file mode 100644
index 000000000000..2f91934f2c06
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SC_TYPES_H
+#define __IA_CSS_SC_TYPES_H
+
+/* @file
+* CSS-API header file for Lens Shading Correction (SC) parameters.
+*/
+
+/* Number of color planes in the shading table. */
+#define IA_CSS_SC_NUM_COLORS 4
+
+/* The 4 colors that a shading table consists of.
+ * For each color we store a grid of values.
+ */
+enum ia_css_sc_color {
+ IA_CSS_SC_COLOR_GR, /** Green on a green-red line */
+ IA_CSS_SC_COLOR_R, /** Red */
+ IA_CSS_SC_COLOR_B, /** Blue */
+ IA_CSS_SC_COLOR_GB /** Green on a green-blue line */
+};
+
+/* Lens Shading Correction table.
+ *
+ * This describes the color shading artefacts
+ * introduced by lens imperfections. To correct artefacts,
+ * bayer values should be multiplied by gains in this table.
+ *
+ *------------ deprecated(bz675) : from ---------------------------
+ * When shading_settings.enable_shading_table_conversion is set as 0,
+ * this shading table is directly sent to the isp. This table should contain
+ * the data based on the ia_css_shading_info information filled in the css.
+ * So, the driver needs to get the ia_css_shading_info information
+ * from the css, prior to generating the shading table.
+ *
+ * When shading_settings.enable_shading_table_conversion is set as 1,
+ * this shading table is converted in the legacy way in the css
+ * before it is sent to the isp.
+ * The driver does not need to get the ia_css_shading_info information.
+ *
+ * NOTE:
+ * The shading table conversion will be removed from the css in the near future,
+ * because it does not support the bayer scaling by sensor.
+ * Also, we had better generate the shading table only in one place(AIC).
+ * At the moment, to support the old driver which assumes the conversion is done in the css,
+ * shading_settings.enable_shading_table_conversion is set as 1 by default.
+ *------------ deprecated(bz675) : to ---------------------------
+ *
+ * ISP block: SC1
+ * ISP1: SC1 is used.
+ * ISP2: SC1 is used.
+ */
+struct ia_css_shading_table {
+ u32 enable; /** Set to false for no shading correction.
+ The data field can be NULL when enable == true */
+ /* ------ deprecated(bz675) : from ------ */
+ u32 sensor_width; /** Native sensor width in pixels. */
+ u32 sensor_height; /** Native sensor height in lines.
+ When shading_settings.enable_shading_table_conversion is set
+ as 0, sensor_width and sensor_height are NOT used.
+ These are used only in the legacy shading table conversion
+ in the css, when shading_settings.
+ enable_shading_table_conversion is set as 1. */
+ /* ------ deprecated(bz675) : to ------ */
+ u32 width; /** Number of data points per line per color.
+ u8.0, [0,81] */
+ u32 height; /** Number of lines of data points per color.
+ u8.0, [0,61] */
+ u32 fraction_bits; /** Bits of fractional part in the data
+ points.
+ u8.0, [0,13] */
+ u16 *data[IA_CSS_SC_NUM_COLORS];
+ /** Table data, one array for each color.
+ Use ia_css_sc_color to index this array.
+ u[13-fraction_bits].[fraction_bits], [0,8191] */
+};
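
    The table entries are fixed-point gains with `fraction_bits` fractional bits, so applying a
    gain is a multiply followed by a right shift. A standalone illustration (the apply_gain()
    helper and the sample values are hypothetical, not the ISP implementation):

    #include <stdio.h>
    #include <stdint.h>

    /* Multiply a Bayer value by a shading gain stored as
     * u[13-fraction_bits].[fraction_bits], as in ia_css_shading_table. */
    static uint32_t apply_gain(uint32_t bayer, uint16_t gain, uint32_t fraction_bits)
    {
        return (bayer * gain) >> fraction_bits;
    }

    int main(void)
    {
        uint32_t fraction_bits = 12;            /* within the documented [0,13] */
        uint16_t gain = 1 << fraction_bits;     /* 1.0: no correction */
        uint16_t gain_1_5 = gain + (gain >> 1); /* 1.5: brighten e.g. a corner */

        printf("%u %u\n", apply_gain(600, gain, fraction_bits),
               apply_gain(600, gain_1_5, fraction_bits));   /* 600 900 */
        return 0;
    }
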
+
+/* ------ deprecated(bz675) : from ------ */
+/* Shading Correction settings.
+ *
+ * NOTE:
+ * This structure should be removed when the shading table conversion is
+ * removed from the css.
+ */
+struct ia_css_shading_settings {
+ u32 enable_shading_table_conversion; /** Set to 0,
+ if the conversion of the shading table should be disabled
+ in the css. (default 1)
+ 0: The shading table is directly sent to the isp.
+ The shading table should contain the data based on the
+ ia_css_shading_info information filled in the css.
+ 1: The shading table is converted in the css, to be fitted
+ to the shading table definition required in the isp.
+ NOTE:
+ Previously, the shading table was always converted in the css
+ before it was sent to the isp, and this config was not defined.
+ Currently, the driver is supposed to pass the shading table
+ which should be directly sent to the isp.
+ However, some drivers may still pass the shading table which
+ needs the conversion without setting this config as 1.
+ To support such an unexpected case for the time being,
+ enable_shading_table_conversion is set as 1 by default
+ in the css. */
+};
+
+/* ------ deprecated(bz675) : to ------ */
+
+#endif /* __IA_CSS_SC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h
new file mode 100644
index 000000000000..bad400b2fd37
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_SDIS_COMMON_HOST_H
+#define _IA_CSS_SDIS_COMMON_HOST_H
+
+#define ISP_MAX_SDIS_HOR_PROJ_NUM_ISP \
+ __ISP_SDIS_HOR_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, ISP_MAX_INTERNAL_HEIGHT, \
+ SH_CSS_DIS_DECI_FACTOR_LOG2, ISP_PIPE_VERSION)
+#define ISP_MAX_SDIS_VER_PROJ_NUM_ISP \
+ __ISP_SDIS_VER_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, \
+ SH_CSS_DIS_DECI_FACTOR_LOG2)
+
+#define _ISP_SDIS_HOR_COEF_NUM_VECS \
+ __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_INTERNAL_WIDTH)
+#define ISP_MAX_SDIS_HOR_COEF_NUM_VECS \
+ __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_MAX_INTERNAL_WIDTH)
+#define ISP_MAX_SDIS_VER_COEF_NUM_VECS \
+ __ISP_SDIS_VER_COEF_NUM_VECS(ISP_MAX_INTERNAL_HEIGHT)
+
+/* SDIS Coefficients: */
+/* The ISP uses vectors to store the coefficients, so we round
+ the number of coefficients up to vectors. */
+#define __ISP_SDIS_HOR_COEF_NUM_VECS(in_width) _ISP_VECS(_ISP_BQS(in_width))
+#define __ISP_SDIS_VER_COEF_NUM_VECS(in_height) _ISP_VECS(_ISP_BQS(in_height))
+
+/* SDIS Projections:
+ * SDIS1: Horizontal projections are calculated for each line.
+ * Vertical projections are calculated for each column.
+ * SDIS2: Projections are calculated for each grid cell.
+ * Grid cells that do not fall completely within the image are not
+ * valid. The host needs to use the bigger one for the stride but
+ * should only return the valid ones to the 3A. */
+#define __ISP_SDIS_HOR_PROJ_NUM_ISP(in_width, in_height, deci_factor_log2, \
+ isp_pipe_version) \
+ ((isp_pipe_version == 1) ? \
+ CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2) : \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2))
+
+#define __ISP_SDIS_VER_PROJ_NUM_ISP(in_width, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2)
+
+#define SH_CSS_DIS_VER_NUM_COEF_TYPES(b) \
+ (((b)->info->sp.pipeline.isp_pipe_version == 2) ? \
+ IA_CSS_DVS2_NUM_COEF_TYPES : \
+ IA_CSS_DVS_NUM_COEF_TYPES)
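
    The projection-count macros round the BQ count up to whole grid cells via CEIL_SHIFT before
    shifting by the decimation factor; as the comment above explains, the SDIS1 branch is
    height-based (one horizontal projection per line) while the SDIS2 branch is width-based. A
    standalone sketch evaluating both branches of __ISP_SDIS_HOR_PROJ_NUM_ISP for an example
    frame, with CEIL_SHIFT redefined locally and _ISP_BQS assumed to be pixels/2:

    #include <stdio.h>

    #define CEIL_SHIFT(a, b)    (((a) + (1 << (b)) - 1) >> (b))
    #define BQS(pixels)         ((pixels) / 2)  /* assumed: 1 BQ spans 2 pixels per axis */

    int main(void)
    {
        unsigned int width = 1280, height = 720, deci_log2 = 6;  /* example values */

        unsigned int hor_proj_v1 = CEIL_SHIFT(BQS(height), deci_log2); /* per line */
        unsigned int hor_proj_v2 = CEIL_SHIFT(BQS(width), deci_log2);  /* per grid column */

        printf("v1=%u v2=%u\n", hor_proj_v1, hor_proj_v2);  /* v1=6 v2=10 */
        return 0;
    }
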
+
+#ifndef PIPE_GENERATION
+#if defined(__ISP) || defined(MK_FIRMWARE)
+
+/* Array cannot be 2-dimensional, since driver DDR allocation does not know the stride */
+struct sh_css_isp_sdis_hori_proj_tbl {
+ s32 tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_HOR_PROJ_NUM_ISP];
+#if DVS2_PROJ_MARGIN > 0
+ s32 margin[DVS2_PROJ_MARGIN];
+#endif
+};
+
+struct sh_css_isp_sdis_vert_proj_tbl {
+ s32 tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_VER_PROJ_NUM_ISP];
+#if DVS2_PROJ_MARGIN > 0
+ s32 margin[DVS2_PROJ_MARGIN];
+#endif
+};
+
+struct sh_css_isp_sdis_hori_coef_tbl {
+ VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES],
+ ISP_MAX_SDIS_HOR_COEF_NUM_VECS * ISP_NWAY);
+};
+
+struct sh_css_isp_sdis_vert_coef_tbl {
+ VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES],
+ ISP_MAX_SDIS_VER_COEF_NUM_VECS * ISP_NWAY);
+};
+
+#endif /* defined(__ISP) || defined (MK_FIRMWARE) */
+#endif /* PIPE_GENERATION */
+
+#ifndef PIPE_GENERATION
+struct s_sdis_config {
+ unsigned int horicoef_vectors;
+ unsigned int vertcoef_vectors;
+ unsigned int horiproj_num;
+ unsigned int vertproj_num;
+};
+
+extern struct s_sdis_config sdis_config;
+#endif
+
+#endif /* _IA_CSS_SDIS_COMMON_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h
new file mode 100644
index 000000000000..1a3790c1c353
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SDIS_COMMON_TYPES_H
+#define __IA_CSS_SDIS_COMMON_TYPES_H
+
+/* @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+#include <type_support.h>
+
+/* DVS statistics grid dimensions in number of cells.
+ */
+
+struct ia_css_dvs_grid_dim {
+ u32 width; /** Width of DVS grid table in cells */
+ u32 height; /** Height of DVS grid table in cells */
+};
+
+/* DVS statistics dimensions in number of cells for
+ * grid, coefficient and projection.
+ */
+
+struct ia_css_sdis_info {
+ struct {
+ struct ia_css_dvs_grid_dim dim; /* Dimensions */
+ struct ia_css_dvs_grid_dim pad; /* Padded dimensions */
+ } grid, coef, proj;
+ u32 deci_factor_log2;
+};
+
+/* DVS statistics grid
+ *
+ * ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes))
+ * SDVS2 (DVS Support for DVS ver.2 (6-axes))
+ * ISP1: SDVS1 is used.
+ * ISP2: SDVS2 is used.
+ */
+struct ia_css_dvs_grid_res {
+ u32 width; /** Width of DVS grid table.
+ (= Horizontal number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of vertical statistics. */
+ u32 aligned_width; /** Stride of each grid line.
+ (= Horizontal number of grid cells
+ in table, which means
+ the allocated width.) */
+ u32 height; /** Height of DVS grid table.
+ (= Vertical number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of horizontal statistics. */
+ u32 aligned_height;/** Stride of each grid column.
+ (= Vertical number of grid cells
+ in table, which means
+ the allocated height.) */
+};
+
+/* TODO: use ia_css_dvs_grid_res in here.
+ * However, that implies driver I/F changes
+ */
+struct ia_css_dvs_grid_info {
+ u32 enable; /** DVS statistics enabled.
+ 0:disabled, 1:enabled */
+ u32 width; /** Width of DVS grid table.
+ (= Horizontal number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of vertical statistics. */
+ u32 aligned_width; /** Stride of each grid line.
+ (= Horizontal number of grid cells
+ in table, which means
+ the allocated width.) */
+ u32 height; /** Height of DVS grid table.
+ (= Vertical number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of horizontal statistics. */
+ u32 aligned_height;/** Stride of each grid column.
+ (= Vertical number of grid cells
+ in table, which means
+ the allocated height.) */
+ u32 bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
+ (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
+ For DVS1, valid value is 64.
+ For DVS2, valid value is only 64,
+ currently. */
+ u32 num_hor_coefs; /** Number of horizontal coefficients. */
+ u32 num_ver_coefs; /** Number of vertical coefficients. */
+};
+
+/* Number of DVS statistics levels
+ */
+#define IA_CSS_DVS_STAT_NUM_OF_LEVELS 3
+
+/* DVS statistics generated by accelerator global configuration
+ */
+struct dvs_stat_public_dvs_global_cfg {
+ unsigned char kappa;
+ /** DVS statistics global configuration - kappa */
+ unsigned char match_shift;
+ /** DVS statistics global configuration - match_shift */
+ unsigned char ybin_mode;
+ /** DVS statistics global configuration - y binning mode */
+};
+
+/* DVS statistics generated by accelerator level grid
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_cfg {
+ unsigned char grid_width;
+ /** DVS statistics grid width */
+ unsigned char grid_height;
+ /** DVS statistics grid height */
+ unsigned char block_width;
+ /** DVS statistics block width */
+ unsigned char block_height;
+ /** DVS statistics block height */
+};
+
+/* DVS statistics generated by accelerator level grid start
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_start {
+ unsigned short x_start;
+ /** DVS statistics level x start */
+ unsigned short y_start;
+ /** DVS statistics level y start */
+ unsigned char enable;
+ /** DVS statistics level enable */
+};
+
+/* DVS statistics generated by accelerator level grid end
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_end {
+ unsigned short x_end;
+ /** DVS statistics level x end */
+ unsigned short y_end;
+ /** DVS statistics level y end */
+};
+
+/* DVS statistics generated by accelerator Feature Extraction
+ * Region Of Interest (FE-ROI) configuration
+ */
+struct dvs_stat_public_dvs_level_fe_roi_cfg {
+ unsigned char x_start;
+ /** DVS statistics fe-roi level x start */
+ unsigned char y_start;
+ /** DVS statistics fe-roi level y start */
+ unsigned char x_end;
+ /** DVS statistics fe-roi level x end */
+ unsigned char y_end;
+ /** DVS statistics fe-roi level y end */
+};
+
+/* DVS statistics generated by accelerator public configuration
+ */
+struct dvs_stat_public_dvs_grd_cfg {
+ struct dvs_stat_public_dvs_level_grid_cfg grd_cfg;
+ /** DVS statistics level grid configuration */
+ struct dvs_stat_public_dvs_level_grid_start grd_start;
+ /** DVS statistics level grid start configuration */
+ struct dvs_stat_public_dvs_level_grid_end grd_end;
+ /** DVS statistics level grid end configuration */
+};
+
+/* DVS statistics grid generated by accelerator
+ */
+struct ia_css_dvs_stat_grid_info {
+ struct dvs_stat_public_dvs_global_cfg dvs_gbl_cfg;
+ /** DVS statistics global configuration (kappa, match, binning) */
+ struct dvs_stat_public_dvs_grd_cfg grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
+ /** DVS statistics grid configuration (blocks and grids) */
+ struct dvs_stat_public_dvs_level_fe_roi_cfg
+ fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
+ /** DVS statistics FE ROI (region of interest) configuration */
+};
+
+/* DVS statistics generated by accelerator default grid info
+ */
+#define DEFAULT_DVS_GRID_INFO { \
+ .dvs_stat_grid_info = { \
+ .fe_roi_cfg = { \
+ [1] = { \
+ .x_start = 4 \
+ } \
+ } \
+ } \
+}
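
    DEFAULT_DVS_GRID_INFO relies on C99 designated initializers: only fe_roi_cfg[1].x_start is
    set and every other member is zero-initialized. A reduced standalone sketch of the same
    pattern (the struct definitions here are simplified stand-ins, not the real CSS types):

    #include <stdio.h>

    struct fe_roi_cfg { unsigned char x_start, y_start, x_end, y_end; };
    struct grid_info  { struct fe_roi_cfg fe_roi_cfg[3]; };

    #define DEFAULT_GRID_INFO { \
        .fe_roi_cfg = { \
            [1] = { \
                .x_start = 4 \
            } \
        } \
    }

    int main(void)
    {
        struct grid_info g = DEFAULT_GRID_INFO;

        /* Only fe_roi_cfg[1].x_start is 4; all other members are zero. */
        printf("%u %u %u\n", g.fe_roi_cfg[0].x_start,
               g.fe_roi_cfg[1].x_start, g.fe_roi_cfg[2].x_start);   /* 0 4 0 */
        return 0;
    }
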
+
+/* Union that holds all types of DVS statistics grid info in
+ * CSS format
+ */
+union ia_css_dvs_grid_u {
+ struct ia_css_dvs_stat_grid_info dvs_stat_grid_info;
+ /** DVS statistics produced by accelerator grid info */
+ struct ia_css_dvs_grid_info dvs_grid_info;
+ /** DVS (DVS1/DVS2) grid info */
+};
+
+#endif /* __IA_CSS_SDIS_COMMON_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
new file mode 100644
index 000000000000..a1bea8bd1a39
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "ia_css_sdis_types.h"
+#include "sdis/common/ia_css_sdis_common.host.h"
+#include "ia_css_sdis.host.h"
+
+const struct ia_css_dvs_coefficients default_sdis_config = {
+ .grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .hor_coefs = NULL,
+ .ver_coefs = NULL
+};
+
+static void
+fill_row(short *private, const short *public, unsigned int width,
+ unsigned int padding)
+{
+ assert((int)width >= 0);
+ assert((int)padding >= 0);
+ memcpy(private, public, width * sizeof(short));
+ memset(&private[width], 0, padding * sizeof(short));
+}
+
+void ia_css_sdis_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_width = from->grid.aligned_width *
+ from->grid.bqs_per_grid_cell;
+ unsigned int width = from->grid.num_hor_coefs;
+ int padding = aligned_width - width;
+ unsigned int stride = size / IA_CSS_DVS_NUM_COEF_TYPES / sizeof(short);
+ unsigned int total_bytes = aligned_width * IA_CSS_DVS_NUM_COEF_TYPES * sizeof(
+ short);
+ short *public = from->hor_coefs;
+ short *private = (short *)to;
+ unsigned int type;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
+ short)) == 0);
+
+ for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
+ fill_row(&private[type * stride], &public[type * width], width, padding);
+ }
+}
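
    The VMEM encoders above lay out one zero-padded row of `stride` shorts per coefficient
    type, with fill_row() doing the copy-then-pad. A standalone sketch of that per-row
    behaviour with a tiny buffer (the sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Same copy-then-zero-pad pattern as fill_row() above. */
    static void fill_row(short *dst, const short *src, unsigned int width,
                         unsigned int padding)
    {
        memcpy(dst, src, width * sizeof(short));
        memset(&dst[width], 0, padding * sizeof(short));
    }

    int main(void)
    {
        short coefs[3] = { 11, 22, 33 };    /* 3 valid coefficients */
        short row[8];                       /* padded row (stride = 8) */
        unsigned int i;

        fill_row(row, coefs, 3, 8 - 3);

        for (i = 0; i < 8; i++)
            printf("%d ", row[i]);          /* 11 22 33 0 0 0 0 0 */
        printf("\n");
        return 0;
    }
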
+
+void ia_css_sdis_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_height = from->grid.aligned_height *
+ from->grid.bqs_per_grid_cell;
+ unsigned int height = from->grid.num_ver_coefs;
+ int padding = aligned_height - height;
+ unsigned int stride = size / IA_CSS_DVS_NUM_COEF_TYPES / sizeof(short);
+ unsigned int total_bytes = aligned_height * IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(short);
+ short *public = from->ver_coefs;
+ short *private = (short *)to;
+ unsigned int type;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
+ short)) == 0);
+
+ for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
+ fill_row(&private[type * stride], &public[type * height], height, padding);
+ }
+}
+
+void ia_css_sdis_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_sdis_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_get_isp_dis_coefficients(
+ struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int hor_num_isp, ver_num_isp;
+ unsigned int hor_num_3a, ver_num_3a;
+ int i;
+ struct ia_css_binary *dvs_binary;
+
+ IA_CSS_ENTER("void");
+
+ assert(horizontal_coefficients);
+ assert(vertical_coefficients);
+
+ params = stream->isp_params_configs;
+
+ /* Only video pipe supports DVS */
+ dvs_binary = ia_css_stream_get_dvs_binary(stream);
+ if (!dvs_binary)
+ return;
+
+ hor_num_isp = dvs_binary->dis.coef.pad.width;
+ ver_num_isp = dvs_binary->dis.coef.pad.height;
+ hor_num_3a = dvs_binary->dis.coef.dim.width;
+ ver_num_3a = dvs_binary->dis.coef.dim.height;
+
+ for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
+ fill_row(&horizontal_coefficients[i * hor_num_isp],
+ &params->dvs_coefs.hor_coefs[i * hor_num_3a], hor_num_3a,
+ hor_num_isp - hor_num_3a);
+ }
+ for (i = 0; i < SH_CSS_DIS_VER_NUM_COEF_TYPES(dvs_binary); i++) {
+ fill_row(&vertical_coefficients[i * ver_num_isp],
+ &params->dvs_coefs.ver_coefs[i * ver_num_3a], ver_num_3a,
+ ver_num_isp - ver_num_3a);
+ }
+
+ IA_CSS_LEAVE("void");
+}
+
+size_t
+ia_css_sdis_hor_coef_tbl_bytes(
+ const struct ia_css_binary *binary)
+{
+ if (binary->info->sp.pipeline.isp_pipe_version == 1)
+ return sizeof(short) * IA_CSS_DVS_NUM_COEF_TYPES * binary->dis.coef.pad.width;
+ else
+ return sizeof(short) * IA_CSS_DVS2_NUM_COEF_TYPES * binary->dis.coef.pad.width;
+}
+
+size_t
+ia_css_sdis_ver_coef_tbl_bytes(
+ const struct ia_css_binary *binary)
+{
+ return sizeof(short) * SH_CSS_DIS_VER_NUM_COEF_TYPES(binary) *
+ binary->dis.coef.pad.height;
+}
+
+void
+ia_css_sdis_init_info(
+ struct ia_css_sdis_info *dis,
+ unsigned int sc_3a_dis_width,
+ unsigned int sc_3a_dis_padded_width,
+ unsigned int sc_3a_dis_height,
+ unsigned int isp_pipe_version,
+ unsigned int enabled)
+{
+ if (!enabled) {
+ *dis = (struct ia_css_sdis_info) { };
+ return;
+ }
+
+ dis->deci_factor_log2 = SH_CSS_DIS_DECI_FACTOR_LOG2;
+
+ dis->grid.dim.width =
+ _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->grid.dim.height =
+ _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->grid.pad.width =
+ CEIL_SHIFT(_ISP_BQS(sc_3a_dis_padded_width), SH_CSS_DIS_DECI_FACTOR_LOG2);
+ dis->grid.pad.height =
+ CEIL_SHIFT(_ISP_BQS(sc_3a_dis_height), SH_CSS_DIS_DECI_FACTOR_LOG2);
+
+ dis->coef.dim.width =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) <<
+ SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->coef.dim.height =
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2) <<
+ SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->coef.pad.width =
+ __ISP_SDIS_HOR_COEF_NUM_VECS(sc_3a_dis_padded_width) * ISP_VEC_NELEMS;
+ dis->coef.pad.height =
+ __ISP_SDIS_VER_COEF_NUM_VECS(sc_3a_dis_height) * ISP_VEC_NELEMS;
+ if (isp_pipe_version == 1) {
+ dis->proj.dim.width =
+ _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->proj.dim.height =
+ _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ } else {
+ dis->proj.dim.width =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
+ dis->proj.dim.height =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
+ }
+ dis->proj.pad.width =
+ __ISP_SDIS_HOR_PROJ_NUM_ISP(sc_3a_dis_padded_width,
+ sc_3a_dis_height,
+ SH_CSS_DIS_DECI_FACTOR_LOG2,
+ isp_pipe_version);
+ dis->proj.pad.height =
+ __ISP_SDIS_VER_PROJ_NUM_ISP(sc_3a_dis_padded_width,
+ SH_CSS_DIS_DECI_FACTOR_LOG2);
+}
+
+void ia_css_sdis_clear_coefficients(
+ struct ia_css_dvs_coefficients *dvs_coefs)
+{
+ dvs_coefs->hor_coefs = NULL;
+ dvs_coefs->ver_coefs = NULL;
+}
+
+int
+ia_css_get_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats)
+{
+ struct ia_css_isp_dvs_statistics_map *map;
+ int ret = 0;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats);
+ assert(isp_stats);
+
+ map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
+ if (map) {
+ hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_dvs_statistics(host_stats, map);
+ ia_css_isp_dvs_statistics_map_free(map);
+ } else {
+ IA_CSS_ERROR("out of memory");
+ ret = -ENOMEM;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats)
+{
+ unsigned int hor_num_isp, ver_num_isp, hor_num_dvs, ver_num_dvs, i;
+ s32 *hor_ptr_dvs, *ver_ptr_dvs, *hor_ptr_isp, *ver_ptr_isp;
+
+ assert(host_stats);
+ assert(host_stats->hor_proj);
+ assert(host_stats->ver_proj);
+ assert(isp_stats);
+ assert(isp_stats->hor_proj);
+ assert(isp_stats->ver_proj);
+
+ IA_CSS_ENTER("hproj=%p, vproj=%p, haddr=%p, vaddr=%p",
+ host_stats->hor_proj, host_stats->ver_proj,
+ isp_stats->hor_proj, isp_stats->ver_proj);
+
+ hor_num_isp = host_stats->grid.aligned_height;
+ ver_num_isp = host_stats->grid.aligned_width;
+ hor_ptr_isp = isp_stats->hor_proj;
+ ver_ptr_isp = isp_stats->ver_proj;
+ hor_num_dvs = host_stats->grid.height;
+ ver_num_dvs = host_stats->grid.width;
+ hor_ptr_dvs = host_stats->hor_proj;
+ ver_ptr_dvs = host_stats->ver_proj;
+
+ for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
+ memcpy(hor_ptr_dvs, hor_ptr_isp, hor_num_dvs * sizeof(int32_t));
+ hor_ptr_isp += hor_num_isp;
+ hor_ptr_dvs += hor_num_dvs;
+
+ memcpy(ver_ptr_dvs, ver_ptr_isp, ver_num_dvs * sizeof(int32_t));
+ ver_ptr_isp += ver_num_isp;
+ ver_ptr_dvs += ver_num_dvs;
+ }
+
+ IA_CSS_LEAVE("void");
+}
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_isp_dvs_statistics *me;
+ int hor_size, ver_size;
+
+ assert(grid);
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ if (!grid->enable)
+ return NULL;
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+ hor_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES *
+ grid->aligned_height,
+ HIVE_ISP_DDR_WORD_BYTES);
+ ver_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES *
+ grid->aligned_width,
+ HIVE_ISP_DDR_WORD_BYTES);
+
+ me->size = hor_size + ver_size;
+ me->data_ptr = hmm_alloc(me->size);
+ if (me->data_ptr == mmgr_NULL)
+ goto err;
+ me->hor_size = hor_size;
+ me->hor_proj = me->data_ptr;
+ me->ver_size = ver_size;
+ me->ver_proj = me->data_ptr + hor_size;
+
+ IA_CSS_LEAVE("return=%p", me);
+
+ return me;
+err:
+ ia_css_isp_dvs_statistics_free(me);
+
+ IA_CSS_LEAVE("return=%p", NULL);
+
+ return NULL;
+}
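
    ia_css_isp_dvs_statistics_allocate() packs the horizontal and vertical projection tables
    into one ISP buffer, each rounded up to the DDR word size. A standalone sketch of the size
    and offset arithmetic, with CEIL_MUL redefined locally, HIVE_ISP_DDR_WORD_BYTES assumed to
    be 32, and an example grid:

    #include <stdio.h>

    #define CEIL_MUL(a, b)  (((a) + (b) - 1) / (b) * (b))

    int main(void)
    {
        unsigned int ddr_word = 32;         /* assumed HIVE_ISP_DDR_WORD_BYTES */
        unsigned int num_coef_types = 6;    /* example; the real count is the macro value */
        unsigned int aligned_w = 10, aligned_h = 6; /* example grid */

        size_t hor_size = CEIL_MUL(sizeof(int) * num_coef_types * aligned_h, ddr_word);
        size_t ver_size = CEIL_MUL(sizeof(int) * num_coef_types * aligned_w, ddr_word);

        /* Layout inside the single allocation, as in the function above:
         * [0, hor_size)                  horizontal projections
         * [hor_size, hor_size+ver_size)  vertical projections */
        printf("hor=%zu ver=%zu total=%zu ver_offset=%zu\n",
               hor_size, ver_size, hor_size + ver_size, hor_size); /* 160 256 416 160 */
        return 0;
    }
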
+
+struct ia_css_isp_dvs_statistics_map *
+ia_css_isp_dvs_statistics_map_allocate(
+ const struct ia_css_isp_dvs_statistics *isp_stats,
+ void *data_ptr)
+{
+ struct ia_css_isp_dvs_statistics_map *me;
+ /* Windows compiler does not like adding sizes to a void *
+ * so we use a local char * instead. */
+ char *base_ptr;
+
+ me = kvmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me) {
+ IA_CSS_LOG("cannot allocate memory");
+ goto err;
+ }
+
+ me->data_ptr = data_ptr;
+ me->data_allocated = !data_ptr;
+
+ if (!me->data_ptr) {
+ me->data_ptr = kvmalloc(isp_stats->size, GFP_KERNEL);
+ if (!me->data_ptr) {
+ IA_CSS_LOG("cannot allocate memory");
+ goto err;
+ }
+ }
+ base_ptr = me->data_ptr;
+
+ me->size = isp_stats->size;
+ /* GCC complains when we assign a char * to a void *, so these
+ * casts are necessary unfortunately. */
+ me->hor_proj = (void *)base_ptr;
+ me->ver_proj = (void *)(base_ptr + isp_stats->hor_size);
+
+ return me;
+err:
+ kvfree(me);
+ return NULL;
+}
+
+void
+ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me)
+{
+ if (me) {
+ if (me->data_allocated)
+ kvfree(me->data_ptr);
+ kvfree(me);
+ }
+}
+
+void
+ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me)
+{
+ if (me) {
+ hmm_free(me->data_ptr);
+ kvfree(me);
+ }
+}
+
+void ia_css_sdis_horicoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_vertcoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_horiproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_vertproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h
new file mode 100644
index 000000000000..c8566dafcbe5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SDIS_HOST_H
+#define __IA_CSS_SDIS_HOST_H
+
+#include "ia_css_sdis_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_stream.h"
+#include "sh_css_params.h"
+
+extern const struct ia_css_dvs_coefficients default_sdis_config;
+
+/* Opaque here, since size is binary dependent. */
+struct sh_css_isp_sdis_hori_coef_tbl;
+struct sh_css_isp_sdis_vert_coef_tbl;
+struct sh_css_isp_sdis_hori_proj_tbl;
+struct sh_css_isp_sdis_vert_proj_tbl;
+
+void ia_css_sdis_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned int size);
+
+void ia_css_get_isp_dis_coefficients(
+ struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients);
+
+int
+ia_css_get_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid);
+
+void
+ia_css_isp_dvs_statistics_free(
+ struct ia_css_isp_dvs_statistics *me);
+
+size_t ia_css_sdis_hor_coef_tbl_bytes(const struct ia_css_binary *binary);
+size_t ia_css_sdis_ver_coef_tbl_bytes(const struct ia_css_binary *binary);
+
+void
+ia_css_sdis_init_info(
+ struct ia_css_sdis_info *dis,
+ unsigned int sc_3a_dis_width,
+ unsigned int sc_3a_dis_padded_width,
+ unsigned int sc_3a_dis_height,
+ unsigned int isp_pipe_version,
+ unsigned int enabled);
+
+void ia_css_sdis_clear_coefficients(
+ struct ia_css_dvs_coefficients *dvs_coefs);
+
+void ia_css_sdis_horicoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+void ia_css_sdis_vertcoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+void ia_css_sdis_horiproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+void ia_css_sdis_vertproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned int level);
+
+#endif /* __IA_CSS_SDIS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
new file mode 100644
index 000000000000..02a91968c37a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SDIS_TYPES_H
+#define __IA_CSS_SDIS_TYPES_H
+
+/* @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+/* Number of DVS coefficient types */
+#define IA_CSS_DVS_NUM_COEF_TYPES 6
+
+#ifndef PIPE_GENERATION
+#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
+#endif
+
+/* DVS 1.0 Coefficients.
+ * This structure describes the coefficients that are needed for the dvs statistics.
+ */
+
+struct ia_css_dvs_coefficients {
+ struct ia_css_dvs_grid_info
+ grid;/** grid info contains the dimensions of the dvs grid */
+ s16 *hor_coefs; /** the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the horizontal coefficients */
+ s16 *ver_coefs; /** the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the vertical coefficients */
+};
+
+/* DVS 1.0 Statistics.
+ * This structure describes the statistics that are generated using the provided coefficients.
+ */
+
+struct ia_css_dvs_statistics {
+ struct ia_css_dvs_grid_info
+ grid;/** grid info contains the dimensions of the dvs grid */
+ s32 *hor_proj; /** the pointer to int32_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the horizontal projections */
+ s32 *ver_proj; /** the pointer to int32_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the vertical projections */
+};
+
+#endif /* __IA_CSS_SDIS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
new file mode 100644
index 000000000000..027eae0ca69e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include <assert_support.h>
+#include "ia_css_debug.h"
+#include "ia_css_sdis2.host.h"
+
+const struct ia_css_dvs2_coefficients default_sdis2_config = {
+ .grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .hor_coefs = { NULL, NULL, NULL, NULL },
+ .ver_coefs = { NULL, NULL, NULL, NULL },
+};
+
+static void
+fill_row(short *private, const short *public, unsigned int width,
+ unsigned int padding)
+{
+ memcpy(private, public, width * sizeof(short));
+ memset(&private[width], 0, padding * sizeof(short));
+}
+
+void ia_css_sdis2_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_width = from->grid.aligned_width *
+ from->grid.bqs_per_grid_cell;
+ unsigned int width = from->grid.num_hor_coefs;
+ int padding = aligned_width - width;
+ unsigned int stride = size / IA_CSS_DVS2_NUM_COEF_TYPES / sizeof(short);
+ unsigned int total_bytes = aligned_width * IA_CSS_DVS2_NUM_COEF_TYPES *
+ sizeof(short);
+ short *private = (short *)to;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
+ short)) == 0);
+ fill_row(&private[0 * stride], from->hor_coefs.odd_real, width, padding);
+ fill_row(&private[1 * stride], from->hor_coefs.odd_imag, width, padding);
+ fill_row(&private[2 * stride], from->hor_coefs.even_real, width, padding);
+ fill_row(&private[3 * stride], from->hor_coefs.even_imag, width, padding);
+}
+
+void ia_css_sdis2_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ unsigned int aligned_height = from->grid.aligned_height *
+ from->grid.bqs_per_grid_cell;
+ unsigned int height = from->grid.num_ver_coefs;
+ int padding = aligned_height - height;
+ unsigned int stride = size / IA_CSS_DVS2_NUM_COEF_TYPES / sizeof(short);
+ unsigned int total_bytes = aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES *
+ sizeof(short);
+ short *private = (short *)to;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
+ short)) == 0);
+ fill_row(&private[0 * stride], from->ver_coefs.odd_real, height, padding);
+ fill_row(&private[1 * stride], from->ver_coefs.odd_imag, height, padding);
+ fill_row(&private[2 * stride], from->ver_coefs.even_real, height, padding);
+ fill_row(&private[3 * stride], from->ver_coefs.even_imag, height, padding);
+}
+
+void ia_css_sdis2_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_sdis2_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_get_isp_dvs2_coefficients(
+ struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int hor_num_3a, ver_num_3a;
+ struct ia_css_binary *dvs_binary;
+
+ IA_CSS_ENTER("void");
+
+ assert(stream);
+ assert(hor_coefs_odd_real);
+ assert(hor_coefs_odd_imag);
+ assert(hor_coefs_even_real);
+ assert(hor_coefs_even_imag);
+ assert(ver_coefs_odd_real);
+ assert(ver_coefs_odd_imag);
+ assert(ver_coefs_even_real);
+ assert(ver_coefs_even_imag);
+
+ params = stream->isp_params_configs;
+
+ /* Only video pipe supports DVS */
+ dvs_binary = ia_css_stream_get_dvs_binary(stream);
+ if (!dvs_binary)
+ return;
+
+ hor_num_3a = dvs_binary->dis.coef.dim.width;
+ ver_num_3a = dvs_binary->dis.coef.dim.height;
+
+ memcpy(hor_coefs_odd_real, params->dvs2_coefs.hor_coefs.odd_real,
+ hor_num_3a * sizeof(short));
+ memcpy(hor_coefs_odd_imag, params->dvs2_coefs.hor_coefs.odd_imag,
+ hor_num_3a * sizeof(short));
+ memcpy(hor_coefs_even_real, params->dvs2_coefs.hor_coefs.even_real,
+ hor_num_3a * sizeof(short));
+ memcpy(hor_coefs_even_imag, params->dvs2_coefs.hor_coefs.even_imag,
+ hor_num_3a * sizeof(short));
+ memcpy(ver_coefs_odd_real, params->dvs2_coefs.ver_coefs.odd_real,
+ ver_num_3a * sizeof(short));
+ memcpy(ver_coefs_odd_imag, params->dvs2_coefs.ver_coefs.odd_imag,
+ ver_num_3a * sizeof(short));
+ memcpy(ver_coefs_even_real, params->dvs2_coefs.ver_coefs.even_real,
+ ver_num_3a * sizeof(short));
+ memcpy(ver_coefs_even_imag, params->dvs2_coefs.ver_coefs.even_imag,
+ ver_num_3a * sizeof(short));
+
+ IA_CSS_LEAVE("void");
+}
+
+void ia_css_sdis2_clear_coefficients(
+ struct ia_css_dvs2_coefficients *dvs2_coefs)
+{
+ dvs2_coefs->hor_coefs.odd_real = NULL;
+ dvs2_coefs->hor_coefs.odd_imag = NULL;
+ dvs2_coefs->hor_coefs.even_real = NULL;
+ dvs2_coefs->hor_coefs.even_imag = NULL;
+ dvs2_coefs->ver_coefs.odd_real = NULL;
+ dvs2_coefs->ver_coefs.odd_imag = NULL;
+ dvs2_coefs->ver_coefs.even_real = NULL;
+ dvs2_coefs->ver_coefs.even_imag = NULL;
+}
+
+int
+ia_css_get_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats)
+{
+ struct ia_css_isp_dvs_statistics_map *map;
+ int ret = 0;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats);
+ assert(isp_stats);
+
+ map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
+ if (map) {
+ hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_dvs2_statistics(host_stats, map);
+ ia_css_isp_dvs_statistics_map_free(map);
+ } else {
+ IA_CSS_ERROR("out of memory");
+ ret = -ENOMEM;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats)
+{
+ unsigned int size_bytes, table_width, table_size, height;
+ unsigned int src_offset = 0, dst_offset = 0;
+ s32 *htemp_ptr, *vtemp_ptr;
+
+ assert(host_stats);
+ assert(host_stats->hor_prod.odd_real);
+ assert(host_stats->hor_prod.odd_imag);
+ assert(host_stats->hor_prod.even_real);
+ assert(host_stats->hor_prod.even_imag);
+ assert(host_stats->ver_prod.odd_real);
+ assert(host_stats->ver_prod.odd_imag);
+ assert(host_stats->ver_prod.even_real);
+ assert(host_stats->ver_prod.even_imag);
+ assert(isp_stats);
+ assert(isp_stats->hor_proj);
+ assert(isp_stats->ver_proj);
+
+ IA_CSS_ENTER("hor_coefs.odd_real=%p, hor_coefs.odd_imag=%p, hor_coefs.even_real=%p, hor_coefs.even_imag=%p, ver_coefs.odd_real=%p, ver_coefs.odd_imag=%p, ver_coefs.even_real=%p, ver_coefs.even_imag=%p, haddr=%p, vaddr=%p",
+ host_stats->hor_prod.odd_real, host_stats->hor_prod.odd_imag,
+ host_stats->hor_prod.even_real, host_stats->hor_prod.even_imag,
+ host_stats->ver_prod.odd_real, host_stats->ver_prod.odd_imag,
+ host_stats->ver_prod.even_real, host_stats->ver_prod.even_imag,
+ isp_stats->hor_proj, isp_stats->ver_proj);
+
+ /* Host side: reflecting the true width in bytes */
+ size_bytes = host_stats->grid.aligned_width * sizeof(*htemp_ptr);
+
+ /* DDR side: need to be aligned to the system bus width */
+	/* statistics table width in terms of 32-bit words */
+ table_width = CEIL_MUL(size_bytes,
+ HIVE_ISP_DDR_WORD_BYTES) / sizeof(*htemp_ptr);
+ table_size = table_width * host_stats->grid.aligned_height;
+
+ htemp_ptr = isp_stats->hor_proj; /* horizontal stats */
+ vtemp_ptr = isp_stats->ver_proj; /* vertical stats */
+ for (height = 0; height < host_stats->grid.aligned_height; height++) {
+ /* hor stats */
+ memcpy(host_stats->hor_prod.odd_real + dst_offset,
+ &htemp_ptr[0 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.odd_imag + dst_offset,
+ &htemp_ptr[1 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.even_real + dst_offset,
+ &htemp_ptr[2 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.even_imag + dst_offset,
+ &htemp_ptr[3 * table_size + src_offset], size_bytes);
+
+ /* ver stats */
+ memcpy(host_stats->ver_prod.odd_real + dst_offset,
+ &vtemp_ptr[0 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.odd_imag + dst_offset,
+ &vtemp_ptr[1 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.even_real + dst_offset,
+ &vtemp_ptr[2 * table_size + src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.even_imag + dst_offset,
+ &vtemp_ptr[3 * table_size + src_offset], size_bytes);
+
+ src_offset += table_width; /* aligned table width */
+ dst_offset += host_stats->grid.aligned_width;
+ }
+
+ IA_CSS_LEAVE("void");
+}
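+
+/*
+ * Worked example (illustration only, assuming HIVE_ISP_DDR_WORD_BYTES == 32):
+ * for a hypothetical grid with aligned_width == 25,
+ *	size_bytes  = 25 * sizeof(s32)      = 100 bytes per host row
+ *	table_width = CEIL_MUL(100, 32) / 4 = 32 words per ISP row
+ * so src_offset advances by 32 words per row while only the first 25 words
+ * of each row are copied into the host-side statistics buffers.
+ */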
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_isp_dvs_statistics *me;
+ int size;
+
+ assert(grid);
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ if (!grid->enable)
+ return NULL;
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+	/* In the ISP 2 SDIS DMA model, every row of the projection table must be
+	 * aligned to HIVE_ISP_DDR_WORD_BYTES.
+	 */
+ size = CEIL_MUL(sizeof(int) * grid->aligned_width, HIVE_ISP_DDR_WORD_BYTES)
+ * grid->aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES;
+
+ me->size = 2 * size;
+ me->data_ptr = hmm_alloc(me->size);
+ if (me->data_ptr == mmgr_NULL)
+ goto err;
+ me->hor_proj = me->data_ptr;
+ me->hor_size = size;
+ me->ver_proj = me->data_ptr + size;
+ me->ver_size = size;
+
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+err:
+ ia_css_isp_dvs2_statistics_free(me);
+ IA_CSS_LEAVE("return=%p", NULL);
+
+ return NULL;
+}
+
+void
+ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me)
+{
+ if (me) {
+ hmm_free(me->data_ptr);
+ kvfree(me);
+ }
+}
+
+void ia_css_sdis2_horicoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_vertcoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_horiproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_vertproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h
new file mode 100644
index 000000000000..e8ec4066c1cb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SDIS2_HOST_H
+#define __IA_CSS_SDIS2_HOST_H
+
+#include "ia_css_sdis2_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_stream.h"
+#include "sh_css_params.h"
+
+extern const struct ia_css_dvs2_coefficients default_sdis2_config;
+
+/* Opaque here, since size is binary dependent. */
+struct sh_css_isp_sdis_hori_coef_tbl;
+struct sh_css_isp_sdis_vert_coef_tbl;
+struct sh_css_isp_sdis_hori_proj_tbl;
+struct sh_css_isp_sdis_vert_proj_tbl;
+
+void ia_css_sdis2_horicoef_vmem_encode(
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis2_vertcoef_vmem_encode(
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis2_horiproj_encode(
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_sdis2_vertproj_encode(
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned int size);
+
+void ia_css_get_isp_dvs2_coefficients(
+ struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag);
+
+void ia_css_sdis2_clear_coefficients(
+ struct ia_css_dvs2_coefficients *dvs2_coefs);
+
+int
+ia_css_get_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid);
+
+void
+ia_css_isp_dvs2_statistics_free(
+ struct ia_css_isp_dvs_statistics *me);
+
+void ia_css_sdis2_horicoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+void ia_css_sdis2_vertcoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+void ia_css_sdis2_horiproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+void ia_css_sdis2_vertproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned int level);
+
+#endif /* __IA_CSS_SDIS2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
new file mode 100644
index 000000000000..2bed08435755
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SDIS2_TYPES_H
+#define __IA_CSS_SDIS2_TYPES_H
+
+/* @file
+ * CSS-API header file for DVS statistics parameters.
+ */
+
+/* Number of DVS coefficient types */
+#define IA_CSS_DVS2_NUM_COEF_TYPES 4
+
+#ifndef PIPE_GENERATION
+#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
+#endif
+
+/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
+ * arrays that contain the coefficients for each type.
+ */
+struct ia_css_dvs2_coef_types {
+ s16 *odd_real; /** real part of the odd coefficients*/
+ s16 *odd_imag; /** imaginary part of the odd coefficients*/
+ s16 *even_real;/** real part of the even coefficients*/
+ s16 *even_imag;/** imaginary part of the even coefficients*/
+};
+
+/* DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
+ * e.g. hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real
+ * coefficients.
+ */
+struct ia_css_dvs2_coefficients {
+	struct ia_css_dvs_grid_info grid;        /** grid info contains the dimensions of the dvs grid */
+	struct ia_css_dvs2_coef_types hor_coefs; /** struct with pointers that contain the horizontal coefficients */
+	struct ia_css_dvs2_coef_types ver_coefs; /** struct with pointers that contain the vertical coefficients */
+};
+
+/* DVS 2.0 Statistic types. This structure contains 4 pointers to
+ * arrays that contain the statistics for each type.
+ */
+struct ia_css_dvs2_stat_types {
+ s32 *odd_real; /** real part of the odd statistics*/
+ s32 *odd_imag; /** imaginary part of the odd statistics*/
+ s32 *even_real;/** real part of the even statistics*/
+ s32 *even_imag;/** imaginary part of the even statistics*/
+};
+
+/* DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
+ * e.g. hor_prod.odd_real is the pointer to s32[grid.aligned_height][grid.aligned_width] containing
+ * the horizontal odd real statistics. Valid statistics data area is s32[0..grid.height-1][0..grid.width-1]
+ */
+struct ia_css_dvs2_statistics {
+	struct ia_css_dvs_grid_info grid;       /** grid info contains the dimensions of the dvs grid */
+	struct ia_css_dvs2_stat_types hor_prod; /** struct with pointers that contain the horizontal statistics */
+	struct ia_css_dvs2_stat_types ver_prod; /** struct with pointers that contain the vertical statistics */
+};
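+
+/*
+ * Illustrative sketch (an assumption, not part of this API): a host-side
+ * consumer could size each statistics plane referenced above from the grid
+ * dimensions, one allocation per pointer in ia_css_dvs2_stat_types, e.g.
+ *
+ *	s32 *plane = kcalloc(grid.aligned_width * grid.aligned_height,
+ *			     sizeof(s32), GFP_KERNEL);
+ */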
+
+#endif /* __IA_CSS_SDIS2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c
new file mode 100644
index 000000000000..ce93a07a7961
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_debug.h"
+#include "ia_css_tdf.host.h"
+
+static const s16 g_pyramid[8][8] = {
+ {128, 384, 640, 896, 896, 640, 384, 128},
+ {384, 1152, 1920, 2688, 2688, 1920, 1152, 384},
+ {640, 1920, 3200, 4480, 4480, 3200, 1920, 640},
+ {896, 2688, 4480, 6272, 6272, 4480, 2688, 896},
+ {896, 2688, 4480, 6272, 6272, 4480, 2688, 896},
+ {640, 1920, 3200, 4480, 4480, 3200, 1920, 640},
+ {384, 1152, 1920, 2688, 2688, 1920, 1152, 384},
+ {128, 384, 640, 896, 896, 640, 384, 128}
+};
+
+void
+ia_css_tdf_vmem_encode(
+ struct ia_css_isp_tdf_vmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size)
+{
+ unsigned int i;
+ (void)size;
+
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->pyramid[0][i] = g_pyramid[i / 8][i % 8];
+ to->threshold_flat[0][i] = from->thres_flat_table[i];
+ to->threshold_detail[0][i] = from->thres_detail_table[i];
+ }
+}
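+
+/*
+ * Layout note (illustration only, assuming ISP_VEC_NELEMS == 64): the loop
+ * above flattens the 8x8 g_pyramid row by row, so vector elements 0..7 hold
+ * pyramid row 0, elements 8..15 hold row 1, and so on, via
+ *	to->pyramid[0][i] = g_pyramid[i / 8][i % 8];
+ */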
+
+void
+ia_css_tdf_encode(
+ struct ia_css_isp_tdf_dmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size)
+{
+ (void)size;
+ to->Epsilon_0 = from->epsilon_0;
+ to->Epsilon_1 = from->epsilon_1;
+ to->EpsScaleText = from->eps_scale_text;
+ to->EpsScaleEdge = from->eps_scale_edge;
+ to->Sepa_flat = from->sepa_flat;
+ to->Sepa_Edge = from->sepa_edge;
+ to->Blend_Flat = from->blend_flat;
+ to->Blend_Text = from->blend_text;
+ to->Blend_Edge = from->blend_edge;
+ to->Shading_Gain = from->shading_gain;
+ to->Shading_baseGain = from->shading_base_gain;
+ to->LocalY_Gain = from->local_y_gain;
+ to->LocalY_baseGain = from->local_y_base_gain;
+}
+
+void
+ia_css_tdf_debug_dtrace(
+ const struct ia_css_tdf_config *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h
new file mode 100644
index 000000000000..22ec8745a083
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TDF_HOST_H
+#define __IA_CSS_TDF_HOST_H
+
+#include "ia_css_tdf_types.h"
+#include "ia_css_tdf_param.h"
+
+void
+ia_css_tdf_vmem_encode(
+ struct ia_css_isp_tdf_vmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size);
+
+void
+ia_css_tdf_encode(
+ struct ia_css_isp_tdf_dmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size);
+
+void
+ia_css_tdf_debug_dtrace(
+	const struct ia_css_tdf_config *config, unsigned int level);
+
+#endif /* __IA_CSS_TDF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h
new file mode 100644
index 000000000000..512063894ee5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TDF_PARAM_H
+#define __IA_CSS_TDF_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+struct ia_css_isp_tdf_vmem_params {
+ VMEM_ARRAY(pyramid, ISP_VEC_NELEMS);
+ VMEM_ARRAY(threshold_flat, ISP_VEC_NELEMS);
+ VMEM_ARRAY(threshold_detail, ISP_VEC_NELEMS);
+};
+
+struct ia_css_isp_tdf_dmem_params {
+ s32 Epsilon_0;
+ s32 Epsilon_1;
+ s32 EpsScaleText;
+ s32 EpsScaleEdge;
+ s32 Sepa_flat;
+ s32 Sepa_Edge;
+ s32 Blend_Flat;
+ s32 Blend_Text;
+ s32 Blend_Edge;
+ s32 Shading_Gain;
+ s32 Shading_baseGain;
+ s32 LocalY_Gain;
+ s32 LocalY_baseGain;
+};
+
+#endif /* __IA_CSS_TDF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
new file mode 100644
index 000000000000..a462365783af
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TDF_TYPES_H
+#define __IA_CSS_TDF_TYPES_H
+
+/* @file
+ * CSS-API header file for Transform Domain Filter parameters.
+ */
+
+#include "type_support.h"
+
+/* Transform Domain Filter configuration
+ *
+ * \brief TDF public parameters.
+ * \details Struct with all parameters for the TDF kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: TDF is used.
+ */
+struct ia_css_tdf_config {
+ s32 thres_flat_table[64]; /** Final optimized strength table of NR for flat region. */
+ s32 thres_detail_table[64]; /** Final optimized strength table of NR for detail region. */
+ s32 epsilon_0; /** Coefficient to control variance for dark area (for flat region). */
+ s32 epsilon_1; /** Coefficient to control variance for bright area (for flat region). */
+ s32 eps_scale_text; /** Epsilon scaling coefficient for texture region. */
+ s32 eps_scale_edge; /** Epsilon scaling coefficient for edge region. */
+ s32 sepa_flat; /** Threshold to judge flat (edge < m_Flat_thre). */
+ s32 sepa_edge; /** Threshold to judge edge (edge > m_Edge_thre). */
+ s32 blend_flat; /** Blending ratio at flat region. */
+ s32 blend_text; /** Blending ratio at texture region. */
+ s32 blend_edge; /** Blending ratio at edge region. */
+ s32 shading_gain; /** Gain of Shading control. */
+ s32 shading_base_gain; /** Base Gain of Shading control. */
+ s32 local_y_gain; /** Gain of local luminance control. */
+ s32 local_y_base_gain; /** Base gain of local luminance control. */
+ s32 rad_x_origin; /** Initial x coord. for radius computation. */
+ s32 rad_y_origin; /** Initial y coord. for radius computation. */
+};
+
+#endif /* __IA_CSS_TDF_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
new file mode 100644
index 000000000000..42b760ffac67
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_TNR3_TYPES_H
+#define _IA_CSS_TNR3_TYPES_H
+
+/* @file
+ * CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel
+ */
+
+/**
+ * \brief Number of piecewise linear segments.
+ * \details The parameters to TNR3 are specified as a piecewise linear segment.
+ * The number of such segments is fixed at 3.
+ */
+#define TNR3_NUM_SEGMENTS 3
+
+/* Temporal Noise Reduction v3 (TNR3) configuration.
+ * The parameter to this kernel is fourfold
+ * 1. Three piecewise linear graphs (one for each plane) with three segments
+ * each. Each line graph has Luma values on the x axis and sigma values for
+ * each plane on the y axis. The three linear segments may have a different
+ * slope, and the luma value at which the slope may change is called
+ * a "Knee" point. As there are three such segments, four points need to be
+ * specified each on the Luma axis and the per plane Sigma axis. On the Luma
+ * axis two points are fixed (namely 0 and maximum luma value - depending on
+ * ISP bit depth). The other two points are the points where the slope may
+ * change its value. These two points are called knee points. The four points on
+ * the per plane sigma axis are also specified at the interface.
+ * 2. One rounding adjustment parameter for each plane
+ * 3. One maximum feedback threshold value for each plane
+ * 4. Selection of the reference frame buffer to be used for noise reduction.
+ */
+struct ia_css_tnr3_kernel_config {
+ unsigned int maxfb_y; /** Maximum Feedback Gain for Y */
+ unsigned int maxfb_u; /** Maximum Feedback Gain for U */
+ unsigned int maxfb_v; /** Maximum Feedback Gain for V */
+ unsigned int round_adj_y; /** Rounding Adjust for Y */
+ unsigned int round_adj_u; /** Rounding Adjust for U */
+ unsigned int round_adj_v; /** Rounding Adjust for V */
+ unsigned int knee_y[TNR3_NUM_SEGMENTS - 1]; /** Knee points */
+	unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for Y at points Y0, Y1, Y2, Y3 */
+	unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for U at points U0, U1, U2, U3 */
+	unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for V at points V0, V1, V2, V3 */
+	unsigned int ref_buf_select; /** Selection of the reference buffer */
+};
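+
+/*
+ * Illustrative example (hypothetical tuning values, not defaults taken from
+ * this file): with TNR3_NUM_SEGMENTS == 3 there are two knee points on the
+ * luma axis and four sigma values per plane (points 0..3), e.g.
+ *
+ *	struct ia_css_tnr3_kernel_config cfg = {
+ *		.maxfb_y = 1024, .maxfb_u = 1024, .maxfb_v = 1024,
+ *		.round_adj_y = 0, .round_adj_u = 0, .round_adj_v = 0,
+ *		.knee_y  = { 4096, 8192 },
+ *		.sigma_y = { 100, 120, 140, 160 },
+ *		.sigma_u = {  80,  90, 100, 110 },
+ *		.sigma_v = {  80,  90, 100, 110 },
+ *		.ref_buf_select = 0,
+ *	};
+ */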
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
new file mode 100644
index 000000000000..31b96d93c3cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "ia_css_frame.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_tnr.host.h"
+
+const struct ia_css_tnr_config default_tnr_config = {
+ 32768,
+ 32,
+ 32,
+};
+
+void
+ia_css_tnr_encode(
+ struct sh_css_isp_tnr_params *to,
+ const struct ia_css_tnr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->coef =
+ uDIGIT_FITTING(from->gain, 16, SH_CSS_TNR_COEF_SHIFT);
+ to->threshold_Y =
+ uDIGIT_FITTING(from->threshold_y, 16, SH_CSS_ISP_YUV_BITS);
+ to->threshold_C =
+ uDIGIT_FITTING(from->threshold_uv, 16, SH_CSS_ISP_YUV_BITS);
+}
+
+void
+ia_css_tnr_dump(
+ const struct sh_css_isp_tnr_params *tnr,
+ unsigned int level)
+{
+	if (!tnr)
+		return;
+ ia_css_debug_dtrace(level, "Temporal Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_coef", tnr->coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_threshold_Y", tnr->threshold_Y);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_threshold_C", tnr->threshold_C);
+}
+
+void
+ia_css_tnr_debug_dtrace(
+ const struct ia_css_tnr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain=%d, config.threshold_y=%d, config.threshold_uv=%d\n",
+ config->gain,
+ config->threshold_y, config->threshold_uv);
+}
+
+int ia_css_tnr_config(struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ unsigned int i;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, &from->tnr_frames[0]->frame_info);
+ if (ret)
+ return ret;
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->frame_height = from->tnr_frames[0]->frame_info.res.height;
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
+ to->tnr_frame_addr[i] = from->tnr_frames[i]->data +
+ from->tnr_frames[i]->planes.yuyv.offset;
+ }
+
+	/* Assume divisibility here, may need to generalize to fixed point. */
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ia_css_tnr_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *frames)
+{
+ struct ia_css_tnr_configuration config;
+ unsigned int i;
+
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++)
+ config.tnr_frames[i] = frames[i];
+
+ return ia_css_configure_tnr(binary, &config);
+}
+
+void
+ia_css_init_tnr_state(
+ struct sh_css_isp_tnr_dmem_state *state,
+ size_t size)
+{
+ (void)size;
+
+ assert(NUM_VIDEO_TNR_FRAMES >= 2);
+ assert(sizeof(*state) == size);
+ state->tnr_in_buf_idx = 0;
+ state->tnr_out_buf_idx = 1;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
new file mode 100644
index 000000000000..3ab82458b945
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TNR_HOST_H
+#define __IA_CSS_TNR_HOST_H
+
+#include "ia_css_binary.h"
+#include "ia_css_tnr_state.h"
+#include "ia_css_tnr_types.h"
+#include "ia_css_tnr_param.h"
+
+extern const struct ia_css_tnr_config default_tnr_config;
+
+void
+ia_css_tnr_encode(
+ struct sh_css_isp_tnr_params *to,
+ const struct ia_css_tnr_config *from,
+ unsigned int size);
+
+void
+ia_css_tnr_dump(
+ const struct sh_css_isp_tnr_params *tnr,
+ unsigned int level);
+
+void
+ia_css_tnr_debug_dtrace(
+ const struct ia_css_tnr_config *config,
+ unsigned int level);
+
+int ia_css_tnr_config(struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned int size);
+
+int ia_css_tnr_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *frames);
+
+void
+ia_css_init_tnr_state(
+ struct sh_css_isp_tnr_dmem_state *state,
+ size_t size);
+#endif /* __IA_CSS_TNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
new file mode 100644
index 000000000000..1782a95aa0f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TNR_PARAM_H
+#define __IA_CSS_TNR_PARAM_H
+
+#include "type_support.h"
+#include "sh_css_defs.h"
+#include "dma.h"
+
+/* TNR (Temporal Noise Reduction) */
+struct sh_css_isp_tnr_params {
+ s32 coef;
+ s32 threshold_Y;
+ s32 threshold_C;
+};
+
+struct ia_css_tnr_configuration {
+ const struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
+};
+
+struct sh_css_isp_tnr_isp_config {
+ u32 width_a_over_b;
+ u32 frame_height;
+ struct dma_port_config port_b;
+ ia_css_ptr tnr_frame_addr[NUM_VIDEO_TNR_FRAMES];
+};
+
+#endif /* __IA_CSS_TNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h
new file mode 100644
index 000000000000..867a6211c0ea
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TNR_STATE_H
+#define __IA_CSS_TNR_STATE_H
+
+#include "type_support.h"
+
+/* TNR (temporal noise reduction) */
+struct sh_css_isp_tnr_dmem_state {
+ u32 tnr_in_buf_idx;
+ u32 tnr_out_buf_idx;
+};
+
+#endif /* __IA_CSS_TNR_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
new file mode 100644
index 000000000000..574444750986
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TNR_TYPES_H
+#define __IA_CSS_TNR_TYPES_H
+
+/* @file
+ * CSS-API header file for Temporal Noise Reduction (TNR) parameters.
+ */
+
+/* Temporal Noise Reduction (TNR) configuration.
+ *
+ * When the difference between the current frame and the previous frame is
+ * less than or equal to the threshold, TNR is applied and the current frame
+ * is mixed with the previous frame.
+ * When the difference is greater than the threshold, motion is assumed to be
+ * detected, TNR is not applied, and the current frame is output as-is.
+ * Therefore, setting threshold_y and threshold_uv to 0 disables TNR.
+ *
+ * ISP block: TNR1
+ * ISP1: TNR1 is used.
+ * ISP2: TNR1 is used.
+ */
+
+struct ia_css_tnr_config {
+ ia_css_u0_16 gain; /** Interpolation ratio of current frame
+ and previous frame.
+ gain=0.0 -> previous frame is outputted.
+ gain=1.0 -> current frame is outputted.
+ u0.16, [0,65535],
+ default 32768(0.5), ineffective 65535(almost 1.0) */
+ ia_css_u0_16 threshold_y; /** Threshold to enable interpolation of Y.
+ If difference between current frame and
+ previous frame is greater than threshold_y,
+ TNR for Y is disabled.
+ u0.16, [0,65535], default/ineffective 0 */
+ ia_css_u0_16 threshold_uv; /** Threshold to enable interpolation of
+ U/V.
+ If difference between current frame and
+ previous frame is greater than threshold_uv,
+ TNR for UV is disabled.
+ u0.16, [0,65535], default/ineffective 0 */
+};
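+
+/*
+ * Illustrative example (values chosen from the field descriptions above, not
+ * defined in this file): gain 32768 is 0.5 in u0.16, and with both thresholds
+ * at 0 the filter is effectively disabled:
+ *
+ *	struct ia_css_tnr_config tnr_off = {
+ *		.gain         = 32768,
+ *		.threshold_y  = 0,
+ *		.threshold_uv = 0,
+ *	};
+ */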
+
+#endif /* __IA_CSS_TNR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h
new file mode 100644
index 000000000000..520324949aa7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_UDS_PARAM_H
+#define __IA_CSS_UDS_PARAM_H
+
+#include "sh_css_uds.h"
+
+/* uds (Up and Down scaling) */
+struct ia_css_uds_config {
+ struct sh_css_crop_pos crop_pos;
+ struct sh_css_uds_info uds;
+};
+
+struct sh_css_sp_uds_params {
+ struct sh_css_crop_pos crop_pos;
+ struct sh_css_uds_info uds;
+};
+
+#endif /* __IA_CSS_UDS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
new file mode 100644
index 000000000000..3c675063c4a7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "atomisp_internal.h"
+
+#include "ia_css_vf.host.h"
+#include <assert_support.h>
+#include <ia_css_err.h>
+#include <ia_css_frame.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_pipeline.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "isp.h"
+
+int ia_css_vf_config(struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned int size)
+{
+ unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ to->vf_downscale_bits = from->vf_downscale_bits;
+ to->enable = from->info != NULL;
+
+ if (from->info) {
+ ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
+ ret = ia_css_dma_configure_from_info(&to->dma.port_b, from->info);
+ if (ret)
+ return ret;
+ to->dma.width_a_over_b = elems_a / to->dma.port_b.elems;
+
+		/* Assume divisibility here, may need to generalize to fixed point. */
+ if (elems_a % to->dma.port_b.elems != 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* compute the log2 of the downscale factor needed to get closest
+ * to the requested viewfinder resolution on the upper side. The output cannot
+ * be smaller than the requested viewfinder resolution.
+ */
+int
+sh_css_vf_downscale_log2(
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *vf_info,
+	unsigned int *downscale_log2)
+{
+ unsigned int ds_log2 = 0;
+ unsigned int out_width;
+
+ if ((!out_info) || (!vf_info))
+ return -EINVAL;
+
+ out_width = out_info->res.width;
+
+ if (out_width == 0)
+ return -EINVAL;
+
+	/* Downscale until the width is smaller than the viewfinder width. We don't
+ * test for the height since the vmem buffers only put restrictions on
+ * the width of a line, not on the number of lines in a frame.
+ */
+	while (out_width >= vf_info->res.width) {
+ ds_log2++;
+ out_width /= 2;
+ }
+ /* now width is smaller, so we go up one step */
+ if ((ds_log2 > 0) && (out_width < ia_css_binary_max_vf_width()))
+ ds_log2--;
+ /* TODO: use actual max input resolution of vf_pp binary */
+ if ((out_info->res.width >> ds_log2) >= 2 * ia_css_binary_max_vf_width())
+ return -EINVAL;
+ *downscale_log2 = ds_log2;
+ return 0;
+}
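+
+/*
+ * Worked example (assuming ia_css_binary_max_vf_width() returns a value
+ * larger than the intermediate width): for out_info->res.width == 1920 and
+ * vf_info->res.width == 640 the loop halves 1920 -> 960 -> 480 (ds_log2 = 2),
+ * then steps back up once, so *downscale_log2 = 1 and the 960-wide viewfinder
+ * output is not smaller than the requested 640.
+ */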
+
+static int
+configure_kernel(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2,
+	struct ia_css_vf_configuration *config)
+{
+ int err;
+ unsigned int vf_log_ds = 0;
+
+ /* First compute value */
+ if (vf_info) {
+ err = sh_css_vf_downscale_log2(out_info, vf_info, &vf_log_ds);
+ if (err)
+ return err;
+ }
+ vf_log_ds = min(vf_log_ds, info->vf_dec.max_log_downscale);
+ *downscale_log2 = vf_log_ds;
+
+ /* Then store it in isp config section */
+ config->vf_downscale_bits = vf_log_ds;
+ return 0;
+}
+
+static void
+configure_dma(
+ struct ia_css_vf_configuration *config,
+ const struct ia_css_frame_info *vf_info)
+{
+ config->info = vf_info;
+}
+
+int ia_css_vf_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2)
+{
+ int err;
+ struct ia_css_vf_configuration config;
+ const struct ia_css_binary_info *info = &binary->info->sp;
+
+ err = configure_kernel(info, out_info, vf_info, downscale_log2, &config);
+ if (err)
+ dev_warn(atomisp_dev, "Couldn't setup downscale\n");
+
+ configure_dma(&config, vf_info);
+
+ if (vf_info)
+ vf_info->raw_bit_depth = info->dma.vfdec_bits_per_pixel;
+
+ return ia_css_configure_vf(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
new file mode 100644
index 000000000000..d2085b7f4501
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_VF_HOST_H
+#define __IA_CSS_VF_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_vf_types.h"
+#include "ia_css_vf_param.h"
+
+/* compute the log2 of the downscale factor needed to get closest
+ * to the requested viewfinder resolution on the upper side. The output cannot
+ * be smaller than the requested viewfinder resolution.
+ */
+int
+sh_css_vf_downscale_log2(
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2);
+
+int ia_css_vf_config(struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned int size);
+
+int
+ia_css_vf_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2);
+
+#endif /* __IA_CSS_VF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
new file mode 100644
index 000000000000..cf1e4f401cd1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_VF_PARAM_H
+#define __IA_CSS_VF_PARAM_H
+
+#include "type_support.h"
+#include "dma.h"
+#include "gc/gc_1.0/ia_css_gc_param.h" /* GAMMA_OUTPUT_BITS */
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+#include "ia_css_vf_types.h"
+
+#define VFDEC_BITS_PER_PIXEL GAMMA_OUTPUT_BITS
+
+/* Viewfinder decimation */
+struct sh_css_isp_vf_isp_config {
+ u32 vf_downscale_bits; /** Log VF downscale value */
+ u32 enable;
+ struct ia_css_frame_sp_info info;
+ struct {
+ u32 width_a_over_b;
+ struct dma_port_config port_b;
+ } dma;
+};
+
+#endif /* __IA_CSS_VF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
new file mode 100644
index 000000000000..0319fb1a33db
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_VF_TYPES_H
+#define __IA_CSS_VF_TYPES_H
+
+/* Viewfinder decimation
+ *
+ * ISP block: vfeven_horizontal_downscale
+ */
+
+#include <ia_css_frame_public.h>
+#include <type_support.h>
+
+struct ia_css_vf_configuration {
+ u32 vf_downscale_bits; /** Log VF downscale value */
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_VF_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c
new file mode 100644
index 000000000000..bc18bdaaaf0d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+
+#include "ia_css_wb.host.h"
+
+const struct ia_css_wb_config default_wb_config = {
+ 1,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+
+void
+ia_css_wb_encode(
+ struct sh_css_isp_wb_params *to,
+ const struct ia_css_wb_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_shift =
+ uISP_REG_BIT - from->integer_bits;
+ to->gain_gr =
+ uDIGIT_FITTING(from->gr, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_r =
+ uDIGIT_FITTING(from->r, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_b =
+ uDIGIT_FITTING(from->b, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_gb =
+ uDIGIT_FITTING(from->gb, 16 - from->integer_bits,
+ to->gain_shift);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_wb_dump(
+ const struct sh_css_isp_wb_params *wb,
+ unsigned int level)
+{
+	if (!wb)
+		return;
+ ia_css_debug_dtrace(level, "White Balance:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_shift", wb->gain_shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_gr", wb->gain_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_r", wb->gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_b", wb->gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_gb", wb->gain_gb);
+}
+
+void
+ia_css_wb_debug_dtrace(
+ const struct ia_css_wb_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.integer_bits=%d, config.gr=%d, config.r=%d, config.b=%d, config.gb=%d\n",
+ config->integer_bits,
+ config->gr, config->r,
+ config->b, config->gb);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h
new file mode 100644
index 000000000000..841b6c6f7e7c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_WB_HOST_H
+#define __IA_CSS_WB_HOST_H
+
+#include "ia_css_wb_types.h"
+#include "ia_css_wb_param.h"
+
+extern const struct ia_css_wb_config default_wb_config;
+
+void
+ia_css_wb_encode(
+ struct sh_css_isp_wb_params *to,
+ const struct ia_css_wb_config *from,
+ unsigned int size);
+
+void
+ia_css_wb_dump(
+ const struct sh_css_isp_wb_params *wb,
+ unsigned int level);
+
+void
+ia_css_wb_debug_dtrace(
+ const struct ia_css_wb_config *wb,
+ unsigned int level);
+
+#endif /* __IA_CSS_WB_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h
new file mode 100644
index 000000000000..7a57c47e9246
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_WB_PARAM_H
+#define __IA_CSS_WB_PARAM_H
+
+#include "type_support.h"
+
+/* WB (White Balance) */
+struct sh_css_isp_wb_params {
+ s32 gain_shift;
+ s32 gain_gr;
+ s32 gain_r;
+ s32 gain_b;
+ s32 gain_gb;
+};
+
+#endif /* __IA_CSS_WB_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
new file mode 100644
index 000000000000..3a59c7de7387
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_WB_TYPES_H
+#define __IA_CSS_WB_TYPES_H
+
+/* @file
+ * CSS-API header file for White Balance parameters.
+ */
+
+/* White Balance configuration (Gain Adjust).
+ *
+ * ISP block: WB1
+ * ISP1: WB1 is used.
+ * ISP2: WB1 is used.
+ */
+struct ia_css_wb_config {
+ u32 integer_bits; /** Common exponent of gains.
+ u8.0, [0,3],
+ default 1, ineffective 1 */
+ u32 gr; /** Significand of Gr gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ u32 r; /** Significand of R gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ u32 b; /** Significand of B gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ u32 gb; /** Significand of Gb gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+};
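+
+/*
+ * Illustrative example (hypothetical gains, not defaults from this file):
+ * with integer_bits == 1 the significands are u1.15, so 32768 represents 1.0
+ * and 49152 represents 1.5. A configuration boosting only the red gain by
+ * 1.5x could look like:
+ *
+ *	struct ia_css_wb_config wb = {
+ *		.integer_bits = 1,
+ *		.gr = 32768,
+ *		.r  = 49152,
+ *		.b  = 32768,
+ *		.gb = 32768,
+ *	};
+ */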
+
+#endif /* __IA_CSS_WB_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
new file mode 100644
index 000000000000..9f9f3af9fb41
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_xnr.host.h"
+
+const struct ia_css_xnr_config default_xnr_config = {
+ /* default threshold 6400 translates to 25 on ISP. */
+ 6400
+};
+
+void
+ia_css_xnr_table_vamem_encode(
+ struct sh_css_isp_xnr_vamem_params *to,
+ const struct ia_css_xnr_table *from,
+ unsigned int size)
+{
+ (void)size;
+ memcpy(&to->xnr, &from->data, sizeof(to->xnr));
+}
+
+void
+ia_css_xnr_encode(
+ struct sh_css_isp_xnr_params *to,
+ const struct ia_css_xnr_config *from,
+ unsigned int size)
+{
+ (void)size;
+
+ to->threshold =
+ (uint16_t)uDIGIT_FITTING(from->threshold, 16, SH_CSS_ISP_YUV_BITS);
+}
+
+void
+ia_css_xnr_table_debug_dtrace(
+ const struct ia_css_xnr_table *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
+
+void
+ia_css_xnr_debug_dtrace(
+ const struct ia_css_xnr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d\n", config->threshold);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h
new file mode 100644
index 000000000000..b8e63486a8a5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_XNR_HOST_H
+#define __IA_CSS_XNR_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_xnr_param.h"
+#include "ia_css_xnr_table.host.h"
+
+extern const struct ia_css_xnr_config default_xnr_config;
+
+void
+ia_css_xnr_table_vamem_encode(
+ struct sh_css_isp_xnr_vamem_params *to,
+ const struct ia_css_xnr_table *from,
+ unsigned int size);
+
+void
+ia_css_xnr_encode(
+ struct sh_css_isp_xnr_params *to,
+ const struct ia_css_xnr_config *from,
+ unsigned int size);
+
+void
+ia_css_xnr_table_debug_dtrace(
+ const struct ia_css_xnr_table *s3a,
+ unsigned int level);
+
+void
+ia_css_xnr_debug_dtrace(
+ const struct ia_css_xnr_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_XNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
new file mode 100644
index 000000000000..0e92c32242f3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_XNR_PARAM_H
+#define __IA_CSS_XNR_PARAM_H
+
+#include "type_support.h"
+#include <system_global.h>
+
+#ifndef PIPE_GENERATION
+#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_2_XNR_TABLE_SIZE
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_XNR_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_xnr_vamem_params {
+ u16 xnr[SH_CSS_ISP_XNR_TABLE_SIZE];
+};
+
+struct sh_css_isp_xnr_params {
+ /* XNR threshold.
+ * type:u0.16 but actual valid range is:[0,255]
+ * valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits)
+ * default: 25 */
+ u16 threshold;
+};
+
+#endif /* __IA_CSS_XNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
new file mode 100644
index 000000000000..ce8eef985e5d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/string.h> /* for memcpy() */
+
+#include <type_support.h>
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_xnr_table.host.h"
+
+struct ia_css_xnr_table default_xnr_table;
+
+static const uint16_t
+default_xnr_table_data[IA_CSS_VAMEM_2_XNR_TABLE_SIZE] = {
+ /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
+ 8191 >> 1, 4096 >> 1, 2730 >> 1, 2048 >> 1, 1638 >> 1, 1365 >> 1, 1170 >> 1, 1024 >> 1, 910 >> 1, 819 >> 1, 744 >> 1, 682 >> 1, 630 >> 1, 585 >> 1,
+ 546 >> 1, 512 >> 1,
+
+ /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */
+ 481 >> 1, 455 >> 1, 431 >> 1, 409 >> 1, 390 >> 1, 372 >> 1, 356 >> 1, 341 >> 1, 327 >> 1, 315 >> 1, 303 >> 1, 292 >> 1, 282 >> 1, 273 >> 1, 264 >> 1,
+ 256 >> 1,
+
+ /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */
+ 248 >> 1, 240 >> 1, 234 >> 1, 227 >> 1, 221 >> 1, 215 >> 1, 210 >> 1, 204 >> 1, 199 >> 1, 195 >> 1, 190 >> 1, 186 >> 1, 182 >> 1, 178 >> 1, 174 >> 1,
+ 170 >> 1,
+
+ /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */
+ 167 >> 1, 163 >> 1, 160 >> 1, 157 >> 1, 154 >> 1, 151 >> 1, 148 >> 1, 146 >> 1, 143 >> 1, 141 >> 1, 138 >> 1, 136 >> 1, 134 >> 1, 132 >> 1, 130 >> 1, 128 >> 1
+};
+
+void
+ia_css_config_xnr_table(void)
+{
+ memcpy(default_xnr_table.data.vamem_2, default_xnr_table_data,
+ sizeof(default_xnr_table_data));
+ default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+}
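+
+/*
+ * Consistency note (illustration only): the table above is the u0.12
+ * reciprocal series 4096/1, 4096/2, ..., 4096/64, clipped to 4095. A sketch
+ * that would regenerate equivalent values:
+ *
+ *	unsigned int i;
+ *	u16 tbl[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
+ *
+ *	for (i = 0; i < IA_CSS_VAMEM_2_XNR_TABLE_SIZE; i++)
+ *		tbl[i] = min(4095U, 4096U / (i + 1));
+ */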
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h
new file mode 100644
index 000000000000..dbe236f80d30
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_XNR_TABLE_HOST_H
+#define __IA_CSS_XNR_TABLE_HOST_H
+
+extern struct ia_css_xnr_table default_xnr_table;
+
+void ia_css_config_xnr_table(void);
+
+#endif /* __IA_CSS_XNR_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
new file mode 100644
index 000000000000..42ad8b9b51d3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_XNR_TYPES_H
+#define __IA_CSS_XNR_TYPES_H
+
+/* @file
+ * CSS-API header file for Extra Noise Reduction (XNR) parameters.
+ */
+
+/* XNR table.
+ *
+ * NOTE: The driver does not need to set this table,
+ * because the default values are set inside the css.
+ *
+ * This table contains coefficients used for division in XNR.
+ *
+ * u0.12, [0,4095],
+ * {4095, 2048, 1365, .........., 65, 64}
+ * ({1/1, 1/2, 1/3, ............., 1/63, 1/64})
+ *
+ * ISP block: XNR1
+ * ISP1: XNR1 is used.
+ * ISP2: XNR1 is used.
+ *
+ */
+
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 6
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE BIT(IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2)
+
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2 6
+/* Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE BIT(IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2)
+
+/* IA_CSS_VAMEM_TYPE_1 (ISP2300) or IA_CSS_VAMEM_TYPE_2 (ISP2400) */
+union ia_css_xnr_data {
+ u16 vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE];
+ /** Coefficients table on vamem type1. u0.12, [0,4095] */
+ u16 vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
+ /** Coefficients table on vamem type2. u0.12, [0,4095] */
+};
+
+struct ia_css_xnr_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_xnr_data data;
+};
+
+struct ia_css_xnr_config {
+ /* XNR threshold.
+ * type:u0.16 valid range:[0,65535]
+ * default: 6400 */
+ u16 threshold;
+};
+
+#endif /* __IA_CSS_XNR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
new file mode 100644
index 000000000000..e90dea58215b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/log2.h>
+
+#include "type_support.h"
+#include "math_support.h"
+#include "sh_css_defs.h"
+#include "ia_css_types.h"
+#include "assert_support.h"
+#include "ia_css_xnr3.host.h"
+
+/* Maximum value for alpha on ISP interface */
+#define XNR_MAX_ALPHA ((1 << (ISP_VEC_ELEMBITS - 1)) - 1)
+
+/* Minimum value for sigma on host interface. Lower values translate to
+ * max_alpha.
+ */
+#define XNR_MIN_SIGMA (IA_CSS_XNR3_SIGMA_SCALE / 100)
+
+/*
+ * division look-up table
+ * Refers to XNR3.0.5
+ */
+#define XNR3_LOOK_UP_TABLE_POINTS 16
+
+static const s16 x[XNR3_LOOK_UP_TABLE_POINTS] = {
+ 1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
+ 2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120
+};
+
+static const s16 a[XNR3_LOOK_UP_TABLE_POINTS] = {
+ -7213, -5580, -4371, -3421, -2722, -2159, -6950, -5585,
+ -4529, -3697, -3010, -2485, -2070, -1727, -1428, 0
+};
+
+static const s16 b[XNR3_LOOK_UP_TABLE_POINTS] = {
+ 4096, 3603, 3178, 2811, 2497, 2226, 1990, 1783,
+ 1603, 1446, 1307, 1185, 1077, 981, 895, 819
+};
+
+static const s16 c[XNR3_LOOK_UP_TABLE_POINTS] = {
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/*
+ * Default kernel parameters. In general, default is bypass mode or as close
+ * to the ineffective values as possible. Due to the chroma down+upsampling,
+ * perfect bypass mode is not possible for xnr3 filter itself. Instead, the
+ * 'blending' parameter is used to create a bypass.
+ */
+const struct ia_css_xnr3_config default_xnr3_config = {
+ /* sigma */
+ { 0, 0, 0, 0, 0, 0 },
+ /* coring */
+ { 0, 0, 0, 0 },
+ /* blending */
+ { 0 }
+};
+
+/*
+ * Compute an alpha value for the ISP kernel from sigma value on the host
+ * parameter interface as: alpha_scale * 1/(sigma/sigma_scale)
+ */
+static int32_t
+compute_alpha(int sigma)
+{
+ s32 alpha;
+ int offset = sigma / 2;
+
+ if (sigma < XNR_MIN_SIGMA) {
+ alpha = XNR_MAX_ALPHA;
+ } else {
+ alpha = ((IA_CSS_XNR3_SIGMA_SCALE * XNR_ALPHA_SCALE_FACTOR) + offset) / sigma;
+
+ if (alpha > XNR_MAX_ALPHA)
+ alpha = XNR_MAX_ALPHA;
+ }
+
+ return alpha;
+}
+
+/*
+ * Compute the scaled coring value for the ISP kernel from the value on the
+ * host parameter interface.
+ */
+static int32_t
+compute_coring(int coring)
+{
+ s32 isp_coring;
+ s32 isp_scale = XNR_CORING_SCALE_FACTOR;
+ s32 host_scale = IA_CSS_XNR3_CORING_SCALE;
+ s32 offset = host_scale / 2; /* fixed-point 0.5 */
+
+ /* Convert from public host-side scale factor to isp-side scale
+ * factor. Clip to [0, isp_scale-1).
+ */
+ isp_coring = ((coring * isp_scale) + offset) / host_scale;
+ return clamp(isp_coring, 0, isp_scale - 1);
+}
+
+/*
+ * Compute the scaled blending strength for the ISP kernel from the value on
+ * the host parameter interface.
+ */
+static int32_t
+compute_blending(int strength)
+{
+ s32 isp_strength;
+ s32 isp_scale = XNR_BLENDING_SCALE_FACTOR;
+ s32 host_scale = IA_CSS_XNR3_BLENDING_SCALE;
+ s32 offset = host_scale / 2; /* fixed-point 0.5 */
+
+ /* Convert from public host-side scale factor to isp-side scale
+ * factor. The blending factor is positive on the host side, but
+ * negative on the ISP side because +1.0 cannot be represented
+ * exactly as s0.11 fixed point, but -1.0 can.
+ */
+ isp_strength = -(((strength * isp_scale) + offset) / host_scale);
+ return MAX(MIN(isp_strength, 0), -isp_scale);
+}
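+
+/*
+ * Worked example: a host strength of IA_CSS_XNR3_BLENDING_SCALE (i.e. 1.0)
+ * maps to -isp_scale, the exactly representable -1.0 mentioned above, while
+ * a strength of 0 maps to 0.
+ */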
+
+void
+ia_css_xnr3_encode(
+ struct sh_css_isp_xnr3_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size)
+{
+ int kernel_size = XNR_FILTER_SIZE;
+ int adjust_factor = roundup_pow_of_two(kernel_size);
+ s32 max_diff = (1 << (ISP_VEC_ELEMBITS - 1)) - 1;
+ s32 min_diff = -(1 << (ISP_VEC_ELEMBITS - 1));
+
+ s32 alpha_y0 = compute_alpha(from->sigma.y0);
+ s32 alpha_y1 = compute_alpha(from->sigma.y1);
+ s32 alpha_u0 = compute_alpha(from->sigma.u0);
+ s32 alpha_u1 = compute_alpha(from->sigma.u1);
+ s32 alpha_v0 = compute_alpha(from->sigma.v0);
+ s32 alpha_v1 = compute_alpha(from->sigma.v1);
+ s32 alpha_ydiff = (alpha_y1 - alpha_y0) * adjust_factor / kernel_size;
+ s32 alpha_udiff = (alpha_u1 - alpha_u0) * adjust_factor / kernel_size;
+ s32 alpha_vdiff = (alpha_v1 - alpha_v0) * adjust_factor / kernel_size;
+
+ s32 coring_u0 = compute_coring(from->coring.u0);
+ s32 coring_u1 = compute_coring(from->coring.u1);
+ s32 coring_v0 = compute_coring(from->coring.v0);
+ s32 coring_v1 = compute_coring(from->coring.v1);
+ s32 coring_udiff = (coring_u1 - coring_u0) * adjust_factor / kernel_size;
+ s32 coring_vdiff = (coring_v1 - coring_v0) * adjust_factor / kernel_size;
+
+ s32 blending = compute_blending(from->blending.strength);
+
+ (void)size;
+
+ /* alpha's are represented in qN.5 format */
+ to->alpha.y0 = alpha_y0;
+ to->alpha.u0 = alpha_u0;
+ to->alpha.v0 = alpha_v0;
+ to->alpha.ydiff = clamp(alpha_ydiff, min_diff, max_diff);
+ to->alpha.udiff = clamp(alpha_udiff, min_diff, max_diff);
+ to->alpha.vdiff = clamp(alpha_vdiff, min_diff, max_diff);
+
+ /* coring parameters are expressed in q1.NN format */
+ to->coring.u0 = coring_u0;
+ to->coring.v0 = coring_v0;
+ to->coring.udiff = clamp(coring_udiff, min_diff, max_diff);
+ to->coring.vdiff = clamp(coring_vdiff, min_diff, max_diff);
+
+ /* blending strength is expressed in q1.NN format */
+ to->blending.strength = blending;
+}
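+
+/*
+ * Scaling note (illustration only): with XNR_FILTER_SIZE == 5 the adjust
+ * factor above is roundup_pow_of_two(5) == 8, so for example
+ *	alpha_ydiff = (alpha_y1 - alpha_y0) * 8 / 5
+ * before being clamped to the signed ISP_VEC_ELEMBITS range.
+ */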
+
+/* ISP2401 */
+/* (void) = ia_css_xnr3_vmem_encode(*to, *from)
+ * -----------------------------------------------
+ * VMEM Encode Function to translate UV parameters from userspace into ISP space
+*/
+void
+ia_css_xnr3_vmem_encode(
+ struct sh_css_isp_xnr3_vmem_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size)
+{
+ unsigned int i, j, base;
+ const unsigned int total_blocks = 4;
+ const unsigned int shuffle_block = 16;
+
+ (void)from;
+ (void)size;
+
+ /* Init */
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->x[0][i] = 0;
+ to->a[0][i] = 0;
+ to->b[0][i] = 0;
+ to->c[0][i] = 0;
+ }
+
+ /* Constraints on "x":
+	 * - values should be greater than or equal to 0.
+ * - values should be ascending.
+ */
+ assert(x[0] >= 0);
+
+ for (j = 1; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
+ assert(x[j] >= 0);
+ assert(x[j] > x[j - 1]);
+ }
+
+	/* The implementation of calculating 1/x is based on the availability
+	 * of the OP_vec_shuffle16 operation.
+	 * A 64-element vector is split up into 4 blocks of 16 elements. Each array
+	 * is copied to the vector 4 times (starting at 0, 16, 32 and 48). All array
+	 * elements are copied or initialised as described in the KFS. The remaining
+	 * elements of the vector are set to 0.
+ */
+ /* TODO: guard this code with above assumptions */
+ for (i = 0; i < total_blocks; i++) {
+ base = shuffle_block * i;
+
+ for (j = 0; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
+ to->x[0][base + j] = x[j];
+ to->a[0][base + j] = a[j];
+ to->b[0][base + j] = b[j];
+ to->c[0][base + j] = c[j];
+ }
+ }
+}
+
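+/*
+ * Resulting layout (illustration, assuming ISP_VEC_NELEMS == 64): after the
+ * loops above, each of to->x, to->a, to->b and to->c holds the 16-point
+ * look-up table repeated four times, at element offsets 0, 16, 32 and 48.
+ */
+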
+/* Dummy function added as the tool expects it */
+void
+ia_css_xnr3_debug_dtrace(
+ const struct ia_css_xnr3_config *config,
+ unsigned int level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h
new file mode 100644
index 000000000000..a5e1294527fc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_XNR3_HOST_H
+#define __IA_CSS_XNR3_HOST_H
+
+#include "ia_css_xnr3_param.h"
+#include "ia_css_xnr3_types.h"
+
+extern const struct ia_css_xnr3_config default_xnr3_config;
+
+void
+ia_css_xnr3_encode(
+ struct sh_css_isp_xnr3_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size);
+
+/* ISP2401 */
+void
+ia_css_xnr3_vmem_encode(
+ struct sh_css_isp_xnr3_vmem_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned int size);
+
+void
+ia_css_xnr3_debug_dtrace(
+ const struct ia_css_xnr3_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_XNR3_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h
new file mode 100644
index 000000000000..8997099dff5a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_XNR3_PARAM_H
+#define __IA_CSS_XNR3_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* ISP2401: needed for VMEM_ARRAY */
+
+/* Scaling factor of the alpha values: which fixed-point value represents 1.0?
+ * It must be chosen such that 1/min_sigma still fits in an ISP vector
+ * element. */
+#define XNR_ALPHA_SCALE_LOG2 5
+#define XNR_ALPHA_SCALE_FACTOR BIT(XNR_ALPHA_SCALE_LOG2)
+
+/* Scaling factor of the coring values on the ISP. */
+#define XNR_CORING_SCALE_LOG2 (ISP_VEC_ELEMBITS - 1)
+#define XNR_CORING_SCALE_FACTOR BIT(XNR_CORING_SCALE_LOG2)
+
+/* Scaling factor of the blending strength on the ISP. */
+#define XNR_BLENDING_SCALE_LOG2 (ISP_VEC_ELEMBITS - 1)
+#define XNR_BLENDING_SCALE_FACTOR BIT(XNR_BLENDING_SCALE_LOG2)
+
+/* XNR3 filter size. Must be 11x11, 9x9 or 5x5. */
+#define XNR_FILTER_SIZE 5
+
+/* XNR3 alpha (1/sigma) parameters on the ISP, expressed as a base (0) value
+ * for dark areas, and a scaled diff towards the value for bright areas. */
+struct sh_css_xnr3_alpha_params {
+ s32 y0;
+ s32 u0;
+ s32 v0;
+ s32 ydiff;
+ s32 udiff;
+ s32 vdiff;
+};
+
+/* XNR3 coring parameters on the ISP, expressed as a base (0) value
+ * for dark areas, and a scaled diff towards the value for bright areas. */
+struct sh_css_xnr3_coring_params {
+ s32 u0;
+ s32 v0;
+ s32 udiff;
+ s32 vdiff;
+};
+
+/* XNR3 blending strength on the ISP. */
+struct sh_css_xnr3_blending_params {
+ s32 strength;
+};
+
+/* XNR3 ISP parameters */
+struct sh_css_isp_xnr3_params {
+ struct sh_css_xnr3_alpha_params alpha;
+ struct sh_css_xnr3_coring_params coring;
+ struct sh_css_xnr3_blending_params blending;
+};
+
+/* ISP2401 */
+/*
+ * STRUCT sh_css_isp_xnr3_vmem_params
+ * -----------------------------------------------
+ * ISP VMEM parameters
+ */
+struct sh_css_isp_xnr3_vmem_params {
+ VMEM_ARRAY(x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(c, ISP_VEC_NELEMS);
+};
+
+#endif /*__IA_CSS_XNR3_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
new file mode 100644
index 000000000000..c012c1865d1e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_XNR3_TYPES_H
+#define __IA_CSS_XNR3_TYPES_H
+
+/* @file
+* CSS-API header file for Extra Noise Reduction (XNR) parameters.
+*/
+
+/**
+ * \brief Scale of the XNR sigma parameters.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_SIGMA_SCALE BIT(10)
+
+/**
+ * \brief Scale of the XNR coring parameters.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_CORING_SCALE BIT(15)
+
+/**
+ * \brief Scale of the XNR blending parameter.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_BLENDING_SCALE BIT(11)
+
+/**
+ * \brief XNR3 Sigma Parameters.
+ * \details Sigma parameters define the strength of the XNR filter.
+ * A higher number means stronger filtering. There are two values for each of
+ * the three YUV planes: one for dark areas and one for bright areas. All
+ * sigma parameters are fixed-point values between 0.0 and 1.0, scaled with
+ * IA_CSS_XNR3_SIGMA_SCALE.
+ */
+struct ia_css_xnr3_sigma_params {
+ int y0; /** Sigma for Y range similarity in dark area */
+ int y1; /** Sigma for Y range similarity in bright area */
+ int u0; /** Sigma for U range similarity in dark area */
+ int u1; /** Sigma for U range similarity in bright area */
+ int v0; /** Sigma for V range similarity in dark area */
+ int v1; /** Sigma for V range similarity in bright area */
+};
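
A standalone sketch (not driver code) of filling these sigma fields from floating-point values, using the documented scale where (1 << 10), i.e. IA_CSS_XNR3_SIGMA_SCALE, represents 1.0; the example sigmas are arbitrary:

/* Standalone sketch: float sigma -> fixed-point sigma (scale is 1 << 10). */
#include <stdio.h>

#define XNR3_SIGMA_SCALE (1 << 10)	/* mirrors IA_CSS_XNR3_SIGMA_SCALE */

static int to_sigma_fixed(double v)
{
	return (int)(v * XNR3_SIGMA_SCALE + 0.5);
}

int main(void)
{
	/* e.g. sigma of 0.1 in dark areas, 0.05 in bright areas (made up) */
	printf("y0=%d y1=%d\n", to_sigma_fixed(0.1), to_sigma_fixed(0.05));
	return 0;
}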
+
+/**
+ * \brief XNR3 Coring Parameters
+ * \details Coring parameters define the "coring" strength, which is a soft
+ * thresholding technique to avoid false coloring. There are two values for
+ * each of the two chroma planes: one for dark areas and one for bright areas.
+ * All coring parameters are fixed-point values between 0.0 and 1.0, scaled
+ * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0.
+ */
+struct ia_css_xnr3_coring_params {
+ int u0; /** Coring threshold of U channel in dark area */
+ int u1; /** Coring threshold of U channel in bright area */
+ int v0; /** Coring threshold of V channel in dark area */
+ int v1; /** Coring threshold of V channel in bright area */
+};
+
+/**
+ * \brief XNR3 Blending Parameters
+ * \details Blending parameters define the blending strength of filtered
+ * output pixels with the original chroma pixels from before xnr3. The
+ * blending strength is a fixed-point value between 0.0 and 1.0 (inclusive),
+ * scaled with IA_CSS_XNR3_BLENDING_SCALE.
+ * A higher number applies xnr filtering more strongly. A value of 1.0
+ * disables the blending and returns the xnr3 filtered output, while a
+ * value of 0.0 bypasses the entire xnr3 filter.
+ */
+struct ia_css_xnr3_blending_params {
+ int strength; /** Blending strength */
+};
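
A standalone model (not the ISP code, which this patch does not show) of the documented blending semantics, where IA_CSS_XNR3_BLENDING_SCALE (1 << 11) represents 1.0: a strength of the full scale returns the filtered pixel, a strength of 0 bypasses the filter, and a simple linear mix is assumed in between.

/* Standalone sketch: documented blending endpoints, linear mix assumed. */
#include <stdio.h>

#define BLEND_SCALE (1 << 11)	/* mirrors IA_CSS_XNR3_BLENDING_SCALE */

static int blend(int original, int filtered, int strength)
{
	return (filtered * strength +
		original * (BLEND_SCALE - strength)) / BLEND_SCALE;
}

int main(void)
{
	printf("%d %d %d\n",
	       blend(100, 60, 0),			/* 100: bypass */
	       blend(100, 60, BLEND_SCALE),		/* 60: fully filtered */
	       blend(100, 60, BLEND_SCALE / 2));	/* 80: halfway */
	return 0;
}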
+
+/**
+ * \brief XNR3 public parameters.
+ * \details Struct with all parameters for the XNR3 kernel that can be set
+ * from the CSS API.
+ */
+struct ia_css_xnr3_config {
+ struct ia_css_xnr3_sigma_params sigma; /** XNR3 sigma parameters */
+ struct ia_css_xnr3_coring_params coring; /** XNR3 coring parameters */
+ struct ia_css_xnr3_blending_params blending; /** XNR3 blending parameters */
+};
+
+#endif /* __IA_CSS_XNR3_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c
new file mode 100644
index 000000000000..d43a3539c6d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "ia_css_ynr.host.h"
+
+const struct ia_css_nr_config default_nr_config = {
+ 16384,
+ 8192,
+ 1280,
+ 0,
+ 0
+};
+
+const struct ia_css_ee_config default_ee_config = {
+ 8192,
+ 128,
+ 2048
+};
+
+void
+ia_css_nr_encode(
+ struct sh_css_isp_ynr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ /* YNR (Y Noise Reduction) */
+ to->threshold =
+ uDIGIT_FITTING(8192U, 16, SH_CSS_BAYER_BITS);
+ to->gain_all =
+ uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT);
+ to->gain_dir =
+ uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT);
+ to->threshold_cb =
+ uDIGIT_FITTING(from->threshold_cb, 16, SH_CSS_BAYER_BITS);
+ to->threshold_cr =
+ uDIGIT_FITTING(from->threshold_cr, 16, SH_CSS_BAYER_BITS);
+}
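
The encode functions above lean on uDIGIT_FITTING() from sh_css_frac.h, which is not part of this hunk; conceptually it refits a fixed-point value from one fractional bit width to another. A standalone sketch of that idea only (the driver's actual macro may differ in detail):

/* Standalone sketch: refit a value from one fractional width to another. */
#include <stdio.h>

static unsigned int refit(unsigned int v, int from_bits, int to_bits)
{
	if (from_bits >= to_bits)
		return v >> (from_bits - to_bits);
	return v << (to_bits - from_bits);
}

int main(void)
{
	/* 8192 with 16 fractional bits (0.125) refitted to 13 bits -> 1024 */
	printf("%u\n", refit(8192, 16, 13));
	return 0;
}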
+
+void
+ia_css_yee_encode(
+ struct sh_css_isp_yee_params *to,
+ const struct ia_css_yee_config *from,
+ unsigned int size)
+{
+ int asiWk1 = (int)from->ee.gain;
+ int asiWk2 = asiWk1 / 8;
+ int asiWk3 = asiWk1 / 4;
+
+ (void)size;
+ /* YEE (Y Edge Enhancement) */
+ to->dirthreshold_s =
+ min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS)
+ << 1),
+ SH_CSS_BAYER_MAXVAL);
+ to->dirthreshold_g =
+ min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS)
+ << 4),
+ SH_CSS_BAYER_MAXVAL);
+ to->dirthreshold_width_log2 =
+ uFRACTION_BITS_FITTING(8);
+ to->dirthreshold_width =
+ 1 << to->dirthreshold_width_log2;
+ to->detailgain =
+ uDIGIT_FITTING(from->ee.detail_gain, 11,
+ SH_CSS_YEE_DETAIL_GAIN_SHIFT);
+ to->coring_s =
+ (uDIGIT_FITTING(56U, 16, SH_CSS_BAYER_BITS) *
+ from->ee.threshold) >> 8;
+ to->coring_g =
+ (uDIGIT_FITTING(224U, 16, SH_CSS_BAYER_BITS) *
+ from->ee.threshold) >> 8;
+	/* gain * 1.125 -> [s4.8] */
+ to->scale_plus_s =
+ (asiWk1 + asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+	/* gain * -0.25 -> [s4.8] */
+ to->scale_plus_g =
+ (0 - asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+	/* gain * 0.875 -> [s4.8] */
+ to->scale_minus_s =
+ (asiWk1 - asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+	/* gain * 0.25 -> [s4.8] */
+ to->scale_minus_g =
+ (asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ to->clip_plus_s =
+ uDIGIT_FITTING(32760U, 16, SH_CSS_BAYER_BITS);
+ to->clip_plus_g = 0;
+ to->clip_minus_s =
+ uDIGIT_FITTING(504U, 16, SH_CSS_BAYER_BITS);
+ to->clip_minus_g =
+ uDIGIT_FITTING(32256U, 16, SH_CSS_BAYER_BITS);
+ to->Yclip = SH_CSS_BAYER_MAXVAL;
+}
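
A standalone sketch of the fractional gain split used above: with gain in u5.11, (gain + gain/8) is gain * 1.125 and (gain - gain/8) is gain * 0.875, and the right shift by (11 - 8) converts to the s4.8 format the comments mention. SH_CSS_YEE_SCALE_SHIFT is assumed to be 8 here, and the arithmetic right shift of a negative value mirrors what the driver code does.

/* Standalone sketch: YEE gain split, assuming a scale shift of 8 (s4.8). */
#include <stdio.h>

int main(void)
{
	int gain = 8192;	/* 4.0 in u5.11 */
	int wk1 = gain, wk2 = gain / 8, wk3 = gain / 4;

	printf("plus_s=%d minus_s=%d plus_g=%d minus_g=%d\n",
	       (wk1 + wk2) >> 3,	/*  gain * 1.125 -> s4.8 */
	       (wk1 - wk2) >> 3,	/*  gain * 0.875 -> s4.8 */
	       (0 - wk3) >> 3,		/* -gain * 0.25  -> s4.8 */
	       wk3 >> 3);		/*  gain * 0.25  -> s4.8 */
	return 0;
}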
+
+void
+ia_css_nr_dump(
+ const struct sh_css_isp_ynr_params *ynr,
+ unsigned int level)
+{
+	if (!ynr)
+		return;
+ ia_css_debug_dtrace(level,
+ "Y Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold", ynr->threshold);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_gain_all", ynr->gain_all);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_gain_dir", ynr->gain_dir);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold_cb", ynr->threshold_cb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold_cr", ynr->threshold_cr);
+}
+
+void
+ia_css_yee_dump(
+ const struct sh_css_isp_yee_params *yee,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "Y Edge Enhancement:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_s",
+ yee->dirthreshold_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_g",
+ yee->dirthreshold_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_width_log2",
+ yee->dirthreshold_width_log2);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_width",
+ yee->dirthreshold_width);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_detailgain",
+ yee->detailgain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_coring_s",
+ yee->coring_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_coring_g",
+ yee->coring_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_plus_s",
+ yee->scale_plus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_plus_g",
+ yee->scale_plus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_minus_s",
+ yee->scale_minus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_minus_g",
+ yee->scale_minus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_plus_s",
+ yee->clip_plus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_plus_g",
+ yee->clip_plus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_minus_s",
+ yee->clip_minus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_minus_g",
+ yee->clip_minus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_Yclip",
+ yee->Yclip);
+}
+
+void
+ia_css_nr_debug_dtrace(
+ const struct ia_css_nr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.direction=%d, config.bnr_gain=%d, config.ynr_gain=%d, config.threshold_cb=%d, config.threshold_cr=%d\n",
+ config->direction,
+ config->bnr_gain, config->ynr_gain,
+ config->threshold_cb, config->threshold_cr);
+}
+
+void
+ia_css_ee_debug_dtrace(
+ const struct ia_css_ee_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d, config.gain=%d, config.detail_gain=%d\n",
+ config->threshold, config->gain, config->detail_gain);
+}
+
+void
+ia_css_init_ynr_state(
+ void/*struct sh_css_isp_ynr_vmem_state*/ * state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h
new file mode 100644
index 000000000000..ed5c17cc081f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_YNR_HOST_H
+#define __IA_CSS_YNR_HOST_H
+
+#include "ia_css_ynr_types.h"
+#include "ia_css_ynr_param.h"
+
+extern const struct ia_css_nr_config default_nr_config;
+extern const struct ia_css_ee_config default_ee_config;
+
+void
+ia_css_nr_encode(
+ struct sh_css_isp_ynr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned int size);
+
+void
+ia_css_yee_encode(
+ struct sh_css_isp_yee_params *to,
+ const struct ia_css_yee_config *from,
+ unsigned int size);
+
+void
+ia_css_nr_dump(
+ const struct sh_css_isp_ynr_params *ynr,
+ unsigned int level);
+
+void
+ia_css_yee_dump(
+ const struct sh_css_isp_yee_params *yee,
+ unsigned int level);
+
+void
+ia_css_nr_debug_dtrace(
+ const struct ia_css_nr_config *config,
+ unsigned int level);
+
+void
+ia_css_ee_debug_dtrace(
+ const struct ia_css_ee_config *config,
+ unsigned int level);
+
+void
+ia_css_init_ynr_state(
+ void/*struct sh_css_isp_ynr_vmem_state*/ * state,
+ size_t size);
+#endif /* __IA_CSS_YNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h
new file mode 100644
index 000000000000..51221f83fc87
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_YNR_PARAM_H
+#define __IA_CSS_YNR_PARAM_H
+
+#include "type_support.h"
+
+/* YNR (Y Noise Reduction) */
+struct sh_css_isp_ynr_params {
+ s32 threshold;
+ s32 gain_all;
+ s32 gain_dir;
+ s32 threshold_cb;
+ s32 threshold_cr;
+};
+
+/* YEE (Y Edge Enhancement) */
+struct sh_css_isp_yee_params {
+ s32 dirthreshold_s;
+ s32 dirthreshold_g;
+ s32 dirthreshold_width_log2;
+ s32 dirthreshold_width;
+ s32 detailgain;
+ s32 coring_s;
+ s32 coring_g;
+ s32 scale_plus_s;
+ s32 scale_plus_g;
+ s32 scale_minus_s;
+ s32 scale_minus_g;
+ s32 clip_plus_s;
+ s32 clip_plus_g;
+ s32 clip_minus_s;
+ s32 clip_minus_g;
+ s32 Yclip;
+};
+
+#endif /* __IA_CSS_YNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
new file mode 100644
index 000000000000..bf5c388938ef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_YNR_TYPES_H
+#define __IA_CSS_YNR_TYPES_H
+
+/* @file
+* CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction (YNR,CNR).
+*/
+
+/* Configuration used by Bayer Noise Reduction (BNR) and
+ * YCC Noise Reduction (YNR,CNR).
+ *
+ * ISP block: BNR1, YNR1, CNR1
+ * ISP1: BNR1,YNR1,CNR1 are used.
+ * ISP2: BNR1,YNR1,CNR1 are used for Preview/Video.
+ * BNR1,YNR2,CNR2 are used for Still.
+ */
+struct ia_css_nr_config {
+ ia_css_u0_16 bnr_gain; /** Strength of noise reduction (BNR).
+ u0.16, [0,65535],
+ default 14336(0.21875), ineffective 0 */
+ ia_css_u0_16 ynr_gain; /** Strength of noise reduction (YNR).
+ u0.16, [0,65535],
+ default 14336(0.21875), ineffective 0 */
+ ia_css_u0_16 direction; /** Sensitivity of edge (BNR).
+ u0.16, [0,65535],
+ default 512(0.0078125), ineffective 0 */
+ ia_css_u0_16 threshold_cb; /** Coring threshold for Cb (CNR).
+ This is the same as
+ de_config.c1_coring_threshold.
+ u0.16, [0,65535],
+ default 0(0), ineffective 0 */
+ ia_css_u0_16 threshold_cr; /** Coring threshold for Cr (CNR).
+ This is the same as
+ de_config.c2_coring_threshold.
+ u0.16, [0,65535],
+ default 0(0), ineffective 0 */
+};
+
+/* Edge Enhancement (sharpen) configuration.
+ *
+ * ISP block: YEE1
+ * ISP1: YEE1 is used.
+ * ISP2: YEE1 is used for Preview/Video.
+ * (YEE2 is used for Still.)
+ */
+struct ia_css_ee_config {
+ ia_css_u5_11 gain; /** The strength of sharpness.
+ u5.11, [0,65535],
+ default 8192(4.0), ineffective 0 */
+ ia_css_u8_8 threshold; /** The threshold that divides noises from
+ edge.
+ u8.8, [0,65535],
+ default 256(1.0), ineffective 65535 */
+ ia_css_u5_11 detail_gain; /** The strength of sharpness in pell-mell
+ area.
+ u5.11, [0,65535],
+ default 2048(1.0), ineffective 0 */
+};
+
+/* YNR and YEE (sharpen) configuration.
+ */
+struct ia_css_yee_config {
+ struct ia_css_nr_config nr; /** The NR configuration. */
+ struct ia_css_ee_config ee; /** The EE configuration. */
+};
+
+#endif /* __IA_CSS_YNR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c
new file mode 100644
index 000000000000..9e75cb641577
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ia_css_ynr2.host.h"
+
+const struct ia_css_ynr_config default_ynr_config = {
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+const struct ia_css_fc_config default_fc_config = {
+ 1,
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */
+ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */
+ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */
+};
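
The initializer comments above decode as follows, assuming ISP_VEC_ELEMBITS is 14, which is consistent with the u0.13 / [0,8191] ranges documented in ia_css_ynr2_types.h further down in this patch; a standalone check:

/* Standalone sketch: fixed-point constants in default_fc_config, ELEMBITS assumed 14. */
#include <stdio.h>

#define ELEMBITS 14	/* assumption, matching the u0.13 ranges documented below */

int main(void)
{
	int half = 1 << (ELEMBITS - 2);			/*  4096 ~  0.5 */
	int almost_one = (1 << (ELEMBITS - 1)) - 1;	/*  8191 ~  1.0 */
	int minus_one = -(1 << (ELEMBITS - 1));		/* -8192 ~ -1.0 */

	printf("0.5=%d ~1.0=%d -1.0=%d\n", half, almost_one, minus_one);
	return 0;
}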
+
+void
+ia_css_ynr_encode(
+ struct sh_css_isp_yee2_params *to,
+ const struct ia_css_ynr_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->edge_sense_gain_0 = from->edge_sense_gain_0;
+ to->edge_sense_gain_1 = from->edge_sense_gain_1;
+ to->corner_sense_gain_0 = from->corner_sense_gain_0;
+ to->corner_sense_gain_1 = from->corner_sense_gain_1;
+}
+
+void
+ia_css_fc_encode(
+ struct sh_css_isp_fc_params *to,
+ const struct ia_css_fc_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->gain_exp = from->gain_exp;
+
+ to->coring_pos_0 = from->coring_pos_0;
+ to->coring_pos_1 = from->coring_pos_1;
+ to->coring_neg_0 = from->coring_neg_0;
+ to->coring_neg_1 = from->coring_neg_1;
+
+ to->gain_pos_0 = from->gain_pos_0;
+ to->gain_pos_1 = from->gain_pos_1;
+ to->gain_neg_0 = from->gain_neg_0;
+ to->gain_neg_1 = from->gain_neg_1;
+
+ to->crop_pos_0 = from->crop_pos_0;
+ to->crop_pos_1 = from->crop_pos_1;
+ to->crop_neg_0 = from->crop_neg_0;
+ to->crop_neg_1 = from->crop_neg_1;
+}
+
+void
+ia_css_ynr_dump(
+ const struct sh_css_isp_yee2_params *yee2,
+ unsigned int level);
+
+void
+ia_css_fc_dump(
+ const struct sh_css_isp_fc_params *fc,
+ unsigned int level);
+
+void
+ia_css_fc_debug_dtrace(
+ const struct ia_css_fc_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain_exp=%d, config.coring_pos_0=%d, config.coring_pos_1=%d, config.coring_neg_0=%d, config.coring_neg_1=%d, config.gain_pos_0=%d, config.gain_pos_1=%d, config.gain_neg_0=%d, config.gain_neg_1=%d, config.crop_pos_0=%d, config.crop_pos_1=%d, config.crop_neg_0=%d, config.crop_neg_1=%d\n",
+ config->gain_exp,
+ config->coring_pos_0, config->coring_pos_1,
+ config->coring_neg_0, config->coring_neg_1,
+ config->gain_pos_0, config->gain_pos_1,
+ config->gain_neg_0, config->gain_neg_1,
+ config->crop_pos_0, config->crop_pos_1,
+ config->crop_neg_0, config->crop_neg_1);
+}
+
+void
+ia_css_ynr_debug_dtrace(
+ const struct ia_css_ynr_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.edge_sense_gain_0=%d, config.edge_sense_gain_1=%d, config.corner_sense_gain_0=%d, config.corner_sense_gain_1=%d\n",
+ config->edge_sense_gain_0, config->edge_sense_gain_1,
+ config->corner_sense_gain_0, config->corner_sense_gain_1);
+}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h
new file mode 100644
index 000000000000..cad590e76fcf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_YNR2_HOST_H
+#define __IA_CSS_YNR2_HOST_H
+
+#include "ia_css_ynr2_types.h"
+#include "ia_css_ynr2_param.h"
+
+extern const struct ia_css_ynr_config default_ynr_config;
+extern const struct ia_css_fc_config default_fc_config;
+
+void
+ia_css_ynr_encode(
+ struct sh_css_isp_yee2_params *to,
+ const struct ia_css_ynr_config *from,
+ unsigned int size);
+
+void
+ia_css_fc_encode(
+ struct sh_css_isp_fc_params *to,
+ const struct ia_css_fc_config *from,
+ unsigned int size);
+
+void
+ia_css_ynr_dump(
+ const struct sh_css_isp_yee2_params *yee2,
+ unsigned int level);
+
+void
+ia_css_fc_dump(
+ const struct sh_css_isp_fc_params *fc,
+ unsigned int level);
+
+void
+ia_css_fc_debug_dtrace(
+ const struct ia_css_fc_config *config,
+ unsigned int level);
+
+void
+ia_css_ynr_debug_dtrace(
+ const struct ia_css_ynr_config *config,
+ unsigned int level);
+
+#endif /* __IA_CSS_YNR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h
new file mode 100644
index 000000000000..9553f16f9d2f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_YNR2_PARAM_H
+#define __IA_CSS_YNR2_PARAM_H
+
+#include "type_support.h"
+
+/* YNR (Y Noise Reduction), YEE (Y Edge Enhancement) */
+struct sh_css_isp_yee2_params {
+ s32 edge_sense_gain_0;
+ s32 edge_sense_gain_1;
+ s32 corner_sense_gain_0;
+ s32 corner_sense_gain_1;
+};
+
+/* Fringe Control */
+struct sh_css_isp_fc_params {
+ s32 gain_exp;
+ u16 coring_pos_0;
+ u16 coring_pos_1;
+ u16 coring_neg_0;
+ u16 coring_neg_1;
+ s32 gain_pos_0;
+ s32 gain_pos_1;
+ s32 gain_neg_0;
+ s32 gain_neg_1;
+ s32 crop_pos_0;
+ s32 crop_pos_1;
+ s32 crop_neg_0;
+ s32 crop_neg_1;
+};
+
+#endif /* __IA_CSS_YNR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
new file mode 100644
index 000000000000..502e104d7226
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_YNR2_TYPES_H
+#define __IA_CSS_YNR2_TYPES_H
+
+/* @file
+* CSS-API header file for Y(Luma) Noise Reduction.
+*/
+
+/* Y(Luma) Noise Reduction configuration.
+ *
+ * ISP block: YNR2 & YEE2
+ * (ISP1: YNR1 and YEE1 are used.)
+ * (ISP2: YNR1 and YEE1 are used for Preview/Video.)
+ * ISP2: YNR2 and YEE2 are used for Still.
+ */
+struct ia_css_ynr_config {
+ u16 edge_sense_gain_0; /** Sensitivity of edge in dark area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ u16 edge_sense_gain_1; /** Sensitivity of edge in bright area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ u16 corner_sense_gain_0; /** Sensitivity of corner in dark area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ u16 corner_sense_gain_1; /** Sensitivity of corner in bright area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+};
+
+/* Fringe Control configuration.
+ *
+ * ISP block: FC2 (FC2 is used with YNR2/YEE2.)
+ * (ISP1: FC2 is not used.)
+ * (ISP2: FC2 is not used for Preview/Video.)
+ * ISP2: FC2 is used for Still.
+ */
+struct ia_css_fc_config {
+ u8 gain_exp; /** Common exponent of gains.
+ u8.0, [0,13],
+ default 1, ineffective 0 */
+ u16 coring_pos_0; /** Coring threshold for positive edge in dark area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 coring_pos_1; /** Coring threshold for positive edge in bright area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 coring_neg_0; /** Coring threshold for negative edge in dark area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 coring_neg_1; /** Coring threshold for negative edge in bright area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ u16 gain_pos_0; /** Gain for positive edge in dark area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 gain_pos_1; /** Gain for positive edge in bright area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 gain_neg_0; /** Gain for negative edge in dark area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 gain_neg_1; /** Gain for negative edge in bright area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ u16 crop_pos_0; /** Limit for positive edge in dark area.
+ u0.13, [0,8191],
+ default/ineffective 8191(almost 1.0) */
+ u16 crop_pos_1; /** Limit for positive edge in bright area.
+ u0.13, [0,8191],
+ default/ineffective 8191(almost 1.0) */
+ s16 crop_neg_0; /** Limit for negative edge in dark area.
+ s0.13, [-8192,0],
+ default/ineffective -8192(-1.0) */
+ s16 crop_neg_1; /** Limit for negative edge in bright area.
+ s0.13, [-8192,0],
+ default/ineffective -8192(-1.0) */
+};
+
+#endif /* __IA_CSS_YNR2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h
new file mode 100644
index 000000000000..6a0257359e69
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _INPUT_BUF_ISP_H_
+#define _INPUT_BUF_ISP_H_
+
+#include <linux/math.h>
+
+/* Temporary include, since IA_CSS_BINARY_MODE_COPY is still needed */
+#include "sh_css_defs.h"
+
+#define INPUT_BUF_HEIGHT 2 /* double buffer */
+#define INPUT_BUF_LINES 2
+
+#ifndef ENABLE_CONTINUOUS
+#define ENABLE_CONTINUOUS 0
+#endif
+
+/* In continuous mode, the input buffer must be a fixed size for all binaries
+ * and at a fixed address since it will be used by the SP. */
+#define EXTRA_INPUT_VECTORS 2 /* For left padding */
+#define MAX_VECTORS_PER_INPUT_LINE_CONT \
+ (DIV_ROUND_UP(SH_CSS_MAX_SENSOR_WIDTH, ISP_NWAY) + EXTRA_INPUT_VECTORS)
+
+/* The input buffer should be on a fixed address in vmem, for continuous capture */
+#define INPUT_BUF_ADDR 0x0
+
+#endif /* _INPUT_BUF_ISP_H_ */
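
For a feel of what MAX_VECTORS_PER_INPUT_LINE_CONT evaluates to, a standalone example; the real SH_CSS_MAX_SENSOR_WIDTH and ISP_NWAY are defined elsewhere in the driver, so the 4608 and 64 below are assumptions used only for illustration.

/* Standalone sketch: vectors per input line for assumed width/vector size. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int max_sensor_width = 4608;	/* assumed sensor width */
	int isp_nway = 64;		/* assumed ISP vector width */
	int extra = 2;			/* EXTRA_INPUT_VECTORS, for left padding */

	printf("vectors per input line: %d\n",
	       DIV_ROUND_UP(max_sensor_width, isp_nway) + extra);
	return 0;
}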
diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h
new file mode 100644
index 000000000000..e5c77a95d702
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _ISP_TYPES_H_
+#define _ISP_TYPES_H_
+
+/*
+ * Workaround: hivecc complains about "tag "sh_css_3a_output" already declared"
+ * without this extra decl.
+ */
+struct ia_css_3a_output;
+
+/*
+ * Input stream formats, these correspond to the MIPI formats and the way
+ * the CSS receiver sends these to the input formatter.
+ * The bit depth of each pixel element is stored in the global variable
+ * isp_bits_per_pixel.
+ * NOTE: for rgb565 we set isp_bits_per_pixel to 565; for all other rgb
+ * formats it's the actual depth (4 for 444, 8 for 888, etc.).
+ */
+enum sh_stream_format {
+ sh_stream_format_yuv420_legacy,
+ sh_stream_format_yuv420,
+ sh_stream_format_yuv422,
+ sh_stream_format_rgb,
+ sh_stream_format_raw,
+ sh_stream_format_binary, /* bytestream such as jpeg */
+};
+
+struct s_isp_frames {
+ /*
+ * Global variables that are written to by either the SP or the host,
+ * every ISP binary needs these.
+ */
+ /* output frame */
+ char *xmem_base_addr_y;
+ char *xmem_base_addr_uv;
+ char *xmem_base_addr_u;
+ char *xmem_base_addr_v;
+ /* 2nd output frame */
+ char *xmem_base_addr_second_out_y;
+ char *xmem_base_addr_second_out_u;
+ char *xmem_base_addr_second_out_v;
+ /* input yuv frame */
+ char *xmem_base_addr_y_in;
+ char *xmem_base_addr_u_in;
+ char *xmem_base_addr_v_in;
+ /* input raw frame */
+ char *xmem_base_addr_raw;
+ /* output raw frame */
+ char *xmem_base_addr_raw_out;
+ /* viewfinder output (vf_veceven) */
+ char *xmem_base_addr_vfout_y;
+ char *xmem_base_addr_vfout_u;
+ char *xmem_base_addr_vfout_v;
+ /* overlay frame (for vf_pp) */
+ char *xmem_base_addr_overlay_y;
+ char *xmem_base_addr_overlay_u;
+ char *xmem_base_addr_overlay_v;
+ /* pre-gdc output frame (gdc input) */
+ char *xmem_base_addr_qplane_r;
+ char *xmem_base_addr_qplane_ratb;
+ char *xmem_base_addr_qplane_gr;
+ char *xmem_base_addr_qplane_gb;
+ char *xmem_base_addr_qplane_b;
+ char *xmem_base_addr_qplane_batr;
+ /* YUV as input, used by postisp binary */
+ char *xmem_base_addr_yuv_16_y;
+ char *xmem_base_addr_yuv_16_u;
+ char *xmem_base_addr_yuv_16_v;
+};
+
+#endif /* _ISP_TYPES_H_ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
new file mode 100644
index 000000000000..df4378fd3540
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <type_support.h>
+
+// The CSI receiver has 3 ports.
+#define N_CSI_PORTS (3)
+// AM: Use previous define for this.
+
+//MIPI allows up to 4 channels.
+#define N_CHANNELS (4)
+// 12KB = 256bit x 384 words
+#define IB_CAPACITY_IN_WORDS (384)
+
+typedef enum {
+ MIPI_0LANE_CFG = 0,
+ MIPI_1LANE_CFG = 1,
+ MIPI_2LANE_CFG = 2,
+ MIPI_3LANE_CFG = 3,
+ MIPI_4LANE_CFG = 4
+} mipi_lane_cfg_t;
+
+typedef enum {
+ INPUT_SYSTEM_SOURCE_SENSOR = 0,
+ INPUT_SYSTEM_SOURCE_FIFO,
+ INPUT_SYSTEM_SOURCE_PRBS,
+ INPUT_SYSTEM_SOURCE_MEMORY,
+ N_INPUT_SYSTEM_SOURCE
+} input_system_source_t;
+
+/* internal routing configuration */
+typedef enum {
+ INPUT_SYSTEM_DISCARD_ALL = 0,
+ INPUT_SYSTEM_CSI_BACKEND = 1,
+ INPUT_SYSTEM_INPUT_BUFFER = 2,
+ INPUT_SYSTEM_MULTICAST = 3,
+ N_INPUT_SYSTEM_CONNECTION
+} input_system_connection_t;
+
+typedef enum {
+ INPUT_SYSTEM_MIPI_PORT0,
+ INPUT_SYSTEM_MIPI_PORT1,
+ INPUT_SYSTEM_MIPI_PORT2,
+ INPUT_SYSTEM_ACQUISITION_UNIT,
+ N_INPUT_SYSTEM_MULTIPLEX
+} input_system_multiplex_t;
+
+typedef enum {
+ INPUT_SYSTEM_SINK_MEMORY = 0,
+ INPUT_SYSTEM_SINK_ISP,
+ INPUT_SYSTEM_SINK_SP,
+ N_INPUT_SYSTEM_SINK
+} input_system_sink_t;
+
+typedef enum {
+ INPUT_SYSTEM_FIFO_CAPTURE = 0,
+ INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING,
+ INPUT_SYSTEM_SRAM_BUFFERING,
+ INPUT_SYSTEM_XMEM_BUFFERING,
+ INPUT_SYSTEM_XMEM_CAPTURE,
+ INPUT_SYSTEM_XMEM_ACQUIRE,
+ N_INPUT_SYSTEM_BUFFERING_MODE
+} buffering_mode_t;
+
+typedef struct isp2400_input_system_cfg_s input_system_cfg_t;
+typedef struct sync_generator_cfg_s sync_generator_cfg_t;
+typedef struct tpg_cfg_s tpg_cfg_t;
+typedef struct prbs_cfg_s prbs_cfg_t;
+
+/* MW: uint16_t should be sufficient */
+struct isp2400_input_system_cfg_s {
+ u32 no_side_band;
+ u32 fmt_type;
+ u32 ch_id;
+ u32 input_mode;
+};
+
+struct sync_generator_cfg_s {
+ u32 width;
+ u32 height;
+ u32 hblank_cycles;
+ u32 vblank_cycles;
+};
+
+/* MW: tpg & prbs are exclusive */
+struct tpg_cfg_s {
+ u32 x_mask;
+ u32 y_mask;
+ u32 x_delta;
+ u32 y_delta;
+ u32 xy_mask;
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+struct prbs_cfg_s {
+ u32 seed;
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+struct gpfifo_cfg_s {
+// TBD.
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+typedef struct gpfifo_cfg_s gpfifo_cfg_t;
+
+// ALX: Commented out to make the code compile.
+//typedef struct isp2400_input_system_cfg_s input_system_cfg_t;
+
+struct ib_buffer_s {
+ u32 mem_reg_size;
+ u32 nof_mem_regs;
+ u32 mem_reg_addr;
+};
+
+typedef struct ib_buffer_s isp2400_ib_buffer_t;
+
+struct csi_cfg_s {
+ u32 csi_port;
+ buffering_mode_t buffering_mode;
+ isp2400_ib_buffer_t csi_buffer;
+ isp2400_ib_buffer_t acquisition_buffer;
+ u32 nof_xmem_buffers;
+};
+
+typedef struct csi_cfg_s csi_cfg_t;
+
+typedef enum {
+ INPUT_SYSTEM_CFG_FLAG_RESET = 0,
+ INPUT_SYSTEM_CFG_FLAG_SET = 1U << 0,
+ INPUT_SYSTEM_CFG_FLAG_BLOCKED = 1U << 1,
+ INPUT_SYSTEM_CFG_FLAG_REQUIRED = 1U << 2,
+ INPUT_SYSTEM_CFG_FLAG_CONFLICT = 1U << 3 // To mark a conflicting configuration.
+} isp2400_input_system_cfg_flag_t;
+
+typedef u32 input_system_config_flags_t;
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
new file mode 100644
index 000000000000..56a4db033044
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_SYSTEM_2400_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_LOCAL_H_INCLUDED__
+
+#include "input_system_defs.h" /* HIVE_ISYS_GPREG_MULTICAST_A_IDX,... */
+
+/*
+ * _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX,
+ * _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX,...
+ */
+#include "css_receiver_2400_defs.h"
+
+#include "isp_capture_defs.h"
+
+#include "isp_acquisition_defs.h"
+#include "input_system_ctrl_defs.h"
+
+struct target_cfg2400_s {
+ input_switch_cfg_channel_t input_switch_channel_cfg;
+ target_isp_cfg_t target_isp_cfg;
+ target_sp_cfg_t target_sp_cfg;
+ target_strm2mem_cfg_t target_strm2mem_cfg;
+};
+
+// Configuration of a channel.
+struct channel_cfg_s {
+ u32 ch_id;
+ backend_channel_cfg_t backend_ch;
+ input_system_source_t source_type;
+ source_cfg_t source_cfg;
+ target_cfg2400_t target_cfg;
+};
+
+// Complete configuration for input system.
+struct input_system_cfg2400_s {
+ input_system_source_t source_type;
+ input_system_config_flags_t source_type_flags;
+ //channel_cfg_t channel[N_CHANNELS];
+ input_system_config_flags_t ch_flags[N_CHANNELS];
+ // This is the place where the buffers' settings are collected, as given.
+ csi_cfg_t csi_value[N_CSI_PORTS];
+ input_system_config_flags_t csi_flags[N_CSI_PORTS];
+
+	// Possibly another struct for ib.
+	// These buffers are set at the end, based on all the configurations.
+ isp2400_ib_buffer_t csi_buffer[N_CSI_PORTS];
+ input_system_config_flags_t csi_buffer_flags[N_CSI_PORTS];
+ isp2400_ib_buffer_t acquisition_buffer_unique;
+ input_system_config_flags_t acquisition_buffer_unique_flags;
+	u32 unallocated_ib_mem_words; // Used for checking. Default = IB_CAPACITY_IN_WORDS.
+ //uint32_t acq_allocated_ib_mem_words;
+
+ input_system_connection_t multicast[N_CSI_PORTS];
+ input_system_multiplex_t multiplexer;
+ input_system_config_flags_t multiplexer_flags;
+
+ tpg_cfg_t tpg_value;
+ input_system_config_flags_t tpg_flags;
+ prbs_cfg_t prbs_value;
+ input_system_config_flags_t prbs_flags;
+ gpfifo_cfg_t gpfifo_value;
+ input_system_config_flags_t gpfifo_flags;
+
+ input_switch_cfg_t input_switch_cfg;
+
+ target_isp_cfg_t target_isp[N_CHANNELS];
+ input_system_config_flags_t target_isp_flags[N_CHANNELS];
+ target_sp_cfg_t target_sp[N_CHANNELS];
+ input_system_config_flags_t target_sp_flags[N_CHANNELS];
+ target_strm2mem_cfg_t target_strm2mem[N_CHANNELS];
+ input_system_config_flags_t target_strm2mem_flags[N_CHANNELS];
+
+ input_system_config_flags_t session_flags;
+
+};
+
+/*
+ * For each MIPI port
+ */
+#define _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX
+#define _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX
+#define _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX
+#define _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX
+#define _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX
+/* new regs for each MIPI port w.r.t. 2300 */
+#define _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX
+#define _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX
+#define _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX
+
+/* _HRT_CSS_RECEIVER_2400_COMP_FORMAT_REG_IDX is not defined per MIPI port but per channel */
+/* _HRT_CSS_RECEIVER_2400_COMP_PREDICT_REG_IDX is not defined per MIPI port but per channel */
+#define _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX
+#define _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX
+#define _HRT_CSS_RECEIVER_RAW18_REG_IDX _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX
+#define _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX
+#define _HRT_CSS_RECEIVER_RAW16_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX
+
+/* Previously MIPI port regs, now 2x2 logical channel regs */
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX
+
+/* Second backend is at offset 0x0700 w.r.t. the first port at offset 0x0100 */
+#define _HRT_CSS_BE_OFFSET 448
+#define _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_SRST_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_SEL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX + _HRT_CSS_BE_OFFSET)
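
The 448-register offset above matches the 0x0700 byte figure in the comment once the register index is scaled by the register width, assuming hrt_data is a 32-bit word (as the reg * sizeof(hrt_data) scaling in isp2400_input_system_private.h suggests); a standalone check:

/* Standalone sketch: 448 register indices * 4 bytes = 0x700 bytes. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int be_offset_regs = 448;	/* _HRT_CSS_BE_OFFSET */
	unsigned int byte_offset = be_offset_regs * (unsigned int)sizeof(uint32_t);

	printf("0x%x\n", byte_offset);	/* prints 0x700 */
	return 0;
}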
+
+#define _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT
+#define _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT
+#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT
+#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT
+#define _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT
+
+#define _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX
+#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_IDX _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX
+#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_BITS _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS
+
+typedef enum {
+ MIPI_FORMAT_2400_RGB888 = 0,
+ MIPI_FORMAT_2400_RGB555,
+ MIPI_FORMAT_2400_RGB444,
+ MIPI_FORMAT_2400_RGB565,
+ MIPI_FORMAT_2400_RGB666,
+ MIPI_FORMAT_2400_RAW8, /* 5 */
+ MIPI_FORMAT_2400_RAW10,
+ MIPI_FORMAT_2400_RAW6,
+ MIPI_FORMAT_2400_RAW7,
+ MIPI_FORMAT_2400_RAW12,
+ MIPI_FORMAT_2400_RAW14, /* 10 */
+ MIPI_FORMAT_2400_YUV420_8,
+ MIPI_FORMAT_2400_YUV420_10,
+ MIPI_FORMAT_2400_YUV422_8,
+ MIPI_FORMAT_2400_YUV422_10,
+ MIPI_FORMAT_2400_CUSTOM0, /* 15 */
+ MIPI_FORMAT_2400_YUV420_8_LEGACY,
+ MIPI_FORMAT_2400_EMBEDDED,
+ MIPI_FORMAT_2400_CUSTOM1,
+ MIPI_FORMAT_2400_CUSTOM2,
+ MIPI_FORMAT_2400_CUSTOM3, /* 20 */
+ MIPI_FORMAT_2400_CUSTOM4,
+ MIPI_FORMAT_2400_CUSTOM5,
+ MIPI_FORMAT_2400_CUSTOM6,
+ MIPI_FORMAT_2400_CUSTOM7,
+ MIPI_FORMAT_2400_YUV420_8_SHIFT, /* 25 */
+ MIPI_FORMAT_2400_YUV420_10_SHIFT,
+ MIPI_FORMAT_2400_RAW16,
+ MIPI_FORMAT_2400_RAW18,
+ N_MIPI_FORMAT_2400,
+} mipi_format_2400_t;
+
+#define N_MIPI_FORMAT_CUSTOM 8
+
+/* The number of stores for compressed format types */
+#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
+
+typedef enum {
+ RX_IRQ_INFO_BUFFER_OVERRUN = 1UL << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT,
+ RX_IRQ_INFO_INIT_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT,
+ RX_IRQ_INFO_ENTER_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT,
+ RX_IRQ_INFO_EXIT_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT,
+ RX_IRQ_INFO_ECC_CORRECTED = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT,
+ RX_IRQ_INFO_ERR_SOT = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT,
+ RX_IRQ_INFO_ERR_SOT_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT,
+ RX_IRQ_INFO_ERR_CONTROL = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT,
+ RX_IRQ_INFO_ERR_ECC_DOUBLE = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT,
+ /* RX_IRQ_INFO_NO_ERR = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT, */
+ RX_IRQ_INFO_ERR_CRC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT,
+ RX_IRQ_INFO_ERR_UNKNOWN_ID = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT,
+ RX_IRQ_INFO_ERR_FRAME_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT,
+ RX_IRQ_INFO_ERR_FRAME_DATA = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT,
+ RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT,
+ RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT,
+ RX_IRQ_INFO_ERR_LINE_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT,
+} rx_irq_info_t;
+
+/* NOTE: The base already has an offset of 0x0100 */
+static const hrt_address __maybe_unused MIPI_PORT_OFFSET[N_MIPI_PORT_ID] = {
+ 0x00000000UL,
+ 0x00000100UL,
+ 0x00000200UL
+};
+
+static const hrt_address __maybe_unused SUB_SYSTEM_OFFSET[N_SUB_SYSTEM_ID] = {
+ 0x00001000UL,
+ 0x00002000UL,
+ 0x00003000UL,
+ 0x00004000UL,
+ 0x00005000UL,
+ 0x00009000UL,
+ 0x0000A000UL,
+ 0x0000B000UL,
+ 0x0000C000UL
+};
+
+#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
new file mode 100644
index 000000000000..a6762683a0ac
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_SYSTEM_2400_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_PRIVATE_H_INCLUDED__
+
+#include "input_system_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_reg_store(
+ const input_system_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_reg_load(
+ const input_system_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg * sizeof(
+ hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
+ const rx_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(RX_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_reg_load(
+ const rx_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg *
+ sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg)
+{
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg *
+ sizeof(hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] +
+ reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] +
+ SUB_SYSTEM_OFFSET[sub_ID] + reg * sizeof(hrt_data));
+}
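
The accessors above all follow the same addressing pattern: a block base, plus an optional sub-system offset, plus the register index scaled by the register width. A standalone sketch of just that arithmetic, with made-up addresses and no device access:

/* Standalone sketch: indexed register address composition (no MMIO here). */
#include <stdio.h>
#include <stdint.h>

#define REG_WIDTH ((uint32_t)sizeof(uint32_t))	/* assumed width of hrt_data */

static uint32_t reg_byte_addr(uint32_t base, uint32_t sub_offset,
			      uint32_t reg_index)
{
	return base + sub_offset + reg_index * REG_WIDTH;
}

int main(void)
{
	uint32_t input_system_base = 0x00080000;	/* hypothetical base */
	uint32_t sub_system_offset = 0x00001000;	/* e.g. first table entry */

	printf("0x%x\n", reg_byte_addr(input_system_base, sub_system_offset, 3));
	return 0;
}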
+
+#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
new file mode 100644
index 000000000000..2147929392e1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_SYSTEM_2400_PUBLIC_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+
+/*! Set compression parameters for cfg[cfg_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param cfg_ID[in] Configuration identifier
+ \param comp[in] Compression method
+ \param pred[in] Predictor method
+
+ \NOTE: the storage of compression configuration is
+ implementation specific. The config can be
+ carried either on MIPI ports or on MIPI channels
+
+ \return none, RECEIVER[ID].cfg[cfg_ID] = {comp, pred}
+ */
+void receiver_set_compression(
+ const rx_ID_t ID,
+ const unsigned int cfg_ID,
+ const mipi_compressor_t comp,
+ const mipi_predictor_t pred);
+
+/*! Enable PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param cnd[in] irq predicate
+
+ \return None, enable(RECEIVER[ID].PORT[port_ID])
+ */
+void receiver_port_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const bool cnd);
+
+/*! Flag if PORT[port_ID] of RECEIVER[ID] is enabled
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+
+ \return enable(RECEIVER[ID].PORT[port_ID]) == true
+ */
+bool is_receiver_port_enabled(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID);
+
+/*! Enable the IRQ channels of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param irq_info[in] irq channels
+
+ \return None, enable(RECEIVER[ID].PORT[port_ID].irq_info)
+ */
+void receiver_irq_enable(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info);
+
+/*! Return the IRQ status of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+
+ \return RECEIVER[ID].PORT[port_ID].irq_info
+ */
+rx_irq_info_t receiver_get_irq_info(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID);
+
+/*! Clear the IRQ status of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param irq_info[in] irq status
+
+ \return None, clear(RECEIVER[ID].PORT[port_ID].irq_info)
+ */
+void receiver_irq_clear(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const rx_irq_info_t irq_info);
+
+/*! Write to a control register of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, INPUT_SYSTEM[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_reg_store(
+ const input_system_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param reg[in] register index
+
+ \return INPUT_SYSTEM[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_reg_load(
+ const input_system_ID_t ID,
+ const hrt_address reg);
+
+/*! Write to a control register of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, RECEIVER[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void receiver_reg_store(
+ const rx_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param reg[in] register index
+
+ \return RECEIVER[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_reg_load(
+ const rx_ID_t ID,
+ const hrt_address reg);
+
+/*! Write to a control register of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, RECEIVER[ID].PORT[port_ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void receiver_port_reg_store(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param reg[in] register index
+
+ \return RECEIVER[ID].PORT[port_ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_port_reg_load(
+ const rx_ID_t ID,
+ const enum mipi_port_id port_ID,
+ const hrt_address reg);
+
+/*! Write to a control register of SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param sub_ID[in]			sub system identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_sub_system_reg_store(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param sub_ID[in]			sub system identifier
+ \param reg[in] register index
+
+ \return INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_sub_system_reg_load(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg);
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Functions for configuration phase on input system.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// Function that resets current configuration.
+// remove the argument since it should be private.
+input_system_err_t input_system_configuration_reset(void);
+
+// Function that commits current configuration.
+// remove the argument since it should be private.
+input_system_err_t input_system_configuration_commit(void);
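+
+// Typical configuration sequence (illustrative sketch only, not a fixed
+// requirement of the API): reset the current configuration, program the
+// required channels with the user functions below, then commit:
+//
+//	input_system_configuration_reset();
+//	/* ... one or more *_channel_cfg() calls ... */
+//	input_system_configuration_commit();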
+
+///////////////////////////////////////////////////////////////////////////
+//
+// User functions:
+// (encoded generic function)
+// - no checking
+// - decoding name and arguments into the generic (channel) configuration
+// function.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// FIFO channel config function user
+
+input_system_err_t input_system_csi_fifo_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ target_cfg2400_t target
+);
+
+input_system_err_t input_system_csi_fifo_channel_with_counting_cfg(
+ u32 ch_id,
+ u32 nof_frame,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 mem_region_size,
+ u32 nof_mem_regions,
+ target_cfg2400_t target
+);
+
+// SRAM channel config function user
+
+input_system_err_t input_system_csi_sram_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 csi_mem_region_size,
+ u32 csi_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+// XMEM channel config function user
+
+input_system_err_t input_system_csi_xmem_channel_cfg(
+ u32 ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 mem_region_size,
+ u32 nof_mem_regions,
+ u32 acq_mem_region_size,
+ u32 acq_nof_mem_regions,
+ target_cfg2400_t target,
+ uint32_t nof_xmem_buffers
+);
+
+input_system_err_t input_system_csi_xmem_capture_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ u32 csi_mem_region_size,
+ u32 csi_nof_mem_regions,
+ u32 acq_mem_region_size,
+ u32 acq_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+input_system_err_t input_system_csi_xmem_acquire_only_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ u32 acq_mem_region_size,
+ u32 acq_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+// Non-CSI channel config function user
+
+input_system_err_t input_system_prbs_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ u32 seed,
+ u32 sync_gen_width,
+ u32 sync_gen_height,
+ u32 sync_gen_hblank_cycles,
+ u32 sync_gen_vblank_cycles,
+ target_cfg2400_t target
+);
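+
+// Illustrative call (all parameter values are placeholders, not taken
+// from the driver): configure PRBS channel 0 to generate a single
+// 640x480 frame with a fixed seed.
+//
+//	input_system_err_t err;
+//	target_cfg2400_t target;	/* filled in by the caller */
+//
+//	err = input_system_prbs_channel_cfg(0, 1, 0x1234, 640, 480,
+//					    100, 100, target);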
+
+input_system_err_t input_system_gpfifo_channel_cfg(
+ u32 ch_id,
+ u32 nof_frames,
+ target_cfg2400_t target
+);
+
+#endif /* __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_support.h b/drivers/staging/media/atomisp/pci/isp2400_support.h
new file mode 100644
index 000000000000..6e0b563c6c48
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2400_support.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _isp2400_support_h
+#define _isp2400_support_h
+
+#ifndef ISP2400_VECTOR_TYPES
+/* This typedef makes it possible to include hive header files
+   in the host code, which is useful in crun */
+typedef char *tmemvectors, *tmemvectoru, *tvector;
+#endif
+
+#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val)
+#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val)
+
+#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem)
+#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem)
+
+#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell))
+#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell))
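+
+/* Illustrative use ("ISP" is a placeholder cell name from the HRT system
+ * model, not defined in this header):
+ *
+ *	hrt_isp_vamem1_store_16(ISP, 0, 0x3ff);
+ *	addr = hrt_isp_dmem_master_port_address(ISP);
+ */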
+
+#if ISP_HAS_HIST
+#define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram)
+#define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell))
+#endif
+
+#endif /* _isp2400_support_h */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
new file mode 100644
index 000000000000..4aadeb1336ba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+/* CSI receiver has 3 ports. */
+#define N_CSI_PORTS (3)
+
+#include "system_local.h"
+#include "isys_dma_global.h" /* isys2401_dma_channel,
+ * isys2401_dma_cfg_t
+ */
+
+#include "ibuf_ctrl_local.h" /* ibuf_cfg_t,
+ * ibuf_ctrl_cfg_t
+ */
+
+#include "isys_stream2mmio.h" /* stream2mmio_cfg_t */
+
+#include "csi_rx.h" /* csi_rx_frontend_cfg_t,
+ * csi_rx_backend_cfg_t,
+ * csi_rx_backend_lut_entry_t
+ */
+#include "pixelgen.h"
+
+#define INPUT_SYSTEM_N_STREAM_ID 6 /* maximum number of simultaneous
+					virtual channels supported */
+
+typedef enum {
+ INPUT_SYSTEM_SOURCE_TYPE_UNDEFINED = 0,
+ INPUT_SYSTEM_SOURCE_TYPE_SENSOR,
+ INPUT_SYSTEM_SOURCE_TYPE_PRBS,
+ N_INPUT_SYSTEM_SOURCE_TYPE
+} input_system_source_type_t;
+
+typedef struct input_system_channel_s input_system_channel_t;
+struct input_system_channel_s {
+ stream2mmio_ID_t stream2mmio_id;
+ stream2mmio_sid_ID_t stream2mmio_sid_id;
+
+ ibuf_ctrl_ID_t ibuf_ctrl_id;
+ isp2401_ib_buffer_t ib_buffer;
+
+ isys2401_dma_ID_t dma_id;
+ isys2401_dma_channel dma_channel;
+};
+
+typedef struct input_system_channel_cfg_s input_system_channel_cfg_t;
+struct input_system_channel_cfg_s {
+ stream2mmio_cfg_t stream2mmio_cfg;
+ ibuf_ctrl_cfg_t ibuf_ctrl_cfg;
+ isys2401_dma_cfg_t dma_cfg;
+ isys2401_dma_port_cfg_t dma_src_port_cfg;
+ isys2401_dma_port_cfg_t dma_dest_port_cfg;
+};
+
+typedef struct input_system_input_port_s input_system_input_port_t;
+struct input_system_input_port_s {
+ input_system_source_type_t source_type;
+
+ struct {
+ csi_rx_frontend_ID_t frontend_id;
+ csi_rx_backend_ID_t backend_id;
+ csi_mipi_packet_type_t packet_type;
+ csi_rx_backend_lut_entry_t backend_lut_entry;
+ } csi_rx;
+
+ struct {
+ csi_mipi_packet_type_t packet_type;
+ csi_rx_backend_lut_entry_t backend_lut_entry;
+ } metadata;
+
+ struct {
+ pixelgen_ID_t pixelgen_id;
+ } pixelgen;
+};
+
+typedef struct input_system_input_port_cfg_s input_system_input_port_cfg_t;
+struct input_system_input_port_cfg_s {
+ struct {
+ csi_rx_frontend_cfg_t frontend_cfg;
+ csi_rx_backend_cfg_t backend_cfg;
+ csi_rx_backend_cfg_t md_backend_cfg;
+ } csi_rx_cfg;
+
+ struct {
+ pixelgen_tpg_cfg_t tpg_cfg;
+ pixelgen_prbs_cfg_t prbs_cfg;
+ } pixelgen_cfg;
+};
+
+typedef struct isp2401_input_system_cfg_s isp2401_input_system_cfg_t;
+struct isp2401_input_system_cfg_s {
+ input_system_input_port_ID_t input_port_id;
+
+ input_system_source_type_t mode;
+
+ bool online;
+ bool raw_packed;
+ s8 linked_isys_stream_id;
+
+ struct {
+ bool comp_enable;
+ s32 active_lanes;
+ s32 fmt_type;
+ s32 ch_id;
+ s32 comp_predictor;
+ s32 comp_scheme;
+ } csi_port_attr;
+
+ pixelgen_tpg_cfg_t tpg_port_attr;
+
+ pixelgen_prbs_cfg_t prbs_port_attr;
+
+ struct {
+ s32 align_req_in_bytes;
+ s32 bits_per_pixel;
+ s32 pixels_per_line;
+ s32 lines_per_frame;
+ } input_port_resolution;
+
+ struct {
+ s32 left_padding;
+ s32 max_isp_input_width;
+ } output_port_attr;
+
+ struct {
+ bool enable;
+ s32 fmt_type;
+ s32 align_req_in_bytes;
+ s32 bits_per_pixel;
+ s32 pixels_per_line;
+ s32 lines_per_frame;
+ } metadata;
+};
+
+typedef struct virtual_input_system_stream_s virtual_input_system_stream_t;
+struct virtual_input_system_stream_s {
+ u32 id; /*Used when multiple MIPI data types and/or virtual channels are used.
+ Must be unique within one CSI RX
+ and lower than SH_CSS_MAX_ISYS_CHANNEL_NODES */
+ u8 enable_metadata;
+ input_system_input_port_t input_port;
+ input_system_channel_t channel;
+ input_system_channel_t md_channel; /* metadata channel */
+ u8 online;
+ s8 linked_isys_stream_id;
+ u8 valid;
+};
+
+typedef struct virtual_input_system_stream_cfg_s
+ virtual_input_system_stream_cfg_t;
+struct virtual_input_system_stream_cfg_s {
+ u8 enable_metadata;
+ input_system_input_port_cfg_t input_port_cfg;
+ input_system_channel_cfg_t channel_cfg;
+ input_system_channel_cfg_t md_channel_cfg;
+ u8 valid;
+};
+
+#define ISP_INPUT_BUF_START_ADDR 0
+#define NUM_OF_INPUT_BUF 2
+#define NUM_OF_LINES_PER_BUF 2
+#define LINES_OF_ISP_INPUT_BUF (NUM_OF_INPUT_BUF * NUM_OF_LINES_PER_BUF)
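+/* With the values above this amounts to 2 buffers * 2 lines = 4 buffered
+ * input lines in total.
+ */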
+#define ISP_INPUT_BUF_STRIDE SH_CSS_MAX_SENSOR_WIDTH
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
new file mode 100644
index 000000000000..50e585530750
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_SYSTEM_2401_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_2401_LOCAL_H_INCLUDED__
+
+#include "csi_rx.h"
+#include "pixelgen.h"
+#include "isys_stream2mmio.h"
+#include "isys_irq.h"
+
+typedef enum {
+ MIPI_FORMAT_2401_SHORT1 = 0x08,
+ MIPI_FORMAT_2401_SHORT2,
+ MIPI_FORMAT_2401_SHORT3,
+ MIPI_FORMAT_2401_SHORT4,
+ MIPI_FORMAT_2401_SHORT5,
+ MIPI_FORMAT_2401_SHORT6,
+ MIPI_FORMAT_2401_SHORT7,
+ MIPI_FORMAT_2401_SHORT8,
+ MIPI_FORMAT_2401_EMBEDDED = 0x12,
+ MIPI_FORMAT_2401_YUV420_8 = 0x18,
+ MIPI_FORMAT_2401_YUV420_10,
+ MIPI_FORMAT_2401_YUV420_8_LEGACY,
+ MIPI_FORMAT_2401_YUV420_8_SHIFT = 0x1C,
+ MIPI_FORMAT_2401_YUV420_10_SHIFT,
+ MIPI_FORMAT_2401_YUV422_8 = 0x1E,
+ MIPI_FORMAT_2401_YUV422_10,
+ MIPI_FORMAT_2401_RGB444 = 0x20,
+ MIPI_FORMAT_2401_RGB555,
+ MIPI_FORMAT_2401_RGB565,
+ MIPI_FORMAT_2401_RGB666,
+ MIPI_FORMAT_2401_RGB888,
+ MIPI_FORMAT_2401_RAW6 = 0x28,
+ MIPI_FORMAT_2401_RAW7,
+ MIPI_FORMAT_2401_RAW8,
+ MIPI_FORMAT_2401_RAW10,
+ MIPI_FORMAT_2401_RAW12,
+ MIPI_FORMAT_2401_RAW14,
+ MIPI_FORMAT_2401_CUSTOM0 = 0x30,
+ MIPI_FORMAT_2401_CUSTOM1,
+ MIPI_FORMAT_2401_CUSTOM2,
+ MIPI_FORMAT_2401_CUSTOM3,
+ MIPI_FORMAT_2401_CUSTOM4,
+ MIPI_FORMAT_2401_CUSTOM5,
+ MIPI_FORMAT_2401_CUSTOM6,
+ MIPI_FORMAT_2401_CUSTOM7,
+ //MIPI_FORMAT_RAW16, /*not supported by 2401*/
+ //MIPI_FORMAT_RAW18,
+ N_MIPI_FORMAT_2401
+} mipi_format_2401_t;
+
+#define N_MIPI_FORMAT_CUSTOM 8
+
+/* The number of stores for compressed format types */
+#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
+
+#endif /* __INPUT_SYSTEM_2401_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
new file mode 100644
index 000000000000..36ae5445fdb4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__
+
+#include "input_system_public.h"
+
+#include "device_access.h" /* ia_css_device_load_uint32 */
+
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+/* Load the register value */
+static inline hrt_data ibuf_ctrl_reg_load(const ibuf_ctrl_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+/* Store a value to the register */
+static inline void ibuf_ctrl_reg_store(const ibuf_ctrl_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+}
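+
+/*
+ * Usage sketch (illustrative only; IBUF_CTRL0_ID and the register index
+ * are placeholders, not identifiers defined here):
+ *
+ *	hrt_data v = ibuf_ctrl_reg_load(IBUF_CTRL0_ID, 0);
+ *
+ *	ibuf_ctrl_reg_store(IBUF_CTRL0_ID, 0, v);
+ */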
+
+/* Get the state of the ibuf-controller process */
+static inline void ibuf_ctrl_get_proc_state(const ibuf_ctrl_ID_t ID,
+ const u32 proc_id,
+ ibuf_ctrl_proc_state_t *state)
+{
+ hrt_address reg_bank_offset;
+
+ reg_bank_offset =
+ _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id);
+
+ state->num_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_ITEMS_PER_STORE);
+
+ state->num_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_STORES_PER_FRAME);
+
+ state->dma_channel =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CHANNEL);
+
+ state->dma_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CMD);
+
+ state->ibuf_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_START_ADDRESS);
+
+ state->ibuf_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_STRIDE);
+
+ state->ibuf_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_END_ADDRESS);
+
+ state->dest_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_START_ADDRESS);
+
+ state->dest_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_STRIDE);
+
+ state->dest_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_END_ADDRESS);
+
+ state->sync_frame =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SYNC_FRAME);
+
+ state->sync_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_SYNC_CMD);
+
+ state->store_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_STORE_CMD);
+
+ state->shift_returned_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SHIFT_ITEMS);
+
+ state->elems_ibuf =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_IBUF);
+
+ state->elems_dest =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_DEST);
+
+ state->cur_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_STORES);
+
+ state->cur_acks =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ACKS);
+
+ state->cur_s2m_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_S2M_IBUF_ADDR);
+
+ state->cur_dma_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_IBUF_ADDR);
+
+ state->cur_dma_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_DEST_ADDR);
+
+ state->cur_isp_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ISP_DEST_ADDR);
+
+ state->dma_cmds_send =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND);
+
+ state->main_cntrl_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_MAIN_CNTRL_STATE);
+
+ state->dma_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_SYNC_STATE);
+
+ state->isp_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ISP_SYNC_STATE);
+}
+
+/* Get the ibuf-controller state. */
+static inline void ibuf_ctrl_get_state(const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ u32 i;
+
+ state->recalc_words =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_RECALC_WORDS_STATUS);
+ state->arbiters =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_ARBITERS_STATUS);
+
+ /*
+ * Get the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ibuf_ctrl_get_proc_state(
+ ID,
+ i,
+ &state->proc_state[i]);
+ }
+}
+
+/* Dump the ibuf-controller state */
+static inline void ibuf_ctrl_dump_state(const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ u32 i;
+
+ ia_css_print("IBUF controller ID %d recalculate words 0x%x\n", ID,
+ state->recalc_words);
+ ia_css_print("IBUF controller ID %d arbiters 0x%x\n", ID, state->arbiters);
+
+ /*
+ * Dump the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ia_css_print("IBUF controller ID %d Process ID %d num_items 0x%x\n", ID, i,
+ state->proc_state[i].num_items);
+ ia_css_print("IBUF controller ID %d Process ID %d num_stores 0x%x\n", ID, i,
+ state->proc_state[i].num_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_channel 0x%x\n", ID, i,
+ state->proc_state[i].dma_channel);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_command 0x%x\n", ID, i,
+ state->proc_state[i].dma_command);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_st_addr 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_stride 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_end_addr 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_st_addr 0x%x\n", ID, i,
+ state->proc_state[i].dest_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_stride 0x%x\n", ID, i,
+ state->proc_state[i].dest_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_end_addr 0x%x\n", ID, i,
+ state->proc_state[i].dest_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_frame 0x%x\n", ID, i,
+ state->proc_state[i].sync_frame);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_command 0x%x\n", ID, i,
+ state->proc_state[i].sync_command);
+ ia_css_print("IBUF controller ID %d Process ID %d store_command 0x%x\n", ID, i,
+ state->proc_state[i].store_command);
+ ia_css_print("IBUF controller ID %d Process ID %d shift_returned_items 0x%x\n",
+ ID, i,
+ state->proc_state[i].shift_returned_items);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_ibuf 0x%x\n", ID, i,
+ state->proc_state[i].elems_ibuf);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_dest 0x%x\n", ID, i,
+ state->proc_state[i].elems_dest);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_stores 0x%x\n", ID, i,
+ state->proc_state[i].cur_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_acks 0x%x\n", ID, i,
+ state->proc_state[i].cur_acks);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_s2m_ibuf_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_s2m_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_ibuf_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_dma_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_dest_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_dma_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_isp_dest_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_isp_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_cmds_send 0x%x\n", ID, i,
+ state->proc_state[i].dma_cmds_send);
+ ia_css_print("IBUF controller ID %d Process ID %d main_cntrl_state 0x%x\n", ID,
+ i,
+ state->proc_state[i].main_cntrl_state);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_sync_state 0x%x\n", ID, i,
+ state->proc_state[i].dma_sync_state);
+ ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i,
+ state->proc_state[i].isp_sync_state);
+ }
+}
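+
+/*
+ * Debug sketch (illustrative only): snapshot and print the state of one
+ * ibuf controller, assuming IBUF_CTRL0_ID is a valid identifier.
+ *
+ *	ibuf_ctrl_state_t state;
+ *
+ *	ibuf_ctrl_get_state(IBUF_CTRL0_ID, &state);
+ *	ibuf_ctrl_dump_state(IBUF_CTRL0_ID, &state);
+ */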
+
+#endif /* __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/isp_acquisition_defs.h
new file mode 100644
index 000000000000..b04404ad59d9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp_acquisition_defs.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _isp_acquisition_defs_h
+#define _isp_acquisition_defs_h
+
+#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_ACQUISITION_BYTES_PER_ELEM 4
+
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_IRQS 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define MEM2STREAM_FSM_STATE_BITS 2
+#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_REGS 12
+
+// Register id's of MMIO slave accessible registers
+#define ACQ_START_ADDR_REG_ID 0
+#define ACQ_MEM_REGION_SIZE_REG_ID 1
+#define ACQ_NUM_MEM_REGIONS_REG_ID 2
+#define ACQ_INIT_REG_ID 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4
+#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5
+#define ACQ_LAST_COMMAND_REG_ID 6
+#define ACQ_NEXT_COMMAND_REG_ID 7
+#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8
+#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9
+#define ACQ_FSM_STATE_INFO_REG_ID 10
+#define ACQ_INT_CNTR_INFO_REG_ID 11
+
+// Register width
+#define ACQ_START_ADDR_REG_WIDTH 9
+#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ACQ_INIT_REG_WIDTH 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define ACQ_LAST_COMMAND_REG_WIDTH 32
+#define ACQ_NEXT_COMMAND_REG_WIDTH 32
+#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_FSM_STATE_INFO_REG_WIDTH ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS * 3))
+#define ACQ_INT_CNTR_INFO_REG_WIDTH 32
+
+/* register reset value */
+#define ACQ_START_ADDR_REG_RSTVAL 0
+#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ACQ_INIT_REG_RSTVAL 0
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define ACQ_LAST_COMMAND_REG_RSTVAL 0
+#define ACQ_NEXT_COMMAND_REG_RSTVAL 0
+#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define ACQ_INIT_RST_REG_BIT 0
+#define ACQ_INIT_RESYNC_BIT 2
+#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT
+#define ACQ_INIT_RST_BITS 1
+#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT
+#define ACQ_INIT_RESYNC_BITS 1
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define ACQ_TOKEN_ID_LSB 0
+#define ACQ_TOKEN_ID_MSB 3
+#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4
+#define ACQ_TOKEN_ID_IDX 0
+#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH
+#define ACQ_INIT_CMD_INIT_IDX 4
+#define ACQ_INIT_CMD_INIT_BITS 3
+#define ACQ_CMD_START_ADDR_IDX 4
+#define ACQ_CMD_START_ADDR_BITS 9
+#define ACQ_CMD_NOFWORDS_IDX 13
+#define ACQ_CMD_NOFWORDS_BITS 9
+#define ACQ_MEM_REGION_ID_IDX 22
+#define ACQ_MEM_REGION_ID_BITS 9
+#define ACQ_PACKET_LENGTH_TOKEN_MSB 21
+#define ACQ_PACKET_LENGTH_TOKEN_LSB 13
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4
+#define ACQ_PACKET_CH_ID_TOKEN_MSB 11
+#define ACQ_PACKET_CH_ID_TOKEN_LSB 10
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */
+
+/* Command tokens IDs */
+#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b
+#define ACQ_READ_REGION_TOKEN_ID 1 //0001b
+#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b
+#define ACQ_INIT_TOKEN_ID 8 //1000b
+
+/* Acknowledge token IDs */
+#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b
+#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b
+#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b
+#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b
+#define ACQ_UNDEF_PH_TOKEN_ID 7 //0111b
+
+#define ACQ_TOKEN_MEMREGIONID_MSB 30
+#define ACQ_TOKEN_MEMREGIONID_LSB 22
+#define ACQ_TOKEN_NOFWORDS_MSB 21
+#define ACQ_TOKEN_NOFWORDS_LSB 13
+#define ACQ_TOKEN_STARTADDR_MSB 12
+#define ACQ_TOKEN_STARTADDR_LSB 4
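+
+/* Worked example (illustrative only): a read-region command token is
+ * assembled by shifting each field to its index, e.g.
+ *
+ *	token = (ACQ_READ_REGION_TOKEN_ID << ACQ_TOKEN_ID_IDX) |
+ *		(start_addr << ACQ_CMD_START_ADDR_IDX) |
+ *		(nof_words << ACQ_CMD_NOFWORDS_IDX) |
+ *		(region_id << ACQ_MEM_REGION_ID_IDX);
+ *
+ * where start_addr, nof_words and region_id each fit in 9 bits.
+ */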
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define WORD_COUNT_WIDTH 16
+#define PKT_CODE_WIDTH 6
+#define CHN_NO_WIDTH 2
+#define ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+#define EOF_CODE 1
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define ACQ_START_OF_FRAME 0
+#define ACQ_END_OF_FRAME 1
+#define ACQ_START_OF_LINE 2
+#define ACQ_END_OF_LINE 3
+#define ACQ_LINE_PAYLOAD 4
+#define ACQ_GEN_SH_PKT 5
+
+/* bit definition */
+#define ACQ_PKT_TYPE_IDX 16
+#define ACQ_PKT_TYPE_BITS 6
+#define ACQ_PKT_SOP_IDX 32
+#define ACQ_WORD_CNT_IDX 0
+#define ACQ_WORD_CNT_BITS 16
+#define ACQ_PKT_INFO_IDX 16
+#define ACQ_PKT_INFO_BITS 8
+#define ACQ_HEADER_DATA_IDX 0
+#define ACQ_HEADER_DATA_BITS 16
+#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX
+#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS
+#define ACQ_ACK_NOFWORDS_IDX 13
+#define ACQ_ACK_NOFWORDS_BITS 9
+#define ACQ_ACK_PKT_LEN_IDX 4
+#define ACQ_ACK_PKT_LEN_BITS 16
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define ACQ_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define ACQ_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */
+#define ACQ_SOF_DATA 0 /* 00 0000 frame start */
+#define ACQ_EOF_DATA 1 /* 00 0001 frame end */
+#define ACQ_SOL_DATA 2 /* 00 0010 line start */
+#define ACQ_EOL_DATA 3 /* 00 0011 line end */
+#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_RESERVED_DATA_TYPE_MIN 56
+#define ACQ_RESERVED_DATA_TYPE_MAX 63
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define ACQ_YUV_RESERVED_DATA_TYPE 27
+#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37
+#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39
+#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46
+#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_acquisition_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/isp_capture_defs.h
new file mode 100644
index 000000000000..b1fbf492a809
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/isp_capture_defs.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _isp_capture_defs_h
+#define _isp_capture_defs_h
+
+#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */
+#define _ISP_CAPTURE_BYTES_PER_ELEM (_ISP_CAPTURE_BITS_PER_ELEM / 8)
+#define _ISP_CAPTURE_BYTES_PER_WORD 32 /* 256/8 */
+#define _ISP_CAPTURE_ELEM_PER_WORD (_ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM)
+
+/* --------------------------------------------------*/
+
+#define NOF_IRQS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define CAPT_NOF_REGS 16
+
+// Register id's of MMIO slave accessible registers
+#define CAPT_START_MODE_REG_ID 0
+#define CAPT_START_ADDR_REG_ID 1
+#define CAPT_MEM_REGION_SIZE_REG_ID 2
+#define CAPT_NUM_MEM_REGIONS_REG_ID 3
+#define CAPT_INIT_REG_ID 4
+#define CAPT_START_REG_ID 5
+#define CAPT_STOP_REG_ID 6
+
+#define CAPT_PACKET_LENGTH_REG_ID 7
+#define CAPT_RECEIVED_LENGTH_REG_ID 8
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9
+#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10
+#define CAPT_LAST_COMMAND_REG_ID 11
+#define CAPT_NEXT_COMMAND_REG_ID 12
+#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13
+#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14
+#define CAPT_FSM_STATE_INFO_REG_ID 15
+
+// Register width
+#define CAPT_START_MODE_REG_WIDTH 1
+
+#define CAPT_START_REG_WIDTH 1
+#define CAPT_STOP_REG_WIDTH 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define CAPT_WRITE2MEM_FSM_STATE_BITS 2
+#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3
+
+#define CAPT_PACKET_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define CAPT_LAST_COMMAND_REG_WIDTH 32
+#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3))
+
+/* register reset value */
+#define CAPT_START_MODE_REG_RSTVAL 0
+#define CAPT_START_ADDR_REG_RSTVAL 0
+#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128
+#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define CAPT_INIT_REG_RSTVAL 0
+
+#define CAPT_START_REG_RSTVAL 0
+#define CAPT_STOP_REG_RSTVAL 0
+
+#define CAPT_PACKET_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define CAPT_LAST_COMMAND_REG_RSTVAL 0
+#define CAPT_NEXT_COMMAND_REG_RSTVAL 0
+#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define CAPT_INIT_RST_REG_BIT 0
+#define CAPT_INIT_FLUSH_BIT 1
+#define CAPT_INIT_RESYNC_BIT 2
+#define CAPT_INIT_RESTART_BIT 3
+#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4
+
+#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT
+#define CAPT_INIT_RST_REG_BITS 1
+#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT
+#define CAPT_INIT_FLUSH_BITS 1
+#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT
+#define CAPT_INIT_RESYNC_BITS 1
+#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT
+#define CAPT_INIT_RESTART_BITS 1
+#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define CAPT_TOKEN_ID_LSB 0
+#define CAPT_TOKEN_ID_MSB 3
+#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */
+
+/* Command tokens IDs */
+#define CAPT_START_TOKEN_ID 0 /* 0000b */
+#define CAPT_STOP_TOKEN_ID 1 /* 0001b */
+#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */
+#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */
+#define CAPT_INIT_TOKEN_ID 8 /* 1000b */
+
+#define CAPT_START_TOKEN_BIT 0
+#define CAPT_STOP_TOKEN_BIT 0
+#define CAPT_FREEZE_TOKEN_BIT 0
+#define CAPT_RESUME_TOKEN_BIT 0
+#define CAPT_INIT_TOKEN_BIT 0
+
+/* Acknowledge token IDs */
+#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */
+#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */
+#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */
+#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */
+#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */
+#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */
+#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */
+#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */
+
+#define CAPT_PACKET_LENGTH_TOKEN_MSB 19
+#define CAPT_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20
+#define CAPT_PACKET_CH_ID_TOKEN_MSB 27
+#define CAPT_PACKET_CH_ID_TOKEN_LSB 26
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21
+
+/* bit definition */
+#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB
+#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1)
+#define CAPT_SOP_IDX 32
+#define CAPT_SOP_BITS 1
+#define CAPT_PKT_INFO_IDX 16
+#define CAPT_PKT_INFO_BITS 8
+#define CAPT_PKT_TYPE_IDX 0
+#define CAPT_PKT_TYPE_BITS 6
+#define CAPT_HEADER_DATA_IDX 0
+#define CAPT_HEADER_DATA_BITS 16
+#define CAPT_PKT_DATA_IDX 0
+#define CAPT_PKT_DATA_BITS 32
+#define CAPT_WORD_CNT_IDX 0
+#define CAPT_WORD_CNT_BITS 16
+#define CAPT_ACK_TOKEN_ID_IDX 0
+#define CAPT_ACK_TOKEN_ID_BITS 4
+//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+//#define CAPT_ACK_PKT_INFO_IDX 20
+//#define CAPT_ACK_PKT_INFO_BITS 8
+//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */
+//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */
+#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB
+#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_INIT_TOKEN_INIT_IDX 4
+#define CAPT_INIT_TOKEN_INIT_BITS 22
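+
+/* Worked example (illustrative only): acknowledge token fields are
+ * decoded by shifting and masking with the definitions above, e.g.
+ *
+ *	ack_id  = (token >> CAPT_ACK_TOKEN_ID_IDX) &
+ *		  ((1 << CAPT_ACK_TOKEN_ID_BITS) - 1);
+ *	pkt_len = (token >> CAPT_ACK_PKT_LEN_IDX) &
+ *		  ((1 << CAPT_ACK_PKT_LEN_BITS) - 1);
+ */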
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define CAPT_WORD_COUNT_WIDTH 16
+#define CAPT_PKT_CODE_WIDTH 6
+#define CAPT_CHN_NO_WIDTH 2
+#define CAPT_ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define CAPT_START_OF_FRAME 0
+#define CAPT_END_OF_FRAME 1
+#define CAPT_START_OF_LINE 2
+#define CAPT_END_OF_LINE 3
+#define CAPT_LINE_PAYLOAD 4
+#define CAPT_GEN_SH_PKT 5
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define CAPT_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */
+#define CAPT_SOF_DATA 0 /* 00 0000 frame start */
+#define CAPT_EOF_DATA 1 /* 00 0001 frame end */
+#define CAPT_SOL_DATA 2 /* 00 0010 line start */
+#define CAPT_EOL_DATA 3 /* 00 0011 line end */
+#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_RESERVED_DATA_TYPE_MIN 56
+#define CAPT_RESERVED_DATA_TYPE_MAX 63
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define CAPT_YUV_RESERVED_DATA_TYPE 27
+#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37
+#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39
+#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46
+#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47
+
+/* --------------------------------------------------*/
+/* Capture Unit State */
+/* --------------------------------------------------*/
+#define CAPT_FREE_RUN 0
+#define CAPT_NO_SYNC 1
+#define CAPT_SYNC_SWP 2
+#define CAPT_SYNC_MWP 3
+#define CAPT_SYNC_WAIT 4
+#define CAPT_FREEZE 5
+#define CAPT_RUN 6
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_capture_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/mamoiada_params.h b/drivers/staging/media/atomisp/pci/mamoiada_params.h
new file mode 100644
index 000000000000..a4831f1e02db
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/mamoiada_params.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+/* Version */
+#define RTL_VERSION
+
+/* instruction pipeline depth */
+#define ISP_BRANCHDELAY 5
+
+/* bus */
+#define ISP_BUS_WIDTH 32
+#define ISP_BUS_ADDR_WIDTH 32
+#define ISP_BUS_BURST_SIZE 1
+
+/* data-path */
+#define ISP_SCALAR_WIDTH 32
+#define ISP_SLICE_NELEMS 4
+#define ISP_VEC_NELEMS 64
+#define ISP_VEC_ELEMBITS 14
+#define ISP_VEC_ELEM8BITS 16
+#define ISP_CLONE_DATAPATH_IS_16 1
+
+/* memories */
+#define ISP_DMEM_DEPTH 4096
+#define ISP_DMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_DEPTH 3072
+#define ISP_VMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_ELEMBITS 14
+#define ISP_VMEM_ELEM_PRECISION 14
+#define ISP_PMEM_DEPTH 2048
+#define ISP_PMEM_WIDTH 640
+#define ISP_VAMEM_ADDRESS_BITS 12
+#define ISP_VAMEM_ELEMBITS 12
+#define ISP_VAMEM_DEPTH 2048
+#define ISP_VAMEM_ALIGNMENT 2
+#define ISP_VA_ADDRESS_WIDTH 896
+#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+
+/* program counter */
+#define ISP_PC_WIDTH 13
+
+/* Template switches */
+#define ISP_SHIELD_INPUT_DMEM 0
+#define ISP_SHIELD_OUTPUT_DMEM 1
+#define ISP_SHIELD_INPUT_VMEM 0
+#define ISP_SHIELD_OUTPUT_VMEM 0
+#define ISP_SHIELD_INPUT_PMEM 1
+#define ISP_SHIELD_OUTPUT_PMEM 1
+#define ISP_SHIELD_INPUT_HIST 1
+#define ISP_SHIELD_OUTPUT_HIST 1
+/* When the LUT is selected, the shielding is always on */
+#define ISP_SHIELD_INPUT_VAMEM 1
+#define ISP_SHIELD_OUTPUT_VAMEM 1
+
+#define ISP_HAS_IRQ 1
+#define ISP_HAS_SOFT_RESET 1
+#define ISP_HAS_VEC_DIV 0
+#define ISP_HAS_VFU_W_2O 1
+#define ISP_HAS_DEINT3 1
+#define ISP_HAS_LUT 1
+#define ISP_HAS_HIST 1
+#define ISP_HAS_VALSU 1
+#define ISP_HAS_3rdVALSU 1
+#define ISP_VRF1_HAS_2P 1
+
+#define ISP_SRU_GUARDING 1
+#define ISP_VLSU_GUARDING 1
+
+#define ISP_VRF_RAM 1
+#define ISP_SRF_RAM 1
+
+#define ISP_SPLIT_VMUL_VADD_IS 0
+#define ISP_RFSPLIT_FPGA 0
+
+/* RSN or Bus pipelining */
+#define ISP_RSN_PIPE 1
+#define ISP_VSF_BUS_PIPE 0
+
+/* extra slave port to vmem */
+#define ISP_IF_VMEM 0
+#define ISP_GDC_VMEM 0
+
+/* Streaming ports */
+#define ISP_IF 1
+#define ISP_IF_B 1
+#define ISP_GDC 1
+#define ISP_SCL 1
+#define ISP_GPFIFO 1
+#define ISP_SP 1
+
+/* Removing Issue Slot(s) */
+#define ISP_HAS_NOT_SIMD_IS2 0
+#define ISP_HAS_NOT_SIMD_IS3 0
+#define ISP_HAS_NOT_SIMD_IS4 0
+#define ISP_HAS_NOT_SIMD_IS4_VADD 0
+#define ISP_HAS_NOT_SIMD_IS5 0
+#define ISP_HAS_NOT_SIMD_IS6 0
+#define ISP_HAS_NOT_SIMD_IS7 0
+#define ISP_HAS_NOT_SIMD_IS8 0
+
+/* ICache */
+#define ISP_ICACHE 1
+#define ISP_ICACHE_ONLY 0
+#define ISP_ICACHE_PREFETCH 1
+#define ISP_ICACHE_INDEX_BITS 8
+#define ISP_ICACHE_SET_BITS 5
+#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1
+
+/* Experimental Flags */
+#define ISP_EXP_1 0
+#define ISP_EXP_2 0
+#define ISP_EXP_3 0
+#define ISP_EXP_4 0
+#define ISP_EXP_5 0
+#define ISP_EXP_6 0
+
+/* Derived values */
+#define ISP_LOG2_PMEM_WIDTH 10
+#define ISP_VEC_WIDTH 896
+#define ISP_SLICE_WIDTH 56
+#define ISP_VMEM_WIDTH 896
+#define ISP_VMEM_ALIGN 128
+#define ISP_SIMDLSU 1
+#define ISP_LSU_IMM_BITS 12
+
+/* convenient shortcuts for software */
+#define ISP_NWAY ISP_VEC_NELEMS
+#define NBITS ISP_VEC_ELEMBITS
+
+#define _isp_ceil_div(a, b) (((a) + (b) - 1) / (b))
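+/* e.g. _isp_ceil_div(100, ISP_VEC_NELEMS) == 2 for ISP_VEC_NELEMS == 64:
+ * the number of 64-element vectors needed to hold 100 elements.
+ */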
+
+#define ISP_VEC_ALIGN ISP_VMEM_ALIGN
+
+/* register file sizes */
+#define ISP_RF0_SIZE 64
+#define ISP_RF1_SIZE 16
+#define ISP_RF2_SIZE 64
+#define ISP_RF3_SIZE 4
+#define ISP_RF4_SIZE 64
+#define ISP_RF5_SIZE 16
+#define ISP_RF6_SIZE 16
+#define ISP_RF7_SIZE 16
+#define ISP_RF8_SIZE 16
+#define ISP_RF9_SIZE 16
+#define ISP_RF10_SIZE 16
+#define ISP_RF11_SIZE 16
+
+#define ISP_SRF1_SIZE 4
+#define ISP_SRF2_SIZE 64
+#define ISP_SRF3_SIZE 64
+#define ISP_SRF4_SIZE 32
+#define ISP_SRF5_SIZE 64
+#define ISP_FRF0_SIZE 16
+#define ISP_FRF1_SIZE 4
+#define ISP_FRF2_SIZE 16
+#define ISP_FRF3_SIZE 4
+#define ISP_FRF4_SIZE 4
+#define ISP_FRF5_SIZE 8
+#define ISP_FRF6_SIZE 4
+/* register file read latency */
+#define ISP_VRF1_READ_LAT 1
+#define ISP_VRF2_READ_LAT 1
+#define ISP_VRF3_READ_LAT 1
+#define ISP_VRF4_READ_LAT 1
+#define ISP_VRF5_READ_LAT 1
+#define ISP_VRF6_READ_LAT 1
+#define ISP_VRF7_READ_LAT 1
+#define ISP_VRF8_READ_LAT 1
+#define ISP_SRF1_READ_LAT 1
+#define ISP_SRF2_READ_LAT 1
+#define ISP_SRF3_READ_LAT 1
+#define ISP_SRF4_READ_LAT 1
+#define ISP_SRF5_READ_LAT               1
+/* immediate sizes */
+#define ISP_IS1_IMM_BITS 14
+#define ISP_IS2_IMM_BITS 13
+#define ISP_IS3_IMM_BITS 14
+#define ISP_IS4_IMM_BITS 14
+#define ISP_IS5_IMM_BITS 9
+#define ISP_IS6_IMM_BITS 16
+#define ISP_IS7_IMM_BITS 9
+#define ISP_IS8_IMM_BITS 16
+#define ISP_IS9_IMM_BITS 11
+/* fifo depths */
+#define ISP_IF_FIFO_DEPTH 0
+#define ISP_IF_B_FIFO_DEPTH 0
+#define ISP_DMA_FIFO_DEPTH 0
+#define ISP_OF_FIFO_DEPTH 0
+#define ISP_GDC_FIFO_DEPTH 0
+#define ISP_SCL_FIFO_DEPTH 0
+#define ISP_GPFIFO_FIFO_DEPTH 0
+#define ISP_SP_FIFO_DEPTH 0
diff --git a/drivers/staging/media/atomisp/pci/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/mmu/isp_mmu.c
new file mode 100644
index 000000000000..5193a7eb7d9f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/mmu/isp_mmu.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+/*
+ * ISP MMU management wrapper code
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/mm.h> /* for GFP_ATOMIC */
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include "atomisp_internal.h"
+#include "mmu/isp_mmu.h"
+
+/*
+ * 64-bit x86 processor physical address layout:
+ * 0 - 0x7fffffff DDR RAM (2GB)
+ * 0x80000000 - 0xffffffff MMIO (2GB)
+ * 0x100000000 - 0x3fffffffffff DDR RAM (64TB)
+ * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
+ * physical address 0 - 0x7fffffff and the rest will start from 0x100000000.
+ * We have to make sure memory is allocated from the lower 2GB for devices
+ * that are only 32-bit capable (e.g. the ISP MMU).
+ *
+ * For any confusion, contact bin.gao@intel.com.
+ */
+#define NR_PAGES_2GB (SZ_2G / PAGE_SIZE)
+
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt);
+
+static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
+{
+ unsigned int *pt_virt = phys_to_virt(pt);
+
+ return *(pt_virt + idx);
+}
+
+static void atomisp_set_pte(phys_addr_t pt,
+ unsigned int idx, unsigned int pte)
+{
+ unsigned int *pt_virt = phys_to_virt(pt);
+ *(pt_virt + idx) = pte;
+}
+
+static void *isp_pt_phys_to_virt(phys_addr_t phys)
+{
+ return phys_to_virt(phys);
+}
+
+static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
+ unsigned int pte)
+{
+ return mmu->driver->pte_to_phys(mmu, pte);
+}
+
+static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);
+
+ return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
+}
+
+/*
+ * allocate an uncacheable page table.
+ * return physical address.
+ */
+static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
+{
+ int i;
+ phys_addr_t page;
+ void *virt;
+
+ virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+
+ if (!virt)
+ return (phys_addr_t)NULL_PAGE;
+
+ /*
+	 * we need an uncacheable page table.
+ */
+#ifdef CONFIG_X86
+ set_memory_uc((unsigned long)virt, 1);
+#endif
+
+ page = virt_to_phys(virt);
+
+ for (i = 0; i < 1024; i++) {
+ /* NEED CHECK */
+ atomisp_set_pte(page, i, mmu->driver->null_pte);
+ }
+
+ return page;
+}
+
+static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
+{
+ void *virt;
+
+ page &= ISP_PAGE_MASK;
+ /*
+ * reset the page to write back before free
+ */
+ virt = phys_to_virt(page);
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)virt, 1);
+#endif
+
+ free_page((unsigned long)virt);
+}
+
+static void mmu_remap_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ phys_addr_t l2_pt, unsigned int l2_idx,
+ unsigned int isp_virt, phys_addr_t old_phys,
+ phys_addr_t new_phys)
+{
+ dev_err(atomisp_dev, "address remap:\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\told: isp_virt = 0x%x, phys = 0x%llx\n"
+ "\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx,
+ isp_pt_phys_to_virt(l2_pt),
+ (u64)l2_pt, l2_idx, isp_virt,
+ (u64)old_phys, isp_virt,
+ (u64)new_phys);
+}
+
+static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ phys_addr_t l2_pt, unsigned int l2_idx,
+ unsigned int isp_virt, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx,
+ isp_pt_phys_to_virt(l2_pt),
+ (u64)l2_pt, l2_idx, isp_virt,
+ pte);
+}
+
+static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ unsigned int isp_virt, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
+ "\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx, (unsigned int)isp_virt,
+ pte);
+}
+
+static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
+ "L1PT = 0x%x\n", (unsigned int)pte);
+}
+
+/*
+ * Update L2 page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int l1_idx, phys_addr_t l2_pt,
+ unsigned int start, unsigned int end, phys_addr_t phys)
+{
+ unsigned int ptr;
+ unsigned int idx;
+ unsigned int pte;
+
+ l2_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+ phys &= ISP_PAGE_MASK;
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L2_IDX(ptr);
+
+ pte = atomisp_get_pte(l2_pt, idx);
+
+ if (ISP_PTE_VALID(mmu, pte)) {
+ mmu_remap_error(mmu, l1_pt, l1_idx,
+ l2_pt, idx, ptr, pte, phys);
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
+ }
+
+ pte = isp_pgaddr_to_pte_valid(mmu, phys);
+
+ atomisp_set_pte(l2_pt, idx, pte);
+ mmu->l2_pgt_refcount[l1_idx]++;
+ ptr += (1U << ISP_L2PT_OFFSET);
+ phys += (1U << ISP_L2PT_OFFSET);
+ } while (ptr < end && idx < ISP_L2PT_PTES - 1);
+
+ return 0;
+}
+
+/*
+ * Update L1 page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int start, unsigned int end,
+ phys_addr_t phys)
+{
+ phys_addr_t l2_pt;
+ unsigned int ptr, l1_aligned;
+ unsigned int idx;
+ unsigned int l2_pte;
+ int ret;
+
+ l1_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+ phys &= ISP_PAGE_MASK;
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L1_IDX(ptr);
+
+ l2_pte = atomisp_get_pte(l1_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, l2_pte)) {
+ l2_pt = alloc_page_table(mmu);
+ if (l2_pt == NULL_PAGE) {
+ dev_err(atomisp_dev,
+ "alloc page table fail.\n");
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -ENOMEM;
+ }
+
+ l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);
+
+ atomisp_set_pte(l1_pt, idx, l2_pte);
+ mmu->l2_pgt_refcount[idx] = 0;
+ }
+
+ l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
+
+ l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
+
+ if (l1_aligned < end) {
+ ret = mmu_l2_map(mmu, l1_pt, idx,
+ l2_pt, ptr, l1_aligned, phys);
+ phys += (l1_aligned - ptr);
+ ptr = l1_aligned;
+ } else {
+ ret = mmu_l2_map(mmu, l1_pt, idx,
+ l2_pt, ptr, end, phys);
+ phys += (end - ptr);
+ ptr = end;
+ }
+
+ if (ret) {
+ dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
+ }
+ } while (ptr < end && idx < ISP_L1PT_PTES);
+
+ return 0;
+}
+
+/*
+ * Update page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr)
+{
+ unsigned int start, end;
+ phys_addr_t l1_pt;
+ int ret;
+
+ mutex_lock(&mmu->pt_mutex);
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ /*
+ * allocate 1 new page for L1 page table
+ */
+ l1_pt = alloc_page_table(mmu);
+ if (l1_pt == NULL_PAGE) {
+ dev_err(atomisp_dev, "alloc page table fail.\n");
+ mutex_unlock(&mmu->pt_mutex);
+ return -ENOMEM;
+ }
+
+ /*
+ * setup L1 page table physical addr to MMU
+ */
+ mmu->base_address = l1_pt;
+ mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
+ memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ start = (isp_virt) & ISP_PAGE_MASK;
+ end = start + (pgnr << ISP_PAGE_OFFSET);
+ phys &= ISP_PAGE_MASK;
+
+ ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
+
+ if (ret)
+ dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");
+
+ mutex_unlock(&mmu->pt_mutex);
+ return ret;
+}
+
+/*
+ * Free L2 page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int l1_idx, phys_addr_t l2_pt,
+ unsigned int start, unsigned int end)
+{
+ unsigned int ptr;
+ unsigned int idx;
+ unsigned int pte;
+
+ l2_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L2_IDX(ptr);
+
+ pte = atomisp_get_pte(l2_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, pte))
+ mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
+ l2_pt, idx, ptr, pte);
+
+ atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
+ mmu->l2_pgt_refcount[l1_idx]--;
+ ptr += (1U << ISP_L2PT_OFFSET);
+ } while (ptr < end && idx < ISP_L2PT_PTES - 1);
+
+ if (mmu->l2_pgt_refcount[l1_idx] == 0) {
+ free_page_table(mmu, l2_pt);
+ atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
+ }
+}
+
+/*
+ * Free L1 page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int start, unsigned int end)
+{
+ phys_addr_t l2_pt;
+ unsigned int ptr, l1_aligned;
+ unsigned int idx;
+ unsigned int l2_pte;
+
+ l1_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L1_IDX(ptr);
+
+ l2_pte = atomisp_get_pte(l1_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, l2_pte)) {
+ mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
+ continue;
+ }
+
+ l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
+
+ l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
+
+ if (l1_aligned < end) {
+ mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
+ ptr = l1_aligned;
+ } else {
+ mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
+ ptr = end;
+ }
+ /*
+ * use the same L2 page next time, so we don't
+ * need to invalidate and free this PT.
+ */
+ /* atomisp_set_pte(l1_pt, idx, NULL_PTE); */
+ } while (ptr < end && idx < ISP_L1PT_PTES);
+}
+
+/*
+ * Free page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr)
+{
+ unsigned int start, end;
+ phys_addr_t l1_pt;
+
+ mutex_lock(&mmu->pt_mutex);
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
+ mutex_unlock(&mmu->pt_mutex);
+ return;
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ start = (isp_virt) & ISP_PAGE_MASK;
+ end = start + (pgnr << ISP_PAGE_OFFSET);
+
+ mmu_l1_unmap(mmu, l1_pt, start, end);
+ mutex_unlock(&mmu->pt_mutex);
+}
+
+/*
+ * Free page tables according to isp start virtual address and end virtual
+ * address.
+ */
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt)
+{
+ unsigned int pgnr;
+ unsigned int start, end;
+
+ start = (start_isp_virt) & ISP_PAGE_MASK;
+ end = (end_isp_virt) & ISP_PAGE_MASK;
+ pgnr = (end - start) >> ISP_PAGE_OFFSET;
+ mmu_unmap(mmu, start, pgnr);
+}
+
+int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr)
+{
+ return mmu_map(mmu, isp_virt, phys, pgnr);
+}
+
+void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr)
+{
+ mmu_unmap(mmu, isp_virt, pgnr);
+}
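+
+/*
+ * Usage sketch (illustrative only): map pgnr physically contiguous pages
+ * at ISP virtual address isp_virt, then undo the mapping.
+ *
+ *	ret = isp_mmu_map(mmu, isp_virt, phys, pgnr);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	isp_mmu_unmap(mmu, isp_virt, pgnr);
+ */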
+
+static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
+ unsigned int start,
+ unsigned int size)
+{
+ isp_mmu_flush_tlb(mmu);
+}
+
+/* MMU init for internal structure */
+int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
+{
+ if (!mmu) /* error */
+ return -EINVAL;
+ if (!driver) /* error */
+ return -EINVAL;
+
+ if (!driver->name)
+ dev_warn(atomisp_dev, "NULL name for MMU driver...\n");
+
+ mmu->driver = driver;
+
+ if (!driver->tlb_flush_all) {
+ dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n");
+ return -EINVAL;
+ }
+
+ if (!driver->tlb_flush_range)
+ driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;
+
+ if (!driver->pte_valid_mask) {
+ dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
+ return -EINVAL;
+ }
+
+ mmu->l1_pte = driver->null_pte;
+
+ mutex_init(&mmu->pt_mutex);
+
+ return 0;
+}
+
+/* Free the L1 and all L2 page tables */
+void isp_mmu_exit(struct isp_mmu *mmu)
+{
+ unsigned int idx;
+ unsigned int pte;
+ phys_addr_t l1_pt, l2_pt;
+
+ if (!mmu)
+ return;
+
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
+ (unsigned int)mmu->l1_pte);
+ return;
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
+ pte = atomisp_get_pte(l1_pt, idx);
+
+ if (ISP_PTE_VALID(mmu, pte)) {
+ l2_pt = isp_pte_to_pgaddr(mmu, pte);
+
+ free_page_table(mmu, l2_pt);
+ }
+ }
+
+ free_page_table(mmu, l1_pt);
+}
diff --git a/drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c b/drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c
new file mode 100644
index 000000000000..c9890892c8aa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Merrifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2012 Silicon Hive www.siliconhive.com.
+ */
+#include "type_support.h"
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+#include "atomisp_compat.h"
+
+#define MERR_VALID_PTE_MASK 0x80000000
+
+/*
+ * include SH header file here
+ */
+
+static unsigned int sh_phys_to_pte(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ return phys >> ISP_PAGE_OFFSET;
+}
+
+static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu,
+ unsigned int pte)
+{
+ unsigned int mask = mmu->driver->pte_valid_mask;
+
+ return (phys_addr_t)((pte & ~mask) << ISP_PAGE_OFFSET);
+}
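+
+/*
+ * Example (illustrative only, assuming 4 KiB ISP pages, ISP_PAGE_OFFSET == 12):
+ * pte = 0x80012345 with MERR_VALID_PTE_MASK = 0x80000000 decodes to the
+ * physical page address (0x00012345 << 12) = 0x12345000.
+ */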
+
+static unsigned int sh_get_pd_base(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ unsigned int pte = sh_phys_to_pte(mmu, phys);
+
+ return HOST_ADDRESS(pte);
+}
+
+/*
+ * callback to flush tlb.
+ *
+ * tlb_flush_range will at least flush TLBs containing
+ * address mapping from addr to addr + size.
+ *
+ * tlb_flush_all will flush all TLBs.
+ *
+ * tlb_flush_all must be provided. If tlb_flush_range is
+ * not provided, it is set to a default that calls tlb_flush_all.
+ */
+static void sh_tlb_flush(struct isp_mmu *mmu)
+{
+ ia_css_mmu_invalidate_cache();
+}
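+
+/*
+ * Minimal client sketch (illustrative only): sh_mmu_mrfld below provides
+ * just .tlb_flush_all, so isp_mmu_init() installs the default range flush,
+ * which simply flushes everything:
+ *
+ *	struct isp_mmu mmu;
+ *
+ *	if (!isp_mmu_init(&mmu, &sh_mmu_mrfld))
+ *		isp_mmu_flush_tlb(&mmu);	// flushes the whole TLB
+ */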
+
+struct isp_mmu_client sh_mmu_mrfld = {
+ .name = "Silicon Hive ISP3000 MMU",
+ .pte_valid_mask = MERR_VALID_PTE_MASK,
+ .null_pte = ~MERR_VALID_PTE_MASK,
+ .get_pd_base = sh_get_pd_base,
+ .tlb_flush_all = sh_tlb_flush,
+ .phys_to_pte = sh_phys_to_pte,
+ .pte_to_phys = sh_pte_to_phys,
+};
diff --git a/drivers/staging/media/atomisp/pci/mmu_defs.h b/drivers/staging/media/atomisp/pci/mmu_defs.h
new file mode 100644
index 000000000000..6a26f2cad52a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/mmu_defs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _mmu_defs_h
+#define _mmu_defs_h
+
+#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0
+#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1
+
+#define _HRT_MMU_REG_ALIGN 4
+
+#endif /* _mmu_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h b/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h
new file mode 100644
index 000000000000..9c682f2ecbb2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+*/
+
+#ifndef _IA_CSS_BINARY_H_
+#define _IA_CSS_BINARY_H_
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_frame_public.h"
+#include "sh_css_metrics.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h"
+
+/* The binary mode is used in pre-processor expressions so we cannot
+ * use an enum here. */
+#define IA_CSS_BINARY_MODE_COPY 0
+#define IA_CSS_BINARY_MODE_PREVIEW 1
+#define IA_CSS_BINARY_MODE_PRIMARY 2
+#define IA_CSS_BINARY_MODE_VIDEO 3
+#define IA_CSS_BINARY_MODE_PRE_ISP 4
+#define IA_CSS_BINARY_MODE_GDC 5
+#define IA_CSS_BINARY_MODE_POST_ISP 6
+#define IA_CSS_BINARY_MODE_ANR 7
+#define IA_CSS_BINARY_MODE_CAPTURE_PP 8
+#define IA_CSS_BINARY_MODE_VF_PP 9
+#define IA_CSS_BINARY_MODE_PRE_DE 10
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0 11
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1 12
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2 13
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3 14
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4 15
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5 16
+#define IA_CSS_BINARY_NUM_MODES 17
+
+#define MAX_NUM_PRIMARY_STAGES 6
+#define NUM_PRIMARY_HQ_STAGES 6 /* number of primary stages for ISP2.6.1 high quality pipe */
+#define NUM_PRIMARY_STAGES 1 /* number of primary stages for ISP1/ISP2.2 pipe */
+
+/* Indicate where binaries can read input from */
+#define IA_CSS_BINARY_INPUT_SENSOR 0
+#define IA_CSS_BINARY_INPUT_MEMORY 1
+#define IA_CSS_BINARY_INPUT_VARIABLE 2
+
+/* Should be included without the path.
+ However, that requires adding the path to numerous makefiles
+ that have nothing to do with isp parameters.
+ */
+#include "runtime/isp_param/interface/ia_css_isp_param_types.h"
+
+/* currently these ports include only output ports, not vf output ports */
+enum {
+ IA_CSS_BINARY_OUTPUT_PORT_0 = 0,
+ IA_CSS_BINARY_OUTPUT_PORT_1 = 1,
+ IA_CSS_BINARY_MAX_OUTPUT_PORTS = 2
+};
+
+struct ia_css_cas_binary_descr {
+ unsigned int num_stage;
+ unsigned int num_output_stage;
+ struct ia_css_frame_info *in_info;
+ struct ia_css_frame_info *internal_out_info;
+ struct ia_css_frame_info *out_info;
+ struct ia_css_frame_info *vf_info;
+ bool *is_output_stage;
+};
+
+struct ia_css_binary_descr {
+ int mode;
+ bool online;
+ bool continuous;
+ bool striped;
+ bool two_ppc;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_dz;
+ bool enable_xnr;
+ bool enable_fractional_ds;
+ bool enable_dpc;
+
+ /* ISP2401 */
+ bool enable_tnr;
+
+ bool enable_capture_pp_bli;
+ struct ia_css_resolution dvs_env;
+ enum atomisp_input_format stream_format;
+ struct ia_css_frame_info *in_info; /* info of the input frame at the
+ resolution required by the ISP. */
+ struct ia_css_frame_info *bds_out_info;
+ struct ia_css_frame_info *out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame_info *vf_info;
+ unsigned int isp_pipe_version;
+ unsigned int required_bds_factor;
+ int stream_config_left_padding;
+};
+
+struct ia_css_binary {
+ const struct ia_css_binary_xinfo *info;
+ enum atomisp_input_format input_format;
+ struct ia_css_frame_info in_frame_info;
+ struct ia_css_frame_info internal_frame_info;
+ struct ia_css_frame_info out_frame_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_resolution effective_in_frame_res;
+ struct ia_css_frame_info vf_frame_info;
+ int input_buf_vectors;
+ int deci_factor_log2;
+ int vf_downscale_log2;
+ int s3atbl_width;
+ int s3atbl_height;
+ int s3atbl_isp_width;
+ int s3atbl_isp_height;
+ unsigned int morph_tbl_width;
+ unsigned int morph_tbl_aligned_width;
+ unsigned int morph_tbl_height;
+ int sctbl_width_per_color;
+ int sctbl_aligned_width_per_color;
+ int sctbl_height;
+ struct ia_css_sdis_info dis;
+ struct ia_css_resolution dvs_envelope;
+ bool online;
+ unsigned int uds_xc;
+ unsigned int uds_yc;
+ unsigned int left_padding;
+ struct sh_css_binary_metrics metrics;
+ struct ia_css_isp_param_host_segments mem_params;
+ struct ia_css_isp_param_css_segments css_params;
+};
+
+#define IA_CSS_BINARY_DEFAULT_SETTINGS { \
+ .input_format = ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, \
+ .in_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .internal_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ .out_frame_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ .vf_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+}
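+
+/*
+ * Usage sketch (illustrative): the macro is intended for designated
+ * initialization of a binary object before ia_css_binary_find() or
+ * ia_css_binary_fill_info() populates it, e.g.
+ *
+ *	struct ia_css_binary binary = IA_CSS_BINARY_DEFAULT_SETTINGS;
+ */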
+
+int
+ia_css_binary_init_infos(void);
+
+int
+ia_css_binary_uninit(void);
+
+int
+ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
+ bool online,
+ bool two_ppc,
+ enum atomisp_input_format stream_format,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *bds_out_info,
+ const struct ia_css_frame_info *out_info[],
+ const struct ia_css_frame_info *vf_info,
+ struct ia_css_binary *binary,
+ struct ia_css_resolution *dvs_env,
+ int stream_config_left_padding,
+ bool accelerator);
+
+int
+ia_css_binary_find(struct ia_css_binary_descr *descr,
+ struct ia_css_binary *binary);
+
+/* @brief Get the shading information of the specified shading correction type.
+ *
+ * @param[in] binary: The isp binary which has the shading correction.
+ * @param[in] type: The shading correction type.
+ * @param[in] required_bds_factor: The bayer downscaling factor required in the pipe.
+ * @param[in] stream_config: The stream configuration.
+ * @param[out] shading_info: The shading information.
+ * The shading information exposed through the API is stored in shading_info.
+ * The driver needs this information to generate the shading table
+ * in exactly the layout the ISP requires.
+ * @param[out] pipe_config: The pipe configuration.
+ * Shading information that is ISP-internal (not exposed through the API) is stored in pipe_config.
+ * @return 0 or error code upon error.
+ */
+int
+ia_css_binary_get_shading_info(const struct ia_css_binary *binary,
+ enum ia_css_shading_correction_type type,
+ unsigned int required_bds_factor,
+ const struct ia_css_stream_config *stream_config,
+ struct ia_css_shading_info *shading_info,
+ struct ia_css_pipe_config *pipe_config);
+
+int
+ia_css_binary_3a_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+void
+ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+void
+ia_css_binary_dvs_stat_grid_info(
+ const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+unsigned
+ia_css_binary_max_vf_width(void);
+
+void
+ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary);
+
+void
+ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries,
+ uint32_t *num_isp_binaries);
+
+#endif /* _IA_CSS_BINARY_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
new file mode 100644
index 000000000000..af93ca96747c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -0,0 +1,1286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/math.h>
+
+#include <math_support.h>
+#include <gdc_device.h> /* HR_GDC_N */
+
+#include "hmm.h"
+
+#include "isp.h" /* ISP_VEC_NELEMS */
+
+#include "ia_css_binary.h"
+#include "ia_css_debug.h"
+#include "ia_css_util.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_internal.h"
+#include "sh_css_sp.h"
+#include "sh_css_firmware.h"
+#include "sh_css_defs.h"
+#include "sh_css_legacy.h"
+
+#include "atomisp_internal.h"
+
+#include "vf/vf_1.0/ia_css_vf.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" /* FRAC_ACC */
+
+#include "camera/pipe/interface/ia_css_pipe_binarydesc.h"
+
+#include "assert_support.h"
+
+static struct ia_css_binary_xinfo *all_binaries; /* ISP binaries only (no SP) */
+static struct ia_css_binary_xinfo
+ *binary_infos[IA_CSS_BINARY_NUM_MODES] = { NULL, };
+
+static void
+ia_css_binary_dvs_env(const struct ia_css_binary_info *info,
+ const struct ia_css_resolution *dvs_env,
+ struct ia_css_resolution *binary_dvs_env)
+{
+ if (info->enable.dvs_envelope) {
+ assert(dvs_env);
+ binary_dvs_env->width = max(dvs_env->width, SH_CSS_MIN_DVS_ENVELOPE);
+ binary_dvs_env->height = max(dvs_env->height, SH_CSS_MIN_DVS_ENVELOPE);
+ }
+}
+
+static void
+ia_css_binary_internal_res(const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *bds_out_info,
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_binary_info *info,
+ struct ia_css_resolution *internal_res)
+{
+ unsigned int isp_tmp_internal_width = 0,
+ isp_tmp_internal_height = 0;
+ bool binary_supports_yuv_ds = info->enable.ds & 2;
+ struct ia_css_resolution binary_dvs_env;
+
+ binary_dvs_env.width = 0;
+ binary_dvs_env.height = 0;
+ ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env);
+
+ if (binary_supports_yuv_ds) {
+ if (in_info) {
+ isp_tmp_internal_width = in_info->res.width
+ + info->pipeline.left_cropping + binary_dvs_env.width;
+ isp_tmp_internal_height = in_info->res.height
+ + info->pipeline.top_cropping + binary_dvs_env.height;
+ }
+ } else if ((bds_out_info) && (out_info) &&
+ /* TODO: hack to make the video_us case work; revert this once
+ a proper solution exists in the ISP */
+ (bds_out_info->res.width >= out_info->res.width)) {
+ isp_tmp_internal_width = bds_out_info->padded_width;
+ isp_tmp_internal_height = bds_out_info->res.height;
+ } else {
+ if (out_info) {
+ isp_tmp_internal_width = out_info->padded_width;
+ isp_tmp_internal_height = out_info->res.height;
+ }
+ }
+
+ /* We first calculate the resolutions used by the ISP. After that,
+ * we use those resolutions to compute sizes for tables etc. */
+ internal_res->width = __ISP_INTERNAL_WIDTH(isp_tmp_internal_width,
+ (int)binary_dvs_env.width,
+ info->pipeline.left_cropping, info->pipeline.mode,
+ info->pipeline.c_subsampling,
+ info->output.num_chunks, info->pipeline.pipelining);
+ internal_res->height = __ISP_INTERNAL_HEIGHT(isp_tmp_internal_height,
+ info->pipeline.top_cropping,
+ binary_dvs_env.height);
+}
+
+/* Computation results of the origin coordinate of bayer on the shading table. */
+struct sh_css_shading_table_bayer_origin_compute_results {
+ u32 bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of bayer scaling. */
+ u32 bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of bayer scaling. */
+ u32 bayer_scale_ver_ratio_in; /* Vertical ratio (in) of bayer scaling. */
+ u32 bayer_scale_ver_ratio_out; /* Vertical ratio (out) of bayer scaling. */
+ u32 sc_bayer_origin_x_bqs_on_shading_table; /* X coordinate (in bqs) of bayer origin on shading table. */
+ u32 sc_bayer_origin_y_bqs_on_shading_table; /* Y coordinate (in bqs) of bayer origin on shading table. */
+};
+
+/* Get the requirements for the shading correction. */
+static int
+ia_css_binary_compute_shading_table_bayer_origin(
+ const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct sh_css_shading_table_bayer_origin_compute_results *res) /* [out] */
+{
+ int err;
+
+ /* Rational fraction of the fixed bayer downscaling factor. */
+ struct u32_fract bds;
+
+ /* Left padding set by InputFormatter. */
+ unsigned int left_padding_bqs; /* in bqs */
+
+ /* Flag for the NEED_BDS_FACTOR_2_00 macro defined in isp kernels. */
+ unsigned int need_bds_factor_2_00;
+
+ /* Left padding adjusted inside the isp. */
+ unsigned int left_padding_adjusted_bqs; /* in bqs */
+
+ /* Bad pixels caused by filters.
+ An NxN filter (before/after bayer scaling) shifts the image position
+ to the right/bottom by a few pixels.
+ This causes bad pixels at the left/top edges,
+ and the effective bayer area shrinks. */
+ unsigned int bad_bqs_on_left_before_bs; /* in bqs */
+ unsigned int bad_bqs_on_left_after_bs; /* in bqs */
+ unsigned int bad_bqs_on_top_before_bs; /* in bqs */
+ unsigned int bad_bqs_on_top_after_bs; /* in bqs */
+
+ /* Get the rational fraction of bayer downscaling factor. */
+ err = sh_css_bds_factor_get_fract(required_bds_factor, &bds);
+ if (err)
+ return err;
+
+ /* Set the left padding set by InputFormatter. (ifmtr.c) */
+ if (stream_config->left_padding == -1)
+ left_padding_bqs = _ISP_BQS(binary->left_padding);
+ else
+ left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS
+ - _ISP_BQS(stream_config->left_padding));
+
+ /* Set the left padding adjusted inside the isp.
+ When bds_factor 2.00 is needed, some padding is added to left_padding
+ inside the isp, before bayer downscaling. (raw.isp.c)
+ (Hopefully, left_crop/left_padding/top_crop should be defined in css
+ appropriately, depending on bds_factor.)
+ */
+ need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0);
+
+ if (need_bds_factor_2_00 && binary->info->sp.pipeline.left_cropping > 0)
+ left_padding_adjusted_bqs = left_padding_bqs + ISP_VEC_NELEMS;
+ else
+ left_padding_adjusted_bqs = left_padding_bqs;
+
+ /* Currently, the bad pixel caused by filters before bayer scaling
+ is NOT considered, because the bad pixel is subtle.
+ When some large filter is used in the future,
+ we need to consider the bad pixel.
+
+ Currently, when bds_factor isn't 1.00, 3x3 anti-alias filter is applied
+ to each color plane(Gr/R/B/Gb) before bayer downscaling.
+ This filter moves each color plane to right/bottom directions
+ by 1 pixel at the most, depending on downscaling factor.
+ */
+ bad_bqs_on_left_before_bs = 0;
+ bad_bqs_on_top_before_bs = 0;
+
+ /* Currently, the bad pixel caused by filters after bayer scaling
+ is NOT considered, because the bad pixel is subtle.
+ When some large filter is used in the future,
+ we need to consider the bad pixel.
+
+ Currently, when DPC&BNR is processed between bayer scaling and
+ shading correction, DPC&BNR moves each color plane to
+ right/bottom directions by 1 pixel.
+ */
+ bad_bqs_on_left_after_bs = 0;
+ bad_bqs_on_top_after_bs = 0;
+
+ /* Calculate the origin of bayer (real sensor data area)
+ located on the shading table during the shading correction. */
+ res->sc_bayer_origin_x_bqs_on_shading_table =
+ ((left_padding_adjusted_bqs + bad_bqs_on_left_before_bs)
+ * bds.denominator + bds.numerator / 2) / bds.numerator
+ + bad_bqs_on_left_after_bs;
+ /* "+ bds.numerator / 2": rounding for division by bds.numerator */
+ res->sc_bayer_origin_y_bqs_on_shading_table =
+ (bad_bqs_on_top_before_bs * bds.denominator + bds.numerator / 2) / bds.numerator
+ + bad_bqs_on_top_after_bs;
+ /* "+ bds.numerator / 2": rounding for division by bds.numerator */
+
+ res->bayer_scale_hor_ratio_in = bds.numerator;
+ res->bayer_scale_hor_ratio_out = bds.denominator;
+ res->bayer_scale_ver_ratio_in = bds.numerator;
+ res->bayer_scale_ver_ratio_out = bds.denominator;
+
+ return err;
+}
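+
+/*
+ * Worked example (illustrative, assuming sh_css_bds_factor_get_fract()
+ * returns numerator = 2, denominator = 1 for bds factor 2.00): with
+ * left_padding_adjusted_bqs = 64 and no bad pixels, the bayer x origin
+ * becomes (64 * 1 + 2 / 2) / 2 = 32 bqs on the shading table.
+ */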
+
+/* Get the shading information of Shading Correction Type 1. */
+static int
+binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct ia_css_shading_info *info) /* [out] */
+{
+ int err;
+ struct sh_css_shading_table_bayer_origin_compute_results res;
+
+ assert(binary);
+ assert(info);
+
+ info->type = IA_CSS_SHADING_CORRECTION_TYPE_1;
+
+ info->info.type_1.enable = binary->info->sp.enable.sc;
+ info->info.type_1.num_hor_grids = binary->sctbl_width_per_color;
+ info->info.type_1.num_ver_grids = binary->sctbl_height;
+ info->info.type_1.bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+
+ /* Initialize by default values. */
+ info->info.type_1.bayer_scale_hor_ratio_in = 1;
+ info->info.type_1.bayer_scale_hor_ratio_out = 1;
+ info->info.type_1.bayer_scale_ver_ratio_in = 1;
+ info->info.type_1.bayer_scale_ver_ratio_out = 1;
+ info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = 0;
+ info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = 0;
+
+ err = ia_css_binary_compute_shading_table_bayer_origin(
+ binary,
+ required_bds_factor,
+ stream_config,
+ &res);
+ if (err)
+ return err;
+
+ info->info.type_1.bayer_scale_hor_ratio_in = res.bayer_scale_hor_ratio_in;
+ info->info.type_1.bayer_scale_hor_ratio_out = res.bayer_scale_hor_ratio_out;
+ info->info.type_1.bayer_scale_ver_ratio_in = res.bayer_scale_ver_ratio_in;
+ info->info.type_1.bayer_scale_ver_ratio_out = res.bayer_scale_ver_ratio_out;
+ info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = res.sc_bayer_origin_x_bqs_on_shading_table;
+ info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = res.sc_bayer_origin_y_bqs_on_shading_table;
+
+ return err;
+}
+
+
+int
+ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */
+ enum ia_css_shading_correction_type type, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct ia_css_shading_info *shading_info, /* [out] */
+ struct ia_css_pipe_config *pipe_config) /* [out] */
+{
+ int err;
+
+ assert(binary);
+ assert(shading_info);
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, type=%d, required_bds_factor=%d, stream_config=%p",
+ binary, type, required_bds_factor, stream_config);
+
+ if (type == IA_CSS_SHADING_CORRECTION_TYPE_1)
+ err = binary_get_shading_info_type_1(binary,
+ required_bds_factor,
+ stream_config,
+ shading_info);
+ else
+ err = -ENOTSUPP;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void sh_css_binary_common_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info)
+{
+ assert(binary);
+ assert(info);
+
+ info->isp_in_width = binary->internal_frame_info.res.width;
+ info->isp_in_height = binary->internal_frame_info.res.height;
+
+ info->vamem_type = IA_CSS_VAMEM_TYPE_2;
+}
+
+void
+ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_dvs_grid_info *dvs_info;
+
+ (void)pipe;
+ assert(binary);
+ assert(info);
+
+ dvs_info = &info->dvs_grid.dvs_grid_info;
+
+ /*
+ * For DIS, we use a division instead of a DIV_ROUND_UP(). If this is smaller
+ * than the 3a grid size, it indicates that the outer values are not
+ * valid for DIS.
+ */
+ dvs_info->enable = binary->info->sp.enable.dis;
+ dvs_info->width = binary->dis.grid.dim.width;
+ dvs_info->height = binary->dis.grid.dim.height;
+ dvs_info->aligned_width = binary->dis.grid.pad.width;
+ dvs_info->aligned_height = binary->dis.grid.pad.height;
+ dvs_info->bqs_per_grid_cell = 1 << binary->dis.deci_factor_log2;
+ dvs_info->num_hor_coefs = binary->dis.coef.dim.width;
+ dvs_info->num_ver_coefs = binary->dis.coef.dim.height;
+
+ sh_css_binary_common_grid_info(binary, info);
+}
+
+void
+ia_css_binary_dvs_stat_grid_info(
+ const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe)
+{
+ (void)pipe;
+ sh_css_binary_common_grid_info(binary, info);
+ return;
+}
+
+int
+ia_css_binary_3a_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe) {
+ struct ia_css_3a_grid_info *s3a_info;
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, info=%p, pipe=%p",
+ binary, info, pipe);
+
+ assert(binary);
+ assert(info);
+ s3a_info = &info->s3a_grid;
+
+ /* 3A statistics grid */
+ s3a_info->enable = binary->info->sp.enable.s3a;
+ s3a_info->width = binary->s3atbl_width;
+ s3a_info->height = binary->s3atbl_height;
+ s3a_info->aligned_width = binary->s3atbl_isp_width;
+ s3a_info->aligned_height = binary->s3atbl_isp_height;
+ s3a_info->bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+ s3a_info->deci_factor_log2 = binary->deci_factor_log2;
+ s3a_info->elem_bit_depth = SH_CSS_BAYER_BITS;
+ s3a_info->use_dmem = binary->info->sp.s3a.s3atbl_use_dmem;
+ s3a_info->has_histogram = 0;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void
+binary_init_pc_histogram(struct sh_css_pc_histogram *histo)
+{
+ assert(histo);
+
+ histo->length = 0;
+ histo->run = NULL;
+ histo->stall = NULL;
+}
+
+static void
+binary_init_metrics(struct sh_css_binary_metrics *metrics,
+ const struct ia_css_binary_info *info)
+{
+ assert(metrics);
+ assert(info);
+
+ metrics->mode = info->pipeline.mode;
+ metrics->id = info->id;
+ metrics->next = NULL;
+ binary_init_pc_histogram(&metrics->isp_histogram);
+ binary_init_pc_histogram(&metrics->sp_histogram);
+}
+
+/* move to host part of output module */
+static bool
+binary_supports_output_format(const struct ia_css_binary_xinfo *info,
+ enum ia_css_frame_format format)
+{
+ int i;
+
+ assert(info);
+
+ for (i = 0; i < info->num_output_formats; i++) {
+ if (info->output_formats[i] == format)
+ return true;
+ }
+ return false;
+}
+
+static bool
+binary_supports_vf_format(const struct ia_css_binary_xinfo *info,
+ enum ia_css_frame_format format)
+{
+ int i;
+
+ assert(info);
+
+ for (i = 0; i < info->num_vf_formats; i++) {
+ if (info->vf_formats[i] == format)
+ return true;
+ }
+ return false;
+}
+
+/* move to host part of bds module */
+static bool
+supports_bds_factor(u32 supported_factors,
+ uint32_t bds_factor)
+{
+ return ((supported_factors & PACK_BDS_FACTOR(bds_factor)) != 0);
+}
+
+static int
+binary_init_info(struct ia_css_binary_xinfo *info, unsigned int i,
+ bool *binary_found) {
+ const unsigned char *blob = sh_css_blob_info[i].blob;
+ unsigned int size = sh_css_blob_info[i].header.blob.size;
+
+ if ((!info) || (!binary_found))
+ return -EINVAL;
+
+ *info = sh_css_blob_info[i].header.info.isp;
+ *binary_found = blob;
+ info->blob_index = i;
+ /* we don't have this binary, skip it */
+ if (!size)
+ return 0;
+
+ info->xmem_addr = sh_css_load_blob(blob, size);
+ if (!info->xmem_addr)
+ return -ENOMEM;
+ return 0;
+}
+
+/* When binaries are put at the beginning, they will only
+ * be selected if no other primary matches.
+ */
+int
+ia_css_binary_init_infos(void) {
+ unsigned int i;
+ unsigned int num_of_isp_binaries = sh_css_num_binaries - NUM_OF_SPS - NUM_OF_BLS;
+
+ if (num_of_isp_binaries == 0)
+ return 0;
+
+ all_binaries = kvmalloc(num_of_isp_binaries * sizeof(*all_binaries),
+ GFP_KERNEL);
+ if (!all_binaries)
+ return -ENOMEM;
+
+ for (i = 0; i < num_of_isp_binaries; i++)
+ {
+ int ret;
+ struct ia_css_binary_xinfo *binary = &all_binaries[i];
+ bool binary_found;
+
+ ret = binary_init_info(binary, i, &binary_found);
+ if (ret)
+ return ret;
+ if (!binary_found)
+ continue;
+ /* Prepend new binary information */
+ binary->next = binary_infos[binary->sp.pipeline.mode];
+ binary_infos[binary->sp.pipeline.mode] = binary;
+ binary->blob = &sh_css_blob_info[i];
+ binary->mem_offsets = sh_css_blob_info[i].mem_offsets;
+ }
+ return 0;
+}
+
+int
+ia_css_binary_uninit(void) {
+ unsigned int i;
+ struct ia_css_binary_xinfo *b;
+
+ for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++)
+ {
+ for (b = binary_infos[i]; b; b = b->next) {
+ if (b->xmem_addr)
+ hmm_free(b->xmem_addr);
+ b->xmem_addr = mmgr_NULL;
+ }
+ binary_infos[i] = NULL;
+ }
+ kvfree(all_binaries);
+ return 0;
+}
+
+/* @brief Compute decimation factor for 3A statistics and shading correction.
+ *
+ * @param[in] width Frame width in pixels.
+ * @param[in] height Frame height in pixels.
+ * @return Log2 of decimation factor (= grid cell size) in bayer quads.
+ */
+static int
+binary_grid_deci_factor_log2(int width, int height)
+{
+ /* 3A/Shading decimation factor specification (at August 2008)
+ * ------------------------------------------------------------------
+ * [Image Width (BQ)] [Decimation Factor (BQ)] [Resulting grid cells]
+ * 1280 and above        32                       40 and above
+ * 640 - 1279            16                       40 - 80
+ * 639 and below          8                       80 and below
+ * ------------------------------------------------------------------
+ */
+ /* Maximum and minimum decimation factor by the specification */
+#define MAX_SPEC_DECI_FACT_LOG2 5
+#define MIN_SPEC_DECI_FACT_LOG2 3
+ /* the smallest frame width in bayer quads when decimation factor (log2) is 5 or 4, by the specification */
+#define DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ 1280
+#define DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ 640
+
+ int smallest_factor; /* the smallest factor (log2) where the number of cells does not exceed the limitation */
+ int spec_factor; /* the factor (log2) which satisfies the specification */
+
+ /* Currently supported maximum width and height are 5120(=80*64) and 3840(=60*64). */
+ assert(ISP_BQ_GRID_WIDTH(width,
+ MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_WIDTH);
+ assert(ISP_BQ_GRID_HEIGHT(height,
+ MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_HEIGHT);
+
+ /* Compute the smallest factor. */
+ smallest_factor = MAX_SPEC_DECI_FACT_LOG2;
+ while (ISP_BQ_GRID_WIDTH(width,
+ smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_WIDTH &&
+ ISP_BQ_GRID_HEIGHT(height, smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_HEIGHT
+ && smallest_factor > MIN_SPEC_DECI_FACT_LOG2)
+ smallest_factor--;
+
+ /* Get the factor by the specification. */
+ if (_ISP_BQS(width) >= DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ)
+ spec_factor = 5;
+ else if (_ISP_BQS(width) >= DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ)
+ spec_factor = 4;
+ else
+ spec_factor = 3;
+
+ /* If smallest_factor is smaller than or equal to spec_factor, choose spec_factor to follow the specification.
+ If smallest_factor is larger than spec_factor, choose smallest_factor.
+
+ ex. width=2560, height=1920
+ smallest_factor=4, spec_factor=5
+ smallest_factor < spec_factor -> return spec_factor
+
+ ex. width=300, height=3000
+ smallest_factor=5, spec_factor=3
+ smallest_factor > spec_factor -> return smallest_factor
+ */
+ return max(smallest_factor, spec_factor);
+
+#undef MAX_SPEC_DECI_FACT_LOG2
+#undef MIN_SPEC_DECI_FACT_LOG2
+#undef DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ
+#undef DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ
+}
+
+static int
+binary_in_frame_padded_width(int in_frame_width,
+ int isp_internal_width,
+ int dvs_env_width,
+ int stream_config_left_padding,
+ int left_cropping,
+ bool need_scaling)
+{
+ int rval;
+ int nr_of_left_paddings; /* number of padding pixels on the left of an image line */
+
+ if (IS_ISP2401) {
+ /* the output image line of Input System 2401 does not have left padding */
+ nr_of_left_paddings = 0;
+ } else {
+ /* in other cases, the left padding pixels are always 128 */
+ nr_of_left_paddings = 2 * ISP_VEC_NELEMS;
+ }
+
+ if (need_scaling) {
+ /* In SDV use-case, we need to match left-padding of
+ * primary and the video binary. */
+ if (stream_config_left_padding != -1) {
+ /* Different than before, we do left&right padding. */
+ rval =
+ CEIL_MUL(in_frame_width + nr_of_left_paddings,
+ 2 * ISP_VEC_NELEMS);
+ } else {
+ /* Different than before, we do left&right padding. */
+ in_frame_width += dvs_env_width;
+ rval =
+ CEIL_MUL(in_frame_width +
+ (left_cropping ? nr_of_left_paddings : 0),
+ 2 * ISP_VEC_NELEMS);
+ }
+ } else {
+ rval = isp_internal_width;
+ }
+
+ return rval;
+}
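+
+/*
+ * Worked example (illustrative, assuming ISP_VEC_NELEMS == 64, so lines are
+ * padded to multiples of 128 pixels): on non-2401 hardware, with scaling
+ * needed, stream_config_left_padding == -1, dvs_env_width == 0 and
+ * left_cropping != 0, an input width of 1920 yields
+ * CEIL_MUL(1920 + 128, 128) = 2048.
+ */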
+
+int
+ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
+ bool online,
+ bool two_ppc,
+ enum atomisp_input_format stream_format,
+ const struct ia_css_frame_info *in_info, /* can be NULL */
+ const struct ia_css_frame_info *bds_out_info, /* can be NULL */
+ const struct ia_css_frame_info *out_info[], /* can be NULL */
+ const struct ia_css_frame_info *vf_info, /* can be NULL */
+ struct ia_css_binary *binary,
+ struct ia_css_resolution *dvs_env,
+ int stream_config_left_padding,
+ bool accelerator) {
+ const struct ia_css_binary_info *info = &xinfo->sp;
+ unsigned int dvs_env_width = 0,
+ dvs_env_height = 0,
+ vf_log_ds = 0,
+ s3a_log_deci = 0,
+ bits_per_pixel = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_width = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_padded_width = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_height = 0,
+ isp_internal_width = 0,
+ isp_internal_height = 0,
+ s3a_isp_width = 0;
+
+ bool need_scaling = false;
+ struct ia_css_resolution binary_dvs_env, internal_res;
+ int err;
+ unsigned int i;
+ const struct ia_css_frame_info *bin_out_info = NULL;
+
+ assert(info);
+ assert(binary);
+
+ binary->info = xinfo;
+ if (!accelerator)
+ {
+ /* For accelerators, binary->css_params has already been filled
+ by the accelerator itself, so only allocate here otherwise. */
+ err = ia_css_isp_param_allocate_isp_parameters(
+ &binary->mem_params, &binary->css_params,
+ &info->mem_initializers);
+ if (err) {
+ return err;
+ }
+ }
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ {
+ if (out_info[i] && (out_info[i]->res.width != 0)) {
+ bin_out_info = out_info[i];
+ break;
+ }
+ }
+ if (in_info && bin_out_info)
+ {
+ need_scaling = (in_info->res.width != bin_out_info->res.width) ||
+ (in_info->res.height != bin_out_info->res.height);
+ }
+
+ /* binary_dvs_env has to be equal or larger than SH_CSS_MIN_DVS_ENVELOPE */
+ binary_dvs_env.width = 0;
+ binary_dvs_env.height = 0;
+ ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env);
+ dvs_env_width = binary_dvs_env.width;
+ dvs_env_height = binary_dvs_env.height;
+ binary->dvs_envelope.width = dvs_env_width;
+ binary->dvs_envelope.height = dvs_env_height;
+
+ /* internal resolution calculation */
+ internal_res.width = 0;
+ internal_res.height = 0;
+ ia_css_binary_internal_res(in_info, bds_out_info, bin_out_info, dvs_env,
+ info, &internal_res);
+ isp_internal_width = internal_res.width;
+ isp_internal_height = internal_res.height;
+
+ /* internal frame info */
+ if (bin_out_info) /* { */
+ binary->internal_frame_info.format = bin_out_info->format;
+ /* } */
+ binary->internal_frame_info.res.width = isp_internal_width;
+ binary->internal_frame_info.padded_width = CEIL_MUL(isp_internal_width, 2 * ISP_VEC_NELEMS);
+ binary->internal_frame_info.res.height = isp_internal_height;
+ binary->internal_frame_info.raw_bit_depth = bits_per_pixel;
+
+ if (in_info)
+ {
+ binary->effective_in_frame_res.width = in_info->res.width;
+ binary->effective_in_frame_res.height = in_info->res.height;
+
+ bits_per_pixel = in_info->raw_bit_depth;
+
+ /* input info */
+ binary->in_frame_info.res.width = in_info->res.width +
+ info->pipeline.left_cropping;
+ binary->in_frame_info.res.height = in_info->res.height +
+ info->pipeline.top_cropping;
+
+ binary->in_frame_info.res.width += dvs_env_width;
+ binary->in_frame_info.res.height += dvs_env_height;
+
+ binary->in_frame_info.padded_width =
+ binary_in_frame_padded_width(in_info->res.width,
+ isp_internal_width,
+ dvs_env_width,
+ stream_config_left_padding,
+ info->pipeline.left_cropping,
+ need_scaling);
+
+ binary->in_frame_info.format = in_info->format;
+ binary->in_frame_info.raw_bayer_order = in_info->raw_bayer_order;
+ binary->in_frame_info.crop_info = in_info->crop_info;
+ }
+
+ if (online)
+ {
+ bits_per_pixel = ia_css_util_input_format_bpp(
+ stream_format, two_ppc);
+ }
+ binary->in_frame_info.raw_bit_depth = bits_per_pixel;
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ {
+ if (out_info[i]) {
+ binary->out_frame_info[i].res.width = out_info[i]->res.width;
+ binary->out_frame_info[i].res.height = out_info[i]->res.height;
+ binary->out_frame_info[i].padded_width = out_info[i]->padded_width;
+ if (info->pipeline.mode == IA_CSS_BINARY_MODE_COPY) {
+ binary->out_frame_info[i].raw_bit_depth = bits_per_pixel;
+ } else {
+ /* Only relevant for RAW format.
+ * At the moment, all outputs are raw, 16 bit per pixel, except for copy.
+ * To do this cleanly, the binary should specify in its info
+ * the bit depth per output channel.
+ */
+ binary->out_frame_info[i].raw_bit_depth = 16;
+ }
+ binary->out_frame_info[i].format = out_info[i]->format;
+ }
+ }
+
+ if (vf_info && (vf_info->res.width != 0))
+ {
+ err = ia_css_vf_configure(binary, bin_out_info,
+ (struct ia_css_frame_info *)vf_info, &vf_log_ds);
+ if (err) {
+ if (!accelerator) {
+ ia_css_isp_param_destroy_isp_parameters(
+ &binary->mem_params,
+ &binary->css_params);
+ }
+ return err;
+ }
+ }
+ binary->vf_downscale_log2 = vf_log_ds;
+
+ binary->online = online;
+ binary->input_format = stream_format;
+
+ /* viewfinder output info */
+ if ((vf_info) && (vf_info->res.width != 0))
+ {
+ unsigned int vf_out_vecs, vf_out_width, vf_out_height;
+
+ binary->vf_frame_info.format = vf_info->format;
+ if (!bin_out_info)
+ return -EINVAL;
+ vf_out_vecs = __ISP_VF_OUTPUT_WIDTH_VECS(bin_out_info->padded_width,
+ vf_log_ds);
+ vf_out_width = _ISP_VF_OUTPUT_WIDTH(vf_out_vecs);
+ vf_out_height = _ISP_VF_OUTPUT_HEIGHT(bin_out_info->res.height,
+ vf_log_ds);
+
+ /* For preview mode, output pin is used instead of vf. */
+ if (info->pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW) {
+ binary->out_frame_info[0].res.width =
+ (bin_out_info->res.width >> vf_log_ds);
+ binary->out_frame_info[0].padded_width = vf_out_width;
+ binary->out_frame_info[0].res.height = vf_out_height;
+
+ binary->vf_frame_info.res.width = 0;
+ binary->vf_frame_info.padded_width = 0;
+ binary->vf_frame_info.res.height = 0;
+ } else {
+ /* we also store the raw downscaled width. This is
+ * used for digital zoom in preview to zoom only on
+ * the width that we actually want to keep, not on
+ * the aligned width. */
+ binary->vf_frame_info.res.width =
+ (bin_out_info->res.width >> vf_log_ds);
+ binary->vf_frame_info.padded_width = vf_out_width;
+ binary->vf_frame_info.res.height = vf_out_height;
+ }
+ } else
+ {
+ binary->vf_frame_info.res.width = 0;
+ binary->vf_frame_info.padded_width = 0;
+ binary->vf_frame_info.res.height = 0;
+ }
+
+ if (info->enable.ca_gdc)
+ {
+ binary->morph_tbl_width =
+ _ISP_MORPH_TABLE_WIDTH(isp_internal_width);
+ binary->morph_tbl_aligned_width =
+ _ISP_MORPH_TABLE_ALIGNED_WIDTH(isp_internal_width);
+ binary->morph_tbl_height =
+ _ISP_MORPH_TABLE_HEIGHT(isp_internal_height);
+ } else
+ {
+ binary->morph_tbl_width = 0;
+ binary->morph_tbl_aligned_width = 0;
+ binary->morph_tbl_height = 0;
+ }
+
+ sc_3a_dis_width = binary->in_frame_info.res.width;
+ sc_3a_dis_padded_width = binary->in_frame_info.padded_width;
+ sc_3a_dis_height = binary->in_frame_info.res.height;
+ if (bds_out_info && in_info &&
+ bds_out_info->res.width != in_info->res.width)
+ {
+ /* TODO: Next, "internal_frame_info" should be derived from
+ * bds_out. So this part will change once it is in place! */
+ sc_3a_dis_width = bds_out_info->res.width + info->pipeline.left_cropping;
+ sc_3a_dis_padded_width = isp_internal_width;
+ sc_3a_dis_height = isp_internal_height;
+ }
+
+ s3a_isp_width = _ISP_S3A_ELEMS_ISP_WIDTH(sc_3a_dis_padded_width,
+ info->pipeline.left_cropping);
+ if (info->s3a.fixed_s3a_deci_log)
+ {
+ s3a_log_deci = info->s3a.fixed_s3a_deci_log;
+ } else
+ {
+ s3a_log_deci = binary_grid_deci_factor_log2(s3a_isp_width,
+ sc_3a_dis_height);
+ }
+ binary->deci_factor_log2 = s3a_log_deci;
+
+ if (info->enable.s3a)
+ {
+ binary->s3atbl_width =
+ _ISP_S3ATBL_WIDTH(sc_3a_dis_width,
+ s3a_log_deci);
+ binary->s3atbl_height =
+ _ISP_S3ATBL_HEIGHT(sc_3a_dis_height,
+ s3a_log_deci);
+ binary->s3atbl_isp_width =
+ _ISP_S3ATBL_ISP_WIDTH(s3a_isp_width,
+ s3a_log_deci);
+ binary->s3atbl_isp_height =
+ _ISP_S3ATBL_ISP_HEIGHT(sc_3a_dis_height,
+ s3a_log_deci);
+ } else
+ {
+ binary->s3atbl_width = 0;
+ binary->s3atbl_height = 0;
+ binary->s3atbl_isp_width = 0;
+ binary->s3atbl_isp_height = 0;
+ }
+
+ if (info->enable.sc)
+ {
+ binary->sctbl_width_per_color = _ISP_SCTBL_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci);
+ binary->sctbl_aligned_width_per_color = SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
+ binary->sctbl_height = _ISP_SCTBL_HEIGHT(sc_3a_dis_height, s3a_log_deci);
+ } else
+ {
+ binary->sctbl_width_per_color = 0;
+ binary->sctbl_aligned_width_per_color = 0;
+ binary->sctbl_height = 0;
+ }
+ ia_css_sdis_init_info(&binary->dis,
+ sc_3a_dis_width,
+ sc_3a_dis_padded_width,
+ sc_3a_dis_height,
+ info->pipeline.isp_pipe_version,
+ info->enable.dis);
+ if (info->pipeline.left_cropping)
+ binary->left_padding = 2 * ISP_VEC_NELEMS - info->pipeline.left_cropping;
+ else
+ binary->left_padding = 0;
+
+ return 0;
+}
+
+int ia_css_binary_find(struct ia_css_binary_descr *descr, struct ia_css_binary *binary)
+{
+ int mode;
+ bool online;
+ bool two_ppc;
+ enum atomisp_input_format stream_format;
+ const struct ia_css_frame_info *req_in_info,
+ *req_bds_out_info,
+ *req_out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS],
+ *req_bin_out_info = NULL,
+ *req_vf_info;
+
+ struct ia_css_binary_xinfo *xcandidate;
+ bool need_ds, need_dz, need_dvs, need_xnr, need_dpc;
+ bool striped;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_capture_pp_bli;
+ int err = -EINVAL;
+ bool continuous;
+ unsigned int isp_pipe_version;
+ struct ia_css_resolution dvs_env, internal_res;
+ unsigned int i;
+
+ assert(descr);
+ /* MW: used after an error check, may accept NULL, but doubtful */
+ assert(binary);
+
+ dev_dbg(atomisp_dev, "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
+ descr, descr->mode, binary);
+
+ mode = descr->mode;
+ online = descr->online;
+ two_ppc = descr->two_ppc;
+ stream_format = descr->stream_format;
+ req_in_info = descr->in_info;
+ req_bds_out_info = descr->bds_out_info;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ req_out_info[i] = descr->out_info[i];
+ if (req_out_info[i] && (req_out_info[i]->res.width != 0))
+ req_bin_out_info = req_out_info[i];
+ }
+ if (!req_bin_out_info)
+ return -EINVAL;
+ req_vf_info = descr->vf_info;
+
+ need_xnr = descr->enable_xnr;
+ need_ds = descr->enable_fractional_ds;
+ need_dz = false;
+ need_dvs = false;
+ need_dpc = descr->enable_dpc;
+
+ enable_yuv_ds = descr->enable_yuv_ds;
+ enable_high_speed = descr->enable_high_speed;
+ enable_dvs_6axis = descr->enable_dvs_6axis;
+ enable_reduced_pipe = descr->enable_reduced_pipe;
+ enable_capture_pp_bli = descr->enable_capture_pp_bli;
+ continuous = descr->continuous;
+ striped = descr->striped;
+ isp_pipe_version = descr->isp_pipe_version;
+
+ dvs_env.width = 0;
+ dvs_env.height = 0;
+ internal_res.width = 0;
+ internal_res.height = 0;
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO) {
+ dvs_env = descr->dvs_env;
+ need_dz = descr->enable_dz;
+ /* Video is the only mode that has a nodz variant. */
+ need_dvs = dvs_env.width || dvs_env.height;
+ }
+
+ /* print a map of the loaded binaries */
+ dev_dbg(atomisp_dev, "BINARY INFO:\n");
+ for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) {
+ xcandidate = binary_infos[i];
+ if (xcandidate) {
+ dev_dbg(atomisp_dev, "%d:\n", i);
+ while (xcandidate) {
+ dev_dbg(atomisp_dev, " Name:%s Type:%d Cont:%d\n",
+ xcandidate->blob->name, xcandidate->type,
+ xcandidate->sp.enable.continuous);
+ xcandidate = xcandidate->next;
+ }
+ }
+ }
+
+ /* printf("sh_css_binary_find: pipe version %d\n", isp_pipe_version); */
+ for (xcandidate = binary_infos[mode]; xcandidate;
+ xcandidate = xcandidate->next) {
+ struct ia_css_binary_info *candidate = &xcandidate->sp;
+ /* printf("sh_css_binary_find: evaluating candidate:
+ * %d\n",candidate->id); */
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
+ candidate, candidate->pipeline.mode, candidate->id);
+
+ /*
+ * MW: Only a limited set of jointly configured binaries can
+ * be used in a continuous preview/video mode unless it is
+ * the copy mode and runs on SP.
+ */
+ if (!candidate->enable.continuous &&
+ continuous && (mode != IA_CSS_BINARY_MODE_COPY)) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
+ __LINE__, candidate->enable.continuous,
+ continuous, mode, IA_CSS_BINARY_MODE_COPY);
+ continue;
+ }
+ if (striped && candidate->iterator.num_stripes == 1) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: binary is not striped\n",
+ __LINE__);
+ continue;
+ }
+
+ if (candidate->pipeline.isp_pipe_version != isp_pipe_version &&
+ (mode != IA_CSS_BINARY_MODE_COPY) &&
+ (mode != IA_CSS_BINARY_MODE_CAPTURE_PP) &&
+ (mode != IA_CSS_BINARY_MODE_VF_PP)) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d != %d)\n",
+ __LINE__, candidate->pipeline.isp_pipe_version, isp_pipe_version);
+ continue;
+ }
+ if (!candidate->enable.reduced_pipe && enable_reduced_pipe) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.reduced_pipe, enable_reduced_pipe);
+ continue;
+ }
+ if (!candidate->enable.dvs_6axis && enable_dvs_6axis) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.dvs_6axis, enable_dvs_6axis);
+ continue;
+ }
+ if (candidate->enable.high_speed && !enable_high_speed) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, candidate->enable.high_speed, enable_high_speed);
+ continue;
+ }
+ if (!candidate->enable.xnr && need_xnr) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, candidate->enable.xnr, need_xnr);
+ continue;
+ }
+ if (!(candidate->enable.ds & 2) && enable_yuv_ds) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds);
+ continue;
+ }
+ if ((candidate->enable.ds & 2) && !enable_yuv_ds) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds);
+ continue;
+ }
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO &&
+ candidate->enable.ds && need_ds)
+ need_dz = false;
+
+ /* when we require vf output, we need to have vf_veceven */
+ if ((req_vf_info) && !(candidate->enable.vf_veceven ||
+ /* or variable vf vec even */
+ candidate->vf_dec.is_variable ||
+ /* or more than one output pin. */
+ xcandidate->num_output_pins > 1)) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
+ __LINE__, req_vf_info, candidate->enable.vf_veceven,
+ candidate->vf_dec.is_variable, xcandidate->num_output_pins, 1);
+ continue;
+ }
+ if (!candidate->enable.dvs_envelope && need_dvs) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.dvs_envelope, (int)need_dvs);
+ continue;
+ }
+ /* internal_res check considers input, output, and dvs envelope sizes */
+ ia_css_binary_internal_res(req_in_info, req_bds_out_info,
+ req_bin_out_info, &dvs_env, candidate, &internal_res);
+ if (internal_res.width > candidate->internal.max_width) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.width, candidate->internal.max_width);
+ continue;
+ }
+ if (internal_res.height > candidate->internal.max_height) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.height, candidate->internal.max_height);
+ continue;
+ }
+ if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.ds, (int)need_ds);
+ continue;
+ }
+ if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
+ __LINE__, candidate->enable.uds, candidate->enable.dvs_6axis,
+ (int)need_dz);
+ continue;
+ }
+ if (online && candidate->input.source == IA_CSS_BINARY_INPUT_MEMORY) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_MEMORY);
+ continue;
+ }
+ if (!online && candidate->input.source == IA_CSS_BINARY_INPUT_SENSOR) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_SENSOR);
+ continue;
+ }
+ if (req_bin_out_info->res.width < candidate->output.min_width ||
+ req_bin_out_info->res.width > candidate->output.max_width) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
+ __LINE__, req_bin_out_info->padded_width,
+ candidate->output.min_width, req_bin_out_info->padded_width,
+ candidate->output.max_width);
+ continue;
+ }
+ if (xcandidate->num_output_pins > 1 &&
+ /* in case we have a second output pin, */
+ req_vf_info) { /* and we need vf output. */
+ if (req_vf_info->res.width > candidate->output.max_width) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__, req_vf_info->res.width,
+ candidate->output.max_width);
+ continue;
+ }
+ }
+ if (req_in_info->padded_width > candidate->input.max_width) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, req_in_info->padded_width, candidate->input.max_width);
+ continue;
+ }
+ if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d\n",
+ __LINE__, binary_supports_output_format(xcandidate,
+ req_bin_out_info->format));
+ continue;
+ }
+ if (xcandidate->num_output_pins > 1 &&
+ /* in case we have a second output pin, */
+ req_vf_info && /* and we need vf output. */
+ /* check if the required vf format
+ is supported. */
+ !binary_supports_output_format(xcandidate, req_vf_info->format)) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1, req_vf_info,
+ binary_supports_output_format(xcandidate, req_vf_info->format));
+ continue;
+ }
+
+ /* Check if vf_veceven supports the requested vf format */
+ if (xcandidate->num_output_pins == 1 &&
+ req_vf_info && candidate->enable.vf_veceven &&
+ !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1,
+ req_vf_info, candidate->enable.vf_veceven,
+ binary_supports_vf_format(xcandidate, req_vf_info->format));
+ continue;
+ }
+
+ /* Check if vf_veceven supports the requested vf width */
+ if (xcandidate->num_output_pins == 1 &&
+ req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
+ if (req_vf_info->res.width > candidate->output.max_width) {
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__, req_vf_info->res.width,
+ candidate->output.max_width);
+ continue;
+ }
+ }
+
+ if (!supports_bds_factor(candidate->bds.supported_bds_factors,
+ descr->required_bds_factor)) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->bds.supported_bds_factors,
+ descr->required_bds_factor);
+ continue;
+ }
+
+ if (!candidate->enable.dpc && need_dpc) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->enable.dpc, descr->enable_dpc);
+ continue;
+ }
+
+ if (candidate->uds.use_bci && enable_capture_pp_bli) {
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->uds.use_bci, descr->enable_capture_pp_bli);
+ continue;
+ }
+
+ /* reconfigure any variable properties of the binary */
+ err = ia_css_binary_fill_info(xcandidate, online, two_ppc,
+ stream_format, req_in_info,
+ req_bds_out_info,
+ req_out_info, req_vf_info,
+ binary, &dvs_env,
+ descr->stream_config_left_padding,
+ false);
+
+ if (err)
+ break;
+ binary_init_metrics(&binary->metrics, &binary->info->sp);
+ break;
+ }
+
+ if (!err && xcandidate)
+ dev_dbg(atomisp_dev, "Using binary %s (id %d), type %d, mode %d, continuous %s\n",
+ xcandidate->blob->name, xcandidate->sp.id, xcandidate->type,
+ xcandidate->sp.pipeline.mode,
+ xcandidate->sp.enable.continuous ? "true" : "false");
+
+ if (err)
+ dev_err(atomisp_dev, "Failed to find a firmware binary matching the pipeline parameters\n");
+
+ return err;
+}
+
+unsigned
+ia_css_binary_max_vf_width(void)
+{
+ /* This is (should be) true for IPU1 and IPU2 */
+ /* For IPU3 (SkyCam) this pointer is guaranteed to be NULL simply because such a binary does not exist */
+ if (binary_infos[IA_CSS_BINARY_MODE_VF_PP])
+ return binary_infos[IA_CSS_BINARY_MODE_VF_PP]->sp.output.max_width;
+ return 0;
+}
+
+void
+ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary)
+{
+ if (binary) {
+ ia_css_isp_param_destroy_isp_parameters(&binary->mem_params,
+ &binary->css_params);
+ }
+}
+
+void
+ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries,
+ uint32_t *num_isp_binaries)
+{
+ assert(binaries);
+
+ if (num_isp_binaries)
+ *num_isp_binaries = 0;
+
+ *binaries = all_binaries;
+ if (all_binaries && num_isp_binaries) {
+ /* -1 to account for sp binary which is not stored in all_binaries */
+ if (sh_css_num_binaries > 0)
+ *num_isp_binaries = sh_css_num_binaries - 1;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h
new file mode 100644
index 000000000000..3e7dadca6d70
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_BUFQ_H
+#define _IA_CSS_BUFQ_H
+
+#include <type_support.h>
+#include "ia_css_bufq_comm.h"
+#include "ia_css_buffer.h"
+#include "ia_css_err.h"
+#define BUFQ_EVENT_SIZE 4
+
+/**
+ * @brief Query the internal queue ID for a buffer type.
+ *
+ * @param[in] buf_type The buffer type.
+ * @param[in] thread_id The thread ID.
+ * @param[out] val The internal queue ID.
+ *
+ * @return
+ * true, if the query succeeds;
+ * false, if the query fails.
+ */
+bool ia_css_query_internal_queue_id(
+ enum ia_css_buffer_type buf_type,
+ unsigned int thread_id,
+ enum sh_css_queue_id *val
+);
+
+/**
+ * @brief Map a buffer type to an internal queue ID.
+ *
+ * @param[in] thread_id Thread in which the buffer type has to be mapped or unmapped
+ * @param[in] buf_type buffer type.
+ * @param[in] map boolean flag to specify map or unmap
+ * @return none
+ */
+void ia_css_queue_map(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type,
+ bool map
+);
+
+/**
+ * @brief Initialize the buffer type to queue ID mapping
+ * @return none
+ */
+void ia_css_queue_map_init(void);
+
+/**
+ * @brief Initialize the bufq module.
+ * It creates instances of:
+ * - the host to SP buffer queues, a fixed-size MxN array where M is the
+ *   number of threads and N is the number of queues per thread
+ * - the SP to host buffer queues, a list of N queues
+ * - the host to SP event communication queue
+ * - the SP to host event communication queue
+ * - the queue for tagger commands
+ * @return none
+ */
+void ia_css_bufq_init(void);
+
+/**
+ * @brief Enqueue an item into a host to SP buffer queue.
+ *
+ * @param thread_index[in] Thread in which the item is to be enqueued
+ *
+ * @param queue_id[in] Index of the queue in the specified thread
+ * @param item[in] Object to enqueue.
+ * @return 0 or error code upon error.
+ *
+*/
+int ia_css_bufq_enqueue_buffer(
+ int thread_index,
+ int queue_id,
+ uint32_t item);
+
+/**
+ * @brief Dequeue an item from an SP to host buffer queue.
+ *
+ * @param queue_id[in] Specifies the index of the queue in the list where
+ * the item has to be read.
+ * @param item[out] Object to be dequeued into this item.
+ * @return 0 or error code upon error.
+ *
+*/
+int ia_css_bufq_dequeue_buffer(
+ int queue_id,
+ uint32_t *item);
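+
+/*
+ * Illustrative pairing (sketch only; thread_id, queue_id and handle are
+ * placeholders): the host hands a buffer to the SP and later drains the
+ * corresponding SP to host return queue:
+ *
+ *	if (!ia_css_bufq_enqueue_buffer(thread_id, queue_id, handle))
+ *		while (!ia_css_bufq_dequeue_buffer(queue_id, &handle))
+ *			; // consume the returned handle
+ */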
+
+/**
+* @brief Enqueue an event item into host to SP communication event queue.
+ *
+ * @param[in] evt_id The event ID.
+ * @param[in] evt_payload_0 The event payload.
+ * @param[in] evt_payload_1 The event payload.
+ * @param[in] evt_payload_2 The event payload.
+ * @return 0 or error code upon error.
+ *
+*/
+int ia_css_bufq_enqueue_psys_event(
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2
+);
+
+/**
+ * @brief Dequeue an item from SP to host communication event queue.
+ *
+ * @param item Object to be dequeued into this item.
+ * @return 0 or error code upon error.
+ *
+*/
+int ia_css_bufq_dequeue_psys_event(
+ u8 item[BUFQ_EVENT_SIZE]
+
+);
+
+/**
+ * @brief Enqueue an event item into host to SP EOF event queue.
+ *
+ * @param[in] evt_id The event ID.
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_bufq_enqueue_isys_event(
+ uint8_t evt_id);
+
+/**
+ * @brief Dequeue an item from the SP to host communication EOF event queue.
+ *
+ * @param item Object to be dequeued into this item.
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_bufq_dequeue_isys_event(
+ u8 item[BUFQ_EVENT_SIZE]);
+
+/**
+ * @brief Enqueue a tagger command item into the tagger command queue.
+ *
+ * @param[in] item Object to enqueue.
+ * @return 0 on success, or an error code upon error.
+ */
+int ia_css_bufq_enqueue_tag_cmd(
+	uint32_t item);
+
+/**
+ * @brief Uninitialize the bufq module.
+ *
+ * @return 0 on success, or an error code upon error.
+ */
+int ia_css_bufq_deinit(void);
+
+/**
+ * @brief Dump queue states.
+ *
+ * @return None
+ */
+void ia_css_bufq_dump_queue_info(void);
+
+#endif /* _IA_CSS_BUFQ_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h
new file mode 100644
index 000000000000..fb8067fd64a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_BUFQ_COMM_H
+#define _IA_CSS_BUFQ_COMM_H
+
+#include "system_global.h"
+
+enum sh_css_queue_id {
+ SH_CSS_INVALID_QUEUE_ID = -1,
+ SH_CSS_QUEUE_A_ID = 0,
+ SH_CSS_QUEUE_B_ID,
+ SH_CSS_QUEUE_C_ID,
+ SH_CSS_QUEUE_D_ID,
+ SH_CSS_QUEUE_E_ID,
+ SH_CSS_QUEUE_F_ID,
+ SH_CSS_QUEUE_G_ID,
+ SH_CSS_QUEUE_H_ID, /* for metadata */
+
+#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_H_ID + 1)
+
+};
+
+#define SH_CSS_MAX_DYNAMIC_BUFFERS_PER_THREAD SH_CSS_MAX_NUM_QUEUES
+/* for now we statically assign queues 0 & 1 to parameter sets */
+#define IA_CSS_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_A_ID
+#define IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_B_ID
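+/*
+ * Editor's note -- illustrative sketch only, not part of this patch. The
+ * #define inside the enum above makes SH_CSS_MAX_NUM_QUEUES track the last
+ * queue ID automatically; with the eight IDs listed it evaluates to 8,
+ * which could be verified at build time, e.g.:
+ *
+ *	static_assert(SH_CSS_MAX_NUM_QUEUES == SH_CSS_QUEUE_H_ID + 1);
+ */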
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
new file mode 100644
index 000000000000..0f0d16f4ce7c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "assert_support.h" /* assert */
+#include "ia_css_buffer.h"
+#include "sp.h"
+#include "ia_css_bufq.h" /* Bufq API's */
+#include "ia_css_queue.h" /* ia_css_queue_t */
+#include "sw_event_global.h" /* Event IDs.*/
+#include "ia_css_eventq.h" /* ia_css_eventq_recv()*/
+#include "ia_css_debug.h" /* ia_css_debug_dtrace*/
+#include "sh_css_internal.h" /* sh_css_queue_type */
+#include "sp_local.h" /* sp_address_of */
+#include "sh_css_firmware.h" /* sh_css_sp_fw*/
+
+#define BUFQ_DUMP_FILE_NAME_PREFIX_SIZE 256
+
+static char prefix[BUFQ_DUMP_FILE_NAME_PREFIX_SIZE] = {0};
+
+/*********************************************************/
+/* Global Queue objects used by CSS */
+/*********************************************************/
+
+struct sh_css_queues {
+ /* Host2SP buffer queue */
+ ia_css_queue_t host2sp_buffer_queue_handles
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+ /* SP2Host buffer queue */
+ ia_css_queue_t sp2host_buffer_queue_handles
+ [SH_CSS_MAX_NUM_QUEUES];
+
+ /* Host2SP event queue */
+ ia_css_queue_t host2sp_psys_event_queue_handle;
+
+ /* SP2Host event queue */
+ ia_css_queue_t sp2host_psys_event_queue_handle;
+
+ /* Host2SP ISYS event queue */
+ ia_css_queue_t host2sp_isys_event_queue_handle;
+
+ /* SP2Host ISYS event queue */
+ ia_css_queue_t sp2host_isys_event_queue_handle;
+ /* Tagger command queue */
+ ia_css_queue_t host2sp_tag_cmd_queue_handle;
+};
+
+/*******************************************************
+*** Static variables
+********************************************************/
+static struct sh_css_queues css_queues;
+
+static int
+buffer_type_to_queue_id_map[SH_CSS_MAX_SP_THREADS][IA_CSS_NUM_DYNAMIC_BUFFER_TYPE];
+static bool queue_availability[SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void map_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type
+);
+static void unmap_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type
+);
+
+static ia_css_queue_t *bufq_get_qhandle(
+ enum sh_css_queue_type type,
+ enum sh_css_queue_id id,
+ int thread
+);
+
+/*******************************************************
+*** Public functions
+********************************************************/
+void ia_css_queue_map_init(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++)
+ queue_availability[i][j] = true;
+ }
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE; j++)
+ buffer_type_to_queue_id_map[i][j] = SH_CSS_INVALID_QUEUE_ID;
+ }
+}
+
+void ia_css_queue_map(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type,
+ bool map)
+{
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_queue_map() enter: buf_type=%d, thread_id=%d\n", buf_type, thread_id);
+
+ if (map)
+ map_buffer_type_to_queue_id(thread_id, buf_type);
+ else
+ unmap_buffer_type_to_queue_id(thread_id, buf_type);
+}
+
+/*
+ * @brief Query the internal queue ID.
+ */
+bool ia_css_query_internal_queue_id(
+ enum ia_css_buffer_type buf_type,
+ unsigned int thread_id,
+ enum sh_css_queue_id *val)
+{
+ IA_CSS_ENTER("buf_type=%d, thread_id=%d, val = %p", buf_type, thread_id, val);
+
+ if ((!val) || (thread_id >= SH_CSS_MAX_SP_THREADS) ||
+ (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE)) {
+ IA_CSS_LEAVE("return_val = false");
+ return false;
+ }
+
+ *val = buffer_type_to_queue_id_map[thread_id][buf_type];
+ if ((*val == SH_CSS_INVALID_QUEUE_ID) || (*val >= SH_CSS_MAX_NUM_QUEUES)) {
+ IA_CSS_LOG("INVALID queue ID MAP = %d\n", *val);
+ IA_CSS_LEAVE("return_val = false");
+ return false;
+ }
+ IA_CSS_LEAVE("return_val = true");
+ return true;
+}
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void map_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type)
+{
+ unsigned int i;
+
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(buffer_type_to_queue_id_map[thread_id][buf_type] ==
+ SH_CSS_INVALID_QUEUE_ID);
+
+ /* queue 0 is reserved for parameters because it doesn't depend on events */
+ if (buf_type == IA_CSS_BUFFER_TYPE_PARAMETER_SET) {
+ assert(queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID]);
+ queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] =
+ IA_CSS_PARAMETER_SET_QUEUE_ID;
+ return;
+ }
+
+ /* queue 1 is reserved for per frame parameters because it doesn't depend on events */
+ if (buf_type == IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET) {
+ assert(queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID]);
+ queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] =
+ IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID;
+ return;
+ }
+
+ for (i = SH_CSS_QUEUE_C_ID; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ if (queue_availability[thread_id][i]) {
+ queue_availability[thread_id][i] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] = i;
+ break;
+ }
+ }
+
+ assert(i != SH_CSS_MAX_NUM_QUEUES);
+ return;
+}
+
+static void unmap_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type)
+{
+ int queue_id;
+
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(buffer_type_to_queue_id_map[thread_id][buf_type] !=
+ SH_CSS_INVALID_QUEUE_ID);
+
+ queue_id = buffer_type_to_queue_id_map[thread_id][buf_type];
+ buffer_type_to_queue_id_map[thread_id][buf_type] = SH_CSS_INVALID_QUEUE_ID;
+ queue_availability[thread_id][queue_id] = true;
+}
+
+static ia_css_queue_t *bufq_get_qhandle(
+ enum sh_css_queue_type type,
+ enum sh_css_queue_id id,
+ int thread)
+{
+ ia_css_queue_t *q = NULL;
+
+ switch (type) {
+ case sh_css_host2sp_buffer_queue:
+ if ((thread >= SH_CSS_MAX_SP_THREADS) || (thread < 0) ||
+ (id == SH_CSS_INVALID_QUEUE_ID))
+ break;
+ q = &css_queues.host2sp_buffer_queue_handles[thread][id];
+ break;
+ case sh_css_sp2host_buffer_queue:
+ if (id == SH_CSS_INVALID_QUEUE_ID)
+ break;
+ q = &css_queues.sp2host_buffer_queue_handles[id];
+ break;
+ case sh_css_host2sp_psys_event_queue:
+ q = &css_queues.host2sp_psys_event_queue_handle;
+ break;
+ case sh_css_sp2host_psys_event_queue:
+ q = &css_queues.sp2host_psys_event_queue_handle;
+ break;
+ case sh_css_host2sp_isys_event_queue:
+ q = &css_queues.host2sp_isys_event_queue_handle;
+ break;
+ case sh_css_sp2host_isys_event_queue:
+ q = &css_queues.sp2host_isys_event_queue_handle;
+ break;
+ case sh_css_host2sp_tag_cmd_queue:
+ q = &css_queues.host2sp_tag_cmd_queue_handle;
+ break;
+ default:
+ break;
+ }
+
+ return q;
+}
+
+/* Local function to initialize a buffer queue. This reduces
+ * the chances of copy-paste errors or typos.
+ */
+static inline void
+init_bufq(unsigned int desc_offset,
+ unsigned int elems_offset,
+ ia_css_queue_t *handle)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int q_base_addr;
+ ia_css_queue_remote_t remoteq;
+
+ fw = &sh_css_sp_fw;
+ q_base_addr = fw->info.sp.host_sp_queue;
+
+ /* Setup queue location as SP and proc id as SP0_ID */
+ remoteq.location = IA_CSS_QUEUE_LOC_SP;
+ remoteq.proc_id = SP0_ID;
+ remoteq.cb_desc_addr = q_base_addr + desc_offset;
+ remoteq.cb_elems_addr = q_base_addr + elems_offset;
+ /* Initialize the queue instance and obtain handle */
+ ia_css_queue_remote_init(handle, &remoteq);
+}
+
+void ia_css_bufq_init(void)
+{
+ int i, j;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* Setup all the local queue descriptors for Host2SP Buffer Queues */
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++)
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ host2sp_buffer_queues_desc[i][j]),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_buffer_queues_elems[i][j]),
+ &css_queues.host2sp_buffer_queue_handles[i][j]);
+ }
+
+ /* Setup all the local queue descriptors for SP2Host Buffer Queues */
+ for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ init_bufq(offsetof(struct host_sp_queues, sp2host_buffer_queues_desc[i]),
+ offsetof(struct host_sp_queues, sp2host_buffer_queues_elems[i]),
+ &css_queues.sp2host_buffer_queue_handles[i]);
+ }
+
+ /* Host2SP event queue*/
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ host2sp_psys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_psys_event_queue_elems),
+ &css_queues.host2sp_psys_event_queue_handle);
+
+ /* SP2Host event queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ sp2host_psys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_elems),
+ &css_queues.sp2host_psys_event_queue_handle);
+
+ /* Host2SP ISYS event queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ host2sp_isys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_isys_event_queue_elems),
+ &css_queues.host2sp_isys_event_queue_handle);
+
+ /* SP2Host ISYS event queue*/
+ init_bufq((uint32_t)offsetof(struct host_sp_queues,
+ sp2host_isys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, sp2host_isys_event_queue_elems),
+ &css_queues.sp2host_isys_event_queue_handle);
+
+ /* Host2SP tagger command queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_elems),
+ &css_queues.host2sp_tag_cmd_queue_handle);
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+int ia_css_bufq_enqueue_buffer(
+ int thread_index,
+ int queue_id,
+ uint32_t item)
+{
+ ia_css_queue_t *q;
+ int error;
+
+ IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
+ if ((thread_index >= SH_CSS_MAX_SP_THREADS) || (thread_index < 0) ||
+ (queue_id == SH_CSS_INVALID_QUEUE_ID))
+ return -EINVAL;
+
+ /* Get the queue for communication */
+ q = bufq_get_qhandle(sh_css_host2sp_buffer_queue,
+ queue_id,
+ thread_index);
+ if (q) {
+ error = ia_css_queue_enqueue(q, item);
+ } else {
+ IA_CSS_ERROR("queue is not initialized");
+ error = -EBUSY;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(error);
+ return error;
+}
+
+int ia_css_bufq_dequeue_buffer(
+ int queue_id,
+ uint32_t *item)
+{
+ int error;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
+ if ((!item) ||
+ (queue_id <= SH_CSS_INVALID_QUEUE_ID) ||
+ (queue_id >= SH_CSS_MAX_NUM_QUEUES)
+ )
+ return -EINVAL;
+
+ q = bufq_get_qhandle(sh_css_sp2host_buffer_queue,
+ queue_id,
+ -1);
+ if (q) {
+ error = ia_css_queue_dequeue(q, item);
+ } else {
+ IA_CSS_ERROR("queue is not initialized");
+ error = -EBUSY;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(error);
+ return error;
+}
+
+int ia_css_bufq_enqueue_psys_event(
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2)
+{
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("evt_id=%d", evt_id);
+ q = bufq_get_qhandle(sh_css_host2sp_psys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return -EBUSY;
+ }
+
+ error = ia_css_eventq_send(q,
+ evt_id, evt_payload_0, evt_payload_1, evt_payload_2);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(error);
+ return error;
+}
+
+int ia_css_bufq_dequeue_psys_event(
+ u8 item[BUFQ_EVENT_SIZE])
+{
+ int error = 0;
+ ia_css_queue_t *q;
+
+	/* No ENTER/LEAVE in this function since it is polled by some
+	 * test apps. Enabling logging here floods the log files, which
+	 * may cause timeouts. */
+ if (!item)
+ return -EINVAL;
+
+ q = bufq_get_qhandle(sh_css_sp2host_psys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return -EBUSY;
+ }
+ error = ia_css_eventq_recv(q, item);
+
+ return error;
+}
+
+int ia_css_bufq_dequeue_isys_event(
+ u8 item[BUFQ_EVENT_SIZE])
+{
+ int error = 0;
+ ia_css_queue_t *q;
+
+	/* No ENTER/LEAVE in this function since it is polled by some
+	 * test apps. Enabling logging here floods the log files, which
+	 * may cause timeouts. */
+ if (!item)
+ return -EINVAL;
+
+ q = bufq_get_qhandle(sh_css_sp2host_isys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return -EBUSY;
+ }
+ error = ia_css_eventq_recv(q, item);
+ return error;
+}
+
+int ia_css_bufq_enqueue_isys_event(uint8_t evt_id)
+{
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("event_id=%d", evt_id);
+ q = bufq_get_qhandle(sh_css_host2sp_isys_event_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return -EBUSY;
+ }
+
+ error = ia_css_eventq_send(q, evt_id, 0, 0, 0);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(error);
+ return error;
+}
+
+int ia_css_bufq_enqueue_tag_cmd(
+ uint32_t item)
+{
+ int error;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("item=%d", item);
+ q = bufq_get_qhandle(sh_css_host2sp_tag_cmd_queue, -1, -1);
+ if (!q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return -EBUSY;
+ }
+ error = ia_css_queue_enqueue(q, item);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(error);
+ return error;
+}
+
+int ia_css_bufq_deinit(void)
+{
+ return 0;
+}
+
+static void bufq_dump_queue_info(const char *prefix, ia_css_queue_t *qhandle)
+{
+ u32 free = 0, used = 0;
+
+ assert(prefix && qhandle);
+ ia_css_queue_get_used_space(qhandle, &used);
+ ia_css_queue_get_free_space(qhandle, &free);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: used=%u free=%u\n",
+ prefix, used, free);
+}
+
+void ia_css_bufq_dump_queue_info(void)
+{
+ int i, j;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Queue Information:\n");
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
+ snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
+ "host2sp_buffer_queue[%d][%d]", i, j);
+ bufq_dump_queue_info(prefix,
+ &css_queues.host2sp_buffer_queue_handles[i][j]);
+ }
+ }
+
+ for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
+ "sp2host_buffer_queue[%d]", i);
+ bufq_dump_queue_info(prefix,
+ &css_queues.sp2host_buffer_queue_handles[i]);
+ }
+ bufq_dump_queue_info("host2sp_psys_event",
+ &css_queues.host2sp_psys_event_queue_handle);
+ bufq_dump_queue_info("sp2host_psys_event",
+ &css_queues.sp2host_psys_event_queue_handle);
+
+ bufq_dump_queue_info("host2sp_isys_event",
+ &css_queues.host2sp_isys_event_queue_handle);
+ bufq_dump_queue_info("sp2host_isys_event",
+ &css_queues.sp2host_isys_event_queue_handle);
+ bufq_dump_queue_info("host2sp_tag_cmd",
+ &css_queues.host2sp_tag_cmd_queue_handle);
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
new file mode 100644
index 000000000000..2d0e906530af
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_DEBUG_H_
+#define _IA_CSS_DEBUG_H_
+
+/*! \file */
+
+#include <type_support.h>
+#include <linux/stdarg.h>
+#include <linux/bits.h>
+#include "ia_css_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_metadata.h"
+#include "sh_css_internal.h"
+/* ISP2500 */
+#include "ia_css_pipe.h"
+
+/* available levels */
+/*! Level for tracing errors */
+#define IA_CSS_DEBUG_ERROR 1
+/*! Level for tracing warnings */
+#define IA_CSS_DEBUG_WARNING 3
+/*! Level for tracing debug messages */
+#define IA_CSS_DEBUG_VERBOSE 5
+/*! Level for tracing trace messages, e.g. ia_css public function calls */
+#define IA_CSS_DEBUG_TRACE 6
+/*! Level for tracing trace messages, e.g. ia_css private function calls */
+#define IA_CSS_DEBUG_TRACE_PRIVATE 7
+/*! Level for tracing parameter messages e.g. in and out params of functions */
+#define IA_CSS_DEBUG_PARAM 8
+/*! Level for tracing info messages */
+#define IA_CSS_DEBUG_INFO 9
+
+/* Global variable which controls the verbosity levels of the debug tracing */
+extern int dbg_level;
+
+/*! @brief Enum defining the different isp parameters to dump.
+ * Values can be combined to dump a combination of sets.
+ */
+enum ia_css_debug_enable_param_dump {
+ IA_CSS_DEBUG_DUMP_FPN = BIT(0), /** FPN table */
+ IA_CSS_DEBUG_DUMP_OB = BIT(1), /** OB table */
+ IA_CSS_DEBUG_DUMP_SC = BIT(2), /** Shading table */
+ IA_CSS_DEBUG_DUMP_WB = BIT(3), /** White balance */
+ IA_CSS_DEBUG_DUMP_DP = BIT(4), /** Defect Pixel */
+ IA_CSS_DEBUG_DUMP_BNR = BIT(5), /** Bayer Noise Reductions */
+ IA_CSS_DEBUG_DUMP_S3A = BIT(6), /** 3A Statistics */
+ IA_CSS_DEBUG_DUMP_DE = BIT(7), /** De Mosaicing */
+ IA_CSS_DEBUG_DUMP_YNR = BIT(8), /** Luma Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CSC = BIT(9), /** Color Space Conversion */
+ IA_CSS_DEBUG_DUMP_GC = BIT(10), /** Gamma Correction */
+ IA_CSS_DEBUG_DUMP_TNR = BIT(11), /** Temporal Noise Reduction */
+ IA_CSS_DEBUG_DUMP_ANR = BIT(12), /** Advanced Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CE = BIT(13), /** Chroma Enhancement */
+ IA_CSS_DEBUG_DUMP_ALL = BIT(14), /** Dump all device parameters */
+};
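+/*
+ * Editor's note -- illustrative sketch only, not part of this patch. The
+ * values above are BIT() flags and may be OR'ed together when calling
+ * ia_css_debug_dump_isp_params(), declared later in this header, e.g.:
+ *
+ *	ia_css_debug_dump_isp_params(stream,
+ *				     IA_CSS_DEBUG_DUMP_WB | IA_CSS_DEBUG_DUMP_GC);
+ */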
+
+#define IA_CSS_ERROR(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, \
+ "%s() %d: error: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define IA_CSS_WARNING(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_WARNING, \
+ "%s() %d: warning: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/* Logging macros for public functions (API functions) */
+#define IA_CSS_ENTER(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Use this macro for small functions that do not call other functions. */
+#define IA_CSS_ENTER_LEAVE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#define IA_CSS_LEAVE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Shorthand for returning an int return value */
+#define IA_CSS_LEAVE_ERR(__err) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err)
+
+/* Use this macro for logging other than enter/leave.
+ * Note that this macro always uses the PRIVATE logging level.
+ */
+#define IA_CSS_LOG(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Logging macros for non-API functions. These have a lower trace level */
+#define IA_CSS_ENTER_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#define IA_CSS_LEAVE_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Shorthand for returning an int return value */
+#define IA_CSS_LEAVE_ERR_PRIVATE(__err) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err)
+
+/* Use this macro for small functions that do not call other functions. */
+#define IA_CSS_ENTER_LEAVE_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__)
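+/*
+ * Editor's note -- illustrative sketch only, not part of this patch. A
+ * typical CSS-internal helper brackets its body with the private variants
+ * of the macros above ('my_check' is a placeholder):
+ *
+ *	static int my_helper(int arg)
+ *	{
+ *		int err = 0;
+ *
+ *		IA_CSS_ENTER_PRIVATE("arg=%d", arg);
+ *		if (!my_check(arg))
+ *			err = -EINVAL;
+ *		IA_CSS_LEAVE_ERR_PRIVATE(err);
+ *		return err;
+ *	}
+ */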
+
+/*! @brief Function for tracing to the provided printf function in the
+ * environment.
+ * @param[in] level Level of the message.
+ * @param[in] fmt printf like format string
+ * @param[in] args arguments for the format string
+ */
+static inline void __printf(2, 0) ia_css_debug_vdtrace(unsigned int level,
+ const char *fmt,
+ va_list args)
+{
+ if (dbg_level >= level)
+ sh_css_vprint(fmt, args);
+}
+
+__printf(2, 3) void ia_css_debug_dtrace(unsigned int level,
+ const char *fmt, ...);
+
+
+/*! @brief Function to set the global dtrace verbosity level.
+ * @param[in] trace_level Maximum level of the messages to be traced.
+ * @return None
+ */
+void ia_css_debug_set_dtrace_level(
+ const unsigned int trace_level);
+
+/*! @brief Function to get the global dtrace verbosity level.
+ * @return global dtrace verbosity level
+ */
+unsigned int ia_css_debug_get_dtrace_level(void);
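+/*
+ * Editor's note -- illustrative sketch only, not part of this patch. The
+ * dtrace level is a simple threshold on the global dbg_level, so raising
+ * it to IA_CSS_DEBUG_TRACE_PRIVATE makes both the public and the private
+ * trace macros above visible in the log:
+ *
+ *	ia_css_debug_set_dtrace_level(IA_CSS_DEBUG_TRACE_PRIVATE);
+ *	IA_CSS_LOG("tracing at level %u", ia_css_debug_get_dtrace_level());
+ */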
+
+/* ISP2401 */
+/*! @brief Dump GAC hardware state.
+ * Dumps the GAC ACB hardware registers. May be useful for
+ * detecting a GAC that has hung.
+ * @return None
+ */
+void ia_css_debug_dump_gac_state(void);
+
+/*! @brief Dump internal sp software state.
+ * Dumps the sp software state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_sp_sw_debug_info(void);
+
+#if SP_DEBUG != SP_DEBUG_NONE
+void ia_css_debug_print_sp_debug_state(
+ const struct sh_css_sp_debug_state *state);
+#endif
+
+/*! @brief Dump all related binary info data
+ * @param[in] bi Binary info struct.
+ * @return None
+ */
+void ia_css_debug_binary_print(
+ const struct ia_css_binary *bi);
+
+void ia_css_debug_sp_dump_mipi_fifo_high_water(void);
+
+/*! \brief Dump pif A isp fifo state
+ * Dumps the primary input formatter state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_pif_a_isp_fifo_state(void);
+
+/*! \brief Dump pif B isp fifo state
+ * Dumps the primary input formatter state to tracing output.
+ * \return None
+ */
+void ia_css_debug_dump_pif_b_isp_fifo_state(void);
+
+/*! @brief Dump stream-to-memory sp fifo state
+ * Dumps the stream-to-memory block state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_str2mem_sp_fifo_state(void);
+
+/*! @brief Dump all fifo state info to the output
+ * Dumps all fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_all_fifo_state(void);
+
+/*! @brief Dump the frame info to the trace output
+ * Dumps the frame info to tracing output.
+ * @param[in] frame pointer to struct ia_css_frame
+ * @param[in] descr description output along with the frame info
+ * @return None
+ */
+void ia_css_debug_frame_print(
+ const struct ia_css_frame *frame,
+ const char *descr);
+
+/*! @brief Enable SP sleep mode.
+ * Makes the SP go to sleep according to the requested mode.
+ * @param[in] mode indicates when to put the SP to sleep
+ * @return None
+ */
+void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode);
+
+/*! @brief Function to wake up sp when in sleep mode.
+ * After sp has been put to sleep, use this function to let it continue
+ * to run again.
+ * @return None
+ */
+void ia_css_debug_wake_up_sp(void);
+
+/*! @brief Function to dump isp parameters.
+ * Dump isp parameters to tracing output
+ * @param[in] stream pointer to ia_css_stream struct
+ * @param[in] enable flag indicating which parameters to dump.
+ * @return None
+ */
+void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
+ unsigned int enable);
+
+void ia_css_debug_dump_isp_binary(void);
+
+void sh_css_dump_sp_raw_copy_linecount(bool reduced);
+
+/*! @brief Dump the resolution info to the trace output
+ * Dumps the resolution info to the trace output.
+ * @param[in] res pointer to struct ia_css_resolution
+ * @param[in] label description of resolution output
+ * @return None
+ */
+void ia_css_debug_dump_resolution(
+ const struct ia_css_resolution *res,
+ const char *label);
+
+/*! @brief Dump the frame info to the trace output
+ * Dumps the frame info to the trace output.
+ * @param[in] info pointer to struct ia_css_frame_info
+ * @param[in] label description of frame_info output
+ * @return None
+ */
+void ia_css_debug_dump_frame_info(
+ const struct ia_css_frame_info *info,
+ const char *label);
+
+/*! @brief Dump the capture config info to the trace output
+ * Dumps the capture config info to the trace output.
+ * @param[in] config pointer to struct ia_css_capture_config
+ * @return None
+ */
+void ia_css_debug_dump_capture_config(
+ const struct ia_css_capture_config *config);
+
+/*! @brief Dump the pipe extra config info to the trace output
+ * Dumps the pipe extra config info to the trace output.
+ * @param[in] extra_config pointer to struct ia_css_pipe_extra_config
+ * @return None
+ */
+void ia_css_debug_dump_pipe_extra_config(
+ const struct ia_css_pipe_extra_config *extra_config);
+
+/*! @brief Dump the pipe config info to the trace output
+ * Dumps the pipe config info to the trace output.
+ * @param[in] config pointer to struct ia_css_pipe_config
+ * @return None
+ */
+void ia_css_debug_dump_pipe_config(
+ const struct ia_css_pipe_config *config);
+
+/*! @brief Dump the stream config source info to the trace output
+ * Dumps the stream config source info to the trace output.
+ * @param[in] config pointer to struct ia_css_stream_config
+ * @return None
+ */
+void ia_css_debug_dump_stream_config_source(
+ const struct ia_css_stream_config *config);
+
+/*! @brief Dump the mipi buffer config info to the trace output
+ * Dumps the mipi buffer config info to the trace output.
+ * @param[in] config pointer to struct ia_css_mipi_buffer_config
+ * @return None
+ */
+void ia_css_debug_dump_mipi_buffer_config(
+ const struct ia_css_mipi_buffer_config *config);
+
+/*! @brief Dump the metadata config info to the trace output
+ * Dumps the metadata config info to the trace output.
+ * @param[in] config pointer to struct ia_css_metadata_config
+ * @return None
+ */
+void ia_css_debug_dump_metadata_config(
+ const struct ia_css_metadata_config *config);
+
+/*! @brief Dump the stream config info to the trace output
+ * Dumps the stream config info to the trace output.
+ * @param[in] config pointer to struct ia_css_stream_config
+ * @param[in] num_pipes number of pipes for the stream
+ * @return None
+ */
+void ia_css_debug_dump_stream_config(
+ const struct ia_css_stream_config *config,
+ int num_pipes);
+
+/**
+ * @brief Initialize the debug mode.
+ *
+ * WARNING:
+ * This API should be called ONLY once in debug mode.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_init(void);
+
+/**
+ * @brief Disable the DMA channel.
+ *
+ * @param[in] dma_ID The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * This is part of the DMA API -> dma.h
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_disable_dma_channel(
+ int dma_ID,
+ int channel_id,
+ int request_type);
+/**
+ * @brief Enable the DMA channel.
+ *
+ * @param[in] dma_ID The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_enable_dma_channel(
+ int dma_ID,
+ int channel_id,
+ int request_type);
+
+/**
+ * @brief Dump tracer data.
+ * [Currently only supported for SKC]
+ *
+ * @return
+ * - none.
+ */
+void ia_css_debug_dump_trace(void);
+
+/* ISP2401 */
+/**
+ * @brief Program counter dumping (in loop)
+ *
+ * @param[in] id The ID of the SP
+ * @param[in] num_of_dumps The number of dumps
+ *
+ * @return
+ * - none
+ */
+void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps);
+
+/* ISP2500 */
+/*! @brief Dump all states for the ISP hang case.
+ * Dumps the previous and current ISP configurations,
+ * the GAC statuses and the SP0/SP1 statuses.
+ *
+ * @param[in] pipe The current pipe
+ *
+ * @return None
+ */
+void ia_css_debug_dump_hang_status(
+ struct ia_css_pipe *pipe);
+
+/*! @brief External command handler.
+ *
+ * @return None
+ */
+void ia_css_debug_ext_command_handler(void);
+
+#endif /* _IA_CSS_DEBUG_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h
new file mode 100644
index 000000000000..d4accfc5d9a7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+/* TODO: Move debug-related code from ia_css_internal.h into this file. */
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h
new file mode 100644
index 000000000000..80c58cb934d5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_DEBUG_PIPE_H_
+#define _IA_CSS_DEBUG_PIPE_H_
+
+/*! \file */
+
+#include <ia_css_frame_public.h>
+#include <ia_css_stream_public.h>
+#include "ia_css_pipeline.h"
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_prologue(void);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_epilogue(void);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] stage Pipeline stage.
+ * @param[in] id Pipe id.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_stage(
+ struct ia_css_pipeline_stage *stage,
+ enum ia_css_pipe_id id);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] out_frame Output frame of SP raw copy.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_sp_raw_copy(
+ struct ia_css_frame *out_frame);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] stream_config info about sensor and input formatter.
+ *
+ * @return None
+ */
+void ia_css_debug_pipe_graph_dump_stream_config(
+ const struct ia_css_stream_config *stream_config);
+
+#endif /* _IA_CSS_DEBUG_PIPE_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
new file mode 100644
index 000000000000..b411ca2f415e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
@@ -0,0 +1,1885 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "debug.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#define __INLINE_INPUT_SYSTEM__
+#endif
+#ifndef __INLINE_IBUF_CTRL__
+#define __INLINE_IBUF_CTRL__
+#endif
+#ifndef __INLINE_CSI_RX__
+#define __INLINE_CSI_RX__
+#endif
+#ifndef __INLINE_PIXELGEN__
+#define __INLINE_PIXELGEN__
+#endif
+#ifndef __INLINE_STREAM2MMIO__
+#define __INLINE_STREAM2MMIO__
+#endif
+
+#include <linux/args.h>
+#include <linux/string.h> /* for strscpy() */
+
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_irq.h"
+#include "ia_css_stream.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_params.h"
+#include "ia_css_bufq.h"
+/* ISP2401 */
+#include "ia_css_queue.h"
+
+#include "ia_css_isp_params.h"
+
+#include "system_local.h"
+#include "assert_support.h"
+#include "print_support.h"
+
+#include "fifo_monitor.h"
+
+#include "input_formatter.h"
+#include "dma.h"
+#include "irq.h"
+#include "gp_device.h"
+#include "sp.h"
+#include "isp.h"
+#include "type_support.h"
+#include "input_system.h" /* input_formatter_reg_load */
+#include "ia_css_tagger_common.h"
+
+#include "sh_css_internal.h"
+#include "ia_css_isys.h"
+#include "sh_css_sp.h" /* sh_css_sp_get_debug_state() */
+
+#include "css_trace.h" /* tracer */
+
+#include "device_access.h" /* for ia_css_device_load_uint32 */
+
+/* Include all kernel host interfaces for ISP1 */
+#include "anr/anr_1.0/ia_css_anr.host.h"
+#include "cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "de/de_1.0/ia_css_de.host.h"
+#include "dp/dp_1.0/ia_css_dp.host.h"
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "gc/gc_1.0/ia_css_gc.host.h"
+#include "ob/ob_1.0/ia_css_ob.host.h"
+#include "s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "wb/wb_1.0/ia_css_wb.host.h"
+#include "ynr/ynr_1.0/ia_css_ynr.host.h"
+
+/* Include additional kernel host interfaces for ISP2 */
+#include "aa/aa_2/ia_css_aa2.host.h"
+#include "anr/anr_2/ia_css_anr2.host.h"
+#include "cnr/cnr_2/ia_css_cnr2.host.h"
+#include "de/de_2/ia_css_de2.host.h"
+#include "gc/gc_2/ia_css_gc2.host.h"
+#include "ynr/ynr_2/ia_css_ynr2.host.h"
+
+#define DPG_START "ia_css_debug_pipe_graph_dump_start "
+#define DPG_END " ia_css_debug_pipe_graph_dump_end\n"
+
+#define ENABLE_LINE_MAX_LENGTH (25)
+
+static struct pipe_graph_class {
+ bool do_init;
+ int height;
+ int width;
+ int eff_height;
+ int eff_width;
+ enum atomisp_input_format stream_format;
+} pg_inst = {true, 0, 0, 0, 0, N_ATOMISP_INPUT_FORMAT};
+
+static const char *const queue_id_to_str[] = {
+ /* [SH_CSS_QUEUE_A_ID] =*/ "queue_A",
+ /* [SH_CSS_QUEUE_B_ID] =*/ "queue_B",
+ /* [SH_CSS_QUEUE_C_ID] =*/ "queue_C",
+ /* [SH_CSS_QUEUE_D_ID] =*/ "queue_D",
+ /* [SH_CSS_QUEUE_E_ID] =*/ "queue_E",
+ /* [SH_CSS_QUEUE_F_ID] =*/ "queue_F",
+ /* [SH_CSS_QUEUE_G_ID] =*/ "queue_G",
+ /* [SH_CSS_QUEUE_H_ID] =*/ "queue_H"
+};
+
+static const char *const pipe_id_to_str[] = {
+ /* [IA_CSS_PIPE_ID_PREVIEW] =*/ "preview",
+ /* [IA_CSS_PIPE_ID_COPY] =*/ "copy",
+ /* [IA_CSS_PIPE_ID_VIDEO] =*/ "video",
+ /* [IA_CSS_PIPE_ID_CAPTURE] =*/ "capture",
+ /* [IA_CSS_PIPE_ID_YUVPP] =*/ "yuvpp",
+};
+
+/* 27 is combined length of _stage%d(pipe%d)\0. */
+static char dot_id_input_bin[SH_CSS_MAX_BINARY_NAME + 27];
+static char ring_buffer[200];
+
+void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ ia_css_debug_vdtrace(level, fmt, ap);
+ va_end(ap);
+}
+
+void ia_css_debug_set_dtrace_level(const unsigned int trace_level)
+{
+ dbg_level = trace_level;
+ return;
+}
+
+unsigned int ia_css_debug_get_dtrace_level(void)
+{
+ return dbg_level;
+}
+
+static const char *debug_stream_format2str(const enum atomisp_input_format
+ stream_format)
+{
+ switch (stream_format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ return "yuv420-8-legacy";
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ return "yuv420-8";
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ return "yuv420-10";
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ return "yuv420-16";
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ return "yuv422-8";
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ return "yuv422-10";
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ return "yuv422-16";
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ return "rgb444";
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ return "rgb555";
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ return "rgb565";
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ return "rgb666";
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ return "rgb888";
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ return "raw6";
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ return "raw7";
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ return "raw8";
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ return "raw10";
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ return "raw12";
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ return "raw14";
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ return "raw16";
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ return "binary8";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT1:
+ return "generic-short1";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2:
+ return "generic-short2";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3:
+ return "generic-short3";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4:
+ return "generic-short4";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5:
+ return "generic-short5";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6:
+ return "generic-short6";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT7:
+ return "generic-short7";
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8:
+ return "generic-short8";
+ case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT:
+ return "yuv420-8-shift";
+ case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT:
+ return "yuv420-10-shift";
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ return "embedded-8";
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ return "user-def-8-type-1";
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ return "user-def-8-type-2";
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ return "user-def-8-type-3";
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ return "user-def-8-type-4";
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ return "user-def-8-type-5";
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ return "user-def-8-type-6";
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ return "user-def-8-type-7";
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ return "user-def-8-type-8";
+
+ default:
+ assert(!"Unknown stream format");
+ return "unknown-stream-format";
+ }
+}
+
+static const char *debug_frame_format2str(const enum ia_css_frame_format
+ frame_format)
+{
+ switch (frame_format) {
+ case IA_CSS_FRAME_FORMAT_NV11:
+ return "NV11";
+ case IA_CSS_FRAME_FORMAT_NV12:
+ return "NV12";
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ return "NV12_16";
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ return "NV12_TILEY";
+ case IA_CSS_FRAME_FORMAT_NV16:
+ return "NV16";
+ case IA_CSS_FRAME_FORMAT_NV21:
+ return "NV21";
+ case IA_CSS_FRAME_FORMAT_NV61:
+ return "NV61";
+ case IA_CSS_FRAME_FORMAT_YV12:
+ return "YV12";
+ case IA_CSS_FRAME_FORMAT_YV16:
+ return "YV16";
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ return "YUV420";
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ return "YUV420_16";
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ return "YUV422";
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ return "YUV422_16";
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ return "UYVY";
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ return "YUYV";
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ return "YUV444";
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ return "YUV_LINE";
+ case IA_CSS_FRAME_FORMAT_RAW:
+ return "RAW";
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ return "RGB565";
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ return "PLANAR_RGB888";
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ return "RGBA888";
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ return "QPLANE6";
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ return "BINARY_8";
+ case IA_CSS_FRAME_FORMAT_MIPI:
+ return "MIPI";
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ return "RAW_PACKED";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ return "CSI_MIPI_YUV420_8";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ return "CSI_MIPI_LEGACY_YUV420_8";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
+ return "CSI_MIPI_YUV420_10";
+
+ default:
+ assert(!"Unknown frame format");
+ return "unknown-frame-format";
+ }
+}
+
+static void debug_print_fifo_channel_state(const fifo_channel_state_t *state,
+ const char *descr)
+{
+ assert(state);
+ assert(descr);
+
+ ia_css_debug_dtrace(2, "FIFO channel: %s\n", descr);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "source valid",
+ state->src_valid);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo accept",
+ state->fifo_accept);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo valid",
+ state->fifo_valid);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "sink accept",
+ state->sink_accept);
+ return;
+}
+
+void ia_css_debug_dump_pif_a_isp_fifo_state(void)
+{
+ fifo_channel_state_t pif_to_isp, isp_to_pif;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_IF0_TO_ISP0, &pif_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_IF0, &isp_to_pif);
+ debug_print_fifo_channel_state(&pif_to_isp, "Primary IF A to ISP");
+ debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF A");
+}
+
+void ia_css_debug_dump_pif_b_isp_fifo_state(void)
+{
+ fifo_channel_state_t pif_to_isp, isp_to_pif;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_IF1_TO_ISP0, &pif_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_IF1, &isp_to_pif);
+ debug_print_fifo_channel_state(&pif_to_isp, "Primary IF B to ISP");
+ debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF B");
+}
+
+void ia_css_debug_dump_str2mem_sp_fifo_state(void)
+{
+ fifo_channel_state_t s2m_to_sp, sp_to_s2m;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_STREAM2MEM0_TO_SP0, &s2m_to_sp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_SP0_TO_STREAM2MEM0, &sp_to_s2m);
+ debug_print_fifo_channel_state(&s2m_to_sp, "Stream-to-memory to SP");
+ debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory");
+}
+
+void ia_css_debug_dump_all_fifo_state(void)
+{
+ int i;
+ fifo_monitor_state_t state;
+
+ fifo_monitor_get_state(FIFO_MONITOR0_ID, &state);
+
+ for (i = 0; i < N_FIFO_CHANNEL; i++)
+ debug_print_fifo_channel_state(&state.fifo_channels[i],
+ "squepfstqkt");
+ return;
+}
+
+static void debug_binary_info_print(const struct ia_css_binary_xinfo *info)
+{
+ assert(info);
+ ia_css_debug_dtrace(2, "id = %d\n", info->sp.id);
+ ia_css_debug_dtrace(2, "mode = %d\n", info->sp.pipeline.mode);
+ ia_css_debug_dtrace(2, "max_input_width = %d\n", info->sp.input.max_width);
+ ia_css_debug_dtrace(2, "min_output_width = %d\n",
+ info->sp.output.min_width);
+ ia_css_debug_dtrace(2, "max_output_width = %d\n",
+ info->sp.output.max_width);
+ ia_css_debug_dtrace(2, "top_cropping = %d\n", info->sp.pipeline.top_cropping);
+ ia_css_debug_dtrace(2, "left_cropping = %d\n", info->sp.pipeline.left_cropping);
+ ia_css_debug_dtrace(2, "xmem_addr = %d\n", info->xmem_addr);
+ ia_css_debug_dtrace(2, "enable_vf_veceven = %d\n",
+ info->sp.enable.vf_veceven);
+ ia_css_debug_dtrace(2, "enable_dis = %d\n", info->sp.enable.dis);
+ ia_css_debug_dtrace(2, "enable_uds = %d\n", info->sp.enable.uds);
+ ia_css_debug_dtrace(2, "enable ds = %d\n", info->sp.enable.ds);
+ ia_css_debug_dtrace(2, "s3atbl_use_dmem = %d\n", info->sp.s3a.s3atbl_use_dmem);
+ return;
+}
+
+void ia_css_debug_binary_print(const struct ia_css_binary *bi)
+{
+ unsigned int i;
+
+ debug_binary_info_print(bi->info);
+ ia_css_debug_dtrace(2,
+ "input: %dx%d, format = %d, padded width = %d\n",
+ bi->in_frame_info.res.width,
+ bi->in_frame_info.res.height,
+ bi->in_frame_info.format,
+ bi->in_frame_info.padded_width);
+ ia_css_debug_dtrace(2,
+ "internal :%dx%d, format = %d, padded width = %d\n",
+ bi->internal_frame_info.res.width,
+ bi->internal_frame_info.res.height,
+ bi->internal_frame_info.format,
+ bi->internal_frame_info.padded_width);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (bi->out_frame_info[i].res.width != 0) {
+ ia_css_debug_dtrace(2,
+ "out%d: %dx%d, format = %d, padded width = %d\n",
+ i,
+ bi->out_frame_info[i].res.width,
+ bi->out_frame_info[i].res.height,
+ bi->out_frame_info[i].format,
+ bi->out_frame_info[i].padded_width);
+ }
+ }
+ ia_css_debug_dtrace(2,
+ "vf out: %dx%d, format = %d, padded width = %d\n",
+ bi->vf_frame_info.res.width,
+ bi->vf_frame_info.res.height,
+ bi->vf_frame_info.format,
+ bi->vf_frame_info.padded_width);
+ ia_css_debug_dtrace(2, "online = %d\n", bi->online);
+ ia_css_debug_dtrace(2, "input_buf_vectors = %d\n",
+ bi->input_buf_vectors);
+ ia_css_debug_dtrace(2, "deci_factor_log2 = %d\n", bi->deci_factor_log2);
+ ia_css_debug_dtrace(2, "vf_downscale_log2 = %d\n",
+ bi->vf_downscale_log2);
+ ia_css_debug_dtrace(2, "dis_deci_factor_log2 = %d\n",
+ bi->dis.deci_factor_log2);
+ ia_css_debug_dtrace(2, "dis hor coef num = %d\n",
+ bi->dis.coef.pad.width);
+ ia_css_debug_dtrace(2, "dis ver coef num = %d\n",
+ bi->dis.coef.pad.height);
+ ia_css_debug_dtrace(2, "dis hor proj num = %d\n",
+ bi->dis.proj.pad.height);
+ ia_css_debug_dtrace(2, "sctbl_width_per_color = %d\n",
+ bi->sctbl_width_per_color);
+ ia_css_debug_dtrace(2, "s3atbl_width = %d\n", bi->s3atbl_width);
+ ia_css_debug_dtrace(2, "s3atbl_height = %d\n", bi->s3atbl_height);
+ return;
+}
+
+void ia_css_debug_frame_print(const struct ia_css_frame *frame,
+ const char *descr)
+{
+ char *data = NULL;
+
+ assert(frame);
+ assert(descr);
+
+ data = (char *)HOST_ADDRESS(frame->data);
+ ia_css_debug_dtrace(2, "frame %s (%p):\n", descr, frame);
+ ia_css_debug_dtrace(2, " resolution = %dx%d\n",
+ frame->frame_info.res.width, frame->frame_info.res.height);
+ ia_css_debug_dtrace(2, " padded width = %d\n",
+ frame->frame_info.padded_width);
+ ia_css_debug_dtrace(2, " format = %d\n", frame->frame_info.format);
+ switch (frame->frame_info.format) {
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ ia_css_debug_dtrace(2, " Y = %p\n",
+ data + frame->planes.nv.y.offset);
+ ia_css_debug_dtrace(2, " UV = %p\n",
+ data + frame->planes.nv.uv.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ ia_css_debug_dtrace(2, " YUYV = %p\n",
+ data + frame->planes.yuyv.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ ia_css_debug_dtrace(2, " Y = %p\n",
+ data + frame->planes.yuv.y.offset);
+ ia_css_debug_dtrace(2, " U = %p\n",
+ data + frame->planes.yuv.u.offset);
+ ia_css_debug_dtrace(2, " V = %p\n",
+ data + frame->planes.yuv.v.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ ia_css_debug_dtrace(2, " RAW PACKED = %p\n",
+ data + frame->planes.raw.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW:
+ ia_css_debug_dtrace(2, " RAW = %p\n",
+ data + frame->planes.raw.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ ia_css_debug_dtrace(2, " RGB = %p\n",
+ data + frame->planes.rgb.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ ia_css_debug_dtrace(2, " R = %p\n",
+ data + frame->planes.plane6.r.offset);
+ ia_css_debug_dtrace(2, " RatB = %p\n",
+ data + frame->planes.plane6.r_at_b.offset);
+ ia_css_debug_dtrace(2, " Gr = %p\n",
+ data + frame->planes.plane6.gr.offset);
+ ia_css_debug_dtrace(2, " Gb = %p\n",
+ data + frame->planes.plane6.gb.offset);
+ ia_css_debug_dtrace(2, " B = %p\n",
+ data + frame->planes.plane6.b.offset);
+ ia_css_debug_dtrace(2, " BatR = %p\n",
+ data + frame->planes.plane6.b_at_r.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ ia_css_debug_dtrace(2, " Binary data = %p\n",
+ data + frame->planes.binary.data.offset);
+ break;
+ default:
+ ia_css_debug_dtrace(2, " unknown frame type\n");
+ break;
+ }
+ return;
+}
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
+ *state)
+{
+#endif
+
+#if SP_DEBUG == SP_DEBUG_DUMP
+
+ assert(state);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current SP software counter: %d\n",
+ state->debug[0]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer queue head: 0x%x\n",
+ state->debug[1]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer queue tail: 0x%x\n",
+ state->debug[2]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a buffer queue head: 0x%x\n",
+ state->debug[3]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a buffer queue tail: 0x%x\n",
+ state->debug[4]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full output buffer queue head: 0x%x\n",
+ state->debug[5]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full output buffer queue tail: 0x%x\n",
+ state->debug[6]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full s3a buffer queue head: 0x%x\n",
+ state->debug[7]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full s3a buffer queue tail: 0x%x\n",
+ state->debug[8]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue head: 0x%x\n",
+ state->debug[9]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue tail: 0x%x\n",
+ state->debug[10]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "num of stages of current pipeline: 0x%x\n",
+ state->debug[11]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 1: 0x%x\n",
+ state->debug[12]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 2: 0x%x\n",
+ state->debug[13]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage out_vf buffer idx: 0x%x\n",
+ state->debug[14]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage output buffer idx: 0x%x\n",
+ state->debug[15]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage s3a buffer idx: 0x%x\n",
+ state->debug[16]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first char of current stage name: 0x%x\n",
+ state->debug[17]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "current SP thread id: 0x%x\n",
+ state->debug[18]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer address 1: 0x%x\n",
+ state->debug[19]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer address 2: 0x%x\n",
+ state->debug[20]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty out_vf buffer address 1: 0x%x\n",
+ state->debug[21]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty out_vf buffer address 2: 0x%x\n",
+ state->debug[22]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_hi buffer address 1: 0x%x\n",
+ state->debug[23]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_hi buffer address 2: 0x%x\n",
+ state->debug[24]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_lo buffer address 1: 0x%x\n",
+ state->debug[25]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_lo buffer address 2: 0x%x\n",
+ state->debug[26]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_hor buffer address 1: 0x%x\n",
+ state->debug[27]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_hor buffer address 2: 0x%x\n",
+ state->debug[28]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_ver buffer address 1: 0x%x\n",
+ state->debug[29]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_ver buffer address 2: 0x%x\n",
+ state->debug[30]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty param buffer address: 0x%x\n",
+ state->debug[31]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame address: 0x%x\n",
+ state->debug[32]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame container address: 0x%x\n",
+ state->debug[33]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame container payload: 0x%x\n",
+ state->debug[34]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi address: 0x%x\n",
+ state->debug[35]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi container address: 0x%x\n",
+ state->debug[36]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi container payload: 0x%x\n",
+ state->debug[37]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo address: 0x%x\n",
+ state->debug[38]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo container address: 0x%x\n",
+ state->debug[39]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo container payload: 0x%x\n",
+ state->debug[40]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of calling flash start function: 0x%x\n",
+ state->debug[41]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of calling flash close function: 0x%x\n",
+ state->debug[42]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "number of flashed frame: 0x%x\n",
+ state->debug[43]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "flash in use flag: 0x%x\n",
+ state->debug[44]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of update frame flashed flag: 0x%x\n",
+ state->debug[46]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of active threads: 0x%x\n",
+ state->debug[45]);
+
+#elif SP_DEBUG == SP_DEBUG_COPY
+
+ /* Remember last_index because we only want to print new entries */
+ static int last_index;
+ int sp_index = state->index;
+ int n;
+
+ assert(state);
+ if (sp_index < last_index) {
+ /* SP has been reset */
+ last_index = 0;
+ }
+
+ if (last_index == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+				    "copy-trace init: sp_dbg_if_start_line=%d, sp_dbg_if_start_column=%d, sp_dbg_if_cropped_height=%d, sp_dbg_if_cropped_width=%d\n",
+ state->if_start_line,
+ state->if_start_column,
+ state->if_cropped_height,
+ state->if_cropped_width);
+ }
+
+ if ((last_index + SH_CSS_SP_DBG_TRACE_DEPTH) < sp_index) {
+ /* last index can be multiple rounds behind */
+ /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
+ last_index = sp_index - SH_CSS_SP_DBG_TRACE_DEPTH;
+ }
+
+ for (n = last_index; n < sp_index; n++) {
+ int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;
+
+ if (state->trace[i].frame != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "copy-trace: frame=%d, line=%d, pixel_distance=%d, mipi_used_dword=%d, sp_index=%d\n",
+ state->trace[i].frame,
+ state->trace[i].line,
+ state->trace[i].pixel_distance,
+ state->trace[i].mipi_used_dword,
+ state->trace[i].sp_index);
+ }
+ }
+
+ last_index = sp_index;
+
+#elif SP_DEBUG == SP_DEBUG_TRACE
+
+ /*
+	 * This is just an example of how TRACE_FILE_ID (see ia_css_debug.sp.h)
+	 * is mapped onto the file name string.
+ *
+ * Adjust this to your trace case!
+ */
+ static char const *const id2filename[8] = {
+ "param_buffer.sp.c | tagger.sp.c | pipe_data.sp.c",
+ "isp_init.sp.c",
+ "sp_raw_copy.hive.c",
+ "dma_configure.sp.c",
+ "sp.hive.c",
+ "event_proxy_sp.hive.c",
+ "circular_buffer.sp.c",
+ "frame_buffer.sp.c"
+ };
+
+ /* Example SH_CSS_SP_DBG_NR_OF_TRACES==1 */
+ /* Adjust this to your trace case */
+ static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
+ "default"
+ };
+
+ /* Remember host_index_last because we only want to print new entries */
+ static int host_index_last[SH_CSS_SP_DBG_NR_OF_TRACES] = { 0 };
+ int t, n;
+
+ assert(state);
+
+ for (t = 0; t < SH_CSS_SP_DBG_NR_OF_TRACES; t++) {
+ int sp_index_last = state->index_last[t];
+
+ if (sp_index_last < host_index_last[t]) {
+ /* SP has been reset */
+ host_index_last[t] = 0;
+ }
+
+ if ((host_index_last[t] + SH_CSS_SP_DBG_TRACE_DEPTH) <
+ sp_index_last) {
+ /* last index can be multiple rounds behind */
+ /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "Warning: trace %s has gap of %d traces\n",
+ trace_name[t],
+ (sp_index_last -
+ (host_index_last[t] +
+ SH_CSS_SP_DBG_TRACE_DEPTH)));
+
+ host_index_last[t] =
+ sp_index_last - SH_CSS_SP_DBG_TRACE_DEPTH;
+ }
+
+ for (n = host_index_last[t]; n < sp_index_last; n++) {
+ int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;
+ int l = state->trace[t][i].location &
+ ((1 << SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS) - 1);
+ int fid = state->trace[t][i].location >>
+ SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS;
+ int ts = state->trace[t][i].time_stamp;
+
+ if (ts) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "%05d trace=%s, file=%s:%d, data=0x%08x\n",
+ ts,
+ trace_name[t],
+ id2filename[fid], l,
+ state->trace[t][i].data);
+ }
+ }
+ host_index_last[t] = sp_index_last;
+ }
+
+#elif SP_DEBUG == SP_DEBUG_MINIMAL
+ int i;
+ int base = 0;
+ int limit = SH_CSS_NUM_SP_DEBUG;
+ int step = 1;
+
+ assert(state);
+
+ for (i = base; i < limit; i += step) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sp_dbg_trace[%d] = %d\n",
+ i, state->debug[i]);
+ }
+#endif
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+ return;
+}
+#endif
+
+void ia_css_debug_dump_sp_sw_debug_info(void)
+{
+#if SP_DEBUG != SP_DEBUG_NONE
+ struct sh_css_sp_debug_state state;
+
+ sh_css_sp_get_debug_state(&state);
+ ia_css_debug_print_sp_debug_state(&state);
+#endif
+ ia_css_bufq_dump_queue_info();
+ ia_css_pipeline_dump_thread_map_info();
+ return;
+}
+
+/*
+ * This function is for debug use; it can make the SP go to a sleep
+ * state after each frame, so the user can dump the stable SP dmem.
+ * It can be called after ia_css_start_sp()
+ * and before sh_css_init_buffer_queues().
+ */
+void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_sleep_mode;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
+
+ (void)HIVE_ADDR_sp_sleep_mode; /* Suppress warnings in CRUN */
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sleep_mode),
+ (uint32_t)mode);
+}
+
+void ia_css_debug_wake_up_sp(void)
+{
+ /*hrt_ctl_start(SP); */
+ sp_ctrl_setbit(SP0_ID, SP_SC_REG, SP_START_BIT);
+}
+
+#define FIND_DMEM_PARAMS_TYPE(stream, kernel, type) \
+ (struct CONCATENATE(CONCATENATE(sh_css_isp_, type), _params) *) \
+ findf_dmem_params(stream, offsetof(struct ia_css_memory_offsets, dmem.kernel))
+
+#define FIND_DMEM_PARAMS(stream, kernel) FIND_DMEM_PARAMS_TYPE(stream, kernel, kernel)
+
+/* Find a stage that supports the kernel and return the parameters for that kernel */
+static char *
+findf_dmem_params(struct ia_css_stream *stream, short idx)
+{
+ int i;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
+ struct ia_css_pipeline_stage *stage;
+
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ struct ia_css_binary *binary = stage->binary;
+ short *offsets = (short *)&binary->info->mem_offsets.offsets.param->dmem;
+ short dmem_offset = offsets[idx];
+ const struct ia_css_host_data *isp_data =
+ ia_css_isp_param_get_mem_init(&binary->mem_params,
+ IA_CSS_PARAM_CLASS_PARAM, IA_CSS_ISP_DMEM0);
+ if (dmem_offset < 0)
+ continue;
+ return &isp_data->address[dmem_offset];
+ }
+ }
+ return NULL;
+}
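+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): the
+ * FIND_DMEM_PARAMS() macros above turn a member offset inside struct
+ * ia_css_memory_offsets into an index into a per-binary table of short
+ * offsets, where a negative offset means "this binary does not carry the
+ * kernel". The struct and helper below are simplified, hypothetical
+ * stand-ins for that lookup, not a faithful copy of the driver's data
+ * layout.
+ */
+struct demo_dmem_offsets {	/* stand-in for the per-binary dmem table */
+	short fpn;
+	short ob;
+	short sc;
+};
+
+static char *demo_find_dmem_params(char *param_blob,
+				    const struct demo_dmem_offsets *table,
+				    unsigned int member_offset)
+{
+	/* treat the member offset as a position in an array of shorts */
+	const short *offsets = (const short *)table;
+	short dmem_offset = offsets[member_offset / sizeof(short)];
+
+	if (dmem_offset < 0)	/* kernel not supported by this binary */
+		return NULL;
+	return param_blob + dmem_offset;
+}
+/* usage: demo_find_dmem_params(blob, tbl, offsetof(struct demo_dmem_offsets, ob)) */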
+
+void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
+ unsigned int enable)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "ISP PARAMETERS:\n");
+
+ assert(stream);
+ if ((enable & IA_CSS_DEBUG_DUMP_FPN)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_fpn_dump(FIND_DMEM_PARAMS(stream, fpn), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_OB)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_ob_dump(FIND_DMEM_PARAMS(stream, ob), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_SC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_sc_dump(FIND_DMEM_PARAMS(stream, sc), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_WB)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_wb_dump(FIND_DMEM_PARAMS(stream, wb), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_DP)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_dp_dump(FIND_DMEM_PARAMS(stream, dp), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_BNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_bnr_dump(FIND_DMEM_PARAMS(stream, bnr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_S3A)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_s3a_dump(FIND_DMEM_PARAMS(stream, s3a), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_DE)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_de_dump(FIND_DMEM_PARAMS(stream, de), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_YNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_nr_dump(FIND_DMEM_PARAMS_TYPE(stream, nr, ynr), IA_CSS_DEBUG_VERBOSE);
+ ia_css_yee_dump(FIND_DMEM_PARAMS(stream, yee), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_CSC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_csc_dump(FIND_DMEM_PARAMS(stream, csc), IA_CSS_DEBUG_VERBOSE);
+ ia_css_yuv2rgb_dump(FIND_DMEM_PARAMS_TYPE(stream, yuv2rgb, csc),
+ IA_CSS_DEBUG_VERBOSE);
+ ia_css_rgb2yuv_dump(FIND_DMEM_PARAMS_TYPE(stream, rgb2yuv, csc),
+ IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_GC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_gc_dump(FIND_DMEM_PARAMS(stream, gc), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_TNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_tnr_dump(FIND_DMEM_PARAMS(stream, tnr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_ANR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_anr_dump(FIND_DMEM_PARAMS(stream, anr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_CE)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_ce_dump(FIND_DMEM_PARAMS(stream, ce), IA_CSS_DEBUG_VERBOSE);
+ }
+}
+
+void sh_css_dump_sp_raw_copy_linecount(bool reduced)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_raw_copy_line_count;
+ s32 raw_copy_line_count;
+ static s32 prev_raw_copy_line_count = -1;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_raw_copy_line_count =
+ fw->info.sp.raw_copy_line_count;
+
+ (void)HIVE_ADDR_raw_copy_line_count;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(raw_copy_line_count),
+ &raw_copy_line_count,
+ sizeof(raw_copy_line_count));
+
+ /* only indicate if copy loop is active */
+ if (reduced)
+ raw_copy_line_count = (raw_copy_line_count < 0) ? raw_copy_line_count : 1;
+ /* do the handling */
+ if (prev_raw_copy_line_count != raw_copy_line_count) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sh_css_dump_sp_raw_copy_linecount() line_count=%d\n",
+ raw_copy_line_count);
+ prev_raw_copy_line_count = raw_copy_line_count;
+ }
+}
+
+void ia_css_debug_dump_isp_binary(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_pipeline_sp_curr_binary_id;
+ u32 curr_binary_id;
+ static u32 prev_binary_id = 0xFFFFFFFF;
+ static u32 sample_count;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_pipeline_sp_curr_binary_id = fw->info.sp.curr_binary_id;
+
+ (void)HIVE_ADDR_pipeline_sp_curr_binary_id;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(pipeline_sp_curr_binary_id),
+ &curr_binary_id,
+ sizeof(curr_binary_id));
+
+ /* do the handling */
+ sample_count++;
+ if (prev_binary_id != curr_binary_id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sh_css_dump_isp_binary() pipe_id=%d, binary_id=%d, sample_count=%d\n",
+ (curr_binary_id >> 16),
+ (curr_binary_id & 0x0ffff),
+ sample_count);
+ sample_count = 0;
+ prev_binary_id = curr_binary_id;
+ }
+}
+
+/*
+ * @brief Initialize the debug mode.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool ia_css_debug_mode_init(void)
+{
+ bool rc;
+
+ rc = sh_css_sp_init_dma_sw_reg(0);
+ return rc;
+}
+
+/*
+ * @brief Disable the DMA channel.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool
+ia_css_debug_mode_disable_dma_channel(int dma_id,
+ int channel_id, int request_type)
+{
+ bool rc;
+
+ rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, false);
+
+ return rc;
+}
+
+/*
+ * @brief Enable the DMA channel.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool
+ia_css_debug_mode_enable_dma_channel(int dma_id,
+ int channel_id, int request_type)
+{
+ bool rc;
+
+ rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, true);
+
+ return rc;
+}
+
+static void __printf(1, 2) dtrace_dot(const char *fmt, ...)
+{
+ va_list ap;
+
+ assert(fmt);
+ va_start(ap, fmt);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_START);
+ ia_css_debug_vdtrace(IA_CSS_DEBUG_INFO, fmt, ap);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_END);
+ va_end(ap);
+}
+
+static void
+ia_css_debug_pipe_graph_dump_frame(
+ const struct ia_css_frame *frame,
+ enum ia_css_pipe_id id,
+ char const *blob_name,
+ char const *frame_name,
+ bool in_frame)
+{
+ char bufinfo[100];
+
+ if (frame->dynamic_queue_id == SH_CSS_INVALID_QUEUE_ID) {
+ snprintf(bufinfo, sizeof(bufinfo), "Internal");
+ } else {
+ snprintf(bufinfo, sizeof(bufinfo), "Queue: %s %s",
+ pipe_id_to_str[id],
+ queue_id_to_str[frame->dynamic_queue_id]);
+ }
+ dtrace_dot(
+ "node [shape = box, fixedsize=true, width=2, height=0.7]; \"%p\" [label = \"%s\\n%d(%d) x %d, %dbpp\\n%s\"];",
+ frame,
+ debug_frame_format2str(frame->frame_info.format),
+ frame->frame_info.res.width,
+ frame->frame_info.padded_width,
+ frame->frame_info.res.height,
+ frame->frame_info.raw_bit_depth,
+ bufinfo);
+
+ if (in_frame) {
+ dtrace_dot(
+ "\"%p\"->\"%s(pipe%d)\" [label = %s_frame];",
+ frame,
+ blob_name, id, frame_name);
+ } else {
+ dtrace_dot(
+ "\"%s(pipe%d)\"->\"%p\" [label = %s_frame];",
+ blob_name, id,
+ frame,
+ frame_name);
+ }
+}
+
+void
+ia_css_debug_pipe_graph_dump_prologue(void)
+{
+ dtrace_dot("digraph sh_css_pipe_graph {");
+ dtrace_dot("rankdir=LR;");
+
+ dtrace_dot("fontsize=9;");
+ dtrace_dot("label = \"\\nEnable options: rp=reduced pipe, vfve=vf_veceven, dvse=dvs_envelope, dvs6=dvs_6axis, bo=block_out, fbds=fixed_bayer_ds, bf6=bayer_fir_6db, rawb=raw_binning, cont=continuous, disc=dis_crop\\n"
+ "dp2a=dp_2adjacent, outp=output, outt=out_table, reff=ref_frame, par=params, gam=gamma, cagdc=ca_gdc, ispa=isp_addresses, inf=in_frame, outf=out_frame, hs=high_speed, inpc=input_chunking\"");
+}
+
+void ia_css_debug_pipe_graph_dump_epilogue(void)
+{
+ if (strlen(ring_buffer) > 0) {
+ dtrace_dot(ring_buffer);
+ }
+
+ if (pg_inst.stream_format != N_ATOMISP_INPUT_FORMAT) {
+ /* An input stream format has been set so assume we have
+ * an input system and sensor
+ */
+
+ dtrace_dot(
+ "node [shape = doublecircle, fixedsize=true, width=2.5]; \"input_system\" [label = \"Input system\"];");
+
+ dtrace_dot(
+ "\"input_system\"->\"%s\" [label = \"%s\"];",
+ dot_id_input_bin, debug_stream_format2str(pg_inst.stream_format));
+
+ dtrace_dot(
+ "node [shape = doublecircle, fixedsize=true, width=2.5]; \"sensor\" [label = \"Sensor\"];");
+
+ dtrace_dot(
+ "\"sensor\"->\"input_system\" [label = \"%s\\n%d x %d\\n(%d x %d)\"];",
+ debug_stream_format2str(pg_inst.stream_format),
+ pg_inst.width, pg_inst.height,
+ pg_inst.eff_width, pg_inst.eff_height);
+ }
+
+ dtrace_dot("}");
+
+ /* Reset temp strings */
+ memset(dot_id_input_bin, 0, sizeof(dot_id_input_bin));
+ memset(ring_buffer, 0, sizeof(ring_buffer));
+
+ pg_inst.do_init = true;
+ pg_inst.width = 0;
+ pg_inst.height = 0;
+ pg_inst.eff_width = 0;
+ pg_inst.eff_height = 0;
+ pg_inst.stream_format = N_ATOMISP_INPUT_FORMAT;
+}
+
+void
+ia_css_debug_pipe_graph_dump_stage(
+ struct ia_css_pipeline_stage *stage,
+ enum ia_css_pipe_id id)
+{
+ char blob_name[SH_CSS_MAX_BINARY_NAME + 10] = "<unknown type>";
+ char const *bin_type = "<unknown type>";
+ int i;
+
+ assert(stage);
+ if (stage->sp_func != IA_CSS_PIPELINE_NO_FUNC)
+ return;
+
+ if (pg_inst.do_init) {
+ ia_css_debug_pipe_graph_dump_prologue();
+ pg_inst.do_init = false;
+ }
+
+ if (stage->binary) {
+ bin_type = "binary";
+ if (stage->binary->info->blob)
+ snprintf(blob_name, sizeof(blob_name), "%s_stage%d",
+ stage->binary->info->blob->name, stage->stage_num);
+ } else if (stage->firmware) {
+ bin_type = "firmware";
+
+ strscpy(blob_name, IA_CSS_EXT_ISP_PROG_NAME(stage->firmware),
+ sizeof(blob_name));
+ }
+
+ /* Guard in case of binaries that don't have any binary_info */
+ if (stage->binary_info) {
+ char enable_info1[100];
+ char enable_info2[100];
+ char enable_info3[100];
+ char enable_info[302];
+ struct ia_css_binary_info *bi = stage->binary_info;
+
+		/* Split it into 2 function calls to keep the number of
+		 * parameters per call "reasonable"
+ */
+ snprintf(enable_info1, sizeof(enable_info1),
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ bi->enable.reduced_pipe ? "rp," : "",
+ bi->enable.vf_veceven ? "vfve," : "",
+ bi->enable.dis ? "dis," : "",
+ bi->enable.dvs_envelope ? "dvse," : "",
+ bi->enable.uds ? "uds," : "",
+ bi->enable.dvs_6axis ? "dvs6," : "",
+ bi->enable.block_output ? "bo," : "",
+ bi->enable.ds ? "ds," : "",
+ bi->enable.bayer_fir_6db ? "bf6," : "",
+ bi->enable.raw_binning ? "rawb," : "",
+ bi->enable.continuous ? "cont," : "",
+ bi->enable.s3a ? "s3a," : "",
+ bi->enable.fpnr ? "fpnr," : "",
+ bi->enable.sc ? "sc," : ""
+ );
+
+ snprintf(enable_info2, sizeof(enable_info2),
+ "%s%s%s%s%s%s%s%s%s%s%s",
+ bi->enable.macc ? "macc," : "",
+ bi->enable.output ? "outp," : "",
+ bi->enable.ref_frame ? "reff," : "",
+ bi->enable.tnr ? "tnr," : "",
+ bi->enable.xnr ? "xnr," : "",
+ bi->enable.params ? "par," : "",
+ bi->enable.ca_gdc ? "cagdc," : "",
+ bi->enable.isp_addresses ? "ispa," : "",
+ bi->enable.in_frame ? "inf," : "",
+ bi->enable.out_frame ? "outf," : "",
+ bi->enable.high_speed ? "hs," : ""
+ );
+
+ /* And merge them into one string */
+ snprintf(enable_info, sizeof(enable_info), "%s%s",
+ enable_info1, enable_info2);
+ {
+ int l, p;
+ char *ei = enable_info;
+
+ l = strlen(ei);
+
+ /* Replace last ',' with \0 if present */
+ if (l && enable_info[l - 1] == ',')
+ enable_info[--l] = '\0';
+
+ if (l > ENABLE_LINE_MAX_LENGTH) {
+ /* Too big for one line, find last comma */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+ /* Last comma found, copy till that comma */
+ strscpy(enable_info1, ei,
+ p > sizeof(enable_info1) ? sizeof(enable_info1) : p);
+
+ ei += p + 1;
+ l = strlen(ei);
+
+ if (l <= ENABLE_LINE_MAX_LENGTH) {
+ /* The 2nd line fits */
+ /* we cannot use ei as argument because
+ * it is not guaranteed dword aligned
+ */
+
+ strscpy(enable_info2, ei,
+ l > sizeof(enable_info2) ? sizeof(enable_info2) : l);
+
+ snprintf(enable_info, sizeof(enable_info), "%s\\n%s",
+ enable_info1, enable_info2);
+
+ } else {
+ /* 2nd line is still too long */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+
+ strscpy(enable_info2, ei,
+ p > sizeof(enable_info2) ? sizeof(enable_info2) : p);
+
+ ei += p + 1;
+ l = strlen(ei);
+
+ if (l <= ENABLE_LINE_MAX_LENGTH) {
+ /* The 3rd line fits */
+ /* we cannot use ei as argument because
+ * it is not guaranteed dword aligned
+ */
+ strscpy(enable_info3, ei,
+ sizeof(enable_info3));
+ snprintf(enable_info, sizeof(enable_info),
+ "%s\\n%s\\n%s",
+ enable_info1, enable_info2,
+ enable_info3);
+ } else {
+ /* 3rd line is still too long */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+ strscpy(enable_info3, ei,
+ p > sizeof(enable_info3) ? sizeof(enable_info3) : p);
+ ei += p + 1;
+ strscpy(enable_info3, ei,
+ sizeof(enable_info3));
+ snprintf(enable_info, sizeof(enable_info),
+ "%s\\n%s\\n%s",
+ enable_info1, enable_info2,
+ enable_info3);
+ }
+ }
+ }
+ }
+
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\\n\\n%s\"]; \"%s(pipe%d)\"",
+ bin_type, blob_name, enable_info, blob_name, id);
+ } else {
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\\n\"]; \"%s(pipe%d)\"",
+ bin_type, blob_name, blob_name, id);
+ }
+
+ if (stage->stage_num == 0) {
+ /*
+ * There are some implicit assumptions about which bin is the
+ * input binary e.g. which one is connected to the input system
+ * Priority:
+ * 1) sp_raw_copy bin has highest priority
+ * 2) First stage==0 binary of preview, video or capture
+ */
+ if (strlen(dot_id_input_bin) == 0) {
+ snprintf(dot_id_input_bin, sizeof(dot_id_input_bin),
+ "%s(pipe%d)", blob_name, id);
+ }
+ }
+
+ if (stage->args.in_frame) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.in_frame, id, blob_name,
+ "in", true);
+ }
+
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
+ if (stage->args.tnr_frames[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.tnr_frames[i], id,
+ blob_name, "tnr_frame", true);
+ }
+ }
+
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
+ if (stage->args.delay_frames[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.delay_frames[i], id,
+ blob_name, "delay_frame", true);
+ }
+ }
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (stage->args.out_frame[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.out_frame[i], id, blob_name,
+ "out", false);
+ }
+ }
+
+ if (stage->args.out_vf_frame) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.out_vf_frame, id, blob_name,
+ "out_vf", false);
+ }
+}
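+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): the
+ * enable_info handling above wraps a comma-separated option string onto
+ * lines of at most ENABLE_LINE_MAX_LENGTH characters by breaking at the
+ * last comma that still fits. The standalone helper below shows the same
+ * idea; the real code wraps into at most three fixed-size buffers.
+ */
+#include <string.h>
+
+/* returns how many characters of 's' fit on the current line */
+static size_t demo_wrap_at_comma(const char *s, size_t max_len)
+{
+	size_t len = strlen(s);
+	size_t p;
+
+	if (len <= max_len)
+		return len;		/* the whole remainder fits */
+
+	p = max_len;
+	while (p > 0 && s[p] != ',')
+		p--;			/* back up to the last comma that fits */
+
+	return p ? p : max_len;		/* no comma found: hard break */
+}
+/* print s[0..n) on one line, then continue from s + n + 1 (skipping the comma) */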
+
+void
+ia_css_debug_pipe_graph_dump_sp_raw_copy(
+ struct ia_css_frame *out_frame)
+{
+ assert(out_frame);
+ if (pg_inst.do_init) {
+ ia_css_debug_pipe_graph_dump_prologue();
+ pg_inst.do_init = false;
+ }
+
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\"]; \"%s(pipe%d)\"",
+ "sp-binary", "sp_raw_copy", "sp_raw_copy", 1);
+
+ snprintf(ring_buffer, sizeof(ring_buffer),
+ "node [shape = box, fixedsize=true, width=2, height=0.7]; \"%p\" [label = \"%s\\n%d(%d) x %d\\nRingbuffer\"];",
+ out_frame,
+ debug_frame_format2str(out_frame->frame_info.format),
+ out_frame->frame_info.res.width,
+ out_frame->frame_info.padded_width,
+ out_frame->frame_info.res.height);
+
+ dtrace_dot(ring_buffer);
+
+ dtrace_dot(
+ "\"%s(pipe%d)\"->\"%p\" [label = out_frame];",
+ "sp_raw_copy", 1, out_frame);
+
+ snprintf(dot_id_input_bin, sizeof(dot_id_input_bin), "%s(pipe%d)",
+ "sp_raw_copy", 1);
+}
+
+void
+ia_css_debug_pipe_graph_dump_stream_config(
+ const struct ia_css_stream_config *stream_config)
+{
+ pg_inst.width = stream_config->input_config.input_res.width;
+ pg_inst.height = stream_config->input_config.input_res.height;
+ pg_inst.eff_width = stream_config->input_config.effective_res.width;
+ pg_inst.eff_height = stream_config->input_config.effective_res.height;
+ pg_inst.stream_format = stream_config->input_config.format;
+}
+
+void
+ia_css_debug_dump_resolution(
+ const struct ia_css_resolution *res,
+ const char *label)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: =%d x =%d\n",
+ label, res->width, res->height);
+}
+
+void
+ia_css_debug_dump_frame_info(
+ const struct ia_css_frame_info *info,
+ const char *label)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", label);
+ ia_css_debug_dump_resolution(&info->res, "res");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "padded_width: %d\n",
+ info->padded_width);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n", info->format);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bit_depth: %d\n",
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bayer_order: %d\n",
+ info->raw_bayer_order);
+}
+
+void
+ia_css_debug_dump_capture_config(
+ const struct ia_css_capture_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_xnr: %d\n",
+ config->enable_xnr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_raw_output: %d\n",
+ config->enable_raw_output);
+}
+
+void
+ia_css_debug_dump_pipe_extra_config(
+ const struct ia_css_pipe_extra_config *extra_config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
+ if (extra_config) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_raw_binning: %d\n",
+ extra_config->enable_raw_binning);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_yuv_ds: %d\n",
+ extra_config->enable_yuv_ds);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_high_speed: %d\n",
+ extra_config->enable_high_speed);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_dvs_6axis: %d\n",
+ extra_config->enable_dvs_6axis);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_reduced_pipe: %d\n",
+ extra_config->enable_reduced_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_fractional_ds: %d\n",
+ extra_config->enable_fractional_ds);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "disable_vf_pp: %d\n",
+ extra_config->disable_vf_pp);
+ }
+}
+
+void
+ia_css_debug_dump_pipe_config(
+ const struct ia_css_pipe_config *config)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("config = %p", config);
+ if (!config) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "isp_pipe_version: %d\n",
+ config->isp_pipe_version);
+ ia_css_debug_dump_resolution(&config->bayer_ds_out_res,
+ "bayer_ds_out_res");
+ ia_css_debug_dump_resolution(&config->capt_pp_in_res,
+ "capt_pp_in_res");
+ ia_css_debug_dump_resolution(&config->vf_pp_in_res, "vf_pp_in_res");
+
+ if (IS_ISP2401) {
+ ia_css_debug_dump_resolution(&config->output_system_in_res,
+ "output_system_in_res");
+ }
+ ia_css_debug_dump_resolution(&config->dvs_crop_out_res,
+ "dvs_crop_out_res");
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ ia_css_debug_dump_frame_info(&config->output_info[i], "output_info");
+ ia_css_debug_dump_frame_info(&config->vf_output_info[i],
+ "vf_output_info");
+ }
+ ia_css_debug_dump_capture_config(&config->default_capture_config);
+ ia_css_debug_dump_resolution(&config->dvs_envelope, "dvs_envelope");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "dvs_frame_delay: %d\n",
+ config->dvs_frame_delay);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_dz: %d\n",
+ config->enable_dz);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void
+ia_css_debug_dump_stream_config_source(
+ const struct ia_css_stream_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ switch (config->mode) {
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.port\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "port: %d\n",
+ config->source.port.port);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_lanes: %d\n",
+ config->source.port.num_lanes);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "timeout: %d\n",
+ config->source.port.timeout);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "compression: %d\n",
+ config->source.port.compression.type);
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.prbs\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n",
+ config->source.prbs.id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "h_blank: %d\n",
+ config->source.prbs.h_blank);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "v_blank: %d\n",
+ config->source.prbs.v_blank);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed: 0x%x\n",
+ config->source.prbs.seed);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed1: 0x%x\n",
+ config->source.prbs.seed1);
+ break;
+ default:
+ case IA_CSS_INPUT_MODE_FIFO:
+ case IA_CSS_INPUT_MODE_MEMORY:
+ break;
+ }
+}
+
+void
+ia_css_debug_dump_mipi_buffer_config(
+ const struct ia_css_mipi_buffer_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "size_mem_words: %d\n",
+ config->size_mem_words);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "nof_mipi_buffers: %d\n",
+ config->nof_mipi_buffers);
+}
+
+void
+ia_css_debug_dump_metadata_config(
+ const struct ia_css_metadata_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "data_type: %d\n",
+ config->data_type);
+ ia_css_debug_dump_resolution(&config->resolution, "resolution");
+}
+
+void
+ia_css_debug_dump_stream_config(
+ const struct ia_css_stream_config *config,
+ int num_pipes)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_pipes: %d\n", num_pipes);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dump_stream_config_source(config);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "channel_id: %d\n",
+ config->channel_id);
+ ia_css_debug_dump_resolution(&config->input_config.input_res, "input_res");
+ ia_css_debug_dump_resolution(&config->input_config.effective_res,
+ "effective_res");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n",
+ config->input_config.format);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "bayer_order: %d\n",
+ config->input_config.bayer_order);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sensor_binning_factor: %d\n",
+ config->sensor_binning_factor);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pixels_per_clock: %d\n",
+ config->pixels_per_clock);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "online: %d\n",
+ config->online);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "init_num_cont_raw_buf: %d\n",
+ config->init_num_cont_raw_buf);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "target_num_cont_raw_buf: %d\n",
+ config->target_num_cont_raw_buf);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pack_raw_pixels: %d\n",
+ config->pack_raw_pixels);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "continuous: %d\n",
+ config->continuous);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "flash_gpio_pin: %d\n",
+ config->flash_gpio_pin);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "left_padding: %d\n",
+ config->left_padding);
+ ia_css_debug_dump_mipi_buffer_config(&config->mipi_buffer_config);
+ ia_css_debug_dump_metadata_config(&config->metadata_config);
+}
+
+/*
+ Trace support.
+
+ This tracer is using a buffer to trace the flow of the FW and dump misc values (see below for details).
+ Currently, support is only for SKC.
+ To enable support for other platforms:
+ - Allocate a buffer for tracing in DMEM. The longer the better.
+	- Use the DBG_init routine in sp.hive.c to initialize the tracer with the selected address and size.
+ - Add trace points in the SP code wherever needed.
+ - Enable the dump below with the required address and required adjustments.
+ Dump is called at the end of ia_css_debug_dump_sp_state().
+*/
+
+/*
+ dump_trace() : dump the trace points from DMEM2.
+ for every trace point, the following are printed: index, major:minor and the 16-bit attached value.
+ The routine looks for the first 0, and then prints from it cyclically.
+	Data format in DMEM2:
+ first 4 DWORDS: header
+ DWORD 0: data description
+ byte 0: version
+ byte 1: number of threads (for future use)
+	byte 2+3: number of TPs
+ DWORD 1: command byte + data (for future use)
+ byte 0: command
+ byte 1-3: command signature
+ DWORD 2-3: additional data (for future use)
+ Following data is 4-byte oriented:
+ byte 0: major
+ byte 1: minor
+ byte 2-3: data
+*/
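+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): unpack
+ * one 32-bit trace entry following the byte layout described above
+ * (byte 0: major, byte 1: minor, bytes 2-3: 16-bit data), assuming the
+ * bytes are packed little-endian into the word. The driver itself uses
+ * the FIELD_*_UNPACK macros, whose exact bit positions can differ per
+ * tracer version and platform.
+ */
+struct demo_trace_point {
+	unsigned char  major;
+	unsigned char  minor;
+	unsigned short data;
+};
+
+static struct demo_trace_point demo_unpack_trace_word(unsigned int word)
+{
+	struct demo_trace_point tp;
+
+	tp.major = word & 0xff;
+	tp.minor = (word >> 8) & 0xff;
+	tp.data  = (word >> 16) & 0xffff;
+	return tp;
+}
+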
+#if TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP
+static void debug_dump_one_trace(enum TRACE_CORE_ID proc_id)
+{
+#if defined(HAS_TRACER_V2)
+ u32 start_addr;
+ u32 start_addr_data;
+ u32 item_size;
+ u32 tmp;
+ u8 tid_val;
+ enum TRACE_DUMP_FORMAT dump_format;
+
+ int i, j, max_trace_points, point_num, limit = -1;
+ /* using a static buffer here as the driver has issues allocating memory */
+ static u32 trace_read_buf[TRACE_BUFF_SIZE] = {0};
+ static struct trace_header_t header;
+ u8 *header_arr;
+
+ /* read the header and parse it */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "~~~ Tracer ");
+ switch (proc_id) {
+ case TRACE_SP0_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP0");
+ start_addr = TRACE_SP0_ADDR;
+ start_addr_data = TRACE_SP0_DATA_ADDR;
+ item_size = TRACE_SP0_ITEM_SIZE;
+ max_trace_points = TRACE_SP0_MAX_POINTS;
+ break;
+ case TRACE_SP1_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP1");
+ start_addr = TRACE_SP1_ADDR;
+ start_addr_data = TRACE_SP1_DATA_ADDR;
+ item_size = TRACE_SP1_ITEM_SIZE;
+ max_trace_points = TRACE_SP1_MAX_POINTS;
+ break;
+ case TRACE_ISP_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ISP");
+ start_addr = TRACE_ISP_ADDR;
+ start_addr_data = TRACE_ISP_DATA_ADDR;
+ item_size = TRACE_ISP_ITEM_SIZE;
+ max_trace_points = TRACE_ISP_MAX_POINTS;
+ break;
+ default:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "\t\ttraces are not supported for this processor ID - exiting\n");
+ return;
+ }
+
+ if (!IS_ISP2401) {
+ tmp = ia_css_device_load_uint32(start_addr);
+ point_num = (tmp >> 16) & 0xFFFF;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", tmp & 0xFF,
+ point_num);
+ } else {
+ /* Loading byte-by-byte as using the master routine had issues */
+ header_arr = (uint8_t *)&header;
+ for (i = 0; i < (int)sizeof(struct trace_header_t); i++)
+ header_arr[i] = ia_css_device_load_uint8(start_addr + (i));
+
+ point_num = header.max_tracer_points;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", header.version,
+ point_num);
+
+ tmp = header.version;
+ }
+ if ((tmp & 0xFF) != TRACER_VER) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tUnknown version - exiting\n");
+ return;
+ }
+ if (point_num > max_trace_points) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tToo many points - exiting\n");
+ return;
+ }
+ /* copy the TPs and find the first 0 */
+ for (i = 0; i < point_num; i++) {
+ trace_read_buf[i] = ia_css_device_load_uint32(start_addr_data +
+ (i * item_size));
+ if ((limit == (-1)) && (trace_read_buf[i] == 0))
+ limit = i;
+ }
+ if (IS_ISP2401) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Status:\n");
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "\tT%d: %3d (%02x) %6d (%04x) %10d (%08x)\n", i,
+ header.thr_status_byte[i], header.thr_status_byte[i],
+ header.thr_status_word[i], header.thr_status_word[i],
+ header.thr_status_dword[i], header.thr_status_dword[i]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Scratch:\n");
+ for (i = 0; i < MAX_SCRATCH_DATA; i++)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%10d (%08x) ",
+ header.scratch_debug[i], header.scratch_debug[i]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\n");
+ }
+ /* two 0s in the beginning: empty buffer */
+ if ((trace_read_buf[0] == 0) && (trace_read_buf[1] == 0)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tEmpty tracer - exiting\n");
+ return;
+ }
+ /* no overrun: start from 0 */
+ if ((limit == point_num - 1) ||
+ /* first 0 is at the end - border case */
+ (trace_read_buf[limit + 1] ==
+ 0)) /* did not make a full cycle after the memset */
+ limit = 0;
+ /* overrun: limit is the first non-zero after the first zero */
+ else
+ limit++;
+
+ /* print the TPs */
+ for (i = 0; i < point_num; i++) {
+ j = (limit + i) % point_num;
+ if (trace_read_buf[j]) {
+ if (!IS_ISP2401) {
+				dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
+ } else {
+ tid_val = FIELD_TID_UNPACK(trace_read_buf[j]);
+ dump_format = TRACE_DUMP_FORMAT_POINT;
+
+ /*
+ * When tid value is 111b, the data will be interpreted differently:
+ * tid val is ignored, major field contains 2 bits (msb) for format type
+ */
+ if (tid_val == FIELD_TID_SEL_FORMAT_PAT) {
+ dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
+ }
+ }
+ switch (dump_format) {
+ case TRACE_DUMP_FORMAT_POINT:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %d\n",
+ j, FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_MINOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2400 */
+ case TRACE_DUMP_FORMAT_VALUE24_HEX:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x H\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2400 */
+ case TRACE_DUMP_FORMAT_VALUE24_DEC:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %d D\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2401 */
+ case TRACE_DUMP_FORMAT_POINT_NO_TID:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %x (%d)\n",
+ j,
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+ FIELD_MINOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]));
+ break;
+ /* ISP2401 */
+ case TRACE_DUMP_FORMAT_VALUE24:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x (%d)\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ case TRACE_DUMP_FORMAT_VALUE24_TIMING:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing %x\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ case TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing delta %x\n",
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ default:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "no such trace dump format %d",
+ dump_format);
+ break;
+ }
+ }
+ }
+#else
+ (void)proc_id;
+#endif /* HAS_TRACER_V2 */
+}
+#endif /* TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP */
+
+void ia_css_debug_dump_trace(void)
+{
+#if TRACE_ENABLE_SP0
+ debug_dump_one_trace(TRACE_SP0_ID);
+#endif
+#if TRACE_ENABLE_SP1
+ debug_dump_one_trace(TRACE_SP1_ID);
+#endif
+#if TRACE_ENABLE_ISP
+ debug_dump_one_trace(TRACE_ISP_ID);
+#endif
+}
+
+/* ISP2401 */
+void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps)
+{
+ unsigned int pc;
+ unsigned int i;
+ hrt_data sc = sp_ctrl_load(id, SP_SC_REG);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Status reg: 0x%X\n", id, sc);
+ sc = sp_ctrl_load(id, SP_CTRL_SINK_REG);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Stall reg: 0x%X\n", id, sc);
+ for (i = 0; i < num_of_dumps; i++) {
+ pc = sp_ctrl_load(id, SP_PC_REG);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d PC: 0x%X\n", id, pc);
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h b/drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h
new file mode 100644
index 000000000000..d0c3278b0fd9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_EVENT_H
+#define _IA_CSS_EVENT_H
+
+#include <type_support.h>
+#include "sw_event_global.h" /*event macros.TODO : Change File Name..???*/
+
+bool ia_css_event_encode(
+ u8 *in,
+ u8 nr,
+ uint32_t *out);
+
+void ia_css_event_decode(
+ u32 event,
+ uint8_t *payload);
+
+#endif /*_IA_CSS_EVENT_H*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/event/src/event.c b/drivers/staging/media/atomisp/pci/runtime/event/src/event.c
new file mode 100644
index 000000000000..6154dda2d968
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/event/src/event.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "sh_css_sp.h"
+
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+
+#include <type_support.h>
+#include "ia_css_binary.h"
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_internal.h"
+#include "sh_css_legacy.h"
+
+#include "gdc_device.h" /* HRT_GDC_N */
+
+/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */
+
+#include "assert_support.h"
+
+#include "ia_css_queue.h" /* host_sp_enqueue_XXX */
+#include "ia_css_event.h" /* ia_css_event_encode */
+/*
+ * @brief Encode the information into the software-event.
+ * Refer to "sw_event_public.h" for details.
+ */
+bool ia_css_event_encode(
+ u8 *in,
+ u8 nr,
+ uint32_t *out)
+{
+ bool ret;
+ u32 nr_of_bits;
+ u32 i;
+
+ assert(in);
+ assert(out);
+ OP___assert(nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT);
+
+ /* initialize the output */
+ *out = 0;
+
+ /* get the number of bits per information */
+ nr_of_bits = sizeof(uint32_t) * 8 / nr;
+
+	/* compress all inputs into a single output */
+ for (i = 0; i < nr; i++) {
+ *out <<= nr_of_bits;
+ *out |= in[i];
+ }
+
+	/* report whether nr was within the valid range */
+ ret = (nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT);
+
+ return ret;
+}
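+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): what
+ * the packing loop above produces for the common nr == 4 case. Each of
+ * the four payload bytes gets 32 / 4 = 8 bits, and in[0] ends up in the
+ * most significant byte because it is shifted left three more times than
+ * in[3].
+ */
+static unsigned int demo_encode4(const unsigned char in[4])
+{
+	unsigned int out = 0;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		out <<= 8;	/* 32 bits / 4 payload bytes */
+		out |= in[i];
+	}
+	return out;	/* in[0] << 24 | in[1] << 16 | in[2] << 8 | in[3] */
+}
+/* e.g. { 0x12, 0x34, 0x56, 0x78 } encodes to 0x12345678 */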
+
+void ia_css_event_decode(
+ u32 event,
+ uint8_t *payload)
+{
+ assert(payload[1] == 0);
+ assert(payload[2] == 0);
+ assert(payload[3] == 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_event_decode() enter:\n");
+
+	/* First decode according to the common case.
+	 * In case of a PORT_EOF event we overwrite with
+	 * the specific values.
+	 * This is somewhat ugly but probably somewhat efficient
+	 * (and it avoids some code duplication).
+ */
+ payload[0] = event & 0xff; /*event_code */
+ payload[1] = (event >> 8) & 0xff;
+ payload[2] = (event >> 16) & 0xff;
+ payload[3] = 0;
+
+ switch (payload[0]) {
+ case SH_CSS_SP_EVENT_PORT_EOF:
+ payload[2] = 0;
+ payload[3] = (event >> 24) & 0xff;
+ break;
+
+ case SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE:
+ case SH_CSS_SP_EVENT_TIMER:
+ case SH_CSS_SP_EVENT_FRAME_TAGGED:
+ case SH_CSS_SP_EVENT_FW_WARNING:
+ case SH_CSS_SP_EVENT_FW_ASSERT:
+ payload[3] = (event >> 24) & 0xff;
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h b/drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h
new file mode 100644
index 000000000000..9b058e296c93
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_EVENTQ_H
+#define _IA_CSS_EVENTQ_H
+
+#include "ia_css_queue.h" /* queue APIs */
+
+/**
+ * @brief The host receives an event from the SP.
+ *
+ * @param[in]	eventq_handle	The event queue handle.
+ * @param[out]	payload		The decoded event payload.
+ * @return	0		- Successfully dequeued.
+ * @return -EINVAL - Invalid argument.
+ * @return -ENODATA - Queue is empty.
+ */
+int ia_css_eventq_recv(
+ ia_css_queue_t *eventq_handle,
+ uint8_t *payload);
+
+/**
+ * @brief The Host sends the event to SP.
+ * The caller of this API will be blocked until the event
+ * is sent.
+ *
+ * @param[in]	eventq_handle	The event queue handle.
+ * @param[in] evt_id The event ID.
+ * @param[in] evt_payload_0 The event payload.
+ * @param[in] evt_payload_1 The event payload.
+ * @param[in] evt_payload_2 The event payload.
+ * @return	0		- Successfully enqueued.
+ * @return -EINVAL - Invalid argument.
+ * @return -ENOBUFS - Queue is full.
+ */
+int ia_css_eventq_send(
+ ia_css_queue_t *eventq_handle,
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2);
+#endif /* _IA_CSS_EVENTQ_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c b/drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c
new file mode 100644
index 000000000000..fb1710ddcf48
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_types.h"
+#include "assert_support.h"
+#include "ia_css_queue.h" /* sp2host_dequeue_irq_event() */
+#include "ia_css_eventq.h"
+#include "ia_css_event.h" /* ia_css_event_encode()
+ ia_css_event_decode()
+ */
+int ia_css_eventq_recv(
+ ia_css_queue_t *eventq_handle,
+ uint8_t *payload)
+{
+ u32 sp_event;
+ int error;
+
+ /* dequeue the IRQ event */
+ error = ia_css_queue_dequeue(eventq_handle, &sp_event);
+
+ /* check whether the IRQ event is available or not */
+ if (!error)
+ ia_css_event_decode(sp_event, payload);
+ return error;
+}
+
+/*
+ * @brief The Host sends the event to the SP.
+ * Refer to "sh_css_sp.h" for details.
+ */
+int ia_css_eventq_send(
+ ia_css_queue_t *eventq_handle,
+ u8 evt_id,
+ u8 evt_payload_0,
+ u8 evt_payload_1,
+ uint8_t evt_payload_2)
+{
+ u8 tmp[4];
+ u32 sw_event;
+ int error = -ENOSYS;
+
+	/*
+	 * Encode the event ID and the payload bytes
+	 * into a single software event word.
+	 */
+ tmp[0] = evt_id;
+ tmp[1] = evt_payload_0;
+ tmp[2] = evt_payload_1;
+ tmp[3] = evt_payload_2;
+ ia_css_event_encode(tmp, 4, &sw_event);
+
+ /* queue the software event (busy-waiting) */
+ for ( ; ; ) {
+ error = ia_css_queue_enqueue(eventq_handle, sw_event);
+ if (error != -ENOBUFS) {
+			/* We either successfully sent the event
+			 * or hit a real failure; return the status.
+			 */
+ break;
+ }
+		/* Wait for the queue to have room and try again */
+ udelay(1);
+ }
+ return error;
+}
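+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): how a
+ * host-side caller might pair the two event queue APIs declared in
+ * ia_css_eventq.h. The queue handles and the event ID value passed below
+ * are hypothetical placeholders.
+ */
+static void demo_eventq_roundtrip(ia_css_queue_t *host2sp_evtq,
+				  ia_css_queue_t *sp2host_evtq)
+{
+	u8 payload[4];
+
+	/* busy-waits until there is room in the queue */
+	ia_css_eventq_send(host2sp_evtq, /* evt_id */ 0, 1, 2, 3);
+
+	/* returns -ENODATA when no event is pending */
+	if (ia_css_eventq_recv(sp2host_evtq, payload) == 0) {
+		/* payload[0] holds the event code, payload[1..3] the data */
+	}
+}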
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
new file mode 100644
index 000000000000..45d4bb87af7a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FRAME_H__
+#define __IA_CSS_FRAME_H__
+
+/* ISP2401 */
+#include <ia_css_types.h>
+
+#include <ia_css_frame_format.h>
+#include <ia_css_frame_public.h>
+#include "dma.h"
+
+/*********************************************************************
+**** Frame INFO APIs
+**********************************************************************/
+/* @brief Set the given width and minimum padded width in the frame info
+ *
+ * @param[in,out]	info	The info in which the parameters are set
+ * @param[in]	width		The width to be set
+ * @param[in]	min_padded_width	The minimum padded width to be set
+ * @return
+ */
+void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int min_padded_width);
+
+/* @brief Set the given format in the frame info
+ *
+ * @param[in,out]	info	The info in which the format is set
+ * @param[in]	format		The format to be set
+ * @return
+ */
+void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
+ enum ia_css_frame_format format);
+
+/* @brief Initialize the frame info with the given parameters
+ *
+ * @param[in,out]	info	The info in which the parameters are set
+ * @param[in]	width		The width to be set
+ * @param[in]	height		The height to be set
+ * @param[in]	format		The format to be set
+ * @param[in]	aligned		The minimum padded width (alignment) to be set
+ * @return
+ */
+void ia_css_frame_info_init(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int aligned);
+
+/* @brief Check whether 2 frame infos have the same resolution
+ *
+ * @param[in]	info_a		The first frame info to be compared
+ * @param[in]	info_b		The second frame info to be compared
+ * @return Returns true if the resolutions are equal
+ */
+bool ia_css_frame_info_is_same_resolution(
+ const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b);
+
+/* @brief Check whether the frame info is valid
+ *
+ * @param[in]	info		The frame info to be checked
+ * @return	The error code.
+ */
+int ia_css_frame_check_info(const struct ia_css_frame_info *info);
+
+/*********************************************************************
+**** Frame APIs
+**********************************************************************/
+
+/* @brief Initialize the planes depending on the frame type
+ *
+ * @param[in]	frame		The frame whose planes are to be initialized
+ * @return	The error code.
+ */
+int ia_css_frame_init_planes(struct ia_css_frame *frame);
+
+/* @brief Free an array of frames
+ *
+ * @param[in]	num_frames	The number of frames in the array to be freed
+ * @param[in]	**frames_array	The array of frames to be freed
+ * @return
+ */
+void ia_css_frame_free_multiple(unsigned int num_frames,
+ struct ia_css_frame **frames_array);
+
+/* @brief Allocate a CSS frame structure of the given size in bytes.
+ *
+ * @param[out]	frame	The allocated frame.
+ * @param[in] size_bytes The frame size in bytes.
+ * @return The error code.
+ *
+ * Allocate a frame using the given size in bytes.
+ * The frame structure is partially null initialized.
+ */
+int ia_css_frame_allocate_with_buffer_size(struct ia_css_frame **frame,
+ const unsigned int size_bytes);
+
+/* @brief Check whether 2 frames are of the same type
+ *
+ * @param[in]	frame_a		The first frame to be compared
+ * @param[in]	frame_b		The second frame to be compared
+ * @return	Returns true if the frames are of the same type
+ */
+bool ia_css_frame_is_same_type(
+ const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b);
+
+/* @brief Configure a DMA port from frame info
+ *
+ * @param[in]	config		The DMA port configuration
+ * @param[in]	info		The frame info
+ * @return
+ */
+int ia_css_dma_configure_from_info(struct dma_port_config *config,
+ const struct ia_css_frame_info *info);
+
+unsigned int ia_css_frame_pad_width(unsigned int width, enum ia_css_frame_format format);
+
+#endif /* __IA_CSS_FRAME_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h
new file mode 100644
index 000000000000..d1031f824896
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_FRAME_COMM_H__
+#define __IA_CSS_FRAME_COMM_H__
+
+#include "type_support.h"
+#include "platform_support.h"
+#include "runtime/bufq/interface/ia_css_bufq_comm.h"
+#include <system_local.h> /* ia_css_ptr */
+
+/*
+ * These structs are derived from structs defined in ia_css_types.h
+ * (just take out the "_sp" from the struct name to get the "original").
+ * All the fields that are not needed by the SP are removed.
+ */
+struct ia_css_frame_sp_plane {
+ unsigned int offset; /* offset in bytes to start of frame data */
+ /* offset is wrt data in sh_css_sp_sp_frame */
+};
+
+struct ia_css_frame_sp_binary_plane {
+ unsigned int size;
+ struct ia_css_frame_sp_plane data;
+};
+
+struct ia_css_frame_sp_yuv_planes {
+ struct ia_css_frame_sp_plane y;
+ struct ia_css_frame_sp_plane u;
+ struct ia_css_frame_sp_plane v;
+};
+
+struct ia_css_frame_sp_nv_planes {
+ struct ia_css_frame_sp_plane y;
+ struct ia_css_frame_sp_plane uv;
+};
+
+struct ia_css_frame_sp_rgb_planes {
+ struct ia_css_frame_sp_plane r;
+ struct ia_css_frame_sp_plane g;
+ struct ia_css_frame_sp_plane b;
+};
+
+struct ia_css_frame_sp_plane6 {
+ struct ia_css_frame_sp_plane r;
+ struct ia_css_frame_sp_plane r_at_b;
+ struct ia_css_frame_sp_plane gr;
+ struct ia_css_frame_sp_plane gb;
+ struct ia_css_frame_sp_plane b;
+ struct ia_css_frame_sp_plane b_at_r;
+};
+
+struct ia_css_sp_resolution {
+ u16 width; /* width of valid data in pixels */
+ u16 height; /* Height of valid data in lines */
+};
+
+/*
+ * Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct ia_css_frame_sp_info {
+ struct ia_css_sp_resolution res;
+ u16 padded_width; /* stride of line in memory
+ (in pixels) */
+ unsigned char format; /* format of the frame data */
+ unsigned char raw_bit_depth; /* number of valid bits per pixel,
+ only valid for RAW bayer frames */
+ unsigned char raw_bayer_order; /* bayer order, only valid
+ for RAW bayer frames */
+ unsigned char padding[3]; /* Extend to 32 bit multiple */
+};
+
+struct ia_css_buffer_sp {
+ union {
+ ia_css_ptr xmem_addr;
+ enum sh_css_queue_id queue_id;
+ } buf_src;
+ enum ia_css_buffer_type buf_type;
+};
+
+struct ia_css_frame_sp {
+ struct ia_css_frame_sp_info info;
+ struct ia_css_buffer_sp buf_attr;
+ union {
+ struct ia_css_frame_sp_plane raw;
+ struct ia_css_frame_sp_plane rgb;
+ struct ia_css_frame_sp_rgb_planes planar_rgb;
+ struct ia_css_frame_sp_plane yuyv;
+ struct ia_css_frame_sp_yuv_planes yuv;
+ struct ia_css_frame_sp_nv_planes nv;
+ struct ia_css_frame_sp_plane6 plane6;
+ struct ia_css_frame_sp_binary_plane binary;
+ } planes;
+};
+
+void ia_css_frame_info_to_frame_sp_info(
+ struct ia_css_frame_sp_info *sp_info,
+ const struct ia_css_frame_info *info);
+
+void ia_css_resolution_to_sp_resolution(
+ struct ia_css_sp_resolution *sp_info,
+ const struct ia_css_resolution *info);
+
+#endif /*__IA_CSS_FRAME_COMM_H__*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
new file mode 100644
index 000000000000..2cb96f9a6030
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/math.h>
+
+#include "assert_support.h"
+#include "atomisp_internal.h"
+#include "hmm.h"
+#include "ia_css_debug.h"
+#include "ia_css_frame.h"
+#include "isp.h"
+#include "sh_css_internal.h"
+
+#define NV12_TILEY_TILE_WIDTH 128
+#define NV12_TILEY_TILE_HEIGHT 32
+
+/**************************************************************************
+**	Static function declarations
+**************************************************************************/
+static void frame_init_plane(struct ia_css_frame_plane *plane,
+ unsigned int width,
+ unsigned int stride,
+ unsigned int height,
+ unsigned int offset);
+
+static void frame_init_single_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel);
+
+static void frame_init_raw_single_plane(
+ struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bits_per_pixel);
+
+static void frame_init_nv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ unsigned int bytes_per_element);
+
+static void frame_init_yuv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ bool swap_uv,
+ unsigned int bytes_per_element);
+
+static void frame_init_rgb_planes(struct ia_css_frame *frame,
+ unsigned int bytes_per_element);
+
+static void frame_init_qplane6_planes(struct ia_css_frame *frame);
+
+static int frame_allocate_buffer_data(struct ia_css_frame *frame);
+
+static int frame_allocate_with_data(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth);
+
+static struct ia_css_frame *frame_create(unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool valid);
+
+static unsigned
+ia_css_elems_bytes_from_info(
+ const struct ia_css_frame_info *info);
+
+/**************************************************************************
+** CSS API functions, exposed by ia_css.h
+**************************************************************************/
+
+int ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info)
+{
+ int err = 0;
+
+ if (!frame || !info)
+ return -EINVAL;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_from_info() enter:\n");
+ err =
+ ia_css_frame_allocate(frame, info->res.width, info->res.height,
+ info->format, info->padded_width,
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_from_info() leave:\n");
+ return err;
+}
+
+int ia_css_frame_allocate(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ int err = 0;
+
+ if (!frame || width == 0 || height == 0)
+ return -EINVAL;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n",
+ width, height, format, padded_width, raw_bit_depth);
+
+ err = frame_allocate_with_data(frame, width, height, format,
+ padded_width, raw_bit_depth);
+
+ if ((*frame) && err == 0)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n", *frame,
+ (*frame)->data);
+ else
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n",
+ (void *)-1, (unsigned int)-1);
+
+ return err;
+}
+
+void ia_css_frame_free(struct ia_css_frame *frame)
+{
+ IA_CSS_ENTER_PRIVATE("frame = %p", frame);
+
+ if (frame) {
+ hmm_free(frame->data);
+ kvfree(frame);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/**************************************************************************
+** Module public functions
+**************************************************************************/
+
+int ia_css_frame_check_info(const struct ia_css_frame_info *info)
+{
+ assert(info);
+ if (info->res.width == 0 || info->res.height == 0)
+ return -EINVAL;
+ return 0;
+}
+
+int ia_css_frame_init_planes(struct ia_css_frame *frame)
+{
+ assert(frame);
+
+ switch (frame->frame_info.format) {
+ case IA_CSS_FRAME_FORMAT_MIPI:
+ dev_err(atomisp_dev,
+ "%s: unexpected use of IA_CSS_FRAME_FORMAT_MIPI\n", __func__);
+ return -EINVAL;
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ frame_init_raw_single_plane(frame, &frame->planes.raw,
+ frame->frame_info.res.height,
+ frame->frame_info.padded_width,
+ frame->frame_info.raw_bit_depth);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW:
+ frame_init_single_plane(frame, &frame->planes.raw,
+ frame->frame_info.res.height,
+ frame->frame_info.padded_width,
+ frame->frame_info.raw_bit_depth <= 8 ? 1 : 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ frame_init_single_plane(frame, &frame->planes.rgb,
+ frame->frame_info.res.height,
+ frame->frame_info.padded_width, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ frame_init_single_plane(frame, &frame->planes.rgb,
+ frame->frame_info.res.height,
+ frame->frame_info.padded_width * 4, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ frame_init_rgb_planes(frame, 1);
+ break;
+	/* yuyv and uyvy have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ frame_init_single_plane(frame, &frame->planes.yuyv,
+ frame->frame_info.res.height,
+ frame->frame_info.padded_width * 2, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ /* Needs 3 extra lines to allow vf_pp prefetching */
+ frame_init_single_plane(frame, &frame->planes.yuyv,
+ frame->frame_info.res.height * 3 / 2 + 3,
+ frame->frame_info.padded_width, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_NV11:
+ frame_init_nv_planes(frame, 4, 1, 1);
+ break;
+ /* nv12 and nv21 have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ frame_init_nv_planes(frame, 2, 2, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ frame_init_nv_planes(frame, 2, 2, 2);
+ break;
+ /* nv16 and nv61 have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ frame_init_nv_planes(frame, 2, 1, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ frame_init_yuv_planes(frame, 2, 2, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ frame_init_yuv_planes(frame, 2, 1, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ frame_init_yuv_planes(frame, 1, 1, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ frame_init_yuv_planes(frame, 2, 2, false, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ frame_init_yuv_planes(frame, 2, 1, false, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_YV12:
+ frame_init_yuv_planes(frame, 2, 2, true, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YV16:
+ frame_init_yuv_planes(frame, 2, 1, true, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ frame_init_qplane6_planes(frame);
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ frame_init_single_plane(frame, &frame->planes.binary.data,
+ frame->frame_info.res.height,
+ frame->frame_info.padded_width, 1);
+ frame->planes.binary.size = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
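+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): what
+ * the (horizontal, vertical) decimation arguments used above mean for the
+ * buffer size, taking NV12 (decimation 2, 2 and 1 byte per element) as a
+ * worked example. Padding and alignment of the stride are ignored here;
+ * the real plane-init helpers work on the padded width.
+ */
+static unsigned int demo_nv12_bytes(unsigned int width, unsigned int height)
+{
+	unsigned int y_bytes  = width * height;			/* full-res Y plane */
+	unsigned int uv_bytes = (width / 2) * (height / 2) * 2;	/* interleaved CbCr */
+
+	return y_bytes + uv_bytes;	/* = width * height * 3 / 2 */
+}
+/* e.g. 1920x1080 -> 2073600 + 1036800 = 3110400 bytes */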
+
+unsigned int ia_css_frame_pad_width(unsigned int width, enum ia_css_frame_format format)
+{
+ switch (format) {
+ /*
+ * Frames with a U and V plane of 8 bits per pixel need to have
+ * all planes aligned, this means double the alignment for the
+ * Y plane if the horizontal decimation is 2.
+ */
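+ /*
+ * For example (hypothetical numbers, assuming HIVE_ISP_DDR_WORD_BYTES
+ * is 32): a YUV420 width of 1000 is padded to CEIL_MUL(1000, 64) = 1024.
+ */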
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ return CEIL_MUL(width, 2 * HIVE_ISP_DDR_WORD_BYTES);
+
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ return CEIL_MUL(width, NV12_TILEY_TILE_WIDTH);
+
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ return CEIL_MUL(width, 2 * ISP_VEC_NELEMS);
+
+ default:
+ return CEIL_MUL(width, HIVE_ISP_DDR_WORD_BYTES);
+ }
+}
+
+void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int min_padded_width)
+{
+ unsigned int align;
+
+ IA_CSS_ENTER_PRIVATE("info = %p,width = %d, minimum padded width = %d",
+ info, width, min_padded_width);
+ if (!info) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ align = max(min_padded_width, width);
+
+ info->res.width = width;
+ info->padded_width = ia_css_frame_pad_width(align, info->format);
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
+ enum ia_css_frame_format format)
+{
+ assert(info);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_info_set_format() enter:\n");
+ info->format = format;
+}
+
+void ia_css_frame_info_init(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int aligned)
+{
+ IA_CSS_ENTER_PRIVATE("info = %p, width = %d, height = %d, format = %d, aligned = %d",
+ info, width, height, format, aligned);
+ if (!info) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ info->res.height = height;
+ info->format = format;
+ ia_css_frame_info_set_width(info, width, aligned);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_frame_free_multiple(unsigned int num_frames,
+ struct ia_css_frame **frames_array)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_frames; i++) {
+ if (frames_array[i]) {
+ ia_css_frame_free(frames_array[i]);
+ frames_array[i] = NULL;
+ }
+ }
+}
+
+int ia_css_frame_allocate_with_buffer_size(struct ia_css_frame **frame,
+ const unsigned int buffer_size_bytes)
+{
+ /* AM: Body copied from frame_allocate_with_data(). */
+ int err;
+ struct ia_css_frame *me = frame_create(0, 0,
+ IA_CSS_FRAME_FORMAT_NUM,/* Not valid format yet */
+ 0, 0, false);
+
+ if (!me)
+ return -ENOMEM;
+
+ /* Get the data size */
+ me->data_bytes = buffer_size_bytes;
+
+ err = frame_allocate_buffer_data(me);
+
+ if (err) {
+ kvfree(me);
+ me = NULL;
+ }
+
+ *frame = me;
+
+ return err;
+}
+
+bool ia_css_frame_info_is_same_resolution(
+ const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b)
+{
+ if (!info_a || !info_b)
+ return false;
+ return (info_a->res.width == info_b->res.width) &&
+ (info_a->res.height == info_b->res.height);
+}
+
+bool ia_css_frame_is_same_type(const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b)
+{
+ bool is_equal = false;
+ const struct ia_css_frame_info *info_a = &frame_a->frame_info;
+ const struct ia_css_frame_info *info_b = &frame_b->frame_info;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_is_same_type() enter:\n");
+
+ if (!info_a || !info_b)
+ return false;
+ if (info_a->format != info_b->format)
+ return false;
+ if (info_a->padded_width != info_b->padded_width)
+ return false;
+ is_equal = ia_css_frame_info_is_same_resolution(info_a, info_b);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_is_same_type() leave:\n");
+
+ return is_equal;
+}
+
+int ia_css_dma_configure_from_info(struct dma_port_config *config,
+ const struct ia_css_frame_info *info)
+{
+ unsigned int is_raw_packed = info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ unsigned int bits_per_pixel = is_raw_packed ? info->raw_bit_depth :
+ ia_css_elems_bytes_from_info(info) * 8;
+ unsigned int pix_per_ddrword = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ unsigned int words_per_line = CEIL_DIV(info->padded_width, pix_per_ddrword);
+ unsigned int elems_b = pix_per_ddrword;
+
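+ /*
+ * Worked example (hypothetical numbers, assuming 256-bit DDR words):
+ * a RAW_PACKED frame with raw_bit_depth = 10 and padded_width = 1600
+ * gives pix_per_ddrword = 256 / 10 = 25, words_per_line =
+ * CEIL_DIV(1600, 25) = 64 and hence a stride of 64 * 32 = 2048 bytes.
+ */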
+ config->stride = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
+ config->elems = (uint8_t)elems_b;
+ config->width = (uint16_t)info->res.width;
+ config->crop = 0;
+
+ if (config->width > info->padded_width) {
+ dev_err(atomisp_dev, "internal error: padded_width is too small!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**************************************************************************
+** Static functions
+**************************************************************************/
+
+static void frame_init_plane(struct ia_css_frame_plane *plane,
+ unsigned int width,
+ unsigned int stride,
+ unsigned int height,
+ unsigned int offset)
+{
+ plane->height = height;
+ plane->width = width;
+ plane->stride = stride;
+ plane->offset = offset;
+}
+
+static void frame_init_single_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel)
+{
+ unsigned int stride;
+
+ stride = subpixels_per_line * bytes_per_pixel;
+ /*
+ * The frame height needs to be an even number (required by the ISYS2401
+ * hardware), so an odd height is rounded up to even for the allocation.
+ * Images are not affected by this round up; it is only needed for
+ * jpeg/embedded data. As long as buffer allocation and release both use
+ * data_bytes, there is no memory leak.
+ */
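+ /* e.g. (hypothetical) an embedded-data buffer of 123 lines is allocated as 124 lines */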
+ frame->data_bytes = stride * round_up(height, 2);
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+}
+
+static void frame_init_raw_single_plane(
+ struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bits_per_pixel)
+{
+ unsigned int stride;
+
+ assert(frame);
+
+ stride = HIVE_ISP_DDR_WORD_BYTES *
+ CEIL_DIV(subpixels_per_line,
+ HIVE_ISP_DDR_WORD_BITS / bits_per_pixel);
+ frame->data_bytes = stride * height;
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+}
+
+static void frame_init_nv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ unsigned int bytes_per_element)
+{
+ unsigned int y_width = frame->frame_info.padded_width;
+ unsigned int y_height = frame->frame_info.res.height;
+ unsigned int uv_width;
+ unsigned int uv_height;
+ unsigned int y_bytes;
+ unsigned int uv_bytes;
+ unsigned int y_stride;
+ unsigned int uv_stride;
+
+ assert(horizontal_decimation != 0 && vertical_decimation != 0);
+
+ uv_width = 2 * (y_width / horizontal_decimation);
+ uv_height = y_height / vertical_decimation;
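+ /*
+ * Example (hypothetical): NV12 (decimation 2x2, 1 byte per element)
+ * with a padded 1920x1080 Y plane gives uv_width = 2 * (1920 / 2) = 1920
+ * and uv_height = 540, i.e. one interleaved UV line per two Y lines.
+ */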
+
+ if (frame->frame_info.format == IA_CSS_FRAME_FORMAT_NV12_TILEY) {
+ y_width = CEIL_MUL(y_width, NV12_TILEY_TILE_WIDTH);
+ uv_width = CEIL_MUL(uv_width, NV12_TILEY_TILE_WIDTH);
+ y_height = CEIL_MUL(y_height, NV12_TILEY_TILE_HEIGHT);
+ uv_height = CEIL_MUL(uv_height, NV12_TILEY_TILE_HEIGHT);
+ }
+
+ y_stride = y_width * bytes_per_element;
+ uv_stride = uv_width * bytes_per_element;
+ y_bytes = y_stride * y_height;
+ uv_bytes = uv_stride * uv_height;
+
+ frame->data_bytes = y_bytes + uv_bytes;
+ frame_init_plane(&frame->planes.nv.y, y_width, y_stride, y_height, 0);
+ frame_init_plane(&frame->planes.nv.uv, uv_width,
+ uv_stride, uv_height, y_bytes);
+ return;
+}
+
+static void frame_init_yuv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ bool swap_uv,
+ unsigned int bytes_per_element)
+{
+ unsigned int y_width = frame->frame_info.padded_width,
+ y_height = frame->frame_info.res.height,
+ uv_width = y_width / horizontal_decimation,
+ uv_height = y_height / vertical_decimation,
+ y_stride, y_bytes, uv_bytes, uv_stride;
+
+ y_stride = y_width * bytes_per_element;
+ uv_stride = uv_width * bytes_per_element;
+ y_bytes = y_stride * y_height;
+ uv_bytes = uv_stride * uv_height;
+
+ frame->data_bytes = y_bytes + 2 * uv_bytes;
+ frame_init_plane(&frame->planes.yuv.y, y_width, y_stride, y_height, 0);
+ if (swap_uv) {
+ frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride,
+ uv_height, y_bytes);
+ frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride,
+ uv_height, y_bytes + uv_bytes);
+ } else {
+ frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride,
+ uv_height, y_bytes);
+ frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride,
+ uv_height, y_bytes + uv_bytes);
+ }
+ return;
+}
+
+static void frame_init_rgb_planes(struct ia_css_frame *frame,
+ unsigned int bytes_per_element)
+{
+ unsigned int width = frame->frame_info.res.width,
+ height = frame->frame_info.res.height, stride, bytes;
+
+ stride = width * bytes_per_element;
+ bytes = stride * height;
+ frame->data_bytes = 3 * bytes;
+ frame_init_plane(&frame->planes.planar_rgb.r, width, stride, height, 0);
+ frame_init_plane(&frame->planes.planar_rgb.g,
+ width, stride, height, 1 * bytes);
+ frame_init_plane(&frame->planes.planar_rgb.b,
+ width, stride, height, 2 * bytes);
+ return;
+}
+
+static void frame_init_qplane6_planes(struct ia_css_frame *frame)
+{
+ unsigned int width = frame->frame_info.padded_width / 2,
+ height = frame->frame_info.res.height / 2, bytes, stride;
+
+ stride = width * 2;
+ bytes = stride * height;
+
+ frame->data_bytes = 6 * bytes;
+ frame_init_plane(&frame->planes.plane6.r,
+ width, stride, height, 0 * bytes);
+ frame_init_plane(&frame->planes.plane6.r_at_b,
+ width, stride, height, 1 * bytes);
+ frame_init_plane(&frame->planes.plane6.gr,
+ width, stride, height, 2 * bytes);
+ frame_init_plane(&frame->planes.plane6.gb,
+ width, stride, height, 3 * bytes);
+ frame_init_plane(&frame->planes.plane6.b,
+ width, stride, height, 4 * bytes);
+ frame_init_plane(&frame->planes.plane6.b_at_r,
+ width, stride, height, 5 * bytes);
+ return;
+}
+
+static int frame_allocate_buffer_data(struct ia_css_frame *frame)
+{
+ frame->data = hmm_alloc(frame->data_bytes);
+ if (frame->data == mmgr_NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static int frame_allocate_with_data(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ int err;
+ struct ia_css_frame *me = frame_create(width,
+ height,
+ format,
+ padded_width,
+ raw_bit_depth,
+ true);
+
+ if (!me)
+ return -ENOMEM;
+
+ err = ia_css_frame_init_planes(me);
+
+ if (!err)
+ err = frame_allocate_buffer_data(me);
+
+ if (err) {
+ kvfree(me);
+ *frame = NULL;
+ } else {
+ *frame = me;
+ }
+
+ return err;
+}
+
+static struct ia_css_frame *frame_create(unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool valid)
+{
+ struct ia_css_frame *me = kvmalloc(sizeof(*me), GFP_KERNEL);
+
+ if (!me)
+ return NULL;
+
+ memset(me, 0, sizeof(*me));
+ me->frame_info.res.width = width;
+ me->frame_info.res.height = height;
+ me->frame_info.format = format;
+ me->frame_info.padded_width = padded_width;
+ me->frame_info.raw_bit_depth = raw_bit_depth;
+ me->valid = valid;
+ me->data_bytes = 0;
+ me->data = mmgr_NULL;
+ /* To indicate it is not a valid frame. */
+ me->dynamic_queue_id = (int)SH_CSS_INVALID_QUEUE_ID;
+ me->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
+
+ return me;
+}
+
+static unsigned
+ia_css_elems_bytes_from_info(const struct ia_css_frame_info *info)
+{
+ if (info->format == IA_CSS_FRAME_FORMAT_RGB565)
+ return 2; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV420_16)
+ return 2; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV422_16)
+ return 2; /* bytes per pixel */
+ /* Note: NV12_16 is essentially a 2 bytes per pixel format and this return
+ * value is used to configure DMA for the output buffer. At least on SKC
+ * this data is overwritten by isp_output_init.sp.c, except for the number
+ * of elements (elems), which is configured from this return value.
+ * NV12_16 is implemented as a double buffer of 8-bit elements, hence elems
+ * should be configured as 8 */
+ if (info->format == IA_CSS_FRAME_FORMAT_NV12_16)
+ return 1; /* bytes per pixel */
+
+ if (info->format == IA_CSS_FRAME_FORMAT_RAW
+ || (info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)) {
+ if (info->raw_bit_depth)
+ return BITS_TO_BYTES(info->raw_bit_depth);
+ else
+ return 2; /* bytes per pixel */
+ }
+ if (info->format == IA_CSS_FRAME_FORMAT_PLANAR_RGB888)
+ return 3; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_RGBA888)
+ return 4; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_QPLANE6)
+ return 2; /* bytes per pixel */
+ return 1; /* Default is 1 byte per pixel */
+}
+
+void ia_css_frame_info_to_frame_sp_info(
+ struct ia_css_frame_sp_info *to,
+ const struct ia_css_frame_info *from)
+{
+ ia_css_resolution_to_sp_resolution(&to->res, &from->res);
+ to->padded_width = (uint16_t)from->padded_width;
+ to->format = (uint8_t)from->format;
+ to->raw_bit_depth = (uint8_t)from->raw_bit_depth;
+ to->raw_bayer_order = from->raw_bayer_order;
+}
+
+void ia_css_resolution_to_sp_resolution(
+ struct ia_css_sp_resolution *to,
+ const struct ia_css_resolution *from)
+{
+ to->width = (uint16_t)from->width;
+ to->height = (uint16_t)from->height;
+}
+
+int ia_css_frame_init_from_info(struct ia_css_frame *frame,
+ const struct ia_css_frame_info *frame_info)
+{
+ frame->frame_info.res.width = frame_info->res.width;
+ frame->frame_info.res.height = frame_info->res.height;
+ frame->frame_info.format = frame_info->format;
+ frame->frame_info.padded_width = frame_info->padded_width;
+ frame->frame_info.raw_bit_depth = frame_info->raw_bit_depth;
+ frame->valid = true;
+ /* To indicate it is not a valid frame. */
+ frame->dynamic_queue_id = SH_CSS_INVALID_QUEUE_ID;
+ frame->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
+
+ return ia_css_frame_init_planes(frame);
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h b/drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h
new file mode 100644
index 000000000000..01b89cdaf835
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_IFMTR_H__
+#define __IA_CSS_IFMTR_H__
+
+#include <type_support.h>
+#include <ia_css_stream_public.h>
+#include <ia_css_binary.h>
+
+extern bool ifmtr_set_if_blocking_mode_reset;
+
+unsigned int ia_css_ifmtr_lines_needed_for_bayer_order(
+ const struct ia_css_stream_config *config);
+
+unsigned int ia_css_ifmtr_columns_needed_for_bayer_order(
+ const struct ia_css_stream_config *config);
+
+int ia_css_ifmtr_configure(struct ia_css_stream_config *config,
+ struct ia_css_binary *binary);
+
+#endif /* __IA_CSS_IFMTR_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
new file mode 100644
index 000000000000..50b0b31d734a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include <linux/math.h>
+
+#include "system_global.h"
+
+#include "ia_css_ifmtr.h"
+#include <math_support.h>
+#include "sh_css_internal.h"
+#include "input_formatter.h"
+#include "assert_support.h"
+#include "sh_css_sp.h"
+#include "isp/modes/interface/input_buf.isp.h"
+
+/************************************************************
+ * Static functions declarations
+ ************************************************************/
+static int ifmtr_start_column(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_column);
+
+static int ifmtr_input_start_line(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_line);
+
+static void ifmtr_set_if_blocking_mode(
+ const input_formatter_cfg_t *const config_a,
+ const input_formatter_cfg_t *const config_b);
+
+/************************************************************
+ * Public functions
+ ************************************************************/
+
+/* The ISP expects GRBG bayer order; we skip one line and/or one column
+ * to correct for a different input bayer order.
+ */
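+/*
+ * For example, BGGR input becomes GRBG after skipping one line, RGGB after
+ * skipping one column, and GBRG after skipping one line and one column;
+ * GRBG input needs no correction.
+ */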
+unsigned int ia_css_ifmtr_lines_needed_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_BGGR)
+ || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+unsigned int ia_css_ifmtr_columns_needed_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_RGGB)
+ || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+int ia_css_ifmtr_configure(struct ia_css_stream_config *config,
+ struct ia_css_binary *binary)
+{
+ unsigned int start_line, start_column = 0,
+ cropped_height,
+ cropped_width,
+ num_vectors,
+ buffer_height = 2,
+ buffer_width,
+ two_ppc,
+ vmem_increment = 0,
+ deinterleaving = 0,
+ deinterleaving_b = 0,
+ width_a = 0,
+ width_b = 0,
+ bits_per_pixel,
+ vectors_per_buffer,
+ vectors_per_line = 0,
+ buffers_per_line = 0,
+ buf_offset_a = 0,
+ buf_offset_b = 0,
+ line_width = 0,
+ width_b_factor = 1, start_column_b,
+ left_padding = 0;
+ input_formatter_cfg_t if_a_config, if_b_config;
+ enum atomisp_input_format input_format;
+ int err = 0;
+ u8 if_config_index;
+
+ /* Determine which input formatter config set is targeted. */
+ /* Index is equal to the CSI-2 port used. */
+ enum mipi_port_id port;
+
+ if (binary) {
+ cropped_height = binary->in_frame_info.res.height;
+ cropped_width = binary->in_frame_info.res.width;
+ /* This should correspond to the input buffer definition for
+ ISP binaries in input_buf.isp.h */
+ if (binary->info->sp.enable.continuous &&
+ binary->info->sp.pipeline.mode != IA_CSS_BINARY_MODE_COPY)
+ buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+ else
+ buffer_width = binary->info->sp.input.max_width;
+ input_format = binary->input_format;
+ } else {
+ /* sp raw copy pipe (IA_CSS_PIPE_MODE_COPY): binary is NULL */
+ cropped_height = config->input_config.input_res.height;
+ cropped_width = config->input_config.input_res.width;
+ buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+ input_format = config->input_config.format;
+ }
+ two_ppc = config->pixels_per_clock == 2;
+ if (config->mode == IA_CSS_INPUT_MODE_SENSOR
+ || config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ port = config->source.port.port;
+ if_config_index = (uint8_t)(port - MIPI_PORT0_ID);
+ } else if (config->mode == IA_CSS_INPUT_MODE_MEMORY) {
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ } else {
+ if_config_index = 0;
+ }
+
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS
+ || if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED);
+
+ /* TODO: check whether the input is RAW and whether the current mode
+ * interprets RAW data in any particular bayer order. A copy binary
+ * with an output format other than raw should not result in dropping
+ * lines and/or columns.
+ */
+ err = ifmtr_input_start_line(config, cropped_height, &start_line);
+ if (err)
+ return err;
+ err = ifmtr_start_column(config, cropped_width, &start_column);
+ if (err)
+ return err;
+
+ if (config->left_padding == -1)
+ if (!binary)
+ /* sp raw copy pipe: set left_padding value */
+ left_padding = 0;
+ else
+ left_padding = binary->left_padding;
+ else
+ left_padding = 2 * ISP_VEC_NELEMS - config->left_padding;
+
+ if (left_padding) {
+ num_vectors = DIV_ROUND_UP(cropped_width + left_padding, ISP_VEC_NELEMS);
+ } else {
+ num_vectors = DIV_ROUND_UP(cropped_width, ISP_VEC_NELEMS);
+ num_vectors *= buffer_height;
+ /* TODO: with left padding, num_vectors is vectors per line;
+ * otherwise it is vectors per line * buffer_height. */
+ }
+
+ start_column_b = start_column;
+
+ bits_per_pixel = input_formatter_get_alignment(INPUT_FORMATTER0_ID)
+ * 8 / ISP_VEC_NELEMS;
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ deinterleaving_b = 1;
+ /* half lines */
+ width_a = cropped_width * deinterleaving / 2;
+ width_b_factor = 2;
+ /* full lines */
+ width_b = width_a * width_b_factor;
+ buffer_width *= deinterleaving * 2;
+ /* Patch from bayer to yuv */
+ num_vectors *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ vectors_per_line = num_vectors / buffer_height;
+ /* Even lines are half size */
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID) /
+ 2;
+ start_column /= 2;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 3;
+ width_a = cropped_width * deinterleaving / 2;
+ buffer_width = buffer_width * deinterleaving / 2;
+ /* Patch from bayer to yuv */
+ num_vectors = num_vectors / 2 * deinterleaving;
+ start_column = start_column * deinterleaving / 2;
+ }
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width * deinterleaving / 2;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ vectors_per_line = num_vectors / buffer_height;
+ /* Even lines are half size */
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID) /
+ 2;
+ start_column *= deinterleaving;
+ start_column /= 2;
+ start_column_b = start_column;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ }
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ start_column_b = start_column;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ }
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ num_vectors *= 2;
+ if (two_ppc) {
+ deinterleaving = 2; /* BR in if_a, G in if_b */
+ deinterleaving_b = 1; /* BR in if_a, G in if_b */
+ buffers_per_line = 4;
+ start_column_b = start_column;
+ start_column *= deinterleaving;
+ start_column_b *= deinterleaving_b;
+ } else {
+ deinterleaving = 3; /* BGR */
+ buffers_per_line = 3;
+ start_column *= deinterleaving;
+ }
+ vmem_increment = 1;
+ width_a = cropped_width * deinterleaving;
+ width_b = cropped_width * deinterleaving_b;
+ buffer_width *= buffers_per_line;
+ /* Patch from bayer to rgb */
+ num_vectors = num_vectors / 2 * deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ if (two_ppc) {
+ int crop_col = (start_column % 2) == 1;
+
+ vmem_increment = 2;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width / 2;
+
+ /* When two_ppc is enabled AND we need to crop one extra
+ * column, if_a crops by one extra and we swap the
+ * output offsets to interleave the bayer pattern in
+ * the correct order.
+ */
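+ /*
+ * Example (hypothetical): start_column = 5 gives crop_col = 1, so
+ * if_a starts at column 3, if_b at column 2, with buf_offset_a = 1
+ * and buf_offset_b = 0.
+ */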
+ buf_offset_a = crop_col ? 1 : 0;
+ buf_offset_b = crop_col ? 0 : 1;
+ start_column_b = start_column / 2;
+ start_column = start_column / 2 + crop_col;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ if ((!binary) || (config->continuous && binary
+ && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)) {
+ /* !binary -> sp raw copy pipe, no deinterleaving */
+ deinterleaving = 1;
+ }
+ width_a = cropped_width;
+ /* Must be a multiple of deinterleaving */
+ num_vectors = CEIL_MUL(num_vectors, deinterleaving);
+ }
+ buffer_height *= 2;
+ if ((!binary) || config->continuous)
+ /* !binary -> sp raw copy pipe */
+ buffer_height *= 2;
+ vectors_per_line = DIV_ROUND_UP(cropped_width, ISP_VEC_NELEMS);
+ vectors_per_line = CEIL_MUL(vectors_per_line, deinterleaving);
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ if (two_ppc) {
+ num_vectors *= 2;
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = width_b = cropped_width;
+ /* B buffer is one line further */
+ buf_offset_b = buffer_width / ISP_VEC_NELEMS;
+ bits_per_pixel *= 2;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = cropped_width;
+ start_column /= deinterleaving;
+ }
+ buffer_height *= 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT1:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT7:
+ case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8:
+ case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT:
+ case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT:
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ break;
+ }
+ if (width_a == 0)
+ return -EINVAL;
+
+ if (two_ppc)
+ left_padding /= 2;
+
+ /* Default values */
+ if (left_padding)
+ vectors_per_line = num_vectors;
+ if (!vectors_per_line) {
+ vectors_per_line = CEIL_MUL(num_vectors / buffer_height,
+ deinterleaving);
+ line_width = 0;
+ }
+ if (!line_width)
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID);
+ if (!buffers_per_line)
+ buffers_per_line = deinterleaving;
+ line_width = CEIL_MUL(line_width,
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID)
+ * vmem_increment);
+
+ vectors_per_buffer = buffer_height * buffer_width / ISP_VEC_NELEMS;
+
+ if_a_config.start_line = start_line;
+ if_a_config.start_column = start_column;
+ if_a_config.left_padding = left_padding / deinterleaving;
+ if_a_config.cropped_height = cropped_height;
+ if_a_config.cropped_width = width_a;
+ if_a_config.deinterleaving = deinterleaving;
+ if_a_config.buf_vecs = vectors_per_buffer;
+ if_a_config.buf_start_index = buf_offset_a;
+ if_a_config.buf_increment = vmem_increment;
+ if_a_config.buf_eol_offset =
+ buffer_width * bits_per_pixel / 8 - line_width;
+ if_a_config.is_yuv420_format =
+ (input_format == ATOMISP_INPUT_FORMAT_YUV420_8)
+ || (input_format == ATOMISP_INPUT_FORMAT_YUV420_10)
+ || (input_format == ATOMISP_INPUT_FORMAT_YUV420_16);
+ if_a_config.block_no_reqs = (config->mode != IA_CSS_INPUT_MODE_SENSOR);
+
+ if (two_ppc) {
+ if (deinterleaving_b) {
+ deinterleaving = deinterleaving_b;
+ width_b = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving;
+ /* Patch from bayer to rgb */
+ num_vectors = num_vectors / 2 *
+ deinterleaving * width_b_factor;
+ vectors_per_line = num_vectors / buffer_height;
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID);
+ }
+ if_b_config.start_line = start_line;
+ if_b_config.start_column = start_column_b;
+ if_b_config.left_padding = left_padding / deinterleaving;
+ if_b_config.cropped_height = cropped_height;
+ if_b_config.cropped_width = width_b;
+ if_b_config.deinterleaving = deinterleaving;
+ if_b_config.buf_vecs = vectors_per_buffer;
+ if_b_config.buf_start_index = buf_offset_b;
+ if_b_config.buf_increment = vmem_increment;
+ if_b_config.buf_eol_offset =
+ buffer_width * bits_per_pixel / 8 - line_width;
+ if_b_config.is_yuv420_format =
+ input_format == ATOMISP_INPUT_FORMAT_YUV420_8
+ || input_format == ATOMISP_INPUT_FORMAT_YUV420_10
+ || input_format == ATOMISP_INPUT_FORMAT_YUV420_16;
+ if_b_config.block_no_reqs =
+ (config->mode != IA_CSS_INPUT_MODE_SENSOR);
+
+ if (if_config_index != SH_CSS_IF_CONFIG_NOT_NEEDED) {
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS);
+
+ ifmtr_set_if_blocking_mode(&if_a_config, &if_b_config);
+ /* Set the ifconfigs to SP group */
+ sh_css_sp_set_if_configs(&if_a_config, &if_b_config,
+ if_config_index);
+ }
+ } else {
+ if (if_config_index != SH_CSS_IF_CONFIG_NOT_NEEDED) {
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS);
+
+ ifmtr_set_if_blocking_mode(&if_a_config, NULL);
+ /* Set the ifconfigs to SP group */
+ sh_css_sp_set_if_configs(&if_a_config, NULL,
+ if_config_index);
+ }
+ }
+
+ return 0;
+}
+
+bool ifmtr_set_if_blocking_mode_reset = true;
+
+/************************************************************
+ * Static functions
+ ************************************************************/
+static void ifmtr_set_if_blocking_mode(
+ const input_formatter_cfg_t *const config_a,
+ const input_formatter_cfg_t *const config_b)
+{
+ int i;
+ bool block[] = { false, false, false, false };
+
+ assert(N_INPUT_FORMATTER_ID <= (ARRAY_SIZE(block)));
+
+ block[INPUT_FORMATTER0_ID] = (bool)config_a->block_no_reqs;
+ if (config_b)
+ block[INPUT_FORMATTER1_ID] = (bool)config_b->block_no_reqs;
+
+ /* TODO: the code below could cause issues when streams are started
+ * after each other. The IF should not be reconfigured/reset from the
+ * host. */
+ if (ifmtr_set_if_blocking_mode_reset) {
+ ifmtr_set_if_blocking_mode_reset = false;
+ for (i = 0; i < N_INPUT_FORMATTER_ID; i++) {
+ input_formatter_ID_t id = (input_formatter_ID_t)i;
+
+ input_formatter_rst(id);
+ input_formatter_set_fifo_blocking_mode(id, block[id]);
+ }
+ }
+
+ return;
+}
+
+static int ifmtr_start_column(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_column)
+{
+ unsigned int in = config->input_config.input_res.width, start,
+ for_bayer = ia_css_ifmtr_columns_needed_for_bayer_order(config);
+
+ if (bin_in + 2 * for_bayer > in)
+ return -EINVAL;
+
+ /* On the hardware, we want to use the middle of the input, so we
+ * divide the number of extra columns by 2. */
+ start = (in - bin_in) / 2;
+ /* round the start column down to an even value */
+ start &= ~0x1;
+
+ /* now we add the one column (if needed) to correct for the bayer
+ * order.
+ */
+ start += for_bayer;
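+ /*
+ * e.g. (hypothetical) in = 1936, bin_in = 1920, for_bayer = 1:
+ * start = (1936 - 1920) / 2 = 8, already even, so start_column = 9.
+ */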
+ *start_column = start;
+ return 0;
+}
+
+static int ifmtr_input_start_line(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_line)
+{
+ unsigned int in = config->input_config.input_res.height, start,
+ for_bayer = ia_css_ifmtr_lines_needed_for_bayer_order(config);
+
+ if (bin_in + 2 * for_bayer > in)
+ return -EINVAL;
+
+ /* On the hardware, we want to use the middle of the input, so we
+ * divide the number of extra lines by 2. On the simulator, we cannot
+ * handle extra lines at the end of the frame.
+ */
+ start = (in - bin_in) / 2;
+ /* round the start line down to an even value. */
+ start &= ~0x1;
+
+ /* now we add the one line (if needed) to correct for the bayer order */
+ start += for_bayer;
+ *start_line = start;
+ return 0;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h b/drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h
new file mode 100644
index 000000000000..a95c3098c82a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_INPUTFIFO_H
+#define _IA_CSS_INPUTFIFO_H
+
+#include <sp.h>
+#include <isp.h>
+
+#include "ia_css_stream_format.h"
+
+/* SP access */
+void ia_css_inputfifo_send_input_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc);
+
+void ia_css_inputfifo_start_frame(
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc);
+
+void ia_css_inputfifo_send_line(
+ unsigned int ch_id,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2);
+
+void ia_css_inputfifo_send_embedded_line(
+ unsigned int ch_id,
+ enum atomisp_input_format data_type,
+ const unsigned short *data,
+ unsigned int width);
+
+void ia_css_inputfifo_end_frame(
+ unsigned int ch_id);
+
+#endif /* _IA_CSS_INPUTFIFO_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c
new file mode 100644
index 000000000000..8e1efeb6372c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "platform_support.h"
+
+#include "ia_css_inputfifo.h"
+
+#include "device_access.h"
+
+#define __INLINE_SP__
+#include "sp.h"
+#define __INLINE_ISP__
+#include "isp.h"
+#define __INLINE_IRQ__
+#include "irq.h"
+#define __INLINE_FIFO_MONITOR__
+#include "fifo_monitor.h"
+
+#define __INLINE_EVENT__
+#include "event_fifo.h"
+#define __INLINE_SP__
+
+#include "input_system.h" /* MIPI_PREDICTOR_NONE,... */
+
+#include "assert_support.h"
+
+/* System independent */
+#include "sh_css_internal.h"
+#include "ia_css_isys.h"
+
+#define HBLANK_CYCLES (187)
+#define MARKER_CYCLES (6)
+
+#include <hive_isp_css_streaming_to_mipi_types_hrt.h>
+
+/* The data type is used to handle special cases:
+ * yuv420: odd lines (1, 3 etc) are twice as wide as even
+ * lines (0, 2, 4 etc).
+ * rgb: for two pixels per clock, the R and B values are sent
+ * to output_0 while only G is sent to output_1. This means
+ * that output_1 only gets half the number of values of output_0.
+ * WARNING: This type should also be used for Legacy YUV420.
+ * regular: used for all other data types (RAW, YUV422, etc)
+ */
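+/*
+ * See inputfifo_determine_type() below: YUV420_8/10/16 map to
+ * inputfifo_mipi_data_type_yuv420, YUV420_8_LEGACY to
+ * inputfifo_mipi_data_type_yuv420_legacy, RGB_444..RGB_888 to
+ * inputfifo_mipi_data_type_rgb and everything else to
+ * inputfifo_mipi_data_type_regular.
+ */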
+enum inputfifo_mipi_data_type {
+ inputfifo_mipi_data_type_regular,
+ inputfifo_mipi_data_type_yuv420,
+ inputfifo_mipi_data_type_yuv420_legacy,
+ inputfifo_mipi_data_type_rgb,
+};
+
+static unsigned int inputfifo_curr_ch_id, inputfifo_curr_fmt_type;
+struct inputfifo_instance {
+ unsigned int ch_id;
+ enum atomisp_input_format input_format;
+ bool two_ppc;
+ bool streaming;
+ unsigned int hblank_cycles;
+ unsigned int marker_cycles;
+ unsigned int fmt_type;
+ enum inputfifo_mipi_data_type type;
+};
+
+/*
+ * Maintain a basic streaming-to-MIPI administration, indexed by ch_id.
+ * ch_id maps onto the "MIPI virtual channel ID" and can have values 0..3.
+ */
+#define INPUTFIFO_NR_OF_S2M_CHANNELS (4)
+static struct inputfifo_instance
+ inputfifo_inst_admin[INPUTFIFO_NR_OF_S2M_CHANNELS];
+
+/* Streaming to MIPI */
+static unsigned int inputfifo_wrap_marker(
+ /* static inline unsigned inputfifo_wrap_marker( */
+ unsigned int marker)
+{
+ return marker |
+ (inputfifo_curr_ch_id << HIVE_STR_TO_MIPI_CH_ID_LSB) |
+ (inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
+}
+
+static inline void
+_sh_css_fifo_snd(unsigned int token)
+{
+ while (!can_event_send_token(STR2MIPI_EVENT_ID))
+ udelay(1);
+ event_send_token(STR2MIPI_EVENT_ID, token);
+ return;
+}
+
+static void inputfifo_send_data_a(
+ /* static inline void inputfifo_send_data_a( */
+ unsigned int data)
+{
+ unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
+ (data << HIVE_STR_TO_MIPI_DATA_A_LSB);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_data_b(
+ /* static inline void inputfifo_send_data_b( */
+ unsigned int data)
+{
+ unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
+ (data << _HIVE_STR_TO_MIPI_DATA_B_LSB);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_data(
+ /* static inline void inputfifo_send_data( */
+ unsigned int a,
+ unsigned int b)
+{
+ unsigned int token = ((1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
+ (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
+ (a << HIVE_STR_TO_MIPI_DATA_A_LSB) |
+ (b << _HIVE_STR_TO_MIPI_DATA_B_LSB));
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_sol(void)
+/* static inline void inputfifo_send_sol(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_SOL_BIT);
+
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_eol(void)
+/* static inline void inputfifo_send_eol(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_EOL_BIT);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_sof(void)
+/* static inline void inputfifo_send_sof(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_SOF_BIT);
+
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_eof(void)
+/* static inline void inputfifo_send_eof(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_EOF_BIT);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_ch_id_and_fmt_type(
+ /* static inline
+ void inputfifo_send_ch_id_and_fmt_type( */
+ unsigned int ch_id,
+ unsigned int fmt_type)
+{
+ hrt_data token;
+
+ inputfifo_curr_ch_id = ch_id & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
+ /* we send a zero marker; this will wrap the ch_id and
+ * fmt_type automatically.
+ */
+ token = inputfifo_wrap_marker(0);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_empty_token(void)
+/* static inline void inputfifo_send_empty_token(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(0);
+
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_start_frame(
+ /* static inline void inputfifo_start_frame( */
+ unsigned int ch_id,
+ unsigned int fmt_type)
+{
+ inputfifo_send_ch_id_and_fmt_type(ch_id, fmt_type);
+ inputfifo_send_sof();
+ return;
+}
+
+static void inputfifo_end_frame(
+ unsigned int marker_cycles)
+{
+ unsigned int i;
+
+ for (i = 0; i < marker_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_eof();
+ return;
+}
+
+static void inputfifo_send_line2(
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ unsigned int i, is_rgb = 0, is_legacy = 0;
+
+ assert(data);
+ assert((data2) || (width2 == 0));
+ if (type == inputfifo_mipi_data_type_rgb)
+ is_rgb = 1;
+
+ if (type == inputfifo_mipi_data_type_yuv420_legacy)
+ is_legacy = 1;
+
+ for (i = 0; i < hblank_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_sol();
+ for (i = 0; i < marker_cycles; i++)
+ inputfifo_send_empty_token();
+ for (i = 0; i < width; i++, data++) {
+ /* for RGB (and legacy yuv420) in two_ppc, we send 2 pixels per
+ * clock except for every third subpixel (i % 3 == 2), where we
+ * only send 1 pixel, to data[0].
+ */
+ unsigned int send_two_pixels = two_ppc;
+
+ if ((is_rgb || is_legacy) && (i % 3 == 2))
+ send_two_pixels = 0;
+ if (send_two_pixels) {
+ if (i + 1 == width) {
+ /* for jpg (binary) copy, this can occur
+ * if the file contains an odd number of bytes.
+ */
+ inputfifo_send_data(
+ data[0], 0);
+ } else {
+ inputfifo_send_data(
+ data[0], data[1]);
+ }
+ /* Additional increment because we send 2 pixels */
+ data++;
+ i++;
+ } else if (two_ppc && is_legacy) {
+ inputfifo_send_data_b(data[0]);
+ } else {
+ inputfifo_send_data_a(data[0]);
+ }
+ }
+
+ for (i = 0; i < width2; i++, data2++) {
+ /* for RGB (and legacy yuv420) in two_ppc, we send 2 pixels per
+ * clock except for every third subpixel (i % 3 == 2), where we
+ * only send 1 pixel, to data2[0].
+ */
+ unsigned int send_two_pixels = two_ppc;
+
+ if ((is_rgb || is_legacy) && (i % 3 == 2))
+ send_two_pixels = 0;
+ if (send_two_pixels) {
+ if (i + 1 == width2) {
+ /* for jpg (binary) copy, this can occur
+ * if the file contains an odd number of bytes.
+ */
+ inputfifo_send_data(
+ data2[0], 0);
+ } else {
+ inputfifo_send_data(
+ data2[0], data2[1]);
+ }
+ /* Additional increment because we send 2 pixels */
+ data2++;
+ i++;
+ } else if (two_ppc && is_legacy) {
+ inputfifo_send_data_b(data2[0]);
+ } else {
+ inputfifo_send_data_a(data2[0]);
+ }
+ }
+ for (i = 0; i < hblank_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_eol();
+ return;
+}
+
+static void
+inputfifo_send_line(const unsigned short *data,
+ unsigned int width,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ assert(data);
+ inputfifo_send_line2(data, width, NULL, 0,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc,
+ type);
+}
+
+/* Send a frame of data into the input network via the GP FIFO.
+ * Parameters:
+ * - data: array of 16 bit values that contains all data for the frame.
+ * - width: width of a line in number of subpixels; for yuv420 it is the
+ * number of Y components per line.
+ * - height: height of the frame in number of lines.
+ * - ch_id: channel ID.
+ * - fmt_type: format type.
+ * - hblank_cycles: length of horizontal blanking in cycles.
+ * - marker_cycles: number of empty cycles after start-of-line and before
+ * end-of-frame.
+ * - two_ppc: boolean, describes whether to send one or two pixels per clock
+ * cycle. In this mode, we send pixels N and N+1 in the same cycle,
+ * to IF_PRIM_A and IF_PRIM_B respectively. The caller must make
+ * sure the input data has been formatted correctly for this.
+ * For example, for RGB formats this means that unused values
+ * must be inserted.
+ * - type: the MIPI data type. For (non-legacy) yuv420 data, the odd
+ * lines (1,3,5 etc) are half as long as the
+ * even lines (2,4,6 etc).
+ * Note that the first line is odd (1) and the second line is even
+ * (2).
+ *
+ * This function does not do any reordering of pixels; the caller must make
+ * sure the data is in the right format. Please refer to the CSS receiver
+ * documentation for details on the data formats.
+ */
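+/*
+ * Example usage (hypothetical values): sending a 640x480 raw frame on
+ * virtual channel 0 with one pixel per clock:
+ *
+ *   inputfifo_send_frame(buf, 640, 480, 0, fmt_type,
+ *                        HBLANK_CYCLES, MARKER_CYCLES, 0,
+ *                        inputfifo_mipi_data_type_regular);
+ */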
+
+static void inputfifo_send_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ unsigned int fmt_type,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ unsigned int i;
+
+ assert(data);
+ inputfifo_start_frame(ch_id, fmt_type);
+
+ for (i = 0; i < height; i++) {
+ if ((type == inputfifo_mipi_data_type_yuv420) &&
+ (i & 1) == 1) {
+ inputfifo_send_line(data, 2 * width,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc, type);
+ data += 2 * width;
+ } else {
+ inputfifo_send_line(data, width,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc, type);
+ data += width;
+ }
+ }
+ inputfifo_end_frame(marker_cycles);
+ return;
+}
+
+static enum inputfifo_mipi_data_type inputfifo_determine_type(
+ enum atomisp_input_format input_format)
+{
+ enum inputfifo_mipi_data_type type;
+
+ type = inputfifo_mipi_data_type_regular;
+ if (input_format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) {
+ type =
+ inputfifo_mipi_data_type_yuv420_legacy;
+ } else if (input_format == ATOMISP_INPUT_FORMAT_YUV420_8 ||
+ input_format == ATOMISP_INPUT_FORMAT_YUV420_10 ||
+ input_format == ATOMISP_INPUT_FORMAT_YUV420_16) {
+ type =
+ inputfifo_mipi_data_type_yuv420;
+ } else if (input_format >= ATOMISP_INPUT_FORMAT_RGB_444 &&
+ input_format <= ATOMISP_INPUT_FORMAT_RGB_888) {
+ type =
+ inputfifo_mipi_data_type_rgb;
+ }
+ return type;
+}
+
+static struct inputfifo_instance *inputfifo_get_inst(
+ unsigned int ch_id)
+{
+ return &inputfifo_inst_admin[ch_id];
+}
+
+void ia_css_inputfifo_send_input_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc)
+{
+ unsigned int fmt_type, hblank_cycles, marker_cycles;
+ enum inputfifo_mipi_data_type type;
+
+ assert(data);
+ hblank_cycles = HBLANK_CYCLES;
+ marker_cycles = MARKER_CYCLES;
+ ia_css_isys_convert_stream_format_to_mipi_format(input_format,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+
+ type = inputfifo_determine_type(input_format);
+
+ inputfifo_send_frame(data, width, height,
+ ch_id, fmt_type, hblank_cycles, marker_cycles,
+ two_ppc, type);
+}
+
+void ia_css_inputfifo_start_frame(
+ unsigned int ch_id,
+ enum atomisp_input_format input_format,
+ bool two_ppc)
+{
+ struct inputfifo_instance *s2mi;
+
+ s2mi = inputfifo_get_inst(ch_id);
+
+ s2mi->ch_id = ch_id;
+ ia_css_isys_convert_stream_format_to_mipi_format(input_format,
+ MIPI_PREDICTOR_NONE,
+ &s2mi->fmt_type);
+ s2mi->two_ppc = two_ppc;
+ s2mi->type = inputfifo_determine_type(input_format);
+ s2mi->hblank_cycles = HBLANK_CYCLES;
+ s2mi->marker_cycles = MARKER_CYCLES;
+ s2mi->streaming = true;
+
+ inputfifo_start_frame(ch_id, s2mi->fmt_type);
+ return;
+}
+
+void ia_css_inputfifo_send_line(
+ unsigned int ch_id,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2)
+{
+ struct inputfifo_instance *s2mi;
+
+ assert(data);
+ assert((data2) || (width2 == 0));
+ s2mi = inputfifo_get_inst(ch_id);
+
+ /* Set global variables that indicate channel_id and format_type */
+ inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK;
+
+ inputfifo_send_line2(data, width, data2, width2,
+ s2mi->hblank_cycles,
+ s2mi->marker_cycles,
+ s2mi->two_ppc,
+ s2mi->type);
+}
+
+void ia_css_inputfifo_send_embedded_line(
+ unsigned int ch_id,
+ enum atomisp_input_format data_type,
+ const unsigned short *data,
+ unsigned int width)
+{
+ struct inputfifo_instance *s2mi;
+ unsigned int fmt_type;
+
+ assert(data);
+ s2mi = inputfifo_get_inst(ch_id);
+ ia_css_isys_convert_stream_format_to_mipi_format(data_type,
+ MIPI_PREDICTOR_NONE, &fmt_type);
+
+ /* Set format_type for metadata line. */
+ inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
+
+ inputfifo_send_line(data, width, s2mi->hblank_cycles, s2mi->marker_cycles,
+ s2mi->two_ppc, inputfifo_mipi_data_type_regular);
+}
+
+void ia_css_inputfifo_end_frame(
+ unsigned int ch_id)
+{
+ struct inputfifo_instance *s2mi;
+
+ s2mi = inputfifo_get_inst(ch_id);
+
+ /* Set global variables that indicate channel_id and format_type */
+ inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK;
+
+ /* Call existing HRT function */
+ inputfifo_end_frame(s2mi->marker_cycles);
+
+ s2mi->streaming = false;
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h
new file mode 100644
index 000000000000..ff9050fede10
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_ISP_PARAM_H_
+#define _IA_CSS_ISP_PARAM_H_
+
+#include <ia_css_err.h>
+#include "ia_css_isp_param_types.h"
+
+/* Set functions for parameter memory descriptors */
+void
+ia_css_isp_param_set_mem_init(
+ struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ char *address, size_t size);
+
+void
+ia_css_isp_param_set_css_mem_init(
+ struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ ia_css_ptr address, size_t size);
+
+void
+ia_css_isp_param_set_isp_mem_init(
+ struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ u32 address, size_t size);
+
+/* Get functions for parameter memory descriptors */
+const struct ia_css_host_data *
+ia_css_isp_param_get_mem_init(
+ const struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+const struct ia_css_data *
+ia_css_isp_param_get_css_mem_init(
+ const struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+const struct ia_css_isp_data *
+ia_css_isp_param_get_isp_mem_init(
+ const struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+/* Initialize the memory interface sizes and addresses */
+void
+ia_css_init_memory_interface(
+ struct ia_css_isp_param_css_segments *isp_mem_if,
+ const struct ia_css_isp_param_host_segments *mem_params,
+ const struct ia_css_isp_param_css_segments *css_params);
+
+/* Allocate memory parameters */
+int
+ia_css_isp_param_allocate_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params,
+ const struct ia_css_isp_param_isp_segments *mem_initializers);
+
+/* Destroy memory parameters */
+void
+ia_css_isp_param_destroy_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params);
+
+/* Load fw parameters */
+void
+ia_css_isp_param_load_fw_params(
+ const char *fw,
+ union ia_css_all_memory_offsets *mem_offsets,
+ const struct ia_css_isp_param_memory_offsets *memory_offsets,
+ bool init);
+
+/* Copy host parameter images to ddr */
+int
+ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ struct ia_css_isp_param_css_segments *ddr,
+ const struct ia_css_isp_param_host_segments *host,
+ enum ia_css_param_class pclass);
+
+/* Enable a pipeline by setting the control field in the isp dmem parameters */
+void
+ia_css_isp_param_enable_pipeline(
+ const struct ia_css_isp_param_host_segments *mem_params);
+
+#endif /* _IA_CSS_ISP_PARAM_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h
new file mode 100644
index 000000000000..d6d60508c1bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_ISP_PARAM_TYPES_H_
+#define _IA_CSS_ISP_PARAM_TYPES_H_
+
+#include "ia_css_types.h"
+#include <platform_support.h>
+#include <system_global.h>
+
+/* Short hands */
+#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
+#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
+
+/* The driver depends on this, to be removed later. */
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+/* Explicit member numbering to avoid fish type checker bug */
+enum ia_css_param_class {
+ IA_CSS_PARAM_CLASS_PARAM = 0, /* Late binding parameters, like 3A */
+ IA_CSS_PARAM_CLASS_CONFIG = 1, /* Pipe config time parameters, like resolution */
+ IA_CSS_PARAM_CLASS_STATE = 2, /* State parameters, like tnr buffer index */
+#if 0 /* Not yet implemented */
+ IA_CSS_PARAM_CLASS_FRAME = 3, /* Frame time parameters, like output buffer */
+#endif
+};
+
+#define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1)
+
+/* ISP parameter descriptor */
+struct ia_css_isp_parameter {
+ u32 offset; /* Offset in isp_<mem>_parameters, etc. */
+ u32 size; /* Disabled if 0 */
+};
+
+/* Address/size of each parameter class in each isp memory, host memory pointers */
+struct ia_css_isp_param_host_segments {
+ struct ia_css_host_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Address/size of each parameter class in each isp memory, css memory pointers */
+struct ia_css_isp_param_css_segments {
+ struct ia_css_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Address/size of each parameter class in each isp memory, isp memory pointers */
+struct ia_css_isp_param_isp_segments {
+ struct ia_css_isp_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Memory offsets in binary info */
+struct ia_css_isp_param_memory_offsets {
+ u32 offsets[IA_CSS_NUM_PARAM_CLASSES]; /** offset w.r.t. the header, in bytes */
+};
+
+/* Offsets for ISP kernel parameters per isp memory.
+ * Only relevant for standard ISP binaries, not ACC or SP.
+ */
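+/*
+ * Note: the named members in 'offsets' and the generic 'array' entries alias
+ * the same storage, so a per-class pointer can be reached either by name or
+ * via array[pclass].ptr (as ia_css_isp_param_load_fw_params() does).
+ */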
+union ia_css_all_memory_offsets {
+ struct {
+ CSS_ALIGN(struct ia_css_memory_offsets *param, 8);
+ CSS_ALIGN(struct ia_css_config_memory_offsets *config, 8);
+ CSS_ALIGN(struct ia_css_state_memory_offsets *state, 8);
+ } offsets;
+ struct {
+ CSS_ALIGN(void *ptr, 8);
+ } array[IA_CSS_NUM_PARAM_CLASSES];
+};
+
+#endif /* _IA_CSS_ISP_PARAM_TYPES_H_ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c
new file mode 100644
index 000000000000..251dd75a7613
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+
+/* Set functions for parameter memory descriptors */
+
+void
+ia_css_isp_param_set_mem_init(
+ struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ char *address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+void
+ia_css_isp_param_set_css_mem_init(
+ struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ ia_css_ptr address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+void
+ia_css_isp_param_set_isp_mem_init(
+ struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ u32 address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+/* Get functions for parameter memory descriptors */
+const struct ia_css_host_data *
+ia_css_isp_param_get_mem_init(
+ const struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+const struct ia_css_data *
+ia_css_isp_param_get_css_mem_init(
+ const struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+const struct ia_css_isp_data *
+ia_css_isp_param_get_isp_mem_init(
+ const struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+void
+ia_css_init_memory_interface(
+ struct ia_css_isp_param_css_segments *isp_mem_if,
+ const struct ia_css_isp_param_host_segments *mem_params,
+ const struct ia_css_isp_param_css_segments *css_params)
+{
+ unsigned int pclass, mem;
+
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ memset(isp_mem_if->params[pclass], 0, sizeof(isp_mem_if->params[pclass]));
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ if (!mem_params->params[pclass][mem].address)
+ continue;
+ isp_mem_if->params[pclass][mem].size = mem_params->params[pclass][mem].size;
+ if (pclass != IA_CSS_PARAM_CLASS_PARAM)
+ isp_mem_if->params[pclass][mem].address =
+ css_params->params[pclass][mem].address;
+ }
+ }
+}
+
+int
+ia_css_isp_param_allocate_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params,
+ const struct ia_css_isp_param_isp_segments *mem_initializers) {
+ int err = 0;
+ unsigned int mem, pclass;
+
+ pclass = IA_CSS_PARAM_CLASS_PARAM;
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++)
+ {
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ u32 size = 0;
+
+ if (mem_initializers)
+ size = mem_initializers->params[pclass][mem].size;
+ mem_params->params[pclass][mem].size = size;
+ mem_params->params[pclass][mem].address = NULL;
+ css_params->params[pclass][mem].size = size;
+ css_params->params[pclass][mem].address = 0x0;
+ if (size) {
+ mem_params->params[pclass][mem].address = kvcalloc(1,
+ size,
+ GFP_KERNEL);
+ if (!mem_params->params[pclass][mem].address) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ if (pclass != IA_CSS_PARAM_CLASS_PARAM) {
+ css_params->params[pclass][mem].address = hmm_alloc(size);
+ if (!css_params->params[pclass][mem].address) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+ }
+ }
+ }
+ }
+ return err;
+cleanup:
+ ia_css_isp_param_destroy_isp_parameters(mem_params, css_params);
+ return err;
+}
+
+void
+ia_css_isp_param_destroy_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params)
+{
+ unsigned int mem, pclass;
+
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ kvfree(mem_params->params[pclass][mem].address);
+ if (css_params->params[pclass][mem].address)
+ hmm_free(css_params->params[pclass][mem].address);
+ mem_params->params[pclass][mem].address = NULL;
+ css_params->params[pclass][mem].address = 0x0;
+ }
+ }
+}
+
+void
+ia_css_isp_param_load_fw_params(
+ const char *fw,
+ union ia_css_all_memory_offsets *mem_offsets,
+ const struct ia_css_isp_param_memory_offsets *memory_offsets,
+ bool init)
+{
+ unsigned int pclass;
+
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ mem_offsets->array[pclass].ptr = NULL;
+ if (init)
+ mem_offsets->array[pclass].ptr = (void *)(fw + memory_offsets->offsets[pclass]);
+ }
+}
+
+int
+ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ struct ia_css_isp_param_css_segments *ddr,
+ const struct ia_css_isp_param_host_segments *host,
+ enum ia_css_param_class pclass) {
+ unsigned int mem;
+
+ for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++)
+ {
+ size_t size = host->params[pclass][mem].size;
+ ia_css_ptr ddr_mem_ptr = ddr->params[pclass][mem].address;
+ char *host_mem_ptr = host->params[pclass][mem].address;
+
+ if (size != ddr->params[pclass][mem].size)
+ return -EINVAL;
+ if (!size)
+ continue;
+ hmm_store(ddr_mem_ptr, host_mem_ptr, size);
+ }
+ return 0;
+}
+
+void
+ia_css_isp_param_enable_pipeline(
+ const struct ia_css_isp_param_host_segments *mem_params)
+{
+	/* By protocol, bit 0 of the mandatory uint32_t first field of
+	   the input parameters is a disable bit. */
+ short dmem_offset = 0;
+
+ if (mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].size == 0)
+ return;
+
+ *(uint32_t *)
+ &mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].address[dmem_offset]
+ = 0x0;
+}
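The allocate/copy/destroy helpers above form a small lifecycle: ia_css_isp_param_allocate_isp_parameters() sizes every class/memory segment from the firmware's mem_initializers (backing the non-PARAM classes with hmm buffers), ia_css_isp_param_copy_isp_mem_if_to_ddr() pushes one class of host segments into those hmm buffers with hmm_store(), and ia_css_isp_param_destroy_isp_parameters() releases both sides. A minimal caller-side sketch, assuming the atomisp runtime headers are on the include path and that IA_CSS_PARAM_CLASS_CONFIG is one of the non-PARAM classes defined there; the function name below is illustrative only:

#include "ia_css_isp_param.h"

/* Illustrative only: round-trip one class of parameters through hmm. */
static int example_config_params_round_trip(
	const struct ia_css_isp_param_isp_segments *mem_initializers)
{
	struct ia_css_isp_param_host_segments host;
	struct ia_css_isp_param_css_segments ddr;
	int err;

	/* Allocates host-side (kvcalloc) and ISP-side (hmm_alloc) buffers. */
	err = ia_css_isp_param_allocate_isp_parameters(&host, &ddr,
						       mem_initializers);
	if (err)
		return err;

	/* ... fill host.params[IA_CSS_PARAM_CLASS_CONFIG][...] here ... */

	/* hmm_store() each non-empty CONFIG segment into its DDR buffer. */
	err = ia_css_isp_param_copy_isp_mem_if_to_ddr(&ddr, &host,
						      IA_CSS_PARAM_CLASS_CONFIG);

	ia_css_isp_param_destroy_isp_parameters(&host, &ddr);
	return err;
}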
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
new file mode 100644
index 000000000000..29eebe8f9078
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ISYS_H__
+#define __IA_CSS_ISYS_H__
+
+#include <type_support.h>
+#include <input_system.h>
+#include <ia_css_input_port.h>
+#include <ia_css_stream_format.h>
+#include <ia_css_stream_public.h>
+#include <system_global.h>
+#include "ia_css_isys_comm.h"
+
+/**
+ * Virtual Input System. (Input System 2401)
+ */
+typedef isp2401_input_system_cfg_t ia_css_isys_descr_t;
+/* end of Virtual Input System */
+
+
+input_system_err_t ia_css_isys_init(void);
+void ia_css_isys_uninit(void);
+enum mipi_port_id ia_css_isys_port_to_mipi_port(
+ enum mipi_port_id api_port);
+
+
+/**
+ * @brief Register one (virtual) stream. This is used to track when all
+ * virtual streams are configured inside the input system. The CSI RX is
+ * only started when all registered streams are configured.
+ *
+ * @param[in] port CSI port
+ * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id()
+ * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES
+ * @return 0 if successful, -EINVAL if
+ * there is already a stream registered with the same handle
+ */
+int ia_css_isys_csi_rx_register_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id);
+
+/**
+ * @brief Unregister one (virtual) stream. This is used to track when all
+ * virtual streams are configured inside the input system. The CSI RX is
+ * only started when all registered streams are configured.
+ *
+ * @param[in] port CSI port
+ * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id()
+ * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES
+ * @return 0 if successful, -EINVAL if
+ * there is no stream registered with that handle
+ */
+int ia_css_isys_csi_rx_unregister_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id);
+
+int ia_css_isys_convert_compressed_format(
+ struct ia_css_csi2_compression *comp,
+ struct isp2401_input_system_cfg_s *cfg);
+unsigned int ia_css_csi2_calculate_input_system_alignment(
+ enum atomisp_input_format fmt_type);
+
+/* CSS Receiver */
+void ia_css_isys_rx_configure(
+ const rx_cfg_t *config,
+ const enum ia_css_input_mode input_mode);
+
+void ia_css_isys_rx_disable(void);
+
+void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port);
+
+unsigned int ia_css_isys_rx_get_interrupt_reg(enum mipi_port_id port);
+void ia_css_isys_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *irq_infos);
+void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int irq_infos);
+unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
+
+
+/* @brief Translate format and compression to format type.
+ *
+ * @param[in] input_format The input format.
+ * @param[in] compression The compression scheme.
+ * @param[out] fmt_type Pointer to the resulting format type.
+ * @return Error code.
+ *
+ * Translate an input format and mipi compression pair to the fmt_type.
+ * This is normally done by the sensor, but when using the input fifo, this
+ * format type must be submitted correctly by the application.
+ */
+int ia_css_isys_convert_stream_format_to_mipi_format(
+ enum atomisp_input_format input_format,
+ mipi_predictor_t compression,
+ unsigned int *fmt_type);
+
+/**
+ * Virtual Input System. (Input System 2401)
+ */
+ia_css_isys_error_t ia_css_isys_stream_create(
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_h isys_stream,
+ uint32_t isys_stream_id);
+
+void ia_css_isys_stream_destroy(
+ ia_css_isys_stream_h isys_stream);
+
+ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
+ ia_css_isys_stream_h isys_stream,
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_cfg_t *isys_stream_cfg);
+
+void ia_css_isys_csi_rx_lut_rmgr_init(void);
+
+void ia_css_isys_csi_rx_lut_rmgr_uninit(void);
+
+bool ia_css_isys_csi_rx_lut_rmgr_acquire(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+void ia_css_isys_csi_rx_lut_rmgr_release(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+void ia_css_isys_ibuf_rmgr_init(void);
+
+void ia_css_isys_ibuf_rmgr_uninit(void);
+
+bool ia_css_isys_ibuf_rmgr_acquire(
+ u32 size,
+ uint32_t *start_addr);
+
+void ia_css_isys_ibuf_rmgr_release(
+ uint32_t *start_addr);
+
+void ia_css_isys_dma_channel_rmgr_init(void);
+
+void ia_css_isys_dma_channel_rmgr_uninit(void);
+
+bool ia_css_isys_dma_channel_rmgr_acquire(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+void ia_css_isys_dma_channel_rmgr_release(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+void ia_css_isys_stream2mmio_sid_rmgr_init(void);
+
+void ia_css_isys_stream2mmio_sid_rmgr_uninit(void);
+
+bool ia_css_isys_stream2mmio_sid_rmgr_acquire(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+void ia_css_isys_stream2mmio_sid_rmgr_release(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+/* end of Virtual Input System */
+
+#endif /* __IA_CSS_ISYS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
new file mode 100644
index 000000000000..3d4c0cd2f2a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_ISYS_COMM_H
+#define __IA_CSS_ISYS_COMM_H
+
+#include <type_support.h>
+#include <input_system.h>
+
+#include <platform_support.h> /* inline */
+#include <input_system_global.h>
+#include <ia_css_stream_public.h> /* IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH */
+
+#define SH_CSS_NODES_PER_THREAD 2
+#define SH_CSS_MAX_ISYS_CHANNEL_NODES (SH_CSS_MAX_SP_THREADS * SH_CSS_NODES_PER_THREAD)
+
+/*
+ * a) ia_css_isys_stream_h & ia_css_isys_stream_cfg_t come from the host.
+ *
+ * b) It is better to use actual structures for the stream handle here
+ * instead of opaque handles. Otherwise, we would need another
+ * communication channel to interpret an opaque handle (the handle is
+ * maintained by the host and must be passed to the SP for every stream open).
+ */
+typedef virtual_input_system_stream_t *ia_css_isys_stream_h;
+typedef virtual_input_system_stream_cfg_t ia_css_isys_stream_cfg_t;
+
+/*
+ * Error/status return type for the ISYS APIs.
+ */
+typedef bool ia_css_isys_error_t;
+
+static inline uint32_t ia_css_isys_generate_stream_id(
+ u32 sp_thread_id,
+ uint32_t stream_id)
+{
+ return sp_thread_id * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + stream_id;
+}
+
+#endif /* __IA_CSS_ISYS_COMM_H */
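ia_css_isys_generate_stream_id() simply packs the SP thread id and the per-channel stream index into one handle by multiplying the thread id by IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH. A standalone check of that arithmetic; the constant value 2 below is only an assumed stand-in, not the real header value:

#include <assert.h>
#include <stdint.h>

/* Stand-in for IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH (assumed value). */
#define EXAMPLE_MAX_ISYS_STREAM_PER_CH 2

static uint32_t example_generate_stream_id(uint32_t sp_thread_id,
					   uint32_t stream_id)
{
	return sp_thread_id * EXAMPLE_MAX_ISYS_STREAM_PER_CH + stream_id;
}

int main(void)
{
	/* SP thread 3, second stream of that thread -> handle 7. */
	assert(example_generate_stream_id(3, 1) == 7);
	return 0;
}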
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
new file mode 100644
index 000000000000..7490d189f39d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "ia_css_pipeline.h" /* ia_css_pipeline_get_pipe_io_status() */
+#include "sh_css_internal.h" /* sh_css_sp_pipeline_io_status
+ * SH_CSS_MAX_SP_THREADS
+ */
+#include "csi_rx_rmgr.h"
+
+static isys_csi_rx_rsrc_t isys_csi_rx_rsrc[N_CSI_RX_BACKEND_ID];
+
+void ia_css_isys_csi_rx_lut_rmgr_init(void)
+{
+ memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
+}
+
+void ia_css_isys_csi_rx_lut_rmgr_uninit(void)
+{
+ memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
+}
+
+bool ia_css_isys_csi_rx_lut_rmgr_acquire(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ bool retval = false;
+ u32 max_num_packets_of_type;
+ u32 num_active_of_type;
+ isys_csi_rx_rsrc_t *cur_rsrc = NULL;
+ u16 i;
+
+ assert(backend < N_CSI_RX_BACKEND_ID);
+ assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) ||
+ (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));
+ assert(entry);
+
+ if ((backend < N_CSI_RX_BACKEND_ID) && (entry)) {
+ cur_rsrc = &isys_csi_rx_rsrc[backend];
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ max_num_packets_of_type = N_LONG_PACKET_LUT_ENTRIES[backend];
+ num_active_of_type = cur_rsrc->num_long_packets;
+ } else {
+ max_num_packets_of_type = N_SHORT_PACKET_LUT_ENTRIES[backend];
+ num_active_of_type = cur_rsrc->num_short_packets;
+ }
+
+ if (num_active_of_type < max_num_packets_of_type) {
+ for (i = 0; i < max_num_packets_of_type; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ entry->long_packet_entry = i;
+ entry->short_packet_entry = 0;
+ cur_rsrc->num_long_packets++;
+ } else {
+ entry->long_packet_entry = 0;
+ entry->short_packet_entry = i;
+ cur_rsrc->num_short_packets++;
+ }
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+ }
+ return retval;
+}
+
+void ia_css_isys_csi_rx_lut_rmgr_release(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ u32 max_num_packets;
+ isys_csi_rx_rsrc_t *cur_rsrc = NULL;
+ u32 packet_entry = 0;
+
+ assert(backend < N_CSI_RX_BACKEND_ID);
+ assert(entry);
+	assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) ||
+	       (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));
+
+ if ((backend < N_CSI_RX_BACKEND_ID) && (entry)) {
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ max_num_packets = N_LONG_PACKET_LUT_ENTRIES[backend];
+ packet_entry = entry->long_packet_entry;
+ } else {
+ max_num_packets = N_SHORT_PACKET_LUT_ENTRIES[backend];
+ packet_entry = entry->short_packet_entry;
+ }
+
+ cur_rsrc = &isys_csi_rx_rsrc[backend];
+ if ((packet_entry < max_num_packets) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, packet_entry) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, packet_entry);
+
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG)
+ cur_rsrc->num_long_packets--;
+ else
+ cur_rsrc->num_short_packets--;
+ cur_rsrc->num_active--;
+ }
+ }
+ }
+}
+
+int ia_css_isys_csi_rx_register_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id)
+{
+ int retval = -EINVAL;
+
+ if ((port < N_INPUT_SYSTEM_CSI_PORT) &&
+ (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) {
+ struct sh_css_sp_pipeline_io_status *pipe_io_status;
+
+ pipe_io_status = ia_css_pipeline_get_pipe_io_status();
+ if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 0) {
+ bitop_setbit(pipe_io_status->active[port], isys_stream_id);
+ pipe_io_status->running[port] = 0;
+ retval = 0;
+ }
+ }
+ return retval;
+}
+
+int ia_css_isys_csi_rx_unregister_stream(
+ enum mipi_port_id port,
+ uint32_t isys_stream_id)
+{
+ int retval = -EINVAL;
+
+ if ((port < N_INPUT_SYSTEM_CSI_PORT) &&
+ (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) {
+ struct sh_css_sp_pipeline_io_status *pipe_io_status;
+
+ pipe_io_status = ia_css_pipeline_get_pipe_io_status();
+ if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 1) {
+ bitop_clearbit(pipe_io_status->active[port], isys_stream_id);
+ retval = 0;
+ }
+ }
+ return retval;
+}
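Each of these resource managers follows the same pattern: a bitmap (active_table) plus a counter (num_active), with acquire finding the first clear bit and release clearing it again. A usage sketch for the LUT manager, assuming driver context (the functions and enum values come from the headers this file already includes); the helper name is illustrative only:

#include "ia_css_isys.h"

/* Illustrative only: acquire a long-packet LUT slot and give it back. */
static void example_lut_round_trip(void)
{
	csi_rx_backend_lut_entry_t entry;

	ia_css_isys_csi_rx_lut_rmgr_init();

	if (ia_css_isys_csi_rx_lut_rmgr_acquire(CSI_RX_BACKEND0_ID,
						CSI_MIPI_PACKET_TYPE_LONG,
						&entry))
		ia_css_isys_csi_rx_lut_rmgr_release(CSI_RX_BACKEND0_ID,
						    CSI_MIPI_PACKET_TYPE_LONG,
						    &entry);

	ia_css_isys_csi_rx_lut_rmgr_uninit();
}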
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h
new file mode 100644
index 000000000000..e68386e54469
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __CSI_RX_RMGR_H_INCLUDED__
+#define __CSI_RX_RMGR_H_INCLUDED__
+
+typedef struct isys_csi_rx_rsrc_s isys_csi_rx_rsrc_t;
+struct isys_csi_rx_rsrc_s {
+ u32 active_table;
+ u32 num_active;
+ u16 num_long_packets;
+ u16 num_short_packets;
+};
+
+#endif /* __CSI_RX_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c
new file mode 100644
index 000000000000..b8d431dcd6c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "ibuf_ctrl_rmgr.h"
+
+static ibuf_rsrc_t ibuf_rsrc;
+
+static ibuf_handle_t *getHandle(uint16_t index)
+{
+ ibuf_handle_t *handle = NULL;
+
+ if (index < MAX_IBUF_HANDLES)
+ handle = &ibuf_rsrc.handles[index];
+ return handle;
+}
+
+void ia_css_isys_ibuf_rmgr_init(void)
+{
+ memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
+ ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
+}
+
+void ia_css_isys_ibuf_rmgr_uninit(void)
+{
+ memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
+ ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
+}
+
+bool ia_css_isys_ibuf_rmgr_acquire(
+ u32 size,
+ uint32_t *start_addr)
+{
+ bool retval = false;
+ bool input_buffer_found = false;
+ u32 aligned_size;
+ ibuf_handle_t *handle = NULL;
+ u16 i;
+
+ assert(start_addr);
+ assert(size > 0);
+
+ aligned_size = (size + (IBUF_ALIGN - 1)) & ~(IBUF_ALIGN - 1);
+
+	/* Check if there is an available, unused handle whose size
+	 * will fulfill the request.
+	 */
+ if (ibuf_rsrc.num_active < ibuf_rsrc.num_allocated) {
+ for (i = 0; i < ibuf_rsrc.num_allocated; i++) {
+ handle = getHandle(i);
+ if (!handle->active) {
+ if (handle->size >= aligned_size) {
+ handle->active = true;
+ input_buffer_found = true;
+ ibuf_rsrc.num_active++;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!input_buffer_found) {
+ /* There were no available handles that fulfilled the
+ * request. Allocate a new handle with the requested size.
+ */
+ if ((ibuf_rsrc.num_allocated < MAX_IBUF_HANDLES) &&
+ (ibuf_rsrc.free_size >= aligned_size)) {
+ handle = getHandle(ibuf_rsrc.num_allocated);
+ handle->start_addr = ibuf_rsrc.free_start_addr;
+ handle->size = aligned_size;
+ handle->active = true;
+
+ ibuf_rsrc.free_start_addr += aligned_size;
+ ibuf_rsrc.free_size -= aligned_size;
+ ibuf_rsrc.num_active++;
+ ibuf_rsrc.num_allocated++;
+
+ input_buffer_found = true;
+ }
+ }
+
+ if (input_buffer_found && handle) {
+ *start_addr = handle->start_addr;
+ retval = true;
+ }
+
+ return retval;
+}
+
+void ia_css_isys_ibuf_rmgr_release(
+ uint32_t *start_addr)
+{
+ u16 i;
+ ibuf_handle_t *handle = NULL;
+
+ assert(start_addr);
+
+ for (i = 0; i < ibuf_rsrc.num_allocated; i++) {
+ handle = getHandle(i);
+ if (handle->active && handle->start_addr == *start_addr) {
+ handle->active = false;
+ ibuf_rsrc.num_active--;
+ break;
+ }
+ }
+}
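ia_css_isys_ibuf_rmgr_acquire() first rounds the requested size up to the next multiple of IBUF_ALIGN before searching for a reusable handle or carving a new one out of free_size. A standalone check of that round-up expression (IBUF_ALIGN copied from ibuf_ctrl_rmgr.h below):

#include <assert.h>
#include <stdint.h>

#define IBUF_ALIGN 8 /* as in ibuf_ctrl_rmgr.h */

/* Same expression as in ia_css_isys_ibuf_rmgr_acquire(). */
static uint32_t example_align_up(uint32_t size)
{
	return (size + (IBUF_ALIGN - 1)) & ~(IBUF_ALIGN - 1);
}

int main(void)
{
	assert(example_align_up(1)  == 8);  /* smallest request takes one unit */
	assert(example_align_up(8)  == 8);  /* aligned sizes are unchanged     */
	assert(example_align_up(13) == 16); /* others round up to the next one */
	return 0;
}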
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h
new file mode 100644
index 000000000000..2345dee7ba08
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IBUF_CTRL_RMGR_H_INCLUDED__
+#define __IBUF_CTRL_RMGR_H_INCLUDED__
+
+#define MAX_IBUF_HANDLES 24
+#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
+#define IBUF_ALIGN 8
+
+typedef struct ibuf_handle_s ibuf_handle_t;
+struct ibuf_handle_s {
+ u32 start_addr;
+ u32 size;
+ bool active;
+};
+
+typedef struct ibuf_rsrc_s ibuf_rsrc_t;
+struct ibuf_rsrc_s {
+ u32 free_start_addr;
+ u32 free_size;
+ u16 num_active;
+ u16 num_allocated;
+ ibuf_handle_t handles[MAX_IBUF_HANDLES];
+};
+
+#endif /* __IBUF_CTRL_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
new file mode 100644
index 000000000000..2ce2f32a1946
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "isys_dma_rmgr.h"
+
+static isys_dma_rsrc_t isys_dma_rsrc[N_ISYS2401_DMA_ID];
+
+void ia_css_isys_dma_channel_rmgr_init(void)
+{
+	memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
+}
+
+void ia_css_isys_dma_channel_rmgr_uninit(void)
+{
+	memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
+}
+
+bool ia_css_isys_dma_channel_rmgr_acquire(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ bool retval = false;
+ isys2401_dma_channel i;
+ isys2401_dma_channel max_dma_channel;
+ isys_dma_rsrc_t *cur_rsrc = NULL;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(channel);
+
+ max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
+ cur_rsrc = &isys_dma_rsrc[dma_id];
+
+ if (cur_rsrc->num_active < max_dma_channel) {
+ for (i = ISYS2401_DMA_CHANNEL_0; i < N_ISYS2401_DMA_CHANNEL; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+ *channel = i;
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+
+ return retval;
+}
+
+void ia_css_isys_dma_channel_rmgr_release(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ isys2401_dma_channel max_dma_channel;
+ isys_dma_rsrc_t *cur_rsrc = NULL;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(channel);
+
+ max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
+ cur_rsrc = &isys_dma_rsrc[dma_id];
+
+ if ((*channel < max_dma_channel) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, *channel) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, *channel);
+ cur_rsrc->num_active--;
+ }
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h
new file mode 100644
index 000000000000..070a06df5f0c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_DMA_RMGR_H_INCLUDED__
+#define __ISYS_DMA_RMGR_H_INCLUDED__
+
+typedef struct isys_dma_rsrc_s isys_dma_rsrc_t;
+struct isys_dma_rsrc_s {
+ u32 active_table;
+ u16 num_active;
+};
+
+#endif /* __ISYS_DMA_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
new file mode 100644
index 000000000000..46bb3569e139
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "input_system.h"
+
+#include "ia_css_isys.h"
+#include "platform_support.h"
+
+#include "isys_dma_public.h" /* isys2401_dma_set_max_burst_size() */
+#include "isys_irq.h"
+
+static input_system_err_t ia_css_isys_2400_init(void)
+{
+ backend_channel_cfg_t backend_ch0;
+ backend_channel_cfg_t backend_ch1;
+ target_cfg2400_t targetB;
+ target_cfg2400_t targetC;
+ u32 acq_mem_region_size = 24;
+ u32 acq_nof_mem_regions = 2;
+ input_system_err_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+
+ memset(&backend_ch0, 0, sizeof(backend_channel_cfg_t));
+ memset(&backend_ch1, 0, sizeof(backend_channel_cfg_t));
+ memset(&targetB, 0, sizeof(targetB));
+ memset(&targetC, 0, sizeof(targetC));
+
+ error = input_system_configuration_reset();
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 0, /*ch_id */
+ INPUT_SYSTEM_PORT_A, /*port */
+ backend_ch0, /*backend_ch */
+ 32, /*mem_region_size */
+ 6, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetB, /*target */
+ 3); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 1, /*ch_id */
+ INPUT_SYSTEM_PORT_B, /*port */
+ backend_ch0, /*backend_ch */
+ 16, /*mem_region_size */
+ 3, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetB, /*target */
+ 3); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 2, /*ch_id */
+ INPUT_SYSTEM_PORT_C, /*port */
+ backend_ch1, /*backend_ch */
+ 32, /*mem_region_size */
+ 3, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetC, /*target */
+ 2); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_configuration_commit();
+
+ return error;
+}
+
+static input_system_err_t ia_css_isys_2401_init(void)
+{
+ ia_css_isys_csi_rx_lut_rmgr_init();
+ ia_css_isys_ibuf_rmgr_init();
+ ia_css_isys_dma_channel_rmgr_init();
+ ia_css_isys_stream2mmio_sid_rmgr_init();
+
+ isys2401_dma_set_max_burst_size(ISYS2401_DMA0_ID,
+ 1 /* Non Burst DMA transactions */);
+
+ /* Enable 2401 input system IRQ status for driver to retrieve */
+ isys_irqc_status_enable(ISYS_IRQ0_ID);
+ isys_irqc_status_enable(ISYS_IRQ1_ID);
+ isys_irqc_status_enable(ISYS_IRQ2_ID);
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+input_system_err_t ia_css_isys_init(void)
+{
+ if (IS_ISP2401)
+ return ia_css_isys_2401_init();
+
+ return ia_css_isys_2400_init();
+}
+
+void ia_css_isys_uninit(void)
+{
+ if (IS_ISP2401) {
+ ia_css_isys_csi_rx_lut_rmgr_uninit();
+ ia_css_isys_ibuf_rmgr_uninit();
+ ia_css_isys_dma_channel_rmgr_uninit();
+ ia_css_isys_stream2mmio_sid_rmgr_uninit();
+ }
+}
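ia_css_isys_init() dispatches on IS_ISP2401: the 2401 path initializes the resource managers, sets the DMA burst size and enables IRQ status, while the 2400 path programs three CSI xmem channels and commits the configuration. A minimal bring-up/tear-down sketch, assuming driver context; the function name is illustrative only:

#include "ia_css_isys.h"

/* Illustrative only: pair ia_css_isys_init() with ia_css_isys_uninit(). */
static input_system_err_t example_isys_bringup(void)
{
	input_system_err_t err = ia_css_isys_init();

	if (err != INPUT_SYSTEM_ERR_NO_ERROR)
		return err;

	/* ... configure and run streams here ... */

	ia_css_isys_uninit();
	return INPUT_SYSTEM_ERR_NO_ERROR;
}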
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
new file mode 100644
index 000000000000..9217d26cf632
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "system_global.h"
+
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "isys_stream2mmio_rmgr.h"
+
+static isys_stream2mmio_rsrc_t isys_stream2mmio_rsrc[N_STREAM2MMIO_ID];
+
+void ia_css_isys_stream2mmio_sid_rmgr_init(void)
+{
+ memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
+}
+
+void ia_css_isys_stream2mmio_sid_rmgr_uninit(void)
+{
+ memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
+}
+
+bool ia_css_isys_stream2mmio_sid_rmgr_acquire(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ bool retval = false;
+ stream2mmio_sid_ID_t max_sid;
+ isys_stream2mmio_rsrc_t *cur_rsrc = NULL;
+ stream2mmio_sid_ID_t i;
+
+ assert(stream2mmio < N_STREAM2MMIO_ID);
+ assert(sid);
+
+ if ((stream2mmio < N_STREAM2MMIO_ID) && (sid)) {
+ max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
+ cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio];
+
+ if (cur_rsrc->num_active < max_sid) {
+ for (i = STREAM2MMIO_SID0_ID; i < max_sid; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+ *sid = i;
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+ }
+ return retval;
+}
+
+void ia_css_isys_stream2mmio_sid_rmgr_release(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ stream2mmio_sid_ID_t max_sid;
+ isys_stream2mmio_rsrc_t *cur_rsrc = NULL;
+
+ assert(stream2mmio < N_STREAM2MMIO_ID);
+ assert(sid);
+
+ if ((stream2mmio < N_STREAM2MMIO_ID) && (sid)) {
+ max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
+ cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio];
+ if ((*sid < max_sid) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, *sid) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, *sid);
+ cur_rsrc->num_active--;
+ }
+ }
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h
new file mode 100644
index 000000000000..c07ee12746d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__
+
+typedef struct isys_stream2mmio_rsrc_s isys_stream2mmio_rsrc_t;
+struct isys_stream2mmio_rsrc_s {
+ u32 active_table;
+ u16 num_active;
+};
+
+#endif /* __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
new file mode 100644
index 000000000000..9cfb8bc97e24
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#define __INLINE_INPUT_SYSTEM__
+#include "input_system.h"
+#include "assert_support.h"
+#include "ia_css_isys.h"
+#include "ia_css_irq.h"
+#include "sh_css_internal.h"
+
+void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port)
+{
+ hrt_data bits = receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+
+ bits |= (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT) |
+ /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT) | */
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT);
+ /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT); */
+
+ receiver_port_reg_store(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
+
+ /*
+	 * The CSI is nested into the Iunit IRQs.
+ */
+ ia_css_irq_enable(IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR, true);
+
+ return;
+}
+
+/* This function converts between the enum used on the CSS API and the
+ * internal DLI enum type.
+ * We do not use an array for this since we cannot use named array
+ * initializers in Windows. Without that there is no easy way to guarantee
+ * that the array values would be in the correct order.
+ */
+enum mipi_port_id ia_css_isys_port_to_mipi_port(enum mipi_port_id api_port)
+{
+	/* In this module the validity of the input variable should
+ * have been checked already, so we do not check for erroneous
+ * values. */
+ enum mipi_port_id port = MIPI_PORT0_ID;
+
+ if (api_port == MIPI_PORT1_ID)
+ port = MIPI_PORT1_ID;
+ else if (api_port == MIPI_PORT2_ID)
+ port = MIPI_PORT2_ID;
+
+ return port;
+}
+
+unsigned int ia_css_isys_rx_get_interrupt_reg(enum mipi_port_id port)
+{
+ return receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+}
+
+void ia_css_rx_get_irq_info(unsigned int *irq_infos)
+{
+ ia_css_rx_port_get_irq_info(MIPI_PORT1_ID, irq_infos);
+}
+
+void ia_css_rx_port_get_irq_info(enum mipi_port_id api_port,
+ unsigned int *irq_infos)
+{
+ enum mipi_port_id port = ia_css_isys_port_to_mipi_port(api_port);
+
+ ia_css_isys_rx_get_irq_info(port, irq_infos);
+}
+
+void ia_css_isys_rx_get_irq_info(enum mipi_port_id port,
+ unsigned int *irq_infos)
+{
+ unsigned int bits;
+
+ assert(irq_infos);
+ bits = ia_css_isys_rx_get_interrupt_reg(port);
+ *irq_infos = ia_css_isys_rx_translate_irq_infos(bits);
+}
+
+/* Translate register bits to CSS API enum mask */
+unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits)
+{
+ unsigned int infos = 0;
+
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ECC_CORRECTED;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_CONTROL;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_CRC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC;
+
+ return infos;
+}
+
+void ia_css_rx_clear_irq_info(unsigned int irq_infos)
+{
+ ia_css_rx_port_clear_irq_info(MIPI_PORT1_ID, irq_infos);
+}
+
+void ia_css_rx_port_clear_irq_info(enum mipi_port_id api_port,
+ unsigned int irq_infos)
+{
+ enum mipi_port_id port = ia_css_isys_port_to_mipi_port(api_port);
+
+ ia_css_isys_rx_clear_irq_info(port, irq_infos);
+}
+
+void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
+ unsigned int irq_infos)
+{
+ hrt_data bits = receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+
+ /* MW: Why do we remap the receiver bitmap */
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ECC_CORRECTED)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT;
+
+ receiver_port_reg_store(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
+
+ return;
+}
+
+static int ia_css_isys_2400_set_fmt_type(enum atomisp_input_format input_format,
+ unsigned int *fmt_type)
+{
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ *fmt_type = MIPI_FORMAT_2400_RGB888;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ *fmt_type = MIPI_FORMAT_2400_RGB555;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ *fmt_type = MIPI_FORMAT_2400_RGB444;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ *fmt_type = MIPI_FORMAT_2400_RGB565;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ *fmt_type = MIPI_FORMAT_2400_RGB666;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ *fmt_type = MIPI_FORMAT_2400_RAW8;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ *fmt_type = MIPI_FORMAT_2400_RAW10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ *fmt_type = MIPI_FORMAT_2400_RAW6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ *fmt_type = MIPI_FORMAT_2400_RAW7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ *fmt_type = MIPI_FORMAT_2400_RAW12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ *fmt_type = MIPI_FORMAT_2400_RAW14;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ *fmt_type = MIPI_FORMAT_2400_YUV420_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ *fmt_type = MIPI_FORMAT_2400_YUV420_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ *fmt_type = MIPI_FORMAT_2400_YUV422_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ *fmt_type = MIPI_FORMAT_2400_YUV422_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ *fmt_type = MIPI_FORMAT_2400_YUV420_8_LEGACY;
+ break;
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ *fmt_type = MIPI_FORMAT_2400_EMBEDDED;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ /* This is not specified by Arasan, so we use
+ * 17 for now.
+ */
+ *fmt_type = MIPI_FORMAT_2400_RAW16;
+ break;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ *fmt_type = MIPI_FORMAT_2400_CUSTOM0;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ia_css_isys_2401_set_fmt_type(enum atomisp_input_format input_format,
+ unsigned int *fmt_type)
+{
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ *fmt_type = MIPI_FORMAT_2401_RGB888;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ *fmt_type = MIPI_FORMAT_2401_RGB555;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ *fmt_type = MIPI_FORMAT_2401_RGB444;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ *fmt_type = MIPI_FORMAT_2401_RGB565;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ *fmt_type = MIPI_FORMAT_2401_RGB666;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ *fmt_type = MIPI_FORMAT_2401_RAW8;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ *fmt_type = MIPI_FORMAT_2401_RAW10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ *fmt_type = MIPI_FORMAT_2401_RAW6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ *fmt_type = MIPI_FORMAT_2401_RAW7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ *fmt_type = MIPI_FORMAT_2401_RAW12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ *fmt_type = MIPI_FORMAT_2401_RAW14;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ *fmt_type = MIPI_FORMAT_2401_YUV420_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ *fmt_type = MIPI_FORMAT_2401_YUV420_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ *fmt_type = MIPI_FORMAT_2401_YUV422_8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ *fmt_type = MIPI_FORMAT_2401_YUV422_10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ *fmt_type = MIPI_FORMAT_2401_YUV420_8_LEGACY;
+ break;
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ *fmt_type = MIPI_FORMAT_2401_EMBEDDED;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM0;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM1;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM2;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM3;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM4;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM5;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM6;
+ break;
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ *fmt_type = MIPI_FORMAT_2401_CUSTOM7;
+ break;
+
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int ia_css_isys_convert_stream_format_to_mipi_format(
+ enum atomisp_input_format input_format,
+ mipi_predictor_t compression,
+ unsigned int *fmt_type)
+{
+ assert(fmt_type);
+ /*
+ * Custom (user defined) modes. Used for compressed
+ * MIPI transfers
+ *
+ * Checkpatch thinks the indent before "if" is suspect
+ * I think the only suspect part is the missing "else"
+ * because of the return.
+ */
+ if (compression != MIPI_PREDICTOR_NONE) {
+ switch (input_format) {
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ *fmt_type = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ *fmt_type = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ *fmt_type = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ *fmt_type = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ *fmt_type = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ *fmt_type = 14;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ *fmt_type = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ }
+ /*
+ * This mapping comes from the Arasan CSS function spec
+ * (CSS_func_spec1.08_ahb_sep29_08.pdf).
+ *
+ * MW: For some reason the mapping is not 1-to-1
+ */
+ if (IS_ISP2401)
+ return ia_css_isys_2401_set_fmt_type(input_format, fmt_type);
+ else
+ return ia_css_isys_2400_set_fmt_type(input_format, fmt_type);
+}
+
+static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
+ enum ia_css_csi2_compression_type type)
+{
+ mipi_predictor_t predictor = MIPI_PREDICTOR_NONE;
+
+ switch (type) {
+ case IA_CSS_CSI2_COMPRESSION_TYPE_1:
+ predictor = MIPI_PREDICTOR_TYPE1 - 1;
+ break;
+ case IA_CSS_CSI2_COMPRESSION_TYPE_2:
+ predictor = MIPI_PREDICTOR_TYPE2 - 1;
+ break;
+ default:
+ break;
+ }
+ return predictor;
+}
+
+int ia_css_isys_convert_compressed_format(
+ struct ia_css_csi2_compression *comp,
+ struct isp2401_input_system_cfg_s *cfg)
+{
+ int err = 0;
+
+ assert(comp);
+ assert(cfg);
+
+ if (comp->type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
+		/* Compression register bit slicing:
+		 * 4 bits are used for each user-defined data type.
+		 * 3 bits select the compression scheme:
+		 *   000 no compression
+		 *   001 10-6-10
+		 *   010 10-7-10
+		 *   011 10-8-10
+		 *   100 12-6-12
+		 *   101 12-7-12
+		 *   110 12-8-12
+		 * 1 bit selects the predictor
+		 * (see sh_css_csi2_compression_type_2_mipi_predictor()).
+		 */
+ if (comp->uncompressed_bits_per_pixel == UNCOMPRESSED_BITS_PER_PIXEL_10) {
+ switch (comp->compressed_bits_per_pixel) {
+ case COMPRESSED_BITS_PER_PIXEL_6:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_6_10;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_7:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_7_10;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_8:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_8_10;
+ break;
+ default:
+ err = -EINVAL;
+ }
+ } else if (comp->uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_12) {
+ switch (comp->compressed_bits_per_pixel) {
+ case COMPRESSED_BITS_PER_PIXEL_6:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_6_12;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_7:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_7_12;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_8:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_8_12;
+ break;
+ default:
+ err = -EINVAL;
+ }
+ } else
+ err = -EINVAL;
+ cfg->csi_port_attr.comp_predictor =
+ sh_css_csi2_compression_type_2_mipi_predictor(comp->type);
+ cfg->csi_port_attr.comp_enable = true;
+ } else /* No compression */
+ cfg->csi_port_attr.comp_enable = false;
+ return err;
+}
+
+unsigned int ia_css_csi2_calculate_input_system_alignment(
+ enum atomisp_input_format fmt_type)
+{
+ unsigned int memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES;
+
+ switch (fmt_type) {
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ memory_alignment_in_bytes = 2 * ISP_VEC_NELEMS;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+	/* Planar YUV formats need to have all planes aligned, which means
+	 * double the alignment for the Y plane if the horizontal decimation is 2. */
+ memory_alignment_in_bytes = 2 * HIVE_ISP_DDR_WORD_BYTES;
+ break;
+ case ATOMISP_INPUT_FORMAT_EMBEDDED:
+ default:
+ memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES;
+ break;
+ }
+ return memory_alignment_in_bytes;
+}
+
+
+static const mipi_lane_cfg_t MIPI_PORT_LANES[N_RX_MODE][N_MIPI_PORT_ID] = {
+ {MIPI_4LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_2LANE_CFG},
+ {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
+ {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG}
+};
+
+void ia_css_isys_rx_configure(const rx_cfg_t *config,
+ const enum ia_css_input_mode input_mode)
+{
+ bool any_port_enabled = false;
+ enum mipi_port_id port;
+
+ if ((!config)
+ || (config->mode >= N_RX_MODE)
+ || (config->port >= N_MIPI_PORT_ID)) {
+ assert(0);
+ return;
+ }
+ for (port = (enum mipi_port_id)0; port < N_MIPI_PORT_ID; port++) {
+ if (is_receiver_port_enabled(RX0_ID, port))
+ any_port_enabled = true;
+ }
+ /* AM: Check whether this is a problem with multiple
+ * streams. MS: This is the case. */
+
+ port = config->port;
+ receiver_port_enable(RX0_ID, port, false);
+
+ port = config->port;
+
+ /* AM: Check whether this is a problem with multiple streams. */
+ if (MIPI_PORT_LANES[config->mode][port] != MIPI_0LANE_CFG) {
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX,
+ config->timeout);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX,
+ config->initcount);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX,
+ config->synccount);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX,
+ config->rxcount);
+
+ if (input_mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+			/* MW: A bit of a hack, straight wiring of the capture
+			 * units, assuming they are linearly enumerated. */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX
+ + (unsigned int)port,
+ INPUT_SYSTEM_CSI_BACKEND);
+			/* MW: Like the integration test example, we overwrite
+			 * the GPREG_MUX register. */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ (input_system_multiplex_t)port);
+ } else {
+ /*
+ * AM: A bit of a hack, wiring the input system.
+ */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX
+ + (unsigned int)port,
+ INPUT_SYSTEM_INPUT_BUFFER);
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ INPUT_SYSTEM_ACQUISITION_UNIT);
+ }
+ }
+ /*
+ * The 2ppc is shared for all ports, so we cannot
+ * disable->configure->enable individual ports
+ */
+ /* AM: Check whether this is a problem with multiple streams. */
+	/* MS: 2ppc should be a per-binary property and should be
+	 * enabled/disabled per binary.
+	 * Currently it is implemented as a system-wide setting due
+	 * to effort and risks. */
+ if (!any_port_enabled) {
+ receiver_reg_store(RX0_ID,
+ _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX,
+ config->is_two_ppc);
+ receiver_reg_store(RX0_ID, _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX,
+ config->is_two_ppc);
+ }
+ receiver_port_enable(RX0_ID, port, true);
+	/* TODO: JB: need to add the define used below to mizuchi */
+ /* sh_css_sw_hive_isp_css_2400_system_20121224_0125\css
+ * \hrt\input_system_defs.h
+ * #define INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG 0X207
+ */
+ /* TODO: need better name for define
+ * input_system_reg_store(INPUT_SYSTEM0_ID,
+ * INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG, 1);
+ */
+ input_system_reg_store(INPUT_SYSTEM0_ID, 0x207, 1);
+
+ return;
+}
+
+void ia_css_isys_rx_disable(void)
+{
+ enum mipi_port_id port;
+
+ for (port = (enum mipi_port_id)0; port < N_MIPI_PORT_ID; port++) {
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX,
+ false);
+ }
+ return;
+}
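The receiver IRQ helpers are meant to be used as a read/translate/clear sequence: ia_css_isys_rx_get_irq_info() reads the per-port status register and maps it to IA_CSS_RX_IRQ_INFO_* bits, and ia_css_isys_rx_clear_irq_info() maps the handled bits back to receiver register bits. A sketch of that flow, assuming driver context; the handler name is illustrative only:

#include "ia_css_isys.h"

/* Illustrative only: read, inspect, then clear the handled RX IRQ bits. */
static void example_handle_rx_irq(enum mipi_port_id port)
{
	unsigned int infos = 0;

	ia_css_isys_rx_get_irq_info(port, &infos);

	if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC) {
		/* ... report or recover from CRC errors here ... */
	}

	ia_css_isys_rx_clear_irq_info(port, infos);
}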
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
new file mode 100644
index 000000000000..e6c11d5f77b4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
@@ -0,0 +1,832 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/math.h>
+#include <linux/string.h> /* for memcpy() */
+
+#include "system_global.h"
+
+
+#include "ia_css_isys.h"
+#include "ia_css_debug.h"
+#include "virtual_isys.h"
+#include "isp.h"
+#include "sh_css_defs.h"
+
+/*************************************************
+ *
+ * Forward Declarations
+ *
+ *************************************************/
+
+static bool create_input_system_channel(
+ isp2401_input_system_cfg_t *cfg,
+ bool metadata,
+ input_system_channel_t *channel);
+
+static void destroy_input_system_channel(
+ input_system_channel_t *channel);
+
+static bool create_input_system_input_port(
+ isp2401_input_system_cfg_t *cfg,
+ input_system_input_port_t *input_port);
+
+static void destroy_input_system_input_port(
+ input_system_input_port_t *input_port);
+
+static bool calculate_input_system_channel_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ isp2401_input_system_cfg_t *isys_cfg,
+ input_system_channel_cfg_t *channel_cfg,
+ bool metadata);
+
+static bool calculate_input_system_input_port_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ isp2401_input_system_cfg_t *isys_cfg,
+ input_system_input_port_cfg_t *input_port_cfg);
+
+static bool acquire_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+static void release_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+static bool acquire_ib_buffer(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ s32 lines_per_frame,
+ s32 align_in_bytes,
+ bool online,
+ isp2401_ib_buffer_t *buf);
+
+static void release_ib_buffer(
+ isp2401_ib_buffer_t *buf);
+
+static bool acquire_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+static void release_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+static bool acquire_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+static void release_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+static bool calculate_prbs_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ isp2401_input_system_cfg_t *isys_cfg,
+ pixelgen_prbs_cfg_t *cfg);
+
+static bool calculate_fe_cfg(
+ const isp2401_input_system_cfg_t *isys_cfg,
+ csi_rx_frontend_cfg_t *cfg);
+
+static bool calculate_be_cfg(
+ const input_system_input_port_t *input_port,
+ const isp2401_input_system_cfg_t *isys_cfg,
+ bool metadata,
+ csi_rx_backend_cfg_t *cfg);
+
+static bool calculate_stream2mmio_cfg(
+ const isp2401_input_system_cfg_t *isys_cfg,
+ bool metadata,
+ stream2mmio_cfg_t *cfg);
+
+static bool calculate_ibuf_ctrl_cfg(
+ const input_system_channel_t *channel,
+ const input_system_input_port_t *input_port,
+ const isp2401_input_system_cfg_t *isys_cfg,
+ ibuf_ctrl_cfg_t *cfg);
+
+static bool calculate_isys2401_dma_cfg(
+ const input_system_channel_t *channel,
+ const isp2401_input_system_cfg_t *isys_cfg,
+ isys2401_dma_cfg_t *cfg);
+
+static bool calculate_isys2401_dma_port_cfg(
+ const isp2401_input_system_cfg_t *isys_cfg,
+ bool raw_packed,
+ bool metadata,
+ isys2401_dma_port_cfg_t *cfg);
+
+static csi_mipi_packet_type_t get_csi_mipi_packet_type(
+ int32_t data_type);
+
+static int32_t calculate_stride(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ bool raw_packed,
+ int32_t align_in_bytes);
+
+/* end of Forward Declarations */
+
+/**************************************************
+ *
+ * Public Methods
+ *
+ **************************************************/
+ia_css_isys_error_t ia_css_isys_stream_create(
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_h isys_stream,
+ uint32_t isys_stream_id)
+{
+ ia_css_isys_error_t rc;
+
+ if (!isys_stream_descr || !isys_stream ||
+ isys_stream_id >= SH_CSS_MAX_ISYS_CHANNEL_NODES)
+ return false;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_create() enter:\n");
+
+	/* Reset isys_stream to 0 */
+ memset(isys_stream, 0, sizeof(*isys_stream));
+ isys_stream->enable_metadata = isys_stream_descr->metadata.enable;
+ isys_stream->id = isys_stream_id;
+
+ isys_stream->linked_isys_stream_id = isys_stream_descr->linked_isys_stream_id;
+ rc = create_input_system_input_port(isys_stream_descr,
+ &isys_stream->input_port);
+ if (!rc)
+ return false;
+
+ rc = create_input_system_channel(isys_stream_descr, false,
+ &isys_stream->channel);
+ if (!rc) {
+ destroy_input_system_input_port(&isys_stream->input_port);
+ return false;
+ }
+
+ /* create metadata channel */
+ if (isys_stream_descr->metadata.enable) {
+ rc = create_input_system_channel(isys_stream_descr, true,
+ &isys_stream->md_channel);
+ if (!rc) {
+ destroy_input_system_input_port(&isys_stream->input_port);
+ destroy_input_system_channel(&isys_stream->channel);
+ return false;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_create() leave:\n");
+
+ return true;
+}
+
+void ia_css_isys_stream_destroy(
+ ia_css_isys_stream_h isys_stream)
+{
+ destroy_input_system_input_port(&isys_stream->input_port);
+ destroy_input_system_channel(&isys_stream->channel);
+ if (isys_stream->enable_metadata) {
+		/* Destroy the metadata channel only if it was allocated. */
+ destroy_input_system_channel(&isys_stream->md_channel);
+ }
+}
+
+ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
+ ia_css_isys_stream_h isys_stream,
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_cfg_t *isys_stream_cfg)
+{
+ ia_css_isys_error_t rc;
+
+ if (!isys_stream_cfg ||
+ !isys_stream_descr ||
+ !isys_stream)
+ return false;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_calculate_cfg() enter:\n");
+
+ rc = calculate_input_system_channel_cfg(
+ &isys_stream->channel,
+ &isys_stream->input_port,
+ isys_stream_descr,
+ &isys_stream_cfg->channel_cfg,
+ false);
+ if (!rc)
+ return false;
+
+ /* configure metadata channel */
+ if (isys_stream_descr->metadata.enable) {
+ isys_stream_cfg->enable_metadata = true;
+ rc = calculate_input_system_channel_cfg(
+ &isys_stream->md_channel,
+ &isys_stream->input_port,
+ isys_stream_descr,
+ &isys_stream_cfg->md_channel_cfg,
+ true);
+ if (!rc)
+ return false;
+ }
+
+ rc = calculate_input_system_input_port_cfg(
+ &isys_stream->channel,
+ &isys_stream->input_port,
+ isys_stream_descr,
+ &isys_stream_cfg->input_port_cfg);
+ if (!rc)
+ return false;
+
+ isys_stream->valid = 1;
+ isys_stream_cfg->valid = 1;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_calculate_cfg() leave:\n");
+ return rc;
+}
+
+/* end of Public Methods */
+
+/**************************************************
+ *
+ * Private Methods
+ *
+ **************************************************/
+static bool create_input_system_channel(
+ isp2401_input_system_cfg_t *cfg,
+ bool metadata,
+ input_system_channel_t *me)
+{
+ bool rc = true;
+
+ me->dma_id = ISYS2401_DMA0_ID;
+
+ switch (cfg->input_port_id) {
+ case INPUT_SYSTEM_CSI_PORT0_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
+ me->stream2mmio_id = STREAM2MMIO0_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL0_ID;
+ break;
+
+ case INPUT_SYSTEM_CSI_PORT1_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
+ me->stream2mmio_id = STREAM2MMIO1_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL1_ID;
+ break;
+
+ case INPUT_SYSTEM_CSI_PORT2_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
+ me->stream2mmio_id = STREAM2MMIO2_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL2_ID;
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ if (!rc)
+ return false;
+
+ if (!acquire_sid(me->stream2mmio_id, &me->stream2mmio_sid_id)) {
+ return false;
+ }
+
+ if (!acquire_ib_buffer(
+ metadata ? cfg->metadata.bits_per_pixel :
+ cfg->input_port_resolution.bits_per_pixel,
+ metadata ? cfg->metadata.pixels_per_line :
+ cfg->input_port_resolution.pixels_per_line,
+ metadata ? cfg->metadata.lines_per_frame :
+ cfg->input_port_resolution.lines_per_frame,
+ metadata ? cfg->metadata.align_req_in_bytes :
+ cfg->input_port_resolution.align_req_in_bytes,
+ cfg->online,
+ &me->ib_buffer)) {
+ release_sid(me->stream2mmio_id, &me->stream2mmio_sid_id);
+ return false;
+ }
+
+ if (!acquire_dma_channel(me->dma_id, &me->dma_channel)) {
+ release_sid(me->stream2mmio_id, &me->stream2mmio_sid_id);
+ release_ib_buffer(&me->ib_buffer);
+ return false;
+ }
+
+ return true;
+}
+
+static void destroy_input_system_channel(
+ input_system_channel_t *me)
+{
+ release_sid(me->stream2mmio_id,
+ &me->stream2mmio_sid_id);
+
+ release_ib_buffer(&me->ib_buffer);
+
+ release_dma_channel(me->dma_id, &me->dma_channel);
+}
+
+static bool create_input_system_input_port(
+ isp2401_input_system_cfg_t *cfg,
+ input_system_input_port_t *me)
+{
+ csi_mipi_packet_type_t packet_type;
+ bool rc = true;
+
+ switch (cfg->input_port_id) {
+ case INPUT_SYSTEM_CSI_PORT0_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND0_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND0_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &me->csi_rx.backend_lut_entry);
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN0_ID;
+ break;
+ case INPUT_SYSTEM_CSI_PORT1_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND1_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND1_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &me->csi_rx.backend_lut_entry);
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN1_ID;
+
+ break;
+ case INPUT_SYSTEM_CSI_PORT2_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND2_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND2_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &me->csi_rx.backend_lut_entry);
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN2_ID;
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ me->source_type = cfg->mode;
+
+ /* for metadata */
+ me->metadata.packet_type = CSI_MIPI_PACKET_TYPE_UNDEFINED;
+ if (rc && cfg->metadata.enable) {
+ me->metadata.packet_type = get_csi_mipi_packet_type(
+ cfg->metadata.fmt_type);
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->metadata.packet_type,
+ &me->metadata.backend_lut_entry);
+ }
+
+ return rc;
+}
+
+static void destroy_input_system_input_port(
+ input_system_input_port_t *me)
+{
+ if (me->source_type == INPUT_SYSTEM_SOURCE_TYPE_SENSOR) {
+ release_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->csi_rx.packet_type,
+ &me->csi_rx.backend_lut_entry);
+ }
+
+ if (me->metadata.packet_type != CSI_MIPI_PACKET_TYPE_UNDEFINED) {
+		/* Free the backend LUT allocated for metadata */
+ release_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->metadata.packet_type,
+ &me->metadata.backend_lut_entry);
+ }
+}
+
+static bool calculate_input_system_channel_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ isp2401_input_system_cfg_t *isys_cfg,
+ input_system_channel_cfg_t *channel_cfg,
+ bool metadata)
+{
+ bool rc;
+
+ rc = calculate_stream2mmio_cfg(isys_cfg, metadata,
+ &channel_cfg->stream2mmio_cfg);
+ if (!rc)
+ return false;
+
+ rc = calculate_ibuf_ctrl_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &channel_cfg->ibuf_ctrl_cfg);
+ if (!rc)
+ return false;
+ if (metadata)
+ channel_cfg->ibuf_ctrl_cfg.stores_per_frame =
+ isys_cfg->metadata.lines_per_frame;
+
+ rc = calculate_isys2401_dma_cfg(
+ channel,
+ isys_cfg,
+ &channel_cfg->dma_cfg);
+ if (!rc)
+ return false;
+
+ rc = calculate_isys2401_dma_port_cfg(
+ isys_cfg,
+ false,
+ metadata,
+ &channel_cfg->dma_src_port_cfg);
+ if (!rc)
+ return false;
+
+ rc = calculate_isys2401_dma_port_cfg(
+ isys_cfg,
+ isys_cfg->raw_packed,
+ metadata,
+ &channel_cfg->dma_dest_port_cfg);
+ if (!rc)
+ return false;
+
+ return true;
+}
+
+static bool calculate_input_system_input_port_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ isp2401_input_system_cfg_t *isys_cfg,
+ input_system_input_port_cfg_t *input_port_cfg)
+{
+ bool rc;
+
+ switch (input_port->source_type) {
+ case INPUT_SYSTEM_SOURCE_TYPE_SENSOR:
+ rc = calculate_fe_cfg(
+ isys_cfg,
+ &input_port_cfg->csi_rx_cfg.frontend_cfg);
+
+ rc &= calculate_be_cfg(
+ input_port,
+ isys_cfg,
+ false,
+ &input_port_cfg->csi_rx_cfg.backend_cfg);
+
+ if (rc && isys_cfg->metadata.enable)
+ rc &= calculate_be_cfg(input_port, isys_cfg, true,
+ &input_port_cfg->csi_rx_cfg.md_backend_cfg);
+ break;
+ case INPUT_SYSTEM_SOURCE_TYPE_PRBS:
+ rc = calculate_prbs_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &input_port_cfg->pixelgen_cfg.prbs_cfg);
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool acquire_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ return ia_css_isys_stream2mmio_sid_rmgr_acquire(stream2mmio, sid);
+}
+
+static void release_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ ia_css_isys_stream2mmio_sid_rmgr_release(stream2mmio, sid);
+}
+
+/* See also: ia_css_dma_configure_from_info() */
+static int32_t calculate_stride(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ bool raw_packed,
+ int32_t align_in_bytes)
+{
+ s32 bytes_per_line;
+ s32 pixels_per_word;
+ s32 words_per_line;
+ s32 pixels_per_line_padded;
+
+ pixels_per_line_padded = CEIL_MUL(pixels_per_line, align_in_bytes);
+
+ if (!raw_packed)
+ bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
+
+ pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ words_per_line = DIV_ROUND_UP(pixels_per_line_padded, pixels_per_word);
+ bytes_per_line = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
+
+ return bytes_per_line;
+}
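
The stride computation above rounds the line length up to the alignment requirement, widens unpacked pixels to whole bytes, and then rounds up to whole DDR words. Below is a minimal standalone sketch of the same arithmetic with the DDR word geometry written out as explicit constants; the 256-bit / 32-byte word size is an assumption here (the driver takes it from HIVE_ISP_DDR_WORD_BITS/BYTES):

    #include <stdint.h>

    /* Assumed DDR word geometry; the driver gets these from its HW headers. */
    #define DDR_WORD_BITS   256
    #define DDR_WORD_BYTES  32

    #define CEIL_MUL(a, b)      ((((a) + (b) - 1) / (b)) * (b))
    #define DIV_ROUND_UP(a, b)  (((a) + (b) - 1) / (b))

    /* Mirrors calculate_stride() above: bytes needed to store one line. */
    static int32_t stride_bytes(int32_t bpp, int32_t ppl,
                                int raw_packed, int32_t align)
    {
        int32_t ppl_padded = CEIL_MUL(ppl, align);

        if (!raw_packed)
            bpp = CEIL_MUL(bpp, 8);               /* widen to whole bytes */

        int32_t pix_per_word   = DDR_WORD_BITS / bpp;
        int32_t words_per_line = DIV_ROUND_UP(ppl_padded, pix_per_word);

        return DDR_WORD_BYTES * words_per_line;
    }

    /*
     * Example: 10-bit unpacked pixels, 1920 pixels/line, 32-byte alignment:
     * bpp -> 16, 16 pixels/word, 120 words/line, stride = 3840 bytes.
     */
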
+
+static bool acquire_ib_buffer(
+ s32 bits_per_pixel,
+ s32 pixels_per_line,
+ s32 lines_per_frame,
+ s32 align_in_bytes,
+ bool online,
+ isp2401_ib_buffer_t *buf)
+{
+ buf->stride = calculate_stride(bits_per_pixel, pixels_per_line, false,
+ align_in_bytes);
+ if (online)
+		buf->lines = 4; /* use double buffering for online use cases */
+ else
+ buf->lines = 2;
+
+ (void)(lines_per_frame);
+ return ia_css_isys_ibuf_rmgr_acquire(buf->stride * buf->lines,
+ &buf->start_addr);
+}
+
+static void release_ib_buffer(
+ isp2401_ib_buffer_t *buf)
+{
+ ia_css_isys_ibuf_rmgr_release(&buf->start_addr);
+}
+
+static bool acquire_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ return ia_css_isys_dma_channel_rmgr_acquire(dma_id, channel);
+}
+
+static void release_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ ia_css_isys_dma_channel_rmgr_release(dma_id, channel);
+}
+
+static bool acquire_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ return ia_css_isys_csi_rx_lut_rmgr_acquire(backend, packet_type, entry);
+}
+
+static void release_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ ia_css_isys_csi_rx_lut_rmgr_release(backend, packet_type, entry);
+}
+
+static bool calculate_prbs_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ isp2401_input_system_cfg_t *isys_cfg,
+ pixelgen_prbs_cfg_t *cfg)
+{
+ memcpy(cfg, &isys_cfg->prbs_port_attr, sizeof(pixelgen_prbs_cfg_t));
+
+ return true;
+}
+
+static bool calculate_fe_cfg(
+ const isp2401_input_system_cfg_t *isys_cfg,
+ csi_rx_frontend_cfg_t *cfg)
+{
+ cfg->active_lanes = isys_cfg->csi_port_attr.active_lanes;
+ return true;
+}
+
+static bool calculate_be_cfg(
+ const input_system_input_port_t *input_port,
+ const isp2401_input_system_cfg_t *isys_cfg,
+ bool metadata,
+ csi_rx_backend_cfg_t *cfg)
+{
+ memcpy(&cfg->lut_entry,
+ metadata ? &input_port->metadata.backend_lut_entry :
+ &input_port->csi_rx.backend_lut_entry,
+ sizeof(csi_rx_backend_lut_entry_t));
+
+ cfg->csi_mipi_cfg.virtual_channel = isys_cfg->csi_port_attr.ch_id;
+ if (metadata) {
+ cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(
+ isys_cfg->metadata.fmt_type);
+ cfg->csi_mipi_cfg.comp_enable = false;
+ cfg->csi_mipi_cfg.data_type = isys_cfg->metadata.fmt_type;
+ } else {
+ cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(
+ isys_cfg->csi_port_attr.fmt_type);
+ cfg->csi_mipi_cfg.data_type = isys_cfg->csi_port_attr.fmt_type;
+ cfg->csi_mipi_cfg.comp_enable = isys_cfg->csi_port_attr.comp_enable;
+ cfg->csi_mipi_cfg.comp_scheme = isys_cfg->csi_port_attr.comp_scheme;
+ cfg->csi_mipi_cfg.comp_predictor = isys_cfg->csi_port_attr.comp_predictor;
+ cfg->csi_mipi_cfg.comp_bit_idx = cfg->csi_mipi_cfg.data_type -
+ MIPI_FORMAT_2401_CUSTOM0;
+ }
+
+ return true;
+}
+
+static bool calculate_stream2mmio_cfg(
+ const isp2401_input_system_cfg_t *isys_cfg,
+ bool metadata,
+	stream2mmio_cfg_t *cfg)
+{
+ cfg->bits_per_pixel = metadata ? isys_cfg->metadata.bits_per_pixel :
+ isys_cfg->input_port_resolution.bits_per_pixel;
+
+ cfg->enable_blocking = isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_PRBS;
+
+ return true;
+}
+
+static bool calculate_ibuf_ctrl_cfg(
+ const input_system_channel_t *channel,
+ const input_system_input_port_t *input_port,
+ const isp2401_input_system_cfg_t *isys_cfg,
+ ibuf_ctrl_cfg_t *cfg)
+{
+ s32 bits_per_pixel;
+ s32 bytes_per_pixel;
+ s32 left_padding;
+
+ (void)input_port;
+
+ bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
+ bytes_per_pixel = BITS_TO_BYTES(bits_per_pixel);
+
+ left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS)
+ * bytes_per_pixel;
+
+ cfg->online = isys_cfg->online;
+
+ cfg->dma_cfg.channel = channel->dma_channel;
+ cfg->dma_cfg.cmd = _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND;
+
+ cfg->dma_cfg.shift_returned_items = 0;
+ cfg->dma_cfg.elems_per_word_in_ibuf = 0;
+ cfg->dma_cfg.elems_per_word_in_dest = 0;
+
+ cfg->ib_buffer.start_addr = channel->ib_buffer.start_addr;
+ cfg->ib_buffer.stride = channel->ib_buffer.stride;
+ cfg->ib_buffer.lines = channel->ib_buffer.lines;
+
+	/*
+	 * "dest_buf_cfg" should be part of the input system output
+	 * port configuration.
+	 *
+	 * TODO: move "dest_buf_cfg" to the input system output
+	 * port configuration.
+	 */
+
+	/* The input_buf address is only available in sched mode; this
+	 * buffer is allocated in the ISP. In crun mode the address can
+	 * be passed in after ISP allocation. */
+ if (cfg->online) {
+ cfg->dest_buf_cfg.start_addr = ISP_INPUT_BUF_START_ADDR + left_padding;
+ cfg->dest_buf_cfg.stride = bytes_per_pixel
+ * isys_cfg->output_port_attr.max_isp_input_width;
+ cfg->dest_buf_cfg.lines = LINES_OF_ISP_INPUT_BUF;
+ } else if (isys_cfg->raw_packed) {
+ cfg->dest_buf_cfg.stride = calculate_stride(bits_per_pixel,
+ isys_cfg->input_port_resolution.pixels_per_line,
+ isys_cfg->raw_packed,
+ isys_cfg->input_port_resolution.align_req_in_bytes);
+ } else {
+ cfg->dest_buf_cfg.stride = channel->ib_buffer.stride;
+ }
+
+	/*
+	 * "items_per_store" is hard-coded to "1", which is only valid
+	 * when CSI-MIPI long packets are transferred.
+	 *
+	 * TODO: after the 1st stage of MERR+, configure "items_per_store"
+	 * properly so that CSI-MIPI short packets are also handled.
+	 */
+ cfg->items_per_store = 1;
+
+ cfg->stores_per_frame = isys_cfg->input_port_resolution.lines_per_frame;
+
+ cfg->stream2mmio_cfg.sync_cmd = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME;
+
+ /* TODO: Define conditions as when to use store words vs store packets */
+ cfg->stream2mmio_cfg.store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS;
+
+ return true;
+}
+
+static bool calculate_isys2401_dma_cfg(
+ const input_system_channel_t *channel,
+ const isp2401_input_system_cfg_t *isys_cfg,
+ isys2401_dma_cfg_t *cfg)
+{
+ cfg->channel = channel->dma_channel;
+
+	/* Only online/sensor mode goes to VMEM;
+	 * offline/buffered_sensor, TPG and PRBS go to DDR */
+ if (isys_cfg->online)
+ cfg->connection = isys2401_dma_ibuf_to_vmem_connection;
+ else
+ cfg->connection = isys2401_dma_ibuf_to_ddr_connection;
+
+ cfg->extension = isys2401_dma_zero_extension;
+ cfg->height = 1;
+
+ return true;
+}
+
+/* See also: ia_css_dma_configure_from_info() */
+static bool calculate_isys2401_dma_port_cfg(
+ const isp2401_input_system_cfg_t *isys_cfg,
+ bool raw_packed,
+ bool metadata,
+ isys2401_dma_port_cfg_t *cfg)
+{
+ s32 bits_per_pixel;
+ s32 pixels_per_line;
+ s32 align_req_in_bytes;
+
+ /* TODO: Move metadata away from isys_cfg to application layer */
+ if (metadata) {
+ bits_per_pixel = isys_cfg->metadata.bits_per_pixel;
+ pixels_per_line = isys_cfg->metadata.pixels_per_line;
+ align_req_in_bytes = isys_cfg->metadata.align_req_in_bytes;
+ } else {
+ bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
+ pixels_per_line = isys_cfg->input_port_resolution.pixels_per_line;
+ align_req_in_bytes = isys_cfg->input_port_resolution.align_req_in_bytes;
+ }
+
+ cfg->stride = calculate_stride(bits_per_pixel, pixels_per_line, raw_packed,
+ align_req_in_bytes);
+
+ if (!raw_packed)
+ bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
+
+ cfg->elements = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ cfg->cropping = 0;
+ cfg->width = DIV_ROUND_UP(cfg->stride, HIVE_ISP_DDR_WORD_BYTES);
+
+ return true;
+}
+
+static csi_mipi_packet_type_t get_csi_mipi_packet_type(
+ int32_t data_type)
+{
+ csi_mipi_packet_type_t packet_type;
+
+ packet_type = CSI_MIPI_PACKET_TYPE_RESERVED;
+
+ if (data_type >= 0 && data_type <= MIPI_FORMAT_2401_SHORT8)
+ packet_type = CSI_MIPI_PACKET_TYPE_SHORT;
+
+ if (data_type > MIPI_FORMAT_2401_SHORT8 && data_type <= N_MIPI_FORMAT_2401)
+ packet_type = CSI_MIPI_PACKET_TYPE_LONG;
+
+ return packet_type;
+}
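
The result classifies the MIPI data type: values up to MIPI_FORMAT_2401_SHORT8 are short (sync) packets, values above that up to N_MIPI_FORMAT_2401 are long (payload) packets, and anything else is reserved. A hedged sketch of branching on the result; cfg is an illustrative isp2401_input_system_cfg_t pointer:

    csi_mipi_packet_type_t type =
        get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);

    switch (type) {
    case CSI_MIPI_PACKET_TYPE_LONG:
        /* Payload (pixel or metadata) packets. */
        break;
    case CSI_MIPI_PACKET_TYPE_SHORT:
        /* Frame/line sync codes: no payload. */
        break;
    default:
        /* CSI_MIPI_PACKET_TYPE_RESERVED: reject the stream descriptor. */
        break;
    }
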
+
+/* end of Private Methods */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h
new file mode 100644
index 000000000000..a911766b7d49
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __VIRTUAL_ISYS_H_INCLUDED__
+#define __VIRTUAL_ISYS_H_INCLUDED__
+
+/* Command for storing a number of packets, as indicated by reg _STREAM2MMIO_NUM_ITEMS */
+#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1
+
+/* command for waiting for a frame start */
+#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2
+
+#endif /* __VIRTUAL_ISYS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
new file mode 100644
index 000000000000..8b7cbf31a1a2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PIPELINE_H__
+#define __IA_CSS_PIPELINE_H__
+
+#include "sh_css_internal.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_pipeline_common.h"
+
+#define IA_CSS_PIPELINE_NUM_MAX (20)
+
+/* Pipeline stage to be executed on SP/ISP */
+struct ia_css_pipeline_stage {
+ unsigned int stage_num;
+ struct ia_css_binary *binary; /* built-in binary */
+ struct ia_css_binary_info *binary_info;
+ const struct ia_css_fw_info *firmware; /* acceleration binary */
+ /* SP function for SP stage */
+ enum ia_css_pipeline_stage_sp_func sp_func;
+ unsigned int max_input_width; /* For SP raw copy */
+ struct sh_css_binary_args args;
+ int mode;
+ bool out_frame_allocated[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ bool vf_frame_allocated;
+ struct ia_css_pipeline_stage *next;
+ bool enable_zoom;
+};
+
+/* Pipeline of n stages, each to be executed on SP/ISP */
+struct ia_css_pipeline {
+ enum ia_css_pipe_id pipe_id;
+ u8 pipe_num;
+ struct ia_css_pipeline_stage *stages;
+ struct ia_css_pipeline_stage *current_stage;
+ unsigned int num_stages;
+ struct ia_css_frame in_frame;
+ struct ia_css_frame out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ unsigned int dvs_frame_delay;
+ unsigned int inout_port_config;
+ int num_execs;
+ bool acquire_isp_each_stage;
+};
+
+#define DEFAULT_PIPELINE { \
+ .pipe_id = IA_CSS_PIPE_ID_PREVIEW, \
+ .in_frame = DEFAULT_FRAME, \
+ .out_frame = {DEFAULT_FRAME}, \
+ .vf_frame = {DEFAULT_FRAME}, \
+ .dvs_frame_delay = IA_CSS_FRAME_DELAY_1, \
+ .num_execs = -1, \
+ .acquire_isp_each_stage = true, \
+}
+
+/* Stage descriptor used to create a new stage in the pipeline */
+struct ia_css_pipeline_stage_desc {
+ struct ia_css_binary *binary;
+ const struct ia_css_fw_info *firmware;
+ enum ia_css_pipeline_stage_sp_func sp_func;
+ unsigned int max_input_width;
+ unsigned int mode;
+ struct ia_css_frame *in_frame;
+ struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame;
+};
+
+/* @brief initialize the pipeline module
+ *
+ * @return None
+ *
+ * Initializes the pipeline module. This API has to be called
+ * before any operation on the pipeline module is done
+ */
+void ia_css_pipeline_init(void);
+
+/* @brief initialize the pipeline structure with default values
+ *
+ * @param[out] pipeline structure to be initialized with defaults
+ * @param[in] pipe_id
+ * @param[in] pipe_num Number that uniquely identifies a pipeline.
+ * @return 0 or error code upon error.
+ *
+ * Initializes the pipeline structure with a set of default values.
+ * This API is expected to be used when a pipeline structure is allocated
+ * externally and needs sane defaults
+ */
+int ia_css_pipeline_create(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay);
+
+/* @brief destroy a pipeline
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
+
+/* @brief Starts a pipeline
+ *
+ * @param[in] pipe_id
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
+ struct ia_css_pipeline *pipeline);
+
+/* @brief Request to stop a pipeline
+ *
+ * @param[in] pipeline
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
+
+/* @brief Check whether pipeline has stopped
+ *
+ * @param[in] pipeline
+ * @return true if the pipeline has stopped
+ *
+ */
+bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
+
+/* @brief Clean all the stages of the pipeline and reset it to its initial state
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline);
+
+/* @brief Add a stage to pipeline.
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @param[in] stage_desc The description of the stage
+ * @param[out]	stage The newly created stage.
+ * @return 0 or error code upon error.
+ *
+ * Add a new stage to a non-NULL pipeline.
+ * The stage consists of an ISP binary or firmware and input and output
+ * arguments.
+*/
+int ia_css_pipeline_create_and_add_stage(
+ struct ia_css_pipeline *pipeline,
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Finalize the stages in a pipeline
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @return None
+ *
+ * This API is expected to be called after adding all stages
+*/
+void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
+ bool continuous);
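
A minimal, hedged usage sketch tying these calls together. It assumes ia_css_pipeline_init() and ia_css_pipeline_map() were already called during driver init; my_binary and my_in_frame are hypothetical objects prepared elsewhere, and error unwinding is omitted for brevity:

    struct ia_css_pipeline pipeline;
    struct ia_css_pipeline_stage *stage;
    struct ia_css_pipeline_stage_desc desc = { 0 };
    int err;

    err = ia_css_pipeline_create(&pipeline, IA_CSS_PIPE_ID_VIDEO,
                                 /* pipe_num */ 0, IA_CSS_FRAME_DELAY_1);
    if (err)
        return err;

    desc.binary   = my_binary;      /* hypothetical, loaded elsewhere */
    desc.mode     = IA_CSS_BINARY_MODE_VIDEO;
    desc.sp_func  = IA_CSS_PIPELINE_NO_FUNC;
    desc.in_frame = my_in_frame;    /* hypothetical input frame */

    err = ia_css_pipeline_create_and_add_stage(&pipeline, &desc, &stage);
    if (err)
        return err;

    ia_css_pipeline_finalize_stages(&pipeline, /* continuous */ false);
    ia_css_pipeline_start(IA_CSS_PIPE_ID_VIDEO, &pipeline);
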
+
+/* @brief gets a stage from the pipeline
+ *
+ * @param[in] pipeline
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Get the pipeline stage corresponding to a firmware handle from the pipeline
+ *
+ * @param[in] pipeline
+ * @param[in] fw_handle
+ * @param[out] stage Pointer to Stage
+ *
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline
+ *pipeline,
+ u32 fw_handle,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Get the firmware handle corresponding to a stage number in the pipeline
+ *
+ * @param[in] pipeline
+ * @param[in] stage_num
+ * @param[out] fw_handle
+ *
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline
+ *pipeline,
+ u32 stage_num,
+ uint32_t *fw_handle);
+
+/* @brief gets the output stage from the pipeline
+ *
+ * @param[in] pipeline
+ * @return 0 or error code upon error.
+ *
+ */
+int ia_css_pipeline_get_output_stage(
+ struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage);
+
+/* @brief Checks whether the pipeline uses params
+ *
+ * @param[in] pipeline
+ * @return true if the pipeline uses params
+ *
+ */
+bool ia_css_pipeline_uses_params(struct ia_css_pipeline *pipeline);
+
+/**
+ * @brief get the SP thread ID.
+ *
+ * @param[in] key The query key, typical use is pipe_num.
+ * @param[out] val The query value.
+ *
+ * @return
+ * true, if the query succeeds;
+ * false, if the query fails.
+ */
+bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
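
A short hedged sketch of the query; pipeline here is an illustrative, already created ia_css_pipeline pointer. The call returns false when the pipe number was never mapped with ia_css_pipeline_map():

    unsigned int thread_id;

    if (!ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id))
        return -EINVAL;   /* pipe not mapped to an SP thread */

    /* thread_id can now be used to address the SP thread for this pipe. */
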
+
+/**
+ * @brief Get the pipeline io status
+ *
+ * @param[in] None
+ * @return
+ * Pointer to pipe_io_status
+ */
+struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void);
+
+/**
+ * @brief Map an SP thread to this pipeline
+ *
+ * @param[in] pipe_num
+ * @param[in] map true for mapping and false for unmapping sp threads.
+ *
+ */
+void ia_css_pipeline_map(unsigned int pipe_num, bool map);
+
+/**
+ * @brief Checks whether the pipeline is mapped to SP threads
+ *
+ * @param[in] Query key, typical use is pipe_num
+ *
+ * return
+ * true, pipeline is mapped to SP threads
+ * false, pipeline is not mapped to SP threads
+ */
+bool ia_css_pipeline_is_mapped(unsigned int key);
+
+/**
+ * @brief Print pipeline thread mapping
+ *
+ * @param[in] none
+ *
+ * return none
+ */
+void ia_css_pipeline_dump_thread_map_info(void);
+
+#endif /*__IA_CSS_PIPELINE_H__*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h
new file mode 100644
index 000000000000..dff453b37db1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_PIPELINE_COMMON_H__
+#define __IA_CSS_PIPELINE_COMMON_H__
+
+enum ia_css_pipeline_stage_sp_func {
+ IA_CSS_PIPELINE_RAW_COPY = 0,
+ IA_CSS_PIPELINE_BIN_COPY = 1,
+ IA_CSS_PIPELINE_ISYS_COPY = 2,
+ IA_CSS_PIPELINE_NO_FUNC = 3,
+};
+
+#define IA_CSS_PIPELINE_NUM_STAGE_FUNCS 3
+
+#endif /*__IA_CSS_PIPELINE_COMMON_H__*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
new file mode 100644
index 000000000000..cb8d652227a7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "ia_css_debug.h"
+#include "sw_event_global.h" /* encode_sw_event */
+#include "sp.h" /* cnd_sp_irq_enable() */
+#include "assert_support.h"
+#include "sh_css_sp.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+#include "ia_css_bufq.h"
+
+#define PIPELINE_NUM_UNMAPPED (~0U)
+#define PIPELINE_SP_THREAD_EMPTY_TOKEN (0x0)
+#define PIPELINE_SP_THREAD_RESERVED_TOKEN (0x1)
+
+/*******************************************************
+*** Static variables
+********************************************************/
+static unsigned int pipeline_num_to_sp_thread_map[IA_CSS_PIPELINE_NUM_MAX];
+static unsigned int pipeline_sp_thread_list[SH_CSS_MAX_SP_THREADS];
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void pipeline_init_sp_thread_map(void);
+static void pipeline_map_num_to_sp_thread(unsigned int pipe_num);
+static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num);
+static void pipeline_init_defaults(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay);
+
+static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage);
+static int pipeline_stage_create(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **new_stage);
+static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline);
+static void ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me,
+ bool continuous);
+
+/*******************************************************
+*** Public functions
+********************************************************/
+void ia_css_pipeline_init(void)
+{
+ pipeline_init_sp_thread_map();
+}
+
+int ia_css_pipeline_create(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay)
+{
+ assert(pipeline);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p, pipe_id = %d, pipe_num = %d, dvs_frame_delay = %d",
+ pipeline, pipe_id, pipe_num, dvs_frame_delay);
+ if (!pipeline) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ pipeline_init_defaults(pipeline, pipe_id, pipe_num, dvs_frame_delay);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+void ia_css_pipeline_map(unsigned int pipe_num, bool map)
+{
+ assert(pipe_num < IA_CSS_PIPELINE_NUM_MAX);
+ IA_CSS_ENTER_PRIVATE("pipe_num = %d, map = %d", pipe_num, map);
+
+ if (pipe_num >= IA_CSS_PIPELINE_NUM_MAX) {
+ IA_CSS_ERROR("Invalid pipe number");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+ if (map)
+ pipeline_map_num_to_sp_thread(pipe_num);
+ else
+ pipeline_unmap_num_to_sp_thread(pipe_num);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/* @brief destroy a pipeline
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline)
+{
+ assert(pipeline);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);
+
+ if (!pipeline) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+
+ IA_CSS_LOG("pipe_num = %d", pipeline->pipe_num);
+
+ /* Free the pipeline number */
+ ia_css_pipeline_clean(pipeline);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/* Start a pipeline; the SP executes it asynchronously. */
+void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
+ struct ia_css_pipeline *pipeline)
+{
+ u8 pipe_num = 0;
+ unsigned int thread_id;
+
+ assert(pipeline);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() enter: pipe_id=%d, pipeline=%p\n",
+ pipe_id, pipeline);
+ pipeline->pipe_id = pipe_id;
+ sh_css_sp_init_pipeline(pipeline, pipe_id, pipe_num,
+ false, false, false, true, SH_CSS_BDS_FACTOR_1_00,
+ SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
+ IA_CSS_INPUT_MODE_MEMORY, NULL, NULL,
+ (enum mipi_port_id)0);
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ if (!sh_css_sp_is_running()) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() error,leaving\n");
+ /* queues are invalid*/
+ return;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id,
+ 0,
+ 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() leave: return_void\n");
+}
+
+/*
+ * @brief Query the SP thread ID.
+ * Refer to "sh_css_internal.h" for details.
+ */
+bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val)
+{
+ IA_CSS_ENTER("key=%d, val=%p", key, val);
+
+ if ((!val) || (key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) {
+ IA_CSS_LEAVE("return value = false");
+ return false;
+ }
+
+ *val = pipeline_num_to_sp_thread_map[key];
+
+ if (*val == (unsigned int)PIPELINE_NUM_UNMAPPED) {
+ IA_CSS_LOG("unmapped pipeline number");
+ IA_CSS_LEAVE("return value = false");
+ return false;
+ }
+ IA_CSS_LEAVE("return value = true");
+ return true;
+}
+
+void ia_css_pipeline_dump_thread_map_info(void)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipeline_num_to_sp_thread_map:\n");
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipe_num: %u, tid: 0x%x\n", i, pipeline_num_to_sp_thread_map[i]);
+ }
+}
+
+int ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline)
+{
+ int err = 0;
+ unsigned int thread_id;
+
+ assert(pipeline);
+
+ if (!pipeline)
+ return -EINVAL;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() enter: pipeline=%p\n",
+ pipeline);
+
+ /* Send stop event to the sp*/
+	/* This needs improvement: stop on all the pipes available
+	 * in the stream */
+ ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
+ if (!sh_css_sp_is_running()) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() leaving\n");
+ /* queues are invalid */
+ return -EBUSY;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_STOP_STREAM,
+ (uint8_t)thread_id,
+ 0,
+ 0);
+ sh_css_sp_uninit_pipeline(pipeline->pipe_num);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() leave: return_err=%d\n",
+ err);
+ return err;
+}
+
+void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);
+
+ if (!pipeline) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+ s = pipeline->stages;
+
+ while (s) {
+ struct ia_css_pipeline_stage *next = s->next;
+
+ pipeline_stage_destroy(s);
+ s = next;
+ }
+ pipeline_init_defaults(pipeline, pipeline->pipe_id, pipeline->pipe_num,
+ pipeline->dvs_frame_delay);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/* @brief Add a stage to pipeline.
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @param[in] stage_desc The description of the stage
+ * @param[out]	stage The newly created stage.
+ * @return 0 or error code upon error.
+ *
+ * Add a new stage to a non-NULL pipeline.
+ * The stage consists of an ISP binary or firmware and input and
+ * output arguments.
+*/
+int ia_css_pipeline_create_and_add_stage(
+ struct ia_css_pipeline *pipeline,
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *last, *new_stage = NULL;
+ int err;
+
+ /* other arguments can be NULL */
+ assert(pipeline);
+ assert(stage_desc);
+ last = pipeline->stages;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() enter:\n");
+ if (!stage_desc->binary && !stage_desc->firmware
+ && (stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done: Invalid args\n");
+
+ return -EINVAL;
+ }
+
+ /* Find the last stage */
+ while (last && last->next)
+ last = last->next;
+
+	/* If in_frame is not set, we use the out_frame of the previous
+	 * stage; if there is no previous stage, it's an error.
+	 */
+ if ((stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)
+ && (!stage_desc->in_frame)
+ && (!stage_desc->firmware)
+ && (!stage_desc->binary->online)) {
+ /* Do this only for ISP stages*/
+ if (last && last->args.out_frame[0])
+ stage_desc->in_frame = last->args.out_frame[0];
+
+ if (!stage_desc->in_frame)
+ return -EINVAL;
+ }
+
+ /* Create the new stage */
+ err = pipeline_stage_create(stage_desc, &new_stage);
+ if (err) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done: stage_create_failed\n");
+ return err;
+ }
+
+ if (last)
+ last->next = new_stage;
+ else
+ pipeline->stages = new_stage;
+
+ /* Output the new stage */
+ if (stage)
+ *stage = new_stage;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done:\n");
+ return 0;
+}
+
+void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
+ bool continuous)
+{
+ unsigned int i = 0;
+ struct ia_css_pipeline_stage *stage;
+
+ assert(pipeline);
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ stage->stage_num = i;
+ i++;
+ }
+ pipeline->num_stages = i;
+
+ ia_css_pipeline_set_zoom_stage(pipeline);
+ ia_css_pipeline_configure_inout_port(pipeline, continuous);
+}
+
+int ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ assert(stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_get_stage() enter:\n");
+ for (s = pipeline->stages; s; s = s->next) {
+ if (s->mode == mode) {
+ *stage = s;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+int ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline
+ *pipeline,
+ u32 fw_handle,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ assert(stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ for (s = pipeline->stages; s; s = s->next) {
+ if ((s->firmware) && (s->firmware->handle == fw_handle)) {
+ *stage = s;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+int ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline
+ *pipeline,
+ u32 stage_num,
+ uint32_t *fw_handle)
+{
+ struct ia_css_pipeline_stage *s;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ if ((!pipeline) || (!fw_handle))
+ return -EINVAL;
+
+ for (s = pipeline->stages; s; s = s->next) {
+ if ((s->stage_num == stage_num) && (s->firmware)) {
+ *fw_handle = s->firmware->handle;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+int ia_css_pipeline_get_output_stage(
+ struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline);
+ assert(stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_get_output_stage() enter:\n");
+
+ *stage = NULL;
+ /* First find acceleration firmware at end of pipe */
+ for (s = pipeline->stages; s; s = s->next) {
+ if (s->firmware && s->mode == mode &&
+ s->firmware->info.isp.sp.enable.output)
+ *stage = s;
+ }
+ if (*stage)
+ return 0;
+ /* If no firmware, find binary in pipe */
+ return ia_css_pipeline_get_stage(pipeline, mode, stage);
+}
+
+bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline)
+{
+	/* Android compilation fails if this is made a local variable:
+	 * the stack size on Android is limited to 2k and this structure
+	 * is around 2.5K. Instead of static, malloc could be used, but
+	 * if this call is made too often it would fragment memory,
+	 * whereas a fixed allocation does not. */
+ static struct sh_css_sp_group sp_group;
+ unsigned int thread_id;
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_group;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_group = fw->info.sp.group;
+
+ ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_group),
+ &sp_group, sizeof(struct sh_css_sp_group));
+ return sp_group.pipe[thread_id].num_stages == 0;
+}
+
+struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void)
+{
+ return(&sh_css_sp_group.pipe_io_status);
+}
+
+bool ia_css_pipeline_is_mapped(unsigned int key)
+{
+ bool ret = false;
+
+ IA_CSS_ENTER_PRIVATE("key = %d", key);
+
+ if ((key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) {
+ IA_CSS_ERROR("Invalid key!!");
+ IA_CSS_LEAVE_PRIVATE("return = %d", false);
+ return false;
+ }
+
+ ret = (bool)(pipeline_num_to_sp_thread_map[key] != (unsigned int)
+ PIPELINE_NUM_UNMAPPED);
+
+ IA_CSS_LEAVE_PRIVATE("return = %d", ret);
+ return ret;
+}
+
+/*******************************************************
+*** Static functions
+********************************************************/
+
+/* Pipeline:
+ * To organize the several different binaries for each type of mode,
+ * we use a pipeline. A pipeline contains a number of stages, each with
+ * their own binary and frame pointers.
+ * When stages are added to a pipeline, output frames that are not passed
+ * from outside are automatically allocated.
+ * When input frames are not passed from outside, each stage will use the
+ * output frame of the previous stage as input (the full resolution output,
+ * not the viewfinder output).
+ * Pipelines must be cleaned and re-created when settings of the binaries
+ * change.
+ */
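
As a hedged illustration of the stage list described above, a hypothetical helper (not part of the driver) that walks a finalized pipeline and traces each stage:

    static void dump_stages(const struct ia_css_pipeline *p)
    {
        const struct ia_css_pipeline_stage *s;

        /* Stages form a singly linked list built by create_and_add_stage(). */
        for (s = p->stages; s; s = s->next)
            ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
                                "stage %u: mode %d, binary %p, fw %p\n",
                                s->stage_num, s->mode, s->binary, s->firmware);
    }
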
+static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage)
+{
+ unsigned int i;
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (stage->out_frame_allocated[i]) {
+ ia_css_frame_free(stage->args.out_frame[i]);
+ stage->args.out_frame[i] = NULL;
+ }
+ }
+ if (stage->vf_frame_allocated) {
+ ia_css_frame_free(stage->args.out_vf_frame);
+ stage->args.out_vf_frame = NULL;
+ }
+ kvfree(stage);
+}
+
+static void pipeline_init_sp_thread_map(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < SH_CSS_MAX_SP_THREADS; i++)
+ pipeline_sp_thread_list[i] = PIPELINE_SP_THREAD_EMPTY_TOKEN;
+
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
+ pipeline_num_to_sp_thread_map[i] = PIPELINE_NUM_UNMAPPED;
+}
+
+static void pipeline_map_num_to_sp_thread(unsigned int pipe_num)
+{
+ unsigned int i;
+ bool found_sp_thread = false;
+
+ /* pipe is not mapped to any thread */
+ assert(pipeline_num_to_sp_thread_map[pipe_num]
+ == (unsigned int)PIPELINE_NUM_UNMAPPED);
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ if (pipeline_sp_thread_list[i] ==
+ PIPELINE_SP_THREAD_EMPTY_TOKEN) {
+ pipeline_sp_thread_list[i] =
+ PIPELINE_SP_THREAD_RESERVED_TOKEN;
+ pipeline_num_to_sp_thread_map[pipe_num] = i;
+ found_sp_thread = true;
+ break;
+ }
+ }
+
+	/* Make sure a mapping was found; asserting on the flag below is
+	 * more descriptive than assert(i < SH_CSS_MAX_SP_THREADS). */
+ assert(found_sp_thread);
+}
+
+static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num)
+{
+ unsigned int thread_id;
+
+ assert(pipeline_num_to_sp_thread_map[pipe_num]
+ != (unsigned int)PIPELINE_NUM_UNMAPPED);
+
+ thread_id = pipeline_num_to_sp_thread_map[pipe_num];
+ pipeline_num_to_sp_thread_map[pipe_num] = PIPELINE_NUM_UNMAPPED;
+ pipeline_sp_thread_list[thread_id] = PIPELINE_SP_THREAD_EMPTY_TOKEN;
+}
+
+static int pipeline_stage_create(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **new_stage)
+{
+ int err = 0;
+ struct ia_css_pipeline_stage *stage = NULL;
+ struct ia_css_binary *binary;
+ struct ia_css_frame *vf_frame;
+ struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ const struct ia_css_fw_info *firmware;
+ unsigned int i;
+
+ /* Verify input parameters*/
+ if (!(stage_desc->in_frame) && !(stage_desc->firmware)
+ && (stage_desc->binary) && !(stage_desc->binary->online)) {
+ err = -EINVAL;
+ goto ERR;
+ }
+
+ binary = stage_desc->binary;
+ firmware = stage_desc->firmware;
+ vf_frame = stage_desc->vf_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ out_frame[i] = stage_desc->out_frame[i];
+ }
+
+ stage = kvzalloc(sizeof(*stage), GFP_KERNEL);
+ if (!stage) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+
+ if (firmware) {
+ stage->binary = NULL;
+ stage->binary_info =
+ (struct ia_css_binary_info *)&firmware->info.isp;
+ } else {
+ stage->binary = binary;
+ if (binary)
+ stage->binary_info =
+ (struct ia_css_binary_info *)binary->info;
+ else
+ stage->binary_info = NULL;
+ }
+
+ stage->firmware = firmware;
+ stage->sp_func = stage_desc->sp_func;
+ stage->max_input_width = stage_desc->max_input_width;
+ stage->mode = stage_desc->mode;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ stage->out_frame_allocated[i] = false;
+ stage->vf_frame_allocated = false;
+ stage->next = NULL;
+ sh_css_binary_args_reset(&stage->args);
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (!(out_frame[i]) && (binary)
+ && (binary->out_frame_info[i].res.width)) {
+ err = ia_css_frame_allocate_from_info(&out_frame[i],
+ &binary->out_frame_info[i]);
+ if (err)
+ goto ERR;
+ stage->out_frame_allocated[i] = true;
+ }
+ }
+	/* A VF frame is not needed in the need_pp case; however, the
+	 * capture binary needs a VF frame to write to.
+	 */
+ if (!vf_frame) {
+ if ((binary && binary->vf_frame_info.res.width) ||
+ (firmware && firmware->info.isp.sp.enable.vf_veceven)
+ ) {
+ err = ia_css_frame_allocate_from_info(&vf_frame,
+ &binary->vf_frame_info);
+ if (err)
+ goto ERR;
+ stage->vf_frame_allocated = true;
+ }
+ } else if (vf_frame && binary && binary->vf_frame_info.res.width
+ && !firmware) {
+ /* only mark as allocated if buffer pointer available */
+ if (vf_frame->data != mmgr_NULL)
+ stage->vf_frame_allocated = true;
+ }
+
+ stage->args.in_frame = stage_desc->in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ stage->args.out_frame[i] = out_frame[i];
+ stage->args.out_vf_frame = vf_frame;
+ *new_stage = stage;
+ return err;
+ERR:
+ if (stage)
+ pipeline_stage_destroy(stage);
+ return err;
+}
+
+static const struct ia_css_frame ia_css_default_frame = DEFAULT_FRAME;
+
+static void pipeline_init_defaults(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay)
+{
+ unsigned int i;
+
+ pipeline->pipe_id = pipe_id;
+ pipeline->stages = NULL;
+ pipeline->current_stage = NULL;
+
+ memcpy(&pipeline->in_frame, &ia_css_default_frame,
+ sizeof(ia_css_default_frame));
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ memcpy(&pipeline->out_frame[i], &ia_css_default_frame,
+ sizeof(ia_css_default_frame));
+ memcpy(&pipeline->vf_frame[i], &ia_css_default_frame,
+ sizeof(ia_css_default_frame));
+ }
+ pipeline->num_execs = -1;
+ pipeline->acquire_isp_each_stage = true;
+ pipeline->pipe_num = (uint8_t)pipe_num;
+ pipeline->dvs_frame_delay = dvs_frame_delay;
+}
+
+static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline)
+{
+ struct ia_css_pipeline_stage *stage = NULL;
+ int err;
+
+ assert(pipeline);
+ if (pipeline->pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ /* in preview pipeline, vf_pp stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VF_PP, &stage);
+ if (!err)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_CAPTURE) {
+ /* in capture pipeline, capture_pp stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ &stage);
+ if (!err)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_VIDEO) {
+ /* in video pipeline, video stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VIDEO, &stage);
+ if (!err)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_YUVPP) {
+ /* in yuvpp pipeline, first yuv_scaler stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ &stage);
+ if (!err)
+ stage->enable_zoom = true;
+ }
+}
+
+static void
+ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me,
+ bool continuous)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipeline_configure_inout_port() enter: pipe_id(%d) continuous(%d)\n",
+ me->pipe_id, continuous);
+ switch (me->pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ case IA_CSS_PIPE_ID_VIDEO:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(continuous ? SH_CSS_COPYSINK_TYPE : SH_CSS_HOST_TYPE), 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+	case IA_CSS_PIPE_ID_COPY: /* Copy pipe ports are configured to "offline" mode */
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ if (continuous) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_COPYSINK_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_TAGGERSINK_TYPE, 1);
+ } else {
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(continuous ? SH_CSS_TAGGERSINK_TYPE : SH_CSS_HOST_TYPE),
+ 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(SH_CSS_HOST_TYPE), 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ default:
+ break;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipeline_configure_inout_port() leave: inout_port_config(%x)\n",
+ me->inout_port_config);
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h
new file mode 100644
index 000000000000..c097d912d87f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_QUEUE_H
+#define __IA_CSS_QUEUE_H
+
+#include <platform_support.h>
+#include <type_support.h>
+
+#include "ia_css_queue_comm.h"
+#include "../src/queue_access.h"
+
+/* Local Queue object descriptor */
+struct ia_css_queue_local {
+ ia_css_circbuf_desc_t *cb_desc; /*Circbuf desc for local queues*/
+ ia_css_circbuf_elem_t *cb_elems; /*Circbuf elements*/
+};
+
+typedef struct ia_css_queue_local ia_css_queue_local_t;
+
+/* Handle for queue object*/
+typedef struct ia_css_queue ia_css_queue_t;
+
+/*****************************************************************************
+ * Queue Public APIs
+ *****************************************************************************/
+/* @brief Initialize a local queue instance.
+ *
+ * @param[out] qhandle. Handle to queue instance for use with API
+ * @param[in] desc. Descriptor with queue properties filled-in
+ * @return 0 - Successful init of local queue instance.
+ * @return -EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_local_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_local_t *desc);
+
+/* @brief Initialize a remote queue instance
+ *
+ * @param[out] qhandle. Handle to queue instance for use with API
+ * @param[in] desc. Descriptor with queue properties filled-in
+ * @return 0 - Successful init of remote queue instance.
+ * @return -EINVAL - Invalid argument.
+ */
+int ia_css_queue_remote_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_remote_t *desc);
+
+/* @brief Uninitialize a queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @return 0 - Successful uninit.
+ *
+ */
+int ia_css_queue_uninit(
+ ia_css_queue_t *qhandle);
+
+/* @brief Enqueue an item in the queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[in] item. Object to be enqueued.
+ * @return 0 - Successful enqueue.
+ * @return -EINVAL - Invalid argument.
+ * @return -ENOBUFS - Queue is full.
+ *
+ */
+int ia_css_queue_enqueue(
+ ia_css_queue_t *qhandle,
+ uint32_t item);
+
+/* @brief Dequeue an item from the queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] item. Object to be dequeued into this item.
+
+ * @return 0 - Successful dequeue.
+ * @return -EINVAL - Invalid argument.
+ * @return -ENODATA - Queue is empty.
+ *
+ */
+int ia_css_queue_dequeue(
+ ia_css_queue_t *qhandle,
+ uint32_t *item);
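
A minimal hedged sketch of the local-queue flow; my_cb_desc and my_cb_elems stand for circular-buffer descriptor and element storage prepared with the ia_css_circbuf helpers (their setup is assumed, not shown):

    ia_css_queue_t q;
    ia_css_queue_local_t cfg;
    uint32_t item;
    int err;

    cfg.cb_desc  = &my_cb_desc;    /* hypothetical descriptor storage */
    cfg.cb_elems = my_cb_elems;    /* hypothetical element array      */

    err = ia_css_queue_local_init(&q, &cfg);
    if (err)
        return err;

    err = ia_css_queue_enqueue(&q, 0xCAFE);      /* -ENOBUFS when full  */
    if (!err)
        err = ia_css_queue_dequeue(&q, &item);   /* -ENODATA when empty */

    ia_css_queue_uninit(&q);
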
+
+/* @brief Check if the queue is empty
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out]	is_empty True if empty, false if not.
+ * @return 0 - Successful access state.
+ * @return -EINVAL - Invalid argument.
+ * @return -ENOSYS - Function not implemented.
+ *
+ */
+int ia_css_queue_is_empty(
+ ia_css_queue_t *qhandle,
+ bool *is_empty);
+
+/* @brief Check if the queue is full
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out]	is_full True if full, false if not.
+ * @return 0 - Successfully access state.
+ * @return -EINVAL - Invalid argument.
+ * @return -ENOSYS - Function not implemented.
+ *
+ */
+int ia_css_queue_is_full(
+ ia_css_queue_t *qhandle,
+ bool *is_full);
+
+/* @brief Get used space in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out]	size Number of elements currently in the queue
+ * @return 0 - Successfully access state.
+ * @return -EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_get_used_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+/* @brief Get free space in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out]	size Number of free elements in the queue
+ * @return 0 - Successfully access state.
+ * @return -EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_get_free_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+/* @brief Peek at an element in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[in] offset Offset of element to peek,
+ * starting from head of queue
+ * @param[out]	element Value of the element returned
+ * @return 0 - Successfully access state.
+ * @return -EINVAL - Invalid argument.
+ *
+ */
+int ia_css_queue_peek(
+ ia_css_queue_t *qhandle,
+ u32 offset,
+ uint32_t *element);
+
+/* @brief Get the usable size for the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] size Size value to be returned here.
+ * @return 0 - Successful get size.
+ * @return -EINVAL - Invalid argument.
+ * @return -ENOSYS - Function not implemented.
+ *
+ */
+int ia_css_queue_get_size(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+#endif /* __IA_CSS_QUEUE_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h
new file mode 100644
index 000000000000..0bed6425a07f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_QUEUE_COMM_H
+#define __IA_CSS_QUEUE_COMM_H
+
+#include "type_support.h"
+#include "ia_css_circbuf.h"
+/*****************************************************************************
+ * Queue Public Data Structures
+ *****************************************************************************/
+
+/* Queue location specifier */
+/* Avoiding enums to save space */
+#define IA_CSS_QUEUE_LOC_HOST 0
+#define IA_CSS_QUEUE_LOC_SP 1
+#define IA_CSS_QUEUE_LOC_ISP 2
+
+/* Queue type specifier */
+/* Avoiding enums to save space */
+#define IA_CSS_QUEUE_TYPE_LOCAL 0
+#define IA_CSS_QUEUE_TYPE_REMOTE 1
+
+/* For DDR-allocated queues, allocate at least this many elements.
+ * DDR to SP DMEM DMA transfers need a 32-byte aligned address.
+ * Since each element is 4 bytes, 8 elements must be DMAed to
+ * access a single element. */
+#define IA_CSS_MIN_ELEM_COUNT 8
+#define IA_CSS_DMA_XFER_MASK (IA_CSS_MIN_ELEM_COUNT - 1)
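
Given this constraint, a requested capacity is normally rounded up to whole DMA transfers. A hypothetical helper (not part of the driver) showing that rounding:

    static inline u32 round_up_elems(u32 requested)
    {
        /* Round up to the 8-element (32-byte) DMA transfer granularity. */
        return (requested + IA_CSS_DMA_XFER_MASK) & ~(u32)IA_CSS_DMA_XFER_MASK;
    }

    /* round_up_elems(1) == 8, round_up_elems(12) == 16 */
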
+
+/* Remote Queue object descriptor */
+struct ia_css_queue_remote {
+ u32 cb_desc_addr; /*Circbuf desc address for remote queues*/
+ u32 cb_elems_addr; /*Circbuf elements addr for remote queue*/
+ u8 location; /* Cell location for queue */
+ u8 proc_id; /* Processor id for queue access */
+};
+
+typedef struct ia_css_queue_remote ia_css_queue_remote_t;
+
+#endif /* __IA_CSS_QUEUE_COMM_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c
new file mode 100644
index 000000000000..afe77d4373f8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_queue.h"
+#include <math_support.h>
+#include <ia_css_circbuf.h>
+#include <ia_css_circbuf_desc.h>
+#include "queue_access.h"
+
+/*****************************************************************************
+ * Queue Public APIs
+ *****************************************************************************/
+int ia_css_queue_local_init(ia_css_queue_t *qhandle, ia_css_queue_local_t *desc)
+{
+ if (NULL == qhandle || NULL == desc
+ || NULL == desc->cb_elems || NULL == desc->cb_desc) {
+ /* Invalid parameters, return error*/
+ return -EINVAL;
+ }
+
+ /* Mark the queue as Local */
+ qhandle->type = IA_CSS_QUEUE_TYPE_LOCAL;
+
+ /* Create a local circular buffer queue*/
+ ia_css_circbuf_create(&qhandle->desc.cb_local,
+ desc->cb_elems,
+ desc->cb_desc);
+
+ return 0;
+}
+
+int ia_css_queue_remote_init(ia_css_queue_t *qhandle, ia_css_queue_remote_t *desc)
+{
+ if (NULL == qhandle || NULL == desc) {
+ /* Invalid parameters, return error*/
+ return -EINVAL;
+ }
+
+ /* Mark the queue as remote*/
+ qhandle->type = IA_CSS_QUEUE_TYPE_REMOTE;
+
+ /* Copy over the local queue descriptor*/
+ qhandle->location = desc->location;
+ qhandle->proc_id = desc->proc_id;
+ qhandle->desc.remote.cb_desc_addr = desc->cb_desc_addr;
+ qhandle->desc.remote.cb_elems_addr = desc->cb_elems_addr;
+
+ /* If queue is remote, we let the local processor
+ * do its init, before using it. This is just to get us
+ * started, we can remove this restriction as we go ahead
+ */
+
+ return 0;
+}
+
+int ia_css_queue_uninit(ia_css_queue_t *qhandle)
+{
+ if (!qhandle)
+ return -EINVAL;
+
+ /* Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+		/* Local queues create their circular buffer at init; destroy it */
+ ia_css_circbuf_destroy(&qhandle->desc.cb_local);
+ }
+
+ return 0;
+}
+
+int ia_css_queue_enqueue(ia_css_queue_t *qhandle, uint32_t item)
+{
+ int error;
+
+ if (!qhandle)
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ if (ia_css_circbuf_is_full(&qhandle->desc.cb_local)) {
+ /* Cannot push the element. Return*/
+ return -ENOBUFS;
+ }
+
+ /* Push the element*/
+ ia_css_circbuf_push(&qhandle->desc.cb_local, item);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ /* a. Load the queue cb_desc from remote */
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ if (ia_css_circbuf_desc_is_full(&cb_desc))
+ return -ENOBUFS;
+
+ cb_elem.val = item;
+
+ error = ia_css_queue_item_store(qhandle, cb_desc.end, &cb_elem);
+ if (error != 0)
+ return error;
+
+ cb_desc.end = (cb_desc.end + 1) % cb_desc.size;
+
+ /* c. Store the queue object */
+ /* Set only fields requiring update with
+ * valid value. Avoids unnecessary calls
+ * to load/store functions
+ */
+ ignore_desc_flags = QUEUE_IGNORE_SIZE_START_STEP_FLAGS;
+
+ error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_dequeue(ia_css_queue_t *qhandle, uint32_t *item)
+{
+ int error;
+
+ if (!qhandle || NULL == item)
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ if (ia_css_circbuf_is_empty(&qhandle->desc.cb_local)) {
+ /* Nothing to pop. Return empty queue*/
+ return -ENODATA;
+ }
+
+ *item = ia_css_circbuf_pop(&qhandle->desc.cb_local);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ if (ia_css_circbuf_desc_is_empty(&cb_desc))
+ return -ENODATA;
+
+ error = ia_css_queue_item_load(qhandle, cb_desc.start, &cb_elem);
+ if (error != 0)
+ return error;
+
+ *item = cb_elem.val;
+
+ cb_desc.start = OP_std_modadd(cb_desc.start, 1, cb_desc.size);
+
+ /* c. Store the queue object */
+ /* Set only fields requiring update with
+ * valid value. Avoids unnecessary calls
+ * to load/store functions
+ */
+ ignore_desc_flags = QUEUE_IGNORE_SIZE_END_STEP_FLAGS;
+ error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+ }
+ return 0;
+}
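+
+/*
+ * Illustrative usage sketch (editorial, hypothetical caller): enqueue
+ * reports a full queue as -ENOBUFS and dequeue reports an empty queue as
+ * -ENODATA, so callers typically retry or back off on those codes:
+ *
+ *	u32 item;
+ *
+ *	if (ia_css_queue_enqueue(&q, token) == -ENOBUFS)
+ *		return 0;
+ *	if (ia_css_queue_dequeue(&q, &item) == -ENODATA)
+ *		return 0;
+ */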
+
+int ia_css_queue_is_full(ia_css_queue_t *qhandle, bool *is_full)
+{
+ int error;
+
+ if ((!qhandle) || (!is_full))
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *is_full = ia_css_circbuf_is_full(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *is_full = ia_css_circbuf_desc_is_full(&cb_desc);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int ia_css_queue_get_free_space(ia_css_queue_t *qhandle, uint32_t *size)
+{
+ int error;
+
+ if ((!qhandle) || (!size))
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *size = ia_css_circbuf_get_free_elems(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *size = ia_css_circbuf_desc_get_free_elems(&cb_desc);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int ia_css_queue_get_used_space(ia_css_queue_t *qhandle, uint32_t *size)
+{
+ int error;
+
+ if ((!qhandle) || (!size))
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *size = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *size = ia_css_circbuf_desc_get_num_elems(&cb_desc);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int ia_css_queue_peek(ia_css_queue_t *qhandle, u32 offset, uint32_t *element)
+{
+ u32 num_elems;
+ int error;
+
+ if ((!qhandle) || (!element))
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ /* Check if offset is valid */
+ num_elems = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
+ if (offset > num_elems)
+ return -EINVAL;
+
+ *element = ia_css_circbuf_peek_from_start(&qhandle->desc.cb_local, (int)offset);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* Check if offset is valid */
+ num_elems = ia_css_circbuf_desc_get_num_elems(&cb_desc);
+ if (offset > num_elems)
+ return -EINVAL;
+
+ offset = OP_std_modadd(cb_desc.start, offset, cb_desc.size);
+ error = ia_css_queue_item_load(qhandle, (uint8_t)offset, &cb_elem);
+ if (error != 0)
+ return error;
+
+ *element = cb_elem.val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int ia_css_queue_is_empty(ia_css_queue_t *qhandle, bool *is_empty)
+{
+ int error;
+
+ if ((!qhandle) || (!is_empty))
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *is_empty = ia_css_circbuf_is_empty(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *is_empty = ia_css_circbuf_desc_is_empty(&cb_desc);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int ia_css_queue_get_size(ia_css_queue_t *qhandle, uint32_t *size)
+{
+ int error;
+
+ if ((!qhandle) || (!size))
+ return -EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ /* Return maximum usable capacity */
+ *size = ia_css_circbuf_get_size(&qhandle->desc.cb_local);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ u32 ignore_desc_flags = QUEUE_IGNORE_START_END_STEP_FLAGS;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* Return maximum usable capacity */
+ *size = cb_desc.size;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
new file mode 100644
index 000000000000..2591d157870c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "type_support.h"
+#include "queue_access.h"
+#include "ia_css_circbuf.h"
+#include "sp.h"
+#include "assert_support.h"
+
+int ia_css_queue_load(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags)
+{
+ if (!rdesc || !cb_desc)
+ return -EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG)) {
+ cb_desc->size = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, size));
+
+ if (cb_desc->size == 0) {
+				/*
+				 * Add back the workaround that was removed
+				 * while refactoring queues: when reading the
+				 * size through sp_dmem_load_*(), we sometimes
+				 * get back zero, which causes a division-by-0
+				 * exception because the size is used in a
+				 * modulo operation.
+				 */
+ return -EDOM;
+ }
+ }
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG))
+ cb_desc->start = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, start));
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG))
+ cb_desc->end = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, end));
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG))
+ cb_desc->step = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, step));
+
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ /* doing DMA transfer of entire structure */
+ hmm_load(rdesc->desc.remote.cb_desc_addr,
+ (void *)cb_desc,
+ sizeof(ia_css_circbuf_desc_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_store(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags)
+{
+ if (!rdesc || !cb_desc)
+ return -EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, size),
+ cb_desc->size);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, start),
+ cb_desc->start);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, end),
+ cb_desc->end);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, step),
+ cb_desc->step);
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ /* doing DMA transfer of entire structure */
+ hmm_store(rdesc->desc.remote.cb_desc_addr,
+ (void *)cb_desc,
+ sizeof(ia_css_circbuf_desc_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_item_load(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item)
+{
+ if (!rdesc || !item)
+ return -EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ sp_dmem_load(rdesc->proc_id,
+ rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ hmm_load(rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ (void *)item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_item_store(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item)
+{
+ if (!rdesc || !item)
+ return -EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ sp_dmem_store(rdesc->proc_id,
+ rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ hmm_store(rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ (void *)item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h
new file mode 100644
index 000000000000..d62133a8fe6f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __QUEUE_ACCESS_H
+#define __QUEUE_ACCESS_H
+
+#include <linux/errno.h>
+
+#include <type_support.h>
+#include <ia_css_queue_comm.h>
+#include <ia_css_circbuf.h>
+
+#define QUEUE_IGNORE_START_FLAG 0x0001
+#define QUEUE_IGNORE_END_FLAG 0x0002
+#define QUEUE_IGNORE_SIZE_FLAG 0x0004
+#define QUEUE_IGNORE_STEP_FLAG 0x0008
+#define QUEUE_IGNORE_DESC_FLAGS_MAX 0x000f
+
+#define QUEUE_IGNORE_SIZE_START_STEP_FLAGS \
+ (QUEUE_IGNORE_SIZE_FLAG | \
+ QUEUE_IGNORE_START_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_IGNORE_SIZE_END_STEP_FLAGS \
+ (QUEUE_IGNORE_SIZE_FLAG | \
+ QUEUE_IGNORE_END_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_IGNORE_START_END_STEP_FLAGS \
+ (QUEUE_IGNORE_START_FLAG | \
+ QUEUE_IGNORE_END_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_CB_DESC_INIT(cb_desc) \
+ do { \
+ (cb_desc)->size = 0; \
+ (cb_desc)->step = 0; \
+ (cb_desc)->start = 0; \
+ (cb_desc)->end = 0; \
+ } while (0)
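+
+/*
+ * Note (editorial, illustrative): the QUEUE_IGNORE_* flags select which
+ * descriptor fields ia_css_queue_load()/ia_css_queue_store() actually
+ * transfer. A remote enqueue, for example, reads everything except "step"
+ * but only writes back "end":
+ *
+ *	ia_css_queue_load(q, &cb_desc, QUEUE_IGNORE_STEP_FLAG);
+ *	cb_desc.end = (cb_desc.end + 1) % cb_desc.size;
+ *	ia_css_queue_store(q, &cb_desc, QUEUE_IGNORE_SIZE_START_STEP_FLAGS);
+ */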
+
+struct ia_css_queue {
+ u8 type; /* Specify remote/local type of access */
+ u8 location; /* Cell location for queue */
+ u8 proc_id; /* Processor id for queue access */
+ union {
+ ia_css_circbuf_t cb_local;
+ struct {
+ u32 cb_desc_addr; /*Circbuf desc address for remote queues*/
+ u32 cb_elems_addr; /*Circbuf elements addr for remote queue*/
+ } remote;
+ } desc;
+};
+
+int ia_css_queue_load(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags);
+
+int ia_css_queue_store(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags);
+
+int ia_css_queue_item_load(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item);
+
+int ia_css_queue_item_store(
+ struct ia_css_queue *rdesc,
+ u8 position,
+ ia_css_circbuf_elem_t *item);
+
+#endif /* __QUEUE_ACCESS_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h
new file mode 100644
index 000000000000..00b903d950df
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_RMGR_H
+#define _IA_CSS_RMGR_H
+
+#include <ia_css_err.h>
+
+#ifndef __INLINE_RMGR__
+#define STORAGE_CLASS_RMGR_H extern
+#define STORAGE_CLASS_RMGR_C
+#else /* __INLINE_RMGR__ */
+#define STORAGE_CLASS_RMGR_H static inline
+#define STORAGE_CLASS_RMGR_C static inline
+#endif /* __INLINE_RMGR__ */
+
+/**
+ * @brief Initialize resource manager (host/common)
+ */
+int ia_css_rmgr_init(void);
+
+/**
+ * @brief Uninitialize resource manager (host/common)
+ */
+void ia_css_rmgr_uninit(void);
+
+/*****************************************************************
+ * Interface definition - resource type (host/common)
+ *****************************************************************
+ *
+ * struct ia_css_rmgr_<type>_pool;
+ * struct ia_css_rmgr_<type>_handle;
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_init_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool,
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool,
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ *****************************************************************
+ * Interface definition - refcounting (host/common)
+ *****************************************************************
+ *
+ * void ia_css_rmgr_refcount_retain_<type>(
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ * void ia_css_rmgr_refcount_release_<type>(
+ * struct ia_css_rmgr_<type>_handle **handle);
+ */
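+
+/*
+ * The vbuf resource included below is an instance of this pattern: it
+ * provides ia_css_rmgr_init_vbuf(), ia_css_rmgr_uninit_vbuf(),
+ * ia_css_rmgr_acq_vbuf(), ia_css_rmgr_rel_vbuf() and the matching
+ * refcount helpers.
+ */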
+
+#include "ia_css_rmgr_vbuf.h"
+
+#endif /* _IA_CSS_RMGR_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h
new file mode 100644
index 000000000000..6820bfc77432
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef _IA_CSS_RMGR_VBUF_H
+#define _IA_CSS_RMGR_VBUF_H
+
+#include "ia_css_rmgr.h"
+#include <type_support.h>
+#include <ia_css_types.h>
+#include <system_local.h>
+
+/**
+ * @brief Data structure for the resource handle (host, vbuf)
+ */
+struct ia_css_rmgr_vbuf_handle {
+ ia_css_ptr vptr;
+ u8 count;
+ u32 size;
+};
+
+/**
+ * @brief Data structure for the resource pool (host, vbuf)
+ */
+struct ia_css_rmgr_vbuf_pool {
+ u8 copy_on_write;
+ u8 recycle;
+ u32 size;
+ u32 index;
+ struct ia_css_rmgr_vbuf_handle **handles;
+};
+
+/**
+ * @brief VBUF resource pools
+ */
+extern struct ia_css_rmgr_vbuf_pool *vbuf_ref;
+extern struct ia_css_rmgr_vbuf_pool *vbuf_write;
+extern struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool;
+
+/**
+ * @brief Initialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+STORAGE_CLASS_RMGR_H int ia_css_rmgr_init_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool);
+
+/**
+ * @brief Uninitialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool);
+
+/**
+ * @brief Acquire a handle from the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Release a handle to the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle);
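+
+/*
+ * Illustrative usage sketch (editorial, hypothetical size): callers
+ * typically start from a stack handle that only carries the requested size
+ * and let the pool hand back an internally managed handle:
+ *
+ *	struct ia_css_rmgr_vbuf_handle h = { .size = 4096 };
+ *	struct ia_css_rmgr_vbuf_handle *hp = &h;
+ *
+ *	ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &hp);
+ *	...
+ *	ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &hp);
+ */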
+
+/**
+ * @brief Retain the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Release the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle);
+
+#endif /* _IA_CSS_RMGR_VBUF_H */
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c
new file mode 100644
index 000000000000..3e014fd0e548
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "ia_css_rmgr.h"
+
+int ia_css_rmgr_init(void)
+{
+ int err = 0;
+
+ err = ia_css_rmgr_init_vbuf(vbuf_ref);
+ if (!err)
+ err = ia_css_rmgr_init_vbuf(vbuf_write);
+ if (!err)
+ err = ia_css_rmgr_init_vbuf(hmm_buffer_pool);
+ if (err)
+ ia_css_rmgr_uninit();
+ return err;
+}
+
+/*
+ * @brief Uninitialize resource pool (host)
+ */
+void ia_css_rmgr_uninit(void)
+{
+ ia_css_rmgr_uninit_vbuf(hmm_buffer_pool);
+ ia_css_rmgr_uninit_vbuf(vbuf_write);
+ ia_css_rmgr_uninit_vbuf(vbuf_ref);
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
new file mode 100644
index 000000000000..940b28c66e99
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+#include "ia_css_rmgr.h"
+
+#include <type_support.h>
+#include <assert_support.h>
+#include <platform_support.h> /* memset */
+#include <ia_css_debug.h>
+
+/*
+ * @brief VBUF resource handles
+ */
+#define NUM_HANDLES 1000
+static struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES];
+
+/*
+ * @brief VBUF resource pool - refpool
+ */
+static struct ia_css_rmgr_vbuf_pool refpool;
+
+/*
+ * @brief VBUF resource pool - writepool
+ */
+static struct ia_css_rmgr_vbuf_pool writepool = {
+ .copy_on_write = true,
+};
+
+/*
+ * @brief VBUF resource pool - hmmbufferpool
+ */
+static struct ia_css_rmgr_vbuf_pool hmmbufferpool = {
+ .copy_on_write = true,
+ .recycle = true,
+ .size = 32,
+};
+
+struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool;
+struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool;
+struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool;
+
+/*
+ * @brief Initialize the reference count (host, vbuf)
+ */
+static void rmgr_refcount_init_vbuf(void)
+{
+ /* initialize the refcount table */
+ memset(&handle_table, 0, sizeof(handle_table));
+}
+
+/*
+ * @brief Retain the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
+{
+ int i;
+ struct ia_css_rmgr_vbuf_handle *h;
+
+ if ((!handle) || (!*handle)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+ /* new vbuf to count on */
+ if ((*handle)->count == 0) {
+ h = *handle;
+ *handle = NULL;
+ for (i = 0; i < NUM_HANDLES; i++) {
+ if (handle_table[i].count == 0) {
+ *handle = &handle_table[i];
+ break;
+ }
+ }
+		/* If the loop does not break and *handle == NULL,
+		 * this is an error; report it.
+		 */
+ if (!*handle) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_i_host_refcount_retain_vbuf() failed to find empty slot!\n");
+ return;
+ }
+ (*handle)->vptr = h->vptr;
+ (*handle)->size = h->size;
+ }
+ (*handle)->count++;
+}
+
+/*
+ * @brief Release the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
+{
+	if (!handle || !*handle || (*handle)->count == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "%s invalid arguments!\n", __func__);
+ return;
+ }
+ /* decrease reference count */
+ (*handle)->count--;
+ /* remove from admin */
+ if ((*handle)->count == 0) {
+ (*handle)->vptr = 0x0;
+ (*handle)->size = 0;
+ *handle = NULL;
+ }
+}
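+
+/*
+ * Note (editorial): both refcount helpers operate on the static handle_table
+ * above. The first retain of a caller-local handle copies its vptr/size into
+ * a free table slot and redirects *handle to that slot; release decrements
+ * the count and clears the slot (and *handle) once it reaches zero.
+ */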
+
+/*
+ * @brief Initialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
+{
+ int err = 0;
+ size_t bytes_needed;
+
+ rmgr_refcount_init_vbuf();
+ assert(pool);
+ if (!pool)
+ return -EINVAL;
+ /* initialize the recycle pool if used */
+ if (pool->recycle && pool->size) {
+ /* allocate memory for storing the handles */
+		bytes_needed = sizeof(void *) * pool->size;
+ pool->handles = kvmalloc(bytes_needed, GFP_KERNEL);
+ if (pool->handles)
+ memset(pool->handles, 0, bytes_needed);
+ else
+ err = -ENOMEM;
+ } else {
+ /* just in case, set the size to 0 */
+ pool->size = 0;
+ pool->handles = NULL;
+ }
+ return err;
+}
+
+/*
+ * @brief Uninitialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
+{
+ u32 i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
+ if (!pool) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "%s NULL argument\n", __func__);
+ return;
+ }
+ if (pool->handles) {
+ /* free the hmm buffers */
+ for (i = 0; i < pool->size; i++) {
+ if (pool->handles[i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ " freeing/releasing %x (count=%d)\n",
+ pool->handles[i]->vptr,
+ pool->handles[i]->count);
+ /* free memory */
+ hmm_free(pool->handles[i]->vptr);
+ /* remove from refcount admin */
+ ia_css_rmgr_refcount_release_vbuf(&pool->handles[i]);
+ }
+ }
+ /* now free the pool handles list */
+ kvfree(pool->handles);
+ pool->handles = NULL;
+ }
+}
+
+/*
+ * @brief Push a handle to the pool
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+static
+void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ u32 i;
+ bool success = false;
+
+ assert(pool);
+ assert(pool->recycle);
+ assert(pool->handles);
+ assert(handle);
+ for (i = 0; i < pool->size; i++) {
+ if (!pool->handles[i]) {
+ ia_css_rmgr_refcount_retain_vbuf(handle);
+ pool->handles[i] = *handle;
+ success = true;
+ break;
+ }
+ }
+ assert(success);
+}
+
+/*
+ * @brief Pop a handle from the pool
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+static
+void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ u32 i;
+
+ assert(pool);
+ assert(pool->recycle);
+ assert(pool->handles);
+ assert(handle);
+ assert(*handle);
+ for (i = 0; i < pool->size; i++) {
+ if ((pool->handles[i]) &&
+ (pool->handles[i]->size == (*handle)->size)) {
+ *handle = pool->handles[i];
+ pool->handles[i] = NULL;
+			/* don't release it, we are returning it...
+ * ia_css_rmgr_refcount_release_vbuf(handle);
+ */
+ return;
+ }
+ }
+}
+
+/*
+ * @brief Acquire a handle from the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ if ((!pool) || (!handle) || (!*handle)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+
+ if (pool->copy_on_write) {
+ struct ia_css_rmgr_vbuf_handle *new_handle;
+ struct ia_css_rmgr_vbuf_handle h = { 0 };
+
+ /* only one reference, reuse (no new retain) */
+ if ((*handle)->count == 1)
+ return;
+ /* more than one reference, release current buffer */
+ if ((*handle)->count > 1) {
+			/* keep the requested size; the buffer itself is re-acquired below */
+ h.vptr = 0x0;
+ h.size = (*handle)->size;
+ /* release ref to current buffer */
+ ia_css_rmgr_refcount_release_vbuf(handle);
+ new_handle = &h;
+ } else {
+ new_handle = *handle;
+ }
+ /* get new buffer for needed size */
+ if (new_handle->vptr == 0x0) {
+ if (pool->recycle) {
+ /* try and pop from pool */
+ rmgr_pop_handle(pool, &new_handle);
+ }
+ if (new_handle->vptr == 0x0) {
+ /* we need to allocate */
+ new_handle->vptr = hmm_alloc(new_handle->size);
+ } else {
+ /* we popped a buffer */
+ *handle = new_handle;
+ return;
+ }
+ }
+ /* Note that new_handle will change to an internally maintained one */
+ ia_css_rmgr_refcount_retain_vbuf(&new_handle);
+ *handle = new_handle;
+ return;
+ }
+ /* Note that handle will change to an internally maintained one */
+ ia_css_rmgr_refcount_retain_vbuf(handle);
+}
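+
+/*
+ * Note (editorial): for copy-on-write pools the logic above boils down to:
+ * a handle with a single reference is reused as-is; a shared handle is
+ * detached (its reference dropped) and a fresh buffer of the same size is
+ * taken from the recycle pool when possible, or allocated from hmm
+ * otherwise, before being retained again.
+ */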
+
+/*
+ * @brief Release a handle to the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_rel_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ if ((!pool) || (!handle) || (!*handle)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+ /* release the handle */
+ if ((*handle)->count == 1) {
+ if (!pool->recycle) {
+ /* non recycling pool, free mem */
+ hmm_free((*handle)->vptr);
+ } else {
+ /* recycle to pool */
+ rmgr_push_handle(pool, handle);
+ }
+ }
+ ia_css_rmgr_refcount_release_vbuf(handle);
+ *handle = NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h
new file mode 100644
index 000000000000..3cd69284f4b4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SPCTRL_H__
+#define __IA_CSS_SPCTRL_H__
+
+#include <system_global.h>
+#include <ia_css_err.h>
+#include "ia_css_spctrl_comm.h"
+
+typedef struct {
+	u32        ddr_data_offset;       /** position of data in DDR */
+ u32 dmem_data_addr; /** data segment address in dmem */
+ u32 dmem_bss_addr; /** bss segment address in dmem */
+ u32 data_size; /** data segment size */
+ u32 bss_size; /** bss segment size */
+	u32 spctrl_config_dmem_addr; /** location of dmem_cfg in SP dmem */
+	u32 spctrl_state_dmem_addr;  /** location of state in SP dmem */
+	unsigned int sp_entry;           /** entry function ptr on SP */
+ const void *code; /** location of firmware */
+ u32 code_size;
+ char *program_name; /** not used on hardware, only for simulation */
+} ia_css_spctrl_cfg;
+
+/* Get the code addr in DDR of SP */
+ia_css_ptr get_sp_code_addr(sp_ID_t sp_id);
+
+/*! Load firmware onto the specified SP
+*/
+int ia_css_spctrl_load_fw(sp_ID_t sp_id,
+ ia_css_spctrl_cfg *spctrl_cfg);
+
+/* ISP2401 */
+/*! Setup registers for reloading FW */
+void sh_css_spctrl_reload_fw(sp_ID_t sp_id);
+
+/*! Unload/release any memory allocated to hold the firmware
+*/
+int ia_css_spctrl_unload_fw(sp_ID_t sp_id);
+
+/*! Initialize dmem_cfg in SP dmem and start SP program
+*/
+int ia_css_spctrl_start(sp_ID_t sp_id);
+
+/*! stop spctrl
+*/
+int ia_css_spctrl_stop(sp_ID_t sp_id);
+
+/*! Query the state of SP
+*/
+ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id);
+
+/*! Check if SP is idle/ready
+*/
+int ia_css_spctrl_is_idle(sp_ID_t sp_id);
+
+#endif /* __IA_CSS_SPCTRL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h
new file mode 100644
index 000000000000..f358785fa5b6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_SPCTRL_COMM_H__
+#define __IA_CSS_SPCTRL_COMM_H__
+
+#include <linux/build_bug.h>
+
+#include <type_support.h>
+
+/* state of SP */
+typedef enum {
+ IA_CSS_SP_SW_TERMINATED = 0,
+ IA_CSS_SP_SW_INITIALIZED,
+ IA_CSS_SP_SW_CONNECTED,
+ IA_CSS_SP_SW_RUNNING
+} ia_css_spctrl_sp_sw_state;
+
+/* Structure to encapsulate required arguments for
+ * initialization of SP DMEM using the SP itself
+ */
+struct ia_css_sp_init_dmem_cfg {
+ ia_css_ptr ddr_data_addr; /** data segment address in ddr */
+ u32 dmem_data_addr; /** data segment address in dmem */
+ u32 dmem_bss_addr; /** bss segment address in dmem */
+ u32 data_size; /** data segment size */
+ u32 bss_size; /** bss segment size */
+	sp_ID_t sp_id; /** SP id */
+};
+
+#define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \
+ (1 * SIZE_OF_IA_CSS_PTR) + \
+ (4 * sizeof(uint32_t)) + \
+ (1 * sizeof(sp_ID_t))
+
+static_assert(sizeof(struct ia_css_sp_init_dmem_cfg) == SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT);
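+
+/*
+ * Note (editorial): the size check above guards against the host compiler
+ * introducing padding, presumably so that the DMEM configuration written by
+ * the host keeps the layout the SP firmware expects.
+ */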
+
+#endif /* __IA_CSS_SPCTRL_COMM_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c
new file mode 100644
index 000000000000..78f00f07baaa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "ia_css_types.h"
+#define __INLINE_SP__
+#include "sp.h"
+
+#include "assert_support.h"
+#include "ia_css_spctrl.h"
+#include "ia_css_debug.h"
+
+struct spctrl_context_info {
+ struct ia_css_sp_init_dmem_cfg dmem_config;
+ u32 spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
+ u32 spctrl_state_dmem_addr;
+ unsigned int sp_entry; /* entry function ptr on SP */
+ ia_css_ptr code_addr; /* sp firmware location in host mem-DDR*/
+ u32 code_size;
+ char *program_name; /* used in case of PLATFORM_SIM */
+};
+
+static struct spctrl_context_info spctrl_cofig_info[N_SP_ID];
+static bool spctrl_loaded[N_SP_ID] = {0};
+
+/* Load firmware */
+int ia_css_spctrl_load_fw(sp_ID_t sp_id, ia_css_spctrl_cfg *spctrl_cfg)
+{
+ ia_css_ptr code_addr = mmgr_NULL;
+ struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;
+
+ if ((sp_id >= N_SP_ID) || (!spctrl_cfg))
+ return -EINVAL;
+
+ spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
+
+ init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
+ init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
+ init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr;
+ init_dmem_cfg->data_size = spctrl_cfg->data_size;
+ init_dmem_cfg->bss_size = spctrl_cfg->bss_size;
+ init_dmem_cfg->sp_id = sp_id;
+
+ spctrl_cofig_info[sp_id].spctrl_config_dmem_addr =
+ spctrl_cfg->spctrl_config_dmem_addr;
+ spctrl_cofig_info[sp_id].spctrl_state_dmem_addr =
+ spctrl_cfg->spctrl_state_dmem_addr;
+
+ /* store code (text + icache) and data to DDR
+ *
+	 * Data used to be stored separately because of access-alignment
+	 * constraints; fix the FW generation instead.
+ */
+ code_addr = hmm_alloc(spctrl_cfg->code_size);
+ if (code_addr == mmgr_NULL)
+ return -ENOMEM;
+ hmm_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);
+
+ if (sizeof(ia_css_ptr) > sizeof(hrt_data)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "size of ia_css_ptr can not be greater than hrt_data\n");
+ hmm_free(code_addr);
+ code_addr = mmgr_NULL;
+ return -EINVAL;
+ }
+
+ init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset;
+ if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "DDR address pointer is not properly aligned for DMA transfer\n");
+ hmm_free(code_addr);
+ code_addr = mmgr_NULL;
+ return -EINVAL;
+ }
+
+ spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
+ spctrl_cofig_info[sp_id].code_addr = code_addr;
+ spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;
+
+ /* now we program the base address into the icache and
+ * invalidate the cache.
+ */
+ sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
+ (hrt_data)spctrl_cofig_info[sp_id].code_addr);
+ sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
+ spctrl_loaded[sp_id] = true;
+ return 0;
+}
+
+/* ISP2401 */
+/* reload pre-loaded FW */
+void sh_css_spctrl_reload_fw(sp_ID_t sp_id)
+{
+ /* now we program the base address into the icache and
+ * invalidate the cache.
+ */
+ sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
+ (hrt_data)spctrl_cofig_info[sp_id].code_addr);
+ sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
+ spctrl_loaded[sp_id] = true;
+}
+
+ia_css_ptr get_sp_code_addr(sp_ID_t sp_id)
+{
+ return spctrl_cofig_info[sp_id].code_addr;
+}
+
+int ia_css_spctrl_unload_fw(sp_ID_t sp_id)
+{
+	if (sp_id >= N_SP_ID || !spctrl_loaded[sp_id])
+ return -EINVAL;
+
+	/* free up the resource */
+ if (spctrl_cofig_info[sp_id].code_addr) {
+ hmm_free(spctrl_cofig_info[sp_id].code_addr);
+ spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
+ }
+ spctrl_loaded[sp_id] = false;
+ return 0;
+}
+
+/* Initialize dmem_cfg in SP dmem and start SP program*/
+int ia_css_spctrl_start(sp_ID_t sp_id)
+{
+	if (sp_id >= N_SP_ID || !spctrl_loaded[sp_id])
+ return -EINVAL;
+
+ /* Set descr in the SP to initialize the SP DMEM */
+ /*
+	 * The FW stores user-space pointers to the FW; the ISP pointer
+	 * is only available here.
+ */
+ assert(sizeof(unsigned int) <= sizeof(hrt_data));
+
+ sp_dmem_store(sp_id,
+ spctrl_cofig_info[sp_id].spctrl_config_dmem_addr,
+ &spctrl_cofig_info[sp_id].dmem_config,
+ sizeof(spctrl_cofig_info[sp_id].dmem_config));
+ /* set the start address */
+ sp_ctrl_store(sp_id, SP_START_ADDR_REG,
+ (hrt_data)spctrl_cofig_info[sp_id].sp_entry);
+ sp_ctrl_setbit(sp_id, SP_SC_REG, SP_RUN_BIT);
+ sp_ctrl_setbit(sp_id, SP_SC_REG, SP_START_BIT);
+ return 0;
+}
+
+/* Query the state of the SP */
+ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id)
+{
+ ia_css_spctrl_sp_sw_state state = 0;
+ unsigned int HIVE_ADDR_sp_sw_state;
+
+ if (sp_id >= N_SP_ID)
+ return IA_CSS_SP_SW_TERMINATED;
+
+ HIVE_ADDR_sp_sw_state = spctrl_cofig_info[sp_id].spctrl_state_dmem_addr;
+	(void)HIVE_ADDR_sp_sw_state; /* Suppress warnings in CRUN */
+ if (sp_id == SP0_ID)
+ state = sp_dmem_load_uint32(sp_id, (unsigned int)sp_address_of(sp_sw_state));
+ return state;
+}
+
+int ia_css_spctrl_is_idle(sp_ID_t sp_id)
+{
+ int state = 0;
+
+ assert(sp_id < N_SP_ID);
+
+ state = sp_ctrl_getbit(sp_id, SP_SC_REG, SP_IDLE_BIT);
+ return state;
+}
diff --git a/drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h b/drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h
new file mode 100644
index 000000000000..79006c325de6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ */
+
+#ifndef __IA_CSS_TAGGER_COMMON_H__
+#define __IA_CSS_TAGGER_COMMON_H__
+
+#include <system_local.h>
+#include <type_support.h>
+
+/**
+ * @brief The tagger's circular buffer.
+ *
+ * Should be one less than NUM_CONTINUOUS_FRAMES in sh_css_internal.h
+ */
+#define MAX_CB_ELEMS_FOR_TAGGER 14
+
+/**
+ * @brief Data structure for the tagger buffer element.
+ */
+typedef struct {
+ u32 frame; /* the frame value stored in the element */
+ u32 param; /* the param value stored in the element */
+ u8 mark; /* the mark on the element */
+ u8 lock; /* the lock on the element */
+ u8 exp_id; /* exp_id of frame, for debugging only */
+} ia_css_tagger_buf_sp_elem_t;
+
+#endif /* __IA_CSS_TAGGER_COMMON_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c b/drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c
new file mode 100644
index 000000000000..391308e563cb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <type_support.h> /* for uint32_t */
+#include "ia_css_timer.h" /*struct ia_css_clock_tick */
+#include "sh_css_legacy.h" /* IA_CSS_PIPE_ID_NUM*/
+#include "gp_timer.h" /*gp_timer_read()*/
+#include "assert_support.h"
+
+int ia_css_timer_get_current_tick(struct ia_css_clock_tick *curr_ts)
+{
+ assert(curr_ts);
+ if (!curr_ts)
+ return -EINVAL;
+ curr_ts->ticks = (clock_value_t)gp_timer_read(GP_TIMER_SEL);
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h
new file mode 100644
index 000000000000..e31da83d3106
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _scalar_processor_2400_params_h
+#define _scalar_processor_2400_params_h
+
+#include "cell_params.h"
+
+#endif /* _scalar_processor_2400_params_h */
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
new file mode 100644
index 000000000000..73bd87f43a8c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -0,0 +1,9063 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+/*! \file */
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "hmm.h"
+
+#include "atomisp_internal.h"
+
+#include "ia_css.h"
+#include "sh_css_hrt.h" /* only for file 2 MIPI */
+#include "ia_css_buffer.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+#include "sh_css_mipi.h"
+#include "sh_css_sp.h" /* sh_css_sp_group */
+#include "ia_css_isys.h"
+#include "ia_css_frame.h"
+#include "sh_css_defs.h"
+#include "sh_css_firmware.h"
+#include "sh_css_params.h"
+#include "sh_css_params_internal.h"
+#include "sh_css_param_shading.h"
+#include "ia_css_refcount.h"
+#include "ia_css_rmgr.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_device_access.h"
+#include "device_access.h"
+#include "sh_css_legacy.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_stream.h"
+#include "sh_css_stream_format.h"
+#include "ia_css_pipe.h"
+#include "ia_css_util.h"
+#include "ia_css_pipe_util.h"
+#include "ia_css_pipe_binarydesc.h"
+#include "ia_css_pipe_stagedesc.h"
+
+#include "tag.h"
+#include "assert_support.h"
+#include "math_support.h"
+#include "sw_event_global.h" /* Event IDs.*/
+#include "ia_css_ifmtr.h"
+#include "input_system.h"
+#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... */
+#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
+#include "gdc_device.h" /* HRT_GDC_N */
+#include "dma.h" /* dma_set_max_burst_size() */
+#include "irq.h" /* virq */
+#include "sp.h" /* cnd_sp_irq_enable() */
+#include "isp.h" /* cnd_isp_irq_enable, ISP_VEC_NELEMS */
+#include "gp_device.h" /* gp_device_reg_store() */
+#include <gpio_global.h>
+#include <gpio_private.h>
+#include "timed_ctrl.h"
+#include "ia_css_inputfifo.h"
+#define WITH_PC_MONITORING 0
+
+#define SH_CSS_VIDEO_BUFFER_ALIGNMENT 0
+
+
+#include "ia_css_spctrl.h"
+#include "ia_css_version_data.h"
+#include "sh_css_struct.h"
+#include "ia_css_bufq.h"
+#include "ia_css_timer.h" /* clock_value_t */
+
+#include "isp/modes/interface/input_buf.isp.h"
+
+/* Name of the sp program: should not be built-in */
+#define SP_PROG_NAME "sp"
+/* Size of Refcount List */
+#define REFCOUNT_SIZE 1000
+
+/*
+ * for JPEG, we don't know the length of the image upfront,
+ * but since we support sensor up to 16MP, we take this as
+ * upper limit.
+ */
+#define JPEG_BYTES (16 * 1024 * 1024)
+
+struct sh_css my_css;
+
+int __printf(1, 0) (*sh_css_printf)(const char *fmt, va_list args) = NULL;
+
+/*
+ * modes of work: stream_create and stream_destroy will update the save/restore
+ * data only when in working mode, not suspend/resume
+ */
+enum ia_sh_css_modes {
+ sh_css_mode_none = 0,
+ sh_css_mode_working,
+ sh_css_mode_suspend,
+ sh_css_mode_resume
+};
+
+/**
+ * struct sh_css_stream_seed - a stream seed, to save and restore the
+ * stream data.
+ *
+ * @orig_stream: pointer to restore the original handle
+ * @stream: handle, used as ID too.
+ * @stream_config: stream config struct
+ * @num_pipes: number of pipes
+ * @pipes: pipe handles
+ * @orig_pipes: pointer to restore original handle
+ * @pipe_config: pipe config structs
+ *
+ * the stream seed contains all the data required to "grow" the seed again
+ * after it was closed.
+*/
+struct sh_css_stream_seed {
+ struct ia_css_stream **orig_stream;
+ struct ia_css_stream *stream;
+ struct ia_css_stream_config stream_config;
+ int num_pipes;
+ struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe **orig_pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_config pipe_config[IA_CSS_PIPE_ID_NUM];
+};
+
+#define MAX_ACTIVE_STREAMS 5
+/*
+ * A global struct for save/restore to hold all the data that should
+ * sustain power-down: MMU base, IRQ type, env for routines, binary loaded FW
+ * and the stream seeds.
+ */
+struct sh_css_save {
+ enum ia_sh_css_modes mode;
+ u32 mmu_base; /* the last mmu_base */
+ enum ia_css_irq_type irq_type;
+ struct sh_css_stream_seed stream_seeds[MAX_ACTIVE_STREAMS];
+ struct ia_css_fw *loaded_fw; /* fw struct previously loaded */
+ struct ia_css_env driver_env; /* driver-supplied env copy */
+};
+
+static bool my_css_save_initialized; /* if my_css_save was initialized */
+static struct sh_css_save my_css_save;
+
+/*
+ * pqiao NOTICE: this is for css internal buffer recycling when stopping
+ * pipeline,
+ * this array is temporary and will be replaced by resource manager
+ */
+
+/* Take the biggest size for the number of elements */
+#define MAX_HMM_BUFFER_NUM \
+ (SH_CSS_MAX_NUM_QUEUES * (IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE + 2))
+
+struct sh_css_hmm_buffer_record {
+ bool in_use;
+ enum ia_css_buffer_type type;
+ struct ia_css_rmgr_vbuf_handle *h_vbuf;
+ hrt_address kernel_ptr;
+};
+
+static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
+
+#define GPIO_FLASH_PIN_MASK BIT(HIVE_GPIO_STROBE_TRIGGER_PIN)
+
+/*
+ * Local prototypes
+ */
+
+static int
+allocate_delay_frames(struct ia_css_pipe *pipe);
+
+static int
+sh_css_pipe_start(struct ia_css_stream *stream);
+
+/*
+ * @brief Check if all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance have stopped.
+ *
+ * @param[in] stream Pointer to the target "ia_css_stream" instance.
+ *
+ * @return
+ * - true, if all "ia_css_pipe" instances in the target "ia_css_stream"
+ * instance have been stopped.
+ * - false, otherwise.
+ */
+
+/* ISP 2401 */
+static int
+ia_css_pipe_check_format(struct ia_css_pipe *pipe,
+ enum ia_css_frame_format format);
+
+/* ISP 2401 */
+static void
+ia_css_reset_defaults(struct sh_css *css);
+
+static void
+sh_css_init_host_sp_control_vars(void);
+
+static int
+set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version);
+
+static bool
+need_capture_pp(const struct ia_css_pipe *pipe);
+
+static bool
+need_yuv_scaler_stage(const struct ia_css_pipe *pipe);
+
+static int ia_css_pipe_create_cas_scaler_desc_single_output(
+ struct ia_css_frame_info *cas_scaler_in_info,
+ struct ia_css_frame_info *cas_scaler_out_info,
+ struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_cas_binary_descr *descr);
+
+static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
+ *descr);
+
+static bool
+need_downscaling(const struct ia_css_resolution in_res,
+ const struct ia_css_resolution out_res);
+
+static bool need_capt_ldc(const struct ia_css_pipe *pipe);
+
+static int
+sh_css_pipe_load_binaries(struct ia_css_pipe *pipe);
+
+static
+int sh_css_pipe_get_viewfinder_frame_info(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx);
+
+static int
+sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx);
+
+static int
+capture_start(struct ia_css_pipe *pipe);
+
+static int
+video_start(struct ia_css_pipe *pipe);
+
+static int
+preview_start(struct ia_css_pipe *pipe);
+
+static int
+yuvpp_start(struct ia_css_pipe *pipe);
+
+static bool copy_on_sp(struct ia_css_pipe *pipe);
+
+static int
+init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *vf_frame, unsigned int idx);
+
+static int
+init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *frame, enum ia_css_frame_format format);
+
+static int
+init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame, unsigned int idx);
+
+static int
+alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time);
+
+static void
+pipe_global_init(void);
+
+static int
+pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
+ unsigned int *pipe_number);
+
+static void
+pipe_release_pipe_num(unsigned int pipe_num);
+
+static int
+create_host_pipeline_structure(struct ia_css_stream *stream);
+
+static int
+create_host_pipeline(struct ia_css_stream *stream);
+
+static int
+create_host_preview_pipeline(struct ia_css_pipe *pipe);
+
+static int
+create_host_video_pipeline(struct ia_css_pipe *pipe);
+
+static int
+create_host_copy_pipeline(struct ia_css_pipe *pipe,
+ unsigned int max_input_width,
+ struct ia_css_frame *out_frame);
+
+static int
+create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe);
+
+static int
+create_host_capture_pipeline(struct ia_css_pipe *pipe);
+
+static int
+create_host_yuvpp_pipeline(struct ia_css_pipe *pipe);
+
+static unsigned int
+sh_css_get_sw_interrupt_value(unsigned int irq);
+
+static struct ia_css_binary *ia_css_pipe_get_shading_correction_binary(
+ const struct ia_css_pipe *pipe);
+
+static struct ia_css_binary *
+ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe);
+
+static struct ia_css_binary *
+ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe);
+
+static void
+sh_css_hmm_buffer_record_init(void);
+
+static void
+sh_css_hmm_buffer_record_uninit(void);
+
+static void
+sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record);
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+ enum ia_css_buffer_type type,
+ hrt_address kernel_ptr);
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_validate(ia_css_ptr ddr_buffer_addr,
+ enum ia_css_buffer_type type);
+
+static unsigned int get_crop_lines_for_bayer_order(const struct
+ ia_css_stream_config *config);
+static unsigned int get_crop_columns_for_bayer_order(const struct
+ ia_css_stream_config *config);
+static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
+ unsigned int *extra_row, unsigned int *extra_column);
+
+static void
+sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe)
+{
+ if (!pipe) {
+ IA_CSS_ERROR("NULL input parameter");
+ return;
+ }
+
+ if (pipe->shading_table)
+ ia_css_shading_table_free(pipe->shading_table);
+ pipe->shading_table = NULL;
+}
+
+static enum ia_css_frame_format yuv420_copy_formats[] = {
+ IA_CSS_FRAME_FORMAT_NV12,
+ IA_CSS_FRAME_FORMAT_NV21,
+ IA_CSS_FRAME_FORMAT_YV12,
+ IA_CSS_FRAME_FORMAT_YUV420,
+ IA_CSS_FRAME_FORMAT_YUV420_16,
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8
+};
+
+static enum ia_css_frame_format yuv422_copy_formats[] = {
+ IA_CSS_FRAME_FORMAT_NV12,
+ IA_CSS_FRAME_FORMAT_NV16,
+ IA_CSS_FRAME_FORMAT_NV21,
+ IA_CSS_FRAME_FORMAT_NV61,
+ IA_CSS_FRAME_FORMAT_YV12,
+ IA_CSS_FRAME_FORMAT_YV16,
+ IA_CSS_FRAME_FORMAT_YUV420,
+ IA_CSS_FRAME_FORMAT_YUV420_16,
+ IA_CSS_FRAME_FORMAT_YUV422,
+ IA_CSS_FRAME_FORMAT_YUV422_16,
+ IA_CSS_FRAME_FORMAT_UYVY,
+ IA_CSS_FRAME_FORMAT_YUYV
+};
+
+/*
+ * Verify whether the selected output format can be produced
+ * by the copy binary, given the stream format.
+ */
+static int
+verify_copy_out_frame_format(struct ia_css_pipe *pipe)
+{
+ enum ia_css_frame_format out_fmt = pipe->output_info[0].format;
+ unsigned int i, found = 0;
+
+ assert(pipe);
+ assert(pipe->stream);
+
+ switch (pipe->stream->config.input_config.format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ for (i = 0; i < ARRAY_SIZE(yuv420_copy_formats) && !found; i++)
+ found = (out_fmt == yuv420_copy_formats[i]);
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ for (i = 0; i < ARRAY_SIZE(yuv422_copy_formats) && !found; i++)
+ found = (out_fmt == yuv422_copy_formats[i]);
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV422_16 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_RGB565);
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_YUV420);
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RAW) ||
+ (out_fmt == IA_CSS_FRAME_FORMAT_RAW_PACKED);
+ break;
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_BINARY_8);
+ break;
+ default:
+ break;
+ }
+ if (!found)
+ return -EINVAL;
+ return 0;
+}
+
+unsigned int
+ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
+{
+ int bpp = 0;
+
+ if (stream)
+ bpp = ia_css_util_input_format_bpp(stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+
+ return bpp;
+}
+
+static int
+sh_css_config_input_network_2400(struct ia_css_stream *stream)
+{
+ unsigned int fmt_type;
+ struct ia_css_pipe *pipe = stream->last_pipe;
+ struct ia_css_binary *binary = NULL;
+ int err = 0;
+
+ assert(stream);
+ assert(pipe);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() enter:\n");
+
+ if (pipe->pipeline.stages)
+ binary = pipe->pipeline.stages->binary;
+
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream->config.input_config.format,
+ stream->csi_rx_config.comp,
+ &fmt_type);
+ if (err)
+ return err;
+ sh_css_sp_program_input_circuit(fmt_type,
+ stream->config.channel_id,
+ stream->config.mode);
+
+ if ((binary && (binary->online || stream->config.continuous)) ||
+ pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
+ err = ia_css_ifmtr_configure(&stream->config,
+ binary);
+ if (err)
+ return err;
+ }
+
+ if (stream->config.mode == IA_CSS_INPUT_MODE_PRBS) {
+ unsigned int width, height, vblank_cycles;
+ const unsigned int hblank_cycles = 100;
+ const unsigned int vblank_lines = 6;
+
+ width = (stream->config.input_config.input_res.width) /
+ (1 + (stream->config.pixels_per_clock == 2));
+ height = stream->config.input_config.input_res.height;
+ vblank_cycles = vblank_lines * (width + hblank_cycles);
+ sh_css_sp_configure_sync_gen(width, height, hblank_cycles, vblank_cycles);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() leave:\n");
+ return 0;
+}
+
+static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
+ enum atomisp_input_format format,
+ unsigned int pixels_per_line)
+{
+ unsigned int rval;
+
+ switch (format) {
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: UYY0 UYY0 ... UYY0
+ * Line 1: VYY0 VYY0 ... VYY0
+ * Line 2: UYY0 UYY0 ... UYY0
+ * Line 3: VYY0 VYY0 ... VYY0
+ * ...
+ * Line (n-2): UYY0 UYY0 ... UYY0
+ * Line (n-1): VYY0 VYY0 ... VYY0
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ * The 0 is introduced by the input system
+ * (mipi backend).
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: YYYY YYYY ... YYYY
+ * Line 1: UYVY UYVY ... UYVY UYVY
+ * Line 2: YYYY YYYY ... YYYY
+ * Line 3: UYVY UYVY ... UYVY UYVY
+ * ...
+ * Line (n-2): YYYY YYYY ... YYYY
+ * Line (n-1): UYVY UYVY ... UYVY UYVY
+ *
+			 * In this frame format, the odd-line is twice
+			 * as wide as the even-line.
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: UYVY UYVY ... UYVY
+ * Line 1: UYVY UYVY ... UYVY
+ * Line 2: UYVY UYVY ... UYVY
+ * Line 3: UYVY UYVY ... UYVY
+ * ...
+ * Line (n-2): UYVY UYVY ... UYVY
+ * Line (n-1): UYVY UYVY ... UYVY
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: ABGR ABGR ... ABGR
+ * Line 1: ABGR ABGR ... ABGR
+ * Line 2: ABGR ABGR ... ABGR
+ * Line 3: ABGR ABGR ... ABGR
+ * ...
+ * Line (n-2): ABGR ABGR ... ABGR
+ * Line (n-1): ABGR ABGR ... ABGR
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line * 4;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: Pixel ... Pixel
+ * Line 1: Pixel ... Pixel
+ * Line 2: Pixel ... Pixel
+ * Line 3: Pixel ... Pixel
+ * ...
+ * Line (n-2): Pixel ... Pixel
+ * Line (n-1): Pixel ... Pixel
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+
+ return rval;
+}
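+
+/*
+ * Worked example (editorial): for a 640-pixel-wide line, the YUV and RGB
+ * formats above expand to 640 * 2 = 1280 and 640 * 4 = 2560 subpixels
+ * respectively, while RAW/binary/user-defined formats map 1:1 to 640
+ * subpixels.
+ */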
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_id(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID0)
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
+ else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID1)
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
+ else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID2)
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
+
+ break;
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+
+ if (stream_cfg->source.port.port == MIPI_PORT0_ID)
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT0_ID;
+ else if (stream_cfg->source.port.port == MIPI_PORT1_ID)
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT1_ID;
+ else if (stream_cfg->source.port.port == MIPI_PORT2_ID)
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT2_ID;
+
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_type(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_PRBS;
+
+ break;
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_SENSOR;
+ break;
+
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ isys_stream_descr->prbs_port_attr.seed0 = stream_cfg->source.prbs.seed;
+ isys_stream_descr->prbs_port_attr.seed1 = stream_cfg->source.prbs.seed1;
+
+ /*
+ * TODO
+ * - Make "sync_gen_cfg" as part of "ia_css_prbs_config".
+ */
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.hblank_cycles = 100;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.vblank_cycles = 100;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_clock =
+ stream_cfg->pixels_per_clock;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t)~(0x0);
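+		/* (uint32_t)~0x0 equals 0xffffffff, i.e. the maximum frame count. */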
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_line =
+ stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.lines_per_frame =
+ stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height;
+
+ break;
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: {
+ int err;
+ unsigned int fmt_type;
+
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream_cfg->isys_config[isys_stream_idx].format,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+ if (err)
+ rc = false;
+
+ isys_stream_descr->csi_port_attr.active_lanes =
+ stream_cfg->source.port.num_lanes;
+ isys_stream_descr->csi_port_attr.fmt_type = fmt_type;
+ isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id;
+
+ if (IS_ISP2401)
+ isys_stream_descr->online = stream_cfg->online;
+
+ err |= ia_css_isys_convert_compressed_format(
+ &stream_cfg->source.port.compression,
+ isys_stream_descr);
+ if (err)
+ rc = false;
+
+ /* metadata */
+ isys_stream_descr->metadata.enable = false;
+ if (stream_cfg->metadata_config.resolution.height > 0) {
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream_cfg->metadata_config.data_type,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+ if (err)
+ rc = false;
+ isys_stream_descr->metadata.fmt_type = fmt_type;
+ isys_stream_descr->metadata.bits_per_pixel =
+ ia_css_util_input_format_bpp(stream_cfg->metadata_config.data_type, true);
+ isys_stream_descr->metadata.pixels_per_line =
+ stream_cfg->metadata_config.resolution.width;
+ isys_stream_descr->metadata.lines_per_frame =
+ stream_cfg->metadata_config.resolution.height;
+
+ /*
+			 * For the new input system, the number of str2mmio
+			 * requests must be even, so we round the number of
+			 * metadata lines up to an even value.
+ */
+ if (IS_ISP2401 && isys_stream_descr->metadata.lines_per_frame > 0)
+ isys_stream_descr->metadata.lines_per_frame +=
+ (isys_stream_descr->metadata.lines_per_frame & 1);
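+			/*
+			 * Example with assumed values: 5 metadata lines become
+			 * 6 here (5 & 1 == 1), while 4 stay 4.
+			 */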
+
+ isys_stream_descr->metadata.align_req_in_bytes =
+ ia_css_csi2_calculate_input_system_alignment(
+ stream_cfg->metadata_config.data_type);
+ isys_stream_descr->metadata.enable = true;
+ }
+
+ break;
+ }
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ unsigned int bits_per_subpixel;
+ unsigned int max_subpixels_per_line;
+ unsigned int lines_per_frame;
+ unsigned int align_req_in_bytes;
+ enum atomisp_input_format fmt_type;
+
+ fmt_type = stream_cfg->isys_config[isys_stream_idx].format;
+ if ((stream_cfg->mode == IA_CSS_INPUT_MODE_SENSOR ||
+ stream_cfg->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) &&
+ stream_cfg->source.port.compression.type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
+ if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_10)
+ fmt_type = ATOMISP_INPUT_FORMAT_RAW_10;
+ else if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_12)
+ fmt_type = ATOMISP_INPUT_FORMAT_RAW_12;
+ else
+ return false;
+ }
+
+ bits_per_subpixel =
+ sh_css_stream_format_2_bits_per_subpixel(fmt_type);
+ if (bits_per_subpixel == 0)
+ return false;
+
+ max_subpixels_per_line =
+ csi2_protocol_calculate_max_subpixels_per_line(fmt_type,
+ stream_cfg->isys_config[isys_stream_idx].input_res.width);
+ if (max_subpixels_per_line == 0)
+ return false;
+
+ lines_per_frame = stream_cfg->isys_config[isys_stream_idx].input_res.height;
+ if (lines_per_frame == 0)
+ return false;
+
+ align_req_in_bytes = ia_css_csi2_calculate_input_system_alignment(fmt_type);
+
+ /* HW needs subpixel info for their settings */
+ isys_stream_descr->input_port_resolution.bits_per_pixel = bits_per_subpixel;
+ isys_stream_descr->input_port_resolution.pixels_per_line =
+ max_subpixels_per_line;
+ isys_stream_descr->input_port_resolution.lines_per_frame = lines_per_frame;
+ isys_stream_descr->input_port_resolution.align_req_in_bytes =
+ align_req_in_bytes;
+
+ return true;
+}
+
+static bool sh_css_translate_stream_cfg_to_isys_stream_descr(
+ struct ia_css_stream_config *stream_cfg,
+ bool early_polling,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ bool rc;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_translate_stream_cfg_to_isys_stream_descr() enter:\n");
+ rc = sh_css_translate_stream_cfg_to_input_system_input_port_id(stream_cfg,
+ isys_stream_descr);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_type(stream_cfg,
+ isys_stream_descr);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_attr(stream_cfg,
+ isys_stream_descr, isys_stream_idx);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
+ stream_cfg, isys_stream_descr, isys_stream_idx);
+
+ isys_stream_descr->raw_packed = stream_cfg->pack_raw_pixels;
+ isys_stream_descr->linked_isys_stream_id = (int8_t)
+ stream_cfg->isys_config[isys_stream_idx].linked_isys_stream_id;
+
+ if (IS_ISP2401)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n");
+
+ return rc;
+}
+
+static bool sh_css_translate_binary_info_to_input_system_output_port_attr(
+ struct ia_css_binary *binary,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ if (!binary)
+ return false;
+
+ isys_stream_descr->output_port_attr.left_padding = binary->left_padding;
+ isys_stream_descr->output_port_attr.max_isp_input_width =
+ binary->info->sp.input.max_width;
+
+ return true;
+}
+
+static int
+sh_css_config_input_network_2401(struct ia_css_stream *stream)
+{
+ bool rc;
+ ia_css_isys_descr_t isys_stream_descr;
+ unsigned int sp_thread_id;
+ struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+ struct ia_css_pipe *pipe = NULL;
+ struct ia_css_binary *binary = NULL;
+ int i;
+ u32 isys_stream_id;
+ bool early_polling = false;
+
+ assert(stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() enter 0x%p:\n", stream);
+
+ if (stream->config.continuous) {
+ if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE)
+ pipe = stream->last_pipe;
+ else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_YUVPP)
+ pipe = stream->last_pipe;
+ else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->last_pipe->pipe_settings.preview.copy_pipe;
+ else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO)
+ pipe = stream->last_pipe->pipe_settings.video.copy_pipe;
+ } else {
+ pipe = stream->last_pipe;
+ }
+
+ if (!pipe)
+ return -EINVAL;
+
+ if (pipe->pipeline.stages)
+ if (pipe->pipeline.stages->binary)
+ binary = pipe->pipeline.stages->binary;
+
+ if (binary) {
+ /*
+		 * This was done in the ifmtr on the 2400. Online and
+		 * continuous modes bypass init_in_frameinfo_memory_defaults,
+		 * so it needs to be done here.
+ */
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+ }
+
+ /* get the SP thread id */
+ rc = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &sp_thread_id);
+ if (!rc)
+ return -EINVAL;
+ /* get the target input terminal */
+ sp_pipeline_input_terminal = &sh_css_sp_group.pipe_io[sp_thread_id].input;
+
+ for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
+ /* initialization */
+ memset((void *)(&isys_stream_descr), 0, sizeof(ia_css_isys_descr_t));
+ sp_pipeline_input_terminal->context.virtual_input_system_stream[i].valid = 0;
+ sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i].valid = 0;
+
+ if (!stream->config.isys_config[i].valid)
+ continue;
+
+ /* translate the stream configuration to the Input System (2401) configuration */
+ rc = sh_css_translate_stream_cfg_to_isys_stream_descr(
+ &stream->config,
+ early_polling,
+ &(isys_stream_descr), i);
+
+ if (stream->config.online) {
+ rc &= sh_css_translate_binary_info_to_input_system_output_port_attr(
+ binary,
+ &(isys_stream_descr));
+ }
+
+ if (!rc)
+ return -EINVAL;
+
+ isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, i);
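+		/*
+		 * The isys stream id is derived from both the SP thread id and
+		 * the per-channel stream index, so it is presumably unique per
+		 * (thread, isys stream) pair.
+		 */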
+
+ /* create the virtual Input System (2401) */
+ rc = ia_css_isys_stream_create(
+ &(isys_stream_descr),
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[i],
+ isys_stream_id);
+ if (!rc)
+ return -EINVAL;
+
+ /* calculate the configuration of the virtual Input System (2401) */
+ rc = ia_css_isys_stream_calculate_cfg(
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[i],
+ &(isys_stream_descr),
+ &sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i]);
+ if (!rc) {
+ ia_css_isys_stream_destroy(
+ &sp_pipeline_input_terminal->context.virtual_input_system_stream[i]);
+ return -EINVAL;
+ }
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() leave:\n");
+
+ return 0;
+}
+
+static inline struct ia_css_pipe *stream_get_last_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *last_pipe = NULL;
+
+ if (stream)
+ last_pipe = stream->last_pipe;
+
+ return last_pipe;
+}
+
+static inline struct ia_css_pipe *stream_get_copy_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *copy_pipe = NULL;
+ struct ia_css_pipe *last_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+
+ last_pipe = stream_get_last_pipe(stream);
+
+ if ((stream) &&
+ (last_pipe) &&
+ (stream->config.continuous)) {
+ pipe_id = last_pipe->mode;
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = last_pipe->pipe_settings.preview.copy_pipe;
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = last_pipe->pipe_settings.video.copy_pipe;
+ break;
+ default:
+ copy_pipe = NULL;
+ break;
+ }
+ }
+
+ return copy_pipe;
+}
+
+static inline struct ia_css_pipe *stream_get_target_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *target_pipe;
+
+ /* get the pipe that consumes the stream */
+ if (stream->config.continuous)
+ target_pipe = stream_get_copy_pipe(stream);
+ else
+ target_pipe = stream_get_last_pipe(stream);
+
+ return target_pipe;
+}
+
+static int stream_csi_rx_helper(
+ struct ia_css_stream *stream,
+ int (*func)(enum mipi_port_id, uint32_t))
+{
+ int retval = -EINVAL;
+ u32 sp_thread_id, stream_id;
+ bool rc;
+ struct ia_css_pipe *target_pipe = NULL;
+
+ if ((!stream) || (stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
+ goto exit;
+
+ target_pipe = stream_get_target_pipe(stream);
+
+ if (!target_pipe)
+ goto exit;
+
+ rc = ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(target_pipe),
+ &sp_thread_id);
+
+ if (!rc)
+ goto exit;
+
+ /* (un)register all valid "virtual isys streams" within the ia_css_stream */
+ stream_id = 0;
+ do {
+ if (stream->config.isys_config[stream_id].valid) {
+ u32 isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, stream_id);
+
+ retval = func(stream->config.source.port.port, isys_stream_id);
+ }
+ stream_id++;
+ } while ((retval == 0) &&
+ (stream_id < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH));
+
+exit:
+ return retval;
+}
+
+static inline int stream_register_with_csi_rx(
+ struct ia_css_stream *stream)
+{
+ return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_register_stream);
+}
+
+static inline int stream_unregister_with_csi_rx(
+ struct ia_css_stream *stream)
+{
+ return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_unregister_stream);
+}
+
+
+static void
+start_binary(struct ia_css_pipe *pipe,
+ struct ia_css_binary *binary)
+{
+ assert(pipe);
+	/* Acceleration uses firmware, so the binary can be NULL */
+
+ if (binary)
+ sh_css_metrics_start_binary(&binary->metrics);
+
+ if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
+ pipe->stream->config.mode);
+ pipe->stream->reconfigure_css_rx = false;
+ }
+}
+
+/* start the copy function on the SP */
+static int
+start_copy_on_sp(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame)
+{
+ (void)out_frame;
+
+ if ((!pipe) || (!pipe->stream))
+ return -EINVAL;
+
+ if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx)
+ ia_css_isys_rx_disable();
+
+ if (pipe->stream->config.input_config.format != ATOMISP_INPUT_FORMAT_BINARY_8)
+ return -EINVAL;
+ sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2);
+
+ if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
+ pipe->stream->config.mode);
+ pipe->stream->reconfigure_css_rx = false;
+ }
+
+ return 0;
+}
+
+void sh_css_binary_args_reset(struct sh_css_binary_args *args)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++)
+ args->tnr_frames[i] = NULL;
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
+ args->delay_frames[i] = NULL;
+ args->in_frame = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ args->out_frame[i] = NULL;
+ args->out_vf_frame = NULL;
+ args->copy_vf = false;
+ args->copy_output = true;
+ args->vf_downscale_log2 = 0;
+}
+
+static void start_pipe(
+ struct ia_css_pipe *me,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode)
+{
+ IA_CSS_ENTER_PRIVATE("me = %p, copy_ovrd = %d, input_mode = %d",
+ me, copy_ovrd, input_mode);
+
+ assert(me); /* all callers are in this file and call with non null argument */
+
+ sh_css_sp_init_pipeline(&me->pipeline,
+ me->mode,
+ (uint8_t)ia_css_pipe_get_pipe_num(me),
+ me->config.default_capture_config.enable_xnr != 0,
+ me->stream->config.pixels_per_clock == 2,
+ me->stream->config.continuous,
+ false,
+ me->required_bds_factor,
+ copy_ovrd,
+ input_mode,
+ &me->stream->config.metadata_config,
+				&me->stream->info.metadata_info,
+				(input_mode == IA_CSS_INPUT_MODE_MEMORY) ?
+ (enum mipi_port_id)0 :
+ me->stream->config.source.port.port);
+
+ if (me->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ struct ia_css_pipeline_stage *stage;
+
+ stage = me->pipeline.stages;
+ if (stage) {
+ me->pipeline.current_stage = stage;
+ start_binary(me, stage->binary);
+ }
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_invalidate_shading_tables(struct ia_css_stream *stream)
+{
+ int i;
+
+ assert(stream);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_invalidate_shading_tables() enter:\n");
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ assert(stream->pipes[i]);
+ sh_css_pipe_free_shading_table(stream->pipes[i]);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_invalidate_shading_tables() leave: return_void\n");
+}
+
+static void
+enable_interrupts(enum ia_css_irq_type irq_type)
+{
+ enum mipi_port_id port;
+ bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE;
+
+ IA_CSS_ENTER_PRIVATE("");
+	/*
+	 * Enable the IRQ on the SP which signals that the SP goes to idle
+	 * (aka ready state).
+	 */
+ cnd_sp_irq_enable(SP0_ID, true);
+ /* Set the IRQ device 0 to either level or pulse */
+ irq_enable_pulse(IRQ0_ID, enable_pulse);
+
+ cnd_virq_enable_channel(virq_sp, true);
+
+ /* Enable SW interrupt 0, this is used to signal ISYS events */
+ cnd_virq_enable_channel(
+ (enum virq_id)(IRQ_SW_CHANNEL0_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+ /* Enable SW interrupt 1, this is used to signal PSYS events */
+ cnd_virq_enable_channel(
+ (enum virq_id)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+
+ if (!IS_ISP2401) {
+ for (port = 0; port < N_MIPI_PORT_ID; port++)
+ ia_css_isys_rx_enable_all_interrupts(port);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw,
+ const char *program,
+ ia_css_spctrl_cfg *spctrl_cfg)
+{
+ if ((!fw) || (!spctrl_cfg))
+ return false;
+ spctrl_cfg->sp_entry = 0;
+ spctrl_cfg->program_name = (char *)(program);
+
+ spctrl_cfg->ddr_data_offset = fw->blob.data_source;
+ spctrl_cfg->dmem_data_addr = fw->blob.data_target;
+ spctrl_cfg->dmem_bss_addr = fw->blob.bss_target;
+ spctrl_cfg->data_size = fw->blob.data_size;
+ spctrl_cfg->bss_size = fw->blob.bss_size;
+
+ spctrl_cfg->spctrl_config_dmem_addr = fw->info.sp.init_dmem_data;
+ spctrl_cfg->spctrl_state_dmem_addr = fw->info.sp.sw_state;
+
+ spctrl_cfg->code_size = fw->blob.size;
+ spctrl_cfg->code = fw->blob.code;
+ spctrl_cfg->sp_entry = fw->info.sp.sp_entry; /* entry function ptr on SP */
+
+ return true;
+}
+
+void
+ia_css_unload_firmware(void)
+{
+ if (sh_css_num_binaries) {
+ /* we have already loaded before so get rid of the old stuff */
+ ia_css_binary_uninit();
+ sh_css_unload_firmware();
+ }
+}
+
+static void
+ia_css_reset_defaults(struct sh_css *css)
+{
+ struct sh_css default_css;
+
+ /* Reset everything to zero */
+ memset(&default_css, 0, sizeof(default_css));
+
+	/* Initialize the non-zero values */
+ default_css.check_system_idle = true;
+ default_css.num_cont_raw_frames = NUM_CONTINUOUS_FRAMES;
+
+ /*
+	 * All should be 0; memset already takes care of that.
+ * default_css.num_mipi_frames[N_CSI_PORTS] = 0;
+ */
+
+ default_css.irq_type = IA_CSS_IRQ_TYPE_EDGE;
+
+ /* Set the defaults to the output */
+ *css = default_css;
+}
+
+int
+ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
+ const struct ia_css_fw *fw)
+{
+ int err;
+
+ if (!env)
+ return -EINVAL;
+ if (!fw)
+ return -EINVAL;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() enter\n");
+
+ /* make sure we initialize my_css */
+ if (my_css.flush != env->cpu_mem_env.flush) {
+ ia_css_reset_defaults(&my_css);
+ my_css.flush = env->cpu_mem_env.flush;
+ }
+
+ err = sh_css_load_firmware(dev, fw->data, fw->bytes);
+ if (!err)
+ err = ia_css_binary_init_infos();
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() leave\n");
+ return err;
+}
+
+int
+ia_css_init(struct device *dev, const struct ia_css_env *env,
+ u32 mmu_l1_base, enum ia_css_irq_type irq_type)
+{
+ int err;
+ ia_css_spctrl_cfg spctrl_cfg;
+ void (*flush_func)(struct ia_css_acc_fw *fw);
+ hrt_data select, enable;
+
+ if (!env)
+ return -EINVAL;
+
+ sh_css_printf = env->print_env.debug_print;
+
+ IA_CSS_ENTER("void");
+
+ flush_func = env->cpu_mem_env.flush;
+
+ pipe_global_init();
+ ia_css_pipeline_init();
+ ia_css_queue_map_init();
+
+ ia_css_device_access_init(&env->hw_access_env);
+
+ select = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_select) & ~GPIO_FLASH_PIN_MASK;
+ enable = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_e) | GPIO_FLASH_PIN_MASK;
+ sh_css_mmu_set_page_table_base_index(mmu_l1_base);
+
+ my_css_save.mmu_base = mmu_l1_base;
+
+ ia_css_reset_defaults(&my_css);
+
+ my_css_save.driver_env = *env;
+ my_css.flush = flush_func;
+
+ err = ia_css_rmgr_init();
+ if (err) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ IA_CSS_LOG("init: %d", my_css_save_initialized);
+
+ if (!my_css_save_initialized) {
+ my_css_save_initialized = true;
+ my_css_save.mode = sh_css_mode_working;
+ memset(my_css_save.stream_seeds, 0,
+ sizeof(struct sh_css_stream_seed) * MAX_ACTIVE_STREAMS);
+ IA_CSS_LOG("init: %d mode=%d", my_css_save_initialized, my_css_save.mode);
+ }
+
+ mipi_init();
+
+ /*
+ * In case this has been programmed already, update internal
+ * data structure ...
+ * DEPRECATED
+ */
+ if (!IS_ISP2401)
+ my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID);
+
+ my_css.irq_type = irq_type;
+
+ my_css_save.irq_type = irq_type;
+
+ enable_interrupts(my_css.irq_type);
+
+ /* configure GPIO to output mode */
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_select, select);
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_e, enable);
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_0, 0);
+
+ err = ia_css_refcount_init(REFCOUNT_SIZE);
+ if (err) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ err = sh_css_params_init();
+ if (err) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (!sh_css_setup_spctrl_config(&sh_css_sp_fw, SP_PROG_NAME, &spctrl_cfg))
+ return -EINVAL;
+
+ err = ia_css_spctrl_load_fw(SP0_ID, &spctrl_cfg);
+ if (err) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (!sh_css_hrt_system_is_idle()) {
+ IA_CSS_LEAVE_ERR(-EBUSY);
+ return -EBUSY;
+ }
+ /*
+	 * This can be called here and queuing works, but when the SP is
+	 * started later it will wipe the queued items. So for now we leave
+	 * it for later and make sure updates are not called too frequently.
+ * sh_css_init_buffer_queues();
+ */
+
+ if (IS_ISP2401)
+ gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);
+
+ if (!IS_ISP2401)
+ dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
+ ISP2400_DMA_MAX_BURST_LENGTH);
+ else
+ dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
+ ISP2401_DMA_MAX_BURST_LENGTH);
+
+ if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR)
+ err = -EINVAL;
+
+ sh_css_params_map_and_store_default_gdc_lut();
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+int
+ia_css_enable_isys_event_queue(bool enable)
+{
+ if (sh_css_sp_is_running())
+ return -EBUSY;
+ sh_css_sp_enable_isys_event_queue(enable);
+ return 0;
+}
+
+/*
+ * Mapping sp threads. Currently, this is done when a stream is created and
+ * pipelines are ready to be converted to sp pipelines. Be careful if you are
+ * doing it from stream_create since we could run out of sp threads due to
+ * allocation on inactive pipelines.
+ */
+static int
+map_sp_threads(struct ia_css_stream *stream, bool map)
+{
+ struct ia_css_pipe *main_pipe = NULL;
+ struct ia_css_pipe *copy_pipe = NULL;
+ struct ia_css_pipe *capture_pipe = NULL;
+ int err = 0;
+ enum ia_css_pipe_id pipe_id;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p, map = %s",
+ stream, map ? "true" : "false");
+
+ if (!stream) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ main_pipe = stream->last_pipe;
+ pipe_id = main_pipe->mode;
+
+ ia_css_pipeline_map(main_pipe->pipe_num, map);
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ default:
+ break;
+ }
+
+ if (capture_pipe)
+ ia_css_pipeline_map(capture_pipe->pipe_num, map);
+
+	/* Firmware expects the copy pipe to be the last pipe mapped (if one is needed). */
+ if (copy_pipe)
+ ia_css_pipeline_map(copy_pipe->pipe_num, map);
+
+ /* DH regular multi pipe - not continuous mode: map the next pipes too */
+ if (!stream->config.continuous) {
+ int i;
+
+ for (i = 1; i < stream->num_pipes; i++)
+ ia_css_pipeline_map(stream->pipes[i]->pipe_num, map);
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/*
+ * creates a host pipeline skeleton for all pipes in a stream. Called during
+ * stream_create.
+ */
+static int
+create_host_pipeline_structure(struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+ struct ia_css_pipe *main_pipe = NULL;
+ int err = 0;
+ unsigned int copy_pipe_delay = 0,
+ capture_pipe_delay = 0;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (!stream) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ main_pipe = stream->last_pipe;
+ if (!main_pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ pipe_id = main_pipe->mode;
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ copy_pipe_delay = main_pipe->dvs_frame_delay;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ copy_pipe_delay = main_pipe->dvs_frame_delay;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ capture_pipe = main_pipe;
+ capture_pipe_delay = main_pipe->dvs_frame_delay;
+ break;
+
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!(err) && copy_pipe)
+ err = ia_css_pipeline_create(&copy_pipe->pipeline,
+ copy_pipe->mode,
+ copy_pipe->pipe_num,
+ copy_pipe_delay);
+
+ if (!(err) && capture_pipe)
+ err = ia_css_pipeline_create(&capture_pipe->pipeline,
+ capture_pipe->mode,
+ capture_pipe->pipe_num,
+ capture_pipe_delay);
+
+ /* DH regular multi pipe - not continuous mode: create the next pipelines too */
+ if (!stream->config.continuous) {
+ int i;
+
+		for (i = 1; i < stream->num_pipes && err == 0; i++) {
+ main_pipe = stream->pipes[i];
+ err = ia_css_pipeline_create(&main_pipe->pipeline,
+ main_pipe->mode,
+ main_pipe->pipe_num,
+ main_pipe->dvs_frame_delay);
+ }
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/*
+ * creates a host pipeline for all pipes in a stream. Called during
+ * stream_start.
+ */
+static int
+create_host_pipeline(struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+ struct ia_css_pipe *main_pipe = NULL;
+ int err = 0;
+ unsigned int max_input_width = 0;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+ if (!stream) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ main_pipe = stream->last_pipe;
+ pipe_id = main_pipe->mode;
+
+ /*
+ * No continuous frame allocation for capture pipe. It uses the
+ * "main" pipe's frames.
+ */
+ if ((pipe_id == IA_CSS_PIPE_ID_PREVIEW) ||
+ (pipe_id == IA_CSS_PIPE_ID_VIDEO)) {
+ /*
+ * About
+ * pipe_id == IA_CSS_PIPE_ID_PREVIEW &&
+ * stream->config.mode != IA_CSS_INPUT_MODE_MEMORY:
+ *
+ * The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is
+ * too strong. E.g. in SkyCam (with memory based input frames)
+ * there is no continuous mode and thus no need for allocated
+ * continuous frames.
+		 * This is not only the case for SkyCam but for all preview
+		 * cases that use DDR based input frames. For this reason the
+		 * condition stream->config.mode != IA_CSS_INPUT_MODE_MEMORY
+		 * has been added.
+ */
+ if (stream->config.continuous ||
+ (pipe_id == IA_CSS_PIPE_ID_PREVIEW &&
+ stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)) {
+ err = alloc_continuous_frames(main_pipe, true);
+ if (err)
+ goto ERR;
+ }
+ }
+
+	/* Old isys: allocate_mipi_frames() is needed even in IA_CSS_PIPE_MODE_COPY */
+ if (!IS_ISP2401 || main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ err = allocate_mipi_frames(main_pipe, &stream->info);
+ if (err)
+ goto ERR;
+ }
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ max_input_width =
+ main_pipe->pipe_settings.preview.preview_binary.info->sp.input.max_width;
+
+ err = create_host_preview_pipeline(main_pipe);
+ if (err)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ max_input_width =
+ main_pipe->pipe_settings.video.video_binary.info->sp.input.max_width;
+
+ err = create_host_video_pipeline(main_pipe);
+ if (err)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ capture_pipe = main_pipe;
+
+ break;
+
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = create_host_yuvpp_pipeline(main_pipe);
+ if (err)
+ goto ERR;
+
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto ERR;
+
+ if (copy_pipe) {
+ err = create_host_copy_pipeline(copy_pipe, max_input_width,
+ main_pipe->continuous_frames[0]);
+ if (err)
+ goto ERR;
+ }
+
+ if (capture_pipe) {
+ err = create_host_capture_pipeline(capture_pipe);
+ if (err)
+ goto ERR;
+ }
+
+ /* DH regular multi pipe - not continuous mode: create the next pipelines too */
+ if (!stream->config.continuous) {
+ int i;
+
+		for (i = 1; i < stream->num_pipes && err == 0; i++) {
+ switch (stream->pipes[i]->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = create_host_preview_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = create_host_video_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = create_host_capture_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = create_host_yuvpp_pipeline(stream->pipes[i]);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto ERR;
+ }
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static const struct ia_css_pipe default_pipe = IA_CSS_DEFAULT_PIPE;
+static const struct ia_css_preview_settings preview = IA_CSS_DEFAULT_PREVIEW_SETTINGS;
+static const struct ia_css_capture_settings capture = IA_CSS_DEFAULT_CAPTURE_SETTINGS;
+static const struct ia_css_video_settings video = IA_CSS_DEFAULT_VIDEO_SETTINGS;
+static const struct ia_css_yuvpp_settings yuvpp = IA_CSS_DEFAULT_YUVPP_SETTINGS;
+
+static int
+init_pipe_defaults(enum ia_css_pipe_mode mode,
+ struct ia_css_pipe *pipe,
+ bool copy_pipe)
+{
+ if (!pipe) {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return -EINVAL;
+ }
+
+ /* Initialize pipe to pre-defined defaults */
+ memcpy(pipe, &default_pipe, sizeof(default_pipe));
+
+ /* TODO: JB should not be needed, but temporary backward reference */
+ switch (mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ pipe->mode = IA_CSS_PIPE_ID_PREVIEW;
+ memcpy(&pipe->pipe_settings.preview, &preview, sizeof(preview));
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (copy_pipe)
+ pipe->mode = IA_CSS_PIPE_ID_COPY;
+ else
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+
+ memcpy(&pipe->pipe_settings.capture, &capture, sizeof(capture));
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ pipe->mode = IA_CSS_PIPE_ID_VIDEO;
+ memcpy(&pipe->pipe_settings.video, &video, sizeof(video));
+ break;
+ case IA_CSS_PIPE_MODE_COPY:
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ break;
+ case IA_CSS_PIPE_MODE_YUVPP:
+ pipe->mode = IA_CSS_PIPE_ID_YUVPP;
+ memcpy(&pipe->pipe_settings.yuvpp, &yuvpp, sizeof(yuvpp));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+pipe_global_init(void)
+{
+ u8 i;
+
+ my_css.pipe_counter = 0;
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
+ my_css.all_pipes[i] = NULL;
+}
+
+static int
+pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
+ unsigned int *pipe_number)
+{
+ const u8 INVALID_PIPE_NUM = (uint8_t)~(0);
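+	/* (uint8_t)~0 == 0xff, assumed to be larger than any valid pipe index */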
+ u8 pipe_num = INVALID_PIPE_NUM;
+ u8 i;
+
+ if (!pipe) {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return -EINVAL;
+ }
+
+	/* Assign a new pipe_num: search for an empty slot */
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ if (!my_css.all_pipes[i]) {
+ /* position is reserved */
+ my_css.all_pipes[i] = (struct ia_css_pipe *)pipe;
+ pipe_num = i;
+ break;
+ }
+ }
+ if (pipe_num == INVALID_PIPE_NUM) {
+ /* Max number of pipes already allocated */
+ IA_CSS_ERROR("Max number of pipes already created");
+ return -ENOSPC;
+ }
+
+ my_css.pipe_counter++;
+
+ IA_CSS_LOG("pipe_num (%d)", pipe_num);
+
+ *pipe_number = pipe_num;
+ return 0;
+}
+
+static void
+pipe_release_pipe_num(unsigned int pipe_num)
+{
+ my_css.all_pipes[pipe_num] = NULL;
+ my_css.pipe_counter--;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipe_release_pipe_num (%d)\n", pipe_num);
+}
+
+static int
+create_pipe(enum ia_css_pipe_mode mode,
+ struct ia_css_pipe **pipe,
+ bool copy_pipe)
+{
+ int err = 0;
+ struct ia_css_pipe *me;
+
+ if (!pipe) {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return -EINVAL;
+ }
+
+ me = kmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me)
+ return -ENOMEM;
+
+ err = init_pipe_defaults(mode, me, copy_pipe);
+ if (err) {
+ kfree(me);
+ return err;
+ }
+
+ err = pipe_generate_pipe_num(me, &me->pipe_num);
+ if (err) {
+ kfree(me);
+ return err;
+ }
+
+ *pipe = me;
+ return 0;
+}
+
+struct ia_css_pipe *
+find_pipe_by_num(uint32_t pipe_num)
+{
+ unsigned int i;
+
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ if (my_css.all_pipes[i] &&
+ ia_css_pipe_get_pipe_num(my_css.all_pipes[i]) == pipe_num) {
+ return my_css.all_pipes[i];
+ }
+ }
+ return NULL;
+}
+
+int
+ia_css_pipe_destroy(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+
+ IA_CSS_ENTER("pipe = %p", pipe);
+
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (pipe->stream) {
+ IA_CSS_LOG("ia_css_stream_destroy not called!");
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ /*
+ * need to take into account that this function is also called
+ * on the internal copy pipe
+ */
+ if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
+ ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->continuous_frames);
+ ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->cont_md_buffers);
+ if (pipe->pipe_settings.preview.copy_pipe) {
+ err = ia_css_pipe_destroy(pipe->pipe_settings.preview.copy_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_destroy(): destroyed internal copy pipe err=%d\n",
+ err);
+ }
+ }
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
+ ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->continuous_frames);
+ ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->cont_md_buffers);
+ if (pipe->pipe_settings.video.copy_pipe) {
+ err = ia_css_pipe_destroy(pipe->pipe_settings.video.copy_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_destroy(): destroyed internal copy pipe err=%d\n",
+ err);
+ }
+ }
+ ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES,
+ pipe->pipe_settings.video.tnr_frames);
+ ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
+ pipe->pipe_settings.video.delay_frames);
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
+ pipe->pipe_settings.capture.delay_frames);
+ break;
+ case IA_CSS_PIPE_MODE_COPY:
+ break;
+ case IA_CSS_PIPE_MODE_YUVPP:
+ break;
+ }
+
+ if (pipe->scaler_pp_lut != mmgr_NULL) {
+ hmm_free(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+ }
+
+ my_css.active_pipes[ia_css_pipe_get_pipe_num(pipe)] = NULL;
+ sh_css_pipe_free_shading_table(pipe);
+
+ ia_css_pipeline_destroy(&pipe->pipeline);
+ pipe_release_pipe_num(ia_css_pipe_get_pipe_num(pipe));
+
+ kfree(pipe);
+ IA_CSS_LEAVE("err = %d", err);
+ return err;
+}
+
+void
+ia_css_uninit(void)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() enter: void\n");
+
+ sh_css_params_free_default_gdc_lut();
+
+ /* cleanup generic data */
+ sh_css_params_uninit();
+ ia_css_refcount_uninit();
+
+ ia_css_rmgr_uninit();
+
+ if (!IS_ISP2401) {
+ /* needed for reprogramming the inputformatter after power cycle of css */
+ ifmtr_set_if_blocking_mode_reset = true;
+ }
+
+ ia_css_spctrl_unload_fw(SP0_ID);
+ sh_css_sp_set_sp_running(false);
+ /* check and free any remaining mipi frames */
+ free_mipi_frames(NULL);
+
+ sh_css_sp_reset_global_vars();
+
+ ia_css_isys_uninit();
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() leave: return_void\n");
+}
+
+int ia_css_irq_translate(
+ unsigned int *irq_infos)
+{
+ enum virq_id irq;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_more_irqs;
+ unsigned int infos = 0;
+
+ /* irq_infos can be NULL, but that would make the function useless */
+ /* assert(irq_infos != NULL); */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_irq_translate() enter: irq_infos=%p\n", irq_infos);
+
+ while (status == hrt_isp_css_irq_status_more_irqs) {
+ status = virq_get_channel_id(&irq);
+ if (status == hrt_isp_css_irq_status_error)
+ return -EINVAL;
+
+
+ switch (irq) {
+ case virq_sp:
+ /*
+ * When SP goes to idle, info is available in the
+ * event queue.
+ */
+ infos |= IA_CSS_IRQ_INFO_EVENTS_READY;
+ break;
+ case virq_isp:
+ break;
+ case virq_isys_sof:
+ infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
+ break;
+ case virq_isys_eof:
+ infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF;
+ break;
+ case virq_isys_csi:
+ infos |= IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR;
+ break;
+ case virq_ifmt0_id:
+ if (!IS_ISP2401)
+ infos |= IA_CSS_IRQ_INFO_IF_ERROR;
+ break;
+ case virq_dma:
+ infos |= IA_CSS_IRQ_INFO_DMA_ERROR;
+ break;
+ case virq_sw_pin_0:
+ infos |= sh_css_get_sw_interrupt_value(0);
+ break;
+ case virq_sw_pin_1:
+ infos |= sh_css_get_sw_interrupt_value(1);
+ /* pqiao TODO: also assumption here */
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (irq_infos)
+ *irq_infos = infos;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_irq_translate() leave: irq_infos=%u\n",
+ infos);
+
+ return 0;
+}
+
+int ia_css_irq_enable(
+ enum ia_css_irq_info info,
+ bool enable)
+{
+ enum virq_id irq = N_virq_id;
+
+ IA_CSS_ENTER("info=%d, enable=%d", info, enable);
+
+ switch (info) {
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
+ irq = virq_isys_sof;
+ break;
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
+ irq = virq_isys_eof;
+ break;
+ case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
+ irq = virq_isys_csi;
+ break;
+ case IA_CSS_IRQ_INFO_IF_ERROR:
+ if (IS_ISP2401)
+ /* Just ignore those unused IRQs without printing errors */
+ return 0;
+
+ irq = virq_ifmt0_id;
+ break;
+ case IA_CSS_IRQ_INFO_DMA_ERROR:
+ irq = virq_dma;
+ break;
+ case IA_CSS_IRQ_INFO_SW_0:
+ irq = virq_sw_pin_0;
+ break;
+ case IA_CSS_IRQ_INFO_SW_1:
+ irq = virq_sw_pin_1;
+ break;
+ default:
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ cnd_virq_enable_channel(irq, enable);
+
+ IA_CSS_LEAVE_ERR(0);
+ return 0;
+}
+
+
+static unsigned int
+sh_css_get_sw_interrupt_value(unsigned int irq)
+{
+ unsigned int irq_value;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_get_sw_interrupt_value() enter: irq=%d\n", irq);
+ irq_value = sh_css_sp_get_sw_interrupt_value(irq);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_get_sw_interrupt_value() leave: irq_value=%d\n", irq_value);
+ return irq_value;
+}
+
+/*
+ * Configure and load the copy binary; the next binary is used to
+ * determine whether the copy binary needs to do left padding.
+ */
+static int load_copy_binary(
+ struct ia_css_pipe *pipe,
+ struct ia_css_binary *copy_binary,
+ struct ia_css_binary *next_binary)
+{
+ struct ia_css_frame_info copy_out_info, copy_in_info, copy_vf_info;
+ unsigned int left_padding;
+ int err;
+ struct ia_css_binary_descr copy_descr;
+
+ /* next_binary can be NULL */
+ assert(pipe);
+ assert(copy_binary);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "load_copy_binary() enter:\n");
+
+ if (next_binary) {
+ copy_out_info = next_binary->in_frame_info;
+ left_padding = next_binary->left_padding;
+ } else {
+ copy_out_info = pipe->output_info[0];
+ copy_vf_info = pipe->vf_output_info[0];
+ ia_css_frame_info_set_format(&copy_vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
+ left_padding = 0;
+ }
+
+ ia_css_pipe_get_copy_binarydesc(pipe, &copy_descr,
+ &copy_in_info, &copy_out_info,
+ (next_binary) ? NULL : NULL/*TODO: &copy_vf_info*/);
+ err = ia_css_binary_find(&copy_descr, copy_binary);
+ if (err)
+ return err;
+ copy_binary->left_padding = left_padding;
+ return 0;
+}
+
+static int
+alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time)
+{
+ int err = 0;
+ struct ia_css_frame_info ref_info;
+ enum ia_css_pipe_id pipe_id;
+ bool continuous;
+ unsigned int i, idx;
+ unsigned int num_frames;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, init_time = %d", pipe, init_time);
+
+ if ((!pipe) || (!pipe->stream)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ pipe_id = pipe->mode;
+ continuous = pipe->stream->config.continuous;
+
+ if (continuous) {
+ if (init_time) {
+ num_frames = pipe->stream->config.init_num_cont_raw_buf;
+ pipe->stream->continuous_pipe = pipe;
+ } else {
+ num_frames = pipe->stream->config.target_num_cont_raw_buf;
+ }
+ } else {
+ num_frames = NUM_ONLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ ref_info = pipe->pipe_settings.preview.preview_binary.in_frame_info;
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
+ ref_info = pipe->pipe_settings.video.video_binary.in_frame_info;
+ } else {
+ /* should not happen */
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (IS_ISP2401) {
+ /* For CSI2+, the continuous frame will hold the full input frame */
+ ref_info.res.width = pipe->stream->config.input_config.input_res.width;
+ ref_info.res.height = pipe->stream->config.input_config.input_res.height;
+
+ /* Ensure padded width is aligned for 2401 */
+ ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS);
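+		/*
+		 * Illustrative numbers (assuming ISP_VEC_NELEMS == 64): a
+		 * width of 1900 would be padded up to
+		 * CEIL_MUL(1900, 128) == 1920, while 1920 stays 1920.
+		 */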
+ }
+
+ if (pipe->stream->config.pack_raw_pixels) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW_PACKED\n");
+ ref_info.format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
+	} else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW\n");
+ ref_info.format = IA_CSS_FRAME_FORMAT_RAW;
+ }
+
+ /* Write format back to binary */
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ pipe->pipe_settings.preview.preview_binary.in_frame_info.format =
+ ref_info.format;
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
+ pipe->pipe_settings.video.video_binary.in_frame_info.format = ref_info.format;
+ } else {
+ /* should not happen */
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (init_time)
+ idx = 0;
+ else
+ idx = pipe->stream->config.init_num_cont_raw_buf;
+
+ for (i = idx; i < NUM_CONTINUOUS_FRAMES; i++) {
+ /* free previous frame */
+ if (pipe->continuous_frames[i]) {
+ ia_css_frame_free(pipe->continuous_frames[i]);
+ pipe->continuous_frames[i] = NULL;
+ }
+ /* free previous metadata buffer */
+ ia_css_metadata_free(pipe->cont_md_buffers[i]);
+ pipe->cont_md_buffers[i] = NULL;
+
+ /* check if new frame needed */
+ if (i < num_frames) {
+ /* allocate new frame */
+ err = ia_css_frame_allocate_from_info(
+ &pipe->continuous_frames[i],
+ &ref_info);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* allocate metadata buffer */
+ pipe->cont_md_buffers[i] = ia_css_metadata_allocate(
+ &pipe->stream->info.metadata_info);
+ }
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+int
+ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream)
+{
+ if (!stream)
+ return -EINVAL;
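+	/*
+	 * Second-stage allocation: with init_time == false,
+	 * alloc_continuous_frames() starts at init_num_cont_raw_buf and
+	 * allocates up to target_num_cont_raw_buf frames.
+	 */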
+ return alloc_continuous_frames(stream->continuous_pipe, false);
+}
+
+static int
+load_preview_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info prev_in_info,
+ prev_bds_out_info,
+ prev_out_info,
+ prev_vf_info;
+ struct ia_css_binary_descr preview_descr;
+ bool online;
+ int err = 0;
+ bool need_vf_pp = false;
+ bool need_isp_copy_binary = false;
+ bool sensor = false;
+ bool continuous;
+
+ /* preview only have 1 output pin now */
+ struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0];
+ struct ia_css_preview_settings *mycs = &pipe->pipe_settings.preview;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_PREVIEW);
+
+ online = pipe->stream->config.online;
+
+ sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+ continuous = pipe->stream->config.continuous;
+
+ if (mycs->preview_binary.info)
+ return 0;
+
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err)
+ return err;
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err)
+ return err;
+
+ /*
+ * Note: the current selection of vf_pp binary and
+ * parameterization of the preview binary contains a few pieces
+ * of hardcoded knowledge. This needs to be cleaned up such that
+ * the binary selection becomes more generic.
+ * The vf_pp binary is needed if one or more of the following features
+ * are required:
+ * 1. YUV downscaling.
+ * 2. Digital zoom.
+ * 3. An output format that is not supported by the preview binary.
+ * In practice this means something other than yuv_line or nv12.
+ * The decision if the vf_pp binary is needed for YUV downscaling is
+ * made after the preview binary selection, since some preview binaries
+ * can perform the requested YUV downscaling.
+ */
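+	/*
+	 * For example (illustrative case): a preview pipe that outputs NV12
+	 * at the binary's native resolution with digital zoom disabled would
+	 * not need the vf_pp binary.
+	 */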
+ need_vf_pp = pipe->config.enable_dz;
+ need_vf_pp |= pipe_out_info->format != IA_CSS_FRAME_FORMAT_YUV_LINE &&
+ !(pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12 ||
+ pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_16 ||
+ pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY);
+
+ /* Preview step 1 */
+ if (pipe->vf_yuv_ds_input_info.res.width)
+ prev_vf_info = pipe->vf_yuv_ds_input_info;
+ else
+ prev_vf_info = *pipe_out_info;
+ /*
+ * If vf_pp is needed, then preview must output yuv_line.
+	 * The exception is when vf_pp is manually disabled, which is only
+ * used in combination with a pipeline extension that requires
+ * yuv_line as input.
+ */
+ if (need_vf_pp)
+ ia_css_frame_info_set_format(&prev_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ err = ia_css_pipe_get_preview_binarydesc(
+ pipe,
+ &preview_descr,
+ &prev_in_info,
+ &prev_bds_out_info,
+ &prev_out_info,
+ &prev_vf_info);
+ if (err)
+ return err;
+ err = ia_css_binary_find(&preview_descr, &mycs->preview_binary);
+ if (err)
+ return err;
+
+ /* The vf_pp binary is needed when (further) YUV downscaling is required */
+ need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
+ need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;
+
+ /*
+ * When vf_pp is needed, then the output format of the selected
+ * preview binary must be yuv_line. If this is not the case,
+ * then the preview binary selection is done again.
+ */
+ if (need_vf_pp &&
+ (mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) {
+ /* Preview step 2 */
+ if (pipe->vf_yuv_ds_input_info.res.width)
+ prev_vf_info = pipe->vf_yuv_ds_input_info;
+ else
+ prev_vf_info = *pipe_out_info;
+
+ ia_css_frame_info_set_format(&prev_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ err = ia_css_pipe_get_preview_binarydesc(
+ pipe,
+ &preview_descr,
+ &prev_in_info,
+ &prev_bds_out_info,
+ &prev_out_info,
+ &prev_vf_info);
+ if (err)
+ return err;
+ err = ia_css_binary_find(&preview_descr,
+ &mycs->preview_binary);
+ if (err)
+ return err;
+ }
+
+ if (need_vf_pp) {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ /* Viewfinder post-processing */
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+ &mycs->preview_binary.out_frame_info[0],
+ pipe_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &mycs->vf_pp_binary);
+ if (err)
+ return err;
+ }
+
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, only the Direct Sensor Mode
+ * Offline Preview uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+ } else {
+ /*
+ * About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
+		 * This is typically the case with SkyCam (which has no input system),
+		 * but it also applies to all cases where the driver opts for memory
+		 * based input frames. In these cases, a copy binary (which typically
+		 * copies sensor data to DDR) is of little use.
+ */
+ need_isp_copy_binary = !online && !continuous;
+ }
+
+ /* Copy */
+ if (need_isp_copy_binary) {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->preview_binary);
+ if (err)
+ return err;
+ }
+
+ if (pipe->shading_table) {
+ ia_css_shading_table_free(pipe->shading_table);
+ pipe->shading_table = NULL;
+ }
+
+ return 0;
+}
+
+static void
+ia_css_binary_unload(struct ia_css_binary *binary)
+{
+ ia_css_binary_destroy_isp_parameters(binary);
+}
+
+static int
+unload_preview_binaries(struct ia_css_pipe *pipe)
+{
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.preview.copy_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.preview.preview_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.preview.vf_pp_binary);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+static const struct ia_css_fw_info *last_output_firmware(
+ const struct ia_css_fw_info *fw)
+{
+ const struct ia_css_fw_info *last_fw = NULL;
+ /* fw can be NULL */
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+
+ for (; fw; fw = fw->next) {
+ const struct ia_css_fw_info *info = fw;
+
+ if (info->info.isp.sp.enable.output)
+ last_fw = fw;
+ }
+ return last_fw;
+}
+
+static int add_firmwares(
+ struct ia_css_pipeline *me,
+ struct ia_css_binary *binary,
+ const struct ia_css_fw_info *fw,
+ const struct ia_css_fw_info *last_fw,
+ unsigned int binary_mode,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_frame *vf_frame,
+ struct ia_css_pipeline_stage **my_stage,
+ struct ia_css_pipeline_stage **vf_stage)
+{
+ int err = 0;
+ struct ia_css_pipeline_stage *extra_stage = NULL;
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* all args can be NULL ??? */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_firmwares() enter:\n");
+
+ for (; fw; fw = fw->next) {
+ struct ia_css_frame *out[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+ struct ia_css_frame *in = NULL;
+ struct ia_css_frame *vf = NULL;
+
+ if ((fw == last_fw) && (fw->info.isp.sp.enable.out_frame != 0))
+ out[0] = out_frame;
+
+ if (fw->info.isp.sp.enable.in_frame != 0)
+ in = in_frame;
+
+ if (fw->info.isp.sp.enable.out_frame != 0)
+ vf = vf_frame;
+
+ ia_css_pipe_get_firmwares_stage_desc(&stage_desc, binary,
+ out, in, vf, fw, binary_mode);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &extra_stage);
+ if (err)
+ return err;
+ if (fw->info.isp.sp.enable.output != 0)
+ in_frame = extra_stage->args.out_frame[0];
+ if (my_stage && !*my_stage && extra_stage)
+ *my_stage = extra_stage;
+ if (vf_stage && !*vf_stage && extra_stage &&
+ fw->info.isp.sp.enable.vf_veceven)
+ *vf_stage = extra_stage;
+ }
+ return err;
+}
+
+static int add_vf_pp_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_binary *vf_pp_binary,
+ struct ia_css_pipeline_stage **vf_pp_stage)
+{
+ struct ia_css_pipeline *me = NULL;
+ const struct ia_css_fw_info *last_fw = NULL;
+ int err = 0;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+
+ if (!pipe)
+ return -EINVAL;
+ if (!in_frame)
+ return -EINVAL;
+ if (!vf_pp_binary)
+ return -EINVAL;
+ if (!vf_pp_stage)
+ return -EINVAL;
+
+ ia_css_pipe_util_create_output_frames(out_frames);
+ me = &pipe->pipeline;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_vf_pp_stage() enter:\n");
+
+ *vf_pp_stage = NULL;
+
+ last_fw = last_output_firmware(pipe->vf_stage);
+ if (!pipe->extra_config.disable_vf_pp) {
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary,
+ out_frames, in_frame, NULL);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc, vf_pp_stage);
+ if (err)
+ return err;
+ in_frame = (*vf_pp_stage)->args.out_frame[0];
+ }
+ err = add_firmwares(me, vf_pp_binary, pipe->vf_stage, last_fw,
+ IA_CSS_BINARY_MODE_VF_PP,
+ in_frame, out_frame, NULL,
+ vf_pp_stage, NULL);
+ return err;
+}
+
+static int add_yuv_scaler_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_pipeline *me,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_frame *internal_out_frame,
+ struct ia_css_binary *yuv_scaler_binary,
+ struct ia_css_pipeline_stage **pre_vf_pp_stage)
+{
+ const struct ia_css_fw_info *last_fw;
+ int err = 0;
+ struct ia_css_frame *vf_frame = NULL;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+ assert(in_frame);
+ assert(pipe);
+ assert(me);
+ assert(yuv_scaler_binary);
+ assert(pre_vf_pp_stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_yuv_scaler_stage() enter:\n");
+
+ *pre_vf_pp_stage = NULL;
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+ last_fw = last_output_firmware(pipe->output_stage);
+
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ yuv_scaler_binary, out_frames, in_frame, vf_frame);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_util_set_output_frames(out_frames, 1, internal_out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ yuv_scaler_binary, out_frames, in_frame, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ pre_vf_pp_stage);
+ if (err)
+ return err;
+ in_frame = (*pre_vf_pp_stage)->args.out_frame[0];
+
+ err = add_firmwares(me, yuv_scaler_binary, pipe->output_stage, last_fw,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ in_frame, out_frame, vf_frame,
+ NULL, pre_vf_pp_stage);
+	/* If a firmware produces vf_pp output, we set that as the vf_pp input */
+ (*pre_vf_pp_stage)->args.vf_downscale_log2 =
+ yuv_scaler_binary->vf_downscale_log2;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_yuv_scaler_stage() leave:\n");
+ return err;
+}
+
+static int add_capture_pp_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_pipeline *me,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_binary *capture_pp_binary,
+ struct ia_css_pipeline_stage **capture_pp_stage)
+{
+ const struct ia_css_fw_info *last_fw = NULL;
+ int err = 0;
+ struct ia_css_frame *vf_frame = NULL;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+ assert(in_frame);
+ assert(pipe);
+ assert(me);
+ assert(capture_pp_binary);
+ assert(capture_pp_stage);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_capture_pp_stage() enter:\n");
+
+ *capture_pp_stage = NULL;
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+ last_fw = last_output_firmware(pipe->output_stage);
+ err = ia_css_frame_allocate_from_info(&vf_frame,
+ &capture_pp_binary->vf_frame_info);
+ if (err)
+ return err;
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_pp_binary, out_frames, NULL, vf_frame);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_pp_binary, out_frames, NULL, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ capture_pp_stage);
+ if (err)
+ return err;
+ err = add_firmwares(me, capture_pp_binary, pipe->output_stage, last_fw,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ in_frame, out_frame, vf_frame,
+ NULL, capture_pp_stage);
+	/* If a firmware produces vf_pp output, we set that as vf_pp input */
+ if (*capture_pp_stage) {
+ (*capture_pp_stage)->args.vf_downscale_log2 =
+ capture_pp_binary->vf_downscale_log2;
+ }
+ return err;
+}
+
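+/*
+ * Initialize the host<->SP buffer queues and signal the SP that they are
+ * ready by setting "host_sp_queues_initialized" in SP DMEM.
+ */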
+static void sh_css_setup_queues(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_host_sp_queues_initialized;
+
+ sh_css_hmm_buffer_record_init();
+
+ sh_css_event_init_irq_mask();
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_host_sp_queues_initialized =
+ fw->info.sp.host_sp_queues_initialized;
+
+ ia_css_bufq_init();
+
+ /* set "host_sp_queues_initialized" to "true" */
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_queues_initialized),
+ (uint32_t)(1));
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_setup_queues() leave:\n");
+}
+
+static int
+init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *vf_frame, unsigned int idx)
+{
+ int err = 0;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(vf_frame);
+
+ sh_css_pipe_get_viewfinder_frame_info(pipe, &vf_frame->frame_info, idx);
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, thread_id, &queue_id);
+ vf_frame->dynamic_queue_id = queue_id;
+ vf_frame->buf_type = IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx;
+
+ err = ia_css_frame_init_planes(vf_frame);
+ return err;
+}
+
+static unsigned int
+get_crop_lines_for_bayer_order(const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_BGGR) ||
+ (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+static unsigned int
+get_crop_columns_for_bayer_order(const struct ia_css_stream_config *config)
+{
+ assert(config);
+ if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_RGGB) ||
+ (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * This function gets the sum of all extra pixels in addition to the effective
+ * input; it includes the DVS envelope and filter run-in.
+ */
+static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
+ unsigned int *extra_row, unsigned int *extra_column)
+{
+ enum ia_css_pipe_id pipe_id = pipe->mode;
+ unsigned int left_cropping = 0, top_cropping = 0;
+ unsigned int i;
+ struct ia_css_resolution dvs_env = pipe->config.dvs_envelope;
+
+	/*
+	 * The DVS envelope info may not be correctly sent down via the pipe config.
+	 * The check is made and the correct value is populated in the binary info.
+	 * Use this value when computing the crop, else excess lines may get trimmed.
+	 */
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ if (pipe->pipe_settings.preview.preview_binary.info) {
+ left_cropping =
+ pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.left_cropping;
+ top_cropping =
+ pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.top_cropping;
+ }
+ dvs_env = pipe->pipe_settings.preview.preview_binary.dvs_envelope;
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ if (pipe->pipe_settings.video.video_binary.info) {
+ left_cropping =
+ pipe->pipe_settings.video.video_binary.info->sp.pipeline.left_cropping;
+ top_cropping =
+ pipe->pipe_settings.video.video_binary.info->sp.pipeline.top_cropping;
+ }
+ dvs_env = pipe->pipe_settings.video.video_binary.dvs_envelope;
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info) {
+ left_cropping +=
+ pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.left_cropping;
+ top_cropping +=
+ pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.top_cropping;
+ }
+ dvs_env.width +=
+ pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.width;
+ dvs_env.height +=
+ pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.height;
+ }
+ break;
+ default:
+ break;
+ }
+
+ *extra_row = top_cropping + dvs_env.height;
+ *extra_column = left_cropping + dvs_env.width;
+}
+
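+/*
+ * Compute the crop offsets that center the effective resolution (plus the
+ * extra rows/columns needed for cropping and the DVS envelope) within the
+ * input resolution. The offsets are rounded down to even values and then
+ * shifted by one line and/or column where needed so that the ISP sees GRBG
+ * bayer order.
+ */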
+void
+ia_css_get_crop_offsets(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *in_frame)
+{
+ unsigned int row = 0;
+ unsigned int column = 0;
+ struct ia_css_resolution *input_res;
+ struct ia_css_resolution *effective_res;
+ unsigned int extra_row = 0, extra_col = 0;
+ unsigned int min_reqd_height, min_reqd_width;
+
+ assert(pipe);
+ assert(pipe->stream);
+ assert(in_frame);
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p effective_wd = %u effective_ht = %u",
+ pipe, pipe->config.input_effective_res.width,
+ pipe->config.input_effective_res.height);
+
+ input_res = &pipe->stream->config.input_config.input_res;
+
+ if (IS_ISP2401)
+ effective_res = &pipe->config.input_effective_res;
+ else
+ effective_res = &pipe->stream->config.input_config.effective_res;
+
+ get_pipe_extra_pixel(pipe, &extra_row, &extra_col);
+
+ in_frame->raw_bayer_order = pipe->stream->config.input_config.bayer_order;
+
+ min_reqd_height = effective_res->height + extra_row;
+ min_reqd_width = effective_res->width + extra_col;
+
+ if (input_res->height > min_reqd_height) {
+ row = (input_res->height - min_reqd_height) / 2;
+ row &= ~0x1;
+ }
+ if (input_res->width > min_reqd_width) {
+ column = (input_res->width - min_reqd_width) / 2;
+ column &= ~0x1;
+ }
+
+ /*
+ * TODO:
+ * 1. Require the special support for RAW10 packed mode.
+ * 2. Require the special support for the online use cases.
+ */
+
+	/*
+	 * The ISP expects GRBG bayer order; we skip one line and/or one column
+	 * to correct for it in case the input bayer order is different.
+	 */
+ column += get_crop_columns_for_bayer_order(&pipe->stream->config);
+ row += get_crop_lines_for_bayer_order(&pipe->stream->config);
+
+ in_frame->crop_info.start_column = column;
+ in_frame->crop_info.start_line = row;
+
+ IA_CSS_LEAVE_PRIVATE("void start_col: %u start_row: %u", column, row);
+
+ return;
+}
+
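+/*
+ * Fill in the frame info for a raw input frame that is fed from memory:
+ * the resolution comes from the stream input config, the format defaults to
+ * RAW (packed RAW on ISP2401 when pack_raw_pixels is set), and the frame is
+ * bound to the per-thread input-frame queue.
+ */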
+static int
+init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *frame, enum ia_css_frame_format format)
+{
+ struct ia_css_frame *in_frame;
+ int err = 0;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(frame);
+ in_frame = frame;
+
+ in_frame->frame_info.format = format;
+
+ if (IS_ISP2401 && format == IA_CSS_FRAME_FORMAT_RAW) {
+ in_frame->frame_info.format = (pipe->stream->config.pack_raw_pixels) ?
+ IA_CSS_FRAME_FORMAT_RAW_PACKED : IA_CSS_FRAME_FORMAT_RAW;
+ }
+
+ in_frame->frame_info.res.width = pipe->stream->config.input_config.input_res.width;
+ in_frame->frame_info.res.height = pipe->stream->config.input_config.input_res.height;
+ in_frame->frame_info.raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ ia_css_frame_info_set_width(&in_frame->frame_info,
+ pipe->stream->config.input_config.input_res.width, 0);
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
+ in_frame->dynamic_queue_id = queue_id;
+ in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME;
+
+ if (IS_ISP2401)
+ ia_css_get_crop_offsets(pipe, &in_frame->frame_info);
+
+ err = ia_css_frame_init_planes(in_frame);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s() bayer_order = %d\n",
+ __func__, in_frame->frame_info.raw_bayer_order);
+
+ return err;
+}
+
+static int
+init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame, unsigned int idx)
+{
+ int err = 0;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(out_frame);
+
+ sh_css_pipe_get_output_frame_info(pipe, &out_frame->frame_info, idx);
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, thread_id, &queue_id);
+ out_frame->dynamic_queue_id = queue_id;
+ out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx;
+ err = ia_css_frame_init_planes(out_frame);
+
+ return err;
+}
+
+/* Create stages for video pipe */
+static int create_host_video_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_binary *copy_binary, *video_binary,
+ *yuv_scaler_binary, *vf_pp_binary;
+ struct ia_css_pipeline_stage *copy_stage = NULL;
+ struct ia_css_pipeline_stage *video_stage = NULL;
+ struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL;
+ struct ia_css_pipeline *me;
+ struct ia_css_frame *in_frame = NULL;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame = NULL;
+ int err = 0;
+ bool need_copy = false;
+ bool need_vf_pp = false;
+ bool need_yuv_pp = false;
+ bool need_in_frameinfo_memory = false;
+
+ unsigned int i, num_yuv_scaler;
+ bool *is_output_stage = NULL;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ ia_css_pipe_util_create_output_frames(out_frames);
+ out_frame = &pipe->out_frame_struct;
+
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+ me->dvs_frame_delay = pipe->dvs_frame_delay;
+
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following: online or continuous
+ */
+ need_in_frameinfo_memory = !(pipe->stream->config.online ||
+ pipe->stream->config.continuous);
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode ==
+ IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+	/* Construct in_frame info (only in case we have dynamic input) */
+ if (need_in_frameinfo_memory) {
+ in_frame = &pipe->in_frame_struct;
+ err = init_in_frameinfo_memory_defaults(pipe, in_frame,
+ IA_CSS_FRAME_FORMAT_RAW);
+ if (err)
+ goto ERR;
+ }
+
+ out_frame->data = 0;
+ err = init_out_frameinfo_defaults(pipe, out_frame, 0);
+ if (err)
+ goto ERR;
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ vf_frame = &pipe->vf_frame_struct;
+ vf_frame->data = 0;
+ err = init_vf_frameinfo_defaults(pipe, vf_frame, 0);
+ if (err)
+ goto ERR;
+ }
+
+ copy_binary = &pipe->pipe_settings.video.copy_binary;
+ video_binary = &pipe->pipe_settings.video.video_binary;
+ vf_pp_binary = &pipe->pipe_settings.video.vf_pp_binary;
+
+ yuv_scaler_binary = pipe->pipe_settings.video.yuv_scaler_binary;
+ num_yuv_scaler = pipe->pipe_settings.video.num_yuv_scaler;
+ is_output_stage = pipe->pipe_settings.video.is_output_stage;
+
+ need_copy = (copy_binary && copy_binary->info);
+ need_vf_pp = (vf_pp_binary && vf_pp_binary->info);
+ need_yuv_pp = (yuv_scaler_binary && yuv_scaler_binary->info);
+
+ if (need_copy) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &copy_stage);
+ if (err)
+ goto ERR;
+ in_frame = me->stages->args.out_frame[0];
+ } else if (pipe->stream->config.continuous) {
+ if (IS_ISP2401)
+ /*
+ * When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ else
+ in_frame = pipe->continuous_frames[0];
+ }
+
+ ia_css_pipe_util_set_output_frames(out_frames, 0,
+ need_yuv_pp ? NULL : out_frame);
+
+ /*
+ * when the video binary supports a second output pin,
+ * it can directly produce the vf_frame.
+ */
+ if (need_vf_pp) {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
+ out_frames, in_frame, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &video_stage);
+ if (err)
+ goto ERR;
+
+	/* If we use copy instead of video, the input must be YUV instead of raw */
+ if (video_stage) {
+ video_stage->args.copy_vf =
+ video_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
+ video_stage->args.copy_output = video_stage->args.copy_vf;
+ }
+
+	/*
+	 * When the video binary supports only 1 output pin, vf_pp is needed to
+	 * produce the vf_frame.
+	 */
+ if (need_vf_pp && video_stage) {
+ in_frame = video_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
+ &vf_pp_stage);
+ if (err)
+ goto ERR;
+ }
+ if (video_stage) {
+ int frm;
+
+ for (frm = 0; frm < NUM_VIDEO_TNR_FRAMES; frm++) {
+ video_stage->args.tnr_frames[frm] =
+ pipe->pipe_settings.video.tnr_frames[frm];
+ }
+ for (frm = 0; frm < MAX_NUM_VIDEO_DELAY_FRAMES; frm++) {
+ video_stage->args.delay_frames[frm] =
+ pipe->pipe_settings.video.delay_frames[frm];
+ }
+ }
+
+ if (need_yuv_pp && video_stage) {
+ struct ia_css_frame *tmp_in_frame = video_stage->args.out_frame[0];
+ struct ia_css_frame *tmp_out_frame = NULL;
+
+ for (i = 0; i < num_yuv_scaler; i++) {
+ tmp_out_frame = is_output_stage[i] ? out_frame : NULL;
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
+ tmp_out_frame, NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ if (yuv_scaler_stage)
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ }
+ }
+
+ pipe->pipeline.acquire_isp_each_stage = false;
+ ia_css_pipeline_finalize_stages(&pipe->pipeline,
+ pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Create stages for preview */
+static int
+create_host_preview_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline_stage *copy_stage = NULL;
+ struct ia_css_pipeline_stage *preview_stage = NULL;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_pipeline *me = NULL;
+ struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL;
+ struct ia_css_frame *in_frame = NULL;
+ int err = 0;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ bool need_in_frameinfo_memory = false;
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ ia_css_pipe_util_create_output_frames(out_frames);
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Preview
+ * - Buffered Sensor Mode Online Preview
+ * - Direct Sensor Mode Continuous Preview
+ * - Buffered Sensor Mode Continuous Preview
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor &&
+ (online || continuous)));
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ }
+ if (need_in_frameinfo_memory) {
+ err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
+ IA_CSS_FRAME_FORMAT_RAW);
+ if (err)
+ goto ERR;
+
+ in_frame = &me->in_frame;
+ } else {
+ in_frame = NULL;
+ }
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
+ if (err)
+ goto ERR;
+ out_frame = &me->out_frame[0];
+
+ copy_binary = &pipe->pipe_settings.preview.copy_binary;
+ preview_binary = &pipe->pipe_settings.preview.preview_binary;
+ if (pipe->pipe_settings.preview.vf_pp_binary.info)
+ vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;
+
+ if (pipe->pipe_settings.preview.copy_binary.info) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &copy_stage);
+ if (err)
+ goto ERR;
+ in_frame = me->stages->args.out_frame[0];
+ } else if (pipe->stream->config.continuous) {
+ if (IS_ISP2401) {
+ /*
+ * When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ if (continuous || !online)
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ } else {
+ in_frame = pipe->continuous_frames[0];
+ }
+ }
+
+ if (vf_pp_binary) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
+ out_frames, in_frame, NULL);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &preview_stage);
+ if (err)
+ goto ERR;
+	/* If we use copy instead of preview, the input must be YUV instead of raw */
+ preview_stage->args.copy_vf =
+ preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
+ preview_stage->args.copy_output = !preview_stage->args.copy_vf;
+ if (preview_stage->args.copy_vf && !preview_stage->args.out_vf_frame) {
+ /* in case of copy, use the vf frame as output frame */
+ preview_stage->args.out_vf_frame =
+ preview_stage->args.out_frame[0];
+ }
+ if (vf_pp_binary) {
+ if (preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)
+ in_frame = preview_stage->args.out_vf_frame;
+ else
+ in_frame = preview_stage->args.out_frame[0];
+ err = add_vf_pp_stage(pipe, in_frame, out_frame, vf_pp_binary,
+ &vf_pp_stage);
+ if (err)
+ goto ERR;
+ }
+
+ pipe->pipeline.acquire_isp_each_stage = false;
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
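+/*
+ * In continuous mode, tell the SP how many raw buffers to use and hand over
+ * the SP-internal continuous raw frames and their metadata buffers.
+ */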
+static void send_raw_frames(struct ia_css_pipe *pipe)
+{
+ if (pipe->stream->config.continuous) {
+ unsigned int i;
+
+ sh_css_update_host2sp_cont_num_raw_frames
+ (pipe->stream->config.init_num_cont_raw_buf, true);
+ sh_css_update_host2sp_cont_num_raw_frames
+ (pipe->stream->config.target_num_cont_raw_buf, false);
+
+ /* Hand-over all the SP-internal buffers */
+ for (i = 0; i < pipe->stream->config.init_num_cont_raw_buf; i++) {
+ sh_css_update_host2sp_offline_frame(i,
+ pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
+ }
+ }
+
+ return;
+}
+
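+/*
+ * Start the preview pipe. In continuous mode the associated copy pipe (and,
+ * with continuous capture, the capture pipe) is loaded on the SP as well, and
+ * the preview pipe itself is started with memory input since the copy pipe
+ * handles the actual input mode.
+ */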
+static int
+preview_start(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+ struct ia_css_pipe *copy_pipe, *capture_pipe;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode preview_pipe_input_mode;
+ unsigned int thread_id;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ preview_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_pipe = pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+
+ sh_css_metrics_start_frame();
+
+ /* multi stream video needs mipi buffers */
+ err = send_mipi_frames(pipe);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ send_raw_frames(pipe);
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ if (pipe->stream->cont_capt) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+ copy_ovrd |= 1 << thread_id;
+ }
+
+ /* Construct and load the copy pipe */
+ if (pipe->stream->config.continuous) {
+ sh_css_sp_init_pipeline(&copy_pipe->pipeline,
+ IA_CSS_PIPE_ID_COPY,
+ (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2, false,
+ false, pipe->required_bds_factor,
+ copy_ovrd,
+ pipe->stream->config.mode,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+ pipe->stream->config.source.port.port);
+
+ /*
+ * make the preview pipe start with mem mode input, copy handles
+ * the actual mode
+ */
+ preview_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+ /* Construct and load the capture pipe */
+ if (pipe->stream->cont_capt) {
+ sh_css_sp_init_pipeline(&capture_pipe->pipeline,
+ IA_CSS_PIPE_ID_CAPTURE,
+ (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
+ capture_pipe->config.default_capture_config.enable_xnr != 0,
+ capture_pipe->stream->config.pixels_per_clock == 2,
+ true, /* continuous */
+ false, /* offline */
+ capture_pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+ (enum mipi_port_id)0);
+ }
+
+ start_pipe(pipe, copy_ovrd, preview_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
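+/*
+ * Hand a buffer over to the SP: the host-side descriptor is copied into an
+ * sh_css_hmm_buffer, stored via a vbuf from the HMM buffer pool, enqueued on
+ * the per-thread queue that matches the buffer type, and the SP is notified
+ * with a BUFFER_ENQUEUED software event.
+ */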
+int
+ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
+ const struct ia_css_buffer *buffer)
+{
+ int return_err = 0;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+ struct ia_css_rmgr_vbuf_handle p_vbuf;
+ struct ia_css_rmgr_vbuf_handle *h_vbuf;
+ struct sh_css_hmm_buffer ddr_buffer;
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_pipe_id pipe_id;
+ bool ret_err;
+
+ IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
+
+ if ((!pipe) || (!buffer)) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ buf_type = buffer->type;
+
+ pipe_id = pipe->mode;
+
+ IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);
+
+ assert(pipe_id < IA_CSS_PIPE_ID_NUM);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ if (buf_type == IA_CSS_BUFFER_TYPE_INVALID ||
+ buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE ||
+ pipe_id >= IA_CSS_PIPE_ID_NUM) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(-EBUSY);
+ /* SP is not running. The queues are not valid */
+ return -EBUSY;
+ }
+
+ pipeline = &pipe->pipeline;
+
+ assert(pipeline || pipe_id == IA_CSS_PIPE_ID_COPY);
+
+ assert(sizeof(void *) <= sizeof(ddr_buffer.kernel_ptr));
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(NULL);
+ ddr_buffer.cookie_ptr = buffer->driver_cookie;
+ ddr_buffer.timing_data = buffer->timing_data;
+
+ if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS) {
+ if (!buffer->data.stats_3a) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_3a);
+ ddr_buffer.payload.s3a = *buffer->data.stats_3a;
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS) {
+ if (!buffer->data.stats_dvs) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_dvs);
+ ddr_buffer.payload.dis = *buffer->data.stats_dvs;
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_METADATA) {
+ if (!buffer->data.metadata) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.metadata);
+ ddr_buffer.payload.metadata = *buffer->data.metadata;
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME) {
+ if (!buffer->data.frame) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.frame);
+ ddr_buffer.payload.frame.frame_data = buffer->data.frame->data;
+ ddr_buffer.payload.frame.flashed = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_enqueue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
+ buf_type, buffer->data.frame->data);
+
+ }
+
+ /* start of test for using rmgr for acq/rel memory */
+ p_vbuf.vptr = 0;
+ p_vbuf.count = 0;
+ p_vbuf.size = sizeof(struct sh_css_hmm_buffer);
+ h_vbuf = &p_vbuf;
+ /* TODO: change next to correct pool for optimization */
+ ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &h_vbuf);
+
+ if ((!h_vbuf) || (h_vbuf->vptr == 0x0)) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ hmm_store(h_vbuf->vptr,
+ (void *)(&ddr_buffer),
+ sizeof(struct sh_css_hmm_buffer));
+ if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS ||
+ buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS ||
+ buf_type == IA_CSS_BUFFER_TYPE_LACE_STATISTICS) {
+ if (!pipeline) {
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
+ IA_CSS_LOG("pipeline is empty!");
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+			/*
+			 * The SP will read the params after it gets
+			 * empty 3A and DIS buffers.
+			 */
+ if (stage->binary && stage->binary->info &&
+ (stage->binary->info->sp.enable.s3a ||
+ stage->binary->info->sp.enable.dis)) {
+ /* there is a stage that needs it */
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ queue_id,
+ (uint32_t)h_vbuf->vptr);
+ }
+ }
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_METADATA) {
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ queue_id,
+ (uint32_t)h_vbuf->vptr);
+ if (!return_err &&
+ buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
+ IA_CSS_LOG("pfp: enqueued OF %d to q %d thread %d",
+ ddr_buffer.payload.frame.frame_data,
+ queue_id, thread_id);
+ }
+ }
+
+ if (!return_err) {
+ if (sh_css_hmm_buffer_record_acquire(
+ h_vbuf, buf_type,
+ HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
+ IA_CSS_LOG("send vbuf=%p", h_vbuf);
+ } else {
+ return_err = -EINVAL;
+ IA_CSS_ERROR("hmm_buffer_record[]: no available slots\n");
+ }
+ }
+
+ /*
+ * Tell the SP which queues are not empty,
+ * by sending the software event.
+ */
+ if (!return_err) {
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(-EBUSY);
+ return -EBUSY;
+ }
+ return_err = ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
+ (uint8_t)thread_id,
+ queue_id,
+ 0);
+ } else {
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
+ IA_CSS_ERROR("buffer not enqueued");
+ }
+
+ IA_CSS_LEAVE("return value = %d", return_err);
+
+ return return_err;
+}
+
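+/*
+ * Retrieve a buffer that the SP has finished with: the vbuf address is
+ * dequeued, validated against the hmm buffer records, loaded back from HMM
+ * memory, and its payload is copied into the caller's ia_css_buffer. The SP
+ * is then notified with a BUFFER_DEQUEUED software event.
+ */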
+/*
+ * TODO: Free up the hmm memory space.
+ */
+int
+ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
+ struct ia_css_buffer *buffer)
+{
+ int return_err;
+ enum sh_css_queue_id queue_id;
+ ia_css_ptr ddr_buffer_addr = (ia_css_ptr)0;
+ struct sh_css_hmm_buffer ddr_buffer;
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ hrt_address kernel_ptr = 0;
+ bool ret_err;
+
+ IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
+
+ if ((!pipe) || (!buffer)) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ pipe_id = pipe->mode;
+
+ buf_type = buffer->type;
+
+ IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);
+
+ ddr_buffer.kernel_ptr = 0;
+
+ ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(-EBUSY);
+ /* SP is not running. The queues are not valid */
+ return -EBUSY;
+ }
+
+ return_err = ia_css_bufq_dequeue_buffer(queue_id,
+ (uint32_t *)&ddr_buffer_addr);
+
+ if (!return_err) {
+ struct ia_css_frame *frame;
+ struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL;
+
+ IA_CSS_LOG("receive vbuf=%x", (int)ddr_buffer_addr);
+
+ /* Validate the ddr_buffer_addr and buf_type */
+ hmm_buffer_record = sh_css_hmm_buffer_record_validate(
+ ddr_buffer_addr, buf_type);
+ if (hmm_buffer_record) {
+ /*
+ * valid hmm_buffer_record found. Save the kernel_ptr
+ * for validation after performing hmm_load. The
+ * vbuf handle and buffer_record can be released.
+ */
+ kernel_ptr = hmm_buffer_record->kernel_ptr;
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &hmm_buffer_record->h_vbuf);
+ sh_css_hmm_buffer_record_reset(hmm_buffer_record);
+ } else {
+ IA_CSS_ERROR("hmm_buffer_record not found (0x%x) buf_type(%d)",
+ ddr_buffer_addr, buf_type);
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ hmm_load(ddr_buffer_addr,
+ &ddr_buffer,
+ sizeof(struct sh_css_hmm_buffer));
+
+		/*
+		 * If the kernel_ptr is 0 or invalid, return an error.
+		 * Do not access the buffer via the kernel_ptr.
+		 */
+ if ((ddr_buffer.kernel_ptr == 0) ||
+ (kernel_ptr != HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
+ IA_CSS_ERROR("kernel_ptr invalid");
+ IA_CSS_ERROR("expected: (0x%llx)", (u64)kernel_ptr);
+ IA_CSS_ERROR("actual: (0x%llx)", (u64)HOST_ADDRESS(ddr_buffer.kernel_ptr));
+ IA_CSS_ERROR("buf_type: %d\n", buf_type);
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (ddr_buffer.kernel_ptr != 0) {
+ /*
+ * buffer->exp_id : all instances to be removed later
+ * once the driver change is completed. See patch #5758
+ * for reference
+ */
+ buffer->exp_id = 0;
+ buffer->driver_cookie = ddr_buffer.cookie_ptr;
+ buffer->timing_data = ddr_buffer.timing_data;
+
+ if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
+ buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
+ buffer->isys_eof_clock_tick.ticks = ddr_buffer.isys_eof_clock_tick;
+ }
+
+ switch (buf_type) {
+ case IA_CSS_BUFFER_TYPE_INPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+ frame = (struct ia_css_frame *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->data.frame = frame;
+ buffer->exp_id = ddr_buffer.payload.frame.exp_id;
+ frame->exp_id = ddr_buffer.payload.frame.exp_id;
+ frame->isp_config_id = ddr_buffer.payload.frame.isp_parameters_id;
+ frame->valid = pipe->num_invalid_frames == 0;
+ if (!frame->valid)
+ pipe->num_invalid_frames--;
+
+ if (frame->frame_info.format == IA_CSS_FRAME_FORMAT_BINARY_8) {
+ if (IS_ISP2401)
+ frame->planes.binary.size = frame->data_bytes;
+ else
+ frame->planes.binary.size =
+ sh_css_sp_get_binary_copy_size();
+ }
+ if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
+ IA_CSS_LOG("pfp: dequeued OF %d with config id %d thread %d",
+ frame->data, frame->isp_config_id, thread_id);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_dequeue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
+ buf_type, buffer->data.frame->data);
+
+ break;
+ case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
+ buffer->data.stats_3a =
+ (struct ia_css_isp_3a_statistics *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.s3a.exp_id;
+ buffer->data.stats_3a->exp_id = ddr_buffer.payload.s3a.exp_id;
+ buffer->data.stats_3a->isp_config_id = ddr_buffer.payload.s3a.isp_config_id;
+ break;
+ case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
+ buffer->data.stats_dvs =
+ (struct ia_css_isp_dvs_statistics *)
+ HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.dis.exp_id;
+ buffer->data.stats_dvs->exp_id = ddr_buffer.payload.dis.exp_id;
+ break;
+ case IA_CSS_BUFFER_TYPE_LACE_STATISTICS:
+ break;
+ case IA_CSS_BUFFER_TYPE_METADATA:
+ buffer->data.metadata =
+ (struct ia_css_metadata *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.metadata.exp_id;
+ buffer->data.metadata->exp_id = ddr_buffer.payload.metadata.exp_id;
+ break;
+ default:
+ return_err = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Tell the SP which queues are not full,
+ * by sending the software event.
+ */
+ if (!return_err) {
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(-EBUSY);
+ /* SP is not running. The queues are not valid */
+ return -EBUSY;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
+ 0,
+ queue_id,
+ 0);
+ }
+ IA_CSS_LEAVE("buffer=%p", buffer);
+
+ return return_err;
+}
+
+/*
+ * Cannot move this to the event module as it is of type ia_css_event_type,
+ * which is declared in ia_css.h.
+ * TODO: modify and move it if possible.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /* Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /* Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /* Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /* Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /* Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /* Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE, /* Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED, /* Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /* Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE, /* Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /* Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /* Extension stage executed. */
+ IA_CSS_EVENT_TYPE_TIMER, /* Timing measurement data. */
+ IA_CSS_EVENT_TYPE_PORT_EOF, /* End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING, /* Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT, /* Assertion hit by FW */
+ 0, /* error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
+};
+
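+/*
+ * Dequeue one event from the PSYS event queue and translate the 4-byte SP
+ * payload into an ia_css_event. TIMER events span two queue entries; both
+ * are dequeued and combined into the 32-bit timer_data value plus its code
+ * and subcode.
+ */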
+int
+ia_css_dequeue_psys_event(struct ia_css_event *event)
+{
+ enum ia_css_pipe_id pipe_id = 0;
+ u8 payload[4] = {0, 0, 0, 0};
+ int ret_err;
+
+ /*
+ * TODO:
+	 * a) use a generic decoding function, same as the one used by the SP.
+	 * b) group decode and dequeue into an eventQueue module.
+ *
+ * We skip the IA_CSS_ENTER logging call
+ * to avoid flooding the logs when the host application
+ * uses polling.
+ */
+ if (!event)
+ return -EINVAL;
+
+ /* SP is not running. The queues are not valid */
+ if (!sh_css_sp_is_running())
+ return -EBUSY;
+
+ /* dequeue the event (if any) from the psys event queue */
+ ret_err = ia_css_bufq_dequeue_psys_event(payload);
+ if (ret_err)
+ return ret_err;
+
+ IA_CSS_LOG("event dequeued from psys event queue");
+
+ /* Tell the SP that we dequeued an event from the event queue. */
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
+
+ /*
+	 * Events are decoded into 4 bytes of payload; the first byte
+	 * contains the SP event type, which is converted to a host enum.
+	 * TODO: can this enum conversion be eliminated?
+ */
+ event->type = convert_event_sp_to_host_domain[payload[0]];
+ /* Some sane default values since not all events use all fields. */
+ event->pipe = NULL;
+ event->port = MIPI_PORT0_ID;
+ event->exp_id = 0;
+ event->fw_warning = IA_CSS_FW_WARNING_NONE;
+ event->fw_handle = 0;
+ event->timer_data = 0;
+ event->timer_code = 0;
+ event->timer_subcode = 0;
+
+ if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
+		/*
+		 * Timer event: get the 2nd event and decode the data
+		 * into the event struct.
+		 */
+ u32 tmp_data;
+ /* 1st event: LSB 16-bit timer data and code */
+ event->timer_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
+ event->timer_code = payload[2];
+ payload[0] = payload[1] = payload[2] = payload[3] = 0;
+ ret_err = ia_css_bufq_dequeue_psys_event(payload);
+ if (ret_err) {
+			/*
+			 * No 2nd event: this is an error. Using IA_CSS_ERROR
+			 * here results in failures in Merrifield smoke
+			 * testing, so only a warning is logged.
+			 */
+ IA_CSS_WARNING("Timer: Error de-queuing the 2nd TIMER event!!!\n");
+ return ret_err;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
+ event->type = convert_event_sp_to_host_domain[payload[0]];
+ /* It's a timer */
+ if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
+ /* 2nd event data: MSB 16-bit timer and subcode */
+ tmp_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
+ event->timer_data |= (tmp_data << 16);
+ event->timer_subcode = payload[2];
+ } else {
+			/*
+			 * It's a non-timer event, so clear the first half of
+			 * the timer event data. If the second part of the
+			 * TIMER event is not received, we discard the first
+			 * half of the timer data and process the non-timer
+			 * event without affecting the flow; it simply falls
+			 * through the code below.
+			 */
+ event->timer_data = 0;
+ event->timer_code = 0;
+ event->timer_subcode = 0;
+ IA_CSS_ERROR("Missing 2nd timer event. Timer event discarded");
+ }
+ }
+ if (event->type == IA_CSS_EVENT_TYPE_PORT_EOF) {
+ event->port = (enum mipi_port_id)payload[1];
+ event->exp_id = payload[3];
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_WARNING) {
+ event->fw_warning = (enum ia_css_fw_warning)payload[1];
+ /* exp_id is only available in these warning types */
+ if (event->fw_warning == IA_CSS_FW_WARNING_EXP_ID_LOCKED ||
+ event->fw_warning == IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED)
+ event->exp_id = payload[3];
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_ASSERT) {
+ event->fw_assert_module_id = payload[1]; /* module */
+ event->fw_assert_line_no = (payload[2] << 8) + payload[3];
+ /* payload[2] is line_no>>8, payload[3] is line_no&0xff */
+ } else if (event->type != IA_CSS_EVENT_TYPE_TIMER) {
+ /*
+ * pipe related events.
+ * payload[1] contains the pipe_num,
+ * payload[2] contains the pipe_id. These are different.
+ */
+ event->pipe = find_pipe_by_num(payload[1]);
+ pipe_id = (enum ia_css_pipe_id)payload[2];
+ /* Check to see if pipe still exists */
+ if (!event->pipe)
+ return -EBUSY;
+
+ if (event->type == IA_CSS_EVENT_TYPE_FRAME_TAGGED) {
+ /* find the capture pipe that goes with this */
+ int i, n;
+
+ n = event->pipe->stream->num_pipes;
+ for (i = 0; i < n; i++) {
+ struct ia_css_pipe *p =
+ event->pipe->stream->pipes[i];
+ if (p->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
+ event->pipe = p;
+ break;
+ }
+ }
+ event->exp_id = payload[3];
+ }
+ if (event->type == IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE) {
+ /* payload[3] contains the acc fw handle. */
+ u32 stage_num = (uint32_t)payload[3];
+
+ ret_err = ia_css_pipeline_get_fw_from_stage(
+ &event->pipe->pipeline,
+ stage_num,
+ &event->fw_handle);
+ if (ret_err) {
+ IA_CSS_ERROR("Invalid stage num received for ACC event. stage_num:%u",
+ stage_num);
+ return ret_err;
+ }
+ }
+ }
+
+ if (event->pipe)
+ IA_CSS_LEAVE("event_id=%d, pipe_id=%d", event->type, pipe_id);
+ else
+ IA_CSS_LEAVE("event_id=%d", event->type);
+
+ return 0;
+}
+
+int
+ia_css_dequeue_isys_event(struct ia_css_event *event)
+{
+ u8 payload[4] = {0, 0, 0, 0};
+ int err = 0;
+
+ /*
+ * We skip the IA_CSS_ENTER logging call
+ * to avoid flooding the logs when the host application
+ * uses polling.
+ */
+ if (!event)
+ return -EINVAL;
+
+ /* SP is not running. The queues are not valid */
+ if (!sh_css_sp_is_running())
+ return -EBUSY;
+
+ err = ia_css_bufq_dequeue_isys_event(payload);
+ if (err)
+ return err;
+
+ IA_CSS_LOG("event dequeued from isys event queue");
+
+ /* Update SP state to indicate that element was dequeued. */
+ ia_css_bufq_enqueue_isys_event(IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED);
+
+ /* Fill return struct with appropriate info */
+ event->type = IA_CSS_EVENT_TYPE_PORT_EOF;
+ /* EOF events are associated with a CSI port, not with a pipe */
+ event->pipe = NULL;
+ event->port = payload[1];
+ event->exp_id = payload[3];
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
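+/*
+ * Start all pipes of a stream: start the last pipe, then any additional
+ * (non-continuous) pipes, force an ISP parameter update (unless the SP copy
+ * binary is used), and send a START_STREAM software event to every SP thread
+ * involved, including the copy and capture pipes used in continuous capture
+ * mode.
+ */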
+static int
+sh_css_pipe_start(struct ia_css_stream *stream)
+{
+ int err = 0;
+
+ struct ia_css_pipe *pipe;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (!stream) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+ pipe = stream->last_pipe;
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ pipe_id = pipe->mode;
+
+ if (stream->started) {
+ IA_CSS_WARNING("Cannot start stream that is already started");
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = preview_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = video_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = capture_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = yuvpp_start(pipe);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ /* DH regular multi pipe - not continuous mode: start the next pipes too */
+ if (!stream->config.continuous) {
+ int i;
+
+ for (i = 1; i < stream->num_pipes && 0 == err ; i++) {
+ switch (stream->pipes[i]->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = preview_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = video_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = capture_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = yuvpp_start(stream->pipes[i]);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ }
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /*
+	 * Force ISP parameter calculation after a mode change.
+	 * Acceleration API examples pass NULL for the stream, but they
+	 * don't use ISP parameters anyway, so this should be okay.
+	 * The SP (jpeg) copy binary does not use any parameters.
+ */
+ if (!copy_on_sp(pipe)) {
+ sh_css_invalidate_params(stream);
+ err = sh_css_param_update_isp_params(pipe,
+ stream->isp_params_configs, true, NULL);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ ia_css_debug_pipe_graph_dump_epilogue();
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EBUSY);
+ /* SP is not running. The queues are not valid */
+ return -EBUSY;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+
+ /* DH regular multi pipe - not continuous mode: enqueue event to the next pipes too */
+ if (!stream->config.continuous) {
+ int i;
+
+ for (i = 1; i < stream->num_pipes; i++) {
+ ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(stream->pipes[i]),
+ &thread_id);
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+ }
+
+	/* In continuous capture mode, we also start the capture and copy threads */
+ if (pipe->stream->config.continuous) {
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ copy_pipe = pipe->pipe_settings.preview.copy_pipe;
+ else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ copy_pipe = pipe->pipe_settings.video.copy_pipe;
+
+ if (!copy_pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(copy_pipe),
+ &thread_id);
+		/* by the time we reach here the queue is initialized and its handle is available */
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+ if (pipe->stream->cont_capt) {
+ struct ia_css_pipe *capture_pipe = NULL;
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+ else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+
+ if (!capture_pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+		/* by the time we reach here the queue is initialized and its handle is available */
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+
+ stream->started = true;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* ISP2400 */
+void
+sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_enable_cont_capt() enter: enable=%d\n", enable);
+//my_css.cont_capt = enable;
+ my_css.stop_copy_preview = stop_copy_preview;
+}
+
+bool
+sh_css_continuous_is_enabled(uint8_t pipe_num)
+{
+ struct ia_css_pipe *pipe;
+ bool continuous;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_continuous_is_enabled() enter: pipe_num=%d\n", pipe_num);
+
+ pipe = find_pipe_by_num(pipe_num);
+ continuous = pipe && pipe->stream->config.continuous;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_continuous_is_enabled() leave: enable=%d\n",
+ continuous);
+ return continuous;
+}
+
+/* ISP2400 */
+int
+ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream,
+ int *buffer_depth)
+{
+ if (!buffer_depth)
+ return -EINVAL;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n");
+ (void)stream;
+ *buffer_depth = NUM_CONTINUOUS_FRAMES;
+ return 0;
+}
+
+int
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n", buffer_depth);
+ (void)stream;
+ if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1)
+ return -EINVAL;
+ /* ok, value allowed */
+ stream->config.target_num_cont_raw_buf = buffer_depth;
+	/* TODO: check what to do regarding initialization */
+ return 0;
+}
+
+/* ISP2401 */
+int
+ia_css_stream_get_buffer_depth(struct ia_css_stream *stream,
+ int *buffer_depth)
+{
+ if (!buffer_depth)
+ return -EINVAL;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n");
+ (void)stream;
+ *buffer_depth = stream->config.target_num_cont_raw_buf;
+ return 0;
+}
+
+unsigned int
+sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
+{
+ OP___assert(port < N_CSI_PORTS);
+ OP___assert(idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_get_mipi_sizes_for_check(port %d, idx %d): %d\n",
+ port, idx, my_css.mipi_sizes_for_check[port][idx]);
+ return my_css.mipi_sizes_for_check[port][idx];
+}
+
+static int sh_css_pipe_configure_output(
+ struct ia_css_pipe *pipe,
+ unsigned int width,
+ unsigned int height,
+ unsigned int padded_width,
+ enum ia_css_frame_format format,
+ unsigned int idx)
+{
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, padded width = %d, format = %d, idx = %d",
+ pipe, width, height, padded_width, format, idx);
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ err = ia_css_util_check_res(width, height);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (pipe->output_info[idx].res.width != width ||
+ pipe->output_info[idx].res.height != height ||
+ pipe->output_info[idx].format != format) {
+ ia_css_frame_info_init(
+ &pipe->output_info[idx],
+ width,
+ height,
+ format,
+ padded_width);
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+static int
+sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
+ struct ia_css_shading_info *shading_info,
+ struct ia_css_pipe_config *pipe_config)
+{
+ int err = 0;
+ struct ia_css_binary *binary = NULL;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_shading_info() enter:\n");
+
+ binary = ia_css_pipe_get_shading_correction_binary(pipe);
+
+ if (binary) {
+ err = ia_css_binary_get_shading_info(binary,
+ IA_CSS_SHADING_CORRECTION_TYPE_1,
+ pipe->required_bds_factor,
+ (const struct ia_css_stream_config *)&pipe->stream->config,
+ shading_info, pipe_config);
+
+ /*
+ * Other function calls can be added here when other shading
+ * correction types will be added in the future.
+ */
+ } else {
+ /*
+ * When the pipe does not have a binary which has the shading
+ * correction, this function does not need to fill the shading
+		 * information. It is not an error case, so this function
+		 * should return 0.
+ */
+ memset(shading_info, 0, sizeof(*shading_info));
+ }
+ return err;
+}
+
+static int
+sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
+ struct ia_css_grid_info *info)
+{
+ int err = 0;
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+ assert(info);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ binary = ia_css_pipe_get_s3a_binary(pipe);
+
+ if (binary) {
+ err = ia_css_binary_3a_grid_info(binary, info, pipe);
+ if (err)
+ goto err;
+ } else {
+ memset(&info->s3a_grid, 0, sizeof(info->s3a_grid));
+ }
+
+ binary = ia_css_pipe_get_sdis_binary(pipe);
+
+ if (binary) {
+ ia_css_binary_dvs_grid_info(binary, info, pipe);
+ ia_css_binary_dvs_stat_grid_info(binary, info, pipe);
+ } else {
+ memset(&info->dvs_grid, 0, sizeof(info->dvs_grid));
+ memset(&info->dvs_grid.dvs_stat_grid_info, 0,
+ sizeof(info->dvs_grid.dvs_stat_grid_info));
+ }
+
+ if (binary) {
+		/* the copy pipe does not have an ISP binary */
+ info->isp_in_width = binary->internal_frame_info.res.width;
+ info->isp_in_height = binary->internal_frame_info.res.height;
+ }
+
+ info->vamem_type = IA_CSS_VAMEM_TYPE_2;
+
+err:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* ISP2401 */
+/*
+ * @brief Check if a format is supported by the pipe.
+ *
+ */
+static int
+ia_css_pipe_check_format(struct ia_css_pipe *pipe,
+ enum ia_css_frame_format format)
+{
+ const enum ia_css_frame_format *supported_formats;
+ int number_of_formats;
+ int found = 0;
+ int i;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (NULL == pipe || NULL == pipe->pipe_settings.video.video_binary.info) {
+ IA_CSS_ERROR("Pipe or binary info is not set");
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ supported_formats = pipe->pipe_settings.video.video_binary.info->output_formats;
+ number_of_formats = sizeof(pipe->pipe_settings.video.video_binary.info->output_formats) / sizeof(enum ia_css_frame_format);
+
+ for (i = 0; i < number_of_formats && !found; i++) {
+ if (supported_formats[i] == format) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ IA_CSS_ERROR("Requested format is not supported by binary");
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
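+/*
+ * Select and load the binaries needed for the video pipe: an optional chain
+ * of YUV scalers when downscaling is required, the main video binary (with a
+ * second lookup using the YUV_LINE vf format when a vf_pp binary is needed),
+ * an optional copy and vf_pp binary, the delay frames and the TNR reference
+ * frames.
+ */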
+static int load_video_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info video_in_info, tnr_info,
+ *video_vf_info, video_bds_out_info, *pipe_out_info, *pipe_vf_out_info;
+ bool online;
+ int err = 0;
+ bool continuous = pipe->stream->config.continuous;
+ unsigned int i;
+ unsigned int num_output_pins;
+ struct ia_css_frame_info video_bin_out_info;
+ bool need_scaler = false;
+ bool vf_res_different_than_output = false;
+ bool need_vf_pp = false;
+ int vf_ds_log2;
+ struct ia_css_video_settings *mycs = &pipe->pipe_settings.video;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_VIDEO);
+ /*
+	 * We only test the video_binary because offline video doesn't need a
+	 * vf_pp binary and online video does not always use the copy_binary.
+ * All are always reset at the same time anyway.
+ */
+ if (mycs->video_binary.info)
+ return 0;
+
+ online = pipe->stream->config.online;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ assert(pipe_out_info);
+
+ /*
+	 * There is no explicit input format requirement for raw or yuv.
+	 * What matters is that there is a binary that supports the stream format.
+	 * This is checked in binary_find(), so there is no need to check it here.
+ */
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err)
+ return err;
+ /* cannot have online video and input_mode memory */
+ if (online && pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY)
+ return -EINVAL;
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ err = ia_css_util_check_vf_out_info(pipe_out_info,
+ pipe_vf_out_info);
+ if (err)
+ return err;
+ } else {
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err)
+ return err;
+ }
+
+ if (pipe->out_yuv_ds_input_info.res.width)
+ video_bin_out_info = pipe->out_yuv_ds_input_info;
+ else
+ video_bin_out_info = *pipe_out_info;
+
+ /* Video */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ video_vf_info = pipe_vf_out_info;
+ vf_res_different_than_output = (video_vf_info->res.width !=
+ video_bin_out_info.res.width) ||
+ (video_vf_info->res.height != video_bin_out_info.res.height);
+ } else {
+ video_vf_info = NULL;
+ }
+
+ need_scaler = need_downscaling(video_bin_out_info.res, pipe_out_info->res);
+
+ /* we build up the pipeline starting at the end */
+ /* YUV post-processing if needed */
+ if (need_scaler) {
+ struct ia_css_cas_binary_descr cas_scaler_descr = { };
+
+		/*
+		 * NV12 is the common format that is supported by both
+		 * the yuv_scaler and the video_xx_isp2_min binaries.
+		 */
+ video_bin_out_info.format = IA_CSS_FRAME_FORMAT_NV12;
+
+ err = ia_css_pipe_create_cas_scaler_desc_single_output(
+ &video_bin_out_info,
+ pipe_out_info,
+ NULL,
+ &cas_scaler_descr);
+ if (err)
+ return err;
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kcalloc(cas_scaler_descr.num_stage,
+ sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (!mycs->yuv_scaler_binary) {
+ mycs->num_yuv_scaler = 0;
+ err = -ENOMEM;
+ return err;
+ }
+ mycs->is_output_stage = kcalloc(cas_scaler_descr.num_stage,
+ sizeof(bool), GFP_KERNEL);
+ if (!mycs->is_output_stage) {
+ err = -ENOMEM;
+ return err;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err) {
+ kfree(mycs->is_output_stage);
+ mycs->is_output_stage = NULL;
+ return err;
+ }
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ }
+
+ {
+ struct ia_css_binary_descr video_descr;
+ enum ia_css_frame_format vf_info_format;
+
+ err = ia_css_pipe_get_video_binarydesc(pipe,
+ &video_descr, &video_in_info, &video_bds_out_info, &video_bin_out_info,
+ video_vf_info,
+ pipe->stream->config.left_padding);
+ if (err)
+ return err;
+
+ /*
+		 * In the case where video_vf_info is not NULL, this allows
+		 * us to find a potential video binary with the desired vf format.
+		 * On success, no vf_pp binary is needed.
+		 * On failure, we will look up a video binary with the YUV_LINE vf format.
+ */
+ err = ia_css_binary_find(&video_descr,
+ &mycs->video_binary);
+
+ if (err) {
+			/* This will do another video binary lookup later for the YUV_LINE format */
+ if (video_vf_info)
+ need_vf_pp = true;
+ else
+ return err;
+ } else if (video_vf_info) {
+ /*
+			 * The first video binary lookup is successful, but we
+			 * may still need a vf_pp binary based on additional checks.
+ */
+ num_output_pins = mycs->video_binary.info->num_output_pins;
+ vf_ds_log2 = mycs->video_binary.vf_downscale_log2;
+
+ /*
+ * If the binary has dual output pins, we need vf_pp
+ * if the resolution is different.
+ */
+ need_vf_pp |= ((num_output_pins == 2) && vf_res_different_than_output);
+
+ /*
+			 * If the binary has a single output pin, we need vf_pp
+			 * if additional scaling is needed for the vf.
+ */
+ need_vf_pp |= ((num_output_pins == 1) &&
+ ((video_vf_info->res.width << vf_ds_log2 != pipe_out_info->res.width) ||
+ (video_vf_info->res.height << vf_ds_log2 != pipe_out_info->res.height)));
+ }
+
+ if (need_vf_pp) {
+ /* save the current vf_info format for restoration later */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_video_binaries() need_vf_pp; find video binary with YUV_LINE again\n");
+
+ vf_info_format = video_vf_info->format;
+
+ if (!pipe->config.enable_vfpp_bci)
+ ia_css_frame_info_set_format(video_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ ia_css_binary_destroy_isp_parameters(&mycs->video_binary);
+
+ err = ia_css_binary_find(&video_descr,
+ &mycs->video_binary);
+
+ /* restore original vf_info format */
+ ia_css_frame_info_set_format(video_vf_info,
+ vf_info_format);
+ if (err)
+ return err;
+ }
+ }
+
+ /*
+ * If a video binary does not use a ref_frame, we set the frame delay
+ * to 0. This is the case for the 1-stage low-power video binary.
+ */
+ if (!mycs->video_binary.info->sp.enable.ref_frame)
+ pipe->dvs_frame_delay = 0;
+
+ /*
+ * The delay latency determines the number of invalid frames after
+ * a stream is started.
+ */
+ pipe->num_invalid_frames = pipe->dvs_frame_delay;
+ pipe->info.num_invalid_frames = pipe->num_invalid_frames;
+
+	/*
+	 * Viewfinder frames also decrement num_invalid_frames. If the pipe
+	 * outputs a viewfinder frame, then we need to double the number of
+	 * invalid frames.
+	 */
+ if (video_vf_info)
+ pipe->num_invalid_frames *= 2;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_video_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n",
+ pipe->num_invalid_frames, pipe->dvs_frame_delay);
+
+ /* pqiao TODO: temp hack for PO, should be removed after offline YUVPP is enabled */
+ if (!IS_ISP2401) {
+ /* Copy */
+ if (!online && !continuous) {
+			/*
+			 * TODO: what exactly needs doing here? Prepend the copy
+			 * binary to video? Base this only on !online?
+			 */
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->video_binary);
+ if (err)
+ return err;
+ }
+ }
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && need_vf_pp) {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ if (mycs->video_binary.vf_frame_info.format
+ == IA_CSS_FRAME_FORMAT_YUV_LINE) {
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+ &mycs->video_binary.vf_frame_info,
+ pipe_vf_out_info);
+ } else {
+			/*
+			 * The output from the main binary is not YUV_LINE; currently
+			 * this is possible only when BCI is enabled on the vf_pp
+			 * output.
+			 */
+ assert(pipe->config.enable_vfpp_bci);
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe, &vf_pp_descr,
+ &mycs->video_binary.vf_frame_info,
+ pipe_vf_out_info, NULL, NULL);
+ }
+
+ err = ia_css_binary_find(&vf_pp_descr,
+ &mycs->vf_pp_binary);
+ if (err)
+ return err;
+ }
+
+ err = allocate_delay_frames(pipe);
+
+ if (err)
+ return err;
+
+ if (mycs->video_binary.info->sp.enable.block_output) {
+ tnr_info = mycs->video_binary.out_frame_info[0];
+
+		/* Align the TNR reference buffer height to the output block height */
+ tnr_info.res.height = CEIL_MUL(tnr_info.res.height,
+ mycs->video_binary.info->sp.block.output_block_height);
+ } else {
+ tnr_info = mycs->video_binary.internal_frame_info;
+ }
+ tnr_info.format = IA_CSS_FRAME_FORMAT_YUV_LINE;
+ tnr_info.raw_bit_depth = SH_CSS_TNR_BIT_DEPTH;
+
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
+ if (mycs->tnr_frames[i]) {
+ ia_css_frame_free(mycs->tnr_frames[i]);
+ mycs->tnr_frames[i] = NULL;
+ }
+ err = ia_css_frame_allocate_from_info(
+ &mycs->tnr_frames[i],
+ &tnr_info);
+ if (err)
+ return err;
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+ return 0;
+}
+
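+/*
+ * Unload the binaries loaded by load_video_binaries() and free the
+ * YUV scaler binary array together with its is_output_stage bookkeeping.
+ */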
+static int
+unload_video_binaries(struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.video.copy_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.video.video_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.video.vf_pp_binary);
+
+ for (i = 0; i < pipe->pipe_settings.video.num_yuv_scaler; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.video.yuv_scaler_binary[i]);
+
+ kfree(pipe->pipe_settings.video.is_output_stage);
+ pipe->pipe_settings.video.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.video.yuv_scaler_binary);
+ pipe->pipe_settings.video.yuv_scaler_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
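+/*
+ * Start a video pipe: send the MIPI and raw frames and, for continuous
+ * streams, initialize the companion copy and capture pipelines on the SP
+ * before starting the video pipeline itself.
+ */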
+static int video_start(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+ struct ia_css_pipe *copy_pipe, *capture_pipe;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode video_pipe_input_mode;
+ unsigned int thread_id;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ video_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_pipe = pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+
+ sh_css_metrics_start_frame();
+
+ /* multi stream video needs mipi buffers */
+
+ err = send_mipi_frames(pipe);
+ if (err)
+ return err;
+
+ send_raw_frames(pipe);
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ if (pipe->stream->cont_capt) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+ copy_ovrd |= 1 << thread_id;
+ }
+
+ /* Construct and load the copy pipe */
+ if (pipe->stream->config.continuous) {
+ sh_css_sp_init_pipeline(&copy_pipe->pipeline,
+ IA_CSS_PIPE_ID_COPY,
+ (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2, false,
+ false, pipe->required_bds_factor,
+ copy_ovrd,
+ pipe->stream->config.mode,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+ pipe->stream->config.source.port.port);
+
+ /*
+ * make the video pipe start with mem mode input, copy handles
+ * the actual mode
+ */
+ video_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+ /* Construct and load the capture pipe */
+ if (pipe->stream->cont_capt) {
+ sh_css_sp_init_pipeline(&capture_pipe->pipeline,
+ IA_CSS_PIPE_ID_CAPTURE,
+ (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
+ capture_pipe->config.default_capture_config.enable_xnr != 0,
+ capture_pipe->stream->config.pixels_per_clock == 2,
+ true, /* continuous */
+ false, /* offline */
+ capture_pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ &pipe->stream->config.metadata_config,
+ &pipe->stream->info.metadata_info,
+ (enum mipi_port_id)0);
+ }
+
+ start_pipe(pipe, copy_ovrd, video_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
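+/*
+ * Return the viewfinder frame info of output port @idx. RAW and BAYER
+ * capture modes do not produce a viewfinder output.
+ */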
+static
+int sh_css_pipe_get_viewfinder_frame_info(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx)
+{
+ assert(pipe);
+ assert(info);
+
+ /* We could print the pointer as input arg, and the values as output */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_viewfinder_frame_info() enter: void\n");
+
+ if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE &&
+ (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER))
+ return -EINVAL;
+ /* offline video does not generate viewfinder output */
+ *info = pipe->vf_output_info[idx];
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_viewfinder_frame_info() leave: \
+ info.res.width=%d, info.res.height=%d, \
+ info.padded_width=%d, info.format=%d, \
+ info.raw_bit_depth=%d, info.raw_bayer_order=%d\n",
+ info->res.width, info->res.height,
+ info->padded_width, info->format,
+ info->raw_bit_depth, info->raw_bayer_order);
+
+ return 0;
+}
+
+static int
+sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
+ unsigned int height, unsigned int min_width,
+ enum ia_css_frame_format format,
+ unsigned int idx)
+{
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, min_width = %d, format = %d, idx = %d\n",
+ pipe, width, height, min_width, format, idx);
+
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ err = ia_css_util_check_res(width, height);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (pipe->vf_output_info[idx].res.width != width ||
+ pipe->vf_output_info[idx].res.height != height ||
+ pipe->vf_output_info[idx].format != format)
+ ia_css_frame_info_init(&pipe->vf_output_info[idx], width, height,
+ format, min_width);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
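+/*
+ * Load the copy binary for a capture/copy pipe, after checking that the
+ * output frame info and format are valid for a raw copy.
+ */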
+static int load_copy_binaries(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+
+ assert(pipe);
+ IA_CSS_ENTER_PRIVATE("");
+
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+ if (pipe->pipe_settings.capture.copy_binary.info)
+ return 0;
+
+ err = ia_css_frame_check_info(&pipe->output_info[0]);
+ if (err)
+ goto ERR;
+
+ err = verify_copy_out_frame_format(pipe);
+ if (err)
+ goto ERR;
+
+ err = load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ NULL);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static bool need_capture_pp(
+ const struct ia_css_pipe *pipe)
+{
+ const struct ia_css_frame_info *out_info = &pipe->output_info[0];
+
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+
+	/*
+	 * Determine whether we need to use the capture_pp binary.
+	 * This is needed for:
+	 *   1. XNR or
+	 *   2. Digital Zoom or
+	 *   3. YUV downscaling
+	 */
+ if (pipe->out_yuv_ds_input_info.res.width &&
+ ((pipe->out_yuv_ds_input_info.res.width != out_info->res.width) ||
+ (pipe->out_yuv_ds_input_info.res.height != out_info->res.height)))
+ return true;
+
+ if (pipe->config.default_capture_config.enable_xnr != 0)
+ return true;
+
+ if ((pipe->stream->isp_params_configs->dz_config.dx < HRT_GDC_N) ||
+ (pipe->stream->isp_params_configs->dz_config.dy < HRT_GDC_N) ||
+ pipe->config.enable_dz)
+ return true;
+
+ return false;
+}
+
+static bool need_capt_ldc(
+ const struct ia_css_pipe *pipe)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+ return (pipe->extra_config.enable_dvs_6axis) ? true : false;
+}
+
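+/*
+ * Select the number of primary ISP stages for the given pipe version:
+ * NUM_PRIMARY_HQ_STAGES for ISP pipe version 2.6.1, NUM_PRIMARY_STAGES
+ * for versions 2.2 and 1.
+ */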
+static int set_num_primary_stages(unsigned int *num,
+ enum ia_css_pipe_version version)
+{
+ int err = 0;
+
+ if (!num)
+ return -EINVAL;
+
+ switch (version) {
+ case IA_CSS_PIPE_VERSION_2_6_1:
+ *num = NUM_PRIMARY_HQ_STAGES;
+ break;
+ case IA_CSS_PIPE_VERSION_2_2:
+ case IA_CSS_PIPE_VERSION_1:
+ *num = NUM_PRIMARY_STAGES;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
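+/*
+ * Load the binaries for a primary capture pipe: the optional cascaded
+ * YUV scalers, capture post-processing and LDC binaries, the primary
+ * stages, viewfinder post-processing and, when needed, the ISP copy binary.
+ */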
+static int load_primary_binaries(
+ struct ia_css_pipe *pipe)
+{
+ bool online = false;
+ bool need_pp = false;
+ bool need_isp_copy_binary = false;
+ bool need_ldc = false;
+ bool sensor = false;
+ bool memory, continuous;
+ struct ia_css_frame_info prim_in_info,
+ prim_out_info,
+ capt_pp_out_info, vf_info,
+ *vf_pp_in_info, *pipe_out_info,
+ *pipe_vf_out_info, *capt_pp_in_info,
+ capt_ldc_out_info;
+ int err = 0;
+ struct ia_css_capture_settings *mycs;
+ unsigned int i;
+ bool need_extra_yuv_scaler = false;
+ struct ia_css_binary_descr prim_descr[MAX_NUM_PRIMARY_STAGES];
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ online = pipe->stream->config.online;
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ continuous = pipe->stream->config.continuous;
+
+ mycs = &pipe->pipe_settings.capture;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ if (mycs->primary_binary[0].info)
+ return 0;
+
+ err = set_num_primary_stages(&mycs->num_primary_stage,
+ pipe->config.isp_pipe_version);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ err = ia_css_util_check_vf_out_info(pipe_out_info, pipe_vf_out_info);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else {
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ need_pp = need_capture_pp(pipe);
+
+ /*
+ * we use the vf output info to get the primary/capture_pp binary
+ * configured for vf_veceven. It will select the closest downscaling
+ * factor.
+ */
+ vf_info = *pipe_vf_out_info;
+
+ /*
+	 * WARNING: The #ifdef flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ */
+ ia_css_frame_info_set_format(&vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /*
+ * TODO: All this yuv_scaler and capturepp calculation logic
+ * can be shared later. Capture_pp is also a yuv_scale binary
+	 * with extra XNR functionality. Therefore, it can be made the
+ * first step of the cascade.
+ */
+ capt_pp_out_info = pipe->out_yuv_ds_input_info;
+ capt_pp_out_info.format = IA_CSS_FRAME_FORMAT_YUV420;
+ capt_pp_out_info.res.width /= MAX_PREFERRED_YUV_DS_PER_STEP;
+ capt_pp_out_info.res.height /= MAX_PREFERRED_YUV_DS_PER_STEP;
+ ia_css_frame_info_set_width(&capt_pp_out_info, capt_pp_out_info.res.width, 0);
+
+ need_extra_yuv_scaler = need_downscaling(capt_pp_out_info.res,
+ pipe_out_info->res);
+
+ if (need_extra_yuv_scaler) {
+ struct ia_css_cas_binary_descr cas_scaler_descr = { };
+
+ err = ia_css_pipe_create_cas_scaler_desc_single_output(
+ &capt_pp_out_info,
+ pipe_out_info,
+ NULL,
+ &cas_scaler_descr);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kcalloc(cas_scaler_descr.num_stage,
+ sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (!mycs->yuv_scaler_binary) {
+ err = -ENOMEM;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mycs->is_output_stage = kcalloc(cas_scaler_descr.num_stage,
+ sizeof(bool), GFP_KERNEL);
+ if (!mycs->is_output_stage) {
+ err = -ENOMEM;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+
+ } else {
+ capt_pp_out_info = pipe->output_info[0];
+ }
+
+	/* TODO: Do we disable LDC for Skycam? */
+ need_ldc = need_capt_ldc(pipe);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+
+ capt_pp_in_info = need_ldc ? &capt_ldc_out_info : &prim_out_info;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe,
+ &capture_pp_descr,
+ capt_pp_in_info,
+ &capt_pp_out_info,
+ &vf_info);
+
+ err = ia_css_binary_find(&capture_pp_descr,
+ &mycs->capture_pp_binary);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (need_ldc) {
+ struct ia_css_binary_descr capt_ldc_descr;
+
+ ia_css_pipe_get_ldc_binarydesc(pipe,
+ &capt_ldc_descr,
+ &prim_out_info,
+ &capt_ldc_out_info);
+
+ err = ia_css_binary_find(&capt_ldc_descr,
+ &mycs->capture_ldc_binary);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ } else {
+ prim_out_info = *pipe_out_info;
+ }
+
+ /* Primary */
+ for (i = 0; i < mycs->num_primary_stage; i++) {
+ struct ia_css_frame_info *local_vf_info = NULL;
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] &&
+ (i == mycs->num_primary_stage - 1))
+ local_vf_info = &vf_info;
+ ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i],
+ &prim_in_info, &prim_out_info,
+ local_vf_info, i);
+ err = ia_css_binary_find(&prim_descr[i], &mycs->primary_binary[i]);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ /* Viewfinder post-processing */
+ if (need_pp)
+ vf_pp_in_info = &mycs->capture_pp_binary.vf_frame_info;
+ else
+ vf_pp_in_info = &mycs->primary_binary[mycs->num_primary_stage - 1].vf_frame_info;
+
+	/*
+	 * WARNING: The #ifdef flag has been added below as a
+	 * temporary solution to solve the problem of enabling the
+	 * view finder in a single binary in a capture flow. The
+	 * vf-pp stage has been removed for Skycam in the solution
+	 * provided. The vf-pp stage should be re-introduced when
+	 * required. This should not be considered as a clean solution.
+	 * Proper investigation should be done to come up with the clean
+	 * solution.
+	 */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ err = allocate_delay_frames(pipe);
+
+ if (err)
+ return err;
+
+ if (IS_ISP2401)
+ /*
+ * When the input system is 2401, only the Direct Sensor Mode
+ * Offline Capture uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+ else
+ need_isp_copy_binary = !online && !continuous && !memory;
+
+ /* ISP Copy */
+ if (need_isp_copy_binary) {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->primary_binary[0]);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
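+/*
+ * Allocate the DVS delay frames (dvs_frame_delay + 1) for video and preview
+ * pipes, based on the internal frame info of the loaded binary.
+ */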
+static int
+allocate_delay_frames(struct ia_css_pipe *pipe)
+{
+ unsigned int num_delay_frames = 0, i = 0;
+ unsigned int dvs_frame_delay = 0;
+ struct ia_css_frame_info ref_info;
+ int err = 0;
+ enum ia_css_pipe_id mode = IA_CSS_PIPE_ID_VIDEO;
+ struct ia_css_frame **delay_frames = NULL;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!pipe) {
+ IA_CSS_ERROR("Invalid args - pipe %p", pipe);
+ return -EINVAL;
+ }
+
+ mode = pipe->mode;
+ dvs_frame_delay = pipe->dvs_frame_delay;
+
+ if (dvs_frame_delay > 0)
+ num_delay_frames = dvs_frame_delay + 1;
+
+ switch (mode) {
+ case IA_CSS_PIPE_ID_CAPTURE: {
+ struct ia_css_capture_settings *mycs_capture = &pipe->pipe_settings.capture;
+ (void)mycs_capture;
+ return err;
+ }
+ break;
+ case IA_CSS_PIPE_ID_VIDEO: {
+ struct ia_css_video_settings *mycs_video = &pipe->pipe_settings.video;
+
+ ref_info = mycs_video->video_binary.internal_frame_info;
+
+ /*
+ * The ref frame expects
+ * 1. Y plane
+ * 2. UV plane with line interleaving, like below
+ * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
+ *
+		 * This format is not YUV420 (which has Y, U and V planes).
+		 * It's closer to NV12, except that the UV plane has UV
+		 * interleaving, like UVUVUVUVUVUVUVUVU...
+		 *
+		 * TODO: make this ref_frame format a separate frame format
+ */
+ ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
+ delay_frames = mycs_video->delay_frames;
+ }
+ break;
+ case IA_CSS_PIPE_ID_PREVIEW: {
+ struct ia_css_preview_settings *mycs_preview = &pipe->pipe_settings.preview;
+
+ ref_info = mycs_preview->preview_binary.internal_frame_info;
+
+ /*
+ * The ref frame expects
+ * 1. Y plane
+ * 2. UV plane with line interleaving, like below
+ * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
+ *
+		 * This format is not YUV420 (which has Y, U and V planes).
+		 * It's closer to NV12, except that the UV plane has UV
+		 * interleaving, like UVUVUVUVUVUVUVUVU...
+		 *
+		 * TODO: make this ref_frame format a separate frame format
+ */
+ ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
+ delay_frames = mycs_preview->delay_frames;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ref_info.raw_bit_depth = SH_CSS_REF_BIT_DEPTH;
+
+ assert(num_delay_frames <= MAX_NUM_VIDEO_DELAY_FRAMES);
+ for (i = 0; i < num_delay_frames; i++) {
+ err = ia_css_frame_allocate_from_info(&delay_frames[i], &ref_info);
+ if (err)
+ return err;
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+ return 0;
+}
+
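+/*
+ * Load the binaries for the advanced capture mode: capture post-processing
+ * (when needed), post-GDC, GDC, pre-GDC and viewfinder post-processing,
+ * plus the ISP copy binary depending on the input system.
+ */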
+static int load_advanced_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info pre_in_info, gdc_in_info,
+ post_in_info, post_out_info,
+ vf_info, *vf_pp_in_info, *pipe_out_info,
+ *pipe_vf_out_info;
+ bool need_pp;
+ bool need_isp_copy = true;
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return 0;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ vf_info = *pipe_vf_out_info;
+ err = ia_css_util_check_vf_out_info(pipe_out_info, &vf_info);
+ if (err)
+ return err;
+ need_pp = need_capture_pp(pipe);
+
+ ia_css_frame_info_set_format(&vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe, &capture_pp_descr,
+ &post_out_info,
+ pipe_out_info, &vf_info);
+ err = ia_css_binary_find(&capture_pp_descr,
+ &pipe->pipe_settings.capture.capture_pp_binary);
+ if (err)
+ return err;
+ } else {
+ post_out_info = *pipe_out_info;
+ }
+
+ /* Post-gdc */
+ {
+ struct ia_css_binary_descr post_gdc_descr;
+
+ ia_css_pipe_get_post_gdc_binarydesc(pipe, &post_gdc_descr,
+ &post_in_info,
+ &post_out_info, &vf_info);
+ err = ia_css_binary_find(&post_gdc_descr,
+ &pipe->pipe_settings.capture.post_isp_binary);
+ if (err)
+ return err;
+ }
+
+ /* Gdc */
+ {
+ struct ia_css_binary_descr gdc_descr;
+
+ ia_css_pipe_get_gdc_binarydesc(pipe, &gdc_descr, &gdc_in_info,
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ err = ia_css_binary_find(&gdc_descr,
+ &pipe->pipe_settings.capture.anr_gdc_binary);
+ if (err)
+ return err;
+ }
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
+ pipe->pipe_settings.capture.post_isp_binary.left_padding;
+
+ /* Pre-gdc */
+ {
+ struct ia_css_binary_descr pre_gdc_descr;
+
+ ia_css_pipe_get_pre_gdc_binarydesc(pipe, &pre_gdc_descr, &pre_in_info,
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ err = ia_css_binary_find(&pre_gdc_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+ if (err)
+ return err;
+ }
+ pipe->pipe_settings.capture.pre_isp_binary.left_padding =
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
+
+ /* Viewfinder post-processing */
+ if (need_pp) {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
+ } else {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &pipe->pipe_settings.capture.vf_pp_binary);
+ if (err)
+ return err;
+ }
+
+ /* Copy */
+ if (IS_ISP2401)
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+
+ if (need_isp_copy)
+ load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
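+/* Load the pre-DE binary used by the bayer capture mode. */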
+static int load_bayer_isp_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info pre_isp_in_info, *pipe_out_info;
+ int err = 0;
+ struct ia_css_binary_descr pre_de_descr;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+ pipe_out_info = &pipe->output_info[0];
+
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return 0;
+
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err)
+ return err;
+
+ ia_css_pipe_get_pre_de_binarydesc(pipe, &pre_de_descr,
+ &pre_isp_in_info,
+ pipe_out_info);
+
+ err = ia_css_binary_find(&pre_de_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
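+/*
+ * Load the binaries for the low light capture mode: capture post-processing
+ * (when needed), post-ANR, ANR, pre-ANR and viewfinder post-processing,
+ * plus the ISP copy binary depending on the input system.
+ */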
+static int load_low_light_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info pre_in_info, anr_in_info,
+ post_in_info, post_out_info,
+ vf_info, *pipe_vf_out_info, *pipe_out_info,
+ *vf_pp_in_info;
+ bool need_pp;
+ bool need_isp_copy = true;
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return 0;
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+ pipe_out_info = &pipe->output_info[0];
+
+ vf_info = *pipe_vf_out_info;
+ err = ia_css_util_check_vf_out_info(pipe_out_info,
+ &vf_info);
+ if (err)
+ return err;
+ need_pp = need_capture_pp(pipe);
+
+ ia_css_frame_info_set_format(&vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe, &capture_pp_descr,
+ &post_out_info,
+ pipe_out_info, &vf_info);
+ err = ia_css_binary_find(&capture_pp_descr,
+ &pipe->pipe_settings.capture.capture_pp_binary);
+ if (err)
+ return err;
+ } else {
+ post_out_info = *pipe_out_info;
+ }
+
+ /* Post-anr */
+ {
+ struct ia_css_binary_descr post_anr_descr;
+
+ ia_css_pipe_get_post_anr_binarydesc(pipe,
+ &post_anr_descr, &post_in_info, &post_out_info, &vf_info);
+ err = ia_css_binary_find(&post_anr_descr,
+ &pipe->pipe_settings.capture.post_isp_binary);
+ if (err)
+ return err;
+ }
+
+ /* Anr */
+ {
+ struct ia_css_binary_descr anr_descr;
+
+ ia_css_pipe_get_anr_binarydesc(pipe, &anr_descr, &anr_in_info,
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ err = ia_css_binary_find(&anr_descr,
+ &pipe->pipe_settings.capture.anr_gdc_binary);
+ if (err)
+ return err;
+ }
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
+ pipe->pipe_settings.capture.post_isp_binary.left_padding;
+
+ /* Pre-anr */
+ {
+ struct ia_css_binary_descr pre_anr_descr;
+
+ ia_css_pipe_get_pre_anr_binarydesc(pipe, &pre_anr_descr, &pre_in_info,
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ err = ia_css_binary_find(&pre_anr_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+ if (err)
+ return err;
+ }
+ pipe->pipe_settings.capture.pre_isp_binary.left_padding =
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
+
+ /* Viewfinder post-processing */
+ if (need_pp) {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
+ } else {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+ vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &pipe->pipe_settings.capture.vf_pp_binary);
+ if (err)
+ return err;
+ }
+
+ /* Copy */
+ if (IS_ISP2401)
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+
+ if (need_isp_copy)
+ err = load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
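+/*
+ * Check whether the copy should be handled on the SP: true for a RAW-mode
+ * capture pipe whose input format is binary (e.g. JPEG) or whose pipe mode
+ * is COPY.
+ */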
+static bool copy_on_sp(struct ia_css_pipe *pipe)
+{
+ bool rval;
+
+ assert(pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "copy_on_sp() enter:\n");
+
+ rval = true;
+
+ rval &= (pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+
+ rval &= (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW);
+
+ rval &= ((pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_BINARY_8) ||
+ (pipe->config.mode == IA_CSS_PIPE_MODE_COPY));
+
+ return rval;
+}
+
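+/*
+ * Dispatch binary loading for a capture pipe according to the configured
+ * capture mode (raw, bayer, primary, advanced or low light). Binary (JPEG)
+ * input that is copied on the SP needs no ISP binaries at all.
+ */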
+static int load_capture_binaries(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+ bool must_be_raw;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ if (pipe->pipe_settings.capture.primary_binary[0].info) {
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+ }
+
+	/*
+	 * In primary, advanced, low light or bayer mode,
+	 * the input format must be raw.
+	 */
+ must_be_raw =
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT;
+ err = ia_css_util_check_input(&pipe->stream->config, must_be_raw, false);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
+ ia_css_frame_info_init(
+ &pipe->output_info[0],
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+ }
+
+ switch (pipe->config.default_capture_config.mode) {
+ case IA_CSS_CAPTURE_MODE_RAW:
+ err = load_copy_binaries(pipe);
+ if (!err && IS_ISP2401)
+ pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
+
+ break;
+ case IA_CSS_CAPTURE_MODE_BAYER:
+ err = load_bayer_isp_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_PRIMARY:
+ err = load_primary_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_ADVANCED:
+ err = load_advanced_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_LOW_LIGHT:
+ err = load_low_light_binaries(pipe);
+ break;
+ }
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static int
+unload_capture_binaries(struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if (!pipe || (pipe->mode != IA_CSS_PIPE_ID_CAPTURE &&
+ pipe->mode != IA_CSS_PIPE_ID_COPY)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.capture.copy_binary);
+ for (i = 0; i < MAX_NUM_PRIMARY_STAGES; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.capture.primary_binary[i]);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.pre_isp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.anr_gdc_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.post_isp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.capture_pp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.capture_ldc_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.vf_pp_binary);
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_yuv_scaler; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.capture.yuv_scaler_binary[i]);
+
+ kfree(pipe->pipe_settings.capture.is_output_stage);
+ pipe->pipe_settings.capture.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.capture.yuv_scaler_binary);
+ pipe->pipe_settings.capture.yuv_scaler_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+static bool
+need_downscaling(const struct ia_css_resolution in_res,
+ const struct ia_css_resolution out_res)
+{
+ if (in_res.width > out_res.width || in_res.height > out_res.height)
+ return true;
+
+ return false;
+}
+
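+/*
+ * A yuvpp pipe needs a YUV scaler stage when digital zoom is enabled, when
+ * a legacy YUV420 input must be converted to another output format, or when
+ * any enabled output port requires downscaling.
+ */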
+static bool
+need_yuv_scaler_stage(const struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+ struct ia_css_resolution in_res, out_res;
+
+ bool need_format_conversion = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);
+
+ /* TODO: make generic function */
+ need_format_conversion =
+ ((pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) &&
+ (pipe->output_info[0].format != IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8));
+
+ in_res = pipe->config.input_effective_res;
+
+ if (pipe->config.enable_dz)
+ return true;
+
+ if ((pipe->output_info[0].res.width != 0) && need_format_conversion)
+ return true;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_res = pipe->output_info[i].res;
+
+ /* A non-zero width means it is a valid output port */
+ if ((out_res.width != 0) && need_downscaling(in_res, out_res))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * TODO: it is temporarily created from ia_css_pipe_create_cas_scaler_desc
+ * which has some hard-coded knowledge which prevents reuse of the function.
+ * Later, merge this with ia_css_pipe_create_cas_scaler_desc
+ */
+static int ia_css_pipe_create_cas_scaler_desc_single_output(
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ struct ia_css_cas_binary_descr *descr)
+{
+ unsigned int i;
+ unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
+ int err = 0;
+ struct ia_css_frame_info tmp_in_info;
+ unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
+
+ assert(in_info);
+ assert(out_info);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc() enter:\n");
+
+	/* We assume that this function is used only for the single output port case. */
+ descr->num_output_stage = 1;
+
+ hor_ds_factor = CEIL_DIV(in_info->res.width, out_info->res.width);
+ ver_ds_factor = CEIL_DIV(in_info->res.height, out_info->res.height);
+ /* use the same horizontal and vertical downscaling factor for simplicity */
+ assert(hor_ds_factor == ver_ds_factor);
+
+ i = 1;
+ while (i < hor_ds_factor) {
+ descr->num_stage++;
+ i *= max_scale_factor_per_stage;
+ }
+
+ descr->in_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->in_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->internal_out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->internal_out_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->out_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->vf_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->vf_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool),
+ GFP_KERNEL);
+ if (!descr->is_output_stage) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+
+ tmp_in_info = *in_info;
+ for (i = 0; i < descr->num_stage; i++) {
+ descr->in_info[i] = tmp_in_info;
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= out_info->res.width) {
+ descr->is_output_stage[i] = true;
+ if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
+ descr->internal_out_info[i].res.width = out_info->res.width;
+ descr->internal_out_info[i].res.height = out_info->res.height;
+ descr->internal_out_info[i].padded_width = out_info->padded_width;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ } else {
+ assert(i == (descr->num_stage - 1));
+ descr->internal_out_info[i].res.width = 0;
+ descr->internal_out_info[i].res.height = 0;
+ }
+ descr->out_info[i].res.width = out_info->res.width;
+ descr->out_info[i].res.height = out_info->res.height;
+ descr->out_info[i].padded_width = out_info->padded_width;
+ descr->out_info[i].format = out_info->format;
+ if (vf_info) {
+ descr->vf_info[i].res.width = vf_info->res.width;
+ descr->vf_info[i].res.height = vf_info->res.height;
+ descr->vf_info[i].padded_width = vf_info->padded_width;
+ ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
+ } else {
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ descr->vf_info[i].padded_width = 0;
+ }
+ } else {
+ descr->is_output_stage[i] = false;
+ descr->internal_out_info[i].res.width = tmp_in_info.res.width /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].res.height = tmp_in_info.res.height /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(&descr->internal_out_info[i],
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
+ descr->out_info[i].res.width = 0;
+ descr->out_info[i].res.height = 0;
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ }
+ tmp_in_info = descr->internal_out_info[i];
+ }
+ERR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n",
+ err);
+ return err;
+}
+
+/* FIXME: merge most of this and single output version */
+static int
+ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pipe,
+ struct ia_css_cas_binary_descr *descr)
+{
+ struct ia_css_frame_info in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ struct ia_css_frame_info *out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info *vf_out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info tmp_in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ unsigned int i, j;
+ unsigned int hor_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
+ ver_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
+ scale_factor = 0;
+ unsigned int num_stages = 0;
+ int err = 0;
+
+ unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc() enter:\n");
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_info[i] = NULL;
+ vf_out_info[i] = NULL;
+ hor_scale_factor[i] = 0;
+ ver_scale_factor[i] = 0;
+ }
+
+ in_info.res = pipe->config.input_effective_res;
+ in_info.padded_width = in_info.res.width;
+ descr->num_output_stage = 0;
+ /* Find out how much scaling we need for each output */
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (pipe->output_info[i].res.width != 0) {
+ out_info[i] = &pipe->output_info[i];
+ if (pipe->vf_output_info[i].res.width != 0)
+ vf_out_info[i] = &pipe->vf_output_info[i];
+ descr->num_output_stage += 1;
+ }
+
+ if (out_info[i]) {
+ hor_scale_factor[i] = CEIL_DIV(in_info.res.width, out_info[i]->res.width);
+ ver_scale_factor[i] = CEIL_DIV(in_info.res.height, out_info[i]->res.height);
+ /* use the same horizontal and vertical scaling factor for simplicity */
+ assert(hor_scale_factor[i] == ver_scale_factor[i]);
+ scale_factor = 1;
+ do {
+ num_stages++;
+ scale_factor *= max_scale_factor_per_stage;
+ } while (scale_factor < hor_scale_factor[i]);
+
+ in_info.res = out_info[i]->res;
+ }
+ }
+
+ if (need_yuv_scaler_stage(pipe) && (num_stages == 0))
+ num_stages = 1;
+
+ descr->num_stage = num_stages;
+
+ descr->in_info = kmalloc_array(descr->num_stage,
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->in_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->internal_out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->internal_out_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->out_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->out_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->vf_info = kmalloc(descr->num_stage *
+ sizeof(struct ia_css_frame_info),
+ GFP_KERNEL);
+ if (!descr->vf_info) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool),
+ GFP_KERNEL);
+ if (!descr->is_output_stage) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (out_info[i]) {
+ if (i > 0) {
+ assert((out_info[i - 1]->res.width >= out_info[i]->res.width) &&
+ (out_info[i - 1]->res.height >= out_info[i]->res.height));
+ }
+ }
+ }
+
+ tmp_in_info.res = pipe->config.input_effective_res;
+ tmp_in_info.format = IA_CSS_FRAME_FORMAT_YUV420;
+ for (i = 0, j = 0; i < descr->num_stage; i++) {
+ assert(j < 2);
+ assert(out_info[j]);
+
+ descr->in_info[i] = tmp_in_info;
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <=
+ out_info[j]->res.width) {
+ descr->is_output_stage[i] = true;
+ if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
+ descr->internal_out_info[i].res.width = out_info[j]->res.width;
+ descr->internal_out_info[i].res.height = out_info[j]->res.height;
+ descr->internal_out_info[i].padded_width = out_info[j]->padded_width;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ } else {
+ assert(i == (descr->num_stage - 1));
+ descr->internal_out_info[i].res.width = 0;
+ descr->internal_out_info[i].res.height = 0;
+ }
+ descr->out_info[i].res.width = out_info[j]->res.width;
+ descr->out_info[i].res.height = out_info[j]->res.height;
+ descr->out_info[i].padded_width = out_info[j]->padded_width;
+ descr->out_info[i].format = out_info[j]->format;
+ if (vf_out_info[j]) {
+ descr->vf_info[i].res.width = vf_out_info[j]->res.width;
+ descr->vf_info[i].res.height = vf_out_info[j]->res.height;
+ descr->vf_info[i].padded_width = vf_out_info[j]->padded_width;
+ ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
+ } else {
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ descr->vf_info[i].padded_width = 0;
+ }
+ j++;
+ } else {
+ descr->is_output_stage[i] = false;
+ descr->internal_out_info[i].res.width = tmp_in_info.res.width /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].res.height = tmp_in_info.res.height /
+ max_scale_factor_per_stage;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(&descr->internal_out_info[i],
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
+ descr->out_info[i].res.width = 0;
+ descr->out_info[i].res.height = 0;
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ }
+ tmp_in_info = descr->internal_out_info[i];
+ }
+ERR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n",
+ err);
+ return err;
+}
+
+static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
+ *descr)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_destroy_cas_scaler_desc() enter:\n");
+ kfree(descr->in_info);
+ descr->in_info = NULL;
+ kfree(descr->internal_out_info);
+ descr->internal_out_info = NULL;
+ kfree(descr->out_info);
+ descr->out_info = NULL;
+ kfree(descr->vf_info);
+ descr->vf_info = NULL;
+ kfree(descr->is_output_stage);
+ descr->is_output_stage = NULL;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipe_destroy_cas_scaler_desc() leave\n");
+}
+
+static int
+load_yuvpp_binaries(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+ bool need_scaler = false;
+ struct ia_css_frame_info *vf_pp_in_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_yuvpp_settings *mycs;
+ struct ia_css_binary *next_binary;
+ struct ia_css_cas_binary_descr cas_scaler_descr = { };
+ unsigned int i, j;
+ bool need_isp_copy_binary = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);
+
+ if (pipe->pipe_settings.yuvpp.copy_binary.info)
+ goto ERR;
+
+	/* Set both must_be_raw and must_be_yuv to false so that yuvpp can take RGB inputs */
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err)
+ goto ERR;
+
+ mycs = &pipe->pipe_settings.yuvpp;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (pipe->vf_output_info[i].res.width != 0) {
+ err = ia_css_util_check_vf_out_info(&pipe->output_info[i],
+ &pipe->vf_output_info[i]);
+ if (err)
+ goto ERR;
+ }
+ vf_pp_in_info[i] = NULL;
+ }
+
+ need_scaler = need_yuv_scaler_stage(pipe);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_scaler) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+
+ err = ia_css_pipe_create_cas_scaler_desc(pipe,
+ &cas_scaler_descr);
+ if (err)
+ goto ERR;
+ mycs->num_output = cas_scaler_descr.num_output_stage;
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kcalloc(cas_scaler_descr.num_stage,
+ sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (!mycs->yuv_scaler_binary) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ mycs->is_output_stage = kcalloc(cas_scaler_descr.num_stage,
+ sizeof(bool), GFP_KERNEL);
+ if (!mycs->is_output_stage) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr,
+ &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err)
+ goto ERR;
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ } else {
+ mycs->num_output = 1;
+ }
+
+ if (need_scaler)
+ next_binary = &mycs->yuv_scaler_binary[0];
+ else
+ next_binary = NULL;
+
+ /*
+ * NOTES
+	 * - Why does the "yuvpp" pipe need "isp_copy_binary" (i.e. ISP Copy) when
+	 *   its input is "ATOMISP_INPUT_FORMAT_YUV422_8"?
+	 *
+	 * In most use cases, the first stage in the "yuvpp" pipe is the
+	 * "yuv_scale_binary". However, the "yuv_scale_binary" does NOT support
+	 * "IA_CSS_STREAM_FORMAT_YUV422_8" as its input-frame format.
+	 *
+	 * Hence, the "isp_copy_binary" is required to be present in front of the
+	 * "yuv_scale_binary". It translates the input frame to a frame format
+	 * that is supported by the "yuv_scale_binary".
+ *
+ * Please refer to "FrameWork/css/isp/pipes/capture_pp/capture_pp_1.0/capture_
+ * pp_defs.h" for the list of input-frame formats that are supported by the
+ * "yuv_scale_binary".
+ */
+ if (IS_ISP2401)
+ need_isp_copy_binary =
+ (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8);
+ else
+ need_isp_copy_binary = true;
+
+ if (need_isp_copy_binary) {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ next_binary);
+
+ if (err)
+ goto ERR;
+
+ /*
+ * NOTES
+ * - Why is "pipe->pipe_settings.capture.copy_binary.online" specified?
+ *
+ * In some use cases, the first stage in the "yuvpp" pipe is the
+ * "isp_copy_binary". The "isp_copy_binary" is designed to process
+ * the input from either the system DDR or from the IPU internal VMEM.
+ * So it provides the flag "online" to specify where its input is from,
+ * i.e.:
+ *
+ * (1) "online <= true", the input is from the IPU internal VMEM.
+ * (2) "online <= false", the input is from the system DDR.
+ *
+ * In other use cases, the first stage in the "yuvpp" pipe is the
+	 * "yuv_scale_binary". The "yuv_scale_binary" is designed to process the
+ * input ONLY from the system DDR. So it does not provide the flag "online"
+ * to specify where its input is from.
+ */
+ pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
+ }
+
+ /* Viewfinder post-processing */
+ if (need_scaler) {
+ for (i = 0, j = 0; i < mycs->num_yuv_scaler; i++) {
+ if (mycs->is_output_stage[i]) {
+ assert(j < 2);
+ vf_pp_in_info[j] =
+ &mycs->yuv_scaler_binary[i].vf_frame_info;
+ j++;
+ }
+ }
+ mycs->num_vf_pp = j;
+ } else {
+ vf_pp_in_info[0] =
+ &mycs->copy_binary.vf_frame_info;
+ for (i = 1; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
+ vf_pp_in_info[i] = NULL;
+
+ mycs->num_vf_pp = 1;
+ }
+ mycs->vf_pp_binary = kcalloc(mycs->num_vf_pp,
+ sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (!mycs->vf_pp_binary) {
+ err = -ENOMEM;
+ goto ERR;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ for (i = 0; i < mycs->num_vf_pp; i++) {
+ if (pipe->vf_output_info[i].res.width != 0) {
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info[i], &pipe->vf_output_info[i]);
+ err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary[i]);
+ if (err)
+ goto ERR;
+ }
+ }
+ }
+
+ERR:
+ if (need_scaler)
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "load_yuvpp_binaries() leave, err=%d\n",
+ err);
+ return err;
+}
+
+static int
+unload_yuvpp_binaries(struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.copy_binary);
+ for (i = 0; i < pipe->pipe_settings.yuvpp.num_yuv_scaler; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.yuv_scaler_binary[i]);
+
+ for (i = 0; i < pipe->pipe_settings.yuvpp.num_vf_pp; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.vf_pp_binary[i]);
+
+ kfree(pipe->pipe_settings.yuvpp.is_output_stage);
+ pipe->pipe_settings.yuvpp.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.yuvpp.yuv_scaler_binary);
+ pipe->pipe_settings.yuvpp.yuv_scaler_binary = NULL;
+ kfree(pipe->pipe_settings.yuvpp.vf_pp_binary);
+ pipe->pipe_settings.yuvpp.vf_pp_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
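+/* Start a yuvpp pipe: send the MIPI frames and start the pipeline on its SP thread. */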
+static int yuvpp_start(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode yuvpp_pipe_input_mode;
+ unsigned int thread_id;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ yuvpp_pipe_input_mode = pipe->stream->config.mode;
+
+ sh_css_metrics_start_frame();
+
+ /* multi stream video needs mipi buffers */
+
+ err = send_mipi_frames(pipe);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ start_pipe(pipe, copy_ovrd, yuvpp_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static int
+sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+	/* PIPE_MODE_COPY has no binaries, but has output frames to outside */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+ }
+
+ switch (pipe->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = unload_preview_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = unload_video_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = unload_capture_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = unload_yuvpp_binaries(pipe);
+ break;
+ default:
+ break;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
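+/*
+ * Load the binaries for a pipe according to its mode. On failure, any
+ * partially loaded binaries are unloaded again.
+ */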
+static int
+sh_css_pipe_load_binaries(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+
+ assert(pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_pipe_load_binaries() enter:\n");
+
+	/* PIPE_MODE_COPY has no binaries, but has output frames to outside */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ return err;
+
+ switch (pipe->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = load_preview_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = load_video_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = load_capture_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = load_yuvpp_binaries(pipe);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (err) {
+ if (sh_css_pipe_unload_binaries(pipe)) {
+			/*
+			 * Currently CSS does not support multiple error
+			 * returns in a single function, so use -EINVAL in
+			 * this case.
+			 */
+ err = -EINVAL;
+ }
+ }
+ return err;
+}
+
+static int
+create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me;
+ int err = 0;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL,
+ *copy_stage = NULL,
+ *yuv_scaler_stage = NULL;
+ struct ia_css_binary *copy_binary,
+ *vf_pp_binary,
+ *yuv_scaler_binary;
+ bool need_scaler = false;
+ unsigned int num_stage, num_output_stage;
+ unsigned int i, j;
+
+ struct ia_css_frame *in_frame = NULL;
+ struct ia_css_frame *out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame *bin_out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_pipeline_stage_desc stage_desc;
+ bool need_in_frameinfo_memory = false;
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_frame[i] = NULL;
+ vf_frame[i] = NULL;
+ }
+ ia_css_pipe_util_create_output_frames(bin_out_frame);
+ num_stage = pipe->pipe_settings.yuvpp.num_yuv_scaler;
+ num_output_stage = pipe->pipe_settings.yuvpp.num_output;
+
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+ buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && continuous));
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ }
+ /*
+ * the input frame can come from:
+ *
+ * a) memory: connect yuvscaler to me->in_frame
+ * b) sensor, via copy binary: connect yuvscaler to copy binary later
+ * on
+ */
+ if (need_in_frameinfo_memory) {
+ /* TODO: improve for different input formats. */
+
+ /*
+ * "pipe->stream->config.input_config.format" represents the sensor output
+ * frame format, e.g. YUV422 8-bit.
+ *
+ * "in_frame_format" represents the imaging pipe's input frame format, e.g.
+ * Bayer-Quad RAW.
+ */
+ int in_frame_format;
+
+ if (pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) {
+ in_frame_format = IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8;
+ } else if (pipe->stream->config.input_config.format ==
+ ATOMISP_INPUT_FORMAT_YUV422_8) {
+ /*
+ * When the sensor output frame format is "ATOMISP_INPUT_FORMAT_YUV422_8",
+ * the "isp_copy_var" binary is selected as the first stage in the yuvpp
+ * pipe.
+ *
+ * For the "isp_copy_var" binary, it reads the YUV422-8 pixels from
+ * the frame buffer (at DDR) to the frame-line buffer (at VMEM).
+ *
+			 * As of now, the "isp_copy_var" binary does NOT provide a separate
+			 * frame-line buffer to store the YUV422-8 pixels. Instead, it stores
+ * the YUV422-8 pixels in the frame-line buffer which is designed to
+ * store the Bayer-Quad RAW pixels.
+ *
+ * To direct the "isp_copy_var" binary reading from the RAW frame-line
+ * buffer, its input frame format must be specified as "IA_CSS_FRAME_
+ * FORMAT_RAW".
+ */
+ in_frame_format = IA_CSS_FRAME_FORMAT_RAW;
+ } else {
+ in_frame_format = IA_CSS_FRAME_FORMAT_NV12;
+ }
+
+ err = init_in_frameinfo_memory_defaults(pipe,
+ &me->in_frame,
+ in_frame_format);
+
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ in_frame = &me->in_frame;
+ } else {
+ in_frame = NULL;
+ }
+
+ for (i = 0; i < num_output_stage; i++) {
+ assert(i < IA_CSS_PIPE_MAX_OUTPUT_STAGE);
+ if (pipe->output_info[i].res.width != 0) {
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[i], i);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ out_frame[i] = &me->out_frame[i];
+ }
+
+ /* Construct vf_frame info (only in case we have VF) */
+ if (pipe->vf_output_info[i].res.width != 0) {
+ err = init_vf_frameinfo_defaults(pipe, &me->vf_frame[i], i);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ vf_frame[i] = &me->vf_frame[i];
+ }
+ }
+
+ copy_binary = &pipe->pipe_settings.yuvpp.copy_binary;
+ vf_pp_binary = pipe->pipe_settings.yuvpp.vf_pp_binary;
+ yuv_scaler_binary = pipe->pipe_settings.yuvpp.yuv_scaler_binary;
+ need_scaler = need_yuv_scaler_stage(pipe);
+
+ if (pipe->pipe_settings.yuvpp.copy_binary.info) {
+ struct ia_css_frame *in_frame_local = NULL;
+
+ if (IS_ISP2401 && !online) {
+			/* After ISP copy is enabled, in_frame needs to be passed. */
+ in_frame_local = in_frame;
+ }
+
+ if (need_scaler) {
+ ia_css_pipe_util_set_output_frames(bin_out_frame,
+ 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ bin_out_frame,
+ in_frame_local,
+ NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(bin_out_frame,
+ 0, out_frame[0]);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ bin_out_frame,
+ in_frame_local,
+ NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &copy_stage);
+
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (copy_stage) {
+ /* if we use yuv scaler binary, vf output should be from there */
+ copy_stage->args.copy_vf = !need_scaler;
+ /* for yuvpp pipe, it should always be enabled */
+ copy_stage->args.copy_output = true;
+ /* connect output of copy binary to input of yuv scaler */
+ in_frame = copy_stage->args.out_frame[0];
+ }
+ }
+
+ if (need_scaler) {
+ struct ia_css_frame *tmp_out_frame = NULL;
+ struct ia_css_frame *tmp_vf_frame = NULL;
+ struct ia_css_frame *tmp_in_frame = in_frame;
+
+ for (i = 0, j = 0; i < num_stage; i++) {
+ assert(j < num_output_stage);
+ if (pipe->pipe_settings.yuvpp.is_output_stage[i]) {
+ tmp_out_frame = out_frame[j];
+ tmp_vf_frame = vf_frame[j];
+ } else {
+ tmp_out_frame = NULL;
+ tmp_vf_frame = NULL;
+ }
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
+ tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ if (pipe->pipe_settings.yuvpp.is_output_stage[i]) {
+ if (tmp_vf_frame && (tmp_vf_frame->frame_info.res.width != 0)) {
+ in_frame = yuv_scaler_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame,
+ tmp_vf_frame,
+ &vf_pp_binary[j],
+ &vf_pp_stage);
+
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ j++;
+ }
+ }
+ } else if (copy_stage) {
+ if (vf_frame[0] && vf_frame[0]->frame_info.res.width != 0) {
+ in_frame = copy_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame[0],
+ &vf_pp_binary[0], &vf_pp_stage);
+ }
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline,
+ pipe->stream->config.continuous);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+
+ return 0;
+}
+
+static int
+create_host_copy_pipeline(struct ia_css_pipe *pipe,
+ unsigned int max_input_width,
+ struct ia_css_frame *out_frame)
+{
+ struct ia_css_pipeline *me;
+ int err = 0;
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_copy_pipeline() enter:\n");
+
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+ /* Construct out_frame info */
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
+ ia_css_frame_info_init(&out_frame->frame_info, JPEG_BYTES, 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8, 0);
+ } else if (out_frame->frame_info.format == IA_CSS_FRAME_FORMAT_RAW) {
+ out_frame->frame_info.raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ }
+
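+	/* The copy pipeline consists of a single raw-copy stage run by the SP. */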
+ me->num_stages = 1;
+ me->pipe_id = IA_CSS_PIPE_ID_COPY;
+ pipe->mode = IA_CSS_PIPE_ID_COPY;
+
+ ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
+ IA_CSS_PIPELINE_RAW_COPY,
+ max_input_width);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc, NULL);
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline,
+ pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_copy_pipeline() leave:\n");
+
+ return err;
+}
+
+static int
+create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me = &pipe->pipeline;
+ int err = 0;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_frame *out_frame = &me->out_frame[0];
+ struct ia_css_pipeline_stage *out_stage = NULL;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ unsigned int max_input_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_isyscopy_capture_pipeline() enter:\n");
+ ia_css_pipeline_clean(me);
+
+ /* Construct out_frame info */
+ err = sh_css_pipe_get_output_frame_info(pipe, &out_frame->frame_info, 0);
+ if (err)
+ return err;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, &queue_id);
+ out_frame->dynamic_queue_id = queue_id;
+ out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME;
+
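+	/* The isys copy capture pipeline consists of a single ISYS-copy stage run by the SP. */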
+ me->num_stages = 1;
+ me->pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
+ IA_CSS_PIPELINE_ISYS_COPY,
+ max_input_width);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, &out_stage);
+ if (err)
+ return err;
+
+ ia_css_pipeline_finalize_stages(me, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_isyscopy_capture_pipeline() leave:\n");
+
+ return err;
+}
+
+static int
+create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me;
+ int err = 0;
+ enum ia_css_capture_mode mode;
+ struct ia_css_pipeline_stage *current_stage = NULL;
+ struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
+ struct ia_css_binary *copy_binary,
+ *primary_binary[MAX_NUM_PRIMARY_STAGES],
+ *vf_pp_binary,
+ *pre_isp_binary,
+ *anr_gdc_binary,
+ *post_isp_binary,
+ *yuv_scaler_binary,
+ *capture_pp_binary,
+ *capture_ldc_binary;
+ bool need_pp = false;
+ bool raw;
+
+ struct ia_css_frame *in_frame;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ bool need_in_frameinfo_memory = false;
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+ unsigned int i, num_yuv_scaler, num_primary_stage;
+ bool need_yuv_pp = false;
+ bool *is_output_stage = NULL;
+ bool need_ldc = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe);
+ assert(pipe->stream);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ me = &pipe->pipeline;
+ mode = pipe->config.default_capture_config.mode;
+ raw = (mode == IA_CSS_CAPTURE_MODE_RAW);
+ ia_css_pipeline_clean(me);
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+ if (IS_ISP2401) {
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+		 * - Direct Sensor Mode Online Capture
+		 * - Direct Sensor Mode Continuous Capture
+		 * - Buffered Sensor Mode Online Capture
+		 * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor &&
+ (online || continuous)));
+ } else {
+		/* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+ if (need_in_frameinfo_memory) {
+ err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
+ IA_CSS_FRAME_FORMAT_RAW);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ in_frame = &me->in_frame;
+ } else {
+ in_frame = NULL;
+ }
+
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ out_frame = &me->out_frame[0];
+
+ /* Construct vf_frame info (only in case we have VF) */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ if (mode == IA_CSS_CAPTURE_MODE_RAW || mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ /* These modes don't support viewfinder output */
+ vf_frame = NULL;
+ } else {
+ init_vf_frameinfo_defaults(pipe, &me->vf_frame[0], 0);
+ vf_frame = &me->vf_frame[0];
+ }
+ } else {
+ vf_frame = NULL;
+ }
+
+ copy_binary = &pipe->pipe_settings.capture.copy_binary;
+ num_primary_stage = pipe->pipe_settings.capture.num_primary_stage;
+ if ((num_primary_stage == 0) && (mode == IA_CSS_CAPTURE_MODE_PRIMARY)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_primary_stage; i++)
+ primary_binary[i] = &pipe->pipe_settings.capture.primary_binary[i];
+
+ vf_pp_binary = &pipe->pipe_settings.capture.vf_pp_binary;
+ pre_isp_binary = &pipe->pipe_settings.capture.pre_isp_binary;
+ anr_gdc_binary = &pipe->pipe_settings.capture.anr_gdc_binary;
+ post_isp_binary = &pipe->pipe_settings.capture.post_isp_binary;
+ capture_pp_binary = &pipe->pipe_settings.capture.capture_pp_binary;
+ yuv_scaler_binary = pipe->pipe_settings.capture.yuv_scaler_binary;
+ num_yuv_scaler = pipe->pipe_settings.capture.num_yuv_scaler;
+ is_output_stage = pipe->pipe_settings.capture.is_output_stage;
+ capture_ldc_binary = &pipe->pipe_settings.capture.capture_ldc_binary;
+
+ need_pp = (need_capture_pp(pipe) || pipe->output_stage) &&
+ mode != IA_CSS_CAPTURE_MODE_RAW &&
+ mode != IA_CSS_CAPTURE_MODE_BAYER;
+ need_yuv_pp = (yuv_scaler_binary && yuv_scaler_binary->info);
+ need_ldc = (capture_ldc_binary && capture_ldc_binary->info);
+
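+	/* Add the copy stage first when a copy binary was loaded for this pipe. */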
+ if (pipe->pipe_settings.capture.copy_binary.info) {
+ if (raw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ if (IS_ISP2401) {
+ if (!continuous) {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ in_frame,
+ NULL);
+ } else {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ in_frame,
+ NULL);
+ }
+ } else {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ NULL, NULL);
+ }
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0,
+ in_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ copy_binary,
+ out_frames,
+ NULL, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else if (pipe->stream->config.continuous) {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ }
+
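+	/*
+	 * Add the ISP stages for the selected capture mode: the primary
+	 * binary chain, or pre-ISP/ANR-GDC/post-ISP for advanced and
+	 * low-light modes, or pre-ISP only for bayer mode.
+	 */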
+ if (mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ struct ia_css_frame *local_in_frame = NULL;
+ struct ia_css_frame *local_out_frame = NULL;
+
+ for (i = 0; i < num_primary_stage; i++) {
+ if (i == 0)
+ local_in_frame = in_frame;
+ else
+ local_in_frame = NULL;
+ if (!need_pp && (i == num_primary_stage - 1) && (!IS_ISP2401 || !need_ldc))
+ local_out_frame = out_frame;
+ else
+ local_out_frame = NULL;
+ ia_css_pipe_util_set_output_frames(out_frames, 0, local_out_frame);
+ /*
+			 * WARNING: The #ifdef flag has been added below as a
+			 * temporary solution to solve the problem of enabling the
+			 * view finder in a single binary in a capture flow. The
+			 * vf-pp stage has been removed from Skycam in the solution
+			 * provided. The vf-pp stage should be re-introduced when
+			 * required. This should not be considered as a clean solution.
+			 * Proper investigation should be done to come up with the clean
+			 * solution.
+ */
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ primary_binary[i],
+ out_frames,
+ local_in_frame,
+ NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+		/* If we use copy instead of primary, the input must be YUV instead of RAW */
+ current_stage->args.copy_vf =
+ primary_binary[0]->info->sp.pipeline.mode ==
+ IA_CSS_BINARY_MODE_COPY;
+ current_stage->args.copy_output = current_stage->args.copy_vf;
+ } else if (mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ NULL);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, anr_gdc_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ NULL);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (need_pp) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ post_isp_binary,
+ out_frames,
+ NULL, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0,
+ out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ post_isp_binary,
+ out_frames,
+ NULL, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ &current_stage);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else if (mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
+ NULL);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
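+	/* Optional post-processing: LDC first when needed, then capture_pp. */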
+ if (need_pp && current_stage) {
+ struct ia_css_frame *local_in_frame = NULL;
+
+ local_in_frame = current_stage->args.out_frame[0];
+
+ if (need_ldc) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_ldc_binary,
+ out_frames,
+ local_in_frame,
+ NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ local_in_frame = current_stage->args.out_frame[0];
+ }
+ err = add_capture_pp_stage(pipe, me, local_in_frame,
+ need_yuv_pp ? NULL : out_frame,
+ capture_pp_binary,
+ &current_stage);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
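+	/* Optional YUV scaling chain; the output-stage scaler writes the final output frame. */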
+ if (need_yuv_pp && current_stage) {
+ struct ia_css_frame *tmp_in_frame = current_stage->args.out_frame[0];
+ struct ia_css_frame *tmp_out_frame = NULL;
+
+ for (i = 0; i < num_yuv_scaler; i++) {
+ if (is_output_stage[i])
+ tmp_out_frame = out_frame;
+ else
+ tmp_out_frame = NULL;
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
+ tmp_out_frame, NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ }
+ }
+
+ /*
+	 * WARNING: The #ifdef flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The vf-pp
+ * stage has been removed from Skycam in the solution provided.
+ * The vf-pp stage should be re-introduced when required. This
+ * should not be considered as a clean solution. Proper
+ * investigation should be done to come up with the clean solution.
+ */
+ if (mode != IA_CSS_CAPTURE_MODE_RAW &&
+ mode != IA_CSS_CAPTURE_MODE_BAYER &&
+ current_stage && vf_frame) {
+ in_frame = current_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
+ &current_stage);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_regular_capture_pipeline() leave:\n");
+
+ return 0;
+}
+
+static int
+create_host_capture_pipeline(struct ia_css_pipe *pipe)
+{
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ err = create_host_isyscopy_capture_pipeline(pipe);
+ else
+ err = create_host_regular_capture_pipeline(pipe);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+
+ return err;
+}
+
+static int capture_start(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me;
+ unsigned int thread_id;
+
+ int err = 0;
+ enum sh_css_pipe_config_override copy_ovrd;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if (!pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ me = &pipe->pipeline;
+
+ if ((pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) &&
+ (pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
+ if (copy_on_sp(pipe)) {
+ err = start_copy_on_sp(pipe, &me->out_frame[0]);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ /* old isys: need to send_mipi_frames() in all pipe modes */
+ if (!IS_ISP2401 || pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ err = send_mipi_frames(pipe);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
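+	/* copy_ovrd carries one bit for this pipe's SP thread; it is passed on to start_pipe(). */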
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);
+
+ /*
+ * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
+ * which is currently done in start_binary(); but COPY pipe contains no binary,
+ * and does not call start_binary(); so we need to configure the rx here.
+ */
+ if (!IS_ISP2401 &&
+ pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
+ pipe->stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
+ pipe->stream->config.mode);
+ pipe->stream->reconfigure_css_rx = false;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static int
+sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx)
+{
+ assert(pipe);
+ assert(info);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_output_frame_info() enter:\n");
+
+ *info = pipe->output_info[idx];
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
+ ia_css_frame_info_init(
+ info,
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ } else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
+ info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) {
+ info->raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_output_frame_info() leave:\n");
+ return 0;
+}
+
+void
+ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height)
+{
+ assert(stream);
+
+ ia_css_inputfifo_send_input_frame(
+ data, width, height,
+ stream->config.channel_id,
+ stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+}
+
+void
+ia_css_stream_start_input_frame(const struct ia_css_stream *stream)
+{
+ assert(stream);
+
+ ia_css_inputfifo_start_frame(
+ stream->config.channel_id,
+ stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+}
+
+void
+ia_css_stream_send_input_line(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2)
+{
+ assert(stream);
+
+ ia_css_inputfifo_send_line(stream->config.channel_id,
+ data, width, data2, width2);
+}
+
+void
+ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
+ enum atomisp_input_format format,
+ const unsigned short *data,
+ unsigned int width)
+{
+ assert(stream);
+ if (!data || width == 0)
+ return;
+ ia_css_inputfifo_send_embedded_line(stream->config.channel_id,
+ format, data, width);
+}
+
+void
+ia_css_stream_end_input_frame(const struct ia_css_stream *stream)
+{
+ assert(stream);
+
+ ia_css_inputfifo_end_frame(stream->config.channel_id);
+}
+
+bool
+ia_css_pipeline_uses_params(struct ia_css_pipeline *me)
+{
+ struct ia_css_pipeline_stage *stage;
+
+ assert(me);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() enter: me=%p\n", me);
+
+ for (stage = me->stages; stage; stage = stage->next)
+ if (stage->binary_info && stage->binary_info->enable.params) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() leave: return_bool=true\n");
+ return true;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() leave: return_bool=false\n");
+ return false;
+}
+
+/*
+ * @brief Tag a specific frame in continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
+int ia_css_stream_capture_frame(struct ia_css_stream *stream,
+ unsigned int exp_id)
+{
+ struct sh_css_tag_descr tag_descr;
+ u32 encoded_tag_descr;
+ int err;
+
+ assert(stream);
+ IA_CSS_ENTER("exp_id=%d", exp_id);
+
+ /* Only continuous streams have a tagger */
+ if (exp_id == 0 || !stream->config.continuous) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LEAVE_ERR(-EBUSY);
+ return -EBUSY;
+ }
+
+ /* Create the tag descriptor from the parameters */
+ sh_css_create_tag_descr(0, 0, 0, exp_id, &tag_descr);
+ /* Encode the tag descriptor into a 32-bit value */
+ encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
+ /*
+ * Enqueue the encoded tag to the host2sp queue.
+	 * Note: The pipe and stage IDs for the tag_cmd queue are hard-coded to 0
+	 * on both the host and the SP side, mainly because a single tag_cmd
+	 * queue is sufficient.
+ */
+ err = ia_css_bufq_enqueue_tag_cmd(encoded_tag_descr);
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+/*
+ * @brief Configure the continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
+int ia_css_stream_capture(struct ia_css_stream *stream, int num_captures,
+ unsigned int skip, int offset)
+{
+ struct sh_css_tag_descr tag_descr;
+ unsigned int encoded_tag_descr;
+ int return_err;
+
+ if (!stream)
+ return -EINVAL;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() enter: num_captures=%d, skip=%d, offset=%d\n",
+ num_captures, skip, offset);
+
+ /* Check if the tag descriptor is valid */
+ if (num_captures < SH_CSS_MINIMUM_TAG_ID) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ -EINVAL);
+ return -EINVAL;
+ }
+
+ /* Create the tag descriptor from the parameters */
+ sh_css_create_tag_descr(num_captures, skip, offset, 0, &tag_descr);
+
+ /* Encode the tag descriptor into a 32-bit value */
+ encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leaving:queues unavailable\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Enqueue the encoded tag to the host2sp queue.
+	 * Note: The pipe and stage IDs for the tag_cmd queue are hard-coded to 0
+	 * on both the host and the SP side, mainly because a single tag_cmd
+	 * queue is sufficient.
+ */
+ return_err = ia_css_bufq_enqueue_tag_cmd((uint32_t)encoded_tag_descr);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ return_err);
+
+ return return_err;
+}
+
+static void
+sh_css_init_host_sp_control_vars(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started;
+
+ unsigned int HIVE_ADDR_host_sp_queues_initialized;
+ unsigned int HIVE_ADDR_sp_sleep_mode;
+ unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+ unsigned int HIVE_ADDR_sp_stop_copy_preview;
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int o = offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_init_host_sp_control_vars() enter: void\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;
+
+ HIVE_ADDR_host_sp_queues_initialized =
+ fw->info.sp.host_sp_queues_initialized;
+ HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
+ HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
+ HIVE_ADDR_sp_stop_copy_preview = fw->info.sp.stop_copy_preview;
+ HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
+
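+	/* Initialize the SP control variables in SP DMEM and mark the host2sp command channel as ready. */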
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started),
+ (uint32_t)(0));
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_queues_initialized),
+ (uint32_t)(0));
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sleep_mode),
+ (uint32_t)(0));
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
+ (uint32_t)(false));
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_stop_copy_preview),
+ my_css.stop_copy_preview ? (uint32_t)(1) : (uint32_t)(0));
+ store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready);
+
+ for (i = 0; i < N_CSI_PORTS; i++) {
+ sh_css_update_host2sp_num_mipi_frames
+ (my_css.num_mipi_frames[i]);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_init_host_sp_control_vars() leave: return_void\n");
+}
+
+/*
+ * create the internal structures and fill in the configuration data
+ */
+
+static const struct
+ia_css_pipe_config ia_css_pipe_default_config = DEFAULT_PIPE_CONFIG;
+
+void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_config_defaults()\n");
+ memcpy(pipe_config, &ia_css_pipe_default_config, sizeof(*pipe_config));
+}
+
+void
+ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config *extra_config)
+{
+ if (!extra_config) {
+ IA_CSS_ERROR("NULL input parameter");
+ return;
+ }
+
+ extra_config->enable_raw_binning = false;
+ extra_config->enable_yuv_ds = false;
+ extra_config->enable_high_speed = false;
+ extra_config->enable_dvs_6axis = false;
+ extra_config->enable_reduced_pipe = false;
+ extra_config->disable_vf_pp = false;
+ extra_config->enable_fractional_ds = false;
+}
+
+void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_config_defaults()\n");
+ assert(stream_config);
+ memset(stream_config, 0, sizeof(*stream_config));
+ stream_config->online = true;
+ stream_config->left_padding = -1;
+ stream_config->pixels_per_clock = 1;
+ /*
+ * temporary default value for backwards compatibility.
+ * This field used to be hardcoded within CSS but this has now
+ * been moved to the stream_config struct.
+ */
+ stream_config->source.port.rxcount = 0x04040404;
+}
+
+int ia_css_pipe_create(const struct ia_css_pipe_config *config,
+ struct ia_css_pipe **pipe)
+{
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("config = %p, pipe = %p", config, pipe);
+
+ if (!config || !pipe) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ err = ia_css_pipe_create_extra(config, NULL, pipe);
+
+ if (err == 0)
+ IA_CSS_LOG("pipe created successfully = %p", *pipe);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+
+ return err;
+}
+
+int
+ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
+ const struct ia_css_pipe_extra_config *extra_config,
+ struct ia_css_pipe **pipe)
+{
+ int err = -EINVAL;
+ struct ia_css_pipe *internal_pipe = NULL;
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("config = %p, extra_config = %p and pipe = %p", config, extra_config, pipe);
+
+ /* do not allow to create more than the maximum limit */
+ if (my_css.pipe_counter >= IA_CSS_PIPELINE_NUM_MAX) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOSPC);
+ return -EINVAL;
+ }
+
+ if ((!pipe) || (!config)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ ia_css_debug_dump_pipe_config(config);
+ ia_css_debug_dump_pipe_extra_config(extra_config);
+
+ err = create_pipe(config->mode, &internal_pipe, false);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* now we have a pipe structure to fill */
+ internal_pipe->config = *config;
+ if (extra_config)
+ internal_pipe->extra_config = *extra_config;
+ else
+ ia_css_pipe_extra_config_defaults(&internal_pipe->extra_config);
+
+ /*
+	 * Use the config value when the dvs_frame_delay setting equals 2,
+	 * otherwise default to 1.
+ */
+ if (internal_pipe->config.dvs_frame_delay == IA_CSS_FRAME_DELAY_2)
+ internal_pipe->dvs_frame_delay = 2;
+ else
+ internal_pipe->dvs_frame_delay = 1;
+
+ /*
+	 * We still keep enable_raw_binning for backward compatibility.
+	 * For any new fractional bayer downscaling, bayer_ds_out_res should
+	 * be used. If both are specified, bayer_ds_out_res takes precedence.
+	 * If neither is specified, we set bayer_ds_out_res equal to the IF
+	 * output resolution (the IF may crop the sensor output) or use the
+	 * default decimation factor of 1.
+ */
+
+ /* YUV downscaling */
+ if ((internal_pipe->config.vf_pp_in_res.width ||
+ internal_pipe->config.capt_pp_in_res.width)) {
+ enum ia_css_frame_format format;
+
+ if (internal_pipe->config.vf_pp_in_res.width) {
+ format = IA_CSS_FRAME_FORMAT_YUV_LINE;
+ ia_css_frame_info_init(
+ &internal_pipe->vf_yuv_ds_input_info,
+ internal_pipe->config.vf_pp_in_res.width,
+ internal_pipe->config.vf_pp_in_res.height,
+ format, 0);
+ }
+ if (internal_pipe->config.capt_pp_in_res.width) {
+ format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(
+ &internal_pipe->out_yuv_ds_input_info,
+ internal_pipe->config.capt_pp_in_res.width,
+ internal_pipe->config.capt_pp_in_res.height,
+ format, 0);
+ }
+ }
+ if (internal_pipe->config.vf_pp_in_res.width &&
+ internal_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) {
+ ia_css_frame_info_init(
+ &internal_pipe->vf_yuv_ds_input_info,
+ internal_pipe->config.vf_pp_in_res.width,
+ internal_pipe->config.vf_pp_in_res.height,
+ IA_CSS_FRAME_FORMAT_YUV_LINE, 0);
+ }
+ /* handle bayer downscaling output info */
+ if (internal_pipe->config.bayer_ds_out_res.width) {
+ ia_css_frame_info_init(
+ &internal_pipe->bds_output_info,
+ internal_pipe->config.bayer_ds_out_res.width,
+ internal_pipe->config.bayer_ds_out_res.height,
+ IA_CSS_FRAME_FORMAT_RAW, 0);
+ }
+
+ /* handle output info, assume always needed */
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (internal_pipe->config.output_info[i].res.width) {
+ err = sh_css_pipe_configure_output(
+ internal_pipe,
+ internal_pipe->config.output_info[i].res.width,
+ internal_pipe->config.output_info[i].res.height,
+ internal_pipe->config.output_info[i].padded_width,
+ internal_pipe->config.output_info[i].format,
+ i);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ kvfree(internal_pipe);
+ internal_pipe = NULL;
+ return err;
+ }
+ }
+
+ /* handle vf output info, when configured */
+ internal_pipe->enable_viewfinder[i] =
+ (internal_pipe->config.vf_output_info[i].res.width != 0);
+ if (internal_pipe->config.vf_output_info[i].res.width) {
+ err = sh_css_pipe_configure_viewfinder(
+ internal_pipe,
+ internal_pipe->config.vf_output_info[i].res.width,
+ internal_pipe->config.vf_output_info[i].res.height,
+ internal_pipe->config.vf_output_info[i].padded_width,
+ internal_pipe->config.vf_output_info[i].format,
+ i);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ kvfree(internal_pipe);
+ internal_pipe = NULL;
+ return err;
+ }
+ }
+ }
+ /* set all info to zeroes first */
+ memset(&internal_pipe->info, 0, sizeof(internal_pipe->info));
+
+ /* all went well, return the pipe */
+ *pipe = internal_pipe;
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+int
+ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
+ struct ia_css_pipe_info *pipe_info)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_get_info()\n");
+ if (!pipe_info) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_pipe_get_info: pipe_info cannot be NULL\n");
+ return -EINVAL;
+ }
+ if (!pipe || !pipe->stream) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_pipe_get_info: ia_css_stream_create needs to be called before ia_css_[stream/pipe]_get_info\n");
+ return -EINVAL;
+ }
+ /* we succeeded return the info */
+ *pipe_info = pipe->info;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_get_info() leave\n");
+ return 0;
+}
+
+bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info)
+{
+ unsigned int i;
+
+ if (pipe_info) {
+ for (i = 0; i < IA_CSS_DVS_STAT_NUM_OF_LEVELS; i++) {
+ if (pipe_info->grid_info.dvs_grid.dvs_stat_grid_info.grd_cfg[i].grd_start.enable)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int
+ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
+ int pin_index,
+ enum ia_css_frame_format new_format)
+{
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, pin_index = %d, new_formats = %d", pipe, pin_index, new_format);
+
+ if (!pipe) {
+ IA_CSS_ERROR("pipe is not set");
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+	if (pin_index != 0 && pin_index != 1) {
+ IA_CSS_ERROR("pin index is not valid");
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (new_format != IA_CSS_FRAME_FORMAT_NV12_TILEY) {
+ IA_CSS_ERROR("new format is not valid");
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ } else {
+ err = ia_css_pipe_check_format(pipe, new_format);
+ if (!err) {
+ if (pin_index == 0)
+ pipe->output_info[0].format = new_format;
+ else
+ pipe->vf_output_info[0].format = new_format;
+ }
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
+static int
+ia_css_stream_configure_rx(struct ia_css_stream *stream)
+{
+ struct ia_css_input_port *config;
+
+ assert(stream);
+
+ config = &stream->config.source.port;
+ /* AM: this code is not reliable, especially for 2400 */
+ if (config->num_lanes == 1)
+ stream->csi_rx_config.mode = MONO_1L_1L_0L;
+ else if (config->num_lanes == 2)
+ stream->csi_rx_config.mode = MONO_2L_1L_0L;
+ else if (config->num_lanes == 3)
+ stream->csi_rx_config.mode = MONO_3L_1L_0L;
+ else if (config->num_lanes == 4)
+ stream->csi_rx_config.mode = MONO_4L_1L_0L;
+ else if (config->num_lanes != 0)
+ return -EINVAL;
+
+ if (config->port > MIPI_PORT2_ID)
+ return -EINVAL;
+ stream->csi_rx_config.port =
+ ia_css_isys_port_to_mipi_port(config->port);
+ stream->csi_rx_config.timeout = config->timeout;
+ stream->csi_rx_config.initcount = 0;
+ stream->csi_rx_config.synccount = 0x28282828;
+ stream->csi_rx_config.rxcount = config->rxcount;
+ if (config->compression.type == IA_CSS_CSI2_COMPRESSION_TYPE_NONE)
+ stream->csi_rx_config.comp = MIPI_PREDICTOR_NONE;
+ else
+ /*
+ * not implemented yet, requires extension of the rx_cfg_t
+ * struct
+ */
+ return -EINVAL;
+
+ stream->csi_rx_config.is_two_ppc = (stream->config.pixels_per_clock == 2);
+ stream->reconfigure_css_rx = true;
+ return 0;
+}
+
+static struct ia_css_pipe *
+find_pipe(struct ia_css_pipe *pipes[], unsigned int num_pipes,
+ enum ia_css_pipe_mode mode, bool copy_pipe)
+{
+ unsigned int i;
+
+ assert(pipes);
+ for (i = 0; i < num_pipes; i++) {
+ assert(pipes[i]);
+ if (pipes[i]->config.mode != mode)
+ continue;
+ if (copy_pipe && pipes[i]->mode != IA_CSS_PIPE_ID_COPY)
+ continue;
+ return pipes[i];
+ }
+ return NULL;
+}
+
+static int
+metadata_info_init(const struct ia_css_metadata_config *mdc,
+ struct ia_css_metadata_info *md)
+{
+ /* Either both width and height should be set or neither */
+ if ((mdc->resolution.height > 0) ^ (mdc->resolution.width > 0))
+ return -EINVAL;
+
+ md->resolution = mdc->resolution;
+ /*
+	 * We round up the stride to a multiple of the width of the
+	 * port going to DDR; this is a HW requirement (DMA).
+ */
+ md->stride = CEIL_MUL(mdc->resolution.width, HIVE_ISP_DDR_WORD_BYTES);
+ md->size = mdc->resolution.height * md->stride;
+ return 0;
+}
+
+int
+ia_css_stream_create(const struct ia_css_stream_config *stream_config,
+ int num_pipes,
+ struct ia_css_pipe *pipes[],
+ struct ia_css_stream **stream)
+{
+ struct ia_css_pipe *curr_pipe;
+ struct ia_css_stream *curr_stream = NULL;
+ bool spcopyonly;
+ bool sensor_binning_changed;
+ int i, j;
+ int err = -EINVAL;
+ struct ia_css_metadata_info md_info;
+ struct ia_css_resolution effective_res;
+
+ IA_CSS_ENTER("num_pipes=%d", num_pipes);
+ ia_css_debug_dump_stream_config(stream_config, num_pipes);
+
+ /* some checks */
+ if (num_pipes == 0 ||
+ !stream ||
+ !pipes) {
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (!IS_ISP2401) {
+		/* We don't support metadata for JPEG streams, since both use str2mem */
+ if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 &&
+ stream_config->metadata_config.resolution.height > 0) {
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ } else {
+ if (stream_config->online && stream_config->pack_raw_pixels) {
+ IA_CSS_LOG("online and pack raw is invalid on input system 2401");
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ }
+
+ ia_css_debug_pipe_graph_dump_stream_config(stream_config);
+
+ /* check if mipi size specified */
+ if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+		if (!IS_ISP2401 || !stream_config->online) {
+ unsigned int port = (unsigned int)stream_config->source.port.port;
+
+ if (port >= N_MIPI_PORT_ID) {
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (my_css.size_mem_words != 0) {
+ my_css.mipi_frame_size[port] = my_css.size_mem_words;
+ } else if (stream_config->mipi_buffer_config.size_mem_words != 0) {
+ my_css.mipi_frame_size[port] = stream_config->mipi_buffer_config.size_mem_words;
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_create() exit: error, need to set mipi frame size.\n");
+ assert(stream_config->mipi_buffer_config.size_mem_words != 0);
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (my_css.size_mem_words != 0) {
+ my_css.num_mipi_frames[port] =
+ 2; /* Temp change: Default for backwards compatibility. */
+ } else if (stream_config->mipi_buffer_config.nof_mipi_buffers != 0) {
+ my_css.num_mipi_frames[port] =
+ stream_config->mipi_buffer_config.nof_mipi_buffers;
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_create() exit: error, need to set number of mipi frames.\n");
+ assert(stream_config->mipi_buffer_config.nof_mipi_buffers != 0);
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ }
+
+	/* Currently we only support metadata up to a certain size. */
+ err = metadata_info_init(&stream_config->metadata_config, &md_info);
+ if (err) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ /* allocate the stream instance */
+ curr_stream = kzalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
+ if (!curr_stream) {
+ err = -ENOMEM;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ /* default all to 0 */
+ curr_stream->info.metadata_info = md_info;
+
+ /* allocate pipes */
+ curr_stream->num_pipes = num_pipes;
+ curr_stream->pipes = kcalloc(num_pipes, sizeof(struct ia_css_pipe *), GFP_KERNEL);
+ if (!curr_stream->pipes) {
+ curr_stream->num_pipes = 0;
+ kfree(curr_stream);
+ curr_stream = NULL;
+ err = -ENOMEM;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ /* store pipes */
+ spcopyonly = (num_pipes == 1) && (pipes[0]->config.mode == IA_CSS_PIPE_MODE_COPY);
+ for (i = 0; i < num_pipes; i++)
+ curr_stream->pipes[i] = pipes[i];
+ curr_stream->last_pipe = curr_stream->pipes[0];
+ /* take over stream config */
+ curr_stream->config = *stream_config;
+
+ if (IS_ISP2401) {
+ if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR &&
+ stream_config->online)
+ curr_stream->config.online = false;
+
+ if (curr_stream->config.online) {
+ curr_stream->config.source.port.num_lanes =
+ stream_config->source.port.num_lanes;
+ curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ }
+ }
+	/* in case the driver doesn't configure the initial number of raw buffers, configure it here */
+ if (curr_stream->config.target_num_cont_raw_buf == 0)
+ curr_stream->config.target_num_cont_raw_buf = NUM_CONTINUOUS_FRAMES;
+ if (curr_stream->config.init_num_cont_raw_buf == 0)
+ curr_stream->config.init_num_cont_raw_buf = curr_stream->config.target_num_cont_raw_buf;
+
+ /* Enable locking & unlocking of buffers in RAW buffer pool */
+ if (curr_stream->config.ia_css_enable_raw_buffer_locking)
+ sh_css_sp_configure_enable_raw_pool_locking(
+ curr_stream->config.lock_all);
+
+ /* copy mode specific stuff */
+ switch (curr_stream->config.mode) {
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+ if (!IS_ISP2401)
+ ia_css_stream_configure_rx(curr_stream);
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+ if (!IS_ISP2401) {
+ IA_CSS_LOG("mode prbs");
+ sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
+ }
+ break;
+ case IA_CSS_INPUT_MODE_MEMORY:
+ IA_CSS_LOG("mode memory");
+ curr_stream->reconfigure_css_rx = false;
+ break;
+ default:
+ IA_CSS_LOG("mode sensor/default");
+ }
+
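+	/*
+	 * Attach the stream to each pipe and inherit the stream's effective
+	 * input resolution where a pipe did not specify its own.
+	 */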
+ for (i = 0; i < num_pipes; i++) {
+ struct ia_css_resolution effective_res;
+
+ curr_pipe = pipes[i];
+ /* set current stream */
+ curr_pipe->stream = curr_stream;
+ /* take over effective info */
+
+ effective_res = curr_pipe->config.input_effective_res;
+ if (effective_res.height == 0 || effective_res.width == 0) {
+ effective_res = curr_pipe->stream->config.input_config.effective_res;
+
+ curr_pipe->config.input_effective_res = effective_res;
+ }
+ IA_CSS_LOG("effective_res=%dx%d",
+ effective_res.width,
+ effective_res.height);
+ }
+
+ err = ia_css_stream_isp_parameters_init(curr_stream);
+ if (err)
+ goto ERR;
+ IA_CSS_LOG("isp_params_configs: %p", curr_stream->isp_params_configs);
+
+ /* sensor binning */
+ if (!spcopyonly) {
+ sensor_binning_changed =
+ sh_css_params_set_binning_factor(curr_stream,
+ curr_stream->config.sensor_binning_factor);
+ } else {
+ sensor_binning_changed = false;
+ }
+
+ IA_CSS_LOG("sensor_binning=%d, changed=%d",
+ curr_stream->config.sensor_binning_factor, sensor_binning_changed);
+ /* loop over pipes */
+ IA_CSS_LOG("num_pipes=%d", num_pipes);
+ curr_stream->cont_capt = false;
+	/*
+	 * Temporary hack: we give the preview pipe a reference to the capture
+	 * pipe in continuous capture mode.
+	 */
+ if (curr_stream->config.continuous) {
+ /* Search for the preview pipe and create the copy pipe */
+ struct ia_css_pipe *preview_pipe;
+ struct ia_css_pipe *video_pipe;
+ struct ia_css_pipe *capture_pipe = NULL;
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ if (num_pipes >= 2) {
+ curr_stream->cont_capt = true;
+ curr_stream->disable_cont_vf = curr_stream->config.disable_cont_viewfinder;
+ curr_stream->stop_copy_preview = my_css.stop_copy_preview;
+ }
+
+ /* Create copy pipe here, since it may not be exposed to the driver */
+ preview_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_PREVIEW, false);
+ video_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_VIDEO, false);
+
+ if (curr_stream->cont_capt) {
+ capture_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_CAPTURE,
+ false);
+ if (!capture_pipe) {
+ err = -EINVAL;
+ goto ERR;
+ }
+ }
+ /* We do not support preview and video pipe at the same time */
+ if (preview_pipe && video_pipe) {
+ err = -EINVAL;
+ goto ERR;
+ }
+
+ if (preview_pipe && !preview_pipe->pipe_settings.preview.copy_pipe) {
+ err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
+ if (err)
+ goto ERR;
+ ia_css_pipe_config_defaults(&copy_pipe->config);
+ preview_pipe->pipe_settings.preview.copy_pipe = copy_pipe;
+ copy_pipe->stream = curr_stream;
+ }
+ if (preview_pipe && curr_stream->cont_capt)
+ preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe;
+
+ if (video_pipe && !video_pipe->pipe_settings.video.copy_pipe) {
+ err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
+ if (err)
+ goto ERR;
+ ia_css_pipe_config_defaults(&copy_pipe->config);
+ video_pipe->pipe_settings.video.copy_pipe = copy_pipe;
+ copy_pipe->stream = curr_stream;
+ }
+ if (video_pipe && curr_stream->cont_capt)
+ video_pipe->pipe_settings.video.capture_pipe = capture_pipe;
+ }
+ for (i = 0; i < num_pipes; i++) {
+ curr_pipe = pipes[i];
+ /* set current stream */
+ curr_pipe->stream = curr_stream;
+
+ /* take over effective info */
+
+ effective_res = curr_pipe->config.input_effective_res;
+ err = ia_css_util_check_res(
+ effective_res.width,
+ effective_res.height);
+ if (err)
+ goto ERR;
+
+ /* sensor binning per pipe */
+ if (sensor_binning_changed)
+ sh_css_pipe_free_shading_table(curr_pipe);
+ }
+
+ /* now pipes have been configured, info should be available */
+ for (i = 0; i < num_pipes; i++) {
+ struct ia_css_pipe_info *pipe_info = NULL;
+
+ curr_pipe = pipes[i];
+
+ err = sh_css_pipe_load_binaries(curr_pipe);
+ if (err)
+ goto ERR;
+
+ /* handle each pipe */
+ pipe_info = &curr_pipe->info;
+ for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
+ err = sh_css_pipe_get_output_frame_info(curr_pipe,
+ &pipe_info->output_info[j], j);
+ if (err)
+ goto ERR;
+ }
+
+ if (!spcopyonly) {
+ if (!IS_ISP2401)
+ err = sh_css_pipe_get_shading_info(curr_pipe,
+ &pipe_info->shading_info,
+ NULL);
+ else
+ err = sh_css_pipe_get_shading_info(curr_pipe,
+ &pipe_info->shading_info,
+ &curr_pipe->config);
+
+ if (err)
+ goto ERR;
+ err = sh_css_pipe_get_grid_info(curr_pipe,
+ &pipe_info->grid_info);
+ if (err)
+ goto ERR;
+ for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
+ sh_css_pipe_get_viewfinder_frame_info(curr_pipe,
+ &pipe_info->vf_output_info[j],
+ j);
+ if (err)
+ goto ERR;
+ }
+ }
+
+ my_css.active_pipes[ia_css_pipe_get_pipe_num(curr_pipe)] = curr_pipe;
+ }
+
+ curr_stream->started = false;
+
+ /* Map SP threads before doing anything. */
+ err = map_sp_threads(curr_stream, true);
+ if (err) {
+ IA_CSS_LOG("map_sp_threads: return_err=%d", err);
+ goto ERR;
+ }
+
+ for (i = 0; i < num_pipes; i++) {
+ curr_pipe = pipes[i];
+ ia_css_pipe_map_queue(curr_pipe, true);
+ }
+
+ /* Create host side pipeline objects without stages */
+ err = create_host_pipeline_structure(curr_stream);
+ if (err) {
+ IA_CSS_LOG("create_host_pipeline_structure: return_err=%d", err);
+ goto ERR;
+ }
+
+ /* assign curr_stream */
+ *stream = curr_stream;
+
+ERR:
+ if (!err) {
+ /* working mode: enter into the seed list */
+ if (my_css_save.mode == sh_css_mode_working) {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (!my_css_save.stream_seeds[i].stream) {
+ IA_CSS_LOG("entered stream into loc=%d", i);
+ my_css_save.stream_seeds[i].orig_stream = stream;
+ my_css_save.stream_seeds[i].stream = curr_stream;
+ my_css_save.stream_seeds[i].num_pipes = num_pipes;
+ my_css_save.stream_seeds[i].stream_config = *stream_config;
+ for (j = 0; j < num_pipes; j++) {
+ my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
+ my_css_save.stream_seeds[i].pipes[j] = pipes[j];
+ my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
+ }
+ break;
+ }
+ }
+ } else {
+ ia_css_stream_destroy(curr_stream);
+ }
+ } else {
+ ia_css_stream_destroy(curr_stream);
+ }
+ IA_CSS_LEAVE("return_err=%d mode=%d", err, my_css_save.mode);
+ return err;
+}
+
+int
+ia_css_stream_destroy(struct ia_css_stream *stream)
+{
+ int i;
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+ if (!stream) {
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ ia_css_stream_isp_parameters_uninit(stream);
+
+ if ((stream->last_pipe) &&
+ ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) {
+ if (IS_ISP2401) {
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+				unsigned int sp_thread_id;
+				unsigned int j;
+				struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+
+				assert(entry);
+				if (entry) {
+					/* get the SP thread id */
+					if (!ia_css_pipeline_get_sp_thread_id(
+					    ia_css_pipe_get_pipe_num(entry), &sp_thread_id))
+						return -EINVAL;
+
+					/* get the target input terminal */
+					sp_pipeline_input_terminal =
+					    &sh_css_sp_group.pipe_io[sp_thread_id].input;
+
+					/*
+					 * Use a separate index for the isys streams so the
+					 * outer loop over stream->pipes is not clobbered.
+					 */
+					for (j = 0; j < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; j++) {
+						ia_css_isys_stream_h isys_stream =
+							&sp_pipeline_input_terminal->context.virtual_input_system_stream[j];
+						if (stream->config.isys_config[j].valid && isys_stream->valid)
+							ia_css_isys_stream_destroy(isys_stream);
+					}
+ }
+ }
+
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ for (i = 0; i < stream->num_pipes; i++) {
+				struct ia_css_pipe *entry = stream->pipes[i];
+
+				/*
+				 * Free any remaining MIPI frames: some test stream
+				 * create-destroy cycles do not generate output frames,
+				 * so the MIPI buffer is not freed in the dequeue
+				 * function.
+				 */
+ if (entry)
+ free_mipi_frames(entry);
+ }
+ }
+ stream_unregister_with_csi_rx(stream);
+ }
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *curr_pipe = stream->pipes[i];
+
+ assert(curr_pipe);
+ ia_css_pipe_map_queue(curr_pipe, false);
+ }
+
+ err = map_sp_threads(stream, false);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ /* remove references from pipes to stream */
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+
+ assert(entry);
+ if (entry) {
+ /* clear reference to stream */
+ entry->stream = NULL;
+ /* check internal copy pipe */
+ if (entry->mode == IA_CSS_PIPE_ID_PREVIEW &&
+ entry->pipe_settings.preview.copy_pipe) {
+ IA_CSS_LOG("clearing stream on internal preview copy pipe");
+ entry->pipe_settings.preview.copy_pipe->stream = NULL;
+ }
+ if (entry->mode == IA_CSS_PIPE_ID_VIDEO &&
+ entry->pipe_settings.video.copy_pipe) {
+ IA_CSS_LOG("clearing stream on internal video copy pipe");
+ entry->pipe_settings.video.copy_pipe->stream = NULL;
+ }
+ err = sh_css_pipe_unload_binaries(entry);
+ }
+ }
+ /* free associated memory of stream struct */
+ kfree(stream->pipes);
+ stream->pipes = NULL;
+ stream->num_pipes = 0;
+
+ /* working mode: take out of the seed list */
+ if (my_css_save.mode == sh_css_mode_working) {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (my_css_save.stream_seeds[i].stream == stream) {
+ IA_CSS_LOG("took out stream %d", i);
+ my_css_save.stream_seeds[i].stream = NULL;
+ break;
+ }
+ }
+ }
+
+ kfree(stream);
+ IA_CSS_LEAVE_ERR(err);
+
+ return err;
+}
+
+int
+ia_css_stream_get_info(const struct ia_css_stream *stream,
+ struct ia_css_stream_info *stream_info)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_info: enter/exit\n");
+ assert(stream);
+ assert(stream_info);
+
+ *stream_info = stream->info;
+ return 0;
+}
+
+int
+ia_css_stream_start(struct ia_css_stream *stream)
+{
+ int err = 0;
+
+ IA_CSS_ENTER("stream = %p", stream);
+ if ((!stream) || (!stream->last_pipe)) {
+ IA_CSS_LEAVE_ERR(-EINVAL);
+ return -EINVAL;
+ }
+ IA_CSS_LOG("starting %d", stream->last_pipe->mode);
+
+ sh_css_sp_set_disable_continuous_viewfinder(stream->disable_cont_vf);
+
+ /* Create host side pipeline. */
+ err = create_host_pipeline(stream);
+ if (err) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (IS_ISP2401 &&
+ ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
+ (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)))
+ stream_register_with_csi_rx(stream);
+
+ /* Initialize mipi size checks */
+ if (!IS_ISP2401 && stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ unsigned int idx;
+ unsigned int port = (unsigned int)(stream->config.source.port.port);
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) {
+ sh_css_sp_group.config.mipi_sizes_for_check[port][idx] =
+ sh_css_get_mipi_sizes_for_check(port, idx);
+ }
+ }
+
+ if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
+ if (IS_ISP2401)
+ err = sh_css_config_input_network_2401(stream);
+ else
+ err = sh_css_config_input_network_2400(stream);
+ if (err)
+ return err;
+ }
+
+ err = sh_css_pipe_start(stream);
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+int
+ia_css_stream_stop(struct ia_css_stream *stream)
+{
+ int err = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop() enter/exit\n");
+ assert(stream);
+ assert(stream->last_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n",
+ stream->last_pipe->mode);
+
+ /* De-initialize mipi size checks */
+ if (!IS_ISP2401 && stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ unsigned int idx;
+ unsigned int port = (unsigned int)(stream->config.source.port.port);
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++)
+ sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0;
+ }
+
+ err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
+ if (err)
+ return err;
+
+ /*
+ * Ideally, unmapping should happen after pipeline_stop, but current
+ * semantics do not allow that.
+ */
+ /* err = map_sp_threads(stream, false); */
+
+ return err;
+}
+
+bool
+ia_css_stream_has_stopped(struct ia_css_stream *stream)
+{
+ bool stopped;
+
+ assert(stream);
+
+ stopped = ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline);
+
+ return stopped;
+}
+
+/* ISP2400 */
+/*
+ * Destroy the stream and all the pipes related to it.
+ * The stream handle is used to identify the correct entry in the css_save struct
+ */
+int
+ia_css_stream_unload(struct ia_css_stream *stream)
+{
+ int i;
+
+ assert(stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() enter,\n");
+ /* some checks */
+ assert(stream);
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (my_css_save.stream_seeds[i].stream == stream) {
+ int j;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_unload(): unloading %d (%p)\n", i,
+ my_css_save.stream_seeds[i].stream);
+ ia_css_stream_destroy(stream);
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_unload(): after unloading %d (%p)\n", i,
+ my_css_save.stream_seeds[i].stream);
+ break;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() exit,\n");
+ return 0;
+}
+
+int
+ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
+ enum ia_css_pipe_id *pipe_id)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_temp_pipe_to_pipe_id() enter/exit\n");
+ if (pipe)
+ *pipe_id = pipe->mode;
+ else
+ *pipe_id = IA_CSS_PIPE_ID_COPY;
+
+ return 0;
+}
+
+enum atomisp_input_format
+ia_css_stream_get_format(const struct ia_css_stream *stream)
+{
+ return stream->config.input_config.format;
+}
+
+bool
+ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream)
+{
+ return (stream->config.pixels_per_clock == 2);
+}
+
+struct ia_css_binary *
+ia_css_stream_get_shading_correction_binary(const struct ia_css_stream
+ *stream)
+{
+ struct ia_css_pipe *pipe;
+
+ assert(stream);
+
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1]);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+
+ return ia_css_pipe_get_shading_correction_binary(pipe);
+}
+
+struct ia_css_binary *
+ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream)
+{
+ int i;
+ struct ia_css_pipe *video_pipe = NULL;
+
+ /* First we find the video pipe */
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) {
+ video_pipe = pipe;
+ break;
+ }
+ }
+ if (video_pipe)
+ return &video_pipe->pipe_settings.video.video_binary;
+ return NULL;
+}
+
+struct ia_css_binary *
+ia_css_stream_get_3a_binary(const struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *pipe;
+ struct ia_css_binary *s3a_binary = NULL;
+
+ assert(stream);
+
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1]);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+
+ s3a_binary = ia_css_pipe_get_s3a_binary(pipe);
+
+ return s3a_binary;
+}
+
+int
+ia_css_stream_set_output_padded_width(struct ia_css_stream *stream,
+ unsigned int output_padded_width)
+{
+ struct ia_css_pipe *pipe;
+
+ assert(stream);
+
+ pipe = stream->last_pipe;
+
+ assert(pipe);
+
+ /* set the config also just in case (redundant info? why do we save config in pipe?) */
+ pipe->config.output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;
+ pipe->output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;
+
+ return 0;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ unsigned int i;
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.sc) {
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
+ break;
+ }
+ }
+ } else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_BAYER)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (binary && binary->info->sp.enable.sc)
+ return binary;
+
+ return NULL;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ unsigned int i;
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
+ break;
+ }
+ }
+ } else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_BAYER) {
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ } else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
+ else
+ assert(0);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (binary && !binary->info->sp.enable.s3a)
+ binary = NULL;
+
+ return binary;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
+ break;
+ default:
+ break;
+ }
+
+ if (binary && !binary->info->sp.enable.dis)
+ binary = NULL;
+
+ return binary;
+}
+
+struct ia_css_pipeline *
+ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe)
+{
+ assert(pipe);
+
+ return (struct ia_css_pipeline *)&pipe->pipeline;
+}
+
+unsigned int
+ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe)
+{
+ assert(pipe);
+
+	/*
+	 * The KW static analyzer could not verify that this function
+	 * never returns an out-of-range value, so assert on the range
+	 * and, when asserts are disabled, clip to the largest valid
+	 * value; pipe_num is unsigned so it cannot be too small.
+	 */
+ assert(pipe->pipe_num < IA_CSS_PIPELINE_NUM_MAX);
+
+ if (pipe->pipe_num >= IA_CSS_PIPELINE_NUM_MAX)
+ return (IA_CSS_PIPELINE_NUM_MAX - 1);
+
+ return pipe->pipe_num;
+}
+
+unsigned int
+ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe)
+{
+ assert(pipe);
+
+ return (unsigned int)pipe->config.isp_pipe_version;
+}
+
+#define SP_START_TIMEOUT_US 30000000
+
+int
+ia_css_start_sp(void)
+{
+ unsigned long timeout;
+ int err = 0;
+
+ IA_CSS_ENTER("");
+ sh_css_sp_start_isp();
+
+	/* wait until the SP has completely started */
+ timeout = SP_START_TIMEOUT_US;
+ while ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_INITIALIZED) && timeout) {
+ timeout--;
+ udelay(1);
+ }
+ if (timeout == 0) {
+ IA_CSS_ERROR("timeout during SP initialization");
+ return -EINVAL;
+ }
+
+	/* Workaround, in order to run two streams in parallel. See TASK 4271 */
+ /* TODO: Fix this. */
+
+ sh_css_init_host_sp_control_vars();
+
+ /* buffers should be initialized only when sp is started */
+ /* AM: At the moment it will be done only when there is no stream active. */
+
+ sh_css_setup_queues();
+ ia_css_bufq_dump_queue_info();
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+/*
+ * Time to wait for the SP to terminate. The only way this can time out
+ * is a fatal hw failure, but we must be able to detect this and emit
+ * a proper error trace.
+ */
+#define SP_SHUTDOWN_TIMEOUT_US 200000
+
+int
+ia_css_stop_sp(void)
+{
+ unsigned long timeout;
+ int err = 0;
+
+ IA_CSS_ENTER("void");
+
+ if (!sh_css_sp_is_running()) {
+ err = -EINVAL;
+ IA_CSS_LEAVE("SP already stopped : return_err=%d", err);
+
+ /* Return an error - stop SP should not have been called by driver */
+ return err;
+ }
+
+ /* For now, stop whole SP */
+ if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
+		IA_CSS_ERROR("Call to 'sh_css_write_host2sp_command()' failed");
+ ia_css_debug_dump_sp_sw_debug_info();
+ }
+
+ sh_css_sp_set_sp_running(false);
+
+ timeout = SP_SHUTDOWN_TIMEOUT_US;
+ while (!ia_css_spctrl_is_idle(SP0_ID) && timeout) {
+ timeout--;
+ udelay(1);
+ }
+ if (ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_TERMINATED)
+ IA_CSS_WARNING("SP has not terminated (SW)");
+
+ if (timeout == 0) {
+ IA_CSS_WARNING("SP is not idle");
+ ia_css_debug_dump_sp_sw_debug_info();
+ }
+ timeout = SP_SHUTDOWN_TIMEOUT_US;
+ while (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT) && timeout) {
+ timeout--;
+ udelay(1);
+ }
+ if (timeout == 0) {
+ IA_CSS_WARNING("ISP is not idle");
+ ia_css_debug_dump_sp_sw_debug_info();
+ }
+
+ sh_css_hmm_buffer_record_uninit();
+
+ /* clear pending param sets from refcount */
+ sh_css_param_clear_param_sets();
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+int
+ia_css_update_continuous_frames(struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *pipe;
+ unsigned int i;
+
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() enter:\n");
+
+ if (!stream) {
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+			"sh_css_update_continuous_frames() leave: invalid stream, return -EINVAL\n");
+ return -EINVAL;
+ }
+
+ pipe = stream->continuous_pipe;
+
+ for (i = stream->config.init_num_cont_raw_buf;
+ i < stream->config.target_num_cont_raw_buf; i++)
+ sh_css_update_host2sp_offline_frame(i,
+ pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
+
+ sh_css_update_host2sp_cont_num_raw_frames
+ (stream->config.target_num_cont_raw_buf, true);
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() leave: return_void\n");
+
+ return 0;
+}
+
+void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
+{
+ unsigned int thread_id;
+ unsigned int pipe_num;
+ bool need_input_queue;
+
+ IA_CSS_ENTER("");
+ assert(pipe);
+
+ pipe_num = pipe->pipe_num;
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+ if (IS_ISP2401)
+ need_input_queue = true;
+ else
+ need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+
+ /* map required buffer queues to resources */
+ /* TODO: to be improved */
+ if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+ if (pipe->pipe_settings.preview.preview_binary.info &&
+ pipe->pipe_settings.preview.preview_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ } else if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE) {
+ unsigned int i;
+
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info &&
+ pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ break;
+ }
+ }
+ } else if (pipe->config.default_capture_config.mode ==
+ IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ if (pipe->pipe_settings.capture.pre_isp_binary.info &&
+ pipe->pipe_settings.capture.pre_isp_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ }
+ } else if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0])
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+ if (pipe->pipe_settings.video.video_binary.info &&
+ pipe->pipe_settings.video.video_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+		if (pipe->pipe_settings.video.video_binary.info &&
+		    pipe->pipe_settings.video.video_binary.info->sp.enable.dis)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_DIS_STATISTICS, map);
+ } else if (pipe->mode == IA_CSS_PIPE_ID_COPY) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ if (!pipe->stream->config.continuous)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+ } else if (pipe->mode == IA_CSS_PIPE_ID_YUVPP) {
+ unsigned int idx;
+
+ for (idx = 0; idx < IA_CSS_PIPE_MAX_OUTPUT_STAGE; idx++) {
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, map);
+ if (pipe->enable_viewfinder[idx])
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, map);
+ }
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+ }
+ IA_CSS_LEAVE("");
+}
+
+int
+ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
+{
+ int ret;
+
+ IA_CSS_ENTER("");
+
+ /*
+ * Only continuous streams have a tagger to which we can send the
+ * unlock message.
+ */
+ if (!stream || !stream->config.continuous) {
+ IA_CSS_ERROR("invalid stream pointer");
+ return -EINVAL;
+ }
+
+ if (exp_id > IA_CSS_ISYS_MAX_EXPOSURE_ID ||
+ exp_id < IA_CSS_ISYS_MIN_EXPOSURE_ID) {
+ IA_CSS_ERROR("invalid exposure ID: %d\n", exp_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Send the event. Since we verified that the exp_id is valid,
+ * we can safely assign it to an 8-bit argument here.
+ */
+ ret = ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER, exp_id, 0, 0);
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+static void
+sh_css_hmm_buffer_record_init(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++)
+ sh_css_hmm_buffer_record_reset(&hmm_buffer_record[i]);
+}
+
+static void
+sh_css_hmm_buffer_record_uninit(void)
+{
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if (buffer_record->in_use) {
+ if (buffer_record->h_vbuf)
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &buffer_record->h_vbuf);
+ sh_css_hmm_buffer_record_reset(buffer_record);
+ }
+ buffer_record++;
+ }
+}
+
+static void
+sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record)
+{
+ assert(buffer_record);
+ buffer_record->in_use = false;
+ buffer_record->type = IA_CSS_BUFFER_TYPE_INVALID;
+ buffer_record->h_vbuf = NULL;
+ buffer_record->kernel_ptr = 0;
+}
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+ enum ia_css_buffer_type type,
+ hrt_address kernel_ptr)
+{
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+ struct sh_css_hmm_buffer_record *out_buffer_record = NULL;
+
+ assert(h_vbuf);
+ assert((type > IA_CSS_BUFFER_TYPE_INVALID) &&
+ (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE));
+ assert(kernel_ptr != 0);
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if (!buffer_record->in_use) {
+ buffer_record->in_use = true;
+ buffer_record->type = type;
+ buffer_record->h_vbuf = h_vbuf;
+ buffer_record->kernel_ptr = kernel_ptr;
+ out_buffer_record = buffer_record;
+ break;
+ }
+ buffer_record++;
+ }
+
+ return out_buffer_record;
+}
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_validate(ia_css_ptr ddr_buffer_addr,
+ enum ia_css_buffer_type type)
+{
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+ bool found_record = false;
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if ((buffer_record->in_use) &&
+ (buffer_record->type == type) &&
+ (buffer_record->h_vbuf) &&
+ (buffer_record->h_vbuf->vptr == ddr_buffer_addr)) {
+ found_record = true;
+ break;
+ }
+ buffer_record++;
+ }
+
+ if (found_record)
+ return buffer_record;
+ else
+ return NULL;
+}
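+
+/*
+ * Lifecycle of the records above (informational sketch): a buffer that is
+ * queued to the SP is registered with sh_css_hmm_buffer_record_acquire();
+ * when the SP hands its DDR address back, the record is looked up again
+ * with sh_css_hmm_buffer_record_validate() and eventually cleared with
+ * sh_css_hmm_buffer_record_reset() (sh_css_hmm_buffer_record_uninit()
+ * does this for any records still in use at shutdown).
+ */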
diff --git a/drivers/staging/media/atomisp/pci/sh_css_defs.h b/drivers/staging/media/atomisp/pci/sh_css_defs.h
new file mode 100644
index 000000000000..7bfeeb8cf053
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_defs.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_DEFS_H_
+#define _SH_CSS_DEFS_H_
+
+#include <linux/math.h>
+
+#include "isp.h"
+
+/*#include "vamem.h"*/ /* Cannot include for VAMEM properties this file is visible on ISP -> pipeline generator */
+
+/* ID's for refcount */
+#define IA_CSS_REFCOUNT_PARAM_SET_POOL 0xCAFE0001
+#define IA_CSS_REFCOUNT_PARAM_BUFFER 0xCAFE0002
+
+/* Digital Image Stabilization */
+#define SH_CSS_DIS_DECI_FACTOR_LOG2 6
+
+/* UV offset: 1:uv=-128...127, 0:uv=0...255 */
+#define SH_CSS_UV_OFFSET_IS_0 0
+
+/* Bits of bayer is adjusted as 13 in ISP */
+#define SH_CSS_BAYER_BITS 13
+
+/* Max value of bayer data (unsigned 13bit in ISP) */
+#define SH_CSS_BAYER_MAXVAL ((1U << SH_CSS_BAYER_BITS) - 1)
+
+/* Bits of yuv in ISP */
+#define SH_CSS_ISP_YUV_BITS 8
+
+#define SH_CSS_DP_GAIN_SHIFT 5
+#define SH_CSS_BNR_GAIN_SHIFT 13
+#define SH_CSS_YNR_GAIN_SHIFT 13
+#define SH_CSS_AE_YCOEF_SHIFT 13
+#define SH_CSS_AF_FIR_SHIFT 13
+#define SH_CSS_YEE_DETAIL_GAIN_SHIFT 8 /* [u5.8] */
+#define SH_CSS_YEE_SCALE_SHIFT 8
+#define SH_CSS_TNR_COEF_SHIFT 13
+#define SH_CSS_MACC_COEF_SHIFT 11 /* [s2.11] for ISP1 */
+#define SH_CSS_MACC2_COEF_SHIFT 13 /* [s[exp].[13-exp]] for ISP2 */
+#define SH_CSS_DIS_COEF_SHIFT 13
+
+/* enumeration of the bayer downscale factors. When a binary supports multiple
+ * factors, the OR of these defines is used to build the mask of supported
+ * factors. The BDS factor is used in pre-processor expressions so we cannot
+ * use an enum here. */
+#define SH_CSS_BDS_FACTOR_1_00 (0)
+#define SH_CSS_BDS_FACTOR_1_25 (1)
+#define SH_CSS_BDS_FACTOR_1_50 (2)
+#define SH_CSS_BDS_FACTOR_2_00 (3)
+#define SH_CSS_BDS_FACTOR_2_25 (4)
+#define SH_CSS_BDS_FACTOR_2_50 (5)
+#define SH_CSS_BDS_FACTOR_3_00 (6)
+#define SH_CSS_BDS_FACTOR_4_00 (7)
+#define SH_CSS_BDS_FACTOR_4_50 (8)
+#define SH_CSS_BDS_FACTOR_5_00 (9)
+#define SH_CSS_BDS_FACTOR_6_00 (10)
+#define SH_CSS_BDS_FACTOR_8_00 (11)
+#define NUM_BDS_FACTORS (12)
+
+#define PACK_BDS_FACTOR(factor) (1 << (factor))
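+
+/*
+ * Illustrative sketch (not used by the driver itself): a binary that
+ * supports both 1:1 and 2:1 bayer downscaling would advertise its
+ * supported factors as the OR of the packed defines above, e.g.
+ *
+ *	#define EXAMPLE_SUPPORTED_BDS_FACTORS \
+ *		(PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_00) | \
+ *		 PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00))
+ */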
+
+/* The following macros must match the enum ia_css_pipe_version in
+ * ia_css_pipe_public.h. These macros are needed because an enum type
+ * evaluates to 0 at preprocessing time. */
+#define SH_CSS_ISP_PIPE_VERSION_1 1
+#define SH_CSS_ISP_PIPE_VERSION_2_2 2
+#define SH_CSS_ISP_PIPE_VERSION_2_6_1 3
+#define SH_CSS_ISP_PIPE_VERSION_2_7 4
+
+/*--------------- sRGB Gamma -----------------
+CCM : YCgCo[0,8191] -> RGB[0,4095]
+sRGB Gamma : RGB [0,4095] -> RGB[0,8191]
+CSC : RGB [0,8191] -> YUV[0,8191]
+
+CCM:
+Y[0,8191],CgCo[-4096,4095],coef[-8192,8191] -> RGB[0,4095]
+
+sRGB Gamma:
+RGB[0,4095] -(interpolation step16)-> RGB[0,255] -(LUT 12bit)-> RGB[0,4095] -> RGB[0,8191]
+
+CSC:
+RGB[0,8191],coef[-8192,8191] -> YUV[0,8191]
+--------------------------------------------*/
+/* Bits of input/output of sRGB Gamma */
+#define SH_CSS_RGB_GAMMA_INPUT_BITS 12 /* [0,4095] */
+#define SH_CSS_RGB_GAMMA_OUTPUT_BITS 13 /* [0,8191] */
+
+/* Bits of fractional part of interpolation in vamem, [0,4095]->[0,255] */
+#define SH_CSS_RGB_GAMMA_FRAC_BITS \
+ (SH_CSS_RGB_GAMMA_INPUT_BITS - SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE_LOG2)
+#define SH_CSS_RGB_GAMMA_ONE BIT(SH_CSS_RGB_GAMMA_FRAC_BITS)
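+
+/*
+ * Worked example (assuming a 256-entry gamma LUT, i.e.
+ * SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE_LOG2 == 8):
+ *	SH_CSS_RGB_GAMMA_FRAC_BITS = 12 - 8 = 4
+ *	SH_CSS_RGB_GAMMA_ONE       = BIT(4) = 16
+ * which matches the "interpolation step16" step in the diagram above.
+ */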
+
+/* Bits of input of CCM, = 13, Y[0,8191],CgCo[-4096,4095] */
+#define SH_CSS_YUV2RGB_CCM_INPUT_BITS SH_CSS_BAYER_BITS
+
+/* Bits of output of CCM, = 12, RGB[0,4095] */
+#define SH_CSS_YUV2RGB_CCM_OUTPUT_BITS SH_CSS_RGB_GAMMA_INPUT_BITS
+
+/* Maximum value of output of CCM */
+#define SH_CSS_YUV2RGB_CCM_MAX_OUTPUT \
+ ((1 << SH_CSS_YUV2RGB_CCM_OUTPUT_BITS) - 1)
+
+#define SH_CSS_NUM_INPUT_BUF_LINES 4
+
+/* Left cropping is only applicable for a sufficiently large nway */
+#define SH_CSS_MAX_LEFT_CROPPING 12
+#define SH_CSS_MAX_TOP_CROPPING 12
+
+#define SH_CSS_SP_MAX_WIDTH 1280
+
+/* This is the maximum grid we can handle in the ISP binaries.
+ * The host code makes sure no bigger grid is ever selected. */
+#define SH_CSS_MAX_BQ_GRID_WIDTH 80
+#define SH_CSS_MAX_BQ_GRID_HEIGHT 60
+
+/* The minimum DVS envelope is 12x12 (for IPU2) to make sure the
+ * invalid rows/columns that result from filter initialization are skipped. */
+#define SH_CSS_MIN_DVS_ENVELOPE 12U
+
+/* The FPGA system (vec_nelems == 16) only supports up to 5MP */
+#define SH_CSS_MAX_SENSOR_WIDTH 4608
+#define SH_CSS_MAX_SENSOR_HEIGHT 3450
+
+/* Limited to reduce vmem pressure */
+#if ISP_VMEM_DEPTH >= 3072
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH SH_CSS_MAX_SENSOR_WIDTH
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT SH_CSS_MAX_SENSOR_HEIGHT
+#else
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH 3264
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT 2448
+#endif
+/* When using bayer decimation */
+/*
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC 4224
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC 3168
+*/
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC SH_CSS_MAX_SENSOR_WIDTH
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC SH_CSS_MAX_SENSOR_HEIGHT
+
+#define SH_CSS_MIN_SENSOR_WIDTH 2
+#define SH_CSS_MIN_SENSOR_HEIGHT 2
+
+/*
+#define SH_CSS_MAX_VF_WIDTH_DEC 1920
+#define SH_CSS_MAX_VF_HEIGHT_DEC 1080
+*/
+#define SH_CSS_MAX_VF_WIDTH_DEC SH_CSS_MAX_VF_WIDTH
+#define SH_CSS_MAX_VF_HEIGHT_DEC SH_CSS_MAX_VF_HEIGHT
+
+/* We use 16 bits per coordinate component, including integer
+ and fractional bits */
+#define SH_CSS_MORPH_TABLE_GRID ISP_VEC_NELEMS
+#define SH_CSS_MORPH_TABLE_ELEM_BYTES 2
+#define SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD \
+ (HIVE_ISP_DDR_WORD_BYTES / SH_CSS_MORPH_TABLE_ELEM_BYTES)
+
+#define SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 1)
+#define SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 1)
+
+#define SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \
+ CEIL_MUL(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+
+/* Each line of this table is aligned to the maximum line width. */
+#define SH_CSS_MAX_S3ATBL_WIDTH SH_CSS_MAX_BQ_GRID_WIDTH
+
+/* Video mode specific DVS define */
+/* The video binary supports a delay of 1 or 2 frames */
+#define MAX_DVS_FRAME_DELAY 2
+/* +1 because DVS reads the previous and writes the current frame concurrently */
+#define MAX_NUM_VIDEO_DELAY_FRAMES (MAX_DVS_FRAME_DELAY + 1)
+
+#define NUM_VIDEO_TNR_FRAMES 2
+
+/* Note that this is the define used to configure all data structures common for all modes */
+/* It should be greater than or equal to the max number of DVS frames for all possible modes */
+/* Rules: these implement logic shared between the host code and ISP firmware.
+ The ISP firmware needs these rules to be applied at pre-processor time,
+ that's why these are macros, not functions. */
+#define _ISP_BQS(num) ((num) / 2)
+#define _ISP_VECS(width) DIV_ROUND_UP(width, ISP_VEC_NELEMS)
+
+#define ISP_BQ_GRID_WIDTH(elements_per_line, deci_factor_log2) \
+ CEIL_SHIFT(elements_per_line / 2, deci_factor_log2)
+#define ISP_BQ_GRID_HEIGHT(lines_per_frame, deci_factor_log2) \
+ CEIL_SHIFT(lines_per_frame / 2, deci_factor_log2)
+#define ISP_C_VECTORS_PER_LINE(elements_per_line) \
+ _ISP_VECS(elements_per_line / 2)
+
+/* The morphing table is similar to the shading table in the sense that we
+ have 1 more value than we have cells in the grid. */
+#define _ISP_MORPH_TABLE_WIDTH(int_width) \
+ (DIV_ROUND_UP(int_width, SH_CSS_MORPH_TABLE_GRID) + 1)
+#define _ISP_MORPH_TABLE_HEIGHT(int_height) \
+ (DIV_ROUND_UP(int_height, SH_CSS_MORPH_TABLE_GRID) + 1)
+#define _ISP_MORPH_TABLE_ALIGNED_WIDTH(width) \
+ CEIL_MUL(_ISP_MORPH_TABLE_WIDTH(width), \
+ SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD)
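+
+/*
+ * Worked example (assuming ISP_VEC_NELEMS == 64 and
+ * HIVE_ISP_DDR_WORD_BYTES == 32): for an internal width of 1920 pixels
+ *	_ISP_MORPH_TABLE_WIDTH(1920)         = DIV_ROUND_UP(1920, 64) + 1 = 31
+ *	_ISP_MORPH_TABLE_ALIGNED_WIDTH(1920) = CEIL_MUL(31, 32 / 2)       = 32
+ */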
+
+#define _ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + 1)
+#define _ISP_SCTBL_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + 1)
+#define _ISP_SCTBL_ALIGNED_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ CEIL_MUL(_ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2), \
+ ISP_VEC_NELEMS)
+
+/* To position the shading center grid point on the center of the output image,
+ * one more grid cell is needed as margin. */
+#define SH_CSS_SCTBL_CENTERING_MARGIN 1
+
+/* The shading table width and height are the number of grids, not cells. The last grid should be counted. */
+#define SH_CSS_SCTBL_LAST_GRID_COUNT 1
+
+/* Number of horizontal grids per color in the shading table. */
+#define _ISP2401_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + \
+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* Number of vertical grids per color in the shading table. */
+#define _ISP2401_SCTBL_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + \
+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* ISP2401: Legacy API: Number of horizontal grids per color in the shading table. */
+#define _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* ISP2401: Legacy API: Number of vertical grids per color in the shading table. */
+#define _ISP_SCTBL_LEGACY_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* *****************************************************************
+ * Statistics for 3A (Auto Focus, Auto White Balance, Auto Exposure)
+ * *****************************************************************/
+/* if left cropping is used, 3A statistics are also cropped by 2 vectors. */
+#define _ISP_S3ATBL_WIDTH(in_width, deci_factor_log2) \
+ (_ISP_BQS(in_width) >> deci_factor_log2)
+#define _ISP_S3ATBL_HEIGHT(in_height, deci_factor_log2) \
+ (_ISP_BQS(in_height) >> deci_factor_log2)
+#define _ISP_S3A_ELEMS_ISP_WIDTH(width, left_crop) \
+ (width - ((left_crop) ? 2 * ISP_VEC_NELEMS : 0))
+
+#define _ISP_S3ATBL_ISP_WIDTH(in_width, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2)
+#define _ISP_S3ATBL_ISP_HEIGHT(in_height, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2)
+#define ISP_S3ATBL_VECTORS \
+ _ISP_VECS(SH_CSS_MAX_S3ATBL_WIDTH * \
+ (sizeof(struct ia_css_3a_output) / sizeof(int32_t)))
+#define ISP_S3ATBL_HI_LO_STRIDE \
+ (ISP_S3ATBL_VECTORS * ISP_VEC_NELEMS)
+#define ISP_S3ATBL_HI_LO_STRIDE_BYTES \
+ (sizeof(unsigned short) * ISP_S3ATBL_HI_LO_STRIDE)
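+
+/*
+ * Worked example (purely illustrative): a 1280x720 input with
+ * deci_factor_log2 == 3 gives
+ *	_ISP_S3ATBL_WIDTH(1280, 3)  = (1280 / 2) >> 3 = 80
+ *	_ISP_S3ATBL_HEIGHT(720, 3)  = (720 / 2) >> 3  = 45
+ * which stays within the SH_CSS_MAX_BQ_GRID_WIDTH/HEIGHT (80x60) limits.
+ */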
+
+/* Viewfinder support */
+#define __ISP_MAX_VF_OUTPUT_WIDTH(width, left_crop) \
+ (width - 2 * ISP_VEC_NELEMS + ((left_crop) ? 2 * ISP_VEC_NELEMS : 0))
+
+#define __ISP_VF_OUTPUT_WIDTH_VECS(out_width, vf_log_downscale) \
+ (_ISP_VECS((out_width) >> (vf_log_downscale)))
+
+#define _ISP_VF_OUTPUT_WIDTH(vf_out_vecs) ((vf_out_vecs) * ISP_VEC_NELEMS)
+#define _ISP_VF_OUTPUT_HEIGHT(out_height, vf_log_ds) \
+ ((out_height) >> (vf_log_ds))
+
+#define _ISP_LOG_VECTOR_STEP(mode) \
+ ((mode) == IA_CSS_BINARY_MODE_CAPTURE_PP ? 2 : 1)
+
+/* It is preferred to have no more than 2x scaling in one step
+ * in the GDC (this assumption applies to the capture_pp and yuv_scale stages) */
+#define MAX_PREFERRED_YUV_DS_PER_STEP 2
+
+/* Rules for computing the internal width. This is extremely complicated
+ * and definitely needs to be commented and explained. */
+#define _ISP_LEFT_CROP_EXTRA(left_crop) ((left_crop) > 0 ? 2 * ISP_VEC_NELEMS : 0)
+
+#define __ISP_MIN_INTERNAL_WIDTH(num_chunks, pipelining, mode) \
+ ((num_chunks) * (pipelining) * (1 << _ISP_LOG_VECTOR_STEP(mode)) * \
+ ISP_VEC_NELEMS)
+
+#define __ISP_PADDED_OUTPUT_WIDTH(out_width, dvs_env_width, left_crop) \
+ ((out_width) + MAX(dvs_env_width, _ISP_LEFT_CROP_EXTRA(left_crop)))
+
+#define __ISP_CHUNK_STRIDE_ISP(mode) \
+ ((1 << _ISP_LOG_VECTOR_STEP(mode)) * ISP_VEC_NELEMS)
+
+#define __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \
+ ((c_subsampling) * (num_chunks) * HIVE_ISP_DDR_WORD_BYTES)
+#define __ISP_INTERNAL_WIDTH(out_width, \
+ dvs_env_width, \
+ left_crop, \
+ mode, \
+ c_subsampling, \
+ num_chunks, \
+ pipelining) \
+ round_up(round_up(MAX(__ISP_PADDED_OUTPUT_WIDTH(out_width, \
+ dvs_env_width, \
+ left_crop), \
+ __ISP_MIN_INTERNAL_WIDTH(num_chunks, \
+ pipelining, \
+ mode) \
+ ), \
+ __ISP_CHUNK_STRIDE_ISP(mode) \
+ ), \
+ __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \
+ )
+
+#define __ISP_INTERNAL_HEIGHT(out_height, dvs_env_height, top_crop) \
+ ((out_height) + (dvs_env_height) + top_crop)
+
+/* @GC: Input can be up to sensor resolution when either bayer downscaling
+ * or raw binning is enabled.
+ * Also, during continuous mode, we need to align to 4*NWAY since input
+ * should support binning */
+#define _ISP_MAX_INPUT_WIDTH(max_internal_width, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \
+ enable_continuous) \
+ ((enable_ds) ? \
+ SH_CSS_MAX_SENSOR_WIDTH :\
+ (enable_fixed_bayer_ds) ? \
+ CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC, 4 * ISP_VEC_NELEMS) : \
+ (enable_raw_bin) ? \
+ CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH, 4 * ISP_VEC_NELEMS) : \
+ (enable_continuous) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH \
+ : max_internal_width)
+
+#define _ISP_INPUT_WIDTH(internal_width, ds_input_width, enable_ds) \
+ ((enable_ds) ? (ds_input_width) : (internal_width))
+
+#define _ISP_MAX_INPUT_HEIGHT(max_internal_height, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \
+ enable_continuous) \
+ ((enable_ds) ? \
+ SH_CSS_MAX_SENSOR_HEIGHT :\
+ (enable_fixed_bayer_ds) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC : \
+ (enable_raw_bin || enable_continuous) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT \
+ : max_internal_height)
+
+#define _ISP_INPUT_HEIGHT(internal_height, ds_input_height, enable_ds) \
+ ((enable_ds) ? (ds_input_height) : (internal_height))
+
+#define SH_CSS_MAX_STAGES 8 /* primary_stage[1-6], capture_pp, vf_pp */
+
+/* The CSI2+ input system requires extra padding from vmem */
+#define _ISP_EXTRA_PADDING_VECS 0
+
+#endif /* _SH_CSS_DEFS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
new file mode 100644
index 000000000000..bed599223717
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/string.h> /* for memcpy() */
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "hmm.h"
+
+#include <math_support.h>
+#include "platform_support.h"
+#include "sh_css_firmware.h"
+
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_internal.h"
+#include "ia_css_isp_param.h"
+
+#include "assert_support.h"
+
+#include "isp.h" /* PMEM_WIDTH_LOG2 */
+
+#include "ia_css_isp_params.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_isp_states.h"
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+struct firmware_header {
+ struct sh_css_fw_bi_file_h file_header;
+ struct ia_css_fw_info binary_header;
+};
+
+struct fw_param {
+ const char *name;
+ const void *buffer;
+};
+
+static struct firmware_header *firmware_header;
+
+/*
+ * The string wrapped in STR() is a placeholder which is replaced with
+ * the actual RELEASE_VERSION during package generation. Please do not
+ * modify it.
+ */
+static const char *release_version_2401 = STR(irci_stable_candrpv_0415_20150521_0458);
+static const char *release_version_2400 = STR(irci_stable_candrpv_0415_20150423_1753);
+
+#define MAX_FW_REL_VER_NAME 300
+static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";
+
+struct ia_css_fw_info sh_css_sp_fw;
+struct ia_css_blob_descr *sh_css_blob_info; /* Only ISP blob info (no SP) */
+unsigned int sh_css_num_binaries; /* This includes 1 SP binary */
+
+static struct fw_param *fw_minibuffer;
+
+char *sh_css_get_fw_version(void)
+{
+ return FW_rel_ver_name;
+}
+
+/*
+ * Split the loaded firmware into blobs
+ */
+
+/* Setup sp/sp1 binary */
+static int
+setup_binary(struct ia_css_fw_info *fw, const char *fw_data,
+ struct ia_css_fw_info *sh_css_fw, unsigned int binary_id)
+{
+ const char *blob_data;
+
+ if ((!fw) || (!fw_data))
+ return -EINVAL;
+
+ blob_data = fw_data + fw->blob.offset;
+
+ *sh_css_fw = *fw;
+
+ sh_css_fw->blob.code = vmalloc(fw->blob.size);
+ if (!sh_css_fw->blob.code)
+ return -ENOMEM;
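+
+/*
+ * Illustrative sketch (not driver code): mark a pipe's input as coming
+ * from the host and its output as going to the copysink:
+ *
+ *	unsigned int port_cfg = 0;
+ *
+ *	SH_CSS_PIPE_PORT_CONFIG_ON(port_cfg, SH_CSS_PORT_INPUT, SH_CSS_HOST_TYPE);
+ *	SH_CSS_PIPE_PORT_CONFIG_ON(port_cfg, SH_CSS_PORT_OUTPUT, SH_CSS_COPYSINK_TYPE);
+ *
+ * With this configuration SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(port_cfg)
+ * evaluates to true, since the output port is not connected to the host.
+ */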
+
+ memcpy((void *)sh_css_fw->blob.code, blob_data, fw->blob.size);
+ sh_css_fw->blob.data = (char *)sh_css_fw->blob.code + fw->blob.data_source;
+ fw_minibuffer[binary_id].buffer = sh_css_fw->blob.code;
+
+ return 0;
+}
+
+int
+sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
+ struct ia_css_blob_descr *bd,
+ unsigned int index)
+{
+ const char *name;
+ const unsigned char *blob;
+
+ if ((!fw) || (!bd))
+ return -EINVAL;
+
+ /* Special case: only one binary in fw */
+ if (!bi)
+ bi = (const struct ia_css_fw_info *)fw;
+
+ name = fw + bi->blob.prog_name_offset;
+ blob = (const unsigned char *)fw + bi->blob.offset;
+
+ /* sanity check */
+ if (bi->blob.size !=
+ bi->blob.text_size + bi->blob.icache_size +
+ bi->blob.data_size + bi->blob.padding_size) {
+ /* sanity check, note the padding bytes added for section to DDR alignment */
+ return -EINVAL;
+ }
+
+ if ((bi->blob.offset % (1UL << (ISP_PMEM_WIDTH_LOG2 - 3))) != 0)
+ return -EINVAL;
+
+ bd->blob = blob;
+ bd->header = *bi;
+
+ if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware) {
+ char *namebuffer;
+
+ namebuffer = kstrdup(name, GFP_KERNEL);
+ if (!namebuffer)
+ return -ENOMEM;
+ bd->name = fw_minibuffer[index].name = namebuffer;
+ } else {
+ bd->name = name;
+ }
+
+ if (bi->type == ia_css_isp_firmware) {
+ size_t paramstruct_size = sizeof(struct ia_css_memory_offsets);
+ size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets);
+ size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets);
+
+ char *parambuf = kmalloc(paramstruct_size + configstruct_size +
+ statestruct_size,
+ GFP_KERNEL);
+ if (!parambuf)
+ return -ENOMEM;
+
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL;
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = NULL;
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = NULL;
+
+ fw_minibuffer[index].buffer = parambuf;
+
+ /* copy ia_css_memory_offsets */
+ memcpy(parambuf, (void *)(fw +
+ bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_PARAM]),
+ paramstruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = parambuf;
+
+ /* copy ia_css_config_memory_offsets */
+ memcpy(parambuf + paramstruct_size,
+ (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_CONFIG]),
+ configstruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = parambuf +
+ paramstruct_size;
+
+ /* copy ia_css_state_memory_offsets */
+ memcpy(parambuf + paramstruct_size + configstruct_size,
+ (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_STATE]),
+ statestruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = parambuf +
+ paramstruct_size + configstruct_size;
+ }
+ return 0;
+}
+
+bool
+sh_css_check_firmware_version(struct device *dev, const char *fw_data)
+{
+ const char *release_version;
+ struct sh_css_fw_bi_file_h *file_header;
+
+ if (IS_ISP2401)
+ release_version = release_version_2401;
+ else
+ release_version = release_version_2400;
+
+ firmware_header = (struct firmware_header *)fw_data;
+ file_header = &firmware_header->file_header;
+
+ if (strcmp(file_header->version, release_version) != 0) {
+ dev_err(dev, "Firmware version may not be compatible with this driver\n");
+ dev_err(dev, "Expecting version '%s', but firmware is '%s'.\n",
+ release_version, file_header->version);
+ }
+
+	/* For now, accept a mismatched version and only warn about it */
+ return false;
+}
+
+static const char * const fw_type_name[] = {
+ [ia_css_sp_firmware] = "SP",
+ [ia_css_isp_firmware] = "ISP",
+ [ia_css_bootloader_firmware] = "BootLoader",
+ [ia_css_acc_firmware] = "accel",
+};
+
+static const char * const fw_acc_type_name[] = {
+ [IA_CSS_ACC_NONE] = "Normal",
+ [IA_CSS_ACC_OUTPUT] = "Accel for output",
+ [IA_CSS_ACC_VIEWFINDER] = "Accel for viewfinder",
+ [IA_CSS_ACC_STANDALONE] = "Stand-alone accel",
+};
+
+int
+sh_css_load_firmware(struct device *dev, const char *fw_data,
+ unsigned int fw_size)
+{
+ unsigned int i;
+ const char *release_version;
+ struct ia_css_fw_info *binaries;
+ struct sh_css_fw_bi_file_h *file_header;
+ int ret;
+
+ /* some sanity checks */
+ if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h))
+ return -EINVAL;
+
+ firmware_header = (struct firmware_header *)fw_data;
+ file_header = &firmware_header->file_header;
+
+ if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h))
+ return -EINVAL;
+
+ binaries = &firmware_header->binary_header;
+ strscpy(FW_rel_ver_name, file_header->version,
+ min(sizeof(FW_rel_ver_name), sizeof(file_header->version)));
+ if (IS_ISP2401)
+ release_version = release_version_2401;
+ else
+ release_version = release_version_2400;
+ ret = sh_css_check_firmware_version(dev, fw_data);
+ if (ret) {
+ IA_CSS_ERROR("CSS code version (%s) and firmware version (%s) mismatch!",
+ file_header->version, release_version);
+ return -EINVAL;
+ } else {
+		IA_CSS_LOG("successfully loaded firmware version %s", release_version);
+ }
+
+ sh_css_num_binaries = file_header->binary_nr;
+ /* Only allocate memory for ISP blob info */
+ if (sh_css_num_binaries > NUM_OF_SPS) {
+ sh_css_blob_info = kmalloc(
+ (sh_css_num_binaries - NUM_OF_SPS) *
+ sizeof(*sh_css_blob_info), GFP_KERNEL);
+ if (!sh_css_blob_info)
+ return -ENOMEM;
+ } else {
+ sh_css_blob_info = NULL;
+ }
+
+ fw_minibuffer = kcalloc(sh_css_num_binaries, sizeof(struct fw_param),
+ GFP_KERNEL);
+ if (!fw_minibuffer)
+ return -ENOMEM;
+
+ for (i = 0; i < sh_css_num_binaries; i++) {
+ struct ia_css_fw_info *bi = &binaries[i];
+ /*
+ * note: the var below is made static as it is quite large;
+ * if it is not static it ends up on the stack which could
+ * cause issues for drivers
+ */
+ static struct ia_css_blob_descr bd;
+ int err;
+
+ err = sh_css_load_blob_info(fw_data, bi, &bd, i);
+
+ if (err)
+ return -EINVAL;
+
+ if (bi->blob.offset + bi->blob.size > fw_size)
+ return -EINVAL;
+
+ switch (bd.header.type) {
+ case ia_css_isp_firmware:
+ if (bd.header.info.isp.type > IA_CSS_ACC_STANDALONE) {
+ dev_err(dev, "binary #%2d: invalid SP type\n",
+ i);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev,
+ "binary #%-2d type %s (%s), binary id is %2d: %s\n",
+ i,
+ fw_type_name[bd.header.type],
+ fw_acc_type_name[bd.header.info.isp.type],
+ bd.header.info.isp.sp.id,
+ bd.name);
+ break;
+ case ia_css_sp_firmware:
+ case ia_css_bootloader_firmware:
+ case ia_css_acc_firmware:
+ dev_dbg(dev,
+ "binary #%-2d type %s: %s\n",
+ i, fw_type_name[bd.header.type],
+ bd.name);
+ break;
+ default:
+ if (bd.header.info.isp.type > IA_CSS_ACC_STANDALONE) {
+ dev_err(dev,
+ "binary #%2d: invalid firmware type\n",
+ i);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (bi->type == ia_css_sp_firmware) {
+ if (i != SP_FIRMWARE)
+ return -EINVAL;
+ err = setup_binary(bi, fw_data, &sh_css_sp_fw, i);
+ if (err)
+ return err;
+
+ } else {
+			/*
+			 * All subsequent binaries (including bootloaders,
+			 * i >= NUM_OF_SPS) are ISP firmware.
+			 */
+ if (i < NUM_OF_SPS)
+ return -EINVAL;
+
+ if (bi->type != ia_css_isp_firmware)
+ return -EINVAL;
+ if (!sh_css_blob_info) /* cannot happen but KW does not see this */
+ return -EINVAL;
+ sh_css_blob_info[i - NUM_OF_SPS] = bd;
+ }
+ }
+
+ return 0;
+}
+
+void sh_css_unload_firmware(void)
+{
+ /* release firmware minibuffer */
+ if (fw_minibuffer) {
+ unsigned int i = 0;
+
+ for (i = 0; i < sh_css_num_binaries; i++) {
+ kfree(fw_minibuffer[i].name);
+ kvfree(fw_minibuffer[i].buffer);
+ }
+ kfree(fw_minibuffer);
+ fw_minibuffer = NULL;
+ }
+
+ memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw));
+ kfree(sh_css_blob_info);
+ sh_css_blob_info = NULL;
+ sh_css_num_binaries = 0;
+}
+
+ia_css_ptr
+sh_css_load_blob(const unsigned char *blob, unsigned int size)
+{
+ ia_css_ptr target_addr = hmm_alloc(size);
+ /*
+ * this will allocate memory aligned to a DDR word boundary which
+ * is required for the CSS DMA to read the instructions.
+ */
+
+ assert(blob);
+ if (target_addr)
+ hmm_store(target_addr, blob, size);
+ return target_addr;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.h b/drivers/staging/media/atomisp/pci/sh_css_firmware.h
new file mode 100644
index 000000000000..1c6bc29f68d5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_FIRMWARE_H_
+#define _SH_CSS_FIRMWARE_H_
+
+#include <system_local.h>
+
+#include <ia_css_err.h>
+#include <ia_css_acc_types.h>
+
+/* This is for the firmware loaded from user space */
+struct sh_css_fw_bi_file_h {
+ char version[64]; /* branch tag + week day + time */
+ int binary_nr; /* Number of binaries */
+ unsigned int h_size; /* sizeof(struct sh_css_fw_bi_file_h) */
+};
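+
+/*
+ * Rough layout of the firmware image as parsed by sh_css_load_firmware()
+ * (informational sketch only):
+ *
+ *	struct sh_css_fw_bi_file_h        file header (version, binary_nr, h_size)
+ *	struct ia_css_fw_info[binary_nr]  one header per binary
+ *	blobs                             each located at its header's blob.offset
+ *	                                  relative to the start of the image
+ */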
+
+extern struct ia_css_fw_info sh_css_sp_fw;
+extern struct ia_css_blob_descr *sh_css_blob_info;
+extern unsigned int sh_css_num_binaries;
+
+char *sh_css_get_fw_version(void);
+
+struct device;
+bool
+sh_css_check_firmware_version(struct device *dev, const char *fw_data);
+
+int
+sh_css_load_firmware(struct device *dev, const char *fw_data,
+ unsigned int fw_size);
+
+void sh_css_unload_firmware(void);
+
+ia_css_ptr sh_css_load_blob(const unsigned char *blob, unsigned int size);
+
+int
+sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
+ struct ia_css_blob_descr *bd, unsigned int i);
+
+#endif /* _SH_CSS_FIRMWARE_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h
new file mode 100644
index 000000000000..c2cf244ac06e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SH_CSS_FRAC_H
+#define __SH_CSS_FRAC_H
+
+#include <linux/minmax.h>
+
+#include "mamoiada_params.h"
+
+#define sISP_REG_BIT ISP_VEC_ELEMBITS
+#define uISP_REG_BIT ((unsigned int)(sISP_REG_BIT - 1))
+#define sSHIFT (16 - sISP_REG_BIT)
+#define uSHIFT ((unsigned int)(16 - uISP_REG_BIT))
+#define sFRACTION_BITS_FITTING(a) (a - sSHIFT)
+#define uFRACTION_BITS_FITTING(a) ((unsigned int)(a - uSHIFT))
+#define sISP_VAL_MIN (-(1 << uISP_REG_BIT))
+#define sISP_VAL_MAX ((1 << uISP_REG_BIT) - 1)
+#define uISP_VAL_MIN (0U)
+#define uISP_VAL_MAX ((unsigned int)((1 << uISP_REG_BIT) - 1))
+
+/* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
+static inline int sDIGIT_FITTING(int v, int a, int b)
+{
+ int fit_shift = sFRACTION_BITS_FITTING(a) - b;
+
+ v >>= sSHIFT;
+ if (fit_shift > 0)
+ v >>= fit_shift;
+
+ return clamp_t(int, v, sISP_VAL_MIN, sISP_VAL_MAX);
+}
+
+static inline unsigned int uDIGIT_FITTING(unsigned int v, int a, int b)
+{
+ int fit_shift = uFRACTION_BITS_FITTING(a) - b;
+
+ v >>= uSHIFT;
+ if (fit_shift > 0)
+ v >>= fit_shift;
+
+ return clamp_t(unsigned int, v, uISP_VAL_MIN, uISP_VAL_MAX);
+}
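+
+/*
+ * Worked example (assuming ISP_VEC_ELEMBITS == 14): then sSHIFT == 2 and
+ * the signed clamp range is [-8192, 8191]. Fitting a 16-bit coefficient
+ * with a == 16 fraction bits to b == 13 ISP fraction bits gives
+ * fit_shift = (16 - 2) - 13 = 1, so
+ *
+ *	sDIGIT_FITTING(32767, 16, 13) == (32767 >> 2) >> 1 == 4095
+ *
+ * which already lies inside the clamp range.
+ */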
+
+#endif /* __SH_CSS_FRAC_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_host_data.c b/drivers/staging/media/atomisp/pci/sh_css_host_data.c
new file mode 100644
index 000000000000..39efd8933034
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_host_data.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <ia_css_host_data.h>
+#include <sh_css_internal.h>
+
+struct ia_css_host_data *ia_css_host_data_allocate(size_t size)
+{
+ struct ia_css_host_data *me;
+
+ me = kmalloc(sizeof(struct ia_css_host_data), GFP_KERNEL);
+ if (!me)
+ return NULL;
+ me->size = (uint32_t)size;
+ me->address = kvmalloc(size, GFP_KERNEL);
+ if (!me->address) {
+ kfree(me);
+ return NULL;
+ }
+ return me;
+}
+
+void ia_css_host_data_free(struct ia_css_host_data *me)
+{
+ if (me) {
+ kvfree(me->address);
+ me->address = NULL;
+ kfree(me);
+ }
+}
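+
+/*
+ * Illustrative usage (sketch only; "table" and "table_size" are
+ * hypothetical):
+ *
+ *	struct ia_css_host_data *data = ia_css_host_data_allocate(table_size);
+ *
+ *	if (!data)
+ *		return -ENOMEM;
+ *	memcpy(data->address, table, table_size);
+ *	... hand "data" to the code that consumes it ...
+ *	ia_css_host_data_free(data);
+ */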
diff --git a/drivers/staging/media/atomisp/pci/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/sh_css_hrt.c
new file mode 100644
index 000000000000..d4633572f8f3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_hrt.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "platform_support.h"
+
+#include "sh_css_hrt.h"
+#include "ia_css_debug.h"
+
+#include "device_access.h"
+
+#define __INLINE_EVENT__
+#include "event_fifo.h"
+#define __INLINE_SP__
+#include "sp.h"
+#define __INLINE_ISP__
+#include "isp.h"
+#define __INLINE_IRQ__
+#include "irq.h"
+#define __INLINE_FIFO_MONITOR__
+#include "fifo_monitor.h"
+
+/* System independent */
+#include "sh_css_internal.h"
+
+bool sh_css_hrt_system_is_idle(void)
+{
+ bool not_idle = false, idle;
+ fifo_channel_t ch;
+
+ idle = sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT);
+ not_idle |= !idle;
+ if (!idle)
+ IA_CSS_WARNING("SP not idle");
+
+ idle = isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT);
+ not_idle |= !idle;
+ if (!idle)
+ IA_CSS_WARNING("ISP not idle");
+
+ for (ch = 0; ch < N_FIFO_CHANNEL; ch++) {
+ fifo_channel_state_t state;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID, ch, &state);
+ if (state.fifo_valid) {
+ IA_CSS_WARNING("FIFO channel %d is not empty", ch);
+ not_idle = true;
+ }
+ }
+
+ return !not_idle;
+}
+
+int sh_css_hrt_sp_wait(void)
+{
+ irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL0_ID;
+ /*
+ * Wait till SP is idle or till there is a SW2 interrupt
+ * The SW2 interrupt will be used when frameloop runs on SP
+ * and signals an event with similar meaning as SP idle
+ * (e.g. frame_done)
+ */
+ while (!sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT) &&
+ ((irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX) &
+ (1U << (irq_id + IRQ_SW_CHANNEL_OFFSET))) == 0)) {
+ udelay(1);
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_hrt.h b/drivers/staging/media/atomisp/pci/sh_css_hrt.h
new file mode 100644
index 000000000000..c2d88b4607c4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_hrt.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_HRT_H_
+#define _SH_CSS_HRT_H_
+
+#include <sp.h>
+#include <isp.h>
+
+#include <ia_css_err.h>
+
+/* SP access */
+void sh_css_hrt_sp_start_si(void);
+
+void sh_css_hrt_sp_start_copy_frame(void);
+
+void sh_css_hrt_sp_start_isp(void);
+
+int sh_css_hrt_sp_wait(void);
+
+bool sh_css_hrt_system_is_idle(void);
+
+#endif /* _SH_CSS_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h
new file mode 100644
index 000000000000..9155a83fcc03
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h
@@ -0,0 +1,956 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_INTERNAL_H_
+#define _SH_CSS_INTERNAL_H_
+
+#include <linux/build_bug.h>
+#include <linux/math.h>
+#include <linux/stdarg.h>
+
+#include <system_global.h>
+#include <math_support.h>
+#include <type_support.h>
+#include <platform_support.h>
+
+#include "input_formatter.h"
+#include "input_system.h"
+
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+#include "ia_css_buffer.h"
+
+#include "ia_css_binary.h"
+#include "sh_css_firmware.h" /* not needed/desired on SP/ISP */
+#include "sh_css_legacy.h"
+#include "sh_css_defs.h"
+#include "sh_css_uds.h"
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+#include "ia_css_circbuf_comm.h" /* Circular buffer */
+#include "ia_css_frame_comm.h"
+#include "ia_css_3a.h"
+#include "ia_css_dvs.h"
+#include "ia_css_metadata.h"
+#include "runtime/bufq/interface/ia_css_bufq.h"
+#include "ia_css_timer.h"
+
+/* TODO: Move to a more suitable place when sp pipeline design is done. */
+#define IA_CSS_NUM_CB_SEM_READ_RESOURCE 2
+#define IA_CSS_NUM_CB_SEM_WRITE_RESOURCE 1
+#define IA_CSS_NUM_CBS 2
+#define IA_CSS_CB_MAX_ELEMS 2
+
+/* Use case specific. index limited to IA_CSS_NUM_CB_SEM_READ_RESOURCE or
+ * IA_CSS_NUM_CB_SEM_WRITE_RESOURCE for read and write respectively.
+ * TODO: Enforce the limitation above.
+*/
+#define IA_CSS_COPYSINK_SEM_INDEX 0
+#define IA_CSS_TAGGER_SEM_INDEX 1
+
+/* Force generation of output event. Used by acceleration pipe. */
+#define IA_CSS_POST_OUT_EVENT_FORCE 2
+
+#define SH_CSS_MAX_BINARY_NAME 64
+
+#define SP_DEBUG_NONE (0)
+#define SP_DEBUG_DUMP (1)
+#define SP_DEBUG_COPY (2)
+#define SP_DEBUG_TRACE (3)
+#define SP_DEBUG_MINIMAL (4)
+
+#define SP_DEBUG SP_DEBUG_NONE
+#define SP_DEBUG_MINIMAL_OVERWRITE 1
+
+#define SH_CSS_TNR_BIT_DEPTH 8
+#define SH_CSS_REF_BIT_DEPTH 8
+
+/* keep next up to date with the definition for MAX_CB_ELEMS_FOR_TAGGER in tagger.sp.c */
+#define NUM_CONTINUOUS_FRAMES 15
+#define NUM_MIPI_FRAMES_PER_STREAM 2
+
+#define NUM_ONLINE_INIT_CONTINUOUS_FRAMES 2
+
+#define NR_OF_PIPELINES IA_CSS_PIPE_ID_NUM /* Must match with IA_CSS_PIPE_ID_NUM */
+
+#define SH_CSS_MAX_IF_CONFIGS 3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/
+#define SH_CSS_IF_CONFIG_NOT_NEEDED 0xFF
+
+#define SH_CSS_MAX_SP_THREADS 5
+
+/**
+ * The C99 standard does not specify the exact object representation of structs;
+ * the representation is compiler dependent.
+ *
+ * The structs that are communicated between host and SP/ISP should have the
+ * exact same object representation. The compiler that is used to compile the
+ * firmware is hivecc.
+ *
+ * To check if a different compiler, used to compile a host application, uses
+ * another object representation, macros are defined specifying the size of
+ * the structs as expected by the firmware.
+ *
+ * A host application shall verify that the sizeof() of a struct is equal to
+ * the SIZE_OF_XXX macro of the corresponding struct. If they are not
+ * equal, functionality will break.
+ */
+#define SIZE_OF_HRT_VADDRESS sizeof(hive_uint32)
+
+/* Number of SP's */
+#define NUM_OF_SPS 1
+
+#define NUM_OF_BLS 0
+
+/* Enum for order of Binaries */
+enum sh_css_order_binaries {
+ SP_FIRMWARE = 0,
+ ISP_FIRMWARE
+};
+
+/*
+* JB: keep next enum in sync with thread id's
+* and pipe id's
+*/
+enum sh_css_pipe_config_override {
+ SH_CSS_PIPE_CONFIG_OVRD_NONE = 0,
+ SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD = 0xffff
+};
+
+enum host2sp_commands {
+ host2sp_cmd_error = 0,
+ /*
+	 * The host2sp_cmd_ready command is the only command written by the SP.
+	 * It acknowledges that the previous command has been received
+	 * (this does not mean that the command has been executed).
+	 * It also indicates that a new command can be sent (it is a queue
+ * with depth 1).
+ */
+ host2sp_cmd_ready = 1,
+ /* Command written by the Host */
+ host2sp_cmd_dummy, /* No action, can be used as watchdog */
+ host2sp_cmd_start_flash, /* Request SP to start the flash */
+ host2sp_cmd_terminate, /* SP should terminate itself */
+ N_host2sp_cmd
+};
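+
+/*
+ * Illustrative host-side sequence (sketch): the host writes a single
+ * command, e.g. sh_css_write_host2sp_command(host2sp_cmd_terminate), and
+ * then waits until the SP writes back host2sp_cmd_ready before issuing
+ * the next command, since the mailbox behaves as a queue of depth 1.
+ */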
+
+/* Enumeration used to indicate the events that are produced by
+ * the SP and consumed by the Host.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+enum sh_css_sp_event_type {
+ SH_CSS_SP_EVENT_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_SECOND_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_VF_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_SECOND_VF_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_3A_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_DIS_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_PIPELINE_DONE,
+ SH_CSS_SP_EVENT_FRAME_TAGGED,
+ SH_CSS_SP_EVENT_INPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_METADATA_DONE,
+ SH_CSS_SP_EVENT_LACE_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE,
+ SH_CSS_SP_EVENT_TIMER,
+ SH_CSS_SP_EVENT_PORT_EOF,
+ SH_CSS_SP_EVENT_FW_WARNING,
+ SH_CSS_SP_EVENT_FW_ASSERT,
+ SH_CSS_SP_EVENT_NR_OF_TYPES /* must be last */
+};
+
+/* xmem address map allocation per pipeline, css pointers */
+/* Note that the struct below must only consist of ia_css_ptr members,
+   otherwise the function ref_sh_css_ddr_address_map will fail.
+ */
+struct sh_css_ddr_address_map {
+ ia_css_ptr isp_param;
+ ia_css_ptr isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ ia_css_ptr macc_tbl;
+ ia_css_ptr fpn_tbl;
+ ia_css_ptr sc_tbl;
+ ia_css_ptr tetra_r_x;
+ ia_css_ptr tetra_r_y;
+ ia_css_ptr tetra_gr_x;
+ ia_css_ptr tetra_gr_y;
+ ia_css_ptr tetra_gb_x;
+ ia_css_ptr tetra_gb_y;
+ ia_css_ptr tetra_b_x;
+ ia_css_ptr tetra_b_y;
+ ia_css_ptr tetra_ratb_x;
+ ia_css_ptr tetra_ratb_y;
+ ia_css_ptr tetra_batr_x;
+ ia_css_ptr tetra_batr_y;
+ ia_css_ptr dvs_6axis_params_y;
+};
+
+#define SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (SH_CSS_MAX_STAGES * IA_CSS_NUM_MEMORIES * SIZE_OF_HRT_VADDRESS) + \
+ (16 * SIZE_OF_HRT_VADDRESS))
+
+static_assert(sizeof(struct sh_css_ddr_address_map) == SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT);
+
+/* xmem address map allocation per pipeline */
+struct sh_css_ddr_address_map_size {
+ size_t isp_param;
+ size_t isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ size_t macc_tbl;
+ size_t fpn_tbl;
+ size_t sc_tbl;
+ size_t tetra_r_x;
+ size_t tetra_r_y;
+ size_t tetra_gr_x;
+ size_t tetra_gr_y;
+ size_t tetra_gb_x;
+ size_t tetra_gb_y;
+ size_t tetra_b_x;
+ size_t tetra_b_y;
+ size_t tetra_ratb_x;
+ size_t tetra_ratb_y;
+ size_t tetra_batr_x;
+ size_t tetra_batr_y;
+ size_t dvs_6axis_params_y;
+};
+
+struct sh_css_ddr_address_map_compound {
+ struct sh_css_ddr_address_map map;
+ struct sh_css_ddr_address_map_size size;
+};
+
+struct ia_css_isp_parameter_set_info {
+	/** Pointers to the parameters in ISP format. Important: this must
+	    be the first member of this struct. */
+	struct sh_css_ddr_address_map mem_map;
+	/** Unique ID to track which config was actually applied to a particular frame */
+	u32 isp_parameters_id;
+	/** Output frame to which this config has to be applied (optional) */
+	ia_css_ptr output_frame_ptr;
+};
+
+/* this struct contains all arguments that can be passed to
+ a binary. It depends on the binary which ones are used. */
+struct sh_css_binary_args {
+ struct ia_css_frame *in_frame; /* input frame */
+ const struct ia_css_frame
+ *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; /* reference input frame */
+ const struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES]; /* tnr frames */
+ struct ia_css_frame
+ *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; /* output frame */
+ struct ia_css_frame *out_vf_frame; /* viewfinder output frame */
+ bool copy_vf;
+ bool copy_output;
+ unsigned int vf_downscale_log2;
+};
+
+#if SP_DEBUG == SP_DEBUG_DUMP
+
+#define SH_CSS_NUM_SP_DEBUG 48
+
+struct sh_css_sp_debug_state {
+ unsigned int error;
+ unsigned int debug[SH_CSS_NUM_SP_DEBUG];
+};
+
+#elif SP_DEBUG == SP_DEBUG_COPY
+
+#define SH_CSS_SP_DBG_TRACE_DEPTH (40)
+
+struct sh_css_sp_debug_trace {
+ u16 frame;
+ u16 line;
+ u16 pixel_distance;
+ u16 mipi_used_dword;
+ u16 sp_index;
+};
+
+struct sh_css_sp_debug_state {
+ u16 if_start_line;
+ u16 if_start_column;
+ u16 if_cropped_height;
+ u16 if_cropped_width;
+ unsigned int index;
+ struct sh_css_sp_debug_trace
+ trace[SH_CSS_SP_DBG_TRACE_DEPTH];
+};
+
+#elif SP_DEBUG == SP_DEBUG_TRACE
+
+/* Example of just one global trace */
+#define SH_CSS_SP_DBG_NR_OF_TRACES (1)
+#define SH_CSS_SP_DBG_TRACE_DEPTH (40)
+
+#define SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS (13)
+
+struct sh_css_sp_debug_trace {
+ u16 time_stamp;
+ u16 location; /* bit 15..13 = file_id, 12..0 = line nr. */
+ u32 data;
+};
+
+struct sh_css_sp_debug_state {
+ struct sh_css_sp_debug_trace
+ trace[SH_CSS_SP_DBG_NR_OF_TRACES][SH_CSS_SP_DBG_TRACE_DEPTH];
+ u16 index_last[SH_CSS_SP_DBG_NR_OF_TRACES];
+ u8 index[SH_CSS_SP_DBG_NR_OF_TRACES];
+};
+
+#elif SP_DEBUG == SP_DEBUG_MINIMAL
+
+#define SH_CSS_NUM_SP_DEBUG 128
+
+struct sh_css_sp_debug_state {
+ unsigned int error;
+ unsigned int debug[SH_CSS_NUM_SP_DEBUG];
+};
+
+#endif
+
+struct sh_css_sp_debug_command {
+ /*
+ * The DMA software-mask,
+ * Bit 31...24: unused.
+ * Bit 23...16: unused.
+ * Bit 15...08: reading-request enabling bits for DMA channel 7..0
+ * Bit 07...00: writing-request enabling bits for DMA channel 7..0
+ *
+ * For example, "0...0 0...0 11111011 11111101" indicates that the
+ * writing request through DMA Channel 1 and the reading request
+ * through DMA channel 2 are both disabled. The others are enabled.
+ */
+ u32 dma_sw_reg;
+};
+
+/* SP input formatter configuration.*/
+struct sh_css_sp_input_formatter_set {
+ u32 stream_format;
+ input_formatter_cfg_t config_a;
+ input_formatter_cfg_t config_b;
+};
+
+#define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3)
+
+/*
+ * SP configuration information
+ *
+ * This struct is part of the atomisp firmware ABI and is directly copied
+ * to ISP DRAM by sh_css_store_sp_group_to_ddr()
+ *
+ * Do NOT change this struct's layout or remove seemingly unused fields!
+ */
+struct sh_css_sp_config {
+ u8 no_isp_sync; /* Signal host immediately after start */
+ u8 enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */
+ u8 lock_all;
+ /** If raw buffer locking is enabled, this flag indicates whether raw
+ frames are locked when their EOF event is successfully sent to the
+ host (true) or when they are passed to the preview/video pipe
+ (false). */
+
+ /*
+ * Note the fields below are only used on the ISP2400 not on the ISP2401,
+ * sh_css_store_sp_group_to_ddr() skip copying these when run on the ISP2401.
+ */
+ struct {
+ u8 a_changed;
+ u8 b_changed;
+ u8 isp_2ppc;
+ struct sh_css_sp_input_formatter_set
+ set[SH_CSS_MAX_IF_CONFIGS]; /* CSI-2 port is used as index. */
+ } input_formatter;
+
+ sync_generator_cfg_t sync_gen;
+ tpg_cfg_t tpg;
+ prbs_cfg_t prbs;
+ input_system_cfg_t input_circuit;
+ u8 input_circuit_cfg_changed;
+ u32 mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
+ /* These last 2 fields are used on both the ISP2400 and the ISP2401 */
+ u8 enable_isys_event_queue;
+ u8 disable_cont_vf;
+};
+
+enum sh_css_stage_type {
+ SH_CSS_SP_STAGE_TYPE = 0,
+ SH_CSS_ISP_STAGE_TYPE = 1
+};
+
+#define SH_CSS_NUM_STAGE_TYPES 2
+
+#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS BIT(0)
+#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS_MASK \
+ ((SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << SH_CSS_MAX_SP_THREADS) - 1)
+
+struct sh_css_sp_pipeline_terminal {
+ union {
+ /* Input System 2401 */
+ virtual_input_system_stream_t
+ virtual_input_system_stream[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ } context;
+ /*
+ * TODO
+ * - Remove "virtual_input_system_cfg" when the ISYS2401 DLI is ready.
+ */
+ union {
+ /* Input System 2401 */
+ virtual_input_system_stream_cfg_t
+ virtual_input_system_stream_cfg[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ } ctrl;
+};
+
+struct sh_css_sp_pipeline_io {
+ struct sh_css_sp_pipeline_terminal input;
+ /* pqiao: comment out temporarily to save dmem */
+ /*struct sh_css_sp_pipeline_terminal output;*/
+};
+
+/* This struct tracks how many streams are registered per CSI port.
+ * This is used to track which streams have already been configured.
+ * Only when all streams are configured is the CSI RX started for that port.
+ */
+struct sh_css_sp_pipeline_io_status {
+ u32 active[N_INPUT_SYSTEM_CSI_PORT]; /** registered streams */
+ u32 running[N_INPUT_SYSTEM_CSI_PORT]; /** configured streams */
+};
+
+enum sh_css_port_dir {
+ SH_CSS_PORT_INPUT = 0,
+ SH_CSS_PORT_OUTPUT = 1
+};
+
+enum sh_css_port_type {
+ SH_CSS_HOST_TYPE = 0,
+ SH_CSS_COPYSINK_TYPE = 1,
+ SH_CSS_TAGGERSINK_TYPE = 2
+};
+
+/* Pipe inout settings: output port on 7-4bits, input port on 3-0bits */
+#define SH_CSS_PORT_FLD_WIDTH_IN_BITS (4)
+#define SH_CSS_PORT_TYPE_BIT_FLD(pt) (0x1 << (pt))
+#define SH_CSS_PORT_FLD(pd) ((pd) ? SH_CSS_PORT_FLD_WIDTH_IN_BITS : 0)
+#define SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) ((p) |= (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt) ((p) &= ~(SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_SET(p, pd, pt, val) ((val) ? \
+ SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) : SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt))
+#define SH_CSS_PIPE_PORT_CONFIG_GET(p, pd, pt) ((p) & (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(p) \
+ (!(SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_INPUT, SH_CSS_HOST_TYPE) && \
+ SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_OUTPUT, SH_CSS_HOST_TYPE)))
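+
+/*
+ * Example (illustrative, not from the original code): starting from p = 0,
+ *   SH_CSS_PIPE_PORT_CONFIG_ON(p, SH_CSS_PORT_INPUT, SH_CSS_HOST_TYPE);
+ *   SH_CSS_PIPE_PORT_CONFIG_ON(p, SH_CSS_PORT_OUTPUT, SH_CSS_COPYSINK_TYPE);
+ * sets bit 0 (host type in the input nibble, bits 3-0) and bit 6 (copysink
+ * type in the output nibble, bits 7-4), so p == 0x41. Since the host type is
+ * then only set on the input side,
+ * SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(p) evaluates to true.
+ */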
+
+#define IA_CSS_ACQUIRE_ISP_POS 31
+
+/* Flags for metadata processing */
+#define SH_CSS_METADATA_ENABLED 0x01
+#define SH_CSS_METADATA_PROCESSED 0x02
+#define SH_CSS_METADATA_OFFLINE_MODE 0x04
+#define SH_CSS_METADATA_WAIT_INPUT 0x08
+
+/* @brief Free an array of metadata buffers.
+ *
+ * @param[in] num_bufs Number of metadata buffers to be freed.
+ * @param[in] bufs Pointer of array of metadata buffers.
+ *
+ * This function frees an array of metadata buffers.
+ */
+void
+ia_css_metadata_free_multiple(unsigned int num_bufs,
+ struct ia_css_metadata **bufs);
+
+/* Macro for handling pipe_qos_config */
+#define QOS_INVALID (~0U)
+
+/* Information for a pipeline */
+struct sh_css_sp_pipeline {
+ u32 pipe_id; /* the pipe ID */
+ u32 pipe_num; /* the dynamic pipe number */
+ u32 thread_id; /* the sp thread ID */
+ u32 pipe_config; /* the pipe config */
+ u32 pipe_qos_config; /* Bitmap of multiple QOS extension fw state.
+ (0xFFFFFFFF) indicates non QOS pipe.*/
+ u32 inout_port_config;
+ u32 required_bds_factor;
+ u32 dvs_frame_delay;
+ u32 input_system_mode; /* enum ia_css_input_mode */
+ u32 port_id; /* port_id for input system */
+	u32	num_stages;		/* number of stages in this pipe */
+ u32 running; /* needed for pipe termination */
+ ia_css_ptr sp_stage_addr[SH_CSS_MAX_STAGES];
+ ia_css_ptr scaler_pp_lut; /* Early bound LUT */
+ u32 dummy; /* stage ptr is only used on sp but lives in
+ this struct; needs cleanup */
+ s32 num_execs; /* number of times to run if this is
+ an acceleration pipe. */
+ struct {
+ u32 format; /* Metadata format in hrt format */
+ u32 width; /* Width of a line */
+ u32 height; /* Number of lines */
+ u32 stride; /* Stride (in bytes) per line */
+ u32 size; /* Total size (in bytes) */
+ ia_css_ptr cont_buf; /* Address of continuous buffer */
+ } metadata;
+ u32 output_frame_queue_id;
+ union {
+ struct {
+ u32 bytes_available;
+ } bin;
+ struct {
+ u32 height;
+ u32 width;
+ u32 padded_width;
+ u32 max_input_width;
+ u32 raw_bit_depth;
+ } raw;
+ } copy;
+};
+
+/*
+ * The first frames (with comment Dynamic) can be dynamic or static
+ * The other frames (ref_in and below) can only be static
+ * Static means that the data address will not change during the life time
+ * of the associated pipe. Dynamic means that the data address can
+ * change with every (frame) iteration of the associated pipe
+ *
+ * s3a and dis are now also dynamic but (still) handled separately
+ */
+#define SH_CSS_NUM_DYNAMIC_FRAME_IDS (3)
+
+struct ia_css_frames_sp {
+ struct ia_css_frame_sp in;
+ struct ia_css_frame_sp out[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_resolution effective_in_res;
+ struct ia_css_frame_sp out_vf;
+ struct ia_css_frame_sp_info internal_frame_info;
+ struct ia_css_buffer_sp s3a_buf;
+ struct ia_css_buffer_sp dvs_buf;
+ struct ia_css_buffer_sp metadata_buf;
+};
+
+/* Information for a single pipeline stage for an ISP */
+struct sh_css_isp_stage {
+ /*
+	 * For compatibility and portability, only types
+	 * from "stdint.h" are allowed.
+	 *
+	 * Use of "enum" and "bool" is prohibited;
+	 * multiple boolean flags can be stored in an
+	 * integer.
+ */
+ struct ia_css_blob_info blob_info;
+ struct ia_css_binary_info binary_info;
+ char binary_name[SH_CSS_MAX_BINARY_NAME];
+ struct ia_css_isp_param_css_segments mem_initializers;
+};
+
+/* Information for a single pipeline stage */
+struct sh_css_sp_stage {
+ /*
+	 * For compatibility and portability, only types
+	 * from "stdint.h" are allowed.
+	 *
+	 * Use of "enum" and "bool" is prohibited;
+	 * multiple boolean flags can be stored in an
+	 * integer.
+ */
+ u8 num; /* Stage number */
+ u8 isp_online;
+ u8 isp_copy_vf;
+ u8 isp_copy_output;
+ u8 sp_enable_xnr;
+ u8 isp_deci_log_factor;
+ u8 isp_vf_downscale_bits;
+ u8 deinterleaved;
+ /*
+ * NOTE: Programming the input circuit can only be done at the
+	 * start of a session. It is illegal to program it during execution.
+	 * The input circuit defines the connectivity.
+ */
+ u8 program_input_circuit;
+ /* enum ia_css_pipeline_stage_sp_func func; */
+ u8 func;
+ /* The type of the pipe-stage */
+ /* enum sh_css_stage_type stage_type; */
+ u8 stage_type;
+ u8 num_stripes;
+ u8 isp_pipe_version;
+ struct {
+ u8 vf_output;
+ u8 s3a;
+ u8 sdis;
+ u8 dvs_stats;
+ u8 lace_stats;
+ } enable;
+ /* Add padding to come to a word boundary */
+ /* unsigned char padding[0]; */
+
+ struct sh_css_crop_pos sp_out_crop_pos;
+ struct ia_css_frames_sp frames;
+ struct ia_css_resolution dvs_envelope;
+ struct sh_css_uds_info uds;
+ ia_css_ptr isp_stage_addr;
+ ia_css_ptr xmem_bin_addr;
+ ia_css_ptr xmem_map_addr;
+
+ u16 top_cropping;
+ u16 row_stripes_height;
+ u16 row_stripes_overlap_lines;
+	u8			if_config_index; /* Input formatter config set to be applied by this stage. */
+};
+
+/*
+ * Time: 2012-07-19, 17:40.
+ * Note: Added a new data member "debug" to "sh_css_sp_group". This
+ * data member is used to pass debugging commands from the
+ * Host to the SP.
+ *
+ * Time: Before 2012-07-19.
+ * Note:
+ * Group all host-initialized SP variables into this struct.
+ * This is initialized every stage through DMA.
+ * The stage part itself is transferred through sh_css_sp_stage.
+ */
+struct sh_css_sp_group {
+ struct sh_css_sp_config config;
+ struct sh_css_sp_pipeline pipe[SH_CSS_MAX_SP_THREADS];
+ struct sh_css_sp_pipeline_io pipe_io[SH_CSS_MAX_SP_THREADS];
+ struct sh_css_sp_pipeline_io_status pipe_io_status;
+ struct sh_css_sp_debug_command debug;
+};
+
+/* Data in SP dmem that is set from the host every stage. */
+struct sh_css_sp_per_frame_data {
+ /* ddr address of sp_group and sp_stage */
+ ia_css_ptr sp_group_addr;
+};
+
+#define SH_CSS_NUM_SDW_IRQS 3
+
+/* Output data from SP to css */
+struct sh_css_sp_output {
+ unsigned int bin_copy_bytes_copied;
+#if SP_DEBUG != SP_DEBUG_NONE
+ struct sh_css_sp_debug_state debug;
+#endif
+ unsigned int sw_interrupt_value[SH_CSS_NUM_SDW_IRQS];
+};
+
+/**
+ * @brief Data structure for the circular buffer.
+ * The circular buffer is empty if "start == end". The
+ * circular buffer is full if "(end + 1) % size == start".
+ */
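+/*
+ * Illustrative note on this convention: one element is always sacrificed,
+ * i.e. a queue of size N holds at most N - 1 entries. For example, with
+ * size == 4: start == end means empty, and after three enqueues
+ * (end + 1) % 4 == start, so the queue reports full.
+ */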
+/* Variable Sized Buffer Queue Elements */
+
+#define IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE 6
+#define IA_CSS_NUM_ELEMS_HOST2SP_PARAM_QUEUE 3
+#define IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE 6
+
+/* The sp-to-host queue is expected to be emptied in the ISR, since
+ * it is used instead of HW interrupts (due to a HW design issue).
+ * We need one queue element per CSI port. */
+#define IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS)
+/* The host-to-sp queue needs to allow for some delay
+ * in the emptying of this queue in the SP since there is no
+ * separate SP thread for this. */
+#define IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS)
+
+#define IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE 13
+#define IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE 19
+#define IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE 26 /* holds events for all types of buffers, hence deeper */
+
+struct sh_css_hmm_buffer {
+ union {
+ struct ia_css_isp_3a_statistics s3a;
+ struct ia_css_isp_dvs_statistics dis;
+ ia_css_ptr skc_dvs_statistics;
+ ia_css_ptr lace_stat;
+ struct ia_css_metadata metadata;
+ struct frame_data_wrapper {
+ ia_css_ptr frame_data;
+ u32 flashed;
+ u32 exp_id;
+ u32 isp_parameters_id; /** Unique ID to track which config was
+ actually applied to a particular frame */
+ } frame;
+ ia_css_ptr ddr_ptrs;
+ } payload;
+ /*
+ * kernel_ptr is present for host administration purposes only.
+ * type is uint64_t in order to be 64-bit host compatible.
+ * uint64_t does not exist on SP/ISP.
+ * Size of the struct is checked by sp.hive.c.
+ */
+ CSS_ALIGN(u64 cookie_ptr, 8); /* TODO: check if this alignment is needed */
+ u64 kernel_ptr;
+ struct ia_css_time_meas timing_data;
+ clock_value_t isys_eof_clock_tick;
+};
+
+#define SIZE_OF_FRAME_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (3 * sizeof(uint32_t)))
+
+#define SIZE_OF_PAYLOAD_UNION \
+ (MAX(MAX(MAX(MAX( \
+ SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT, \
+ SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT), \
+ SIZE_OF_IA_CSS_METADATA_STRUCT), \
+ SIZE_OF_FRAME_STRUCT), \
+ SIZE_OF_HRT_VADDRESS))
+
+/* Do not use sizeof(uint64_t) since that does not exist on the SP */
+#define SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT \
+ (round_up(SIZE_OF_PAYLOAD_UNION, 8) + \
+ 8 + \
+ 8 + \
+ SIZE_OF_IA_CSS_TIME_MEAS_STRUCT + \
+ round_up(SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT, 8))
+
+static_assert(sizeof(struct sh_css_hmm_buffer) == SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT);
+
+enum sh_css_queue_type {
+ sh_css_invalid_queue_type = -1,
+ sh_css_host2sp_buffer_queue,
+ sh_css_sp2host_buffer_queue,
+ sh_css_host2sp_psys_event_queue,
+ sh_css_sp2host_psys_event_queue,
+ sh_css_sp2host_isys_event_queue,
+ sh_css_host2sp_isys_event_queue,
+ sh_css_host2sp_tag_cmd_queue,
+};
+
+struct sh_css_event_irq_mask {
+ u16 or_mask;
+ u16 and_mask;
+};
+
+#define SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT \
+ (2 * sizeof(uint16_t))
+
+static_assert(sizeof(struct sh_css_event_irq_mask) == SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT);
+
+struct host_sp_communication {
+ /*
+	 * Don't use enum host2sp_commands, because the size of an enum is
+	 * compiler-dependent and thus non-portable.
+ */
+ u32 host2sp_command;
+
+ /*
+ * The frame buffers that are reused by the
+ * copy pipe in the offline preview mode.
+ *
+ * host2sp_offline_frames[0]: the input frame of the preview pipe.
+ * host2sp_offline_frames[1]: the output frame of the copy pipe.
+ *
+ * TODO:
+	 * Remove it when the Host and the SP are decoupled.
+ */
+ ia_css_ptr host2sp_offline_frames[NUM_CONTINUOUS_FRAMES];
+ ia_css_ptr host2sp_offline_metadata[NUM_CONTINUOUS_FRAMES];
+
+ ia_css_ptr host2sp_mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ ia_css_ptr host2sp_mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ u32 host2sp_num_mipi_frames[N_CSI_PORTS];
+ u32 host2sp_cont_avail_num_raw_frames;
+ u32 host2sp_cont_extra_num_raw_frames;
+ u32 host2sp_cont_target_num_raw_frames;
+ struct sh_css_event_irq_mask host2sp_event_irq_mask[NR_OF_PIPELINES];
+
+};
+
+#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \
+ (sizeof(uint32_t) + \
+ (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \
+ (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM * SIZE_OF_HRT_VADDRESS * 2) + \
+ ((3 + N_CSI_PORTS) * sizeof(uint32_t)) + \
+ (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT))
+
+static_assert(sizeof(struct host_sp_communication) == SIZE_OF_HOST_SP_COMMUNICATION_STRUCT);
+
+struct host_sp_queues {
+ /*
+ * Queues for the dynamic frame information,
+ * i.e. the "in_frame" buffer, the "out_frame"
+ * buffer and the "vf_out_frame" buffer.
+ */
+ ia_css_circbuf_desc_t host2sp_buffer_queues_desc
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+ ia_css_circbuf_elem_t host2sp_buffer_queues_elems
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES]
+ [IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE];
+ ia_css_circbuf_desc_t sp2host_buffer_queues_desc
+ [SH_CSS_MAX_NUM_QUEUES];
+ ia_css_circbuf_elem_t sp2host_buffer_queues_elems
+ [SH_CSS_MAX_NUM_QUEUES][IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE];
+
+ /*
+ * The queues for the events.
+ */
+ ia_css_circbuf_desc_t host2sp_psys_event_queue_desc;
+
+ ia_css_circbuf_elem_t host2sp_psys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE];
+ ia_css_circbuf_desc_t sp2host_psys_event_queue_desc;
+
+ ia_css_circbuf_elem_t sp2host_psys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE];
+
+ /*
+ * The queues for the ISYS events.
+ */
+ ia_css_circbuf_desc_t host2sp_isys_event_queue_desc;
+
+ ia_css_circbuf_elem_t host2sp_isys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE];
+ ia_css_circbuf_desc_t sp2host_isys_event_queue_desc;
+
+ ia_css_circbuf_elem_t sp2host_isys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE];
+ /*
+ * The queue for the tagger commands.
+	 * CHECK: are these last two present on the 2401?
+ */
+ ia_css_circbuf_desc_t host2sp_tag_cmd_queue_desc;
+
+ ia_css_circbuf_elem_t host2sp_tag_cmd_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE];
+};
+
+#define SIZE_OF_QUEUES_ELEMS \
+ (SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT * \
+ ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE) + \
+ (SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE)))
+
+#define IA_CSS_NUM_CIRCBUF_DESCS 5
+
+#define SIZE_OF_QUEUES_DESC \
+ ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * \
+ SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \
+ (SH_CSS_MAX_NUM_QUEUES * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \
+ (IA_CSS_NUM_CIRCBUF_DESCS * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT))
+
+#define SIZE_OF_HOST_SP_QUEUES_STRUCT \
+ (SIZE_OF_QUEUES_ELEMS + SIZE_OF_QUEUES_DESC)
+
+static_assert(sizeof(struct host_sp_queues) == SIZE_OF_HOST_SP_QUEUES_STRUCT);
+
+extern int __printf(1, 0) (*sh_css_printf)(const char *fmt, va_list args);
+
+static inline void __printf(1, 2) sh_css_print(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (sh_css_printf) {
+ va_start(ap, fmt);
+ sh_css_printf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+static inline void __printf(1, 0) sh_css_vprint(const char *fmt, va_list args)
+{
+ if (sh_css_printf)
+ sh_css_printf(fmt, args);
+}
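+
+/*
+ * Illustrative sketch only (not part of the original code): a host could
+ * route CSS debug output to the kernel log by installing a va_list-based
+ * printer, e.g. "sh_css_printf = vprintk;", and then use sh_css_print() or
+ * sh_css_vprint() as usual. When no printer is installed, both helpers
+ * above silently drop the output.
+ */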
+
+/* The following #if is there because this header file is also included
+ * by SP and ISP code, which do not need this data, and HIVECC has alignment
+ * issues with the firmware structs/unions.
+ * A more permanent solution will be to refactor this include.
+ */
+ia_css_ptr sh_css_params_ddr_address_map(void);
+
+int
+sh_css_params_init(void);
+
+void
+sh_css_params_uninit(void);
+
+void
+sh_css_binary_args_reset(struct sh_css_binary_args *args);
+
+/* Check two frames for equality (format, resolution, bits per element) */
+bool
+sh_css_frame_equal_types(const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b);
+
+bool
+sh_css_frame_info_equal_resolution(const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b);
+
+void
+sh_css_capture_enable_bayer_downscaling(bool enable);
+
+void
+sh_css_binary_print(const struct ia_css_binary *binary);
+
+/* The "aligned" argument of sh_css_frame_info_set_width() can be used to request
+ * an extra alignment requirement. When 0, no extra alignment is done. */
+void
+sh_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int aligned);
+
+
+unsigned int
+sh_css_get_mipi_sizes_for_check(const unsigned int port,
+ const unsigned int idx);
+
+
+ia_css_ptr
+sh_css_store_sp_group_to_ddr(void);
+
+ia_css_ptr
+sh_css_store_sp_stage_to_ddr(unsigned int pipe, unsigned int stage);
+
+ia_css_ptr
+sh_css_store_isp_stage_to_ddr(unsigned int pipe, unsigned int stage);
+
+void
+sh_css_update_uds_and_crop_info(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+	struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+	bool enable_zoom
+);
+
+void
+sh_css_invalidate_shading_tables(struct ia_css_stream *stream);
+
+struct ia_css_pipeline *
+ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe);
+
+unsigned int
+ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe);
+
+unsigned int
+ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe);
+
+bool
+sh_css_continuous_is_enabled(uint8_t pipe_num);
+
+struct ia_css_pipe *
+find_pipe_by_num(uint32_t pipe_num);
+
+void
+ia_css_get_crop_offsets(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *in_frame);
+
+#endif /* _SH_CSS_INTERNAL_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_legacy.h b/drivers/staging/media/atomisp/pci/sh_css_legacy.h
new file mode 100644
index 000000000000..1d3549c52a46
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_legacy.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_LEGACY_H_
+#define _SH_CSS_LEGACY_H_
+
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_pipe_public.h>
+#include <ia_css_stream_public.h>
+
+/* The pipe id type, distinguishes the kind of pipes that
+ * can be run in parallel.
+ */
+enum ia_css_pipe_id {
+ IA_CSS_PIPE_ID_PREVIEW,
+ IA_CSS_PIPE_ID_COPY,
+ IA_CSS_PIPE_ID_VIDEO,
+ IA_CSS_PIPE_ID_CAPTURE,
+ IA_CSS_PIPE_ID_YUVPP,
+ IA_CSS_PIPE_ID_NUM
+};
+
+struct ia_css_pipe_extra_config {
+ bool enable_raw_binning;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_fractional_ds;
+ bool disable_vf_pp;
+};
+
+int
+ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
+ const struct ia_css_pipe_extra_config *extra_config,
+ struct ia_css_pipe **pipe);
+
+void
+ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config
+ *extra_config);
+
+int
+ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
+ enum ia_css_pipe_id *pipe_id);
+
+/* DEPRECATED. FPN is not supported. */
+int
+sh_css_set_black_frame(struct ia_css_stream *stream,
+ const struct ia_css_frame *raw_black_frame);
+
+/* ISP2400 */
+void
+sh_css_enable_cont_capt(bool enable, bool stop_copy_preview);
+
+#endif /* _SH_CSS_LEGACY_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_metrics.c b/drivers/staging/media/atomisp/pci/sh_css_metrics.c
new file mode 100644
index 000000000000..edf473dd86ca
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_metrics.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "assert_support.h"
+#include "sh_css_metrics.h"
+
+#include "sp.h"
+#include "isp.h"
+
+#include "sh_css_internal.h"
+
+#define MULTIPLE_PCS 0
+#define SUSPEND 0
+#define NOF_PCS 1
+#define RESUME_MASK 0x8
+#define STOP_MASK 0x0
+
+static bool pc_histogram_enabled;
+static struct sh_css_pc_histogram *isp_histogram;
+static struct sh_css_pc_histogram *sp_histogram;
+
+struct sh_css_metrics sh_css_metrics;
+
+void
+sh_css_metrics_start_frame(void)
+{
+ sh_css_metrics.frame_metrics.num_frames++;
+}
+
+static void
+clear_histogram(struct sh_css_pc_histogram *histogram)
+{
+ unsigned int i;
+
+ assert(histogram);
+
+ for (i = 0; i < histogram->length; i++) {
+ histogram->run[i] = 0;
+ histogram->stall[i] = 0;
+ histogram->msink[i] = 0xFFFF;
+ }
+}
+
+void
+sh_css_metrics_enable_pc_histogram(bool enable)
+{
+ pc_histogram_enabled = enable;
+}
+
+static void
+make_histogram(struct sh_css_pc_histogram *histogram, unsigned int length)
+{
+ assert(histogram);
+
+ if (histogram->length)
+ return;
+ if (histogram->run)
+ return;
+ histogram->run = kvmalloc(length * sizeof(*histogram->run),
+ GFP_KERNEL);
+ if (!histogram->run)
+ return;
+ histogram->stall = kvmalloc(length * sizeof(*histogram->stall),
+ GFP_KERNEL);
+ if (!histogram->stall)
+ return;
+ histogram->msink = kvmalloc(length * sizeof(*histogram->msink),
+ GFP_KERNEL);
+ if (!histogram->msink)
+ return;
+
+ histogram->length = length;
+ clear_histogram(histogram);
+}
+
+static void
+insert_binary_metrics(struct sh_css_binary_metrics **l,
+ struct sh_css_binary_metrics *metrics)
+{
+ assert(l);
+ assert(*l);
+ assert(metrics);
+
+ for (; *l; l = &(*l)->next)
+ if (*l == metrics)
+ return;
+
+ *l = metrics;
+ metrics->next = NULL;
+}
+
+void
+sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics)
+{
+ assert(metrics);
+
+ if (!pc_histogram_enabled)
+ return;
+
+ isp_histogram = &metrics->isp_histogram;
+ sp_histogram = &metrics->sp_histogram;
+ make_histogram(isp_histogram, ISP_PMEM_DEPTH);
+ make_histogram(sp_histogram, SP_PMEM_DEPTH);
+ insert_binary_metrics(&sh_css_metrics.binary_metrics, metrics);
+}
+
+void
+sh_css_metrics_sample_pcs(void)
+{
+ bool stall;
+ unsigned int pc;
+ unsigned int msink;
+
+ if (!pc_histogram_enabled)
+ return;
+
+ if (isp_histogram) {
+ msink = isp_ctrl_load(ISP0_ID, ISP_CTRL_SINK_REG);
+ pc = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
+
+ isp_histogram->msink[pc] &= msink;
+ stall = (msink != 0x7FF);
+
+ if (stall)
+ isp_histogram->stall[pc]++;
+ else
+ isp_histogram->run[pc]++;
+ }
+
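+	/* Note: the SP PC sampling below is compiled out by the "&& 0" condition. */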
+ if (sp_histogram && 0) {
+ msink = sp_ctrl_load(SP0_ID, SP_CTRL_SINK_REG);
+ pc = sp_ctrl_load(SP0_ID, SP_PC_REG);
+ sp_histogram->msink[pc] &= msink;
+ stall = (msink != 0x7FF);
+ if (stall)
+ sp_histogram->stall[pc]++;
+ else
+ sp_histogram->run[pc]++;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_metrics.h b/drivers/staging/media/atomisp/pci/sh_css_metrics.h
new file mode 100644
index 000000000000..0ae995b80d07
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_metrics.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_METRICS_H_
+#define _SH_CSS_METRICS_H_
+
+#include <type_support.h>
+
+struct sh_css_pc_histogram {
+ unsigned int length;
+ unsigned int *run;
+ unsigned int *stall;
+ unsigned int *msink;
+};
+
+struct sh_css_binary_metrics {
+ unsigned int mode;
+ unsigned int id;
+ struct sh_css_pc_histogram isp_histogram;
+ struct sh_css_pc_histogram sp_histogram;
+ struct sh_css_binary_metrics *next;
+};
+
+struct ia_css_frame_metrics {
+ unsigned int num_frames;
+};
+
+struct sh_css_metrics {
+ struct sh_css_binary_metrics *binary_metrics;
+ struct ia_css_frame_metrics frame_metrics;
+};
+
+extern struct sh_css_metrics sh_css_metrics;
+
+/* includes ia_css_binary.h, which depends on sh_css_metrics.h */
+#include "ia_css_types.h"
+
+/* Sample ISP and SP pc and add to histogram */
+void sh_css_metrics_enable_pc_histogram(bool enable);
+void sh_css_metrics_start_frame(void);
+void sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics);
+void sh_css_metrics_sample_pcs(void);
+
+#endif /* _SH_CSS_METRICS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
new file mode 100644
index 000000000000..971b685cdb58
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_mipi.h"
+#include "sh_css_mipi.h"
+#include <type_support.h>
+#include "system_global.h"
+#include "ia_css_err.h"
+#include "ia_css_pipe.h"
+#include "ia_css_stream_format.h"
+#include "sh_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_input_port.h"
+#include "ia_css_debug.h"
+#include "sh_css_struct.h"
+#include "sh_css_defs.h"
+#include "sh_css_sp.h" /* sh_css_update_host2sp_mipi_frame sh_css_update_host2sp_num_mipi_frames ... */
+#include "sw_event_global.h" /* IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY */
+
+static u32
+ref_count_mipi_allocation[N_CSI_PORTS]; /* Initialized in mipi_init */
+
+/* Assumptions:
+ * - A line is a multiple of 4 bytes = 1 word.
+ * - Each frame has SOF and EOF (each 1 word).
+ * - Each line has a format header and optionally SOL and EOL (each 1 word).
+ * - Odd and even lines of the YUV420 format differ in bits per pixel.
+ * - Custom size of embedded data.
+ * -- Interleaved frames are not taken into account.
+ * -- Lines are multiples of 8B, and not necessarily of (custom) 3B, 7B,
+ *    etc.
+ * Result is given in DDR mem words, 32B or 256 bits.
+ */
+int
+ia_css_mipi_frame_calculate_size(const unsigned int width,
+ const unsigned int height,
+ const enum atomisp_input_format format,
+ const bool hasSOLandEOL,
+ const unsigned int embedded_data_size_words,
+ unsigned int *size_mem_words)
+{
+ int err = 0;
+
+ unsigned int bits_per_pixel = 0;
+ unsigned int even_line_bytes = 0;
+ unsigned int odd_line_bytes = 0;
+ unsigned int words_per_odd_line = 0;
+ unsigned int words_for_first_line = 0;
+ unsigned int words_per_even_line = 0;
+ unsigned int mem_words_per_even_line = 0;
+ unsigned int mem_words_per_odd_line = 0;
+ unsigned int mem_words_for_first_line = 0;
+ unsigned int mem_words_for_EOF = 0;
+ unsigned int mem_words = 0;
+ unsigned int width_padded = width;
+
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ if (IS_ISP2401)
+ width_padded += (2 * ISP_VEC_NELEMS);
+
+ IA_CSS_ENTER("padded_width=%d, height=%d, format=%d, hasSOLandEOL=%d, embedded_data_size_words=%d\n",
+ width_padded, height, format, hasSOLandEOL, embedded_data_size_words);
+
+ switch (format) {
+ case ATOMISP_INPUT_FORMAT_RAW_6: /* 4p, 3B, 24bits */
+ bits_per_pixel = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7: /* 8p, 7B, 56bits */
+ bits_per_pixel = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_8: /* 1p, 1B, 8bits */
+ case ATOMISP_INPUT_FORMAT_BINARY_8: /* 8bits, TODO: check. */
+ case ATOMISP_INPUT_FORMAT_YUV420_8: /* odd 2p, 2B, 16bits, even 2p, 4B, 32bits */
+ bits_per_pixel = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10: /* odd 4p, 5B, 40bits, even 4p, 10B, 80bits */
+ case ATOMISP_INPUT_FORMAT_RAW_10: /* 4p, 5B, 40bits */
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ bits_per_pixel = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: /* 2p, 3B, 24bits */
+ case ATOMISP_INPUT_FORMAT_RAW_12: /* 2p, 3B, 24bits */
+ bits_per_pixel = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14: /* 4p, 7B, 56bits */
+ bits_per_pixel = 14;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_444: /* 1p, 2B, 16bits */
+ case ATOMISP_INPUT_FORMAT_RGB_555: /* 1p, 2B, 16bits */
+ case ATOMISP_INPUT_FORMAT_RGB_565: /* 1p, 2B, 16bits */
+ case ATOMISP_INPUT_FORMAT_YUV422_8: /* 2p, 4B, 32bits */
+ bits_per_pixel = 16;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_666: /* 4p, 9B, 72bits */
+ bits_per_pixel = 18;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV422_10: /* 2p, 5B, 40bits */
+ bits_per_pixel = 20;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_888: /* 1p, 3B, 24bits */
+ bits_per_pixel = 24;
+ break;
+
+ case ATOMISP_INPUT_FORMAT_YUV420_16: /* Not supported */
+ case ATOMISP_INPUT_FORMAT_YUV422_16: /* Not supported */
+ case ATOMISP_INPUT_FORMAT_RAW_16: /* TODO: not specified in MIPI SPEC, check */
+ default:
+ return -EINVAL;
+ }
+
+ odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == ATOMISP_INPUT_FORMAT_YUV420_8
+ || format == ATOMISP_INPUT_FORMAT_YUV420_10
+ || format == ATOMISP_INPUT_FORMAT_YUV420_16) {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
+ 3; /* ceil ( bits per line / 8) */
+ } else {
+ even_line_bytes = odd_line_bytes;
+ }
+
+ /* a frame represented in memory: ()- optional; data - payload words.
+ * addr 0 1 2 3 4 5 6 7:
+ * first SOF (SOL) PACK_H data data data data data
+ * data data data data data data data data
+ * ...
+ * data data 0 0 0 0 0 0
+ * second (EOL) (SOL) PACK_H data data data data data
+ * data data data data data data data data
+ * ...
+ * data data 0 0 0 0 0 0
+ * ...
+ * last (EOL) EOF 0 0 0 0 0 0
+ *
+ * Embedded lines are regular lines stored before the first and after
+ * payload lines.
+ */
+
+ words_per_odd_line = (odd_line_bytes + 3) >> 2;
+ /* ceil(odd_line_bytes/4); word = 4 bytes */
+ words_per_even_line = (even_line_bytes + 3) >> 2;
+ words_for_first_line = words_per_odd_line + 2 + (hasSOLandEOL ? 1 : 0);
+ /* + SOF +packet header + optionally (SOL), but (EOL) is not in the first line */
+ words_per_odd_line += (1 + (hasSOLandEOL ? 2 : 0));
+ /* each non-first line has format header, and optionally (SOL) and (EOL). */
+ words_per_even_line += (1 + (hasSOLandEOL ? 2 : 0));
+
+ mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
+ /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
+ mem_words_for_first_line = (words_for_first_line + 7) >> 3;
+ mem_words_per_even_line = (words_per_even_line + 7) >> 3;
+	mem_words_for_EOF = 1; /* last line consists of the optional (EOL) and EOF */
+
+ mem_words = ((embedded_data_size_words + 7) >> 3) +
+ mem_words_for_first_line +
+ (((height + 1) >> 1) - 1) * mem_words_per_odd_line +
+ /* ceil (height/2) - 1 (first line is calculated separately) */
+ (height >> 1) * mem_words_per_even_line + /* floor(height/2) */
+ mem_words_for_EOF;
+
+	*size_mem_words = mem_words; /* ceil(words/8); mem word is 32B = 8 words. */
+ /* Check if the above is still needed. */
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
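+
+/*
+ * Worked example (illustrative, derived from the formula above, assuming an
+ * ISP2400 so that width_padded == width): RAW_10, 1920x1080, no SOL/EOL and
+ * no embedded data gives odd_line_bytes = ceil(1920 * 10 / 8) = 2400 bytes,
+ * 601 words per regular line and 602 words for the first line (SOF + packet
+ * header), i.e. 76 memory words per line, so
+ *   size_mem_words = 76 + 539 * 76 + 540 * 76 + 1 = 82081 DDR words
+ * (about 2.5 MiB at 32 bytes per DDR word).
+ */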
+
+void
+mipi_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < N_CSI_PORTS; i++)
+ ref_count_mipi_allocation[i] = 0;
+}
+
+/*
+ * @brief Calculate the required MIPI buffer sizes.
+ * Based on the stream configuration, calculate the
+ * required MIPI buffer sizes (in DDR words).
+ *
+ * @param[in] stream_cfg Point to the target stream configuration
+ * @param[out] size_mem_words MIPI buffer size in DDR words.
+ *
+ * @return
+ */
+static int calculate_mipi_buff_size(struct ia_css_stream_config *stream_cfg,
+ unsigned int *size_mem_words)
+{
+ unsigned int width;
+ unsigned int height;
+ enum atomisp_input_format format;
+ bool pack_raw_pixels;
+
+ unsigned int width_padded;
+ unsigned int bits_per_pixel = 0;
+
+ unsigned int even_line_bytes = 0;
+ unsigned int odd_line_bytes = 0;
+
+ unsigned int words_per_odd_line = 0;
+ unsigned int words_per_even_line = 0;
+
+ unsigned int mem_words_per_even_line = 0;
+ unsigned int mem_words_per_odd_line = 0;
+
+ unsigned int mem_words_per_buff_line = 0;
+ unsigned int mem_words_per_buff = 0;
+ int err = 0;
+
+ /**
+ * zhengjie.lu@intel.com
+ *
+ * NOTE
+ * - In the struct "ia_css_stream_config", there
+ * are two members: "input_config" and "isys_config".
+ * Both of them provide the same information, e.g.
+ * input_res and format.
+ *
+	 * The question here is: which one should be used?
+ */
+ width = stream_cfg->input_config.input_res.width;
+ height = stream_cfg->input_config.input_res.height;
+ format = stream_cfg->input_config.format;
+ pack_raw_pixels = stream_cfg->pack_raw_pixels;
+ /* end of NOTE */
+
+ /**
+ * zhengjie.lu@intel.com
+ *
+ * NOTE
+ * - The following code is derived from the
+ * existing code "ia_css_mipi_frame_calculate_size()".
+ *
+	 * The question here is: why add "2 * ISP_VEC_NELEMS"
+	 * to "width_padded" instead of aligning "width_padded"
+	 * to "2 * ISP_VEC_NELEMS"?
+ */
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ width_padded = width + (2 * ISP_VEC_NELEMS);
+ /* end of NOTE */
+
+ IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
+ width_padded, height, format);
+
+ bits_per_pixel = sh_css_stream_format_2_bits_per_subpixel(format);
+ bits_per_pixel =
+ (format == ATOMISP_INPUT_FORMAT_RAW_10 && pack_raw_pixels) ? bits_per_pixel : 16;
+ if (bits_per_pixel == 0)
+ return -EINVAL;
+
+ odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == ATOMISP_INPUT_FORMAT_YUV420_8
+ || format == ATOMISP_INPUT_FORMAT_YUV420_10) {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
+ 3; /* ceil ( bits per line / 8) */
+ } else {
+ even_line_bytes = odd_line_bytes;
+ }
+
+ words_per_odd_line = (odd_line_bytes + 3) >> 2;
+ /* ceil(odd_line_bytes/4); word = 4 bytes */
+ words_per_even_line = (even_line_bytes + 3) >> 2;
+
+ mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
+ /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
+ mem_words_per_even_line = (words_per_even_line + 7) >> 3;
+
+ mem_words_per_buff_line =
+ (mem_words_per_odd_line > mem_words_per_even_line) ? mem_words_per_odd_line : mem_words_per_even_line;
+ mem_words_per_buff = mem_words_per_buff_line * height;
+
+ *size_mem_words = mem_words_per_buff;
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+int
+allocate_mipi_frames(struct ia_css_pipe *pipe,
+ struct ia_css_stream_info *info)
+{
+ int err = -EINVAL;
+ unsigned int port;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) enter:\n", pipe);
+
+ if (IS_ISP2401 && pipe->stream->config.online) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n",
+ pipe);
+ return 0;
+ }
+
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: no buffers needed for pipe mode.\n",
+ pipe);
+ return 0; /* AM TODO: Check */
+ }
+
+ port = (unsigned int)pipe->stream->config.source.port.port;
+ if (port >= N_CSI_PORTS) {
+ IA_CSS_ERROR("allocate_mipi_frames(%p) exit: port is not correct (port=%d).",
+ pipe, port);
+ return -EINVAL;
+ }
+
+ if (IS_ISP2401)
+ err = calculate_mipi_buff_size(&pipe->stream->config,
+ &my_css.mipi_frame_size[port]);
+
+ /*
+	 * The 2401 system allows multiple streams to use the same physical port. This is not
+	 * true for the 2400 system. Currently the 2401 uses MIPI buffers as a temporary solution.
+	 * TODO AM: Once that is changed (removed), this code should be removed as well.
+	 * In that case only 2400-related code should remain.
+ */
+ if (ref_count_mipi_allocation[port] != 0) {
+ if (IS_ISP2401)
+ ref_count_mipi_allocation[port]++;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) leave: nothing to do, already allocated for this port (port=%d).\n",
+ pipe, port);
+ return 0;
+ }
+
+ ref_count_mipi_allocation[port]++;
+
+ /* AM TODO: mipi frames number should come from stream struct. */
+ my_css.num_mipi_frames[port] = NUM_MIPI_FRAMES_PER_STREAM;
+
+ /* Incremental allocation (per stream), not for all streams at once. */
+ { /* limit the scope of i,j */
+ unsigned int i, j;
+
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ /* free previous frame */
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ /* check if new frame is needed */
+ if (i < my_css.num_mipi_frames[port]) {
+ /* allocate new frame */
+ err = ia_css_frame_allocate_with_buffer_size(
+ &my_css.mipi_frames[port][i],
+ my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES);
+ if (err) {
+ for (j = 0; j < i; j++) {
+ if (my_css.mipi_frames[port][j]) {
+ ia_css_frame_free(my_css.mipi_frames[port][j]);
+ my_css.mipi_frames[port][j] = NULL;
+ }
+ }
+ IA_CSS_ERROR("allocate_mipi_frames(%p, %d) exit: allocation failed.",
+ pipe, port);
+ return err;
+ }
+ }
+ if (info->metadata_info.size > 0) {
+ /* free previous metadata buffer */
+ if (my_css.mipi_metadata[port][i]) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ /* check if need to allocate a new metadata buffer */
+ if (i < my_css.num_mipi_frames[port]) {
+ /* allocate new metadata buffer */
+ my_css.mipi_metadata[port][i] = ia_css_metadata_allocate(&info->metadata_info);
+ if (!my_css.mipi_metadata[port][i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_metadata(%p, %d) failed.\n",
+ pipe, port);
+ return err;
+ }
+ }
+ }
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit:\n", pipe);
+
+ return err;
+}
+
+int
+free_mipi_frames(struct ia_css_pipe *pipe)
+{
+ int err = -EINVAL;
+ unsigned int port;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) enter:\n", pipe);
+
+ /* assert(pipe != NULL); TEMP: TODO: Should be assert only. */
+ if (pipe) {
+ assert(pipe->stream);
+ if ((!pipe) || (!pipe->stream)) {
+ IA_CSS_ERROR("free_mipi_frames(%p) exit: pipe or stream is null.",
+ pipe);
+ return -EINVAL;
+ }
+
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong mode.",
+ pipe);
+ return err;
+ }
+
+ port = (unsigned int)pipe->stream->config.source.port.port;
+
+ if (port >= N_CSI_PORTS) {
+ IA_CSS_ERROR("free_mipi_frames(%p, %d) exit: pipe port is not correct.",
+ pipe, port);
+ return err;
+ }
+
+ if (ref_count_mipi_allocation[port] > 0) {
+ if (!IS_ISP2401) {
+ assert(ref_count_mipi_allocation[port] == 1);
+ if (ref_count_mipi_allocation[port] != 1) {
+ IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong ref_count (ref_count=%d).",
+ pipe, ref_count_mipi_allocation[port]);
+ return err;
+ }
+ }
+
+ ref_count_mipi_allocation[port]--;
+
+ if (ref_count_mipi_allocation[port] == 0) {
+ /* no streams are using this buffer, so free it */
+ unsigned int i;
+
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(port=%d, num=%d).\n", port, i);
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ if (my_css.mipi_metadata[port][i]) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit (deallocated).\n", pipe);
+ }
+ }
+	} else { /* pipe == NULL */
+		/* AM TEMP: freeing all mipi buffers, just like the legacy code. */
+ for (port = 0; port < N_CSI_PORTS; port++) {
+ unsigned int i;
+
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(port=%d, num=%d).\n", port, i);
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ if (my_css.mipi_metadata[port][i]) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ }
+ ref_count_mipi_allocation[port] = 0;
+ }
+ }
+ return 0;
+}
+
+int
+send_mipi_frames(struct ia_css_pipe *pipe)
+{
+ int err = -EINVAL;
+ unsigned int i;
+ unsigned int port;
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p", pipe);
+
+ /* multi stream video needs mipi buffers */
+ /* nothing to be done in other cases. */
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ IA_CSS_LOG("nothing to be done for this mode");
+ return 0;
+ /* TODO: AM: maybe this should be returning an error. */
+ }
+
+ port = (unsigned int)pipe->stream->config.source.port.port;
+
+ if (port >= N_CSI_PORTS) {
+ IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).",
+ pipe, port);
+ return err;
+ }
+
+ /* Hand-over the SP-internal mipi buffers */
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ /* Need to include the offset for port. */
+ sh_css_update_host2sp_mipi_frame(port * NUM_MIPI_FRAMES_PER_STREAM + i,
+ my_css.mipi_frames[port][i]);
+ sh_css_update_host2sp_mipi_metadata(port * NUM_MIPI_FRAMES_PER_STREAM + i,
+ my_css.mipi_metadata[port][i]);
+ }
+ sh_css_update_host2sp_num_mipi_frames(my_css.num_mipi_frames[port]);
+
+ /**********************************
+ * Send an event to inform the SP
+ * that all MIPI frames are passed.
+ **********************************/
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_ERROR("sp is not running");
+ return err;
+ }
+
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY,
+ (uint8_t)port,
+ (uint8_t)my_css.num_mipi_frames[port],
+ 0 /* not used */);
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.h b/drivers/staging/media/atomisp/pci/sh_css_mipi.h
new file mode 100644
index 000000000000..b3887ee3c75a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SH_CSS_MIPI_H
+#define __SH_CSS_MIPI_H
+
+#include <ia_css_err.h> /* ia_css_err */
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_stream_public.h> /* ia_css_stream_config */
+
+void
+mipi_init(void);
+
+int
+allocate_mipi_frames(struct ia_css_pipe *pipe, struct ia_css_stream_info *info);
+
+int
+free_mipi_frames(struct ia_css_pipe *pipe);
+
+int
+send_mipi_frames(struct ia_css_pipe *pipe);
+
+#endif /* __SH_CSS_MIPI_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mmu.c b/drivers/staging/media/atomisp/pci/sh_css_mmu.c
new file mode 100644
index 000000000000..f2a84c1d6e52
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_mmu.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_mmu.h"
+#include "ia_css_mmu_private.h"
+#include <ia_css_debug.h>
+#include "sh_css_sp.h"
+#include "sh_css_firmware.h"
+#include "sp.h"
+#include "mmu_device.h"
+
+void
+ia_css_mmu_invalidate_cache(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_mmu_invalidate_cache() enter\n");
+
+ /* if the SP is not running we should not access its dmem */
+ if (sh_css_sp_is_running()) {
+ HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
+
+		(void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; /* Suppress warnings in CRUN */
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
+ true);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_mmu_invalidate_cache() leave\n");
+}
+
+void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index)
+{
+ int i;
+
+ IA_CSS_ENTER_PRIVATE("base_index=0x%08x\n", base_index);
+ for (i = 0; i < N_MMU_ID; i++) {
+ mmu_ID_t mmu_id = i;
+
+ mmu_set_page_table_base_index(mmu_id, base_index);
+ mmu_invalidate_cache(mmu_id);
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c
new file mode 100644
index 000000000000..7fa4aab35b06
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "sh_css_param_dvs.h"
+#include <assert_support.h>
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include "ia_css_debug.h"
+
+static struct ia_css_dvs_6axis_config *
+alloc_dvs_6axis_table(const struct ia_css_resolution *frame_res,
+ struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y = 0;
+ unsigned int height_y = 0;
+ unsigned int width_uv = 0;
+ unsigned int height_uv = 0;
+ int err = 0;
+ struct ia_css_dvs_6axis_config *dvs_config = NULL;
+
+ dvs_config = kvmalloc(sizeof(struct ia_css_dvs_6axis_config),
+ GFP_KERNEL);
+ if (!dvs_config) {
+ IA_CSS_ERROR("out of memory");
+ err = -ENOMEM;
+ } else {
+ /*Initialize new struct with latest config settings*/
+ if (dvs_config_src) {
+ dvs_config->width_y = width_y = dvs_config_src->width_y;
+ dvs_config->height_y = height_y = dvs_config_src->height_y;
+ dvs_config->width_uv = width_uv = dvs_config_src->width_uv;
+ dvs_config->height_uv = height_uv = dvs_config_src->height_uv;
+ IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
+ } else if (frame_res) {
+ dvs_config->width_y = width_y = DVS_TABLE_IN_BLOCKDIM_X_LUMA(frame_res->width);
+ dvs_config->height_y = height_y = DVS_TABLE_IN_BLOCKDIM_Y_LUMA(
+ frame_res->height);
+ dvs_config->width_uv = width_uv = DVS_TABLE_IN_BLOCKDIM_X_CHROMA(
+ frame_res->width /
+						       2); /* UV = Y/2, depends on colour format YUV 4:2:0 */
+ dvs_config->height_uv = height_uv = DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(
+ frame_res->height /
+						       2); /* UV = Y/2, depends on colour format YUV 4:2:0 */
+ IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
+ }
+
+ /* Generate Y buffers */
+ dvs_config->xcoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->xcoords_y) {
+ IA_CSS_ERROR("out of memory");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ dvs_config->ycoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->ycoords_y) {
+ IA_CSS_ERROR("out of memory");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Generate UV buffers */
+ IA_CSS_LOG("UV W %d H %d", width_uv, height_uv);
+
+ dvs_config->xcoords_uv = kvmalloc(width_uv * height_uv * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->xcoords_uv) {
+ IA_CSS_ERROR("out of memory");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ dvs_config->ycoords_uv = kvmalloc(width_uv * height_uv * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->ycoords_uv) {
+ IA_CSS_ERROR("out of memory");
+ err = -ENOMEM;
+ }
+exit:
+ if (err) {
+ free_dvs_6axis_table(
+ &dvs_config); /* we might have allocated some memory, release this */
+ dvs_config = NULL;
+ }
+ }
+
+ IA_CSS_LEAVE("dvs_config=%p", dvs_config);
+ return dvs_config;
+}
+
+static void
+init_dvs_6axis_table_from_default(struct ia_css_dvs_6axis_config *dvs_config,
+ const struct ia_css_resolution *dvs_offset)
+{
+ unsigned int x, y;
+ unsigned int width_y = dvs_config->width_y;
+ unsigned int height_y = dvs_config->height_y;
+ unsigned int width_uv = dvs_config->width_uv;
+ unsigned int height_uv = dvs_config->height_uv;
+
+ IA_CSS_LOG("Env_X=%d, Env_Y=%d, width_y=%d, height_y=%d",
+ dvs_offset->width, dvs_offset->height, width_y, height_y);
+ for (y = 0; y < height_y; y++) {
+ for (x = 0; x < width_y; x++) {
+ dvs_config->xcoords_y[y * width_y + x] = (dvs_offset->width + x *
+ DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_y; y++) {
+ for (x = 0; x < width_y; x++) {
+ dvs_config->ycoords_y[y * width_y + x] = (dvs_offset->height + y *
+ DVS_BLOCKDIM_Y_LUMA) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_uv; y++) {
+ for (x = 0; x < width_uv;
+ x++) { /* Envelope dimensions set in Ypixels hence offset UV = offset Y/2 */
+ dvs_config->xcoords_uv[y * width_uv + x] = ((dvs_offset->width / 2) + x *
+ DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_uv; y++) {
+ for (x = 0; x < width_uv;
+ x++) { /* Envelope dimensions set in Ypixels hence offset UV = offset Y/2 */
+ dvs_config->ycoords_uv[y * width_uv + x] = ((dvs_offset->height / 2) + y *
+ DVS_BLOCKDIM_Y_CHROMA) <<
+ DVS_COORD_FRAC_BITS;
+ }
+ }
+}
+
+static void
+init_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config,
+ struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y = dvs_config->width_y;
+ unsigned int height_y = dvs_config->height_y;
+ unsigned int width_uv = dvs_config->width_uv;
+ unsigned int height_uv = dvs_config->height_uv;
+
+ memcpy(dvs_config->xcoords_y, dvs_config_src->xcoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config->ycoords_y, dvs_config_src->ycoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config->xcoords_uv, dvs_config_src->xcoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+ memcpy(dvs_config->ycoords_uv, dvs_config_src->ycoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+}
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table(const struct ia_css_resolution *frame_res,
+ const struct ia_css_resolution *dvs_offset)
+{
+ struct ia_css_dvs_6axis_config *dvs_6axis_table;
+
+ assert(frame_res);
+ assert(dvs_offset);
+
+ dvs_6axis_table = alloc_dvs_6axis_table(frame_res, NULL);
+ if (dvs_6axis_table) {
+ init_dvs_6axis_table_from_default(dvs_6axis_table, dvs_offset);
+ return dvs_6axis_table;
+ }
+ return NULL;
+}
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config
+ *dvs_config_src)
+{
+ struct ia_css_dvs_6axis_config *dvs_6axis_table;
+
+ assert(dvs_config_src);
+
+ dvs_6axis_table = alloc_dvs_6axis_table(NULL, dvs_config_src);
+ if (dvs_6axis_table) {
+ init_dvs_6axis_table_from_config(dvs_6axis_table, dvs_config_src);
+ return dvs_6axis_table;
+ }
+ return NULL;
+}
+
+void
+free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config)
+{
+ if ((dvs_6axis_config) && (*dvs_6axis_config)) {
+ IA_CSS_ENTER_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
+ if ((*dvs_6axis_config)->xcoords_y) {
+ kvfree((*dvs_6axis_config)->xcoords_y);
+ (*dvs_6axis_config)->xcoords_y = NULL;
+ }
+
+ if ((*dvs_6axis_config)->ycoords_y) {
+ kvfree((*dvs_6axis_config)->ycoords_y);
+ (*dvs_6axis_config)->ycoords_y = NULL;
+ }
+
+ /* Free up UV buffers */
+ if ((*dvs_6axis_config)->xcoords_uv) {
+ kvfree((*dvs_6axis_config)->xcoords_uv);
+ (*dvs_6axis_config)->xcoords_uv = NULL;
+ }
+
+ if ((*dvs_6axis_config)->ycoords_uv) {
+ kvfree((*dvs_6axis_config)->ycoords_uv);
+ (*dvs_6axis_config)->ycoords_uv = NULL;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
+ kvfree(*dvs_6axis_config);
+ *dvs_6axis_config = NULL;
+ }
+}
+
+void copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
+ const struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y;
+ unsigned int height_y;
+ unsigned int width_uv;
+ unsigned int height_uv;
+
+ assert(dvs_config_src);
+ assert(dvs_config_dst);
+ assert(dvs_config_src->xcoords_y);
+ assert(dvs_config_src->xcoords_uv);
+ assert(dvs_config_src->ycoords_y);
+ assert(dvs_config_src->ycoords_uv);
+ assert(dvs_config_src->width_y == dvs_config_dst->width_y);
+ assert(dvs_config_src->width_uv == dvs_config_dst->width_uv);
+ assert(dvs_config_src->height_y == dvs_config_dst->height_y);
+ assert(dvs_config_src->height_uv == dvs_config_dst->height_uv);
+
+ width_y = dvs_config_src->width_y;
+ height_y = dvs_config_src->height_y;
+ width_uv =
+	    dvs_config_src->width_uv; /* = Y/2, depends on colour format YUV 4:2:0 */
+ height_uv = dvs_config_src->height_uv;
+
+ memcpy(dvs_config_dst->xcoords_y, dvs_config_src->xcoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config_dst->ycoords_y, dvs_config_src->ycoords_y,
+ (width_y * height_y * sizeof(uint32_t)));
+
+ memcpy(dvs_config_dst->xcoords_uv, dvs_config_src->xcoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+ memcpy(dvs_config_dst->ycoords_uv, dvs_config_src->ycoords_uv,
+ (width_uv * height_uv * sizeof(uint32_t)));
+}
+
+void
+ia_css_dvs_statistics_get(enum dvs_statistics_type type,
+ union ia_css_dvs_statistics_host *host_stats,
+ const union ia_css_dvs_statistics_isp *isp_stats)
+{
+ if (type == DVS_STATISTICS) {
+ ia_css_get_dvs_statistics(host_stats->p_dvs_statistics_host,
+ isp_stats->p_dvs_statistics_isp);
+ } else if (type == DVS2_STATISTICS) {
+ ia_css_get_dvs2_statistics(host_stats->p_dvs2_statistics_host,
+ isp_stats->p_dvs_statistics_isp);
+ }
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
new file mode 100644
index 000000000000..b31a0d4e8acb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_PARAMS_DVS_H_
+#define _SH_CSS_PARAMS_DVS_H_
+
+#include <linux/math.h>
+
+#include <math_support.h>
+#include <ia_css_types.h>
+#include "gdc_global.h" /* gdc_warp_param_mem_t */
+
+#define DVS_ENV_MIN_X (12)
+#define DVS_ENV_MIN_Y (12)
+
+#define DVS_BLOCKDIM_X (64) /* X block width */
+#define DVS_BLOCKDIM_Y_LUMA (64) /* Y block height */
+#define DVS_BLOCKDIM_Y_CHROMA (32) /* UV block height is half the Y block height */
+
+/* ISP2400 */
+/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */
+#define DVS_NUM_BLOCKS_X(X) round_up(DIV_ROUND_UP((X), DVS_BLOCKDIM_X), 2)
+#define DVS_NUM_BLOCKS_X_CHROMA(X) DIV_ROUND_UP((X), DVS_BLOCKDIM_X)
+
+/* ISP2400 */
+/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */
+#define DVS_NUM_BLOCKS_Y(X) DIV_ROUND_UP((X), DVS_BLOCKDIM_Y_LUMA)
+#define DVS_NUM_BLOCKS_Y_CHROMA(X) DIV_ROUND_UP((X), DVS_BLOCKDIM_Y_CHROMA)
+
+/* N blocks have N + 1 set of coords */
+#define DVS_TABLE_IN_BLOCKDIM_X_LUMA(X) (DVS_NUM_BLOCKS_X(X) + 1)
+#define DVS_TABLE_IN_BLOCKDIM_X_CHROMA(X) (DVS_NUM_BLOCKS_X_CHROMA(X) + 1)
+#define DVS_TABLE_IN_BLOCKDIM_Y_LUMA(X) (DVS_NUM_BLOCKS_Y(X) + 1)
+#define DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(X) (DVS_NUM_BLOCKS_Y_CHROMA(X) + 1)
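+
+/*
+ * Example (illustrative): for a 1920x1080 luma plane,
+ *   DVS_NUM_BLOCKS_X(1920) = round_up(ceil(1920 / 64), 2) = 30
+ *   DVS_NUM_BLOCKS_Y(1080) = ceil(1080 / 64)              = 17
+ * so the luma coordinate table has (30 + 1) x (17 + 1) = 31 x 18 points.
+ */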
+
+#define DVS_COORD_FRAC_BITS (10)
+
+/* ISP2400 */
+#define DVS_INPUT_BYTES_PER_PIXEL (1)
+
+#define XMEM_ALIGN_LOG2 (5)
+
+#define DVS_6AXIS_COORDS_ELEMS \
+ round_up(sizeof(gdc_warp_param_mem_t), HIVE_ISP_DDR_WORD_BYTES)
+
+/* Currently we only support two outputs with the same resolution; output 0 is the default one. */
+#define DVS_6AXIS_BYTES(binary) \
+ (DVS_6AXIS_COORDS_ELEMS \
+ * DVS_NUM_BLOCKS_X((binary)->out_frame_info[0].res.width) \
+ * DVS_NUM_BLOCKS_Y((binary)->out_frame_info[0].res.height))
+
+/*
+ * ISP2400:
+ * Bilinear interpolation (HRT_GDC_BLI_MODE) is currently the only supported method.
+ * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet.
+ */
+#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table(const struct ia_css_resolution *frame_res,
+ const struct ia_css_resolution *dvs_offset);
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config
+ *dvs_config_src);
+
+void
+free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config);
+
+void
+copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
+ const struct ia_css_dvs_6axis_config *dvs_config_src);
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
new file mode 100644
index 000000000000..513e272f2fdc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/math.h>
+#include <linux/slab.h>
+
+#include <math_support.h>
+#include "sh_css_param_shading.h"
+#include "ia_css_shading.h"
+#include "assert_support.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_debug.h"
+#include "ia_css_pipe_binarydesc.h"
+
+#include "sh_css_hrt.h"
+
+#include "platform_support.h"
+
+/* Bilinear interpolation on shading tables:
+ * For each target point T, we calculate the 4 surrounding source points:
+ * ul (upper left), ur (upper right), ll (lower left) and lr (lower right).
+ * We then calculate the distances from T to the source points: dx0, dx1,
+ * dy0 and dy1.
+ * We then calculate the value of T:
+ * dx0*dy0*Slr + dx0*dy1*Sur + dx1*dy0*Sll + dx1*dy1*Sul.
+ * We choose a grid size of 1x1 which means:
+ * dx1 = 1-dx0
+ * dy1 = 1-dy0
+ *
+ * Sul dx0 dx1 Sur
+ * .<----->|<------------->.
+ * ^
+ * dy0|
+ * v T
+ * - .
+ * ^
+ * |
+ * dy1|
+ * v
+ * . .
+ * Sll Slr
+ *
+ * Padding:
+ * The area that the ISP operates on can include padding both on the left
+ * and the right. We need to pad the shading table such that the shading
+ * values end up on the correct pixel values. This means we must pad the
+ * shading table to match the ISP padding.
+ * We can have 5 cases:
+ * 1. All 4 points fall in the left padding.
+ * 2. The left 2 points fall in the left padding.
+ * 3. All 4 points fall in the cropped (target) region.
+ * 4. The right 2 points fall in the right padding.
+ * 5. All 4 points fall in the right padding.
+ * Cases 1 and 5 are easy to handle: we simply use the
+ * value 1 in the shading table.
+ * Cases 2 and 4 require interpolation that takes into
+ * account how far into the padding area the pixels
+ * fall. We extrapolate the shading table into the
+ * padded area and then interpolate.
+ */
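+/*
+ * Worked example of the interpolation formula above (illustrative numbers
+ * only): with dx0 = 0.25 and dy0 = 0.5 (so dx1 = 0.75, dy1 = 0.5) and
+ * source values Sul = 100, Sur = 120, Sll = 110, Slr = 130, the target
+ * value is 0.25*0.5*130 + 0.25*0.5*120 + 0.75*0.5*110 + 0.75*0.5*100 = 110.
+ * crop_and_interpolate() below performs the same computation, but keeps the
+ * distances in integer pixel units and divides by (divx * divy) at the end.
+ */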
+static void
+crop_and_interpolate(unsigned int cropped_width,
+ unsigned int cropped_height,
+ unsigned int left_padding,
+ int right_padding,
+ int top_padding,
+ const struct ia_css_shading_table *in_table,
+ struct ia_css_shading_table *out_table,
+ enum ia_css_sc_color color)
+{
+ unsigned int i, j,
+ sensor_width,
+ sensor_height,
+ table_width,
+ table_height,
+ table_cell_h,
+ out_cell_size,
+ in_cell_size,
+ out_start_row,
+ padded_width;
+ int out_start_col, /* can be negative to indicate padded space */
+ table_cell_w;
+ unsigned short *in_ptr,
+ *out_ptr;
+
+ assert(in_table);
+ assert(out_table);
+
+ sensor_width = in_table->sensor_width;
+ sensor_height = in_table->sensor_height;
+ table_width = in_table->width;
+ table_height = in_table->height;
+ in_ptr = in_table->data[color];
+ out_ptr = out_table->data[color];
+
+ padded_width = cropped_width + left_padding + right_padding;
+ out_cell_size = CEIL_DIV(padded_width, out_table->width - 1);
+ in_cell_size = CEIL_DIV(sensor_width, table_width - 1);
+
+ out_start_col = ((int)sensor_width - (int)cropped_width) / 2 - left_padding;
+ out_start_row = ((int)sensor_height - (int)cropped_height) / 2 - top_padding;
+ table_cell_w = (int)((table_width - 1) * in_cell_size);
+ table_cell_h = (table_height - 1) * in_cell_size;
+
+ for (i = 0; i < out_table->height; i++) {
+ int ty, src_y0, src_y1;
+ unsigned int sy0, sy1, dy0, dy1, divy;
+
+ /*
+ * calculate target point and make sure it falls within
+ * the table
+ */
+ ty = out_start_row + i * out_cell_size;
+
+		/*
+		 * calculate closest source points in shading table and
+		 * make sure they fall within the table
+		 */
+ src_y0 = ty / (int)in_cell_size;
+ if (in_cell_size < out_cell_size)
+ src_y1 = (ty + out_cell_size) / in_cell_size;
+ else
+ src_y1 = src_y0 + 1;
+ src_y0 = clamp(src_y0, 0, (int)table_height - 1);
+ src_y1 = clamp(src_y1, 0, (int)table_height - 1);
+ ty = min(clamp(ty, 0, (int)sensor_height - 1),
+ (int)table_cell_h);
+
+ /* calculate closest source points for distance computation */
+ sy0 = min(src_y0 * in_cell_size, sensor_height - 1);
+ sy1 = min(src_y1 * in_cell_size, sensor_height - 1);
+ /* calculate distance between source and target pixels */
+ dy0 = ty - sy0;
+ dy1 = sy1 - ty;
+ divy = sy1 - sy0;
+ if (divy == 0) {
+ dy0 = 1;
+ divy = 1;
+ }
+
+ for (j = 0; j < out_table->width; j++, out_ptr++) {
+ int tx, src_x0, src_x1;
+ unsigned int sx0, sx1, dx0, dx1, divx;
+ unsigned short s_ul, s_ur, s_ll, s_lr;
+
+ /* calculate target point */
+ tx = out_start_col + j * out_cell_size;
+ /* calculate closest source points. */
+ src_x0 = tx / (int)in_cell_size;
+ if (in_cell_size < out_cell_size) {
+ src_x1 = (tx + out_cell_size) /
+ (int)in_cell_size;
+ } else {
+ src_x1 = src_x0 + 1;
+ }
+			/* if src points fall in padding, select closest ones. */
+ src_x0 = clamp(src_x0, 0, (int)table_width - 1);
+ src_x1 = clamp(src_x1, 0, (int)table_width - 1);
+ tx = min(clamp(tx, 0, (int)sensor_width - 1),
+ (int)table_cell_w);
+ /*
+ * calculate closest source points for distance
+ * computation
+ */
+ sx0 = min(src_x0 * in_cell_size, sensor_width - 1);
+ sx1 = min(src_x1 * in_cell_size, sensor_width - 1);
+ /*
+ * calculate distances between source and target
+ * pixels
+ */
+ dx0 = tx - sx0;
+ dx1 = sx1 - tx;
+ divx = sx1 - sx0;
+			/*
+			 * If we're at the edge, we just use the closest point
+			 * still in the grid. In that case the divider would be
+			 * 0, so set the distance and the divider to 1 to avoid
+			 * a division by zero.
+			 */
+ if (divx == 0) {
+ dx0 = 1;
+ divx = 1;
+ }
+
+ /* get source pixel values */
+ s_ul = in_ptr[(table_width * src_y0) + src_x0];
+ s_ur = in_ptr[(table_width * src_y0) + src_x1];
+ s_ll = in_ptr[(table_width * src_y1) + src_x0];
+ s_lr = in_ptr[(table_width * src_y1) + src_x1];
+
+ *out_ptr = (unsigned short)((dx0 * dy0 * s_lr + dx0 * dy1 * s_ur + dx1 * dy0 *
+ s_ll + dx1 * dy1 * s_ul) /
+ (divx * divy));
+ }
+ }
+}
+
+void
+sh_css_params_shading_id_table_generate(
+ struct ia_css_shading_table **target_table,
+ unsigned int table_width,
+ unsigned int table_height)
+{
+	/*
+	 * Initialize the table with ones and fraction_bits with zero: each
+	 * entry then represents a gain of 1.0, i.e. an identity table.
+	 */
+ unsigned int i, j;
+ struct ia_css_shading_table *result;
+
+ assert(target_table);
+
+ result = ia_css_shading_table_alloc(table_width, table_height);
+ if (!result) {
+ *target_table = NULL;
+ return;
+ }
+
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ for (j = 0; j < table_height * table_width; j++)
+ result->data[i][j] = 1;
+ }
+ result->fraction_bits = 0;
+ *target_table = result;
+}
+
+void
+prepare_shading_table(const struct ia_css_shading_table *in_table,
+ unsigned int sensor_binning,
+ struct ia_css_shading_table **target_table,
+ const struct ia_css_binary *binary,
+ unsigned int bds_factor)
+{
+ unsigned int input_width, input_height, table_width, table_height, i;
+ unsigned int left_padding, top_padding, left_cropping;
+ struct ia_css_shading_table *result;
+ struct u32_fract bds;
+ int right_padding;
+
+ assert(target_table);
+ assert(binary);
+
+ if (!in_table) {
+ sh_css_params_shading_id_table_generate(target_table,
+ binary->sctbl_width_per_color,
+ binary->sctbl_height);
+ return;
+ }
+
+ /*
+ * We use the ISP input resolution for the shading table because
+ * shading correction is performed in the bayer domain (before bayer
+ * down scaling).
+ */
+ input_height = binary->in_frame_info.res.height;
+ input_width = binary->in_frame_info.res.width;
+ left_padding = binary->left_padding;
+ left_cropping = (binary->info->sp.pipeline.left_cropping == 0) ?
+ binary->dvs_envelope.width : 2 * ISP_VEC_NELEMS;
+
+ sh_css_bds_factor_get_fract(bds_factor, &bds);
+
+ left_padding = (left_padding + binary->info->sp.pipeline.left_cropping) *
+ bds.numerator / bds.denominator -
+ binary->info->sp.pipeline.left_cropping;
+ right_padding = (binary->internal_frame_info.res.width -
+ binary->effective_in_frame_res.width * bds.denominator /
+ bds.numerator - left_cropping) * bds.numerator / bds.denominator;
+ top_padding = binary->info->sp.pipeline.top_cropping * bds.numerator /
+ bds.denominator -
+ binary->info->sp.pipeline.top_cropping;
+
+ /*
+ * We take into account the binning done by the sensor. We do this
+ * by cropping the non-binned part of the shading table and then
+ * increasing the size of a grid cell with this same binning factor.
+ */
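+	/*
+	 * Note that sensor_binning is used as a log2 factor here: e.g. a
+	 * value of 1 doubles the widths, heights and paddings below, so
+	 * that they end up in non-binned sensor coordinates (comparable to
+	 * in_table->sensor_width and in_table->sensor_height).
+	 */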
+ input_width <<= sensor_binning;
+ input_height <<= sensor_binning;
+ /*
+ * We also scale the padding by the same binning factor. This will
+ * make it much easier later on to calculate the padding of the
+ * shading table.
+ */
+ left_padding <<= sensor_binning;
+ right_padding <<= sensor_binning;
+ top_padding <<= sensor_binning;
+
+ /*
+ * during simulation, the used resolution can exceed the sensor
+ * resolution, so we clip it.
+ */
+ input_width = min(input_width, in_table->sensor_width);
+ input_height = min(input_height, in_table->sensor_height);
+
+	/*
+	 * This prepare_shading_table() function is only called from the legacy
+	 * API (not from the new API), so the legacy shading table width and
+	 * height should be used.
+	 */
+ table_width = binary->sctbl_width_per_color;
+ table_height = binary->sctbl_height;
+
+ result = ia_css_shading_table_alloc(table_width, table_height);
+ if (!result) {
+ *target_table = NULL;
+ return;
+ }
+ result->sensor_width = in_table->sensor_width;
+ result->sensor_height = in_table->sensor_height;
+ result->fraction_bits = in_table->fraction_bits;
+
+ /*
+ * now we crop the original shading table and then interpolate to the
+ * requested resolution and decimation factor.
+ */
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ crop_and_interpolate(input_width, input_height,
+ left_padding, right_padding, top_padding,
+ in_table,
+ result, i);
+ }
+ *target_table = result;
+}
+
+struct ia_css_shading_table *
+ia_css_shading_table_alloc(
+ unsigned int width,
+ unsigned int height)
+{
+ unsigned int i;
+ struct ia_css_shading_table *me;
+
+ IA_CSS_ENTER("");
+
+ me = kmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me)
+ return me;
+
+ me->width = width;
+ me->height = height;
+ me->sensor_width = 0;
+ me->sensor_height = 0;
+ me->fraction_bits = 0;
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ me->data[i] =
+ kvmalloc(width * height * sizeof(*me->data[0]),
+ GFP_KERNEL);
+ if (!me->data[i]) {
+ unsigned int j;
+
+ for (j = 0; j < i; j++) {
+ kvfree(me->data[j]);
+ me->data[j] = NULL;
+ }
+ kfree(me);
+ return NULL;
+ }
+ }
+
+ IA_CSS_LEAVE("");
+ return me;
+}
+
+void
+ia_css_shading_table_free(struct ia_css_shading_table *table)
+{
+ unsigned int i;
+
+ if (!table)
+ return;
+
+ /*
+ * We only output logging when the table is not NULL, otherwise
+ * logs will give the impression that a table was freed.
+ */
+ IA_CSS_ENTER("");
+
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ if (table->data[i]) {
+ kvfree(table->data[i]);
+ table->data[i] = NULL;
+ }
+ }
+ kfree(table);
+
+ IA_CSS_LEAVE("");
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_shading.h b/drivers/staging/media/atomisp/pci/sh_css_param_shading.h
new file mode 100644
index 000000000000..b4ca8815dc1b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_shading.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SH_CSS_PARAMS_SHADING_H
+#define __SH_CSS_PARAMS_SHADING_H
+
+#include <ia_css_types.h>
+#include <ia_css_binary.h>
+
+void
+sh_css_params_shading_id_table_generate(
+ struct ia_css_shading_table **target_table,
+ unsigned int table_width,
+ unsigned int table_height);
+
+void
+prepare_shading_table(const struct ia_css_shading_table *in_table,
+ unsigned int sensor_binning,
+ struct ia_css_shading_table **target_table,
+ const struct ia_css_binary *binary,
+ unsigned int bds_factor);
+
+#endif /* __SH_CSS_PARAMS_SHADING_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
new file mode 100644
index 000000000000..11d62313c908
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
@@ -0,0 +1,4548 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/math.h>
+
+#include "gdc_device.h" /* gdc_lut_store(), ... */
+#include "isp.h" /* ISP_VEC_ELEMBITS */
+#include "vamem.h"
+#ifndef __INLINE_HMEM__
+#define __INLINE_HMEM__
+#endif
+#include "hmem.h"
+#define IA_CSS_INCLUDE_PARAMETERS
+#define IA_CSS_INCLUDE_ACC_PARAMETERS
+
+#include "hmm.h"
+#include "sh_css_params.h"
+#include "ia_css_queue.h"
+#include "sw_event_global.h" /* Event IDs */
+
+#include "platform_support.h"
+#include "assert_support.h"
+
+#include "ia_css_stream.h"
+#include "sh_css_params_internal.h"
+#include "sh_css_param_shading.h"
+#include "sh_css_param_dvs.h"
+#include "ia_css_refcount.h"
+#include "sh_css_internal.h"
+#include "ia_css_control.h"
+#include "ia_css_shading.h"
+#include "sh_css_defs.h"
+#include "sh_css_sp.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_isp_param.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_mipi.h"
+#include "ia_css_morph.h"
+#include "ia_css_host_data.h"
+#include "ia_css_pipe.h"
+#include "ia_css_pipe_binarydesc.h"
+
+/* Include all kernel host interfaces for ISP1 */
+
+#include "anr/anr_1.0/ia_css_anr.host.h"
+#include "cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "de/de_1.0/ia_css_de.host.h"
+#include "dp/dp_1.0/ia_css_dp.host.h"
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "gc/gc_1.0/ia_css_gc.host.h"
+#include "macc/macc_1.0/ia_css_macc.host.h"
+#include "ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "ob/ob_1.0/ia_css_ob.host.h"
+#include "raw/raw_1.0/ia_css_raw.host.h"
+#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "wb/wb_1.0/ia_css_wb.host.h"
+#include "ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "xnr/xnr_1.0/ia_css_xnr.host.h"
+
+/* Include additional kernel host interfaces for ISP2 */
+
+#include "aa/aa_2/ia_css_aa2.host.h"
+#include "anr/anr_2/ia_css_anr2.host.h"
+#include "bh/bh_2/ia_css_bh.host.h"
+#include "cnr/cnr_2/ia_css_cnr2.host.h"
+#include "ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "de/de_2/ia_css_de2.host.h"
+#include "gc/gc_2/ia_css_gc2.host.h"
+#include "sdis/sdis_2/ia_css_sdis2.host.h"
+#include "ynr/ynr_2/ia_css_ynr2.host.h"
+#include "fc/fc_1.0/ia_css_formats.host.h"
+
+#include "xnr/xnr_3.0/ia_css_xnr3.host.h"
+
+#include "sh_css_frac.h"
+#include "ia_css_bufq.h"
+
+static size_t fpntbl_bytes(const struct ia_css_binary *binary)
+{
+ return array3_size(sizeof(char),
+ binary->in_frame_info.res.height,
+ binary->in_frame_info.padded_width);
+}
+
+static size_t sctbl_bytes(const struct ia_css_binary *binary)
+{
+ return size_mul(sizeof(unsigned short),
+ array3_size(binary->sctbl_height,
+ binary->sctbl_aligned_width_per_color,
+ IA_CSS_SC_NUM_COLORS));
+}
+
+static size_t morph_plane_bytes(const struct ia_css_binary *binary)
+{
+ return array3_size(SH_CSS_MORPH_TABLE_ELEM_BYTES,
+ binary->morph_tbl_aligned_width,
+ binary->morph_tbl_height);
+}
+
+/*
+ * We keep a second copy of the ptr struct for the SP to access.
+ * Again, this would not be necessary on the chip.
+ */
+static ia_css_ptr sp_ddr_ptrs;
+
+/* sp group address on DDR */
+static ia_css_ptr xmem_sp_group_ptrs;
+
+static ia_css_ptr xmem_sp_stage_ptrs[IA_CSS_PIPE_ID_NUM][SH_CSS_MAX_STAGES];
+static ia_css_ptr xmem_isp_stage_ptrs[IA_CSS_PIPE_ID_NUM][SH_CSS_MAX_STAGES];
+
+static ia_css_ptr default_gdc_lut;
+static int interleaved_lut_temp[4][HRT_GDC_N];
+
+/* Digital Zoom lookup table. See documentation for more details about the
+ * contents of this table.
+ */
+static const int zoom_table[4][HRT_GDC_N] = {
+ {
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4
+ },
+ {
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4
+ },
+ {
+ 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4,
+ 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
+ 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4
+ },
+ {
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4,
+ 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
+ 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4
+ }
+};
+
+static const struct ia_css_dz_config default_dz_config = {
+ HRT_GDC_N,
+ HRT_GDC_N,
+ {
+		{0, 0},
+		{0, 0},
+ }
+};
+
+static const struct ia_css_vector default_motion_config = {
+ 0,
+ 0
+};
+
+/* ------ deprecated(bz675) : from ------ */
+static const struct ia_css_shading_settings default_shading_settings = {
+ 1 /* enable shading table conversion in the css
+ (This matches the legacy way.) */
+};
+
+/* ------ deprecated(bz675) : to ------ */
+
+struct ia_css_isp_skc_dvs_statistics {
+ ia_css_ptr p_data;
+};
+
+static int
+ref_sh_css_ddr_address_map(
+ struct sh_css_ddr_address_map *map,
+ struct sh_css_ddr_address_map *out);
+
+static int
+write_ia_css_isp_parameter_set_info_to_ddr(
+ struct ia_css_isp_parameter_set_info *me,
+ ia_css_ptr *out);
+
+static int
+free_ia_css_isp_parameter_set_info(ia_css_ptr ptr);
+
+static int
+sh_css_params_write_to_ddr_internal(
+ struct ia_css_pipe *pipe,
+ unsigned int pipe_id,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *stage,
+ struct sh_css_ddr_address_map *ddr_map,
+ struct sh_css_ddr_address_map_size *ddr_map_size);
+
+static int
+sh_css_create_isp_params(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters **isp_params_out);
+
+static bool
+sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ bool use_default_config,
+ struct ia_css_pipe *pipe_in);
+
+static int
+sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe_in);
+
+static int
+sh_css_set_global_isp_config_on_pipe(
+ struct ia_css_pipe *curr_pipe,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+
+static int
+sh_css_set_per_frame_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+
+static int
+sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ struct ia_css_resolution pipe_in_res,
+ bool enable_zoom);
+
+ia_css_ptr
+sh_css_params_ddr_address_map(void)
+{
+ return sp_ddr_ptrs;
+}
+
+/* ****************************************************
+ * Each coefficient is stored as 7 bits to fit 2 of them into one
+ * ISP vector element, so we will store 4 coefficients in every
+ * memory word (32 bits)
+ *
+ * 0: Coefficient 0 used bits
+ * 1: Coefficient 1 used bits
+ * 2: Coefficient 2 used bits
+ * 3: Coefficient 3 used bits
+ * x: not used
+ *
+ * xx33333332222222 | xx11111110000000
+ *
+ * ***************************************************
+ */
+static struct ia_css_host_data *
+convert_allocate_fpntbl(struct ia_css_isp_parameters *params)
+{
+ unsigned int i, j;
+ short *data_ptr;
+ struct ia_css_host_data *me;
+ unsigned int isp_format_data_size;
+ u32 *isp_format_data_ptr;
+
+ assert(params);
+
+ data_ptr = params->fpn_config.data;
+ isp_format_data_size = params->fpn_config.height * params->fpn_config.width *
+ sizeof(uint32_t);
+
+ me = ia_css_host_data_allocate(isp_format_data_size);
+
+ if (!me)
+ return NULL;
+
+ isp_format_data_ptr = (uint32_t *)me->address;
+
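+ /* Pack four 7-bit FPN coefficients into each 32-bit word: data_ptr[0]
+ * in bits 0-6, data_ptr[1] in bits 7-13, data_ptr[2] in bits 16-22 and
+ * data_ptr[3] in bits 23-29 (see the layout description above).
+ */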
+ for (i = 0; i < params->fpn_config.height; i++) {
+ for (j = 0;
+ j < params->fpn_config.width;
+ j += 4, data_ptr += 4, isp_format_data_ptr++) {
+ int data = data_ptr[0] << 0 |
+ data_ptr[1] << 7 |
+ data_ptr[2] << 16 |
+ data_ptr[3] << 23;
+ *isp_format_data_ptr = data;
+ }
+ }
+ return me;
+}
+
+static int
+store_fpntbl(struct ia_css_isp_parameters *params, ia_css_ptr ptr)
+{
+ struct ia_css_host_data *isp_data;
+
+ assert(params);
+ assert(ptr != mmgr_NULL);
+
+ isp_data = convert_allocate_fpntbl(params);
+ if (!isp_data) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+ ia_css_params_store_ia_css_host_data(ptr, isp_data);
+
+ ia_css_host_data_free(isp_data);
+ return 0;
+}
+
+static void
+convert_raw_to_fpn(struct ia_css_isp_parameters *params)
+{
+ int maxval = 0;
+ unsigned int i;
+
+ assert(params);
+
+ /* Find the maximum value in the table */
+ for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++) {
+ int val = params->fpn_config.data[i];
+ /* Make sure the FPN value can be represented in a 13-bit unsigned
+ * number (ISP precision - 1), but note that the actual input range
+ * depends on the precision of the input frame data.
+ */
+ if (val < 0) {
+ /* Checkpatch patch */
+ val = 0;
+ } else if (val >= (1 << 13)) {
+ /* Checkpatch patch */
+ /* MW: BUG, is "13" a system or application property */
+ val = (1 << 13) - 1;
+ }
+ maxval = max(maxval, val);
+ }
+ /* Find the lowest shift value such that maxval >> shift fits in the
+ * range 0..63.
+ */
+ params->fpn_config.shift = 0;
+ while (maxval > 63) {
+ /* MW: BUG, is "63" a system or application property */
+ maxval >>= 1;
+ params->fpn_config.shift++;
+ }
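+ /* e.g. a maxval of 200 ends up with shift == 2, since 200 >> 2 == 50 */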
+ /* Adjust the values in the table for the shift value */
+ for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++)
+ ((unsigned short *)params->fpn_config.data)[i] >>= params->fpn_config.shift;
+}
+
+static void
+ia_css_process_kernel(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ void (*process)(unsigned int pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params))
+{
+ int i;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
+ struct ia_css_pipeline_stage *stage;
+
+ /* update the other buffers to the pipe specific copies */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ if (!stage->binary)
+ continue;
+ process(pipeline->pipe_id, stage, params);
+ }
+ }
+}
+
+static int
+sh_css_select_dp_10bpp_config(const struct ia_css_pipe *pipe,
+ bool *is_dp_10bpp)
+{
+ int err = 0;
+ /* Currently we check whether the 10bpp DPC configuration is required
+ * based on the use case, i.e. if both BDS and DPC are enabled. A cleaner
+ * design choice would be to expose the type of DPC (either 10bpp or
+ * 13bpp) via the binary info, but the current control flow does not
+ * allow this implementation. (This is because the configuration is set
+ * before a binary is selected, and the binary info is not yet available.)
+ */
+ if ((!pipe) || (!is_dp_10bpp)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ err = -EINVAL;
+ } else {
+ *is_dp_10bpp = false;
+
+ /* check if DPC is enabled from the host */
+ if (pipe->config.enable_dpc) {
+ /*check if BDS is enabled*/
+ unsigned int required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+
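+ /* BDS is considered enabled when the configured bayer downscaling
+ * factor differs from 1.00.
+ */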
+ if ((pipe->config.bayer_ds_out_res.width != 0) &&
+ (pipe->config.bayer_ds_out_res.height != 0)) {
+ if (0 == binarydesc_calculate_bds_factor(
+ pipe->config.input_effective_res,
+ pipe->config.bayer_ds_out_res,
+ &required_bds_factor)) {
+ if (required_bds_factor != SH_CSS_BDS_FACTOR_1_00) {
+ /*we use 10bpp BDS configuration*/
+ *is_dp_10bpp = true;
+ }
+ }
+ }
+ }
+ }
+
+ return err;
+}
+
+int
+sh_css_set_black_frame(struct ia_css_stream *stream,
+ const struct ia_css_frame *raw_black_frame)
+{
+ struct ia_css_isp_parameters *params;
+ /* this function desperately needs to be moved to the ISP or SP such
+ * that it can use the DMA.
+ */
+ unsigned int height, width, y, x, k, data;
+ ia_css_ptr ptr;
+
+ assert(stream);
+ assert(raw_black_frame);
+
+ params = stream->isp_params_configs;
+ height = raw_black_frame->frame_info.res.height;
+ width = raw_black_frame->frame_info.padded_width;
+
+ ptr = raw_black_frame->data
+ + raw_black_frame->planes.raw.offset;
+
+ IA_CSS_ENTER_PRIVATE("black_frame=%p", raw_black_frame);
+
+ if (params->fpn_config.data &&
+ (params->fpn_config.width != width || params->fpn_config.height != height)) {
+ kvfree(params->fpn_config.data);
+ params->fpn_config.data = NULL;
+ }
+ if (!params->fpn_config.data) {
+ params->fpn_config.data = kvmalloc(array3_size(height, width, sizeof(short)),
+ GFP_KERNEL);
+ if (!params->fpn_config.data) {
+ IA_CSS_ERROR("out of memory");
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+ params->fpn_config.width = width;
+ params->fpn_config.height = height;
+ params->fpn_config.shift = 0;
+ }
+
+ /* store raw to fpntbl */
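+ /* Each 32-bit word loaded from the raw frame holds two 16-bit samples;
+ * the first inner loop below fills the even-indexed entries of each
+ * block of 2 * ISP_VEC_NELEMS values, the second loop the odd ones.
+ */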
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += (ISP_VEC_NELEMS * 2)) {
+ int ofs = y * width + x;
+
+ for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
+ hmm_load(ptr, (void *)(&data), sizeof(int));
+ params->fpn_config.data[ofs + 2 * k] =
+ (short)(data & 0xFFFF);
+ params->fpn_config.data[ofs + 2 * k + 2] =
+ (short)((data >> 16) & 0xFFFF);
+ ptr += sizeof(int); /* byte system address */
+ }
+ for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
+ hmm_load(ptr, (void *)(&data), sizeof(int));
+ params->fpn_config.data[ofs + 2 * k + 1] =
+ (short)(data & 0xFFFF);
+ params->fpn_config.data[ofs + 2 * k + 3] =
+ (short)((data >> 16) & 0xFFFF);
+ ptr += sizeof(int); /* byte system address */
+ }
+ }
+ }
+
+ /* raw -> fpn */
+ convert_raw_to_fpn(params);
+
+ /* overwrite isp parameter */
+ ia_css_process_kernel(stream, params, ia_css_kernel_process_param[IA_CSS_FPN_ID]);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+
+ return 0;
+}
+
+bool
+sh_css_params_set_binning_factor(struct ia_css_stream *stream,
+ unsigned int binning_fact)
+{
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(stream);
+
+ params = stream->isp_params_configs;
+
+ if (params->sensor_binning != binning_fact) {
+ params->sensor_binning = binning_fact;
+ params->sc_table_changed = true;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+
+ return params->sc_table_changed;
+}
+
+static void
+sh_css_set_shading_table(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_table *table)
+{
+ IA_CSS_ENTER_PRIVATE("");
+ if (!table)
+ return;
+ assert(stream);
+
+ if (!table->enable)
+ table = NULL;
+
+ if (table != params->sc_table) {
+ params->sc_table = table;
+ params->sc_table_changed = true;
+ /* Not very clean, this goes to sh_css.c to invalidate the
+ * shading table for all pipes. Should be replaced by a loop
+ * and a pipe-specific call.
+ */
+ if (!params->output_frame)
+ sh_css_invalidate_shading_tables(stream);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_params_store_ia_css_host_data(
+ ia_css_ptr ddr_addr,
+ struct ia_css_host_data *data)
+{
+ assert(data);
+ assert(data->address);
+ assert(ddr_addr != mmgr_NULL);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ hmm_store(ddr_addr,
+ (void *)(data->address),
+ (size_t)data->size);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+struct ia_css_host_data *
+ia_css_params_alloc_convert_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ const struct ia_css_shading_table *shading_table)
+{
+ const struct ia_css_binary *binary = stage->binary;
+ struct ia_css_host_data *sctbl;
+ unsigned int i, j, aligned_width;
+ unsigned int sctbl_size;
+ short int *ptr;
+
+ assert(binary);
+ assert(shading_table);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!shading_table) {
+ IA_CSS_LEAVE_PRIVATE("void");
+ return NULL;
+ }
+
+ aligned_width = binary->sctbl_aligned_width_per_color;
+ sctbl_size = shading_table->height * IA_CSS_SC_NUM_COLORS * aligned_width *
+ sizeof(short);
+
+ sctbl = ia_css_host_data_allocate((size_t)sctbl_size);
+
+ if (!sctbl)
+ return NULL;
+ ptr = (short int *)sctbl->address;
+ memset(ptr,
+ 0,
+ sctbl_size);
+
+ for (i = 0; i < shading_table->height; i++) {
+ for (j = 0; j < IA_CSS_SC_NUM_COLORS; j++) {
+ memcpy(ptr,
+ &shading_table->data[j]
+ [i * shading_table->width],
+ shading_table->width * sizeof(short));
+ ptr += aligned_width;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return sctbl;
+}
+
+int ia_css_params_store_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ ia_css_ptr sc_tbl,
+ const struct ia_css_shading_table *sc_config)
+{
+ struct ia_css_host_data *isp_sc_tbl;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!sc_config) {
+ IA_CSS_LEAVE_PRIVATE("void");
+ return 0;
+ }
+
+ isp_sc_tbl = ia_css_params_alloc_convert_sctbl(stage, sc_config);
+ if (!isp_sc_tbl) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+ /* store the shading table to ddr */
+ ia_css_params_store_ia_css_host_data(sc_tbl, isp_sc_tbl);
+ ia_css_host_data_free(isp_sc_tbl);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+
+ return 0;
+}
+
+static void
+sh_css_enable_pipeline(const struct ia_css_binary *binary)
+{
+ if (!binary)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ ia_css_isp_param_enable_pipeline(&binary->mem_params);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static int
+ia_css_process_zoom_and_motion(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *first_stage)
+{
+ /* first_stage can be NULL */
+ const struct ia_css_pipeline_stage *stage;
+ int err = 0;
+ struct ia_css_resolution pipe_in_res;
+
+ pipe_in_res.width = 0;
+ pipe_in_res.height = 0;
+
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* Go through all stages to update uds and cropping */
+ for (stage = first_stage; stage; stage = stage->next) {
+ struct ia_css_binary *binary;
+ /* note: the variable below is made static as it is quite large;
+ * if it were not static it would end up on the stack, which could
+ * cause issues for drivers
+ */
+ static struct ia_css_binary tmp_binary;
+
+ const struct ia_css_binary_xinfo *info = NULL;
+
+ binary = stage->binary;
+ if (binary) {
+ info = binary->info;
+ } else {
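+ /* Stage without a binary (firmware stage): construct a temporary
+ * binary from the firmware info and the stage's frame arguments.
+ */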
+ const struct sh_css_binary_args *args = &stage->args;
+ const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+
+ out_infos[0] = ia_css_frame_get_info(args->out_frame[0]);
+
+ info = &stage->firmware->info.isp;
+ ia_css_binary_fill_info(info, false, false,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ ia_css_frame_get_info(args->in_frame),
+ NULL,
+ out_infos,
+ ia_css_frame_get_info(args->out_vf_frame),
+ &tmp_binary,
+ NULL,
+ -1, true);
+ binary = &tmp_binary;
+ binary->info = info;
+ }
+
+ if (stage == first_stage) {
+ /* we will use pipe_in_res to scale the zoom crop region if needed */
+ pipe_in_res = binary->effective_in_frame_res;
+ }
+
+ assert(stage->stage_num < SH_CSS_MAX_STAGES);
+ if (params->dz_config.zoom_region.resolution.width == 0 &&
+ params->dz_config.zoom_region.resolution.height == 0) {
+ sh_css_update_uds_and_crop_info(
+ &info->sp,
+ &binary->in_frame_info,
+ &binary->out_frame_info[0],
+ &binary->dvs_envelope,
+ &params->dz_config,
+ &params->motion_config,
+ &params->uds[stage->stage_num].uds,
+ &params->uds[stage->stage_num].crop_pos,
+ stage->enable_zoom);
+ } else {
+ err = sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ &info->sp,
+ &binary->in_frame_info,
+ &binary->out_frame_info[0],
+ &binary->dvs_envelope,
+ &params->dz_config,
+ &params->motion_config,
+ &params->uds[stage->stage_num].uds,
+ &params->uds[stage->stage_num].crop_pos,
+ pipe_in_res,
+ stage->enable_zoom);
+ if (err)
+ return err;
+ }
+ }
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return err;
+}
+
+static void
+sh_css_set_gamma_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_gamma_table *table)
+{
+ if (!table)
+ return;
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ params->gc_table = *table;
+ params->config_changed[IA_CSS_GC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_gamma_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_gamma_table *table)
+{
+ if (!table)
+ return;
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ *table = params->gc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_ctc_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ params->ctc_table = *table;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_ctc_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ *table = params->ctc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_macc_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ params->macc_table = *table;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_macc_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ *table = params->macc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void ia_css_morph_table_free(
+ struct ia_css_morph_table *me)
+{
+ unsigned int i;
+
+ if (!me)
+ return;
+
+ IA_CSS_ENTER("");
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (me->coordinates_x[i]) {
+ kvfree(me->coordinates_x[i]);
+ me->coordinates_x[i] = NULL;
+ }
+ if (me->coordinates_y[i]) {
+ kvfree(me->coordinates_y[i]);
+ me->coordinates_y[i] = NULL;
+ }
+ }
+
+ kvfree(me);
+ IA_CSS_LEAVE("void");
+}
+
+struct ia_css_morph_table *ia_css_morph_table_allocate(
+ unsigned int width,
+ unsigned int height)
+{
+ unsigned int i;
+ struct ia_css_morph_table *me;
+
+ IA_CSS_ENTER("");
+
+ me = kvmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me) {
+ IA_CSS_ERROR("out of memory");
+ return me;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ me->coordinates_x[i] = NULL;
+ me->coordinates_y[i] = NULL;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ me->coordinates_x[i] = kvmalloc(height * width *
+ sizeof(*me->coordinates_x[i]),
+ GFP_KERNEL);
+ me->coordinates_y[i] = kvmalloc(height * width *
+ sizeof(*me->coordinates_y[i]),
+ GFP_KERNEL);
+
+ if ((!me->coordinates_x[i]) ||
+ (!me->coordinates_y[i])) {
+ ia_css_morph_table_free(me);
+ me = NULL;
+ return me;
+ }
+ }
+ me->width = width;
+ me->height = height;
+ IA_CSS_LEAVE("");
+ return me;
+}
+
+static int sh_css_params_default_morph_table(
+ struct ia_css_morph_table **table,
+ const struct ia_css_binary *binary)
+{
+ /* MW 2400 advanced requires different scaling */
+ unsigned int i, j, k, step, width, height;
+ short start_x[IA_CSS_MORPH_TABLE_NUM_PLANES] = { -8, 0, -8, 0, 0, -8 },
+ start_y[IA_CSS_MORPH_TABLE_NUM_PLANES] = { 0, 0, -8, -8, -8, 0 };
+ struct ia_css_morph_table *tab;
+
+ assert(table);
+ assert(binary);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ step = (ISP_VEC_NELEMS / 16) * 128;
+ width = binary->morph_tbl_width;
+ height = binary->morph_tbl_height;
+
+ tab = ia_css_morph_table_allocate(width, height);
+ if (!tab)
+ return -ENOMEM;
+
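+ /* Fill each plane with a regular grid that advances by 'step' in both
+ * directions from the plane-specific start offsets; the first row and
+ * column are forced to 0 and the last column gets an extra 2 * start_x
+ * offset.
+ */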
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ short val_y = start_y[i];
+
+ for (j = 0; j < height; j++) {
+ short val_x = start_x[i];
+ unsigned short *x_ptr, *y_ptr;
+
+ x_ptr = &tab->coordinates_x[i][j * width];
+ y_ptr = &tab->coordinates_y[i][j * width];
+ for (k = 0; k < width;
+ k++, x_ptr++, y_ptr++, val_x += (short)step) {
+ if (k == 0)
+ *x_ptr = 0;
+ else if (k == width - 1)
+ *x_ptr = val_x + 2 * start_x[i];
+ else
+ *x_ptr = val_x;
+ if (j == 0)
+ *y_ptr = 0;
+ else
+ *y_ptr = val_y;
+ }
+ val_y += (short)step;
+ }
+ }
+ *table = tab;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+
+ return 0;
+}
+
+static void
+sh_css_set_morph_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_morph_table *table)
+{
+ if (!table)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params);
+ if (!table->enable)
+ table = NULL;
+ params->morph_table = table;
+ params->morph_table_changed = true;
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_translate_3a_statistics(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics_map *isp_stats)
+{
+ IA_CSS_ENTER("");
+ if (host_stats->grid.use_dmem) {
+ IA_CSS_LOG("3A: DMEM");
+ ia_css_s3a_dmem_decode(host_stats, isp_stats->dmem_stats);
+ } else {
+ IA_CSS_LOG("3A: VMEM");
+ ia_css_s3a_vmem_decode(host_stats, isp_stats->vmem_stats_hi,
+ isp_stats->vmem_stats_lo);
+ }
+ IA_CSS_LOG("3A: HMEM");
+ ia_css_s3a_hmem_decode(host_stats, isp_stats->hmem_stats);
+
+ IA_CSS_LEAVE("void");
+}
+
+void
+ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me)
+{
+ if (me) {
+ if (me->data_allocated) {
+ kvfree(me->data_ptr);
+ me->data_ptr = NULL;
+ me->data_allocated = false;
+ }
+ kvfree(me);
+ }
+}
+
+struct ia_css_isp_3a_statistics_map *
+ia_css_isp_3a_statistics_map_allocate(
+ const struct ia_css_isp_3a_statistics *isp_stats,
+ void *data_ptr)
+{
+ struct ia_css_isp_3a_statistics_map *me;
+ /* Windows compiler does not like adding sizes to a void *
+ * so we use a local char * instead. */
+ char *base_ptr;
+
+ me = kvmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me) {
+ IA_CSS_LEAVE("cannot allocate memory");
+ goto err;
+ }
+
+ me->data_ptr = data_ptr;
+ me->data_allocated = !data_ptr;
+ if (!data_ptr) {
+ me->data_ptr = kvmalloc(isp_stats->size, GFP_KERNEL);
+ if (!me->data_ptr) {
+ IA_CSS_LEAVE("cannot allocate memory");
+ goto err;
+ }
+ }
+ base_ptr = me->data_ptr;
+
+ me->size = isp_stats->size;
+ /* GCC complains when we assign a char * to a void *, so these
+ * casts are necessary unfortunately. */
+ me->dmem_stats = (void *)base_ptr;
+ me->vmem_stats_hi = (void *)(base_ptr + isp_stats->dmem_size);
+ me->vmem_stats_lo = (void *)(base_ptr + isp_stats->dmem_size +
+ isp_stats->vmem_size);
+ me->hmem_stats = (void *)(base_ptr + isp_stats->dmem_size +
+ 2 * isp_stats->vmem_size);
+
+ IA_CSS_LEAVE("map=%p", me);
+ return me;
+
+err:
+ kvfree(me);
+ return NULL;
+}
+
+int
+ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics *isp_stats)
+{
+ struct ia_css_isp_3a_statistics_map *map;
+ int ret = 0;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats);
+ assert(isp_stats);
+
+ map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL);
+ if (map) {
+ hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_3a_statistics(host_stats, map);
+ ia_css_isp_3a_statistics_map_free(map);
+ } else {
+ IA_CSS_ERROR("out of memory");
+ ret = -ENOMEM;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+/* Parameter encoding is not yet orthogonal.
+ * This function handles some of the exceptions.
+ */
+static void
+ia_css_set_param_exceptions(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params);
+
+ /* Copy also to DP. Should be done by the driver. */
+ params->dp_config.gr = params->wb_config.gr;
+ params->dp_config.r = params->wb_config.r;
+ params->dp_config.b = params->wb_config.b;
+ params->dp_config.gb = params->wb_config.gb;
+}
+
+static void
+sh_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ params->nr_config = *config;
+ params->yee_config.nr = *config;
+ params->config_changed[IA_CSS_NR_ID] = true;
+ params->config_changed[IA_CSS_YEE_ID] = true;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_ee_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ee_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+ ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+
+ params->ee_config = *config;
+ params->yee_config.ee = *config;
+ params->config_changed[IA_CSS_YEE_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_ee_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ee_config *config)
+{
+ if (!config)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ assert(params);
+ *config = params->ee_config;
+
+ ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_6axis_config *dvs_config)
+{
+ if (!dvs_config)
+ return;
+ assert(params);
+ assert(pipe);
+ assert(dvs_config->height_y == dvs_config->height_uv);
+ assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));
+ assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
+
+ IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);
+
+ copy_dvs_6axis_table(params->pipe_dvs_6axis_config[pipe->mode], dvs_config);
+
+ params->pipe_dvs_6axis_config_changed[pipe->mode] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
+ const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_6axis_config *dvs_config)
+{
+ if (!dvs_config)
+ return;
+ assert(params);
+ assert(pipe);
+ assert(dvs_config->height_y == dvs_config->height_uv);
+ assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));
+
+ IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);
+
+ if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
+ (dvs_config->width_y == params->pipe_dvs_6axis_config[pipe->mode]->width_y) &&
+ (dvs_config->height_y == params->pipe_dvs_6axis_config[pipe->mode]->height_y) &&
+ (dvs_config->width_uv == params->pipe_dvs_6axis_config[pipe->mode]->width_uv) &&
+ (dvs_config->height_uv == params->pipe_dvs_6axis_config[pipe->mode]->height_uv)
+ &&
+ dvs_config->xcoords_y &&
+ dvs_config->ycoords_y &&
+ dvs_config->xcoords_uv &&
+ dvs_config->ycoords_uv) {
+ copy_dvs_6axis_table(dvs_config, params->pipe_dvs_6axis_config[pipe->mode]);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_baa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ params->bds_config = *config;
+ params->config_changed[IA_CSS_BDS_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_baa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->bds_config;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_dz_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dz_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
+
+ assert(config->dx <= HRT_GDC_N);
+ assert(config->dy <= HRT_GDC_N);
+
+ params->dz_config = *config;
+ params->dz_config_changed = true;
+ /* JK: Why isp params changed?? */
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_dz_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dz_config *config)
+{
+ if (!config)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->dz_config;
+
+ IA_CSS_LEAVE_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
+}
+
+static void
+sh_css_set_motion_vector(struct ia_css_isp_parameters *params,
+ const struct ia_css_vector *motion)
+{
+ if (!motion)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("x=%d, y=%d", motion->x, motion->y);
+
+ params->motion_config = *motion;
+ /* JK: Why do isp params change? */
+ params->motion_config_changed = true;
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_motion_vector(const struct ia_css_isp_parameters *params,
+ struct ia_css_vector *motion)
+{
+ if (!motion)
+ return;
+ assert(params);
+
+ IA_CSS_ENTER_PRIVATE("motion=%p", motion);
+
+ *motion = params->motion_config;
+
+ IA_CSS_LEAVE_PRIVATE("x=%d, y=%d", motion->x, motion->y);
+}
+
+struct ia_css_isp_config *
+sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe)
+{
+ if (!pipe) {
+ IA_CSS_ERROR("pipe=%p", NULL);
+ return NULL;
+ }
+ return pipe->config.p_isp_config;
+}
+
+int
+ia_css_stream_set_isp_config(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config)
+{
+ return ia_css_stream_set_isp_config_on_pipe(stream, config, NULL);
+}
+
+int
+ia_css_stream_set_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe)
+{
+ int err = 0;
+
+ if ((!stream) || (!config))
+ return -EINVAL;
+
+ IA_CSS_ENTER("stream=%p, config=%p, pipe=%p", stream, config, pipe);
+
+ if (config->output_frame)
+ err = sh_css_set_per_frame_isp_config_on_pipe(stream, config, pipe);
+ else
+ err = sh_css_set_global_isp_config_on_pipe(stream->pipes[0], config, pipe);
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+int
+ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config)
+{
+ struct ia_css_pipe *pipe_in = pipe;
+ int err = 0;
+
+ IA_CSS_ENTER("pipe=%p", pipe);
+
+ if ((!pipe) || (!pipe->stream))
+ return -EINVAL;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "config=%p\n", config);
+
+ if (config->output_frame)
+ err = sh_css_set_per_frame_isp_config_on_pipe(pipe->stream, config, pipe);
+ else
+ err = sh_css_set_global_isp_config_on_pipe(pipe, config, pipe_in);
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+static int
+sh_css_set_global_isp_config_on_pipe(
+ struct ia_css_pipe *curr_pipe,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe)
+{
+ int err = 0;
+ int err1 = 0;
+ int err2 = 0;
+
+ IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", curr_pipe, config, pipe);
+
+ err1 = sh_css_init_isp_params_from_config(curr_pipe, curr_pipe->stream->isp_params_configs, config, pipe);
+
+ /* Now commit all changes to the SP */
+ err2 = sh_css_param_update_isp_params(curr_pipe, curr_pipe->stream->isp_params_configs, sh_css_sp_is_running(), pipe);
+
+ /* The following code is intentional. The sh_css_init_isp_params_from_config interface
+ * throws an error when both DPC and BDS are enabled. The CSS API must pass this error
+ * information to the caller, i.e. the host. We do not return this error immediately,
+ * but instead continue with updating the ISP params to enable testing of features
+ * which are currently in TR phase. */
+
+ err = (err1 != 0) ? err1 : ((err2 != 0) ? err2 : err);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static int
+sh_css_set_per_frame_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+ bool per_frame_config_created = false;
+ int err = 0;
+ int err1 = 0;
+ int err2 = 0;
+ int err3 = 0;
+
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", stream, config, pipe);
+
+ if (!pipe) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* create per-frame ISP params object with default values
+ * from stream->isp_params_configs if one doesn't already exist
+ */
+ if (!stream->per_frame_isp_params_configs) {
+ err = sh_css_create_isp_params(stream,
+ &stream->per_frame_isp_params_configs);
+ if (err)
+ goto exit;
+ per_frame_config_created = true;
+ }
+
+ params = stream->per_frame_isp_params_configs;
+
+ /* update new ISP params object with the new config */
+ if (!sh_css_init_isp_params_from_global(stream, params, false, pipe)) {
+ err1 = -EINVAL;
+ }
+
+ err2 = sh_css_init_isp_params_from_config(stream->pipes[0], params, config, pipe);
+
+ if (per_frame_config_created) {
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+ /* create per pipe reference to general ddr_ptrs */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
+ params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
+ }
+ }
+
+ /* now commit to ddr */
+ err3 = sh_css_param_update_isp_params(stream->pipes[0], params, sh_css_sp_is_running(), pipe);
+
+ /* The following code is intentional. The sh_css_init_sp_params_from_config and
+ * sh_css_init_isp_params_from_config interfaces throw an error when both DPC and BDS
+ * are enabled. The CSS API must pass this error information to the caller, i.e. the host.
+ * We do not return this error immediately, but instead continue with updating the ISP
+ * params to enable testing of features which are currently in TR phase. */
+ err = (err1 != 0) ? err1 :
+ (err2 != 0) ? err2 :
+ (err3 != 0) ? err3 : err;
+exit:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static int
+sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe_in)
+{
+ int err = 0;
+ bool is_dp_10bpp = true;
+
+ assert(pipe);
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p, config=%p, params=%p", pipe, config, params);
+
+ ia_css_set_configs(params, config);
+
+ sh_css_set_nr_config(params, config->nr_config);
+ sh_css_set_ee_config(params, config->ee_config);
+ sh_css_set_baa_config(params, config->baa_config);
+ if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
+ (params->pipe_dvs_6axis_config[pipe->mode]))
+ sh_css_set_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
+ sh_css_set_dz_config(params, config->dz_config);
+ sh_css_set_motion_vector(params, config->motion_vector);
+ sh_css_set_shading_table(pipe->stream, params, config->shading_table);
+ sh_css_set_morph_table(params, config->morph_table);
+ sh_css_set_macc_table(params, config->macc_table);
+ sh_css_set_gamma_table(params, config->gamma_table);
+ sh_css_set_ctc_table(params, config->ctc_table);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, config->shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ params->dis_coef_table_changed = (config->dvs_coefs);
+ params->dvs2_coef_table_changed = (config->dvs2_coefs);
+
+ params->output_frame = config->output_frame;
+ params->isp_parameters_id = config->isp_config_id;
+
+ if (!sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp)) {
+ /* return an error when both DPC and BDS are enabled by the
+ * user. */
+ /* we do not exit from this point immediately to allow internal
+ * firmware feature testing. */
+ if (is_dp_10bpp) {
+ err = -EINVAL;
+ }
+ } else {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ia_css_set_param_exceptions(pipe, params);
+
+exit:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+void
+ia_css_stream_get_isp_config(
+ const struct ia_css_stream *stream,
+ struct ia_css_isp_config *config)
+{
+ IA_CSS_ENTER("void");
+ ia_css_pipe_get_isp_config(stream->pipes[0], config);
+ IA_CSS_LEAVE("void");
+}
+
+void
+ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config)
+{
+ struct ia_css_isp_parameters *params = NULL;
+
+ assert(config);
+
+ IA_CSS_ENTER("config=%p", config);
+
+ params = pipe->stream->isp_params_configs;
+ assert(params);
+
+ ia_css_get_configs(params, config);
+
+ sh_css_get_ee_config(params, config->ee_config);
+ sh_css_get_baa_config(params, config->baa_config);
+ sh_css_get_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
+ sh_css_get_macc_table(params, config->macc_table);
+ sh_css_get_gamma_table(params, config->gamma_table);
+ sh_css_get_ctc_table(params, config->ctc_table);
+ sh_css_get_dz_config(params, config->dz_config);
+ sh_css_get_motion_vector(params, config->motion_vector);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_get_shading_settings(params, config->shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ config->output_frame = params->output_frame;
+ config->isp_config_id = params->isp_parameters_id;
+
+ IA_CSS_LEAVE("void");
+}
+
+/*
+ * coding style says the return of "mmgr_NULL" is the error signal
+ *
+ * Deprecated: Implement mmgr_realloc()
+ */
+static bool realloc_isp_css_mm_buf(
+ ia_css_ptr *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ int *err)
+{
+ s32 id;
+
+ *err = 0;
+ /* Possible optimization: add a function sh_css_isp_css_mm_realloc()
+ * and implement on top of hmm. */
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!force && *curr_size >= needed_size) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+ /* don't reallocate if single ref to buffer and same size */
+ if (*curr_size == needed_size && ia_css_refcount_is_single(*curr_buf)) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+
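+ /* Release our reference on the old buffer and allocate a new buffer
+ * with a fresh reference taken on it.
+ */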
+ id = IA_CSS_REFCOUNT_PARAM_BUFFER;
+ ia_css_refcount_decrement(id, *curr_buf);
+ *curr_buf = ia_css_refcount_increment(id, hmm_alloc(needed_size));
+ if (!*curr_buf) {
+ *err = -ENOMEM;
+ *curr_size = 0;
+ } else {
+ *curr_size = needed_size;
+ }
+ IA_CSS_LEAVE_PRIVATE("true");
+ return true;
+}
+
+static bool reallocate_buffer(
+ ia_css_ptr *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ int *err)
+{
+ bool ret;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ret = realloc_isp_css_mm_buf(curr_buf,
+ curr_size, needed_size, force, err);
+
+ IA_CSS_LEAVE_PRIVATE("ret=%d", ret);
+ return ret;
+}
+
+struct ia_css_isp_3a_statistics *
+ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
+{
+ struct ia_css_isp_3a_statistics *me;
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ assert(grid);
+
+ /* MW: Does "grid->enable" also control the histogram output ?? */
+ if (!grid->enable)
+ return NULL;
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+ if (grid->use_dmem) {
+ me->dmem_size = sizeof(struct ia_css_3a_output) *
+ grid->aligned_width *
+ grid->aligned_height;
+ } else {
+ me->vmem_size = ISP_S3ATBL_HI_LO_STRIDE_BYTES *
+ grid->aligned_height;
+ }
+ me->hmem_size = sizeof_hmem(HMEM0_ID);
+
+ /* All subsections need to be aligned to the system bus width */
+ me->dmem_size = CEIL_MUL(me->dmem_size, HIVE_ISP_DDR_WORD_BYTES);
+ me->vmem_size = CEIL_MUL(me->vmem_size, HIVE_ISP_DDR_WORD_BYTES);
+ me->hmem_size = CEIL_MUL(me->hmem_size, HIVE_ISP_DDR_WORD_BYTES);
+
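+ /* Single DDR allocation, laid out as:
+ * DMEM stats | VMEM hi | VMEM lo | HMEM (rgby) table.
+ */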
+ me->size = me->dmem_size + me->vmem_size * 2 + me->hmem_size;
+ me->data_ptr = hmm_alloc(me->size);
+ if (me->data_ptr == mmgr_NULL) {
+ kvfree(me);
+ me = NULL;
+ goto err;
+ }
+ if (me->dmem_size)
+ me->data.dmem.s3a_tbl = me->data_ptr;
+ if (me->vmem_size) {
+ me->data.vmem.s3a_tbl_hi = me->data_ptr + me->dmem_size;
+ me->data.vmem.s3a_tbl_lo = me->data_ptr + me->dmem_size + me->vmem_size;
+ }
+ if (me->hmem_size)
+ me->data_hmem.rgby_tbl = me->data_ptr + me->dmem_size + 2 * me->vmem_size;
+
+err:
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+}
+
+void
+ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me)
+{
+ if (me) {
+ hmm_free(me->data_ptr);
+ kvfree(me);
+ }
+}
+
+struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void)
+{
+ return NULL;
+}
+
+struct ia_css_metadata *
+ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info)
+{
+ struct ia_css_metadata *md = NULL;
+
+ IA_CSS_ENTER("");
+
+ if (metadata_info->size == 0)
+ return NULL;
+
+ md = kvmalloc(sizeof(*md), GFP_KERNEL);
+ if (!md)
+ goto error;
+
+ md->info = *metadata_info;
+ md->exp_id = 0;
+ md->address = hmm_alloc(metadata_info->size);
+ if (md->address == mmgr_NULL)
+ goto error;
+
+ IA_CSS_LEAVE("return=%p", md);
+ return md;
+
+error:
+ ia_css_metadata_free(md);
+ IA_CSS_LEAVE("return=%p", NULL);
+ return NULL;
+}
+
+void
+ia_css_metadata_free(struct ia_css_metadata *me)
+{
+ if (me) {
+ /* The enter and leave macros are placed inside
+ * the condition to avoid false logging of metadata
+ * free events when metadata is disabled.
+ * We found this to be confusing during development
+ * and debugging. */
+ IA_CSS_ENTER("me=%p", me);
+ hmm_free(me->address);
+ kvfree(me);
+ IA_CSS_LEAVE("void");
+ }
+}
+
+void
+ia_css_metadata_free_multiple(unsigned int num_bufs,
+ struct ia_css_metadata **bufs)
+{
+ unsigned int i;
+
+ if (bufs) {
+ for (i = 0; i < num_bufs; i++)
+ ia_css_metadata_free(bufs[i]);
+ }
+}
+
+static unsigned int g_param_buffer_dequeue_count;
+static unsigned int g_param_buffer_enqueue_count;
+
+int
+ia_css_stream_isp_parameters_init(struct ia_css_stream *stream)
+{
+ int err = 0;
+ unsigned int i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ struct ia_css_isp_parameters *params;
+
+ assert(stream);
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!stream) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+ /* TMP: tracking of paramsets */
+ g_param_buffer_dequeue_count = 0;
+ g_param_buffer_enqueue_count = 0;
+
+ stream->per_frame_isp_params_configs = NULL;
+ err = sh_css_create_isp_params(stream,
+ &stream->isp_params_configs);
+ if (err)
+ goto ERR;
+
+ params = stream->isp_params_configs;
+ if (!sh_css_init_isp_params_from_global(stream, params, true, NULL)) {
+ /* we do not return the error immediately to enable internal
+ * firmware feature testing */
+ err = -EINVAL;
+ }
+
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+ /* create per pipe reference to general ddr_ptrs */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
+ params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void
+ia_css_set_sdis_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *dvs_coefs)
+{
+ ia_css_set_sdis_horicoef_config(params, dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, dvs_coefs);
+}
+
+static void
+ia_css_set_sdis2_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *dvs2_coefs)
+{
+ ia_css_set_sdis2_horicoef_config(params, dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, dvs2_coefs);
+}
+
+static int
+sh_css_create_isp_params(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters **isp_params_out)
+{
+ bool succ = true;
+ unsigned int i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ int err;
+ size_t params_size;
+ struct ia_css_isp_parameters *params =
+ kvmalloc(sizeof(struct ia_css_isp_parameters), GFP_KERNEL);
+
+ if (!params) {
+ *isp_params_out = NULL;
+ err = -ENOMEM;
+ IA_CSS_ERROR("%s:%d error: cannot allocate memory", __FILE__, __LINE__);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ memset(params, 0, sizeof(struct ia_css_isp_parameters));
+
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ memset(&params->pipe_ddr_ptrs[i], 0,
+ sizeof(params->pipe_ddr_ptrs[i]));
+ memset(&params->pipe_ddr_ptrs_size[i], 0,
+ sizeof(params->pipe_ddr_ptrs_size[i]));
+ }
+
+ memset(ddr_ptrs, 0, sizeof(*ddr_ptrs));
+ memset(ddr_ptrs_size, 0, sizeof(*ddr_ptrs_size));
+
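+ /* Allocate the DDR buffers for this parameter set: the UDS parameter
+ * block and the MACC table.
+ */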
+ params_size = sizeof(params->uds);
+ ddr_ptrs_size->isp_param = params_size;
+ ddr_ptrs->isp_param =
+ ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ hmm_alloc(params_size));
+ succ &= (ddr_ptrs->isp_param != mmgr_NULL);
+
+ ddr_ptrs_size->macc_tbl = sizeof(struct ia_css_macc_table);
+ ddr_ptrs->macc_tbl =
+ ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ hmm_alloc(sizeof(struct ia_css_macc_table)));
+ succ &= (ddr_ptrs->macc_tbl != mmgr_NULL);
+
+ *isp_params_out = params;
+
+ if (!succ)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static bool
+sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ bool use_default_config,
+ struct ia_css_pipe *pipe_in)
+{
+ bool retval = true;
+ int i = 0;
+ bool is_dp_10bpp = true;
+ unsigned int isp_pipe_version = ia_css_pipe_get_isp_pipe_version(
+ stream->pipes[0]);
+ struct ia_css_isp_parameters *stream_params = stream->isp_params_configs;
+
+ if (!use_default_config && !stream_params) {
+ retval = false;
+ goto exit;
+ }
+
+ params->output_frame = NULL;
+ params->isp_parameters_id = 0;
+
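+ /* Either populate the parameter set from the built-in default tables
+ * and configs, or copy it from the stream's global parameter set.
+ */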
+ if (use_default_config) {
+ ia_css_set_xnr3_config(params, &default_xnr3_config);
+
+ sh_css_set_nr_config(params, &default_nr_config);
+ sh_css_set_ee_config(params, &default_ee_config);
+ if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1)
+ sh_css_set_macc_table(params, &default_macc_table);
+ else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
+ sh_css_set_macc_table(params, &default_macc2_table);
+ sh_css_set_gamma_table(params, &default_gamma_table);
+ sh_css_set_ctc_table(params, &default_ctc_table);
+ sh_css_set_baa_config(params, &default_baa_config);
+ sh_css_set_dz_config(params, &default_dz_config);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, &default_shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ ia_css_set_s3a_config(params, &default_3a_config);
+ ia_css_set_wb_config(params, &default_wb_config);
+ ia_css_set_csc_config(params, &default_cc_config);
+ ia_css_set_tnr_config(params, &default_tnr_config);
+ ia_css_set_ob_config(params, &default_ob_config);
+ ia_css_set_dp_config(params, &default_dp_config);
+
+ ia_css_set_param_exceptions(pipe_in, params);
+
+ ia_css_set_de_config(params, &default_de_config);
+ ia_css_set_gc_config(params, &default_gc_config);
+ ia_css_set_anr_config(params, &default_anr_config);
+ ia_css_set_anr2_config(params, &default_anr_thres);
+ ia_css_set_ce_config(params, &default_ce_config);
+ ia_css_set_xnr_table_config(params, &default_xnr_table);
+ ia_css_set_ecd_config(params, &default_ecd_config);
+ ia_css_set_ynr_config(params, &default_ynr_config);
+ ia_css_set_fc_config(params, &default_fc_config);
+ ia_css_set_cnr_config(params, &default_cnr_config);
+ ia_css_set_macc_config(params, &default_macc_config);
+ ia_css_set_ctc_config(params, &default_ctc_config);
+ ia_css_set_aa_config(params, &default_aa_config);
+ ia_css_set_r_gamma_config(params, &default_r_gamma_table);
+ ia_css_set_g_gamma_config(params, &default_g_gamma_table);
+ ia_css_set_b_gamma_config(params, &default_b_gamma_table);
+ ia_css_set_yuv2rgb_config(params, &default_yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, &default_rgb2yuv_cc_config);
+ ia_css_set_xnr_config(params, &default_xnr_config);
+ ia_css_set_sdis_config(params, &default_sdis_config);
+ ia_css_set_sdis2_config(params, &default_sdis2_config);
+ ia_css_set_formats_config(params, &default_formats_config);
+
+ params->fpn_config.data = NULL;
+ params->config_changed[IA_CSS_FPN_ID] = true;
+ params->fpn_config.enabled = 0;
+
+ params->motion_config = default_motion_config;
+ params->motion_config_changed = true;
+
+ params->morph_table = NULL;
+ params->morph_table_changed = true;
+
+ params->sc_table = NULL;
+ params->sc_table_changed = true;
+
+ ia_css_sdis2_clear_coefficients(&params->dvs2_coefs);
+ params->dvs2_coef_table_changed = true;
+
+ ia_css_sdis_clear_coefficients(&params->dvs_coefs);
+ params->dis_coef_table_changed = true;
+ } else {
+ ia_css_set_xnr3_config(params, &stream_params->xnr3_config);
+
+ sh_css_set_nr_config(params, &stream_params->nr_config);
+ sh_css_set_ee_config(params, &stream_params->ee_config);
+ if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1)
+ sh_css_set_macc_table(params, &stream_params->macc_table);
+ else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
+ sh_css_set_macc_table(params, &stream_params->macc_table);
+ sh_css_set_gamma_table(params, &stream_params->gc_table);
+ sh_css_set_ctc_table(params, &stream_params->ctc_table);
+ sh_css_set_baa_config(params, &stream_params->bds_config);
+ sh_css_set_dz_config(params, &stream_params->dz_config);
+ /* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, &stream_params->shading_settings);
+ /* ------ deprecated(bz675) : to ------ */
+
+ ia_css_set_s3a_config(params, &stream_params->s3a_config);
+ ia_css_set_wb_config(params, &stream_params->wb_config);
+ ia_css_set_csc_config(params, &stream_params->cc_config);
+ ia_css_set_tnr_config(params, &stream_params->tnr_config);
+ ia_css_set_ob_config(params, &stream_params->ob_config);
+ ia_css_set_dp_config(params, &stream_params->dp_config);
+ ia_css_set_de_config(params, &stream_params->de_config);
+ ia_css_set_gc_config(params, &stream_params->gc_config);
+ ia_css_set_anr_config(params, &stream_params->anr_config);
+ ia_css_set_anr2_config(params, &stream_params->anr_thres);
+ ia_css_set_ce_config(params, &stream_params->ce_config);
+ ia_css_set_xnr_table_config(params, &stream_params->xnr_table);
+ ia_css_set_ecd_config(params, &stream_params->ecd_config);
+ ia_css_set_ynr_config(params, &stream_params->ynr_config);
+ ia_css_set_fc_config(params, &stream_params->fc_config);
+ ia_css_set_cnr_config(params, &stream_params->cnr_config);
+ ia_css_set_macc_config(params, &stream_params->macc_config);
+ ia_css_set_ctc_config(params, &stream_params->ctc_config);
+ ia_css_set_aa_config(params, &stream_params->aa_config);
+ ia_css_set_r_gamma_config(params, &stream_params->r_gamma_table);
+ ia_css_set_g_gamma_config(params, &stream_params->g_gamma_table);
+ ia_css_set_b_gamma_config(params, &stream_params->b_gamma_table);
+ ia_css_set_yuv2rgb_config(params, &stream_params->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, &stream_params->rgb2yuv_cc_config);
+ ia_css_set_xnr_config(params, &stream_params->xnr_config);
+ ia_css_set_formats_config(params, &stream_params->formats_config);
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ if (!sh_css_select_dp_10bpp_config(stream->pipes[i], &is_dp_10bpp)) {
+ /* set the return value to false if both DPC and
+ * BDS are enabled by the user. But we do not return
+ * the value immediately to enable internal firmware
+ * feature testing. */
+ retval = !is_dp_10bpp;
+ /* FIXME: should it ignore this error? */
+ } else {
+ retval = false;
+ goto exit;
+ }
+ }
+
+ ia_css_set_param_exceptions(pipe_in, params);
+
+ params->fpn_config.data = stream_params->fpn_config.data;
+ params->config_changed[IA_CSS_FPN_ID] =
+ stream_params->config_changed[IA_CSS_FPN_ID];
+ params->fpn_config.enabled = stream_params->fpn_config.enabled;
+
+ sh_css_set_motion_vector(params, &stream_params->motion_config);
+ sh_css_set_morph_table(params, stream_params->morph_table);
+
+ if (stream_params->sc_table) {
+ sh_css_set_shading_table(stream, params, stream_params->sc_table);
+ } else {
+ params->sc_table = NULL;
+ params->sc_table_changed = true;
+ }
+
+ /* Only IA_CSS_PIPE_ID_VIDEO & IA_CSS_PIPE_ID_CAPTURE will support dvs_6axis_config*/
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (stream_params->pipe_dvs_6axis_config[i]) {
+ if (params->pipe_dvs_6axis_config[i]) {
+ copy_dvs_6axis_table(params->pipe_dvs_6axis_config[i],
+ stream_params->pipe_dvs_6axis_config[i]);
+ } else {
+ params->pipe_dvs_6axis_config[i] =
+ generate_dvs_6axis_table_from_config(stream_params->pipe_dvs_6axis_config[i]);
+ }
+ }
+ }
+ ia_css_set_sdis_config(params, &stream_params->dvs_coefs);
+ params->dis_coef_table_changed = stream_params->dis_coef_table_changed;
+
+ ia_css_set_sdis2_config(params, &stream_params->dvs2_coefs);
+ params->dvs2_coef_table_changed = stream_params->dvs2_coef_table_changed;
+ params->sensor_binning = stream_params->sensor_binning;
+ }
+
+exit:
+ return retval;
+}
+
+int
+sh_css_params_init(void)
+{
+ int i, p;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* TMP: tracking of paramsets */
+ g_param_buffer_dequeue_count = 0;
+ g_param_buffer_enqueue_count = 0;
+
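+ /* Allocate and zero one SP stage and one ISP stage descriptor in DDR
+ * for every (pipe, stage) combination.
+ */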
+ for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++) {
+ for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
+ xmem_sp_stage_ptrs[p][i] =
+ ia_css_refcount_increment(-1,
+ hmm_alloc(sizeof(struct sh_css_sp_stage)));
+ xmem_isp_stage_ptrs[p][i] =
+ ia_css_refcount_increment(-1,
+ hmm_alloc(sizeof(struct sh_css_isp_stage)));
+
+ if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) ||
+ (xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) {
+ sh_css_params_uninit();
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ hmm_set(xmem_sp_stage_ptrs[p][i], 0, sizeof(struct sh_css_sp_stage));
+ hmm_set(xmem_isp_stage_ptrs[p][i], 0, sizeof(struct sh_css_isp_stage));
+ }
+ }
+
+ ia_css_config_gamma_table();
+ ia_css_config_ctc_table();
+ ia_css_config_rgb_gamma_tables();
+ ia_css_config_xnr_table();
+
+ sp_ddr_ptrs = ia_css_refcount_increment(-1,
+ hmm_alloc(CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
+ HIVE_ISP_DDR_WORD_BYTES)));
+ xmem_sp_group_ptrs = ia_css_refcount_increment(-1,
+ hmm_alloc(sizeof(struct sh_css_sp_group)));
+
+ if ((sp_ddr_ptrs == mmgr_NULL) ||
+ (xmem_sp_group_ptrs == mmgr_NULL)) {
+ ia_css_uninit();
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+ hmm_set(sp_ddr_ptrs, 0, CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
+ HIVE_ISP_DDR_WORD_BYTES));
+ hmm_set(xmem_sp_group_ptrs, 0, sizeof(struct sh_css_sp_group));
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+static void host_lut_store(const void *lut)
+{
+ unsigned int i;
+
+ for (i = 0; i < N_GDC_ID; i++)
+ gdc_lut_store((gdc_ID_t)i, (const int (*)[HRT_GDC_N]) lut);
+}
+
+int ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
+ const void *lut)
+{
+ int err = 0;
+ bool stream_started = false;
+
+ IA_CSS_ENTER("pipe=%p lut=%p", pipe, lut);
+
+ if (!lut || !pipe) {
+ err = -EINVAL;
+ IA_CSS_LEAVE("err=%d", err);
+ return err;
+ }
+
+ /* If the pipe belongs to a stream and the stream has started, it is not
+ * safe to store lut to gdc HW. If pipe->stream is NULL, then no stream is
+ * created with this pipe, so it is safe to do this operation as long as
+ * ia_css_init() has been called. */
+ if (pipe->stream && pipe->stream->started) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "unable to set scaler lut since stream has started\n");
+ stream_started = true;
+ err = -ENOTSUPP;
+ }
+
+ /* Free any existing tables. */
+ if (pipe->scaler_pp_lut != mmgr_NULL) {
+ hmm_free(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+ }
+
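+ /* Convert the LUT to the interleaved ISP format and keep a per-pipe
+ * copy of it in DDR.
+ */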
+ if (!stream_started) {
+ pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table));
+
+ if (pipe->scaler_pp_lut == mmgr_NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "unable to allocate scaler_pp_lut\n");
+ err = -ENOMEM;
+ } else {
+ gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])lut,
+ interleaved_lut_temp);
+ hmm_store(pipe->scaler_pp_lut,
+ (int *)interleaved_lut_temp,
+ sizeof(zoom_table));
+ }
+ }
+
+ IA_CSS_LEAVE("lut(%u) err=%d", pipe->scaler_pp_lut, err);
+ return err;
+}
+
+/* if pipe is NULL, returns default lut addr. */
+ia_css_ptr sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe)
+{
+ assert(pipe);
+
+ if (pipe->scaler_pp_lut != mmgr_NULL)
+ return pipe->scaler_pp_lut;
+ else
+ return sh_css_params_get_default_gdc_lut();
+}
+
+int sh_css_params_map_and_store_default_gdc_lut(void)
+{
+ int err = 0;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* Is table already mapped? Nothing to do if it is mapped. */
+ if (default_gdc_lut != mmgr_NULL)
+ return err;
+
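+ /* Program the default zoom LUT into the GDC hardware and keep an
+ * ISP-format copy of it in DDR.
+ */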
+ host_lut_store((void *)zoom_table);
+
+ default_gdc_lut = hmm_alloc(sizeof(zoom_table));
+
+ if (default_gdc_lut == mmgr_NULL)
+ return -ENOMEM;
+
+ gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])zoom_table,
+ interleaved_lut_temp);
+ hmm_store(default_gdc_lut, (int *)interleaved_lut_temp,
+ sizeof(zoom_table));
+
+ IA_CSS_LEAVE_PRIVATE("lut(%u) err=%d", default_gdc_lut, err);
+ return err;
+}
+
+void sh_css_params_free_default_gdc_lut(void)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (default_gdc_lut != mmgr_NULL) {
+ hmm_free(default_gdc_lut);
+ default_gdc_lut = mmgr_NULL;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+ia_css_ptr sh_css_params_get_default_gdc_lut(void)
+{
+ return default_gdc_lut;
+}
+
+static void free_param_set_callback(
+ ia_css_ptr ptr)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ free_ia_css_isp_parameter_set_info(ptr);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void free_buffer_callback(
+ ia_css_ptr ptr)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ hmm_free(ptr);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_param_clear_param_sets(void)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/*
+ * MW: we can define hmm_free() to return a NULL
+ * then you can write ptr = hmm_free(ptr);
+ */
+#define safe_free(id, x) \
+ do { \
+ ia_css_refcount_decrement(id, x); \
+ (x) = mmgr_NULL; \
+ } while (0)
+
+static void free_map(struct sh_css_ddr_address_map *map)
+{
+ unsigned int i;
+
+ ia_css_ptr *addrs = (ia_css_ptr *)map;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* free buffers */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
+ sizeof(size_t)); i++) {
+ if (addrs[i] == mmgr_NULL)
+ continue;
+ safe_free(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream)
+{
+ int i;
+ struct ia_css_isp_parameters *params = stream->isp_params_configs;
+ struct ia_css_isp_parameters *per_frame_params =
+ stream->per_frame_isp_params_configs;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ if (!params) {
+ IA_CSS_LEAVE_PRIVATE("isp_param_configs is NULL");
+ return;
+ }
+
+ /* free existing ddr_ptr maps */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ free_map(&params->pipe_ddr_ptrs[i]);
+ if (per_frame_params)
+ free_map(&per_frame_params->pipe_ddr_ptrs[i]);
+ /* Free up the DVS table memory blocks before recomputing a new table */
+ if (params->pipe_dvs_6axis_config[i])
+ free_dvs_6axis_table(&params->pipe_dvs_6axis_config[i]);
+ if (per_frame_params && per_frame_params->pipe_dvs_6axis_config[i])
+ free_dvs_6axis_table(&per_frame_params->pipe_dvs_6axis_config[i]);
+ }
+ free_map(&params->ddr_ptrs);
+ if (per_frame_params)
+ free_map(&per_frame_params->ddr_ptrs);
+
+ if (params->fpn_config.data) {
+ kvfree(params->fpn_config.data);
+ params->fpn_config.data = NULL;
+ }
+
+ /* Free up sc_config (temporal shading table) if it is allocated. */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ if (per_frame_params) {
+ if (per_frame_params->sc_config) {
+ ia_css_shading_table_free(per_frame_params->sc_config);
+ per_frame_params->sc_config = NULL;
+ }
+ }
+
+ kvfree(params);
+ kvfree(per_frame_params);
+ stream->isp_params_configs = NULL;
+ stream->per_frame_isp_params_configs = NULL;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_params_uninit(void)
+{
+ unsigned int p, i;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ia_css_refcount_decrement(-1, sp_ddr_ptrs);
+ sp_ddr_ptrs = mmgr_NULL;
+ ia_css_refcount_decrement(-1, xmem_sp_group_ptrs);
+ xmem_sp_group_ptrs = mmgr_NULL;
+
+ for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
+ ia_css_refcount_decrement(-1, xmem_sp_stage_ptrs[p][i]);
+ xmem_sp_stage_ptrs[p][i] = mmgr_NULL;
+ ia_css_refcount_decrement(-1, xmem_isp_stage_ptrs[p][i]);
+ xmem_isp_stage_ptrs[p][i] = mmgr_NULL;
+ }
+
+ /* go through the pools to clear references */
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_BUFFER, &free_buffer_callback);
+ ia_css_refcount_clear(-1, &free_buffer_callback);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
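+/*
+ * Copy one morph-table plane into a newly allocated host buffer, padding each
+ * row out to the ISP-aligned width; the padding columns stay zero because the
+ * whole buffer is cleared first. Tables wider than the aligned width are
+ * truncated.
+ */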
+static struct ia_css_host_data *
+convert_allocate_morph_plane(
+ unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int aligned_width)
+{
+ unsigned int i, j, padding, w;
+ struct ia_css_host_data *me;
+ unsigned int isp_data_size;
+ u16 *isp_data_ptr;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* we don't have morph table interpolation yet,
+ * so we allow a wider table to be used. This will be removed
+ * in the future. */
+ if (width > aligned_width) {
+ padding = 0;
+ w = aligned_width;
+ } else {
+ padding = aligned_width - width;
+ w = width;
+ }
+ isp_data_size = height * (w + padding) * sizeof(uint16_t);
+
+ me = ia_css_host_data_allocate((size_t)isp_data_size);
+
+ if (!me) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return NULL;
+ }
+
+ isp_data_ptr = (uint16_t *)me->address;
+
+ memset(isp_data_ptr, 0, (size_t)isp_data_size);
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < w; j++)
+ *isp_data_ptr++ = (uint16_t)data[j];
+ isp_data_ptr += padding;
+ data += width;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return me;
+}
+
+static int
+store_morph_plane(
+ unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ ia_css_ptr dest,
+ unsigned int aligned_width)
+{
+ struct ia_css_host_data *isp_data;
+
+ assert(dest != mmgr_NULL);
+
+ isp_data = convert_allocate_morph_plane(data, width, height, aligned_width);
+ if (!isp_data) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+ ia_css_params_store_ia_css_host_data(dest, isp_data);
+
+ ia_css_host_data_free(isp_data);
+ return 0;
+}
+
+static void sh_css_update_isp_params_to_ddr(
+ struct ia_css_isp_parameters *params,
+ ia_css_ptr ddr_ptr)
+{
+ size_t size = sizeof(params->uds);
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(params);
+
+ hmm_store(ddr_ptr, &params->uds, size);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void sh_css_update_isp_mem_params_to_ddr(
+ const struct ia_css_binary *binary,
+ ia_css_ptr ddr_mem_ptr,
+ size_t size,
+ enum ia_css_isp_memories mem)
+{
+ const struct ia_css_host_data *params;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ params = ia_css_isp_param_get_mem_init(&binary->mem_params,
+ IA_CSS_PARAM_CLASS_PARAM, mem);
+ hmm_store(ddr_mem_ptr, params->address, size);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
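+/*
+ * Drain the parameter-set queues: every copy the SP has finished with is
+ * dequeued, a BUFFER_DEQUEUED event is sent back to the SP and the
+ * references held by that set are released.
+ */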
+void ia_css_dequeue_param_buffers(/*unsigned int pipe_num*/ void)
+{
+ unsigned int i;
+ ia_css_ptr cpy;
+ enum sh_css_queue_id param_queue_ids[3] = { IA_CSS_PARAMETER_SET_QUEUE_ID,
+ IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID,
+ SH_CSS_INVALID_QUEUE_ID
+ };
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LEAVE_PRIVATE("sp is not running");
+ /* SP is not running. The queues are not valid */
+ return;
+ }
+
+ for (i = 0; SH_CSS_INVALID_QUEUE_ID != param_queue_ids[i]; i++) {
+ cpy = (ia_css_ptr)0;
+ /* clean-up old copy */
+ while (ia_css_bufq_dequeue_buffer(param_queue_ids[i],
+ (uint32_t *)&cpy) == 0) {
+ /* TMP: keep track of dequeued param set count
+ */
+ g_param_buffer_dequeue_count++;
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
+ 0,
+ param_queue_ids[i],
+ 0);
+
+ IA_CSS_LOG("dequeued param set %x from %d, release ref", cpy, 0);
+ free_ia_css_isp_parameter_set_info(cpy);
+ cpy = (ia_css_ptr)0;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
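+/*
+ * Apply the per-kernel parameter processing for one pipeline stage: copy the
+ * stage specific UDS/crop settings into the config and call the process
+ * function of every kernel whose configuration changed. SC is skipped here,
+ * it is handled separately on a temporary shading table.
+ */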
+static void
+process_kernel_parameters(unsigned int pipe_id,
+ struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth)
+{
+ unsigned int param_id;
+
+ (void)isp_pipe_version;
+ (void)raw_bit_depth;
+
+ sh_css_enable_pipeline(stage->binary);
+
+ if (params->config_changed[IA_CSS_OB_ID]) {
+ ia_css_ob_configure(&params->stream_configs.ob,
+ isp_pipe_version, raw_bit_depth);
+ }
+ if (params->config_changed[IA_CSS_S3A_ID]) {
+ ia_css_s3a_configure(raw_bit_depth);
+ }
+ /* Copy stage uds parameters to config, since they can differ per stage.
+ */
+ params->crop_config.crop_pos = params->uds[stage->stage_num].crop_pos;
+ params->uds_config.crop_pos = params->uds[stage->stage_num].crop_pos;
+ params->uds_config.uds = params->uds[stage->stage_num].uds;
+ /* Call parameter process functions for all kernels */
+ /* Skip SC, since that is called on a temp sc table */
+ for (param_id = 0; param_id < IA_CSS_NUM_PARAMETER_IDS; param_id++) {
+ if (param_id == IA_CSS_SC_ID) continue;
+ if (params->config_changed[param_id])
+ ia_css_kernel_process_param[param_id](pipe_id, stage, params);
+ }
+}
+
+int
+sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ struct ia_css_isp_parameters *params,
+ bool commit,
+ struct ia_css_pipe *pipe_in)
+{
+ int err = 0;
+ ia_css_ptr cpy;
+ int i;
+ unsigned int raw_bit_depth = 10;
+ unsigned int isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_1;
+ bool acc_cluster_params_changed = false;
+ unsigned int thread_id, pipe_num;
+
+ (void)acc_cluster_params_changed;
+
+ assert(curr_pipe);
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p, isp_parameters_id=%d", pipe_in, params->isp_parameters_id);
+ raw_bit_depth = ia_css_stream_input_format_bits_per_pixel(curr_pipe->stream);
+
+ /* now make the map available to the sp */
+ if (!commit) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* enqueue copies of the mem_map to
+ the designated pipelines */
+ for (i = 0; i < curr_pipe->stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe;
+ struct sh_css_ddr_address_map *cur_map;
+ struct sh_css_ddr_address_map_size *cur_map_size;
+ struct ia_css_isp_parameter_set_info isp_params_info;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+
+ enum sh_css_queue_id queue_id;
+
+ pipe = curr_pipe->stream->pipes[i];
+ pipeline = ia_css_pipe_get_pipeline(pipe);
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ isp_pipe_version = ia_css_pipe_get_isp_pipe_version(pipe);
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+ ia_css_query_internal_queue_id(params->output_frame
+ ? IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET
+ : IA_CSS_BUFFER_TYPE_PARAMETER_SET,
+ thread_id, &queue_id);
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ err = -EBUSY;
+ break;
+ }
+ cur_map = &params->pipe_ddr_ptrs[pipeline->pipe_id];
+ cur_map_size = &params->pipe_ddr_ptrs_size[pipeline->pipe_id];
+
+ /* TODO: Normally, zoom and motion parameters shouldn't
+ * be part of "isp_params", as they are resolution/pipe dependent.
+ * Therefore, move the zoom config elsewhere (e.g. the shading
+ * table can be taken as an example). @GC
+ */
+ {
+ /* we have to do this per pipeline because the
+ * processing is, among other things, resolution dependent */
+ err = ia_css_process_zoom_and_motion(params,
+ pipeline->stages);
+ if (err)
+ return err;
+ }
+ /* check whether to actually update the parameters for this pipe */
+ /* Once the API change that makes a clear distinction between
+ * stream config and pipe config is in place, this skipping code can be moved out of the #ifdef */
+ if (pipe_in && (pipe != pipe_in)) {
+ IA_CSS_LOG("skipping pipe %p", pipe);
+ continue;
+ }
+
+ /* BZ 125915, should be moved to after "update other buff" */
+ /* update the other buffers to the pipe specific copies */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ unsigned int mem;
+
+ if (!stage || !stage->binary)
+ continue;
+
+ process_kernel_parameters(pipeline->pipe_id,
+ stage, params,
+ isp_pipe_version, raw_bit_depth);
+
+ err = sh_css_params_write_to_ddr_internal(
+ pipe,
+ pipeline->pipe_id,
+ params,
+ stage,
+ cur_map,
+ cur_map_size);
+
+ if (err)
+ break;
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ params->isp_mem_params_changed
+ [pipeline->pipe_id][stage->stage_num][mem] = false;
+ }
+ } /* for */
+ if (err)
+ break;
+ /* update isp_params to pipe specific copies */
+ if (params->isp_params_changed) {
+ reallocate_buffer(&cur_map->isp_param,
+ &cur_map_size->isp_param,
+ cur_map_size->isp_param,
+ true,
+ &err);
+ if (err)
+ break;
+ sh_css_update_isp_params_to_ddr(params, cur_map->isp_param);
+ }
+
+ /* last make referenced copy */
+ err = ref_sh_css_ddr_address_map(
+ cur_map,
+ &isp_params_info.mem_map);
+ if (err)
+ break;
+
+ /* Update Parameters ID */
+ isp_params_info.isp_parameters_id = params->isp_parameters_id;
+
+ /* Update output frame pointer */
+ isp_params_info.output_frame_ptr =
+ (params->output_frame) ? params->output_frame->data : mmgr_NULL;
+
+ /* now write the copy to ddr */
+ err = write_ia_css_isp_parameter_set_info_to_ddr(&isp_params_info, &cpy);
+ if (err)
+ break;
+
+ /* enqueue the set to sp */
+ IA_CSS_LOG("queue param set %x to %d", cpy, thread_id);
+
+ err = ia_css_bufq_enqueue_buffer(thread_id, queue_id, (uint32_t)cpy);
+ if (err) {
+ free_ia_css_isp_parameter_set_info(cpy);
+ IA_CSS_LOG("pfp: FAILED to add config id %d for OF %d to q %d on thread %d",
+ isp_params_info.isp_parameters_id,
+ isp_params_info.output_frame_ptr,
+ queue_id, thread_id);
+ break;
+ } else {
+ /* TMP: check discrepancy between nr of enqueued
+ * parameter sets and dequeued sets
+ */
+ g_param_buffer_enqueue_count++;
+ assert(g_param_buffer_enqueue_count < g_param_buffer_dequeue_count + 50);
+ /*
+ * Tell the SP which queues are not empty,
+ * by sending the software event.
+ */
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LEAVE_ERR_PRIVATE(-EBUSY);
+ return -EBUSY;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
+ (uint8_t)thread_id,
+ (uint8_t)queue_id,
+ 0);
+ IA_CSS_LOG("pfp: added config id %d for OF %d to q %d on thread %d",
+ isp_params_info.isp_parameters_id,
+ isp_params_info.output_frame_ptr,
+ queue_id, thread_id);
+ }
+ /* clean-up old copy */
+ ia_css_dequeue_param_buffers(/*pipe_num*/);
+ params->pipe_dvs_6axis_config_changed[pipeline->pipe_id] = false;
+ } /* end for each 'active' pipeline */
+ /* clear the changed flags after all params
+ for all pipelines have been updated */
+ params->isp_params_changed = false;
+ params->sc_table_changed = false;
+ params->dis_coef_table_changed = false;
+ params->dvs2_coef_table_changed = false;
+ params->morph_table_changed = false;
+ params->dz_config_changed = false;
+ params->motion_config_changed = false;
+ /* ------ deprecated(bz675) : from ------ */
+ params->shading_settings_changed = false;
+ /* ------ deprecated(bz675) : to ------ */
+
+ memset(&params->config_changed[0], 0, sizeof(params->config_changed));
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
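+/*
+ * Write the parameter tables needed by one stage to DDR: FPN, shading, MACC,
+ * DVS 6-axis and morph tables, followed by the per-memory ISP parameter
+ * blobs. Tables are only stored when the corresponding parameters changed or
+ * when their buffer had to be reallocated.
+ */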
+static int
+sh_css_params_write_to_ddr_internal(
+ struct ia_css_pipe *pipe,
+ unsigned int pipe_id,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *stage,
+ struct sh_css_ddr_address_map *ddr_map,
+ struct sh_css_ddr_address_map_size *ddr_map_size)
+{
+ int err;
+ const struct ia_css_binary *binary;
+
+ unsigned int stage_num;
+ unsigned int mem;
+ bool buff_realloced;
+
+ /* struct is > 128 bytes so it should not be on stack (see checkpatch) */
+ static struct ia_css_macc_table converted_macc_table;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(params);
+ assert(ddr_map);
+ assert(ddr_map_size);
+ assert(stage);
+
+ binary = stage->binary;
+ assert(binary);
+
+ stage_num = stage->stage_num;
+
+ if (binary->info->sp.enable.fpnr) {
+ buff_realloced = reallocate_buffer(&ddr_map->fpn_tbl,
+ &ddr_map_size->fpn_tbl,
+ fpntbl_bytes(binary),
+ params->config_changed[IA_CSS_FPN_ID],
+ &err);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (params->config_changed[IA_CSS_FPN_ID] || buff_realloced) {
+ if (params->fpn_config.enabled) {
+ err = store_fpntbl(params, ddr_map->fpn_tbl);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ }
+ }
+
+ if (binary->info->sp.enable.sc) {
+ u32 enable_conv;
+
+ enable_conv = params->shading_settings.enable_shading_table_conversion;
+
+ buff_realloced = reallocate_buffer(&ddr_map->sc_tbl,
+ &ddr_map_size->sc_tbl,
+ sctbl_bytes(binary),
+ params->sc_table_changed,
+ &err);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (params->shading_settings_changed ||
+ params->sc_table_changed || buff_realloced) {
+ if (enable_conv == 0) {
+ if (params->sc_table) {
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_table);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* set sc_config to isp */
+ params->sc_config = (struct ia_css_shading_table *)params->sc_table;
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+ params->sc_config = NULL;
+ } else {
+ /* generate the identical shading table */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ sh_css_params_shading_id_table_generate(&params->sc_config,
+ binary->sctbl_width_per_color,
+ binary->sctbl_height);
+ if (!params->sc_config) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* set sc_config to isp */
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+
+ /* free the shading table */
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ } else { /* legacy */
+ /* ------ deprecated(bz675) : from ------ */
+ /* shading table is full resolution, reduce */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ prepare_shading_table(
+ (const struct ia_css_shading_table *)params->sc_table,
+ params->sensor_binning,
+ &params->sc_config,
+ binary, pipe->required_bds_factor);
+ if (!params->sc_config) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* set sc_config to isp */
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+
+ /* free the shading table */
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ /* ------ deprecated(bz675) : to ------ */
+ }
+ }
+ }
+
+ if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc) {
+ unsigned int i, j, idx;
+ static const unsigned int idx_map[] = {
+ 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
+ };
+
+ for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) {
+ idx = 4 * idx_map[i];
+ j = 4 * i;
+
+ if (binary->info->sp.pipeline.isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1) {
+ converted_macc_table.data[idx] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx + 1] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 1],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx + 2] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 2],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx + 3] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 3],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ } else if (binary->info->sp.pipeline.isp_pipe_version ==
+ SH_CSS_ISP_PIPE_VERSION_2_2) {
+ converted_macc_table.data[idx] =
+ params->macc_table.data[j];
+ converted_macc_table.data[idx + 1] =
+ params->macc_table.data[j + 1];
+ converted_macc_table.data[idx + 2] =
+ params->macc_table.data[j + 2];
+ converted_macc_table.data[idx + 3] =
+ params->macc_table.data[j + 3];
+ }
+ }
+ reallocate_buffer(&ddr_map->macc_tbl,
+ &ddr_map_size->macc_tbl,
+ ddr_map_size->macc_tbl,
+ true,
+ &err);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ hmm_store(ddr_map->macc_tbl,
+ converted_macc_table.data,
+ sizeof(converted_macc_table.data));
+ }
+
+ if (binary->info->sp.enable.dvs_6axis) {
+ /* UV is packed into the Y plane, so for the total YYU size:
+ * dividing by 2 gives the size of the UV part only,
+ * and the total YYU size is the UV-only size * 3.
+ */
+ buff_realloced = reallocate_buffer(
+ &ddr_map->dvs_6axis_params_y,
+ &ddr_map_size->dvs_6axis_params_y,
+ (size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3),
+ params->pipe_dvs_6axis_config_changed[pipe_id],
+ &err);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (params->pipe_dvs_6axis_config_changed[pipe_id] || buff_realloced) {
+ const struct ia_css_frame_info *dvs_in_frame_info;
+
+ if (stage->args.delay_frames[0]) {
+ /* When delay frames are present (as in the case of video),
+ they are used for DVS. Configure DVS using those params. */
+ dvs_in_frame_info = &stage->args.delay_frames[0]->frame_info;
+ } else {
+ /* Otherwise, use the input frame to configure DVS */
+ dvs_in_frame_info = &stage->args.in_frame->frame_info;
+ }
+
+ /* Generate a default DVS unity table on startup */
+ if (!params->pipe_dvs_6axis_config[pipe_id]) {
+ struct ia_css_resolution dvs_offset = {0};
+
+ dvs_offset.width = (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
+ dvs_offset.height = (PIX_SHIFT_FILTER_RUN_IN_Y + binary->dvs_envelope.height) / 2;
+
+ params->pipe_dvs_6axis_config[pipe_id] =
+ generate_dvs_6axis_table(&binary->out_frame_info[0].res, &dvs_offset);
+ if (!params->pipe_dvs_6axis_config[pipe_id]) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
+ return -ENOMEM;
+ }
+ params->pipe_dvs_6axis_config_changed[pipe_id] = true;
+
+ store_dvs_6axis_config(params->pipe_dvs_6axis_config[pipe_id],
+ binary,
+ dvs_in_frame_info,
+ ddr_map->dvs_6axis_params_y);
+ params->isp_params_changed = true;
+ }
+ }
+ }
+
+ if (binary->info->sp.enable.ca_gdc) {
+ unsigned int i;
+ ia_css_ptr *virt_addr_tetra_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ size_t *virt_size_tetra_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ ia_css_ptr *virt_addr_tetra_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ size_t *virt_size_tetra_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
+
+ virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
+ virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
+ virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
+ virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
+ virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
+ virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
+
+ virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
+ virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
+ virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
+ virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
+ virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
+ virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
+
+ virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
+ virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
+ virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
+ virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
+ virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
+ virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
+
+ virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
+ virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
+ virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
+ virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
+ virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
+ virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
+
+ buff_realloced = false;
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ buff_realloced |=
+ reallocate_buffer(virt_addr_tetra_x[i],
+ virt_size_tetra_x[i],
+ morph_plane_bytes(binary),
+ params->morph_table_changed,
+ &err);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ buff_realloced |=
+ reallocate_buffer(virt_addr_tetra_y[i],
+ virt_size_tetra_y[i],
+ morph_plane_bytes(binary),
+ params->morph_table_changed,
+ &err);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ if (params->morph_table_changed || buff_realloced) {
+ const struct ia_css_morph_table *table = params->morph_table;
+ struct ia_css_morph_table *id_table = NULL;
+
+ if ((table) &&
+ (table->width < binary->morph_tbl_width ||
+ table->height < binary->morph_tbl_height)) {
+ table = NULL;
+ }
+ if (!table) {
+ err = sh_css_params_default_morph_table(&id_table,
+ binary);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ table = id_table;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ store_morph_plane(table->coordinates_x[i],
+ table->width,
+ table->height,
+ *virt_addr_tetra_x[i],
+ binary->morph_tbl_aligned_width);
+ store_morph_plane(table->coordinates_y[i],
+ table->width,
+ table->height,
+ *virt_addr_tetra_y[i],
+ binary->morph_tbl_aligned_width);
+ }
+ if (id_table)
+ ia_css_morph_table_free(id_table);
+ }
+ }
+
+ /* Do this after special cases like SC and FPN, since they may change parameters */
+ for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
+ const struct ia_css_isp_data *isp_data =
+ ia_css_isp_param_get_isp_mem_init(&binary->info->sp.mem_initializers,
+ IA_CSS_PARAM_CLASS_PARAM, mem);
+ size_t size = isp_data->size;
+
+ if (!size) continue;
+ buff_realloced = reallocate_buffer(&ddr_map->isp_mem_param[stage_num][mem],
+ &ddr_map_size->isp_mem_param[stage_num][mem],
+ size,
+ params->isp_mem_params_changed[pipe_id][stage_num][mem],
+ &err);
+ if (err) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (params->isp_mem_params_changed[pipe_id][stage_num][mem] || buff_realloced) {
+ sh_css_update_isp_mem_params_to_ddr(binary,
+ ddr_map->isp_mem_param[stage_num][mem],
+ ddr_map_size->isp_mem_param[stage_num][mem], mem);
+ }
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(0);
+ return 0;
+}
+
+const struct ia_css_fpn_table *ia_css_get_fpn_table(struct ia_css_stream
+ *stream)
+{
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_LEAVE("void");
+ assert(stream);
+
+ params = stream->isp_params_configs;
+
+ return &params->fpn_config;
+}
+
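+/*
+ * Return the shading table currently in effect for the stream: the
+ * user-supplied sc_table if present, otherwise an identity table generated
+ * for the shading-correction binary, or (legacy bz675 path) a table reduced
+ * from the full-resolution sensor table.
+ */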
+struct ia_css_shading_table *ia_css_get_shading_table(struct ia_css_stream
+ *stream)
+{
+ struct ia_css_shading_table *table = NULL;
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER("void");
+
+ assert(stream);
+
+ params = stream->isp_params_configs;
+ if (!params)
+ return NULL;
+
+ if (params->shading_settings.enable_shading_table_conversion == 0) {
+ if (params->sc_table) {
+ table = (struct ia_css_shading_table *)params->sc_table;
+ } else {
+ const struct ia_css_binary *binary
+ = ia_css_stream_get_shading_correction_binary(stream);
+ if (binary) {
+ /* generate the identical shading table */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ sh_css_params_shading_id_table_generate(&params->sc_config,
+ binary->sctbl_width_per_color,
+ binary->sctbl_height);
+ table = params->sc_config;
+ /* The sc_config will be freed in the
+ * ia_css_stream_isp_parameters_uninit function. */
+ }
+ }
+ } else {
+ /* ------ deprecated(bz675) : from ------ */
+ const struct ia_css_binary *binary
+ = ia_css_stream_get_shading_correction_binary(stream);
+ struct ia_css_pipe *pipe;
+
+ /**********************************************************************/
+ /* the following code is copied from ia_css_stream_get_shading_correction_binary()
+ * to select the pipe that matches the binary */
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1]);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+ /**********************************************************************/
+ if (binary) {
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ prepare_shading_table(
+ (const struct ia_css_shading_table *)params->sc_table,
+ params->sensor_binning,
+ &params->sc_config,
+ binary, pipe->required_bds_factor);
+
+ table = params->sc_config;
+ /* The sc_config will be freed in the
+ * ia_css_stream_isp_parameters_uninit function. */
+ }
+ /* ------ deprecated(bz675) : to ------ */
+ }
+
+ IA_CSS_LEAVE("table=%p", table);
+
+ return table;
+}
+
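+/*
+ * Serialize sh_css_sp_group into a scratch buffer and store it in DDR. On
+ * ISP2401 the config struct is copied field by field with explicit padding
+ * (see the comments below).
+ */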
+ia_css_ptr sh_css_store_sp_group_to_ddr(void)
+{
+ u8 *write_buf;
+ u8 *buf_ptr;
+
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+
+ write_buf = kzalloc(sizeof(u8) * 8192, GFP_KERNEL);
+ if (!write_buf)
+ return 0;
+
+ buf_ptr = write_buf;
+ if (IS_ISP2401) {
+ memcpy(buf_ptr, &sh_css_sp_group.config, 3);
+ buf_ptr += 3;
+ *buf_ptr++ = sh_css_sp_group.config.enable_isys_event_queue;
+ *buf_ptr++ = sh_css_sp_group.config.disable_cont_vf;
+ memset(buf_ptr, 0, 3);
+ buf_ptr += 3; /* Padding 3 bytes for struct sh_css_sp_config*/
+ } else {
+ memcpy(buf_ptr, &sh_css_sp_group.config, sizeof(sh_css_sp_group.config));
+ buf_ptr += sizeof(sh_css_sp_group.config);
+ }
+
+ memcpy(buf_ptr, &sh_css_sp_group.pipe, sizeof(sh_css_sp_group.pipe));
+ buf_ptr += sizeof(sh_css_sp_group.pipe);
+
+ if (IS_ISP2401) {
+ memcpy(buf_ptr, &sh_css_sp_group.pipe_io, sizeof(sh_css_sp_group.pipe_io));
+ buf_ptr += sizeof(sh_css_sp_group.pipe_io);
+ memcpy(buf_ptr, &sh_css_sp_group.pipe_io_status,
+ sizeof(sh_css_sp_group.pipe_io_status));
+ buf_ptr += sizeof(sh_css_sp_group.pipe_io_status);
+ }
+
+ memcpy(buf_ptr, &sh_css_sp_group.debug, sizeof(sh_css_sp_group.debug));
+ buf_ptr += sizeof(sh_css_sp_group.debug);
+
+ hmm_store(xmem_sp_group_ptrs,
+ write_buf,
+ buf_ptr - write_buf);
+
+ kfree(write_buf);
+ return xmem_sp_group_ptrs;
+}
+
+ia_css_ptr sh_css_store_sp_stage_to_ddr(
+ unsigned int pipe,
+ unsigned int stage)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ hmm_store(xmem_sp_stage_ptrs[pipe][stage],
+ &sh_css_sp_stage,
+ sizeof(struct sh_css_sp_stage));
+ return xmem_sp_stage_ptrs[pipe][stage];
+}
+
+ia_css_ptr sh_css_store_isp_stage_to_ddr(
+ unsigned int pipe,
+ unsigned int stage)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ hmm_store(xmem_isp_stage_ptrs[pipe][stage],
+ &sh_css_isp_stage,
+ sizeof(struct sh_css_isp_stage));
+ return xmem_isp_stage_ptrs[pipe][stage];
+}
+
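+/*
+ * Take a reference on every buffer in 'map' and store the resulting handles
+ * in 'out'; this is the counterpart of free_map() above.
+ */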
+static int ref_sh_css_ddr_address_map(
+ struct sh_css_ddr_address_map *map,
+ struct sh_css_ddr_address_map *out)
+{
+ int err = 0;
+ unsigned int i;
+
+ /* we use a union to copy things, overlaying an array
+ onto the struct; that way adding fields to the struct
+ keeps things working, and we do not get type errors.
+ */
+ union {
+ struct sh_css_ddr_address_map *map;
+ ia_css_ptr *addrs;
+ } in_addrs, to_addrs;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(map);
+ assert(out);
+
+ in_addrs.map = map;
+ to_addrs.map = out;
+
+ assert(sizeof(struct sh_css_ddr_address_map_size) / sizeof(size_t) ==
+ sizeof(struct sh_css_ddr_address_map) / sizeof(ia_css_ptr));
+
+ /* copy map using size info */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
+ sizeof(size_t)); i++) {
+ if (in_addrs.addrs[i] == mmgr_NULL)
+ to_addrs.addrs[i] = mmgr_NULL;
+ else
+ to_addrs.addrs[i] = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ in_addrs.addrs[i]);
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static int write_ia_css_isp_parameter_set_info_to_ddr(
+ struct ia_css_isp_parameter_set_info *me,
+ ia_css_ptr *out)
+{
+ int err = 0;
+ bool succ;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(me);
+ assert(out);
+
+ *out = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_SET_POOL,
+ hmm_alloc(sizeof(struct ia_css_isp_parameter_set_info)));
+ succ = (*out != mmgr_NULL);
+ if (succ)
+ hmm_store(*out,
+ me, sizeof(struct ia_css_isp_parameter_set_info));
+ else
+ err = -ENOMEM;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
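+/*
+ * Load the parameter-set info back from DDR, drop the references it holds on
+ * the individual parameter buffers and finally release the set itself.
+ */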
+static int
+free_ia_css_isp_parameter_set_info(
+ ia_css_ptr ptr)
+{
+ int err = 0;
+ struct ia_css_isp_parameter_set_info isp_params_info;
+ unsigned int i;
+ ia_css_ptr *addrs = (ia_css_ptr *)&isp_params_info.mem_map;
+
+ IA_CSS_ENTER_PRIVATE("ptr = %u", ptr);
+
+ /* sanity check - ptr must be valid */
+ if (!ia_css_refcount_is_valid(ptr)) {
+ IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_SET_POOL(0x%x) invalid arg", __func__,
+ ptr);
+ err = -EINVAL;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ hmm_load(ptr, &isp_params_info.mem_map, sizeof(struct sh_css_ddr_address_map));
+ /* copy map using size info */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
+ sizeof(size_t)); i++) {
+ if (addrs[i] == mmgr_NULL)
+ continue;
+
+ /* sanity check - ptr must be valid */
+ if (!ia_css_refcount_is_valid(addrs[i])) {
+ IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_BUFFER(0x%x) invalid arg", __func__,
+ ptr);
+ err = -EINVAL;
+ continue;
+ }
+
+ ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+ }
+ ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_SET_POOL, ptr);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Mark all parameters as changed to force recomputing the derived ISP parameters */
+void
+sh_css_invalidate_params(struct ia_css_stream *stream)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int i, j, mem;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(stream);
+
+ params = stream->isp_params_configs;
+ params->isp_params_changed = true;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ for (j = 0; j < SH_CSS_MAX_STAGES; j++) {
+ for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
+ params->isp_mem_params_changed[i][j][mem] = true;
+ }
+ }
+ }
+
+ memset(&params->config_changed[0], 1, sizeof(params->config_changed));
+ params->dis_coef_table_changed = true;
+ params->dvs2_coef_table_changed = true;
+ params->morph_table_changed = true;
+ params->sc_table_changed = true;
+ params->dz_config_changed = true;
+ params->motion_config_changed = true;
+
+ /* Free up the DVS table memory blocks before recomputing a new table */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (params->pipe_dvs_6axis_config[i]) {
+ free_dvs_6axis_table(&params->pipe_dvs_6axis_config[i]);
+ params->pipe_dvs_6axis_config_changed[i] = true;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
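+/*
+ * Compute the UDS scaling factors/centre and the output crop position for a
+ * binary from the digital zoom configuration, the DVS envelope and the
+ * motion vector.
+ */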
+void
+sh_css_update_uds_and_crop_info(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+
+ bool enable_zoom)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(info);
+ assert(in_frame_info);
+ assert(out_frame_info);
+ assert(dvs_env);
+ assert(zoom);
+ assert(motion_vector);
+ assert(uds);
+ assert(sp_out_crop_pos);
+
+ uds->curr_dx = enable_zoom ? (uint16_t)zoom->dx : HRT_GDC_N;
+ uds->curr_dy = enable_zoom ? (uint16_t)zoom->dy : HRT_GDC_N;
+
+ if (info->enable.dvs_envelope) {
+ unsigned int crop_x = 0,
+ crop_y = 0,
+ uds_xc = 0,
+ uds_yc = 0,
+ env_width, env_height;
+ int half_env_x, half_env_y;
+ int motion_x = motion_vector->x;
+ int motion_y = motion_vector->y;
+ bool upscale_x = in_frame_info->res.width < out_frame_info->res.width;
+ bool upscale_y = in_frame_info->res.height < out_frame_info->res.height;
+
+ if (info->enable.uds && !info->enable.ds) {
+ /**
+ * we calculate with the envelope that we can actually
+ * use, the min dvs envelope is for the filter
+ * initialization.
+ */
+ env_width = dvs_env->width -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ env_height = dvs_env->height -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ /**
+ * for digital zoom, we use the dvs envelope and make
+ * sure that we don't include the 8 leftmost pixels or
+ * 8 topmost rows.
+ */
+ if (upscale_x) {
+ uds_xc = (in_frame_info->res.width
+ + env_width
+ + SH_CSS_MIN_DVS_ENVELOPE) / 2;
+ } else {
+ uds_xc = (out_frame_info->res.width
+ + env_width) / 2
+ + SH_CSS_MIN_DVS_ENVELOPE;
+ }
+ if (upscale_y) {
+ uds_yc = (in_frame_info->res.height
+ + env_height
+ + SH_CSS_MIN_DVS_ENVELOPE) / 2;
+ } else {
+ uds_yc = (out_frame_info->res.height
+ + env_height) / 2
+ + SH_CSS_MIN_DVS_ENVELOPE;
+ }
+ /* clip the motion vector to +/- half the envelope */
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ uds_xc += motion_x;
+ uds_yc += motion_y;
+ /* uds can be pipelined, remove top lines */
+ crop_y = 2;
+ } else if (info->enable.ds) {
+ env_width = dvs_env->width;
+ env_height = dvs_env->height;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ /* clip the motion vector to +/- half the envelope */
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ /* for video with downscaling, the envelope is included
+ in the input resolution. */
+ uds_xc = in_frame_info->res.width / 2 + motion_x;
+ uds_yc = in_frame_info->res.height / 2 + motion_y;
+ crop_x = info->pipeline.left_cropping;
+ /* ds == 2 (yuv_ds) can be pipelined, remove top
+ lines */
+ if (info->enable.ds & 1)
+ crop_y = info->pipeline.top_cropping;
+ else
+ crop_y = 2;
+ } else {
+ /* video nodz: here we can only crop. We make sure we
+ crop at least the first 8x8 pixels away. */
+ env_width = dvs_env->width -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ env_height = dvs_env->height -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ crop_x = SH_CSS_MIN_DVS_ENVELOPE
+ + half_env_x + motion_x;
+ crop_y = SH_CSS_MIN_DVS_ENVELOPE
+ + half_env_y + motion_y;
+ }
+
+ /* Must enforce that the crop position is even */
+ crop_x = round_down(crop_x, 2);
+ crop_y = round_down(crop_y, 2);
+ uds_xc = round_down(uds_xc, 2);
+ uds_yc = round_down(uds_yc, 2);
+
+ uds->xc = (uint16_t)uds_xc;
+ uds->yc = (uint16_t)uds_yc;
+ sp_out_crop_pos->x = (uint16_t)crop_x;
+ sp_out_crop_pos->y = (uint16_t)crop_y;
+ } else {
+ /* for down scaling, we always use the center of the image */
+ uds->xc = (uint16_t)in_frame_info->res.width / 2;
+ uds->yc = (uint16_t)in_frame_info->res.height / 2;
+ sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
+ sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
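+/*
+ * Region-based variant of the above: derive dx/dy and the scaling centre
+ * from the zoom rectangle instead of the global zoom factor.
+ */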
+static int
+sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ struct ia_css_resolution pipe_in_res,
+ bool enable_zoom)
+{
+ unsigned int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
+ int err = 0;
+ /* Note:
+ * Filter_Envelope = 0 for NND/LUT
+ * Filter_Envelope = 1 for BCI
+ * Filter_Envelope = 3 for BLI
+ * Currently, this filter envelope is not taken into account because uds.sp.c recalculates
+ * the dx/dy based on the filter envelope and other information (ia_css_uds_sp_scale_params).
+ * Ideally, that should be done on the host side, not on the SP side.
+ */
+ unsigned int filter_envelope = 0;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(info);
+ assert(in_frame_info);
+ assert(out_frame_info);
+ assert(dvs_env);
+ assert(zoom);
+ assert(motion_vector);
+ assert(uds);
+ assert(sp_out_crop_pos);
+ x0 = zoom->zoom_region.origin.x;
+ y0 = zoom->zoom_region.origin.y;
+ x1 = zoom->zoom_region.resolution.width + x0;
+ y1 = zoom->zoom_region.resolution.height + y0;
+
+ if ((x0 > x1) || (y0 > y1) || (x1 > pipe_in_res.width) || (y1 > pipe_in_res.height))
+ return -EINVAL;
+
+ if (!enable_zoom) {
+ uds->curr_dx = HRT_GDC_N;
+ uds->curr_dy = HRT_GDC_N;
+ }
+
+ if (info->enable.dvs_envelope) {
+ /* The zoom region is only supported by the UDS module on
+ * ISP 2 and higher. It is not supported in video mode on ISP 1. */
+ return -EINVAL;
+ } else {
+ if (enable_zoom) {
+ /* A. Calculate dx/dy based on the crop region using in_frame_info.
+ * Scale the crop region if the in_frame_info of this stage is not the
+ * same as the actual effective input of the pipeline.
+ */
+ if (in_frame_info->res.width != pipe_in_res.width ||
+ in_frame_info->res.height != pipe_in_res.height) {
+ x0 = (x0 * in_frame_info->res.width) / (pipe_in_res.width);
+ y0 = (y0 * in_frame_info->res.height) / (pipe_in_res.height);
+ x1 = (x1 * in_frame_info->res.width) / (pipe_in_res.width);
+ y1 = (y1 * in_frame_info->res.height) / (pipe_in_res.height);
+ }
+ uds->curr_dx =
+ ((x1 - x0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.width;
+ uds->curr_dy =
+ ((y1 - y0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.height;
+
+ /* B. Calculate xc/yc based on crop region */
+ uds->xc = (uint16_t)x0 + (((x1) - (x0)) / 2);
+ uds->yc = (uint16_t)y0 + (((y1) - (y0)) / 2);
+ } else {
+ uds->xc = (uint16_t)in_frame_info->res.width / 2;
+ uds->yc = (uint16_t)in_frame_info->res.height / 2;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "uds->curr_dx=%d, uds->xc=%d, uds->yc=%d\n",
+ uds->curr_dx, uds->xc, uds->yc);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x0=%d, y0=%d, x1=%d, y1=%d\n",
+ x0, y0, x1, y1);
+ sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
+ sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+ return err;
+}
+
+struct ia_css_3a_statistics *
+ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
+{
+ struct ia_css_3a_statistics *me;
+ int grid_size;
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ assert(grid);
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+ grid_size = grid->width * grid->height;
+ me->data = kvmalloc(grid_size * sizeof(*me->data), GFP_KERNEL);
+ if (!me->data)
+ goto err;
+ /* No weighted histogram, no structure, treat the histogram data as a byte dump in a byte array */
+ me->rgby_data = kvmalloc(sizeof_hmem(HMEM0_ID), GFP_KERNEL);
+ if (!me->rgby_data)
+ goto err;
+
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+err:
+ ia_css_3a_statistics_free(me);
+
+ IA_CSS_LEAVE("return=%p", NULL);
+ return NULL;
+}
+
+void
+ia_css_3a_statistics_free(struct ia_css_3a_statistics *me)
+{
+ if (me) {
+ kvfree(me->rgby_data);
+ kvfree(me->data);
+ kvfree(me);
+ }
+}
+
+struct ia_css_dvs_statistics *
+ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs_statistics *me;
+
+ assert(grid);
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+ me->hor_proj = kvmalloc(grid->height * IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->hor_proj), GFP_KERNEL);
+ if (!me->hor_proj)
+ goto err;
+
+ me->ver_proj = kvmalloc(grid->width * IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->ver_proj), GFP_KERNEL);
+ if (!me->ver_proj)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs_statistics_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me)
+{
+ if (me) {
+ kvfree(me->hor_proj);
+ kvfree(me->ver_proj);
+ kvfree(me);
+ }
+}
+
+struct ia_css_dvs_coefficients *
+ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs_coefficients *me;
+
+ assert(grid);
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_coefs = kvmalloc(grid->num_hor_coefs *
+ IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->hor_coefs), GFP_KERNEL);
+ if (!me->hor_coefs)
+ goto err;
+
+ me->ver_coefs = kvmalloc(grid->num_ver_coefs *
+ IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->ver_coefs), GFP_KERNEL);
+ if (!me->ver_coefs)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs_coefficients_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me)
+{
+ if (me) {
+ kvfree(me->hor_coefs);
+ kvfree(me->ver_coefs);
+ kvfree(me);
+ }
+}
+
+struct ia_css_dvs2_statistics *
+ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs2_statistics *me;
+
+ assert(grid);
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_prod.odd_real = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->hor_prod.odd_real),
+ GFP_KERNEL);
+ if (!me->hor_prod.odd_real)
+ goto err;
+
+ me->hor_prod.odd_imag = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->hor_prod.odd_imag),
+ GFP_KERNEL);
+ if (!me->hor_prod.odd_imag)
+ goto err;
+
+ me->hor_prod.even_real = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->hor_prod.even_real),
+ GFP_KERNEL);
+ if (!me->hor_prod.even_real)
+ goto err;
+
+ me->hor_prod.even_imag = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->hor_prod.even_imag),
+ GFP_KERNEL);
+ if (!me->hor_prod.even_imag)
+ goto err;
+
+ me->ver_prod.odd_real = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->ver_prod.odd_real),
+ GFP_KERNEL);
+ if (!me->ver_prod.odd_real)
+ goto err;
+
+ me->ver_prod.odd_imag = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->ver_prod.odd_imag),
+ GFP_KERNEL);
+ if (!me->ver_prod.odd_imag)
+ goto err;
+
+ me->ver_prod.even_real = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->ver_prod.even_real),
+ GFP_KERNEL);
+ if (!me->ver_prod.even_real)
+ goto err;
+
+ me->ver_prod.even_imag = kvmalloc(grid->aligned_width *
+ grid->aligned_height *
+ sizeof(*me->ver_prod.even_imag),
+ GFP_KERNEL);
+ if (!me->ver_prod.even_imag)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs2_statistics_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me)
+{
+ if (me) {
+ kvfree(me->hor_prod.odd_real);
+ kvfree(me->hor_prod.odd_imag);
+ kvfree(me->hor_prod.even_real);
+ kvfree(me->hor_prod.even_imag);
+ kvfree(me->ver_prod.odd_real);
+ kvfree(me->ver_prod.odd_imag);
+ kvfree(me->ver_prod.even_real);
+ kvfree(me->ver_prod.even_imag);
+ kvfree(me);
+ }
+}
+
+struct ia_css_dvs2_coefficients *
+ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs2_coefficients *me;
+
+ assert(grid);
+
+ me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_coefs.odd_real = kvmalloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.odd_real),
+ GFP_KERNEL);
+ if (!me->hor_coefs.odd_real)
+ goto err;
+
+ me->hor_coefs.odd_imag = kvmalloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.odd_imag),
+ GFP_KERNEL);
+ if (!me->hor_coefs.odd_imag)
+ goto err;
+
+ me->hor_coefs.even_real = kvmalloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.even_real),
+ GFP_KERNEL);
+ if (!me->hor_coefs.even_real)
+ goto err;
+
+ me->hor_coefs.even_imag = kvmalloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.even_imag),
+ GFP_KERNEL);
+ if (!me->hor_coefs.even_imag)
+ goto err;
+
+ me->ver_coefs.odd_real = kvmalloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.odd_real),
+ GFP_KERNEL);
+ if (!me->ver_coefs.odd_real)
+ goto err;
+
+ me->ver_coefs.odd_imag = kvmalloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.odd_imag),
+ GFP_KERNEL);
+ if (!me->ver_coefs.odd_imag)
+ goto err;
+
+ me->ver_coefs.even_real = kvmalloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.even_real),
+ GFP_KERNEL);
+ if (!me->ver_coefs.even_real)
+ goto err;
+
+ me->ver_coefs.even_imag = kvmalloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.even_imag),
+ GFP_KERNEL);
+ if (!me->ver_coefs.even_imag)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs2_coefficients_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me)
+{
+ if (me) {
+ kvfree(me->hor_coefs.odd_real);
+ kvfree(me->hor_coefs.odd_imag);
+ kvfree(me->hor_coefs.even_real);
+ kvfree(me->hor_coefs.even_imag);
+ kvfree(me->ver_coefs.odd_real);
+ kvfree(me->ver_coefs.odd_imag);
+ kvfree(me->ver_coefs.even_real);
+ kvfree(me->ver_coefs.even_imag);
+ kvfree(me);
+ }
+}
+
+struct ia_css_dvs_6axis_config *
+ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream)
+{
+ struct ia_css_dvs_6axis_config *dvs_config = NULL;
+ struct ia_css_isp_parameters *params = NULL;
+ unsigned int width_y;
+ unsigned int height_y;
+ unsigned int width_uv;
+ unsigned int height_uv;
+
+ assert(stream);
+ params = stream->isp_params_configs;
+
+ /* For backward compatibility, consider the pipe to be the video pipe by default */
+ if (!params || !params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO])
+ goto err;
+
+ dvs_config = kvcalloc(1, sizeof(struct ia_css_dvs_6axis_config),
+ GFP_KERNEL);
+ if (!dvs_config)
+ goto err;
+
+ dvs_config->width_y = width_y =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_y;
+ dvs_config->height_y = height_y =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_y;
+ dvs_config->width_uv = width_uv =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_uv;
+ dvs_config->height_uv = height_uv =
+ params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_uv;
+ IA_CSS_LOG("table Y: W %d H %d", width_y, height_y);
+ IA_CSS_LOG("table UV: W %d H %d", width_uv, height_uv);
+ dvs_config->xcoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->xcoords_y)
+ goto err;
+
+ dvs_config->ycoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->ycoords_y)
+ goto err;
+
+ dvs_config->xcoords_uv = kvmalloc(width_uv * height_uv *
+ sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->xcoords_uv)
+ goto err;
+
+ dvs_config->ycoords_uv = kvmalloc(width_uv * height_uv *
+ sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dvs_config->ycoords_uv)
+ goto err;
+
+ return dvs_config;
+err:
+ ia_css_dvs2_6axis_config_free(dvs_config);
+ return NULL;
+}
+
+void
+ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config)
+{
+ if (dvs_6axis_config) {
+ kvfree(dvs_6axis_config->xcoords_y);
+ kvfree(dvs_6axis_config->ycoords_y);
+ kvfree(dvs_6axis_config->xcoords_uv);
+ kvfree(dvs_6axis_config->ycoords_uv);
+ kvfree(dvs_6axis_config);
+ }
+}
+
+void
+ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable)
+{
+ struct ia_css_pipe *pipe;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+ enum ia_css_pipe_id pipe_id;
+ int err;
+ int i;
+
+ if (!stream)
+ return;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ pipe = stream->pipes[i];
+ pipeline = ia_css_pipe_get_pipeline(pipe);
+ pipe_id = pipeline->pipe_id;
+
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) {
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ &stage);
+ if (!err)
+ stage->enable_zoom = enable;
+ break;
+ }
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.h b/drivers/staging/media/atomisp/pci/sh_css_params.h
new file mode 100644
index 000000000000..75957dea3c38
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_PARAMS_H_
+#define _SH_CSS_PARAMS_H_
+
+/*! \file */
+
+/* Forward declaration to break mutual dependency */
+struct ia_css_isp_parameters;
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_legacy.h"
+
+#include "sh_css_defs.h" /* SH_CSS_MAX_STAGES */
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "crop/crop_1.0/ia_css_crop_types.h"
+
+#define PIX_SHIFT_FILTER_RUN_IN_X 12
+#define PIX_SHIFT_FILTER_RUN_IN_Y 12
+
+#include "ob/ob_1.0/ia_css_ob_param.h"
+/* Isp configurations per stream */
+struct sh_css_isp_param_configs {
+ /* OB (Optical Black) */
+ struct sh_css_isp_ob_stream_config ob;
+};
+
+/* Isp parameters per stream */
+struct ia_css_isp_parameters {
+ /* UDS */
+ struct sh_css_sp_uds_params uds[SH_CSS_MAX_STAGES];
+ struct sh_css_isp_param_configs stream_configs;
+ struct ia_css_fpn_table fpn_config;
+ struct ia_css_vector motion_config;
+ const struct ia_css_morph_table *morph_table;
+ const struct ia_css_shading_table *sc_table;
+ struct ia_css_shading_table *sc_config;
+ struct ia_css_macc_table macc_table;
+ struct ia_css_gamma_table gc_table;
+ struct ia_css_ctc_table ctc_table;
+ struct ia_css_xnr_table xnr_table;
+
+ struct ia_css_dz_config dz_config;
+ struct ia_css_3a_config s3a_config;
+ struct ia_css_wb_config wb_config;
+ struct ia_css_cc_config cc_config;
+ struct ia_css_cc_config yuv2rgb_cc_config;
+ struct ia_css_cc_config rgb2yuv_cc_config;
+ struct ia_css_tnr_config tnr_config;
+ struct ia_css_ob_config ob_config;
+ /*----- DPC configuration -----*/
+ /* The default DPC configuration is retained and currently set
+ * using the stream configuration. The code generated from genparams
+ * uses this configuration to set the DPC parameters per stage but this
+ * will be overwritten by the per pipe configuration */
+ struct ia_css_dp_config dp_config;
+ /* ------ pipe specific DPC configuration ------ */
+ /* Please note that this implementation is a temporary solution and
+ * should be replaced by CSS per pipe configuration when the support
+ * is ready (HSD 1303967698)*/
+ struct ia_css_dp_config pipe_dp_config[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_nr_config nr_config;
+ struct ia_css_ee_config ee_config;
+ struct ia_css_de_config de_config;
+ struct ia_css_gc_config gc_config;
+ struct ia_css_anr_config anr_config;
+ struct ia_css_ce_config ce_config;
+ struct ia_css_formats_config formats_config;
+ /* ---- deprecated: replaced with pipe_dvs_6axis_config---- */
+ struct ia_css_dvs_6axis_config *dvs_6axis_config;
+ struct ia_css_ecd_config ecd_config;
+ struct ia_css_ynr_config ynr_config;
+ struct ia_css_yee_config yee_config;
+ struct ia_css_fc_config fc_config;
+ struct ia_css_cnr_config cnr_config;
+ struct ia_css_macc_config macc_config;
+ struct ia_css_ctc_config ctc_config;
+ struct ia_css_aa_config aa_config;
+ struct ia_css_aa_config bds_config;
+ struct ia_css_aa_config raa_config;
+ struct ia_css_rgb_gamma_table r_gamma_table;
+ struct ia_css_rgb_gamma_table g_gamma_table;
+ struct ia_css_rgb_gamma_table b_gamma_table;
+ struct ia_css_anr_thres anr_thres;
+ struct ia_css_xnr_config xnr_config;
+ struct ia_css_xnr3_config xnr3_config;
+ struct ia_css_uds_config uds_config;
+ struct ia_css_crop_config crop_config;
+ struct ia_css_output_config output_config;
+ struct ia_css_dvs_6axis_config *pipe_dvs_6axis_config[IA_CSS_PIPE_ID_NUM];
+ /* ------ deprecated(bz675) : from ------ */
+ struct ia_css_shading_settings shading_settings;
+ /* ------ deprecated(bz675) : to ------ */
+ struct ia_css_dvs_coefficients dvs_coefs;
+ struct ia_css_dvs2_coefficients dvs2_coefs;
+
+ bool isp_params_changed;
+
+ bool isp_mem_params_changed
+ [IA_CSS_PIPE_ID_NUM][SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ bool dz_config_changed;
+ bool motion_config_changed;
+ bool dis_coef_table_changed;
+ bool dvs2_coef_table_changed;
+ bool morph_table_changed;
+ bool sc_table_changed;
+ bool anr_thres_changed;
+ /* ---- deprecated: replaced with pipe_dvs_6axis_config_changed ---- */
+ bool dvs_6axis_config_changed;
+ /* ------ pipe specific DPC configuration ------ */
+ /* Please note that this implementation is a temporary solution and
+ * should be replaced by CSS per pipe configuration when the support
+ * is ready (HSD 1303967698) */
+ bool pipe_dpc_config_changed[IA_CSS_PIPE_ID_NUM];
+ /* ------ deprecated(bz675) : from ------ */
+ bool shading_settings_changed;
+ /* ------ deprecated(bz675) : to ------ */
+ bool pipe_dvs_6axis_config_changed[IA_CSS_PIPE_ID_NUM];
+
+ bool config_changed[IA_CSS_NUM_PARAMETER_IDS];
+
+ unsigned int sensor_binning;
+ /* local buffers, used to re-order the 3a statistics in vmem-format */
+ struct sh_css_ddr_address_map pipe_ddr_ptrs[IA_CSS_PIPE_ID_NUM];
+ struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM];
+ struct sh_css_ddr_address_map ddr_ptrs;
+ struct sh_css_ddr_address_map_size ddr_ptrs_size;
+ struct ia_css_frame
+ *output_frame; /** Output frame the config is to be applied to (optional) */
+ u32 isp_parameters_id; /** Unique ID to track which config was actually applied to a particular frame */
+};
+
+void
+ia_css_params_store_ia_css_host_data(
+ ia_css_ptr ddr_addr,
+ struct ia_css_host_data *data);
+
+int
+ia_css_params_store_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ ia_css_ptr ddr_addr,
+ const struct ia_css_shading_table *shading_table);
+
+struct ia_css_host_data *
+ia_css_params_alloc_convert_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ const struct ia_css_shading_table *shading_table);
+
+struct ia_css_isp_config *
+sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe);
+
+int
+sh_css_params_map_and_store_default_gdc_lut(void);
+
+void
+sh_css_params_free_default_gdc_lut(void);
+
+ia_css_ptr
+sh_css_params_get_default_gdc_lut(void);
+
+ia_css_ptr
+sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe);
+
+#endif /* _SH_CSS_PARAMS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params_internal.h b/drivers/staging/media/atomisp/pci/sh_css_params_internal.h
new file mode 100644
index 000000000000..b832e18b42e4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_params_internal.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_PARAMS_INTERNAL_H_
+#define _SH_CSS_PARAMS_INTERNAL_H_
+
+void
+sh_css_param_clear_param_sets(void);
+
+#endif /* _SH_CSS_PARAMS_INTERNAL_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_properties.c b/drivers/staging/media/atomisp/pci/sh_css_properties.c
new file mode 100644
index 000000000000..caeeaf9a9536
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_properties.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "ia_css_properties.h"
+#include <assert_support.h>
+#include "ia_css_types.h"
+#include "gdc_device.h"
+
+void
+ia_css_get_properties(struct ia_css_properties *properties)
+{
+ assert(properties);
+ /*
+ * MW: We don't want to store the coordinates
+ * full range in memory: Truncate
+ */
+ properties->gdc_coord_one = gdc_get_unity(GDC0_ID) / HRT_GDC_COORD_SCALE;
+
+ properties->l1_base_is_index = true;
+
+ properties->vamem_type = IA_CSS_VAMEM_TYPE_2;
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
new file mode 100644
index 000000000000..6da151e7a873
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
@@ -0,0 +1,1741 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "hmm.h"
+
+#include "sh_css_sp.h"
+
+#include "input_formatter.h"
+
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+
+#include "ia_css_buffer.h"
+#include "ia_css_binary.h"
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_control.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_event_public.h"
+#include "ia_css_mmu.h"
+#include "ia_css_stream.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_params.h"
+#include "sh_css_legacy.h"
+#include "ia_css_frame_comm.h"
+#include "ia_css_isys.h"
+
+#include "gdc_device.h" /* HRT_GDC_N */
+
+/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */
+
+
+#include "assert_support.h"
+
+#include "sw_event_global.h" /* Event IDs.*/
+#include "ia_css_event.h"
+#include "mmu_device.h"
+#include "ia_css_spctrl.h"
+#include "atomisp_internal.h"
+
+#ifndef offsetof
+#define offsetof(T, x) ((unsigned int)&(((T *)0)->x))
+#endif
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#define IA_CSS_INCLUDE_STATES
+#include "ia_css_isp_states.h"
+
+#include "isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h"
+
+struct sh_css_sp_group sh_css_sp_group;
+struct sh_css_sp_stage sh_css_sp_stage;
+struct sh_css_isp_stage sh_css_isp_stage;
+static struct sh_css_sp_output sh_css_sp_output;
+static struct sh_css_sp_per_frame_data per_frame_data;
+
+/* true if SP supports frame loop and host2sp_commands */
+/* For the moment there is only code that sets this bool to true */
+/* TODO: add code that sets this bool to false */
+static bool sp_running;
+
+static int
+set_output_frame_buffer(const struct ia_css_frame *frame,
+ unsigned int idx);
+
+static void
+sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
+ const enum sh_css_queue_id queue_id,
+ const ia_css_ptr xmem_addr,
+ const enum ia_css_buffer_type buf_type);
+
+static void
+initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr);
+
+static void
+initialize_stage_frames(struct ia_css_frames_sp *frames);
+
+/* This data is stored every frame */
+void
+store_sp_group_data(void)
+{
+ per_frame_data.sp_group_addr = sh_css_store_sp_group_to_ddr();
+}
+
+static void
+copy_isp_stage_to_sp_stage(void)
+{
+	/* [WW07.5] type casting will cause potential issues */
+ sh_css_sp_stage.num_stripes = (uint8_t)
+ sh_css_isp_stage.binary_info.iterator.num_stripes;
+ sh_css_sp_stage.row_stripes_height = (uint16_t)
+ sh_css_isp_stage.binary_info.iterator.row_stripes_height;
+ sh_css_sp_stage.row_stripes_overlap_lines = (uint16_t)
+ sh_css_isp_stage.binary_info.iterator.row_stripes_overlap_lines;
+ sh_css_sp_stage.top_cropping = (uint16_t)
+ sh_css_isp_stage.binary_info.pipeline.top_cropping;
+ sh_css_sp_stage.enable.sdis = sh_css_isp_stage.binary_info.enable.dis;
+ sh_css_sp_stage.enable.s3a = sh_css_isp_stage.binary_info.enable.s3a;
+}
+
+void
+store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num,
+ unsigned int stage)
+{
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ copy_isp_stage_to_sp_stage();
+ if (id != IA_CSS_PIPE_ID_COPY)
+ sh_css_sp_stage.isp_stage_addr =
+ sh_css_store_isp_stage_to_ddr(pipe_num, stage);
+ sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] =
+ sh_css_store_sp_stage_to_ddr(pipe_num, stage);
+
+ /* Clear for next frame */
+ sh_css_sp_stage.program_input_circuit = false;
+}
+
+static void
+store_sp_per_frame_data(const struct ia_css_fw_info *fw)
+{
+ unsigned int HIVE_ADDR_sp_per_frame_data = 0;
+
+ assert(fw);
+
+ switch (fw->type) {
+ case ia_css_sp_firmware:
+ HIVE_ADDR_sp_per_frame_data = fw->info.sp.per_frame_data;
+ break;
+ case ia_css_acc_firmware:
+ HIVE_ADDR_sp_per_frame_data = fw->info.acc.per_frame_data;
+ break;
+ case ia_css_isp_firmware:
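+		/* ISP firmware: there is no SP per-frame data to store */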
+ return;
+ }
+
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(sp_per_frame_data),
+ &per_frame_data,
+ sizeof(per_frame_data));
+}
+
+static void
+sh_css_store_sp_per_frame_data(enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ const struct ia_css_fw_info *sp_fw)
+{
+ if (!sp_fw)
+ sp_fw = &sh_css_sp_fw;
+
+ store_sp_stage_data(pipe_id, pipe_num, 0);
+ store_sp_group_data();
+ store_sp_per_frame_data(sp_fw);
+}
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void
+sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int i;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
+ debug) / sizeof(int);
+
+ assert(state);
+
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
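+	/* Copy the debug state word by word from the SP output area in DMEM */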
+ for (i = 0; i < sizeof(*state) / sizeof(int); i++)
+ ((unsigned int *)state)[i] = load_sp_array_uint(sp_output, i + offset);
+}
+
+#endif
+
+void
+sh_css_sp_start_binary_copy(unsigned int pipe_num,
+ struct ia_css_frame *out_frame,
+ unsigned int two_ppc)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ struct sh_css_sp_pipeline *pipe;
+ u8 stage_num = 0;
+
+ assert(out_frame);
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.bin.bytes_available = out_frame->data_bytes;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ pipe->pipe_num = pipe_num;
+ pipe->thread_id = thread_id;
+ pipe->pipe_config = 0x0; /* No parameters */
+ pipe->pipe_qos_config = QOS_INVALID;
+
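+	/*
+	 * If no in/out port configuration was set yet, default both the
+	 * input and the output port to the host type.
+	 */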
+ if (pipe->inout_port_config == 0) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe->pipe_id, pipe->inout_port_config);
+
+ if (!IS_ISP2401)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
+
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func =
+ (unsigned int)IA_CSS_PIPELINE_BIN_COPY;
+
+ set_output_frame_buffer(out_frame, 0);
+
+	/* sp_bin_copy_init on the SP does not deal with dynamic/static yet */
+ /* For now always update the dynamic data from out frames. */
+ sh_css_store_sp_per_frame_data(pipe_id, pipe_num, &sh_css_sp_fw);
+}
+
+static void
+sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
+ unsigned int pipe_num,
+ unsigned int two_ppc,
+ unsigned int max_input_width,
+ enum sh_css_pipe_config_override pipe_conf_override,
+ unsigned int if_config_index)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ u8 stage_num = 0;
+ struct sh_css_sp_pipeline *pipe;
+
+ assert(out_frame);
+
+ {
+ /*
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ u8 program_input_circuit;
+
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = program_input_circuit;
+ }
+
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.raw.height = out_frame->frame_info.res.height;
+ pipe->copy.raw.width = out_frame->frame_info.res.width;
+ pipe->copy.raw.padded_width = out_frame->frame_info.padded_width;
+ pipe->copy.raw.raw_bit_depth = out_frame->frame_info.raw_bit_depth;
+ pipe->copy.raw.max_input_width = max_input_width;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+	/*
+	 * TODO: next indicates from which queues parameters need to be
+	 * sampled, needs checking/improvement
+	 */
+ if (pipe_conf_override == SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD)
+ pipe->pipe_config =
+ (SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id);
+ else
+ pipe->pipe_config = pipe_conf_override;
+
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ if (pipe->inout_port_config == 0) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe->pipe_id, pipe->inout_port_config);
+
+ if (!IS_ISP2401)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
+
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.xmem_bin_addr = 0x0;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_RAW_COPY;
+ sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
+ set_output_frame_buffer(out_frame, 0);
+
+ ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
+}
+
+static void
+sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
+ unsigned int pipe_num, unsigned int max_input_width,
+ unsigned int if_config_index)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ u8 stage_num = 0;
+ struct sh_css_sp_pipeline *pipe;
+ enum sh_css_queue_id queue_id;
+
+ assert(out_frame);
+
+ {
+ /*
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ u8 program_input_circuit;
+
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = program_input_circuit;
+ }
+
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.raw.height = out_frame->frame_info.res.height;
+ pipe->copy.raw.width = out_frame->frame_info.res.width;
+ pipe->copy.raw.padded_width = out_frame->frame_info.padded_width;
+ pipe->copy.raw.raw_bit_depth = out_frame->frame_info.raw_bit_depth;
+ pipe->copy.raw.max_input_width = max_input_width;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ pipe->pipe_config = 0x0; /* No parameters */
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ initialize_stage_frames(&sh_css_sp_stage.frames);
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.xmem_bin_addr = 0x0;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_ISYS_COPY;
+ sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
+
+ set_output_frame_buffer(out_frame, 0);
+
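+	/*
+	 * If the pipe carries metadata, route the metadata buffer through
+	 * its internal queue.
+	 */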
+ if (pipe->metadata.height > 0) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf,
+ queue_id, mmgr_EXCEPTION,
+ IA_CSS_BUFFER_TYPE_METADATA);
+ }
+
+ ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
+}
+
+unsigned int
+sh_css_sp_get_binary_copy_size(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
+ bin_copy_bytes_copied) / sizeof(int);
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ return load_sp_array_uint(sp_output, offset);
+}
+
+unsigned int
+sh_css_sp_get_sw_interrupt_value(unsigned int irq)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
+ sw_interrupt_value)
+ / sizeof(int);
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ return load_sp_array_uint(sp_output, offset + irq);
+}
+
+static void
+sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
+ const enum sh_css_queue_id queue_id,
+ const ia_css_ptr xmem_addr,
+ const enum ia_css_buffer_type buf_type)
+{
+ assert(buf_type < IA_CSS_NUM_BUFFER_TYPE);
+ if (queue_id > SH_CSS_INVALID_QUEUE_ID) {
+ /*
+ * value >=0 indicates that function init_frame_pointers()
+ * should use the dynamic data address
+ */
+ assert(queue_id < SH_CSS_MAX_NUM_QUEUES);
+
+		/*
+		 * Klocwork assumes the assert can be disabled: since we can
+		 * get here with any type, and it does not know that
+		 * frame_in->dynamic_data_index can only be set for one of
+		 * the types in the assert, it has to assume we can get here
+		 * for any type. However, this could lead to an out-of-bounds
+		 * reference when indexing buf_type about 10 lines below.
+		 * In order to satisfy Klocwork an additional if has been
+		 * added; this one will always yield true.
+		 */
+ if (queue_id < SH_CSS_MAX_NUM_QUEUES)
+ dest_buf->buf_src.queue_id = queue_id;
+ } else {
+ assert(xmem_addr != mmgr_EXCEPTION);
+ dest_buf->buf_src.xmem_addr = xmem_addr;
+ }
+ dest_buf->buf_type = buf_type;
+}
+
+static void
+sh_css_copy_frame_to_spframe(struct ia_css_frame_sp *sp_frame_out,
+ const struct ia_css_frame *frame_in)
+{
+ assert(frame_in);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_copy_frame_to_spframe():\n");
+
+ sh_css_copy_buffer_attr_to_spbuffer(&sp_frame_out->buf_attr,
+ frame_in->dynamic_queue_id,
+ frame_in->data,
+ frame_in->buf_type);
+
+ ia_css_frame_info_to_frame_sp_info(&sp_frame_out->info, &frame_in->frame_info);
+
+ switch (frame_in->frame_info.format) {
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ sp_frame_out->planes.raw.offset = frame_in->planes.raw.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ sp_frame_out->planes.rgb.offset = frame_in->planes.rgb.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ sp_frame_out->planes.planar_rgb.r.offset =
+ frame_in->planes.planar_rgb.r.offset;
+ sp_frame_out->planes.planar_rgb.g.offset =
+ frame_in->planes.planar_rgb.g.offset;
+ sp_frame_out->planes.planar_rgb.b.offset =
+ frame_in->planes.planar_rgb.b.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ sp_frame_out->planes.yuyv.offset = frame_in->planes.yuyv.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_NV11:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ sp_frame_out->planes.nv.y.offset =
+ frame_in->planes.nv.y.offset;
+ sp_frame_out->planes.nv.uv.offset =
+ frame_in->planes.nv.uv.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ sp_frame_out->planes.yuv.y.offset =
+ frame_in->planes.yuv.y.offset;
+ sp_frame_out->planes.yuv.u.offset =
+ frame_in->planes.yuv.u.offset;
+ sp_frame_out->planes.yuv.v.offset =
+ frame_in->planes.yuv.v.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ sp_frame_out->planes.plane6.r.offset =
+ frame_in->planes.plane6.r.offset;
+ sp_frame_out->planes.plane6.r_at_b.offset =
+ frame_in->planes.plane6.r_at_b.offset;
+ sp_frame_out->planes.plane6.gr.offset =
+ frame_in->planes.plane6.gr.offset;
+ sp_frame_out->planes.plane6.gb.offset =
+ frame_in->planes.plane6.gb.offset;
+ sp_frame_out->planes.plane6.b.offset =
+ frame_in->planes.plane6.b.offset;
+ sp_frame_out->planes.plane6.b_at_r.offset =
+ frame_in->planes.plane6.b_at_r.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ sp_frame_out->planes.binary.data.offset =
+ frame_in->planes.binary.data.offset;
+ break;
+ default:
+ /*
+ * This should not happen, but in case it does,
+ * nullify the planes
+ */
+ memset(&sp_frame_out->planes, 0, sizeof(sp_frame_out->planes));
+ break;
+ }
+}
+
+static int
+set_input_frame_buffer(const struct ia_css_frame *frame)
+{
+ if (!frame)
+ return -EINVAL;
+
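+	/* Only the input frame formats listed below are accepted */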
+ switch (frame->frame_info.format) {
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
+ break;
+ default:
+ return -EINVAL;
+ }
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.in, frame);
+
+ return 0;
+}
+
+static int
+set_output_frame_buffer(const struct ia_css_frame *frame,
+ unsigned int idx)
+{
+ if (!frame)
+ return -EINVAL;
+
+ switch (frame->frame_info.format) {
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ case IA_CSS_FRAME_FORMAT_NV11:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out[idx], frame);
+ return 0;
+}
+
+static int
+set_view_finder_buffer(const struct ia_css_frame *frame)
+{
+ if (!frame)
+ return -EINVAL;
+
+ switch (frame->frame_info.format) {
+ /* the dual output pin */
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+
+ /* for vf_veceven */
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out_vf, frame);
+ return 0;
+}
+
+void sh_css_sp_set_if_configs(
+ const input_formatter_cfg_t *config_a,
+ const input_formatter_cfg_t *config_b,
+ const uint8_t if_config_index
+)
+{
+ assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+ assert(config_a);
+
+ sh_css_sp_group.config.input_formatter.set[if_config_index].config_a =
+ *config_a;
+ sh_css_sp_group.config.input_formatter.a_changed = true;
+
+ if (config_b) {
+ sh_css_sp_group.config.input_formatter.set[if_config_index].config_b =
+ *config_b;
+ sh_css_sp_group.config.input_formatter.b_changed = true;
+ }
+}
+
+void
+sh_css_sp_program_input_circuit(int fmt_type,
+ int ch_id,
+ enum ia_css_input_mode input_mode)
+{
+ sh_css_sp_group.config.input_circuit.no_side_band = false;
+ sh_css_sp_group.config.input_circuit.fmt_type = fmt_type;
+ sh_css_sp_group.config.input_circuit.ch_id = ch_id;
+ sh_css_sp_group.config.input_circuit.input_mode = input_mode;
+	/*
+	 * The SP group is only loaded at SP boot time and is read once;
+	 * change flags such as "input_circuit_cfg_changed" must be reset
+	 * on the SP.
+	 */
+ sh_css_sp_group.config.input_circuit_cfg_changed = true;
+ sh_css_sp_stage.program_input_circuit = true;
+}
+
+void
+sh_css_sp_configure_sync_gen(int width, int height,
+ int hblank_cycles,
+ int vblank_cycles)
+{
+ sh_css_sp_group.config.sync_gen.width = width;
+ sh_css_sp_group.config.sync_gen.height = height;
+ sh_css_sp_group.config.sync_gen.hblank_cycles = hblank_cycles;
+ sh_css_sp_group.config.sync_gen.vblank_cycles = vblank_cycles;
+}
+
+void
+sh_css_sp_configure_prbs(int seed)
+{
+ sh_css_sp_group.config.prbs.seed = seed;
+}
+
+void
+sh_css_sp_configure_enable_raw_pool_locking(bool lock_all)
+{
+ sh_css_sp_group.config.enable_raw_pool_locking = true;
+ sh_css_sp_group.config.lock_all = lock_all;
+}
+
+void
+sh_css_sp_enable_isys_event_queue(bool enable)
+{
+ sh_css_sp_group.config.enable_isys_event_queue = enable;
+}
+
+void
+sh_css_sp_set_disable_continuous_viewfinder(bool flag)
+{
+ sh_css_sp_group.config.disable_cont_vf = flag;
+}
+
+static int
+sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args)
+{
+ int err = 0;
+ int i;
+
+ assert(args);
+
+ if (args->in_frame)
+ err = set_input_frame_buffer(args->in_frame);
+ if (!err && args->out_vf_frame)
+ err = set_view_finder_buffer(args->out_vf_frame);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (!err && args->out_frame[i])
+ err = set_output_frame_buffer(args->out_frame[i], i);
+ }
+
+	/*
+	 * We don't pass this error back to the upper layer, so we add an
+	 * assert here because we actually hit the error here but it still
+	 * works by accident...
+	 */
+ if (err)
+ assert(false);
+ return err;
+}
+
+static void
+sh_css_sp_init_group(bool two_ppc,
+ enum atomisp_input_format input_format,
+ bool no_isp_sync,
+ uint8_t if_config_index)
+{
+ if (!IS_ISP2401)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
+
+ sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync;
+ /* decide whether the frame is processed online or offline */
+ if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED)
+ return;
+
+ if (!IS_ISP2401) {
+ assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+ sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
+ input_format;
+ }
+}
+
+void
+sh_css_stage_write_binary_info(struct ia_css_binary_info *info)
+{
+ assert(info);
+ sh_css_isp_stage.binary_info = *info;
+}
+
+static int
+copy_isp_mem_if_to_ddr(struct ia_css_binary *binary)
+{
+ int err;
+
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ &binary->css_params,
+ &binary->mem_params,
+ IA_CSS_PARAM_CLASS_CONFIG);
+ if (err)
+ return err;
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ &binary->css_params,
+ &binary->mem_params,
+ IA_CSS_PARAM_CLASS_STATE);
+ if (err)
+ return err;
+ return 0;
+}
+
+static bool
+is_sp_stage(struct ia_css_pipeline_stage *stage)
+{
+ assert(stage);
+ return stage->sp_func != IA_CSS_PIPELINE_NO_FUNC;
+}
+
+static int configure_isp_from_args(const struct sh_css_sp_pipeline *pipeline,
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args,
+ bool two_ppc,
+ bool deinterleaved)
+{
+ int ret;
+
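+	/* Apply each kernel configuration in turn and stop at the first error */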
+ ret = ia_css_fpn_configure(binary, &binary->in_frame_info);
+ if (ret)
+ return ret;
+ ret = ia_css_crop_configure(binary, ia_css_frame_get_info(args->delay_frames[0]));
+ if (ret)
+ return ret;
+ ret = ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info);
+ if (ret)
+ return ret;
+ ret = ia_css_output0_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
+ if (ret)
+ return ret;
+ ret = ia_css_output1_configure(binary, ia_css_frame_get_info(args->out_vf_frame));
+ if (ret)
+ return ret;
+ ret = ia_css_copy_output_configure(binary, args->copy_output);
+ if (ret)
+ return ret;
+ ret = ia_css_output0_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
+ if (ret)
+ return ret;
+ ret = ia_css_iterator_configure(binary, ia_css_frame_get_info(args->in_frame));
+ if (ret)
+ return ret;
+ ret = ia_css_dvs_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
+ if (ret)
+ return ret;
+ ret = ia_css_output_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
+ if (ret)
+ return ret;
+ ret = ia_css_raw_configure(pipeline, binary, ia_css_frame_get_info(args->in_frame),
+ &binary->in_frame_info, two_ppc, deinterleaved);
+ if (ret)
+ return ret;
+
+ /*
+ * FIXME: args->delay_frames can be NULL here
+ *
+ * Somehow, the driver at the Intel Atom Yocto tree doesn't seem to
+ * suffer from the same issue.
+ *
+ * Anyway, the function below should now handle a NULL delay_frames
+ * without crashing, but the pipeline should likely be built without
+	 * adding it in the first place (or there is a hidden bug somewhere)
+ */
+ ret = ia_css_ref_configure(binary, args->delay_frames, pipeline->dvs_frame_delay);
+ if (ret)
+ return ret;
+ ret = ia_css_tnr_configure(binary, args->tnr_frames);
+ if (ret)
+ return ret;
+ return ia_css_bayer_io_config(binary, args);
+}
+
+static void
+initialize_isp_states(const struct ia_css_binary *binary)
+{
+ unsigned int i;
+
+ if (!binary->info->mem_offsets.offsets.state)
+ return;
+
+ for (i = 0; i < IA_CSS_NUM_STATE_IDS; i++)
+ ia_css_kernel_init_state[i](binary);
+}
+
+static void
+initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr)
+{
+ buf_attr->buf_src.queue_id = SH_CSS_INVALID_QUEUE_ID;
+ buf_attr->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
+}
+
+static void
+initialize_stage_frames(struct ia_css_frames_sp *frames)
+{
+ unsigned int i;
+
+ initialize_frame_buffer_attribute(&frames->in.buf_attr);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ initialize_frame_buffer_attribute(&frames->out[i].buf_attr);
+
+ initialize_frame_buffer_attribute(&frames->out_vf.buf_attr);
+ initialize_frame_buffer_attribute(&frames->s3a_buf);
+ initialize_frame_buffer_attribute(&frames->dvs_buf);
+ initialize_frame_buffer_attribute(&frames->metadata_buf);
+}
+
+static int
+sh_css_sp_init_stage(struct ia_css_binary *binary,
+ const char *binary_name,
+ const struct ia_css_blob_info *blob_info,
+ const struct sh_css_binary_args *args,
+ unsigned int pipe_num,
+ unsigned int stage,
+ bool xnr,
+ const struct ia_css_isp_param_css_segments *isp_mem_if,
+ unsigned int if_config_index,
+ bool two_ppc)
+{
+ const struct ia_css_binary_xinfo *xinfo;
+ const struct ia_css_binary_info *info;
+ int err = 0;
+ int i;
+ struct ia_css_pipe *pipe = NULL;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ bool continuous = sh_css_continuous_is_enabled((uint8_t)pipe_num);
+
+ assert(binary);
+ assert(blob_info);
+ assert(args);
+ assert(isp_mem_if);
+
+ xinfo = binary->info;
+ info = &xinfo->sp;
+ {
+ /*
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ u8 program_input_circuit;
+
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = (uint8_t)program_input_circuit;
+ }
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+ if (!info) {
+ sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] = mmgr_NULL;
+ return 0;
+ }
+
+ if (IS_ISP2401)
+ sh_css_sp_stage.deinterleaved = 0;
+ else
+ sh_css_sp_stage.deinterleaved = ((stage == 0) && continuous);
+
+ initialize_stage_frames(&sh_css_sp_stage.frames);
+ /*
+ * TODO: Make the Host dynamically determine
+ * the stage type.
+ */
+ sh_css_sp_stage.stage_type = SH_CSS_ISP_STAGE_TYPE;
+ sh_css_sp_stage.num = (uint8_t)stage;
+ sh_css_sp_stage.isp_online = (uint8_t)binary->online;
+ sh_css_sp_stage.isp_copy_vf = (uint8_t)args->copy_vf;
+ sh_css_sp_stage.isp_copy_output = (uint8_t)args->copy_output;
+ sh_css_sp_stage.enable.vf_output = (args->out_vf_frame != NULL);
+
+ /*
+ * Copy the frame infos first, to be overwritten by the frames,
+ * if these are present.
+ */
+ sh_css_sp_stage.frames.effective_in_res.width = binary->effective_in_frame_res.width;
+ sh_css_sp_stage.frames.effective_in_res.height = binary->effective_in_frame_res.height;
+
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.in.info,
+ &binary->in_frame_info);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.out[i].info,
+ &binary->out_frame_info[i]);
+ }
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.internal_frame_info,
+ &binary->internal_frame_info);
+ sh_css_sp_stage.dvs_envelope.width = binary->dvs_envelope.width;
+ sh_css_sp_stage.dvs_envelope.height = binary->dvs_envelope.height;
+ sh_css_sp_stage.isp_pipe_version = (uint8_t)info->pipeline.isp_pipe_version;
+ sh_css_sp_stage.isp_deci_log_factor = (uint8_t)binary->deci_factor_log2;
+ sh_css_sp_stage.isp_vf_downscale_bits = (uint8_t)binary->vf_downscale_log2;
+
+ sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
+
+ sh_css_sp_stage.sp_enable_xnr = (uint8_t)xnr;
+ sh_css_sp_stage.xmem_bin_addr = xinfo->xmem_addr;
+ sh_css_sp_stage.xmem_map_addr = sh_css_params_ddr_address_map();
+ sh_css_isp_stage.blob_info = *blob_info;
+ sh_css_stage_write_binary_info((struct ia_css_binary_info *)info);
+
+ /* Make sure binary name is smaller than allowed string size */
+ assert(strlen(binary_name) < SH_CSS_MAX_BINARY_NAME - 1);
+ strscpy(sh_css_isp_stage.binary_name, binary_name, SH_CSS_MAX_BINARY_NAME);
+ sh_css_isp_stage.mem_initializers = *isp_mem_if;
+
+ /*
+	 * Even when a stage does not need uds and does not need params,
+	 * ia_css_uds_sp_scale_params() seems to be called (needs
+	 * further investigation). This function cannot deal with
+ * dx, dy = {0, 0}
+ */
+
+ err = sh_css_sp_write_frame_pointers(args);
+ /* TODO: move it to a better place */
+ if (binary->info->sp.enable.s3a) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_3A_STATISTICS, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.s3a_buf, queue_id,
+ mmgr_EXCEPTION,
+ IA_CSS_BUFFER_TYPE_3A_STATISTICS);
+ }
+ if (binary->info->sp.enable.dis) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_DIS_STATISTICS, thread_id,
+ &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.dvs_buf, queue_id,
+ mmgr_EXCEPTION,
+ IA_CSS_BUFFER_TYPE_DIS_STATISTICS);
+ }
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA);
+ if (err)
+ return err;
+
+ if (IS_ISP2401) {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (!pipe)
+ return -EINVAL;
+
+ if (args->in_frame)
+ ia_css_get_crop_offsets(pipe, &args->in_frame->frame_info);
+ else
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+ }
+
+ err = configure_isp_from_args(&sh_css_sp_group.pipe[thread_id],
+ binary, args, two_ppc, sh_css_sp_stage.deinterleaved);
+ if (err)
+ return err;
+
+ initialize_isp_states(binary);
+
+	/*
+	 * We do this only for the preview pipe because in the fill_binary_info
+	 * function we assign the vf_out res to the out res, but for ISP
+	 * internal processing we need the original out res. The video pipe has
+	 * two output pins --- out and vf_out --- so it already keeps both
+	 * resolutions.
+	 */
+ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
+ (binary->vf_downscale_log2 > 0)) {
+ /*
+ * TODO: Remove this after preview output decimation is fixed
+ * by configuring out&vf info fields properly.
+ */
+ sh_css_sp_stage.frames.out[0].info.padded_width
+ <<= binary->vf_downscale_log2;
+ sh_css_sp_stage.frames.out[0].info.res.width
+ <<= binary->vf_downscale_log2;
+ sh_css_sp_stage.frames.out[0].info.res.height
+ <<= binary->vf_downscale_log2;
+ }
+ err = copy_isp_mem_if_to_ddr(binary);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+sp_init_stage(struct ia_css_pipeline_stage *stage,
+ unsigned int pipe_num,
+ bool xnr,
+ unsigned int if_config_index,
+ bool two_ppc)
+{
+ struct ia_css_binary *binary;
+ const struct ia_css_fw_info *firmware;
+ const struct sh_css_binary_args *args;
+ unsigned int stage_num;
+ /*
+ * Initialiser required because of the "else" path below.
+	 * Is this a valid path?
+ */
+ const char *binary_name = "";
+ const struct ia_css_binary_xinfo *info = NULL;
+ /*
+ * Note: the var below is made static as it is quite large;
+ * if it is not static it ends up on the stack which could
+ * cause issues for drivers
+ */
+ static struct ia_css_binary tmp_binary;
+ const struct ia_css_blob_info *blob_info = NULL;
+ struct ia_css_isp_param_css_segments isp_mem_if;
+ /*
+ * LA: should be ia_css_data, should not contain host pointer.
+ * However, CSS/DDR pointer is not available yet.
+ * Hack is to store it in params->ddr_ptrs and then copy it late in
+ * the SP just before vmem init.
+ * TODO: Call this after CSS/DDR allocation and store that pointer.
+ * Best is to allocate it at stage creation time together with host
+ * pointer. Remove vmem from params.
+ */
+ struct ia_css_isp_param_css_segments *mem_if = &isp_mem_if;
+
+ int err = 0;
+
+ assert(stage);
+
+ binary = stage->binary;
+ firmware = stage->firmware;
+ args = &stage->args;
+ stage_num = stage->stage_num;
+
+ if (binary) {
+ info = binary->info;
+ binary_name = (const char *)(info->blob->name);
+ blob_info = &info->blob->header.blob;
+ ia_css_init_memory_interface(mem_if, &binary->mem_params, &binary->css_params);
+ } else if (firmware) {
+ const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+
+ if (args->out_frame[0])
+ out_infos[0] = &args->out_frame[0]->frame_info;
+ info = &firmware->info.isp;
+ ia_css_binary_fill_info(info, false, false,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ ia_css_frame_get_info(args->in_frame),
+ NULL,
+ out_infos,
+ ia_css_frame_get_info(args->out_vf_frame),
+ &tmp_binary,
+ NULL,
+ -1, true);
+ binary = &tmp_binary;
+ binary->info = info;
+ binary_name = IA_CSS_EXT_ISP_PROG_NAME(firmware);
+ blob_info = &firmware->blob;
+ mem_if = (struct ia_css_isp_param_css_segments *)&firmware->mem_initializers;
+ } else {
+ /* SP stage */
+ assert(stage->sp_func != IA_CSS_PIPELINE_NO_FUNC);
+ /*
+ * binary and blob_info are now NULL.
+ * These will be passed to sh_css_sp_init_stage
+ * and dereferenced there, so passing a NULL
+		 * pointer is no good. Return an error.
+ */
+ return -EINVAL;
+ }
+
+ err = sh_css_sp_init_stage(binary,
+ (const char *)binary_name,
+ blob_info,
+ args,
+ pipe_num,
+ stage_num,
+ xnr,
+ mem_if,
+ if_config_index,
+ two_ppc);
+ return err;
+}
+
+static void
+sp_init_sp_stage(struct ia_css_pipeline_stage *stage,
+ unsigned int pipe_num,
+ bool two_ppc,
+ enum sh_css_pipe_config_override copy_ovrd,
+ unsigned int if_config_index)
+{
+ const struct sh_css_binary_args *args = &stage->args;
+
+ assert(stage);
+ switch (stage->sp_func) {
+ case IA_CSS_PIPELINE_RAW_COPY:
+ sh_css_sp_start_raw_copy(args->out_frame[0],
+ pipe_num, two_ppc,
+ stage->max_input_width,
+ copy_ovrd, if_config_index);
+ break;
+ case IA_CSS_PIPELINE_BIN_COPY:
+ assert(false); /* TBI */
+ break;
+ case IA_CSS_PIPELINE_ISYS_COPY:
+ sh_css_sp_start_isys_copy(args->out_frame[0],
+ pipe_num, stage->max_input_width, if_config_index);
+ break;
+ case IA_CSS_PIPELINE_NO_FUNC:
+ assert(false);
+ break;
+ }
+}
+
+void
+sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ enum ia_css_pipe_id id,
+ u8 pipe_num,
+ bool xnr,
+ bool two_ppc,
+ bool continuous,
+ bool offline,
+ unsigned int required_bds_factor,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode,
+ const struct ia_css_metadata_config *md_config,
+ const struct ia_css_metadata_info *md_info,
+ const enum mipi_port_id port_id)
+{
+ /* Get first stage */
+ struct ia_css_pipeline_stage *stage = NULL;
+ struct ia_css_binary *first_binary = NULL;
+ struct ia_css_pipe *pipe = NULL;
+ unsigned int num;
+ enum ia_css_pipe_id pipe_id = id;
+ unsigned int thread_id;
+ u8 if_config_index, tmp_if_config_index;
+
+ if (!me->stages) {
+ dev_err(atomisp_dev, "%s called on a pipeline without stages\n",
+ __func__);
+ return; /* FIXME should be able to return an error */
+ }
+
+ first_binary = me->stages->binary;
+
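+	/*
+	 * Select the input-formatter configuration index: one per MIPI port
+	 * for (buffered) sensor input, not needed for memory input.
+	 */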
+ if (input_mode == IA_CSS_INPUT_MODE_SENSOR ||
+ input_mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ assert(port_id < N_MIPI_PORT_ID);
+ if (port_id >= N_MIPI_PORT_ID) /* should not happen but KW does not know */
+ return; /* we should be able to return an error */
+ if_config_index = (uint8_t)(port_id - MIPI_PORT0_ID);
+ } else if (input_mode == IA_CSS_INPUT_MODE_MEMORY) {
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ } else {
+ if_config_index = 0x0;
+ }
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));
+
+ /* Count stages */
+ for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
+ stage->stage_num = num;
+ ia_css_debug_pipe_graph_dump_stage(stage, id);
+ }
+ me->num_stages = num;
+
+ if (first_binary) {
+ /* Init pipeline data */
+ sh_css_sp_init_group(two_ppc, first_binary->input_format,
+ offline, if_config_index);
+ } /* if (first_binary != NULL) */
+
+ /* Signal the host immediately after start for SP_ISYS_COPY only */
+ if (me->num_stages == 1 &&
+ me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY)
+ sh_css_sp_group.config.no_isp_sync = true;
+
+ /* Init stage data */
+ sh_css_init_host2sp_frame_data();
+
+ sh_css_sp_group.pipe[thread_id].num_stages = 0;
+ sh_css_sp_group.pipe[thread_id].pipe_id = pipe_id;
+ sh_css_sp_group.pipe[thread_id].thread_id = thread_id;
+ sh_css_sp_group.pipe[thread_id].pipe_num = pipe_num;
+ sh_css_sp_group.pipe[thread_id].num_execs = me->num_execs;
+ sh_css_sp_group.pipe[thread_id].pipe_qos_config = QOS_INVALID;
+ sh_css_sp_group.pipe[thread_id].required_bds_factor = required_bds_factor;
+ sh_css_sp_group.pipe[thread_id].input_system_mode
+ = (uint32_t)input_mode;
+ sh_css_sp_group.pipe[thread_id].port_id = port_id;
+ sh_css_sp_group.pipe[thread_id].dvs_frame_delay = (uint32_t)me->dvs_frame_delay;
+
+	/*
+	 * TODO: next indicates from which queues parameters need to be
+	 * sampled, needs checking/improvement
+	 */
+ if (ia_css_pipeline_uses_params(me)) {
+ sh_css_sp_group.pipe[thread_id].pipe_config =
+ SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id;
+ }
+
+ /*
+ * For continuous use-cases, SP copy is responsible for sampling the
+ * parameters
+ */
+ if (continuous)
+ sh_css_sp_group.pipe[thread_id].pipe_config = 0;
+
+ sh_css_sp_group.pipe[thread_id].inout_port_config = me->inout_port_config;
+
+ pipe = find_pipe_by_num(pipe_num);
+ assert(pipe);
+ if (!pipe)
+ return;
+
+ sh_css_sp_group.pipe[thread_id].scaler_pp_lut = sh_css_pipe_get_pp_gdc_lut(pipe);
+
+ if (md_info && md_info->size > 0) {
+ sh_css_sp_group.pipe[thread_id].metadata.width = md_info->resolution.width;
+ sh_css_sp_group.pipe[thread_id].metadata.height = md_info->resolution.height;
+ sh_css_sp_group.pipe[thread_id].metadata.stride = md_info->stride;
+ sh_css_sp_group.pipe[thread_id].metadata.size = md_info->size;
+ ia_css_isys_convert_stream_format_to_mipi_format(
+ md_config->data_type, MIPI_PREDICTOR_NONE,
+ &sh_css_sp_group.pipe[thread_id].metadata.format);
+ }
+
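+	/*
+	 * For all pipes except the copy pipe, resolve the queue used to
+	 * return output frames.
+	 */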
+ sh_css_sp_group.pipe[thread_id].output_frame_queue_id = (uint32_t)SH_CSS_INVALID_QUEUE_ID;
+ if (pipe_id != IA_CSS_PIPE_ID_COPY) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id,
+ (enum sh_css_queue_id *)(
+ &sh_css_sp_group.pipe[thread_id].output_frame_queue_id));
+ }
+
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe_id, sh_css_sp_group.pipe[thread_id].inout_port_config);
+
+ for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
+ sh_css_sp_group.pipe[thread_id].num_stages++;
+ if (is_sp_stage(stage)) {
+ sp_init_sp_stage(stage, pipe_num, two_ppc,
+ copy_ovrd, if_config_index);
+ } else {
+ if ((stage->stage_num != 0) ||
+ SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(me->inout_port_config))
+ tmp_if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ else
+ tmp_if_config_index = if_config_index;
+ sp_init_stage(stage, pipe_num,
+ xnr, tmp_if_config_index, two_ppc);
+ }
+
+ store_sp_stage_data(pipe_id, pipe_num, num);
+ }
+ sh_css_sp_group.pipe[thread_id].pipe_config |= (uint32_t)
+ (me->acquire_isp_each_stage << IA_CSS_ACQUIRE_ISP_POS);
+ store_sp_group_data();
+}
+
+void
+sh_css_sp_uninit_pipeline(unsigned int pipe_num)
+{
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ /*memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));*/
+ sh_css_sp_group.pipe[thread_id].num_stages = 0;
+}
+
+bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_command)
+ / sizeof(int);
+ enum host2sp_commands last_cmd = host2sp_cmd_error;
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ /* Previous command must be handled by SP (by design) */
+ last_cmd = load_sp_array_uint(host_sp_com, offset);
+ if (last_cmd != host2sp_cmd_ready)
+ IA_CSS_ERROR("last host command not handled by SP(%d)", last_cmd);
+
+ store_sp_array_uint(host_sp_com, offset, host2sp_command);
+
+ return (last_cmd == host2sp_cmd_ready);
+}
+
+enum host2sp_commands
+sh_css_read_host2sp_command(void)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+ return (enum host2sp_commands)load_sp_array_uint(host_sp_com, offset);
+}
+
+/*
+ * Frame data is no longer part of the sp_stage structure but part of a
+ * separate structure. The aim is to make the sp_data struct static
+ * (it defines a pipeline) and to store the dynamic (per frame) data
+ * separately.
+ *
+ * This function must be called first everywhere you start constructing
+ * a new pipeline by defining one or more stages with use of the variable
+ * sh_css_sp_stage, even for the special cases like accelerator and
+ * copy_frame, which have a pipeline of just one stage.
+ */
+void
+sh_css_init_host2sp_frame_data(void)
+{
+ /* Clean table */
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+ /*
+	 * rvanimme: don't clean it, to save static frame info like ref_in,
+	 * ref_out, and tnr_frames. Once this static data is in a
+	 * separate data struct, this may be enabled (but still, there is
+	 * no need for it).
+ */
+}
+
+/*
+ * @brief Update the offline frame information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_offline_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame,
+ struct ia_css_metadata *metadata)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ assert(frame_num < NUM_CONTINUOUS_FRAMES);
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_offline_frames)
+ / sizeof(int);
+ offset += frame_num;
+ store_sp_array_uint(host_sp_com, offset, frame ? frame->data : 0);
+
+ /* Write metadata buffer into SP DMEM */
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_offline_metadata)
+ / sizeof(int);
+ offset += frame_num;
+ store_sp_array_uint(host_sp_com, offset, metadata ? metadata->address : 0);
+}
+
+/*
+ * @brief Update the mipi frame information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_mipi_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+	/* MIPI buffers are dedicated per port, so now there are more of them. */
+ assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_mipi_frames)
+ / sizeof(int);
+ offset += frame_num;
+
+ store_sp_array_uint(host_sp_com, offset,
+ frame ? frame->data : 0);
+}
+
+/*
+ * @brief Update the mipi metadata information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_mipi_metadata(
+ unsigned int frame_num,
+ struct ia_css_metadata *metadata)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int o;
+
+	/* MIPI buffers are dedicated per port, so now there are more of them. */
+ assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ o = offsetof(struct host_sp_communication, host2sp_mipi_metadata)
+ / sizeof(int);
+ o += frame_num;
+ store_sp_array_uint(host_sp_com, o,
+ metadata ? metadata->address : 0);
+}
+
+void
+sh_css_update_host2sp_num_mipi_frames(unsigned int num_frames)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_num_mipi_frames)
+ / sizeof(int);
+
+ store_sp_array_uint(host_sp_com, offset, num_frames);
+}
+
+void
+sh_css_update_host2sp_cont_num_raw_frames(unsigned int num_frames,
+ bool set_avail)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int extra_num_frames, avail_num_frames;
+ unsigned int offset, offset_extra;
+
+ /* Write new frame data into SP DMEM */
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
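+	/*
+	 * When set_avail is true, update the available-frame count and also
+	 * record how many frames were added on top of the previously
+	 * available number; otherwise only update the target frame count.
+	 */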
+ if (set_avail) {
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_cont_avail_num_raw_frames)
+ / sizeof(int);
+ avail_num_frames = load_sp_array_uint(host_sp_com, offset);
+ extra_num_frames = num_frames - avail_num_frames;
+ offset_extra = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_cont_extra_num_raw_frames)
+ / sizeof(int);
+ store_sp_array_uint(host_sp_com, offset_extra, extra_num_frames);
+ } else
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_cont_target_num_raw_frames)
+ / sizeof(int);
+
+ store_sp_array_uint(host_sp_com, offset, num_frames);
+}
+
+void
+sh_css_event_init_irq_mask(void)
+{
+ int i;
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask_init;
+
+ event_irq_mask_init.or_mask = IA_CSS_EVENT_TYPE_ALL;
+ event_irq_mask_init.and_mask = IA_CSS_EVENT_TYPE_NONE;
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ assert(sizeof(event_irq_mask_init) % HRT_BUS_BYTES == 0);
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[i]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask_init, sizeof(event_irq_mask_init));
+ }
+}
+
+int
+ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ unsigned int or_mask,
+ unsigned int and_mask)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+ unsigned int pipe_num;
+
+ assert(pipe);
+
+ assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);
+ /*
+	 * The Linux kernel does not have UINT16_MAX, therefore these two
+	 * asserts were commented out for Linux.
+ * Alternatives that were not chosen:
+ * - add a conditional #define for UINT16_MAX
+ * - compare with (uint16_t)~0 or 0xffff
+ * - different assert for Linux and Windows
+ */
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ IA_CSS_LOG("or_mask=%x, and_mask=%x", or_mask, and_mask);
+ event_irq_mask.or_mask = (uint16_t)or_mask;
+ event_irq_mask.and_mask = (uint16_t)and_mask;
+
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ if (pipe_num >= IA_CSS_PIPE_ID_NUM)
+ return -EINVAL;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[pipe_num]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask, sizeof(event_irq_mask));
+
+ return 0;
+}
+
+int
+ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
+ unsigned int *or_mask,
+ unsigned int *and_mask)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+ unsigned int pipe_num;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ IA_CSS_ENTER_LEAVE("");
+
+ assert(pipe);
+ assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);
+
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ if (pipe_num >= IA_CSS_PIPE_ID_NUM)
+ return -EINVAL;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[pipe_num]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask, sizeof(event_irq_mask));
+
+ if (or_mask)
+ *or_mask = event_irq_mask.or_mask;
+
+ if (and_mask)
+ *and_mask = event_irq_mask.and_mask;
+
+ return 0;
+}
+
+void
+sh_css_sp_set_sp_running(bool flag)
+{
+ sp_running = flag;
+}
+
+bool
+sh_css_sp_is_running(void)
+{
+ return sp_running;
+}
+
+void
+sh_css_sp_start_isp(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_sw_state;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_sw_state = fw->info.sp.sw_state;
+
+ if (sp_running)
+ return;
+
+ (void)HIVE_ADDR_sp_sw_state; /* Suppress warnings in CRUN */
+
+	/* No longer done here; the SP is started immediately */
+ /*ia_css_debug_pipe_graph_dump_epilogue();*/
+
+ store_sp_group_data();
+ store_sp_per_frame_data(fw);
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sw_state),
+ (uint32_t)(IA_CSS_SP_SW_TERMINATED));
+
+ /*
+ * Note 1: The sp_start_isp function contains a wait till
+ * the input network is configured by the SP.
+	 * Note 2: Not all SP binaries support host2sp_commands.
+ * In case a binary does support it, the host2sp_command
+ * will have status cmd_ready after return of the function
+ * sh_css_hrt_sp_start_isp. There is no race-condition here
+ * because only after the process_frame command has been
+ * received, the SP starts configuring the input network.
+ */
+
+ /*
+ * We need to set sp_running before we call ia_css_mmu_invalidate_cache
+	 * as ia_css_mmu_invalidate_cache checks sp_running to avoid
+	 * accessing DMEM while the SP is not powered.
+ */
+ sp_running = true;
+ ia_css_mmu_invalidate_cache();
+ /* Invalidate all MMU caches */
+ mmu_invalidate_cache_all();
+
+ ia_css_spctrl_start(SP0_ID);
+}
+
+bool
+ia_css_isp_has_started(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;
+ (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppress warnings in CRUN */
+
+ return (bool)load_sp_uint(ia_css_ispctrl_sp_isp_started);
+}
+
+/*
+ * @brief Initialize the DMA software-mask in the debug mode.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+bool
+sh_css_sp_init_dma_sw_reg(int dma_id)
+{
+ int i;
+
+ /* enable all the DMA channels */
+ for (i = 0; i < N_DMA_CHANNEL_ID; i++) {
+ /* enable the writing request */
+ sh_css_sp_set_dma_sw_reg(dma_id,
+ i,
+ 0,
+ true);
+ /* enable the reading request */
+ sh_css_sp_set_dma_sw_reg(dma_id,
+ i,
+ 1,
+ true);
+ }
+
+ return true;
+}
+
+/*
+ * @brief Set the DMA software-mask in the debug mode.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+bool
+sh_css_sp_set_dma_sw_reg(int dma_id,
+ int channel_id,
+ int request_type,
+ bool enable)
+{
+ u32 sw_reg;
+ u32 bit_val;
+ u32 bit_offset;
+ u32 bit_mask;
+
+ (void)dma_id;
+
+ assert(channel_id >= 0 && channel_id < N_DMA_CHANNEL_ID);
+ assert(request_type >= 0);
+
+ /* get the software-mask */
+ sw_reg =
+ sh_css_sp_group.debug.dma_sw_reg;
+
+ /* get the offset of the target bit */
+ bit_offset = (8 * request_type) + channel_id;
+
+ /* clear the value of the target bit */
+ bit_mask = ~(1 << bit_offset);
+ sw_reg &= bit_mask;
+
+ /* set the value of the bit for the DMA channel */
+ bit_val = enable ? 1 : 0;
+ bit_val <<= bit_offset;
+ sw_reg |= bit_val;
+
+ /* update the software status of DMA channels */
+ sh_css_sp_group.debug.dma_sw_reg = sw_reg;
+
+ return true;
+}
+
+void
+sh_css_sp_reset_global_vars(void)
+{
+ memset(&sh_css_sp_group, 0, sizeof(struct sh_css_sp_group));
+ memset(&sh_css_sp_stage, 0, sizeof(struct sh_css_sp_stage));
+ memset(&sh_css_isp_stage, 0, sizeof(struct sh_css_isp_stage));
+ memset(&sh_css_sp_output, 0, sizeof(struct sh_css_sp_output));
+ memset(&per_frame_data, 0, sizeof(struct sh_css_sp_per_frame_data));
+}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.h b/drivers/staging/media/atomisp/pci/sh_css_sp.h
new file mode 100644
index 000000000000..78aec5b7e8fa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_SP_H_
+#define _SH_CSS_SP_H_
+
+#include <system_global.h>
+#include <type_support.h>
+#include "input_formatter.h"
+
+#include "ia_css_binary.h"
+#include "ia_css_types.h"
+#include "ia_css_pipeline.h"
+
+/* Function to initialize the data and bss section descriptors of the binary */
+void
+sh_css_sp_store_init_dmem(const struct ia_css_fw_info *fw);
+
+void
+store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num,
+ unsigned int stage);
+
+void
+sh_css_stage_write_binary_info(struct ia_css_binary_info *info);
+
+void
+store_sp_group_data(void);
+
+/* Start binary (jpeg) copy on the SP */
+void
+sh_css_sp_start_binary_copy(unsigned int pipe_num,
+ struct ia_css_frame *out_frame,
+ unsigned int two_ppc);
+
+unsigned int
+sh_css_sp_get_binary_copy_size(void);
+
+/* Return the value of a SW interrupt */
+unsigned int
+sh_css_sp_get_sw_interrupt_value(unsigned int irq);
+
+void
+sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ enum ia_css_pipe_id id,
+ u8 pipe_num,
+ bool xnr,
+ bool two_ppc,
+ bool continuous,
+ bool offline,
+ unsigned int required_bds_factor,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode,
+ const struct ia_css_metadata_config *md_config,
+ const struct ia_css_metadata_info *md_info,
+ const enum mipi_port_id port_id);
+
+void
+sh_css_sp_uninit_pipeline(unsigned int pipe_num);
+
+bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command);
+
+enum host2sp_commands
+sh_css_read_host2sp_command(void);
+
+void
+sh_css_init_host2sp_frame_data(void);
+
+/**
+ * @brief Update the offline frame information in host_sp_communication.
+ *
+ * @param[in] frame_num The offline frame number.
+ * @param[in] frame The pointer to the offline frame.
+ * @param[in] metadata The pointer to the offline metadata.
+ */
+void
+sh_css_update_host2sp_offline_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame,
+ struct ia_css_metadata *metadata);
+
+/**
+ * @brief Update the mipi frame information in host_sp_communication.
+ *
+ * @param[in] frame_num The mipi frame number.
+ * @param[in] frame The pointer to the mipi frame.
+ */
+void
+sh_css_update_host2sp_mipi_frame(
+ unsigned int frame_num,
+ struct ia_css_frame *frame);
+
+/**
+ * @brief Update the mipi metadata information in host_sp_communication.
+ *
+ * @param[in] frame_num The mipi frame number.
+ * @param[in] metadata The pointer to the mipi metadata.
+ */
+void
+sh_css_update_host2sp_mipi_metadata(
+ unsigned int frame_num,
+ struct ia_css_metadata *metadata);
+
+/**
+ * @brief Update the nr of mipi frames to use in host_sp_communication.
+ *
+ * @param[in] num_frames The number of mipi frames to use.
+ */
+void
+sh_css_update_host2sp_num_mipi_frames(unsigned int num_frames);
+
+/**
+ * @brief Update the nr of offline frames to use in host_sp_communication.
+ *
+ * @param[in] num_frames The number of raw frames to use.
+ */
+void
+sh_css_update_host2sp_cont_num_raw_frames(unsigned int num_frames,
+ bool set_avail);
+
+void
+sh_css_event_init_irq_mask(void);
+
+void
+sh_css_sp_start_isp(void);
+
+void
+sh_css_sp_set_sp_running(bool flag);
+
+bool
+sh_css_sp_is_running(void);
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void
+sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state);
+
+#endif
+
+void
+sh_css_sp_set_if_configs(
+ const input_formatter_cfg_t *config_a,
+ const input_formatter_cfg_t *config_b,
+ const uint8_t if_config_index);
+
+void
+sh_css_sp_program_input_circuit(int fmt_type,
+ int ch_id,
+ enum ia_css_input_mode input_mode);
+
+void
+sh_css_sp_configure_sync_gen(int width,
+ int height,
+ int hblank_cycles,
+ int vblank_cycles);
+
+void
+sh_css_sp_configure_prbs(int seed);
+
+void
+sh_css_sp_configure_enable_raw_pool_locking(bool lock_all);
+
+void
+sh_css_sp_enable_isys_event_queue(bool enable);
+
+void
+sh_css_sp_set_disable_continuous_viewfinder(bool flag);
+
+void
+sh_css_sp_reset_global_vars(void);
+
+/**
+ * @brief Initialize the DMA software-mask in the debug mode.
+ * This API should ONLY be called in the debugging mode, and it should
+ * always be called before the first call of
+ * "sh_css_sp_set_dma_sw_reg(...)".
+ *
+ * @param[in] dma_id The ID of the target DMA.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool
+sh_css_sp_init_dma_sw_reg(int dma_id);
+
+/**
+ * @brief Set the DMA software-mask in the debug mode.
+ * This API should ONLY be called in the debugging mode.
+ * "sh_css_sp_init_dma_sw_reg(...)" must be called before this
+ * API is called for the first time.
+ *
+ * @param[in] dma_id The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * @param[in] enable If it is "true", the target DMA
+ * channel is enabled in the software.
+ * Otherwise, the target DMA channel
+ * is disabled in the software.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool
+sh_css_sp_set_dma_sw_reg(int dma_id,
+ int channel_id,
+ int request_type,
+ bool enable);
+
+extern struct sh_css_sp_group sh_css_sp_group;
+extern struct sh_css_sp_stage sh_css_sp_stage;
+extern struct sh_css_isp_stage sh_css_isp_stage;
+
+#endif /* _SH_CSS_SP_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_stream_format.c b/drivers/staging/media/atomisp/pci/sh_css_stream_format.c
new file mode 100644
index 000000000000..24ec09703d9a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_stream_format.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "sh_css_stream_format.h"
+#include <ia_css_stream_format.h>
+
+unsigned int sh_css_stream_format_2_bits_per_subpixel(
+ enum atomisp_input_format format)
+{
+ unsigned int rval;
+
+ switch (format) {
+ case ATOMISP_INPUT_FORMAT_RGB_444:
+ rval = 4;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_555:
+ rval = 5;
+ break;
+ case ATOMISP_INPUT_FORMAT_RGB_565:
+ case ATOMISP_INPUT_FORMAT_RGB_666:
+ case ATOMISP_INPUT_FORMAT_RAW_6:
+ rval = 6;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_7:
+ rval = 7;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+ case ATOMISP_INPUT_FORMAT_YUV420_8:
+ case ATOMISP_INPUT_FORMAT_YUV422_8:
+ case ATOMISP_INPUT_FORMAT_RGB_888:
+ case ATOMISP_INPUT_FORMAT_RAW_8:
+ case ATOMISP_INPUT_FORMAT_BINARY_8:
+ case ATOMISP_INPUT_FORMAT_USER_DEF1:
+ case ATOMISP_INPUT_FORMAT_USER_DEF2:
+ case ATOMISP_INPUT_FORMAT_USER_DEF3:
+ case ATOMISP_INPUT_FORMAT_USER_DEF4:
+ case ATOMISP_INPUT_FORMAT_USER_DEF5:
+ case ATOMISP_INPUT_FORMAT_USER_DEF6:
+ case ATOMISP_INPUT_FORMAT_USER_DEF7:
+ case ATOMISP_INPUT_FORMAT_USER_DEF8:
+ rval = 8;
+ break;
+ case ATOMISP_INPUT_FORMAT_YUV420_10:
+ case ATOMISP_INPUT_FORMAT_YUV422_10:
+ case ATOMISP_INPUT_FORMAT_RAW_10:
+ rval = 10;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_12:
+ rval = 12;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_14:
+ rval = 14;
+ break;
+ case ATOMISP_INPUT_FORMAT_RAW_16:
+ case ATOMISP_INPUT_FORMAT_YUV420_16:
+ case ATOMISP_INPUT_FORMAT_YUV422_16:
+ rval = 16;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+
+ return rval;
+}
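As a usage sketch (illustrative only; the helper name and the choice of RAW10 are not from the driver), the mapping above can be used to size a raw input line:

/* Hedged sketch: bits needed for one line of RAW10 input of a given width. */
static unsigned int example_raw10_bits_per_line(unsigned int width_in_subpixels)
{
	unsigned int bits_per_subpixel =
	    sh_css_stream_format_2_bits_per_subpixel(ATOMISP_INPUT_FORMAT_RAW_10);

	return width_in_subpixels * bits_per_subpixel;	/* 10 bits each for RAW10 */
}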
diff --git a/drivers/staging/media/atomisp/pci/sh_css_stream_format.h b/drivers/staging/media/atomisp/pci/sh_css_stream_format.h
new file mode 100644
index 000000000000..7f5a3be00c6b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_stream_format.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SH_CSS_STREAM_FORMAT_H
+#define __SH_CSS_STREAM_FORMAT_H
+
+#include <ia_css_stream_format.h>
+
+unsigned int sh_css_stream_format_2_bits_per_subpixel(
+ enum atomisp_input_format format);
+
+#endif /* __SH_CSS_STREAM_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_struct.h b/drivers/staging/media/atomisp/pci/sh_css_struct.h
new file mode 100644
index 000000000000..c51fcb725208
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_struct.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SH_CSS_STRUCT_H
+#define __SH_CSS_STRUCT_H
+
+/* This header file contains the definition of the
+   sh_css struct and friends; logically the file would
+   probably be called sh_css.h after the pattern
+   <type>.h, but sh_css.h is the predecessor of ia_css.h,
+   so this could cause confusion; hence the _struct
+   in the filename.
+*/
+
+#include <type_support.h>
+#include <system_local.h>
+#include "ia_css_pipeline.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_queue.h"
+#include "ia_css_irq.h"
+
+struct sh_css {
+ struct ia_css_pipe *active_pipes[IA_CSS_PIPELINE_NUM_MAX];
+ /* All of the pipes created at any point of time. At this moment there can
+ * be no more than MAX_SP_THREADS of them because pipe_num is reused as SP
+ * thread_id to which a pipe's pipeline is associated. At a later point, if
+ * we support more pipe objects, we should add test code to test that
+ * possibility. Also, active_pipes[] should be able to hold only
+ * SH_CSS_MAX_SP_THREADS objects. Anything else is misleading. */
+ struct ia_css_pipe *all_pipes[IA_CSS_PIPELINE_NUM_MAX];
+ void *(*malloc)(size_t bytes, bool zero_mem);
+ void (*free)(void *ptr);
+ void (*flush)(struct ia_css_acc_fw *fw);
+
+/* ISP2401 */
+ void *(*malloc_ex)(size_t bytes, bool zero_mem, const char *caller_func,
+ int caller_line);
+ void (*free_ex)(void *ptr, const char *caller_func, int caller_line);
+
+/* ISP2400 */
+ bool stop_copy_preview;
+
+ bool check_system_idle;
+ unsigned int num_cont_raw_frames;
+ unsigned int num_mipi_frames[N_CSI_PORTS];
+ struct ia_css_frame
+ *mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ struct ia_css_metadata
+ *mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ unsigned int
+ mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
+ unsigned int mipi_frame_size[N_CSI_PORTS];
+ ia_css_ptr sp_bin_addr;
+ hrt_data page_table_base_index;
+
+ unsigned int
+ size_mem_words; /* \deprecated{Use ia_css_mipi_buffer_config instead.}*/
+ enum ia_css_irq_type irq_type;
+ unsigned int pipe_counter;
+
+ unsigned int type; /* 2400 or 2401 for now */
+};
+
+#define IPU_2400 1
+#define IPU_2401 2
+
+#define IS_2400() (my_css.type == IPU_2400)
+#define IS_2401() (my_css.type == IPU_2401)
+
+extern struct sh_css my_css;
+
+#endif /* __SH_CSS_STRUCT_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_uds.h b/drivers/staging/media/atomisp/pci/sh_css_uds.h
new file mode 100644
index 000000000000..951f32da9aed
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_uds.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _SH_CSS_UDS_H_
+#define _SH_CSS_UDS_H_
+
+#include <type_support.h>
+
+#define SIZE_OF_SH_CSS_UDS_INFO_IN_BITS (4 * 16)
+#define SIZE_OF_SH_CSS_CROP_POS_IN_BITS (2 * 16)
+
+/* Uds types, used in pipeline_global.h and sh_css_internal.h */
+
+struct sh_css_uds_info {
+ u16 curr_dx;
+ u16 curr_dy;
+ u16 xc;
+ u16 yc;
+};
+
+struct sh_css_crop_pos {
+ u16 x;
+ u16 y;
+};
+
+#endif /* _SH_CSS_UDS_H_ */
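The two *_IN_BITS macros above are intended to mirror the packed structure layouts; a hedged compile-time check (not part of the original header, and assuming static_assert from linux/build_bug.h is available) makes that relationship explicit:

/* Illustrative only: keep the bit-size macros in sync with the structs. */
static_assert(sizeof(struct sh_css_uds_info) * 8 == SIZE_OF_SH_CSS_UDS_INFO_IN_BITS,
	      "sh_css_uds_info no longer matches SIZE_OF_SH_CSS_UDS_INFO_IN_BITS");
static_assert(sizeof(struct sh_css_crop_pos) * 8 == SIZE_OF_SH_CSS_CROP_POS_IN_BITS,
	      "sh_css_crop_pos no longer matches SIZE_OF_SH_CSS_CROP_POS_IN_BITS");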
diff --git a/drivers/staging/media/atomisp/pci/sh_css_version.c b/drivers/staging/media/atomisp/pci/sh_css_version.c
new file mode 100644
index 000000000000..ba8660f0cb81
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/sh_css_version.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "../../include/linux/atomisp.h"
+#include "../../include/linux/atomisp_platform.h"
+#include "ia_css_version.h"
+#include "ia_css_version_data.h"
+#include "ia_css_err.h"
+#include "sh_css_firmware.h"
+
+int
+ia_css_get_version(char *version, int max_size)
+{
+ char *css_version;
+
+ if (!IS_ISP2401)
+ css_version = ISP2400_CSS_VERSION_STRING;
+ else
+ css_version = ISP2401_CSS_VERSION_STRING;
+
+ if (max_size <= (int)strlen(css_version) + (int)strlen(sh_css_get_fw_version()) + 5)
+ return -EINVAL;
+ strscpy(version, css_version, max_size);
+ strcat(version, "FW:");
+ strcat(version, sh_css_get_fw_version());
+ strcat(version, "; ");
+ return 0;
+}
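A hedged caller sketch for the helper above; the buffer size and the logging are illustrative and not taken from the driver:

/* Illustrative only: fetch "<CSS version>FW:<fw version>; " into a local buffer. */
static void example_log_css_version(void)
{
	char version[128];	/* must exceed both strings plus "FW:" and "; " */

	if (!ia_css_get_version(version, sizeof(version)))
		pr_info("atomisp CSS version: %s\n", version);
}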
diff --git a/drivers/staging/media/atomisp/pci/str2mem_defs.h b/drivers/staging/media/atomisp/pci/str2mem_defs.h
new file mode 100644
index 000000000000..fc06d4e4589f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/str2mem_defs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _ST2MEM_DEFS_H
+#define _ST2MEM_DEFS_H
+
+#define _STR2MEM_CRUN_BIT 0x100000
+#define _STR2MEM_CMD_BITS 0x0F0000
+#define _STR2MEM_COUNT_BITS 0x00FFFF
+
+#define _STR2MEM_BLOCKS_CMD 0xA0000
+#define _STR2MEM_PACKETS_CMD 0xB0000
+#define _STR2MEM_BYTES_CMD 0xC0000
+#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000
+
+#define _STR2MEM_SOFT_RESET_REG_ID 0
+#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1
+#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2
+#define _STR2MEM_BIT_SWAPPING_REG_ID 3
+#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4
+#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5
+#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6
+#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7
+#define _STR2MEM_EN_STAT_UPDATE_ID 8
+
+#define _STR2MEM_REG_ALIGN 4
+
+#endif /* _ST2MEM_DEFS_H */
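For illustration, the masks above split one command word into a command code (bits 16..19) and a transfer count (bits 0..15); a hedged helper composing a byte-transfer command (not part of the original header) could look like this:

/* Illustrative only: pack a "transfer N bytes" str2mem command word. */
static inline unsigned int example_str2mem_bytes_cmd(unsigned int byte_count)
{
	return _STR2MEM_BYTES_CMD | (byte_count & _STR2MEM_COUNT_BITS);
}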
diff --git a/drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h
new file mode 100644
index 000000000000..8efbaa55f752
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _streaming_to_mipi_defs_h
+#define _streaming_to_mipi_defs_h
+
+#define HIVE_STR_TO_MIPI_VALID_A_BIT 0
+#define HIVE_STR_TO_MIPI_VALID_B_BIT 1
+#define HIVE_STR_TO_MIPI_SOL_BIT 2
+#define HIVE_STR_TO_MIPI_EOL_BIT 3
+#define HIVE_STR_TO_MIPI_SOF_BIT 4
+#define HIVE_STR_TO_MIPI_EOF_BIT 5
+#define HIVE_STR_TO_MIPI_CH_ID_LSB 6
+
+#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1)
+
+#endif /* _streaming_to_mipi_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/system_global.h b/drivers/staging/media/atomisp/pci/system_global.h
new file mode 100644
index 000000000000..e8a29f73d67a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/system_global.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ */
+
+#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
+#define __SYSTEM_GLOBAL_H_INCLUDED__
+
+/*
+ * Create a list of HAS and IS properties that defines the system
+ * Those are common for both ISP2400 and ISP2401
+ *
+ * The configuration assumes the following
+ * - The system is heterogeneous; multiple cells and device classes
+ * - The cell and device instances are homogeneous, each device type
+ * belongs to the same class
+ * - Device instances supporting a subset of the class capabilities are
+ * allowed
+ *
+ * We could manage different device classes through the enumerated
+ * lists (C) or the use of classes (C++), but that is presently not
+ * fully supported
+ *
+ * N.B. the 3 input formatters are of 2 different classes
+ */
+
+#define DMA_DDR_TO_VAMEM_WORKAROUND
+#define DMA_DDR_TO_HMEM_WORKAROUND
+
+/*
+ * The longest allowed (uninterruptible) bus transfer; it does not
+ * take stalling into account
+ */
+#define HIVE_ISP_MAX_BURST_LENGTH 1024
+
+/*
+ * Maximum allowed burst length in words for the ISP DMA.
+ * The ISP2401 value is set to 2 to prevent the ISP DMA from blocking
+ * the bus for too long; as the input system can only buffer
+ * 2 lines on Moorefield and Cherrytrail, the input system buffers
+ * may overflow if blocked for too long (BZ 2726).
+ */
+#define ISP2400_DMA_MAX_BURST_LENGTH 128
+#define ISP2401_DMA_MAX_BURST_LENGTH 2
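As a hedged sketch (the selection helper below is assumed for illustration and is not part of the driver), code that needs the per-variant cap could pick it at run time, e.g. via the IS_ISP2401 check used elsewhere in this series:

/* Illustrative only: choose the DMA burst-length cap for the current ISP. */
static inline unsigned int example_isp_dma_max_burst_length(void)
{
	return IS_ISP2401 ? ISP2401_DMA_MAX_BURST_LENGTH
			  : ISP2400_DMA_MAX_BURST_LENGTH;
}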
+
+#include <hive_isp_css_defs.h>
+#include <type_support.h>
+
+/* This interface is deprecated */
+#include "hive_types.h"
+
+/*
+ * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply
+ */
+#define HRT_VADDRESS_WIDTH 32
+
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
+#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
+
+/* The main bus connecting all devices */
+#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
+#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
+
+typedef u32 hrt_bus_align_t;
+
+/*
+ * Enumerate the devices, device access through the API is by ID,
+ * through the DLI by address. The enumerator terminators are used
+ * to size the wiring arrays and as an exception value.
+ */
+typedef enum {
+ DDR0_ID = 0,
+ N_DDR_ID
+} ddr_ID_t;
+
+typedef enum {
+ ISP0_ID = 0,
+ N_ISP_ID
+} isp_ID_t;
+
+typedef enum {
+ SP0_ID = 0,
+ N_SP_ID
+} sp_ID_t;
+
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+
+typedef enum {
+ DMA0_ID = 0,
+ N_DMA_ID
+} dma_ID_t;
+
+typedef enum {
+ GDC0_ID = 0,
+ GDC1_ID,
+ N_GDC_ID
+} gdc_ID_t;
+
+/* This extra define is needed because we also want to use it
+ * in the preprocessor, and that does not work with enums.
+ */
+#define N_GDC_ID_CPP 2
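To illustrate why the extra define above is needed: the C preprocessor cannot evaluate enumerators, so only the macro form is usable in conditional compilation (hedged example, not from the driver):

#if N_GDC_ID_CPP > 1		/* works: N_GDC_ID_CPP is a macro */
#define EXAMPLE_HAS_SECOND_GDC	1
#endif
/*
 * "#if N_GDC_ID > 1" would not work: the preprocessor replaces the unknown
 * identifier N_GDC_ID with 0, so the test would silently evaluate to false.
 */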
+
+typedef enum {
+ VAMEM0_ID = 0,
+ VAMEM1_ID,
+ VAMEM2_ID,
+ N_VAMEM_ID
+} vamem_ID_t;
+
+typedef enum {
+ BAMEM0_ID = 0,
+ N_BAMEM_ID
+} bamem_ID_t;
+
+typedef enum {
+ HMEM0_ID = 0,
+ N_HMEM_ID
+} hmem_ID_t;
+
+typedef enum {
+ IRQ0_ID = 0, /* GP IRQ block */
+ IRQ1_ID, /* Input formatter */
+ IRQ2_ID, /* input system */
+ IRQ3_ID, /* input selector */
+ N_IRQ_ID
+} irq_ID_t;
+
+typedef enum {
+ FIFO_MONITOR0_ID = 0,
+ N_FIFO_MONITOR_ID
+} fifo_monitor_ID_t;
+
+typedef enum {
+ GP_DEVICE0_ID = 0,
+ N_GP_DEVICE_ID
+} gp_device_ID_t;
+
+typedef enum {
+ GP_TIMER0_ID = 0,
+ GP_TIMER1_ID,
+ GP_TIMER2_ID,
+ GP_TIMER3_ID,
+ GP_TIMER4_ID,
+ GP_TIMER5_ID,
+ GP_TIMER6_ID,
+ GP_TIMER7_ID,
+ N_GP_TIMER_ID
+} gp_timer_ID_t;
+
+typedef enum {
+ GPIO0_ID = 0,
+ N_GPIO_ID
+} gpio_ID_t;
+
+typedef enum {
+ TIMED_CTRL0_ID = 0,
+ N_TIMED_CTRL_ID
+} timed_ctrl_ID_t;
+
+typedef enum {
+ INPUT_FORMATTER0_ID = 0,
+ INPUT_FORMATTER1_ID,
+ INPUT_FORMATTER2_ID,
+ INPUT_FORMATTER3_ID,
+ N_INPUT_FORMATTER_ID
+} input_formatter_ID_t;
+
+/* The IF RST is outside the IF */
+#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
+#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
+#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
+#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
+
+#define INPUT_FORMATTER0_SRST_MASK 0x0001
+#define INPUT_FORMATTER1_SRST_MASK 0x0002
+#define INPUT_FORMATTER2_SRST_MASK 0x0004
+#define INPUT_FORMATTER3_SRST_MASK 0x0008
+
+typedef enum {
+ INPUT_SYSTEM0_ID = 0,
+ N_INPUT_SYSTEM_ID
+} input_system_ID_t;
+
+typedef enum {
+ RX0_ID = 0,
+ N_RX_ID
+} rx_ID_t;
+
+enum mipi_port_id {
+ MIPI_PORT0_ID = 0,
+ MIPI_PORT1_ID,
+ MIPI_PORT2_ID,
+ N_MIPI_PORT_ID
+};
+
+#define N_RX_CHANNEL_ID 4
+
+typedef enum {
+ CAPTURE_UNIT0_ID = 0,
+ CAPTURE_UNIT1_ID,
+ CAPTURE_UNIT2_ID,
+ ACQUISITION_UNIT0_ID,
+ DMA_UNIT0_ID,
+ CTRL_UNIT0_ID,
+ GPREGS_UNIT0_ID,
+ FIFO_UNIT0_ID,
+ IRQ_UNIT0_ID,
+ N_SUB_SYSTEM_ID
+} sub_system_ID_t;
+
+#define N_CAPTURE_UNIT_ID 3
+#define N_ACQUISITION_UNIT_ID 1
+#define N_CTRL_UNIT_ID 1
+
+
+enum ia_css_isp_memories {
+ IA_CSS_ISP_PMEM0 = 0,
+ IA_CSS_ISP_DMEM0,
+ IA_CSS_ISP_VMEM0,
+ IA_CSS_ISP_VAMEM0,
+ IA_CSS_ISP_VAMEM1,
+ IA_CSS_ISP_VAMEM2,
+ IA_CSS_ISP_HMEM0,
+ IA_CSS_SP_DMEM0,
+ IA_CSS_DDR,
+ N_IA_CSS_MEMORIES
+};
+
+#define IA_CSS_NUM_MEMORIES 9
+/* For driver compatibility */
+#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+/*
+ * ISP2401 specific enums
+ */
+
+typedef enum {
+ ISYS_IRQ0_ID = 0, /* port a */
+ ISYS_IRQ1_ID, /* port b */
+ ISYS_IRQ2_ID, /* port c */
+ N_ISYS_IRQ_ID
+} isys_irq_ID_t;
+
+
+/*
+ * Input-buffer Controller.
+ */
+typedef enum {
+ IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */
+ IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */
+ IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */
+ N_IBUF_CTRL_ID
+} ibuf_ctrl_ID_t;
+/* end of Input-buffer Controller */
+
+/*
+ * Stream2MMIO.
+ */
+typedef enum {
+ STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */
+ STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */
+ STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */
+ N_STREAM2MMIO_ID
+} stream2mmio_ID_t;
+
+typedef enum {
+ /*
+ * Stream2MMIO 0 has 8 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
+ *
+ * Stream2MMIO 1 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ *
+ * Stream2MMIO 2 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ */
+ STREAM2MMIO_SID0_ID = 0,
+ STREAM2MMIO_SID1_ID,
+ STREAM2MMIO_SID2_ID,
+ STREAM2MMIO_SID3_ID,
+ STREAM2MMIO_SID4_ID,
+ STREAM2MMIO_SID5_ID,
+ STREAM2MMIO_SID6_ID,
+ STREAM2MMIO_SID7_ID,
+ N_STREAM2MMIO_SID_ID
+} stream2mmio_sid_ID_t;
+/* end of Stream2MMIO */
+
+/**
+ * Input System 2401: CSI-MIPI receiver.
+ */
+typedef enum {
+ CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */
+ CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */
+ CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */
+ N_CSI_RX_BACKEND_ID
+} csi_rx_backend_ID_t;
+
+typedef enum {
+ CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */
+ CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */
+ CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */
+#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1)
+} csi_rx_frontend_ID_t;
+
+typedef enum {
+ CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */
+ CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */
+ CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */
+ CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
+ N_CSI_RX_DLANE_ID
+} csi_rx_fe_dlane_ID_t;
+/* end of CSI-MIPI receiver */
+
+typedef enum {
+ ISYS2401_DMA0_ID = 0,
+ N_ISYS2401_DMA_ID
+} isys2401_dma_ID_t;
+
+/**
+ * Pixel-generator. ("system_global.h")
+ */
+typedef enum {
+ PIXELGEN0_ID = 0,
+ PIXELGEN1_ID,
+ PIXELGEN2_ID,
+ N_PIXELGEN_ID
+} pixelgen_ID_t;
+/* end of pixel-generator. ("system_global.h") */
+
+typedef enum {
+ INPUT_SYSTEM_CSI_PORT0_ID = 0,
+ INPUT_SYSTEM_CSI_PORT1_ID,
+ INPUT_SYSTEM_CSI_PORT2_ID,
+
+ INPUT_SYSTEM_PIXELGEN_PORT0_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT1_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT2_ID,
+
+ N_INPUT_SYSTEM_INPUT_PORT_ID
+} input_system_input_port_ID_t;
+
+#define N_INPUT_SYSTEM_CSI_PORT 3
+
+typedef enum {
+ ISYS2401_DMA_CHANNEL_0 = 0,
+ ISYS2401_DMA_CHANNEL_1,
+ ISYS2401_DMA_CHANNEL_2,
+ ISYS2401_DMA_CHANNEL_3,
+ ISYS2401_DMA_CHANNEL_4,
+ ISYS2401_DMA_CHANNEL_5,
+ ISYS2401_DMA_CHANNEL_6,
+ ISYS2401_DMA_CHANNEL_7,
+ ISYS2401_DMA_CHANNEL_8,
+ ISYS2401_DMA_CHANNEL_9,
+ ISYS2401_DMA_CHANNEL_10,
+ ISYS2401_DMA_CHANNEL_11,
+ N_ISYS2401_DMA_CHANNEL
+} isys2401_dma_channel;
+
+#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/system_local.c b/drivers/staging/media/atomisp/pci/system_local.c
new file mode 100644
index 000000000000..a8a93760d5b1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/system_local.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include "system_local.h"
+
+/* ISP */
+const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ 0x0000000000020000ULL
+};
+
+const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ 0x0000000000200000ULL
+};
+
+const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ 0x0000000000100000ULL
+};
+
+/* SP */
+const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ 0x0000000000010000ULL
+};
+
+const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ 0x0000000000300000ULL
+};
+
+/* MMU */
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+const hrt_address MMU_BASE[N_MMU_ID] = {
+ 0x0000000000070000ULL,
+ 0x00000000000A0000ULL
+};
+
+/* DMA */
+const hrt_address DMA_BASE[N_DMA_ID] = {
+ 0x0000000000040000ULL
+};
+
+const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
+ 0x00000000000CA000ULL
+};
+
+/* IRQ */
+const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ 0x0000000000000500ULL,
+ 0x0000000000030A00ULL,
+ 0x000000000008C000ULL,
+ 0x0000000000090200ULL
+};
+
+/*
+ 0x0000000000000500ULL};
+ */
+
+/* GDC */
+const hrt_address GDC_BASE[N_GDC_ID] = {
+ 0x0000000000050000ULL,
+ 0x0000000000060000ULL
+};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ 0x0000000000000000ULL
+};
+
+/*
+const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ 0x0000000000000000ULL};
+
+const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000090000ULL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000000000ULL
+};
+
+/* GP TIMER: all timer registers are intertwined, so having
+ * multiple base addresses for different timers does not help
+ */
+const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x0000000000000600ULL;
+
+/* GPIO */
+const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ 0x0000000000000400ULL
+};
+
+/* TIMED_CTRL */
+const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ 0x0000000000000100ULL
+};
+
+/* INPUT_FORMATTER */
+const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ 0x0000000000030000ULL,
+ 0x0000000000030200ULL,
+ 0x0000000000030400ULL,
+ 0x0000000000030600ULL
+}; /* memcpy() */
+
+/* INPUT_SYSTEM */
+const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ 0x0000000000080000ULL
+};
+
+/* 0x0000000000081000ULL, */ /* capture A */
+/* 0x0000000000082000ULL, */ /* capture B */
+/* 0x0000000000083000ULL, */ /* capture C */
+/* 0x0000000000084000ULL, */ /* Acquisition */
+/* 0x0000000000085000ULL, */ /* DMA */
+/* 0x0000000000089000ULL, */ /* ctrl */
+/* 0x000000000008A000ULL, */ /* GP regs */
+/* 0x000000000008B000ULL, */ /* FIFO */
+/* 0x000000000008C000ULL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+const hrt_address RX_BASE[N_RX_ID] = {
+ 0x0000000000080100ULL
+};
+
+/* IBUF_CTRL, part of the Input System 2401 */
+const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
+ 0x00000000000C1800ULL, /* ibuf controller A */
+ 0x00000000000C3800ULL, /* ibuf controller B */
+ 0x00000000000C5800ULL /* ibuf controller C */
+};
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
+ 0x00000000000C1400ULL, /* port a */
+ 0x00000000000C3400ULL, /* port b */
+ 0x00000000000C5400ULL /* port c */
+};
+
+/* CSI FE, part of the Input System 2401 */
+const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
+ 0x00000000000C0400ULL, /* csi fe controller A */
+ 0x00000000000C2400ULL, /* csi fe controller B */
+ 0x00000000000C4400ULL /* csi fe controller C */
+};
+
+/* CSI BE, part of the Input System 2401 */
+const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
+ 0x00000000000C0800ULL, /* csi be controller A */
+ 0x00000000000C2800ULL, /* csi be controller B */
+ 0x00000000000C4800ULL /* csi be controller C */
+};
+
+/* PIXEL Generator, part of the Input System 2401 */
+const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
+ 0x00000000000C1000ULL, /* pixel gen controller A */
+ 0x00000000000C3000ULL, /* pixel gen controller B */
+ 0x00000000000C5000ULL /* pixel gen controller C */
+};
+
+/* Stream2MMIO, part of the Input System 2401 */
+const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
+ 0x00000000000C0C00ULL, /* stream2mmio controller A */
+ 0x00000000000C2C00ULL, /* stream2mmio controller B */
+ 0x00000000000C4C00ULL /* stream2mmio controller C */
+};
diff --git a/drivers/staging/media/atomisp/pci/system_local.h b/drivers/staging/media/atomisp/pci/system_local.h
new file mode 100644
index 000000000000..970f4ef990ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/system_local.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __SYSTEM_LOCAL_H_INCLUDED__
+#define __SYSTEM_LOCAL_H_INCLUDED__
+
+#ifdef HRT_ISP_CSS_CUSTOM_HOST
+#ifndef HRT_USE_VIR_ADDRS
+#define HRT_USE_VIR_ADDRS
+#endif
+#endif
+
+#include "system_global.h"
+
+/* This interface is deprecated */
+#include "hive_types.h"
+
+/*
+ * Cell specific address maps
+ */
+
+#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
+
+/* ISP */
+extern const hrt_address ISP_CTRL_BASE[N_ISP_ID];
+extern const hrt_address ISP_DMEM_BASE[N_ISP_ID];
+extern const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID];
+
+/* SP */
+extern const hrt_address SP_CTRL_BASE[N_SP_ID];
+extern const hrt_address SP_DMEM_BASE[N_SP_ID];
+
+/* MMU */
+
+extern const hrt_address MMU_BASE[N_MMU_ID];
+
+/* DMA */
+extern const hrt_address DMA_BASE[N_DMA_ID];
+extern const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID];
+
+/* IRQ */
+extern const hrt_address IRQ_BASE[N_IRQ_ID];
+
+/* GDC */
+extern const hrt_address GDC_BASE[N_GDC_ID];
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+extern const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID];
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+extern const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID];
+
+/* GP TIMER: all timer registers are intertwined, so having
+ * multiple base addresses for different timers does not help
+ */
+extern const hrt_address GP_TIMER_BASE;
+
+/* GPIO */
+extern const hrt_address GPIO_BASE[N_GPIO_ID];
+
+/* TIMED_CTRL */
+extern const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID];
+
+/* INPUT_FORMATTER */
+extern const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID];
+
+/* INPUT_SYSTEM */
+extern const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID];
+
+/* RX, the MIPI lane control regs start at offset 0 */
+extern const hrt_address RX_BASE[N_RX_ID];
+
+/* IBUF_CTRL, part of the Input System 2401 */
+extern const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID];
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+extern const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID];
+
+/* CSI FE, part of the Input System 2401 */
+extern const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID];
+
+/* CSI BE, part of the Input System 2401 */
+extern const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID];
+
+/* PIXEL Generator, part of the Input System 2401 */
+extern const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID];
+
+/* Stream2MMIO, part of the Input System 2401 */
+extern const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID];
+
+#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/timed_controller_defs.h
new file mode 100644
index 000000000000..37a9226c6941
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/timed_controller_defs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _timed_controller_defs_h
+#define _timed_controller_defs_h
+
+#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0
+
+#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4
+
+#endif /* _timed_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/version.h b/drivers/staging/media/atomisp/pci/version.h
new file mode 100644
index 000000000000..90688034c491
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/version.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef HRT_VERSION_H
+#define HRT_VERSION_H
+#define HRT_VERSION_MAJOR 1
+#define HRT_VERSION_MINOR 4
+#define HRT_VERSION 1_4
+#endif
diff --git a/drivers/staging/media/av7110/Kconfig b/drivers/staging/media/av7110/Kconfig
new file mode 100644
index 000000000000..0722df9e6a41
--- /dev/null
+++ b/drivers/staging/media/av7110/Kconfig
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DVB_AV7110_IR
+ bool
+ depends on RC_CORE=y || RC_CORE = DVB_AV7110
+ default DVB_AV7110
+
+config DVB_AV7110
+ tristate "AV7110 cards"
+ depends on DVB_CORE && PCI && I2C
+ select TTPCI_EEPROM
+ select VIDEO_SAA7146_VV
+ depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV
+ select DVB_VES1820 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA8083 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SP8870 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0297 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_L64781 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for SAA7146 and AV7110 based DVB cards as produced
+ by Fujitsu-Siemens, Technotrend, Hauppauge and others.
+
+ This driver only supports the full-featured cards with an
+ onboard MPEG2 decoder.
+
+ This driver needs external firmware. Please use the script
+ "<kerneldir>/scripts/get_dvb_firmware av7110" to
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
+
+ Alternatively, you can download the file and build it into your
+ kernel image by adding its filename to the EXTRA_FIRMWARE
+ configuration option.
+
+ Say Y if you own such a card and want to use it.
+
+config DVB_AV7110_OSD
+ bool "AV7110 OSD support"
+ depends on DVB_AV7110
+ default y if DVB_AV7110=y || DVB_AV7110=m
+ help
+ The AV7110 firmware provides some code to generate an OnScreenDisplay
+ on the video output. This is kind of nonstandard and not guaranteed to
+ be maintained.
+
+ Anyway, some popular DVB software like VDR uses this OSD to render
+ its menus, so say Y if you want to use this software.
+
+ All other people say N.
+
+if DVB_AV7110
+
+# Frontend driver that is used only by the AV7110 driver
+# While technically independent, it doesn't make sense to keep
+# it if we drop support for AV7110, as no other driver will use it.
+
+config DVB_SP8870
+ tristate "Spase sp8870 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
+ This driver needs external firmware. Please use the command
+ "<kerneldir>/scripts/get_dvb_firmware sp8870" to
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
+
+endif
diff --git a/drivers/staging/media/av7110/Makefile b/drivers/staging/media/av7110/Makefile
new file mode 100644
index 000000000000..f4bbb535d988
--- /dev/null
+++ b/drivers/staging/media/av7110/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the AV7110 DVB device driver
+#
+
+dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o \
+ av7110_ipack.o dvb_filter.o
+
+ifdef CONFIG_DVB_AV7110_IR
+dvb-ttpci-objs += av7110_ir.o
+endif
+
+obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o
+
+obj-$(CONFIG_DVB_SP8870) += sp8870.o
+
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/pci/ttpci
+ccflags-y += -I $(srctree)/drivers/media/common
diff --git a/drivers/staging/media/av7110/av7110.c b/drivers/staging/media/av7110/av7110.c
new file mode 100644
index 000000000000..602342d1174f
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110.c
@@ -0,0 +1,2895 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * driver for the SAA7146-based AV7110 cards (like the Fujitsu-Siemens DVB)
+ * - initialization and demux stuff
+ *
+ * Copyright (C) 1999-2002 Ralph Metzler
+ * & Marcus Metzler for convergence integrated media GmbH
+ *
+ * originally based on code by:
+ * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de>
+ *
+ * the project's page is at https://linuxtv.org
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/poll.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <linux/crc32.h>
+#include <linux/i2c.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/unaligned.h>
+#include <asm/byteorder.h>
+
+#include <linux/dvb/frontend.h>
+
+#include <media/dvb_frontend.h>
+
+#include "ttpci-eeprom.h"
+#include "av7110.h"
+#include "av7110_hw.h"
+#include "av7110_av.h"
+#include "av7110_ca.h"
+#include "av7110_ipack.h"
+
+#include "bsbe1.h"
+#include "lnbp21.h"
+#include "bsru6.h"
+
+#define TS_WIDTH 376
+#define TS_HEIGHT 512
+#define TS_BUFLEN (TS_WIDTH * TS_HEIGHT)
+#define TS_MAX_PACKETS (TS_BUFLEN / TS_SIZE)
+
+int av7110_debug;
+
+static int vidmode = CVBS_RGB_OUT;
+static int pids_off;
+static int adac = DVB_ADAC_TI;
+static int hw_sections;
+static int rgb_on;
+static int volume = 255;
+static int budgetpatch;
+static int wss_cfg_4_3 = 0x4008;
+static int wss_cfg_16_9 = 0x0007;
+static int tv_standard;
+static int full_ts;
+
+module_param_named(debug, av7110_debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (bitmask, default 0)");
+module_param(vidmode, int, 0444);
+MODULE_PARM_DESC(vidmode, "analog video out: 0 off, 1 CVBS+RGB (default), 2 CVBS+YC, 3 YC");
+module_param(pids_off, int, 0444);
+MODULE_PARM_DESC(pids_off, "clear video/audio/PCR PID filters when demux is closed");
+module_param(adac, int, 0444);
+MODULE_PARM_DESC(adac, "audio DAC type: 0 TI, 1 CRYSTAL, 2 MSP (use if autodetection fails)");
+module_param(hw_sections, int, 0444);
+MODULE_PARM_DESC(hw_sections, "0 use software section filter, 1 use hardware");
+module_param(rgb_on, int, 0444);
+MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
+module_param(volume, int, 0444);
+MODULE_PARM_DESC(volume, "initial volume: default 255 (range 0-255)");
+module_param(budgetpatch, int, 0444);
+MODULE_PARM_DESC(budgetpatch, "use budget-patch hardware modification: default 0 (0 no, 1 autodetect, 2 always)");
+module_param(full_ts, int, 0444);
+MODULE_PARM_DESC(full_ts, "enable code for full-ts hardware modification: 0 disable (default), 1 enable");
+module_param(wss_cfg_4_3, int, 0444);
+MODULE_PARM_DESC(wss_cfg_4_3, "WSS 4:3 - default 0x4008 - bit 15: disable, 14: burst mode, 13..0: wss data");
+module_param(wss_cfg_16_9, int, 0444);
+MODULE_PARM_DESC(wss_cfg_16_9, "WSS 16:9 - default 0x0007 - bit 15: disable, 14: burst mode, 13..0: wss data");
+module_param(tv_standard, int, 0444);
+MODULE_PARM_DESC(tv_standard, "TV standard: 0 PAL (default), 1 NTSC");
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+static void restart_feeds(struct av7110 *av7110);
+static int budget_start_feed(struct dvb_demux_feed *feed);
+static int budget_stop_feed(struct dvb_demux_feed *feed);
+
+static int av7110_num;
+
+#define FE_FUNC_OVERRIDE(fe_func, av7110_copy, av7110_func) \
+{\
+ if (fe_func) { \
+ av7110_copy = fe_func; \
+ fe_func = av7110_func; \
+ } \
+} /* Macro argument reuse of 'fe_func' is intentional! */
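A hedged usage sketch of the macro above; the frontend op, the saved-pointer field (av7110->fe_set_tone) and the wrapper are illustrative placeholders for how the driver interposes its own hooks when attaching a frontend:

/* Illustrative only: save the frontend's original op and interpose our own. */
static void example_override_set_tone(struct av7110 *av7110,
				      struct dvb_frontend *fe)
{
	/* av7110->fe_set_tone is assumed here as the slot that keeps the old op. */
	FE_FUNC_OVERRIDE(fe->ops.set_tone, av7110->fe_set_tone, av7110_set_tone);
}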
+
+static void init_av7110_av(struct av7110 *av7110)
+{
+ int ret;
+ struct saa7146_dev *dev = av7110->dev;
+
+ /* set internal volume control to maximum */
+ av7110->adac_type = DVB_ADAC_TI;
+ ret = av7110_set_volume(av7110, av7110->mixer.volume_left, av7110->mixer.volume_right);
+ if (ret < 0)
+ pr_err("cannot set internal volume to maximum:%d\n", ret);
+
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetMonitorType,
+ 1, (u16)av7110->display_ar);
+ if (ret < 0)
+ pr_err("unable to set aspect ratio\n");
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
+ 1, av7110->display_panscan);
+ if (ret < 0)
+ pr_err("unable to set pan scan\n");
+
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 2, wss_cfg_4_3);
+ if (ret < 0)
+ pr_err("unable to configure 4:3 wss\n");
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 3, wss_cfg_16_9);
+ if (ret < 0)
+ pr_err("unable to configure 16:9 wss\n");
+
+ ret = av7710_set_video_mode(av7110, vidmode);
+ if (ret < 0)
+ pr_err("cannot set video mode:%d\n", ret);
+
+ /* handle different card types */
+ /* remaining inits according to card and frontend type */
+ av7110->analog_tuner_flags = 0;
+ av7110->current_input = 0;
+ if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000a)
+ av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 0); // SPDIF on
+ if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
+ pr_info("Crystal audio DAC @ card %d detected\n", av7110->dvb_adapter.num);
+ av7110->adac_type = DVB_ADAC_CRYSTAL;
+ i2c_writereg(av7110, 0x20, 0x01, 0xd2);
+ i2c_writereg(av7110, 0x20, 0x02, 0x49);
+ i2c_writereg(av7110, 0x20, 0x03, 0x00);
+ i2c_writereg(av7110, 0x20, 0x04, 0x00);
+
+ /**
+ * some special handling for the Siemens DVB-C cards...
+ */
+ } else if (av7110_init_analog_module(av7110) == 0) {
+ /* done. */
+ } else if (dev->pci->subsystem_vendor == 0x110a) {
+ pr_info("DVB-C w/o analog module @ card %d detected\n", av7110->dvb_adapter.num);
+ av7110->adac_type = DVB_ADAC_NONE;
+ } else {
+ av7110->adac_type = adac;
+ pr_info("adac type set to %d @ card %d\n", av7110->adac_type, av7110->dvb_adapter.num);
+ }
+
+ if (av7110->adac_type == DVB_ADAC_NONE || av7110->adac_type == DVB_ADAC_MSP34x0) {
+ // switch DVB SCART on
+ ret = av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, MainSwitch, 1, 0);
+ if (ret < 0)
+ pr_err("cannot switch on SCART(Main):%d\n", ret);
+ ret = av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 1);
+ if (ret < 0)
+ pr_err("cannot switch on SCART(AD):%d\n", ret);
+ if (rgb_on &&
+ ((av7110->dev->pci->subsystem_vendor == 0x110a) ||
+ (av7110->dev->pci->subsystem_vendor == 0x13c2)) &&
+ (av7110->dev->pci->subsystem_device == 0x0000)) {
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // RGB on, SCART pin 16
+ //saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // SCARTpin 8
+ }
+ }
+
+ if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000e)
+ av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, SpdifSwitch, 1, 0); // SPDIF on
+
+ ret = av7110_set_volume(av7110, av7110->mixer.volume_left, av7110->mixer.volume_right);
+ if (ret < 0)
+ pr_err("cannot set volume :%d\n", ret);
+}
+
+static void recover_arm(struct av7110 *av7110)
+{
+ dprintk(4, "%p\n", av7110);
+
+ av7110_bootarm(av7110);
+ msleep(100);
+
+ init_av7110_av(av7110);
+
+ /* card-specific recovery */
+ if (av7110->recover)
+ av7110->recover(av7110);
+
+ restart_feeds(av7110);
+
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
+ av7110_set_ir_config(av7110);
+#endif
+}
+
+static void av7110_arm_sync(struct av7110 *av7110)
+{
+ if (av7110->arm_thread)
+ kthread_stop(av7110->arm_thread);
+
+ av7110->arm_thread = NULL;
+}
+
+static int arm_thread(void *data)
+{
+ struct av7110 *av7110 = data;
+ u16 newloops = 0;
+ int timeout;
+
+ dprintk(4, "%p\n", av7110);
+
+ for (;;) {
+ timeout = wait_event_interruptible_timeout(av7110->arm_wait,
+ kthread_should_stop(), 5 * HZ);
+
+ if (-ERESTARTSYS == timeout || kthread_should_stop()) {
+ /* got signal or told to quit*/
+ break;
+ }
+
+ if (!av7110->arm_ready)
+ continue;
+
+ if (mutex_lock_interruptible(&av7110->dcomlock))
+ break;
+ newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2);
+ mutex_unlock(&av7110->dcomlock);
+
+ if (newloops == av7110->arm_loops || av7110->arm_errors > 3) {
+ pr_err("ARM crashed @ card %d\n", av7110->dvb_adapter.num);
+
+ recover_arm(av7110);
+
+ if (mutex_lock_interruptible(&av7110->dcomlock))
+ break;
+ newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2) - 1;
+ mutex_unlock(&av7110->dcomlock);
+ }
+ av7110->arm_loops = newloops;
+ av7110->arm_errors = 0;
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ * IRQ handling
+ ****************************************************************************/
+
+static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
+ u8 *buffer2, size_t buffer2_len,
+ struct dvb_demux_filter *dvbdmxfilter,
+ struct av7110 *av7110)
+{
+ if (!dvbdmxfilter->feed->demux->dmx.frontend)
+ return 0;
+ if (dvbdmxfilter->feed->demux->dmx.frontend->source == DMX_MEMORY_FE)
+ return 0;
+
+ switch (dvbdmxfilter->type) {
+ case DMX_TYPE_SEC:
+ if ((((buffer1[1] << 8) | buffer1[2]) & 0xfff) + 3 != buffer1_len)
+ return 0;
+ if (dvbdmxfilter->doneq) {
+ struct dmx_section_filter *filter = &dvbdmxfilter->filter;
+ int i;
+ u8 xor, neq = 0;
+
+ for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
+ xor = filter->filter_value[i] ^ buffer1[i];
+ neq |= dvbdmxfilter->maskandnotmode[i] & xor;
+ }
+ if (!neq)
+ return 0;
+ }
+ return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,
+ buffer2, buffer2_len,
+ &dvbdmxfilter->filter, NULL);
+ case DMX_TYPE_TS:
+ if (!(dvbdmxfilter->feed->ts_type & TS_PACKET))
+ return 0;
+ if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY)
+ return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,
+ buffer2, buffer2_len,
+ &dvbdmxfilter->feed->feed.ts,
+ NULL);
+ else
+ av7110_p2t_write(buffer1, buffer1_len,
+ dvbdmxfilter->feed->pid,
+ &av7110->p2t_filter[dvbdmxfilter->index]);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+//#define DEBUG_TIMING
+static inline void print_time(char *s)
+{
+#ifdef DEBUG_TIMING
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ pr_info("%s(): %ptSp\n", s, &ts);
+#endif
+}
+
+#define DEBI_READ 0
+#define DEBI_WRITE 1
+static inline void start_debi_dma(struct av7110 *av7110, int dir,
+ unsigned long addr, unsigned int len)
+{
+ dprintk(8, "%c %08lx %u\n", dir == DEBI_READ ? 'R' : 'W', addr, len);
+ if (saa7146_wait_for_debi_done(av7110->dev, 0)) {
+ pr_err("%s(): saa7146_wait_for_debi_done timed out\n", __func__);
+ return;
+ }
+
+ SAA7146_ISR_CLEAR(av7110->dev, MASK_19); /* for good measure */
+ SAA7146_IER_ENABLE(av7110->dev, MASK_19);
+ if (len < 5)
+ len = 5; /* we want a real DEBI DMA */
+ if (dir == DEBI_WRITE)
+ iwdebi(av7110, DEBISWAB, addr, 0, (len + 3) & ~3);
+ else
+ irdebi(av7110, DEBISWAB, addr, 0, len);
+}
+
+static void debiirq(struct tasklet_struct *t)
+{
+ struct av7110 *av7110 = from_tasklet(av7110, t, debi_tasklet);
+ int type = av7110->debitype;
+ int handle = (type >> 8) & 0x1f;
+ unsigned int xfer = 0;
+
+ print_time("debi");
+ dprintk(4, "type 0x%04x\n", type);
+
+ if (type == -1) {
+ pr_err("DEBI irq oops @ %ld, psr:0x%08x, ssr:0x%08x\n", jiffies,
+ saa7146_read(av7110->dev, PSR), saa7146_read(av7110->dev, SSR));
+ goto debi_done;
+ }
+ av7110->debitype = -1;
+
+ switch (type & 0xff) {
+ case DATA_TS_RECORD:
+ dvb_dmx_swfilter_packets(&av7110->demux,
+ (const u8 *)av7110->debi_virt,
+ av7110->debilen / 188);
+ xfer = RX_BUFF;
+ break;
+
+ case DATA_PES_RECORD:
+ if (av7110->demux.recording)
+ av7110_record_cb(&av7110->p2t[handle],
+ (u8 *)av7110->debi_virt,
+ av7110->debilen);
+ xfer = RX_BUFF;
+ break;
+
+ case DATA_IPMPE:
+ case DATA_FSECTION:
+ case DATA_PIPING:
+ if (av7110->handle2filter[handle])
+ DvbDmxFilterCallback((u8 *)av7110->debi_virt,
+ av7110->debilen, NULL, 0,
+ av7110->handle2filter[handle],
+ av7110);
+ xfer = RX_BUFF;
+ break;
+
+ case DATA_CI_GET:
+ {
+ u8 *data = av7110->debi_virt;
+ u8 data_0 = data[0];
+
+ if (data_0 < 2 && data[2] == 0xff) {
+ int flags = 0;
+
+ if (data[5] > 0)
+ flags |= CA_CI_MODULE_PRESENT;
+ if (data[5] > 5)
+ flags |= CA_CI_MODULE_READY;
+ av7110->ci_slot[data_0].flags = flags;
+ } else {
+ ci_get_data(&av7110->ci_rbuffer,
+ av7110->debi_virt,
+ av7110->debilen);
+ }
+ xfer = RX_BUFF;
+ break;
+ }
+
+ case DATA_COMMON_INTERFACE:
+ CI_handle(av7110, (u8 *)av7110->debi_virt, av7110->debilen);
+ xfer = RX_BUFF;
+ break;
+
+ case DATA_DEBUG_MESSAGE:
+ ((s8 *)av7110->debi_virt)[Reserved_SIZE - 1] = 0;
+ pr_info("%s\n", (s8 *)av7110->debi_virt);
+ xfer = RX_BUFF;
+ break;
+
+ case DATA_CI_PUT:
+ dprintk(4, "debi DATA_CI_PUT\n");
+ xfer = TX_BUFF;
+ break;
+ case DATA_MPEG_PLAY:
+ dprintk(4, "debi DATA_MPEG_PLAY\n");
+ xfer = TX_BUFF;
+ break;
+ case DATA_BMP_LOAD:
+ dprintk(4, "debi DATA_BMP_LOAD\n");
+ xfer = TX_BUFF;
+ break;
+ default:
+ break;
+ }
+debi_done:
+ spin_lock(&av7110->debilock);
+ if (xfer)
+ iwdebi(av7110, DEBINOSWAP, xfer, 0, 2);
+ ARM_ClearMailBox(av7110);
+ spin_unlock(&av7110->debilock);
+}
+
+/* irq from av7110 firmware writing the mailbox register in the DPRAM */
+static void gpioirq(struct tasklet_struct *t)
+{
+ struct av7110 *av7110 = from_tasklet(av7110, t, gpio_tasklet);
+ u32 rxbuf, txbuf;
+ int len;
+
+ if (av7110->debitype != -1)
+ /* we shouldn't get any irq while a debi xfer is running */
+ pr_err("GPIO0 irq oops @ %ld, psr:0x%08x, ssr:0x%08x\n", jiffies,
+ saa7146_read(av7110->dev, PSR), saa7146_read(av7110->dev, SSR));
+
+ if (saa7146_wait_for_debi_done(av7110->dev, 0)) {
+ pr_err("%s(): saa7146_wait_for_debi_done timed out\n", __func__);
+ BUG(); /* maybe we should try resetting the debi? */
+ }
+
+ spin_lock(&av7110->debilock);
+ ARM_ClearIrq(av7110);
+
+ /* see what the av7110 wants */
+ av7110->debitype = irdebi(av7110, DEBINOSWAP, IRQ_STATE, 0, 2);
+ av7110->debilen = irdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
+ rxbuf = irdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
+ txbuf = irdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
+ len = (av7110->debilen + 3) & ~3;
+
+ print_time("gpio");
+ dprintk(8, "GPIO0 irq 0x%04x %d\n", av7110->debitype, av7110->debilen);
+
+ switch (av7110->debitype & 0xff) {
+ case DATA_TS_PLAY:
+ case DATA_PES_PLAY:
+ break;
+
+ case DATA_MPEG_VIDEO_EVENT:
+ {
+ u32 h_ar;
+ struct video_event event;
+
+ av7110->video_size.w = irdebi(av7110, DEBINOSWAP, STATUS_MPEG_WIDTH, 0, 2);
+ h_ar = irdebi(av7110, DEBINOSWAP, STATUS_MPEG_HEIGHT_AR, 0, 2);
+
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
+
+ av7110->video_size.h = h_ar & 0xfff;
+
+ event.type = VIDEO_EVENT_SIZE_CHANGED;
+ event.u.size.w = av7110->video_size.w;
+ event.u.size.h = av7110->video_size.h;
+ switch ((h_ar >> 12) & 0xf) {
+ case 3:
+ av7110->video_size.aspect_ratio = VIDEO_FORMAT_16_9;
+ event.u.size.aspect_ratio = VIDEO_FORMAT_16_9;
+ av7110->videostate.video_format = VIDEO_FORMAT_16_9;
+ break;
+ case 4:
+ av7110->video_size.aspect_ratio = VIDEO_FORMAT_221_1;
+ event.u.size.aspect_ratio = VIDEO_FORMAT_221_1;
+ av7110->videostate.video_format = VIDEO_FORMAT_221_1;
+ break;
+ default:
+ av7110->video_size.aspect_ratio = VIDEO_FORMAT_4_3;
+ event.u.size.aspect_ratio = VIDEO_FORMAT_4_3;
+ av7110->videostate.video_format = VIDEO_FORMAT_4_3;
+ }
+
+ dprintk(8, "GPIO0 irq: DATA_MPEG_VIDEO_EVENT: w/h/ar = %u/%u/%u\n",
+ av7110->video_size.w, av7110->video_size.h,
+ av7110->video_size.aspect_ratio);
+
+ dvb_video_add_event(av7110, &event);
+ break;
+ }
+
+ case DATA_CI_PUT:
+ {
+ int avail;
+ struct dvb_ringbuffer *cibuf = &av7110->ci_wbuffer;
+
+ avail = dvb_ringbuffer_avail(cibuf);
+ if (avail <= 2) {
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
+ break;
+ }
+ len = DVB_RINGBUFFER_PEEK(cibuf, 0) << 8;
+ len |= DVB_RINGBUFFER_PEEK(cibuf, 1);
+ if (avail < len + 2) {
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
+ break;
+ }
+ DVB_RINGBUFFER_SKIP(cibuf, 2);
+
+ dvb_ringbuffer_read(cibuf, av7110->debi_virt, len);
+
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, len, 2);
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, len, 2);
+ dprintk(8, "DMA: CI\n");
+ start_debi_dma(av7110, DEBI_WRITE, DPRAM_BASE + txbuf, len);
+ spin_unlock(&av7110->debilock);
+ wake_up(&cibuf->queue);
+ return;
+ }
+
+ case DATA_MPEG_PLAY:
+ if (!av7110->playing) {
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
+ break;
+ }
+ len = 0;
+ if (av7110->debitype & 0x100) {
+ spin_lock(&av7110->aout.lock);
+ len = av7110_pes_play(av7110->debi_virt, &av7110->aout, 2048);
+ spin_unlock(&av7110->aout.lock);
+ }
+ if (len <= 0 && (av7110->debitype & 0x200) &&
+ av7110->videostate.play_state != VIDEO_FREEZED) {
+ spin_lock(&av7110->avout.lock);
+ len = av7110_pes_play(av7110->debi_virt, &av7110->avout, 2048);
+ spin_unlock(&av7110->avout.lock);
+ }
+ if (len <= 0) {
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
+ break;
+ }
+ dprintk(8, "GPIO0 PES_PLAY len=%04x\n", len);
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, len, 2);
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, len, 2);
+ dprintk(8, "DMA: MPEG_PLAY\n");
+ start_debi_dma(av7110, DEBI_WRITE, DPRAM_BASE + txbuf, len);
+ spin_unlock(&av7110->debilock);
+ return;
+
+ case DATA_BMP_LOAD:
+ len = av7110->debilen;
+ dprintk(8, "gpio DATA_BMP_LOAD len %d\n", len);
+ if (!len) {
+ av7110->bmp_state = BMP_LOADED;
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
+ wake_up(&av7110->bmpq);
+ dprintk(8, "gpio DATA_BMP_LOAD done\n");
+ break;
+ }
+ if (len > av7110->bmplen)
+ len = av7110->bmplen;
+ if (len > 2 * 1024)
+ len = 2 * 1024;
+ iwdebi(av7110, DEBINOSWAP, TX_LEN, len, 2);
+ iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, len, 2);
+ memcpy(av7110->debi_virt, av7110->bmpbuf + av7110->bmpp, len);
+ av7110->bmpp += len;
+ av7110->bmplen -= len;
+ dprintk(8, "gpio DATA_BMP_LOAD DMA len %d\n", len);
+ start_debi_dma(av7110, DEBI_WRITE, DPRAM_BASE + txbuf, len);
+ spin_unlock(&av7110->debilock);
+ return;
+
+ case DATA_CI_GET:
+ case DATA_COMMON_INTERFACE:
+ case DATA_FSECTION:
+ case DATA_IPMPE:
+ case DATA_PIPING:
+ if (!len || len > 4 * 1024) {
+ iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
+ break;
+ }
+ fallthrough;
+
+ case DATA_TS_RECORD:
+ case DATA_PES_RECORD:
+ dprintk(8, "DMA: TS_REC etc.\n");
+ start_debi_dma(av7110, DEBI_READ, DPRAM_BASE + rxbuf, len);
+ spin_unlock(&av7110->debilock);
+ return;
+
+ case DATA_DEBUG_MESSAGE:
+ if (!len || len > 0xff) {
+ iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
+ break;
+ }
+ start_debi_dma(av7110, DEBI_READ, Reserved, len);
+ spin_unlock(&av7110->debilock);
+ return;
+
+ case DATA_IRCOMMAND:
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
+ av7110_ir_handler(av7110,
+ swahw32(irdebi(av7110, DEBINOSWAP, Reserved,
+ 0, 4)));
+#endif
+ iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
+ break;
+
+ default:
+ pr_err("%s(): unknown irq: type=%d len=%d\n", __func__,
+ av7110->debitype, av7110->debilen);
+ break;
+ }
+ av7110->debitype = -1;
+ ARM_ClearMailBox(av7110);
+ spin_unlock(&av7110->debilock);
+}
+
+#ifdef CONFIG_DVB_AV7110_OSD
+static int dvb_osd_ioctl(struct file *file,
+ unsigned int cmd, void *parg)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (cmd == OSD_SEND_CMD)
+ return av7110_osd_cmd(av7110, (osd_cmd_t *)parg);
+ if (cmd == OSD_GET_CAPABILITY)
+ return av7110_osd_capability(av7110, (osd_cap_t *)parg);
+
+ return -EINVAL;
+}
+
+static const struct file_operations dvb_osd_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dvb_generic_ioctl,
+ .open = dvb_generic_open,
+ .release = dvb_generic_release,
+ .llseek = noop_llseek,
+};
+
+static struct dvb_device dvbdev_osd = {
+ .priv = NULL,
+ .users = 1,
+ .writers = 1,
+ .fops = &dvb_osd_fops,
+ .kernel_ioctl = dvb_osd_ioctl,
+};
+#endif /* CONFIG_DVB_AV7110_OSD */
+
+static inline int SetPIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
+ u16 subpid, u16 pcrpid)
+{
+ u16 aflags = 0;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (vpid == 0x1fff || apid == 0x1fff ||
+ ttpid == 0x1fff || subpid == 0x1fff || pcrpid == 0x1fff) {
+ vpid = 0;
+ apid = 0;
+ ttpid = 0;
+ subpid = 0;
+ pcrpid = 0;
+ av7110->pids[DMX_PES_VIDEO] = 0;
+ av7110->pids[DMX_PES_AUDIO] = 0;
+ av7110->pids[DMX_PES_TELETEXT] = 0;
+ av7110->pids[DMX_PES_PCR] = 0;
+ }
+
+ if (av7110->audiostate.bypass_mode)
+ aflags |= 0x8000;
+
+ return av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, MultiPID, 6,
+ pcrpid, vpid, apid, ttpid, subpid, aflags);
+}
+
+int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
+ u16 subpid, u16 pcrpid)
+{
+ int ret = 0;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (mutex_lock_interruptible(&av7110->pid_mutex))
+ return -ERESTARTSYS;
+
+ if (!(vpid & 0x8000))
+ av7110->pids[DMX_PES_VIDEO] = vpid;
+ if (!(apid & 0x8000))
+ av7110->pids[DMX_PES_AUDIO] = apid;
+ if (!(ttpid & 0x8000))
+ av7110->pids[DMX_PES_TELETEXT] = ttpid;
+ if (!(pcrpid & 0x8000))
+ av7110->pids[DMX_PES_PCR] = pcrpid;
+
+ av7110->pids[DMX_PES_SUBTITLE] = 0;
+
+ if (av7110->fe_synced) {
+ pcrpid = av7110->pids[DMX_PES_PCR];
+ ret = SetPIDs(av7110, vpid, apid, ttpid, subpid, pcrpid);
+ }
+
+ mutex_unlock(&av7110->pid_mutex);
+ return ret;
+}
+
+/******************************************************************************
+ * hardware filter functions
+ ******************************************************************************/
+
+static int StartHWFilter(struct dvb_demux_filter *dvbdmxfilter)
+{
+ struct dvb_demux_feed *dvbdmxfeed = dvbdmxfilter->feed;
+ struct av7110 *av7110 = dvbdmxfeed->demux->priv;
+ u16 buf[20];
+ int ret, i;
+ u16 handle;
+// u16 mode = 0x0320;
+ u16 mode = 0xb96a;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (av7110->full_ts)
+ return 0;
+
+ if (dvbdmxfilter->type == DMX_TYPE_SEC) {
+ if (hw_sections) {
+ buf[4] = (dvbdmxfilter->filter.filter_value[0] << 8) |
+ dvbdmxfilter->maskandmode[0];
+ for (i = 3; i < 18; i++)
+ buf[i + 4 - 2] =
+ (dvbdmxfilter->filter.filter_value[i] << 8) |
+ dvbdmxfilter->maskandmode[i];
+ mode = 4;
+ }
+ } else if ((dvbdmxfeed->ts_type & TS_PACKET) &&
+ !(dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)) {
+ av7110_p2t_init(&av7110->p2t_filter[dvbdmxfilter->index], dvbdmxfeed);
+ }
+
+ buf[0] = (COMTYPE_PID_FILTER << 8) + AddPIDFilter;
+ buf[1] = 16;
+ buf[2] = dvbdmxfeed->pid;
+ buf[3] = mode;
+
+ ret = av7110_fw_request(av7110, buf, 20, &handle, 1);
+ if (ret != 0 || handle >= 32) {
+ pr_err("%s(): error buf %04x %04x %04x %04x ret %d handle %04x\n",
+ __func__, buf[0], buf[1], buf[2], buf[3], ret, handle);
+ dvbdmxfilter->hw_handle = 0xffff;
+ if (!ret)
+ ret = -1;
+ return ret;
+ }
+
+ av7110->handle2filter[handle] = dvbdmxfilter;
+ dvbdmxfilter->hw_handle = handle;
+
+ return ret;
+}
+
+static int StopHWFilter(struct dvb_demux_filter *dvbdmxfilter)
+{
+ struct av7110 *av7110 = dvbdmxfilter->feed->demux->priv;
+ u16 buf[3];
+ u16 answ[2];
+ int ret;
+ u16 handle;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (av7110->full_ts)
+ return 0;
+
+ handle = dvbdmxfilter->hw_handle;
+ if (handle >= 32) {
+ pr_err("%s(): tried to stop invalid filter %04x, filter type = %x\n",
+ __func__, handle, dvbdmxfilter->type);
+ return -EINVAL;
+ }
+
+ av7110->handle2filter[handle] = NULL;
+
+ buf[0] = (COMTYPE_PID_FILTER << 8) + DelPIDFilter;
+ buf[1] = 1;
+ buf[2] = handle;
+ ret = av7110_fw_request(av7110, buf, 3, answ, 2);
+ if (ret != 0 || answ[1] != handle) {
+ pr_err("%s(): error cmd %04x %04x %04x ret %x resp %04x %04x pid %d\n", __func__,
+ buf[0], buf[1], buf[2], ret, answ[0], answ[1], dvbdmxfilter->feed->pid);
+ if (!ret)
+ ret = -1;
+ }
+ return ret;
+}
+
+static int dvb_feed_start_pid(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct av7110 *av7110 = dvbdmx->priv;
+ u16 *pid = dvbdmx->pids, npids[5];
+ int i;
+ int ret = 0;
+
+ dprintk(4, "%p\n", av7110);
+
+ npids[0] = 0xffff;
+ npids[1] = 0xffff;
+ npids[2] = 0xffff;
+ npids[3] = 0xffff;
+ npids[4] = 0xffff;
+ i = dvbdmxfeed->pes_type;
+ npids[i] = (pid[i] & 0x8000) ? 0 : pid[i];
+ if ((i == 2) && npids[i] && (dvbdmxfeed->ts_type & TS_PACKET)) {
+ npids[i] = 0;
+ ret = ChangePIDs(av7110, npids[1], npids[0], npids[2], npids[3], npids[4]);
+ if (!ret)
+ ret = StartHWFilter(dvbdmxfeed->filter);
+ return ret;
+ }
+ if (dvbdmxfeed->pes_type <= 2 || dvbdmxfeed->pes_type == 4) {
+ ret = ChangePIDs(av7110, npids[1], npids[0], npids[2], npids[3], npids[4]);
+ if (ret)
+ return ret;
+ }
+
+ if (dvbdmxfeed->pes_type < 2 && npids[0])
+ if (av7110->fe_synced) {
+ ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, Scan, 0);
+ if (ret)
+ return ret;
+ }
+
+ if ((dvbdmxfeed->ts_type & TS_PACKET) && !av7110->full_ts) {
+ if (dvbdmxfeed->pes_type == 0 && !(dvbdmx->pids[0] & 0x8000))
+ ret = av7110_av_start_record(av7110, RP_AUDIO, dvbdmxfeed);
+ if (dvbdmxfeed->pes_type == 1 && !(dvbdmx->pids[1] & 0x8000))
+ ret = av7110_av_start_record(av7110, RP_VIDEO, dvbdmxfeed);
+ }
+ return ret;
+}
+
+static int dvb_feed_stop_pid(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct av7110 *av7110 = dvbdmx->priv;
+ u16 *pid = dvbdmx->pids, npids[5];
+ int i;
+
+ int ret = 0;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (dvbdmxfeed->pes_type <= 1) {
+ ret = av7110_av_stop(av7110, dvbdmxfeed->pes_type ? RP_VIDEO : RP_AUDIO);
+ if (ret)
+ return ret;
+ if (!av7110->rec_mode)
+ dvbdmx->recording = 0;
+ if (!av7110->playing)
+ dvbdmx->playing = 0;
+ }
+ npids[0] = 0xffff;
+ npids[1] = 0xffff;
+ npids[2] = 0xffff;
+ npids[3] = 0xffff;
+ npids[4] = 0xffff;
+ i = dvbdmxfeed->pes_type;
+ switch (i) {
+ case 2: //teletext
+ if (dvbdmxfeed->ts_type & TS_PACKET)
+ ret = StopHWFilter(dvbdmxfeed->filter);
+ npids[2] = 0;
+ break;
+ case 0:
+ case 1:
+ case 4:
+ if (!pids_off)
+ return 0;
+ npids[i] = (pid[i] & 0x8000) ? 0 : pid[i];
+ break;
+ }
+ if (!ret)
+ ret = ChangePIDs(av7110, npids[1], npids[0], npids[2], npids[3], npids[4]);
+ return ret;
+}
+
+static int av7110_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct av7110 *av7110 = demux->priv;
+ int ret = 0;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (!demux->dmx.frontend)
+ return -EINVAL;
+
+ if (!av7110->full_ts && feed->pid > 0x1fff)
+ return -EINVAL;
+
+ if (feed->type == DMX_TYPE_TS) {
+ if ((feed->ts_type & TS_DECODER) &&
+ (feed->pes_type <= DMX_PES_PCR)) {
+ switch (demux->dmx.frontend->source) {
+ case DMX_MEMORY_FE:
+ if (feed->ts_type & TS_DECODER)
+ if (feed->pes_type < 2 &&
+ !(demux->pids[0] & 0x8000) &&
+ !(demux->pids[1] & 0x8000)) {
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
+ ret = av7110_av_start_play(av7110, RP_AV);
+ if (!ret)
+ demux->playing = 1;
+ }
+ break;
+ default:
+ ret = dvb_feed_start_pid(feed);
+ break;
+ }
+ } else if ((feed->ts_type & TS_PACKET) &&
+ (demux->dmx.frontend->source != DMX_MEMORY_FE)) {
+ ret = StartHWFilter(feed->filter);
+ }
+ }
+
+ if (av7110->full_ts) {
+ budget_start_feed(feed);
+ return ret;
+ }
+
+ if (feed->type == DMX_TYPE_SEC) {
+ int i;
+
+ for (i = 0; i < demux->filternum; i++) {
+ if (demux->filter[i].state != DMX_STATE_READY)
+ continue;
+ if (demux->filter[i].type != DMX_TYPE_SEC)
+ continue;
+ if (demux->filter[i].filter.parent != &feed->feed.sec)
+ continue;
+ demux->filter[i].state = DMX_STATE_GO;
+ if (demux->dmx.frontend->source != DMX_MEMORY_FE) {
+ ret = StartHWFilter(&demux->filter[i]);
+ if (ret)
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int av7110_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct av7110 *av7110 = demux->priv;
+ int i, rc, ret = 0;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (feed->type == DMX_TYPE_TS) {
+ if (feed->ts_type & TS_DECODER) {
+ if (feed->pes_type >= DMX_PES_OTHER ||
+ !demux->pesfilter[feed->pes_type])
+ return -EINVAL;
+ demux->pids[feed->pes_type] |= 0x8000;
+ demux->pesfilter[feed->pes_type] = NULL;
+ }
+ if (feed->ts_type & TS_DECODER && feed->pes_type < DMX_PES_OTHER)
+ ret = dvb_feed_stop_pid(feed);
+ else
+ if ((feed->ts_type & TS_PACKET) &&
+ (demux->dmx.frontend->source != DMX_MEMORY_FE))
+ ret = StopHWFilter(feed->filter);
+ }
+
+ if (av7110->full_ts) {
+ budget_stop_feed(feed);
+ return ret;
+ }
+
+ if (feed->type == DMX_TYPE_SEC) {
+ for (i = 0; i < demux->filternum; i++) {
+ if (demux->filter[i].state == DMX_STATE_GO &&
+ demux->filter[i].filter.parent == &feed->feed.sec) {
+ demux->filter[i].state = DMX_STATE_READY;
+ if (demux->dmx.frontend->source != DMX_MEMORY_FE) {
+ rc = StopHWFilter(&demux->filter[i]);
+ if (!ret)
+ ret = rc;
+ /* keep going, stop as many filters as possible */
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void restart_feeds(struct av7110 *av7110)
+{
+ struct dvb_demux *dvbdmx = &av7110->demux;
+ struct dvb_demux_feed *feed;
+ int mode;
+ int feeding;
+ int i, j;
+
+ dprintk(4, "%p\n", av7110);
+
+ mode = av7110->playing;
+ av7110->playing = 0;
+ av7110->rec_mode = 0;
+
+ feeding = av7110->feeding1; /* full_ts mod */
+
+ for (i = 0; i < dvbdmx->feednum; i++) {
+ feed = &dvbdmx->feed[i];
+ if (feed->state == DMX_STATE_GO) {
+ if (feed->type == DMX_TYPE_SEC) {
+ for (j = 0; j < dvbdmx->filternum; j++) {
+ if (dvbdmx->filter[j].type != DMX_TYPE_SEC)
+ continue;
+ if (dvbdmx->filter[j].filter.parent != &feed->feed.sec)
+ continue;
+ if (dvbdmx->filter[j].state == DMX_STATE_GO)
+ dvbdmx->filter[j].state = DMX_STATE_READY;
+ }
+ }
+ av7110_start_feed(feed);
+ }
+ }
+
+ av7110->feeding1 = feeding; /* full_ts mod */
+
+ if (mode)
+ av7110_av_start_play(av7110, mode);
+}
+
+static int dvb_get_stc(struct dmx_demux *demux, unsigned int num,
+ u64 *stc, unsigned int *base)
+{
+ int ret;
+ u16 fwstc[4];
+ u16 tag = ((COMTYPE_REQUEST << 8) + ReqSTC);
+ struct dvb_demux *dvbdemux;
+ struct av7110 *av7110;
+
+ /* pointer casting paranoia... */
+ if (WARN_ON(!demux))
+ return -EIO;
+ dvbdemux = demux->priv;
+ if (WARN_ON(!dvbdemux))
+ return -EIO;
+ av7110 = dvbdemux->priv;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (num != 0)
+ return -EINVAL;
+
+ ret = av7110_fw_request(av7110, &tag, 0, fwstc, 4);
+ if (ret) {
+ pr_err("%s(): av7110_fw_request error\n", __func__);
+ return ret;
+ }
+ dprintk(2, "fwstc = %04hx %04hx %04hx %04hx\n", fwstc[0], fwstc[1], fwstc[2], fwstc[3]);
+
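+ /* Reassemble the 33-bit MPEG system time clock: bit 32 comes from
+  * bit 15 of fwstc[3], bits 31..16 from fwstc[1] and bits 15..0
+  * from fwstc[0].
+  */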
+ *stc = (((uint64_t)((fwstc[3] & 0x8000) >> 15)) << 32) |
+ (((uint64_t)fwstc[1]) << 16) | ((uint64_t)fwstc[0]);
+ *base = 1;
+
+ dprintk(4, "stc = %lu\n", (unsigned long)*stc);
+
+ return 0;
+}
+
+/******************************************************************************
+ * SEC device file operations
+ ******************************************************************************/
+
+static int av7110_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ switch (tone) {
+ case SEC_TONE_ON:
+ return Set22K(av7110, 1);
+
+ case SEC_TONE_OFF:
+ return Set22K(av7110, 0);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int av7110_diseqc_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ return av7110_diseqc_send(av7110, cmd->msg_len, cmd->msg, -1);
+}
+
+static int av7110_diseqc_send_burst(struct dvb_frontend *fe,
+ enum fe_sec_mini_cmd minicmd)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ return av7110_diseqc_send(av7110, 0, NULL, minicmd);
+}
+
+/* simplified code from budget-core.c */
+static int stop_ts_capture(struct av7110 *budget)
+{
+ dprintk(2, "budget: %p\n", budget);
+
+ if (--budget->feeding1)
+ return budget->feeding1;
+ saa7146_write(budget->dev, MC1, MASK_20); /* DMA3 off */
+ SAA7146_IER_DISABLE(budget->dev, MASK_10);
+ SAA7146_ISR_CLEAR(budget->dev, MASK_10);
+ return 0;
+}
+
+static int start_ts_capture(struct av7110 *budget)
+{
+ unsigned int y;
+
+ dprintk(2, "budget: %p\n", budget);
+
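+ /* feeding1 is a reference count: only the first feed clears the
+  * grab buffer and switches DMA3 on.
+  */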
+ if (budget->feeding1)
+ return ++budget->feeding1;
+ for (y = 0; y < TS_HEIGHT; y++)
+ memset(budget->grabbing + y * TS_WIDTH, 0x00, TS_WIDTH);
+ budget->ttbp = 0;
+ SAA7146_ISR_CLEAR(budget->dev, MASK_10); /* VPE */
+ SAA7146_IER_ENABLE(budget->dev, MASK_10); /* VPE */
+ saa7146_write(budget->dev, MC1, (MASK_04 | MASK_20)); /* DMA3 on */
+ return ++budget->feeding1;
+}
+
+static int budget_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct av7110 *budget = demux->priv;
+ int status;
+
+ dprintk(2, "av7110: %p\n", budget);
+
+ spin_lock(&budget->feedlock1);
+ feed->pusi_seen = false; /* have a clean section start */
+ status = start_ts_capture(budget);
+ spin_unlock(&budget->feedlock1);
+ return status;
+}
+
+static int budget_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct av7110 *budget = demux->priv;
+ int status;
+
+ dprintk(2, "budget: %p\n", budget);
+
+ spin_lock(&budget->feedlock1);
+ status = stop_ts_capture(budget);
+ spin_unlock(&budget->feedlock1);
+ return status;
+}
+
+static void vpeirq(struct tasklet_struct *t)
+{
+ struct av7110 *budget = from_tasklet(budget, t, vpe_tasklet);
+ u8 *mem = (u8 *)(budget->grabbing);
+ u32 olddma = budget->ttbp;
+ u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
+ struct dvb_demux *demux = budget->full_ts ? &budget->demux : &budget->demux1;
+
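+ /* newdma is the current DMA3 write offset (read from PCI_VDP3);
+  * olddma is where software filtering stopped on the previous run.
+  */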
+ /* nearest lower position divisible by 188 */
+ newdma -= newdma % 188;
+
+ if (newdma >= TS_BUFLEN)
+ return;
+
+ budget->ttbp = newdma;
+
+ if (!budget->feeding1 || (newdma == olddma))
+ return;
+
+ /* Ensure streamed PCI data is synced to CPU */
+ dma_sync_sg_for_cpu(&budget->dev->pci->dev, budget->pt.slist,
+ budget->pt.nents, DMA_FROM_DEVICE);
+
+#ifdef RPS_DEBUG
+ /* track rps1 activity */
+ pr_info("%s(): %02x Event Counter 1 0x%04x\n", __func__, mem[olddma],
+ saa7146_read(budget->dev, EC1R) & 0x3fff);
+#endif
+
+ if (newdma > olddma) {
+ /* no wraparound, dump olddma..newdma */
+ dvb_dmx_swfilter_packets(demux, mem + olddma, (newdma - olddma) / 188);
+ } else {
+ /* wraparound, dump olddma..buflen and 0..newdma */
+ dvb_dmx_swfilter_packets(demux, mem + olddma, (TS_BUFLEN - olddma) / 188);
+ dvb_dmx_swfilter_packets(demux, mem, newdma / 188);
+ }
+}
+
+static int av7110_register(struct av7110 *av7110)
+{
+ int ret, i;
+ struct dvb_demux *dvbdemux = &av7110->demux;
+ struct dvb_demux *dvbdemux1 = &av7110->demux1;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (av7110->registered)
+ return -1;
+
+ av7110->registered = 1;
+
+ dvbdemux->priv = (void *)av7110;
+
+ for (i = 0; i < 32; i++)
+ av7110->handle2filter[i] = NULL;
+
+ dvbdemux->filternum = (av7110->full_ts) ? 256 : 32;
+ dvbdemux->feednum = (av7110->full_ts) ? 256 : 32;
+ dvbdemux->start_feed = av7110_start_feed;
+ dvbdemux->stop_feed = av7110_stop_feed;
+ dvbdemux->write_to_decoder = av7110_write_to_decoder;
+ dvbdemux->dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING);
+
+ dvb_dmx_init(&av7110->demux);
+ av7110->demux.dmx.get_stc = dvb_get_stc;
+
+ av7110->dmxdev.filternum = (av7110->full_ts) ? 256 : 32;
+ av7110->dmxdev.demux = &dvbdemux->dmx;
+ av7110->dmxdev.capabilities = 0;
+
+ dvb_dmxdev_init(&av7110->dmxdev, &av7110->dvb_adapter);
+
+ av7110->hw_frontend.source = DMX_FRONTEND_0;
+
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &av7110->hw_frontend);
+
+ if (ret < 0)
+ return ret;
+
+ av7110->mem_frontend.source = DMX_MEMORY_FE;
+
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &av7110->mem_frontend);
+
+ if (ret < 0)
+ return ret;
+
+ ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx,
+ &av7110->hw_frontend);
+ if (ret < 0)
+ return ret;
+
+ av7110_av_register(av7110);
+ av7110_ca_register(av7110);
+
+#ifdef CONFIG_DVB_AV7110_OSD
+ dvb_register_device(&av7110->dvb_adapter, &av7110->osd_dev,
+ &dvbdev_osd, av7110, DVB_DEVICE_OSD, 0);
+#endif
+
+ dvb_net_init(&av7110->dvb_adapter, &av7110->dvb_net, &dvbdemux->dmx);
+
+ if (budgetpatch) {
+  /* initialize software demux1 without its own frontend;
+   * the demux1 hardware is connected to frontend0 of demux0
+ */
+ dvbdemux1->priv = (void *)av7110;
+
+ dvbdemux1->filternum = 256;
+ dvbdemux1->feednum = 256;
+ dvbdemux1->start_feed = budget_start_feed;
+ dvbdemux1->stop_feed = budget_stop_feed;
+ dvbdemux1->write_to_decoder = NULL;
+
+ dvbdemux1->dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING);
+
+ dvb_dmx_init(&av7110->demux1);
+
+ av7110->dmxdev1.filternum = 256;
+ av7110->dmxdev1.demux = &dvbdemux1->dmx;
+ av7110->dmxdev1.capabilities = 0;
+
+ dvb_dmxdev_init(&av7110->dmxdev1, &av7110->dvb_adapter);
+
+ dvb_net_init(&av7110->dvb_adapter, &av7110->dvb_net1, &dvbdemux1->dmx);
+ pr_info("additional demux1 for budget-patch registered\n");
+ }
+ return 0;
+}
+
+static void dvb_unregister(struct av7110 *av7110)
+{
+ struct dvb_demux *dvbdemux = &av7110->demux;
+ struct dvb_demux *dvbdemux1 = &av7110->demux1;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (!av7110->registered)
+ return;
+
+ if (budgetpatch) {
+ dvb_net_release(&av7110->dvb_net1);
+ dvbdemux->dmx.close(&dvbdemux1->dmx);
+ dvb_dmxdev_release(&av7110->dmxdev1);
+ dvb_dmx_release(&av7110->demux1);
+ }
+
+ dvb_net_release(&av7110->dvb_net);
+
+ dvbdemux->dmx.close(&dvbdemux->dmx);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &av7110->hw_frontend);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &av7110->mem_frontend);
+
+ dvb_dmxdev_release(&av7110->dmxdev);
+ dvb_dmx_release(&av7110->demux);
+
+ if (av7110->fe) {
+ dvb_unregister_frontend(av7110->fe);
+ dvb_frontend_detach(av7110->fe);
+ }
+ dvb_unregister_device(av7110->osd_dev);
+ av7110_av_unregister(av7110);
+ av7110_ca_unregister(av7110);
+}
+
+/****************************************************************************
+ * I2C client commands
+ ****************************************************************************/
+
+int i2c_writereg(struct av7110 *av7110, u8 id, u8 reg, u8 val)
+{
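+ /* id is the 8-bit bus address (including the R/W bit); struct
+  * i2c_msg expects the 7-bit address, hence the division by two.
+  */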
+ u8 msg[2] = { reg, val };
+ struct i2c_msg msgs;
+
+ msgs.flags = 0;
+ msgs.addr = id / 2;
+ msgs.len = 2;
+ msgs.buf = msg;
+ return i2c_transfer(&av7110->i2c_adap, &msgs, 1);
+}
+
+u8 i2c_readreg(struct av7110 *av7110, u8 id, u8 reg)
+{
+ u8 mm1[] = {0x00};
+ u8 mm2[] = {0x00};
+ struct i2c_msg msgs[2];
+
+ msgs[0].flags = 0;
+ msgs[1].flags = I2C_M_RD;
+ msgs[0].addr = id / 2;
+ msgs[1].addr = id / 2;
+ mm1[0] = reg;
+ msgs[0].len = 1;
+ msgs[1].len = 1;
+ msgs[0].buf = mm1;
+ msgs[1].buf = mm2;
+ i2c_transfer(&av7110->i2c_adap, msgs, 2);
+
+ return mm2[0];
+}
+
+/****************************************************************************
+ * INITIALIZATION
+ ****************************************************************************/
+
+static int check_firmware(struct av7110 *av7110)
+{
+ u32 crc = 0, len = 0;
+ unsigned char *ptr;
+
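+ /* Firmware image layout: 4-byte magic "AVFW", then for each of
+  * the dpram and root images a big-endian CRC32 and length,
+  * followed by the image data itself.
+  */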
+ /* check for firmware magic */
+ ptr = av7110->bin_fw;
+ if (ptr[0] != 'A' || ptr[1] != 'V' ||
+ ptr[2] != 'F' || ptr[3] != 'W') {
+ pr_err("this is not an av7110 firmware\n");
+ return -EINVAL;
+ }
+ ptr += 4;
+
+ /* check dpram file */
+ crc = get_unaligned_be32(ptr);
+ ptr += 4;
+ len = get_unaligned_be32(ptr);
+ ptr += 4;
+ if (len >= 512) {
+ pr_err("dpram file is way too big.\n");
+ return -EINVAL;
+ }
+ if (crc != crc32_le(0, ptr, len)) {
+ pr_err("crc32 of dpram file does not match.\n");
+ return -EINVAL;
+ }
+ av7110->bin_dpram = ptr;
+ av7110->size_dpram = len;
+ ptr += len;
+
+ /* check root file */
+ crc = get_unaligned_be32(ptr);
+ ptr += 4;
+ len = get_unaligned_be32(ptr);
+ ptr += 4;
+
+ if (len <= 200000 || len >= 300000 ||
+ len > ((av7110->bin_fw + av7110->size_fw) - ptr)) {
+ pr_err("root file has strange size (%d). aborting.\n", len);
+ return -EINVAL;
+ }
+ if (crc != crc32_le(0, ptr, len)) {
+ pr_err("crc32 of root file does not match.\n");
+ return -EINVAL;
+ }
+ av7110->bin_root = ptr;
+ av7110->size_root = len;
+ return 0;
+}
+
+static void put_firmware(struct av7110 *av7110)
+{
+ vfree(av7110->bin_fw);
+}
+
+static int get_firmware(struct av7110 *av7110)
+{
+ int ret;
+ const struct firmware *fw;
+
+ /* request the av7110 firmware; this will block until someone uploads it */
+ ret = request_firmware(&fw, "dvb-ttpci-01.fw", &av7110->dev->pci->dev);
+ if (ret) {
+ if (ret == -ENOENT) {
+ pr_err("could not load firmware, file not found: dvb-ttpci-01.fw\n");
+ pr_err("usually this should be in /usr/lib/hotplug/firmware or /lib/firmware\n");
+ pr_err("and can be downloaded from https://linuxtv.org/download/dvb/firmware/\n");
+ } else {
+ pr_err("cannot request firmware (error %i)\n", ret);
+ }
+ return -EINVAL;
+ }
+
+ if (fw->size <= 200000) {
+ pr_err("this firmware is way too small.\n");
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ /* copy the firmware into a private buffer so it can be validated and uploaded later */
+ av7110->bin_fw = vmalloc(fw->size);
+ if (!av7110->bin_fw) {
+ dprintk(1, "out of memory\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(av7110->bin_fw, fw->data, fw->size);
+ av7110->size_fw = fw->size;
+ ret = check_firmware(av7110);
+ if (ret)
+ vfree(av7110->bin_fw);
+
+ release_firmware(fw);
+ return ret;
+}
+
+static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct av7110 *av7110 = fe->dvb->priv;
+ u8 pwr = 0;
+ u8 buf[4];
+ struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
+ u32 div = (p->frequency + 479500) / 125;
+
+ if (p->frequency > 2000000)
+ pwr = 3;
+ else if (p->frequency > 1800000)
+ pwr = 2;
+ else if (p->frequency > 1600000)
+ pwr = 1;
+ else if (p->frequency > 1200000)
+ pwr = 0;
+ else if (p->frequency >= 1100000)
+ pwr = 1;
+ else
+ pwr = 2;
+
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = ((div & 0x18000) >> 10) | 0x95;
+ buf[3] = (pwr << 6) | 0x30;
+
+ // NOTE: since we're using a prescaler of 2, we set the
+ // divisor frequency to 62.5kHz and divide by 125 above
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static struct ves1x93_config alps_bsrv2_config = {
+ .demod_address = 0x08,
+ .xin = 90100000UL,
+ .invert_pwm = 0,
+};
+
+static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct av7110 *av7110 = fe->dvb->priv;
+ u32 div;
+ u8 data[4];
+ struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };
+
+ div = (p->frequency + 35937500 + 31250) / 62500;
+
+ data[0] = (div >> 8) & 0x7f;
+ data[1] = div & 0xff;
+ data[2] = 0x85 | ((div >> 10) & 0x60);
+ data[3] = (p->frequency < 174000000 ? 0x88 : p->frequency < 470000000 ? 0x84 : 0x81);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static struct ves1820_config alps_tdbe2_config = {
+ .demod_address = 0x09,
+ .xin = 57840000UL,
+ .invert = 1,
+ .selagc = VES1820_SELAGC_SIGNAMPERR,
+};
+
+static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct av7110 *av7110 = fe->dvb->priv;
+ u32 div;
+ u8 data[4];
+ struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
+
+ div = p->frequency / 125;
+ data[0] = (div >> 8) & 0x7f;
+ data[1] = div & 0xff;
+ data[2] = 0x8e;
+ data[3] = 0x00;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static struct tda8083_config grundig_29504_451_config = {
+ .demod_address = 0x68,
+};
+
+static int philips_cd1516_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct av7110 *av7110 = fe->dvb->priv;
+ u32 div;
+ u32 f = p->frequency;
+ u8 data[4];
+ struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
+
+ div = (f + 36125000 + 31250) / 62500;
+
+ data[0] = (div >> 8) & 0x7f;
+ data[1] = div & 0xff;
+ data[2] = 0x8e;
+ data[3] = (f < 174000000 ? 0xa1 : f < 470000000 ? 0x92 : 0x34);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static struct ves1820_config philips_cd1516_config = {
+ .demod_address = 0x09,
+ .xin = 57840000UL,
+ .invert = 1,
+ .selagc = VES1820_SELAGC_SIGNAMPERR,
+};
+
+static int alps_tdlb7_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct av7110 *av7110 = fe->dvb->priv;
+ u32 div, pwr;
+ u8 data[4];
+ struct i2c_msg msg = { .addr = 0x60, .flags = 0, .buf = data, .len = sizeof(data) };
+
+ div = (p->frequency + 36200000) / 166666;
+
+ if (p->frequency <= 782000000)
+ pwr = 1;
+ else
+ pwr = 2;
+
+ data[0] = (div >> 8) & 0x7f;
+ data[1] = div & 0xff;
+ data[2] = 0x85;
+ data[3] = pwr << 6;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static int alps_tdlb7_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char *name)
+{
+#if IS_ENABLED(CONFIG_DVB_SP8870)
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ return request_firmware(fw, name, &av7110->dev->pci->dev);
+#else
+ return -EINVAL;
+#endif
+}
+
+static const struct sp8870_config alps_tdlb7_config = {
+ .demod_address = 0x71,
+ .request_firmware = alps_tdlb7_request_firmware,
+};
+
+static u8 nexusca_stv0297_inittab[] = {
+ 0x80, 0x01,
+ 0x80, 0x00,
+ 0x81, 0x01,
+ 0x81, 0x00,
+ 0x00, 0x09,
+ 0x01, 0x69,
+ 0x03, 0x00,
+ 0x04, 0x00,
+ 0x07, 0x00,
+ 0x08, 0x00,
+ 0x20, 0x00,
+ 0x21, 0x40,
+ 0x22, 0x00,
+ 0x23, 0x00,
+ 0x24, 0x40,
+ 0x25, 0x88,
+ 0x30, 0xff,
+ 0x31, 0x00,
+ 0x32, 0xff,
+ 0x33, 0x00,
+ 0x34, 0x50,
+ 0x35, 0x7f,
+ 0x36, 0x00,
+ 0x37, 0x20,
+ 0x38, 0x00,
+ 0x40, 0x1c,
+ 0x41, 0xff,
+ 0x42, 0x29,
+ 0x43, 0x00,
+ 0x44, 0xff,
+ 0x45, 0x00,
+ 0x46, 0x00,
+ 0x49, 0x04,
+ 0x4a, 0x00,
+ 0x4b, 0x7b,
+ 0x52, 0x30,
+ 0x55, 0xae,
+ 0x56, 0x47,
+ 0x57, 0xe1,
+ 0x58, 0x3a,
+ 0x5a, 0x1e,
+ 0x5b, 0x34,
+ 0x60, 0x00,
+ 0x63, 0x00,
+ 0x64, 0x00,
+ 0x65, 0x00,
+ 0x66, 0x00,
+ 0x67, 0x00,
+ 0x68, 0x00,
+ 0x69, 0x00,
+ 0x6a, 0x02,
+ 0x6b, 0x00,
+ 0x70, 0xff,
+ 0x71, 0x00,
+ 0x72, 0x00,
+ 0x73, 0x00,
+ 0x74, 0x0c,
+ 0x80, 0x00,
+ 0x81, 0x00,
+ 0x82, 0x00,
+ 0x83, 0x00,
+ 0x84, 0x04,
+ 0x85, 0x80,
+ 0x86, 0x24,
+ 0x87, 0x78,
+ 0x88, 0x10,
+ 0x89, 0x00,
+ 0x90, 0x01,
+ 0x91, 0x01,
+ 0xa0, 0x04,
+ 0xa1, 0x00,
+ 0xa2, 0x00,
+ 0xb0, 0x91,
+ 0xb1, 0x0b,
+ 0xc0, 0x53,
+ 0xc1, 0x70,
+ 0xc2, 0x12,
+ 0xd0, 0x00,
+ 0xd1, 0x00,
+ 0xd2, 0x00,
+ 0xd3, 0x00,
+ 0xd4, 0x00,
+ 0xd5, 0x00,
+ 0xde, 0x00,
+ 0xdf, 0x00,
+ 0x61, 0x49,
+ 0x62, 0x0b,
+ 0x53, 0x08,
+ 0x59, 0x08,
+ 0xff, 0xff,
+};
+
+static int nexusca_stv0297_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct av7110 *av7110 = fe->dvb->priv;
+ u32 div;
+ u8 data[4];
+ struct i2c_msg msg = { .addr = 0x63, .flags = 0, .buf = data, .len = sizeof(data) };
+ struct i2c_msg readmsg = { .addr = 0x63, .flags = I2C_M_RD, .buf = data, .len = 1 };
+ int i;
+
+ div = (p->frequency + 36150000 + 31250) / 62500;
+
+ data[0] = (div >> 8) & 0x7f;
+ data[1] = div & 0xff;
+ data[2] = 0xce;
+
+ if (p->frequency < 45000000)
+ return -EINVAL;
+ else if (p->frequency < 137000000)
+ data[3] = 0x01;
+ else if (p->frequency < 403000000)
+ data[3] = 0x02;
+ else if (p->frequency < 860000000)
+ data[3] = 0x04;
+ else
+ return -EINVAL;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1) {
+ pr_err("nexusca: pll transfer failed!\n");
+ return -EIO;
+ }
+
+ // wait for PLL lock
+ for (i = 0; i < 20; i++) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &readmsg, 1) == 1)
+ if (data[0] & 0x40)
+ break;
+ msleep(10);
+ }
+
+ return 0;
+}
+
+static struct stv0297_config nexusca_stv0297_config = {
+ .demod_address = 0x1C,
+ .inittab = nexusca_stv0297_inittab,
+ .invert = 1,
+ .stop_during_read = 1,
+};
+
+static int grundig_29504_401_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct av7110 *av7110 = fe->dvb->priv;
+ u32 div;
+ u8 cfg, cpump, band_select;
+ u8 data[4];
+ struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
+
+ div = (36125000 + p->frequency) / 166666;
+
+ cfg = 0x88;
+
+ if (p->frequency < 175000000)
+ cpump = 2;
+ else if (p->frequency < 390000000)
+ cpump = 1;
+ else if (p->frequency < 470000000)
+ cpump = 2;
+ else if (p->frequency < 750000000)
+ cpump = 1;
+ else
+ cpump = 3;
+
+ if (p->frequency < 175000000)
+ band_select = 0x0e;
+ else if (p->frequency < 470000000)
+ band_select = 0x05;
+ else
+ band_select = 0x03;
+
+ data[0] = (div >> 8) & 0x7f;
+ data[1] = div & 0xff;
+ data[2] = ((div >> 10) & 0x60) | cfg;
+ data[3] = (cpump << 6) | band_select;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static struct l64781_config grundig_29504_401_config = {
+ .demod_address = 0x55,
+};
+
+static int av7110_fe_lock_fix(struct av7110 *av7110, enum fe_status status)
+{
+ int ret = 0;
+ int synced = (status & FE_HAS_LOCK) ? 1 : 0;
+
+ av7110->fe_status = status;
+
+ if (av7110->fe_synced == synced)
+ return 0;
+
+ if (av7110->playing) {
+ av7110->fe_synced = synced;
+ return 0;
+ }
+
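+ /* The lock state changed: under pid_mutex, either restore the
+  * cached PIDs and rescan (lock gained) or clear the PIDs and
+  * flush the TS queue (lock lost).
+  */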
+ if (mutex_lock_interruptible(&av7110->pid_mutex))
+ return -ERESTARTSYS;
+
+ if (synced) {
+ ret = SetPIDs(av7110, av7110->pids[DMX_PES_VIDEO],
+ av7110->pids[DMX_PES_AUDIO],
+ av7110->pids[DMX_PES_TELETEXT], 0,
+ av7110->pids[DMX_PES_PCR]);
+ if (!ret)
+ ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, Scan, 0);
+ } else {
+ ret = SetPIDs(av7110, 0, 0, 0, 0, 0);
+ if (!ret) {
+ ret = av7110_fw_cmd(av7110, COMTYPE_PID_FILTER, FlushTSQueue, 0);
+ if (!ret)
+ ret = av7110_wait_msgstate(av7110, GPMQBusy);
+ }
+ }
+
+ if (!ret)
+ av7110->fe_synced = synced;
+
+ mutex_unlock(&av7110->pid_mutex);
+ return ret;
+}
+
+static int av7110_fe_set_frontend(struct dvb_frontend *fe)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret)
+ ret = av7110->fe_set_frontend(fe);
+
+ return ret;
+}
+
+static int av7110_fe_init(struct dvb_frontend *fe)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret)
+ ret = av7110->fe_init(fe);
+ return ret;
+}
+
+static int av7110_fe_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ /* call the real implementation */
+ int ret = av7110->fe_read_status(fe, status);
+
+ if (!ret)
+ if (((*status ^ av7110->fe_status) & FE_HAS_LOCK) && (*status & FE_HAS_LOCK))
+ ret = av7110_fe_lock_fix(av7110, *status);
+ return ret;
+}
+
+static int av7110_fe_diseqc_reset_overload(struct dvb_frontend *fe)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret)
+ ret = av7110->fe_diseqc_reset_overload(fe);
+ return ret;
+}
+
+static int av7110_fe_diseqc_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret) {
+ av7110->saved_master_cmd = *cmd;
+ ret = av7110->fe_diseqc_send_master_cmd(fe, cmd);
+ }
+ return ret;
+}
+
+static int av7110_fe_diseqc_send_burst(struct dvb_frontend *fe,
+ enum fe_sec_mini_cmd minicmd)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret) {
+ av7110->saved_minicmd = minicmd;
+ ret = av7110->fe_diseqc_send_burst(fe, minicmd);
+ }
+ return ret;
+}
+
+static int av7110_fe_set_tone(struct dvb_frontend *fe,
+ enum fe_sec_tone_mode tone)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret) {
+ av7110->saved_tone = tone;
+ ret = av7110->fe_set_tone(fe, tone);
+ }
+ return ret;
+}
+
+static int av7110_fe_set_voltage(struct dvb_frontend *fe,
+ enum fe_sec_voltage voltage)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret) {
+ av7110->saved_voltage = voltage;
+ ret = av7110->fe_set_voltage(fe, voltage);
+ }
+ return ret;
+}
+
+static int av7110_fe_dishnetwork_send_legacy_command(struct dvb_frontend *fe, unsigned long cmd)
+{
+ struct av7110 *av7110 = fe->dvb->priv;
+
+ int ret = av7110_fe_lock_fix(av7110, 0);
+
+ if (!ret)
+ ret = av7110->fe_dishnetwork_send_legacy_command(fe, cmd);
+ return ret;
+}
+
+static void dvb_s_recover(struct av7110 *av7110)
+{
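+ /* Re-init the frontend, replay the saved SEC state (voltage,
+  * DiSEqC command, burst, tone) and re-tune; used as the crash
+  * recovery callback for the DVB-S cards.
+  */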
+ av7110_fe_init(av7110->fe);
+
+ av7110_fe_set_voltage(av7110->fe, av7110->saved_voltage);
+ if (av7110->saved_master_cmd.msg_len) {
+ msleep(20);
+ av7110_fe_diseqc_send_master_cmd(av7110->fe, &av7110->saved_master_cmd);
+ }
+ msleep(20);
+ av7110_fe_diseqc_send_burst(av7110->fe, av7110->saved_minicmd);
+ msleep(20);
+ av7110_fe_set_tone(av7110->fe, av7110->saved_tone);
+
+ av7110_fe_set_frontend(av7110->fe);
+}
+
+static u8 read_pwm(struct av7110 *av7110)
+{
+ u8 b = 0xff;
+ u8 pwm;
+ struct i2c_msg msg[] = { { .addr = 0x50, .flags = 0, .buf = &b, .len = 1 },
+ { .addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1} };
+
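+ /* Read the tuner PWM reference byte from the card EEPROM at i2c
+  * address 0x50; fall back to 0x48 if the transfer fails or the
+  * EEPROM returns 0xff.
+  */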
+ if ((i2c_transfer(&av7110->i2c_adap, msg, 2) != 2) || (pwm == 0xff))
+ pwm = 0x48;
+
+ return pwm;
+}
+
+static int frontend_init(struct av7110 *av7110)
+{
+ int ret;
+
+ if (av7110->dev->pci->subsystem_vendor == 0x110a) {
+ switch (av7110->dev->pci->subsystem_device) {
+ case 0x0000: // Fujitsu/Siemens DVB-Cable (ves1820/Philips CD1516(??))
+ av7110->fe = dvb_attach(ves1820_attach, &philips_cd1516_config,
+ &av7110->i2c_adap, read_pwm(av7110));
+ if (av7110->fe)
+ av7110->fe->ops.tuner_ops.set_params = philips_cd1516_tuner_set_params;
+ break;
+ }
+
+ } else if (av7110->dev->pci->subsystem_vendor == 0x13c2) {
+ switch (av7110->dev->pci->subsystem_device) {
+ case 0x0000: // Hauppauge/TT WinTV DVB-S rev1.X
+ case 0x0003: // Hauppauge/TT WinTV Nexus-S Rev 2.X
+ case 0x1002: // Hauppauge/TT WinTV DVB-S rev1.3SE
+
+ // try the ALPS BSRV2 first of all
+ av7110->fe = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &av7110->i2c_adap);
+ if (av7110->fe) {
+ av7110->fe->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params;
+ av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
+ av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
+ av7110->fe->ops.set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
+ break;
+ }
+
+ // try the ALPS BSRU6 now
+ av7110->fe = dvb_attach(stv0299_attach, &alps_bsru6_config, &av7110->i2c_adap);
+ if (av7110->fe) {
+ av7110->fe->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
+ av7110->fe->tuner_priv = &av7110->i2c_adap;
+
+ av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
+ av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
+ av7110->fe->ops.set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
+ break;
+ }
+
+ // Try the grundig 29504-451
+ av7110->fe = dvb_attach(tda8083_attach, &grundig_29504_451_config, &av7110->i2c_adap);
+ if (av7110->fe) {
+ av7110->fe->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params;
+ av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
+ av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
+ av7110->fe->ops.set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
+ break;
+ }
+
+ /* Try DVB-C cards */
+ switch (av7110->dev->pci->subsystem_device) {
+ case 0x0000:
+ /* Siemens DVB-C (full-length card) VES1820/Philips CD1516 */
+ av7110->fe = dvb_attach(ves1820_attach, &philips_cd1516_config, &av7110->i2c_adap,
+ read_pwm(av7110));
+ if (av7110->fe)
+ av7110->fe->ops.tuner_ops.set_params = philips_cd1516_tuner_set_params;
+ break;
+ case 0x0003:
+ /* Hauppauge DVB-C 2.1 VES1820/ALPS TDBE2 */
+ av7110->fe = dvb_attach(ves1820_attach, &alps_tdbe2_config, &av7110->i2c_adap,
+ read_pwm(av7110));
+ if (av7110->fe)
+ av7110->fe->ops.tuner_ops.set_params = alps_tdbe2_tuner_set_params;
+ break;
+ }
+ break;
+
+ case 0x0001: // Hauppauge/TT Nexus-T premium rev1.X
+ {
+ struct dvb_frontend *fe;
+
+ // try ALPS TDLB7 first, then Grundig 29504-401
+ fe = dvb_attach(sp8870_attach, &alps_tdlb7_config, &av7110->i2c_adap);
+ if (fe) {
+ fe->ops.tuner_ops.set_params = alps_tdlb7_tuner_set_params;
+ av7110->fe = fe;
+ break;
+ }
+ }
+ fallthrough;
+
+ case 0x0008: // Hauppauge/TT DVB-T
+ // Grundig 29504-401
+ av7110->fe = dvb_attach(l64781_attach, &grundig_29504_401_config, &av7110->i2c_adap);
+ if (av7110->fe)
+ av7110->fe->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params;
+ break;
+
+ case 0x0002: // Hauppauge/TT DVB-C premium rev2.X
+
+ av7110->fe = dvb_attach(ves1820_attach, &alps_tdbe2_config, &av7110->i2c_adap, read_pwm(av7110));
+ if (av7110->fe)
+ av7110->fe->ops.tuner_ops.set_params = alps_tdbe2_tuner_set_params;
+ break;
+
+ case 0x0004: // Galaxis DVB-S rev1.3
+ /* ALPS BSRV2 */
+ av7110->fe = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &av7110->i2c_adap);
+ if (av7110->fe) {
+ av7110->fe->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params;
+ av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
+ av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
+ av7110->fe->ops.set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
+ }
+ break;
+
+ case 0x0006: /* Fujitsu-Siemens DVB-S rev 1.6 */
+ /* Grundig 29504-451 */
+ av7110->fe = dvb_attach(tda8083_attach, &grundig_29504_451_config, &av7110->i2c_adap);
+ if (av7110->fe) {
+ av7110->fe->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params;
+ av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
+ av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
+ av7110->fe->ops.set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
+ }
+ break;
+
+ case 0x000A: // Hauppauge/TT Nexus-CA rev1.X
+
+ av7110->fe = dvb_attach(stv0297_attach, &nexusca_stv0297_config, &av7110->i2c_adap);
+ if (av7110->fe) {
+ av7110->fe->ops.tuner_ops.set_params = nexusca_stv0297_tuner_set_params;
+
+ /* set TDA9819 into DVB mode */
+ saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
+ saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
+
+ /* tuner on this needs a slower i2c bus speed */
+ av7110->dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
+ break;
+ }
+ break;
+
+ case 0x000E: /* Hauppauge/TT Nexus-S rev 2.3 */
+ /* ALPS BSBE1 */
+ av7110->fe = dvb_attach(stv0299_attach, &alps_bsbe1_config, &av7110->i2c_adap);
+ if (av7110->fe) {
+ av7110->fe->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params;
+ av7110->fe->tuner_priv = &av7110->i2c_adap;
+
+ if (!dvb_attach(lnbp21_attach, av7110->fe, &av7110->i2c_adap, 0, 0)) {
+ pr_err("LNBP21 not found!\n");
+ if (av7110->fe->ops.release)
+ av7110->fe->ops.release(av7110->fe);
+ av7110->fe = NULL;
+ } else {
+ av7110->fe->ops.dishnetwork_send_legacy_command = NULL;
+ av7110->recover = dvb_s_recover;
+ }
+ }
+ break;
+ }
+ }
+
+ if (!av7110->fe) {
+ /* FIXME: propagate the failure code from the lower layers */
+ ret = -ENOMEM;
+ pr_err("A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
+ av7110->dev->pci->vendor, av7110->dev->pci->device,
+ av7110->dev->pci->subsystem_vendor, av7110->dev->pci->subsystem_device);
+ } else {
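+  /* Wrap the demod ops so av7110_fe_lock_fix() can re-sync the
+   * hardware PID filter around tuning and SEC operations.
+   */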
+ FE_FUNC_OVERRIDE(av7110->fe->ops.init, av7110->fe_init, av7110_fe_init);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.read_status, av7110->fe_read_status, av7110_fe_read_status);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_reset_overload, av7110->fe_diseqc_reset_overload, av7110_fe_diseqc_reset_overload);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_send_master_cmd, av7110->fe_diseqc_send_master_cmd, av7110_fe_diseqc_send_master_cmd);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_send_burst, av7110->fe_diseqc_send_burst, av7110_fe_diseqc_send_burst);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.set_tone, av7110->fe_set_tone, av7110_fe_set_tone);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.set_voltage, av7110->fe_set_voltage, av7110_fe_set_voltage);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.dishnetwork_send_legacy_command, av7110->fe_dishnetwork_send_legacy_command, av7110_fe_dishnetwork_send_legacy_command);
+ FE_FUNC_OVERRIDE(av7110->fe->ops.set_frontend, av7110->fe_set_frontend, av7110_fe_set_frontend);
+
+ ret = dvb_register_frontend(&av7110->dvb_adapter, av7110->fe);
+ if (ret < 0) {
+ pr_err("av7110: Frontend registration failed!\n");
+ dvb_frontend_detach(av7110->fe);
+ av7110->fe = NULL;
+ }
+ }
+ return ret;
+}
+
+/* Budgetpatch note:
+ * Original hardware design by Roberto Deza:
+ * There is a DVB_Wiki at
+ * https://linuxtv.org
+ *
+ * New software triggering design by Emard that works on
+ * Roberto Deza's original hardware:
+ *
+ * rps1 code for budgetpatch will copy internal HS event to GPIO3 pin.
+ * GPIO3 is in budget-patch hardware connected to port B VSYNC.
+ * HS is an internal event of the 7146, accessible with RPS
+ * and temporarily raised high every n lines
+ * (n is defined by the RPS_THRESH1 counter threshold).
+ * I think HS is raised high at the beginning of the n-th line
+ * and remains high until the n-th line that triggered
+ * it has been completely received. When the reception of the
+ * n-th line ends, HS is lowered.
+ *
+ * To transmit data over DMA, the 7146 needs a change of state at
+ * its port B VSYNC pin. Any change of port B VSYNC will
+ * cause some DMA data transfer, with more or less packet loss.
+ * How much is lost depends on the phase and frequency of VSYNC and
+ * on the way the 7146 is instructed to trigger on port B (defined
+ * in the DD1_INIT register, 3rd nibble from the right; valid
+ * numbers are 0-7, see the datasheet).
+ *
+ * The correct triggering can minimize packet loss;
+ * dvbtraffic should report these stable bandwidths:
+ * 22k transponder = 33814 kbit/s
+ * 27.5k transponder = 38045 kbit/s
+ * by experiment it is found that the best results
+ * (stable bandwidths and almost no packet loss)
+ * are obtained using DD1_INIT triggering number 2
+ * (Va at rising edge of VS Fa = HS x VS-failing forced toggle)
+ * and a VSYNC phase that occurs in the middle of DMA transfer
+ * (about byte 188*512=96256 in the DMA window).
+ *
+ * How to control the phase of HS is still not clear to me;
+ * it just happens to be so. It can be seen if one enables
+ * RPS_IRQ and prints Event Counter 1 in vpeirq(). Every
+ * time RPS_INTERRUPT is called, Event Counter 1 will
+ * increment. That's how the 7146 is programmed to do event
+ * counting in this budget-patch code.
+ * I *think* the HPS setting has something to do with the phase
+ * of HS but I can't be 100% sure of that.
+ *
+ * hardware debug note: a working budget card (including budget patch)
+ * with vpeirq() interrupt setup in mode "0x90" (every 64K) will
+ * generate 3 interrupts per 25-Hz DMA frame of 2*188*512 bytes
+ * and that means 3*25=75 Hz of interrupt frequency, as seen by
+ * watch cat /proc/interrupts
+ *
+ * If this frequency is 3x lower (and the data received in the DMA
+ * buffer does not start with 0x47, but in the middle of packets,
+ * whose lengths appear to be like 188 292 188 104 etc.),
+ * this means the VSYNC line is not connected in the hardware
+ * (check the PCB soldering and the pins).
+ * The same behaviour of a missing VSYNC can be duplicated on budget
+ * cards by setting DD1_INIT trigger mode 7 in the 3rd nibble.
+ */
+static int av7110_attach(struct saa7146_dev *dev,
+ struct saa7146_pci_extension_data *pci_ext)
+{
+ const int length = TS_WIDTH * TS_HEIGHT;
+ struct pci_dev *pdev = dev->pci;
+ struct av7110 *av7110;
+ struct task_struct *thread;
+ int ret, count = 0;
+
+ dprintk(4, "dev: %p\n", dev);
+
+ /* Set RPS_IRQ to 1 to track rps1 activity.
+  * Enabling this won't send any interrupt to the PC CPU.
+ */
+#define RPS_IRQ 0
+
+ if (budgetpatch == 1) {
+ budgetpatch = 0;
+  /* autodetect the presence of the budget patch:
+   * this only works if the saa7146 has recently been
+   * reset by writing MASK_31 to MC1.
+   *
+   * The RPS1 program will wait for a VBI_B event (vertical blank at
+   * port B) and reset GPIO3 after VBI_B is detected.
+   * (GPIO3 should be raised high by the CPU to test whether
+   * GPIO3 generates a vertical blank signal; in budget-patch
+   * hardware GPIO3 is connected to VSYNC_B.)
+ */
+
+ /* RESET SAA7146 */
+ saa7146_write(dev, MC1, MASK_31);
+  /* autodetection success seems to be time-dependent after reset */
+
+ /* Fix VSYNC level */
+ saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
+ /* set vsync_b triggering */
+ saa7146_write(dev, DD1_STREAM_B, 0);
+ /* port B VSYNC at rising edge */
+ saa7146_write(dev, DD1_INIT, 0x00000200);
+ saa7146_write(dev, BRS_CTRL, 0x00000000); // VBI
+ saa7146_write(dev, MC2,
+ 1 * (MASK_08 | MASK_24) | // BRS control
+ 0 * (MASK_09 | MASK_25) | // a
+ 1 * (MASK_10 | MASK_26) | // b
+ 0 * (MASK_06 | MASK_22) | // HPS_CTRL1
+ 0 * (MASK_05 | MASK_21) | // HPS_CTRL2
+ 0 * (MASK_01 | MASK_15) // DEBI
+ );
+
+ /* start writing RPS1 code from beginning */
+ count = 0;
+ /* Disable RPS1 */
+ saa7146_write(dev, MC1, MASK_29);
+ /* RPS1 timeout disable */
+ saa7146_write(dev, RPS_TOV1, 0);
+ WRITE_RPS1(CMD_PAUSE | EVT_VBI_B);
+ WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL >> 2));
+ WRITE_RPS1(GPIO3_MSK);
+ WRITE_RPS1(SAA7146_GPIO_OUTLO << 24);
+#if RPS_IRQ
+ /* issue RPS1 interrupt to increment counter */
+ WRITE_RPS1(CMD_INTERRUPT);
+#endif
+ WRITE_RPS1(CMD_STOP);
+ /* Jump to begin of RPS program as safety measure (p37) */
+ WRITE_RPS1(CMD_JUMP);
+ WRITE_RPS1(dev->d_rps1.dma_handle);
+
+#if RPS_IRQ
+ /* set event counter 1 source as RPS1 interrupt (0x03) (rE4 p53)
+  * use 0x03 to track RPS1 interrupts - increases by 1 every time gpio3 is toggled
+  * use 0x15 to track VPE interrupts - increases by 1 every time vpeirq() is called
+ */
+ saa7146_write(dev, EC1SSR, (0x03 << 2) | 3);
+ /* set event counter 1 threshold to maximum allowed value (rEC p55) */
+ saa7146_write(dev, ECT1R, 0x3fff);
+#endif
+ /* Set RPS1 Address register to point to RPS code (r108 p42) */
+ saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
+ /* Enable RPS1, (rFC p33) */
+ saa7146_write(dev, MC1, (MASK_13 | MASK_29));
+
+ mdelay(10);
+ /* now send VSYNC_B to rps1 by rising GPIO3 */
+ saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
+ mdelay(10);
+ /* if rps1 responded by lowering the GPIO3,
+ * then we have budgetpatch hardware
+ */
+ if ((saa7146_read(dev, GPIO_CTRL) & 0x10000000) == 0) {
+ budgetpatch = 1;
+ pr_info("BUDGET-PATCH DETECTED.\n");
+ }
+ /* Disable RPS1 */
+ saa7146_write(dev, MC1, (MASK_29));
+#if RPS_IRQ
+ pr_info("Event Counter 1 0x%04x\n", saa7146_read(dev, EC1R) & 0x3fff);
+#endif
+ }
+
+ /* prepare the av7110 device struct */
+ av7110 = kzalloc(sizeof(*av7110), GFP_KERNEL);
+ if (!av7110) {
+ dprintk(1, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ av7110->card_name = (char *)pci_ext->ext_priv;
+ av7110->dev = dev;
+ dev->ext_priv = av7110;
+
+ ret = get_firmware(av7110);
+ if (ret < 0)
+ goto err_kfree_0;
+
+ ret = dvb_register_adapter(&av7110->dvb_adapter, av7110->card_name,
+ THIS_MODULE, &dev->pci->dev, adapter_nr);
+ if (ret < 0)
+ goto err_put_firmware_1;
+
+ /* the Siemens DVB card needs this if you want the i2c chips
+  * to be recognized before the main driver is fully loaded
+ */
+ saa7146_write(dev, GPIO_CTRL, 0x500000);
+
+ strscpy(av7110->i2c_adap.name, pci_ext->ext_priv,
+ sizeof(av7110->i2c_adap.name));
+
+ saa7146_i2c_adapter_prepare(dev, &av7110->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120); /* 275 kHz */
+
+ ret = i2c_add_adapter(&av7110->i2c_adap);
+ if (ret < 0)
+ goto err_dvb_unregister_adapter_2;
+
+ ttpci_eeprom_parse_mac(&av7110->i2c_adap,
+ av7110->dvb_adapter.proposed_mac);
+ ret = -ENOMEM;
+
+ /* full-ts mod? */
+ if (full_ts)
+ av7110->full_ts = true;
+
+ /* check for full-ts flag in eeprom */
+ if (i2c_readreg(av7110, 0xaa, 0) == 0x4f && i2c_readreg(av7110, 0xaa, 1) == 0x45) {
+ u8 flags = i2c_readreg(av7110, 0xaa, 2);
+
+ if (flags != 0xff && (flags & 0x01))
+ av7110->full_ts = true;
+ }
+
+ if (av7110->full_ts) {
+ pr_info("full-ts mode enabled for saa7146 port B\n");
+ spin_lock_init(&av7110->feedlock1);
+ av7110->grabbing = saa7146_vmalloc_build_pgtable(pdev, length,
+ &av7110->pt);
+ if (!av7110->grabbing)
+ goto err_i2c_del_3;
+
+ saa7146_write(dev, DD1_STREAM_B, 0x00000000);
+ saa7146_write(dev, MC2, (MASK_10 | MASK_26));
+
+ saa7146_write(dev, DD1_INIT, 0x00000600);
+ saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
+
+ saa7146_write(dev, BRS_CTRL, 0x60000000);
+ saa7146_write(dev, MC2, MASK_08 | MASK_24);
+
+ /* dma3 */
+ saa7146_write(dev, PCI_BT_V1, 0x001c0000 | (saa7146_read(dev, PCI_BT_V1) & ~0x001f0000));
+ saa7146_write(dev, BASE_ODD3, 0);
+ saa7146_write(dev, BASE_EVEN3, 0);
+ saa7146_write(dev, PROT_ADDR3, TS_WIDTH * TS_HEIGHT);
+ saa7146_write(dev, PITCH3, TS_WIDTH);
+ saa7146_write(dev, BASE_PAGE3, av7110->pt.dma | ME1 | 0x90);
+ saa7146_write(dev, NUM_LINE_BYTE3, (TS_HEIGHT << 16) | TS_WIDTH);
+ saa7146_write(dev, MC2, MASK_04 | MASK_20);
+
+ tasklet_setup(&av7110->vpe_tasklet, vpeirq);
+
+ } else if (budgetpatch) {
+ spin_lock_init(&av7110->feedlock1);
+ av7110->grabbing = saa7146_vmalloc_build_pgtable(pdev, length,
+ &av7110->pt);
+ if (!av7110->grabbing)
+ goto err_i2c_del_3;
+
+ saa7146_write(dev, PCI_BT_V1, 0x1c1f101f);
+ saa7146_write(dev, BCS_CTRL, 0x80400040);
+ /* set dd1 stream a & b */
+ saa7146_write(dev, DD1_STREAM_B, 0x00000000);
+ saa7146_write(dev, DD1_INIT, 0x03000200);
+ saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
+ saa7146_write(dev, BRS_CTRL, 0x60000000);
+ saa7146_write(dev, BASE_ODD3, 0);
+ saa7146_write(dev, BASE_EVEN3, 0);
+ saa7146_write(dev, PROT_ADDR3, TS_WIDTH * TS_HEIGHT);
+ saa7146_write(dev, BASE_PAGE3, av7110->pt.dma | ME1 | 0x90);
+
+ saa7146_write(dev, PITCH3, TS_WIDTH);
+ saa7146_write(dev, NUM_LINE_BYTE3, (TS_HEIGHT << 16) | TS_WIDTH);
+
+ /* upload all */
+ saa7146_write(dev, MC2, 0x077c077c);
+ saa7146_write(dev, GPIO_CTRL, 0x000000);
+#if RPS_IRQ
+ /* set event counter 1 source as RPS1 interrupt (0x03) (rE4 p53)
+  * use 0x03 to track RPS1 interrupts - increases by 1 every time gpio3 is toggled
+  * use 0x15 to track VPE interrupts - increases by 1 every time vpeirq() is called
+ */
+ saa7146_write(dev, EC1SSR, (0x03 << 2) | 3);
+ /* set event counter 1 threshold to maximum allowed value (rEC p55) */
+ saa7146_write(dev, ECT1R, 0x3fff);
+#endif
+ /* Setup BUDGETPATCH MAIN RPS1 "program" (p35) */
+ count = 0;
+
+ /* Wait Source Line Counter Threshold (p36) */
+ WRITE_RPS1(CMD_PAUSE | EVT_HS);
+ /* Set GPIO3=1 (p42) */
+ WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL >> 2));
+ WRITE_RPS1(GPIO3_MSK);
+ WRITE_RPS1(SAA7146_GPIO_OUTHI << 24);
+#if RPS_IRQ
+ /* issue RPS1 interrupt */
+ WRITE_RPS1(CMD_INTERRUPT);
+#endif
+ /* Wait reset Source Line Counter Threshold (p36) */
+ WRITE_RPS1(CMD_PAUSE | RPS_INV | EVT_HS);
+ /* Set GPIO3=0 (p42) */
+ WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL >> 2));
+ WRITE_RPS1(GPIO3_MSK);
+ WRITE_RPS1(SAA7146_GPIO_OUTLO << 24);
+#if RPS_IRQ
+ /* issue RPS1 interrupt */
+ WRITE_RPS1(CMD_INTERRUPT);
+#endif
+ /* Jump to begin of RPS program (p37) */
+ WRITE_RPS1(CMD_JUMP);
+ WRITE_RPS1(dev->d_rps1.dma_handle);
+
+ /* Fix VSYNC level */
+ saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
+ /* Set RPS1 Address register to point to RPS code (r108 p42) */
+ saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
+ /* Set Source Line Counter Threshold, using BRS (rCC p43)
+   * It generates an HS event every TS_HEIGHT lines;
+   * this is related to TS_WIDTH set in the register
+   * NUM_LINE_BYTE3. If the NUM_LINE_BYTE low 16 bits
+ * are set to TS_WIDTH bytes (TS_WIDTH=2*188),
+ * then RPS_THRESH1 should be set to trigger
+ * every TS_HEIGHT (512) lines.
+ */
+ saa7146_write(dev, RPS_THRESH1, (TS_HEIGHT * 1) | MASK_12);
+
+ /* Enable RPS1 (rFC p33) */
+ saa7146_write(dev, MC1, (MASK_13 | MASK_29));
+
+ /* end of budgetpatch register initialization */
+ tasklet_setup(&av7110->vpe_tasklet, vpeirq);
+ } else {
+ saa7146_write(dev, PCI_BT_V1, 0x1c00101f);
+ saa7146_write(dev, BCS_CTRL, 0x80400040);
+
+ /* set dd1 stream a & b */
+ saa7146_write(dev, DD1_STREAM_B, 0x00000000);
+ saa7146_write(dev, DD1_INIT, 0x03000000);
+ saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
+
+ /* upload all */
+ saa7146_write(dev, MC2, 0x077c077c);
+ saa7146_write(dev, GPIO_CTRL, 0x000000);
+ }
+
+ tasklet_setup(&av7110->debi_tasklet, debiirq);
+ tasklet_setup(&av7110->gpio_tasklet, gpioirq);
+
+ mutex_init(&av7110->pid_mutex);
+
+ /* locks for data transfers from/to AV7110 */
+ spin_lock_init(&av7110->debilock);
+ mutex_init(&av7110->dcomlock);
+ av7110->debitype = -1;
+
+ /* default OSD window */
+ av7110->osdwin = 1;
+ mutex_init(&av7110->osd_mutex);
+
+ /* TV standard */
+ av7110->vidmode = tv_standard == 1 ? AV7110_VIDEO_MODE_NTSC
+ : AV7110_VIDEO_MODE_PAL;
+
+ /* ARM "watchdog" */
+ init_waitqueue_head(&av7110->arm_wait);
+ av7110->arm_thread = NULL;
+
+ /* allocate and init buffers */
+ av7110->debi_virt = dma_alloc_coherent(&pdev->dev, 8192,
+ &av7110->debi_bus, GFP_KERNEL);
+ if (!av7110->debi_virt)
+ goto err_saa71466_vfree_4;
+
+ av7110->iobuf = vmalloc(AVOUTLEN + AOUTLEN + BMPLEN + 4 * IPACKS);
+ if (!av7110->iobuf)
+ goto err_pci_free_5;
+
+ ret = av7110_av_init(av7110);
+ if (ret < 0)
+ goto err_iobuf_vfree_6;
+
+ /* init BMP buffer */
+ av7110->bmpbuf = av7110->iobuf + AVOUTLEN + AOUTLEN;
+ init_waitqueue_head(&av7110->bmpq);
+
+ ret = av7110_ca_init(av7110);
+ if (ret < 0)
+ goto err_av7110_av_exit_7;
+
+ /* load firmware into AV7110 cards */
+ ret = av7110_bootarm(av7110);
+ if (ret < 0)
+ goto err_av7110_ca_exit_8;
+
+ ret = av7110_firmversion(av7110);
+ if (ret < 0)
+ goto err_stop_arm_9;
+
+ if (FW_VERSION(av7110->arm_app) < 0x2501)
+ pr_warn("Warning, firmware version 0x%04x is too old. System might be unstable!\n",
+ FW_VERSION(av7110->arm_app));
+
+ thread = kthread_run(arm_thread, (void *)av7110, "arm_mon");
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ goto err_stop_arm_9;
+ }
+ av7110->arm_thread = thread;
+
+ /* set initial volume in mixer struct */
+ av7110->mixer.volume_left = volume;
+ av7110->mixer.volume_right = volume;
+
+ ret = av7110_register(av7110);
+ if (ret < 0)
+ goto err_arm_thread_stop_10;
+
+ init_av7110_av(av7110);
+
+ /* special case DVB-C: these cards have an analog tuner
+  * and need some special handling, so we have separate
+ * saa7146_ext_vv data for these...
+ */
+ ret = av7110_init_v4l(av7110);
+ if (ret < 0)
+ goto err_av7110_unregister_11;
+
+ av7110->dvb_adapter.priv = av7110;
+ ret = frontend_init(av7110);
+ if (ret < 0)
+ goto err_av7110_exit_v4l_12;
+
+ mutex_init(&av7110->ioctl_mutex);
+
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
+ av7110_ir_init(av7110);
+#endif
+ pr_info("found av7110-%d.\n", av7110_num);
+ av7110_num++;
+out:
+ return ret;
+
+err_av7110_exit_v4l_12:
+ av7110_exit_v4l(av7110);
+err_av7110_unregister_11:
+ dvb_unregister(av7110);
+err_arm_thread_stop_10:
+ av7110_arm_sync(av7110);
+err_stop_arm_9:
+ /* Nothing to do. Rejoice. */
+err_av7110_ca_exit_8:
+ av7110_ca_exit(av7110);
+err_av7110_av_exit_7:
+ av7110_av_exit(av7110);
+err_iobuf_vfree_6:
+ vfree(av7110->iobuf);
+err_pci_free_5:
+ dma_free_coherent(&pdev->dev, 8192, av7110->debi_virt,
+ av7110->debi_bus);
+err_saa71466_vfree_4:
+ if (av7110->grabbing)
+ saa7146_vfree_destroy_pgtable(pdev, av7110->grabbing, &av7110->pt);
+err_i2c_del_3:
+ i2c_del_adapter(&av7110->i2c_adap);
+err_dvb_unregister_adapter_2:
+ dvb_unregister_adapter(&av7110->dvb_adapter);
+err_put_firmware_1:
+ put_firmware(av7110);
+err_kfree_0:
+ kfree(av7110);
+ goto out;
+}
+
+static int av7110_detach(struct saa7146_dev *saa)
+{
+ struct av7110 *av7110 = saa->ext_priv;
+
+ dprintk(4, "%p\n", av7110);
+
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
+ av7110_ir_exit(av7110);
+#endif
+ if (budgetpatch || av7110->full_ts) {
+ if (budgetpatch) {
+ /* Disable RPS1 */
+ saa7146_write(saa, MC1, MASK_29);
+ /* VSYNC LOW (inactive) */
+ saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
+ }
+ saa7146_write(saa, MC1, MASK_20); /* DMA3 off */
+ SAA7146_IER_DISABLE(saa, MASK_10);
+ SAA7146_ISR_CLEAR(saa, MASK_10);
+ msleep(50);
+ tasklet_kill(&av7110->vpe_tasklet);
+ saa7146_vfree_destroy_pgtable(saa->pci, av7110->grabbing, &av7110->pt);
+ }
+ av7110_exit_v4l(av7110);
+
+ av7110_arm_sync(av7110);
+
+ tasklet_kill(&av7110->debi_tasklet);
+ tasklet_kill(&av7110->gpio_tasklet);
+
+ dvb_unregister(av7110);
+
+ SAA7146_IER_DISABLE(saa, MASK_19 | MASK_03);
+ SAA7146_ISR_CLEAR(saa, MASK_19 | MASK_03);
+
+ av7110_ca_exit(av7110);
+ av7110_av_exit(av7110);
+
+ vfree(av7110->iobuf);
+ dma_free_coherent(&saa->pci->dev, 8192, av7110->debi_virt,
+ av7110->debi_bus);
+
+ i2c_del_adapter(&av7110->i2c_adap);
+
+ dvb_unregister_adapter(&av7110->dvb_adapter);
+
+ av7110_num--;
+
+ put_firmware(av7110);
+
+ kfree(av7110);
+
+ saa->ext_priv = NULL;
+
+ return 0;
+}
+
+static void av7110_irq(struct saa7146_dev *dev, u32 *isr)
+{
+ struct av7110 *av7110 = dev->ext_priv;
+
+ //print_time("av7110_irq");
+
+ /* Note: Don't try to handle the DEBI error irq (MASK_18), in
+ * intel mode the timeout is asserted all the time...
+ */
+
+ if (*isr & MASK_19) {
+ //printk("av7110_irq: DEBI\n");
+ /* Note 1: The DEBI irq is level triggered: We must enable it
+ * only after we started a DMA xfer, and disable it here
+ * immediately, or it will be signalled all the time while
+ * DEBI is idle.
+ * Note 2: You would think that an irq which is masked is
+ * not signalled by the hardware. Not so for the SAA7146:
+ * An irq is signalled as long as the corresponding bit
+ * in the ISR is set, and disabling irqs just prevents the
+ * hardware from setting the ISR bit. This means a) that we
+ * must clear the ISR *after* disabling the irq (which is why
+ * we must do it here even though saa7146_core did it already),
+ * and b) that if we were to disable an edge triggered irq
+ * (like the gpio irqs sadly are) temporarily we would likely
+  * lose some. This sucks :-(
+ */
+ SAA7146_IER_DISABLE(av7110->dev, MASK_19);
+ SAA7146_ISR_CLEAR(av7110->dev, MASK_19);
+ tasklet_schedule(&av7110->debi_tasklet);
+ }
+
+ if (*isr & MASK_03) {
+ //printk("av7110_irq: GPIO\n");
+ tasklet_schedule(&av7110->gpio_tasklet);
+ }
+
+ if (*isr & MASK_10)
+ tasklet_schedule(&av7110->vpe_tasklet);
+}
+
+static struct saa7146_extension av7110_extension_driver;
+
+#define MAKE_AV7110_INFO(x_var, x_name) \
+static struct saa7146_pci_extension_data x_var = { \
+ .ext_priv = x_name, \
+ .ext = &av7110_extension_driver }
+
+MAKE_AV7110_INFO(tts_1_X_fsc, "Technotrend/Hauppauge WinTV DVB-S rev1.X or Fujitsu Siemens DVB-C");
+MAKE_AV7110_INFO(ttt_1_X, "Technotrend/Hauppauge WinTV DVB-T rev1.X");
+MAKE_AV7110_INFO(ttc_1_X, "Technotrend/Hauppauge WinTV Nexus-CA rev1.X");
+MAKE_AV7110_INFO(ttc_2_X, "Technotrend/Hauppauge WinTV DVB-C rev2.X");
+MAKE_AV7110_INFO(tts_2_X, "Technotrend/Hauppauge WinTV Nexus-S rev2.X");
+MAKE_AV7110_INFO(tts_2_3, "Technotrend/Hauppauge WinTV Nexus-S rev2.3");
+MAKE_AV7110_INFO(tts_1_3se, "Technotrend/Hauppauge WinTV DVB-S rev1.3 SE");
+MAKE_AV7110_INFO(ttt, "Technotrend/Hauppauge DVB-T");
+MAKE_AV7110_INFO(fsc, "Fujitsu Siemens DVB-C");
+MAKE_AV7110_INFO(fss, "Fujitsu Siemens DVB-S rev1.6");
+MAKE_AV7110_INFO(gxs_1_3, "Galaxis DVB-S rev1.3");
+
+static const struct pci_device_id pci_tbl[] = {
+ MAKE_EXTENSION_PCI(fsc, 0x110a, 0x0000),
+ MAKE_EXTENSION_PCI(tts_1_X_fsc, 0x13c2, 0x0000),
+ MAKE_EXTENSION_PCI(ttt_1_X, 0x13c2, 0x0001),
+ MAKE_EXTENSION_PCI(ttc_2_X, 0x13c2, 0x0002),
+ MAKE_EXTENSION_PCI(tts_2_X, 0x13c2, 0x0003),
+ MAKE_EXTENSION_PCI(gxs_1_3, 0x13c2, 0x0004),
+ MAKE_EXTENSION_PCI(fss, 0x13c2, 0x0006),
+ MAKE_EXTENSION_PCI(ttt, 0x13c2, 0x0008),
+ MAKE_EXTENSION_PCI(ttc_1_X, 0x13c2, 0x000a),
+ MAKE_EXTENSION_PCI(tts_2_3, 0x13c2, 0x000e),
+ MAKE_EXTENSION_PCI(tts_1_3se, 0x13c2, 0x1002),
+
+// MAKE_EXTENSION_PCI(???, 0x13c2, 0x0005), UNDEFINED CARD // Technisat SkyStar1
+// MAKE_EXTENSION_PCI(???, 0x13c2, 0x0009), UNDEFINED CARD // TT/Hauppauge WinTV Nexus-CA v???
+
+ {
+ .vendor = 0,
+ }
+};
+
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+static struct saa7146_extension av7110_extension_driver = {
+ .name = "av7110",
+ .flags = SAA7146_USE_I2C_IRQ,
+
+ .module = THIS_MODULE,
+ .pci_tbl = &pci_tbl[0],
+ .attach = av7110_attach,
+ .detach = av7110_detach,
+
+ .irq_mask = MASK_19 | MASK_03 | MASK_10,
+ .irq_func = av7110_irq,
+};
+
+static int __init av7110_init(void)
+{
+ return saa7146_register_extension(&av7110_extension_driver);
+}
+
+static void __exit av7110_exit(void)
+{
+ saa7146_unregister_extension(&av7110_extension_driver);
+}
+
+module_init(av7110_init);
+module_exit(av7110_exit);
+
+MODULE_DESCRIPTION("driver for the SAA7146 based AV7110 PCI DVB cards by Siemens, Technotrend, Hauppauge");
+MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/av7110/av7110.h b/drivers/staging/media/av7110/av7110.h
new file mode 100644
index 000000000000..b584754f4be0
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _AV7110_H_
+#define _AV7110_H_
+
+#include <linux/interrupt.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/time.h>
+
+#include <linux/dvb/video.h>
+#include <linux/dvb/audio.h>
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/ca.h>
+#include <linux/dvb/osd.h>
+#include <linux/dvb/net.h>
+#include <linux/mutex.h>
+
+#include <media/dvbdev.h>
+#include <media/demux.h>
+#include <media/dvb_demux.h>
+#include <media/dmxdev.h>
+#include "dvb_filter.h"
+#include <media/dvb_net.h>
+#include <media/dvb_ringbuffer.h>
+#include <media/dvb_frontend.h>
+#include "ves1820.h"
+#include "ves1x93.h"
+#include "stv0299.h"
+#include "tda8083.h"
+#include "sp8870.h"
+#include "stv0297.h"
+#include "l64781.h"
+
+#include <media/drv-intf/saa7146_vv.h>
+
+#define ANALOG_TUNER_VES1820 1
+#define ANALOG_TUNER_STV0297 2
+
+extern int av7110_debug;
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define dprintk(level, fmt, arg...) do { \
+ if ((level) & av7110_debug) \
+ pr_info("%s(): " fmt, __func__, ##arg); \
+} while (0)
+
+#define MAXFILT 32
+
+enum {AV_PES_STREAM, PS_STREAM, TS_STREAM, PES_STREAM};
+
+enum av7110_video_mode {
+ AV7110_VIDEO_MODE_PAL = 0,
+ AV7110_VIDEO_MODE_NTSC = 1
+};
+
+struct av7110_p2t {
+ u8 pes[TS_SIZE];
+ u8 counter;
+ long pos;
+ int frags;
+ struct dvb_demux_feed *feed;
+};
+
+/* video MPEG decoder events: */
+/* (code copied from dvb_frontend.c, should maybe be factored out...) */
+#define MAX_VIDEO_EVENT 8
+struct dvb_video_events {
+ struct video_event events[MAX_VIDEO_EVENT];
+ int eventw;
+ int eventr;
+ int overflow;
+ wait_queue_head_t wait_queue;
+ spinlock_t lock;
+};
+
+struct av7110;
+
+/* infrared remote control */
+struct infrared {
+ struct rc_dev *rcdev;
+ char input_phys[32];
+ u32 ir_config;
+};
+
+#define MAX_CI_SLOTS 2
+
+/* place to store all the necessary device information */
+struct av7110 {
+ /* devices */
+
+ struct dvb_device dvb_dev;
+ struct dvb_net dvb_net;
+
+ struct video_device v4l_dev;
+ struct video_device vbi_dev;
+
+ struct saa7146_dev *dev;
+
+ struct i2c_adapter i2c_adap;
+
+ char *card_name;
+
+ /* support for analog module of dvb-c */
+ int analog_tuner_flags;
+ int current_input;
+ u32 current_freq;
+
+ struct tasklet_struct debi_tasklet;
+ struct tasklet_struct gpio_tasklet;
+
+ int adac_type; /* audio DAC type */
+#define DVB_ADAC_TI 0
+#define DVB_ADAC_CRYSTAL 1
+#define DVB_ADAC_MSP34x0 2
+#define DVB_ADAC_MSP34x5 3
+#define DVB_ADAC_NONE -1
+
+ /* buffers */
+
+ void *iobuf; /* memory for all buffers */
+ struct dvb_ringbuffer avout; /* buffer for video or A/V mux */
+#define AVOUTLEN (128 * 1024)
+ struct dvb_ringbuffer aout; /* buffer for audio */
+#define AOUTLEN (64 * 1024)
+ void *bmpbuf;
+#define BMPLEN (8 * 32768 + 1024)
+
+ /* bitmap buffers and states */
+
+ int bmpp;
+ int bmplen;
+ volatile int bmp_state;
+#define BMP_NONE 0
+#define BMP_LOADING 1
+#define BMP_LOADED 2
+ wait_queue_head_t bmpq;
+
+ /* DEBI and polled command interface */
+
+ spinlock_t debilock;
+ struct mutex dcomlock;
+ volatile int debitype;
+ volatile int debilen;
+
+ /* Recording and playback flags */
+
+ int rec_mode;
+ int playing;
+#define RP_NONE 0
+#define RP_VIDEO 1
+#define RP_AUDIO 2
+#define RP_AV 3
+
+ /* OSD */
+
+ int osdwin; /* currently active window */
+ u16 osdbpp[8];
+ struct mutex osd_mutex;
+
+ /* CA */
+
+ struct ca_slot_info ci_slot[MAX_CI_SLOTS];
+
+ enum av7110_video_mode vidmode;
+ struct dmxdev dmxdev;
+ struct dvb_demux demux;
+
+ struct dmx_frontend hw_frontend;
+ struct dmx_frontend mem_frontend;
+
+ /* for budget mode demux1 */
+ struct dmxdev dmxdev1;
+ struct dvb_demux demux1;
+ struct dvb_net dvb_net1;
+ spinlock_t feedlock1;
+ int feeding1;
+ u32 ttbp;
+ unsigned char *grabbing;
+ struct saa7146_pgtable pt;
+ struct tasklet_struct vpe_tasklet;
+ bool full_ts;
+
+ int fe_synced;
+ struct mutex pid_mutex;
+
+ int video_blank;
+ struct video_status videostate;
+ u16 display_panscan;
+ int display_ar;
+ int trickmode;
+#define TRICK_NONE 0
+#define TRICK_FAST 1
+#define TRICK_SLOW 2
+#define TRICK_FREEZE 3
+ struct audio_status audiostate;
+
+ struct dvb_demux_filter *handle2filter[32];
+ struct av7110_p2t p2t_filter[MAXFILT];
+ struct dvb_filter_pes2ts p2t[2];
+ struct ipack ipack[2];
+ u8 *kbuf[2];
+
+ int sinfo;
+ int feeding;
+
+ int arm_errors;
+ int registered;
+
+ /* AV711X */
+
+ u32 arm_fw;
+ u32 arm_rtsl;
+ u32 arm_vid;
+ u32 arm_app;
+ u32 avtype;
+ int arm_ready;
+ struct task_struct *arm_thread;
+ wait_queue_head_t arm_wait;
+ u16 arm_loops;
+
+ void *debi_virt;
+ dma_addr_t debi_bus;
+
+ u16 pids[DMX_PES_OTHER];
+
+ struct dvb_ringbuffer ci_rbuffer;
+ struct dvb_ringbuffer ci_wbuffer;
+
+ struct audio_mixer mixer;
+
+ struct dvb_adapter dvb_adapter;
+ struct dvb_device *video_dev;
+ struct dvb_device *audio_dev;
+ struct dvb_device *ca_dev;
+ struct dvb_device *osd_dev;
+
+ struct dvb_video_events video_events;
+ video_size_t video_size;
+
+ u16 wssMode;
+ u16 wssData;
+
+ struct infrared ir;
+
+ /* firmware stuff */
+ unsigned char *bin_fw;
+ unsigned long size_fw;
+
+ unsigned char *bin_dpram;
+ unsigned long size_dpram;
+
+ unsigned char *bin_root;
+ unsigned long size_root;
+
+ struct dvb_frontend *fe;
+ enum fe_status fe_status;
+
+ struct mutex ioctl_mutex;
+
+ /* crash recovery */
+ void (*recover)(struct av7110 *av7110);
+ enum fe_sec_voltage saved_voltage;
+ enum fe_sec_tone_mode saved_tone;
+ struct dvb_diseqc_master_cmd saved_master_cmd;
+ enum fe_sec_mini_cmd saved_minicmd;
+
+ int (*fe_init)(struct dvb_frontend *fe);
+ int (*fe_read_status)(struct dvb_frontend *fe, enum fe_status *status);
+ int (*fe_diseqc_reset_overload)(struct dvb_frontend *fe);
+ int (*fe_diseqc_send_master_cmd)(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd);
+ int (*fe_diseqc_send_burst)(struct dvb_frontend *fe,
+ enum fe_sec_mini_cmd minicmd);
+ int (*fe_set_tone)(struct dvb_frontend *fe,
+ enum fe_sec_tone_mode tone);
+ int (*fe_set_voltage)(struct dvb_frontend *fe,
+ enum fe_sec_voltage voltage);
+ int (*fe_dishnetwork_send_legacy_command)(struct dvb_frontend *fe,
+ unsigned long cmd);
+ int (*fe_set_frontend)(struct dvb_frontend *fe);
+};
+
+int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
+ u16 subpid, u16 pcrpid);
+
+void av7110_ir_handler(struct av7110 *av7110, u32 ircom);
+int av7110_set_ir_config(struct av7110 *av7110);
+int av7110_ir_init(struct av7110 *av7110);
+void av7110_ir_exit(struct av7110 *av7110);
+
+/* msp3400 i2c subaddresses */
+#define MSP_WR_DEM 0x10
+#define MSP_RD_DEM 0x11
+#define MSP_WR_DSP 0x12
+#define MSP_RD_DSP 0x13
+
+int i2c_writereg(struct av7110 *av7110, u8 id, u8 reg, u8 val);
+u8 i2c_readreg(struct av7110 *av7110, u8 id, u8 reg);
+int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val);
+
+int av7110_init_analog_module(struct av7110 *av7110);
+int av7110_init_v4l(struct av7110 *av7110);
+int av7110_exit_v4l(struct av7110 *av7110);
+
+#endif /* _AV7110_H_ */
diff --git a/drivers/staging/media/av7110/av7110_av.c b/drivers/staging/media/av7110/av7110_av.c
new file mode 100644
index 000000000000..2993ac43c49c
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_av.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * driver for the SAA7146 based AV7110 cards
+ * - audio and video MPEG decoder stuff
+ *
+ * Copyright (C) 1999-2002 Ralph Metzler
+ * & Marcus Metzler for convergence integrated media GmbH
+ *
+ * originally based on code by:
+ * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de>
+ *
+ * the project's page is at https://linuxtv.org
+ */
+
+#include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+
+#include "av7110.h"
+#include "av7110_hw.h"
+#include "av7110_av.h"
+#include "av7110_ipack.h"
+
+/* MPEG-2 (ISO 13818 / H.222.0) stream types */
+#define PROG_STREAM_MAP 0xBC
+#define PRIVATE_STREAM1 0xBD
+#define PADDING_STREAM 0xBE
+#define PRIVATE_STREAM2 0xBF
+#define AUDIO_STREAM_S 0xC0
+#define AUDIO_STREAM_E 0xDF
+#define VIDEO_STREAM_S 0xE0
+#define VIDEO_STREAM_E 0xEF
+#define ECM_STREAM 0xF0
+#define EMM_STREAM 0xF1
+#define DSM_CC_STREAM 0xF2
+#define ISO13522_STREAM 0xF3
+#define PROG_STREAM_DIR 0xFF
+
+#define PTS_DTS_FLAGS 0xC0
+
+//pts_dts flags
+#define PTS_ONLY 0x80
+#define PTS_DTS 0xC0
+#define TS_SIZE 188
+#define TRANS_ERROR 0x80
+#define PAY_START 0x40
+#define TRANS_PRIO 0x20
+#define PID_MASK_HI 0x1F
+//flags
+#define TRANS_SCRMBL1 0x80
+#define TRANS_SCRMBL2 0x40
+#define ADAPT_FIELD 0x20
+#define PAYLOAD 0x10
+#define COUNT_MASK 0x0F
+
+// adaptation flags
+#define DISCON_IND 0x80
+#define RAND_ACC_IND 0x40
+#define ES_PRI_IND 0x20
+#define PCR_FLAG 0x10
+#define OPCR_FLAG 0x08
+#define SPLICE_FLAG 0x04
+#define TRANS_PRIV 0x02
+#define ADAP_EXT_FLAG 0x01
+
+// adaptation extension flags
+#define LTW_FLAG 0x80
+#define PIECE_RATE 0x40
+#define SEAM_SPLICE 0x20
+
+static void p_to_t(u8 const *buf, long length, u16 pid,
+ u8 *counter, struct dvb_demux_feed *feed);
+static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len);
+
+int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
+{
+ struct dvb_demux_feed *dvbdmxfeed = p2t->priv;
+
+ if (!(dvbdmxfeed->ts_type & TS_PACKET))
+ return 0;
+ if (buf[3] == 0xe0) { // video PES do not have a length in TS
+ buf[4] = 0;
+ buf[5] = 0;
+ }
+ if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)
+ return dvbdmxfeed->cb.ts(buf, len, NULL, 0,
+ &dvbdmxfeed->feed.ts, NULL);
+ else
+ return dvb_filter_pes2ts(p2t, buf, len, 1);
+}
+
+static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)priv;
+
+ dvbdmxfeed->cb.ts(data, 188, NULL, 0,
+ &dvbdmxfeed->feed.ts, NULL);
+ return 0;
+}
+
+int av7110_av_start_record(struct av7110 *av7110, int av,
+ struct dvb_demux_feed *dvbdmxfeed)
+{
+ int ret = 0;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ dprintk(2, "av7110:%p, dvb_demux_feed:%p\n", av7110, dvbdmxfeed);
+
+ if (av7110->playing || (av7110->rec_mode & av))
+ return -EBUSY;
+ av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
+ dvbdmx->recording = 1;
+ av7110->rec_mode |= av;
+
+ switch (av7110->rec_mode) {
+ case RP_AUDIO:
+ dvb_filter_pes2ts_init(&av7110->p2t[0],
+ dvbdmx->pesfilter[0]->pid,
+ dvb_filter_pes2ts_cb,
+ (void *)dvbdmx->pesfilter[0]);
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AudioPES, 0);
+ break;
+
+ case RP_VIDEO:
+ dvb_filter_pes2ts_init(&av7110->p2t[1],
+ dvbdmx->pesfilter[1]->pid,
+ dvb_filter_pes2ts_cb,
+ (void *)dvbdmx->pesfilter[1]);
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, VideoPES, 0);
+ break;
+
+ case RP_AV:
+ dvb_filter_pes2ts_init(&av7110->p2t[0],
+ dvbdmx->pesfilter[0]->pid,
+ dvb_filter_pes2ts_cb,
+ (void *)dvbdmx->pesfilter[0]);
+ dvb_filter_pes2ts_init(&av7110->p2t[1],
+ dvbdmx->pesfilter[1]->pid,
+ dvb_filter_pes2ts_cb,
+ (void *)dvbdmx->pesfilter[1]);
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AV_PES, 0);
+ break;
+ }
+ return ret;
+}
+
+int av7110_av_start_play(struct av7110 *av7110, int av)
+{
+ int ret = 0;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (av7110->rec_mode)
+ return -EBUSY;
+ if (av7110->playing & av)
+ return -EBUSY;
+
+ av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
+
+ if (av7110->playing == RP_NONE) {
+ av7110_ipack_reset(&av7110->ipack[0]);
+ av7110_ipack_reset(&av7110->ipack[1]);
+ }
+
+ av7110->playing |= av;
+ switch (av7110->playing) {
+ case RP_AUDIO:
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AudioPES, 0);
+ break;
+ case RP_VIDEO:
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, VideoPES, 0);
+ av7110->sinfo = 0;
+ break;
+ case RP_AV:
+ av7110->sinfo = 0;
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0);
+ break;
+ }
+ return ret;
+}
+
+int av7110_av_stop(struct av7110 *av7110, int av)
+{
+ int ret = 0;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (!(av7110->playing & av) && !(av7110->rec_mode & av))
+ return 0;
+ av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
+ if (av7110->playing) {
+ av7110->playing &= ~av;
+ switch (av7110->playing) {
+ case RP_AUDIO:
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AudioPES, 0);
+ break;
+ case RP_VIDEO:
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, VideoPES, 0);
+ break;
+ case RP_NONE:
+ ret = av7110_set_vidmode(av7110, av7110->vidmode);
+ break;
+ }
+ } else {
+ av7110->rec_mode &= ~av;
+ switch (av7110->rec_mode) {
+ case RP_AUDIO:
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AudioPES, 0);
+ break;
+ case RP_VIDEO:
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, VideoPES, 0);
+ break;
+ case RP_NONE:
+ break;
+ }
+ }
+ return ret;
+}
+
+int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen)
+{
+ int len;
+ u32 sync;
+ u16 blen;
+
+ if (!dlen) {
+ wake_up(&buf->queue);
+ return -1;
+ }
+ while (1) {
+ len = dvb_ringbuffer_avail(buf);
+ if (len < 6) {
+ wake_up(&buf->queue);
+ return -1;
+ }
+ sync = DVB_RINGBUFFER_PEEK(buf, 0) << 24;
+ sync |= DVB_RINGBUFFER_PEEK(buf, 1) << 16;
+ sync |= DVB_RINGBUFFER_PEEK(buf, 2) << 8;
+ sync |= DVB_RINGBUFFER_PEEK(buf, 3);
+
+ if (((sync & ~0x0f) == 0x000001e0) ||
+ ((sync & ~0x1f) == 0x000001c0) ||
+ (sync == 0x000001bd))
+ break;
+ pr_info("resync\n");
+ DVB_RINGBUFFER_SKIP(buf, 1);
+ }
+ blen = DVB_RINGBUFFER_PEEK(buf, 4) << 8;
+ blen |= DVB_RINGBUFFER_PEEK(buf, 5);
+ blen += 6;
+ if (len < blen || blen > dlen) {
+ //pr_info("buffer empty - avail %d blen %u dlen %d\n", len, blen, dlen);
+ wake_up(&buf->queue);
+ return -1;
+ }
+
+ dvb_ringbuffer_read(buf, dest, (size_t)blen);
+
+ dprintk(2, "pread=0x%08lx, pwrite=0x%08lx\n",
+ (unsigned long)buf->pread, (unsigned long)buf->pwrite);
+ wake_up(&buf->queue);
+ return blen;
+}
+
+int av7110_set_volume(struct av7110 *av7110, unsigned int volleft,
+ unsigned int volright)
+{
+ unsigned int vol, val, balance = 0;
+ int err;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ av7110->mixer.volume_left = volleft;
+ av7110->mixer.volume_right = volright;
+
+ switch (av7110->adac_type) {
+ case DVB_ADAC_TI:
+ volleft = (volleft * 256) / 1036;
+ volright = (volright * 256) / 1036;
+ if (volleft > 0x3f)
+ volleft = 0x3f;
+ if (volright > 0x3f)
+ volright = 0x3f;
+ err = SendDAC(av7110, 3, 0x80 + volleft);
+ if (err)
+ return err;
+ return SendDAC(av7110, 4, volright);
+
+ case DVB_ADAC_CRYSTAL:
+ volleft = 127 - volleft / 2;
+ volright = 127 - volright / 2;
+ i2c_writereg(av7110, 0x20, 0x03, volleft);
+ i2c_writereg(av7110, 0x20, 0x04, volright);
+ return 0;
+
+ case DVB_ADAC_MSP34x0:
+ vol = (volleft > volright) ? volleft : volright;
+ val = (vol * 0x73 / 255) << 8;
+ if (vol > 0)
+ balance = ((volright - volleft) * 127) / vol;
+ msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
+		msp_writereg(av7110, MSP_WR_DSP, 0x0006, val); /* headphones */
+ return 0;
+
+ case DVB_ADAC_MSP34x5:
+ vol = (volleft > volright) ? volleft : volright;
+ val = (vol * 0x73 / 255) << 8;
+ if (vol > 0)
+ balance = ((volright - volleft) * 127) / vol;
+ msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
+ return 0;
+ }
+
+ return 0;
+}
+
+int av7110_set_vidmode(struct av7110 *av7110, enum av7110_video_mode mode)
+{
+ int ret;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, LoadVidCode, 1, mode);
+
+ if (!ret && !av7110->playing) {
+ ret = ChangePIDs(av7110, av7110->pids[DMX_PES_VIDEO],
+ av7110->pids[DMX_PES_AUDIO],
+ av7110->pids[DMX_PES_TELETEXT],
+ 0, av7110->pids[DMX_PES_PCR]);
+ if (!ret)
+ ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, Scan, 0);
+ }
+ return ret;
+}
+
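+/* map the frame_rate_code from the MPEG sequence header to a PAL/NTSC video mode */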
+static enum av7110_video_mode sw2mode[16] = {
+ AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_NTSC,
+ AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_PAL,
+ AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_NTSC,
+ AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_NTSC,
+ AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
+ AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
+ AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
+ AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
+};
+
+static int get_video_format(struct av7110 *av7110, u8 *buf, int count)
+{
+ int i;
+ int hsize, vsize;
+ int sw;
+ u8 *p;
+ int ret = 0;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (av7110->sinfo)
+ return 0;
+ for (i = 7; i < count - 10; i++) {
+ p = buf + i;
+ if (p[0] || p[1] || p[2] != 0x01 || p[3] != 0xb3)
+ continue;
+ p += 4;
+ hsize = ((p[1] & 0xF0) >> 4) | (p[0] << 4);
+ vsize = ((p[1] & 0x0F) << 8) | (p[2]);
+ sw = (p[3] & 0x0F);
+ ret = av7110_set_vidmode(av7110, sw2mode[sw]);
+ if (!ret) {
+ dprintk(2, "playback %dx%d fr=%d\n", hsize, vsize, sw);
+ av7110->sinfo = 1;
+ }
+ break;
+ }
+ return ret;
+}
+
+/****************************************************************************
+ * I/O buffer management and control
+ ****************************************************************************/
+
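+/*
+ * Blocking ring buffer write: wait until at least 2048 bytes are free, then
+ * copy as much as currently fits; returns the number of bytes written.
+ */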
+static inline long aux_ring_buffer_write(struct dvb_ringbuffer *rbuf,
+ const u8 *buf, unsigned long count)
+{
+ unsigned long todo = count;
+ int free;
+
+ while (todo > 0) {
+ if (dvb_ringbuffer_free(rbuf) < 2048) {
+ if (wait_event_interruptible(rbuf->queue,
+ (dvb_ringbuffer_free(rbuf) >= 2048)))
+ return count - todo;
+ }
+ free = dvb_ringbuffer_free(rbuf);
+ if (free > todo)
+ free = todo;
+ dvb_ringbuffer_write(rbuf, buf, free);
+ todo -= free;
+ buf += free;
+ }
+
+ return count - todo;
+}
+
+static void play_video_cb(u8 *buf, int count, void *priv)
+{
+ struct av7110 *av7110 = (struct av7110 *)priv;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if ((buf[3] & 0xe0) == 0xe0) {
+ get_video_format(av7110, buf, count);
+ aux_ring_buffer_write(&av7110->avout, buf, count);
+ } else {
+ aux_ring_buffer_write(&av7110->aout, buf, count);
+ }
+}
+
+static void play_audio_cb(u8 *buf, int count, void *priv)
+{
+ struct av7110 *av7110 = (struct av7110 *)priv;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ aux_ring_buffer_write(&av7110->aout, buf, count);
+}
+
+#define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096)
+
+static ssize_t ts_play(struct av7110 *av7110, const char __user *buf,
+ unsigned long count, int nonblock, int type)
+{
+ struct dvb_ringbuffer *rb;
+ u8 *kb;
+ unsigned long todo = count;
+
+ dprintk(2, "type %d cnt %lu\n", type, count);
+
+ rb = (type) ? &av7110->avout : &av7110->aout;
+ kb = av7110->kbuf[type];
+
+ if (!kb)
+ return -ENOBUFS;
+
+ if (nonblock && !FREE_COND_TS)
+ return -EWOULDBLOCK;
+
+ while (todo >= TS_SIZE) {
+ if (!FREE_COND_TS) {
+ if (nonblock)
+ return count - todo;
+ if (wait_event_interruptible(rb->queue, FREE_COND_TS))
+ return count - todo;
+ }
+ if (copy_from_user(kb, buf, TS_SIZE))
+ return -EFAULT;
+ write_ts_to_decoder(av7110, type, kb, TS_SIZE);
+ todo -= TS_SIZE;
+ buf += TS_SIZE;
+ }
+
+ return count - todo;
+}
+
+#define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \
+ dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
+
+static ssize_t dvb_play(struct av7110 *av7110, const char __user *buf,
+ unsigned long count, int nonblock, int type)
+{
+ unsigned long todo = count, n;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (!av7110->kbuf[type])
+ return -ENOBUFS;
+
+ if (nonblock && !FREE_COND)
+ return -EWOULDBLOCK;
+
+ while (todo > 0) {
+ if (!FREE_COND) {
+ if (nonblock)
+ return count - todo;
+ if (wait_event_interruptible(av7110->avout.queue,
+ FREE_COND))
+ return count - todo;
+ }
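+		/* repack in chunks of at most 2 * IPACKS bytes (the size of kbuf[type]) */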
+ n = todo;
+ if (n > IPACKS * 2)
+ n = IPACKS * 2;
+ if (copy_from_user(av7110->kbuf[type], buf, n))
+ return -EFAULT;
+ av7110_ipack_instant_repack(av7110->kbuf[type], n,
+ &av7110->ipack[type]);
+ todo -= n;
+ buf += n;
+ }
+ return count - todo;
+}
+
+static ssize_t dvb_play_kernel(struct av7110 *av7110, const u8 *buf,
+ unsigned long count, int nonblock, int type)
+{
+ unsigned long todo = count, n;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (!av7110->kbuf[type])
+ return -ENOBUFS;
+
+ if (nonblock && !FREE_COND)
+ return -EWOULDBLOCK;
+
+ while (todo > 0) {
+ if (!FREE_COND) {
+ if (nonblock)
+ return count - todo;
+ if (wait_event_interruptible(av7110->avout.queue,
+ FREE_COND))
+ return count - todo;
+ }
+ n = todo;
+ if (n > IPACKS * 2)
+ n = IPACKS * 2;
+ av7110_ipack_instant_repack(buf, n, &av7110->ipack[type]);
+ todo -= n;
+ buf += n;
+ }
+ return count - todo;
+}
+
+static ssize_t dvb_aplay(struct av7110 *av7110, const char __user *buf,
+ unsigned long count, int nonblock, int type)
+{
+ unsigned long todo = count, n;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (!av7110->kbuf[type])
+ return -ENOBUFS;
+ if (nonblock && dvb_ringbuffer_free(&av7110->aout) < 20 * 1024)
+ return -EWOULDBLOCK;
+
+ while (todo > 0) {
+ if (dvb_ringbuffer_free(&av7110->aout) < 20 * 1024) {
+ if (nonblock)
+ return count - todo;
+ if (wait_event_interruptible(av7110->aout.queue,
+ (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)))
+ return count - todo;
+ }
+ n = todo;
+ if (n > IPACKS * 2)
+ n = IPACKS * 2;
+ if (copy_from_user(av7110->kbuf[type], buf, n))
+ return -EFAULT;
+ av7110_ipack_instant_repack(av7110->kbuf[type], n,
+ &av7110->ipack[type]);
+ todo -= n;
+ buf += n;
+ }
+ return count - todo;
+}
+
+void av7110_p2t_init(struct av7110_p2t *p, struct dvb_demux_feed *feed)
+{
+ memset(p->pes, 0, TS_SIZE);
+ p->counter = 0;
+ p->pos = 0;
+ p->frags = 0;
+ if (feed)
+ p->feed = feed;
+}
+
+static void clear_p2t(struct av7110_p2t *p)
+{
+ memset(p->pes, 0, TS_SIZE);
+// p->counter = 0;
+ p->pos = 0;
+ p->frags = 0;
+}
+
+static int find_pes_header(u8 const *buf, long length, int *frags)
+{
+ int c = 0;
+ int found = 0;
+
+ *frags = 0;
+
+ while (c < length - 3 && !found) {
+ if (buf[c] == 0x00 && buf[c + 1] == 0x00 &&
+ buf[c + 2] == 0x01) {
+ switch (buf[c + 3]) {
+ case PROG_STREAM_MAP:
+ case PRIVATE_STREAM2:
+ case PROG_STREAM_DIR:
+ case ECM_STREAM:
+ case EMM_STREAM:
+ case PADDING_STREAM:
+ case DSM_CC_STREAM:
+ case ISO13522_STREAM:
+ case PRIVATE_STREAM1:
+ case AUDIO_STREAM_S ... AUDIO_STREAM_E:
+ case VIDEO_STREAM_S ... VIDEO_STREAM_E:
+ found = 1;
+ break;
+
+ default:
+ c++;
+ break;
+ }
+ } else {
+ c++;
+ }
+ }
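+	/*
+	 * No start code found: record how many trailing bytes could be the
+	 * start of a split 00 00 01 prefix so the next call can complete it.
+	 */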
+ if (c == length - 3 && !found) {
+ if (buf[length - 1] == 0x00)
+ *frags = 1;
+ if (buf[length - 2] == 0x00 &&
+ buf[length - 1] == 0x00)
+ *frags = 2;
+ if (buf[length - 3] == 0x00 &&
+ buf[length - 2] == 0x00 &&
+ buf[length - 1] == 0x01)
+ *frags = 3;
+ return -1;
+ }
+
+ return c;
+}
+
+void av7110_p2t_write(u8 const *buf, long length, u16 pid, struct av7110_p2t *p)
+{
+ int c, c2, l, add;
+ int check, rest;
+
+ c = 0;
+ c2 = 0;
+ if (p->frags) {
+ check = 0;
+ switch (p->frags) {
+ case 1:
+ if (buf[c] == 0x00 && buf[c + 1] == 0x01) {
+ check = 1;
+ c += 2;
+ }
+ break;
+ case 2:
+ if (buf[c] == 0x01) {
+ check = 1;
+ c++;
+ }
+ break;
+ case 3:
+ check = 1;
+ }
+ if (check) {
+ switch (buf[c]) {
+ case PROG_STREAM_MAP:
+ case PRIVATE_STREAM2:
+ case PROG_STREAM_DIR:
+ case ECM_STREAM:
+ case EMM_STREAM:
+ case PADDING_STREAM:
+ case DSM_CC_STREAM:
+ case ISO13522_STREAM:
+ case PRIVATE_STREAM1:
+ case AUDIO_STREAM_S ... AUDIO_STREAM_E:
+ case VIDEO_STREAM_S ... VIDEO_STREAM_E:
+ p->pes[0] = 0x00;
+ p->pes[1] = 0x00;
+ p->pes[2] = 0x01;
+ p->pes[3] = buf[c];
+ p->pos = 4;
+ memcpy(p->pes + p->pos, buf + c, (TS_SIZE - 4) - p->pos);
+ c += (TS_SIZE - 4) - p->pos;
+ p_to_t(p->pes, (TS_SIZE - 4), pid, &p->counter, p->feed);
+ clear_p2t(p);
+ break;
+
+ default:
+ c = 0;
+ break;
+ }
+ }
+ p->frags = 0;
+ }
+
+ if (p->pos) {
+ c2 = find_pes_header(buf + c, length - c, &p->frags);
+ if (c2 >= 0 && c2 < (TS_SIZE - 4) - p->pos)
+ l = c2 + c;
+ else
+ l = (TS_SIZE - 4) - p->pos;
+ memcpy(p->pes + p->pos, buf, l);
+ c += l;
+ p->pos += l;
+ p_to_t(p->pes, p->pos, pid, &p->counter, p->feed);
+ clear_p2t(p);
+ }
+
+ add = 0;
+ while (c < length) {
+ c2 = find_pes_header(buf + c + add, length - c - add, &p->frags);
+ if (c2 >= 0) {
+ c2 += c + add;
+ if (c2 > c) {
+ p_to_t(buf + c, c2 - c, pid, &p->counter, p->feed);
+ c = c2;
+ clear_p2t(p);
+ add = 0;
+ } else {
+ add = 1;
+ }
+ } else {
+ l = length - c;
+ rest = l % (TS_SIZE - 4);
+ l -= rest;
+ p_to_t(buf + c, l, pid, &p->counter, p->feed);
+ memcpy(p->pes, buf + c + l, rest);
+ p->pos = rest;
+ c = length;
+ }
+ }
+}
+
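+/*
+ * Build a TS packet header (plus adaptation-field stuffing when the payload
+ * is shorter than 184 bytes) for the given PID; returns the header length.
+ */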
+static int write_ts_header2(u16 pid, u8 *counter, int pes_start, u8 *buf, u8 length)
+{
+ int i;
+ int c = 0;
+ int fill;
+ u8 tshead[4] = { 0x47, 0x00, 0x00, 0x10 };
+
+ fill = (TS_SIZE - 4) - length;
+ if (pes_start)
+ tshead[1] = 0x40;
+ if (fill)
+ tshead[3] = 0x30;
+ tshead[1] |= (u8)((pid & 0x1F00) >> 8);
+ tshead[2] |= (u8)(pid & 0x00FF);
+ tshead[3] |= ((*counter)++ & 0x0F);
+ memcpy(buf, tshead, 4);
+ c += 4;
+
+ if (fill) {
+ buf[4] = fill - 1;
+ c++;
+ if (fill > 1) {
+ buf[5] = 0x00;
+ c++;
+ }
+ for (i = 6; i < fill + 4; i++) {
+ buf[i] = 0xFF;
+ c++;
+ }
+ }
+
+ return c;
+}
+
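+/* split a PES fragment into 188-byte TS packets and pass them to the feed callback */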
+static void p_to_t(u8 const *buf, long length, u16 pid, u8 *counter,
+ struct dvb_demux_feed *feed)
+{
+ int l, pes_start;
+ u8 obuf[TS_SIZE];
+ long c = 0;
+
+ pes_start = 0;
+ if (length > 3 &&
+ buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0x01)
+ switch (buf[3]) {
+ case PROG_STREAM_MAP:
+ case PRIVATE_STREAM2:
+ case PROG_STREAM_DIR:
+ case ECM_STREAM:
+ case EMM_STREAM:
+ case PADDING_STREAM:
+ case DSM_CC_STREAM:
+ case ISO13522_STREAM:
+ case PRIVATE_STREAM1:
+ case AUDIO_STREAM_S ... AUDIO_STREAM_E:
+ case VIDEO_STREAM_S ... VIDEO_STREAM_E:
+ pes_start = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ while (c < length) {
+ memset(obuf, 0, TS_SIZE);
+ if (length - c >= (TS_SIZE - 4)) {
+ l = write_ts_header2(pid, counter, pes_start,
+ obuf, (TS_SIZE - 4));
+ memcpy(obuf + l, buf + c, TS_SIZE - l);
+ c += TS_SIZE - l;
+ } else {
+ l = write_ts_header2(pid, counter, pes_start,
+ obuf, length - c);
+ memcpy(obuf + l, buf + c, TS_SIZE - l);
+ c = length;
+ }
+ feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL);
+ pes_start = 0;
+ }
+}
+
+static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len)
+{
+ struct ipack *ipack = &av7110->ipack[type];
+
+ if (buf[1] & TRANS_ERROR) {
+ av7110_ipack_reset(ipack);
+ return -1;
+ }
+
+ if (!(buf[3] & PAYLOAD))
+ return -1;
+
+ if (buf[1] & PAY_START)
+ av7110_ipack_flush(ipack);
+
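+	/* skip the adaptation field, if present, before repacking the payload */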
+ if (buf[3] & ADAPT_FIELD) {
+ if (buf[4] > len - 1 - 4)
+ return 0;
+ len -= buf[4] + 1;
+ buf += buf[4] + 1;
+ }
+
+ av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
+ return 0;
+}
+
+int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct av7110 *av7110 = demux->priv;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (av7110->full_ts && demux->dmx.frontend->source != DMX_MEMORY_FE)
+ return 0;
+
+ switch (feed->pes_type) {
+ case 0:
+ if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
+ return -EINVAL;
+ break;
+ case 1:
+ if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY)
+ return -EINVAL;
+ break;
+ default:
+ return -1;
+ }
+
+ return write_ts_to_decoder(av7110, feed->pes_type, buf, len);
+}
+
+/******************************************************************************
+ * Video MPEG decoder events
+ ******************************************************************************/
+void dvb_video_add_event(struct av7110 *av7110, struct video_event *event)
+{
+ struct dvb_video_events *events = &av7110->video_events;
+ int wp;
+
+ spin_lock_bh(&events->lock);
+
+ wp = (events->eventw + 1) % MAX_VIDEO_EVENT;
+ if (wp == events->eventr) {
+ events->overflow = 1;
+ events->eventr = (events->eventr + 1) % MAX_VIDEO_EVENT;
+ }
+
+ //FIXME: timestamp?
+ memcpy(&events->events[events->eventw], event, sizeof(struct video_event));
+ events->eventw = wp;
+
+ spin_unlock_bh(&events->lock);
+
+ wake_up_interruptible(&events->wait_queue);
+}
+
+static int dvb_video_get_event(struct av7110 *av7110, struct video_event *event, int flags)
+{
+ struct dvb_video_events *events = &av7110->video_events;
+
+ if (events->overflow) {
+ events->overflow = 0;
+ return -EOVERFLOW;
+ }
+ if (events->eventw == events->eventr) {
+ int ret;
+
+ if (flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+
+ ret = wait_event_interruptible(events->wait_queue,
+ events->eventw != events->eventr);
+ if (ret < 0)
+ return ret;
+ }
+
+ spin_lock_bh(&events->lock);
+
+ memcpy(event, &events->events[events->eventr],
+ sizeof(struct video_event));
+ events->eventr = (events->eventr + 1) % MAX_VIDEO_EVENT;
+
+ spin_unlock_bh(&events->lock);
+
+ return 0;
+}
+
+/******************************************************************************
+ * DVB device file operations
+ ******************************************************************************/
+
+static __poll_t dvb_video_poll(struct file *file, poll_table *wait)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ __poll_t mask = 0;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+ poll_wait(file, &av7110->avout.queue, wait);
+
+ poll_wait(file, &av7110->video_events.wait_queue, wait);
+
+ if (av7110->video_events.eventw != av7110->video_events.eventr)
+ mask = EPOLLPRI;
+
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ if (av7110->playing) {
+ if (FREE_COND)
+ mask |= (EPOLLOUT | EPOLLWRNORM);
+ } else {
+ /* if not playing: may play if asked for */
+ mask |= (EPOLLOUT | EPOLLWRNORM);
+ }
+ }
+
+ return mask;
+}
+
+static ssize_t dvb_video_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ unsigned char c;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+
+ if (av7110->videostate.stream_source != VIDEO_SOURCE_MEMORY)
+ return -EPERM;
+
+ if (get_user(c, buf))
+ return -EFAULT;
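+	/* a TS sync byte plus a multiple of 188 bytes means raw TS input, else PES */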
+ if (c == 0x47 && count % TS_SIZE == 0)
+ return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
+ else
+ return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
+}
+
+static __poll_t dvb_audio_poll(struct file *file, poll_table *wait)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ __poll_t mask = 0;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ poll_wait(file, &av7110->aout.queue, wait);
+
+ if (av7110->playing) {
+ if (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
+ mask |= (EPOLLOUT | EPOLLWRNORM);
+ } else {
+ /* if not playing: may play if asked for */
+ mask = (EPOLLOUT | EPOLLWRNORM);
+ }
+
+ return mask;
+}
+
+static ssize_t dvb_audio_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ unsigned char c;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (av7110->audiostate.stream_source != AUDIO_SOURCE_MEMORY) {
+ pr_err("not audio source memory\n");
+ return -EPERM;
+ }
+
+ if (get_user(c, buf))
+ return -EFAULT;
+ if (c == 0x47 && count % TS_SIZE == 0)
+ return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
+ else
+ return dvb_aplay(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
+}
+
+static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x00 };
+
+#define MIN_IFRAME 400000
+
+static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len, int nonblock)
+{
+ unsigned int i, n;
+ int progressive = 0;
+ int match = 0;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (len == 0)
+ return 0;
+
+ if (!(av7110->playing & RP_VIDEO)) {
+ if (av7110_av_start_play(av7110, RP_VIDEO) < 0)
+ return -EBUSY;
+ }
+
+ /* search in buf for instances of 00 00 01 b5 1? */
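+	/* 00 00 01 b5 1x is an MPEG-2 sequence extension; bit 3 of the next
+	 * byte is the progressive_sequence flag evaluated below
+	 */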
+ for (i = 0; i < len; i++) {
+ unsigned char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (match == 5) {
+ progressive = c & 0x08;
+ match = 0;
+ }
+ if (c == 0x00) {
+ match = (match == 1 || match == 2) ? 2 : 1;
+ continue;
+ }
+ switch (match++) {
+ case 2:
+ if (c == 0x01)
+ continue;
+ break;
+ case 3:
+ if (c == 0xb5)
+ continue;
+ break;
+ case 4:
+ if ((c & 0xf0) == 0x10)
+ continue;
+ break;
+ }
+ match = 0;
+ }
+
+	/* setting n always > 1 fixes problems when playing stillframes
+	 * consisting of I- and P-frames
+	 */
+ n = MIN_IFRAME / len + 1;
+
+ /* FIXME: nonblock? */
+ dvb_play_kernel(av7110, iframe_header, sizeof(iframe_header), 0, 1);
+
+ for (i = 0; i < n; i++)
+ dvb_play(av7110, buf, len, 0, 1);
+
+ av7110_ipack_flush(&av7110->ipack[1]);
+
+ if (progressive)
+ return vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1);
+ else
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+struct compat_video_still_picture {
+ compat_uptr_t iFrame;
+ s32 size;
+};
+
+#define VIDEO_STILLPICTURE32 _IOW('o', 30, struct compat_video_still_picture)
+
+struct compat_video_event {
+ __s32 type;
+ /* unused, make sure to use atomic time for y2038 if it ever gets used */
+ compat_long_t timestamp;
+ union {
+ video_size_t size;
+ unsigned int frame_rate; /* in frames per 1000sec */
+ unsigned char vsync_field; /* unknown/odd/even/progressive */
+ } u;
+};
+
+#define VIDEO_GET_EVENT32 _IOR('o', 28, struct compat_video_event)
+
+static int dvb_compat_video_get_event(struct av7110 *av7110,
+ struct compat_video_event *event, int flags)
+{
+ struct video_event ev;
+ int ret;
+
+ ret = dvb_video_get_event(av7110, &ev, flags);
+
+ *event = (struct compat_video_event) {
+ .type = ev.type,
+ .timestamp = ev.timestamp,
+ .u.size = ev.u.size,
+ };
+
+ return ret;
+}
+#endif
+
+static int dvb_video_ioctl(struct file *file,
+ unsigned int cmd, void *parg)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ unsigned long arg = (unsigned long)parg;
+ int ret = 0;
+
+ dprintk(1, "av7110:%p, cmd=%04x\n", av7110, cmd);
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
+ if (cmd != VIDEO_GET_STATUS && cmd != VIDEO_GET_EVENT &&
+ cmd != VIDEO_GET_SIZE) {
+ return -EPERM;
+ }
+ }
+
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
+ switch (cmd) {
+ case VIDEO_STOP:
+ av7110->videostate.play_state = VIDEO_STOPPED;
+ if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY)
+ ret = av7110_av_stop(av7110, RP_VIDEO);
+ else
+ ret = vidcom(av7110, AV_VIDEO_CMD_STOP,
+ av7110->videostate.video_blank ? 0 : 1);
+ if (!ret)
+ av7110->trickmode = TRICK_NONE;
+ break;
+
+ case VIDEO_PLAY:
+ av7110->trickmode = TRICK_NONE;
+ if (av7110->videostate.play_state == VIDEO_FREEZED) {
+ av7110->videostate.play_state = VIDEO_PLAYING;
+ ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
+ if (ret)
+ break;
+ }
+ if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) {
+ if (av7110->playing == RP_AV) {
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
+ if (ret)
+ break;
+ av7110->playing &= ~RP_VIDEO;
+ }
+ ret = av7110_av_start_play(av7110, RP_VIDEO);
+ }
+ if (!ret)
+ ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
+ if (!ret)
+ av7110->videostate.play_state = VIDEO_PLAYING;
+ break;
+
+ case VIDEO_FREEZE:
+ av7110->videostate.play_state = VIDEO_FREEZED;
+ if (av7110->playing & RP_VIDEO)
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Pause, 0);
+ else
+ ret = vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1);
+ if (!ret)
+ av7110->trickmode = TRICK_FREEZE;
+ break;
+
+ case VIDEO_CONTINUE:
+ if (av7110->playing & RP_VIDEO)
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Continue, 0);
+ if (!ret)
+ ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
+ if (!ret) {
+ av7110->videostate.play_state = VIDEO_PLAYING;
+ av7110->trickmode = TRICK_NONE;
+ }
+ break;
+
+ case VIDEO_SELECT_SOURCE:
+ av7110->videostate.stream_source = (video_stream_source_t)arg;
+ break;
+
+ case VIDEO_SET_BLANK:
+ av7110->videostate.video_blank = (int)arg;
+ break;
+
+ case VIDEO_GET_STATUS:
+ memcpy(parg, &av7110->videostate, sizeof(struct video_status));
+ break;
+
+#ifdef CONFIG_COMPAT
+ case VIDEO_GET_EVENT32:
+ ret = dvb_compat_video_get_event(av7110, parg, file->f_flags);
+ break;
+#endif
+
+ case VIDEO_GET_EVENT:
+ ret = dvb_video_get_event(av7110, parg, file->f_flags);
+ break;
+
+ case VIDEO_GET_SIZE:
+ memcpy(parg, &av7110->video_size, sizeof(video_size_t));
+ break;
+
+ case VIDEO_SET_DISPLAY_FORMAT:
+ {
+ video_displayformat_t format = (video_displayformat_t)arg;
+
+ switch (format) {
+ case VIDEO_PAN_SCAN:
+ av7110->display_panscan = VID_PAN_SCAN_PREF;
+ break;
+ case VIDEO_LETTER_BOX:
+ av7110->display_panscan = VID_VC_AND_PS_PREF;
+ break;
+ case VIDEO_CENTER_CUT_OUT:
+ av7110->display_panscan = VID_CENTRE_CUT_PREF;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret < 0)
+ break;
+ av7110->videostate.display_format = format;
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
+ 1, av7110->display_panscan);
+ break;
+ }
+
+ case VIDEO_SET_FORMAT:
+ if (arg > 1) {
+ ret = -EINVAL;
+ break;
+ }
+ av7110->display_ar = arg;
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetMonitorType,
+ 1, (u16)arg);
+ break;
+
+#ifdef CONFIG_COMPAT
+ case VIDEO_STILLPICTURE32:
+ {
+ struct compat_video_still_picture *pic =
+ (struct compat_video_still_picture *)parg;
+ av7110->videostate.stream_source = VIDEO_SOURCE_MEMORY;
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
+ ret = play_iframe(av7110, compat_ptr(pic->iFrame),
+ pic->size, file->f_flags & O_NONBLOCK);
+ break;
+ }
+#endif
+
+ case VIDEO_STILLPICTURE:
+ {
+ struct video_still_picture *pic =
+ (struct video_still_picture *)parg;
+ av7110->videostate.stream_source = VIDEO_SOURCE_MEMORY;
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
+ ret = play_iframe(av7110, pic->iFrame, pic->size,
+ file->f_flags & O_NONBLOCK);
+ break;
+ }
+
+ case VIDEO_FAST_FORWARD:
+ //note: arg is ignored by firmware
+ if (av7110->playing & RP_VIDEO)
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
+ __Scan_I, 2, AV_PES, 0);
+ else
+ ret = vidcom(av7110, AV_VIDEO_CMD_FFWD, arg);
+ if (!ret) {
+ av7110->trickmode = TRICK_FAST;
+ av7110->videostate.play_state = VIDEO_PLAYING;
+ }
+ break;
+
+ case VIDEO_SLOWMOTION:
+ if (av7110->playing & RP_VIDEO) {
+ if (av7110->trickmode != TRICK_SLOW)
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0);
+ if (!ret)
+ ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
+ } else {
+ ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
+ if (!ret)
+ ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 0);
+ if (!ret)
+ ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
+ }
+ if (!ret) {
+ av7110->trickmode = TRICK_SLOW;
+ av7110->videostate.play_state = VIDEO_PLAYING;
+ }
+ break;
+
+ case VIDEO_GET_CAPABILITIES:
+ *(int *)parg = VIDEO_CAP_MPEG1 | VIDEO_CAP_MPEG2 |
+ VIDEO_CAP_SYS | VIDEO_CAP_PROG;
+ break;
+
+ case VIDEO_CLEAR_BUFFER:
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
+ av7110_ipack_reset(&av7110->ipack[1]);
+ if (av7110->playing == RP_AV) {
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
+ __Play, 2, AV_PES, 0);
+ if (ret)
+ break;
+ if (av7110->trickmode == TRICK_FAST)
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
+ __Scan_I, 2, AV_PES, 0);
+ if (av7110->trickmode == TRICK_SLOW) {
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
+ __Slow, 2, 0, 0);
+ if (!ret)
+ ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
+ }
+ if (av7110->trickmode == TRICK_FREEZE)
+ ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 1);
+ }
+ break;
+
+ case VIDEO_SET_STREAMTYPE:
+ break;
+
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ mutex_unlock(&av7110->ioctl_mutex);
+ return ret;
+}
+
+static int dvb_audio_ioctl(struct file *file,
+ unsigned int cmd, void *parg)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ unsigned long arg = (unsigned long)parg;
+ int ret = 0;
+
+ dprintk(1, "av7110:%p, cmd=%04x\n", av7110, cmd);
+
+ if (((file->f_flags & O_ACCMODE) == O_RDONLY) &&
+ (cmd != AUDIO_GET_STATUS))
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
+ switch (cmd) {
+ case AUDIO_STOP:
+ if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
+ ret = av7110_av_stop(av7110, RP_AUDIO);
+ else
+ ret = audcom(av7110, AUDIO_CMD_MUTE);
+ if (!ret)
+ av7110->audiostate.play_state = AUDIO_STOPPED;
+ break;
+
+ case AUDIO_PLAY:
+ if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
+ ret = av7110_av_start_play(av7110, RP_AUDIO);
+ if (!ret)
+ ret = audcom(av7110, AUDIO_CMD_UNMUTE);
+ if (!ret)
+ av7110->audiostate.play_state = AUDIO_PLAYING;
+ break;
+
+ case AUDIO_PAUSE:
+ ret = audcom(av7110, AUDIO_CMD_MUTE);
+ if (!ret)
+ av7110->audiostate.play_state = AUDIO_PAUSED;
+ break;
+
+ case AUDIO_CONTINUE:
+ if (av7110->audiostate.play_state == AUDIO_PAUSED) {
+ av7110->audiostate.play_state = AUDIO_PLAYING;
+ ret = audcom(av7110, AUDIO_CMD_UNMUTE | AUDIO_CMD_PCM16);
+ }
+ break;
+
+ case AUDIO_SELECT_SOURCE:
+ av7110->audiostate.stream_source = (audio_stream_source_t)arg;
+ break;
+
+ case AUDIO_SET_MUTE:
+ {
+ ret = audcom(av7110, arg ? AUDIO_CMD_MUTE : AUDIO_CMD_UNMUTE);
+ if (!ret)
+ av7110->audiostate.mute_state = (int)arg;
+ break;
+ }
+
+ case AUDIO_SET_AV_SYNC:
+ av7110->audiostate.AV_sync_state = (int)arg;
+ ret = audcom(av7110, arg ? AUDIO_CMD_SYNC_ON : AUDIO_CMD_SYNC_OFF);
+ break;
+
+ case AUDIO_SET_BYPASS_MODE:
+ if (FW_VERSION(av7110->arm_app) < 0x2621)
+ ret = -EINVAL;
+ av7110->audiostate.bypass_mode = (int)arg;
+ break;
+
+ case AUDIO_CHANNEL_SELECT:
+ av7110->audiostate.channel_select = (audio_channel_select_t)arg;
+ switch (av7110->audiostate.channel_select) {
+ case AUDIO_STEREO:
+ ret = audcom(av7110, AUDIO_CMD_STEREO);
+ if (!ret) {
+ if (av7110->adac_type == DVB_ADAC_CRYSTAL)
+ i2c_writereg(av7110, 0x20, 0x02, 0x49);
+ else if (av7110->adac_type == DVB_ADAC_MSP34x5)
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220);
+ }
+ break;
+ case AUDIO_MONO_LEFT:
+ ret = audcom(av7110, AUDIO_CMD_MONO_L);
+ if (!ret) {
+ if (av7110->adac_type == DVB_ADAC_CRYSTAL)
+ i2c_writereg(av7110, 0x20, 0x02, 0x4a);
+ else if (av7110->adac_type == DVB_ADAC_MSP34x5)
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200);
+ }
+ break;
+ case AUDIO_MONO_RIGHT:
+ ret = audcom(av7110, AUDIO_CMD_MONO_R);
+ if (!ret) {
+ if (av7110->adac_type == DVB_ADAC_CRYSTAL)
+ i2c_writereg(av7110, 0x20, 0x02, 0x45);
+ else if (av7110->adac_type == DVB_ADAC_MSP34x5)
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+
+ case AUDIO_GET_STATUS:
+ memcpy(parg, &av7110->audiostate, sizeof(struct audio_status));
+ break;
+
+ case AUDIO_GET_CAPABILITIES:
+ if (FW_VERSION(av7110->arm_app) < 0x2621)
+ *(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_MP1 | AUDIO_CAP_MP2;
+ else
+ *(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_DTS | AUDIO_CAP_AC3 |
+ AUDIO_CAP_MP1 | AUDIO_CAP_MP2;
+ break;
+
+ case AUDIO_CLEAR_BUFFER:
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
+ av7110_ipack_reset(&av7110->ipack[0]);
+ if (av7110->playing == RP_AV)
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
+ __Play, 2, AV_PES, 0);
+ break;
+
+ case AUDIO_SET_ID:
+ break;
+
+ case AUDIO_SET_MIXER:
+ {
+ struct audio_mixer *amix = (struct audio_mixer *)parg;
+
+ ret = av7110_set_volume(av7110, amix->volume_left, amix->volume_right);
+ break;
+ }
+
+ case AUDIO_SET_STREAMTYPE:
+ break;
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ mutex_unlock(&av7110->ioctl_mutex);
+ return ret;
+}
+
+static int dvb_video_open(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ int err;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ err = dvb_generic_open(inode, file);
+ if (err < 0)
+ return err;
+
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
+ av7110->video_blank = 1;
+ av7110->audiostate.AV_sync_state = 1;
+ av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;
+
+ /* empty event queue */
+ av7110->video_events.eventr = 0;
+ av7110->video_events.eventw = 0;
+ }
+
+ return 0;
+}
+
+static int dvb_video_release(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+ av7110_av_stop(av7110, RP_VIDEO);
+
+ return dvb_generic_release(inode, file);
+}
+
+static int dvb_audio_open(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ int err = dvb_generic_open(inode, file);
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ if (err < 0)
+ return err;
+ dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
+ av7110->audiostate.stream_source = AUDIO_SOURCE_DEMUX;
+ return 0;
+}
+
+static int dvb_audio_release(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+
+ dprintk(2, "av7110:%p\n", av7110);
+
+ av7110_av_stop(av7110, RP_AUDIO);
+ return dvb_generic_release(inode, file);
+}
+
+/******************************************************************************
+ * driver registration
+ ******************************************************************************/
+
+static const struct file_operations dvb_video_fops = {
+ .owner = THIS_MODULE,
+ .write = dvb_video_write,
+ .unlocked_ioctl = dvb_generic_ioctl,
+ .compat_ioctl = dvb_generic_ioctl,
+ .open = dvb_video_open,
+ .release = dvb_video_release,
+ .poll = dvb_video_poll,
+ .llseek = noop_llseek,
+};
+
+static struct dvb_device dvbdev_video = {
+ .priv = NULL,
+ .users = 6,
+ .readers = 5, /* arbitrary */
+ .writers = 1,
+ .fops = &dvb_video_fops,
+ .kernel_ioctl = dvb_video_ioctl,
+};
+
+static const struct file_operations dvb_audio_fops = {
+ .owner = THIS_MODULE,
+ .write = dvb_audio_write,
+ .unlocked_ioctl = dvb_generic_ioctl,
+ .compat_ioctl = dvb_generic_ioctl,
+ .open = dvb_audio_open,
+ .release = dvb_audio_release,
+ .poll = dvb_audio_poll,
+ .llseek = noop_llseek,
+};
+
+static struct dvb_device dvbdev_audio = {
+ .priv = NULL,
+ .users = 1,
+ .writers = 1,
+ .fops = &dvb_audio_fops,
+ .kernel_ioctl = dvb_audio_ioctl,
+};
+
+int av7110_av_register(struct av7110 *av7110)
+{
+ av7110->audiostate.AV_sync_state = 0;
+ av7110->audiostate.mute_state = 0;
+ av7110->audiostate.play_state = AUDIO_STOPPED;
+ av7110->audiostate.stream_source = AUDIO_SOURCE_DEMUX;
+ av7110->audiostate.channel_select = AUDIO_STEREO;
+ av7110->audiostate.bypass_mode = 0;
+
+ av7110->videostate.video_blank = 0;
+ av7110->videostate.play_state = VIDEO_STOPPED;
+ av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;
+ av7110->videostate.video_format = VIDEO_FORMAT_4_3;
+ av7110->videostate.display_format = VIDEO_LETTER_BOX;
+ av7110->display_ar = VIDEO_FORMAT_4_3;
+ av7110->display_panscan = VID_VC_AND_PS_PREF;
+
+ init_waitqueue_head(&av7110->video_events.wait_queue);
+ spin_lock_init(&av7110->video_events.lock);
+ av7110->video_events.eventw = 0;
+ av7110->video_events.eventr = 0;
+ av7110->video_events.overflow = 0;
+ memset(&av7110->video_size, 0, sizeof(video_size_t));
+
+ dvb_register_device(&av7110->dvb_adapter, &av7110->video_dev,
+ &dvbdev_video, av7110, DVB_DEVICE_VIDEO, 0);
+
+ dvb_register_device(&av7110->dvb_adapter, &av7110->audio_dev,
+ &dvbdev_audio, av7110, DVB_DEVICE_AUDIO, 0);
+
+ return 0;
+}
+
+void av7110_av_unregister(struct av7110 *av7110)
+{
+ dvb_unregister_device(av7110->audio_dev);
+ dvb_unregister_device(av7110->video_dev);
+}
+
+int av7110_av_init(struct av7110 *av7110)
+{
+ void (*play[])(u8 *, int, void *) = { play_audio_cb, play_video_cb };
+ int i, ret;
+
+ for (i = 0; i < 2; i++) {
+ struct ipack *ipack = av7110->ipack + i;
+
+ ret = av7110_ipack_init(ipack, IPACKS, play[i]);
+ if (ret < 0) {
+ if (i)
+ av7110_ipack_free(--ipack);
+ goto out;
+ }
+ ipack->data = av7110;
+ }
+
+ dvb_ringbuffer_init(&av7110->avout, av7110->iobuf, AVOUTLEN);
+ dvb_ringbuffer_init(&av7110->aout, av7110->iobuf + AVOUTLEN, AOUTLEN);
+
+ av7110->kbuf[0] = (u8 *)(av7110->iobuf + AVOUTLEN + AOUTLEN + BMPLEN);
+ av7110->kbuf[1] = av7110->kbuf[0] + 2 * IPACKS;
+out:
+ return ret;
+}
+
+void av7110_av_exit(struct av7110 *av7110)
+{
+ av7110_ipack_free(&av7110->ipack[0]);
+ av7110_ipack_free(&av7110->ipack[1]);
+}
diff --git a/drivers/staging/media/av7110/av7110_av.h b/drivers/staging/media/av7110/av7110_av.h
new file mode 100644
index 000000000000..eebaf59c7585
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_av.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _AV7110_AV_H_
+#define _AV7110_AV_H_
+
+struct av7110;
+
+int av7110_set_vidmode(struct av7110 *av7110,
+ enum av7110_video_mode mode);
+
+int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len);
+int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen);
+int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len);
+
+int av7110_set_volume(struct av7110 *av7110, unsigned int volleft,
+ unsigned int volright);
+int av7110_av_stop(struct av7110 *av7110, int av);
+int av7110_av_start_record(struct av7110 *av7110, int av,
+ struct dvb_demux_feed *dvbdmxfeed);
+int av7110_av_start_play(struct av7110 *av7110, int av);
+
+void dvb_video_add_event(struct av7110 *av7110, struct video_event *event);
+
+void av7110_p2t_init(struct av7110_p2t *p, struct dvb_demux_feed *feed);
+void av7110_p2t_write(u8 const *buf, long length, u16 pid, struct av7110_p2t *p);
+
+int av7110_av_register(struct av7110 *av7110);
+void av7110_av_unregister(struct av7110 *av7110);
+int av7110_av_init(struct av7110 *av7110);
+void av7110_av_exit(struct av7110 *av7110);
+
+#endif /* _AV7110_AV_H_ */
diff --git a/drivers/staging/media/av7110/av7110_ca.c b/drivers/staging/media/av7110/av7110_ca.c
new file mode 100644
index 000000000000..63d9c97a5190
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_ca.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * driver for the SAA7146 based AV7110 cards
+ * - CA and CI stuff
+ *
+ * Copyright (C) 1999-2002 Ralph Metzler
+ * & Marcus Metzler for convergence integrated media GmbH
+ *
+ * originally based on code by:
+ * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de>
+ *
+ * the project's page is at https://linuxtv.org
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/poll.h>
+#include <linux/gfp.h>
+
+#include "av7110.h"
+#include "av7110_hw.h"
+#include "av7110_ca.h"
+
+void CI_handle(struct av7110 *av7110, u8 *data, u16 len)
+{
+ unsigned int slot_num;
+
+ dprintk(8, "av7110:%p\n", av7110);
+
+ if (len < 3)
+ return;
+ switch (data[0]) {
+ case CI_MSG_CI_INFO:
+ if (data[2] != 1 && data[2] != MAX_CI_SLOTS)
+ break;
+
+ slot_num = array_index_nospec(data[2] - 1, MAX_CI_SLOTS);
+
+ switch (data[1]) {
+ case 0:
+ av7110->ci_slot[slot_num].flags = 0;
+ break;
+ case 1:
+ av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_PRESENT;
+ break;
+ case 2:
+ av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_READY;
+ break;
+ }
+ break;
+ case CI_SWITCH_PRG_REPLY:
+ //av7110->ci_stat=data[1];
+ break;
+ default:
+ break;
+ }
+}
+
+void ci_get_data(struct dvb_ringbuffer *cibuf, u8 *data, int len)
+{
+ if (dvb_ringbuffer_free(cibuf) < len + 2)
+ return;
+
+ DVB_RINGBUFFER_WRITE_BYTE(cibuf, len >> 8);
+ DVB_RINGBUFFER_WRITE_BYTE(cibuf, len & 0xff);
+ dvb_ringbuffer_write(cibuf, data, len);
+ wake_up_interruptible(&cibuf->queue);
+}
+
+/******************************************************************************
+ * CI link layer file ops
+ ******************************************************************************/
+
+static int ci_ll_init(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer *ciwbuf, int size)
+{
+ struct dvb_ringbuffer *tab[] = { cirbuf, ciwbuf, NULL }, **p;
+ void *data;
+
+ for (p = tab; *p; p++) {
+ data = vmalloc(size);
+ if (!data) {
+ while (p-- != tab) {
+ vfree(p[0]->data);
+ p[0]->data = NULL;
+ }
+ return -ENOMEM;
+ }
+ dvb_ringbuffer_init(*p, data, size);
+ }
+ return 0;
+}
+
+static void ci_ll_flush(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer *ciwbuf)
+{
+ dvb_ringbuffer_flush_spinlock_wakeup(cirbuf);
+ dvb_ringbuffer_flush_spinlock_wakeup(ciwbuf);
+}
+
+static void ci_ll_release(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer *ciwbuf)
+{
+ vfree(cirbuf->data);
+ cirbuf->data = NULL;
+ vfree(ciwbuf->data);
+ ciwbuf->data = NULL;
+}
+
+static int ci_ll_reset(struct dvb_ringbuffer *cibuf, struct file *file,
+ int slots, struct ca_slot_info *slot)
+{
+ int i;
+ int len = 0;
+ u8 msg[8] = { 0x00, 0x06, 0x00, 0x00, 0xff, 0x02, 0x00, 0x00 };
+
+ for (i = 0; i < 2; i++) {
+ if (slots & (1 << i))
+ len += 8;
+ }
+
+ if (dvb_ringbuffer_free(cibuf) < len)
+ return -EBUSY;
+
+ for (i = 0; i < 2; i++) {
+ if (slots & (1 << i)) {
+ msg[2] = i;
+ dvb_ringbuffer_write(cibuf, msg, 8);
+ slot[i].flags = 0;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t ci_ll_write(struct dvb_ringbuffer *cibuf, struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ int free;
+ int non_blocking = file->f_flags & O_NONBLOCK;
+ u8 *page = (u8 *)__get_free_page(GFP_USER);
+ int res;
+
+ if (!page)
+ return -ENOMEM;
+
+ res = -EINVAL;
+ if (count > 2048)
+ goto out;
+
+ res = -EFAULT;
+ if (copy_from_user(page, buf, count))
+ goto out;
+
+ free = dvb_ringbuffer_free(cibuf);
+ if (count + 2 > free) {
+ res = -EWOULDBLOCK;
+ if (non_blocking)
+ goto out;
+ res = -ERESTARTSYS;
+ if (wait_event_interruptible(cibuf->queue,
+ (dvb_ringbuffer_free(cibuf) >= count + 2)))
+ goto out;
+ }
+
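+	/* each queued CI message carries a 16-bit big-endian length prefix */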
+ DVB_RINGBUFFER_WRITE_BYTE(cibuf, count >> 8);
+ DVB_RINGBUFFER_WRITE_BYTE(cibuf, count & 0xff);
+
+ res = dvb_ringbuffer_write(cibuf, page, count);
+out:
+ free_page((unsigned long)page);
+ return res;
+}
+
+static ssize_t ci_ll_read(struct dvb_ringbuffer *cibuf, struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ int avail;
+ int non_blocking = file->f_flags & O_NONBLOCK;
+ ssize_t len;
+
+ if (!cibuf->data || !count)
+ return 0;
+ if (non_blocking && (dvb_ringbuffer_empty(cibuf)))
+ return -EWOULDBLOCK;
+ if (wait_event_interruptible(cibuf->queue,
+ !dvb_ringbuffer_empty(cibuf)))
+ return -ERESTARTSYS;
+ avail = dvb_ringbuffer_avail(cibuf);
+ if (avail < 4)
+ return 0;
+ len = DVB_RINGBUFFER_PEEK(cibuf, 0) << 8;
+ len |= DVB_RINGBUFFER_PEEK(cibuf, 1);
+ if (avail < len + 2 || count < len)
+ return -EINVAL;
+ DVB_RINGBUFFER_SKIP(cibuf, 2);
+
+ return dvb_ringbuffer_read_user(cibuf, buf, len);
+}
+
+static int dvb_ca_open(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ int err = dvb_generic_open(inode, file);
+
+ dprintk(8, "av7110:%p\n", av7110);
+
+ if (err < 0)
+ return err;
+ ci_ll_flush(&av7110->ci_rbuffer, &av7110->ci_wbuffer);
+ return 0;
+}
+
+static __poll_t dvb_ca_poll(struct file *file, poll_table *wait)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ struct dvb_ringbuffer *rbuf = &av7110->ci_rbuffer;
+ struct dvb_ringbuffer *wbuf = &av7110->ci_wbuffer;
+ __poll_t mask = 0;
+
+ dprintk(8, "av7110:%p\n", av7110);
+
+ poll_wait(file, &rbuf->queue, wait);
+ poll_wait(file, &wbuf->queue, wait);
+
+ if (!dvb_ringbuffer_empty(rbuf))
+ mask |= (EPOLLIN | EPOLLRDNORM);
+
+ if (dvb_ringbuffer_free(wbuf) > 1024)
+ mask |= (EPOLLOUT | EPOLLWRNORM);
+
+ return mask;
+}
+
+static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+ unsigned long arg = (unsigned long)parg;
+ int ret = 0;
+
+ dprintk(8, "av7110:%p\n", av7110);
+
+ if (mutex_lock_interruptible(&av7110->ioctl_mutex))
+ return -ERESTARTSYS;
+
+ switch (cmd) {
+ case CA_RESET:
+ ret = ci_ll_reset(&av7110->ci_wbuffer, file, arg,
+ &av7110->ci_slot[0]);
+ break;
+ case CA_GET_CAP:
+ {
+ struct ca_caps cap;
+
+ cap.slot_num = 2;
+ cap.slot_type = (FW_CI_LL_SUPPORT(av7110->arm_app) ?
+ CA_CI_LINK : CA_CI) | CA_DESCR;
+ cap.descr_num = 16;
+ cap.descr_type = CA_ECD;
+ memcpy(parg, &cap, sizeof(cap));
+ break;
+ }
+
+ case CA_GET_SLOT_INFO:
+ {
+ struct ca_slot_info *info = (struct ca_slot_info *)parg;
+ unsigned int slot_num;
+
+ if (info->num < 0 || info->num > 1) {
+ mutex_unlock(&av7110->ioctl_mutex);
+ return -EINVAL;
+ }
+ slot_num = array_index_nospec(info->num, MAX_CI_SLOTS);
+
+ av7110->ci_slot[slot_num].num = info->num;
+ av7110->ci_slot[slot_num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
+ CA_CI_LINK : CA_CI;
+ memcpy(info, &av7110->ci_slot[slot_num],
+ sizeof(struct ca_slot_info));
+ break;
+ }
+
+ case CA_GET_MSG:
+ break;
+
+ case CA_SEND_MSG:
+ break;
+
+ case CA_GET_DESCR_INFO:
+ {
+ struct ca_descr_info info;
+
+ info.num = 16;
+ info.type = CA_ECD;
+ memcpy(parg, &info, sizeof(info));
+ break;
+ }
+
+ case CA_SET_DESCR:
+ {
+ struct ca_descr *descr = (struct ca_descr *)parg;
+
+ if (descr->index >= 16 || descr->parity > 1) {
+ mutex_unlock(&av7110->ioctl_mutex);
+ return -EINVAL;
+ }
+ av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetDescr, 5,
+ (descr->index << 8) | descr->parity,
+ (descr->cw[0] << 8) | descr->cw[1],
+ (descr->cw[2] << 8) | descr->cw[3],
+ (descr->cw[4] << 8) | descr->cw[5],
+ (descr->cw[6] << 8) | descr->cw[7]);
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&av7110->ioctl_mutex);
+ return ret;
+}
+
+static ssize_t dvb_ca_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+
+ dprintk(8, "av7110:%p\n", av7110);
+ return ci_ll_write(&av7110->ci_wbuffer, file, buf, count, ppos);
+}
+
+static ssize_t dvb_ca_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct av7110 *av7110 = dvbdev->priv;
+
+ dprintk(8, "av7110:%p\n", av7110);
+ return ci_ll_read(&av7110->ci_rbuffer, file, buf, count, ppos);
+}
+
+static const struct file_operations dvb_ca_fops = {
+ .owner = THIS_MODULE,
+ .read = dvb_ca_read,
+ .write = dvb_ca_write,
+ .unlocked_ioctl = dvb_generic_ioctl,
+ .open = dvb_ca_open,
+ .release = dvb_generic_release,
+ .poll = dvb_ca_poll,
+ .llseek = default_llseek,
+};
+
+static struct dvb_device dvbdev_ca = {
+ .priv = NULL,
+ .users = 1,
+ .writers = 1,
+ .fops = &dvb_ca_fops,
+ .kernel_ioctl = dvb_ca_ioctl,
+};
+
+int av7110_ca_register(struct av7110 *av7110)
+{
+ return dvb_register_device(&av7110->dvb_adapter, &av7110->ca_dev,
+ &dvbdev_ca, av7110, DVB_DEVICE_CA, 0);
+}
+
+void av7110_ca_unregister(struct av7110 *av7110)
+{
+ dvb_unregister_device(av7110->ca_dev);
+}
+
+int av7110_ca_init(struct av7110 *av7110)
+{
+ return ci_ll_init(&av7110->ci_rbuffer, &av7110->ci_wbuffer, 8192);
+}
+
+void av7110_ca_exit(struct av7110 *av7110)
+{
+ ci_ll_release(&av7110->ci_rbuffer, &av7110->ci_wbuffer);
+}
diff --git a/drivers/staging/media/av7110/av7110_ca.h b/drivers/staging/media/av7110/av7110_ca.h
new file mode 100644
index 000000000000..d3521944b97c
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_ca.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _AV7110_CA_H_
+#define _AV7110_CA_H_
+
+struct av7110;
+
+void CI_handle(struct av7110 *av7110, u8 *data, u16 len);
+void ci_get_data(struct dvb_ringbuffer *cibuf, u8 *data, int len);
+
+int av7110_ca_register(struct av7110 *av7110);
+void av7110_ca_unregister(struct av7110 *av7110);
+int av7110_ca_init(struct av7110 *av7110);
+void av7110_ca_exit(struct av7110 *av7110);
+
+#endif /* _AV7110_CA_H_ */
diff --git a/drivers/staging/media/av7110/av7110_hw.c b/drivers/staging/media/av7110/av7110_hw.c
new file mode 100644
index 000000000000..bf8e6dca40e5
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_hw.c
@@ -0,0 +1,1191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * driver for the SAA7146 based AV7110 cards
+ * - av7110 low level hardware access and firmware interface
+ *
+ * Copyright (C) 1999-2002 Ralph Metzler
+ * & Marcus Metzler for convergence integrated media GmbH
+ *
+ * originally based on code by:
+ * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de>
+ *
+ * the project's page is at https://linuxtv.org
+ */
+
+/* for debugging ARM communication: */
+//#define COM_DEBUG
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+
+#include "av7110.h"
+#include "av7110_hw.h"
+
+#define _NOHANDSHAKE
+
+/*
+ * Max transfer size done by av7110_fw_cmd()
+ *
+ * The maximum number of parameters passed to this function is 6. The
+ * buffer also needs two additional entries for type and size, so 8
+ * 16-bit entries are enough.
+ */
+#define MAX_XFER_SIZE 8
+
+/****************************************************************************
+ * DEBI functions
+ ****************************************************************************/
+
+/* This DEBI code is based on the Stradis driver
+ * by Nathan Laredo <laredo@gnu.org>
+ */
+
+int av7110_debiwrite(struct av7110 *av7110, u32 config,
+ int addr, u32 val, unsigned int count)
+{
+ struct saa7146_dev *dev = av7110->dev;
+
+ if (count > 32764) {
+ pr_err("%s(): invalid count %d\n", __func__, count);
+ return -1;
+ }
+ if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
+ pr_err("%s(): wait_for_debi_done failed\n", __func__);
+ return -1;
+ }
+ saa7146_write(dev, DEBI_CONFIG, config);
+ if (count <= 4) /* immediate transfer */
+ saa7146_write(dev, DEBI_AD, val);
+ else /* block transfer */
+ saa7146_write(dev, DEBI_AD, av7110->debi_bus);
+ saa7146_write(dev, DEBI_COMMAND, (count << 17) | (addr & 0xffff));
+ saa7146_write(dev, MC2, (2 << 16) | 2);
+ return 0;
+}
+
+u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
+{
+ struct saa7146_dev *dev = av7110->dev;
+ u32 result = 0;
+
+ if (count > 32764) {
+ pr_err("%s(): invalid count %d\n", __func__, count);
+ return 0;
+ }
+ if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
+ pr_err("%s(): wait_for_debi_done #1 failed\n", __func__);
+ return 0;
+ }
+ saa7146_write(dev, DEBI_AD, av7110->debi_bus);
+ saa7146_write(dev, DEBI_COMMAND, (count << 17) | 0x10000 | (addr & 0xffff));
+
+ saa7146_write(dev, DEBI_CONFIG, config);
+ saa7146_write(dev, MC2, (2 << 16) | 2);
+ if (count > 4)
+ return count;
+ if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
+ pr_err("%s(): wait_for_debi_done #2 failed\n", __func__);
+ return 0;
+ }
+
+ result = saa7146_read(dev, DEBI_AD);
+ result &= (0xffffffffUL >> ((4 - count) * 8));
+ return result;
+}
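+
+/*
+ * Note on the two DEBI transfer sizes handled above: transfers of up to
+ * four bytes are immediate and use the DEBI_AD register directly, which
+ * is why av7110_debiread() can return the value for count <= 4. Larger
+ * transfers are block transfers through the buffer addressed by
+ * av7110->debi_bus (filled by mwdebi() on the write side); for those
+ * av7110_debiread() only starts the transfer and returns the count.
+ */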
+
+/* av7110 ARM core boot stuff */
+#if 0
+void av7110_reset_arm(struct av7110 *av7110)
+{
+ saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTLO);
+
+ /* Disable DEBI and GPIO irq */
+ SAA7146_IER_DISABLE(av7110->dev, MASK_19 | MASK_03);
+ SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
+
+ saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTHI);
+ msleep(30); /* the firmware needs some time to initialize */
+
+ ARM_ResetMailBox(av7110);
+
+ SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
+ SAA7146_IER_ENABLE(av7110->dev, MASK_03);
+
+ av7110->arm_ready = 1;
+ dprintk(1, "reset ARM\n");
+}
+#endif /* 0 */
+
+static int waitdebi(struct av7110 *av7110, int adr, int state)
+{
+ int k;
+
+ dprintk(4, "%p\n", av7110);
+
+ for (k = 0; k < 100; k++) {
+ if (irdebi(av7110, DEBINOSWAP, adr, 0, 2) == state)
+ return 0;
+ udelay(5);
+ }
+ return -ETIMEDOUT;
+}
+
+static int load_dram(struct av7110 *av7110, u32 *data, int len)
+{
+ int i;
+ int blocks, rest;
+ u32 base, bootblock = AV7110_BOOT_BLOCK;
+
+ dprintk(4, "%p\n", av7110);
+
+ blocks = len / AV7110_BOOT_MAX_SIZE;
+ rest = len % AV7110_BOOT_MAX_SIZE;
+ base = DRAM_START_CODE;
+
+ for (i = 0; i < blocks; i++) {
+ if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
+ pr_err("%s(): timeout at block %d\n", __func__, i);
+ return -ETIMEDOUT;
+ }
+ dprintk(4, "writing DRAM block %d\n", i);
+ mwdebi(av7110, DEBISWAB, bootblock,
+ ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE);
+ bootblock ^= 0x1400;
+ iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
+ iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, AV7110_BOOT_MAX_SIZE, 2);
+ iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
+ base += AV7110_BOOT_MAX_SIZE;
+ }
+
+ if (rest > 0) {
+ if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
+ pr_err("%s(): timeout at last block\n", __func__);
+ return -ETIMEDOUT;
+ }
+ if (rest > 4)
+ mwdebi(av7110, DEBISWAB, bootblock,
+ ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, rest);
+ else
+ mwdebi(av7110, DEBISWAB, bootblock,
+ ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4);
+
+ iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
+ iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, rest, 2);
+ iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
+ }
+ if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
+ pr_err("%s(): timeout after last block\n", __func__);
+ return -ETIMEDOUT;
+ }
+ iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, 0, 2);
+ iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
+ if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_AV7110_BOOT_COMPLETE) < 0) {
+ pr_err("%s(): final handshake timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/* we cannot write av7110 DRAM directly, so load a bootloader into
+ * the DPRAM which implements a simple boot protocol
+ */
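+/*
+ * Boot protocol as implemented by load_dram() above: wait until the
+ * bootloader reports BOOTSTATE_BUFFER_EMPTY, copy the next chunk of at
+ * most AV7110_BOOT_MAX_SIZE bytes into one of the two alternating
+ * bootblock windows in DPRAM, write the destination DRAM address and
+ * chunk size, then set BOOTSTATE_BUFFER_FULL to hand the buffer back.
+ * A final zero-sized block terminates the transfer, after which the
+ * bootloader signals BOOTSTATE_AV7110_BOOT_COMPLETE.
+ */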
+int av7110_bootarm(struct av7110 *av7110)
+{
+ const struct firmware *fw;
+ const char *fw_name = "av7110/bootcode.bin";
+ struct saa7146_dev *dev = av7110->dev;
+ u32 ret;
+ int i;
+
+ dprintk(4, "%p\n", av7110);
+
+ av7110->arm_ready = 0;
+
+ saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO);
+
+ /* Disable DEBI and GPIO irq */
+ SAA7146_IER_DISABLE(av7110->dev, MASK_03 | MASK_19);
+ SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
+
+ /* enable DEBI */
+ saa7146_write(av7110->dev, MC1, 0x08800880);
+ saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000);
+ saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
+
+ /* test DEBI */
+ iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
+ /* FIXME: Why does Nexus CA require 2x iwdebi for first init? */
+ iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
+
+ ret = irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4);
+ if (ret != 0x10325476) {
+ pr_err("debi test in %s() failed: %08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
+ __func__, ret, 0x10325476);
+ return -1;
+ }
+ for (i = 0; i < 8192; i += 4)
+ iwdebi(av7110, DEBISWAP, DPRAM_BASE + i, 0x00, 4);
+ dprintk(2, "debi test OK\n");
+
+ /* boot */
+ dprintk(1, "load boot code\n");
+ saa7146_setgpio(dev, ARM_IRQ_LINE, SAA7146_GPIO_IRQLO);
+ //saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT);
+ //saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT);
+
+ ret = request_firmware(&fw, fw_name, &dev->pci->dev);
+ if (ret) {
+ pr_err("Failed to load firmware \"%s\"\n", fw_name);
+ return ret;
+ }
+
+ mwdebi(av7110, DEBISWAB, DPRAM_BASE, fw->data, fw->size);
+ release_firmware(fw);
+ iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
+
+ if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
+ pr_err("%s(): saa7146_wait_for_debi_done() timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+ saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
+ mdelay(1);
+
+ dprintk(1, "load dram code\n");
+ if (load_dram(av7110, (u32 *)av7110->bin_root, av7110->size_root) < 0) {
+ pr_err("%s(): load_dram() failed\n", __func__);
+ return -1;
+ }
+
+ saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO);
+ mdelay(1);
+
+ dprintk(1, "load dpram code\n");
+ mwdebi(av7110, DEBISWAB, DPRAM_BASE, av7110->bin_dpram, av7110->size_dpram);
+
+ if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
+ pr_err("%s(): saa7146_wait_for_debi_done() timed out after loading DRAM\n", __func__);
+ return -ETIMEDOUT;
+ }
+ saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
+ msleep(30); /* the firmware needs some time to initialize */
+
+ //ARM_ClearIrq(av7110);
+ ARM_ResetMailBox(av7110);
+ SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
+ SAA7146_IER_ENABLE(av7110->dev, MASK_03);
+
+ av7110->arm_errors = 0;
+ av7110->arm_ready = 1;
+ return 0;
+}
+MODULE_FIRMWARE("av7110/bootcode.bin");
+
+/****************************************************************************
+ * DEBI command polling
+ ****************************************************************************/
+
+int av7110_wait_msgstate(struct av7110 *av7110, u16 flags)
+{
+ unsigned long start;
+ u32 stat;
+ int err;
+
+ if (FW_VERSION(av7110->arm_app) <= 0x261c) {
+ /* not supported by old firmware */
+ msleep(50);
+ return 0;
+ }
+
+ /* new firmware */
+ start = jiffies;
+ for (;;) {
+ err = time_after(jiffies, start + ARM_WAIT_FREE);
+ if (mutex_lock_interruptible(&av7110->dcomlock))
+ return -ERESTARTSYS;
+ stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
+ mutex_unlock(&av7110->dcomlock);
+ if ((stat & flags) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting for MSGSTATE %04x\n", __func__, stat & flags);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+static int __av7110_send_fw_cmd(struct av7110 *av7110, u16 *buf, int length)
+{
+ int i;
+ unsigned long start;
+ char *type = NULL;
+ u16 flags[2] = {0, 0};
+ u32 stat;
+ int err;
+
+// dprintk(4, "%p\n", av7110);
+
+ if (!av7110->arm_ready) {
+ dprintk(1, "arm not ready.\n");
+ return -ENXIO;
+ }
+
+ start = jiffies;
+ while (1) {
+ err = time_after(jiffies, start + ARM_WAIT_FREE);
+ if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting for COMMAND idle\n", __func__);
+ av7110->arm_errors++;
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+
+ if (FW_VERSION(av7110->arm_app) <= 0x261f)
+ wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0xffff, 2);
+
+#ifndef _NOHANDSHAKE
+ start = jiffies;
+ while (1) {
+ err = time_after(jiffies, start + ARM_WAIT_SHAKE);
+ if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting for HANDSHAKE_REG\n", __func__);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+#endif
+
+ switch ((buf[0] >> 8) & 0xff) {
+ case COMTYPE_PIDFILTER:
+ case COMTYPE_ENCODER:
+ case COMTYPE_REC_PLAY:
+ case COMTYPE_MPEGDECODER:
+ type = "MSG";
+ flags[0] = GPMQOver;
+ flags[1] = GPMQFull;
+ break;
+ case COMTYPE_OSD:
+ type = "OSD";
+ flags[0] = OSDQOver;
+ flags[1] = OSDQFull;
+ break;
+ case COMTYPE_MISC:
+ if (FW_VERSION(av7110->arm_app) >= 0x261d) {
+ type = "MSG";
+ flags[0] = GPMQOver;
+ flags[1] = GPMQBusy;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (type) {
+ /* non-immediate COMMAND type */
+ start = jiffies;
+ for (;;) {
+ err = time_after(jiffies, start + ARM_WAIT_FREE);
+ stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
+ if (stat & flags[0]) {
+ pr_err("%s(): %s QUEUE overflow\n", __func__, type);
+ return -1;
+ }
+ if ((stat & flags[1]) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting on busy %s QUEUE\n", __func__, type);
+ av7110->arm_errors++;
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ }
+
+ for (i = 2; i < length; i++)
+ wdebi(av7110, DEBINOSWAP, COMMAND + 2 * i, (u32)buf[i], 2);
+
+ if (length)
+ wdebi(av7110, DEBINOSWAP, COMMAND + 2, (u32)buf[1], 2);
+ else
+ wdebi(av7110, DEBINOSWAP, COMMAND + 2, 0, 2);
+
+ wdebi(av7110, DEBINOSWAP, COMMAND, (u32)buf[0], 2);
+
+ if (FW_VERSION(av7110->arm_app) <= 0x261f)
+ wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0x0000, 2);
+
+#ifdef COM_DEBUG
+ start = jiffies;
+ while (1) {
+ err = time_after(jiffies, start + ARM_WAIT_FREE);
+ if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting for COMMAND %d to complete\n",
+ __func__, (buf[0] >> 8) & 0xff);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+
+ stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
+ if (stat & GPMQOver) {
+ pr_err("%s(): GPMQOver\n", __func__);
+ return -ENOSPC;
+ } else if (stat & OSDQOver) {
+ pr_err("%s(): OSDQOver\n", __func__);
+ return -ENOSPC;
+ }
+#endif
+
+ return 0;
+}
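+
+/*
+ * Note the write order above: the parameter words and the length word go
+ * out to COMMAND + 2 onwards first, and the opcode word buf[0] is written
+ * to COMMAND last. The driver itself treats a non-zero COMMAND word as
+ * "command pending" (see the idle wait at the top of this function), so
+ * the command only becomes visible once all of its arguments are in
+ * place.
+ */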
+
+static int av7110_send_fw_cmd(struct av7110 *av7110, u16 *buf, int length)
+{
+ int ret;
+
+// dprintk(4, "%p\n", av7110);
+
+ if (!av7110->arm_ready) {
+ dprintk(1, "arm not ready.\n");
+ return -1;
+ }
+ if (mutex_lock_interruptible(&av7110->dcomlock))
+ return -ERESTARTSYS;
+
+ ret = __av7110_send_fw_cmd(av7110, buf, length);
+ mutex_unlock(&av7110->dcomlock);
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("%s(): error %d\n", __func__, ret);
+ return ret;
+}
+
+int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...)
+{
+ va_list args;
+ u16 buf[MAX_XFER_SIZE];
+ int i, ret;
+
+// dprintk(4, "%p\n", av7110);
+
+ if (2 + num > ARRAY_SIZE(buf)) {
+ pr_warn("%s(): len=%d is too big!\n", __func__, num);
+ return -EINVAL;
+ }
+
+ buf[0] = ((type << 8) | com);
+ buf[1] = num;
+
+ if (num) {
+ va_start(args, num);
+ for (i = 0; i < num; i++)
+ buf[i + 2] = va_arg(args, u32);
+ va_end(args);
+ }
+
+ ret = av7110_send_fw_cmd(av7110, buf, num + 2);
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("%s(): error %d\n", __func__, ret);
+ return ret;
+}
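+
+/*
+ * Illustrative layout: a call such as
+ *	av7110_fw_cmd(av7110, COMTYPE_OSD, SetCBlend, 1, windownr);
+ * builds buf[] = { (COMTYPE_OSD << 8) | SetCBlend, 1, windownr } and
+ * hands it to av7110_send_fw_cmd() as num + 2 = 3 words.
+ */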
+
+#if 0
+int av7110_send_ci_cmd(struct av7110 *av7110, u8 subcom, u8 *buf, u8 len)
+{
+ int i, ret;
+ u16 cmd[18] = { ((COMTYPE_COMMON_IF << 8) + subcom),
+ 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ dprintk(4, "%p\n", av7110);
+
+ for (i = 0; i < len && i < 32; i++) {
+ if (i % 2 == 0)
+ cmd[(i / 2) + 2] = (u16)(buf[i]) << 8;
+ else
+ cmd[(i / 2) + 2] |= buf[i];
+ }
+
+ ret = av7110_send_fw_cmd(av7110, cmd, 18);
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("%s(): error %d\n", __func__, ret);
+ return ret;
+}
+#endif /* 0 */
+
+int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
+ int request_buf_len, u16 *reply_buf, int reply_buf_len)
+{
+ int err;
+ s16 i;
+ unsigned long start;
+#ifdef COM_DEBUG
+ u32 stat;
+#endif
+
+ dprintk(4, "%p\n", av7110);
+
+ if (!av7110->arm_ready) {
+ dprintk(1, "arm not ready.\n");
+ return -1;
+ }
+
+ if (mutex_lock_interruptible(&av7110->dcomlock))
+ return -ERESTARTSYS;
+
+ err = __av7110_send_fw_cmd(av7110, request_buf, request_buf_len);
+ if (err < 0) {
+ mutex_unlock(&av7110->dcomlock);
+ pr_err("%s(): error %d\n", __func__, err);
+ return err;
+ }
+
+ start = jiffies;
+ while (1) {
+ err = time_after(jiffies, start + ARM_WAIT_FREE);
+ if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting for COMMAND to complete\n", __func__);
+ mutex_unlock(&av7110->dcomlock);
+ return -ETIMEDOUT;
+ }
+#ifdef _NOHANDSHAKE
+ msleep(1);
+#endif
+ }
+
+#ifndef _NOHANDSHAKE
+ start = jiffies;
+ while (1) {
+ err = time_after(jiffies, start + ARM_WAIT_SHAKE);
+ if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting for HANDSHAKE_REG\n", __func__);
+ mutex_unlock(&av7110->dcomlock);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+#endif
+
+#ifdef COM_DEBUG
+ stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
+ if (stat & GPMQOver) {
+ pr_err("%s(): GPMQOver\n", __func__);
+ mutex_unlock(&av7110->dcomlock);
+ return -1;
+ } else if (stat & OSDQOver) {
+ pr_err("%s(): OSDQOver\n", __func__);
+ mutex_unlock(&av7110->dcomlock);
+ return -1;
+ }
+#endif
+
+ for (i = 0; i < reply_buf_len; i++)
+ reply_buf[i] = rdebi(av7110, DEBINOSWAP, COM_BUFF + 2 * i, 0, 2);
+
+ mutex_unlock(&av7110->dcomlock);
+ return 0;
+}
+
+static int av7110_fw_query(struct av7110 *av7110, u16 tag, u16 *buf, s16 length)
+{
+ int ret;
+
+ ret = av7110_fw_request(av7110, &tag, 0, buf, length);
+ if (ret)
+ pr_err("%s(): error %d\n", __func__, ret);
+ return ret;
+}
+
+/****************************************************************************
+ * Firmware commands
+ ****************************************************************************/
+
+/* get version of the firmware ROM, RTSL, video ucode and ARM application */
+int av7110_firmversion(struct av7110 *av7110)
+{
+ u16 buf[20];
+ u16 tag = ((COMTYPE_REQUEST << 8) + ReqVersion);
+
+ dprintk(4, "%p\n", av7110);
+
+ if (av7110_fw_query(av7110, tag, buf, 16)) {
+ pr_err("failed to boot firmware @ card %d\n", av7110->dvb_adapter.num);
+ return -EIO;
+ }
+
+ av7110->arm_fw = (buf[0] << 16) + buf[1];
+ av7110->arm_rtsl = (buf[2] << 16) + buf[3];
+ av7110->arm_vid = (buf[4] << 16) + buf[5];
+ av7110->arm_app = (buf[6] << 16) + buf[7];
+ av7110->avtype = (buf[8] << 16) + buf[9];
+
+ pr_info("info @ card %d: firm %08x, rtsl %08x, vid %08x, app %08x\n",
+ av7110->dvb_adapter.num, av7110->arm_fw,
+ av7110->arm_rtsl, av7110->arm_vid, av7110->arm_app);
+
+ /* print firmware capabilities */
+ if (FW_CI_LL_SUPPORT(av7110->arm_app))
+ pr_info("firmware @ card %d supports CI link layer interface\n",
+ av7110->dvb_adapter.num);
+ else
+ pr_info("no firmware support for CI link layer interface @ card %d\n",
+ av7110->dvb_adapter.num);
+
+ return 0;
+}
+
+int av7110_diseqc_send(struct av7110 *av7110, int len, u8 *msg, unsigned long burst)
+{
+ int i, ret;
+ u16 buf[18] = { ((COMTYPE_AUDIODAC << 8) + SendDiSEqC),
+ 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ dprintk(4, "%p\n", av7110);
+
+ if (len > 10)
+ len = 10;
+
+ buf[1] = len + 2;
+ buf[2] = len;
+
+ if (burst != -1)
+ buf[3] = burst ? 0x01 : 0x00;
+ else
+ buf[3] = 0xffff;
+
+ for (i = 0; i < len; i++)
+ buf[i + 4] = msg[i];
+
+ ret = av7110_send_fw_cmd(av7110, buf, 18);
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("%s(): error %d\n", __func__, ret);
+ return ret;
+}
+
+#ifdef CONFIG_DVB_AV7110_OSD
+
+static inline int SetColorBlend(struct av7110 *av7110, u8 windownr)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, SetCBlend, 1, windownr);
+}
+
+static inline int SetBlend_(struct av7110 *av7110, u8 windownr,
+ enum av7110_osd_palette_type colordepth, u16 index, u8 blending)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, SetBlend, 4,
+ windownr, colordepth, index, blending);
+}
+
+static inline int SetColor_(struct av7110 *av7110, u8 windownr,
+ enum av7110_osd_palette_type colordepth, u16 index, u16 colorhi, u16 colorlo)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, SetColor, 5,
+ windownr, colordepth, index, colorhi, colorlo);
+}
+
+static inline int SetFont(struct av7110 *av7110, u8 windownr, u8 fontsize,
+ u16 colorfg, u16 colorbg)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Font, 4,
+ windownr, fontsize, colorfg, colorbg);
+}
+
+static int FlushText(struct av7110 *av7110)
+{
+ unsigned long start;
+ int err;
+
+ if (mutex_lock_interruptible(&av7110->dcomlock))
+ return -ERESTARTSYS;
+ start = jiffies;
+ while (1) {
+ err = time_after(jiffies, start + ARM_WAIT_OSD);
+ if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0)
+ break;
+ if (err) {
+ pr_err("%s(): timeout waiting for BUFF1_BASE == 0\n", __func__);
+ mutex_unlock(&av7110->dcomlock);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ mutex_unlock(&av7110->dcomlock);
+ return 0;
+}
+
+static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, char *buf)
+{
+ int i, ret;
+ unsigned long start;
+ int length = strlen(buf) + 1;
+ u16 cbuf[5] = { (COMTYPE_OSD << 8) + DText, 3, win, x, y };
+
+ if (mutex_lock_interruptible(&av7110->dcomlock))
+ return -ERESTARTSYS;
+
+ start = jiffies;
+ while (1) {
+ ret = time_after(jiffies, start + ARM_WAIT_OSD);
+ if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0)
+ break;
+ if (ret) {
+ pr_err("%s(): timeout waiting for BUFF1_BASE == 0\n", __func__);
+ mutex_unlock(&av7110->dcomlock);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+#ifndef _NOHANDSHAKE
+ start = jiffies;
+ while (1) {
+ ret = time_after(jiffies, start + ARM_WAIT_SHAKE);
+ if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
+ break;
+ if (ret) {
+ pr_err("%s(): timeout waiting for HANDSHAKE_REG\n", __func__);
+ mutex_unlock(&av7110->dcomlock);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+#endif
+ for (i = 0; i < length / 2; i++)
+ wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2,
+ swab16(*(u16 *)(buf + 2 * i)), 2);
+ if (length & 1)
+ wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2, 0, 2);
+ ret = __av7110_send_fw_cmd(av7110, cbuf, 5);
+ mutex_unlock(&av7110->dcomlock);
+ if (ret && ret != -ERESTARTSYS)
+ pr_err("%s(): error %d\n", __func__, ret);
+ return ret;
+}
+
+static inline int DrawLine(struct av7110 *av7110, u8 windownr,
+ u16 x, u16 y, u16 dx, u16 dy, u16 color)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, DLine, 6,
+ windownr, x, y, dx, dy, color);
+}
+
+static inline int DrawBlock(struct av7110 *av7110, u8 windownr,
+ u16 x, u16 y, u16 dx, u16 dy, u16 color)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, DBox, 6,
+ windownr, x, y, dx, dy, color);
+}
+
+static inline int HideWindow(struct av7110 *av7110, u8 windownr)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, WHide, 1, windownr);
+}
+
+static inline int MoveWindowRel(struct av7110 *av7110, u8 windownr, u16 x, u16 y)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveD, 3, windownr, x, y);
+}
+
+static inline int MoveWindowAbs(struct av7110 *av7110, u8 windownr, u16 x, u16 y)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveA, 3, windownr, x, y);
+}
+
+static inline int DestroyOSDWindow(struct av7110 *av7110, u8 windownr)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, WDestroy, 1, windownr);
+}
+
+static inline int CreateOSDWindow(struct av7110 *av7110, u8 windownr,
+ osd_raw_window_t disptype,
+ u16 width, u16 height)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, WCreate, 4,
+ windownr, disptype, width, height);
+}
+
+static enum av7110_osd_palette_type bpp2pal[8] = {
+ Pal1Bit, Pal2Bit, 0, Pal4Bit, 0, 0, 0, Pal8Bit
+};
+
+static osd_raw_window_t bpp2bit[8] = {
+ OSD_BITMAP1, OSD_BITMAP2, 0, OSD_BITMAP4, 0, 0, 0, OSD_BITMAP8
+};
+
+static inline int WaitUntilBmpLoaded(struct av7110 *av7110)
+{
+ int ret = wait_event_timeout(av7110->bmpq,
+ av7110->bmp_state != BMP_LOADING, 10 * HZ);
+ if (ret == 0) {
+ pr_warn("warning: timeout waiting in LoadBitmap: %d, %d\n", ret, av7110->bmp_state);
+ av7110->bmp_state = BMP_NONE;
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static inline int LoadBitmap(struct av7110 *av7110,
+ u16 dx, u16 dy, int inc, u8 __user *data)
+{
+ u16 format;
+ int bpp;
+ int i;
+ int d, delta;
+ u8 c;
+ int ret;
+
+ dprintk(4, "%p\n", av7110);
+
+ format = bpp2bit[av7110->osdbpp[av7110->osdwin]];
+
+ av7110->bmp_state = BMP_LOADING;
+ if (format == OSD_BITMAP8) {
+ bpp = 8; delta = 1;
+ } else if (format == OSD_BITMAP4) {
+ bpp = 4; delta = 2;
+ } else if (format == OSD_BITMAP2) {
+ bpp = 2; delta = 4;
+ } else if (format == OSD_BITMAP1) {
+ bpp = 1; delta = 8;
+ } else {
+ av7110->bmp_state = BMP_NONE;
+ return -EINVAL;
+ }
+ av7110->bmplen = ((dx * dy * bpp + 7) & ~7) / 8;
+ av7110->bmpp = 0;
+ if (av7110->bmplen > 32768) {
+ av7110->bmp_state = BMP_NONE;
+ return -EINVAL;
+ }
+ for (i = 0; i < dy; i++) {
+ if (copy_from_user(av7110->bmpbuf + 1024 + i * dx, data + i * inc, dx)) {
+ av7110->bmp_state = BMP_NONE;
+ return -EINVAL;
+ }
+ }
+ if (format != OSD_BITMAP8) {
+ for (i = 0; i < dx * dy / delta; i++) {
+ c = ((u8 *)av7110->bmpbuf)[1024 + i * delta + delta - 1];
+ for (d = delta - 2; d >= 0; d--) {
+ c |= (((u8 *)av7110->bmpbuf)[1024 + i * delta + d]
+ << ((delta - d - 1) * bpp));
+ ((u8 *)av7110->bmpbuf)[1024 + i] = c;
+ }
+ }
+ }
+ av7110->bmplen += 1024;
+ dprintk(4, "av7110_fw_cmd(): LoadBmp size %d\n", av7110->bmplen);
+ ret = av7110_fw_cmd(av7110, COMTYPE_OSD, LoadBmp, 3, format, dx, dy);
+ if (!ret)
+ ret = WaitUntilBmpLoaded(av7110);
+ return ret;
+}
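+
+/*
+ * Note on the packing loop in LoadBitmap() above: the user bitmap is
+ * copied with one pixel per byte; for the 1, 2 and 4 bpp formats, each
+ * group of 'delta' input bytes is then folded into a single output byte
+ * at bpp bits per pixel, with the first pixel of the group ending up in
+ * the most significant bits.
+ */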
+
+static int BlitBitmap(struct av7110 *av7110, u16 x, u16 y)
+{
+ dprintk(4, "%p\n", av7110);
+
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, BlitBmp, 4, av7110->osdwin, x, y, 0);
+}
+
+static inline int ReleaseBitmap(struct av7110 *av7110)
+{
+ dprintk(4, "%p\n", av7110);
+
+ if (av7110->bmp_state != BMP_LOADED && FW_VERSION(av7110->arm_app) < 0x261e)
+ return -1;
+ if (av7110->bmp_state == BMP_LOADING)
+ dprintk(1, "%s called while BMP_LOADING\n", __func__);
+ av7110->bmp_state = BMP_NONE;
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, ReleaseBmp, 0);
+}
+
+static u32 RGB2YUV(u16 R, u16 G, u16 B)
+{
+ u16 y, u, v;
+ u16 Y, Cr, Cb;
+
+ y = R * 77 + G * 150 + B * 29; /* Luma=0.299R+0.587G+0.114B 0..65535 */
+	u = 2048 + B * 8 - (y >> 5);	/* Cb 0..4095 */
+	v = 2048 + R * 8 - (y >> 5);	/* Cr 0..4095 */
+
+ Y = y / 256;
+ Cb = u / 16;
+ Cr = v / 16;
+
+ return Cr | (Cb << 16) | (Y << 8);
+}
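+
+/*
+ * RGB2YUV() works in fixed point: 77/256, 150/256 and 29/256 approximate
+ * the 0.299/0.587/0.114 luma weights quoted above (77 + 150 + 29 = 256),
+ * so y is roughly 256 times the luma. The return value packs Cr into
+ * bits 0-7, Y into bits 8-15 and Cb into bits 16-23, which matches the
+ * cl/ch split done by OSDSetColor() below.
+ */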
+
+static int OSDSetColor(struct av7110 *av7110, u8 color, u8 r, u8 g, u8 b, u8 blend)
+{
+ int ret;
+
+ u16 ch, cl;
+ u32 yuv;
+
+ yuv = blend ? RGB2YUV(r, g, b) : 0;
+ cl = (yuv & 0xffff);
+ ch = ((yuv >> 16) & 0xffff);
+ ret = SetColor_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]],
+ color, ch, cl);
+ if (!ret)
+ ret = SetBlend_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]],
+ color, ((blend >> 4) & 0x0f));
+ return ret;
+}
+
+static int OSDSetPalette(struct av7110 *av7110, u32 __user *colors, u8 first, u8 last)
+{
+ int i;
+ int length = last - first + 1;
+
+ if (length * 4 > DATA_BUFF3_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < length; i++) {
+ u32 color, blend, yuv;
+
+ if (get_user(color, colors + i))
+ return -EFAULT;
+ blend = (color & 0xF0000000) >> 4;
+ yuv = blend ? RGB2YUV(color & 0xFF, (color >> 8) & 0xFF,
+ (color >> 16) & 0xFF) | blend : 0;
+ yuv = ((yuv & 0xFFFF0000) >> 16) | ((yuv & 0x0000FFFF) << 16);
+ wdebi(av7110, DEBINOSWAP, DATA_BUFF3_BASE + i * 4, yuv, 4);
+ }
+ return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Palette, 4,
+ av7110->osdwin,
+ bpp2pal[av7110->osdbpp[av7110->osdwin]],
+ first, last);
+}
+
+static int OSDSetBlock(struct av7110 *av7110, int x0, int y0,
+ int x1, int y1, int inc, u8 __user *data)
+{
+ uint w, h, bpp, bpl, size, lpb, bnum, brest;
+ int i;
+ int rc, release_rc;
+
+ w = x1 - x0 + 1;
+ h = y1 - y0 + 1;
+ if (inc <= 0)
+ inc = w;
+ if (w <= 0 || w > 720 || h <= 0 || h > 576)
+ return -EINVAL;
+ bpp = av7110->osdbpp[av7110->osdwin] + 1;
+ bpl = ((w * bpp + 7) & ~7) / 8;
+ size = h * bpl;
+ lpb = (32 * 1024) / bpl;
+ bnum = size / (lpb * bpl);
+ brest = size - bnum * lpb * bpl;
+
+ if (av7110->bmp_state == BMP_LOADING) {
+ /* possible if syscall is repeated by -ERESTARTSYS and if firmware cannot abort */
+ if (WARN_ON(FW_VERSION(av7110->arm_app) >= 0x261e))
+ return -EIO;
+ rc = WaitUntilBmpLoaded(av7110);
+ if (rc)
+ return rc;
+ /* just continue. This should work for all fw versions
+ * if bnum==1 && !brest && LoadBitmap was successful
+ */
+ }
+
+ rc = 0;
+ for (i = 0; i < bnum; i++) {
+ rc = LoadBitmap(av7110, w, lpb, inc, data);
+ if (rc)
+ break;
+ rc = BlitBitmap(av7110, x0, y0 + i * lpb);
+ if (rc)
+ break;
+ data += lpb * inc;
+ }
+ if (!rc && brest) {
+ rc = LoadBitmap(av7110, w, brest / bpl, inc, data);
+ if (!rc)
+ rc = BlitBitmap(av7110, x0, y0 + bnum * lpb);
+ }
+ release_rc = ReleaseBitmap(av7110);
+ if (!rc)
+ rc = release_rc;
+ if (rc)
+ dprintk(1, "returns %d\n", rc);
+ return rc;
+}
+
+int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&av7110->osd_mutex))
+ return -ERESTARTSYS;
+
+ switch (dc->cmd) {
+ case OSD_Close:
+ ret = DestroyOSDWindow(av7110, av7110->osdwin);
+ break;
+ case OSD_Open:
+ av7110->osdbpp[av7110->osdwin] = (dc->color - 1) & 7;
+ ret = CreateOSDWindow(av7110, av7110->osdwin,
+ bpp2bit[av7110->osdbpp[av7110->osdwin]],
+ dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1);
+ if (ret)
+ break;
+ if (!dc->data) {
+ ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
+ if (ret)
+ break;
+ ret = SetColorBlend(av7110, av7110->osdwin);
+ }
+ break;
+ case OSD_Show:
+ ret = MoveWindowRel(av7110, av7110->osdwin, 0, 0);
+ break;
+ case OSD_Hide:
+ ret = HideWindow(av7110, av7110->osdwin);
+ break;
+ case OSD_Clear:
+ ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, 0);
+ break;
+ case OSD_Fill:
+ ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, dc->color);
+ break;
+ case OSD_SetColor:
+ ret = OSDSetColor(av7110, dc->color, dc->x0, dc->y0, dc->x1, dc->y1);
+ break;
+ case OSD_SetPalette:
+ if (FW_VERSION(av7110->arm_app) >= 0x2618) {
+ ret = OSDSetPalette(av7110, dc->data, dc->color, dc->x0);
+ } else {
+ int i, len = dc->x0 - dc->color + 1;
+ u8 __user *colors = (u8 __user *)dc->data;
+ u8 r, g = 0, b = 0, blend = 0;
+
+ ret = 0;
+ for (i = 0; i < len; i++) {
+ if (get_user(r, colors + i * 4) ||
+ get_user(g, colors + i * 4 + 1) ||
+ get_user(b, colors + i * 4 + 2) ||
+ get_user(blend, colors + i * 4 + 3)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = OSDSetColor(av7110, dc->color + i, r, g, b, blend);
+ if (ret)
+ break;
+ }
+ }
+ break;
+ case OSD_SetPixel:
+ ret = DrawLine(av7110, av7110->osdwin,
+ dc->x0, dc->y0, 0, 0, dc->color);
+ break;
+ case OSD_SetRow:
+ dc->y1 = dc->y0;
+ fallthrough;
+ case OSD_SetBlock:
+ ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data);
+ break;
+ case OSD_FillRow:
+ ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0,
+ dc->x1 - dc->x0 + 1, dc->y1, dc->color);
+ break;
+ case OSD_FillBlock:
+ ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0,
+ dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1, dc->color);
+ break;
+ case OSD_Line:
+ ret = DrawLine(av7110, av7110->osdwin,
+ dc->x0, dc->y0, dc->x1 - dc->x0, dc->y1 - dc->y0, dc->color);
+ break;
+ case OSD_Text:
+ {
+ char textbuf[240];
+
+ if (strncpy_from_user(textbuf, dc->data, 240) < 0) {
+ ret = -EFAULT;
+ break;
+ }
+ textbuf[239] = 0;
+ if (dc->x1 > 3)
+ dc->x1 = 3;
+ ret = SetFont(av7110, av7110->osdwin, dc->x1,
+ (u16)(dc->color & 0xffff), (u16)(dc->color >> 16));
+ if (!ret)
+ ret = FlushText(av7110);
+ if (!ret)
+ ret = WriteText(av7110, av7110->osdwin, dc->x0, dc->y0, textbuf);
+ break;
+ }
+ case OSD_SetWindow:
+ if (dc->x0 < 1 || dc->x0 > 7) {
+ ret = -EINVAL;
+ } else {
+ av7110->osdwin = dc->x0;
+ ret = 0;
+ }
+ break;
+ case OSD_MoveWindow:
+ ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
+ if (!ret)
+ ret = SetColorBlend(av7110, av7110->osdwin);
+ break;
+ case OSD_OpenRaw:
+ if (dc->color < OSD_BITMAP1 || dc->color > OSD_CURSOR) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dc->color >= OSD_BITMAP1 && dc->color <= OSD_BITMAP8HR)
+ av7110->osdbpp[av7110->osdwin] = (1 << (dc->color & 3)) - 1;
+ else
+ av7110->osdbpp[av7110->osdwin] = 0;
+ ret = CreateOSDWindow(av7110, av7110->osdwin, (osd_raw_window_t)dc->color,
+ dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1);
+ if (ret)
+ break;
+ if (!dc->data) {
+ ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
+ if (!ret)
+ ret = SetColorBlend(av7110, av7110->osdwin);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&av7110->osd_mutex);
+ if (ret == -ERESTARTSYS)
+ dprintk(1, "%s(%d) returns with -ERESTARTSYS\n", __func__, dc->cmd);
+ else if (ret)
+ dprintk(1, "%s(%d) returns with %d\n", __func__, dc->cmd, ret);
+
+ return ret;
+}
+
+int av7110_osd_capability(struct av7110 *av7110, osd_cap_t *cap)
+{
+ switch (cap->cmd) {
+ case OSD_CAP_MEMSIZE:
+ if (FW_4M_SDRAM(av7110->arm_app))
+ cap->val = 1000000;
+ else
+ cap->val = 92000;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+#endif /* CONFIG_DVB_AV7110_OSD */
diff --git a/drivers/staging/media/av7110/av7110_hw.h b/drivers/staging/media/av7110/av7110_hw.h
new file mode 100644
index 000000000000..d4579f411c56
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_hw.h
@@ -0,0 +1,483 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _AV7110_HW_H_
+#define _AV7110_HW_H_
+
+#include "av7110.h"
+
+/* DEBI transfer mode defs */
+
+#define DEBINOSWAP 0x000e0000
+#define DEBISWAB 0x001e0000
+#define DEBISWAP 0x002e0000
+
+#define ARM_WAIT_FREE (HZ)
+#define ARM_WAIT_SHAKE (HZ / 5)
+#define ARM_WAIT_OSD (HZ)
+
+enum av7110_bootstate {
+ BOOTSTATE_BUFFER_EMPTY = 0,
+ BOOTSTATE_BUFFER_FULL = 1,
+ BOOTSTATE_AV7110_BOOT_COMPLETE = 2
+};
+
+enum av7110_type_rec_play_format {
+ RP_None,
+ AudioPES,
+ AudioMp2,
+ AudioPCM,
+ VideoPES,
+ AV_PES
+};
+
+enum av7110_osd_palette_type {
+ NoPalet = 0, /* No palette */
+ Pal1Bit = 2, /* 2 colors for 1 Bit Palette */
+ Pal2Bit = 4, /* 4 colors for 2 bit palette */
+ Pal4Bit = 16, /* 16 colors for 4 bit palette */
+	Pal8Bit = 256	/* 256 colors for 8 bit palette */
+};
+
+/* switch defines */
+#define SB_GPIO 3
+#define SB_OFF SAA7146_GPIO_OUTLO /* SlowBlank off (TV-Mode) */
+#define SB_ON SAA7146_GPIO_INPUT /* SlowBlank on (AV-Mode) */
+#define SB_WIDE SAA7146_GPIO_OUTHI /* SlowBlank 6V (16/9-Mode) (not implemented) */
+
+#define FB_GPIO 1
+#define FB_OFF SAA7146_GPIO_LO /* FastBlank off (CVBS-Mode) */
+#define FB_ON SAA7146_GPIO_OUTHI /* FastBlank on (RGB-Mode) */
+#define FB_LOOP SAA7146_GPIO_INPUT /* FastBlank loop-through (PC graphics ???) */
+
+enum av7110_video_output_mode {
+ NO_OUT = 0, /* disable analog output */
+ CVBS_RGB_OUT = 1,
+ CVBS_YC_OUT = 2,
+ YC_OUT = 3
+};
+
+/* firmware internal msg q status: */
+#define GPMQFull 0x0001 /* Main Message Queue Full */
+#define GPMQOver 0x0002 /* Main Message Queue Overflow */
+#define HPQFull 0x0004 /* High Priority Msg Queue Full */
+#define HPQOver 0x0008
+#define OSDQFull 0x0010 /* OSD Queue Full */
+#define OSDQOver 0x0020
+#define GPMQBusy 0x0040 /* Queue not empty, FW >= 261d */
+#define HPQBusy 0x0080
+#define OSDQBusy 0x0100
+
+/* hw section filter flags */
+#define SECTION_EIT 0x01
+#define SECTION_SINGLE 0x00
+#define SECTION_CYCLE 0x02
+#define SECTION_CONTINUOS 0x04
+#define SECTION_MODE 0x06
+#define SECTION_IPMPE 0x0C /* size up to 4k */
+#define SECTION_HIGH_SPEED 0x1C /* larger buffer */
+#define DATA_PIPING_FLAG 0x20 /* for Data Piping Filter */
+
+#define PBUFSIZE_NONE 0x0000
+#define PBUFSIZE_1P 0x0100
+#define PBUFSIZE_2P 0x0200
+#define PBUFSIZE_1K 0x0300
+#define PBUFSIZE_2K 0x0400
+#define PBUFSIZE_4K 0x0500
+#define PBUFSIZE_8K 0x0600
+#define PBUFSIZE_16K 0x0700
+#define PBUFSIZE_32K 0x0800
+
+/* firmware command codes */
+enum av7110_osd_command {
+ WCreate,
+ WDestroy,
+ WMoveD,
+ WMoveA,
+ WHide,
+ WTop,
+ DBox,
+ DLine,
+ DText,
+ Set_Font,
+ SetColor,
+ SetBlend,
+ SetWBlend,
+ SetCBlend,
+ SetNonBlend,
+ LoadBmp,
+ BlitBmp,
+ ReleaseBmp,
+ SetWTrans,
+ SetWNoTrans,
+ Set_Palette
+};
+
+enum av7110_pid_command {
+ MultiPID,
+ VideoPID,
+ AudioPID,
+ InitFilt,
+ FiltError,
+ NewVersion,
+ CacheError,
+ AddPIDFilter,
+ DelPIDFilter,
+ Scan,
+ SetDescr,
+ SetIR,
+ FlushTSQueue
+};
+
+enum av7110_mpeg_command {
+ SelAudChannels
+};
+
+enum av7110_audio_command {
+ AudioDAC,
+ CabADAC,
+ ON22K,
+ OFF22K,
+ MainSwitch,
+ ADSwitch,
+ SendDiSEqC,
+ SetRegister,
+ SpdifSwitch
+};
+
+enum av7110_request_command {
+ AudioState,
+ AudioBuffState,
+ VideoState1,
+ VideoState2,
+ VideoState3,
+ CrashCounter,
+ ReqVersion,
+ ReqVCXO,
+ ReqRegister,
+ ReqSecFilterError,
+ ReqSTC
+};
+
+enum av7110_encoder_command {
+ SetVidMode,
+ SetTestMode,
+ LoadVidCode,
+ SetMonitorType,
+ SetPanScanType,
+ SetFreezeMode,
+ SetWSSConfig
+};
+
+enum av7110_rec_play_state {
+ __Record,
+ __Stop,
+ __Play,
+ __Pause,
+ __Slow,
+ __FF_IP,
+ __Scan_I,
+ __Continue
+};
+
+enum av7110_fw_cmd_misc {
+ AV7110_FW_VIDEO_ZOOM = 1,
+ AV7110_FW_VIDEO_COMMAND,
+ AV7110_FW_AUDIO_COMMAND
+};
+
+enum av7110_command_type {
+ COMTYPE_NOCOM,
+ COMTYPE_PIDFILTER,
+ COMTYPE_MPEGDECODER,
+ COMTYPE_OSD,
+ COMTYPE_BMP,
+ COMTYPE_ENCODER,
+ COMTYPE_AUDIODAC,
+ COMTYPE_REQUEST,
+ COMTYPE_SYSTEM,
+ COMTYPE_REC_PLAY,
+ COMTYPE_COMMON_IF,
+ COMTYPE_PID_FILTER,
+ COMTYPE_PES,
+ COMTYPE_TS,
+ COMTYPE_VIDEO,
+ COMTYPE_AUDIO,
+ COMTYPE_CI_LL,
+ COMTYPE_MISC = 0x80
+};
+
+#define VID_NONE_PREF		0x00	/* No aspect ratio processing preferred */
+#define VID_PAN_SCAN_PREF 0x01 /* Pan and Scan Display preferred */
+#define VID_VERT_COMP_PREF 0x02 /* Vertical compression display preferred */
+#define VID_VC_AND_PS_PREF 0x03 /* PanScan and vertical Compression if allowed */
+#define VID_CENTRE_CUT_PREF 0x05 /* PanScan with zero vector */
+
+/* MPEG video decoder commands */
+#define AV_VIDEO_CMD_STOP 0x000e
+#define AV_VIDEO_CMD_PLAY 0x000d
+#define AV_VIDEO_CMD_FREEZE 0x0102
+#define AV_VIDEO_CMD_FFWD 0x0016
+#define AV_VIDEO_CMD_SLOW 0x0022
+
+/* MPEG audio decoder commands */
+#define AUDIO_CMD_MUTE 0x0001
+#define AUDIO_CMD_UNMUTE 0x0002
+#define AUDIO_CMD_PCM16 0x0010
+#define AUDIO_CMD_STEREO 0x0080
+#define AUDIO_CMD_MONO_L 0x0100
+#define AUDIO_CMD_MONO_R 0x0200
+#define AUDIO_CMD_SYNC_OFF 0x000e
+#define AUDIO_CMD_SYNC_ON 0x000f
+
+/* firmware data interface codes */
+#define DATA_NONE 0x00
+#define DATA_FSECTION 0x01
+#define DATA_IPMPE 0x02
+#define DATA_MPEG_RECORD 0x03
+#define DATA_DEBUG_MESSAGE 0x04
+#define DATA_COMMON_INTERFACE 0x05
+#define DATA_MPEG_PLAY 0x06
+#define DATA_BMP_LOAD 0x07
+#define DATA_IRCOMMAND 0x08
+#define DATA_PIPING 0x09
+#define DATA_STREAMING 0x0a
+#define DATA_CI_GET 0x0b
+#define DATA_CI_PUT 0x0c
+#define DATA_MPEG_VIDEO_EVENT 0x0d
+
+#define DATA_PES_RECORD 0x10
+#define DATA_PES_PLAY 0x11
+#define DATA_TS_RECORD 0x12
+#define DATA_TS_PLAY 0x13
+
+/* ancient CI command codes, only two are actually still used
+ * by the link level CI firmware
+ */
+#define CI_CMD_ERROR 0x00
+#define CI_CMD_ACK 0x01
+#define CI_CMD_SYSTEM_READY 0x02
+#define CI_CMD_KEYPRESS 0x03
+#define CI_CMD_ON_TUNED 0x04
+#define CI_CMD_ON_SWITCH_PROGRAM 0x05
+#define CI_CMD_SECTION_ARRIVED 0x06
+#define CI_CMD_SECTION_TIMEOUT 0x07
+#define CI_CMD_TIME 0x08
+#define CI_CMD_ENTER_MENU 0x09
+#define CI_CMD_FAST_PSI 0x0a
+#define CI_CMD_GET_SLOT_INFO 0x0b
+
+#define CI_MSG_NONE 0x00
+#define CI_MSG_CI_INFO 0x01
+#define CI_MSG_MENU 0x02
+#define CI_MSG_LIST 0x03
+#define CI_MSG_TEXT 0x04
+#define CI_MSG_REQUEST_INPUT 0x05
+#define CI_MSG_INPUT_COMPLETE 0x06
+#define CI_MSG_LIST_MORE 0x07
+#define CI_MSG_MENU_MORE 0x08
+#define CI_MSG_CLOSE_MMI_IMM 0x09
+#define CI_MSG_SECTION_REQUEST 0x0a
+#define CI_MSG_CLOSE_FILTER 0x0b
+#define CI_PSI_COMPLETE 0x0c
+#define CI_MODULE_READY 0x0d
+#define CI_SWITCH_PRG_REPLY 0x0e
+#define CI_MSG_TEXT_MORE 0x0f
+
+#define CI_MSG_CA_PMT 0xe0
+#define CI_MSG_ERROR 0xf0
+
+/* base address of the dual ported RAM which serves as communication
+ * area between PCI bus and av7110,
+ * as seen by the DEBI bus of the saa7146
+ */
+#define DPRAM_BASE 0x4000
+
+/* boot protocol area */
+#define AV7110_BOOT_STATE (DPRAM_BASE + 0x3F8)
+#define AV7110_BOOT_SIZE (DPRAM_BASE + 0x3FA)
+#define AV7110_BOOT_BASE (DPRAM_BASE + 0x3FC)
+#define AV7110_BOOT_BLOCK (DPRAM_BASE + 0x400)
+#define AV7110_BOOT_MAX_SIZE 0xc00
+
+/* firmware command protocol area */
+#define IRQ_STATE (DPRAM_BASE + 0x0F4)
+#define IRQ_STATE_EXT (DPRAM_BASE + 0x0F6)
+#define MSGSTATE (DPRAM_BASE + 0x0F8)
+#define COMMAND (DPRAM_BASE + 0x0FC)
+#define COM_BUFF (DPRAM_BASE + 0x100)
+#define COM_BUFF_SIZE 0x20
+
+/* various data buffers */
+#define BUFF1_BASE (DPRAM_BASE + 0x120)
+#define BUFF1_SIZE 0xE0
+
+#define DATA_BUFF0_BASE (DPRAM_BASE + 0x200)
+#define DATA_BUFF0_SIZE 0x0800
+
+#define DATA_BUFF1_BASE (DATA_BUFF0_BASE + DATA_BUFF0_SIZE)
+#define DATA_BUFF1_SIZE 0x0800
+
+#define DATA_BUFF2_BASE (DATA_BUFF1_BASE + DATA_BUFF1_SIZE)
+#define DATA_BUFF2_SIZE 0x0800
+
+#define DATA_BUFF3_BASE (DATA_BUFF2_BASE + DATA_BUFF2_SIZE)
+#define DATA_BUFF3_SIZE 0x0400
+
+#define Reserved (DPRAM_BASE + 0x1E00)
+#define Reserved_SIZE 0x1C0
+
+/* firmware status area */
+#define STATUS_BASE (DPRAM_BASE + 0x1FC0)
+#define STATUS_LOOPS (STATUS_BASE + 0x08)
+
+#define STATUS_MPEG_WIDTH (STATUS_BASE + 0x0C)
+/* ((aspect_ratio & 0xf) << 12) | (height & 0xfff) */
+#define STATUS_MPEG_HEIGHT_AR (STATUS_BASE + 0x0E)
+
+/* firmware data protocol area */
+#define RX_TYPE (DPRAM_BASE + 0x1FE8)
+#define RX_LEN (DPRAM_BASE + 0x1FEA)
+#define TX_TYPE (DPRAM_BASE + 0x1FEC)
+#define TX_LEN (DPRAM_BASE + 0x1FEE)
+
+#define RX_BUFF (DPRAM_BASE + 0x1FF4)
+#define TX_BUFF (DPRAM_BASE + 0x1FF6)
+
+#define HANDSHAKE_REG (DPRAM_BASE + 0x1FF8)
+#define COM_IF_LOCK (DPRAM_BASE + 0x1FFA)
+
+#define IRQ_RX (DPRAM_BASE + 0x1FFC)
+#define IRQ_TX (DPRAM_BASE + 0x1FFE)
+
+/* used by boot protocol to load firmware into av7110 DRAM */
+#define DRAM_START_CODE 0x2e000404
+#define DRAM_MAX_CODE_SIZE 0x00100000
+
+/* saa7146 gpio lines */
+#define RESET_LINE 2
+#define DEBI_DONE_LINE 1
+#define ARM_IRQ_LINE 0
+
+int av7110_bootarm(struct av7110 *av7110);
+int av7110_firmversion(struct av7110 *av7110);
+#define FW_CI_LL_SUPPORT(arm_app) ((arm_app) & 0x80000000)
+#define FW_4M_SDRAM(arm_app) ((arm_app) & 0x40000000)
+#define FW_VERSION(arm_app) ((arm_app) & 0x0000FFFF)
+
+int av7110_wait_msgstate(struct av7110 *av7110, u16 flags);
+int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...);
+int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
+ int request_buf_len, u16 *reply_buf, int reply_buf_len);
+
+/* DEBI (saa7146 data extension bus interface) access */
+int av7110_debiwrite(struct av7110 *av7110, u32 config,
+ int addr, u32 val, unsigned int count);
+u32 av7110_debiread(struct av7110 *av7110, u32 config,
+ int addr, unsigned int count);
+
+/* DEBI during interrupt */
+/* single word writes */
+static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+{
+ av7110_debiwrite(av7110, config, addr, val, count);
+}
+
+/* buffer writes */
+static inline void mwdebi(struct av7110 *av7110, u32 config, int addr,
+ const u8 *val, int count)
+{
+ memcpy(av7110->debi_virt, val, count);
+ av7110_debiwrite(av7110, config, addr, 0, count);
+}
+
+static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+{
+ u32 res;
+
+ res = av7110_debiread(av7110, config, addr, count);
+ if (count <= 4)
+ memcpy(av7110->debi_virt, (char *)&res, count);
+ return res;
+}
+
+/* DEBI outside interrupts, only for count <= 4! */
+static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av7110->debilock, flags);
+ av7110_debiwrite(av7110, config, addr, val, count);
+ spin_unlock_irqrestore(&av7110->debilock, flags);
+}
+
+static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+{
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&av7110->debilock, flags);
+ res = av7110_debiread(av7110, config, addr, count);
+ spin_unlock_irqrestore(&av7110->debilock, flags);
+ return res;
+}
+
+/* handle mailbox registers of the dual ported RAM */
+static inline void ARM_ResetMailBox(struct av7110 *av7110)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av7110->debilock, flags);
+ av7110_debiread(av7110, DEBINOSWAP, IRQ_RX, 2);
+ av7110_debiwrite(av7110, DEBINOSWAP, IRQ_RX, 0, 2);
+ spin_unlock_irqrestore(&av7110->debilock, flags);
+}
+
+static inline void ARM_ClearMailBox(struct av7110 *av7110)
+{
+ iwdebi(av7110, DEBINOSWAP, IRQ_RX, 0, 2);
+}
+
+static inline void ARM_ClearIrq(struct av7110 *av7110)
+{
+ irdebi(av7110, DEBINOSWAP, IRQ_RX, 0, 2);
+}
+
+/****************************************************************************
+ * Firmware commands
+ ****************************************************************************/
+
+static inline int SendDAC(struct av7110 *av7110, u8 addr, u8 data)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, AudioDAC, 2, addr, data);
+}
+
+static inline int av7710_set_video_mode(struct av7110 *av7110, int mode)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetVidMode, 1, mode);
+}
+
+static inline int vidcom(struct av7110 *av7110, u32 com, u32 arg)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_MISC, AV7110_FW_VIDEO_COMMAND, 4,
+ (com >> 16), (com & 0xffff),
+ (arg >> 16), (arg & 0xffff));
+}
+
+static inline int audcom(struct av7110 *av7110, u32 com)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_MISC, AV7110_FW_AUDIO_COMMAND, 2,
+ (com >> 16), (com & 0xffff));
+}
+
+static inline int Set22K(struct av7110 *av7110, int state)
+{
+ return av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, (state ? ON22K : OFF22K), 0);
+}
+
+int av7110_diseqc_send(struct av7110 *av7110, int len, u8 *msg, unsigned long burst);
+
+#ifdef CONFIG_DVB_AV7110_OSD
+int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc);
+int av7110_osd_capability(struct av7110 *av7110, osd_cap_t *cap);
+#endif /* CONFIG_DVB_AV7110_OSD */
+
+#endif /* _AV7110_HW_H_ */
diff --git a/drivers/staging/media/av7110/av7110_ipack.c b/drivers/staging/media/av7110/av7110_ipack.c
new file mode 100644
index 000000000000..4be6e225f08e
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_ipack.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "dvb_filter.h"
+#include "av7110_ipack.h"
+#include <linux/string.h> /* for memcpy() */
+#include <linux/vmalloc.h>
+
+void av7110_ipack_reset(struct ipack *p)
+{
+ p->found = 0;
+ p->cid = 0;
+ p->plength = 0;
+ p->flag1 = 0;
+ p->flag2 = 0;
+ p->hlength = 0;
+ p->mpeg = 0;
+ p->check = 0;
+ p->which = 0;
+ p->done = 0;
+ p->count = 0;
+}
+
+int av7110_ipack_init(struct ipack *p, int size,
+ void (*func)(u8 *buf, int size, void *priv))
+{
+ p->buf = vmalloc(size);
+ if (!p->buf)
+ return -ENOMEM;
+ p->size = size;
+ p->func = func;
+ p->repack_subids = 0;
+ av7110_ipack_reset(p);
+ return 0;
+}
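+
+/*
+ * The func callback given to av7110_ipack_init() receives each completed
+ * PES packet: send_ipack() below patches the stream id and the 16-bit
+ * PES packet length into bytes 3-5 of the buffer before invoking it and
+ * then re-initializes the header for the next packet.
+ */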
+
+void av7110_ipack_free(struct ipack *p)
+{
+ vfree(p->buf);
+}
+
+static void send_ipack(struct ipack *p)
+{
+ int off;
+ struct dvb_audio_info ai;
+ int ac3_off = 0;
+ int streamid = 0;
+ int nframes = 0;
+ int f = 0;
+
+ switch (p->mpeg) {
+ case 2:
+ if (p->count < 10)
+ return;
+ p->buf[3] = p->cid;
+ p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8);
+ p->buf[5] = (u8)((p->count - 6) & 0x00ff);
+ if (p->repack_subids && p->cid == PRIVATE_STREAM1) {
+ off = 9 + p->buf[8];
+ streamid = p->buf[off];
+ if ((streamid & 0xf8) == 0x80) {
+ ai.off = 0;
+ ac3_off = ((p->buf[off + 2] << 8) |
+ p->buf[off + 3]);
+ if (ac3_off < p->count)
+ f = dvb_filter_get_ac3info(p->buf + off + 3 + ac3_off,
+ p->count - ac3_off, &ai, 0);
+ if (!f) {
+ nframes = (p->count - off - 3 - ac3_off) /
+ ai.framesize + 1;
+ p->buf[off + 2] = (ac3_off >> 8) & 0xff;
+ p->buf[off + 3] = (ac3_off) & 0xff;
+ p->buf[off + 1] = nframes;
+ ac3_off += nframes * ai.framesize - p->count;
+ }
+ }
+ }
+ p->func(p->buf, p->count, p->data);
+
+ p->buf[6] = 0x80;
+ p->buf[7] = 0x00;
+ p->buf[8] = 0x00;
+ p->count = 9;
+ if (p->repack_subids && p->cid == PRIVATE_STREAM1 &&
+ (streamid & 0xf8) == 0x80) {
+ p->count += 4;
+ p->buf[9] = streamid;
+ p->buf[10] = (ac3_off >> 8) & 0xff;
+ p->buf[11] = (ac3_off) & 0xff;
+ p->buf[12] = 0;
+ }
+ break;
+
+ case 1:
+ if (p->count < 8)
+ return;
+ p->buf[3] = p->cid;
+ p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8);
+ p->buf[5] = (u8)((p->count - 6) & 0x00ff);
+ p->func(p->buf, p->count, p->data);
+
+ p->buf[6] = 0x0f;
+ p->count = 7;
+ break;
+ }
+}
+
+void av7110_ipack_flush(struct ipack *p)
+{
+ if (p->plength != MMAX_PLENGTH - 6 || p->found <= 6)
+ return;
+ p->plength = p->found - 6;
+ p->found = 0;
+ send_ipack(p);
+ av7110_ipack_reset(p);
+}
+
+static void write_ipack(struct ipack *p, const u8 *data, int count)
+{
+ u8 headr[3] = { 0x00, 0x00, 0x01 };
+
+ if (p->count < 6) {
+ memcpy(p->buf, headr, 3);
+ p->count = 6;
+ }
+
+ if (p->count + count < p->size) {
+ memcpy(p->buf + p->count, data, count);
+ p->count += count;
+ } else {
+ int rest = p->size - p->count;
+
+ memcpy(p->buf + p->count, data, rest);
+ p->count += rest;
+ send_ipack(p);
+ if (count - rest > 0)
+ write_ipack(p, data + rest, count - rest);
+ }
+}
+
+int av7110_ipack_instant_repack(const u8 *buf, int count, struct ipack *p)
+{
+ int l;
+ int c = 0;
+
+ while (c < count && (p->mpeg == 0 ||
+ (p->mpeg == 1 && p->found < 7) ||
+ (p->mpeg == 2 && p->found < 9)) &&
+ (p->found < 5 || !p->done)) {
+ switch (p->found) {
+ case 0:
+ case 1:
+ if (buf[c] == 0x00)
+ p->found++;
+ else
+ p->found = 0;
+ c++;
+ break;
+ case 2:
+ if (buf[c] == 0x01)
+ p->found++;
+ else if (buf[c] == 0)
+ p->found = 2;
+ else
+ p->found = 0;
+ c++;
+ break;
+ case 3:
+ p->cid = 0;
+ switch (buf[c]) {
+ case PROG_STREAM_MAP:
+ case PRIVATE_STREAM2:
+ case PROG_STREAM_DIR:
+ case ECM_STREAM:
+ case EMM_STREAM:
+ case PADDING_STREAM:
+ case DSM_CC_STREAM:
+ case ISO13522_STREAM:
+ p->done = 1;
+ fallthrough;
+ case PRIVATE_STREAM1:
+ case VIDEO_STREAM_S ... VIDEO_STREAM_E:
+ case AUDIO_STREAM_S ... AUDIO_STREAM_E:
+ p->found++;
+ p->cid = buf[c];
+ c++;
+ break;
+ default:
+ p->found = 0;
+ break;
+ }
+ break;
+
+ case 4:
+ if (count - c > 1) {
+ p->plen[0] = buf[c];
+ c++;
+ p->plen[1] = buf[c];
+ c++;
+ p->found += 2;
+ p->plength = (p->plen[0] << 8) | p->plen[1];
+ } else {
+ p->plen[0] = buf[c];
+ p->found++;
+ return count;
+ }
+ break;
+ case 5:
+ p->plen[1] = buf[c];
+ c++;
+ p->found++;
+ p->plength = (p->plen[0] << 8) | p->plen[1];
+ break;
+ case 6:
+ if (!p->done) {
+ p->flag1 = buf[c];
+ c++;
+ p->found++;
+ if ((p->flag1 & 0xc0) == 0x80) {
+ p->mpeg = 2;
+ } else {
+ p->hlength = 0;
+ p->which = 0;
+ p->mpeg = 1;
+ p->flag2 = 0;
+ }
+ }
+ break;
+
+ case 7:
+ if (!p->done && p->mpeg == 2) {
+ p->flag2 = buf[c];
+ c++;
+ p->found++;
+ }
+ break;
+
+ case 8:
+ if (!p->done && p->mpeg == 2) {
+ p->hlength = buf[c];
+ c++;
+ p->found++;
+ }
+ break;
+ }
+ }
+
+ if (c == count)
+ return count;
+
+ if (!p->plength)
+ p->plength = MMAX_PLENGTH - 6;
+
+ if (!(p->done || ((p->mpeg == 2 && p->found >= 9) ||
+ (p->mpeg == 1 && p->found >= 7))))
+ return count;
+
+ switch (p->cid) {
+ case AUDIO_STREAM_S ... AUDIO_STREAM_E:
+ case VIDEO_STREAM_S ... VIDEO_STREAM_E:
+ case PRIVATE_STREAM1:
+ if (p->mpeg == 2 && p->found == 9) {
+ write_ipack(p, &p->flag1, 1);
+ write_ipack(p, &p->flag2, 1);
+ write_ipack(p, &p->hlength, 1);
+ }
+
+ if (p->mpeg == 1 && p->found == 7)
+ write_ipack(p, &p->flag1, 1);
+
+ if (p->mpeg == 2 && (p->flag2 & PTS_ONLY) && p->found < 14) {
+ while (c < count && p->found < 14) {
+ p->pts[p->found - 9] = buf[c];
+ write_ipack(p, buf + c, 1);
+ c++;
+ p->found++;
+ }
+ if (c == count)
+ return count;
+ }
+
+ if (p->mpeg == 1 && p->which < 2000) {
+ if (p->found == 7) {
+ p->check = p->flag1;
+ p->hlength = 1;
+ }
+
+ while (!p->which && c < count && p->check == 0xff) {
+ p->check = buf[c];
+ write_ipack(p, buf + c, 1);
+ c++;
+ p->found++;
+ p->hlength++;
+ }
+
+ if (c == count)
+ return count;
+
+ if ((p->check & 0xc0) == 0x40 && !p->which) {
+ p->check = buf[c];
+ write_ipack(p, buf + c, 1);
+ c++;
+ p->found++;
+ p->hlength++;
+
+ p->which = 1;
+ if (c == count)
+ return count;
+ p->check = buf[c];
+ write_ipack(p, buf + c, 1);
+ c++;
+ p->found++;
+ p->hlength++;
+ p->which = 2;
+ if (c == count)
+ return count;
+ }
+
+ if (p->which == 1) {
+ p->check = buf[c];
+ write_ipack(p, buf + c, 1);
+ c++;
+ p->found++;
+ p->hlength++;
+ p->which = 2;
+ if (c == count)
+ return count;
+ }
+
+ if ((p->check & 0x30) && p->check != 0xff) {
+ p->flag2 = (p->check & 0xf0) << 2;
+ p->pts[0] = p->check;
+ p->which = 3;
+ }
+
+ if (c == count)
+ return count;
+ if (p->which > 2) {
+ if ((p->flag2 & PTS_DTS_FLAGS) == PTS_ONLY) {
+ while (c < count && p->which < 7) {
+ p->pts[p->which - 2] = buf[c];
+ write_ipack(p, buf + c, 1);
+ c++;
+ p->found++;
+ p->which++;
+ p->hlength++;
+ }
+ if (c == count)
+ return count;
+ } else if ((p->flag2 & PTS_DTS_FLAGS) == PTS_DTS) {
+ while (c < count && p->which < 12) {
+ if (p->which < 7)
+ p->pts[p->which - 2] = buf[c];
+ write_ipack(p, buf + c, 1);
+ c++;
+ p->found++;
+ p->which++;
+ p->hlength++;
+ }
+ if (c == count)
+ return count;
+ }
+ p->which = 2000;
+ }
+ }
+
+ while (c < count && p->found < p->plength + 6) {
+ l = count - c;
+ if (l + p->found > p->plength + 6)
+ l = p->plength + 6 - p->found;
+ write_ipack(p, buf + c, l);
+ p->found += l;
+ c += l;
+ }
+ break;
+ }
+
+ if (p->done) {
+ if (p->found + count - c < p->plength + 6) {
+ p->found += count - c;
+ c = count;
+ } else {
+ c += p->plength + 6 - p->found;
+ p->found = p->plength + 6;
+ }
+ }
+
+ if (p->plength && p->found == p->plength + 6) {
+ send_ipack(p);
+ av7110_ipack_reset(p);
+ if (c < count)
+ av7110_ipack_instant_repack(buf + c, count - c, p);
+ }
+
+ return count;
+}
diff --git a/drivers/staging/media/av7110/av7110_ipack.h b/drivers/staging/media/av7110/av7110_ipack.h
new file mode 100644
index 000000000000..55296421d52f
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_ipack.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _AV7110_IPACK_H_
+#define _AV7110_IPACK_H_
+
+int av7110_ipack_init(struct ipack *p, int size,
+ void (*func)(u8 *buf, int size, void *priv));
+
+void av7110_ipack_reset(struct ipack *p);
+int av7110_ipack_instant_repack(const u8 *buf, int count, struct ipack *p);
+void av7110_ipack_free(struct ipack *p);
+void av7110_ipack_flush(struct ipack *p);
+
+#endif
diff --git a/drivers/staging/media/av7110/av7110_ir.c b/drivers/staging/media/av7110/av7110_ir.c
new file mode 100644
index 000000000000..68b3979ba5f2
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_ir.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the remote control of SAA7146 based AV7110 cards
+ *
+ * Copyright (C) 1999-2003 Holger Waechtler <holger@convergence.de>
+ * Copyright (C) 2003-2007 Oliver Endriss <o.endriss@gmx.de>
+ * Copyright (C) 2019 Sean Young <sean@mess.org>
+ */
+
+#include <linux/kernel.h>
+#include <media/rc-core.h>
+
+#include "av7110.h"
+#include "av7110_hw.h"
+
+#define IR_RC5 0
+#define IR_RCMM 1
+#define IR_RC5_EXT 2 /* internal only */
+
+/* interrupt handler */
+void av7110_ir_handler(struct av7110 *av7110, u32 ircom)
+{
+ struct rc_dev *rcdev = av7110->ir.rcdev;
+ enum rc_proto proto;
+ u32 command, addr, scancode;
+ u32 toggle;
+
+ dprintk(4, "ir command = %08x\n", ircom);
+
+ if (rcdev) {
+ switch (av7110->ir.ir_config) {
+ case IR_RC5: /* RC5: 5 bits device address, 6 bits command */
+ command = ircom & 0x3f;
+ addr = (ircom >> 6) & 0x1f;
+ scancode = RC_SCANCODE_RC5(addr, command);
+ toggle = ircom & 0x0800;
+ proto = RC_PROTO_RC5;
+ break;
+
+ case IR_RCMM: /* RCMM: 32 bits scancode */
+ scancode = ircom & ~0x8000;
+ toggle = ircom & 0x8000;
+ proto = RC_PROTO_RCMM32;
+ break;
+
+ case IR_RC5_EXT:
+ /*
+ * extended RC5: 5 bits device address, 7 bits command
+ *
+ * Extended RC5 uses only one start bit. The second
+			 * Extended RC5 uses only one start bit. The second
+			 * start bit is reused as bit 6 of the command.
+ command = ircom & 0x3f;
+ addr = (ircom >> 6) & 0x1f;
+ if (!(ircom & 0x1000))
+ command |= 0x40;
+ scancode = RC_SCANCODE_RC5(addr, command);
+ toggle = ircom & 0x0800;
+ proto = RC_PROTO_RC5;
+ break;
+ default:
+ dprintk(2, "unknown ir config %d\n", av7110->ir.ir_config);
+ return;
+ }
+
+ rc_keydown(rcdev, proto, scancode, toggle != 0);
+ }
+}
+
+int av7110_set_ir_config(struct av7110 *av7110)
+{
+ dprintk(4, "ir config = %08x\n", av7110->ir.ir_config);
+
+ return av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1,
+ av7110->ir.ir_config);
+}
+
+static int change_protocol(struct rc_dev *rcdev, u64 *rc_type)
+{
+ struct av7110 *av7110 = rcdev->priv;
+ u32 ir_config;
+
+ if (*rc_type & RC_PROTO_BIT_RCMM32) {
+ ir_config = IR_RCMM;
+ *rc_type = RC_PROTO_BIT_RCMM32;
+ } else if (*rc_type & RC_PROTO_BIT_RC5) {
+ if (FW_VERSION(av7110->arm_app) >= 0x2620)
+ ir_config = IR_RC5_EXT;
+ else
+ ir_config = IR_RC5;
+ *rc_type = RC_PROTO_BIT_RC5;
+ } else {
+ return -EINVAL;
+ }
+
+ if (ir_config == av7110->ir.ir_config)
+ return 0;
+
+ av7110->ir.ir_config = ir_config;
+
+ return av7110_set_ir_config(av7110);
+}
+
+int av7110_ir_init(struct av7110 *av7110)
+{
+ struct rc_dev *rcdev;
+ struct pci_dev *pci;
+ int ret;
+
+ rcdev = rc_allocate_device(RC_DRIVER_SCANCODE);
+ if (!rcdev)
+ return -ENOMEM;
+
+ pci = av7110->dev->pci;
+
+ snprintf(av7110->ir.input_phys, sizeof(av7110->ir.input_phys),
+ "pci-%s/ir0", pci_name(pci));
+
+ rcdev->device_name = av7110->card_name;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->input_phys = av7110->ir.input_phys;
+ rcdev->input_id.bustype = BUS_PCI;
+ rcdev->input_id.version = 2;
+ if (pci->subsystem_vendor) {
+ rcdev->input_id.vendor = pci->subsystem_vendor;
+ rcdev->input_id.product = pci->subsystem_device;
+ } else {
+ rcdev->input_id.vendor = pci->vendor;
+ rcdev->input_id.product = pci->device;
+ }
+
+ rcdev->dev.parent = &pci->dev;
+ rcdev->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RCMM32;
+ rcdev->change_protocol = change_protocol;
+ rcdev->map_name = RC_MAP_HAUPPAUGE;
+ rcdev->priv = av7110;
+
+ av7110->ir.rcdev = rcdev;
+ av7110->ir.ir_config = IR_RC5;
+ av7110_set_ir_config(av7110);
+
+ ret = rc_register_device(rcdev);
+ if (ret) {
+ av7110->ir.rcdev = NULL;
+ rc_free_device(rcdev);
+ }
+
+ return ret;
+}
+
+void av7110_ir_exit(struct av7110 *av7110)
+{
+ rc_unregister_device(av7110->ir.rcdev);
+}
+
+//MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>, Oliver Endriss <o.endriss@gmx.de>");
+//MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/av7110/av7110_v4l.c b/drivers/staging/media/av7110/av7110_v4l.c
new file mode 100644
index 000000000000..200a7a29ea31
--- /dev/null
+++ b/drivers/staging/media/av7110/av7110_v4l.c
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * driver for the SAA7146 based AV7110 cards
+ * - video4linux interface for DVB and Siemens DVB-C analog module
+ *
+ * Copyright (C) 1999-2002 Ralph Metzler
+ * & Marcus Metzler for convergence integrated media GmbH
+ *
+ * originally based on code by:
+ * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de>
+ *
+ * the project's page is at https://linuxtv.org
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/poll.h>
+
+#include "av7110.h"
+#include "av7110_hw.h"
+#include "av7110_av.h"
+
+int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val)
+{
+ u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8, val & 0xff };
+ struct i2c_msg msgs = { .flags = 0, .len = 5, .buf = msg };
+
+ switch (av7110->adac_type) {
+ case DVB_ADAC_MSP34x0:
+ msgs.addr = 0x40;
+ break;
+ case DVB_ADAC_MSP34x5:
+ msgs.addr = 0x42;
+ break;
+ default:
+ return 0;
+ }
+
+ if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) {
+ dprintk(1, "failed @ card %d, %u = %u\n", av7110->dvb_adapter.num, reg, val);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int msp_readreg(struct av7110 *av7110, u8 dev, u16 reg, u16 *val)
+{
+ u8 msg1[3] = { dev, reg >> 8, reg & 0xff };
+ u8 msg2[2];
+ struct i2c_msg msgs[2] = {
+ { .flags = 0, .len = 3, .buf = msg1 },
+ { .flags = I2C_M_RD, .len = 2, .buf = msg2 }
+ };
+
+ switch (av7110->adac_type) {
+ case DVB_ADAC_MSP34x0:
+ msgs[0].addr = 0x40;
+ msgs[1].addr = 0x40;
+ break;
+ case DVB_ADAC_MSP34x5:
+ msgs[0].addr = 0x42;
+ msgs[1].addr = 0x42;
+ break;
+ default:
+ return 0;
+ }
+
+ if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) {
+ dprintk(1, "failed @ card %d, %u\n", av7110->dvb_adapter.num, reg);
+ return -EIO;
+ }
+ *val = (msg2[0] << 8) | msg2[1];
+ return 0;
+}
+
+static struct v4l2_input inputs[4] = {
+ {
+ .index = 0,
+ .name = "DVB",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .audioset = 1,
+ .tuner = 0, /* ignored */
+ .std = V4L2_STD_PAL_BG | V4L2_STD_NTSC_M,
+ .status = 0,
+ .capabilities = V4L2_IN_CAP_STD,
+ }, {
+ .index = 1,
+ .name = "Television",
+ .type = V4L2_INPUT_TYPE_TUNER,
+ .audioset = 1,
+ .tuner = 0,
+ .std = V4L2_STD_PAL_BG | V4L2_STD_NTSC_M,
+ .status = 0,
+ .capabilities = V4L2_IN_CAP_STD,
+ }, {
+ .index = 2,
+ .name = "Video",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .audioset = 0,
+ .tuner = 0,
+ .std = V4L2_STD_PAL_BG | V4L2_STD_NTSC_M,
+ .status = 0,
+ .capabilities = V4L2_IN_CAP_STD,
+ }, {
+ .index = 3,
+ .name = "Y/C",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .audioset = 0,
+ .tuner = 0,
+ .std = V4L2_STD_PAL_BG | V4L2_STD_NTSC_M,
+ .status = 0,
+ .capabilities = V4L2_IN_CAP_STD,
+ }
+};
+
+static int ves1820_writereg(struct saa7146_dev *dev, u8 addr, u8 reg, u8 data)
+{
+ struct av7110 *av7110 = dev->ext_priv;
+ u8 buf[] = { 0x00, reg, data };
+ struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };
+
+ dprintk(4, "dev: %p\n", dev);
+
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -1;
+ return 0;
+}
+
+static int tuner_write(struct saa7146_dev *dev, u8 addr, u8 data[4])
+{
+ struct av7110 *av7110 = dev->ext_priv;
+ struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = data, .len = 4 };
+
+ dprintk(4, "dev: %p\n", dev);
+
+ if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
+ return -1;
+ return 0;
+}
+
+static int ves1820_set_tv_freq(struct saa7146_dev *dev, u32 freq)
+{
+ u32 div;
+ u8 config;
+ u8 buf[4];
+
+ dprintk(4, "freq: 0x%08x\n", freq);
+
+ /* magic number: 614. tuning with the frequency given by v4l2
+ * is always off by 614*62.5 = 38375 kHz...
+ */
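+ /*
+ * Note: v4l2 passes these analog TV frequencies in 62.5 kHz units,
+ * so the 614-step offset equals the 38375 kHz shift mentioned above
+ * (614 * 62.5 kHz = 38375 kHz).
+ */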
+ div = freq + 614;
+
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = 0x8e;
+
+ if (freq < 16U * 16825 / 100)
+ config = 0xa0;
+ else if (freq < 16U * 44725 / 100)
+ config = 0x90;
+ else
+ config = 0x30;
+ config &= ~0x02;
+
+ buf[3] = config;
+
+ return tuner_write(dev, 0x61, buf);
+}
+
+static int stv0297_set_tv_freq(struct saa7146_dev *dev, u32 freq)
+{
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+ u32 div;
+ u8 data[4];
+
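+ /*
+ * Here the frequency is taken in Hz (see the range checks below):
+ * add the 38.9 MHz intermediate frequency and round to the nearest
+ * 62.5 kHz PLL step (31250 Hz is half a step).
+ */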
+ div = (freq + 38900000 + 31250) / 62500;
+
+ data[0] = (div >> 8) & 0x7f;
+ data[1] = div & 0xff;
+ data[2] = 0xce;
+
+ if (freq < 45000000)
+ return -EINVAL;
+ else if (freq < 137000000)
+ data[3] = 0x01;
+ else if (freq < 403000000)
+ data[3] = 0x02;
+ else if (freq < 860000000)
+ data[3] = 0x04;
+ else
+ return -EINVAL;
+
+ if (av7110->fe->ops.i2c_gate_ctrl)
+ av7110->fe->ops.i2c_gate_ctrl(av7110->fe, 1);
+ return tuner_write(dev, 0x63, data);
+}
+
+static struct saa7146_standard analog_standard[];
+static struct saa7146_standard dvb_standard[];
+static struct saa7146_standard standard[];
+
+static const struct v4l2_audio msp3400_v4l2_audio = {
+ .index = 0,
+ .name = "Television",
+ .capability = V4L2_AUDCAP_STEREO
+};
+
+static int av7110_dvb_c_switch(struct saa7146_dev *dev)
+{
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+ u16 adswitch;
+ int source, sync;
+
+ dprintk(4, "%p\n", av7110);
+
+ if (av7110->current_input != 0) {
+ dprintk(1, "switching to analog TV:\n");
+ adswitch = 1;
+ source = SAA7146_HPS_SOURCE_PORT_B;
+ sync = SAA7146_HPS_SYNC_PORT_B;
+ memcpy(standard, analog_standard, sizeof(struct saa7146_standard) * 2);
+
+ switch (av7110->current_input) {
+ case 1:
+ dprintk(1, "switching SAA7113 to Analog Tuner Input\n");
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0000); // loudspeaker source
+ msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0000); // headphone source
+ msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0000); // SCART 1 source
+ msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); // loudspeaker + headphone
+ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); // SCART 1 volume
+
+ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
+ if (ves1820_writereg(dev, 0x09, 0x0f, 0x60))
+ dprintk(1, "setting band in demodulator failed\n");
+ } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9819 pin9(STD)
+ saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9819 pin30(VIF)
+ }
+ if (i2c_writereg(av7110, 0x48, 0x02, 0xd0) != 1)
+ dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
+ break;
+ case 2:
+ dprintk(1, "switching SAA7113 to Video AV CVBS Input\n");
+ if (i2c_writereg(av7110, 0x48, 0x02, 0xd2) != 1)
+ dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
+ break;
+ case 3:
+ dprintk(1, "switching SAA7113 to Video AV Y/C Input\n");
+ if (i2c_writereg(av7110, 0x48, 0x02, 0xd9) != 1)
+ dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
+ break;
+ default:
+ dprintk(1, "switching SAA7113 to Input: AV7110: SAA7113: invalid input\n");
+ }
+ } else {
+ adswitch = 0;
+ source = SAA7146_HPS_SOURCE_PORT_A;
+ sync = SAA7146_HPS_SYNC_PORT_A;
+ memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2);
+ dprintk(1, "switching DVB mode\n");
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source
+ msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source
+ msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
+ msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
+ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume
+
+ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
+ if (ves1820_writereg(dev, 0x09, 0x0f, 0x20))
+ dprintk(1, "setting band in demodulator failed\n");
+ } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
+ saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
+ }
+ }
+
+ /* hmm, this does not do anything!? */
+ if (av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, adswitch))
+ dprintk(1, "ADSwitch error\n");
+
+ saa7146_set_hps_source_and_sync(dev, source, sync);
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+ u16 stereo_det;
+ s8 stereo;
+
+ dprintk(2, "VIDIOC_G_TUNER: %d\n", t->index);
+
+ if (!av7110->analog_tuner_flags || t->index != 0)
+ return -EINVAL;
+
+ memset(t, 0, sizeof(*t));
+ strscpy((char *)t->name, "Television", sizeof(t->name));
+
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
+	t->rangelow = 772; /* 48.25 MHz / 62.5 kHz = 772, see fi1216mk2-specs, page 2 */
+ t->rangehigh = 13684; /* 855.25 MHz / 62.5 kHz = 13684 */
+ /* FIXME: add the real signal strength here */
+ t->signal = 0xffff;
+ t->afc = 0;
+
+ /* FIXME: standard / stereo detection is still broken */
+ msp_readreg(av7110, MSP_RD_DEM, 0x007e, &stereo_det);
+ dprintk(1, "VIDIOC_G_TUNER: msp3400 TV standard detection: 0x%04x\n", stereo_det);
+ msp_readreg(av7110, MSP_RD_DSP, 0x0018, &stereo_det);
+ dprintk(1, "VIDIOC_G_TUNER: msp3400 stereo detection: 0x%04x\n", stereo_det);
+ stereo = (s8)(stereo_det >> 8);
+ if (stereo > 0x10) {
+ /* stereo */
+ t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ } else if (stereo < -0x10) {
+ /* bilingual */
+ t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+ t->audmode = V4L2_TUNER_MODE_LANG1;
+ } else {
+ /* mono */
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *t)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+ u16 fm_matrix, src;
+
+ dprintk(2, "VIDIOC_S_TUNER: %d\n", t->index);
+
+ if (!av7110->analog_tuner_flags || av7110->current_input != 1)
+ return -EINVAL;
+
+ switch (t->audmode) {
+ case V4L2_TUNER_MODE_STEREO:
+ dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_STEREO\n");
+ fm_matrix = 0x3001; /* stereo */
+ src = 0x0020;
+ break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1_LANG2\n");
+ fm_matrix = 0x3000; /* bilingual */
+ src = 0x0020;
+ break;
+ case V4L2_TUNER_MODE_LANG1:
+ dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1\n");
+ fm_matrix = 0x3000; /* mono */
+ src = 0x0000;
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG2\n");
+ fm_matrix = 0x3000; /* mono */
+ src = 0x0010;
+ break;
+ default: /* case V4L2_TUNER_MODE_MONO: */
+ dprintk(2, "VIDIOC_S_TUNER: TDA9840_SET_MONO\n");
+ fm_matrix = 0x3000; /* mono */
+ src = 0x0030;
+ break;
+ }
+ msp_writereg(av7110, MSP_WR_DSP, 0x000e, fm_matrix);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, src);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0009, src);
+ msp_writereg(av7110, MSP_WR_DSP, 0x000a, src);
+ return 0;
+}
+
+static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_G_FREQ: freq:0x%08x\n", f->frequency);
+
+ if (!av7110->analog_tuner_flags || av7110->current_input != 1)
+ return -EINVAL;
+
+ memset(f, 0, sizeof(*f));
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = av7110->current_freq;
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *f)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_S_FREQUENCY: freq:0x%08x\n", f->frequency);
+
+ if (!av7110->analog_tuner_flags || av7110->current_input != 1)
+ return -EINVAL;
+
+ if (f->type != V4L2_TUNER_ANALOG_TV)
+ return -EINVAL;
+
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0xffe0); /* fast mute */
+ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0xffe0);
+
+ /* tune in desired frequency */
+ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820)
+ ves1820_set_tv_freq(dev, f->frequency);
+ else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297)
+ stv0297_set_tv_freq(dev, f->frequency);
+ av7110->current_freq = f->frequency;
+
+ msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x003f); /* start stereo detection */
+ msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x0000);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); /* loudspeaker + headphone */
+ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); /* SCART 1 volume */
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index);
+
+ if (av7110->analog_tuner_flags) {
+ if (i->index >= 4)
+ return -EINVAL;
+ } else {
+ if (i->index != 0)
+ return -EINVAL;
+ }
+
+ memcpy(i, &inputs[i->index], sizeof(struct v4l2_input));
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ *input = av7110->current_input;
+ dprintk(2, "VIDIOC_G_INPUT: %d\n", *input);
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_S_INPUT: %d\n", input);
+
+ if (!av7110->analog_tuner_flags)
+ return input ? -EINVAL : 0;
+
+ if (input >= 4)
+ return -EINVAL;
+
+ av7110->current_input = input;
+ return av7110_dvb_c_switch(dev);
+}
+
+static int vidioc_enum_output(struct file *file, void *fh, struct v4l2_output *o)
+{
+ if (o->index)
+ return -EINVAL;
+ strscpy(o->name, "Video Output", sizeof(o->name));
+ o->type = V4L2_OUTPUT_TYPE_ANALOG;
+ o->std = V4L2_STD_NTSC_M | V4L2_STD_PAL_BG;
+ o->capabilities = V4L2_OUT_CAP_STD;
+ return 0;
+}
+
+static int vidioc_g_output(struct file *file, void *fh, unsigned int *output)
+{
+ *output = 0;
+ return 0;
+}
+
+static int vidioc_s_output(struct file *file, void *fh, unsigned int output)
+{
+ return output ? -EINVAL : 0;
+}
+
+static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
+{
+ dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
+ if (a->index != 0)
+ return -EINVAL;
+ *a = msp3400_v4l2_audio;
+ return 0;
+}
+
+static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
+ if (a->index != 0)
+ return -EINVAL;
+ if (av7110->current_input >= 2)
+ return -EINVAL;
+ *a = msp3400_v4l2_audio;
+ return 0;
+}
+
+static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *a)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index);
+ if (av7110->current_input >= 2)
+ return -EINVAL;
+ return a->index ? -EINVAL : 0;
+}
+
+static int vidioc_g_sliced_vbi_cap(struct file *file, void *fh,
+ struct v4l2_sliced_vbi_cap *cap)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_G_SLICED_VBI_CAP\n");
+ if (cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
+ return -EINVAL;
+ if (FW_VERSION(av7110->arm_app) >= 0x2623) {
+ cap->service_set = V4L2_SLICED_WSS_625;
+ cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+ return 0;
+}
+
+static int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_G_FMT:\n");
+ if (FW_VERSION(av7110->arm_app) < 0x2623)
+ return -EINVAL;
+ memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced));
+ if (av7110->wssMode) {
+ f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
+ f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+ f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data);
+ return 0;
+}
+
+static int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+ bool want_wss = (f->fmt.sliced.service_set & V4L2_SLICED_WSS_625) ||
+ (!f->fmt.sliced.service_set &&
+ f->fmt.sliced.service_lines[0][23] == V4L2_SLICED_WSS_625);
+
+ dprintk(2, "VIDIOC_G_FMT:\n");
+ if (FW_VERSION(av7110->arm_app) < 0x2623)
+ return -EINVAL;
+ memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced));
+ if (want_wss) {
+ f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
+ f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+ f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data);
+ return 0;
+}
+
+static int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ dprintk(2, "VIDIOC_S_FMT\n");
+ if (vidioc_try_fmt_sliced_vbi_out(file, fh, f))
+ return -EINVAL;
+ if (f->fmt.sliced.service_set & V4L2_SLICED_WSS_625) {
+ /* WSS controlled by userspace */
+ av7110->wssMode = 1;
+ av7110->wssData = 0;
+ } else {
+ /* WSS controlled by firmware */
+ av7110->wssMode = 0;
+ av7110->wssData = 0;
+ return av7110_fw_cmd(av7110, COMTYPE_ENCODER,
+ SetWSSConfig, 1, 0);
+ }
+ return 0;
+}
+
+static ssize_t av7110_vbi_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
+{
+ struct saa7146_dev *dev = video_drvdata(file);
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+ struct v4l2_sliced_vbi_data d;
+ int rc;
+
+ dprintk(2, "\n");
+ if (FW_VERSION(av7110->arm_app) < 0x2623 || !av7110->wssMode || count != sizeof(d))
+ return -EINVAL;
+ if (copy_from_user(&d, data, count))
+ return -EFAULT;
+ if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23)
+ return -EINVAL;
+ if (d.id)
+ av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0];
+ else
+ av7110->wssData = 0x8000;
+ rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 1, av7110->wssData);
+ return (rc < 0) ? rc : count;
+}
+
+/****************************************************************************
+ * INITIALIZATION
+ ****************************************************************************/
+
+static u8 saa7113_init_regs[] = {
+ 0x02, 0xd0,
+ 0x03, 0x23,
+ 0x04, 0x00,
+ 0x05, 0x00,
+ 0x06, 0xe9,
+ 0x07, 0x0d,
+ 0x08, 0x98,
+ 0x09, 0x02,
+ 0x0a, 0x80,
+ 0x0b, 0x40,
+ 0x0c, 0x40,
+ 0x0d, 0x00,
+ 0x0e, 0x01,
+ 0x0f, 0x7c,
+ 0x10, 0x48,
+ 0x11, 0x0c,
+ 0x12, 0x8b,
+ 0x13, 0x1a,
+ 0x14, 0x00,
+ 0x15, 0x00,
+ 0x16, 0x00,
+ 0x17, 0x00,
+ 0x18, 0x00,
+ 0x19, 0x00,
+ 0x1a, 0x00,
+ 0x1b, 0x00,
+ 0x1c, 0x00,
+ 0x1d, 0x00,
+ 0x1e, 0x00,
+
+ 0x41, 0x77,
+ 0x42, 0x77,
+ 0x43, 0x77,
+ 0x44, 0x77,
+ 0x45, 0x77,
+ 0x46, 0x77,
+ 0x47, 0x77,
+ 0x48, 0x77,
+ 0x49, 0x77,
+ 0x4a, 0x77,
+ 0x4b, 0x77,
+ 0x4c, 0x77,
+ 0x4d, 0x77,
+ 0x4e, 0x77,
+ 0x4f, 0x77,
+ 0x50, 0x77,
+ 0x51, 0x77,
+ 0x52, 0x77,
+ 0x53, 0x77,
+ 0x54, 0x77,
+ 0x55, 0x77,
+ 0x56, 0x77,
+ 0x57, 0xff,
+
+ 0xff
+};
+
+static struct saa7146_ext_vv av7110_vv_data_st;
+static struct saa7146_ext_vv av7110_vv_data_c;
+
+int av7110_init_analog_module(struct av7110 *av7110)
+{
+ u16 version1, version2;
+
+ if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 &&
+ i2c_writereg(av7110, 0x80, 0x0, 0) == 1) {
+ pr_info("DVB-C analog module @ card %d detected, initializing MSP3400\n",
+ av7110->dvb_adapter.num);
+ av7110->adac_type = DVB_ADAC_MSP34x0;
+ } else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 &&
+ i2c_writereg(av7110, 0x84, 0x0, 0) == 1) {
+ pr_info("DVB-C analog module @ card %d detected, initializing MSP3415\n",
+ av7110->dvb_adapter.num);
+ av7110->adac_type = DVB_ADAC_MSP34x5;
+ } else {
+ return -ENODEV;
+ }
+
+ msleep(100); // the probing above resets the msp...
+ msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1);
+ msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2);
+ dprintk(1, "@ card %d MSP34xx version 0x%04x 0x%04x\n",
+ av7110->dvb_adapter.num, version1, version2);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00);
+ msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
+ msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source
+ msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source
+ msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume
+ msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
+ msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume
+ msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART
+
+ if (i2c_writereg(av7110, 0x48, 0x01, 0x00) != 1) {
+ pr_info("saa7113 not accessible\n");
+ } else {
+ u8 *i = saa7113_init_regs;
+
+ if ((av7110->dev->pci->subsystem_vendor == 0x110a) &&
+ (av7110->dev->pci->subsystem_device == 0x0000)) {
+ /* Fujitsu/Siemens DVB-Cable */
+ av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820;
+ } else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) &&
+ (av7110->dev->pci->subsystem_device == 0x0002)) {
+ /* Hauppauge/TT DVB-C premium */
+ av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820;
+ } else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) &&
+ (av7110->dev->pci->subsystem_device == 0x000A)) {
+ /* Hauppauge/TT DVB-C premium */
+ av7110->analog_tuner_flags |= ANALOG_TUNER_STV0297;
+ }
+
+ /* setup for DVB by default */
+ if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
+ if (ves1820_writereg(av7110->dev, 0x09, 0x0f, 0x20))
+ dprintk(1, "setting band in demodulator failed\n");
+ } else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
+ saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
+ saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
+ }
+
+ /* init the saa7113 */
+ while (*i != 0xff) {
+ if (i2c_writereg(av7110, 0x48, i[0], i[1]) != 1) {
+ dprintk(1, "saa7113 initialization failed @ card %d", av7110->dvb_adapter.num);
+ break;
+ }
+ i += 2;
+ }
+ /* setup msp for analog sound: B/G Dual-FM */
+ msp_writereg(av7110, MSP_WR_DEM, 0x00bb, 0x02d0); // AD_CV
+ msp_writereg(av7110, MSP_WR_DEM, 0x0001, 3); // FIR1
+ msp_writereg(av7110, MSP_WR_DEM, 0x0001, 18); // FIR1
+ msp_writereg(av7110, MSP_WR_DEM, 0x0001, 27); // FIR1
+ msp_writereg(av7110, MSP_WR_DEM, 0x0001, 48); // FIR1
+ msp_writereg(av7110, MSP_WR_DEM, 0x0001, 66); // FIR1
+ msp_writereg(av7110, MSP_WR_DEM, 0x0001, 72); // FIR1
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 4); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 64); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 0); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 3); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 18); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 27); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 48); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 66); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0005, 72); // FIR2
+ msp_writereg(av7110, MSP_WR_DEM, 0x0083, 0xa000); // MODE_REG
+ msp_writereg(av7110, MSP_WR_DEM, 0x0093, 0x00aa); // DCO1_LO 5.74MHz
+ msp_writereg(av7110, MSP_WR_DEM, 0x009b, 0x04fc); // DCO1_HI
+ msp_writereg(av7110, MSP_WR_DEM, 0x00a3, 0x038e); // DCO2_LO 5.5MHz
+ msp_writereg(av7110, MSP_WR_DEM, 0x00ab, 0x04c6); // DCO2_HI
+ msp_writereg(av7110, MSP_WR_DEM, 0x0056, 0); // LOAD_REG 1/2
+ }
+
+ memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2);
+ /* set dd1 stream a & b */
+ saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000);
+ saa7146_write(av7110->dev, DD1_INIT, 0x03000700);
+ saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
+
+ return 0;
+}
+
+int av7110_init_v4l(struct av7110 *av7110)
+{
+ struct saa7146_dev *dev = av7110->dev;
+ struct saa7146_ext_vv *vv_data;
+ int ret;
+
+ /* special case DVB-C: these cards have an analog tuner
+ * plus need some special handling, so we have separate
+ * saa7146_ext_vv data for these...
+ */
+ if (av7110->analog_tuner_flags)
+ vv_data = &av7110_vv_data_c;
+ else
+ vv_data = &av7110_vv_data_st;
+ ret = saa7146_vv_init(dev, vv_data);
+
+ if (ret) {
+ ERR("cannot init capture device. skipping\n");
+ return -ENODEV;
+ }
+ vv_data->vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data->vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data->vid_ops.vidioc_s_input = vidioc_s_input;
+ vv_data->vid_ops.vidioc_g_tuner = vidioc_g_tuner;
+ vv_data->vid_ops.vidioc_s_tuner = vidioc_s_tuner;
+ vv_data->vid_ops.vidioc_g_frequency = vidioc_g_frequency;
+ vv_data->vid_ops.vidioc_s_frequency = vidioc_s_frequency;
+ vv_data->vid_ops.vidioc_enumaudio = vidioc_enumaudio;
+ vv_data->vid_ops.vidioc_g_audio = vidioc_g_audio;
+ vv_data->vid_ops.vidioc_s_audio = vidioc_s_audio;
+ vv_data->vid_ops.vidioc_g_fmt_vbi_cap = NULL;
+
+ vv_data->vbi_ops.vidioc_enum_output = vidioc_enum_output;
+ vv_data->vbi_ops.vidioc_g_output = vidioc_g_output;
+ vv_data->vbi_ops.vidioc_s_output = vidioc_s_output;
+ vv_data->vbi_ops.vidioc_g_parm = NULL;
+ vv_data->vbi_ops.vidioc_g_fmt_vbi_cap = NULL;
+ vv_data->vbi_ops.vidioc_try_fmt_vbi_cap = NULL;
+ vv_data->vbi_ops.vidioc_s_fmt_vbi_cap = NULL;
+ vv_data->vbi_ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
+ vv_data->vbi_ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
+ vv_data->vbi_ops.vidioc_try_fmt_sliced_vbi_out = vidioc_try_fmt_sliced_vbi_out;
+ vv_data->vbi_ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
+
+ if (FW_VERSION(av7110->arm_app) < 0x2623)
+ vv_data->capabilities &= ~V4L2_CAP_SLICED_VBI_OUTPUT;
+
+ if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_VIDEO)) {
+ ERR("cannot register capture device. skipping\n");
+ saa7146_vv_release(dev);
+ return -ENODEV;
+ }
+ if (FW_VERSION(av7110->arm_app) >= 0x2623) {
+ if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
+ ERR("cannot register vbi v4l2 device. skipping\n");
+ }
+ return 0;
+}
+
+int av7110_exit_v4l(struct av7110 *av7110)
+{
+ struct saa7146_dev *dev = av7110->dev;
+
+ saa7146_unregister_device(&av7110->v4l_dev, av7110->dev);
+ saa7146_unregister_device(&av7110->vbi_dev, av7110->dev);
+
+ saa7146_vv_release(dev);
+
+ return 0;
+}
+
+/* FIXME: these values are experimental values that look better than the
+ * values from the latest "official" driver -- at least for me... (MiHu)
+ */
+static struct saa7146_standard standard[] = {
+ {
+ .name = "PAL", .id = V4L2_STD_PAL_BG,
+ .v_offset = 0x15, .v_field = 288,
+ .h_offset = 0x48, .h_pixels = 708,
+ .v_max_out = 576, .h_max_out = 768,
+ }, {
+ .name = "NTSC", .id = V4L2_STD_NTSC_M,
+ .v_offset = 0x10, .v_field = 244,
+ .h_offset = 0x40, .h_pixels = 708,
+ .v_max_out = 480, .h_max_out = 640,
+ }
+};
+
+static struct saa7146_standard analog_standard[] = {
+ {
+ .name = "PAL", .id = V4L2_STD_PAL_BG,
+ .v_offset = 0x1b, .v_field = 288,
+ .h_offset = 0x08, .h_pixels = 708,
+ .v_max_out = 576, .h_max_out = 768,
+ }, {
+ .name = "NTSC", .id = V4L2_STD_NTSC_M,
+ .v_offset = 0x10, .v_field = 244,
+ .h_offset = 0x40, .h_pixels = 708,
+ .v_max_out = 480, .h_max_out = 640,
+ }
+};
+
+static struct saa7146_standard dvb_standard[] = {
+ {
+ .name = "PAL", .id = V4L2_STD_PAL_BG,
+ .v_offset = 0x14, .v_field = 288,
+ .h_offset = 0x48, .h_pixels = 708,
+ .v_max_out = 576, .h_max_out = 768,
+ }, {
+ .name = "NTSC", .id = V4L2_STD_NTSC_M,
+ .v_offset = 0x10, .v_field = 244,
+ .h_offset = 0x40, .h_pixels = 708,
+ .v_max_out = 480, .h_max_out = 640,
+ }
+};
+
+static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *std)
+{
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
+ if (std->id & V4L2_STD_PAL) {
+ av7110->vidmode = AV7110_VIDEO_MODE_PAL;
+ av7110_set_vidmode(av7110, av7110->vidmode);
+ } else if (std->id & V4L2_STD_NTSC) {
+ av7110->vidmode = AV7110_VIDEO_MODE_NTSC;
+ av7110_set_vidmode(av7110, av7110->vidmode);
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct saa7146_ext_vv av7110_vv_data_st = {
+ .inputs = 1,
+ .audios = 1,
+ .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
+ .flags = 0,
+
+ .stds = &standard[0],
+ .num_stds = ARRAY_SIZE(standard),
+ .std_callback = &std_callback,
+
+ .vbi_write = av7110_vbi_write,
+};
+
+static struct saa7146_ext_vv av7110_vv_data_c = {
+ .inputs = 1,
+ .audios = 1,
+ .capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
+ .flags = SAA7146_USE_PORT_B_FOR_VBI,
+
+ .stds = &standard[0],
+ .num_stds = ARRAY_SIZE(standard),
+ .std_callback = &std_callback,
+
+ .vbi_write = av7110_vbi_write,
+};
+
diff --git a/drivers/staging/media/av7110/dvb_filter.c b/drivers/staging/media/av7110/dvb_filter.c
new file mode 100644
index 000000000000..9eafbb82bf42
--- /dev/null
+++ b/drivers/staging/media/av7110/dvb_filter.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "dvb_filter.h"
+
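+/* AC-3 sample rates in units of 100 Hz: 48 kHz, 44.1 kHz, 32 kHz (0 = reserved code) */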
+static u32 freq[4] = {480, 441, 320, 0};
+
+static unsigned int ac3_bitrates[32] = {
+ 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static u32 ac3_frames[3][32] = {
+ {64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768, 896, 1024,
+ 1152, 1280, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487, 557, 696, 835, 975, 1114,
+ 1253, 1393, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {96, 120, 144, 168, 192, 240, 288, 336, 384, 480, 576, 672, 768, 960, 1152, 1344,
+ 1536, 1728, 1920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
+{
+ u8 *headr;
+ int found = 0;
+ int c = 0;
+ u8 frame = 0;
+ int fr = 0;
+
+ while (!found && c < count) {
+ u8 *b = mbuf + c;
+
+ if (b[0] == 0x0b && b[1] == 0x77)
+ found = 1;
+ else
+ c++;
+ }
+
+ if (!found)
+ return -1;
+
+ ai->off = c;
+ if (c + 5 >= count)
+ return -1;
+
+ ai->layer = 0; // 0 for AC3
+ headr = mbuf + c + 2;
+
+ frame = (headr[2] & 0x3f);
+ ai->bit_rate = ac3_bitrates[frame >> 1] * 1000;
+
+	fr = (headr[2] & 0xc0) >> 6;
+	ai->frequency = freq[fr] * 100;
+
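+ /*
+ * The frame size table is in 16-bit words, hence the final shift to
+ * convert to bytes; the extra word for odd frame codes applies only
+ * at 44.1 kHz (fr == 1).
+ */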
+ ai->framesize = ac3_frames[fr][frame >> 1];
+ if ((frame & 1) && (fr == 1))
+ ai->framesize++;
+ ai->framesize = ai->framesize << 1;
+
+ if (pr)
+ pr_info("Audiostream: AC3, BRate: %d kb/s, Freq: %d Hz, Framesize %d\n",
+ (int)ai->bit_rate / 1000, (int)ai->frequency, (int)ai->framesize);
+
+ return 0;
+}
+
+void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
+ dvb_filter_pes2ts_cb_t *cb, void *priv)
+{
+ unsigned char *buf = p2ts->buf;
+
+ buf[0] = 0x47;
+ buf[1] = (pid >> 8);
+ buf[2] = pid & 0xff;
+ p2ts->cc = 0;
+ p2ts->cb = cb;
+ p2ts->priv = priv;
+}
+
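+/*
+ * Repacketize a PES payload into 188-byte transport stream packets,
+ * reusing the 4-byte header prepared in dvb_filter_pes2ts_init()
+ * (0x47 sync byte, PID, continuity counter).  Full 184-byte payloads
+ * are emitted directly; a trailing short payload is preceded by a
+ * stuffing adaptation field so the packet is still 188 bytes long.
+ */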
+int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
+ int len, int payload_start)
+{
+ unsigned char *buf = p2ts->buf;
+ int ret = 0, rest;
+
+ //len=6+((pes[4]<<8)|pes[5]);
+
+ if (payload_start)
+ buf[1] |= 0x40;
+ else
+ buf[1] &= ~0x40;
+ while (len >= 184) {
+ buf[3] = 0x10 | ((p2ts->cc++) & 0x0f);
+ memcpy(buf + 4, pes, 184);
+ ret = p2ts->cb(p2ts->priv, buf);
+ if (ret)
+ return ret;
+ len -= 184; pes += 184;
+ buf[1] &= ~0x40;
+ }
+ if (!len)
+ return 0;
+ buf[3] = 0x30 | ((p2ts->cc++) & 0x0f);
+ rest = 183 - len;
+ if (rest) {
+ buf[5] = 0x00;
+ if (rest - 1)
+ memset(buf + 6, 0xff, rest - 1);
+ }
+ buf[4] = rest;
+ memcpy(buf + 5 + rest, pes, len);
+ return p2ts->cb(p2ts->priv, buf);
+}
diff --git a/drivers/staging/media/av7110/dvb_filter.h b/drivers/staging/media/av7110/dvb_filter.h
new file mode 100644
index 000000000000..38b483508e07
--- /dev/null
+++ b/drivers/staging/media/av7110/dvb_filter.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2003 Convergence GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DVB_FILTER_H_
+#define _DVB_FILTER_H_
+
+#include <linux/slab.h>
+
+#include <media/demux.h>
+
+typedef int (dvb_filter_pes2ts_cb_t)(void *, unsigned char *);
+
+struct dvb_filter_pes2ts {
+ unsigned char buf[188];
+ unsigned char cc;
+ dvb_filter_pes2ts_cb_t *cb;
+ void *priv;
+};
+
+void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
+ dvb_filter_pes2ts_cb_t *cb, void *priv);
+
+int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
+ int len, int payload_start);
+
+#define PROG_STREAM_MAP 0xBC
+#define PRIVATE_STREAM1 0xBD
+#define PADDING_STREAM 0xBE
+#define PRIVATE_STREAM2 0xBF
+#define AUDIO_STREAM_S 0xC0
+#define AUDIO_STREAM_E 0xDF
+#define VIDEO_STREAM_S 0xE0
+#define VIDEO_STREAM_E 0xEF
+#define ECM_STREAM 0xF0
+#define EMM_STREAM 0xF1
+#define DSM_CC_STREAM 0xF2
+#define ISO13522_STREAM 0xF3
+#define PROG_STREAM_DIR 0xFF
+
+#define DVB_PICTURE_START 0x00
+#define DVB_USER_START 0xb2
+#define DVB_SEQUENCE_HEADER 0xb3
+#define DVB_SEQUENCE_ERROR 0xb4
+#define DVB_EXTENSION_START 0xb5
+#define DVB_SEQUENCE_END 0xb7
+#define DVB_GOP_START 0xb8
+#define DVB_EXCEPT_SLICE 0xb0
+
+#define SEQUENCE_EXTENSION 0x01
+#define SEQUENCE_DISPLAY_EXTENSION 0x02
+#define PICTURE_CODING_EXTENSION 0x08
+#define QUANT_MATRIX_EXTENSION 0x03
+#define PICTURE_DISPLAY_EXTENSION 0x07
+
+#define I_FRAME 0x01
+#define B_FRAME 0x02
+#define P_FRAME 0x03
+
+/* Initialize sequence_data */
+#define INIT_HORIZONTAL_SIZE 720
+#define INIT_VERTICAL_SIZE 576
+#define INIT_ASPECT_RATIO 0x02
+#define INIT_FRAME_RATE 0x03
+#define INIT_DISP_HORIZONTAL_SIZE 540
+#define INIT_DISP_VERTICAL_SIZE 576
+
+//flags2
+#define PTS_DTS_FLAGS 0xC0
+#define ESCR_FLAG 0x20
+#define ES_RATE_FLAG 0x10
+#define DSM_TRICK_FLAG 0x08
+#define ADD_CPY_FLAG 0x04
+#define PES_CRC_FLAG 0x02
+#define PES_EXT_FLAG 0x01
+
+//pts_dts flags
+#define PTS_ONLY 0x80
+#define PTS_DTS 0xC0
+
+#define TS_SIZE 188
+#define TRANS_ERROR 0x80
+#define PAY_START 0x40
+#define TRANS_PRIO 0x20
+#define PID_MASK_HI 0x1F
+//flags
+#define TRANS_SCRMBL1 0x80
+#define TRANS_SCRMBL2 0x40
+#define ADAPT_FIELD 0x20
+#define PAYLOAD 0x10
+#define COUNT_MASK 0x0F
+
+// adaptation flags
+#define DISCON_IND 0x80
+#define RAND_ACC_IND 0x40
+#define ES_PRI_IND 0x20
+#define PCR_FLAG 0x10
+#define OPCR_FLAG 0x08
+#define SPLICE_FLAG 0x04
+#define TRANS_PRIV 0x02
+#define ADAP_EXT_FLAG 0x01
+
+// adaptation extension flags
+#define LTW_FLAG 0x80
+#define PIECE_RATE 0x40
+#define SEAM_SPLICE 0x20
+
+#define MAX_PLENGTH 0xFFFF
+#define MMAX_PLENGTH (256 * MAX_PLENGTH)
+
+#ifndef IPACKS
+#define IPACKS 2048
+#endif
+
+struct ipack {
+ int size;
+ int found;
+ u8 *buf;
+ u8 cid;
+ u32 plength;
+ u8 plen[2];
+ u8 flag1;
+ u8 flag2;
+ u8 hlength;
+ u8 pts[5];
+ u16 *pid;
+ int mpeg;
+ u8 check;
+ int which;
+ int done;
+ void *data;
+ void (*func)(u8 *buf, int size, void *priv);
+ int count;
+ int repack_subids;
+};
+
+struct dvb_video_info {
+ u32 horizontal_size;
+ u32 vertical_size;
+ u32 aspect_ratio;
+ u32 framerate;
+ u32 video_format;
+ u32 bit_rate;
+ u32 comp_bit_rate;
+ u32 vbv_buffer_size;
+ s16 vbv_delay;
+ u32 CSPF;
+ u32 off;
+};
+
+#define OFF_SIZE 4
+#define FIRST_FIELD 0
+#define SECOND_FIELD 1
+#define VIDEO_FRAME_PICTURE 0x03
+
+struct mpg_picture {
+ int channel;
+ struct dvb_video_info vinfo;
+ u32 *sequence_gop_header;
+ u32 *picture_header;
+ s32 time_code;
+ int low_delay;
+ int closed_gop;
+ int broken_link;
+ int sequence_header_flag;
+ int gop_flag;
+ int sequence_end_flag;
+
+ u8 profile_and_level;
+ s32 picture_coding_parameter;
+ u32 matrix[32];
+ s8 matrix_change_flag;
+
+ u8 picture_header_parameter;
+ /* bit 0 - 2: bwd f code
+ * bit 3 : fpb vector
+ * bit 4 - 6: fwd f code
+ * bit 7 : fpf vector
+ */
+
+ int mpeg1_flag;
+ int progressive_sequence;
+ int sequence_display_extension_flag;
+ u32 sequence_header_data;
+ s16 last_frame_centre_horizontal_offset;
+ s16 last_frame_centre_vertical_offset;
+
+ u32 pts[2]; /* [0] 1st field, [1] 2nd field */
+ int top_field_first;
+ int repeat_first_field;
+ int progressive_frame;
+ int bank;
+ int forward_bank;
+ int backward_bank;
+ int compress;
+ s16 frame_centre_horizontal_offset[OFF_SIZE];
+ /* [0-2] 1st field, [3] 2nd field */
+ s16 frame_centre_vertical_offset[OFF_SIZE];
+ /* [0-2] 1st field, [3] 2nd field */
+ s16 temporal_reference[2];
+ /* [0] 1st field, [1] 2nd field */
+
+ s8 picture_coding_type[2];
+ /* [0] 1st field, [1] 2nd field */
+ s8 picture_structure[2];
+ /* [0] 1st field, [1] 2nd field */
+ s8 picture_display_extension_flag[2];
+ /* [0] 1st field, [1] 2nd field */
+ /* picture_display_extenion() 0:no 1:exit*/
+ s8 pts_flag[2];
+ /* [0] 1st field, [1] 2nd field */
+};
+
+struct dvb_audio_info {
+ int layer;
+ u32 bit_rate;
+ u32 frequency;
+ u32 mode;
+ u32 mode_extension;
+ u32 emphasis;
+ u32 framesize;
+ u32 off;
+};
+
+int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr);
+
+#endif
diff --git a/drivers/staging/media/av7110/sp8870.c b/drivers/staging/media/av7110/sp8870.c
new file mode 100644
index 000000000000..0c813860f5b2
--- /dev/null
+++ b/drivers/staging/media/av7110/sp8870.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Spase SP8870 demodulator
+ *
+ * Copyright (C) 1999 Juergen Peitz
+ */
+
+/*
+ * This driver needs external firmware. Please use the command
+ * "<kerneldir>/scripts/get_dvb_firmware alps_tdlb7" to
+ * download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ * or /lib/firmware (depending on configuration of firmware hotplug).
+ */
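+
+/*
+ * For example (illustrative only; paths are distribution-dependent):
+ *   scripts/get_dvb_firmware alps_tdlb7
+ *   cp dvb-fe-sp8870.fw /lib/firmware/
+ */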
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define SP8870_DEFAULT_FIRMWARE "dvb-fe-sp8870.fw"
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <media/dvb_frontend.h>
+#include "sp8870.h"
+
+struct sp8870_state {
+ struct i2c_adapter *i2c;
+
+ const struct sp8870_config *config;
+
+ struct dvb_frontend frontend;
+
+ /* demodulator private data */
+ u8 initialised:1;
+};
+
+static int debug;
+#define dprintk(fmt, arg...) \
+ do { \
+ if (debug) \
+ pr_info("%s(): " fmt, __func__, ##arg); \
+ } while (0)
+
+/* firmware size for sp8870 */
+#define SP8870_FIRMWARE_SIZE 16382
+
+/* starting point for firmware in file 'Sc_main.mc' */
+#define SP8870_FIRMWARE_OFFSET 0x0A
+
+static int sp8870_writereg(struct sp8870_state *state, u16 reg, u16 data)
+{
+ u8 buf[] = { reg >> 8, reg & 0xff, data >> 8, data & 0xff };
+ struct i2c_msg msg = {
+ .addr = state->config->demod_address,
+ .flags = 0,
+ .buf = buf,
+ .len = 4
+ };
+
+ int err;
+
+ err = i2c_transfer(state->i2c, &msg, 1);
+ if (err != 1) {
+ dprintk("writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n", err, reg, data);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int sp8870_readreg(struct sp8870_state *state, u16 reg)
+{
+ int ret;
+ u8 b0[] = { reg >> 8, reg & 0xff };
+ u8 b1[] = { 0, 0 };
+ struct i2c_msg msg[] = {
+ { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 2 },
+ { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 2 }
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+
+ if (ret != 2) {
+ dprintk("readreg error (ret == %i)\n", ret);
+ return -1;
+ }
+
+ return (b1[0] << 8 | b1[1]);
+}
+
+static int sp8870_firmware_upload(struct sp8870_state *state, const struct firmware *fw)
+{
+ struct i2c_msg msg;
+ const char *fw_buf = fw->data;
+ int fw_pos;
+ u8 tx_buf[255];
+ int tx_len;
+ int err = 0;
+
+ dprintk("start firmware upload...\n");
+
+ if (fw->size < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET)
+ return -EINVAL;
+
+ // system controller stop
+ sp8870_writereg(state, 0x0F00, 0x0000);
+
+ // instruction RAM register hiword
+ sp8870_writereg(state, 0x8F08, ((SP8870_FIRMWARE_SIZE / 2) & 0xFFFF));
+
+ // instruction RAM MWR
+ sp8870_writereg(state, 0x8F0A, ((SP8870_FIRMWARE_SIZE / 2) >> 16));
+
+ // do firmware upload
+ fw_pos = SP8870_FIRMWARE_OFFSET;
+ while (fw_pos < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET) {
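+ /*
+ * Send at most 252 data bytes per transfer: with the two register
+ * address bytes prepended, each message stays within the 255-byte
+ * tx_buf.
+ */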
+ tx_len = (fw_pos <= SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - 252) ? 252 :
+ SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - fw_pos;
+ // write register 0xCF0A
+ tx_buf[0] = 0xCF;
+ tx_buf[1] = 0x0A;
+ memcpy(&tx_buf[2], fw_buf + fw_pos, tx_len);
+ msg.addr = state->config->demod_address;
+ msg.flags = 0;
+ msg.buf = tx_buf;
+ msg.len = tx_len + 2;
+ err = i2c_transfer(state->i2c, &msg, 1);
+ if (err != 1) {
+ pr_err("%s(): firmware upload failed!\n", __func__);
+ pr_err("%s(): i2c error (err == %i)\n", __func__, err);
+ return err;
+ }
+ fw_pos += tx_len;
+ }
+
+ dprintk("firmware upload successful!\n");
+ return 0;
+}
+
+static void sp8870_microcontroller_stop(struct sp8870_state *state)
+{
+ sp8870_writereg(state, 0x0F08, 0x000);
+ sp8870_writereg(state, 0x0F09, 0x000);
+
+ // microcontroller STOP
+ sp8870_writereg(state, 0x0F00, 0x000);
+}
+
+static void sp8870_microcontroller_start(struct sp8870_state *state)
+{
+ sp8870_writereg(state, 0x0F08, 0x000);
+ sp8870_writereg(state, 0x0F09, 0x000);
+
+ // microcontroller START
+ sp8870_writereg(state, 0x0F00, 0x001);
+ // not documented but if we don't read 0x0D01 out here
+ // we don't get a correct data valid signal
+ sp8870_readreg(state, 0x0D01);
+}
+
+static int sp8870_read_data_valid_signal(struct sp8870_state *state)
+{
+ return (sp8870_readreg(state, 0x0D02) > 0);
+}
+
+static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
+{
+ int known_parameters = 1;
+
+ *reg0xc05 = 0x000;
+
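+ /*
+ * Bit layout assembled below: bits 11..10 modulation, bits 8..7
+ * hierarchy, bits 5..3 HP code rate, bits 2..1 parameter mode
+ * (2 = use the specified parameters, 1 = autoprobe).
+ */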
+ switch (p->modulation) {
+ case QPSK:
+ break;
+ case QAM_16:
+ *reg0xc05 |= (1 << 10);
+ break;
+ case QAM_64:
+ *reg0xc05 |= (2 << 10);
+ break;
+ case QAM_AUTO:
+ known_parameters = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p->hierarchy) {
+ case HIERARCHY_NONE:
+ break;
+ case HIERARCHY_1:
+ *reg0xc05 |= (1 << 7);
+ break;
+ case HIERARCHY_2:
+ *reg0xc05 |= (2 << 7);
+ break;
+ case HIERARCHY_4:
+ *reg0xc05 |= (3 << 7);
+ break;
+ case HIERARCHY_AUTO:
+ known_parameters = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p->code_rate_HP) {
+ case FEC_1_2:
+ break;
+ case FEC_2_3:
+ *reg0xc05 |= (1 << 3);
+ break;
+ case FEC_3_4:
+ *reg0xc05 |= (2 << 3);
+ break;
+ case FEC_5_6:
+ *reg0xc05 |= (3 << 3);
+ break;
+ case FEC_7_8:
+ *reg0xc05 |= (4 << 3);
+ break;
+ case FEC_AUTO:
+ known_parameters = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (known_parameters)
+ *reg0xc05 |= (2 << 1); /* use specified parameters */
+ else
+ *reg0xc05 |= (1 << 1); /* enable autoprobing */
+
+ return 0;
+}
+
+static int sp8870_wake_up(struct sp8870_state *state)
+{
+ // enable TS output and interface pins
+ return sp8870_writereg(state, 0xC18, 0x00D);
+}
+
+static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct sp8870_state *state = fe->demodulator_priv;
+ int err;
+ u16 reg0xc05;
+
+ err = configure_reg0xc05(p, &reg0xc05);
+ if (err)
+ return err;
+
+ // system controller stop
+ sp8870_microcontroller_stop(state);
+
+ // set tuner parameters
+ if (fe->ops.tuner_ops.set_params) {
+ fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ // sample rate correction bit [23..17]
+ sp8870_writereg(state, 0x0319, 0x000A);
+
+ // sample rate correction bit [16..0]
+ sp8870_writereg(state, 0x031A, 0x0AAB);
+
+ // integer carrier offset
+ sp8870_writereg(state, 0x0309, 0x0400);
+
+ // fractional carrier offset
+ sp8870_writereg(state, 0x030A, 0x0000);
+
+ // filter for 6/7/8 Mhz channel
+ if (p->bandwidth_hz == 6000000)
+ sp8870_writereg(state, 0x0311, 0x0002);
+ else if (p->bandwidth_hz == 7000000)
+ sp8870_writereg(state, 0x0311, 0x0001);
+ else
+ sp8870_writereg(state, 0x0311, 0x0000);
+
+ // scan order: 2k first = 0x0000, 8k first = 0x0001
+ if (p->transmission_mode == TRANSMISSION_MODE_2K)
+ sp8870_writereg(state, 0x0338, 0x0000);
+ else
+ sp8870_writereg(state, 0x0338, 0x0001);
+
+ sp8870_writereg(state, 0xc05, reg0xc05);
+
+ // read status reg in order to clear pending irqs
+ err = sp8870_readreg(state, 0x200);
+ if (err < 0)
+ return err;
+
+ // system controller start
+ sp8870_microcontroller_start(state);
+
+ return 0;
+}
+
+static int sp8870_init(struct dvb_frontend *fe)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+ const struct firmware *fw = NULL;
+
+ sp8870_wake_up(state);
+ if (state->initialised)
+ return 0;
+ state->initialised = 1;
+
+ dprintk("initialising frontend...\n");
+
+ /* request the firmware, this will block until someone uploads it */
+ pr_info("waiting for firmware upload (%s)...\n", SP8870_DEFAULT_FIRMWARE);
+ if (state->config->request_firmware(fe, &fw, SP8870_DEFAULT_FIRMWARE)) {
+ pr_err("no firmware upload (timeout or file not found?)\n");
+ return -EIO;
+ }
+
+ if (sp8870_firmware_upload(state, fw)) {
+ pr_err("writing firmware to device failed\n");
+ release_firmware(fw);
+ return -EIO;
+ }
+ release_firmware(fw);
+ pr_info("firmware upload complete\n");
+
+ /* enable TS output and interface pins */
+ sp8870_writereg(state, 0xc18, 0x00d);
+
+ // system controller stop
+ sp8870_microcontroller_stop(state);
+
+ // ADC mode
+ sp8870_writereg(state, 0x0301, 0x0003);
+
+ // Reed Solomon parity bytes passed to output
+ sp8870_writereg(state, 0x0C13, 0x0001);
+
+ // MPEG clock is suppressed if no valid data
+ sp8870_writereg(state, 0x0C14, 0x0001);
+
+ /* bit 0x010: enable data valid signal */
+ sp8870_writereg(state, 0x0D00, 0x010);
+ sp8870_writereg(state, 0x0D01, 0x000);
+
+ return 0;
+}
+
+static int sp8870_read_status(struct dvb_frontend *fe,
+ enum fe_status *fe_status)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+ int status;
+ int signal;
+
+ *fe_status = 0;
+
+ status = sp8870_readreg(state, 0x0200);
+ if (status < 0)
+ return -EIO;
+
+ signal = sp8870_readreg(state, 0x0303);
+ if (signal < 0)
+ return -EIO;
+
+ if (signal > 0x0F)
+ *fe_status |= FE_HAS_SIGNAL;
+ if (status & 0x08)
+ *fe_status |= FE_HAS_SYNC;
+ if (status & 0x04)
+ *fe_status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_VITERBI;
+
+ return 0;
+}
+
+static int sp8870_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+ int ret;
+ u32 tmp;
+
+ *ber = 0;
+
+ ret = sp8870_readreg(state, 0xC08);
+ if (ret < 0)
+ return -EIO;
+
+ tmp = ret & 0x3F;
+
+ ret = sp8870_readreg(state, 0xC07);
+ if (ret < 0)
+ return -EIO;
+
+	tmp |= ret << 6;
+ if (tmp >= 0x3FFF0)
+ tmp = ~0;
+
+ *ber = tmp;
+
+ return 0;
+}
+
+static int sp8870_read_signal_strength(struct dvb_frontend *fe, u16 *signal)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+ int ret;
+ u16 tmp;
+
+ *signal = 0;
+
+ ret = sp8870_readreg(state, 0x306);
+ if (ret < 0)
+ return -EIO;
+
+ tmp = ret << 8;
+
+ ret = sp8870_readreg(state, 0x303);
+ if (ret < 0)
+ return -EIO;
+
+ tmp |= ret;
+
+ if (tmp)
+ *signal = 0xFFFF - tmp;
+
+ return 0;
+}
+
+static int sp8870_read_uncorrected_blocks(struct dvb_frontend *fe, u32 *ublocks)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+ int ret;
+
+ *ublocks = 0;
+
+ ret = sp8870_readreg(state, 0xC0C);
+ if (ret < 0)
+ return -EIO;
+
+ if (ret == 0xFFFF)
+ ret = ~0;
+
+ *ublocks = ret;
+
+ return 0;
+}
+
+/* number of trials to recover from lockup */
+#define MAXTRIALS 5
+/* maximum checks for data valid signal */
+#define MAXCHECKS 100
+
+/* only for debugging: counter for detected lockups */
+static int lockups;
+/* only for debugging: counter for channel switches */
+static int switches;
+
+static int sp8870_set_frontend(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct sp8870_state *state = fe->demodulator_priv;
+
+ /*
+ * The firmware of the sp8870 sometimes locks up after setting frontend parameters.
+ * We try to detect this by checking the data valid signal.
+ * If it is not set after MAXCHECKS we try to recover the lockup by setting
+ * the frontend parameters again.
+ */
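+ /*
+ * Concretely: up to MAXTRIALS attempts, each polling the data valid
+ * signal up to MAXCHECKS times at 10 us intervals (about 1 ms per
+ * attempt) before the parameters are written again.
+ */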
+
+ int err = 0;
+ int valid = 0;
+ int trials = 0;
+ int check_count = 0;
+
+ dprintk("frequency = %i\n", p->frequency);
+
+ for (trials = 1; trials <= MAXTRIALS; trials++) {
+ err = sp8870_set_frontend_parameters(fe);
+ if (err)
+ return err;
+
+ for (check_count = 0; check_count < MAXCHECKS; check_count++) {
+// valid = ((sp8870_readreg(i2c, 0x0200) & 4) == 0);
+ valid = sp8870_read_data_valid_signal(state);
+ if (valid) {
+ dprintk("delay = %i usec\n", check_count * 10);
+ break;
+ }
+ udelay(10);
+ }
+ if (valid)
+ break;
+ }
+
+ if (!valid) {
+ pr_err("%s(): firmware crash!!!!!!\n", __func__);
+ return -EIO;
+ }
+
+ if (debug) {
+ if (valid) {
+ if (trials > 1) {
+ pr_info("%s(): firmware lockup!!!\n", __func__);
+ pr_info("%s(): recovered after %i trial(s))\n", __func__, trials - 1);
+ lockups++;
+ }
+ }
+ switches++;
+ pr_info("%s(): switches = %i lockups = %i\n", __func__, switches, lockups);
+ }
+
+ return 0;
+}
+
+static int sp8870_sleep(struct dvb_frontend *fe)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+
+ // tristate TS output and disable interface pins
+ return sp8870_writereg(state, 0xC18, 0x000);
+}
+
+static int sp8870_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *fesettings)
+{
+ fesettings->min_delay_ms = 350;
+ fesettings->step_size = 0;
+ fesettings->max_drift = 0;
+ return 0;
+}
+
+static int sp8870_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+
+ if (enable)
+ return sp8870_writereg(state, 0x206, 0x001);
+ else
+ return sp8870_writereg(state, 0x206, 0x000);
+}
+
+static void sp8870_release(struct dvb_frontend *fe)
+{
+ struct sp8870_state *state = fe->demodulator_priv;
+
+ kfree(state);
+}
+
+static const struct dvb_frontend_ops sp8870_ops;
+
+struct dvb_frontend *sp8870_attach(const struct sp8870_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct sp8870_state *state = NULL;
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ goto error;
+
+ /* setup the state */
+ state->config = config;
+ state->i2c = i2c;
+ state->initialised = 0;
+
+ /* check if the demod is there */
+ if (sp8870_readreg(state, 0x0200) < 0)
+ goto error;
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &sp8870_ops, sizeof(sp8870_ops));
+ state->frontend.demodulator_priv = state;
+ return &state->frontend;
+
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sp8870_attach);
+
+static const struct dvb_frontend_ops sp8870_ops = {
+ .delsys = { SYS_DVBT },
+ .info = {
+ .name = "Spase SP8870 DVB-T",
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_stepsize_hz = 166666,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER
+ },
+
+ .release = sp8870_release,
+
+ .init = sp8870_init,
+ .sleep = sp8870_sleep,
+ .i2c_gate_ctrl = sp8870_i2c_gate_ctrl,
+
+ .set_frontend = sp8870_set_frontend,
+ .get_tune_settings = sp8870_get_tune_settings,
+
+ .read_status = sp8870_read_status,
+ .read_ber = sp8870_read_ber,
+ .read_signal_strength = sp8870_read_signal_strength,
+ .read_ucblocks = sp8870_read_uncorrected_blocks,
+};
+
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+
+MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver");
+MODULE_AUTHOR("Juergen Peitz");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/av7110/sp8870.h b/drivers/staging/media/av7110/sp8870.h
new file mode 100644
index 000000000000..3323d1dfa568
--- /dev/null
+++ b/drivers/staging/media/av7110/sp8870.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Spase SP8870 demodulator
+ *
+ * Copyright (C) 1999 Juergen Peitz
+ */
+
+#ifndef SP8870_H
+#define SP8870_H
+
+#include <linux/dvb/frontend.h>
+#include <linux/firmware.h>
+
+struct sp8870_config {
+ /* the demodulator's i2c address */
+ u8 demod_address;
+
+ /* request firmware for device */
+ int (*request_firmware)(struct dvb_frontend *fe, const struct firmware **fw, char *name);
+};
+
+#if IS_REACHABLE(CONFIG_DVB_SP8870)
+struct dvb_frontend *sp8870_attach(const struct sp8870_config *config, struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *sp8870_attach(const struct sp8870_config *config,
+ struct i2c_adapter *i2c)
+{
+ pr_warn(KBUILD_MODNAME ": %s(): driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif // CONFIG_DVB_SP8870
+
+#endif // SP8870_H
diff --git a/drivers/staging/media/bcm2048/Kconfig b/drivers/staging/media/bcm2048/Kconfig
deleted file mode 100644
index a9fc6e186494..000000000000
--- a/drivers/staging/media/bcm2048/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Multimedia Video device configuration
-#
-
-config I2C_BCM2048
- tristate "Broadcom BCM2048 FM Radio Receiver support"
- depends on I2C && VIDEO_V4L2 && RADIO_ADAPTERS
- ---help---
- Say Y here if you want support to BCM2048 FM Radio Receiver.
- This device driver supports only i2c bus.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-bcm2048.
diff --git a/drivers/staging/media/bcm2048/Makefile b/drivers/staging/media/bcm2048/Makefile
deleted file mode 100644
index b4f5663d1408..000000000000
--- a/drivers/staging/media/bcm2048/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_I2C_BCM2048) += radio-bcm2048.o
diff --git a/drivers/staging/media/bcm2048/TODO b/drivers/staging/media/bcm2048/TODO
deleted file mode 100644
index 051f85dbe89e..000000000000
--- a/drivers/staging/media/bcm2048/TODO
+++ /dev/null
@@ -1,24 +0,0 @@
-TODO:
-
-From the initial code review:
-
-The main thing you need to do is to implement all the controls using the
-control framework (see Documentation/video4linux/v4l2-controls.txt).
-Most drivers are by now converted to the control framework, so you will
-find many examples of how to do this in drivers/media/radio.
-
-The sysfs stuff should be replaced by controls as well. A lot of the RDS
-support is now available as controls (although there may well be some
-missing features, but that is easy enough to add). Since the RDS data is
-actually read() from the device I am not sure whether the RDS
-properties/controls should be there at all.
-
-Correct Coding Style, as this driver also violates several Style
-rules, and do evil tricks, like returning from a function inside a
-macro.
-
-Finally this driver should probably be split up into two parts: one
-v4l2_subdev-based core driver and one platform driver. See e.g.
-radio-si4713/si4713-i2c.c as a good example. But I would wait with that
-until the rest of the driver is cleaned up. Then I have a better idea of
-whether this is necessary or not.
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
deleted file mode 100644
index fb55e5941445..000000000000
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ /dev/null
@@ -1,2711 +0,0 @@
-/*
- * drivers/staging/media/radio-bcm2048.c
- *
- * Driver for I2C Broadcom BCM2048 FM Radio Receiver:
- *
- * Copyright (C) Nokia Corporation
- * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
- *
- * Copyright (C) Nils Faerber <nils.faerber@kernelconcepts.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-/*
- * History:
- * Eero Nurkkala <ext-eero.nurkkala@nokia.com>
- * Version 0.0.1
- * - Initial implementation
- * 2010-02-21 Nils Faerber <nils.faerber@kernelconcepts.de>
- * Version 0.0.2
- * - Add support for interrupt driven rds data reading
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/version.h>
-#include <linux/interrupt.h>
-#include <linux/sysfs.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/videodev2.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include "radio-bcm2048.h"
-
-/* driver definitions */
-#define BCM2048_DRIVER_AUTHOR "Eero Nurkkala <ext-eero.nurkkala@nokia.com>"
-#define BCM2048_DRIVER_NAME BCM2048_NAME
-#define BCM2048_DRIVER_VERSION KERNEL_VERSION(0, 0, 1)
-#define BCM2048_DRIVER_CARD "Broadcom bcm2048 FM Radio Receiver"
-#define BCM2048_DRIVER_DESC "I2C driver for BCM2048 FM Radio Receiver"
-
-/* I2C Control Registers */
-#define BCM2048_I2C_FM_RDS_SYSTEM 0x00
-#define BCM2048_I2C_FM_CTRL 0x01
-#define BCM2048_I2C_RDS_CTRL0 0x02
-#define BCM2048_I2C_RDS_CTRL1 0x03
-#define BCM2048_I2C_FM_AUDIO_PAUSE 0x04
-#define BCM2048_I2C_FM_AUDIO_CTRL0 0x05
-#define BCM2048_I2C_FM_AUDIO_CTRL1 0x06
-#define BCM2048_I2C_FM_SEARCH_CTRL0 0x07
-#define BCM2048_I2C_FM_SEARCH_CTRL1 0x08
-#define BCM2048_I2C_FM_SEARCH_TUNE_MODE 0x09
-#define BCM2048_I2C_FM_FREQ0 0x0a
-#define BCM2048_I2C_FM_FREQ1 0x0b
-#define BCM2048_I2C_FM_AF_FREQ0 0x0c
-#define BCM2048_I2C_FM_AF_FREQ1 0x0d
-#define BCM2048_I2C_FM_CARRIER 0x0e
-#define BCM2048_I2C_FM_RSSI 0x0f
-#define BCM2048_I2C_FM_RDS_MASK0 0x10
-#define BCM2048_I2C_FM_RDS_MASK1 0x11
-#define BCM2048_I2C_FM_RDS_FLAG0 0x12
-#define BCM2048_I2C_FM_RDS_FLAG1 0x13
-#define BCM2048_I2C_RDS_WLINE 0x14
-#define BCM2048_I2C_RDS_BLKB_MATCH0 0x16
-#define BCM2048_I2C_RDS_BLKB_MATCH1 0x17
-#define BCM2048_I2C_RDS_BLKB_MASK0 0x18
-#define BCM2048_I2C_RDS_BLKB_MASK1 0x19
-#define BCM2048_I2C_RDS_PI_MATCH0 0x1a
-#define BCM2048_I2C_RDS_PI_MATCH1 0x1b
-#define BCM2048_I2C_RDS_PI_MASK0 0x1c
-#define BCM2048_I2C_RDS_PI_MASK1 0x1d
-#define BCM2048_I2C_SPARE1 0x20
-#define BCM2048_I2C_SPARE2 0x21
-#define BCM2048_I2C_FM_RDS_REV 0x28
-#define BCM2048_I2C_SLAVE_CONFIGURATION 0x29
-#define BCM2048_I2C_RDS_DATA 0x80
-#define BCM2048_I2C_FM_BEST_TUNE_MODE 0x90
-
-/* BCM2048_I2C_FM_RDS_SYSTEM */
-#define BCM2048_FM_ON 0x01
-#define BCM2048_RDS_ON 0x02
-
-/* BCM2048_I2C_FM_CTRL */
-#define BCM2048_BAND_SELECT 0x01
-#define BCM2048_STEREO_MONO_AUTO_SELECT 0x02
-#define BCM2048_STEREO_MONO_MANUAL_SELECT 0x04
-#define BCM2048_STEREO_MONO_BLEND_SWITCH 0x08
-#define BCM2048_HI_LO_INJECTION 0x10
-
-/* BCM2048_I2C_RDS_CTRL0 */
-#define BCM2048_RBDS_RDS_SELECT 0x01
-#define BCM2048_FLUSH_FIFO 0x02
-
-/* BCM2048_I2C_FM_AUDIO_PAUSE */
-#define BCM2048_AUDIO_PAUSE_RSSI_TRESH 0x0f
-#define BCM2048_AUDIO_PAUSE_DURATION 0xf0
-
-/* BCM2048_I2C_FM_AUDIO_CTRL0 */
-#define BCM2048_RF_MUTE 0x01
-#define BCM2048_MANUAL_MUTE 0x02
-#define BCM2048_DAC_OUTPUT_LEFT 0x04
-#define BCM2048_DAC_OUTPUT_RIGHT 0x08
-#define BCM2048_AUDIO_ROUTE_DAC 0x10
-#define BCM2048_AUDIO_ROUTE_I2S 0x20
-#define BCM2048_DE_EMPHASIS_SELECT 0x40
-#define BCM2048_AUDIO_BANDWIDTH_SELECT 0x80
-
-/* BCM2048_I2C_FM_SEARCH_CTRL0 */
-#define BCM2048_SEARCH_RSSI_THRESHOLD 0x7f
-#define BCM2048_SEARCH_DIRECTION 0x80
-
-/* BCM2048_I2C_FM_SEARCH_TUNE_MODE */
-#define BCM2048_FM_AUTO_SEARCH 0x03
-
-/* BCM2048_I2C_FM_RSSI */
-#define BCM2048_RSSI_VALUE 0xff
-
-/* BCM2048_I2C_FM_RDS_MASK0 */
-/* BCM2048_I2C_FM_RDS_MASK1 */
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02
-#define BCM2048_FM_FLAG_RSSI_LOW 0x04
-#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08
-#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10
-#define BCM2048_FLAG_STEREO_DETECTED 0x20
-#define BCM2048_FLAG_STEREO_ACTIVE 0x40
-
-/* BCM2048_I2C_RDS_DATA */
-#define BCM2048_SLAVE_ADDRESS 0x3f
-#define BCM2048_SLAVE_ENABLE 0x80
-
-/* BCM2048_I2C_FM_BEST_TUNE_MODE */
-#define BCM2048_BEST_TUNE_MODE 0x80
-
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02
-#define BCM2048_FM_FLAG_RSSI_LOW 0x04
-#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08
-#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10
-#define BCM2048_FLAG_STEREO_DETECTED 0x20
-#define BCM2048_FLAG_STEREO_ACTIVE 0x40
-
-#define BCM2048_RDS_FLAG_FIFO_WLINE 0x02
-#define BCM2048_RDS_FLAG_B_BLOCK_MATCH 0x08
-#define BCM2048_RDS_FLAG_SYNC_LOST 0x10
-#define BCM2048_RDS_FLAG_PI_MATCH 0x20
-
-#define BCM2048_RDS_MARK_END_BYTE0 0x7C
-#define BCM2048_RDS_MARK_END_BYTEN 0xFF
-
-#define BCM2048_FM_FLAGS_ALL (FM_FLAG_SEARCH_TUNE_FINISHED | \
- FM_FLAG_SEARCH_TUNE_FAIL | \
- FM_FLAG_RSSI_LOW | \
- FM_FLAG_CARRIER_ERROR_HIGH | \
- FM_FLAG_AUDIO_PAUSE_INDICATION | \
- FLAG_STEREO_DETECTED | FLAG_STEREO_ACTIVE)
-
-#define BCM2048_RDS_FLAGS_ALL (RDS_FLAG_FIFO_WLINE | \
- RDS_FLAG_B_BLOCK_MATCH | \
- RDS_FLAG_SYNC_LOST | RDS_FLAG_PI_MATCH)
-
-#define BCM2048_DEFAULT_TIMEOUT 1500
-#define BCM2048_AUTO_SEARCH_TIMEOUT 3000
-
-
-#define BCM2048_FREQDEV_UNIT 10000
-#define BCM2048_FREQV4L2_MULTI 625
-#define dev_to_v4l2(f) ((f * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI)
-#define v4l2_to_dev(f) ((f * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT)
-
-#define msb(x) ((u8)((u16) x >> 8))
-#define lsb(x) ((u8)((u16) x & 0x00FF))
-#define compose_u16(msb, lsb) (((u16)msb << 8) | lsb)
-
-#define BCM2048_DEFAULT_POWERING_DELAY 20
-#define BCM2048_DEFAULT_REGION 0x02
-#define BCM2048_DEFAULT_MUTE 0x01
-#define BCM2048_DEFAULT_RSSI_THRESHOLD 0x64
-#define BCM2048_DEFAULT_RDS_WLINE 0x7E
-
-#define BCM2048_FM_SEARCH_INACTIVE 0x00
-#define BCM2048_FM_PRE_SET_MODE 0x01
-#define BCM2048_FM_AUTO_SEARCH_MODE 0x02
-#define BCM2048_FM_AF_JUMP_MODE 0x03
-
-#define BCM2048_FREQUENCY_BASE 64000
-
-#define BCM2048_POWER_ON 0x01
-#define BCM2048_POWER_OFF 0x00
-
-#define BCM2048_ITEM_ENABLED 0x01
-#define BCM2048_SEARCH_DIRECTION_UP 0x01
-
-#define BCM2048_DE_EMPHASIS_75us 75
-#define BCM2048_DE_EMPHASIS_50us 50
-
-#define BCM2048_SCAN_FAIL 0x00
-#define BCM2048_SCAN_OK 0x01
-
-#define BCM2048_FREQ_ERROR_FLOOR -20
-#define BCM2048_FREQ_ERROR_ROOF 20
-
-/* -60 dB is reported as full signal strength */
-#define BCM2048_RSSI_LEVEL_BASE -60
-#define BCM2048_RSSI_LEVEL_ROOF -100
-#define BCM2048_RSSI_LEVEL_ROOF_NEG 100
-#define BCM2048_SIGNAL_MULTIPLIER (0xFFFF / \
- (BCM2048_RSSI_LEVEL_ROOF_NEG + \
- BCM2048_RSSI_LEVEL_BASE))
-
-#define BCM2048_RDS_FIFO_DUPLE_SIZE 0x03
-#define BCM2048_RDS_CRC_MASK 0x0F
-#define BCM2048_RDS_CRC_NONE 0x00
-#define BCM2048_RDS_CRC_MAX_2BITS 0x04
-#define BCM2048_RDS_CRC_LEAST_2BITS 0x08
-#define BCM2048_RDS_CRC_UNRECOVARABLE 0x0C
-
-#define BCM2048_RDS_BLOCK_MASK 0xF0
-#define BCM2048_RDS_BLOCK_A 0x00
-#define BCM2048_RDS_BLOCK_B 0x10
-#define BCM2048_RDS_BLOCK_C 0x20
-#define BCM2048_RDS_BLOCK_D 0x30
-#define BCM2048_RDS_BLOCK_C_SCORED 0x40
-#define BCM2048_RDS_BLOCK_E 0x60
-
-#define BCM2048_RDS_RT 0x20
-#define BCM2048_RDS_PS 0x00
-
-#define BCM2048_RDS_GROUP_AB_MASK 0x08
-#define BCM2048_RDS_GROUP_A 0x00
-#define BCM2048_RDS_GROUP_B 0x08
-
-#define BCM2048_RDS_RT_AB_MASK 0x10
-#define BCM2048_RDS_RT_A 0x00
-#define BCM2048_RDS_RT_B 0x10
-#define BCM2048_RDS_RT_INDEX 0x0F
-
-#define BCM2048_RDS_PS_INDEX 0x03
-
-struct rds_info {
- u16 rds_pi;
-#define BCM2048_MAX_RDS_RT (64 + 1)
- u8 rds_rt[BCM2048_MAX_RDS_RT];
- u8 rds_rt_group_b;
- u8 rds_rt_ab;
-#define BCM2048_MAX_RDS_PS (8 + 1)
- u8 rds_ps[BCM2048_MAX_RDS_PS];
- u8 rds_ps_group;
- u8 rds_ps_group_cnt;
-#define BCM2048_MAX_RDS_RADIO_TEXT 255
- u8 radio_text[BCM2048_MAX_RDS_RADIO_TEXT + 3];
- u8 text_len;
-};
-
-struct region_info {
- u32 bottom_frequency;
- u32 top_frequency;
- u8 deemphasis;
- u8 channel_spacing;
- u8 region;
-};
-
-struct bcm2048_device {
- struct i2c_client *client;
- struct video_device videodev;
- struct work_struct work;
- struct completion compl;
- struct mutex mutex;
- struct bcm2048_platform_data *platform_data;
- struct rds_info rds_info;
- struct region_info region_info;
- u16 frequency;
- u8 cache_fm_rds_system;
- u8 cache_fm_ctrl;
- u8 cache_fm_audio_ctrl0;
- u8 cache_fm_search_ctrl0;
- u8 power_state;
- u8 rds_state;
- u8 fifo_size;
- u8 scan_state;
- u8 mute_state;
-
- /* for rds data device read */
- wait_queue_head_t read_queue;
- unsigned int users;
- unsigned char rds_data_available;
- unsigned int rd_index;
-};
-
-static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
-module_param(radio_nr, int, 0);
-MODULE_PARM_DESC(radio_nr,
- "Minor number for radio device (-1 ==> auto assign)");
-
-static struct region_info region_configs[] = {
- /* USA */
- {
- .channel_spacing = 20,
- .bottom_frequency = 87500,
- .top_frequency = 108000,
- .deemphasis = 75,
- .region = 0,
- },
- /* Australia */
- {
- .channel_spacing = 20,
- .bottom_frequency = 87500,
- .top_frequency = 108000,
- .deemphasis = 50,
- .region = 1,
- },
- /* Europe */
- {
- .channel_spacing = 10,
- .bottom_frequency = 87500,
- .top_frequency = 108000,
- .deemphasis = 50,
- .region = 2,
- },
- /* Japan */
- {
- .channel_spacing = 10,
- .bottom_frequency = 76000,
- .top_frequency = 90000,
- .deemphasis = 50,
- .region = 3,
- },
-};
-
-/*
- * I2C Interface read / write
- */
-static int bcm2048_send_command(struct bcm2048_device *bdev, unsigned int reg,
- unsigned int value)
-{
- struct i2c_client *client = bdev->client;
- u8 data[2];
-
- if (!bdev->power_state) {
- dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
- return -EIO;
- }
-
- data[0] = reg & 0xff;
- data[1] = value & 0xff;
-
- if (i2c_master_send(client, data, 2) == 2)
- return 0;
-
- dev_err(&bdev->client->dev, "BCM I2C error!\n");
- dev_err(&bdev->client->dev, "Is Bluetooth up and running?\n");
- return -EIO;
-}
-
-static int bcm2048_recv_command(struct bcm2048_device *bdev, unsigned int reg,
- u8 *value)
-{
- struct i2c_client *client = bdev->client;
-
- if (!bdev->power_state) {
- dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
- return -EIO;
- }
-
- value[0] = i2c_smbus_read_byte_data(client, reg & 0xff);
-
- return 0;
-}
-
-static int bcm2048_recv_duples(struct bcm2048_device *bdev, unsigned int reg,
- u8 *value, u8 duples)
-{
- struct i2c_client *client = bdev->client;
- struct i2c_adapter *adap = client->adapter;
- struct i2c_msg msg[2];
- u8 buf;
-
- if (!bdev->power_state) {
- dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
- return -EIO;
- }
-
- buf = reg & 0xff;
-
- msg[0].addr = client->addr;
- msg[0].flags = client->flags & I2C_M_TEN;
- msg[0].len = 1;
- msg[0].buf = &buf;
-
- msg[1].addr = client->addr;
- msg[1].flags = client->flags & I2C_M_TEN;
- msg[1].flags |= I2C_M_RD;
- msg[1].len = duples;
- msg[1].buf = value;
-
- return i2c_transfer(adap, msg, 2);
-}
-
-/*
- * BCM2048 - I2C register programming helpers
- */
-static int bcm2048_set_power_state(struct bcm2048_device *bdev, u8 power)
-{
- int err = 0;
-
- mutex_lock(&bdev->mutex);
-
- if (power) {
- bdev->power_state = BCM2048_POWER_ON;
- bdev->cache_fm_rds_system |= BCM2048_FM_ON;
- } else {
- bdev->cache_fm_rds_system &= ~BCM2048_FM_ON;
- }
-
- /*
- * Warning! FM cannot be turned off because then
- * the I2C communications get ruined!
- * Comment out the "if (power)" check when the chip works!
- */
- if (power)
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
- bdev->cache_fm_rds_system);
- msleep(BCM2048_DEFAULT_POWERING_DELAY);
-
- if (!power)
- bdev->power_state = BCM2048_POWER_OFF;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_power_state(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_FM_ON))
- return BCM2048_POWER_ON;
-
- return err;
-}
-
-static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
-{
- int err;
- u8 flags;
-
- bdev->cache_fm_rds_system &= ~BCM2048_RDS_ON;
-
- if (rds_on) {
- bdev->cache_fm_rds_system |= BCM2048_RDS_ON;
- bdev->rds_state = BCM2048_RDS_ON;
- flags = BCM2048_RDS_FLAG_FIFO_WLINE;
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
- flags);
- } else {
- flags = 0;
- bdev->rds_state = 0;
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
- flags);
- memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
- }
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
- bdev->cache_fm_rds_system);
-
- return err;
-}
-
-static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
-
- if (!err && (value & BCM2048_RDS_ON))
- return BCM2048_ITEM_ENABLED;
-
- return err;
-}
-
-static int bcm2048_set_rds(struct bcm2048_device *bdev, u8 rds_on)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_set_rds_no_lock(bdev, rds_on);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds(struct bcm2048_device *bdev)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_get_rds_no_lock(bdev);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_pi(struct bcm2048_device *bdev)
-{
- return bdev->rds_info.rds_pi;
-}
-
-static int bcm2048_set_fm_automatic_stereo_mono(struct bcm2048_device *bdev,
- u8 enabled)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_ctrl &= ~BCM2048_STEREO_MONO_AUTO_SELECT;
-
- if (enabled)
- bdev->cache_fm_ctrl |= BCM2048_STEREO_MONO_AUTO_SELECT;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
- bdev->cache_fm_ctrl);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_set_fm_hi_lo_injection(struct bcm2048_device *bdev,
- u8 hi_lo)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_ctrl &= ~BCM2048_HI_LO_INJECTION;
-
- if (hi_lo)
- bdev->cache_fm_ctrl |= BCM2048_HI_LO_INJECTION;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
- bdev->cache_fm_ctrl);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_hi_lo_injection(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CTRL, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_HI_LO_INJECTION))
- return BCM2048_ITEM_ENABLED;
-
- return err;
-}
-
-static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency)
-{
- int err;
-
- if (frequency < bdev->region_info.bottom_frequency ||
- frequency > bdev->region_info.top_frequency)
- return -EDOM;
-
- frequency -= BCM2048_FREQUENCY_BASE;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ0, lsb(frequency));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ1,
- msb(frequency));
-
- if (!err)
- bdev->frequency = frequency;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb, msb;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (err)
- return err;
-
- err = compose_u16(msb, lsb);
- err += BCM2048_FREQUENCY_BASE;
-
- return err;
-}
-
-static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev,
- u32 frequency)
-{
- int err;
-
- if (frequency < bdev->region_info.bottom_frequency ||
- frequency > bdev->region_info.top_frequency)
- return -EDOM;
-
- frequency -= BCM2048_FREQUENCY_BASE;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ0,
- lsb(frequency));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ1,
- msb(frequency));
- if (!err)
- bdev->frequency = frequency;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb, msb;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (err)
- return err;
-
- err = compose_u16(msb, lsb);
- err += BCM2048_FREQUENCY_BASE;
-
- return err;
-}
-
-static int bcm2048_set_fm_deemphasis(struct bcm2048_device *bdev, int d)
-{
- int err;
- u8 deemphasis;
-
- if (d == BCM2048_DE_EMPHASIS_75us)
- deemphasis = BCM2048_DE_EMPHASIS_SELECT;
- else
- deemphasis = 0;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_audio_ctrl0 &= ~BCM2048_DE_EMPHASIS_SELECT;
- bdev->cache_fm_audio_ctrl0 |= deemphasis;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- if (!err)
- bdev->region_info.deemphasis = d;
-
- mutex_unlock(&bdev->mutex);
-
- return err;
-}
-
-static int bcm2048_get_fm_deemphasis(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err) {
- if (value & BCM2048_DE_EMPHASIS_SELECT)
- return BCM2048_DE_EMPHASIS_75us;
-
- return BCM2048_DE_EMPHASIS_50us;
- }
-
- return err;
-}
-
-static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
-{
- int err;
- u32 new_frequency = 0;
-
- if (region >= ARRAY_SIZE(region_configs))
- return -EINVAL;
-
- mutex_lock(&bdev->mutex);
- bdev->region_info = region_configs[region];
-
- if (region_configs[region].bottom_frequency < 87500)
- bdev->cache_fm_ctrl |= BCM2048_BAND_SELECT;
- else
- bdev->cache_fm_ctrl &= ~BCM2048_BAND_SELECT;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
- bdev->cache_fm_ctrl);
- if (err) {
- mutex_unlock(&bdev->mutex);
- goto done;
- }
- mutex_unlock(&bdev->mutex);
-
- if (bdev->frequency < region_configs[region].bottom_frequency ||
- bdev->frequency > region_configs[region].top_frequency)
- new_frequency = region_configs[region].bottom_frequency;
-
- if (new_frequency > 0) {
- err = bcm2048_set_fm_frequency(bdev, new_frequency);
-
- if (err)
- goto done;
- }
-
- err = bcm2048_set_fm_deemphasis(bdev,
- region_configs[region].deemphasis);
-
-done:
- return err;
-}
-
-static int bcm2048_get_region(struct bcm2048_device *bdev)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
- err = bdev->region_info.region;
- mutex_unlock(&bdev->mutex);
-
- return err;
-}
-
-static int bcm2048_set_mute(struct bcm2048_device *bdev, u16 mute)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
-
- if (mute)
- bdev->cache_fm_audio_ctrl0 |= (BCM2048_RF_MUTE |
- BCM2048_MANUAL_MUTE);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- if (!err)
- bdev->mute_state = mute;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_mute(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- if (bdev->power_state) {
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- &value);
- if (!err)
- err = value & (BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
- } else {
- err = bdev->mute_state;
- }
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_set_audio_route(struct bcm2048_device *bdev, u8 route)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- route &= (BCM2048_AUDIO_ROUTE_DAC | BCM2048_AUDIO_ROUTE_I2S);
- bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_AUDIO_ROUTE_DAC |
- BCM2048_AUDIO_ROUTE_I2S);
- bdev->cache_fm_audio_ctrl0 |= route;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_audio_route(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & (BCM2048_AUDIO_ROUTE_DAC |
- BCM2048_AUDIO_ROUTE_I2S);
-
- return err;
-}
-
-static int bcm2048_set_dac_output(struct bcm2048_device *bdev, u8 channels)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_DAC_OUTPUT_LEFT |
- BCM2048_DAC_OUTPUT_RIGHT);
- bdev->cache_fm_audio_ctrl0 |= channels;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_dac_output(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & (BCM2048_DAC_OUTPUT_LEFT |
- BCM2048_DAC_OUTPUT_RIGHT);
-
- return err;
-}
-
-static int bcm2048_set_fm_search_rssi_threshold(struct bcm2048_device *bdev,
- u8 threshold)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- threshold &= BCM2048_SEARCH_RSSI_THRESHOLD;
- bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_RSSI_THRESHOLD;
- bdev->cache_fm_search_ctrl0 |= threshold;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
- bdev->cache_fm_search_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_search_rssi_threshold(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & BCM2048_SEARCH_RSSI_THRESHOLD;
-
- return err;
-}
-
-static int bcm2048_set_fm_search_mode_direction(struct bcm2048_device *bdev,
- u8 direction)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_DIRECTION;
-
- if (direction)
- bdev->cache_fm_search_ctrl0 |= BCM2048_SEARCH_DIRECTION;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
- bdev->cache_fm_search_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_search_mode_direction(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_SEARCH_DIRECTION))
- return BCM2048_SEARCH_DIRECTION_UP;
-
- return err;
-}
-
-static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
- u8 mode)
-{
- int err, timeout, restart_rds = 0;
- u8 value, flags;
-
- value = mode & BCM2048_FM_AUTO_SEARCH;
-
- flags = BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
- BCM2048_FM_FLAG_SEARCH_TUNE_FAIL;
-
- mutex_lock(&bdev->mutex);
-
- /*
- * If RDS is enabled, and frequency is changed, RDS quits working.
- * Thus, always restart RDS if it's enabled. Moreover, RDS must
- * not be enabled while changing the frequency because it can
- * provide a race to the mutex from the workqueue handler if RDS
- * IRQ occurs while waiting for frequency changed IRQ.
- */
- if (bcm2048_get_rds_no_lock(bdev)) {
- err = bcm2048_set_rds_no_lock(bdev, 0);
- if (err)
- goto unlock;
- restart_rds = 1;
- }
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, flags);
-
- if (err)
- goto unlock;
-
- bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, value);
-
- if (mode != BCM2048_FM_AUTO_SEARCH_MODE)
- timeout = BCM2048_DEFAULT_TIMEOUT;
- else
- timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
-
- if (!wait_for_completion_timeout(&bdev->compl,
- msecs_to_jiffies(timeout)))
- dev_err(&bdev->client->dev, "IRQ timeout.\n");
-
- if (value)
- if (!bdev->scan_state)
- err = -EIO;
-
-unlock:
- if (restart_rds)
- err |= bcm2048_set_rds_no_lock(bdev, 1);
-
- mutex_unlock(&bdev->mutex);
-
- return err;
-}
-
-static int bcm2048_get_fm_search_tune_mode(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE,
- &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & BCM2048_FM_AUTO_SEARCH;
-
- return err;
-}
-
-static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_BLKB_MASK0, lsb(mask));
- err |= bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_BLKB_MASK1, msb(mask));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb, msb;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_BLKB_MASK0, &lsb);
- err |= bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_BLKB_MASK1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev,
- u16 match)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_BLKB_MATCH0, lsb(match));
- err |= bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_BLKB_MATCH1, msb(match));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb, msb;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_BLKB_MATCH0, &lsb);
- err |= bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_BLKB_MATCH1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_PI_MASK0, lsb(mask));
- err |= bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_PI_MASK1, msb(mask));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb, msb;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_PI_MASK0, &lsb);
- err |= bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_PI_MASK1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_PI_MATCH0, lsb(match));
- err |= bcm2048_send_command(bdev,
- BCM2048_I2C_RDS_PI_MATCH1, msb(match));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb, msb;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_PI_MATCH0, &lsb);
- err |= bcm2048_recv_command(bdev,
- BCM2048_I2C_RDS_PI_MATCH1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev,
- BCM2048_I2C_FM_RDS_MASK0, lsb(mask));
- err |= bcm2048_send_command(bdev,
- BCM2048_I2C_FM_RDS_MASK1, msb(mask));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev)
-{
- int err;
- u8 value0, value1;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK0, &value0);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK1, &value1);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(value1, value0);
-
- return err;
-}
-
-static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev)
-{
- int err;
- u8 value0, value1;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &value0);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &value1);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(value1, value0);
-
- return err;
-}
-
-static int bcm2048_get_region_bottom_frequency(struct bcm2048_device *bdev)
-{
- return bdev->region_info.bottom_frequency;
-}
-
-static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev)
-{
- return bdev->region_info.top_frequency;
-}
-
-static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- /* Perform read as the manual indicates */
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
- &value);
- value &= ~BCM2048_BEST_TUNE_MODE;
-
- if (mode)
- value |= BCM2048_BEST_TUNE_MODE;
- err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
- value);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_best_tune_mode(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
- &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_BEST_TUNE_MODE))
- return BCM2048_ITEM_ENABLED;
-
- return err;
-}
-
-static int bcm2048_get_fm_carrier_error(struct bcm2048_device *bdev)
-{
- int err = 0;
- s8 value;
-
- mutex_lock(&bdev->mutex);
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CARRIER, &value);
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value;
-
- return err;
-}
-
-static int bcm2048_get_fm_rssi(struct bcm2048_device *bdev)
-{
- int err;
- s8 value;
-
- mutex_lock(&bdev->mutex);
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RSSI, &value);
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value;
-
- return err;
-}
-
-static int bcm2048_set_rds_wline(struct bcm2048_device *bdev, u8 wline)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_WLINE, wline);
-
- if (!err)
- bdev->fifo_size = wline;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_wline(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_WLINE, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err) {
- bdev->fifo_size = value;
- return value;
- }
-
- return err;
-}
-
-static int bcm2048_checkrev(struct bcm2048_device *bdev)
-{
- int err;
- u8 version;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_REV, &version);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err) {
- dev_info(&bdev->client->dev, "BCM2048 Version 0x%x\n",
- version);
- return version;
- }
-
- return err;
-}
-
-static int bcm2048_get_rds_rt(struct bcm2048_device *bdev, char *data)
-{
- int err = 0, i, j = 0, ce = 0, cr = 0;
- char data_buffer[BCM2048_MAX_RDS_RT+1];
-
- mutex_lock(&bdev->mutex);
-
- if (!bdev->rds_info.text_len) {
- err = -EINVAL;
- goto unlock;
- }
-
- for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
- if (bdev->rds_info.rds_rt[i]) {
- ce = i;
- /* Skip the carriage return */
- if (bdev->rds_info.rds_rt[i] != 0x0d) {
- data_buffer[j++] = bdev->rds_info.rds_rt[i];
- } else {
- cr = i;
- break;
- }
- }
- }
-
- if (j <= BCM2048_MAX_RDS_RT)
- data_buffer[j] = 0;
-
- for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
- if (!bdev->rds_info.rds_rt[i]) {
- if (cr && (i < cr)) {
- err = -EBUSY;
- goto unlock;
- }
- if (i < ce) {
- if (cr && (i >= cr))
- break;
- err = -EBUSY;
- goto unlock;
- }
- }
- }
-
- memcpy(data, data_buffer, sizeof(data_buffer));
-
-unlock:
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_ps(struct bcm2048_device *bdev, char *data)
-{
- int err = 0, i, j = 0;
- char data_buffer[BCM2048_MAX_RDS_PS+1];
-
- mutex_lock(&bdev->mutex);
-
- if (!bdev->rds_info.text_len) {
- err = -EINVAL;
- goto unlock;
- }
-
- for (i = 0; i < BCM2048_MAX_RDS_PS; i++) {
- if (bdev->rds_info.rds_ps[i]) {
- data_buffer[j++] = bdev->rds_info.rds_ps[i];
- } else {
- if (i < (BCM2048_MAX_RDS_PS - 1)) {
- err = -EBUSY;
- goto unlock;
- }
- }
- }
-
- if (j <= BCM2048_MAX_RDS_PS)
- data_buffer[j] = 0;
-
- memcpy(data, data_buffer, sizeof(data_buffer));
-
-unlock:
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static void bcm2048_parse_rds_pi(struct bcm2048_device *bdev)
-{
- int i, cnt = 0;
- u16 pi;
-
- for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
-
- /* Block A match, only data without crc errors taken */
- if (bdev->rds_info.radio_text[i] == BCM2048_RDS_BLOCK_A) {
-
- pi = (bdev->rds_info.radio_text[i+1] << 8) +
- bdev->rds_info.radio_text[i+2];
-
- if (!bdev->rds_info.rds_pi) {
- bdev->rds_info.rds_pi = pi;
- return;
- }
- if (pi != bdev->rds_info.rds_pi) {
- cnt++;
- if (cnt > 3) {
- bdev->rds_info.rds_pi = pi;
- cnt = 0;
- }
- } else {
- cnt = 0;
- }
- }
- }
-}
-
-static int bcm2048_rds_block_crc(struct bcm2048_device *bdev, int i)
-{
- return bdev->rds_info.radio_text[i] & BCM2048_RDS_CRC_MASK;
-}
-
-static void bcm2048_parse_rds_rt_block(struct bcm2048_device *bdev, int i,
- int index, int crc)
-{
- /* Good data will overwrite poor data */
- if (crc) {
- if (!bdev->rds_info.rds_rt[index])
- bdev->rds_info.rds_rt[index] =
- bdev->rds_info.radio_text[i+1];
- if (!bdev->rds_info.rds_rt[index+1])
- bdev->rds_info.rds_rt[index+1] =
- bdev->rds_info.radio_text[i+2];
- } else {
- bdev->rds_info.rds_rt[index] = bdev->rds_info.radio_text[i+1];
- bdev->rds_info.rds_rt[index+1] =
- bdev->rds_info.radio_text[i+2];
- }
-}
-
-static int bcm2048_parse_rt_match_b(struct bcm2048_device *bdev, int i)
-{
- int crc, rt_id, rt_group_b, rt_ab, index = 0;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return -EIO;
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_B) {
-
- rt_id = bdev->rds_info.radio_text[i+1] &
- BCM2048_RDS_BLOCK_MASK;
- rt_group_b = bdev->rds_info.radio_text[i+1] &
- BCM2048_RDS_GROUP_AB_MASK;
- rt_ab = bdev->rds_info.radio_text[i+2] &
- BCM2048_RDS_RT_AB_MASK;
-
- if (rt_group_b != bdev->rds_info.rds_rt_group_b) {
- memset(bdev->rds_info.rds_rt, 0,
- sizeof(bdev->rds_info.rds_rt));
- bdev->rds_info.rds_rt_group_b = rt_group_b;
- }
-
- if (rt_id == BCM2048_RDS_RT) {
- /* A to B or (vice versa), means: clear screen */
- if (rt_ab != bdev->rds_info.rds_rt_ab) {
- memset(bdev->rds_info.rds_rt, 0,
- sizeof(bdev->rds_info.rds_rt));
- bdev->rds_info.rds_rt_ab = rt_ab;
- }
-
- index = bdev->rds_info.radio_text[i+2] &
- BCM2048_RDS_RT_INDEX;
-
- if (bdev->rds_info.rds_rt_group_b)
- index <<= 1;
- else
- index <<= 2;
-
- return index;
- }
- }
-
- return -EIO;
-}
-
-static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return 0;
-
- BUG_ON((index+2) >= BCM2048_MAX_RDS_RT);
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_C) {
- if (bdev->rds_info.rds_rt_group_b)
- return 1;
- bcm2048_parse_rds_rt_block(bdev, i, index, crc);
- return 1;
- }
-
- return 0;
-}
-
-static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return;
-
- BUG_ON((index+4) >= BCM2048_MAX_RDS_RT);
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_D)
- bcm2048_parse_rds_rt_block(bdev, i, index+2, crc);
-}
-
-static void bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
-{
- int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
-
- for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
-
- if (match_b) {
- match_b = 0;
- index = bcm2048_parse_rt_match_b(bdev, i);
- if (index >= 0 && index <= (BCM2048_MAX_RDS_RT - 5))
- match_c = 1;
- continue;
- } else if (match_c) {
- match_c = 0;
- if (bcm2048_parse_rt_match_c(bdev, i, index))
- match_d = 1;
- continue;
- } else if (match_d) {
- match_d = 0;
- bcm2048_parse_rt_match_d(bdev, i, index);
- continue;
- }
-
- /* Skip erroneous blocks due to messed up A block altogether */
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
- == BCM2048_RDS_BLOCK_A) {
- crc = bcm2048_rds_block_crc(bdev, i);
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- continue;
- /* Synchronize to a good RDS PI */
- if (((bdev->rds_info.radio_text[i+1] << 8) +
- bdev->rds_info.radio_text[i+2]) ==
- bdev->rds_info.rds_pi)
- match_b = 1;
- }
- }
-}
-
-static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i,
- int index, int crc)
-{
- /* Good data will overwrite poor data */
- if (crc) {
- if (!bdev->rds_info.rds_ps[index])
- bdev->rds_info.rds_ps[index] =
- bdev->rds_info.radio_text[i+1];
- if (!bdev->rds_info.rds_ps[index+1])
- bdev->rds_info.rds_ps[index+1] =
- bdev->rds_info.radio_text[i+2];
- } else {
- bdev->rds_info.rds_ps[index] = bdev->rds_info.radio_text[i+1];
- bdev->rds_info.rds_ps[index+1] =
- bdev->rds_info.radio_text[i+2];
- }
-}
-
-static int bcm2048_parse_ps_match_c(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return 0;
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_C)
- return 1;
-
- return 0;
-}
-
-static void bcm2048_parse_ps_match_d(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return;
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_D)
- bcm2048_parse_rds_ps_block(bdev, i, index, crc);
-}
-
-static int bcm2048_parse_ps_match_b(struct bcm2048_device *bdev, int i)
-{
- int crc, index, ps_id, ps_group;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return -EIO;
-
- /* Block B Radio PS match */
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_B) {
- ps_id = bdev->rds_info.radio_text[i+1] &
- BCM2048_RDS_BLOCK_MASK;
- ps_group = bdev->rds_info.radio_text[i+1] &
- BCM2048_RDS_GROUP_AB_MASK;
-
- /*
- * Poor RSSI will lead to RDS data corruption
- * So require 3 identical sequential values before accepting a change
- */
- if (ps_group != bdev->rds_info.rds_ps_group) {
- if (crc == BCM2048_RDS_CRC_NONE) {
- bdev->rds_info.rds_ps_group_cnt++;
- if (bdev->rds_info.rds_ps_group_cnt > 2) {
- bdev->rds_info.rds_ps_group = ps_group;
- bdev->rds_info.rds_ps_group_cnt = 0;
- dev_err(&bdev->client->dev,
- "RDS PS Group change!\n");
- } else {
- return -EIO;
- }
- } else {
- bdev->rds_info.rds_ps_group_cnt = 0;
- }
- }
-
- if (ps_id == BCM2048_RDS_PS) {
- index = bdev->rds_info.radio_text[i+2] &
- BCM2048_RDS_PS_INDEX;
- index <<= 1;
- return index;
- }
- }
-
- return -EIO;
-}
-
-static void bcm2048_parse_rds_ps(struct bcm2048_device *bdev)
-{
- int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
-
- for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
-
- if (match_b) {
- match_b = 0;
- index = bcm2048_parse_ps_match_b(bdev, i);
- if (index >= 0 && index < (BCM2048_MAX_RDS_PS - 1))
- match_c = 1;
- continue;
- } else if (match_c) {
- match_c = 0;
- if (bcm2048_parse_ps_match_c(bdev, i, index))
- match_d = 1;
- continue;
- } else if (match_d) {
- match_d = 0;
- bcm2048_parse_ps_match_d(bdev, i, index);
- continue;
- }
-
- /* Skip erroneous blocks due to messed up A block altogether */
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
- == BCM2048_RDS_BLOCK_A) {
- crc = bcm2048_rds_block_crc(bdev, i);
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- continue;
- /* Synchronize to a good RDS PI */
- if (((bdev->rds_info.radio_text[i+1] << 8) +
- bdev->rds_info.radio_text[i+2]) ==
- bdev->rds_info.rds_pi)
- match_b = 1;
- }
- }
-}
-
-static void bcm2048_rds_fifo_receive(struct bcm2048_device *bdev)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_duples(bdev, BCM2048_I2C_RDS_DATA,
- bdev->rds_info.radio_text, bdev->fifo_size);
- if (err != 2) {
- dev_err(&bdev->client->dev, "RDS Read problem\n");
- mutex_unlock(&bdev->mutex);
- return;
- }
-
- bdev->rds_info.text_len = bdev->fifo_size;
-
- bcm2048_parse_rds_pi(bdev);
- bcm2048_parse_rds_rt(bdev);
- bcm2048_parse_rds_ps(bdev);
-
- mutex_unlock(&bdev->mutex);
-
- wake_up_interruptible(&bdev->read_queue);
-}
-
-static int bcm2048_get_rds_data(struct bcm2048_device *bdev, char *data)
-{
- int err = 0, i, p = 0;
- char *data_buffer;
-
- mutex_lock(&bdev->mutex);
-
- if (!bdev->rds_info.text_len) {
- err = -EINVAL;
- goto unlock;
- }
-
- data_buffer = kcalloc(BCM2048_MAX_RDS_RADIO_TEXT, 5, GFP_KERNEL);
- if (!data_buffer) {
- err = -ENOMEM;
- goto unlock;
- }
-
- for (i = 0; i < bdev->rds_info.text_len; i++) {
- p += sprintf(data_buffer+p, "%x ",
- bdev->rds_info.radio_text[i]);
- }
-
- memcpy(data, data_buffer, p);
- kfree(data_buffer);
-
-unlock:
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-/*
- * BCM2048 default initialization sequence
- */
-static int bcm2048_init(struct bcm2048_device *bdev)
-{
- int err;
-
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
- if (err < 0)
- goto exit;
-
- err = bcm2048_set_audio_route(bdev, BCM2048_AUDIO_ROUTE_DAC);
- if (err < 0)
- goto exit;
-
- err = bcm2048_set_dac_output(bdev, BCM2048_DAC_OUTPUT_LEFT |
- BCM2048_DAC_OUTPUT_RIGHT);
-
-exit:
- return err;
-}
-
-/*
- * BCM2048 default deinitialization sequence
- */
-static int bcm2048_deinit(struct bcm2048_device *bdev)
-{
- int err;
-
- err = bcm2048_set_audio_route(bdev, 0);
- if (err < 0)
- goto exit;
-
- err = bcm2048_set_dac_output(bdev, 0);
- if (err < 0)
- goto exit;
-
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
- if (err < 0)
- goto exit;
-
-exit:
- return err;
-}
-
-/*
- * BCM2048 probe sequence
- */
-static int bcm2048_probe(struct bcm2048_device *bdev)
-{
- int err;
-
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_checkrev(bdev);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_mute(bdev, BCM2048_DEFAULT_MUTE);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_region(bdev, BCM2048_DEFAULT_REGION);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_fm_search_rssi_threshold(bdev,
- BCM2048_DEFAULT_RSSI_THRESHOLD);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_fm_automatic_stereo_mono(bdev, BCM2048_ITEM_ENABLED);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_get_rds_wline(bdev);
- if (err < BCM2048_DEFAULT_RDS_WLINE)
- err = bcm2048_set_rds_wline(bdev, BCM2048_DEFAULT_RDS_WLINE);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
-
- init_waitqueue_head(&bdev->read_queue);
- bdev->rds_data_available = 0;
- bdev->rd_index = 0;
- bdev->users = 0;
-
-unlock:
- return err;
-}
-
-/*
- * BCM2048 workqueue handler
- */
-static void bcm2048_work(struct work_struct *work)
-{
- struct bcm2048_device *bdev;
- u8 flag_lsb, flag_msb, flags;
-
- bdev = container_of(work, struct bcm2048_device, work);
- bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb);
- bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &flag_msb);
-
- if (flag_lsb & (BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
- BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)) {
-
- if (flag_lsb & BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)
- bdev->scan_state = BCM2048_SCAN_FAIL;
- else
- bdev->scan_state = BCM2048_SCAN_OK;
-
- complete(&bdev->compl);
- }
-
- if (flag_msb & BCM2048_RDS_FLAG_FIFO_WLINE) {
- bcm2048_rds_fifo_receive(bdev);
- if (bdev->rds_state) {
- flags = BCM2048_RDS_FLAG_FIFO_WLINE;
- bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
- flags);
- }
- bdev->rds_data_available = 1;
- bdev->rd_index = 0; /* new data, new start */
- }
-}
-
-/*
- * BCM2048 interrupt handler
- */
-static irqreturn_t bcm2048_handler(int irq, void *dev)
-{
- struct bcm2048_device *bdev = dev;
-
- dev_dbg(&bdev->client->dev, "IRQ called, queuing work\n");
- if (bdev->power_state)
- schedule_work(&bdev->work);
-
- return IRQ_HANDLED;
-}
-
-/*
- * BCM2048 sysfs interface definitions
- */
-#define property_write(prop, type, mask, check) \
-static ssize_t bcm2048_##prop##_write(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, \
- size_t count) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- type value; \
- int err; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- if (sscanf(buf, mask, &value) != 1) \
- return -EINVAL; \
- \
- if (check) \
- return -EDOM; \
- \
- err = bcm2048_set_##prop(bdev, value); \
- \
- return err < 0 ? err : count; \
-}
-
-#define property_read(prop, size, mask) \
-static ssize_t bcm2048_##prop##_read(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- int value; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- value = bcm2048_get_##prop(bdev); \
- \
- if (value >= 0) \
- value = sprintf(buf, mask "\n", value); \
- \
- return value; \
-}
-
-#define property_signed_read(prop, size, mask) \
-static ssize_t bcm2048_##prop##_read(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- size value; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- value = bcm2048_get_##prop(bdev); \
- \
- value = sprintf(buf, mask "\n", value); \
- \
- return value; \
-}
-
-#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \
-property_write(prop, signal size, mask, check) \
-property_read(prop, size, mask)
-
-#define property_str_read(prop, size) \
-static ssize_t bcm2048_##prop##_read(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- int count; \
- u8 *out; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- out = kzalloc(size + 1, GFP_KERNEL); \
- if (!out) \
- return -ENOMEM; \
- \
- bcm2048_get_##prop(bdev, out); \
- count = sprintf(buf, "%s\n", out); \
- \
- kfree(out); \
- \
- return count; \
-}
-
-DEFINE_SYSFS_PROPERTY(power_state, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(mute, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(audio_route, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(dac_output, unsigned, int, "%u", 0)
-
-DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned, int, "%u", value > 3)
-
-DEFINE_SYSFS_PROPERTY(rds, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_wline, unsigned, int, "%u", 0)
-property_read(rds_pi, unsigned int, "%x")
-property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 1))
-property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1))
-
-property_read(fm_rds_flags, unsigned int, "%u")
-property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT*5)
-
-property_read(region_bottom_frequency, unsigned int, "%u")
-property_read(region_top_frequency, unsigned int, "%u")
-property_signed_read(fm_carrier_error, int, "%d")
-property_signed_read(fm_rssi, int, "%d")
-DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
-
-static struct device_attribute attrs[] = {
- __ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read,
- bcm2048_power_state_write),
- __ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read,
- bcm2048_mute_write),
- __ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read,
- bcm2048_audio_route_write),
- __ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read,
- bcm2048_dac_output_write),
- __ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR,
- bcm2048_fm_hi_lo_injection_read,
- bcm2048_fm_hi_lo_injection_write),
- __ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read,
- bcm2048_fm_frequency_write),
- __ATTR(fm_af_frequency, S_IRUGO | S_IWUSR,
- bcm2048_fm_af_frequency_read,
- bcm2048_fm_af_frequency_write),
- __ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read,
- bcm2048_fm_deemphasis_write),
- __ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read,
- bcm2048_fm_rds_mask_write),
- __ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR,
- bcm2048_fm_best_tune_mode_read,
- bcm2048_fm_best_tune_mode_write),
- __ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR,
- bcm2048_fm_search_rssi_threshold_read,
- bcm2048_fm_search_rssi_threshold_write),
- __ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR,
- bcm2048_fm_search_mode_direction_read,
- bcm2048_fm_search_mode_direction_write),
- __ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR,
- bcm2048_fm_search_tune_mode_read,
- bcm2048_fm_search_tune_mode_write),
- __ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read,
- bcm2048_rds_write),
- __ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR,
- bcm2048_rds_b_block_mask_read,
- bcm2048_rds_b_block_mask_write),
- __ATTR(rds_b_block_match, S_IRUGO | S_IWUSR,
- bcm2048_rds_b_block_match_read,
- bcm2048_rds_b_block_match_write),
- __ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read,
- bcm2048_rds_pi_mask_write),
- __ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read,
- bcm2048_rds_pi_match_write),
- __ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read,
- bcm2048_rds_wline_write),
- __ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL),
- __ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL),
- __ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL),
- __ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL),
- __ATTR(region_bottom_frequency, S_IRUGO,
- bcm2048_region_bottom_frequency_read, NULL),
- __ATTR(region_top_frequency, S_IRUGO,
- bcm2048_region_top_frequency_read, NULL),
- __ATTR(fm_carrier_error, S_IRUGO,
- bcm2048_fm_carrier_error_read, NULL),
- __ATTR(fm_rssi, S_IRUGO,
- bcm2048_fm_rssi_read, NULL),
- __ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read,
- bcm2048_region_write),
- __ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL),
-};
-
-static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
- int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- device_remove_file(&bdev->client->dev, &attrs[i]);
-
- return 0;
-}
-
-static int bcm2048_sysfs_register_properties(struct bcm2048_device *bdev)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(attrs); i++) {
- if (device_create_file(&bdev->client->dev, &attrs[i]) != 0) {
- dev_err(&bdev->client->dev,
- "could not register sysfs entry\n");
- err = -EBUSY;
- bcm2048_sysfs_unregister_properties(bdev, i);
- break;
- }
- }
-
- return err;
-}
-
-
-static int bcm2048_fops_open(struct file *file)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
-
- bdev->users++;
- bdev->rd_index = 0;
- bdev->rds_data_available = 0;
-
- return 0;
-}
-
-static int bcm2048_fops_release(struct file *file)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
-
- bdev->users--;
-
- return 0;
-}
-
-static unsigned int bcm2048_fops_poll(struct file *file,
- struct poll_table_struct *pts)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
- int retval = 0;
-
- poll_wait(file, &bdev->read_queue, pts);
-
- if (bdev->rds_data_available)
- retval = POLLIN | POLLRDNORM;
-
- return retval;
-}
-
-static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
- int i;
- int retval = 0;
-
- /* we return at least 3 bytes, one block */
- count = (count / 3) * 3; /* only multiples of 3 */
- if (count < 3)
- return -ENOBUFS;
-
- while (!bdev->rds_data_available) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EWOULDBLOCK;
- goto done;
- }
- /* interruptible_sleep_on(&bdev->read_queue); */
- if (wait_event_interruptible(bdev->read_queue,
- bdev->rds_data_available) < 0) {
- retval = -EINTR;
- goto done;
- }
- }
-
- mutex_lock(&bdev->mutex);
- /* copy data to userspace */
- i = bdev->fifo_size - bdev->rd_index;
- if (count > i)
- count = (i / 3) * 3;
-
- i = 0;
- while (i < count) {
- unsigned char tmpbuf[3];
-
- tmpbuf[0] = bdev->rds_info.radio_text[bdev->rd_index+i+2];
- tmpbuf[1] = bdev->rds_info.radio_text[bdev->rd_index+i+1];
- tmpbuf[2] = (bdev->rds_info.radio_text[bdev->rd_index + i] & 0xf0) >> 4;
- if ((bdev->rds_info.radio_text[bdev->rd_index+i] &
- BCM2048_RDS_CRC_MASK) == BCM2048_RDS_CRC_UNRECOVARABLE)
- tmpbuf[2] |= 0x80;
- if (copy_to_user(buf+i, tmpbuf, 3)) {
- retval = -EFAULT;
- break;
- }
- i += 3;
- }
-
- bdev->rd_index += i;
- if (bdev->rd_index >= bdev->fifo_size)
- bdev->rds_data_available = 0;
-
- mutex_unlock(&bdev->mutex);
- if (retval == 0)
- retval = i;
-
-done:
- return retval;
-}
-
-/*
- * bcm2048_fops - file operations interface
- */
-static const struct v4l2_file_operations bcm2048_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
- /* for RDS read support */
- .open = bcm2048_fops_open,
- .release = bcm2048_fops_release,
- .read = bcm2048_fops_read,
- .poll = bcm2048_fops_poll
-};
-
-/*
- * Video4Linux Interface
- */
-static struct v4l2_queryctrl bcm2048_v4l2_queryctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_BALANCE,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_BASS,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_TREBLE,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- {
- .id = V4L2_CID_AUDIO_LOUDNESS,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
-};
-
-static int bcm2048_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *capability)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
-
- strlcpy(capability->driver, BCM2048_DRIVER_NAME,
- sizeof(capability->driver));
- strlcpy(capability->card, BCM2048_DRIVER_CARD,
- sizeof(capability->card));
- snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
- capability->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
- V4L2_CAP_HW_FREQ_SEEK;
- capability->capabilities = capability->device_caps |
- V4L2_CAP_DEVICE_CAPS;
-
- return 0;
-}
-
-static int bcm2048_vidioc_g_input(struct file *filp, void *priv,
- unsigned int *i)
-{
- *i = 0;
-
- return 0;
-}
-
-static int bcm2048_vidioc_s_input(struct file *filp, void *priv,
- unsigned int i)
-{
- if (i)
- return -EINVAL;
-
- return 0;
-}
-
-static int bcm2048_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bcm2048_v4l2_queryctrl); i++) {
- if (qc->id && qc->id == bcm2048_v4l2_queryctrl[i].id) {
- *qc = bcm2048_v4l2_queryctrl[i];
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int bcm2048_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err = 0;
-
- if (!bdev)
- return -ENODEV;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- err = bcm2048_get_mute(bdev);
- if (err >= 0)
- ctrl->value = err;
- break;
- }
-
- return err;
-}
-
-static int bcm2048_vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err = 0;
-
- if (!bdev)
- return -ENODEV;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value) {
- if (bdev->power_state) {
- err = bcm2048_set_mute(bdev, ctrl->value);
- err |= bcm2048_deinit(bdev);
- }
- } else {
- if (!bdev->power_state) {
- err = bcm2048_init(bdev);
- err |= bcm2048_set_mute(bdev, ctrl->value);
- }
- }
- break;
- }
-
- return err;
-}
-
-static int bcm2048_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- if (audio->index > 1)
- return -EINVAL;
-
- strncpy(audio->name, "Radio", 32);
- audio->capability = V4L2_AUDCAP_STEREO;
-
- return 0;
-}
-
-static int bcm2048_vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *audio)
-{
- if (audio->index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int bcm2048_vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *tuner)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- s8 f_error;
- s8 rssi;
-
- if (!bdev)
- return -ENODEV;
-
- if (tuner->index > 0)
- return -EINVAL;
-
- strncpy(tuner->name, "FM Receiver", 32);
- tuner->type = V4L2_TUNER_RADIO;
- tuner->rangelow =
- dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev));
- tuner->rangehigh =
- dev_to_v4l2(bcm2048_get_region_top_frequency(bdev));
- tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
- tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
- tuner->audmode = V4L2_TUNER_MODE_STEREO;
- tuner->afc = 0;
- if (bdev->power_state) {
- /*
- * Report frequencies with high carrier errors to have zero
- * signal level
- */
- f_error = bcm2048_get_fm_carrier_error(bdev);
- if (f_error < BCM2048_FREQ_ERROR_FLOOR ||
- f_error > BCM2048_FREQ_ERROR_ROOF) {
- tuner->signal = 0;
- } else {
- /*
- * RSSI level -60 dB is defined to report full
- * signal strength
- */
- rssi = bcm2048_get_fm_rssi(bdev);
- if (rssi >= BCM2048_RSSI_LEVEL_BASE) {
- tuner->signal = 0xFFFF;
- } else if (rssi > BCM2048_RSSI_LEVEL_ROOF) {
- tuner->signal = (rssi +
- BCM2048_RSSI_LEVEL_ROOF_NEG)
- * BCM2048_SIGNAL_MULTIPLIER;
- } else {
- tuner->signal = 0;
- }
- }
- } else {
- tuner->signal = 0;
- }
-
- return 0;
-}
-
-static int bcm2048_vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *tuner)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
-
- if (!bdev)
- return -ENODEV;
-
- if (tuner->index > 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int bcm2048_vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *freq)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err = 0;
- int f;
-
- if (!bdev->power_state)
- return -ENODEV;
-
- freq->type = V4L2_TUNER_RADIO;
- f = bcm2048_get_fm_frequency(bdev);
-
- if (f < 0)
- err = f;
- else
- freq->frequency = dev_to_v4l2(f);
-
- return err;
-}
-
-static int bcm2048_vidioc_s_frequency(struct file *file, void *priv,
- const struct v4l2_frequency *freq)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err;
-
- if (freq->type != V4L2_TUNER_RADIO)
- return -EINVAL;
-
- if (!bdev->power_state)
- return -ENODEV;
-
- err = bcm2048_set_fm_frequency(bdev, v4l2_to_dev(freq->frequency));
- err |= bcm2048_set_fm_search_tune_mode(bdev, BCM2048_FM_PRE_SET_MODE);
-
- return err;
-}
-
-static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
- const struct v4l2_hw_freq_seek *seek)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err;
-
- if (!bdev->power_state)
- return -ENODEV;
-
- if ((seek->tuner != 0) || (seek->type != V4L2_TUNER_RADIO))
- return -EINVAL;
-
- err = bcm2048_set_fm_search_mode_direction(bdev, seek->seek_upward);
- err |= bcm2048_set_fm_search_tune_mode(bdev,
- BCM2048_FM_AUTO_SEARCH_MODE);
-
- return err;
-}
-
-static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
- .vidioc_querycap = bcm2048_vidioc_querycap,
- .vidioc_g_input = bcm2048_vidioc_g_input,
- .vidioc_s_input = bcm2048_vidioc_s_input,
- .vidioc_queryctrl = bcm2048_vidioc_queryctrl,
- .vidioc_g_ctrl = bcm2048_vidioc_g_ctrl,
- .vidioc_s_ctrl = bcm2048_vidioc_s_ctrl,
- .vidioc_g_audio = bcm2048_vidioc_g_audio,
- .vidioc_s_audio = bcm2048_vidioc_s_audio,
- .vidioc_g_tuner = bcm2048_vidioc_g_tuner,
- .vidioc_s_tuner = bcm2048_vidioc_s_tuner,
- .vidioc_g_frequency = bcm2048_vidioc_g_frequency,
- .vidioc_s_frequency = bcm2048_vidioc_s_frequency,
- .vidioc_s_hw_freq_seek = bcm2048_vidioc_s_hw_freq_seek,
-};
-
-/*
- * bcm2048_viddev_template - video device interface
- */
-static struct video_device bcm2048_viddev_template = {
- .fops = &bcm2048_fops,
- .name = BCM2048_DRIVER_NAME,
- .release = video_device_release_empty,
- .ioctl_ops = &bcm2048_ioctl_ops,
-};
-
-/*
- * I2C driver interface
- */
-static int bcm2048_i2c_driver_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct bcm2048_device *bdev;
- int err;
-
- bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
- if (!bdev) {
- err = -ENOMEM;
- goto exit;
- }
-
- bdev->client = client;
- i2c_set_clientdata(client, bdev);
- mutex_init(&bdev->mutex);
- init_completion(&bdev->compl);
- INIT_WORK(&bdev->work, bcm2048_work);
-
- if (client->irq) {
- err = request_irq(client->irq,
- bcm2048_handler, IRQF_TRIGGER_FALLING,
- client->name, bdev);
- if (err < 0) {
- dev_err(&client->dev, "Could not request IRQ\n");
- goto free_bdev;
- }
- dev_dbg(&client->dev, "IRQ requested.\n");
- } else {
- dev_dbg(&client->dev, "IRQ not configured. Using timeouts.\n");
- }
-
- bdev->videodev = bcm2048_viddev_template;
- video_set_drvdata(&bdev->videodev, bdev);
- if (video_register_device(&bdev->videodev, VFL_TYPE_RADIO, radio_nr)) {
- dev_dbg(&client->dev, "Could not register video device.\n");
- err = -EIO;
- goto free_irq;
- }
-
- err = bcm2048_sysfs_register_properties(bdev);
- if (err < 0) {
- dev_dbg(&client->dev, "Could not register sysfs interface.\n");
- goto free_registration;
- }
-
- err = bcm2048_probe(bdev);
- if (err < 0) {
- dev_dbg(&client->dev, "Failed to probe device information.\n");
- goto free_sysfs;
- }
-
- return 0;
-
-free_sysfs:
- bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
-free_registration:
- video_unregister_device(&bdev->videodev);
-free_irq:
- if (client->irq)
- free_irq(client->irq, bdev);
-free_bdev:
- i2c_set_clientdata(client, NULL);
- kfree(bdev);
-exit:
- return err;
-}
-
-static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
-{
- struct bcm2048_device *bdev = i2c_get_clientdata(client);
-
- if (!client->adapter)
- return -ENODEV;
-
- if (bdev) {
- bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
- video_unregister_device(&bdev->videodev);
-
- if (bdev->power_state)
- bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
-
- if (client->irq > 0)
- free_irq(client->irq, bdev);
-
- cancel_work_sync(&bdev->work);
-
- kfree(bdev);
- }
-
- return 0;
-}
-
-/*
- * bcm2048_i2c_driver - i2c driver interface
- */
-static const struct i2c_device_id bcm2048_id[] = {
- { "bcm2048", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, bcm2048_id);
-
-static struct i2c_driver bcm2048_i2c_driver = {
- .driver = {
- .name = BCM2048_DRIVER_NAME,
- },
- .probe = bcm2048_i2c_driver_probe,
- .remove = __exit_p(bcm2048_i2c_driver_remove),
- .id_table = bcm2048_id,
-};
-
-module_i2c_driver(bcm2048_i2c_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR(BCM2048_DRIVER_AUTHOR);
-MODULE_DESCRIPTION(BCM2048_DRIVER_DESC);
-MODULE_VERSION("0.0.2");
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
deleted file mode 100644
index 4c90a32db795..000000000000
--- a/drivers/staging/media/bcm2048/radio-bcm2048.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * drivers/staging/media/bcm2048/radio-bcm2048.h
- *
- * Property and command definitions for bcm2048 radio receiver chip.
- *
- * Copyright (C) Nokia Corporation
- * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef BCM2048_H
-#define BCM2048_H
-
-#define BCM2048_NAME "bcm2048"
-#define BCM2048_I2C_ADDR 0x22
-
-#endif /* ifndef BCM2048_H */
diff --git a/drivers/staging/media/cxd2099/Kconfig b/drivers/staging/media/cxd2099/Kconfig
deleted file mode 100644
index b48aefddc84c..000000000000
--- a/drivers/staging/media/cxd2099/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config DVB_CXD2099
- tristate "CXD2099AR Common Interface driver"
- depends on DVB_CORE && PCI && I2C
- ---help---
- Support for the CI module found on cards based on
- - Micronas ngene PCIe bridge: cineS2 etc.
- - Digital Devices PCIe bridge: Octopus series
-
- For now, data is passed through '/dev/dvb/adapterX/sec0':
- - Encrypted data must be written to 'sec0'.
- - Decrypted data can be read from 'sec0'.
- - Setup the CAM using device 'ca0'.
diff --git a/drivers/staging/media/cxd2099/Makefile b/drivers/staging/media/cxd2099/Makefile
deleted file mode 100644
index b2905e65057c..000000000000
--- a/drivers/staging/media/cxd2099/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_DVB_CXD2099) += cxd2099.o
-
-ccflags-y += -Idrivers/media/dvb-core/
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/cxd2099/TODO b/drivers/staging/media/cxd2099/TODO
deleted file mode 100644
index 375bb6f8ee2c..000000000000
--- a/drivers/staging/media/cxd2099/TODO
+++ /dev/null
@@ -1,12 +0,0 @@
-For now, data is passed through '/dev/dvb/adapterX/sec0':
- - Encrypted data must be written to 'sec0'.
- - Decrypted data can be read from 'sec0'.
- - Setup the CAM using device 'ca0'.
-
-But this is wrong. There are some discussions about the proper way for
-doing it, as seen at:
- http://www.mail-archive.com/linux-media@vger.kernel.org/msg22196.html
-
-While there's no proper fix for it, the driver should be kept in staging.
-
-Patches should be submitted to: linux-media@vger.kernel.org.
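
As a rough illustration of the sec0 flow described above, here is a minimal
userspace sketch; the adapter number, packet size and buffer contents are
assumptions, and CAM setup via 'ca0' as well as error handling are left out:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char enc[188] = { 0x47 };	/* placeholder encrypted TS packet */
		unsigned char dec[188];
		int fd = open("/dev/dvb/adapter0/sec0", O_RDWR);	/* assumed adapter 0 */

		if (fd < 0)
			return 1;
		write(fd, enc, sizeof(enc));	/* encrypted data is written to sec0 */
		read(fd, dec, sizeof(dec));	/* decrypted data is read back from sec0 */
		close(fd);
		return 0;
	}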
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
deleted file mode 100644
index 692ba3e63e14..000000000000
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/*
- * cxd2099.c: Driver for the CXD2099AR Common Interface Controller
- *
- * Copyright (C) 2010-2011 Digital Devices GmbH
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-
-#include "cxd2099.h"
-
-#define MAX_BUFFER_SIZE 248
-
-struct cxd {
- struct dvb_ca_en50221 en;
-
- struct i2c_adapter *i2c;
- struct cxd2099_cfg cfg;
-
- u8 regs[0x23];
- u8 lastaddress;
- u8 clk_reg_f;
- u8 clk_reg_b;
- int mode;
- int ready;
- int dr;
- int slot_stat;
-
- u8 amem[1024];
- int amem_read;
-
- int cammode;
- struct mutex lock;
-};
-
-static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
- u8 reg, u8 data)
-{
- u8 m[2] = {reg, data};
- struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = m, .len = 2};
-
- if (i2c_transfer(adapter, &msg, 1) != 1) {
- dev_err(&adapter->dev,
- "Failed to write to I2C register %02x@%02x!\n",
- reg, adr);
- return -1;
- }
- return 0;
-}
-
-static int i2c_write(struct i2c_adapter *adapter, u8 adr,
- u8 *data, u8 len)
-{
- struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len};
-
- if (i2c_transfer(adapter, &msg, 1) != 1) {
- dev_err(&adapter->dev, "Failed to write to I2C!\n");
- return -1;
- }
- return 0;
-}
-
-static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
- u8 reg, u8 *val)
-{
- struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1},
- {.addr = adr, .flags = I2C_M_RD,
- .buf = val, .len = 1} };
-
- if (i2c_transfer(adapter, msgs, 2) != 2) {
- dev_err(&adapter->dev, "error in i2c_read_reg\n");
- return -1;
- }
- return 0;
-}
-
-static int i2c_read(struct i2c_adapter *adapter, u8 adr,
- u8 reg, u8 *data, u8 n)
-{
- struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1},
- {.addr = adr, .flags = I2C_M_RD,
- .buf = data, .len = n} };
-
- if (i2c_transfer(adapter, msgs, 2) != 2) {
- dev_err(&adapter->dev, "error in i2c_read\n");
- return -1;
- }
- return 0;
-}
-
-static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n)
-{
- int status;
-
- status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
- if (!status) {
- ci->lastaddress = adr;
- status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n);
- }
- return status;
-}
-
-static int read_reg(struct cxd *ci, u8 reg, u8 *val)
-{
- return read_block(ci, reg, val, 1);
-}
-
-
-static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
-{
- int status;
- u8 addr[3] = {2, address & 0xff, address >> 8};
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status)
- status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
- return status;
-}
-
-static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
-{
- int status;
- u8 addr[3] = {2, address & 0xff, address >> 8};
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status) {
- u8 buf[256] = {3};
-
- memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->cfg.adr, buf, n+1);
- }
- return status;
-}
-
-static int read_io(struct cxd *ci, u16 address, u8 *val)
-{
- int status;
- u8 addr[3] = {2, address & 0xff, address >> 8};
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status)
- status = i2c_read(ci->i2c, ci->cfg.adr, 3, val, 1);
- return status;
-}
-
-static int write_io(struct cxd *ci, u16 address, u8 val)
-{
- int status;
- u8 addr[3] = {2, address & 0xff, address >> 8};
- u8 buf[2] = {3, val};
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status)
- status = i2c_write(ci->i2c, ci->cfg.adr, buf, 2);
- return status;
-}
-
-#if 0
-static int read_io_data(struct cxd *ci, u8 *data, u8 n)
-{
- int status;
- u8 addr[3] = { 2, 0, 0 };
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status)
- status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
- return 0;
-}
-
-static int write_io_data(struct cxd *ci, u8 *data, u8 n)
-{
- int status;
- u8 addr[3] = {2, 0, 0};
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status) {
- u8 buf[256] = {3};
-
- memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
- }
- return 0;
-}
-#endif
-
-static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
-{
- int status;
-
- status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
- if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
- status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]);
- ci->regs[reg] = (ci->regs[reg] & (~mask)) | val;
- if (!status) {
- ci->lastaddress = reg;
- status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]);
- }
- if (reg == 0x20)
- ci->regs[reg] &= 0x7f;
- return status;
-}
-
-static int write_reg(struct cxd *ci, u8 reg, u8 val)
-{
- return write_regm(ci, reg, val, 0xff);
-}
-
-#ifdef BUFFER_MODE
-static int write_block(struct cxd *ci, u8 adr, u8 *data, int n)
-{
- int status;
- u8 buf[256] = {1};
-
- status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
- if (!status) {
- ci->lastaddress = adr;
- memcpy(buf + 1, data, n);
- status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
- }
- return status;
-}
-#endif
-
-static void set_mode(struct cxd *ci, int mode)
-{
- if (mode == ci->mode)
- return;
-
- switch (mode) {
- case 0x00: /* IO mem */
- write_regm(ci, 0x06, 0x00, 0x07);
- break;
- case 0x01: /* ATT mem */
- write_regm(ci, 0x06, 0x02, 0x07);
- break;
- default:
- break;
- }
- ci->mode = mode;
-}
-
-static void cam_mode(struct cxd *ci, int mode)
-{
- if (mode == ci->cammode)
- return;
-
- switch (mode) {
- case 0x00:
- write_regm(ci, 0x20, 0x80, 0x80);
- break;
- case 0x01:
-#ifdef BUFFER_MODE
- if (!ci->en.read_data)
- return;
- dev_info(&ci->i2c->dev, "enable cam buffer mode\n");
- /* write_reg(ci, 0x0d, 0x00); */
- /* write_reg(ci, 0x0e, 0x01); */
- write_regm(ci, 0x08, 0x40, 0x40);
- /* read_reg(ci, 0x12, &dummy); */
- write_regm(ci, 0x08, 0x80, 0x80);
-#endif
- break;
- default:
- break;
- }
- ci->cammode = mode;
-}
-
-
-
-static int init(struct cxd *ci)
-{
- int status;
-
- mutex_lock(&ci->lock);
- ci->mode = -1;
- do {
- status = write_reg(ci, 0x00, 0x00);
- if (status < 0)
- break;
- status = write_reg(ci, 0x01, 0x00);
- if (status < 0)
- break;
- status = write_reg(ci, 0x02, 0x10);
- if (status < 0)
- break;
- status = write_reg(ci, 0x03, 0x00);
- if (status < 0)
- break;
- status = write_reg(ci, 0x05, 0xFF);
- if (status < 0)
- break;
- status = write_reg(ci, 0x06, 0x1F);
- if (status < 0)
- break;
- status = write_reg(ci, 0x07, 0x1F);
- if (status < 0)
- break;
- status = write_reg(ci, 0x08, 0x28);
- if (status < 0)
- break;
- status = write_reg(ci, 0x14, 0x20);
- if (status < 0)
- break;
-
-#if 0
- /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
- status = write_reg(ci, 0x09, 0x4D);
- if (status < 0)
- break;
-#endif
- /* TOSTRT = 8, Mode B (gated clock), falling Edge,
- * Serial, POL=HIGH, MSB */
- status = write_reg(ci, 0x0A, 0xA7);
- if (status < 0)
- break;
-
- status = write_reg(ci, 0x0B, 0x33);
- if (status < 0)
- break;
- status = write_reg(ci, 0x0C, 0x33);
- if (status < 0)
- break;
-
- status = write_regm(ci, 0x14, 0x00, 0x0F);
- if (status < 0)
- break;
- status = write_reg(ci, 0x15, ci->clk_reg_b);
- if (status < 0)
- break;
- status = write_regm(ci, 0x16, 0x00, 0x0F);
- if (status < 0)
- break;
- status = write_reg(ci, 0x17, ci->clk_reg_f);
- if (status < 0)
- break;
-
- if (ci->cfg.clock_mode) {
- if (ci->cfg.polarity) {
- status = write_reg(ci, 0x09, 0x6f);
- if (status < 0)
- break;
- } else {
- status = write_reg(ci, 0x09, 0x6d);
- if (status < 0)
- break;
- }
- status = write_reg(ci, 0x20, 0x68);
- if (status < 0)
- break;
- status = write_reg(ci, 0x21, 0x00);
- if (status < 0)
- break;
- status = write_reg(ci, 0x22, 0x02);
- if (status < 0)
- break;
- } else {
- if (ci->cfg.polarity) {
- status = write_reg(ci, 0x09, 0x4f);
- if (status < 0)
- break;
- } else {
- status = write_reg(ci, 0x09, 0x4d);
- if (status < 0)
- break;
- }
-
- status = write_reg(ci, 0x20, 0x28);
- if (status < 0)
- break;
- status = write_reg(ci, 0x21, 0x00);
- if (status < 0)
- break;
- status = write_reg(ci, 0x22, 0x07);
- if (status < 0)
- break;
- }
-
- status = write_regm(ci, 0x20, 0x80, 0x80);
- if (status < 0)
- break;
- status = write_regm(ci, 0x03, 0x02, 0x02);
- if (status < 0)
- break;
- status = write_reg(ci, 0x01, 0x04);
- if (status < 0)
- break;
- status = write_reg(ci, 0x00, 0x31);
- if (status < 0)
- break;
-
- /* Put TS in bypass */
- status = write_regm(ci, 0x09, 0x08, 0x08);
- if (status < 0)
- break;
- ci->cammode = -1;
- cam_mode(ci, 0);
- } while (0);
- mutex_unlock(&ci->lock);
-
- return 0;
-}
-
-static int read_attribute_mem(struct dvb_ca_en50221 *ca,
- int slot, int address)
-{
- struct cxd *ci = ca->data;
-#if 0
- if (ci->amem_read) {
- if (address <= 0 || address > 1024)
- return -EIO;
- return ci->amem[address];
- }
-
- mutex_lock(&ci->lock);
- write_regm(ci, 0x06, 0x00, 0x05);
- read_pccard(ci, 0, &ci->amem[0], 128);
- read_pccard(ci, 128, &ci->amem[0], 128);
- read_pccard(ci, 256, &ci->amem[0], 128);
- read_pccard(ci, 384, &ci->amem[0], 128);
- write_regm(ci, 0x06, 0x05, 0x05);
- mutex_unlock(&ci->lock);
- return ci->amem[address];
-#else
- u8 val;
-
- mutex_lock(&ci->lock);
- set_mode(ci, 1);
- read_pccard(ci, address, &val, 1);
- mutex_unlock(&ci->lock);
- /* printk(KERN_INFO "%02x:%02x\n", address,val); */
- return val;
-#endif
-}
-
-static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
- int address, u8 value)
-{
- struct cxd *ci = ca->data;
-
- mutex_lock(&ci->lock);
- set_mode(ci, 1);
- write_pccard(ci, address, &value, 1);
- mutex_unlock(&ci->lock);
- return 0;
-}
-
-static int read_cam_control(struct dvb_ca_en50221 *ca,
- int slot, u8 address)
-{
- struct cxd *ci = ca->data;
- u8 val;
-
- mutex_lock(&ci->lock);
- set_mode(ci, 0);
- read_io(ci, address, &val);
- mutex_unlock(&ci->lock);
- return val;
-}
-
-static int write_cam_control(struct dvb_ca_en50221 *ca, int slot,
- u8 address, u8 value)
-{
- struct cxd *ci = ca->data;
-
- mutex_lock(&ci->lock);
- set_mode(ci, 0);
- write_io(ci, address, value);
- mutex_unlock(&ci->lock);
- return 0;
-}
-
-static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
-{
- struct cxd *ci = ca->data;
-
- mutex_lock(&ci->lock);
-#if 0
- write_reg(ci, 0x00, 0x21);
- write_reg(ci, 0x06, 0x1F);
- write_reg(ci, 0x00, 0x31);
-#else
-#if 0
- write_reg(ci, 0x06, 0x1F);
- write_reg(ci, 0x06, 0x2F);
-#else
- cam_mode(ci, 0);
- write_reg(ci, 0x00, 0x21);
- write_reg(ci, 0x06, 0x1F);
- write_reg(ci, 0x00, 0x31);
- write_regm(ci, 0x20, 0x80, 0x80);
- write_reg(ci, 0x03, 0x02);
- ci->ready = 0;
-#endif
-#endif
- ci->mode = -1;
- {
- int i;
-#if 0
- u8 val;
-#endif
- for (i = 0; i < 100; i++) {
- usleep_range(10000, 11000);
-#if 0
- read_reg(ci, 0x06, &val);
- dev_info(&ci->i2c->dev, "%d:%02x\n", i, val);
- if (!(val&0x10))
- break;
-#else
- if (ci->ready)
- break;
-#endif
- }
- }
- mutex_unlock(&ci->lock);
- /* msleep(500); */
- return 0;
-}
-
-static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
-{
- struct cxd *ci = ca->data;
-
- dev_info(&ci->i2c->dev, "slot_shutdown\n");
- mutex_lock(&ci->lock);
- write_regm(ci, 0x09, 0x08, 0x08);
- write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
- write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */
- ci->mode = -1;
- mutex_unlock(&ci->lock);
- return 0;
-}
-
-static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
-{
- struct cxd *ci = ca->data;
-
- mutex_lock(&ci->lock);
- write_regm(ci, 0x09, 0x00, 0x08);
- set_mode(ci, 0);
-#ifdef BUFFER_MODE
- cam_mode(ci, 1);
-#endif
- mutex_unlock(&ci->lock);
- return 0;
-}
-
-
-static int campoll(struct cxd *ci)
-{
- u8 istat;
-
- read_reg(ci, 0x04, &istat);
- if (!istat)
- return 0;
- write_reg(ci, 0x05, istat);
-
- if (istat&0x40) {
- ci->dr = 1;
- dev_info(&ci->i2c->dev, "DR\n");
- }
- if (istat&0x20)
- dev_info(&ci->i2c->dev, "WC\n");
-
- if (istat&2) {
- u8 slotstat;
-
- read_reg(ci, 0x01, &slotstat);
- if (!(2&slotstat)) {
- if (!ci->slot_stat) {
- ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT;
- write_regm(ci, 0x03, 0x08, 0x08);
- }
-
- } else {
- if (ci->slot_stat) {
- ci->slot_stat = 0;
- write_regm(ci, 0x03, 0x00, 0x08);
- dev_info(&ci->i2c->dev, "NO CAM\n");
- ci->ready = 0;
- }
- }
- if (istat&8 &&
- ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
- ci->ready = 1;
- ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
- }
- }
- return 0;
-}
-
-
-static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
-{
- struct cxd *ci = ca->data;
- u8 slotstat;
-
- mutex_lock(&ci->lock);
- campoll(ci);
- read_reg(ci, 0x01, &slotstat);
- mutex_unlock(&ci->lock);
-
- return ci->slot_stat;
-}
-
-#ifdef BUFFER_MODE
-static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
-{
- struct cxd *ci = ca->data;
- u8 msb, lsb;
- u16 len;
-
- mutex_lock(&ci->lock);
- campoll(ci);
- mutex_unlock(&ci->lock);
-
- dev_info(&ci->i2c->dev, "read_data\n");
- if (!ci->dr)
- return 0;
-
- mutex_lock(&ci->lock);
- read_reg(ci, 0x0f, &msb);
- read_reg(ci, 0x10, &lsb);
- len = (msb<<8)|lsb;
- read_block(ci, 0x12, ebuf, len);
- ci->dr = 0;
- mutex_unlock(&ci->lock);
-
- return len;
-}
-
-static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
-{
- struct cxd *ci = ca->data;
-
- mutex_lock(&ci->lock);
-	printk(KERN_INFO "write_data %d\n", ecount);
- write_reg(ci, 0x0d, ecount>>8);
- write_reg(ci, 0x0e, ecount&0xff);
- write_block(ci, 0x11, ebuf, ecount);
- mutex_unlock(&ci->lock);
- return ecount;
-}
-#endif
-
-static struct dvb_ca_en50221 en_templ = {
- .read_attribute_mem = read_attribute_mem,
- .write_attribute_mem = write_attribute_mem,
- .read_cam_control = read_cam_control,
- .write_cam_control = write_cam_control,
- .slot_reset = slot_reset,
- .slot_shutdown = slot_shutdown,
- .slot_ts_enable = slot_ts_enable,
- .poll_slot_status = poll_slot_status,
-#ifdef BUFFER_MODE
- .read_data = read_data,
- .write_data = write_data,
-#endif
-
-};
-
-struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
- void *priv,
- struct i2c_adapter *i2c)
-{
- struct cxd *ci;
- u8 val;
-
- if (i2c_read_reg(i2c, cfg->adr, 0, &val) < 0) {
- dev_info(&i2c->dev, "No CXD2099 detected at %02x\n", cfg->adr);
- return NULL;
- }
-
- ci = kzalloc(sizeof(struct cxd), GFP_KERNEL);
- if (!ci)
- return NULL;
-
- mutex_init(&ci->lock);
- ci->cfg = *cfg;
- ci->i2c = i2c;
- ci->lastaddress = 0xff;
- ci->clk_reg_b = 0x4a;
- ci->clk_reg_f = 0x1b;
-
- ci->en = en_templ;
- ci->en.data = ci;
- init(ci);
- dev_info(&i2c->dev, "Attached CXD2099AR at %02x\n", ci->cfg.adr);
- return &ci->en;
-}
-EXPORT_SYMBOL(cxd2099_attach);
-
-MODULE_DESCRIPTION("cxd2099");
-MODULE_AUTHOR("Ralph Metzler");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h
deleted file mode 100644
index 0eb607c5b423..000000000000
--- a/drivers/staging/media/cxd2099/cxd2099.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * cxd2099.h: Driver for the CXD2099AR Common Interface Controller
- *
- * Copyright (C) 2010-2011 Digital Devices GmbH
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
- */
-
-#ifndef _CXD2099_H_
-#define _CXD2099_H_
-
-#include <dvb_ca_en50221.h>
-
-struct cxd2099_cfg {
- u32 bitrate;
- u8 adr;
- u8 polarity:1;
- u8 clock_mode:1;
-};
-
-#if defined(CONFIG_DVB_CXD2099) || \
- (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE))
-struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
- void *priv, struct i2c_adapter *i2c);
-#else
-
-static inline struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
- void *priv, struct i2c_adapter *i2c)
-{
- dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
-
-#endif
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
deleted file mode 100644
index 4de2f082491d..000000000000
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config VIDEO_DM365_VPFE
- tristate "DM365 VPFE Media Controller Capture Driver"
- depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- help
- Support for DM365 VPFE based Media Controller Capture driver.
-
- To compile this driver as a module, choose M here: the
- module will be called vpfe-mc-capture.
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
deleted file mode 100644
index c64515c644cd..000000000000
--- a/drivers/staging/media/davinci_vpfe/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_VIDEO_DM365_VPFE) += \
- dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \
- dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o
diff --git a/drivers/staging/media/davinci_vpfe/TODO b/drivers/staging/media/davinci_vpfe/TODO
deleted file mode 100644
index 7015ab35ded5..000000000000
--- a/drivers/staging/media/davinci_vpfe/TODO
+++ /dev/null
@@ -1,37 +0,0 @@
-TODO (general):
-==================================
-
-- User space interface refinement
- - Controls should be used when possible rather than private ioctl
- - No enums should be used
- - Use of MC and V4L2 subdev APIs when applicable
- - Single interface header might suffice
- - The current interface forces everything to be configured at once
-- Get rid of the dm365_ipipe_hw.[ch] layer
-- Active external sub-devices defined by link configuration; no strcmp
- needed
-- More generic platform data (i2c adapters)
-- The driver should have no knowledge of possible external subdevs; see
- struct vpfe_subdev_id
-- Some of the hardware control should be refactored
-- Check proper serialisation (through mutexes and spinlocks)
-- Names that are visible in kernel global namespace should have a common
- prefix (or a few)
-- While replacing the older driver in the media folder, provide a compatibility
- layer and compatibility tests that ensure (using libv4l's LD_PRELOAD
- approach) there is no regression for users of the older driver.
-
-Building of uImage and Applications:
-==================================
-
-As of now, since the interface will undergo a few changes, all the include
-files are present in staging itself. To build for dm365, follow the steps below:
-
-- copy vpfe.h from drivers/staging/media/davinci_vpfe/ to
- include/media/davinci/ folder for building the uImage.
-- copy davinci_vpfe_user.h from drivers/staging/media/davinci_vpfe/ to
- include/uapi/linux/davinci_vpfe.h, and add an entry in Kbuild (required
- for building the application).
-- copy dm365_ipipeif_user.h from drivers/staging/media/davinci_vpfe/ to
- include/uapi/linux/dm365_ipipeif.h and add an entry in Kbuild (required
- for building the application).
diff --git a/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt b/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
deleted file mode 100644
index a1e91778aa9b..000000000000
--- a/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-Davinci Video processing Front End (VPFE) driver
-
-Copyright (C) 2012 Texas Instruments Inc
-
-Contacts: Manjunath Hadli <manjunath.hadli@ti.com>
- Prabhakar Lad <prabhakar.lad@ti.com>
-
-
-Introduction
-============
-
-This file documents the Texas Instruments Davinci Video processing Front End
-(VPFE) driver located under drivers/media/platform/davinci. The original driver
-exists for the Davinci VPFE and is now being converted to the Media Controller
-framework.
-
-Currently the driver has been successfully used on the following
-version of Davinci:
-
- DM365/DM368
-
-The driver implements V4L2, Media controller and v4l2_subdev interfaces. Sensor,
-lens and flash drivers using the v4l2_subdev interface in the kernel are
-supported.
-
-
-Split to subdevs
-================
-
-The Davinci VPFE is split into V4L2 subdevs, each of the blocks inside the VPFE
-having one subdev to represent it. Each of the subdevs provides a V4L2 subdev
-interface to userspace.
-
- DAVINCI ISIF
- DAVINCI IPIPEIF
- DAVINCI IPIPE
- DAVINCI CROP RESIZER
- DAVINCI RESIZER A
- DAVINCI RESIZER B
-
-Each possible link in the VPFE is modelled by a link in the Media controller
-interface. For an example program see [1].
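
For orientation only, the sketch below walks the media graph with the standard
Media controller enumeration ioctl; the /dev/media0 node name is an assumption,
and [1] remains the reference example program:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/media.h>

	int main(void)
	{
		struct media_entity_desc desc;
		__u32 id = 0;
		int fd = open("/dev/media0", O_RDONLY);	/* assumed media device node */

		if (fd < 0)
			return 1;
		for (;;) {
			memset(&desc, 0, sizeof(desc));
			desc.id = id | MEDIA_ENT_ID_FLAG_NEXT;	/* ask for the next entity */
			if (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) < 0)
				break;
			printf("entity %u: %s\n", desc.id, desc.name);
			id = desc.id;
		}
		close(fd);
		return 0;
	}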
-
-
-ISIF, IPIPE, and RESIZER block IOCTLs
-======================================
-
-The Davinci Video processing Front End (VPFE) driver supports standard V4L2
-IOCTLs and controls where possible and practical. Many of the functions provided
-by the VPFE, however, do not fall under the standard IOCTLs.
-
-In general, there is a private ioctl for configuring each of the blocks
-containing hardware-dependent functions.
-
-The following private IOCTLs are supported:
-
- VIDIOC_VPFE_ISIF_[S/G]_RAW_PARAMS
- VIDIOC_VPFE_IPIPE_[S/G]_CONFIG
- VIDIOC_VPFE_RSZ_[S/G]_CONFIG
-
-The parameter structures used by these ioctls are described in
-include/uapi/linux/davinci_vpfe.h.
-
-The VIDIOC_VPFE_ISIF_S_RAW_PARAMS, VIDIOC_VPFE_IPIPE_S_CONFIG and
-VIDIOC_VPFE_RSZ_S_CONFIG are used to configure, enable and disable functions in
-the isif, ipipe and resizer blocks respectively. These IOCTLs control several
-functions in the blocks they manage. The VIDIOC_VPFE_ISIF_S_RAW_PARAMS IOCTL
-accepts a pointer to struct vpfe_isif_raw_config as its argument. Similarly,
-VIDIOC_VPFE_IPIPE_S_CONFIG accepts a pointer to struct vpfe_ipipe_config, and
-VIDIOC_VPFE_RSZ_S_CONFIG accepts a pointer to struct vpfe_rsz_config as its
-argument. Similarly VIDIOC_VPFE_ISIF_G_RAW_PARAMS, VIDIOC_VPFE_IPIPE_G_CONFIG
-and VIDIOC_VPFE_RSZ_G_CONFIG are used to get the current configuration set in
-the isif, ipipe and resizer blocks respectively.
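
As a hedged sketch of how these private ioctls are driven from userspace (the
v4l-subdev node number is an assumption, and only one field of the ISIF raw
configuration is touched; everything else is read back and re-used as-is):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "davinci_vpfe_user.h"	/* copied out of staging as described in the TODO */

	int main(void)
	{
		struct vpfe_isif_raw_config raw;
		int fd = open("/dev/v4l-subdev2", O_RDWR);	/* assumed ISIF subdev node */

		if (fd < 0)
			return 1;
		memset(&raw, 0, sizeof(raw));
		if (ioctl(fd, VIDIOC_VPFE_ISIF_G_RAW_PARAMS, &raw) == 0) {
			raw.bclamp.dc_offset = 0;	/* adjust one parameter, keep the rest */
			ioctl(fd, VIDIOC_VPFE_ISIF_S_RAW_PARAMS, &raw);
		}
		close(fd);
		return 0;
	}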
-
-The detailed functions of the VPFE itself related to a given VPFE block are
-described in the Technical Reference Manuals (TRMs) --- see the end of the
-document for those.
-
-
-IPIPEIF block IOCTLs
-======================================
-
-The following private IOCTLs are supported:
-
- VIDIOC_VPFE_IPIPEIF_[S/G]_CONFIG
-
-The parameter structures used by these ioctls are described in
-include/uapi/linux/dm365_ipipeif.h
-
-The VIDIOC_VPFE_IPIPEIF_S_CONFIG is used to configure the ipipeif
-hardware block. Both VIDIOC_VPFE_IPIPEIF_S_CONFIG and
-VIDIOC_VPFE_IPIPEIF_G_CONFIG accept a pointer to struct ipipeif_params
-as their argument.
-
-
-VPFE Operating Modes
-==========================================
-
-a: Continuous Modes
-------------------------
-
-1: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> SDRAM
-
-2: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
- |
- <--------------------<----------------<---------------------<---|
- |
- V
- DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
-
-3: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
- |
- <--------------------<----------------<---------------------<---|
- |
- V
- DAVINCI IPIPE---> DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
-
-b: Single Shot Modes
-------------------------
-
-1: SDRAM---> DAVINCI IPIPEIF---> DAVINCI IPIPE---> DAVINCI CROP RESIZER--->|
- |
- <----------------<----------------<------------------<---------------<--|
- |
- V
-DAVINCI RESIZER [A/B]---> SDRAM
-
-2: SDRAM---> DAVINCI IPIPEIF---> DAVINCI CROP RESIZER--->|
- |
- <----------------<----------------<---------------<---|
- |
- V
-DAVINCI RESIZER [A/B]---> SDRAM
-
-
-Technical reference manuals (TRMs) and other documentation
-==========================================================
-
-Davinci DM365 TRM:
-<URL:http://www.ti.com/lit/ds/sprs457e/sprs457e.pdf>
-Referenced MARCH 2009-REVISED JUNE 2011
-
-Davinci DM368 TRM:
-<URL:http://www.ti.com/lit/ds/sprs668c/sprs668c.pdf>
-Referenced APRIL 2010-REVISED JUNE 2011
-
-Davinci Video Processing Front End (VPFE) DM36x
-<URL:http://www.ti.com/lit/ug/sprufg8c/sprufg8c.pdf>
-
-
-References
-==========
-
-[1] http://git.ideasonboard.org/?p=media-ctl.git;a=summary
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
deleted file mode 100644
index 7b7e7b26c1e8..000000000000
--- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
+++ /dev/null
@@ -1,1290 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_USER_H
-#define _DAVINCI_VPFE_USER_H
-
-#include <linux/types.h>
-#include <linux/videodev2.h>
-
-/*
- * Private IOCTL
- *
- * VIDIOC_VPFE_ISIF_S_RAW_PARAMS: Set raw params in isif
- * VIDIOC_VPFE_ISIF_G_RAW_PARAMS: Get raw params from isif
- * VIDIOC_VPFE_IPIPE_S_CONFIG: Set ipipe engine configuration
- * VIDIOC_VPFE_IPIPE_G_CONFIG: Get ipipe engine configuration
- * VIDIOC_VPFE_RSZ_S_CONFIG: Set resizer engine configuration
- * VIDIOC_VPFE_RSZ_G_CONFIG: Get resizer engine configuration
- */
-
-#define VIDIOC_VPFE_ISIF_S_RAW_PARAMS \
- _IOW('V', BASE_VIDIOC_PRIVATE + 1, struct vpfe_isif_raw_config)
-#define VIDIOC_VPFE_ISIF_G_RAW_PARAMS \
- _IOR('V', BASE_VIDIOC_PRIVATE + 2, struct vpfe_isif_raw_config)
-#define VIDIOC_VPFE_IPIPE_S_CONFIG \
- _IOWR('P', BASE_VIDIOC_PRIVATE + 3, struct vpfe_ipipe_config)
-#define VIDIOC_VPFE_IPIPE_G_CONFIG \
- _IOWR('P', BASE_VIDIOC_PRIVATE + 4, struct vpfe_ipipe_config)
-#define VIDIOC_VPFE_RSZ_S_CONFIG \
- _IOWR('R', BASE_VIDIOC_PRIVATE + 5, struct vpfe_rsz_config)
-#define VIDIOC_VPFE_RSZ_G_CONFIG \
- _IOWR('R', BASE_VIDIOC_PRIVATE + 6, struct vpfe_rsz_config)
-
-/*
- * Private Controls for ISIF
- */
-#define VPFE_ISIF_CID_CRGAIN (V4L2_CID_USER_BASE | 0xa001)
-#define VPFE_ISIF_CID_CGRGAIN (V4L2_CID_USER_BASE | 0xa002)
-#define VPFE_ISIF_CID_CGBGAIN (V4L2_CID_USER_BASE | 0xa003)
-#define VPFE_ISIF_CID_CBGAIN (V4L2_CID_USER_BASE | 0xa004)
-#define VPFE_ISIF_CID_GAIN_OFFSET (V4L2_CID_USER_BASE | 0xa005)
-
-/*
- * Private Controls for ISIF and IPIPEIF
- */
-#define VPFE_CID_DPCM_PREDICTOR (V4L2_CID_USER_BASE | 0xa006)
-
-/************************************************************************
- * Vertical Defect Correction parameters
- ***********************************************************************/
-
-/**
- * vertical defect correction methods
- */
-enum vpfe_isif_vdfc_corr_mode {
- /* Defect level subtraction. Just fed through if saturating */
- VPFE_ISIF_VDFC_NORMAL,
- /**
- * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
- * if data saturating
- */
- VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT,
- /* Horizontal interpolation (((i-2)+(i+2))/2) */
- VPFE_ISIF_VDFC_HORZ_INTERPOL
-};
-
-/**
- * Max Size of the Vertical Defect Correction table
- */
-#define VPFE_ISIF_VDFC_TABLE_SIZE 8
-
-/**
- * Values used for shifting up the vdfc defect level
- */
-enum vpfe_isif_vdfc_shift {
- /* No Shift */
- VPFE_ISIF_VDFC_NO_SHIFT,
- /* Shift by 1 bit */
- VPFE_ISIF_VDFC_SHIFT_1,
- /* Shift by 2 bit */
- VPFE_ISIF_VDFC_SHIFT_2,
- /* Shift by 3 bit */
- VPFE_ISIF_VDFC_SHIFT_3,
- /* Shift by 4 bit */
- VPFE_ISIF_VDFC_SHIFT_4
-};
-
-/**
- * Defect Correction (DFC) table entry
- */
-struct vpfe_isif_vdfc_entry {
- /* vertical position of defect */
- unsigned short pos_vert;
- /* horizontal position of defect */
- unsigned short pos_horz;
- /**
- * Defect level of Vertical line defect position. This is subtracted
- * from the data at the defect position
- */
- unsigned char level_at_pos;
- /**
-	 * Defect level of the pixels above the vertical line defect.
- * This is subtracted from the data
- */
- unsigned char level_up_pixels;
- /**
-	 * Defect level of the pixels below the vertical line defect.
- * This is subtracted from the data
- */
- unsigned char level_low_pixels;
-};
-
-/**
- * Structure for Defect Correction (DFC) parameter
- */
-struct vpfe_isif_dfc {
- /* enable vertical defect correction */
- unsigned char en;
- /* Correction methods */
- enum vpfe_isif_vdfc_corr_mode corr_mode;
- /**
- * 0 - whole line corrected, 1 - not
- * pixels upper than the defect
- */
- unsigned char corr_whole_line;
- /**
- * defect level shift value. level_at_pos, level_upper_pos,
- * and level_lower_pos can be shifted up by this value
- */
- enum vpfe_isif_vdfc_shift def_level_shift;
- /* defect saturation level */
- unsigned short def_sat_level;
- /* number of vertical defects. Max is VPFE_ISIF_VDFC_TABLE_SIZE */
- short num_vdefects;
- /* VDFC table ptr */
- struct vpfe_isif_vdfc_entry table[VPFE_ISIF_VDFC_TABLE_SIZE];
-};
-
-/************************************************************************
-* Digital/Black clamp or DC Subtract parameters
-************************************************************************/
-/**
- * Horizontal Black Clamp modes
- */
-enum vpfe_isif_horz_bc_mode {
- /**
- * Horizontal clamp disabled. Only vertical clamp
- * value is subtracted
- */
- VPFE_ISIF_HORZ_BC_DISABLE,
- /**
- * Horizontal clamp value is calculated and subtracted
- * from image data along with vertical clamp value
- */
- VPFE_ISIF_HORZ_BC_CLAMP_CALC_ENABLED,
- /**
- * Horizontal clamp value calculated from previous image
- * is subtracted from image data along with vertical clamp
- * value. How the horizontal clamp value for the first image
- * is calculated in this case ???
- */
- VPFE_ISIF_HORZ_BC_CLAMP_NOT_UPDATED
-};
-
-/**
- * Base window selection for Horizontal Black Clamp calculations
- */
-enum vpfe_isif_horz_bc_base_win_sel {
- /* Select Most left window for bc calculation */
- VPFE_ISIF_SEL_MOST_LEFT_WIN,
-
- /* Select Most right window for bc calculation */
- VPFE_ISIF_SEL_MOST_RIGHT_WIN,
-};
-
-/* Size of window in horizontal direction for horizontal bc */
-enum vpfe_isif_horz_bc_sz_h {
- VPFE_ISIF_HORZ_BC_SZ_H_2PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_4PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_8PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_16PIXELS
-};
-
-/* Size of window in vertical direction for vertical bc */
-enum vpfe_isif_horz_bc_sz_v {
- VPFE_ISIF_HORZ_BC_SZ_H_32PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_64PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_128PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_256PIXELS
-};
-
-/**
- * Structure for Horizontal Black Clamp config params
- */
-struct vpfe_isif_horz_bclamp {
- /* horizontal clamp mode */
- enum vpfe_isif_horz_bc_mode mode;
- /**
- * pixel value limit enable.
- * 0 - limit disabled
- * 1 - pixel value limited to 1023
- */
- unsigned char clamp_pix_limit;
- /**
- * Select most left or right window for clamp val
- * calculation
- */
- enum vpfe_isif_horz_bc_base_win_sel base_win_sel_calc;
- /* Window count per color for calculation. range 1-32 */
- unsigned char win_count_calc;
- /* Window start position - horizontal for calculation. 0 - 8191 */
- unsigned short win_start_h_calc;
- /* Window start position - vertical for calculation 0 - 8191 */
- unsigned short win_start_v_calc;
- /* Width of the sample window in pixels for calculation */
- enum vpfe_isif_horz_bc_sz_h win_h_sz_calc;
- /* Height of the sample window in pixels for calculation */
- enum vpfe_isif_horz_bc_sz_v win_v_sz_calc;
-};
-
-/**
- * Black Clamp vertical reset values
- */
-enum vpfe_isif_vert_bc_reset_val_sel {
- /* Reset value used is the clamp value calculated */
- VPFE_ISIF_VERT_BC_USE_HORZ_CLAMP_VAL,
- /* Reset value used is reset_clamp_val configured */
- VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL,
- /* No update, previous image value is used */
- VPFE_ISIF_VERT_BC_NO_UPDATE
-};
-
-enum vpfe_isif_vert_bc_sz_h {
- VPFE_ISIF_VERT_BC_SZ_H_2PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_4PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_8PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_16PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_32PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_64PIXELS
-};
-
-/**
- * Structure for Vertical Black Clamp configuration params
- */
-struct vpfe_isif_vert_bclamp {
- /* Reset value selection for vertical clamp calculation */
- enum vpfe_isif_vert_bc_reset_val_sel reset_val_sel;
- /* U12 value if reset_sel = ISIF_BC_VERT_USE_CONFIG_CLAMP_VAL */
- unsigned short reset_clamp_val;
- /**
- * U8Q8. Line average coefficient used in vertical clamp
- * calculation
- */
- unsigned char line_ave_coef;
- /* Width in pixels of the optical black region used for calculation. */
- enum vpfe_isif_vert_bc_sz_h ob_h_sz_calc;
- /* Height of the optical black region for calculation */
- unsigned short ob_v_sz_calc;
- /* Optical black region start position - horizontal. 0 - 8191 */
- unsigned short ob_start_h;
- /* Optical black region start position - vertical 0 - 8191 */
- unsigned short ob_start_v;
-};
-
-/**
- * Structure for Black Clamp configuration params
- */
-struct vpfe_isif_black_clamp {
- /**
- * this offset value is added irrespective of the clamp
- * enable status. S13
- */
- unsigned short dc_offset;
- /**
- * Enable black/digital clamp value to be subtracted
- * from the image data
- */
- unsigned char en;
- /**
- * black clamp mode. same/separate clamp for 4 colors
- * 0 - disable - same clamp value for all colors
- * 1 - clamp value calculated separately for all colors
- */
- unsigned char bc_mode_color;
- /* Vertical start position for bc subtraction */
- unsigned short vert_start_sub;
- /* Black clamp for horizontal direction */
- struct vpfe_isif_horz_bclamp horz;
- /* Black clamp for vertical direction */
- struct vpfe_isif_vert_bclamp vert;
-};
-
-/*************************************************************************
-** Color Space Conversion (CSC)
-*************************************************************************/
-/**
- * Number of Coefficient values used for CSC
- */
-#define VPFE_ISIF_CSC_NUM_COEFF 16
-
-struct float_8_bit {
- /* 8 bit integer part */
- __u8 integer;
- /* 8 bit decimal part */
- __u8 decimal;
-};
-
-struct float_16_bit {
- /* 16 bit integer part */
- __u16 integer;
- /* 16 bit decimal part */
- __u16 decimal;
-};
-
-/*************************************************************************
-** Color Space Conversion parameters
-*************************************************************************/
-/**
- * Structure used for CSC config params
- */
-struct vpfe_isif_color_space_conv {
- /* Enable color space conversion */
- unsigned char en;
- /**
- * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
- * so forth
- */
- struct float_8_bit coeff[VPFE_ISIF_CSC_NUM_COEFF];
-};
-
-enum vpfe_isif_datasft {
- /* No Shift */
- VPFE_ISIF_NO_SHIFT,
- /* 1 bit Shift */
- VPFE_ISIF_1BIT_SHIFT,
- /* 2 bit Shift */
- VPFE_ISIF_2BIT_SHIFT,
- /* 3 bit Shift */
- VPFE_ISIF_3BIT_SHIFT,
- /* 4 bit Shift */
- VPFE_ISIF_4BIT_SHIFT,
- /* 5 bit Shift */
- VPFE_ISIF_5BIT_SHIFT,
- /* 6 bit Shift */
- VPFE_ISIF_6BIT_SHIFT
-};
-
-#define VPFE_ISIF_LINEAR_TAB_SIZE 192
-/*************************************************************************
-** Linearization parameters
-*************************************************************************/
-/**
- * Structure for Sensor data linearization
- */
-struct vpfe_isif_linearize {
- /* Enable or Disable linearization of data */
- unsigned char en;
- /* Shift value applied */
- enum vpfe_isif_datasft corr_shft;
- /* scale factor applied U11Q10 */
- struct float_16_bit scale_fact;
- /* Size of the linear table */
- unsigned short table[VPFE_ISIF_LINEAR_TAB_SIZE];
-};
-
-/*************************************************************************
-** ISIF Raw configuration parameters
-*************************************************************************/
-enum vpfe_isif_fmt_mode {
- VPFE_ISIF_SPLIT,
- VPFE_ISIF_COMBINE
-};
-
-enum vpfe_isif_lnum {
- VPFE_ISIF_1LINE,
- VPFE_ISIF_2LINES,
- VPFE_ISIF_3LINES,
- VPFE_ISIF_4LINES
-};
-
-enum vpfe_isif_line {
- VPFE_ISIF_1STLINE,
- VPFE_ISIF_2NDLINE,
- VPFE_ISIF_3RDLINE,
- VPFE_ISIF_4THLINE
-};
-
-struct vpfe_isif_fmtplen {
- /**
- * number of program entries for SET0, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen0;
- /**
- * number of program entries for SET1, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen1;
- /**
- * number of program entries for SET2, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen2;
- /**
- * number of program entries for SET3, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen3;
-};
-
-struct vpfe_isif_fmt_cfg {
- /* Split or combine or line alternate */
- enum vpfe_isif_fmt_mode fmtmode;
- /* enable or disable line alternating mode */
- unsigned char ln_alter_en;
- /* Split/combine line number */
- enum vpfe_isif_lnum lnum;
- /* Address increment Range 1 - 16 */
- unsigned int addrinc;
-};
-
-struct vpfe_isif_fmt_addr_ptr {
- /* Initial address */
- unsigned int init_addr;
- /* output line number */
- enum vpfe_isif_line out_line;
-};
-
-struct vpfe_isif_fmtpgm_ap {
- /* program address pointer */
- unsigned char pgm_aptr;
- /* program address increment or decrement */
- unsigned char pgmupdt;
-};
-
-struct vpfe_isif_data_formatter {
- /* Enable/Disable data formatter */
- unsigned char en;
- /* data formatter configuration */
- struct vpfe_isif_fmt_cfg cfg;
- /* Formatter program entries length */
- struct vpfe_isif_fmtplen plen;
- /* first pixel in a line fed to formatter */
- unsigned short fmtrlen;
- /* HD interval for output line. Only valid when split line */
- unsigned short fmthcnt;
- /* formatter address pointers */
- struct vpfe_isif_fmt_addr_ptr fmtaddr_ptr[16];
- /* program enable/disable */
- unsigned char pgm_en[32];
- /* program address pointers */
- struct vpfe_isif_fmtpgm_ap fmtpgm_ap[32];
-};
-
-struct vpfe_isif_df_csc {
- /* Color Space Conversion configuration, 0 - csc, 1 - df */
- unsigned int df_or_csc;
- /* csc configuration valid if df_or_csc is 0 */
- struct vpfe_isif_color_space_conv csc;
- /* data formatter configuration valid if df_or_csc is 1 */
- struct vpfe_isif_data_formatter df;
- /* start pixel in a line at the input */
- unsigned int start_pix;
- /* number of pixels in input line */
- unsigned int num_pixels;
- /* start line at the input */
- unsigned int start_line;
- /* number of lines at the input */
- unsigned int num_lines;
-};
-
-struct vpfe_isif_gain_offsets_adj {
- /* Enable or Disable Gain adjustment for SDRAM data */
- unsigned char gain_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- unsigned char gain_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- unsigned char gain_h3a_en;
- /* Enable or Disable Gain adjustment for SDRAM data */
- unsigned char offset_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- unsigned char offset_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- unsigned char offset_h3a_en;
-};
-
-struct vpfe_isif_cul {
- /* Horizontal Cull pattern for odd lines */
- unsigned char hcpat_odd;
- /* Horizontal Cull pattern for even lines */
- unsigned char hcpat_even;
- /* Vertical Cull pattern */
- unsigned char vcpat;
- /* Enable or disable lpf. Apply when cull is enabled */
- unsigned char en_lpf;
-};
-
-/* all the stuff in this struct will be provided by userland */
-struct vpfe_isif_raw_config {
- /* Linearization parameters for image sensor data input */
- struct vpfe_isif_linearize linearize;
- /* Data formatter or CSC */
- struct vpfe_isif_df_csc df_csc;
-	/* Defect Pixel Correction (DFC) configuration */
- struct vpfe_isif_dfc dfc;
- /* Black/Digital Clamp configuration */
- struct vpfe_isif_black_clamp bclamp;
- /* Gain, offset adjustments */
- struct vpfe_isif_gain_offsets_adj gain_offset;
- /* Culling */
- struct vpfe_isif_cul culling;
- /* horizontal offset for Gain/LSC/DFC */
- unsigned short horz_offset;
- /* vertical offset for Gain/LSC/DFC */
- unsigned short vert_offset;
-};
-
-/**********************************************************************
- IPIPE API Structures
-**********************************************************************/
-
-/* IPIPE module configurations */
-
-/* IPIPE input configuration */
-#define VPFE_IPIPE_INPUT_CONFIG (1 << 0)
-/* LUT based Defect Pixel Correction */
-#define VPFE_IPIPE_LUTDPC (1 << 1)
-/* On the fly (OTF) Defect Pixel Correction */
-#define VPFE_IPIPE_OTFDPC (1 << 2)
-/* Noise Filter - 1 */
-#define VPFE_IPIPE_NF1 (1 << 3)
-/* Noise Filter - 2 */
-#define VPFE_IPIPE_NF2 (1 << 4)
-/* White Balance. Also a control ID */
-#define VPFE_IPIPE_WB (1 << 5)
-/* 1st RGB to RGB Blend module */
-#define VPFE_IPIPE_RGB2RGB_1 (1 << 6)
-/* 2nd RGB to RGB Blend module */
-#define VPFE_IPIPE_RGB2RGB_2 (1 << 7)
-/* Gamma Correction */
-#define VPFE_IPIPE_GAMMA (1 << 8)
-/* 3D LUT color conversion */
-#define VPFE_IPIPE_3D_LUT (1 << 9)
-/* RGB to YCbCr module */
-#define VPFE_IPIPE_RGB2YUV (1 << 10)
-/* YUV 422 conversion module */
-#define VPFE_IPIPE_YUV422_CONV (1 << 11)
-/* Edge Enhancement */
-#define VPFE_IPIPE_YEE (1 << 12)
-/* Green Imbalance Correction */
-#define VPFE_IPIPE_GIC (1 << 13)
-/* CFA Interpolation */
-#define VPFE_IPIPE_CFA (1 << 14)
-/* Chroma Artifact Reduction */
-#define VPFE_IPIPE_CAR (1 << 15)
-/* Chroma Gain Suppression */
-#define VPFE_IPIPE_CGS (1 << 16)
-/* Global brightness and contrast control */
-#define VPFE_IPIPE_GBCE (1 << 17)
-
-#define VPFE_IPIPE_MAX_MODULES 18
-
-struct ipipe_float_u16 {
- unsigned short integer;
- unsigned short decimal;
-};
-
-struct ipipe_float_s16 {
- short integer;
- unsigned short decimal;
-};
-
-struct ipipe_float_u8 {
- unsigned char integer;
- unsigned char decimal;
-};
-
-/* Copy method selection for vertical correction
- * Used when ipipe_dfc_corr_meth is IPIPE_DPC_CTORB_AFTER_HINT
- */
-enum vpfe_ipipe_dpc_corr_meth {
- /* replace by black or white dot specified by repl_white */
- VPFE_IPIPE_DPC_REPL_BY_DOT = 0,
- /* Copy from left */
- VPFE_IPIPE_DPC_CL = 1,
- /* Copy from right */
- VPFE_IPIPE_DPC_CR = 2,
- /* Horizontal interpolation */
- VPFE_IPIPE_DPC_H_INTP = 3,
- /* Vertical interpolation */
- VPFE_IPIPE_DPC_V_INTP = 4,
- /* Copy from top */
- VPFE_IPIPE_DPC_CT = 5,
- /* Copy from bottom */
- VPFE_IPIPE_DPC_CB = 6,
- /* 2D interpolation */
- VPFE_IPIPE_DPC_2D_INTP = 7,
-};
-
-struct vpfe_ipipe_lutdpc_entry {
- /* Horizontal position */
- unsigned short horz_pos;
- /* vertical position */
- unsigned short vert_pos;
- enum vpfe_ipipe_dpc_corr_meth method;
-};
-
-#define VPFE_IPIPE_MAX_SIZE_DPC 256
-
-/* Structure for configuring DPC module */
-struct vpfe_ipipe_lutdpc {
- /* 0 - disable, 1 - enable */
- unsigned char en;
-	/* 0 - replace with black dot, 1 - white dot, when the correction
-	 * method is VPFE_IPIPE_DPC_REPL_BY_DOT
- */
- unsigned char repl_white;
-	/* number of entries in the correction table. Currently only
-	 * up to 256 entries are supported; infinite mode is not supported
- */
- unsigned short dpc_size;
- struct vpfe_ipipe_lutdpc_entry table[VPFE_IPIPE_MAX_SIZE_DPC];
-};
-
-enum vpfe_ipipe_otfdpc_det_meth {
- VPFE_IPIPE_DPC_OTF_MIN_MAX,
- VPFE_IPIPE_DPC_OTF_MIN_MAX2
-};
-
-struct vpfe_ipipe_otfdpc_thr {
- unsigned short r;
- unsigned short gr;
- unsigned short gb;
- unsigned short b;
-};
-
-enum vpfe_ipipe_otfdpc_alg {
- VPFE_IPIPE_OTFDPC_2_0,
- VPFE_IPIPE_OTFDPC_3_0
-};
-
-struct vpfe_ipipe_otfdpc_2_0_cfg {
- /* defect detection threshold for MIN_MAX2 method (DPC 2.0 alg) */
- struct vpfe_ipipe_otfdpc_thr det_thr;
- /* defect correction threshold for MIN_MAX2 method (DPC 2.0 alg) or
- * maximum value for MIN_MAX method
- */
- struct vpfe_ipipe_otfdpc_thr corr_thr;
-};
-
-struct vpfe_ipipe_otfdpc_3_0_cfg {
- /* DPC3.0 activity adj shf. activity = (max2-min2) >> (6 -shf)
- */
- unsigned char act_adj_shf;
- /* DPC3.0 detection threshold, THR */
- unsigned short det_thr;
- /* DPC3.0 detection threshold slope, SLP */
- unsigned short det_slp;
- /* DPC3.0 detection threshold min, MIN */
- unsigned short det_thr_min;
- /* DPC3.0 detection threshold max, MAX */
- unsigned short det_thr_max;
- /* DPC3.0 correction threshold, THR */
- unsigned short corr_thr;
- /* DPC3.0 correction threshold slope, SLP */
- unsigned short corr_slp;
- /* DPC3.0 correction threshold min, MIN */
- unsigned short corr_thr_min;
- /* DPC3.0 correction threshold max, MAX */
- unsigned short corr_thr_max;
-};
-
-struct vpfe_ipipe_otfdpc {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* defect detection method */
- enum vpfe_ipipe_otfdpc_det_meth det_method;
- /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
- * used
- */
- enum vpfe_ipipe_otfdpc_alg alg;
- union {
- /* if alg is IPIPE_OTFDPC_2_0 */
- struct vpfe_ipipe_otfdpc_2_0_cfg dpc_2_0;
- /* if alg is IPIPE_OTFDPC_3_0 */
- struct vpfe_ipipe_otfdpc_3_0_cfg dpc_3_0;
- } alg_cfg;
-};
-
-/* Threshold values table size */
-#define VPFE_IPIPE_NF_THR_TABLE_SIZE 8
-/* Intensity values table size */
-#define VPFE_IPIPE_NF_STR_TABLE_SIZE 8
-
-/* NF, sampling method for green pixels */
-enum vpfe_ipipe_nf_sampl_meth {
- /* Same as R or B */
- VPFE_IPIPE_NF_BOX,
- /* Diamond mode */
- VPFE_IPIPE_NF_DIAMOND
-};
-
-/* Structure for configuring NF module */
-struct vpfe_ipipe_nf {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* Sampling method for green pixels */
- enum vpfe_ipipe_nf_sampl_meth gr_sample_meth;
- /* Down shift value in LUT reference address
- */
- unsigned char shft_val;
- /* Spread value in NF algorithm
- */
- unsigned char spread_val;
- /* Apply LSC gain to threshold. Enable this only if
- * LSC is enabled in ISIF
- */
- unsigned char apply_lsc_gain;
- /* Threshold values table */
- unsigned short thr[VPFE_IPIPE_NF_THR_TABLE_SIZE];
- /* intensity values table */
- unsigned char str[VPFE_IPIPE_NF_STR_TABLE_SIZE];
- /* Edge detection minimum threshold */
- unsigned short edge_det_min_thr;
- /* Edge detection maximum threshold */
- unsigned short edge_det_max_thr;
-};
-
-enum vpfe_ipipe_gic_alg {
- VPFE_IPIPE_GIC_ALG_CONST_GAIN,
- VPFE_IPIPE_GIC_ALG_ADAPT_GAIN
-};
-
-enum vpfe_ipipe_gic_thr_sel {
- VPFE_IPIPE_GIC_THR_REG,
- VPFE_IPIPE_GIC_THR_NF
-};
-
-enum vpfe_ipipe_gic_wt_fn_type {
- /* Use difference as index */
- VPFE_IPIPE_GIC_WT_FN_TYP_DIF,
- /* Use weight function as index */
- VPFE_IPIPE_GIC_WT_FN_TYP_HP_VAL
-};
-
-/* structure for Green Imbalance Correction */
-struct vpfe_ipipe_gic {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* 0 - Constant gain , 1 - Adaptive gain algorithm */
- enum vpfe_ipipe_gic_alg gic_alg;
- /* GIC gain or weight. Used for Constant gain and Adaptive algorithms
- */
- unsigned short gain;
- /* Threshold selection. GIC register values or NF2 thr table */
- enum vpfe_ipipe_gic_thr_sel thr_sel;
- /* thr1. Used when thr_sel is IPIPE_GIC_THR_REG */
- unsigned short thr;
-	/* This value is used for thr2-thr1, thr3-thr2 or
-	 * thr4-thr3 when wt_fn_type is index. Otherwise it
-	 * is used directly as the slope.
-	 */
- unsigned short slope;
- /* Apply LSC gain to threshold. Enable this only if
- * LSC is enabled in ISIF & thr_sel is IPIPE_GIC_THR_REG
- */
- unsigned char apply_lsc_gain;
- /* Multiply Nf2 threshold by this gain. Use this when thr_sel
- * is IPIPE_GIC_THR_NF
- */
- struct ipipe_float_u8 nf2_thr_gain;
- /* Weight function uses difference as index or high pass value.
- * Used for adaptive gain algorithm
- */
- enum vpfe_ipipe_gic_wt_fn_type wt_fn_type;
-};
-
-/* Structure for configuring WB module */
-struct vpfe_ipipe_wb {
- /* Offset (S12) for R */
- short ofst_r;
- /* Offset (S12) for Gr */
- short ofst_gr;
- /* Offset (S12) for Gb */
- short ofst_gb;
- /* Offset (S12) for B */
- short ofst_b;
- /* Gain (U13Q9) for Red */
- struct ipipe_float_u16 gain_r;
- /* Gain (U13Q9) for Gr */
- struct ipipe_float_u16 gain_gr;
- /* Gain (U13Q9) for Gb */
- struct ipipe_float_u16 gain_gb;
- /* Gain (U13Q9) for Blue */
- struct ipipe_float_u16 gain_b;
-};
-
-enum vpfe_ipipe_cfa_alg {
- /* Algorithm is 2DirAC */
- VPFE_IPIPE_CFA_ALG_2DIRAC,
- /* Algorithm is 2DirAC + Digital Antialiasing (DAA) */
- VPFE_IPIPE_CFA_ALG_2DIRAC_DAA,
- /* Algorithm is DAA */
- VPFE_IPIPE_CFA_ALG_DAA
-};
-
-/* Structure for CFA Interpolation */
-struct vpfe_ipipe_cfa {
- /* 2DirAC or 2DirAC + DAA */
- enum vpfe_ipipe_cfa_alg alg;
- /* 2Dir CFA HP value Low Threshold */
- unsigned short hpf_thr_2dir;
- /* 2Dir CFA HP value slope */
- unsigned short hpf_slp_2dir;
- /* 2Dir CFA HP mix threshold */
- unsigned short hp_mix_thr_2dir;
- /* 2Dir CFA HP mix slope */
- unsigned short hp_mix_slope_2dir;
- /* 2Dir Direction threshold */
- unsigned short dir_thr_2dir;
- /* 2Dir Direction slope */
- unsigned short dir_slope_2dir;
- /* 2Dir Non Directional Weight */
- unsigned short nd_wt_2dir;
- /* DAA Mono Hue Fraction */
- unsigned short hue_fract_daa;
- /* DAA Mono Edge threshold */
- unsigned short edge_thr_daa;
- /* DAA Mono threshold minimum */
- unsigned short thr_min_daa;
- /* DAA Mono threshold slope */
- unsigned short thr_slope_daa;
- /* DAA Mono slope minimum */
- unsigned short slope_min_daa;
- /* DAA Mono slope slope */
- unsigned short slope_slope_daa;
-	/* DAA Mono LP weight */
- unsigned short lp_wt_daa;
-};
-
-/* Struct for configuring RGB2RGB blending module */
-struct vpfe_ipipe_rgb2rgb {
- /* Matrix coefficient for RR S12Q8 for ID = 1 and S11Q8 for ID = 2 */
- struct ipipe_float_s16 coef_rr;
- /* Matrix coefficient for GR S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_gr;
- /* Matrix coefficient for BR S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_br;
- /* Matrix coefficient for RG S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_rg;
- /* Matrix coefficient for GG S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_gg;
- /* Matrix coefficient for BG S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_bg;
- /* Matrix coefficient for RB S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_rb;
- /* Matrix coefficient for GB S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_gb;
- /* Matrix coefficient for BB S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_bb;
- /* Output offset for R S13/S11 */
- int out_ofst_r;
- /* Output offset for G S13/S11 */
- int out_ofst_g;
- /* Output offset for B S13/S11 */
- int out_ofst_b;
-};
-
-#define VPFE_IPIPE_MAX_SIZE_GAMMA 512
-
-enum vpfe_ipipe_gamma_tbl_size {
- VPFE_IPIPE_GAMMA_TBL_SZ_64 = 64,
- VPFE_IPIPE_GAMMA_TBL_SZ_128 = 128,
- VPFE_IPIPE_GAMMA_TBL_SZ_256 = 256,
- VPFE_IPIPE_GAMMA_TBL_SZ_512 = 512,
-};
-
-enum vpfe_ipipe_gamma_tbl_sel {
- VPFE_IPIPE_GAMMA_TBL_RAM = 0,
- VPFE_IPIPE_GAMMA_TBL_ROM = 1,
-};
-
-struct vpfe_ipipe_gamma_entry {
- /* 10 bit slope */
- short slope;
- /* 10 bit offset */
- unsigned short offset;
-};
-
-/* Structure for configuring Gamma correction module */
-struct vpfe_ipipe_gamma {
- /* 0 - Enable Gamma correction for Red
- * 1 - bypass Gamma correction. Data is divided by 16
- */
- unsigned char bypass_r;
- /* 0 - Enable Gamma correction for Blue
- * 1 - bypass Gamma correction. Data is divided by 16
- */
- unsigned char bypass_b;
- /* 0 - Enable Gamma correction for Green
- * 1 - bypass Gamma correction. Data is divided by 16
- */
- unsigned char bypass_g;
- /* IPIPE_GAMMA_TBL_RAM or IPIPE_GAMMA_TBL_ROM */
- enum vpfe_ipipe_gamma_tbl_sel tbl_sel;
- /* Table size for RAM gamma table.
- */
- enum vpfe_ipipe_gamma_tbl_size tbl_size;
- /* R table */
- struct vpfe_ipipe_gamma_entry table_r[VPFE_IPIPE_MAX_SIZE_GAMMA];
- /* Blue table */
- struct vpfe_ipipe_gamma_entry table_b[VPFE_IPIPE_MAX_SIZE_GAMMA];
- /* Green table */
- struct vpfe_ipipe_gamma_entry table_g[VPFE_IPIPE_MAX_SIZE_GAMMA];
-};
-
-#define VPFE_IPIPE_MAX_SIZE_3D_LUT 729
-
-struct vpfe_ipipe_3d_lut_entry {
- /* 10 bit entry for red */
- unsigned short r;
- /* 10 bit entry for green */
- unsigned short g;
- /* 10 bit entry for blue */
- unsigned short b;
-};
-
-/* structure for 3D-LUT */
-struct vpfe_ipipe_3d_lut {
- /* enable/disable 3D lut */
- unsigned char en;
- /* 3D - LUT table entry */
- struct vpfe_ipipe_3d_lut_entry table[VPFE_IPIPE_MAX_SIZE_3D_LUT];
-};
-
-/* Struct for configuring rgb2ycbcr module */
-struct vpfe_ipipe_rgb2yuv {
- /* Matrix coefficient for RY S12Q8 */
- struct ipipe_float_s16 coef_ry;
- /* Matrix coefficient for GY S12Q8 */
- struct ipipe_float_s16 coef_gy;
- /* Matrix coefficient for BY S12Q8 */
- struct ipipe_float_s16 coef_by;
- /* Matrix coefficient for RCb S12Q8 */
- struct ipipe_float_s16 coef_rcb;
- /* Matrix coefficient for GCb S12Q8 */
- struct ipipe_float_s16 coef_gcb;
- /* Matrix coefficient for BCb S12Q8 */
- struct ipipe_float_s16 coef_bcb;
- /* Matrix coefficient for RCr S12Q8 */
- struct ipipe_float_s16 coef_rcr;
- /* Matrix coefficient for GCr S12Q8 */
- struct ipipe_float_s16 coef_gcr;
- /* Matrix coefficient for BCr S12Q8 */
- struct ipipe_float_s16 coef_bcr;
-	/* Output offset for Y S11 */
- int out_ofst_y;
- /* Output offset for Cb S11 */
- int out_ofst_cb;
- /* Output offset for Cr S11 */
- int out_ofst_cr;
-};
-
-enum vpfe_ipipe_gbce_type {
- VPFE_IPIPE_GBCE_Y_VAL_TBL = 0,
- VPFE_IPIPE_GBCE_GAIN_TBL = 1,
-};
-
-#define VPFE_IPIPE_MAX_SIZE_GBCE_LUT 1024
-
-/* structure for Global brightness and Contrast */
-struct vpfe_ipipe_gbce {
- /* enable/disable GBCE */
- unsigned char en;
- /* Y - value table or Gain table */
- enum vpfe_ipipe_gbce_type type;
- /* ptr to LUT for GBCE with 1024 entries */
- unsigned short table[VPFE_IPIPE_MAX_SIZE_GBCE_LUT];
-};
-
-/* Chrominance position. Applicable only for YCbCr input
- * Applied after edge enhancement
- */
-enum vpfe_chr_pos {
- /* Co-siting, same position with luminance */
- VPFE_IPIPE_YUV422_CHR_POS_COSITE = 0,
- /* Centering, In the middle of luminance */
- VPFE_IPIPE_YUV422_CHR_POS_CENTRE = 1,
-};
-
-/* Structure for configuring yuv422 conversion module */
-struct vpfe_ipipe_yuv422_conv {
-	/* 1 - enable LPF for chrominance, 0 - disable */
-	unsigned char en_chrom_lpf;
-	/* Chrominance position */
-	enum vpfe_chr_pos chrom_pos;
-};
-
-#define VPFE_IPIPE_MAX_SIZE_YEE_LUT 1024
-
-enum vpfe_ipipe_yee_merge_meth {
- VPFE_IPIPE_YEE_ABS_MAX = 0,
- VPFE_IPIPE_YEE_EE_ES = 1,
-};
-
-/* Structure for configuring YUV Edge Enhancement module */
-struct vpfe_ipipe_yee {
- /* 1 - enable enhancement, 0 - disable */
- unsigned char en;
-	/* enable/disable halo reduction in edge sharpener */
- unsigned char en_halo_red;
-	/* Merge method between Edge Enhancer and Edge sharpener */
- enum vpfe_ipipe_yee_merge_meth merge_meth;
- /* HPF Shift length */
- unsigned char hpf_shft;
- /* HPF Coefficient 00, S10 */
- short hpf_coef_00;
- /* HPF Coefficient 01, S10 */
- short hpf_coef_01;
- /* HPF Coefficient 02, S10 */
- short hpf_coef_02;
- /* HPF Coefficient 10, S10 */
- short hpf_coef_10;
- /* HPF Coefficient 11, S10 */
- short hpf_coef_11;
- /* HPF Coefficient 12, S10 */
- short hpf_coef_12;
- /* HPF Coefficient 20, S10 */
- short hpf_coef_20;
- /* HPF Coefficient 21, S10 */
- short hpf_coef_21;
- /* HPF Coefficient 22, S10 */
- short hpf_coef_22;
- /* Lower threshold before referring to LUT */
- unsigned short yee_thr;
- /* Edge sharpener Gain */
- unsigned short es_gain;
- /* Edge sharpener lower threshold */
- unsigned short es_thr1;
- /* Edge sharpener upper threshold */
- unsigned short es_thr2;
- /* Edge sharpener gain on gradient */
- unsigned short es_gain_grad;
- /* Edge sharpener offset on gradient */
- unsigned short es_ofst_grad;
- /* Ptr to EE table. Must have 1024 entries */
- short table[VPFE_IPIPE_MAX_SIZE_YEE_LUT];
-};
-
-enum vpfe_ipipe_car_meth {
- /* Chromatic Gain Control */
- VPFE_IPIPE_CAR_CHR_GAIN_CTRL = 0,
- /* Dynamic switching between CHR_GAIN_CTRL
- * and MED_FLTR
- */
- VPFE_IPIPE_CAR_DYN_SWITCH = 1,
- /* Median Filter */
- VPFE_IPIPE_CAR_MED_FLTR = 2,
-};
-
-enum vpfe_ipipe_car_hpf_type {
- VPFE_IPIPE_CAR_HPF_Y = 0,
- VPFE_IPIPE_CAR_HPF_H = 1,
- VPFE_IPIPE_CAR_HPF_V = 2,
- VPFE_IPIPE_CAR_HPF_2D = 3,
- /* 2D HPF from YUV Edge Enhancement */
- VPFE_IPIPE_CAR_HPF_2D_YEE = 4,
-};
-
-struct vpfe_ipipe_car_gain {
- /* csup_gain */
- unsigned char gain;
- /* csup_shf. */
- unsigned char shft;
- /* gain minimum */
- unsigned short gain_min;
-};
-
-/* Structure for Chromatic Artifact Reduction */
-struct vpfe_ipipe_car {
- /* enable/disable */
- unsigned char en;
- /* Gain control or Dynamic switching */
- enum vpfe_ipipe_car_meth meth;
- /* Gain1 function configuration for Gain control */
- struct vpfe_ipipe_car_gain gain1;
- /* Gain2 function configuration for Gain control */
- struct vpfe_ipipe_car_gain gain2;
- /* HPF type used for CAR */
- enum vpfe_ipipe_car_hpf_type hpf;
- /* csup_thr: HPF threshold for Gain control */
- unsigned char hpf_thr;
- /* Down shift value for hpf. 2 bits */
- unsigned char hpf_shft;
- /* switch limit for median filter */
- unsigned char sw0;
- /* switch coefficient for Gain control */
- unsigned char sw1;
-};
-
-/* structure for Chromatic Gain Suppression */
-struct vpfe_ipipe_cgs {
- /* enable/disable */
- unsigned char en;
- /* gain1 bright side threshold */
- unsigned char h_thr;
- /* gain1 bright side slope */
- unsigned char h_slope;
- /* gain1 down shift value for bright side */
- unsigned char h_shft;
- /* gain1 bright side minimum gain */
- unsigned char h_min;
-};
-
-/* Max pixels allowed in the input. If the input is wider than this,
- * either decimation or frame division mode must be enabled.
- */
-#define VPFE_IPIPE_MAX_INPUT_WIDTH 2600
-
-struct vpfe_ipipe_input_config {
- unsigned int vst;
- unsigned int hst;
-};
-
-/**
- * struct vpfe_ipipe_config - IPIPE engine configuration (user)
- * @flag: Specifies which ISP IPIPE functions should be enabled.
- * @input_config: Pointer to structure for ipipe input configuration.
- * @lutdpc: Pointer to structure for LUT defect pixel correction.
- * @otfdpc: Pointer to structure for defect correction.
- * @nf1: Pointer to structure for Noise Filter.
- * @nf2: Pointer to structure for Noise Filter.
- * @gic: Pointer to structure for Green Imbalance.
- * @wbal: Pointer to structure for White Balance.
- * @cfa: Pointer to structure containing the CFA interpolation.
- * @rgb2rgb1: Pointer to structure for RGB to RGB Blending.
- * @rgb2rgb2: Pointer to structure for RGB to RGB Blending.
- * @gamma: Pointer to gamma structure.
- * @lut: Pointer to structure for 3D LUT.
- * @rgb2yuv: Pointer to structure for RGB-YCbCr conversion.
- * @gbce: Pointer to structure for Global Brightness, Contrast Control.
- * @yuv422_conv: Pointer to structure for YUV 422 conversion.
- * @yee: Pointer to structure for Edge Enhancer.
- * @car: Pointer to structure for Chromatic Artifact Reduction.
- * @cgs: Pointer to structure for Chromatic Gain Suppression.
- */
-struct vpfe_ipipe_config {
- __u32 flag;
- struct vpfe_ipipe_input_config __user *input_config;
- struct vpfe_ipipe_lutdpc __user *lutdpc;
- struct vpfe_ipipe_otfdpc __user *otfdpc;
- struct vpfe_ipipe_nf __user *nf1;
- struct vpfe_ipipe_nf __user *nf2;
- struct vpfe_ipipe_gic __user *gic;
- struct vpfe_ipipe_wb __user *wbal;
- struct vpfe_ipipe_cfa __user *cfa;
- struct vpfe_ipipe_rgb2rgb __user *rgb2rgb1;
- struct vpfe_ipipe_rgb2rgb __user *rgb2rgb2;
- struct vpfe_ipipe_gamma __user *gamma;
- struct vpfe_ipipe_3d_lut __user *lut;
- struct vpfe_ipipe_rgb2yuv __user *rgb2yuv;
- struct vpfe_ipipe_gbce __user *gbce;
- struct vpfe_ipipe_yuv422_conv __user *yuv422_conv;
- struct vpfe_ipipe_yee __user *yee;
- struct vpfe_ipipe_car __user *car;
- struct vpfe_ipipe_cgs __user *cgs;
-};
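Each module in the structure above is selected by a bit in @flag and handed over through the matching __user pointer, so user space only fills in the blocks it wants to reprogram. Below is a minimal user-space sketch for the white balance module; it assumes this header is usable from user space (with the __user annotations expanding to nothing), that the caller already holds an open file descriptor for the IPIPE sub-device node, that the header's file name is as given in the include, and that the white-balance bit follows the ipipe_modules[] ordering in dm365_ipipe.c below (slot 5):

    #include <string.h>
    #include <sys/ioctl.h>
    #include "davinci_vpfe_user.h"  /* user API header ending above; name assumed */

    static int program_wb(int ipipe_subdev_fd)
    {
            struct vpfe_ipipe_wb wb;
            struct vpfe_ipipe_config cfg;

            memset(&wb, 0, sizeof(wb));
            memset(&cfg, 0, sizeof(cfg));

            wb.gain_r.integer = 2;  /* U13Q9 gain of 2.0 for Red */
            wb.gain_r.decimal = 0;
            wb.gain_gr = wb.gain_gb = wb.gain_b = wb.gain_r;

            cfg.flag = 1 << 5;      /* white-balance slot (assumed bit position) */
            cfg.wbal = &wb;

            return ioctl(ipipe_subdev_fd, VIDIOC_VPFE_IPIPE_S_CONFIG, &cfg);
    }

Only the white-balance bit is set here, so the driver leaves every other module's configuration untouched.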
-
-/*******************************************************************
-** Resizer API structures
-*******************************************************************/
-/* Interpolation types used for horizontal rescale */
-enum vpfe_rsz_intp_t {
- VPFE_RSZ_INTP_CUBIC,
- VPFE_RSZ_INTP_LINEAR
-};
-
-/* Horizontal LPF intensity selection */
-enum vpfe_rsz_h_lpf_lse_t {
- VPFE_RSZ_H_LPF_LSE_INTERN,
- VPFE_RSZ_H_LPF_LSE_USER_VAL
-};
-
-enum vpfe_rsz_down_scale_ave_sz {
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- VPFE_IPIPE_DWN_SCALE_1_OVER_4,
- VPFE_IPIPE_DWN_SCALE_1_OVER_8,
- VPFE_IPIPE_DWN_SCALE_1_OVER_16,
- VPFE_IPIPE_DWN_SCALE_1_OVER_32,
- VPFE_IPIPE_DWN_SCALE_1_OVER_64,
- VPFE_IPIPE_DWN_SCALE_1_OVER_128,
- VPFE_IPIPE_DWN_SCALE_1_OVER_256
-};
-
-struct vpfe_rsz_output_spec {
- /* enable horizontal flip */
- unsigned char h_flip;
- /* enable vertical flip */
- unsigned char v_flip;
- /* line start offset for y. */
- unsigned int vst_y;
- /* line start offset for c. Only for 420 */
- unsigned int vst_c;
- /* vertical rescale interpolation type, YCbCr or Luminance */
- enum vpfe_rsz_intp_t v_typ_y;
- /* vertical rescale interpolation type for Chrominance */
- enum vpfe_rsz_intp_t v_typ_c;
- /* vertical lpf intensity - Luminance */
- unsigned char v_lpf_int_y;
- /* vertical lpf intensity - Chrominance */
- unsigned char v_lpf_int_c;
- /* horizontal rescale interpolation types, YCbCr or Luminance */
- enum vpfe_rsz_intp_t h_typ_y;
- /* horizontal rescale interpolation types, Chrominance */
- enum vpfe_rsz_intp_t h_typ_c;
- /* horizontal lpf intensity - Luminance */
- unsigned char h_lpf_int_y;
- /* horizontal lpf intensity - Chrominance */
- unsigned char h_lpf_int_c;
- /* Use down scale mode for scale down */
- unsigned char en_down_scale;
-	/* if downscaling, set the downscale averaging size for the
-	 * horizontal direction. Used only if the output width and height
-	 * are less than the input sizes.
-	 */
- enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
-	/* if downscaling, set the downscale averaging size for the
-	 * vertical direction. Used only if the output width and height
-	 * are less than the input sizes.
-	 */
- enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
- /* Y offset. If set, the offset would be added to the base address
- */
- unsigned int user_y_ofst;
- /* C offset. If set, the offset would be added to the base address
- */
- unsigned int user_c_ofst;
-};
-
-struct vpfe_rsz_config_params {
- unsigned int vst;
- /* horizontal start position of the image
- * data to IPIPE
- */
- unsigned int hst;
- /* output spec of the image data coming out of resizer - 0(UYVY).
- */
- struct vpfe_rsz_output_spec output1;
- /* output spec of the image data coming out of resizer - 1(UYVY).
- */
- struct vpfe_rsz_output_spec output2;
-	/* 0 - chroma sample at odd pixel, 1 - even pixel */
- unsigned char chroma_sample_even;
- unsigned char frame_div_mode_en;
- unsigned char yuv_y_min;
- unsigned char yuv_y_max;
- unsigned char yuv_c_min;
- unsigned char yuv_c_max;
- enum vpfe_chr_pos out_chr_pos;
- unsigned char bypass;
-};
-
-/* Structure for VIDIOC_VPFE_RSZ_[S/G]_CONFIG IOCTLs */
-struct vpfe_rsz_config {
- struct vpfe_rsz_config_params *config;
-};
-
-#endif /* _DAVINCI_VPFE_USER_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
deleted file mode 100644
index 1bbb90ce0086..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ /dev/null
@@ -1,1862 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- *
- *
- * IPIPE allows fine tuning of the input image using different
- * tuning modules in IPIPE, for example the noise filter and defect
- * pixel correction. It essentially operates on Bayer raw data or
- * YUV raw data. Image tuning is done through the module private
- * ioctls VIDIOC_VPFE_IPIPE_S_CONFIG / VIDIOC_VPFE_IPIPE_G_CONFIG
- * handled by ipipe_ioctl() below.
- */
-
-#include <linux/slab.h>
-
-#include "dm365_ipipe.h"
-#include "dm365_ipipe_hw.h"
-#include "vpfe_mc_capture.h"
-
-#define MIN_OUT_WIDTH 32
-#define MIN_OUT_HEIGHT 32
-
-/* ipipe input formats */
-static const unsigned int ipipe_input_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
-};
-
-/* ipipe output formats */
-static const unsigned int ipipe_output_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
-};
-
-static int ipipe_validate_lutdpc_params(struct vpfe_ipipe_lutdpc *lutdpc)
-{
- int i;
-
- if (lutdpc->en > 1 || lutdpc->repl_white > 1 ||
- lutdpc->dpc_size > LUT_DPC_MAX_SIZE)
- return -EINVAL;
-
- if (lutdpc->en && !lutdpc->table)
- return -EINVAL;
-
- for (i = 0; i < lutdpc->dpc_size; i++)
- if (lutdpc->table[i].horz_pos > LUT_DPC_H_POS_MASK ||
- lutdpc->table[i].vert_pos > LUT_DPC_V_POS_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
- struct vpfe_ipipe_lutdpc *dpc_param;
- struct device *dev;
-
- if (!param) {
- memset((void *)lutdpc, 0, sizeof(struct vpfe_ipipe_lutdpc));
- goto success;
- }
-
- dev = ipipe->subdev.v4l2_dev->dev;
- dpc_param = (struct vpfe_ipipe_lutdpc *)param;
-	/* Validate first so an oversized dpc_size cannot overflow the table */
-	if (ipipe_validate_lutdpc_params(dpc_param) < 0)
-		return -EINVAL;
-
-	lutdpc->en = dpc_param->en;
-	lutdpc->repl_white = dpc_param->repl_white;
-	lutdpc->dpc_size = dpc_param->dpc_size;
-	memcpy(&lutdpc->table, &dpc_param->table,
-	       (dpc_param->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
-
-success:
- ipipe_set_lutdpc_regs(ipipe->base_addr, ipipe->isp5_base_addr, lutdpc);
-
- return 0;
-}
-
-static int ipipe_get_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_lutdpc *lut_param = (struct vpfe_ipipe_lutdpc *)param;
- struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
-
- lut_param->en = lutdpc->en;
- lut_param->repl_white = lutdpc->repl_white;
- lut_param->dpc_size = lutdpc->dpc_size;
- memcpy(&lut_param->table, &lutdpc->table,
- (lutdpc->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
-
- return 0;
-}
-
-static int ipipe_set_input_config(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
-
- if (!param)
- memset(config, 0, sizeof(struct vpfe_ipipe_input_config));
- else
- memcpy(config, param, sizeof(struct vpfe_ipipe_input_config));
- return 0;
-}
-
-static int ipipe_get_input_config(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
-
- if (!param)
- return -EINVAL;
-
- memcpy(param, config, sizeof(struct vpfe_ipipe_input_config));
-
- return 0;
-}
-
-static int ipipe_validate_otfdpc_params(struct vpfe_ipipe_otfdpc *dpc_param)
-{
- struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0;
- struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0;
-
- if (dpc_param->en > 1)
- return -EINVAL;
-
- if (dpc_param->alg == VPFE_IPIPE_OTFDPC_2_0) {
- dpc_2_0 = &dpc_param->alg_cfg.dpc_2_0;
- if (dpc_2_0->det_thr.r > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->det_thr.gr > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->det_thr.gb > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->det_thr.b > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.r > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.gr > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.gb > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.b > OTFDPC_DPC2_THR_MASK)
- return -EINVAL;
- return 0;
- }
-
- dpc_3_0 = &dpc_param->alg_cfg.dpc_3_0;
-
- if (dpc_3_0->act_adj_shf > OTF_DPC3_0_SHF_MASK ||
- dpc_3_0->det_thr > OTF_DPC3_0_DET_MASK ||
- dpc_3_0->det_slp > OTF_DPC3_0_SLP_MASK ||
- dpc_3_0->det_thr_min > OTF_DPC3_0_DET_MASK ||
- dpc_3_0->det_thr_max > OTF_DPC3_0_DET_MASK ||
- dpc_3_0->corr_thr > OTF_DPC3_0_CORR_MASK ||
- dpc_3_0->corr_slp > OTF_DPC3_0_SLP_MASK ||
- dpc_3_0->corr_thr_min > OTF_DPC3_0_CORR_MASK ||
- dpc_3_0->corr_thr_max > OTF_DPC3_0_CORR_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_otfdpc *dpc_param = (struct vpfe_ipipe_otfdpc *)param;
- struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
- struct device *dev;
-
- if (!param) {
-		memset(otfdpc, 0, sizeof(struct vpfe_ipipe_otfdpc));
- goto success;
- }
- dev = ipipe->subdev.v4l2_dev->dev;
- memcpy(otfdpc, dpc_param, sizeof(struct vpfe_ipipe_otfdpc));
- if (ipipe_validate_otfdpc_params(otfdpc) < 0) {
- dev_err(dev, "Invalid otfdpc params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_otfdpc_regs(ipipe->base_addr, otfdpc);
-
- return 0;
-}
-
-static int ipipe_get_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_otfdpc *dpc_param = (struct vpfe_ipipe_otfdpc *)param;
- struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
-
- memcpy(dpc_param, otfdpc, sizeof(struct vpfe_ipipe_otfdpc));
- return 0;
-}
-
-static int ipipe_validate_nf_params(struct vpfe_ipipe_nf *nf_param)
-{
- int i;
-
- if (nf_param->en > 1 || nf_param->shft_val > D2F_SHFT_VAL_MASK ||
- nf_param->spread_val > D2F_SPR_VAL_MASK ||
- nf_param->apply_lsc_gain > 1 ||
- nf_param->edge_det_min_thr > D2F_EDGE_DET_THR_MASK ||
- nf_param->edge_det_max_thr > D2F_EDGE_DET_THR_MASK)
- return -EINVAL;
-
- for (i = 0; i < VPFE_IPIPE_NF_THR_TABLE_SIZE; i++)
- if (nf_param->thr[i] > D2F_THR_VAL_MASK)
- return -EINVAL;
-
- for (i = 0; i < VPFE_IPIPE_NF_STR_TABLE_SIZE; i++)
- if (nf_param->str[i] > D2F_STR_VAL_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_nf_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_nf *nf_param = (struct vpfe_ipipe_nf *)param;
- struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
- struct device *dev;
-
- if (id == IPIPE_D2F_2ND)
- nf = &ipipe->config.nf2;
-
- if (!nf_param) {
- memset((void *)nf, 0, sizeof(struct vpfe_ipipe_nf));
- goto success;
- }
-
- dev = ipipe->subdev.v4l2_dev->dev;
- memcpy(nf, nf_param, sizeof(struct vpfe_ipipe_nf));
- if (ipipe_validate_nf_params(nf) < 0) {
- dev_err(dev, "Invalid nf params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_d2f_regs(ipipe->base_addr, id, nf);
-
- return 0;
-}
-
-static int ipipe_set_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_nf_params(ipipe, IPIPE_D2F_1ST, param);
-}
-
-static int ipipe_set_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_nf_params(ipipe, IPIPE_D2F_2ND, param);
-}
-
-static int ipipe_get_nf_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_nf *nf_param = (struct vpfe_ipipe_nf *)param;
- struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
-
- if (id == IPIPE_D2F_2ND)
- nf = &ipipe->config.nf2;
-
- memcpy(nf_param, nf, sizeof(struct vpfe_ipipe_nf));
-
- return 0;
-}
-
-static int ipipe_get_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_nf_params(ipipe, IPIPE_D2F_1ST, param);
-}
-
-static int ipipe_get_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_nf_params(ipipe, IPIPE_D2F_2ND, param);
-}
-
-static int ipipe_validate_gic_params(struct vpfe_ipipe_gic *gic)
-{
- if (gic->en > 1 || gic->gain > GIC_GAIN_MASK ||
- gic->thr > GIC_THR_MASK || gic->slope > GIC_SLOPE_MASK ||
- gic->apply_lsc_gain > 1 ||
- gic->nf2_thr_gain.integer > GIC_NFGAN_INT_MASK ||
- gic->nf2_thr_gain.decimal > GIC_NFGAN_DECI_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gic *gic_param = (struct vpfe_ipipe_gic *)param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
-
- if (!gic_param) {
- memset((void *)gic, 0, sizeof(struct vpfe_ipipe_gic));
- goto success;
- }
-
- memcpy(gic, gic_param, sizeof(struct vpfe_ipipe_gic));
- if (ipipe_validate_gic_params(gic) < 0) {
- dev_err(dev, "Invalid gic params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_gic_regs(ipipe->base_addr, gic);
-
- return 0;
-}
-
-static int ipipe_get_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gic *gic_param = (struct vpfe_ipipe_gic *)param;
- struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
-
- memcpy(gic_param, gic, sizeof(struct vpfe_ipipe_gic));
-
- return 0;
-}
-
-static int ipipe_validate_wb_params(struct vpfe_ipipe_wb *wbal)
-{
- if (wbal->ofst_r > WB_OFFSET_MASK ||
- wbal->ofst_gr > WB_OFFSET_MASK ||
- wbal->ofst_gb > WB_OFFSET_MASK ||
- wbal->ofst_b > WB_OFFSET_MASK ||
- wbal->gain_r.integer > WB_GAIN_INT_MASK ||
- wbal->gain_r.decimal > WB_GAIN_DECI_MASK ||
- wbal->gain_gr.integer > WB_GAIN_INT_MASK ||
- wbal->gain_gr.decimal > WB_GAIN_DECI_MASK ||
- wbal->gain_gb.integer > WB_GAIN_INT_MASK ||
- wbal->gain_gb.decimal > WB_GAIN_DECI_MASK ||
- wbal->gain_b.integer > WB_GAIN_INT_MASK ||
- wbal->gain_b.decimal > WB_GAIN_DECI_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_wb *wb_param = (struct vpfe_ipipe_wb *)param;
- struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
-
- if (!wb_param) {
- const struct vpfe_ipipe_wb wb_defaults = {
- .gain_r = {2, 0x0},
- .gain_gr = {2, 0x0},
- .gain_gb = {2, 0x0},
- .gain_b = {2, 0x0}
- };
- memcpy(wbal, &wb_defaults, sizeof(struct vpfe_ipipe_wb));
- goto success;
- }
-
- memcpy(wbal, wb_param, sizeof(struct vpfe_ipipe_wb));
- if (ipipe_validate_wb_params(wbal) < 0)
- return -EINVAL;
-
-success:
- ipipe_set_wb_regs(ipipe->base_addr, wbal);
-
- return 0;
-}
-
-static int ipipe_get_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_wb *wb_param = (struct vpfe_ipipe_wb *)param;
- struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
-
- memcpy(wb_param, wbal, sizeof(struct vpfe_ipipe_wb));
- return 0;
-}
-
-static int ipipe_validate_cfa_params(struct vpfe_ipipe_cfa *cfa)
-{
- if (cfa->hpf_thr_2dir > CFA_HPF_THR_2DIR_MASK ||
- cfa->hpf_slp_2dir > CFA_HPF_SLOPE_2DIR_MASK ||
- cfa->hp_mix_thr_2dir > CFA_HPF_MIX_THR_2DIR_MASK ||
- cfa->hp_mix_slope_2dir > CFA_HPF_MIX_SLP_2DIR_MASK ||
- cfa->dir_thr_2dir > CFA_DIR_THR_2DIR_MASK ||
- cfa->dir_slope_2dir > CFA_DIR_SLP_2DIR_MASK ||
- cfa->nd_wt_2dir > CFA_ND_WT_2DIR_MASK ||
- cfa->hue_fract_daa > CFA_DAA_HUE_FRA_MASK ||
- cfa->edge_thr_daa > CFA_DAA_EDG_THR_MASK ||
- cfa->thr_min_daa > CFA_DAA_THR_MIN_MASK ||
- cfa->thr_slope_daa > CFA_DAA_THR_SLP_MASK ||
- cfa->slope_min_daa > CFA_DAA_SLP_MIN_MASK ||
- cfa->slope_slope_daa > CFA_DAA_SLP_SLP_MASK ||
- cfa->lp_wt_daa > CFA_DAA_LP_WT_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cfa *cfa_param = (struct vpfe_ipipe_cfa *)param;
- struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
-
- if (!cfa_param) {
- memset(cfa, 0, sizeof(struct vpfe_ipipe_cfa));
- cfa->alg = VPFE_IPIPE_CFA_ALG_2DIRAC;
- goto success;
- }
-
- memcpy(cfa, cfa_param, sizeof(struct vpfe_ipipe_cfa));
- if (ipipe_validate_cfa_params(cfa) < 0)
- return -EINVAL;
-
-success:
- ipipe_set_cfa_regs(ipipe->base_addr, cfa);
-
- return 0;
-}
-
-static int ipipe_get_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cfa *cfa_param = (struct vpfe_ipipe_cfa *)param;
- struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
-
- memcpy(cfa_param, cfa, sizeof(struct vpfe_ipipe_cfa));
- return 0;
-}
-
-static int
-ipipe_validate_rgb2rgb_params(struct vpfe_ipipe_rgb2rgb *rgb2rgb,
- unsigned int id)
-{
- u32 gain_int_upper = RGB2RGB_1_GAIN_INT_MASK;
- u32 offset_upper = RGB2RGB_1_OFST_MASK;
-
- if (id == IPIPE_RGB2RGB_2) {
- offset_upper = RGB2RGB_2_OFST_MASK;
- gain_int_upper = RGB2RGB_2_GAIN_INT_MASK;
- }
-
- if (rgb2rgb->coef_rr.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_rr.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_gr.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_gr.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_br.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_br.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_rg.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_rg.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_gg.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_gg.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_bg.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_bg.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_rb.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_rb.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_gb.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_gb.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_bb.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_bb.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->out_ofst_r > offset_upper ||
- rgb2rgb->out_ofst_g > offset_upper ||
- rgb2rgb->out_ofst_b > offset_upper)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
-
- rgb2rgb_param = (struct vpfe_ipipe_rgb2rgb *)param;
-
- if (id == IPIPE_RGB2RGB_2)
- rgb2rgb = &ipipe->config.rgb2rgb2;
-
- if (!rgb2rgb_param) {
- const struct vpfe_ipipe_rgb2rgb rgb2rgb_defaults = {
- .coef_rr = {1, 0}, /* 256 */
- .coef_gr = {0, 0},
- .coef_br = {0, 0},
- .coef_rg = {0, 0},
- .coef_gg = {1, 0}, /* 256 */
- .coef_bg = {0, 0},
- .coef_rb = {0, 0},
- .coef_gb = {0, 0},
- .coef_bb = {1, 0}, /* 256 */
- };
- /* Copy defaults for rgb2rgb conversion */
- memcpy(rgb2rgb, &rgb2rgb_defaults,
- sizeof(struct vpfe_ipipe_rgb2rgb));
- goto success;
- }
-
- memcpy(rgb2rgb, rgb2rgb_param, sizeof(struct vpfe_ipipe_rgb2rgb));
- if (ipipe_validate_rgb2rgb_params(rgb2rgb, id) < 0) {
- dev_err(dev, "Invalid rgb2rgb params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_rgb2rgb_regs(ipipe->base_addr, id, rgb2rgb);
-
- return 0;
-}
-
-static int
-ipipe_set_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
-}
-
-static int
-ipipe_set_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
-}
-
-static int ipipe_get_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
- struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
-
- rgb2rgb_param = (struct vpfe_ipipe_rgb2rgb *)param;
-
- if (id == IPIPE_RGB2RGB_2)
- rgb2rgb = &ipipe->config.rgb2rgb2;
-
- memcpy(rgb2rgb_param, rgb2rgb, sizeof(struct vpfe_ipipe_rgb2rgb));
-
- return 0;
-}
-
-static int
-ipipe_get_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
-}
-
-static int
-ipipe_get_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
-}
-
-static int
-ipipe_validate_gamma_entry(struct vpfe_ipipe_gamma_entry *table, int size)
-{
- int i;
-
- if (!table)
- return -EINVAL;
-
- for (i = 0; i < size; i++)
- if (table[i].slope > GAMMA_MASK ||
- table[i].offset > GAMMA_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-ipipe_validate_gamma_params(struct vpfe_ipipe_gamma *gamma, struct device *dev)
-{
- int table_size;
- int err;
-
- if (gamma->bypass_r > 1 ||
- gamma->bypass_b > 1 ||
- gamma->bypass_g > 1)
- return -EINVAL;
-
- if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- return 0;
-
- table_size = gamma->tbl_size;
- if (!gamma->bypass_r) {
- err = ipipe_validate_gamma_entry(gamma->table_r, table_size);
- if (err) {
- dev_err(dev, "GAMMA R - table entry invalid\n");
- return err;
- }
- }
-
- if (!gamma->bypass_b) {
- err = ipipe_validate_gamma_entry(gamma->table_b, table_size);
- if (err) {
- dev_err(dev, "GAMMA B - table entry invalid\n");
- return err;
- }
- }
-
- if (!gamma->bypass_g) {
- err = ipipe_validate_gamma_entry(gamma->table_g, table_size);
- if (err) {
- dev_err(dev, "GAMMA G - table entry invalid\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static int
-ipipe_set_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gamma *gamma_param = (struct vpfe_ipipe_gamma *)param;
- struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- int table_size;
-
- if (!gamma_param) {
- memset(gamma, 0, sizeof(struct vpfe_ipipe_gamma));
- gamma->tbl_sel = VPFE_IPIPE_GAMMA_TBL_ROM;
- goto success;
- }
-
- gamma->bypass_r = gamma_param->bypass_r;
- gamma->bypass_b = gamma_param->bypass_b;
- gamma->bypass_g = gamma_param->bypass_g;
- gamma->tbl_sel = gamma_param->tbl_sel;
- gamma->tbl_size = gamma_param->tbl_size;
-
- if (ipipe_validate_gamma_params(gamma, dev) < 0)
- return -EINVAL;
-
- if (gamma_param->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- goto success;
-
- table_size = gamma->tbl_size;
- if (!gamma_param->bypass_r)
- memcpy(&gamma->table_r, &gamma_param->table_r,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma_param->bypass_b)
- memcpy(&gamma->table_b, &gamma_param->table_b,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma_param->bypass_g)
- memcpy(&gamma->table_g, &gamma_param->table_g,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
-success:
- ipipe_set_gamma_regs(ipipe->base_addr, ipipe->isp5_base_addr, gamma);
-
- return 0;
-}
-
-static int ipipe_get_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gamma *gamma_param = (struct vpfe_ipipe_gamma *)param;
- struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- int table_size;
-
- gamma_param->bypass_r = gamma->bypass_r;
- gamma_param->bypass_g = gamma->bypass_g;
- gamma_param->bypass_b = gamma->bypass_b;
- gamma_param->tbl_sel = gamma->tbl_sel;
- gamma_param->tbl_size = gamma->tbl_size;
-
- if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- return 0;
-
- table_size = gamma->tbl_size;
-
- if (!gamma->bypass_r && !gamma_param->table_r) {
- dev_err(dev,
- "ipipe_get_gamma_params: table ptr empty for R\n");
- return -EINVAL;
- }
- memcpy(gamma_param->table_r, gamma->table_r,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma->bypass_g && !gamma_param->table_g) {
- dev_err(dev, "ipipe_get_gamma_params: table ptr empty for G\n");
- return -EINVAL;
- }
- memcpy(gamma_param->table_g, gamma->table_g,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma->bypass_b && !gamma_param->table_b) {
- dev_err(dev, "ipipe_get_gamma_params: table ptr empty for B\n");
- return -EINVAL;
- }
- memcpy(gamma_param->table_b, gamma->table_b,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- return 0;
-}
-
-static int ipipe_validate_3d_lut_params(struct vpfe_ipipe_3d_lut *lut)
-{
- int i;
-
- if (!lut->en)
- return 0;
-
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++)
- if (lut->table[i].r > D3_LUT_ENTRY_MASK ||
- lut->table[i].g > D3_LUT_ENTRY_MASK ||
- lut->table[i].b > D3_LUT_ENTRY_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_get_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_3d_lut *lut_param = (struct vpfe_ipipe_3d_lut *)param;
- struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- lut_param->en = lut->en;
- if (!lut_param->table) {
- dev_err(dev, "ipipe_get_3d_lut_params: Invalid table ptr\n");
- return -EINVAL;
- }
-
- memcpy(lut_param->table, &lut->table,
- (VPFE_IPIPE_MAX_SIZE_3D_LUT *
- sizeof(struct vpfe_ipipe_3d_lut_entry)));
-
- return 0;
-}
-
-static int
-ipipe_set_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_3d_lut *lut_param = (struct vpfe_ipipe_3d_lut *)param;
- struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- if (!lut_param) {
- memset(lut, 0, sizeof(struct vpfe_ipipe_3d_lut));
- goto success;
- }
-
- memcpy(lut, lut_param, sizeof(struct vpfe_ipipe_3d_lut));
- if (ipipe_validate_3d_lut_params(lut) < 0) {
- dev_err(dev, "Invalid 3D-LUT Params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_3d_lut_regs(ipipe->base_addr, ipipe->isp5_base_addr, lut);
-
- return 0;
-}
-
-static int ipipe_validate_rgb2yuv_params(struct vpfe_ipipe_rgb2yuv *rgb2yuv)
-{
- if (rgb2yuv->coef_ry.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_ry.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_gy.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_gy.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_by.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_by.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_rcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_rcb.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_gcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_gcb.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_bcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_bcb.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_rcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_rcr.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_gcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_gcr.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_bcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_bcr.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->out_ofst_y > RGB2YCBCR_OFST_MASK ||
- rgb2yuv->out_ofst_cb > RGB2YCBCR_OFST_MASK ||
- rgb2yuv->out_ofst_cr > RGB2YCBCR_OFST_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-ipipe_set_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
-
- rgb2yuv_param = (struct vpfe_ipipe_rgb2yuv *)param;
- if (!rgb2yuv_param) {
- /* Defaults for rgb2yuv conversion */
- const struct vpfe_ipipe_rgb2yuv rgb2yuv_defaults = {
- .coef_ry = {0, 0x4d},
- .coef_gy = {0, 0x96},
- .coef_by = {0, 0x1d},
- .coef_rcb = {0xf, 0xd5},
- .coef_gcb = {0xf, 0xab},
- .coef_bcb = {0, 0x80},
- .coef_rcr = {0, 0x80},
- .coef_gcr = {0xf, 0x95},
- .coef_bcr = {0xf, 0xeb},
- .out_ofst_cb = 0x80,
- .out_ofst_cr = 0x80,
- };
- /* Copy defaults for rgb2yuv conversion */
- memcpy(rgb2yuv, &rgb2yuv_defaults,
- sizeof(struct vpfe_ipipe_rgb2yuv));
- goto success;
- }
-
- memcpy(rgb2yuv, rgb2yuv_param, sizeof(struct vpfe_ipipe_rgb2yuv));
- if (ipipe_validate_rgb2yuv_params(rgb2yuv) < 0) {
- dev_err(dev, "Invalid rgb2yuv params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_rgb2ycbcr_regs(ipipe->base_addr, rgb2yuv);
-
- return 0;
-}
-
-static int
-ipipe_get_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
- struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
-
- rgb2yuv_param = (struct vpfe_ipipe_rgb2yuv *)param;
- memcpy(rgb2yuv_param, rgb2yuv, sizeof(struct vpfe_ipipe_rgb2yuv));
- return 0;
-}
-
-static int ipipe_validate_gbce_params(struct vpfe_ipipe_gbce *gbce)
-{
- u32 max = GBCE_Y_VAL_MASK;
- int i;
-
- if (!gbce->en)
- return 0;
-
- if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
- max = GBCE_GAIN_VAL_MASK;
-
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; i++)
- if (gbce->table[i] > max)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gbce *gbce_param = (struct vpfe_ipipe_gbce *)param;
- struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- if (!gbce_param) {
- memset(gbce, 0, sizeof(struct vpfe_ipipe_gbce));
- } else {
- memcpy(gbce, gbce_param, sizeof(struct vpfe_ipipe_gbce));
- if (ipipe_validate_gbce_params(gbce) < 0) {
- dev_err(dev, "Invalid gbce params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_gbce_regs(ipipe->base_addr, ipipe->isp5_base_addr, gbce);
-
- return 0;
-}
-
-static int ipipe_get_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gbce *gbce_param = (struct vpfe_ipipe_gbce *)param;
- struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- gbce_param->en = gbce->en;
- gbce_param->type = gbce->type;
- if (!gbce_param->table) {
- dev_err(dev, "ipipe_get_gbce_params: Invalid table ptr\n");
- return -EINVAL;
- }
-
- memcpy(gbce_param->table, gbce->table,
- (VPFE_IPIPE_MAX_SIZE_GBCE_LUT * sizeof(unsigned short)));
-
- return 0;
-}
-
-static int
-ipipe_validate_yuv422_conv_params(struct vpfe_ipipe_yuv422_conv *yuv422_conv)
-{
- if (yuv422_conv->en_chrom_lpf > 1)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-ipipe_set_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
- struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- yuv422_conv_param = (struct vpfe_ipipe_yuv422_conv *)param;
- if (!yuv422_conv_param) {
- memset(yuv422_conv, 0, sizeof(struct vpfe_ipipe_yuv422_conv));
- yuv422_conv->chrom_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE;
- } else {
- memcpy(yuv422_conv, yuv422_conv_param,
- sizeof(struct vpfe_ipipe_yuv422_conv));
- if (ipipe_validate_yuv422_conv_params(yuv422_conv) < 0) {
- dev_err(dev, "Invalid yuv422 params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_yuv422_conv_regs(ipipe->base_addr, yuv422_conv);
-
- return 0;
-}
-
-static int
-ipipe_get_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
- struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
-
- yuv422_conv_param = (struct vpfe_ipipe_yuv422_conv *)param;
- memcpy(yuv422_conv_param, yuv422_conv,
- sizeof(struct vpfe_ipipe_yuv422_conv));
-
- return 0;
-}
-
-static int ipipe_validate_yee_params(struct vpfe_ipipe_yee *yee)
-{
- int i;
-
- if (yee->en > 1 ||
- yee->en_halo_red > 1 ||
- yee->hpf_shft > YEE_HPF_SHIFT_MASK)
- return -EINVAL;
-
- if (yee->hpf_coef_00 > YEE_COEF_MASK ||
- yee->hpf_coef_01 > YEE_COEF_MASK ||
- yee->hpf_coef_02 > YEE_COEF_MASK ||
- yee->hpf_coef_10 > YEE_COEF_MASK ||
- yee->hpf_coef_11 > YEE_COEF_MASK ||
- yee->hpf_coef_12 > YEE_COEF_MASK ||
- yee->hpf_coef_20 > YEE_COEF_MASK ||
- yee->hpf_coef_21 > YEE_COEF_MASK ||
- yee->hpf_coef_22 > YEE_COEF_MASK)
- return -EINVAL;
-
- if (yee->yee_thr > YEE_THR_MASK ||
- yee->es_gain > YEE_ES_GAIN_MASK ||
- yee->es_thr1 > YEE_ES_THR1_MASK ||
- yee->es_thr2 > YEE_THR_MASK ||
- yee->es_gain_grad > YEE_THR_MASK ||
- yee->es_ofst_grad > YEE_THR_MASK)
- return -EINVAL;
-
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT; i++)
- if (yee->table[i] > YEE_ENTRY_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yee *yee_param = (struct vpfe_ipipe_yee *)param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
-
- if (!yee_param) {
- memset(yee, 0, sizeof(struct vpfe_ipipe_yee));
- } else {
- memcpy(yee, yee_param, sizeof(struct vpfe_ipipe_yee));
- if (ipipe_validate_yee_params(yee) < 0) {
- dev_err(dev, "Invalid yee params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_ee_regs(ipipe->base_addr, ipipe->isp5_base_addr, yee);
-
- return 0;
-}
-
-static int ipipe_get_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yee *yee_param = (struct vpfe_ipipe_yee *)param;
- struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
-
- yee_param->en = yee->en;
- yee_param->en_halo_red = yee->en_halo_red;
- yee_param->merge_meth = yee->merge_meth;
- yee_param->hpf_shft = yee->hpf_shft;
- yee_param->hpf_coef_00 = yee->hpf_coef_00;
- yee_param->hpf_coef_01 = yee->hpf_coef_01;
- yee_param->hpf_coef_02 = yee->hpf_coef_02;
- yee_param->hpf_coef_10 = yee->hpf_coef_10;
- yee_param->hpf_coef_11 = yee->hpf_coef_11;
- yee_param->hpf_coef_12 = yee->hpf_coef_12;
- yee_param->hpf_coef_20 = yee->hpf_coef_20;
- yee_param->hpf_coef_21 = yee->hpf_coef_21;
- yee_param->hpf_coef_22 = yee->hpf_coef_22;
- yee_param->yee_thr = yee->yee_thr;
- yee_param->es_gain = yee->es_gain;
- yee_param->es_thr1 = yee->es_thr1;
- yee_param->es_thr2 = yee->es_thr2;
- yee_param->es_gain_grad = yee->es_gain_grad;
- yee_param->es_ofst_grad = yee->es_ofst_grad;
- memcpy(yee_param->table, &yee->table,
- (VPFE_IPIPE_MAX_SIZE_YEE_LUT * sizeof(short)));
-
- return 0;
-}
-
-static int ipipe_validate_car_params(struct vpfe_ipipe_car *car)
-{
- if (car->en > 1 || car->hpf_shft > CAR_HPF_SHIFT_MASK ||
- car->gain1.shft > CAR_GAIN1_SHFT_MASK ||
- car->gain1.gain_min > CAR_GAIN_MIN_MASK ||
- car->gain2.shft > CAR_GAIN2_SHFT_MASK ||
- car->gain2.gain_min > CAR_GAIN_MIN_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_car_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_car *car_param = (struct vpfe_ipipe_car *)param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_car *car = &ipipe->config.car;
-
- if (!car_param) {
- memset(car, 0, sizeof(struct vpfe_ipipe_car));
- } else {
- memcpy(car, car_param, sizeof(struct vpfe_ipipe_car));
- if (ipipe_validate_car_params(car) < 0) {
- dev_err(dev, "Invalid car params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_car_regs(ipipe->base_addr, car);
-
- return 0;
-}
-
-static int ipipe_get_car_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_car *car_param = (struct vpfe_ipipe_car *)param;
- struct vpfe_ipipe_car *car = &ipipe->config.car;
-
- memcpy(car_param, car, sizeof(struct vpfe_ipipe_car));
- return 0;
-}
-
-static int ipipe_validate_cgs_params(struct vpfe_ipipe_cgs *cgs)
-{
- if (cgs->en > 1 || cgs->h_shft > CAR_SHIFT_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cgs *cgs_param = (struct vpfe_ipipe_cgs *)param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
-
- if (!cgs_param) {
- memset(cgs, 0, sizeof(struct vpfe_ipipe_cgs));
- } else {
- memcpy(cgs, cgs_param, sizeof(struct vpfe_ipipe_cgs));
- if (ipipe_validate_cgs_params(cgs) < 0) {
- dev_err(dev, "Invalid cgs params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_cgs_regs(ipipe->base_addr, cgs);
-
- return 0;
-}
-
-static int ipipe_get_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cgs *cgs_param = (struct vpfe_ipipe_cgs *)param;
- struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
-
- memcpy(cgs_param, cgs, sizeof(struct vpfe_ipipe_cgs));
-
- return 0;
-}
-
-static const struct ipipe_module_if ipipe_modules[VPFE_IPIPE_MAX_MODULES] = {
- /* VPFE_IPIPE_INPUT_CONFIG */ {
- offsetof(struct ipipe_module_params, input_config),
- FIELD_SIZEOF(struct ipipe_module_params, input_config),
- offsetof(struct vpfe_ipipe_config, input_config),
- ipipe_set_input_config,
- ipipe_get_input_config,
- }, /* VPFE_IPIPE_LUTDPC */ {
- offsetof(struct ipipe_module_params, lutdpc),
- FIELD_SIZEOF(struct ipipe_module_params, lutdpc),
- offsetof(struct vpfe_ipipe_config, lutdpc),
- ipipe_set_lutdpc_params,
- ipipe_get_lutdpc_params,
- }, /* VPFE_IPIPE_OTFDPC */ {
- offsetof(struct ipipe_module_params, otfdpc),
- FIELD_SIZEOF(struct ipipe_module_params, otfdpc),
- offsetof(struct vpfe_ipipe_config, otfdpc),
- ipipe_set_otfdpc_params,
- ipipe_get_otfdpc_params,
- }, /* VPFE_IPIPE_NF1 */ {
- offsetof(struct ipipe_module_params, nf1),
- FIELD_SIZEOF(struct ipipe_module_params, nf1),
- offsetof(struct vpfe_ipipe_config, nf1),
- ipipe_set_nf1_params,
- ipipe_get_nf1_params,
- }, /* VPFE_IPIPE_NF2 */ {
- offsetof(struct ipipe_module_params, nf2),
- FIELD_SIZEOF(struct ipipe_module_params, nf2),
- offsetof(struct vpfe_ipipe_config, nf2),
- ipipe_set_nf2_params,
- ipipe_get_nf2_params,
- }, /* VPFE_IPIPE_WB */ {
- offsetof(struct ipipe_module_params, wbal),
- FIELD_SIZEOF(struct ipipe_module_params, wbal),
- offsetof(struct vpfe_ipipe_config, wbal),
- ipipe_set_wb_params,
- ipipe_get_wb_params,
- }, /* VPFE_IPIPE_RGB2RGB_1 */ {
- offsetof(struct ipipe_module_params, rgb2rgb1),
- FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb1),
- offsetof(struct vpfe_ipipe_config, rgb2rgb1),
- ipipe_set_rgb2rgb_1_params,
- ipipe_get_rgb2rgb_1_params,
- }, /* VPFE_IPIPE_RGB2RGB_2 */ {
- offsetof(struct ipipe_module_params, rgb2rgb2),
- FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb2),
- offsetof(struct vpfe_ipipe_config, rgb2rgb2),
- ipipe_set_rgb2rgb_2_params,
- ipipe_get_rgb2rgb_2_params,
- }, /* VPFE_IPIPE_GAMMA */ {
- offsetof(struct ipipe_module_params, gamma),
- FIELD_SIZEOF(struct ipipe_module_params, gamma),
- offsetof(struct vpfe_ipipe_config, gamma),
- ipipe_set_gamma_params,
- ipipe_get_gamma_params,
- }, /* VPFE_IPIPE_3D_LUT */ {
- offsetof(struct ipipe_module_params, lut),
- FIELD_SIZEOF(struct ipipe_module_params, lut),
- offsetof(struct vpfe_ipipe_config, lut),
- ipipe_set_3d_lut_params,
- ipipe_get_3d_lut_params,
- }, /* VPFE_IPIPE_RGB2YUV */ {
- offsetof(struct ipipe_module_params, rgb2yuv),
- FIELD_SIZEOF(struct ipipe_module_params, rgb2yuv),
- offsetof(struct vpfe_ipipe_config, rgb2yuv),
- ipipe_set_rgb2yuv_params,
- ipipe_get_rgb2yuv_params,
- }, /* VPFE_IPIPE_YUV422_CONV */ {
- offsetof(struct ipipe_module_params, yuv422_conv),
- FIELD_SIZEOF(struct ipipe_module_params, yuv422_conv),
- offsetof(struct vpfe_ipipe_config, yuv422_conv),
- ipipe_set_yuv422_conv_params,
- ipipe_get_yuv422_conv_params,
- }, /* VPFE_IPIPE_YEE */ {
- offsetof(struct ipipe_module_params, yee),
- FIELD_SIZEOF(struct ipipe_module_params, yee),
- offsetof(struct vpfe_ipipe_config, yee),
- ipipe_set_yee_params,
- ipipe_get_yee_params,
- }, /* VPFE_IPIPE_GIC */ {
- offsetof(struct ipipe_module_params, gic),
- FIELD_SIZEOF(struct ipipe_module_params, gic),
- offsetof(struct vpfe_ipipe_config, gic),
- ipipe_set_gic_params,
- ipipe_get_gic_params,
- }, /* VPFE_IPIPE_CFA */ {
- offsetof(struct ipipe_module_params, cfa),
- FIELD_SIZEOF(struct ipipe_module_params, cfa),
- offsetof(struct vpfe_ipipe_config, cfa),
- ipipe_set_cfa_params,
- ipipe_get_cfa_params,
- }, /* VPFE_IPIPE_CAR */ {
- offsetof(struct ipipe_module_params, car),
- FIELD_SIZEOF(struct ipipe_module_params, car),
- offsetof(struct vpfe_ipipe_config, car),
- ipipe_set_car_params,
- ipipe_get_car_params,
- }, /* VPFE_IPIPE_CGS */ {
- offsetof(struct ipipe_module_params, cgs),
- FIELD_SIZEOF(struct ipipe_module_params, cgs),
- offsetof(struct vpfe_ipipe_config, cgs),
- ipipe_set_cgs_params,
- ipipe_get_cgs_params,
- }, /* VPFE_IPIPE_GBCE */ {
- offsetof(struct ipipe_module_params, gbce),
- FIELD_SIZEOF(struct ipipe_module_params, gbce),
- offsetof(struct vpfe_ipipe_config, gbce),
- ipipe_set_gbce_params,
- ipipe_get_gbce_params,
- },
-};
-
-static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- unsigned int i;
- int rval = 0;
-
- for (i = 0; i < ARRAY_SIZE(ipipe_modules); i++) {
- unsigned int bit = 1 << i;
-
- if (cfg->flag & bit) {
- const struct ipipe_module_if *module_if =
- &ipipe_modules[i];
- struct ipipe_module_params *params;
- void __user *from = *(void * __user *)
- ((void *)cfg + module_if->config_offset);
- size_t size;
- void *to;
-
-			params = kmalloc(sizeof(struct ipipe_module_params),
-					 GFP_KERNEL);
-			if (!params)
-				return -ENOMEM;
-			to = (void *)params + module_if->param_offset;
-			size = module_if->param_size;
-
-			if (to && from && size) {
-				if (copy_from_user(to, from, size)) {
-					/* don't leak the scratch buffer */
-					kfree(params);
-					rval = -EFAULT;
-					break;
-				}
-				rval = module_if->set(ipipe, to);
-				if (rval) {
-					kfree(params);
-					goto error;
-				}
-			} else if (to && !from && size) {
-				rval = module_if->set(ipipe, NULL);
-				if (rval) {
-					kfree(params);
-					goto error;
-				}
-			}
-			kfree(params);
- }
- }
-error:
- return rval;
-}
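ipipe_s_config() above is entirely table driven: for every flag bit it reads the user pointer stored in struct vpfe_ipipe_config at config_offset, copies param_size bytes into a scratch struct ipipe_module_params at param_offset, and hands that buffer to the module's set() hook. The following compact, self-contained sketch shows the same offsetof-based dispatch in plain C (not driver code; the two toy modules, their fields, and the printf output are made up for illustration):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct wb_params { int gain; };       /* stand-ins for the real */
    struct nf_params { int strength; };   /* per-module structures  */

    /* scratch area with one member per module (like ipipe_module_params) */
    struct module_params {
            struct wb_params wb;
            struct nf_params nf;
    };

    /* user-facing config: a flag word plus one pointer per module */
    struct user_config {
            unsigned int flag;
            struct wb_params *wb;
            struct nf_params *nf;
    };

    struct module_if {
            size_t param_offset;    /* where the scratch copy lives */
            size_t param_size;
            size_t config_offset;   /* where the user pointer lives */
            void (*set)(const void *param);
    };

    static void set_wb(const void *p)
    {
            printf("wb gain %d\n", ((const struct wb_params *)p)->gain);
    }

    static void set_nf(const void *p)
    {
            printf("nf strength %d\n", ((const struct nf_params *)p)->strength);
    }

    static const struct module_if modules[] = {
            { offsetof(struct module_params, wb), sizeof(struct wb_params),
              offsetof(struct user_config, wb), set_wb },
            { offsetof(struct module_params, nf), sizeof(struct nf_params),
              offsetof(struct user_config, nf), set_nf },
    };

    int main(void)
    {
            struct wb_params wb = { .gain = 2 };
            struct user_config cfg = { .flag = 1u << 0, .wb = &wb };
            struct module_params scratch;
            unsigned int i;

            for (i = 0; i < sizeof(modules) / sizeof(modules[0]); i++) {
                    const struct module_if *m = &modules[i];
                    void *from = *(void **)((char *)&cfg + m->config_offset);
                    void *to = (char *)&scratch + m->param_offset;

                    if (!(cfg.flag & (1u << i)) || !from)
                            continue;
                    memcpy(to, from, m->param_size); /* copy_from_user() in the driver */
                    m->set(to);
            }
            return 0;
    }

The driver's real table additionally carries a get() hook and uses copy_from_user()/copy_to_user() instead of memcpy(), but the offset bookkeeping is the same.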
-
-static int ipipe_g_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- unsigned int i;
- int rval = 0;
-
- for (i = 1; i < ARRAY_SIZE(ipipe_modules); i++) {
- unsigned int bit = 1 << i;
-
- if (cfg->flag & bit) {
- const struct ipipe_module_if *module_if =
- &ipipe_modules[i];
- struct ipipe_module_params *params;
- void __user *to = *(void * __user *)
- ((void *)cfg + module_if->config_offset);
- size_t size;
- void *from;
-
-			params = kmalloc(sizeof(struct ipipe_module_params),
-					 GFP_KERNEL);
-			if (!params)
-				return -ENOMEM;
-			from = (void *)params + module_if->param_offset;
-			size = module_if->param_size;
-
-			if (to && from && size) {
-				rval = module_if->get(ipipe, from);
-				if (rval) {
-					kfree(params);
-					goto error;
-				}
-				if (copy_to_user(to, from, size)) {
-					/* don't leak the scratch buffer */
-					kfree(params);
-					rval = -EFAULT;
-					break;
-				}
-			}
-			kfree(params);
- }
- }
-error:
- return rval;
-}
-
-/*
- * ipipe_ioctl() - Handle ipipe module private ioctl's
- * @sd: pointer to v4l2 subdev structure
- * @cmd: configuration command
- * @arg: configuration argument
- */
-static long ipipe_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- int ret = 0;
-
- switch (cmd) {
- case VIDIOC_VPFE_IPIPE_S_CONFIG:
- ret = ipipe_s_config(sd, arg);
- break;
-
- case VIDIOC_VPFE_IPIPE_G_CONFIG:
- ret = ipipe_g_config(sd, arg);
- break;
-
- default:
- ret = -ENOIOCTLCMD;
- }
- return ret;
-}
-
-void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
- struct vpfe_ipipe_device *ipipe = &vpfe_dev->vpfe_ipipe;
- unsigned char val;
-
- if (ipipe->input == IPIPE_INPUT_NONE)
- return;
-
- /* ipipe is set to single shot */
- if (ipipeif->input == IPIPEIF_INPUT_MEMORY && en) {
- /* for single-shot mode, need to wait for h/w to
- * reset many register bits
- */
- do {
- val = regr_ip(vpfe_dev->vpfe_ipipe.base_addr,
- IPIPE_SRC_EN);
- } while (val);
- }
- regw_ip(vpfe_dev->vpfe_ipipe.base_addr, en, IPIPE_SRC_EN);
-}
-
-/*
- * ipipe_set_stream() - Enable/Disable streaming on the ipipe subdevice
- * @sd: pointer to v4l2 subdev structure
- * @enable: 1 == Enable, 0 == Disable
- */
-static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
-
- if (enable && ipipe->input != IPIPE_INPUT_NONE &&
- ipipe->output != IPIPE_OUTPUT_NONE) {
- if (config_ipipe_hw(ipipe) < 0)
- return -EINVAL;
- }
-
- vpfe_ipipe_enable(vpfe_dev, enable);
-
- return 0;
-}
-
-/*
- * __ipipe_get_format() - helper function for getting ipipe format
- * @ipipe: pointer to ipipe private structure.
- * @pad: pad number.
- * @cfg: V4L2 subdev pad config
- * @which: wanted subdev format.
- *
- */
-static struct v4l2_mbus_framefmt *
-__ipipe_get_format(struct vpfe_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipe->subdev, cfg, pad);
-
- return &ipipe->formats[pad];
-}
-
-/*
- * ipipe_try_format() - Handle try format by pad subdev method
- * @ipipe: VPFE ipipe device.
- * @cfg: V4L2 subdev pad config
- * @pad: pad num.
- * @fmt: pointer to v4l2 format structure.
- * @which : wanted subdev format
- */
-static void
-ipipe_try_format(struct vpfe_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- unsigned int max_out_height;
- unsigned int max_out_width;
- unsigned int i;
-
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
-
- if (pad == IPIPE_PAD_SINK) {
- for (i = 0; i < ARRAY_SIZE(ipipe_input_fmts); i++)
- if (fmt->code == ipipe_input_fmts[i])
- break;
-
-		/* If not found, use SGRBG12 as default */
- if (i >= ARRAY_SIZE(ipipe_input_fmts))
- fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
- } else if (pad == IPIPE_PAD_SOURCE) {
- for (i = 0; i < ARRAY_SIZE(ipipe_output_fmts); i++)
- if (fmt->code == ipipe_output_fmts[i])
- break;
-
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(ipipe_output_fmts))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- }
-
-	fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
-	fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
-}
-
-/*
- * ipipe_set_format() - Handle set format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int
-ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- ipipe_try_format(ipipe, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (fmt->pad == IPIPE_PAD_SINK &&
- (ipipe->input == IPIPE_INPUT_CCDC ||
- ipipe->input == IPIPE_INPUT_MEMORY))
- ipipe->formats[fmt->pad] = fmt->format;
- else if (fmt->pad == IPIPE_PAD_SOURCE &&
- ipipe->output == IPIPE_OUTPUT_RESIZER)
- ipipe->formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * ipipe_get_format() - Handle get format by pads subdev method.
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure.
- */
-static int
-ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- fmt->format = ipipe->formats[fmt->pad];
- else
- fmt->format = *(v4l2_subdev_get_try_format(sd, cfg, fmt->pad));
-
- return 0;
-}
-
-/*
- * ipipe_enum_frame_size() - enum frame sizes on pads
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @fse: pointer to v4l2_subdev_frame_size_enum structure.
- */
-static int
-ipipe_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * ipipe_enum_mbus_code() - enum mbus codes for pads
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- */
-static int
-ipipe_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- switch (code->pad) {
- case IPIPE_PAD_SINK:
- if (code->index >= ARRAY_SIZE(ipipe_input_fmts))
- return -EINVAL;
- code->code = ipipe_input_fmts[code->index];
- break;
-
- case IPIPE_PAD_SOURCE:
- if (code->index >= ARRAY_SIZE(ipipe_output_fmts))
- return -EINVAL;
- code->code = ipipe_output_fmts[code->index];
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * ipipe_s_ctrl() - Handle set control subdev method
- * @ctrl: pointer to v4l2 control structure
- */
-static int ipipe_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vpfe_ipipe_device *ipipe =
- container_of(ctrl->handler, struct vpfe_ipipe_device, ctrls);
- struct ipipe_lum_adj *lum_adj = &ipipe->config.lum_adj;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- lum_adj->brightness = ctrl->val;
- ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
- break;
-
- case V4L2_CID_CONTRAST:
- lum_adj->contrast = ctrl->val;
- ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * ipipe_init_formats() - Initialize formats on all pads
- * @sd: pointer to v4l2 subdev structure.
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. Try formats are initialized
- * on the file handle.
- */
-static int
-ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPE_PAD_SINK;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipe_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPE_PAD_SOURCE;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipe_set_format(sd, fh->pad, &format);
-
- return 0;
-}
-
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops ipipe_v4l2_core_ops = {
- .ioctl = ipipe_ioctl,
-};
-
-static const struct v4l2_ctrl_ops ipipe_ctrl_ops = {
- .s_ctrl = ipipe_s_ctrl,
-};
-
-/* subdev file operations */
-static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
- .open = ipipe_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
- .s_stream = ipipe_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
- .enum_mbus_code = ipipe_enum_mbus_code,
- .enum_frame_size = ipipe_enum_frame_size,
- .get_fmt = ipipe_get_format,
- .set_fmt = ipipe_set_format,
-};
-
-/* v4l2 subdev operation */
-static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
- .core = &ipipe_v4l2_core_ops,
- .video = &ipipe_v4l2_video_ops,
- .pad = &ipipe_v4l2_pad_ops,
-};
-
-/*
- * Media entity operations
- */
-
-/*
- * ipipe_link_setup() - Setup ipipe connections
- * @entity: ipipe media entity
- * @local: Pad at the local end of the link
- * @remote: Pad at the remote end of the link
- * @flags: Link flags
- *
- * return -EINVAL or zero on success
- */
-static int
-ipipe_link_setup(struct media_entity *entity, const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
-
- switch (local->index | media_entity_type(remote->entity)) {
- case IPIPE_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipe->input = IPIPE_INPUT_NONE;
- break;
- }
- if (ipipe->input != IPIPE_INPUT_NONE)
- return -EBUSY;
- if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
- ipipe->input = IPIPE_INPUT_MEMORY;
- else
- ipipe->input = IPIPE_INPUT_CCDC;
- break;
-
- case IPIPE_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
- /* out to RESIZER */
- if (flags & MEDIA_LNK_FL_ENABLED)
- ipipe->output = IPIPE_OUTPUT_RESIZER;
- else
- ipipe->output = IPIPE_OUTPUT_NONE;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct media_entity_operations ipipe_media_ops = {
- .link_setup = ipipe_link_setup,
-};
-
-/*
- * vpfe_ipipe_unregister_entities() - ipipe unregister entity
- * @vpfe_ipipe: pointer to ipipe subdevice structure.
- */
-void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *vpfe_ipipe)
-{
- /* unregister subdev */
- v4l2_device_unregister_subdev(&vpfe_ipipe->subdev);
- /* cleanup entity */
- media_entity_cleanup(&vpfe_ipipe->subdev.entity);
-}
-
-/*
- * vpfe_ipipe_register_entities() - ipipe register entity
- * @ipipe: pointer to ipipe subdevice structure.
- * @vdev: pointer to v4l2 device structure.
- */
-int
-vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev */
- ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
- if (ret) {
- pr_err("Failed to register ipipe as v4l2 subdevice\n");
- return ret;
- }
-
- return ret;
-}
-
-#define IPIPE_CONTRAST_HIGH 0xff
-#define IPIPE_BRIGHT_HIGH 0xff
-
-/*
- * vpfe_ipipe_init() - ipipe module initialization.
- * @ipipe: pointer to ipipe subdevice structure.
- * @pdev: platform device pointer.
- */
-int
-vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
-{
- struct media_pad *pads = &ipipe->pads[0];
- struct v4l2_subdev *sd = &ipipe->subdev;
- struct media_entity *me = &sd->entity;
- static resource_size_t res_len;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (!res)
- return -ENOENT;
-
- res_len = resource_size(res);
- res = request_mem_region(res->start, res_len, res->name);
- if (!res)
- return -EBUSY;
- ipipe->base_addr = ioremap_nocache(res->start, res_len);
- if (!ipipe->base_addr)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 6);
- if (!res)
- return -ENOENT;
- ipipe->isp5_base_addr = ioremap_nocache(res->start, res_len);
- if (!ipipe->isp5_base_addr)
- return -EBUSY;
-
- v4l2_subdev_init(sd, &ipipe_v4l2_ops);
- sd->internal_ops = &ipipe_v4l2_internal_ops;
- strlcpy(sd->name, "DAVINCI IPIPE", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, ipipe);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[IPIPE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- ipipe->input = IPIPE_INPUT_NONE;
- ipipe->output = IPIPE_OUTPUT_NONE;
-
- me->ops = &ipipe_media_ops;
- v4l2_ctrl_handler_init(&ipipe->ctrls, 2);
- v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0,
- IPIPE_BRIGHT_HIGH, 1, 16);
- v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
- V4L2_CID_CONTRAST, 0,
- IPIPE_CONTRAST_HIGH, 1, 16);
-
-
- v4l2_ctrl_handler_setup(&ipipe->ctrls);
- sd->ctrl_handler = &ipipe->ctrls;
-
- return media_entity_init(me, IPIPE_PADS_NUM, pads, 0);
-}
-
-/*
- * vpfe_ipipe_cleanup() - ipipe subdevice cleanup.
- * @ipipe: pointer to ipipe subdevice
- * @dev: pointer to platform device
- */
-void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
- struct platform_device *pdev)
-{
- struct resource *res;
-
- v4l2_ctrl_handler_free(&ipipe->ctrls);
-
- iounmap(ipipe->base_addr);
- iounmap(ipipe->isp5_base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res)
- release_mem_region(res->start, resource_size(res));
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
deleted file mode 100644
index d81b29e19309..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPE_H
-#define _DAVINCI_VPFE_DM365_IPIPE_H
-
-#include <linux/platform_device.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-
-#include "davinci_vpfe_user.h"
-#include "vpfe_video.h"
-
-#define CEIL(a, b) (((a) + (b-1)) / (b))
-
-enum ipipe_noise_filter {
- IPIPE_D2F_1ST = 0,
- IPIPE_D2F_2ND = 1,
-};
-
-/* Used for driver storage */
-struct ipipe_otfdpc_2_0 {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* defect detection method */
- enum vpfe_ipipe_otfdpc_det_meth det_method;
- /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
- * used
- */
- enum vpfe_ipipe_otfdpc_alg alg;
- struct vpfe_ipipe_otfdpc_2_0_cfg otfdpc_2_0;
-};
-
-struct ipipe_otfdpc_3_0 {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* defect detection method */
- enum vpfe_ipipe_otfdpc_det_meth det_method;
- /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
- * used
- */
- enum vpfe_ipipe_otfdpc_alg alg;
- struct vpfe_ipipe_otfdpc_3_0_cfg otfdpc_3_0;
-};
-
-/* Structure for configuring Luminance Adjustment module */
-struct ipipe_lum_adj {
- /* Brightness adjustments */
- unsigned char brightness;
- /* contrast adjustments */
- unsigned char contrast;
-};
-
-enum ipipe_rgb2rgb {
- IPIPE_RGB2RGB_1 = 0,
- IPIPE_RGB2RGB_2 = 1,
-};
-
-struct ipipe_module_params {
- __u32 flag;
- struct vpfe_ipipe_input_config input_config;
- struct vpfe_ipipe_lutdpc lutdpc;
- struct vpfe_ipipe_otfdpc otfdpc;
- struct vpfe_ipipe_nf nf1;
- struct vpfe_ipipe_nf nf2;
- struct vpfe_ipipe_gic gic;
- struct vpfe_ipipe_wb wbal;
- struct vpfe_ipipe_cfa cfa;
- struct vpfe_ipipe_rgb2rgb rgb2rgb1;
- struct vpfe_ipipe_rgb2rgb rgb2rgb2;
- struct vpfe_ipipe_gamma gamma;
- struct vpfe_ipipe_3d_lut lut;
- struct vpfe_ipipe_rgb2yuv rgb2yuv;
- struct vpfe_ipipe_gbce gbce;
- struct vpfe_ipipe_yuv422_conv yuv422_conv;
- struct vpfe_ipipe_yee yee;
- struct vpfe_ipipe_car car;
- struct vpfe_ipipe_cgs cgs;
- struct ipipe_lum_adj lum_adj;
-};
-
-#define IPIPE_PAD_SINK 0
-#define IPIPE_PAD_SOURCE 1
-
-#define IPIPE_PADS_NUM 2
-
-#define IPIPE_OUTPUT_NONE 0
-#define IPIPE_OUTPUT_RESIZER (1 << 0)
-
-enum ipipe_input_entity {
- IPIPE_INPUT_NONE = 0,
- IPIPE_INPUT_MEMORY = 1,
- IPIPE_INPUT_CCDC = 2,
-};
-
-
-struct vpfe_ipipe_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[IPIPE_PADS_NUM];
- struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
- enum ipipe_input_entity input;
- unsigned int output;
- struct v4l2_ctrl_handler ctrls;
- void __iomem *base_addr;
- void __iomem *isp5_base_addr;
- struct ipipe_module_params config;
-};
-
-struct ipipe_module_if {
- unsigned int param_offset;
- unsigned int param_size;
- unsigned int config_offset;
- int (*set)(struct vpfe_ipipe_device *ipipe, void *param);
- int (*get)(struct vpfe_ipipe_device *ipipe, void *param);
-};
-
-/* data paths */
-enum ipipe_data_paths {
- IPIPE_RAW2YUV,
- /* Bayer RAW input to YCbCr output */
- IPIPE_RAW2RAW,
- /* Bayer Raw to Bayer output */
- IPIPE_RAW2BOX,
- /* Bayer Raw to Boxcar output */
- IPIPE_YUV2YUV
- /* YUV Raw to YUV Raw output */
-};
-
-#define IPIPE_COLPTN_R_Ye 0x0
-#define IPIPE_COLPTN_Gr_Cy 0x1
-#define IPIPE_COLPTN_Gb_G 0x2
-#define IPIPE_COLPTN_B_Mg 0x3
-
-#define COLPAT_EE_SHIFT 0
-#define COLPAT_EO_SHIFT 2
-#define COLPAT_OE_SHIFT 4
-#define COLPAT_OO_SHIFT 6
-
-#define ipipe_sgrbg_pattern \
- (IPIPE_COLPTN_Gr_Cy << COLPAT_EE_SHIFT | \
- IPIPE_COLPTN_R_Ye << COLPAT_EO_SHIFT | \
- IPIPE_COLPTN_B_Mg << COLPAT_OE_SHIFT | \
- IPIPE_COLPTN_Gb_G << COLPAT_OO_SHIFT)
-
-#define ipipe_srggb_pattern \
- (IPIPE_COLPTN_R_Ye << COLPAT_EE_SHIFT | \
- IPIPE_COLPTN_Gr_Cy << COLPAT_EO_SHIFT | \
- IPIPE_COLPTN_Gb_G << COLPAT_OE_SHIFT | \
- IPIPE_COLPTN_B_Mg << COLPAT_OO_SHIFT)
-
-int vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
- struct v4l2_device *v4l2_dev);
-int vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe,
- struct platform_device *pdev);
-void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *ipipe);
-void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
- struct platform_device *pdev);
-void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en);
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPE_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
deleted file mode 100644
index 2a3a56b88de1..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ /dev/null
@@ -1,1048 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include "dm365_ipipe_hw.h"
-
-#define IPIPE_MODE_CONTINUOUS 0
-#define IPIPE_MODE_SINGLE_SHOT 1
-
-static void ipipe_clock_enable(void __iomem *base_addr)
-{
- /* enable IPIPE MMR for register write access */
- regw_ip(base_addr, IPIPE_GCK_MMR_DEFAULT, IPIPE_GCK_MMR);
-
- /* enable the clock wb,cfa,dfc,d2f,pre modules */
- regw_ip(base_addr, IPIPE_GCK_PIX_DEFAULT, IPIPE_GCK_PIX);
-}
-
-static void
-rsz_set_common_params(void __iomem *rsz_base, struct resizer_params *params)
-{
- struct rsz_common_params *rsz_common = &params->rsz_common;
- u32 val;
-
- /* Set mode */
- regw_rsz(rsz_base, params->oper_mode, RSZ_SRC_MODE);
-
- /* data source selection and bypass */
- val = (rsz_common->passthrough << RSZ_BYPASS_SHIFT) |
- rsz_common->source;
- regw_rsz(rsz_base, val, RSZ_SRC_FMT0);
-
- /* src image selection */
- val = (rsz_common->raw_flip & 1) |
- (rsz_common->src_img_fmt << RSZ_SRC_IMG_FMT_SHIFT) |
- ((rsz_common->y_c & 1) << RSZ_SRC_Y_C_SEL_SHIFT);
- regw_rsz(rsz_base, val, RSZ_SRC_FMT1);
-
- regw_rsz(rsz_base, rsz_common->vps & IPIPE_RSZ_VPS_MASK, RSZ_SRC_VPS);
- regw_rsz(rsz_base, rsz_common->hps & IPIPE_RSZ_HPS_MASK, RSZ_SRC_HPS);
- regw_rsz(rsz_base, rsz_common->vsz & IPIPE_RSZ_VSZ_MASK, RSZ_SRC_VSZ);
- regw_rsz(rsz_base, rsz_common->hsz & IPIPE_RSZ_HSZ_MASK, RSZ_SRC_HSZ);
- regw_rsz(rsz_base, rsz_common->yuv_y_min, RSZ_YUV_Y_MIN);
- regw_rsz(rsz_base, rsz_common->yuv_y_max, RSZ_YUV_Y_MAX);
- regw_rsz(rsz_base, rsz_common->yuv_c_min, RSZ_YUV_C_MIN);
- regw_rsz(rsz_base, rsz_common->yuv_c_max, RSZ_YUV_C_MAX);
- /* chromatic position */
- regw_rsz(rsz_base, rsz_common->out_chr_pos, RSZ_YUV_PHS);
-}
-
-static void
-rsz_set_rsz_regs(void __iomem *rsz_base, unsigned int rsz_id,
- struct resizer_params *params)
-{
- struct resizer_scale_param *rsc_params;
- struct rsz_ext_mem_param *ext_mem;
- struct resizer_rgb *rgb;
- u32 reg_base;
- u32 val;
-
- rsc_params = &params->rsz_rsc_param[rsz_id];
- rgb = &params->rsz2rgb[rsz_id];
- ext_mem = &params->ext_mem_param[rsz_id];
-
- if (rsz_id == RSZ_A) {
- val = rsc_params->h_flip << RSZA_H_FLIP_SHIFT;
- val |= rsc_params->v_flip << RSZA_V_FLIP_SHIFT;
- reg_base = RSZ_EN_A;
- } else {
- val = rsc_params->h_flip << RSZB_H_FLIP_SHIFT;
- val |= rsc_params->v_flip << RSZB_V_FLIP_SHIFT;
- reg_base = RSZ_EN_B;
- }
- /* update flip settings */
- regw_rsz(rsz_base, val, RSZ_SEQ);
-
- regw_rsz(rsz_base, params->oper_mode, reg_base + RSZ_MODE);
-
- val = (rsc_params->cen << RSZ_CEN_SHIFT) | rsc_params->yen;
- regw_rsz(rsz_base, val, reg_base + RSZ_420);
-
- regw_rsz(rsz_base, rsc_params->i_vps & RSZ_VPS_MASK,
- reg_base + RSZ_I_VPS);
- regw_rsz(rsz_base, rsc_params->i_hps & RSZ_HPS_MASK,
- reg_base + RSZ_I_HPS);
- regw_rsz(rsz_base, rsc_params->o_vsz & RSZ_O_VSZ_MASK,
- reg_base + RSZ_O_VSZ);
- regw_rsz(rsz_base, rsc_params->o_hsz & RSZ_O_HSZ_MASK,
- reg_base + RSZ_O_HSZ);
- regw_rsz(rsz_base, rsc_params->v_phs_y & RSZ_V_PHS_MASK,
- reg_base + RSZ_V_PHS_Y);
- regw_rsz(rsz_base, rsc_params->v_phs_c & RSZ_V_PHS_MASK,
- reg_base + RSZ_V_PHS_C);
-
- /* keep this additional adjustment to zero for now */
- regw_rsz(rsz_base, rsc_params->v_dif & RSZ_V_DIF_MASK,
- reg_base + RSZ_V_DIF);
-
- val = (rsc_params->v_typ_y & 1) |
- ((rsc_params->v_typ_c & 1) << RSZ_TYP_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_V_TYP);
-
- val = (rsc_params->v_lpf_int_y & RSZ_LPF_INT_MASK) |
- ((rsc_params->v_lpf_int_c & RSZ_LPF_INT_MASK) <<
- RSZ_LPF_INT_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_V_LPF);
-
- regw_rsz(rsz_base, rsc_params->h_phs &
- RSZ_H_PHS_MASK, reg_base + RSZ_H_PHS);
-
- regw_rsz(rsz_base, 0, reg_base + RSZ_H_PHS_ADJ);
- regw_rsz(rsz_base, rsc_params->h_dif &
- RSZ_H_DIF_MASK, reg_base + RSZ_H_DIF);
-
- val = (rsc_params->h_typ_y & 1) |
- ((rsc_params->h_typ_c & 1) << RSZ_TYP_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_H_TYP);
-
- val = (rsc_params->h_lpf_int_y & RSZ_LPF_INT_MASK) |
- ((rsc_params->h_lpf_int_c & RSZ_LPF_INT_MASK) <<
- RSZ_LPF_INT_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_H_LPF);
-
- regw_rsz(rsz_base, rsc_params->dscale_en & 1, reg_base + RSZ_DWN_EN);
-
- val = (rsc_params->h_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) |
- ((rsc_params->v_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) <<
- RSZ_DWN_SCALE_AV_SZ_V_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_DWN_AV);
-
- /* setting rgb conversion parameters */
- regw_rsz(rsz_base, rgb->rgb_en, reg_base + RSZ_RGB_EN);
-
- val = (rgb->rgb_typ << RSZ_RGB_TYP_SHIFT) |
- (rgb->rgb_msk0 << RSZ_RGB_MSK0_SHIFT) |
- (rgb->rgb_msk1 << RSZ_RGB_MSK1_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_RGB_TYP);
-
- regw_rsz(rsz_base, rgb->rgb_alpha_val & RSZ_RGB_ALPHA_MASK,
- reg_base + RSZ_RGB_BLD);
-
- /* setting external memory parameters */
- regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_y, reg_base + RSZ_SDR_Y_OFT);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_y,
- reg_base + RSZ_SDR_Y_PTR_S);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_e_y,
- reg_base + RSZ_SDR_Y_PTR_E);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_c, reg_base + RSZ_SDR_C_OFT);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_c,
- reg_base + RSZ_SDR_C_PTR_S);
- regw_rsz(rsz_base, (ext_mem->rsz_sdr_ptr_e_c >> 1),
- reg_base + RSZ_SDR_C_PTR_E);
-}
-
-/* set the registers of either RSZ0 or RSZ1 */
-static void
-ipipe_setup_resizer(void __iomem *rsz_base, struct resizer_params *params)
-{
- /* enable MMR gate to write to Resizer */
- regw_rsz(rsz_base, 1, RSZ_GCK_MMR);
-
- /* Enable resizer if it is not in bypass mode */
- if (params->rsz_common.passthrough)
- regw_rsz(rsz_base, 0, RSZ_GCK_SDR);
- else
- regw_rsz(rsz_base, 1, RSZ_GCK_SDR);
-
- rsz_set_common_params(rsz_base, params);
-
- regw_rsz(rsz_base, params->rsz_en[RSZ_A], RSZ_EN_A);
-
- if (params->rsz_en[RSZ_A])
-		/* setting rescale parameters */
- rsz_set_rsz_regs(rsz_base, RSZ_A, params);
-
- regw_rsz(rsz_base, params->rsz_en[RSZ_B], RSZ_EN_B);
-
- if (params->rsz_en[RSZ_B])
- rsz_set_rsz_regs(rsz_base, RSZ_B, params);
-}
-
-static u32 ipipe_get_color_pat(u32 pix)
-{
- switch (pix) {
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return ipipe_sgrbg_pattern;
-
- default:
- return ipipe_srggb_pattern;
- }
-}
-
-static int ipipe_get_data_path(struct vpfe_ipipe_device *ipipe)
-{
- u32 temp_pix_fmt;
-
- switch (ipipe->formats[IPIPE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- temp_pix_fmt = MEDIA_BUS_FMT_SGRBG12_1X12;
- break;
-
- default:
- temp_pix_fmt = MEDIA_BUS_FMT_UYVY8_2X8;
- }
-
- if (temp_pix_fmt == MEDIA_BUS_FMT_SGRBG12_1X12) {
- if (ipipe->formats[IPIPE_PAD_SOURCE].code ==
- MEDIA_BUS_FMT_SGRBG12_1X12)
- return IPIPE_RAW2RAW;
- return IPIPE_RAW2YUV;
- }
-
- return IPIPE_YUV2YUV;
-}
-
-static int get_ipipe_mode(struct vpfe_ipipe_device *ipipe)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
-
- if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
- return IPIPE_MODE_SINGLE_SHOT;
- else if (ipipeif_sink == IPIPEIF_INPUT_ISIF)
- return IPIPE_MODE_CONTINUOUS;
-
- return -EINVAL;
-}
-
-int config_ipipe_hw(struct vpfe_ipipe_device *ipipe)
-{
- struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
- void __iomem *ipipe_base = ipipe->base_addr;
- struct v4l2_mbus_framefmt *outformat;
- u32 color_pat;
-	int ipipe_mode;
- u32 data_path;
-
- /* enable clock to IPIPE */
- vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
- ipipe_clock_enable(ipipe_base);
-
- if (ipipe->input == IPIPE_INPUT_NONE) {
- regw_ip(ipipe_base, 0, IPIPE_SRC_EN);
- return 0;
- }
-
- ipipe_mode = get_ipipe_mode(ipipe);
- if (ipipe_mode < 0) {
- pr_err("Failed to get ipipe mode");
- return -EINVAL;
- }
- regw_ip(ipipe_base, ipipe_mode, IPIPE_SRC_MODE);
-
- data_path = ipipe_get_data_path(ipipe);
- regw_ip(ipipe_base, data_path, IPIPE_SRC_FMT);
-
- regw_ip(ipipe_base, config->vst & IPIPE_RSZ_VPS_MASK, IPIPE_SRC_VPS);
- regw_ip(ipipe_base, config->hst & IPIPE_RSZ_HPS_MASK, IPIPE_SRC_HPS);
-
- outformat = &ipipe->formats[IPIPE_PAD_SOURCE];
- regw_ip(ipipe_base, (outformat->height + 1) & IPIPE_RSZ_VSZ_MASK,
- IPIPE_SRC_VSZ);
- regw_ip(ipipe_base, (outformat->width + 1) & IPIPE_RSZ_HSZ_MASK,
- IPIPE_SRC_HSZ);
-
- if (data_path == IPIPE_RAW2YUV ||
- data_path == IPIPE_RAW2RAW) {
- color_pat =
- ipipe_get_color_pat(ipipe->formats[IPIPE_PAD_SINK].code);
- regw_ip(ipipe_base, color_pat, IPIPE_SRC_COL);
- }
-
- return 0;
-}
-
-/*
- * config_rsz_hw() - Performs hardware setup of resizer.
- */
-int config_rsz_hw(struct vpfe_resizer_device *resizer,
- struct resizer_params *config)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- void __iomem *ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
- void __iomem *rsz_base = vpfe_dev->vpfe_resizer.base_addr;
-
- /* enable VPSS clock */
- vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
- ipipe_clock_enable(ipipe_base);
-
- ipipe_setup_resizer(rsz_base, config);
-
- return 0;
-}
-
-static void
-rsz_set_y_address(void __iomem *rsz_base, unsigned int address,
- unsigned int offset)
-{
- u32 val;
-
- val = address & SET_LOW_ADDR;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_L);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_L);
-
- val = (address & SET_HIGH_ADDR) >> 16;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_H);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_H);
-}
-
-static void
-rsz_set_c_address(void __iomem *rsz_base, unsigned int address,
- unsigned int offset)
-{
- u32 val;
-
- val = address & SET_LOW_ADDR;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_L);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_L);
-
- val = (address & SET_HIGH_ADDR) >> 16;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_H);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_H);
-}
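Both address helpers above split a 32-bit DDR buffer address into 16-bit halves: the low half goes to the *_BAD_L/*_SAD_L registers and the high half to *_BAD_H/*_SAD_H. A tiny stand-alone illustration of that split, with the register writes replaced by printf() and an example address chosen only for the demo:

#include <stdio.h>

#define SET_LOW_ADDR	0x0000ffff
#define SET_HIGH_ADDR	0xffff0000

int main(void)
{
	unsigned int address = 0x8762a000u;	/* example DDR buffer address */
	unsigned int low  = address & SET_LOW_ADDR;
	unsigned int high = (address & SET_HIGH_ADDR) >> 16;

	/* the driver writes these with regw_rsz(); just print them here */
	printf("RSZ_SDR_*_BAD_L / _SAD_L = 0x%04x\n", low);	/* 0xa000 */
	printf("RSZ_SDR_*_BAD_H / _SAD_H = 0x%04x\n", high);	/* 0x8762 */
	return 0;
}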
-
-/*
- * resizer_set_outaddr() - set the address for given resize_no
- * @rsz_base: resizer base address
- * @params: pointer to ipipe_params structure
- * @resize_no: 0 - Resizer-A, 1 - Resizer B
- * @address: the address to set
- */
-int
-resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
- int resize_no, unsigned int address)
-{
- struct resizer_scale_param *rsc_param;
- struct rsz_ext_mem_param *mem_param;
- struct rsz_common_params *rsz_common;
- unsigned int rsz_start_add;
- unsigned int val;
-
- if (resize_no != RSZ_A && resize_no != RSZ_B)
- return -EINVAL;
-
- mem_param = &params->ext_mem_param[resize_no];
- rsc_param = &params->rsz_rsc_param[resize_no];
- rsz_common = &params->rsz_common;
-
- if (resize_no == RSZ_A)
- rsz_start_add = RSZ_EN_A;
- else
- rsz_start_add = RSZ_EN_B;
-
- /* y_c = 0 for y, = 1 for c */
- if (rsz_common->src_img_fmt == RSZ_IMG_420) {
- if (rsz_common->y_c) {
- /* C channel */
- val = address + mem_param->flip_ofst_c;
- rsz_set_c_address(rsz_base, val, rsz_start_add);
- } else {
- val = address + mem_param->flip_ofst_y;
- rsz_set_y_address(rsz_base, val, rsz_start_add);
- }
- } else {
- if (rsc_param->cen && rsc_param->yen) {
- /* 420 */
- val = address + mem_param->c_offset +
- mem_param->flip_ofst_c +
- mem_param->user_y_ofst +
- mem_param->user_c_ofst;
- if (resize_no == RSZ_B)
- val +=
- params->ext_mem_param[RSZ_A].user_y_ofst +
- params->ext_mem_param[RSZ_A].user_c_ofst;
- /* set C address */
- rsz_set_c_address(rsz_base, val, rsz_start_add);
- }
- val = address + mem_param->flip_ofst_y + mem_param->user_y_ofst;
- if (resize_no == RSZ_B)
- val += params->ext_mem_param[RSZ_A].user_y_ofst +
- params->ext_mem_param[RSZ_A].user_c_ofst;
- /* set Y address */
- rsz_set_y_address(rsz_base, val, rsz_start_add);
- }
- /* resizer must be enabled */
- regw_rsz(rsz_base, params->rsz_en[resize_no], rsz_start_add);
-
- return 0;
-}
-
-void
-ipipe_set_lutdpc_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_lutdpc *dpc)
-{
- u32 max_tbl_size = LUT_DPC_MAX_SIZE >> 1;
- u32 lut_start_addr = DPC_TB0_START_ADDR;
- u32 val;
- u32 count;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, dpc->en, DPC_LUT_EN);
-
- if (dpc->en != 1)
- return;
-
- val = LUTDPC_TBL_256_EN | (dpc->repl_white & 1);
- regw_ip(base_addr, val, DPC_LUT_SEL);
- regw_ip(base_addr, LUT_DPC_START_ADDR, DPC_LUT_ADR);
- regw_ip(base_addr, dpc->dpc_size, DPC_LUT_SIZ & LUT_DPC_SIZE_MASK);
-
- if (dpc->table == NULL)
- return;
-
- for (count = 0; count < dpc->dpc_size; count++) {
- if (count >= max_tbl_size)
- lut_start_addr = DPC_TB1_START_ADDR;
- val = (dpc->table[count].horz_pos & LUT_DPC_H_POS_MASK) |
- ((dpc->table[count].vert_pos & LUT_DPC_V_POS_MASK) <<
- LUT_DPC_V_POS_SHIFT) | (dpc->table[count].method <<
- LUT_DPC_CORR_METH_SHIFT);
- w_ip_table(isp5_base_addr, val, (lut_start_addr +
- ((count % max_tbl_size) << 2)));
- }
-}
-
-static void
-set_dpc_thresholds(void __iomem *base_addr,
- struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_thr)
-{
- regw_ip(base_addr, dpc_thr->corr_thr.r & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_R);
- regw_ip(base_addr, dpc_thr->corr_thr.gr & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_GR);
- regw_ip(base_addr, dpc_thr->corr_thr.gb & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_GB);
- regw_ip(base_addr, dpc_thr->corr_thr.b & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_B);
- regw_ip(base_addr, dpc_thr->det_thr.r & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_R);
- regw_ip(base_addr, dpc_thr->det_thr.gr & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_GR);
- regw_ip(base_addr, dpc_thr->det_thr.gb & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_GB);
- regw_ip(base_addr, dpc_thr->det_thr.b & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_B);
-}
-
-void ipipe_set_otfdpc_regs(void __iomem *base_addr,
- struct vpfe_ipipe_otfdpc *otfdpc)
-{
- struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0 = &otfdpc->alg_cfg.dpc_2_0;
- struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0 = &otfdpc->alg_cfg.dpc_3_0;
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- regw_ip(base_addr, (otfdpc->en & 1), DPC_OTF_EN);
- if (!otfdpc->en)
- return;
-
- /* dpc enabled */
- val = (otfdpc->det_method << OTF_DET_METHOD_SHIFT) | otfdpc->alg;
- regw_ip(base_addr, val, DPC_OTF_TYP);
-
- if (otfdpc->det_method == VPFE_IPIPE_DPC_OTF_MIN_MAX) {
-		/* ALG = 0, TYP = 0, DPC_OTF_2D_THR_[x] = 0
-		 * DPC_OTF_2C_THR_[x] = Maximum threshold
- * MinMax method
- */
- dpc_2_0->det_thr.r = dpc_2_0->det_thr.gb =
- dpc_2_0->det_thr.gr = dpc_2_0->det_thr.b = 0;
- set_dpc_thresholds(base_addr, dpc_2_0);
- return;
- }
- /* MinMax2 */
- if (otfdpc->alg == VPFE_IPIPE_OTFDPC_2_0) {
- set_dpc_thresholds(base_addr, dpc_2_0);
- return;
- }
- regw_ip(base_addr, dpc_3_0->act_adj_shf &
- OTF_DPC3_0_SHF_MASK, DPC_OTF_3_SHF);
- /* Detection thresholds */
- regw_ip(base_addr, ((dpc_3_0->det_thr & OTF_DPC3_0_THR_MASK) <<
- OTF_DPC3_0_THR_SHIFT), DPC_OTF_3D_THR);
- regw_ip(base_addr, dpc_3_0->det_slp &
- OTF_DPC3_0_SLP_MASK, DPC_OTF_3D_SLP);
- regw_ip(base_addr, dpc_3_0->det_thr_min &
- OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MIN);
- regw_ip(base_addr, dpc_3_0->det_thr_max &
- OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MAX);
- /* Correction thresholds */
- regw_ip(base_addr, ((dpc_3_0->corr_thr & OTF_DPC3_0_THR_MASK) <<
- OTF_DPC3_0_THR_SHIFT), DPC_OTF_3C_THR);
- regw_ip(base_addr, dpc_3_0->corr_slp &
- OTF_DPC3_0_SLP_MASK, DPC_OTF_3C_SLP);
- regw_ip(base_addr, dpc_3_0->corr_thr_min &
- OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MIN);
- regw_ip(base_addr, dpc_3_0->corr_thr_max &
- OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MAX);
-}
-
-/* 2D Noise filter */
-void
-ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_nf *noise_filter)
-{
-
- u32 offset = D2F_1ST;
- int count;
- u32 val;
-
- if (id == IPIPE_D2F_2ND)
- offset = D2F_2ND;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, noise_filter->en & 1, offset + D2F_EN);
- if (!noise_filter->en)
- return;
-
-	/* noise filter enabled */
- /* Combine all the fields to make D2F_CFG register of IPIPE */
- val = ((noise_filter->spread_val & D2F_SPR_VAL_MASK) <<
- D2F_SPR_VAL_SHIFT) | ((noise_filter->shft_val &
- D2F_SHFT_VAL_MASK) << D2F_SHFT_VAL_SHIFT) |
- (noise_filter->gr_sample_meth << D2F_SAMPLE_METH_SHIFT) |
- ((noise_filter->apply_lsc_gain & 1) <<
- D2F_APPLY_LSC_GAIN_SHIFT) | D2F_USE_SPR_REG_VAL;
- regw_ip(base_addr, val, offset + D2F_TYP);
-
- /* edge detection minimum */
- regw_ip(base_addr, noise_filter->edge_det_min_thr &
- D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MIN);
-
- /* edge detection maximum */
- regw_ip(base_addr, noise_filter->edge_det_max_thr &
- D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MAX);
-
- for (count = 0; count < VPFE_IPIPE_NF_STR_TABLE_SIZE; count++)
- regw_ip(base_addr,
- (noise_filter->str[count] & D2F_STR_VAL_MASK),
- offset + D2F_STR + count * 4);
-
- for (count = 0; count < VPFE_IPIPE_NF_THR_TABLE_SIZE; count++)
- regw_ip(base_addr, noise_filter->thr[count] & D2F_THR_VAL_MASK,
- offset + D2F_THR + count * 4);
-}
-
-#define IPIPE_U8Q5(decimal, integer) \
- (((decimal & 0x1f) | ((integer & 0x7) << 5)))
-
-/* Green Imbalance Correction */
-void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, gic->en & 1, GIC_EN);
-
- if (!gic->en)
- return;
-
-	/* gic enabled */
- val = (gic->wt_fn_type << GIC_TYP_SHIFT) |
- (gic->thr_sel << GIC_THR_SEL_SHIFT) |
- ((gic->apply_lsc_gain & 1) << GIC_APPLY_LSC_GAIN_SHIFT);
- regw_ip(base_addr, val, GIC_TYP);
-
- regw_ip(base_addr, gic->gain & GIC_GAIN_MASK, GIC_GAN);
-
- if (gic->gic_alg != VPFE_IPIPE_GIC_ALG_ADAPT_GAIN) {
- /* Constant Gain. Set threshold to maximum */
- regw_ip(base_addr, GIC_THR_MASK, GIC_THR);
- return;
- }
-
- if (gic->thr_sel == VPFE_IPIPE_GIC_THR_REG) {
- regw_ip(base_addr, gic->thr & GIC_THR_MASK, GIC_THR);
- regw_ip(base_addr, gic->slope & GIC_SLOPE_MASK, GIC_SLP);
- } else {
- /* Use NF thresholds */
- val = IPIPE_U8Q5(gic->nf2_thr_gain.decimal,
- gic->nf2_thr_gain.integer);
- regw_ip(base_addr, val, GIC_NFGAN);
- }
-}
-
-#define IPIPE_U13Q9(decimal, integer) \
- (((decimal & 0x1ff) | ((integer & 0xf) << 9)))
-/* White balance */
-void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
-	/* Offsets. S12 */
- regw_ip(base_addr, wb->ofst_r & WB_OFFSET_MASK, WB2_OFT_R);
- regw_ip(base_addr, wb->ofst_gr & WB_OFFSET_MASK, WB2_OFT_GR);
- regw_ip(base_addr, wb->ofst_gb & WB_OFFSET_MASK, WB2_OFT_GB);
- regw_ip(base_addr, wb->ofst_b & WB_OFFSET_MASK, WB2_OFT_B);
-
- /* Gains. U13Q9 */
- val = IPIPE_U13Q9(wb->gain_r.decimal, wb->gain_r.integer);
- regw_ip(base_addr, val, WB2_WGN_R);
-
- val = IPIPE_U13Q9(wb->gain_gr.decimal, wb->gain_gr.integer);
- regw_ip(base_addr, val, WB2_WGN_GR);
-
- val = IPIPE_U13Q9(wb->gain_gb.decimal, wb->gain_gb.integer);
- regw_ip(base_addr, val, WB2_WGN_GB);
-
- val = IPIPE_U13Q9(wb->gain_b.decimal, wb->gain_b.integer);
- regw_ip(base_addr, val, WB2_WGN_B);
-}
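The IPIPE_U13Q9() packing used above is plain unsigned fixed point: 9 fractional bits and 4 integer bits, so a white-balance gain is programmed in steps of 1/512 (gain 1.0 packs to 0x200, 1.5 to 0x300). A small stand-alone sketch of that conversion; demo_u13q9() is an invented helper, not part of the driver:

#include <stdio.h>

/* Hypothetical helper: pack a gain into the U13Q9 layout used by WB2_WGN_x
 * (9 fractional bits in [8:0], 4 integer bits in [12:9]).
 */
static unsigned int demo_u13q9(double gain)
{
	unsigned int q = (unsigned int)(gain * 512.0 + 0.5);	/* round to 1/512 */
	unsigned int integer = (q >> 9) & 0xf;
	unsigned int decimal = q & 0x1ff;

	return (integer << 9) | decimal;	/* same packing as IPIPE_U13Q9() */
}

int main(void)
{
	printf("gain 1.00 -> 0x%03x\n", demo_u13q9(1.0));	/* 0x200 */
	printf("gain 1.50 -> 0x%03x\n", demo_u13q9(1.5));	/* 0x300 */
	printf("gain 2.25 -> 0x%03x\n", demo_u13q9(2.25));	/* 0x480 */
	return 0;
}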
-
-/* CFA */
-void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa)
-{
- ipipe_clock_enable(base_addr);
-
- regw_ip(base_addr, cfa->alg, CFA_MODE);
- regw_ip(base_addr, cfa->hpf_thr_2dir & CFA_HPF_THR_2DIR_MASK,
- CFA_2DIR_HPF_THR);
- regw_ip(base_addr, cfa->hpf_slp_2dir & CFA_HPF_SLOPE_2DIR_MASK,
- CFA_2DIR_HPF_SLP);
- regw_ip(base_addr, cfa->hp_mix_thr_2dir & CFA_HPF_MIX_THR_2DIR_MASK,
- CFA_2DIR_MIX_THR);
- regw_ip(base_addr, cfa->hp_mix_slope_2dir & CFA_HPF_MIX_SLP_2DIR_MASK,
- CFA_2DIR_MIX_SLP);
- regw_ip(base_addr, cfa->dir_thr_2dir & CFA_DIR_THR_2DIR_MASK,
- CFA_2DIR_DIR_THR);
- regw_ip(base_addr, cfa->dir_slope_2dir & CFA_DIR_SLP_2DIR_MASK,
- CFA_2DIR_DIR_SLP);
- regw_ip(base_addr, cfa->nd_wt_2dir & CFA_ND_WT_2DIR_MASK,
- CFA_2DIR_NDWT);
- regw_ip(base_addr, cfa->hue_fract_daa & CFA_DAA_HUE_FRA_MASK,
- CFA_MONO_HUE_FRA);
- regw_ip(base_addr, cfa->edge_thr_daa & CFA_DAA_EDG_THR_MASK,
- CFA_MONO_EDG_THR);
- regw_ip(base_addr, cfa->thr_min_daa & CFA_DAA_THR_MIN_MASK,
- CFA_MONO_THR_MIN);
- regw_ip(base_addr, cfa->thr_slope_daa & CFA_DAA_THR_SLP_MASK,
- CFA_MONO_THR_SLP);
- regw_ip(base_addr, cfa->slope_min_daa & CFA_DAA_SLP_MIN_MASK,
- CFA_MONO_SLP_MIN);
- regw_ip(base_addr, cfa->slope_slope_daa & CFA_DAA_SLP_SLP_MASK,
- CFA_MONO_SLP_SLP);
- regw_ip(base_addr, cfa->lp_wt_daa & CFA_DAA_LP_WT_MASK,
- CFA_MONO_LPWT);
-}
-
-void
-ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_rgb2rgb *rgb)
-{
- u32 offset_mask = RGB2RGB_1_OFST_MASK;
- u32 offset = RGB1_MUL_BASE;
- u32 integ_mask = 0xf;
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- if (id == IPIPE_RGB2RGB_2) {
-		/* For second RGB module, gain integer is 3 bits instead
-		   of 4, offset has 11 bits instead of 13 */
- offset = RGB2_MUL_BASE;
- integ_mask = 0x7;
- offset_mask = RGB2RGB_2_OFST_MASK;
- }
- /* Gains */
- val = (rgb->coef_rr.decimal & 0xff) |
- ((rgb->coef_rr.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_RR);
- val = (rgb->coef_gr.decimal & 0xff) |
- ((rgb->coef_gr.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_GR);
- val = (rgb->coef_br.decimal & 0xff) |
- ((rgb->coef_br.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_BR);
- val = (rgb->coef_rg.decimal & 0xff) |
- ((rgb->coef_rg.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_RG);
- val = (rgb->coef_gg.decimal & 0xff) |
- ((rgb->coef_gg.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_GG);
- val = (rgb->coef_bg.decimal & 0xff) |
- ((rgb->coef_bg.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_BG);
- val = (rgb->coef_rb.decimal & 0xff) |
- ((rgb->coef_rb.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_RB);
- val = (rgb->coef_gb.decimal & 0xff) |
- ((rgb->coef_gb.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_GB);
- val = (rgb->coef_bb.decimal & 0xff) |
- ((rgb->coef_bb.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_BB);
-
- /* Offsets */
- regw_ip(base_addr, rgb->out_ofst_r & offset_mask, offset + RGB_OFT_OR);
- regw_ip(base_addr, rgb->out_ofst_g & offset_mask, offset + RGB_OFT_OG);
- regw_ip(base_addr, rgb->out_ofst_b & offset_mask, offset + RGB_OFT_OB);
-}
-
-static void
-ipipe_update_gamma_tbl(void __iomem *isp5_base_addr,
- struct vpfe_ipipe_gamma_entry *table, int size, u32 addr)
-{
- int count;
- u32 val;
-
- for (count = 0; count < size; count++) {
- val = table[count].slope & GAMMA_MASK;
- val |= (table[count].offset & GAMMA_MASK) << GAMMA_SHIFT;
- w_ip_table(isp5_base_addr, val, (addr + (count * 4)));
- }
-}
-
-void
-ipipe_set_gamma_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_gamma *gamma)
-{
- int table_size;
- u32 val;
-
- ipipe_clock_enable(base_addr);
- val = (gamma->bypass_r << GAMMA_BYPR_SHIFT) |
- (gamma->bypass_b << GAMMA_BYPG_SHIFT) |
- (gamma->bypass_g << GAMMA_BYPB_SHIFT) |
- (gamma->tbl_sel << GAMMA_TBL_SEL_SHIFT) |
- (gamma->tbl_size << GAMMA_TBL_SIZE_SHIFT);
-
- regw_ip(base_addr, val, GMM_CFG);
-
- if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- return;
-
- table_size = gamma->tbl_size;
-
- if (!gamma->bypass_r && gamma->table_r != NULL)
- ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_r,
- table_size, GAMMA_R_START_ADDR);
- if (!gamma->bypass_b && gamma->table_b != NULL)
- ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_b,
- table_size, GAMMA_B_START_ADDR);
- if (!gamma->bypass_g && gamma->table_g != NULL)
- ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_g,
- table_size, GAMMA_G_START_ADDR);
-}
-
-void
-ipipe_set_3d_lut_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_3d_lut *lut_3d)
-{
- struct vpfe_ipipe_3d_lut_entry *tbl;
- u32 bnk_index;
- u32 tbl_index;
- u32 val;
- u32 i;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, lut_3d->en, D3LUT_EN);
-
- if (!lut_3d->en)
- return;
-
- /* lut_3d enabled */
- if (!lut_3d->table)
- return;
-
-	/* valid table */
- tbl = lut_3d->table;
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
-		/* Each entry has bits 0-9 (B), 10-19 (G) and
-		   20-29 (R) */
- val = tbl[i].b & D3_LUT_ENTRY_MASK;
- val |= (tbl[i].g & D3_LUT_ENTRY_MASK) <<
- D3_LUT_ENTRY_G_SHIFT;
- val |= (tbl[i].r & D3_LUT_ENTRY_MASK) <<
- D3_LUT_ENTRY_R_SHIFT;
- bnk_index = i % 4;
- tbl_index = i >> 2;
- tbl_index <<= 2;
- if (bnk_index == 0)
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB0_START_ADDR);
- else if (bnk_index == 1)
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB1_START_ADDR);
- else if (bnk_index == 2)
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB2_START_ADDR);
- else
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB3_START_ADDR);
- }
-}
-
-/* Luminance adjustments */
-void
-ipipe_set_lum_adj_regs(void __iomem *base_addr, struct ipipe_lum_adj *lum_adj)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- /* combine fields of YUV_ADJ to set brightness and contrast */
- val = lum_adj->contrast << LUM_ADJ_CONTR_SHIFT |
- lum_adj->brightness << LUM_ADJ_BRIGHT_SHIFT;
- regw_ip(base_addr, val, YUV_ADJ);
-}
-
-#define IPIPE_S12Q8(decimal, integer) \
- (((decimal & 0xff) | ((integer & 0xf) << 8)))
-
-void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
- struct vpfe_ipipe_rgb2yuv *yuv)
-{
- u32 val;
-
-	/* S12Q8 */
- ipipe_clock_enable(base_addr);
- val = IPIPE_S12Q8(yuv->coef_ry.decimal, yuv->coef_ry.integer);
- regw_ip(base_addr, val, YUV_MUL_RY);
- val = IPIPE_S12Q8(yuv->coef_gy.decimal, yuv->coef_gy.integer);
- regw_ip(base_addr, val, YUV_MUL_GY);
- val = IPIPE_S12Q8(yuv->coef_by.decimal, yuv->coef_by.integer);
- regw_ip(base_addr, val, YUV_MUL_BY);
- val = IPIPE_S12Q8(yuv->coef_rcb.decimal, yuv->coef_rcb.integer);
- regw_ip(base_addr, val, YUV_MUL_RCB);
- val = IPIPE_S12Q8(yuv->coef_gcb.decimal, yuv->coef_gcb.integer);
- regw_ip(base_addr, val, YUV_MUL_GCB);
- val = IPIPE_S12Q8(yuv->coef_bcb.decimal, yuv->coef_bcb.integer);
- regw_ip(base_addr, val, YUV_MUL_BCB);
- val = IPIPE_S12Q8(yuv->coef_rcr.decimal, yuv->coef_rcr.integer);
- regw_ip(base_addr, val, YUV_MUL_RCR);
- val = IPIPE_S12Q8(yuv->coef_gcr.decimal, yuv->coef_gcr.integer);
- regw_ip(base_addr, val, YUV_MUL_GCR);
- val = IPIPE_S12Q8(yuv->coef_bcr.decimal, yuv->coef_bcr.integer);
- regw_ip(base_addr, val, YUV_MUL_BCR);
- regw_ip(base_addr, yuv->out_ofst_y & RGB2YCBCR_OFST_MASK, YUV_OFT_Y);
- regw_ip(base_addr, yuv->out_ofst_cb & RGB2YCBCR_OFST_MASK, YUV_OFT_CB);
- regw_ip(base_addr, yuv->out_ofst_cr & RGB2YCBCR_OFST_MASK, YUV_OFT_CR);
-}
-
-/* YUV 422 conversion */
-void
-ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
- struct vpfe_ipipe_yuv422_conv *conv)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- /* Combine all the fields to make YUV_PHS register of IPIPE */
- val = (conv->chrom_pos << 0) | (conv->en_chrom_lpf << 1);
- regw_ip(base_addr, val, YUV_PHS);
-}
-
-void
-ipipe_set_gbce_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_gbce *gbce)
-{
- unsigned int count;
- u32 mask = GBCE_Y_VAL_MASK;
-
- if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
- mask = GBCE_GAIN_VAL_MASK;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, gbce->en & 1, GBCE_EN);
-
- if (!gbce->en)
- return;
-
- regw_ip(base_addr, gbce->type, GBCE_TYP);
-
- if (!gbce->table)
- return;
-
- for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; count += 2)
- w_ip_table(isp5_base_addr, ((gbce->table[count + 1] & mask) <<
- GBCE_ENTRY_SHIFT) | (gbce->table[count] & mask),
- ((count/2) << 2) + GBCE_TB_START_ADDR);
-}
-
-void
-ipipe_set_ee_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_yee *ee)
-{
- unsigned int count;
- u32 val;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, ee->en, YEE_EN);
-
- if (!ee->en)
- return;
-
- val = ee->en_halo_red & 1;
- val |= ee->merge_meth << YEE_HALO_RED_EN_SHIFT;
- regw_ip(base_addr, val, YEE_TYP);
-
- regw_ip(base_addr, ee->hpf_shft, YEE_SHF);
- regw_ip(base_addr, ee->hpf_coef_00 & YEE_COEF_MASK, YEE_MUL_00);
- regw_ip(base_addr, ee->hpf_coef_01 & YEE_COEF_MASK, YEE_MUL_01);
- regw_ip(base_addr, ee->hpf_coef_02 & YEE_COEF_MASK, YEE_MUL_02);
- regw_ip(base_addr, ee->hpf_coef_10 & YEE_COEF_MASK, YEE_MUL_10);
- regw_ip(base_addr, ee->hpf_coef_11 & YEE_COEF_MASK, YEE_MUL_11);
- regw_ip(base_addr, ee->hpf_coef_12 & YEE_COEF_MASK, YEE_MUL_12);
- regw_ip(base_addr, ee->hpf_coef_20 & YEE_COEF_MASK, YEE_MUL_20);
- regw_ip(base_addr, ee->hpf_coef_21 & YEE_COEF_MASK, YEE_MUL_21);
- regw_ip(base_addr, ee->hpf_coef_22 & YEE_COEF_MASK, YEE_MUL_22);
- regw_ip(base_addr, ee->yee_thr & YEE_THR_MASK, YEE_THR);
- regw_ip(base_addr, ee->es_gain & YEE_ES_GAIN_MASK, YEE_E_GAN);
- regw_ip(base_addr, ee->es_thr1 & YEE_ES_THR1_MASK, YEE_E_THR1);
- regw_ip(base_addr, ee->es_thr2 & YEE_THR_MASK, YEE_E_THR2);
- regw_ip(base_addr, ee->es_gain_grad & YEE_THR_MASK, YEE_G_GAN);
- regw_ip(base_addr, ee->es_ofst_grad & YEE_THR_MASK, YEE_G_OFT);
-
- if (ee->table == NULL)
- return;
-
- for (count = 0; count < VPFE_IPIPE_MAX_SIZE_YEE_LUT; count += 2)
- w_ip_table(isp5_base_addr, ((ee->table[count + 1] &
- YEE_ENTRY_MASK) << YEE_ENTRY_SHIFT) |
- (ee->table[count] & YEE_ENTRY_MASK),
- ((count/2) << 2) + YEE_TB_START_ADDR);
-}
-
-/* Chromatic Artifact Correction. CAR */
-static void ipipe_set_mf(void __iomem *base_addr)
-{
- /* typ to dynamic switch */
- regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
- /* Set SW0 to maximum */
- regw_ip(base_addr, CAR_MF_THR, CAR_SW);
-}
-
-static void
-ipipe_set_gain_ctrl(void __iomem *base_addr, struct vpfe_ipipe_car *car)
-{
- regw_ip(base_addr, VPFE_IPIPE_CAR_CHR_GAIN_CTRL, CAR_TYP);
- regw_ip(base_addr, car->hpf, CAR_HPF_TYP);
- regw_ip(base_addr, car->hpf_shft & CAR_HPF_SHIFT_MASK, CAR_HPF_SHF);
- regw_ip(base_addr, car->hpf_thr, CAR_HPF_THR);
- regw_ip(base_addr, car->gain1.gain, CAR_GN1_GAN);
- regw_ip(base_addr, car->gain1.shft & CAR_GAIN1_SHFT_MASK, CAR_GN1_SHF);
- regw_ip(base_addr, car->gain1.gain_min & CAR_GAIN_MIN_MASK,
- CAR_GN1_MIN);
- regw_ip(base_addr, car->gain2.gain, CAR_GN2_GAN);
- regw_ip(base_addr, car->gain2.shft & CAR_GAIN2_SHFT_MASK, CAR_GN2_SHF);
- regw_ip(base_addr, car->gain2.gain_min & CAR_GAIN_MIN_MASK,
- CAR_GN2_MIN);
-}
-
-void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, car->en, CAR_EN);
-
- if (!car->en)
- return;
-
- switch (car->meth) {
- case VPFE_IPIPE_CAR_MED_FLTR:
- ipipe_set_mf(base_addr);
- break;
-
- case VPFE_IPIPE_CAR_CHR_GAIN_CTRL:
- ipipe_set_gain_ctrl(base_addr, car);
- break;
-
- default:
- /* Dynamic switch between MF and Gain Ctrl. */
- ipipe_set_mf(base_addr);
- ipipe_set_gain_ctrl(base_addr, car);
- /* Set the threshold for switching between
- * the two Here we overwrite the MF SW0 value
- */
- regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
- val = car->sw1;
- val <<= CAR_SW1_SHIFT;
- val |= car->sw0;
- regw_ip(base_addr, val, CAR_SW);
- }
-}
-
-/* Chromatic Gain Suppression */
-void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs)
-{
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, cgs->en, CGS_EN);
-
- if (!cgs->en)
- return;
-
- /* Set the bright side parameters */
- regw_ip(base_addr, cgs->h_thr, CGS_GN1_H_THR);
- regw_ip(base_addr, cgs->h_slope, CGS_GN1_H_GAN);
- regw_ip(base_addr, cgs->h_shft & CAR_SHIFT_MASK, CGS_GN1_H_SHF);
- regw_ip(base_addr, cgs->h_min, CGS_GN1_H_MIN);
-}
-
-void rsz_src_enable(void __iomem *rsz_base, int enable)
-{
- regw_rsz(rsz_base, enable, RSZ_SRC_EN);
-}
-
-int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable)
-{
- if (rsz_id == RSZ_A) {
- regw_rsz(rsz_base, enable, RSZ_EN_A);
-		/* We always enable RSZ_A. RSZ_B is enabled upon request from
-		 * the application, so enable RSZ_SRC_EN along with RSZ_A.
-		 */
- regw_rsz(rsz_base, enable, RSZ_SRC_EN);
- } else if (rsz_id == RSZ_B) {
- regw_rsz(rsz_base, enable, RSZ_EN_B);
- } else {
- BUG();
- }
-
- return 0;
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
deleted file mode 100644
index 2bf2f7a69173..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPE_HW_H
-#define _DAVINCI_VPFE_DM365_IPIPE_HW_H
-
-#include "vpfe_mc_capture.h"
-
-#define SET_LOW_ADDR 0x0000ffff
-#define SET_HIGH_ADDR 0xffff0000
-
-/* Below are the internal tables */
-#define DPC_TB0_START_ADDR 0x8000
-#define DPC_TB1_START_ADDR 0x8400
-
-#define GAMMA_R_START_ADDR 0xa800
-#define GAMMA_G_START_ADDR 0xb000
-#define GAMMA_B_START_ADDR 0xb800
-
-/* RAM table addresses for edge enhancement correction*/
-#define YEE_TB_START_ADDR 0x8800
-
-/* RAM table address for GBC LUT */
-#define GBCE_TB_START_ADDR 0x9000
-
-/* RAM table for 3D NF LUT */
-#define D3L_TB0_START_ADDR 0x9800
-#define D3L_TB1_START_ADDR 0x9c00
-#define D3L_TB2_START_ADDR 0xa000
-#define D3L_TB3_START_ADDR 0xa400
-
-/* IPIPE Register Offsets from the base address */
-#define IPIPE_SRC_EN 0x0000
-#define IPIPE_SRC_MODE 0x0004
-#define IPIPE_SRC_FMT 0x0008
-#define IPIPE_SRC_COL 0x000c
-#define IPIPE_SRC_VPS 0x0010
-#define IPIPE_SRC_VSZ 0x0014
-#define IPIPE_SRC_HPS 0x0018
-#define IPIPE_SRC_HSZ 0x001c
-
-#define IPIPE_SEL_SBU 0x0020
-
-#define IPIPE_DMA_STA 0x0024
-#define IPIPE_GCK_MMR 0x0028
-#define IPIPE_GCK_PIX 0x002c
-#define IPIPE_RESERVED0 0x0030
-
-/* Defect Correction */
-#define DPC_LUT_EN 0x0034
-#define DPC_LUT_SEL 0x0038
-#define DPC_LUT_ADR 0x003c
-#define DPC_LUT_SIZ 0x0040
-#define DPC_OTF_EN 0x0044
-#define DPC_OTF_TYP 0x0048
-#define DPC_OTF_2D_THR_R 0x004c
-#define DPC_OTF_2D_THR_GR 0x0050
-#define DPC_OTF_2D_THR_GB 0x0054
-#define DPC_OTF_2D_THR_B 0x0058
-#define DPC_OTF_2C_THR_R 0x005c
-#define DPC_OTF_2C_THR_GR 0x0060
-#define DPC_OTF_2C_THR_GB 0x0064
-#define DPC_OTF_2C_THR_B 0x0068
-#define DPC_OTF_3_SHF 0x006c
-#define DPC_OTF_3D_THR 0x0070
-#define DPC_OTF_3D_SLP 0x0074
-#define DPC_OTF_3D_MIN 0x0078
-#define DPC_OTF_3D_MAX 0x007c
-#define DPC_OTF_3C_THR 0x0080
-#define DPC_OTF_3C_SLP 0x0084
-#define DPC_OTF_3C_MIN 0x0088
-#define DPC_OTF_3C_MAX 0x008c
-
-/* Lens Shading Correction */
-#define LSC_VOFT 0x90
-#define LSC_VA2 0x94
-#define LSC_VA1 0x98
-#define LSC_VS 0x9c
-#define LSC_HOFT 0xa0
-#define LSC_HA2 0xa4
-#define LSC_HA1 0xa8
-#define LSC_HS 0xac
-#define LSC_GAIN_R 0xb0
-#define LSC_GAIN_GR 0xb4
-#define LSC_GAIN_GB 0xb8
-#define LSC_GAIN_B 0xbc
-#define LSC_OFT_R 0xc0
-#define LSC_OFT_GR 0xc4
-#define LSC_OFT_GB 0xc8
-#define LSC_OFT_B 0xcc
-#define LSC_SHF 0xd0
-#define LSC_MAX 0xd4
-
-/* Noise Filter 1. Offsets from start address given */
-#define D2F_1ST 0xd8
-#define D2F_EN 0x0
-#define D2F_TYP 0x4
-#define D2F_THR 0x8
-#define D2F_STR 0x28
-#define D2F_SPR 0x48
-#define D2F_EDG_MIN 0x68
-#define D2F_EDG_MAX 0x6c
-
-/* Noise Filter 2 */
-#define D2F_2ND 0x148
-
-/* GIC */
-#define GIC_EN 0x1b8
-#define GIC_TYP 0x1bc
-#define GIC_GAN 0x1c0
-#define GIC_NFGAN 0x1c4
-#define GIC_THR 0x1c8
-#define GIC_SLP 0x1cc
-
-/* White Balance */
-#define WB2_OFT_R 0x1d0
-#define WB2_OFT_GR 0x1d4
-#define WB2_OFT_GB 0x1d8
-#define WB2_OFT_B 0x1dc
-#define WB2_WGN_R 0x1e0
-#define WB2_WGN_GR 0x1e4
-#define WB2_WGN_GB 0x1e8
-#define WB2_WGN_B 0x1ec
-
-/* CFA interpolation */
-#define CFA_MODE 0x1f0
-#define CFA_2DIR_HPF_THR 0x1f4
-#define CFA_2DIR_HPF_SLP 0x1f8
-#define CFA_2DIR_MIX_THR 0x1fc
-#define CFA_2DIR_MIX_SLP 0x200
-#define CFA_2DIR_DIR_THR 0x204
-#define CFA_2DIR_DIR_SLP 0x208
-#define CFA_2DIR_NDWT 0x20c
-#define CFA_MONO_HUE_FRA 0x210
-#define CFA_MONO_EDG_THR 0x214
-#define CFA_MONO_THR_MIN 0x218
-#define CFA_MONO_THR_SLP 0x21c
-#define CFA_MONO_SLP_MIN 0x220
-#define CFA_MONO_SLP_SLP 0x224
-#define CFA_MONO_LPWT 0x228
-
-/* RGB to RGB conversion - 1st */
-#define RGB1_MUL_BASE 0x22c
-/* Offsets from base */
-#define RGB_MUL_RR 0x0
-#define RGB_MUL_GR 0x4
-#define RGB_MUL_BR 0x8
-#define RGB_MUL_RG 0xc
-#define RGB_MUL_GG 0x10
-#define RGB_MUL_BG 0x14
-#define RGB_MUL_RB 0x18
-#define RGB_MUL_GB 0x1c
-#define RGB_MUL_BB 0x20
-#define RGB_OFT_OR 0x24
-#define RGB_OFT_OG 0x28
-#define RGB_OFT_OB 0x2c
-
-/* Gamma */
-#define GMM_CFG 0x25c
-
-/* RGB to RGB conversion - 2nd */
-#define RGB2_MUL_BASE 0x260
-
-/* 3D LUT */
-#define D3LUT_EN 0x290
-
-/* RGB to YUV(YCbCr) conversion */
-#define YUV_ADJ 0x294
-#define YUV_MUL_RY 0x298
-#define YUV_MUL_GY 0x29c
-#define YUV_MUL_BY 0x2a0
-#define YUV_MUL_RCB 0x2a4
-#define YUV_MUL_GCB 0x2a8
-#define YUV_MUL_BCB 0x2ac
-#define YUV_MUL_RCR 0x2b0
-#define YUV_MUL_GCR 0x2b4
-#define YUV_MUL_BCR 0x2b8
-#define YUV_OFT_Y 0x2bc
-#define YUV_OFT_CB 0x2c0
-#define YUV_OFT_CR 0x2c4
-#define YUV_PHS 0x2c8
-
-/* Global Brightness and Contrast */
-#define GBCE_EN 0x2cc
-#define GBCE_TYP 0x2d0
-
-/* Edge Enhancer */
-#define YEE_EN 0x2d4
-#define YEE_TYP 0x2d8
-#define YEE_SHF 0x2dc
-#define YEE_MUL_00 0x2e0
-#define YEE_MUL_01 0x2e4
-#define YEE_MUL_02 0x2e8
-#define YEE_MUL_10 0x2ec
-#define YEE_MUL_11 0x2f0
-#define YEE_MUL_12 0x2f4
-#define YEE_MUL_20 0x2f8
-#define YEE_MUL_21 0x2fc
-#define YEE_MUL_22 0x300
-#define YEE_THR 0x304
-#define YEE_E_GAN 0x308
-#define YEE_E_THR1 0x30c
-#define YEE_E_THR2 0x310
-#define YEE_G_GAN 0x314
-#define YEE_G_OFT 0x318
-
-/* Chroma Artifact Reduction */
-#define CAR_EN 0x31c
-#define CAR_TYP 0x320
-#define CAR_SW 0x324
-#define CAR_HPF_TYP 0x328
-#define CAR_HPF_SHF 0x32c
-#define CAR_HPF_THR 0x330
-#define CAR_GN1_GAN 0x334
-#define CAR_GN1_SHF 0x338
-#define CAR_GN1_MIN 0x33c
-#define CAR_GN2_GAN 0x340
-#define CAR_GN2_SHF 0x344
-#define CAR_GN2_MIN 0x348
-
-/* Chroma Gain Suppression */
-#define CGS_EN 0x34c
-#define CGS_GN1_L_THR 0x350
-#define CGS_GN1_L_GAN 0x354
-#define CGS_GN1_L_SHF 0x358
-#define CGS_GN1_L_MIN 0x35c
-#define CGS_GN1_H_THR 0x360
-#define CGS_GN1_H_GAN 0x364
-#define CGS_GN1_H_SHF 0x368
-#define CGS_GN1_H_MIN 0x36c
-#define CGS_GN2_L_THR 0x370
-#define CGS_GN2_L_GAN 0x374
-#define CGS_GN2_L_SHF 0x378
-#define CGS_GN2_L_MIN 0x37c
-
-/* Resizer */
-#define RSZ_SRC_EN 0x0
-#define RSZ_SRC_MODE 0x4
-#define RSZ_SRC_FMT0 0x8
-#define RSZ_SRC_FMT1 0xc
-#define RSZ_SRC_VPS 0x10
-#define RSZ_SRC_VSZ 0x14
-#define RSZ_SRC_HPS 0x18
-#define RSZ_SRC_HSZ 0x1c
-#define RSZ_DMA_RZA 0x20
-#define RSZ_DMA_RZB 0x24
-#define RSZ_DMA_STA 0x28
-#define RSZ_GCK_MMR 0x2c
-#define RSZ_RESERVED0 0x30
-#define RSZ_GCK_SDR 0x34
-#define RSZ_IRQ_RZA 0x38
-#define RSZ_IRQ_RZB 0x3c
-#define RSZ_YUV_Y_MIN 0x40
-#define RSZ_YUV_Y_MAX 0x44
-#define RSZ_YUV_C_MIN 0x48
-#define RSZ_YUV_C_MAX 0x4c
-#define RSZ_YUV_PHS 0x50
-#define RSZ_SEQ 0x54
-
-/* Resizer Rescale Parameters */
-#define RSZ_EN_A 0x58
-#define RSZ_EN_B 0xe8
-/* Offsets of the registers to be added to the base
- * register of either RSZ0 or RSZ1
- */
-#define RSZ_MODE 0x4
-#define RSZ_420 0x8
-#define RSZ_I_VPS 0xc
-#define RSZ_I_HPS 0x10
-#define RSZ_O_VSZ 0x14
-#define RSZ_O_HSZ 0x18
-#define RSZ_V_PHS_Y 0x1c
-#define RSZ_V_PHS_C 0x20
-#define RSZ_V_DIF 0x24
-#define RSZ_V_TYP 0x28
-#define RSZ_V_LPF 0x2c
-#define RSZ_H_PHS 0x30
-#define RSZ_H_PHS_ADJ 0x34
-#define RSZ_H_DIF 0x38
-#define RSZ_H_TYP 0x3c
-#define RSZ_H_LPF 0x40
-#define RSZ_DWN_EN 0x44
-#define RSZ_DWN_AV 0x48
-
-/* Resizer RGB Conversion Parameters */
-#define RSZ_RGB_EN 0x4c
-#define RSZ_RGB_TYP 0x50
-#define RSZ_RGB_BLD 0x54
-
-/* Resizer External Memory Parameters */
-#define RSZ_SDR_Y_BAD_H 0x58
-#define RSZ_SDR_Y_BAD_L 0x5c
-#define RSZ_SDR_Y_SAD_H 0x60
-#define RSZ_SDR_Y_SAD_L 0x64
-#define RSZ_SDR_Y_OFT 0x68
-#define RSZ_SDR_Y_PTR_S 0x6c
-#define RSZ_SDR_Y_PTR_E 0x70
-#define RSZ_SDR_C_BAD_H 0x74
-#define RSZ_SDR_C_BAD_L 0x78
-#define RSZ_SDR_C_SAD_H 0x7c
-#define RSZ_SDR_C_SAD_L 0x80
-#define RSZ_SDR_C_OFT 0x84
-#define RSZ_SDR_C_PTR_S 0x88
-#define RSZ_SDR_C_PTR_E 0x8c
-
-/* Macro for resizer */
-#define RSZ_YUV_Y_MIN 0x40
-#define RSZ_YUV_Y_MAX 0x44
-#define RSZ_YUV_C_MIN 0x48
-#define RSZ_YUV_C_MAX 0x4c
-
-#define IPIPE_GCK_MMR_DEFAULT 1
-#define IPIPE_GCK_PIX_DEFAULT 0xe
-#define RSZ_GCK_MMR_DEFAULT 1
-#define RSZ_GCK_SDR_DEFAULT 1
-
-/* LUTDPC */
-#define LUTDPC_TBL_256_EN 0
-#define LUTDPC_INF_TBL_EN 1
-#define LUT_DPC_START_ADDR 0
-#define LUT_DPC_H_POS_MASK 0x1fff
-#define LUT_DPC_V_POS_MASK 0x1fff
-#define LUT_DPC_V_POS_SHIFT 13
-#define LUT_DPC_CORR_METH_SHIFT 26
-#define LUT_DPC_MAX_SIZE 256
-#define LUT_DPC_SIZE_MASK 0x3ff
-
-/* OTFDPC */
-#define OTFDPC_DPC2_THR_MASK 0xfff
-#define OTF_DET_METHOD_SHIFT 1
-#define OTF_DPC3_0_SHF_MASK 3
-#define OTF_DPC3_0_THR_SHIFT 6
-#define OTF_DPC3_0_THR_MASK 0x3f
-#define OTF_DPC3_0_SLP_MASK 0x3f
-#define OTF_DPC3_0_DET_MASK 0xfff
-#define OTF_DPC3_0_CORR_MASK 0xfff
-
-/* NF (D2F) */
-#define D2F_SPR_VAL_MASK 0x1f
-#define D2F_SPR_VAL_SHIFT 0
-#define D2F_SHFT_VAL_MASK 3
-#define D2F_SHFT_VAL_SHIFT 5
-#define D2F_SAMPLE_METH_SHIFT 7
-#define D2F_APPLY_LSC_GAIN_SHIFT 8
-#define D2F_USE_SPR_REG_VAL 0
-#define D2F_STR_VAL_MASK 0x1f
-#define D2F_THR_VAL_MASK 0x3ff
-#define D2F_EDGE_DET_THR_MASK 0x7ff
-
-/* Green Imbalance Correction */
-#define GIC_TYP_SHIFT 0
-#define GIC_THR_SEL_SHIFT 1
-#define GIC_APPLY_LSC_GAIN_SHIFT 2
-#define GIC_GAIN_MASK 0xff
-#define GIC_THR_MASK 0xfff
-#define GIC_SLOPE_MASK 0xfff
-#define GIC_NFGAN_INT_MASK 7
-#define GIC_NFGAN_DECI_MASK 0x1f
-
-/* WB */
-#define WB_OFFSET_MASK 0xfff
-#define WB_GAIN_INT_MASK 0xf
-#define WB_GAIN_DECI_MASK 0x1ff
-
-/* CFA */
-#define CFA_HPF_THR_2DIR_MASK 0x1fff
-#define CFA_HPF_SLOPE_2DIR_MASK 0x3ff
-#define CFA_HPF_MIX_THR_2DIR_MASK 0x1fff
-#define CFA_HPF_MIX_SLP_2DIR_MASK 0x3ff
-#define CFA_DIR_THR_2DIR_MASK 0x3ff
-#define CFA_DIR_SLP_2DIR_MASK 0x7f
-#define CFA_ND_WT_2DIR_MASK 0x3f
-#define CFA_DAA_HUE_FRA_MASK 0x3f
-#define CFA_DAA_EDG_THR_MASK 0xff
-#define CFA_DAA_THR_MIN_MASK 0x3ff
-#define CFA_DAA_THR_SLP_MASK 0x3ff
-#define CFA_DAA_SLP_MIN_MASK 0x3ff
-#define CFA_DAA_SLP_SLP_MASK 0x3ff
-#define CFA_DAA_LP_WT_MASK 0x3f
-
-/* RGB2RGB */
-#define RGB2RGB_1_OFST_MASK 0x1fff
-#define RGB2RGB_1_GAIN_INT_MASK 0xf
-#define RGB2RGB_GAIN_DECI_MASK 0xff
-#define RGB2RGB_2_OFST_MASK 0x7ff
-#define RGB2RGB_2_GAIN_INT_MASK 0x7
-
-/* Gamma */
-#define GAMMA_BYPR_SHIFT 0
-#define GAMMA_BYPG_SHIFT 1
-#define GAMMA_BYPB_SHIFT 2
-#define GAMMA_TBL_SEL_SHIFT 4
-#define GAMMA_TBL_SIZE_SHIFT 5
-#define GAMMA_MASK 0x3ff
-#define GAMMA_SHIFT 10
-
-/* 3D LUT */
-#define D3_LUT_ENTRY_MASK 0x3ff
-#define D3_LUT_ENTRY_R_SHIFT 20
-#define D3_LUT_ENTRY_G_SHIFT 10
-#define D3_LUT_ENTRY_B_SHIFT 0
-
-/* Lumina adj */
-#define LUM_ADJ_CONTR_SHIFT 0
-#define LUM_ADJ_BRIGHT_SHIFT 8
-
-/* RGB2YCbCr */
-#define RGB2YCBCR_OFST_MASK 0x7ff
-#define RGB2YCBCR_COEF_INT_MASK 0xf
-#define RGB2YCBCR_COEF_DECI_MASK 0xff
-
-/* GBCE */
-#define GBCE_Y_VAL_MASK 0xff
-#define GBCE_GAIN_VAL_MASK 0x3ff
-#define GBCE_ENTRY_SHIFT 10
-
-/* Edge Enhancements */
-#define YEE_HALO_RED_EN_SHIFT 1
-#define YEE_HPF_SHIFT_MASK 0xf
-#define YEE_COEF_MASK 0x3ff
-#define YEE_THR_MASK 0x3f
-#define YEE_ES_GAIN_MASK 0xfff
-#define YEE_ES_THR1_MASK 0xfff
-#define YEE_ENTRY_SHIFT 9
-#define YEE_ENTRY_MASK 0x1ff
-
-/* CAR */
-#define CAR_MF_THR 0xff
-#define CAR_SW1_SHIFT 8
-#define CAR_GAIN1_SHFT_MASK 7
-#define CAR_GAIN_MIN_MASK 0x1ff
-#define CAR_GAIN2_SHFT_MASK 0xf
-#define CAR_HPF_SHIFT_MASK 3
-
-/* CGS */
-#define CAR_SHIFT_MASK 3
-
-/* Resizer */
-#define RSZ_BYPASS_SHIFT 1
-#define RSZ_SRC_IMG_FMT_SHIFT 1
-#define RSZ_SRC_Y_C_SEL_SHIFT 2
-#define IPIPE_RSZ_VPS_MASK 0xffff
-#define IPIPE_RSZ_HPS_MASK 0xffff
-#define IPIPE_RSZ_VSZ_MASK 0x1fff
-#define IPIPE_RSZ_HSZ_MASK 0x1fff
-#define RSZ_HPS_MASK 0x1fff
-#define RSZ_VPS_MASK 0x1fff
-#define RSZ_O_HSZ_MASK 0x1fff
-#define RSZ_O_VSZ_MASK 0x1fff
-#define RSZ_V_PHS_MASK 0x3fff
-#define RSZ_V_DIF_MASK 0x3fff
-
-#define RSZA_H_FLIP_SHIFT 0
-#define RSZA_V_FLIP_SHIFT 1
-#define RSZB_H_FLIP_SHIFT 2
-#define RSZB_V_FLIP_SHIFT 3
-#define RSZ_A 0
-#define RSZ_B 1
-#define RSZ_CEN_SHIFT 1
-#define RSZ_YEN_SHIFT 0
-#define RSZ_TYP_Y_SHIFT 0
-#define RSZ_TYP_C_SHIFT 1
-#define RSZ_LPF_INT_MASK 0x3f
-#define RSZ_LPF_INT_C_SHIFT 6
-#define RSZ_H_PHS_MASK 0x3fff
-#define RSZ_H_DIF_MASK 0x3fff
-#define RSZ_DIFF_DOWN_THR 256
-#define RSZ_DWN_SCALE_AV_SZ_V_SHIFT 3
-#define RSZ_DWN_SCALE_AV_SZ_MASK 7
-#define RSZ_RGB_MSK1_SHIFT 2
-#define RSZ_RGB_MSK0_SHIFT 1
-#define RSZ_RGB_TYP_SHIFT 0
-#define RSZ_RGB_ALPHA_MASK 0xff
-
-static inline u32 regr_ip(void __iomem *addr, u32 offset)
-{
- return readl(addr + offset);
-}
-
-static inline void regw_ip(void __iomem *addr, u32 val, u32 offset)
-{
- writel(val, addr + offset);
-}
-
-static inline u32 w_ip_table(void __iomem *addr, u32 val, u32 offset)
-{
- writel(val, addr + offset);
-
- return val;
-}
-
-static inline u32 regr_rsz(void __iomem *addr, u32 offset)
-{
- return readl(addr + offset);
-}
-
-static inline u32 regw_rsz(void __iomem *addr, u32 val, u32 offset)
-{
- writel(val, addr + offset);
-
- return val;
-}
-
-int config_ipipe_hw(struct vpfe_ipipe_device *ipipe);
-int resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
- int resize_no, unsigned int address);
-int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable);
-void rsz_src_enable(void __iomem *rsz_base, int enable);
-void rsz_set_in_pix_format(unsigned char y_c);
-int config_rsz_hw(struct vpfe_resizer_device *resizer,
- struct resizer_params *config);
-void ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_nf *noise_filter);
-void ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_rgb2rgb *rgb);
-void ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
- struct vpfe_ipipe_yuv422_conv *conv);
-void ipipe_set_lum_adj_regs(void __iomem *base_addr,
- struct ipipe_lum_adj *lum_adj);
-void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
- struct vpfe_ipipe_rgb2yuv *yuv);
-void ipipe_set_lutdpc_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
-void ipipe_set_otfdpc_regs(void __iomem *base_addr,
- struct vpfe_ipipe_otfdpc *otfdpc);
-void ipipe_set_3d_lut_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
-void ipipe_set_gamma_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
-void ipipe_set_ee_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_yee *ee);
-void ipipe_set_gbce_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
-void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic);
-void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa);
-void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car);
-void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs);
-void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb);
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPE_HW_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
deleted file mode 100644
index 8b230541b1d1..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ /dev/null
@@ -1,1067 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include "dm365_ipipeif.h"
-#include "vpfe_mc_capture.h"
-
-static const unsigned int ipipeif_input_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_YDYUYDYV8_1X16,
- MEDIA_BUS_FMT_SBGGR8_1X8,
-};
-
-static const unsigned int ipipeif_output_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_YDYUYDYV8_1X16,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
-};
-
-static int
-ipipeif_get_pack_mode(u32 in_pix_fmt)
-{
- switch (in_pix_fmt) {
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_UV8_1X8:
- return IPIPEIF_5_1_PACK_8_BIT;
-
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- return IPIPEIF_5_1_PACK_8_BIT_A_LAW;
-
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return IPIPEIF_5_1_PACK_16_BIT;
-
- case MEDIA_BUS_FMT_SBGGR12_1X12:
- return IPIPEIF_5_1_PACK_12_BIT;
-
- default:
- return IPIPEIF_5_1_PACK_16_BIT;
- }
-}
-
-static inline u32 ipipeif_read(void *addr, u32 offset)
-{
- return readl(addr + offset);
-}
-
-static inline void ipipeif_write(u32 val, void *addr, u32 offset)
-{
- writel(val, addr + offset);
-}
-
-static void ipipeif_config_dpc(void *addr, struct ipipeif_dpc *dpc)
-{
- u32 val = 0;
-
- if (dpc->en) {
- val = (dpc->en & 1) << IPIPEIF_DPC2_EN_SHIFT;
- val |= dpc->thr & IPIPEIF_DPC2_THR_MASK;
- }
- ipipeif_write(val, addr, IPIPEIF_DPC2);
-}
-
-#define IPIPEIF_MODE_CONTINUOUS 0
-#define IPIPEIF_MODE_ONE_SHOT 1
-
-static int get_oneshot_mode(enum ipipeif_input_entity input)
-{
- if (input == IPIPEIF_INPUT_MEMORY)
- return IPIPEIF_MODE_ONE_SHOT;
- else if (input == IPIPEIF_INPUT_ISIF)
- return IPIPEIF_MODE_CONTINUOUS;
-
- return -EINVAL;
-}
-
-static int
-ipipeif_get_cfg_src1(struct vpfe_ipipeif_device *ipipeif)
-{
- struct v4l2_mbus_framefmt *informat;
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
- if (ipipeif->input == IPIPEIF_INPUT_MEMORY &&
- (informat->code == MEDIA_BUS_FMT_Y8_1X8 ||
- informat->code == MEDIA_BUS_FMT_UV8_1X8))
- return IPIPEIF_CCDC;
-
- return IPIPEIF_SRC1_PARALLEL_PORT;
-}
-
-static int
-ipipeif_get_data_shift(struct vpfe_ipipeif_device *ipipeif)
-{
- struct v4l2_mbus_framefmt *informat;
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
-
- switch (informat->code) {
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return IPIPEIF_5_1_BITS11_0;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_UV8_1X8:
- return IPIPEIF_5_1_BITS11_0;
-
- default:
- return IPIPEIF_5_1_BITS7_0;
- }
-}
-
-static enum ipipeif_input_source
-ipipeif_get_source(struct vpfe_ipipeif_device *ipipeif)
-{
- struct v4l2_mbus_framefmt *informat;
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
- if (ipipeif->input == IPIPEIF_INPUT_ISIF)
- return IPIPEIF_CCDC;
-
- if (informat->code == MEDIA_BUS_FMT_UYVY8_2X8)
- return IPIPEIF_SDRAM_YUV;
-
- return IPIPEIF_SDRAM_RAW;
-}
-
-void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif)
-{
- struct vpfe_video_device *video_in = &ipipeif->video_in;
-
- if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
- return;
-
- spin_lock(&video_in->dma_queue_lock);
- vpfe_video_process_buffer_complete(video_in);
- video_in->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
- vpfe_video_schedule_next_buffer(video_in);
- spin_unlock(&video_in->dma_queue_lock);
-}
-
-int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
-
- return ipipeif->config.decimation;
-}
-
-int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
-
- return ipipeif->config.rsz;
-}
-
-#define RD_DATA_15_2 0x7
-
-/*
- * ipipeif_hw_setup() - This function sets up IPIPEIF
- * @sd: pointer to v4l2 subdev structure
- * return -EINVAL or zero on success
- */
-static int ipipeif_hw_setup(struct v4l2_subdev *sd)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *informat, *outformat;
- struct ipipeif_params params = ipipeif->config;
- enum ipipeif_input_source ipipeif_source;
- u32 isif_port_if;
- void *ipipeif_base_addr;
- unsigned int val;
- int data_shift;
- int pack_mode;
- int source1;
- int tmp;
-
- ipipeif_base_addr = ipipeif->ipipeif_base_addr;
-
- /* Enable clock to IPIPEIF and IPIPE */
- vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
- outformat = &ipipeif->formats[IPIPEIF_PAD_SOURCE];
-
- /* Combine all the fields to make CFG1 register of IPIPEIF */
- tmp = val = get_oneshot_mode(ipipeif->input);
- if (tmp < 0) {
- dev_err(&sd->devnode->dev, "ipipeif: links setup required");
- return -EINVAL;
- }
- val <<= ONESHOT_SHIFT;
-
- ipipeif_source = ipipeif_get_source(ipipeif);
- val |= ipipeif_source << INPSRC_SHIFT;
-
- val |= params.clock_select << CLKSEL_SHIFT;
- val |= params.avg_filter << AVGFILT_SHIFT;
- val |= params.decimation << DECIM_SHIFT;
-
- pack_mode = ipipeif_get_pack_mode(informat->code);
- val |= pack_mode << PACK8IN_SHIFT;
-
- source1 = ipipeif_get_cfg_src1(ipipeif);
- val |= source1 << INPSRC1_SHIFT;
-
- data_shift = ipipeif_get_data_shift(ipipeif);
- if (ipipeif_source != IPIPEIF_SDRAM_YUV)
- val |= data_shift << DATASFT_SHIFT;
- else
- val &= ~(RD_DATA_15_2 << DATASFT_SHIFT);
-
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG1);
-
- switch (ipipeif_source) {
- case IPIPEIF_CCDC:
- ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
- break;
-
- case IPIPEIF_SDRAM_RAW:
- case IPIPEIF_CCDC_DARKFM:
- ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
- /* fall through */
- case IPIPEIF_SDRAM_YUV:
- val |= data_shift << DATASFT_SHIFT;
- ipipeif_write(params.ppln, ipipeif_base_addr, IPIPEIF_PPLN);
- ipipeif_write(params.lpfr, ipipeif_base_addr, IPIPEIF_LPFR);
- ipipeif_write(informat->width, ipipeif_base_addr, IPIPEIF_HNUM);
- ipipeif_write(informat->height,
- ipipeif_base_addr, IPIPEIF_VNUM);
- break;
-
- default:
- return -EINVAL;
- }
-
-	/* check if decimation is enabled or not */
- if (params.decimation)
- ipipeif_write(params.rsz, ipipeif_base_addr, IPIPEIF_RSZ);
-
- /* Setup sync alignment and initial rsz position */
- val = params.if_5_1.align_sync & 1;
- val <<= IPIPEIF_INIRSZ_ALNSYNC_SHIFT;
- val |= params.if_5_1.rsz_start & IPIPEIF_INIRSZ_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_INIRSZ);
- isif_port_if = informat->code;
-
- if (isif_port_if == MEDIA_BUS_FMT_Y8_1X8)
- isif_port_if = MEDIA_BUS_FMT_YUYV8_1X16;
- else if (isif_port_if == MEDIA_BUS_FMT_UV8_1X8)
- isif_port_if = MEDIA_BUS_FMT_SGRBG12_1X12;
-
- /* Enable DPCM decompression */
- switch (ipipeif_source) {
- case IPIPEIF_SDRAM_RAW:
- val = 0;
- if (outformat->code == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8) {
- val = 1;
- val |= (IPIPEIF_DPCM_8BIT_10BIT & 1) <<
- IPIPEIF_DPCM_BITS_SHIFT;
- val |= (ipipeif->dpcm_predictor & 1) <<
- IPIPEIF_DPCM_PRED_SHIFT;
- }
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DPCM);
-
- /* set DPC */
- ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
-
- ipipeif_write(params.if_5_1.clip,
- ipipeif_base_addr, IPIPEIF_OCLIP);
-
- /* fall through for SDRAM YUV mode */
- /* configure CFG2 */
- val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CFG2);
- switch (isif_port_if) {
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_Y8_1X8:
- RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
- break;
-
- default:
- RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- RESETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
- break;
- }
-
- case IPIPEIF_SDRAM_YUV:
- /* Set clock divider */
- if (params.clock_select == IPIPEIF_SDRAM_CLK) {
- val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CLKDIV);
- val |= (params.if_5_1.clk_div.m - 1) <<
- IPIPEIF_CLKDIV_M_SHIFT;
- val |= (params.if_5_1.clk_div.n - 1);
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CLKDIV);
- }
- break;
-
- case IPIPEIF_CCDC:
- case IPIPEIF_CCDC_DARKFM:
- /* set DPC */
- ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
-
- /* Set DF gain & threshold control */
- val = 0;
- if (params.if_5_1.df_gain_en) {
- val = params.if_5_1.df_gain_thr &
- IPIPEIF_DF_GAIN_THR_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGTH);
- val = (params.if_5_1.df_gain_en & 1) <<
- IPIPEIF_DF_GAIN_EN_SHIFT;
- val |= params.if_5_1.df_gain &
- IPIPEIF_DF_GAIN_MASK;
- }
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGVL);
- /* configure CFG2 */
- val = VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_HDPOL_SHIFT;
- val |= VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_VDPOL_SHIFT;
-
- switch (isif_port_if) {
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV10_1X20:
- RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
- break;
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
- SETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
- SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
- val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT;
- break;
-
- default:
- /* Bayer */
- ipipeif_write(params.if_5_1.clip, ipipeif_base_addr,
- IPIPEIF_OCLIP);
- }
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-ipipeif_set_config(struct v4l2_subdev *sd, struct ipipeif_params *config)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct device *dev = ipipeif->subdev.v4l2_dev->dev;
-
- if (!config) {
- dev_err(dev, "Invalid configuration pointer\n");
- return -EINVAL;
- }
-
- ipipeif->config.clock_select = config->clock_select;
- ipipeif->config.ppln = config->ppln;
- ipipeif->config.lpfr = config->lpfr;
- ipipeif->config.rsz = config->rsz;
- ipipeif->config.decimation = config->decimation;
- if (ipipeif->config.decimation &&
- (ipipeif->config.rsz < IPIPEIF_RSZ_MIN ||
- ipipeif->config.rsz > IPIPEIF_RSZ_MAX)) {
- dev_err(dev, "rsz range is %d to %d\n",
- IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
- return -EINVAL;
- }
-
- ipipeif->config.avg_filter = config->avg_filter;
-
- ipipeif->config.if_5_1.df_gain_thr = config->if_5_1.df_gain_thr;
- ipipeif->config.if_5_1.df_gain = config->if_5_1.df_gain;
- ipipeif->config.if_5_1.df_gain_en = config->if_5_1.df_gain_en;
-
- ipipeif->config.if_5_1.rsz_start = config->if_5_1.rsz_start;
- ipipeif->config.if_5_1.align_sync = config->if_5_1.align_sync;
- ipipeif->config.if_5_1.clip = config->if_5_1.clip;
-
- ipipeif->config.if_5_1.dpc.en = config->if_5_1.dpc.en;
- ipipeif->config.if_5_1.dpc.thr = config->if_5_1.dpc.thr;
-
- ipipeif->config.if_5_1.clk_div.m = config->if_5_1.clk_div.m;
- ipipeif->config.if_5_1.clk_div.n = config->if_5_1.clk_div.n;
-
- return 0;
-}
-
-static int
-ipipeif_get_config(struct v4l2_subdev *sd, void __user *arg)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct ipipeif_params *config = arg;
- struct device *dev = ipipeif->subdev.v4l2_dev->dev;
-
- if (!arg) {
- dev_err(dev, "Invalid configuration pointer\n");
- return -EINVAL;
- }
-
- config->clock_select = ipipeif->config.clock_select;
- config->ppln = ipipeif->config.ppln;
- config->lpfr = ipipeif->config.lpfr;
- config->rsz = ipipeif->config.rsz;
- config->decimation = ipipeif->config.decimation;
- config->avg_filter = ipipeif->config.avg_filter;
-
- config->if_5_1.df_gain_thr = ipipeif->config.if_5_1.df_gain_thr;
- config->if_5_1.df_gain = ipipeif->config.if_5_1.df_gain;
- config->if_5_1.df_gain_en = ipipeif->config.if_5_1.df_gain_en;
-
- config->if_5_1.rsz_start = ipipeif->config.if_5_1.rsz_start;
- config->if_5_1.align_sync = ipipeif->config.if_5_1.align_sync;
- config->if_5_1.clip = ipipeif->config.if_5_1.clip;
-
- config->if_5_1.dpc.en = ipipeif->config.if_5_1.dpc.en;
- config->if_5_1.dpc.thr = ipipeif->config.if_5_1.dpc.thr;
-
- config->if_5_1.clk_div.m = ipipeif->config.if_5_1.clk_div.m;
- config->if_5_1.clk_div.n = ipipeif->config.if_5_1.clk_div.n;
-
- return 0;
-}
-
-/*
- * ipipeif_ioctl() - Handle ipipeif module private ioctl's
- * @sd: pointer to v4l2 subdev structure
- * @cmd: configuration command
- * @arg: configuration argument
- */
-static long ipipeif_ioctl(struct v4l2_subdev *sd,
- unsigned int cmd, void *arg)
-{
- struct ipipeif_params *config = arg;
- int ret = -ENOIOCTLCMD;
-
- switch (cmd) {
- case VIDIOC_VPFE_IPIPEIF_S_CONFIG:
- ret = ipipeif_set_config(sd, config);
- break;
-
- case VIDIOC_VPFE_IPIPEIF_G_CONFIG:
- ret = ipipeif_get_config(sd, arg);
- break;
- }
- return ret;
-}
-
-/*
- * ipipeif_s_ctrl() - Handle set control subdev method
- * @ctrl: pointer to v4l2 control structure
- */
-static int ipipeif_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vpfe_ipipeif_device *ipipeif =
- container_of(ctrl->handler, struct vpfe_ipipeif_device, ctrls);
-
- switch (ctrl->id) {
- case VPFE_CID_DPCM_PREDICTOR:
- ipipeif->dpcm_predictor = ctrl->val;
- break;
-
- case V4L2_CID_GAIN:
- ipipeif->gain = ctrl->val;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-#define ENABLE_IPIPEIF 0x1
-
-void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
- void *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
- unsigned char val;
-
- if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
- return;
-
- do {
- val = ipipeif_read(ipipeif_base_addr, IPIPEIF_ENABLE);
- } while (val & 0x1);
-
- ipipeif_write(ENABLE_IPIPEIF, ipipeif_base_addr, IPIPEIF_ENABLE);
-}
-
-/*
- * ipipeif_set_stream() - Enable/Disable streaming on ipipeif subdev
- * @sd: pointer to v4l2 subdev structure
- * @enable: 1 == Enable, 0 == Disable
- */
-static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
- int ret = 0;
-
- if (!enable)
- return ret;
-
- ret = ipipeif_hw_setup(sd);
- if (!ret)
- vpfe_ipipeif_enable(vpfe_dev);
-
- return ret;
-}
-
-/*
- * ipipeif_enum_mbus_code() - Handle pixel format enumeration
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- * return -EINVAL or zero on success
- */
-static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- switch (code->pad) {
- case IPIPEIF_PAD_SINK:
- if (code->index >= ARRAY_SIZE(ipipeif_input_fmts))
- return -EINVAL;
-
- code->code = ipipeif_input_fmts[code->index];
- break;
-
- case IPIPEIF_PAD_SOURCE:
- if (code->index >= ARRAY_SIZE(ipipeif_output_fmts))
- return -EINVAL;
-
- code->code = ipipeif_output_fmts[code->index];
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * ipipeif_get_format() - Handle get format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- */
-static int
-ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- fmt->format = ipipeif->formats[fmt->pad];
- else
- fmt->format = *(v4l2_subdev_get_try_format(sd, cfg, fmt->pad));
-
- return 0;
-}
-
-#define MIN_OUT_WIDTH 32
-#define MIN_OUT_HEIGHT 32
-
-/*
- * ipipeif_try_format() - Handle try format by pad subdev method
- * @ipipeif: VPFE ipipeif device.
- * @cfg: V4L2 subdev pad config
- * @pad: pad num.
- * @fmt: pointer to v4l2 format structure.
- * @which : wanted subdev format
- */
-static void
-ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- unsigned int max_out_height;
- unsigned int max_out_width;
- unsigned int i;
-
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
-
- if (pad == IPIPEIF_PAD_SINK) {
- for (i = 0; i < ARRAY_SIZE(ipipeif_input_fmts); i++)
- if (fmt->code == ipipeif_input_fmts[i])
- break;
-
-		/* If not found, use SGRBG12 as default */
- if (i >= ARRAY_SIZE(ipipeif_input_fmts))
- fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
- } else if (pad == IPIPEIF_PAD_SOURCE) {
- for (i = 0; i < ARRAY_SIZE(ipipeif_output_fmts); i++)
- if (fmt->code == ipipeif_output_fmts[i])
- break;
-
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(ipipeif_output_fmts))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- }
-
-	fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
-	fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
-}
-
-static int
-ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * __ipipeif_get_format() - helper function for getting ipipeif format
- * @ipipeif: pointer to ipipeif private structure.
- * @cfg: V4L2 subdev pad config
- * @pad: pad number.
- * @which: wanted subdev format.
- *
- */
-static struct v4l2_mbus_framefmt *
-__ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
-
- return &ipipeif->formats[pad];
-}
-
-/*
- * ipipeif_set_format() - Handle set format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int
-ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (fmt->pad == IPIPEIF_PAD_SINK &&
- ipipeif->input != IPIPEIF_INPUT_NONE)
- ipipeif->formats[fmt->pad] = fmt->format;
- else if (fmt->pad == IPIPEIF_PAD_SOURCE &&
- ipipeif->output != IPIPEIF_OUTPUT_NONE)
- ipipeif->formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static void ipipeif_set_default_config(struct vpfe_ipipeif_device *ipipeif)
-{
-#define WIDTH_I 640
-#define HEIGHT_I 480
-
- const struct ipipeif_params ipipeif_defaults = {
- .clock_select = IPIPEIF_SDRAM_CLK,
- .ppln = WIDTH_I + 8,
- .lpfr = HEIGHT_I + 10,
- .rsz = 16, /* resize ratio 16/rsz */
- .decimation = IPIPEIF_DECIMATION_OFF,
- .avg_filter = IPIPEIF_AVG_OFF,
- .if_5_1 = {
- .clk_div = {
- .m = 1, /* clock = sdram clock * (m/n) */
- .n = 6
- },
- .clip = 4095,
- },
- };
- memcpy(&ipipeif->config, &ipipeif_defaults,
- sizeof(struct ipipeif_params));
-}
-
-/*
- * ipipeif_init_formats() - Initialize formats on all pads
- * @sd: VPFE ipipeif V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. Try formats are initialized
- * on the file handle.
- */
-static int
-ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPEIF_PAD_SINK;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipeif_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPEIF_PAD_SOURCE;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipeif_set_format(sd, fh->pad, &format);
-
- ipipeif_set_default_config(ipipeif);
-
- return 0;
-}
-
-/*
- * ipipeif_video_in_queue() - ipipeif video in queue
- * @vpfe_dev: vpfe device pointer
- * @addr: buffer address
- */
-static int
-ipipeif_video_in_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
- void *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
- unsigned int adofs;
- u32 val;
-
- if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
- return -EINVAL;
-
- switch (ipipeif->formats[IPIPEIF_PAD_SINK].code) {
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_UV8_1X8:
- case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
- adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width;
- break;
-
- default:
- adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width << 1;
- break;
- }
-
- /* adjust the line len to be a multiple of 32 */
- adofs += 31;
- adofs &= ~0x1f;
- val = (adofs >> 5) & IPIPEIF_ADOFS_LSB_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADOFS);
-
-	/* lower sixteen bits */
- val = (addr >> IPIPEIF_ADDRL_SHIFT) & IPIPEIF_ADDRL_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRL);
-
-	/* upper bits of the address */
- val = (addr >> IPIPEIF_ADDRU_SHIFT) & IPIPEIF_ADDRU_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRU);
-
- return 0;
-}
-
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops ipipeif_v4l2_core_ops = {
- .ioctl = ipipeif_ioctl,
-};
-
-static const struct v4l2_ctrl_ops ipipeif_ctrl_ops = {
- .s_ctrl = ipipeif_s_ctrl,
-};
-
-static const struct v4l2_ctrl_config vpfe_ipipeif_dpcm_pred = {
- .ops = &ipipeif_ctrl_ops,
- .id = VPFE_CID_DPCM_PREDICTOR,
- .name = "DPCM Predictor",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
-};
-
-/* subdev file operations */
-static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
- .open = ipipeif_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
- .s_stream = ipipeif_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
- .enum_mbus_code = ipipeif_enum_mbus_code,
- .enum_frame_size = ipipeif_enum_frame_size,
- .get_fmt = ipipeif_get_format,
- .set_fmt = ipipeif_set_format,
-};
-
-/* subdev operations */
-static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
- .core = &ipipeif_v4l2_core_ops,
- .video = &ipipeif_v4l2_video_ops,
- .pad = &ipipeif_v4l2_pad_ops,
-};
-
-static const struct vpfe_video_operations video_in_ops = {
- .queue = ipipeif_video_in_queue,
-};
-
-static int
-ipipeif_link_setup(struct media_entity *entity, const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe = to_vpfe_device(ipipeif);
-
- switch (local->index | media_entity_type(remote->entity)) {
- case IPIPEIF_PAD_SINK | MEDIA_ENT_T_DEVNODE:
- /* Single shot mode */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipeif->input = IPIPEIF_INPUT_NONE;
- break;
- }
- ipipeif->input = IPIPEIF_INPUT_MEMORY;
- break;
-
- case IPIPEIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- /* read from isif */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipeif->input = IPIPEIF_INPUT_NONE;
- break;
- }
- if (ipipeif->input != IPIPEIF_INPUT_NONE)
- return -EBUSY;
-
- ipipeif->input = IPIPEIF_INPUT_ISIF;
- break;
-
- case IPIPEIF_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipeif->output = IPIPEIF_OUTPUT_NONE;
- break;
- }
- if (remote->entity == &vpfe->vpfe_ipipe.subdev.entity)
-			/* connected to ipipe */
- ipipeif->output = IPIPEIF_OUTPUT_IPIPE;
- else if (remote->entity == &vpfe->vpfe_resizer.
- crop_resizer.subdev.entity)
- /* connected to resizer */
- ipipeif->output = IPIPEIF_OUTPUT_RESIZER;
- else
- return -EINVAL;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct media_entity_operations ipipeif_media_ops = {
- .link_setup = ipipeif_link_setup,
-};
-
-/*
- * vpfe_ipipeif_unregister_entities() - Unregister entity
- * @ipipeif: pointer to ipipeif subdevice structure.
- */
-void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif)
-{
- /* unregister video device */
- vpfe_video_unregister(&ipipeif->video_in);
-
- /* unregister subdev */
- v4l2_device_unregister_subdev(&ipipeif->subdev);
- /* cleanup entity */
- media_entity_cleanup(&ipipeif->subdev.entity);
-}
-
-int
-vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_device *vdev)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
- unsigned int flags;
- int ret;
-
- /* Register the subdev */
- ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
- if (ret < 0)
- return ret;
-
- ret = vpfe_video_register(&ipipeif->video_in, vdev);
- if (ret) {
- pr_err("Failed to register ipipeif video-in device\n");
- goto fail;
- }
- ipipeif->video_in.vpfe_dev = vpfe_dev;
-
- flags = 0;
- ret = media_entity_create_link(&ipipeif->video_in.video_dev.entity, 0,
- &ipipeif->subdev.entity, 0, flags);
- if (ret < 0)
- goto fail;
-
- return 0;
-fail:
- v4l2_device_unregister_subdev(&ipipeif->subdev);
-
- return ret;
-}
-
-#define IPIPEIF_GAIN_HIGH 0x3ff
-#define IPIPEIF_DEFAULT_GAIN 0x200
-
-int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev)
-{
- struct v4l2_subdev *sd = &ipipeif->subdev;
- struct media_pad *pads = &ipipeif->pads[0];
- struct media_entity *me = &sd->entity;
- static resource_size_t res_len;
- struct resource *res;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (!res)
- return -ENOENT;
-
- res_len = resource_size(res);
- res = request_mem_region(res->start, res_len, res->name);
- if (!res)
- return -EBUSY;
-
- ipipeif->ipipeif_base_addr = ioremap_nocache(res->start, res_len);
- if (!ipipeif->ipipeif_base_addr) {
- ret = -EBUSY;
- goto fail;
- }
-
- v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
-
- sd->internal_ops = &ipipeif_v4l2_internal_ops;
- strlcpy(sd->name, "DAVINCI IPIPEIF", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
-
- v4l2_set_subdevdata(sd, ipipeif);
-
- sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[IPIPEIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ipipeif->input = IPIPEIF_INPUT_NONE;
- ipipeif->output = IPIPEIF_OUTPUT_NONE;
- me->ops = &ipipeif_media_ops;
-
- ret = media_entity_init(me, IPIPEIF_NUM_PADS, pads, 0);
- if (ret)
- goto fail;
-
- v4l2_ctrl_handler_init(&ipipeif->ctrls, 2);
- v4l2_ctrl_new_std(&ipipeif->ctrls, &ipipeif_ctrl_ops,
- V4L2_CID_GAIN, 0,
- IPIPEIF_GAIN_HIGH, 1, IPIPEIF_DEFAULT_GAIN);
- v4l2_ctrl_new_custom(&ipipeif->ctrls, &vpfe_ipipeif_dpcm_pred, NULL);
- v4l2_ctrl_handler_setup(&ipipeif->ctrls);
- sd->ctrl_handler = &ipipeif->ctrls;
-
- ipipeif->video_in.ops = &video_in_ops;
- ipipeif->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ret = vpfe_video_init(&ipipeif->video_in, "IPIPEIF");
- if (ret) {
- pr_err("Failed to init IPIPEIF video-in device\n");
- goto fail;
- }
- ipipeif_set_default_config(ipipeif);
- return 0;
-fail:
- release_mem_region(res->start, res_len);
- return ret;
-}
-
-void
-vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev)
-{
- struct resource *res;
-
- v4l2_ctrl_handler_free(&ipipeif->ctrls);
- iounmap(ipipeif->ipipeif_base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res)
- release_mem_region(res->start, resource_size(res));
-
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
deleted file mode 100644
index cea3d61335af..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_H
-#define _DAVINCI_VPFE_DM365_IPIPEIF_H
-
-#include <linux/platform_device.h>
-
-#include <media/davinci/vpss.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-
-#include "dm365_ipipeif_user.h"
-#include "vpfe_video.h"
-
-/* IPIPE base specific types */
-enum ipipeif_data_shift {
- IPIPEIF_BITS15_2 = 0,
- IPIPEIF_BITS14_1 = 1,
- IPIPEIF_BITS13_0 = 2,
- IPIPEIF_BITS12_0 = 3,
- IPIPEIF_BITS11_0 = 4,
- IPIPEIF_BITS10_0 = 5,
- IPIPEIF_BITS9_0 = 6,
-};
-
-enum ipipeif_clkdiv {
- IPIPEIF_DIVIDE_HALF = 0,
- IPIPEIF_DIVIDE_THIRD = 1,
- IPIPEIF_DIVIDE_FOURTH = 2,
- IPIPEIF_DIVIDE_FIFTH = 3,
- IPIPEIF_DIVIDE_SIXTH = 4,
- IPIPEIF_DIVIDE_EIGHTH = 5,
- IPIPEIF_DIVIDE_SIXTEENTH = 6,
- IPIPEIF_DIVIDE_THIRTY = 7,
-};
-
-enum ipipeif_pack_mode {
- IPIPEIF_PACK_16_BIT = 0,
- IPIPEIF_PACK_8_BIT = 1,
-};
-
-enum ipipeif_5_1_pack_mode {
- IPIPEIF_5_1_PACK_16_BIT = 0,
- IPIPEIF_5_1_PACK_8_BIT = 1,
- IPIPEIF_5_1_PACK_8_BIT_A_LAW = 2,
- IPIPEIF_5_1_PACK_12_BIT = 3
-};
-
-enum ipipeif_input_source {
- IPIPEIF_CCDC = 0,
- IPIPEIF_SDRAM_RAW = 1,
- IPIPEIF_CCDC_DARKFM = 2,
- IPIPEIF_SDRAM_YUV = 3,
-};
-
-enum ipipeif_ialaw {
- IPIPEIF_ALAW_OFF = 0,
- IPIPEIF_ALAW_ON = 1,
-};
-
-enum ipipeif_input_src1 {
- IPIPEIF_SRC1_PARALLEL_PORT = 0,
- IPIPEIF_SRC1_SDRAM_RAW = 1,
- IPIPEIF_SRC1_ISIF_DARKFM = 2,
- IPIPEIF_SRC1_SDRAM_YUV = 3,
-};
-
-enum ipipeif_dfs_dir {
- IPIPEIF_PORT_MINUS_SDRAM = 0,
- IPIPEIF_SDRAM_MINUS_PORT = 1,
-};
-
-enum ipipeif_chroma_phase {
- IPIPEIF_CBCR_Y = 0,
- IPIPEIF_Y_CBCR = 1,
-};
-
-enum ipipeif_dpcm_type {
- IPIPEIF_DPCM_8BIT_10BIT = 0,
- IPIPEIF_DPCM_8BIT_12BIT = 1,
-};
-
-/* data shift for IPIPE 5.1 */
-enum ipipeif_5_1_data_shift {
- IPIPEIF_5_1_BITS11_0 = 0,
- IPIPEIF_5_1_BITS10_0 = 1,
- IPIPEIF_5_1_BITS9_0 = 2,
- IPIPEIF_5_1_BITS8_0 = 3,
- IPIPEIF_5_1_BITS7_0 = 4,
- IPIPEIF_5_1_BITS15_4 = 5,
-};
-
-#define IPIPEIF_PAD_SINK 0
-#define IPIPEIF_PAD_SOURCE 1
-
-#define IPIPEIF_NUM_PADS 2
-
-enum ipipeif_input_entity {
- IPIPEIF_INPUT_NONE = 0,
- IPIPEIF_INPUT_ISIF = 1,
- IPIPEIF_INPUT_MEMORY = 2,
-};
-
-enum ipipeif_output_entity {
- IPIPEIF_OUTPUT_NONE = 0,
- IPIPEIF_OUTPUT_IPIPE = 1,
- IPIPEIF_OUTPUT_RESIZER = 2,
-};
-
-struct vpfe_ipipeif_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[IPIPEIF_NUM_PADS];
- struct v4l2_mbus_framefmt formats[IPIPEIF_NUM_PADS];
- enum ipipeif_input_entity input;
- unsigned int output;
- struct vpfe_video_device video_in;
- struct v4l2_ctrl_handler ctrls;
- void __iomem *ipipeif_base_addr;
- struct ipipeif_params config;
- int dpcm_predictor;
- int gain;
-};
-
-/* IPIPEIF Register Offsets from the base address */
-#define IPIPEIF_ENABLE 0x00
-#define IPIPEIF_CFG1 0x04
-#define IPIPEIF_PPLN 0x08
-#define IPIPEIF_LPFR 0x0c
-#define IPIPEIF_HNUM 0x10
-#define IPIPEIF_VNUM 0x14
-#define IPIPEIF_ADDRU 0x18
-#define IPIPEIF_ADDRL 0x1c
-#define IPIPEIF_ADOFS 0x20
-#define IPIPEIF_RSZ 0x24
-#define IPIPEIF_GAIN 0x28
-
-/* Below registers are available only on IPIPE 5.1 */
-#define IPIPEIF_DPCM 0x2c
-#define IPIPEIF_CFG2 0x30
-#define IPIPEIF_INIRSZ 0x34
-#define IPIPEIF_OCLIP 0x38
-#define IPIPEIF_DTUDF 0x3c
-#define IPIPEIF_CLKDIV 0x40
-#define IPIPEIF_DPC1 0x44
-#define IPIPEIF_DPC2 0x48
-#define IPIPEIF_DFSGVL 0x4c
-#define IPIPEIF_DFSGTH 0x50
-#define IPIPEIF_RSZ3A 0x54
-#define IPIPEIF_INIRSZ3A 0x58
-#define IPIPEIF_RSZ_MIN 16
-#define IPIPEIF_RSZ_MAX 112
-#define IPIPEIF_RSZ_CONST 16
-#define SETBIT(reg, bit) (reg = ((reg) | ((0x00000001)<<(bit))))
-#define RESETBIT(reg, bit) (reg = ((reg) & (~(0x00000001<<(bit)))))
-
-#define IPIPEIF_ADOFS_LSB_MASK 0x1ff
-#define IPIPEIF_ADOFS_LSB_SHIFT 5
-#define IPIPEIF_ADOFS_MSB_MASK 0x200
-#define IPIPEIF_ADDRU_MASK 0x7ff
-#define IPIPEIF_ADDRL_SHIFT 5
-#define IPIPEIF_ADDRL_MASK 0xffff
-#define IPIPEIF_ADDRU_SHIFT 21
-#define IPIPEIF_ADDRMSB_SHIFT 31
-#define IPIPEIF_ADDRMSB_LEFT_SHIFT 10
-
-/* CFG1 Masks and shifts */
-#define ONESHOT_SHIFT 0
-#define DECIM_SHIFT 1
-#define INPSRC_SHIFT 2
-#define CLKDIV_SHIFT 4
-#define AVGFILT_SHIFT 7
-#define PACK8IN_SHIFT 8
-#define IALAW_SHIFT 9
-#define CLKSEL_SHIFT 10
-#define DATASFT_SHIFT 11
-#define INPSRC1_SHIFT 14
-
-/* DPC2 */
-#define IPIPEIF_DPC2_EN_SHIFT 12
-#define IPIPEIF_DPC2_THR_MASK 0xfff
-/* Applicable for IPIPE 5.1 */
-#define IPIPEIF_DF_GAIN_EN_SHIFT 10
-#define IPIPEIF_DF_GAIN_MASK 0x3ff
-#define IPIPEIF_DF_GAIN_THR_MASK 0xfff
-/* DPCM */
-#define IPIPEIF_DPCM_BITS_SHIFT 2
-#define IPIPEIF_DPCM_PRED_SHIFT 1
-/* CFG2 */
-#define IPIPEIF_CFG2_HDPOL_SHIFT 1
-#define IPIPEIF_CFG2_VDPOL_SHIFT 2
-#define IPIPEIF_CFG2_YUV8_SHIFT 6
-#define IPIPEIF_CFG2_YUV16_SHIFT 3
-#define IPIPEIF_CFG2_YUV8P_SHIFT 7
-
-/* INIRSZ */
-#define IPIPEIF_INIRSZ_ALNSYNC_SHIFT 13
-#define IPIPEIF_INIRSZ_MASK 0x1fff
-
-/* CLKDIV */
-#define IPIPEIF_CLKDIV_M_SHIFT 8
-
-void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev);
-void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif);
-int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev);
-int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev);
-void vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev);
-int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev);
-int vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_device *vdev);
-void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif);
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
deleted file mode 100644
index e2a69b59554a..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
-#define _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
-
-/* clockdiv for IPIPE 5.1 */
-struct ipipeif_5_1_clkdiv {
- unsigned char m;
- unsigned char n;
-};
-
-enum ipipeif_decimation {
- IPIPEIF_DECIMATION_OFF,
- IPIPEIF_DECIMATION_ON
-};
-
-/* DPC at the if for IPIPE 5.1 */
-struct ipipeif_dpc {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* threshold */
- unsigned short thr;
-};
-
-enum ipipeif_clock {
- IPIPEIF_PIXCEL_CLK,
- IPIPEIF_SDRAM_CLK
-};
-
-enum ipipeif_avg_filter {
- IPIPEIF_AVG_OFF,
- IPIPEIF_AVG_ON
-};
-
-struct ipipeif_5_1 {
- struct ipipeif_5_1_clkdiv clk_div;
- /* Defect pixel correction */
- struct ipipeif_dpc dpc;
- /* clipped to this value */
- unsigned short clip;
- /* Align HSync and VSync to rsz_start */
- unsigned char align_sync;
- /* resizer start position */
- unsigned int rsz_start;
- /* DF gain enable */
- unsigned char df_gain_en;
- /* DF gain value */
- unsigned short df_gain;
- /* DF gain threshold value */
- unsigned short df_gain_thr;
-};
-
-struct ipipeif_params {
- enum ipipeif_clock clock_select;
- unsigned int ppln;
- unsigned int lpfr;
- unsigned char rsz;
- enum ipipeif_decimation decimation;
- enum ipipeif_avg_filter avg_filter;
- /* IPIPE 5.1 */
- struct ipipeif_5_1 if_5_1;
-};
-
-/*
- * Private IOCTL
- * VIDIOC_VPFE_IPIPEIF_S_CONFIG: Set IPIPEIF configuration
- * VIDIOC_VPFE_IPIPEIF_G_CONFIG: Get IPIPEIF configuration
- */
-#define VIDIOC_VPFE_IPIPEIF_S_CONFIG \
- _IOWR('I', BASE_VIDIOC_PRIVATE + 1, struct ipipeif_params)
-#define VIDIOC_VPFE_IPIPEIF_G_CONFIG \
- _IOWR('I', BASE_VIDIOC_PRIVATE + 2, struct ipipeif_params)
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_USER_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
deleted file mode 100644
index 80907b464412..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ /dev/null
@@ -1,2106 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include <linux/delay.h>
-#include "dm365_isif.h"
-#include "vpfe_mc_capture.h"
-
-#define MAX_WIDTH 4096
-#define MAX_HEIGHT 4096
-
-static const unsigned int isif_fmts[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_YUYV8_1X16,
- MEDIA_BUS_FMT_YUYV10_1X20,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
-};
-
-#define ISIF_COLPTN_R_Ye 0x0
-#define ISIF_COLPTN_Gr_Cy 0x1
-#define ISIF_COLPTN_Gb_G 0x2
-#define ISIF_COLPTN_B_Mg 0x3
-
-#define ISIF_CCOLP_CP01_0 0
-#define ISIF_CCOLP_CP03_2 2
-#define ISIF_CCOLP_CP05_4 4
-#define ISIF_CCOLP_CP07_6 6
-#define ISIF_CCOLP_CP11_0 8
-#define ISIF_CCOLP_CP13_2 10
-#define ISIF_CCOLP_CP15_4 12
-#define ISIF_CCOLP_CP17_6 14
-
-static const u32 isif_sgrbg_pattern =
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP01_0 |
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP03_2 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP05_4 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP07_6 |
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP11_0 |
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP13_2 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP15_4 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP17_6;
-
-static const u32 isif_srggb_pattern =
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP01_0 |
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP03_2 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP05_4 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP07_6 |
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP11_0 |
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP13_2 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP15_4 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP17_6;
-
-static inline u32 isif_read(void __iomem *base_addr, u32 offset)
-{
- return readl(base_addr + offset);
-}
-
-static inline void isif_write(void __iomem *base_addr, u32 val, u32 offset)
-{
- writel(val, base_addr + offset);
-}
-
-static inline u32 isif_merge(void __iomem *base_addr, u32 mask, u32 val,
- u32 offset)
-{
- u32 new_val = (isif_read(base_addr, offset) & ~mask) | (val & mask);
-
- isif_write(base_addr, new_val, offset);
-
- return new_val;
-}
-
-static void isif_enable_output_to_sdram(struct vpfe_isif_device *isif, int en)
-{
- isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_WEN_MASK,
- en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
-}
-
-static inline void
-isif_regw_lin_tbl(struct vpfe_isif_device *isif, u32 val, u32 offset, int i)
-{
- if (!i)
- writel(val, isif->isif_cfg.linear_tbl0_addr + offset);
- else
- writel(val, isif->isif_cfg.linear_tbl1_addr + offset);
-}
-
-static void isif_disable_all_modules(struct vpfe_isif_device *isif)
-{
- /* disable BC */
- isif_write(isif->isif_cfg.base_addr, 0, CLAMPCFG);
- /* disable vdfc */
- isif_write(isif->isif_cfg.base_addr, 0, DFCCTL);
- /* disable CSC */
- isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
- /* disable linearization */
- isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
-}
-
-static void isif_enable(struct vpfe_isif_device *isif, int en)
-{
- if (!en)
-		/* Before disabling the ISIF, disable all ISIF modules */
- isif_disable_all_modules(isif);
-
- /*
- * wait for next VD. Assume lowest scan rate is 12 Hz. So
- * 100 msec delay is good enough
- */
- msleep(100);
- isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_VDHDEN_MASK,
- en, SYNCEN);
-}
-
-/*
- * ISIF helper functions
- */
-
-#define DM365_ISIF_MDFS_OFFSET 15
-#define DM365_ISIF_MDFS_MASK 0x1
-
-/* get field id in isif hardware */
-enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
- u32 field_status;
-
- field_status = isif_read(isif->isif_cfg.base_addr, MODESET);
- field_status = (field_status >> DM365_ISIF_MDFS_OFFSET) &
- DM365_ISIF_MDFS_MASK;
- return field_status;
-}
-
-static int
-isif_set_pixel_format(struct vpfe_isif_device *isif, unsigned int pixfmt)
-{
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
- if (pixfmt == V4L2_PIX_FMT_SBGGR16)
- isif->isif_cfg.data_pack = ISIF_PACK_16BIT;
- else if ((pixfmt == V4L2_PIX_FMT_SGRBG10DPCM8) ||
- (pixfmt == V4L2_PIX_FMT_SGRBG10ALAW8))
- isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
- else
- return -EINVAL;
-
- isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
- isif->isif_cfg.bayer.v4l2_pix_fmt = pixfmt;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
- else
- return -EINVAL;
-
- isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
- isif->isif_cfg.ycbcr.v4l2_pix_fmt = pixfmt;
- }
-
- return 0;
-}
-
-static int
-isif_set_frame_format(struct vpfe_isif_device *isif,
- enum isif_frmfmt frm_fmt)
-{
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
- isif->isif_cfg.bayer.frm_fmt = frm_fmt;
- else
- isif->isif_cfg.ycbcr.frm_fmt = frm_fmt;
-
- return 0;
-}
-
-static int isif_set_image_window(struct vpfe_isif_device *isif)
-{
- struct v4l2_rect *win = &isif->crop;
-
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
- isif->isif_cfg.bayer.win.top = win->top;
- isif->isif_cfg.bayer.win.left = win->left;
- isif->isif_cfg.bayer.win.width = win->width;
- isif->isif_cfg.bayer.win.height = win->height;
- return 0;
- }
- isif->isif_cfg.ycbcr.win.top = win->top;
- isif->isif_cfg.ycbcr.win.left = win->left;
- isif->isif_cfg.ycbcr.win.width = win->width;
- isif->isif_cfg.ycbcr.win.height = win->height;
-
- return 0;
-}
-
-static int
-isif_set_buftype(struct vpfe_isif_device *isif, enum isif_buftype buf_type)
-{
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
- isif->isif_cfg.bayer.buf_type = buf_type;
- else
- isif->isif_cfg.ycbcr.buf_type = buf_type;
-
- return 0;
-}
-
-/* configure format in isif hardware */
-static int
-isif_config_format(struct vpfe_device *vpfe_dev, unsigned int pad)
-{
- struct vpfe_isif_device *vpfe_isif = &vpfe_dev->vpfe_isif;
- enum isif_frmfmt frm_fmt = ISIF_FRMFMT_INTERLACED;
- struct v4l2_pix_format format;
- int ret = 0;
-
- v4l2_fill_pix_format(&format, &vpfe_dev->vpfe_isif.formats[pad]);
- mbus_to_pix(&vpfe_dev->vpfe_isif.formats[pad], &format);
-
- if (isif_set_pixel_format(vpfe_isif, format.pixelformat) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Failed to set pixel format in isif\n");
- return -EINVAL;
- }
-
- /* call for s_crop will override these values */
- vpfe_isif->crop.left = 0;
- vpfe_isif->crop.top = 0;
- vpfe_isif->crop.width = format.width;
- vpfe_isif->crop.height = format.height;
-
- /* configure the image window */
- isif_set_image_window(vpfe_isif);
-
- switch (vpfe_dev->vpfe_isif.formats[pad].field) {
- case V4L2_FIELD_INTERLACED:
- /* do nothing, since it is default */
- ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_INTERLEAVED);
- break;
-
- case V4L2_FIELD_NONE:
- frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
- /* buffer type only applicable for interlaced scan */
- break;
-
- case V4L2_FIELD_SEQ_TB:
- ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_SEPARATED);
- break;
-
- default:
- return -EINVAL;
- }
-
- /* set the frame format */
- if (!ret)
- ret = isif_set_frame_format(vpfe_isif, frm_fmt);
-
- return ret;
-}
-
-/*
- * isif_try_format() - Try video format on a pad
- * @isif: VPFE isif device
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- */
-static void
-isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- unsigned int width = fmt->format.width;
- unsigned int height = fmt->format.height;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(isif_fmts); i++) {
- if (fmt->format.code == isif_fmts[i])
- break;
- }
-
- /* If not found, use YUYV8_2x8 as default */
- if (i >= ARRAY_SIZE(isif_fmts))
- fmt->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
-
- /* Clamp the size. */
- fmt->format.width = clamp_t(u32, width, 32, MAX_WIDTH);
- fmt->format.height = clamp_t(u32, height, 32, MAX_HEIGHT);
-
- /* The data formatter truncates the number of horizontal output
- * pixels to a multiple of 16. To avoid clipping data, allow
- * callers to request an output size bigger than the input size
- * up to the nearest multiple of 16.
- */
- if (fmt->pad == ISIF_PAD_SOURCE)
- fmt->format.width &= ~15;
-}
-
-/*
- * vpfe_isif_buffer_isr() - isif module non-progressive buffer scheduling isr
- * @isif: Pointer to isif subdevice.
- */
-void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
- struct vpfe_video_device *video = &isif->video_out;
- enum v4l2_field field;
- int fid;
-
- if (!video->started)
- return;
-
- field = video->fmt.fmt.pix.field;
-
- if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
- if (video->cur_frm != video->next_frm)
- vpfe_video_process_buffer_complete(video);
- return;
- }
-
-	/* interlaced or TB capture: check which field
-	 * the hardware is currently in
-	 */
- fid = vpfe_isif_get_fid(vpfe_dev);
-
- /* switch the software maintained field id */
- video->field_id ^= 1;
- if (fid == video->field_id) {
-		/* we are in sync here, continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the
- * next frame is available, release the current
- * frame and move on
- */
- if (video->cur_frm != video->next_frm)
- vpfe_video_process_buffer_complete(video);
- /*
- * based on whether the two fields are stored
-			 * interleaved or separated in memory,
- * reconfigure the ISIF memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_video_schedule_bottom_field(video);
- return;
- }
-		/*
-		 * If one field is just being captured, configure the next
-		 * frame: get the next frame from the empty queue. If no
-		 * frame is available, hold on to the current buffer.
-		 */
- spin_lock(&video->dma_queue_lock);
- if (!list_empty(&video->dma_queue) &&
- video->cur_frm == video->next_frm)
- vpfe_video_schedule_next_buffer(video);
- spin_unlock(&video->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
-		 * May lose one frame
- */
- video->field_id = fid;
- }
-}
-
-/*
- * vpfe_isif_vidint1_isr() - ISIF module progressive buffer scheduling isr
- * @isif: Pointer to isif subdevice.
- */
-void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif)
-{
- struct vpfe_video_device *video = &isif->video_out;
-
- if (!video->started)
- return;
-
- spin_lock(&video->dma_queue_lock);
- if (video->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
- !list_empty(&video->dma_queue) && video->cur_frm == video->next_frm)
- vpfe_video_schedule_next_buffer(video);
-
- spin_unlock(&video->dma_queue_lock);
-}
-
-/*
- * VPFE video operations
- */
-
-static int isif_video_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
-{
- struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
-
- isif_write(isif->isif_cfg.base_addr, (addr >> 21) &
- ISIF_CADU_BITS, CADU);
- isif_write(isif->isif_cfg.base_addr, (addr >> 5) &
- ISIF_CADL_BITS, CADL);
-
- return 0;
-}
-
-static const struct vpfe_video_operations isif_video_ops = {
- .queue = isif_video_queue,
-};
-
-/*
- * V4L2 subdev operations
- */
-
-/* Parameter operations */
-static int isif_get_params(struct v4l2_subdev *sd, void *params)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
-
-	/* only raw module parameters can be retrieved through the IOCTL */
- if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
- return -EINVAL;
- memcpy(params, &isif->isif_cfg.bayer.config_params,
- sizeof(isif->isif_cfg.bayer.config_params));
- return 0;
-}
-
-static int isif_validate_df_csc_params(struct vpfe_isif_df_csc *df_csc)
-{
- struct vpfe_isif_color_space_conv *csc;
- int err = -EINVAL;
- int i;
-
- if (!df_csc->df_or_csc) {
- /* csc configuration */
- csc = &df_csc->csc;
- if (csc->en) {
- for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++)
- if (csc->coeff[i].integer >
- ISIF_CSC_COEF_INTEG_MASK ||
- csc->coeff[i].decimal >
- ISIF_CSC_COEF_DECIMAL_MASK) {
- pr_err("Invalid CSC coefficients\n");
- return err;
- }
- }
- }
- if (df_csc->start_pix > ISIF_DF_CSC_SPH_MASK) {
- pr_err("Invalid df_csc start pix value\n");
- return err;
- }
-
- if (df_csc->num_pixels > ISIF_DF_NUMPIX) {
- pr_err("Invalid df_csc num pixels value\n");
- return err;
- }
-
- if (df_csc->start_line > ISIF_DF_CSC_LNH_MASK) {
- pr_err("Invalid df_csc start_line value\n");
- return err;
- }
-
- if (df_csc->num_lines > ISIF_DF_NUMLINES) {
- pr_err("Invalid df_csc num_lines value\n");
- return err;
- }
-
- return 0;
-}
-
-#define DM365_ISIF_MAX_VDFLSFT 4
-#define DM365_ISIF_MAX_VDFSLV 4095
-#define DM365_ISIF_MAX_DFCMEM0 0x1fff
-#define DM365_ISIF_MAX_DFCMEM1 0x1fff
-
-static int isif_validate_dfc_params(struct vpfe_isif_dfc *dfc)
-{
- int err = -EINVAL;
- int i;
-
- if (!dfc->en)
- return 0;
-
- if (dfc->corr_whole_line > 1) {
- pr_err("Invalid corr_whole_line value\n");
- return err;
- }
-
- if (dfc->def_level_shift > DM365_ISIF_MAX_VDFLSFT) {
- pr_err("Invalid def_level_shift value\n");
- return err;
- }
-
- if (dfc->def_sat_level > DM365_ISIF_MAX_VDFSLV) {
- pr_err("Invalid def_sat_level value\n");
- return err;
- }
-
- if (!dfc->num_vdefects ||
- dfc->num_vdefects > VPFE_ISIF_VDFC_TABLE_SIZE) {
- pr_err("Invalid num_vdefects value\n");
- return err;
- }
-
- for (i = 0; i < VPFE_ISIF_VDFC_TABLE_SIZE; i++) {
- if (dfc->table[i].pos_vert > DM365_ISIF_MAX_DFCMEM0) {
- pr_err("Invalid pos_vert value\n");
- return err;
- }
- if (dfc->table[i].pos_horz > DM365_ISIF_MAX_DFCMEM1) {
- pr_err("Invalid pos_horz value\n");
- return err;
- }
- }
-
- return 0;
-}
-
-#define DM365_ISIF_MAX_CLVRV 0xfff
-#define DM365_ISIF_MAX_CLDC 0x1fff
-#define DM365_ISIF_MAX_CLHSH 0x1fff
-#define DM365_ISIF_MAX_CLHSV 0x1fff
-#define DM365_ISIF_MAX_CLVSH 0x1fff
-#define DM365_ISIF_MAX_CLVSV 0x1fff
-#define DM365_ISIF_MAX_HEIGHT_BLACK_REGION 0x1fff
-
-static int isif_validate_bclamp_params(struct vpfe_isif_black_clamp *bclamp)
-{
- int err = -EINVAL;
-
- if (bclamp->dc_offset > DM365_ISIF_MAX_CLDC) {
- pr_err("Invalid bclamp dc_offset value\n");
- return err;
- }
- if (!bclamp->en)
- return 0;
- if (bclamp->horz.clamp_pix_limit > 1) {
- pr_err("Invalid bclamp horz clamp_pix_limit value\n");
- return err;
- }
- if (bclamp->horz.win_count_calc < 1 ||
- bclamp->horz.win_count_calc > 32) {
- pr_err("Invalid bclamp horz win_count_calc value\n");
- return err;
- }
- if (bclamp->horz.win_start_h_calc > DM365_ISIF_MAX_CLHSH) {
-		pr_err("Invalid bclamp win_start_h_calc value\n");
- return err;
- }
-
- if (bclamp->horz.win_start_v_calc > DM365_ISIF_MAX_CLHSV) {
- pr_err("Invalid bclamp win_start_v_calc value\n");
- return err;
- }
- if (bclamp->vert.reset_clamp_val > DM365_ISIF_MAX_CLVRV) {
- pr_err("Invalid bclamp reset_clamp_val value\n");
- return err;
- }
- if (bclamp->vert.ob_v_sz_calc > DM365_ISIF_MAX_HEIGHT_BLACK_REGION) {
- pr_err("Invalid bclamp ob_v_sz_calc value\n");
- return err;
- }
- if (bclamp->vert.ob_start_h > DM365_ISIF_MAX_CLVSH) {
- pr_err("Invalid bclamp ob_start_h value\n");
- return err;
- }
- if (bclamp->vert.ob_start_v > DM365_ISIF_MAX_CLVSV) {
-		pr_err("Invalid bclamp ob_start_v value\n");
- return err;
- }
- return 0;
-}
-
-static int
-isif_validate_raw_params(struct vpfe_isif_raw_config *params)
-{
- int ret;
-
- ret = isif_validate_df_csc_params(&params->df_csc);
- if (ret)
- return ret;
- ret = isif_validate_dfc_params(&params->dfc);
- if (ret)
- return ret;
- ret = isif_validate_bclamp_params(&params->bclamp);
- return ret;
-}
-
-static int isif_set_params(struct v4l2_subdev *sd, void *params)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct vpfe_isif_raw_config isif_raw_params;
- int ret = -EINVAL;
-
- /* only raw module parameters can be set through the IOCTL */
- if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
- return ret;
-
- memcpy(&isif_raw_params, params, sizeof(isif_raw_params));
- if (!isif_validate_raw_params(&isif_raw_params)) {
- memcpy(&isif->isif_cfg.bayer.config_params, &isif_raw_params,
- sizeof(isif_raw_params));
- ret = 0;
- }
- return ret;
-}
-
-/*
- * isif_ioctl() - isif module private ioctl's
- * @sd: VPFE isif V4L2 subdevice
- * @cmd: ioctl command
- * @arg: ioctl argument
- *
- * Return 0 on success or a negative error code otherwise.
- */
-static long isif_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- int ret;
-
- switch (cmd) {
- case VIDIOC_VPFE_ISIF_S_RAW_PARAMS:
- ret = isif_set_params(sd, arg);
- break;
-
- case VIDIOC_VPFE_ISIF_G_RAW_PARAMS:
- ret = isif_get_params(sd, arg);
- break;
-
- default:
- ret = -ENOIOCTLCMD;
- }
- return ret;
-}
-
-static void isif_config_gain_offset(struct vpfe_isif_device *isif)
-{
- struct vpfe_isif_gain_offsets_adj *gain_off_ptr =
- &isif->isif_cfg.bayer.config_params.gain_offset;
- void __iomem *base = isif->isif_cfg.base_addr;
- u32 val;
-
- val = ((gain_off_ptr->gain_sdram_en & 1) << GAIN_SDRAM_EN_SHIFT) |
- ((gain_off_ptr->gain_ipipe_en & 1) << GAIN_IPIPE_EN_SHIFT) |
- ((gain_off_ptr->gain_h3a_en & 1) << GAIN_H3A_EN_SHIFT) |
- ((gain_off_ptr->offset_sdram_en & 1) << OFST_SDRAM_EN_SHIFT) |
- ((gain_off_ptr->offset_ipipe_en & 1) << OFST_IPIPE_EN_SHIFT) |
- ((gain_off_ptr->offset_h3a_en & 1) << OFST_H3A_EN_SHIFT);
- isif_merge(base, GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
-
- isif_write(base, isif->isif_cfg.isif_gain_params.cr_gain, CRGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.cgr_gain, CGRGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.cgb_gain, CGBGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.cb_gain, CBGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.offset & OFFSET_MASK,
- COFSTA);
-
-}
-
-static void isif_config_bclamp(struct vpfe_isif_device *isif,
- struct vpfe_isif_black_clamp *bc)
-{
- u32 val;
-
-	/*
- * DC Offset is always added to image data irrespective of bc enable
- * status
- */
- val = bc->dc_offset & ISIF_BC_DCOFFSET_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLDCOFST);
-
- if (!bc->en)
- return;
-
- val = (bc->bc_mode_color & ISIF_BC_MODE_COLOR_MASK) <<
- ISIF_BC_MODE_COLOR_SHIFT;
-
-	/* Enable BC and horizontal clamp calculation parameters */
- val = val | 1 | ((bc->horz.mode & ISIF_HORZ_BC_MODE_MASK) <<
- ISIF_HORZ_BC_MODE_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, CLAMPCFG);
-
- if (bc->horz.mode != VPFE_ISIF_HORZ_BC_DISABLE) {
- /*
- * Window count for calculation
- * Base window selection
- * pixel limit
- * Horizontal size of window
- * vertical size of the window
- * Horizontal start position of the window
- * Vertical start position of the window
- */
- val = (bc->horz.win_count_calc & ISIF_HORZ_BC_WIN_COUNT_MASK) |
- ((bc->horz.base_win_sel_calc & 1) <<
- ISIF_HORZ_BC_WIN_SEL_SHIFT) |
- ((bc->horz.clamp_pix_limit & 1) <<
- ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
- ((bc->horz.win_h_sz_calc &
- ISIF_HORZ_BC_WIN_H_SIZE_MASK) <<
- ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
- ((bc->horz.win_v_sz_calc &
- ISIF_HORZ_BC_WIN_V_SIZE_MASK) <<
- ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, CLHWIN0);
-
- val = bc->horz.win_start_h_calc & ISIF_HORZ_BC_WIN_START_H_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLHWIN1);
-
- val = bc->horz.win_start_v_calc & ISIF_HORZ_BC_WIN_START_V_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLHWIN2);
- }
-
-	/* vertical clamp calculation parameters */
- /* OB H Valid */
- val = bc->vert.ob_h_sz_calc & ISIF_VERT_BC_OB_H_SZ_MASK;
-
- /* Reset clamp value sel for previous line */
- val |= (bc->vert.reset_val_sel & ISIF_VERT_BC_RST_VAL_SEL_MASK) <<
- ISIF_VERT_BC_RST_VAL_SEL_SHIFT;
-
- /* Line average coefficient */
- val |= bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN0);
-
- /* Configured reset value */
- if (bc->vert.reset_val_sel == VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL) {
- val = bc->vert.reset_clamp_val & ISIF_VERT_BC_RST_VAL_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVRV);
- }
-
- /* Optical Black horizontal start position */
- val = bc->vert.ob_start_h & ISIF_VERT_BC_OB_START_HORZ_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN1);
-
- /* Optical Black vertical start position */
- val = bc->vert.ob_start_v & ISIF_VERT_BC_OB_START_VERT_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN2);
-
- val = bc->vert.ob_v_sz_calc & ISIF_VERT_BC_OB_VERT_SZ_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN3);
-
- /* Vertical start position for BC subtraction */
- val = bc->vert_start_sub & ISIF_BC_VERT_START_SUB_V_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLSV);
-}
-
-/* This function configures the capture window size in the ISIF registers */
-static void
-isif_setwin(struct vpfe_isif_device *isif, struct v4l2_rect *image_win,
- enum isif_frmfmt frm_fmt, int ppc, int mode)
-{
- int horz_nr_pixels;
- int vert_nr_lines;
- int horz_start;
- int vert_start;
- int mid_img;
-
-	/*
-	 * ppc - per pixel count. Indicates how many pixels per cell are
-	 * output to SDRAM. For example, for YCbCr it is one Y and one C,
-	 * so 2. For raw capture this is 1.
-	 */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
-
- /* Writing the horizontal info into the registers */
- isif_write(isif->isif_cfg.base_addr,
- horz_start & START_PX_HOR_MASK, SPH);
- isif_write(isif->isif_cfg.base_addr,
- horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
- vert_start = image_win->top;
-
- if (frm_fmt == ISIF_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- } else {
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /* configure VDINT0 and VDINT1 */
- mid_img = vert_start + (image_win->height / 2);
- isif_write(isif->isif_cfg.base_addr, mid_img, VDINT1);
- }
-
- if (!mode)
- isif_write(isif->isif_cfg.base_addr, 0, VDINT0);
- else
- isif_write(isif->isif_cfg.base_addr, vert_nr_lines, VDINT0);
- isif_write(isif->isif_cfg.base_addr,
- vert_start & START_VER_ONE_MASK, SLV0);
- isif_write(isif->isif_cfg.base_addr,
- vert_start & START_VER_TWO_MASK, SLV1);
- isif_write(isif->isif_cfg.base_addr,
- vert_nr_lines & NUM_LINES_VER, LNV);
-}
-
-#define DM365_ISIF_DFCMWR_MEMORY_WRITE 1
-#define DM365_ISIF_DFCMRD_MEMORY_READ 0x2
-
-static void
-isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
-{
-#define DFC_WRITE_WAIT_COUNT 1000
- u32 count = DFC_WRITE_WAIT_COUNT;
- u32 val;
- int i;
-
- if (!vdfc->en)
- return;
-
- /* Correction mode */
- val = (vdfc->corr_mode & ISIF_VDFC_CORR_MOD_MASK) <<
- ISIF_VDFC_CORR_MOD_SHIFT;
-
- /* Correct whole line or partial */
- if (vdfc->corr_whole_line)
- val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
-
- /* level shift value */
- val |= (vdfc->def_level_shift & ISIF_VDFC_LEVEL_SHFT_MASK) <<
- ISIF_VDFC_LEVEL_SHFT_SHIFT;
-
- isif_write(isif->isif_cfg.base_addr, val, DFCCTL);
-
- /* Defect saturation level */
- val = vdfc->def_sat_level & ISIF_VDFC_SAT_LEVEL_MASK;
- isif_write(isif->isif_cfg.base_addr, val, VDFSATLV);
-
- isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_vert &
- ISIF_VDFC_POS_MASK, DFCMEM0);
- isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_horz &
- ISIF_VDFC_POS_MASK, DFCMEM1);
- if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[0].level_at_pos, DFCMEM2);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[0].level_up_pixels, DFCMEM3);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[0].level_low_pixels, DFCMEM4);
- }
-
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- /* set DFCMARST and set DFCMWR */
- val |= 1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT;
- val |= 1;
- isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
-
- while (count && (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x01))
- count--;
-
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- if (!count) {
-		pr_debug("defect table write timeout!!\n");
- return;
- }
-
- for (i = 1; i < vdfc->num_vdefects; i++) {
- isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_vert &
- ISIF_VDFC_POS_MASK, DFCMEM0);
-
- isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_horz &
- ISIF_VDFC_POS_MASK, DFCMEM1);
-
- if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[i].level_at_pos, DFCMEM2);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[i].level_up_pixels, DFCMEM3);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[i].level_low_pixels, DFCMEM4);
- }
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- /* clear DFCMARST and set DFCMWR */
- val &= ~(1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT);
- val |= 1;
- isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
-
- count = DFC_WRITE_WAIT_COUNT;
- while (count && (isif_read(isif->isif_cfg.base_addr,
- DFCMEMCTL) & 0x01))
- count--;
-
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- if (!count) {
-			pr_debug("defect table write timeout!!\n");
- return;
- }
- }
- if (vdfc->num_vdefects < VPFE_ISIF_VDFC_TABLE_SIZE) {
- /* Extra cycle needed */
- isif_write(isif->isif_cfg.base_addr, 0, DFCMEM0);
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_MAX_DFCMEM1, DFCMEM1);
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_DFCMWR_MEMORY_WRITE, DFCMEMCTL);
- }
- /* enable VDFC */
- isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
- (1 << ISIF_VDFC_EN_SHIFT), DFCCTL);
-
- isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
- (0 << ISIF_VDFC_EN_SHIFT), DFCCTL);
-
- isif_write(isif->isif_cfg.base_addr, 0x6, DFCMEMCTL);
- for (i = 0; i < vdfc->num_vdefects; i++) {
- count = DFC_WRITE_WAIT_COUNT;
- while (count &&
- (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x2))
- count--;
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- if (!count) {
-			pr_debug("defect table write timeout!!\n");
- return;
- }
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_DFCMRD_MEMORY_READ, DFCMEMCTL);
- }
-}
-
-static void
-isif_config_csc(struct vpfe_isif_device *isif, struct vpfe_isif_df_csc *df_csc)
-{
- u32 val1;
- u32 val2;
- u32 i;
-
- if (!df_csc->csc.en) {
- isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
- return;
- }
- /* initialize all bits to 0 */
- val1 = 0;
- for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++) {
- if ((i % 2) == 0) {
- /* CSCM - LSB */
- val1 = ((df_csc->csc.coeff[i].integer &
- ISIF_CSC_COEF_INTEG_MASK) <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- ((df_csc->csc.coeff[i].decimal &
- ISIF_CSC_COEF_DECIMAL_MASK));
- } else {
-
- /* CSCM - MSB */
- val2 = ((df_csc->csc.coeff[i].integer &
- ISIF_CSC_COEF_INTEG_MASK) <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- ((df_csc->csc.coeff[i].decimal &
- ISIF_CSC_COEF_DECIMAL_MASK));
- val2 <<= ISIF_CSCM_MSB_SHIFT;
- val2 |= val1;
- isif_write(isif->isif_cfg.base_addr, val2,
- (CSCM0 + ((i-1) << 1)));
- }
- }
- /* program the active area */
- isif_write(isif->isif_cfg.base_addr, df_csc->start_pix &
- ISIF_DF_CSC_SPH_MASK, FMTSPH);
-	/*
-	 * One extra pixel is required for CSC. Normally the number of
-	 * pixels minus 1 should be configured in this register, so we
-	 * would subtract 1 before writing to FMTLNH, but we do not do
-	 * that since CSC requires the one extra pixel.
-	 */
- isif_write(isif->isif_cfg.base_addr, df_csc->num_pixels &
- ISIF_DF_CSC_SPH_MASK, FMTLNH);
- isif_write(isif->isif_cfg.base_addr, df_csc->start_line &
- ISIF_DF_CSC_SPH_MASK, FMTSLV);
-	/*
-	 * One extra line is required for CSC. See the reason documented
-	 * for num_pixels above.
-	 */
- isif_write(isif->isif_cfg.base_addr, df_csc->num_lines &
- ISIF_DF_CSC_SPH_MASK, FMTLNV);
- /* Enable CSC */
- isif_write(isif->isif_cfg.base_addr, 1, CSCCTL);
-}
-
-static void
-isif_config_linearization(struct vpfe_isif_device *isif,
- struct vpfe_isif_linearize *linearize)
-{
- u32 val;
- u32 i;
-
- if (!linearize->en) {
- isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
- return;
- }
- /* shift value for correction */
- val = (linearize->corr_shft & ISIF_LIN_CORRSFT_MASK) <<
- ISIF_LIN_CORRSFT_SHIFT;
- /* enable */
- val |= 1;
- isif_write(isif->isif_cfg.base_addr, val, LINCFG0);
- /* Scale factor */
- val = (linearize->scale_fact.integer & 1) <<
- ISIF_LIN_SCALE_FACT_INTEG_SHIFT;
- val |= linearize->scale_fact.decimal & ISIF_LIN_SCALE_FACT_DECIMAL_MASK;
- isif_write(isif->isif_cfg.base_addr, val, LINCFG1);
-
- for (i = 0; i < VPFE_ISIF_LINEAR_TAB_SIZE; i++) {
- val = linearize->table[i] & ISIF_LIN_ENTRY_MASK;
- if (i%2)
- isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 1);
- else
- isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 0);
- }
-}
-
-static void
-isif_config_culling(struct vpfe_isif_device *isif, struct vpfe_isif_cul *cul)
-{
- u32 val;
-
- /* Horizontal pattern */
- val = cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT;
- val |= cul->hcpat_odd;
- isif_write(isif->isif_cfg.base_addr, val, CULH);
- /* vertical pattern */
- isif_write(isif->isif_cfg.base_addr, cul->vcpat, CULV);
- /* LPF */
- isif_merge(isif->isif_cfg.base_addr, ISIF_LPF_MASK << ISIF_LPF_SHIFT,
- cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
-}
-
-static int isif_get_pix_fmt(u32 mbus_code)
-{
- switch (mbus_code) {
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return ISIF_PIXFMT_RAW;
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
- case MEDIA_BUS_FMT_Y8_1X8:
- return ISIF_PIXFMT_YCBCR_8BIT;
-
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV10_1X20:
- return ISIF_PIXFMT_YCBCR_16BIT;
-
- default:
- break;
- }
- return -EINVAL;
-}
-
-#define ISIF_INTERLACE_INVERSE_MODE 0x4b6d
-#define ISIF_INTERLACE_NON_INVERSE_MODE 0x0b6d
-#define ISIF_PROGRESSIVE_INVERSE_MODE 0x4000
-#define ISIF_PROGRESSIVE_NON_INVERSE_MODE 0x0000
-
-static int isif_config_raw(struct v4l2_subdev *sd, int mode)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct isif_params_raw *params = &isif->isif_cfg.bayer;
- struct vpfe_isif_raw_config *module_params =
- &isif->isif_cfg.bayer.config_params;
- struct v4l2_mbus_framefmt *format;
- int pix_fmt;
- u32 val;
-
- format = &isif->formats[ISIF_PAD_SINK];
-
-	/* If the user has set BT656IF earlier, it should be reset
-	 * when configuring for raw input.
-	 */
- isif_write(isif->isif_cfg.base_addr, 0, REC656IF);
- /* Configure CCDCFG register
- * Set CCD Not to swap input since input is RAW data
- * Set FID detection function to Latch at V-Sync
- * Set WENLOG - isif valid area
- * Set TRGSEL
- * Set EXTRG
- * Packed to 8 or 16 bits
- */
- val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
- ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
- ISIF_CCDCFG_EXTRG_DISABLE | (isif->isif_cfg.data_pack &
- ISIF_DATA_PACK_MASK);
- isif_write(isif->isif_cfg.base_addr, val, CCDCFG);
-
- pix_fmt = isif_get_pix_fmt(format->code);
- if (pix_fmt < 0) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
-	/*
-	 * Configure the vertical sync polarity (MODESET.VDPOL)
-	 * Configure the horizontal sync polarity (MODESET.HDPOL)
-	 * Configure frame id polarity (MODESET.FLDPOL)
-	 * Configure data polarity
-	 * Configure External WEN Selection
-	 * Configure frame format (progressive or interlaced)
-	 * Configure pixel format (Input mode)
-	 * Configure the data shift
-	 */
- val = ISIF_VDHDOUT_INPUT | ((params->vd_pol & ISIF_VD_POL_MASK) <<
- ISIF_VD_POL_SHIFT) | ((params->hd_pol & ISIF_HD_POL_MASK) <<
- ISIF_HD_POL_SHIFT) | ((params->fid_pol & ISIF_FID_POL_MASK) <<
- ISIF_FID_POL_SHIFT) | ((ISIF_DATAPOL_NORMAL &
- ISIF_DATAPOL_MASK) << ISIF_DATAPOL_SHIFT) | ((ISIF_EXWEN_DISABLE &
- ISIF_EXWEN_MASK) << ISIF_EXWEN_SHIFT) | ((params->frm_fmt &
- ISIF_FRM_FMT_MASK) << ISIF_FRM_FMT_SHIFT) | ((pix_fmt &
- ISIF_INPUT_MASK) << ISIF_INPUT_SHIFT);
-
-	/* Currently only MEDIA_BUS_FMT_SGRBG12_1X12 is supported.
-	 * Shift appropriately when support for other media bus
-	 * formats is added.
-	 */
- if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
- val |= ((VPFE_ISIF_NO_SHIFT &
- ISIF_DATASFT_MASK) << ISIF_DATASFT_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, MODESET);
- /*
- * Configure GAMMAWD register
- * CFA pattern setting
- */
- val = (params->cfa_pat & ISIF_GAMMAWD_CFA_MASK) <<
- ISIF_GAMMAWD_CFA_SHIFT;
- /* Gamma msb */
- if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10ALAW8)
- val = val | ISIF_ALAW_ENABLE;
-
- val = val | ((params->data_msb & ISIF_ALAW_GAMA_WD_MASK) <<
- ISIF_ALAW_GAMA_WD_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, CGAMMAWD);
- /* Configure DPCM compression settings */
- if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10DPCM8) {
- val = 1 << ISIF_DPCM_EN_SHIFT;
- val |= (params->dpcm_predictor &
- ISIF_DPCM_PREDICTOR_MASK) << ISIF_DPCM_PREDICTOR_SHIFT;
- }
- isif_write(isif->isif_cfg.base_addr, val, MISC);
- /* Configure Gain & Offset */
- isif_config_gain_offset(isif);
- /* Configure Color pattern */
- if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
- val = isif_sgrbg_pattern;
- else
- /* default set to rggb */
- val = isif_srggb_pattern;
-
- isif_write(isif->isif_cfg.base_addr, val, CCOLP);
-
- /* Configure HSIZE register */
- val = (params->horz_flip_en & ISIF_HSIZE_FLIP_MASK) <<
- ISIF_HSIZE_FLIP_SHIFT;
-
-	/* calculate the line offset in 32-byte units based on the pack value */
- if (isif->isif_cfg.data_pack == ISIF_PACK_8BIT)
- val |= ((params->win.width + 31) >> 5) & ISIF_LINEOFST_MASK;
- else if (isif->isif_cfg.data_pack == ISIF_PACK_12BIT)
- val |= ((((params->win.width + (params->win.width >> 2)) +
- 31) >> 5) & ISIF_LINEOFST_MASK);
- else
- val |= (((params->win.width * 2) + 31) >> 5) &
- ISIF_LINEOFST_MASK;
- isif_write(isif->isif_cfg.base_addr, val, HSIZE);
- /* Configure SDOFST register */
- if (params->frm_fmt == ISIF_FRMFMT_INTERLACED) {
- if (params->image_invert_en)
- /* For interlace inverse mode */
- isif_write(isif->isif_cfg.base_addr,
- ISIF_INTERLACE_INVERSE_MODE, SDOFST);
- else
- /* For interlace non inverse mode */
- isif_write(isif->isif_cfg.base_addr,
- ISIF_INTERLACE_NON_INVERSE_MODE, SDOFST);
- } else if (params->frm_fmt == ISIF_FRMFMT_PROGRESSIVE) {
- if (params->image_invert_en)
- isif_write(isif->isif_cfg.base_addr,
- ISIF_PROGRESSIVE_INVERSE_MODE, SDOFST);
- else
-			/* For progressive non inverse mode */
- isif_write(isif->isif_cfg.base_addr,
- ISIF_PROGRESSIVE_NON_INVERSE_MODE, SDOFST);
- }
- /* Configure video window */
- isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
- /* Configure Black Clamp */
- isif_config_bclamp(isif, &module_params->bclamp);
-	/* Configure Vertical Defect Pixel Correction */
- isif_config_dfc(isif, &module_params->dfc);
- if (!module_params->df_csc.df_or_csc)
- /* Configure Color Space Conversion */
- isif_config_csc(isif, &module_params->df_csc);
-
- isif_config_linearization(isif, &module_params->linearize);
- /* Configure Culling */
- isif_config_culling(isif, &module_params->culling);
-	/* Configure horizontal and vertical offsets (DFC, LSC, Gain) */
- val = module_params->horz_offset & ISIF_DATA_H_OFFSET_MASK;
- isif_write(isif->isif_cfg.base_addr, val, DATAHOFST);
-
- val = module_params->vert_offset & ISIF_DATA_V_OFFSET_MASK;
- isif_write(isif->isif_cfg.base_addr, val, DATAVOFST);
-
- return 0;
-}
-
-#define DM365_ISIF_HSIZE_MASK 0xffffffe0
-#define DM365_ISIF_SDOFST_2_LINES 0x00000249
-
-/* This function will configure ISIF for YCbCr parameters. */
-static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct isif_ycbcr_config *params = &isif->isif_cfg.ycbcr;
- struct v4l2_mbus_framefmt *format;
- int pix_fmt;
- u32 modeset;
- u32 ccdcfg;
-
- format = &isif->formats[ISIF_PAD_SINK];
-	/*
-	 * First reset the ISIF; all registers have default values after
-	 * reset. This is important since we rely on the default values
-	 * in many registers that we do not touch.
-	 */
- /* start with all bits zero */
- ccdcfg = modeset = 0;
- pix_fmt = isif_get_pix_fmt(format->code);
- if (pix_fmt < 0) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- /* configure pixel format or input mode */
- modeset = modeset | ((pix_fmt & ISIF_INPUT_MASK) <<
- ISIF_INPUT_SHIFT) | ((params->frm_fmt & ISIF_FRM_FMT_MASK) <<
- ISIF_FRM_FMT_SHIFT) | (((params->fid_pol &
- ISIF_FID_POL_MASK) << ISIF_FID_POL_SHIFT)) |
- (((params->hd_pol & ISIF_HD_POL_MASK) << ISIF_HD_POL_SHIFT)) |
- (((params->vd_pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT));
-	/* pack the data to 8-bit in CCDCFG */
- switch (format->code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- modeset |= ((VPFE_PINPOL_NEGATIVE & ISIF_VD_POL_MASK) <<
- ISIF_VD_POL_SHIFT);
- isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
- ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR;
- break;
-
- case MEDIA_BUS_FMT_YUYV10_2X10:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- /* setup BT.656, embedded sync */
- isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
- /* enable 10 bit mode in ccdcfg */
- ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR |
- ISIF_BW656_ENABLE;
- break;
-
- case MEDIA_BUS_FMT_YUYV10_1X20:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
- break;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- ccdcfg |= ISIF_PACK_8BIT;
- ccdcfg |= ISIF_YCINSWP_YCBCR;
- if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
-
- case MEDIA_BUS_FMT_YUYV8_1X16:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
-
- default:
- /* should never come here */
- pr_debug("Invalid interface type\n");
- return -EINVAL;
- }
- isif_write(isif->isif_cfg.base_addr, modeset, MODESET);
- /* Set up pix order */
- ccdcfg |= (params->pix_order & ISIF_PIX_ORDER_MASK) <<
- ISIF_PIX_ORDER_SHIFT;
- isif_write(isif->isif_cfg.base_addr, ccdcfg, CCDCFG);
- /* configure video window */
- if (format->code == MEDIA_BUS_FMT_YUYV10_1X20 ||
- format->code == MEDIA_BUS_FMT_YUYV8_1X16)
- isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
- else
- isif_setwin(isif, &params->win, params->frm_fmt, 2, mode);
-
- /*
- * configure the horizontal line offset
- * this is done by rounding up width to a multiple of 16 pixels
-	 * and multiplying by two to account for y:cb:cr 4:2:2 data
- */
- isif_write(isif->isif_cfg.base_addr,
- ((((params->win.width * 2) + 31) &
- DM365_ISIF_HSIZE_MASK) >> 5), HSIZE);
-
- /* configure the memory line offset */
- if (params->frm_fmt == ISIF_FRMFMT_INTERLACED &&
- params->buf_type == ISIF_BUFTYPE_FLD_INTERLEAVED)
- /* two fields are interleaved in memory */
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_SDOFST_2_LINES, SDOFST);
- return 0;
-}
-
-static int isif_configure(struct v4l2_subdev *sd, int mode)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = &isif->formats[ISIF_PAD_SINK];
-
- switch (format->code) {
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return isif_config_raw(sd, mode);
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV10_1X20:
- return isif_config_ycbcr(sd, mode);
-
- default:
- break;
- }
- return -EINVAL;
-}
-
-/*
- * isif_set_stream() - Enable/Disable streaming on the ISIF module
- * @sd: VPFE ISIF V4L2 subdevice
- * @enable: Enable/disable stream
- */
-static int isif_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- int ret;
-
- if (enable) {
- ret = isif_configure(sd,
- (isif->output == ISIF_OUTPUT_MEMORY) ? 0 : 1);
- if (ret)
- return ret;
- if (isif->output == ISIF_OUTPUT_MEMORY)
- isif_enable_output_to_sdram(isif, 1);
- isif_enable(isif, 1);
- } else {
- isif_enable(isif, 0);
- isif_enable_output_to_sdram(isif, 0);
- }
-
- return 0;
-}
-
-/*
- * __isif_get_format() - helper function for getting isif format
- * @isif: pointer to isif private structure.
- * @pad: pad number.
- * @cfg: V4L2 subdev pad config
- * @which: wanted subdev format.
- */
-static struct v4l2_mbus_framefmt *
-__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
-	if (which == V4L2_SUBDEV_FORMAT_TRY)
-		return v4l2_subdev_get_try_format(&isif->subdev, cfg, pad);
-
- return &isif->formats[pad];
-}
-
-/*
- * isif_set_format() - set format on pad
- * @sd : VPFE ISIF device
- * @cfg : V4L2 subdev pad config
- * @fmt : pointer to v4l2 subdev format structure
- *
- * Return 0 on success or -EINVAL if format or pad is invalid
- */
-static int
-isif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
- struct v4l2_mbus_framefmt *format;
-
- format = __isif_get_format(isif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- isif_try_format(isif, cfg, fmt);
- memcpy(format, &fmt->format, sizeof(*format));
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (fmt->pad == ISIF_PAD_SOURCE)
- return isif_config_format(vpfe_dev, fmt->pad);
-
- return 0;
-}
-
-/*
- * isif_get_format() - Retrieve the video format on a pad
- * @sd: VPFE ISIF V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int
-isif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __isif_get_format(vpfe_isif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- memcpy(&fmt->format, format, sizeof(fmt->format));
-
- return 0;
-}
-
-/*
- * isif_enum_frame_size() - enum frame sizes on pads
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fse: pointer to v4l2_subdev_frame_size_enum structure
- */
-static int
-isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct v4l2_subdev_format format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.pad = fse->pad;
- format.format.code = fse->code;
- format.format.width = 1;
- format.format.height = 1;
- format.which = fse->which;
- isif_try_format(isif, cfg, &format);
- fse->min_width = format.format.width;
- fse->min_height = format.format.height;
-
- if (format.format.code != fse->code)
- return -EINVAL;
-
- format.pad = fse->pad;
- format.format.code = fse->code;
- format.format.width = -1;
- format.format.height = -1;
- format.which = fse->which;
- isif_try_format(isif, cfg, &format);
- fse->max_width = format.format.width;
- fse->max_height = format.format.height;
-
- return 0;
-}
-
-/*
- * isif_enum_mbus_code() - enum mbus codes for pads
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- */
-static int
-isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- switch (code->pad) {
- case ISIF_PAD_SINK:
- case ISIF_PAD_SOURCE:
- if (code->index >= ARRAY_SIZE(isif_fmts))
- return -EINVAL;
- code->code = isif_fmts[code->index];
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * isif_pad_set_selection() - set crop rectangle on pad
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @sel: pointer to v4l2_subdev_selection structure
- *
- * Return 0 on success, -EINVAL if pad is invalid
- */
-static int
-isif_pad_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- /* check whether it's a valid pad and target */
- if (sel->pad != ISIF_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- format = __isif_get_format(vpfe_isif, cfg, sel->pad, sel->which);
- if (format == NULL)
- return -EINVAL;
-
-	/* check whether the crop rect is within limits */
- if (sel->r.top < 0 || sel->r.left < 0 ||
- (sel->r.left + sel->r.width >
- vpfe_isif->formats[ISIF_PAD_SINK].width) ||
- (sel->r.top + sel->r.height >
- vpfe_isif->formats[ISIF_PAD_SINK].height)) {
- sel->r.left = 0;
- sel->r.top = 0;
- sel->r.width = format->width;
- sel->r.height = format->height;
- }
- /* adjust the width to 16 pixel boundary */
- sel->r.width = ((sel->r.width + 15) & ~0xf);
- vpfe_isif->crop = sel->r;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- isif_set_image_window(vpfe_isif);
- } else {
- struct v4l2_rect *rect;
-
- rect = v4l2_subdev_get_try_crop(sd, cfg, ISIF_PAD_SINK);
- memcpy(rect, &vpfe_isif->crop, sizeof(*rect));
- }
- return 0;
-}
-
-/*
- * isif_pad_get_selection() - get crop rectangle on pad
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @sel: pointer to v4l2_subdev_selection structure
- *
- * Return 0 on success, -EINVAL if pad is invalid
- */
-static int
-isif_pad_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
-
- /* check whether it's a valid pad and target */
- if (sel->pad != ISIF_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_rect *rect;
-
- rect = v4l2_subdev_get_try_crop(sd, cfg, ISIF_PAD_SINK);
- memcpy(&sel->r, rect, sizeof(*rect));
- } else {
- sel->r = vpfe_isif->crop;
- }
-
- return 0;
-}
-
-/*
- * isif_init_formats() - Initialize formats on all pads
- * @sd: VPFE isif V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. Try formats are initialized
- * on the file handle.
- */
-static int
-isif_init_formats(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
- struct v4l2_subdev_selection sel;
-
- memset(&format, 0, sizeof(format));
- format.pad = ISIF_PAD_SINK;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = MAX_WIDTH;
- format.format.height = MAX_HEIGHT;
- isif_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = ISIF_PAD_SOURCE;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = MAX_WIDTH;
- format.format.height = MAX_HEIGHT;
- isif_set_format(sd, fh->pad, &format);
-
- memset(&sel, 0, sizeof(sel));
- sel.pad = ISIF_PAD_SINK;
- sel.which = V4L2_SUBDEV_FORMAT_TRY;
- sel.target = V4L2_SEL_TGT_CROP;
- sel.r.width = MAX_WIDTH;
- sel.r.height = MAX_HEIGHT;
- isif_pad_set_selection(sd, fh->pad, &sel);
-
- return 0;
-}
-
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops isif_v4l2_core_ops = {
- .ioctl = isif_ioctl,
-};
-
-/* subdev file operations */
-static const struct v4l2_subdev_internal_ops isif_v4l2_internal_ops = {
- .open = isif_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops isif_v4l2_video_ops = {
- .s_stream = isif_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops isif_v4l2_pad_ops = {
- .enum_mbus_code = isif_enum_mbus_code,
- .enum_frame_size = isif_enum_frame_size,
- .get_fmt = isif_get_format,
- .set_fmt = isif_set_format,
- .set_selection = isif_pad_set_selection,
- .get_selection = isif_pad_get_selection,
-};
-
-/* subdev operations */
-static const struct v4l2_subdev_ops isif_v4l2_ops = {
- .core = &isif_v4l2_core_ops,
- .video = &isif_v4l2_video_ops,
- .pad = &isif_v4l2_pad_ops,
-};
-
-/*
- * Media entity operations
- */
-
-/*
- * isif_link_setup() - Setup isif connections
- * @entity: isif media entity
- * @local: Pad at the local end of the link
- * @remote: Pad at the remote end of the link
- * @flags: Link flags
- *
- * return -EINVAL or zero on success
- */
-static int
-isif_link_setup(struct media_entity *entity, const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
-
- switch (local->index | media_entity_type(remote->entity)) {
- case ISIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- /* read from decoder/sensor */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- isif->input = ISIF_INPUT_NONE;
- break;
- }
- if (isif->input != ISIF_INPUT_NONE)
- return -EBUSY;
- isif->input = ISIF_INPUT_PARALLEL;
- break;
-
- case ISIF_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
- /* write to memory */
- if (flags & MEDIA_LNK_FL_ENABLED)
- isif->output = ISIF_OUTPUT_MEMORY;
- else
- isif->output = ISIF_OUTPUT_NONE;
- break;
-
- case ISIF_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
- if (flags & MEDIA_LNK_FL_ENABLED)
- isif->output = ISIF_OUTPUT_IPIPEIF;
- else
- isif->output = ISIF_OUTPUT_NONE;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct media_entity_operations isif_media_ops = {
- .link_setup = isif_link_setup,
-};
-
-/*
- * vpfe_isif_unregister_entities() - isif unregister entity
- * @isif: pointer to isif subdevice structure.
- */
-void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif)
-{
- vpfe_video_unregister(&isif->video_out);
- /* unregister subdev */
- v4l2_device_unregister_subdev(&isif->subdev);
- /* cleanup entity */
- media_entity_cleanup(&isif->subdev.entity);
-}
-
-static void isif_restore_defaults(struct vpfe_isif_device *isif)
-{
- enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
- int i;
-
- memset(&isif->isif_cfg.bayer.config_params, 0,
- sizeof(struct vpfe_isif_raw_config));
-
- isif->isif_cfg.bayer.config_params.linearize.corr_shft =
- VPFE_ISIF_NO_SHIFT;
- isif->isif_cfg.bayer.config_params.linearize.scale_fact.integer = 1;
- isif->isif_cfg.bayer.config_params.culling.hcpat_odd =
- ISIF_CULLING_HCAPT_ODD;
- isif->isif_cfg.bayer.config_params.culling.hcpat_even =
- ISIF_CULLING_HCAPT_EVEN;
- isif->isif_cfg.bayer.config_params.culling.vcpat = ISIF_CULLING_VCAPT;
- /* Enable clock to ISIF, IPIPEIF and BL */
- vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
- vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
- vpss_enable_clock(VPSS_BL_CLOCK, 1);
-
- /* set all registers to default value */
- for (i = 0; i <= 0x1f8; i += 4)
- isif_write(isif->isif_cfg.base_addr, 0, i);
- /* no culling support */
- isif_write(isif->isif_cfg.base_addr, 0xffff, CULH);
- isif_write(isif->isif_cfg.base_addr, 0xff, CULV);
-
- /* Set default offset and gain */
- isif_config_gain_offset(isif);
- vpss_select_ccdc_source(source);
-}
-
-/*
- * vpfe_isif_register_entities() - isif register entity
- * @isif: pointer to isif subdevice structure.
- * @vdev: pointer to v4l2 device structure.
- */
-int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
- struct v4l2_device *vdev)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
- unsigned int flags;
- int ret;
-
- /* Register the subdev */
- ret = v4l2_device_register_subdev(vdev, &isif->subdev);
- if (ret < 0)
- return ret;
-
- isif_restore_defaults(isif);
- ret = vpfe_video_register(&isif->video_out, vdev);
- if (ret) {
- pr_err("Failed to register isif video out device\n");
- goto out_video_register;
- }
- isif->video_out.vpfe_dev = vpfe_dev;
- flags = 0;
- /* connect isif to video node */
- ret = media_entity_create_link(&isif->subdev.entity, 1,
- &isif->video_out.video_dev.entity,
- 0, flags);
- if (ret < 0)
- goto out_create_link;
- return 0;
-out_create_link:
- vpfe_video_unregister(&isif->video_out);
-out_video_register:
- v4l2_device_unregister_subdev(&isif->subdev);
- return ret;
-}
-
-/* -------------------------------------------------------------------
- * V4L2 subdev control operations
- */
-
-static int vpfe_isif_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vpfe_isif_device *isif =
- container_of(ctrl->handler, struct vpfe_isif_device, ctrls);
- struct isif_oper_config *config = &isif->isif_cfg;
-
- switch (ctrl->id) {
- case VPFE_CID_DPCM_PREDICTOR:
- config->bayer.dpcm_predictor = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CRGAIN:
- config->isif_gain_params.cr_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CGRGAIN:
- config->isif_gain_params.cgr_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CGBGAIN:
- config->isif_gain_params.cgb_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CBGAIN:
- config->isif_gain_params.cb_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_GAIN_OFFSET:
- config->isif_gain_params.offset = ctrl->val;
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct v4l2_ctrl_ops vpfe_isif_ctrl_ops = {
- .s_ctrl = vpfe_isif_s_ctrl,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_dpcm_pred = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_CID_DPCM_PREDICTOR,
- .name = "DPCM Predictor",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_crgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CRGAIN,
- .name = "CRGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_cgrgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CGRGAIN,
- .name = "CGRGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_cgbgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CGBGAIN,
- .name = "CGBGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_cbgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CBGAIN,
- .name = "CBGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_gain_offset = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_GAIN_OFFSET,
- .name = "Gain Offset",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static void isif_remove(struct vpfe_isif_device *isif,
- struct platform_device *pdev)
-{
- struct resource *res;
- int i = 0;
-
- iounmap(isif->isif_cfg.base_addr);
- iounmap(isif->isif_cfg.linear_tbl0_addr);
- iounmap(isif->isif_cfg.linear_tbl1_addr);
-
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res)
- release_mem_region(res->start,
- resource_size(res));
- i++;
- }
-}
-
-static void isif_config_defaults(struct vpfe_isif_device *isif)
-{
- isif->isif_cfg.ycbcr.v4l2_pix_fmt = V4L2_PIX_FMT_UYVY;
- isif->isif_cfg.ycbcr.pix_fmt = ISIF_PIXFMT_YCBCR_8BIT;
- isif->isif_cfg.ycbcr.frm_fmt = ISIF_FRMFMT_INTERLACED;
- isif->isif_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
- isif->isif_cfg.ycbcr.buf_type = ISIF_BUFTYPE_FLD_INTERLEAVED;
-
- isif->isif_cfg.bayer.v4l2_pix_fmt = V4L2_PIX_FMT_SGRBG10ALAW8;
- isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
- isif->isif_cfg.bayer.frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
- isif->isif_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.bayer.cfa_pat = ISIF_CFA_PAT_MOSAIC;
- isif->isif_cfg.bayer.data_msb = ISIF_BIT_MSB_11;
- isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
-}
-
-/*
- * vpfe_isif_init() - Initialize V4L2 subdev and media entity
- * @isif: VPFE isif module
- * @pdev: Pointer to platform device structure.
- * Return 0 on success and a negative error code on failure.
- */
-int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev)
-{
- struct v4l2_subdev *sd = &isif->subdev;
- struct media_pad *pads = &isif->pads[0];
- struct media_entity *me = &sd->entity;
- static resource_size_t res_len;
- struct resource *res;
- void __iomem *addr;
- int status;
- int i = 0;
-
- /* Get the ISIF base address, linearization table0 and table1 addr. */
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- status = -ENOENT;
- goto fail_nobase_res;
- }
- res_len = resource_size(res);
- res = request_mem_region(res->start, res_len, res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nobase_res;
- }
- addr = ioremap_nocache(res->start, res_len);
- if (!addr) {
- status = -EBUSY;
- goto fail_base_iomap;
- }
- switch (i) {
- case 0:
- /* ISIF base address */
- isif->isif_cfg.base_addr = addr;
- break;
- case 1:
- /* ISIF linear tbl0 address */
- isif->isif_cfg.linear_tbl0_addr = addr;
- break;
- default:
-			/* ISIF linear tbl1 address */
- isif->isif_cfg.linear_tbl1_addr = addr;
- break;
- }
- i++;
- }
- davinci_cfg_reg(DM365_VIN_CAM_WEN);
- davinci_cfg_reg(DM365_VIN_CAM_VD);
- davinci_cfg_reg(DM365_VIN_CAM_HD);
- davinci_cfg_reg(DM365_VIN_YIN4_7_EN);
- davinci_cfg_reg(DM365_VIN_YIN0_3_EN);
-
- /* queue ops */
- isif->video_out.ops = &isif_video_ops;
- v4l2_subdev_init(sd, &isif_v4l2_ops);
- sd->internal_ops = &isif_v4l2_internal_ops;
- strlcpy(sd->name, "DAVINCI ISIF", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, isif);
- sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[ISIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[ISIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- isif->input = ISIF_INPUT_NONE;
- isif->output = ISIF_OUTPUT_NONE;
- me->ops = &isif_media_ops;
- status = media_entity_init(me, ISIF_PADS_NUM, pads, 0);
- if (status)
- goto isif_fail;
- isif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- status = vpfe_video_init(&isif->video_out, "ISIF");
- if (status) {
- pr_err("Failed to init isif-out video device\n");
- goto isif_fail;
- }
- v4l2_ctrl_handler_init(&isif->ctrls, 6);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_crgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgrgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgbgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cbgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_gain_offset, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_dpcm_pred, NULL);
-
- v4l2_ctrl_handler_setup(&isif->ctrls);
- sd->ctrl_handler = &isif->ctrls;
- isif_config_defaults(isif);
- return 0;
-fail_base_iomap:
- release_mem_region(res->start, res_len);
- i--;
-fail_nobase_res:
- if (isif->isif_cfg.base_addr)
- iounmap(isif->isif_cfg.base_addr);
- if (isif->isif_cfg.linear_tbl0_addr)
- iounmap(isif->isif_cfg.linear_tbl0_addr);
-
- while (i >= 0) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- release_mem_region(res->start, res_len);
- i--;
- }
- return status;
-isif_fail:
- v4l2_ctrl_handler_free(&isif->ctrls);
- isif_remove(isif, pdev);
- return status;
-}
-
-/*
- * vpfe_isif_cleanup - isif module cleanup
- * @isif: pointer to isif subdevice
- * @dev: pointer to platform device structure
- */
-void
-vpfe_isif_cleanup(struct vpfe_isif_device *isif, struct platform_device *pdev)
-{
- isif_remove(isif, pdev);
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.h b/drivers/staging/media/davinci_vpfe/dm365_isif.h
deleted file mode 100644
index 89e814e9c0d7..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_ISIF_H
-#define _DAVINCI_VPFE_DM365_ISIF_H
-
-#include <linux/platform_device.h>
-
-#include <mach/mux.h>
-
-#include <media/davinci/vpfe_types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-
-#include "davinci_vpfe_user.h"
-#include "dm365_isif_regs.h"
-#include "vpfe_video.h"
-
-#define ISIF_CULLING_HCAPT_ODD 0xff
-#define ISIF_CULLING_HCAPT_EVEN 0xff
-#define ISIF_CULLING_VCAPT 0xff
-
-#define ISIF_CADU_BITS 0x07ff
-#define ISIF_CADL_BITS 0x0ffff
-
-enum isif_pixfmt {
- ISIF_PIXFMT_RAW = 0,
- ISIF_PIXFMT_YCBCR_16BIT = 1,
- ISIF_PIXFMT_YCBCR_8BIT = 2,
-};
-
-enum isif_frmfmt {
- ISIF_FRMFMT_PROGRESSIVE = 0,
- ISIF_FRMFMT_INTERLACED = 1,
-};
-
-/* PIXEL ORDER IN MEMORY from LSB to MSB */
-/* only applicable for 8-bit input mode */
-enum isif_pixorder {
- ISIF_PIXORDER_YCBYCR = 0,
- ISIF_PIXORDER_CBYCRY = 1,
-};
-
-enum isif_buftype {
- ISIF_BUFTYPE_FLD_INTERLEAVED = 0,
- ISIF_BUFTYPE_FLD_SEPARATED = 1,
-};
-
-struct isif_ycbcr_config {
- /* v4l2 pixel format */
- unsigned long v4l2_pix_fmt;
- /* isif pixel format */
- enum isif_pixfmt pix_fmt;
- /* isif frame format */
- enum isif_frmfmt frm_fmt;
- /* isif crop window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* isif pix order. Only used for ycbcr capture */
- enum isif_pixorder pix_order;
- /* isif buffer type. Only used for ycbcr capture */
- enum isif_buftype buf_type;
-};
-
-enum isif_cfa_pattern {
- ISIF_CFA_PAT_MOSAIC = 0,
- ISIF_CFA_PAT_STRIPE = 1,
-};
-
-enum isif_data_msb {
- /* MSB b15 */
- ISIF_BIT_MSB_15 = 0,
- /* MSB b14 */
- ISIF_BIT_MSB_14 = 1,
- /* MSB b13 */
- ISIF_BIT_MSB_13 = 2,
- /* MSB b12 */
- ISIF_BIT_MSB_12 = 3,
- /* MSB b11 */
- ISIF_BIT_MSB_11 = 4,
- /* MSB b10 */
- ISIF_BIT_MSB_10 = 5,
- /* MSB b9 */
- ISIF_BIT_MSB_9 = 6,
- /* MSB b8 */
- ISIF_BIT_MSB_8 = 7,
- /* MSB b7 */
- ISIF_BIT_MSB_7 = 8,
-};
-
-struct isif_params_raw {
- /* v4l2 pixel format */
- unsigned long v4l2_pix_fmt;
- /* isif pixel format */
- enum isif_pixfmt pix_fmt;
- /* isif frame format */
- enum isif_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* buffer type. Applicable for interlaced mode */
- enum isif_buftype buf_type;
- /* cfa pattern */
- enum isif_cfa_pattern cfa_pat;
- /* Data MSB position */
- enum isif_data_msb data_msb;
- /* Enable horizontal flip */
- unsigned char horz_flip_en;
- /* Enable image invert vertically */
- unsigned char image_invert_en;
- unsigned char dpcm_predictor;
- struct vpfe_isif_raw_config config_params;
-};
-
-enum isif_data_pack {
- ISIF_PACK_16BIT = 0,
- ISIF_PACK_12BIT = 1,
- ISIF_PACK_8BIT = 2,
-};
-
-struct isif_gain_values {
- unsigned int cr_gain;
- unsigned int cgr_gain;
- unsigned int cgb_gain;
- unsigned int cb_gain;
- unsigned int offset;
-};
-
-struct isif_oper_config {
- struct isif_ycbcr_config ycbcr;
- struct isif_params_raw bayer;
- enum isif_data_pack data_pack;
- struct isif_gain_values isif_gain_params;
- void __iomem *base_addr;
- void __iomem *linear_tbl0_addr;
- void __iomem *linear_tbl1_addr;
-};
-
-#define ISIF_PAD_SINK 0
-#define ISIF_PAD_SOURCE 1
-
-#define ISIF_PADS_NUM 2
-
-enum isif_input_entity {
- ISIF_INPUT_NONE = 0,
- ISIF_INPUT_PARALLEL = 1,
-};
-
-#define ISIF_OUTPUT_NONE (0)
-#define ISIF_OUTPUT_MEMORY (1 << 0)
-#define ISIF_OUTPUT_IPIPEIF (1 << 1)
-
-struct vpfe_isif_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[ISIF_PADS_NUM];
- struct v4l2_mbus_framefmt formats[ISIF_PADS_NUM];
- enum isif_input_entity input;
- unsigned int output;
- struct v4l2_ctrl_handler ctrls;
- struct v4l2_rect crop;
- struct isif_oper_config isif_cfg;
- struct vpfe_video_device video_out;
-};
-
-enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev);
-void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif);
-int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
- struct v4l2_device *dev);
-int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev);
-void vpfe_isif_cleanup(struct vpfe_isif_device *vpfe_isif,
- struct platform_device *pdev);
-void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif);
-void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif);
-
-#endif /* _DAVINCI_VPFE_DM365_ISIF_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
deleted file mode 100644
index 8aceabb43f8e..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_ISIF_REGS_H
-#define _DAVINCI_VPFE_DM365_ISIF_REGS_H
-
-/* ISIF registers relative offsets */
-#define SYNCEN 0x00
-#define MODESET 0x04
-#define HDW 0x08
-#define VDW 0x0c
-#define PPLN 0x10
-#define LPFR 0x14
-#define SPH 0x18
-#define LNH 0x1c
-#define SLV0 0x20
-#define SLV1 0x24
-#define LNV 0x28
-#define CULH 0x2c
-#define CULV 0x30
-#define HSIZE 0x34
-#define SDOFST 0x38
-#define CADU 0x3c
-#define CADL 0x40
-#define LINCFG0 0x44
-#define LINCFG1 0x48
-#define CCOLP 0x4c
-#define CRGAIN 0x50
-#define CGRGAIN 0x54
-#define CGBGAIN 0x58
-#define CBGAIN 0x5c
-#define COFSTA 0x60
-#define FLSHCFG0 0x64
-#define FLSHCFG1 0x68
-#define FLSHCFG2 0x6c
-#define VDINT0 0x70
-#define VDINT1 0x74
-#define VDINT2 0x78
-#define MISC 0x7c
-#define CGAMMAWD 0x80
-#define REC656IF 0x84
-#define CCDCFG 0x88
-/*****************************************************
-* Defect Correction registers
-*****************************************************/
-#define DFCCTL 0x8c
-#define VDFSATLV 0x90
-#define DFCMEMCTL 0x94
-#define DFCMEM0 0x98
-#define DFCMEM1 0x9c
-#define DFCMEM2 0xa0
-#define DFCMEM3 0xa4
-#define DFCMEM4 0xa8
-/****************************************************
-* Black Clamp registers
-****************************************************/
-#define CLAMPCFG 0xac
-#define CLDCOFST 0xb0
-#define CLSV 0xb4
-#define CLHWIN0 0xb8
-#define CLHWIN1 0xbc
-#define CLHWIN2 0xc0
-#define CLVRV 0xc4
-#define CLVWIN0 0xc8
-#define CLVWIN1 0xcc
-#define CLVWIN2 0xd0
-#define CLVWIN3 0xd4
-/****************************************************
-* Lens Shading Correction
-****************************************************/
-#define DATAHOFST 0xd8
-#define DATAVOFST 0xdc
-#define LSCHVAL 0xe0
-#define LSCVVAL 0xe4
-#define TWODLSCCFG 0xe8
-#define TWODLSCOFST 0xec
-#define TWODLSCINI 0xf0
-#define TWODLSCGRBU 0xf4
-#define TWODLSCGRBL 0xf8
-#define TWODLSCGROF 0xfc
-#define TWODLSCORBU 0x100
-#define TWODLSCORBL 0x104
-#define TWODLSCOROF 0x108
-#define TWODLSCIRQEN 0x10c
-#define TWODLSCIRQST 0x110
-/****************************************************
-* Data formatter
-****************************************************/
-#define FMTCFG 0x114
-#define FMTPLEN 0x118
-#define FMTSPH 0x11c
-#define FMTLNH 0x120
-#define FMTSLV 0x124
-#define FMTLNV 0x128
-#define FMTRLEN 0x12c
-#define FMTHCNT 0x130
-#define FMTAPTR_BASE 0x134
-/* Below macro for addresses FMTAPTR0 - FMTAPTR15 */
-#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
-#define FMTPGMVF0 0x174
-#define FMTPGMVF1 0x178
-#define FMTPGMAPU0 0x17c
-#define FMTPGMAPU1 0x180
-#define FMTPGMAPS0 0x184
-#define FMTPGMAPS1 0x188
-#define FMTPGMAPS2 0x18c
-#define FMTPGMAPS3 0x190
-#define FMTPGMAPS4 0x194
-#define FMTPGMAPS5 0x198
-#define FMTPGMAPS6 0x19c
-#define FMTPGMAPS7 0x1a0
-/************************************************
-* Color Space Converter
-************************************************/
-#define CSCCTL 0x1a4
-#define CSCM0 0x1a8
-#define CSCM1 0x1ac
-#define CSCM2 0x1b0
-#define CSCM3 0x1b4
-#define CSCM4 0x1b8
-#define CSCM5 0x1bc
-#define CSCM6 0x1c0
-#define CSCM7 0x1c4
-#define OBWIN0 0x1c8
-#define OBWIN1 0x1cc
-#define OBWIN2 0x1d0
-#define OBWIN3 0x1d4
-#define OBVAL0 0x1d8
-#define OBVAL1 0x1dc
-#define OBVAL2 0x1e0
-#define OBVAL3 0x1e4
-#define OBVAL4 0x1e8
-#define OBVAL5 0x1ec
-#define OBVAL6 0x1f0
-#define OBVAL7 0x1f4
-#define CLKCTL 0x1f8
-
-/* Masks & Shifts below */
-#define START_PX_HOR_MASK 0x7fff
-#define NUM_PX_HOR_MASK 0x7fff
-#define START_VER_ONE_MASK 0x7fff
-#define START_VER_TWO_MASK 0x7fff
-#define NUM_LINES_VER 0x7fff
-
-/* gain - offset masks */
-#define OFFSET_MASK 0xfff
-#define GAIN_SDRAM_EN_SHIFT 12
-#define GAIN_IPIPE_EN_SHIFT 13
-#define GAIN_H3A_EN_SHIFT 14
-#define OFST_SDRAM_EN_SHIFT 8
-#define OFST_IPIPE_EN_SHIFT 9
-#define OFST_H3A_EN_SHIFT 10
-#define GAIN_OFFSET_EN_MASK 0x7700
-
-/* Culling */
-#define CULL_PAT_EVEN_LINE_SHIFT 8
-
-/* CCDCFG register */
-#define ISIF_YCINSWP_RAW (0x00 << 4)
-#define ISIF_YCINSWP_YCBCR (0x01 << 4)
-#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
-#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
-#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
-#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
-#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
-#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
-#define ISIF_DATA_PACK_MASK 0x03
-#define ISIF_PIX_ORDER_SHIFT 11
-#define ISIF_PIX_ORDER_MASK 0x01
-#define ISIF_BW656_ENABLE (0x01 << 5)
-
-/* MODESET registers */
-#define ISIF_VDHDOUT_INPUT (0x00 << 0)
-#define ISIF_INPUT_MASK 0x03
-#define ISIF_INPUT_SHIFT 12
-#define ISIF_FID_POL_MASK 0x01
-#define ISIF_FID_POL_SHIFT 4
-#define ISIF_HD_POL_MASK 0x01
-#define ISIF_HD_POL_SHIFT 3
-#define ISIF_VD_POL_MASK 0x01
-#define ISIF_VD_POL_SHIFT 2
-#define ISIF_DATAPOL_NORMAL 0x00
-#define ISIF_DATAPOL_MASK 0x01
-#define ISIF_DATAPOL_SHIFT 6
-#define ISIF_EXWEN_DISABLE 0x00
-#define ISIF_EXWEN_MASK 0x01
-#define ISIF_EXWEN_SHIFT 5
-#define ISIF_FRM_FMT_MASK 0x01
-#define ISIF_FRM_FMT_SHIFT 7
-#define ISIF_DATASFT_MASK 0x07
-#define ISIF_DATASFT_SHIFT 8
-#define ISIF_LPF_SHIFT 14
-#define ISIF_LPF_MASK 0x1
-
-/* GAMMAWD registers */
-#define ISIF_ALAW_GAMA_WD_MASK 0xf
-#define ISIF_ALAW_GAMA_WD_SHIFT 1
-#define ISIF_ALAW_ENABLE 0x01
-#define ISIF_GAMMAWD_CFA_MASK 0x01
-#define ISIF_GAMMAWD_CFA_SHIFT 5
-
-/* HSIZE registers */
-#define ISIF_HSIZE_FLIP_MASK 0x01
-#define ISIF_HSIZE_FLIP_SHIFT 12
-#define ISIF_LINEOFST_MASK 0xfff
-
-/* MISC registers */
-#define ISIF_DPCM_EN_SHIFT 12
-#define ISIF_DPCM_PREDICTOR_SHIFT 13
-#define ISIF_DPCM_PREDICTOR_MASK 1
-
-/* Black clamp related */
-#define ISIF_BC_DCOFFSET_MASK 0x1fff
-#define ISIF_BC_MODE_COLOR_MASK 1
-#define ISIF_BC_MODE_COLOR_SHIFT 4
-#define ISIF_HORZ_BC_MODE_MASK 3
-#define ISIF_HORZ_BC_MODE_SHIFT 1
-#define ISIF_HORZ_BC_WIN_COUNT_MASK 0x1f
-#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
-#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
-#define ISIF_HORZ_BC_WIN_H_SIZE_MASK 3
-#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
-#define ISIF_HORZ_BC_WIN_V_SIZE_MASK 3
-#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
-#define ISIF_HORZ_BC_WIN_START_H_MASK 0x1fff
-#define ISIF_HORZ_BC_WIN_START_V_MASK 0x1fff
-#define ISIF_VERT_BC_OB_H_SZ_MASK 7
-#define ISIF_VERT_BC_RST_VAL_SEL_MASK 3
-#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
-#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
-#define ISIF_VERT_BC_OB_START_HORZ_MASK 0x1fff
-#define ISIF_VERT_BC_OB_START_VERT_MASK 0x1fff
-#define ISIF_VERT_BC_OB_VERT_SZ_MASK 0x1fff
-#define ISIF_VERT_BC_RST_VAL_MASK 0xfff
-#define ISIF_BC_VERT_START_SUB_V_MASK 0x1fff
-
-/* VDFC registers */
-#define ISIF_VDFC_EN_SHIFT 4
-#define ISIF_VDFC_CORR_MOD_MASK 3
-#define ISIF_VDFC_CORR_MOD_SHIFT 5
-#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
-#define ISIF_VDFC_LEVEL_SHFT_MASK 7
-#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
-#define ISIF_VDFC_SAT_LEVEL_MASK 0xfff
-#define ISIF_VDFC_POS_MASK 0x1fff
-#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
-
-/* CSC registers */
-#define ISIF_CSC_COEF_INTEG_MASK 7
-#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
-#define ISIF_CSC_COEF_INTEG_SHIFT 5
-#define ISIF_CSCM_MSB_SHIFT 8
-#define ISIF_DF_CSC_SPH_MASK 0x1fff
-#define ISIF_DF_CSC_LNH_MASK 0x1fff
-#define ISIF_DF_CSC_SLV_MASK 0x1fff
-#define ISIF_DF_CSC_LNV_MASK 0x1fff
-#define ISIF_DF_NUMLINES 0x7fff
-#define ISIF_DF_NUMPIX 0x1fff
-
-/* Offsets for LSC/DFC/Gain */
-#define ISIF_DATA_H_OFFSET_MASK 0x1fff
-#define ISIF_DATA_V_OFFSET_MASK 0x1fff
-
-/* Linearization */
-#define ISIF_LIN_CORRSFT_MASK 7
-#define ISIF_LIN_CORRSFT_SHIFT 4
-#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
-#define ISIF_LIN_SCALE_FACT_DECIMAL_MASK 0x3ff
-#define ISIF_LIN_ENTRY_MASK 0x3ff
-
-/* masks and shifts*/
-#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
-#define ISIF_SYNCEN_WEN_MASK (1 << 1)
-#define ISIF_SYNCEN_WEN_SHIFT 1
-
-#endif /* _DAVINCI_VPFE_DM365_ISIF_REGS_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
deleted file mode 100644
index acb293ed9c91..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ /dev/null
@@ -1,1996 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- *
- *
- * The resizer allows upscaling or downscaling an image to a desired
- * resolution. There are two resizer modules, both operating on the
- * same input image, but each can have a different output resolution.
- */
-
-#include "dm365_ipipe_hw.h"
-#include "dm365_resizer.h"
-
-#define MIN_IN_WIDTH 32
-#define MIN_IN_HEIGHT 32
-#define MAX_IN_WIDTH 4095
-#define MAX_IN_HEIGHT 4095
-#define MIN_OUT_WIDTH 16
-#define MIN_OUT_HEIGHT 2
-
-static const unsigned int resizer_input_formats[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
-};
-
-static const unsigned int resizer_output_formats[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_YDYUYDYV8_1X16,
- MEDIA_BUS_FMT_SGRBG12_1X12,
-};
-
-/* resizer_calculate_line_length() - This function calculates the line length of
- * various image planes at the input and
- * output.
- */
-static void
-resizer_calculate_line_length(u32 pix, int width, int height,
- int *line_len, int *line_len_c)
-{
- *line_len = 0;
- *line_len_c = 0;
-
- if (pix == MEDIA_BUS_FMT_UYVY8_2X8 ||
- pix == MEDIA_BUS_FMT_SGRBG12_1X12) {
- *line_len = width << 1;
- } else if (pix == MEDIA_BUS_FMT_Y8_1X8 ||
- pix == MEDIA_BUS_FMT_UV8_1X8) {
- *line_len = width;
- *line_len_c = width;
- } else {
- /* YUV 420 */
- /* round width to upper 32 byte boundary */
- *line_len = width;
- *line_len_c = width;
- }
- /* adjust the line len to be a multiple of 32 */
- *line_len += 31;
- *line_len &= ~0x1f;
- *line_len_c += 31;
- *line_len_c &= ~0x1f;
-}
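/*
 * Illustrative sketch, not part of the original driver: the "+ 31" /
 * "& ~0x1f" sequence above is the usual round-up-to-32-bytes idiom.
 * A minimal standalone equivalent, assuming nothing beyond standard C:
 */
#if 0
static inline int align32(int len)
{
	/* round len up to the next multiple of 32 bytes */
	return (len + 31) & ~0x1f;
}
/* align32(1440) == 1440 (already aligned), align32(725) == 736 */
#endif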
-
-static inline int
-resizer_validate_output_image_format(struct device *dev,
- struct v4l2_mbus_framefmt *format,
- int *in_line_len, int *in_line_len_c)
-{
- if (format->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
- format->code != MEDIA_BUS_FMT_Y8_1X8 &&
- format->code != MEDIA_BUS_FMT_UV8_1X8 &&
- format->code != MEDIA_BUS_FMT_YDYUYDYV8_1X16 &&
- format->code != MEDIA_BUS_FMT_SGRBG12_1X12) {
- dev_err(dev, "Invalid Mbus format, %d\n", format->code);
- return -EINVAL;
- }
- if (!format->width || !format->height) {
- dev_err(dev, "invalid width or height\n");
- return -EINVAL;
- }
- resizer_calculate_line_length(format->code, format->width,
- format->height, in_line_len, in_line_len_c);
- return 0;
-}
-
-static void
-resizer_configure_passthru(struct vpfe_resizer_device *resizer, int bypass)
-{
- struct resizer_params *param = &resizer->config;
-
- param->rsz_rsc_param[RSZ_A].cen = DISABLE;
- param->rsz_rsc_param[RSZ_A].yen = DISABLE;
- param->rsz_rsc_param[RSZ_A].v_phs_y = 0;
- param->rsz_rsc_param[RSZ_A].v_phs_c = 0;
- param->rsz_rsc_param[RSZ_A].v_dif = 256;
- param->rsz_rsc_param[RSZ_A].v_lpf_int_y = 0;
- param->rsz_rsc_param[RSZ_A].v_lpf_int_c = 0;
- param->rsz_rsc_param[RSZ_A].h_phs = 0;
- param->rsz_rsc_param[RSZ_A].h_dif = 256;
- param->rsz_rsc_param[RSZ_A].h_lpf_int_y = 0;
- param->rsz_rsc_param[RSZ_A].h_lpf_int_c = 0;
- param->rsz_rsc_param[RSZ_A].dscale_en = DISABLE;
- param->rsz2rgb[RSZ_A].rgb_en = DISABLE;
- param->rsz_en[RSZ_A] = ENABLE;
- param->rsz_en[RSZ_B] = DISABLE;
- if (bypass) {
- param->rsz_rsc_param[RSZ_A].i_vps = 0;
- param->rsz_rsc_param[RSZ_A].i_hps = 0;
- /* Raw Bypass */
- param->rsz_common.passthrough = BYPASS_ON;
- }
-}
-
-static void
-configure_resizer_out_params(struct vpfe_resizer_device *resizer, int index,
- void *output_spec, unsigned char partial,
- unsigned flag)
-{
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *outformat;
- struct vpfe_rsz_output_spec *output;
-
- if (index == RSZ_A &&
- resizer->resizer_a.output == RESIZER_OUTPUT_NONE) {
- param->rsz_en[index] = DISABLE;
- return;
- }
- if (index == RSZ_B &&
- resizer->resizer_b.output == RESIZER_OUTPUT_NONE) {
- param->rsz_en[index] = DISABLE;
- return;
- }
- output = output_spec;
- param->rsz_en[index] = ENABLE;
- if (partial) {
- param->rsz_rsc_param[index].h_flip = output->h_flip;
- param->rsz_rsc_param[index].v_flip = output->v_flip;
- param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
- param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
- param->rsz_rsc_param[index].v_lpf_int_y =
- output->v_lpf_int_y;
- param->rsz_rsc_param[index].v_lpf_int_c =
- output->v_lpf_int_c;
- param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
- param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
- param->rsz_rsc_param[index].h_lpf_int_y =
- output->h_lpf_int_y;
- param->rsz_rsc_param[index].h_lpf_int_c =
- output->h_lpf_int_c;
- param->rsz_rsc_param[index].dscale_en =
- output->en_down_scale;
- param->rsz_rsc_param[index].h_dscale_ave_sz =
- output->h_dscale_ave_sz;
- param->rsz_rsc_param[index].v_dscale_ave_sz =
- output->v_dscale_ave_sz;
- param->ext_mem_param[index].user_y_ofst =
- (output->user_y_ofst + 31) & ~0x1f;
- param->ext_mem_param[index].user_c_ofst =
- (output->user_c_ofst + 31) & ~0x1f;
- return;
- }
-
- if (index == RSZ_A)
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- else
- outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
- param->rsz_rsc_param[index].o_vsz = outformat->height - 1;
- param->rsz_rsc_param[index].o_hsz = outformat->width - 1;
- param->ext_mem_param[index].rsz_sdr_ptr_s_y = output->vst_y;
- param->ext_mem_param[index].rsz_sdr_ptr_e_y = outformat->height;
- param->ext_mem_param[index].rsz_sdr_ptr_s_c = output->vst_c;
- param->ext_mem_param[index].rsz_sdr_ptr_e_c = outformat->height;
-
- if (!flag)
- return;
- /* update common parameters */
- param->rsz_rsc_param[index].h_flip = output->h_flip;
- param->rsz_rsc_param[index].v_flip = output->v_flip;
- param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
- param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
- param->rsz_rsc_param[index].v_lpf_int_y = output->v_lpf_int_y;
- param->rsz_rsc_param[index].v_lpf_int_c = output->v_lpf_int_c;
- param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
- param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
- param->rsz_rsc_param[index].h_lpf_int_y = output->h_lpf_int_y;
- param->rsz_rsc_param[index].h_lpf_int_c = output->h_lpf_int_c;
- param->rsz_rsc_param[index].dscale_en = output->en_down_scale;
- param->rsz_rsc_param[index].h_dscale_ave_sz = output->h_dscale_ave_sz;
- param->rsz_rsc_param[index].v_dscale_ave_sz = output->v_dscale_ave_sz;
- param->ext_mem_param[index].user_y_ofst =
- (output->user_y_ofst + 31) & ~0x1f;
- param->ext_mem_param[index].user_c_ofst =
- (output->user_c_ofst + 31) & ~0x1f;
-}
-
-/*
- * resizer_calculate_resize_ratios() - Calculates resize ratio for resizer
- * A or B. This is called after setting
- * the input size or output size.
- * @resizer: Pointer to VPFE resizer subdevice.
- * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
- */
-static void
-resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index)
-{
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *informat, *outformat;
-
- informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
-
- if (index == RSZ_A)
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- else
- outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
-
- if (outformat->field != V4L2_FIELD_INTERLACED)
- param->rsz_rsc_param[index].v_dif =
- ((informat->height) * 256) / (outformat->height);
- else
- param->rsz_rsc_param[index].v_dif =
- ((informat->height >> 1) * 256) / (outformat->height);
- param->rsz_rsc_param[index].h_dif =
- ((informat->width) * 256) / (outformat->width);
-}
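/*
 * Illustrative note, not part of the original driver: h_dif and v_dif
 * are 8.8 fixed-point scale factors.  Scaling a 1280-pixel-wide input
 * to a 640-pixel-wide output gives h_dif = (1280 * 256) / 640 = 512,
 * i.e. a 2:1 horizontal downscale; for an interlaced output only half
 * of the input height enters the vertical ratio.
 */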
-
-static void
-resizer_enable_422_420_conversion(struct resizer_params *param,
- int index, bool en)
-{
- param->rsz_rsc_param[index].cen = en;
- param->rsz_rsc_param[index].yen = en;
-}
-
-/* resizer_calculate_sdram_offsets() - This function calculates the offsets from
- * start of buffer for the C plane when
- * output format is YUV420SP. It also
- * calculates the offsets from the start of
- * the buffer when the image is flipped
- * vertically or horizontally for ycbcr/y/c
- * planes.
- * @resizer: Pointer to resizer subdevice.
- * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
- */
-static int
-resizer_calculate_sdram_offsets(struct vpfe_resizer_device *resizer, int index)
-{
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *outformat;
- int bytesperpixel = 2;
- int image_height;
- int image_width;
- int yuv_420 = 0;
- int offset = 0;
-
- if (index == RSZ_A)
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- else
- outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
-
- image_height = outformat->height + 1;
- image_width = outformat->width + 1;
- param->ext_mem_param[index].c_offset = 0;
- param->ext_mem_param[index].flip_ofst_y = 0;
- param->ext_mem_param[index].flip_ofst_c = 0;
- if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16) {
- /* YUV 420 */
- yuv_420 = 1;
- bytesperpixel = 1;
- }
-
- if (param->rsz_rsc_param[index].h_flip)
- /* width * bytesperpixel - 1 */
- offset = (image_width * bytesperpixel) - 1;
- if (param->rsz_rsc_param[index].v_flip)
- offset += (image_height - 1) *
- param->ext_mem_param[index].rsz_sdr_oft_y;
- param->ext_mem_param[index].flip_ofst_y = offset;
- if (!yuv_420)
- return 0;
- offset = 0;
- /* half height for c-plane */
- if (param->rsz_rsc_param[index].h_flip)
- /* width * bytesperpixel - 1 */
- offset = image_width - 1;
- if (param->rsz_rsc_param[index].v_flip)
- offset += (((image_height >> 1) - 1) *
- param->ext_mem_param[index].rsz_sdr_oft_c);
- param->ext_mem_param[index].flip_ofst_c = offset;
- param->ext_mem_param[index].c_offset =
- param->ext_mem_param[index].rsz_sdr_oft_y * image_height;
- return 0;
-}
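/*
 * Illustrative note, not part of the original driver: the flip offsets
 * computed above move the start of DMA to the opposite corner of the
 * image -- h_flip contributes (line width in bytes - 1) and v_flip adds
 * (image height - 1) * rsz_sdr_oft_y -- while for YUV420 output the
 * chroma plane is placed immediately after the luma plane at
 * c_offset = rsz_sdr_oft_y * image_height, with its own flip offset
 * using half the image height.
 */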
-
-static int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
-{
- struct resizer_params *param = &resizer->config;
- struct vpfe_rsz_output_spec output_specs;
- struct v4l2_mbus_framefmt *outformat;
- int line_len_c;
- int line_len;
- int ret;
-
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
-
- memset(&output_specs, 0x0, sizeof(struct vpfe_rsz_output_spec));
- output_specs.vst_y = param->user_config.vst;
- if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- output_specs.vst_c = param->user_config.vst;
-
- configure_resizer_out_params(resizer, RSZ_A, &output_specs, 0, 0);
- resizer_calculate_line_length(outformat->code,
- param->rsz_rsc_param[0].o_hsz + 1,
- param->rsz_rsc_param[0].o_vsz + 1,
- &line_len, &line_len_c);
- param->ext_mem_param[0].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[0].rsz_sdr_oft_c = line_len_c;
- resizer_calculate_resize_ratios(resizer, RSZ_A);
- if (param->rsz_en[RSZ_B])
- resizer_calculate_resize_ratios(resizer, RSZ_B);
-
- if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param, RSZ_A, ENABLE);
- else
- resizer_enable_422_420_conversion(param, RSZ_A, DISABLE);
-
- ret = resizer_calculate_sdram_offsets(resizer, RSZ_A);
- if (!ret && param->rsz_en[RSZ_B])
- ret = resizer_calculate_sdram_offsets(resizer, RSZ_B);
-
- if (ret)
- pr_err("Error in calculating sdram offsets\n");
- return ret;
-}
-
-static int
-resizer_calculate_down_scale_f_div_param(struct device *dev,
- int input_width, int output_width,
- struct resizer_scale_param *param)
-{
- /* rsz = R, input_width = H, output width = h in the equation */
- unsigned int two_power;
- unsigned int upper_h1;
- unsigned int upper_h2;
- unsigned int val1;
- unsigned int val;
- unsigned int rsz;
- unsigned int h1;
- unsigned int h2;
- unsigned int o;
- unsigned int n;
-
- upper_h1 = input_width >> 1;
- n = param->h_dscale_ave_sz;
- /* 2 ^ (scale+1) */
- two_power = 1 << (n + 1);
- upper_h1 = (upper_h1 >> (n + 1)) << (n + 1);
- upper_h2 = input_width - upper_h1;
- if (upper_h2 % two_power) {
- dev_err(dev, "frame halves to be a multiple of 2 power n+1\n");
- return -EINVAL;
- }
- two_power = 1 << n;
- rsz = (input_width << 8) / output_width;
- val = rsz * two_power;
- val = ((upper_h1 << 8) / val) + 1;
- if (!(val % 2)) {
- h1 = val;
- } else {
- val = upper_h1 << 8;
- val >>= n + 1;
- val -= rsz >> 1;
- val /= rsz << 1;
- val <<= 1;
- val += 2;
- h1 = val;
- }
- o = 10 + (two_power << 2);
- if (((input_width << 7) / rsz) % 2)
- o += (((CEIL(rsz, 1024)) << 1) << n);
- h2 = output_width - h1;
- /* phi */
- val = (h1 * rsz) - (((upper_h1 - (o - 10)) / two_power) << 8);
- /* skip */
- val1 = ((val - 1024) >> 9) << 1;
- param->f_div.num_passes = MAX_PASSES;
- param->f_div.pass[0].o_hsz = h1 - 1;
- param->f_div.pass[0].i_hps = 0;
- param->f_div.pass[0].h_phs = 0;
- param->f_div.pass[0].src_hps = 0;
- param->f_div.pass[0].src_hsz = upper_h1 + o;
- param->f_div.pass[1].o_hsz = h2 - 1;
- param->f_div.pass[1].i_hps = 10 + (val1 * two_power);
- param->f_div.pass[1].h_phs = (val - (val1 << 8));
- param->f_div.pass[1].src_hps = upper_h1 - o;
- param->f_div.pass[1].src_hsz = upper_h2 + o;
-
- return 0;
-}
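/*
 * Illustrative note, not part of the original driver: rsz above is the
 * 8.8 fixed-point horizontal ratio, rsz = (input_width << 8) /
 * output_width.  The frame is split around upper_h1 into two passes
 * producing h1 and h2 = output_width - h1 output pixels, and each pass
 * reads o extra source pixels across the seam so the polyphase filter
 * has context there and the two halves stitch without a visible
 * boundary.
 */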
-
-static int
-resizer_configure_common_in_params(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct resizer_params *param = &resizer->config;
- struct vpfe_rsz_config_params *user_config;
- struct v4l2_mbus_framefmt *informat;
-
- informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
- user_config = &resizer->config.user_config;
- param->rsz_common.vps = param->user_config.vst;
- param->rsz_common.hps = param->user_config.hst;
-
- if (vpfe_ipipeif_decimation_enabled(vpfe_dev))
- param->rsz_common.hsz = (((informat->width - 1) *
- IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev));
- else
- param->rsz_common.hsz = informat->width - 1;
-
- if (informat->field == V4L2_FIELD_INTERLACED)
- param->rsz_common.vsz = (informat->height - 1) >> 1;
- else
- param->rsz_common.vsz = informat->height - 1;
-
- param->rsz_common.raw_flip = 0;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF)
- param->rsz_common.source = IPIPEIF_DATA;
- else
- param->rsz_common.source = IPIPE_DATA;
-
- switch (informat->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- param->rsz_common.src_img_fmt = RSZ_IMG_422;
- param->rsz_common.raw_flip = 0;
- break;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- param->rsz_common.src_img_fmt = RSZ_IMG_420;
- /* Select y */
- param->rsz_common.y_c = 0;
- param->rsz_common.raw_flip = 0;
- break;
-
- case MEDIA_BUS_FMT_UV8_1X8:
- param->rsz_common.src_img_fmt = RSZ_IMG_420;
- /* Select y */
- param->rsz_common.y_c = 1;
- param->rsz_common.raw_flip = 0;
- break;
-
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- param->rsz_common.raw_flip = 1;
- break;
-
- default:
- param->rsz_common.src_img_fmt = RSZ_IMG_422;
- param->rsz_common.source = IPIPE_DATA;
- }
-
- param->rsz_common.yuv_y_min = user_config->yuv_y_min;
- param->rsz_common.yuv_y_max = user_config->yuv_y_max;
- param->rsz_common.yuv_c_min = user_config->yuv_c_min;
- param->rsz_common.yuv_c_max = user_config->yuv_c_max;
- param->rsz_common.out_chr_pos = user_config->out_chr_pos;
- param->rsz_common.rsz_seq_crv = user_config->chroma_sample_even;
-
- return 0;
-}
-
-static int
-resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
-{
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
- struct resizer_params *param = &resizer->config;
- struct vpfe_rsz_config_params *cont_config;
- int line_len_c;
- int line_len;
- int ret;
-
- if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) {
- dev_err(dev, "enable resizer - Resizer-A\n");
- return -EINVAL;
- }
-
- cont_config = &resizer->config.user_config;
- param->rsz_en[RSZ_A] = ENABLE;
- configure_resizer_out_params(resizer, RSZ_A,
- &cont_config->output1, 1, 0);
- param->rsz_en[RSZ_B] = DISABLE;
- param->oper_mode = RESIZER_MODE_CONTINIOUS;
-
- if (resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
- struct v4l2_mbus_framefmt *outformat2;
-
- param->rsz_en[RSZ_B] = ENABLE;
- outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
- ret = resizer_validate_output_image_format(dev, outformat2,
- &line_len, &line_len_c);
- if (ret)
- return ret;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
- configure_resizer_out_params(resizer, RSZ_B,
- &cont_config->output2, 0, 1);
- if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param,
- RSZ_B, ENABLE);
- else
- resizer_enable_422_420_conversion(param,
- RSZ_B, DISABLE);
- }
- resizer_configure_common_in_params(resizer);
- ret = resizer_configure_output_win(resizer);
- if (ret)
- return ret;
-
- param->rsz_common.passthrough = cont_config->bypass;
- if (cont_config->bypass)
- resizer_configure_passthru(resizer, 1);
-
- return 0;
-}
-
-static inline int
-resizer_validate_input_image_format(struct device *dev,
- u32 pix,
- int width, int height, int *line_len)
-{
- int val;
-
- if (pix != MEDIA_BUS_FMT_UYVY8_2X8 &&
- pix != MEDIA_BUS_FMT_Y8_1X8 &&
- pix != MEDIA_BUS_FMT_UV8_1X8 &&
- pix != MEDIA_BUS_FMT_SGRBG12_1X12) {
- dev_err(dev,
- "resizer validate output: pix format not supported, %d\n", pix);
- return -EINVAL;
- }
-
- if (!width || !height) {
- dev_err(dev,
- "resizer validate input: invalid width or height\n");
- return -EINVAL;
- }
-
- if (pix == MEDIA_BUS_FMT_UV8_1X8)
- resizer_calculate_line_length(pix, width,
- height, &val, line_len);
- else
- resizer_calculate_line_length(pix, width,
- height, line_len, &val);
-
- return 0;
-}
-
-static int
-resizer_validate_decimation(struct device *dev, enum ipipeif_decimation dec_en,
- unsigned char rsz, unsigned char frame_div_mode_en,
- int width)
-{
- if (dec_en && frame_div_mode_en) {
- dev_err(dev,
- "dec_en & frame_div_mode_en can not enabled simultaneously\n");
- return -EINVAL;
- }
-
- if (frame_div_mode_en) {
- dev_err(dev, "frame_div_mode mode not supported\n");
- return -EINVAL;
- }
-
- if (!dec_en)
- return 0;
-
- if (width <= VPFE_IPIPE_MAX_INPUT_WIDTH) {
- dev_err(dev,
- "image width to be more than %d for decimation\n",
- VPFE_IPIPE_MAX_INPUT_WIDTH);
- return -EINVAL;
- }
-
- if (rsz < IPIPEIF_RSZ_MIN || rsz > IPIPEIF_RSZ_MAX) {
- dev_err(dev, "rsz range is %d to %d\n",
- IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* resizer_calculate_normal_f_div_param() - Algorithm to calculate the frame
- * division parameters for the resizer in normal mode.
- */
-static int
-resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
- int output_width, struct resizer_scale_param *param)
-{
- /* rsz = R, input_width = H, output width = h in the equation */
- unsigned int val1;
- unsigned int rsz;
- unsigned int val;
- unsigned int h1;
- unsigned int h2;
- unsigned int o;
-
- if (output_width > input_width) {
- dev_err(dev, "frame div mode is used for scale down only\n");
- return -EINVAL;
- }
-
- rsz = (input_width << 8) / output_width;
- val = rsz << 1;
- val = ((input_width << 8) / val) + 1;
- o = 14;
- if (!(val % 2)) {
- h1 = val;
- } else {
- val = input_width << 7;
- val -= rsz >> 1;
- val /= rsz << 1;
- val <<= 1;
- val += 2;
- o += ((CEIL(rsz, 1024)) << 1);
- h1 = val;
- }
- h2 = output_width - h1;
- /* phi */
- val = (h1 * rsz) - (((input_width >> 1) - o) << 8);
- /* skip */
- val1 = ((val - 1024) >> 9) << 1;
- param->f_div.num_passes = MAX_PASSES;
- param->f_div.pass[0].o_hsz = h1 - 1;
- param->f_div.pass[0].i_hps = 0;
- param->f_div.pass[0].h_phs = 0;
- param->f_div.pass[0].src_hps = 0;
- param->f_div.pass[0].src_hsz = (input_width >> 2) + o;
- param->f_div.pass[1].o_hsz = h2 - 1;
- param->f_div.pass[1].i_hps = val1;
- param->f_div.pass[1].h_phs = (val - (val1 << 8));
- param->f_div.pass[1].src_hps = (input_width >> 2) - o;
- param->f_div.pass[1].src_hsz = (input_width >> 2) + o;
-
- return 0;
-}
-
-static int
-resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_rsz_config_params *config = &resizer->config.user_config;
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct v4l2_mbus_framefmt *outformat1, *outformat2;
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *informat;
- int decimation;
- int line_len_c;
- int line_len;
- int rsz;
- int ret;
-
- informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
- outformat1 = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
-
- decimation = vpfe_ipipeif_decimation_enabled(vpfe_dev);
- rsz = vpfe_ipipeif_get_rsz(vpfe_dev);
- if (decimation && param->user_config.frame_div_mode_en) {
- dev_err(dev,
- "dec_en & frame_div_mode_en cannot enabled simultaneously\n");
- return -EINVAL;
- }
-
- ret = resizer_validate_decimation(dev, decimation, rsz,
- param->user_config.frame_div_mode_en, informat->width);
- if (ret)
- return -EINVAL;
-
- ret = resizer_validate_input_image_format(dev, informat->code,
- informat->width, informat->height, &line_len);
- if (ret)
- return -EINVAL;
-
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
- param->rsz_en[RSZ_A] = ENABLE;
- ret = resizer_validate_output_image_format(dev, outformat1,
- &line_len, &line_len_c);
- if (ret)
- return ret;
- param->ext_mem_param[RSZ_A].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[RSZ_A].rsz_sdr_oft_c = line_len_c;
- configure_resizer_out_params(resizer, RSZ_A,
- &param->user_config.output1, 0, 1);
-
- if (outformat1->code == MEDIA_BUS_FMT_SGRBG12_1X12)
- param->rsz_common.raw_flip = 1;
- else
- param->rsz_common.raw_flip = 0;
-
- if (outformat1->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param,
- RSZ_A, ENABLE);
- else
- resizer_enable_422_420_conversion(param,
- RSZ_A, DISABLE);
- }
-
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
- param->rsz_en[RSZ_B] = ENABLE;
- ret = resizer_validate_output_image_format(dev, outformat2,
- &line_len, &line_len_c);
- if (ret)
- return ret;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
- configure_resizer_out_params(resizer, RSZ_B,
- &param->user_config.output2, 0, 1);
- if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param,
- RSZ_B, ENABLE);
- else
- resizer_enable_422_420_conversion(param,
- RSZ_B, DISABLE);
- }
-
- resizer_configure_common_in_params(resizer);
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
- resizer_calculate_resize_ratios(resizer, RSZ_A);
- resizer_calculate_sdram_offsets(resizer, RSZ_A);
- /* Overriding resize ratio calculation */
- if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
- param->rsz_rsc_param[RSZ_A].v_dif =
- (((informat->height + 1) * 2) * 256) /
- (param->rsz_rsc_param[RSZ_A].o_vsz + 1);
- }
- }
-
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
- resizer_calculate_resize_ratios(resizer, RSZ_B);
- resizer_calculate_sdram_offsets(resizer, RSZ_B);
- /* Overriding resize ratio calculation */
- if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
- param->rsz_rsc_param[RSZ_B].v_dif =
- (((informat->height + 1) * 2) * 256) /
- (param->rsz_rsc_param[RSZ_B].o_vsz + 1);
- }
- }
- if (param->user_config.frame_div_mode_en &&
- param->rsz_en[RSZ_A]) {
- if (!param->rsz_rsc_param[RSZ_A].dscale_en)
- ret = resizer_calculate_normal_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_A].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_A]);
- else
- ret = resizer_calculate_down_scale_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_A].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_A]);
- if (ret)
- return -EINVAL;
- }
- if (param->user_config.frame_div_mode_en &&
- param->rsz_en[RSZ_B]) {
- if (!param->rsz_rsc_param[RSZ_B].dscale_en)
- ret = resizer_calculate_normal_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_B].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_B]);
- else
- ret = resizer_calculate_down_scale_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_B].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_B]);
- if (ret)
- return -EINVAL;
- }
- param->rsz_common.passthrough = config->bypass;
- if (config->bypass)
- resizer_configure_passthru(resizer, 1);
- return 0;
-}
-
-static void
-resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
-{
-#define WIDTH_I 640
-#define HEIGHT_I 480
-#define WIDTH_O 640
-#define HEIGHT_O 480
- const struct resizer_params rsz_default_config = {
- .oper_mode = RESIZER_MODE_ONE_SHOT,
- .rsz_common = {
- .vsz = HEIGHT_I - 1,
- .hsz = WIDTH_I - 1,
- .src_img_fmt = RSZ_IMG_422,
- .raw_flip = 1, /* flip preserves raw format */
- .source = IPIPE_DATA,
- .passthrough = BYPASS_OFF,
- .yuv_y_max = 255,
- .yuv_c_max = 255,
- .rsz_seq_crv = DISABLE,
- .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
- },
- .rsz_rsc_param = {
- {
- .h_flip = DISABLE,
- .v_flip = DISABLE,
- .cen = DISABLE,
- .yen = DISABLE,
- .o_vsz = HEIGHT_O - 1,
- .o_hsz = WIDTH_O - 1,
- .v_dif = 256,
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dif = 256,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- {
- .h_flip = DISABLE,
- .v_flip = DISABLE,
- .cen = DISABLE,
- .yen = DISABLE,
- .o_vsz = HEIGHT_O - 1,
- .o_hsz = WIDTH_O - 1,
- .v_dif = 256,
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dif = 256,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- },
- .rsz2rgb = {
- {
- .rgb_en = DISABLE
- },
- {
- .rgb_en = DISABLE
- }
- },
- .ext_mem_param = {
- {
- .rsz_sdr_oft_y = WIDTH_O << 1,
- .rsz_sdr_ptr_e_y = HEIGHT_O,
- .rsz_sdr_oft_c = WIDTH_O,
- .rsz_sdr_ptr_e_c = HEIGHT_O >> 1,
- },
- {
- .rsz_sdr_oft_y = WIDTH_O << 1,
- .rsz_sdr_ptr_e_y = HEIGHT_O,
- .rsz_sdr_oft_c = WIDTH_O,
- .rsz_sdr_ptr_e_c = HEIGHT_O,
- },
- },
- .rsz_en[0] = ENABLE,
- .rsz_en[1] = DISABLE,
- .user_config = {
- .output1 = {
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- .output2 = {
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- .yuv_y_max = 255,
- .yuv_c_max = 255,
- .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
- },
- };
- memcpy(&resizer->config, &rsz_default_config,
- sizeof(struct resizer_params));
-}
-
-/*
- * resizer_set_configuration() - set resizer config
- * @resizer: vpfe resizer device pointer.
- * @chan_config: resizer channel configuration.
- */
-static int
-resizer_set_configuration(struct vpfe_resizer_device *resizer,
- struct vpfe_rsz_config *chan_config)
-{
- if (!chan_config->config)
- resizer_set_defualt_configuration(resizer);
- else
- if (copy_from_user(&resizer->config.user_config,
- chan_config->config, sizeof(struct vpfe_rsz_config_params)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * resizer_get_configuration() - get resizer config
- * @resizer: vpfe resizer device pointer.
- * @chan_config: resizer channel configuration.
- */
-static int
-resizer_get_configuration(struct vpfe_resizer_device *resizer,
- struct vpfe_rsz_config *chan_config)
-{
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
-
- if (!chan_config->config) {
- dev_err(dev, "Resizer channel invalid pointer\n");
- return -EINVAL;
- }
-
- if (copy_to_user((void *)chan_config->config,
- (void *)&resizer->config.user_config,
- sizeof(struct vpfe_rsz_config_params))) {
- dev_err(dev, "resizer_get_configuration: Error in copy to user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/*
- * VPFE video operations
- */
-
-/*
- * resizer_a_video_out_queue() - RESIZER-A video out queue
- * @vpfe_dev: vpfe device pointer.
- * @addr: buffer address.
- */
-static int resizer_a_video_out_queue(struct vpfe_device *vpfe_dev,
- unsigned long addr)
-{
- struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
-
- return resizer_set_outaddr(resizer->base_addr,
- &resizer->config, RSZ_A, addr);
-}
-
-/*
- * resizer_b_video_out_queue() - RESIZER-B video out queue
- * @vpfe_dev: vpfe device pointer.
- * @addr: buffer address.
- */
-static int resizer_b_video_out_queue(struct vpfe_device *vpfe_dev,
- unsigned long addr)
-{
- struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
-
- return resizer_set_outaddr(resizer->base_addr,
- &resizer->config, RSZ_B, addr);
-}
-
-static const struct vpfe_video_operations resizer_a_video_ops = {
- .queue = resizer_a_video_out_queue,
-};
-
-static const struct vpfe_video_operations resizer_b_video_ops = {
- .queue = resizer_b_video_out_queue,
-};
-
-static void resizer_enable(struct vpfe_resizer_device *resizer, int en)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
- unsigned char val;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
- return;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF &&
- ipipeif_sink == IPIPEIF_INPUT_MEMORY) {
- do {
- val = regr_rsz(resizer->base_addr, RSZ_SRC_EN);
- } while (val);
-
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
- do {
- val = regr_rsz(resizer->base_addr, RSZ_A);
- } while (val);
- }
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
- do {
- val = regr_rsz(resizer->base_addr, RSZ_B);
- } while (val);
- }
- }
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
- rsz_enable(resizer->base_addr, RSZ_A, en);
-
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
- rsz_enable(resizer->base_addr, RSZ_B, en);
-}
-
-/*
- * resizer_ss_isr() - resizer module single-shot buffer scheduling isr
- * @resizer: vpfe resizer device pointer.
- */
-static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
- struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct vpfe_pipeline *pipe = &video_out->pipe;
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
- u32 val;
-
- if (ipipeif_sink != IPIPEIF_INPUT_MEMORY)
- return;
-
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
- val = vpss_dma_complete_interrupt();
- if (val != 0 && val != 2)
- return;
- }
-
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
- spin_lock(&video_out->dma_queue_lock);
- vpfe_video_process_buffer_complete(video_out);
- video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
- vpfe_video_schedule_next_buffer(video_out);
- spin_unlock(&video_out->dma_queue_lock);
- }
-
- /* If resizer B is enabled */
- if (pipe->output_num > 1 && resizer->resizer_b.output ==
- RESIZER_OUPUT_MEMORY) {
- spin_lock(&video_out->dma_queue_lock);
- vpfe_video_process_buffer_complete(video_out2);
- video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
- vpfe_video_schedule_next_buffer(video_out2);
- spin_unlock(&video_out2->dma_queue_lock);
- }
-
- /* start HW if buffers are queued */
- if (vpfe_video_is_pipe_ready(pipe) &&
- resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
- resizer_enable(resizer, 1);
- vpfe_ipipe_enable(vpfe_dev, 1);
- vpfe_ipipeif_enable(vpfe_dev);
- }
-}
-
-/*
- * vpfe_resizer_buffer_isr() - resizer module buffer scheduling isr
- * @resizer: vpfe resizer device pointer.
- */
-void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
- struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
- struct vpfe_pipeline *pipe = &resizer->resizer_a.video_out.pipe;
- enum v4l2_field field;
- int fid;
-
- if (!video_out->started)
- return;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
- return;
-
- field = video_out->fmt.fmt.pix.field;
- if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
- if (video_out->cur_frm != video_out->next_frm) {
- vpfe_video_process_buffer_complete(video_out);
- if (pipe->output_num > 1)
- vpfe_video_process_buffer_complete(video_out2);
- }
-
- video_out->skip_frame_count--;
- if (!video_out->skip_frame_count) {
- video_out->skip_frame_count =
- video_out->skip_frame_count_init;
- rsz_src_enable(resizer->base_addr, 1);
- } else {
- rsz_src_enable(resizer->base_addr, 0);
- }
- return;
- }
-
- /* handle interlaced frame capture */
- fid = vpfe_isif_get_fid(vpfe_dev);
-
- /* switch the software maintained field id */
- video_out->field_id ^= 1;
- if (fid == video_out->field_id) {
- /*
- * we are in sync here, continue.
- * One frame is just being captured. If the
- * next frame is available, release the current
- * frame and move on
- */
- if (fid == 0 && video_out->cur_frm != video_out->next_frm) {
- vpfe_video_process_buffer_complete(video_out);
- if (pipe->output_num > 1)
- vpfe_video_process_buffer_complete(video_out2);
- }
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
- * May lose one frame.
- */
- video_out->field_id = fid;
- }
-}
-
-/*
- * vpfe_resizer_dma_isr() - resizer module dma isr
- * @resizer: vpfe resizer device pointer.
- */
-void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
- struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct vpfe_pipeline *pipe = &video_out->pipe;
- int schedule_capture = 0;
- enum v4l2_field field;
- int fid;
-
- if (!video_out->started)
- return;
-
- if (pipe->state == VPFE_PIPELINE_STREAM_SINGLESHOT) {
- resizer_ss_isr(resizer);
- return;
- }
-
- field = video_out->fmt.fmt.pix.field;
- if (field == V4L2_FIELD_NONE) {
- if (!list_empty(&video_out->dma_queue) &&
- video_out->cur_frm == video_out->next_frm)
- schedule_capture = 1;
- } else {
- fid = vpfe_isif_get_fid(vpfe_dev);
- if (fid == video_out->field_id) {
- /* we are in sync here, continue */
- if (fid == 1 && !list_empty(&video_out->dma_queue) &&
- video_out->cur_frm == video_out->next_frm)
- schedule_capture = 1;
- }
- }
-
- if (!schedule_capture)
- return;
-
- spin_lock(&video_out->dma_queue_lock);
- vpfe_video_schedule_next_buffer(video_out);
- spin_unlock(&video_out->dma_queue_lock);
- if (pipe->output_num > 1) {
- spin_lock(&video_out2->dma_queue_lock);
- vpfe_video_schedule_next_buffer(video_out2);
- spin_unlock(&video_out2->dma_queue_lock);
- }
-}
-
-/*
- * V4L2 subdev operations
- */
-
-/*
- * resizer_ioctl() - Handle resizer module private ioctl's
- * @sd: pointer to v4l2 subdev structure
- * @cmd: configuration command
- * @arg: configuration argument
- */
-static long resizer_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
- struct vpfe_rsz_config *user_config;
- int ret = -ENOIOCTLCMD;
-
- if (&resizer->crop_resizer.subdev != sd)
- return ret;
-
- switch (cmd) {
- case VIDIOC_VPFE_RSZ_S_CONFIG:
- user_config = arg;
- ret = resizer_set_configuration(resizer, user_config);
- break;
-
- case VIDIOC_VPFE_RSZ_G_CONFIG:
- user_config = arg;
- if (!user_config->config) {
- dev_err(dev, "error in VIDIOC_VPFE_RSZ_G_CONFIG\n");
- return -EINVAL;
- }
- ret = resizer_get_configuration(resizer, user_config);
- break;
- }
- return ret;
-}
-
-static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
- u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
- struct resizer_params *param = &resizer->config;
- int ret = 0;
-
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY ||
- resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
- if (ipipeif_sink == IPIPEIF_INPUT_MEMORY &&
- ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
- ret = resizer_configure_in_single_shot_mode(resizer);
- else
- ret = resizer_configure_in_continious_mode(resizer);
- if (ret)
- return ret;
- ret = config_rsz_hw(resizer, param);
- }
- return ret;
-}
-
-/*
- * resizer_set_stream() - Enable/Disable streaming on resizer subdev
- * @sd: pointer to v4l2 subdev structure
- * @enable: 1 == Enable, 0 == Disable
- */
-static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
-
- if (&resizer->crop_resizer.subdev != sd)
- return 0;
-
- if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY)
- return 0;
-
- switch (enable) {
- case 1:
- if (resizer_do_hw_setup(resizer) < 0)
- return -EINVAL;
- resizer_enable(resizer, enable);
- break;
-
- case 0:
- resizer_enable(resizer, enable);
- break;
- }
-
- return 0;
-}
-
-/*
- * __resizer_get_format() - helper function for getting resizer format
- * @sd: pointer to subdev.
- * @cfg: V4L2 subdev pad config
- * @pad: pad number.
- * @which: wanted subdev format.
- * Return the wanted mbus frame format.
- */
-static struct v4l2_mbus_framefmt *
-__resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
-
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, cfg, pad);
- if (&resizer->crop_resizer.subdev == sd)
- return &resizer->crop_resizer.formats[pad];
- if (&resizer->resizer_a.subdev == sd)
- return &resizer->resizer_a.formats[pad];
- if (&resizer->resizer_b.subdev == sd)
- return &resizer->resizer_b.formats[pad];
- return NULL;
-}
-
-/*
- * resizer_try_format() - Handle try format by pad subdev method
- * @sd: pointer to subdev.
- * @cfg: V4L2 subdev pad config
- * @pad: pad num.
- * @fmt: pointer to v4l2 format structure.
- * @which: wanted subdev format.
- */
-static void
-resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- unsigned int max_out_height;
- unsigned int max_out_width;
- unsigned int i;
-
- if ((&resizer->resizer_a.subdev == sd && pad == RESIZER_PAD_SINK) ||
- (&resizer->resizer_b.subdev == sd && pad == RESIZER_PAD_SINK) ||
- (&resizer->crop_resizer.subdev == sd &&
- (pad == RESIZER_CROP_PAD_SOURCE ||
- pad == RESIZER_CROP_PAD_SOURCE2 || pad == RESIZER_CROP_PAD_SINK))) {
- for (i = 0; i < ARRAY_SIZE(resizer_input_formats); i++) {
- if (fmt->code == resizer_input_formats[i])
- break;
- }
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(resizer_input_formats))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
-
- fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
- MAX_IN_WIDTH);
- fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT,
- MAX_IN_HEIGHT);
- } else if (&resizer->resizer_a.subdev == sd &&
- pad == RESIZER_PAD_SOURCE) {
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
-
- for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
- if (fmt->code == resizer_output_formats[i])
- break;
- }
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(resizer_output_formats))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
-
- fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
- max_out_width);
- fmt->width &= ~15;
- fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
- max_out_height);
- } else if (&resizer->resizer_b.subdev == sd &&
- pad == RESIZER_PAD_SOURCE) {
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_B;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_B;
-
- for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
- if (fmt->code == resizer_output_formats[i])
- break;
- }
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(resizer_output_formats))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
-
- fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
- max_out_width);
- fmt->width &= ~15;
- fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
- max_out_height);
- }
-}
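/*
 * Illustrative note, not part of the original driver: source-pad sizes
 * are first clamped to [MIN_OUT_WIDTH, max_out_width] and
 * [MIN_OUT_HEIGHT, max_out_height], then the width is masked with ~15.
 * For example, a requested width of 1923 (assuming it is within the
 * pad's maximum) becomes 1920, a multiple of 16; heights are clamped
 * but not aligned.
 */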
-
-/*
- * resizer_set_format() - Handle set format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __resizer_get_format(sd, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- resizer_try_format(sd, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (&resizer->crop_resizer.subdev == sd) {
- if (fmt->pad == RESIZER_CROP_PAD_SINK) {
- resizer->crop_resizer.formats[fmt->pad] = fmt->format;
- } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE &&
- resizer->crop_resizer.output == RESIZER_A) {
- resizer->crop_resizer.formats[fmt->pad] = fmt->format;
- resizer->crop_resizer.formats[RESIZER_CROP_PAD_SOURCE2] =
- fmt->format;
- } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE2 &&
- resizer->crop_resizer.output2 == RESIZER_B) {
- resizer->crop_resizer.formats[fmt->pad] = fmt->format;
- resizer->crop_resizer.formats[RESIZER_CROP_PAD_SOURCE] =
- fmt->format;
- } else {
- return -EINVAL;
- }
- } else if (&resizer->resizer_a.subdev == sd) {
- if (fmt->pad == RESIZER_PAD_SINK)
- resizer->resizer_a.formats[fmt->pad] = fmt->format;
- else if (fmt->pad == RESIZER_PAD_SOURCE)
- resizer->resizer_a.formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
- } else if (&resizer->resizer_b.subdev == sd) {
- if (fmt->pad == RESIZER_PAD_SINK)
- resizer->resizer_b.formats[fmt->pad] = fmt->format;
- else if (fmt->pad == RESIZER_PAD_SOURCE)
- resizer->resizer_b.formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * resizer_get_format() - Retrieve the video format on a pad
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct v4l2_mbus_framefmt *format;
-
- format = __resizer_get_format(sd, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- fmt->format = *format;
-
- return 0;
-}
-
-/*
- * resizer_enum_frame_size() - enum frame sizes on pads
- * @sd: Pointer to subdevice.
- * @cfg: V4L2 subdev pad config
- * @fse: pointer to v4l2_subdev_frame_size_enum structure.
- */
-static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- resizer_try_format(sd, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- resizer_try_format(sd, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
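/*
 * Illustrative note, not part of the original driver: the enumeration
 * above probes the supported range by pushing degenerate 1x1 and
 * ((u32)-1 x (u32)-1) sizes through resizer_try_format() and letting
 * the clamping report the real limits; on a sink pad this yields
 * 32x32 (MIN_IN_WIDTH x MIN_IN_HEIGHT) up to 4095x4095
 * (MAX_IN_WIDTH x MAX_IN_HEIGHT).
 */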
-
-/*
- * resizer_enum_mbus_code() - enum mbus codes for pads
- * @sd: Pointer to subdevice.
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- */
-static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad == RESIZER_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(resizer_input_formats))
- return -EINVAL;
-
- code->code = resizer_input_formats[code->index];
- } else if (code->pad == RESIZER_PAD_SOURCE) {
- if (code->index >= ARRAY_SIZE(resizer_output_formats))
- return -EINVAL;
-
- code->code = resizer_output_formats[code->index];
- }
-
- return 0;
-}
-
-/*
- * resizer_init_formats() - Initialize formats on all pads
- * @sd: Pointer to subdevice.
- * @fh: V4L2 subdev file handle.
- *
- * Initialize all pad formats with default values. Try formats are
- * initialized on the file handle.
- */
-static int resizer_init_formats(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh)
-{
- __u32 which = V4L2_SUBDEV_FORMAT_TRY;
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_subdev_format format;
-
- if (&resizer->crop_resizer.subdev == sd) {
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_CROP_PAD_SINK;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_CROP_PAD_SOURCE;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_CROP_PAD_SOURCE2;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
- } else if (&resizer->resizer_a.subdev == sd) {
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SINK;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SOURCE;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- resizer_set_format(sd, fh->pad, &format);
- } else if (&resizer->resizer_b.subdev == sd) {
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SINK;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SOURCE;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B;
- resizer_set_format(sd, fh->pad, &format);
- }
-
- return 0;
-}
-
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops resizer_v4l2_core_ops = {
- .ioctl = resizer_ioctl,
-};
-
-/* subdev internal operations */
-static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
- .open = resizer_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
- .s_stream = resizer_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
- .enum_mbus_code = resizer_enum_mbus_code,
- .enum_frame_size = resizer_enum_frame_size,
- .get_fmt = resizer_get_format,
- .set_fmt = resizer_set_format,
-};
-
-/* subdev operations */
-static const struct v4l2_subdev_ops resizer_v4l2_ops = {
- .core = &resizer_v4l2_core_ops,
- .video = &resizer_v4l2_video_ops,
- .pad = &resizer_v4l2_pad_ops,
-};
-
-/*
- * Media entity operations
- */
-
-/*
- * resizer_link_setup() - Setup resizer connections
- * @entity: Pointer to media entity structure
- * @local: Pointer to the local media pad
- * @remote: Pointer to the remote media pad
- * @flags: Link flags
- * return -EINVAL or zero on success
- */
-static int resizer_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
- u16 ipipe_source = vpfe_dev->vpfe_ipipe.output;
-
- if (&resizer->crop_resizer.subdev == sd) {
- switch (local->index | media_entity_type(remote->entity)) {
- case RESIZER_CROP_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->crop_resizer.input =
- RESIZER_CROP_INPUT_NONE;
- break;
- }
-
- if (resizer->crop_resizer.input !=
- RESIZER_CROP_INPUT_NONE)
- return -EBUSY;
- if (ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
- resizer->crop_resizer.input =
- RESIZER_CROP_INPUT_IPIPEIF;
- else if (ipipe_source == IPIPE_OUTPUT_RESIZER)
- resizer->crop_resizer.input =
- RESIZER_CROP_INPUT_IPIPE;
- else
- return -EINVAL;
- break;
-
- case RESIZER_CROP_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->crop_resizer.output =
- RESIZER_CROP_OUTPUT_NONE;
- break;
- }
- if (resizer->crop_resizer.output !=
- RESIZER_CROP_OUTPUT_NONE)
- return -EBUSY;
- resizer->crop_resizer.output = RESIZER_A;
- break;
-
- case RESIZER_CROP_PAD_SOURCE2 | MEDIA_ENT_T_V4L2_SUBDEV:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->crop_resizer.output2 =
- RESIZER_CROP_OUTPUT_NONE;
- break;
- }
- if (resizer->crop_resizer.output2 !=
- RESIZER_CROP_OUTPUT_NONE)
- return -EBUSY;
- resizer->crop_resizer.output2 = RESIZER_B;
- break;
-
- default:
- return -EINVAL;
- }
- } else if (&resizer->resizer_a.subdev == sd) {
- switch (local->index | media_entity_type(remote->entity)) {
- case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_a.input = RESIZER_INPUT_NONE;
- break;
- }
- if (resizer->resizer_a.input != RESIZER_INPUT_NONE)
- return -EBUSY;
- resizer->resizer_a.input = RESIZER_INPUT_CROP_RESIZER;
- break;
-
- case RESIZER_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_a.output = RESIZER_OUTPUT_NONE;
- break;
- }
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
- return -EBUSY;
- resizer->resizer_a.output = RESIZER_OUPUT_MEMORY;
- break;
-
- default:
- return -EINVAL;
- }
- } else if (&resizer->resizer_b.subdev == sd) {
- switch (local->index | media_entity_type(remote->entity)) {
- case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_b.input = RESIZER_INPUT_NONE;
- break;
- }
- if (resizer->resizer_b.input != RESIZER_INPUT_NONE)
- return -EBUSY;
- resizer->resizer_b.input = RESIZER_INPUT_CROP_RESIZER;
- break;
-
- case RESIZER_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_b.output = RESIZER_OUTPUT_NONE;
- break;
- }
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
- return -EBUSY;
- resizer->resizer_b.output = RESIZER_OUPUT_MEMORY;
- break;
-
- default:
- return -EINVAL;
- }
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
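A note on the switch keys used above: the media framework's entity type constants
(MEDIA_ENT_T_V4L2_SUBDEV, MEDIA_ENT_T_DEVNODE) live in the high bits while pad indices
are small integers, so ORing local->index with media_entity_type(remote->entity) yields
a distinct case label per (pad, remote entity type) pair; for example
RESIZER_PAD_SOURCE | MEDIA_ENT_T_DEVNODE means "source pad linked to a video device
node".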
-
-static const struct media_entity_operations resizer_media_ops = {
- .link_setup = resizer_link_setup,
-};
-
-/*
- * vpfe_resizer_unregister_entities() - Unregister entity
- * @vpfe_rsz - pointer to resizer subdevice structure.
- */
-void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz)
-{
- /* unregister video devices */
- vpfe_video_unregister(&vpfe_rsz->resizer_a.video_out);
- vpfe_video_unregister(&vpfe_rsz->resizer_b.video_out);
-
- /* unregister subdev */
- v4l2_device_unregister_subdev(&vpfe_rsz->crop_resizer.subdev);
- v4l2_device_unregister_subdev(&vpfe_rsz->resizer_a.subdev);
- v4l2_device_unregister_subdev(&vpfe_rsz->resizer_b.subdev);
- /* cleanup entity */
- media_entity_cleanup(&vpfe_rsz->crop_resizer.subdev.entity);
- media_entity_cleanup(&vpfe_rsz->resizer_a.subdev.entity);
- media_entity_cleanup(&vpfe_rsz->resizer_b.subdev.entity);
-}
-
-/*
- * vpfe_resizer_register_entities() - Register entity
- * @resizer - pointer to resizer device.
- * @vdev: pointer to v4l2 device structure.
- */
-int vpfe_resizer_register_entities(struct vpfe_resizer_device *resizer,
- struct v4l2_device *vdev)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- unsigned int flags = 0;
- int ret;
-
- /* Register the crop resizer subdev */
- ret = v4l2_device_register_subdev(vdev, &resizer->crop_resizer.subdev);
- if (ret < 0) {
- pr_err("Failed to register crop resizer as v4l2-subdev\n");
- return ret;
- }
- /* Register Resizer-A subdev */
- ret = v4l2_device_register_subdev(vdev, &resizer->resizer_a.subdev);
- if (ret < 0) {
- pr_err("Failed to register resizer-a as v4l2-subdev\n");
- return ret;
- }
- /* Register Resizer-B subdev */
- ret = v4l2_device_register_subdev(vdev, &resizer->resizer_b.subdev);
- if (ret < 0) {
- pr_err("Failed to register resizer-b as v4l2-subdev\n");
- return ret;
- }
- /* Register video-out device for resizer-a */
- ret = vpfe_video_register(&resizer->resizer_a.video_out, vdev);
- if (ret) {
- pr_err("Failed to register RSZ-A video-out device\n");
- goto out_video_out2_register;
- }
- resizer->resizer_a.video_out.vpfe_dev = vpfe_dev;
-
- /* Register video-out device for resizer-b */
- ret = vpfe_video_register(&resizer->resizer_b.video_out, vdev);
- if (ret) {
- pr_err("Failed to register RSZ-B video-out device\n");
- goto out_video_out2_register;
- }
- resizer->resizer_b.video_out.vpfe_dev = vpfe_dev;
-
- /* create link between Resizer Crop----> Resizer A*/
- ret = media_entity_create_link(&resizer->crop_resizer.subdev.entity, 1,
- &resizer->resizer_a.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- /* create link between Resizer Crop----> Resizer B*/
- ret = media_entity_create_link(&resizer->crop_resizer.subdev.entity, 2,
- &resizer->resizer_b.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- /* create link between Resizer A ----> video out */
- ret = media_entity_create_link(&resizer->resizer_a.subdev.entity, 1,
- &resizer->resizer_a.video_out.video_dev.entity, 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- /* create link between Resizer B ----> video out */
- ret = media_entity_create_link(&resizer->resizer_b.subdev.entity, 1,
- &resizer->resizer_b.video_out.video_dev.entity, 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- return 0;
-
-out_create_link:
- vpfe_video_unregister(&resizer->resizer_b.video_out);
-out_video_out2_register:
- vpfe_video_unregister(&resizer->resizer_a.video_out);
- v4l2_device_unregister_subdev(&resizer->crop_resizer.subdev);
- v4l2_device_unregister_subdev(&resizer->resizer_a.subdev);
- v4l2_device_unregister_subdev(&resizer->resizer_b.subdev);
- media_entity_cleanup(&resizer->crop_resizer.subdev.entity);
- media_entity_cleanup(&resizer->resizer_a.subdev.entity);
- media_entity_cleanup(&resizer->resizer_b.subdev.entity);
- return ret;
-}
-
-/*
- * vpfe_resizer_init() - resizer device initialization.
- * @vpfe_rsz - pointer to resizer device
- * @pdev: platform device pointer.
- */
-int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev)
-{
- struct v4l2_subdev *sd = &vpfe_rsz->crop_resizer.subdev;
- struct media_pad *pads = &vpfe_rsz->crop_resizer.pads[0];
- struct media_entity *me = &sd->entity;
- static resource_size_t res_len;
- struct resource *res;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (!res)
- return -ENOENT;
-
- res_len = resource_size(res);
- res = request_mem_region(res->start, res_len, res->name);
- if (!res)
- return -EBUSY;
-
- vpfe_rsz->base_addr = ioremap_nocache(res->start, res_len);
- if (!vpfe_rsz->base_addr)
- return -EBUSY;
-
- v4l2_subdev_init(sd, &resizer_v4l2_ops);
- sd->internal_ops = &resizer_v4l2_internal_ops;
- strlcpy(sd->name, "DAVINCI RESIZER CROP", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, vpfe_rsz);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[RESIZER_CROP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[RESIZER_CROP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- pads[RESIZER_CROP_PAD_SOURCE2].flags = MEDIA_PAD_FL_SOURCE;
-
- vpfe_rsz->crop_resizer.input = RESIZER_CROP_INPUT_NONE;
- vpfe_rsz->crop_resizer.output = RESIZER_CROP_OUTPUT_NONE;
- vpfe_rsz->crop_resizer.output2 = RESIZER_CROP_OUTPUT_NONE;
- vpfe_rsz->crop_resizer.rsz_device = vpfe_rsz;
- me->ops = &resizer_media_ops;
- ret = media_entity_init(me, RESIZER_CROP_PADS_NUM, pads, 0);
- if (ret)
- return ret;
-
- sd = &vpfe_rsz->resizer_a.subdev;
- pads = &vpfe_rsz->resizer_a.pads[0];
- me = &sd->entity;
-
- v4l2_subdev_init(sd, &resizer_v4l2_ops);
- sd->internal_ops = &resizer_v4l2_internal_ops;
- strlcpy(sd->name, "DAVINCI RESIZER A", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, vpfe_rsz);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- vpfe_rsz->resizer_a.input = RESIZER_INPUT_NONE;
- vpfe_rsz->resizer_a.output = RESIZER_OUTPUT_NONE;
- vpfe_rsz->resizer_a.rsz_device = vpfe_rsz;
- me->ops = &resizer_media_ops;
- ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
- if (ret)
- return ret;
-
- sd = &vpfe_rsz->resizer_b.subdev;
- pads = &vpfe_rsz->resizer_b.pads[0];
- me = &sd->entity;
-
- v4l2_subdev_init(sd, &resizer_v4l2_ops);
- sd->internal_ops = &resizer_v4l2_internal_ops;
- strlcpy(sd->name, "DAVINCI RESIZER B", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, vpfe_rsz);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- vpfe_rsz->resizer_b.input = RESIZER_INPUT_NONE;
- vpfe_rsz->resizer_b.output = RESIZER_OUTPUT_NONE;
- vpfe_rsz->resizer_b.rsz_device = vpfe_rsz;
- me->ops = &resizer_media_ops;
- ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
- if (ret)
- return ret;
-
- vpfe_rsz->resizer_a.video_out.ops = &resizer_a_video_ops;
- vpfe_rsz->resizer_a.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ret = vpfe_video_init(&vpfe_rsz->resizer_a.video_out, "RSZ-A");
- if (ret) {
- pr_err("Failed to init RSZ video-out device\n");
- return ret;
- }
- vpfe_rsz->resizer_b.video_out.ops = &resizer_b_video_ops;
- vpfe_rsz->resizer_b.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ret = vpfe_video_init(&vpfe_rsz->resizer_b.video_out, "RSZ-B");
- if (ret) {
- pr_err("Failed to init RSZ video-out2 device\n");
- return ret;
- }
- memset(&vpfe_rsz->config, 0, sizeof(struct resizer_params));
-
- return 0;
-}
-
-void
-vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev)
-{
- struct resource *res;
-
- iounmap(vpfe_rsz->base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res)
- release_mem_region(res->start,
- resource_size(res));
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
deleted file mode 100644
index 93b0f44030aa..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_RESIZER_H
-#define _DAVINCI_VPFE_DM365_RESIZER_H
-
-enum resizer_oper_mode {
- RESIZER_MODE_CONTINIOUS = 0,
- RESIZER_MODE_ONE_SHOT = 1,
-};
-
-struct f_div_pass {
- unsigned int o_hsz;
- unsigned int i_hps;
- unsigned int h_phs;
- unsigned int src_hps;
- unsigned int src_hsz;
-};
-
-#define MAX_PASSES 2
-
-struct f_div_param {
- unsigned char en;
- unsigned int num_passes;
- struct f_div_pass pass[MAX_PASSES];
-};
-
-/* Resizer Rescale Parameters*/
-struct resizer_scale_param {
- bool h_flip;
- bool v_flip;
- bool cen;
- bool yen;
- unsigned short i_vps;
- unsigned short i_hps;
- unsigned short o_vsz;
- unsigned short o_hsz;
- unsigned short v_phs_y;
- unsigned short v_phs_c;
- unsigned short v_dif;
- /* resize method - Luminance */
- enum vpfe_rsz_intp_t v_typ_y;
- /* resize method - Chrominance */
- enum vpfe_rsz_intp_t v_typ_c;
- /* vertical lpf intensity - Luminance */
- unsigned char v_lpf_int_y;
- /* vertical lpf intensity - Chrominance */
- unsigned char v_lpf_int_c;
- unsigned short h_phs;
- unsigned short h_dif;
- /* resize method - Luminance */
- enum vpfe_rsz_intp_t h_typ_y;
- /* resize method - Chrominance */
- enum vpfe_rsz_intp_t h_typ_c;
- /* horizontal lpf intensity - Luminance */
- unsigned char h_lpf_int_y;
- /* horizontal lpf intensity - Chrominance */
- unsigned char h_lpf_int_c;
- bool dscale_en;
- enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
- enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
- /* store the calculated frame division parameter */
- struct f_div_param f_div;
-};
-
-enum resizer_rgb_t {
- OUTPUT_32BIT,
- OUTPUT_16BIT
-};
-
-enum resizer_rgb_msk_t {
- NOMASK = 0,
- MASKLAST2 = 1,
-};
-
-/* Resizer RGB Conversion Parameters */
-struct resizer_rgb {
- bool rgb_en;
- enum resizer_rgb_t rgb_typ;
- enum resizer_rgb_msk_t rgb_msk0;
- enum resizer_rgb_msk_t rgb_msk1;
- unsigned int rgb_alpha_val;
-};
-
-/* Resizer External Memory Parameters */
-struct rsz_ext_mem_param {
- unsigned int rsz_sdr_oft_y;
- unsigned int rsz_sdr_ptr_s_y;
- unsigned int rsz_sdr_ptr_e_y;
- unsigned int rsz_sdr_oft_c;
- unsigned int rsz_sdr_ptr_s_c;
- unsigned int rsz_sdr_ptr_e_c;
- /* offset to be added to buffer start when flipping for y/ycbcr */
- unsigned int flip_ofst_y;
- /* offset to be added to buffer start when flipping for c */
- unsigned int flip_ofst_c;
- /* c offset for YUV 420SP */
- unsigned int c_offset;
- /* User Defined Y offset for YUV 420SP or YUV420ILE data */
- unsigned int user_y_ofst;
- /* User Defined C offset for YUV 420SP data */
- unsigned int user_c_ofst;
-};
-
-enum rsz_data_source {
- IPIPE_DATA,
- IPIPEIF_DATA
-};
-
-enum rsz_src_img_fmt {
- RSZ_IMG_422,
- RSZ_IMG_420
-};
-
-enum rsz_dpaths_bypass_t {
- BYPASS_OFF = 0,
- BYPASS_ON = 1,
-};
-
-struct rsz_common_params {
- unsigned int vps;
- unsigned int vsz;
- unsigned int hps;
- unsigned int hsz;
- /* 420 or 422 */
- enum rsz_src_img_fmt src_img_fmt;
- /* Y or C when src_fmt is 420, 0 - y, 1 - c */
- unsigned char y_c;
- /* flip raw or ycbcr */
- unsigned char raw_flip;
- /* IPIPE or IPIPEIF data */
- enum rsz_data_source source;
- enum rsz_dpaths_bypass_t passthrough;
- unsigned char yuv_y_min;
- unsigned char yuv_y_max;
- unsigned char yuv_c_min;
- unsigned char yuv_c_max;
- bool rsz_seq_crv;
- enum vpfe_chr_pos out_chr_pos;
-};
-
-struct resizer_params {
- enum resizer_oper_mode oper_mode;
- struct rsz_common_params rsz_common;
- struct resizer_scale_param rsz_rsc_param[2];
- struct resizer_rgb rsz2rgb[2];
- struct rsz_ext_mem_param ext_mem_param[2];
- bool rsz_en[2];
- struct vpfe_rsz_config_params user_config;
-};
-
-#define ENABLE 1
-#define DISABLE (!ENABLE)
-
-#define RESIZER_CROP_PAD_SINK 0
-#define RESIZER_CROP_PAD_SOURCE 1
-#define RESIZER_CROP_PAD_SOURCE2 2
-
-#define RESIZER_CROP_PADS_NUM 3
-
-enum resizer_crop_input_entity {
- RESIZER_CROP_INPUT_NONE = 0,
- RESIZER_CROP_INPUT_IPIPEIF = 1,
- RESIZER_CROP_INPUT_IPIPE = 2,
-};
-
-enum resizer_crop_output_entity {
- RESIZER_CROP_OUTPUT_NONE,
- RESIZER_A,
- RESIZER_B,
-};
-
-struct dm365_crop_resizer_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[RESIZER_CROP_PADS_NUM];
- struct v4l2_mbus_framefmt formats[RESIZER_CROP_PADS_NUM];
- enum resizer_crop_input_entity input;
- enum resizer_crop_output_entity output;
- enum resizer_crop_output_entity output2;
- struct vpfe_resizer_device *rsz_device;
-};
-
-#define RESIZER_PAD_SINK 0
-#define RESIZER_PAD_SOURCE 1
-
-#define RESIZER_PADS_NUM 2
-
-enum resizer_input_entity {
- RESIZER_INPUT_NONE = 0,
- RESIZER_INPUT_CROP_RESIZER = 1,
-};
-
-enum resizer_output_entity {
- RESIZER_OUTPUT_NONE = 0,
- RESIZER_OUPUT_MEMORY = 1,
-};
-
-struct dm365_resizer_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[RESIZER_PADS_NUM];
- struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
- enum resizer_input_entity input;
- enum resizer_output_entity output;
- struct vpfe_video_device video_out;
- struct vpfe_resizer_device *rsz_device;
-};
-
-struct vpfe_resizer_device {
- struct dm365_crop_resizer_device crop_resizer;
- struct dm365_resizer_device resizer_a;
- struct dm365_resizer_device resizer_b;
- struct resizer_params config;
- void __iomem *base_addr;
-};
-
-int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev);
-int vpfe_resizer_register_entities(struct vpfe_resizer_device *vpfe_rsz,
- struct v4l2_device *v4l2_dev);
-void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz);
-void vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev);
-void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer);
-void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer);
-
-#endif /* _DAVINCI_VPFE_DM365_RESIZER_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe.h b/drivers/staging/media/davinci_vpfe/vpfe.h
deleted file mode 100644
index 0587bc52a840..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _VPFE_H
-#define _VPFE_H
-
-#ifdef __KERNEL__
-#include <linux/v4l2-subdev.h>
-#include <linux/clk.h>
-#include <linux/i2c.h>
-
-#include <media/davinci/vpfe_types.h>
-
-#define CAPTURE_DRV_NAME "vpfe-capture"
-
-struct vpfe_route {
- __u32 input;
- __u32 output;
-};
-
-enum vpfe_subdev_id {
- VPFE_SUBDEV_TVP5146 = 1,
- VPFE_SUBDEV_MT9T031 = 2,
- VPFE_SUBDEV_TVP7002 = 3,
- VPFE_SUBDEV_MT9P031 = 4,
-};
-
-struct vpfe_ext_subdev_info {
- /* v4l2 subdev */
- struct v4l2_subdev *subdev;
- /* Sub device module name */
- char module_name[32];
- /* Sub device group id */
- int grp_id;
- /* Number of inputs supported */
- int num_inputs;
- /* inputs available at the sub device */
- struct v4l2_input *inputs;
- /* Sub dev routing information for each input */
- struct vpfe_route *routes;
- /* ccdc bus/interface configuration */
- struct vpfe_hw_if_param ccdc_if_params;
- /* i2c subdevice board info */
- struct i2c_board_info board_info;
- /* Is this a camera sub device ? */
- unsigned is_camera:1;
- /* check if sub dev supports routing */
- unsigned can_route:1;
- /* registered ? */
- unsigned registered:1;
-};
-
-struct vpfe_config {
- /* Number of sub devices connected to vpfe */
- int num_subdevs;
- /* information about each subdev */
- struct vpfe_ext_subdev_info *sub_devs;
- /* evm card info */
- char *card_name;
- /* setup function for the input path */
- int (*setup_input)(enum vpfe_subdev_id id);
- /* number of clocks */
- int num_clocks;
- /* clocks used for vpfe capture */
- char *clocks[];
-};
-#endif
-#endif
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
deleted file mode 100644
index 57426199ad7a..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- *
- *
- * Driver name : VPFE Capture driver
- * VPFE Capture driver allows applications to capture and stream video
- * frames on DaVinci SoCs (DM6446, DM355 etc) from a YUV source such as
- * TVP5146 or Raw Bayer RGB image data from an image sensor
- * such as Micron's MT9T001, MT9T031 etc.
- *
- * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
- * consists of a Video Processing Front End (VPFE) for capturing
- * video/raw image data and Video Processing Back End (VPBE) for displaying
- * YUV data through an in-built analog encoder or Digital LCD port. This
- * driver is for capture through VPFE. A typical EVM using these SoCs has the
- * following high-level configuration.
- *
- * decoder(TVP5146/ YUV/
- * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
- * data input | |
- * V |
- * SDRAM |
- * V
- * Image Processor
- * |
- * V
- * SDRAM
- * The data flow happens from a decoder connected to the VPFE over a
- * YUV embedded (BT.656/BT.1120) or separate sync or raw bayer rgb interface
- * and to the input of VPFE through an optional MUX (if more inputs are
- * to be interfaced on the EVM). The input data is first passed through
- * CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
- * does very little or no processing on YUV data, but pre-processes Raw
- * Bayer RGB data through modules such as Defect Pixel Correction (DFC),
- * Color Space Conversion (CSC), data gain/offset etc. After this, data
- * can be written to SDRAM or can be connected to the image processing
- * block such as IPIPE (on DM355/DM365 only).
- *
- * Features supported
- * - MMAP IO
- * - USERPTR IO
- * - Capture using TVP5146 over BT.656
- * - Support for interfacing decoders using sub device model
- * - Work with DM365 or DM355 or DM6446 CCDC to do Raw Bayer
- * RGB/YUV data capture to SDRAM.
- * - Chaining of Image Processor
- * - SINGLE-SHOT mode
- */
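In media-controller terms, the links created later by vpfe_register_entities() and by
vpfe_resizer_register_entities() wire the blocks described above roughly as follows (a
simplified sketch of the entity graph, not an exhaustive list of pads):

	/*
	 * sensor/decoder --> ISIF (CCDC) --> IPIPEIF --> IPIPE --> crop resizer
	 *                                        \__________________/
	 *                                   (IPIPEIF may also feed the crop
	 *                                    resizer directly)
	 *
	 * crop resizer --> RESIZER A --> "RSZ-A" video capture node
	 * crop resizer --> RESIZER B --> "RSZ-B" video capture node
	 */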
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "vpfe.h"
-#include "vpfe_mc_capture.h"
-
-static bool debug;
-static bool interface;
-
-module_param(interface, bool, S_IRUGO);
-module_param(debug, bool, 0644);
-
-/**
- * VPFE capture can be used for capturing video such as from TVP5146 or TVP7002
- * and for capturing raw bayer data from camera sensors such as mt9p031. At this
- * point there is a problem with the co-existence of mt9p031 and tvp5146 due to
- * an i2c address collision, so set the variable below from bootargs to select
- * either video capture or camera capture.
- * interface = 0 - video capture (from TVP514x or such),
- * interface = 1 - Camera capture (from mt9p031 or such)
- * Re-visit this when we fix the co-existence issue
- */
-MODULE_PARM_DESC(interface, "interface 0-1 (default:0)");
-MODULE_PARM_DESC(debug, "Debug level 0-1");
-
-MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Texas Instruments");
-
-/* map mbus_fmt to pixelformat */
-void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix)
-{
- switch (mbus->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_YUYV10_1X20:
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- pix->pixelformat = V4L2_PIX_FMT_SBGGR16;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
- pix->pixelformat = V4L2_PIX_FMT_NV12;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_GREY;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_UV8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_UV8;
- pix->bytesperline = pix->width;
- break;
-
- default:
- pr_err("Invalid mbus code set\n");
- }
- /* pitch should be 32 bytes aligned */
- pix->bytesperline = ALIGN(pix->bytesperline, 32);
- if (pix->pixelformat == V4L2_PIX_FMT_NV12)
- pix->sizeimage = pix->bytesperline * pix->height +
- ((pix->bytesperline * pix->height) >> 1);
- else
- pix->sizeimage = pix->bytesperline * pix->height;
-}
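A worked example of the size computation above, with illustrative numbers: for a
642x480 frame, YUYV gives bytesperline = ALIGN(642 * 2, 32) = 1312 and
sizeimage = 1312 * 480 = 629760 bytes, while NV12 gives
bytesperline = ALIGN(642, 32) = 672 and sizeimage = 672 * 480 + (672 * 480) / 2 =
483840 bytes, the extra half covering the interleaved CbCr plane.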
-
-/* ISR for VINT0*/
-static irqreturn_t vpfe_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_isr\n");
- vpfe_isif_buffer_isr(&vpfe_dev->vpfe_isif);
- vpfe_resizer_buffer_isr(&vpfe_dev->vpfe_resizer);
- return IRQ_HANDLED;
-}
-
-/* vpfe_vdint1_isr() - isr handler for VINT1 interrupt */
-static irqreturn_t vpfe_vdint1_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_vdint1_isr\n");
- vpfe_isif_vidint1_isr(&vpfe_dev->vpfe_isif);
- return IRQ_HANDLED;
-}
-
-/* vpfe_imp_dma_isr() - ISR for ipipe dma completion */
-static irqreturn_t vpfe_imp_dma_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_imp_dma_isr\n");
- vpfe_ipipeif_ss_buffer_isr(&vpfe_dev->vpfe_ipipeif);
- vpfe_resizer_dma_isr(&vpfe_dev->vpfe_resizer);
- return IRQ_HANDLED;
-}
-
-/*
- * vpfe_disable_clock() - Disable clocks for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Disables clocks defined in vpfe configuration. The function
- * assumes that at least one clock is defined, which is
- * true as of now.
- */
-static void vpfe_disable_clock(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
- int i;
-
- for (i = 0; i < vpfe_cfg->num_clocks; i++) {
- clk_disable_unprepare(vpfe_dev->clks[i]);
- clk_put(vpfe_dev->clks[i]);
- }
- kzfree(vpfe_dev->clks);
- v4l2_info(vpfe_dev->pdev->driver, "vpfe capture clocks disabled\n");
-}
-
-/*
- * vpfe_enable_clock() - Enable clocks for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Enables clocks defined in vpfe configuration. The function
- * assumes that at least one clock is defined, which is
- * true as of now.
- */
-static int vpfe_enable_clock(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
- int ret = -EFAULT;
- int i;
-
- if (!vpfe_cfg->num_clocks)
- return 0;
-
- vpfe_dev->clks = kcalloc(vpfe_cfg->num_clocks,
- sizeof(struct clock *), GFP_KERNEL);
- if (vpfe_dev->clks == NULL)
- return -ENOMEM;
-
- for (i = 0; i < vpfe_cfg->num_clocks; i++) {
- if (vpfe_cfg->clocks[i] == NULL) {
- v4l2_err(vpfe_dev->pdev->driver,
- "clock %s is not defined in vpfe config\n",
- vpfe_cfg->clocks[i]);
- goto out;
- }
-
- vpfe_dev->clks[i] =
- clk_get(vpfe_dev->pdev, vpfe_cfg->clocks[i]);
- if (IS_ERR(vpfe_dev->clks[i])) {
- v4l2_err(vpfe_dev->pdev->driver,
- "Failed to get clock %s\n",
- vpfe_cfg->clocks[i]);
- goto out;
- }
-
- if (clk_prepare_enable(vpfe_dev->clks[i])) {
- v4l2_err(vpfe_dev->pdev->driver,
- "vpfe clock %s not enabled\n",
- vpfe_cfg->clocks[i]);
- goto out;
- }
-
- v4l2_info(vpfe_dev->pdev->driver, "vpss clock %s enabled",
- vpfe_cfg->clocks[i]);
- }
-
- return 0;
-out:
- for (i = 0; i < vpfe_cfg->num_clocks; i++)
- if (!IS_ERR(vpfe_dev->clks[i])) {
- clk_disable_unprepare(vpfe_dev->clks[i]);
- clk_put(vpfe_dev->clks[i]);
- }
-
- v4l2_err(vpfe_dev->pdev->driver, "Failed to enable clocks\n");
- kzfree(vpfe_dev->clks);
-
- return ret;
-}
-
-/*
- * vpfe_detach_irq() - Detach IRQs for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Detach all IRQs defined in vpfe configuration.
- */
-static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
-{
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
- free_irq(vpfe_dev->imp_dma_irq, vpfe_dev);
-}
-
-/*
- * vpfe_attach_irq() - Attach IRQs for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Attach all IRQs defined in vpfe configuration.
- */
-static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
-{
- int ret = 0;
-
- ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
- "vpfe_capture0", vpfe_dev);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error: requesting VINT0 interrupt\n");
- return ret;
- }
-
- ret = request_irq(vpfe_dev->ccdc_irq1, vpfe_vdint1_isr, 0,
- "vpfe_capture1", vpfe_dev);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error: requesting VINT1 interrupt\n");
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- return ret;
- }
-
- ret = request_irq(vpfe_dev->imp_dma_irq, vpfe_imp_dma_isr,
- 0, "Imp_Sdram_Irq", vpfe_dev);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error: requesting IMP IRQ interrupt\n");
- free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * register_i2c_devices() - register all i2c v4l2 subdevs
- * @vpfe_dev - ptr to vpfe capture device
- *
- * register all i2c v4l2 subdevs
- */
-static int register_i2c_devices(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ext_subdev_info *sdinfo;
- struct vpfe_config *vpfe_cfg;
- struct i2c_adapter *i2c_adap;
- unsigned int num_subdevs;
- int ret;
- int i;
- int k;
-
- vpfe_cfg = vpfe_dev->cfg;
- i2c_adap = i2c_get_adapter(1);
- num_subdevs = vpfe_cfg->num_subdevs;
- vpfe_dev->sd =
- kcalloc(num_subdevs, sizeof(struct v4l2_subdev *),
- GFP_KERNEL);
- if (vpfe_dev->sd == NULL)
- return -ENOMEM;
-
- for (i = 0, k = 0; i < num_subdevs; i++) {
- sdinfo = &vpfe_cfg->sub_devs[i];
- /*
- * register subdevices based on interface setting. Currently
- * tvp5146 and mt9p031 cannot co-exists due to i2c address
- * conflicts. So only one of them is registered. Re-visit this
- * once we have support for i2c switch handling in i2c driver
- * framework
- */
- if (interface == sdinfo->is_camera) {
- /* setup input path */
- if (vpfe_cfg->setup_input &&
- vpfe_cfg->setup_input(sdinfo->grp_id) < 0) {
- ret = -EFAULT;
- v4l2_info(&vpfe_dev->v4l2_dev,
- "could not setup input for %s\n",
- sdinfo->module_name);
- goto probe_sd_out;
- }
- /* Load up the subdevice */
- vpfe_dev->sd[k] =
- v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
- i2c_adap, &sdinfo->board_info,
- NULL);
- if (vpfe_dev->sd[k]) {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s registered\n",
- sdinfo->module_name);
-
- vpfe_dev->sd[k]->grp_id = sdinfo->grp_id;
- k++;
-
- sdinfo->registered = 1;
- }
- } else {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s is not registered\n",
- sdinfo->module_name);
- }
- }
- vpfe_dev->num_ext_subdevs = k;
-
- return 0;
-
-probe_sd_out:
- kzfree(vpfe_dev->sd);
-
- return ret;
-}
-
-/*
- * vpfe_register_entities() - register all v4l2 subdevs and media entities
- * @vpfe_dev - ptr to vpfe capture device
- *
- * register all v4l2 subdevs, media entities, and creates links
- * between entities
- */
-static int vpfe_register_entities(struct vpfe_device *vpfe_dev)
-{
- unsigned int flags = 0;
- int ret;
- int i;
-
- /* register i2c devices first */
- ret = register_i2c_devices(vpfe_dev);
- if (ret)
- return ret;
-
- /* register rest of the sub-devs */
- ret = vpfe_isif_register_entities(&vpfe_dev->vpfe_isif,
- &vpfe_dev->v4l2_dev);
- if (ret)
- return ret;
-
- ret = vpfe_ipipeif_register_entities(&vpfe_dev->vpfe_ipipeif,
- &vpfe_dev->v4l2_dev);
- if (ret)
- goto out_isif_register;
-
- ret = vpfe_ipipe_register_entities(&vpfe_dev->vpfe_ipipe,
- &vpfe_dev->v4l2_dev);
- if (ret)
- goto out_ipipeif_register;
-
- ret = vpfe_resizer_register_entities(&vpfe_dev->vpfe_resizer,
- &vpfe_dev->v4l2_dev);
- if (ret)
- goto out_ipipe_register;
-
- /* create links now, starting with external(i2c) entities */
- for (i = 0; i < vpfe_dev->num_ext_subdevs; i++)
-		/* if the entity has no pads (e.g. an amplifier),
-		   we can't establish a link */
- if (vpfe_dev->sd[i]->entity.num_pads) {
- ret = media_entity_create_link(&vpfe_dev->sd[i]->entity,
- 0, &vpfe_dev->vpfe_isif.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
- }
-
- ret = media_entity_create_link(&vpfe_dev->vpfe_isif.subdev.entity, 1,
- &vpfe_dev->vpfe_ipipeif.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = media_entity_create_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
- &vpfe_dev->vpfe_ipipe.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = media_entity_create_link(&vpfe_dev->vpfe_ipipe.subdev.entity,
- 1, &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = media_entity_create_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
- &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = v4l2_device_register_subdev_nodes(&vpfe_dev->v4l2_dev);
- if (ret < 0)
- goto out_resizer_register;
-
- return 0;
-
-out_resizer_register:
- vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
-out_ipipe_register:
- vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
-out_ipipeif_register:
- vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
-out_isif_register:
- vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
-
- return ret;
-}
-
-/*
- * vpfe_unregister_entities() - unregister all v4l2 subdevs and media entities
- * @vpfe_dev - ptr to vpfe capture device
- *
- * unregister all v4l2 subdevs and media entities
- */
-static void vpfe_unregister_entities(struct vpfe_device *vpfe_dev)
-{
- vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
- vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
- vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
- vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
-}
-
-/*
- * vpfe_cleanup_modules() - cleanup all non-i2c v4l2 subdevs
- * @vpfe_dev - ptr to vpfe capture device
- * @pdev - pointer to platform device
- *
- * cleanup all v4l2 subdevs
- */
-static void vpfe_cleanup_modules(struct vpfe_device *vpfe_dev,
- struct platform_device *pdev)
-{
- vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
- vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
- vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
- vpfe_resizer_cleanup(&vpfe_dev->vpfe_resizer, pdev);
-}
-
-/*
- * vpfe_initialize_modules() - initialize all non-i2c v4l2 subdevs
- * @vpfe_dev - ptr to vpfe capture device
- * @pdev - pointer to platform device
- *
- * initialize all v4l2 subdevs and media entities
- */
-static int vpfe_initialize_modules(struct vpfe_device *vpfe_dev,
- struct platform_device *pdev)
-{
- int ret;
-
- ret = vpfe_isif_init(&vpfe_dev->vpfe_isif, pdev);
- if (ret)
- return ret;
-
- ret = vpfe_ipipeif_init(&vpfe_dev->vpfe_ipipeif, pdev);
- if (ret)
- goto out_isif_init;
-
- ret = vpfe_ipipe_init(&vpfe_dev->vpfe_ipipe, pdev);
- if (ret)
- goto out_ipipeif_init;
-
- ret = vpfe_resizer_init(&vpfe_dev->vpfe_resizer, pdev);
- if (ret)
- goto out_ipipe_init;
-
- return 0;
-
-out_ipipe_init:
- vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
-out_ipipeif_init:
- vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
-out_isif_init:
- vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
-
- return ret;
-}
-
-/*
- * vpfe_probe() : vpfe probe function
- * @pdev: platform device pointer
- *
- * This function creates device entries by registering itself to the V4L2 driver
- * and initializes the fields of each device object
- */
-static int vpfe_probe(struct platform_device *pdev)
-{
- struct vpfe_device *vpfe_dev;
- struct resource *res1;
- int ret = -ENOMEM;
-
- vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
- if (!vpfe_dev)
- return ret;
-
- if (pdev->dev.platform_data == NULL) {
- v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
-
- vpfe_dev->cfg = pdev->dev.platform_data;
- if (vpfe_dev->cfg->card_name == NULL ||
- vpfe_dev->cfg->sub_devs == NULL) {
- v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
-
- /* Get VINT0 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT0\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
- vpfe_dev->ccdc_irq0 = res1->start;
-
- /* Get VINT1 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT1\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
- vpfe_dev->ccdc_irq1 = res1->start;
-
- /* Get DMA irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for DMA\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
- vpfe_dev->imp_dma_irq = res1->start;
-
- vpfe_dev->pdev = &pdev->dev;
-
- /* enable vpss clocks */
- ret = vpfe_enable_clock(vpfe_dev);
- if (ret)
- goto probe_free_dev_mem;
-
- ret = vpfe_initialize_modules(vpfe_dev, pdev);
- if (ret)
- goto probe_disable_clock;
-
- vpfe_dev->media_dev.dev = vpfe_dev->pdev;
- strcpy((char *)&vpfe_dev->media_dev.model, "davinci-media");
-
- ret = media_device_register(&vpfe_dev->media_dev);
- if (ret) {
- v4l2_err(pdev->dev.driver,
- "Unable to register media device.\n");
- goto probe_out_entities_cleanup;
- }
-
- vpfe_dev->v4l2_dev.mdev = &vpfe_dev->media_dev;
- ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
- if (ret) {
- v4l2_err(pdev->dev.driver, "Unable to register v4l2 device.\n");
- goto probe_out_media_unregister;
- }
-
- v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
- /* set the driver data in platform device */
- platform_set_drvdata(pdev, vpfe_dev);
- /* register subdevs/entities */
- ret = vpfe_register_entities(vpfe_dev);
- if (ret)
- goto probe_out_v4l2_unregister;
-
- ret = vpfe_attach_irq(vpfe_dev);
- if (ret)
- goto probe_out_entities_unregister;
-
- return 0;
-
-probe_out_entities_unregister:
- vpfe_unregister_entities(vpfe_dev);
- kzfree(vpfe_dev->sd);
-probe_out_v4l2_unregister:
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
-probe_out_media_unregister:
- media_device_unregister(&vpfe_dev->media_dev);
-probe_out_entities_cleanup:
- vpfe_cleanup_modules(vpfe_dev, pdev);
-probe_disable_clock:
- vpfe_disable_clock(vpfe_dev);
-probe_free_dev_mem:
- kzfree(vpfe_dev);
-
- return ret;
-}
-
-/*
- * vpfe_remove : This function un-registers device from V4L2 driver
- */
-static int vpfe_remove(struct platform_device *pdev)
-{
- struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
-
- v4l2_info(pdev->dev.driver, "vpfe_remove\n");
-
- kzfree(vpfe_dev->sd);
- vpfe_detach_irq(vpfe_dev);
- vpfe_unregister_entities(vpfe_dev);
- vpfe_cleanup_modules(vpfe_dev, pdev);
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
- media_device_unregister(&vpfe_dev->media_dev);
- vpfe_disable_clock(vpfe_dev);
- kzfree(vpfe_dev);
-
- return 0;
-}
-
-static struct platform_driver vpfe_driver = {
- .driver = {
- .name = CAPTURE_DRV_NAME,
- },
- .probe = vpfe_probe,
- .remove = vpfe_remove,
-};
-
-module_platform_driver(vpfe_driver);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
deleted file mode 100644
index 8ad8d743f4e0..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_MC_CAPTURE_H
-#define _DAVINCI_VPFE_MC_CAPTURE_H
-
-#include "dm365_ipipe.h"
-#include "dm365_ipipeif.h"
-#include "dm365_isif.h"
-#include "dm365_resizer.h"
-#include "vpfe_video.h"
-
-#define VPFE_MAJOR_RELEASE 0
-#define VPFE_MINOR_RELEASE 0
-#define VPFE_BUILD 1
-#define VPFE_CAPTURE_VERSION_CODE ((VPFE_MAJOR_RELEASE << 16) | \
- (VPFE_MINOR_RELEASE << 8) | \
- VPFE_BUILD)
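With the 0.0.1 values above this packs to (0 << 16) | (0 << 8) | 1 = 0x000001, the same
major/minor/build byte layout as the kernel's KERNEL_VERSION() macro.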
-
-/* IPIPE hardware limits */
-#define IPIPE_MAX_OUTPUT_WIDTH_A 2176
-#define IPIPE_MAX_OUTPUT_WIDTH_B 640
-
-/* Based on max resolution supported. QXGA */
-#define IPIPE_MAX_OUTPUT_HEIGHT_A 1536
-/* Based on max resolution supported. VGA */
-#define IPIPE_MAX_OUTPUT_HEIGHT_B 480
-
-#define to_vpfe_device(ptr_module) \
- container_of(ptr_module, struct vpfe_device, vpfe_##ptr_module)
-#define to_device(ptr_module) \
- (to_vpfe_device(ptr_module)->dev)
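These helpers rely on token pasting plus container_of(): the macro argument must be
spelled exactly like the suffix of the corresponding vpfe_* member of struct
vpfe_device below. For example, to_vpfe_device(resizer), called on a pointer named
"resizer" to the embedded struct vpfe_resizer_device, expands to

	container_of(resizer, struct vpfe_device, vpfe_resizer)

which recovers the enclosing struct vpfe_device from the embedded member.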
-
-struct vpfe_device {
- /* external registered sub devices */
- struct v4l2_subdev **sd;
- /* number of registered external subdevs */
- unsigned int num_ext_subdevs;
- /* vpfe cfg */
- struct vpfe_config *cfg;
- /* clock ptrs for vpfe capture */
- struct clk **clks;
- /* V4l2 device */
- struct v4l2_device v4l2_dev;
- /* parent device */
- struct device *pdev;
- /* IRQ number for DMA transfer completion at the image processor */
- unsigned int imp_dma_irq;
- /* CCDC IRQs used when CCDC/ISIF output to SDRAM */
- unsigned int ccdc_irq0;
- unsigned int ccdc_irq1;
- /* media device */
- struct media_device media_dev;
- /* ccdc subdevice */
- struct vpfe_isif_device vpfe_isif;
- /* ipipeif subdevice */
- struct vpfe_ipipeif_device vpfe_ipipeif;
- /* ipipe subdevice */
- struct vpfe_ipipe_device vpfe_ipipe;
- /* resizer subdevice */
- struct vpfe_resizer_device vpfe_resizer;
-};
-
-/* File handle structure */
-struct vpfe_fh {
- struct v4l2_fh vfh;
- struct vpfe_video_device *video;
- /* Indicates whether this file handle is doing IO */
- u8 io_allowed;
-};
-
-void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix);
-
-#endif /* _DAVINCI_VPFE_MC_CAPTURE_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
deleted file mode 100644
index 87048a14c34d..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ /dev/null
@@ -1,1628 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <media/v4l2-ioctl.h>
-
-#include "vpfe.h"
-#include "vpfe_mc_capture.h"
-
-static int debug;
-
-/* get v4l2 subdev pointer to external subdev which is active */
-static struct media_entity *vpfe_get_input_entity
- (struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct media_pad *remote;
-
- remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
- if (remote == NULL) {
- pr_err("Invalid media connection to isif/ccdc\n");
- return NULL;
- }
- return remote->entity;
-}
-
-/* updates external subdev(sensor/decoder) which is active */
-static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_config *vpfe_cfg;
- struct v4l2_subdev *subdev;
- struct media_pad *remote;
- int i;
-
- remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
- if (remote == NULL) {
- pr_err("Invalid media connection to isif/ccdc\n");
- return -EINVAL;
- }
-
- subdev = media_entity_to_v4l2_subdev(remote->entity);
- vpfe_cfg = vpfe_dev->pdev->platform_data;
- for (i = 0; i < vpfe_cfg->num_subdevs; i++) {
- if (!strcmp(vpfe_cfg->sub_devs[i].module_name, subdev->name)) {
- video->current_ext_subdev = &vpfe_cfg->sub_devs[i];
- break;
- }
- }
-
-	/* if the user has not linked a decoder/sensor to the isif/ccdc */
- if (i == vpfe_cfg->num_subdevs) {
- pr_err("Invalid media chain connection to isif/ccdc\n");
- return -EINVAL;
- }
- /* find the v4l2 subdev pointer */
- for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) {
- if (!strcmp(video->current_ext_subdev->module_name,
- vpfe_dev->sd[i]->name))
- video->current_ext_subdev->subdev = vpfe_dev->sd[i];
- }
- return 0;
-}
-
-/* get the subdev which is connected to the output video node */
-static struct v4l2_subdev *
-vpfe_video_remote_subdev(struct vpfe_video_device *video, u32 *pad)
-{
- struct media_pad *remote = media_entity_remote_pad(&video->pad);
-
- if (remote == NULL || remote->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
- return NULL;
- if (pad)
- *pad = remote->index;
- return media_entity_to_v4l2_subdev(remote->entity);
-}
-
-/* get the format set at output pad of the adjacent subdev */
-static int
-__vpfe_video_get_format(struct vpfe_video_device *video,
- struct v4l2_format *format)
-{
- struct v4l2_subdev_format fmt;
- struct v4l2_subdev *subdev;
- struct media_pad *remote;
- u32 pad;
- int ret;
-
- subdev = vpfe_video_remote_subdev(video, &pad);
- if (subdev == NULL)
- return -EINVAL;
-
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- remote = media_entity_remote_pad(&video->pad);
- fmt.pad = remote->index;
-
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
- if (ret == -ENOIOCTLCMD)
- return -EINVAL;
-
- format->type = video->type;
- /* convert mbus_format to v4l2_format */
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(&fmt.format, &format->fmt.pix);
-
- return 0;
-}
-
-/* make a note of pipeline details */
-static void vpfe_prepare_pipeline(struct vpfe_video_device *video)
-{
- struct media_entity *entity = &video->video_dev.entity;
- struct media_device *mdev = entity->parent;
- struct vpfe_pipeline *pipe = &video->pipe;
- struct vpfe_video_device *far_end = NULL;
- struct media_entity_graph graph;
-
- pipe->input_num = 0;
- pipe->output_num = 0;
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- pipe->inputs[pipe->input_num++] = video;
- else
- pipe->outputs[pipe->output_num++] = video;
-
- mutex_lock(&mdev->graph_mutex);
- media_entity_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph))) {
- if (entity == &video->video_dev.entity)
- continue;
- if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
- continue;
- far_end = to_vpfe_video(media_entity_to_video_device(entity));
- if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- pipe->inputs[pipe->input_num++] = far_end;
- else
- pipe->outputs[pipe->output_num++] = far_end;
- }
- mutex_unlock(&mdev->graph_mutex);
-}
-
-/* update pipe state selected by user */
-static int vpfe_update_pipe_state(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
- int ret;
-
- vpfe_prepare_pipeline(video);
-
-	/* Find out if there is any input video node; if there is one, the
-	 * pipeline runs in single-shot mode, otherwise it captures
-	 * continuously from the external subdev.
-	 */
- if (pipe->input_num == 0) {
- pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
- ret = vpfe_update_current_ext_subdev(video);
- if (ret) {
- pr_err("Invalid external subdev\n");
- return ret;
- }
- } else {
- pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
- }
- video->initialized = 1;
- video->skip_frame_count = 1;
- video->skip_frame_count_init = 1;
- return 0;
-}
-
-/* checks whether the pipeline is ready for enabling */
-int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
-{
- int i;
-
- for (i = 0; i < pipe->input_num; i++)
- if (!pipe->inputs[i]->started ||
- pipe->inputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
- return 0;
- for (i = 0; i < pipe->output_num; i++)
- if (!pipe->outputs[i]->started ||
- pipe->outputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
- return 0;
- return 1;
-}
-
-/**
- * Validate a pipeline by checking both ends of all links for format
- * discrepancies.
- *
- * Return 0 if all formats match, or -EPIPE if at least one link is found with
- * different formats on its two ends.
- */
-static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
-{
- struct v4l2_subdev_format fmt_source;
- struct v4l2_subdev_format fmt_sink;
- struct v4l2_subdev *subdev;
- struct media_pad *pad;
- int ret;
-
- /*
-	 * It should not matter whether it is outputs[0] or outputs[1], as
-	 * the general idea is to traverse backwards, and the output video
-	 * node always has the format of the connected pad.
- */
- subdev = vpfe_video_remote_subdev(pipe->outputs[0], NULL);
- if (subdev == NULL)
- return -EPIPE;
-
- while (1) {
- /* Retrieve the sink format */
- pad = &subdev->entity.pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt_sink.pad = pad->index;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL,
- &fmt_sink);
-
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
- /* Retrieve the source format */
- pad = media_entity_remote_pad(pad);
- if (pad == NULL ||
- pad->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
- break;
-
- subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt_source.pad = pad->index;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
- /* Check if the two ends match */
- if (fmt_source.format.code != fmt_sink.format.code ||
- fmt_source.format.width != fmt_sink.format.width ||
- fmt_source.format.height != fmt_sink.format.height)
- return -EPIPE;
- }
- return 0;
-}
-
-/*
- * vpfe_pipeline_enable() - Enable streaming on a pipeline
- * @vpfe_dev: vpfe device
- * @pipe: vpfe pipeline
- *
- * Walk the entities chain starting at the pipeline output video node and start
- * all modules in the chain in the given mode.
- *
- * Return 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise.
- */
-static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
-{
- struct media_entity_graph graph;
- struct media_entity *entity;
- struct v4l2_subdev *subdev;
- struct media_device *mdev;
- int ret = 0;
-
- if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
- entity = vpfe_get_input_entity(pipe->outputs[0]);
- else
- entity = &pipe->inputs[0]->video_dev.entity;
-
- mdev = entity->parent;
- mutex_lock(&mdev->graph_mutex);
- media_entity_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph))) {
-
- if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
- continue;
- subdev = media_entity_to_v4l2_subdev(entity);
- ret = v4l2_subdev_call(subdev, video, s_stream, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- break;
- }
- mutex_unlock(&mdev->graph_mutex);
- return ret;
-}
-
-/*
- * vpfe_pipeline_disable() - Disable streaming on a pipeline
- * @vpfe_dev: vpfe device
- * @pipe: VPFE pipeline
- *
- * Walk the entities chain starting at the pipeline output video node and stop
- * all modules in the chain.
- *
- * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
- * can't be stopped.
- */
-static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
-{
- struct media_entity_graph graph;
- struct media_entity *entity;
- struct v4l2_subdev *subdev;
- struct media_device *mdev;
- int ret = 0;
-
- if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
- entity = vpfe_get_input_entity(pipe->outputs[0]);
- else
- entity = &pipe->inputs[0]->video_dev.entity;
-
- mdev = entity->parent;
- mutex_lock(&mdev->graph_mutex);
- media_entity_graph_walk_start(&graph, entity);
-
- while ((entity = media_entity_graph_walk_next(&graph))) {
-
- if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
- continue;
- subdev = media_entity_to_v4l2_subdev(entity);
- ret = v4l2_subdev_call(subdev, video, s_stream, 0);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- break;
- }
- mutex_unlock(&mdev->graph_mutex);
-
- return ret ? -ETIMEDOUT : 0;
-}
-
-/*
- * vpfe_pipeline_set_stream() - Enable/disable streaming on a pipeline
- * @vpfe_dev: VPFE device
- * @pipe: VPFE pipeline
- * @state: Stream state (stopped or active)
- *
- * Set the pipeline to the given stream state.
- *
- * Return 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise.
- */
-static int vpfe_pipeline_set_stream(struct vpfe_pipeline *pipe,
- enum vpfe_pipeline_stream_state state)
-{
- if (state == VPFE_PIPELINE_STREAM_STOPPED)
- return vpfe_pipeline_disable(pipe);
-
- return vpfe_pipeline_enable(pipe);
-}
-
-static int all_videos_stopped(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
- int i;
-
- for (i = 0; i < pipe->input_num; i++)
- if (pipe->inputs[i]->started)
- return 0;
- for (i = 0; i < pipe->output_num; i++)
- if (pipe->outputs[i]->started)
- return 0;
- return 1;
-}
-
-/*
- * vpfe_open() - open video device
- * @file: file pointer
- *
- * initialize media pipeline state, allocate memory for file handle
- *
- * Return 0 if successful, -ENOMEM or -ENODEV otherwise.
- */
-static int vpfe_open(struct file *file)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_fh *handle;
-
- /* Allocate memory for the file handle object */
- handle = kzalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
-
- if (handle == NULL)
- return -ENOMEM;
-
- v4l2_fh_init(&handle->vfh, &video->video_dev);
- v4l2_fh_add(&handle->vfh);
-
- mutex_lock(&video->lock);
-	/* If decoder is not initialized, initialize it */
- if (!video->initialized && vpfe_update_pipe_state(video)) {
- mutex_unlock(&video->lock);
- return -ENODEV;
- }
- /* Increment device users counter */
- video->usrs++;
- /* Set io_allowed member to false */
- handle->io_allowed = 0;
- handle->video = video;
- file->private_data = &handle->vfh;
- mutex_unlock(&video->lock);
-
- return 0;
-}
-
-/* get the next buffer available from dma queue */
-static unsigned long
-vpfe_video_get_next_buffer(struct vpfe_video_device *video)
-{
- video->cur_frm = video->next_frm =
- list_entry(video->dma_queue.next,
- struct vpfe_cap_buffer, list);
-
- list_del(&video->next_frm->list);
- video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
- return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
-}
-
-/* schedule the next buffer which is available on dma queue */
-void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
-
- if (list_empty(&video->dma_queue))
- return;
-
- video->next_frm = list_entry(video->dma_queue.next,
- struct vpfe_cap_buffer, list);
-
- if (VPFE_PIPELINE_STREAM_SINGLESHOT == video->pipe.state)
- video->cur_frm = video->next_frm;
-
- list_del(&video->next_frm->list);
- video->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
- addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb, 0);
- video->ops->queue(vpfe_dev, addr);
- video->state = VPFE_VIDEO_BUFFER_QUEUED;
-}
-
-/* schedule the buffer for capturing bottom field */
-void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
-
- addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
- addr += video->field_off;
- video->ops->queue(vpfe_dev, addr);
-}
-
-/* make buffer available for dequeue */
-void vpfe_video_process_buffer_complete(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
-
- v4l2_get_timestamp(&video->cur_frm->vb.v4l2_buf.timestamp);
- vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_DONE);
- if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
- video->cur_frm = video->next_frm;
-}
-
-/* vpfe_stop_capture() - stop streaming */
-static void vpfe_stop_capture(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
-
- video->started = 0;
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return;
- if (all_videos_stopped(video))
- vpfe_pipeline_set_stream(pipe,
- VPFE_PIPELINE_STREAM_STOPPED);
-}
-
-/*
- * vpfe_release() - release video device
- * @file: file pointer
- *
- * deletes buffer queue, frees the buffers and the vpfe file handle
- *
- * Return 0
- */
-static int vpfe_release(struct file *file)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct v4l2_fh *vfh = file->private_data;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = container_of(vfh, struct vpfe_fh, vfh);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
-
- /* Get the device lock */
- mutex_lock(&video->lock);
- /* if this instance is doing IO */
- if (fh->io_allowed) {
- if (video->started) {
- vpfe_stop_capture(video);
- /* mark pipe state as stopped in vpfe_release(),
- as app might call streamon() after streamoff()
- in which case driver has to start streaming.
- */
- video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED;
- vb2_streamoff(&video->buffer_queue,
- video->buffer_queue.type);
- }
- video->io_usrs = 0;
- /* Free buffers allocated */
- vb2_queue_release(&video->buffer_queue);
- vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
- }
- /* Decrement device users counter */
- video->usrs--;
- v4l2_fh_del(&fh->vfh);
- v4l2_fh_exit(&fh->vfh);
- /* If this is the last file handle */
- if (!video->usrs)
- video->initialized = 0;
- mutex_unlock(&video->lock);
- file->private_data = NULL;
- /* Free memory allocated to file handle object */
- v4l2_fh_del(vfh);
- kzfree(fh);
- return 0;
-}
-
-/*
- * vpfe_mmap() - it is used to map kernel-space buffers
- * into user space
- */
-static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
- return vb2_mmap(&video->buffer_queue, vma);
-}
-
-/*
- * vpfe_poll() - It is used for select/poll system call
- */
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
- if (video->started)
- return vb2_poll(&video->buffer_queue, file, wait);
- return 0;
-}
-
-/* vpfe capture driver file operations */
-static const struct v4l2_file_operations vpfe_fops = {
- .owner = THIS_MODULE,
- .open = vpfe_open,
- .release = vpfe_release,
- .unlocked_ioctl = video_ioctl2,
- .mmap = vpfe_mmap,
- .poll = vpfe_poll
-};
-
-/*
- * vpfe_querycap() - query capabilities of video device
- * @file: file pointer
- * @priv: void pointer
- * @cap: pointer to v4l2_capability structure
- *
- * fills v4l2 capabilities structure
- *
- * Return 0
- */
-static int vpfe_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
- strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
- strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
- strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
-
- return 0;
-}
-
-/*
- * vpfe_g_fmt() - get the format which is active on video device
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_format structure
- *
- * fills v4l2 format structure with active format
- *
- * Return 0
- */
-static int vpfe_g_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt\n");
- /* Fill in the information about format */
- *fmt = video->fmt;
- return 0;
-}
-
-/*
- * vpfe_enum_fmt() - enum formats supported on media chain
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_fmtdesc structure
- *
- * fills the v4l2_fmtdesc structure with the output format set on the adjacent
- * subdev; only one format is enumerated as the subdevs are already configured
- *
- * Return 0 if successful, error code otherwise
- */
-static int vpfe_enum_fmt(struct file *file, void *priv,
- struct v4l2_fmtdesc *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev_format sd_fmt;
- struct v4l2_mbus_framefmt mbus;
- struct v4l2_subdev *subdev;
- struct v4l2_format format;
- struct media_pad *remote;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
-
-	/* since the subdev pad format is already set,
-	   only one pixel format is available */
- if (fmt->index > 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
- return -EINVAL;
- }
- /* get the remote pad */
- remote = media_entity_remote_pad(&video->pad);
- if (remote == NULL) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "invalid remote pad for video node\n");
- return -EINVAL;
- }
- /* get the remote subdev */
- subdev = vpfe_video_remote_subdev(video, NULL);
- if (subdev == NULL) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "invalid remote subdev for video node\n");
- return -EINVAL;
- }
- sd_fmt.pad = remote->index;
- sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- /* get output format of remote subdev */
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
- if (ret) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "invalid remote subdev for video node\n");
- return ret;
- }
- /* convert to pix format */
- mbus.code = sd_fmt.format.code;
- mbus_to_pix(&mbus, &format.fmt.pix);
- /* copy the result */
- fmt->pixelformat = format.fmt.pix.pixelformat;
-
- return 0;
-}
-
-/*
- * vpfe_s_fmt() - set the format on video device
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_format structure
- *
- * validate and set the format on video device
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_s_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_format format;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
- /* If streaming is started, return error */
- if (video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
- return -EBUSY;
- }
- /* get adjacent subdev's output pad format */
- ret = __vpfe_video_get_format(video, &format);
- if (ret)
- return ret;
- *fmt = format;
- video->fmt = *fmt;
- return 0;
-}
-
-/*
- * vpfe_try_fmt() - try the format on video device
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_format structure
- *
- * validate the format, update with correct format
- * based on output format set on adjacent subdev
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_try_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_format format;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
- /* get adjacent subdev's output pad format */
- ret = __vpfe_video_get_format(video, &format);
- if (ret)
- return ret;
-
- *fmt = format;
- return 0;
-}
-
-/*
- * vpfe_enum_input() - enum inputs supported on media chain
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_fmtdesc structure
- *
- * fills the v4l2_input structure with the input available on the media chain;
- * only one input is enumerated as the media chain is set up by this time
- *
- * Return 0 if successful, -EINVAL if the media chain is invalid
- */
-static int vpfe_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_ext_subdev_info *sdinfo = video->current_ext_subdev;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
- /* enumerate from the subdev user has chosen through mc */
- if (inp->index < sdinfo->num_inputs) {
- memcpy(inp, &sdinfo->inputs[inp->index],
- sizeof(struct v4l2_input));
- return 0;
- }
- return -EINVAL;
-}
-
-/*
- * vpfe_g_input() - get index of the input which is active
- * @file: file pointer
- * @priv: void pointer
- * @index: pointer to unsigned int
- *
- * set index with input index which is active
- */
-static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
-
- *index = video->current_input;
- return 0;
-}
-
-/*
- * vpfe_s_input() - set input which is pointed by input index
- * @file: file pointer
- * @priv: void pointer
- * @index: pointer to unsigned int
- *
- * set input on external subdev
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_ext_subdev_info *sdinfo;
- struct vpfe_route *route;
- struct v4l2_input *inps;
- u32 output;
- u32 input;
- int ret;
- int i;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
- /*
- * If streaming is started return device busy
- * error
- */
- if (video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
- ret = -EBUSY;
- goto unlock_out;
- }
-
- sdinfo = video->current_ext_subdev;
- if (!sdinfo->registered) {
- ret = -EINVAL;
- goto unlock_out;
- }
- if (vpfe_dev->cfg->setup_input &&
- vpfe_dev->cfg->setup_input(sdinfo->grp_id) < 0) {
- ret = -EFAULT;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "couldn't setup input for %s\n",
- sdinfo->module_name);
- goto unlock_out;
- }
- route = &sdinfo->routes[index];
- if (route && sdinfo->can_route) {
- input = route->input;
- output = route->output;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id, video,
- s_routing, input, output, 0);
- if (ret) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "s_input:error in setting input in decoder\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- }
- /* set standards set by subdev in video device */
- for (i = 0; i < sdinfo->num_inputs; i++) {
- inps = &sdinfo->inputs[i];
- video->video_dev.tvnorms |= inps->std;
- }
- video->current_input = index;
-unlock_out:
- mutex_unlock(&video->lock);
- return ret;
-}
-
-/*
- * vpfe_querystd() - query std which is being input on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @std_id: pointer to v4l2_std_id structure
- *
- * call the external subdev through v4l2_device_call_until_err to
- * get the std that is currently active.
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_ext_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
-
- ret = mutex_lock_interruptible(&video->lock);
- sdinfo = video->current_ext_subdev;
- if (ret)
- return ret;
- /* Call querystd function of decoder device */
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, querystd, std_id);
- mutex_unlock(&video->lock);
- return ret;
-}
-
-/*
- * vpfe_s_std() - set std on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @std_id: pointer to v4l2_std_id structure
- *
- * set std pointed by std_id on external subdev by calling it using
- * v4l2_device_call_until_err
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_ext_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
-
- /* Call decoder driver function to set the standard */
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
- sdinfo = video->current_ext_subdev;
- /* If streaming is started, return device busy error */
- if (video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
- ret = -EBUSY;
- goto unlock_out;
- }
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_std, std_id);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
- video->stdid = V4L2_STD_UNKNOWN;
- goto unlock_out;
- }
- video->stdid = std_id;
-unlock_out:
- mutex_unlock(&video->lock);
- return ret;
-}
-
-static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
- *tvnorm = video->stdid;
- return 0;
-}
-
-/*
- * vpfe_enum_dv_timings() - enumerate dv_timings which are supported by
- *		the external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_enum_dv_timings structure
- *
- * enum dv_timings's which are supported by external subdev through
- * v4l2_subdev_call
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_enum_dv_timings(struct file *file, void *fh,
- struct v4l2_enum_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
-
- timings->pad = 0;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_dv_timings\n");
- return v4l2_subdev_call(subdev, pad, enum_dv_timings, timings);
-}
-
-/*
- * vpfe_query_dv_timings() - query the dv_timings which is being received
- *		by the external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_dv_timings structure
- *
- * get dv_timings which is being input on external subdev through
- * v4l2_subdev_call
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_query_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_query_dv_timings\n");
- return v4l2_subdev_call(subdev, video, query_dv_timings, timings);
-}
-
-/*
- * vpfe_s_dv_timings() - set dv_timings on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_dv_timings structure
- *
- * set dv_timings pointed by timings on external subdev through
- * v4l2_device_call_until_err; this also configures the amplifier
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_s_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_dv_timings\n");
-
- video->stdid = V4L2_STD_UNKNOWN;
- return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- video->current_ext_subdev->grp_id,
- video, s_dv_timings, timings);
-}
-
-/*
- * vpfe_g_dv_timings() - get dv_timings which is set on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_dv_timings structure
- *
- * get dv_timings which is set on external subdev through
- * v4l2_subdev_call
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_g_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_dv_timings\n");
- return v4l2_subdev_call(subdev, video, g_dv_timings, timings);
-}
-
-/*
- * Videobuf operations
- */
-/*
- * vpfe_buffer_queue_setup : Callback function for buffer setup.
- * @vq: vb2_queue ptr
- * @fmt: v4l2 format
- * @nbuffers: ptr to number of buffers requested by application
- * @nplanes: contains number of distinct video planes needed to hold a frame
- * @sizes[]: contains the size (in bytes) of each plane.
- * @alloc_ctxs: ptr to allocation context
- *
- * This callback function is called when REQBUFS is issued, to adjust
- * the number of buffers and the buffer size
- */
-static int
-vpfe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vq);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long size;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue_setup\n");
- size = video->fmt.fmt.pix.sizeimage;
-
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
-
- *nplanes = 1;
- sizes[0] = size;
- alloc_ctxs[0] = video->alloc_ctx;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "nbuffers=%d, size=%lu\n", *nbuffers, size);
- return 0;
-}
-
-/*
- * vpfe_buffer_prepare : callback function for buffer prepare
- * @vb: ptr to vb2_buffer
- *
- * This is the callback function for buffer prepare; it is invoked when
- * vb2_qbuf() is called. The buffer is prepared and the user-space virtual
- * address is converted into a physical address
- */
-static int vpfe_buffer_prepare(struct vb2_buffer *vb)
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
-
- if (vb->state != VB2_BUF_STATE_ACTIVE &&
- vb->state != VB2_BUF_STATE_PREPARED)
- return 0;
-
- /* Initialize buffer */
- vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
- if (vb2_plane_vaddr(vb, 0) &&
- vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
- return -EINVAL;
-
- addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- /* Make sure user addresses are aligned to 32 bytes */
- if (!ALIGN(addr, 32))
- return -EINVAL;
-
- return 0;
-}
-
-static void vpfe_buffer_queue(struct vb2_buffer *vb)
-{
- /* Get the file handle object and device object */
- struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_pipeline *pipe = &video->pipe;
- struct vpfe_cap_buffer *buf = container_of(vb,
- struct vpfe_cap_buffer, vb);
- unsigned long flags;
- unsigned long empty;
- unsigned long addr;
-
- spin_lock_irqsave(&video->dma_queue_lock, flags);
- empty = list_empty(&video->dma_queue);
- /* add the buffer to the DMA queue */
- list_add_tail(&buf->list, &video->dma_queue);
- spin_unlock_irqrestore(&video->dma_queue_lock, flags);
-	/* this case happens in single-shot mode */
- if (empty && video->started && pipe->state ==
- VPFE_PIPELINE_STREAM_SINGLESHOT &&
- video->state == VPFE_VIDEO_BUFFER_NOT_QUEUED) {
- spin_lock(&video->dma_queue_lock);
- addr = vpfe_video_get_next_buffer(video);
- video->ops->queue(vpfe_dev, addr);
-
- video->state = VPFE_VIDEO_BUFFER_QUEUED;
- spin_unlock(&video->dma_queue_lock);
-
- /* enable h/w each time in single shot */
- if (vpfe_video_is_pipe_ready(pipe))
- vpfe_pipeline_set_stream(pipe,
- VPFE_PIPELINE_STREAM_SINGLESHOT);
- }
-}
-
-/* vpfe_start_capture() - start streaming on all the subdevs */
-static int vpfe_start_capture(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
- int ret = 0;
-
- video->started = 1;
- if (vpfe_video_is_pipe_ready(pipe))
- ret = vpfe_pipeline_set_stream(pipe, pipe->state);
-
- return ret;
-}
-
-static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vq);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
- int ret;
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- goto streamoff;
-
- /* Get the next frame from the buffer queue */
- video->cur_frm = video->next_frm =
- list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list);
- /* Remove buffer from the buffer queue */
- list_del(&video->cur_frm->list);
- /* Mark state of the current frame to active */
- video->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
- /* Initialize field_id and started member */
- video->field_id = 0;
- addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb, 0);
- video->ops->queue(vpfe_dev, addr);
- video->state = VPFE_VIDEO_BUFFER_QUEUED;
-
- ret = vpfe_start_capture(video);
- if (ret) {
- struct vpfe_cap_buffer *buf, *tmp;
-
- vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_QUEUED);
- list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
- }
- goto unlock_out;
- }
-
- mutex_unlock(&video->lock);
-
- return ret;
-unlock_out:
- mutex_unlock(&video->lock);
-streamoff:
- ret = vb2_streamoff(&video->buffer_queue, video->buffer_queue.type);
- return 0;
-}
-
-static int vpfe_buffer_init(struct vb2_buffer *vb)
-{
- struct vpfe_cap_buffer *buf = container_of(vb,
- struct vpfe_cap_buffer, vb);
-
- INIT_LIST_HEAD(&buf->list);
- return 0;
-}
-
-/* abort streaming and wait for last buffer */
-static void vpfe_stop_streaming(struct vb2_queue *vq)
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vq);
- struct vpfe_video_device *video = fh->video;
-
- /* release all active buffers */
- if (video->cur_frm == video->next_frm) {
- vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
- } else {
- if (video->cur_frm != NULL)
- vb2_buffer_done(&video->cur_frm->vb,
- VB2_BUF_STATE_ERROR);
- if (video->next_frm != NULL)
- vb2_buffer_done(&video->next_frm->vb,
- VB2_BUF_STATE_ERROR);
- }
-
- while (!list_empty(&video->dma_queue)) {
- video->next_frm = list_entry(video->dma_queue.next,
- struct vpfe_cap_buffer, list);
- list_del(&video->next_frm->list);
- vb2_buffer_done(&video->next_frm->vb, VB2_BUF_STATE_ERROR);
- }
-}
-
-static void vpfe_buf_cleanup(struct vb2_buffer *vb)
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_cap_buffer *buf = container_of(vb,
- struct vpfe_cap_buffer, vb);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
- if (vb->state == VB2_BUF_STATE_ACTIVE)
- list_del_init(&buf->list);
-}
-
-static struct vb2_ops video_qops = {
- .queue_setup = vpfe_buffer_queue_setup,
- .buf_init = vpfe_buffer_init,
- .buf_prepare = vpfe_buffer_prepare,
- .start_streaming = vpfe_start_streaming,
- .stop_streaming = vpfe_stop_streaming,
- .buf_cleanup = vpfe_buf_cleanup,
- .buf_queue = vpfe_buffer_queue,
-};
-
-/*
- * vpfe_reqbufs() - REQBUFS is supported only once after opening
- * the device.
- */
-static int vpfe_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *req_buf)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = file->private_data;
- struct vb2_queue *q;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
-
- if (video->io_usrs != 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
- ret = -EBUSY;
- goto unlock_out;
- }
- video->memory = req_buf->memory;
-
- /* Initialize videobuf2 queue as per the buffer type */
- video->alloc_ctx = vb2_dma_contig_init_ctx(vpfe_dev->pdev);
- if (IS_ERR(video->alloc_ctx)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Failed to get the context\n");
- return PTR_ERR(video->alloc_ctx);
- }
-
- q = &video->buffer_queue;
- q->type = req_buf->type;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
- q->drv_priv = fh;
- q->min_buffers_needed = 1;
- q->ops = &video_qops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-
- ret = vb2_queue_init(q);
- if (ret) {
- v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
- vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
- return ret;
- }
-
- fh->io_allowed = 1;
- video->io_usrs = 1;
- INIT_LIST_HEAD(&video->dma_queue);
- ret = vb2_reqbufs(&video->buffer_queue, req_buf);
-
-unlock_out:
- mutex_unlock(&video->lock);
- return ret;
-}
-
-/*
- * vpfe_querybuf() - query buffers for exchange
- */
-static int vpfe_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- if (video->memory != V4L2_MEMORY_MMAP) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
- return -EINVAL;
- }
-
- /* Call vb2_querybuf to get information */
- return vb2_querybuf(&video->buffer_queue, buf);
-}
-
-/*
- * vpfe_qbuf() - queue buffers for capture or processing
- */
-static int vpfe_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = file->private_data;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
- /*
- * If this file handle is not allowed to do IO,
- * return error
- */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- return vb2_qbuf(&video->buffer_queue, p);
-}
-
-/*
- * vpfe_dqbuf() - dequeue a buffer which is done with processing
- */
-static int vpfe_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- return vb2_dqbuf(&video->buffer_queue,
- buf, (file->f_flags & O_NONBLOCK));
-}
-
-/*
- * vpfe_streamon() - start streaming
- * @file: file pointer
- * @priv: void pointer
- * @buf_type: enum v4l2_buf_type
- *
- * queue buffer onto hardware for capture/processing and
- * start all the subdevs which are in media chain
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_streamon(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_pipeline *pipe = &video->pipe;
- struct vpfe_fh *fh = file->private_data;
- struct vpfe_ext_subdev_info *sdinfo;
- int ret = -EINVAL;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return ret;
- }
- /* If file handle is not allowed IO, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
- sdinfo = video->current_ext_subdev;
- /* If buffer queue is empty, return error */
- if (list_empty(&video->buffer_queue.queued_list)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
- return -EIO;
- }
- /* Validate the pipeline */
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE == buf_type) {
- ret = vpfe_video_validate_pipeline(pipe);
- if (ret < 0)
- return ret;
- }
- /* Call vb2_streamon to start streaming */
- return vb2_streamon(&video->buffer_queue, buf_type);
-}
-
-/*
- * vpfe_streamoff() - stop streaming
- * @file: file pointer
- * @priv: void pointer
- * @buf_type: enum v4l2_buf_type
- *
- * stop all the subdevs which are in media chain
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = file->private_data;
- int ret = 0;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
-
- if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
-	/* If IO is not allowed for this file handle, return error */
- if (!fh->io_allowed) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- /* If streaming is not started, return error */
- if (!video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "device is not started\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
-
- vpfe_stop_capture(video);
- ret = vb2_streamoff(&video->buffer_queue, buf_type);
- mutex_unlock(&video->lock);
-
- return ret;
-}
-
-/* vpfe capture ioctl operations */
-static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
- .vidioc_querycap = vpfe_querycap,
- .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
- .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
- .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
- .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
- .vidioc_g_fmt_vid_out = vpfe_g_fmt,
- .vidioc_s_fmt_vid_out = vpfe_s_fmt,
- .vidioc_try_fmt_vid_out = vpfe_try_fmt,
- .vidioc_enum_fmt_vid_out = vpfe_enum_fmt,
- .vidioc_enum_input = vpfe_enum_input,
- .vidioc_g_input = vpfe_g_input,
- .vidioc_s_input = vpfe_s_input,
- .vidioc_querystd = vpfe_querystd,
- .vidioc_s_std = vpfe_s_std,
- .vidioc_g_std = vpfe_g_std,
- .vidioc_enum_dv_timings = vpfe_enum_dv_timings,
- .vidioc_query_dv_timings = vpfe_query_dv_timings,
- .vidioc_s_dv_timings = vpfe_s_dv_timings,
- .vidioc_g_dv_timings = vpfe_g_dv_timings,
- .vidioc_reqbufs = vpfe_reqbufs,
- .vidioc_querybuf = vpfe_querybuf,
- .vidioc_qbuf = vpfe_qbuf,
- .vidioc_dqbuf = vpfe_dqbuf,
- .vidioc_streamon = vpfe_streamon,
- .vidioc_streamoff = vpfe_streamoff,
-};
-
-/* VPFE video init function */
-int vpfe_video_init(struct vpfe_video_device *video, const char *name)
-{
- const char *direction;
- int ret;
-
- switch (video->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- direction = "output";
- video->pad.flags = MEDIA_PAD_FL_SINK;
- video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
-
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- direction = "input";
- video->pad.flags = MEDIA_PAD_FL_SOURCE;
- video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- break;
-
- default:
- return -EINVAL;
- }
- /* Initialize field of video device */
- video->video_dev.release = video_device_release;
- video->video_dev.fops = &vpfe_fops;
- video->video_dev.ioctl_ops = &vpfe_ioctl_ops;
- video->video_dev.minor = -1;
- video->video_dev.tvnorms = 0;
- snprintf(video->video_dev.name, sizeof(video->video_dev.name),
- "DAVINCI VIDEO %s %s", name, direction);
-
- spin_lock_init(&video->irqlock);
- spin_lock_init(&video->dma_queue_lock);
- mutex_init(&video->lock);
- ret = media_entity_init(&video->video_dev.entity,
- 1, &video->pad, 0);
- if (ret < 0)
- return ret;
-
- video_set_drvdata(&video->video_dev, video);
-
- return 0;
-}
-
-/* vpfe video device register function */
-int vpfe_video_register(struct vpfe_video_device *video,
- struct v4l2_device *vdev)
-{
- int ret;
-
- video->video_dev.v4l2_dev = vdev;
-
- ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1);
- if (ret < 0)
- pr_err("%s: could not register video device (%d)\n",
- __func__, ret);
- return ret;
-}
-
-/* vpfe video device unregister function */
-void vpfe_video_unregister(struct vpfe_video_device *video)
-{
- if (video_is_registered(&video->video_dev)) {
- video_unregister_device(&video->video_dev);
- media_entity_cleanup(&video->video_dev.entity);
- }
-}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
deleted file mode 100644
index 1b1b6c4a56b7..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_VIDEO_H
-#define _DAVINCI_VPFE_VIDEO_H
-
-#include <media/videobuf2-dma-contig.h>
-
-struct vpfe_device;
-
-/*
- * struct vpfe_video_operations - VPFE video operations
- * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
- * if there was no buffer previously queued.
- */
-struct vpfe_video_operations {
- int (*queue)(struct vpfe_device *vpfe_dev, unsigned long addr);
-};
-
-enum vpfe_pipeline_stream_state {
- VPFE_PIPELINE_STREAM_STOPPED = 0,
- VPFE_PIPELINE_STREAM_CONTINUOUS = 1,
- VPFE_PIPELINE_STREAM_SINGLESHOT = 2,
-};
-
-enum vpfe_video_state {
- /* indicates that buffer is not queued */
- VPFE_VIDEO_BUFFER_NOT_QUEUED = 0,
- /* indicates that buffer is queued */
- VPFE_VIDEO_BUFFER_QUEUED = 1,
-};
-
-struct vpfe_pipeline {
- /* media pipeline */
- struct media_pipeline *pipe;
- /* state of the pipeline, continuous,
- * single-shot or stopped
- */
- enum vpfe_pipeline_stream_state state;
- /* number of active input video entities */
- unsigned int input_num;
- /* number of active output video entities */
- unsigned int output_num;
- /* input video nodes in case of single-shot mode */
- struct vpfe_video_device *inputs[10];
- /* capturing video nodes */
- struct vpfe_video_device *outputs[10];
-};
-
-#define to_vpfe_pipeline(__e) \
- container_of((__e)->pipe, struct vpfe_pipeline, pipe)
-
-#define to_vpfe_video(vdev) \
- container_of(vdev, struct vpfe_video_device, video_dev)
-
-struct vpfe_cap_buffer {
- struct vb2_buffer vb;
- struct list_head list;
-};
-
-struct vpfe_video_device {
- /* vpfe device */
- struct vpfe_device *vpfe_dev;
- /* video dev */
- struct video_device video_dev;
- /* media pad of video entity */
- struct media_pad pad;
- /* video operations supported by video device */
- const struct vpfe_video_operations *ops;
- /* type of the video buffers used by user */
- enum v4l2_buf_type type;
- /* Indicates id of the field which is being captured */
- u32 field_id;
-	/* pipeline of which this video device is part */
- struct vpfe_pipeline pipe;
- /* Indicates whether streaming started */
- u8 started;
- /* Indicates state of the stream */
- unsigned int state;
- /* current input at the sub device */
- int current_input;
- /*
- * This field keeps track of type of buffer exchange mechanism
- * user has selected
- */
- enum v4l2_memory memory;
- /* number of open instances of the channel */
- u32 usrs;
- /* flag to indicate whether decoder is initialized */
- u8 initialized;
- /* skip frame count */
- u8 skip_frame_count;
- /* skip frame count init value */
- u8 skip_frame_count_init;
- /* time per frame for skipping */
- struct v4l2_fract timeperframe;
- /* ptr to currently selected sub device */
- struct vpfe_ext_subdev_info *current_ext_subdev;
- /* Pointer pointing to current vpfe_cap_buffer */
- struct vpfe_cap_buffer *cur_frm;
- /* Pointer pointing to next vpfe_cap_buffer */
- struct vpfe_cap_buffer *next_frm;
- /* Used to store pixel format */
- struct v4l2_format fmt;
- struct vb2_queue buffer_queue;
- /* allocator-specific contexts for each plane */
- struct vb2_alloc_ctx *alloc_ctx;
- /* Queue of filled frames */
- struct list_head dma_queue;
- spinlock_t irqlock;
- /* IRQ lock for DMA queue */
- spinlock_t dma_queue_lock;
- /* lock used to access this structure */
- struct mutex lock;
- /* number of users performing IO */
- u32 io_usrs;
- /* Currently selected or default standard */
- v4l2_std_id stdid;
- /*
- * offset where second field starts from the starting of the
- * buffer for field separated YCbCr formats
- */
- u32 field_off;
-};
-
-int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe);
-void vpfe_video_unregister(struct vpfe_video_device *video);
-int vpfe_video_register(struct vpfe_video_device *video,
- struct v4l2_device *vdev);
-int vpfe_video_init(struct vpfe_video_device *video, const char *name);
-void vpfe_video_process_buffer_complete(struct vpfe_video_device *video);
-void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video);
-void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video);
-
-#endif /* _DAVINCI_VPFE_VIDEO_H */
diff --git a/drivers/staging/media/deprecated/atmel/Kconfig b/drivers/staging/media/deprecated/atmel/Kconfig
new file mode 100644
index 000000000000..418841ea5a0d
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/Kconfig
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Atmel media platform drivers"
+
+config VIDEO_ATMEL_ISC
+ tristate "ATMEL Image Sensor Controller (ISC) support (DEPRECATED)"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && COMMON_CLK
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on !VIDEO_MICROCHIP_ISC_BASE || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ select V4L2_FWNODE
+ select VIDEO_ATMEL_ISC_BASE
+ help
+ This module makes the ATMEL Image Sensor Controller available
+ as a v4l2 device.
+
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2026. See the TODO file for more information.
+
+config VIDEO_ATMEL_XISC
+ tristate "ATMEL eXtended Image Sensor Controller (XISC) support (DEPRECATED)"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && COMMON_CLK
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on !VIDEO_MICROCHIP_ISC_BASE || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ select V4L2_FWNODE
+ select VIDEO_ATMEL_ISC_BASE
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ This module makes the ATMEL eXtended Image Sensor Controller
+ available as a v4l2 device.
+
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2026. See the TODO file for more information.
+
+config VIDEO_ATMEL_ISC_BASE
+ tristate
+ default n
+ help
+ ATMEL ISC and XISC common code base.
diff --git a/drivers/staging/media/deprecated/atmel/Makefile b/drivers/staging/media/deprecated/atmel/Makefile
new file mode 100644
index 000000000000..34eaeeac5bba
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+atmel-isc-objs = atmel-sama5d2-isc.o
+atmel-xisc-objs = atmel-sama7g5-isc.o
+atmel-isc-common-objs = atmel-isc-base.o atmel-isc-clk.o
+
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-common.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
+obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
diff --git a/drivers/staging/media/deprecated/atmel/TODO b/drivers/staging/media/deprecated/atmel/TODO
new file mode 100644
index 000000000000..71691df07a80
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/TODO
@@ -0,0 +1,34 @@
+The Atmel ISC driver is not compliant with the media controller specification.
+In order to evolve, the driver has to move to the media controller framework,
+to support enhanced features and future products which embed it.
+The move to media controller involves several changes which are
+not backwards compatible with the current usability of the driver.
+
+The best example is the way the format is propagated from the top video
+driver /dev/videoX down to the sensor.
+
+In a simple configuration, sensor ==> isc, the isc just calls subdev s_fmt
+and controls the sensor directly. This is achieved by having a lot of code
+inside the driver that will query the subdev at probe time and make a list
+of formats which are usable.
+Basically the user has nothing to configure, as the isc will handle
+everything at the top level. This is an easy way to capture, but also comes
+with the drawback of lack of flexibility.
+In a more complicated pipeline
+sensor ==> controller 1 ==> controller 2 ==> isc
+this will not be achievable, as controller 1 and controller 2 might be
+media-controller configurable, and will not propagate the formats down to
+the sensor.
+
+After discussions with the media maintainers, the decision is to move the
+Atmel ISC to staging as-is, keeping the Kconfig symbols and pointing the
+users of the driver to staging. Thus, all the existing users of the non
+media-controller paradigm will continue to be happy and use the old config
+way.
+
+The new driver was added in the media subsystem with a different
+symbol, with the conversion to media controller done, and new users
+of the driver will be able to use all the new features.
+
+The replacement driver is named VIDEO_MICROCHIP_ISC or
+VIDEO_MICROCHIP_XISC depending on the product flavor.
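
As a rough illustration of the format propagation described in the TODO above
(a user-space sketch only, not part of the patch: the subdev node path and the
8-bit Bayer media bus code are assumptions chosen for the example), under the
media-controller model the application sets the format on each subdev pad
itself instead of relying on the bridge driver:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/media-bus-format.h>
	#include <linux/v4l2-subdev.h>

	/* Set the active format on pad 0 of one subdev in the pipeline. */
	static int set_pad_format(const char *subdev, unsigned int width,
				  unsigned int height)
	{
		struct v4l2_subdev_format fmt;
		int fd, ret;

		fd = open(subdev, O_RDWR);
		if (fd < 0)
			return -1;

		memset(&fmt, 0, sizeof(fmt));
		fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt.pad = 0;
		fmt.format.width = width;
		fmt.format.height = height;
		fmt.format.code = MEDIA_BUS_FMT_SBGGR8_1X8;

		/* Repeat for each link in sensor ==> controller ==> isc. */
		ret = ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt);
		close(fd);
		return ret;
	}

The deprecated VIDEO_ATMEL_ISC driver performs this propagation implicitly;
the VIDEO_MICROCHIP_ISC replacement expects the pipeline to be configured
explicitly, typically with media-ctl.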
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc-base.c b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
new file mode 100644
index 000000000000..fb9ee8547392
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
@@ -0,0 +1,2008 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip Image Sensor Controller (ISC) common driver base
+ *
+ * Copyright (C) 2016-2019 Microchip Technology, Inc.
+ *
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+#include <linux/atmel-isc-media.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
+
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+static unsigned int sensor_preferred = 1;
+module_param(sensor_preferred, uint, 0644);
+MODULE_PARM_DESC(sensor_preferred,
+ "Sensor is preferred to output the specified format (1-on 0-off), default 1");
+
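+/*
+ * Raw Bayer media bus codes are allocated in the 0x3000 range; the
+ * mask check below relies on that numbering convention.
+ */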
+#define ISC_IS_FORMAT_RAW(mbus_code) \
+ (((mbus_code) & 0xf000) == 0x3000)
+
+#define ISC_IS_FORMAT_GREY(mbus_code) \
+ (((mbus_code) == MEDIA_BUS_FMT_Y10_1X10) | \
+ (((mbus_code) == MEDIA_BUS_FMT_Y8_1X8)))
+
+static inline void isc_update_v4l2_ctrls(struct isc_device *isc)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ /* In here we set the v4l2 controls w.r.t. our pipeline config */
+ v4l2_ctrl_s_ctrl(isc->r_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_R]);
+ v4l2_ctrl_s_ctrl(isc->b_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_B]);
+ v4l2_ctrl_s_ctrl(isc->gr_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GR]);
+ v4l2_ctrl_s_ctrl(isc->gb_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GB]);
+
+ v4l2_ctrl_s_ctrl(isc->r_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_R]);
+ v4l2_ctrl_s_ctrl(isc->b_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_B]);
+ v4l2_ctrl_s_ctrl(isc->gr_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_GR]);
+ v4l2_ctrl_s_ctrl(isc->gb_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_GB]);
+}
+
+static inline void isc_update_awb_ctrls(struct isc_device *isc)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ /* In here we set our actual hw pipeline config */
+
+ regmap_write(isc->regmap, ISC_WB_O_RGR,
+ ((ctrls->offset[ISC_HIS_CFG_MODE_R])) |
+ ((ctrls->offset[ISC_HIS_CFG_MODE_GR]) << 16));
+ regmap_write(isc->regmap, ISC_WB_O_BGB,
+ ((ctrls->offset[ISC_HIS_CFG_MODE_B])) |
+ ((ctrls->offset[ISC_HIS_CFG_MODE_GB]) << 16));
+ regmap_write(isc->regmap, ISC_WB_G_RGR,
+ ctrls->gain[ISC_HIS_CFG_MODE_R] |
+ (ctrls->gain[ISC_HIS_CFG_MODE_GR] << 16));
+ regmap_write(isc->regmap, ISC_WB_G_BGB,
+ ctrls->gain[ISC_HIS_CFG_MODE_B] |
+ (ctrls->gain[ISC_HIS_CFG_MODE_GB] << 16));
+}
+
+static inline void isc_reset_awb_ctrls(struct isc_device *isc)
+{
+ unsigned int c;
+
+ for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) {
+		/* gains are fixed point with 9 fractional bits (unity == 1 << 9) */
+		isc->ctrls.gain[c] = 1 << 9;
+		/* offsets are in 2's complement */
+ isc->ctrls.offset[c] = 0;
+ }
+}
+
+
+static int isc_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ unsigned int size = isc->fmt.fmt.pix.sizeimage;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int isc_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = isc->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(&isc->v4l2_dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ vbuf->field = isc->fmt.fmt.pix.field;
+
+ return 0;
+}
+
+static void isc_crop_pfe(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 h, w;
+
+ h = isc->fmt.fmt.pix.height;
+ w = isc->fmt.fmt.pix.width;
+
+ /*
+	 * In case the sensor is not RAW, it will output a pixel (12-16 bits)
+	 * as two samples on the ISC data bus (which is 8-12 bits wide).
+	 * The ISC counts each sample, so we need to multiply these values
+	 * by two to get the real number of samples for the required pixels.
+ */
+ if (!ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) {
+ h <<= 1;
+ w <<= 1;
+ }
+
+ /*
+ * We limit the column/row count that the ISC will output according
+ * to the configured resolution that we want.
+ * This will avoid the situation where the sensor is misconfigured,
+ * sending more data, and the ISC will just take it and DMA to memory,
+ * causing corruption.
+ */
+ regmap_write(regmap, ISC_PFE_CFG1,
+ (ISC_PFE_CFG1_COLMIN(0) & ISC_PFE_CFG1_COLMIN_MASK) |
+ (ISC_PFE_CFG1_COLMAX(w - 1) & ISC_PFE_CFG1_COLMAX_MASK));
+
+ regmap_write(regmap, ISC_PFE_CFG2,
+ (ISC_PFE_CFG2_ROWMIN(0) & ISC_PFE_CFG2_ROWMIN_MASK) |
+ (ISC_PFE_CFG2_ROWMAX(h - 1) & ISC_PFE_CFG2_ROWMAX_MASK));
+
+ regmap_update_bits(regmap, ISC_PFE_CFG0,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN);
+}
+
+static void isc_start_dma(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
+ u32 dctrl_dview;
+ dma_addr_t addr0;
+
+ addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
+ regmap_write(regmap, ISC_DAD0 + isc->offsets.dma, addr0);
+
+ switch (isc->config.fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ regmap_write(regmap, ISC_DAD1 + isc->offsets.dma,
+ addr0 + (sizeimage * 2) / 3);
+ regmap_write(regmap, ISC_DAD2 + isc->offsets.dma,
+ addr0 + (sizeimage * 5) / 6);
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ regmap_write(regmap, ISC_DAD1 + isc->offsets.dma,
+ addr0 + sizeimage / 2);
+ regmap_write(regmap, ISC_DAD2 + isc->offsets.dma,
+ addr0 + (sizeimage * 3) / 4);
+ break;
+ default:
+ break;
+ }
+
+ dctrl_dview = isc->config.dctrl_dview;
+
+ regmap_write(regmap, ISC_DCTRL + isc->offsets.dma,
+ dctrl_dview | ISC_DCTRL_IE_IS);
+ spin_lock(&isc->awb_lock);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
+ spin_unlock(&isc->awb_lock);
+}
+
+static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 val, bay_cfg;
+ const u32 *gamma;
+ unsigned int i;
+
+ /* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
+ for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+ val = pipeline & BIT(i) ? 1 : 0;
+ regmap_field_write(isc->pipeline[i], val);
+ }
+
+ if (!pipeline)
+ return;
+
+ bay_cfg = isc->config.sd_format->cfa_baycfg;
+
+ regmap_write(regmap, ISC_WB_CFG, bay_cfg);
+ isc_update_awb_ctrls(isc);
+ isc_update_v4l2_ctrls(isc);
+
+ regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL);
+
+ gamma = &isc->gamma_table[ctrls->gamma_index][0];
+ regmap_bulk_write(regmap, ISC_GAM_BENTRY, gamma, GAMMA_ENTRIES);
+ regmap_bulk_write(regmap, ISC_GAM_GENTRY, gamma, GAMMA_ENTRIES);
+ regmap_bulk_write(regmap, ISC_GAM_RENTRY, gamma, GAMMA_ENTRIES);
+
+ isc->config_dpc(isc);
+ isc->config_csc(isc);
+ isc->config_cbc(isc);
+ isc->config_cc(isc);
+ isc->config_gam(isc);
+}
+
+static int isc_update_profile(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 sr;
+ int counter = 100;
+
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO);
+
+ regmap_read(regmap, ISC_CTRLSR, &sr);
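+	/* poll until the hardware clears the UPPRO bit (up to ~100-200 ms) */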
+ while ((sr & ISC_CTRL_UPPRO) && counter--) {
+ usleep_range(1000, 2000);
+ regmap_read(regmap, ISC_CTRLSR, &sr);
+ }
+
+ if (counter < 0) {
+ v4l2_warn(&isc->v4l2_dev, "Time out to update profile\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void isc_set_histogram(struct isc_device *isc, bool enable)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ if (enable) {
+ regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his,
+ ISC_HIS_CFG_MODE_GR |
+ (isc->config.sd_format->cfa_baycfg
+ << ISC_HIS_CFG_BAYSEL_SHIFT) |
+ ISC_HIS_CFG_RAR);
+ regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his,
+ ISC_HIS_CTRL_EN);
+ regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
+ ctrls->hist_id = ISC_HIS_CFG_MODE_GR;
+ isc_update_profile(isc);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
+
+ ctrls->hist_stat = HIST_ENABLED;
+ } else {
+ regmap_write(regmap, ISC_INTDIS, ISC_INT_HISDONE);
+ regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his,
+ ISC_HIS_CTRL_DIS);
+
+ ctrls->hist_stat = HIST_DISABLED;
+ }
+}
+
+static int isc_configure(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 pfe_cfg0, dcfg, mask, pipeline;
+ struct isc_subdev_entity *subdev = isc->current_subdev;
+
+ pfe_cfg0 = isc->config.sd_format->pfe_cfg0_bps;
+ pipeline = isc->config.bits_pipeline;
+
+ dcfg = isc->config.dcfg_imode | isc->dcfg;
+
+ pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
+ mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
+ ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
+ ISC_PFE_CFG0_MODE_MASK | ISC_PFE_CFG0_CCIR_CRC |
+ ISC_PFE_CFG0_CCIR656 | ISC_PFE_CFG0_MIPI;
+
+ regmap_update_bits(regmap, ISC_PFE_CFG0, mask, pfe_cfg0);
+
+ isc->config_rlp(isc);
+
+ regmap_write(regmap, ISC_DCFG + isc->offsets.dma, dcfg);
+
+ /* Set the pipeline */
+ isc_set_pipeline(isc, pipeline);
+
+ /*
+	 * The currently implemented histogram is available for the RAW R, B,
+	 * GB and GR channels. We need to check if the sensor is outputting
+	 * RAW BAYER.
+ */
+ if (isc->ctrls.awb &&
+ ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
+ isc_set_histogram(isc, true);
+ else
+ isc_set_histogram(isc, false);
+
+ /* Update profile */
+ return isc_update_profile(isc);
+}
+
+static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ struct regmap *regmap = isc->regmap;
+ struct isc_buffer *buf;
+ unsigned long flags;
+ int ret;
+
+ /* Enable stream on the sub device */
+ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ v4l2_err(&isc->v4l2_dev, "stream on failed in subdev %d\n",
+ ret);
+ goto err_start_stream;
+ }
+
+ ret = pm_runtime_resume_and_get(isc->dev);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev, "RPM resume failed in subdev %d\n",
+ ret);
+ goto err_pm_get;
+ }
+
+ ret = isc_configure(isc);
+ if (unlikely(ret))
+ goto err_configure;
+
+ /* Enable DMA interrupt */
+ regmap_write(regmap, ISC_INTEN, ISC_INT_DDONE);
+
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+
+ isc->sequence = 0;
+ isc->stop = false;
+ reinit_completion(&isc->comp);
+
+ isc->cur_frm = list_first_entry(&isc->dma_queue,
+ struct isc_buffer, list);
+ list_del(&isc->cur_frm->list);
+
+ isc_crop_pfe(isc);
+ isc_start_dma(isc);
+
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+ /* if we are streaming from RAW, we can do a one-shot white balance adjustment */
+ if (ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
+ v4l2_ctrl_activate(isc->do_wb_ctrl, true);
+
+ return 0;
+
+err_configure:
+ pm_runtime_put_sync(isc->dev);
+err_pm_get:
+ v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+
+err_start_stream:
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ list_for_each_entry(buf, &isc->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+ return ret;
+}
+
+static void isc_stop_streaming(struct vb2_queue *vq)
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ unsigned long flags;
+ struct isc_buffer *buf;
+ int ret;
+
+ mutex_lock(&isc->awb_mutex);
+ v4l2_ctrl_activate(isc->do_wb_ctrl, false);
+
+ isc->stop = true;
+
+ /* Wait until the end of the current frame */
+ if (isc->cur_frm && !wait_for_completion_timeout(&isc->comp, 5 * HZ))
+ v4l2_err(&isc->v4l2_dev,
+ "Timeout waiting for end of the capture\n");
+
+ mutex_unlock(&isc->awb_mutex);
+
+ /* Disable DMA interrupt */
+ regmap_write(isc->regmap, ISC_INTDIS, ISC_INT_DDONE);
+
+ pm_runtime_put_sync(isc->dev);
+
+ /* Disable stream on the sub device */
+ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ v4l2_err(&isc->v4l2_dev, "stream off failed in subdev\n");
+
+ /* Release all active buffers */
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ if (unlikely(isc->cur_frm)) {
+ vb2_buffer_done(&isc->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ isc->cur_frm = NULL;
+ }
+ list_for_each_entry(buf, &isc->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static void isc_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct isc_buffer *buf = container_of(vbuf, struct isc_buffer, vb);
+ struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ if (!isc->cur_frm && list_empty(&isc->dma_queue) &&
+ vb2_start_streaming_called(vb->vb2_queue)) {
+ isc->cur_frm = buf;
+ isc_start_dma(isc);
+ } else
+ list_add_tail(&buf->list, &isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = isc->num_user_formats;
+ struct isc_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = isc->user_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static const struct vb2_ops isc_vb2_ops = {
+ .queue_setup = isc_queue_setup,
+ .buf_prepare = isc_buffer_prepare,
+ .start_streaming = isc_start_streaming,
+ .stop_streaming = isc_stop_streaming,
+ .buf_queue = isc_buffer_queue,
+};
+
+static int isc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "microchip-isc", sizeof(cap->driver));
+ strscpy(cap->card, "Atmel Image Sensor Controller", sizeof(cap->card));
+
+ return 0;
+}
+
+static int isc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+ u32 index = f->index;
+ u32 i, supported_index;
+
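+ /*
+ * Enumerate the fixed list of controller output formats first, then
+ * the RAW formats that the connected sensor actually supports.
+ */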
+ if (index < isc->controller_formats_size) {
+ f->pixelformat = isc->controller_formats[index].fourcc;
+ return 0;
+ }
+
+ index -= isc->controller_formats_size;
+
+ supported_index = 0;
+
+ for (i = 0; i < isc->formats_list_size; i++) {
+ if (!ISC_IS_FORMAT_RAW(isc->formats_list[i].mbus_code) ||
+ !isc->formats_list[i].sd_support)
+ continue;
+ if (supported_index == index) {
+ f->pixelformat = isc->formats_list[i].fourcc;
+ return 0;
+ }
+ supported_index++;
+ }
+
+ return -EINVAL;
+}
+
+static int isc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ *fmt = isc->fmt;
+
+ return 0;
+}
+
+/*
+ * Check whether the ISC can output the currently configured format,
+ * considering which type of format it receives from the sensor.
+ */
+static int isc_try_validate_formats(struct isc_device *isc)
+{
+ int ret;
+ bool bayer = false, yuv = false, rgb = false, grey = false;
+
+ /* all formats supported by the RLP module are OK */
+ switch (isc->try_config.fourcc) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ ret = 0;
+ bayer = true;
+ break;
+
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ ret = 0;
+ yuv = true;
+ break;
+
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_ARGB444:
+ case V4L2_PIX_FMT_ARGB555:
+ ret = 0;
+ rgb = true;
+ break;
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y16:
+ ret = 0;
+ grey = true;
+ break;
+ default:
+ /* any other formats are not supported */
+ ret = -EINVAL;
+ }
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "Format validation, requested rgb=%u, yuv=%u, grey=%u, bayer=%u\n",
+ rgb, yuv, grey, bayer);
+
+ /* we cannot output RAW if we do not receive RAW */
+ if ((bayer) && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
+ return -EINVAL;
+
+ /* we cannot output GREY if we do not receive RAW/GREY */
+ if (grey && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code) &&
+ !ISC_IS_FORMAT_GREY(isc->try_config.sd_format->mbus_code))
+ return -EINVAL;
+
+ return ret;
+}
+
+/*
+ * Configures the RLP and DMA modules, depending on the output format
+ * configured for the ISC.
+ * If direct_dump == true, just dump the raw data as 8/16 bits, depending on the format.
+ */
+static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
+{
+ isc->try_config.rlp_cfg_mode = 0;
+
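+ /*
+ * bpp is the total number of bits per pixel (used for sizeimage),
+ * while bpp_v4l2 covers only the first plane (used for bytesperline).
+ */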
+ switch (isc->try_config.fourcc) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 8;
+ isc->try_config.bpp_v4l2 = 8;
+ break;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_ARGB444:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_ARGB555:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 32;
+ isc->try_config.bpp_v4l2 = 32;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC420P;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
+ isc->try_config.bpp = 12;
+ isc->try_config.bpp_v4l2 = 8; /* only first plane */
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC422P;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 8; /* only first plane */
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_YUYV;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_UYVY;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_VYUY;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ case V4L2_PIX_FMT_GREY:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 8;
+ isc->try_config.bpp_v4l2 = 8;
+ break;
+ case V4L2_PIX_FMT_Y16:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10 | ISC_RLP_CFG_LSH;
+ fallthrough;
+ case V4L2_PIX_FMT_Y10:
+ isc->try_config.rlp_cfg_mode |= ISC_RLP_CFG_MODE_DATY10;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ isc->try_config.bpp_v4l2 = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (direct_dump) {
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Configure the pipeline modules, depending on which format the ISC outputs
+ * and which format it receives from the sensor.
+ */
+static int isc_try_configure_pipeline(struct isc_device *isc)
+{
+ switch (isc->try_config.fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_ARGB444:
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ WB_ENABLE | GAM_ENABLES | DPC_BLCENABLE |
+ CC_ENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | GAM_ENABLES | WB_ENABLE |
+ SUB420_ENABLE | SUB422_ENABLE | CBC_ENABLE |
+ DPC_BLCENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
+ SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
+ SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y16:
+ /* if sensor format is RAW, we convert inside ISC */
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) {
+ isc->try_config.bits_pipeline = CFA_ENABLE |
+ CSC_ENABLE | WB_ENABLE | GAM_ENABLES |
+ CBC_ENABLE | DPC_BLCENABLE;
+ } else {
+ isc->try_config.bits_pipeline = 0x0;
+ }
+ break;
+ default:
+ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
+ isc->try_config.bits_pipeline = WB_ENABLE | DPC_BLCENABLE;
+ else
+ isc->try_config.bits_pipeline = 0x0;
+ }
+
+ /* Tune the pipeline for the specific product */
+ isc->adapt_pipeline(isc);
+
+ return 0;
+}
+
+static void isc_try_fse(struct isc_device *isc,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_rect *try_crop =
+ v4l2_subdev_state_get_crop(sd_state, 0);
+ struct v4l2_subdev_frame_size_enum fse = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ int ret;
+
+ /*
+ * If we do not know yet which format the subdev is using, we cannot
+ * do anything.
+ */
+ if (!isc->try_config.sd_format)
+ return;
+
+ fse.code = isc->try_config.sd_format->mbus_code;
+
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
+ sd_state, &fse);
+ /*
+ * Attempt to obtain format size from subdev. If not available,
+ * just use the maximum the ISC can receive.
+ */
+ if (ret) {
+ try_crop->width = isc->max_width;
+ try_crop->height = isc->max_height;
+ } else {
+ try_crop->width = fse.max_width;
+ try_crop->height = fse.max_height;
+ }
+}
+
+static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
+ u32 *code)
+{
+ int i;
+ struct isc_format *sd_fmt = NULL, *direct_fmt = NULL;
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg = {};
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg,
+ };
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ u32 mbus_code;
+ int ret;
+ bool rlp_dma_direct_dump = false;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* Step 1: find a RAW format that is supported */
+ for (i = 0; i < isc->num_user_formats; i++) {
+ if (ISC_IS_FORMAT_RAW(isc->user_formats[i]->mbus_code)) {
+ sd_fmt = isc->user_formats[i];
+ break;
+ }
+ }
+ /* Step 2: We can continue with this RAW format, or we can look
+ * for a better one: maybe the sensor directly supports what we need.
+ */
+ direct_fmt = find_format_by_fourcc(isc, pixfmt->pixelformat);
+
+ /* Step 3: We have both. Decide which one to use based on the
+ * module parameter.
+ */
+ if (direct_fmt && sd_fmt && sensor_preferred)
+ sd_fmt = direct_fmt;
+
+ /* Step 4: we do not have RAW but we have a direct format. Use it. */
+ if (direct_fmt && !sd_fmt)
+ sd_fmt = direct_fmt;
+
+ /* Step 5: if we are using a direct format, we need to package
+ * everything as 8-bit data and just dump it
+ */
+ if (sd_fmt == direct_fmt)
+ rlp_dma_direct_dump = true;
+
+ /* Step 6: We have no format. This can happen if userspace
+ * requests an unknown or invalid format.
+ * In this case, default to whatever we have.
+ */
+ if (!sd_fmt && !direct_fmt) {
+ sd_fmt = isc->user_formats[isc->num_user_formats - 1];
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "Sensor not supporting %.4s, using %.4s\n",
+ (char *)&pixfmt->pixelformat, (char *)&sd_fmt->fourcc);
+ }
+
+ if (!sd_fmt) {
+ ret = -EINVAL;
+ goto isc_try_fmt_err;
+ }
+
+ /* Step 7: Print out what we decided for debugging */
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "Preferring to have sensor using format %.4s\n",
+ (char *)&sd_fmt->fourcc);
+
+ /* Step 8: at this moment we decided which format the subdev will use */
+ isc->try_config.sd_format = sd_fmt;
+
+ /* Limit to Atmel ISC hardware capabilities */
+ if (pixfmt->width > isc->max_width)
+ pixfmt->width = isc->max_width;
+ if (pixfmt->height > isc->max_height)
+ pixfmt->height = isc->max_height;
+
+ /*
+ * The mbus format is the one the subdev outputs.
+ * The pixels will be transferred in this format from the sensor to the ISC.
+ */
+ mbus_code = sd_fmt->mbus_code;
+
+ /*
+ * Validate formats. If the required format is not OK, default to raw.
+ */
+
+ isc->try_config.fourcc = pixfmt->pixelformat;
+
+ if (isc_try_validate_formats(isc)) {
+ pixfmt->pixelformat = isc->try_config.fourcc = sd_fmt->fourcc;
+ /* Re-try to validate the new format */
+ ret = isc_try_validate_formats(isc);
+ if (ret)
+ goto isc_try_fmt_err;
+ }
+
+ ret = isc_try_configure_rlp_dma(isc, rlp_dma_direct_dump);
+ if (ret)
+ goto isc_try_fmt_err;
+
+ ret = isc_try_configure_pipeline(isc);
+ if (ret)
+ goto isc_try_fmt_err;
+
+ /* Obtain frame sizes if possible to have crop requirements ready */
+ isc_try_fse(isc, &pad_state);
+
+ v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
+ &pad_state, &format);
+ if (ret < 0)
+ goto isc_try_fmt_subdev_err;
+
+ v4l2_fill_pix_format(pixfmt, &format.format);
+
+ /* Limit to Atmel ISC hardware capabilities */
+ if (pixfmt->width > isc->max_width)
+ pixfmt->width = isc->max_width;
+ if (pixfmt->height > isc->max_height)
+ pixfmt->height = isc->max_height;
+
+ pixfmt->field = V4L2_FIELD_NONE;
+ pixfmt->bytesperline = (pixfmt->width * isc->try_config.bpp_v4l2) >> 3;
+ pixfmt->sizeimage = ((pixfmt->width * isc->try_config.bpp) >> 3) *
+ pixfmt->height;
+
+ if (code)
+ *code = mbus_code;
+
+ return 0;
+
+isc_try_fmt_err:
+ v4l2_err(&isc->v4l2_dev, "Could not find any possible format for a working pipeline\n");
+isc_try_fmt_subdev_err:
+ memset(&isc->try_config, 0, sizeof(isc->try_config));
+
+ return ret;
+}
+
+static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ u32 mbus_code = 0;
+ int ret;
+
+ ret = isc_try_fmt(isc, f, &mbus_code);
+ if (ret)
+ return ret;
+
+ v4l2_fill_mbus_format(&format.format, &f->fmt.pix, mbus_code);
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ /* Limit to Atmel ISC hardware capabilities */
+ if (f->fmt.pix.width > isc->max_width)
+ f->fmt.pix.width = isc->max_width;
+ if (f->fmt.pix.height > isc->max_height)
+ f->fmt.pix.height = isc->max_height;
+
+ isc->fmt = *f;
+
+ if (isc->try_config.sd_format && isc->config.sd_format &&
+ isc->try_config.sd_format != isc->config.sd_format) {
+ isc->ctrls.hist_stat = HIST_INIT;
+ isc_reset_awb_ctrls(isc);
+ isc_update_v4l2_ctrls(isc);
+ }
+ /* make the try configuration active */
+ isc->config = isc->try_config;
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev, "New ISC configuration in place\n");
+
+ return 0;
+}
+
+static int isc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ if (vb2_is_busy(&isc->vb2_vidq))
+ return -EBUSY;
+
+ return isc_set_fmt(isc, f);
+}
+
+static int isc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ return isc_try_fmt(isc, f, NULL);
+}
+
+static int isc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = 0;
+ strscpy(inp->name, "Camera", sizeof(inp->name));
+
+ return 0;
+}
+
+static int isc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int isc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int isc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), isc->current_subdev->sd, a);
+}
+
+static int isc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), isc->current_subdev->sd, a);
+}
+
+static int isc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct isc_device *isc = video_drvdata(file);
+ int ret = -EINVAL;
+ int i;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ for (i = 0; i < isc->num_user_formats; i++)
+ if (isc->user_formats[i]->fourcc == fsize->pixel_format)
+ ret = 0;
+
+ for (i = 0; i < isc->controller_formats_size; i++)
+ if (isc->controller_formats[i].fourcc == fsize->pixel_format)
+ ret = 0;
+
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+
+ fsize->stepwise.min_width = 16;
+ fsize->stepwise.max_width = isc->max_width;
+ fsize->stepwise.min_height = 16;
+ fsize->stepwise.max_height = isc->max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isc_ioctl_ops = {
+ .vidioc_querycap = isc_querycap,
+ .vidioc_enum_fmt_vid_cap = isc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = isc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = isc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = isc_try_fmt_vid_cap,
+
+ .vidioc_enum_input = isc_enum_input,
+ .vidioc_g_input = isc_g_input,
+ .vidioc_s_input = isc_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_parm = isc_g_parm,
+ .vidioc_s_parm = isc_s_parm,
+ .vidioc_enum_framesizes = isc_enum_framesizes,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int isc_open(struct file *file)
+{
+ struct isc_device *isc = video_drvdata(file);
+ struct v4l2_subdev *sd = isc->current_subdev->sd;
+ int ret;
+
+ if (mutex_lock_interruptible(&isc->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ v4l2_fh_release(file);
+ goto unlock;
+ }
+
+ ret = isc_set_fmt(isc, &isc->fmt);
+ if (ret) {
+ v4l2_subdev_call(sd, core, s_power, 0);
+ v4l2_fh_release(file);
+ }
+
+unlock:
+ mutex_unlock(&isc->lock);
+ return ret;
+}
+
+static int isc_release(struct file *file)
+{
+ struct isc_device *isc = video_drvdata(file);
+ struct v4l2_subdev *sd = isc->current_subdev->sd;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&isc->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&isc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations isc_fops = {
+ .owner = THIS_MODULE,
+ .open = isc_open,
+ .release = isc_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+irqreturn_t atmel_isc_interrupt(int irq, void *dev_id)
+{
+ struct isc_device *isc = (struct isc_device *)dev_id;
+ struct regmap *regmap = isc->regmap;
+ u32 isc_intsr, isc_intmask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ regmap_read(regmap, ISC_INTSR, &isc_intsr);
+ regmap_read(regmap, ISC_INTMASK, &isc_intmask);
+
+ pending = isc_intsr & isc_intmask;
+
+ if (likely(pending & ISC_INT_DDONE)) {
+ spin_lock(&isc->dma_queue_lock);
+ if (isc->cur_frm) {
+ struct vb2_v4l2_buffer *vbuf = &isc->cur_frm->vb;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vbuf->sequence = isc->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ isc->cur_frm = NULL;
+ }
+
+ if (!list_empty(&isc->dma_queue) && !isc->stop) {
+ isc->cur_frm = list_first_entry(&isc->dma_queue,
+ struct isc_buffer, list);
+ list_del(&isc->cur_frm->list);
+
+ isc_start_dma(isc);
+ }
+
+ if (isc->stop)
+ complete(&isc->comp);
+
+ ret = IRQ_HANDLED;
+ spin_unlock(&isc->dma_queue_lock);
+ }
+
+ if (pending & ISC_INT_HISDONE) {
+ schedule_work(&isc->awb_work);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(atmel_isc_interrupt);
+
+static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 *hist_count = &ctrls->hist_count[ctrls->hist_id];
+ u32 *hist_entry = &ctrls->hist_entry[0];
+ u32 i;
+
+ *min = 0;
+ *max = HIST_ENTRIES;
+
+ regmap_bulk_read(regmap, ISC_HIS_ENTRY + isc->offsets.his_entry,
+ hist_entry, HIST_ENTRIES);
+
+ *hist_count = 0;
+ /*
+ * we deliberately skip the first entry of the histogram
+ * (the darkest pixels)
+ */
+ for (i = 1; i < HIST_ENTRIES; i++) {
+ if (*hist_entry && !*min)
+ *min = i;
+ if (*hist_entry)
+ *max = i;
+ *hist_count += i * (*hist_entry++);
+ }
+
+ if (!*min)
+ *min = 1;
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: hist_id %u, hist_count %u",
+ ctrls->hist_id, *hist_count);
+}
+
+static void isc_wb_update(struct isc_ctrls *ctrls)
+{
+ struct isc_device *isc = container_of(ctrls, struct isc_device, ctrls);
+ u32 *hist_count = &ctrls->hist_count[0];
+ u32 c, offset[4];
+ u64 avg = 0;
+ /* We compute two gains, stretch gain and grey world gain */
+ u32 s_gain[4], gw_gain[4];
+
+ /*
+ * According to Grey World, we need to set gains for R/B to normalize
+ * them towards the green channel.
+ * Thus we want to keep Green fixed and adjust only Red/Blue.
+ * Compute the average of both green channels first.
+ */
+ avg = (u64)hist_count[ISC_HIS_CFG_MODE_GR] +
+ (u64)hist_count[ISC_HIS_CFG_MODE_GB];
+ avg >>= 1;
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: green components average %llu\n", avg);
+
+ /* Green histogram is null, nothing to do */
+ if (!avg)
+ return;
+
+ for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) {
+ /*
+ * The color offset is the minimum value of the histogram.
+ * We stretch this color to the full range by subtracting
+ * this value from the color component.
+ */
+ offset[c] = ctrls->hist_minmax[c][HIST_MIN_INDEX];
+ /*
+ * The offset is always at least 1. If the offset is 1, we do
+ * not need to adjust it, so our result must be zero.
+ * The offset is computed from a 9-bit histogram (0..512), but the
+ * offset register is based on the 12-bit pipeline (0..4096).
+ * We need to shift by the 3 bits that the histogram ignores.
+ */
+ ctrls->offset[c] = (offset[c] - 1) << 3;
+
+ /*
+ * the offset is then taken and converted to 2's complements,
+ * and must be negative, as we subtract this value from the
+ * color components
+ */
+ ctrls->offset[c] = -ctrls->offset[c];
+
+ /*
+ * The stretch gain is the total number of histogram bins divided by
+ * the actual range of the color component (Max - Min).
+ * If we compute the gain like this, the color component will be
+ * stretched to the full histogram.
+ * We shift by 9 bits for precision: we have 9 bits for decimals.
+ */
+ s_gain[c] = (HIST_ENTRIES << 9) /
+ (ctrls->hist_minmax[c][HIST_MAX_INDEX] -
+ ctrls->hist_minmax[c][HIST_MIN_INDEX] + 1);
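+ /*
+ * For example, with HIST_ENTRIES = 512, min = 17 and max = 400:
+ * s_gain = (512 << 9) / 384 = 682, i.e. roughly a 1.33x gain in
+ * 9-bit fixed point.
+ */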
+
+ /*
+ * Now we have to compute the gain w.r.t. the average.
+ * Add/lose gain to the component towards the average.
+ * If it happens that the component is zero, use the
+ * fixed-point value: 1.0 gain.
+ */
+ if (hist_count[c])
+ gw_gain[c] = div_u64(avg << 9, hist_count[c]);
+ else
+ gw_gain[c] = 1 << 9;
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: component %d, s_gain %u, gw_gain %u\n",
+ c, s_gain[c], gw_gain[c]);
+ /* multiply both gains and adjust for decimals */
+ ctrls->gain[c] = s_gain[c] * gw_gain[c];
+ ctrls->gain[c] >>= 9;
+
+ /* make sure we are not out of range */
+ ctrls->gain[c] = clamp_val(ctrls->gain[c], 0, GENMASK(12, 0));
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb: component %d, final gain %u\n",
+ c, ctrls->gain[c]);
+ }
+}
+
+static void isc_awb_work(struct work_struct *w)
+{
+ struct isc_device *isc =
+ container_of(w, struct isc_device, awb_work);
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 hist_id = ctrls->hist_id;
+ u32 baysel;
+ unsigned long flags;
+ u32 min, max;
+ int ret;
+
+ if (ctrls->hist_stat != HIST_ENABLED)
+ return;
+
+ isc_hist_count(isc, &min, &max);
+
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "isc wb mode %d: hist min %u , max %u\n", hist_id, min, max);
+
+ ctrls->hist_minmax[hist_id][HIST_MIN_INDEX] = min;
+ ctrls->hist_minmax[hist_id][HIST_MAX_INDEX] = max;
+
+ if (hist_id != ISC_HIS_CFG_MODE_B) {
+ hist_id++;
+ } else {
+ isc_wb_update(ctrls);
+ hist_id = ISC_HIS_CFG_MODE_GR;
+ }
+
+ ctrls->hist_id = hist_id;
+ baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+
+ ret = pm_runtime_resume_and_get(isc->dev);
+ if (ret < 0)
+ return;
+
+ /*
+ * Only update if we have all the required histograms and controls.
+ * If AWB has been disabled, we need to reset the registers as well.
+ */
+ if (hist_id == ISC_HIS_CFG_MODE_GR || ctrls->awb == ISC_WB_NONE) {
+ /*
+ * It may happen that the DMA Done IRQ triggers while we are updating
+ * the white balance registers here.
+ * In that case, only part of the controls would have been updated.
+ * We avoid that by locking the section.
+ */
+ spin_lock_irqsave(&isc->awb_lock, flags);
+ isc_update_awb_ctrls(isc);
+ spin_unlock_irqrestore(&isc->awb_lock, flags);
+
+ /*
+ * If we are doing just the one-time white balance adjustment,
+ * we are basically done.
+ */
+ if (ctrls->awb == ISC_WB_ONETIME) {
+ v4l2_info(&isc->v4l2_dev,
+ "Completed one time white-balance adjustment.\n");
+ /* update the v4l2 controls values */
+ isc_update_v4l2_ctrls(isc);
+ ctrls->awb = ISC_WB_NONE;
+ }
+ }
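+ /* reprogram the histogram unit for the next color channel */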
+ regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his,
+ hist_id | baysel | ISC_HIS_CFG_RAR);
+
+ /*
+ * We have to make sure that streaming has not stopped in the meantime.
+ * The ISC requires a frame to clock the internal profile update.
+ * To avoid issues, lock the sequence with a mutex.
+ */
+ mutex_lock(&isc->awb_mutex);
+
+ /* streaming is not active anymore */
+ if (isc->stop) {
+ mutex_unlock(&isc->awb_mutex);
+ return;
+ }
+
+ isc_update_profile(isc);
+
+ mutex_unlock(&isc->awb_mutex);
+
+ /* if awb has been disabled, we don't need to start another histogram */
+ if (ctrls->awb)
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
+
+ pm_runtime_put_sync(isc->dev);
+}
+
+static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isc_device *isc = container_of(ctrl->handler,
+ struct isc_device, ctrls.handler);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrls->brightness = ctrl->val & ISC_CBC_BRIGHT_MASK;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrls->contrast = ctrl->val & ISC_CBC_CONTRAST_MASK;
+ break;
+ case V4L2_CID_GAMMA:
+ ctrls->gamma_index = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops isc_ctrl_ops = {
+ .s_ctrl = isc_s_ctrl,
+};
+
+static int isc_s_awb_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isc_device *isc = container_of(ctrl->handler,
+ struct isc_device, ctrls.handler);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ if (ctrl->val == 1)
+ ctrls->awb = ISC_WB_AUTO;
+ else
+ ctrls->awb = ISC_WB_NONE;
+
+ /* configure the controls with new values from v4l2 */
+ if (ctrl->cluster[ISC_CTRL_R_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_R] = isc->r_gain_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_B_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_B] = isc->b_gain_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_GR_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_GR] = isc->gr_gain_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_GB_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_GB] = isc->gb_gain_ctrl->val;
+
+ if (ctrl->cluster[ISC_CTRL_R_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_R] = isc->r_off_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_B_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_B] = isc->b_off_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_GR_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_GR] = isc->gr_off_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_GB_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_GB] = isc->gb_off_ctrl->val;
+
+ isc_update_awb_ctrls(isc);
+
+ mutex_lock(&isc->awb_mutex);
+ if (vb2_is_streaming(&isc->vb2_vidq)) {
+ /*
+ * If we are streaming, we can update profile to
+ * have the new settings in place.
+ */
+ isc_update_profile(isc);
+ } else {
+ /*
+ * The auto cluster will automatically activate this
+ * control. It has to be deactivated when not
+ * streaming.
+ */
+ v4l2_ctrl_activate(isc->do_wb_ctrl, false);
+ }
+ mutex_unlock(&isc->awb_mutex);
+
+ /* if auto white balance is on, start the histogram procedure */
+ if (ctrls->awb == ISC_WB_AUTO &&
+ vb2_is_streaming(&isc->vb2_vidq) &&
+ ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
+ isc_set_histogram(isc, true);
+
+ /*
+ * For a one-time white balance adjustment, check the button;
+ * if it is pressed, perform the one-time operation.
+ */
+ if (ctrls->awb == ISC_WB_NONE &&
+ ctrl->cluster[ISC_CTRL_DO_WB]->is_new &&
+ !(ctrl->cluster[ISC_CTRL_DO_WB]->flags &
+ V4L2_CTRL_FLAG_INACTIVE)) {
+ ctrls->awb = ISC_WB_ONETIME;
+ isc_set_histogram(isc, true);
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "One time white-balance started.\n");
+ }
+ return 0;
+ }
+ return 0;
+}
+
+static int isc_g_volatile_awb_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isc_device *isc = container_of(ctrl->handler,
+ struct isc_device, ctrls.handler);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ switch (ctrl->id) {
+ /* being a cluster, this id will be called for every control in the cluster */
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrl->cluster[ISC_CTRL_R_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_R];
+ ctrl->cluster[ISC_CTRL_B_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_B];
+ ctrl->cluster[ISC_CTRL_GR_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_GR];
+ ctrl->cluster[ISC_CTRL_GB_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_GB];
+
+ ctrl->cluster[ISC_CTRL_R_OFF]->val =
+ ctrls->offset[ISC_HIS_CFG_MODE_R];
+ ctrl->cluster[ISC_CTRL_B_OFF]->val =
+ ctrls->offset[ISC_HIS_CFG_MODE_B];
+ ctrl->cluster[ISC_CTRL_GR_OFF]->val =
+ ctrls->offset[ISC_HIS_CFG_MODE_GR];
+ ctrl->cluster[ISC_CTRL_GB_OFF]->val =
+ ctrls->offset[ISC_HIS_CFG_MODE_GB];
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops isc_awb_ops = {
+ .s_ctrl = isc_s_awb_ctrl,
+ .g_volatile_ctrl = isc_g_volatile_awb_ctrl,
+};
+
+#define ISC_CTRL_OFF(_name, _id, _name_str) \
+ static const struct v4l2_ctrl_config _name = { \
+ .ops = &isc_awb_ops, \
+ .id = _id, \
+ .name = _name_str, \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .flags = V4L2_CTRL_FLAG_SLIDER, \
+ .min = -4095, \
+ .max = 4095, \
+ .step = 1, \
+ .def = 0, \
+ }
+
+ISC_CTRL_OFF(isc_r_off_ctrl, ISC_CID_R_OFFSET, "Red Component Offset");
+ISC_CTRL_OFF(isc_b_off_ctrl, ISC_CID_B_OFFSET, "Blue Component Offset");
+ISC_CTRL_OFF(isc_gr_off_ctrl, ISC_CID_GR_OFFSET, "Green Red Component Offset");
+ISC_CTRL_OFF(isc_gb_off_ctrl, ISC_CID_GB_OFFSET, "Green Blue Component Offset");
+
+#define ISC_CTRL_GAIN(_name, _id, _name_str) \
+ static const struct v4l2_ctrl_config _name = { \
+ .ops = &isc_awb_ops, \
+ .id = _id, \
+ .name = _name_str, \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .flags = V4L2_CTRL_FLAG_SLIDER, \
+ .min = 0, \
+ .max = 8191, \
+ .step = 1, \
+ .def = 512, \
+ }
+
+ISC_CTRL_GAIN(isc_r_gain_ctrl, ISC_CID_R_GAIN, "Red Component Gain");
+ISC_CTRL_GAIN(isc_b_gain_ctrl, ISC_CID_B_GAIN, "Blue Component Gain");
+ISC_CTRL_GAIN(isc_gr_gain_ctrl, ISC_CID_GR_GAIN, "Green Red Component Gain");
+ISC_CTRL_GAIN(isc_gb_gain_ctrl, ISC_CID_GB_GAIN, "Green Blue Component Gain");
+
+static int isc_ctrl_init(struct isc_device *isc)
+{
+ const struct v4l2_ctrl_ops *ops = &isc_ctrl_ops;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ ctrls->hist_stat = HIST_INIT;
+ isc_reset_awb_ctrls(isc);
+
+ ret = v4l2_ctrl_handler_init(hdl, 13);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize product-specific controls. For example, contrast */
+ isc->config_ctrls(isc, ops);
+
+ ctrls->brightness = 0;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, isc->gamma_max, 1,
+ isc->gamma_max);
+ isc->awb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+
+ /* do_white_balance is a button, so min,max,step,default are ignored */
+ isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
+ V4L2_CID_DO_WHITE_BALANCE,
+ 0, 0, 0, 0);
+
+ if (!isc->do_wb_ctrl) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ v4l2_ctrl_activate(isc->do_wb_ctrl, false);
+
+ isc->r_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_gain_ctrl, NULL);
+ isc->b_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_gain_ctrl, NULL);
+ isc->gr_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_gain_ctrl, NULL);
+ isc->gb_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_gain_ctrl, NULL);
+ isc->r_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_off_ctrl, NULL);
+ isc->b_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_off_ctrl, NULL);
+ isc->gr_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_off_ctrl, NULL);
+ isc->gb_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_off_ctrl, NULL);
+
+ /*
+ * The cluster is in auto mode when auto white balance is enabled
+ * and in manual mode otherwise.
+ */
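+ /* 10 controls: awb, do_wb, the four gains and the four offsets */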
+ v4l2_ctrl_auto_cluster(10, &isc->awb_ctrl, 0, true);
+
+ v4l2_ctrl_handler_setup(hdl);
+
+ return 0;
+}
+
+static int isc_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ struct isc_subdev_entity *subdev_entity =
+ container_of(notifier, struct isc_subdev_entity, notifier);
+
+ if (video_is_registered(&isc->video_dev)) {
+ v4l2_err(&isc->v4l2_dev, "Only one sub-device is supported.\n");
+ return -EBUSY;
+ }
+
+ subdev_entity->sd = subdev;
+
+ return 0;
+}
+
+static void isc_async_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ mutex_destroy(&isc->awb_mutex);
+ cancel_work_sync(&isc->awb_work);
+ video_unregister_device(&isc->video_dev);
+ v4l2_ctrl_handler_free(&isc->ctrls.handler);
+}
+
+static struct isc_format *find_format_by_code(struct isc_device *isc,
+ unsigned int code, int *index)
+{
+ struct isc_format *fmt = &isc->formats_list[0];
+ unsigned int i;
+
+ for (i = 0; i < isc->formats_list_size; i++) {
+ if (fmt->mbus_code == code) {
+ *index = i;
+ return fmt;
+ }
+
+ fmt++;
+ }
+
+ return NULL;
+}
+
+static int isc_formats_init(struct isc_device *isc)
+{
+ struct isc_format *fmt;
+ struct v4l2_subdev *subdev = isc->current_subdev->sd;
+ unsigned int num_fmts, i, j;
+ u32 list_size = isc->formats_list_size;
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ num_fmts = 0;
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ mbus_code.index++;
+
+ fmt = find_format_by_code(isc, mbus_code.code, &i);
+ if (!fmt) {
+ v4l2_warn(&isc->v4l2_dev, "Mbus code %x not supported\n",
+ mbus_code.code);
+ continue;
+ }
+
+ fmt->sd_support = true;
+ num_fmts++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ isc->num_user_formats = num_fmts;
+ isc->user_formats = devm_kcalloc(isc->dev,
+ num_fmts, sizeof(*isc->user_formats),
+ GFP_KERNEL);
+ if (!isc->user_formats)
+ return -ENOMEM;
+
+ fmt = &isc->formats_list[0];
+ for (i = 0, j = 0; i < list_size; i++) {
+ if (fmt->sd_support)
+ isc->user_formats[j++] = fmt;
+ fmt++;
+ }
+
+ return 0;
+}
+
+static int isc_set_default_fmt(struct isc_device *isc)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = isc->user_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = isc_try_fmt(isc, &f, NULL);
+ if (ret)
+ return ret;
+
+ isc->fmt = f;
+ return 0;
+}
+
+static int isc_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ struct video_device *vdev = &isc->video_dev;
+ struct vb2_queue *q = &isc->vb2_vidq;
+ int ret = 0;
+
+ INIT_WORK(&isc->awb_work, isc_awb_work);
+
+ ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ isc->current_subdev = container_of(notifier,
+ struct isc_subdev_entity, notifier);
+ mutex_init(&isc->lock);
+ mutex_init(&isc->awb_mutex);
+
+ init_completion(&isc->comp);
+
+ /* Initialize videobuf2 queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = isc;
+ q->buf_struct_size = sizeof(struct isc_buffer);
+ q->ops = &isc_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &isc->lock;
+ q->min_queued_buffers = 1;
+ q->dev = isc->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "vb2_queue_init() failed: %d\n", ret);
+ goto isc_async_complete_err;
+ }
+
+ /* Init video dma queues */
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_lock_init(&isc->dma_queue_lock);
+ spin_lock_init(&isc->awb_lock);
+
+ ret = isc_formats_init(isc);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "Init format failed: %d\n", ret);
+ goto isc_async_complete_err;
+ }
+
+ ret = isc_set_default_fmt(isc);
+ if (ret) {
+ v4l2_err(&isc->v4l2_dev, "Could not set default format\n");
+ goto isc_async_complete_err;
+ }
+
+ ret = isc_ctrl_init(isc);
+ if (ret) {
+ v4l2_err(&isc->v4l2_dev, "Init isc ctrls failed: %d\n", ret);
+ goto isc_async_complete_err;
+ }
+
+ /* Register video device */
+ strscpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &isc_fops;
+ vdev->ioctl_ops = &isc_ioctl_ops;
+ vdev->v4l2_dev = &isc->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &isc->lock;
+ vdev->ctrl_handler = &isc->ctrls.handler;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ video_set_drvdata(vdev, isc);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "video_register_device failed: %d\n", ret);
+ goto isc_async_complete_err;
+ }
+
+ return 0;
+
+isc_async_complete_err:
+ mutex_destroy(&isc->awb_mutex);
+ mutex_destroy(&isc->lock);
+ return ret;
+}
+
+const struct v4l2_async_notifier_operations atmel_isc_async_ops = {
+ .bound = isc_async_bound,
+ .unbind = isc_async_unbind,
+ .complete = isc_async_complete,
+};
+EXPORT_SYMBOL_GPL(atmel_isc_async_ops);
+
+void atmel_isc_subdev_cleanup(struct isc_device *isc)
+{
+ struct isc_subdev_entity *subdev_entity;
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ v4l2_async_nf_unregister(&subdev_entity->notifier);
+ v4l2_async_nf_cleanup(&subdev_entity->notifier);
+ }
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+}
+EXPORT_SYMBOL_GPL(atmel_isc_subdev_cleanup);
+
+int atmel_isc_pipeline_init(struct isc_device *isc)
+{
+ struct device *dev = isc->dev;
+ struct regmap *regmap = isc->regmap;
+ struct regmap_field *regs;
+ unsigned int i;
+
+ /*
+ * DPCEN-->GDCEN-->BLCEN-->WB-->CFA-->CC-->
+ * GAM-->VHXS-->CSC-->CBC-->SUB422-->SUB420
+ */
+ const struct reg_field regfields[ISC_PIPE_LINE_NODE_NUM] = {
+ REG_FIELD(ISC_DPC_CTRL, 0, 0),
+ REG_FIELD(ISC_DPC_CTRL, 1, 1),
+ REG_FIELD(ISC_DPC_CTRL, 2, 2),
+ REG_FIELD(ISC_WB_CTRL, 0, 0),
+ REG_FIELD(ISC_CFA_CTRL, 0, 0),
+ REG_FIELD(ISC_CC_CTRL, 0, 0),
+ REG_FIELD(ISC_GAM_CTRL, 0, 0),
+ REG_FIELD(ISC_GAM_CTRL, 1, 1),
+ REG_FIELD(ISC_GAM_CTRL, 2, 2),
+ REG_FIELD(ISC_GAM_CTRL, 3, 3),
+ REG_FIELD(ISC_VHXS_CTRL, 0, 0),
+ REG_FIELD(ISC_CSC_CTRL + isc->offsets.csc, 0, 0),
+ REG_FIELD(ISC_CBC_CTRL + isc->offsets.cbc, 0, 0),
+ REG_FIELD(ISC_SUB422_CTRL + isc->offsets.sub422, 0, 0),
+ REG_FIELD(ISC_SUB420_CTRL + isc->offsets.sub420, 0, 0),
+ };
+
+ for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+ regs = devm_regmap_field_alloc(dev, regmap, regfields[i]);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ isc->pipeline[i] = regs;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atmel_isc_pipeline_init);
+
+/* regmap configuration */
+#define ATMEL_ISC_REG_MAX 0xd5c
+const struct regmap_config atmel_isc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ATMEL_ISC_REG_MAX,
+};
+EXPORT_SYMBOL_GPL(atmel_isc_regmap_config);
+
+MODULE_AUTHOR("Songjun Wu");
+MODULE_AUTHOR("Eugen Hristev");
+MODULE_DESCRIPTION("Atmel ISC common code base");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc-clk.c b/drivers/staging/media/deprecated/atmel/atmel-isc-clk.c
new file mode 100644
index 000000000000..d442b5f4c931
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc-clk.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip Image Sensor Controller (ISC) common clock driver setup
+ *
+ * Copyright (C) 2016 Microchip Technology, Inc.
+ *
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
+
+static int isc_wait_clk_stable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long timeout = jiffies + usecs_to_jiffies(1000);
+ unsigned int status;
+
+ while (time_before(jiffies, timeout)) {
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (!(status & ISC_CLKSR_SIP))
+ return 0;
+
+ usleep_range(10, 250);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int isc_clk_prepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return ret;
+
+ return isc_wait_clk_stable(hw);
+}
+
+static void isc_clk_unprepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ isc_wait_clk_stable(hw);
+
+ pm_runtime_put_sync(isc_clk->dev);
+}
+
+static int isc_clk_enable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long flags;
+ unsigned int status;
+
+ dev_dbg(isc_clk->dev, "ISC CLK: %s, id = %d, div = %d, parent id = %d\n",
+ __func__, id, isc_clk->div, isc_clk->parent_id);
+
+ spin_lock_irqsave(&isc_clk->lock, flags);
+ regmap_update_bits(regmap, ISC_CLKCFG,
+ ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
+ (isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
+ (isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
+
+ regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
+
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (status & ISC_CLK(id))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static void isc_clk_disable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isc_clk->lock, flags);
+ regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
+}
+
+static int isc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 status;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return 0;
+
+ regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+
+ pm_runtime_put_sync(isc_clk->dev);
+
+ return status & ISC_CLK(isc_clk->id) ? 1 : 0;
+}
+
+static unsigned long
+isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
+}
+
+static int isc_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ long best_rate = -EINVAL;
+ int best_diff = -1;
+ unsigned int i, div;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ continue;
+
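+ /*
+ * The hardware divider field stores (div - 1), so the usable
+ * divider range is 1 .. ISC_CLK_MAX_DIV + 1.
+ */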
+ for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
+ unsigned long rate;
+ int diff;
+
+ rate = DIV_ROUND_CLOSEST(parent_rate, div);
+ diff = abs(req->rate - rate);
+
+ if (best_diff < 0 || best_diff > diff) {
+ best_rate = rate;
+ best_diff = diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ if (!best_diff || rate < req->rate)
+ break;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ dev_dbg(isc_clk->dev,
+ "ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+ __func__, best_rate,
+ __clk_get_name((req->best_parent_hw)->clk),
+ req->best_parent_rate);
+
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ isc_clk->parent_id = index;
+
+ return 0;
+}
+
+static u8 isc_clk_get_parent(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return isc_clk->parent_id;
+}
+
+static int isc_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 div;
+
+ if (!rate)
+ return -EINVAL;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div > (ISC_CLK_MAX_DIV + 1) || !div)
+ return -EINVAL;
+
+ isc_clk->div = div - 1;
+
+ return 0;
+}
+
+static const struct clk_ops isc_clk_ops = {
+ .prepare = isc_clk_prepare,
+ .unprepare = isc_clk_unprepare,
+ .enable = isc_clk_enable,
+ .disable = isc_clk_disable,
+ .is_enabled = isc_clk_is_enabled,
+ .recalc_rate = isc_clk_recalc_rate,
+ .determine_rate = isc_clk_determine_rate,
+ .set_parent = isc_clk_set_parent,
+ .get_parent = isc_clk_get_parent,
+ .set_rate = isc_clk_set_rate,
+};
+
+static int isc_clk_register(struct isc_device *isc, unsigned int id)
+{
+ struct regmap *regmap = isc->regmap;
+ struct device_node *np = isc->dev->of_node;
+ struct isc_clk *isc_clk;
+ struct clk_init_data init;
+ const char *clk_name = np->name;
+ const char *parent_names[3];
+ int num_parents;
+
+ if (id == ISC_ISPCK && !isc->ispck_required)
+ return 0;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents < 1 || num_parents > 3)
+ return -EINVAL;
+
+ if (num_parents > 2 && id == ISC_ISPCK)
+ num_parents = 2;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ if (id == ISC_MCK)
+ of_property_read_string(np, "clock-output-names", &clk_name);
+ else
+ clk_name = "isc-ispck";
+
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.name = clk_name;
+ init.ops = &isc_clk_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ isc_clk = &isc->isc_clks[id];
+ isc_clk->hw.init = &init;
+ isc_clk->regmap = regmap;
+ isc_clk->id = id;
+ isc_clk->dev = isc->dev;
+ spin_lock_init(&isc_clk->lock);
+
+ isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
+ if (IS_ERR(isc_clk->clk)) {
+ dev_err(isc->dev, "%s: clock registration failed\n", clk_name);
+ return PTR_ERR(isc_clk->clk);
+ } else if (id == ISC_MCK) {
+ of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
+ }
+
+ return 0;
+}
+
+int atmel_isc_clk_init(struct isc_device *isc)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
+ isc->isc_clks[i].clk = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ ret = isc_clk_register(isc, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atmel_isc_clk_init);
+
+void atmel_isc_clk_cleanup(struct isc_device *isc)
+{
+ unsigned int i;
+
+ of_clk_del_provider(isc->dev->of_node);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ struct isc_clk *isc_clk = &isc->isc_clks[i];
+
+ if (!IS_ERR(isc_clk->clk))
+ clk_unregister(isc_clk->clk);
+ }
+}
+EXPORT_SYMBOL_GPL(atmel_isc_clk_cleanup);
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc-regs.h b/drivers/staging/media/deprecated/atmel/atmel-isc-regs.h
new file mode 100644
index 000000000000..d06b72228d4f
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc-regs.h
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ATMEL_ISC_REGS_H
+#define __ATMEL_ISC_REGS_H
+
+#include <linux/bitops.h>
+
+/* ISC Control Enable Register 0 */
+#define ISC_CTRLEN 0x00000000
+
+/* ISC Control Disable Register 0 */
+#define ISC_CTRLDIS 0x00000004
+
+/* ISC Control Status Register 0 */
+#define ISC_CTRLSR 0x00000008
+
+#define ISC_CTRL_CAPTURE BIT(0)
+#define ISC_CTRL_UPPRO BIT(1)
+#define ISC_CTRL_HISREQ BIT(2)
+#define ISC_CTRL_HISCLR BIT(3)
+
+/* ISC Parallel Front End Configuration 0 Register */
+#define ISC_PFE_CFG0 0x0000000c
+
+#define ISC_PFE_CFG0_HPOL_LOW BIT(0)
+#define ISC_PFE_CFG0_VPOL_LOW BIT(1)
+#define ISC_PFE_CFG0_PPOL_LOW BIT(2)
+#define ISC_PFE_CFG0_CCIR656 BIT(9)
+#define ISC_PFE_CFG0_CCIR_CRC BIT(10)
+#define ISC_PFE_CFG0_MIPI BIT(14)
+
+#define ISC_PFE_CFG0_MODE_PROGRESSIVE (0x0 << 4)
+#define ISC_PFE_CFG0_MODE_MASK GENMASK(6, 4)
+
+#define ISC_PFE_CFG0_BPS_EIGHT (0x4 << 28)
+#define ISC_PFG_CFG0_BPS_NINE (0x3 << 28)
+#define ISC_PFG_CFG0_BPS_TEN (0x2 << 28)
+#define ISC_PFG_CFG0_BPS_ELEVEN (0x1 << 28)
+#define ISC_PFG_CFG0_BPS_TWELVE (0x0 << 28)
+#define ISC_PFE_CFG0_BPS_MASK GENMASK(30, 28)
+
+#define ISC_PFE_CFG0_COLEN BIT(12)
+#define ISC_PFE_CFG0_ROWEN BIT(13)
+
+/* ISC Parallel Front End Configuration 1 Register */
+#define ISC_PFE_CFG1 0x00000010
+
+#define ISC_PFE_CFG1_COLMIN(v) ((v))
+#define ISC_PFE_CFG1_COLMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG1_COLMAX(v) ((v) << 16)
+#define ISC_PFE_CFG1_COLMAX_MASK GENMASK(31, 16)
+
+/* ISC Parallel Front End Configuration 2 Register */
+#define ISC_PFE_CFG2 0x00000014
+
+#define ISC_PFE_CFG2_ROWMIN(v) ((v))
+#define ISC_PFE_CFG2_ROWMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG2_ROWMAX(v) ((v) << 16)
+#define ISC_PFE_CFG2_ROWMAX_MASK GENMASK(31, 16)
+
+/* ISC Clock Enable Register */
+#define ISC_CLKEN 0x00000018
+
+/* ISC Clock Disable Register */
+#define ISC_CLKDIS 0x0000001c
+
+/* ISC Clock Status Register */
+#define ISC_CLKSR 0x00000020
+#define ISC_CLKSR_SIP BIT(31)
+
+#define ISC_CLK(n) BIT(n)
+
+/* ISC Clock Configuration Register */
+#define ISC_CLKCFG 0x00000024
+#define ISC_CLKCFG_DIV_SHIFT(n) ((n)*16)
+#define ISC_CLKCFG_DIV_MASK(n) GENMASK(((n)*16 + 7), (n)*16)
+#define ISC_CLKCFG_SEL_SHIFT(n) ((n)*16 + 8)
+#define ISC_CLKCFG_SEL_MASK(n) GENMASK(((n)*17 + 8), ((n)*16 + 8))
+
+/* ISC Interrupt Enable Register */
+#define ISC_INTEN 0x00000028
+
+/* ISC Interrupt Disable Register */
+#define ISC_INTDIS 0x0000002c
+
+/* ISC Interrupt Mask Register */
+#define ISC_INTMASK 0x00000030
+
+/* ISC Interrupt Status Register */
+#define ISC_INTSR 0x00000034
+
+#define ISC_INT_DDONE BIT(8)
+#define ISC_INT_HISDONE BIT(12)
+
+/* ISC DPC Control Register */
+#define ISC_DPC_CTRL 0x40
+
+#define ISC_DPC_CTRL_DPCEN BIT(0)
+#define ISC_DPC_CTRL_GDCEN BIT(1)
+#define ISC_DPC_CTRL_BLCEN BIT(2)
+
+/* ISC DPC Config Register */
+#define ISC_DPC_CFG 0x44
+
+#define ISC_DPC_CFG_BAYSEL_SHIFT 0
+
+#define ISC_DPC_CFG_EITPOL BIT(4)
+
+#define ISC_DPC_CFG_TA_ENABLE BIT(14)
+#define ISC_DPC_CFG_TC_ENABLE BIT(13)
+#define ISC_DPC_CFG_TM_ENABLE BIT(12)
+
+#define ISC_DPC_CFG_RE_MODE BIT(17)
+
+#define ISC_DPC_CFG_GDCCLP_SHIFT 20
+#define ISC_DPC_CFG_GDCCLP_MASK GENMASK(22, 20)
+
+#define ISC_DPC_CFG_BLOFF_SHIFT 24
+#define ISC_DPC_CFG_BLOFF_MASK GENMASK(31, 24)
+
+#define ISC_DPC_CFG_BAYCFG_SHIFT 0
+#define ISC_DPC_CFG_BAYCFG_MASK GENMASK(1, 0)
+
+/* ISC DPC Threshold Median Register */
+#define ISC_DPC_THRESHM 0x48
+
+/* ISC DPC Threshold Closest Register */
+#define ISC_DPC_THRESHC 0x4C
+
+/* ISC DPC Threshold Average Register */
+#define ISC_DPC_THRESHA 0x50
+
+/* ISC DPC Status Register */
+#define ISC_DPC_SR 0x54
+
+/* ISC White Balance Control Register */
+#define ISC_WB_CTRL 0x00000058
+
+/* ISC White Balance Configuration Register */
+#define ISC_WB_CFG 0x0000005c
+
+/* ISC White Balance Offset for R, GR Register */
+#define ISC_WB_O_RGR 0x00000060
+
+/* ISC White Balance Offset for B, GB Register */
+#define ISC_WB_O_BGB 0x00000064
+
+/* ISC White Balance Gain for R, GR Register */
+#define ISC_WB_G_RGR 0x00000068
+
+/* ISC White Balance Gain for B, GB Register */
+#define ISC_WB_G_BGB 0x0000006c
+
+/* ISC Color Filter Array Control Register */
+#define ISC_CFA_CTRL 0x00000070
+
+/* ISC Color Filter Array Configuration Register */
+#define ISC_CFA_CFG 0x00000074
+#define ISC_CFA_CFG_EITPOL BIT(4)
+
+#define ISC_BAY_CFG_GRGR 0x0
+#define ISC_BAY_CFG_RGRG 0x1
+#define ISC_BAY_CFG_GBGB 0x2
+#define ISC_BAY_CFG_BGBG 0x3
+
+/* ISC Color Correction Control Register */
+#define ISC_CC_CTRL 0x00000078
+
+/* ISC Color Correction RR RG Register */
+#define ISC_CC_RR_RG 0x0000007c
+
+/* ISC Color Correction RB OR Register */
+#define ISC_CC_RB_OR 0x00000080
+
+/* ISC Color Correction GR GG Register */
+#define ISC_CC_GR_GG 0x00000084
+
+/* ISC Color Correction GB OG Register */
+#define ISC_CC_GB_OG 0x00000088
+
+/* ISC Color Correction BR BG Register */
+#define ISC_CC_BR_BG 0x0000008c
+
+/* ISC Color Correction BB OB Register */
+#define ISC_CC_BB_OB 0x00000090
+
+/* ISC Gamma Correction Control Register */
+#define ISC_GAM_CTRL 0x00000094
+
+#define ISC_GAM_CTRL_BIPART BIT(4)
+
+/* ISC Gamma Correction Blue Entry Register */
+#define ISC_GAM_BENTRY 0x00000098
+
+/* ISC Gamma Correction Green Entry Register */
+#define ISC_GAM_GENTRY 0x00000198
+
+/* ISC Gamma Correction Red Entry Register */
+#define ISC_GAM_RENTRY 0x00000298
+
+/* ISC VHXS Control Register */
+#define ISC_VHXS_CTRL 0x398
+
+/* ISC VHXS Source Size Register */
+#define ISC_VHXS_SS 0x39C
+
+/* ISC VHXS Destination Size Register */
+#define ISC_VHXS_DS 0x3A0
+
+/* ISC Vertical Factor Register */
+#define ISC_VXS_FACT 0x3a4
+
+/* ISC Horizontal Factor Register */
+#define ISC_HXS_FACT 0x3a8
+
+/* ISC Vertical Config Register */
+#define ISC_VXS_CFG 0x3ac
+
+/* ISC Horizontal Config Register */
+#define ISC_HXS_CFG 0x3b0
+
+/* ISC Vertical Tap Register */
+#define ISC_VXS_TAP 0x3b4
+
+/* ISC Horizontal Tap Register */
+#define ISC_HXS_TAP 0x434
+
+/* Offset for CSC register specific to sama5d2 product */
+#define ISC_SAMA5D2_CSC_OFFSET 0
+/* Offset for CSC register specific to sama7g5 product */
+#define ISC_SAMA7G5_CSC_OFFSET 0x11c
+
+/* Color Space Conversion Control Register */
+#define ISC_CSC_CTRL 0x00000398
+
+/* Color Space Conversion YR YG Register */
+#define ISC_CSC_YR_YG 0x0000039c
+
+/* Color Space Conversion YB OY Register */
+#define ISC_CSC_YB_OY 0x000003a0
+
+/* Color Space Conversion CBR CBG Register */
+#define ISC_CSC_CBR_CBG 0x000003a4
+
+/* Color Space Conversion CBB OCB Register */
+#define ISC_CSC_CBB_OCB 0x000003a8
+
+/* Color Space Conversion CRR CRG Register */
+#define ISC_CSC_CRR_CRG 0x000003ac
+
+/* Color Space Conversion CRB OCR Register */
+#define ISC_CSC_CRB_OCR 0x000003b0
+
+/* Offset for CBC register specific to sama5d2 product */
+#define ISC_SAMA5D2_CBC_OFFSET 0
+/* Offset for CBC register specific to sama7g5 product */
+#define ISC_SAMA7G5_CBC_OFFSET 0x11c
+
+/* Contrast And Brightness Control Register */
+#define ISC_CBC_CTRL 0x000003b4
+
+/* Contrast And Brightness Configuration Register */
+#define ISC_CBC_CFG 0x000003b8
+
+/* Brightness Register */
+#define ISC_CBC_BRIGHT 0x000003bc
+#define ISC_CBC_BRIGHT_MASK GENMASK(10, 0)
+
+/* Contrast Register */
+#define ISC_CBC_CONTRAST 0x000003c0
+#define ISC_CBC_CONTRAST_MASK GENMASK(11, 0)
+
+/* Hue Register */
+#define ISC_CBCHS_HUE 0x4e0
+/* Saturation Register */
+#define ISC_CBCHS_SAT 0x4e4
+
+/* Offset for SUB422 register specific to sama5d2 product */
+#define ISC_SAMA5D2_SUB422_OFFSET 0
+/* Offset for SUB422 register specific to sama7g5 product */
+#define ISC_SAMA7G5_SUB422_OFFSET 0x124
+
+/* Subsampling 4:4:4 to 4:2:2 Control Register */
+#define ISC_SUB422_CTRL 0x000003c4
+
+/* Offset for SUB420 register specific to sama5d2 product */
+#define ISC_SAMA5D2_SUB420_OFFSET 0
+/* Offset for SUB420 register specific to sama7g5 product */
+#define ISC_SAMA7G5_SUB420_OFFSET 0x124
+/* Subsampling 4:2:2 to 4:2:0 Control Register */
+#define ISC_SUB420_CTRL 0x000003cc
+
+/* Offset for RLP register specific to sama5d2 product */
+#define ISC_SAMA5D2_RLP_OFFSET 0
+/* Offset for RLP register specific to sama7g5 product */
+#define ISC_SAMA7G5_RLP_OFFSET 0x124
+/* Rounding, Limiting and Packing Configuration Register */
+#define ISC_RLP_CFG 0x000003d0
+
+#define ISC_RLP_CFG_MODE_DAT8 0x0
+#define ISC_RLP_CFG_MODE_DAT9 0x1
+#define ISC_RLP_CFG_MODE_DAT10 0x2
+#define ISC_RLP_CFG_MODE_DAT11 0x3
+#define ISC_RLP_CFG_MODE_DAT12 0x4
+#define ISC_RLP_CFG_MODE_DATY8 0x5
+#define ISC_RLP_CFG_MODE_DATY10 0x6
+#define ISC_RLP_CFG_MODE_ARGB444 0x7
+#define ISC_RLP_CFG_MODE_ARGB555 0x8
+#define ISC_RLP_CFG_MODE_RGB565 0x9
+#define ISC_RLP_CFG_MODE_ARGB32 0xa
+#define ISC_RLP_CFG_MODE_YYCC 0xb
+#define ISC_RLP_CFG_MODE_YYCC_LIMITED 0xc
+#define ISC_RLP_CFG_MODE_YCYC 0xd
+#define ISC_RLP_CFG_MODE_MASK GENMASK(3, 0)
+
+#define ISC_RLP_CFG_LSH BIT(5)
+
+#define ISC_RLP_CFG_YMODE_YUYV (3 << 6)
+#define ISC_RLP_CFG_YMODE_YVYU (2 << 6)
+#define ISC_RLP_CFG_YMODE_VYUY (0 << 6)
+#define ISC_RLP_CFG_YMODE_UYVY (1 << 6)
+
+#define ISC_RLP_CFG_YMODE_MASK GENMASK(7, 6)
+
+/* Offset for HIS register specific to sama5d2 product */
+#define ISC_SAMA5D2_HIS_OFFSET 0
+/* Offset for HIS register specific to sama7g5 product */
+#define ISC_SAMA7G5_HIS_OFFSET 0x124
+/* Histogram Control Register */
+#define ISC_HIS_CTRL 0x000003d4
+
+#define ISC_HIS_CTRL_EN BIT(0)
+#define ISC_HIS_CTRL_DIS 0x0
+
+/* Histogram Configuration Register */
+#define ISC_HIS_CFG 0x000003d8
+
+#define ISC_HIS_CFG_MODE_GR 0x0
+#define ISC_HIS_CFG_MODE_R 0x1
+#define ISC_HIS_CFG_MODE_GB 0x2
+#define ISC_HIS_CFG_MODE_B 0x3
+#define ISC_HIS_CFG_MODE_Y 0x4
+#define ISC_HIS_CFG_MODE_RAW 0x5
+#define ISC_HIS_CFG_MODE_YCCIR656 0x6
+
+#define ISC_HIS_CFG_BAYSEL_SHIFT 4
+
+#define ISC_HIS_CFG_RAR BIT(8)
+
+/* Offset for DMA register specific to sama5d2 product */
+#define ISC_SAMA5D2_DMA_OFFSET 0
+/* Offset for DMA register specific to sama7g5 product */
+#define ISC_SAMA7G5_DMA_OFFSET 0x13c
+
+/* DMA Configuration Register */
+#define ISC_DCFG 0x000003e0
+#define ISC_DCFG_IMODE_PACKED8 0x0
+#define ISC_DCFG_IMODE_PACKED16 0x1
+#define ISC_DCFG_IMODE_PACKED32 0x2
+#define ISC_DCFG_IMODE_YC422SP 0x3
+#define ISC_DCFG_IMODE_YC422P 0x4
+#define ISC_DCFG_IMODE_YC420SP 0x5
+#define ISC_DCFG_IMODE_YC420P 0x6
+#define ISC_DCFG_IMODE_MASK GENMASK(2, 0)
+
+#define ISC_DCFG_YMBSIZE_SINGLE (0x0 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS4 (0x1 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS8 (0x2 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS16 (0x3 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS32 (0x4 << 4)
+#define ISC_DCFG_YMBSIZE_MASK GENMASK(6, 4)
+
+#define ISC_DCFG_CMBSIZE_SINGLE (0x0 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS4 (0x1 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS8 (0x2 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS16 (0x3 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS32 (0x4 << 8)
+#define ISC_DCFG_CMBSIZE_MASK GENMASK(10, 8)
+
+/* DMA Control Register */
+#define ISC_DCTRL 0x000003e4
+
+#define ISC_DCTRL_DVIEW_PACKED (0x0 << 1)
+#define ISC_DCTRL_DVIEW_SEMIPLANAR (0x1 << 1)
+#define ISC_DCTRL_DVIEW_PLANAR (0x2 << 1)
+#define ISC_DCTRL_DVIEW_MASK GENMASK(2, 1)
+
+#define ISC_DCTRL_IE_IS (0x0 << 4)
+
+/* DMA Descriptor Address Register */
+#define ISC_DNDA 0x000003e8
+
+/* DMA Address 0 Register */
+#define ISC_DAD0 0x000003ec
+
+/* DMA Address 1 Register */
+#define ISC_DAD1 0x000003f4
+
+/* DMA Address 2 Register */
+#define ISC_DAD2 0x000003fc
+
+/* Offset for version register specific to sama5d2 product */
+#define ISC_SAMA5D2_VERSION_OFFSET 0
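+/* Offset for version register specific to sama7g5 product */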
+#define ISC_SAMA7G5_VERSION_OFFSET 0x13c
+/* Version Register */
+#define ISC_VERSION 0x0000040c
+
+/* Offset for histogram entry register specific to sama5d2 product */
+#define ISC_SAMA5D2_HIS_ENTRY_OFFSET 0
+/* Offset for histogram entry register specific to sama7g5 product */
+#define ISC_SAMA7G5_HIS_ENTRY_OFFSET 0x14c
+/* Histogram Entry */
+#define ISC_HIS_ENTRY 0x00000410
+
+#endif
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc.h b/drivers/staging/media/deprecated/atmel/atmel-isc.h
new file mode 100644
index 000000000000..31767ea74be6
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip Image Sensor Controller (ISC) driver header file
+ *
+ * Copyright (C) 2016-2019 Microchip Technology, Inc.
+ *
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+#ifndef _ATMEL_ISC_H_
+#define _ATMEL_ISC_H_
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define ISC_CLK_MAX_DIV 255
+
+enum isc_clk_id {
+ ISC_ISPCK = 0,
+ ISC_MCK = 1,
+};
+
+struct isc_clk {
+ struct clk_hw hw;
+ struct clk *clk;
+ struct regmap *regmap;
+ spinlock_t lock; /* serialize access to clock registers */
+ u8 id;
+ u8 parent_id;
+ u32 div;
+ struct device *dev;
+};
+
+#define to_isc_clk(v) container_of(v, struct isc_clk, hw)
+
+struct isc_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct isc_subdev_entity {
+ struct v4l2_subdev *sd;
+ struct v4l2_async_connection *asd;
+ struct device_node *epn;
+ struct v4l2_async_notifier notifier;
+
+ u32 pfe_cfg0;
+
+ struct list_head list;
+};
+
+/*
+ * struct isc_format - ISC media bus format information
+ *   This structure represents the interface between the ISC
+ *   and the sensor. It's the input format received by the ISC.
+ * @fourcc: Fourcc code for this format
+ * @mbus_code: V4L2 media bus format code.
+ * @cfa_baycfg: If this format is RAW BAYER, indicates the Bayer pattern,
+ *   i.e. BGBG, RGRG, etc.
+ * @pfe_cfg0_bps: Number of hardware data lines connected to the ISC
+ */
+
+struct isc_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u32 cfa_baycfg;
+
+ bool sd_support;
+ u32 pfe_cfg0_bps;
+};
+
+/* Pipeline bitmap */
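+/*
+ * One bit per ISC sub-module (see ISC_PIPE_LINE_NODE_NUM); the per-product
+ * adapt_pipeline() callback restricts this bitmap to the modules that are
+ * actually present on a given SoC.
+ */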
+#define DPC_DPCENABLE BIT(0)
+#define DPC_GDCENABLE BIT(1)
+#define DPC_BLCENABLE BIT(2)
+#define WB_ENABLE BIT(3)
+#define CFA_ENABLE BIT(4)
+#define CC_ENABLE BIT(5)
+#define GAM_ENABLE BIT(6)
+#define GAM_BENABLE BIT(7)
+#define GAM_GENABLE BIT(8)
+#define GAM_RENABLE BIT(9)
+#define VHXS_ENABLE BIT(10)
+#define CSC_ENABLE BIT(11)
+#define CBC_ENABLE BIT(12)
+#define SUB422_ENABLE BIT(13)
+#define SUB420_ENABLE BIT(14)
+
+#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
+
+/*
+ * struct fmt_config - ISC format configuration and internal pipeline
+ *   This structure represents the internal configuration of the ISC.
+ *   It also holds the format that the ISC will present to v4l2.
+ * @sd_format: Pointer to an isc_format struct that holds the sensor
+ *   configuration.
+ * @fourcc: Fourcc code for this format.
+ * @bpp: Bytes per pixel in the current format.
+ * @bpp_v4l2: Bytes per pixel in the current format, for v4l2.
+ *   This differs from 'bpp' in the sense that in planar
+ *   formats, it refers only to the first plane.
+ * @rlp_cfg_mode: Configuration of the RLP (rounding, limiting and packing) module
+ * @dcfg_imode: Configuration of the input of the DMA module
+ * @dctrl_dview: Configuration of the output of the DMA module
+ * @bits_pipeline: Configuration of the pipeline, i.e. which modules are enabled
+ */
+struct fmt_config {
+ struct isc_format *sd_format;
+
+ u32 fourcc;
+ u8 bpp;
+ u8 bpp_v4l2;
+
+ u32 rlp_cfg_mode;
+ u32 dcfg_imode;
+ u32 dctrl_dview;
+
+ u32 bits_pipeline;
+};
+
+#define HIST_ENTRIES 512
+#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
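+/*
+ * HIST_BAYER counts the Bayer histogram components (GR, R, GB, B),
+ * i.e. modes ISC_HIS_CFG_MODE_GR through ISC_HIS_CFG_MODE_B.
+ */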
+
+enum {
+ HIST_INIT = 0,
+ HIST_ENABLED,
+ HIST_DISABLED,
+};
+
+struct isc_ctrls {
+ struct v4l2_ctrl_handler handler;
+
+ u32 brightness;
+ u32 contrast;
+ u8 gamma_index;
+#define ISC_WB_NONE 0
+#define ISC_WB_AUTO 1
+#define ISC_WB_ONETIME 2
+ u8 awb;
+
+ /* one for each component : GR, R, GB, B */
+ u32 gain[HIST_BAYER];
+ s32 offset[HIST_BAYER];
+
+ u32 hist_entry[HIST_ENTRIES];
+ u32 hist_count[HIST_BAYER];
+ u8 hist_id;
+ u8 hist_stat;
+#define HIST_MIN_INDEX 0
+#define HIST_MAX_INDEX 1
+ u32 hist_minmax[HIST_BAYER][2];
+};
+
+#define ISC_PIPE_LINE_NODE_NUM 15
+
+/*
+ * struct isc_reg_offsets - ISC device register offsets
+ * @csc: Offset for the CSC register
+ * @cbc: Offset for the CBC register
+ * @sub422: Offset for the SUB422 register
+ * @sub420: Offset for the SUB420 register
+ * @rlp: Offset for the RLP register
+ * @his: Offset for the HIS related registers
+ * @dma: Offset for the DMA related registers
+ * @version: Offset for the version register
+ * @his_entry: Offset for the HIS entries registers
+ */
+struct isc_reg_offsets {
+ u32 csc;
+ u32 cbc;
+ u32 sub422;
+ u32 sub420;
+ u32 rlp;
+ u32 his;
+ u32 dma;
+ u32 version;
+ u32 his_entry;
+};
+
+/*
+ * struct isc_device - ISC device driver data/config struct
+ * @regmap: Register map
+ * @hclock: Hclock clock input (refer to datasheet)
+ * @ispck: ispck clock (refer to datasheet)
+ * @isc_clks: ISC clocks
+ * @ispck_required: ISC requires ISP Clock initialization
+ * @dcfg: DMA master configuration, architecture dependent
+ *
+ * @dev: Registered device driver
+ * @v4l2_dev: v4l2 registered device
+ * @video_dev: registered video device
+ *
+ * @vb2_vidq: video buffer 2 video queue
+ * @dma_queue_lock: lock to serialize the dma buffer queue
+ * @dma_queue: the queue for dma buffers
+ * @cur_frm: current isc frame/buffer
+ * @sequence: current frame number
+ * @stop: true if isc is not streaming, false if streaming
+ * @comp: completion reference that signals frame completion
+ *
+ * @fmt: current v4l2 format
+ * @user_formats: list of formats that are supported and agreed with sd
+ * @num_user_formats: how many formats are in user_formats
+ *
+ * @config: current ISC format configuration
+ * @try_config: the current ISC try format, not yet activated
+ *
+ * @ctrls: holds information about ISC controls
+ * @do_wb_ctrl: control regarding the DO_WHITE_BALANCE button
+ * @awb_work: workqueue reference for auto white balance histogram
+ * analysis
+ *
+ * @lock: lock for serializing userspace file operations
+ * with ISC operations
+ * @awb_mutex: serialize access to streaming status from awb work queue
+ * @awb_lock: lock for serializing awb work queue operations
+ * with DMA/buffer operations
+ *
+ * @pipeline: configuration of the ISC pipeline
+ *
+ * @current_subdev: current subdevice: the sensor
+ * @subdev_entities: list of subdevice entities
+ *
+ * @gamma_table: pointer to the table with gamma values, has
+ * gamma_max sets of GAMMA_ENTRIES entries each
+ * @gamma_max: maximum index of the sets inside the gamma_table
+ *
+ * @max_width: maximum frame width, dependent on the internal RAM
+ * @max_height: maximum frame height, dependent on the internal RAM
+ *
+ * @config_dpc: pointer to a function that initializes product
+ * specific DPC module
+ * @config_csc: pointer to a function that initializes product
+ * specific CSC module
+ * @config_cbc: pointer to a function that initializes product
+ * specific CBC module
+ * @config_cc: pointer to a function that initializes product
+ * specific CC module
+ * @config_gam: pointer to a function that initializes product
+ * specific GAMMA module
+ * @config_rlp: pointer to a function that initializes product
+ * specific RLP module
+ * @config_ctrls: pointer to a function that initializes product
+ * specific v4l2 controls.
+ *
+ * @adapt_pipeline: pointer to a function that adapts the pipeline bits
+ * to the product specific pipeline
+ *
+ * @offsets: struct holding the product specific register offsets
+ * @controller_formats: pointer to the array of possible formats that the
+ * controller can output
+ * @formats_list: pointer to the array of possible formats that can
+ * be used as an input to the controller
+ * @controller_formats_size: size of controller_formats array
+ * @formats_list_size: size of formats_list array
+ */
+struct isc_device {
+ struct regmap *regmap;
+ struct clk *hclock;
+ struct clk *ispck;
+ struct isc_clk isc_clks[2];
+ bool ispck_required;
+ u32 dcfg;
+
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video_dev;
+
+ struct vb2_queue vb2_vidq;
+ spinlock_t dma_queue_lock;
+ struct list_head dma_queue;
+ struct isc_buffer *cur_frm;
+ unsigned int sequence;
+ bool stop;
+ struct completion comp;
+
+ struct v4l2_format fmt;
+ struct isc_format **user_formats;
+ unsigned int num_user_formats;
+
+ struct fmt_config config;
+ struct fmt_config try_config;
+
+ struct isc_ctrls ctrls;
+ struct work_struct awb_work;
+
+ struct mutex lock;
+ struct mutex awb_mutex;
+ spinlock_t awb_lock;
+
+ struct regmap_field *pipeline[ISC_PIPE_LINE_NODE_NUM];
+
+ struct isc_subdev_entity *current_subdev;
+ struct list_head subdev_entities;
+
+ struct {
+#define ISC_CTRL_DO_WB 1
+#define ISC_CTRL_R_GAIN 2
+#define ISC_CTRL_B_GAIN 3
+#define ISC_CTRL_GR_GAIN 4
+#define ISC_CTRL_GB_GAIN 5
+#define ISC_CTRL_R_OFF 6
+#define ISC_CTRL_B_OFF 7
+#define ISC_CTRL_GR_OFF 8
+#define ISC_CTRL_GB_OFF 9
+ struct v4l2_ctrl *awb_ctrl;
+ struct v4l2_ctrl *do_wb_ctrl;
+ struct v4l2_ctrl *r_gain_ctrl;
+ struct v4l2_ctrl *b_gain_ctrl;
+ struct v4l2_ctrl *gr_gain_ctrl;
+ struct v4l2_ctrl *gb_gain_ctrl;
+ struct v4l2_ctrl *r_off_ctrl;
+ struct v4l2_ctrl *b_off_ctrl;
+ struct v4l2_ctrl *gr_off_ctrl;
+ struct v4l2_ctrl *gb_off_ctrl;
+ };
+
+#define GAMMA_ENTRIES 64
+ /* pointer to the defined gamma table */
+ const u32 (*gamma_table)[GAMMA_ENTRIES];
+ u32 gamma_max;
+
+ u32 max_width;
+ u32 max_height;
+
+ struct {
+ void (*config_dpc)(struct isc_device *isc);
+ void (*config_csc)(struct isc_device *isc);
+ void (*config_cbc)(struct isc_device *isc);
+ void (*config_cc)(struct isc_device *isc);
+ void (*config_gam)(struct isc_device *isc);
+ void (*config_rlp)(struct isc_device *isc);
+
+ void (*config_ctrls)(struct isc_device *isc,
+ const struct v4l2_ctrl_ops *ops);
+
+ void (*adapt_pipeline)(struct isc_device *isc);
+ };
+
+ struct isc_reg_offsets offsets;
+ const struct isc_format *controller_formats;
+ struct isc_format *formats_list;
+ u32 controller_formats_size;
+ u32 formats_list_size;
+};
+
+extern const struct regmap_config atmel_isc_regmap_config;
+extern const struct v4l2_async_notifier_operations atmel_isc_async_ops;
+
+irqreturn_t atmel_isc_interrupt(int irq, void *dev_id);
+int atmel_isc_pipeline_init(struct isc_device *isc);
+int atmel_isc_clk_init(struct isc_device *isc);
+void atmel_isc_subdev_cleanup(struct isc_device *isc);
+void atmel_isc_clk_cleanup(struct isc_device *isc);
+
+#endif
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
new file mode 100644
index 000000000000..71e6e278a4b3
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Image Sensor Controller (ISC) driver
+ *
+ * Copyright (C) 2016-2019 Microchip Technology, Inc.
+ *
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ *
+ * Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA
+ *
+ * ISC video pipeline integrates the following submodules:
+ * PFE: Parallel Front End to sample the camera sensor input stream
+ * WB: Programmable white balance in the Bayer domain
+ * CFA: Color filter array interpolation module
+ * CC: Programmable color correction
+ * GAM: Gamma correction
+ * CSC: Programmable color space conversion
+ * CBC: Contrast and Brightness control
+ * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
+ * RLP: This module performs rounding, range limiting
+ * and packing of the incoming data
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
+
+#define ISC_SAMA5D2_MAX_SUPPORT_WIDTH 2592
+#define ISC_SAMA5D2_MAX_SUPPORT_HEIGHT 1944
+
+#define ISC_SAMA5D2_PIPELINE \
+ (WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \
+ CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE)
+
+/* This is a list of the formats that the ISC can *output* */
+static const struct isc_format sama5d2_controller_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ }, {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ }, {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ },
+};
+
+/* This is a list of formats that the ISC can receive as *input* */
+static struct isc_format sama5d2_formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ },
+
+};
+
+static void isc_sama5d2_config_csc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Convert RGB to YUV */
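+ /*
+ * Each CSC register packs two values: a coefficient in its low half-word
+ * and a second coefficient (or offset) in bits 16 and up. The constants
+ * below encode a BT.601-style limited-range RGB->YCbCr matrix in signed
+ * fixed point with 8 fractional bits, e.g. 0x42 = 66/256 ~= 0.257 (YR)
+ * and 0xFDA ~= -0.148 (CBR); 0x10 and 0x80 are the usual Y/C offsets.
+ */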
+ regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc,
+ 0x42 | (0x81 << 16));
+ regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc,
+ 0x19 | (0x10 << 16));
+ regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc,
+ 0xFDA | (0xFB6 << 16));
+ regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc,
+ 0x70 | (0x80 << 16));
+ regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc,
+ 0x70 | (0xFA2 << 16));
+ regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc,
+ 0xFEE | (0x80 << 16));
+}
+
+static void isc_sama5d2_config_cbc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc,
+ isc->ctrls.brightness);
+ regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc,
+ isc->ctrls.contrast);
+}
+
+static void isc_sama5d2_config_cc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Configure each register at the neutral fixed point 1.0 or 0.0 */
+ regmap_write(regmap, ISC_CC_RR_RG, (1 << 8));
+ regmap_write(regmap, ISC_CC_RB_OR, 0);
+ regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16);
+ regmap_write(regmap, ISC_CC_GB_OG, 0);
+ regmap_write(regmap, ISC_CC_BR_BG, 0);
+ regmap_write(regmap, ISC_CC_BB_OB, (1 << 8));
+}
+
+static void isc_sama5d2_config_ctrls(struct isc_device *isc,
+ const struct v4l2_ctrl_ops *ops)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+
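+ /*
+ * The default contrast of 256 is the neutral (1.0) gain, assuming the
+ * CBC contrast field uses 8 fractional bits.
+ */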
+ ctrls->contrast = 256;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
+}
+
+static void isc_sama5d2_config_dpc(struct isc_device *isc)
+{
+ /* This module is not present on sama5d2 pipeline */
+}
+
+static void isc_sama5d2_config_gam(struct isc_device *isc)
+{
+ /* No specific gamma configuration */
+}
+
+static void isc_sama5d2_config_rlp(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 rlp_mode = isc->config.rlp_cfg_mode;
+
+ /*
+ * In sama5d2, the YUV planar modes and the YUYV modes are treated
+ * in the same way in RLP register.
+ * Normally, YYCC mode should be Luma(n) - Color B(n) - Color R (n)
+ * and YCYC should be Luma(n + 1) - Color B (n) - Luma (n) - Color R (n)
+ * but in sama5d2, the YCYC mode does not exist, and YYCC must be
+ * selected for both planar and interleaved modes, as in fact
+ * both modes are supported.
+ *
+ * Thus, if the YCYC mode is selected, replace it with the
+ * sama5d2-compliant mode, which is YYCC.
+ */
+ if ((rlp_mode & ISC_RLP_CFG_MODE_MASK) == ISC_RLP_CFG_MODE_YCYC) {
+ rlp_mode &= ~ISC_RLP_CFG_MODE_MASK;
+ rlp_mode |= ISC_RLP_CFG_MODE_YYCC;
+ }
+
+ regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
+ ISC_RLP_CFG_MODE_MASK, rlp_mode);
+}
+
+static void isc_sama5d2_adapt_pipeline(struct isc_device *isc)
+{
+ isc->try_config.bits_pipeline &= ISC_SAMA5D2_PIPELINE;
+}
+
+/* Gamma tables with gamma 1/1.8, 1/2 and 1/2.2 */
+static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = {
+ /* 0 --> gamma 1/1.8 */
+ { 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A,
+ 0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012,
+ 0x1940012, 0x1A60012, 0x1B80011, 0x1C90010, 0x1DA0010, 0x1EA000F,
+ 0x1FA000F, 0x209000F, 0x218000F, 0x227000E, 0x235000E, 0x243000E,
+ 0x251000E, 0x25F000D, 0x26C000D, 0x279000D, 0x286000D, 0x293000C,
+ 0x2A0000C, 0x2AC000C, 0x2B8000C, 0x2C4000C, 0x2D0000B, 0x2DC000B,
+ 0x2E7000B, 0x2F3000B, 0x2FE000B, 0x309000B, 0x314000B, 0x31F000A,
+ 0x32A000A, 0x334000B, 0x33F000A, 0x349000A, 0x354000A, 0x35E000A,
+ 0x368000A, 0x372000A, 0x37C000A, 0x386000A, 0x3900009, 0x399000A,
+ 0x3A30009, 0x3AD0009, 0x3B60009, 0x3BF000A, 0x3C90009, 0x3D20009,
+ 0x3DB0009, 0x3E40009, 0x3ED0009, 0x3F60009 },
+
+ /* 1 --> gamma 1/2 */
+ { 0x7F, 0x800034, 0xB50028, 0xDE0021, 0x100001E, 0x11E001B,
+ 0x1390019, 0x1520017, 0x16A0015, 0x1800014, 0x1940014, 0x1A80013,
+ 0x1BB0012, 0x1CD0011, 0x1DF0010, 0x1EF0010, 0x200000F, 0x20F000F,
+ 0x21F000E, 0x22D000F, 0x23C000E, 0x24A000E, 0x258000D, 0x265000D,
+ 0x273000C, 0x27F000D, 0x28C000C, 0x299000C, 0x2A5000C, 0x2B1000B,
+ 0x2BC000C, 0x2C8000B, 0x2D3000C, 0x2DF000B, 0x2EA000A, 0x2F5000A,
+ 0x2FF000B, 0x30A000A, 0x314000B, 0x31F000A, 0x329000A, 0x333000A,
+ 0x33D0009, 0x3470009, 0x350000A, 0x35A0009, 0x363000A, 0x36D0009,
+ 0x3760009, 0x37F0009, 0x3880009, 0x3910009, 0x39A0009, 0x3A30009,
+ 0x3AC0008, 0x3B40009, 0x3BD0008, 0x3C60008, 0x3CE0008, 0x3D60009,
+ 0x3DF0008, 0x3E70008, 0x3EF0008, 0x3F70008 },
+
+ /* 2 --> gamma 1/2.2 */
+ { 0x99, 0x9B0038, 0xD4002A, 0xFF0023, 0x122001F, 0x141001B,
+ 0x15D0019, 0x1760017, 0x18E0015, 0x1A30015, 0x1B80013, 0x1CC0012,
+ 0x1DE0011, 0x1F00010, 0x2010010, 0x2110010, 0x221000F, 0x230000F,
+ 0x23F000E, 0x24D000E, 0x25B000D, 0x269000C, 0x276000C, 0x283000C,
+ 0x28F000C, 0x29B000C, 0x2A7000C, 0x2B3000B, 0x2BF000B, 0x2CA000B,
+ 0x2D5000B, 0x2E0000A, 0x2EB000A, 0x2F5000A, 0x2FF000A, 0x30A000A,
+ 0x3140009, 0x31E0009, 0x327000A, 0x3310009, 0x33A0009, 0x3440009,
+ 0x34D0009, 0x3560009, 0x35F0009, 0x3680008, 0x3710008, 0x3790009,
+ 0x3820008, 0x38A0008, 0x3930008, 0x39B0008, 0x3A30008, 0x3AB0008,
+ 0x3B30008, 0x3BB0008, 0x3C30008, 0x3CB0007, 0x3D20008, 0x3DA0007,
+ 0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 },
+};
+
+static int isc_parse_dt(struct device *dev, struct isc_device *isc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *epn;
+ struct isc_subdev_entity *subdev_entity;
+ unsigned int flags;
+ int ret = -EINVAL;
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+
+ for_each_endpoint_of_node(np, epn) {
+ struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
+ &v4l2_epn);
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(dev, "Could not parse the endpoint\n");
+ break;
+ }
+
+ subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
+ GFP_KERNEL);
+ if (!subdev_entity) {
+ ret = -ENOMEM;
+ break;
+ }
+ subdev_entity->epn = epn;
+
+ flags = v4l2_epn.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+
+ if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
+ ISC_PFE_CFG0_CCIR656;
+
+ list_add_tail(&subdev_entity->list, &isc->subdev_entities);
+ }
+ of_node_put(epn);
+
+ return ret;
+}
+
+static int atmel_isc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct isc_device *isc;
+ void __iomem *io_base;
+ struct isc_subdev_entity *subdev_entity;
+ int irq;
+ int ret;
+ u32 ver;
+
+ isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
+ if (!isc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, isc);
+ isc->dev = dev;
+
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ isc->regmap = devm_regmap_init_mmio(dev, io_base, &atmel_isc_regmap_config);
+ if (IS_ERR(isc->regmap)) {
+ ret = PTR_ERR(isc->regmap);
+ dev_err(dev, "failed to init register map: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, atmel_isc_interrupt, 0,
+ "atmel-sama5d2-isc", isc);
+ if (ret < 0) {
+ dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
+ irq, ret);
+ return ret;
+ }
+
+ isc->gamma_table = isc_sama5d2_gamma_table;
+ isc->gamma_max = 2;
+
+ isc->max_width = ISC_SAMA5D2_MAX_SUPPORT_WIDTH;
+ isc->max_height = ISC_SAMA5D2_MAX_SUPPORT_HEIGHT;
+
+ isc->config_dpc = isc_sama5d2_config_dpc;
+ isc->config_csc = isc_sama5d2_config_csc;
+ isc->config_cbc = isc_sama5d2_config_cbc;
+ isc->config_cc = isc_sama5d2_config_cc;
+ isc->config_gam = isc_sama5d2_config_gam;
+ isc->config_rlp = isc_sama5d2_config_rlp;
+ isc->config_ctrls = isc_sama5d2_config_ctrls;
+
+ isc->adapt_pipeline = isc_sama5d2_adapt_pipeline;
+
+ isc->offsets.csc = ISC_SAMA5D2_CSC_OFFSET;
+ isc->offsets.cbc = ISC_SAMA5D2_CBC_OFFSET;
+ isc->offsets.sub422 = ISC_SAMA5D2_SUB422_OFFSET;
+ isc->offsets.sub420 = ISC_SAMA5D2_SUB420_OFFSET;
+ isc->offsets.rlp = ISC_SAMA5D2_RLP_OFFSET;
+ isc->offsets.his = ISC_SAMA5D2_HIS_OFFSET;
+ isc->offsets.dma = ISC_SAMA5D2_DMA_OFFSET;
+ isc->offsets.version = ISC_SAMA5D2_VERSION_OFFSET;
+ isc->offsets.his_entry = ISC_SAMA5D2_HIS_ENTRY_OFFSET;
+
+ isc->controller_formats = sama5d2_controller_formats;
+ isc->controller_formats_size = ARRAY_SIZE(sama5d2_controller_formats);
+ isc->formats_list = sama5d2_formats_list;
+ isc->formats_list_size = ARRAY_SIZE(sama5d2_formats_list);
+
+ /* sama5d2-isc - 8 bits per beat */
+ isc->dcfg = ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
+
+ /* sama5d2-isc: ISPCK is mandatory for this product */
+ isc->ispck_required = true;
+
+ ret = atmel_isc_pipeline_init(isc);
+ if (ret)
+ return ret;
+
+ isc->hclock = devm_clk_get(dev, "hclock");
+ if (IS_ERR(isc->hclock)) {
+ ret = PTR_ERR(isc->hclock);
+ dev_err(dev, "failed to get hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = atmel_isc_clk_init(isc);
+ if (ret) {
+ dev_err(dev, "failed to init isc clock: %d\n", ret);
+ goto unprepare_hclk;
+ }
+ ret = v4l2_device_register(dev, &isc->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "unable to register v4l2 device.\n");
+ goto unprepare_clk;
+ }
+
+ ret = isc_parse_dt(dev, isc);
+ if (ret) {
+ dev_err(dev, "fail to parse device tree\n");
+ goto unregister_v4l2_device;
+ }
+
+ if (list_empty(&isc->subdev_entities)) {
+ dev_err(dev, "no subdev found\n");
+ ret = -ENODEV;
+ goto unregister_v4l2_device;
+ }
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
+
+ v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_connection);
+
+ of_node_put(subdev_entity->epn);
+ subdev_entity->epn = NULL;
+
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto cleanup_subdev;
+ }
+
+ subdev_entity->notifier.ops = &atmel_isc_async_ops;
+
+ ret = v4l2_async_nf_register(&subdev_entity->notifier);
+ if (ret) {
+ dev_err(dev, "fail to register async notifier\n");
+ goto cleanup_subdev;
+ }
+
+ if (video_is_registered(&isc->video_dev))
+ break;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_request_idle(dev);
+
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto disable_pm;
+ }
+
+ /* ispck should be greater than or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto unprepare_clk;
+ }
+
+ regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
+ dev_info(dev, "Microchip ISC version %x\n", ver);
+
+ return 0;
+
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+
+disable_pm:
+ pm_runtime_disable(dev);
+
+cleanup_subdev:
+ atmel_isc_subdev_cleanup(isc);
+
+unregister_v4l2_device:
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
+ atmel_isc_clk_cleanup(isc);
+
+ return ret;
+}
+
+static void atmel_isc_remove(struct platform_device *pdev)
+{
+ struct isc_device *isc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ atmel_isc_subdev_cleanup(isc);
+
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ atmel_isc_clk_cleanup(isc);
+}
+
+static int __maybe_unused isc_runtime_suspend(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ return 0;
+}
+
+static int __maybe_unused isc_runtime_resume(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret)
+ clk_disable_unprepare(isc->hclock);
+
+ return ret;
+}
+
+static const struct dev_pm_ops atmel_isc_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL)
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id atmel_isc_of_match[] = {
+ { .compatible = "atmel,sama5d2-isc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atmel_isc_of_match);
+#endif
+
+static struct platform_driver atmel_isc_driver = {
+ .probe = atmel_isc_probe,
+ .remove = atmel_isc_remove,
+ .driver = {
+ .name = "atmel-sama5d2-isc",
+ .pm = &atmel_isc_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_isc_of_match),
+ },
+};
+
+module_platform_driver(atmel_isc_driver);
+
+MODULE_AUTHOR("Songjun Wu");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
new file mode 100644
index 000000000000..1f74c2dd044c
--- /dev/null
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip eXtended Image Sensor Controller (XISC) driver
+ *
+ * Copyright (C) 2019-2021 Microchip Technology, Inc. and its subsidiaries
+ *
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ * Sensor-->PFE-->DPC-->WB-->CFA-->CC-->GAM-->VHXS-->CSC-->CBHS-->SUB-->RLP-->DMA-->HIS
+ *
+ * ISC video pipeline integrates the following submodules:
+ * PFE: Parallel Front End to sample the camera sensor input stream
+ * DPC: Defective Pixel Correction with black offset correction, green disparity
+ * correction and defective pixel correction (3 modules total)
+ * WB: Programmable white balance in the Bayer domain
+ * CFA: Color filter array interpolation module
+ * CC: Programmable color correction
+ * GAM: Gamma correction
+ *VHXS: Vertical and Horizontal Scaler
+ * CSC: Programmable color space conversion
+ *CBHS: Contrast Brightness Hue and Saturation control
+ * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
+ * RLP: This module performs rounding, range limiting
+ * and packing of the incoming data
+ * DMA: This module performs DMA master accesses to write frames to external RAM
+ * HIS: Histogram module performs statistic counters on the frames
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
+
+#define ISC_SAMA7G5_MAX_SUPPORT_WIDTH 3264
+#define ISC_SAMA7G5_MAX_SUPPORT_HEIGHT 2464
+
+#define ISC_SAMA7G5_PIPELINE \
+ (WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \
+ CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE)
+
+/* This is a list of the formats that the ISC can *output* */
+static const struct isc_format sama7g5_controller_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ }, {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ }, {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ },
+};
+
+/* This is a list of formats that the ISC can receive as *input* */
+static struct isc_format sama7g5_formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ },
+};
+
+static void isc_sama7g5_config_csc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Convert RGB to YUV */
+ regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc,
+ 0x42 | (0x81 << 16));
+ regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc,
+ 0x19 | (0x10 << 16));
+ regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc,
+ 0xFDA | (0xFB6 << 16));
+ regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc,
+ 0x70 | (0x80 << 16));
+ regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc,
+ 0x70 | (0xFA2 << 16));
+ regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc,
+ 0xFEE | (0x80 << 16));
+}
+
+static void isc_sama7g5_config_cbc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Configure what is set via v4l2 ctrls */
+ regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc, isc->ctrls.brightness);
+ regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc, isc->ctrls.contrast);
+ /* Configure Hue and Saturation as neutral midpoint */
+ regmap_write(regmap, ISC_CBCHS_HUE, 0);
+ regmap_write(regmap, ISC_CBCHS_SAT, (1 << 4));
+}
+
+static void isc_sama7g5_config_cc(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
+ /* Configure each register at the neutral fixed point 1.0 or 0.0 */
+ regmap_write(regmap, ISC_CC_RR_RG, (1 << 8));
+ regmap_write(regmap, ISC_CC_RB_OR, 0);
+ regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16);
+ regmap_write(regmap, ISC_CC_GB_OG, 0);
+ regmap_write(regmap, ISC_CC_BR_BG, 0);
+ regmap_write(regmap, ISC_CC_BB_OB, (1 << 8));
+}
+
+static void isc_sama7g5_config_ctrls(struct isc_device *isc,
+ const struct v4l2_ctrl_ops *ops)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+
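+ /*
+ * On the XISC the neutral contrast gain is 16 rather than 256,
+ * presumably because the CBC/CBCHS fields here use 4 fractional bits
+ * (compare the saturation midpoint of 1 << 4 in isc_sama7g5_config_cbc()).
+ */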
+ ctrls->contrast = 16;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 16);
+}
+
+static void isc_sama7g5_config_dpc(struct isc_device *isc)
+{
+ u32 bay_cfg = isc->config.sd_format->cfa_baycfg;
+ struct regmap *regmap = isc->regmap;
+
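+ /*
+ * Apply a default black level offset of 64 and match the DPC Bayer
+ * layout to the current sensor format.
+ */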
+ regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BLOFF_MASK,
+ (64 << ISC_DPC_CFG_BLOFF_SHIFT));
+ regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BAYCFG_MASK,
+ (bay_cfg << ISC_DPC_CFG_BAYCFG_SHIFT));
+}
+
+static void isc_sama7g5_config_gam(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+
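+ /*
+ * The gamma table for this product (see isc_sama7g5_gamma_table) is
+ * laid out in bipartite form, hence the BIPART bit.
+ */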
+ regmap_update_bits(regmap, ISC_GAM_CTRL, ISC_GAM_CTRL_BIPART,
+ ISC_GAM_CTRL_BIPART);
+}
+
+static void isc_sama7g5_config_rlp(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 rlp_mode = isc->config.rlp_cfg_mode;
+
+ regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
+ ISC_RLP_CFG_MODE_MASK | ISC_RLP_CFG_LSH |
+ ISC_RLP_CFG_YMODE_MASK, rlp_mode);
+}
+
+static void isc_sama7g5_adapt_pipeline(struct isc_device *isc)
+{
+ isc->try_config.bits_pipeline &= ISC_SAMA7G5_PIPELINE;
+}
+
+/* Gamma table with gamma 1/2.2 */
+static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
+ /* index 0 --> gamma bipartite */
+ {
+ 0x980, 0x4c0320, 0x650260, 0x7801e0, 0x8701a0, 0x940180,
+ 0xa00160, 0xab0120, 0xb40120, 0xbd0120, 0xc60100, 0xce0100,
+ 0xd600e0, 0xdd00e0, 0xe400e0, 0xeb00c0, 0xf100c0, 0xf700c0,
+ 0xfd00c0, 0x10300a0, 0x10800c0, 0x10e00a0, 0x11300a0, 0x11800a0,
+ 0x11d00a0, 0x12200a0, 0x12700a0, 0x12c0080, 0x13000a0, 0x1350080,
+ 0x13900a0, 0x13e0080, 0x1420076, 0x17d0062, 0x1ae0054, 0x1d8004a,
+ 0x1fd0044, 0x21f003e, 0x23e003a, 0x25b0036, 0x2760032, 0x28f0030,
+ 0x2a7002e, 0x2be002c, 0x2d4002c, 0x2ea0028, 0x2fe0028, 0x3120026,
+ 0x3250024, 0x3370024, 0x3490022, 0x35a0022, 0x36b0020, 0x37b0020,
+ 0x38b0020, 0x39b001e, 0x3aa001e, 0x3b9001c, 0x3c7001c, 0x3d5001c,
+ 0x3e3001c, 0x3f1001c, 0x3ff001a, 0x40c001a },
+};
+
+static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *epn;
+ struct isc_subdev_entity *subdev_entity;
+ unsigned int flags;
+ int ret = -EINVAL;
+ bool mipi_mode;
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+
+ mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");
+
+ for_each_endpoint_of_node(np, epn) {
+ struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
+ &v4l2_epn);
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(dev, "Could not parse the endpoint\n");
+ break;
+ }
+
+ subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
+ GFP_KERNEL);
+ if (!subdev_entity) {
+ ret = -ENOMEM;
+ break;
+ }
+ subdev_entity->epn = epn;
+
+ flags = v4l2_epn.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+
+ if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
+ ISC_PFE_CFG0_CCIR656;
+
+ if (mipi_mode)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_MIPI;
+
+ list_add_tail(&subdev_entity->list, &isc->subdev_entities);
+ }
+ of_node_put(epn);
+
+ return ret;
+}
+
+static int microchip_xisc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct isc_device *isc;
+ void __iomem *io_base;
+ struct isc_subdev_entity *subdev_entity;
+ int irq;
+ int ret;
+ u32 ver;
+
+ isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
+ if (!isc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, isc);
+ isc->dev = dev;
+
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ isc->regmap = devm_regmap_init_mmio(dev, io_base, &atmel_isc_regmap_config);
+ if (IS_ERR(isc->regmap)) {
+ ret = PTR_ERR(isc->regmap);
+ dev_err(dev, "failed to init register map: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, atmel_isc_interrupt, 0,
+ "microchip-sama7g5-xisc", isc);
+ if (ret < 0) {
+ dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
+ irq, ret);
+ return ret;
+ }
+
+ isc->gamma_table = isc_sama7g5_gamma_table;
+ isc->gamma_max = 0;
+
+ isc->max_width = ISC_SAMA7G5_MAX_SUPPORT_WIDTH;
+ isc->max_height = ISC_SAMA7G5_MAX_SUPPORT_HEIGHT;
+
+ isc->config_dpc = isc_sama7g5_config_dpc;
+ isc->config_csc = isc_sama7g5_config_csc;
+ isc->config_cbc = isc_sama7g5_config_cbc;
+ isc->config_cc = isc_sama7g5_config_cc;
+ isc->config_gam = isc_sama7g5_config_gam;
+ isc->config_rlp = isc_sama7g5_config_rlp;
+ isc->config_ctrls = isc_sama7g5_config_ctrls;
+
+ isc->adapt_pipeline = isc_sama7g5_adapt_pipeline;
+
+ isc->offsets.csc = ISC_SAMA7G5_CSC_OFFSET;
+ isc->offsets.cbc = ISC_SAMA7G5_CBC_OFFSET;
+ isc->offsets.sub422 = ISC_SAMA7G5_SUB422_OFFSET;
+ isc->offsets.sub420 = ISC_SAMA7G5_SUB420_OFFSET;
+ isc->offsets.rlp = ISC_SAMA7G5_RLP_OFFSET;
+ isc->offsets.his = ISC_SAMA7G5_HIS_OFFSET;
+ isc->offsets.dma = ISC_SAMA7G5_DMA_OFFSET;
+ isc->offsets.version = ISC_SAMA7G5_VERSION_OFFSET;
+ isc->offsets.his_entry = ISC_SAMA7G5_HIS_ENTRY_OFFSET;
+
+ isc->controller_formats = sama7g5_controller_formats;
+ isc->controller_formats_size = ARRAY_SIZE(sama7g5_controller_formats);
+ isc->formats_list = sama7g5_formats_list;
+ isc->formats_list_size = ARRAY_SIZE(sama7g5_formats_list);
+
+ /* sama7g5-isc RAM access port is full AXI4 - 32 bits per beat */
+ isc->dcfg = ISC_DCFG_YMBSIZE_BEATS32 | ISC_DCFG_CMBSIZE_BEATS32;
+
+ /* sama7g5-isc: ISPCK does not exist, the ISC is clocked by MCK */
+ isc->ispck_required = false;
+
+ ret = atmel_isc_pipeline_init(isc);
+ if (ret)
+ return ret;
+
+ isc->hclock = devm_clk_get(dev, "hclock");
+ if (IS_ERR(isc->hclock)) {
+ ret = PTR_ERR(isc->hclock);
+ dev_err(dev, "failed to get hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = atmel_isc_clk_init(isc);
+ if (ret) {
+ dev_err(dev, "failed to init isc clock: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
+ ret = v4l2_device_register(dev, &isc->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "unable to register v4l2 device.\n");
+ goto unprepare_hclk;
+ }
+
+ ret = xisc_parse_dt(dev, isc);
+ if (ret) {
+ dev_err(dev, "fail to parse device tree\n");
+ goto unregister_v4l2_device;
+ }
+
+ if (list_empty(&isc->subdev_entities)) {
+ dev_err(dev, "no subdev found\n");
+ ret = -ENODEV;
+ goto unregister_v4l2_device;
+ }
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
+
+ v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_connection);
+
+ of_node_put(subdev_entity->epn);
+ subdev_entity->epn = NULL;
+
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto cleanup_subdev;
+ }
+
+ subdev_entity->notifier.ops = &atmel_isc_async_ops;
+
+ ret = v4l2_async_nf_register(&subdev_entity->notifier);
+ if (ret) {
+ dev_err(dev, "fail to register async notifier\n");
+ goto cleanup_subdev;
+ }
+
+ if (video_is_registered(&isc->video_dev))
+ break;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_request_idle(dev);
+
+ regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
+ dev_info(dev, "Microchip XISC version %x\n", ver);
+
+ return 0;
+
+cleanup_subdev:
+ atmel_isc_subdev_cleanup(isc);
+
+unregister_v4l2_device:
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
+ atmel_isc_clk_cleanup(isc);
+
+ return ret;
+}
+
+static void microchip_xisc_remove(struct platform_device *pdev)
+{
+ struct isc_device *isc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ atmel_isc_subdev_cleanup(isc);
+
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+ clk_disable_unprepare(isc->hclock);
+
+ atmel_isc_clk_cleanup(isc);
+}
+
+static int __maybe_unused xisc_runtime_suspend(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isc->hclock);
+
+ return 0;
+}
+
+static int __maybe_unused xisc_runtime_resume(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(isc->hclock);
+}
+
+static const struct dev_pm_ops microchip_xisc_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(xisc_runtime_suspend, xisc_runtime_resume, NULL)
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id microchip_xisc_of_match[] = {
+ { .compatible = "microchip,sama7g5-isc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, microchip_xisc_of_match);
+#endif
+
+static struct platform_driver microchip_xisc_driver = {
+ .probe = microchip_xisc_probe,
+ .remove = microchip_xisc_remove,
+ .driver = {
+ .name = "microchip-sama7g5-xisc",
+ .pm = &microchip_xisc_dev_pm_ops,
+ .of_match_table = of_match_ptr(microchip_xisc_of_match),
+ },
+};
+
+module_platform_driver(microchip_xisc_driver);
+
+MODULE_AUTHOR("Eugen Hristev <eugen.hristev@microchip.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Microchip-XISC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
new file mode 100644
index 000000000000..1cd48028b641
--- /dev/null
+++ b/drivers/staging/media/imx/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_IMX_MEDIA
+ tristate "i.MX5/6 V4L2 media drivers"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_DMA
+ depends on VIDEO_DEV
+ depends on IMX_IPUV3_CORE
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Say yes here to enable support for video4linux media controller
+ drivers for the i.MX5/6 SOC.
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
new file mode 100644
index 000000000000..330e0825f506
--- /dev/null
+++ b/drivers/staging/media/imx/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+imx-media-common-objs := imx-media-capture.o imx-media-dev-common.o \
+ imx-media-of.o imx-media-utils.o
+
+imx6-media-objs := imx-media-dev.o imx-media-internal-sd.o \
+ imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o imx-media-vdic.o \
+ imx-media-csc-scaler.o
+
+imx6-media-csi-objs := imx-media-csi.o imx-media-fim.o
+
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-media.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-media-csi.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-mipi-csi2.o
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
new file mode 100644
index 000000000000..11c9e10d34ae
--- /dev/null
+++ b/drivers/staging/media/imx/TODO
@@ -0,0 +1,13 @@
+
+- The Frame Interval Monitor could be exported to v4l2-core for
+ general use.
+
+- This media driver supports inheriting V4L2 controls from the
+  subdevices in a capture device's pipeline to the video capture
+  device. The controls for each capture device are updated in the
+  link_notify callback when the pipeline is modified. This feature
+  should be removed; userspace should use the subdev-based userspace
+  API instead.
+
+- As with the legacy control handling, the legacy format handling, where
+  formats on the video nodes are influenced by the active format of the
+  connected subdev, should be removed.
diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
new file mode 100644
index 000000000000..6df1ffb53895
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-common.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2014-2016 Mentor Graphics Inc.
+ */
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+#define IC_TASK_PRP IC_NUM_TASKS
+#define IC_NUM_OPS (IC_NUM_TASKS + 1)
+
+static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
+ [IC_TASK_PRP] = &imx_ic_prp_ops,
+ [IC_TASK_ENCODER] = &imx_ic_prpencvf_ops,
+ [IC_TASK_VIEWFINDER] = &imx_ic_prpencvf_ops,
+};
+
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
+{
+ struct imx_ic_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
+
+ /* get our IC task id */
+ switch (grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ priv->task_id = IC_TASK_PRP;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
+ priv->task_id = IC_TASK_ENCODER;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
+ priv->task_id = IC_TASK_VIEWFINDER;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ v4l2_subdev_init(&priv->sd, ic_ops[priv->task_id]->subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = ic_ops[priv->task_id]->internal_ops;
+ priv->sd.entity.ops = ic_ops[priv->task_id]->entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ priv->sd.owner = ipu_dev->driver->owner;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
+
+ ret = ic_ops[priv->task_id]->init(priv);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
+ if (ret) {
+ ic_ops[priv->task_id]->remove(priv);
+ return ERR_PTR(ret);
+ }
+
+ return &priv->sd;
+}
+
+int imx_media_ic_unregister(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *priv = container_of(sd, struct imx_ic_priv, sd);
+
+ v4l2_info(sd, "Removing\n");
+
+ ic_ops[priv->task_id]->remove(priv);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
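/*
 * Editorial sketch, not part of the patch above: imx_media_ic_register()
 * is called once per IC task, keyed by media group id. A simplified,
 * hypothetical caller (the real registration lives elsewhere in the
 * driver) could look like this:
 */
static int example_register_ic_subdevs(struct v4l2_device *v4l2_dev,
				       struct device *ipu_dev,
				       struct ipu_soc *ipu)
{
	static const u32 grp_ids[] = {
		IMX_MEDIA_GRP_ID_IPU_IC_PRP,
		IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
		IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(grp_ids); i++) {
		struct v4l2_subdev *sd;

		sd = imx_media_ic_register(v4l2_dev, ipu_dev, ipu,
					   grp_ids[i]);
		if (IS_ERR(sd))
			return PTR_ERR(sd);
	}

	return 0;
}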
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
new file mode 100644
index 000000000000..2b80d54006b3
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
+ *
+ * This subdevice handles capture of video frames from the CSI or VDIC,
+ * which are routed directly to the Image Converter preprocess tasks,
+ * for resizing, colorspace conversion, and rotation.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+/*
+ * Min/Max supported width and heights.
+ */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 4096
+#define MAX_H 4096
+#define W_ALIGN 4 /* multiple of 16 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
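/*
 * Editorial note, not part of the patch above: the *_ALIGN values are the
 * log2 exponents expected by v4l_bound_align_image(), i.e. W_ALIGN == 4
 * requests 1 << 4 = 16-pixel alignment. A quick illustration with made-up
 * numbers (the function name is hypothetical):
 */
static void example_bound_align(void)
{
	u32 width = 1913, height = 1079;

	/* clamp to [MIN, MAX] and round to 16-pixel / 2-line multiples */
	v4l_bound_align_image(&width, MIN_W, MAX_W, W_ALIGN,
			      &height, MIN_H, MAX_H, H_ALIGN, S_ALIGN);

	/* width should come out as 1920, height as 1080 */
}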
+
+struct prp_priv {
+ struct imx_ic_priv *ic_priv;
+ struct media_pad pad[PRP_NUM_PADS];
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ struct v4l2_subdev *src_sd;
+ struct v4l2_subdev *sink_sd_prpenc;
+ struct v4l2_subdev *sink_sd_prpvf;
+
+ /* the CSI id at link validate */
+ int csi_id;
+
+ struct v4l2_mbus_framefmt format_mbus;
+ struct v4l2_fract frame_interval;
+
+ int stream_count;
+};
+
+static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+
+ return ic_priv->task_priv;
+}
+
+static int prp_start(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ bool src_is_vdic;
+
+ /* set IC to receive from CSI or VDI depending on source */
+ src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC);
+
+ ipu_set_ic_src_mux(ic_priv->ipu, priv->csi_id, src_is_vdic);
+
+ return 0;
+}
+
+static void prp_stop(struct prp_priv *priv)
+{
+}
+
+static struct v4l2_mbus_framefmt *
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_format(sd_state, pad);
+ else
+ return &priv->format_mbus;
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int prp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *infmt;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ switch (code->pad) {
+ case PRP_SINK_PAD:
+ ret = imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV_RGB);
+ break;
+ case PRP_SRC_PAD_PRPENC:
+ case PRP_SRC_PAD_PRPVF:
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD,
+ code->which);
+ code->code = infmt->code;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt, *infmt;
+ const struct imx_media_pixfmt *cc;
+ int ret = 0;
+ u32 code;
+
+ if (sdformat->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD, sdformat->which);
+
+ switch (sdformat->pad) {
+ case PRP_SINK_PAD:
+ v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
+ W_ALIGN, &sdformat->format.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+
+ cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV_RGB);
+ if (!cc) {
+ imx_media_enum_ipu_formats(&code, 0,
+ PIXFMT_SEL_YUV_RGB);
+ cc = imx_media_find_ipu_format(code,
+ PIXFMT_SEL_YUV_RGB);
+ sdformat->format.code = cc->codes[0];
+ }
+
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+ break;
+ case PRP_SRC_PAD_PRPENC:
+ case PRP_SRC_PAD_PRPVF:
+ /* Output pads mirror input pad */
+ sdformat->format = *infmt;
+ break;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format, true);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (priv->sink_sd_prpenc &&
+ (remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a source pad */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ switch (local->index) {
+ case PRP_SRC_PAD_PRPENC:
+ if (priv->sink_sd_prpenc) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (priv->src_sd && (priv->src_sd->grp_id &
+ IMX_MEDIA_GRP_ID_IPU_VDIC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->sink_sd_prpenc = remote_sd;
+ break;
+ case PRP_SRC_PAD_PRPVF:
+ if (priv->sink_sd_prpvf) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->sink_sd_prpvf = remote_sd;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else {
+ switch (local->index) {
+ case PRP_SRC_PAD_PRPENC:
+ priv->sink_sd_prpenc = NULL;
+ break;
+ case PRP_SRC_PAD_PRPVF:
+ priv->sink_sd_prpvf = NULL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ struct v4l2_subdev *csi;
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ csi = imx_media_pipeline_subdev(&ic_priv->sd.entity,
+ IMX_MEDIA_GRP_ID_IPU_CSI, true);
+ if (IS_ERR(csi))
+ csi = NULL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC) {
+ /*
+ * the ->PRPENC link cannot be enabled if the source
+ * is the VDIC
+ */
+ if (priv->sink_sd_prpenc) {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ /* the source is a CSI */
+ if (!csi) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (csi) {
+ switch (csi->grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_CSI0:
+ priv->csi_id = 0;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_CSI1:
+ priv->csi_id = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else {
+ priv->csi_id = 0;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || (!priv->sink_sd_prpenc && !priv->sink_sd_prpvf)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = prp_start(priv);
+ else
+ prp_stop(priv);
+ if (ret)
+ goto out;
+
+ /* start/stop upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ if (enable)
+ prp_stop(priv);
+ goto out;
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fi->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ fi->interval = priv->frame_interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fi->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ /* No limits on valid frame intervals */
+ if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval;
+ else
+ priv->frame_interval = fi->interval;
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_registered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ u32 code;
+
+ /* init default frame interval */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = 30;
+
+ /* set a default mbus format */
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ return imx_media_init_mbus_fmt(&priv->format_mbus,
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, NULL);
+}
+
+static const struct v4l2_subdev_pad_ops prp_pad_ops = {
+ .enum_mbus_code = prp_enum_mbus_code,
+ .get_fmt = prp_get_fmt,
+ .set_fmt = prp_set_fmt,
+ .get_frame_interval = prp_get_frame_interval,
+ .set_frame_interval = prp_set_frame_interval,
+ .link_validate = prp_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops prp_video_ops = {
+ .s_stream = prp_s_stream,
+};
+
+static const struct media_entity_operations prp_entity_ops = {
+ .link_setup = prp_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops prp_subdev_ops = {
+ .video = &prp_video_ops,
+ .pad = &prp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .init_state = imx_media_init_state,
+ .registered = prp_registered,
+};
+
+static int prp_init(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ ic_priv->task_priv = priv;
+ priv->ic_priv = ic_priv;
+
+ for (i = 0; i < PRP_NUM_PADS; i++)
+ priv->pad[i].flags = (i == PRP_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&ic_priv->sd.entity, PRP_NUM_PADS,
+ priv->pad);
+}
+
+static void prp_remove(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv = ic_priv->task_priv;
+
+ mutex_destroy(&priv->lock);
+}
+
+struct imx_ic_ops imx_ic_prp_ops = {
+ .subdev_ops = &prp_subdev_ops,
+ .internal_ops = &prp_internal_ops,
+ .entity_ops = &prp_entity_ops,
+ .init = prp_init,
+ .remove = prp_remove,
+};
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
new file mode 100644
index 000000000000..77360bfe081a
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -0,0 +1,1386 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
+ *
+ * This subdevice handles capture of video frames from the CSI or VDIC,
+ * which are routed directly to the Image Converter preprocess tasks,
+ * for resizing, colorspace conversion, and rotation.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+/*
+ * Min/Max supported width and heights.
+ *
+ * We allow planar output, so we have to align width at the source pad
+ * by 16 pixels to meet IDMAC alignment requirements for possible planar
+ * output.
+ *
+ * TODO: move this into pad format negotiation, if capture device
+ * has not requested a planar format, we should allow 8 pixel
+ * alignment at the source pad.
+ */
+#define MIN_W_SINK 32
+#define MIN_H_SINK 32
+#define MAX_W_SINK 4096
+#define MAX_H_SINK 4096
+#define W_ALIGN_SINK 3 /* multiple of 8 pixels */
+#define H_ALIGN_SINK 1 /* multiple of 2 lines */
+
+#define MAX_W_SRC 1024
+#define MAX_H_SRC 1024
+#define W_ALIGN_SRC 1 /* multiple of 2 pixels */
+#define H_ALIGN_SRC 1 /* multiple of 2 lines */
+
+#define S_ALIGN 1 /* multiple of 2 */
+
+struct prp_priv {
+ struct imx_ic_priv *ic_priv;
+ struct media_pad pad[PRPENCVF_NUM_PADS];
+ /* the video device at output pad */
+ struct imx_media_video_dev *vdev;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ /* IPU units we require */
+ struct ipu_ic *ic;
+ struct ipuv3_channel *out_ch;
+ struct ipuv3_channel *rot_in_ch;
+ struct ipuv3_channel *rot_out_ch;
+
+ /* active vb2 buffers to send to video dev sink */
+ struct imx_media_buffer *active_vb2_buf[2];
+ struct imx_media_dma_buf underrun_buf;
+
+ int ipu_buf_num; /* ipu double buffer index: 0-1 */
+
+ /* the sink for the captured frames */
+ struct media_entity *sink;
+ /* the source subdev */
+ struct v4l2_subdev *src_sd;
+
+ struct v4l2_mbus_framefmt format_mbus[PRPENCVF_NUM_PADS];
+ const struct imx_media_pixfmt *cc[PRPENCVF_NUM_PADS];
+ struct v4l2_fract frame_interval;
+
+ struct imx_media_dma_buf rot_buf[2];
+
+ /* controls */
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ int rotation; /* degrees */
+ bool hflip;
+ bool vflip;
+
+ /* derived from rotation, hflip, vflip controls */
+ enum ipu_rotate_mode rot_mode;
+
+ spinlock_t irqlock; /* protect eof_irq handler */
+
+ struct timer_list eof_timeout_timer;
+ int eof_irq;
+ int nfb4eof_irq;
+
+ int stream_count;
+ u32 frame_sequence; /* frame sequence counter */
+ bool last_eof; /* waiting for last EOF at stream off */
+ bool nfb4eof; /* NFB4EOF encountered during streaming */
+ bool interweave_swap; /* swap top/bottom lines when interweaving */
+ struct completion last_eof_comp;
+};
+
+static const struct prp_channels {
+ u32 out_ch;
+ u32 rot_in_ch;
+ u32 rot_out_ch;
+} prp_channel[] = {
+ [IC_TASK_ENCODER] = {
+ .out_ch = IPUV3_CHANNEL_IC_PRP_ENC_MEM,
+ .rot_in_ch = IPUV3_CHANNEL_MEM_ROT_ENC,
+ .rot_out_ch = IPUV3_CHANNEL_ROT_ENC_MEM,
+ },
+ [IC_TASK_VIEWFINDER] = {
+ .out_ch = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+ .rot_in_ch = IPUV3_CHANNEL_MEM_ROT_VF,
+ .rot_out_ch = IPUV3_CHANNEL_ROT_VF_MEM,
+ },
+};
+
+static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+
+ return ic_priv->task_priv;
+}
+
+static void prp_put_ipu_resources(struct prp_priv *priv)
+{
+ if (priv->ic)
+ ipu_ic_put(priv->ic);
+ priv->ic = NULL;
+
+ if (priv->out_ch)
+ ipu_idmac_put(priv->out_ch);
+ priv->out_ch = NULL;
+
+ if (priv->rot_in_ch)
+ ipu_idmac_put(priv->rot_in_ch);
+ priv->rot_in_ch = NULL;
+
+ if (priv->rot_out_ch)
+ ipu_idmac_put(priv->rot_out_ch);
+ priv->rot_out_ch = NULL;
+}
+
+static int prp_get_ipu_resources(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct ipu_ic *ic;
+ struct ipuv3_channel *out_ch, *rot_in_ch, *rot_out_ch;
+ int ret, task = ic_priv->task_id;
+
+ ic = ipu_ic_get(ic_priv->ipu, task);
+ if (IS_ERR(ic)) {
+ v4l2_err(&ic_priv->sd, "failed to get IC\n");
+ ret = PTR_ERR(ic);
+ goto out;
+ }
+ priv->ic = ic;
+
+ out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].out_ch);
+ if (IS_ERR(out_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].out_ch);
+ ret = PTR_ERR(out_ch);
+ goto out;
+ }
+ priv->out_ch = out_ch;
+
+ rot_in_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_in_ch);
+ if (IS_ERR(rot_in_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].rot_in_ch);
+ ret = PTR_ERR(rot_in_ch);
+ goto out;
+ }
+ priv->rot_in_ch = rot_in_ch;
+
+ rot_out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_out_ch);
+ if (IS_ERR(rot_out_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].rot_out_ch);
+ ret = PTR_ERR(rot_out_ch);
+ goto out;
+ }
+ priv->rot_out_ch = rot_out_ch;
+
+ return 0;
+out:
+ prp_put_ipu_resources(priv);
+ return ret;
+}
+
+static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = priv->active_vb2_buf[priv->ipu_buf_num];
+ if (done) {
+ done->vbuf.field = vdev->fmt.field;
+ done->vbuf.sequence = priv->frame_sequence;
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, priv->nfb4eof ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ }
+
+ priv->frame_sequence++;
+ priv->nfb4eof = false;
+
+ /* get next queued buffer */
+ next = imx_media_capture_device_next_buf(vdev);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ priv->active_vb2_buf[priv->ipu_buf_num] = next;
+ } else {
+ phys = priv->underrun_buf.phys;
+ priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
+ }
+
+ if (ipu_idmac_buffer_is_ready(ch, priv->ipu_buf_num))
+ ipu_idmac_clear_buffer(ch, priv->ipu_buf_num);
+
+ if (priv->interweave_swap && ch == priv->out_ch)
+ phys += vdev->fmt.bytesperline;
+
+ ipu_cpmem_set_buffer(ch, priv->ipu_buf_num, phys);
+}
+
+static irqreturn_t prp_eof_interrupt(int irq, void *dev_id)
+{
+ struct prp_priv *priv = dev_id;
+ struct ipuv3_channel *channel;
+
+ spin_lock(&priv->irqlock);
+
+ if (priv->last_eof) {
+ complete(&priv->last_eof_comp);
+ priv->last_eof = false;
+ goto unlock;
+ }
+
+ channel = (ipu_rot_mode_is_irt(priv->rot_mode)) ?
+ priv->rot_out_ch : priv->out_ch;
+
+ prp_vb2_buf_done(priv, channel);
+
+ /* select new IPU buf */
+ ipu_idmac_select_buffer(channel, priv->ipu_buf_num);
+ /* toggle IPU double-buffer index */
+ priv->ipu_buf_num ^= 1;
+
+ /* bump the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+unlock:
+ spin_unlock(&priv->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
+{
+ struct prp_priv *priv = dev_id;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ spin_lock(&priv->irqlock);
+
+ /*
+ * this is not an unrecoverable error, just mark
+ * the next captured frame with vb2 error flag.
+ */
+ priv->nfb4eof = true;
+
+ v4l2_err(&ic_priv->sd, "NFB4EOF\n");
+
+ spin_unlock(&priv->irqlock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * EOF timeout timer function. This is an unrecoverable condition
+ * without a stream restart.
+ */
+static void prp_eof_timeout(struct timer_list *t)
+{
+ struct prp_priv *priv = timer_container_of(priv, t, eof_timeout_timer);
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ v4l2_err(&ic_priv->sd, "EOF timeout\n");
+
+ /* signal a fatal error to capture device */
+ imx_media_capture_device_error(vdev);
+}
+
+static void prp_setup_vb2_buf(struct prp_priv *priv, dma_addr_t *phys)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *buf;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx_media_capture_device_next_buf(vdev);
+ if (buf) {
+ priv->active_vb2_buf[i] = buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(
+ &buf->vbuf.vb2_buf, 0);
+ } else {
+ priv->active_vb2_buf[i] = NULL;
+ phys[i] = priv->underrun_buf.phys;
+ }
+ }
+}
+
+static void prp_unsetup_vb2_buf(struct prp_priv *priv,
+ enum vb2_buffer_state return_status)
+{
+ struct imx_media_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = priv->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ }
+ }
+}
+
+static int prp_setup_channel(struct prp_priv *priv,
+ struct ipuv3_channel *channel,
+ enum ipu_rotate_mode rot_mode,
+ dma_addr_t addr0, dma_addr_t addr1,
+ bool rot_swap_width_height)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ unsigned int burst_size;
+ struct ipu_image image;
+ bool interweave;
+ int ret;
+
+ outfmt = &priv->format_mbus[PRPENCVF_SRC_PAD];
+ outcc = vdev->cc;
+
+ ipu_cpmem_zero(channel);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt;
+ image.rect = vdev->compose;
+
+ /*
+ * If the field type at capture interface is interlaced, and
+ * the output IDMAC pad is sequential, enable interweave at
+ * the IDMAC output channel.
+ */
+ interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
+ priv->interweave_swap = interweave &&
+ image.pix.field == V4L2_FIELD_INTERLACED_BT;
+
+ if (rot_swap_width_height) {
+ swap(image.pix.width, image.pix.height);
+ swap(image.rect.width, image.rect.height);
+ /* recalc stride using swapped width */
+ image.pix.bytesperline = outcc->planar ?
+ image.pix.width :
+ (image.pix.width * outcc->bpp) >> 3;
+ }
+
+ if (priv->interweave_swap && channel == priv->out_ch) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.rect.top = 1;
+ }
+
+ image.phys0 = addr0;
+ image.phys1 = addr1;
+
+ /*
+ * Skip writing U and V components to odd rows in the output
+ * channels for planar 4:2:0 (but not when enabling IDMAC
+ * interweaving, they are incompatible).
+ */
+ if ((channel == priv->out_ch && !interweave) ||
+ channel == priv->rot_out_ch) {
+ switch (image.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_NV12:
+ ipu_cpmem_skip_odd_chroma_rows(channel);
+ break;
+ }
+ }
+
+ ret = ipu_cpmem_set_image(channel, &image);
+ if (ret)
+ return ret;
+
+ if (channel == priv->rot_in_ch ||
+ channel == priv->rot_out_ch) {
+ burst_size = 8;
+ ipu_cpmem_set_block_mode(channel);
+ } else {
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ }
+
+ ipu_cpmem_set_burstsize(channel, burst_size);
+
+ if (rot_mode)
+ ipu_cpmem_set_rotation(channel, rot_mode);
+
+ if (interweave && channel == priv->out_ch)
+ ipu_cpmem_interlaced_scan(channel,
+ priv->interweave_swap ?
+ -image.pix.bytesperline :
+ image.pix.bytesperline,
+ image.pix.pixelformat);
+
+ ret = ipu_ic_task_idma_init(priv->ic, channel,
+ image.pix.width, image.pix.height,
+ burst_size, rot_mode);
+ if (ret)
+ return ret;
+
+ ipu_cpmem_set_axi_id(channel, 1);
+
+ ipu_idmac_set_double_buffer(channel, true);
+
+ return 0;
+}
+
+static int prp_setup_rotation(struct prp_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ const struct imx_media_pixfmt *outcc, *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_pix_format *outfmt;
+ struct ipu_ic_csc csc;
+ dma_addr_t phys[2];
+ int ret;
+
+ infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outfmt = &vdev->fmt;
+ incc = priv->cc[PRPENCVF_SINK_PAD];
+ outcc = vdev->cc;
+
+ ret = ipu_ic_calc_csc(&csc,
+ infmt->ycbcr_enc, infmt->quantization,
+ incc->cs,
+ outfmt->ycbcr_enc, outfmt->quantization,
+ outcc->cs);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_calc_csc failed, %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0],
+ outfmt->sizeimage);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[0], %d\n", ret);
+ return ret;
+ }
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1],
+ outfmt->sizeimage);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[1], %d\n", ret);
+ goto free_rot0;
+ }
+
+ ret = ipu_ic_task_init(priv->ic, &csc,
+ infmt->width, infmt->height,
+ outfmt->height, outfmt->width);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ /* init the IC-PRP-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->out_ch, IPU_ROTATE_NONE,
+ priv->rot_buf[0].phys, priv->rot_buf[1].phys,
+ true);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(out_ch) failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ /* init the MEM-->IC-PRP ROT IDMAC channel */
+ ret = prp_setup_channel(priv, priv->rot_in_ch, priv->rot_mode,
+ priv->rot_buf[0].phys, priv->rot_buf[1].phys,
+ true);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(rot_in_ch) failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ prp_setup_vb2_buf(priv, phys);
+
+ /* init the destination IC-PRP ROT-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->rot_out_ch, IPU_ROTATE_NONE,
+ phys[0], phys[1],
+ false);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(rot_out_ch) failed, %d\n", ret);
+ goto unsetup_vb2;
+ }
+
+ /* now link IC-PRP-->MEM to MEM-->IC-PRP ROT */
+ ipu_idmac_link(priv->out_ch, priv->rot_in_ch);
+
+ /* enable the IC */
+ ipu_ic_enable(priv->ic);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->out_ch, 0);
+ ipu_idmac_select_buffer(priv->out_ch, 1);
+ ipu_idmac_select_buffer(priv->rot_out_ch, 0);
+ ipu_idmac_select_buffer(priv->rot_out_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->out_ch);
+ ipu_idmac_enable_channel(priv->rot_in_ch);
+ ipu_idmac_enable_channel(priv->rot_out_ch);
+
+ /* and finally enable the IC PRP task */
+ ipu_ic_task_enable(priv->ic);
+
+ return 0;
+
+unsetup_vb2:
+ prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+free_rot1:
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
+free_rot0:
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
+ return ret;
+}
+
+static void prp_unsetup_rotation(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ ipu_ic_task_disable(priv->ic);
+
+ ipu_idmac_disable_channel(priv->out_ch);
+ ipu_idmac_disable_channel(priv->rot_in_ch);
+ ipu_idmac_disable_channel(priv->rot_out_ch);
+
+ ipu_idmac_unlink(priv->out_ch, priv->rot_in_ch);
+
+ ipu_ic_disable(priv->ic);
+
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
+}
+
+static int prp_setup_norotation(struct prp_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ const struct imx_media_pixfmt *outcc, *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_pix_format *outfmt;
+ struct ipu_ic_csc csc;
+ dma_addr_t phys[2];
+ int ret;
+
+ infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outfmt = &vdev->fmt;
+ incc = priv->cc[PRPENCVF_SINK_PAD];
+ outcc = vdev->cc;
+
+ ret = ipu_ic_calc_csc(&csc,
+ infmt->ycbcr_enc, infmt->quantization,
+ incc->cs,
+ outfmt->ycbcr_enc, outfmt->quantization,
+ outcc->cs);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_calc_csc failed, %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ipu_ic_task_init(priv->ic, &csc,
+ infmt->width, infmt->height,
+ outfmt->width, outfmt->height);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
+ return ret;
+ }
+
+ prp_setup_vb2_buf(priv, phys);
+
+ /* init the IC PRP-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->out_ch, priv->rot_mode,
+ phys[0], phys[1], false);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(out_ch) failed, %d\n", ret);
+ goto unsetup_vb2;
+ }
+
+ ipu_cpmem_dump(priv->out_ch);
+ ipu_ic_dump(priv->ic);
+ ipu_dump(ic_priv->ipu);
+
+ ipu_ic_enable(priv->ic);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->out_ch, 0);
+ ipu_idmac_select_buffer(priv->out_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->out_ch);
+
+ /* enable the IC task */
+ ipu_ic_task_enable(priv->ic);
+
+ return 0;
+
+unsetup_vb2:
+ prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void prp_unsetup_norotation(struct prp_priv *priv)
+{
+ ipu_ic_task_disable(priv->ic);
+ ipu_idmac_disable_channel(priv->out_ch);
+ ipu_ic_disable(priv->ic);
+}
+
+static void prp_unsetup(struct prp_priv *priv,
+ enum vb2_buffer_state state)
+{
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ prp_unsetup_rotation(priv);
+ else
+ prp_unsetup_norotation(priv);
+
+ prp_unsetup_vb2_buf(priv, state);
+}
+
+static int prp_start(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct imx_media_video_dev *vdev = priv->vdev;
+ int ret;
+
+ ret = prp_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf,
+ vdev->fmt.sizeimage);
+ if (ret)
+ goto out_put_ipu;
+
+ priv->ipu_buf_num = 0;
+
+ /* init EOF completion waitq */
+ init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
+ priv->last_eof = false;
+ priv->nfb4eof = false;
+
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ ret = prp_setup_rotation(priv);
+ else
+ ret = prp_setup_norotation(priv);
+ if (ret)
+ goto out_free_underrun;
+
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(ic_priv->ipu,
+ priv->out_ch,
+ IPU_IRQ_NFB4EOF);
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->nfb4eof_irq,
+ prp_nfb4eof_interrupt, 0,
+ "imx-ic-prp-nfb4eof", priv);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "Error registering NFB4EOF irq: %d\n", ret);
+ goto out_unsetup;
+ }
+
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ priv->eof_irq = ipu_idmac_channel_irq(
+ ic_priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
+ else
+ priv->eof_irq = ipu_idmac_channel_irq(
+ ic_priv->ipu, priv->out_ch, IPU_IRQ_EOF);
+
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->eof_irq,
+ prp_eof_interrupt, 0,
+ "imx-ic-prp-eof", priv);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "Error registering eof irq: %d\n", ret);
+ goto out_free_nfb4eof_irq;
+ }
+
+ /* start upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "upstream stream on failed: %d\n", ret);
+ goto out_free_eof_irq;
+ }
+
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
+out_free_eof_irq:
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
+out_free_nfb4eof_irq:
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
+out_unsetup:
+ prp_unsetup(priv, VB2_BUF_STATE_QUEUED);
+out_free_underrun:
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
+out_put_ipu:
+ prp_put_ipu_resources(priv);
+ return ret;
+}
+
+static void prp_stop(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&priv->irqlock, flags);
+ priv->last_eof = true;
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+
+ /*
+ * and then wait for interrupt handler to mark completion.
+ */
+ ret = wait_for_completion_timeout(
+ &priv->last_eof_comp,
+ msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
+
+ /* stop upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ v4l2_warn(&ic_priv->sd,
+ "upstream stream off failed: %d\n", ret);
+
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
+
+ prp_unsetup(priv, VB2_BUF_STATE_ERROR);
+
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
+
+ /* cancel the EOF timeout timer */
+ timer_delete_sync(&priv->eof_timeout_timer);
+
+ prp_put_ipu_resources(priv);
+}
+
+static struct v4l2_mbus_framefmt *
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_format(sd_state, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+/*
+ * Applies IC resizer and IDMAC alignment restrictions to output
+ * rectangle given the input rectangle, and depending on given
+ * rotation mode.
+ *
+ * The IC resizer cannot downsize more than 4:1. Note also that
+ * for 90 or 270 rotation, _both_ output width and height must
+ * be aligned by W_ALIGN_SRC, because the intermediate rotation
+ * buffer swaps output width/height, and the final output buffer
+ * does not.
+ *
+ * Returns true if the output rectangle was modified.
+ */
+static bool prp_bound_align_output(struct v4l2_mbus_framefmt *outfmt,
+ struct v4l2_mbus_framefmt *infmt,
+ enum ipu_rotate_mode rot_mode)
+{
+ u32 orig_width = outfmt->width;
+ u32 orig_height = outfmt->height;
+
+ if (ipu_rot_mode_is_irt(rot_mode))
+ v4l_bound_align_image(&outfmt->width,
+ infmt->height / 4, MAX_H_SRC,
+ W_ALIGN_SRC,
+ &outfmt->height,
+ infmt->width / 4, MAX_W_SRC,
+ W_ALIGN_SRC, S_ALIGN);
+ else
+ v4l_bound_align_image(&outfmt->width,
+ infmt->width / 4, MAX_W_SRC,
+ W_ALIGN_SRC,
+ &outfmt->height,
+ infmt->height / 4, MAX_H_SRC,
+ H_ALIGN_SRC, S_ALIGN);
+
+ return outfmt->width != orig_width || outfmt->height != orig_height;
+}
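/*
 * Editorial note, not part of the patch above: a worked example with
 * made-up numbers. For a 1920x1080 sink format and no IRT rotation the
 * output is bounded to [480..1024] x [270..1024] (the 4:1 downsize
 * limit); with 90/270 rotation the bounds swap and both dimensions use
 * W_ALIGN_SRC, as explained above.
 */
static void example_bound_output(void)
{
	struct v4l2_mbus_framefmt in = { .width = 1920, .height = 1080 };
	struct v4l2_mbus_framefmt out = { .width = 320, .height = 240 };

	/* 320x240 violates the 4:1 limit; it becomes 480x270 here */
	prp_bound_align_output(&out, &in, IPU_ROTATE_NONE);
}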
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int prp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ return imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV_RGB);
+}
+
+static int prp_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void prp_try_fmt(struct prp_priv *priv,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx_media_pixfmt **cc)
+{
+ struct v4l2_mbus_framefmt *infmt;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV_RGB);
+ if (!*cc) {
+ u32 code;
+
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV_RGB);
+ *cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
+
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ infmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SINK_PAD,
+ sdformat->which);
+
+ if (sdformat->pad == PRPENCVF_SRC_PAD) {
+ sdformat->format.field = infmt->field;
+
+ prp_bound_align_output(&sdformat->format, infmt,
+ priv->rot_mode);
+
+ /* propagate colorimetry from sink */
+ sdformat->format.colorspace = infmt->colorspace;
+ sdformat->format.xfer_func = infmt->xfer_func;
+ } else {
+ v4l_bound_align_image(&sdformat->format.width,
+ MIN_W_SINK, MAX_W_SINK, W_ALIGN_SINK,
+ &sdformat->format.height,
+ MIN_H_SINK, MAX_H_SINK, H_ALIGN_SINK,
+ S_ALIGN);
+
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format, true);
+}
+
+static int prp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ prp_try_fmt(priv, sd_state, sdformat, &cc);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ /* propagate a default format to source pad */
+ if (sdformat->pad == PRPENCVF_SINK_PAD) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = PRPENCVF_SRC_PAD;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ prp_try_fmt(priv, sd_state, &format, &outcc);
+
+ outfmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SRC_PAD,
+ sdformat->which);
+ *outfmt = format.format;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[PRPENCVF_SRC_PAD] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_subdev_format format = {};
+ const struct imx_media_pixfmt *cc;
+ int ret = 0;
+
+ if (fse->pad >= PRPENCVF_NUM_PADS || fse->index != 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ format.pad = fse->pad;
+ format.which = fse->which;
+ format.format.code = fse->code;
+ format.format.width = 1;
+ format.format.height = 1;
+ prp_try_fmt(priv, sd_state, &format, &cc);
+ fse->min_width = format.format.width;
+ fse->min_height = format.format.height;
+
+ if (format.format.code != fse->code) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ format.format.code = fse->code;
+ format.format.width = -1;
+ format.format.height = -1;
+ prp_try_fmt(priv, sd_state, &format, &cc);
+ fse->max_width = format.format.width;
+ fse->max_height = format.format.height;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is the source pad */
+
+ /* the remote must be the device node */
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ priv->sink = NULL;
+ goto out;
+ }
+
+ priv->sink = remote->entity;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct prp_priv *priv = container_of(ctrl->handler,
+ struct prp_priv, ctrl_hdlr);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ enum ipu_rotate_mode rot_mode;
+ int rotation, ret = 0;
+ bool hflip, vflip;
+
+ mutex_lock(&priv->lock);
+
+ rotation = priv->rotation;
+ hflip = priv->hflip;
+ vflip = priv->vflip;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ hflip = (ctrl->val == 1);
+ break;
+ case V4L2_CID_VFLIP:
+ vflip = (ctrl->val == 1);
+ break;
+ case V4L2_CID_ROTATE:
+ rotation = ctrl->val;
+ break;
+ default:
+ v4l2_err(&ic_priv->sd, "Invalid control\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ipu_degrees_to_rot_mode(&rot_mode, rotation, hflip, vflip);
+ if (ret)
+ goto out;
+
+ if (rot_mode != priv->rot_mode) {
+ struct v4l2_mbus_framefmt outfmt, infmt;
+
+ /* can't change rotation mid-streaming */
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ outfmt = priv->format_mbus[PRPENCVF_SRC_PAD];
+ infmt = priv->format_mbus[PRPENCVF_SINK_PAD];
+
+ if (prp_bound_align_output(&outfmt, &infmt, rot_mode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->rot_mode = rot_mode;
+ priv->rotation = rotation;
+ priv->hflip = hflip;
+ priv->vflip = vflip;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops prp_ctrl_ops = {
+ .s_ctrl = prp_s_ctrl,
+};
+
+static int prp_init_controls(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdlr, 3);
+
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+
+ ic_priv->sd.ctrl_handler = hdlr;
+
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto out_free;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+
+out_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
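/*
 * Editorial sketch, not part of the patch above: the flip/rotate controls
 * are exposed on the IC task's subdev node (the entity is created with
 * V4L2_SUBDEV_FL_HAS_DEVNODE). A minimal userspace example; the node path
 * is hypothetical and depends on the media graph, and changing the
 * control while streaming returns -EBUSY as enforced in prp_s_ctrl():
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int example_set_rotation(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_ROTATE,
		.value = 90,		/* 0, 90, 180 or 270 */
	};
	int fd, ret;

	fd = open("/dev/v4l-subdev1", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return ret;
}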
+
+static int prp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || !priv->sink) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = prp_start(priv);
+ else
+ prp_stop(priv);
+ if (ret)
+ goto out;
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fi->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ fi->interval = priv->frame_interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fi->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ /* No limits on valid frame intervals */
+ if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval;
+ else
+ priv->frame_interval = fi->interval;
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_registered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ int i, ret;
+ u32 code;
+
+ /* set a default mbus format */
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, &priv->cc[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* init default frame interval */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = 30;
+
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
+ PRPENCVF_SRC_PAD, true);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
+
+ ret = imx_media_capture_device_register(priv->vdev, 0);
+ if (ret)
+ goto remove_vdev;
+
+ ret = prp_init_controls(priv);
+ if (ret)
+ goto unreg_vdev;
+
+ return 0;
+
+unreg_vdev:
+ imx_media_capture_device_unregister(priv->vdev);
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
+ return ret;
+}
+
+static void prp_unregistered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+
+static const struct v4l2_subdev_pad_ops prp_pad_ops = {
+ .enum_mbus_code = prp_enum_mbus_code,
+ .enum_frame_size = prp_enum_frame_size,
+ .get_fmt = prp_get_fmt,
+ .set_fmt = prp_set_fmt,
+ .get_frame_interval = prp_get_frame_interval,
+ .set_frame_interval = prp_set_frame_interval,
+};
+
+static const struct v4l2_subdev_video_ops prp_video_ops = {
+ .s_stream = prp_s_stream,
+};
+
+static const struct media_entity_operations prp_entity_ops = {
+ .link_setup = prp_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops prp_subdev_ops = {
+ .video = &prp_video_ops,
+ .pad = &prp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .init_state = imx_media_init_state,
+ .registered = prp_registered,
+ .unregistered = prp_unregistered,
+};
+
+static int prp_init(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ic_priv->task_priv = priv;
+ priv->ic_priv = ic_priv;
+
+ spin_lock_init(&priv->irqlock);
+ timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
+
+ mutex_init(&priv->lock);
+
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&ic_priv->sd.entity, PRPENCVF_NUM_PADS,
+ priv->pad);
+ if (ret)
+ mutex_destroy(&priv->lock);
+
+ return ret;
+}
+
+static void prp_remove(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv = ic_priv->task_priv;
+
+ mutex_destroy(&priv->lock);
+}
+
+struct imx_ic_ops imx_ic_prpencvf_ops = {
+ .subdev_ops = &prp_subdev_ops,
+ .internal_ops = &prp_internal_ops,
+ .entity_ops = &prp_entity_ops,
+ .init = prp_init,
+ .remove = prp_remove,
+};
diff --git a/drivers/staging/media/imx/imx-ic.h b/drivers/staging/media/imx/imx-ic.h
new file mode 100644
index 000000000000..587c191c3eab
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#ifndef _IMX_IC_H
+#define _IMX_IC_H
+
+#include <media/v4l2-subdev.h>
+
+struct imx_ic_priv {
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
+ struct v4l2_subdev sd;
+ int task_id;
+ void *task_priv;
+};
+
+struct imx_ic_ops {
+ const struct v4l2_subdev_ops *subdev_ops;
+ const struct v4l2_subdev_internal_ops *internal_ops;
+ const struct media_entity_operations *entity_ops;
+
+ int (*init)(struct imx_ic_priv *ic_priv);
+ void (*remove)(struct imx_ic_priv *ic_priv);
+};
+
+extern struct imx_ic_ops imx_ic_prp_ops;
+extern struct imx_ic_ops imx_ic_prpencvf_ops;
+
+#endif
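/*
 * Editorial sketch, not part of the patch above: a further IC task
 * implementation would plug into imx-ic-common.c by providing another
 * imx_ic_ops instance. All "example_*" names are hypothetical; the empty
 * ops structures stand in for real pad/video/entity callbacks.
 */
static const struct v4l2_subdev_ops example_subdev_ops;
static const struct v4l2_subdev_internal_ops example_internal_ops;
static const struct media_entity_operations example_entity_ops;

static int example_task_init(struct imx_ic_priv *ic_priv)
{
	/* allocate task state, init pads, store it in ic_priv->task_priv */
	return 0;
}

static void example_task_remove(struct imx_ic_priv *ic_priv)
{
	/* free whatever example_task_init() set up */
}

struct imx_ic_ops imx_ic_example_ops = {
	.subdev_ops = &example_subdev_ops,
	.internal_ops = &example_internal_ops,
	.entity_ops = &example_entity_ops,
	.init = example_task_init,
	.remove = example_task_remove,
};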
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
new file mode 100644
index 000000000000..e9cef7af000a
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -0,0 +1,1055 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Video Capture Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2012-2016 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+#define IMX_CAPTURE_NAME "imx-capture"
+
+struct capture_priv {
+ struct imx_media_dev *md; /* Media device */
+ struct device *dev; /* Physical device */
+
+ struct imx_media_video_dev vdev; /* Video device */
+ struct media_pad vdev_pad; /* Video device pad */
+
+ struct v4l2_subdev *src_sd; /* Source subdev */
+ int src_sd_pad; /* Source subdev pad */
+
+ struct mutex mutex; /* Protect vdev operations */
+
+ struct vb2_queue q; /* The videobuf2 queue */
+ struct list_head ready_q; /* List of queued buffers */
+ spinlock_t q_lock; /* Protect ready_q */
+
+ struct v4l2_ctrl_handler ctrl_hdlr; /* Controls inherited from subdevs */
+
+ bool legacy_api; /* Use the legacy (pre-MC) API */
+};
+
+#define to_capture_priv(v) container_of(v, struct capture_priv, vdev)
+
+/* In bytes, per queue */
+#define VID_MEM_LIMIT SZ_64M
+
+/* -----------------------------------------------------------------------------
+ * MC-Centric Video IOCTLs
+ */
+
+static const struct imx_media_pixfmt *capture_find_format(u32 code, u32 fourcc)
+{
+ const struct imx_media_pixfmt *cc;
+
+ cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
+ if (cc) {
+ enum imx_pixfmt_sel fmt_sel = cc->cs == IPUV3_COLORSPACE_YUV
+ ? PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ cc = imx_media_find_pixel_format(fourcc, fmt_sel);
+ if (!cc) {
+ imx_media_enum_pixel_formats(&fourcc, 0, fmt_sel, 0);
+ cc = imx_media_find_pixel_format(fourcc, fmt_sel);
+ }
+
+ return cc;
+ }
+
+ return imx_media_find_mbus_format(code, PIXFMT_SEL_ANY);
+}
+
+static int capture_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev_name(priv->dev));
+
+ return 0;
+}
+
+static int capture_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ return imx_media_enum_pixel_formats(&f->pixelformat, f->index,
+ PIXFMT_SEL_ANY, f->mbus_code);
+}
+
+static int capture_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct imx_media_pixfmt *cc;
+
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ cc = imx_media_find_pixel_format(fsize->pixel_format, PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+
+ /*
+ * TODO: The constraints are hardware-specific and may depend on the
+ * pixel format. This should come from the driver using
+ * imx_media_capture.
+ */
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = 1;
+ fsize->stepwise.max_width = 65535;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = 65535;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int capture_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ f->fmt.pix = priv->vdev.fmt;
+
+ return 0;
+}
+
+static const struct imx_media_pixfmt *
+__capture_try_fmt(struct v4l2_pix_format *pixfmt, struct v4l2_rect *compose)
+{
+ struct v4l2_mbus_framefmt fmt_src;
+ const struct imx_media_pixfmt *cc;
+
+ /*
+ * Find the pixel format, default to the first supported format if not
+ * found.
+ */
+ cc = imx_media_find_pixel_format(pixfmt->pixelformat, PIXFMT_SEL_ANY);
+ if (!cc) {
+ imx_media_enum_pixel_formats(&pixfmt->pixelformat, 0,
+ PIXFMT_SEL_ANY, 0);
+ cc = imx_media_find_pixel_format(pixfmt->pixelformat,
+ PIXFMT_SEL_ANY);
+ }
+
+ /* Allow IDMAC interweave but enforce field order from source. */
+ if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
+ switch (pixfmt->field) {
+ case V4L2_FIELD_SEQ_TB:
+ pixfmt->field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ pixfmt->field = V4L2_FIELD_INTERLACED_BT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ v4l2_fill_mbus_format(&fmt_src, pixfmt, 0);
+ imx_media_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src, cc);
+
+ if (compose) {
+ compose->width = fmt_src.width;
+ compose->height = fmt_src.height;
+ }
+
+ return cc;
+}
+
+static int capture_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ __capture_try_fmt(&f->fmt.pix, NULL);
+ return 0;
+}
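/*
 * Editorial sketch, not part of the patch above: from userspace,
 * VIDIOC_TRY_FMT on the capture node replaces an unsupported fourcc with
 * the first supported one and fills in bytesperline/sizeimage. The device
 * path is hypothetical, and MJPEG is assumed to be unsupported here:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int example_try_fmt(void)
{
	struct v4l2_format f;
	int fd, ret;

	fd = open("/dev/video0", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&f, 0, sizeof(f));
	f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	f.fmt.pix.width = 640;
	f.fmt.pix.height = 480;
	f.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;	/* likely unsupported */

	ret = ioctl(fd, VIDIOC_TRY_FMT, &f);
	/* on success f.fmt.pix.pixelformat holds a supported format */

	close(fd);
	return ret;
}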
+
+static int capture_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+
+ if (vb2_is_busy(&priv->q)) {
+ dev_err(priv->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ cc = __capture_try_fmt(&f->fmt.pix, &priv->vdev.compose);
+
+ priv->vdev.cc = cc;
+ priv->vdev.fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int capture_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* The compose rectangle is fixed to the source format. */
+ s->r = priv->vdev.compose;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /*
+ * The hardware writes with a configurable but fixed DMA burst
+ * size. If the source format width is not burst size aligned,
+ * the written frame contains padding to the right.
+ */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = priv->vdev.fmt.width;
+ s->r.height = priv->vdev.fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int capture_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ioctl_ops capture_ioctl_ops = {
+ .vidioc_querycap = capture_querycap,
+
+ .vidioc_enum_fmt_vid_cap = capture_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = capture_enum_framesizes,
+
+ .vidioc_g_fmt_vid_cap = capture_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = capture_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = capture_s_fmt_vid_cap,
+
+ .vidioc_g_selection = capture_g_selection,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_subscribe_event = capture_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * Legacy Video IOCTLs
+ */
+
+static int capture_legacy_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ cc = imx_media_find_pixel_format(fsize->pixel_format, PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+
+ fse.code = cc->codes ? cc->codes[0] : 0;
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
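+	/*
+	 * A source that supports only a single size is reported as a
+	 * discrete frame size; otherwise report the continuous range
+	 * returned by the subdev.
+	 */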
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.min_width;
+ fsize->discrete.height = fse.min_height;
+ } else {
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = fse.min_width;
+ fsize->stepwise.max_width = fse.max_width;
+ fsize->stepwise.min_height = fse.min_height;
+ fsize->stepwise.max_height = fse.max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+ }
+
+ return 0;
+}
+
+static int capture_legacy_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .pad = priv->src_sd_pad,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ cc = imx_media_find_pixel_format(fival->pixel_format, PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+
+ fie.code = cc->codes ? cc->codes[0] : 0;
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_interval,
+ NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static int capture_legacy_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc_src;
+ struct v4l2_subdev_format fmt_src = {
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ u32 fourcc;
+ int ret;
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret) {
+ dev_err(priv->dev, "failed to get src_sd format\n");
+ return ret;
+ }
+
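+	/*
+	 * If the source emits an IPU internal YUV/RGB format, any pixel
+	 * format in the same colorspace can be produced; otherwise the
+	 * source media bus code maps to exactly one pixel format.
+	 */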
+ cc_src = imx_media_find_ipu_format(fmt_src.format.code,
+ PIXFMT_SEL_YUV_RGB);
+ if (cc_src) {
+ enum imx_pixfmt_sel fmt_sel =
+ (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ ret = imx_media_enum_pixel_formats(&fourcc, f->index, fmt_sel,
+ 0);
+ if (ret)
+ return ret;
+ } else {
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ PIXFMT_SEL_ANY);
+ if (WARN_ON(!cc_src))
+ return -EINVAL;
+
+ if (f->index != 0)
+ return -EINVAL;
+ fourcc = cc_src->fourcc;
+ }
+
+ f->pixelformat = fourcc;
+
+ return 0;
+}
+
+static const struct imx_media_pixfmt *
+__capture_legacy_try_fmt(struct capture_priv *priv,
+ struct v4l2_subdev_format *fmt_src,
+ struct v4l2_pix_format *pixfmt)
+{
+ const struct imx_media_pixfmt *cc;
+
+ cc = capture_find_format(fmt_src->format.code, pixfmt->pixelformat);
+ if (WARN_ON(!cc))
+ return NULL;
+
+ /* allow IDMAC interweave but enforce field order from source */
+ if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
+ switch (fmt_src->format.field) {
+ case V4L2_FIELD_SEQ_TB:
+ fmt_src->format.field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ fmt_src->format.field = V4L2_FIELD_INTERLACED_BT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ imx_media_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src->format, cc);
+
+ return cc;
+}
+
+static int capture_legacy_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_format fmt_src = {
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ if (!__capture_legacy_try_fmt(priv, &fmt_src, &f->fmt.pix))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int capture_legacy_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_format fmt_src = {
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct imx_media_pixfmt *cc;
+ int ret;
+
+ if (vb2_is_busy(&priv->q)) {
+ dev_err(priv->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ cc = __capture_legacy_try_fmt(priv, &fmt_src, &f->fmt.pix);
+ if (!cc)
+ return -EINVAL;
+
+ priv->vdev.cc = cc;
+ priv->vdev.fmt = f->fmt.pix;
+ priv->vdev.compose.width = fmt_src.format.width;
+ priv->vdev.compose.height = fmt_src.format.height;
+
+ return 0;
+}
+
+static int capture_legacy_querystd(struct file *file, void *fh,
+ v4l2_std_id *std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ return v4l2_subdev_call(priv->src_sd, video, querystd, std);
+}
+
+static int capture_legacy_g_std(struct file *file, void *fh, v4l2_std_id *std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ return v4l2_subdev_call(priv->src_sd, video, g_std, std);
+}
+
+static int capture_legacy_s_std(struct file *file, void *fh, v4l2_std_id std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ if (vb2_is_busy(&priv->q))
+ return -EBUSY;
+
+ return v4l2_subdev_call(priv->src_sd, video, s_std, std);
+}
+
+static int capture_legacy_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_frame_interval fi = {
+ .pad = priv->src_sd_pad,
+ };
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call_state_active(priv->src_sd, pad,
+ get_frame_interval, &fi);
+ if (ret < 0)
+ return ret;
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.timeperframe = fi.interval;
+
+ return 0;
+}
+
+static int capture_legacy_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_frame_interval fi = {
+ .pad = priv->src_sd_pad,
+ };
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ fi.interval = a->parm.capture.timeperframe;
+ ret = v4l2_subdev_call_state_active(priv->src_sd, pad,
+ set_frame_interval, &fi);
+ if (ret < 0)
+ return ret;
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.timeperframe = fi.interval;
+
+ return 0;
+}
+
+static int capture_legacy_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ioctl_ops capture_legacy_ioctl_ops = {
+ .vidioc_querycap = capture_querycap,
+
+ .vidioc_enum_framesizes = capture_legacy_enum_framesizes,
+ .vidioc_enum_frameintervals = capture_legacy_enum_frameintervals,
+
+ .vidioc_enum_fmt_vid_cap = capture_legacy_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = capture_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = capture_legacy_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = capture_legacy_s_fmt_vid_cap,
+
+ .vidioc_querystd = capture_legacy_querystd,
+ .vidioc_g_std = capture_legacy_g_std,
+ .vidioc_s_std = capture_legacy_s_std,
+
+ .vidioc_g_selection = capture_g_selection,
+
+ .vidioc_g_parm = capture_legacy_g_parm,
+ .vidioc_s_parm = capture_legacy_s_parm,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_subscribe_event = capture_legacy_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * Queue Operations
+ */
+
+static int capture_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &priv->vdev.fmt;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
+ unsigned int count = *nbuffers;
+
+ if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
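+	/*
+	 * For VIDIOC_CREATE_BUFS (*nplanes already set), validate the
+	 * requested plane size and account for the buffers already
+	 * allocated, so the memory limit below applies to the total.
+	 */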
+ if (*nplanes) {
+ if (*nplanes != 1 || sizes[0] < pix->sizeimage)
+ return -EINVAL;
+ count += q_num_bufs;
+ }
+
+ count = min_t(__u32, VID_MEM_LIMIT / pix->sizeimage, count);
+
+ if (*nplanes)
+ *nbuffers = (count < q_num_bufs) ? 0 :
+ count - q_num_bufs;
+ else
+ *nbuffers = count;
+
+ *nplanes = 1;
+ sizes[0] = pix->sizeimage;
+
+ return 0;
+}
+
+static int capture_buf_init(struct vb2_buffer *vb)
+{
+ struct imx_media_buffer *buf = to_imx_media_vb(vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int capture_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &priv->vdev.fmt;
+
+ if (vb2_plane_size(vb, 0) < pix->sizeimage) {
+ dev_err(priv->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), (unsigned long)pix->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, pix->sizeimage);
+
+ return 0;
+}
+
+static void capture_buf_queue(struct vb2_buffer *vb)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vb->vb2_queue);
+ struct imx_media_buffer *buf = to_imx_media_vb(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+
+ list_add_tail(&buf->list, &priv->ready_q);
+
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+
+static int capture_validate_fmt(struct capture_priv *priv)
+{
+ struct v4l2_subdev_format fmt_src = {
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct imx_media_pixfmt *cc;
+ int ret;
+
+ /* Retrieve the media bus format on the source subdev. */
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ /*
+ * Verify that the media bus size matches the size set on the video
+ * node. It is sufficient to check the compose rectangle size without
+ * checking the rounded size from vdev.fmt, as the rounded size is
+ * derived directly from the compose rectangle size, and will thus
+ * always match if the compose rectangle matches.
+ */
+ if (priv->vdev.compose.width != fmt_src.format.width ||
+ priv->vdev.compose.height != fmt_src.format.height)
+ return -EPIPE;
+
+ /*
+ * Verify that the media bus code is compatible with the pixel format
+ * set on the video node.
+ */
+ cc = capture_find_format(fmt_src.format.code, 0);
+ if (!cc || priv->vdev.cc->cs != cc->cs)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct imx_media_buffer *buf, *tmp;
+ unsigned long flags;
+ int ret;
+
+ ret = capture_validate_fmt(priv);
+ if (ret) {
+ dev_err(priv->dev, "capture format not valid\n");
+ goto return_bufs;
+ }
+
+ ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
+ true);
+ if (ret) {
+ dev_err(priv->dev, "pipeline start failed with %d\n", ret);
+ goto return_bufs;
+ }
+
+ return 0;
+
+return_bufs:
+ spin_lock_irqsave(&priv->q_lock, flags);
+ list_for_each_entry_safe(buf, tmp, &priv->ready_q, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+ return ret;
+}
+
+static void capture_stop_streaming(struct vb2_queue *vq)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct imx_media_buffer *frame;
+ struct imx_media_buffer *tmp;
+ unsigned long flags;
+ int ret;
+
+ ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
+ false);
+ if (ret)
+ dev_warn(priv->dev, "pipeline stop failed with %d\n", ret);
+
+ /* release all active buffers */
+ spin_lock_irqsave(&priv->q_lock, flags);
+ list_for_each_entry_safe(frame, tmp, &priv->ready_q, list) {
+ list_del(&frame->list);
+ vb2_buffer_done(&frame->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+
+static const struct vb2_ops capture_qops = {
+ .queue_setup = capture_queue_setup,
+ .buf_init = capture_buf_init,
+ .buf_prepare = capture_buf_prepare,
+ .buf_queue = capture_buf_queue,
+ .start_streaming = capture_start_streaming,
+ .stop_streaming = capture_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * File Operations
+ */
+
+static int capture_open(struct file *file)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct video_device *vfd = priv->vdev.vfd;
+ int ret;
+
+ if (mutex_lock_interruptible(&priv->mutex))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ dev_err(priv->dev, "v4l2_fh_open failed\n");
+ goto out;
+ }
+
+ ret = v4l2_pipeline_pm_get(&vfd->entity);
+ if (ret)
+ v4l2_fh_release(file);
+
+out:
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static int capture_release(struct file *file)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct video_device *vfd = priv->vdev.vfd;
+ struct vb2_queue *vq = &priv->q;
+
+ mutex_lock(&priv->mutex);
+
+ if (file->private_data == vq->owner) {
+ vb2_queue_release(vq);
+ vq->owner = NULL;
+ }
+
+ v4l2_pipeline_pm_put(&vfd->entity);
+
+ v4l2_fh_release(file);
+ mutex_unlock(&priv->mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations capture_fops = {
+ .owner = THIS_MODULE,
+ .open = capture_open,
+ .release = capture_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Public API
+ */
+
+struct imx_media_buffer *
+imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct imx_media_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+
+ /* get next queued buffer */
+ if (!list_empty(&priv->ready_q)) {
+ buf = list_entry(priv->ready_q.next, struct imx_media_buffer,
+ list);
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_next_buf);
+
+void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct vb2_queue *vq = &priv->q;
+ unsigned long flags;
+
+ if (!vb2_is_streaming(vq))
+ return;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+ vb2_queue_error(vq);
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
+
+static int capture_init_format(struct capture_priv *priv)
+{
+ struct v4l2_subdev_format fmt_src = {
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct imx_media_video_dev *vdev = &priv->vdev;
+ int ret;
+
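+	/*
+	 * The legacy API inherits the initial format from the connected
+	 * source subdev; the media-controller API starts from a fixed
+	 * default and leaves pipeline configuration to userspace.
+	 */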
+ if (priv->legacy_api) {
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL,
+ &fmt_src);
+ if (ret) {
+ dev_err(priv->dev, "failed to get source format\n");
+ return ret;
+ }
+ } else {
+ fmt_src.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt_src.format.width = IMX_MEDIA_DEF_PIX_WIDTH;
+ fmt_src.format.height = IMX_MEDIA_DEF_PIX_HEIGHT;
+ }
+
+ imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt, &fmt_src.format, NULL);
+ vdev->compose.width = fmt_src.format.width;
+ vdev->compose.height = fmt_src.format.height;
+
+ vdev->cc = imx_media_find_pixel_format(vdev->fmt.pixelformat,
+ PIXFMT_SEL_ANY);
+
+ return 0;
+}
+
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev,
+ u32 link_flags)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct v4l2_subdev *sd = priv->src_sd;
+ struct v4l2_device *v4l2_dev = sd->v4l2_dev;
+ struct video_device *vfd = vdev->vfd;
+ int ret;
+
+ /* get media device */
+ priv->md = container_of(v4l2_dev->mdev, struct imx_media_dev, md);
+
+ vfd->v4l2_dev = v4l2_dev;
+
+ /* Initialize the default format and compose rectangle. */
+ ret = capture_init_format(priv);
+ if (ret < 0)
+ return ret;
+
+ /* Register the video device. */
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_info(priv->dev, "Registered %s as /dev/%s\n", vfd->name,
+ video_device_node_name(vfd));
+
+ /* Create the link from the src_sd devnode pad to device node. */
+ if (link_flags & MEDIA_LNK_FL_IMMUTABLE)
+ link_flags |= MEDIA_LNK_FL_ENABLED;
+ ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
+ &vfd->entity, 0, link_flags);
+ if (ret) {
+ dev_err(priv->dev, "failed to create link to device node\n");
+ video_unregister_device(vfd);
+ return ret;
+ }
+
+ /* Add vdev to the video devices list. */
+ imx_media_add_video_device(priv->md, vdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_register);
+
+void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+ media_entity_cleanup(&vfd->entity);
+ video_unregister_device(vfd);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_unregister);
+
+struct imx_media_video_dev *
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad, bool legacy_api)
+{
+ struct capture_priv *priv;
+ struct video_device *vfd;
+ struct vb2_queue *vq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->src_sd = src_sd;
+ priv->src_sd_pad = pad;
+ priv->dev = dev;
+ priv->legacy_api = legacy_api;
+
+ mutex_init(&priv->mutex);
+ INIT_LIST_HEAD(&priv->ready_q);
+ spin_lock_init(&priv->q_lock);
+
+ /* Allocate and initialize the video device. */
+ vfd = video_device_alloc();
+ if (!vfd)
+ return ERR_PTR(-ENOMEM);
+
+ vfd->fops = &capture_fops;
+ vfd->ioctl_ops = legacy_api ? &capture_legacy_ioctl_ops
+ : &capture_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->vfl_dir = VFL_DIR_RX;
+ vfd->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+ vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
+ | (!legacy_api ? V4L2_CAP_IO_MC : 0);
+ vfd->lock = &priv->mutex;
+ vfd->queue = &priv->q;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s capture", src_sd->name);
+
+ video_set_drvdata(vfd, priv);
+ priv->vdev.vfd = vfd;
+ INIT_LIST_HEAD(&priv->vdev.list);
+
+ /* Initialize the video device pad. */
+ priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
+ if (ret) {
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
+ /* Initialize the vb2 queue. */
+ vq = &priv->q;
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = priv;
+ vq->buf_struct_size = sizeof(struct imx_media_buffer);
+ vq->ops = &capture_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->lock = &priv->mutex;
+ vq->min_queued_buffers = 2;
+ vq->dev = priv->dev;
+
+ ret = vb2_queue_init(vq);
+ if (ret) {
+ dev_err(priv->dev, "vb2_queue_init failed\n");
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
+ if (legacy_api) {
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ vfd->ctrl_handler = &priv->ctrl_hdlr;
+ }
+
+ return &priv->vdev;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_init);
+
+void imx_media_capture_device_remove(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_remove);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 video capture interface driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
new file mode 100644
index 000000000000..0a27330f9790
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * i.MX IPUv3 IC PP mem2mem CSC/Scaler driver
+ *
+ * Copyright (C) 2011 Pengutronix, Sascha Hauer
+ * Copyright (C) 2018 Pengutronix, Philipp Zabel
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <video/imx-ipu-v3.h>
+#include <video/imx-ipu-image-convert.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "imx-media.h"
+
+#define IMX_CSC_SCALER_NAME "imx-csc-scaler"
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct ipu_csc_scaler_priv {
+ struct imx_media_video_dev vdev;
+
+ struct v4l2_m2m_dev *m2m_dev;
+ struct device *dev;
+
+ struct imx_media_dev *md;
+
+ struct mutex mutex; /* mem2mem device mutex */
+};
+
+#define vdev_to_priv(v) container_of(v, struct ipu_csc_scaler_priv, vdev)
+
+/* Per-queue, driver-specific private data */
+struct ipu_csc_scaler_q_data {
+ struct v4l2_pix_format cur_fmt;
+ struct v4l2_rect rect;
+};
+
+struct ipu_csc_scaler_ctx {
+ struct ipu_csc_scaler_priv *priv;
+
+ struct v4l2_fh fh;
+ struct ipu_csc_scaler_q_data q_data[2];
+ struct ipu_image_convert_ctx *icc;
+
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ int rotate;
+ bool hflip;
+ bool vflip;
+ enum ipu_rotate_mode rot_mode;
+ unsigned int sequence;
+};
+
+static inline struct ipu_csc_scaler_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct ipu_csc_scaler_ctx, fh);
+}
+
+static struct ipu_csc_scaler_q_data *get_q_data(struct ipu_csc_scaler_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[V4L2_M2M_SRC];
+ else
+ return &ctx->q_data[V4L2_M2M_DST];
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+static void job_abort(void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+
+ if (ctx->icc)
+ ipu_image_convert_abort(ctx->icc);
+}
+
+static void ipu_ic_pp_complete(struct ipu_image_convert_run *run, void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf);
+
+ src_buf->sequence = ctx->sequence++;
+ dst_buf->sequence = src_buf->sequence;
+
+ v4l2_m2m_buf_done(src_buf, run->status ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, run->status ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+ kfree(run);
+}
+
+static void device_run(void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct ipu_image_convert_run *run;
+ int ret;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ run = kzalloc(sizeof(*run), GFP_KERNEL);
+ if (!run)
+ goto err;
+
+ run->ctx = ctx->icc;
+ run->in_phys = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ run->out_phys = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ ret = ipu_image_convert_queue(run);
+ if (ret < 0) {
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev,
+ "%s: failed to queue: %d\n", __func__, ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+/*
+ * Video ioctls
+ */
+static int ipu_csc_scaler_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, IMX_CSC_SCALER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CSC_SCALER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", IMX_CSC_SCALER_NAME);
+
+ return 0;
+}
+
+static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ u32 fourcc;
+ int ret;
+
+ ret = imx_media_enum_pixel_formats(&fourcc, f->index,
+ PIXFMT_SEL_YUV_RGB, 0);
+ if (ret)
+ return ret;
+
+ f->pixelformat = fourcc;
+
+ return 0;
+}
+
+static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix = q_data->cur_fmt;
+
+ return 0;
+}
+
+static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
+ struct ipu_csc_scaler_q_data *q_data = get_q_data(ctx, f->type);
+ struct ipu_image test_in, test_out;
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ struct ipu_csc_scaler_q_data *q_data_in =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ test_out.pix = f->fmt.pix;
+ test_in.pix = q_data_in->cur_fmt;
+ } else {
+ struct ipu_csc_scaler_q_data *q_data_out =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ test_in.pix = f->fmt.pix;
+ test_out.pix = q_data_out->cur_fmt;
+ }
+
+ ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
+
+ f->fmt.pix = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ test_out.pix : test_in.pix;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ f->fmt.pix.colorspace = q_data->cur_fmt.colorspace;
+ f->fmt.pix.ycbcr_enc = q_data->cur_fmt.ycbcr_enc;
+ f->fmt.pix.xfer_func = q_data->cur_fmt.xfer_func;
+ f->fmt.pix.quantization = q_data->cur_fmt.quantization;
+ } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ return 0;
+}
+
+static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
+ struct ipu_csc_scaler_q_data *q_data;
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: queue busy\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+
+ ret = ipu_csc_scaler_try_fmt(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ q_data->cur_fmt.width = f->fmt.pix.width;
+ q_data->cur_fmt.height = f->fmt.pix.height;
+ q_data->cur_fmt.pixelformat = f->fmt.pix.pixelformat;
+ q_data->cur_fmt.field = f->fmt.pix.field;
+ q_data->cur_fmt.bytesperline = f->fmt.pix.bytesperline;
+ q_data->cur_fmt.sizeimage = f->fmt.pix.sizeimage;
+
+ /* Reset cropping/composing rectangle */
+ q_data->rect.left = 0;
+ q_data->rect.top = 0;
+ q_data->rect.width = q_data->cur_fmt.width;
+ q_data->rect.height = q_data->cur_fmt.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /* Set colorimetry on the output queue */
+ q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+ q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+ q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+ /* Propagate colorimetry to the capture queue */
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+ q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+ q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+ }
+
+ /*
+ * TODO: Setting colorimetry on the capture queue is currently not
+ * supported by the V4L2 API
+ */
+
+ return 0;
+}
+
+static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (s->target == V4L2_SEL_TGT_CROP ||
+ s->target == V4L2_SEL_TGT_COMPOSE) {
+ s->r = q_data->rect;
+ } else {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->cur_fmt.width;
+ s->r.height = q_data->cur_fmt.height;
+ }
+
+ return 0;
+}
+
+static int ipu_csc_scaler_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+
+ /*
+ * The input frame width to the IC must be a multiple of 8 pixels.
+ * When resizing, the frame width must be a multiple of the burst
+ * size (8 or 16 pixels), as defined by the CB#_BURST_16 parameter.
+ */
+ if (s->flags & V4L2_SEL_FLAG_GE)
+ s->r.width = round_up(s->r.width, 8);
+ if (s->flags & V4L2_SEL_FLAG_LE)
+ s->r.width = round_down(s->r.width, 8);
+ s->r.width = clamp_t(unsigned int, s->r.width, 8,
+ round_down(q_data->cur_fmt.width, 8));
+ s->r.height = clamp_t(unsigned int, s->r.height, 1,
+ q_data->cur_fmt.height);
+ s->r.left = clamp_t(unsigned int, s->r.left, 0,
+ q_data->cur_fmt.width - s->r.width);
+ s->r.top = clamp_t(unsigned int, s->r.top, 0,
+ q_data->cur_fmt.height - s->r.height);
+
+ /* V4L2_SEL_FLAG_KEEP_CONFIG is only valid for subdevices */
+ q_data->rect = s->r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops ipu_csc_scaler_ioctl_ops = {
+ .vidioc_querycap = ipu_csc_scaler_querycap,
+
+ .vidioc_enum_fmt_vid_cap = ipu_csc_scaler_enum_fmt,
+ .vidioc_g_fmt_vid_cap = ipu_csc_scaler_g_fmt,
+ .vidioc_try_fmt_vid_cap = ipu_csc_scaler_try_fmt,
+ .vidioc_s_fmt_vid_cap = ipu_csc_scaler_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = ipu_csc_scaler_enum_fmt,
+ .vidioc_g_fmt_vid_out = ipu_csc_scaler_g_fmt,
+ .vidioc_try_fmt_vid_out = ipu_csc_scaler_try_fmt,
+ .vidioc_s_fmt_vid_out = ipu_csc_scaler_s_fmt,
+
+ .vidioc_g_selection = ipu_csc_scaler_g_selection,
+ .vidioc_s_selection = ipu_csc_scaler_s_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int ipu_csc_scaler_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ipu_csc_scaler_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ size = q_data->cur_fmt.sizeimage;
+
+ *nbuffers = count;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dev_dbg(ctx->priv->dev, "get %d buffer(s) of size %d each.\n",
+ count, size);
+
+ return 0;
+}
+
+static int ipu_csc_scaler_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ipu_csc_scaler_q_data *q_data;
+ unsigned long size;
+
+ dev_dbg(ctx->priv->dev, "type: %d\n", vq->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(ctx->priv->dev, "%s: field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ q_data = get_q_data(ctx, vq->type);
+ size = q_data->cur_fmt.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_dbg(ctx->priv->dev,
+ "%s: data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void ipu_csc_scaler_buf_queue(struct vb2_buffer *vb)
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static void ipu_image_from_q_data(struct ipu_image *im,
+ struct ipu_csc_scaler_q_data *q_data)
+{
+ struct v4l2_pix_format *fmt = &q_data->cur_fmt;
+
+ im->pix = *fmt;
+ if (fmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ im->pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ /* assume a non-RGB pixel format when mapping the default quantization */
+ if (fmt->quantization == V4L2_QUANTIZATION_DEFAULT)
+ im->pix.quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt->colorspace,
+ im->pix.ycbcr_enc);
+ im->rect = q_data->rect;
+}
+
+static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ const enum ipu_ic_task ic_task = IC_TASK_POST_PROCESSOR;
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct ipu_soc *ipu = priv->md->ipu[0];
+ struct ipu_csc_scaler_q_data *q_data;
+ struct vb2_queue *other_q;
+ struct ipu_image in, out;
+
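+	/*
+	 * Preparing the image conversion context needs both the input and
+	 * output formats, so wait until the other queue is streaming too.
+	 */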
+ other_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (!vb2_is_streaming(other_q))
+ return 0;
+
+ if (ctx->icc) {
+ v4l2_warn(ctx->priv->vdev.vfd->v4l2_dev, "removing old ICC\n");
+ ipu_image_convert_unprepare(ctx->icc);
+ }
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ipu_image_from_q_data(&in, q_data);
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ ipu_image_from_q_data(&out, q_data);
+
+ ctx->icc = ipu_image_convert_prepare(ipu, ic_task, &in, &out,
+ ctx->rot_mode,
+ ipu_ic_pp_complete, ctx);
+ if (IS_ERR(ctx->icc)) {
+ struct vb2_v4l2_buffer *buf;
+ int ret = PTR_ERR(ctx->icc);
+
+ ctx->icc = NULL;
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: error %d\n",
+ __func__, ret);
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *buf;
+
+ if (ctx->icc) {
+ ipu_image_convert_unprepare(ctx->icc);
+ ctx->icc = NULL;
+ }
+
+ ctx->sequence = 0;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops ipu_csc_scaler_qops = {
+ .queue_setup = ipu_csc_scaler_queue_setup,
+ .buf_prepare = ipu_csc_scaler_buf_prepare,
+ .buf_queue = ipu_csc_scaler_buf_queue,
+ .start_streaming = ipu_csc_scaler_start_streaming,
+ .stop_streaming = ipu_csc_scaler_stop_streaming,
+};
+
+static int ipu_csc_scaler_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct ipu_csc_scaler_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &ipu_csc_scaler_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->priv->mutex;
+ src_vq->dev = ctx->priv->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &ipu_csc_scaler_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->priv->mutex;
+ dst_vq->dev = ctx->priv->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int ipu_csc_scaler_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ipu_csc_scaler_ctx *ctx = container_of(ctrl->handler,
+ struct ipu_csc_scaler_ctx,
+ ctrl_hdlr);
+ enum ipu_rotate_mode rot_mode;
+ int rotate;
+ bool hflip, vflip;
+ int ret = 0;
+
+ rotate = ctx->rotate;
+ hflip = ctx->hflip;
+ vflip = ctx->vflip;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ rotate = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = ipu_degrees_to_rot_mode(&rot_mode, rotate, hflip, vflip);
+ if (ret)
+ return ret;
+
+ if (rot_mode != ctx->rot_mode) {
+ struct v4l2_pix_format *in_fmt, *out_fmt;
+ struct ipu_image test_in, test_out;
+
+ in_fmt = &ctx->q_data[V4L2_M2M_SRC].cur_fmt;
+ out_fmt = &ctx->q_data[V4L2_M2M_DST].cur_fmt;
+
+ test_in.pix = *in_fmt;
+ test_out.pix = *out_fmt;
+
+ if (ipu_rot_mode_is_irt(rot_mode) !=
+ ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* Switch width & height to keep aspect ratio intact */
+ test_out.pix.width = out_fmt->height;
+ test_out.pix.height = out_fmt->width;
+ }
+
+ ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
+
+ /* Check if output format needs to be changed */
+ if (test_in.pix.width != in_fmt->width ||
+ test_in.pix.height != in_fmt->height ||
+ test_in.pix.bytesperline != in_fmt->bytesperline ||
+ test_in.pix.sizeimage != in_fmt->sizeimage) {
+ struct vb2_queue *out_q;
+
+ out_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_busy(out_q))
+ return -EBUSY;
+ }
+
+ /* Check if capture format needs to be changed */
+ if (test_out.pix.width != out_fmt->width ||
+ test_out.pix.height != out_fmt->height ||
+ test_out.pix.bytesperline != out_fmt->bytesperline ||
+ test_out.pix.sizeimage != out_fmt->sizeimage) {
+ struct vb2_queue *cap_q;
+
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vb2_is_busy(cap_q))
+ return -EBUSY;
+ }
+
+ *in_fmt = test_in.pix;
+ *out_fmt = test_out.pix;
+
+ ctx->rot_mode = rot_mode;
+ ctx->rotate = rotate;
+ ctx->hflip = hflip;
+ ctx->vflip = vflip;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ipu_csc_scaler_ctrl_ops = {
+ .s_ctrl = ipu_csc_scaler_s_ctrl,
+};
+
+static int ipu_csc_scaler_init_controls(struct ipu_csc_scaler_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *hdlr = &ctx->ctrl_hdlr;
+
+ v4l2_ctrl_handler_init(hdlr, 3);
+
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+
+ if (hdlr->error) {
+ v4l2_ctrl_handler_free(hdlr);
+ return hdlr->error;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+}
+
+#define DEFAULT_WIDTH 720
+#define DEFAULT_HEIGHT 576
+static const struct ipu_csc_scaler_q_data ipu_csc_scaler_q_data_default = {
+ .cur_fmt = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = DEFAULT_WIDTH,
+ .sizeimage = DEFAULT_WIDTH * DEFAULT_HEIGHT * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ },
+ .rect = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ },
+};
+
+/*
+ * File operations
+ */
+static int ipu_csc_scaler_open(struct file *file)
+{
+ struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+ struct ipu_csc_scaler_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->rot_mode = IPU_ROTATE_NONE;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ v4l2_fh_add(&ctx->fh, file);
+ ctx->priv = priv;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(priv->m2m_dev, ctx,
+ &ipu_csc_scaler_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_ctx;
+ }
+
+ ret = ipu_csc_scaler_init_controls(ctx);
+ if (ret)
+ goto err_ctrls;
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdlr;
+
+ ctx->q_data[V4L2_M2M_SRC] = ipu_csc_scaler_q_data_default;
+ ctx->q_data[V4L2_M2M_DST] = ipu_csc_scaler_q_data_default;
+
+ dev_dbg(priv->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
+ ctx->fh.m2m_ctx);
+
+ return 0;
+
+err_ctrls:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_ctx:
+ v4l2_fh_del(&ctx->fh, file);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int ipu_csc_scaler_release(struct file *file)
+{
+ struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
+
+ dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh, file);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations ipu_csc_scaler_fops = {
+ .owner = THIS_MODULE,
+ .open = ipu_csc_scaler_open,
+ .release = ipu_csc_scaler_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+};
+
+static void ipu_csc_scaler_video_device_release(struct video_device *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = video_get_drvdata(vdev);
+
+ v4l2_m2m_release(priv->m2m_dev);
+ video_device_release(vdev);
+ kfree(priv);
+}
+
+static const struct video_device ipu_csc_scaler_videodev_template = {
+ .name = "ipu_ic_pp csc/scaler",
+ .fops = &ipu_csc_scaler_fops,
+ .ioctl_ops = &ipu_csc_scaler_ioctl_ops,
+ .minor = -1,
+ .release = ipu_csc_scaler_video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = vdev->vfd;
+ int ret;
+
+ vfd->v4l2_dev = &priv->md->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(vfd->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ v4l2_info(vfd->v4l2_dev, "Registered %s as /dev/%s\n", vfd->name,
+ video_device_node_name(vfd));
+
+ return 0;
+}
+
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+ video_unregister_device(vfd);
+}
+
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *md)
+{
+ struct ipu_csc_scaler_priv *priv;
+ struct video_device *vfd;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->md = md;
+ priv->dev = md->md.dev;
+
+ mutex_init(&priv->mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ ret = -ENOMEM;
+ goto err_vfd;
+ }
+
+ *vfd = ipu_csc_scaler_videodev_template;
+ vfd->lock = &priv->mutex;
+ priv->vdev.vfd = vfd;
+
+ INIT_LIST_HEAD(&priv->vdev.list);
+
+ video_set_drvdata(vfd, priv);
+
+ priv->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(priv->m2m_dev)) {
+ ret = PTR_ERR(priv->m2m_dev);
+ v4l2_err(&md->v4l2_dev, "Failed to init mem2mem device: %d\n",
+ ret);
+ goto err_m2m;
+ }
+
+ return &priv->vdev;
+
+err_m2m:
+ video_device_release(vfd);
+err_vfd:
+ kfree(priv);
+ return ERR_PTR(ret);
+}
+
+MODULE_DESCRIPTION("i.MX IPUv3 mem2mem scaler/CSC driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
new file mode 100644
index 000000000000..fd7e37d803e7
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -0,0 +1,2083 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Capture CSI Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2014-2017 Mentor Graphics Inc.
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+/*
+ * Min/Max supported width and heights.
+ *
+ * We allow planar output, so we have to align width by 16 pixels
+ * to meet IDMAC alignment requirements.
+ *
+ * TODO: move this into pad format negotiation, if capture device
+ * has not requested planar formats, we should allow 8 pixel
+ * alignment.
+ */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 4096
+#define MAX_H 4096
+#define W_ALIGN 1 /* multiple of 2 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
+
+/**
+ * struct csi_skip_desc - CSI frame skipping descriptor
+ * @keep: number of frames kept per @max_ratio frames
+ * @max_ratio: width of skip_smfc, written to the MAX_RATIO bitfield
+ * @skip_smfc: skip pattern written to the SKIP_SMFC bitfield
+ */
+struct csi_skip_desc {
+ u8 keep;
+ u8 max_ratio;
+ u8 skip_smfc;
+};
+
+struct csi_priv {
+ struct device *dev;
+ struct ipu_soc *ipu;
+ struct v4l2_subdev sd;
+ struct media_pad pad[CSI_NUM_PADS];
+ struct v4l2_async_notifier notifier;
+
+ /* the video device at IDMAC output pad */
+ struct imx_media_video_dev *vdev;
+ struct imx_media_fim *fim;
+ int csi_id;
+ int smfc_id;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ int active_output_pad;
+
+ struct ipuv3_channel *idmac_ch;
+ struct ipu_smfc *smfc;
+ struct ipu_csi *csi;
+
+ struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
+ const struct imx_media_pixfmt *cc[CSI_NUM_PADS];
+ struct v4l2_fract frame_interval[CSI_NUM_PADS];
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ const struct csi_skip_desc *skip;
+
+ /* active vb2 buffers to send to video dev sink */
+ struct imx_media_buffer *active_vb2_buf[2];
+ struct imx_media_dma_buf underrun_buf;
+
+ int ipu_buf_num; /* ipu double buffer index: 0-1 */
+
+ /* the sink for the captured frames */
+ struct media_entity *sink;
+ enum ipu_csi_dest dest;
+ /* the source subdev */
+ struct v4l2_subdev *src_sd;
+
+ /* the mipi virtual channel number at link validate */
+ int vc_num;
+
+ /* media bus config of the upstream subdevice CSI is receiving from */
+ struct v4l2_mbus_config mbus_cfg;
+
+ spinlock_t irqlock; /* protect eof_irq handler */
+ struct timer_list eof_timeout_timer;
+ int eof_irq;
+ int nfb4eof_irq;
+
+ struct v4l2_ctrl_handler ctrl_hdlr;
+
+ int stream_count; /* streaming counter */
+ u32 frame_sequence; /* frame sequence counter */
+ bool last_eof; /* waiting for last EOF at stream off */
+ bool nfb4eof; /* NFB4EOF encountered during streaming */
+ bool interweave_swap; /* swap top/bottom lines when interweaving */
+ struct completion last_eof_comp;
+};
+
+static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi_priv, sd);
+}
+
+static inline struct csi_priv *notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi_priv, notifier);
+}
+
+static inline bool is_parallel_bus(struct v4l2_mbus_config *mbus_cfg)
+{
+ return mbus_cfg->type != V4L2_MBUS_CSI2_DPHY;
+}
+
+static inline bool is_parallel_16bit_bus(struct v4l2_mbus_config *mbus_cfg)
+{
+ return is_parallel_bus(mbus_cfg) && mbus_cfg->bus.parallel.bus_width >= 16;
+}
+
+/*
+ * Check for conditions that require the IPU to handle the
+ * data internally as generic data, aka passthrough mode:
+ * - the data is a raw bayer media bus format, or
+ * - the CSI is receiving from a 16-bit parallel bus, or
+ * - the CSI is receiving from an 8-bit parallel bus and the incoming
+ *   media bus format is other than UYVY8_2X8/YUYV8_2X8.
+ * BT.656 and BT.1120 (8/10-bit YUV422) data can always be processed
+ * on-the-fly and never requires passthrough.
+ */
+static inline bool requires_passthrough(struct v4l2_mbus_config *mbus_cfg,
+ struct v4l2_mbus_framefmt *infmt,
+ const struct imx_media_pixfmt *incc)
+{
+ if (mbus_cfg->type == V4L2_MBUS_BT656) /* including BT.1120 */
+ return false;
+
+ return incc->bayer || is_parallel_16bit_bus(mbus_cfg) ||
+ (is_parallel_bus(mbus_cfg) &&
+ infmt->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
+ infmt->code != MEDIA_BUS_FMT_YUYV8_2X8);
+}
+
+/*
+ * Queries the media bus config of the upstream entity that provides data to
+ * the CSI. This will either be the entity directly upstream from the CSI-2
+ * receiver, directly upstream from a video mux, or directly upstream from
+ * the CSI itself.
+ */
+static int csi_get_upstream_mbus_config(struct csi_priv *priv,
+ struct v4l2_mbus_config *mbus_cfg)
+{
+ struct v4l2_subdev *sd, *remote_sd;
+ struct media_pad *remote_pad;
+ int ret;
+
+ if (!priv->src_sd)
+ return -EPIPE;
+
+ sd = priv->src_sd;
+
+ switch (sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_CSI_MUX:
+ /*
+ * CSI is connected directly to CSI mux, skip up to
+ * CSI-2 receiver if it is in the path, otherwise stay
+ * with the CSI mux.
+ */
+ sd = imx_media_pipeline_subdev(&sd->entity,
+ IMX_MEDIA_GRP_ID_CSI2,
+ true);
+ if (IS_ERR(sd))
+ sd = priv->src_sd;
+ break;
+ case IMX_MEDIA_GRP_ID_CSI2:
+ break;
+ default:
+ /*
+ * the source is neither the CSI mux nor the CSI-2 receiver,
+ * get the source pad directly upstream from CSI itself.
+ */
+ sd = &priv->sd;
+ break;
+ }
+
+ /* get source pad of entity directly upstream from sd */
+ remote_pad = media_entity_remote_pad_unique(&sd->entity,
+ MEDIA_PAD_FL_SOURCE);
+ if (IS_ERR(remote_pad))
+ return PTR_ERR(remote_pad);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+
+ ret = v4l2_subdev_call(remote_sd, pad, get_mbus_config,
+ remote_pad->index, mbus_cfg);
+ if (ret == -ENOIOCTLCMD)
+ v4l2_err(&priv->sd,
+ "entity %s does not implement get_mbus_config()\n",
+ remote_pad->entity->name);
+
+ return ret;
+}
+
+static void csi_idmac_put_ipu_resources(struct csi_priv *priv)
+{
+ if (priv->idmac_ch)
+ ipu_idmac_put(priv->idmac_ch);
+ priv->idmac_ch = NULL;
+
+ if (priv->smfc)
+ ipu_smfc_put(priv->smfc);
+ priv->smfc = NULL;
+}
+
+static int csi_idmac_get_ipu_resources(struct csi_priv *priv)
+{
+ int ch_num, ret;
+ struct ipu_smfc *smfc;
+ struct ipuv3_channel *idmac_ch;
+
+ ch_num = IPUV3_CHANNEL_CSI0 + priv->smfc_id;
+
+ smfc = ipu_smfc_get(priv->ipu, ch_num);
+ if (IS_ERR(smfc)) {
+ v4l2_err(&priv->sd, "failed to get SMFC\n");
+ ret = PTR_ERR(smfc);
+ goto out;
+ }
+ priv->smfc = smfc;
+
+ idmac_ch = ipu_idmac_get(priv->ipu, ch_num);
+ if (IS_ERR(idmac_ch)) {
+ v4l2_err(&priv->sd, "could not get IDMAC channel %u\n",
+ ch_num);
+ ret = PTR_ERR(idmac_ch);
+ goto out;
+ }
+ priv->idmac_ch = idmac_ch;
+
+ return 0;
+out:
+ csi_idmac_put_ipu_resources(priv);
+ return ret;
+}
+
+static void csi_vb2_buf_done(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = priv->active_vb2_buf[priv->ipu_buf_num];
+ if (done) {
+ done->vbuf.field = vdev->fmt.field;
+ done->vbuf.sequence = priv->frame_sequence;
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, priv->nfb4eof ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ }
+
+ priv->frame_sequence++;
+ priv->nfb4eof = false;
+
+ /* get next queued buffer */
+ next = imx_media_capture_device_next_buf(vdev);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ priv->active_vb2_buf[priv->ipu_buf_num] = next;
+ } else {
+ phys = priv->underrun_buf.phys;
+ priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
+ }
+
+ if (ipu_idmac_buffer_is_ready(priv->idmac_ch, priv->ipu_buf_num))
+ ipu_idmac_clear_buffer(priv->idmac_ch, priv->ipu_buf_num);
+
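+	/* when interweaving bottom-field-first, start DMA at the second line */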
+ if (priv->interweave_swap)
+ phys += vdev->fmt.bytesperline;
+
+ ipu_cpmem_set_buffer(priv->idmac_ch, priv->ipu_buf_num, phys);
+}
+
+static irqreturn_t csi_idmac_eof_interrupt(int irq, void *dev_id)
+{
+ struct csi_priv *priv = dev_id;
+
+ spin_lock(&priv->irqlock);
+
+ if (priv->last_eof) {
+ complete(&priv->last_eof_comp);
+ priv->last_eof = false;
+ goto unlock;
+ }
+
+ if (priv->fim)
+ /* call frame interval monitor */
+ imx_media_fim_eof_monitor(priv->fim, ktime_get());
+
+ csi_vb2_buf_done(priv);
+
+ /* select new IPU buf */
+ ipu_idmac_select_buffer(priv->idmac_ch, priv->ipu_buf_num);
+ /* toggle IPU double-buffer index */
+ priv->ipu_buf_num ^= 1;
+
+ /* bump the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+unlock:
+ spin_unlock(&priv->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
+{
+ struct csi_priv *priv = dev_id;
+
+ spin_lock(&priv->irqlock);
+
+ /*
+ * this is not an unrecoverable error, just mark
+ * the next captured frame with vb2 error flag.
+ */
+ priv->nfb4eof = true;
+
+ v4l2_err(&priv->sd, "NFB4EOF\n");
+
+ spin_unlock(&priv->irqlock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * EOF timeout timer function. This is an unrecoverable condition
+ * without a stream restart.
+ */
+static void csi_idmac_eof_timeout(struct timer_list *t)
+{
+ struct csi_priv *priv = timer_container_of(priv, t, eof_timeout_timer);
+ struct imx_media_video_dev *vdev = priv->vdev;
+
+ v4l2_err(&priv->sd, "EOF timeout\n");
+
+ /* signal a fatal error to capture device */
+ imx_media_capture_device_error(vdev);
+}
+
+static void csi_idmac_setup_vb2_buf(struct csi_priv *priv, dma_addr_t *phys)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *buf;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx_media_capture_device_next_buf(vdev);
+ if (buf) {
+ priv->active_vb2_buf[i] = buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(
+ &buf->vbuf.vb2_buf, 0);
+ } else {
+ priv->active_vb2_buf[i] = NULL;
+ phys[i] = priv->underrun_buf.phys;
+ }
+ }
+}
+
+static void csi_idmac_unsetup_vb2_buf(struct csi_priv *priv,
+ enum vb2_buffer_state return_status)
+{
+ struct imx_media_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = priv->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ }
+ }
+}
+
+/* init the SMFC IDMAC channel */
+static int csi_idmac_setup_channel(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_mbus_framefmt *outfmt;
+ bool passthrough, interweave;
+ struct ipu_image image;
+ u32 passthrough_bits;
+ u32 passthrough_cycles;
+ dma_addr_t phys[2];
+ u32 burst_size;
+ int ret;
+
+ infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
+ outfmt = &priv->format_mbus[CSI_SRC_PAD_IDMAC];
+
+ ipu_cpmem_zero(priv->idmac_ch);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt;
+ image.rect = vdev->compose;
+
+ csi_idmac_setup_vb2_buf(priv, phys);
+
+ image.phys0 = phys[0];
+ image.phys1 = phys[1];
+
+ passthrough = requires_passthrough(&priv->mbus_cfg, infmt, incc);
+ passthrough_cycles = 1;
+
+ /*
+	 * If the field type at the capture interface is interlaced, and
+ * the output IDMAC pad is sequential, enable interweave at
+ * the IDMAC output channel.
+ */
+ interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
+ priv->interweave_swap = interweave &&
+ image.pix.field == V4L2_FIELD_INTERLACED_BT;
+
+ switch (image.pix.pixelformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
+ burst_size = 16;
+ passthrough_bits = 8;
+ break;
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SGBRG16:
+ case V4L2_PIX_FMT_SGRBG16:
+ case V4L2_PIX_FMT_SRGGB16:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
+ burst_size = 8;
+ passthrough_bits = 16;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_NV12:
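+		/*
+		 * Choose the largest burst (in pixels) that evenly divides
+		 * the line width: 64, 32, 16, or 8 pixels.
+		 */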
+ burst_size = (image.pix.width & 0x3f) ?
+ ((image.pix.width & 0x1f) ?
+ ((image.pix.width & 0xf) ? 8 : 16) : 32) : 64;
+ passthrough_bits = 16;
+		/*
+		 * Skip writing U and V components to odd rows, but not
+		 * when IDMAC interweaving is enabled, as the two are
+		 * incompatible.
+		 */
+ if (!interweave)
+ ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ burst_size = (image.pix.width & 0x1f) ?
+ ((image.pix.width & 0xf) ? 8 : 16) : 32;
+ passthrough_bits = 16;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ if (passthrough) {
+ burst_size = 16;
+ passthrough_bits = 8;
+ passthrough_cycles = incc->cycles;
+ break;
+ }
+ fallthrough; /* non-passthrough RGB565 (CSI-2 bus) */
+ default:
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ passthrough_bits = 16;
+ break;
+ }
+
+ if (passthrough) {
+ if (priv->interweave_swap) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.phys0 += image.pix.bytesperline;
+ image.phys1 += image.pix.bytesperline;
+ }
+
+ ipu_cpmem_set_resolution(priv->idmac_ch,
+ image.rect.width * passthrough_cycles,
+ image.rect.height);
+ ipu_cpmem_set_stride(priv->idmac_ch, image.pix.bytesperline);
+ ipu_cpmem_set_buffer(priv->idmac_ch, 0, image.phys0);
+ ipu_cpmem_set_buffer(priv->idmac_ch, 1, image.phys1);
+ ipu_cpmem_set_format_passthrough(priv->idmac_ch,
+ passthrough_bits);
+ } else {
+ if (priv->interweave_swap) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.rect.top = 1;
+ }
+
+ ret = ipu_cpmem_set_image(priv->idmac_ch, &image);
+ if (ret)
+ goto unsetup_vb2;
+ }
+
+ ipu_cpmem_set_burstsize(priv->idmac_ch, burst_size);
+
+	/*
+	 * Set the channel for the direct CSI-->memory via SMFC
+	 * use-case to very high priority, by enabling the watermark
+	 * signal in the SMFC, enabling WM in the channel, and setting
+	 * the channel priority to high.
+	 *
+	 * Refer to the i.MX6 rev. D TRM, Table 36-8: Calculated priority
+	 * value.
+	 *
+	 * The watermarks are intentionally set very low here to ensure
+	 * that the SMFC FIFOs do not overflow.
+	 */
+ ipu_smfc_set_watermark(priv->smfc, 0x02, 0x01);
+ ipu_cpmem_set_high_priority(priv->idmac_ch);
+ ipu_idmac_enable_watermark(priv->idmac_ch, true);
+ ipu_cpmem_set_axi_id(priv->idmac_ch, 0);
+
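+	/*
+	 * Translate the IDMAC burst size in pixels to the SMFC burst
+	 * size value: pixels / 8 - 1 in passthrough mode, pixels / 4 - 1
+	 * otherwise.
+	 */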
+ burst_size = passthrough ?
+ (burst_size >> 3) - 1 : (burst_size >> 2) - 1;
+
+ ipu_smfc_set_burstsize(priv->smfc, burst_size);
+
+ if (interweave)
+ ipu_cpmem_interlaced_scan(priv->idmac_ch,
+ priv->interweave_swap ?
+ -image.pix.bytesperline :
+ image.pix.bytesperline,
+ image.pix.pixelformat);
+
+ ipu_idmac_set_double_buffer(priv->idmac_ch, true);
+
+ return 0;
+
+unsetup_vb2:
+ csi_idmac_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void csi_idmac_unsetup(struct csi_priv *priv,
+ enum vb2_buffer_state state)
+{
+ ipu_idmac_disable_channel(priv->idmac_ch);
+ ipu_smfc_disable(priv->smfc);
+
+ csi_idmac_unsetup_vb2_buf(priv, state);
+}
+
+static int csi_idmac_setup(struct csi_priv *priv)
+{
+ int ret;
+
+ ret = csi_idmac_setup_channel(priv);
+ if (ret)
+ return ret;
+
+ ipu_cpmem_dump(priv->idmac_ch);
+ ipu_dump(priv->ipu);
+
+ ipu_smfc_enable(priv->smfc);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->idmac_ch, 0);
+ ipu_idmac_select_buffer(priv->idmac_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->idmac_ch);
+
+ return 0;
+}
+
+static int csi_idmac_start(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ int ret;
+
+ ret = csi_idmac_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ ipu_smfc_map_channel(priv->smfc, priv->csi_id, priv->vc_num);
+
+ ret = imx_media_alloc_dma_buf(priv->dev, &priv->underrun_buf,
+ vdev->fmt.sizeimage);
+ if (ret)
+ goto out_put_ipu;
+
+ priv->ipu_buf_num = 0;
+
+ /* init EOF completion waitq */
+ init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
+ priv->last_eof = false;
+ priv->nfb4eof = false;
+
+ ret = csi_idmac_setup(priv);
+ if (ret) {
+ v4l2_err(&priv->sd, "csi_idmac_setup failed: %d\n", ret);
+ goto out_free_dma_buf;
+ }
+
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ priv->idmac_ch,
+ IPU_IRQ_NFB4EOF);
+ ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
+ csi_idmac_nfb4eof_interrupt, 0,
+ "imx-smfc-nfb4eof", priv);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "Error registering NFB4EOF irq: %d\n", ret);
+ goto out_unsetup;
+ }
+
+ priv->eof_irq = ipu_idmac_channel_irq(priv->ipu, priv->idmac_ch,
+ IPU_IRQ_EOF);
+
+ ret = devm_request_irq(priv->dev, priv->eof_irq,
+ csi_idmac_eof_interrupt, 0,
+ "imx-smfc-eof", priv);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "Error registering eof irq: %d\n", ret);
+ goto out_free_nfb4eof_irq;
+ }
+
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
+out_free_nfb4eof_irq:
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+out_unsetup:
+ csi_idmac_unsetup(priv, VB2_BUF_STATE_QUEUED);
+out_free_dma_buf:
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
+out_put_ipu:
+ csi_idmac_put_ipu_resources(priv);
+ return ret;
+}
+
+static void csi_idmac_wait_last_eof(struct csi_priv *priv)
+{
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&priv->irqlock, flags);
+ priv->last_eof = true;
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+
+	/*
+	 * Then wait for the EOF interrupt handler to signal completion.
+	 */
+ ret = wait_for_completion_timeout(
+ &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&priv->sd, "wait last EOF timeout\n");
+}
+
+static void csi_idmac_stop(struct csi_priv *priv)
+{
+ devm_free_irq(priv->dev, priv->eof_irq, priv);
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+
+ csi_idmac_unsetup(priv, VB2_BUF_STATE_ERROR);
+
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
+
+ /* cancel the EOF timeout timer */
+ timer_delete_sync(&priv->eof_timeout_timer);
+
+ csi_idmac_put_ipu_resources(priv);
+}
+
+/* Update the CSI whole sensor and active windows */
+static int csi_setup(struct csi_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt, *outfmt;
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt if_fmt;
+ struct v4l2_rect crop;
+
+ infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
+ outfmt = &priv->format_mbus[priv->active_output_pad];
+
+ if_fmt = *infmt;
+ crop = priv->crop;
+
+	/*
+	 * If cycles is set, the data must be handled over multiple cycles
+	 * as generic/Bayer data.
+	 */
+ if (is_parallel_bus(&priv->mbus_cfg) && incc->cycles) {
+ if_fmt.width *= incc->cycles;
+ crop.width *= incc->cycles;
+ }
+
+ ipu_csi_set_window(priv->csi, &crop);
+
+ ipu_csi_set_downsize(priv->csi,
+ priv->crop.width == 2 * priv->compose.width,
+ priv->crop.height == 2 * priv->compose.height);
+
+ ipu_csi_init_interface(priv->csi, &priv->mbus_cfg, &if_fmt, outfmt);
+
+ ipu_csi_set_dest(priv->csi, priv->dest);
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ ipu_csi_set_skip_smfc(priv->csi, priv->skip->skip_smfc,
+ priv->skip->max_ratio - 1, 0);
+
+ ipu_csi_dump(priv->csi);
+
+ return 0;
+}
+
+static int csi_start(struct csi_priv *priv)
+{
+ struct v4l2_fract *input_fi, *output_fi;
+ int ret;
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+ output_fi = &priv->frame_interval[priv->active_output_pad];
+
+ /* start upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret)
+ return ret;
+
+ /* Skip first few frames from a BT.656 source */
+ if (priv->mbus_cfg.type == V4L2_MBUS_BT656) {
+ u32 delay_usec, bad_frames = 20;
+
+ delay_usec = DIV_ROUND_UP_ULL((u64)USEC_PER_SEC *
+ input_fi->numerator * bad_frames,
+ input_fi->denominator);
+
+ usleep_range(delay_usec, delay_usec + 1000);
+ }
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ ret = csi_idmac_start(priv);
+ if (ret)
+ goto stop_upstream;
+ }
+
+ ret = csi_setup(priv);
+ if (ret)
+ goto idmac_stop;
+
+ /* start the frame interval monitor */
+ if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC)
+ imx_media_fim_set_stream(priv->fim, output_fi, true);
+
+ ret = ipu_csi_enable(priv->csi);
+ if (ret) {
+ v4l2_err(&priv->sd, "CSI enable error: %d\n", ret);
+ goto fim_off;
+ }
+
+ return 0;
+
+fim_off:
+ if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+idmac_stop:
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_stop(priv);
+stop_upstream:
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ return ret;
+}
+
+static void csi_stop(struct csi_priv *priv)
+{
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_wait_last_eof(priv);
+
+ /*
+ * Disable the CSI asap, after syncing with the last EOF.
+ * Doing so after the IDMA channel is disabled has shown to
+ * create hard system-wide hangs.
+ */
+ ipu_csi_disable(priv->csi);
+
+ /* stop upstream */
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ csi_idmac_stop(priv);
+
+ /* stop the frame interval monitor */
+ if (priv->fim)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+ }
+}
+
+static const struct csi_skip_desc csi_skip[12] = {
+ { 1, 1, 0x00 }, /* Keep all frames */
+ { 5, 6, 0x10 }, /* Skip every sixth frame */
+ { 4, 5, 0x08 }, /* Skip every fifth frame */
+ { 3, 4, 0x04 }, /* Skip every fourth frame */
+ { 2, 3, 0x02 }, /* Skip every third frame */
+ { 3, 5, 0x0a }, /* Skip frames 1 and 3 of every 5 */
+ { 1, 2, 0x01 }, /* Skip every second frame */
+ { 2, 5, 0x0b }, /* Keep frames 1 and 4 of every 5 */
+ { 1, 3, 0x03 }, /* Keep one in three frames */
+ { 1, 4, 0x07 }, /* Keep one in four frames */
+ { 1, 5, 0x0f }, /* Keep one in five frames */
+ { 1, 6, 0x1f }, /* Keep one in six frames */
+};
+
+static void csi_apply_skip_interval(const struct csi_skip_desc *skip,
+ struct v4l2_fract *interval)
+{
+ unsigned int div;
+
+ interval->numerator *= skip->max_ratio;
+ interval->denominator *= skip->keep;
+
+ /* Reduce fraction to lowest terms */
+ div = gcd(interval->numerator, interval->denominator);
+ if (div > 1) {
+ interval->numerator /= div;
+ interval->denominator /= div;
+ }
+}
+
+/*
+ * Find the skip pattern to produce the output frame interval closest to the
+ * requested one, for the given input frame interval. Updates the output frame
+ * interval to the exact value.
+ */
+static const struct csi_skip_desc *csi_find_best_skip(struct v4l2_fract *in,
+ struct v4l2_fract *out)
+{
+ const struct csi_skip_desc *skip = &csi_skip[0], *best_skip = skip;
+ u32 min_err = UINT_MAX;
+ u64 want_us;
+ int i;
+
+ /* Default to 1:1 ratio */
+ if (out->numerator == 0 || out->denominator == 0 ||
+ in->numerator == 0 || in->denominator == 0) {
+ *out = *in;
+ return best_skip;
+ }
+
+ want_us = div_u64((u64)USEC_PER_SEC * out->numerator, out->denominator);
+
+ /* Find the reduction closest to the requested time per frame */
+ for (i = 0; i < ARRAY_SIZE(csi_skip); i++, skip++) {
+ u64 tmp, err;
+
+ tmp = div_u64((u64)USEC_PER_SEC * in->numerator *
+ skip->max_ratio, in->denominator * skip->keep);
+
+ err = abs((s64)tmp - want_us);
+ if (err < min_err) {
+ min_err = err;
+ best_skip = skip;
+ }
+ }
+
+ *out = *in;
+ csi_apply_skip_interval(best_skip, out);
+
+ return best_skip;
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int csi_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fi->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fi->interval = priv->frame_interval[fi->pad];
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int csi_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi;
+ int ret = 0;
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+
+ switch (fi->pad) {
+ case CSI_SINK_PAD:
+ /* No limits on valid input frame intervals */
+ if (fi->interval.numerator == 0 ||
+ fi->interval.denominator == 0)
+ fi->interval = *input_fi;
+ /* Reset output intervals and frame skipping ratio to 1:1 */
+ priv->frame_interval[CSI_SRC_PAD_IDMAC] = fi->interval;
+ priv->frame_interval[CSI_SRC_PAD_DIRECT] = fi->interval;
+ priv->skip = &csi_skip[0];
+ break;
+ case CSI_SRC_PAD_IDMAC:
+		/*
+		 * The frame interval at the IDMAC output pad depends on
+		 * the input interval, modified by frame skipping.
+		 */
+ priv->skip = csi_find_best_skip(input_fi, &fi->interval);
+ break;
+ case CSI_SRC_PAD_DIRECT:
+		/*
+		 * The frame interval at the DIRECT output pad is the same
+		 * as the input interval.
+		 */
+ fi->interval = *input_fi;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->frame_interval[fi->pad] = fi->interval;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || !priv->sink) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+	/*
+	 * Enable or disable streaming only when stream_count
+	 * transitions from 0 to 1 or from 1 to 0.
+	 */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ if (enable) {
+ dev_dbg(priv->dev, "stream ON\n");
+ ret = csi_start(priv);
+ if (ret)
+ goto out;
+ } else {
+ dev_dbg(priv->dev, "stream OFF\n");
+ csi_stop(priv);
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(priv->dev, "link setup %s -> %s\n", remote->entity->name,
+ local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a source pad */
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ priv->sink = NULL;
+ /* do not apply IC burst alignment in csi_try_crop */
+ priv->active_output_pad = CSI_SRC_PAD_IDMAC;
+ goto out;
+ }
+
+ /* record which output pad is now active */
+ priv->active_output_pad = local->index;
+
+ /* set CSI destination */
+ if (local->index == CSI_SRC_PAD_IDMAC) {
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (priv->fim) {
+ ret = imx_media_fim_add_controls(priv->fim);
+ if (ret)
+ goto out;
+ }
+
+ priv->dest = IPU_CSI_DEST_IDMAC;
+ } else {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+ switch (remote_sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
+ priv->dest = IPU_CSI_DEST_VDIC;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ priv->dest = IPU_CSI_DEST_IC;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ priv->sink = remote->entity;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_config mbus_cfg = { .type = 0 };
+ bool is_csi2;
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "failed to get upstream media bus configuration\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->lock);
+
+ priv->mbus_cfg = mbus_cfg;
+ is_csi2 = !is_parallel_bus(&mbus_cfg);
+ if (is_csi2) {
+		/*
+		 * NOTE! It seems the virtual channels from the MIPI CSI-2
+		 * receiver are used only for routing by the video muxes,
+		 * or for hard-wired routing to the CSIs. Once the stream
+		 * enters a CSI, however, it is treated internally in the
+		 * IPU as virtual channel 0.
+		 */
+ ipu_csi_set_mipi_datatype(priv->csi, 0,
+ &priv->format_mbus[CSI_SINK_PAD]);
+ }
+
+ /* select either parallel or MIPI-CSI2 as input to CSI */
+ ipu_set_csi_src_mux(priv->ipu, priv->csi_id, is_csi2);
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_format(sd_state, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+static struct v4l2_rect *
+__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_crop(sd_state, CSI_SINK_PAD);
+ else
+ return &priv->crop;
+}
+
+static struct v4l2_rect *
+__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_compose(sd_state, CSI_SINK_PAD);
+ else
+ return &priv->compose;
+}
+
+static void csi_try_crop(struct csi_priv *priv,
+ struct v4l2_rect *crop,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_mbus_framefmt *infmt,
+ struct v4l2_mbus_config *mbus_cfg)
+{
+ u32 in_height;
+
+ crop->width = min_t(__u32, infmt->width, crop->width);
+ if (crop->left + crop->width > infmt->width)
+ crop->left = infmt->width - crop->width;
+ /* adjust crop left/width to h/w alignment restrictions */
+ crop->left &= ~0x3;
+ if (priv->active_output_pad == CSI_SRC_PAD_DIRECT)
+ crop->width &= ~0x7; /* multiple of 8 pixels (IC burst) */
+ else
+ crop->width &= ~0x1; /* multiple of 2 pixels */
+
+ in_height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ in_height *= 2;
+
+ /*
+	 * FIXME: not sure why yet, but on interlaced BT.656,
+ * changing the vertical cropping causes loss of vertical
+ * sync, so fix it to NTSC/PAL active lines. NTSC contains
+ * 2 extra lines of active video that need to be cropped.
+ */
+ if (mbus_cfg->type == V4L2_MBUS_BT656 &&
+ (V4L2_FIELD_HAS_BOTH(infmt->field) ||
+ infmt->field == V4L2_FIELD_ALTERNATE)) {
+ crop->height = in_height;
+ crop->top = (in_height == 480) ? 2 : 0;
+ } else {
+ crop->height = min_t(__u32, in_height, crop->height);
+ if (crop->top + crop->height > in_height)
+ crop->top = in_height - crop->height;
+ }
+}
+
+static int csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_config mbus_cfg = { .type = 0 };
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, code->which);
+ incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
+
+ switch (code->pad) {
+ case CSI_SINK_PAD:
+ ret = imx_media_enum_mbus_formats(&code->code, code->index,
+ PIXFMT_SEL_ANY);
+ break;
+ case CSI_SRC_PAD_DIRECT:
+ case CSI_SRC_PAD_IDMAC:
+ ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "failed to get upstream media bus configuration\n");
+ goto out;
+ }
+
+ if (requires_passthrough(&mbus_cfg, infmt, incc)) {
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ code->code = infmt->code;
+ } else {
+ enum imx_pixfmt_sel fmt_sel =
+ (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ ret = imx_media_enum_ipu_formats(&code->code,
+ code->index,
+ fmt_sel);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (fse->pad >= CSI_NUM_PADS ||
+ fse->index > (fse->pad == CSI_SINK_PAD ? 0 : 3))
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (fse->pad == CSI_SINK_PAD) {
+ fse->min_width = MIN_W;
+ fse->max_width = MAX_W;
+ fse->min_height = MIN_H;
+ fse->max_height = MAX_H;
+ } else {
+ crop = __csi_get_crop(priv, sd_state, fse->which);
+
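+		/*
+		 * The source pads produce the crop rectangle either at
+		 * full size or downscaled 2:1 per dimension; index bit 0
+		 * selects width/2 and bit 1 selects height/2.
+		 */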
+ fse->min_width = fse->index & 1 ?
+ crop->width / 2 : crop->width;
+ fse->max_width = fse->min_width;
+ fse->min_height = fse->index & 2 ?
+ crop->height / 2 : crop->height;
+ fse->max_height = fse->min_height;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (fie->pad >= CSI_NUM_PADS ||
+ fie->index >= (fie->pad != CSI_SRC_PAD_IDMAC ?
+ 1 : ARRAY_SIZE(csi_skip)))
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+ crop = __csi_get_crop(priv, sd_state, fie->which);
+
+ if ((fie->width != crop->width && fie->width != crop->width / 2) ||
+ (fie->height != crop->height && fie->height != crop->height / 2)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fie->interval = *input_fi;
+
+ if (fie->pad == CSI_SRC_PAD_IDMAC)
+ csi_apply_skip_interval(&csi_skip[fie->index],
+ &fie->interval);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
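+/*
+ * Adjust the requested field type. The sink pad accepts any field type
+ * as long as it is initialized. The source pads translate ALTERNATE to
+ * sequential, allow a sequential request for sequential input (possibly
+ * with inverted field order), and otherwise pass the sink field through.
+ */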
+static void csi_try_field(struct csi_priv *priv,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct v4l2_mbus_framefmt *infmt =
+ __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
+
+	/*
+	 * There are no restrictions on the sink pad field type,
+	 * except that it must be initialized.
+	 */
+ if (sdformat->pad == CSI_SINK_PAD) {
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+ return;
+ }
+
+ switch (infmt->field) {
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ /*
+ * If the user requests sequential at the source pad,
+ * allow it (along with possibly inverting field order).
+		 * Otherwise pass the field type through.
+ */
+ if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
+ sdformat->format.field = infmt->field;
+ break;
+ case V4L2_FIELD_ALTERNATE:
+ /*
+ * This driver does not support alternate field mode, and
+ * the CSI captures a whole frame, so the CSI never presents
+		 * alternate mode at its source pads. If the user has not
+		 * already requested sequential, translate ALTERNATE at the
+		 * sink pad to SEQ_TB or SEQ_BT at the source pad depending
+ * on input height (assume NTSC BT order if 480 total active
+ * frame lines, otherwise PAL TB order).
+ */
+ if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
+ sdformat->format.field = (infmt->height == 480 / 2) ?
+ V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB;
+ break;
+ default:
+ /* Passthrough for all other input field types */
+ sdformat->format.field = infmt->field;
+ break;
+ }
+}
+
+static void csi_try_fmt(struct csi_priv *priv,
+ struct v4l2_mbus_config *mbus_cfg,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat,
+ struct v4l2_rect *crop,
+ struct v4l2_rect *compose,
+ const struct imx_media_pixfmt **cc)
+{
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ u32 code;
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
+
+ switch (sdformat->pad) {
+ case CSI_SRC_PAD_DIRECT:
+ case CSI_SRC_PAD_IDMAC:
+ incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
+
+ sdformat->format.width = compose->width;
+ sdformat->format.height = compose->height;
+
+ if (requires_passthrough(mbus_cfg, infmt, incc)) {
+ sdformat->format.code = infmt->code;
+ *cc = incc;
+ } else {
+ enum imx_pixfmt_sel fmt_sel =
+ (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ fmt_sel);
+ if (!*cc) {
+ imx_media_enum_ipu_formats(&code, 0, fmt_sel);
+ *cc = imx_media_find_ipu_format(code, fmt_sel);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+ }
+
+ csi_try_field(priv, sd_state, sdformat);
+
+ /* propagate colorimetry from sink */
+ sdformat->format.colorspace = infmt->colorspace;
+ sdformat->format.xfer_func = infmt->xfer_func;
+ sdformat->format.quantization = infmt->quantization;
+ sdformat->format.ycbcr_enc = infmt->ycbcr_enc;
+
+ break;
+ case CSI_SINK_PAD:
+ v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
+ W_ALIGN, &sdformat->format.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+
+ *cc = imx_media_find_mbus_format(sdformat->format.code,
+ PIXFMT_SEL_ANY);
+ if (!*cc) {
+ imx_media_enum_mbus_formats(&code, 0,
+ PIXFMT_SEL_YUV_RGB);
+ *cc = imx_media_find_mbus_format(code,
+ PIXFMT_SEL_YUV_RGB);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ csi_try_field(priv, sd_state, sdformat);
+
+ /* Reset crop and compose rectangles */
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = sdformat->format.width;
+ crop->height = sdformat->format.height;
+ if (sdformat->format.field == V4L2_FIELD_ALTERNATE)
+ crop->height *= 2;
+ csi_try_crop(priv, crop, sd_state, &sdformat->format, mbus_cfg);
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = crop->width;
+ compose->height = crop->height;
+
+ break;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format,
+ priv->active_output_pad == CSI_SRC_PAD_DIRECT);
+}
+
+static int csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_config mbus_cfg = { .type = 0 };
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop, *compose;
+ int ret;
+
+ if (sdformat->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "failed to get upstream media bus configuration\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ crop = __csi_get_crop(priv, sd_state, sdformat->which);
+ compose = __csi_get_compose(priv, sd_state, sdformat->which);
+
+ csi_try_fmt(priv, &mbus_cfg, sd_state, sdformat, crop, compose, &cc);
+
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ if (sdformat->pad == CSI_SINK_PAD) {
+ int pad;
+
+ /* propagate format to source pads */
+ for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = pad;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ csi_try_fmt(priv, &mbus_cfg, sd_state, &format, NULL,
+ compose, &outcc);
+
+ outfmt = __csi_get_fmt(priv, sd_state, pad,
+ sdformat->which);
+ *outfmt = format.format;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[pad] = outcc;
+ }
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_rect *crop, *compose;
+ int ret = 0;
+
+ if (sel->pad != CSI_SINK_PAD)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = infmt->width;
+ sel->r.height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ sel->r.height *= 2;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = *compose;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
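+/*
+ * The CSI can only downscale 2:1 or pass 1:1 in each dimension, so round
+ * the requested compose size to either the crop size or half of it,
+ * honoring the selection LE/GE flags, or return -ERANGE when both flags
+ * are set and neither size matches exactly.
+ */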
+static int csi_set_scale(u32 *compose, u32 crop, u32 flags)
+{
+ if ((flags & (V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE)) ==
+ (V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE) &&
+ *compose != crop && *compose != crop / 2)
+ return -ERANGE;
+
+ if (*compose <= crop / 2 ||
+ (*compose < crop * 3 / 4 && !(flags & V4L2_SEL_FLAG_GE)) ||
+ (*compose < crop && (flags & V4L2_SEL_FLAG_LE)))
+ *compose = crop / 2;
+ else
+ *compose = crop;
+
+ return 0;
+}
+
+static int csi_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_config mbus_cfg = { .type = 0 };
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_rect *crop, *compose;
+ int pad, ret;
+
+ if (sel->pad != CSI_SINK_PAD)
+ return -EINVAL;
+
+ ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "failed to get upstream media bus configuration\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * Modifying the crop rectangle always changes the format on
+ * the source pads. If the KEEP_CONFIG flag is set, just return
+ * the current crop rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = priv->crop;
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ *crop = sel->r;
+ goto out;
+ }
+
+ csi_try_crop(priv, &sel->r, sd_state, infmt, &mbus_cfg);
+
+ *crop = sel->r;
+
+ /* Reset scaling to 1:1 */
+ compose->width = crop->width;
+ compose->height = crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * Modifying the compose rectangle always changes the format on
+ * the source pads. If the KEEP_CONFIG flag is set, just return
+ * the current compose rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = priv->compose;
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ *compose = sel->r;
+ goto out;
+ }
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ ret = csi_set_scale(&sel->r.width, crop->width, sel->flags);
+ if (ret)
+ goto out;
+ ret = csi_set_scale(&sel->r.height, crop->height, sel->flags);
+ if (ret)
+ goto out;
+
+ *compose = sel->r;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Reset source pads to sink compose rectangle */
+ for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
+ struct v4l2_mbus_framefmt *outfmt;
+
+ outfmt = __csi_get_fmt(priv, sd_state, pad, sel->which);
+ outfmt->width = compose->width;
+ outfmt->height = compose->height;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR)
+ return -EINVAL;
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static int csi_registered(struct v4l2_subdev *sd)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct ipu_csi *csi;
+ int i, ret;
+ u32 code;
+
+ /* get handle to IPU CSI */
+ csi = ipu_csi_get(priv->ipu, priv->csi_id);
+ if (IS_ERR(csi)) {
+ v4l2_err(&priv->sd, "failed to get CSI%d\n", priv->csi_id);
+ return PTR_ERR(csi);
+ }
+ priv->csi = csi;
+
+ for (i = 0; i < CSI_NUM_PADS; i++) {
+ code = 0;
+ if (i != CSI_SINK_PAD)
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, &priv->cc[i]);
+ if (ret)
+ goto put_csi;
+
+ /* init default frame interval */
+ priv->frame_interval[i].numerator = 1;
+ priv->frame_interval[i].denominator = 30;
+ }
+
+ /* disable frame skipping */
+ priv->skip = &csi_skip[0];
+
+ /* init default crop and compose rectangle sizes */
+ priv->crop.width = IMX_MEDIA_DEF_PIX_WIDTH;
+ priv->crop.height = IMX_MEDIA_DEF_PIX_HEIGHT;
+ priv->compose.width = IMX_MEDIA_DEF_PIX_WIDTH;
+ priv->compose.height = IMX_MEDIA_DEF_PIX_HEIGHT;
+
+ priv->fim = imx_media_fim_init(&priv->sd);
+ if (IS_ERR(priv->fim)) {
+ ret = PTR_ERR(priv->fim);
+ goto put_csi;
+ }
+
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
+ CSI_SRC_PAD_IDMAC, true);
+ if (IS_ERR(priv->vdev)) {
+ ret = PTR_ERR(priv->vdev);
+ goto free_fim;
+ }
+
+ ret = imx_media_capture_device_register(priv->vdev, 0);
+ if (ret)
+ goto remove_vdev;
+
+ return 0;
+
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
+free_fim:
+ if (priv->fim)
+ imx_media_fim_free(priv->fim);
+put_csi:
+ ipu_csi_put(priv->csi);
+ return ret;
+}
+
+static void csi_unregistered(struct v4l2_subdev *sd)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+
+ imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
+ if (priv->fim)
+ imx_media_fim_free(priv->fim);
+
+ if (priv->csi)
+ ipu_csi_put(priv->csi);
+}
+
+/*
+ * The CSI has only one fwnode endpoint, at the sink pad. Verify the
+ * endpoint belongs to us, and return CSI_SINK_PAD.
+ */
+static int csi_get_fwnode_pad(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct fwnode_handle *csi_port = dev_fwnode(priv->dev);
+ struct fwnode_handle *csi_ep;
+ int ret;
+
+ csi_ep = fwnode_get_next_child_node(csi_port, NULL);
+
+ ret = endpoint->local_fwnode == csi_ep ? CSI_SINK_PAD : -ENXIO;
+
+ fwnode_handle_put(csi_ep);
+
+ return ret;
+}
+
+static const struct media_entity_operations csi_entity_ops = {
+ .link_setup = csi_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = csi_get_fwnode_pad,
+};
+
+static const struct v4l2_subdev_core_ops csi_core_ops = {
+ .subscribe_event = csi_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops csi_video_ops = {
+ .s_stream = csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi_pad_ops = {
+ .enum_mbus_code = csi_enum_mbus_code,
+ .enum_frame_size = csi_enum_frame_size,
+ .enum_frame_interval = csi_enum_frame_interval,
+ .get_fmt = csi_get_fmt,
+ .set_fmt = csi_set_fmt,
+ .get_selection = csi_get_selection,
+ .set_selection = csi_set_selection,
+ .get_frame_interval = csi_get_frame_interval,
+ .set_frame_interval = csi_set_frame_interval,
+ .link_validate = csi_link_validate,
+};
+
+static const struct v4l2_subdev_ops csi_subdev_ops = {
+ .core = &csi_core_ops,
+ .video = &csi_video_ops,
+ .pad = &csi_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csi_internal_ops = {
+ .init_state = imx_media_init_state,
+ .registered = csi_registered,
+ .unregistered = csi_unregistered,
+};
+
+static int imx_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct csi_priv *priv = notifier_to_dev(notifier);
+ struct media_pad *sink = &priv->sd.entity.pads[CSI_SINK_PAD];
+
+ /*
+ * If the subdev is a video mux, it must be one of the CSI
+ * muxes. Mark it as such via its group id.
+ */
+ if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
+ sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, 0);
+}
+
+static const struct v4l2_async_notifier_operations csi_notify_ops = {
+ .bound = imx_csi_notify_bound,
+};
+
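+/*
+ * Add the remote endpoint connected at this CSI's port (if any) to an
+ * async notifier, register the notifier, and then register the subdev
+ * itself asynchronously.
+ */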
+static int imx_csi_async_register(struct csi_priv *priv)
+{
+ struct v4l2_async_connection *asd = NULL;
+ struct fwnode_handle *ep;
+ unsigned int port;
+ int ret;
+
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
+
+ /* get this CSI's port id */
+ ret = fwnode_property_read_u32(dev_fwnode(priv->dev), "reg", &port);
+ if (ret < 0)
+ return ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(priv->dev->parent),
+ port, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (ep) {
+ asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
+ struct v4l2_async_connection);
+
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ /* OK if asd already exists */
+ if (ret != -EEXIST)
+ return ret;
+ }
+ }
+
+ priv->notifier.ops = &csi_notify_ops;
+
+ ret = v4l2_async_nf_register(&priv->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&priv->sd);
+}
+
+static int imx_csi_probe(struct platform_device *pdev)
+{
+ struct ipu_client_platformdata *pdata;
+ struct pinctrl *pinctrl;
+ struct csi_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->sd);
+ priv->dev = &pdev->dev;
+
+ ret = dma_set_coherent_mask(priv->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ /* get parent IPU */
+ priv->ipu = dev_get_drvdata(priv->dev->parent);
+
+ /* get our CSI id */
+ pdata = priv->dev->platform_data;
+ priv->csi_id = pdata->csi;
+ priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
+
+ priv->active_output_pad = CSI_SRC_PAD_IDMAC;
+
+ timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
+ spin_lock_init(&priv->irqlock);
+
+ v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = &csi_internal_ops;
+ priv->sd.entity.ops = &csi_entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->sd.dev = &pdev->dev;
+ priv->sd.fwnode = of_fwnode_handle(pdata->of_node);
+ priv->sd.owner = THIS_MODULE;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ priv->sd.grp_id = priv->csi_id ?
+ IMX_MEDIA_GRP_ID_IPU_CSI1 : IMX_MEDIA_GRP_ID_IPU_CSI0;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(priv->ipu));
+
+ for (i = 0; i < CSI_NUM_PADS; i++)
+ priv->pad[i].flags = (i == CSI_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, CSI_NUM_PADS,
+ priv->pad);
+ if (ret)
+ return ret;
+
+ mutex_init(&priv->lock);
+
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ priv->sd.ctrl_handler = &priv->ctrl_hdlr;
+
+ /*
+ * The IPUv3 driver did not assign an of_node to this
+ * device. As a result, pinctrl does not automatically
+ * configure our pin groups, so we need to do that manually
+ * here, after setting this device's of_node.
+ */
+ priv->dev->of_node = pdata->of_node;
+ pinctrl = devm_pinctrl_get_select_default(priv->dev);
+ if (IS_ERR(pinctrl)) {
+ ret = PTR_ERR(pinctrl);
+ dev_dbg(priv->dev,
+ "devm_pinctrl_get_select_default() failed: %d\n", ret);
+ if (ret != -ENODEV)
+ goto free;
+ }
+
+ ret = imx_csi_async_register(priv);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+free:
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ mutex_destroy(&priv->lock);
+ return ret;
+}
+
+static void imx_csi_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi_priv *priv = sd_to_dev(sd);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ mutex_destroy(&priv->lock);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+}
+
+static const struct platform_device_id imx_csi_ids[] = {
+ { .name = "imx-ipuv3-csi" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, imx_csi_ids);
+
+static struct platform_driver imx_csi_driver = {
+ .probe = imx_csi_probe,
+ .remove = imx_csi_remove,
+ .id_table = imx_csi_ids,
+ .driver = {
+ .name = "imx-ipuv3-csi",
+ },
+};
+module_platform_driver(imx_csi_driver);
+
+MODULE_DESCRIPTION("i.MX CSI subdev driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
new file mode 100644
index 000000000000..0d0ee8627a2d
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Media Controller Driver for Freescale common i.MX5/6/7 SOC
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include "imx-media.h"
+
+static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx_media_dev, notifier);
+}
+
+/*
+ * Create the missing media links from the CSI-2 receiver.
+ * Called after all async subdevs have bound.
+ */
+static void imx_media_create_csi2_links(struct imx_media_dev *imxmd)
+{
+ struct v4l2_subdev *sd, *csi2 = NULL;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ if (sd->grp_id == IMX_MEDIA_GRP_ID_CSI2) {
+ csi2 = sd;
+ break;
+ }
+ }
+ if (!csi2)
+ return;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ /* skip if not a CSI or a CSI mux */
+ if (!(sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) &&
+ !(sd->grp_id & IMX_MEDIA_GRP_ID_CSI_MUX))
+ continue;
+
+ v4l2_create_fwnode_links(csi2, sd);
+ }
+}
+
+/*
+ * Add the given video device to the given imx-media source pad's vdev
+ * list, then continue upstream from the pad entity's sink pads.
+ */
+static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev,
+ struct media_pad *srcpad)
+{
+ struct media_entity *entity = srcpad->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct media_link *link;
+ struct v4l2_subdev *sd;
+ int i, ret;
+
+ /* skip this entity if not a v4l2_subdev */
+ if (!is_media_entity_v4l2_subdev(entity))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ pad_vdev_list = to_pad_vdev_list(sd, srcpad->index);
+ if (!pad_vdev_list) {
+ v4l2_warn(&imxmd->v4l2_dev, "%s:%u has no vdev list!\n",
+ entity->name, srcpad->index);
+ /*
+ * shouldn't happen, but no reason to fail driver load,
+ * just skip this entity.
+ */
+ return 0;
+ }
+
+ /* just return if we've been here before */
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ if (pad_vdev->vdev == vdev)
+ return 0;
+ }
+
+ dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
+ vdev->vfd->entity.name, entity->name, srcpad->index);
+
+ pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
+ if (!pad_vdev)
+ return -ENOMEM;
+
+ /* attach this vdev to this pad */
+ pad_vdev->vdev = vdev;
+ list_add_tail(&pad_vdev->list, pad_vdev_list);
+
+ /* move upstream from this entity's sink pads */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ list_for_each_entry(link, &entity->links, list) {
+ if (link->sink != pad)
+ continue;
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev,
+ link->source);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * For every subdevice, allocate an array of list_heads, one list_head
+ * per pad, to hold the list of video devices reachable from that pad.
+ */
+static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct list_head *vdev_lists;
+ struct media_entity *entity;
+ struct v4l2_subdev *sd;
+ int i;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ entity = &sd->entity;
+ vdev_lists = devm_kcalloc(imxmd->md.dev,
+ entity->num_pads, sizeof(*vdev_lists),
+ GFP_KERNEL);
+ if (!vdev_lists)
+ return -ENOMEM;
+
+ /* attach to the subdev's host private pointer */
+ sd->host_priv = vdev_lists;
+
+ for (i = 0; i < entity->num_pads; i++)
+ INIT_LIST_HEAD(to_pad_vdev_list(sd, i));
+ }
+
+ return 0;
+}
+
+/* form the vdev lists in all imx-media source pads */
+static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct imx_media_video_dev *vdev;
+ struct media_link *link;
+ int ret;
+
+ ret = imx_media_alloc_pad_vdev_lists(imxmd);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(vdev, &imxmd->vdev_list, list) {
+ link = list_first_entry(&vdev->vfd->entity.links,
+ struct media_link, list);
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* async subdev complete notifier */
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ imx_media_create_csi2_links(imxmd);
+
+ ret = imx_media_create_pad_vdev_lists(imxmd);
+ if (ret)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ if (ret)
+ return ret;
+
+ return media_device_register(&imxmd->md);
+}
+EXPORT_SYMBOL_GPL(imx_media_probe_complete);
+
+/*
+ * Add controls to a video device from an entity subdevice, then
+ * continue upstream from the entity's sink pads.
+ */
+static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
+ struct video_device *vfd,
+ struct media_entity *entity)
+{
+ int i, ret = 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+
+ dev_dbg(imxmd->md.dev,
+ "adding controls to %s from %s\n",
+ vfd->entity.name, sd->entity.name);
+
+ ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
+ sd->ctrl_handler,
+ NULL, true);
+ if (ret)
+ return ret;
+ }
+
+ /* move upstream */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad, *spad = &entity->pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ pad = media_pad_remote_pad_first(spad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ continue;
+
+ ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int imx_media_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct imx_media_dev *imxmd = container_of(link->graph_obj.mdev,
+ struct imx_media_dev, md);
+ struct media_entity *source = link->source->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+ int pad_idx, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
+ if (ret)
+ return ret;
+
+ /* don't bother if source is not a subdev */
+ if (!is_media_entity_v4l2_subdev(source))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(source);
+ pad_idx = link->source->index;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad_idx);
+ if (!pad_vdev_list) {
+ /* nothing to do if source sd has no pad vdev list */
+ return 0;
+ }
+
+ /*
+ * Before disabling a link, reset controls for all video
+ * devices reachable from this link.
+ *
+ * After enabling a link, refresh controls for all video
+ * devices reachable from this link.
+ */
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ if (!vfd->ctrl_handler)
+ continue;
+ dev_dbg(imxmd->md.dev,
+ "reset controls for %s\n",
+ vfd->entity.name);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
+ }
+ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ if (!vfd->ctrl_handler)
+ continue;
+ dev_dbg(imxmd->md.dev,
+ "refresh controls for %s\n",
+ vfd->entity.name);
+ ret = imx_media_inherit_controls(imxmd, vfd,
+ &vfd->entity);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
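+/*
+ * v4l2_device notify handler: forward subdev events to every video
+ * device reachable from the subdev's pads.
+ */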
+static void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct media_entity *entity = &sd->entity;
+ int i;
+
+ if (notification != V4L2_DEVICE_NOTIFY_EVENT)
+ return;
+
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad->index);
+ if (!pad_vdev_list)
+ continue;
+ list_for_each_entry(pad_vdev, pad_vdev_list, list)
+ v4l2_event_queue(pad_vdev->vdev->vfd, arg);
+ }
+}
+
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
+ .complete = imx_media_probe_complete,
+};
+
+static const struct media_device_ops imx_media_md_ops = {
+ .link_notify = imx_media_link_notify,
+};
+
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops)
+{
+ struct imx_media_dev *imxmd;
+ int ret;
+
+ imxmd = devm_kzalloc(dev, sizeof(*imxmd), GFP_KERNEL);
+ if (!imxmd)
+ return ERR_PTR(-ENOMEM);
+
+ dev_set_drvdata(dev, imxmd);
+
+ strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
+ imxmd->md.ops = ops ? ops : &imx_media_md_ops;
+ imxmd->md.dev = dev;
+
+ mutex_init(&imxmd->mutex);
+
+ imxmd->v4l2_dev.mdev = &imxmd->md;
+ imxmd->v4l2_dev.notify = imx_media_notify;
+ strscpy(imxmd->v4l2_dev.name, "imx-media",
+ sizeof(imxmd->v4l2_dev.name));
+ snprintf(imxmd->md.bus_info, sizeof(imxmd->md.bus_info),
+ "platform:%s", dev_name(imxmd->md.dev));
+
+ media_device_init(&imxmd->md);
+
+ ret = v4l2_device_register(dev, &imxmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "Failed to register v4l2_device: %d\n", ret);
+ goto cleanup;
+ }
+
+ INIT_LIST_HEAD(&imxmd->vdev_list);
+
+ v4l2_async_nf_init(&imxmd->notifier, &imxmd->v4l2_dev);
+
+ return imxmd;
+
+cleanup:
+ media_device_cleanup(&imxmd->md);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(imx_media_dev_init);
+
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops)
+{
+ int ret;
+
+ /* no subdevs? just bail */
+ if (list_empty(&imxmd->notifier.waiting_list)) {
+ v4l2_err(&imxmd->v4l2_dev, "no subdevs\n");
+ return -ENODEV;
+ }
+
+ /* prepare the async subdev notifier and register it */
+ imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
+ ret = v4l2_async_nf_register(&imxmd->notifier);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "v4l2_async_nf_register failed with %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_dev_notifier_register);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
new file mode 100644
index 000000000000..a08389b99d14
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016-2019 Mentor Graphics Inc.
+ */
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-event.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx_media_dev, notifier);
+}
+
+/* async subdev bound notifier */
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) {
+ /* register the IPU internal subdevs */
+ ret = imx_media_register_ipu_internal_subdevs(imxmd, sd);
+ if (ret)
+ return ret;
+ }
+
+ dev_dbg(imxmd->md.dev, "subdev %s bound\n", sd->name);
+
+ return 0;
+}
+
+/* async subdev complete notifier */
+static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ /* call the imx5/6/7 common probe completion handler */
+ ret = imx_media_probe_complete(notifier);
+ if (ret)
+ return ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
+ if (IS_ERR(imxmd->m2m_vdev)) {
+ ret = PTR_ERR(imxmd->m2m_vdev);
+ imxmd->m2m_vdev = NULL;
+ goto unlock;
+ }
+
+ ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+
+/* async subdev complete notifier */
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx6_media_probe_complete,
+};
+
+static int imx_media_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct imx_media_dev *imxmd;
+ int ret;
+
+ imxmd = imx_media_dev_init(dev, NULL);
+ if (IS_ERR(imxmd))
+ return PTR_ERR(imxmd);
+
+ ret = imx_media_add_of_subdevs(imxmd, node);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "add_of_subdevs failed with %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = imx_media_dev_notifier_register(imxmd, &imx_media_notifier_ops);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ v4l2_async_nf_cleanup(&imxmd->notifier);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_cleanup(&imxmd->md);
+
+ return ret;
+}
+
+static void imx_media_remove(struct platform_device *pdev)
+{
+ struct imx_media_dev *imxmd =
+ (struct imx_media_dev *)platform_get_drvdata(pdev);
+
+ v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
+
+ if (imxmd->m2m_vdev) {
+ imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+ imxmd->m2m_vdev = NULL;
+ }
+
+ v4l2_async_nf_unregister(&imxmd->notifier);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
+ media_device_unregister(&imxmd->md);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_cleanup(&imxmd->md);
+}
+
+static const struct of_device_id imx_media_dt_ids[] = {
+ { .compatible = "fsl,imx-capture-subsystem" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_media_dt_ids);
+
+static struct platform_driver imx_media_pdrv = {
+ .probe = imx_media_probe,
+ .remove = imx_media_remove,
+ .driver = {
+ .name = "imx-media",
+ .of_match_table = imx_media_dt_ids,
+ },
+};
+
+module_platform_driver(imx_media_pdrv);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
new file mode 100644
index 000000000000..ccbc0371fba2
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Frame Interval Monitor.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+enum {
+ FIM_CL_ENABLE = 0,
+ FIM_CL_NUM,
+ FIM_CL_TOLERANCE_MIN,
+ FIM_CL_TOLERANCE_MAX,
+ FIM_CL_NUM_SKIP,
+ FIM_NUM_CONTROLS,
+};
+
+enum {
+ FIM_CL_ICAP_EDGE = 0,
+ FIM_CL_ICAP_CHANNEL,
+ FIM_NUM_ICAP_CONTROLS,
+};
+
+#define FIM_CL_ENABLE_DEF 0 /* FIM disabled by default */
+#define FIM_CL_NUM_DEF 8 /* average 8 frames */
+#define FIM_CL_NUM_SKIP_DEF 2 /* skip 2 frames after restart */
+#define FIM_CL_TOLERANCE_MIN_DEF 50 /* usec */
+#define FIM_CL_TOLERANCE_MAX_DEF 0 /* no max tolerance (unbounded) */
+
+struct imx_media_fim {
+ /* the owning subdev of this fim instance */
+ struct v4l2_subdev *sd;
+
+ /* FIM's control handler */
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* control clusters */
+ struct v4l2_ctrl *ctrl[FIM_NUM_CONTROLS];
+ struct v4l2_ctrl *icap_ctrl[FIM_NUM_ICAP_CONTROLS];
+
+ spinlock_t lock; /* protect control values */
+
+ /* current control values */
+ bool enabled;
+ int num_avg;
+ int num_skip;
+ unsigned long tolerance_min; /* usec */
+ unsigned long tolerance_max; /* usec */
+ /* input capture method of measuring FI */
+ int icap_channel;
+ int icap_flags;
+
+ int counter;
+ ktime_t last_ts;
+ unsigned long sum; /* usec */
+ unsigned long nominal; /* usec */
+
+ struct completion icap_first_event;
+ bool stream_on;
+};
+
+static bool icap_enabled(struct imx_media_fim *fim)
+{
+ return fim->icap_flags != IRQ_TYPE_NONE;
+}
+
+static void update_fim_nominal(struct imx_media_fim *fim,
+ const struct v4l2_fract *fi)
+{
+ if (fi->denominator == 0) {
+ dev_dbg(fim->sd->dev, "no frame interval, FIM disabled\n");
+ fim->enabled = false;
+ return;
+ }
+
+ fim->nominal = DIV_ROUND_CLOSEST_ULL(1000000ULL * (u64)fi->numerator,
+ fi->denominator);
+
+ dev_dbg(fim->sd->dev, "FI=%lu usec\n", fim->nominal);
+}
+
+static void reset_fim(struct imx_media_fim *fim, bool curval)
+{
+ struct v4l2_ctrl *icap_chan = fim->icap_ctrl[FIM_CL_ICAP_CHANNEL];
+ struct v4l2_ctrl *icap_edge = fim->icap_ctrl[FIM_CL_ICAP_EDGE];
+ struct v4l2_ctrl *en = fim->ctrl[FIM_CL_ENABLE];
+ struct v4l2_ctrl *num = fim->ctrl[FIM_CL_NUM];
+ struct v4l2_ctrl *skip = fim->ctrl[FIM_CL_NUM_SKIP];
+ struct v4l2_ctrl *tol_min = fim->ctrl[FIM_CL_TOLERANCE_MIN];
+ struct v4l2_ctrl *tol_max = fim->ctrl[FIM_CL_TOLERANCE_MAX];
+
+ if (curval) {
+ fim->enabled = en->cur.val;
+ fim->icap_flags = icap_edge->cur.val;
+ fim->icap_channel = icap_chan->cur.val;
+ fim->num_avg = num->cur.val;
+ fim->num_skip = skip->cur.val;
+ fim->tolerance_min = tol_min->cur.val;
+ fim->tolerance_max = tol_max->cur.val;
+ } else {
+ fim->enabled = en->val;
+ fim->icap_flags = icap_edge->val;
+ fim->icap_channel = icap_chan->val;
+ fim->num_avg = num->val;
+ fim->num_skip = skip->val;
+ fim->tolerance_min = tol_min->val;
+ fim->tolerance_max = tol_max->val;
+ }
+
+ /* disable tolerance range if max <= min */
+ if (fim->tolerance_max <= fim->tolerance_min)
+ fim->tolerance_max = 0;
+
+ /* num_skip must be >= 1 if input capture not used */
+ if (!icap_enabled(fim))
+ fim->num_skip = max_t(int, fim->num_skip, 1);
+
+ fim->counter = -fim->num_skip;
+ fim->sum = 0;
+}
+
+static void send_fim_event(struct imx_media_fim *fim, unsigned long error)
+{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR,
+ };
+
+ v4l2_subdev_notify_event(fim->sd, &ev);
+}
+
+/*
+ * Monitor an averaged frame interval. If the average deviates too much
+ * from the nominal frame rate, send the frame interval error event. The
+ * frame intervals are averaged in order to quiet noise from
+ * (presumably random) interrupt latency.
+ */
+static void frame_interval_monitor(struct imx_media_fim *fim,
+ ktime_t timestamp)
+{
+ long long interval, error;
+ unsigned long error_avg;
+ bool send_event = false;
+
+ if (!fim->enabled || ++fim->counter <= 0)
+ goto out_update_ts;
+
+	/* clamp the error so the 32-bit division to usec below is safe */
+ interval = ktime_to_ns(ktime_sub(timestamp, fim->last_ts));
+ error = abs(interval - NSEC_PER_USEC * (u64)fim->nominal);
+ if (error > U32_MAX)
+ error = U32_MAX;
+ else
+ error = abs((u32)error / NSEC_PER_USEC);
+
+ if (fim->tolerance_max && error >= fim->tolerance_max) {
+ dev_dbg(fim->sd->dev,
+ "FIM: %llu ignored, out of tolerance bounds\n",
+ error);
+ fim->counter--;
+ goto out_update_ts;
+ }
+
+ fim->sum += error;
+
+ if (fim->counter == fim->num_avg) {
+ error_avg = DIV_ROUND_CLOSEST(fim->sum, fim->num_avg);
+
+ if (error_avg > fim->tolerance_min)
+ send_event = true;
+
+ dev_dbg(fim->sd->dev, "FIM: error: %lu usec%s\n",
+ error_avg, send_event ? " (!!!)" : "");
+
+ fim->counter = 0;
+ fim->sum = 0;
+ }
+
+out_update_ts:
+ fim->last_ts = timestamp;
+ if (send_event)
+ send_fim_event(fim, error_avg);
+}
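+
+/*
+ * Worked example (using the default control values above; the frame rate
+ * is illustrative, not tied to any particular sensor): with a 30000/1001
+ * fps source, update_fim_nominal() stores
+ * nominal = DIV_ROUND_CLOSEST_ULL(1000000 * 1001, 30000) = 33367 usec.
+ * With FIM_CL_NUM_SKIP_DEF = 2 the first two intervals after (re)start
+ * are skipped, then FIM_CL_NUM_DEF = 8 errors |interval - nominal| are
+ * accumulated. Only if their average exceeds FIM_CL_TOLERANCE_MIN_DEF =
+ * 50 usec is the V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR event sent.
+ */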
+
+/*
+ * In case we are monitoring the first frame interval after streamon
+ * (when fim->num_skip = 0), we need a valid fim->last_ts before we
+ * can begin. This only applies to the input capture method. It is not
+ * possible to accurately measure the first FI after streamon using the
+ * EOF method, so fim->num_skip has a minimum of 1 in that case, which
+ * makes this function a no-op when the EOF method is used.
+ */
+static void fim_acquire_first_ts(struct imx_media_fim *fim)
+{
+ unsigned long ret;
+
+ if (!fim->enabled || fim->num_skip > 0)
+ return;
+
+ ret = wait_for_completion_timeout(
+ &fim->icap_first_event,
+ msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(fim->sd, "wait first icap event timeout\n");
+}
+
+/* FIM Controls */
+static int fim_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx_media_fim *fim = container_of(ctrl->handler,
+ struct imx_media_fim,
+ ctrl_handler);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_IMX_FIM_ENABLE:
+ break;
+ case V4L2_CID_IMX_FIM_ICAP_EDGE:
+ if (fim->stream_on)
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ reset_fim(fim, false);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops fim_ctrl_ops = {
+ .s_ctrl = fim_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fim_ctrl[] = {
+ [FIM_CL_ENABLE] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ENABLE,
+ .name = "FIM Enable",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .def = FIM_CL_ENABLE_DEF,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ },
+ [FIM_CL_NUM] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_NUM,
+ .name = "FIM Num Average",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_NUM_DEF,
+ .min = 1, /* no averaging */
+ .max = 64, /* average 64 frames */
+ .step = 1,
+ },
+ [FIM_CL_TOLERANCE_MIN] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_TOLERANCE_MIN,
+ .name = "FIM Tolerance Min",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_TOLERANCE_MIN_DEF,
+ .min = 2,
+ .max = 200,
+ .step = 1,
+ },
+ [FIM_CL_TOLERANCE_MAX] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_TOLERANCE_MAX,
+ .name = "FIM Tolerance Max",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_TOLERANCE_MAX_DEF,
+ .min = 0,
+ .max = 500,
+ .step = 1,
+ },
+ [FIM_CL_NUM_SKIP] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_NUM_SKIP,
+ .name = "FIM Num Skip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_NUM_SKIP_DEF,
+ .min = 0, /* skip no frames */
+ .max = 256, /* skip 256 frames */
+ .step = 1,
+ },
+};
+
+static const struct v4l2_ctrl_config fim_icap_ctrl[] = {
+ [FIM_CL_ICAP_EDGE] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ICAP_EDGE,
+ .name = "FIM Input Capture Edge",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = IRQ_TYPE_NONE, /* input capture disabled by default */
+ .min = IRQ_TYPE_NONE,
+ .max = IRQ_TYPE_EDGE_BOTH,
+ .step = 1,
+ },
+ [FIM_CL_ICAP_CHANNEL] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ICAP_CHANNEL,
+ .name = "FIM Input Capture Channel",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = 0,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ },
+};
+
+static int init_fim_controls(struct imx_media_fim *fim)
+{
+ struct v4l2_ctrl_handler *hdlr = &fim->ctrl_handler;
+ int i, ret;
+
+ v4l2_ctrl_handler_init(hdlr, FIM_NUM_CONTROLS + FIM_NUM_ICAP_CONTROLS);
+
+ for (i = 0; i < FIM_NUM_CONTROLS; i++)
+ fim->ctrl[i] = v4l2_ctrl_new_custom(hdlr,
+ &fim_ctrl[i],
+ NULL);
+ for (i = 0; i < FIM_NUM_ICAP_CONTROLS; i++)
+ fim->icap_ctrl[i] = v4l2_ctrl_new_custom(hdlr,
+ &fim_icap_ctrl[i],
+ NULL);
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto err_free;
+ }
+
+ v4l2_ctrl_cluster(FIM_NUM_CONTROLS, fim->ctrl);
+ v4l2_ctrl_cluster(FIM_NUM_ICAP_CONTROLS, fim->icap_ctrl);
+
+ return 0;
+err_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
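+
+/*
+ * Illustrative userspace view (not part of this driver): once these
+ * controls are added to the owning subdev's control handler, they can be
+ * inspected and set with v4l2-ctl. The control names below are derived
+ * from the .name strings above and the subdev node number is
+ * hypothetical:
+ *
+ *   v4l2-ctl -d /dev/v4l-subdev1 --list-ctrls
+ *   v4l2-ctl -d /dev/v4l-subdev1 \
+ *           --set-ctrl=fim_enable=1,fim_num_average=16,fim_tolerance_min=100
+ */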
+
+/*
+ * Monitor frame intervals via EOF interrupt. This method is
+ * subject to uncertainty errors introduced by interrupt latency.
+ *
+ * This is a noop if the Input Capture method is being used, since
+ * the frame_interval_monitor() is called by the input capture event
+ * callback handler in that case.
+ */
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ if (!icap_enabled(fim))
+ frame_interval_monitor(fim, timestamp);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+}
+
+/* Called by the subdev in its s_stream callback */
+void imx_media_fim_set_stream(struct imx_media_fim *fim,
+ const struct v4l2_fract *fi,
+ bool on)
+{
+ unsigned long flags;
+
+ v4l2_ctrl_lock(fim->ctrl[FIM_CL_ENABLE]);
+
+ if (fim->stream_on == on)
+ goto out;
+
+ if (on) {
+ spin_lock_irqsave(&fim->lock, flags);
+ reset_fim(fim, true);
+ update_fim_nominal(fim, fi);
+ spin_unlock_irqrestore(&fim->lock, flags);
+
+ if (icap_enabled(fim))
+ fim_acquire_first_ts(fim);
+ }
+
+ fim->stream_on = on;
+out:
+ v4l2_ctrl_unlock(fim->ctrl[FIM_CL_ENABLE]);
+}
+
+int imx_media_fim_add_controls(struct imx_media_fim *fim)
+{
+ /* add the FIM controls to the calling subdev ctrl handler */
+ return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
+ &fim->ctrl_handler, NULL, true);
+}
+
+/* Called by the subdev in its subdev registered callback */
+struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
+{
+ struct imx_media_fim *fim;
+ int ret;
+
+ fim = devm_kzalloc(sd->dev, sizeof(*fim), GFP_KERNEL);
+ if (!fim)
+ return ERR_PTR(-ENOMEM);
+
+ fim->sd = sd;
+
+ spin_lock_init(&fim->lock);
+
+ ret = init_fim_controls(fim);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fim;
+}
+
+void imx_media_fim_free(struct imx_media_fim *fim)
+{
+ v4l2_ctrl_handler_free(&fim->ctrl_handler);
+}
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
new file mode 100644
index 000000000000..da4109b2fd13
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Media driver for Freescale i.MX5/6 SOC
+ *
+ * Adds the IPU internal subdevices and the media links between them.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/platform_device.h>
+#include "imx-media.h"
+
+/* max pads per internal-sd */
+#define MAX_INTERNAL_PADS 8
+/* max links per internal-sd pad */
+#define MAX_INTERNAL_LINKS 8
+
+struct internal_subdev;
+
+struct internal_link {
+ int remote;
+ int local_pad;
+ int remote_pad;
+};
+
+struct internal_pad {
+ int num_links;
+ struct internal_link link[MAX_INTERNAL_LINKS];
+};
+
+struct internal_subdev {
+ u32 grp_id;
+ struct internal_pad pad[MAX_INTERNAL_PADS];
+
+ struct v4l2_subdev * (*sync_register)(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+ int (*sync_unregister)(struct v4l2_subdev *sd);
+};
+
+static const struct internal_subdev int_subdev[NUM_IPU_SUBDEVS] = {
+ [IPU_CSI0] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
+ .pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
+ .link = {
+ {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_IC_PRP,
+ .remote_pad = PRP_SINK_PAD,
+ }, {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_VDIC,
+ .remote_pad = VDIC_SINK_PAD_DIRECT,
+ },
+ },
+ },
+ },
+
+ [IPU_CSI1] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
+ .pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
+ .link = {
+ {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_IC_PRP,
+ .remote_pad = PRP_SINK_PAD,
+ }, {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_VDIC,
+ .remote_pad = VDIC_SINK_PAD_DIRECT,
+ },
+ },
+ },
+ },
+
+ [IPU_VDIC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
+ .sync_register = imx_media_vdic_register,
+ .sync_unregister = imx_media_vdic_unregister,
+ .pad[VDIC_SRC_PAD_DIRECT] = {
+ .num_links = 1,
+ .link = {
+ {
+ .local_pad = VDIC_SRC_PAD_DIRECT,
+ .remote = IPU_IC_PRP,
+ .remote_pad = PRP_SINK_PAD,
+ },
+ },
+ },
+ },
+
+ [IPU_IC_PRP] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
+ .pad[PRP_SRC_PAD_PRPENC] = {
+ .num_links = 1,
+ .link = {
+ {
+ .local_pad = PRP_SRC_PAD_PRPENC,
+ .remote = IPU_IC_PRPENC,
+ .remote_pad = PRPENCVF_SINK_PAD,
+ },
+ },
+ },
+ .pad[PRP_SRC_PAD_PRPVF] = {
+ .num_links = 1,
+ .link = {
+ {
+ .local_pad = PRP_SRC_PAD_PRPVF,
+ .remote = IPU_IC_PRPVF,
+ .remote_pad = PRPENCVF_SINK_PAD,
+ },
+ },
+ },
+ },
+
+ [IPU_IC_PRPENC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
+ },
+
+ [IPU_IC_PRPVF] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
+ },
+};
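+
+/*
+ * Sketch of the internal links described by the table above (per IPU,
+ * pad names omitted, direct CSI source pads only):
+ *
+ *   ipu_csiN (direct pad) ---> ipu_ic_prp ---> ipu_ic_prpenc
+ *       |                          ^      \--> ipu_ic_prpvf
+ *       +-------> ipu_vdic --------+
+ */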
+
+static int create_internal_link(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *src,
+ struct v4l2_subdev *sink,
+ const struct internal_link *link)
+{
+ int ret;
+
+	/* skip if this link has already been created */
+ if (media_entity_find_link(&src->entity.pads[link->local_pad],
+ &sink->entity.pads[link->remote_pad]))
+ return 0;
+
+ dev_dbg(imxmd->md.dev, "%s:%d -> %s:%d\n",
+ src->name, link->local_pad,
+ sink->name, link->remote_pad);
+
+ ret = media_create_pad_link(&src->entity, link->local_pad,
+ &sink->entity, link->remote_pad, 0);
+ if (ret)
+ v4l2_err(&imxmd->v4l2_dev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int create_ipu_internal_links(struct imx_media_dev *imxmd,
+ const struct internal_subdev *intsd,
+ struct v4l2_subdev *sd,
+ int ipu_id)
+{
+ const struct internal_pad *intpad;
+ const struct internal_link *link;
+ struct media_pad *pad;
+ int i, j, ret;
+
+ /* create the source->sink links */
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ intpad = &intsd->pad[i];
+ pad = &sd->entity.pads[i];
+
+ if (!(pad->flags & MEDIA_PAD_FL_SOURCE))
+ continue;
+
+ for (j = 0; j < intpad->num_links; j++) {
+ struct v4l2_subdev *sink;
+
+ link = &intpad->link[j];
+ sink = imxmd->sync_sd[ipu_id][link->remote];
+
+ ret = create_internal_link(imxmd, sd, sink, link);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi)
+{
+ struct device *ipu_dev = csi->dev->parent;
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ struct ipu_soc *ipu;
+ int i, ipu_id, ret;
+
+ ipu = dev_get_drvdata(ipu_dev);
+ if (!ipu) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU device!\n");
+ return -ENODEV;
+ }
+
+ ipu_id = ipu_get_num(ipu);
+ if (ipu_id > 1) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&imxmd->mutex);
+
+ /* record this IPU */
+ if (!imxmd->ipu[ipu_id])
+ imxmd->ipu[ipu_id] = ipu;
+
+ /* register the synchronous subdevs */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
+
+ sd = imxmd->sync_sd[ipu_id][i];
+
+ /*
+		 * skip if this sync subdev is already registered or it's
+ * not a sync subdev (one of the CSIs)
+ */
+ if (sd || !intsd->sync_register)
+ continue;
+
+ mutex_unlock(&imxmd->mutex);
+ sd = intsd->sync_register(&imxmd->v4l2_dev, ipu_dev, ipu,
+ intsd->grp_id);
+ mutex_lock(&imxmd->mutex);
+ if (IS_ERR(sd)) {
+ ret = PTR_ERR(sd);
+ goto err_unwind;
+ }
+
+ imxmd->sync_sd[ipu_id][i] = sd;
+ }
+
+ /*
+ * all the sync subdevs are registered, create the media links
+ * between them.
+ */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
+
+ if (intsd->grp_id == csi->grp_id) {
+ sd = csi;
+ } else {
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd)
+ continue;
+ }
+
+ ret = create_ipu_internal_links(imxmd, intsd, sd, ipu_id);
+ if (ret) {
+ mutex_unlock(&imxmd->mutex);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&imxmd->mutex);
+ return 0;
+
+err_unwind:
+ while (--i >= 0) {
+ intsd = &int_subdev[i];
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd || !intsd->sync_unregister)
+ continue;
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
+
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd)
+{
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ int i, j;
+
+ mutex_lock(&imxmd->mutex);
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < NUM_IPU_SUBDEVS; j++) {
+ intsd = &int_subdev[j];
+ sd = imxmd->sync_sd[i][j];
+
+ if (!sd || !intsd->sync_unregister)
+ continue;
+
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
+ }
+
+ mutex_unlock(&imxmd->mutex);
+}
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
new file mode 100644
index 000000000000..bb28daa4d713
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Media driver for Freescale i.MX5/6 SOC
+ *
+ * Open Firmware parsing.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/of_platform.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/of_graph.h>
+#include <video/imx-ipu-v3.h>
+#include "imx-media.h"
+
+static int imx_media_of_add_csi(struct imx_media_dev *imxmd,
+ struct device_node *csi_np)
+{
+ struct v4l2_async_connection *asd;
+ int ret = 0;
+
+ if (!of_device_is_available(csi_np)) {
+ dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
+ csi_np);
+ return -ENODEV;
+ }
+
+ /* add CSI fwnode to async notifier */
+ asd = v4l2_async_nf_add_fwnode(&imxmd->notifier,
+ of_fwnode_handle(csi_np),
+ struct v4l2_async_connection);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ if (ret == -EEXIST)
+ dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
+ __func__, csi_np);
+ }
+
+ return ret;
+}
+
+int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
+ struct device_node *np)
+{
+ struct device_node *csi_np;
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ csi_np = of_parse_phandle(np, "ports", i);
+ if (!csi_np)
+ break;
+
+ ret = imx_media_of_add_csi(imxmd, csi_np);
+ of_node_put(csi_np);
+ if (ret) {
+ /* unavailable or already added is not an error */
+			if (ret == -ENODEV || ret == -EEXIST)
+				continue;
+
+ /* other error, can't continue */
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
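+
+/*
+ * Illustrative device tree usage (node name and labels are an example,
+ * not a complete binding description): the "ports" property parsed above
+ * is a list of phandles to the IPU CSI port nodes, e.g.:
+ *
+ *   capture-subsystem {
+ *           compatible = "fsl,imx-capture-subsystem";
+ *           ports = <&ipu1_csi0>, <&ipu1_csi1>;
+ *   };
+ */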
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
new file mode 100644
index 000000000000..1b5af8945e6b
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/module.h>
+#include "imx-media.h"
+
+#define IMX_BUS_FMTS(fmt...) ((const u32[]) {fmt, 0})
+
+/*
+ * List of supported pixel formats for the subdevs.
+ */
+static const struct imx_media_pixfmt pixel_formats[] = {
+ /*** YUV formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_AYUV8_1X32),
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 32,
+ .ipufmt = true,
+ },
+ /*** RGB formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_RGB565_2X8_LE),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .cycles = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ .ipufmt = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ },
+ /*** raw bayer and grayscale formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SBGGR16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGBRG16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SGRBG16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ MEDIA_BUS_FMT_SRGGB16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y10_1X10),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y12_1X12),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ },
+};
+
+/*
+ * Search in the pixel_formats[] array for an entry with the given fourcc
+ * that matches the requested selection criteria and return it.
+ *
+ * @fourcc: Search for an entry with the given fourcc pixel format.
+ * @fmt_sel: Allow entries only with the given selection criteria.
+ */
+const struct imx_media_pixfmt *
+imx_media_find_pixel_format(u32 fourcc, enum imx_pixfmt_sel fmt_sel)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if ((fmt_sel & sel) && fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_pixel_format);
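+
+/*
+ * Illustrative lookup (hypothetical caller): given the table above,
+ *
+ *   cc = imx_media_find_pixel_format(V4L2_PIX_FMT_UYVY, PIXFMT_SEL_YUV);
+ *
+ * returns the packed UYVY entry (16 bpp, IPUV3_COLORSPACE_YUV), while the
+ * same fourcc with PIXFMT_SEL_RGB or PIXFMT_SEL_BAYER returns NULL.
+ */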
+
+/*
+ * Search in the pixel_formats[] array for an entry with the given media
+ * bus code that matches the requested selection criteria and return it.
+ *
+ * @code: Search for an entry with the given media-bus code.
+ * @fmt_sel: Allow entries only with the given selection criteria.
+ */
+const struct imx_media_pixfmt *
+imx_media_find_mbus_format(u32 code, enum imx_pixfmt_sel fmt_sel)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+ unsigned int j;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if (!(fmt_sel & sel) || !fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (code == fmt->codes[j])
+ return fmt;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_mbus_format);
+
+/*
+ * Enumerate entries in the pixel_formats[] array that match the
+ * requested selection criteria. Return the fourcc that matches the
+ * selection criteria at the requested match index.
+ *
+ * @fourcc: The returned fourcc that matches the search criteria at
+ * the requested match index.
+ * @index: The requested match index.
+ * @fmt_sel: Include in the enumeration entries with the given selection
+ * criteria.
+ * @code: If non-zero, only include in the enumeration entries matching this
+ * media bus code.
+ */
+int imx_media_enum_pixel_formats(u32 *fourcc, u32 index,
+ enum imx_pixfmt_sel fmt_sel, u32 code)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if (!(fmt_sel & sel))
+ continue;
+
+ /*
+ * If a media bus code is specified, only consider formats that
+ * match it.
+ */
+ if (code) {
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (code == fmt->codes[j])
+ break;
+ }
+
+ if (!fmt->codes[j])
+ continue;
+ }
+
+ if (index == 0) {
+ *fourcc = fmt->fourcc;
+ return 0;
+ }
+
+ index--;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(imx_media_enum_pixel_formats);
+
+/*
+ * Enumerate entries in the pixel_formats[] array that match the
+ * requested search criteria. Return the media-bus code that matches
+ * the search criteria at the requested match index.
+ *
+ * @code: The returned media-bus code that matches the search criteria at
+ * the requested match index.
+ * @index: The requested match index.
+ * @fmt_sel: Include in the enumeration entries with the given selection
+ * criteria.
+ */
+int imx_media_enum_mbus_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel fmt_sel)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+ unsigned int j;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if (!(fmt_sel & sel) || !fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (index == 0) {
+ *code = fmt->codes[j];
+ return 0;
+ }
+
+ index--;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(imx_media_enum_mbus_formats);
+
+int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ u32 width, u32 height, u32 code, u32 field,
+ const struct imx_media_pixfmt **cc)
+{
+ const struct imx_media_pixfmt *lcc;
+
+ mbus->width = width;
+ mbus->height = height;
+ mbus->field = field;
+
+ if (code == 0)
+ imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ lcc = imx_media_find_mbus_format(code, PIXFMT_SEL_ANY);
+ if (!lcc) {
+ lcc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
+ if (!lcc)
+ return -EINVAL;
+ }
+
+ mbus->code = code;
+
+ mbus->colorspace = V4L2_COLORSPACE_SRGB;
+ mbus->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mbus->colorspace);
+ mbus->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mbus->colorspace);
+ mbus->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(lcc->cs == IPUV3_COLORSPACE_RGB,
+ mbus->colorspace,
+ mbus->ycbcr_enc);
+
+ if (cc)
+ *cc = lcc;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_init_mbus_fmt);
+
+/*
+ * Initializes the TRY format to the ACTIVE format on all pads
+ * of a subdev. Can be used as the .init_state internal operation.
+ */
+int imx_media_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_mbus_framefmt *mf_try;
+ unsigned int pad;
+ int ret;
+
+ for (pad = 0; pad < sd->entity.num_pads; pad++) {
+ struct v4l2_subdev_format format = {
+ .pad = pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+ if (ret)
+ continue;
+
+ mf_try = v4l2_subdev_state_get_format(sd_state, pad);
+ *mf_try = format.format;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_init_state);
+
+/*
+ * Default the colorspace in tryfmt to SRGB if set to an unsupported
+ * colorspace or not initialized. Then set the remaining colorimetry
+ * parameters based on the colorspace if they are uninitialized.
+ *
+ * tryfmt->code must be set on entry.
+ *
+ * If this format is destined to be routed through the Image Converter,
+ * Y`CbCr encoding must be fixed. The IC supports only BT.601 Y`CbCr
+ * or Rec.709 Y`CbCr encoding.
+ */
+void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
+ bool ic_route)
+{
+ const struct imx_media_pixfmt *cc;
+ bool is_rgb = false;
+
+ cc = imx_media_find_mbus_format(tryfmt->code, PIXFMT_SEL_ANY);
+ if (!cc)
+ cc = imx_media_find_ipu_format(tryfmt->code,
+ PIXFMT_SEL_YUV_RGB);
+
+ if (cc && cc->cs == IPUV3_COLORSPACE_RGB)
+ is_rgb = true;
+
+ switch (tryfmt->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_BT2020:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_DCI_P3:
+ case V4L2_COLORSPACE_RAW:
+ break;
+ default:
+ tryfmt->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ }
+
+ if (tryfmt->xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ tryfmt->xfer_func =
+ V4L2_MAP_XFER_FUNC_DEFAULT(tryfmt->colorspace);
+
+ if (ic_route) {
+ if (tryfmt->ycbcr_enc != V4L2_YCBCR_ENC_601 &&
+ tryfmt->ycbcr_enc != V4L2_YCBCR_ENC_709)
+ tryfmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ } else {
+ if (tryfmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) {
+ tryfmt->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(tryfmt->colorspace);
+ }
+ }
+
+ if (tryfmt->quantization == V4L2_QUANTIZATION_DEFAULT)
+ tryfmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb,
+ tryfmt->colorspace,
+ tryfmt->ycbcr_enc);
+}
+EXPORT_SYMBOL_GPL(imx_media_try_colorimetry);
+
+int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ const struct v4l2_mbus_framefmt *mbus,
+ const struct imx_media_pixfmt *cc)
+{
+ u32 width;
+ u32 stride;
+
+ if (!cc) {
+ cc = imx_media_find_ipu_format(mbus->code,
+ PIXFMT_SEL_YUV_RGB);
+ if (!cc)
+ cc = imx_media_find_mbus_format(mbus->code,
+ PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+ }
+
+ /*
+ * TODO: the IPU currently does not support the AYUV32 format,
+	 * so until it does, convert to a supported YUV format.
+ */
+ if (cc->ipufmt && cc->cs == IPUV3_COLORSPACE_YUV) {
+ u32 code;
+
+ imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);
+ cc = imx_media_find_mbus_format(code, PIXFMT_SEL_YUV);
+ }
+
+ /* Round up width for minimum burst size */
+ width = round_up(mbus->width, 8);
+
+ /* Round up stride for IDMAC line start address alignment */
+ if (cc->planar)
+ stride = round_up(width, 16);
+ else
+ stride = round_up((width * cc->bpp) >> 3, 8);
+
+ pix->width = width;
+ pix->height = mbus->height;
+ pix->pixelformat = cc->fourcc;
+ pix->colorspace = mbus->colorspace;
+ pix->xfer_func = mbus->xfer_func;
+ pix->ycbcr_enc = mbus->ycbcr_enc;
+ pix->quantization = mbus->quantization;
+ pix->field = mbus->field;
+ pix->bytesperline = stride;
+ pix->sizeimage = cc->planar ? ((stride * pix->height * cc->bpp) >> 3) :
+ stride * pix->height;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
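+
+/*
+ * Worked example of the rounding above (values chosen for illustration):
+ * a packed 640x480 UYVY mbus format (16 bpp, not planar) gives
+ * width = round_up(640, 8) = 640, bytesperline = round_up(640 * 16 / 8, 8)
+ * = 1280 and sizeimage = 1280 * 480 = 614400. A planar 640x480 YUV420
+ * format (12 bpp) gives bytesperline = round_up(640, 16) = 640 and
+ * sizeimage = (640 * 480 * 12) / 8 = 460800.
+ */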
+
+void imx_media_free_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf)
+{
+ if (buf->virt)
+ dma_free_coherent(dev, buf->len, buf->virt, buf->phys);
+
+ buf->virt = NULL;
+ buf->phys = 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_free_dma_buf);
+
+int imx_media_alloc_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf,
+ int size)
+{
+ imx_media_free_dma_buf(dev, buf);
+
+ buf->len = PAGE_ALIGN(size);
+ buf->virt = dma_alloc_coherent(dev, buf->len, &buf->phys,
+ GFP_DMA | GFP_KERNEL);
+ if (!buf->virt) {
+ dev_err(dev, "%s: failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_alloc_dma_buf);
+
+/* form a subdev name given a group id and ipu id */
+void imx_media_grp_id_to_sd_name(char *sd_name, int sz, u32 grp_id, int ipu_id)
+{
+ int id;
+
+ switch (grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_CSI0...IMX_MEDIA_GRP_ID_IPU_CSI1:
+ id = (grp_id >> IMX_MEDIA_GRP_ID_IPU_CSI_BIT) - 1;
+ snprintf(sd_name, sz, "ipu%d_csi%d", ipu_id + 1, id);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
+ snprintf(sd_name, sz, "ipu%d_vdic", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ snprintf(sd_name, sz, "ipu%d_ic_prp", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
+ snprintf(sd_name, sz, "ipu%d_ic_prpenc", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
+ snprintf(sd_name, sz, "ipu%d_ic_prpvf", ipu_id + 1);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(imx_media_grp_id_to_sd_name);
+
+/*
+ * Adds a video device to the master video device list. This is called
+ * when a video device is registered.
+ */
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev)
+{
+ mutex_lock(&imxmd->mutex);
+
+ list_add_tail(&vdev->list, &imxmd->vdev_list);
+
+ mutex_unlock(&imxmd->mutex);
+}
+EXPORT_SYMBOL_GPL(imx_media_add_video_device);
+
+/*
+ * Search upstream/downstream for a subdevice or video device pad in the
+ * current pipeline, starting from start_entity. Returns the found
+ * device's source/sink pad through which it was reached. Must be called
+ * with
+ * mdev->graph_mutex held.
+ *
+ * If grp_id != 0, finds a subdevice's pad of given grp_id.
+ * Else if buftype != 0, finds a video device's pad of given buffer type.
+ * Else, returns the nearest source/sink pad to start_entity.
+ */
+struct media_pad *
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
+{
+ struct media_entity *me = start_entity;
+ struct media_pad *pad = NULL;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+ int i;
+
+ for (i = 0; i < me->num_pads; i++) {
+ struct media_pad *spad = &me->pads[i];
+
+ if ((upstream && !(spad->flags & MEDIA_PAD_FL_SINK)) ||
+ (!upstream && !(spad->flags & MEDIA_PAD_FL_SOURCE)))
+ continue;
+
+ pad = media_pad_remote_pad_first(spad);
+ if (!pad)
+ continue;
+
+ if (grp_id) {
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ if (sd->grp_id & grp_id)
+ return pad;
+ }
+
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
+ } else if (buftype) {
+ if (is_media_entity_v4l2_video_device(pad->entity)) {
+ vfd = media_entity_to_video_device(pad->entity);
+ if (buftype == vfd->queue->type)
+ return pad;
+ }
+
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
+ } else {
+ return pad;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_pad);
+
+/*
+ * Search upstream/downstream for a subdev or video device in the current
+ * pipeline. Must be called with mdev->graph_mutex held.
+ */
+static struct media_entity *
+find_pipeline_entity(struct media_entity *start, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
+{
+ struct media_pad *pad = NULL;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+
+ if (grp_id && is_media_entity_v4l2_subdev(start)) {
+ sd = media_entity_to_v4l2_subdev(start);
+ if (sd->grp_id & grp_id)
+ return &sd->entity;
+ } else if (buftype && is_media_entity_v4l2_video_device(start)) {
+ vfd = media_entity_to_video_device(start);
+ if (buftype == vfd->queue->type)
+ return &vfd->entity;
+ }
+
+ pad = imx_media_pipeline_pad(start, grp_id, buftype, upstream);
+
+ return pad ? pad->entity : NULL;
+}
+
+/*
+ * Find a subdev reached upstream from the given start entity in
+ * the current pipeline.
+ * Must be called with mdev->graph_mutex held.
+ */
+struct v4l2_subdev *
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream)
+{
+ struct media_entity *me;
+
+ me = find_pipeline_entity(start_entity, grp_id, 0, upstream);
+ if (!me)
+ return ERR_PTR(-ENODEV);
+
+ return media_entity_to_v4l2_subdev(me);
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_subdev);
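+
+/*
+ * Illustrative caller (a sketch, not taken from this driver): a subdev
+ * that needs the CSI feeding its pipeline, with md.graph_mutex held,
+ * could do:
+ *
+ *   struct v4l2_subdev *csi;
+ *
+ *   csi = imx_media_pipeline_subdev(&sd->entity,
+ *                                   IMX_MEDIA_GRP_ID_IPU_CSI, true);
+ *   if (IS_ERR(csi))
+ *           return PTR_ERR(csi);
+ */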
+
+/*
+ * Turn current pipeline streaming on/off starting from entity.
+ */
+int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
+ struct media_entity *entity,
+ bool on)
+{
+ struct v4l2_subdev *sd;
+ int ret = 0;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ return -EINVAL;
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ mutex_lock(&imxmd->md.graph_mutex);
+
+ if (on) {
+ ret = __media_pipeline_start(entity->pads, &imxmd->pipe);
+ if (ret)
+ goto out;
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret)
+ __media_pipeline_stop(entity->pads);
+ } else {
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ if (media_pad_pipeline(entity->pads))
+ __media_pipeline_stop(entity->pads);
+ }
+
+out:
+ mutex_unlock(&imxmd->md.graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_set_stream);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
new file mode 100644
index 000000000000..86f2b30cb06c
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Deinterlacer Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2017 Mentor Graphics Inc.
+ */
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+/*
+ * This subdev implements two different video pipelines:
+ *
+ * CSI -> VDIC
+ *
+ * In this pipeline, the CSI sends a single interlaced field F(n-1)
+ * directly to the VDIC (and optionally the following field F(n)
+ * can be sent to memory via IDMAC channel 13). This pipeline only works
+ * in VDIC's high motion mode, which only requires a single field for
+ * processing. The other motion modes (low and medium) require three
+ * fields, so this pipeline does not work in those modes. Also, it is
+ * not clear how this pipeline can deal with the various field orders
+ * (sequential BT/TB, interlaced BT/TB).
+ *
+ * MEM -> CH8,9,10 -> VDIC
+ *
+ * In this pipeline, previous field F(n-1), current field F(n), and next
+ * field F(n+1) are transferred to the VDIC via IDMAC channels 8,9,10.
+ * These memory buffers can come from a video output or mem2mem device.
+ * All motion modes are supported by this pipeline.
+ *
+ * The "direct" CSI->VDIC pipeline requires no DMA, but it can only be
+ * used in high motion mode.
+ */
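+
+/*
+ * Illustrative userspace setup of the direct pipeline (a sketch; the
+ * symbolic pad names stand in for the numeric indexes defined in
+ * imx-media.h, and the entity names follow imx_media_grp_id_to_sd_name):
+ *
+ *   media-ctl -l "'ipu1_csi0':CSI_SRC_PAD_DIRECT -> 'ipu1_vdic':VDIC_SINK_PAD_DIRECT [1]"
+ */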
+
+struct vdic_priv;
+
+struct vdic_pipeline_ops {
+ int (*setup)(struct vdic_priv *priv);
+ void (*start)(struct vdic_priv *priv);
+ void (*stop)(struct vdic_priv *priv);
+ void (*disable)(struct vdic_priv *priv);
+};
+
+/*
+ * Min/Max supported width and heights.
+ */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W_VDIC 968
+#define MAX_H_VDIC 2048
+#define W_ALIGN 4 /* multiple of 16 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
+
+struct vdic_priv {
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
+
+ struct v4l2_subdev sd;
+ struct media_pad pad[VDIC_NUM_PADS];
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ /* IPU units we require */
+ struct ipu_vdi *vdi;
+
+ int active_input_pad;
+
+ struct ipuv3_channel *vdi_in_ch_p; /* F(n-1) transfer channel */
+ struct ipuv3_channel *vdi_in_ch; /* F(n) transfer channel */
+ struct ipuv3_channel *vdi_in_ch_n; /* F(n+1) transfer channel */
+
+ /* pipeline operations */
+ struct vdic_pipeline_ops *ops;
+
+	/* current and previous input buffers for the indirect path */
+ struct imx_media_buffer *curr_in_buf;
+ struct imx_media_buffer *prev_in_buf;
+
+ /*
+ * translated field type, input line stride, and field size
+ * for indirect path
+ */
+ u32 fieldtype;
+ u32 in_stride;
+ u32 field_size;
+
+ /* the source (a video device or subdev) */
+ struct media_entity *src;
+ /* the sink that will receive the progressive out buffers */
+ struct v4l2_subdev *sink_sd;
+
+ struct v4l2_mbus_framefmt format_mbus[VDIC_NUM_PADS];
+ const struct imx_media_pixfmt *cc[VDIC_NUM_PADS];
+ struct v4l2_fract frame_interval[VDIC_NUM_PADS];
+
+ /* the video device at IDMAC input pad */
+ struct imx_media_video_dev *vdev;
+
+ bool csi_direct; /* using direct CSI->VDIC->IC pipeline */
+
+ /* motion select control */
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ enum ipu_motion_sel motion;
+
+ int stream_count;
+};
+
+static void vdic_put_ipu_resources(struct vdic_priv *priv)
+{
+ if (priv->vdi_in_ch_p)
+ ipu_idmac_put(priv->vdi_in_ch_p);
+ priv->vdi_in_ch_p = NULL;
+
+ if (priv->vdi_in_ch)
+ ipu_idmac_put(priv->vdi_in_ch);
+ priv->vdi_in_ch = NULL;
+
+ if (priv->vdi_in_ch_n)
+ ipu_idmac_put(priv->vdi_in_ch_n);
+ priv->vdi_in_ch_n = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->vdi))
+ ipu_vdi_put(priv->vdi);
+ priv->vdi = NULL;
+}
+
+static int vdic_get_ipu_resources(struct vdic_priv *priv)
+{
+ int ret, err_chan;
+ struct ipuv3_channel *ch;
+ struct ipu_vdi *vdi;
+
+ vdi = ipu_vdi_get(priv->ipu);
+ if (IS_ERR(vdi)) {
+ v4l2_err(&priv->sd, "failed to get VDIC\n");
+ ret = PTR_ERR(vdi);
+ goto out;
+ }
+ priv->vdi = vdi;
+
+ if (!priv->csi_direct) {
+ ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_PREV);
+ if (IS_ERR(ch)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_PREV;
+ ret = PTR_ERR(ch);
+ goto out_err_chan;
+ }
+ priv->vdi_in_ch_p = ch;
+
+ ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_CUR);
+ if (IS_ERR(ch)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_CUR;
+ ret = PTR_ERR(ch);
+ goto out_err_chan;
+ }
+ priv->vdi_in_ch = ch;
+
+ ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_NEXT);
+ if (IS_ERR(ch)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_NEXT;
+ ret = PTR_ERR(ch);
+ goto out_err_chan;
+ }
+ priv->vdi_in_ch_n = ch;
+ }
+
+ return 0;
+
+out_err_chan:
+ v4l2_err(&priv->sd, "could not get IDMAC channel %u\n", err_chan);
+out:
+ vdic_put_ipu_resources(priv);
+ return ret;
+}
+
+static int setup_vdi_channel(struct vdic_priv *priv,
+ struct ipuv3_channel *channel,
+ dma_addr_t phys0, dma_addr_t phys1)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ unsigned int burst_size;
+ struct ipu_image image;
+ int ret;
+
+ ipu_cpmem_zero(channel);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt;
+ image.rect = vdev->compose;
+ /* one field to VDIC channels */
+ image.pix.height /= 2;
+ image.rect.height /= 2;
+ image.phys0 = phys0;
+ image.phys1 = phys1;
+
+ ret = ipu_cpmem_set_image(channel, &image);
+ if (ret)
+ return ret;
+
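+	/* use an 8-pixel burst unless the line width is a multiple of 16 */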
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ ipu_cpmem_set_burstsize(channel, burst_size);
+
+ ipu_cpmem_set_axi_id(channel, 1);
+
+ ipu_idmac_set_double_buffer(channel, false);
+
+ return 0;
+}
+
+static int vdic_setup_direct(struct vdic_priv *priv)
+{
+ /* set VDIC to receive from CSI for direct path */
+ ipu_fsu_link(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
+ IPUV3_CHANNEL_CSI_VDI_PREV);
+
+ return 0;
+}
+
+static void vdic_start_direct(struct vdic_priv *priv)
+{
+}
+
+static void vdic_stop_direct(struct vdic_priv *priv)
+{
+}
+
+static void vdic_disable_direct(struct vdic_priv *priv)
+{
+ ipu_fsu_unlink(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
+ IPUV3_CHANNEL_CSI_VDI_PREV);
+}
+
+static int vdic_setup_indirect(struct vdic_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt;
+ const struct imx_media_pixfmt *incc;
+ int in_size, ret;
+
+ infmt = &priv->format_mbus[VDIC_SINK_PAD_IDMAC];
+ incc = priv->cc[VDIC_SINK_PAD_IDMAC];
+
+ in_size = (infmt->width * incc->bpp * infmt->height) >> 3;
+
+ /* 1/2 full image size */
+ priv->field_size = in_size / 2;
+ priv->in_stride = incc->planar ?
+ infmt->width : (infmt->width * incc->bpp) >> 3;
+
+ priv->prev_in_buf = NULL;
+ priv->curr_in_buf = NULL;
+
+ priv->fieldtype = infmt->field;
+
+ /* init the vdi-in channels */
+ ret = setup_vdi_channel(priv, priv->vdi_in_ch_p, 0, 0);
+ if (ret)
+ return ret;
+ ret = setup_vdi_channel(priv, priv->vdi_in_ch, 0, 0);
+ if (ret)
+ return ret;
+ return setup_vdi_channel(priv, priv->vdi_in_ch_n, 0, 0);
+}
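+
+/*
+ * Worked example for the sizes above (illustrative numbers): a 720x480
+ * UYVY IDMAC input (16 bpp, not planar) gives
+ * in_size = 720 * 16 * 480 / 8 = 691200 bytes, so field_size = 345600
+ * bytes and in_stride = 720 * 16 / 8 = 1440 bytes.
+ */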
+
+static void vdic_start_indirect(struct vdic_priv *priv)
+{
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->vdi_in_ch_p);
+ ipu_idmac_enable_channel(priv->vdi_in_ch);
+ ipu_idmac_enable_channel(priv->vdi_in_ch_n);
+}
+
+static void vdic_stop_indirect(struct vdic_priv *priv)
+{
+ /* disable channels */
+ ipu_idmac_disable_channel(priv->vdi_in_ch_p);
+ ipu_idmac_disable_channel(priv->vdi_in_ch);
+ ipu_idmac_disable_channel(priv->vdi_in_ch_n);
+}
+
+static void vdic_disable_indirect(struct vdic_priv *priv)
+{
+}
+
+static struct vdic_pipeline_ops direct_ops = {
+ .setup = vdic_setup_direct,
+ .start = vdic_start_direct,
+ .stop = vdic_stop_direct,
+ .disable = vdic_disable_direct,
+};
+
+static struct vdic_pipeline_ops indirect_ops = {
+ .setup = vdic_setup_indirect,
+ .start = vdic_start_indirect,
+ .stop = vdic_stop_indirect,
+ .disable = vdic_disable_indirect,
+};
+
+static int vdic_start(struct vdic_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt;
+ int ret;
+
+ infmt = &priv->format_mbus[priv->active_input_pad];
+
+ priv->ops = priv->csi_direct ? &direct_ops : &indirect_ops;
+
+ ret = vdic_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * init the VDIC.
+ *
+ * note we don't give infmt->code to ipu_vdi_setup(). The VDIC
+ * only supports 4:2:2 or 4:2:0, and this subdev will only
+ * negotiate 4:2:2 at its sink pads.
+ */
+ ipu_vdi_setup(priv->vdi, MEDIA_BUS_FMT_UYVY8_2X8,
+ infmt->width, infmt->height);
+ ipu_vdi_set_field_order(priv->vdi, V4L2_STD_UNKNOWN, infmt->field);
+ ipu_vdi_set_motion(priv->vdi, priv->motion);
+
+ ret = priv->ops->setup(priv);
+ if (ret)
+ goto out_put_ipu;
+
+ ipu_vdi_enable(priv->vdi);
+
+ priv->ops->start(priv);
+
+ return 0;
+
+out_put_ipu:
+ vdic_put_ipu_resources(priv);
+ return ret;
+}
+
+static void vdic_stop(struct vdic_priv *priv)
+{
+ priv->ops->stop(priv);
+ ipu_vdi_disable(priv->vdi);
+ priv->ops->disable(priv);
+
+ vdic_put_ipu_resources(priv);
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int vdic_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vdic_priv *priv = container_of(ctrl->handler,
+ struct vdic_priv, ctrl_hdlr);
+ enum ipu_motion_sel motion;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_DEINTERLACING_MODE:
+ motion = ctrl->val;
+ if (motion != priv->motion) {
+ /* can't change motion control mid-streaming */
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->motion = motion;
+ }
+ break;
+ default:
+ v4l2_err(&priv->sd, "Invalid control\n");
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops vdic_ctrl_ops = {
+ .s_ctrl = vdic_s_ctrl,
+};
+
+static const char * const vdic_ctrl_motion_menu[] = {
+ "No Motion Compensation",
+ "Low Motion",
+ "Medium Motion",
+ "High Motion",
+};
+
+static int vdic_init_controls(struct vdic_priv *priv)
+{
+ struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdlr, 1);
+
+ v4l2_ctrl_new_std_menu_items(hdlr, &vdic_ctrl_ops,
+ V4L2_CID_DEINTERLACING_MODE,
+ HIGH_MOTION, 0, HIGH_MOTION,
+ vdic_ctrl_motion_menu);
+
+ priv->sd.ctrl_handler = hdlr;
+
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto out_free;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+
+out_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+static int vdic_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd = NULL;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src || !priv->sink_sd) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (priv->csi_direct)
+ src_sd = media_entity_to_v4l2_subdev(priv->src);
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = vdic_start(priv);
+ else
+ vdic_stop(priv);
+ if (ret)
+ goto out;
+
+ if (src_sd) {
+ /* start/stop upstream */
+ ret = v4l2_subdev_call(src_sd, video, s_stream, enable);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ if (enable)
+ vdic_stop(priv);
+ goto out;
+ }
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_format(sd_state, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ return imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV);
+}
+
+static int vdic_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void vdic_try_fmt(struct vdic_priv *priv,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx_media_pixfmt **cc)
+{
+ struct v4l2_mbus_framefmt *infmt;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV);
+ if (!*cc) {
+ u32 code;
+
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+ *cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ infmt = __vdic_get_fmt(priv, sd_state, priv->active_input_pad,
+ sdformat->which);
+
+ switch (sdformat->pad) {
+ case VDIC_SRC_PAD_DIRECT:
+ sdformat->format = *infmt;
+ /* output is always progressive! */
+ sdformat->format.field = V4L2_FIELD_NONE;
+ break;
+ case VDIC_SINK_PAD_DIRECT:
+ case VDIC_SINK_PAD_IDMAC:
+ v4l_bound_align_image(&sdformat->format.width,
+ MIN_W, MAX_W_VDIC, W_ALIGN,
+ &sdformat->format.height,
+ MIN_H, MAX_H_VDIC, H_ALIGN, S_ALIGN);
+
+ /* input must be interlaced! Choose SEQ_TB if not */
+ if (!V4L2_FIELD_HAS_BOTH(sdformat->format.field))
+ sdformat->format.field = V4L2_FIELD_SEQ_TB;
+ break;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format, true);
+}
+
+static int vdic_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ vdic_try_fmt(priv, sd_state, sdformat, &cc);
+
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ /* propagate format to source pad */
+ if (sdformat->pad == VDIC_SINK_PAD_DIRECT ||
+ sdformat->pad == VDIC_SINK_PAD_IDMAC) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = VDIC_SRC_PAD_DIRECT;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ vdic_try_fmt(priv, sd_state, &format, &outcc);
+
+ outfmt = __vdic_get_fmt(priv, sd_state, VDIC_SRC_PAD_DIRECT,
+ sdformat->which);
+ *outfmt = format.format;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[VDIC_SRC_PAD_DIRECT] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(priv->ipu_dev, "%s: link setup %s -> %s",
+ sd->name, remote->entity->name, local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->sink_sd = remote_sd;
+ } else {
+ priv->sink_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a sink pad */
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ priv->src = NULL;
+ goto out;
+ }
+
+ if (local->index == VDIC_SINK_PAD_IDMAC) {
+ struct imx_media_video_dev *vdev = priv->vdev;
+
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!vdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ priv->csi_direct = false;
+ } else {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ /* direct pad must connect to a CSI */
+ if (!(remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) ||
+ remote->index != CSI_SRC_PAD_DIRECT) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->csi_direct = true;
+ }
+
+ priv->src = remote->entity;
+ /* record which input pad is now active */
+ priv->active_input_pad = local->index;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->csi_direct && priv->motion != HIGH_MOTION) {
+ v4l2_err(&priv->sd,
+ "direct CSI pipeline requires high motion\n");
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (fi->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fi->interval = priv->frame_interval[fi->pad];
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int vdic_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi, *output_fi;
+ int ret = 0;
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[priv->active_input_pad];
+ output_fi = &priv->frame_interval[VDIC_SRC_PAD_DIRECT];
+
+ switch (fi->pad) {
+ case VDIC_SINK_PAD_DIRECT:
+ case VDIC_SINK_PAD_IDMAC:
+ /* No limits on valid input frame intervals */
+ if (fi->interval.numerator == 0 ||
+ fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval[fi->pad];
+ /* Reset output interval */
+ *output_fi = fi->interval;
+ if (priv->csi_direct)
+ output_fi->denominator *= 2;
+ break;
+ case VDIC_SRC_PAD_DIRECT:
+ /*
+ * frame rate at output pad is double input
+ * rate when using direct CSI->VDIC pipeline.
+ *
+ * TODO: implement VDIC frame skipping
+ */
+ fi->interval = *input_fi;
+ if (priv->csi_direct)
+ fi->interval.denominator *= 2;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->frame_interval[fi->pad] = fi->interval;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
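+
+/*
+ * A worked example of the interval handling above (illustrative numbers):
+ * with the direct CSI -> VDIC pipeline (csi_direct set) and an input frame
+ * interval of 1/30 s applied to VDIC_SINK_PAD_DIRECT, the denominator of
+ * the output interval is doubled, so VDIC_SRC_PAD_DIRECT reports 1/60 s,
+ * i.e. the progressive output runs at twice the input frame rate.
+ */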
+
+static int vdic_registered(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ int i, ret;
+ u32 code;
+
+ for (i = 0; i < VDIC_NUM_PADS; i++) {
+ code = 0;
+ if (i != VDIC_SINK_PAD_IDMAC)
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, &priv->cc[i]);
+ if (ret)
+ return ret;
+
+ /* init default frame interval */
+ priv->frame_interval[i].numerator = 1;
+ priv->frame_interval[i].denominator = 30;
+ if (i == VDIC_SRC_PAD_DIRECT)
+ priv->frame_interval[i].denominator *= 2;
+ }
+
+ priv->active_input_pad = VDIC_SINK_PAD_DIRECT;
+
+ return vdic_init_controls(priv);
+}
+
+static void vdic_unregistered(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+
+static const struct v4l2_subdev_pad_ops vdic_pad_ops = {
+ .enum_mbus_code = vdic_enum_mbus_code,
+ .get_fmt = vdic_get_fmt,
+ .set_fmt = vdic_set_fmt,
+ .get_frame_interval = vdic_get_frame_interval,
+ .set_frame_interval = vdic_set_frame_interval,
+ .link_validate = vdic_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops vdic_video_ops = {
+ .s_stream = vdic_s_stream,
+};
+
+static const struct media_entity_operations vdic_entity_ops = {
+ .link_setup = vdic_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops vdic_subdev_ops = {
+ .video = &vdic_video_ops,
+ .pad = &vdic_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
+ .init_state = imx_media_init_state,
+ .registered = vdic_registered,
+ .unregistered = vdic_unregistered,
+};
+
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
+{
+ struct vdic_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
+
+ v4l2_subdev_init(&priv->sd, &vdic_subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = &vdic_internal_ops;
+ priv->sd.entity.ops = &vdic_entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->sd.owner = ipu_dev->driver->owner;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
+
+ mutex_init(&priv->lock);
+
+ for (i = 0; i < VDIC_NUM_PADS; i++)
+ priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&priv->sd.entity, VDIC_NUM_PADS,
+ priv->pad);
+ if (ret)
+ goto free;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
+ if (ret)
+ goto free;
+
+ return &priv->sd;
+free:
+ mutex_destroy(&priv->lock);
+ return ERR_PTR(ret);
+}
+
+int imx_media_vdic_unregister(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ v4l2_info(sd, "Removing\n");
+
+ v4l2_device_unregister_subdev(sd);
+ mutex_destroy(&priv->lock);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
new file mode 100644
index 000000000000..f095d9134fee
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#ifndef _IMX_MEDIA_H
+#define _IMX_MEDIA_H
+
+#include <linux/platform_device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+
+#define IMX_MEDIA_DEF_PIX_WIDTH 640
+#define IMX_MEDIA_DEF_PIX_HEIGHT 480
+
+/*
+ * Enumeration of the IPU internal sub-devices
+ */
+enum {
+ IPU_CSI0 = 0,
+ IPU_CSI1,
+ IPU_VDIC,
+ IPU_IC_PRP,
+ IPU_IC_PRPENC,
+ IPU_IC_PRPVF,
+ NUM_IPU_SUBDEVS,
+};
+
+/*
+ * Pad definitions for the subdevs with multiple source or
+ * sink pads
+ */
+
+/* ipu_csi */
+enum {
+ CSI_SINK_PAD = 0,
+ CSI_SRC_PAD_DIRECT,
+ CSI_SRC_PAD_IDMAC,
+ CSI_NUM_PADS,
+};
+
+/* ipu_vdic */
+enum {
+ VDIC_SINK_PAD_DIRECT = 0,
+ VDIC_SINK_PAD_IDMAC,
+ VDIC_SRC_PAD_DIRECT,
+ VDIC_NUM_PADS,
+};
+
+/* ipu_ic_prp */
+enum {
+ PRP_SINK_PAD = 0,
+ PRP_SRC_PAD_PRPENC,
+ PRP_SRC_PAD_PRPVF,
+ PRP_NUM_PADS,
+};
+
+/* ipu_ic_prpencvf */
+enum {
+ PRPENCVF_SINK_PAD = 0,
+ PRPENCVF_SRC_PAD,
+ PRPENCVF_NUM_PADS,
+};
+
+/* How long to wait (in milliseconds) for EOF interrupts in the buffer-capture subdevs */
+#define IMX_MEDIA_EOF_TIMEOUT 2000
+
+struct imx_media_pixfmt {
+ /* the in-memory FourCC pixel format */
+ u32 fourcc;
+ /*
+ * the set of equivalent media bus codes for the fourcc.
+ * NOTE! codes pointer is NULL for in-memory-only formats.
+ */
+ const u32 *codes;
+ int bpp; /* total bpp */
+ /* cycles per pixel for generic (bayer) formats for the parallel bus */
+ int cycles;
+ enum ipu_color_space cs;
+ bool planar; /* is a planar format */
+ bool bayer; /* is a raw bayer format */
+ bool ipufmt; /* is one of the IPU internal formats */
+};
+
+enum imx_pixfmt_sel {
+ PIXFMT_SEL_YUV = BIT(0), /* select YUV formats */
+ PIXFMT_SEL_RGB = BIT(1), /* select RGB formats */
+ PIXFMT_SEL_BAYER = BIT(2), /* select BAYER formats */
+ PIXFMT_SEL_IPU = BIT(3), /* select IPU-internal formats */
+ PIXFMT_SEL_YUV_RGB = PIXFMT_SEL_YUV | PIXFMT_SEL_RGB,
+ PIXFMT_SEL_ANY = PIXFMT_SEL_YUV | PIXFMT_SEL_RGB | PIXFMT_SEL_BAYER,
+};
+
+struct imx_media_buffer {
+ struct vb2_v4l2_buffer vbuf; /* v4l buffer must be first */
+ struct list_head list;
+};
+
+struct imx_media_video_dev {
+ struct video_device *vfd;
+
+ /* the user format */
+ struct v4l2_pix_format fmt;
+ /* the compose rectangle */
+ struct v4l2_rect compose;
+ const struct imx_media_pixfmt *cc;
+
+ /* links this vdev to master list */
+ struct list_head list;
+};
+
+static inline struct imx_media_buffer *to_imx_media_vb(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct imx_media_buffer, vbuf);
+}
+
+/*
+ * to support control inheritance to video devices, this
+ * retrieves a pad's list_head of video devices that can
+ * be reached from the pad. Note that only the lists in
+ * source pads get populated; sink pads have empty lists.
+ */
+static inline struct list_head *
+to_pad_vdev_list(struct v4l2_subdev *sd, int pad_index)
+{
+ struct list_head *vdev_list = sd->host_priv;
+
+ return vdev_list ? &vdev_list[pad_index] : NULL;
+}
+
+/* an entry in a pad's video device list */
+struct imx_media_pad_vdev {
+ struct imx_media_video_dev *vdev;
+ struct list_head list;
+};
+
+struct imx_media_dev {
+ struct media_device md;
+ struct v4l2_device v4l2_dev;
+
+ /* the pipeline object */
+ struct media_pipeline pipe;
+
+ struct mutex mutex; /* protect elements below */
+
+ /* master video device list */
+ struct list_head vdev_list;
+
+ /* IPUs this media driver controls, valid after subdevs are bound */
+ struct ipu_soc *ipu[2];
+
+ /* for async subdev registration */
+ struct v4l2_async_notifier notifier;
+
+ /* IC scaler/CSC mem2mem video device */
+ struct imx_media_video_dev *m2m_vdev;
+
+ /* the IPU internal subdevs, registered synchronously */
+ struct v4l2_subdev *sync_sd[2][NUM_IPU_SUBDEVS];
+};
+
+/* imx-media-utils.c */
+const struct imx_media_pixfmt *
+imx_media_find_pixel_format(u32 fourcc, enum imx_pixfmt_sel sel);
+int imx_media_enum_pixel_formats(u32 *fourcc, u32 index,
+ enum imx_pixfmt_sel sel, u32 code);
+const struct imx_media_pixfmt *
+imx_media_find_mbus_format(u32 code, enum imx_pixfmt_sel sel);
+int imx_media_enum_mbus_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel sel);
+
+static inline const struct imx_media_pixfmt *
+imx_media_find_ipu_format(u32 code, enum imx_pixfmt_sel fmt_sel)
+{
+ return imx_media_find_mbus_format(code, fmt_sel | PIXFMT_SEL_IPU);
+}
+
+static inline int imx_media_enum_ipu_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel fmt_sel)
+{
+ return imx_media_enum_mbus_formats(code, index,
+ fmt_sel | PIXFMT_SEL_IPU);
+}
+
+int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ u32 width, u32 height, u32 code, u32 field,
+ const struct imx_media_pixfmt **cc);
+int imx_media_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state);
+void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
+ bool ic_route);
+int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ const struct v4l2_mbus_framefmt *mbus,
+ const struct imx_media_pixfmt *cc);
+void imx_media_grp_id_to_sd_name(char *sd_name, int sz,
+ u32 grp_id, int ipu_id);
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev);
+struct media_pad *
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream);
+struct v4l2_subdev *
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream);
+
+struct imx_media_dma_buf {
+ void *virt;
+ dma_addr_t phys;
+ unsigned long len;
+};
+
+void imx_media_free_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf);
+int imx_media_alloc_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf,
+ int size);
+
+int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
+ struct media_entity *entity,
+ bool on);
+
+/* imx-media-dev-common.c */
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier);
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops);
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops);
+
+/* imx-media-fim.c */
+struct imx_media_fim;
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp);
+void imx_media_fim_set_stream(struct imx_media_fim *fim,
+ const struct v4l2_fract *frame_interval,
+ bool on);
+int imx_media_fim_add_controls(struct imx_media_fim *fim);
+struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd);
+void imx_media_fim_free(struct imx_media_fim *fim);
+
+/* imx-media-internal-sd.c */
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi);
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd);
+
+/* imx-media-of.c */
+int imx_media_add_of_subdevs(struct imx_media_dev *dev,
+ struct device_node *np);
+
+/* imx-media-vdic.c */
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_vdic_unregister(struct v4l2_subdev *sd);
+
+/* imx-ic-common.c */
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_ic_unregister(struct v4l2_subdev *sd);
+
+/* imx-media-capture.c */
+struct imx_media_video_dev *
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad, bool legacy_api);
+void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev,
+ u32 link_flags);
+void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
+struct imx_media_buffer *
+imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
+void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
+
+/* imx-media-csc-scaler.c */
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *dev);
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev);
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
+
+/* subdev group ids */
+#define IMX_MEDIA_GRP_ID_CSI2 BIT(8)
+#define IMX_MEDIA_GRP_ID_IPU_CSI_BIT 10
+#define IMX_MEDIA_GRP_ID_IPU_CSI (0x3 << IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_CSI0 BIT(IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_CSI1 (2 << IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_VDIC BIT(12)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRP BIT(13)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRPENC BIT(14)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRPVF BIT(15)
+#define IMX_MEDIA_GRP_ID_CSI_MUX BIT(16)
+
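+/*
+ * For reference, the IPU CSI group IDs above decode to:
+ *
+ *   IMX_MEDIA_GRP_ID_IPU_CSI0 = 1 << 10   = 0x0400
+ *   IMX_MEDIA_GRP_ID_IPU_CSI1 = 2 << 10   = 0x0800
+ *   IMX_MEDIA_GRP_ID_IPU_CSI  = 0x3 << 10 = 0x0c00 (mask covering both)
+ *
+ * so a check such as "grp_id & IMX_MEDIA_GRP_ID_IPU_CSI", as done in
+ * vdic_link_setup(), matches either CSI instance.
+ */
+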
+#endif
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
new file mode 100644
index 000000000000..dd8c7b3233bc
--- /dev/null
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI CSI-2 Receiver Subdev for Freescale i.MX6 SOC.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include "imx-media.h"
+
+/*
+ * there must be 5 pads: 1 input pad from the sensor, and
+ * the 4 virtual channel output pads
+ */
+#define CSI2_SINK_PAD 0
+#define CSI2_NUM_SINK_PADS 1
+#define CSI2_NUM_SRC_PADS 4
+#define CSI2_NUM_PADS 5
+
+/*
+ * The default maximum bit-rate per lane in Mbps, if the
+ * source subdev does not provide V4L2_CID_LINK_FREQ.
+ */
+#define CSI2_DEFAULT_MAX_MBPS 849
+
+struct csi2_dev {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct v4l2_async_notifier notifier;
+ struct media_pad pad[CSI2_NUM_PADS];
+ struct clk *dphy_clk;
+ struct clk *pllref_clk;
+ struct clk *pix_clk; /* what is this? */
+ void __iomem *base;
+
+ struct v4l2_subdev *remote;
+ unsigned int remote_pad;
+ unsigned short data_lanes;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ struct v4l2_mbus_framefmt format_mbus;
+
+ int stream_count;
+ struct v4l2_subdev *src_sd;
+ bool sink_linked[CSI2_NUM_SRC_PADS];
+};
+
+#define DEVICE_NAME "imx6-mipi-csi2"
+
+/* Register offsets */
+#define CSI2_VERSION 0x000
+#define CSI2_N_LANES 0x004
+#define CSI2_PHY_SHUTDOWNZ 0x008
+#define CSI2_DPHY_RSTZ 0x00c
+#define CSI2_RESETN 0x010
+#define CSI2_PHY_STATE 0x014
+#define PHY_STOPSTATEDATA_BIT 4
+#define PHY_STOPSTATEDATA(n) BIT(PHY_STOPSTATEDATA_BIT + (n))
+#define PHY_RXCLKACTIVEHS BIT(8)
+#define PHY_RXULPSCLKNOT BIT(9)
+#define PHY_STOPSTATECLK BIT(10)
+#define CSI2_DATA_IDS_1 0x018
+#define CSI2_DATA_IDS_2 0x01c
+#define CSI2_ERR1 0x020
+#define CSI2_ERR2 0x024
+#define CSI2_MSK1 0x028
+#define CSI2_MSK2 0x02c
+#define CSI2_PHY_TST_CTRL0 0x030
+#define PHY_TESTCLR BIT(0)
+#define PHY_TESTCLK BIT(1)
+#define CSI2_PHY_TST_CTRL1 0x034
+#define PHY_TESTEN BIT(16)
+/*
+ * i.MX CSI2IPU Gasket registers follow. The CSI2IPU gasket is
+ * not part of the MIPI CSI-2 core, but its registers fall in the
+ * same register map range.
+ */
+#define CSI2IPU_GASKET 0xf00
+#define CSI2IPU_YUV422_YUYV BIT(2)
+
+static inline struct csi2_dev *sd_to_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi2_dev, sd);
+}
+
+static inline struct csi2_dev *notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi2_dev, notifier);
+}
+
+/*
+ * The required sequence of MIPI CSI-2 startup as specified in the i.MX6
+ * reference manual is as follows:
+ *
+ * 1. Deassert presetn signal (global reset).
+ * It's not clear what this "global reset" signal is (maybe APB
+ * global reset), but in any case this step would probably be
+ * carried out during driver load in csi2_probe().
+ *
+ * 2. Configure MIPI Camera Sensor to put all Tx lanes in LP-11 state.
+ * This must be carried out by the MIPI sensor's s_power(ON) subdev
+ * op.
+ *
+ * 3. D-PHY initialization.
+ * 4. CSI2 Controller programming (Set N_LANES, deassert PHY_SHUTDOWNZ,
+ * deassert PHY_RSTZ, deassert CSI2_RESETN).
+ * 5. Read the PHY status register (PHY_STATE) to confirm that all data and
+ * clock lanes of the D-PHY are in LP-11 state.
+ * 6. Configure the MIPI Camera Sensor to start transmitting a clock on the
+ * D-PHY clock lane.
+ * 7. CSI2 Controller programming - Read the PHY status register (PHY_STATE)
+ * to confirm that the D-PHY is receiving a clock on the D-PHY clock lane.
+ *
+ * All steps 3 through 7 are carried out by csi2_s_stream(ON) here. Step
+ * 6 is accomplished by calling the source subdev's s_stream(ON) between
+ * steps 5 and 7.
+ */
+
+static void csi2_enable(struct csi2_dev *csi2, bool enable)
+{
+ if (enable) {
+ writel(0x1, csi2->base + CSI2_PHY_SHUTDOWNZ);
+ writel(0x1, csi2->base + CSI2_DPHY_RSTZ);
+ writel(0x1, csi2->base + CSI2_RESETN);
+ } else {
+ writel(0x0, csi2->base + CSI2_PHY_SHUTDOWNZ);
+ writel(0x0, csi2->base + CSI2_DPHY_RSTZ);
+ writel(0x0, csi2->base + CSI2_RESETN);
+ }
+}
+
+static void csi2_set_lanes(struct csi2_dev *csi2, unsigned int lanes)
+{
+ writel(lanes - 1, csi2->base + CSI2_N_LANES);
+}
+
+static void dw_mipi_csi2_phy_write(struct csi2_dev *csi2,
+ u32 test_code, u32 test_data)
+{
+ /* Clear PHY test interface */
+ writel(PHY_TESTCLR, csi2->base + CSI2_PHY_TST_CTRL0);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Raise test interface strobe signal */
+ writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Configure address write on falling edge and lower strobe signal */
+ writel(PHY_TESTEN | test_code, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Configure data write on rising edge and raise strobe signal */
+ writel(test_data, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Clear strobe signal */
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+}
+
+/*
+ * This table is based on the table documented at
+ * https://community.nxp.com/docs/DOC-94312. It assumes
+ * a 27MHz D-PHY pll reference clock.
+ */
+static const struct {
+ u32 max_mbps;
+ u32 hsfreqrange_sel;
+} hsfreq_map[] = {
+ { 90, 0x00}, {100, 0x20}, {110, 0x40}, {125, 0x02},
+ {140, 0x22}, {150, 0x42}, {160, 0x04}, {180, 0x24},
+ {200, 0x44}, {210, 0x06}, {240, 0x26}, {250, 0x46},
+ {270, 0x08}, {300, 0x28}, {330, 0x48}, {360, 0x2a},
+ {400, 0x4a}, {450, 0x0c}, {500, 0x2c}, {550, 0x0e},
+ {600, 0x2e}, {650, 0x10}, {700, 0x30}, {750, 0x12},
+ {800, 0x32}, {850, 0x14}, {900, 0x34}, {950, 0x54},
+ {1000, 0x74},
+};
+
+static int max_mbps_to_hsfreqrange_sel(u32 max_mbps)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hsfreq_map); i++)
+ if (hsfreq_map[i].max_mbps > max_mbps)
+ return hsfreq_map[i].hsfreqrange_sel;
+
+ return -EINVAL;
+}
+
+static int csi2_dphy_init(struct csi2_dev *csi2)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 mbps_per_lane;
+ int sel;
+
+ ctrl = v4l2_ctrl_find(csi2->src_sd->ctrl_handler,
+ V4L2_CID_LINK_FREQ);
+ if (!ctrl)
+ mbps_per_lane = CSI2_DEFAULT_MAX_MBPS;
+ else
+ mbps_per_lane = DIV_ROUND_UP_ULL(2 * ctrl->qmenu_int[ctrl->val],
+ USEC_PER_SEC);
+
+ sel = max_mbps_to_hsfreqrange_sel(mbps_per_lane);
+ if (sel < 0)
+ return sel;
+
+ dw_mipi_csi2_phy_write(csi2, 0x44, sel);
+
+ return 0;
+}
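+
+/*
+ * Worked example for the calculation above, assuming a hypothetical source
+ * subdev that reports a V4L2_CID_LINK_FREQ of 400 MHz: the per-lane bit rate
+ * is 2 * 400000000 / 1000000 = 800 Mbps (the factor of two accounts for
+ * double data rate signalling), and the first hsfreq_map entry with max_mbps
+ * greater than 800 is {850, 0x14}, so 0x14 is written via D-PHY test code
+ * 0x44.
+ */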
+
+/*
+ * Waits for ultra-low-power state on D-PHY clock lane. This is currently
+ * unused and may not be needed at all, but keep it around just in case.
+ */
+static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
+{
+ u32 reg;
+ int ret;
+
+ /* wait for ULP on clock lane */
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ !(reg & PHY_RXULPSCLKNOT), 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "ULP timeout, phy_state = 0x%08x\n", reg);
+ return ret;
+ }
+
+ /* wait until no errors on bus */
+ ret = readl_poll_timeout(csi2->base + CSI2_ERR1, reg,
+ reg == 0x0, 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "stable bus timeout, err1 = 0x%08x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Waits for low-power LP-11 state on data and clock lanes. */
+static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2, unsigned int lanes)
+{
+ u32 mask, reg;
+ int ret;
+
+ mask = PHY_STOPSTATECLK | (((1 << lanes) - 1) << PHY_STOPSTATEDATA_BIT);
+
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & mask) == mask, 0, 500000);
+ if (ret) {
+ v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
+ v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
+ }
+}
+
+/* Wait for active clock on the clock lane. */
+static int csi2_dphy_wait_clock_lane(struct csi2_dev *csi2)
+{
+ u32 reg;
+ int ret;
+
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & PHY_RXCLKACTIVEHS), 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "clock lane timeout, phy_state = 0x%08x\n",
+ reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Setup the i.MX CSI2IPU Gasket */
+static void csi2ipu_gasket_init(struct csi2_dev *csi2)
+{
+ u32 reg = 0;
+
+ switch (csi2->format_mbus.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ reg = CSI2IPU_YUV422_YUYV;
+ break;
+ default:
+ break;
+ }
+
+ writel(reg, csi2->base + CSI2IPU_GASKET);
+}
+
+static int csi2_get_active_lanes(struct csi2_dev *csi2, unsigned int *lanes)
+{
+ struct v4l2_mbus_config mbus_config = { 0 };
+ int ret;
+
+ *lanes = csi2->data_lanes;
+
+ ret = v4l2_subdev_call(csi2->remote, pad, get_mbus_config,
+ csi2->remote_pad, &mbus_config);
+ if (ret == -ENOIOCTLCMD) {
+ dev_dbg(csi2->dev, "No remote mbus configuration available\n");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(csi2->dev, "Failed to get remote mbus configuration\n");
+ return ret;
+ }
+
+ if (mbus_config.type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(csi2->dev, "Unsupported media bus type %u\n",
+ mbus_config.type);
+ return -EINVAL;
+ }
+
+ if (mbus_config.bus.mipi_csi2.num_data_lanes > csi2->data_lanes) {
+ dev_err(csi2->dev,
+ "Unsupported mbus config: too many data lanes %u\n",
+ mbus_config.bus.mipi_csi2.num_data_lanes);
+ return -EINVAL;
+ }
+
+ *lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
+
+ return 0;
+}
+
+static int csi2_start(struct csi2_dev *csi2)
+{
+ unsigned int lanes;
+ int ret;
+
+ ret = clk_prepare_enable(csi2->pix_clk);
+ if (ret)
+ return ret;
+
+ /* setup the gasket */
+ csi2ipu_gasket_init(csi2);
+
+ /* Step 3 */
+ ret = csi2_dphy_init(csi2);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = csi2_get_active_lanes(csi2, &lanes);
+ if (ret)
+ goto err_disable_clk;
+
+ /* Step 4 */
+ csi2_set_lanes(csi2, lanes);
+ csi2_enable(csi2, true);
+
+ /* Step 5 */
+ ret = v4l2_subdev_call(csi2->src_sd, video, pre_streamon,
+ V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto err_assert_reset;
+ csi2_dphy_wait_stopstate(csi2, lanes);
+
+ /* Step 6 */
+ ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret)
+ goto err_stop_lp11;
+
+ /* Step 7 */
+ ret = csi2_dphy_wait_clock_lane(csi2);
+ if (ret)
+ goto err_stop_upstream;
+
+ return 0;
+
+err_stop_upstream:
+ v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+err_stop_lp11:
+ v4l2_subdev_call(csi2->src_sd, video, post_streamoff);
+err_assert_reset:
+ csi2_enable(csi2, false);
+err_disable_clk:
+ clk_disable_unprepare(csi2->pix_clk);
+ return ret;
+}
+
+static void csi2_stop(struct csi2_dev *csi2)
+{
+ /* stop upstream */
+ v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+ v4l2_subdev_call(csi2->src_sd, video, post_streamoff);
+
+ csi2_enable(csi2, false);
+ clk_disable_unprepare(csi2->pix_clk);
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int csi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ int i, ret = 0;
+
+ mutex_lock(&csi2->lock);
+
+ if (!csi2->src_sd) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ for (i = 0; i < CSI2_NUM_SRC_PADS; i++) {
+ if (csi2->sink_linked[i])
+ break;
+ }
+ if (i >= CSI2_NUM_SRC_PADS) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (csi2->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(csi2->dev, "stream %s\n", enable ? "ON" : "OFF");
+ if (enable)
+ ret = csi2_start(csi2);
+ else
+ csi2_stop(csi2);
+ if (ret)
+ goto out;
+
+update_count:
+ csi2->stream_count += enable ? 1 : -1;
+ if (csi2->stream_count < 0)
+ csi2->stream_count = 0;
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(csi2->dev, "link setup %s -> %s", remote->entity->name,
+ local->entity->name);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ mutex_lock(&csi2->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->sink_linked[local->index - 1]) {
+ ret = -EBUSY;
+ goto out;
+ }
+ csi2->sink_linked[local->index - 1] = true;
+ } else {
+ csi2->sink_linked[local->index - 1] = false;
+ }
+ } else {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ csi2->src_sd = remote_sd;
+ } else {
+ csi2->src_sd = NULL;
+ }
+ }
+
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_format(sd_state, pad);
+ else
+ return &csi2->format_mbus;
+}
+
+static int csi2_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ mutex_lock(&csi2->lock);
+
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
+
+ sdformat->format = *fmt;
+
+ mutex_unlock(&csi2->lock);
+
+ return 0;
+}
+
+static int csi2_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= CSI2_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&csi2->lock);
+
+ if (csi2->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Output pads mirror active input pad, no limits on input pads */
+ if (sdformat->pad != CSI2_SINK_PAD)
+ sdformat->format = csi2->format_mbus;
+
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
+
+ *fmt = sdformat->format;
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+static int csi2_registered(struct v4l2_subdev *sd)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+
+ /* set a default mbus format */
+ return imx_media_init_mbus_fmt(&csi2->format_mbus,
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, 0,
+ V4L2_FIELD_NONE, NULL);
+}
+
+/* --------------- CORE OPS --------------- */
+
+static int csi2_log_status(struct v4l2_subdev *sd)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+
+ v4l2_info(sd, "-----MIPI CSI status-----\n");
+ v4l2_info(sd, "VERSION: 0x%x\n",
+ readl(csi2->base + CSI2_VERSION));
+ v4l2_info(sd, "N_LANES: 0x%x\n",
+ readl(csi2->base + CSI2_N_LANES));
+ v4l2_info(sd, "PHY_SHUTDOWNZ: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_SHUTDOWNZ));
+ v4l2_info(sd, "DPHY_RSTZ: 0x%x\n",
+ readl(csi2->base + CSI2_DPHY_RSTZ));
+ v4l2_info(sd, "RESETN: 0x%x\n",
+ readl(csi2->base + CSI2_RESETN));
+ v4l2_info(sd, "PHY_STATE: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_STATE));
+ v4l2_info(sd, "DATA_IDS_1: 0x%x\n",
+ readl(csi2->base + CSI2_DATA_IDS_1));
+ v4l2_info(sd, "DATA_IDS_2: 0x%x\n",
+ readl(csi2->base + CSI2_DATA_IDS_2));
+ v4l2_info(sd, "ERR1: 0x%x\n",
+ readl(csi2->base + CSI2_ERR1));
+ v4l2_info(sd, "ERR2: 0x%x\n",
+ readl(csi2->base + CSI2_ERR2));
+ v4l2_info(sd, "MSK1: 0x%x\n",
+ readl(csi2->base + CSI2_MSK1));
+ v4l2_info(sd, "MSK2: 0x%x\n",
+ readl(csi2->base + CSI2_MSK2));
+ v4l2_info(sd, "PHY_TST_CTRL0: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_TST_CTRL0));
+ v4l2_info(sd, "PHY_TST_CTRL1: 0x%x\n",
+ readl(csi2->base + CSI2_PHY_TST_CTRL1));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops csi2_core_ops = {
+ .log_status = csi2_log_status,
+};
+
+static const struct media_entity_operations csi2_entity_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+};
+
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .get_fmt = csi2_get_fmt,
+ .set_fmt = csi2_set_fmt,
+};
+
+static const struct v4l2_subdev_ops csi2_subdev_ops = {
+ .core = &csi2_core_ops,
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .init_state = imx_media_init_state,
+ .registered = csi2_registered,
+};
+
+static int csi2_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct csi2_dev *csi2 = notifier_to_dev(notifier);
+ struct media_pad *sink = &csi2->sd.entity.pads[CSI2_SINK_PAD];
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&sd->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(csi2->dev, "Failed to find pad for %s\n", sd->name);
+ return pad;
+ }
+
+ csi2->remote = sd;
+ csi2->remote_pad = pad;
+
+ dev_dbg(csi2->dev, "Bound %s pad: %d\n", sd->name, pad);
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, 0);
+}
+
+static void csi2_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct csi2_dev *csi2 = notifier_to_dev(notifier);
+
+ csi2->remote = NULL;
+}
+
+static const struct v4l2_async_notifier_operations csi2_notify_ops = {
+ .bound = csi2_notify_bound,
+ .unbind = csi2_notify_unbind,
+};
+
+static int csi2_async_register(struct csi2_dev *csi2)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_subdev_nf_init(&csi2->notifier, &csi2->sd);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ csi2->data_lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+ dev_dbg(csi2->dev, "data lanes: %d\n", vep.bus.mipi_csi2.num_data_lanes);
+ dev_dbg(csi2->dev, "flags: 0x%08x\n", vep.bus.mipi_csi2.flags);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2->notifier, ep,
+ struct v4l2_async_connection);
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ csi2->notifier.ops = &csi2_notify_ops;
+
+ ret = v4l2_async_nf_register(&csi2->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&csi2->sd);
+
+err_parse:
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static int csi2_probe(struct platform_device *pdev)
+{
+ struct csi2_dev *csi2;
+ struct resource *res;
+ int i, ret;
+
+ csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
+ if (!csi2)
+ return -ENOMEM;
+
+ csi2->dev = &pdev->dev;
+
+ v4l2_subdev_init(&csi2->sd, &csi2_subdev_ops);
+ v4l2_set_subdevdata(&csi2->sd, &pdev->dev);
+ csi2->sd.internal_ops = &csi2_internal_ops;
+ csi2->sd.entity.ops = &csi2_entity_ops;
+ csi2->sd.dev = &pdev->dev;
+ csi2->sd.owner = THIS_MODULE;
+ csi2->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strscpy(csi2->sd.name, DEVICE_NAME, sizeof(csi2->sd.name));
+ csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;
+
+ for (i = 0; i < CSI2_NUM_PADS; i++) {
+ csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&csi2->sd.entity, CSI2_NUM_PADS,
+ csi2->pad);
+ if (ret)
+ return ret;
+
+ csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(csi2->pllref_clk)) {
+ v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
+ return PTR_ERR(csi2->pllref_clk);
+ }
+
+ csi2->dphy_clk = devm_clk_get(&pdev->dev, "dphy");
+ if (IS_ERR(csi2->dphy_clk)) {
+ v4l2_err(&csi2->sd, "failed to get dphy clock\n");
+ return PTR_ERR(csi2->dphy_clk);
+ }
+
+ csi2->pix_clk = devm_clk_get(&pdev->dev, "pix");
+ if (IS_ERR(csi2->pix_clk)) {
+ v4l2_err(&csi2->sd, "failed to get pixel clock\n");
+ return PTR_ERR(csi2->pix_clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ v4l2_err(&csi2->sd, "failed to get platform resources\n");
+ return -ENODEV;
+ }
+
+ csi2->base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
+ if (!csi2->base)
+ return -ENOMEM;
+
+ mutex_init(&csi2->lock);
+
+ ret = clk_prepare_enable(csi2->pllref_clk);
+ if (ret) {
+ v4l2_err(&csi2->sd, "failed to enable pllref_clk\n");
+ goto rmmutex;
+ }
+
+ ret = clk_prepare_enable(csi2->dphy_clk);
+ if (ret) {
+ v4l2_err(&csi2->sd, "failed to enable dphy_clk\n");
+ goto pllref_off;
+ }
+
+ platform_set_drvdata(pdev, &csi2->sd);
+
+ ret = csi2_async_register(csi2);
+ if (ret)
+ goto clean_notifier;
+
+ return 0;
+
+clean_notifier:
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
+ clk_disable_unprepare(csi2->dphy_clk);
+pllref_off:
+ clk_disable_unprepare(csi2->pllref_clk);
+rmmutex:
+ mutex_destroy(&csi2->lock);
+ return ret;
+}
+
+static void csi2_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
+ v4l2_async_unregister_subdev(sd);
+ clk_disable_unprepare(csi2->dphy_clk);
+ clk_disable_unprepare(csi2->pllref_clk);
+ mutex_destroy(&csi2->lock);
+ media_entity_cleanup(&sd->entity);
+}
+
+static const struct of_device_id csi2_dt_ids[] = {
+ { .compatible = "fsl,imx6-mipi-csi2", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, csi2_dt_ids);
+
+static struct platform_driver csi2_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = csi2_dt_ids,
+ },
+ .probe = csi2_probe,
+ .remove = csi2_remove,
+};
+
+module_platform_driver(csi2_driver);
+
+MODULE_DESCRIPTION("i.MX5/6 MIPI CSI-2 Receiver driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/ipu3/Kconfig b/drivers/staging/media/ipu3/Kconfig
new file mode 100644
index 000000000000..114a1d8e7cc8
--- /dev/null
+++ b/drivers/staging/media/ipu3/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_IPU3_IMGU
+ tristate "Intel ipu3-imgu driver"
+ depends on PCI && VIDEO_DEV
+ depends on X86
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select IOMMU_IOVA
+ select VIDEOBUF2_DMA_SG
+ help
+ This is the Video4Linux2 driver for Intel IPU3 image processing unit,
+ found in Intel Skylake and Kaby Lake SoCs and used for processing
+ images and video.
+
+ Say Y or M here if you have a Skylake/Kaby Lake SoC with a MIPI
+ camera. The module will be called ipu3-imgu.
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
new file mode 100644
index 000000000000..9def80ef28f3
--- /dev/null
+++ b/drivers/staging/media/ipu3/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the IPU3 ImgU drivers
+#
+
+ipu3-imgu-objs += \
+ ipu3-mmu.o ipu3-dmamap.o \
+ ipu3-tables.o ipu3-css-pool.o \
+ ipu3-css-fw.o ipu3-css-params.o \
+ ipu3-css.o ipu3-v4l2.o ipu3.o
+
+obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3-imgu.o
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
new file mode 100644
index 000000000000..3fd1fc10b68d
--- /dev/null
+++ b/drivers/staging/media/ipu3/TODO
@@ -0,0 +1,12 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+
+- Request API conversion. Remove the dual pipeline and associate buffers,
+  as well as formats and the binary used, with a request. Remove the
+ opportunistic buffer management. (Sakari)
+
+- IPU3 driver documentation (Laurent)
+ Comments on configuring v4l2 subdevs for CIO2 and ImgU.
+
+- Elaborate the functionality of different selection rectangles in driver
+ documentation. This may require driver changes as well.
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
new file mode 100644
index 000000000000..8b85524beb59
--- /dev/null
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -0,0 +1,2820 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017 - 2018 Intel Corporation */
+
+#ifndef __IPU3_UAPI_H
+#define __IPU3_UAPI_H
+
+#include <linux/types.h>
+
+/* from /drivers/staging/media/ipu3/include/videodev2.h */
+
+/* Vendor specific - used for IPU3 camera sub-system */
+/* IPU3 processing parameters */
+#define V4L2_META_FMT_IPU3_PARAMS v4l2_fourcc('i', 'p', '3', 'p')
+/* IPU3 3A statistics */
+#define V4L2_META_FMT_IPU3_STAT_3A v4l2_fourcc('i', 'p', '3', 's')
+
+/* from include/uapi/linux/v4l2-controls.h */
+#define V4L2_CID_INTEL_IPU3_BASE (V4L2_CID_USER_BASE + 0x10c0)
+#define V4L2_CID_INTEL_IPU3_MODE (V4L2_CID_INTEL_IPU3_BASE + 1)
+
+/******************* ipu3_uapi_stats_3a *******************/
+
+#define IPU3_UAPI_MAX_STRIPES 2
+#define IPU3_UAPI_MAX_BUBBLE_SIZE 10
+
+#define IPU3_UAPI_GRID_START_MASK ((1 << 12) - 1)
+#define IPU3_UAPI_GRID_Y_START_EN (1 << 15)
+
+/* controls generation of meta_data (like FF enable/disable) */
+#define IPU3_UAPI_AWB_RGBS_THR_B_EN (1 << 14)
+#define IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT (1 << 15)
+
+/**
+ * struct ipu3_uapi_grid_config - Grid plane config
+ *
+ * @width: Grid horizontal dimensions, in number of grid blocks(cells).
+ * For AWB, the range is (16, 80).
+ * For AF/AE, the range is (16, 32).
+ * @height: Grid vertical dimensions, in number of grid cells.
+ * For AWB, the range is (16, 60).
+ * For AF/AE, the range is (16, 24).
+ * @block_width_log2: Log2 of the width of each cell in pixels.
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
+ * @block_height_log2: Log2 of the height of each cell in pixels.
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
+ * @height_per_slice: The number of blocks in vertical axis per slice.
+ * Default 2.
+ * @x_start: X value of top left corner of Region of Interest(ROI).
+ * @y_start: Y value of top left corner of ROI
+ * @x_end: X value of bottom right corner of ROI
+ * @y_end: Y value of bottom right corner of ROI
+ *
+ * Due to the size of total amount of collected data, most statistics
+ * create a grid-based output, and the data is then divided into "slices".
+ */
+struct ipu3_uapi_grid_config {
+ __u8 width;
+ __u8 height;
+ __u16 block_width_log2:3;
+ __u16 block_height_log2:3;
+ __u16 height_per_slice:8;
+ __u16 x_start;
+ __u16 y_start;
+ __u16 x_end;
+ __u16 y_end;
+} __packed;
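+
+/*
+ * Example grid sizing (numbers chosen for illustration): an AWB grid with
+ * width = 80, height = 60 and block_width_log2 = block_height_log2 = 4 uses
+ * 16x16-pixel cells and therefore covers a 1280x960 region of interest
+ * starting at (x_start, y_start).
+ */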
+
+/**
+ * struct ipu3_uapi_awb_set_item - Memory layout for each cell in AWB
+ *
+ * @Gr_avg: Green average for red lines in the cell.
+ * @R_avg: Red average in the cell.
+ * @B_avg: Blue average in the cell.
+ * @Gb_avg: Green average for blue lines in the cell.
+ * @sat_ratio: Percentage of pixels over the thresholds specified in
+ * ipu3_uapi_awb_config_s, coded from 0 to 255.
+ * @padding0: Unused byte for padding.
+ * @padding1: Unused byte for padding.
+ * @padding2: Unused byte for padding.
+ */
+struct ipu3_uapi_awb_set_item {
+ __u8 Gr_avg;
+ __u8 R_avg;
+ __u8 B_avg;
+ __u8 Gb_avg;
+ __u8 sat_ratio;
+ __u8 padding0;
+ __u8 padding1;
+ __u8 padding2;
+} __attribute__((packed));
+
+/*
+ * The grid-based data is divided into "slices" called sets; each set covers
+ * ipu3_uapi_grid_config width * height_per_slice grid cells.
+ */
+#define IPU3_UAPI_AWB_MAX_SETS 60
+/* Based on grid size 80 * 60 and cell size 16 x 16 */
+#define IPU3_UAPI_AWB_SET_SIZE 160
+#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES)
+#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
+ (IPU3_UAPI_AWB_MAX_SETS * \
+ (IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
+
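+/*
+ * Plugging in the values above: each set holds 160 cells plus
+ * 10 * 2 = 20 spare entries for bubbles, so the maximum AWB buffer is
+ * 60 * (160 + 20) = 10800 struct ipu3_uapi_awb_set_item entries.
+ */
+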
+/**
+ * struct ipu3_uapi_awb_raw_buffer - AWB raw buffer
+ *
+ * @meta_data: buffer to hold auto white balance meta data, i.e.
+ * the average values for each color channel.
+ */
+struct ipu3_uapi_awb_raw_buffer {
+ struct ipu3_uapi_awb_set_item meta_data[IPU3_UAPI_AWB_MAX_BUFFER_SIZE]
+ __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_awb_config_s - AWB config
+ *
+ * @rgbs_thr_gr: gr threshold value.
+ * @rgbs_thr_r: Red threshold value.
+ * @rgbs_thr_gb: gb threshold value.
+ * @rgbs_thr_b: Blue threshold value.
+ * @grid: &ipu3_uapi_grid_config, the default grid resolution is 16x16 cells.
+ *
+ * The threshold is a saturation measure range [0, 8191], 8191 is default.
+ * Values over threshold may be optionally rejected for averaging.
+ */
+struct ipu3_uapi_awb_config_s {
+ __u16 rgbs_thr_gr;
+ __u16 rgbs_thr_r;
+ __u16 rgbs_thr_gb;
+ __u16 rgbs_thr_b;
+ struct ipu3_uapi_grid_config grid;
+} __attribute__((aligned(32))) __packed;
+
+/**
+ * struct ipu3_uapi_awb_config - AWB config wrapper
+ *
+ * @config: config for auto white balance as defined by &ipu3_uapi_awb_config_s
+ */
+struct ipu3_uapi_awb_config {
+ struct ipu3_uapi_awb_config_s config __attribute__((aligned(32)));
+} __packed;
+
+#define IPU3_UAPI_AE_COLORS 4 /* R, G, B, Y */
+#define IPU3_UAPI_AE_BINS 256
+#define IPU3_UAPI_AE_WEIGHTS 96
+
+/**
+ * struct ipu3_uapi_ae_raw_buffer - AE global weighted histogram
+ *
+ * @vals: Sum of IPU3_UAPI_AE_COLORS in cell
+ *
+ * Each histogram contains IPU3_UAPI_AE_BINS bins. Each bin is a 24-bit
+ * unsigned counter of the number of pixels.
+ */
+struct ipu3_uapi_ae_raw_buffer {
+ __u32 vals[IPU3_UAPI_AE_BINS * IPU3_UAPI_AE_COLORS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_ae_raw_buffer_aligned - AE raw buffer
+ *
+ * @buff: &ipu3_uapi_ae_raw_buffer to hold full frame meta data.
+ */
+struct ipu3_uapi_ae_raw_buffer_aligned {
+ struct ipu3_uapi_ae_raw_buffer buff __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_ae_grid_config - AE weight grid
+ *
+ * @width: Grid horizontal dimensions. Value: [16, 32], default 16.
+ * @height: Grid vertical dimensions. Value: [16, 24], default 16.
+ * @block_width_log2: Log2 of the width of the grid cell, value: [3, 7].
+ * @block_height_log2: Log2 of the height of the grid cell, value: [3, 7].
+ * default is 3 (cell size 8x8), 4 cell per grid.
+ * @reserved0: reserved
+ * @ae_en: 0: does not write to &ipu3_uapi_ae_raw_buffer_aligned array,
+ * 1: write normally.
+ * @rst_hist_array: write 1 to trigger histogram array reset.
+ * @done_rst_hist_array: flag for histogram array reset done.
+ * @x_start: X value of top left corner of ROI, default 0.
+ * @y_start: Y value of top left corner of ROI, default 0.
+ * @x_end: X value of bottom right corner of ROI
+ * @y_end: Y value of bottom right corner of ROI
+ *
+ * The AE block accumulates 4 global weighted histograms(R, G, B, Y) over
+ * a defined ROI within the frame. The contribution of each pixel into the
+ * histogram, defined by &ipu3_uapi_ae_weight_elem LUT, is indexed by a grid.
+ */
+struct ipu3_uapi_ae_grid_config {
+ __u8 width;
+ __u8 height;
+ __u8 block_width_log2:4;
+ __u8 block_height_log2:4;
+ __u8 reserved0:5;
+ __u8 ae_en:1;
+ __u8 rst_hist_array:1;
+ __u8 done_rst_hist_array:1;
+ __u16 x_start;
+ __u16 y_start;
+ __u16 x_end;
+ __u16 y_end;
+} __packed;
+
+/**
+ * struct ipu3_uapi_ae_weight_elem - AE weights LUT
+ *
+ * @cell0: weighted histogram grid value.
+ * @cell1: weighted histogram grid value.
+ * @cell2: weighted histogram grid value.
+ * @cell3: weighted histogram grid value.
+ * @cell4: weighted histogram grid value.
+ * @cell5: weighted histogram grid value.
+ * @cell6: weighted histogram grid value.
+ * @cell7: weighted histogram grid value.
+ *
+ * Use weighted grid value to give a different contribution factor to each cell.
+ * Precision u4, range [0, 15].
+ */
+struct ipu3_uapi_ae_weight_elem {
+ __u32 cell0:4;
+ __u32 cell1:4;
+ __u32 cell2:4;
+ __u32 cell3:4;
+ __u32 cell4:4;
+ __u32 cell5:4;
+ __u32 cell6:4;
+ __u32 cell7:4;
+} __packed;
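+
+/*
+ * Sizing note: each ipu3_uapi_ae_weight_elem packs 8 cells into one 32-bit
+ * word, so the IPU3_UAPI_AE_WEIGHTS (96) LUT entries cover 96 * 8 = 768
+ * cells, i.e. the full 32x24 weight grid referenced by ipu3_uapi_ae_config.
+ */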
+
+/**
+ * struct ipu3_uapi_ae_ccm - AE coefficients for WB and CCM
+ *
+ * @gain_gr: WB gain factor for the gr channels. Default 256.
+ * @gain_r: WB gain factor for the r channel. Default 256.
+ * @gain_b: WB gain factor for the b channel. Default 256.
+ * @gain_gb: WB gain factor for the gb channels. Default 256.
+ * @mat: 4x4 matrix that transforms Bayer quad output from WB to RGB+Y.
+ *
+ * Default:
+ * 128, 0, 0, 0,
+ * 0, 128, 0, 0,
+ * 0, 0, 128, 0,
+ * 0, 0, 0, 128,
+ *
+ * As part of the raw frame pre-process stage, the WB and color conversion need
+ * to be applied to expose the impact of these gain operations.
+ */
+struct ipu3_uapi_ae_ccm {
+ __u16 gain_gr;
+ __u16 gain_r;
+ __u16 gain_b;
+ __u16 gain_gb;
+ __s16 mat[16];
+} __packed;
+
+/**
+ * struct ipu3_uapi_ae_config - AE config
+ *
+ * @grid_cfg: config for auto exposure statistics grid. See struct
+ * &ipu3_uapi_ae_grid_config. Since the ImgU does not output
+ * auto exposure statistics, the user can ignore this configuration
+ * and use the RGB table in the auto-whitebalance statistics instead.
+ * @weights: &IPU3_UAPI_AE_WEIGHTS is based on 32x24 blocks in the grid.
+ * Each grid cell has a corresponding value in weights LUT called
+ * grid value, global histogram is updated based on grid value and
+ * pixel value.
+ * @ae_ccm: Color convert matrix pre-processing block.
+ *
+ * Calculate AE grid from image resolution, resample ae weights.
+ */
+struct ipu3_uapi_ae_config {
+ struct ipu3_uapi_ae_grid_config grid_cfg __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS]
+ __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_ccm ae_ccm __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_af_filter_config - AF 2D filter for contrast measurements
+ *
+ * @y1_coeff_0: filter Y1, structure: 3x11, supports both symmetric and
+ * anti-symmetric types. A12 is the center, A1-A11 are neighbours.
+ * Used for analyzing low frequency content and to calculate the
+ * sum of gradients in the x direction.
+ * @y1_coeff_0.a1: filter1 coefficients A1, u8, default 0.
+ * @y1_coeff_0.a2: filter1 coefficients A2, u8, default 0.
+ * @y1_coeff_0.a3: filter1 coefficients A3, u8, default 0.
+ * @y1_coeff_0.a4: filter1 coefficients A4, u8, default 0.
+ * @y1_coeff_1: Struct
+ * @y1_coeff_1.a5: filter1 coefficients A5, u8, default 0.
+ * @y1_coeff_1.a6: filter1 coefficients A6, u8, default 0.
+ * @y1_coeff_1.a7: filter1 coefficients A7, u8, default 0.
+ * @y1_coeff_1.a8: filter1 coefficients A8, u8, default 0.
+ * @y1_coeff_2: Struct
+ * @y1_coeff_2.a9: filter1 coefficients A9, u8, default 0.
+ * @y1_coeff_2.a10: filter1 coefficients A10, u8, default 0.
+ * @y1_coeff_2.a11: filter1 coefficients A11, u8, default 0.
+ * @y1_coeff_2.a12: filter1 coefficients A12, u8, default 128.
+ * @y1_sign_vec: Each bit corresponds to one coefficient sign bit,
+ * 0: positive, 1: negative, default 0.
+ * @y2_coeff_0: Y2, same structure as Y1. For analyzing high frequency content.
+ * @y2_coeff_0.a1: filter2 coefficients A1, u8, default 0.
+ * @y2_coeff_0.a2: filter2 coefficients A2, u8, default 0.
+ * @y2_coeff_0.a3: filter2 coefficients A3, u8, default 0.
+ * @y2_coeff_0.a4: filter2 coefficients A4, u8, default 0.
+ * @y2_coeff_1: Struct
+ * @y2_coeff_1.a5: filter2 coefficients A5, u8, default 0.
+ * @y2_coeff_1.a6: filter2 coefficients A6, u8, default 0.
+ * @y2_coeff_1.a7: filter2 coefficients A7, u8, default 0.
+ * @y2_coeff_1.a8: filter2 coefficients A8, u8, default 0.
+ * @y2_coeff_2: Struct
+ * @y2_coeff_2.a9: filter1 coefficients A9, u8, default 0.
+ * @y2_coeff_2.a10: filter1 coefficients A10, u8, default 0.
+ * @y2_coeff_2.a11: filter1 coefficients A11, u8, default 0.
+ * @y2_coeff_2.a12: filter1 coefficients A12, u8, default 128.
+ * @y2_sign_vec: Each bit corresponds to one coefficient sign bit,
+ * 0: positive, 1: negative, default 0.
+ * @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
+ * used for building histogram. Range [0, 32], default 8.
+ *
+ * Rule:
+ * y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
+ * A single Y is calculated as the sum of Gr/R/B/Gb weighted by
+ * their contribution ratios.
+ * @y_calc.y_gen_rate_gr: Contribution ratio Gr for Y
+ * @y_calc.y_gen_rate_r: Contribution ratio R for Y
+ * @y_calc.y_gen_rate_b: Contribution ratio B for Y
+ * @y_calc.y_gen_rate_gb: Contribution ratio Gb for Y
+ * @nf: The shift right value that should be applied during the Y1/Y2 filter to
+ * make sure the total memory needed is 2 bytes per grid cell.
+ * @nf.reserved0: reserved
+ * @nf.y1_nf: Normalization factor for the convolution coeffs of y1,
+ * should be log2 of the sum of the abs values of the filter
+ * coeffs, default 7 (2^7 = 128).
+ * @nf.reserved1: reserved
+ * @nf.y2_nf: Normalization factor for y2, should be log2 of the sum of the
+ * abs values of the filter coeffs.
+ * @nf.reserved2: reserved
+ */
+struct ipu3_uapi_af_filter_config {
+ struct {
+ __u8 a1;
+ __u8 a2;
+ __u8 a3;
+ __u8 a4;
+ } y1_coeff_0;
+ struct {
+ __u8 a5;
+ __u8 a6;
+ __u8 a7;
+ __u8 a8;
+ } y1_coeff_1;
+ struct {
+ __u8 a9;
+ __u8 a10;
+ __u8 a11;
+ __u8 a12;
+ } y1_coeff_2;
+
+ __u32 y1_sign_vec;
+
+ struct {
+ __u8 a1;
+ __u8 a2;
+ __u8 a3;
+ __u8 a4;
+ } y2_coeff_0;
+ struct {
+ __u8 a5;
+ __u8 a6;
+ __u8 a7;
+ __u8 a8;
+ } y2_coeff_1;
+ struct {
+ __u8 a9;
+ __u8 a10;
+ __u8 a11;
+ __u8 a12;
+ } y2_coeff_2;
+
+ __u32 y2_sign_vec;
+
+ struct {
+ __u8 y_gen_rate_gr;
+ __u8 y_gen_rate_r;
+ __u8 y_gen_rate_b;
+ __u8 y_gen_rate_gb;
+ } y_calc;
+
+ struct {
+ __u32 reserved0:8;
+ __u32 y1_nf:4;
+ __u32 reserved1:4;
+ __u32 y2_nf:4;
+ __u32 reserved2:12;
+ } nf;
+} __packed;
+
+#define IPU3_UAPI_AF_MAX_SETS 24
+#define IPU3_UAPI_AF_MD_ITEM_SIZE 4
+#define IPU3_UAPI_AF_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AF_MD_ITEM_SIZE)
+#define IPU3_UAPI_AF_Y_TABLE_SET_SIZE 128
+#define IPU3_UAPI_AF_Y_TABLE_MAX_SIZE \
+ (IPU3_UAPI_AF_MAX_SETS * \
+ (IPU3_UAPI_AF_Y_TABLE_SET_SIZE + IPU3_UAPI_AF_SPARE_FOR_BUBBLES) * \
+ IPU3_UAPI_MAX_STRIPES)
+
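+/*
+ * With the values above, the worst-case AF Y table size is
+ * 24 * (128 + 10 * 2 * 4) * 2 = 24 * 208 * 2 = 9984 bytes.
+ */
+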
+/**
+ * struct ipu3_uapi_af_raw_buffer - AF meta data
+ *
+ * @y_table: Each color component will be convolved separately with filter1
+ * and filter2 and the result will be summed out and averaged for
+ * each cell.
+ */
+struct ipu3_uapi_af_raw_buffer {
+ __u8 y_table[IPU3_UAPI_AF_Y_TABLE_MAX_SIZE] __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_af_config_s - AF config
+ *
+ * @filter_config: AF uses Y1 and Y2 filters as configured in
+ * &ipu3_uapi_af_filter_config
+ * @padding: paddings
+ * @grid_cfg: See &ipu3_uapi_grid_config, default resolution 16x16. Use a
+ * large grid size for large images and vice versa.
+ */
+struct ipu3_uapi_af_config_s {
+ struct ipu3_uapi_af_filter_config filter_config __attribute__((aligned(32)));
+ __u8 padding[4];
+ struct ipu3_uapi_grid_config grid_cfg __attribute__((aligned(32)));
+} __packed;
+
+#define IPU3_UAPI_AWB_FR_MAX_SETS 24
+#define IPU3_UAPI_AWB_FR_MD_ITEM_SIZE 8
+#define IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE 256
+#define IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AWB_FR_MD_ITEM_SIZE)
+#define IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE \
+ (IPU3_UAPI_AWB_FR_MAX_SETS * \
+ (IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE + \
+ IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES) * IPU3_UAPI_MAX_STRIPES)
+
+/**
+ * struct ipu3_uapi_awb_fr_raw_buffer - AWB filter response meta data
+ *
+ * @meta_data: Statistics output on the grid after convolving with 1D filter.
+ */
+struct ipu3_uapi_awb_fr_raw_buffer {
+ __u8 meta_data[IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE]
+ __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_awb_fr_config_s - AWB filter response config
+ *
+ * @grid_cfg: grid config, default 16x16.
+ * @bayer_coeff: 1D filter, 1x11, center symmetric/anti-symmetric.
+ * Coefficient defaults are { 0, 0, 0, 0, 0, 128 }.
+ * Applied to the whole image for each Bayer channel separately,
+ * as a weighted sum of its 11x1 neighbors.
+ * @reserved1: reserved
+ * @bayer_sign: sign of filter coefficients, default 0.
+ * @bayer_nf: normalization factor for the convolution coeffs, to make sure
+ * total memory needed is within pre-determined range.
+ * NF should be the log2 of the sum of the abs values of the
+ * filter coeffs, range [7, 14], default 7.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_awb_fr_config_s {
+ struct ipu3_uapi_grid_config grid_cfg;
+ __u8 bayer_coeff[6];
+ __u16 reserved1;
+ __u32 bayer_sign;
+ __u8 bayer_nf;
+ __u8 reserved2[7];
+} __packed;
+
+/**
+ * struct ipu3_uapi_4a_config - 4A config
+ *
+ * @awb_config: &ipu3_uapi_awb_config_s, default resolution 16x16
+ * @ae_grd_config: auto exposure statistics &ipu3_uapi_ae_grid_config
+ * @padding: paddings
+ * @af_config: auto focus config &ipu3_uapi_af_config_s
+ * @awb_fr_config: &ipu3_uapi_awb_fr_config_s, default resolution 16x16
+ */
+struct ipu3_uapi_4a_config {
+ struct ipu3_uapi_awb_config_s awb_config __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_grid_config ae_grd_config;
+ __u8 padding[20];
+ struct ipu3_uapi_af_config_s af_config;
+ struct ipu3_uapi_awb_fr_config_s awb_fr_config
+ __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_bubble_info - Bubble info for host side debugging
+ *
+ * @num_of_stripes: A single frame is divided into several parts called stripes
+ * due to limitation on line buffer memory.
+ * The separation between the stripes is vertical. Each such
+ * stripe is processed as a single frame by the ISP pipe.
+ * @padding: padding bytes.
+ * @num_sets: number of sets.
+ * @padding1: padding bytes.
+ * @size_of_set: set size.
+ * @padding2: padding bytes.
+ * @bubble_size: the amount of padding in the bubble, expressed in "sets".
+ * @padding3: padding bytes.
+ */
+struct ipu3_uapi_bubble_info {
+ __u32 num_of_stripes __attribute__((aligned(32)));
+ __u8 padding[28];
+ __u32 num_sets;
+ __u8 padding1[28];
+ __u32 size_of_set;
+ __u8 padding2[28];
+ __u32 bubble_size;
+ __u8 padding3[28];
+} __packed;
+
+/*
+ * struct ipu3_uapi_stats_3a_bubble_info_per_stripe
+ */
+struct ipu3_uapi_stats_3a_bubble_info_per_stripe {
+ struct ipu3_uapi_bubble_info awb[IPU3_UAPI_MAX_STRIPES];
+ struct ipu3_uapi_bubble_info af[IPU3_UAPI_MAX_STRIPES];
+ struct ipu3_uapi_bubble_info awb_fr[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+/**
+ * struct ipu3_uapi_ff_status - Enable bits for each 3A fixed function
+ *
+ * @awb_en: auto white balance enable
+ * @padding: padding config
+ * @ae_en: auto exposure enable
+ * @padding1: padding config
+ * @af_en: auto focus enable
+ * @padding2: padding config
+ * @awb_fr_en: awb filter response enable bit
+ * @padding3: padding config
+ */
+struct ipu3_uapi_ff_status {
+ __u32 awb_en __attribute__((aligned(32)));
+ __u8 padding[28];
+ __u32 ae_en;
+ __u8 padding1[28];
+ __u32 af_en;
+ __u8 padding2[28];
+ __u32 awb_fr_en;
+ __u8 padding3[28];
+} __packed;
+
+/**
+ * struct ipu3_uapi_stats_3a - 3A statistics
+ *
+ * @awb_raw_buffer: auto white balance meta data &ipu3_uapi_awb_raw_buffer
+ * @ae_raw_buffer: auto exposure raw data &ipu3_uapi_ae_raw_buffer_aligned.
+ *		  The current ImgU does not output the auto exposure statistics
+ *		  to ae_raw_buffer; a user such as a 3A algorithm can use the
+ *		  RGB table in &ipu3_uapi_awb_raw_buffer to do auto exposure.
+ * @af_raw_buffer: &ipu3_uapi_af_raw_buffer for auto focus meta data
+ * @awb_fr_raw_buffer: value as specified by &ipu3_uapi_awb_fr_raw_buffer
+ * @stats_4a_config: 4a statistics config as defined by &ipu3_uapi_4a_config.
+ * @ae_join_buffers: 1 to use ae_raw_buffer.
+ * @padding: padding config
+ * @stats_3a_bubble_per_stripe: a &ipu3_uapi_stats_3a_bubble_info_per_stripe
+ * @stats_3a_status: 3a statistics status set in &ipu3_uapi_ff_status
+ */
+struct ipu3_uapi_stats_3a {
+ struct ipu3_uapi_awb_raw_buffer awb_raw_buffer;
+ struct ipu3_uapi_ae_raw_buffer_aligned
+ ae_raw_buffer[IPU3_UAPI_MAX_STRIPES];
+ struct ipu3_uapi_af_raw_buffer af_raw_buffer;
+ struct ipu3_uapi_awb_fr_raw_buffer awb_fr_raw_buffer;
+ struct ipu3_uapi_4a_config stats_4a_config;
+ __u32 ae_join_buffers;
+ __u8 padding[28];
+ struct ipu3_uapi_stats_3a_bubble_info_per_stripe
+ stats_3a_bubble_per_stripe;
+ struct ipu3_uapi_ff_status stats_3a_status;
+} __packed;
+
+/******************* ipu3_uapi_acc_param *******************/
+
+#define IPU3_UAPI_ISP_VEC_ELEMS 64
+#define IPU3_UAPI_ISP_TNR3_VMEM_LEN 9
+
+#define IPU3_UAPI_BNR_LUT_SIZE 32
+
+/* number of elements in gamma correction LUT */
+#define IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES 256
+
+/* largest grid is 73x56, for grid_height_per_slice of 2, 73x2 = 146 */
+#define IPU3_UAPI_SHD_MAX_CELLS_PER_SET 146
+#define IPU3_UAPI_SHD_MAX_CFG_SETS 28
+/* Normalization shift aka nf */
+#define IPU3_UAPI_SHD_BLGR_NF_SHIFT 13
+#define IPU3_UAPI_SHD_BLGR_NF_MASK 7
+
+#define IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS 16
+#define IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS 14
+#define IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS 258
+#define IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS 24
+
+#define IPU3_UAPI_ANR_LUT_SIZE 26
+#define IPU3_UAPI_ANR_PYRAMID_SIZE 22
+
+#define IPU3_UAPI_LIN_LUT_SIZE 64
+
+/* Bayer Noise Reduction related structs */
+
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_config - White balance gains
+ *
+ * @gr: white balance gain for Gr channel.
+ * @r: white balance gain for R channel.
+ * @b: white balance gain for B channel.
+ * @gb: white balance gain for Gb channel.
+ *
+ * For BNR parameters, the WB gain factor for the four channels [Gr, R, B, Gb].
+ * Their precision is u3.13, the range is (0, 8) and the actual gain is
+ * Gx + 1; typically Gx = 1.
+ *
+ * Pout = Pin * (1 + Gx).
+ */
+struct ipu3_uapi_bnr_static_config_wb_gains_config {
+ __u16 gr;
+ __u16 r;
+ __u16 b;
+ __u16 gb;
+} __packed;
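+/*
+ * Illustrative sketch (hypothetical user-space helper, not part of this
+ * UAPI): encode a desired total white balance gain into the u3.13 value
+ * used above, following the documented Pout = Pin * (1 + Gx) relation.
+ * A total gain of 2.0 therefore becomes Gx = 1.0, i.e. 1 << 13 = 8192.
+ */
+static inline __u16 ipu3_example_wb_gain(double total_gain)
+{
+	double gx = total_gain - 1.0;	/* the actual gain is Gx + 1 */
+
+	if (gx < 0.0)
+		gx = 0.0;
+	if (gx > 7.9998)		/* keep within the (0, 8) range */
+		gx = 7.9998;
+	return (__u16)(gx * 8192.0);	/* 2^13 fractional bits */
+}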
+
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_thr_config - Threshold config
+ *
+ * @gr: white balance threshold gain for Gr channel.
+ * @r: white balance threshold gain for R channel.
+ * @b: white balance threshold gain for B channel.
+ * @gb: white balance threshold gain for Gb channel.
+ *
+ * Defines the threshold that specifies how different a defect pixel can be
+ * from its neighbors (used by the dynamic defect pixel correction sub block).
+ * Precision u4.4, range [0, 8].
+ */
+struct ipu3_uapi_bnr_static_config_wb_gains_thr_config {
+ __u8 gr;
+ __u8 r;
+ __u8 b;
+ __u8 gb;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_coeffs_config - Noise model
+ * coefficients that controls noise threshold
+ *
+ * @cf: Free coefficient for threshold calculation, range [0, 8191], default 0.
+ * @reserved0: reserved
+ * @cg: Gain coefficient for threshold calculation, [0, 31], default 8.
+ * @ci: Intensity coefficient for threshold calculation. range [0, 0x1f]
+ * default 6.
+ * format: u3.2 (3 most significant bits represent whole number,
+ * 2 least significant bits represent the fractional part
+ * with each count representing 0.25)
+ * e.g. 6 in binary format is 00110, that translates to 1.5
+ * @reserved1: reserved
+ * @r_nf:	Normalization shift value for r^2 calculation, range [12, 20],
+ *		where r is the radius of pixel [row, col] from the center of
+ *		the sensor, default 14.
+ *
+ * Threshold used to distinguish between noise and details.
+ */
+struct ipu3_uapi_bnr_static_config_thr_coeffs_config {
+ __u32 cf:13;
+ __u32 reserved0:3;
+ __u32 cg:5;
+ __u32 ci:5;
+ __u32 reserved1:1;
+ __u32 r_nf:5;
+} __packed;
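+/*
+ * Illustrative sketch (hypothetical helper, not part of this UAPI): the u3.2
+ * format used by @ci above carries two fractional bits, so a register value
+ * corresponds to value / 4; e.g. the default ci = 6 (binary 00110) is
+ * 6 * 0.25 = 1.5.
+ */
+static inline __u32 ipu3_example_u3_2(__u32 integer, __u32 quarters)
+{
+	/* integer part in bits [4:2], 0.25 steps in bits [1:0] */
+	return (integer << 2) | (quarters & 0x3);
+}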
+
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config - Shading config
+ *
+ * @gr: Coefficient defines lens shading gain approximation for gr channel
+ * @r: Coefficient defines lens shading gain approximation for r channel
+ * @b: Coefficient defines lens shading gain approximation for b channel
+ * @gb: Coefficient defines lens shading gain approximation for gb channel
+ *
+ * Parameters for noise model (NM) adaptation of BNR due to shading correction.
+ * All above have precision of u3.3, default to 0.
+ */
+struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config {
+ __u8 gr;
+ __u8 r;
+ __u8 b;
+ __u8 gb;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_config - Optical center config
+ *
+ * @x_reset: Reset value of X (col start - X center). Precision s12.0.
+ * @reserved0: reserved
+ * @y_reset: Reset value of Y (row start - Y center). Precision s12.0.
+ * @reserved2: reserved
+ *
+ * Distance from corner to optical center for NM adaptation due to shading
+ * correction (should be calculated based on shading tables)
+ */
+struct ipu3_uapi_bnr_static_config_opt_center_config {
+ __s32 x_reset:13;
+ __u32 reserved0:3;
+ __s32 y_reset:13;
+ __u32 reserved2:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_lut_config - BNR square root lookup table
+ *
+ * @values: pre-calculated values of square root function.
+ *
+ * LUT implementation of square root operation.
+ */
+struct ipu3_uapi_bnr_static_config_lut_config {
+ __u8 values[IPU3_UAPI_BNR_LUT_SIZE];
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_bp_ctrl_config - Detect bad pixels (bp)
+ *
+ * @bp_thr_gain: Defines the threshold that specifies how different a
+ * defect pixel can be from its neighbors. Threshold is
+ * dependent on de-noise threshold calculated by algorithm.
+ * Range [4, 31], default 4.
+ * @reserved0: reserved
+ * @defect_mode: Mode of addressed defect pixels,
+ * 0 - single defect pixel is expected,
+ * 1 - 2 adjacent defect pixels are expected, default 1.
+ * @bp_gain:	Defines how different the 2nd derivative that passes through a
+ *		defect pixel is from the 2nd derivatives that pass through
+ *		neighbor pixels. u4.2, range [0, 256], default 8.
+ * @reserved1: reserved
+ * @w0_coeff: Blending coefficient of defect pixel correction.
+ * Precision u4, range [0, 8], default 8.
+ * @reserved2: reserved
+ * @w1_coeff: Enable influence of incorrect defect pixel correction to be
+ * avoided. Precision u4, range [1, 8], default 8.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_bnr_static_config_bp_ctrl_config {
+ __u32 bp_thr_gain:5;
+ __u32 reserved0:2;
+ __u32 defect_mode:1;
+ __u32 bp_gain:6;
+ __u32 reserved1:18;
+ __u32 w0_coeff:4;
+ __u32 reserved2:4;
+ __u32 w1_coeff:4;
+ __u32 reserved3:20;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config - Denoising config
+ *
+ * @alpha: Weight of central element of smoothing filter.
+ * @beta: Weight of peripheral elements of smoothing filter, default 4.
+ * @gamma: Weight of diagonal elements of smoothing filter, default 4.
+ *
+ * The beta and gamma parameters define the strength of the noise removal
+ * filter. All of the above have precision u0.4, range [0, 0xf],
+ * format: u0.4 (no / zero bits represent the whole number,
+ *	4 bits represent the fractional part
+ *	with each count representing 0.0625),
+ *	e.g. 0xf translates to 0.0625 x 15 = 0.9375.
+ *
+ * @reserved0: reserved
+ * @max_inf: Maximum increase of peripheral or diagonal element influence
+ *		relative to the pre-defined value, range [0x5, 0xa].
+ * @reserved1: reserved
+ * @gd_enable: Green disparity enable control, 0 - disable, 1 - enable.
+ * @bpc_enable: Bad pixel correction enable control, 0 - disable, 1 - enable.
+ * @bnr_enable: Bayer noise removal enable control, 0 - disable, 1 - enable.
+ * @ff_enable: Fixed function enable, 0 - disable, 1 - enable.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config {
+ __u32 alpha:4;
+ __u32 beta:4;
+ __u32 gamma:4;
+ __u32 reserved0:4;
+ __u32 max_inf:4;
+ __u32 reserved1:7;
+ __u32 gd_enable:1;
+ __u32 bpc_enable:1;
+ __u32 bnr_enable:1;
+ __u32 ff_enable:1;
+ __u32 reserved2:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_sqr_config - BNR optical square
+ *
+ * @x_sqr_reset: Reset value of X^2.
+ * @y_sqr_reset: Reset value of Y^2.
+ *
+ * Please note:
+ *
+ * #. X and Y refer to
+ *    &ipu3_uapi_bnr_static_config_opt_center_config
+ * #. Both structs are used in the threshold formula to calculate r^2, where r
+ *    is the radius of pixel [row, col] from the center of the sensor.
+ */
+struct ipu3_uapi_bnr_static_config_opt_center_sqr_config {
+ __u32 x_sqr_reset;
+ __u32 y_sqr_reset;
+} __packed;
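+/*
+ * Illustrative sketch (hypothetical helper, not part of this UAPI),
+ * assuming the square reset values are simply the squares of the optical
+ * center reset coordinates, so that the hardware can derive r^2 for the
+ * threshold formula mentioned above.
+ */
+static inline void ipu3_example_opt_center_sqr(
+	const struct ipu3_uapi_bnr_static_config_opt_center_config *oc,
+	struct ipu3_uapi_bnr_static_config_opt_center_sqr_config *sqr)
+{
+	sqr->x_sqr_reset = (__u32)(oc->x_reset * oc->x_reset);
+	sqr->y_sqr_reset = (__u32)(oc->y_reset * oc->y_reset);
+}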
+
+/**
+ * struct ipu3_uapi_bnr_static_config - BNR static config
+ *
+ * @wb_gains: white balance gains &ipu3_uapi_bnr_static_config_wb_gains_config
+ * @wb_gains_thr: white balance gains threshold as defined by
+ * &ipu3_uapi_bnr_static_config_wb_gains_thr_config
+ * @thr_coeffs: coefficients of threshold
+ * &ipu3_uapi_bnr_static_config_thr_coeffs_config
+ * @thr_ctrl_shd: control of shading threshold
+ * &ipu3_uapi_bnr_static_config_thr_ctrl_shd_config
+ * @opt_center: optical center &ipu3_uapi_bnr_static_config_opt_center_config
+ *
+ * Above parameters and opt_center_sqr are used for white balance and shading.
+ *
+ * @lut: lookup table &ipu3_uapi_bnr_static_config_lut_config
+ * @bp_ctrl: detect and remove bad pixels as defined in struct
+ * &ipu3_uapi_bnr_static_config_bp_ctrl_config
+ * @dn_detect_ctrl: detect and remove noise.
+ * &ipu3_uapi_bnr_static_config_dn_detect_ctrl_config
+ * @column_size: The number of pixels in column.
+ * @opt_center_sqr: Reset value of r^2 to optical center, see
+ * &ipu3_uapi_bnr_static_config_opt_center_sqr_config.
+ */
+struct ipu3_uapi_bnr_static_config {
+ struct ipu3_uapi_bnr_static_config_wb_gains_config wb_gains;
+ struct ipu3_uapi_bnr_static_config_wb_gains_thr_config wb_gains_thr;
+ struct ipu3_uapi_bnr_static_config_thr_coeffs_config thr_coeffs;
+ struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config thr_ctrl_shd;
+ struct ipu3_uapi_bnr_static_config_opt_center_config opt_center;
+ struct ipu3_uapi_bnr_static_config_lut_config lut;
+ struct ipu3_uapi_bnr_static_config_bp_ctrl_config bp_ctrl;
+ struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config dn_detect_ctrl;
+ __u32 column_size;
+ struct ipu3_uapi_bnr_static_config_opt_center_sqr_config opt_center_sqr;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_green_disparity - Correct green disparity
+ *
+ * @gd_red: Shading gain coeff for gr disparity level in bright red region.
+ * Precision u0.6, default 4(0.0625).
+ * @reserved0: reserved
+ * @gd_green: Shading gain coeff for gr disparity level in bright green
+ * region. Precision u0.6, default 4(0.0625).
+ * @reserved1: reserved
+ * @gd_blue: Shading gain coeff for gr disparity level in bright blue region.
+ * Precision u0.6, default 4(0.0625).
+ * @reserved2: reserved
+ * @gd_black: Maximal green disparity level in dark region (stronger disparity
+ * assumed to be image detail). Precision u14, default 80.
+ * @reserved3: reserved
+ * @gd_shading: Change maximal green disparity level according to square
+ * distance from image center.
+ * @reserved4: reserved
+ * @gd_support: Lower bound for the number of second green color pixels in
+ * current pixel neighborhood with less than threshold difference
+ * from it.
+ *
+ * The shading gain coeff of red, green, blue and black are used to calculate
+ * threshold given a pixel's color value and its coordinates in the image.
+ *
+ * @reserved5: reserved
+ * @gd_clip: Turn green disparity clip on/off, [0, 1], default 1.
+ * @gd_central_weight: Central pixel weight in 9 pixels weighted sum.
+ */
+struct ipu3_uapi_bnr_static_config_green_disparity {
+ __u32 gd_red:6;
+ __u32 reserved0:2;
+ __u32 gd_green:6;
+ __u32 reserved1:2;
+ __u32 gd_blue:6;
+ __u32 reserved2:10;
+ __u32 gd_black:14;
+ __u32 reserved3:2;
+ __u32 gd_shading:7;
+ __u32 reserved4:1;
+ __u32 gd_support:2;
+ __u32 reserved5:1;
+ __u32 gd_clip:1;
+ __u32 gd_central_weight:4;
+} __packed;
+
+/**
+ * struct ipu3_uapi_dm_config - De-mosaic parameters
+ *
+ * @dm_en: de-mosaic enable.
+ * @ch_ar_en: Checker artifacts removal enable flag. Default 0.
+ * @fcc_en: False color correction (FCC) enable flag. Default 0.
+ * @reserved0: reserved
+ * @frame_width: do not care
+ * @gamma_sc: Sharpening coefficient (coefficient of 2-d derivation of
+ * complementary color in Hamilton-Adams interpolation).
+ * u5, range [0, 31], default 8.
+ * @reserved1: reserved
+ * @lc_ctrl: Parameter that controls weights of Chroma Homogeneity metric
+ * in calculation of final homogeneity metric.
+ * u5, range [0, 31], default 7.
+ * @reserved2: reserved
+ * @cr_param1: First parameter that defines Checker artifact removal
+ * feature gain. Precision u5, range [0, 31], default 8.
+ * @reserved3: reserved
+ * @cr_param2: Second parameter that defines Checker artifact removal
+ * feature gain. Precision u5, range [0, 31], default 8.
+ * @reserved4: reserved
+ * @coring_param: Defines the power of the false color correction operation:
+ * low for preserving edge colors, high for preserving gray
+ * edge artifacts.
+ * Precision u1.4, range [0, 1.9375], default 4 (0.25).
+ * @reserved5: reserved
+ *
+ * The demosaic fixed function block is responsible for converting Bayer
+ * (mosaiced) images into color images based on a demosaicing algorithm.
+ */
+struct ipu3_uapi_dm_config {
+ __u32 dm_en:1;
+ __u32 ch_ar_en:1;
+ __u32 fcc_en:1;
+ __u32 reserved0:13;
+ __u32 frame_width:16;
+
+ __u32 gamma_sc:5;
+ __u32 reserved1:3;
+ __u32 lc_ctrl:5;
+ __u32 reserved2:3;
+ __u32 cr_param1:5;
+ __u32 reserved3:3;
+ __u32 cr_param2:5;
+ __u32 reserved4:3;
+
+ __u32 coring_param:5;
+ __u32 reserved5:27;
+} __packed;
+
+/**
+ * struct ipu3_uapi_ccm_mat_config - Color correction matrix
+ *
+ * @coeff_m11: CCM 3x3 coefficient, range [-65536, 65535]
+ * @coeff_m12: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m13: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_r: Bias 3x1 coefficient, range [-8191, 8181]
+ * @coeff_m21: CCM 3x3 coefficient, range [-32767, 32767]
+ * @coeff_m22: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m23: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_g: Bias 3x1 coefficient, range [-8191, 8181]
+ * @coeff_m31: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m32: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m33: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_b: Bias 3x1 coefficient, range [-8191, 8181]
+ *
+ * Transform the sensor specific color space to standard sRGB by applying a 3x3
+ * matrix and adding a bias vector O. The transformation is basically a rotation
+ * and translation in the 3-dimensional color space. Here are the defaults:
+ *
+ * 9775, -2671, 1087, 0
+ * -1071, 8303, 815, 0
+ * -23, -7887, 16103, 0
+ */
+struct ipu3_uapi_ccm_mat_config {
+ __s16 coeff_m11;
+ __s16 coeff_m12;
+ __s16 coeff_m13;
+ __s16 coeff_o_r;
+ __s16 coeff_m21;
+ __s16 coeff_m22;
+ __s16 coeff_m23;
+ __s16 coeff_o_g;
+ __s16 coeff_m31;
+ __s16 coeff_m32;
+ __s16 coeff_m33;
+ __s16 coeff_o_b;
+} __packed;
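+/*
+ * Illustrative sketch (hypothetical, not part of this UAPI): the CCM is a
+ * 3x3 matrix multiply plus the 3x1 bias. The fixed point scale of the
+ * coefficients is not spelled out here; the default rows sum to roughly
+ * 8192, so a 2^13 scale is assumed below purely for illustration.
+ */
+static inline __s32 ipu3_example_ccm_red(
+	const struct ipu3_uapi_ccm_mat_config *ccm, __s32 r, __s32 g, __s32 b)
+{
+	return ((ccm->coeff_m11 * r + ccm->coeff_m12 * g +
+		 ccm->coeff_m13 * b) >> 13) + ccm->coeff_o_r;
+}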
+
+/**
+ * struct ipu3_uapi_gamma_corr_ctrl - Gamma correction
+ *
+ * @enable: gamma correction enable.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_gamma_corr_ctrl {
+ __u32 enable:1;
+ __u32 reserved:31;
+} __packed;
+
+/**
+ * struct ipu3_uapi_gamma_corr_lut - Per-pixel tone mapping implemented as LUT.
+ *
+ * @lut:	256 tabulated values of the gamma function. LUT[1].. LUT[256],
+ *		format u13.0, range [0, 8191].
+ *
+ * The tone mapping operation is done by a piecewise linear graph
+ * that is implemented as a lookup table (LUT). The pixel component input
+ * intensity is the X-axis of the graph and serves as the table index.
+ */
+struct ipu3_uapi_gamma_corr_lut {
+ __u16 lut[IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES];
+} __packed;
+
+/**
+ * struct ipu3_uapi_gamma_config - Gamma config
+ *
+ * @gc_ctrl: control of gamma correction &ipu3_uapi_gamma_corr_ctrl
+ * @gc_lut: lookup table of gamma correction &ipu3_uapi_gamma_corr_lut
+ */
+struct ipu3_uapi_gamma_config {
+ struct ipu3_uapi_gamma_corr_ctrl gc_ctrl __attribute__((aligned(32)));
+ struct ipu3_uapi_gamma_corr_lut gc_lut __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_csc_mat_config - Color space conversion matrix config
+ *
+ * @coeff_c11: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c12: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c13: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b1: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ * @coeff_c21: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c22: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c23: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b2: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ * @coeff_c31: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c32: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c33: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b3: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ *
+ * Transforms each pixel from RGB to YUV (Y - brightness/luminance,
+ * UV - chroma) by multiplying the pixel's values by a 3x3 matrix and adding
+ * an optional 3x1 bias vector. Here are the default values for the matrix:
+ *
+ * 4898, 9617, 1867, 0,
+ * -2410, -4732, 7143, 0,
+ * 10076, -8437, -1638, 0,
+ *
+ * (i.e. for real number 0.299, 0.299 * 2^14 becomes 4898.)
+ */
+struct ipu3_uapi_csc_mat_config {
+ __s16 coeff_c11;
+ __s16 coeff_c12;
+ __s16 coeff_c13;
+ __s16 coeff_b1;
+ __s16 coeff_c21;
+ __s16 coeff_c22;
+ __s16 coeff_c23;
+ __s16 coeff_b2;
+ __s16 coeff_c31;
+ __s16 coeff_c32;
+ __s16 coeff_c33;
+ __s16 coeff_b3;
+} __packed;
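+/*
+ * Illustrative sketch (hypothetical helper, not part of this UAPI): encode a
+ * real conversion coefficient into the s0.14 format used above, matching the
+ * worked example in the description (0.299 * 2^14 = 4898).
+ */
+static inline __s16 ipu3_example_csc_coeff(double real)
+{
+	return (__s16)(real * 16384.0);	/* 2^14; truncates like the example */
+}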
+
+/**
+ * struct ipu3_uapi_cds_params - Chroma down-scaling
+ *
+ * @ds_c00: range [0, 3]
+ * @ds_c01: range [0, 3]
+ * @ds_c02: range [0, 3]
+ * @ds_c03: range [0, 3]
+ * @ds_c10: range [0, 3]
+ * @ds_c11: range [0, 3]
+ * @ds_c12: range [0, 3]
+ * @ds_c13: range [0, 3]
+ *
+ * If the user does not provide values, the above 4x2 filter uses the
+ * following defaults:
+ *	1, 3, 3, 1,
+ *	1, 3, 3, 1,
+ *
+ * @ds_nf:	Normalization factor for the Chroma output downscaling filter,
+ *		range [0, 4], default 2.
+ * @reserved0: reserved
+ * @csc_en:	Color space conversion enable
+ * @uv_bin_output:	0: output YUV 4.2.0, 1: output YUV 4.2.2 (default).
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_cds_params {
+ __u32 ds_c00:2;
+ __u32 ds_c01:2;
+ __u32 ds_c02:2;
+ __u32 ds_c03:2;
+ __u32 ds_c10:2;
+ __u32 ds_c11:2;
+ __u32 ds_c12:2;
+ __u32 ds_c13:2;
+ __u32 ds_nf:5;
+ __u32 reserved0:3;
+ __u32 csc_en:1;
+ __u32 uv_bin_output:1;
+ __u32 reserved1:6;
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_grid_config - Bayer shading(darkening) correction
+ *
+ * @width: Grid horizontal dimensions, u8, [8, 128], default 73
+ * @height: Grid vertical dimensions, u8, [8, 128], default 56
+ * @block_width_log2: Log2 of the width of the grid cell in pixel count
+ * u4, [0, 15], default value 5.
+ * @reserved0: reserved
+ * @block_height_log2: Log2 of the height of the grid cell in pixel count
+ * u4, [0, 15], default value 6.
+ * @reserved1: reserved
+ * @grid_height_per_slice: SHD_MAX_CELLS_PER_SET/width.
+ * (with SHD_MAX_CELLS_PER_SET = 146).
+ * @x_start: X value of top left corner of sensor relative to ROI
+ * s13, [-4096, 0], default 0, only negative values.
+ * @y_start: Y value of top left corner of sensor relative to ROI
+ * s13, [-4096, 0], default 0, only negative values.
+ */
+struct ipu3_uapi_shd_grid_config {
+ /* reg 0 */
+ __u8 width;
+ __u8 height;
+ __u8 block_width_log2:3;
+ __u8 reserved0:1;
+ __u8 block_height_log2:3;
+ __u8 reserved1:1;
+ __u8 grid_height_per_slice;
+ /* reg 1 */
+ __s16 x_start;
+ __s16 y_start;
+} __packed;
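+/*
+ * Illustrative sketch (hypothetical helper, not part of this UAPI):
+ * grid_height_per_slice follows from the grid width as documented above,
+ * e.g. for the default width of 73: 146 / 73 = 2 grid rows per slice.
+ */
+static inline __u8 ipu3_example_shd_height_per_slice(__u8 width)
+{
+	return IPU3_UAPI_SHD_MAX_CELLS_PER_SET / width;
+}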
+
+/**
+ * struct ipu3_uapi_shd_general_config - Shading general config
+ *
+ * @init_set_vrt_offst_ul: set vertical offset,
+ *			(y_start >> block_height_log2) % grid_height_per_slice.
+ * @shd_enable: shading enable.
+ * @gain_factor: Gain factor. Shifts the calculated anti-shading value.
+ *		Precision u2.
+ *		0x0 - gain factor [1, 5], the interpolated value is not shifted.
+ *		0x1 - gain factor [1, 9], the interpolated value is shifted by 1.
+ *		0x2 - gain factor [1, 17], the interpolated value is shifted by 2.
+ * @reserved: reserved
+ *
+ * Correction is performed by multiplying a gain factor for each of the 4 Bayer
+ * channels as a function of the pixel location in the sensor.
+ */
+struct ipu3_uapi_shd_general_config {
+ __u32 init_set_vrt_offst_ul:8;
+ __u32 shd_enable:1;
+ __u32 gain_factor:2;
+ __u32 reserved:21;
+} __packed;
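+/*
+ * Illustrative sketch (hypothetical helper, not part of this UAPI): compute
+ * init_set_vrt_offst_ul from the grid configuration using the formula in the
+ * description above; the magnitude of y_start is used here since y_start is
+ * documented as zero or negative (an assumption of this sketch).
+ */
+static inline __u32 ipu3_example_shd_vrt_offst(
+	const struct ipu3_uapi_shd_grid_config *grid)
+{
+	__u32 y = (__u32)(-grid->y_start);
+
+	return (y >> grid->block_height_log2) % grid->grid_height_per_slice;
+}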
+
+/**
+ * struct ipu3_uapi_shd_black_level_config - Black level correction
+ *
+ * @bl_r:	Bias value for red. s11 range [-2048, 2047].
+ * @bl_gr:	Bias value for green red. s11 range [-2048, 2047].
+ * @bl_gb:	Bias value for green blue. s11 range [-2048, 2047].
+ * @bl_b:	Bias value for blue. s11 range [-2048, 2047].
+ */
+struct ipu3_uapi_shd_black_level_config {
+ __s16 bl_r;
+ __s16 bl_gr;
+ __s16 bl_gb;
+ __s16 bl_b;
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_config_static - Shading config static
+ *
+ * @grid: shading grid config &ipu3_uapi_shd_grid_config
+ * @general: shading general config &ipu3_uapi_shd_general_config
+ * @black_level: black level config for shading correction as defined by
+ * &ipu3_uapi_shd_black_level_config
+ */
+struct ipu3_uapi_shd_config_static {
+ struct ipu3_uapi_shd_grid_config grid;
+ struct ipu3_uapi_shd_general_config general;
+ struct ipu3_uapi_shd_black_level_config black_level;
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_lut - Shading gain factor lookup table.
+ *
+ * @sets: array
+ * @sets.r_and_gr: Red and GreenR Lookup table.
+ * @sets.r_and_gr.r: Red shading factor.
+ * @sets.r_and_gr.gr: GreenR shading factor.
+ * @sets.reserved1: reserved
+ * @sets.gb_and_b: GreenB and Blue Lookup table.
+ * @sets.gb_and_b.gb: GreenB shading factor.
+ * @sets.gb_and_b.b: Blue shading factor.
+ * @sets.reserved2: reserved
+ *
+ * Map to shading correction LUT register set.
+ */
+struct ipu3_uapi_shd_lut {
+ struct {
+ struct {
+ __u16 r;
+ __u16 gr;
+ } r_and_gr[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
+ __u8 reserved1[24];
+ struct {
+ __u16 gb;
+ __u16 b;
+ } gb_and_b[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
+ __u8 reserved2[24];
+ } sets[IPU3_UAPI_SHD_MAX_CFG_SETS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_config - Shading config
+ *
+ * @shd: shading static config, see &ipu3_uapi_shd_config_static
+ * @shd_lut: shading lookup table &ipu3_uapi_shd_lut
+ */
+struct ipu3_uapi_shd_config {
+ struct ipu3_uapi_shd_config_static shd __attribute__((aligned(32)));
+ struct ipu3_uapi_shd_lut shd_lut __attribute__((aligned(32)));
+} __packed;
+
+/* Image Enhancement Filter directed */
+
+/**
+ * struct ipu3_uapi_iefd_cux2 - IEFd Config Unit 2 parameters
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @b01: Slope B, always 0.
+ *
+ * Calculate weight for blending directed and non-directed denoise elements
+ *
+ * Note:
+ * Each instance of Config Unit needs X coordinate of n points and
+ * slope A factor between points calculated by driver based on calibration
+ * parameters.
+ *
+ * All CU inputs are unsigned; they will be converted to signed when written
+ * to the register, i.e. a01 will be written to a 9 bit register in s4.4 format.
+ * The data precision s4.4 means 4 bits for the integer part and 4 bits for the
+ * fractional part, while the first bit indicates a positive or negative value.
+ * For userspace software (commonly the imaging library), the computation of
+ * the CU slope values should be based on the slope resolution 1/16 (binary
+ * 0.0001 - the minimal interval value); the slope value range is [-256, +255].
+ * This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
+ * &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
+ */
+struct ipu3_uapi_iefd_cux2 {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 a01:9;
+ __u32 b01:5;
+} __packed;
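+/*
+ * Illustrative sketch (hypothetical helper, not part of this UAPI): quantize
+ * a config unit slope between two points (x0, y0) and (x1, y1) to the 1/16
+ * resolution described above, clamped to the documented [-256, +255] range.
+ * x1 must be different from x0.
+ */
+static inline int ipu3_example_cu_slope(int x0, int y0, int x1, int y1)
+{
+	int slope = ((y1 - y0) * 16) / (x1 - x0);	/* 1/16 steps */
+
+	if (slope < -256)
+		slope = -256;
+	if (slope > 255)
+		slope = 255;
+	return slope;
+}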
+
+/**
+ * struct ipu3_uapi_iefd_cux6_ed - Calculate power of non-directed sharpening
+ * element, Config Unit 6 for edge detail (ED).
+ *
+ * @x0: X coordinate of point 0, u9.0, default 0.
+ * @x1: X coordinate of point 1, u9.0, default 0.
+ * @x2: X coordinate of point 2, u9.0, default 0.
+ * @reserved0: reserved
+ * @x3: X coordinate of point 3, u9.0, default 0.
+ * @x4: X coordinate of point 4, u9.0, default 0.
+ * @x5: X coordinate of point 5, u9.0, default 0.
+ * @reserved1: reserved
+ * @a01: slope A points 01, s4.4, default 0.
+ * @a12: slope A points 12, s4.4, default 0.
+ * @a23: slope A points 23, s4.4, default 0.
+ * @reserved2: reserved
+ * @a34: slope A points 34, s4.4, default 0.
+ * @a45: slope A points 45, s4.4, default 0.
+ * @reserved3: reserved
+ * @b01: slope B points 01, s4.4, default 0.
+ * @b12: slope B points 12, s4.4, default 0.
+ * @b23: slope B points 23, s4.4, default 0.
+ * @reserved4: reserved
+ * @b34: slope B points 34, s4.4, default 0.
+ * @b45: slope B points 45, s4.4, default 0.
+ * @reserved5: reserved.
+ */
+struct ipu3_uapi_iefd_cux6_ed {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 x2:9;
+ __u32 reserved0:5;
+
+ __u32 x3:9;
+ __u32 x4:9;
+ __u32 x5:9;
+ __u32 reserved1:5;
+
+ __u32 a01:9;
+ __u32 a12:9;
+ __u32 a23:9;
+ __u32 reserved2:5;
+
+ __u32 a34:9;
+ __u32 a45:9;
+ __u32 reserved3:14;
+
+ __u32 b01:9;
+ __u32 b12:9;
+ __u32 b23:9;
+ __u32 reserved4:5;
+
+ __u32 b34:9;
+ __u32 b45:9;
+ __u32 reserved5:14;
+} __packed;
+
+/**
+ * struct ipu3_uapi_iefd_cux2_1 - Calculate power of non-directed denoise
+ * element apply.
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @reserved1: reserved
+ * @b01: offset B0 of Config Unit, u7.0, default 0.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_iefd_cux2_1 {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 a01:9;
+ __u32 reserved1:5;
+
+ __u32 b01:8;
+ __u32 reserved2:24;
+} __packed;
+
+/**
+ * struct ipu3_uapi_iefd_cux4 - Calculate power of non-directed sharpening
+ * element.
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @x2: X2 point of Config Unit, u9.0, default 0.
+ * @reserved0: reserved
+ * @x3: X3 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A0 of Config Unit, s4.4, default 0.
+ * @a12: Slope A1 of Config Unit, s4.4, default 0.
+ * @reserved1: reserved
+ * @a23: Slope A2 of Config Unit, s4.4, default 0.
+ * @b01: Offset B0 of Config Unit, s7.0, default 0.
+ * @b12: Offset B1 of Config Unit, s7.0, default 0.
+ * @reserved2: reserved
+ * @b23: Offset B2 of Config Unit, s7.0, default 0.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_iefd_cux4 {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 x2:9;
+ __u32 reserved0:5;
+
+ __u32 x3:9;
+ __u32 a01:9;
+ __u32 a12:9;
+ __u32 reserved1:5;
+
+ __u32 a23:9;
+ __u32 b01:8;
+ __u32 b12:8;
+ __u32 reserved2:7;
+
+ __u32 b23:8;
+ __u32 reserved3:24;
+} __packed;
+
+/**
+ * struct ipu3_uapi_iefd_cux6_rad - Radial Config Unit (CU)
+ *
+ * @x0: x0 points of Config Unit radial, u8.0
+ * @x1: x1 points of Config Unit radial, u8.0
+ * @x2: x2 points of Config Unit radial, u8.0
+ * @x3: x3 points of Config Unit radial, u8.0
+ * @x4: x4 points of Config Unit radial, u8.0
+ * @x5: x5 points of Config Unit radial, u8.0
+ * @reserved1: reserved
+ * @a01: Slope A of Config Unit radial, s7.8
+ * @a12: Slope A of Config Unit radial, s7.8
+ * @a23: Slope A of Config Unit radial, s7.8
+ * @a34: Slope A of Config Unit radial, s7.8
+ * @a45: Slope A of Config Unit radial, s7.8
+ * @reserved2: reserved
+ * @b01: Slope B of Config Unit radial, s9.0
+ * @b12: Slope B of Config Unit radial, s9.0
+ * @b23: Slope B of Config Unit radial, s9.0
+ * @reserved4: reserved
+ * @b34: Slope B of Config Unit radial, s9.0
+ * @b45: Slope B of Config Unit radial, s9.0
+ * @reserved5: reserved
+ */
+struct ipu3_uapi_iefd_cux6_rad {
+ __u32 x0:8;
+ __u32 x1:8;
+ __u32 x2:8;
+ __u32 x3:8;
+
+ __u32 x4:8;
+ __u32 x5:8;
+ __u32 reserved1:16;
+
+ __u32 a01:16;
+ __u32 a12:16;
+
+ __u32 a23:16;
+ __u32 a34:16;
+
+ __u32 a45:16;
+ __u32 reserved2:16;
+
+ __u32 b01:10;
+ __u32 b12:10;
+ __u32 b23:10;
+ __u32 reserved4:2;
+
+ __u32 b34:10;
+ __u32 b45:10;
+ __u32 reserved5:12;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_cfg_units - IEFd Config Units parameters
+ *
+ * @cu_1: calculate weight for blending directed and
+ * non-directed denoise elements. See &ipu3_uapi_iefd_cux2
+ * @cu_ed: calculate power of non-directed sharpening element, see
+ * &ipu3_uapi_iefd_cux6_ed
+ * @cu_3: calculate weight for blending directed and
+ * non-directed denoise elements. A &ipu3_uapi_iefd_cux2
+ * @cu_5: calculate power of non-directed denoise element apply, use
+ * &ipu3_uapi_iefd_cux2_1
+ * @cu_6: calculate power of non-directed sharpening element. See
+ * &ipu3_uapi_iefd_cux4
+ * @cu_7: calculate weight for blending directed and
+ * non-directed denoise elements. Use &ipu3_uapi_iefd_cux2
+ * @cu_unsharp: Config Unit of unsharp &ipu3_uapi_iefd_cux4
+ * @cu_radial: Config Unit of radial &ipu3_uapi_iefd_cux6_rad
+ * @cu_vssnlm: Config Unit of vssnlm &ipu3_uapi_iefd_cux2
+ */
+struct ipu3_uapi_yuvp1_iefd_cfg_units {
+ struct ipu3_uapi_iefd_cux2 cu_1;
+ struct ipu3_uapi_iefd_cux6_ed cu_ed;
+ struct ipu3_uapi_iefd_cux2 cu_3;
+ struct ipu3_uapi_iefd_cux2_1 cu_5;
+ struct ipu3_uapi_iefd_cux4 cu_6;
+ struct ipu3_uapi_iefd_cux2 cu_7;
+ struct ipu3_uapi_iefd_cux4 cu_unsharp;
+ struct ipu3_uapi_iefd_cux6_rad cu_radial;
+ struct ipu3_uapi_iefd_cux2 cu_vssnlm;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_config_s - IEFd config
+ *
+ * @horver_diag_coeff: Gradient compensation. Compared with vertical /
+ * horizontal (0 / 90 degree), coefficient of diagonal (45 /
+ * 135 degree) direction should be corrected by approx.
+ * 1/sqrt(2).
+ * @reserved0: reserved
+ * @clamp_stitch: Slope to stitch between clamped and unclamped edge values
+ * @reserved1: reserved
+ * @direct_metric_update: Update coeff for direction metric
+ * @reserved2: reserved
+ * @ed_horver_diag_coeff: Radial Coefficient that compensates for
+ * different distance for vertical/horizontal and
+ * diagonal gradient calculation (approx. 1/sqrt(2))
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_iefd_config_s {
+ __u32 horver_diag_coeff:7;
+ __u32 reserved0:1;
+ __u32 clamp_stitch:6;
+ __u32 reserved1:2;
+ __u32 direct_metric_update:5;
+ __u32 reserved2:3;
+ __u32 ed_horver_diag_coeff:7;
+ __u32 reserved3:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_control - IEFd control
+ *
+ * @iefd_en: Enable IEFd
+ * @denoise_en: Enable denoise
+ * @direct_smooth_en: Enable directional smooth
+ * @rad_en: Enable radial update
+ * @vssnlm_en: Enable VSSNLM output filter
+ * @reserved: reserved
+ */
+struct ipu3_uapi_yuvp1_iefd_control {
+ __u32 iefd_en:1;
+ __u32 denoise_en:1;
+ __u32 direct_smooth_en:1;
+ __u32 rad_en:1;
+ __u32 vssnlm_en:1;
+ __u32 reserved:27;
+} __packed;
+
+/**
+ * struct ipu3_uapi_sharp_cfg - Sharpening config
+ *
+ * @nega_lmt_txt: Sharpening limit for negative overshoots for texture.
+ * @reserved0: reserved
+ * @posi_lmt_txt: Sharpening limit for positive overshoots for texture.
+ * @reserved1: reserved
+ * @nega_lmt_dir: Sharpening limit for negative overshoots for direction (edge).
+ * @reserved2: reserved
+ * @posi_lmt_dir: Sharpening limit for positive overshoots for direction (edge).
+ * @reserved3: reserved
+ *
+ * Fixed point type u13.0, range [0, 8191].
+ */
+struct ipu3_uapi_sharp_cfg {
+ __u32 nega_lmt_txt:13;
+ __u32 reserved0:19;
+ __u32 posi_lmt_txt:13;
+ __u32 reserved1:19;
+ __u32 nega_lmt_dir:13;
+ __u32 reserved2:19;
+ __u32 posi_lmt_dir:13;
+ __u32 reserved3:19;
+} __packed;
+
+/**
+ * struct ipu3_uapi_far_w - Sharpening config for far sub-group
+ *
+ * @dir_shrp: Weight of wide direct sharpening, u1.6, range [0, 64], default 64.
+ * @reserved0: reserved
+ * @dir_dns: Weight of wide direct denoising, u1.6, range [0, 64], default 0.
+ * @reserved1: reserved
+ * @ndir_dns_powr: Power of non-direct denoising,
+ * Precision u1.6, range [0, 64], default 64.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_far_w {
+ __u32 dir_shrp:7;
+ __u32 reserved0:1;
+ __u32 dir_dns:7;
+ __u32 reserved1:1;
+ __u32 ndir_dns_powr:7;
+ __u32 reserved2:9;
+} __packed;
+
+/**
+ * struct ipu3_uapi_unsharp_cfg - Unsharp config
+ *
+ * @unsharp_weight: Unsharp mask blending weight.
+ * u1.6, range [0, 64], default 16.
+ * 0 - disabled, 64 - use only unsharp.
+ * @reserved0: reserved
+ * @unsharp_amount: Unsharp mask amount, u4.5, range [0, 511], default 0.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_unsharp_cfg {
+ __u32 unsharp_weight:7;
+ __u32 reserved0:1;
+ __u32 unsharp_amount:9;
+ __u32 reserved1:15;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_shrp_cfg - IEFd sharpness config
+ *
+ * @cfg: sharpness config &ipu3_uapi_sharp_cfg
+ * @far_w: wide range config, value as specified by &ipu3_uapi_far_w:
+ * The 5x5 environment is separated into 2 sub-groups, the 3x3 nearest
+ * neighbors (8 pixels called Near), and the second order neighborhood
+ * around them (16 pixels called Far).
+ * @unshrp_cfg: unsharpness config. &ipu3_uapi_unsharp_cfg
+ */
+struct ipu3_uapi_yuvp1_iefd_shrp_cfg {
+ struct ipu3_uapi_sharp_cfg cfg;
+ struct ipu3_uapi_far_w far_w;
+ struct ipu3_uapi_unsharp_cfg unshrp_cfg;
+} __packed;
+
+/**
+ * struct ipu3_uapi_unsharp_coef0 - Unsharp mask coefficients
+ *
+ * @c00: Coeff11, s0.8, range [-255, 255], default 1.
+ * @c01: Coeff12, s0.8, range [-255, 255], default 5.
+ * @c02: Coeff13, s0.8, range [-255, 255], default 9.
+ * @reserved: reserved
+ *
+ * Configurable registers for common sharpening support.
+ */
+struct ipu3_uapi_unsharp_coef0 {
+ __u32 c00:9;
+ __u32 c01:9;
+ __u32 c02:9;
+ __u32 reserved:5;
+} __packed;
+
+/**
+ * struct ipu3_uapi_unsharp_coef1 - Unsharp mask coefficients
+ *
+ * @c11: Coeff22, s0.8, range [-255, 255], default 29.
+ * @c12: Coeff23, s0.8, range [-255, 255], default 55.
+ * @c22: Coeff33, s0.8, range [-255, 255], default 96.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_unsharp_coef1 {
+ __u32 c11:9;
+ __u32 c12:9;
+ __u32 c22:9;
+ __u32 reserved:5;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_unshrp_cfg - Unsharp mask config
+ *
+ * @unsharp_coef0: unsharp coefficient 0 config. See &ipu3_uapi_unsharp_coef0
+ * @unsharp_coef1: unsharp coefficient 1 config. See &ipu3_uapi_unsharp_coef1
+ */
+struct ipu3_uapi_yuvp1_iefd_unshrp_cfg {
+ struct ipu3_uapi_unsharp_coef0 unsharp_coef0;
+ struct ipu3_uapi_unsharp_coef1 unsharp_coef1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_reset_xy - Radial coordinate reset
+ *
+ * @x: Radial reset of x coordinate. Precision s12, [-4095, 4095], default 0.
+ * @reserved0: reserved
+ * @y: Radial center y coordinate. Precision s12, [-4095, 4095], default 0.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_radial_reset_xy {
+ __s32 x:13;
+ __u32 reserved0:3;
+ __s32 y:13;
+ __u32 reserved1:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_reset_x2 - Radial X^2 reset
+ *
+ * @x2: Radial reset of x^2 coordinate. Precision u24, default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_radial_reset_x2 {
+ __u32 x2:24;
+ __u32 reserved:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_reset_y2 - Radial Y^2 reset
+ *
+ * @y2: Radial reset of y^2 coordinate. Precision u24, default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_radial_reset_y2 {
+ __u32 y2:24;
+ __u32 reserved:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_cfg - Radial config
+ *
+ * @rad_nf:	Radial R^2 normalization factor; r^2 is scaled down by
+ *		2^-(15 + scale).
+ * @reserved0:	reserved
+ * @rad_inv_r2:	Radial R^-2 normalized to (0.5..1).
+ * Precision u7, range [0, 127].
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_radial_cfg {
+ __u32 rad_nf:4;
+ __u32 reserved0:4;
+ __u32 rad_inv_r2:7;
+ __u32 reserved1:17;
+} __packed;
+
+/**
+ * struct ipu3_uapi_rad_far_w - Radial FAR sub-group
+ *
+ * @rad_dir_far_sharp_w: Weight of wide direct sharpening, u1.6, range [0, 64],
+ * default 64.
+ * @rad_dir_far_dns_w: Weight of wide direct denoising, u1.6, range [0, 64],
+ * default 0.
+ * @rad_ndir_far_dns_power:	power of non-direct denoising, u1.6, range [0, 64],
+ * default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_rad_far_w {
+ __u32 rad_dir_far_sharp_w:8;
+ __u32 rad_dir_far_dns_w:8;
+ __u32 rad_ndir_far_dns_power:8;
+ __u32 reserved:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_cu_cfg0 - Radius Config Unit cfg0 register
+ *
+ * @cu6_pow: Power of CU6. Power of non-direct sharpening, u3.4.
+ * @reserved0: reserved
+ * @cu_unsharp_pow: Power of unsharp mask, u2.4.
+ * @reserved1: reserved
+ * @rad_cu6_pow: Radial/corner CU6. Directed sharpening power, u3.4.
+ * @reserved2: reserved
+ * @rad_cu_unsharp_pow: Radial power of unsharp mask, u2.4.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_cu_cfg0 {
+ __u32 cu6_pow:7;
+ __u32 reserved0:1;
+ __u32 cu_unsharp_pow:7;
+ __u32 reserved1:1;
+ __u32 rad_cu6_pow:7;
+ __u32 reserved2:1;
+ __u32 rad_cu_unsharp_pow:6;
+ __u32 reserved3:2;
+} __packed;
+
+/**
+ * struct ipu3_uapi_cu_cfg1 - Radius Config Unit cfg1 register
+ *
+ * @rad_cu6_x1: X1 point of Config Unit 6, precision u9.0.
+ * @reserved0: reserved
+ * @rad_cu_unsharp_x1: X1 point for Config Unit unsharp for radial/corner point
+ * precision u9.0.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_cu_cfg1 {
+ __u32 rad_cu6_x1:9;
+ __u32 reserved0:1;
+ __u32 rad_cu_unsharp_x1:9;
+ __u32 reserved1:13;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_rad_cfg - IEFd parameters changed radially over
+ * the picture plane.
+ *
+ * @reset_xy: reset xy value in radial calculation. &ipu3_uapi_radial_reset_xy
+ * @reset_x2: reset x square value in radial calculation. See struct
+ * &ipu3_uapi_radial_reset_x2
+ * @reset_y2: reset y square value in radial calculation. See struct
+ * &ipu3_uapi_radial_reset_y2
+ * @cfg: radial config defined in &ipu3_uapi_radial_cfg
+ * @rad_far_w: weight for wide range radial. &ipu3_uapi_rad_far_w
+ * @cu_cfg0: configuration unit 0. See &ipu3_uapi_cu_cfg0
+ * @cu_cfg1: configuration unit 1. See &ipu3_uapi_cu_cfg1
+ */
+struct ipu3_uapi_yuvp1_iefd_rad_cfg {
+ struct ipu3_uapi_radial_reset_xy reset_xy;
+ struct ipu3_uapi_radial_reset_x2 reset_x2;
+ struct ipu3_uapi_radial_reset_y2 reset_y2;
+ struct ipu3_uapi_radial_cfg cfg;
+ struct ipu3_uapi_rad_far_w rad_far_w;
+ struct ipu3_uapi_cu_cfg0 cu_cfg0;
+ struct ipu3_uapi_cu_cfg1 cu_cfg1;
+} __packed;
+
+/* Vssnlm - Very small scale non-local mean algorithm */
+
+/**
+ * struct ipu3_uapi_vss_lut_x - Vssnlm LUT x0/x1/x2
+ *
+ * @vs_x0: Vssnlm LUT x0, precision u8, range [0, 255], default 16.
+ * @vs_x1: Vssnlm LUT x1, precision u8, range [0, 255], default 32.
+ * @vs_x2: Vssnlm LUT x2, precision u8, range [0, 255], default 64.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_vss_lut_x {
+ __u32 vs_x0:8;
+ __u32 vs_x1:8;
+ __u32 vs_x2:8;
+ __u32 reserved2:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_vss_lut_y - Vssnlm LUT y0/y1/y2
+ *
+ * @vs_y1: Vssnlm LUT y1, precision u4, range [0, 8], default 1.
+ * @reserved0: reserved
+ * @vs_y2: Vssnlm LUT y2, precision u4, range [0, 8], default 3.
+ * @reserved1: reserved
+ * @vs_y3: Vssnlm LUT y3, precision u4, range [0, 8], default 8.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_vss_lut_y {
+ __u32 vs_y1:4;
+ __u32 reserved0:4;
+ __u32 vs_y2:4;
+ __u32 reserved1:4;
+ __u32 vs_y3:4;
+ __u32 reserved2:12;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg - IEFd Vssnlm Lookup table
+ *
+ * @vss_lut_x: vss lookup table. See &ipu3_uapi_vss_lut_x description
+ * @vss_lut_y: vss lookup table. See &ipu3_uapi_vss_lut_y description
+ */
+struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg {
+ struct ipu3_uapi_vss_lut_x vss_lut_x;
+ struct ipu3_uapi_vss_lut_y vss_lut_y;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_config - IEFd config
+ *
+ * @units: configuration unit setting, &ipu3_uapi_yuvp1_iefd_cfg_units
+ * @config: configuration, as defined by &ipu3_uapi_yuvp1_iefd_config_s
+ * @control: control setting, as defined by &ipu3_uapi_yuvp1_iefd_control
+ * @sharp: sharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_shrp_cfg
+ * @unsharp: unsharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_unshrp_cfg
+ * @rad: radial setting, as defined by &ipu3_uapi_yuvp1_iefd_rad_cfg
+ * @vsslnm: vsslnm setting, as defined by &ipu3_uapi_yuvp1_iefd_vssnlm_cfg
+ */
+struct ipu3_uapi_yuvp1_iefd_config {
+ struct ipu3_uapi_yuvp1_iefd_cfg_units units;
+ struct ipu3_uapi_yuvp1_iefd_config_s config;
+ struct ipu3_uapi_yuvp1_iefd_control control;
+ struct ipu3_uapi_yuvp1_iefd_shrp_cfg sharp;
+ struct ipu3_uapi_yuvp1_iefd_unshrp_cfg unsharp;
+ struct ipu3_uapi_yuvp1_iefd_rad_cfg rad;
+ struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg vsslnm;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_yds_config - Y Down-Sampling config
+ *
+ * @c00: range [0, 3], default 0x0
+ * @c01: range [0, 3], default 0x1
+ * @c02: range [0, 3], default 0x1
+ * @c03: range [0, 3], default 0x0
+ * @c10: range [0, 3], default 0x0
+ * @c11: range [0, 3], default 0x1
+ * @c12: range [0, 3], default 0x1
+ * @c13: range [0, 3], default 0x0
+ *
+ * Above are 4x2 filter coefficients for chroma output downscaling.
+ *
+ * @norm_factor: Normalization factor, range [0, 4], default 2
+ * 0 - divide by 1
+ * 1 - divide by 2
+ * 2 - divide by 4
+ * 3 - divide by 8
+ * 4 - divide by 16
+ * @reserved0: reserved
+ * @bin_output: Down sampling on the Luma channel in two optional modes:
+ *		0 - Bin output 4.2.0 (default), 1 - Bin output 4.2.2.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_yds_config {
+ __u32 c00:2;
+ __u32 c01:2;
+ __u32 c02:2;
+ __u32 c03:2;
+ __u32 c10:2;
+ __u32 c11:2;
+ __u32 c12:2;
+ __u32 c13:2;
+ __u32 norm_factor:5;
+ __u32 reserved0:4;
+ __u32 bin_output:1;
+ __u32 reserved1:6;
+} __packed;
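+/*
+ * Illustrative sketch (hypothetical, not part of this UAPI), assuming the
+ * 4x2 filter above is applied as a plain weighted sum of a 4x2 block of
+ * luma samples: with the default coefficients {0, 1, 1, 0} on both rows and
+ * norm_factor = 2, the sum of the four center samples is divided by 4.
+ */
+static inline __u32 ipu3_example_yds_sample(
+	const struct ipu3_uapi_yuvp1_yds_config *c, const __u32 y[2][4])
+{
+	__u32 sum = c->c00 * y[0][0] + c->c01 * y[0][1] +
+		    c->c02 * y[0][2] + c->c03 * y[0][3] +
+		    c->c10 * y[1][0] + c->c11 * y[1][1] +
+		    c->c12 * y[1][2] + c->c13 * y[1][3];
+
+	return sum >> c->norm_factor;	/* norm_factor 2 -> divide by 4 */
+}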
+
+/* Chroma Noise Reduction */
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_enable_config - Chroma noise reduction enable
+ *
+ * @enable: enable/disable chroma noise reduction
+ * @yuv_mode: 0 - YUV420, 1 - YUV422
+ * @reserved0: reserved
+ * @col_size: number of columns in the frame, max width is 2560
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_enable_config {
+ __u32 enable:1;
+ __u32 yuv_mode:1;
+ __u32 reserved0:14;
+ __u32 col_size:12;
+ __u32 reserved1:4;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_coring_config - Coring thresholds for UV
+ *
+ * @u: U coring level, u0.13, range [0.0, 1.0], default 0.0
+ * @reserved0: reserved
+ * @v: V coring level, u0.13, range [0.0, 1.0], default 0.0
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_coring_config {
+ __u32 u:13;
+ __u32 reserved0:3;
+ __u32 v:13;
+ __u32 reserved1:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_sense_gain_config - Chroma noise reduction gains
+ *
+ * All sensitivity gain parameters have precision u13.0, range [0, 8191].
+ *
+ * @vy: Sensitivity of horizontal edge of Y, default 100
+ * @vu: Sensitivity of horizontal edge of U, default 100
+ * @vv: Sensitivity of horizontal edge of V, default 100
+ * @reserved0: reserved
+ * @hy: Sensitivity of vertical edge of Y, default 50
+ * @hu: Sensitivity of vertical edge of U, default 50
+ * @hv: Sensitivity of vertical edge of V, default 50
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_sense_gain_config {
+ __u32 vy:8;
+ __u32 vu:8;
+ __u32 vv:8;
+ __u32 reserved0:8;
+
+ __u32 hy:8;
+ __u32 hu:8;
+ __u32 hv:8;
+ __u32 reserved1:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_iir_fir_config - Chroma IIR/FIR filter config
+ *
+ * @fir_0h: Value of center tap in horizontal FIR, range [0, 32], default 8.
+ * @reserved0: reserved
+ * @fir_1h: Value of distance 1 in horizontal FIR, range [0, 32], default 12.
+ * @reserved1: reserved
+ * @fir_2h: Value of distance 2 tap in horizontal FIR, range [0, 32], default 0.
+ * @dalpha_clip_val: weight for previous row in IIR, range [1, 256], default 0.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_iir_fir_config {
+ __u32 fir_0h:6;
+ __u32 reserved0:2;
+ __u32 fir_1h:6;
+ __u32 reserved1:2;
+ __u32 fir_2h:6;
+ __u32 dalpha_clip_val:9;
+ __u32 reserved2:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_config - Chroma noise reduction config
+ *
+ * @enable: chroma noise reduction enable, see
+ * &ipu3_uapi_yuvp1_chnr_enable_config
+ * @coring: coring config for chroma noise reduction, see
+ * &ipu3_uapi_yuvp1_chnr_coring_config
+ * @sense_gain: sensitivity config for chroma noise reduction, see
+ * ipu3_uapi_yuvp1_chnr_sense_gain_config
+ * @iir_fir: iir and fir config for chroma noise reduction, see
+ * ipu3_uapi_yuvp1_chnr_iir_fir_config
+ */
+struct ipu3_uapi_yuvp1_chnr_config {
+ struct ipu3_uapi_yuvp1_chnr_enable_config enable;
+ struct ipu3_uapi_yuvp1_chnr_coring_config coring;
+ struct ipu3_uapi_yuvp1_chnr_sense_gain_config sense_gain;
+ struct ipu3_uapi_yuvp1_chnr_iir_fir_config iir_fir;
+} __packed;
+
+/* Edge Enhancement and Noise Reduction */
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config - Luma(Y) edge enhancement low-pass
+ * filter coefficients
+ *
+ * @a_diag: Smoothing diagonal coefficient, u5.0.
+ * @reserved0: reserved
+ * @a_periph:	Image smoothing peripheral coefficient, u5.0.
+ * @reserved1: reserved
+ * @a_cent: Image Smoothing center coefficient, u5.0.
+ * @reserved2: reserved
+ * @enable: 0: Y_EE_NR disabled, output = input; 1: Y_EE_NR enabled.
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config {
+ __u32 a_diag:5;
+ __u32 reserved0:3;
+ __u32 a_periph:5;
+ __u32 reserved1:3;
+ __u32 a_cent:5;
+ __u32 reserved2:9;
+ __u32 enable:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_sense_config - Luma(Y) edge enhancement
+ * noise reduction sensitivity gains
+ *
+ * @edge_sense_0: Sensitivity of edge in dark area. u13.0, default 8191.
+ * @reserved0: reserved
+ * @delta_edge_sense: Difference in the sensitivity of edges between
+ * the bright and dark areas. u13.0, default 0.
+ * @reserved1: reserved
+ * @corner_sense_0: Sensitivity of corner in dark area. u13.0, default 0.
+ * @reserved2: reserved
+ * @delta_corner_sense: Difference in the sensitivity of corners between
+ * the bright and dark areas. u13.0, default 8191.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_sense_config {
+ __u32 edge_sense_0:13;
+ __u32 reserved0:3;
+ __u32 delta_edge_sense:13;
+ __u32 reserved1:3;
+ __u32 corner_sense_0:13;
+ __u32 reserved2:3;
+ __u32 delta_corner_sense:13;
+ __u32 reserved3:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_gain_config - Luma(Y) edge enhancement
+ * noise reduction gain config
+ *
+ * @gain_pos_0: Gain for positive edge in dark area. u5.0, [0, 16], default 2.
+ * @reserved0: reserved
+ * @delta_gain_posi: Difference in the gain of edges between the bright and
+ * dark areas for positive edges. u5.0, [0, 16], default 0.
+ * @reserved1: reserved
+ * @gain_neg_0: Gain for negative edge in dark area. u5.0, [0, 16], default 8.
+ * @reserved2: reserved
+ * @delta_gain_neg: Difference in the gain of edges between the bright and
+ * dark areas for negative edges. u5.0, [0, 16], default 0.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_gain_config {
+ __u32 gain_pos_0:5;
+ __u32 reserved0:3;
+ __u32 delta_gain_posi:5;
+ __u32 reserved1:3;
+ __u32 gain_neg_0:5;
+ __u32 reserved2:3;
+ __u32 delta_gain_neg:5;
+ __u32 reserved3:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_clip_config - Luma(Y) edge enhancement
+ * noise reduction clipping config
+ *
+ * @clip_pos_0: Limit of positive edge in dark area
+ * u5, value [0, 16], default 8.
+ * @reserved0: reserved
+ * @delta_clip_posi: Difference in the limit of edges between the bright
+ * and dark areas for positive edges.
+ * u5, value [0, 16], default 8.
+ * @reserved1: reserved
+ * @clip_neg_0: Limit of negative edge in dark area
+ * u5, value [0, 16], default 8.
+ * @reserved2: reserved
+ * @delta_clip_neg: Difference in the limit of edges between the bright
+ * and dark areas for negative edges.
+ * u5, value [0, 16], default 8.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_clip_config {
+ __u32 clip_pos_0:5;
+ __u32 reserved0:3;
+ __u32 delta_clip_posi:5;
+ __u32 reserved1:3;
+ __u32 clip_neg_0:5;
+ __u32 reserved2:3;
+ __u32 delta_clip_neg:5;
+ __u32 reserved3:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_frng_config - Luma(Y) edge enhancement
+ * noise reduction fringe config
+ *
+ * @gain_exp: Common exponent of gains, u4, [0, 8], default 2.
+ * @reserved0: reserved
+ * @min_edge: Threshold for edge and smooth stitching, u13.
+ * @reserved1: reserved
+ * @lin_seg_param: Power of LinSeg, u4.
+ * @reserved2: reserved
+ * @t1: Parameter for enabling/disabling the edge enhancement, u1.0, [0, 1],
+ * default 1.
+ * @t2: Parameter for enabling/disabling the smoothing, u1.0, [0, 1],
+ * default 1.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_frng_config {
+ __u32 gain_exp:4;
+ __u32 reserved0:28;
+ __u32 min_edge:13;
+ __u32 reserved1:3;
+ __u32 lin_seg_param:4;
+ __u32 reserved2:4;
+ __u32 t1:1;
+ __u32 t2:1;
+ __u32 reserved3:6;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_diag_config - Luma(Y) edge enhancement
+ * noise reduction diagonal config
+ *
+ * @diag_disc_g: Coefficient that prioritize diagonal edge direction on
+ * horizontal or vertical for final enhancement.
+ * u4.0, [1, 15], default 1.
+ * @reserved0: reserved
+ * @hvw_hor: Weight of horizontal/vertical edge enhancement for hv edge.
+ * u2.2, [1, 15], default 4.
+ * @dw_hor: Weight of diagonal edge enhancement for hv edge.
+ * u2.2, [1, 15], default 1.
+ * @hvw_diag: Weight of horizontal/vertical edge enhancement for diagonal edge.
+ * u2.2, [1, 15], default 1.
+ * @dw_diag: Weight of diagonal edge enhancement for diagonal edge.
+ * u2.2, [1, 15], default 4.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_diag_config {
+ __u32 diag_disc_g:4;
+ __u32 reserved0:4;
+ __u32 hvw_hor:4;
+ __u32 dw_hor:4;
+ __u32 hvw_diag:4;
+ __u32 dw_diag:4;
+ __u32 reserved1:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config - Luma(Y) edge enhancement
+ * noise reduction false color correction (FCC) coring config
+ *
+ * @pos_0: Gain for positive edge in dark, u13.0, [0, 16], default 0.
+ * @reserved0: reserved
+ * @pos_delta: Gain for positive edge in bright, value: pos_0 + pos_delta <=16
+ * u13.0, default 0.
+ * @reserved1: reserved
+ * @neg_0: Gain for negative edge in dark area, u13.0, range [0, 16], default 0.
+ * @reserved2: reserved
+ * @neg_delta: Gain for negative edge in bright area. neg_0 + neg_delta <=16
+ * u13.0, default 0.
+ * @reserved3: reserved
+ *
+ * Coring is a simple soft thresholding technique.
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config {
+ __u32 pos_0:13;
+ __u32 reserved0:3;
+ __u32 pos_delta:13;
+ __u32 reserved1:3;
+ __u32 neg_0:13;
+ __u32 reserved2:3;
+ __u32 neg_delta:13;
+ __u32 reserved3:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_config - Edge enhancement and noise reduction
+ *
+ * @lpf: low-pass filter config. See &ipu3_uapi_yuvp1_y_ee_nr_lpf_config
+ * @sense: sensitivity config. See &ipu3_uapi_yuvp1_y_ee_nr_sense_config
+ * @gain: gain config as defined in &ipu3_uapi_yuvp1_y_ee_nr_gain_config
+ * @clip: clip config as defined in &ipu3_uapi_yuvp1_y_ee_nr_clip_config
+ * @frng: fringe config as defined in &ipu3_uapi_yuvp1_y_ee_nr_frng_config
+ * @diag: diagonal edge config. See &ipu3_uapi_yuvp1_y_ee_nr_diag_config
+ * @fc_coring: coring config for fringe control. See
+ * &ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_config {
+ struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config lpf;
+ struct ipu3_uapi_yuvp1_y_ee_nr_sense_config sense;
+ struct ipu3_uapi_yuvp1_y_ee_nr_gain_config gain;
+ struct ipu3_uapi_yuvp1_y_ee_nr_clip_config clip;
+ struct ipu3_uapi_yuvp1_y_ee_nr_frng_config frng;
+ struct ipu3_uapi_yuvp1_y_ee_nr_diag_config diag;
+ struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config fc_coring;
+} __packed;
+
+/* Total Color Correction */
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gen_control_static_config - Total color correction
+ * general control config
+ *
+ * @en:	0 - TCC disabled, output = input; 1 - TCC enabled.
+ * @blend_shift: blend shift, Range[3, 4], default NA.
+ * @gain_according_to_y_only: 0: Gain is calculated according to YUV,
+ * 1: Gain is calculated according to Y only
+ * @reserved0: reserved
+ * @gamma: Final blending coefficients. Values[-16, 16], default NA.
+ * @reserved1: reserved
+ * @delta: Final blending coefficients. Values[-16, 16], default NA.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_yuvp2_tcc_gen_control_static_config {
+ __u32 en:1;
+ __u32 blend_shift:3;
+ __u32 gain_according_to_y_only:1;
+ __u32 reserved0:11;
+ __s32 gamma:5;
+ __u32 reserved1:3;
+ __s32 delta:5;
+ __u32 reserved2:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config - Total color correction
+ * multi-axis color control (MACC) config
+ *
+ * @a: a coefficient for 2x2 MACC conversion matrix.
+ * @reserved0: reserved
+ * @b: b coefficient 2x2 MACC conversion matrix.
+ * @reserved1: reserved
+ * @c: c coefficient for 2x2 MACC conversion matrix.
+ * @reserved2: reserved
+ * @d: d coefficient for 2x2 MACC conversion matrix.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config {
+ __s32 a:12;
+ __u32 reserved0:4;
+ __s32 b:12;
+ __u32 reserved1:4;
+ __s32 c:12;
+ __u32 reserved2:4;
+ __s32 d:12;
+ __u32 reserved3:4;
+} __packed;
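+
+/*
+ * Illustration only, not part of the ABI: the a/b/c/d values above describe
+ * a per-sector 2x2 transform of the chroma pair. Ignoring the fixed-point
+ * normalization (which this header does not document), the transform has
+ * the shape:
+ *
+ *	u_out = a * u_in + b * v_in;
+ *	v_out = c * u_in + d * v_in;
+ */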
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_table_static_config - Total color correction
+ * multi-axis color control (MACC) table array
+ *
+ * @entries: config for multi axis color correction, as specified by
+ * &ipu3_uapi_yuvp2_tcc_macc_elem_static_config
+ */
+struct ipu3_uapi_yuvp2_tcc_macc_table_static_config {
+ struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config
+ entries[IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config - Total color correction
+ * inverse y lookup table
+ *
+ * @entries: lookup table for inverse y estimation, used to estimate the
+ *           ratio between luma and chroma. Chroma is approximated by the
+ *           absolute value of the radius on the chroma plane
+ *           (R = sqrt(u^2 + v^2)) and luma is approximated by 1/Y.
+ */
+struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config {
+ __u16 entries[IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config - Total color
+ * correction lookup table for PCWL
+ *
+ * @entries: lookup table for gain piecewise linear transformation (PCWL)
+ */
+struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config {
+ __u16 entries[IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config - Total color correction
+ * lookup table for r square root
+ *
+ * @entries: lookup table for r square root estimation
+ */
+struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config {
+ __s16 entries[IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_static_config - Total color correction static config
+ *
+ * @gen_control: general config for Total Color Correction
+ * @macc_table: config for multi axis color correction
+ * @inv_y_lut: lookup table for inverse y estimation
+ * @gain_pcwl: lookup table for gain PCWL
+ * @r_sqr_lut: lookup table for r square root estimation.
+ */
+struct ipu3_uapi_yuvp2_tcc_static_config {
+ struct ipu3_uapi_yuvp2_tcc_gen_control_static_config gen_control;
+ struct ipu3_uapi_yuvp2_tcc_macc_table_static_config macc_table;
+ struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config inv_y_lut;
+ struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config gain_pcwl;
+ struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config r_sqr_lut;
+} __packed;
+
+/* Advanced Noise Reduction related structs */
+
+/*
+ * struct ipu3_uapi_anr_alpha - Advanced noise reduction alpha
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_alpha {
+ __u16 gr;
+ __u16 r;
+ __u16 b;
+ __u16 gb;
+ __u16 dc_gr;
+ __u16 dc_r;
+ __u16 dc_b;
+ __u16 dc_gb;
+} __packed;
+
+/*
+ * struct ipu3_uapi_anr_beta - Advanced noise reduction beta
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_beta {
+ __u16 beta_gr;
+ __u16 beta_r;
+ __u16 beta_b;
+ __u16 beta_gb;
+} __packed;
+
+/*
+ * struct ipu3_uapi_anr_plane_color - Advanced noise reduction per plane R, Gr,
+ * Gb and B register settings
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_plane_color {
+ __u16 reg_w_gr[16];
+ __u16 reg_w_r[16];
+ __u16 reg_w_b[16];
+ __u16 reg_w_gb[16];
+} __packed;
+
+/**
+ * struct ipu3_uapi_anr_transform_config - Advanced noise reduction transform
+ *
+ * @enable: advanced noise reduction enabled.
+ * @adaptive_treshhold_en: On IPU3, adaptive threshold is always enabled.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ * @alpha: use the following defaults:
+ * 13, 13, 13, 13, 0, 0, 0, 0
+ * 11, 11, 11, 11, 0, 0, 0, 0
+ * 14, 14, 14, 14, 0, 0, 0, 0
+ * @beta: use the following defaults:
+ * 24, 24, 24, 24
+ * 21, 20, 20, 21
+ * 25, 25, 25, 25
+ * @color: use defaults defined in driver/media/pci/intel/ipu3-tables.c
+ * @sqrt_lut: 11 bits per element, values =
+ * [724 768 810 849 887
+ * 923 958 991 1024 1056
+ * 1116 1145 1173 1201 1086
+ * 1228 1254 1280 1305 1330
+ * 1355 1379 1402 1425 1448]
+ * @xreset: Reset value of X for r^2 calculation. Value: col_start - X_center.
+ *          Constraint: Xreset + FrameWidth = 4095, Xreset = -4095, default -1632.
+ * @reserved3: reserved
+ * @yreset: Reset value of Y for r^2 calculation. Value: row_start - Y_center.
+ *          Constraint: Yreset + FrameHeight = 4095, Yreset = -4095, default -1224.
+ * @reserved4: reserved
+ * @x_sqr_reset: Reset value of X^2 for r^2 calculation Value = (Xreset)^2
+ * @r_normfactor: Normalization factor for R. Default 14.
+ * @reserved5: reserved
+ * @y_sqr_reset: Reset value of Y^2 for r^2 calculation Value = (Yreset)^2
+ * @gain_scale: Parameter describing shading gain as a function of distance
+ * from the image center.
+ * A single value per frame, loaded by the driver. Default 115.
+ */
+struct ipu3_uapi_anr_transform_config {
+ __u32 enable:1; /* 0 or 1, disabled or enabled */
+ __u32 adaptive_treshhold_en:1; /* On IPU3, always enabled */
+
+ __u32 reserved1:30;
+ __u8 reserved2[44];
+
+ struct ipu3_uapi_anr_alpha alpha[3];
+ struct ipu3_uapi_anr_beta beta[3];
+ struct ipu3_uapi_anr_plane_color color[3];
+
+ __u16 sqrt_lut[IPU3_UAPI_ANR_LUT_SIZE]; /* 11 bits per element */
+
+ __s16 xreset:13;
+ __u16 reserved3:3;
+ __s16 yreset:13;
+ __u16 reserved4:3;
+
+ __u32 x_sqr_reset:24;
+ __u32 r_normfactor:5;
+ __u32 reserved5:3;
+
+ __u32 y_sqr_reset:24;
+ __u32 gain_scale:8;
+} __packed;
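+
+/*
+ * Worked example (not part of the ABI): with the documented defaults above,
+ * x_sqr_reset and y_sqr_reset follow directly from the squared reset values:
+ *
+ *	xreset = -1632  =>  x_sqr_reset = 1632 * 1632 = 2663424
+ *	yreset = -1224  =>  y_sqr_reset = 1224 * 1224 = 1498176
+ *
+ * Both results fit in the 24-bit x_sqr_reset/y_sqr_reset fields.
+ */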
+
+/**
+ * struct ipu3_uapi_anr_stitch_pyramid - ANR stitch pyramid
+ *
+ * @entry0: pyramid LUT entry0, range [0x0, 0x3f]
+ * @entry1: pyramid LUT entry1, range [0x0, 0x3f]
+ * @entry2: pyramid LUT entry2, range [0x0, 0x3f]
+ * @reserved: reserved
+ */
+struct ipu3_uapi_anr_stitch_pyramid {
+ __u32 entry0:6;
+ __u32 entry1:6;
+ __u32 entry2:6;
+ __u32 reserved:14;
+} __packed;
+
+/**
+ * struct ipu3_uapi_anr_stitch_config - ANR stitch config
+ *
+ * @anr_stitch_en: enable stitch. Enabled with 1.
+ * @reserved: reserved
+ * @pyramid: pyramid table as defined by &ipu3_uapi_anr_stitch_pyramid
+ * default values:
+ * { 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
+ * { 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
+ * { 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
+ * { 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
+ * { 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
+ * { 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
+ * { 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3}, { 1 }
+ */
+struct ipu3_uapi_anr_stitch_config {
+ __u32 anr_stitch_en;
+ __u8 reserved[44];
+ struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
+} __packed;
+
+/**
+ * struct ipu3_uapi_anr_config - ANR config
+ *
+ * @transform: advanced noise reduction transform config as specified by
+ * &ipu3_uapi_anr_transform_config
+ * @stitch: create 4x4 patch from 4 surrounding 8x8 patches.
+ */
+struct ipu3_uapi_anr_config {
+ struct ipu3_uapi_anr_transform_config transform __attribute__((aligned(32)));
+ struct ipu3_uapi_anr_stitch_config stitch __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_acc_param - Accelerator cluster parameters
+ *
+ * ACC refers to the HW cluster containing all Fixed Functions (FFs). Each FF
+ * implements a specific algorithm.
+ *
+ * @bnr: parameters for bayer noise reduction static config. See
+ * &ipu3_uapi_bnr_static_config
+ * @green_disparity: disparity static config between gr and gb channel.
+ * See &ipu3_uapi_bnr_static_config_green_disparity
+ * @dm: de-mosaic config. See &ipu3_uapi_dm_config
+ * @ccm: color correction matrix. See &ipu3_uapi_ccm_mat_config
+ * @gamma: gamma correction config. See &ipu3_uapi_gamma_config
+ * @csc: color space conversion matrix. See &ipu3_uapi_csc_mat_config
+ * @cds: color down sample config. See &ipu3_uapi_cds_params
+ * @shd: lens shading correction config. See &ipu3_uapi_shd_config
+ * @iefd: Image enhancement filter and denoise config.
+ * &ipu3_uapi_yuvp1_iefd_config
+ * @yds_c0: y down scaler config. &ipu3_uapi_yuvp1_yds_config
+ * @chnr_c0: chroma noise reduction config. &ipu3_uapi_yuvp1_chnr_config
+ * @y_ee_nr: y edge enhancement and noise reduction config.
+ * &ipu3_uapi_yuvp1_y_ee_nr_config
+ * @yds: y down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @chnr: chroma noise reduction config. See &ipu3_uapi_yuvp1_chnr_config
+ * @yds2: y channel down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @tcc: total color correction config as defined in struct
+ * &ipu3_uapi_yuvp2_tcc_static_config
+ * @anr: advanced noise reduction config. See &ipu3_uapi_anr_config
+ * @awb_fr: AWB filter response config. See &ipu3_uapi_awb_fr_config
+ * @ae: auto exposure config. As specified by &ipu3_uapi_ae_config
+ * @af: auto focus config. As specified by &ipu3_uapi_af_config
+ * @awb: auto white balance config. As specified by &ipu3_uapi_awb_config
+ */
+struct ipu3_uapi_acc_param {
+ struct ipu3_uapi_bnr_static_config bnr;
+ struct ipu3_uapi_bnr_static_config_green_disparity
+ green_disparity __attribute__((aligned(32)));
+ struct ipu3_uapi_dm_config dm __attribute__((aligned(32)));
+ struct ipu3_uapi_ccm_mat_config ccm __attribute__((aligned(32)));
+ struct ipu3_uapi_gamma_config gamma __attribute__((aligned(32)));
+ struct ipu3_uapi_csc_mat_config csc __attribute__((aligned(32)));
+ struct ipu3_uapi_cds_params cds __attribute__((aligned(32)));
+ struct ipu3_uapi_shd_config shd __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_iefd_config iefd __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds_c0 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_chnr_config chnr __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
+ struct ipu3_uapi_anr_config anr;
+ struct ipu3_uapi_awb_fr_config_s awb_fr;
+ struct ipu3_uapi_ae_config ae;
+ struct ipu3_uapi_af_config_s af;
+ struct ipu3_uapi_awb_config awb;
+} __packed;
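+
+/*
+ * Illustration only, not part of this header: since the members above mix
+ * __packed with 32-byte member alignment, a consumer of the layout can
+ * sanity-check the resulting offsets at build time, e.g.:
+ *
+ *	#include <stddef.h>
+ *
+ *	_Static_assert(offsetof(struct ipu3_uapi_acc_param, dm) % 32 == 0,
+ *		       "dm must start on a 32-byte boundary");
+ */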
+
+/**
+ * struct ipu3_uapi_isp_lin_vmem_params - Linearization parameters
+ *
+ * @lin_lutlow_gr: linearization look-up table for GR channel interpolation.
+ * @lin_lutlow_r: linearization look-up table for R channel interpolation.
+ * @lin_lutlow_b: linearization look-up table for B channel interpolation.
+ * @lin_lutlow_gb: linearization look-up table for GB channel interpolation.
+ * lin_lutlow_gr / lin_lutlow_r / lin_lutlow_b /
+ * lin_lutlow_gb <= LIN_MAX_VALUE - 1.
+ * @lin_lutdif_gr: lin_lutlow_gr[i+1] - lin_lutlow_gr[i].
+ * @lin_lutdif_r: lin_lutlow_r[i+1] - lin_lutlow_r[i].
+ * @lin_lutdif_b: lin_lutlow_b[i+1] - lin_lutlow_b[i].
+ * @lin_lutdif_gb: lin_lutlow_gb[i+1] - lin_lutlow_gb[i].
+ */
+struct ipu3_uapi_isp_lin_vmem_params {
+ __s16 lin_lutlow_gr[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutlow_r[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutlow_b[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutlow_gb[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_gr[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_r[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_b[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_gb[IPU3_UAPI_LIN_LUT_SIZE];
+} __packed;
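+
+/*
+ * Illustration only, not part of the ABI: the *_lutdif arrays are the
+ * forward differences of the *_lutlow arrays, which suggests a piecewise
+ * linear interpolation of roughly the following shape (the segment width
+ * and fraction encoding are assumptions of this sketch, not documented
+ * here):
+ *
+ *	out = lin_lutlow_gr[i] + (lin_lutdif_gr[i] * frac) / seg_width;
+ */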
+
+/* Temporal Noise Reduction */
+
+/**
+ * struct ipu3_uapi_isp_tnr3_vmem_params - Temporal noise reduction vector
+ * memory parameters
+ *
+ * @slope: slope setting in interpolation curve for temporal noise reduction.
+ * @reserved1: reserved
+ * @sigma: knee point setting in interpolation curve for temporal
+ * noise reduction.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_isp_tnr3_vmem_params {
+ __u16 slope[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+ __u16 reserved1[IPU3_UAPI_ISP_VEC_ELEMS
+ - IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+ __u16 sigma[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+ __u16 reserved2[IPU3_UAPI_ISP_VEC_ELEMS
+ - IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+} __packed;
+
+/**
+ * struct ipu3_uapi_isp_tnr3_params - Temporal noise reduction v3 parameters
+ *
+ * @knee_y1: Knee point. TNR3 assumes the standard deviations of Y, U and
+ *           V at Y1 to be TnrY1_Sigma_Y, U and V.
+ * @knee_y2: Knee point. TNR3 assumes the standard deviations of Y, U and
+ *           V at Y2 to be TnrY2_Sigma_Y, U and V.
+ * @maxfb_y: Max feedback gain for Y
+ * @maxfb_u: Max feedback gain for U
+ * @maxfb_v: Max feedback gain for V
+ * @round_adj_y: rounding Adjust for Y
+ * @round_adj_u: rounding Adjust for U
+ * @round_adj_v: rounding Adjust for V
+ * @ref_buf_select: selection of the reference frame buffer to be used.
+ */
+struct ipu3_uapi_isp_tnr3_params {
+ __u32 knee_y1;
+ __u32 knee_y2;
+ __u32 maxfb_y;
+ __u32 maxfb_u;
+ __u32 maxfb_v;
+ __u32 round_adj_y;
+ __u32 round_adj_u;
+ __u32 round_adj_v;
+ __u32 ref_buf_select;
+} __packed;
+
+/* Extreme Noise Reduction version 3 */
+
+/**
+ * struct ipu3_uapi_isp_xnr3_vmem_params - Extreme noise reduction v3
+ * vector memory parameters
+ *
+ * @x: xnr3 parameters.
+ * @a: xnr3 parameters.
+ * @b: xnr3 parameters.
+ * @c: xnr3 parameters.
+ */
+struct ipu3_uapi_isp_xnr3_vmem_params {
+ __u16 x[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 a[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 b[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 c[IPU3_UAPI_ISP_VEC_ELEMS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_xnr3_alpha_params - Extreme noise reduction v3
+ * alpha tuning parameters
+ *
+ * @y0: Sigma for Y range similarity in dark area.
+ * @u0: Sigma for U range similarity in dark area.
+ * @v0: Sigma for V range similarity in dark area.
+ * @ydiff: Sigma difference for Y between bright area and dark area.
+ * @udiff: Sigma difference for U between bright area and dark area.
+ * @vdiff: Sigma difference for V between bright area and dark area.
+ */
+struct ipu3_uapi_xnr3_alpha_params {
+ __u32 y0;
+ __u32 u0;
+ __u32 v0;
+ __u32 ydiff;
+ __u32 udiff;
+ __u32 vdiff;
+} __packed;
+
+/**
+ * struct ipu3_uapi_xnr3_coring_params - Extreme noise reduction v3
+ * coring parameters
+ *
+ * @u0: Coring Threshold of U channel in dark area.
+ * @v0: Coring Threshold of V channel in dark area.
+ * @udiff: Threshold difference of U channel between bright and dark area.
+ * @vdiff: Threshold difference of V channel between bright and dark area.
+ */
+struct ipu3_uapi_xnr3_coring_params {
+ __u32 u0;
+ __u32 v0;
+ __u32 udiff;
+ __u32 vdiff;
+} __packed;
+
+/**
+ * struct ipu3_uapi_xnr3_blending_params - Blending factor
+ *
+ * @strength: The factor for blending output with input. This is a tuning
+ *            parameter. Higher values lead to more aggressive XNR operation.
+ */
+struct ipu3_uapi_xnr3_blending_params {
+ __u32 strength;
+} __packed;
+
+/**
+ * struct ipu3_uapi_isp_xnr3_params - Extreme noise reduction v3 parameters
+ *
+ * @alpha: parameters for xnr3 alpha. See &ipu3_uapi_xnr3_alpha_params
+ * @coring: parameters for xnr3 coring. See &ipu3_uapi_xnr3_coring_params
+ * @blending: parameters for xnr3 blending. See &ipu3_uapi_xnr3_blending_params
+ */
+struct ipu3_uapi_isp_xnr3_params {
+ struct ipu3_uapi_xnr3_alpha_params alpha;
+ struct ipu3_uapi_xnr3_coring_params coring;
+ struct ipu3_uapi_xnr3_blending_params blending;
+} __packed;
+
+/***** Obgrid (optical black level compensation) table entry *****/
+
+/**
+ * struct ipu3_uapi_obgrid_param - Optical black level compensation parameters
+ *
+ * @gr: Grid table values for color GR
+ * @r: Grid table values for color R
+ * @b: Grid table values for color B
+ * @gb: Grid table values for color GB
+ *
+ * Black level is different for red, green, and blue channels. So black level
+ * compensation is different per channel.
+ */
+struct ipu3_uapi_obgrid_param {
+ __u16 gr;
+ __u16 r;
+ __u16 b;
+ __u16 gb;
+} __packed;
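+
+/*
+ * Illustration only, not part of the ABI: per-channel black level
+ * compensation amounts to subtracting the matching grid value from each
+ * raw sample and clamping at zero, e.g. for a Gr pixel:
+ *
+ *	out_gr = max(raw_gr - obgrid.gr, 0);
+ */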
+
+/******************* V4L2_META_FMT_IPU3_PARAMS *******************/
+
+/**
+ * struct ipu3_uapi_flags - bits to indicate which pipeline needs update
+ *
+ * @gdc: 0 = no update, 1 = update.
+ * @obgrid: 0 = no update, 1 = update.
+ * @reserved1: Not used.
+ * @acc_bnr: 0 = no update, 1 = update.
+ * @acc_green_disparity: 0 = no update, 1 = update.
+ * @acc_dm: 0 = no update, 1 = update.
+ * @acc_ccm: 0 = no update, 1 = update.
+ * @acc_gamma: 0 = no update, 1 = update.
+ * @acc_csc: 0 = no update, 1 = update.
+ * @acc_cds: 0 = no update, 1 = update.
+ * @acc_shd: 0 = no update, 1 = update.
+ * @reserved2: Not used.
+ * @acc_iefd: 0 = no update, 1 = update.
+ * @acc_yds_c0: 0 = no update, 1 = update.
+ * @acc_chnr_c0: 0 = no update, 1 = update.
+ * @acc_y_ee_nr: 0 = no update, 1 = update.
+ * @acc_yds: 0 = no update, 1 = update.
+ * @acc_chnr: 0 = no update, 1 = update.
+ * @acc_ytm: 0 = no update, 1 = update.
+ * @acc_yds2: 0 = no update, 1 = update.
+ * @acc_tcc: 0 = no update, 1 = update.
+ * @acc_dpc: 0 = no update, 1 = update.
+ * @acc_bds: 0 = no update, 1 = update.
+ * @acc_anr: 0 = no update, 1 = update.
+ * @acc_awb_fr: 0 = no update, 1 = update.
+ * @acc_ae: 0 = no update, 1 = update.
+ * @acc_af: 0 = no update, 1 = update.
+ * @acc_awb: 0 = no update, 1 = update.
+ * @reserved3: Not used.
+ * @lin_vmem_params: 0 = no update, 1 = update.
+ * @tnr3_vmem_params: 0 = no update, 1 = update.
+ * @xnr3_vmem_params: 0 = no update, 1 = update.
+ * @tnr3_dmem_params: 0 = no update, 1 = update.
+ * @xnr3_dmem_params: 0 = no update, 1 = update.
+ * @reserved4: Not used.
+ * @obgrid_param: 0 = no update, 1 = update.
+ * @reserved5: Not used.
+ */
+struct ipu3_uapi_flags {
+ __u32 gdc:1;
+ __u32 obgrid:1;
+ __u32 reserved1:30;
+
+ __u32 acc_bnr:1;
+ __u32 acc_green_disparity:1;
+ __u32 acc_dm:1;
+ __u32 acc_ccm:1;
+ __u32 acc_gamma:1;
+ __u32 acc_csc:1;
+ __u32 acc_cds:1;
+ __u32 acc_shd:1;
+ __u32 reserved2:2;
+ __u32 acc_iefd:1;
+ __u32 acc_yds_c0:1;
+ __u32 acc_chnr_c0:1;
+ __u32 acc_y_ee_nr:1;
+ __u32 acc_yds:1;
+ __u32 acc_chnr:1;
+ __u32 acc_ytm:1;
+ __u32 acc_yds2:1;
+ __u32 acc_tcc:1;
+ __u32 acc_dpc:1;
+ __u32 acc_bds:1;
+ __u32 acc_anr:1;
+ __u32 acc_awb_fr:1;
+ __u32 acc_ae:1;
+ __u32 acc_af:1;
+ __u32 acc_awb:1;
+ __u32 reserved3:4;
+
+ __u32 lin_vmem_params:1;
+ __u32 tnr3_vmem_params:1;
+ __u32 xnr3_vmem_params:1;
+ __u32 tnr3_dmem_params:1;
+ __u32 xnr3_dmem_params:1;
+ __u32 reserved4:1;
+ __u32 obgrid_param:1;
+ __u32 reserved5:25;
+} __packed;
+
+/**
+ * struct ipu3_uapi_params - V4L2_META_FMT_IPU3_PARAMS
+ *
+ * @use: select which parameters to apply, see &ipu3_uapi_flags
+ * @acc_param: ACC parameters, as specified by &ipu3_uapi_acc_param
+ * @lin_vmem_params: linearization VMEM, as specified by
+ * &ipu3_uapi_isp_lin_vmem_params
+ * @tnr3_vmem_params: tnr3 VMEM as specified by
+ * &ipu3_uapi_isp_tnr3_vmem_params
+ * @xnr3_vmem_params: xnr3 VMEM as specified by
+ * &ipu3_uapi_isp_xnr3_vmem_params
+ * @tnr3_dmem_params: tnr3 DMEM as specified by &ipu3_uapi_isp_tnr3_params
+ * @xnr3_dmem_params: xnr3 DMEM as specified by &ipu3_uapi_isp_xnr3_params
+ * @obgrid_param: obgrid parameters as specified by
+ * &ipu3_uapi_obgrid_param
+ *
+ * The video queue "parameters" is of format V4L2_META_FMT_IPU3_PARAMS.
+ * This is a "single plane" v4l2_meta_format using V4L2_BUF_TYPE_META_OUTPUT.
+ *
+ * struct ipu3_uapi_params as defined below contains a lot of parameters and
+ * ipu3_uapi_flags selects which parameters to apply.
+ */
+struct ipu3_uapi_params {
+ /* Flags which of the settings below are to be applied */
+ struct ipu3_uapi_flags use __attribute__((aligned(32)));
+
+ /* Accelerator cluster parameters */
+ struct ipu3_uapi_acc_param acc_param;
+
+ /* ISP vector address space parameters */
+ struct ipu3_uapi_isp_lin_vmem_params lin_vmem_params;
+ struct ipu3_uapi_isp_tnr3_vmem_params tnr3_vmem_params;
+ struct ipu3_uapi_isp_xnr3_vmem_params xnr3_vmem_params;
+
+ /* ISP data memory (DMEM) parameters */
+ struct ipu3_uapi_isp_tnr3_params tnr3_dmem_params;
+ struct ipu3_uapi_isp_xnr3_params xnr3_dmem_params;
+
+ /* Optical black level compensation */
+ struct ipu3_uapi_obgrid_param obgrid_param;
+} __packed;
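+
+/*
+ * Illustration only, not part of the ABI: a minimal userspace sketch of
+ * updating a single block. Only the blocks whose bit is set in @use are
+ * consumed; the rest of the (large) buffer can be left untouched. Queueing
+ * the buffer on the META_OUTPUT "parameters" video node is omitted here,
+ * and the value 64 is an arbitrary example level.
+ *
+ *	struct ipu3_uapi_params *p = buf;	(mapped META_OUTPUT buffer)
+ *
+ *	memset(p, 0, sizeof(*p));
+ *	p->use.obgrid_param = 1;
+ *	p->obgrid_param.gr = p->obgrid_param.r = 64;
+ *	p->obgrid_param.b = p->obgrid_param.gb = 64;
+ */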
+
+#endif /* __IPU3_UAPI_H */
diff --git a/drivers/staging/media/ipu3/ipu3-abi.h b/drivers/staging/media/ipu3/ipu3-abi.h
new file mode 100644
index 000000000000..c76935b436d7
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-abi.h
@@ -0,0 +1,2011 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_ABI_H
+#define __IPU3_ABI_H
+
+#include "include/uapi/intel-ipu3.h"
+
+/******************* IMGU Hardware information *******************/
+
+typedef u32 imgu_addr_t;
+
+#define IMGU_ISP_VMEM_ALIGN 128
+#define IMGU_DVS_BLOCK_W 64
+#define IMGU_DVS_BLOCK_H 32
+#define IMGU_GDC_BUF_X (2 * IMGU_DVS_BLOCK_W)
+#define IMGU_GDC_BUF_Y IMGU_DVS_BLOCK_H
+/* n = 0..1 */
+#define IMGU_SP_PMEM_BASE(n) (0x20000 + (n) * 0x4000)
+#define IMGU_MAX_BQ_GRID_WIDTH 80
+#define IMGU_MAX_BQ_GRID_HEIGHT 60
+#define IMGU_OBGRID_TILE_SIZE 16
+#define IMGU_PIXELS_PER_WORD 50
+#define IMGU_BYTES_PER_WORD 64
+#define IMGU_STRIPE_FIXED_HALF_OVERLAP 2
+#define IMGU_SHD_SETS 3
+#define IMGU_BDS_MIN_CLIP_VAL 0
+#define IMGU_BDS_MAX_CLIP_VAL 2
+
+#define IMGU_ABI_AWB_MAX_CELLS_PER_SET 160
+#define IMGU_ABI_AF_MAX_CELLS_PER_SET 32
+#define IMGU_ABI_AWB_FR_MAX_CELLS_PER_SET 32
+
+#define IMGU_ABI_ACC_OP_IDLE 0
+#define IMGU_ABI_ACC_OP_END_OF_ACK 1
+#define IMGU_ABI_ACC_OP_END_OF_OPS 2
+#define IMGU_ABI_ACC_OP_NO_OPS 3
+
+#define IMGU_ABI_ACC_OPTYPE_PROCESS_LINES 0
+#define IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA 1
+
+/* Register definitions */
+
+/* PM_CTRL_0_5_0_IMGHMMADR */
+#define IMGU_REG_PM_CTRL 0x0
+#define IMGU_PM_CTRL_START BIT(0)
+#define IMGU_PM_CTRL_CFG_DONE BIT(1)
+#define IMGU_PM_CTRL_RACE_TO_HALT BIT(2)
+#define IMGU_PM_CTRL_NACK_ALL BIT(3)
+#define IMGU_PM_CTRL_CSS_PWRDN BIT(4)
+#define IMGU_PM_CTRL_RST_AT_EOF BIT(5)
+#define IMGU_PM_CTRL_FORCE_HALT BIT(6)
+#define IMGU_PM_CTRL_FORCE_UNHALT BIT(7)
+#define IMGU_PM_CTRL_FORCE_PWRDN BIT(8)
+#define IMGU_PM_CTRL_FORCE_RESET BIT(9)
+
+/* SYSTEM_REQ_0_5_0_IMGHMMADR */
+#define IMGU_REG_SYSTEM_REQ 0x18
+#define IMGU_SYSTEM_REQ_FREQ_MASK 0x3f
+#define IMGU_SYSTEM_REQ_FREQ_DIVIDER 25
+#define IMGU_REG_INT_STATUS 0x30
+#define IMGU_REG_INT_ENABLE 0x34
+#define IMGU_REG_INT_CSS_IRQ BIT(31)
+/* STATE_0_5_0_IMGHMMADR */
+#define IMGU_REG_STATE 0x130
+#define IMGU_STATE_HALT_STS BIT(0)
+#define IMGU_STATE_IDLE_STS BIT(1)
+#define IMGU_STATE_POWER_UP BIT(2)
+#define IMGU_STATE_POWER_DOWN BIT(3)
+#define IMGU_STATE_CSS_BUSY_MASK 0xc0
+#define IMGU_STATE_PM_FSM_MASK 0x180
+#define IMGU_STATE_PWRDNM_FSM_MASK 0x1E00000
+/* PM_STS_0_5_0_IMGHMMADR */
+#define IMGU_REG_PM_STS 0x140
+
+#define IMGU_REG_BASE 0x4000
+
+#define IMGU_REG_ISP_CTRL (IMGU_REG_BASE + 0x00)
+#define IMGU_CTRL_RST BIT(0)
+#define IMGU_CTRL_START BIT(1)
+#define IMGU_CTRL_BREAK BIT(2)
+#define IMGU_CTRL_RUN BIT(3)
+#define IMGU_CTRL_BROKEN BIT(4)
+#define IMGU_CTRL_IDLE BIT(5)
+#define IMGU_CTRL_SLEEPING BIT(6)
+#define IMGU_CTRL_STALLING BIT(7)
+#define IMGU_CTRL_IRQ_CLEAR BIT(8)
+#define IMGU_CTRL_IRQ_READY BIT(10)
+#define IMGU_CTRL_IRQ_SLEEPING BIT(11)
+#define IMGU_CTRL_ICACHE_INV BIT(12)
+#define IMGU_CTRL_IPREFETCH_EN BIT(13)
+#define IMGU_REG_ISP_START_ADDR (IMGU_REG_BASE + 0x04)
+#define IMGU_REG_ISP_ICACHE_ADDR (IMGU_REG_BASE + 0x10)
+#define IMGU_REG_ISP_PC (IMGU_REG_BASE + 0x1c)
+
+/* SP Registers, sp = 0:SP0; 1:SP1 */
+#define IMGU_REG_SP_CTRL(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x100)
+ /* For bits in IMGU_REG_SP_CTRL, see IMGU_CTRL_* */
+#define IMGU_REG_SP_START_ADDR(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x104)
+#define IMGU_REG_SP_ICACHE_ADDR(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x11c)
+#define IMGU_REG_SP_CTRL_SINK(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x130)
+#define IMGU_REG_SP_PC(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x134)
+
+#define IMGU_REG_TLB_INVALIDATE (IMGU_REG_BASE + 0x300)
+#define IMGU_TLB_INVALIDATE 1
+#define IMGU_REG_L1_PHYS (IMGU_REG_BASE + 0x304) /* 27-bit pfn */
+
+#define IMGU_REG_CIO_GATE_BURST_STATE (IMGU_REG_BASE + 0x404)
+#define IMGU_CIO_GATE_BURST_MASK 0x80
+
+#define IMGU_REG_GP_BUSY (IMGU_REG_BASE + 0x500)
+#define IMGU_REG_GP_STARVING (IMGU_REG_BASE + 0x504)
+#define IMGU_REG_GP_WORKLOAD (IMGU_REG_BASE + 0x508)
+#define IMGU_REG_GP_IRQ(n) (IMGU_REG_BASE + (n) * 4 + 0x50c) /* n = 0..4 */
+#define IMGU_REG_GP_SP1_STRMON_STAT (IMGU_REG_BASE + 0x520)
+#define IMGU_REG_GP_SP2_STRMON_STAT (IMGU_REG_BASE + 0x524)
+#define IMGU_REG_GP_ISP_STRMON_STAT (IMGU_REG_BASE + 0x528)
+#define IMGU_REG_GP_MOD_STRMON_STAT (IMGU_REG_BASE + 0x52c)
+
+/* Port definitions for the streaming monitors. */
+/* For each definition there is signal pair : valid [bit 0]- accept [bit 1] */
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP12DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_DMA2SP1 BIT(2)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP12SP2 BIT(4)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP22SP1 BIT(6)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP12ISP BIT(8)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_ISP2SP1 BIT(10)
+
+#define IMGU_GP_STRMON_STAT_SP2_PORT_SP22DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_SP2_PORT_DMA2SP2 BIT(2)
+#define IMGU_GP_STRMON_STAT_SP2_PORT_SP22SP1 BIT(4)
+#define IMGU_GP_STRMON_STAT_SP2_PORT_SP12SP2 BIT(6)
+
+#define IMGU_GP_STRMON_STAT_ISP_PORT_ISP2DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_ISP_PORT_DMA2ISP BIT(2)
+#define IMGU_GP_STRMON_STAT_ISP_PORT_ISP2SP1 BIT(4)
+#define IMGU_GP_STRMON_STAT_ISP_PORT_SP12ISP BIT(6)
+
+/* Between the devices and the fifo */
+#define IMGU_GP_STRMON_STAT_MOD_PORT_SP12DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DMA2SP1 BIT(2)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_SP22DMA BIT(4)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DMA2SP2 BIT(6)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_ISP2DMA BIT(8)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DMA2ISP BIT(10)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC BIT(12)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS BIT(14)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2DECOMP BIT(16)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DECOMP2CELLS BIT(18)
+/* n = 1..6 */
+#define IMGU_GP_STRMON_STAT_MOD_PORT_S2V(n) (1 << (((n) - 1) * 2 + 20))
+
+/* n = 1..15 */
+#define IMGU_GP_STRMON_STAT_ACCS_PORT_ACC(n) (1 << (((n) - 1) * 2))
+
+/* After FIFO and demux before SP1, n = 1..15 */
+#define IMGU_GP_STRMON_STAT_ACCS2SP1_MON_PORT_ACC(n) (1 << (((n) - 1) * 2))
+
+/* After FIFO and demux before SP2, n = 1..15 */
+#define IMGU_GP_STRMON_STAT_ACCS2SP2_MON_PORT_ACC(n) (1 << (((n) - 1) * 2))
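+
+/*
+ * Illustration only, not part of the ABI: every port above is a
+ * valid/accept pair occupying two adjacent bits, so a port offering data
+ * that is not being accepted (often the interesting case when debugging a
+ * stall) reads back as "valid set, accept clear", e.g. for SP1-to-DMA:
+ *
+ *	stat = readl(base + IMGU_REG_GP_SP1_STRMON_STAT);
+ *	stuck = (stat & IMGU_GP_STRMON_STAT_SP1_PORT_SP12DMA) &&
+ *		!(stat & (IMGU_GP_STRMON_STAT_SP1_PORT_SP12DMA << 1));
+ */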
+
+#define IMGU_REG_GP_HALT (IMGU_REG_BASE + 0x5dc)
+
+ /* n = 0..2 (main ctrl, SP0, SP1) */
+#define IMGU_REG_IRQCTRL_BASE(n) (IMGU_REG_BASE + (n) * 0x100 + 0x700)
+#define IMGU_IRQCTRL_MAIN 0
+#define IMGU_IRQCTRL_SP0 1
+#define IMGU_IRQCTRL_SP1 2
+#define IMGU_IRQCTRL_NUM 3
+#define IMGU_IRQCTRL_IRQ_SP1 BIT(0)
+#define IMGU_IRQCTRL_IRQ_SP2 BIT(1)
+#define IMGU_IRQCTRL_IRQ_ISP BIT(2)
+#define IMGU_IRQCTRL_IRQ_SP1_STREAM_MON BIT(3)
+#define IMGU_IRQCTRL_IRQ_SP2_STREAM_MON BIT(4)
+#define IMGU_IRQCTRL_IRQ_ISP_STREAM_MON BIT(5)
+#define IMGU_IRQCTRL_IRQ_MOD_STREAM_MON BIT(6)
+#define IMGU_IRQCTRL_IRQ_MOD_ISP_STREAM_MON BIT(7)
+#define IMGU_IRQCTRL_IRQ_ACCS_STREAM_MON BIT(8)
+#define IMGU_IRQCTRL_IRQ_ACCS_SP1_STREAM_MON BIT(9)
+#define IMGU_IRQCTRL_IRQ_ACCS_SP2_STREAM_MON BIT(10)
+#define IMGU_IRQCTRL_IRQ_ISP_PMEM_ERROR BIT(11)
+#define IMGU_IRQCTRL_IRQ_ISP_BAMEM_ERROR BIT(12)
+#define IMGU_IRQCTRL_IRQ_ISP_VMEM_ERROR BIT(13)
+#define IMGU_IRQCTRL_IRQ_ISP_DMEM_ERROR BIT(14)
+#define IMGU_IRQCTRL_IRQ_SP1_ICACHE_MEM_ERROR BIT(15)
+#define IMGU_IRQCTRL_IRQ_SP1_DMEM_ERROR BIT(16)
+#define IMGU_IRQCTRL_IRQ_SP2_ICACHE_MEM_ERROR BIT(17)
+#define IMGU_IRQCTRL_IRQ_SP2_DMEM_ERROR BIT(18)
+#define IMGU_IRQCTRL_IRQ_ACCS_SCRATCH_MEM_ERROR BIT(19)
+#define IMGU_IRQCTRL_IRQ_GP_TIMER(n) BIT(20 + (n)) /* n=0..1 */
+#define IMGU_IRQCTRL_IRQ_DMA BIT(22)
+#define IMGU_IRQCTRL_IRQ_SW_PIN(n) BIT(23 + (n)) /* n=0..4 */
+#define IMGU_IRQCTRL_IRQ_ACC_SYS BIT(28)
+#define IMGU_IRQCTRL_IRQ_OUT_FORM_IRQ_CTRL BIT(29)
+#define IMGU_IRQCTRL_IRQ_SP1_IRQ_CTRL BIT(30)
+#define IMGU_IRQCTRL_IRQ_SP2_IRQ_CTRL BIT(31)
+#define IMGU_REG_IRQCTRL_EDGE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x00)
+#define IMGU_REG_IRQCTRL_MASK(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x04)
+#define IMGU_REG_IRQCTRL_STATUS(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x08)
+#define IMGU_REG_IRQCTRL_CLEAR(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x0c)
+#define IMGU_REG_IRQCTRL_ENABLE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x10)
+#define IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x14)
+#define IMGU_REG_IRQCTRL_STR_OUT_ENABLE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x18)
+
+#define IMGU_REG_GP_TIMER (IMGU_REG_BASE + 0xa34)
+
+#define IMGU_REG_SP_DMEM_BASE(n) (IMGU_REG_BASE + (n) * 0x4000 + 0x4000)
+#define IMGU_REG_ISP_DMEM_BASE (IMGU_REG_BASE + 0xc000)
+
+#define IMGU_REG_GDC_BASE (IMGU_REG_BASE + 0x18000)
+#define IMGU_REG_GDC_LUT_BASE (IMGU_REG_GDC_BASE + 0x140)
+#define IMGU_GDC_LUT_MASK ((1 << 12) - 1) /* Range -1024..+1024 */
+
+#define IMGU_SCALER_PHASES 32
+#define IMGU_SCALER_COEFF_BITS 24
+#define IMGU_SCALER_PHASE_COUNTER_PREC_REF 6
+#define IMGU_SCALER_MAX_EXPONENT_SHIFT 3
+#define IMGU_SCALER_FILTER_TAPS 4
+#define IMGU_SCALER_TAPS_Y IMGU_SCALER_FILTER_TAPS
+#define IMGU_SCALER_TAPS_UV (IMGU_SCALER_FILTER_TAPS / 2)
+#define IMGU_SCALER_FIR_PHASES \
+ (IMGU_SCALER_PHASES << IMGU_SCALER_PHASE_COUNTER_PREC_REF)
+
+/******************* imgu_abi_acc_param *******************/
+
+#define IMGU_ABI_SHD_MAX_PROCESS_LINES 31
+#define IMGU_ABI_SHD_MAX_TRANSFERS 31
+#define IMGU_ABI_SHD_MAX_OPERATIONS \
+ (IMGU_ABI_SHD_MAX_PROCESS_LINES + IMGU_ABI_SHD_MAX_TRANSFERS)
+#define IMGU_ABI_SHD_MAX_CELLS_PER_SET 146
+/* largest grid is 73x56 */
+#define IMGU_ABI_SHD_MAX_CFG_SETS (2 * 28)
+
+#define IMGU_ABI_DVS_STAT_MAX_OPERATIONS 100
+#define IMGU_ABI_DVS_STAT_MAX_PROCESS_LINES 52
+#define IMGU_ABI_DVS_STAT_MAX_TRANSFERS 52
+
+#define IMGU_ABI_BDS_SAMPLE_PATTERN_ARRAY_SIZE 8
+#define IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE 32
+
+#define IMGU_ABI_AWB_FR_MAX_TRANSFERS 30
+#define IMGU_ABI_AWB_FR_MAX_PROCESS_LINES 30
+#define IMGU_ABI_AWB_FR_MAX_OPERATIONS \
+ (IMGU_ABI_AWB_FR_MAX_TRANSFERS + IMGU_ABI_AWB_FR_MAX_PROCESS_LINES)
+
+#define IMGU_ABI_AF_MAX_TRANSFERS 30
+#define IMGU_ABI_AF_MAX_PROCESS_LINES 30
+#define IMGU_ABI_AF_MAX_OPERATIONS \
+ (IMGU_ABI_AF_MAX_TRANSFERS + IMGU_ABI_AF_MAX_PROCESS_LINES)
+
+#define IMGU_ABI_AWB_MAX_PROCESS_LINES 68
+#define IMGU_ABI_AWB_MAX_TRANSFERS 68
+#define IMGU_ABI_AWB_MAX_OPERATIONS \
+ (IMGU_ABI_AWB_MAX_PROCESS_LINES + IMGU_ABI_AWB_MAX_TRANSFERS)
+
+#define IMGU_ABI_OSYS_PIN_VF 0
+#define IMGU_ABI_OSYS_PIN_OUT 1
+#define IMGU_ABI_OSYS_PINS 2
+
+#define IMGU_ABI_DVS_STAT_LEVELS 3
+#define IMGU_ABI_YUVP2_YTM_LUT_ENTRIES 256
+#define IMGU_ABI_GDC_FRAC_BITS 8
+#define IMGU_ABI_BINARY_MAX_OUTPUT_PORTS 2
+#define IMGU_ABI_MAX_BINARY_NAME 64
+#define IMGU_ABI_ISP_DDR_WORD_BITS 256
+#define IMGU_ABI_ISP_DDR_WORD_BYTES (IMGU_ABI_ISP_DDR_WORD_BITS / 8)
+#define IMGU_ABI_MAX_STAGES 3
+#define IMGU_ABI_MAX_IF_CONFIGS 3
+#define IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP BIT(31)
+#define IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST BIT(0)
+#define IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST BIT(4)
+#define IMGU_ABI_MAX_SP_THREADS 4
+#define IMGU_ABI_FRAMES_REF 3
+#define IMGU_ABI_FRAMES_TNR 4
+#define IMGU_ABI_BUF_SETS_TNR 1
+
+#define IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread, queue) \
+ (0 << 24 | (thread) << 16 | (queue) << 8)
+#define IMGU_ABI_EVENT_BUFFER_DEQUEUED(queue) (1 << 24 | (queue) << 8)
+#define IMGU_ABI_EVENT_EVENT_DEQUEUED (2 << 24)
+#define IMGU_ABI_EVENT_START_STREAM (3 << 24)
+#define IMGU_ABI_EVENT_STOP_STREAM (4 << 24)
+#define IMGU_ABI_EVENT_MIPI_BUFFERS_READY (5 << 24)
+#define IMGU_ABI_EVENT_UNLOCK_RAW_BUFFER (6 << 24)
+#define IMGU_ABI_EVENT_STAGE_ENABLE_DISABLE (7 << 24)
+
+#define IMGU_ABI_HOST2SP_BUFQ_SIZE 3
+#define IMGU_ABI_SP2HOST_BUFQ_SIZE (2 * IMGU_ABI_MAX_SP_THREADS)
+#define IMGU_ABI_HOST2SP_EVTQ_SIZE (IMGU_ABI_QUEUE_NUM * \
+ IMGU_ABI_MAX_SP_THREADS * 2 + IMGU_ABI_MAX_SP_THREADS * 4)
+#define IMGU_ABI_SP2HOST_EVTQ_SIZE (6 * IMGU_ABI_MAX_SP_THREADS)
+
+#define IMGU_ABI_EVTTYPE_EVENT_SHIFT 0
+#define IMGU_ABI_EVTTYPE_EVENT_MASK (0xff << IMGU_ABI_EVTTYPE_EVENT_SHIFT)
+#define IMGU_ABI_EVTTYPE_PIPE_SHIFT 8
+#define IMGU_ABI_EVTTYPE_PIPE_MASK (0xff << IMGU_ABI_EVTTYPE_PIPE_SHIFT)
+#define IMGU_ABI_EVTTYPE_PIPEID_SHIFT 16
+#define IMGU_ABI_EVTTYPE_PIPEID_MASK (0xff << IMGU_ABI_EVTTYPE_PIPEID_SHIFT)
+#define IMGU_ABI_EVTTYPE_MODULEID_SHIFT 8
+#define IMGU_ABI_EVTTYPE_MODULEID_MASK (0xff << IMGU_ABI_EVTTYPE_MODULEID_SHIFT)
+#define IMGU_ABI_EVTTYPE_LINENO_SHIFT 16
+#define IMGU_ABI_EVTTYPE_LINENO_MASK (0xffff << IMGU_ABI_EVTTYPE_LINENO_SHIFT)
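+
+/*
+ * Illustration only, not part of the ABI: decoding an event word with the
+ * shift/mask pairs above.
+ *
+ *	event_type = (event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
+ *			IMGU_ABI_EVTTYPE_EVENT_SHIFT;
+ *	pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
+ *			IMGU_ABI_EVTTYPE_PIPE_SHIFT;
+ */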
+
+/* Output frame ready */
+#define IMGU_ABI_EVTTYPE_OUT_FRAME_DONE 0
+/* Second output frame ready */
+#define IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE 1
+/* Viewfinder Output frame ready */
+#define IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE 2
+/* Second viewfinder Output frame ready */
+#define IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE 3
+/* Indication that 3A statistics are available */
+#define IMGU_ABI_EVTTYPE_3A_STATS_DONE 4
+/* Indication that DIS statistics are available */
+#define IMGU_ABI_EVTTYPE_DIS_STATS_DONE 5
+/* Pipeline Done event, sent after last pipeline stage */
+#define IMGU_ABI_EVTTYPE_PIPELINE_DONE 6
+/* Frame tagged */
+#define IMGU_ABI_EVTTYPE_FRAME_TAGGED 7
+/* Input frame ready */
+#define IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE 8
+/* Metadata ready */
+#define IMGU_ABI_EVTTYPE_METADATA_DONE 9
+/* Indication that LACE statistics are available */
+#define IMGU_ABI_EVTTYPE_LACE_STATS_DONE 10
+/* Extension stage executed */
+#define IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE 11
+/* Timing measurement data */
+#define IMGU_ABI_EVTTYPE_TIMER 12
+/* End Of Frame event, sent when in buffered sensor mode */
+#define IMGU_ABI_EVTTYPE_PORT_EOF 13
+/* Performance warning encountered by FW */
+#define IMGU_ABI_EVTTYPE_FW_WARNING 14
+/* Assertion hit by FW */
+#define IMGU_ABI_EVTTYPE_FW_ASSERT 15
+
+#define IMGU_ABI_NUM_CONTINUOUS_FRAMES 10
+#define IMGU_ABI_SP_COMM_COMMAND 0x00
+
+/*
+ * The host2sp_cmd_ready command is the only command written by the SP.
+ * It acknowledges that its previous command has been received (this does
+ * not mean that the command has been executed). It also indicates that a
+ * new command can be sent (it is a queue with depth 1).
+ */
+#define IMGU_ABI_SP_COMM_COMMAND_READY 1
+/* Command written by the Host */
+#define IMGU_ABI_SP_COMM_COMMAND_DUMMY 2 /* No action */
+#define IMGU_ABI_SP_COMM_COMMAND_START_FLASH 3 /* Start the flash */
+#define IMGU_ABI_SP_COMM_COMMAND_TERMINATE 4 /* Terminate */
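+
+/*
+ * Illustration only, not part of the ABI: with a queue depth of one, the
+ * host side of the handshake reduces to "wait for READY, then write the
+ * next command". sp_dmem_read()/sp_dmem_write() stand in for whatever
+ * DMEM accessors the driver provides and are not defined in this header.
+ *
+ *	while (sp_dmem_read(IMGU_ABI_SP_COMM_COMMAND) !=
+ *	       IMGU_ABI_SP_COMM_COMMAND_READY)
+ *		cpu_relax();
+ *	sp_dmem_write(IMGU_ABI_SP_COMM_COMMAND,
+ *		      IMGU_ABI_SP_COMM_COMMAND_START_FLASH);
+ */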
+
+/* n = 0..IPU3_CSS_PIPE_ID_NUM-1 */
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(n) ((n) * 4 + 0x60)
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT 0
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_AND_SHIFT 16
+
+#define IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM 1 /* sp_pmem */
+
+/***** For parameter computation *****/
+
+#define IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET 0xC
+#define IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET 0x8
+#define IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS 32
+
+#define IMGU_SCALER_ELEMS_PER_VEC 0x10
+#define IMGU_SCALER_FILTER_TAPS_Y 0x4
+#define IMGU_SCALER_OUT_BPP 0x8
+
+#define IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR 0x400
+#define IMGU_SCALER_TO_OF_ACK_FA_ADDR \
+ (0xC00 + IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET)
+#define IMGU_OF_TO_ACK_FA_ADDR (0xC00 + IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET)
+#define IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR 0
+#define IMGU_SCALER_INTR_BPP 10
+
+#define IMGU_PS_SNR_PRESERVE_BITS 3
+#define IMGU_CNTX_BPP 11
+#define IMGU_SCALER_FILTER_TAPS_UV (IMGU_SCALER_FILTER_TAPS_Y / 2)
+
+#define IMGU_VMEM2_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
+#define IMGU_STRIDE_Y (IMGU_SCALER_FILTER_TAPS_Y + 1)
+#define IMGU_MAX_FRAME_WIDTH 3840
+#define IMGU_VMEM3_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
+
+#define IMGU_VER_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_OUT_BPP + \
+ IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 1 */
+#define IMGU_MAX_INPUT_BLOCK_HEIGHT 64
+#define IMGU_HOR_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_INTR_BPP + \
+ IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 2 */
+#define IMGU_MAX_OUTPUT_BLOCK_WIDTH 128
+#define IMGU_CNTX_STRIDE_UV (IMGU_SCALER_FILTER_TAPS_UV + 1)
+
+#define IMGU_OSYS_DMA_CROP_W_LIMIT 64
+#define IMGU_OSYS_DMA_CROP_H_LIMIT 4
+#define IMGU_OSYS_BLOCK_WIDTH (2 * IPU3_UAPI_ISP_VEC_ELEMS)
+#define IMGU_OSYS_BLOCK_HEIGHT 32
+#define IMGU_OSYS_PHASES 0x20
+#define IMGU_OSYS_FILTER_TAPS 0x4
+#define IMGU_OSYS_PHASE_COUNTER_PREC_REF 6
+#define IMGU_OSYS_NUM_INPUT_BUFFERS 2
+#define IMGU_OSYS_FIR_PHASES \
+ (IMGU_OSYS_PHASES << IMGU_OSYS_PHASE_COUNTER_PREC_REF)
+#define IMGU_OSYS_TAPS_UV (IMGU_OSYS_FILTER_TAPS / 2)
+#define IMGU_OSYS_TAPS_Y (IMGU_OSYS_FILTER_TAPS)
+#define IMGU_OSYS_NUM_INTERM_BUFFERS 2
+
+#define IMGU_VMEM1_Y_SIZE \
+ (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)
+#define IMGU_VMEM1_UV_SIZE (IMGU_VMEM1_Y_SIZE / 4)
+#define IMGU_VMEM1_OUT_BUF_ADDR (IMGU_VMEM1_INP_BUF_ADDR + \
+ (IMGU_OSYS_NUM_INPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+#define IMGU_OSYS_NUM_OUTPUT_BUFFERS 2
+
+/* transpose of input height */
+#define IMGU_VMEM2_VECS_PER_LINE \
+ (DIV_ROUND_UP(IMGU_OSYS_BLOCK_HEIGHT, IMGU_VMEM2_ELEMS_PER_VEC))
+/* size in words (vectors) */
+#define IMGU_VMEM2_BUF_SIZE \
+ (IMGU_VMEM2_VECS_PER_LINE * IMGU_VMEM2_LINES_PER_BLOCK)
+#define IMGU_VMEM3_VER_Y_SIZE \
+ ((IMGU_STRIDE_Y * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
+#define IMGU_VMEM3_HOR_Y_SIZE \
+ ((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS)
+#define IMGU_VMEM3_VER_Y_EXTRA \
+ ((IMGU_STRIDE_Y * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
+#define IMGU_VMEM3_VER_U_SIZE \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_HOR_U_SIZE \
+ (((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_VER_U_EXTRA \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_VER_V_SIZE \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+
+#define IMGU_ISP_VEC_NELEMS 64
+#define IMGU_LUMA_TO_CHROMA_RATIO 2
+#define IMGU_INPUT_BLOCK_WIDTH (128)
+#define IMGU_FIFO_ADDR_SCALER_TO_FMT \
+ (IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR >> 2)
+#define IMGU_FIFO_ADDR_SCALER_TO_SP (IMGU_SCALER_TO_OF_ACK_FA_ADDR >> 2)
+#define IMGU_VMEM1_INP_BUF_ADDR 0
+#define IMGU_VMEM1_Y_STRIDE \
+ (IMGU_OSYS_BLOCK_WIDTH / IMGU_VMEM1_ELEMS_PER_VEC)
+#define IMGU_VMEM1_BUF_SIZE (IMGU_VMEM1_V_OFFSET + IMGU_VMEM1_UV_SIZE)
+
+#define IMGU_VMEM1_U_OFFSET (IMGU_VMEM1_Y_SIZE)
+#define IMGU_VMEM1_V_OFFSET (IMGU_VMEM1_U_OFFSET + IMGU_VMEM1_UV_SIZE)
+#define IMGU_VMEM1_UV_STRIDE (IMGU_VMEM1_Y_STRIDE / 2)
+#define IMGU_VMEM1_INT_BUF_ADDR (IMGU_VMEM1_OUT_BUF_ADDR + \
+ (IMGU_OSYS_NUM_OUTPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+
+#define IMGU_VMEM1_ELEMS_PER_VEC (IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS)
+#define IMGU_VMEM2_BUF_Y_ADDR 0
+#define IMGU_VMEM2_BUF_Y_STRIDE (IMGU_VMEM2_VECS_PER_LINE)
+#define IMGU_VMEM2_BUF_U_ADDR \
+ (IMGU_VMEM2_BUF_Y_ADDR + IMGU_VMEM2_BUF_SIZE)
+#define IMGU_VMEM2_BUF_V_ADDR \
+ (IMGU_VMEM2_BUF_U_ADDR + IMGU_VMEM2_BUF_SIZE / 4)
+#define IMGU_VMEM2_BUF_UV_STRIDE (IMGU_VMEM2_VECS_PER_LINE / 2)
+/* 1.5 x depth of intermediate buffer */
+#define IMGU_VMEM2_LINES_PER_BLOCK 192
+#define IMGU_VMEM3_HOR_Y_ADDR \
+ (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE)
+#define IMGU_VMEM3_HOR_U_ADDR \
+ (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE)
+#define IMGU_VMEM3_HOR_V_ADDR \
+ (IMGU_VMEM3_VER_V_ADDR + IMGU_VMEM3_VER_V_SIZE)
+#define IMGU_VMEM3_VER_Y_ADDR 0
+#define IMGU_VMEM3_VER_U_ADDR \
+ (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE + \
+ max(IMGU_VMEM3_HOR_Y_SIZE, IMGU_VMEM3_VER_Y_EXTRA))
+#define IMGU_VMEM3_VER_V_ADDR \
+ (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE + \
+ max(IMGU_VMEM3_HOR_U_SIZE, IMGU_VMEM3_VER_U_EXTRA))
+#define IMGU_FIFO_ADDR_FMT_TO_SP (IMGU_OF_TO_ACK_FA_ADDR >> 2)
+#define IMGU_FIFO_ADDR_FMT_TO_SCALER (IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR >> 2)
+#define IMGU_VMEM1_HST_BUF_ADDR (IMGU_VMEM1_INT_BUF_ADDR + \
+ (IMGU_OSYS_NUM_INTERM_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+#define IMGU_VMEM1_HST_BUF_STRIDE 120
+#define IMGU_VMEM1_HST_BUF_NLINES 3
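+
+/*
+ * Illustration only, not part of the ABI: chasing the VMEM1 macros above,
+ * one Y/U/V buffer is laid out as
+ *
+ *	Y at offset 0                  (IMGU_VMEM1_Y_SIZE)
+ *	U at IMGU_VMEM1_U_OFFSET       (IMGU_VMEM1_UV_SIZE = Y_SIZE / 4)
+ *	V at IMGU_VMEM1_V_OFFSET       (IMGU_VMEM1_UV_SIZE)
+ *
+ * so IMGU_VMEM1_BUF_SIZE = Y_SIZE + 2 * UV_SIZE = 1.5 * Y_SIZE, the usual
+ * 4:2:0 planar split.
+ */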
+
+enum imgu_abi_frame_format {
+ IMGU_ABI_FRAME_FORMAT_NV11, /* 12 bit YUV 411, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV12_TILEY, /* 12 bit YUV 420, Intel tiled format */
+ IMGU_ABI_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */
+ IMGU_ABI_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */
+ IMGU_ABI_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */
+ IMGU_ABI_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */
+ IMGU_ABI_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */
+ IMGU_ABI_FRAME_FORMAT_YUV420_16,/* yuv420, 16 bits per subpixel */
+ IMGU_ABI_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */
+ IMGU_ABI_FRAME_FORMAT_YUV422_16,/* yuv422, 16 bits per subpixel */
+ IMGU_ABI_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */
+ IMGU_ABI_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */
+ IMGU_ABI_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */
+ IMGU_ABI_FRAME_FORMAT_YUV_LINE, /* Internal format, 2 y lines */
+ /* followed by a uv-interleaved line */
+ IMGU_ABI_FRAME_FORMAT_RAW, /* RAW, 1 plane */
+ IMGU_ABI_FRAME_FORMAT_RGB565, /* 16 bit RGB, 1 plane. Each 3 sub
+ * pixels are packed into one 16 bit
+ * value, 5 bits for R, 6 bits for G
+ * and 5 bits for B.
+ */
+ IMGU_ABI_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */
+ IMGU_ABI_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha
+ * (alpha is unused)
+ */
+ IMGU_ABI_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */
+ IMGU_ABI_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. For
+ * frames of this type, we set the
+ * height to 1 and the width to the
+ * number of allocated bytes.
+ */
+ IMGU_ABI_FRAME_FORMAT_MIPI, /* MIPI frame, 1 plane */
+ IMGU_ABI_FRAME_FORMAT_RAW_PACKED, /* RAW, 1 plane, packed */
+ IMGU_ABI_FRAME_FORMAT_CSI_MIPI_YUV420_8, /* 8 bit per Y/U/V. Y odd line
+ * UYVY interleaved even line
+ */
+ IMGU_ABI_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /* Legacy YUV420.
+ * UY odd line;
+ * VY even line
+ */
+ IMGU_ABI_FRAME_FORMAT_CSI_MIPI_YUV420_10,/* 10 bit per Y/U/V. Y odd
+ * line; UYVY interleaved
+ * even line
+ */
+ IMGU_ABI_FRAME_FORMAT_YCGCO444_16, /* Internal format for ISP2.7,
+ * 16 bits per plane YUV 444,
+ * Y, U, V plane
+ */
+ IMGU_ABI_FRAME_FORMAT_NUM
+};
+
+enum imgu_abi_bayer_order {
+ IMGU_ABI_BAYER_ORDER_GRBG,
+ IMGU_ABI_BAYER_ORDER_RGGB,
+ IMGU_ABI_BAYER_ORDER_BGGR,
+ IMGU_ABI_BAYER_ORDER_GBRG
+};
+
+enum imgu_abi_osys_format {
+ IMGU_ABI_OSYS_FORMAT_YUV420,
+ IMGU_ABI_OSYS_FORMAT_YV12,
+ IMGU_ABI_OSYS_FORMAT_NV12,
+ IMGU_ABI_OSYS_FORMAT_NV21,
+ IMGU_ABI_OSYS_FORMAT_YUV_LINE,
+ IMGU_ABI_OSYS_FORMAT_YUY2, /* = IMGU_ABI_OSYS_FORMAT_YUYV */
+ IMGU_ABI_OSYS_FORMAT_NV16,
+ IMGU_ABI_OSYS_FORMAT_RGBA,
+ IMGU_ABI_OSYS_FORMAT_BGRA
+};
+
+enum imgu_abi_osys_tiling {
+ IMGU_ABI_OSYS_TILING_NONE,
+ IMGU_ABI_OSYS_TILING_Y,
+ IMGU_ABI_OSYS_TILING_YF,
+};
+
+enum imgu_abi_osys_procmode {
+ IMGU_ABI_OSYS_PROCMODE_BYPASS,
+ IMGU_ABI_OSYS_PROCMODE_UPSCALE,
+ IMGU_ABI_OSYS_PROCMODE_DOWNSCALE,
+};
+
+enum imgu_abi_queue_id {
+ IMGU_ABI_QUEUE_EVENT_ID = -1,
+ IMGU_ABI_QUEUE_A_ID = 0,
+ IMGU_ABI_QUEUE_B_ID,
+ IMGU_ABI_QUEUE_C_ID,
+ IMGU_ABI_QUEUE_D_ID,
+ IMGU_ABI_QUEUE_E_ID,
+ IMGU_ABI_QUEUE_F_ID,
+ IMGU_ABI_QUEUE_G_ID,
+ IMGU_ABI_QUEUE_H_ID, /* input frame queue for skycam */
+ IMGU_ABI_QUEUE_NUM
+};
+
+enum imgu_abi_buffer_type {
+ IMGU_ABI_BUFFER_TYPE_INVALID = -1,
+ IMGU_ABI_BUFFER_TYPE_3A_STATISTICS = 0,
+ IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS,
+ IMGU_ABI_BUFFER_TYPE_LACE_STATISTICS,
+ IMGU_ABI_BUFFER_TYPE_INPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_RAW_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_CUSTOM_INPUT,
+ IMGU_ABI_BUFFER_TYPE_CUSTOM_OUTPUT,
+ IMGU_ABI_BUFFER_TYPE_METADATA,
+ IMGU_ABI_BUFFER_TYPE_PARAMETER_SET,
+ IMGU_ABI_BUFFER_TYPE_PER_FRAME_PARAMETER_SET,
+ IMGU_ABI_NUM_DYNAMIC_BUFFER_TYPE,
+ IMGU_ABI_NUM_BUFFER_TYPE
+};
+
+enum imgu_abi_raw_type {
+ IMGU_ABI_RAW_TYPE_BAYER,
+ IMGU_ABI_RAW_TYPE_IR_ON_GR,
+ IMGU_ABI_RAW_TYPE_IR_ON_GB
+};
+
+enum imgu_abi_memories {
+ IMGU_ABI_MEM_ISP_PMEM0 = 0,
+ IMGU_ABI_MEM_ISP_DMEM0,
+ IMGU_ABI_MEM_ISP_VMEM0,
+ IMGU_ABI_MEM_ISP_VAMEM0,
+ IMGU_ABI_MEM_ISP_VAMEM1,
+ IMGU_ABI_MEM_ISP_VAMEM2,
+ IMGU_ABI_MEM_ISP_HMEM0,
+ IMGU_ABI_MEM_SP0_DMEM0,
+ IMGU_ABI_MEM_SP1_DMEM0,
+ IMGU_ABI_MEM_DDR,
+ IMGU_ABI_NUM_MEMORIES
+};
+
+enum imgu_abi_param_class {
+ IMGU_ABI_PARAM_CLASS_PARAM, /* Late binding parameters, like 3A */
+ IMGU_ABI_PARAM_CLASS_CONFIG, /* Pipe config time parameters */
+ IMGU_ABI_PARAM_CLASS_STATE, /* State parameters, e.g. buffer index */
+ IMGU_ABI_PARAM_CLASS_NUM
+};
+
+enum imgu_abi_bin_input_src {
+ IMGU_ABI_BINARY_INPUT_SOURCE_SENSOR,
+ IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY,
+ IMGU_ABI_BINARY_INPUT_SOURCE_VARIABLE,
+};
+
+enum imgu_abi_sp_swstate {
+ IMGU_ABI_SP_SWSTATE_TERMINATED,
+ IMGU_ABI_SP_SWSTATE_INITIALIZED,
+ IMGU_ABI_SP_SWSTATE_CONNECTED,
+ IMGU_ABI_SP_SWSTATE_RUNNING,
+};
+
+enum imgu_abi_bl_swstate {
+ IMGU_ABI_BL_SWSTATE_OK = 0x100,
+ IMGU_ABI_BL_SWSTATE_BUSY,
+ IMGU_ABI_BL_SWSTATE_ERR,
+};
+
+/* The type of pipe stage */
+enum imgu_abi_stage_type {
+ IMGU_ABI_STAGE_TYPE_SP,
+ IMGU_ABI_STAGE_TYPE_ISP,
+};
+
+struct imgu_abi_acc_operation {
+ /*
+ * zero means on init,
+ * others mean upon receiving an ack signal from the BC acc.
+ */
+ u8 op_indicator;
+ u8 op_type;
+} __packed;
+
+struct imgu_abi_acc_process_lines_cmd_data {
+ u16 lines;
+ u8 cfg_set;
+ u8 reserved; /* Align to 4 bytes */
+} __packed;
+
+/* Bayer shading definitions */
+
+struct imgu_abi_shd_transfer_luts_set_data {
+ u8 set_number;
+ u8 padding[3];
+ imgu_addr_t rg_lut_ddr_addr;
+ imgu_addr_t bg_lut_ddr_addr;
+ u32 align_dummy;
+} __packed;
+
+struct imgu_abi_shd_grid_config {
+ /* reg 0 */
+ u32 grid_width:8;
+ u32 grid_height:8;
+ u32 block_width:3;
+ u32 reserved0:1;
+ u32 block_height:3;
+ u32 reserved1:1;
+ u32 grid_height_per_slice:8;
+ /* reg 1 */
+ s32 x_start:13;
+ s32 reserved2:3;
+ s32 y_start:13;
+ s32 reserved3:3;
+} __packed;
+
+struct imgu_abi_shd_general_config {
+ u32 init_set_vrt_offst_ul:8;
+ u32 shd_enable:1;
+ /* aka 'gf' */
+ u32 gain_factor:2;
+ u32 reserved:21;
+} __packed;
+
+struct imgu_abi_shd_black_level_config {
+ /* reg 0 */
+ s32 bl_r:12;
+ s32 reserved0:4;
+ s32 bl_gr:12;
+ u32 reserved1:1;
+ /* aka 'nf' */
+ u32 normalization_shift:3;
+ /* reg 1 */
+ s32 bl_gb:12;
+ s32 reserved2:4;
+ s32 bl_b:12;
+ s32 reserved3:4;
+} __packed;
+
+struct imgu_abi_shd_intra_frame_operations_data {
+ struct imgu_abi_acc_operation
+ operation_list[IMGU_ABI_SHD_MAX_OPERATIONS] __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_SHD_MAX_PROCESS_LINES] __aligned(32);
+ struct imgu_abi_shd_transfer_luts_set_data
+ transfer_data[IMGU_ABI_SHD_MAX_TRANSFERS] __aligned(32);
+} __packed;
+
+struct imgu_abi_shd_config {
+ struct ipu3_uapi_shd_config_static shd __aligned(32);
+ struct imgu_abi_shd_intra_frame_operations_data shd_ops __aligned(32);
+ struct ipu3_uapi_shd_lut shd_lut __aligned(32);
+} __packed;
+
+struct imgu_abi_stripe_input_frame_resolution {
+ u16 width;
+ u16 height;
+ u32 bayer_order; /* enum ipu3_uapi_bayer_order */
+ u32 raw_bit_depth;
+} __packed;
+
+/* Stripe-based processing */
+
+struct imgu_abi_stripes {
+ /* offset from start of frame - measured in pixels */
+ u16 offset;
+ /* stripe width - measured in pixels */
+ u16 width;
+ /* stripe height - measured in pixels */
+ u16 height;
+} __packed;
+
+struct imgu_abi_stripe_data {
+ /*
+ * number of stripes for current processing source
+ * - VLIW binary parameter; we currently support 1 or 2 stripes
+ */
+ u16 num_of_stripes;
+
+ u8 padding[2];
+
+ /*
+ * the following data is derived from resolution-related
+ * pipe config and from num_of_stripes
+ */
+
+ /*
+ * 'input-stripes' - before input cropping
+ * used by input feeder
+ */
+ struct imgu_abi_stripe_input_frame_resolution input_frame;
+
+ /*'effective-stripes' - after input cropping used dpc, bds */
+ struct imgu_abi_stripes effective_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /* 'down-scaled-stripes' - after down-scaling ONLY. used by BDS */
+ struct imgu_abi_stripes down_scaled_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /*
+ * 'bds-out-stripes' - after bayer down-scaling and padding.
+ * used by all algos starting with norm up to the ref-frame for GDC
+ * (currently up to the output kernel)
+ */
+ struct imgu_abi_stripes bds_out_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /* 'bds-out-stripes (no overlap)' - used for ref kernel */
+ struct imgu_abi_stripes
+ bds_out_stripes_no_overlap[IPU3_UAPI_MAX_STRIPES];
+
+ /*
+ * input resolution for output system (equal to bds_out - envelope)
+ * output-system input frame width as configured by user
+ */
+ u16 output_system_in_frame_width;
+ /* output-system input frame height as configured by user */
+ u16 output_system_in_frame_height;
+
+ /*
+ * 'output-stripes' - accounts for stitching on the output (no overlap)
+ * used by the output kernel
+ */
+ struct imgu_abi_stripes output_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /*
+ * 'block-stripes' - accounts for stitching by the output system
+ * (1 or more blocks overlap)
+ * used by DVS, TNR and the output system kernel
+ */
+ struct imgu_abi_stripes block_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ u16 effective_frame_width; /* Needed for vertical cropping */
+ u16 bds_frame_width;
+ u16 out_frame_width; /* Output frame width as configured by user */
+ u16 out_frame_height; /* Output frame height as configured by user */
+
+ /* GDC in buffer (a.k.a. delay frame, ref buffer) info */
+ u16 gdc_in_buffer_width; /* GDC in buffer width */
+ u16 gdc_in_buffer_height; /* GDC in buffer height */
+ /* GDC in buffer first valid pixel x offset */
+ u16 gdc_in_buffer_offset_x;
+ /* GDC in buffer first valid pixel y offset */
+ u16 gdc_in_buffer_offset_y;
+
+ /* Display frame width as configured by user */
+ u16 display_frame_width;
+ /* Display frame height as configured by user */
+ u16 display_frame_height;
+ u16 bds_aligned_frame_width;
+ /* Number of vectors to left-crop when writing stripes (not stripe 0) */
+ u16 half_overlap_vectors;
+ /* Decimate ISP and fixed func resolutions after BDS (ir_extraction) */
+ u16 ir_ext_decimation;
+ u8 padding1[2];
+} __packed;
+
+/* Input feeder related structs */
+
+struct imgu_abi_input_feeder_data {
+ u32 row_stride; /* row stride */
+ u32 start_row_address; /* start row address */
+ u32 start_pixel; /* start pixel */
+} __packed;
+
+struct imgu_abi_input_feeder_data_aligned {
+ struct imgu_abi_input_feeder_data data __aligned(32);
+} __packed;
+
+struct imgu_abi_input_feeder_data_per_stripe {
+ struct imgu_abi_input_feeder_data_aligned
+ input_feeder_data[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_input_feeder_config {
+ struct imgu_abi_input_feeder_data data;
+ struct imgu_abi_input_feeder_data_per_stripe data_per_stripe
+ __aligned(32);
+} __packed;
+
+/* DVS related definitions */
+
+struct imgu_abi_dvs_stat_grd_config {
+ u8 grid_width;
+ u8 grid_height;
+ u8 block_width;
+ u8 block_height;
+ u16 x_start;
+ u16 y_start;
+ u16 enable;
+ u16 x_end;
+ u16 y_end;
+} __packed;
+
+struct imgu_abi_dvs_stat_cfg {
+ u8 reserved0[4];
+ struct imgu_abi_dvs_stat_grd_config
+ grd_config[IMGU_ABI_DVS_STAT_LEVELS];
+ u8 reserved1[18];
+} __packed;
+
+struct imgu_abi_dvs_stat_transfer_op_data {
+ u8 set_number;
+} __packed;
+
+struct imgu_abi_dvs_stat_intra_frame_operations_data {
+ struct imgu_abi_acc_operation
+ ops[IMGU_ABI_DVS_STAT_MAX_OPERATIONS] __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_DVS_STAT_MAX_PROCESS_LINES]
+ __aligned(32);
+ struct imgu_abi_dvs_stat_transfer_op_data
+ transfer_data[IMGU_ABI_DVS_STAT_MAX_TRANSFERS] __aligned(32);
+} __packed;
+
+struct imgu_abi_dvs_stat_config {
+ struct imgu_abi_dvs_stat_cfg cfg __aligned(32);
+ u8 reserved0[128];
+ struct imgu_abi_dvs_stat_intra_frame_operations_data operations_data;
+ u8 reserved1[64];
+} __packed;
+
+/* Y-tone Mapping */
+
+struct imgu_abi_yuvp2_y_tm_lut_static_config {
+ u16 entries[IMGU_ABI_YUVP2_YTM_LUT_ENTRIES];
+ u32 enable;
+} __packed;
+
+/* Output formatter related structs */
+
+struct imgu_abi_osys_formatter_params {
+ u32 format;
+ u32 flip;
+ u32 mirror;
+ u32 tiling;
+ u32 reduce_range;
+ u32 alpha_blending;
+ u32 release_inp_addr;
+ u32 release_inp_en;
+ u32 process_out_buf_addr;
+ u32 image_width_vecs;
+ u32 image_height_lines;
+ u32 inp_buff_y_st_addr;
+ u32 inp_buff_y_line_stride;
+ u32 inp_buff_y_buffer_stride;
+ u32 int_buff_u_st_addr;
+ u32 int_buff_v_st_addr;
+ u32 inp_buff_uv_line_stride;
+ u32 inp_buff_uv_buffer_stride;
+ u32 out_buff_level;
+ u32 out_buff_nr_y_lines;
+ u32 out_buff_u_st_offset;
+ u32 out_buff_v_st_offset;
+ u32 out_buff_y_line_stride;
+ u32 out_buff_uv_line_stride;
+ u32 hist_buff_st_addr;
+ u32 hist_buff_line_stride;
+ u32 hist_buff_nr_lines;
+} __packed;
+
+struct imgu_abi_osys_formatter {
+ struct imgu_abi_osys_formatter_params param __aligned(32);
+} __packed;
+
+struct imgu_abi_osys_scaler_params {
+ u32 inp_buf_y_st_addr;
+ u32 inp_buf_y_line_stride;
+ u32 inp_buf_y_buffer_stride;
+ u32 inp_buf_u_st_addr;
+ u32 inp_buf_v_st_addr;
+ u32 inp_buf_uv_line_stride;
+ u32 inp_buf_uv_buffer_stride;
+ u32 inp_buf_chunk_width;
+ u32 inp_buf_nr_buffers;
+ /* Output buffers */
+ u32 out_buf_y_st_addr;
+ u32 out_buf_y_line_stride;
+ u32 out_buf_y_buffer_stride;
+ u32 out_buf_u_st_addr;
+ u32 out_buf_v_st_addr;
+ u32 out_buf_uv_line_stride;
+ u32 out_buf_uv_buffer_stride;
+ u32 out_buf_nr_buffers;
+ /* Intermediate buffers */
+ u32 int_buf_y_st_addr;
+ u32 int_buf_y_line_stride;
+ u32 int_buf_u_st_addr;
+ u32 int_buf_v_st_addr;
+ u32 int_buf_uv_line_stride;
+ u32 int_buf_height;
+ u32 int_buf_chunk_width;
+ u32 int_buf_chunk_height;
+ /* Context buffers */
+ u32 ctx_buf_hor_y_st_addr;
+ u32 ctx_buf_hor_u_st_addr;
+ u32 ctx_buf_hor_v_st_addr;
+ u32 ctx_buf_ver_y_st_addr;
+ u32 ctx_buf_ver_u_st_addr;
+ u32 ctx_buf_ver_v_st_addr;
+ /* Addresses for release-input and process-output tokens */
+ u32 release_inp_buf_addr;
+ u32 release_inp_buf_en;
+ u32 release_out_buf_en;
+ u32 process_out_buf_addr;
+ /* Settings dimensions, padding, cropping */
+ u32 input_image_y_width;
+ u32 input_image_y_height;
+ u32 input_image_y_start_column;
+ u32 input_image_uv_start_column;
+ u32 input_image_y_left_pad;
+ u32 input_image_uv_left_pad;
+ u32 input_image_y_right_pad;
+ u32 input_image_uv_right_pad;
+ u32 input_image_y_top_pad;
+ u32 input_image_uv_top_pad;
+ u32 input_image_y_bottom_pad;
+ u32 input_image_uv_bottom_pad;
+ u32 processing_mode; /* enum imgu_abi_osys_procmode */
+ u32 scaling_ratio;
+ u32 y_left_phase_init;
+ u32 uv_left_phase_init;
+ u32 y_top_phase_init;
+ u32 uv_top_phase_init;
+ u32 coeffs_exp_shift;
+ u32 out_y_left_crop;
+ u32 out_uv_left_crop;
+ u32 out_y_top_crop;
+ u32 out_uv_top_crop;
+} __packed;
+
+struct imgu_abi_osys_scaler {
+ struct imgu_abi_osys_scaler_params param __aligned(32);
+} __packed;
+
+struct imgu_abi_osys_frame_params {
+ /* Output pins */
+ u32 enable;
+ u32 format; /* enum imgu_abi_osys_format */
+ u32 flip;
+ u32 mirror;
+ u32 tiling; /* enum imgu_abi_osys_tiling */
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 scaled;
+} __packed;
+
+struct imgu_abi_osys_frame {
+ struct imgu_abi_osys_frame_params param __aligned(32);
+} __packed;
+
+struct imgu_abi_osys_stripe {
+ /* Input resolution */
+ u32 input_width;
+ u32 input_height;
+ /* Output Stripe */
+ u32 output_width[IMGU_ABI_OSYS_PINS];
+ u32 output_height[IMGU_ABI_OSYS_PINS];
+ u32 output_offset[IMGU_ABI_OSYS_PINS];
+ u32 buf_stride[IMGU_ABI_OSYS_PINS];
+ /* Scaler params */
+ u32 block_width;
+ u32 block_height;
+ /* Output Crop factor */
+ u32 crop_top[IMGU_ABI_OSYS_PINS];
+ u32 crop_left[IMGU_ABI_OSYS_PINS];
+} __packed;
+
+struct imgu_abi_osys_config {
+ struct imgu_abi_osys_formatter
+ formatter[IPU3_UAPI_MAX_STRIPES][IMGU_ABI_OSYS_PINS];
+ struct imgu_abi_osys_scaler scaler[IPU3_UAPI_MAX_STRIPES];
+ struct imgu_abi_osys_frame frame[IMGU_ABI_OSYS_PINS];
+ struct imgu_abi_osys_stripe stripe[IPU3_UAPI_MAX_STRIPES];
+ /* 32 packed coefficients for luma and chroma */
+ s8 scaler_coeffs_chroma[128];
+ s8 scaler_coeffs_luma[128];
+} __packed;
+
+/* BDS */
+
+struct imgu_abi_bds_hor_ctrl0 {
+ u32 sample_patrn_length:9;
+ u32 reserved0:3;
+ u32 hor_ds_en:1;
+ u32 min_clip_val:1;
+ u32 max_clip_val:2;
+ u32 out_frame_width:13;
+ u32 reserved1:3;
+} __packed;
+
+struct imgu_abi_bds_ptrn_arr {
+ u32 elems[IMGU_ABI_BDS_SAMPLE_PATTERN_ARRAY_SIZE];
+} __packed;
+
+struct imgu_abi_bds_phase_entry {
+ s8 coeff_min2;
+ s8 coeff_min1;
+ s8 coeff_0;
+ s8 nf;
+ s8 coeff_pls1;
+ s8 coeff_pls2;
+ s8 coeff_pls3;
+ u8 reserved;
+} __packed;
+
+struct imgu_abi_bds_phase_arr {
+ struct imgu_abi_bds_phase_entry
+ even[IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE];
+ struct imgu_abi_bds_phase_entry
+ odd[IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE];
+} __packed;
+
+struct imgu_abi_bds_hor_ctrl1 {
+ u32 hor_crop_start:13;
+ u32 reserved0:3;
+ u32 hor_crop_end:13;
+ u32 reserved1:1;
+ u32 hor_crop_en:1;
+ u32 reserved2:1;
+} __packed;
+
+struct imgu_abi_bds_hor_ctrl2 {
+ u32 input_frame_height:13;
+ u32 reserved0:19;
+} __packed;
+
+struct imgu_abi_bds_hor {
+ struct imgu_abi_bds_hor_ctrl0 hor_ctrl0;
+ struct imgu_abi_bds_ptrn_arr hor_ptrn_arr;
+ struct imgu_abi_bds_phase_arr hor_phase_arr;
+ struct imgu_abi_bds_hor_ctrl1 hor_ctrl1;
+ struct imgu_abi_bds_hor_ctrl2 hor_ctrl2;
+} __packed;
+
+struct imgu_abi_bds_ver_ctrl0 {
+ u32 sample_patrn_length:9;
+ u32 reserved0:3;
+ u32 ver_ds_en:1;
+ u32 min_clip_val:1;
+ u32 max_clip_val:2;
+ u32 reserved1:16;
+} __packed;
+
+struct imgu_abi_bds_ver_ctrl1 {
+ u32 out_frame_width:13;
+ u32 reserved0:3;
+ u32 out_frame_height:13;
+ u32 reserved1:3;
+} __packed;
+
+struct imgu_abi_bds_ver {
+ struct imgu_abi_bds_ver_ctrl0 ver_ctrl0;
+ struct imgu_abi_bds_ptrn_arr ver_ptrn_arr;
+ struct imgu_abi_bds_phase_arr ver_phase_arr;
+ struct imgu_abi_bds_ver_ctrl1 ver_ctrl1;
+} __packed;
+
+struct imgu_abi_bds_per_stripe_data {
+ struct imgu_abi_bds_hor_ctrl0 hor_ctrl0;
+ struct imgu_abi_bds_ver_ctrl1 ver_ctrl1;
+ struct imgu_abi_bds_hor_ctrl1 crop;
+} __packed;
+
+struct imgu_abi_bds_per_stripe_data_aligned {
+ struct imgu_abi_bds_per_stripe_data data __aligned(32);
+} __packed;
+
+struct imgu_abi_bds_per_stripe {
+ struct imgu_abi_bds_per_stripe_data_aligned
+ aligned_data[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_bds_config {
+ struct imgu_abi_bds_hor hor __aligned(32);
+ struct imgu_abi_bds_ver ver __aligned(32);
+ struct imgu_abi_bds_per_stripe per_stripe __aligned(32);
+ u32 enabled;
+} __packed;
+
+/* ANR */
+
+struct imgu_abi_anr_search_config {
+ u32 enable;
+ u16 frame_width;
+ u16 frame_height;
+} __packed;
+
+struct imgu_abi_anr_stitch_config {
+ u32 anr_stitch_en;
+ u16 frame_width;
+ u16 frame_height;
+ u8 reserved[40];
+ struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
+} __packed;
+
+struct imgu_abi_anr_tile2strm_config {
+ u32 enable;
+ u16 frame_width;
+ u16 frame_height;
+} __packed;
+
+struct imgu_abi_anr_config {
+ struct imgu_abi_anr_search_config search __aligned(32);
+ struct ipu3_uapi_anr_transform_config transform __aligned(32);
+ struct imgu_abi_anr_stitch_config stitch __aligned(32);
+ struct imgu_abi_anr_tile2strm_config tile2strm __aligned(32);
+} __packed;
+
+/* AF */
+
+struct imgu_abi_af_frame_size {
+ u16 width;
+ u16 height;
+} __packed;
+
+struct imgu_abi_af_config_s {
+ struct ipu3_uapi_af_filter_config filter_config __aligned(32);
+ struct imgu_abi_af_frame_size frame_size;
+ struct ipu3_uapi_grid_config grid_cfg __aligned(32);
+} __packed;
+
+struct imgu_abi_af_intra_frame_operations_data {
+ struct imgu_abi_acc_operation ops[IMGU_ABI_AF_MAX_OPERATIONS]
+ __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_AF_MAX_PROCESS_LINES] __aligned(32);
+} __packed;
+
+struct imgu_abi_af_stripe_config {
+ struct imgu_abi_af_frame_size frame_size __aligned(32);
+ struct ipu3_uapi_grid_config grid_cfg __aligned(32);
+} __packed;
+
+struct imgu_abi_af_config {
+ struct imgu_abi_af_config_s config;
+ struct imgu_abi_af_intra_frame_operations_data operations_data;
+ struct imgu_abi_af_stripe_config stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+/* AE */
+
+struct imgu_abi_ae_config {
+ struct ipu3_uapi_ae_grid_config grid_cfg __aligned(32);
+ struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS]
+ __aligned(32);
+ struct ipu3_uapi_ae_ccm ae_ccm __aligned(32);
+ struct {
+ struct ipu3_uapi_ae_grid_config grid __aligned(32);
+ } stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+/* AWB_FR */
+
+struct imgu_abi_awb_fr_intra_frame_operations_data {
+ struct imgu_abi_acc_operation ops[IMGU_ABI_AWB_FR_MAX_OPERATIONS]
+ __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_AWB_FR_MAX_PROCESS_LINES] __aligned(32);
+} __packed;
+
+struct imgu_abi_awb_fr_config {
+ struct ipu3_uapi_awb_fr_config_s config;
+ struct imgu_abi_awb_fr_intra_frame_operations_data operations_data;
+ struct ipu3_uapi_awb_fr_config_s stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_acc_transfer_op_data {
+ u8 set_number;
+} __packed;
+
+struct imgu_abi_awb_intra_frame_operations_data {
+ struct imgu_abi_acc_operation ops[IMGU_ABI_AWB_MAX_OPERATIONS]
+ __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_AWB_MAX_PROCESS_LINES] __aligned(32);
+ struct imgu_abi_acc_transfer_op_data
+ transfer_data[IMGU_ABI_AWB_MAX_TRANSFERS] __aligned(32);
+} __aligned(32) __packed;
+
+struct imgu_abi_awb_config {
+ struct ipu3_uapi_awb_config_s config __aligned(32);
+ struct imgu_abi_awb_intra_frame_operations_data operations_data;
+ struct ipu3_uapi_awb_config_s stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_acc_param {
+ struct imgu_abi_stripe_data stripe;
+ u8 padding[8];
+ struct imgu_abi_input_feeder_config input_feeder;
+ struct ipu3_uapi_bnr_static_config bnr;
+ struct ipu3_uapi_bnr_static_config_green_disparity green_disparity
+ __aligned(32);
+ struct ipu3_uapi_dm_config dm __aligned(32);
+ struct ipu3_uapi_ccm_mat_config ccm __aligned(32);
+ struct ipu3_uapi_gamma_config gamma __aligned(32);
+ struct ipu3_uapi_csc_mat_config csc __aligned(32);
+ struct ipu3_uapi_cds_params cds __aligned(32);
+ struct imgu_abi_shd_config shd __aligned(32);
+ struct imgu_abi_dvs_stat_config dvs_stat;
+ u8 padding1[224]; /* reserved for lace_stat */
+ struct ipu3_uapi_yuvp1_iefd_config iefd __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds_c0 __aligned(32);
+ struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __aligned(32);
+ struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds __aligned(32);
+ struct ipu3_uapi_yuvp1_chnr_config chnr __aligned(32);
+ struct imgu_abi_yuvp2_y_tm_lut_static_config ytm __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds2 __aligned(32);
+ struct ipu3_uapi_yuvp2_tcc_static_config tcc __aligned(32);
+ /* reserved for defect pixel correction */
+ u8 dpc[240832] __aligned(32);
+ struct imgu_abi_bds_config bds;
+ struct imgu_abi_anr_config anr;
+ struct imgu_abi_awb_fr_config awb_fr;
+ struct imgu_abi_ae_config ae;
+ struct imgu_abi_af_config af;
+ struct imgu_abi_awb_config awb;
+ struct imgu_abi_osys_config osys;
+} __packed;
+
+/***** Morphing table entry *****/
+
+struct imgu_abi_gdc_warp_param {
+ u32 origin_x;
+ u32 origin_y;
+ u32 in_addr_offset;
+ u32 in_block_width;
+ u32 in_block_height;
+ u32 p0_x;
+ u32 p0_y;
+ u32 p1_x;
+ u32 p1_y;
+ u32 p2_x;
+ u32 p2_y;
+ u32 p3_x;
+ u32 p3_y;
+ u32 in_block_width_a;
+ u32 in_block_width_b;
+ u32 padding; /* struct size multiple of DDR word */
+} __packed;
+
+/******************* Firmware ABI definitions *******************/
+
+/***** struct imgu_abi_sp_stage *****/
+
+struct imgu_abi_crop_pos {
+ u16 x;
+ u16 y;
+} __packed;
+
+struct imgu_abi_sp_resolution {
+ u16 width; /* Width of valid data in pixels */
+ u16 height; /* Height of valid data in lines */
+} __packed;
+
+/*
+ * Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct imgu_abi_frame_sp_info {
+ struct imgu_abi_sp_resolution res;
+ u16 padded_width; /* stride of line in memory
+ * (in pixels)
+ */
+ u8 format; /* format of the frame data */
+ u8 raw_bit_depth; /* number of valid bits per pixel,
+ * only valid for RAW bayer frames
+ */
+ u8 raw_bayer_order; /* bayer order, only valid
+ * for RAW bayer frames
+ */
+	u8 raw_type;	/* Selects the raw frame type. For legacy SKC
+			 * pipes the default is IMGU_ABI_RAW_TYPE_BAYER.
+			 * For an RGB-IR sensor the driver should set it
+			 * to IMGU_ABI_RAW_TYPE_IR_ON_GR (IR on Gr) or
+			 * IMGU_ABI_RAW_TYPE_IR_ON_GB (IR on Gb).
+			 */
+ u8 padding[2]; /* Extend to 32 bit multiple */
+} __packed;
+
+struct imgu_abi_buffer_sp {
+ union {
+ imgu_addr_t xmem_addr;
+ s32 queue_id; /* enum imgu_abi_queue_id */
+ } buf_src;
+ s32 buf_type; /* enum imgu_abi_buffer_type */
+} __packed;
+
+struct imgu_abi_frame_sp_plane {
+ u32 offset; /* offset in bytes to start of frame data */
+ /* offset is wrt data in imgu_abi_sp_sp_frame */
+} __packed;
+
+struct imgu_abi_frame_sp_rgb_planes {
+ struct imgu_abi_frame_sp_plane r;
+ struct imgu_abi_frame_sp_plane g;
+ struct imgu_abi_frame_sp_plane b;
+} __packed;
+
+struct imgu_abi_frame_sp_yuv_planes {
+ struct imgu_abi_frame_sp_plane y;
+ struct imgu_abi_frame_sp_plane u;
+ struct imgu_abi_frame_sp_plane v;
+} __packed;
+
+struct imgu_abi_frame_sp_nv_planes {
+ struct imgu_abi_frame_sp_plane y;
+ struct imgu_abi_frame_sp_plane uv;
+} __packed;
+
+struct imgu_abi_frame_sp_plane6 {
+ struct imgu_abi_frame_sp_plane r;
+ struct imgu_abi_frame_sp_plane r_at_b;
+ struct imgu_abi_frame_sp_plane gr;
+ struct imgu_abi_frame_sp_plane gb;
+ struct imgu_abi_frame_sp_plane b;
+ struct imgu_abi_frame_sp_plane b_at_r;
+} __packed;
+
+struct imgu_abi_frame_sp_binary_plane {
+ u32 size;
+ struct imgu_abi_frame_sp_plane data;
+} __packed;
+
+struct imgu_abi_frame_sp {
+ struct imgu_abi_frame_sp_info info;
+ struct imgu_abi_buffer_sp buf_attr;
+ union {
+ struct imgu_abi_frame_sp_plane raw;
+ struct imgu_abi_frame_sp_plane rgb;
+ struct imgu_abi_frame_sp_rgb_planes planar_rgb;
+ struct imgu_abi_frame_sp_plane yuyv;
+ struct imgu_abi_frame_sp_yuv_planes yuv;
+ struct imgu_abi_frame_sp_nv_planes nv;
+ struct imgu_abi_frame_sp_plane6 plane6;
+ struct imgu_abi_frame_sp_binary_plane binary;
+ } planes;
+} __packed;
+
+struct imgu_abi_resolution {
+ u32 width;
+ u32 height;
+} __packed;
+
+struct imgu_abi_frames_sp {
+ struct imgu_abi_frame_sp in;
+ struct imgu_abi_frame_sp out[IMGU_ABI_BINARY_MAX_OUTPUT_PORTS];
+ struct imgu_abi_resolution effective_in_res;
+ struct imgu_abi_frame_sp out_vf;
+ struct imgu_abi_frame_sp_info internal_frame_info;
+ struct imgu_abi_buffer_sp s3a_buf;
+ struct imgu_abi_buffer_sp dvs_buf;
+ struct imgu_abi_buffer_sp lace_buf;
+} __packed;
+
+struct imgu_abi_uds_info {
+ u16 curr_dx;
+ u16 curr_dy;
+ u16 xc;
+ u16 yc;
+} __packed;
+
+/* Information for a single pipeline stage */
+struct imgu_abi_sp_stage {
+ /* Multiple boolean flags can be stored in an integer */
+ u8 num; /* Stage number */
+ u8 isp_online;
+ u8 isp_copy_vf;
+ u8 isp_copy_output;
+ u8 sp_enable_xnr;
+ u8 isp_deci_log_factor;
+ u8 isp_vf_downscale_bits;
+ u8 deinterleaved;
+ /*
+ * NOTE: Programming the input circuit can only be done at the
+	 * start of a session. It is illegal to program it during execution.
+	 * The input circuit defines the connectivity.
+ */
+ u8 program_input_circuit;
+ u8 func;
+ u8 stage_type; /* enum imgu_abi_stage_type */
+ u8 num_stripes;
+ u8 isp_pipe_version;
+ struct {
+ u8 vf_output;
+ u8 s3a;
+ u8 sdis;
+ u8 dvs_stats;
+ u8 lace_stats;
+ } enable;
+
+ struct imgu_abi_crop_pos sp_out_crop_pos;
+ u8 padding[2];
+ struct imgu_abi_frames_sp frames;
+ struct imgu_abi_resolution dvs_envelope;
+ struct imgu_abi_uds_info uds;
+ imgu_addr_t isp_stage_addr;
+ imgu_addr_t xmem_bin_addr;
+ imgu_addr_t xmem_map_addr;
+
+ u16 top_cropping;
+ u16 row_stripes_height;
+ u16 row_stripes_overlap_lines;
+ u8 if_config_index; /* Which should be applied by this stage. */
+ u8 padding2;
+} __packed;
+
+/***** struct imgu_abi_isp_stage *****/
+
+struct imgu_abi_isp_param_memory_offsets {
+ u32 offsets[IMGU_ABI_PARAM_CLASS_NUM]; /* offset wrt hdr in bytes */
+} __packed;
+
+/*
+ * Blob descriptor.
+ * This structure describes an SP or ISP blob.
+ * It describes the text, data and bss sections as well as the position in a
+ * firmware file.
+ * For convenience, it contains dynamic data after loading.
+ */
+struct imgu_abi_blob_info {
+ /* Static blob data */
+ u32 offset; /* Blob offset in fw file */
+ struct imgu_abi_isp_param_memory_offsets memory_offsets;
+ /* offset wrt hdr in bytes */
+ u32 prog_name_offset; /* offset wrt hdr in bytes */
+ u32 size; /* Size of blob */
+	u32 padding_size;	/* total number of bytes added due to
+				 * section alignment
+				 */
+ u32 icache_source; /* Position of icache in blob */
+ u32 icache_size; /* Size of icache section */
+ u32 icache_padding; /* added due to icache section alignment */
+ u32 text_source; /* Position of text in blob */
+ u32 text_size; /* Size of text section */
+ u32 text_padding; /* bytes added due to text section alignment */
+ u32 data_source; /* Position of data in blob */
+ u32 data_target; /* Start of data in SP dmem */
+	u32 data_size;		/* Size of data section */
+ u32 data_padding; /* bytes added due to data section alignment */
+ u32 bss_target; /* Start position of bss in SP dmem */
+	u32 bss_size;		/* Size of bss section */
+	/* Dynamic data, filled by the loader: */
+ u64 code __aligned(8); /* Code section absolute pointer */
+ /* within fw, code = icache + text */
+ u64 data __aligned(8); /* Data section absolute pointer */
+ /* within fw, data = data + bss */
+} __packed;
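+
+/*
+ * Layout note: a blob is stored in the firmware file at 'offset' and consists
+ * of the icache, text and data sections plus alignment padding, i.e. the
+ * driver expects size == icache_size + text_size + data_size + padding_size
+ * (imgu_css_fw_init() checks exactly this when parsing the firmware).
+ */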
+
+struct imgu_abi_binary_pipeline_info {
+ u32 mode;
+ u32 isp_pipe_version;
+ u32 pipelining;
+ u32 c_subsampling;
+ u32 top_cropping;
+ u32 left_cropping;
+ u32 variable_resolution;
+} __packed;
+
+struct imgu_abi_binary_input_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 source; /* enum imgu_abi_bin_input_src */
+} __packed;
+
+struct imgu_abi_binary_output_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 num_chunks;
+ u32 variable_format;
+} __packed;
+
+struct imgu_abi_binary_internal_info {
+ u32 max_width;
+ u32 max_height;
+} __packed;
+
+struct imgu_abi_binary_bds_info {
+ u32 supported_bds_factors;
+} __packed;
+
+struct imgu_abi_binary_dvs_info {
+ u32 max_envelope_width;
+ u32 max_envelope_height;
+} __packed;
+
+struct imgu_abi_binary_vf_dec_info {
+ u32 is_variable;
+ u32 max_log_downscale;
+} __packed;
+
+struct imgu_abi_binary_s3a_info {
+ u32 s3atbl_use_dmem;
+ u32 fixed_s3a_deci_log;
+} __packed;
+
+struct imgu_abi_binary_dpc_info {
+ u32 bnr_lite; /* bnr lite enable flag */
+} __packed;
+
+struct imgu_abi_binary_iterator_info {
+ u32 num_stripes;
+ u32 row_stripes_height;
+ u32 row_stripes_overlap_lines;
+} __packed;
+
+struct imgu_abi_binary_address_info {
+ u32 isp_addresses; /* Address in ISP dmem */
+ u32 main_entry; /* Address of entry fct */
+ u32 in_frame; /* Address in ISP dmem */
+ u32 out_frame; /* Address in ISP dmem */
+ u32 in_data; /* Address in ISP dmem */
+ u32 out_data; /* Address in ISP dmem */
+ u32 sh_dma_cmd_ptr; /* In ISP dmem */
+} __packed;
+
+struct imgu_abi_binary_uds_info {
+ u16 bpp;
+ u16 use_bci;
+ u16 use_str;
+ u16 woix;
+ u16 woiy;
+ u16 extra_out_vecs;
+ u16 vectors_per_line_in;
+ u16 vectors_per_line_out;
+ u16 vectors_c_per_line_in;
+ u16 vectors_c_per_line_out;
+ u16 vmem_gdc_in_block_height_y;
+ u16 vmem_gdc_in_block_height_c;
+} __packed;
+
+struct imgu_abi_binary_block_info {
+ u32 block_width;
+ u32 block_height;
+ u32 output_block_height;
+} __packed;
+
+struct imgu_abi_isp_data {
+ imgu_addr_t address; /* ISP address */
+ u32 size; /* Disabled if 0 */
+} __packed;
+
+struct imgu_abi_isp_param_segments {
+ struct imgu_abi_isp_data
+ params[IMGU_ABI_PARAM_CLASS_NUM][IMGU_ABI_NUM_MEMORIES];
+} __packed;
+
+struct imgu_abi_binary_info {
+ u32 id __aligned(8); /* IMGU_ABI_BINARY_ID_* */
+ struct imgu_abi_binary_pipeline_info pipeline;
+ struct imgu_abi_binary_input_info input;
+ struct imgu_abi_binary_output_info output;
+ struct imgu_abi_binary_internal_info internal;
+ struct imgu_abi_binary_bds_info bds;
+ struct imgu_abi_binary_dvs_info dvs;
+ struct imgu_abi_binary_vf_dec_info vf_dec;
+ struct imgu_abi_binary_s3a_info s3a;
+ struct imgu_abi_binary_dpc_info dpc_bnr; /* DPC related binary info */
+ struct imgu_abi_binary_iterator_info iterator;
+ struct imgu_abi_binary_address_info addresses;
+ struct imgu_abi_binary_uds_info uds;
+ struct imgu_abi_binary_block_info block;
+ struct imgu_abi_isp_param_segments mem_initializers;
+ struct {
+ u8 input_feeder;
+ u8 output_system;
+ u8 obgrid;
+ u8 lin;
+ u8 dpc_acc;
+ u8 bds_acc;
+ u8 shd_acc;
+ u8 shd_ff;
+ u8 stats_3a_raw_buffer;
+ u8 acc_bayer_denoise;
+ u8 bnr_ff;
+ u8 awb_acc;
+ u8 awb_fr_acc;
+ u8 anr_acc;
+ u8 rgbpp_acc;
+ u8 rgbpp_ff;
+ u8 demosaic_acc;
+ u8 demosaic_ff;
+ u8 dvs_stats;
+ u8 lace_stats;
+ u8 yuvp1_b0_acc;
+ u8 yuvp1_c0_acc;
+ u8 yuvp2_acc;
+ u8 ae;
+ u8 af;
+ u8 dergb;
+ u8 rgb2yuv;
+ u8 high_quality;
+ u8 kerneltest;
+ u8 routing_shd_to_bnr; /* connect SHD with BNR ACCs */
+ u8 routing_bnr_to_anr; /* connect BNR with ANR ACCs */
+ u8 routing_anr_to_de; /* connect ANR with DE ACCs */
+ u8 routing_rgb_to_yuvp1; /* connect RGB with YUVP1 */
+ u8 routing_yuvp1_to_yuvp2; /* connect YUVP1 with YUVP2 */
+ u8 luma_only;
+ u8 input_yuv;
+ u8 input_raw;
+ u8 reduced_pipe;
+ u8 vf_veceven;
+ u8 dis;
+ u8 dvs_envelope;
+ u8 uds;
+ u8 dvs_6axis;
+ u8 block_output;
+ u8 streaming_dma;
+ u8 ds;
+ u8 bayer_fir_6db;
+ u8 raw_binning;
+ u8 continuous;
+ u8 s3a;
+ u8 fpnr;
+ u8 sc;
+ u8 macc;
+ u8 output;
+ u8 ref_frame;
+ u8 tnr;
+ u8 xnr;
+ u8 params;
+ u8 ca_gdc;
+ u8 isp_addresses;
+ u8 in_frame;
+ u8 out_frame;
+ u8 high_speed;
+ u8 dpc;
+ u8 padding[2];
+ u8 rgbir;
+ } enable;
+ struct {
+ u8 ref_y_channel;
+ u8 ref_c_channel;
+ u8 tnr_channel;
+ u8 tnr_out_channel;
+ u8 dvs_coords_channel;
+ u8 output_channel;
+ u8 c_channel;
+ u8 vfout_channel;
+ u8 vfout_c_channel;
+ u8 vfdec_bits_per_pixel;
+ u8 claimed_by_isp;
+ u8 padding[2];
+ } dma;
+} __packed;
+
+struct imgu_abi_isp_stage {
+ struct imgu_abi_blob_info blob_info;
+ struct imgu_abi_binary_info binary_info;
+ char binary_name[IMGU_ABI_MAX_BINARY_NAME];
+ struct imgu_abi_isp_param_segments mem_initializers;
+} __packed;
+
+/***** struct imgu_abi_ddr_address_map and parameter set *****/
+
+/* xmem address map allocation */
+struct imgu_abi_ddr_address_map {
+ imgu_addr_t isp_mem_param[IMGU_ABI_MAX_STAGES][IMGU_ABI_NUM_MEMORIES];
+ imgu_addr_t obgrid_tbl[IPU3_UAPI_MAX_STRIPES];
+ imgu_addr_t acc_cluster_params_for_sp;
+ imgu_addr_t dvs_6axis_params_y;
+} __packed;
+
+struct imgu_abi_parameter_set_info {
+ /* Pointers to Parameters in ISP format IMPT */
+ struct imgu_abi_ddr_address_map mem_map;
+ /* Unique ID to track per-frame configurations */
+ u32 isp_parameters_id;
+ /* Output frame to which this config has to be applied (optional) */
+ imgu_addr_t output_frame_ptr;
+} __packed;
+
+/***** struct imgu_abi_sp_group *****/
+
+/* SP configuration information */
+struct imgu_abi_sp_config {
+ u8 no_isp_sync; /* Signal host immediately after start */
+ u8 enable_raw_pool_locking; /* Enable Raw Buffer Locking for HALv3 */
+ u8 lock_all;
+ u8 disable_cont_vf;
+ u8 disable_preview_on_capture;
+ u8 padding[3];
+} __packed;
+
+/* Information for a pipeline */
+struct imgu_abi_sp_pipeline {
+ u32 pipe_id; /* the pipe ID */
+ u32 pipe_num; /* the dynamic pipe number */
+ u32 thread_id; /* the sp thread ID */
+ u32 pipe_config; /* the pipe config */
+ u32 pipe_qos_config; /* Bitmap of multiple QOS extension fw
+ * state, 0xffffffff indicates non
+ * QOS pipe.
+ */
+ u32 inout_port_config;
+ u32 required_bds_factor;
+ u32 dvs_frame_delay;
+	u32 num_stages;		/* number of stages in the pipe */
+ u32 running; /* needed for pipe termination */
+ imgu_addr_t sp_stage_addr[IMGU_ABI_MAX_STAGES];
+ imgu_addr_t scaler_pp_lut; /* Early bound LUT */
+ u32 stage; /* stage ptr is only used on sp */
+ s32 num_execs; /* number of times to run if this is
+ * an acceleration pipe.
+ */
+ union {
+ struct {
+ u32 bytes_available;
+ } bin;
+ struct {
+ u32 height;
+ u32 width;
+ u32 padded_width;
+ u32 max_input_width;
+ u32 raw_bit_depth;
+ } raw;
+ } copy;
+
+ /* Parameters passed to Shading Correction kernel. */
+ struct {
+ /* Origin X (bqs) of internal frame on shading table */
+ u32 internal_frame_origin_x_bqs_on_sctbl;
+ /* Origin Y (bqs) of internal frame on shading table */
+ u32 internal_frame_origin_y_bqs_on_sctbl;
+ } shading;
+} __packed;
+
+struct imgu_abi_sp_debug_command {
+ /*
+ * The DMA software-mask,
+ * Bit 31...24: unused.
+ * Bit 23...16: unused.
+ * Bit 15...08: reading-request enabling bits for DMA channel 7..0
+ * Bit 07...00: writing-request enabling bits for DMA channel 7..0
+ *
+ * For example, "0...0 0...0 11111011 11111101" indicates that the
+ * writing request through DMA Channel 1 and the reading request
+ * through DMA channel 2 are both disabled. The others are enabled.
+ */
+ u32 dma_sw_reg;
+} __packed;
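+
+/*
+ * Illustrative sketch of composing the mask documented above (the channel
+ * numbers are taken from the example in the comment):
+ *
+ *	u32 dma_sw_reg = 0xffff;	enable all read/write requests
+ *	dma_sw_reg &= ~BIT(8 + 2);	disable reads on DMA channel 2
+ *	dma_sw_reg &= ~BIT(1);		disable writes on DMA channel 1
+ *
+ * which produces the example value "0...0 0...0 11111011 11111101".
+ */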
+
+/*
+ * Group all host initialized SP variables into this struct.
+ * This is initialized every stage through dma.
+ * The stage part itself is transferred through imgu_abi_sp_stage.
+ */
+struct imgu_abi_sp_group {
+ struct imgu_abi_sp_config config;
+ struct imgu_abi_sp_pipeline pipe[IMGU_ABI_MAX_SP_THREADS];
+ struct imgu_abi_sp_debug_command debug;
+} __packed;
+
+/***** parameter and state class binary configurations *****/
+
+struct imgu_abi_isp_iterator_config {
+ struct imgu_abi_frame_sp_info input_info;
+ struct imgu_abi_frame_sp_info internal_info;
+ struct imgu_abi_frame_sp_info output_info;
+ struct imgu_abi_frame_sp_info vf_info;
+ struct imgu_abi_sp_resolution dvs_envelope;
+} __packed;
+
+struct imgu_abi_dma_port_config {
+ u8 crop, elems;
+ u16 width;
+ u32 stride;
+} __packed;
+
+struct imgu_abi_isp_ref_config {
+ u32 width_a_over_b;
+ struct imgu_abi_dma_port_config port_b;
+ u32 ref_frame_addr_y[IMGU_ABI_FRAMES_REF];
+ u32 ref_frame_addr_c[IMGU_ABI_FRAMES_REF];
+ u32 dvs_frame_delay;
+} __packed;
+
+struct imgu_abi_isp_ref_dmem_state {
+ u32 ref_in_buf_idx;
+ u32 ref_out_buf_idx;
+} __packed;
+
+struct imgu_abi_isp_dvs_config {
+ u32 num_horizontal_blocks;
+ u32 num_vertical_blocks;
+} __packed;
+
+struct imgu_abi_isp_tnr3_config {
+ u32 width_a_over_b;
+ u32 frame_height;
+ struct imgu_abi_dma_port_config port_b;
+ u32 delay_frame;
+ u32 frame_addr[IMGU_ABI_FRAMES_TNR];
+} __packed;
+
+struct imgu_abi_isp_tnr3_dmem_state {
+ u32 in_bufidx;
+ u32 out_bufidx;
+ u32 total_frame_counter;
+ u32 buffer_frame_counter[IMGU_ABI_BUF_SETS_TNR];
+ u32 bypass_filter;
+} __packed;
+
+/***** Queues *****/
+
+struct imgu_abi_queue_info {
+	u8 size;	/* the maximum number of elements */
+ u8 step; /* number of bytes per element */
+ u8 start; /* index of the oldest element */
+ u8 end; /* index at which to write the new element */
+} __packed;
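+
+/*
+ * The fields above describe a circular buffer: 'start' indexes the oldest
+ * element and 'end' the slot that receives the next element. A minimal
+ * producer-side sketch (illustrative only, not the driver's actual queue
+ * code) over one of the bufq arrays below would be:
+ *
+ *	bufq[info->end] = token;
+ *	info->end = (info->end + 1) % info->size;
+ *
+ * with the consumer advancing 'start' in the same modular way.
+ */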
+
+struct imgu_abi_queues {
+ /*
+ * Queues for the dynamic frame information,
+ * i.e. the "in_frame" buffer, the "out_frame"
+ * buffer and the "vf_out_frame" buffer.
+ */
+ struct imgu_abi_queue_info host2sp_bufq_info
+ [IMGU_ABI_MAX_SP_THREADS][IMGU_ABI_QUEUE_NUM];
+ u32 host2sp_bufq[IMGU_ABI_MAX_SP_THREADS][IMGU_ABI_QUEUE_NUM]
+ [IMGU_ABI_HOST2SP_BUFQ_SIZE];
+ struct imgu_abi_queue_info sp2host_bufq_info[IMGU_ABI_QUEUE_NUM];
+ u32 sp2host_bufq[IMGU_ABI_QUEUE_NUM][IMGU_ABI_SP2HOST_BUFQ_SIZE];
+
+ /*
+ * The queues for the events.
+ */
+ struct imgu_abi_queue_info host2sp_evtq_info;
+ u32 host2sp_evtq[IMGU_ABI_HOST2SP_EVTQ_SIZE];
+ struct imgu_abi_queue_info sp2host_evtq_info;
+ u32 sp2host_evtq[IMGU_ABI_SP2HOST_EVTQ_SIZE];
+} __packed;
+
+/***** Buffer descriptor *****/
+
+struct imgu_abi_metadata_info {
+ struct imgu_abi_resolution resolution; /* Resolution */
+ u32 stride; /* Stride in bytes */
+ u32 size; /* Total size in bytes */
+} __packed;
+
+struct imgu_abi_isp_3a_statistics {
+ union {
+ struct {
+ imgu_addr_t s3a_tbl;
+ } dmem;
+ struct {
+ imgu_addr_t s3a_tbl_hi;
+ imgu_addr_t s3a_tbl_lo;
+ } vmem;
+ } data;
+ struct {
+ imgu_addr_t rgby_tbl;
+ } data_hmem;
+ u32 exp_id; /* exposure id, to match statistics to a frame, */
+ u32 isp_config_id; /* Tracks per-frame configs */
+ imgu_addr_t data_ptr; /* pointer to base of all data */
+ u32 size; /* total size of all data */
+ u32 dmem_size;
+ u32 vmem_size; /* both lo and hi have this size */
+ u32 hmem_size;
+} __packed;
+
+struct imgu_abi_metadata {
+ struct imgu_abi_metadata_info info; /* Layout info */
+ imgu_addr_t address; /* CSS virtual address */
+ u32 exp_id; /* Exposure ID */
+} __packed;
+
+struct imgu_abi_time_meas {
+ u32 start_timer_value; /* measured time in ticks */
+ u32 end_timer_value; /* measured time in ticks */
+} __packed;
+
+struct imgu_abi_buffer {
+ union {
+ struct imgu_abi_isp_3a_statistics s3a;
+ u8 reserved[28];
+ imgu_addr_t skc_dvs_statistics;
+ imgu_addr_t lace_stat;
+ struct imgu_abi_metadata metadata;
+ struct {
+ imgu_addr_t frame_data;
+ u32 flashed;
+ u32 exp_id;
+ u32 isp_parameters_id; /* Tracks per-frame configs */
+ u32 padded_width;
+ } frame;
+ imgu_addr_t ddr_ptrs;
+ } payload;
+ /*
+ * kernel_ptr is present for host administration purposes only.
+ * type is uint64_t in order to be 64-bit host compatible.
+ * uint64_t does not exist on SP/ISP.
+ * Size of the struct is checked by sp.hive.c.
+ */
+ u64 cookie_ptr __aligned(8);
+ u64 kernel_ptr;
+ struct imgu_abi_time_meas timing_data;
+ u32 isys_eof_clock_tick;
+} __packed;
+
+struct imgu_abi_bl_dma_cmd_entry {
+ u32 src_addr; /* virtual DDR address */
+	u32 size;	/* number of bytes to be transferred */
+ u32 dst_type;
+ u32 dst_addr; /* hmm address of xMEM or MMIO */
+} __packed;
+
+struct imgu_abi_sp_init_dmem_cfg {
+ u32 ddr_data_addr; /* data segment address in ddr */
+ u32 dmem_data_addr; /* data segment address in dmem */
+ u32 dmem_bss_addr; /* bss segment address in dmem */
+ u32 data_size; /* data segment size */
+ u32 bss_size; /* bss segment size */
+ u32 sp_id; /* sp id */
+} __packed;
+
+#endif
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.c b/drivers/staging/media/ipu3/ipu3-css-fw.c
new file mode 100644
index 000000000000..37482b626c3c
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "ipu3-css.h"
+#include "ipu3-css-fw.h"
+#include "ipu3-dmamap.h"
+
+static void imgu_css_fw_show_binary(struct device *dev, struct imgu_fw_info *bi,
+ const char *name)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "found firmware binary type %i size %i name %s\n",
+ bi->type, bi->blob.size, name);
+ if (bi->type != IMGU_FW_ISP_FIRMWARE)
+ return;
+
+ dev_dbg(dev, " id %i mode %i bds 0x%x veceven %i/%i out_pins %i\n",
+ bi->info.isp.sp.id, bi->info.isp.sp.pipeline.mode,
+ bi->info.isp.sp.bds.supported_bds_factors,
+ bi->info.isp.sp.enable.vf_veceven,
+ bi->info.isp.sp.vf_dec.is_variable,
+ bi->info.isp.num_output_pins);
+
+ dev_dbg(dev, " input (%i,%i)-(%i,%i) formats %s%s%s\n",
+ bi->info.isp.sp.input.min_width,
+ bi->info.isp.sp.input.min_height,
+ bi->info.isp.sp.input.max_width,
+ bi->info.isp.sp.input.max_height,
+ bi->info.isp.sp.enable.input_yuv ? "yuv420 " : "",
+ bi->info.isp.sp.enable.input_feeder ||
+ bi->info.isp.sp.enable.input_raw ? "raw8 raw10 " : "",
+ bi->info.isp.sp.enable.input_raw ? "raw12" : "");
+
+ dev_dbg(dev, " internal (%i,%i)\n",
+ bi->info.isp.sp.internal.max_width,
+ bi->info.isp.sp.internal.max_height);
+
+ dev_dbg(dev, " output (%i,%i)-(%i,%i) formats",
+ bi->info.isp.sp.output.min_width,
+ bi->info.isp.sp.output.min_height,
+ bi->info.isp.sp.output.max_width,
+ bi->info.isp.sp.output.max_height);
+ for (i = 0; i < bi->info.isp.num_output_formats; i++)
+ dev_dbg(dev, " %i", bi->info.isp.output_formats[i]);
+ dev_dbg(dev, " vf");
+ for (i = 0; i < bi->info.isp.num_vf_formats; i++)
+ dev_dbg(dev, " %i", bi->info.isp.vf_formats[i]);
+ dev_dbg(dev, "\n");
+}
+
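+/*
+ * The optical black grid computed below has one cell per
+ * 2 * IMGU_OBGRID_TILE_SIZE pixels in each direction plus one, its width is
+ * aligned to a quarter of the ISP vector width, and one page-aligned table is
+ * allocated per stripe.
+ */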
+unsigned int imgu_css_fw_obgrid_size(const struct imgu_fw_info *bi)
+{
+ unsigned int width = DIV_ROUND_UP(bi->info.isp.sp.internal.max_width,
+ IMGU_OBGRID_TILE_SIZE * 2) + 1;
+ unsigned int height = DIV_ROUND_UP(bi->info.isp.sp.internal.max_height,
+ IMGU_OBGRID_TILE_SIZE * 2) + 1;
+ unsigned int obgrid_size;
+
+ width = ALIGN(width, IPU3_UAPI_ISP_VEC_ELEMS / 4);
+ obgrid_size = PAGE_ALIGN(width * height *
+ sizeof(struct ipu3_uapi_obgrid_param)) *
+ bi->info.isp.sp.iterator.num_stripes;
+ return obgrid_size;
+}
+
+void *imgu_css_fw_pipeline_params(struct imgu_css *css, unsigned int pipe,
+ enum imgu_abi_param_class cls,
+ enum imgu_abi_memories mem,
+ struct imgu_fw_isp_parameter *par,
+ size_t par_size, void *binary_params)
+{
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css->pipes[pipe].bindex];
+
+ if (par->offset + par->size >
+ bi->info.isp.sp.mem_initializers.params[cls][mem].size)
+ return NULL;
+
+ if (par->size != par_size)
+ pr_warn("parameter size doesn't match defined size\n");
+
+ if (par->size < par_size)
+ return NULL;
+
+ return binary_params + par->offset;
+}
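+
+/*
+ * Typical usage is a sketch like the following, where the destination buffer,
+ * the offsets struct and the parameter type are placeholders chosen by the
+ * caller:
+ *
+ *	void *dst = imgu_css_fw_pipeline_params(css, pipe,
+ *						IMGU_ABI_PARAM_CLASS_PARAM, mem,
+ *						&offsets->dmem.tnr,
+ *						sizeof(struct my_tnr_params),
+ *						dmem_buf);
+ *	if (dst)
+ *		memcpy(dst, &new_tnr, sizeof(new_tnr));
+ *
+ * A NULL return means the binary does not provide that parameter (or its
+ * region is too small).
+ */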
+
+void imgu_css_fw_cleanup(struct imgu_css *css)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+
+ if (css->binary) {
+ unsigned int i;
+
+ for (i = 0; i < css->fwp->file_header.binary_nr; i++)
+ imgu_dmamap_free(imgu, &css->binary[i]);
+ kfree(css->binary);
+ }
+ if (css->fw)
+ release_firmware(css->fw);
+
+ css->binary = NULL;
+ css->fw = NULL;
+}
+
+int imgu_css_fw_init(struct imgu_css *css)
+{
+ static const u32 BLOCK_MAX = 65536;
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct device *dev = css->dev;
+ unsigned int i, j, binary_nr;
+ int r;
+
+ r = request_firmware(&css->fw, IMGU_FW_NAME_IPU_20161208, css->dev);
+ if (r == -ENOENT)
+ r = request_firmware(&css->fw, IMGU_FW_NAME_20161208, css->dev);
+ if (r == -ENOENT)
+ r = request_firmware(&css->fw, IMGU_FW_NAME, css->dev);
+ if (r)
+ return r;
+
+ /* Check and display fw header info */
+
+ css->fwp = (struct imgu_fw_header *)css->fw->data;
+ if (css->fw->size < struct_size(css->fwp, binary_header, 1) ||
+ css->fwp->file_header.h_size != sizeof(struct imgu_fw_bi_file_h))
+ goto bad_fw;
+ if (struct_size(css->fwp, binary_header,
+ css->fwp->file_header.binary_nr) > css->fw->size)
+ goto bad_fw;
+
+ dev_info(dev, "loaded firmware version %.64s, %u binaries, %zu bytes\n",
+ css->fwp->file_header.version, css->fwp->file_header.binary_nr,
+ css->fw->size);
+
+ /* Validate and display info on fw binaries */
+
+ binary_nr = css->fwp->file_header.binary_nr;
+
+ css->fw_bl = -1;
+ css->fw_sp[0] = -1;
+ css->fw_sp[1] = -1;
+
+ for (i = 0; i < binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+ const char *name = (void *)css->fwp + bi->blob.prog_name_offset;
+ size_t len;
+
+ if (bi->blob.prog_name_offset >= css->fw->size)
+ goto bad_fw;
+ len = strnlen(name, css->fw->size - bi->blob.prog_name_offset);
+ if (len + 1 > css->fw->size - bi->blob.prog_name_offset ||
+ len + 1 >= IMGU_ABI_MAX_BINARY_NAME)
+ goto bad_fw;
+
+ if (bi->blob.size != bi->blob.text_size + bi->blob.icache_size
+ + bi->blob.data_size + bi->blob.padding_size)
+ goto bad_fw;
+ if (bi->blob.offset + bi->blob.size > css->fw->size)
+ goto bad_fw;
+
+ if (bi->type == IMGU_FW_BOOTLOADER_FIRMWARE) {
+ css->fw_bl = i;
+ if (bi->info.bl.sw_state >= css->iomem_length ||
+ bi->info.bl.num_dma_cmds >= css->iomem_length ||
+ bi->info.bl.dma_cmd_list >= css->iomem_length)
+ goto bad_fw;
+ }
+ if (bi->type == IMGU_FW_SP_FIRMWARE ||
+ bi->type == IMGU_FW_SP1_FIRMWARE) {
+ css->fw_sp[bi->type == IMGU_FW_SP_FIRMWARE ? 0 : 1] = i;
+ if (bi->info.sp.per_frame_data >= css->iomem_length ||
+ bi->info.sp.init_dmem_data >= css->iomem_length ||
+ bi->info.sp.host_sp_queue >= css->iomem_length ||
+ bi->info.sp.isp_started >= css->iomem_length ||
+ bi->info.sp.sw_state >= css->iomem_length ||
+ bi->info.sp.sleep_mode >= css->iomem_length ||
+ bi->info.sp.invalidate_tlb >= css->iomem_length ||
+ bi->info.sp.host_sp_com >= css->iomem_length ||
+ bi->info.sp.output + 12 >= css->iomem_length ||
+ bi->info.sp.host_sp_queues_initialized >=
+ css->iomem_length)
+ goto bad_fw;
+ }
+ if (bi->type != IMGU_FW_ISP_FIRMWARE)
+ continue;
+
+ if (bi->info.isp.sp.pipeline.mode >= IPU3_CSS_PIPE_ID_NUM)
+ goto bad_fw;
+
+ if (bi->info.isp.sp.iterator.num_stripes >
+ IPU3_UAPI_MAX_STRIPES)
+ goto bad_fw;
+
+ if (bi->info.isp.num_vf_formats > IMGU_ABI_FRAME_FORMAT_NUM ||
+ bi->info.isp.num_output_formats > IMGU_ABI_FRAME_FORMAT_NUM)
+ goto bad_fw;
+
+ for (j = 0; j < bi->info.isp.num_output_formats; j++)
+ if (bi->info.isp.output_formats[j] >=
+ IMGU_ABI_FRAME_FORMAT_NUM)
+ goto bad_fw;
+ for (j = 0; j < bi->info.isp.num_vf_formats; j++)
+ if (bi->info.isp.vf_formats[j] >=
+ IMGU_ABI_FRAME_FORMAT_NUM)
+ goto bad_fw;
+
+ if (bi->info.isp.sp.block.block_width <= 0 ||
+ bi->info.isp.sp.block.block_width > BLOCK_MAX ||
+ bi->info.isp.sp.block.output_block_height <= 0 ||
+ bi->info.isp.sp.block.output_block_height > BLOCK_MAX)
+ goto bad_fw;
+
+ if (bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM]
+ + sizeof(struct imgu_fw_param_memory_offsets) >
+ css->fw->size ||
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_CONFIG]
+ + sizeof(struct imgu_fw_config_memory_offsets) >
+ css->fw->size ||
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_STATE]
+ + sizeof(struct imgu_fw_state_memory_offsets) >
+ css->fw->size)
+ goto bad_fw;
+
+ imgu_css_fw_show_binary(dev, bi, name);
+ }
+
+ if (css->fw_bl == -1 || css->fw_sp[0] == -1 || css->fw_sp[1] == -1)
+ goto bad_fw;
+
+ /* Allocate and map fw binaries into IMGU */
+
+ css->binary = kcalloc(binary_nr, sizeof(*css->binary), GFP_KERNEL);
+ if (!css->binary) {
+ r = -ENOMEM;
+ goto error_out;
+ }
+
+ for (i = 0; i < css->fwp->file_header.binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+ void *blob = (void *)css->fwp + bi->blob.offset;
+ size_t size = bi->blob.size;
+
+ if (!imgu_dmamap_alloc(imgu, &css->binary[i], size)) {
+ r = -ENOMEM;
+ goto error_out;
+ }
+ memcpy(css->binary[i].vaddr, blob, size);
+ }
+
+ return 0;
+
+bad_fw:
+ dev_err(dev, "invalid firmware binary, size %u\n", (int)css->fw->size);
+ r = -ENODEV;
+
+error_out:
+ imgu_css_fw_cleanup(css);
+ return r;
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.h b/drivers/staging/media/ipu3/ipu3-css-fw.h
new file mode 100644
index 000000000000..c956aa21df25
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_CSS_FW_H
+#define __IPU3_CSS_FW_H
+
+/******************* Firmware file definitions *******************/
+
+#define IMGU_FW_NAME "intel/ipu3-fw.bin"
+#define IMGU_FW_NAME_20161208 \
+ "intel/irci_irci_ecr-master_20161208_0213_20170112_1500.bin"
+#define IMGU_FW_NAME_IPU_20161208 \
+ "intel/ipu/irci_irci_ecr-master_20161208_0213_20170112_1500.bin"
+
+typedef u32 imgu_fw_ptr;
+
+enum imgu_fw_type {
+ IMGU_FW_SP_FIRMWARE, /* Firmware for the SP */
+ IMGU_FW_SP1_FIRMWARE, /* Firmware for the SP1 */
+ IMGU_FW_ISP_FIRMWARE, /* Firmware for the ISP */
+ IMGU_FW_BOOTLOADER_FIRMWARE, /* Firmware for the BootLoader */
+ IMGU_FW_ACC_FIRMWARE /* Firmware for accelerations */
+};
+
+enum imgu_fw_acc_type {
+ IMGU_FW_ACC_NONE, /* Normal binary */
+ IMGU_FW_ACC_OUTPUT, /* Accelerator stage on output frame */
+ IMGU_FW_ACC_VIEWFINDER, /* Accelerator stage on viewfinder frame */
+ IMGU_FW_ACC_STANDALONE, /* Stand-alone acceleration */
+};
+
+struct imgu_fw_isp_parameter {
+ u32 offset; /* Offset in isp_<mem> config, params, etc. */
+ u32 size; /* Disabled if 0 */
+};
+
+struct imgu_fw_param_memory_offsets {
+ struct {
+ struct imgu_fw_isp_parameter lin; /* lin_vmem_params */
+ struct imgu_fw_isp_parameter tnr3; /* tnr3_vmem_params */
+ struct imgu_fw_isp_parameter xnr3; /* xnr3_vmem_params */
+ } vmem;
+ struct {
+ struct imgu_fw_isp_parameter tnr;
+ struct imgu_fw_isp_parameter tnr3; /* tnr3_params */
+ struct imgu_fw_isp_parameter xnr3; /* xnr3_params */
+ struct imgu_fw_isp_parameter plane_io_config; /* 192 bytes */
+ struct imgu_fw_isp_parameter rgbir; /* rgbir_params */
+ } dmem;
+};
+
+struct imgu_fw_config_memory_offsets {
+ struct {
+ struct imgu_fw_isp_parameter iterator;
+ struct imgu_fw_isp_parameter dvs;
+ struct imgu_fw_isp_parameter output;
+ struct imgu_fw_isp_parameter raw;
+ struct imgu_fw_isp_parameter input_yuv;
+ struct imgu_fw_isp_parameter tnr;
+ struct imgu_fw_isp_parameter tnr3;
+ struct imgu_fw_isp_parameter ref;
+ } dmem;
+};
+
+struct imgu_fw_state_memory_offsets {
+ struct {
+ struct imgu_fw_isp_parameter tnr;
+ struct imgu_fw_isp_parameter tnr3;
+ struct imgu_fw_isp_parameter ref;
+ } dmem;
+};
+
+union imgu_fw_all_memory_offsets {
+ struct {
+ u64 imgu_fw_mem_offsets[3]; /* params, config, state */
+ } offsets;
+ struct {
+ u64 ptr;
+ } array[IMGU_ABI_PARAM_CLASS_NUM];
+};
+
+struct imgu_fw_binary_xinfo {
+ /* Part that is of interest to the SP. */
+ struct imgu_abi_binary_info sp;
+
+ /* Rest of the binary info, only interesting to the host. */
+ u32 type; /* enum imgu_fw_acc_type */
+
+ u32 num_output_formats __aligned(8);
+ u32 output_formats[IMGU_ABI_FRAME_FORMAT_NUM]; /* enum frame_format */
+
+ /* number of supported vf formats */
+ u32 num_vf_formats __aligned(8);
+ /* types of supported vf formats */
+ u32 vf_formats[IMGU_ABI_FRAME_FORMAT_NUM]; /* enum frame_format */
+ u8 num_output_pins;
+ imgu_fw_ptr xmem_addr;
+
+ u64 imgu_fw_blob_descr_ptr __aligned(8);
+ u32 blob_index __aligned(8);
+ union imgu_fw_all_memory_offsets mem_offsets __aligned(8);
+ struct imgu_fw_binary_xinfo *next __aligned(8);
+};
+
+struct imgu_fw_sp_info {
+ u32 init_dmem_data; /* data sect config, stored to dmem */
+ u32 per_frame_data; /* Per frame data, stored to dmem */
+ u32 group; /* Per pipeline data, loaded by dma */
+ u32 output; /* SP output data, loaded by dmem */
+ u32 host_sp_queue; /* Host <-> SP queues */
+ u32 host_sp_com; /* Host <-> SP commands */
+	u32 isp_started;	/* Polled from sensor thread, csim only */
+ u32 sw_state; /* Polled from css, enum imgu_abi_sp_swstate */
+ u32 host_sp_queues_initialized; /* Polled from the SP */
+ u32 sleep_mode; /* different mode to halt SP */
+ u32 invalidate_tlb; /* inform SP to invalidate mmu TLB */
+ u32 debug_buffer_ddr_address; /* the addr of DDR debug queue */
+
+ /* input system perf count array */
+ u32 perf_counter_input_system_error;
+ u32 threads_stack; /* sp thread's stack pointers */
+ u32 threads_stack_size; /* sp thread's stack sizes */
+ u32 curr_binary_id; /* current binary id */
+ u32 raw_copy_line_count; /* raw copy line counter */
+ u32 ddr_parameter_address; /* acc param ddrptr, sp dmem */
+ u32 ddr_parameter_size; /* acc param size, sp dmem */
+ /* Entry functions */
+ u32 sp_entry; /* The SP entry function */
+ u32 tagger_frames_addr; /* Base address of tagger state */
+};
+
+struct imgu_fw_bl_info {
+ u32 num_dma_cmds; /* Number of cmds sent by CSS */
+ u32 dma_cmd_list; /* Dma command list sent by CSS */
+ u32 sw_state; /* Polled from css, enum imgu_abi_bl_swstate */
+ /* Entry functions */
+ u32 bl_entry; /* The SP entry function */
+};
+
+struct imgu_fw_acc_info {
+ u32 per_frame_data; /* Dummy for now */
+};
+
+union imgu_fw_union {
+ struct imgu_fw_binary_xinfo isp; /* ISP info */
+ struct imgu_fw_sp_info sp; /* SP info */
+ struct imgu_fw_sp_info sp1; /* SP1 info */
+ struct imgu_fw_bl_info bl; /* Bootloader info */
+ struct imgu_fw_acc_info acc; /* Accelerator info */
+};
+
+struct imgu_fw_info {
+ size_t header_size; /* size of fw header */
+ u32 type __aligned(8); /* enum imgu_fw_type */
+
+ union imgu_fw_union info; /* Binary info */
+ struct imgu_abi_blob_info blob; /* Blob info */
+ /* Dynamic part */
+ u64 next;
+
+ u32 loaded __aligned(8); /* Firmware has been loaded */
+ const u64 isp_code __aligned(8); /* ISP pointer to code */
+ /* Firmware handle between user space and kernel */
+ u32 handle __aligned(8);
+ /* Sections to copy from/to ISP */
+ struct imgu_abi_isp_param_segments mem_initializers;
+ /* Initializer for local ISP memories */
+};
+
+struct imgu_fw_bi_file_h {
+ char version[64]; /* branch tag + week day + time */
+ int binary_nr; /* Number of binaries */
+ unsigned int h_size; /* sizeof(struct imgu_fw_bi_file_h) */
+};
+
+struct imgu_fw_header {
+ struct imgu_fw_bi_file_h file_header;
+ struct imgu_fw_info binary_header[]; /* binary_nr items */
+};
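+
+/*
+ * The firmware file thus begins with a single imgu_fw_bi_file_h followed by
+ * binary_nr imgu_fw_info entries; each entry locates its blob and program
+ * name elsewhere in the same file via blob.offset and blob.prog_name_offset,
+ * which is the layout imgu_css_fw_init() parses and validates.
+ */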
+
+/******************* Firmware functions *******************/
+
+int imgu_css_fw_init(struct imgu_css *css);
+void imgu_css_fw_cleanup(struct imgu_css *css);
+
+unsigned int imgu_css_fw_obgrid_size(const struct imgu_fw_info *bi);
+void *imgu_css_fw_pipeline_params(struct imgu_css *css, unsigned int pipe,
+ enum imgu_abi_param_class cls,
+ enum imgu_abi_memories mem,
+ struct imgu_fw_isp_parameter *par,
+ size_t par_size, void *binary_params);
+
+#endif
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
new file mode 100644
index 000000000000..2c48d57a3180
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
@@ -0,0 +1,2959 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+
+#include "ipu3-css.h"
+#include "ipu3-css-fw.h"
+#include "ipu3-tables.h"
+#include "ipu3-css-params.h"
+
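+/*
+ * Round 'a' to the nearest multiple of 'b', rounding exact halves down,
+ * e.g. roundclosest_down(6, 4) == 4 and roundclosest_down(7, 4) == 8.
+ */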
+#define DIV_ROUND_CLOSEST_DOWN(a, b) (((a) + ((b) / 2) - 1) / (b))
+#define roundclosest_down(a, b) (DIV_ROUND_CLOSEST_DOWN(a, b) * (b))
+
+#define IPU3_UAPI_ANR_MAX_RESET ((1 << 12) - 1)
+#define IPU3_UAPI_ANR_MIN_RESET (((-1) << 12) + 1)
+
+struct imgu_css_scaler_info {
+ unsigned int phase_step; /* Same for luma/chroma */
+ int exp_shift;
+
+ unsigned int phase_init; /* luma/chroma dependent */
+ int pad_left;
+ int pad_right;
+ int crop_left;
+ int crop_top;
+};
+
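+/*
+ * Return the largest exponent e for which (counter << e) <= divider, or 0 if
+ * counter >= divider. For example, an output (counter) of 400 and an input
+ * (divider) of 1920 yield 2, since 400 * 4 <= 1920 < 400 * 8.
+ */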
+static unsigned int imgu_css_scaler_get_exp(unsigned int counter,
+ unsigned int divider)
+{
+ int i = fls(divider) - fls(counter);
+
+ if (i <= 0)
+ return 0;
+
+ if (divider >> i < counter)
+ i = i - 1;
+
+ return i;
+}
+
+/* Set up the CSS scaler look up table */
+static void
+imgu_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
+ unsigned int output_width, int phase_step_correction,
+ const int *coeffs, unsigned int coeffs_size,
+ s8 coeff_lut[], struct imgu_css_scaler_info *info)
+{
+ int tap, phase, phase_sum_left, phase_sum_right;
+ int exponent = imgu_css_scaler_get_exp(output_width, input_width);
+ int mantissa = (1 << exponent) * output_width;
+ unsigned int phase_step, phase_taps;
+
+ if (input_width == output_width) {
+ for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
+ phase_taps = phase * IMGU_SCALER_FILTER_TAPS;
+ for (tap = 0; tap < taps; tap++)
+ coeff_lut[phase_taps + tap] = 0;
+ }
+
+ info->phase_step = IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF);
+ info->exp_shift = 0;
+ info->pad_left = 0;
+ info->pad_right = 0;
+ info->phase_init = 0;
+ info->crop_left = 0;
+ info->crop_top = 0;
+ return;
+ }
+
+ for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
+ phase_taps = phase * IMGU_SCALER_FILTER_TAPS;
+ for (tap = 0; tap < taps; tap++) {
+			/* flip the table for convolution reverse indexing */
+ s64 coeff = coeffs[coeffs_size -
+ ((tap * (coeffs_size / taps)) + phase) - 1];
+ coeff *= mantissa;
+ coeff = div64_long(coeff, input_width);
+
+ /* Add +"0.5" */
+ coeff += 1 << (IMGU_SCALER_COEFF_BITS - 1);
+ coeff >>= IMGU_SCALER_COEFF_BITS;
+ coeff_lut[phase_taps + tap] = coeff;
+ }
+ }
+
+ phase_step = IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF) *
+ output_width / input_width;
+ phase_step += phase_step_correction;
+ phase_sum_left = (taps / 2 * IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF)) -
+ (1 << (IMGU_SCALER_PHASE_COUNTER_PREC_REF - 1));
+ phase_sum_right = (taps / 2 * IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF)) +
+ (1 << (IMGU_SCALER_PHASE_COUNTER_PREC_REF - 1));
+
+ info->exp_shift = IMGU_SCALER_MAX_EXPONENT_SHIFT - exponent;
+ info->pad_left = (phase_sum_left % phase_step == 0) ?
+ phase_sum_left / phase_step - 1 : phase_sum_left / phase_step;
+ info->pad_right = (phase_sum_right % phase_step == 0) ?
+ phase_sum_right / phase_step - 1 : phase_sum_right / phase_step;
+ info->phase_init = phase_sum_left - phase_step * info->pad_left;
+ info->phase_step = phase_step;
+ info->crop_left = taps - 1;
+ info->crop_top = taps - 1;
+}
+
+/*
+ * Calculates the exact output image width/height, based on phase_step setting
+ * (must be perfectly aligned with hardware).
+ */
+static unsigned int
+imgu_css_scaler_calc_scaled_output(unsigned int input,
+ struct imgu_css_scaler_info *info)
+{
+ unsigned int arg1 = input * info->phase_step +
+ (1 - IMGU_SCALER_TAPS_Y / 2) * IMGU_SCALER_FIR_PHASES -
+ IMGU_SCALER_FIR_PHASES / (2 * IMGU_SCALER_PHASES);
+ unsigned int arg2 = ((IMGU_SCALER_TAPS_Y / 2) * IMGU_SCALER_FIR_PHASES +
+ IMGU_SCALER_FIR_PHASES / (2 * IMGU_SCALER_PHASES)) *
+ IMGU_SCALER_FIR_PHASES + info->phase_step / 2;
+
+ return ((arg1 + (arg2 - IMGU_SCALER_FIR_PHASES * info->phase_step) /
+ IMGU_SCALER_FIR_PHASES) / (2 * IMGU_SCALER_FIR_PHASES)) * 2;
+}
+
+/*
+ * Calculate the output width and height, given the luma
+ * and chroma details of a scaler
+ */
+static void
+imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
+ u32 target_height, struct imgu_abi_osys_config *cfg,
+ struct imgu_css_scaler_info *info_luma,
+ struct imgu_css_scaler_info *info_chroma,
+ unsigned int *output_width, unsigned int *output_height,
+ unsigned int *procmode)
+{
+ u32 out_width = target_width;
+ u32 out_height = target_height;
+ const unsigned int height_alignment = 2;
+ int phase_step_correction = -1;
+
+	/*
+	 * Calculate the scaled output width. If the horizontal and vertical
+	 * scaling factors differ, choose the larger one and crop off the
+	 * excess lines or columns after formatting.
+	 */
+ if (target_height * input_width > target_width * input_height)
+ target_width = DIV_ROUND_UP(target_height * input_width,
+ input_height);
+
+ if (input_width == target_width)
+ *procmode = IMGU_ABI_OSYS_PROCMODE_BYPASS;
+ else
+ *procmode = IMGU_ABI_OSYS_PROCMODE_DOWNSCALE;
+
+ memset(&cfg->scaler_coeffs_chroma, 0,
+ sizeof(cfg->scaler_coeffs_chroma));
+ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
+ do {
+ phase_step_correction++;
+
+ imgu_css_scaler_setup_lut(IMGU_SCALER_TAPS_Y,
+ input_width, target_width,
+ phase_step_correction,
+ imgu_css_downscale_4taps,
+ IMGU_SCALER_DOWNSCALE_4TAPS_LEN,
+ cfg->scaler_coeffs_luma, info_luma);
+
+ imgu_css_scaler_setup_lut(IMGU_SCALER_TAPS_UV,
+ input_width, target_width,
+ phase_step_correction,
+ imgu_css_downscale_2taps,
+ IMGU_SCALER_DOWNSCALE_2TAPS_LEN,
+ cfg->scaler_coeffs_chroma,
+ info_chroma);
+
+ out_width = imgu_css_scaler_calc_scaled_output(input_width,
+ info_luma);
+ out_height = imgu_css_scaler_calc_scaled_output(input_height,
+ info_luma);
+ } while ((out_width < target_width || out_height < target_height ||
+ !IS_ALIGNED(out_height, height_alignment)) &&
+ phase_step_correction <= 5);
+
+ *output_width = out_width;
+ *output_height = out_height;
+}
+
+/********************** Osys routines for scaler****************************/
+
+static void imgu_css_osys_set_format(enum imgu_abi_frame_format host_format,
+ unsigned int *osys_format,
+ unsigned int *osys_tiling)
+{
+ *osys_format = IMGU_ABI_OSYS_FORMAT_YUV420;
+ *osys_tiling = IMGU_ABI_OSYS_TILING_NONE;
+
+ switch (host_format) {
+ case IMGU_ABI_FRAME_FORMAT_YUV420:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_YUV420;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_YV12:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_YV12;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV12:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV12;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV16:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV16;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV21:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV21;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV12_TILEY:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV12;
+ *osys_tiling = IMGU_ABI_OSYS_TILING_Y;
+ break;
+ default:
+		/* For now, fall back to the default values */
+ break;
+ }
+}
+
+/*
+ * Function calculates input frame stripe offset, based
+ * on output frame stripe offset and filter parameters.
+ */
+static int imgu_css_osys_calc_stripe_offset(int stripe_offset_out,
+ int fir_phases, int phase_init,
+ int phase_step, int pad_left)
+{
+ int stripe_offset_inp = stripe_offset_out * fir_phases -
+ pad_left * phase_step;
+
+ return DIV_ROUND_UP(stripe_offset_inp - phase_init, phase_step);
+}
+
+/*
+ * Calculate input frame phase, given the output frame
+ * stripe offset and filter parameters
+ */
+static int imgu_css_osys_calc_stripe_phase_init(int stripe_offset_out,
+ int fir_phases, int phase_init,
+ int phase_step, int pad_left)
+{
+ int stripe_offset_inp =
+ imgu_css_osys_calc_stripe_offset(stripe_offset_out,
+ fir_phases, phase_init,
+ phase_step, pad_left);
+
+ return phase_init + ((pad_left + stripe_offset_inp) * phase_step) -
+ stripe_offset_out * fir_phases;
+}
+
+/*
+ * This function calculates the input frame stripe width,
+ * based on the output frame stripe width and filter parameters.
+ */
+static int imgu_css_osys_calc_inp_stripe_width(int stripe_width_out,
+ int fir_phases, int phase_init,
+ int phase_step, int fir_taps,
+ int pad_left, int pad_right)
+{
+ int stripe_width_inp = (stripe_width_out + fir_taps - 1) * fir_phases;
+
+ stripe_width_inp = DIV_ROUND_UP(stripe_width_inp - phase_init,
+ phase_step);
+
+ return stripe_width_inp - pad_left - pad_right;
+}
+
+/*
+ * This function calculates the output frame stripe width, based
+ * on the input frame stripe width and filter parameters.
+ */
+static int imgu_css_osys_out_stripe_width(int stripe_width_inp, int fir_phases,
+ int phase_init, int phase_step,
+ int fir_taps, int pad_left,
+ int pad_right, int column_offset)
+{
+ int stripe_width_out = (pad_left + stripe_width_inp +
+ pad_right - column_offset) * phase_step;
+
+ stripe_width_out = (stripe_width_out + phase_init) / fir_phases;
+
+ return stripe_width_out - (fir_taps - 1);
+}
+
+struct imgu_css_reso {
+ unsigned int input_width;
+ unsigned int input_height;
+ enum imgu_abi_frame_format input_format;
+ unsigned int pin_width[IMGU_ABI_OSYS_PINS];
+ unsigned int pin_height[IMGU_ABI_OSYS_PINS];
+ unsigned int pin_stride[IMGU_ABI_OSYS_PINS];
+ enum imgu_abi_frame_format pin_format[IMGU_ABI_OSYS_PINS];
+ int chunk_width;
+ int chunk_height;
+ int block_height;
+ int block_width;
+};
+
+struct imgu_css_frame_params {
+ /* Output pins */
+ unsigned int enable;
+ unsigned int format;
+ unsigned int flip;
+ unsigned int mirror;
+ unsigned int tiling;
+ unsigned int reduce_range;
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int scaled;
+ unsigned int crop_left;
+ unsigned int crop_top;
+};
+
+struct imgu_css_stripe_params {
+ unsigned int processing_mode;
+ unsigned int phase_step;
+ unsigned int exp_shift;
+ unsigned int phase_init_left_y;
+ unsigned int phase_init_left_uv;
+ unsigned int phase_init_top_y;
+ unsigned int phase_init_top_uv;
+ unsigned int pad_left_y;
+ unsigned int pad_left_uv;
+ unsigned int pad_right_y;
+ unsigned int pad_right_uv;
+ unsigned int pad_top_y;
+ unsigned int pad_top_uv;
+ unsigned int pad_bottom_y;
+ unsigned int pad_bottom_uv;
+ unsigned int crop_left_y;
+ unsigned int crop_top_y;
+ unsigned int crop_left_uv;
+ unsigned int crop_top_uv;
+ unsigned int start_column_y;
+ unsigned int start_column_uv;
+ unsigned int chunk_width;
+ unsigned int chunk_height;
+ unsigned int block_width;
+ unsigned int block_height;
+ unsigned int input_width;
+ unsigned int input_height;
+ int output_width[IMGU_ABI_OSYS_PINS];
+ int output_height[IMGU_ABI_OSYS_PINS];
+ int output_offset[IMGU_ABI_OSYS_PINS];
+};
+
+/*
+ * frame_params - size IMGU_ABI_OSYS_PINS
+ * stripe_params - size IPU3_UAPI_MAX_STRIPES
+ */
+static int imgu_css_osys_calc_frame_and_stripe_params(
+ struct imgu_css *css, unsigned int stripes,
+ struct imgu_abi_osys_config *osys,
+ struct imgu_css_scaler_info *scaler_luma,
+ struct imgu_css_scaler_info *scaler_chroma,
+ struct imgu_css_frame_params frame_params[],
+ struct imgu_css_stripe_params stripe_params[],
+ unsigned int pipe)
+{
+ struct imgu_css_reso reso;
+ unsigned int output_width, pin, s;
+ u32 input_width, input_height, target_width, target_height;
+ unsigned int procmode = 0;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
+ input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+ target_width = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ target_height = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+
+ /* Frame parameters */
+
+ /* Input width for Output System is output width of DVS (with GDC) */
+ reso.input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
+
+ /* Input height for Output System is output height of DVS (with GDC) */
+ reso.input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+
+ reso.input_format =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+
+ reso.pin_width[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ reso.pin_height[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ reso.pin_stride[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ reso.pin_format[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+
+ reso.pin_width[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ reso.pin_height[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ reso.pin_stride[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ reso.pin_format[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+
+ /* Configure the frame parameters for all output pins */
+
+ frame_params[IMGU_ABI_OSYS_PIN_OUT].width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ frame_params[IMGU_ABI_OSYS_PIN_OUT].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_top = 0;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_left = 0;
+
+ for (pin = 0; pin < IMGU_ABI_OSYS_PINS; pin++) {
+ int enable = 0;
+ int scaled = 0;
+ unsigned int format = 0;
+ unsigned int tiling = 0;
+
+ frame_params[pin].flip = 0;
+ frame_params[pin].mirror = 0;
+ frame_params[pin].reduce_range = 0;
+ if (reso.pin_width[pin] != 0 && reso.pin_height[pin] != 0) {
+ enable = 1;
+ if (pin == IMGU_ABI_OSYS_PIN_OUT) {
+ if (reso.input_width < reso.pin_width[pin] ||
+ reso.input_height < reso.pin_height[pin])
+ return -EINVAL;
+				/*
+				 * When the input and output resolutions
+				 * differ, cropping is applied instead of
+				 * scaling. Determine the crop amounts
+				 * needed for symmetric cropping.
+				 */
+ frame_params[pin].crop_left = roundclosest_down(
+ (reso.input_width -
+ reso.pin_width[pin]) / 2,
+ IMGU_OSYS_DMA_CROP_W_LIMIT);
+ frame_params[pin].crop_top = roundclosest_down(
+ (reso.input_height -
+ reso.pin_height[pin]) / 2,
+ IMGU_OSYS_DMA_CROP_H_LIMIT);
+ } else {
+ if (reso.pin_width[pin] != reso.input_width ||
+ reso.pin_height[pin] != reso.input_height) {
+					/*
+					 * If the resolution differs between
+					 * the OSYS input and output, scaling
+					 * is considered, except when the pin
+					 * is MAIN. Later it is decided whether
+					 * the scaling factor is 1 or something
+					 * else, and whether cropping has to be
+					 * done or not.
+					 */
+ scaled = 1;
+ }
+ }
+ imgu_css_osys_set_format(reso.pin_format[pin], &format,
+ &tiling);
+ } else {
+ enable = 0;
+ }
+ frame_params[pin].enable = enable;
+ frame_params[pin].format = format;
+ frame_params[pin].tiling = tiling;
+ frame_params[pin].stride = reso.pin_stride[pin];
+ frame_params[pin].scaled = scaled;
+ }
+
+ imgu_css_scaler_calc(input_width, input_height, target_width,
+ target_height, osys, scaler_luma, scaler_chroma,
+ &reso.pin_width[IMGU_ABI_OSYS_PIN_VF],
+ &reso.pin_height[IMGU_ABI_OSYS_PIN_VF], &procmode);
+ dev_dbg(css->dev, "osys scaler procmode is %u", procmode);
+ output_width = reso.pin_width[IMGU_ABI_OSYS_PIN_VF];
+
+ if (output_width < reso.input_width / 2) {
+ /* Scaling factor <= 0.5 */
+ reso.chunk_width = IMGU_OSYS_BLOCK_WIDTH;
+ reso.block_width = IMGU_OSYS_BLOCK_WIDTH;
+ } else { /* 0.5 <= Scaling factor <= 1.0 */
+ reso.chunk_width = IMGU_OSYS_BLOCK_WIDTH / 2;
+ reso.block_width = IMGU_OSYS_BLOCK_WIDTH;
+ }
+
+ if (output_width <= reso.input_width * 7 / 8) {
+ /* Scaling factor <= 0.875 */
+ reso.chunk_height = IMGU_OSYS_BLOCK_HEIGHT;
+ reso.block_height = IMGU_OSYS_BLOCK_HEIGHT;
+	} else { /* 0.875 < Scaling factor <= 1.0 */
+ reso.chunk_height = IMGU_OSYS_BLOCK_HEIGHT / 2;
+ reso.block_height = IMGU_OSYS_BLOCK_HEIGHT;
+ }
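+ /*
+ * Illustrative numbers (hypothetical, not tied to any particular mode):
+ * with an OSYS input width of 1920 and a VF output width of 800,
+ * 800 < 1920 / 2, so the chunk width stays at the full block width above,
+ * and 800 <= 1920 * 7 / 8 = 1680, so the chunk height also stays at the
+ * full block height.
+ */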
+
+ /*
+ * Calculate scaler configuration parameters based on input and output
+ * resolution.
+ */
+
+ if (frame_params[IMGU_ABI_OSYS_PIN_VF].enable) {
+ /*
+ * When aspect ratio is different between target resolution and
+ * required resolution, determine the crop factor to do
+ * symmetric cropping
+ */
+ u32 w = reso.pin_width[IMGU_ABI_OSYS_PIN_VF] -
+ frame_params[IMGU_ABI_OSYS_PIN_VF].width;
+ u32 h = reso.pin_height[IMGU_ABI_OSYS_PIN_VF] -
+ frame_params[IMGU_ABI_OSYS_PIN_VF].height;
+
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_left =
+ roundclosest_down(w / 2, IMGU_OSYS_DMA_CROP_W_LIMIT);
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_top =
+ roundclosest_down(h / 2, IMGU_OSYS_DMA_CROP_H_LIMIT);
+
+ if (reso.input_height % 4 || reso.input_width % 8) {
+ dev_err(css->dev, "OSYS input width is not a multiple of 8 or height is not a multiple of 4\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Stripe parameters */
+
+ if (frame_params[IMGU_ABI_OSYS_PIN_VF].enable) {
+ output_width = reso.pin_width[IMGU_ABI_OSYS_PIN_VF];
+ } else {
+ /*
+ * If the scaler (VF) output is not enabled,
+ * use the input width as the output width, since
+ * there is no scaling at the main pin.
+ * The main pin resolution is not used here because,
+ * due to cropping, it can differ from the OSYS
+ * input resolution.
+ */
+ output_width = reso.input_width;
+ }
+
+ for (s = 0; s < stripes; s++) {
+ int stripe_offset_inp_y = 0;
+ int stripe_offset_inp_uv = 0;
+ int stripe_offset_out_y = 0;
+ int stripe_offset_out_uv = 0;
+ int stripe_phase_init_y = scaler_luma->phase_init;
+ int stripe_phase_init_uv = scaler_chroma->phase_init;
+ int stripe_offset_blk_y = 0;
+ int stripe_offset_blk_uv = 0;
+ int stripe_offset_col_y = 0;
+ int stripe_offset_col_uv = 0;
+ int stripe_pad_left_y = scaler_luma->pad_left;
+ int stripe_pad_left_uv = scaler_chroma->pad_left;
+ int stripe_pad_right_y = scaler_luma->pad_right;
+ int stripe_pad_right_uv = scaler_chroma->pad_right;
+ int stripe_crop_left_y = scaler_luma->crop_left;
+ int stripe_crop_left_uv = scaler_chroma->crop_left;
+ int stripe_input_width_y = reso.input_width;
+ int stripe_input_width_uv = 0;
+ int stripe_output_width_y = output_width;
+ int stripe_output_width_uv = 0;
+ int chunk_floor_y = 0;
+ int chunk_floor_uv = 0;
+ int chunk_ceil_uv = 0;
+
+ if (stripes > 1) {
+ if (s > 0) {
+ /* Calculate stripe offsets */
+ stripe_offset_out_y =
+ output_width * s / stripes;
+ stripe_offset_out_y =
+ rounddown(stripe_offset_out_y,
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ stripe_offset_out_uv = stripe_offset_out_y /
+ IMGU_LUMA_TO_CHROMA_RATIO;
+ stripe_offset_inp_y =
+ imgu_css_osys_calc_stripe_offset(
+ stripe_offset_out_y,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_luma->phase_init,
+ scaler_luma->phase_step,
+ scaler_luma->pad_left);
+ stripe_offset_inp_uv =
+ imgu_css_osys_calc_stripe_offset(
+ stripe_offset_out_uv,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_chroma->phase_init,
+ scaler_chroma->phase_step,
+ scaler_chroma->pad_left);
+
+ /* Calculate stripe phase init */
+ stripe_phase_init_y =
+ imgu_css_osys_calc_stripe_phase_init(
+ stripe_offset_out_y,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_luma->phase_init,
+ scaler_luma->phase_step,
+ scaler_luma->pad_left);
+ stripe_phase_init_uv =
+ imgu_css_osys_calc_stripe_phase_init(
+ stripe_offset_out_uv,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_chroma->phase_init,
+ scaler_chroma->phase_step,
+ scaler_chroma->pad_left);
+
+ /*
+ * Chunk boundary corner case - luma and chroma
+ * start from different input chunks.
+ */
+ chunk_floor_y = rounddown(stripe_offset_inp_y,
+ reso.chunk_width);
+ chunk_floor_uv =
+ rounddown(stripe_offset_inp_uv,
+ reso.chunk_width /
+ IMGU_LUMA_TO_CHROMA_RATIO);
+
+ if (chunk_floor_y != chunk_floor_uv *
+ IMGU_LUMA_TO_CHROMA_RATIO) {
+ /*
+ * Match starting luma/chroma chunks.
+ * Decrease offset for UV and add output
+ * cropping.
+ */
+ stripe_offset_inp_uv -= 1;
+ stripe_crop_left_uv += 1;
+ stripe_phase_init_uv -=
+ scaler_luma->phase_step;
+ if (stripe_phase_init_uv < 0)
+ stripe_phase_init_uv =
+ stripe_phase_init_uv +
+ IMGU_OSYS_FIR_PHASES;
+ }
+ /*
+ * FW workaround for a HW bug: if the first
+ * chroma pixel is generated exactly at the end
+ * of a chunk, the scaler HW may not output the
+ * pixel for downscale factors smaller than 1.5
+ * (timing issue).
+ */
+ chunk_ceil_uv =
+ roundup(stripe_offset_inp_uv,
+ reso.chunk_width /
+ IMGU_LUMA_TO_CHROMA_RATIO);
+
+ if (stripe_offset_inp_uv ==
+ chunk_ceil_uv - IMGU_OSYS_TAPS_UV) {
+ /*
+ * Decrease input offset and add
+ * output cropping
+ */
+ stripe_offset_inp_uv -= 1;
+ stripe_phase_init_uv -=
+ scaler_luma->phase_step;
+ if (stripe_phase_init_uv < 0) {
+ stripe_phase_init_uv +=
+ IMGU_OSYS_FIR_PHASES;
+ stripe_crop_left_uv += 1;
+ }
+ }
+
+ /*
+ * Calculate block and column offsets for the
+ * input stripe
+ */
+ stripe_offset_blk_y =
+ rounddown(stripe_offset_inp_y,
+ IMGU_INPUT_BLOCK_WIDTH);
+ stripe_offset_blk_uv =
+ rounddown(stripe_offset_inp_uv,
+ IMGU_INPUT_BLOCK_WIDTH /
+ IMGU_LUMA_TO_CHROMA_RATIO);
+ stripe_offset_col_y = stripe_offset_inp_y -
+ stripe_offset_blk_y;
+ stripe_offset_col_uv = stripe_offset_inp_uv -
+ stripe_offset_blk_uv;
+
+ /* Left padding is only for the first stripe */
+ stripe_pad_left_y = 0;
+ stripe_pad_left_uv = 0;
+ }
+
+ /* Right padding is only for the last stripe */
+ if (s < stripes - 1) {
+ int next_offset;
+
+ stripe_pad_right_y = 0;
+ stripe_pad_right_uv = 0;
+
+ next_offset = output_width * (s + 1) / stripes;
+ next_offset = rounddown(next_offset, 64);
+ stripe_output_width_y = next_offset -
+ stripe_offset_out_y;
+ } else {
+ stripe_output_width_y = output_width -
+ stripe_offset_out_y;
+ }
+
+ /* Calculate target output stripe width */
+ stripe_output_width_uv = stripe_output_width_y /
+ IMGU_LUMA_TO_CHROMA_RATIO;
+ /* Calculate input stripe width */
+ stripe_input_width_y = stripe_offset_col_y +
+ imgu_css_osys_calc_inp_stripe_width(
+ stripe_output_width_y,
+ IMGU_OSYS_FIR_PHASES,
+ stripe_phase_init_y,
+ scaler_luma->phase_step,
+ IMGU_OSYS_TAPS_Y,
+ stripe_pad_left_y,
+ stripe_pad_right_y);
+
+ stripe_input_width_uv = stripe_offset_col_uv +
+ imgu_css_osys_calc_inp_stripe_width(
+ stripe_output_width_uv,
+ IMGU_OSYS_FIR_PHASES,
+ stripe_phase_init_uv,
+ scaler_chroma->phase_step,
+ IMGU_OSYS_TAPS_UV,
+ stripe_pad_left_uv,
+ stripe_pad_right_uv);
+
+ stripe_input_width_uv = max(DIV_ROUND_UP(
+ stripe_input_width_y,
+ IMGU_LUMA_TO_CHROMA_RATIO),
+ stripe_input_width_uv);
+
+ stripe_input_width_y = stripe_input_width_uv *
+ IMGU_LUMA_TO_CHROMA_RATIO;
+
+ if (s >= stripes - 1) {
+ stripe_input_width_y = reso.input_width -
+ stripe_offset_blk_y;
+ /*
+ * The scaler requires that the last stripe
+ * spans at least two input blocks.
+ */
+ }
+
+ /*
+ * Spec: input stripe width must be a multiple of 8.
+ * Increase the input width and recalculate the output
+ * width. This may produce an extra column of junk
+ * blocks which will be overwritten by the
+ * next stripe.
+ */
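+ /*
+ * For example (hypothetical value): a computed stripe_input_width_y
+ * of 1234 is aligned up to ALIGN(1234, 8) == 1240 here.
+ */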
+ stripe_input_width_y = ALIGN(stripe_input_width_y, 8);
+ stripe_output_width_y =
+ imgu_css_osys_out_stripe_width(
+ stripe_input_width_y,
+ IMGU_OSYS_FIR_PHASES,
+ stripe_phase_init_y,
+ scaler_luma->phase_step,
+ IMGU_OSYS_TAPS_Y,
+ stripe_pad_left_y,
+ stripe_pad_right_y,
+ stripe_offset_col_y);
+
+ stripe_output_width_y =
+ rounddown(stripe_output_width_y,
+ IMGU_LUMA_TO_CHROMA_RATIO);
+ }
+ /*
+ * The following section processes the parameters
+ * for both cases: striping and no striping.
+ */
+ {
+ unsigned int i;
+ /* Input resolution */
+
+ stripe_params[s].input_width = stripe_input_width_y;
+ stripe_params[s].input_height = reso.input_height;
+
+ for (i = 0; i < IMGU_ABI_OSYS_PINS; i++) {
+ if (frame_params[i].scaled) {
+ /*
+ * Output stripe resolution and offset
+ * as produced by the scaler; actual
+ * output resolution may be slightly
+ * smaller.
+ */
+ stripe_params[s].output_width[i] =
+ stripe_output_width_y;
+ stripe_params[s].output_height[i] =
+ reso.pin_height[i];
+ stripe_params[s].output_offset[i] =
+ stripe_offset_out_y;
+ } else {
+ /* Unscaled pin */
+ stripe_params[s].output_width[i] =
+ stripe_params[s].input_width;
+ stripe_params[s].output_height[i] =
+ stripe_params[s].input_height;
+ stripe_params[s].output_offset[i] =
+ stripe_offset_blk_y;
+ }
+ }
+
+ /* If no pin uses the scaler, we use BYPASS mode */
+ stripe_params[s].processing_mode = procmode;
+ stripe_params[s].phase_step = scaler_luma->phase_step;
+ stripe_params[s].exp_shift = scaler_luma->exp_shift;
+ stripe_params[s].phase_init_left_y =
+ stripe_phase_init_y;
+ stripe_params[s].phase_init_left_uv =
+ stripe_phase_init_uv;
+ stripe_params[s].phase_init_top_y =
+ scaler_luma->phase_init;
+ stripe_params[s].phase_init_top_uv =
+ scaler_chroma->phase_init;
+ stripe_params[s].pad_left_y = stripe_pad_left_y;
+ stripe_params[s].pad_left_uv = stripe_pad_left_uv;
+ stripe_params[s].pad_right_y = stripe_pad_right_y;
+ stripe_params[s].pad_right_uv = stripe_pad_right_uv;
+ stripe_params[s].pad_top_y = scaler_luma->pad_left;
+ stripe_params[s].pad_top_uv = scaler_chroma->pad_left;
+ stripe_params[s].pad_bottom_y = scaler_luma->pad_right;
+ stripe_params[s].pad_bottom_uv =
+ scaler_chroma->pad_right;
+ stripe_params[s].crop_left_y = stripe_crop_left_y;
+ stripe_params[s].crop_top_y = scaler_luma->crop_top;
+ stripe_params[s].crop_left_uv = stripe_crop_left_uv;
+ stripe_params[s].crop_top_uv = scaler_chroma->crop_top;
+ stripe_params[s].start_column_y = stripe_offset_col_y;
+ stripe_params[s].start_column_uv = stripe_offset_col_uv;
+ stripe_params[s].chunk_width = reso.chunk_width;
+ stripe_params[s].chunk_height = reso.chunk_height;
+ stripe_params[s].block_width = reso.block_width;
+ stripe_params[s].block_height = reso.block_height;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function configures the Output Formatter System, given the number of
+ * stripes and the scaler luma and chroma parameters.
+ */
+static int imgu_css_osys_calc(struct imgu_css *css, unsigned int pipe,
+ unsigned int stripes,
+ struct imgu_abi_osys_config *osys,
+ struct imgu_css_scaler_info *scaler_luma,
+ struct imgu_css_scaler_info *scaler_chroma,
+ struct imgu_abi_stripes block_stripes[])
+{
+ struct imgu_css_frame_params frame_params[IMGU_ABI_OSYS_PINS];
+ struct imgu_css_stripe_params stripe_params[IPU3_UAPI_MAX_STRIPES];
+ struct imgu_abi_osys_formatter_params *param;
+ unsigned int pin, s;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ memset(osys, 0, sizeof(*osys));
+
+ /* Compute the frame and stripe params */
+ if (imgu_css_osys_calc_frame_and_stripe_params(css, stripes, osys,
+ scaler_luma,
+ scaler_chroma,
+ frame_params,
+ stripe_params, pipe))
+ return -EINVAL;
+
+ /* Output formatter system parameters */
+
+ for (s = 0; s < stripes; s++) {
+ struct imgu_abi_osys_scaler_params *scaler =
+ &osys->scaler[s].param;
+ int fifo_addr_fmt = IMGU_FIFO_ADDR_SCALER_TO_FMT;
+ int fifo_addr_ack = IMGU_FIFO_ADDR_SCALER_TO_SP;
+
+ /* OUTPUT 0 / PIN 0 is only Scaler output */
+ scaler->inp_buf_y_st_addr = IMGU_VMEM1_INP_BUF_ADDR;
+
+ /*
+ * = (IMGU_OSYS_BLOCK_WIDTH / IMGU_VMEM1_ELEMS_PER_VEC)
+ * = (2 * IPU3_UAPI_ISP_VEC_ELEMS) /
+ * (IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS)
+ * = 2 * 64 / 32 = 4
+ */
+ scaler->inp_buf_y_line_stride = IMGU_VMEM1_Y_STRIDE;
+ /*
+ * = (IMGU_VMEM1_V_OFFSET + VMEM1_uv_size)
+ * = (IMGU_VMEM1_U_OFFSET + VMEM1_uv_size) +
+ * (VMEM1_y_size / 4)
+ * = (VMEM1_y_size) + (VMEM1_y_size / 4) +
+ * (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)/4
+ * = (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)
+ */
+ scaler->inp_buf_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->inp_buf_u_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ scaler->inp_buf_v_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ scaler->inp_buf_uv_line_stride = IMGU_VMEM1_UV_STRIDE;
+ scaler->inp_buf_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->inp_buf_chunk_width = stripe_params[s].chunk_width;
+ scaler->inp_buf_nr_buffers = IMGU_OSYS_NUM_INPUT_BUFFERS;
+
+ /* Output buffers */
+ scaler->out_buf_y_st_addr = IMGU_VMEM1_INT_BUF_ADDR;
+ scaler->out_buf_y_line_stride = stripe_params[s].block_width /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ scaler->out_buf_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->out_buf_u_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ scaler->out_buf_v_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ scaler->out_buf_uv_line_stride = stripe_params[s].block_width /
+ IMGU_VMEM1_ELEMS_PER_VEC / 2;
+ scaler->out_buf_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->out_buf_nr_buffers = IMGU_OSYS_NUM_INTERM_BUFFERS;
+
+ /* Intermediate buffers */
+ scaler->int_buf_y_st_addr = IMGU_VMEM2_BUF_Y_ADDR;
+ scaler->int_buf_y_line_stride = IMGU_VMEM2_BUF_Y_STRIDE;
+ scaler->int_buf_u_st_addr = IMGU_VMEM2_BUF_U_ADDR;
+ scaler->int_buf_v_st_addr = IMGU_VMEM2_BUF_V_ADDR;
+ scaler->int_buf_uv_line_stride = IMGU_VMEM2_BUF_UV_STRIDE;
+ scaler->int_buf_height = IMGU_VMEM2_LINES_PER_BLOCK;
+ scaler->int_buf_chunk_width = stripe_params[s].chunk_height;
+ scaler->int_buf_chunk_height = stripe_params[s].block_width;
+
+ /* Context buffers */
+ scaler->ctx_buf_hor_y_st_addr = IMGU_VMEM3_HOR_Y_ADDR;
+ scaler->ctx_buf_hor_u_st_addr = IMGU_VMEM3_HOR_U_ADDR;
+ scaler->ctx_buf_hor_v_st_addr = IMGU_VMEM3_HOR_V_ADDR;
+ scaler->ctx_buf_ver_y_st_addr = IMGU_VMEM3_VER_Y_ADDR;
+ scaler->ctx_buf_ver_u_st_addr = IMGU_VMEM3_VER_U_ADDR;
+ scaler->ctx_buf_ver_v_st_addr = IMGU_VMEM3_VER_V_ADDR;
+
+ /* Addresses for release-input and process-output tokens */
+ scaler->release_inp_buf_addr = fifo_addr_ack;
+ scaler->release_inp_buf_en = 1;
+ scaler->release_out_buf_en = 1;
+ scaler->process_out_buf_addr = fifo_addr_fmt;
+
+ /* Settings dimensions, padding, cropping */
+ scaler->input_image_y_width = stripe_params[s].input_width;
+ scaler->input_image_y_height = stripe_params[s].input_height;
+ scaler->input_image_y_start_column =
+ stripe_params[s].start_column_y;
+ scaler->input_image_uv_start_column =
+ stripe_params[s].start_column_uv;
+ scaler->input_image_y_left_pad = stripe_params[s].pad_left_y;
+ scaler->input_image_uv_left_pad = stripe_params[s].pad_left_uv;
+ scaler->input_image_y_right_pad = stripe_params[s].pad_right_y;
+ scaler->input_image_uv_right_pad =
+ stripe_params[s].pad_right_uv;
+ scaler->input_image_y_top_pad = stripe_params[s].pad_top_y;
+ scaler->input_image_uv_top_pad = stripe_params[s].pad_top_uv;
+ scaler->input_image_y_bottom_pad =
+ stripe_params[s].pad_bottom_y;
+ scaler->input_image_uv_bottom_pad =
+ stripe_params[s].pad_bottom_uv;
+ scaler->processing_mode = stripe_params[s].processing_mode;
+ scaler->scaling_ratio = stripe_params[s].phase_step;
+ scaler->y_left_phase_init = stripe_params[s].phase_init_left_y;
+ scaler->uv_left_phase_init =
+ stripe_params[s].phase_init_left_uv;
+ scaler->y_top_phase_init = stripe_params[s].phase_init_top_y;
+ scaler->uv_top_phase_init = stripe_params[s].phase_init_top_uv;
+ scaler->coeffs_exp_shift = stripe_params[s].exp_shift;
+ scaler->out_y_left_crop = stripe_params[s].crop_left_y;
+ scaler->out_uv_left_crop = stripe_params[s].crop_left_uv;
+ scaler->out_y_top_crop = stripe_params[s].crop_top_y;
+ scaler->out_uv_top_crop = stripe_params[s].crop_top_uv;
+
+ for (pin = 0; pin < IMGU_ABI_OSYS_PINS; pin++) {
+ int in_fifo_addr;
+ int out_fifo_addr;
+ int block_width_vecs;
+ int input_width_s;
+ int input_width_vecs;
+ int input_buf_y_st_addr;
+ int input_buf_u_st_addr;
+ int input_buf_v_st_addr;
+ int input_buf_y_line_stride;
+ int input_buf_uv_line_stride;
+ int output_buf_y_line_stride;
+ int output_buf_uv_line_stride;
+ int output_buf_nr_y_lines;
+ int block_height;
+ int block_width;
+ struct imgu_abi_osys_frame_params *fr_pr;
+
+ fr_pr = &osys->frame[pin].param;
+
+ /* Frame parameters */
+ fr_pr->enable = frame_params[pin].enable;
+ fr_pr->format = frame_params[pin].format;
+ fr_pr->mirror = frame_params[pin].mirror;
+ fr_pr->flip = frame_params[pin].flip;
+ fr_pr->tiling = frame_params[pin].tiling;
+ fr_pr->width = frame_params[pin].width;
+ fr_pr->height = frame_params[pin].height;
+ fr_pr->stride = frame_params[pin].stride;
+ fr_pr->scaled = frame_params[pin].scaled;
+
+ /* Stripe parameters */
+ osys->stripe[s].crop_top[pin] =
+ frame_params[pin].crop_top;
+ osys->stripe[s].input_width =
+ stripe_params[s].input_width;
+ osys->stripe[s].input_height =
+ stripe_params[s].input_height;
+ osys->stripe[s].block_height =
+ stripe_params[s].block_height;
+ osys->stripe[s].block_width =
+ stripe_params[s].block_width;
+ osys->stripe[s].output_width[pin] =
+ stripe_params[s].output_width[pin];
+ osys->stripe[s].output_height[pin] =
+ stripe_params[s].output_height[pin];
+
+ if (s == 0) {
+ /* Only first stripe should do left cropping */
+ osys->stripe[s].crop_left[pin] =
+ frame_params[pin].crop_left;
+ osys->stripe[s].output_offset[pin] =
+ stripe_params[s].output_offset[pin];
+ } else {
+ /*
+ * The stripe offset for the other stripes must be
+ * adjusted according to the cropping done
+ * at the first stripe.
+ */
+ osys->stripe[s].crop_left[pin] = 0;
+ osys->stripe[s].output_offset[pin] =
+ (stripe_params[s].output_offset[pin] -
+ osys->stripe[0].crop_left[pin]);
+ }
+
+ if (!frame_params[pin].enable)
+ continue;
+
+ /* Formatter: configurations */
+
+ /*
+ * Get the dimensions of the input blocks of the
+ * formatter, which are the same as the output
+ * blocks of the scaler.
+ */
+ if (frame_params[pin].scaled) {
+ block_height = stripe_params[s].block_height;
+ block_width = stripe_params[s].block_width;
+ } else {
+ block_height = IMGU_OSYS_BLOCK_HEIGHT;
+ block_width = IMGU_OSYS_BLOCK_WIDTH;
+ }
+ block_width_vecs =
+ block_width / IMGU_VMEM1_ELEMS_PER_VEC;
+ /*
+ * The input/output line stride depends on the
+ * block size.
+ */
+ input_buf_y_line_stride = block_width_vecs;
+ input_buf_uv_line_stride = block_width_vecs / 2;
+ output_buf_y_line_stride = block_width_vecs;
+ output_buf_uv_line_stride = block_width_vecs / 2;
+ output_buf_nr_y_lines = block_height;
+ if (frame_params[pin].format ==
+ IMGU_ABI_OSYS_FORMAT_NV12 ||
+ frame_params[pin].format ==
+ IMGU_ABI_OSYS_FORMAT_NV21)
+ output_buf_uv_line_stride =
+ output_buf_y_line_stride;
+
+ /*
+ * Tiled outputs use a different output buffer
+ * configuration. The input (= scaler output) block
+ * width translates to a tile height, and the block
+ * height to the tile width. The default block size of
+ * 128x32 maps exactly onto a 4kB tile (512x8) for Y.
+ * For UV, the tile width is always half.
+ */
+ if (frame_params[pin].tiling) {
+ output_buf_nr_y_lines = 8;
+ output_buf_y_line_stride = 512 /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ output_buf_uv_line_stride = 256 /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ }
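+ /*
+ * Illustrative check of the numbers above, assuming 8-bit Y samples:
+ * a 128x32 Y block is 128 * 32 = 4096 bytes, i.e. exactly one 4 kB
+ * tile laid out as 512 bytes x 8 lines, which is why
+ * output_buf_nr_y_lines becomes 8 and the Y line stride 512 bytes
+ * (UV uses half of that, 256 bytes).
+ */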
+
+ /*
+ * Store the output buffer line stride. Will be
+ * used to compute buffer offsets in boundary
+ * conditions when output blocks are partially
+ * outside the image.
+ */
+ osys->stripe[s].buf_stride[pin] =
+ output_buf_y_line_stride *
+ IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS;
+ if (frame_params[pin].scaled) {
+ /*
+ * The input buffers are the intermediate
+ * buffers (the scaler's output).
+ */
+ input_buf_y_st_addr = IMGU_VMEM1_INT_BUF_ADDR;
+ input_buf_u_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ input_buf_v_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ } else {
+ /*
+ * The input buffers are the buffers
+ * filled by the SP.
+ */
+ input_buf_y_st_addr = IMGU_VMEM1_INP_BUF_ADDR;
+ input_buf_u_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ input_buf_v_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ }
+
+ /*
+ * The formatter input width must be rounded to
+ * the block width. Otherwise the formatter will
+ * not recognize the end of the line, resulting
+ * in incorrect tiling (system may hang!) and
+ * possibly other problems.
+ */
+ input_width_s =
+ roundup(stripe_params[s].output_width[pin],
+ block_width);
+ input_width_vecs = input_width_s /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ out_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SP;
+ /*
+ * Process-output tokens must be sent to the SP.
+ * When scaling, the release-input tokens can be
+ * sent directly to the scaler, otherwise the
+ * formatter should send them to the SP.
+ */
+ if (frame_params[pin].scaled)
+ in_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SCALER;
+ else
+ in_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SP;
+
+ /* Formatter */
+ param = &osys->formatter[s][pin].param;
+
+ param->format = frame_params[pin].format;
+ param->flip = frame_params[pin].flip;
+ param->mirror = frame_params[pin].mirror;
+ param->tiling = frame_params[pin].tiling;
+ param->reduce_range = frame_params[pin].reduce_range;
+ param->alpha_blending = 0;
+ param->release_inp_addr = in_fifo_addr;
+ param->release_inp_en = 1;
+ param->process_out_buf_addr = out_fifo_addr;
+ param->image_width_vecs = input_width_vecs;
+ param->image_height_lines =
+ stripe_params[s].output_height[pin];
+ param->inp_buff_y_st_addr = input_buf_y_st_addr;
+ param->inp_buff_y_line_stride = input_buf_y_line_stride;
+ param->inp_buff_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ param->int_buff_u_st_addr = input_buf_u_st_addr;
+ param->int_buff_v_st_addr = input_buf_v_st_addr;
+ param->inp_buff_uv_line_stride =
+ input_buf_uv_line_stride;
+ param->inp_buff_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ param->out_buff_level = 0;
+ param->out_buff_nr_y_lines = output_buf_nr_y_lines;
+ param->out_buff_u_st_offset = IMGU_VMEM1_U_OFFSET;
+ param->out_buff_v_st_offset = IMGU_VMEM1_V_OFFSET;
+ param->out_buff_y_line_stride =
+ output_buf_y_line_stride;
+ param->out_buff_uv_line_stride =
+ output_buf_uv_line_stride;
+ param->hist_buff_st_addr = IMGU_VMEM1_HST_BUF_ADDR;
+ param->hist_buff_line_stride =
+ IMGU_VMEM1_HST_BUF_STRIDE;
+ param->hist_buff_nr_lines = IMGU_VMEM1_HST_BUF_NLINES;
+ }
+ }
+
+ block_stripes[0].offset = 0;
+ if (stripes <= 1) {
+ block_stripes[0].width = stripe_params[0].input_width;
+ block_stripes[0].height = stripe_params[0].input_height;
+ } else {
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ unsigned int sp_block_width =
+ bi->info.isp.sp.block.block_width *
+ IPU3_UAPI_ISP_VEC_ELEMS;
+
+ block_stripes[0].width = roundup(stripe_params[0].input_width,
+ sp_block_width);
+ block_stripes[1].offset =
+ rounddown(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
+ stripe_params[1].input_width, sp_block_width);
+ block_stripes[1].width =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
+ block_stripes[1].offset, sp_block_width);
+ block_stripes[0].height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+ block_stripes[1].height = block_stripes[0].height;
+ }
+
+ return 0;
+}
+
+/*********************** Mostly 3A operations ******************************/
+
+/*
+ * This function creates a "TO-DO list" (operations) for the SP code.
+ *
+ * There are 2 types of operations:
+ * 1. Transfer: Issue a DMA transfer request for copying grid cells from DDR to
+ *    accelerator space (NOTE that this space is limited). Associated data:
+ *    DDR address + accelerator's config set index (acc's address).
+ *
+ * 2. Issue a "Process Lines" command to the SHD accelerator.
+ *    Associated data: #lines + which config set to use (actually, the
+ *    accelerator will use x AND (x+1)%num_of_sets - NOTE that this implies the
+ *    restriction of not touching config sets x & (x+1)%num_of_sets while
+ *    process_lines(x) is active).
+ *
+ * Basically there are 2 types of operation "chunks":
+ * 1. "initial chunk": Initially, we do as many transfers as we can (and need)
+ *    [0 - max sets (3)], followed by 1 or 2 "process lines" operations.
+ *
+ * 2. "regular chunk": 1 transfer followed by 1 process-lines operation
+ *    (in some cases we might need an additional transfer at the last chunk).
+ *
+ * For example:
+ * --> init
+ * tr (0)
+ * tr (1)
+ * tr (2)
+ * pl (0)
+ * pl (1)
+ * --> ack (0)
+ * tr (3)
+ * pl (2)
+ * --> ack (1)
+ * pl (3)
+ * --> ack (2)
+ * do nothing
+ * --> ack (3)
+ * do nothing
+ */
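+/*
+ * Worked example with hypothetical grid values (not taken from any real
+ * configuration): block_height_log2 = 6 (block_height = 64),
+ * grid_height_per_slice = 2 (set_height = 128), y_start = -20 and
+ * image_height = 1080. Then first_process_lines = 128 - (20 % 128) = 108,
+ * last_set_height = (1080 - 108) % 128 = 76, num_of_sets = 7 + 2 = 9 and
+ * last_tr = 1 (since 128 - 76 <= 64). The process-lines heights are
+ * 108 + 7 * 128 + 76 = 1080 lines, i.e. the whole frame, fed by
+ * num_of_sets + last_tr = 10 transfers.
+ */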
+
+static int
+imgu_css_shd_ops_calc(struct imgu_abi_shd_intra_frame_operations_data *ops,
+ const struct ipu3_uapi_shd_grid_config *grid,
+ unsigned int image_height)
+{
+ unsigned int block_height = 1 << grid->block_height_log2;
+ unsigned int grid_height_per_slice = grid->grid_height_per_slice;
+ unsigned int set_height = grid_height_per_slice * block_height;
+
+ /* We currently support only abs(y_start) > grid_height_per_slice */
+ unsigned int positive_y_start = (unsigned int)-grid->y_start;
+ unsigned int first_process_lines =
+ set_height - (positive_y_start % set_height);
+ unsigned int last_set_height;
+ unsigned int num_of_sets;
+
+ struct imgu_abi_acc_operation *p_op;
+ struct imgu_abi_acc_process_lines_cmd_data *p_pl;
+ struct imgu_abi_shd_transfer_luts_set_data *p_tr;
+
+ unsigned int op_idx, pl_idx, tr_idx;
+ unsigned char tr_set_num, pl_cfg_set;
+
+ /*
+ * When the number of lines for the last process-lines command
+ * is equal to a set height, we need another line of grid cells -
+ * an additional transfer is required.
+ */
+ unsigned char last_tr = 0;
+
+ /* Add "process lines" command to the list of operations */
+ bool add_pl;
+ /* Add DMA xfer (config set) command to the list of ops */
+ bool add_tr;
+
+ /*
+ * Available partial grid (the part that fits into #IMGU_SHD_SETS sets)
+ * doesn't cover the whole frame - we need to process in chunks
+ */
+ if (image_height > first_process_lines) {
+ last_set_height =
+ (image_height - first_process_lines) % set_height;
+ num_of_sets = last_set_height > 0 ?
+ (image_height - first_process_lines) / set_height + 2 :
+ (image_height - first_process_lines) / set_height + 1;
+ last_tr = (set_height - last_set_height <= block_height ||
+ last_set_height == 0) ? 1 : 0;
+ } else { /* partial grid covers whole frame */
+ last_set_height = 0;
+ num_of_sets = 1;
+ first_process_lines = image_height;
+ last_tr = set_height - image_height <= block_height ? 1 : 0;
+ }
+
+ /* Init operations lists and counters */
+ p_op = ops->operation_list;
+ op_idx = 0;
+ p_pl = ops->process_lines_data;
+ pl_idx = 0;
+ p_tr = ops->transfer_data;
+ tr_idx = 0;
+
+ memset(ops, 0, sizeof(*ops));
+
+ /* Cyclic counters that hold the config set number [0, IMGU_SHD_SETS) */
+ tr_set_num = 0;
+ pl_cfg_set = 0;
+
+ /*
+ * Always start with a transfer - a process-lines command may be
+ * initiated only after the appropriate config sets are in place
+ * (2 configuration sets per process-lines command, except for the last one).
+ */
+ add_pl = false;
+ add_tr = true;
+
+ while (add_pl || add_tr) {
+ /* Transfer ops */
+ if (add_tr) {
+ if (op_idx >= IMGU_ABI_SHD_MAX_OPERATIONS ||
+ tr_idx >= IMGU_ABI_SHD_MAX_TRANSFERS)
+ return -EINVAL;
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA;
+ p_op[op_idx].op_indicator = IMGU_ABI_ACC_OP_IDLE;
+ op_idx++;
+ p_tr[tr_idx].set_number = tr_set_num;
+ tr_idx++;
+ tr_set_num = (tr_set_num + 1) % IMGU_SHD_SETS;
+ }
+
+ /* Process-lines ops */
+ if (add_pl) {
+ if (op_idx >= IMGU_ABI_SHD_MAX_OPERATIONS ||
+ pl_idx >= IMGU_ABI_SHD_MAX_PROCESS_LINES)
+ return -EINVAL;
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
+
+ /*
+ * In case we have 2 process lines commands -
+ * don't stop after the first one
+ */
+ if (pl_idx == 0 && num_of_sets != 1)
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+ /*
+ * Initiate last process lines command -
+ * end of operation list.
+ */
+ else if (pl_idx == num_of_sets - 1)
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_OPS;
+ /*
+ * Intermediate process line command - end of operation
+ * "chunk" (meaning few "transfers" followed by few
+ * "process lines" commands).
+ */
+ else
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+
+ op_idx++;
+
+ /* first process line operation */
+ if (pl_idx == 0)
+ p_pl[pl_idx].lines = first_process_lines;
+ /* Last process line operation */
+ else if (pl_idx == num_of_sets - 1 &&
+ last_set_height > 0)
+ p_pl[pl_idx].lines = last_set_height;
+ else /* "regular" process lines operation */
+ p_pl[pl_idx].lines = set_height;
+
+ p_pl[pl_idx].cfg_set = pl_cfg_set;
+ pl_idx++;
+ pl_cfg_set = (pl_cfg_set + 1) % IMGU_SHD_SETS;
+ }
+
+ /*
+ * Initially, we always transfer
+ * min(IMGU_SHD_SETS, num_of_sets) - after that we fill in the
+ * corresponding process lines commands.
+ */
+ if (tr_idx == IMGU_SHD_SETS ||
+ tr_idx == num_of_sets + last_tr) {
+ add_tr = false;
+ add_pl = true;
+ }
+
+ /*
+ * We have finished the "initial" operations chunk -
+ * be ready to get more chunks.
+ */
+ if (pl_idx == 2) {
+ add_tr = true;
+ add_pl = true;
+ }
+
+ /* Stop conditions for each operation type */
+ if (tr_idx == num_of_sets + last_tr)
+ add_tr = false;
+ if (pl_idx == num_of_sets)
+ add_pl = false;
+ }
+
+ return 0;
+}
+
+/*
+ * The following handshake protocol is the same for AF, AWB and AWB FR.
+ *
+ * for n sets of meta-data, the flow is:
+ * --> init
+ * process-lines (0)
+ * process-lines (1) eoc
+ * --> ack (0)
+ * read-meta-data (0)
+ * process-lines (2) eoc
+ * --> ack (1)
+ * read-meta-data (1)
+ * process-lines (3) eoc
+ * ...
+ *
+ * --> ack (n-3)
+ * read-meta-data (n-3)
+ * process-lines (n-1) eoc
+ * --> ack (n-2)
+ * read-meta-data (n-2) eoc
+ * --> ack (n-1)
+ * read-meta-data (n-1) eof
+ *
+ * for 2 sets we get:
+ * --> init
+ * pl (0)
+ * pl (1) eoc
+ * --> ack (0)
+ * pl (2) (rest of the image, if applicable)
+ * rmd (0) eoc
+ * --> ack (1)
+ * rmd (1) eof
+ * --> (ack (2))
+ * do nothing
+ *
+ * for only one set:
+ *
+ * --> init
+ * pl(0) eoc
+ * --> ack (0)
+ * rmd (0) eof
+ *
+ * grid smaller than image case
+ * for example 128x128 grid (block size 8x8, 16x16 num of blocks)
+ * start at (0,0)
+ * 1st set holds 160 cells - 10 blocks vertical, 16 horizontal
+ * => 1st process lines = 80
+ * we're left with 128-80=48 lines (6 blocks vertical)
+ * => 2nd process lines = 48
+ * last process lines to cover the image - image_height - 128
+ *
+ * --> init
+ * pl (0) first
+ * pl (1) last-in-grid
+ * --> ack (0)
+ * rmd (0)
+ * pl (2) after-grid
+ * --> ack (1)
+ * rmd (1) eof
+ * --> ack (2)
+ * do nothing
+ */
+struct process_lines {
+ unsigned int image_height;
+ unsigned short grid_height;
+ unsigned short block_height;
+ unsigned short y_start;
+ unsigned char grid_height_per_slice;
+
+ unsigned short max_op; /* max operation */
+ unsigned short max_tr; /* max transaction */
+ unsigned char acc_enable;
+};
+
+/* Helper to configure intra_frame_operations_data. */
+static int
+imgu_css_acc_process_lines(const struct process_lines *pl,
+ struct imgu_abi_acc_operation *p_op,
+ struct imgu_abi_acc_process_lines_cmd_data *p_pl,
+ struct imgu_abi_acc_transfer_op_data *p_tr)
+{
+ unsigned short op_idx = 0, pl_idx = 0, tr_idx = 0;
+ unsigned char tr_set_num = 0, pl_cfg_set = 0;
+ const unsigned short grid_last_line =
+ pl->y_start + pl->grid_height * pl->block_height;
+ const unsigned short process_lines =
+ pl->grid_height_per_slice * pl->block_height;
+
+ unsigned int process_lines_after_grid;
+ unsigned short first_process_lines;
+ unsigned short last_process_lines_in_grid;
+
+ unsigned short num_of_process_lines;
+ unsigned short num_of_sets;
+
+ if (pl->grid_height_per_slice == 0)
+ return -EINVAL;
+
+ if (pl->acc_enable && grid_last_line > pl->image_height)
+ return -EINVAL;
+
+ num_of_sets = pl->grid_height / pl->grid_height_per_slice;
+ if (num_of_sets * pl->grid_height_per_slice < pl->grid_height)
+ num_of_sets++;
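+ /*
+ * E.g. (hypothetical values) grid_height = 16 and
+ * grid_height_per_slice = 10: 16 / 10 = 1 and 1 * 10 < 16,
+ * so num_of_sets becomes 2.
+ */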
+
+ /* Account for two line delay inside the FF */
+ if (pl->max_op == IMGU_ABI_AF_MAX_OPERATIONS) {
+ first_process_lines = process_lines + pl->y_start + 2;
+ last_process_lines_in_grid =
+ (grid_last_line - first_process_lines) -
+ ((num_of_sets - 2) * process_lines) + 4;
+ process_lines_after_grid =
+ pl->image_height - grid_last_line - 4;
+ } else {
+ first_process_lines = process_lines + pl->y_start;
+ last_process_lines_in_grid =
+ (grid_last_line - first_process_lines) -
+ ((num_of_sets - 2) * process_lines);
+ process_lines_after_grid = pl->image_height - grid_last_line;
+ }
+
+ num_of_process_lines = num_of_sets;
+ if (process_lines_after_grid > 0)
+ num_of_process_lines++;
+
+ while (tr_idx < num_of_sets || pl_idx < num_of_process_lines) {
+ /* Read meta-data */
+ if (pl_idx >= 2 || (pl_idx == 1 && num_of_sets == 1)) {
+ if (op_idx >= pl->max_op || tr_idx >= pl->max_tr)
+ return -EINVAL;
+
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA;
+
+ if (tr_idx == num_of_sets - 1)
+ /* The last operation is always a tr */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_OPS;
+ else if (tr_idx == num_of_sets - 2)
+ if (process_lines_after_grid == 0)
+ /*
+ * No additional pl op left -
+ * this op is the last of the cycle
+ */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+ else
+ /*
+ * We still have to process lines after
+ * the grid, so there is one more pl op
+ */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+ else
+ /* Default - usually there's a pl after a tr */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+
+ op_idx++;
+ if (p_tr) {
+ p_tr[tr_idx].set_number = tr_set_num;
+ tr_set_num = 1 - tr_set_num;
+ }
+ tr_idx++;
+ }
+
+ /* process_lines */
+ if (pl_idx < num_of_process_lines) {
+ if (op_idx >= pl->max_op || pl_idx >= pl->max_tr)
+ return -EINVAL;
+
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
+ if (pl_idx == 0)
+ if (num_of_process_lines == 1)
+ /* Only one pl op */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+ else
+ /* On init - do two pl ops */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+ else
+ /* Usually pl is the end of the ack cycle */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+
+ op_idx++;
+
+ if (pl_idx == 0)
+ /* First process line */
+ p_pl[pl_idx].lines = first_process_lines;
+ else if (pl_idx == num_of_sets - 1)
+ /* Last in grid */
+ p_pl[pl_idx].lines = last_process_lines_in_grid;
+ else if (pl_idx == num_of_process_lines - 1)
+ /* After the grid */
+ p_pl[pl_idx].lines = process_lines_after_grid;
+ else
+ /* Inside the grid */
+ p_pl[pl_idx].lines = process_lines;
+
+ if (p_tr) {
+ p_pl[pl_idx].cfg_set = pl_cfg_set;
+ pl_cfg_set = 1 - pl_cfg_set;
+ }
+ pl_idx++;
+ }
+ }
+
+ return 0;
+}
+
+static int imgu_css_af_ops_calc(struct imgu_css *css, unsigned int pipe,
+ struct imgu_abi_af_config *af_config)
+{
+ struct imgu_abi_af_intra_frame_operations_data *to =
+ &af_config->operations_data;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+
+ struct process_lines pl = {
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ .grid_height = af_config->config.grid_cfg.height,
+ .block_height =
+ 1 << af_config->config.grid_cfg.block_height_log2,
+ .y_start = af_config->config.grid_cfg.y_start &
+ IPU3_UAPI_GRID_START_MASK,
+ .grid_height_per_slice =
+ af_config->stripes[0].grid_cfg.height_per_slice,
+ .max_op = IMGU_ABI_AF_MAX_OPERATIONS,
+ .max_tr = IMGU_ABI_AF_MAX_TRANSFERS,
+ .acc_enable = bi->info.isp.sp.enable.af,
+ };
+
+ return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ NULL);
+}
+
+static int
+imgu_css_awb_fr_ops_calc(struct imgu_css *css, unsigned int pipe,
+ struct imgu_abi_awb_fr_config *awb_fr_config)
+{
+ struct imgu_abi_awb_fr_intra_frame_operations_data *to =
+ &awb_fr_config->operations_data;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ struct process_lines pl = {
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ .grid_height = awb_fr_config->config.grid_cfg.height,
+ .block_height =
+ 1 << awb_fr_config->config.grid_cfg.block_height_log2,
+ .y_start = awb_fr_config->config.grid_cfg.y_start &
+ IPU3_UAPI_GRID_START_MASK,
+ .grid_height_per_slice =
+ awb_fr_config->stripes[0].grid_cfg.height_per_slice,
+ .max_op = IMGU_ABI_AWB_FR_MAX_OPERATIONS,
+ .max_tr = IMGU_ABI_AWB_FR_MAX_PROCESS_LINES,
+ .acc_enable = bi->info.isp.sp.enable.awb_fr_acc,
+ };
+
+ return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ NULL);
+}
+
+static int imgu_css_awb_ops_calc(struct imgu_css *css, unsigned int pipe,
+ struct imgu_abi_awb_config *awb_config)
+{
+ struct imgu_abi_awb_intra_frame_operations_data *to =
+ &awb_config->operations_data;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+
+ struct process_lines pl = {
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ .grid_height = awb_config->config.grid.height,
+ .block_height =
+ 1 << awb_config->config.grid.block_height_log2,
+ .y_start = awb_config->config.grid.y_start,
+ .grid_height_per_slice =
+ awb_config->stripes[0].grid.height_per_slice,
+ .max_op = IMGU_ABI_AWB_MAX_OPERATIONS,
+ .max_tr = IMGU_ABI_AWB_MAX_TRANSFERS,
+ .acc_enable = bi->info.isp.sp.enable.awb_acc,
+ };
+
+ return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ to->transfer_data);
+}
+
+static u16 imgu_css_grid_end(u16 start, u8 width, u8 block_width_log2)
+{
+ return (start & IPU3_UAPI_GRID_START_MASK) +
+ (width << block_width_log2) - 1;
+}
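+/*
+ * Example with hypothetical values (assuming the masked start is 0):
+ * start = 0, width = 16 blocks and block_width_log2 = 3 (8-pixel blocks)
+ * give an end coordinate of 0 + (16 << 3) - 1 = 127, the last pixel
+ * covered by the grid.
+ */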
+
+static void imgu_css_grid_end_calc(struct ipu3_uapi_grid_config *grid_cfg)
+{
+ grid_cfg->x_end = imgu_css_grid_end(grid_cfg->x_start, grid_cfg->width,
+ grid_cfg->block_width_log2);
+ grid_cfg->y_end = imgu_css_grid_end(grid_cfg->y_start, grid_cfg->height,
+ grid_cfg->block_height_log2);
+}
+
+/****************** config computation *****************************/
+
+static int imgu_css_cfg_acc_stripe(struct imgu_css *css, unsigned int pipe,
+ struct imgu_abi_acc_param *acc)
+{
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ struct imgu_css_scaler_info scaler_luma, scaler_chroma;
+ const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
+ const unsigned int f = IPU3_UAPI_ISP_VEC_ELEMS * 2;
+ unsigned int bds_ds, i;
+
+ memset(acc, 0, sizeof(*acc));
+
+ /* acc_param: osys_config */
+
+ if (imgu_css_osys_calc(css, pipe, stripes, &acc->osys, &scaler_luma,
+ &scaler_chroma, acc->stripe.block_stripes))
+ return -EINVAL;
+
+ /* acc_param: stripe data */
+
+ /*
+ * For the striped case the approach is as follows:
+ * 1. down-scaled stripes are calculated - with 128 overlap
+ * (this is the main limiter therefore it's first)
+ * 2. input stripes are derived by up-scaling the down-scaled stripes
+ * (there are no alignment requirements on input stripes)
+ * 3. output stripes are derived from down-scaled stripes too
+ */
+
+ acc->stripe.num_of_stripes = stripes;
+ acc->stripe.input_frame.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
+ acc->stripe.input_frame.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
+ acc->stripe.input_frame.bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
+
+ for (i = 0; i < stripes; i++)
+ acc->stripe.bds_out_stripes[i].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->stripe.bds_out_stripes[0].offset = 0;
+ if (stripes <= 1) {
+ acc->stripe.bds_out_stripes[0].width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
+ } else {
+ /* Image processing is divided into two stripes */
+ acc->stripe.bds_out_stripes[0].width =
+ acc->stripe.bds_out_stripes[1].width =
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width / 2 & ~(f - 1)) + f;
+ /*
+ * The sum of the widths of the two stripes must not be smaller
+ * than the output width and must be an even multiple of the
+ * overlap unit f.
+ */
+ if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) !=
+ !!(css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1)))
+ acc->stripe.bds_out_stripes[0].width += f;
+ if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) &&
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1))) {
+ acc->stripe.bds_out_stripes[0].width += f;
+ acc->stripe.bds_out_stripes[1].width += f;
+ }
+ /* Overlap between stripes is IPU3_UAPI_ISP_VEC_ELEMS * 4 */
+ acc->stripe.bds_out_stripes[1].offset =
+ acc->stripe.bds_out_stripes[0].width - 2 * f;
+ }
+
+ acc->stripe.effective_stripes[0].height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ acc->stripe.effective_stripes[0].offset = 0;
+ acc->stripe.bds_out_stripes_no_overlap[0].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->stripe.bds_out_stripes_no_overlap[0].offset = 0;
+ acc->stripe.output_stripes[0].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ acc->stripe.output_stripes[0].offset = 0;
+ if (stripes <= 1) {
+ acc->stripe.down_scaled_stripes[0].width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->stripe.down_scaled_stripes[0].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->stripe.down_scaled_stripes[0].offset = 0;
+
+ acc->stripe.effective_stripes[0].width =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ acc->stripe.bds_out_stripes_no_overlap[0].width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
+
+ acc->stripe.output_stripes[0].width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ } else { /* Two stripes */
+ bds_ds = css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width *
+ IMGU_BDS_GRANULARITY /
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+
+ acc->stripe.down_scaled_stripes[0] =
+ acc->stripe.bds_out_stripes[0];
+ acc->stripe.down_scaled_stripes[1] =
+ acc->stripe.bds_out_stripes[1];
+ if (!IS_ALIGNED(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f))
+ acc->stripe.down_scaled_stripes[1].width +=
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width
+ & (f - 1)) - f;
+
+ acc->stripe.effective_stripes[0].width = bds_ds *
+ acc->stripe.down_scaled_stripes[0].width /
+ IMGU_BDS_GRANULARITY;
+ acc->stripe.effective_stripes[1].width = bds_ds *
+ acc->stripe.down_scaled_stripes[1].width /
+ IMGU_BDS_GRANULARITY;
+ acc->stripe.effective_stripes[1].height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ acc->stripe.effective_stripes[1].offset = bds_ds *
+ acc->stripe.down_scaled_stripes[1].offset /
+ IMGU_BDS_GRANULARITY;
+
+ acc->stripe.bds_out_stripes_no_overlap[0].width =
+ acc->stripe.bds_out_stripes_no_overlap[1].offset =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, 2 * f) / 2;
+ acc->stripe.bds_out_stripes_no_overlap[1].width =
+ DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f)
+ / 2 * f;
+ acc->stripe.bds_out_stripes_no_overlap[1].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+
+ acc->stripe.output_stripes[0].width =
+ acc->stripe.down_scaled_stripes[0].width - f;
+ acc->stripe.output_stripes[1].width =
+ acc->stripe.down_scaled_stripes[1].width - f;
+ acc->stripe.output_stripes[1].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ acc->stripe.output_stripes[1].offset =
+ acc->stripe.output_stripes[0].width;
+ }
+
+ acc->stripe.output_system_in_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_GDC].width;
+ acc->stripe.output_system_in_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+
+ acc->stripe.effective_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ acc->stripe.bds_frame_width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->stripe.out_frame_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ acc->stripe.out_frame_height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ acc->stripe.gdc_in_buffer_width =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline /
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel;
+ acc->stripe.gdc_in_buffer_height =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ acc->stripe.gdc_in_buffer_offset_x = IMGU_GDC_BUF_X;
+ acc->stripe.gdc_in_buffer_offset_y = IMGU_GDC_BUF_Y;
+ acc->stripe.display_frame_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ acc->stripe.display_frame_height =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ acc->stripe.bds_aligned_frame_width =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
+ 2 * IPU3_UAPI_ISP_VEC_ELEMS);
+
+ if (stripes > 1)
+ acc->stripe.half_overlap_vectors =
+ IMGU_STRIPE_FIXED_HALF_OVERLAP;
+ else
+ acc->stripe.half_overlap_vectors = 0;
+
+ return 0;
+}
+
+static void imgu_css_cfg_acc_dvs(struct imgu_css *css,
+ struct imgu_abi_acc_param *acc,
+ unsigned int pipe)
+{
+ unsigned int i;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ /* Disable DVS statistics */
+ acc->dvs_stat.operations_data.process_lines_data[0].lines =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->dvs_stat.operations_data.process_lines_data[0].cfg_set = 0;
+ acc->dvs_stat.operations_data.ops[0].op_type =
+ IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
+ acc->dvs_stat.operations_data.ops[0].op_indicator =
+ IMGU_ABI_ACC_OP_NO_OPS;
+ for (i = 0; i < IMGU_ABI_DVS_STAT_LEVELS; i++)
+ acc->dvs_stat.cfg.grd_config[i].enable = 0;
+}
+
+static void acc_bds_per_stripe_data(struct imgu_css *css,
+ struct imgu_abi_acc_param *acc,
+ const int i, unsigned int pipe)
+{
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_en = 0;
+ acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_start = 0;
+ acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_end = 0;
+ acc->bds.per_stripe.aligned_data[i].data.hor_ctrl0 =
+ acc->bds.hor.hor_ctrl0;
+ acc->bds.per_stripe.aligned_data[i].data.hor_ctrl0.out_frame_width =
+ acc->stripe.down_scaled_stripes[i].width;
+ acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_width =
+ acc->stripe.down_scaled_stripes[i].width;
+ acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+}
+
+/*
+ * Configure `acc' parameters. `acc_old' contains the old values (or is NULL)
+ * and `acc_user' contains new prospective values. `use' contains flags
+ * telling which fields to take from the new user values; the remaining
+ * fields are taken from the old values (or generated from defaults if
+ * `acc_old' is NULL).
+ */
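+/*
+ * Illustrative summary of the per-chunk selection pattern used throughout
+ * this function (X stands for any parameter chunk, e.g. bnr, dm, ccm, ...):
+ *
+ *   if (use && use->acc_X): take the new user value from acc_user->X;
+ *   else if (acc_old):      keep the previous value from acc_old->X;
+ *   else:                   start from the compiled-in defaults.
+ */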
+int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ struct imgu_abi_acc_param *acc,
+ struct imgu_abi_acc_param *acc_old,
+ struct ipu3_uapi_acc_param *acc_user)
+{
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
+ const unsigned int tnr_frame_width =
+ acc->stripe.bds_aligned_frame_width;
+ const unsigned int min_overlap = 10;
+ const struct v4l2_pix_format_mplane *pixm =
+ &css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ const struct imgu_css_bds_config *cfg_bds;
+ struct imgu_abi_input_feeder_data *feeder_data;
+
+ unsigned int bds_ds, ofs_x, ofs_y, i, width, height;
+ u8 b_w_log2; /* Block width log2 */
+
+ /* Update stripe using chroma and luma */
+
+ if (imgu_css_cfg_acc_stripe(css, pipe, acc))
+ return -EINVAL;
+
+ /* acc_param: input_feeder_config */
+
+ ofs_x = ((pixm->width -
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width) >> 1) & ~1;
+ ofs_x += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_RGGB ||
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
+ ofs_y = ((pixm->height -
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height) >> 1) & ~1;
+ ofs_y += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_BGGR ||
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
+ acc->input_feeder.data.row_stride = pixm->plane_fmt[0].bytesperline;
+ acc->input_feeder.data.start_row_address =
+ ofs_x / IMGU_PIXELS_PER_WORD * IMGU_BYTES_PER_WORD +
+ ofs_y * acc->input_feeder.data.row_stride;
+ acc->input_feeder.data.start_pixel = ofs_x % IMGU_PIXELS_PER_WORD;
+
+ acc->input_feeder.data_per_stripe.input_feeder_data[0].data =
+ acc->input_feeder.data;
+
+ ofs_x += acc->stripe.effective_stripes[1].offset;
+
+ feeder_data =
+ &acc->input_feeder.data_per_stripe.input_feeder_data[1].data;
+ feeder_data->row_stride = acc->input_feeder.data.row_stride;
+ feeder_data->start_row_address =
+ ofs_x / IMGU_PIXELS_PER_WORD * IMGU_BYTES_PER_WORD +
+ ofs_y * acc->input_feeder.data.row_stride;
+ feeder_data->start_pixel = ofs_x % IMGU_PIXELS_PER_WORD;
+
+ /* acc_param: bnr_static_config */
+
+ /*
+ * Parameters come from the user, or are the original default values if the
+ * user has never set them before. When the user gives a new set of
+ * parameters, each chunk in the parameter structure has a flag use->xxx
+ * that tells whether to use the user-provided value or not. If not, the
+ * parameter remains unchanged in the driver: its value is taken from
+ * acc_old.
+ */
+ if (use && use->acc_bnr) {
+ /* Take values from user */
+ acc->bnr = acc_user->bnr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->bnr = acc_old->bnr;
+ } else {
+ /* Calculate from scratch */
+ acc->bnr = imgu_css_bnr_defaults;
+ }
+
+ acc->bnr.column_size = tnr_frame_width;
+
+ /* acc_param: bnr_static_config_green_disparity */
+
+ if (use && use->acc_green_disparity) {
+ /* Take values from user */
+ acc->green_disparity = acc_user->green_disparity;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->green_disparity = acc_old->green_disparity;
+ } else {
+ /* Calculate from scratch */
+ memset(&acc->green_disparity, 0, sizeof(acc->green_disparity));
+ }
+
+ /* acc_param: dm_config */
+
+ if (use && use->acc_dm) {
+ /* Take values from user */
+ acc->dm = acc_user->dm;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->dm = acc_old->dm;
+ } else {
+ /* Calculate from scratch */
+ acc->dm = imgu_css_dm_defaults;
+ }
+
+ acc->dm.frame_width = tnr_frame_width;
+
+ /* acc_param: ccm_mat_config */
+
+ if (use && use->acc_ccm) {
+ /* Take values from user */
+ acc->ccm = acc_user->ccm;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->ccm = acc_old->ccm;
+ } else {
+ /* Calculate from scratch */
+ acc->ccm = imgu_css_ccm_defaults;
+ }
+
+ /* acc_param: gamma_config */
+
+ if (use && use->acc_gamma) {
+ /* Take values from user */
+ acc->gamma = acc_user->gamma;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->gamma = acc_old->gamma;
+ } else {
+ /* Calculate from scratch */
+ acc->gamma.gc_ctrl.enable = 1;
+ acc->gamma.gc_lut = imgu_css_gamma_lut;
+ }
+
+ /* acc_param: csc_mat_config */
+
+ if (use && use->acc_csc) {
+ /* Take values from user */
+ acc->csc = acc_user->csc;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->csc = acc_old->csc;
+ } else {
+ /* Calculate from scratch */
+ acc->csc = imgu_css_csc_defaults;
+ }
+
+ /* acc_param: cds_params */
+
+ if (use && use->acc_cds) {
+ /* Take values from user */
+ acc->cds = acc_user->cds;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->cds = acc_old->cds;
+ } else {
+ /* Calculate from scratch */
+ acc->cds = imgu_css_cds_defaults;
+ }
+
+ /* acc_param: shd_config */
+
+ if (use && use->acc_shd) {
+ /* Take values from user */
+ acc->shd.shd = acc_user->shd.shd;
+ acc->shd.shd_lut = acc_user->shd.shd_lut;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->shd.shd = acc_old->shd.shd;
+ acc->shd.shd_lut = acc_old->shd.shd_lut;
+ } else {
+ /* Calculate from scratch */
+ acc->shd.shd = imgu_css_shd_defaults;
+ memset(&acc->shd.shd_lut, 0, sizeof(acc->shd.shd_lut));
+ }
+
+ if (acc->shd.shd.grid.width <= 0)
+ return -EINVAL;
+
+ acc->shd.shd.grid.grid_height_per_slice =
+ IMGU_ABI_SHD_MAX_CELLS_PER_SET / acc->shd.shd.grid.width;
+
+ if (acc->shd.shd.grid.grid_height_per_slice <= 0)
+ return -EINVAL;
+
+ acc->shd.shd.general.init_set_vrt_offst_ul =
+ (-acc->shd.shd.grid.y_start >>
+ acc->shd.shd.grid.block_height_log2) %
+ acc->shd.shd.grid.grid_height_per_slice;
+
+ if (imgu_css_shd_ops_calc(&acc->shd.shd_ops, &acc->shd.shd.grid,
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height))
+ return -EINVAL;
+
+ /* acc_param: dvs_stat_config */
+ imgu_css_cfg_acc_dvs(css, acc, pipe);
+
+ /* acc_param: yuvp1_iefd_config */
+
+ if (use && use->acc_iefd) {
+ /* Take values from user */
+ acc->iefd = acc_user->iefd;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->iefd = acc_old->iefd;
+ } else {
+ /* Calculate from scratch */
+ acc->iefd = imgu_css_iefd_defaults;
+ }
+
+ /* acc_param: yuvp1_yds_config yds_c0 */
+
+ if (use && use->acc_yds_c0) {
+ /* Take values from user */
+ acc->yds_c0 = acc_user->yds_c0;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->yds_c0 = acc_old->yds_c0;
+ } else {
+ /* Calculate from scratch */
+ acc->yds_c0 = imgu_css_yds_defaults;
+ }
+
+ /* acc_param: yuvp1_chnr_config chnr_c0 */
+
+ if (use && use->acc_chnr_c0) {
+ /* Take values from user */
+ acc->chnr_c0 = acc_user->chnr_c0;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->chnr_c0 = acc_old->chnr_c0;
+ } else {
+ /* Calculate from scratch */
+ acc->chnr_c0 = imgu_css_chnr_defaults;
+ }
+
+ /* acc_param: yuvp1_y_ee_nr_config */
+
+ if (use && use->acc_y_ee_nr) {
+ /* Take values from user */
+ acc->y_ee_nr = acc_user->y_ee_nr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->y_ee_nr = acc_old->y_ee_nr;
+ } else {
+ /* Calculate from scratch */
+ acc->y_ee_nr = imgu_css_y_ee_nr_defaults;
+ }
+
+ /* acc_param: yuvp1_yds_config yds */
+
+ if (use && use->acc_yds) {
+ /* Take values from user */
+ acc->yds = acc_user->yds;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->yds = acc_old->yds;
+ } else {
+ /* Calculate from scratch */
+ acc->yds = imgu_css_yds_defaults;
+ }
+
+ /* acc_param: yuvp1_chnr_config chnr */
+
+ if (use && use->acc_chnr) {
+ /* Take values from user */
+ acc->chnr = acc_user->chnr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->chnr = acc_old->chnr;
+ } else {
+ /* Calculate from scratch */
+ acc->chnr = imgu_css_chnr_defaults;
+ }
+
+ /* acc_param: yuvp2_y_tm_lut_static_config */
+
+ for (i = 0; i < IMGU_ABI_YUVP2_YTM_LUT_ENTRIES; i++)
+ acc->ytm.entries[i] = i * 32;
+ acc->ytm.enable = 0; /* Always disabled on IPU3 */
+
+ /* acc_param: yuvp1_yds_config yds2 */
+
+ if (use && use->acc_yds2) {
+ /* Take values from user */
+ acc->yds2 = acc_user->yds2;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->yds2 = acc_old->yds2;
+ } else {
+ /* Calculate from scratch */
+ acc->yds2 = imgu_css_yds_defaults;
+ }
+
+ /* acc_param: yuvp2_tcc_static_config */
+
+ if (use && use->acc_tcc) {
+ /* Take values from user */
+ acc->tcc = acc_user->tcc;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->tcc = acc_old->tcc;
+ } else {
+ /* Calculate from scratch */
+ memset(&acc->tcc, 0, sizeof(acc->tcc));
+
+ acc->tcc.gen_control.en = 1;
+ acc->tcc.gen_control.blend_shift = 3;
+ acc->tcc.gen_control.gain_according_to_y_only = 1;
+ acc->tcc.gen_control.gamma = 8;
+ acc->tcc.gen_control.delta = 0;
+
+ for (i = 0; i < IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS; i++) {
+ acc->tcc.macc_table.entries[i].a = 1024;
+ acc->tcc.macc_table.entries[i].b = 0;
+ acc->tcc.macc_table.entries[i].c = 0;
+ acc->tcc.macc_table.entries[i].d = 1024;
+ }
+
+ acc->tcc.inv_y_lut.entries[6] = 1023;
+ for (i = 7; i < IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS; i++)
+ acc->tcc.inv_y_lut.entries[i] = 1024 >> (i - 6);
+
+ acc->tcc.gain_pcwl = imgu_css_tcc_gain_pcwl_lut;
+ acc->tcc.r_sqr_lut = imgu_css_tcc_r_sqr_lut;
+ }
+
+ /* acc_param: dpc_config */
+
+ if (use && use->acc_dpc)
+ return -EINVAL; /* Not supported yet */
+
+ /* Just disable by default */
+ memset(&acc->dpc, 0, sizeof(acc->dpc));
+
+ /* acc_param: bds_config */
+
+ bds_ds = (css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height *
+ IMGU_BDS_GRANULARITY) / css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ if (bds_ds < IMGU_BDS_MIN_SF_INV ||
+ bds_ds - IMGU_BDS_MIN_SF_INV >= ARRAY_SIZE(imgu_css_bds_configs))
+ return -EINVAL;
+
+ cfg_bds = &imgu_css_bds_configs[bds_ds - IMGU_BDS_MIN_SF_INV];
+ acc->bds.hor.hor_ctrl1.hor_crop_en = 0;
+ acc->bds.hor.hor_ctrl1.hor_crop_start = 0;
+ acc->bds.hor.hor_ctrl1.hor_crop_end = 0;
+ acc->bds.hor.hor_ctrl0.sample_patrn_length =
+ cfg_bds->sample_patrn_length;
+ acc->bds.hor.hor_ctrl0.hor_ds_en = cfg_bds->hor_ds_en;
+ acc->bds.hor.hor_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
+ acc->bds.hor.hor_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
+ acc->bds.hor.hor_ctrl0.out_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->bds.hor.hor_ptrn_arr = cfg_bds->ptrn_arr;
+ acc->bds.hor.hor_phase_arr = cfg_bds->hor_phase_arr;
+ acc->bds.hor.hor_ctrl2.input_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ acc->bds.ver.ver_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
+ acc->bds.ver.ver_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
+ acc->bds.ver.ver_ctrl0.sample_patrn_length =
+ cfg_bds->sample_patrn_length;
+ acc->bds.ver.ver_ctrl0.ver_ds_en = cfg_bds->ver_ds_en;
+ acc->bds.ver.ver_ptrn_arr = cfg_bds->ptrn_arr;
+ acc->bds.ver.ver_phase_arr = cfg_bds->ver_phase_arr;
+ acc->bds.ver.ver_ctrl1.out_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->bds.ver.ver_ctrl1.out_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ for (i = 0; i < stripes; i++)
+ acc_bds_per_stripe_data(css, acc, i, pipe);
+
+ acc->bds.enabled = cfg_bds->hor_ds_en || cfg_bds->ver_ds_en;
+
+ /* acc_param: anr_config */
+
+ if (use && use->acc_anr) {
+ /* Take values from user */
+ acc->anr.transform = acc_user->anr.transform;
+ acc->anr.stitch.anr_stitch_en =
+ acc_user->anr.stitch.anr_stitch_en;
+ memcpy(acc->anr.stitch.pyramid, acc_user->anr.stitch.pyramid,
+ sizeof(acc->anr.stitch.pyramid));
+ } else if (acc_old) {
+ /* Use old value */
+ acc->anr.transform = acc_old->anr.transform;
+ acc->anr.stitch.anr_stitch_en =
+ acc_old->anr.stitch.anr_stitch_en;
+ memcpy(acc->anr.stitch.pyramid, acc_old->anr.stitch.pyramid,
+ sizeof(acc->anr.stitch.pyramid));
+ } else {
+ /* Calculate from scratch */
+ acc->anr = imgu_css_anr_defaults;
+ }
+
+ /* Always enabled */
+ acc->anr.search.enable = 1;
+ acc->anr.transform.enable = 1;
+ acc->anr.tile2strm.enable = 1;
+ acc->anr.tile2strm.frame_width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ acc->anr.search.frame_width = acc->anr.tile2strm.frame_width;
+ acc->anr.stitch.frame_width = acc->anr.tile2strm.frame_width;
+ acc->anr.tile2strm.frame_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->anr.search.frame_height = acc->anr.tile2strm.frame_height;
+ acc->anr.stitch.frame_height = acc->anr.tile2strm.frame_height;
+
+ width = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+
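+	/*
+	 * Clamp the ANR transform reset points so that the aligned BDS
+	 * frame stays within the IPU3_UAPI_ANR_MIN/MAX_RESET range.
+	 */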
+ if (acc->anr.transform.xreset + width > IPU3_UAPI_ANR_MAX_RESET)
+ acc->anr.transform.xreset = IPU3_UAPI_ANR_MAX_RESET - width;
+ if (acc->anr.transform.xreset < IPU3_UAPI_ANR_MIN_RESET)
+ acc->anr.transform.xreset = IPU3_UAPI_ANR_MIN_RESET;
+
+ if (acc->anr.transform.yreset + height > IPU3_UAPI_ANR_MAX_RESET)
+ acc->anr.transform.yreset = IPU3_UAPI_ANR_MAX_RESET - height;
+ if (acc->anr.transform.yreset < IPU3_UAPI_ANR_MIN_RESET)
+ acc->anr.transform.yreset = IPU3_UAPI_ANR_MIN_RESET;
+
+ /* acc_param: awb_fr_config */
+
+ if (use && use->acc_awb_fr) {
+ /* Take values from user */
+ acc->awb_fr.config = acc_user->awb_fr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->awb_fr.config = acc_old->awb_fr.config;
+ } else {
+ /* Set from scratch */
+ acc->awb_fr.config = imgu_css_awb_fr_defaults;
+ }
+
+ imgu_css_grid_end_calc(&acc->awb_fr.config.grid_cfg);
+
+ if (acc->awb_fr.config.grid_cfg.width <= 0)
+ return -EINVAL;
+
+ acc->awb_fr.config.grid_cfg.height_per_slice =
+ IMGU_ABI_AWB_FR_MAX_CELLS_PER_SET /
+ acc->awb_fr.config.grid_cfg.width;
+
+ for (i = 0; i < stripes; i++)
+ acc->awb_fr.stripes[i] = acc->awb_fr.config;
+
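+	/*
+	 * If the grid fits entirely in one stripe (with the required
+	 * overlap margin), disable it on the other stripe; otherwise
+	 * split the grid between the two stripes and recompute the
+	 * start/end cells in each stripe's own coordinates. A similar
+	 * split is done below for AE, AF and AWB.
+	 */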
+ if (acc->awb_fr.config.grid_cfg.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->awb_fr.stripes[0].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ } else if (acc->awb_fr.config.grid_cfg.x_end <=
+ acc->stripe.bds_out_stripes[0].width - min_overlap) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->awb_fr.stripes[1].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ } else {
+ /* Enable for both stripes */
+ u16 end; /* width for grid end */
+
+ acc->awb_fr.stripes[0].grid_cfg.width =
+ (acc->stripe.bds_out_stripes[0].width - min_overlap -
+ acc->awb_fr.config.grid_cfg.x_start + 1) >>
+ acc->awb_fr.config.grid_cfg.block_width_log2;
+ acc->awb_fr.stripes[1].grid_cfg.width =
+ acc->awb_fr.config.grid_cfg.width -
+ acc->awb_fr.stripes[0].grid_cfg.width;
+
+ b_w_log2 = acc->awb_fr.stripes[0].grid_cfg.block_width_log2;
+ end = imgu_css_grid_end(acc->awb_fr.stripes[0].grid_cfg.x_start,
+ acc->awb_fr.stripes[0].grid_cfg.width,
+ b_w_log2);
+ acc->awb_fr.stripes[0].grid_cfg.x_end = end;
+
+ acc->awb_fr.stripes[1].grid_cfg.x_start =
+ (acc->awb_fr.stripes[0].grid_cfg.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+ b_w_log2 = acc->awb_fr.stripes[1].grid_cfg.block_width_log2;
+ end = imgu_css_grid_end(acc->awb_fr.stripes[1].grid_cfg.x_start,
+ acc->awb_fr.stripes[1].grid_cfg.width,
+ b_w_log2);
+ acc->awb_fr.stripes[1].grid_cfg.x_end = end;
+ }
+
+ /*
+ * To reduce complexity of debubbling and loading
+ * statistics fix grid_height_per_slice to 1 for both
+ * stripes.
+ */
+ for (i = 0; i < stripes; i++)
+ acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
+
+ if (imgu_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
+ return -EINVAL;
+
+ /* acc_param: ae_config */
+
+ if (use && use->acc_ae) {
+ /* Take values from user */
+ acc->ae.grid_cfg = acc_user->ae.grid_cfg;
+ acc->ae.ae_ccm = acc_user->ae.ae_ccm;
+ for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
+ acc->ae.weights[i] = acc_user->ae.weights[i];
+ } else if (acc_old) {
+ /* Use old value */
+ acc->ae.grid_cfg = acc_old->ae.grid_cfg;
+ acc->ae.ae_ccm = acc_old->ae.ae_ccm;
+ for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
+ acc->ae.weights[i] = acc_old->ae.weights[i];
+ } else {
+ /* Set from scratch */
+ static const struct ipu3_uapi_ae_weight_elem
+ weight_def = { 1, 1, 1, 1, 1, 1, 1, 1 };
+
+ acc->ae.grid_cfg = imgu_css_ae_grid_defaults;
+ acc->ae.ae_ccm = imgu_css_ae_ccm_defaults;
+ for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
+ acc->ae.weights[i] = weight_def;
+ }
+
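+	/* Derive grid end cells from start, cell count and log2 cell size */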
+ b_w_log2 = acc->ae.grid_cfg.block_width_log2;
+ acc->ae.grid_cfg.x_end = imgu_css_grid_end(acc->ae.grid_cfg.x_start,
+ acc->ae.grid_cfg.width,
+ b_w_log2);
+ b_w_log2 = acc->ae.grid_cfg.block_height_log2;
+ acc->ae.grid_cfg.y_end = imgu_css_grid_end(acc->ae.grid_cfg.y_start,
+ acc->ae.grid_cfg.height,
+ b_w_log2);
+
+ for (i = 0; i < stripes; i++)
+ acc->ae.stripes[i].grid = acc->ae.grid_cfg;
+
+ if (acc->ae.grid_cfg.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->ae.stripes[0].grid.ae_en = 0;
+ } else if (acc->ae.grid_cfg.x_end <=
+ acc->stripe.bds_out_stripes[0].width) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->ae.stripes[1].grid.ae_en = 0;
+ } else {
+ /* Enable for both stripes */
+ u8 b_w_log2;
+
+ acc->ae.stripes[0].grid.width =
+ (acc->stripe.bds_out_stripes[0].width -
+ acc->ae.grid_cfg.x_start + 1) >>
+ acc->ae.grid_cfg.block_width_log2;
+
+ acc->ae.stripes[1].grid.width =
+ acc->ae.grid_cfg.width - acc->ae.stripes[0].grid.width;
+
+ b_w_log2 = acc->ae.stripes[0].grid.block_width_log2;
+ acc->ae.stripes[0].grid.x_end =
+ imgu_css_grid_end(acc->ae.stripes[0].grid.x_start,
+ acc->ae.stripes[0].grid.width,
+ b_w_log2);
+
+ acc->ae.stripes[1].grid.x_start =
+ (acc->ae.stripes[0].grid.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+ b_w_log2 = acc->ae.stripes[1].grid.block_width_log2;
+ acc->ae.stripes[1].grid.x_end =
+ imgu_css_grid_end(acc->ae.stripes[1].grid.x_start,
+ acc->ae.stripes[1].grid.width,
+ b_w_log2);
+ }
+
+ /* acc_param: af_config */
+
+ if (use && use->acc_af) {
+ /* Take values from user */
+ acc->af.config.filter_config = acc_user->af.filter_config;
+ acc->af.config.grid_cfg = acc_user->af.grid_cfg;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->af.config = acc_old->af.config;
+ } else {
+ /* Set from scratch */
+ acc->af.config.filter_config =
+ imgu_css_af_defaults.filter_config;
+ acc->af.config.grid_cfg = imgu_css_af_defaults.grid_cfg;
+ }
+
+ imgu_css_grid_end_calc(&acc->af.config.grid_cfg);
+
+ if (acc->af.config.grid_cfg.width <= 0)
+ return -EINVAL;
+
+ acc->af.config.grid_cfg.height_per_slice =
+ IMGU_ABI_AF_MAX_CELLS_PER_SET / acc->af.config.grid_cfg.width;
+ acc->af.config.frame_size.width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ acc->af.config.frame_size.height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+
+ if (acc->stripe.bds_out_stripes[0].width <= min_overlap)
+ return -EINVAL;
+
+ for (i = 0; i < stripes; i++) {
+ acc->af.stripes[i].grid_cfg = acc->af.config.grid_cfg;
+ acc->af.stripes[i].frame_size.height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->af.stripes[i].frame_size.width =
+ acc->stripe.bds_out_stripes[i].width;
+ }
+
+ if (acc->af.config.grid_cfg.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->af.stripes[0].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ acc->af.stripes[1].grid_cfg.x_start =
+ (acc->af.stripes[1].grid_cfg.x_start -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+ b_w_log2 = acc->af.stripes[1].grid_cfg.block_width_log2;
+ acc->af.stripes[1].grid_cfg.x_end =
+ imgu_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
+ acc->af.stripes[1].grid_cfg.width,
+ b_w_log2);
+ } else if (acc->af.config.grid_cfg.x_end <=
+ acc->stripe.bds_out_stripes[0].width - min_overlap) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->af.stripes[1].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ } else {
+ /* Enable for both stripes */
+
+ acc->af.stripes[0].grid_cfg.width =
+ (acc->stripe.bds_out_stripes[0].width - min_overlap -
+ acc->af.config.grid_cfg.x_start + 1) >>
+ acc->af.config.grid_cfg.block_width_log2;
+ acc->af.stripes[1].grid_cfg.width =
+ acc->af.config.grid_cfg.width -
+ acc->af.stripes[0].grid_cfg.width;
+
+ b_w_log2 = acc->af.stripes[0].grid_cfg.block_width_log2;
+ acc->af.stripes[0].grid_cfg.x_end =
+ imgu_css_grid_end(acc->af.stripes[0].grid_cfg.x_start,
+ acc->af.stripes[0].grid_cfg.width,
+ b_w_log2);
+
+ acc->af.stripes[1].grid_cfg.x_start =
+ (acc->af.stripes[0].grid_cfg.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+
+ b_w_log2 = acc->af.stripes[1].grid_cfg.block_width_log2;
+ acc->af.stripes[1].grid_cfg.x_end =
+ imgu_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
+ acc->af.stripes[1].grid_cfg.width,
+ b_w_log2);
+ }
+
+ /*
+ * To reduce complexity of debubbling and loading statistics
+ * fix grid_height_per_slice to 1 for both stripes
+ */
+ for (i = 0; i < stripes; i++)
+ acc->af.stripes[i].grid_cfg.height_per_slice = 1;
+
+ if (imgu_css_af_ops_calc(css, pipe, &acc->af))
+ return -EINVAL;
+
+ /* acc_param: awb_config */
+
+ if (use && use->acc_awb) {
+ /* Take values from user */
+ acc->awb.config = acc_user->awb.config;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->awb.config = acc_old->awb.config;
+ } else {
+ /* Set from scratch */
+ acc->awb.config = imgu_css_awb_defaults;
+ }
+
+ if (acc->awb.config.grid.width <= 0)
+ return -EINVAL;
+
+ acc->awb.config.grid.height_per_slice =
+ IMGU_ABI_AWB_MAX_CELLS_PER_SET / acc->awb.config.grid.width;
+ imgu_css_grid_end_calc(&acc->awb.config.grid);
+
+ for (i = 0; i < stripes; i++)
+ acc->awb.stripes[i] = acc->awb.config;
+
+ if (acc->awb.config.grid.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->awb.stripes[0].rgbs_thr_b &= ~IPU3_UAPI_AWB_RGBS_THR_B_EN;
+
+ acc->awb.stripes[1].grid.x_start =
+ (acc->awb.stripes[1].grid.x_start -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+
+ b_w_log2 = acc->awb.stripes[1].grid.block_width_log2;
+ acc->awb.stripes[1].grid.x_end =
+ imgu_css_grid_end(acc->awb.stripes[1].grid.x_start,
+ acc->awb.stripes[1].grid.width,
+ b_w_log2);
+ } else if (acc->awb.config.grid.x_end <=
+ acc->stripe.bds_out_stripes[0].width - min_overlap) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->awb.stripes[1].rgbs_thr_b &= ~IPU3_UAPI_AWB_RGBS_THR_B_EN;
+ } else {
+ /* Enable for both stripes */
+
+ acc->awb.stripes[0].grid.width =
+ (acc->stripe.bds_out_stripes[0].width -
+ acc->awb.config.grid.x_start + 1) >>
+ acc->awb.config.grid.block_width_log2;
+ acc->awb.stripes[1].grid.width = acc->awb.config.grid.width -
+ acc->awb.stripes[0].grid.width;
+
+ b_w_log2 = acc->awb.stripes[0].grid.block_width_log2;
+ acc->awb.stripes[0].grid.x_end =
+ imgu_css_grid_end(acc->awb.stripes[0].grid.x_start,
+ acc->awb.stripes[0].grid.width,
+ b_w_log2);
+
+ acc->awb.stripes[1].grid.x_start =
+ (acc->awb.stripes[0].grid.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+
+ b_w_log2 = acc->awb.stripes[1].grid.block_width_log2;
+ acc->awb.stripes[1].grid.x_end =
+ imgu_css_grid_end(acc->awb.stripes[1].grid.x_start,
+ acc->awb.stripes[1].grid.width,
+ b_w_log2);
+ }
+
+ /*
+ * To reduce complexity of debubbling and loading statistics
+ * fix grid_height_per_slice to 1 for both stripes
+ */
+ for (i = 0; i < stripes; i++)
+ acc->awb.stripes[i].grid.height_per_slice = 1;
+
+ if (imgu_css_awb_ops_calc(css, pipe, &acc->awb))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Fill the indicated structure in `new_binary_params' from one of the
+ * possible sources based on the `use_user' flag: if the flag is true,
+ * copy from `user_setting'; if it is false, copy from
+ * `old_binary_params'. Return NULL on success or an error pointer on
+ * failure.
+ * If the flag is false and `old_binary_params' is NULL, return a pointer
+ * to the structure inside `new_binary_params'. In that case the caller
+ * should calculate and fill the structure from scratch.
+ */
+static void *imgu_css_cfg_copy(struct imgu_css *css,
+ unsigned int pipe, bool use_user,
+ void *user_setting, void *old_binary_params,
+ void *new_binary_params,
+ enum imgu_abi_memories m,
+ struct imgu_fw_isp_parameter *par,
+ size_t par_size)
+{
+ const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
+ void *new_setting, *old_setting;
+
+ new_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par,
+ par_size, new_binary_params);
+ if (!new_setting)
+ return ERR_PTR(-EPROTO); /* Corrupted firmware */
+
+ if (use_user) {
+ /* Take new user parameters */
+ memcpy(new_setting, user_setting, par_size);
+ } else if (old_binary_params) {
+ /* Take previous value */
+ old_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par,
+ par_size,
+ old_binary_params);
+ if (!old_setting)
+ return ERR_PTR(-EPROTO);
+ memcpy(new_setting, old_setting, par_size);
+ } else {
+ return new_setting; /* Need to calculate */
+ }
+
+ return NULL; /* Copied from other value */
+}
+
+/*
+ * Configure VMEM0 parameters (late binding parameters).
+ */
+int imgu_css_cfg_vmem0(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *vmem0, void *vmem0_old,
+ struct ipu3_uapi_params *user)
+{
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css->pipes[pipe].bindex];
+ struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
+ struct ipu3_uapi_isp_lin_vmem_params *lin_vmem = NULL;
+ struct ipu3_uapi_isp_tnr3_vmem_params *tnr_vmem = NULL;
+ struct ipu3_uapi_isp_xnr3_vmem_params *xnr_vmem = NULL;
+ const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
+ const enum imgu_abi_memories m = IMGU_ABI_MEM_ISP_VMEM0;
+ unsigned int i;
+
+ /* Configure VMEM0 */
+
+ memset(vmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size);
+
+ /* Configure Linearization VMEM0 parameters */
+
+ lin_vmem = imgu_css_cfg_copy(css, pipe, use && use->lin_vmem_params,
+ &user->lin_vmem_params, vmem0_old, vmem0,
+ m, &pofs->vmem.lin, sizeof(*lin_vmem));
+ if (!IS_ERR_OR_NULL(lin_vmem)) {
+ /* Generate parameter from scratch */
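+		/*
+		 * Identity linearization: every channel LUT starts at
+		 * 32 * i and rises with a constant step of 32.
+		 */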
+ for (i = 0; i < IPU3_UAPI_LIN_LUT_SIZE; i++) {
+ lin_vmem->lin_lutlow_gr[i] = 32 * i;
+ lin_vmem->lin_lutlow_r[i] = 32 * i;
+ lin_vmem->lin_lutlow_b[i] = 32 * i;
+ lin_vmem->lin_lutlow_gb[i] = 32 * i;
+
+ lin_vmem->lin_lutdif_gr[i] = 32;
+ lin_vmem->lin_lutdif_r[i] = 32;
+ lin_vmem->lin_lutdif_b[i] = 32;
+ lin_vmem->lin_lutdif_gb[i] = 32;
+ }
+ }
+
+ /* Configure TNR3 VMEM parameters */
+ if (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ tnr_vmem = imgu_css_cfg_copy(css, pipe,
+ use && use->tnr3_vmem_params,
+ &user->tnr3_vmem_params,
+ vmem0_old, vmem0, m,
+ &pofs->vmem.tnr3,
+ sizeof(*tnr_vmem));
+ if (!IS_ERR_OR_NULL(tnr_vmem)) {
+ /* Generate parameter from scratch */
+ for (i = 0; i < IPU3_UAPI_ISP_TNR3_VMEM_LEN; i++)
+ tnr_vmem->sigma[i] = 256;
+ }
+ }
+
+ /* Configure XNR3 VMEM parameters */
+
+ xnr_vmem = imgu_css_cfg_copy(css, pipe, use && use->xnr3_vmem_params,
+ &user->xnr3_vmem_params, vmem0_old, vmem0,
+ m, &pofs->vmem.xnr3, sizeof(*xnr_vmem));
+	if (!IS_ERR_OR_NULL(xnr_vmem)) {
+		/* Generate parameter from scratch */
+		for (i = 0; i < ARRAY_SIZE(xnr_vmem->x); i++) {
+			xnr_vmem->x[i] = imgu_css_xnr3_vmem_defaults.x
+				[i % IMGU_XNR3_VMEM_LUT_LEN];
+			xnr_vmem->a[i] = imgu_css_xnr3_vmem_defaults.a
+				[i % IMGU_XNR3_VMEM_LUT_LEN];
+			xnr_vmem->b[i] = imgu_css_xnr3_vmem_defaults.b
+				[i % IMGU_XNR3_VMEM_LUT_LEN];
+			xnr_vmem->c[i] = imgu_css_xnr3_vmem_defaults.c
+				[i % IMGU_XNR3_VMEM_LUT_LEN];
+		}
+	}
+
+ return IS_ERR(lin_vmem) || IS_ERR(tnr_vmem) || IS_ERR(xnr_vmem) ?
+ -EPROTO : 0;
+}
+
+/*
+ * Configure DMEM0 parameters (late binding parameters).
+ */
+int imgu_css_cfg_dmem0(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *dmem0, void *dmem0_old,
+ struct ipu3_uapi_params *user)
+{
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
+
+ struct ipu3_uapi_isp_tnr3_params *tnr_dmem = NULL;
+ struct ipu3_uapi_isp_xnr3_params *xnr_dmem;
+
+ const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
+ const enum imgu_abi_memories m = IMGU_ABI_MEM_ISP_DMEM0;
+
+ /* Configure DMEM0 */
+
+ memset(dmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size);
+
+ /* Configure TNR3 DMEM0 parameters */
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ tnr_dmem = imgu_css_cfg_copy(css, pipe,
+ use && use->tnr3_dmem_params,
+ &user->tnr3_dmem_params,
+ dmem0_old, dmem0, m,
+ &pofs->dmem.tnr3,
+ sizeof(*tnr_dmem));
+ if (!IS_ERR_OR_NULL(tnr_dmem)) {
+ /* Generate parameter from scratch */
+ tnr_dmem->knee_y1 = 768;
+ tnr_dmem->knee_y2 = 1280;
+ }
+ }
+
+ /* Configure XNR3 DMEM0 parameters */
+
+ xnr_dmem = imgu_css_cfg_copy(css, pipe, use && use->xnr3_dmem_params,
+ &user->xnr3_dmem_params, dmem0_old, dmem0,
+ m, &pofs->dmem.xnr3, sizeof(*xnr_dmem));
+ if (!IS_ERR_OR_NULL(xnr_dmem)) {
+ /* Generate parameter from scratch */
+ xnr_dmem->alpha.y0 = 2047;
+ xnr_dmem->alpha.u0 = 2047;
+ xnr_dmem->alpha.v0 = 2047;
+ }
+
+ return IS_ERR(tnr_dmem) || IS_ERR(xnr_dmem) ? -EPROTO : 0;
+}
+
+/* Generate unity morphing table without morphing effect */
+void imgu_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
+ int frame_in_x, int frame_in_y,
+ int frame_out_x, int frame_out_y,
+ int env_w, int env_h)
+{
+ static const unsigned int FRAC_BITS = IMGU_ABI_GDC_FRAC_BITS;
+ static const unsigned int XMEM_ALIGN = 1 << 4;
+ const unsigned int XMEM_ALIGN_MASK = ~(XMEM_ALIGN - 1);
+ static const unsigned int BCI_ENV = 4;
+ static const unsigned int BYP = 2; /* Bytes per pixel */
+ const unsigned int OFFSET_X = 2 * IMGU_DVS_BLOCK_W + env_w + 1;
+ const unsigned int OFFSET_Y = IMGU_DVS_BLOCK_H + env_h + 1;
+
+ struct imgu_abi_gdc_warp_param gdc_luma, gdc_chroma;
+
+ unsigned int blocks_x = ALIGN(DIV_ROUND_UP(frame_out_x,
+ IMGU_DVS_BLOCK_W), 2);
+ unsigned int blocks_y = DIV_ROUND_UP(frame_out_y, IMGU_DVS_BLOCK_H);
+ unsigned int y0, x0, x1, x, y;
+
+ /* Global luma settings */
+ gdc_luma.origin_x = 0;
+ gdc_luma.origin_y = 0;
+ gdc_luma.p0_x = (OFFSET_X - (OFFSET_X & XMEM_ALIGN_MASK)) << FRAC_BITS;
+ gdc_luma.p0_y = 0;
+ gdc_luma.p1_x = gdc_luma.p0_x + (IMGU_DVS_BLOCK_W << FRAC_BITS);
+ gdc_luma.p1_y = gdc_luma.p0_y;
+ gdc_luma.p2_x = gdc_luma.p0_x;
+ gdc_luma.p2_y = gdc_luma.p0_y + (IMGU_DVS_BLOCK_H << FRAC_BITS);
+ gdc_luma.p3_x = gdc_luma.p1_x;
+ gdc_luma.p3_y = gdc_luma.p2_y;
+
+ gdc_luma.in_block_width = IMGU_DVS_BLOCK_W + BCI_ENV +
+ OFFSET_X - (OFFSET_X & XMEM_ALIGN_MASK);
+ gdc_luma.in_block_width_a = DIV_ROUND_UP(gdc_luma.in_block_width,
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ gdc_luma.in_block_width_b = DIV_ROUND_UP(gdc_luma.in_block_width,
+ IMGU_ABI_ISP_DDR_WORD_BYTES /
+ BYP);
+ gdc_luma.in_block_height = IMGU_DVS_BLOCK_H + BCI_ENV;
+ gdc_luma.padding = 0;
+
+ /* Global chroma settings */
+ gdc_chroma.origin_x = 0;
+ gdc_chroma.origin_y = 0;
+ gdc_chroma.p0_x = (OFFSET_X / 2 - (OFFSET_X / 2 & XMEM_ALIGN_MASK)) <<
+ FRAC_BITS;
+ gdc_chroma.p0_y = 0;
+ gdc_chroma.p1_x = gdc_chroma.p0_x + (IMGU_DVS_BLOCK_W << FRAC_BITS);
+ gdc_chroma.p1_y = gdc_chroma.p0_y;
+ gdc_chroma.p2_x = gdc_chroma.p0_x;
+ gdc_chroma.p2_y = gdc_chroma.p0_y + (IMGU_DVS_BLOCK_H / 2 << FRAC_BITS);
+ gdc_chroma.p3_x = gdc_chroma.p1_x;
+ gdc_chroma.p3_y = gdc_chroma.p2_y;
+
+ gdc_chroma.in_block_width = IMGU_DVS_BLOCK_W + BCI_ENV;
+ gdc_chroma.in_block_width_a = DIV_ROUND_UP(gdc_chroma.in_block_width,
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ gdc_chroma.in_block_width_b = DIV_ROUND_UP(gdc_chroma.in_block_width,
+ IMGU_ABI_ISP_DDR_WORD_BYTES /
+ BYP);
+ gdc_chroma.in_block_height = IMGU_DVS_BLOCK_H / 2 + BCI_ENV;
+ gdc_chroma.padding = 0;
+
+ /* Calculate block offsets for luma and chroma */
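+	/*
+	 * Two luma block descriptors are emitted for every chroma block
+	 * descriptor, matching the 4:2:0 subsampled layout.
+	 */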
+ for (y0 = 0; y0 < blocks_y; y0++) {
+ for (x0 = 0; x0 < blocks_x / 2; x0++) {
+ for (x1 = 0; x1 < 2; x1++) {
+ /* Luma blocks */
+ x = (x0 * 2 + x1) * IMGU_DVS_BLOCK_W + OFFSET_X;
+ x &= XMEM_ALIGN_MASK;
+ y = y0 * IMGU_DVS_BLOCK_H + OFFSET_Y;
+ *gdc = gdc_luma;
+ gdc->in_addr_offset =
+ (y * frame_in_x + x) * BYP;
+ gdc++;
+ }
+
+ /* Chroma block */
+ x = x0 * IMGU_DVS_BLOCK_W + OFFSET_X / 2;
+ x &= XMEM_ALIGN_MASK;
+ y = y0 * (IMGU_DVS_BLOCK_H / 2) + OFFSET_Y / 2;
+ *gdc = gdc_chroma;
+ gdc->in_addr_offset = (y * frame_in_x + x) * BYP;
+ gdc++;
+ }
+ }
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.h b/drivers/staging/media/ipu3/ipu3-css-params.h
new file mode 100644
index 000000000000..ffaec6b7d5cc
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-params.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_PARAMS_H
+#define __IPU3_PARAMS_H
+
+int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ struct imgu_abi_acc_param *acc,
+ struct imgu_abi_acc_param *acc_old,
+ struct ipu3_uapi_acc_param *acc_user);
+
+int imgu_css_cfg_vmem0(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *vmem0, void *vmem0_old,
+ struct ipu3_uapi_params *user);
+
+int imgu_css_cfg_dmem0(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *dmem0, void *dmem0_old,
+ struct ipu3_uapi_params *user);
+
+void imgu_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
+ int frame_in_x, int frame_in_y,
+ int frame_out_x, int frame_out_y,
+ int env_w, int env_h);
+
+#endif /*__IPU3_PARAMS_H */
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.c b/drivers/staging/media/ipu3/ipu3-css-pool.c
new file mode 100644
index 000000000000..fa5b7d3acef2
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+
+#include "ipu3.h"
+#include "ipu3-css-pool.h"
+#include "ipu3-dmamap.h"
+
+int imgu_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct imgu_css_map *map, size_t size)
+{
+ if (map->size < size && map->vaddr) {
+ dev_warn(&imgu->pci_dev->dev, "dma buf resized from %zu to %zu",
+ map->size, size);
+
+ imgu_dmamap_free(imgu, map);
+ if (!imgu_dmamap_alloc(imgu, map, size))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void imgu_css_pool_cleanup(struct imgu_device *imgu, struct imgu_css_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPU3_CSS_POOL_SIZE; i++)
+ imgu_dmamap_free(imgu, &pool->entry[i].param);
+}
+
+int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
+ size_t size)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPU3_CSS_POOL_SIZE; i++) {
+ pool->entry[i].valid = false;
+ if (size == 0) {
+ pool->entry[i].param.vaddr = NULL;
+ continue;
+ }
+
+ if (!imgu_dmamap_alloc(imgu, &pool->entry[i].param, size))
+ goto fail;
+ }
+
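+	/*
+	 * Start with the write pointer outside the ring: no entry is
+	 * valid until the first imgu_css_pool_get().
+	 */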
+ pool->last = IPU3_CSS_POOL_SIZE;
+
+ return 0;
+
+fail:
+ imgu_css_pool_cleanup(imgu, pool);
+ return -ENOMEM;
+}
+
+/*
+ * Allocate a new parameter buffer from the pool by recycling its oldest
+ * entry.
+ */
+void imgu_css_pool_get(struct imgu_css_pool *pool)
+{
+ /* Get the oldest entry */
+ u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
+
+ pool->entry[n].valid = true;
+ pool->last = n;
+}
+
+/*
+ * Undo, for all practical purposes, the effect of pool_get().
+ */
+void imgu_css_pool_put(struct imgu_css_pool *pool)
+{
+ pool->entry[pool->last].valid = false;
+ pool->last = (pool->last + IPU3_CSS_POOL_SIZE - 1) % IPU3_CSS_POOL_SIZE;
+}
+
+/**
+ * imgu_css_pool_last - Retrieve the nth pool entry counted from the last one
+ *
+ * @pool: a pointer to &struct imgu_css_pool.
+ * @n: the distance from the most recently written entry.
+ *
+ * Returns:
+ * The nth entry from the last one, or a null map if no valid frame is
+ * stored in that slot.
+ */
+const struct imgu_css_map *
+imgu_css_pool_last(struct imgu_css_pool *pool, unsigned int n)
+{
+ static const struct imgu_css_map null_map = { 0 };
+ int i = (pool->last + IPU3_CSS_POOL_SIZE - n) % IPU3_CSS_POOL_SIZE;
+
+ WARN_ON(n >= IPU3_CSS_POOL_SIZE);
+
+ if (!pool->entry[i].valid)
+ return &null_map;
+
+ return &pool->entry[i].param;
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.h b/drivers/staging/media/ipu3/ipu3-css-pool.h
new file mode 100644
index 000000000000..3f9e32e0e9a7
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_UTIL_H
+#define __IPU3_UTIL_H
+
+struct device;
+struct imgu_device;
+
+#define IPU3_CSS_POOL_SIZE 4
+
+/**
+ * struct imgu_css_map - store DMA mapping info for buffer
+ *
+ * @size: size of the buffer in bytes.
+ * @vaddr: kernel virtual address.
+ * @daddr: iova dma address to access IPU3.
+ * @pages: pages mapped to this buffer
+ */
+struct imgu_css_map {
+ size_t size;
+ void *vaddr;
+ dma_addr_t daddr;
+ struct page **pages;
+};
+
+/**
+ * struct imgu_css_pool - circular buffer pool definition
+ *
+ * @entry: array with IPU3_CSS_POOL_SIZE elements.
+ * @entry.param: a &struct imgu_css_map for storing the mem mapping.
+ * @entry.valid: used to mark if the entry has valid data.
+ * @last: write pointer, initialized to IPU3_CSS_POOL_SIZE.
+ */
+struct imgu_css_pool {
+ struct {
+ struct imgu_css_map param;
+ bool valid;
+ } entry[IPU3_CSS_POOL_SIZE];
+ u32 last;
+};
+
+int imgu_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct imgu_css_map *map, size_t size);
+void imgu_css_pool_cleanup(struct imgu_device *imgu,
+ struct imgu_css_pool *pool);
+int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
+ size_t size);
+void imgu_css_pool_get(struct imgu_css_pool *pool);
+void imgu_css_pool_put(struct imgu_css_pool *pool);
+const struct imgu_css_map *imgu_css_pool_last(struct imgu_css_pool *pool,
+ u32 last);
+
+#endif
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
new file mode 100644
index 000000000000..777cac1c27bf
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -0,0 +1,2355 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/string_choices.h>
+
+#include "ipu3.h"
+#include "ipu3-css.h"
+#include "ipu3-css-fw.h"
+#include "ipu3-css-params.h"
+#include "ipu3-dmamap.h"
+#include "ipu3-tables.h"
+
+/* IRQ configuration */
+#define IMGU_IRQCTRL_IRQ_MASK (IMGU_IRQCTRL_IRQ_SP1 | \
+ IMGU_IRQCTRL_IRQ_SP2 | \
+ IMGU_IRQCTRL_IRQ_SW_PIN(0) | \
+ IMGU_IRQCTRL_IRQ_SW_PIN(1))
+
+#define IPU3_CSS_FORMAT_BPP_DEN 50 /* Denominator */
+
+/* Some sane limits for resolutions */
+#define IPU3_CSS_MIN_RES 32
+#define IPU3_CSS_MAX_H 3136
+#define IPU3_CSS_MAX_W 4224
+
+/* The minimal envelope size (GDC input size minus output size) should be 4 */
+#define MIN_ENVELOPE 4
+
+/*
+ * Pre-allocated buffer sizes for the CSS ABI buffers and for the
+ * auxiliary frames after BDS and before GDC. These values should be
+ * tuned large enough to avoid buffer re-allocation while streaming,
+ * in order to keep streaming latency low.
+ */
+#define CSS_ABI_SIZE 136
+#define CSS_BDS_SIZE (4480 * 3200 * 3)
+#define CSS_GDC_SIZE (4224 * 3200 * 12 / 8)
+
+#define IPU3_CSS_QUEUE_TO_FLAGS(q) (1 << (q))
+#define IPU3_CSS_FORMAT_FL_IN \
+ IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_IN)
+#define IPU3_CSS_FORMAT_FL_OUT \
+ IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_OUT)
+#define IPU3_CSS_FORMAT_FL_VF \
+ IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_VF)
+
+/* Formats supported by IPU3 Camera Sub System */
+static const struct imgu_css_format imgu_css_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_NV12,
+ .osys_format = IMGU_ABI_OSYS_FORMAT_NV12,
+ .osys_tiling = IMGU_ABI_OSYS_TILING_NONE,
+ .chroma_decim = 4,
+ .width_align = IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_OUT | IPU3_CSS_FORMAT_FL_VF,
+ }, {
+ /* Each 32 bytes contains 25 10-bit pixels */
+ .pixelformat = V4L2_PIX_FMT_IPU3_SBGGR10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_BGGR,
+ .bit_depth = 10,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_IPU3_SGBRG10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_GBRG,
+ .bit_depth = 10,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_IPU3_SGRBG10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_GRBG,
+ .bit_depth = 10,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_IPU3_SRGGB10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_RGGB,
+ .bit_depth = 10,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ },
+};
+
+static const struct {
+ enum imgu_abi_queue_id qid;
+ size_t ptr_ofs;
+} imgu_css_queues[IPU3_CSS_QUEUES] = {
+ [IPU3_CSS_QUEUE_IN] = {
+ IMGU_ABI_QUEUE_C_ID,
+ offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
+ },
+ [IPU3_CSS_QUEUE_OUT] = {
+ IMGU_ABI_QUEUE_D_ID,
+ offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
+ },
+ [IPU3_CSS_QUEUE_VF] = {
+ IMGU_ABI_QUEUE_E_ID,
+ offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
+ },
+ [IPU3_CSS_QUEUE_STAT_3A] = {
+ IMGU_ABI_QUEUE_F_ID,
+ offsetof(struct imgu_abi_buffer, payload.s3a.data_ptr)
+ },
+};
+
+/* Initialize queue based on given format, adjust format as needed */
+static int imgu_css_queue_init(struct imgu_css_queue *queue,
+ struct v4l2_pix_format_mplane *fmt, u32 flags)
+{
+ struct v4l2_pix_format_mplane *const f = &queue->fmt.mpix;
+ unsigned int i;
+ u32 sizeimage;
+
+ INIT_LIST_HEAD(&queue->bufs);
+
+ queue->css_fmt = NULL; /* Disable */
+ if (!fmt)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(imgu_css_formats); i++) {
+ if (!(imgu_css_formats[i].flags & flags))
+ continue;
+ queue->css_fmt = &imgu_css_formats[i];
+ if (imgu_css_formats[i].pixelformat == fmt->pixelformat)
+ break;
+ }
+ if (!queue->css_fmt)
+ return -EINVAL; /* Could not find any suitable format */
+
+ queue->fmt.mpix = *fmt;
+
+ f->width = ALIGN(clamp_t(u32, f->width,
+ IPU3_CSS_MIN_RES, IPU3_CSS_MAX_W), 2);
+ f->height = ALIGN(clamp_t(u32, f->height,
+ IPU3_CSS_MIN_RES, IPU3_CSS_MAX_H), 2);
+ queue->width_pad = ALIGN(f->width, queue->css_fmt->width_align);
+ f->plane_fmt[0].bytesperline =
+ imgu_bytesperline(f->width, queue->css_fmt->frame_format);
+ sizeimage = f->height * f->plane_fmt[0].bytesperline;
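+	/* For NV12 (chroma_decim == 4) this adds half of the luma size */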
+ if (queue->css_fmt->chroma_decim)
+ sizeimage += 2 * sizeimage / queue->css_fmt->chroma_decim;
+
+ f->plane_fmt[0].sizeimage = sizeimage;
+ f->field = V4L2_FIELD_NONE;
+ f->num_planes = 1;
+ f->colorspace = queue->css_fmt->colorspace;
+ f->flags = 0;
+ f->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ return 0;
+}
+
+static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
+{
+ return q->css_fmt;
+}
+
+/******************* css hw *******************/
+
+/* In the style of writesl() defined in include/asm-generic/io.h */
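+/* Only whole 32-bit words are written; a trailing partial word is skipped */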
+static inline void writes(const void *mem, ssize_t count, void __iomem *addr)
+{
+ if (count >= 4) {
+ const u32 *buf = mem;
+
+ count /= 4;
+ do {
+ writel(*buf++, addr);
+ addr += 4;
+ } while (--count);
+ }
+}
+
+/* Wait until register `reg', masked with `mask', becomes `cmp' */
+static int imgu_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
+{
+ u32 val;
+
+ return readl_poll_timeout(base + reg, val, (val & mask) == cmp,
+ 1000, 100 * 1000);
+}
+
+/* Initialize the IPU3 CSS hardware and associated h/w blocks */
+
+int imgu_css_set_powerup(struct device *dev, void __iomem *base,
+ unsigned int freq)
+{
+ u32 pm_ctrl, state, val;
+
+ dev_dbg(dev, "%s with freq %u\n", __func__, freq);
+ /* Clear the CSS busy signal */
+ readl(base + IMGU_REG_GP_BUSY);
+ writel(0, base + IMGU_REG_GP_BUSY);
+
+ /* Wait for idle signal */
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ IMGU_STATE_IDLE_STS)) {
+ dev_err(dev, "failed to set CSS idle\n");
+ goto fail;
+ }
+
+ /* Reset the css */
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
+ base + IMGU_REG_PM_CTRL);
+
+ usleep_range(200, 300);
+
+ /** Prepare CSS */
+
+ pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
+ state = readl(base + IMGU_REG_STATE);
+
+ dev_dbg(dev, "CSS pm_ctrl 0x%x state 0x%x (power %s)\n",
+ pm_ctrl, state, str_down_up(state & IMGU_STATE_POWER_DOWN));
+
+ /* Power up CSS using wrapper */
+ if (state & IMGU_STATE_POWER_DOWN) {
+ writel(IMGU_PM_CTRL_RACE_TO_HALT | IMGU_PM_CTRL_START,
+ base + IMGU_REG_PM_CTRL);
+ if (imgu_hw_wait(base, IMGU_REG_PM_CTRL,
+ IMGU_PM_CTRL_START, 0)) {
+ dev_err(dev, "failed to power up CSS\n");
+ goto fail;
+ }
+ usleep_range(2000, 3000);
+ } else {
+ writel(IMGU_PM_CTRL_RACE_TO_HALT, base + IMGU_REG_PM_CTRL);
+ }
+
+ /* Set the busy bit */
+ writel(readl(base + IMGU_REG_GP_BUSY) | 1, base + IMGU_REG_GP_BUSY);
+
+ /* Set CSS clock frequency */
+ pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
+ val = pm_ctrl & ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
+ writel(val, base + IMGU_REG_PM_CTRL);
+ writel(0, base + IMGU_REG_GP_BUSY);
+ if (imgu_hw_wait(base, IMGU_REG_STATE,
+ IMGU_STATE_PWRDNM_FSM_MASK, 0)) {
+ dev_err(dev, "failed to pwrdn CSS\n");
+ goto fail;
+ }
+ val = (freq / IMGU_SYSTEM_REQ_FREQ_DIVIDER) & IMGU_SYSTEM_REQ_FREQ_MASK;
+ writel(val, base + IMGU_REG_SYSTEM_REQ);
+ writel(1, base + IMGU_REG_GP_BUSY);
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_HALT,
+ base + IMGU_REG_PM_CTRL);
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
+ IMGU_STATE_HALT_STS)) {
+ dev_err(dev, "failed to halt CSS\n");
+ goto fail;
+ }
+
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_START,
+ base + IMGU_REG_PM_CTRL);
+ if (imgu_hw_wait(base, IMGU_REG_PM_CTRL, IMGU_PM_CTRL_START, 0)) {
+ dev_err(dev, "failed to start CSS\n");
+ goto fail;
+ }
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_UNHALT,
+ base + IMGU_REG_PM_CTRL);
+
+ val = readl(base + IMGU_REG_PM_CTRL); /* get pm_ctrl */
+ val &= ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
+ val |= pm_ctrl & (IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
+ writel(val, base + IMGU_REG_PM_CTRL);
+
+ return 0;
+
+fail:
+ imgu_css_set_powerdown(dev, base);
+ return -EIO;
+}
+
+void imgu_css_set_powerdown(struct device *dev, void __iomem *base)
+{
+ dev_dbg(dev, "%s\n", __func__);
+ /* wait for cio idle signal */
+ if (imgu_hw_wait(base, IMGU_REG_CIO_GATE_BURST_STATE,
+ IMGU_CIO_GATE_BURST_MASK, 0))
+ dev_warn(dev, "wait cio gate idle timeout");
+
+ /* wait for css idle signal */
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ IMGU_STATE_IDLE_STS))
+ dev_warn(dev, "wait css idle timeout\n");
+
+ /* do halt-halted handshake with css */
+ writel(1, base + IMGU_REG_GP_HALT);
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
+ IMGU_STATE_HALT_STS))
+ dev_warn(dev, "failed to halt css");
+
+ /* de-assert the busy bit */
+ writel(0, base + IMGU_REG_GP_BUSY);
+}
+
+static void imgu_css_hw_enable_irq(struct imgu_css *css)
+{
+ void __iomem *const base = css->base;
+ u32 val, i;
+
+ /* Set up interrupts */
+
+ /*
+ * Enable IRQ on the SP which signals that SP goes to idle
+ * (aka ready state) and set trigger to pulse
+ */
+ val = readl(base + IMGU_REG_SP_CTRL(0)) | IMGU_CTRL_IRQ_READY;
+ writel(val, base + IMGU_REG_SP_CTRL(0));
+ writel(val | IMGU_CTRL_IRQ_CLEAR, base + IMGU_REG_SP_CTRL(0));
+
+ /* Enable IRQs from the IMGU wrapper */
+ writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_ENABLE);
+ /* Clear */
+ writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_STATUS);
+
+ /* Enable IRQs from main IRQ controller */
+ writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(IMGU_IRQCTRL_MAIN));
+ writel(0, base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_EDGE(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_CLEAR(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
+ /* Wait for write complete */
+ readl(base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
+
+ /* Enable IRQs from SP0 and SP1 controllers */
+ for (i = IMGU_IRQCTRL_SP0; i <= IMGU_IRQCTRL_SP1; i++) {
+ writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(i));
+ writel(0, base + IMGU_REG_IRQCTRL_MASK(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_EDGE(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_ENABLE(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_CLEAR(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_MASK(i));
+ /* Wait for write complete */
+ readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
+ }
+}
+
+static int imgu_css_hw_init(struct imgu_css *css)
+{
+ /* For checking that streaming monitor statuses are valid */
+ static const struct {
+ u32 reg;
+ u32 mask;
+ const char *name;
+ } stream_monitors[] = {
+ {
+ IMGU_REG_GP_SP1_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_ISP_PORT_SP12ISP,
+ "ISP0 to SP0"
+ }, {
+ IMGU_REG_GP_ISP_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_SP1_PORT_ISP2SP1,
+ "SP0 to ISP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_ISP2DMA,
+ "ISP0 to DMA0"
+ }, {
+ IMGU_REG_GP_ISP_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_ISP_PORT_DMA2ISP,
+ "DMA0 to ISP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
+ "ISP0 to GDC0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
+ "GDC0 to ISP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_SP12DMA,
+ "SP0 to DMA0"
+ }, {
+ IMGU_REG_GP_SP1_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_SP1_PORT_DMA2SP1,
+ "DMA0 to SP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
+ "SP0 to GDC0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
+ "GDC0 to SP0"
+ },
+ };
+
+ struct device *dev = css->dev;
+ void __iomem *const base = css->base;
+ u32 val, i;
+
+ /* Set instruction cache address and inv bit for ISP, SP, and SP1 */
+ for (i = 0; i < IMGU_NUM_SP; i++) {
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css->fw_sp[i]];
+
+ writel(css->binary[css->fw_sp[i]].daddr,
+ base + IMGU_REG_SP_ICACHE_ADDR(bi->type));
+ writel(readl(base + IMGU_REG_SP_CTRL(bi->type)) |
+ IMGU_CTRL_ICACHE_INV,
+ base + IMGU_REG_SP_CTRL(bi->type));
+ }
+ writel(css->binary[css->fw_bl].daddr, base + IMGU_REG_ISP_ICACHE_ADDR);
+ writel(readl(base + IMGU_REG_ISP_CTRL) | IMGU_CTRL_ICACHE_INV,
+ base + IMGU_REG_ISP_CTRL);
+
+ /* Check that IMGU hardware is ready */
+
+ if (!(readl(base + IMGU_REG_SP_CTRL(0)) & IMGU_CTRL_IDLE)) {
+ dev_err(dev, "SP is not idle\n");
+ return -EIO;
+ }
+ if (!(readl(base + IMGU_REG_ISP_CTRL) & IMGU_CTRL_IDLE)) {
+ dev_err(dev, "ISP is not idle\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stream_monitors); i++) {
+ val = readl(base + stream_monitors[i].reg);
+ if (val & stream_monitors[i].mask) {
+ dev_err(dev, "error: Stream monitor %s is valid\n",
+ stream_monitors[i].name);
+ return -EIO;
+ }
+ }
+
+ /* Initialize GDC with default values */
+
+ for (i = 0; i < ARRAY_SIZE(imgu_css_gdc_lut[0]); i++) {
+ u32 val0 = imgu_css_gdc_lut[0][i] & IMGU_GDC_LUT_MASK;
+ u32 val1 = imgu_css_gdc_lut[1][i] & IMGU_GDC_LUT_MASK;
+ u32 val2 = imgu_css_gdc_lut[2][i] & IMGU_GDC_LUT_MASK;
+ u32 val3 = imgu_css_gdc_lut[3][i] & IMGU_GDC_LUT_MASK;
+
+ writel(val0 | (val1 << 16),
+ base + IMGU_REG_GDC_LUT_BASE + i * 8);
+ writel(val2 | (val3 << 16),
+ base + IMGU_REG_GDC_LUT_BASE + i * 8 + 4);
+ }
+
+ return 0;
+}
+
+/* Boot the given IPU3 CSS SP */
+static int imgu_css_hw_start_sp(struct imgu_css *css, int sp)
+{
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_sp_init_dmem_cfg dmem_cfg = {
+ .ddr_data_addr = css->binary[css->fw_sp[sp]].daddr
+ + bi->blob.data_source,
+ .dmem_data_addr = bi->blob.data_target,
+ .dmem_bss_addr = bi->blob.bss_target,
+ .data_size = bi->blob.data_size,
+ .bss_size = bi->blob.bss_size,
+ .sp_id = sp,
+ };
+
+ writes(&dmem_cfg, sizeof(dmem_cfg), base +
+ IMGU_REG_SP_DMEM_BASE(sp) + bi->info.sp.init_dmem_data);
+
+ writel(bi->info.sp.sp_entry, base + IMGU_REG_SP_START_ADDR(sp));
+
+ writel(readl(base + IMGU_REG_SP_CTRL(sp))
+ | IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_SP_CTRL(sp));
+
+ if (imgu_hw_wait(css->base, IMGU_REG_SP_DMEM_BASE(sp)
+ + bi->info.sp.sw_state,
+ ~0, IMGU_ABI_SP_SWSTATE_INITIALIZED))
+ return -EIO;
+
+ return 0;
+}
+
+/* Start the IPU3 CSS ImgU (Imaging Unit) and all the SPs */
+static int imgu_css_hw_start(struct imgu_css *css)
+{
+ static const u32 event_mask =
+ ((1 << IMGU_ABI_EVTTYPE_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_3A_STATS_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_DIS_STATS_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_PIPELINE_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_FRAME_TAGGED) |
+ (1 << IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_METADATA_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE))
+ << IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT;
+
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi, *bl = &css->fwp->binary_header[css->fw_bl];
+ unsigned int i;
+
+ writel(IMGU_TLB_INVALIDATE, base + IMGU_REG_TLB_INVALIDATE);
+
+ /* Start bootloader */
+
+ writel(IMGU_ABI_BL_SWSTATE_BUSY,
+ base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.sw_state);
+ writel(IMGU_NUM_SP,
+ base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.num_dma_cmds);
+
+ for (i = 0; i < IMGU_NUM_SP; i++) {
+ int j = IMGU_NUM_SP - i - 1; /* load sp1 first, then sp0 */
+ struct imgu_fw_info *sp =
+ &css->fwp->binary_header[css->fw_sp[j]];
+ struct imgu_abi_bl_dma_cmd_entry dma_cmd = {
+ .src_addr = css->binary[css->fw_sp[j]].daddr
+ + sp->blob.text_source,
+ .size = sp->blob.text_size,
+ .dst_type = IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM,
+ .dst_addr = IMGU_SP_PMEM_BASE(j),
+ };
+
+ writes(&dma_cmd, sizeof(dma_cmd),
+ base + IMGU_REG_ISP_DMEM_BASE + i * sizeof(dma_cmd) +
+ bl->info.bl.dma_cmd_list);
+ }
+
+ writel(bl->info.bl.bl_entry, base + IMGU_REG_ISP_START_ADDR);
+
+ writel(readl(base + IMGU_REG_ISP_CTRL)
+ | IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_ISP_CTRL);
+ if (imgu_hw_wait(css->base, IMGU_REG_ISP_DMEM_BASE
+ + bl->info.bl.sw_state, ~0,
+ IMGU_ABI_BL_SWSTATE_OK)) {
+ dev_err(css->dev, "failed to start bootloader\n");
+ return -EIO;
+ }
+
+ /* Start ISP */
+
+ memset(css->xmem_sp_group_ptrs.vaddr, 0,
+ sizeof(struct imgu_abi_sp_group));
+
+ bi = &css->fwp->binary_header[css->fw_sp[0]];
+
+ writel(css->xmem_sp_group_ptrs.daddr,
+ base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.per_frame_data);
+
+ writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
+ base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state);
+ writel(1, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
+
+ if (imgu_css_hw_start_sp(css, 0))
+ return -EIO;
+
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.isp_started);
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.host_sp_queues_initialized);
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sleep_mode);
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
+ writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(0)
+ + bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
+
+ /* Enable all events for all queues */
+
+ for (i = 0; i < IPU3_CSS_PIPE_ID_NUM; i++)
+ writel(event_mask, base + IMGU_REG_SP_DMEM_BASE(0)
+ + bi->info.sp.host_sp_com
+ + IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(i));
+ writel(1, base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.host_sp_queues_initialized);
+
+ /* Start SP1 */
+
+ bi = &css->fwp->binary_header[css->fw_sp[1]];
+
+ writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
+ base + IMGU_REG_SP_DMEM_BASE(1) + bi->info.sp.sw_state);
+
+ if (imgu_css_hw_start_sp(css, 1))
+ return -EIO;
+
+ writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(1)
+ + bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
+
+ return 0;
+}
+
+static void imgu_css_hw_stop(struct imgu_css *css)
+{
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
+
+ /* Stop fw */
+ writel(IMGU_ABI_SP_COMM_COMMAND_TERMINATE,
+ base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
+ if (imgu_hw_wait(css->base, IMGU_REG_SP_CTRL(0),
+ IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
+ dev_err(css->dev, "wait sp0 idle timeout.\n");
+ if (readl(base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state) !=
+ IMGU_ABI_SP_SWSTATE_TERMINATED)
+ dev_err(css->dev, "sp0 is not terminated.\n");
+ if (imgu_hw_wait(css->base, IMGU_REG_ISP_CTRL,
+ IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
+ dev_err(css->dev, "wait isp idle timeout\n");
+}
+
+static void imgu_css_hw_cleanup(struct imgu_css *css)
+{
+ void __iomem *const base = css->base;
+
+ /** Reset CSS **/
+
+ /* Clear the CSS busy signal */
+ readl(base + IMGU_REG_GP_BUSY);
+ writel(0, base + IMGU_REG_GP_BUSY);
+
+ /* Wait for idle signal */
+ if (imgu_hw_wait(css->base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ IMGU_STATE_IDLE_STS))
+ dev_err(css->dev, "failed to shut down hw cleanly\n");
+
+ /* Reset the css */
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
+ base + IMGU_REG_PM_CTRL);
+
+ usleep_range(200, 300);
+}
+
+static void imgu_css_pipeline_cleanup(struct imgu_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ unsigned int i;
+
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.parameter_set_info);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.acc);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.gdc);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.obgrid);
+
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.binary_params_p[i]);
+}
+
+/*
+ * This function initializes various stages of the
+ * IPU3 CSS ISP pipeline
+ */
+static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
+{
+ static const int BYPC = 2; /* Bytes per component */
+ static const struct imgu_abi_buffer_sp buffer_sp_init = {
+ .buf_src = {.queue_id = IMGU_ABI_QUEUE_EVENT_ID},
+ .buf_type = IMGU_ABI_BUFFER_TYPE_INVALID,
+ };
+
+ struct imgu_abi_isp_iterator_config *cfg_iter;
+ struct imgu_abi_isp_ref_config *cfg_ref;
+ struct imgu_abi_isp_dvs_config *cfg_dvs;
+ struct imgu_abi_isp_tnr3_config *cfg_tnr;
+ struct imgu_abi_isp_ref_dmem_state *cfg_ref_state;
+ struct imgu_abi_isp_tnr3_dmem_state *cfg_tnr_state;
+
+ const int stage = 0;
+ unsigned int i, j;
+
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_queue *css_queue_in =
+ &css_pipe->queue[IPU3_CSS_QUEUE_IN];
+ struct imgu_css_queue *css_queue_out =
+ &css_pipe->queue[IPU3_CSS_QUEUE_OUT];
+ struct imgu_css_queue *css_queue_vf =
+ &css_pipe->queue[IPU3_CSS_QUEUE_VF];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
+
+ struct imgu_fw_config_memory_offsets *cofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_CONFIG];
+ struct imgu_fw_state_memory_offsets *sofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_STATE];
+
+ struct imgu_abi_isp_stage *isp_stage;
+ struct imgu_abi_sp_stage *sp_stage;
+ struct imgu_abi_sp_group *sp_group;
+ struct imgu_abi_frames_sp *frames_sp;
+ struct imgu_abi_frame_sp *frame_sp;
+ struct imgu_abi_frame_sp_info *frame_sp_info;
+
+ const unsigned int bds_width_pad =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
+ 2 * IPU3_UAPI_ISP_VEC_ELEMS);
+
+ const enum imgu_abi_memories m0 = IMGU_ABI_MEM_ISP_DMEM0;
+ enum imgu_abi_param_class cfg = IMGU_ABI_PARAM_CLASS_CONFIG;
+ void *vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
+
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+
+ dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
+
+ /* Configure iterator */
+
+ cfg_iter = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.iterator,
+ sizeof(*cfg_iter), vaddr);
+ if (!cfg_iter)
+ goto bad_firmware;
+
+ frame_sp_info = &cfg_iter->input_info;
+ frame_sp_info->res.width = css_queue_in->fmt.mpix.width;
+ frame_sp_info->res.height = css_queue_in->fmt.mpix.height;
+ frame_sp_info->padded_width = css_queue_in->width_pad;
+ frame_sp_info->format = css_queue_in->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_in->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_in->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp_info = &cfg_iter->internal_info;
+ frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ frame_sp_info->padded_width = bds_width_pad;
+ frame_sp_info->format = css_queue_out->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp_info = &cfg_iter->output_info;
+ frame_sp_info->res.width = css_queue_out->fmt.mpix.width;
+ frame_sp_info->res.height = css_queue_out->fmt.mpix.height;
+ frame_sp_info->padded_width = css_queue_out->width_pad;
+ frame_sp_info->format = css_queue_out->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp_info = &cfg_iter->vf_info;
+ frame_sp_info->res.width = css_queue_vf->fmt.mpix.width;
+ frame_sp_info->res.height = css_queue_vf->fmt.mpix.height;
+ frame_sp_info->padded_width = css_queue_vf->width_pad;
+ frame_sp_info->format = css_queue_vf->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ cfg_iter->dvs_envelope.width =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
+ cfg_iter->dvs_envelope.height =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
+
+ /* Configure reference (delay) frames */
+
+ cfg_ref = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.ref,
+ sizeof(*cfg_ref), vaddr);
+ if (!cfg_ref)
+ goto bad_firmware;
+
+ cfg_ref->port_b.crop = 0;
+ cfg_ref->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES / BYPC;
+ cfg_ref->port_b.width =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width;
+ cfg_ref->port_b.stride =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline;
+ cfg_ref->width_a_over_b =
+ IPU3_UAPI_ISP_VEC_ELEMS / cfg_ref->port_b.elems;
+ cfg_ref->dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
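+	/* The chroma plane follows the luma plane in each reference buffer */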
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++) {
+ cfg_ref->ref_frame_addr_y[i] =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr;
+ cfg_ref->ref_frame_addr_c[i] =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr +
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline *
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ }
+ for (; i < IMGU_ABI_FRAMES_REF; i++) {
+ cfg_ref->ref_frame_addr_y[i] = 0;
+ cfg_ref->ref_frame_addr_c[i] = 0;
+ }
+
+ /* Configure DVS (digital video stabilization) */
+
+ cfg_dvs = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.dvs, sizeof(*cfg_dvs),
+ vaddr);
+ if (!cfg_dvs)
+ goto bad_firmware;
+
+ cfg_dvs->num_horizontal_blocks =
+ ALIGN(DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
+ IMGU_DVS_BLOCK_W), 2);
+ cfg_dvs->num_vertical_blocks =
+ DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
+ IMGU_DVS_BLOCK_H);
+
+ /* Configure TNR (temporal noise reduction) */
+
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ cfg_tnr = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.tnr3,
+ sizeof(*cfg_tnr),
+ vaddr);
+ if (!cfg_tnr)
+ goto bad_firmware;
+
+ cfg_tnr->port_b.crop = 0;
+ cfg_tnr->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES;
+ cfg_tnr->port_b.width =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
+ cfg_tnr->port_b.stride =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline;
+ cfg_tnr->width_a_over_b =
+ IPU3_UAPI_ISP_VEC_ELEMS / cfg_tnr->port_b.elems;
+ cfg_tnr->frame_height =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
+ cfg_tnr->delay_frame = IPU3_CSS_AUX_FRAMES - 1;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ cfg_tnr->frame_addr[i] =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR]
+ .mem[i].daddr;
+ for (; i < IMGU_ABI_FRAMES_TNR; i++)
+ cfg_tnr->frame_addr[i] = 0;
+ }
+
+ /* Configure ref dmem state parameters */
+
+ cfg = IMGU_ABI_PARAM_CLASS_STATE;
+ vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
+
+ cfg_ref_state = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &sofs->dmem.ref,
+ sizeof(*cfg_ref_state),
+ vaddr);
+ if (!cfg_ref_state)
+ goto bad_firmware;
+
+ cfg_ref_state->ref_in_buf_idx = 0;
+ cfg_ref_state->ref_out_buf_idx = 1;
+
+ /* Configure tnr dmem state parameters */
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ cfg_tnr_state =
+ imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &sofs->dmem.tnr3,
+ sizeof(*cfg_tnr_state),
+ vaddr);
+ if (!cfg_tnr_state)
+ goto bad_firmware;
+
+ cfg_tnr_state->in_bufidx = 0;
+ cfg_tnr_state->out_bufidx = 1;
+ cfg_tnr_state->bypass_filter = 0;
+ cfg_tnr_state->total_frame_counter = 0;
+ for (i = 0; i < IMGU_ABI_BUF_SETS_TNR; i++)
+ cfg_tnr_state->buffer_frame_counter[i] = 0;
+ }
+
+ /* Configure ISP stage */
+
+ isp_stage = css_pipe->xmem_isp_stage_ptrs[pipe][stage].vaddr;
+ memset(isp_stage, 0, sizeof(*isp_stage));
+ isp_stage->blob_info = bi->blob;
+ isp_stage->binary_info = bi->info.isp.sp;
+ strscpy(isp_stage->binary_name,
+ (char *)css->fwp + bi->blob.prog_name_offset,
+ sizeof(isp_stage->binary_name));
+ isp_stage->mem_initializers = bi->info.isp.sp.mem_initializers;
+ for (i = IMGU_ABI_PARAM_CLASS_CONFIG; i < IMGU_ABI_PARAM_CLASS_NUM; i++)
+ for (j = 0; j < IMGU_ABI_NUM_MEMORIES; j++)
+ isp_stage->mem_initializers.params[i][j].address =
+ css_pipe->binary_params_cs[i - 1][j].daddr;
+
+ /* Configure SP stage */
+
+ sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr;
+ memset(sp_stage, 0, sizeof(*sp_stage));
+
+ frames_sp = &sp_stage->frames;
+ frames_sp->in.buf_attr = buffer_sp_init;
+ for (i = 0; i < IMGU_ABI_BINARY_MAX_OUTPUT_PORTS; i++)
+ frames_sp->out[i].buf_attr = buffer_sp_init;
+ frames_sp->out_vf.buf_attr = buffer_sp_init;
+ frames_sp->s3a_buf = buffer_sp_init;
+ frames_sp->dvs_buf = buffer_sp_init;
+
+ sp_stage->stage_type = IMGU_ABI_STAGE_TYPE_ISP;
+ sp_stage->num = stage;
+ sp_stage->isp_online = 0;
+ sp_stage->isp_copy_vf = 0;
+ sp_stage->isp_copy_output = 0;
+
+ sp_stage->enable.vf_output = css_pipe->vf_output_en;
+
+ frames_sp->effective_in_res.width =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ frames_sp->effective_in_res.height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+
+ frame_sp = &frames_sp->in;
+ frame_sp->info.res.width = css_queue_in->fmt.mpix.width;
+ frame_sp->info.res.height = css_queue_in->fmt.mpix.height;
+ frame_sp->info.padded_width = css_queue_in->width_pad;
+ frame_sp->info.format = css_queue_in->css_fmt->frame_format;
+ frame_sp->info.raw_bit_depth = css_queue_in->css_fmt->bit_depth;
+ frame_sp->info.raw_bayer_order = css_queue_in->css_fmt->bayer_order;
+ frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_C_ID;
+ frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_INPUT_FRAME;
+
+ frame_sp = &frames_sp->out[0];
+ frame_sp->info.res.width = css_queue_out->fmt.mpix.width;
+ frame_sp->info.res.height = css_queue_out->fmt.mpix.height;
+ frame_sp->info.padded_width = css_queue_out->width_pad;
+ frame_sp->info.format = css_queue_out->css_fmt->frame_format;
+ frame_sp->info.raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp->info.raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ frame_sp->planes.nv.uv.offset = css_queue_out->width_pad *
+ css_queue_out->fmt.mpix.height;
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_D_ID;
+ frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME;
+
+ frame_sp = &frames_sp->out[1];
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_EVENT_ID;
+
+ frame_sp_info = &frames_sp->internal_frame_info;
+ frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ frame_sp_info->padded_width = bds_width_pad;
+ frame_sp_info->format = css_queue_out->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp = &frames_sp->out_vf;
+ frame_sp->info.res.width = css_queue_vf->fmt.mpix.width;
+ frame_sp->info.res.height = css_queue_vf->fmt.mpix.height;
+ frame_sp->info.padded_width = css_queue_vf->width_pad;
+ frame_sp->info.format = css_queue_vf->css_fmt->frame_format;
+ frame_sp->info.raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
+ frame_sp->info.raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
+ frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ frame_sp->planes.yuv.u.offset = css_queue_vf->width_pad *
+ css_queue_vf->fmt.mpix.height;
+ frame_sp->planes.yuv.v.offset = css_queue_vf->width_pad *
+ css_queue_vf->fmt.mpix.height * 5 / 4;
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_E_ID;
+ frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME;
+
+ frames_sp->s3a_buf.buf_src.queue_id = IMGU_ABI_QUEUE_F_ID;
+ frames_sp->s3a_buf.buf_type = IMGU_ABI_BUFFER_TYPE_3A_STATISTICS;
+
+ frames_sp->dvs_buf.buf_src.queue_id = IMGU_ABI_QUEUE_G_ID;
+ frames_sp->dvs_buf.buf_type = IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS;
+
+ sp_stage->dvs_envelope.width =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
+ sp_stage->dvs_envelope.height =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
+
+ sp_stage->isp_pipe_version =
+ bi->info.isp.sp.pipeline.isp_pipe_version;
+ sp_stage->isp_deci_log_factor =
+ clamp(max(fls(css_pipe->rect[IPU3_CSS_RECT_BDS].width /
+ IMGU_MAX_BQ_GRID_WIDTH),
+ fls(css_pipe->rect[IPU3_CSS_RECT_BDS].height /
+ IMGU_MAX_BQ_GRID_HEIGHT)) - 1, 3, 5);
+ sp_stage->isp_vf_downscale_bits = 0;
+ sp_stage->if_config_index = 255;
+ sp_stage->sp_enable_xnr = 0;
+ sp_stage->num_stripes = stripes;
+ sp_stage->enable.s3a = 1;
+ sp_stage->enable.dvs_stats = 0;
+
+ sp_stage->xmem_bin_addr = css->binary[css_pipe->bindex].daddr;
+ sp_stage->xmem_map_addr = css_pipe->sp_ddr_ptrs.daddr;
+ sp_stage->isp_stage_addr =
+ css_pipe->xmem_isp_stage_ptrs[pipe][stage].daddr;
+
+ /* Configure SP group */
+
+ sp_group = css->xmem_sp_group_ptrs.vaddr;
+ memset(&sp_group->pipe[pipe], 0, sizeof(struct imgu_abi_sp_pipeline));
+
+ sp_group->pipe[pipe].num_stages = 1;
+ sp_group->pipe[pipe].pipe_id = css_pipe->pipe_id;
+ sp_group->pipe[pipe].thread_id = pipe;
+ sp_group->pipe[pipe].pipe_num = pipe;
+ sp_group->pipe[pipe].num_execs = -1;
+ sp_group->pipe[pipe].pipe_qos_config = -1;
+ sp_group->pipe[pipe].required_bds_factor = 0;
+ sp_group->pipe[pipe].dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
+ sp_group->pipe[pipe].inout_port_config =
+ IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST |
+ IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST;
+ sp_group->pipe[pipe].scaler_pp_lut = 0;
+ sp_group->pipe[pipe].shading.internal_frame_origin_x_bqs_on_sctbl = 0;
+ sp_group->pipe[pipe].shading.internal_frame_origin_y_bqs_on_sctbl = 0;
+ sp_group->pipe[pipe].sp_stage_addr[stage] =
+ css_pipe->xmem_sp_stage_ptrs[pipe][stage].daddr;
+ sp_group->pipe[pipe].pipe_config =
+ bi->info.isp.sp.enable.params ? (1 << pipe) : 0;
+ sp_group->pipe[pipe].pipe_config |= IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP;
+
+ /* Initialize parameter pools */
+
+ if (imgu_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
+ sizeof(struct imgu_abi_parameter_set_info)) ||
+ imgu_css_pool_init(imgu, &css_pipe->pool.acc,
+ sizeof(struct imgu_abi_acc_param)) ||
+ imgu_css_pool_init(imgu, &css_pipe->pool.gdc,
+ sizeof(struct imgu_abi_gdc_warp_param) *
+ 3 * cfg_dvs->num_horizontal_blocks / 2 *
+ cfg_dvs->num_vertical_blocks) ||
+ imgu_css_pool_init(imgu, &css_pipe->pool.obgrid,
+ imgu_css_fw_obgrid_size(
+ &css->fwp->binary_header[css_pipe->bindex])))
+ goto out_of_memory;
+
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ if (imgu_css_pool_init(imgu,
+ &css_pipe->pool.binary_params_p[i],
+ bi->info.isp.sp.mem_initializers.params
+ [IMGU_ABI_PARAM_CLASS_PARAM][i].size))
+ goto out_of_memory;
+
+ return 0;
+
+bad_firmware:
+ imgu_css_pipeline_cleanup(css, pipe);
+ return -EPROTO;
+
+out_of_memory:
+ imgu_css_pipeline_cleanup(css, pipe);
+ return -ENOMEM;
+}
+
+static u8 imgu_css_queue_pos(struct imgu_css *css, int queue, int thread)
+{
+ static const unsigned int sp;
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
+ bi->info.sp.host_sp_queue;
+
+ return queue >= 0 ? readb(&q->host2sp_bufq_info[thread][queue].end) :
+ readb(&q->host2sp_evtq_info.end);
+}
+
+/* Send data to SP using given buffer queue, or the event queue if queue < 0. */
+static int imgu_css_queue_data(struct imgu_css *css,
+ int queue, int thread, u32 data)
+{
+ static const unsigned int sp;
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
+ bi->info.sp.host_sp_queue;
+ u8 size, start, end, end2;
+
+ if (queue >= 0) {
+ size = readb(&q->host2sp_bufq_info[thread][queue].size);
+ start = readb(&q->host2sp_bufq_info[thread][queue].start);
+ end = readb(&q->host2sp_bufq_info[thread][queue].end);
+ } else {
+ size = readb(&q->host2sp_evtq_info.size);
+ start = readb(&q->host2sp_evtq_info.start);
+ end = readb(&q->host2sp_evtq_info.end);
+ }
+
+ if (size == 0)
+ return -EIO;
+
+ end2 = (end + 1) % size;
+ if (end2 == start)
+ return -EBUSY; /* Queue full */
+
+ if (queue >= 0) {
+ writel(data, &q->host2sp_bufq[thread][queue][end]);
+ writeb(end2, &q->host2sp_bufq_info[thread][queue].end);
+ } else {
+ writel(data, &q->host2sp_evtq[end]);
+ writeb(end2, &q->host2sp_evtq_info.end);
+ }
+
+ return 0;
+}
+
+/* Receive data using given buffer queue, or the event queue if queue < 0. */
+static int imgu_css_dequeue_data(struct imgu_css *css, int queue, u32 *data)
+{
+ static const unsigned int sp;
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
+ bi->info.sp.host_sp_queue;
+ u8 size, start, end, start2;
+
+ if (queue >= 0) {
+ size = readb(&q->sp2host_bufq_info[queue].size);
+ start = readb(&q->sp2host_bufq_info[queue].start);
+ end = readb(&q->sp2host_bufq_info[queue].end);
+ } else {
+ size = readb(&q->sp2host_evtq_info.size);
+ start = readb(&q->sp2host_evtq_info.start);
+ end = readb(&q->sp2host_evtq_info.end);
+ }
+
+ if (size == 0)
+ return -EIO;
+
+ if (end == start)
+ return -EBUSY; /* Queue empty */
+
+ start2 = (start + 1) % size;
+
+ if (queue >= 0) {
+ *data = readl(&q->sp2host_bufq[queue][start]);
+ writeb(start2, &q->sp2host_bufq_info[queue].start);
+ } else {
+ int r;
+
+ *data = readl(&q->sp2host_evtq[start]);
+ writeb(start2, &q->sp2host_evtq_info.start);
+
+ /* Acknowledge events dequeued from event queue */
+ r = imgu_css_queue_data(css, queue, 0,
+ IMGU_ABI_EVENT_EVENT_DEQUEUED);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
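
The two helpers above share a single circular-queue convention that is easy to miss: the queue is full when advancing 'end' would land on 'start' (one slot is always left unused), and empty when 'end' equals 'start'. Below is a minimal, standalone C model of that convention; it is not driver code, and QSIZE and struct ring are invented purely for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QSIZE 8	/* stands in for the 'size' field read from SP DMEM */

struct ring {
	uint8_t start, end;
	uint32_t slot[QSIZE];
};

static bool ring_put(struct ring *r, uint32_t data)
{
	uint8_t end2 = (r->end + 1) % QSIZE;

	if (end2 == r->start)
		return false;		/* full; the driver returns -EBUSY */
	r->slot[r->end] = data;		/* write the payload first ... */
	r->end = end2;			/* ... then publish the new 'end' */
	return true;
}

static bool ring_get(struct ring *r, uint32_t *data)
{
	if (r->end == r->start)
		return false;		/* empty; the driver returns -EBUSY */
	*data = r->slot[r->start];
	r->start = (r->start + 1) % QSIZE;
	return true;
}

int main(void)
{
	struct ring r = { 0 };
	uint32_t v;
	int n = 0;

	while (ring_put(&r, n))
		n++;
	printf("accepted %d of %d slots\n", n, QSIZE);	/* prints 7 of 8 */
	while (ring_get(&r, &v))
		printf("got %u\n", v);
	return 0;
}
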
+/* Free binary-specific resources */
+static void imgu_css_binary_cleanup(struct imgu_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int i, j;
+
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ for (j = 0; j < IMGU_ABI_PARAM_CLASS_NUM - 1; j++)
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ imgu_dmamap_free(imgu,
+ &css_pipe->binary_params_cs[j][i]);
+
+ j = IPU3_CSS_AUX_FRAME_REF;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ imgu_dmamap_free(imgu,
+ &css_pipe->aux_frames[j].mem[i]);
+
+ j = IPU3_CSS_AUX_FRAME_TNR;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ imgu_dmamap_free(imgu,
+ &css_pipe->aux_frames[j].mem[i]);
+}
+
+static int imgu_css_binary_preallocate(struct imgu_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int i, j;
+
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ for (j = IMGU_ABI_PARAM_CLASS_CONFIG;
+ j < IMGU_ABI_PARAM_CLASS_NUM; j++)
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ if (!imgu_dmamap_alloc(imgu,
+ &css_pipe->binary_params_cs[j - 1][i],
+ CSS_ABI_SIZE))
+ goto out_of_memory;
+
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (!imgu_dmamap_alloc(imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
+ CSS_BDS_SIZE))
+ goto out_of_memory;
+
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (!imgu_dmamap_alloc(imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
+ CSS_GDC_SIZE))
+ goto out_of_memory;
+
+ return 0;
+
+out_of_memory:
+ imgu_css_binary_cleanup(css, pipe);
+ return -ENOMEM;
+}
+
+/* Allocate binary-specific resources */
+static int imgu_css_binary_setup(struct imgu_css *css, unsigned int pipe)
+{
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ int i, j, size;
+ static const int BYPC = 2; /* Bytes per component */
+ unsigned int w, h;
+
+ /* Allocate parameter memory blocks for this binary */
+
+ for (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {
+ if (imgu_css_dma_buffer_resize(
+ imgu,
+ &css_pipe->binary_params_cs[j - 1][i],
+ bi->info.isp.sp.mem_initializers.params[j][i].size))
+ goto out_of_memory;
+ }
+
+ /* Allocate internal frame buffers */
+
+ /* Reference frames for DVS, FRAME_FORMAT_YUV420_16 */
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel = BYPC;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ IMGU_DVS_BLOCK_H) + 2 * IMGU_GDC_BUF_Y;
+ h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ w = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
+ 2 * IPU3_UAPI_ISP_VEC_ELEMS) + 2 * IMGU_GDC_BUF_X;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel * w;
+ size = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (imgu_css_dma_buffer_resize(
+ imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
+ size))
+ goto out_of_memory;
+
+ /* TNR frames for temporal noise reduction, FRAME_FORMAT_YUV_LINE */
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperpixel = 1;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
+ bi->info.isp.sp.block.block_width *
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
+ bi->info.isp.sp.block.output_block_height);
+
+ w = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline = w;
+ h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
+ size = w * ALIGN(h * 3 / 2 + 3, 2); /* +3 for vf_pp prefetch */
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (imgu_css_dma_buffer_resize(
+ imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
+ size))
+ goto out_of_memory;
+
+ return 0;
+
+out_of_memory:
+ imgu_css_binary_cleanup(css, pipe);
+ return -ENOMEM;
+}
+
+int imgu_css_start_streaming(struct imgu_css *css)
+{
+ u32 data;
+ int r, pipe;
+
+ if (css->streaming)
+ return -EPROTO;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_css_binary_setup(css, pipe);
+ if (r < 0)
+ return r;
+ }
+
+ r = imgu_css_hw_init(css);
+ if (r < 0)
+ return r;
+
+ r = imgu_css_hw_start(css);
+ if (r < 0)
+ goto fail;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_css_pipeline_init(css, pipe);
+ if (r < 0)
+ goto fail;
+ }
+
+ css->streaming = true;
+
+ imgu_css_hw_enable_irq(css);
+
+ /* Initialize parameters to default */
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_css_set_parameters(css, pipe, NULL);
+ if (r < 0)
+ goto fail;
+ }
+
+ while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_A_ID, &data)))
+ ;
+ if (r != -EBUSY)
+ goto fail;
+
+ while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_B_ID, &data)))
+ ;
+ if (r != -EBUSY)
+ goto fail;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_START_STREAM |
+ pipe << 16);
+ if (r < 0)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ css->streaming = false;
+ imgu_css_hw_cleanup(css);
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ imgu_css_pipeline_cleanup(css, pipe);
+ imgu_css_binary_cleanup(css, pipe);
+ }
+
+ return r;
+}
+
+void imgu_css_stop_streaming(struct imgu_css *css)
+{
+ struct imgu_css_buffer *b, *b0;
+ int q, r, pipe;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_STOP_STREAM);
+ if (r < 0)
+ dev_warn(css->dev, "failed on stop stream event\n");
+ }
+
+ if (!css->streaming)
+ return;
+
+ imgu_css_hw_stop(css);
+
+ imgu_css_hw_cleanup(css);
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ imgu_css_pipeline_cleanup(css, pipe);
+
+ spin_lock(&css_pipe->qlock);
+ for (q = 0; q < IPU3_CSS_QUEUES; q++)
+ list_for_each_entry_safe(b, b0,
+ &css_pipe->queue[q].bufs,
+ list) {
+ b->state = IPU3_CSS_BUFFER_FAILED;
+ list_del(&b->list);
+ }
+ spin_unlock(&css_pipe->qlock);
+ }
+
+ css->streaming = false;
+}
+
+bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe)
+{
+ int q;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ spin_lock(&css_pipe->qlock);
+ for (q = 0; q < IPU3_CSS_QUEUES; q++)
+ if (!list_empty(&css_pipe->queue[q].bufs))
+ break;
+ spin_unlock(&css_pipe->qlock);
+ return (q == IPU3_CSS_QUEUES);
+}
+
+bool imgu_css_queue_empty(struct imgu_css *css)
+{
+ unsigned int pipe;
+	bool ret = true;
+
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
+ ret &= imgu_css_pipe_queue_empty(css, pipe);
+
+ return ret;
+}
+
+bool imgu_css_is_streaming(struct imgu_css *css)
+{
+ return css->streaming;
+}
+
+static int imgu_css_map_init(struct imgu_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ unsigned int p, q, i;
+
+ /* Allocate and map common structures with imgu hardware */
+ for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
+ if (!imgu_dmamap_alloc(imgu,
+ &css_pipe->xmem_sp_stage_ptrs[p][i],
+ sizeof(struct imgu_abi_sp_stage)))
+ return -ENOMEM;
+ if (!imgu_dmamap_alloc(imgu,
+ &css_pipe->xmem_isp_stage_ptrs[p][i],
+ sizeof(struct imgu_abi_isp_stage)))
+ return -ENOMEM;
+ }
+
+ if (!imgu_dmamap_alloc(imgu, &css_pipe->sp_ddr_ptrs,
+ ALIGN(sizeof(struct imgu_abi_ddr_address_map),
+ IMGU_ABI_ISP_DDR_WORD_BYTES)))
+ return -ENOMEM;
+
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
+
+ for (i = 0; i < abi_buf_num; i++)
+ if (!imgu_dmamap_alloc(imgu,
+ &css_pipe->abi_buffers[q][i],
+ sizeof(struct imgu_abi_buffer)))
+ return -ENOMEM;
+ }
+
+ if (imgu_css_binary_preallocate(css, pipe)) {
+ imgu_css_binary_cleanup(css, pipe);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void imgu_css_pipe_cleanup(struct imgu_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ unsigned int p, q, i, abi_buf_num;
+
+ imgu_css_binary_cleanup(css, pipe);
+
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
+ for (i = 0; i < abi_buf_num; i++)
+ imgu_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
+ }
+
+ for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
+ imgu_dmamap_free(imgu,
+ &css_pipe->xmem_sp_stage_ptrs[p][i]);
+ imgu_dmamap_free(imgu,
+ &css_pipe->xmem_isp_stage_ptrs[p][i]);
+ }
+
+ imgu_dmamap_free(imgu, &css_pipe->sp_ddr_ptrs);
+}
+
+void imgu_css_cleanup(struct imgu_css *css)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int pipe;
+
+ imgu_css_stop_streaming(css);
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
+ imgu_css_pipe_cleanup(css, pipe);
+ imgu_dmamap_free(imgu, &css->xmem_sp_group_ptrs);
+ imgu_css_fw_cleanup(css);
+}
+
+int imgu_css_init(struct device *dev, struct imgu_css *css,
+ void __iomem *base, int length)
+{
+ struct imgu_device *imgu = dev_get_drvdata(dev);
+ int r, q, pipe;
+
+ /* Initialize main data structure */
+ css->dev = dev;
+ css->base = base;
+ css->iomem_length = length;
+
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++) {
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ css_pipe->vf_output_en = false;
+ spin_lock_init(&css_pipe->qlock);
+ css_pipe->bindex = IPU3_CSS_DEFAULT_BINARY;
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ r = imgu_css_queue_init(&css_pipe->queue[q], NULL, 0);
+ if (r)
+ return r;
+ }
+ r = imgu_css_map_init(css, pipe);
+ if (r) {
+ imgu_css_cleanup(css);
+ return r;
+ }
+ }
+ if (!imgu_dmamap_alloc(imgu, &css->xmem_sp_group_ptrs,
+ sizeof(struct imgu_abi_sp_group)))
+ return -ENOMEM;
+
+ r = imgu_css_fw_init(css);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static u32 imgu_css_adjust(u32 res, u32 align)
+{
+ u32 val = max_t(u32, IPU3_CSS_MIN_RES, res);
+
+ return DIV_ROUND_CLOSEST(val, align) * align;
+}
+
+/* Select a binary matching the required resolutions and formats */
+static int imgu_css_find_binary(struct imgu_css *css,
+ unsigned int pipe,
+ struct imgu_css_queue queue[IPU3_CSS_QUEUES],
+ struct v4l2_rect rects[IPU3_CSS_RECTS])
+{
+ const int binary_nr = css->fwp->file_header.binary_nr;
+ unsigned int binary_mode =
+ (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_CAPTURE) ?
+ IA_CSS_BINARY_MODE_PRIMARY : IA_CSS_BINARY_MODE_VIDEO;
+ const struct v4l2_pix_format_mplane *in =
+ &queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ const struct v4l2_pix_format_mplane *out =
+ &queue[IPU3_CSS_QUEUE_OUT].fmt.mpix;
+ const struct v4l2_pix_format_mplane *vf =
+ &queue[IPU3_CSS_QUEUE_VF].fmt.mpix;
+ u32 stripe_w = 0, stripe_h = 0;
+ const char *name;
+ int i, j;
+
+ if (!imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_IN]))
+ return -EINVAL;
+
+ /* Find out the strip size boundary */
+ for (i = 0; i < binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+
+ u32 max_width = bi->info.isp.sp.output.max_width;
+ u32 max_height = bi->info.isp.sp.output.max_height;
+
+ if (bi->info.isp.sp.iterator.num_stripes <= 1) {
+ stripe_w = stripe_w ?
+ min(stripe_w, max_width) : max_width;
+ stripe_h = stripe_h ?
+ min(stripe_h, max_height) : max_height;
+ }
+ }
+
+ for (i = 0; i < binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+ enum imgu_abi_frame_format q_fmt;
+
+ name = (void *)css->fwp + bi->blob.prog_name_offset;
+
+ /* Check that binary supports memory-to-memory processing */
+ if (bi->info.isp.sp.input.source !=
+ IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY)
+ continue;
+
+ /* Check that binary supports raw10 input */
+ if (!bi->info.isp.sp.enable.input_feeder &&
+ !bi->info.isp.sp.enable.input_raw)
+ continue;
+
+ /* Check binary mode */
+ if (bi->info.isp.sp.pipeline.mode != binary_mode)
+ continue;
+
+ /* Since input is RGGB bayer, need to process colors */
+ if (bi->info.isp.sp.enable.luma_only)
+ continue;
+
+ if (in->width < bi->info.isp.sp.input.min_width ||
+ in->width > bi->info.isp.sp.input.max_width ||
+ in->height < bi->info.isp.sp.input.min_height ||
+ in->height > bi->info.isp.sp.input.max_height)
+ continue;
+
+ if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_OUT])) {
+ if (bi->info.isp.num_output_pins <= 0)
+ continue;
+
+ q_fmt = queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ for (j = 0; j < bi->info.isp.num_output_formats; j++)
+ if (bi->info.isp.output_formats[j] == q_fmt)
+ break;
+ if (j >= bi->info.isp.num_output_formats)
+ continue;
+
+ if (out->width < bi->info.isp.sp.output.min_width ||
+ out->width > bi->info.isp.sp.output.max_width ||
+ out->height < bi->info.isp.sp.output.min_height ||
+ out->height > bi->info.isp.sp.output.max_height)
+ continue;
+
+ if (out->width > bi->info.isp.sp.internal.max_width ||
+ out->height > bi->info.isp.sp.internal.max_height)
+ continue;
+ }
+
+ if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_VF])) {
+ if (bi->info.isp.num_output_pins <= 1)
+ continue;
+
+ q_fmt = queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+ for (j = 0; j < bi->info.isp.num_output_formats; j++)
+ if (bi->info.isp.output_formats[j] == q_fmt)
+ break;
+ if (j >= bi->info.isp.num_output_formats)
+ continue;
+
+ if (vf->width < bi->info.isp.sp.output.min_width ||
+ vf->width > bi->info.isp.sp.output.max_width ||
+ vf->height < bi->info.isp.sp.output.min_height ||
+ vf->height > bi->info.isp.sp.output.max_height)
+ continue;
+ }
+
+ /* All checks passed, select the binary */
+ dev_dbg(css->dev, "using binary %s id = %u\n", name,
+ bi->info.isp.sp.id);
+ return i;
+ }
+
+	/* Could not find a suitable binary for these parameters */
+ return -EINVAL;
+}
+
+/*
+ * Check that there is a binary matching the requirements. Parameters may
+ * be NULL, indicating a disabled input/output. Returns a negative value if
+ * the given parameters cannot be supported or on error, zero or a positive
+ * value indicating the found binary number. May modify the given
+ * parameters if no exact match is found.
+ */
+int imgu_css_fmt_try(struct imgu_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe)
+{
+ static const u32 EFF_ALIGN_W = 2;
+ static const u32 BDS_ALIGN_W = 4;
+ static const u32 OUT_ALIGN_W = 8;
+ static const u32 OUT_ALIGN_H = 4;
+ static const u32 VF_ALIGN_W = 2;
+ static const char *qnames[IPU3_CSS_QUEUES] = {
+ [IPU3_CSS_QUEUE_IN] = "in",
+ [IPU3_CSS_QUEUE_PARAMS] = "params",
+ [IPU3_CSS_QUEUE_OUT] = "out",
+ [IPU3_CSS_QUEUE_VF] = "vf",
+ [IPU3_CSS_QUEUE_STAT_3A] = "3a",
+ };
+ static const char *rnames[IPU3_CSS_RECTS] = {
+ [IPU3_CSS_RECT_EFFECTIVE] = "effective resolution",
+ [IPU3_CSS_RECT_BDS] = "bayer-domain scaled resolution",
+ [IPU3_CSS_RECT_ENVELOPE] = "DVS envelope size",
+ [IPU3_CSS_RECT_GDC] = "GDC output res",
+ };
+ struct v4l2_rect r[IPU3_CSS_RECTS] = { };
+ struct v4l2_rect *const eff = &r[IPU3_CSS_RECT_EFFECTIVE];
+ struct v4l2_rect *const bds = &r[IPU3_CSS_RECT_BDS];
+ struct v4l2_rect *const env = &r[IPU3_CSS_RECT_ENVELOPE];
+ struct v4l2_rect *const gdc = &r[IPU3_CSS_RECT_GDC];
+ struct imgu_css_queue *q;
+ struct v4l2_pix_format_mplane *in, *out, *vf;
+ int i, s, ret;
+
+ q = kcalloc(IPU3_CSS_QUEUES, sizeof(struct imgu_css_queue), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ in = &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ out = &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
+ vf = &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
+
+ /* Adjust all formats, get statistics buffer sizes and formats */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ if (fmts[i])
+ dev_dbg(css->dev, "%s %s: (%i,%i) fmt 0x%x\n", __func__,
+ qnames[i], fmts[i]->width, fmts[i]->height,
+ fmts[i]->pixelformat);
+ else
+ dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
+ qnames[i]);
+ if (imgu_css_queue_init(&q[i], fmts[i],
+ IPU3_CSS_QUEUE_TO_FLAGS(i))) {
+ dev_notice(css->dev, "can not initialize queue %s\n",
+ qnames[i]);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ for (i = 0; i < IPU3_CSS_RECTS; i++) {
+ if (rects[i]) {
+ dev_dbg(css->dev, "%s %s: (%i,%i)\n", __func__,
+ rnames[i], rects[i]->width, rects[i]->height);
+ r[i].width = rects[i]->width;
+ r[i].height = rects[i]->height;
+ } else {
+ dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
+ rnames[i]);
+ }
+ /* For now, force known good resolutions */
+ r[i].left = 0;
+ r[i].top = 0;
+ }
+
+	/* Input and output queues must always be enabled */
+ if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
+ !imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
+ dev_warn(css->dev, "required queues are disabled\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
+ out->width = in->width;
+ out->height = in->height;
+ }
+ if (eff->width <= 0 || eff->height <= 0) {
+ eff->width = in->width;
+ eff->height = in->height;
+ }
+ if (bds->width <= 0 || bds->height <= 0) {
+ bds->width = out->width;
+ bds->height = out->height;
+ }
+ if (gdc->width <= 0 || gdc->height <= 0) {
+ gdc->width = out->width;
+ gdc->height = out->height;
+ }
+
+ in->width = imgu_css_adjust(in->width, 1);
+ in->height = imgu_css_adjust(in->height, 1);
+ eff->width = imgu_css_adjust(eff->width, EFF_ALIGN_W);
+ eff->height = imgu_css_adjust(eff->height, 1);
+ bds->width = imgu_css_adjust(bds->width, BDS_ALIGN_W);
+ bds->height = imgu_css_adjust(bds->height, 1);
+ gdc->width = imgu_css_adjust(gdc->width, OUT_ALIGN_W);
+ gdc->height = imgu_css_adjust(gdc->height, OUT_ALIGN_H);
+ out->width = imgu_css_adjust(out->width, OUT_ALIGN_W);
+ out->height = imgu_css_adjust(out->height, OUT_ALIGN_H);
+ vf->width = imgu_css_adjust(vf->width, VF_ALIGN_W);
+ vf->height = imgu_css_adjust(vf->height, 1);
+
+ s = (bds->width - gdc->width) / 2;
+ env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
+ s = (bds->height - gdc->height) / 2;
+ env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
+
+ ret = imgu_css_find_binary(css, pipe, q, r);
+ if (ret < 0) {
+ dev_err(css->dev, "failed to find suitable binary\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ css->pipes[pipe].bindex = ret;
+
+ dev_dbg(css->dev, "Binary index %d for pipe %d found.",
+ css->pipes[pipe].bindex, pipe);
+
+ /* Final adjustment and set back the queried formats */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ if (fmts[i]) {
+ if (imgu_css_queue_init(&q[i], &q[i].fmt.mpix,
+ IPU3_CSS_QUEUE_TO_FLAGS(i))) {
+ dev_err(css->dev,
+ "final resolution adjustment failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ *fmts[i] = q[i].fmt.mpix;
+ }
+ }
+
+ for (i = 0; i < IPU3_CSS_RECTS; i++)
+ if (rects[i])
+ *rects[i] = r[i];
+
+ dev_dbg(css->dev,
+ "in(%u,%u) if(%u,%u) ds(%u,%u) gdc(%u,%u) out(%u,%u) vf(%u,%u)",
+ in->width, in->height, eff->width, eff->height,
+ bds->width, bds->height, gdc->width, gdc->height,
+ out->width, out->height, vf->width, vf->height);
+
+ ret = 0;
+out:
+ kfree(q);
+ return ret;
+}
+
+int imgu_css_fmt_set(struct imgu_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe)
+{
+ struct v4l2_rect rect_data[IPU3_CSS_RECTS];
+ struct v4l2_rect *all_rects[IPU3_CSS_RECTS];
+ int i, r;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ for (i = 0; i < IPU3_CSS_RECTS; i++) {
+ if (rects[i])
+ rect_data[i] = *rects[i];
+ else
+ memset(&rect_data[i], 0, sizeof(rect_data[i]));
+ all_rects[i] = &rect_data[i];
+ }
+ r = imgu_css_fmt_try(css, fmts, all_rects, pipe);
+ if (r < 0)
+ return r;
+
+ for (i = 0; i < IPU3_CSS_QUEUES; i++)
+ if (imgu_css_queue_init(&css_pipe->queue[i], fmts[i],
+ IPU3_CSS_QUEUE_TO_FLAGS(i)))
+ return -EINVAL;
+ for (i = 0; i < IPU3_CSS_RECTS; i++) {
+ css_pipe->rect[i] = rect_data[i];
+ if (rects[i])
+ *rects[i] = rect_data[i];
+ }
+
+ return 0;
+}
+
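
For reference, this is roughly how a caller negotiates formats with the two functions above. It is a sketch only: example_negotiate() is a hypothetical helper, the pixel formats and 1920x1080 sizes are made up, and a real caller (the V4L2 layer) fills the arrays from user-space requests.

static int example_negotiate(struct imgu_css *css, unsigned int pipe)
{
	struct v4l2_pix_format_mplane in = {
		.width = 1920, .height = 1080,
		.pixelformat = V4L2_PIX_FMT_IPU3_SGRBG10,
	};
	struct v4l2_pix_format_mplane out = {
		.width = 1920, .height = 1080,
		.pixelformat = V4L2_PIX_FMT_NV12,
	};
	/* NULL entries mean "queue disabled" / "rectangle not set" */
	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = {
		[IPU3_CSS_QUEUE_IN] = &in,
		[IPU3_CSS_QUEUE_OUT] = &out,
	};
	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
	int r;

	/* May adjust the formats to what the selected binary supports */
	r = imgu_css_fmt_try(css, fmts, rects, pipe);
	if (r < 0)
		return r;

	/* Commit the (possibly adjusted) formats to the pipe */
	return imgu_css_fmt_set(css, fmts, rects, pipe);
}
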
+int imgu_css_meta_fmt_set(struct v4l2_meta_format *fmt)
+{
+ switch (fmt->dataformat) {
+ case V4L2_META_FMT_IPU3_PARAMS:
+ fmt->buffersize = sizeof(struct ipu3_uapi_params);
+
+ /*
+ * Sanity check for the parameter struct size. This must
+ * not change!
+ */
+ BUILD_BUG_ON(sizeof(struct ipu3_uapi_params) != 39328);
+
+ break;
+ case V4L2_META_FMT_IPU3_STAT_3A:
+ fmt->buffersize = sizeof(struct ipu3_uapi_stats_3a);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Queue the given buffer to CSS. imgu_css_buf_prepare() must have been
+ * called for the buffer first. May be called from interrupt context.
+ * Returns 0 on success, -EBUSY if the buffer queue is full, or another
+ * negative error code on failure.
+ */
+int imgu_css_buf_queue(struct imgu_css *css, unsigned int pipe,
+ struct imgu_css_buffer *b)
+{
+ struct imgu_abi_buffer *abi_buf;
+ struct imgu_addr_t *buf_addr;
+ u32 data;
+ int r;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+
+ if (!css->streaming)
+ return -EPROTO; /* CSS or buffer in wrong state */
+
+ if (b->queue >= IPU3_CSS_QUEUES || !imgu_css_queues[b->queue].qid)
+ return -EINVAL;
+
+ b->queue_pos = imgu_css_queue_pos(css, imgu_css_queues[b->queue].qid,
+ pipe);
+
+ if (b->queue_pos >= ARRAY_SIZE(css->pipes[pipe].abi_buffers[b->queue]))
+ return -EIO;
+ abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr;
+
+ /* Fill struct abi_buffer for firmware */
+ memset(abi_buf, 0, sizeof(*abi_buf));
+
+ buf_addr = (void *)abi_buf + imgu_css_queues[b->queue].ptr_ofs;
+ *(imgu_addr_t *)buf_addr = b->daddr;
+
+ if (b->queue == IPU3_CSS_QUEUE_STAT_3A)
+ abi_buf->payload.s3a.data.dmem.s3a_tbl = b->daddr;
+
+ if (b->queue == IPU3_CSS_QUEUE_OUT)
+ abi_buf->payload.frame.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+
+ if (b->queue == IPU3_CSS_QUEUE_VF)
+ abi_buf->payload.frame.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
+
+ spin_lock(&css_pipe->qlock);
+ list_add_tail(&b->list, &css_pipe->queue[b->queue].bufs);
+ spin_unlock(&css_pipe->qlock);
+ b->state = IPU3_CSS_BUFFER_QUEUED;
+
+ data = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].daddr;
+ r = imgu_css_queue_data(css, imgu_css_queues[b->queue].qid,
+ pipe, data);
+ if (r < 0)
+ goto queueing_failed;
+
+ data = IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
+ imgu_css_queues[b->queue].qid);
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe, data);
+ if (r < 0)
+ goto queueing_failed;
+
+ dev_dbg(css->dev, "queued buffer %p to css queue %i in pipe %d\n",
+ b, b->queue, pipe);
+
+ return 0;
+
+queueing_failed:
+ b->state = (r == -EBUSY || r == -EAGAIN) ?
+ IPU3_CSS_BUFFER_NEW : IPU3_CSS_BUFFER_FAILED;
+ list_del(&b->list);
+
+ return r;
+}
+
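
A sketch of the queueing side, assuming the buffer's DMA address has already been obtained from the videobuf2/DMA-mapping layer; example_queue_output() is a hypothetical helper, not part of the driver.

static int example_queue_output(struct imgu_css *css, unsigned int pipe,
				struct imgu_css_buffer *b, dma_addr_t daddr)
{
	int r;

	imgu_css_buf_init(b, IPU3_CSS_QUEUE_OUT, daddr);

	r = imgu_css_buf_queue(css, pipe, b);
	if (r == -EBUSY)
		return r;	/* buffer is back in NEW state; requeue later */
	if (r < 0)
		return r;	/* buffer ended up in FAILED state */

	/* On success the firmware owns the buffer until it is dequeued */
	return 0;
}
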
+/*
+ * Get the next ready CSS buffer. Returns ERR_PTR(-EAGAIN), in which case the
+ * function should be called again, or ERR_PTR(-EBUSY), meaning there are no
+ * more buffers available. May be called from interrupt context.
+ */
+struct imgu_css_buffer *imgu_css_buf_dequeue(struct imgu_css *css)
+{
+ static const unsigned char evtype_to_queue[] = {
+ [IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE] = IPU3_CSS_QUEUE_IN,
+ [IMGU_ABI_EVTTYPE_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_OUT,
+ [IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_VF,
+ [IMGU_ABI_EVTTYPE_3A_STATS_DONE] = IPU3_CSS_QUEUE_STAT_3A,
+ };
+ struct imgu_css_buffer *b = ERR_PTR(-EAGAIN);
+ u32 event, daddr;
+ int evtype, pipe, pipeid, queue, qid, r;
+ struct imgu_css_pipe *css_pipe;
+
+ if (!css->streaming)
+ return ERR_PTR(-EPROTO);
+
+ r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
+ if (r < 0)
+ return ERR_PTR(r);
+
+ evtype = (event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
+ IMGU_ABI_EVTTYPE_EVENT_SHIFT;
+
+ switch (evtype) {
+ case IMGU_ABI_EVTTYPE_OUT_FRAME_DONE:
+ case IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE:
+ case IMGU_ABI_EVTTYPE_3A_STATS_DONE:
+ case IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE:
+ pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
+ IMGU_ABI_EVTTYPE_PIPE_SHIFT;
+ pipeid = (event & IMGU_ABI_EVTTYPE_PIPEID_MASK) >>
+ IMGU_ABI_EVTTYPE_PIPEID_SHIFT;
+ queue = evtype_to_queue[evtype];
+ qid = imgu_css_queues[queue].qid;
+
+ if (pipe >= IMGU_MAX_PIPE_NUM) {
+ dev_err(css->dev, "Invalid pipe: %i\n", pipe);
+ return ERR_PTR(-EIO);
+ }
+
+ if (qid >= IMGU_ABI_QUEUE_NUM) {
+ dev_err(css->dev, "Invalid qid: %i\n", qid);
+ return ERR_PTR(-EIO);
+ }
+ css_pipe = &css->pipes[pipe];
+ dev_dbg(css->dev,
+ "event: buffer done 0x%x queue %i pipe %i pipeid %i\n",
+ event, queue, pipe, pipeid);
+
+ r = imgu_css_dequeue_data(css, qid, &daddr);
+ if (r < 0) {
+ dev_err(css->dev, "failed to dequeue buffer\n");
+ /* Force real error, not -EBUSY */
+ return ERR_PTR(-EIO);
+ }
+
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_BUFFER_DEQUEUED(qid));
+ if (r < 0) {
+ dev_err(css->dev, "failed to queue event\n");
+ return ERR_PTR(-EIO);
+ }
+
+ spin_lock(&css_pipe->qlock);
+ if (list_empty(&css_pipe->queue[queue].bufs)) {
+ spin_unlock(&css_pipe->qlock);
+ dev_err(css->dev, "event on empty queue\n");
+ return ERR_PTR(-EIO);
+ }
+ b = list_first_entry(&css_pipe->queue[queue].bufs,
+ struct imgu_css_buffer, list);
+ if (queue != b->queue ||
+ daddr != css_pipe->abi_buffers
+ [b->queue][b->queue_pos].daddr) {
+ spin_unlock(&css_pipe->qlock);
+ dev_err(css->dev, "dequeued bad buffer 0x%x\n", daddr);
+ return ERR_PTR(-EIO);
+ }
+
+ dev_dbg(css->dev, "buffer 0x%8x done from pipe %d\n", daddr, pipe);
+ b->pipe = pipe;
+ b->state = IPU3_CSS_BUFFER_DONE;
+ list_del(&b->list);
+ spin_unlock(&css_pipe->qlock);
+ break;
+ case IMGU_ABI_EVTTYPE_PIPELINE_DONE:
+ pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
+ IMGU_ABI_EVTTYPE_PIPE_SHIFT;
+ if (pipe >= IMGU_MAX_PIPE_NUM) {
+ dev_err(css->dev, "Invalid pipe: %i\n", pipe);
+ return ERR_PTR(-EIO);
+ }
+
+ dev_dbg(css->dev, "event: pipeline done 0x%8x for pipe %d\n",
+ event, pipe);
+ break;
+ case IMGU_ABI_EVTTYPE_TIMER:
+ r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
+ if (r < 0)
+ return ERR_PTR(r);
+
+ if ((event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
+ IMGU_ABI_EVTTYPE_EVENT_SHIFT == IMGU_ABI_EVTTYPE_TIMER)
+ dev_dbg(css->dev, "event: timer\n");
+ else
+ dev_warn(css->dev, "half of timer event missing\n");
+ break;
+ case IMGU_ABI_EVTTYPE_FW_WARNING:
+ dev_warn(css->dev, "event: firmware warning 0x%x\n", event);
+ break;
+ case IMGU_ABI_EVTTYPE_FW_ASSERT:
+ dev_err(css->dev,
+ "event: firmware assert 0x%x module_id %i line_no %i\n",
+ event,
+ (event & IMGU_ABI_EVTTYPE_MODULEID_MASK) >>
+ IMGU_ABI_EVTTYPE_MODULEID_SHIFT,
+ swab16((event & IMGU_ABI_EVTTYPE_LINENO_MASK) >>
+ IMGU_ABI_EVTTYPE_LINENO_SHIFT));
+ break;
+ default:
+ dev_warn(css->dev, "received unknown event 0x%x\n", event);
+ }
+
+ return b;
+}
+
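
And the matching dequeue side, for example from the threaded interrupt handler, showing the ERR_PTR convention described in the comment above. example_drain() is hypothetical, assumes the usual <linux/err.h> helpers, and omits the hand-off back to V4L2.

static void example_drain(struct imgu_css *css)
{
	struct imgu_css_buffer *b;

	do {
		b = imgu_css_buf_dequeue(css);
		if (IS_ERR(b)) {
			if (PTR_ERR(b) == -EAGAIN)
				continue;	/* an event was handled, poll again */
			break;			/* -EBUSY (nothing left) or a real error */
		}
		/* b->state is IPU3_CSS_BUFFER_DONE here; return it to V4L2 */
	} while (1);
}
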
+/*
+ * Get a new set of parameters from the pool and initialize them based on
+ * set_params. set_params may be NULL, in which case the previously set
+ * parameters are reused.
+ * If parameters haven't been set previously, initialize from scratch.
+ *
+ * Returns 0 on success or a negative value on error; on success the new
+ * parameter set has been queued to the firmware.
+ */
+int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_params *set_params)
+{
+ static const unsigned int queue_id = IMGU_ABI_QUEUE_A_ID;
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ const int stage = 0;
+ const struct imgu_fw_info *bi;
+ int obgrid_size;
+ unsigned int stripes, i;
+ struct ipu3_uapi_flags *use = set_params ? &set_params->use : NULL;
+
+ /* Destination buffers which are filled here */
+ struct imgu_abi_parameter_set_info *param_set;
+ struct imgu_abi_acc_param *acc = NULL;
+ struct imgu_abi_gdc_warp_param *gdc = NULL;
+ struct ipu3_uapi_obgrid_param *obgrid = NULL;
+ const struct imgu_css_map *map;
+ void *vmem0 = NULL;
+ void *dmem0 = NULL;
+
+ enum imgu_abi_memories m;
+ int r = -EBUSY;
+
+ if (!css->streaming)
+ return -EPROTO;
+
+ dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
+
+ bi = &css->fwp->binary_header[css_pipe->bindex];
+ obgrid_size = imgu_css_fw_obgrid_size(bi);
+ stripes = bi->info.isp.sp.iterator.num_stripes ? : 1;
+
+ imgu_css_pool_get(&css_pipe->pool.parameter_set_info);
+ param_set = imgu_css_pool_last(&css_pipe->pool.parameter_set_info,
+ 0)->vaddr;
+
+ /* Get a new acc only if new parameters given, or none yet */
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
+ if (set_params || !map->vaddr) {
+ imgu_css_pool_get(&css_pipe->pool.acc);
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
+ acc = map->vaddr;
+ }
+
+ /* Get new VMEM0 only if needed, or none yet */
+ m = IMGU_ABI_MEM_ISP_VMEM0;
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
+ set_params->use.tnr3_vmem_params ||
+ set_params->use.xnr3_vmem_params))) {
+ imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ vmem0 = map->vaddr;
+ }
+
+ /* Get new DMEM0 only if needed, or none yet */
+ m = IMGU_ABI_MEM_ISP_DMEM0;
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
+ set_params->use.xnr3_dmem_params))) {
+ imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ dmem0 = map->vaddr;
+ }
+
+ /* Configure acc parameter cluster */
+ if (acc) {
+ /* get acc_old */
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 1);
+ /* user acc */
+ r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr,
+ set_params ? &set_params->acc_param : NULL);
+ if (r < 0)
+ goto fail;
+ }
+
+ /* Configure late binding parameters */
+ if (vmem0) {
+ m = IMGU_ABI_MEM_ISP_VMEM0;
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = imgu_css_cfg_vmem0(css, pipe, use, vmem0,
+ map->vaddr, set_params);
+ if (r < 0)
+ goto fail;
+ }
+
+ if (dmem0) {
+ m = IMGU_ABI_MEM_ISP_DMEM0;
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = imgu_css_cfg_dmem0(css, pipe, use, dmem0,
+ map->vaddr, set_params);
+ if (r < 0)
+ goto fail;
+ }
+
+ /* Get a new gdc only if a new gdc is given, or none yet */
+ if (bi->info.isp.sp.enable.dvs_6axis) {
+ unsigned int a = IPU3_CSS_AUX_FRAME_REF;
+ unsigned int g = IPU3_CSS_RECT_GDC;
+ unsigned int e = IPU3_CSS_RECT_ENVELOPE;
+
+ map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
+ if (!map->vaddr) {
+ imgu_css_pool_get(&css_pipe->pool.gdc);
+ map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
+ gdc = map->vaddr;
+ imgu_css_cfg_gdc_table(map->vaddr,
+ css_pipe->aux_frames[a].bytesperline /
+ css_pipe->aux_frames[a].bytesperpixel,
+ css_pipe->aux_frames[a].height,
+ css_pipe->rect[g].width,
+ css_pipe->rect[g].height,
+ css_pipe->rect[e].width,
+ css_pipe->rect[e].height);
+ }
+ }
+
+ /* Get a new obgrid only if a new obgrid is given, or none yet */
+ map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
+ if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
+ imgu_css_pool_get(&css_pipe->pool.obgrid);
+ map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
+ obgrid = map->vaddr;
+
+ /* Configure optical black level grid (obgrid) */
+ if (set_params && set_params->use.obgrid_param)
+ for (i = 0; i < obgrid_size / sizeof(*obgrid); i++)
+ obgrid[i] = set_params->obgrid_param;
+ else
+ memset(obgrid, 0, obgrid_size);
+ }
+
+ /* Configure parameter set info, queued to `queue_id' */
+
+ memset(param_set, 0, sizeof(*param_set));
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
+ param_set->mem_map.acc_cluster_params_for_sp = map->daddr;
+
+ map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
+ param_set->mem_map.dvs_6axis_params_y = map->daddr;
+
+ for (i = 0; i < stripes; i++) {
+ map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
+ param_set->mem_map.obgrid_tbl[i] =
+ map->daddr + (obgrid_size / stripes) * i;
+ }
+
+ for (m = 0; m < IMGU_ABI_NUM_MEMORIES; m++) {
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ param_set->mem_map.isp_mem_param[stage][m] = map->daddr;
+ }
+
+ /* Then queue the new parameter buffer */
+ map = imgu_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
+ r = imgu_css_queue_data(css, queue_id, pipe, map->daddr);
+ if (r < 0)
+ goto fail;
+
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
+ queue_id));
+ if (r < 0)
+ goto fail_no_put;
+
+ /* Finally dequeue all old parameter buffers */
+
+ do {
+ u32 daddr;
+
+ r = imgu_css_dequeue_data(css, queue_id, &daddr);
+ if (r == -EBUSY)
+ break;
+ if (r)
+ goto fail_no_put;
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_BUFFER_DEQUEUED
+ (queue_id));
+ if (r < 0) {
+ dev_err(css->dev, "failed to queue parameter event\n");
+ goto fail_no_put;
+ }
+ } while (1);
+
+ return 0;
+
+fail:
+ /*
+ * A failure, most likely the parameter queue was full.
+ * Return error but continue streaming. User can try submitting new
+ * parameters again later.
+ */
+
+ imgu_css_pool_put(&css_pipe->pool.parameter_set_info);
+ if (acc)
+ imgu_css_pool_put(&css_pipe->pool.acc);
+ if (gdc)
+ imgu_css_pool_put(&css_pipe->pool.gdc);
+ if (obgrid)
+ imgu_css_pool_put(&css_pipe->pool.obgrid);
+ if (vmem0)
+ imgu_css_pool_put(
+ &css_pipe->pool.binary_params_p
+ [IMGU_ABI_MEM_ISP_VMEM0]);
+ if (dmem0)
+ imgu_css_pool_put(
+ &css_pipe->pool.binary_params_p
+ [IMGU_ABI_MEM_ISP_DMEM0]);
+
+fail_no_put:
+ return r;
+}
+
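
The function above leans on one recurring pattern: each parameter pool is rotated so that index 0 of imgu_css_pool_last() is the slot claimed by the latest imgu_css_pool_get() and index 1 is the previous, still-live buffer used as the merge source. A stripped-down sketch of that pattern, using the acc pool as an example; example_rotate_acc() is hypothetical and the pool helpers themselves are declared in ipu3-css-pool.h, outside this file.

static void *example_rotate_acc(struct imgu_css_pipe *css_pipe,
				const void **old_vaddr)
{
	imgu_css_pool_get(&css_pipe->pool.acc);	/* claim the next slot */

	/* Index 1: buffer built on the previous call (vaddr NULL on first use) */
	*old_vaddr = imgu_css_pool_last(&css_pipe->pool.acc, 1)->vaddr;

	/*
	 * Index 0: the slot just claimed; fill it, then queue its daddr.
	 * On failure, release the claim with imgu_css_pool_put().
	 */
	return imgu_css_pool_last(&css_pipe->pool.acc, 0)->vaddr;
}
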
+int imgu_css_irq_ack(struct imgu_css *css)
+{
+ static const int NUM_SWIRQS = 3;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
+ void __iomem *const base = css->base;
+ u32 irq_status[IMGU_IRQCTRL_NUM];
+ int i;
+
+ u32 imgu_status = readl(base + IMGU_REG_INT_STATUS);
+
+ writel(imgu_status, base + IMGU_REG_INT_STATUS);
+ for (i = 0; i < IMGU_IRQCTRL_NUM; i++)
+ irq_status[i] = readl(base + IMGU_REG_IRQCTRL_STATUS(i));
+
+ for (i = 0; i < NUM_SWIRQS; i++) {
+ if (irq_status[IMGU_IRQCTRL_SP0] & IMGU_IRQCTRL_IRQ_SW_PIN(i)) {
+ /* SP SW interrupt */
+ u32 cnt = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.output);
+ u32 val = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.output + 4 + 4 * i);
+
+ dev_dbg(css->dev, "%s: swirq %i cnt %i val 0x%x\n",
+ __func__, i, cnt, val);
+ }
+ }
+
+ for (i = IMGU_IRQCTRL_NUM - 1; i >= 0; i--)
+ if (irq_status[i]) {
+ writel(irq_status[i], base + IMGU_REG_IRQCTRL_CLEAR(i));
+ /* Wait for write to complete */
+ readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
+ }
+
+ dev_dbg(css->dev, "%s: imgu 0x%x main 0x%x sp0 0x%x sp1 0x%x\n",
+ __func__, imgu_status, irq_status[IMGU_IRQCTRL_MAIN],
+ irq_status[IMGU_IRQCTRL_SP0], irq_status[IMGU_IRQCTRL_SP1]);
+
+ if (!imgu_status && !irq_status[IMGU_IRQCTRL_MAIN])
+ return -ENOMSG;
+
+ return 0;
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css.h b/drivers/staging/media/ipu3/ipu3-css.h
new file mode 100644
index 000000000000..ab64e9521203
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_CSS_H
+#define __IPU3_CSS_H
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+
+#include "ipu3-abi.h"
+#include "ipu3-css-pool.h"
+
+/* 2 stages for split isp pipeline, 1 for scaling */
+#define IMGU_NUM_SP 2
+#define IMGU_MAX_PIPELINE_NUM 20
+#define IMGU_MAX_PIPE_NUM 2
+
+/* For DVS etc., format FRAME_FMT_YUV420_16 */
+#define IPU3_CSS_AUX_FRAME_REF 0
+/* For temporal noise reduction DVS etc., format FRAME_FMT_YUV_LINE */
+#define IPU3_CSS_AUX_FRAME_TNR 1
+#define IPU3_CSS_AUX_FRAME_TYPES 2 /* REF and TNR */
+#define IPU3_CSS_AUX_FRAMES 2 /* 2 for REF and 2 for TNR */
+
+#define IPU3_CSS_QUEUE_IN 0
+#define IPU3_CSS_QUEUE_PARAMS 1
+#define IPU3_CSS_QUEUE_OUT 2
+#define IPU3_CSS_QUEUE_VF 3
+#define IPU3_CSS_QUEUE_STAT_3A 4
+#define IPU3_CSS_QUEUES 5
+
+#define IPU3_CSS_RECT_EFFECTIVE 0 /* Effective resolution */
+#define IPU3_CSS_RECT_BDS 1 /* Resolution after BDS */
+#define IPU3_CSS_RECT_ENVELOPE 2 /* DVS envelope size */
+#define IPU3_CSS_RECT_GDC 3 /* gdc output res */
+#define IPU3_CSS_RECTS 4 /* number of rects */
+
+#define IA_CSS_BINARY_MODE_PRIMARY 2
+#define IA_CSS_BINARY_MODE_VIDEO 3
+#define IPU3_CSS_DEFAULT_BINARY 3 /* default binary index */
+
+/*
+ * The pipe id type, distinguishes the kind of pipes that
+ * can be run in parallel.
+ */
+enum imgu_css_pipe_id {
+ IPU3_CSS_PIPE_ID_PREVIEW,
+ IPU3_CSS_PIPE_ID_COPY,
+ IPU3_CSS_PIPE_ID_VIDEO,
+ IPU3_CSS_PIPE_ID_CAPTURE,
+ IPU3_CSS_PIPE_ID_YUVPP,
+ IPU3_CSS_PIPE_ID_ACC,
+ IPU3_CSS_PIPE_ID_NUM
+};
+
+struct imgu_css_resolution {
+ u32 w;
+ u32 h;
+};
+
+enum imgu_css_buffer_state {
+ IPU3_CSS_BUFFER_NEW, /* Not yet queued */
+ IPU3_CSS_BUFFER_QUEUED, /* Queued, waiting to be filled */
+ IPU3_CSS_BUFFER_DONE, /* Finished processing, removed from queue */
+ IPU3_CSS_BUFFER_FAILED, /* Was not processed, removed from queue */
+};
+
+struct imgu_css_buffer {
+ /* Private fields: user doesn't touch */
+ dma_addr_t daddr;
+ unsigned int queue;
+ enum imgu_css_buffer_state state;
+ struct list_head list;
+ u8 queue_pos;
+ unsigned int pipe;
+};
+
+struct imgu_css_format {
+ u32 pixelformat;
+ enum v4l2_colorspace colorspace;
+ enum imgu_abi_frame_format frame_format;
+ enum imgu_abi_bayer_order bayer_order;
+ enum imgu_abi_osys_format osys_format;
+ enum imgu_abi_osys_tiling osys_tiling;
+ u8 bit_depth; /* Effective bits per pixel */
+ u8 chroma_decim; /* Chroma plane decimation, 0=no chroma plane */
+ u8 width_align; /* Alignment requirement for width_pad */
+ u8 flags;
+};
+
+struct imgu_css_queue {
+ union {
+ struct v4l2_pix_format_mplane mpix;
+ struct v4l2_meta_format meta;
+
+ } fmt;
+ const struct imgu_css_format *css_fmt;
+ unsigned int width_pad;
+ struct list_head bufs;
+};
+
+struct imgu_css_pipe {
+ enum imgu_css_pipe_id pipe_id;
+ unsigned int bindex;
+
+ struct imgu_css_queue queue[IPU3_CSS_QUEUES];
+ struct v4l2_rect rect[IPU3_CSS_RECTS];
+
+ bool vf_output_en;
+ /* Protect access to queue[IPU3_CSS_QUEUES] */
+ spinlock_t qlock;
+
+ /* Data structures shared with IMGU and driver, always allocated */
+ struct imgu_css_map sp_ddr_ptrs;
+ struct imgu_css_map xmem_sp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
+ [IMGU_ABI_MAX_STAGES];
+ struct imgu_css_map xmem_isp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
+ [IMGU_ABI_MAX_STAGES];
+
+ /*
+ * Data structures shared with IMGU and driver, binary specific.
+ * PARAM_CLASS_CONFIG and PARAM_CLASS_STATE parameters.
+ */
+ struct imgu_css_map binary_params_cs[IMGU_ABI_PARAM_CLASS_NUM - 1]
+ [IMGU_ABI_NUM_MEMORIES];
+
+ struct {
+ struct imgu_css_map mem[IPU3_CSS_AUX_FRAMES];
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline;
+ unsigned int bytesperpixel;
+ } aux_frames[IPU3_CSS_AUX_FRAME_TYPES];
+
+ struct {
+ struct imgu_css_pool parameter_set_info;
+ struct imgu_css_pool acc;
+ struct imgu_css_pool gdc;
+ struct imgu_css_pool obgrid;
+ /* PARAM_CLASS_PARAM parameters for binding while streaming */
+ struct imgu_css_pool binary_params_p[IMGU_ABI_NUM_MEMORIES];
+ } pool;
+
+ struct imgu_css_map abi_buffers[IPU3_CSS_QUEUES]
+ [IMGU_ABI_HOST2SP_BUFQ_SIZE];
+};
+
+/* IPU3 Camera Sub System structure */
+struct imgu_css {
+ struct device *dev;
+ void __iomem *base;
+ const struct firmware *fw;
+ struct imgu_fw_header *fwp;
+ int iomem_length;
+ int fw_bl, fw_sp[IMGU_NUM_SP]; /* Indices of bl and SP binaries */
+ struct imgu_css_map *binary; /* fw binaries mapped to device */
+ bool streaming; /* true when streaming is enabled */
+
+ struct imgu_css_pipe pipes[IMGU_MAX_PIPE_NUM];
+ struct imgu_css_map xmem_sp_group_ptrs;
+
+ /* enabled pipe(s) */
+ DECLARE_BITMAP(enabled_pipes, IMGU_MAX_PIPE_NUM);
+};
+
+/******************* css v4l *******************/
+int imgu_css_init(struct device *dev, struct imgu_css *css,
+ void __iomem *base, int length);
+void imgu_css_cleanup(struct imgu_css *css);
+int imgu_css_fmt_try(struct imgu_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe);
+int imgu_css_fmt_set(struct imgu_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe);
+int imgu_css_meta_fmt_set(struct v4l2_meta_format *fmt);
+int imgu_css_buf_queue(struct imgu_css *css, unsigned int pipe,
+ struct imgu_css_buffer *b);
+struct imgu_css_buffer *imgu_css_buf_dequeue(struct imgu_css *css);
+int imgu_css_start_streaming(struct imgu_css *css);
+void imgu_css_stop_streaming(struct imgu_css *css);
+bool imgu_css_queue_empty(struct imgu_css *css);
+bool imgu_css_is_streaming(struct imgu_css *css);
+bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe);
+
+/******************* css hw *******************/
+int imgu_css_set_powerup(struct device *dev, void __iomem *base,
+ unsigned int freq);
+void imgu_css_set_powerdown(struct device *dev, void __iomem *base);
+int imgu_css_irq_ack(struct imgu_css *css);
+
+/******************* set parameters ************/
+int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
+ struct ipu3_uapi_params *set_params);
+
+/******************* auxiliary helpers *******************/
+static inline enum imgu_css_buffer_state
+imgu_css_buf_state(struct imgu_css_buffer *b)
+{
+ return b->state;
+}
+
+/* Initialize given buffer. May be called several times. */
+static inline void imgu_css_buf_init(struct imgu_css_buffer *b,
+ unsigned int queue, dma_addr_t daddr)
+{
+ b->state = IPU3_CSS_BUFFER_NEW;
+ b->queue = queue;
+ b->daddr = daddr;
+}
+#endif
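
Taken together, the API above implies a fairly rigid call order. Below is a condensed sketch of one session with error paths and all V4L2 plumbing omitted; example_session() and its arguments are hypothetical, and the format/rect arrays are assumed to be prepared as in the negotiation sketch earlier.

static int example_session(struct device *dev, struct imgu_css *css,
			   void __iomem *base, int len,
			   struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
			   struct v4l2_rect *rects[IPU3_CSS_RECTS],
			   struct imgu_css_buffer *buf, dma_addr_t daddr)
{
	unsigned int pipe = 0;
	int r;

	r = imgu_css_init(dev, css, base, len);		/* map fw, set up queues */
	if (r)
		return r;

	r = imgu_css_fmt_set(css, fmts, rects, pipe);	/* select a binary */
	if (r)
		goto out;

	r = imgu_css_start_streaming(css);		/* init hw and pipeline */
	if (r)
		goto out;

	imgu_css_buf_init(buf, IPU3_CSS_QUEUE_OUT, daddr);
	r = imgu_css_buf_queue(css, pipe, buf);		/* only legal while streaming */

	imgu_css_stop_streaming(css);
out:
	imgu_css_cleanup(css);
	return r;
}
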
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.c b/drivers/staging/media/ipu3/ipu3-dmamap.c
new file mode 100644
index 000000000000..8a19b0024152
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright 2018 Google LLC.
+ *
+ * Author: Tomasz Figa <tfiga@chromium.org>
+ * Author: Yong Zhi <yong.zhi@intel.com>
+ */
+
+#include <linux/vmalloc.h>
+
+#include "ipu3.h"
+#include "ipu3-css-pool.h"
+#include "ipu3-mmu.h"
+#include "ipu3-dmamap.h"
+
+/*
+ * Free a buffer allocated by imgu_dmamap_alloc_buffer()
+ */
+static void imgu_dmamap_free_buffer(struct page **pages,
+ size_t size)
+{
+ int count = size >> PAGE_SHIFT;
+
+ while (count--)
+ __free_page(pages[count]);
+ kvfree(pages);
+}
+
+/*
+ * Based on the implementation of __iommu_dma_alloc_pages()
+ * defined in drivers/iommu/dma-iommu.c
+ */
+static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
+{
+ struct page **pages;
+ unsigned int i = 0, count = size >> PAGE_SHIFT;
+ unsigned int order_mask = 1;
+ const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
+
+ /* Allocate mem for array of page ptrs */
+ pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
+
+ if (!pages)
+ return NULL;
+
+ gfp |= __GFP_HIGHMEM | __GFP_ZERO;
+
+ while (count) {
+ struct page *page = NULL;
+ unsigned int order_size;
+
+ for (order_mask &= (2U << __fls(count)) - 1;
+ order_mask; order_mask &= ~order_size) {
+ unsigned int order = __fls(order_mask);
+
+ order_size = 1U << order;
+ page = alloc_pages((order_mask - order_size) ?
+ gfp | high_order_gfp : gfp, order);
+ if (!page)
+ continue;
+ if (!order)
+ break;
+ if (!PageCompound(page)) {
+ split_page(page, order);
+ break;
+ }
+
+ __free_pages(page, order);
+ }
+ if (!page) {
+ imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
+ return NULL;
+ }
+ count -= order_size;
+ while (order_size--)
+ pages[i++] = page++;
+ }
+
+ return pages;
+}
+
+/**
+ * imgu_dmamap_alloc - allocate and map a buffer into KVA
+ * @imgu: struct imgu_device pointer
+ * @map: struct to store mapping variables
+ * @len: size required
+ *
+ * Returns:
+ * KVA on success
+ * %NULL on failure
+ */
+void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
+ size_t len)
+{
+ unsigned long shift = iova_shift(&imgu->iova_domain);
+ struct device *dev = &imgu->pci_dev->dev;
+ size_t size = PAGE_ALIGN(len);
+ int count = size >> PAGE_SHIFT;
+ struct page **pages;
+ dma_addr_t iovaddr;
+ struct iova *iova;
+ int i, rval;
+
+ dev_dbg(dev, "%s: allocating %zu\n", __func__, size);
+
+ iova = alloc_iova(&imgu->iova_domain, size >> shift,
+ imgu->mmu->aperture_end >> shift, 0);
+ if (!iova)
+ return NULL;
+
+ pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
+ if (!pages)
+ goto out_free_iova;
+
+ /* Call IOMMU driver to setup pgt */
+ iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
+ for (i = 0; i < count; ++i) {
+ rval = imgu_mmu_map(imgu->mmu, iovaddr,
+ page_to_phys(pages[i]), PAGE_SIZE);
+ if (rval)
+ goto out_unmap;
+
+ iovaddr += PAGE_SIZE;
+ }
+
+ map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+ if (!map->vaddr)
+ goto out_unmap;
+
+ map->pages = pages;
+ map->size = size;
+ map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
+
+ dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
+ size, &map->daddr, map->vaddr);
+
+ return map->vaddr;
+
+out_unmap:
+ imgu_dmamap_free_buffer(pages, size);
+ imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ i * PAGE_SIZE);
+
+out_free_iova:
+ __free_iova(&imgu->iova_domain, iova);
+
+ return NULL;
+}
+
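
A sketch of how the allocator above is used elsewhere in the driver: the caller keeps the filled-in struct imgu_css_map (defined elsewhere in this series) and later hands it back to imgu_dmamap_free(). example_map() and the 4 KiB size are illustrative only.

static int example_map(struct imgu_device *imgu)
{
	struct imgu_css_map map = { 0 };
	void *vaddr;

	vaddr = imgu_dmamap_alloc(imgu, &map, 4096);
	if (!vaddr)
		return -ENOMEM;

	/* The CPU writes through map.vaddr ... */
	memset(vaddr, 0, 4096);
	/* ... while the IPU3 sees the same memory at IOVA map.daddr */

	imgu_dmamap_free(imgu, &map);
	return 0;
}
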
+void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
+{
+ struct iova *iova;
+
+ iova = find_iova(&imgu->iova_domain,
+ iova_pfn(&imgu->iova_domain, map->daddr));
+ if (WARN_ON(!iova))
+ return;
+
+ imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ iova_size(iova) << iova_shift(&imgu->iova_domain));
+
+ __free_iova(&imgu->iova_domain, iova);
+}
+
+/*
+ * Counterpart of imgu_dmamap_alloc
+ */
+void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
+{
+ dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
+ __func__, map->size, &map->daddr, map->vaddr);
+
+ if (!map->vaddr)
+ return;
+
+ imgu_dmamap_unmap(imgu, map);
+
+ vunmap(map->vaddr);
+ imgu_dmamap_free_buffer(map->pages, map->size);
+ map->vaddr = NULL;
+}
+
+int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
+ int nents, struct imgu_css_map *map)
+{
+ unsigned long shift = iova_shift(&imgu->iova_domain);
+ struct scatterlist *sg;
+ struct iova *iova;
+ size_t size = 0;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (sg->offset)
+ return -EINVAL;
+
+ if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
+ return -EINVAL;
+
+ size += sg->length;
+ }
+
+ size = iova_align(&imgu->iova_domain, size);
+ dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
+ nents, size >> shift);
+
+ iova = alloc_iova(&imgu->iova_domain, size >> shift,
+ imgu->mmu->aperture_end >> shift, 0);
+ if (!iova)
+ return -ENOMEM;
+
+ dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
+ iova->pfn_lo, iova->pfn_hi);
+
+ if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ sglist, nents) < size)
+ goto out_fail;
+
+ memset(map, 0, sizeof(*map));
+ map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
+ map->size = size;
+
+ return 0;
+
+out_fail:
+ __free_iova(&imgu->iova_domain, iova);
+
+ return -EFAULT;
+}
+
+int imgu_dmamap_init(struct imgu_device *imgu)
+{
+ unsigned long order, base_pfn;
+ int ret = iova_cache_get();
+
+ if (ret)
+ return ret;
+
+ order = __ffs(IPU3_PAGE_SIZE);
+ base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
+ init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
+
+ return 0;
+}
+
+void imgu_dmamap_exit(struct imgu_device *imgu)
+{
+ put_iova_domain(&imgu->iova_domain);
+ iova_cache_put();
+}
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.h b/drivers/staging/media/ipu3/ipu3-dmamap.h
new file mode 100644
index 000000000000..9db513b3d603
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+/* Copyright 2018 Google LLC. */
+
+#ifndef __IPU3_DMAMAP_H
+#define __IPU3_DMAMAP_H
+
+struct imgu_device;
+struct scatterlist;
+
+void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
+ size_t len);
+void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map);
+
+int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
+ int nents, struct imgu_css_map *map);
+void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map);
+
+int imgu_dmamap_init(struct imgu_device *imgu);
+void imgu_dmamap_exit(struct imgu_device *imgu);
+
+#endif
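
The header above is the whole allocation API. A hedged sketch of the alloc/free lifecycle follows; it is illustrative only: the helper name and the 16 KiB size are made up, and struct imgu_device / struct imgu_css_map are assumed to come from ipu3.h. In the driver, imgu_dmamap_init() and imgu_dmamap_exit() bracket this at probe/remove time.

#include "ipu3.h"		/* struct imgu_device, struct imgu_css_map (assumed) */
#include "ipu3-dmamap.h"

/* Allocate a 16 KiB buffer for CSS use and release it again. */
static int example_param_buffer(struct imgu_device *imgu,
				struct imgu_css_map *map)
{
	/* On success, map->vaddr (CPU mapping), map->daddr (IOVA) and
	 * map->size (page-aligned length) describe the new buffer.
	 */
	if (!imgu_dmamap_alloc(imgu, map, 16 * 1024))
		return -ENOMEM;

	/* ... program map->daddr into the hardware, use map->vaddr here ... */

	imgu_dmamap_free(imgu, map);
	return 0;
}
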
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
new file mode 100644
index 000000000000..cb9bf5fb29a5
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-mmu.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Intel Corporation.
+ * Copyright 2018 Google LLC.
+ *
+ * Author: Tuukka Toivonen <tuukka.toivonen@intel.com>
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ * Author: Samu Onkalo <samu.onkalo@intel.com>
+ * Author: Tomasz Figa <tfiga@chromium.org>
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/set_memory.h>
+
+#include "ipu3-mmu.h"
+
+#define IPU3_PT_BITS 10
+#define IPU3_PT_PTES (1UL << IPU3_PT_BITS)
+#define IPU3_PT_SIZE (IPU3_PT_PTES << 2)
+#define IPU3_PT_ORDER (IPU3_PT_SIZE >> PAGE_SHIFT)
+
+#define IPU3_ADDR2PTE(addr) ((addr) >> IPU3_PAGE_SHIFT)
+#define IPU3_PTE2ADDR(pte) ((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)
+
+#define IPU3_L2PT_SHIFT IPU3_PT_BITS
+#define IPU3_L2PT_MASK ((1UL << IPU3_L2PT_SHIFT) - 1)
+
+#define IPU3_L1PT_SHIFT IPU3_PT_BITS
+#define IPU3_L1PT_MASK ((1UL << IPU3_L1PT_SHIFT) - 1)
+
+#define IPU3_MMU_ADDRESS_BITS (IPU3_PAGE_SHIFT + \
+ IPU3_L2PT_SHIFT + \
+ IPU3_L1PT_SHIFT)
+
+#define IMGU_REG_BASE 0x4000
+#define REG_TLB_INVALIDATE (IMGU_REG_BASE + 0x300)
+#define TLB_INVALIDATE 1
+#define REG_L1_PHYS (IMGU_REG_BASE + 0x304) /* 27-bit pfn */
+#define REG_GP_HALT (IMGU_REG_BASE + 0x5dc)
+#define REG_GP_HALTED (IMGU_REG_BASE + 0x5e0)
+
+struct imgu_mmu {
+ struct device *dev;
+ void __iomem *base;
+ /* protect access to l2pts, l1pt */
+ spinlock_t lock;
+
+ void *dummy_page;
+ u32 dummy_page_pteval;
+
+ u32 *dummy_l2pt;
+ u32 dummy_l2pt_pteval;
+
+ u32 **l2pts;
+ u32 *l1pt;
+
+ struct imgu_mmu_info geometry;
+};
+
+static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
+{
+ return container_of(info, struct imgu_mmu, geometry);
+}
+
+/**
+ * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
+ * @mmu: MMU to perform the invalidate operation on
+ *
+ * This function invalidates the whole TLB. Must be called when the hardware
+ * is powered on.
+ */
+static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
+{
+ writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
+}
+
+static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
+ void (*func)(struct imgu_mmu *mmu))
+{
+ if (!pm_runtime_get_if_in_use(mmu->dev))
+ return;
+
+ func(mmu);
+ pm_runtime_put(mmu->dev);
+}
+
+/**
+ * imgu_mmu_set_halt - set CIO gate halt bit
+ * @mmu: MMU to set the CIO gate bit in.
+ * @halt: Desired state of the gate bit.
+ *
+ * This function sets the CIO gate bit that controls whether external memory
+ * accesses are allowed. Must be called when the hardware is powered on.
+ */
+static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
+{
+ int ret;
+ u32 val;
+
+ writel(halt, mmu->base + REG_GP_HALT);
+ ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
+ val, (val & 1) == halt, 1000, 100000);
+
+ if (ret)
+ dev_err(mmu->dev, "failed to %s CIO gate halt\n",
+ halt ? "set" : "clear");
+}
+
+/**
+ * imgu_mmu_alloc_page_table - allocate a pre-filled page table
+ * @pteval: Value to initialize the page table entries with.
+ *
+ * Return: Pointer to allocated page table or NULL on failure.
+ */
+static u32 *imgu_mmu_alloc_page_table(u32 pteval)
+{
+ u32 *pt;
+ int pte;
+
+ pt = (u32 *)__get_free_page(GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ for (pte = 0; pte < IPU3_PT_PTES; pte++)
+ pt[pte] = pteval;
+
+ set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);
+
+ return pt;
+}
+
+/**
+ * imgu_mmu_free_page_table - free page table
+ * @pt: Page table to free.
+ */
+static void imgu_mmu_free_page_table(u32 *pt)
+{
+ set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
+ free_page((unsigned long)pt);
+}
+
+/**
+ * address_to_pte_idx - split IOVA into L1 and L2 page table indices
+ * @iova: IOVA to split.
+ * @l1pt_idx: Output for the L1 page table index.
+ * @l2pt_idx: Output for the L2 page table index.
+ */
+static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
+ u32 *l2pt_idx)
+{
+ iova >>= IPU3_PAGE_SHIFT;
+
+ if (l2pt_idx)
+ *l2pt_idx = iova & IPU3_L2PT_MASK;
+
+ iova >>= IPU3_L2PT_SHIFT;
+
+ if (l1pt_idx)
+ *l1pt_idx = iova & IPU3_L1PT_MASK;
+}
+
+static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
+{
+ unsigned long flags;
+ u32 *l2pt, *new_l2pt;
+ u32 pteval;
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ l2pt = mmu->l2pts[l1pt_idx];
+ if (l2pt) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return l2pt;
+ }
+
+ spin_unlock_irqrestore(&mmu->lock, flags);
+
+ new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
+ if (!new_l2pt)
+ return NULL;
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
+ new_l2pt, l1pt_idx);
+
+ l2pt = mmu->l2pts[l1pt_idx];
+ if (l2pt) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ imgu_mmu_free_page_table(new_l2pt);
+ return l2pt;
+ }
+
+ l2pt = new_l2pt;
+ mmu->l2pts[l1pt_idx] = new_l2pt;
+
+ pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
+ mmu->l1pt[l1pt_idx] = pteval;
+
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return l2pt;
+}
+
+static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
+ phys_addr_t paddr)
+{
+ u32 l1pt_idx, l2pt_idx;
+ unsigned long flags;
+ u32 *l2pt;
+
+ if (!mmu)
+ return -ENODEV;
+
+ address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
+
+ l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
+ if (!l2pt)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return -EBUSY;
+ }
+
+ l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);
+
+ spin_unlock_irqrestore(&mmu->lock, flags);
+
+ return 0;
+}
+
+/**
+ * imgu_mmu_map - map a physically contiguous memory region at an IOVA
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual (IOVA) start address
+ * @paddr: the physical start address
+ * @size: length of the mapping in bytes
+ *
+ * The function has been adapted from iommu_map() in
+ * drivers/iommu/iommu.c.
+ */
+int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
+ int ret = 0;
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
+
+ while (size) {
+ dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
+
+ ret = __imgu_mmu_map(mmu, iova, paddr);
+ if (ret)
+ break;
+
+ iova += IPU3_PAGE_SIZE;
+ paddr += IPU3_PAGE_SIZE;
+ size -= IPU3_PAGE_SIZE;
+ }
+
+ call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
+
+ return ret;
+}
+
+/**
+ * imgu_mmu_map_sg - Map a scatterlist
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @sg: the scatterlist to map
+ * @nents: number of entries in the scatterlist
+ *
+ * The function has been adapted from default_iommu_map_sg() in
+ * drivers/iommu/iommu.c.
+ */
+size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents)
+{
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
+ struct scatterlist *s;
+ size_t s_length, mapped = 0;
+ unsigned int i;
+ int ret;
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+ s_length = s->length;
+
+ if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
+ goto out_err;
+
+		/* must be IPU3_PAGE_SIZE aligned to be mapped singly */
+ if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
+ s_length = PAGE_ALIGN(s->length);
+
+ ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
+ if (ret)
+ goto out_err;
+
+ mapped += s_length;
+ }
+
+ call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
+
+ return mapped;
+
+out_err:
+ /* undo mappings already done */
+ imgu_mmu_unmap(info, iova, mapped);
+
+ return 0;
+}
+
+static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
+ unsigned long iova, size_t size)
+{
+ u32 l1pt_idx, l2pt_idx;
+ unsigned long flags;
+ size_t unmap = size;
+ u32 *l2pt;
+
+ if (!mmu)
+ return 0;
+
+ address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ l2pt = mmu->l2pts[l1pt_idx];
+ if (!l2pt) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return 0;
+ }
+
+ if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
+ unmap = 0;
+
+ l2pt[l2pt_idx] = mmu->dummy_page_pteval;
+
+ spin_unlock_irqrestore(&mmu->lock, flags);
+
+ return unmap;
+}
+
+/**
+ * imgu_mmu_unmap - Unmap a buffer
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @size: the length of the mapping in bytes
+ *
+ * The function has been adapted from iommu_unmap() in
+ * drivers/iommu/iommu.c.
+ */
+size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
+ size_t size)
+{
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
+ size_t unmapped_page, unmapped = 0;
+
+ /*
+ * The virtual address, as well as the size of the mapping, must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
+ iova, size);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
+ if (!unmapped_page)
+ break;
+
+ dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
+ iova, unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
+
+ return unmapped;
+}
+
+/**
+ * imgu_mmu_init() - initialize IPU3 MMU block
+ *
+ * @parent: struct device parent
+ * @base: IOMEM base of hardware registers.
+ *
+ * Return: Pointer to IPU3 MMU private data or ERR_PTR() on error.
+ */
+struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
+{
+ struct imgu_mmu *mmu;
+ u32 pteval;
+
+ mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return ERR_PTR(-ENOMEM);
+
+ mmu->dev = parent;
+ mmu->base = base;
+ spin_lock_init(&mmu->lock);
+
+	/* Disallow external memory access while there are no valid page tables. */
+ imgu_mmu_set_halt(mmu, true);
+
+ /*
+ * The MMU does not have a "valid" bit, so we have to use a dummy
+ * page for invalid entries.
+ */
+ mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!mmu->dummy_page)
+ goto fail_group;
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
+ mmu->dummy_page_pteval = pteval;
+
+ /*
+ * Allocate a dummy L2 page table with all entries pointing to
+ * the dummy page.
+ */
+ mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
+ if (!mmu->dummy_l2pt)
+ goto fail_dummy_page;
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
+ mmu->dummy_l2pt_pteval = pteval;
+
+ /*
+ * Allocate the array of L2PT CPU pointers, initialized to zero,
+ * which means the dummy L2PT allocated above.
+ */
+ mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
+ if (!mmu->l2pts)
+ goto fail_l2pt;
+
+ /* Allocate the L1 page table. */
+ mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
+ if (!mmu->l1pt)
+ goto fail_l2pts;
+
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
+ writel(pteval, mmu->base + REG_L1_PHYS);
+ imgu_mmu_tlb_invalidate(mmu);
+ imgu_mmu_set_halt(mmu, false);
+
+ mmu->geometry.aperture_start = 0;
+ mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
+
+ return &mmu->geometry;
+
+fail_l2pts:
+ vfree(mmu->l2pts);
+fail_l2pt:
+ imgu_mmu_free_page_table(mmu->dummy_l2pt);
+fail_dummy_page:
+ free_page((unsigned long)mmu->dummy_page);
+fail_group:
+ kfree(mmu);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * imgu_mmu_exit() - clean up IPU3 MMU block
+ *
+ * @info: MMU mappable range
+ */
+void imgu_mmu_exit(struct imgu_mmu_info *info)
+{
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
+
+ /* We are going to free our page tables, no more memory access. */
+ imgu_mmu_set_halt(mmu, true);
+ imgu_mmu_tlb_invalidate(mmu);
+
+ imgu_mmu_free_page_table(mmu->l1pt);
+ vfree(mmu->l2pts);
+ imgu_mmu_free_page_table(mmu->dummy_l2pt);
+ free_page((unsigned long)mmu->dummy_page);
+ kfree(mmu);
+}
+
+void imgu_mmu_suspend(struct imgu_mmu_info *info)
+{
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
+
+ imgu_mmu_set_halt(mmu, true);
+}
+
+void imgu_mmu_resume(struct imgu_mmu_info *info)
+{
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
+ u32 pteval;
+
+ imgu_mmu_set_halt(mmu, true);
+
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
+ writel(pteval, mmu->base + REG_L1_PHYS);
+
+ imgu_mmu_tlb_invalidate(mmu);
+ imgu_mmu_set_halt(mmu, false);
+}
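
To make the two-level page-table layout above concrete: with IPU3_PAGE_SHIFT = 12 and IPU3_PT_BITS = 10, a 32-bit IOVA splits into a 10-bit L1 index, a 10-bit L2 index and a 12-bit page offset. The following standalone userspace sketch mirrors address_to_pte_idx() with the same constants; it is illustrative only and not part of the patch.

#include <stdio.h>

#define EX_PAGE_SHIFT	12			/* IPU3_PAGE_SHIFT */
#define EX_PT_BITS	10			/* IPU3_PT_BITS */
#define EX_PT_MASK	((1UL << EX_PT_BITS) - 1)

int main(void)
{
	unsigned long iova = 0x12345000;	/* some page-aligned IOVA */
	unsigned long l2pt_idx = (iova >> EX_PAGE_SHIFT) & EX_PT_MASK;
	unsigned long l1pt_idx = (iova >> (EX_PAGE_SHIFT + EX_PT_BITS)) & EX_PT_MASK;

	/* Prints "l1pt_idx=72 l2pt_idx=837": the entry for this IOVA lives at
	 * l1pt[72] -> L2 page table, l2pt[837] -> physical page number.
	 */
	printf("l1pt_idx=%lu l2pt_idx=%lu\n", l1pt_idx, l2pt_idx);
	return 0;
}
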
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.h b/drivers/staging/media/ipu3/ipu3-mmu.h
new file mode 100644
index 000000000000..a5f0bca7e7e0
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-mmu.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+/* Copyright 2018 Google LLC. */
+
+#ifndef __IPU3_MMU_H
+#define __IPU3_MMU_H
+
+#define IPU3_PAGE_SHIFT 12
+#define IPU3_PAGE_SIZE (1UL << IPU3_PAGE_SHIFT)
+
+/**
+ * struct imgu_mmu_info - Describes mmu geometry
+ *
+ * @aperture_start: First address that can be mapped
+ * @aperture_end: Last address that can be mapped
+ */
+struct imgu_mmu_info {
+ dma_addr_t aperture_start;
+ dma_addr_t aperture_end;
+};
+
+struct device;
+struct scatterlist;
+
+struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base);
+void imgu_mmu_exit(struct imgu_mmu_info *info);
+void imgu_mmu_suspend(struct imgu_mmu_info *info);
+void imgu_mmu_resume(struct imgu_mmu_info *info);
+
+int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
+ phys_addr_t paddr, size_t size);
+size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
+ size_t size);
+size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents);
+#endif
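
A hedged bring-up sketch using only the declarations above; it is illustrative and not how the driver wires things up (in the series this is driven through the ipu3-dmamap layer), and 'parent', 'base' and 'paddr' are placeholders supplied by the caller.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/types.h>

#include "ipu3-mmu.h"

/* Initialize the MMU, map one IPU3_PAGE_SIZE page at IOVA 0x1000 and tear
 * everything down again. 'paddr' must be IPU3_PAGE_SIZE aligned.
 */
static int example_mmu_bringup(struct device *parent, void __iomem *base,
			       phys_addr_t paddr)
{
	struct imgu_mmu_info *info = imgu_mmu_init(parent, base);
	int ret;

	if (IS_ERR(info))
		return PTR_ERR(info);

	ret = imgu_mmu_map(info, IPU3_PAGE_SIZE, paddr, IPU3_PAGE_SIZE);
	if (!ret)
		imgu_mmu_unmap(info, IPU3_PAGE_SIZE, IPU3_PAGE_SIZE);

	imgu_mmu_exit(info);
	return ret;
}
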
diff --git a/drivers/staging/media/ipu3/ipu3-tables.c b/drivers/staging/media/ipu3/ipu3-tables.c
new file mode 100644
index 000000000000..3a3730bd4395
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-tables.c
@@ -0,0 +1,9609 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include "ipu3-tables.h"
+
+#define X 0 /* Don't care value */
+
+const struct imgu_css_bds_config
+ imgu_css_bds_configs[IMGU_BDS_CONFIG_LEN] = { {
+ /* Scale factor 32 / (32 + 0) = 1 */
+ .hor_phase_arr = {
+ .even = { { 0, 0, 64, 6, 0, 0, 0 } },
+ .odd = { { 0, 0, 64, 6, 0, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 0, 64, 6, 0, 0, 0 } },
+ .odd = { { 0, 0, 64, 6, 0, 0, 0 } } },
+ .ptrn_arr = { { 0x3 } },
+ .sample_patrn_length = 2,
+ .hor_ds_en = 0,
+ .ver_ds_en = 0
+}, {
+ /* Scale factor 32 / (32 + 1) = 0.969697 */
+ .hor_phase_arr = {
+ .even = { { 0, 3, 122, 7, 3, 0, 0 },
+ { 0, 0, 122, 7, 7, -1, 0 },
+ { 0, -3, 122, 7, 10, -1, 0 },
+ { 0, -5, 121, 7, 14, -2, 0 },
+ { 0, -7, 120, 7, 18, -3, 0 },
+ { 0, -9, 118, 7, 23, -4, 0 },
+ { 0, -11, 116, 7, 27, -4, 0 },
+ { 0, -12, 113, 7, 32, -5, 0 },
+ { 0, -13, 110, 7, 37, -6, 0 },
+ { 0, -14, 107, 7, 42, -7, 0 },
+ { 0, -14, 103, 7, 47, -8, 0 },
+ { 0, -15, 100, 7, 52, -9, 0 },
+ { 0, -15, 96, 7, 57, -10, 0 },
+ { 0, -15, 92, 7, 62, -11, 0 },
+ { 0, -14, 86, 7, 68, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 68, 7, 86, -14, 0 },
+ { 0, -11, 62, 7, 92, -15, 0 },
+ { 0, -10, 57, 7, 96, -15, 0 },
+ { 0, -9, 52, 7, 100, -15, 0 },
+ { 0, -8, 47, 7, 103, -14, 0 },
+ { 0, -7, 42, 7, 107, -14, 0 },
+ { 0, -6, 37, 7, 110, -13, 0 },
+ { 0, -5, 32, 7, 113, -12, 0 },
+ { 0, -4, 27, 7, 116, -11, 0 },
+ { 0, -4, 23, 7, 118, -9, 0 },
+ { 0, -3, 18, 7, 120, -7, 0 },
+ { 0, -2, 14, 7, 121, -5, 0 },
+ { 0, -1, 10, 7, 122, -3, 0 },
+ { 0, -1, 7, 7, 122, 0, 0 } },
+ .odd = { { 0, 2, 122, 7, 5, -1, 0 },
+ { 0, -1, 122, 7, 8, -1, 0 },
+ { 0, -4, 122, 7, 12, -2, 0 },
+ { 0, -6, 120, 7, 16, -2, 0 },
+ { 0, -8, 118, 7, 21, -3, 0 },
+ { 0, -10, 117, 7, 25, -4, 0 },
+ { 0, -11, 114, 7, 30, -5, 0 },
+ { 0, -13, 112, 7, 35, -6, 0 },
+ { 0, -14, 109, 7, 40, -7, 0 },
+ { 0, -14, 105, 7, 45, -8, 0 },
+ { 0, -15, 102, 7, 50, -9, 0 },
+ { 0, -15, 98, 7, 55, -10, 0 },
+ { 0, -15, 94, 7, 60, -11, 0 },
+ { 0, -15, 90, 7, 65, -12, 0 },
+ { 0, -14, 85, 7, 70, -13, 0 },
+ { 0, -14, 80, 7, 75, -13, 0 },
+ { 0, -13, 75, 7, 80, -14, 0 },
+ { 0, -13, 70, 7, 85, -14, 0 },
+ { 0, -12, 65, 7, 90, -15, 0 },
+ { 0, -11, 60, 7, 94, -15, 0 },
+ { 0, -10, 55, 7, 98, -15, 0 },
+ { 0, -9, 50, 7, 102, -15, 0 },
+ { 0, -8, 45, 7, 105, -14, 0 },
+ { 0, -7, 40, 7, 109, -14, 0 },
+ { 0, -6, 35, 7, 112, -13, 0 },
+ { 0, -5, 30, 7, 114, -11, 0 },
+ { 0, -4, 25, 7, 117, -10, 0 },
+ { 0, -3, 21, 7, 118, -8, 0 },
+ { 0, -2, 16, 7, 120, -6, 0 },
+ { 0, -2, 12, 7, 122, -4, 0 },
+ { 0, -1, 8, 7, 122, -1, 0 },
+ { 0, -1, 5, 7, 122, 2, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 3, 122, 7, 3, 0, 0 },
+ { 0, 0, 122, 7, 7, -1, 0 },
+ { 0, -3, 122, 7, 10, -1, 0 },
+ { 0, -5, 121, 7, 14, -2, 0 },
+ { 0, -7, 120, 7, 18, -3, 0 },
+ { 0, -9, 118, 7, 23, -4, 0 },
+ { 0, -11, 116, 7, 27, -4, 0 },
+ { 0, -12, 113, 7, 32, -5, 0 },
+ { 0, -13, 110, 7, 37, -6, 0 },
+ { 0, -14, 107, 7, 42, -7, 0 },
+ { 0, -14, 103, 7, 47, -8, 0 },
+ { 0, -15, 100, 7, 52, -9, 0 },
+ { 0, -15, 96, 7, 57, -10, 0 },
+ { 0, -15, 92, 7, 62, -11, 0 },
+ { 0, -14, 86, 7, 68, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 68, 7, 86, -14, 0 },
+ { 0, -11, 62, 7, 92, -15, 0 },
+ { 0, -10, 57, 7, 96, -15, 0 },
+ { 0, -9, 52, 7, 100, -15, 0 },
+ { 0, -8, 47, 7, 103, -14, 0 },
+ { 0, -7, 42, 7, 107, -14, 0 },
+ { 0, -6, 37, 7, 110, -13, 0 },
+ { 0, -5, 32, 7, 113, -12, 0 },
+ { 0, -4, 27, 7, 116, -11, 0 },
+ { 0, -4, 23, 7, 118, -9, 0 },
+ { 0, -3, 18, 7, 120, -7, 0 },
+ { 0, -2, 14, 7, 121, -5, 0 },
+ { 0, -1, 10, 7, 122, -3, 0 },
+ { 0, -1, 7, 7, 122, 0, 0 } },
+ .odd = { { 0, 2, 122, 7, 5, -1, 0 },
+ { 0, -1, 122, 7, 8, -1, 0 },
+ { 0, -4, 122, 7, 12, -2, 0 },
+ { 0, -6, 120, 7, 16, -2, 0 },
+ { 0, -8, 118, 7, 21, -3, 0 },
+ { 0, -10, 117, 7, 25, -4, 0 },
+ { 0, -11, 114, 7, 30, -5, 0 },
+ { 0, -13, 112, 7, 35, -6, 0 },
+ { 0, -14, 109, 7, 40, -7, 0 },
+ { 0, -14, 105, 7, 45, -8, 0 },
+ { 0, -15, 102, 7, 50, -9, 0 },
+ { 0, -15, 98, 7, 55, -10, 0 },
+ { 0, -15, 94, 7, 60, -11, 0 },
+ { 0, -15, 90, 7, 65, -12, 0 },
+ { 0, -14, 85, 7, 70, -13, 0 },
+ { 0, -14, 80, 7, 75, -13, 0 },
+ { 0, -13, 75, 7, 80, -14, 0 },
+ { 0, -13, 70, 7, 85, -14, 0 },
+ { 0, -12, 65, 7, 90, -15, 0 },
+ { 0, -11, 60, 7, 94, -15, 0 },
+ { 0, -10, 55, 7, 98, -15, 0 },
+ { 0, -9, 50, 7, 102, -15, 0 },
+ { 0, -8, 45, 7, 105, -14, 0 },
+ { 0, -7, 40, 7, 109, -14, 0 },
+ { 0, -6, 35, 7, 112, -13, 0 },
+ { 0, -5, 30, 7, 114, -11, 0 },
+ { 0, -4, 25, 7, 117, -10, 0 },
+ { 0, -3, 21, 7, 118, -8, 0 },
+ { 0, -2, 16, 7, 120, -6, 0 },
+ { 0, -2, 12, 7, 122, -4, 0 },
+ { 0, -1, 8, 7, 122, -1, 0 },
+ { 0, -1, 5, 7, 122, 2, 0 } } },
+ .ptrn_arr = { { 0xffffffff, 0xffffffff } },
+ .sample_patrn_length = 66,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 2) = 0.941176 */
+ .hor_phase_arr = {
+ .even = { { -1, 6, 118, 7, 6, -1, 0 },
+ { 0, 0, 117, 7, 13, -2, 0 },
+ { 0, -5, 116, 7, 21, -4, 0 },
+ { 0, -9, 113, 7, 30, -6, 0 },
+ { 0, -12, 109, 7, 39, -8, 0 },
+ { 0, -13, 102, 7, 49, -10, 0 },
+ { 0, -14, 94, 7, 59, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 59, 7, 94, -14, 0 },
+ { 0, -10, 49, 7, 102, -13, 0 },
+ { 0, -8, 39, 7, 109, -12, 0 },
+ { 0, -6, 30, 7, 113, -9, 0 },
+ { 0, -4, 21, 7, 116, -5, 0 },
+ { 0, -2, 13, 7, 117, 0, 0 } },
+ .odd = { { -1, 3, 118, 7, 10, -2, 0 },
+ { 0, -3, 117, 7, 17, -3, 0 },
+ { 0, -7, 114, 7, 26, -5, 0 },
+ { 0, -10, 110, 7, 35, -7, 0 },
+ { 0, -13, 106, 7, 44, -9, 0 },
+ { 0, -14, 99, 7, 54, -11, 0 },
+ { 0, -14, 90, 7, 64, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 64, 7, 90, -14, 0 },
+ { 0, -11, 54, 7, 99, -14, 0 },
+ { 0, -9, 44, 7, 106, -13, 0 },
+ { 0, -7, 35, 7, 110, -10, 0 },
+ { 0, -5, 26, 7, 114, -7, 0 },
+ { 0, -3, 17, 7, 117, -3, 0 },
+ { 0, -2, 10, 7, 118, 3, -1 } } },
+ .ver_phase_arr = {
+ .even = { { -1, 6, 118, 7, 6, -1, 0 },
+ { 0, 0, 117, 7, 13, -2, 0 },
+ { 0, -5, 116, 7, 21, -4, 0 },
+ { 0, -9, 113, 7, 30, -6, 0 },
+ { 0, -12, 109, 7, 39, -8, 0 },
+ { 0, -13, 102, 7, 49, -10, 0 },
+ { 0, -14, 94, 7, 59, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 59, 7, 94, -14, 0 },
+ { 0, -10, 49, 7, 102, -13, 0 },
+ { 0, -8, 39, 7, 109, -12, 0 },
+ { 0, -6, 30, 7, 113, -9, 0 },
+ { 0, -4, 21, 7, 116, -5, 0 },
+ { 0, -2, 13, 7, 117, 0, 0 } },
+ .odd = { { -1, 3, 118, 7, 10, -2, 0 },
+ { 0, -3, 117, 7, 17, -3, 0 },
+ { 0, -7, 114, 7, 26, -5, 0 },
+ { 0, -10, 110, 7, 35, -7, 0 },
+ { 0, -13, 106, 7, 44, -9, 0 },
+ { 0, -14, 99, 7, 54, -11, 0 },
+ { 0, -14, 90, 7, 64, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 64, 7, 90, -14, 0 },
+ { 0, -11, 54, 7, 99, -14, 0 },
+ { 0, -9, 44, 7, 106, -13, 0 },
+ { 0, -7, 35, 7, 110, -10, 0 },
+ { 0, -5, 26, 7, 114, -7, 0 },
+ { 0, -3, 17, 7, 117, -3, 0 },
+ { 0, -2, 10, 7, 118, 3, -1 } } },
+ .ptrn_arr = { { 0xffffffff } },
+ .sample_patrn_length = 34,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 3) = 0.914286 */
+ .hor_phase_arr = {
+ .even = { { -2, 9, 114, 7, 9, -2, 0 },
+ { -1, 0, 114, 7, 20, -5, 0 },
+ { 0, -7, 110, 7, 32, -7, 0 },
+ { 0, -11, 103, 7, 46, -10, 0 },
+ { 0, -13, 93, 7, 60, -12, 0 },
+ { 0, -14, 82, 7, 74, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 55, 7, 97, -13, 0 },
+ { 0, -9, 41, 7, 106, -10, 0 },
+ { 0, -6, 28, 7, 111, -5, 0 },
+ { 0, -4, 16, 7, 114, 3, -1 },
+ { -2, 6, 115, 7, 12, -3, 0 },
+ { 0, -2, 111, 7, 24, -5, 0 },
+ { 0, -8, 107, 7, 37, -8, 0 },
+ { 0, -12, 100, 7, 51, -11, 0 },
+ { 0, -14, 90, 7, 65, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 65, 7, 90, -14, 0 },
+ { 0, -11, 51, 7, 100, -12, 0 },
+ { 0, -8, 37, 7, 107, -8, 0 },
+ { 0, -5, 24, 7, 111, -2, 0 },
+ { 0, -3, 12, 7, 115, 6, -2 },
+ { -1, 3, 114, 7, 16, -4, 0 },
+ { 0, -5, 111, 7, 28, -6, 0 },
+ { 0, -10, 106, 7, 41, -9, 0 },
+ { 0, -13, 97, 7, 55, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 74, 7, 82, -14, 0 },
+ { 0, -12, 60, 7, 93, -13, 0 },
+ { 0, -10, 46, 7, 103, -11, 0 },
+ { 0, -7, 32, 7, 110, -7, 0 },
+ { 0, -5, 20, 7, 114, 0, -1 } },
+ .odd = { { -1, 4, 114, 7, 14, -3, 0 },
+ { 0, -4, 112, 7, 26, -6, 0 },
+ { 0, -9, 107, 7, 39, -9, 0 },
+ { 0, -13, 99, 7, 53, -11, 0 },
+ { 0, -14, 88, 7, 67, -13, 0 },
+ { 0, -14, 76, 7, 80, -14, 0 },
+ { 0, -13, 62, 7, 93, -14, 0 },
+ { 0, -10, 48, 7, 102, -12, 0 },
+ { 0, -8, 35, 7, 109, -8, 0 },
+ { 0, -5, 22, 7, 112, -1, 0 },
+ { 0, -3, 11, 7, 115, 7, -2 },
+ { -1, 1, 114, 7, 18, -4, 0 },
+ { 0, -6, 111, 7, 30, -7, 0 },
+ { 0, -10, 103, 7, 44, -9, 0 },
+ { 0, -13, 95, 7, 58, -12, 0 },
+ { 0, -14, 85, 7, 71, -14, 0 },
+ { 0, -14, 71, 7, 85, -14, 0 },
+ { 0, -12, 58, 7, 95, -13, 0 },
+ { 0, -9, 44, 7, 103, -10, 0 },
+ { 0, -7, 30, 7, 111, -6, 0 },
+ { 0, -4, 18, 7, 114, 1, -1 },
+ { -2, 7, 115, 7, 11, -3, 0 },
+ { 0, -1, 112, 7, 22, -5, 0 },
+ { 0, -8, 109, 7, 35, -8, 0 },
+ { 0, -12, 102, 7, 48, -10, 0 },
+ { 0, -14, 93, 7, 62, -13, 0 },
+ { 0, -14, 80, 7, 76, -14, 0 },
+ { 0, -13, 67, 7, 88, -14, 0 },
+ { 0, -11, 53, 7, 99, -13, 0 },
+ { 0, -9, 39, 7, 107, -9, 0 },
+ { 0, -6, 26, 7, 112, -4, 0 },
+ { 0, -3, 14, 7, 114, 4, -1 } } },
+ .ver_phase_arr = {
+ .even = { { -2, 9, 114, 7, 9, -2, 0 },
+ { -1, 0, 114, 7, 20, -5, 0 },
+ { 0, -7, 110, 7, 32, -7, 0 },
+ { 0, -11, 103, 7, 46, -10, 0 },
+ { 0, -13, 93, 7, 60, -12, 0 },
+ { 0, -14, 82, 7, 74, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 55, 7, 97, -13, 0 },
+ { 0, -9, 41, 7, 106, -10, 0 },
+ { 0, -6, 28, 7, 111, -5, 0 },
+ { 0, -4, 16, 7, 114, 3, -1 },
+ { -2, 6, 115, 7, 12, -3, 0 },
+ { 0, -2, 111, 7, 24, -5, 0 },
+ { 0, -8, 107, 7, 37, -8, 0 },
+ { 0, -12, 100, 7, 51, -11, 0 },
+ { 0, -14, 90, 7, 65, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 65, 7, 90, -14, 0 },
+ { 0, -11, 51, 7, 100, -12, 0 },
+ { 0, -8, 37, 7, 107, -8, 0 },
+ { 0, -5, 24, 7, 111, -2, 0 },
+ { 0, -3, 12, 7, 115, 6, -2 },
+ { -1, 3, 114, 7, 16, -4, 0 },
+ { 0, -5, 111, 7, 28, -6, 0 },
+ { 0, -10, 106, 7, 41, -9, 0 },
+ { 0, -13, 97, 7, 55, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 74, 7, 82, -14, 0 },
+ { 0, -12, 60, 7, 93, -13, 0 },
+ { 0, -10, 46, 7, 103, -11, 0 },
+ { 0, -7, 32, 7, 110, -7, 0 },
+ { 0, -5, 20, 7, 114, 0, -1 } },
+ .odd = { { -1, 4, 114, 7, 14, -3, 0 },
+ { 0, -4, 112, 7, 26, -6, 0 },
+ { 0, -9, 107, 7, 39, -9, 0 },
+ { 0, -13, 99, 7, 53, -11, 0 },
+ { 0, -14, 88, 7, 67, -13, 0 },
+ { 0, -14, 76, 7, 80, -14, 0 },
+ { 0, -13, 62, 7, 93, -14, 0 },
+ { 0, -10, 48, 7, 102, -12, 0 },
+ { 0, -8, 35, 7, 109, -8, 0 },
+ { 0, -5, 22, 7, 112, -1, 0 },
+ { 0, -3, 11, 7, 115, 7, -2 },
+ { -1, 1, 114, 7, 18, -4, 0 },
+ { 0, -6, 111, 7, 30, -7, 0 },
+ { 0, -10, 103, 7, 44, -9, 0 },
+ { 0, -13, 95, 7, 58, -12, 0 },
+ { 0, -14, 85, 7, 71, -14, 0 },
+ { 0, -14, 71, 7, 85, -14, 0 },
+ { 0, -12, 58, 7, 95, -13, 0 },
+ { 0, -9, 44, 7, 103, -10, 0 },
+ { 0, -7, 30, 7, 111, -6, 0 },
+ { 0, -4, 18, 7, 114, 1, -1 },
+ { -2, 7, 115, 7, 11, -3, 0 },
+ { 0, -1, 112, 7, 22, -5, 0 },
+ { 0, -8, 109, 7, 35, -8, 0 },
+ { 0, -12, 102, 7, 48, -10, 0 },
+ { 0, -14, 93, 7, 62, -13, 0 },
+ { 0, -14, 80, 7, 76, -14, 0 },
+ { 0, -13, 67, 7, 88, -14, 0 },
+ { 0, -11, 53, 7, 99, -13, 0 },
+ { 0, -9, 39, 7, 107, -9, 0 },
+ { 0, -6, 26, 7, 112, -4, 0 },
+ { 0, -3, 14, 7, 114, 4, -1 } } },
+ .ptrn_arr = { { 0xff3fffff, 0xffff9fff, 0xf } },
+ .sample_patrn_length = 70,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 4) = 0.888889 */
+ .hor_phase_arr = {
+ .even = { { -3, 12, 110, 7, 12, -3, 0 },
+ { -1, 0, 110, 7, 26, -7, 0 },
+ { 0, -8, 103, 7, 43, -10, 0 },
+ { 0, -12, 92, 7, 61, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 61, 7, 92, -12, 0 },
+ { 0, -10, 43, 7, 103, -8, 0 },
+ { 0, -7, 26, 7, 110, 0, -1 } },
+ .odd = { { -2, 5, 111, 7, 19, -5, 0 },
+ { 0, -4, 106, 7, 34, -8, 0 },
+ { 0, -11, 98, 7, 52, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 52, 7, 98, -11, 0 },
+ { 0, -8, 34, 7, 106, -4, 0 },
+ { 0, -5, 19, 7, 111, 5, -2 } } },
+ .ver_phase_arr = {
+ .even = { { -3, 12, 110, 7, 12, -3, 0 },
+ { -1, 0, 110, 7, 26, -7, 0 },
+ { 0, -8, 103, 7, 43, -10, 0 },
+ { 0, -12, 92, 7, 61, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 61, 7, 92, -12, 0 },
+ { 0, -10, 43, 7, 103, -8, 0 },
+ { 0, -7, 26, 7, 110, 0, -1 } },
+ .odd = { { -2, 5, 111, 7, 19, -5, 0 },
+ { 0, -4, 106, 7, 34, -8, 0 },
+ { 0, -11, 98, 7, 52, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 52, 7, 98, -11, 0 },
+ { 0, -8, 34, 7, 106, -4, 0 },
+ { 0, -5, 19, 7, 111, 5, -2 } } },
+ .ptrn_arr = { { 0xffff } },
+ .sample_patrn_length = 18,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 5) = 0.864865 */
+ .hor_phase_arr = {
+ .even = { { -5, 14, 110, 7, 14, -5, 0 },
+ { -1, 0, 106, 7, 32, -9, 0 },
+ { 0, -9, 96, 7, 53, -12, 0 },
+ { 0, -13, 81, 7, 73, -13, 0 },
+ { 0, -13, 61, 7, 91, -11, 0 },
+ { 0, -10, 40, 7, 103, -4, -1 },
+ { 0, -6, 21, 7, 108, 8, -3 },
+ { -3, 5, 108, 7, 25, -7, 0 },
+ { 0, -6, 101, 7, 44, -11, 0 },
+ { 0, -12, 88, 7, 65, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 49, 7, 98, -8, 0 },
+ { 0, -8, 28, 7, 108, 2, -2 },
+ { -4, 11, 108, 7, 18, -5, 0 },
+ { -1, -2, 104, 7, 36, -9, 0 },
+ { 0, -10, 93, 7, 57, -12, 0 },
+ { 0, -13, 77, 7, 77, -13, 0 },
+ { 0, -12, 57, 7, 93, -10, 0 },
+ { 0, -9, 36, 7, 104, -2, -1 },
+ { 0, -5, 18, 7, 108, 11, -4 },
+ { -2, 2, 108, 7, 28, -8, 0 },
+ { 0, -8, 98, 7, 49, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 65, 7, 88, -12, 0 },
+ { 0, -11, 44, 7, 101, -6, 0 },
+ { 0, -7, 25, 7, 108, 5, -3 },
+ { -3, 8, 108, 7, 21, -6, 0 },
+ { -1, -4, 103, 7, 40, -10, 0 },
+ { 0, -11, 91, 7, 61, -13, 0 },
+ { 0, -13, 73, 7, 81, -13, 0 },
+ { 0, -12, 53, 7, 96, -9, 0 },
+ { 0, -9, 32, 7, 106, 0, -1 } },
+ .odd = { { -3, 7, 108, 7, 23, -7, 0 },
+ { 0, -5, 101, 7, 42, -10, 0 },
+ { 0, -12, 90, 7, 63, -13, 0 },
+ { 0, -13, 71, 7, 83, -13, 0 },
+ { 0, -12, 51, 7, 97, -8, 0 },
+ { 0, -8, 30, 7, 107, 1, -2 },
+ { -4, 13, 108, 7, 16, -5, 0 },
+ { -1, -1, 105, 7, 34, -9, 0 },
+ { 0, -10, 95, 7, 55, -12, 0 },
+ { 0, -13, 79, 7, 75, -13, 0 },
+ { 0, -13, 59, 7, 93, -11, 0 },
+ { 0, -10, 38, 7, 104, -3, -1 },
+ { 0, -6, 19, 7, 110, 9, -4 },
+ { -2, 4, 106, 7, 27, -7, 0 },
+ { 0, -7, 99, 7, 47, -11, 0 },
+ { 0, -12, 86, 7, 67, -13, 0 },
+ { 0, -13, 67, 7, 86, -12, 0 },
+ { 0, -11, 47, 7, 99, -7, 0 },
+ { 0, -7, 27, 7, 106, 4, -2 },
+ { -4, 9, 110, 7, 19, -6, 0 },
+ { -1, -3, 104, 7, 38, -10, 0 },
+ { 0, -11, 93, 7, 59, -13, 0 },
+ { 0, -13, 75, 7, 79, -13, 0 },
+ { 0, -12, 55, 7, 95, -10, 0 },
+ { 0, -9, 34, 7, 105, -1, -1 },
+ { 0, -5, 16, 7, 108, 13, -4 },
+ { -2, 1, 107, 7, 30, -8, 0 },
+ { 0, -8, 97, 7, 51, -12, 0 },
+ { 0, -13, 83, 7, 71, -13, 0 },
+ { 0, -13, 63, 7, 90, -12, 0 },
+ { 0, -10, 42, 7, 101, -5, 0 },
+ { 0, -7, 23, 7, 108, 7, -3 } } },
+ .ver_phase_arr = {
+ .even = { { -5, 14, 110, 7, 14, -5, 0 },
+ { -1, 0, 106, 7, 32, -9, 0 },
+ { 0, -9, 96, 7, 53, -12, 0 },
+ { 0, -13, 81, 7, 73, -13, 0 },
+ { 0, -13, 61, 7, 91, -11, 0 },
+ { 0, -10, 40, 7, 103, -4, -1 },
+ { 0, -6, 21, 7, 108, 8, -3 },
+ { -3, 5, 108, 7, 25, -7, 0 },
+ { 0, -6, 101, 7, 44, -11, 0 },
+ { 0, -12, 88, 7, 65, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 49, 7, 98, -8, 0 },
+ { 0, -8, 28, 7, 108, 2, -2 },
+ { -4, 11, 108, 7, 18, -5, 0 },
+ { -1, -2, 104, 7, 36, -9, 0 },
+ { 0, -10, 93, 7, 57, -12, 0 },
+ { 0, -13, 77, 7, 77, -13, 0 },
+ { 0, -12, 57, 7, 93, -10, 0 },
+ { 0, -9, 36, 7, 104, -2, -1 },
+ { 0, -5, 18, 7, 108, 11, -4 },
+ { -2, 2, 108, 7, 28, -8, 0 },
+ { 0, -8, 98, 7, 49, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 65, 7, 88, -12, 0 },
+ { 0, -11, 44, 7, 101, -6, 0 },
+ { 0, -7, 25, 7, 108, 5, -3 },
+ { -3, 8, 108, 7, 21, -6, 0 },
+ { -1, -4, 103, 7, 40, -10, 0 },
+ { 0, -11, 91, 7, 61, -13, 0 },
+ { 0, -13, 73, 7, 81, -13, 0 },
+ { 0, -12, 53, 7, 96, -9, 0 },
+ { 0, -9, 32, 7, 106, 0, -1 } },
+ .odd = { { -3, 7, 108, 7, 23, -7, 0 },
+ { 0, -5, 101, 7, 42, -10, 0 },
+ { 0, -12, 90, 7, 63, -13, 0 },
+ { 0, -13, 71, 7, 83, -13, 0 },
+ { 0, -12, 51, 7, 97, -8, 0 },
+ { 0, -8, 30, 7, 107, 1, -2 },
+ { -4, 13, 108, 7, 16, -5, 0 },
+ { -1, -1, 105, 7, 34, -9, 0 },
+ { 0, -10, 95, 7, 55, -12, 0 },
+ { 0, -13, 79, 7, 75, -13, 0 },
+ { 0, -13, 59, 7, 93, -11, 0 },
+ { 0, -10, 38, 7, 104, -3, -1 },
+ { 0, -6, 19, 7, 110, 9, -4 },
+ { -2, 4, 106, 7, 27, -7, 0 },
+ { 0, -7, 99, 7, 47, -11, 0 },
+ { 0, -12, 86, 7, 67, -13, 0 },
+ { 0, -13, 67, 7, 86, -12, 0 },
+ { 0, -11, 47, 7, 99, -7, 0 },
+ { 0, -7, 27, 7, 106, 4, -2 },
+ { -4, 9, 110, 7, 19, -6, 0 },
+ { -1, -3, 104, 7, 38, -10, 0 },
+ { 0, -11, 93, 7, 59, -13, 0 },
+ { 0, -13, 75, 7, 79, -13, 0 },
+ { 0, -12, 55, 7, 95, -10, 0 },
+ { 0, -9, 34, 7, 105, -1, -1 },
+ { 0, -5, 16, 7, 108, 13, -4 },
+ { -2, 1, 107, 7, 30, -8, 0 },
+ { 0, -8, 97, 7, 51, -12, 0 },
+ { 0, -13, 83, 7, 71, -13, 0 },
+ { 0, -13, 63, 7, 90, -12, 0 },
+ { 0, -10, 42, 7, 101, -5, 0 },
+ { 0, -7, 23, 7, 108, 7, -3 } } },
+ .ptrn_arr = { { 0xcfff9fff, 0xf3ffe7ff, 0xff } },
+ .sample_patrn_length = 74,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 6) = 0.842105 */
+ .hor_phase_arr = {
+ .even = { { -6, 17, 106, 7, 17, -6, 0 },
+ { -2, 0, 102, 7, 38, -10, 0 },
+ { 0, -10, 89, 7, 62, -13, 0 },
+ { 0, -13, 69, 7, 83, -11, 0 },
+ { 0, -11, 46, 7, 98, -4, -1 },
+ { 0, -7, 23, 7, 106, 10, -4 },
+ { -3, 5, 104, 7, 31, -9, 0 },
+ { 0, -7, 93, 7, 54, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 54, 7, 93, -7, 0 },
+ { 0, -9, 31, 7, 104, 5, -3 },
+ { -4, 10, 106, 7, 23, -7, 0 },
+ { -1, -4, 98, 7, 46, -11, 0 },
+ { 0, -11, 83, 7, 69, -13, 0 },
+ { 0, -13, 62, 7, 89, -10, 0 },
+ { 0, -10, 38, 7, 102, 0, -2 } },
+ .odd = { { -4, 8, 105, 7, 27, -8, 0 },
+ { 0, -6, 96, 7, 50, -12, 0 },
+ { 0, -12, 80, 7, 73, -13, 0 },
+ { 0, -13, 58, 7, 92, -9, 0 },
+ { 0, -9, 34, 7, 103, 2, -2 },
+ { -5, 13, 107, 7, 20, -7, 0 },
+ { -1, -2, 100, 7, 42, -11, 0 },
+ { 0, -11, 87, 7, 65, -13, 0 },
+ { 0, -13, 65, 7, 87, -11, 0 },
+ { 0, -11, 42, 7, 100, -2, -1 },
+ { 0, -7, 20, 7, 107, 13, -5 },
+ { -2, 2, 103, 7, 34, -9, 0 },
+ { 0, -9, 92, 7, 58, -13, 0 },
+ { 0, -13, 73, 7, 80, -12, 0 },
+ { 0, -12, 50, 7, 96, -6, 0 },
+ { 0, -8, 27, 7, 105, 8, -4 } } },
+ .ver_phase_arr = {
+ .even = { { -6, 17, 106, 7, 17, -6, 0 },
+ { -2, 0, 102, 7, 38, -10, 0 },
+ { 0, -10, 89, 7, 62, -13, 0 },
+ { 0, -13, 69, 7, 83, -11, 0 },
+ { 0, -11, 46, 7, 98, -4, -1 },
+ { 0, -7, 23, 7, 106, 10, -4 },
+ { -3, 5, 104, 7, 31, -9, 0 },
+ { 0, -7, 93, 7, 54, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 54, 7, 93, -7, 0 },
+ { 0, -9, 31, 7, 104, 5, -3 },
+ { -4, 10, 106, 7, 23, -7, 0 },
+ { -1, -4, 98, 7, 46, -11, 0 },
+ { 0, -11, 83, 7, 69, -13, 0 },
+ { 0, -13, 62, 7, 89, -10, 0 },
+ { 0, -10, 38, 7, 102, 0, -2 } },
+ .odd = { { -4, 8, 105, 7, 27, -8, 0 },
+ { 0, -6, 96, 7, 50, -12, 0 },
+ { 0, -12, 80, 7, 73, -13, 0 },
+ { 0, -13, 58, 7, 92, -9, 0 },
+ { 0, -9, 34, 7, 103, 2, -2 },
+ { -5, 13, 107, 7, 20, -7, 0 },
+ { -1, -2, 100, 7, 42, -11, 0 },
+ { 0, -11, 87, 7, 65, -13, 0 },
+ { 0, -13, 65, 7, 87, -11, 0 },
+ { 0, -11, 42, 7, 100, -2, -1 },
+ { 0, -7, 20, 7, 107, 13, -5 },
+ { -2, 2, 103, 7, 34, -9, 0 },
+ { 0, -9, 92, 7, 58, -13, 0 },
+ { 0, -13, 73, 7, 80, -12, 0 },
+ { 0, -12, 50, 7, 96, -6, 0 },
+ { 0, -8, 27, 7, 105, 8, -4 } } },
+ .ptrn_arr = { { 0xfcffe7ff, 0xf } },
+ .sample_patrn_length = 38,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 7) = 0.820513 */
+ .hor_phase_arr = {
+ .even = { { -7, 19, 104, 7, 19, -7, 0 },
+ { -2, 0, 98, 7, 43, -11, 0 },
+ { 0, -10, 81, 7, 69, -12, 0 },
+ { 0, -12, 58, 7, 89, -7, 0 },
+ { 0, -10, 32, 7, 103, 7, -4 },
+ { -5, 10, 103, 7, 29, -9, 0 },
+ { -1, -6, 93, 7, 54, -12, 0 },
+ { 0, -12, 72, 7, 79, -11, 0 },
+ { 0, -12, 47, 7, 97, -2, -2 },
+ { 0, -8, 22, 7, 104, 16, -6 },
+ { -3, 2, 100, 7, 40, -11, 0 },
+ { 0, -9, 84, 7, 65, -12, 0 },
+ { 0, -13, 62, 7, 87, -8, 0 },
+ { 0, -10, 36, 7, 100, 5, -3 },
+ { -5, 13, 103, 7, 25, -8, 0 },
+ { -1, -4, 94, 7, 51, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 51, 7, 94, -4, -1 },
+ { 0, -8, 25, 7, 103, 13, -5 },
+ { -3, 5, 100, 7, 36, -10, 0 },
+ { 0, -8, 87, 7, 62, -13, 0 },
+ { 0, -12, 65, 7, 84, -9, 0 },
+ { 0, -11, 40, 7, 100, 2, -3 },
+ { -6, 16, 104, 7, 22, -8, 0 },
+ { -2, -2, 97, 7, 47, -12, 0 },
+ { 0, -11, 79, 7, 72, -12, 0 },
+ { 0, -12, 54, 7, 93, -6, -1 },
+ { 0, -9, 29, 7, 103, 10, -5 },
+ { -4, 7, 103, 7, 32, -10, 0 },
+ { 0, -7, 89, 7, 58, -12, 0 },
+ { 0, -12, 69, 7, 81, -10, 0 },
+ { 0, -11, 43, 7, 98, 0, -2 } },
+ .odd = { { -4, 9, 101, 7, 31, -9, 0 },
+ { -1, -6, 91, 7, 56, -12, 0 },
+ { 0, -12, 71, 7, 80, -11, 0 },
+ { 0, -11, 45, 7, 97, -1, -2 },
+ { 0, -7, 20, 7, 105, 17, -7 },
+ { -3, 1, 100, 7, 41, -11, 0 },
+ { 0, -10, 83, 7, 67, -12, 0 },
+ { 0, -13, 60, 7, 89, -8, 0 },
+ { 0, -10, 34, 7, 102, 6, -4 },
+ { -5, 11, 104, 7, 27, -9, 0 },
+ { -1, -5, 94, 7, 52, -12, 0 },
+ { 0, -12, 74, 7, 77, -11, 0 },
+ { 0, -12, 49, 7, 95, -3, -1 },
+ { 0, -8, 24, 7, 104, 14, -6 },
+ { -3, 3, 100, 7, 38, -10, 0 },
+ { 0, -9, 87, 7, 63, -13, 0 },
+ { 0, -13, 63, 7, 87, -9, 0 },
+ { 0, -10, 38, 7, 100, 3, -3 },
+ { -6, 14, 104, 7, 24, -8, 0 },
+ { -1, -3, 95, 7, 49, -12, 0 },
+ { 0, -11, 77, 7, 74, -12, 0 },
+ { 0, -12, 52, 7, 94, -5, -1 },
+ { 0, -9, 27, 7, 104, 11, -5 },
+ { -4, 6, 102, 7, 34, -10, 0 },
+ { 0, -8, 89, 7, 60, -13, 0 },
+ { 0, -12, 67, 7, 83, -10, 0 },
+ { 0, -11, 41, 7, 100, 1, -3 },
+ { -7, 17, 105, 7, 20, -7, 0 },
+ { -2, -1, 97, 7, 45, -11, 0 },
+ { 0, -11, 80, 7, 71, -12, 0 },
+ { 0, -12, 56, 7, 91, -6, -1 },
+ { 0, -9, 31, 7, 101, 9, -4 } } },
+ .ver_phase_arr = {
+ .even = { { -7, 19, 104, 7, 19, -7, 0 },
+ { -2, 0, 98, 7, 43, -11, 0 },
+ { 0, -10, 81, 7, 69, -12, 0 },
+ { 0, -12, 58, 7, 89, -7, 0 },
+ { 0, -10, 32, 7, 103, 7, -4 },
+ { -5, 10, 103, 7, 29, -9, 0 },
+ { -1, -6, 93, 7, 54, -12, 0 },
+ { 0, -12, 72, 7, 79, -11, 0 },
+ { 0, -12, 47, 7, 97, -2, -2 },
+ { 0, -8, 22, 7, 104, 16, -6 },
+ { -3, 2, 100, 7, 40, -11, 0 },
+ { 0, -9, 84, 7, 65, -12, 0 },
+ { 0, -13, 62, 7, 87, -8, 0 },
+ { 0, -10, 36, 7, 100, 5, -3 },
+ { -5, 13, 103, 7, 25, -8, 0 },
+ { -1, -4, 94, 7, 51, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 51, 7, 94, -4, -1 },
+ { 0, -8, 25, 7, 103, 13, -5 },
+ { -3, 5, 100, 7, 36, -10, 0 },
+ { 0, -8, 87, 7, 62, -13, 0 },
+ { 0, -12, 65, 7, 84, -9, 0 },
+ { 0, -11, 40, 7, 100, 2, -3 },
+ { -6, 16, 104, 7, 22, -8, 0 },
+ { -2, -2, 97, 7, 47, -12, 0 },
+ { 0, -11, 79, 7, 72, -12, 0 },
+ { 0, -12, 54, 7, 93, -6, -1 },
+ { 0, -9, 29, 7, 103, 10, -5 },
+ { -4, 7, 103, 7, 32, -10, 0 },
+ { 0, -7, 89, 7, 58, -12, 0 },
+ { 0, -12, 69, 7, 81, -10, 0 },
+ { 0, -11, 43, 7, 98, 0, -2 } },
+ .odd = { { -4, 9, 101, 7, 31, -9, 0 },
+ { -1, -6, 91, 7, 56, -12, 0 },
+ { 0, -12, 71, 7, 80, -11, 0 },
+ { 0, -11, 45, 7, 97, -1, -2 },
+ { 0, -7, 20, 7, 105, 17, -7 },
+ { -3, 1, 100, 7, 41, -11, 0 },
+ { 0, -10, 83, 7, 67, -12, 0 },
+ { 0, -13, 60, 7, 89, -8, 0 },
+ { 0, -10, 34, 7, 102, 6, -4 },
+ { -5, 11, 104, 7, 27, -9, 0 },
+ { -1, -5, 94, 7, 52, -12, 0 },
+ { 0, -12, 74, 7, 77, -11, 0 },
+ { 0, -12, 49, 7, 95, -3, -1 },
+ { 0, -8, 24, 7, 104, 14, -6 },
+ { -3, 3, 100, 7, 38, -10, 0 },
+ { 0, -9, 87, 7, 63, -13, 0 },
+ { 0, -13, 63, 7, 87, -9, 0 },
+ { 0, -10, 38, 7, 100, 3, -3 },
+ { -6, 14, 104, 7, 24, -8, 0 },
+ { -1, -3, 95, 7, 49, -12, 0 },
+ { 0, -11, 77, 7, 74, -12, 0 },
+ { 0, -12, 52, 7, 94, -5, -1 },
+ { 0, -9, 27, 7, 104, 11, -5 },
+ { -4, 6, 102, 7, 34, -10, 0 },
+ { 0, -8, 89, 7, 60, -13, 0 },
+ { 0, -12, 67, 7, 83, -10, 0 },
+ { 0, -11, 41, 7, 100, 1, -3 },
+ { -7, 17, 105, 7, 20, -7, 0 },
+ { -2, -1, 97, 7, 45, -11, 0 },
+ { 0, -11, 80, 7, 71, -12, 0 },
+ { 0, -12, 56, 7, 91, -6, -1 },
+ { 0, -9, 31, 7, 101, 9, -4 } } },
+ .ptrn_arr = { { 0xff9ff3ff, 0xff3fe7fc, 0xff9 } },
+ .sample_patrn_length = 78,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 8) = 0.8 */
+ .hor_phase_arr = {
+ .even = { { -8, 21, 102, 7, 21, -8, 0 },
+ { -3, 0, 95, 7, 48, -12, 0 },
+ { 0, -11, 75, 7, 75, -11, 0 },
+ { 0, -12, 48, 7, 95, 0, -3 } },
+ .odd = { { -5, 9, 100, 7, 34, -10, 0 },
+ { -1, -7, 86, 7, 62, -12, 0 },
+ { 0, -12, 62, 7, 86, -7, -1 },
+ { 0, -10, 34, 7, 100, 9, -5 } } },
+ .ver_phase_arr = {
+ .even = { { -8, 21, 102, 7, 21, -8, 0 },
+ { -3, 0, 95, 7, 48, -12, 0 },
+ { 0, -11, 75, 7, 75, -11, 0 },
+ { 0, -12, 48, 7, 95, 0, -3 } },
+ .odd = { { -5, 9, 100, 7, 34, -10, 0 },
+ { -1, -7, 86, 7, 62, -12, 0 },
+ { 0, -12, 62, 7, 86, -7, -1 },
+ { 0, -10, 34, 7, 100, 9, -5 } } },
+ .ptrn_arr = { { 0xff } },
+ .sample_patrn_length = 10,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 9) = 0.780488 */
+ .hor_phase_arr = {
+ .even = { { -9, 23, 100, 7, 23, -9, 0 },
+ { -3, 0, 91, 7, 52, -12, 0 },
+ { 0, -11, 68, 7, 80, -8, -1 },
+ { 0, -11, 39, 7, 96, 9, -5 },
+ { -6, 12, 98, 7, 35, -11, 0 },
+ { -1, -6, 81, 7, 65, -11, 0 },
+ { 0, -12, 55, 7, 89, -2, -2 },
+ { 0, -9, 26, 7, 99, 20, -8 },
+ { -4, 2, 93, 7, 49, -12, 0 },
+ { 0, -10, 71, 7, 76, -9, 0 },
+ { 0, -11, 42, 7, 95, 7, -5 },
+ { -7, 14, 99, 7, 32, -10, 0 },
+ { -1, -5, 84, 7, 62, -12, 0 },
+ { 0, -12, 59, 7, 87, -4, -2 },
+ { 0, -10, 29, 7, 99, 17, -7 },
+ { -4, 4, 95, 7, 45, -12, 0 },
+ { 0, -9, 72, 7, 74, -9, 0 },
+ { 0, -12, 45, 7, 95, 4, -4 },
+ { -7, 17, 99, 7, 29, -10, 0 },
+ { -2, -4, 87, 7, 59, -12, 0 },
+ { 0, -12, 62, 7, 84, -5, -1 },
+ { 0, -10, 32, 7, 99, 14, -7 },
+ { -5, 7, 95, 7, 42, -11, 0 },
+ { 0, -9, 76, 7, 71, -10, 0 },
+ { 0, -12, 49, 7, 93, 2, -4 },
+ { -8, 20, 99, 7, 26, -9, 0 },
+ { -2, -2, 89, 7, 55, -12, 0 },
+ { 0, -11, 65, 7, 81, -6, -1 },
+ { 0, -11, 35, 7, 98, 12, -6 },
+ { -5, 9, 96, 7, 39, -11, 0 },
+ { -1, -8, 80, 7, 68, -11, 0 },
+ { 0, -12, 52, 7, 91, 0, -3 } },
+ .odd = { { -6, 10, 98, 7, 37, -11, 0 },
+ { -1, -7, 81, 7, 66, -11, 0 },
+ { 0, -12, 54, 7, 90, -1, -3 },
+ { 0, -9, 24, 7, 100, 21, -8 },
+ { -3, 1, 92, 7, 50, -12, 0 },
+ { 0, -10, 69, 7, 78, -8, -1 },
+ { 0, -11, 40, 7, 96, 8, -5 },
+ { -6, 13, 97, 7, 34, -10, 0 },
+ { -1, -6, 83, 7, 63, -11, 0 },
+ { 0, -12, 57, 7, 88, -3, -2 },
+ { 0, -9, 27, 7, 100, 18, -8 },
+ { -4, 3, 94, 7, 47, -12, 0 },
+ { 0, -10, 72, 7, 75, -9, 0 },
+ { 0, -11, 44, 7, 95, 5, -5 },
+ { -7, 16, 98, 7, 31, -10, 0 },
+ { -2, -4, 86, 7, 60, -12, 0 },
+ { 0, -12, 60, 7, 86, -4, -2 },
+ { 0, -10, 31, 7, 98, 16, -7 },
+ { -5, 5, 95, 7, 44, -11, 0 },
+ { 0, -9, 75, 7, 72, -10, 0 },
+ { 0, -12, 47, 7, 94, 3, -4 },
+ { -8, 18, 100, 7, 27, -9, 0 },
+ { -2, -3, 88, 7, 57, -12, 0 },
+ { 0, -11, 63, 7, 83, -6, -1 },
+ { 0, -10, 34, 7, 97, 13, -6 },
+ { -5, 8, 96, 7, 40, -11, 0 },
+ { -1, -8, 78, 7, 69, -10, 0 },
+ { 0, -12, 50, 7, 92, 1, -3 },
+ { -8, 21, 100, 7, 24, -9, 0 },
+ { -3, -1, 90, 7, 54, -12, 0 },
+ { 0, -11, 66, 7, 81, -7, -1 },
+ { 0, -11, 37, 7, 98, 10, -6 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 23, 100, 7, 23, -9, 0 },
+ { -3, 0, 91, 7, 52, -12, 0 },
+ { 0, -11, 68, 7, 80, -8, -1 },
+ { 0, -11, 39, 7, 96, 9, -5 },
+ { -6, 12, 98, 7, 35, -11, 0 },
+ { -1, -6, 81, 7, 65, -11, 0 },
+ { 0, -12, 55, 7, 89, -2, -2 },
+ { 0, -9, 26, 7, 99, 20, -8 },
+ { -4, 2, 93, 7, 49, -12, 0 },
+ { 0, -10, 71, 7, 76, -9, 0 },
+ { 0, -11, 42, 7, 95, 7, -5 },
+ { -7, 14, 99, 7, 32, -10, 0 },
+ { -1, -5, 84, 7, 62, -12, 0 },
+ { 0, -12, 59, 7, 87, -4, -2 },
+ { 0, -10, 29, 7, 99, 17, -7 },
+ { -4, 4, 95, 7, 45, -12, 0 },
+ { 0, -9, 72, 7, 74, -9, 0 },
+ { 0, -12, 45, 7, 95, 4, -4 },
+ { -7, 17, 99, 7, 29, -10, 0 },
+ { -2, -4, 87, 7, 59, -12, 0 },
+ { 0, -12, 62, 7, 84, -5, -1 },
+ { 0, -10, 32, 7, 99, 14, -7 },
+ { -5, 7, 95, 7, 42, -11, 0 },
+ { 0, -9, 76, 7, 71, -10, 0 },
+ { 0, -12, 49, 7, 93, 2, -4 },
+ { -8, 20, 99, 7, 26, -9, 0 },
+ { -2, -2, 89, 7, 55, -12, 0 },
+ { 0, -11, 65, 7, 81, -6, -1 },
+ { 0, -11, 35, 7, 98, 12, -6 },
+ { -5, 9, 96, 7, 39, -11, 0 },
+ { -1, -8, 80, 7, 68, -11, 0 },
+ { 0, -12, 52, 7, 91, 0, -3 } },
+ .odd = { { -6, 10, 98, 7, 37, -11, 0 },
+ { -1, -7, 81, 7, 66, -11, 0 },
+ { 0, -12, 54, 7, 90, -1, -3 },
+ { 0, -9, 24, 7, 100, 21, -8 },
+ { -3, 1, 92, 7, 50, -12, 0 },
+ { 0, -10, 69, 7, 78, -8, -1 },
+ { 0, -11, 40, 7, 96, 8, -5 },
+ { -6, 13, 97, 7, 34, -10, 0 },
+ { -1, -6, 83, 7, 63, -11, 0 },
+ { 0, -12, 57, 7, 88, -3, -2 },
+ { 0, -9, 27, 7, 100, 18, -8 },
+ { -4, 3, 94, 7, 47, -12, 0 },
+ { 0, -10, 72, 7, 75, -9, 0 },
+ { 0, -11, 44, 7, 95, 5, -5 },
+ { -7, 16, 98, 7, 31, -10, 0 },
+ { -2, -4, 86, 7, 60, -12, 0 },
+ { 0, -12, 60, 7, 86, -4, -2 },
+ { 0, -10, 31, 7, 98, 16, -7 },
+ { -5, 5, 95, 7, 44, -11, 0 },
+ { 0, -9, 75, 7, 72, -10, 0 },
+ { 0, -12, 47, 7, 94, 3, -4 },
+ { -8, 18, 100, 7, 27, -9, 0 },
+ { -2, -3, 88, 7, 57, -12, 0 },
+ { 0, -11, 63, 7, 83, -6, -1 },
+ { 0, -10, 34, 7, 97, 13, -6 },
+ { -5, 8, 96, 7, 40, -11, 0 },
+ { -1, -8, 78, 7, 69, -10, 0 },
+ { 0, -12, 50, 7, 92, 1, -3 },
+ { -8, 21, 100, 7, 24, -9, 0 },
+ { -3, -1, 90, 7, 54, -12, 0 },
+ { 0, -11, 66, 7, 81, -7, -1 },
+ { 0, -11, 37, 7, 98, 10, -6 } } },
+ .ptrn_arr = { { 0xf3f9fcff, 0x3f9fcfe7, 0xfe7f } },
+ .sample_patrn_length = 82,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 10) = 0.761905 */
+ .hor_phase_arr = {
+ .even = { { -9, 25, 96, 7, 25, -9, 0 },
+ { -3, 0, 86, 7, 56, -11, 0 },
+ { 0, -11, 62, 7, 82, -3, -2 },
+ { 0, -10, 31, 7, 96, 19, -8 },
+ { -5, 4, 92, 7, 49, -12, 0 },
+ { 0, -10, 67, 7, 78, -6, -1 },
+ { 0, -11, 37, 7, 95, 14, -7 },
+ { -6, 9, 93, 7, 43, -11, 0 },
+ { -1, -8, 73, 7, 73, -8, -1 },
+ { 0, -11, 43, 7, 93, 9, -6 },
+ { -7, 14, 95, 7, 37, -11, 0 },
+ { -1, -6, 78, 7, 67, -10, 0 },
+ { 0, -12, 49, 7, 92, 4, -5 },
+ { -8, 19, 96, 7, 31, -10, 0 },
+ { -2, -3, 82, 7, 62, -11, 0 },
+ { 0, -11, 56, 7, 86, 0, -3 } },
+ .odd = { { -6, 11, 94, 7, 40, -11, 0 },
+ { -1, -7, 75, 7, 70, -9, 0 },
+ { 0, -12, 46, 7, 93, 6, -5 },
+ { -8, 16, 97, 7, 34, -11, 0 },
+ { -2, -5, 81, 7, 64, -10, 0 },
+ { 0, -12, 53, 7, 89, 2, -4 },
+ { -9, 22, 97, 7, 28, -10, 0 },
+ { -3, -2, 85, 7, 59, -11, 0 },
+ { 0, -11, 59, 7, 85, -2, -3 },
+ { 0, -10, 28, 7, 97, 22, -9 },
+ { -4, 2, 89, 7, 53, -12, 0 },
+ { 0, -10, 64, 7, 81, -5, -2 },
+ { 0, -11, 34, 7, 97, 16, -8 },
+ { -5, 6, 93, 7, 46, -12, 0 },
+ { 0, -9, 70, 7, 75, -7, -1 },
+ { 0, -11, 40, 7, 94, 11, -6 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 25, 96, 7, 25, -9, 0 },
+ { -3, 0, 86, 7, 56, -11, 0 },
+ { 0, -11, 62, 7, 82, -3, -2 },
+ { 0, -10, 31, 7, 96, 19, -8 },
+ { -5, 4, 92, 7, 49, -12, 0 },
+ { 0, -10, 67, 7, 78, -6, -1 },
+ { 0, -11, 37, 7, 95, 14, -7 },
+ { -6, 9, 93, 7, 43, -11, 0 },
+ { -1, -8, 73, 7, 73, -8, -1 },
+ { 0, -11, 43, 7, 93, 9, -6 },
+ { -7, 14, 95, 7, 37, -11, 0 },
+ { -1, -6, 78, 7, 67, -10, 0 },
+ { 0, -12, 49, 7, 92, 4, -5 },
+ { -8, 19, 96, 7, 31, -10, 0 },
+ { -2, -3, 82, 7, 62, -11, 0 },
+ { 0, -11, 56, 7, 86, 0, -3 } },
+ .odd = { { -6, 11, 94, 7, 40, -11, 0 },
+ { -1, -7, 75, 7, 70, -9, 0 },
+ { 0, -12, 46, 7, 93, 6, -5 },
+ { -8, 16, 97, 7, 34, -11, 0 },
+ { -2, -5, 81, 7, 64, -10, 0 },
+ { 0, -12, 53, 7, 89, 2, -4 },
+ { -9, 22, 97, 7, 28, -10, 0 },
+ { -3, -2, 85, 7, 59, -11, 0 },
+ { 0, -11, 59, 7, 85, -2, -3 },
+ { 0, -10, 28, 7, 97, 22, -9 },
+ { -4, 2, 89, 7, 53, -12, 0 },
+ { 0, -10, 64, 7, 81, -5, -2 },
+ { 0, -11, 34, 7, 97, 16, -8 },
+ { -5, 6, 93, 7, 46, -12, 0 },
+ { 0, -9, 70, 7, 75, -7, -1 },
+ { 0, -11, 40, 7, 94, 11, -6 } } },
+ .ptrn_arr = { { 0xfcfe7e7f, 0xfc } },
+ .sample_patrn_length = 42,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 11) = 0.744186 */
+ .hor_phase_arr = {
+ .even = { { -10, 26, 96, 7, 26, -10, 0 },
+ { -4, 0, 83, 7, 59, -10, 0 },
+ { 0, -11, 56, 7, 85, 2, -4 },
+ { -9, 23, 95, 7, 29, -10, 0 },
+ { -3, -2, 82, 7, 61, -10, 0 },
+ { 0, -11, 53, 7, 87, 4, -5 },
+ { -9, 21, 94, 7, 32, -10, 0 },
+ { -3, -3, 79, 7, 64, -9, 0 },
+ { 0, -11, 50, 7, 88, 6, -5 },
+ { -8, 18, 94, 7, 35, -11, 0 },
+ { -2, -5, 78, 7, 67, -9, -1 },
+ { 0, -11, 47, 7, 90, 8, -6 },
+ { -8, 15, 94, 7, 38, -11, 0 },
+ { -2, -6, 75, 7, 70, -8, -1 },
+ { 0, -11, 44, 7, 92, 10, -7 },
+ { -7, 13, 92, 7, 41, -11, 0 },
+ { -1, -7, 72, 7, 72, -7, -1 },
+ { 0, -11, 41, 7, 92, 13, -7 },
+ { -7, 10, 92, 7, 44, -11, 0 },
+ { -1, -8, 70, 7, 75, -6, -2 },
+ { 0, -11, 38, 7, 94, 15, -8 },
+ { -6, 8, 90, 7, 47, -11, 0 },
+ { -1, -9, 67, 7, 78, -5, -2 },
+ { 0, -11, 35, 7, 94, 18, -8 },
+ { -5, 6, 88, 7, 50, -11, 0 },
+ { 0, -9, 64, 7, 79, -3, -3 },
+ { 0, -10, 32, 7, 94, 21, -9 },
+ { -5, 4, 87, 7, 53, -11, 0 },
+ { 0, -10, 61, 7, 82, -2, -3 },
+ { 0, -10, 29, 7, 95, 23, -9 },
+ { -4, 2, 85, 7, 56, -11, 0 },
+ { 0, -10, 59, 7, 83, 0, -4 } },
+ .odd = { { -7, 12, 92, 7, 42, -11, 0 },
+ { -1, -7, 71, 7, 72, -6, -1 },
+ { 0, -11, 39, 7, 93, 14, -7 },
+ { -6, 9, 91, 7, 45, -11, 0 },
+ { -1, -8, 68, 7, 76, -5, -2 },
+ { 0, -11, 36, 7, 94, 17, -8 },
+ { -6, 7, 90, 7, 48, -11, 0 },
+ { 0, -9, 66, 7, 77, -4, -2 },
+ { 0, -11, 33, 7, 96, 19, -9 },
+ { -5, 5, 88, 7, 51, -11, 0 },
+ { 0, -10, 63, 7, 80, -2, -3 },
+ { 0, -10, 31, 7, 94, 22, -9 },
+ { -5, 3, 87, 7, 54, -11, 0 },
+ { 0, -10, 60, 7, 82, -1, -3 },
+ { 0, -10, 28, 7, 94, 25, -9 },
+ { -4, 1, 85, 7, 57, -11, 0 },
+ { 0, -11, 57, 7, 85, 1, -4 },
+ { -9, 25, 94, 7, 28, -10, 0 },
+ { -3, -1, 82, 7, 60, -10, 0 },
+ { 0, -11, 54, 7, 87, 3, -5 },
+ { -9, 22, 94, 7, 31, -10, 0 },
+ { -3, -2, 80, 7, 63, -10, 0 },
+ { 0, -11, 51, 7, 88, 5, -5 },
+ { -9, 19, 96, 7, 33, -11, 0 },
+ { -2, -4, 77, 7, 66, -9, 0 },
+ { 0, -11, 48, 7, 90, 7, -6 },
+ { -8, 17, 94, 7, 36, -11, 0 },
+ { -2, -5, 76, 7, 68, -8, -1 },
+ { 0, -11, 45, 7, 91, 9, -6 },
+ { -7, 14, 93, 7, 39, -11, 0 },
+ { -1, -6, 72, 7, 71, -7, -1 },
+ { 0, -11, 42, 7, 92, 12, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 26, 96, 7, 26, -10, 0 },
+ { -4, 0, 83, 7, 59, -10, 0 },
+ { 0, -11, 56, 7, 85, 2, -4 },
+ { -9, 23, 95, 7, 29, -10, 0 },
+ { -3, -2, 82, 7, 61, -10, 0 },
+ { 0, -11, 53, 7, 87, 4, -5 },
+ { -9, 21, 94, 7, 32, -10, 0 },
+ { -3, -3, 79, 7, 64, -9, 0 },
+ { 0, -11, 50, 7, 88, 6, -5 },
+ { -8, 18, 94, 7, 35, -11, 0 },
+ { -2, -5, 78, 7, 67, -9, -1 },
+ { 0, -11, 47, 7, 90, 8, -6 },
+ { -8, 15, 94, 7, 38, -11, 0 },
+ { -2, -6, 75, 7, 70, -8, -1 },
+ { 0, -11, 44, 7, 92, 10, -7 },
+ { -7, 13, 92, 7, 41, -11, 0 },
+ { -1, -7, 72, 7, 72, -7, -1 },
+ { 0, -11, 41, 7, 92, 13, -7 },
+ { -7, 10, 92, 7, 44, -11, 0 },
+ { -1, -8, 70, 7, 75, -6, -2 },
+ { 0, -11, 38, 7, 94, 15, -8 },
+ { -6, 8, 90, 7, 47, -11, 0 },
+ { -1, -9, 67, 7, 78, -5, -2 },
+ { 0, -11, 35, 7, 94, 18, -8 },
+ { -5, 6, 88, 7, 50, -11, 0 },
+ { 0, -9, 64, 7, 79, -3, -3 },
+ { 0, -10, 32, 7, 94, 21, -9 },
+ { -5, 4, 87, 7, 53, -11, 0 },
+ { 0, -10, 61, 7, 82, -2, -3 },
+ { 0, -10, 29, 7, 95, 23, -9 },
+ { -4, 2, 85, 7, 56, -11, 0 },
+ { 0, -10, 59, 7, 83, 0, -4 } },
+ .odd = { { -7, 12, 92, 7, 42, -11, 0 },
+ { -1, -7, 71, 7, 72, -6, -1 },
+ { 0, -11, 39, 7, 93, 14, -7 },
+ { -6, 9, 91, 7, 45, -11, 0 },
+ { -1, -8, 68, 7, 76, -5, -2 },
+ { 0, -11, 36, 7, 94, 17, -8 },
+ { -6, 7, 90, 7, 48, -11, 0 },
+ { 0, -9, 66, 7, 77, -4, -2 },
+ { 0, -11, 33, 7, 96, 19, -9 },
+ { -5, 5, 88, 7, 51, -11, 0 },
+ { 0, -10, 63, 7, 80, -2, -3 },
+ { 0, -10, 31, 7, 94, 22, -9 },
+ { -5, 3, 87, 7, 54, -11, 0 },
+ { 0, -10, 60, 7, 82, -1, -3 },
+ { 0, -10, 28, 7, 94, 25, -9 },
+ { -4, 1, 85, 7, 57, -11, 0 },
+ { 0, -11, 57, 7, 85, 1, -4 },
+ { -9, 25, 94, 7, 28, -10, 0 },
+ { -3, -1, 82, 7, 60, -10, 0 },
+ { 0, -11, 54, 7, 87, 3, -5 },
+ { -9, 22, 94, 7, 31, -10, 0 },
+ { -3, -2, 80, 7, 63, -10, 0 },
+ { 0, -11, 51, 7, 88, 5, -5 },
+ { -9, 19, 96, 7, 33, -11, 0 },
+ { -2, -4, 77, 7, 66, -9, 0 },
+ { 0, -11, 48, 7, 90, 7, -6 },
+ { -8, 17, 94, 7, 36, -11, 0 },
+ { -2, -5, 76, 7, 68, -8, -1 },
+ { 0, -11, 45, 7, 91, 9, -6 },
+ { -7, 14, 93, 7, 39, -11, 0 },
+ { -1, -6, 72, 7, 71, -7, -1 },
+ { 0, -11, 42, 7, 92, 12, -7 } } },
+ .ptrn_arr = { { 0x3f3f3f3f, 0x9f9f9f3f, 0xf9f9f } },
+ .sample_patrn_length = 86,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 12) = 0.727273 */
+ .hor_phase_arr = {
+ .even = { { -10, 28, 92, 7, 28, -10, 0 },
+ { -4, 0, 81, 7, 61, -9, -1 },
+ { 0, -11, 50, 7, 87, 8, -6 },
+ { -8, 17, 91, 7, 39, -11, 0 },
+ { -2, -6, 72, 7, 72, -6, -2 },
+ { 0, -11, 39, 7, 91, 17, -8 },
+ { -6, 8, 87, 7, 50, -11, 0 },
+ { -1, -9, 61, 7, 81, 0, -4 } },
+ .odd = { { -7, 12, 89, 7, 45, -11, 0 },
+ { -1, -8, 67, 7, 76, -3, -3 },
+ { 0, -11, 33, 7, 93, 22, -9 },
+ { -5, 4, 83, 7, 56, -10, 0 },
+ { 0, -10, 56, 7, 83, 4, -5 },
+ { -9, 22, 93, 7, 33, -11, 0 },
+ { -3, -3, 76, 7, 67, -8, -1 },
+ { 0, -11, 45, 7, 89, 12, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 28, 92, 7, 28, -10, 0 },
+ { -4, 0, 81, 7, 61, -9, -1 },
+ { 0, -11, 50, 7, 87, 8, -6 },
+ { -8, 17, 91, 7, 39, -11, 0 },
+ { -2, -6, 72, 7, 72, -6, -2 },
+ { 0, -11, 39, 7, 91, 17, -8 },
+ { -6, 8, 87, 7, 50, -11, 0 },
+ { -1, -9, 61, 7, 81, 0, -4 } },
+ .odd = { { -7, 12, 89, 7, 45, -11, 0 },
+ { -1, -8, 67, 7, 76, -3, -3 },
+ { 0, -11, 33, 7, 93, 22, -9 },
+ { -5, 4, 83, 7, 56, -10, 0 },
+ { 0, -10, 56, 7, 83, 4, -5 },
+ { -9, 22, 93, 7, 33, -11, 0 },
+ { -3, -3, 76, 7, 67, -8, -1 },
+ { 0, -11, 45, 7, 89, 12, -7 } } },
+ .ptrn_arr = { { 0xf9f3f } },
+ .sample_patrn_length = 22,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 13) = 0.711111 */
+ .hor_phase_arr = {
+ .even = { { -10, 29, 90, 7, 29, -10, 0 },
+ { -4, 0, 76, 7, 64, -7, -1 },
+ { 0, -11, 45, 7, 88, 14, -8 },
+ { -7, 12, 85, 7, 48, -10, 0 },
+ { -1, -8, 61, 7, 79, 2, -5 },
+ { -10, 26, 90, 7, 32, -10, 0 },
+ { -4, -2, 76, 7, 66, -6, -2 },
+ { 0, -11, 42, 7, 89, 16, -8 },
+ { -7, 10, 84, 7, 51, -10, 0 },
+ { -1, -9, 59, 7, 81, 3, -5 },
+ { -10, 24, 91, 7, 34, -11, 0 },
+ { -3, -3, 72, 7, 69, -5, -2 },
+ { 0, -11, 40, 7, 89, 19, -9 },
+ { -6, 7, 84, 7, 53, -10, 0 },
+ { -1, -9, 56, 7, 83, 5, -6 },
+ { -9, 21, 90, 7, 37, -11, 0 },
+ { -3, -4, 71, 7, 71, -4, -3 },
+ { 0, -11, 37, 7, 90, 21, -9 },
+ { -6, 5, 83, 7, 56, -9, -1 },
+ { 0, -10, 53, 7, 84, 7, -6 },
+ { -9, 19, 89, 7, 40, -11, 0 },
+ { -2, -5, 69, 7, 72, -3, -3 },
+ { 0, -11, 34, 7, 91, 24, -10 },
+ { -5, 3, 81, 7, 59, -9, -1 },
+ { 0, -10, 51, 7, 84, 10, -7 },
+ { -8, 16, 89, 7, 42, -11, 0 },
+ { -2, -6, 66, 7, 76, -2, -4 },
+ { 0, -10, 32, 7, 90, 26, -10 },
+ { -5, 2, 79, 7, 61, -8, -1 },
+ { 0, -10, 48, 7, 85, 12, -7 },
+ { -8, 14, 88, 7, 45, -11, 0 },
+ { -1, -7, 64, 7, 76, 0, -4 } },
+ .odd = { { -8, 13, 88, 7, 46, -11, 0 },
+ { -1, -8, 63, 7, 78, 1, -5 },
+ { -10, 28, 90, 7, 30, -10, 0 },
+ { -4, -1, 77, 7, 65, -7, -2 },
+ { 0, -11, 44, 7, 88, 15, -8 },
+ { -7, 11, 85, 7, 49, -10, 0 },
+ { -1, -8, 60, 7, 79, 3, -5 },
+ { -10, 25, 91, 7, 33, -11, 0 },
+ { -4, -2, 74, 7, 68, -6, -2 },
+ { 0, -11, 41, 7, 89, 18, -9 },
+ { -7, 8, 85, 7, 52, -10, 0 },
+ { -1, -9, 57, 7, 83, 4, -6 },
+ { -9, 22, 90, 7, 36, -11, 0 },
+ { -3, -4, 73, 7, 70, -5, -3 },
+ { 0, -11, 38, 7, 90, 20, -9 },
+ { -6, 6, 83, 7, 55, -10, 0 },
+ { 0, -10, 55, 7, 83, 6, -6 },
+ { -9, 20, 90, 7, 38, -11, 0 },
+ { -3, -5, 70, 7, 73, -4, -3 },
+ { 0, -11, 36, 7, 90, 22, -9 },
+ { -6, 4, 83, 7, 57, -9, -1 },
+ { 0, -10, 52, 7, 85, 8, -7 },
+ { -9, 18, 89, 7, 41, -11, 0 },
+ { -2, -6, 68, 7, 74, -2, -4 },
+ { 0, -11, 33, 7, 91, 25, -10 },
+ { -5, 3, 79, 7, 60, -8, -1 },
+ { 0, -10, 49, 7, 85, 11, -7 },
+ { -8, 15, 88, 7, 44, -11, 0 },
+ { -2, -7, 65, 7, 77, -1, -4 },
+ { 0, -10, 30, 7, 90, 28, -10 },
+ { -5, 1, 78, 7, 63, -8, -1 },
+ { 0, -11, 46, 7, 88, 13, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 29, 90, 7, 29, -10, 0 },
+ { -4, 0, 76, 7, 64, -7, -1 },
+ { 0, -11, 45, 7, 88, 14, -8 },
+ { -7, 12, 85, 7, 48, -10, 0 },
+ { -1, -8, 61, 7, 79, 2, -5 },
+ { -10, 26, 90, 7, 32, -10, 0 },
+ { -4, -2, 76, 7, 66, -6, -2 },
+ { 0, -11, 42, 7, 89, 16, -8 },
+ { -7, 10, 84, 7, 51, -10, 0 },
+ { -1, -9, 59, 7, 81, 3, -5 },
+ { -10, 24, 91, 7, 34, -11, 0 },
+ { -3, -3, 72, 7, 69, -5, -2 },
+ { 0, -11, 40, 7, 89, 19, -9 },
+ { -6, 7, 84, 7, 53, -10, 0 },
+ { -1, -9, 56, 7, 83, 5, -6 },
+ { -9, 21, 90, 7, 37, -11, 0 },
+ { -3, -4, 71, 7, 71, -4, -3 },
+ { 0, -11, 37, 7, 90, 21, -9 },
+ { -6, 5, 83, 7, 56, -9, -1 },
+ { 0, -10, 53, 7, 84, 7, -6 },
+ { -9, 19, 89, 7, 40, -11, 0 },
+ { -2, -5, 69, 7, 72, -3, -3 },
+ { 0, -11, 34, 7, 91, 24, -10 },
+ { -5, 3, 81, 7, 59, -9, -1 },
+ { 0, -10, 51, 7, 84, 10, -7 },
+ { -8, 16, 89, 7, 42, -11, 0 },
+ { -2, -6, 66, 7, 76, -2, -4 },
+ { 0, -10, 32, 7, 90, 26, -10 },
+ { -5, 2, 79, 7, 61, -8, -1 },
+ { 0, -10, 48, 7, 85, 12, -7 },
+ { -8, 14, 88, 7, 45, -11, 0 },
+ { -1, -7, 64, 7, 76, 0, -4 } },
+ .odd = { { -8, 13, 88, 7, 46, -11, 0 },
+ { -1, -8, 63, 7, 78, 1, -5 },
+ { -10, 28, 90, 7, 30, -10, 0 },
+ { -4, -1, 77, 7, 65, -7, -2 },
+ { 0, -11, 44, 7, 88, 15, -8 },
+ { -7, 11, 85, 7, 49, -10, 0 },
+ { -1, -8, 60, 7, 79, 3, -5 },
+ { -10, 25, 91, 7, 33, -11, 0 },
+ { -4, -2, 74, 7, 68, -6, -2 },
+ { 0, -11, 41, 7, 89, 18, -9 },
+ { -7, 8, 85, 7, 52, -10, 0 },
+ { -1, -9, 57, 7, 83, 4, -6 },
+ { -9, 22, 90, 7, 36, -11, 0 },
+ { -3, -4, 73, 7, 70, -5, -3 },
+ { 0, -11, 38, 7, 90, 20, -9 },
+ { -6, 6, 83, 7, 55, -10, 0 },
+ { 0, -10, 55, 7, 83, 6, -6 },
+ { -9, 20, 90, 7, 38, -11, 0 },
+ { -3, -5, 70, 7, 73, -4, -3 },
+ { 0, -11, 36, 7, 90, 22, -9 },
+ { -6, 4, 83, 7, 57, -9, -1 },
+ { 0, -10, 52, 7, 85, 8, -7 },
+ { -9, 18, 89, 7, 41, -11, 0 },
+ { -2, -6, 68, 7, 74, -2, -4 },
+ { 0, -11, 33, 7, 91, 25, -10 },
+ { -5, 3, 79, 7, 60, -8, -1 },
+ { 0, -10, 49, 7, 85, 11, -7 },
+ { -8, 15, 88, 7, 44, -11, 0 },
+ { -2, -7, 65, 7, 77, -1, -4 },
+ { 0, -10, 30, 7, 90, 28, -10 },
+ { -5, 1, 78, 7, 63, -8, -1 },
+ { 0, -11, 46, 7, 88, 13, -8 } } },
+ .ptrn_arr = { { 0xf3e7cf9f, 0x9f3e7cf9, 0xf3e7cf } },
+ .sample_patrn_length = 90,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 14) = 0.695652 */
+ .hor_phase_arr = {
+ .even = { { -10, 30, 88, 7, 30, -10, 0 },
+ { -5, 0, 75, 7, 66, -5, -3 },
+ { 0, -10, 40, 7, 87, 20, -9 },
+ { -7, 7, 81, 7, 56, -8, -1 },
+ { 0, -9, 51, 7, 83, 11, -8 },
+ { -8, 16, 84, 7, 46, -10, 0 },
+ { -2, -7, 61, 7, 79, 3, -6 },
+ { -10, 25, 88, 7, 35, -10, 0 },
+ { -4, -3, 72, 7, 70, -3, -4 },
+ { 0, -10, 35, 7, 88, 25, -10 },
+ { -6, 3, 79, 7, 61, -7, -2 },
+ { 0, -10, 46, 7, 84, 16, -8 },
+ { -8, 11, 83, 7, 51, -9, 0 },
+ { -1, -8, 56, 7, 81, 7, -7 },
+ { -9, 20, 87, 7, 40, -10, 0 },
+ { -3, -5, 66, 7, 75, 0, -5 } },
+ .odd = { { -8, 13, 85, 7, 48, -10, 0 },
+ { -1, -8, 59, 7, 79, 5, -6 },
+ { -10, 23, 87, 7, 38, -10, 0 },
+ { -3, -4, 68, 7, 72, -1, -4 },
+ { 0, -10, 33, 7, 87, 28, -10 },
+ { -5, 2, 75, 7, 64, -6, -2 },
+ { 0, -10, 43, 7, 86, 18, -9 },
+ { -7, 9, 83, 7, 53, -9, -1 },
+ { -1, -9, 53, 7, 83, 9, -7 },
+ { -9, 18, 86, 7, 43, -10, 0 },
+ { -2, -6, 64, 7, 75, 2, -5 },
+ { -10, 28, 87, 7, 33, -10, 0 },
+ { -4, -1, 72, 7, 68, -4, -3 },
+ { 0, -10, 38, 7, 87, 23, -10 },
+ { -6, 5, 79, 7, 59, -8, -1 },
+ { 0, -10, 48, 7, 85, 13, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 30, 88, 7, 30, -10, 0 },
+ { -5, 0, 75, 7, 66, -5, -3 },
+ { 0, -10, 40, 7, 87, 20, -9 },
+ { -7, 7, 81, 7, 56, -8, -1 },
+ { 0, -9, 51, 7, 83, 11, -8 },
+ { -8, 16, 84, 7, 46, -10, 0 },
+ { -2, -7, 61, 7, 79, 3, -6 },
+ { -10, 25, 88, 7, 35, -10, 0 },
+ { -4, -3, 72, 7, 70, -3, -4 },
+ { 0, -10, 35, 7, 88, 25, -10 },
+ { -6, 3, 79, 7, 61, -7, -2 },
+ { 0, -10, 46, 7, 84, 16, -8 },
+ { -8, 11, 83, 7, 51, -9, 0 },
+ { -1, -8, 56, 7, 81, 7, -7 },
+ { -9, 20, 87, 7, 40, -10, 0 },
+ { -3, -5, 66, 7, 75, 0, -5 } },
+ .odd = { { -8, 13, 85, 7, 48, -10, 0 },
+ { -1, -8, 59, 7, 79, 5, -6 },
+ { -10, 23, 87, 7, 38, -10, 0 },
+ { -3, -4, 68, 7, 72, -1, -4 },
+ { 0, -10, 33, 7, 87, 28, -10 },
+ { -5, 2, 75, 7, 64, -6, -2 },
+ { 0, -10, 43, 7, 86, 18, -9 },
+ { -7, 9, 83, 7, 53, -9, -1 },
+ { -1, -9, 53, 7, 83, 9, -7 },
+ { -9, 18, 86, 7, 43, -10, 0 },
+ { -2, -6, 64, 7, 75, 2, -5 },
+ { -10, 28, 87, 7, 33, -10, 0 },
+ { -4, -1, 72, 7, 68, -4, -3 },
+ { 0, -10, 38, 7, 87, 23, -10 },
+ { -6, 5, 79, 7, 59, -8, -1 },
+ { 0, -10, 48, 7, 85, 13, -8 } } },
+ .ptrn_arr = { { 0x79f3cf9f, 0xf3e } },
+ .sample_patrn_length = 46,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 15) = 0.680851 */
+ .hor_phase_arr = {
+ .even = { { -10, 31, 86, 7, 31, -10, 0 },
+ { -5, 0, 72, 7, 68, -3, -4 },
+ { 0, -10, 36, 7, 86, 26, -10 },
+ { -6, 3, 76, 7, 63, -5, -3 },
+ { 0, -10, 41, 7, 85, 21, -9 },
+ { -7, 7, 78, 7, 59, -7, -2 },
+ { 0, -10, 46, 7, 84, 17, -9 },
+ { -8, 11, 80, 7, 54, -8, -1 },
+ { -1, -9, 51, 7, 82, 13, -8 },
+ { -9, 15, 83, 7, 49, -9, -1 },
+ { -2, -8, 56, 7, 80, 9, -7 },
+ { -9, 19, 85, 7, 43, -10, 0 },
+ { -3, -6, 61, 7, 77, 5, -6 },
+ { -10, 24, 86, 7, 38, -10, 0 },
+ { -3, -4, 66, 7, 72, 2, -5 },
+ { -10, 29, 86, 7, 33, -10, 0 },
+ { -4, -1, 68, 7, 70, -1, -4 },
+ { 0, -10, 33, 7, 86, 29, -10 },
+ { -5, 2, 72, 7, 66, -4, -3 },
+ { 0, -10, 38, 7, 86, 24, -10 },
+ { -6, 5, 77, 7, 61, -6, -3 },
+ { 0, -10, 43, 7, 85, 19, -9 },
+ { -7, 9, 80, 7, 56, -8, -2 },
+ { -1, -9, 49, 7, 83, 15, -9 },
+ { -8, 13, 82, 7, 51, -9, -1 },
+ { -1, -8, 54, 7, 80, 11, -8 },
+ { -9, 17, 84, 7, 46, -10, 0 },
+ { -2, -7, 59, 7, 78, 7, -7 },
+ { -9, 21, 85, 7, 41, -10, 0 },
+ { -3, -5, 63, 7, 76, 3, -6 },
+ { -10, 26, 86, 7, 36, -10, 0 },
+ { -4, -3, 68, 7, 72, 0, -5 } },
+ .odd = { { -8, 14, 82, 7, 50, -9, -1 },
+ { -1, -8, 55, 7, 79, 10, -7 },
+ { -9, 18, 84, 7, 45, -10, 0 },
+ { -2, -6, 60, 7, 77, 6, -7 },
+ { -10, 23, 85, 7, 40, -10, 0 },
+ { -3, -4, 64, 7, 75, 2, -6 },
+ { -10, 27, 86, 7, 35, -10, 0 },
+ { -4, -2, 69, 7, 71, -1, -5 },
+ { 0, -10, 32, 7, 86, 30, -10 },
+ { -5, 1, 72, 7, 67, -3, -4 },
+ { 0, -10, 37, 7, 86, 25, -10 },
+ { -6, 4, 77, 7, 62, -6, -3 },
+ { 0, -10, 42, 7, 85, 20, -9 },
+ { -7, 8, 79, 7, 57, -7, -2 },
+ { -1, -9, 47, 7, 84, 16, -9 },
+ { -8, 12, 81, 7, 52, -8, -1 },
+ { -1, -8, 52, 7, 81, 12, -8 },
+ { -9, 16, 84, 7, 47, -9, -1 },
+ { -2, -7, 57, 7, 79, 8, -7 },
+ { -9, 20, 85, 7, 42, -10, 0 },
+ { -3, -6, 62, 7, 77, 4, -6 },
+ { -10, 25, 86, 7, 37, -10, 0 },
+ { -4, -3, 67, 7, 72, 1, -5 },
+ { -10, 30, 86, 7, 32, -10, 0 },
+ { -5, -1, 71, 7, 69, -2, -4 },
+ { 0, -10, 35, 7, 86, 27, -10 },
+ { -6, 2, 75, 7, 64, -4, -3 },
+ { 0, -10, 40, 7, 85, 23, -10 },
+ { -7, 6, 77, 7, 60, -6, -2 },
+ { 0, -10, 45, 7, 84, 18, -9 },
+ { -7, 10, 79, 7, 55, -8, -1 },
+ { -1, -9, 50, 7, 82, 14, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 31, 86, 7, 31, -10, 0 },
+ { -5, 0, 72, 7, 68, -3, -4 },
+ { 0, -10, 36, 7, 86, 26, -10 },
+ { -6, 3, 76, 7, 63, -5, -3 },
+ { 0, -10, 41, 7, 85, 21, -9 },
+ { -7, 7, 78, 7, 59, -7, -2 },
+ { 0, -10, 46, 7, 84, 17, -9 },
+ { -8, 11, 80, 7, 54, -8, -1 },
+ { -1, -9, 51, 7, 82, 13, -8 },
+ { -9, 15, 83, 7, 49, -9, -1 },
+ { -2, -8, 56, 7, 80, 9, -7 },
+ { -9, 19, 85, 7, 43, -10, 0 },
+ { -3, -6, 61, 7, 77, 5, -6 },
+ { -10, 24, 86, 7, 38, -10, 0 },
+ { -3, -4, 66, 7, 72, 2, -5 },
+ { -10, 29, 86, 7, 33, -10, 0 },
+ { -4, -1, 68, 7, 70, -1, -4 },
+ { 0, -10, 33, 7, 86, 29, -10 },
+ { -5, 2, 72, 7, 66, -4, -3 },
+ { 0, -10, 38, 7, 86, 24, -10 },
+ { -6, 5, 77, 7, 61, -6, -3 },
+ { 0, -10, 43, 7, 85, 19, -9 },
+ { -7, 9, 80, 7, 56, -8, -2 },
+ { -1, -9, 49, 7, 83, 15, -9 },
+ { -8, 13, 82, 7, 51, -9, -1 },
+ { -1, -8, 54, 7, 80, 11, -8 },
+ { -9, 17, 84, 7, 46, -10, 0 },
+ { -2, -7, 59, 7, 78, 7, -7 },
+ { -9, 21, 85, 7, 41, -10, 0 },
+ { -3, -5, 63, 7, 76, 3, -6 },
+ { -10, 26, 86, 7, 36, -10, 0 },
+ { -4, -3, 68, 7, 72, 0, -5 } },
+ .odd = { { -8, 14, 82, 7, 50, -9, -1 },
+ { -1, -8, 55, 7, 79, 10, -7 },
+ { -9, 18, 84, 7, 45, -10, 0 },
+ { -2, -6, 60, 7, 77, 6, -7 },
+ { -10, 23, 85, 7, 40, -10, 0 },
+ { -3, -4, 64, 7, 75, 2, -6 },
+ { -10, 27, 86, 7, 35, -10, 0 },
+ { -4, -2, 69, 7, 71, -1, -5 },
+ { 0, -10, 32, 7, 86, 30, -10 },
+ { -5, 1, 72, 7, 67, -3, -4 },
+ { 0, -10, 37, 7, 86, 25, -10 },
+ { -6, 4, 77, 7, 62, -6, -3 },
+ { 0, -10, 42, 7, 85, 20, -9 },
+ { -7, 8, 79, 7, 57, -7, -2 },
+ { -1, -9, 47, 7, 84, 16, -9 },
+ { -8, 12, 81, 7, 52, -8, -1 },
+ { -1, -8, 52, 7, 81, 12, -8 },
+ { -9, 16, 84, 7, 47, -9, -1 },
+ { -2, -7, 57, 7, 79, 8, -7 },
+ { -9, 20, 85, 7, 42, -10, 0 },
+ { -3, -6, 62, 7, 77, 4, -6 },
+ { -10, 25, 86, 7, 37, -10, 0 },
+ { -4, -3, 67, 7, 72, 1, -5 },
+ { -10, 30, 86, 7, 32, -10, 0 },
+ { -5, -1, 71, 7, 69, -2, -4 },
+ { 0, -10, 35, 7, 86, 27, -10 },
+ { -6, 2, 75, 7, 64, -4, -3 },
+ { 0, -10, 40, 7, 85, 23, -10 },
+ { -7, 6, 77, 7, 60, -6, -2 },
+ { 0, -10, 45, 7, 84, 18, -9 },
+ { -7, 10, 79, 7, 55, -8, -1 },
+ { -1, -9, 50, 7, 82, 14, -8 } } },
+ .ptrn_arr = { { 0x3cf9e79f, 0x9e79f3cf, 0xf3cf3e7 } },
+ .sample_patrn_length = 94,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 16) = 0.666667 */
+ .hor_phase_arr = {
+ .even = { { -10, 32, 84, 7, 32, -10, 0 },
+ { -5, 0, 69, 7, 69, 0, -5 } },
+ .odd = { { -9, 14, 82, 7, 51, -8, -2 },
+ { -2, -8, 51, 7, 82, 14, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 32, 84, 7, 32, -10, 0 },
+ { -5, 0, 69, 7, 69, 0, -5 } },
+ .odd = { { -9, 14, 82, 7, 51, -8, -2 },
+ { -2, -8, 51, 7, 82, 14, -9 } } },
+ .ptrn_arr = { { 0xf } },
+ .sample_patrn_length = 6,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 17) = 0.653061 */
+ .hor_phase_arr = {
+ .even = { { -10, 33, 82, 7, 33, -10, 0 },
+ { -5, 0, 66, 7, 70, 3, -6 },
+ { -10, 28, 82, 7, 37, -9, 0 },
+ { -4, -3, 62, 7, 74, 6, -7 },
+ { -10, 24, 82, 7, 42, -9, -1 },
+ { -3, -5, 58, 7, 76, 10, -8 },
+ { -9, 20, 79, 7, 47, -8, -1 },
+ { -3, -6, 54, 7, 78, 14, -9 },
+ { -9, 16, 79, 7, 51, -7, -2 },
+ { -2, -8, 49, 7, 80, 18, -9 },
+ { -8, 12, 77, 7, 56, -6, -3 },
+ { -1, -9, 44, 7, 81, 22, -9 },
+ { -7, 8, 75, 7, 60, -4, -4 },
+ { -1, -9, 40, 7, 82, 26, -10 },
+ { -7, 5, 71, 7, 65, -1, -5 },
+ { 0, -10, 35, 7, 83, 30, -10 },
+ { -6, 1, 70, 7, 68, 1, -6 },
+ { -10, 30, 83, 7, 35, -10, 0 },
+ { -5, -1, 65, 7, 71, 5, -7 },
+ { -10, 26, 82, 7, 40, -9, -1 },
+ { -4, -4, 60, 7, 75, 8, -7 },
+ { -9, 22, 81, 7, 44, -9, -1 },
+ { -3, -6, 56, 7, 77, 12, -8 },
+ { -9, 18, 80, 7, 49, -8, -2 },
+ { -2, -7, 51, 7, 79, 16, -9 },
+ { -9, 14, 78, 7, 54, -6, -3 },
+ { -1, -8, 47, 7, 79, 20, -9 },
+ { -8, 10, 76, 7, 58, -5, -3 },
+ { -1, -9, 42, 7, 82, 24, -10 },
+ { -7, 6, 74, 7, 62, -3, -4 },
+ { 0, -9, 37, 7, 82, 28, -10 },
+ { -6, 3, 70, 7, 66, 0, -5 } },
+ .odd = { { -9, 15, 79, 7, 52, -7, -2 },
+ { -2, -8, 48, 7, 80, 19, -9 },
+ { -8, 11, 76, 7, 57, -5, -3 },
+ { -1, -9, 43, 7, 82, 23, -10 },
+ { -7, 7, 74, 7, 61, -3, -4 },
+ { -1, -9, 38, 7, 83, 27, -10 },
+ { -6, 4, 70, 7, 66, -1, -5 },
+ { 0, -10, 34, 7, 83, 31, -10 },
+ { -6, 1, 67, 7, 70, 2, -6 },
+ { -10, 29, 83, 7, 36, -10, 0 },
+ { -5, -2, 64, 7, 73, 5, -7 },
+ { -10, 25, 82, 7, 41, -9, -1 },
+ { -4, -4, 59, 7, 76, 9, -8 },
+ { -9, 21, 80, 7, 45, -8, -1 },
+ { -3, -6, 55, 7, 77, 13, -8 },
+ { -9, 17, 79, 7, 50, -7, -2 },
+ { -2, -7, 50, 7, 79, 17, -9 },
+ { -8, 13, 77, 7, 55, -6, -3 },
+ { -1, -8, 45, 7, 80, 21, -9 },
+ { -8, 9, 76, 7, 59, -4, -4 },
+ { -1, -9, 41, 7, 82, 25, -10 },
+ { -7, 5, 73, 7, 64, -2, -5 },
+ { 0, -10, 36, 7, 83, 29, -10 },
+ { -6, 2, 70, 7, 67, 1, -6 },
+ { -10, 31, 83, 7, 34, -10, 0 },
+ { -5, -1, 66, 7, 70, 4, -6 },
+ { -10, 27, 83, 7, 38, -9, -1 },
+ { -4, -3, 61, 7, 74, 7, -7 },
+ { -10, 23, 82, 7, 43, -9, -1 },
+ { -3, -5, 57, 7, 76, 11, -8 },
+ { -9, 19, 80, 7, 48, -8, -2 },
+ { -2, -7, 52, 7, 79, 15, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 33, 82, 7, 33, -10, 0 },
+ { -5, 0, 66, 7, 70, 3, -6 },
+ { -10, 28, 82, 7, 37, -9, 0 },
+ { -4, -3, 62, 7, 74, 6, -7 },
+ { -10, 24, 82, 7, 42, -9, -1 },
+ { -3, -5, 58, 7, 76, 10, -8 },
+ { -9, 20, 79, 7, 47, -8, -1 },
+ { -3, -6, 54, 7, 78, 14, -9 },
+ { -9, 16, 79, 7, 51, -7, -2 },
+ { -2, -8, 49, 7, 80, 18, -9 },
+ { -8, 12, 77, 7, 56, -6, -3 },
+ { -1, -9, 44, 7, 81, 22, -9 },
+ { -7, 8, 75, 7, 60, -4, -4 },
+ { -1, -9, 40, 7, 82, 26, -10 },
+ { -7, 5, 71, 7, 65, -1, -5 },
+ { 0, -10, 35, 7, 83, 30, -10 },
+ { -6, 1, 70, 7, 68, 1, -6 },
+ { -10, 30, 83, 7, 35, -10, 0 },
+ { -5, -1, 65, 7, 71, 5, -7 },
+ { -10, 26, 82, 7, 40, -9, -1 },
+ { -4, -4, 60, 7, 75, 8, -7 },
+ { -9, 22, 81, 7, 44, -9, -1 },
+ { -3, -6, 56, 7, 77, 12, -8 },
+ { -9, 18, 80, 7, 49, -8, -2 },
+ { -2, -7, 51, 7, 79, 16, -9 },
+ { -9, 14, 78, 7, 54, -6, -3 },
+ { -1, -8, 47, 7, 79, 20, -9 },
+ { -8, 10, 76, 7, 58, -5, -3 },
+ { -1, -9, 42, 7, 82, 24, -10 },
+ { -7, 6, 74, 7, 62, -3, -4 },
+ { 0, -9, 37, 7, 82, 28, -10 },
+ { -6, 3, 70, 7, 66, 0, -5 } },
+ .odd = { { -9, 15, 79, 7, 52, -7, -2 },
+ { -2, -8, 48, 7, 80, 19, -9 },
+ { -8, 11, 76, 7, 57, -5, -3 },
+ { -1, -9, 43, 7, 82, 23, -10 },
+ { -7, 7, 74, 7, 61, -3, -4 },
+ { -1, -9, 38, 7, 83, 27, -10 },
+ { -6, 4, 70, 7, 66, -1, -5 },
+ { 0, -10, 34, 7, 83, 31, -10 },
+ { -6, 1, 67, 7, 70, 2, -6 },
+ { -10, 29, 83, 7, 36, -10, 0 },
+ { -5, -2, 64, 7, 73, 5, -7 },
+ { -10, 25, 82, 7, 41, -9, -1 },
+ { -4, -4, 59, 7, 76, 9, -8 },
+ { -9, 21, 80, 7, 45, -8, -1 },
+ { -3, -6, 55, 7, 77, 13, -8 },
+ { -9, 17, 79, 7, 50, -7, -2 },
+ { -2, -7, 50, 7, 79, 17, -9 },
+ { -8, 13, 77, 7, 55, -6, -3 },
+ { -1, -8, 45, 7, 80, 21, -9 },
+ { -8, 9, 76, 7, 59, -4, -4 },
+ { -1, -9, 41, 7, 82, 25, -10 },
+ { -7, 5, 73, 7, 64, -2, -5 },
+ { 0, -10, 36, 7, 83, 29, -10 },
+ { -6, 2, 70, 7, 67, 1, -6 },
+ { -10, 31, 83, 7, 34, -10, 0 },
+ { -5, -1, 66, 7, 70, 4, -6 },
+ { -10, 27, 83, 7, 38, -9, -1 },
+ { -4, -3, 61, 7, 74, 7, -7 },
+ { -10, 23, 82, 7, 43, -9, -1 },
+ { -3, -5, 57, 7, 76, 11, -8 },
+ { -9, 19, 80, 7, 48, -8, -2 },
+ { -2, -7, 52, 7, 79, 15, -9 } } },
+ .ptrn_arr = { { 0xe73cf3cf, 0x3cf39e79, 0xe79e79cf } },
+ .sample_patrn_length = 98,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 18) = 0.64 */
+ .hor_phase_arr = {
+ .even = { { -9, 33, 80, 7, 33, -9, 0 },
+ { -6, 0, 64, 7, 71, 6, -7 },
+ { -10, 25, 80, 7, 42, -8, -1 },
+ { -4, -4, 56, 7, 76, 13, -9 },
+ { -9, 17, 78, 7, 51, -6, -3 },
+ { -2, -7, 47, 7, 78, 21, -9 },
+ { -8, 9, 74, 7, 60, -2, -5 },
+ { -1, -9, 38, 7, 81, 29, -10 },
+ { -6, 3, 66, 7, 68, 3, -6 },
+ { -10, 29, 81, 7, 38, -9, -1 },
+ { -5, -2, 60, 7, 74, 9, -8 },
+ { -9, 21, 78, 7, 47, -7, -2 },
+ { -3, -6, 51, 7, 78, 17, -9 },
+ { -9, 13, 76, 7, 56, -4, -4 },
+ { -1, -8, 42, 7, 80, 25, -10 },
+ { -7, 6, 71, 7, 64, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -5, -3 },
+ { -2, -8, 45, 7, 80, 23, -10 },
+ { -8, 8, 72, 7, 62, -1, -5 },
+ { -1, -9, 36, 7, 80, 31, -9 },
+ { -6, 1, 66, 7, 70, 4, -7 },
+ { -10, 27, 81, 7, 40, -9, -1 },
+ { -4, -4, 58, 7, 75, 11, -8 },
+ { -9, 19, 78, 7, 49, -7, -2 },
+ { -2, -7, 49, 7, 78, 19, -9 },
+ { -8, 11, 75, 7, 58, -4, -4 },
+ { -1, -9, 40, 7, 81, 27, -10 },
+ { -7, 4, 70, 7, 66, 1, -6 },
+ { -9, 31, 80, 7, 36, -9, -1 },
+ { -5, -1, 62, 7, 72, 8, -8 },
+ { -10, 23, 80, 7, 45, -8, -2 },
+ { -3, -5, 54, 7, 76, 15, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 33, 80, 7, 33, -9, 0 },
+ { -6, 0, 64, 7, 71, 6, -7 },
+ { -10, 25, 80, 7, 42, -8, -1 },
+ { -4, -4, 56, 7, 76, 13, -9 },
+ { -9, 17, 78, 7, 51, -6, -3 },
+ { -2, -7, 47, 7, 78, 21, -9 },
+ { -8, 9, 74, 7, 60, -2, -5 },
+ { -1, -9, 38, 7, 81, 29, -10 },
+ { -6, 3, 66, 7, 68, 3, -6 },
+ { -10, 29, 81, 7, 38, -9, -1 },
+ { -5, -2, 60, 7, 74, 9, -8 },
+ { -9, 21, 78, 7, 47, -7, -2 },
+ { -3, -6, 51, 7, 78, 17, -9 },
+ { -9, 13, 76, 7, 56, -4, -4 },
+ { -1, -8, 42, 7, 80, 25, -10 },
+ { -7, 6, 71, 7, 64, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -5, -3 },
+ { -2, -8, 45, 7, 80, 23, -10 },
+ { -8, 8, 72, 7, 62, -1, -5 },
+ { -1, -9, 36, 7, 80, 31, -9 },
+ { -6, 1, 66, 7, 70, 4, -7 },
+ { -10, 27, 81, 7, 40, -9, -1 },
+ { -4, -4, 58, 7, 75, 11, -8 },
+ { -9, 19, 78, 7, 49, -7, -2 },
+ { -2, -7, 49, 7, 78, 19, -9 },
+ { -8, 11, 75, 7, 58, -4, -4 },
+ { -1, -9, 40, 7, 81, 27, -10 },
+ { -7, 4, 70, 7, 66, 1, -6 },
+ { -9, 31, 80, 7, 36, -9, -1 },
+ { -5, -1, 62, 7, 72, 8, -8 },
+ { -10, 23, 80, 7, 45, -8, -2 },
+ { -3, -5, 54, 7, 76, 15, -9 } } },
+ .ptrn_arr = { { 0xf39e73cf, 0xe79c } },
+ .sample_patrn_length = 50,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 19) = 0.627451 */
+ .hor_phase_arr = {
+ .even = { { -9, 34, 79, 7, 34, -9, -1 },
+ { -6, 0, 61, 7, 72, 9, -8 },
+ { -9, 22, 78, 7, 47, -7, -3 },
+ { -3, -6, 49, 7, 77, 20, -9 },
+ { -8, 11, 72, 7, 59, -1, -5 },
+ { -1, -9, 36, 7, 79, 32, -9 },
+ { -6, 1, 63, 7, 71, 7, -8 },
+ { -9, 24, 77, 7, 45, -7, -2 },
+ { -4, -5, 51, 7, 77, 18, -9 },
+ { -9, 13, 73, 7, 58, -2, -5 },
+ { -1, -8, 38, 7, 78, 30, -9 },
+ { -6, 3, 65, 7, 67, 6, -7 },
+ { -9, 26, 78, 7, 43, -8, -2 },
+ { -4, -4, 53, 7, 76, 16, -9 },
+ { -9, 14, 75, 7, 55, -3, -4 },
+ { -2, -8, 40, 7, 79, 28, -9 },
+ { -7, 4, 67, 7, 67, 4, -7 },
+ { -9, 28, 79, 7, 40, -8, -2 },
+ { -4, -3, 55, 7, 75, 14, -9 },
+ { -9, 16, 76, 7, 53, -4, -4 },
+ { -2, -8, 43, 7, 78, 26, -9 },
+ { -7, 6, 67, 7, 65, 3, -6 },
+ { -9, 30, 78, 7, 38, -8, -1 },
+ { -5, -2, 58, 7, 73, 13, -9 },
+ { -9, 18, 77, 7, 51, -5, -4 },
+ { -2, -7, 45, 7, 77, 24, -9 },
+ { -8, 7, 71, 7, 63, 1, -6 },
+ { -9, 32, 79, 7, 36, -9, -1 },
+ { -5, -1, 59, 7, 72, 11, -8 },
+ { -9, 20, 77, 7, 49, -6, -3 },
+ { -3, -7, 47, 7, 78, 22, -9 },
+ { -8, 9, 72, 7, 61, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -4, -4 },
+ { -2, -8, 41, 7, 79, 27, -9 },
+ { -7, 5, 68, 7, 66, 3, -7 },
+ { -9, 29, 78, 7, 39, -8, -1 },
+ { -5, -3, 56, 7, 76, 13, -9 },
+ { -9, 17, 77, 7, 52, -5, -4 },
+ { -2, -7, 44, 7, 77, 25, -9 },
+ { -7, 7, 68, 7, 64, 2, -6 },
+ { -9, 31, 79, 7, 37, -9, -1 },
+ { -5, -2, 59, 7, 72, 12, -8 },
+ { -9, 19, 77, 7, 50, -6, -3 },
+ { -3, -7, 46, 7, 78, 23, -9 },
+ { -8, 8, 71, 7, 62, 1, -6 },
+ { -9, 33, 79, 7, 35, -9, -1 },
+ { -5, -1, 60, 7, 72, 10, -8 },
+ { -9, 21, 77, 7, 48, -6, -3 },
+ { -3, -6, 48, 7, 77, 21, -9 },
+ { -8, 10, 72, 7, 60, -1, -5 },
+ { -1, -9, 35, 7, 79, 33, -9 },
+ { -6, 1, 62, 7, 71, 8, -8 },
+ { -9, 23, 78, 7, 46, -7, -3 },
+ { -3, -6, 50, 7, 77, 19, -9 },
+ { -8, 12, 72, 7, 59, -2, -5 },
+ { -1, -9, 37, 7, 79, 31, -9 },
+ { -6, 2, 64, 7, 68, 7, -7 },
+ { -9, 25, 77, 7, 44, -7, -2 },
+ { -4, -5, 52, 7, 77, 17, -9 },
+ { -9, 13, 76, 7, 56, -3, -5 },
+ { -1, -8, 39, 7, 78, 29, -9 },
+ { -7, 3, 66, 7, 68, 5, -7 },
+ { -9, 27, 79, 7, 41, -8, -2 },
+ { -4, -4, 54, 7, 76, 15, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 34, 79, 7, 34, -9, -1 },
+ { -6, 0, 61, 7, 72, 9, -8 },
+ { -9, 22, 78, 7, 47, -7, -3 },
+ { -3, -6, 49, 7, 77, 20, -9 },
+ { -8, 11, 72, 7, 59, -1, -5 },
+ { -1, -9, 36, 7, 79, 32, -9 },
+ { -6, 1, 63, 7, 71, 7, -8 },
+ { -9, 24, 77, 7, 45, -7, -2 },
+ { -4, -5, 51, 7, 77, 18, -9 },
+ { -9, 13, 73, 7, 58, -2, -5 },
+ { -1, -8, 38, 7, 78, 30, -9 },
+ { -6, 3, 65, 7, 67, 6, -7 },
+ { -9, 26, 78, 7, 43, -8, -2 },
+ { -4, -4, 53, 7, 76, 16, -9 },
+ { -9, 14, 75, 7, 55, -3, -4 },
+ { -2, -8, 40, 7, 79, 28, -9 },
+ { -7, 4, 67, 7, 67, 4, -7 },
+ { -9, 28, 79, 7, 40, -8, -2 },
+ { -4, -3, 55, 7, 75, 14, -9 },
+ { -9, 16, 76, 7, 53, -4, -4 },
+ { -2, -8, 43, 7, 78, 26, -9 },
+ { -7, 6, 67, 7, 65, 3, -6 },
+ { -9, 30, 78, 7, 38, -8, -1 },
+ { -5, -2, 58, 7, 73, 13, -9 },
+ { -9, 18, 77, 7, 51, -5, -4 },
+ { -2, -7, 45, 7, 77, 24, -9 },
+ { -8, 7, 71, 7, 63, 1, -6 },
+ { -9, 32, 79, 7, 36, -9, -1 },
+ { -5, -1, 59, 7, 72, 11, -8 },
+ { -9, 20, 77, 7, 49, -6, -3 },
+ { -3, -7, 47, 7, 78, 22, -9 },
+ { -8, 9, 72, 7, 61, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -4, -4 },
+ { -2, -8, 41, 7, 79, 27, -9 },
+ { -7, 5, 68, 7, 66, 3, -7 },
+ { -9, 29, 78, 7, 39, -8, -1 },
+ { -5, -3, 56, 7, 76, 13, -9 },
+ { -9, 17, 77, 7, 52, -5, -4 },
+ { -2, -7, 44, 7, 77, 25, -9 },
+ { -7, 7, 68, 7, 64, 2, -6 },
+ { -9, 31, 79, 7, 37, -9, -1 },
+ { -5, -2, 59, 7, 72, 12, -8 },
+ { -9, 19, 77, 7, 50, -6, -3 },
+ { -3, -7, 46, 7, 78, 23, -9 },
+ { -8, 8, 71, 7, 62, 1, -6 },
+ { -9, 33, 79, 7, 35, -9, -1 },
+ { -5, -1, 60, 7, 72, 10, -8 },
+ { -9, 21, 77, 7, 48, -6, -3 },
+ { -3, -6, 48, 7, 77, 21, -9 },
+ { -8, 10, 72, 7, 60, -1, -5 },
+ { -1, -9, 35, 7, 79, 33, -9 },
+ { -6, 1, 62, 7, 71, 8, -8 },
+ { -9, 23, 78, 7, 46, -7, -3 },
+ { -3, -6, 50, 7, 77, 19, -9 },
+ { -8, 12, 72, 7, 59, -2, -5 },
+ { -1, -9, 37, 7, 79, 31, -9 },
+ { -6, 2, 64, 7, 68, 7, -7 },
+ { -9, 25, 77, 7, 44, -7, -2 },
+ { -4, -5, 52, 7, 77, 17, -9 },
+ { -9, 13, 76, 7, 56, -3, -5 },
+ { -1, -8, 39, 7, 78, 29, -9 },
+ { -7, 3, 66, 7, 68, 5, -7 },
+ { -9, 27, 79, 7, 41, -8, -2 },
+ { -4, -4, 54, 7, 76, 15, -9 } } },
+ .ptrn_arr = { { 0x79ce79cf, 0x73ce79ce, 0x73ce73ce, 0xe } },
+ .sample_patrn_length = 102,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 20) = 0.615385 */
+ .hor_phase_arr = {
+ .even = { { -8, 34, 77, 7, 34, -8, -1 },
+ { -6, 0, 59, 7, 71, 12, -8 },
+ { -9, 19, 75, 7, 51, -4, -4 },
+ { -3, -7, 43, 7, 77, 27, -9 },
+ { -7, 6, 64, 7, 66, 6, -7 },
+ { -9, 27, 77, 7, 43, -7, -3 },
+ { -4, -4, 51, 7, 75, 19, -9 },
+ { -8, 12, 71, 7, 59, 0, -6 } },
+ .odd = { { -9, 16, 73, 7, 55, -2, -5 },
+ { -2, -8, 39, 7, 77, 31, -9 },
+ { -7, 3, 63, 7, 68, 9, -8 },
+ { -9, 23, 76, 7, 47, -6, -3 },
+ { -3, -6, 47, 7, 76, 23, -9 },
+ { -8, 9, 68, 7, 63, 3, -7 },
+ { -9, 31, 77, 7, 39, -8, -2 },
+ { -5, -2, 55, 7, 73, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -8, 34, 77, 7, 34, -8, -1 },
+ { -6, 0, 59, 7, 71, 12, -8 },
+ { -9, 19, 75, 7, 51, -4, -4 },
+ { -3, -7, 43, 7, 77, 27, -9 },
+ { -7, 6, 64, 7, 66, 6, -7 },
+ { -9, 27, 77, 7, 43, -7, -3 },
+ { -4, -4, 51, 7, 75, 19, -9 },
+ { -8, 12, 71, 7, 59, 0, -6 } },
+ .odd = { { -9, 16, 73, 7, 55, -2, -5 },
+ { -2, -8, 39, 7, 77, 31, -9 },
+ { -7, 3, 63, 7, 68, 9, -8 },
+ { -9, 23, 76, 7, 47, -6, -3 },
+ { -3, -6, 47, 7, 76, 23, -9 },
+ { -8, 9, 68, 7, 63, 3, -7 },
+ { -9, 31, 77, 7, 39, -8, -2 },
+ { -5, -2, 55, 7, 73, 16, -9 } } },
+ .ptrn_arr = { { 0xe739cf } },
+ .sample_patrn_length = 26,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 21) = 0.603774 */
+ .hor_phase_arr = {
+ .even = { { -8, 35, 76, 7, 35, -8, -2 },
+ { -6, 0, 57, 7, 71, 15, -9 },
+ { -9, 17, 71, 7, 55, -1, -5 },
+ { -2, -8, 37, 7, 76, 33, -8 },
+ { -6, 1, 58, 7, 71, 13, -9 },
+ { -9, 18, 73, 7, 53, -2, -5 },
+ { -2, -7, 39, 7, 75, 31, -8 },
+ { -7, 2, 60, 7, 69, 12, -8 },
+ { -9, 20, 74, 7, 51, -3, -5 },
+ { -3, -7, 41, 7, 77, 29, -9 },
+ { -7, 4, 62, 7, 67, 10, -8 },
+ { -9, 22, 74, 7, 49, -4, -4 },
+ { -3, -6, 43, 7, 75, 28, -9 },
+ { -7, 5, 63, 7, 67, 8, -8 },
+ { -9, 24, 75, 7, 47, -5, -4 },
+ { -4, -5, 45, 7, 75, 26, -9 },
+ { -8, 7, 65, 7, 65, 7, -8 },
+ { -9, 26, 75, 7, 45, -5, -4 },
+ { -4, -5, 47, 7, 75, 24, -9 },
+ { -8, 8, 67, 7, 63, 5, -7 },
+ { -9, 28, 75, 7, 43, -6, -3 },
+ { -4, -4, 49, 7, 74, 22, -9 },
+ { -8, 10, 67, 7, 62, 4, -7 },
+ { -9, 29, 77, 7, 41, -7, -3 },
+ { -5, -3, 51, 7, 74, 20, -9 },
+ { -8, 12, 69, 7, 60, 2, -7 },
+ { -8, 31, 75, 7, 39, -7, -2 },
+ { -5, -2, 53, 7, 73, 18, -9 },
+ { -9, 13, 71, 7, 58, 1, -6 },
+ { -8, 33, 76, 7, 37, -8, -2 },
+ { -5, -1, 55, 7, 71, 17, -9 },
+ { -9, 15, 71, 7, 57, 0, -6 } },
+ .odd = { { -9, 16, 72, 7, 56, -1, -6 },
+ { -2, -8, 36, 7, 76, 34, -8 },
+ { -6, 1, 58, 7, 70, 14, -9 },
+ { -9, 18, 72, 7, 54, -2, -5 },
+ { -2, -7, 38, 7, 75, 32, -8 },
+ { -6, 2, 59, 7, 70, 12, -9 },
+ { -9, 19, 74, 7, 52, -3, -5 },
+ { -3, -7, 40, 7, 77, 30, -9 },
+ { -7, 3, 61, 7, 68, 11, -8 },
+ { -9, 21, 75, 7, 50, -4, -5 },
+ { -3, -6, 42, 7, 75, 29, -9 },
+ { -7, 5, 63, 7, 66, 9, -8 },
+ { -9, 23, 74, 7, 48, -4, -4 },
+ { -3, -6, 44, 7, 75, 27, -9 },
+ { -7, 6, 64, 7, 65, 8, -8 },
+ { -9, 25, 75, 7, 46, -5, -4 },
+ { -4, -5, 46, 7, 75, 25, -9 },
+ { -8, 8, 65, 7, 64, 6, -7 },
+ { -9, 27, 75, 7, 44, -6, -3 },
+ { -4, -4, 48, 7, 74, 23, -9 },
+ { -8, 9, 66, 7, 63, 5, -7 },
+ { -9, 29, 75, 7, 42, -6, -3 },
+ { -5, -4, 50, 7, 75, 21, -9 },
+ { -8, 11, 68, 7, 61, 3, -7 },
+ { -9, 30, 77, 7, 40, -7, -3 },
+ { -5, -3, 52, 7, 74, 19, -9 },
+ { -9, 12, 70, 7, 59, 2, -6 },
+ { -8, 32, 75, 7, 38, -7, -2 },
+ { -5, -2, 54, 7, 72, 18, -9 },
+ { -9, 14, 70, 7, 58, 1, -6 },
+ { -8, 34, 76, 7, 36, -8, -2 },
+ { -6, -1, 56, 7, 72, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -8, 35, 76, 7, 35, -8, -2 },
+ { -6, 0, 57, 7, 71, 15, -9 },
+ { -9, 17, 71, 7, 55, -1, -5 },
+ { -2, -8, 37, 7, 76, 33, -8 },
+ { -6, 1, 58, 7, 71, 13, -9 },
+ { -9, 18, 73, 7, 53, -2, -5 },
+ { -2, -7, 39, 7, 75, 31, -8 },
+ { -7, 2, 60, 7, 69, 12, -8 },
+ { -9, 20, 74, 7, 51, -3, -5 },
+ { -3, -7, 41, 7, 77, 29, -9 },
+ { -7, 4, 62, 7, 67, 10, -8 },
+ { -9, 22, 74, 7, 49, -4, -4 },
+ { -3, -6, 43, 7, 75, 28, -9 },
+ { -7, 5, 63, 7, 67, 8, -8 },
+ { -9, 24, 75, 7, 47, -5, -4 },
+ { -4, -5, 45, 7, 75, 26, -9 },
+ { -8, 7, 65, 7, 65, 7, -8 },
+ { -9, 26, 75, 7, 45, -5, -4 },
+ { -4, -5, 47, 7, 75, 24, -9 },
+ { -8, 8, 67, 7, 63, 5, -7 },
+ { -9, 28, 75, 7, 43, -6, -3 },
+ { -4, -4, 49, 7, 74, 22, -9 },
+ { -8, 10, 67, 7, 62, 4, -7 },
+ { -9, 29, 77, 7, 41, -7, -3 },
+ { -5, -3, 51, 7, 74, 20, -9 },
+ { -8, 12, 69, 7, 60, 2, -7 },
+ { -8, 31, 75, 7, 39, -7, -2 },
+ { -5, -2, 53, 7, 73, 18, -9 },
+ { -9, 13, 71, 7, 58, 1, -6 },
+ { -8, 33, 76, 7, 37, -8, -2 },
+ { -5, -1, 55, 7, 71, 17, -9 },
+ { -9, 15, 71, 7, 57, 0, -6 } },
+ .odd = { { -9, 16, 72, 7, 56, -1, -6 },
+ { -2, -8, 36, 7, 76, 34, -8 },
+ { -6, 1, 58, 7, 70, 14, -9 },
+ { -9, 18, 72, 7, 54, -2, -5 },
+ { -2, -7, 38, 7, 75, 32, -8 },
+ { -6, 2, 59, 7, 70, 12, -9 },
+ { -9, 19, 74, 7, 52, -3, -5 },
+ { -3, -7, 40, 7, 77, 30, -9 },
+ { -7, 3, 61, 7, 68, 11, -8 },
+ { -9, 21, 75, 7, 50, -4, -5 },
+ { -3, -6, 42, 7, 75, 29, -9 },
+ { -7, 5, 63, 7, 66, 9, -8 },
+ { -9, 23, 74, 7, 48, -4, -4 },
+ { -3, -6, 44, 7, 75, 27, -9 },
+ { -7, 6, 64, 7, 65, 8, -8 },
+ { -9, 25, 75, 7, 46, -5, -4 },
+ { -4, -5, 46, 7, 75, 25, -9 },
+ { -8, 8, 65, 7, 64, 6, -7 },
+ { -9, 27, 75, 7, 44, -6, -3 },
+ { -4, -4, 48, 7, 74, 23, -9 },
+ { -8, 9, 66, 7, 63, 5, -7 },
+ { -9, 29, 75, 7, 42, -6, -3 },
+ { -5, -4, 50, 7, 75, 21, -9 },
+ { -8, 11, 68, 7, 61, 3, -7 },
+ { -9, 30, 77, 7, 40, -7, -3 },
+ { -5, -3, 52, 7, 74, 19, -9 },
+ { -9, 12, 70, 7, 59, 2, -6 },
+ { -8, 32, 75, 7, 38, -7, -2 },
+ { -5, -2, 54, 7, 72, 18, -9 },
+ { -9, 14, 70, 7, 58, 1, -6 },
+ { -8, 34, 76, 7, 36, -8, -2 },
+ { -6, -1, 56, 7, 72, 16, -9 } } },
+ .ptrn_arr = { { 0x9ce739cf, 0xe739ce73, 0x39ce739c, 0xe7 } },
+ .sample_patrn_length = 106,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 22) = 0.592593 */
+ .hor_phase_arr = {
+ .even = { { -7, 35, 74, 7, 35, -7, -2 },
+ { -6, 0, 54, 7, 71, 18, -9 },
+ { -9, 14, 70, 7, 58, 2, -7 },
+ { -8, 32, 74, 7, 39, -6, -3 },
+ { -5, -2, 51, 7, 72, 21, -9 },
+ { -8, 11, 66, 7, 61, 5, -7 },
+ { -9, 28, 75, 7, 43, -5, -4 },
+ { -4, -4, 47, 7, 73, 25, -9 },
+ { -8, 8, 64, 7, 64, 8, -8 },
+ { -9, 25, 73, 7, 47, -4, -4 },
+ { -4, -5, 43, 7, 75, 28, -9 },
+ { -7, 5, 61, 7, 66, 11, -8 },
+ { -9, 21, 72, 7, 51, -2, -5 },
+ { -3, -6, 39, 7, 74, 32, -8 },
+ { -7, 2, 58, 7, 70, 14, -9 },
+ { -9, 18, 71, 7, 54, 0, -6 } },
+ .odd = { { -9, 16, 70, 7, 56, 1, -6 },
+ { -8, 34, 75, 7, 37, -7, -3 },
+ { -6, -1, 53, 7, 72, 19, -9 },
+ { -9, 13, 68, 7, 59, 4, -7 },
+ { -8, 30, 74, 7, 41, -6, -3 },
+ { -5, -3, 49, 7, 73, 23, -9 },
+ { -8, 10, 66, 7, 62, 6, -8 },
+ { -9, 27, 74, 7, 45, -5, -4 },
+ { -4, -5, 45, 7, 74, 27, -9 },
+ { -8, 6, 62, 7, 66, 10, -8 },
+ { -9, 23, 73, 7, 49, -3, -5 },
+ { -3, -6, 41, 7, 74, 30, -8 },
+ { -7, 4, 59, 7, 68, 13, -9 },
+ { -9, 19, 72, 7, 53, -1, -6 },
+ { -3, -7, 37, 7, 75, 34, -8 },
+ { -6, 1, 56, 7, 70, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -7, 35, 74, 7, 35, -7, -2 },
+ { -6, 0, 54, 7, 71, 18, -9 },
+ { -9, 14, 70, 7, 58, 2, -7 },
+ { -8, 32, 74, 7, 39, -6, -3 },
+ { -5, -2, 51, 7, 72, 21, -9 },
+ { -8, 11, 66, 7, 61, 5, -7 },
+ { -9, 28, 75, 7, 43, -5, -4 },
+ { -4, -4, 47, 7, 73, 25, -9 },
+ { -8, 8, 64, 7, 64, 8, -8 },
+ { -9, 25, 73, 7, 47, -4, -4 },
+ { -4, -5, 43, 7, 75, 28, -9 },
+ { -7, 5, 61, 7, 66, 11, -8 },
+ { -9, 21, 72, 7, 51, -2, -5 },
+ { -3, -6, 39, 7, 74, 32, -8 },
+ { -7, 2, 58, 7, 70, 14, -9 },
+ { -9, 18, 71, 7, 54, 0, -6 } },
+ .odd = { { -9, 16, 70, 7, 56, 1, -6 },
+ { -8, 34, 75, 7, 37, -7, -3 },
+ { -6, -1, 53, 7, 72, 19, -9 },
+ { -9, 13, 68, 7, 59, 4, -7 },
+ { -8, 30, 74, 7, 41, -6, -3 },
+ { -5, -3, 49, 7, 73, 23, -9 },
+ { -8, 10, 66, 7, 62, 6, -8 },
+ { -9, 27, 74, 7, 45, -5, -4 },
+ { -4, -5, 45, 7, 74, 27, -9 },
+ { -8, 6, 62, 7, 66, 10, -8 },
+ { -9, 23, 73, 7, 49, -3, -5 },
+ { -3, -6, 41, 7, 74, 30, -8 },
+ { -7, 4, 59, 7, 68, 13, -9 },
+ { -9, 19, 72, 7, 53, -1, -6 },
+ { -3, -7, 37, 7, 75, 34, -8 },
+ { -6, 1, 56, 7, 70, 16, -9 } } },
+ .ptrn_arr = { { 0xce739ce7, 0xce739 } },
+ .sample_patrn_length = 54,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 23) = 0.581818 */
+ .hor_phase_arr = {
+ .even = { { -7, 36, 73, 7, 36, -7, -3 },
+ { -6, 0, 52, 7, 71, 20, -9 },
+ { -8, 12, 66, 7, 60, 6, -8 },
+ { -8, 27, 73, 7, 45, -4, -5 },
+ { -4, -4, 43, 7, 72, 29, -8 },
+ { -7, 5, 59, 7, 66, 14, -9 },
+ { -9, 19, 69, 7, 54, 1, -6 },
+ { -7, 34, 72, 7, 38, -6, -3 },
+ { -6, -1, 50, 7, 72, 22, -9 },
+ { -8, 11, 63, 7, 62, 8, -8 },
+ { -9, 26, 72, 7, 47, -3, -5 },
+ { -4, -5, 41, 7, 73, 31, -8 },
+ { -7, 4, 57, 7, 68, 15, -9 },
+ { -9, 17, 69, 7, 56, 2, -7 },
+ { -7, 32, 74, 7, 39, -6, -4 },
+ { -5, -2, 49, 7, 71, 24, -9 },
+ { -8, 9, 63, 7, 63, 9, -8 },
+ { -9, 24, 71, 7, 49, -2, -5 },
+ { -4, -6, 39, 7, 74, 32, -7 },
+ { -7, 2, 56, 7, 69, 17, -9 },
+ { -9, 15, 68, 7, 57, 4, -7 },
+ { -8, 31, 73, 7, 41, -5, -4 },
+ { -5, -3, 47, 7, 72, 26, -9 },
+ { -8, 8, 62, 7, 63, 11, -8 },
+ { -9, 22, 72, 7, 50, -1, -6 },
+ { -3, -6, 38, 7, 72, 34, -7 },
+ { -6, 1, 54, 7, 69, 19, -9 },
+ { -9, 14, 66, 7, 59, 5, -7 },
+ { -8, 29, 72, 7, 43, -4, -4 },
+ { -5, -4, 45, 7, 73, 27, -8 },
+ { -8, 6, 60, 7, 66, 12, -8 },
+ { -9, 20, 71, 7, 52, 0, -6 } },
+ .odd = { { -9, 16, 69, 7, 56, 3, -7 },
+ { -8, 31, 74, 7, 40, -5, -4 },
+ { -5, -2, 48, 7, 71, 25, -9 },
+ { -8, 8, 62, 7, 64, 10, -8 },
+ { -9, 23, 72, 7, 50, -2, -6 },
+ { -3, -6, 39, 7, 72, 33, -7 },
+ { -7, 2, 55, 7, 69, 18, -9 },
+ { -9, 15, 67, 7, 58, 4, -7 },
+ { -8, 30, 73, 7, 42, -5, -4 },
+ { -5, -3, 46, 7, 72, 26, -8 },
+ { -8, 7, 61, 7, 65, 11, -8 },
+ { -9, 21, 72, 7, 51, -1, -6 },
+ { -3, -6, 37, 7, 72, 35, -7 },
+ { -6, 1, 53, 7, 69, 20, -9 },
+ { -9, 13, 66, 7, 59, 6, -7 },
+ { -8, 28, 72, 7, 44, -4, -4 },
+ { -4, -4, 44, 7, 72, 28, -8 },
+ { -7, 6, 59, 7, 66, 13, -9 },
+ { -9, 20, 69, 7, 53, 1, -6 },
+ { -7, 35, 72, 7, 37, -6, -3 },
+ { -6, -1, 51, 7, 72, 21, -9 },
+ { -8, 11, 65, 7, 61, 7, -8 },
+ { -8, 26, 72, 7, 46, -3, -5 },
+ { -4, -5, 42, 7, 73, 30, -8 },
+ { -7, 4, 58, 7, 67, 15, -9 },
+ { -9, 18, 69, 7, 55, 2, -7 },
+ { -7, 33, 72, 7, 39, -6, -3 },
+ { -6, -2, 50, 7, 72, 23, -9 },
+ { -8, 10, 64, 7, 62, 8, -8 },
+ { -9, 25, 71, 7, 48, -2, -5 },
+ { -4, -5, 40, 7, 74, 31, -8 },
+ { -7, 3, 56, 7, 69, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -7, 36, 73, 7, 36, -7, -3 },
+ { -6, 0, 52, 7, 71, 20, -9 },
+ { -8, 12, 66, 7, 60, 6, -8 },
+ { -8, 27, 73, 7, 45, -4, -5 },
+ { -4, -4, 43, 7, 72, 29, -8 },
+ { -7, 5, 59, 7, 66, 14, -9 },
+ { -9, 19, 69, 7, 54, 1, -6 },
+ { -7, 34, 72, 7, 38, -6, -3 },
+ { -6, -1, 50, 7, 72, 22, -9 },
+ { -8, 11, 63, 7, 62, 8, -8 },
+ { -9, 26, 72, 7, 47, -3, -5 },
+ { -4, -5, 41, 7, 73, 31, -8 },
+ { -7, 4, 57, 7, 68, 15, -9 },
+ { -9, 17, 69, 7, 56, 2, -7 },
+ { -7, 32, 74, 7, 39, -6, -4 },
+ { -5, -2, 49, 7, 71, 24, -9 },
+ { -8, 9, 63, 7, 63, 9, -8 },
+ { -9, 24, 71, 7, 49, -2, -5 },
+ { -4, -6, 39, 7, 74, 32, -7 },
+ { -7, 2, 56, 7, 69, 17, -9 },
+ { -9, 15, 68, 7, 57, 4, -7 },
+ { -8, 31, 73, 7, 41, -5, -4 },
+ { -5, -3, 47, 7, 72, 26, -9 },
+ { -8, 8, 62, 7, 63, 11, -8 },
+ { -9, 22, 72, 7, 50, -1, -6 },
+ { -3, -6, 38, 7, 72, 34, -7 },
+ { -6, 1, 54, 7, 69, 19, -9 },
+ { -9, 14, 66, 7, 59, 5, -7 },
+ { -8, 29, 72, 7, 43, -4, -4 },
+ { -5, -4, 45, 7, 73, 27, -8 },
+ { -8, 6, 60, 7, 66, 12, -8 },
+ { -9, 20, 71, 7, 52, 0, -6 } },
+ .odd = { { -9, 16, 69, 7, 56, 3, -7 },
+ { -8, 31, 74, 7, 40, -5, -4 },
+ { -5, -2, 48, 7, 71, 25, -9 },
+ { -8, 8, 62, 7, 64, 10, -8 },
+ { -9, 23, 72, 7, 50, -2, -6 },
+ { -3, -6, 39, 7, 72, 33, -7 },
+ { -7, 2, 55, 7, 69, 18, -9 },
+ { -9, 15, 67, 7, 58, 4, -7 },
+ { -8, 30, 73, 7, 42, -5, -4 },
+ { -5, -3, 46, 7, 72, 26, -8 },
+ { -8, 7, 61, 7, 65, 11, -8 },
+ { -9, 21, 72, 7, 51, -1, -6 },
+ { -3, -6, 37, 7, 72, 35, -7 },
+ { -6, 1, 53, 7, 69, 20, -9 },
+ { -9, 13, 66, 7, 59, 6, -7 },
+ { -8, 28, 72, 7, 44, -4, -4 },
+ { -4, -4, 44, 7, 72, 28, -8 },
+ { -7, 6, 59, 7, 66, 13, -9 },
+ { -9, 20, 69, 7, 53, 1, -6 },
+ { -7, 35, 72, 7, 37, -6, -3 },
+ { -6, -1, 51, 7, 72, 21, -9 },
+ { -8, 11, 65, 7, 61, 7, -8 },
+ { -8, 26, 72, 7, 46, -3, -5 },
+ { -4, -5, 42, 7, 73, 30, -8 },
+ { -7, 4, 58, 7, 67, 15, -9 },
+ { -9, 18, 69, 7, 55, 2, -7 },
+ { -7, 33, 72, 7, 39, -6, -3 },
+ { -6, -2, 50, 7, 72, 23, -9 },
+ { -8, 10, 64, 7, 62, 8, -8 },
+ { -9, 25, 71, 7, 48, -2, -5 },
+ { -4, -5, 40, 7, 74, 31, -8 },
+ { -7, 3, 56, 7, 69, 16, -9 } } },
+ .ptrn_arr = { { 0xe7339ce7, 0x9ce7339c, 0x399ce739, 0xce7 } },
+ .sample_patrn_length = 110,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 24) = 0.571429 */
+ .hor_phase_arr = {
+ .even = { { -6, 36, 71, 7, 36, -6, -3 },
+ { -6, 0, 50, 7, 69, 23, -8 },
+ { -8, 10, 62, 7, 62, 10, -8 },
+ { -8, 23, 69, 7, 50, 0, -6 } },
+ .odd = { { -9, 16, 67, 7, 56, 5, -7 },
+ { -8, 29, 73, 7, 43, -4, -5 },
+ { -5, -4, 43, 7, 73, 29, -8 },
+ { -7, 5, 56, 7, 67, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -6, 36, 71, 7, 36, -6, -3 },
+ { -6, 0, 50, 7, 69, 23, -8 },
+ { -8, 10, 62, 7, 62, 10, -8 },
+ { -8, 23, 69, 7, 50, 0, -6 } },
+ .odd = { { -9, 16, 67, 7, 56, 5, -7 },
+ { -8, 29, 73, 7, 43, -4, -5 },
+ { -5, -4, 43, 7, 73, 29, -8 },
+ { -7, 5, 56, 7, 67, 16, -9 } } },
+ .ptrn_arr = { { 0xce7 } },
+ .sample_patrn_length = 14,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 25) = 0.561404 */
+ .hor_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 48, 7, 69, 25, -8 },
+ { -8, 8, 59, 7, 63, 14, -8 },
+ { -8, 19, 66, 7, 54, 4, -7 },
+ { -7, 30, 70, 7, 43, -3, -5 },
+ { -5, -3, 41, 7, 70, 32, -7 },
+ { -7, 3, 53, 7, 67, 20, -8 },
+ { -8, 13, 61, 7, 60, 10, -8 },
+ { -8, 24, 67, 7, 50, 1, -6 },
+ { -6, 35, 70, 7, 38, -5, -4 },
+ { -6, -1, 46, 7, 70, 27, -8 },
+ { -8, 7, 57, 7, 64, 16, -8 },
+ { -8, 17, 64, 7, 56, 6, -7 },
+ { -7, 28, 69, 7, 45, -2, -5 },
+ { -4, -4, 40, 7, 69, 33, -6 },
+ { -7, 2, 51, 7, 68, 22, -8 },
+ { -8, 11, 61, 7, 61, 11, -8 },
+ { -8, 22, 68, 7, 51, 2, -7 },
+ { -6, 33, 69, 7, 40, -4, -4 },
+ { -5, -2, 45, 7, 69, 28, -7 },
+ { -7, 6, 56, 7, 64, 17, -8 },
+ { -8, 16, 64, 7, 57, 7, -8 },
+ { -8, 27, 70, 7, 46, -1, -6 },
+ { -4, -5, 38, 7, 70, 35, -6 },
+ { -6, 1, 50, 7, 67, 24, -8 },
+ { -8, 10, 60, 7, 61, 13, -8 },
+ { -8, 20, 67, 7, 53, 3, -7 },
+ { -7, 32, 70, 7, 41, -3, -5 },
+ { -5, -3, 43, 7, 70, 30, -7 },
+ { -7, 4, 54, 7, 66, 19, -8 },
+ { -8, 14, 63, 7, 59, 8, -8 },
+ { -8, 25, 69, 7, 48, 0, -6 } },
+ .odd = { { -8, 16, 66, 7, 56, 6, -8 },
+ { -8, 28, 69, 7, 46, -1, -6 },
+ { -4, -4, 39, 7, 69, 34, -6 },
+ { -7, 2, 51, 7, 67, 23, -8 },
+ { -8, 10, 60, 7, 62, 12, -8 },
+ { -8, 21, 67, 7, 52, 3, -7 },
+ { -7, 32, 71, 7, 41, -4, -5 },
+ { -5, -2, 44, 7, 69, 29, -7 },
+ { -7, 5, 55, 7, 65, 18, -8 },
+ { -8, 15, 63, 7, 58, 8, -8 },
+ { -8, 26, 69, 7, 47, 0, -6 },
+ { -4, -5, 37, 7, 71, 35, -6 },
+ { -6, 1, 49, 7, 68, 24, -8 },
+ { -8, 9, 59, 7, 63, 13, -8 },
+ { -8, 20, 65, 7, 54, 4, -7 },
+ { -7, 31, 70, 7, 42, -3, -5 },
+ { -5, -3, 42, 7, 70, 31, -7 },
+ { -7, 4, 54, 7, 65, 20, -8 },
+ { -8, 13, 63, 7, 59, 9, -8 },
+ { -8, 24, 68, 7, 49, 1, -6 },
+ { -6, 35, 71, 7, 37, -5, -4 },
+ { -6, 0, 47, 7, 69, 26, -8 },
+ { -8, 8, 58, 7, 63, 15, -8 },
+ { -8, 18, 65, 7, 55, 5, -7 },
+ { -7, 29, 69, 7, 44, -2, -5 },
+ { -5, -4, 41, 7, 71, 32, -7 },
+ { -7, 3, 52, 7, 67, 21, -8 },
+ { -8, 12, 62, 7, 60, 10, -8 },
+ { -8, 23, 67, 7, 51, 2, -7 },
+ { -6, 34, 69, 7, 39, -4, -4 },
+ { -6, -1, 46, 7, 69, 28, -8 },
+ { -8, 6, 56, 7, 66, 16, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 48, 7, 69, 25, -8 },
+ { -8, 8, 59, 7, 63, 14, -8 },
+ { -8, 19, 66, 7, 54, 4, -7 },
+ { -7, 30, 70, 7, 43, -3, -5 },
+ { -5, -3, 41, 7, 70, 32, -7 },
+ { -7, 3, 53, 7, 67, 20, -8 },
+ { -8, 13, 61, 7, 60, 10, -8 },
+ { -8, 24, 67, 7, 50, 1, -6 },
+ { -6, 35, 70, 7, 38, -5, -4 },
+ { -6, -1, 46, 7, 70, 27, -8 },
+ { -8, 7, 57, 7, 64, 16, -8 },
+ { -8, 17, 64, 7, 56, 6, -7 },
+ { -7, 28, 69, 7, 45, -2, -5 },
+ { -4, -4, 40, 7, 69, 33, -6 },
+ { -7, 2, 51, 7, 68, 22, -8 },
+ { -8, 11, 61, 7, 61, 11, -8 },
+ { -8, 22, 68, 7, 51, 2, -7 },
+ { -6, 33, 69, 7, 40, -4, -4 },
+ { -5, -2, 45, 7, 69, 28, -7 },
+ { -7, 6, 56, 7, 64, 17, -8 },
+ { -8, 16, 64, 7, 57, 7, -8 },
+ { -8, 27, 70, 7, 46, -1, -6 },
+ { -4, -5, 38, 7, 70, 35, -6 },
+ { -6, 1, 50, 7, 67, 24, -8 },
+ { -8, 10, 60, 7, 61, 13, -8 },
+ { -8, 20, 67, 7, 53, 3, -7 },
+ { -7, 32, 70, 7, 41, -3, -5 },
+ { -5, -3, 43, 7, 70, 30, -7 },
+ { -7, 4, 54, 7, 66, 19, -8 },
+ { -8, 14, 63, 7, 59, 8, -8 },
+ { -8, 25, 69, 7, 48, 0, -6 } },
+ .odd = { { -8, 16, 66, 7, 56, 6, -8 },
+ { -8, 28, 69, 7, 46, -1, -6 },
+ { -4, -4, 39, 7, 69, 34, -6 },
+ { -7, 2, 51, 7, 67, 23, -8 },
+ { -8, 10, 60, 7, 62, 12, -8 },
+ { -8, 21, 67, 7, 52, 3, -7 },
+ { -7, 32, 71, 7, 41, -4, -5 },
+ { -5, -2, 44, 7, 69, 29, -7 },
+ { -7, 5, 55, 7, 65, 18, -8 },
+ { -8, 15, 63, 7, 58, 8, -8 },
+ { -8, 26, 69, 7, 47, 0, -6 },
+ { -4, -5, 37, 7, 71, 35, -6 },
+ { -6, 1, 49, 7, 68, 24, -8 },
+ { -8, 9, 59, 7, 63, 13, -8 },
+ { -8, 20, 65, 7, 54, 4, -7 },
+ { -7, 31, 70, 7, 42, -3, -5 },
+ { -5, -3, 42, 7, 70, 31, -7 },
+ { -7, 4, 54, 7, 65, 20, -8 },
+ { -8, 13, 63, 7, 59, 9, -8 },
+ { -8, 24, 68, 7, 49, 1, -6 },
+ { -6, 35, 71, 7, 37, -5, -4 },
+ { -6, 0, 47, 7, 69, 26, -8 },
+ { -8, 8, 58, 7, 63, 15, -8 },
+ { -8, 18, 65, 7, 55, 5, -7 },
+ { -7, 29, 69, 7, 44, -2, -5 },
+ { -5, -4, 41, 7, 71, 32, -7 },
+ { -7, 3, 52, 7, 67, 21, -8 },
+ { -8, 12, 62, 7, 60, 10, -8 },
+ { -8, 23, 67, 7, 51, 2, -7 },
+ { -6, 34, 69, 7, 39, -4, -4 },
+ { -6, -1, 46, 7, 69, 28, -8 },
+ { -8, 6, 56, 7, 66, 16, -8 } } },
+ .ptrn_arr = { { 0x3399cce7, 0x3399cce7, 0x3399ce67, 0xce67 } },
+ .sample_patrn_length = 114,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 26) = 0.551724 */
+ .hor_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 46, 7, 68, 27, -7 },
+ { -8, 7, 55, 7, 64, 18, -8 },
+ { -8, 15, 62, 7, 58, 9, -8 },
+ { -8, 24, 68, 7, 49, 2, -7 },
+ { -6, 33, 69, 7, 40, -3, -5 },
+ { -6, -2, 43, 7, 70, 30, -7 },
+ { -7, 4, 52, 7, 66, 21, -8 },
+ { -8, 12, 60, 7, 60, 12, -8 },
+ { -8, 21, 66, 7, 52, 4, -7 },
+ { -7, 30, 70, 7, 43, -2, -6 },
+ { -5, -3, 40, 7, 69, 33, -6 },
+ { -7, 2, 49, 7, 68, 24, -8 },
+ { -8, 9, 58, 7, 62, 15, -8 },
+ { -8, 18, 64, 7, 55, 7, -8 },
+ { -7, 27, 68, 7, 46, 0, -6 } },
+ .odd = { { -8, 17, 63, 7, 56, 8, -8 },
+ { -8, 26, 67, 7, 48, 1, -6 },
+ { -5, 35, 69, 7, 38, -4, -5 },
+ { -6, -1, 45, 7, 68, 29, -7 },
+ { -7, 5, 54, 7, 64, 20, -8 },
+ { -8, 14, 60, 7, 59, 11, -8 },
+ { -8, 23, 66, 7, 51, 3, -7 },
+ { -6, 32, 69, 7, 41, -3, -5 },
+ { -5, -3, 41, 7, 69, 32, -6 },
+ { -7, 3, 51, 7, 66, 23, -8 },
+ { -8, 11, 59, 7, 60, 14, -8 },
+ { -8, 20, 64, 7, 54, 5, -7 },
+ { -7, 29, 68, 7, 45, -1, -6 },
+ { -5, -4, 38, 7, 69, 35, -5 },
+ { -6, 1, 48, 7, 67, 26, -8 },
+ { -8, 8, 56, 7, 63, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 46, 7, 68, 27, -7 },
+ { -8, 7, 55, 7, 64, 18, -8 },
+ { -8, 15, 62, 7, 58, 9, -8 },
+ { -8, 24, 68, 7, 49, 2, -7 },
+ { -6, 33, 69, 7, 40, -3, -5 },
+ { -6, -2, 43, 7, 70, 30, -7 },
+ { -7, 4, 52, 7, 66, 21, -8 },
+ { -8, 12, 60, 7, 60, 12, -8 },
+ { -8, 21, 66, 7, 52, 4, -7 },
+ { -7, 30, 70, 7, 43, -2, -6 },
+ { -5, -3, 40, 7, 69, 33, -6 },
+ { -7, 2, 49, 7, 68, 24, -8 },
+ { -8, 9, 58, 7, 62, 15, -8 },
+ { -8, 18, 64, 7, 55, 7, -8 },
+ { -7, 27, 68, 7, 46, 0, -6 } },
+ .odd = { { -8, 17, 63, 7, 56, 8, -8 },
+ { -8, 26, 67, 7, 48, 1, -6 },
+ { -5, 35, 69, 7, 38, -4, -5 },
+ { -6, -1, 45, 7, 68, 29, -7 },
+ { -7, 5, 54, 7, 64, 20, -8 },
+ { -8, 14, 60, 7, 59, 11, -8 },
+ { -8, 23, 66, 7, 51, 3, -7 },
+ { -6, 32, 69, 7, 41, -3, -5 },
+ { -5, -3, 41, 7, 69, 32, -6 },
+ { -7, 3, 51, 7, 66, 23, -8 },
+ { -8, 11, 59, 7, 60, 14, -8 },
+ { -8, 20, 64, 7, 54, 5, -7 },
+ { -7, 29, 68, 7, 45, -1, -6 },
+ { -5, -4, 38, 7, 69, 35, -5 },
+ { -6, 1, 48, 7, 67, 26, -8 },
+ { -8, 8, 56, 7, 63, 17, -8 } } },
+ .ptrn_arr = { { 0x399cce67, 0xcce673 } },
+ .sample_patrn_length = 58,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 27) = 0.542373 */
+ .hor_phase_arr = {
+ .even = { { -4, 37, 67, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 67, 29, -6 },
+ { -7, 5, 52, 7, 64, 22, -8 },
+ { -8, 12, 58, 7, 60, 14, -8 },
+ { -8, 19, 63, 7, 54, 8, -8 },
+ { -7, 26, 67, 7, 47, 2, -7 },
+ { -5, 34, 66, 7, 40, -2, -5 },
+ { -6, -2, 41, 7, 68, 32, -5 },
+ { -7, 3, 49, 7, 65, 25, -7 },
+ { -8, 9, 56, 7, 62, 17, -8 },
+ { -8, 16, 61, 7, 57, 10, -8 },
+ { -8, 23, 66, 7, 50, 4, -7 },
+ { -6, 31, 67, 7, 43, -1, -6 },
+ { -5, -3, 38, 7, 67, 35, -4 },
+ { -6, 1, 46, 7, 66, 28, -7 },
+ { -8, 6, 53, 7, 65, 20, -8 },
+ { -8, 13, 59, 7, 59, 13, -8 },
+ { -8, 20, 65, 7, 53, 6, -8 },
+ { -7, 28, 66, 7, 46, 1, -6 },
+ { -4, 35, 67, 7, 38, -3, -5 },
+ { -6, -1, 43, 7, 67, 31, -6 },
+ { -7, 4, 50, 7, 66, 23, -8 },
+ { -8, 10, 57, 7, 61, 16, -8 },
+ { -8, 17, 62, 7, 56, 9, -8 },
+ { -7, 25, 65, 7, 49, 3, -7 },
+ { -5, 32, 68, 7, 41, -2, -6 },
+ { -5, -2, 40, 7, 66, 34, -5 },
+ { -7, 2, 47, 7, 67, 26, -7 },
+ { -8, 8, 54, 7, 63, 19, -8 },
+ { -8, 14, 60, 7, 58, 12, -8 },
+ { -8, 22, 64, 7, 52, 5, -7 },
+ { -6, 29, 67, 7, 44, 0, -6 } },
+ .odd = { { -8, 17, 61, 7, 56, 10, -8 },
+ { -7, 24, 64, 7, 50, 4, -7 },
+ { -6, 31, 68, 7, 42, -1, -6 },
+ { -5, -3, 39, 7, 68, 34, -5 },
+ { -7, 1, 47, 7, 67, 27, -7 },
+ { -8, 7, 54, 7, 64, 19, -8 },
+ { -8, 14, 59, 7, 59, 12, -8 },
+ { -8, 21, 64, 7, 52, 6, -7 },
+ { -7, 28, 68, 7, 45, 0, -6 },
+ { -4, 36, 68, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 66, 30, -6 },
+ { -7, 5, 51, 7, 65, 22, -8 },
+ { -8, 11, 57, 7, 61, 15, -8 },
+ { -8, 18, 63, 7, 55, 8, -8 },
+ { -7, 25, 67, 7, 48, 2, -7 },
+ { -5, 33, 66, 7, 41, -2, -5 },
+ { -5, -2, 41, 7, 66, 33, -5 },
+ { -7, 2, 48, 7, 67, 25, -7 },
+ { -8, 8, 55, 7, 63, 18, -8 },
+ { -8, 15, 61, 7, 57, 11, -8 },
+ { -8, 22, 65, 7, 51, 5, -7 },
+ { -6, 30, 66, 7, 44, 0, -6 },
+ { -5, -4, 37, 7, 68, 36, -4 },
+ { -6, 0, 45, 7, 68, 28, -7 },
+ { -7, 6, 52, 7, 64, 21, -8 },
+ { -8, 12, 59, 7, 59, 14, -8 },
+ { -8, 19, 64, 7, 54, 7, -8 },
+ { -7, 27, 67, 7, 47, 1, -7 },
+ { -5, 34, 68, 7, 39, -3, -5 },
+ { -6, -1, 42, 7, 68, 31, -6 },
+ { -7, 4, 50, 7, 64, 24, -7 },
+ { -8, 10, 56, 7, 61, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -4, 37, 67, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 67, 29, -6 },
+ { -7, 5, 52, 7, 64, 22, -8 },
+ { -8, 12, 58, 7, 60, 14, -8 },
+ { -8, 19, 63, 7, 54, 8, -8 },
+ { -7, 26, 67, 7, 47, 2, -7 },
+ { -5, 34, 66, 7, 40, -2, -5 },
+ { -6, -2, 41, 7, 68, 32, -5 },
+ { -7, 3, 49, 7, 65, 25, -7 },
+ { -8, 9, 56, 7, 62, 17, -8 },
+ { -8, 16, 61, 7, 57, 10, -8 },
+ { -8, 23, 66, 7, 50, 4, -7 },
+ { -6, 31, 67, 7, 43, -1, -6 },
+ { -5, -3, 38, 7, 67, 35, -4 },
+ { -6, 1, 46, 7, 66, 28, -7 },
+ { -8, 6, 53, 7, 65, 20, -8 },
+ { -8, 13, 59, 7, 59, 13, -8 },
+ { -8, 20, 65, 7, 53, 6, -8 },
+ { -7, 28, 66, 7, 46, 1, -6 },
+ { -4, 35, 67, 7, 38, -3, -5 },
+ { -6, -1, 43, 7, 67, 31, -6 },
+ { -7, 4, 50, 7, 66, 23, -8 },
+ { -8, 10, 57, 7, 61, 16, -8 },
+ { -8, 17, 62, 7, 56, 9, -8 },
+ { -7, 25, 65, 7, 49, 3, -7 },
+ { -5, 32, 68, 7, 41, -2, -6 },
+ { -5, -2, 40, 7, 66, 34, -5 },
+ { -7, 2, 47, 7, 67, 26, -7 },
+ { -8, 8, 54, 7, 63, 19, -8 },
+ { -8, 14, 60, 7, 58, 12, -8 },
+ { -8, 22, 64, 7, 52, 5, -7 },
+ { -6, 29, 67, 7, 44, 0, -6 } },
+ .odd = { { -8, 17, 61, 7, 56, 10, -8 },
+ { -7, 24, 64, 7, 50, 4, -7 },
+ { -6, 31, 68, 7, 42, -1, -6 },
+ { -5, -3, 39, 7, 68, 34, -5 },
+ { -7, 1, 47, 7, 67, 27, -7 },
+ { -8, 7, 54, 7, 64, 19, -8 },
+ { -8, 14, 59, 7, 59, 12, -8 },
+ { -8, 21, 64, 7, 52, 6, -7 },
+ { -7, 28, 68, 7, 45, 0, -6 },
+ { -4, 36, 68, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 66, 30, -6 },
+ { -7, 5, 51, 7, 65, 22, -8 },
+ { -8, 11, 57, 7, 61, 15, -8 },
+ { -8, 18, 63, 7, 55, 8, -8 },
+ { -7, 25, 67, 7, 48, 2, -7 },
+ { -5, 33, 66, 7, 41, -2, -5 },
+ { -5, -2, 41, 7, 66, 33, -5 },
+ { -7, 2, 48, 7, 67, 25, -7 },
+ { -8, 8, 55, 7, 63, 18, -8 },
+ { -8, 15, 61, 7, 57, 11, -8 },
+ { -8, 22, 65, 7, 51, 5, -7 },
+ { -6, 30, 66, 7, 44, 0, -6 },
+ { -5, -4, 37, 7, 68, 36, -4 },
+ { -6, 0, 45, 7, 68, 28, -7 },
+ { -7, 6, 52, 7, 64, 21, -8 },
+ { -8, 12, 59, 7, 59, 14, -8 },
+ { -8, 19, 64, 7, 54, 7, -8 },
+ { -7, 27, 67, 7, 47, 1, -7 },
+ { -5, 34, 68, 7, 39, -3, -5 },
+ { -6, -1, 42, 7, 68, 31, -6 },
+ { -7, 4, 50, 7, 64, 24, -7 },
+ { -8, 10, 56, 7, 61, 17, -8 } } },
+ .ptrn_arr = { { 0x99ccce67, 0xce667339, 0x733399cc, 0xcce66 } },
+ .sample_patrn_length = 118,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 28) = 0.533333 */
+ .hor_phase_arr = {
+ .even = { { -3, 37, 65, 7, 37, -3, -5 },
+ { -6, 0, 43, 7, 65, 31, -5 },
+ { -7, 4, 48, 7, 65, 25, -7 },
+ { -8, 9, 54, 7, 62, 19, -8 },
+ { -8, 14, 58, 7, 58, 14, -8 },
+ { -8, 19, 62, 7, 54, 9, -8 },
+ { -7, 25, 65, 7, 48, 4, -7 },
+ { -5, 31, 65, 7, 43, 0, -6 } },
+ .odd = { { -8, 17, 60, 7, 56, 11, -8 },
+ { -7, 22, 63, 7, 51, 6, -7 },
+ { -6, 28, 65, 7, 46, 2, -7 },
+ { -4, 34, 66, 7, 40, -2, -6 },
+ { -6, -2, 40, 7, 66, 34, -4 },
+ { -7, 2, 46, 7, 65, 28, -6 },
+ { -7, 6, 51, 7, 63, 22, -7 },
+ { -8, 11, 56, 7, 60, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -3, 37, 65, 7, 37, -3, -5 },
+ { -6, 0, 43, 7, 65, 31, -5 },
+ { -7, 4, 48, 7, 65, 25, -7 },
+ { -8, 9, 54, 7, 62, 19, -8 },
+ { -8, 14, 58, 7, 58, 14, -8 },
+ { -8, 19, 62, 7, 54, 9, -8 },
+ { -7, 25, 65, 7, 48, 4, -7 },
+ { -5, 31, 65, 7, 43, 0, -6 } },
+ .odd = { { -8, 17, 60, 7, 56, 11, -8 },
+ { -7, 22, 63, 7, 51, 6, -7 },
+ { -6, 28, 65, 7, 46, 2, -7 },
+ { -4, 34, 66, 7, 40, -2, -6 },
+ { -6, -2, 40, 7, 66, 34, -4 },
+ { -7, 2, 46, 7, 65, 28, -6 },
+ { -7, 6, 51, 7, 63, 22, -7 },
+ { -8, 11, 56, 7, 60, 17, -8 } } },
+ .ptrn_arr = { { 0xccce667 } },
+ .sample_patrn_length = 30,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 29) = 0.52459 */
+ .hor_phase_arr = {
+ .even = { { -2, 37, 63, 7, 37, -2, -5 },
+ { -6, 0, 41, 7, 64, 33, -4 },
+ { -7, 3, 45, 7, 65, 28, -6 },
+ { -7, 6, 49, 7, 63, 24, -7 },
+ { -8, 9, 53, 7, 61, 20, -7 },
+ { -8, 13, 56, 7, 59, 16, -8 },
+ { -8, 17, 60, 7, 55, 12, -8 },
+ { -7, 21, 62, 7, 52, 8, -8 },
+ { -6, 26, 62, 7, 48, 5, -7 },
+ { -5, 30, 64, 7, 44, 2, -7 },
+ { -4, 34, 65, 7, 40, -1, -6 },
+ { -6, -2, 38, 7, 66, 35, -3 },
+ { -6, 1, 42, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 63, 27, -6 },
+ { -7, 7, 50, 7, 62, 23, -7 },
+ { -8, 11, 54, 7, 59, 19, -7 },
+ { -8, 15, 57, 7, 57, 15, -8 },
+ { -7, 19, 59, 7, 54, 11, -8 },
+ { -7, 23, 62, 7, 50, 7, -7 },
+ { -6, 27, 63, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 42, 1, -6 },
+ { -3, 35, 66, 7, 38, -2, -6 },
+ { -6, -1, 40, 7, 65, 34, -4 },
+ { -7, 2, 44, 7, 64, 30, -5 },
+ { -7, 5, 48, 7, 62, 26, -6 },
+ { -8, 8, 52, 7, 62, 21, -7 },
+ { -8, 12, 55, 7, 60, 17, -8 },
+ { -8, 16, 59, 7, 56, 13, -8 },
+ { -7, 20, 61, 7, 53, 9, -8 },
+ { -7, 24, 63, 7, 49, 6, -7 },
+ { -6, 28, 65, 7, 45, 3, -7 },
+ { -4, 33, 64, 7, 41, 0, -6 } },
+ .odd = { { -8, 17, 58, 7, 56, 13, -8 },
+ { -7, 21, 61, 7, 52, 9, -8 },
+ { -6, 25, 62, 7, 49, 5, -7 },
+ { -5, 29, 64, 7, 45, 2, -7 },
+ { -4, 33, 65, 7, 40, 0, -6 },
+ { -6, -2, 37, 7, 66, 36, -3 },
+ { -6, 0, 42, 7, 64, 32, -4 },
+ { -7, 3, 46, 7, 64, 28, -6 },
+ { -7, 7, 50, 7, 61, 24, -7 },
+ { -8, 10, 53, 7, 61, 19, -7 },
+ { -8, 14, 57, 7, 58, 15, -8 },
+ { -8, 18, 60, 7, 55, 11, -8 },
+ { -7, 22, 62, 7, 51, 8, -8 },
+ { -6, 26, 64, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 43, 1, -7 },
+ { -3, 35, 64, 7, 39, -1, -6 },
+ { -6, -1, 39, 7, 64, 35, -3 },
+ { -7, 1, 43, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 64, 26, -6 },
+ { -8, 8, 51, 7, 62, 22, -7 },
+ { -8, 11, 55, 7, 60, 18, -8 },
+ { -8, 15, 58, 7, 57, 14, -8 },
+ { -7, 19, 61, 7, 53, 10, -8 },
+ { -7, 24, 61, 7, 50, 7, -7 },
+ { -6, 28, 64, 7, 46, 3, -7 },
+ { -4, 32, 64, 7, 42, 0, -6 },
+ { -3, 36, 66, 7, 37, -2, -6 },
+ { -6, 0, 40, 7, 65, 33, -4 },
+ { -7, 2, 45, 7, 64, 29, -5 },
+ { -7, 5, 49, 7, 62, 25, -6 },
+ { -8, 9, 52, 7, 61, 21, -7 },
+ { -8, 13, 56, 7, 58, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -2, 37, 63, 7, 37, -2, -5 },
+ { -6, 0, 41, 7, 64, 33, -4 },
+ { -7, 3, 45, 7, 65, 28, -6 },
+ { -7, 6, 49, 7, 63, 24, -7 },
+ { -8, 9, 53, 7, 61, 20, -7 },
+ { -8, 13, 56, 7, 59, 16, -8 },
+ { -8, 17, 60, 7, 55, 12, -8 },
+ { -7, 21, 62, 7, 52, 8, -8 },
+ { -6, 26, 62, 7, 48, 5, -7 },
+ { -5, 30, 64, 7, 44, 2, -7 },
+ { -4, 34, 65, 7, 40, -1, -6 },
+ { -6, -2, 38, 7, 66, 35, -3 },
+ { -6, 1, 42, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 63, 27, -6 },
+ { -7, 7, 50, 7, 62, 23, -7 },
+ { -8, 11, 54, 7, 59, 19, -7 },
+ { -8, 15, 57, 7, 57, 15, -8 },
+ { -7, 19, 59, 7, 54, 11, -8 },
+ { -7, 23, 62, 7, 50, 7, -7 },
+ { -6, 27, 63, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 42, 1, -6 },
+ { -3, 35, 66, 7, 38, -2, -6 },
+ { -6, -1, 40, 7, 65, 34, -4 },
+ { -7, 2, 44, 7, 64, 30, -5 },
+ { -7, 5, 48, 7, 62, 26, -6 },
+ { -8, 8, 52, 7, 62, 21, -7 },
+ { -8, 12, 55, 7, 60, 17, -8 },
+ { -8, 16, 59, 7, 56, 13, -8 },
+ { -7, 20, 61, 7, 53, 9, -8 },
+ { -7, 24, 63, 7, 49, 6, -7 },
+ { -6, 28, 65, 7, 45, 3, -7 },
+ { -4, 33, 64, 7, 41, 0, -6 } },
+ .odd = { { -8, 17, 58, 7, 56, 13, -8 },
+ { -7, 21, 61, 7, 52, 9, -8 },
+ { -6, 25, 62, 7, 49, 5, -7 },
+ { -5, 29, 64, 7, 45, 2, -7 },
+ { -4, 33, 65, 7, 40, 0, -6 },
+ { -6, -2, 37, 7, 66, 36, -3 },
+ { -6, 0, 42, 7, 64, 32, -4 },
+ { -7, 3, 46, 7, 64, 28, -6 },
+ { -7, 7, 50, 7, 61, 24, -7 },
+ { -8, 10, 53, 7, 61, 19, -7 },
+ { -8, 14, 57, 7, 58, 15, -8 },
+ { -8, 18, 60, 7, 55, 11, -8 },
+ { -7, 22, 62, 7, 51, 8, -8 },
+ { -6, 26, 64, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 43, 1, -7 },
+ { -3, 35, 64, 7, 39, -1, -6 },
+ { -6, -1, 39, 7, 64, 35, -3 },
+ { -7, 1, 43, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 64, 26, -6 },
+ { -8, 8, 51, 7, 62, 22, -7 },
+ { -8, 11, 55, 7, 60, 18, -8 },
+ { -8, 15, 58, 7, 57, 14, -8 },
+ { -7, 19, 61, 7, 53, 10, -8 },
+ { -7, 24, 61, 7, 50, 7, -7 },
+ { -6, 28, 64, 7, 46, 3, -7 },
+ { -4, 32, 64, 7, 42, 0, -6 },
+ { -3, 36, 66, 7, 37, -2, -6 },
+ { -6, 0, 40, 7, 65, 33, -4 },
+ { -7, 2, 45, 7, 64, 29, -5 },
+ { -7, 5, 49, 7, 62, 25, -6 },
+ { -8, 9, 52, 7, 61, 21, -7 },
+ { -8, 13, 56, 7, 58, 17, -8 } } },
+ .ptrn_arr = { { 0xccce6667, 0x399999cc, 0x66673333, 0xcccce6 } },
+ .sample_patrn_length = 122,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 30) = 0.516129 */
+ .hor_phase_arr = {
+ .even = { { -2, 37, 64, 7, 37, -2, -6 },
+ { -6, 0, 39, 7, 64, 34, -3 },
+ { -7, 2, 42, 7, 64, 31, -4 },
+ { -7, 4, 45, 7, 62, 29, -5 },
+ { -7, 6, 47, 7, 62, 26, -6 },
+ { -7, 8, 50, 7, 60, 23, -6 },
+ { -8, 10, 52, 7, 60, 21, -7 },
+ { -8, 13, 54, 7, 58, 18, -7 },
+ { -8, 15, 58, 7, 56, 15, -8 },
+ { -7, 18, 58, 7, 54, 13, -8 },
+ { -7, 21, 60, 7, 52, 10, -8 },
+ { -6, 23, 60, 7, 50, 8, -7 },
+ { -6, 26, 62, 7, 47, 6, -7 },
+ { -5, 29, 62, 7, 45, 4, -7 },
+ { -4, 31, 64, 7, 42, 2, -7 },
+ { -3, 34, 64, 7, 39, 0, -6 } },
+ .odd = { { -7, 17, 57, 7, 55, 14, -8 },
+ { -7, 19, 59, 7, 53, 12, -8 },
+ { -7, 22, 61, 7, 51, 9, -8 },
+ { -6, 25, 60, 7, 49, 7, -7 },
+ { -5, 27, 62, 7, 46, 5, -7 },
+ { -5, 30, 63, 7, 44, 3, -7 },
+ { -3, 33, 62, 7, 41, 1, -6 },
+ { -2, 35, 64, 7, 38, -1, -6 },
+ { -6, -1, 38, 7, 64, 35, -2 },
+ { -6, 1, 41, 7, 62, 33, -3 },
+ { -7, 3, 44, 7, 63, 30, -5 },
+ { -7, 5, 46, 7, 62, 27, -5 },
+ { -7, 7, 49, 7, 60, 25, -6 },
+ { -8, 9, 51, 7, 61, 22, -7 },
+ { -8, 12, 53, 7, 59, 19, -7 },
+ { -8, 14, 55, 7, 57, 17, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -2, 37, 64, 7, 37, -2, -6 },
+ { -6, 0, 39, 7, 64, 34, -3 },
+ { -7, 2, 42, 7, 64, 31, -4 },
+ { -7, 4, 45, 7, 62, 29, -5 },
+ { -7, 6, 47, 7, 62, 26, -6 },
+ { -7, 8, 50, 7, 60, 23, -6 },
+ { -8, 10, 52, 7, 60, 21, -7 },
+ { -8, 13, 54, 7, 58, 18, -7 },
+ { -8, 15, 58, 7, 56, 15, -8 },
+ { -7, 18, 58, 7, 54, 13, -8 },
+ { -7, 21, 60, 7, 52, 10, -8 },
+ { -6, 23, 60, 7, 50, 8, -7 },
+ { -6, 26, 62, 7, 47, 6, -7 },
+ { -5, 29, 62, 7, 45, 4, -7 },
+ { -4, 31, 64, 7, 42, 2, -7 },
+ { -3, 34, 64, 7, 39, 0, -6 } },
+ .odd = { { -7, 17, 57, 7, 55, 14, -8 },
+ { -7, 19, 59, 7, 53, 12, -8 },
+ { -7, 22, 61, 7, 51, 9, -8 },
+ { -6, 25, 60, 7, 49, 7, -7 },
+ { -5, 27, 62, 7, 46, 5, -7 },
+ { -5, 30, 63, 7, 44, 3, -7 },
+ { -3, 33, 62, 7, 41, 1, -6 },
+ { -2, 35, 64, 7, 38, -1, -6 },
+ { -6, -1, 38, 7, 64, 35, -2 },
+ { -6, 1, 41, 7, 62, 33, -3 },
+ { -7, 3, 44, 7, 63, 30, -5 },
+ { -7, 5, 46, 7, 62, 27, -5 },
+ { -7, 7, 49, 7, 60, 25, -6 },
+ { -8, 9, 51, 7, 61, 22, -7 },
+ { -8, 12, 53, 7, 59, 19, -7 },
+ { -8, 14, 55, 7, 57, 17, -7 } } },
+ .ptrn_arr = { { 0xe6666667, 0xccccccc } },
+ .sample_patrn_length = 62,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 31) = 0.507937 */
+ .hor_phase_arr = {
+ .even = { { -1, 37, 62, 7, 37, -1, -6 },
+ { -6, 0, 38, 7, 62, 35, -1 },
+ { -6, 1, 39, 7, 62, 34, -2 },
+ { -7, 2, 41, 7, 62, 33, -3 },
+ { -7, 3, 42, 7, 61, 32, -3 },
+ { -7, 4, 43, 7, 62, 30, -4 },
+ { -7, 4, 44, 7, 62, 29, -4 },
+ { -7, 6, 46, 7, 60, 28, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 48, 7, 60, 25, -6 },
+ { -7, 9, 49, 7, 59, 24, -6 },
+ { -7, 10, 50, 7, 59, 22, -6 },
+ { -7, 11, 51, 7, 59, 21, -7 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 15, 54, 7, 56, 17, -7 },
+ { -7, 16, 55, 7, 55, 16, -7 },
+ { -7, 17, 56, 7, 54, 15, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -7, 21, 59, 7, 51, 11, -7 },
+ { -6, 22, 59, 7, 50, 10, -7 },
+ { -6, 24, 59, 7, 49, 9, -7 },
+ { -6, 25, 60, 7, 48, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 28, 60, 7, 46, 6, -7 },
+ { -4, 29, 62, 7, 44, 4, -7 },
+ { -4, 30, 62, 7, 43, 4, -7 },
+ { -3, 32, 61, 7, 42, 3, -7 },
+ { -3, 33, 62, 7, 41, 2, -7 },
+ { -2, 34, 62, 7, 39, 1, -6 },
+ { -1, 35, 62, 7, 38, 0, -6 } },
+ .odd = { { -7, 17, 55, 7, 55, 15, -7 },
+ { -7, 18, 56, 7, 54, 14, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -6, 22, 58, 7, 51, 10, -7 },
+ { -6, 23, 59, 7, 50, 9, -7 },
+ { -6, 24, 60, 7, 49, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 27, 61, 7, 46, 6, -7 },
+ { -5, 28, 62, 7, 45, 5, -7 },
+ { -4, 30, 61, 7, 44, 4, -7 },
+ { -4, 31, 62, 7, 43, 3, -7 },
+ { -3, 32, 63, 7, 41, 2, -7 },
+ { -2, 34, 61, 7, 40, 1, -6 },
+ { -2, 35, 62, 7, 39, 0, -6 },
+ { -1, 36, 62, 7, 37, 0, -6 },
+ { -6, 0, 37, 7, 62, 36, -1 },
+ { -6, 0, 39, 7, 62, 35, -2 },
+ { -6, 1, 40, 7, 61, 34, -2 },
+ { -7, 2, 41, 7, 63, 32, -3 },
+ { -7, 3, 43, 7, 62, 31, -4 },
+ { -7, 4, 44, 7, 61, 30, -4 },
+ { -7, 5, 45, 7, 62, 28, -5 },
+ { -7, 6, 46, 7, 61, 27, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 49, 7, 60, 24, -6 },
+ { -7, 9, 50, 7, 59, 23, -6 },
+ { -7, 10, 51, 7, 58, 22, -6 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 14, 54, 7, 56, 18, -7 },
+ { -7, 15, 55, 7, 55, 17, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -1, 37, 62, 7, 37, -1, -6 },
+ { -6, 0, 38, 7, 62, 35, -1 },
+ { -6, 1, 39, 7, 62, 34, -2 },
+ { -7, 2, 41, 7, 62, 33, -3 },
+ { -7, 3, 42, 7, 61, 32, -3 },
+ { -7, 4, 43, 7, 62, 30, -4 },
+ { -7, 4, 44, 7, 62, 29, -4 },
+ { -7, 6, 46, 7, 60, 28, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 48, 7, 60, 25, -6 },
+ { -7, 9, 49, 7, 59, 24, -6 },
+ { -7, 10, 50, 7, 59, 22, -6 },
+ { -7, 11, 51, 7, 59, 21, -7 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 15, 54, 7, 56, 17, -7 },
+ { -7, 16, 55, 7, 55, 16, -7 },
+ { -7, 17, 56, 7, 54, 15, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -7, 21, 59, 7, 51, 11, -7 },
+ { -6, 22, 59, 7, 50, 10, -7 },
+ { -6, 24, 59, 7, 49, 9, -7 },
+ { -6, 25, 60, 7, 48, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 28, 60, 7, 46, 6, -7 },
+ { -4, 29, 62, 7, 44, 4, -7 },
+ { -4, 30, 62, 7, 43, 4, -7 },
+ { -3, 32, 61, 7, 42, 3, -7 },
+ { -3, 33, 62, 7, 41, 2, -7 },
+ { -2, 34, 62, 7, 39, 1, -6 },
+ { -1, 35, 62, 7, 38, 0, -6 } },
+ .odd = { { -7, 17, 55, 7, 55, 15, -7 },
+ { -7, 18, 56, 7, 54, 14, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -6, 22, 58, 7, 51, 10, -7 },
+ { -6, 23, 59, 7, 50, 9, -7 },
+ { -6, 24, 60, 7, 49, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 27, 61, 7, 46, 6, -7 },
+ { -5, 28, 62, 7, 45, 5, -7 },
+ { -4, 30, 61, 7, 44, 4, -7 },
+ { -4, 31, 62, 7, 43, 3, -7 },
+ { -3, 32, 63, 7, 41, 2, -7 },
+ { -2, 34, 61, 7, 40, 1, -6 },
+ { -2, 35, 62, 7, 39, 0, -6 },
+ { -1, 36, 62, 7, 37, 0, -6 },
+ { -6, 0, 37, 7, 62, 36, -1 },
+ { -6, 0, 39, 7, 62, 35, -2 },
+ { -6, 1, 40, 7, 61, 34, -2 },
+ { -7, 2, 41, 7, 63, 32, -3 },
+ { -7, 3, 43, 7, 62, 31, -4 },
+ { -7, 4, 44, 7, 61, 30, -4 },
+ { -7, 5, 45, 7, 62, 28, -5 },
+ { -7, 6, 46, 7, 61, 27, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 49, 7, 60, 24, -6 },
+ { -7, 9, 50, 7, 59, 23, -6 },
+ { -7, 10, 51, 7, 58, 22, -6 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 14, 54, 7, 56, 18, -7 },
+ { -7, 15, 55, 7, 55, 17, -7 } } },
+ .ptrn_arr = { { 0x66666667, 0xe6666666, 0xcccccccc, 0xccccccc } },
+ .sample_patrn_length = 126,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 32) = 0.5 */
+ .hor_phase_arr = {
+ .even = { { 0, 8, 112, 7, 8, 0, 0 } },
+ .odd = { { 0, 0, 64, 7, 64, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 8, 112, 7, 8, 0, 0 } },
+ .odd = { { 0, 0, 64, 7, 64, 0, 0 } } },
+ .ptrn_arr = { { 0x3 } },
+ .sample_patrn_length = 4,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 33) = 0.492308 */
+ .hor_phase_arr = {
+ .even = { { 0, 9, 110, 7, 9, 0, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 7, 109, 7, 12, 0, 0 },
+ { 0, 6, 108, 7, 14, 0, 0 },
+ { 0, 5, 107, 7, 16, 0, 0 },
+ { 0, 4, 105, 7, 19, 0, 0 },
+ { 0, 3, 103, 7, 22, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 98, 7, 28, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 83, 7, 44, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 44, 7, 83, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 28, 7, 98, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 22, 7, 103, 3, 0 },
+ { 0, 0, 19, 7, 105, 4, 0 },
+ { 0, 0, 16, 7, 107, 5, 0 },
+ { 0, 0, 14, 7, 108, 6, 0 },
+ { 0, 0, 12, 7, 109, 7, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 } },
+ .odd = { { 0, 0, 61, 7, 67, 0, 0 },
+ { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 51, 7, 76, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 42, 7, 85, 1, 0 },
+ { 0, 0, 38, 7, 89, 1, 0 },
+ { 0, 0, 34, 7, 92, 2, 0 },
+ { 0, 0, 30, 7, 96, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 23, 7, 102, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 18, 7, 106, 4, 0 },
+ { 0, 0, 15, 7, 108, 5, 0 },
+ { 0, 0, 13, 7, 109, 6, 0 },
+ { 0, 0, 11, 7, 110, 7, 0 },
+ { 0, 0, 10, 7, 110, 8, 0 },
+ { 0, 8, 110, 7, 10, 0, 0 },
+ { 0, 7, 110, 7, 11, 0, 0 },
+ { 0, 6, 109, 7, 13, 0, 0 },
+ { 0, 5, 108, 7, 15, 0, 0 },
+ { 0, 4, 106, 7, 18, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 102, 7, 23, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 96, 7, 30, 0, 0 },
+ { 0, 2, 92, 7, 34, 0, 0 },
+ { 0, 1, 89, 7, 38, 0, 0 },
+ { 0, 1, 85, 7, 42, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 76, 7, 51, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 },
+ { 0, 0, 67, 7, 61, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 9, 110, 7, 9, 0, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 7, 109, 7, 12, 0, 0 },
+ { 0, 6, 108, 7, 14, 0, 0 },
+ { 0, 5, 107, 7, 16, 0, 0 },
+ { 0, 4, 105, 7, 19, 0, 0 },
+ { 0, 3, 103, 7, 22, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 98, 7, 28, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 83, 7, 44, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 44, 7, 83, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 28, 7, 98, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 22, 7, 103, 3, 0 },
+ { 0, 0, 19, 7, 105, 4, 0 },
+ { 0, 0, 16, 7, 107, 5, 0 },
+ { 0, 0, 14, 7, 108, 6, 0 },
+ { 0, 0, 12, 7, 109, 7, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 } },
+ .odd = { { 0, 0, 61, 7, 67, 0, 0 },
+ { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 51, 7, 76, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 42, 7, 85, 1, 0 },
+ { 0, 0, 38, 7, 89, 1, 0 },
+ { 0, 0, 34, 7, 92, 2, 0 },
+ { 0, 0, 30, 7, 96, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 23, 7, 102, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 18, 7, 106, 4, 0 },
+ { 0, 0, 15, 7, 108, 5, 0 },
+ { 0, 0, 13, 7, 109, 6, 0 },
+ { 0, 0, 11, 7, 110, 7, 0 },
+ { 0, 0, 10, 7, 110, 8, 0 },
+ { 0, 8, 110, 7, 10, 0, 0 },
+ { 0, 7, 110, 7, 11, 0, 0 },
+ { 0, 6, 109, 7, 13, 0, 0 },
+ { 0, 5, 108, 7, 15, 0, 0 },
+ { 0, 4, 106, 7, 18, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 102, 7, 23, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 96, 7, 30, 0, 0 },
+ { 0, 2, 92, 7, 34, 0, 0 },
+ { 0, 1, 89, 7, 38, 0, 0 },
+ { 0, 1, 85, 7, 42, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 76, 7, 51, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 },
+ { 0, 0, 67, 7, 61, 0, 0 } } },
+ .ptrn_arr = { { 0x33333333, 0x33333333, 0x99999999, 0x99999999 } },
+ .sample_patrn_length = 130,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 34) = 0.484848 */
+ .hor_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 7, 108, 7, 13, 0, 0 },
+ { 0, 5, 106, 7, 17, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 17, 7, 106, 5, 0 },
+ { 0, 0, 13, 7, 108, 7, 0 } },
+ .odd = { { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 15, 7, 107, 6, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 6, 107, 7, 15, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 7, 108, 7, 13, 0, 0 },
+ { 0, 5, 106, 7, 17, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 17, 7, 106, 5, 0 },
+ { 0, 0, 13, 7, 108, 7, 0 } },
+ .odd = { { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 15, 7, 107, 6, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 6, 107, 7, 15, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 } } },
+ .ptrn_arr = { { 0x33333333, 0x99999999 } },
+ .sample_patrn_length = 66,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 35) = 0.477612 */
+ .hor_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 6, 106, 7, 16, 0, 0 },
+ { 0, 4, 101, 7, 23, 0, 0 },
+ { 0, 2, 93, 7, 33, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 41, 7, 85, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 20, 7, 103, 5, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 9, 107, 7, 12, 0, 0 },
+ { 0, 5, 105, 7, 18, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 18, 7, 105, 5, 0 },
+ { 0, 0, 12, 7, 107, 9, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 5, 103, 7, 20, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 85, 7, 41, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 33, 7, 93, 2, 0 },
+ { 0, 0, 23, 7, 101, 4, 0 },
+ { 0, 0, 16, 7, 106, 6, 0 } },
+ .odd = { { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 43, 7, 84, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 15, 7, 106, 7, 0 },
+ { 0, 9, 108, 7, 11, 0, 0 },
+ { 0, 6, 105, 7, 17, 0, 0 },
+ { 0, 4, 99, 7, 25, 0, 0 },
+ { 0, 2, 91, 7, 35, 0, 0 },
+ { 0, 1, 80, 7, 47, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 39, 7, 87, 2, 0 },
+ { 0, 0, 28, 7, 97, 3, 0 },
+ { 0, 0, 19, 7, 104, 5, 0 },
+ { 0, 0, 13, 7, 107, 8, 0 },
+ { 0, 8, 107, 7, 13, 0, 0 },
+ { 0, 5, 104, 7, 19, 0, 0 },
+ { 0, 3, 97, 7, 28, 0, 0 },
+ { 0, 2, 87, 7, 39, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 47, 7, 80, 1, 0 },
+ { 0, 0, 35, 7, 91, 2, 0 },
+ { 0, 0, 25, 7, 99, 4, 0 },
+ { 0, 0, 17, 7, 105, 6, 0 },
+ { 0, 0, 11, 7, 108, 9, 0 },
+ { 0, 7, 106, 7, 15, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 84, 7, 43, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 6, 106, 7, 16, 0, 0 },
+ { 0, 4, 101, 7, 23, 0, 0 },
+ { 0, 2, 93, 7, 33, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 41, 7, 85, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 20, 7, 103, 5, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 9, 107, 7, 12, 0, 0 },
+ { 0, 5, 105, 7, 18, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 18, 7, 105, 5, 0 },
+ { 0, 0, 12, 7, 107, 9, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 5, 103, 7, 20, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 85, 7, 41, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 33, 7, 93, 2, 0 },
+ { 0, 0, 23, 7, 101, 4, 0 },
+ { 0, 0, 16, 7, 106, 6, 0 } },
+ .odd = { { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 43, 7, 84, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 15, 7, 106, 7, 0 },
+ { 0, 9, 108, 7, 11, 0, 0 },
+ { 0, 6, 105, 7, 17, 0, 0 },
+ { 0, 4, 99, 7, 25, 0, 0 },
+ { 0, 2, 91, 7, 35, 0, 0 },
+ { 0, 1, 80, 7, 47, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 39, 7, 87, 2, 0 },
+ { 0, 0, 28, 7, 97, 3, 0 },
+ { 0, 0, 19, 7, 104, 5, 0 },
+ { 0, 0, 13, 7, 107, 8, 0 },
+ { 0, 8, 107, 7, 13, 0, 0 },
+ { 0, 5, 104, 7, 19, 0, 0 },
+ { 0, 3, 97, 7, 28, 0, 0 },
+ { 0, 2, 87, 7, 39, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 47, 7, 80, 1, 0 },
+ { 0, 0, 35, 7, 91, 2, 0 },
+ { 0, 0, 25, 7, 99, 4, 0 },
+ { 0, 0, 17, 7, 105, 6, 0 },
+ { 0, 0, 11, 7, 108, 9, 0 },
+ { 0, 7, 106, 7, 15, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 84, 7, 43, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 } } },
+ .ptrn_arr = { { 0x99933333, 0xccccc999, 0x32666664, 0x99993333,
+ 0x9 } },
+ .sample_patrn_length = 134,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 36) = 0.470588 */
+ .hor_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 95, 7, 30, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 30, 7, 95, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 } },
+ .odd = { { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 24, 7, 100, 4, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 4, 100, 7, 24, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 95, 7, 30, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 30, 7, 95, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 } },
+ .odd = { { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 24, 7, 100, 4, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 4, 100, 7, 24, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 } } },
+ .ptrn_arr = { { 0x99993333 } },
+ .sample_patrn_length = 34,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 37) = 0.463768 */
+ .hor_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 5, 101, 7, 22, 0, 0 },
+ { 0, 2, 88, 7, 38, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 15, 7, 104, 9, 0 },
+ { 0, 7, 104, 7, 17, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 34, 7, 91, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 },
+ { 0, 10, 105, 7, 13, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 },
+ { 0, 0, 13, 7, 105, 10, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 91, 7, 34, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 17, 7, 104, 7, 0 },
+ { 0, 9, 104, 7, 15, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 38, 7, 88, 2, 0 },
+ { 0, 0, 22, 7, 101, 5, 0 } },
+ .odd = { { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 33, 7, 92, 3, 0 },
+ { 0, 0, 18, 7, 103, 7, 0 },
+ { 0, 9, 105, 7, 14, 0, 0 },
+ { 0, 4, 98, 7, 26, 0, 0 },
+ { 0, 2, 82, 7, 44, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 40, 7, 86, 2, 0 },
+ { 0, 0, 23, 7, 100, 5, 0 },
+ { 0, 0, 12, 7, 105, 11, 0 },
+ { 0, 6, 101, 7, 21, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 48, 7, 79, 1, 0 },
+ { 0, 0, 29, 7, 95, 4, 0 },
+ { 0, 0, 16, 7, 104, 8, 0 },
+ { 0, 8, 104, 7, 16, 0, 0 },
+ { 0, 4, 95, 7, 29, 0, 0 },
+ { 0, 1, 79, 7, 48, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 21, 7, 101, 6, 0 },
+ { 0, 11, 105, 7, 12, 0, 0 },
+ { 0, 5, 100, 7, 23, 0, 0 },
+ { 0, 2, 86, 7, 40, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 44, 7, 82, 2, 0 },
+ { 0, 0, 26, 7, 98, 4, 0 },
+ { 0, 0, 14, 7, 105, 9, 0 },
+ { 0, 7, 103, 7, 18, 0, 0 },
+ { 0, 3, 92, 7, 33, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 5, 101, 7, 22, 0, 0 },
+ { 0, 2, 88, 7, 38, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 15, 7, 104, 9, 0 },
+ { 0, 7, 104, 7, 17, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 34, 7, 91, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 },
+ { 0, 10, 105, 7, 13, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 },
+ { 0, 0, 13, 7, 105, 10, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 91, 7, 34, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 17, 7, 104, 7, 0 },
+ { 0, 9, 104, 7, 15, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 38, 7, 88, 2, 0 },
+ { 0, 0, 22, 7, 101, 5, 0 } },
+ .odd = { { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 33, 7, 92, 3, 0 },
+ { 0, 0, 18, 7, 103, 7, 0 },
+ { 0, 9, 105, 7, 14, 0, 0 },
+ { 0, 4, 98, 7, 26, 0, 0 },
+ { 0, 2, 82, 7, 44, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 40, 7, 86, 2, 0 },
+ { 0, 0, 23, 7, 100, 5, 0 },
+ { 0, 0, 12, 7, 105, 11, 0 },
+ { 0, 6, 101, 7, 21, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 48, 7, 79, 1, 0 },
+ { 0, 0, 29, 7, 95, 4, 0 },
+ { 0, 0, 16, 7, 104, 8, 0 },
+ { 0, 8, 104, 7, 16, 0, 0 },
+ { 0, 4, 95, 7, 29, 0, 0 },
+ { 0, 1, 79, 7, 48, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 21, 7, 101, 6, 0 },
+ { 0, 11, 105, 7, 12, 0, 0 },
+ { 0, 5, 100, 7, 23, 0, 0 },
+ { 0, 2, 86, 7, 40, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 44, 7, 82, 2, 0 },
+ { 0, 0, 26, 7, 98, 4, 0 },
+ { 0, 0, 14, 7, 105, 9, 0 },
+ { 0, 7, 103, 7, 18, 0, 0 },
+ { 0, 3, 92, 7, 33, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 } } },
+ .ptrn_arr = { { 0xc9999333, 0x332664cc, 0x4cc99993, 0x93332666,
+ 0x99 } },
+ .sample_patrn_length = 138,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 38) = 0.457143 */
+ .hor_phase_arr = {
+ .even = { { 0, 12, 104, 7, 12, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 16, 7, 103, 9, 0 },
+ { 0, 7, 101, 7, 20, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 20, 7, 101, 7, 0 },
+ { 0, 9, 103, 7, 16, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 } },
+ .odd = { { 0, 0, 50, 7, 76, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 14, 7, 104, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 35, 7, 90, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 90, 7, 35, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 104, 7, 14, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 76, 7, 50, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 12, 104, 7, 12, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 16, 7, 103, 9, 0 },
+ { 0, 7, 101, 7, 20, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 20, 7, 101, 7, 0 },
+ { 0, 9, 103, 7, 16, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 } },
+ .odd = { { 0, 0, 50, 7, 76, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 14, 7, 104, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 35, 7, 90, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 90, 7, 35, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 104, 7, 14, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 76, 7, 50, 0, 0 } } },
+ .ptrn_arr = { { 0xcc999333, 0x99332664, 0x9 } },
+ .sample_patrn_length = 70,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 39) = 0.450704 */
+ .hor_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 5, 94, 7, 29, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 43, 7, 83, 2, 0 },
+ { 0, 0, 21, 7, 100, 7, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 14, 7, 103, 11, 0 },
+ { 0, 5, 97, 7, 26, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 102, 7, 16, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 16, 7, 102, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 26, 7, 97, 5, 0 },
+ { 0, 11, 103, 7, 14, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 7, 100, 7, 21, 0, 0 },
+ { 0, 2, 83, 7, 43, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 29, 7, 94, 5, 0 } },
+ .odd = { { 0, 0, 49, 7, 77, 2, 0 },
+ { 0, 0, 25, 7, 97, 6, 0 },
+ { 0, 10, 103, 7, 15, 0, 0 },
+ { 0, 4, 90, 7, 34, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 37, 7, 88, 3, 0 },
+ { 0, 0, 17, 7, 102, 9, 0 },
+ { 0, 7, 99, 7, 22, 0, 0 },
+ { 0, 2, 81, 7, 45, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 27, 7, 96, 5, 0 },
+ { 0, 12, 103, 7, 13, 0, 0 },
+ { 0, 4, 93, 7, 31, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 41, 7, 84, 3, 0 },
+ { 0, 0, 20, 7, 100, 8, 0 },
+ { 0, 8, 100, 7, 20, 0, 0 },
+ { 0, 3, 84, 7, 41, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 31, 7, 93, 4, 0 },
+ { 0, 0, 13, 7, 103, 12, 0 },
+ { 0, 5, 96, 7, 27, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 45, 7, 81, 2, 0 },
+ { 0, 0, 22, 7, 99, 7, 0 },
+ { 0, 9, 102, 7, 17, 0, 0 },
+ { 0, 3, 88, 7, 37, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 34, 7, 90, 4, 0 },
+ { 0, 0, 15, 7, 103, 10, 0 },
+ { 0, 6, 97, 7, 25, 0, 0 },
+ { 0, 2, 77, 7, 49, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 5, 94, 7, 29, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 43, 7, 83, 2, 0 },
+ { 0, 0, 21, 7, 100, 7, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 14, 7, 103, 11, 0 },
+ { 0, 5, 97, 7, 26, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 102, 7, 16, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 16, 7, 102, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 26, 7, 97, 5, 0 },
+ { 0, 11, 103, 7, 14, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 7, 100, 7, 21, 0, 0 },
+ { 0, 2, 83, 7, 43, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 29, 7, 94, 5, 0 } },
+ .odd = { { 0, 0, 49, 7, 77, 2, 0 },
+ { 0, 0, 25, 7, 97, 6, 0 },
+ { 0, 10, 103, 7, 15, 0, 0 },
+ { 0, 4, 90, 7, 34, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 37, 7, 88, 3, 0 },
+ { 0, 0, 17, 7, 102, 9, 0 },
+ { 0, 7, 99, 7, 22, 0, 0 },
+ { 0, 2, 81, 7, 45, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 27, 7, 96, 5, 0 },
+ { 0, 12, 103, 7, 13, 0, 0 },
+ { 0, 4, 93, 7, 31, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 41, 7, 84, 3, 0 },
+ { 0, 0, 20, 7, 100, 8, 0 },
+ { 0, 8, 100, 7, 20, 0, 0 },
+ { 0, 3, 84, 7, 41, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 31, 7, 93, 4, 0 },
+ { 0, 0, 13, 7, 103, 12, 0 },
+ { 0, 5, 96, 7, 27, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 45, 7, 81, 2, 0 },
+ { 0, 0, 22, 7, 99, 7, 0 },
+ { 0, 9, 102, 7, 17, 0, 0 },
+ { 0, 3, 88, 7, 37, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 34, 7, 90, 4, 0 },
+ { 0, 0, 15, 7, 103, 10, 0 },
+ { 0, 6, 97, 7, 25, 0, 0 },
+ { 0, 2, 77, 7, 49, 0, 0 } } },
+ .ptrn_arr = { { 0x4cc99933, 0xc9993266, 0x9332664c, 0x32664cc9,
+ 0x993 } },
+ .sample_patrn_length = 142,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 40) = 0.444444 */
+ .hor_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 4, 91, 7, 33, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 33, 7, 91, 4, 0 } },
+ .odd = { { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 21, 7, 99, 8, 0 },
+ { 0, 8, 99, 7, 21, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 4, 91, 7, 33, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 33, 7, 91, 4, 0 } },
+ .odd = { { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 21, 7, 99, 8, 0 },
+ { 0, 8, 99, 7, 21, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 } } },
+ .ptrn_arr = { { 0x9933 } },
+ .sample_patrn_length = 18,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 41) = 0.438356 */
+ .hor_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 25, 7, 96, 7, 0 },
+ { 0, 8, 98, 7, 22, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 40, 7, 85, 3, 0 },
+ { 0, 0, 16, 7, 100, 12, 0 },
+ { 0, 5, 90, 7, 33, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 27, 7, 95, 6, 0 },
+ { 0, 9, 99, 7, 20, 0, 0 },
+ { 0, 2, 78, 7, 47, 1, 0 },
+ { 0, 0, 44, 7, 81, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 93, 7, 30, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 30, 7, 93, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 81, 7, 44, 0, 0 },
+ { 0, 1, 47, 7, 78, 2, 0 },
+ { 0, 0, 20, 7, 99, 9, 0 },
+ { 0, 6, 95, 7, 27, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 33, 7, 90, 5, 0 },
+ { 0, 12, 100, 7, 16, 0, 0 },
+ { 0, 3, 85, 7, 40, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 22, 7, 98, 8, 0 },
+ { 0, 7, 96, 7, 25, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 } },
+ .odd = { { 0, 0, 45, 7, 80, 3, 0 },
+ { 0, 0, 19, 7, 99, 10, 0 },
+ { 0, 6, 93, 7, 29, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 32, 7, 91, 5, 0 },
+ { 0, 11, 100, 7, 17, 0, 0 },
+ { 0, 3, 83, 7, 42, 0, 0 },
+ { 0, 1, 49, 7, 76, 2, 0 },
+ { 0, 0, 21, 7, 98, 9, 0 },
+ { 0, 7, 95, 7, 26, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 35, 7, 89, 4, 0 },
+ { 0, 13, 100, 7, 15, 0, 0 },
+ { 0, 4, 86, 7, 38, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 23, 7, 97, 8, 0 },
+ { 0, 8, 97, 7, 23, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 38, 7, 86, 4, 0 },
+ { 0, 0, 15, 7, 100, 13, 0 },
+ { 0, 4, 89, 7, 35, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 26, 7, 95, 7, 0 },
+ { 0, 9, 98, 7, 21, 0, 0 },
+ { 0, 2, 76, 7, 49, 1, 0 },
+ { 0, 0, 42, 7, 83, 3, 0 },
+ { 0, 0, 17, 7, 100, 11, 0 },
+ { 0, 5, 91, 7, 32, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 29, 7, 93, 6, 0 },
+ { 0, 10, 99, 7, 19, 0, 0 },
+ { 0, 3, 80, 7, 45, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 25, 7, 96, 7, 0 },
+ { 0, 8, 98, 7, 22, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 40, 7, 85, 3, 0 },
+ { 0, 0, 16, 7, 100, 12, 0 },
+ { 0, 5, 90, 7, 33, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 27, 7, 95, 6, 0 },
+ { 0, 9, 99, 7, 20, 0, 0 },
+ { 0, 2, 78, 7, 47, 1, 0 },
+ { 0, 0, 44, 7, 81, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 93, 7, 30, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 30, 7, 93, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 81, 7, 44, 0, 0 },
+ { 0, 1, 47, 7, 78, 2, 0 },
+ { 0, 0, 20, 7, 99, 9, 0 },
+ { 0, 6, 95, 7, 27, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 33, 7, 90, 5, 0 },
+ { 0, 12, 100, 7, 16, 0, 0 },
+ { 0, 3, 85, 7, 40, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 22, 7, 98, 8, 0 },
+ { 0, 7, 96, 7, 25, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 } },
+ .odd = { { 0, 0, 45, 7, 80, 3, 0 },
+ { 0, 0, 19, 7, 99, 10, 0 },
+ { 0, 6, 93, 7, 29, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 32, 7, 91, 5, 0 },
+ { 0, 11, 100, 7, 17, 0, 0 },
+ { 0, 3, 83, 7, 42, 0, 0 },
+ { 0, 1, 49, 7, 76, 2, 0 },
+ { 0, 0, 21, 7, 98, 9, 0 },
+ { 0, 7, 95, 7, 26, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 35, 7, 89, 4, 0 },
+ { 0, 13, 100, 7, 15, 0, 0 },
+ { 0, 4, 86, 7, 38, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 23, 7, 97, 8, 0 },
+ { 0, 8, 97, 7, 23, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 38, 7, 86, 4, 0 },
+ { 0, 0, 15, 7, 100, 13, 0 },
+ { 0, 4, 89, 7, 35, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 26, 7, 95, 7, 0 },
+ { 0, 9, 98, 7, 21, 0, 0 },
+ { 0, 2, 76, 7, 49, 1, 0 },
+ { 0, 0, 42, 7, 83, 3, 0 },
+ { 0, 0, 17, 7, 100, 11, 0 },
+ { 0, 5, 91, 7, 32, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 29, 7, 93, 6, 0 },
+ { 0, 10, 99, 7, 19, 0, 0 },
+ { 0, 3, 80, 7, 45, 0, 0 } } },
+ .ptrn_arr = { { 0x664c9933, 0x664c9932, 0x64cc9932, 0x64cc9932,
+ 0x9932 } },
+ .sample_patrn_length = 146,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 42) = 0.432432 */
+ .hor_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 84, 7, 40, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 7, 93, 7, 28, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 28, 7, 93, 7, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 40, 7, 84, 4, 0 } },
+ .odd = { { 0, 1, 44, 7, 80, 3, 0 },
+ { 0, 0, 16, 7, 99, 13, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 20, 7, 98, 10, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 25, 7, 95, 8, 0 },
+ { 0, 8, 95, 7, 25, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 10, 98, 7, 20, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 },
+ { 0, 13, 99, 7, 16, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 84, 7, 40, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 7, 93, 7, 28, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 28, 7, 93, 7, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 40, 7, 84, 4, 0 } },
+ .odd = { { 0, 1, 44, 7, 80, 3, 0 },
+ { 0, 0, 16, 7, 99, 13, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 20, 7, 98, 10, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 25, 7, 95, 8, 0 },
+ { 0, 8, 95, 7, 25, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 10, 98, 7, 20, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 },
+ { 0, 13, 99, 7, 16, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 } } },
+ .ptrn_arr = { { 0x264c9933, 0x3264c993, 0x99 } },
+ .sample_patrn_length = 74,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 43) = 0.426667 */
+ .hor_phase_arr = {
+ .even = { { 0, 15, 98, 7, 15, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 },
+ { 0, 0, 41, 7, 83, 4, 0 },
+ { 0, 13, 98, 7, 17, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 10, 97, 7, 21, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 8, 94, 7, 26, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 26, 7, 94, 8, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 21, 7, 97, 10, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 17, 7, 98, 13, 0 },
+ { 0, 4, 83, 7, 41, 0, 0 },
+ { 0, 1, 44, 7, 80, 3, 0 } },
+ .odd = { { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 14, 98, 7, 16, 0, 0 },
+ { 0, 3, 78, 7, 46, 1, 0 },
+ { 0, 0, 39, 7, 85, 4, 0 },
+ { 0, 12, 98, 7, 18, 0, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 36, 7, 87, 5, 0 },
+ { 0, 11, 97, 7, 20, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 33, 7, 89, 6, 0 },
+ { 0, 10, 96, 7, 22, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 30, 7, 92, 6, 0 },
+ { 0, 9, 94, 7, 25, 0, 0 },
+ { 0, 2, 64, 7, 61, 1, 0 },
+ { 0, 0, 27, 7, 94, 7, 0 },
+ { 0, 7, 94, 7, 27, 0, 0 },
+ { 0, 1, 61, 7, 64, 2, 0 },
+ { 0, 0, 25, 7, 94, 9, 0 },
+ { 0, 6, 92, 7, 30, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 22, 7, 96, 10, 0 },
+ { 0, 6, 89, 7, 33, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 20, 7, 97, 11, 0 },
+ { 0, 5, 87, 7, 36, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 0, 18, 7, 98, 12, 0 },
+ { 0, 4, 85, 7, 39, 0, 0 },
+ { 0, 1, 46, 7, 78, 3, 0 },
+ { 0, 0, 16, 7, 98, 14, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 15, 98, 7, 15, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 },
+ { 0, 0, 41, 7, 83, 4, 0 },
+ { 0, 13, 98, 7, 17, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 10, 97, 7, 21, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 8, 94, 7, 26, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 26, 7, 94, 8, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 21, 7, 97, 10, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 17, 7, 98, 13, 0 },
+ { 0, 4, 83, 7, 41, 0, 0 },
+ { 0, 1, 44, 7, 80, 3, 0 } },
+ .odd = { { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 14, 98, 7, 16, 0, 0 },
+ { 0, 3, 78, 7, 46, 1, 0 },
+ { 0, 0, 39, 7, 85, 4, 0 },
+ { 0, 12, 98, 7, 18, 0, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 36, 7, 87, 5, 0 },
+ { 0, 11, 97, 7, 20, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 33, 7, 89, 6, 0 },
+ { 0, 10, 96, 7, 22, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 30, 7, 92, 6, 0 },
+ { 0, 9, 94, 7, 25, 0, 0 },
+ { 0, 2, 64, 7, 61, 1, 0 },
+ { 0, 0, 27, 7, 94, 7, 0 },
+ { 0, 7, 94, 7, 27, 0, 0 },
+ { 0, 1, 61, 7, 64, 2, 0 },
+ { 0, 0, 25, 7, 94, 9, 0 },
+ { 0, 6, 92, 7, 30, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 22, 7, 96, 10, 0 },
+ { 0, 6, 89, 7, 33, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 20, 7, 97, 11, 0 },
+ { 0, 5, 87, 7, 36, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 0, 18, 7, 98, 12, 0 },
+ { 0, 4, 85, 7, 39, 0, 0 },
+ { 0, 1, 46, 7, 78, 3, 0 },
+ { 0, 0, 16, 7, 98, 14, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 } } },
+ .ptrn_arr = { { 0x3264c993, 0x93264c99, 0x993264c9, 0xc993264c,
+ 0x93264 } },
+ .sample_patrn_length = 150,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 44) = 0.421053 */
+ .hor_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 } },
+ .odd = { { 0, 1, 41, 7, 82, 4, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 4, 82, 7, 41, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 } },
+ .odd = { { 0, 1, 41, 7, 82, 4, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 4, 82, 7, 41, 1, 0 } } },
+ .ptrn_arr = { { 0x3264c993, 0x9 } },
+ .sample_patrn_length = 38,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 45) = 0.415584 */
+ .hor_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 7, 89, 7, 32, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 14, 96, 7, 18, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 27, 7, 92, 9, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 13, 95, 7, 20, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 42, 7, 81, 4, 0 },
+ { 0, 11, 95, 7, 22, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 22, 7, 95, 11, 0 },
+ { 0, 4, 81, 7, 42, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 20, 7, 95, 13, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 9, 92, 7, 27, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 18, 7, 96, 14, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 32, 7, 89, 7, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 } },
+ .odd = { { 0, 1, 40, 7, 82, 5, 0 },
+ { 0, 11, 94, 7, 23, 0, 0 },
+ { 0, 2, 61, 7, 63, 2, 0 },
+ { 0, 0, 21, 7, 95, 12, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 },
+ { 0, 0, 37, 7, 85, 6, 0 },
+ { 0, 9, 93, 7, 26, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 19, 7, 95, 14, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 34, 7, 88, 6, 0 },
+ { 0, 8, 92, 7, 28, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 0, 17, 7, 96, 15, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 31, 7, 90, 7, 0 },
+ { 0, 7, 90, 7, 31, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 15, 96, 7, 17, 0, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 28, 7, 92, 8, 0 },
+ { 0, 6, 88, 7, 34, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 14, 95, 7, 19, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 26, 7, 93, 9, 0 },
+ { 0, 6, 85, 7, 37, 0, 0 },
+ { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 12, 95, 7, 21, 0, 0 },
+ { 0, 2, 63, 7, 61, 2, 0 },
+ { 0, 0, 23, 7, 94, 11, 0 },
+ { 0, 5, 82, 7, 40, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 7, 89, 7, 32, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 14, 96, 7, 18, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 27, 7, 92, 9, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 13, 95, 7, 20, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 42, 7, 81, 4, 0 },
+ { 0, 11, 95, 7, 22, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 22, 7, 95, 11, 0 },
+ { 0, 4, 81, 7, 42, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 20, 7, 95, 13, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 9, 92, 7, 27, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 18, 7, 96, 14, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 32, 7, 89, 7, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 } },
+ .odd = { { 0, 1, 40, 7, 82, 5, 0 },
+ { 0, 11, 94, 7, 23, 0, 0 },
+ { 0, 2, 61, 7, 63, 2, 0 },
+ { 0, 0, 21, 7, 95, 12, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 },
+ { 0, 0, 37, 7, 85, 6, 0 },
+ { 0, 9, 93, 7, 26, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 19, 7, 95, 14, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 34, 7, 88, 6, 0 },
+ { 0, 8, 92, 7, 28, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 0, 17, 7, 96, 15, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 31, 7, 90, 7, 0 },
+ { 0, 7, 90, 7, 31, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 15, 96, 7, 17, 0, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 28, 7, 92, 8, 0 },
+ { 0, 6, 88, 7, 34, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 14, 95, 7, 19, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 26, 7, 93, 9, 0 },
+ { 0, 6, 85, 7, 37, 0, 0 },
+ { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 12, 95, 7, 21, 0, 0 },
+ { 0, 2, 63, 7, 61, 2, 0 },
+ { 0, 0, 23, 7, 94, 11, 0 },
+ { 0, 5, 82, 7, 40, 1, 0 } } },
+ .ptrn_arr = { { 0x9324c993, 0xc99324c9, 0x26499324, 0x93264993,
+ 0x932649 } },
+ .sample_patrn_length = 154,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 46) = 0.410256 */
+ .hor_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 3, 69, 7, 55, 1, 0 },
+ { 0, 0, 25, 7, 93, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 36, 7, 86, 6, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 13, 94, 7, 21, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 21, 7, 94, 13, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 6, 86, 7, 36, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 93, 7, 25, 0, 0 },
+ { 0, 1, 55, 7, 69, 3, 0 } },
+ .odd = { { 0, 1, 39, 7, 83, 5, 0 },
+ { 0, 9, 91, 7, 28, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 33, 7, 88, 7, 0 },
+ { 0, 7, 88, 7, 33, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 28, 7, 91, 9, 0 },
+ { 0, 5, 83, 7, 39, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 3, 69, 7, 55, 1, 0 },
+ { 0, 0, 25, 7, 93, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 36, 7, 86, 6, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 13, 94, 7, 21, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 21, 7, 94, 13, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 6, 86, 7, 36, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 93, 7, 25, 0, 0 },
+ { 0, 1, 55, 7, 69, 3, 0 } },
+ .odd = { { 0, 1, 39, 7, 83, 5, 0 },
+ { 0, 9, 91, 7, 28, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 33, 7, 88, 7, 0 },
+ { 0, 7, 88, 7, 33, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 28, 7, 91, 9, 0 },
+ { 0, 5, 83, 7, 39, 1, 0 } } },
+ .ptrn_arr = { { 0x93264993, 0x4c99264c, 0x932 } },
+ .sample_patrn_length = 78,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 47) = 0.405063 */
+ .hor_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 21, 7, 93, 14, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 26, 7, 91, 11, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 31, 7, 88, 9, 0 },
+ { 0, 6, 82, 7, 39, 1, 0 },
+ { 0, 1, 36, 7, 84, 7, 0 },
+ { 0, 8, 87, 7, 33, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 90, 7, 28, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 28, 7, 90, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 33, 7, 87, 8, 0 },
+ { 0, 7, 84, 7, 36, 1, 0 },
+ { 0, 1, 39, 7, 82, 6, 0 },
+ { 0, 9, 88, 7, 31, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 11, 91, 7, 26, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 14, 93, 7, 21, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 } },
+ .odd = { { 0, 1, 38, 7, 83, 6, 0 },
+ { 0, 8, 88, 7, 32, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 10, 91, 7, 27, 0, 0 },
+ { 0, 1, 50, 7, 73, 4, 0 },
+ { 0, 13, 93, 7, 22, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 16, 94, 7, 18, 0, 0 },
+ { 0, 2, 64, 7, 60, 2, 0 },
+ { 0, 0, 20, 7, 93, 15, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 24, 7, 92, 12, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 29, 7, 90, 9, 0 },
+ { 0, 5, 81, 7, 41, 1, 0 },
+ { 0, 0, 35, 7, 86, 7, 0 },
+ { 0, 7, 86, 7, 35, 0, 0 },
+ { 0, 1, 41, 7, 81, 5, 0 },
+ { 0, 9, 90, 7, 29, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 12, 92, 7, 24, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 15, 93, 7, 20, 0, 0 },
+ { 0, 2, 60, 7, 64, 2, 0 },
+ { 0, 0, 18, 7, 94, 16, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 22, 7, 93, 13, 0 },
+ { 0, 4, 73, 7, 50, 1, 0 },
+ { 0, 0, 27, 7, 91, 10, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 32, 7, 88, 8, 0 },
+ { 0, 6, 83, 7, 38, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 21, 7, 93, 14, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 26, 7, 91, 11, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 31, 7, 88, 9, 0 },
+ { 0, 6, 82, 7, 39, 1, 0 },
+ { 0, 1, 36, 7, 84, 7, 0 },
+ { 0, 8, 87, 7, 33, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 90, 7, 28, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 28, 7, 90, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 33, 7, 87, 8, 0 },
+ { 0, 7, 84, 7, 36, 1, 0 },
+ { 0, 1, 39, 7, 82, 6, 0 },
+ { 0, 9, 88, 7, 31, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 11, 91, 7, 26, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 14, 93, 7, 21, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 } },
+ .odd = { { 0, 1, 38, 7, 83, 6, 0 },
+ { 0, 8, 88, 7, 32, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 10, 91, 7, 27, 0, 0 },
+ { 0, 1, 50, 7, 73, 4, 0 },
+ { 0, 13, 93, 7, 22, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 16, 94, 7, 18, 0, 0 },
+ { 0, 2, 64, 7, 60, 2, 0 },
+ { 0, 0, 20, 7, 93, 15, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 24, 7, 92, 12, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 29, 7, 90, 9, 0 },
+ { 0, 5, 81, 7, 41, 1, 0 },
+ { 0, 0, 35, 7, 86, 7, 0 },
+ { 0, 7, 86, 7, 35, 0, 0 },
+ { 0, 1, 41, 7, 81, 5, 0 },
+ { 0, 9, 90, 7, 29, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 12, 92, 7, 24, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 15, 93, 7, 20, 0, 0 },
+ { 0, 2, 60, 7, 64, 2, 0 },
+ { 0, 0, 18, 7, 94, 16, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 22, 7, 93, 13, 0 },
+ { 0, 4, 73, 7, 50, 1, 0 },
+ { 0, 0, 27, 7, 91, 10, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 32, 7, 88, 8, 0 },
+ { 0, 6, 83, 7, 38, 1, 0 } } },
+ .ptrn_arr = { { 0x99264993, 0x24c93264, 0x99264c93, 0x24c99264,
+ 0x9324c93 } },
+ .sample_patrn_length = 158,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 48) = 0.4 */
+ .hor_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 } },
+ .odd = { { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 } },
+ .odd = { { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 } } },
+ .ptrn_arr = { { 0x93 } },
+ .sample_patrn_length = 10,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 49) = 0.395062 */
+ .hor_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 15, 91, 7, 22, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 87, 7, 32, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 },
+ { 0, 1, 34, 7, 85, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 24, 7, 91, 13, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 20, 7, 92, 16, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 16, 92, 7, 20, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 13, 91, 7, 24, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 85, 7, 34, 1, 0 },
+ { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 32, 7, 87, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 22, 7, 91, 15, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 } },
+ .odd = { { 0, 1, 36, 7, 83, 8, 0 },
+ { 0, 6, 80, 7, 41, 1, 0 },
+ { 0, 0, 30, 7, 88, 10, 0 },
+ { 0, 5, 75, 7, 47, 1, 0 },
+ { 0, 0, 25, 7, 90, 13, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 0, 21, 7, 91, 16, 0 },
+ { 0, 3, 63, 7, 60, 2, 0 },
+ { 0, 17, 92, 7, 19, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 14, 91, 7, 23, 0, 0 },
+ { 0, 1, 51, 7, 72, 4, 0 },
+ { 0, 11, 89, 7, 28, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 9, 85, 7, 33, 1, 0 },
+ { 0, 1, 38, 7, 82, 7, 0 },
+ { 0, 7, 82, 7, 38, 1, 0 },
+ { 0, 1, 33, 7, 85, 9, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 28, 7, 89, 11, 0 },
+ { 0, 4, 72, 7, 51, 1, 0 },
+ { 0, 0, 23, 7, 91, 14, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 19, 7, 92, 17, 0 },
+ { 0, 2, 60, 7, 63, 3, 0 },
+ { 0, 16, 91, 7, 21, 0, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 13, 90, 7, 25, 0, 0 },
+ { 0, 1, 47, 7, 75, 5, 0 },
+ { 0, 10, 88, 7, 30, 0, 0 },
+ { 0, 1, 41, 7, 80, 6, 0 },
+ { 0, 8, 83, 7, 36, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 15, 91, 7, 22, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 87, 7, 32, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 },
+ { 0, 1, 34, 7, 85, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 24, 7, 91, 13, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 20, 7, 92, 16, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 16, 92, 7, 20, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 13, 91, 7, 24, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 85, 7, 34, 1, 0 },
+ { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 32, 7, 87, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 22, 7, 91, 15, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 } },
+ .odd = { { 0, 1, 36, 7, 83, 8, 0 },
+ { 0, 6, 80, 7, 41, 1, 0 },
+ { 0, 0, 30, 7, 88, 10, 0 },
+ { 0, 5, 75, 7, 47, 1, 0 },
+ { 0, 0, 25, 7, 90, 13, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 0, 21, 7, 91, 16, 0 },
+ { 0, 3, 63, 7, 60, 2, 0 },
+ { 0, 17, 92, 7, 19, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 14, 91, 7, 23, 0, 0 },
+ { 0, 1, 51, 7, 72, 4, 0 },
+ { 0, 11, 89, 7, 28, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 9, 85, 7, 33, 1, 0 },
+ { 0, 1, 38, 7, 82, 7, 0 },
+ { 0, 7, 82, 7, 38, 1, 0 },
+ { 0, 1, 33, 7, 85, 9, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 28, 7, 89, 11, 0 },
+ { 0, 4, 72, 7, 51, 1, 0 },
+ { 0, 0, 23, 7, 91, 14, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 19, 7, 92, 17, 0 },
+ { 0, 2, 60, 7, 63, 3, 0 },
+ { 0, 16, 91, 7, 21, 0, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 13, 90, 7, 25, 0, 0 },
+ { 0, 1, 47, 7, 75, 5, 0 },
+ { 0, 10, 88, 7, 30, 0, 0 },
+ { 0, 1, 41, 7, 80, 6, 0 },
+ { 0, 8, 83, 7, 36, 1, 0 } } },
+ .ptrn_arr = { { 0xc9324c93, 0x92649924, 0x24c92649, 0x49324c93,
+ 0x92649926 } },
+ .sample_patrn_length = 162,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 50) = 0.390244 */
+ .hor_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 82, 7, 37, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 5, 73, 7, 49, 1, 0 },
+ { 0, 0, 23, 7, 90, 15, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 15, 90, 7, 23, 0, 0 },
+ { 0, 1, 49, 7, 73, 5, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 37, 7, 82, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 } },
+ .odd = { { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 82, 7, 37, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 5, 73, 7, 49, 1, 0 },
+ { 0, 0, 23, 7, 90, 15, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 15, 90, 7, 23, 0, 0 },
+ { 0, 1, 49, 7, 73, 5, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 37, 7, 82, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 } },
+ .odd = { { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 } } },
+ .ptrn_arr = { { 0x49924c93, 0x9324c926, 0x9264 } },
+ .sample_patrn_length = 82,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 51) = 0.385542 */
+ .hor_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 87, 7, 30, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 6, 75, 7, 46, 1, 0 },
+ { 0, 0, 23, 7, 89, 16, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 41, 7, 79, 7, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 7, 79, 7, 41, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 16, 89, 7, 23, 0, 0 },
+ { 0, 1, 46, 7, 75, 6, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 30, 7, 87, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 } },
+ .odd = { { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 0, 20, 7, 90, 18, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 11, 85, 7, 31, 1, 0 },
+ { 0, 1, 36, 7, 82, 9, 0 },
+ { 0, 5, 74, 7, 48, 1, 0 },
+ { 0, 0, 22, 7, 89, 17, 0 },
+ { 0, 2, 57, 7, 65, 4, 0 },
+ { 0, 12, 87, 7, 29, 0, 0 },
+ { 0, 1, 39, 7, 80, 8, 0 },
+ { 0, 6, 76, 7, 45, 1, 0 },
+ { 0, 0, 24, 7, 89, 15, 0 },
+ { 0, 3, 60, 7, 62, 3, 0 },
+ { 0, 13, 89, 7, 26, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 26, 7, 89, 13, 0 },
+ { 0, 3, 62, 7, 60, 3, 0 },
+ { 0, 15, 89, 7, 24, 0, 0 },
+ { 0, 1, 45, 7, 76, 6, 0 },
+ { 0, 8, 80, 7, 39, 1, 0 },
+ { 0, 0, 29, 7, 87, 12, 0 },
+ { 0, 4, 65, 7, 57, 2, 0 },
+ { 0, 17, 89, 7, 22, 0, 0 },
+ { 0, 1, 48, 7, 74, 5, 0 },
+ { 0, 9, 82, 7, 36, 1, 0 },
+ { 0, 1, 31, 7, 85, 11, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 18, 90, 7, 20, 0, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 87, 7, 30, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 6, 75, 7, 46, 1, 0 },
+ { 0, 0, 23, 7, 89, 16, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 41, 7, 79, 7, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 7, 79, 7, 41, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 16, 89, 7, 23, 0, 0 },
+ { 0, 1, 46, 7, 75, 6, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 30, 7, 87, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 } },
+ .odd = { { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 0, 20, 7, 90, 18, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 11, 85, 7, 31, 1, 0 },
+ { 0, 1, 36, 7, 82, 9, 0 },
+ { 0, 5, 74, 7, 48, 1, 0 },
+ { 0, 0, 22, 7, 89, 17, 0 },
+ { 0, 2, 57, 7, 65, 4, 0 },
+ { 0, 12, 87, 7, 29, 0, 0 },
+ { 0, 1, 39, 7, 80, 8, 0 },
+ { 0, 6, 76, 7, 45, 1, 0 },
+ { 0, 0, 24, 7, 89, 15, 0 },
+ { 0, 3, 60, 7, 62, 3, 0 },
+ { 0, 13, 89, 7, 26, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 26, 7, 89, 13, 0 },
+ { 0, 3, 62, 7, 60, 3, 0 },
+ { 0, 15, 89, 7, 24, 0, 0 },
+ { 0, 1, 45, 7, 76, 6, 0 },
+ { 0, 8, 80, 7, 39, 1, 0 },
+ { 0, 0, 29, 7, 87, 12, 0 },
+ { 0, 4, 65, 7, 57, 2, 0 },
+ { 0, 17, 89, 7, 22, 0, 0 },
+ { 0, 1, 48, 7, 74, 5, 0 },
+ { 0, 9, 82, 7, 36, 1, 0 },
+ { 0, 1, 31, 7, 85, 11, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 18, 90, 7, 20, 0, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 } } },
+ .ptrn_arr = { { 0x49924c93, 0xc9264932, 0x93249924, 0x924c9264,
+ 0x26493249, 0x9 } },
+ .sample_patrn_length = 166,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 52) = 0.380952 */
+ .hor_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 } },
+ .odd = { { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 16, 88, 7, 24, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 24, 7, 88, 16, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 } },
+ .odd = { { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 16, 88, 7, 24, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 24, 7, 88, 16, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 } } },
+ .ptrn_arr = { { 0x4c926493, 0x92 } },
+ .sample_patrn_length = 42,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 53) = 0.376471 */
+ .hor_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 47, 7, 73, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 22, 7, 88, 18, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 24, 7, 87, 17, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 9, 80, 7, 38, 1, 0 },
+ { 0, 0, 26, 7, 87, 15, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 1, 28, 7, 85, 14, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 },
+ { 0, 1, 31, 7, 84, 12, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 12, 84, 7, 31, 1, 0 },
+ { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 14, 85, 7, 28, 1, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 15, 87, 7, 26, 0, 0 },
+ { 0, 1, 38, 7, 80, 9, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 17, 87, 7, 24, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 18, 88, 7, 22, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 73, 7, 47, 2, 0 } },
+ .odd = { { 0, 1, 32, 7, 83, 12, 0 },
+ { 0, 3, 63, 7, 59, 3, 0 },
+ { 0, 13, 84, 7, 30, 1, 0 },
+ { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 14, 87, 7, 27, 0, 0 },
+ { 0, 1, 37, 7, 81, 9, 0 },
+ { 0, 5, 67, 7, 54, 2, 0 },
+ { 0, 16, 87, 7, 25, 0, 0 },
+ { 0, 1, 40, 7, 79, 8, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 18, 87, 7, 23, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 19, 88, 7, 21, 0, 0 },
+ { 0, 1, 45, 7, 75, 7, 0 },
+ { 0, 7, 75, 7, 45, 1, 0 },
+ { 0, 0, 21, 7, 88, 19, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 23, 7, 87, 18, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 8, 79, 7, 40, 1, 0 },
+ { 0, 0, 25, 7, 87, 16, 0 },
+ { 0, 2, 54, 7, 67, 5, 0 },
+ { 0, 9, 81, 7, 37, 1, 0 },
+ { 0, 0, 27, 7, 87, 14, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 },
+ { 0, 1, 30, 7, 84, 13, 0 },
+ { 0, 3, 59, 7, 63, 3, 0 },
+ { 0, 12, 83, 7, 32, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 47, 7, 73, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 22, 7, 88, 18, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 24, 7, 87, 17, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 9, 80, 7, 38, 1, 0 },
+ { 0, 0, 26, 7, 87, 15, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 1, 28, 7, 85, 14, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 },
+ { 0, 1, 31, 7, 84, 12, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 12, 84, 7, 31, 1, 0 },
+ { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 14, 85, 7, 28, 1, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 15, 87, 7, 26, 0, 0 },
+ { 0, 1, 38, 7, 80, 9, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 17, 87, 7, 24, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 18, 88, 7, 22, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 73, 7, 47, 2, 0 } },
+ .odd = { { 0, 1, 32, 7, 83, 12, 0 },
+ { 0, 3, 63, 7, 59, 3, 0 },
+ { 0, 13, 84, 7, 30, 1, 0 },
+ { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 14, 87, 7, 27, 0, 0 },
+ { 0, 1, 37, 7, 81, 9, 0 },
+ { 0, 5, 67, 7, 54, 2, 0 },
+ { 0, 16, 87, 7, 25, 0, 0 },
+ { 0, 1, 40, 7, 79, 8, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 18, 87, 7, 23, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 19, 88, 7, 21, 0, 0 },
+ { 0, 1, 45, 7, 75, 7, 0 },
+ { 0, 7, 75, 7, 45, 1, 0 },
+ { 0, 0, 21, 7, 88, 19, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 23, 7, 87, 18, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 8, 79, 7, 40, 1, 0 },
+ { 0, 0, 25, 7, 87, 16, 0 },
+ { 0, 2, 54, 7, 67, 5, 0 },
+ { 0, 9, 81, 7, 37, 1, 0 },
+ { 0, 0, 27, 7, 87, 14, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 },
+ { 0, 1, 30, 7, 84, 13, 0 },
+ { 0, 3, 59, 7, 63, 3, 0 },
+ { 0, 12, 83, 7, 32, 1, 0 } } },
+ .ptrn_arr = { { 0x64926493, 0x64926492, 0x4c926492, 0x4c924c92,
+ 0x4c924c92, 0x92 } },
+ .sample_patrn_length = 170,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 54) = 0.372093 */
+ .hor_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 71, 7, 49, 2, 0 },
+ { 0, 17, 86, 7, 25, 0, 0 },
+ { 0, 1, 39, 7, 79, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 14, 84, 7, 29, 1, 0 },
+ { 0, 1, 34, 7, 82, 11, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 11, 82, 7, 34, 1, 0 },
+ { 0, 1, 29, 7, 84, 14, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 79, 7, 39, 1, 0 },
+ { 0, 0, 25, 7, 86, 17, 0 },
+ { 0, 2, 49, 7, 71, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 } },
+ .odd = { { 0, 1, 31, 7, 83, 13, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 0, 27, 7, 85, 16, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 16, 85, 7, 27, 0, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 13, 83, 7, 31, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 71, 7, 49, 2, 0 },
+ { 0, 17, 86, 7, 25, 0, 0 },
+ { 0, 1, 39, 7, 79, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 14, 84, 7, 29, 1, 0 },
+ { 0, 1, 34, 7, 82, 11, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 11, 82, 7, 34, 1, 0 },
+ { 0, 1, 29, 7, 84, 14, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 79, 7, 39, 1, 0 },
+ { 0, 0, 25, 7, 86, 17, 0 },
+ { 0, 2, 49, 7, 71, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 } },
+ .odd = { { 0, 1, 31, 7, 83, 13, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 0, 27, 7, 85, 16, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 16, 85, 7, 27, 0, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 13, 83, 7, 31, 1, 0 } } },
+ .ptrn_arr = { { 0x24932493, 0x24992493, 0x92499 } },
+ .sample_patrn_length = 86,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 55) = 0.367816 */
+ .hor_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 41, 7, 77, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 13, 82, 7, 32, 1, 0 },
+ { 0, 1, 29, 7, 83, 15, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 1, 27, 7, 84, 16, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 18, 85, 7, 25, 0, 0 },
+ { 0, 1, 36, 7, 80, 11, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 11, 80, 7, 36, 1, 0 },
+ { 0, 0, 25, 7, 85, 18, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 16, 84, 7, 27, 1, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 15, 83, 7, 29, 1, 0 },
+ { 0, 1, 32, 7, 82, 13, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 77, 7, 41, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 83, 14, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 8, 76, 7, 43, 1, 0 },
+ { 0, 20, 86, 7, 22, 0, 0 },
+ { 0, 1, 40, 7, 78, 9, 0 },
+ { 0, 4, 65, 7, 56, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 28, 7, 84, 15, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 7, 74, 7, 45, 2, 0 },
+ { 0, 18, 86, 7, 24, 0, 0 },
+ { 0, 1, 38, 7, 79, 10, 0 },
+ { 0, 4, 62, 7, 59, 3, 0 },
+ { 0, 11, 81, 7, 35, 1, 0 },
+ { 0, 0, 26, 7, 85, 17, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 17, 85, 7, 26, 0, 0 },
+ { 0, 1, 35, 7, 81, 11, 0 },
+ { 0, 3, 59, 7, 62, 4, 0 },
+ { 0, 10, 79, 7, 38, 1, 0 },
+ { 0, 0, 24, 7, 86, 18, 0 },
+ { 0, 2, 45, 7, 74, 7, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 15, 84, 7, 28, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 56, 7, 65, 4, 0 },
+ { 0, 9, 78, 7, 40, 1, 0 },
+ { 0, 0, 22, 7, 86, 20, 0 },
+ { 0, 1, 43, 7, 76, 8, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 14, 83, 7, 30, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 41, 7, 77, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 13, 82, 7, 32, 1, 0 },
+ { 0, 1, 29, 7, 83, 15, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 1, 27, 7, 84, 16, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 18, 85, 7, 25, 0, 0 },
+ { 0, 1, 36, 7, 80, 11, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 11, 80, 7, 36, 1, 0 },
+ { 0, 0, 25, 7, 85, 18, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 16, 84, 7, 27, 1, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 15, 83, 7, 29, 1, 0 },
+ { 0, 1, 32, 7, 82, 13, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 77, 7, 41, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 83, 14, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 8, 76, 7, 43, 1, 0 },
+ { 0, 20, 86, 7, 22, 0, 0 },
+ { 0, 1, 40, 7, 78, 9, 0 },
+ { 0, 4, 65, 7, 56, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 28, 7, 84, 15, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 7, 74, 7, 45, 2, 0 },
+ { 0, 18, 86, 7, 24, 0, 0 },
+ { 0, 1, 38, 7, 79, 10, 0 },
+ { 0, 4, 62, 7, 59, 3, 0 },
+ { 0, 11, 81, 7, 35, 1, 0 },
+ { 0, 0, 26, 7, 85, 17, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 17, 85, 7, 26, 0, 0 },
+ { 0, 1, 35, 7, 81, 11, 0 },
+ { 0, 3, 59, 7, 62, 4, 0 },
+ { 0, 10, 79, 7, 38, 1, 0 },
+ { 0, 0, 24, 7, 86, 18, 0 },
+ { 0, 2, 45, 7, 74, 7, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 15, 84, 7, 28, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 56, 7, 65, 4, 0 },
+ { 0, 9, 78, 7, 40, 1, 0 },
+ { 0, 0, 22, 7, 86, 20, 0 },
+ { 0, 1, 43, 7, 76, 8, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 14, 83, 7, 30, 1, 0 } } },
+ .ptrn_arr = { { 0x24992493, 0x264924c9, 0x92493249, 0x924c9249,
+ 0x93249264, 0x924 } },
+ .sample_patrn_length = 174,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 56) = 0.363636 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 } } },
+ .ptrn_arr = { { 0x92493 } },
+ .sample_patrn_length = 22,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 57) = 0.359551 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 } },
+ .odd = { { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 19, 83, 7, 25, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 51, 7, 68, 6, 0 },
+ { 0, 6, 68, 7, 51, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 25, 7, 83, 19, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 } },
+ .odd = { { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 19, 83, 7, 25, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 51, 7, 68, 6, 0 },
+ { 0, 6, 68, 7, 51, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 25, 7, 83, 19, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 } } },
+ .ptrn_arr = { { 0x26492493, 0x924c9249, 0x49249924, 0x64924932,
+ 0x24c92492, 0x9249 } },
+ .sample_patrn_length = 178,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 58) = 0.355556 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 } } },
+ .ptrn_arr = { { 0x32492493, 0x99249249, 0x924924 } },
+ .sample_patrn_length = 90,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 59) = 0.351648 */
+ .hor_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 81, 18, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 10, 73, 7, 43, 2, 0 },
+ { 0, 15, 80, 7, 32, 1, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 1, 32, 7, 80, 15, 0 },
+ { 0, 2, 43, 7, 73, 10, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 18, 81, 7, 28, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 81, 18, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 10, 73, 7, 43, 2, 0 },
+ { 0, 15, 80, 7, 32, 1, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 1, 32, 7, 80, 15, 0 },
+ { 0, 2, 43, 7, 73, 10, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 18, 81, 7, 28, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x4924924c, 0x24924992, 0x92493249,
+ 0x49264924, 0x92492 } },
+ .sample_patrn_length = 182,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 60) = 0.347826 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 } },
+ .odd = { { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 } },
+ .odd = { { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x924 } },
+ .sample_patrn_length = 46,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 61) = 0.344086 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 16, 78, 7, 33, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 1, 25, 7, 79, 22, 1 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 1, 22, 79, 7, 25, 1, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 33, 7, 78, 16, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 80, 21, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 18, 79, 7, 30, 1, 0 },
+ { 1, 23, 79, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 1, 24, 7, 79, 23, 1 },
+ { 0, 1, 30, 7, 79, 18, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 21, 80, 7, 26, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 16, 78, 7, 33, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 1, 25, 7, 79, 22, 1 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 1, 22, 79, 7, 25, 1, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 33, 7, 78, 16, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 80, 21, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 18, 79, 7, 30, 1, 0 },
+ { 1, 23, 79, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 1, 24, 7, 79, 23, 1 },
+ { 0, 1, 30, 7, 79, 18, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 21, 80, 7, 26, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x64924924, 0x92492492, 0x4c924924,
+ 0x92492492, 0x924924 } },
+ .sample_patrn_length = 186,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 62) = 0.340426 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 1, 32, 7, 78, 17, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 17, 78, 7, 32, 1, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 1, 32, 7, 78, 17, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 17, 78, 7, 32, 1, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x24924924, 0x9249249 } },
+ .sample_patrn_length = 94,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 63) = 0.336842 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } },
+ .odd = { { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 1, 29, 7, 78, 20, 0 },
+ { 0, 1, 31, 7, 78, 18, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 18, 78, 7, 31, 1, 0 },
+ { 0, 20, 78, 7, 29, 1, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } },
+ .odd = { { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 1, 29, 7, 78, 20, 0 },
+ { 0, 1, 31, 7, 78, 18, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 18, 78, 7, 31, 1, 0 },
+ { 0, 20, 78, 7, 29, 1, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x24924924, 0x49249249, 0x92492492,
+ 0x24924924, 0x9249249 } },
+ .sample_patrn_length = 190,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
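+	/*
+	 * gcd(32, 64) = 32, so this 1:3 entry reduces to a single even and
+	 * a single odd phase and the minimum sample pattern length of 6.
+	 */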
+ /* Scale factor 32 / (32 + 64) = 0.333333 */
+ .hor_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 } },
+ .odd = { { 0, 4, 60, 7, 60, 4, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 } },
+ .odd = { { 0, 4, 60, 7, 60, 4, 0 } } },
+ .ptrn_arr = { { 0x9 } },
+ .sample_patrn_length = 6,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 65) = 0.329897 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 20, 85, 7, 23, 0, 0 },
+ { 0, 18, 84, 7, 25, 1, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 1, 25, 7, 84, 18, 0 },
+ { 0, 0, 23, 7, 85, 20, 0 } },
+ .odd = { { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 19, 85, 7, 24, 0, 0 },
+ { 0, 17, 84, 7, 26, 1, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 },
+ { 0, 14, 82, 7, 31, 1, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 12, 80, 7, 35, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 10, 77, 7, 40, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 40, 7, 77, 10, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 35, 7, 80, 12, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 1, 31, 7, 82, 14, 0 },
+ { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 1, 26, 7, 84, 17, 0 },
+ { 0, 0, 24, 7, 85, 19, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 20, 85, 7, 23, 0, 0 },
+ { 0, 18, 84, 7, 25, 1, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 1, 25, 7, 84, 18, 0 },
+ { 0, 0, 23, 7, 85, 20, 0 } },
+ .odd = { { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 19, 85, 7, 24, 0, 0 },
+ { 0, 17, 84, 7, 26, 1, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 },
+ { 0, 14, 82, 7, 31, 1, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 12, 80, 7, 35, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 10, 77, 7, 40, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 40, 7, 77, 10, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 35, 7, 80, 12, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 1, 31, 7, 82, 14, 0 },
+ { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 1, 26, 7, 84, 17, 0 },
+ { 0, 0, 24, 7, 85, 19, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x92492492, 0x24924924, 0x49249249,
+ 0x92492492, 0x24924924 } },
+ .sample_patrn_length = 194,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 66) = 0.326531 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 } },
+ .odd = { { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 } },
+ .odd = { { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x92492492, 0x24924924 } },
+ .sample_patrn_length = 98,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 67) = 0.323232 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 } },
+ .odd = { { 0, 20, 82, 7, 25, 1, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 1, 29, 7, 81, 17, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 5, 63, 7, 56, 4, 0 },
+ { 0, 4, 56, 7, 63, 5, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 17, 81, 7, 29, 1, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 1, 25, 7, 82, 20, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 } },
+ .odd = { { 0, 20, 82, 7, 25, 1, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 1, 29, 7, 81, 17, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 5, 63, 7, 56, 4, 0 },
+ { 0, 4, 56, 7, 63, 5, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 17, 81, 7, 29, 1, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 1, 25, 7, 82, 20, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x92492492, 0x92492490, 0x24924924,
+ 0x24924921, 0x49249249, 0x2 } },
+ .sample_patrn_length = 198,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 68) = 0.32 */
+ .hor_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 } },
+ .odd = { { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 } },
+ .odd = { { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x2492 } },
+ .sample_patrn_length = 50,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 69) = 0.316832 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 8, 70, 7, 47, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 47, 7, 70, 8, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 } },
+ .odd = { { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 65, 7, 53, 4, 0 },
+ { 0, 4, 53, 7, 65, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 8, 70, 7, 47, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 47, 7, 70, 8, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 } },
+ .odd = { { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 65, 7, 53, 4, 0 },
+ { 0, 4, 53, 7, 65, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x49249212, 0x49242492, 0x48492492,
+ 0x92492492, 0x92492490, 0x24 } },
+ .sample_patrn_length = 202,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 70) = 0.313725 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 } },
+ .odd = { { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 6, 61, 7, 57, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 22, 80, 7, 25, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 25, 7, 80, 22, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 57, 7, 61, 6, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 } },
+ .odd = { { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 6, 61, 7, 57, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 22, 80, 7, 25, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 25, 7, 80, 22, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 57, 7, 61, 6, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x49249248, 0x49249242, 0x2 } },
+ .sample_patrn_length = 102,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 71) = 0.31068 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 19, 79, 7, 29, 1, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 14, 76, 7, 36, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 36, 7, 76, 14, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 1, 29, 7, 79, 19, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 } },
+ .odd = { { 0, 18, 79, 7, 30, 1, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 5, 58, 7, 59, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 26, 7, 79, 21, 1 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 1, 21, 79, 7, 26, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 59, 7, 58, 5, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 1, 30, 7, 79, 18, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 19, 79, 7, 29, 1, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 14, 76, 7, 36, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 36, 7, 76, 14, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 1, 29, 7, 79, 19, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 } },
+ .odd = { { 0, 18, 79, 7, 30, 1, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 5, 58, 7, 59, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 26, 7, 79, 21, 1 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 1, 21, 79, 7, 26, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 59, 7, 58, 5, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 1, 30, 7, 79, 18, 0 } } },
+ .ptrn_arr = { { 0x9249249, 0x21249249, 0x24249249, 0x24849249,
+ 0x24909249, 0x24921249, 0x249 } },
+ .sample_patrn_length = 206,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 72) = 0.307692 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 } },
+ .odd = { { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 68, 7, 49, 3, 0 },
+ { 0, 3, 49, 7, 68, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 } },
+ .odd = { { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 68, 7, 49, 3, 0 },
+ { 0, 3, 49, 7, 68, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 } } },
+ .ptrn_arr = { { 0x249249 } },
+ .sample_patrn_length = 26,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 73) = 0.304762 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 2, 34, 7, 76, 16, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 16, 76, 7, 34, 2, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 } },
+ .odd = { { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 19, 77, 7, 31, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 20, 77, 7, 29, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 29, 7, 77, 20, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 31, 7, 77, 19, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 2, 34, 7, 76, 16, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 16, 76, 7, 34, 2, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 } },
+ .odd = { { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 19, 77, 7, 31, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 20, 77, 7, 29, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 29, 7, 77, 20, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 31, 7, 77, 19, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 } } },
+ .ptrn_arr = { { 0x24249249, 0x24921249, 0x84924909, 0x92424924,
+ 0x92492124, 0x48492490, 0x2492 } },
+ .sample_patrn_length = 210,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 74) = 0.301887 */
+ .hor_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 } },
+ .odd = { { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 7, 60, 7, 56, 5, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 5, 56, 7, 60, 7, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 } },
+ .odd = { { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 7, 60, 7, 56, 5, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 5, 56, 7, 60, 7, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 } } },
+ .ptrn_arr = { { 0x24849249, 0x24924849, 0x92424924, 0x24 } },
+ .sample_patrn_length = 106,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 75) = 0.299065 */
+ .hor_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 67, 7, 47, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 23, 76, 7, 27, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 61, 7, 54, 5, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 19, 76, 7, 32, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 36, 7, 74, 16, 0 },
+ { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 },
+ { 0, 16, 74, 7, 36, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 32, 7, 76, 19, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 5, 54, 7, 61, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 27, 7, 76, 23, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 47, 7, 67, 10, 0 } },
+ .odd = { { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 6, 57, 7, 58, 7, 0 },
+ { 0, 2, 33, 7, 75, 18, 0 },
+ { 0, 15, 73, 7, 38, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 1, 31, 7, 76, 19, 1 },
+ { 0, 14, 72, 7, 40, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 50, 7, 65, 9, 0 },
+ { 0, 1, 28, 7, 76, 22, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 0, 1, 26, 7, 76, 24, 1 },
+ { 0, 11, 68, 7, 46, 3, 0 },
+ { 0, 3, 46, 7, 68, 11, 0 },
+ { 1, 24, 76, 7, 26, 1, 0 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 22, 76, 7, 28, 1, 0 },
+ { 0, 9, 65, 7, 50, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 40, 7, 72, 14, 0 },
+ { 1, 19, 76, 7, 31, 1, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 38, 7, 73, 15, 0 },
+ { 0, 18, 75, 7, 33, 2, 0 },
+ { 0, 7, 58, 7, 57, 6, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 67, 7, 47, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 23, 76, 7, 27, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 61, 7, 54, 5, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 19, 76, 7, 32, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 36, 7, 74, 16, 0 },
+ { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 },
+ { 0, 16, 74, 7, 36, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 32, 7, 76, 19, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 5, 54, 7, 61, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 27, 7, 76, 23, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 47, 7, 67, 10, 0 } },
+ .odd = { { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 6, 57, 7, 58, 7, 0 },
+ { 0, 2, 33, 7, 75, 18, 0 },
+ { 0, 15, 73, 7, 38, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 1, 31, 7, 76, 19, 1 },
+ { 0, 14, 72, 7, 40, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 50, 7, 65, 9, 0 },
+ { 0, 1, 28, 7, 76, 22, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 0, 1, 26, 7, 76, 24, 1 },
+ { 0, 11, 68, 7, 46, 3, 0 },
+ { 0, 3, 46, 7, 68, 11, 0 },
+ { 1, 24, 76, 7, 26, 1, 0 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 22, 76, 7, 28, 1, 0 },
+ { 0, 9, 65, 7, 50, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 40, 7, 72, 14, 0 },
+ { 1, 19, 76, 7, 31, 1, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 38, 7, 73, 15, 0 },
+ { 0, 18, 75, 7, 33, 2, 0 },
+ { 0, 7, 58, 7, 57, 6, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 } } },
+ .ptrn_arr = { { 0x24909249, 0x90924909, 0x92490924, 0x49212490,
+ 0x21249212, 0x24921249, 0x24921 } },
+ .sample_patrn_length = 214,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 76) = 0.296296 */
+ .hor_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 } },
+ .odd = { { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 } },
+ .odd = { { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 } } },
+ .ptrn_arr = { { 0x24909249, 0x24921 } },
+ .sample_patrn_length = 54,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 77) = 0.293578 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 },
+ { 1, 24, 75, 7, 27, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 60, 7, 55, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 45, 7, 68, 12, 0 },
+ { 1, 21, 74, 7, 31, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 31, 7, 74, 21, 1 },
+ { 0, 12, 68, 7, 45, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 55, 7, 60, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 27, 7, 75, 24, 1 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 } },
+ .odd = { { 0, 16, 72, 7, 38, 2, 0 },
+ { 0, 5, 50, 7, 64, 9, 0 },
+ { 1, 25, 75, 7, 26, 1, 0 },
+ { 0, 8, 63, 7, 52, 5, 0 },
+ { 0, 2, 36, 7, 73, 17, 0 },
+ { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 1, 23, 75, 7, 28, 1, 0 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 34, 7, 73, 18, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 46, 7, 67, 11, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 11, 67, 7, 46, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 18, 73, 7, 34, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 0, 1, 28, 7, 75, 23, 1 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 },
+ { 0, 17, 73, 7, 36, 2, 0 },
+ { 0, 5, 52, 7, 63, 8, 0 },
+ { 0, 1, 26, 7, 75, 25, 1 },
+ { 0, 9, 64, 7, 50, 5, 0 },
+ { 0, 2, 38, 7, 72, 16, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 },
+ { 1, 24, 75, 7, 27, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 60, 7, 55, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 45, 7, 68, 12, 0 },
+ { 1, 21, 74, 7, 31, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 31, 7, 74, 21, 1 },
+ { 0, 12, 68, 7, 45, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 55, 7, 60, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 27, 7, 75, 24, 1 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 } },
+ .odd = { { 0, 16, 72, 7, 38, 2, 0 },
+ { 0, 5, 50, 7, 64, 9, 0 },
+ { 1, 25, 75, 7, 26, 1, 0 },
+ { 0, 8, 63, 7, 52, 5, 0 },
+ { 0, 2, 36, 7, 73, 17, 0 },
+ { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 1, 23, 75, 7, 28, 1, 0 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 34, 7, 73, 18, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 46, 7, 67, 11, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 11, 67, 7, 46, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 18, 73, 7, 34, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 0, 1, 28, 7, 75, 23, 1 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 },
+ { 0, 17, 73, 7, 36, 2, 0 },
+ { 0, 5, 52, 7, 63, 8, 0 },
+ { 0, 1, 26, 7, 75, 25, 1 },
+ { 0, 9, 64, 7, 50, 5, 0 },
+ { 0, 2, 38, 7, 72, 16, 0 } } },
+ .ptrn_arr = { { 0x24921249, 0x92484924, 0x49212490, 0x24849242,
+ 0x92124909, 0x48492424, 0x249092 } },
+ .sample_patrn_length = 218,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 78) = 0.290909 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 61, 7, 53, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 0, 17, 72, 7, 37, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 37, 7, 72, 17, 0 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 53, 7, 61, 8, 0 } },
+ .odd = { { 0, 15, 71, 7, 39, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 39, 7, 71, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 61, 7, 53, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 0, 17, 72, 7, 37, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 37, 7, 72, 17, 0 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 53, 7, 61, 8, 0 } },
+ .odd = { { 0, 15, 71, 7, 39, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 39, 7, 71, 15, 0 } } },
+ .ptrn_arr = { { 0x24921249, 0x12490924, 0x9248492, 0x249 } },
+ .sample_patrn_length = 110,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 79) = 0.288288 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 1, 30, 7, 73, 23, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 33, 7, 72, 20, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 2, 37, 7, 71, 17, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 16, 70, 7, 39, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 39, 7, 70, 16, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 17, 71, 7, 37, 2, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 20, 72, 7, 33, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 23, 73, 7, 30, 1, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 } },
+ .odd = { { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 1, 18, 71, 7, 36, 2, 0 },
+ { 0, 4, 48, 7, 65, 11, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 24, 73, 7, 29, 1, 0 },
+ { 0, 7, 56, 7, 58, 7, 0 },
+ { 0, 1, 27, 7, 74, 25, 1 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 30, 7, 73, 22, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 34, 7, 72, 19, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 3, 38, 7, 71, 16, 0 },
+ { 0, 14, 69, 7, 42, 3, 0 },
+ { 0, 3, 42, 7, 69, 14, 0 },
+ { 0, 16, 71, 7, 38, 3, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 19, 72, 7, 34, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 22, 73, 7, 30, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 1, 25, 74, 7, 27, 1, 0 },
+ { 0, 7, 58, 7, 56, 7, 0 },
+ { 0, 1, 29, 7, 73, 24, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 11, 65, 7, 48, 4, 0 },
+ { 0, 2, 36, 7, 71, 18, 1 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 1, 30, 7, 73, 23, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 33, 7, 72, 20, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 2, 37, 7, 71, 17, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 16, 70, 7, 39, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 39, 7, 70, 16, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 17, 71, 7, 37, 2, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 20, 72, 7, 33, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 23, 73, 7, 30, 1, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 } },
+ .odd = { { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 1, 18, 71, 7, 36, 2, 0 },
+ { 0, 4, 48, 7, 65, 11, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 24, 73, 7, 29, 1, 0 },
+ { 0, 7, 56, 7, 58, 7, 0 },
+ { 0, 1, 27, 7, 74, 25, 1 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 30, 7, 73, 22, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 34, 7, 72, 19, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 3, 38, 7, 71, 16, 0 },
+ { 0, 14, 69, 7, 42, 3, 0 },
+ { 0, 3, 42, 7, 69, 14, 0 },
+ { 0, 16, 71, 7, 38, 3, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 19, 72, 7, 34, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 22, 73, 7, 30, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 1, 25, 74, 7, 27, 1, 0 },
+ { 0, 7, 58, 7, 56, 7, 0 },
+ { 0, 1, 29, 7, 73, 24, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 11, 65, 7, 48, 4, 0 },
+ { 0, 2, 36, 7, 71, 18, 1 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 } } },
+ .ptrn_arr = { { 0x84921249, 0x42492124, 0x24249092, 0x92124909,
+ 0x49212484, 0x24909248, 0x2490924 } },
+ .sample_patrn_length = 222,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 80) = 0.285714 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 } },
+ .odd = { { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 } },
+ .odd = { { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 } } },
+ .ptrn_arr = { { 0x249 } },
+ .sample_patrn_length = 14,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 81) = 0.283186 */
+ .hor_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 59, 8, 0 },
+ { 1, 23, 72, 7, 30, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 39, 7, 69, 16, 1 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 35, 7, 71, 19, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 1, 28, 7, 73, 25, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 25, 73, 7, 28, 1, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 19, 71, 7, 35, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 1, 16, 69, 7, 39, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 30, 7, 72, 23, 1 },
+ { 0, 8, 59, 7, 54, 7, 0 } },
+ .odd = { { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 38, 7, 69, 17, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 0, 1, 27, 7, 73, 26, 1 },
+ { 0, 7, 55, 7, 58, 8, 0 },
+ { 1, 24, 72, 7, 29, 2, 0 },
+ { 0, 6, 52, 7, 60, 10, 0 },
+ { 1, 21, 71, 7, 33, 2, 0 },
+ { 0, 5, 48, 7, 64, 11, 0 },
+ { 1, 18, 70, 7, 36, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 0, 16, 69, 7, 40, 3, 0 },
+ { 0, 3, 40, 7, 69, 16, 0 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 36, 7, 70, 18, 1 },
+ { 0, 11, 64, 7, 48, 5, 0 },
+ { 0, 2, 33, 7, 71, 21, 1 },
+ { 0, 10, 60, 7, 52, 6, 0 },
+ { 0, 2, 29, 7, 72, 24, 1 },
+ { 0, 8, 58, 7, 55, 7, 0 },
+ { 1, 26, 73, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 17, 69, 7, 38, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 59, 8, 0 },
+ { 1, 23, 72, 7, 30, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 39, 7, 69, 16, 1 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 35, 7, 71, 19, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 1, 28, 7, 73, 25, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 25, 73, 7, 28, 1, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 19, 71, 7, 35, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 1, 16, 69, 7, 39, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 30, 7, 72, 23, 1 },
+ { 0, 8, 59, 7, 54, 7, 0 } },
+ .odd = { { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 38, 7, 69, 17, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 0, 1, 27, 7, 73, 26, 1 },
+ { 0, 7, 55, 7, 58, 8, 0 },
+ { 1, 24, 72, 7, 29, 2, 0 },
+ { 0, 6, 52, 7, 60, 10, 0 },
+ { 1, 21, 71, 7, 33, 2, 0 },
+ { 0, 5, 48, 7, 64, 11, 0 },
+ { 1, 18, 70, 7, 36, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 0, 16, 69, 7, 40, 3, 0 },
+ { 0, 3, 40, 7, 69, 16, 0 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 36, 7, 70, 18, 1 },
+ { 0, 11, 64, 7, 48, 5, 0 },
+ { 0, 2, 33, 7, 71, 21, 1 },
+ { 0, 10, 60, 7, 52, 6, 0 },
+ { 0, 2, 29, 7, 72, 24, 1 },
+ { 0, 8, 58, 7, 55, 7, 0 },
+ { 1, 26, 73, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 17, 69, 7, 38, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 } } },
+ .ptrn_arr = { { 0x90924249, 0x49092424, 0x84921248, 0x49092124,
+ 0x24909242, 0x48492124, 0x24849212 } },
+ .sample_patrn_length = 226,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 82) = 0.280702 */
+ .hor_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 0, 15, 68, 7, 41, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 41, 7, 68, 15, 0 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 } },
+ .odd = { { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 28, 7, 72, 25, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 25, 72, 7, 28, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 0, 15, 68, 7, 41, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 41, 7, 68, 15, 0 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 } },
+ .odd = { { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 28, 7, 72, 25, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 25, 72, 7, 28, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 } } },
+ .ptrn_arr = { { 0x90924249, 0x9212484, 0x92124249, 0x2484 } },
+ .sample_patrn_length = 114,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 83) = 0.278261 */
+ .hor_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 43, 7, 66, 15, 0 },
+ { 0, 13, 65, 7, 45, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 45, 7, 65, 13, 0 },
+ { 0, 15, 66, 7, 43, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 } },
+ .odd = { { 0, 14, 66, 7, 44, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 8, 57, 7, 55, 8, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 },
+ { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 35, 7, 69, 20, 1 },
+ { 0, 9, 59, 7, 53, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 5, 48, 7, 63, 12, 0 },
+ { 1, 16, 68, 7, 40, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 1, 26, 71, 7, 28, 2, 0 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 0, 2, 28, 7, 71, 26, 1 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 40, 7, 68, 16, 1 },
+ { 0, 12, 63, 7, 48, 5, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 53, 7, 59, 9, 0 },
+ { 1, 20, 69, 7, 35, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 },
+ { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 8, 55, 7, 57, 8, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 44, 7, 66, 14, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 43, 7, 66, 15, 0 },
+ { 0, 13, 65, 7, 45, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 45, 7, 65, 13, 0 },
+ { 0, 15, 66, 7, 43, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 } },
+ .odd = { { 0, 14, 66, 7, 44, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 8, 57, 7, 55, 8, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 },
+ { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 35, 7, 69, 20, 1 },
+ { 0, 9, 59, 7, 53, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 5, 48, 7, 63, 12, 0 },
+ { 1, 16, 68, 7, 40, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 1, 26, 71, 7, 28, 2, 0 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 0, 2, 28, 7, 71, 26, 1 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 40, 7, 68, 16, 1 },
+ { 0, 12, 63, 7, 48, 5, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 53, 7, 59, 9, 0 },
+ { 1, 20, 69, 7, 35, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 },
+ { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 8, 55, 7, 57, 8, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 44, 7, 66, 14, 0 } } },
+ .ptrn_arr = { { 0x92124249, 0x21242484, 0x12424849, 0x24248492,
+ 0x42484909, 0x24849092, 0x48490924, 0x2 } },
+ .sample_patrn_length = 230,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 84) = 0.275862 */
+ .hor_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 6, 49, 7, 61, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 61, 7, 49, 6, 0 } },
+ .odd = { { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 6, 49, 7, 61, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 61, 7, 49, 6, 0 } },
+ .odd = { { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 } } },
+ .ptrn_arr = { { 0x92124249, 0x248490 } },
+ .sample_patrn_length = 58,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 85) = 0.273504 */
+ .hor_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 5, 47, 7, 63, 13, 0 },
+ { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 29, 7, 70, 26, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 15, 65, 7, 43, 4, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 32, 7, 70, 23, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 17, 67, 7, 39, 4, 0 },
+ { 0, 3, 34, 7, 69, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 69, 7, 34, 3, 0 },
+ { 0, 4, 39, 7, 67, 17, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 23, 70, 7, 32, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 4, 43, 7, 65, 15, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 26, 70, 7, 29, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 },
+ { 0, 13, 63, 7, 47, 5, 0 } },
+ .odd = { { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 28, 7, 69, 27, 2 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 64, 7, 44, 5, 0 },
+ { 0, 2, 30, 7, 70, 25, 1 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 16, 65, 7, 42, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 7, 53, 7, 58, 10, 0 },
+ { 1, 18, 67, 7, 39, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 39, 7, 67, 18, 1 },
+ { 0, 10, 58, 7, 53, 7, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 42, 7, 65, 16, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 1, 25, 70, 7, 30, 2, 0 },
+ { 0, 5, 44, 7, 64, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 2, 27, 69, 7, 28, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 5, 47, 7, 63, 13, 0 },
+ { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 29, 7, 70, 26, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 15, 65, 7, 43, 4, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 32, 7, 70, 23, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 17, 67, 7, 39, 4, 0 },
+ { 0, 3, 34, 7, 69, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 69, 7, 34, 3, 0 },
+ { 0, 4, 39, 7, 67, 17, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 23, 70, 7, 32, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 4, 43, 7, 65, 15, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 26, 70, 7, 29, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 },
+ { 0, 13, 63, 7, 47, 5, 0 } },
+ .odd = { { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 28, 7, 69, 27, 2 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 64, 7, 44, 5, 0 },
+ { 0, 2, 30, 7, 70, 25, 1 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 16, 65, 7, 42, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 7, 53, 7, 58, 10, 0 },
+ { 1, 18, 67, 7, 39, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 39, 7, 67, 18, 1 },
+ { 0, 10, 58, 7, 53, 7, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 42, 7, 65, 16, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 1, 25, 70, 7, 30, 2, 0 },
+ { 0, 5, 44, 7, 64, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 2, 27, 69, 7, 28, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 } } },
+ .ptrn_arr = { { 0x92124249, 0x24248490, 0x48490921, 0x90921242,
+ 0x21242484, 0x42484909, 0x84909212, 0x24 } },
+ .sample_patrn_length = 234,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 86) = 0.271186 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 5, 43, 7, 64, 15, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 23, 69, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 69, 23, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 15, 64, 7, 43, 5, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 5, 43, 7, 64, 15, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 23, 69, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 69, 23, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 15, 64, 7, 43, 5, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ptrn_arr = { { 0x12424849, 0x24849092, 0x49092124, 0x24248 } },
+ .sample_patrn_length = 118,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 87) = 0.268908 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 19, 66, 7, 38, 4, 0 },
+ { 0, 3, 33, 7, 68, 23, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 23, 68, 7, 33, 3, 0 },
+ { 0, 4, 38, 7, 66, 19, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 9, 54, 7, 56, 9, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 30, 7, 69, 25, 2 },
+ { 0, 5, 46, 7, 62, 14, 1 },
+ { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 37, 7, 67, 20, 1 },
+ { 0, 8, 52, 7, 58, 10, 0 },
+ { 1, 16, 64, 7, 42, 5, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 5, 44, 7, 63, 15, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 15, 63, 7, 44, 5, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 5, 42, 7, 64, 16, 1 },
+ { 0, 10, 58, 7, 52, 8, 0 },
+ { 1, 20, 67, 7, 37, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 },
+ { 1, 14, 62, 7, 46, 5, 0 },
+ { 2, 25, 69, 7, 30, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 19, 66, 7, 38, 4, 0 },
+ { 0, 3, 33, 7, 68, 23, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 23, 68, 7, 33, 3, 0 },
+ { 0, 4, 38, 7, 66, 19, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 9, 54, 7, 56, 9, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 30, 7, 69, 25, 2 },
+ { 0, 5, 46, 7, 62, 14, 1 },
+ { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 37, 7, 67, 20, 1 },
+ { 0, 8, 52, 7, 58, 10, 0 },
+ { 1, 16, 64, 7, 42, 5, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 5, 44, 7, 63, 15, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 15, 63, 7, 44, 5, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 5, 42, 7, 64, 16, 1 },
+ { 0, 10, 58, 7, 52, 8, 0 },
+ { 1, 20, 67, 7, 37, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 },
+ { 1, 14, 62, 7, 46, 5, 0 },
+ { 2, 25, 69, 7, 30, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ptrn_arr = { { 0x12424849, 0x84909092, 0x9212424, 0x42484909,
+ 0x90921212, 0x21242484, 0x48490921, 0x242 } },
+ .sample_patrn_length = 238,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 88) = 0.266667 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 } },
+ .odd = { { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 } },
+ .odd = { { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 } } },
+ .ptrn_arr = { { 0x2424849 } },
+ .sample_patrn_length = 30,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 89) = 0.264463 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 33, 7, 67, 24, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 50, 7, 59, 12, 0 },
+ { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 },
+ { 0, 12, 59, 7, 50, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 24, 67, 7, 33, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 } },
+ .odd = { { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 20, 66, 7, 37, 4, 0 },
+ { 0, 2, 31, 7, 67, 26, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 9, 54, 7, 55, 10, 0 },
+ { 1, 16, 62, 7, 44, 5, 0 },
+ { 2, 24, 67, 7, 32, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 47, 7, 61, 13, 1 },
+ { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 },
+ { 1, 13, 61, 7, 47, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 32, 7, 67, 24, 2 },
+ { 0, 5, 44, 7, 62, 16, 1 },
+ { 0, 10, 55, 7, 54, 9, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 26, 67, 7, 31, 2, 0 },
+ { 0, 4, 37, 7, 66, 20, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 33, 7, 67, 24, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 50, 7, 59, 12, 0 },
+ { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 },
+ { 0, 12, 59, 7, 50, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 24, 67, 7, 33, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 } },
+ .odd = { { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 20, 66, 7, 37, 4, 0 },
+ { 0, 2, 31, 7, 67, 26, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 9, 54, 7, 55, 10, 0 },
+ { 1, 16, 62, 7, 44, 5, 0 },
+ { 2, 24, 67, 7, 32, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 47, 7, 61, 13, 1 },
+ { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 },
+ { 1, 13, 61, 7, 47, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 32, 7, 67, 24, 2 },
+ { 0, 5, 44, 7, 62, 16, 1 },
+ { 0, 10, 55, 7, 54, 9, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 26, 67, 7, 31, 2, 0 },
+ { 0, 4, 37, 7, 66, 20, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 } } },
+ .ptrn_arr = { { 0x42424849, 0x90921212, 0x24248490, 0x9212124,
+ 0x48484909, 0x92121242, 0x84849090, 0x2424 } },
+ .sample_patrn_length = 242,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 90) = 0.262295 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 } },
+ .odd = { { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 19, 64, 7, 40, 4, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 4, 40, 7, 64, 19, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 } },
+ .odd = { { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 19, 64, 7, 40, 4, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 4, 40, 7, 64, 19, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 } } },
+ .ptrn_arr = { { 0x42484849, 0x92121242, 0x84849090, 0x242424 } },
+ .sample_patrn_length = 122,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 91) = 0.260163 */
+ .hor_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 },
+ { 0, 12, 57, 7, 51, 8, 0 },
+ { 1, 17, 62, 7, 43, 5, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 46, 7, 60, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 60, 7, 46, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 5, 43, 7, 62, 17, 1 },
+ { 0, 8, 51, 7, 57, 12, 0 },
+ { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 } },
+ .odd = { { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 23, 66, 7, 34, 3, 0 },
+ { 0, 3, 31, 7, 66, 26, 2 },
+ { 0, 4, 39, 7, 64, 20, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 28, 67, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 58, 7, 49, 7, 0 },
+ { 1, 18, 64, 7, 40, 5, 0 },
+ { 2, 25, 66, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 66, 25, 2 },
+ { 0, 5, 40, 7, 64, 18, 1 },
+ { 0, 7, 49, 7, 58, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 0, 2, 29, 7, 67, 28, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 53, 7, 55, 10, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 20, 64, 7, 39, 4, 0 },
+ { 2, 26, 66, 7, 31, 3, 0 },
+ { 0, 3, 34, 7, 66, 23, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 },
+ { 0, 12, 57, 7, 51, 8, 0 },
+ { 1, 17, 62, 7, 43, 5, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 46, 7, 60, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 60, 7, 46, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 5, 43, 7, 62, 17, 1 },
+ { 0, 8, 51, 7, 57, 12, 0 },
+ { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 } },
+ .odd = { { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 23, 66, 7, 34, 3, 0 },
+ { 0, 3, 31, 7, 66, 26, 2 },
+ { 0, 4, 39, 7, 64, 20, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 28, 67, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 58, 7, 49, 7, 0 },
+ { 1, 18, 64, 7, 40, 5, 0 },
+ { 2, 25, 66, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 66, 25, 2 },
+ { 0, 5, 40, 7, 64, 18, 1 },
+ { 0, 7, 49, 7, 58, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 0, 2, 29, 7, 67, 28, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 53, 7, 55, 10, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 20, 64, 7, 39, 4, 0 },
+ { 2, 26, 66, 7, 31, 3, 0 },
+ { 0, 3, 34, 7, 66, 23, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 } } },
+ .ptrn_arr = { { 0x42484849, 0x12124242, 0x90909212, 0x24848484,
+ 0x21242424, 0x9090921, 0x48484849, 0x24242 } },
+ .sample_patrn_length = 246,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 92) = 0.258065 */
+ .hor_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 48, 7, 58, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 58, 7, 48, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 48, 7, 58, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 58, 7, 48, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x2424242 } },
+ .sample_patrn_length = 62,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 93) = 0.256 */
+ .hor_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 38, 7, 64, 21, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 21, 64, 7, 38, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 34, 7, 65, 24, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 48, 7, 58, 13, 1 },
+ { 0, 10, 53, 7, 54, 11, 0 },
+ { 1, 12, 57, 7, 50, 8, 0 },
+ { 1, 15, 60, 7, 45, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 45, 7, 60, 15, 1 },
+ { 0, 8, 50, 7, 57, 12, 1 },
+ { 0, 11, 54, 7, 53, 10, 0 },
+ { 1, 13, 58, 7, 48, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 24, 65, 7, 34, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 38, 7, 64, 21, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 21, 64, 7, 38, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 34, 7, 65, 24, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 48, 7, 58, 13, 1 },
+ { 0, 10, 53, 7, 54, 11, 0 },
+ { 1, 12, 57, 7, 50, 8, 0 },
+ { 1, 15, 60, 7, 45, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 45, 7, 60, 15, 1 },
+ { 0, 8, 50, 7, 57, 12, 1 },
+ { 0, 11, 54, 7, 53, 10, 0 },
+ { 1, 13, 58, 7, 48, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 24, 65, 7, 34, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x42424248, 0x12124242, 0x92121212,
+ 0x90909090, 0x84848490, 0x24248484, 0x242424 } },
+ .sample_patrn_length = 250,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 94) = 0.253968 */
+ .hor_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x48484848, 0x42424242, 0x2424242 } },
+ .sample_patrn_length = 126,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 95) = 0.251969 */
+ .hor_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 31, 7, 64, 28, 2 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 34, 7, 63, 25, 2 },
+ { 0, 4, 35, 7, 63, 24, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 41, 7, 61, 19, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 19, 61, 7, 41, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 24, 63, 7, 35, 4, 0 },
+ { 2, 25, 63, 7, 34, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 2, 28, 64, 7, 31, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 57, 7, 48, 8, 0 },
+ { 1, 15, 58, 7, 46, 8, 0 },
+ { 1, 16, 59, 7, 45, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 22, 62, 7, 37, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 24, 64, 7, 34, 4, 0 },
+ { 2, 26, 64, 7, 33, 3, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 3, 28, 64, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 64, 28, 3 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 3, 33, 7, 64, 26, 2 },
+ { 0, 4, 34, 7, 64, 24, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 37, 7, 62, 22, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 45, 7, 59, 16, 1 },
+ { 0, 8, 46, 7, 58, 15, 1 },
+ { 0, 8, 48, 7, 57, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 31, 7, 64, 28, 2 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 34, 7, 63, 25, 2 },
+ { 0, 4, 35, 7, 63, 24, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 41, 7, 61, 19, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 19, 61, 7, 41, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 24, 63, 7, 35, 4, 0 },
+ { 2, 25, 63, 7, 34, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 2, 28, 64, 7, 31, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 57, 7, 48, 8, 0 },
+ { 1, 15, 58, 7, 46, 8, 0 },
+ { 1, 16, 59, 7, 45, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 22, 62, 7, 37, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 24, 64, 7, 34, 4, 0 },
+ { 2, 26, 64, 7, 33, 3, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 3, 28, 64, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 64, 28, 3 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 3, 33, 7, 64, 26, 2 },
+ { 0, 4, 34, 7, 64, 24, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 37, 7, 62, 22, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 45, 7, 59, 16, 1 },
+ { 0, 8, 46, 7, 58, 15, 1 },
+ { 0, 8, 48, 7, 57, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x48484848, 0x48484848, 0x48484848,
+ 0x42424242, 0x42424242, 0x42424242, 0x2424242 } },
+ .sample_patrn_length = 254,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 96) = 0.25 */
+ .hor_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 } },
+ .odd = { { 0, 11, 53, 7, 53, 11, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 } },
+ .odd = { { 0, 11, 53, 7, 53, 11, 0 } } },
+ .ptrn_arr = { { 0x9 } },
+ .sample_patrn_length = 8,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+} };
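A pattern worth noting in the phase tables that end here: the fourth value of every row is 7, and the remaining six values sum to 128 (1 << 7) in every row spot-checked, e.g. { 3, 29, 64, 7, 29, 3, 0 } and { 0, 11, 53, 7, 53, 11, 0 }. That is consistent with six-tap polyphase filters normalized to a 7-bit gain, although the authoritative field layout is struct imgu_abi_bds_phase_arr from the driver ABI headers, which are not part of this patch. A minimal standalone sketch of that assumed [c0, c1, c2, shift, c3, c4, c5] layout:

/*
 * Sanity-check sketch for the BDS phase rows. The layout
 * [c0, c1, c2, shift, c3, c4, c5] is an assumption drawn from the
 * numbers above, not the driver ABI definition: c0..c5 are filter taps
 * and shift is the normalization exponent, so the taps must sum to
 * 1 << shift (128 for every row in these tables).
 */
#include <stdio.h>

static int bds_row_is_normalized(const int row[7])
{
	int sum = row[0] + row[1] + row[2] + row[4] + row[5] + row[6];

	return sum == (1 << row[3]);
}

int main(void)
{
	/* rows copied verbatim from the tables above */
	static const int even0[7] = { 3, 29, 64, 7, 29, 3, 0 };
	static const int odd0[7] = { 0, 11, 53, 7, 53, 11, 0 };

	printf("even0: %s\n", bds_row_is_normalized(even0) ? "ok" : "bad");
	printf("odd0: %s\n", bds_row_is_normalized(odd0) ? "ok" : "bad");
	return 0;
}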
+
+const s32 imgu_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN] = {
+ IMGU_SCALER_FP * -0.000000000000000,
+ IMGU_SCALER_FP * -0.000249009327023,
+ IMGU_SCALER_FP * -0.001022241683322,
+ IMGU_SCALER_FP * -0.002352252699175,
+ IMGU_SCALER_FP * -0.004261594242362,
+ IMGU_SCALER_FP * -0.006761648795689,
+ IMGU_SCALER_FP * -0.009851589454154,
+ IMGU_SCALER_FP * -0.013517488475013,
+ IMGU_SCALER_FP * -0.017731595701026,
+ IMGU_SCALER_FP * -0.022451806160682,
+ IMGU_SCALER_FP * -0.027621333752351,
+ IMGU_SCALER_FP * -0.033168605172067,
+ IMGU_SCALER_FP * -0.039007385183627,
+ IMGU_SCALER_FP * -0.045037140997445,
+ IMGU_SCALER_FP * -0.051143649969349,
+ IMGU_SCALER_FP * -0.057199851105019,
+ IMGU_SCALER_FP * -0.063066937016941,
+ IMGU_SCALER_FP * -0.068595679088417,
+ IMGU_SCALER_FP * -0.073627974715370,
+ IMGU_SCALER_FP * -0.077998601684588,
+ IMGU_SCALER_FP * -0.081537161069780,
+ IMGU_SCALER_FP * -0.084070186546763,
+ IMGU_SCALER_FP * -0.085423394806327,
+ IMGU_SCALER_FP * -0.085424048835192,
+ IMGU_SCALER_FP * -0.083903403294908,
+ IMGU_SCALER_FP * -0.080699199103829,
+ IMGU_SCALER_FP * -0.075658172660608,
+ IMGU_SCALER_FP * -0.068638543974523,
+ IMGU_SCALER_FP * -0.059512447316781,
+ IMGU_SCALER_FP * -0.048168267897836,
+ IMGU_SCALER_FP * -0.034512848520921,
+ IMGU_SCALER_FP * -0.018473531164409,
+ IMGU_SCALER_FP * 0.000000000000000,
+ IMGU_SCALER_FP * 0.020934105554674,
+ IMGU_SCALER_FP * 0.044329836544650,
+ IMGU_SCALER_FP * 0.070161864654994,
+ IMGU_SCALER_FP * 0.098377719033862,
+ IMGU_SCALER_FP * 0.128897348012514,
+ IMGU_SCALER_FP * 0.161613019706978,
+ IMGU_SCALER_FP * 0.196389570939079,
+ IMGU_SCALER_FP * 0.233065009152522,
+ IMGU_SCALER_FP * 0.271451467092549,
+ IMGU_SCALER_FP * 0.311336505037934,
+ IMGU_SCALER_FP * 0.352484750396743,
+ IMGU_SCALER_FP * 0.394639859577736,
+ IMGU_SCALER_FP * 0.437526782302744,
+ IMGU_SCALER_FP * 0.480854304005320,
+ IMGU_SCALER_FP * 0.524317837738108,
+ IMGU_SCALER_FP * 0.567602433152471,
+ IMGU_SCALER_FP * 0.610385966680669,
+ IMGU_SCALER_FP * 0.652342474098843,
+ IMGU_SCALER_FP * 0.693145584226952,
+ IMGU_SCALER_FP * 0.732472010670320,
+ IMGU_SCALER_FP * 0.770005057258970,
+ IMGU_SCALER_FP * 0.805438092218553,
+ IMGU_SCALER_FP * 0.838477946124244,
+ IMGU_SCALER_FP * 0.868848189350256,
+ IMGU_SCALER_FP * 0.896292246026874,
+ IMGU_SCALER_FP * 0.920576303438191,
+ IMGU_SCALER_FP * 0.941491978311745,
+ IMGU_SCALER_FP * 0.958858704531378,
+ IMGU_SCALER_FP * 0.972525810403401,
+ IMGU_SCALER_FP * 0.982374257672165,
+ IMGU_SCALER_FP * 0.988318018955586,
+ IMGU_SCALER_FP * 0.990305075088925,
+ IMGU_SCALER_FP * 0.988318018955586,
+ IMGU_SCALER_FP * 0.982374257672165,
+ IMGU_SCALER_FP * 0.972525810403401,
+ IMGU_SCALER_FP * 0.958858704531378,
+ IMGU_SCALER_FP * 0.941491978311745,
+ IMGU_SCALER_FP * 0.920576303438191,
+ IMGU_SCALER_FP * 0.896292246026874,
+ IMGU_SCALER_FP * 0.868848189350256,
+ IMGU_SCALER_FP * 0.838477946124244,
+ IMGU_SCALER_FP * 0.805438092218553,
+ IMGU_SCALER_FP * 0.770005057258970,
+ IMGU_SCALER_FP * 0.732472010670320,
+ IMGU_SCALER_FP * 0.693145584226952,
+ IMGU_SCALER_FP * 0.652342474098843,
+ IMGU_SCALER_FP * 0.610385966680669,
+ IMGU_SCALER_FP * 0.567602433152471,
+ IMGU_SCALER_FP * 0.524317837738108,
+ IMGU_SCALER_FP * 0.480854304005320,
+ IMGU_SCALER_FP * 0.437526782302744,
+ IMGU_SCALER_FP * 0.394639859577736,
+ IMGU_SCALER_FP * 0.352484750396743,
+ IMGU_SCALER_FP * 0.311336505037934,
+ IMGU_SCALER_FP * 0.271451467092549,
+ IMGU_SCALER_FP * 0.233065009152522,
+ IMGU_SCALER_FP * 0.196389570939079,
+ IMGU_SCALER_FP * 0.161613019706978,
+ IMGU_SCALER_FP * 0.128897348012514,
+ IMGU_SCALER_FP * 0.098377719033862,
+ IMGU_SCALER_FP * 0.070161864654994,
+ IMGU_SCALER_FP * 0.044329836544650,
+ IMGU_SCALER_FP * 0.020934105554674,
+ IMGU_SCALER_FP * 0.000000000000000,
+ IMGU_SCALER_FP * -0.018473531164409,
+ IMGU_SCALER_FP * -0.034512848520921,
+ IMGU_SCALER_FP * -0.048168267897836,
+ IMGU_SCALER_FP * -0.059512447316781,
+ IMGU_SCALER_FP * -0.068638543974523,
+ IMGU_SCALER_FP * -0.075658172660608,
+ IMGU_SCALER_FP * -0.080699199103829,
+ IMGU_SCALER_FP * -0.083903403294908,
+ IMGU_SCALER_FP * -0.085424048835192,
+ IMGU_SCALER_FP * -0.085423394806327,
+ IMGU_SCALER_FP * -0.084070186546763,
+ IMGU_SCALER_FP * -0.081537161069780,
+ IMGU_SCALER_FP * -0.077998601684588,
+ IMGU_SCALER_FP * -0.073627974715370,
+ IMGU_SCALER_FP * -0.068595679088417,
+ IMGU_SCALER_FP * -0.063066937016941,
+ IMGU_SCALER_FP * -0.057199851105019,
+ IMGU_SCALER_FP * -0.051143649969349,
+ IMGU_SCALER_FP * -0.045037140997445,
+ IMGU_SCALER_FP * -0.039007385183627,
+ IMGU_SCALER_FP * -0.033168605172067,
+ IMGU_SCALER_FP * -0.027621333752351,
+ IMGU_SCALER_FP * -0.022451806160682,
+ IMGU_SCALER_FP * -0.017731595701026,
+ IMGU_SCALER_FP * -0.013517488475013,
+ IMGU_SCALER_FP * -0.009851589454154,
+ IMGU_SCALER_FP * -0.006761648795689,
+ IMGU_SCALER_FP * -0.004261594242362,
+ IMGU_SCALER_FP * -0.002352252699175,
+ IMGU_SCALER_FP * -0.001022241683322,
+ IMGU_SCALER_FP * -0.000249009327023
+};
+
+const s32 imgu_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN] = {
+ IMGU_SCALER_FP * 0.074300676367033,
+ IMGU_SCALER_FP * 0.094030234498392,
+ IMGU_SCALER_FP * 0.115522859526596,
+ IMGU_SCALER_FP * 0.138778551451644,
+ IMGU_SCALER_FP * 0.163629399140505,
+ IMGU_SCALER_FP * 0.190075402593178,
+ IMGU_SCALER_FP * 0.217864695110113,
+ IMGU_SCALER_FP * 0.247081232257828,
+ IMGU_SCALER_FP * 0.277389191770256,
+ IMGU_SCALER_FP * 0.308704618080881,
+ IMGU_SCALER_FP * 0.340859600056670,
+ IMGU_SCALER_FP * 0.373602270998074,
+ IMGU_SCALER_FP * 0.406848675338577,
+ IMGU_SCALER_FP * 0.440346946378629,
+ IMGU_SCALER_FP * 0.473845217418681,
+ IMGU_SCALER_FP * 0.507091621759184,
+ IMGU_SCALER_FP * 0.540002203833621,
+ IMGU_SCALER_FP * 0.572157185809410,
+ IMGU_SCALER_FP * 0.603472612120036,
+ IMGU_SCALER_FP * 0.633612660499431,
+ IMGU_SCALER_FP * 0.662493375381080,
+ IMGU_SCALER_FP * 0.689778934498917,
+ IMGU_SCALER_FP * 0.715301426719909,
+ IMGU_SCALER_FP * 0.738892940911023,
+ IMGU_SCALER_FP * 0.760385565939227,
+ IMGU_SCALER_FP * 0.779527435104971,
+ IMGU_SCALER_FP * 0.796234592841739,
+ IMGU_SCALER_FP * 0.810339128016497,
+ IMGU_SCALER_FP * 0.821841040629247,
+ IMGU_SCALER_FP * 0.830488463980438,
+ IMGU_SCALER_FP * 0.836281398070072,
+ IMGU_SCALER_FP * 0.839219842898146,
+ IMGU_SCALER_FP * 0.839219842898146,
+ IMGU_SCALER_FP * 0.836281398070072,
+ IMGU_SCALER_FP * 0.830488463980438,
+ IMGU_SCALER_FP * 0.821841040629247,
+ IMGU_SCALER_FP * 0.810339128016497,
+ IMGU_SCALER_FP * 0.796234592841739,
+ IMGU_SCALER_FP * 0.779527435104971,
+ IMGU_SCALER_FP * 0.760385565939227,
+ IMGU_SCALER_FP * 0.738892940911023,
+ IMGU_SCALER_FP * 0.715301426719909,
+ IMGU_SCALER_FP * 0.689778934498917,
+ IMGU_SCALER_FP * 0.662493375381080,
+ IMGU_SCALER_FP * 0.633612660499431,
+ IMGU_SCALER_FP * 0.603472612120036,
+ IMGU_SCALER_FP * 0.572157185809410,
+ IMGU_SCALER_FP * 0.540002203833621,
+ IMGU_SCALER_FP * 0.507091621759184,
+ IMGU_SCALER_FP * 0.473845217418681,
+ IMGU_SCALER_FP * 0.440346946378629,
+ IMGU_SCALER_FP * 0.406848675338577,
+ IMGU_SCALER_FP * 0.373602270998074,
+ IMGU_SCALER_FP * 0.340859600056670,
+ IMGU_SCALER_FP * 0.308704618080881,
+ IMGU_SCALER_FP * 0.277389191770256,
+ IMGU_SCALER_FP * 0.247081232257828,
+ IMGU_SCALER_FP * 0.217864695110113,
+ IMGU_SCALER_FP * 0.190075402593178,
+ IMGU_SCALER_FP * 0.163629399140505,
+ IMGU_SCALER_FP * 0.138778551451644,
+ IMGU_SCALER_FP * 0.115522859526596,
+ IMGU_SCALER_FP * 0.094030234498392,
+ IMGU_SCALER_FP * 0.074300676367033
+};
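Both coefficient tables above store a floating-point filter weight scaled by IMGU_SCALER_FP, which the header added below defines as BIT(31), i.e. 1.0 in the scaler's fixed-point format. A self-contained sketch of that conversion; the macro is redefined locally only so the snippet builds on its own, and the values printed are taps taken from the tables above:

/*
 * Convert a floating-point tap to the scaler's s32 fixed point,
 * mirroring the "IMGU_SCALER_FP * coeff" initializers above. With
 * 1.0 == 2^31, the largest tap in these tables (~0.9903) stays
 * below S32_MAX, so the cast does not overflow.
 */
#include <stdint.h>
#include <stdio.h>

#define IMGU_SCALER_FP (1UL << 31)	/* 1.0 in fixed point */

static int32_t imgu_scaler_fp(double coeff)
{
	return (int32_t)(coeff * IMGU_SCALER_FP);
}

int main(void)
{
	/* centre tap of the 2-tap table, negative lobe of the 4-tap table */
	printf("%ld\n", (long)imgu_scaler_fp(0.839219842898146));
	printf("%ld\n", (long)imgu_scaler_fp(-0.085424048835192));
	return 0;
}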
+
+/* settings for Geometric Distortion Correction */
+const s16 imgu_css_gdc_lut[4][256] = { {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -2, -2, -2,
+ -2, -3, -3, -3, -4, -4, -4, -5, -5, -5, -6, -6, -7, -7, -7, -8, -8,
+ -9, -9, -10, -10, -11, -11, -12, -12, -13, -13, -14, -14, -15, -15,
+ -16, -16, -17, -17, -18, -19, -19, -20, -20, -21, -22, -22, -23, -24,
+ -24, -25, -25, -26, -27, -27, -28, -29, -29, -30, -31, -31, -32, -33,
+ -33, -34, -35, -35, -36, -37, -37, -38, -39, -39, -40, -41, -41, -42,
+ -43, -43, -44, -45, -45, -46, -46, -47, -48, -48, -49, -50, -50, -51,
+ -52, -52, -53, -53, -54, -55, -55, -56, -56, -57, -58, -58, -59, -59,
+ -60, -60, -61, -61, -62, -62, -63, -64, -64, -64, -65, -65, -66, -66,
+ -67, -67, -68, -68, -68, -69, -69, -70, -70, -70, -71, -71, -71, -72,
+ -72, -72, -73, -73, -73, -73, -74, -74, -74, -74, -74, -75, -75, -75,
+ -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75,
+ -75, -75, -75, -75, -74, -74, -74, -74, -74, -73, -73, -73, -73, -72,
+ -72, -72, -71, -71, -70, -70, -69, -69, -68, -68, -67, -67, -66, -66,
+ -65, -64, -64, -63, -62, -61, -61, -60, -59, -58, -57, -56, -56, -55,
+ -54, -53, -52, -51, -50, -49, -47, -46, -45, -44, -43, -41, -40, -39,
+ -38, -36, -35, -33, -32, -31, -29, -28, -26, -25, -23, -21, -20, -18,
+ -16, -15, -13, -11, -9, -7, -5, -3, -1
+}, {
+ 0, 2, 4, 6, 8, 10, 13, 15, 17, 20, 23, 25, 28, 31, 33, 36, 39, 42, 45,
+ 48, 51, 54, 58, 61, 64, 68, 71, 74, 78, 82, 85, 89, 93, 96, 100, 104,
+ 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 162,
+ 166, 171, 175, 180, 184, 189, 193, 198, 203, 207, 212, 217, 222, 227,
+ 232, 236, 241, 246, 251, 256, 261, 266, 271, 276, 282, 287, 292, 297,
+ 302, 307, 313, 318, 323, 328, 334, 339, 344, 350, 355, 360, 366, 371,
+ 377, 382, 388, 393, 399, 404, 409, 415, 420, 426, 431, 437, 443, 448,
+ 454, 459, 465, 470, 476, 481, 487, 492, 498, 504, 509, 515, 520, 526,
+ 531, 537, 542, 548, 553, 559, 564, 570, 576, 581, 586, 592, 597, 603,
+ 608, 614, 619, 625, 630, 635, 641, 646, 651, 657, 662, 667, 673, 678,
+ 683, 688, 694, 699, 704, 709, 714, 719, 724, 729, 735, 740, 745, 749,
+ 754, 759, 764, 769, 774, 779, 783, 788, 793, 797, 802, 807, 811, 816,
+ 820, 825, 829, 834, 838, 842, 847, 851, 855, 859, 863, 868, 872, 876,
+ 880, 884, 888, 891, 895, 899, 903, 906, 910, 914, 917, 921, 924, 927,
+ 931, 934, 937, 940, 944, 947, 950, 953, 956, 959, 961, 964, 967, 970,
+ 972, 975, 977, 980, 982, 984, 987, 989, 991, 993, 995, 997, 999, 1001,
+ 1002, 1004, 1006, 1007, 1009, 1010, 1011, 1013, 1014, 1015, 1016, 1017,
+ 1018, 1019, 1020, 1020, 1021, 1022, 1022, 1023, 1023, 1023, 1023, 1023
+}, {
+ 1024, 1023, 1023, 1023, 1023, 1023, 1022, 1022, 1021, 1020, 1020, 1019,
+ 1018, 1017, 1016, 1015, 1014, 1013, 1011, 1010, 1009, 1007, 1006, 1004,
+ 1002, 1001, 999, 997, 995, 993, 991, 989, 987, 984, 982, 980, 977, 975,
+ 972, 970, 967, 964, 961, 959, 956, 953, 950, 947, 944, 940, 937, 934,
+ 931, 927, 924, 921, 917, 914, 910, 906, 903, 899, 895, 891, 888, 884,
+ 880, 876, 872, 868, 863, 859, 855, 851, 847, 842, 838, 834, 829, 825,
+ 820, 816, 811, 807, 802, 797, 793, 788, 783, 779, 774, 769, 764, 759,
+ 754, 749, 745, 740, 735, 729, 724, 719, 714, 709, 704, 699, 694, 688,
+ 683, 678, 673, 667, 662, 657, 651, 646, 641, 635, 630, 625, 619, 614,
+ 608, 603, 597, 592, 586, 581, 576, 570, 564, 559, 553, 548, 542, 537,
+ 531, 526, 520, 515, 509, 504, 498, 492, 487, 481, 476, 470, 465, 459,
+ 454, 448, 443, 437, 431, 426, 420, 415, 409, 404, 399, 393, 388, 382,
+ 377, 371, 366, 360, 355, 350, 344, 339, 334, 328, 323, 318, 313, 307,
+ 302, 297, 292, 287, 282, 276, 271, 266, 261, 256, 251, 246, 241, 236,
+ 232, 227, 222, 217, 212, 207, 203, 198, 193, 189, 184, 180, 175, 171,
+ 166, 162, 157, 153, 149, 144, 140, 136, 132, 128, 124, 120, 116, 112,
+ 108, 104, 100, 96, 93, 89, 85, 82, 78, 74, 71, 68, 64, 61, 58, 54, 51,
+ 48, 45, 42, 39, 36, 33, 31, 28, 25, 23, 20, 17, 15, 13, 10, 8, 6, 4, 2
+}, {
+ 0, -1, -3, -5, -7, -9, -11, -13, -14, -16, -19, -20, -21, -23, -24, -26,
+ -28, -29, -30, -32, -34, -34, -37, -38, -38, -41, -42, -42, -44, -46,
+ -46, -48, -49, -49, -51, -52, -53, -54, -55, -56, -57, -57, -58, -59,
+ -60, -60, -62, -62, -63, -63, -64, -65, -66, -66, -67, -68, -67, -69,
+ -69, -69, -70, -70, -71, -71, -72, -72, -72, -73, -73, -73, -73, -73,
+ -73, -74, -75, -74, -75, -75, -74, -75, -75, -75, -75, -75, -75, -75,
+ -75, -75, -75, -75, -75, -75, -75, -74, -75, -74, -75, -75, -74, -74,
+ -73, -73, -73, -73, -73, -73, -73, -71, -72, -71, -72, -70, -70, -70,
+ -69, -70, -69, -68, -68, -68, -67, -67, -66, -66, -65, -65, -64, -64,
+ -64, -63, -62, -62, -61, -61, -60, -60, -59, -59, -58, -58, -57, -57,
+ -55, -55, -55, -53, -54, -53, -52, -51, -52, -50, -50, -49, -48, -47,
+ -46, -46, -46, -46, -45, -43, -43, -42, -42, -41, -41, -40, -39, -39,
+ -38, -37, -37, -36, -35, -35, -34, -33, -32, -32, -31, -31, -31, -29,
+ -28, -27, -27, -27, -26, -25, -25, -24, -24, -23, -22, -22, -21, -20,
+ -20, -20, -18, -19, -17, -17, -16, -16, -15, -14, -14, -14, -14, -12,
+ -12, -12, -11, -11, -11, -10, -9, -9, -8, -8, -7, -6, -7, -7, -6, -6,
+ -5, -4, -5, -5, -3, -3, -4, -2, -3, -2, -1, -2, -1, -1, 0, -1, -1, 0,
+ -1, 0, 1, 0, 0, 0, 0, 0, 0, 0
+} };
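The header added below sizes this LUT as IMGU_GDC_LUT_UNIT (4) rows of IMGU_GDC_LUT_LEN (256) entries. Reading column i across the four rows as the four taps of interpolation phase i, every phase spot-checked here sums to 1024, i.e. unity gain in 10-bit fixed point; phase 1, for example, is (0, 2, 1023, -1). A small check of that assumed column-per-phase layout, using phases copied from the table:

/*
 * Check that a GDC interpolation phase sums to unity gain (1024).
 * The column-per-phase reading of the 4 x 256 LUT above is an
 * assumption, not taken from the driver ABI.
 */
#include <stdio.h>

static int gdc_phase_is_unity(const short taps[4])
{
	return (taps[0] + taps[1] + taps[2] + taps[3]) == 1024;
}

int main(void)
{
	/* columns 0..2 of the LUT above */
	static const short phases[][4] = {
		{ 0, 0, 1024, 0 },
		{ 0, 2, 1023, -1 },
		{ 0, 4, 1023, -3 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(phases) / sizeof(phases[0]); i++)
		printf("phase %u: %s\n", i,
		       gdc_phase_is_unity(phases[i]) ? "ok" : "bad");
	return 0;
}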
+
+const struct imgu_css_xnr3_vmem_defaults imgu_css_xnr3_vmem_defaults = {
+ .x = {
+ 1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
+ 2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120
+ },
+ .a = {
+ -7213, -5580, -4371, -3421, -2722, -2159, -6950, -5585,
+ -4529, -3697, -3010, -2485, -2070, -1727, -1428, 0
+ },
+ .b = {
+ 4096, 3603, 3178, 2811, 2497, 2226, 1990, 1783,
+ 1603, 1446, 1307, 1185, 1077, 981, 895, 819
+ },
+ .c = {
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+};
+
+/* settings for Bayer Noise Reduction */
+const struct ipu3_uapi_bnr_static_config imgu_css_bnr_defaults = {
+ { 16, 16, 16, 16 }, /* wb_gains */
+ { 16, 16, 16, 16 }, /* wb_gains_thr */
+ { 0, X, 8, 6, X, 14 }, /* thr_coeffs */
+ { 0, 0, 0, 0 }, /* thr_ctrl_shd */
+ { -128, X, -128, X }, /* opt_center */
+ { /* lut */
+ { 17, 23, 28, 32, 36, 39, 42, 45,
+ 48, 51, 53, 55, 58, 60, 62, 64,
+ 66, 68, 70, 72, 73, 75, 77, 78,
+ 80, 82, 83, 85, 86, 88, 89, 90 }
+ },
+ { 4, X, 1, 8, X, 8, X, 8, X }, /* bp_ctrl */
+ { 8, 4, 4, X, 8, X, 1, 1, 1, 1 }, /* dn_detect_ctrl */
+};
+
+const struct ipu3_uapi_dm_config imgu_css_dm_defaults = {
+ 1, 1, 1, X, X, 8, X, 7, X, 8, X, 8, X, 4, X
+};
+
+const struct ipu3_uapi_ccm_mat_config imgu_css_ccm_defaults = {
+ 9775, -2671, 1087, 0,
+ -1071, 8303, 815, 0,
+ -23, -7887, 16103, 0
+};
+
+/* settings for Gamma correction */
+const struct ipu3_uapi_gamma_corr_lut imgu_css_gamma_lut = { {
+ 63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239, 255, 271, 287,
+ 303, 319, 335, 351, 367, 383, 399, 415, 431, 447, 463, 479, 495, 511,
+ 527, 543, 559, 575, 591, 607, 623, 639, 655, 671, 687, 703, 719, 735,
+ 751, 767, 783, 799, 815, 831, 847, 863, 879, 895, 911, 927, 943, 959,
+ 975, 991, 1007, 1023, 1039, 1055, 1071, 1087, 1103, 1119, 1135, 1151,
+ 1167, 1183, 1199, 1215, 1231, 1247, 1263, 1279, 1295, 1311, 1327, 1343,
+ 1359, 1375, 1391, 1407, 1423, 1439, 1455, 1471, 1487, 1503, 1519, 1535,
+ 1551, 1567, 1583, 1599, 1615, 1631, 1647, 1663, 1679, 1695, 1711, 1727,
+ 1743, 1759, 1775, 1791, 1807, 1823, 1839, 1855, 1871, 1887, 1903, 1919,
+ 1935, 1951, 1967, 1983, 1999, 2015, 2031, 2047, 2063, 2079, 2095, 2111,
+ 2143, 2175, 2207, 2239, 2271, 2303, 2335, 2367, 2399, 2431, 2463, 2495,
+ 2527, 2559, 2591, 2623, 2655, 2687, 2719, 2751, 2783, 2815, 2847, 2879,
+ 2911, 2943, 2975, 3007, 3039, 3071, 3103, 3135, 3167, 3199, 3231, 3263,
+ 3295, 3327, 3359, 3391, 3423, 3455, 3487, 3519, 3551, 3583, 3615, 3647,
+ 3679, 3711, 3743, 3775, 3807, 3839, 3871, 3903, 3935, 3967, 3999, 4031,
+ 4063, 4095, 4127, 4159, 4223, 4287, 4351, 4415, 4479, 4543, 4607, 4671,
+ 4735, 4799, 4863, 4927, 4991, 5055, 5119, 5183, 5247, 5311, 5375, 5439,
+ 5503, 5567, 5631, 5695, 5759, 5823, 5887, 5951, 6015, 6079, 6143, 6207,
+ 6271, 6335, 6399, 6463, 6527, 6591, 6655, 6719, 6783, 6847, 6911, 6975,
+ 7039, 7103, 7167, 7231, 7295, 7359, 7423, 7487, 7551, 7615, 7679, 7743,
+ 7807, 7871, 7935, 7999, 8063, 8127, 8191
+} };
+
+const struct ipu3_uapi_csc_mat_config imgu_css_csc_defaults = {
+ 4898, 9617, 1867, 0,
+ -2410, -4732, 7143, 0,
+ 10076, -8437, -1638, 0
+};
+
+const struct ipu3_uapi_cds_params imgu_css_cds_defaults = {
+ 1, 3, 3, 1,
+ 1, 3, 3, 1,
+ 4, X, /* ds_nf */
+ 1, /* csc_en */
+ 0, X /* uv_bin_output */
+};
+
+const struct ipu3_uapi_shd_config_static imgu_css_shd_defaults = {
+ .grid = {
+ .width = 73,
+ .height = 55,
+ .block_width_log2 = 7,
+ .block_height_log2 = 7,
+ .x_start = 0,
+ .y_start = 0,
+ },
+ .general = {
+ .shd_enable = 1,
+ .gain_factor = 0,
+ },
+ .black_level = {
+ .bl_r = 0,
+ .bl_gr = 0 | (0 << IPU3_UAPI_SHD_BLGR_NF_SHIFT),
+ .bl_gb = 0,
+ .bl_b = 0,
+ },
+};
+
+const struct ipu3_uapi_yuvp1_iefd_config imgu_css_iefd_defaults = {
+ .units = {
+ .cu_1 = { 0, 150, 7, 0 },
+ .cu_ed = { 7, 110, 244, X, 307, 409, 511, X,
+ 184, 255, 255, X, 0, 0, X,
+ 7, 81, 255, X, 255, 255, X },
+ .cu_3 = { 148, 251, 10, 0 },
+ .cu_5 = { 25, 70, 501, X, 32, X },
+ .cu_6 = { 32, 63, 183, X, 397,
+ 33, 0, X, 0,
+ 0, 64, X, 64, X },
+ .cu_7 = { 200, 303,
+ 10, 0 },
+ .cu_unsharp = { 10, 64, 110, X, 511,
+ 66, 12, X, 0,
+ 0, 56, X, 64, X },
+ .cu_radial = { 6, 203, 255, 255, 255, 255, X,
+ 84, 444, 397, 288, 300, X,
+ 4, 69, 207, X, 369, 448, X },
+ .cu_vssnlm = { 61, 100, 25, 0}
+ },
+ .config = { 45, X, 0, X, 16, X, 45, X },
+ .control = { 1, 1, 1, 1, 1, X },
+ .sharp = { { 50, X, 511, X, 50, X, 50, X },
+ { 64, X, 0, X, 0, X},
+ { 56, X, 56, X } },
+ .unsharp = { { 36, 17, 8, X },
+ { 13, 7, 3, X } },
+ .rad = { { -2104, X, -1559, X },
+ { 4426816, X },
+ { 2430481, X },
+ { 6, X, 79, X },
+ { 64, 0, 0, X },
+ { 1, X, 2, X, 0, X, 0, X },
+ { 40, X, 62, X } },
+ .vsslnm = { { 16, 32, 64, X },
+ { 1, X, 2, X, 8, X } },
+};
+
+const struct ipu3_uapi_yuvp1_yds_config imgu_css_yds_defaults = {
+ 0, 1, 1, 0, 0, 1, 1, 0, 2, X, 0, X
+};
+
+const struct ipu3_uapi_yuvp1_chnr_config imgu_css_chnr_defaults = {
+ .coring = { 0, X, 0, X },
+ .sense_gain = { 6, 6, 6, X, 4, 4, 4, X },
+ .iir_fir = { 8, X, 12, X, 0, 256 - 127, X },
+};
+
+const struct ipu3_uapi_yuvp1_y_ee_nr_config imgu_css_y_ee_nr_defaults = {
+ .lpf = { 4, X, 8, X, 16, X, 0 },
+ .sense = { 8191, X, 0, X, 8191, X, 0, X },
+ .gain = { 8, X, 0, X, 8, X, 0, X },
+ .clip = { 8, X, 0, X, 8, X, 0, X },
+ .frng = { 2, X, 200, X, 2, X, 1, 1, X },
+ .diag = { 1, X, 4, 1, 1, 4, X },
+ .fc_coring = { 0, X, 0, X, 0, X, 0, X }
+};
+
+const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
+ imgu_css_tcc_gain_pcwl_lut = { {
+ 1024, 1032, 1040, 1048, 1057, 1065, 1073, 1081, 1089, 1097, 1105, 1113,
+ 1122, 1130, 1138, 1146, 1154, 1162, 1170, 1178, 1187, 1195, 1203, 1211,
+ 1219, 1227, 1235, 1243, 1252, 1260, 1268, 1276, 1284, 1292, 1300, 1308,
+ 1317, 1325, 1333, 1341, 1349, 1357, 1365, 1373, 1382, 1390, 1398, 1406,
+ 1414, 1422, 1430, 1438, 1447, 1455, 1463, 1471, 1479, 1487, 1495, 1503,
+ 1512, 1520, 1528, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1528, 1520, 1512, 1503, 1495, 1487, 1479, 1471, 1463, 1455,
+ 1447, 1438, 1430, 1422, 1414, 1406, 1398, 1390, 1382, 1373, 1365, 1357,
+ 1349, 1341, 1333, 1325, 1317, 1308, 1300, 1292, 1284, 1276, 1268, 1260,
+ 1252, 1243, 1235, 1227, 1219, 1211, 1203, 1195, 1187, 1178, 1170, 1162,
+ 1154, 1146, 1138, 1130, 1122, 1113, 1105, 1097, 1089, 1081, 1073, 1065,
+ 1057, 1048, 1040, 1032, 1024
+} };
+
+const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
+ imgu_css_tcc_r_sqr_lut = { {
+ 32, 44, 64, 92, 128, 180, 256, 364, 512, 628, 724, 808, 888,
+ 956, 1024, 1088, 1144, 1200, 1256, 1304, 1356, 1404, 1448
+} };
+
+const struct imgu_abi_anr_config imgu_css_anr_defaults = {
+ .transform = {
+ .adaptive_treshhold_en = 1,
+ .alpha = { { 13, 13, 13, 13, 0, 0, 0, 0},
+ { 11, 11, 11, 11, 0, 0, 0, 0},
+ { 14, 14, 14, 14, 0, 0, 0, 0} },
+ .beta = { { 24, 24, 24, 24},
+ { 21, 20, 20, 21},
+ { 25, 25, 25, 25} },
+ .color = { { { 166, 173, 149, 166, 161, 146, 145, 173,
+ 145, 150, 141, 149, 145, 141, 142 },
+ { 166, 173, 149, 165, 161, 145, 145, 173,
+ 145, 150, 141, 149, 145, 141, 142 },
+ { 166, 174, 149, 166, 162, 146, 146, 173,
+ 145, 150, 141, 149, 145, 141, 142 },
+ { 166, 173, 149, 165, 161, 145, 145, 173,
+ 146, 150, 141, 149, 145, 141, 142 } },
+ { { 141, 131, 140, 141, 144, 143, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 },
+ { 141, 131, 140, 141, 143, 143, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 },
+ { 141, 131, 141, 141, 144, 144, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 },
+ { 140, 131, 140, 141, 143, 143, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 } },
+ { { 184, 173, 188, 184, 182, 182, 181, 173,
+ 182, 179, 182, 188, 181, 182, 180 },
+ { 184, 173, 188, 184, 183, 182, 181, 173,
+ 182, 178, 182, 188, 181, 182, 180 },
+ { 184, 173, 188, 184, 182, 182, 181, 173,
+ 182, 178, 182, 188, 181, 182, 181 },
+ { 184, 172, 188, 184, 182, 182, 181, 173,
+ 182, 178, 182, 188, 182, 182, 180 } } },
+ .sqrt_lut = { 724, 768, 810, 849, 887, 923, 958, 991, 1024,
+ 1056, 1086, 1116, 1145, 1173, 1201, 1228, 1254,
+ 1280, 1305, 1330, 1355, 1379, 1402, 1425, 1448 },
+ .xreset = -1632,
+ .yreset = -1224,
+ .x_sqr_reset = 2663424,
+ .r_normfactor = 14,
+ .y_sqr_reset = 1498176,
+ .gain_scale = 115
+ },
+ .stitch = {
+ .anr_stitch_en = 1,
+ .pyramid = { { 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
+ { 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
+ { 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
+ { 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
+ { 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
+ { 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
+ { 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3}, { 1 }
+ }
+ }
+};
+
+/* settings for the Auto White Balance filter response */
+const struct ipu3_uapi_awb_fr_config_s imgu_css_awb_fr_defaults = {
+ .grid_cfg = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .x_start = 10,
+ .y_start = 2 | IPU3_UAPI_GRID_Y_START_EN,
+ },
+ .bayer_coeff = { 0, 0, 0, 0, 0, 128 },
+ .bayer_sign = 0,
+ .bayer_nf = 7
+};
+
+/* settings for Auto Exposure */
+const struct ipu3_uapi_ae_grid_config imgu_css_ae_grid_defaults = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .ae_en = 1,
+ .x_start = 0,
+ .y_start = 0,
+};
+
+/* settings for Auto Exposure color correction matrix */
+const struct ipu3_uapi_ae_ccm imgu_css_ae_ccm_defaults = {
+ 256, 256, 256, 256, /* gain_gr/r/b/gb */
+ .mat = { 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128 },
+};
+
+/* settings for Auto Focus */
+const struct ipu3_uapi_af_config_s imgu_css_af_defaults = {
+ .filter_config = {
+ { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
+ { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
+ .y_calc = { 8, 8, 8, 8 },
+ .nf = { X, 7, X, 7 },
+ },
+ .grid_cfg = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .x_start = 10,
+ .y_start = 2 | IPU3_UAPI_GRID_Y_START_EN,
+ },
+};
+
+/* settings for Auto White Balance */
+const struct ipu3_uapi_awb_config_s imgu_css_awb_defaults = {
+ 8191, 8191, 8191, 8191 | /* rgbs_thr_gr/r/gb/b */
+ IPU3_UAPI_AWB_RGBS_THR_B_EN | IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT,
+ .grid = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .x_start = 0,
+ .y_start = 0,
+ },
+};
diff --git a/drivers/staging/media/ipu3/ipu3-tables.h b/drivers/staging/media/ipu3/ipu3-tables.h
new file mode 100644
index 000000000000..9f719c48b432
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-tables.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_TABLES_H
+#define __IPU3_TABLES_H
+
+#include <linux/bitops.h>
+
+#include "ipu3-abi.h"
+
+#define IMGU_BDS_GRANULARITY 32 /* Downscaling granularity */
+#define IMGU_BDS_MIN_SF_INV IMGU_BDS_GRANULARITY
+#define IMGU_BDS_CONFIG_LEN 97
+
+#define IMGU_SCALER_DOWNSCALE_4TAPS_LEN 128
+#define IMGU_SCALER_DOWNSCALE_2TAPS_LEN 64
+#define IMGU_SCALER_FP BIT(31) /* 1.0 in fixed point */
+
+#define IMGU_XNR3_VMEM_LUT_LEN 16
+
+#define IMGU_GDC_LUT_UNIT 4
+#define IMGU_GDC_LUT_LEN 256
+
+struct imgu_css_bds_config {
+ struct imgu_abi_bds_phase_arr hor_phase_arr;
+ struct imgu_abi_bds_phase_arr ver_phase_arr;
+ struct imgu_abi_bds_ptrn_arr ptrn_arr;
+ u16 sample_patrn_length;
+ u8 hor_ds_en;
+ u8 ver_ds_en;
+};
+
+struct imgu_css_xnr3_vmem_defaults {
+ s16 x[IMGU_XNR3_VMEM_LUT_LEN];
+ s16 a[IMGU_XNR3_VMEM_LUT_LEN];
+ s16 b[IMGU_XNR3_VMEM_LUT_LEN];
+ s16 c[IMGU_XNR3_VMEM_LUT_LEN];
+};
+
+extern const struct imgu_css_bds_config
+ imgu_css_bds_configs[IMGU_BDS_CONFIG_LEN];
+extern const s32 imgu_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN];
+extern const s32 imgu_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN];
+extern const s16 imgu_css_gdc_lut[IMGU_GDC_LUT_UNIT][IMGU_GDC_LUT_LEN];
+extern const struct imgu_css_xnr3_vmem_defaults imgu_css_xnr3_vmem_defaults;
+extern const struct ipu3_uapi_bnr_static_config imgu_css_bnr_defaults;
+extern const struct ipu3_uapi_dm_config imgu_css_dm_defaults;
+extern const struct ipu3_uapi_ccm_mat_config imgu_css_ccm_defaults;
+extern const struct ipu3_uapi_gamma_corr_lut imgu_css_gamma_lut;
+extern const struct ipu3_uapi_csc_mat_config imgu_css_csc_defaults;
+extern const struct ipu3_uapi_cds_params imgu_css_cds_defaults;
+extern const struct ipu3_uapi_shd_config_static imgu_css_shd_defaults;
+extern const struct ipu3_uapi_yuvp1_iefd_config imgu_css_iefd_defaults;
+extern const struct ipu3_uapi_yuvp1_yds_config imgu_css_yds_defaults;
+extern const struct ipu3_uapi_yuvp1_chnr_config imgu_css_chnr_defaults;
+extern const struct ipu3_uapi_yuvp1_y_ee_nr_config imgu_css_y_ee_nr_defaults;
+extern const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
+ imgu_css_tcc_gain_pcwl_lut;
+extern const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
+ imgu_css_tcc_r_sqr_lut;
+extern const struct imgu_abi_anr_config imgu_css_anr_defaults;
+extern const struct ipu3_uapi_awb_fr_config_s imgu_css_awb_fr_defaults;
+extern const struct ipu3_uapi_ae_grid_config imgu_css_ae_grid_defaults;
+extern const struct ipu3_uapi_ae_ccm imgu_css_ae_ccm_defaults;
+extern const struct ipu3_uapi_af_config_s imgu_css_af_defaults;
+extern const struct ipu3_uapi_awb_config_s imgu_css_awb_defaults;
+
+#endif
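The constants above also hint at how imgu_css_bds_configs is indexed: the table's scale-factor comments end at 32 / (32 + 96) = 0.25 and IMGU_BDS_CONFIG_LEN is 97, which matches entry i covering a scaling factor of IMGU_BDS_GRANULARITY / (IMGU_BDS_GRANULARITY + i). A hypothetical helper illustrating that relationship; the driver's real selection code is elsewhere and is not part of this patch, and the rounding below is an editorial choice:

/*
 * Hypothetical mapping from an input/output width pair to a BDS config
 * index, assuming entry i covers the scaling factor
 * IMGU_BDS_GRANULARITY / (IMGU_BDS_GRANULARITY + i). Illustration only;
 * the macros are repeated here so the snippet builds on its own.
 */
#include <stdio.h>

#define IMGU_BDS_GRANULARITY	32
#define IMGU_BDS_CONFIG_LEN	97

static int imgu_bds_config_index(unsigned int in_w, unsigned int out_w)
{
	/* round the inverse scale factor up so we never scale too little */
	unsigned int sf_inv = (in_w * IMGU_BDS_GRANULARITY + out_w - 1) / out_w;
	int idx = (int)sf_inv - IMGU_BDS_GRANULARITY;

	if (idx < 0)
		idx = 0;
	if (idx >= IMGU_BDS_CONFIG_LEN)
		idx = IMGU_BDS_CONFIG_LEN - 1;
	return idx;
}

int main(void)
{
	printf("%d\n", imgu_bds_config_index(4096, 1024));	/* 96, i.e. 32/128 */
	printf("%d\n", imgu_bds_config_index(1920, 1920));	/* 0 */
	return 0;
}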
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
new file mode 100644
index 000000000000..2f6041d342f4
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -0,0 +1,1419 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "ipu3.h"
+#include "ipu3-dmamap.h"
+
+/******************** v4l2_subdev_ops ********************/
+
+#define IPU3_RUNNING_MODE_VIDEO 0
+#define IPU3_RUNNING_MODE_STILL 1
+
+static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[imgu_sd->pipe];
+ struct v4l2_rect try_crop = {
+ .top = 0,
+ .left = 0,
+ };
+ unsigned int i;
+
+ try_crop.width =
+ imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.width;
+ try_crop.height =
+ imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.height;
+
+ /* Initialize try_fmt */
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_state_get_format(fh->state, i);
+
+ try_fmt->width = try_crop.width;
+ try_fmt->height = try_crop.height;
+ try_fmt->code = imgu_pipe->nodes[i].pad_fmt.code;
+ try_fmt->field = V4L2_FIELD_NONE;
+ }
+
+ *v4l2_subdev_state_get_crop(fh->state, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_state_get_compose(fh->state, IMGU_NODE_IN) = try_crop;
+
+ return 0;
+}
+
+static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int i;
+ unsigned int node;
+ int r = 0;
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+ struct device *dev = &imgu->pci_dev->dev;
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
+ struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
+ struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ dev_dbg(dev, "%s %d for pipe %u", __func__, enable, pipe);
+	/* grab the control on stream on and release it again on stream off */
+ v4l2_ctrl_grab(imgu_sd->ctrl, enable);
+
+ if (!enable) {
+ imgu_sd->active = false;
+ return 0;
+ }
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ imgu_pipe->queue_enabled[i] = imgu_pipe->nodes[i].enabled;
+
+ /* This is handled specially */
+ imgu_pipe->queue_enabled[IPU3_CSS_QUEUE_PARAMS] = false;
+
+ /* Initialize CSS formats */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ node = imgu_map_node(imgu, i);
+ /* No need to reconfig meta nodes */
+ if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
+ continue;
+ fmts[i] = imgu_pipe->queue_enabled[node] ?
+ &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
+ }
+
+ /* Enable VF output only when VF queue requested by user */
+ css_pipe->vf_output_en = false;
+ if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
+ css_pipe->vf_output_en = true;
+
+ if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ else
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
+
+ dev_dbg(dev, "IPU3 pipe %u pipe_id %u", pipe, css_pipe->pipe_id);
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
+ rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
+
+ r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
+ if (r) {
+ dev_err(dev, "failed to set initial formats pipe %u with (%d)",
+ pipe, r);
+ return r;
+ }
+
+ imgu_sd->active = true;
+
+ return 0;
+}
+
+static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+ struct imgu_media_pipe *imgu_pipe;
+ u32 pad = fmt->pad;
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ fmt->format = imgu_pipe->nodes[pad].pad_fmt;
+ } else {
+ mf = v4l2_subdev_state_get_format(sd_state, pad);
+ fmt->format = *mf;
+ }
+
+ return 0;
+}
+
+static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ struct v4l2_mbus_framefmt *mf;
+ u32 pad = fmt->pad;
+ unsigned int pipe = imgu_sd->pipe;
+
+ dev_dbg(&imgu->pci_dev->dev, "set subdev %u pad %u fmt to [%ux%u]",
+ pipe, pad, fmt->format.width, fmt->format.height);
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ mf = v4l2_subdev_state_get_format(sd_state, pad);
+ else
+ mf = &imgu_pipe->nodes[pad].pad_fmt;
+
+ fmt->format.code = mf->code;
+ /* Clamp the w and h based on the hardware capabilities */
+ if (imgu_sd->subdev_pads[pad].flags & MEDIA_PAD_FL_SOURCE) {
+ fmt->format.width = clamp(fmt->format.width,
+ IPU3_OUTPUT_MIN_WIDTH,
+ IPU3_OUTPUT_MAX_WIDTH);
+ fmt->format.height = clamp(fmt->format.height,
+ IPU3_OUTPUT_MIN_HEIGHT,
+ IPU3_OUTPUT_MAX_HEIGHT);
+ } else {
+ fmt->format.width = clamp(fmt->format.width,
+ IPU3_INPUT_MIN_WIDTH,
+ IPU3_INPUT_MAX_WIDTH);
+ fmt->format.height = clamp(fmt->format.height,
+ IPU3_INPUT_MIN_HEIGHT,
+ IPU3_INPUT_MAX_HEIGHT);
+ }
+
+ *mf = fmt->format;
+
+ return 0;
+}
+
+static struct v4l2_rect *
+imgu_subdev_get_crop(struct imgu_v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_crop(sd_state, pad);
+ else
+ return &sd->rect.eff;
+}
+
+static struct v4l2_rect *
+imgu_subdev_get_compose(struct imgu_v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_state_get_compose(sd_state, pad);
+ else
+ return &sd->rect.bds;
+}
+
+static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct imgu_v4l2_subdev *imgu_sd =
+ container_of(sd, struct imgu_v4l2_subdev, subdev);
+
+ if (sel->pad != IMGU_NODE_IN)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
+ sel->which);
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = *imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
+ sel->which);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd =
+ container_of(sd, struct imgu_v4l2_subdev, subdev);
+ struct v4l2_rect *rect;
+
+ dev_dbg(&imgu->pci_dev->dev,
+ "set subdev %u sel which %u target 0x%4x rect [%ux%u]",
+ imgu_sd->pipe, sel->which, sel->target,
+ sel->r.width, sel->r.height);
+
+ if (sel->pad != IMGU_NODE_IN)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ rect = imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
+ sel->which);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ rect = imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
+ sel->which);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *rect = sel->r;
+ return 0;
+}
+
+/******************** media_entity_operations ********************/
+
+static int imgu_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct v4l2_subdev *sd = container_of(entity, struct v4l2_subdev,
+ entity);
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+ u32 pad = local->index;
+
+ WARN_ON(pad >= IMGU_NODE_NUM);
+
+ dev_dbg(&imgu->pci_dev->dev, "pipe %u pad %u is %s", pipe, pad,
+ str_enabled_disabled(flags & MEDIA_LNK_FL_ENABLED));
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ imgu_pipe->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;
+
+ /* enable input node to enable the pipe */
+ if (pad != IMGU_NODE_IN)
+ return 0;
+
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ __set_bit(pipe, imgu->css.enabled_pipes);
+ else
+ __clear_bit(pipe, imgu->css.enabled_pipes);
+
+ dev_dbg(&imgu->pci_dev->dev, "pipe %u is %s", pipe,
+ str_enabled_disabled(flags & MEDIA_LNK_FL_ENABLED));
+
+ return 0;
+}
+
+/******************** vb2_ops ********************/
+
+static int imgu_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
+ struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
+ struct imgu_buffer *buf = container_of(vb,
+ struct imgu_buffer, vid_buf.vbb.vb2_buf);
+ struct imgu_video_device *node =
+ container_of(vb->vb2_queue, struct imgu_video_device, vbq);
+ unsigned int queue = imgu_node_to_queue(node->id);
+
+ if (queue == IPU3_CSS_QUEUE_PARAMS)
+ return 0;
+
+ return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
+}
+
+/* Called when each buffer is freed */
+static void imgu_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
+ struct imgu_buffer *buf = container_of(vb,
+ struct imgu_buffer, vid_buf.vbb.vb2_buf);
+ struct imgu_video_device *node =
+ container_of(vb->vb2_queue, struct imgu_video_device, vbq);
+ unsigned int queue = imgu_node_to_queue(node->id);
+
+ if (queue == IPU3_CSS_QUEUE_PARAMS)
+ return;
+
+ imgu_dmamap_unmap(imgu, &buf->map);
+}
+
+/* Transfer buffer ownership to the driver */
+static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
+ struct imgu_video_device *node =
+ container_of(vb->vb2_queue, struct imgu_video_device, vbq);
+ unsigned int queue = imgu_node_to_queue(node->id);
+ struct imgu_buffer *buf = container_of(vb, struct imgu_buffer,
+ vid_buf.vbb.vb2_buf);
+ unsigned long need_bytes;
+ unsigned long payload = vb2_get_plane_payload(vb, 0);
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_META_CAPTURE ||
+ vb->vb2_queue->type == V4L2_BUF_TYPE_META_OUTPUT)
+ need_bytes = node->vdev_fmt.fmt.meta.buffersize;
+ else
+ need_bytes = node->vdev_fmt.fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ if (queue == IPU3_CSS_QUEUE_PARAMS && payload && payload < need_bytes) {
+ dev_err(&imgu->pci_dev->dev, "invalid data size for params.");
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ mutex_lock(&imgu->lock);
+ if (queue != IPU3_CSS_QUEUE_PARAMS)
+ imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
+
+ list_add_tail(&buf->vid_buf.list, &node->buffers);
+ mutex_unlock(&imgu->lock);
+
+ vb2_set_plane_payload(vb, 0, need_bytes);
+
+ mutex_lock(&imgu->streaming_lock);
+ if (imgu->streaming)
+ imgu_queue_buffers(imgu, false, node->pipe);
+ mutex_unlock(&imgu->streaming_lock);
+
+ dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__,
+ node->pipe, node->id);
+}
+
+static int imgu_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct imgu_video_device *node =
+ container_of(vq, struct imgu_video_device, vbq);
+ const struct v4l2_format *fmt = &node->vdev_fmt;
+ unsigned int size;
+
+ *num_buffers = clamp_val(*num_buffers, 1, VB2_MAX_FRAME);
+ alloc_devs[0] = &imgu->pci_dev->dev;
+
+ if (vq->type == V4L2_BUF_TYPE_META_CAPTURE ||
+ vq->type == V4L2_BUF_TYPE_META_OUTPUT)
+ size = fmt->fmt.meta.buffersize;
+ else
+ size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ if (*num_planes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *num_planes = 1;
+ sizes[0] = size;
+
+ /* Initialize buffer queue */
+ INIT_LIST_HEAD(&node->buffers);
+
+ return 0;
+}
+
+/* Check if all enabled video nodes are streaming, skipping the given exception */
+static bool imgu_all_nodes_streaming(struct imgu_device *imgu,
+ struct imgu_video_device *except)
+{
+ unsigned int i, pipe, p;
+ struct imgu_video_device *node;
+ struct device *dev = &imgu->pci_dev->dev;
+
+ pipe = except->pipe;
+ if (!test_bit(pipe, imgu->css.enabled_pipes)) {
+ dev_warn(&imgu->pci_dev->dev,
+ "pipe %u link is not ready yet", pipe);
+ return false;
+ }
+
+ for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ node = &imgu->imgu_pipe[p].nodes[i];
+ dev_dbg(dev, "%s pipe %u queue %u name %s enabled = %u",
+ __func__, p, i, node->name, node->enabled);
+ if (node == except)
+ continue;
+ if (node->enabled && !vb2_start_streaming_called(&node->vbq))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void imgu_return_all_buffers(struct imgu_device *imgu,
+ struct imgu_video_device *node,
+ enum vb2_buffer_state state)
+{
+ struct imgu_vb2_buffer *b, *b0;
+
+ /* Return all buffers */
+ mutex_lock(&imgu->lock);
+ list_for_each_entry_safe(b, b0, &node->buffers, list) {
+ list_del(&b->list);
+ vb2_buffer_done(&b->vbb.vb2_buf, state);
+ }
+ mutex_unlock(&imgu->lock);
+}
+
+static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node =
+ container_of(vq, struct imgu_video_device, vbq);
+ int r;
+ unsigned int pipe;
+
+ dev_dbg(dev, "%s node name %s pipe %u id %u", __func__,
+ node->name, node->pipe, node->id);
+
+ mutex_lock(&imgu->streaming_lock);
+ if (imgu->streaming) {
+ r = -EBUSY;
+ mutex_unlock(&imgu->streaming_lock);
+ goto fail_return_bufs;
+ }
+ mutex_unlock(&imgu->streaming_lock);
+
+ if (!node->enabled) {
+ dev_err(dev, "IMGU node is not enabled");
+ r = -EINVAL;
+ goto fail_return_bufs;
+ }
+
+ pipe = node->pipe;
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ atomic_set(&node->sequence, 0);
+ r = video_device_pipeline_start(&node->vdev, &imgu_pipe->pipeline);
+ if (r < 0)
+ goto fail_return_bufs;
+
+ if (!imgu_all_nodes_streaming(imgu, node))
+ return 0;
+
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = v4l2_subdev_call(&imgu->imgu_pipe[pipe].imgu_sd.subdev,
+ video, s_stream, 1);
+ if (r < 0)
+ goto fail_stop_pipeline;
+ }
+
+ /* Start streaming of the whole pipeline now */
+ dev_dbg(dev, "IMGU streaming is ready to start");
+ mutex_lock(&imgu->streaming_lock);
+ r = imgu_s_stream(imgu, true);
+ if (!r)
+ imgu->streaming = true;
+ mutex_unlock(&imgu->streaming_lock);
+
+ return 0;
+
+fail_stop_pipeline:
+ video_device_pipeline_stop(&node->vdev);
+fail_return_bufs:
+ imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
+
+ return r;
+}
+
+static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node =
+ container_of(vq, struct imgu_video_device, vbq);
+ int r;
+ unsigned int pipe;
+ bool stop_streaming = false;
+
+	/* Verify that the node has been set up with imgu_v4l2_node_setup() */
+ WARN_ON(!node->enabled);
+
+ pipe = node->pipe;
+ dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id);
+
+ /*
+ * When the first node of a streaming setup is stopped, the entire
+ * pipeline needs to stop before individual nodes are disabled.
+ * Perform the inverse of the initial setup.
+ *
+ * Part 1 - s_stream on the entire pipeline
+ */
+ mutex_lock(&imgu->streaming_lock);
+ if (imgu->streaming) {
+ /* Yes, really stop streaming now */
+ dev_dbg(dev, "IMGU streaming is ready to stop");
+ r = imgu_s_stream(imgu, false);
+ if (!r)
+ imgu->streaming = false;
+ stop_streaming = true;
+ }
+ mutex_unlock(&imgu->streaming_lock);
+
+ /* Part 2 - s_stream on subdevs
+ *
+ * If we call s_stream multiple times, Linux v6.7's call_s_stream()
+ * WARNs and aborts. Thus, disable all pipes at once, and only once.
+ */
+ if (stop_streaming) {
+ for_each_set_bit(pipe, imgu->css.enabled_pipes,
+ IMGU_MAX_PIPE_NUM) {
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev,
+ video, s_stream, 0);
+ if (r)
+ dev_err(&imgu->pci_dev->dev,
+ "failed to stop subdev streaming\n");
+ }
+ }
+
+ /* Part 3 - individual node teardown */
+ video_device_pipeline_stop(&node->vdev);
+ imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
+}
+
+/******************** v4l2_ioctl_ops ********************/
+
+#define VID_CAPTURE 0
+#define VID_OUTPUT 1
+#define DEF_VID_CAPTURE 0
+#define DEF_VID_OUTPUT 1
+
+struct imgu_fmt {
+ u32 fourcc;
+	u16 type; /* VID_CAPTURE or VID_OUTPUT, not both */
+};
+
+/* format descriptions for the capture and output video queues */
+static const struct imgu_fmt formats[] = {
+ { V4L2_PIX_FMT_NV12, VID_CAPTURE },
+ { V4L2_PIX_FMT_IPU3_SGRBG10, VID_OUTPUT },
+ { V4L2_PIX_FMT_IPU3_SBGGR10, VID_OUTPUT },
+ { V4L2_PIX_FMT_IPU3_SGBRG10, VID_OUTPUT },
+ { V4L2_PIX_FMT_IPU3_SRGGB10, VID_OUTPUT },
+};
+
+/* Find the first matched format, return default if not found */
+static const struct imgu_fmt *find_format(struct v4l2_format *f, u32 type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == type)
+ return &formats[i];
+ }
+
+ return type == VID_CAPTURE ? &formats[DEF_VID_CAPTURE] :
+ &formats[DEF_VID_OUTPUT];
+}
+
+static int imgu_vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct imgu_device *imgu = video_drvdata(file);
+
+ strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ pci_name(imgu->pci_dev));
+
+ return 0;
+}
+
+static int enum_fmts(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, j;
+
+ if (f->mbus_code != 0 && f->mbus_code != MEDIA_BUS_FMT_FIXED)
+ return -EINVAL;
+
+ for (i = j = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].type == type) {
+ if (j == f->index)
+ break;
+ ++j;
+ }
+ }
+
+ if (i < ARRAY_SIZE(formats)) {
+ f->pixelformat = formats[i].fourcc;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ return enum_fmts(f, VID_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ return enum_fmts(f, VID_OUTPUT);
+}
+
+/* Always propagate the format from the CIO2 subdev forward */
+static int imgu_vidioc_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
+
+ f->fmt = node->vdev_fmt.fmt;
+
+ return 0;
+}
+
+/*
+ * Set input/output format. Unless it is just a try, this also resets
+ * selections (ie. effective and BDS resolutions) to defaults.
+ */
+static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ struct v4l2_format *f, bool try)
+{
+ struct device *dev = &imgu->pci_dev->dev;
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
+ struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
+ struct v4l2_mbus_framefmt pad_fmt;
+ unsigned int i, css_q;
+ int ret;
+ struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+ struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
+
+ dev_dbg(dev, "set fmt node [%u][%u](try = %u)", pipe, node, try);
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ dev_dbg(dev, "IMGU pipe %u node %u enabled = %u",
+ pipe, i, imgu_pipe->nodes[i].enabled);
+
+ if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
+ css_pipe->vf_output_en = true;
+
+ if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ else
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
+
+ dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
+
+ css_q = imgu_node_to_queue(node);
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ unsigned int inode = imgu_map_node(imgu, i);
+
+ /* Skip the meta node */
+ if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
+ continue;
+
+ /* CSS expects some format on OUT queue */
+ if (i != IPU3_CSS_QUEUE_OUT &&
+ !imgu_pipe->nodes[inode].enabled && !try) {
+ fmts[i] = NULL;
+ continue;
+ }
+
+ if (i == css_q) {
+ fmts[i] = &f->fmt.pix_mp;
+ continue;
+ }
+
+ if (try) {
+ fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
+ sizeof(struct v4l2_pix_format_mplane),
+ GFP_KERNEL);
+ if (!fmts[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
+ }
+ }
+
+ if (!try) {
+		/* effective and BDS resolutions were set earlier via imgu_s_sel */
+ struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
+ rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
+
+		/* assume the pad format was already set via the subdev s_fmt */
+ pad_fmt = imgu_pipe->nodes[IMGU_NODE_IN].pad_fmt;
+ rects[IPU3_CSS_RECT_GDC]->width = pad_fmt.width;
+ rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
+ }
+
+ if (!fmts[css_q]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (try)
+ ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
+ else
+ ret = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
+
+	/* on success, ret is the index of the selected binary in the firmware blob */
+ if (ret < 0)
+ goto out;
+
+ /*
+ * imgu doesn't set the node to the value given by user
+ * before we return success from this function, so set it here.
+ */
+ if (!try)
+ imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
+
+out:
+ if (try) {
+ for (i = 0; i < IPU3_CSS_QUEUES; i++)
+ if (i != css_q)
+ kfree(fmts[i]);
+ }
+
+ return ret;
+}
+
+static int imgu_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct imgu_fmt *fmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fmt = find_format(f, VID_CAPTURE);
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt = find_format(f, VID_OUTPUT);
+ else
+ return -EINVAL;
+
+ pixm->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int imgu_vidioc_try_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imgu_device *imgu = video_drvdata(file);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ int r;
+
+ dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
+ pix_mp->width, pix_mp->height, node->id);
+
+ r = imgu_try_fmt(file, fh, f);
+ if (r)
+ return r;
+
+ return imgu_fmt(imgu, node->pipe, node->id, f, true);
+}
+
+static int imgu_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct imgu_device *imgu = video_drvdata(file);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ int r;
+
+ dev_dbg(dev, "%s [%ux%u] for node %u\n", __func__,
+ pix_mp->width, pix_mp->height, node->id);
+
+ r = imgu_try_fmt(file, fh, f);
+ if (r)
+ return r;
+
+ return imgu_fmt(imgu, node->pipe, node->id, f, false);
+}
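
For illustration, a user-space client would negotiate a format against the try_fmt/s_fmt handlers above roughly as follows; this is a minimal sketch, not part of the patch, and the device node path passed by the caller is a placeholder.

/*
 * Minimal user-space sketch (not part of this patch): negotiate NV12 on an
 * ImgU capture node via the VIDIOC_TRY_FMT/VIDIOC_S_FMT handlers above.
 */
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int imgu_set_capture_fmt(const char *devnode, unsigned int w,
				unsigned int h)
{
	struct v4l2_format f;
	int fd, ret;

	fd = open(devnode, O_RDWR);
	if (fd < 0)
		return -errno;

	memset(&f, 0, sizeof(f));
	f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	f.fmt.pix_mp.width = w;
	f.fmt.pix_mp.height = h;
	f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
	f.fmt.pix_mp.num_planes = 1;

	/* Let the driver adjust the request without applying it */
	ret = ioctl(fd, VIDIOC_TRY_FMT, &f);
	if (!ret)
		/* Apply the (possibly adjusted) format */
		ret = ioctl(fd, VIDIOC_S_FMT, &f);

	close(fd);
	return ret;
}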
+
+struct imgu_meta_fmt {
+ __u32 fourcc;
+ char *name;
+};
+
+/* From drivers/media/v4l2-core/v4l2-ioctl.c */
+static const struct imgu_meta_fmt meta_fmts[] = {
+ { V4L2_META_FMT_IPU3_PARAMS, "IPU3 processing parameters" },
+ { V4L2_META_FMT_IPU3_STAT_3A, "IPU3 3A statistics" },
+};
+
+static int imgu_meta_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
+ unsigned int i = fmt->type == V4L2_BUF_TYPE_META_OUTPUT ? 0 : 1;
+
+ /* Each node is dedicated to only one meta format */
+ if (fmt->index > 0 || fmt->type != node->vbq.type)
+ return -EINVAL;
+
+ if (fmt->mbus_code != 0 && fmt->mbus_code != MEDIA_BUS_FMT_FIXED)
+ return -EINVAL;
+
+ strscpy(fmt->description, meta_fmts[i].name, sizeof(fmt->description));
+ fmt->pixelformat = meta_fmts[i].fourcc;
+
+ return 0;
+}
+
+static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
+
+ if (f->type != node->vbq.type)
+ return -EINVAL;
+
+ f->fmt = node->vdev_fmt.fmt;
+
+ return 0;
+}
+
+/******************** function pointers ********************/
+
+static const struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
+ .open = imgu_subdev_open,
+};
+
+static const struct v4l2_subdev_core_ops imgu_subdev_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops imgu_subdev_video_ops = {
+ .s_stream = imgu_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imgu_subdev_pad_ops = {
+ .link_validate = v4l2_subdev_link_validate_default,
+ .get_fmt = imgu_subdev_get_fmt,
+ .set_fmt = imgu_subdev_set_fmt,
+ .get_selection = imgu_subdev_get_selection,
+ .set_selection = imgu_subdev_set_selection,
+};
+
+static const struct v4l2_subdev_ops imgu_subdev_ops = {
+ .core = &imgu_subdev_core_ops,
+ .video = &imgu_subdev_video_ops,
+ .pad = &imgu_subdev_pad_ops,
+};
+
+static const struct media_entity_operations imgu_media_ops = {
+ .link_setup = imgu_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/****************** vb2_ops of the Q ********************/
+
+static const struct vb2_ops imgu_vb2_ops = {
+ .buf_init = imgu_vb2_buf_init,
+ .buf_cleanup = imgu_vb2_buf_cleanup,
+ .buf_queue = imgu_vb2_buf_queue,
+ .queue_setup = imgu_vb2_queue_setup,
+ .start_streaming = imgu_vb2_start_streaming,
+ .stop_streaming = imgu_vb2_stop_streaming,
+};
+
+/****************** v4l2_file_operations *****************/
+
+static const struct v4l2_file_operations imgu_v4l2_fops = {
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/******************** v4l2_ioctl_ops ********************/
+
+static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
+ .vidioc_querycap = imgu_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = imgu_vidioc_g_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = imgu_vidioc_s_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = imgu_vidioc_try_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = imgu_vidioc_g_fmt,
+ .vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
+ .vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,
+
+ /* buffer queue management */
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+};
+
+static const struct v4l2_ioctl_ops imgu_v4l2_meta_ioctl_ops = {
+ .vidioc_querycap = imgu_vidioc_querycap,
+
+ /* meta capture */
+ .vidioc_enum_fmt_meta_cap = imgu_meta_enum_format,
+ .vidioc_g_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
+ .vidioc_s_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
+ .vidioc_try_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
+
+ /* meta output */
+ .vidioc_enum_fmt_meta_out = imgu_meta_enum_format,
+ .vidioc_g_fmt_meta_out = imgu_vidioc_g_meta_fmt,
+ .vidioc_s_fmt_meta_out = imgu_vidioc_g_meta_fmt,
+ .vidioc_try_fmt_meta_out = imgu_vidioc_g_meta_fmt,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+};
+
+static int imgu_sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imgu_v4l2_subdev *imgu_sd =
+ container_of(ctrl->handler, struct imgu_v4l2_subdev, ctrl_handler);
+ struct imgu_device *imgu = v4l2_get_subdevdata(&imgu_sd->subdev);
+ struct device *dev = &imgu->pci_dev->dev;
+
+ dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %u",
+ ctrl->val, ctrl->id, imgu_sd->pipe);
+
+ switch (ctrl->id) {
+ case V4L2_CID_INTEL_IPU3_MODE:
+ atomic_set(&imgu_sd->running_mode, ctrl->val);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops imgu_subdev_ctrl_ops = {
+ .s_ctrl = imgu_sd_s_ctrl,
+};
+
+static const char * const imgu_ctrl_mode_strings[] = {
+ "Video mode",
+ "Still mode",
+};
+
+static const struct v4l2_ctrl_config imgu_subdev_ctrl_mode = {
+ .ops = &imgu_subdev_ctrl_ops,
+ .id = V4L2_CID_INTEL_IPU3_MODE,
+ .name = "IPU3 Pipe Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(imgu_ctrl_mode_strings) - 1,
+ .def = IPU3_RUNNING_MODE_VIDEO,
+ .qmenu = imgu_ctrl_mode_strings,
+};
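
As a hedged sketch (not part of the patch), user space could select the pipe mode defined above on the ImgU subdev node along these lines; the uAPI header name and the device path are assumptions.

/*
 * Minimal user-space sketch: select "Still mode" (menu index 1 in
 * imgu_ctrl_mode_strings above) on an ImgU subdev node.
 * V4L2_CID_INTEL_IPU3_MODE is assumed to come from the IPU3 uAPI header.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/intel-ipu3.h>	/* assumed location of V4L2_CID_INTEL_IPU3_MODE */

static int imgu_set_still_mode(const char *subdev_node)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_INTEL_IPU3_MODE,
		.value = 1,	/* "Still mode" */
	};
	int fd, ret;

	fd = open(subdev_node, O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return ret;
}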
+
+/******************** Framework registration ********************/
+
+/* helper function to configure a node's video properties */
+static void imgu_node_to_v4l2(u32 node, struct video_device *vdev,
+ struct v4l2_format *f)
+{
+ u32 cap;
+
+ /* Should not happen */
+ WARN_ON(node >= IMGU_NODE_NUM);
+
+ switch (node) {
+ case IMGU_NODE_IN:
+ cap = V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
+ break;
+ case IMGU_NODE_PARAMS:
+ cap = V4L2_CAP_META_OUTPUT;
+ f->type = V4L2_BUF_TYPE_META_OUTPUT;
+ f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_PARAMS;
+ vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
+ imgu_css_meta_fmt_set(&f->fmt.meta);
+ break;
+ case IMGU_NODE_STAT_3A:
+ cap = V4L2_CAP_META_CAPTURE;
+ f->type = V4L2_BUF_TYPE_META_CAPTURE;
+ f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_STAT_3A;
+ vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
+ imgu_css_meta_fmt_set(&f->fmt.meta);
+ break;
+ default:
+ cap = V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
+ }
+
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | cap;
+}
+
+static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
+ struct imgu_v4l2_subdev *imgu_sd,
+ unsigned int pipe)
+{
+ int i, r;
+ struct v4l2_ctrl_handler *hdl = &imgu_sd->ctrl_handler;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* Initialize subdev media entity */
+ imgu_sd->subdev.entity.ops = &imgu_media_ops;
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+ r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
+ imgu_sd->subdev_pads);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+			"failed to initialize subdev media entity (%d)\n", r);
+ return r;
+ }
+
+ /* Initialize subdev */
+ v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
+ imgu_sd->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_STATISTICS;
+ imgu_sd->subdev.internal_ops = &imgu_subdev_internal_ops;
+ imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
+ "%s %u", IMGU_NAME, pipe);
+ v4l2_set_subdevdata(&imgu_sd->subdev, imgu);
+ atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
+ v4l2_ctrl_handler_init(hdl, 1);
+ imgu_sd->subdev.ctrl_handler = hdl;
+ imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &imgu_subdev_ctrl_mode, NULL);
+ if (hdl->error) {
+ r = hdl->error;
+ dev_err(&imgu->pci_dev->dev,
+ "failed to create subdev v4l2 ctrl with err %d", r);
+ goto fail_subdev;
+ }
+ r = v4l2_device_register_subdev(&imgu->v4l2_dev, &imgu_sd->subdev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+			"failed to initialize subdev (%d)\n", r);
+ goto fail_subdev;
+ }
+
+ imgu_sd->pipe = pipe;
+ return 0;
+
+fail_subdev:
+ v4l2_ctrl_handler_free(imgu_sd->subdev.ctrl_handler);
+ media_entity_cleanup(&imgu_sd->subdev.entity);
+
+ return r;
+}
+
+static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
+ int node_num)
+{
+ int r;
+ u32 flags;
+ struct v4l2_mbus_framefmt def_bus_fmt = { 0 };
+ struct v4l2_pix_format_mplane def_pix_fmt = { 0 };
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+ struct v4l2_subdev *sd = &imgu_pipe->imgu_sd.subdev;
+ struct imgu_video_device *node = &imgu_pipe->nodes[node_num];
+ struct video_device *vdev = &node->vdev;
+ struct vb2_queue *vbq = &node->vbq;
+
+ /* Initialize formats to default values */
+ def_bus_fmt.width = 1920;
+ def_bus_fmt.height = 1080;
+ def_bus_fmt.code = MEDIA_BUS_FMT_FIXED;
+ def_bus_fmt.field = V4L2_FIELD_NONE;
+ def_bus_fmt.colorspace = V4L2_COLORSPACE_RAW;
+ def_bus_fmt.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ def_bus_fmt.quantization = V4L2_QUANTIZATION_DEFAULT;
+ def_bus_fmt.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ def_pix_fmt.width = def_bus_fmt.width;
+ def_pix_fmt.height = def_bus_fmt.height;
+ def_pix_fmt.field = def_bus_fmt.field;
+ def_pix_fmt.num_planes = 1;
+ def_pix_fmt.plane_fmt[0].bytesperline =
+ imgu_bytesperline(def_pix_fmt.width,
+ IMGU_ABI_FRAME_FORMAT_RAW_PACKED);
+ def_pix_fmt.plane_fmt[0].sizeimage =
+ def_pix_fmt.height * def_pix_fmt.plane_fmt[0].bytesperline;
+ def_pix_fmt.flags = 0;
+ def_pix_fmt.colorspace = def_bus_fmt.colorspace;
+ def_pix_fmt.ycbcr_enc = def_bus_fmt.ycbcr_enc;
+ def_pix_fmt.quantization = def_bus_fmt.quantization;
+ def_pix_fmt.xfer_func = def_bus_fmt.xfer_func;
+
+ /* Initialize miscellaneous variables */
+ mutex_init(&node->lock);
+ INIT_LIST_HEAD(&node->buffers);
+
+ /* Initialize formats to default values */
+ node->pad_fmt = def_bus_fmt;
+ node->id = node_num;
+ node->pipe = pipe;
+ imgu_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
+ if (node->vdev_fmt.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+ node->vdev_fmt.type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ def_pix_fmt.pixelformat = node->output ?
+ V4L2_PIX_FMT_IPU3_SGRBG10 :
+ V4L2_PIX_FMT_NV12;
+ node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
+ }
+
+ /* Initialize media entities */
+ node->vdev_pad.flags = node->output ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+ vdev->entity.ops = NULL;
+ r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
+ if (r) {
+		dev_err(dev, "failed to initialize media entity (%d)\n", r);
+ mutex_destroy(&node->lock);
+ return r;
+ }
+
+ /* Initialize vbq */
+ vbq->type = node->vdev_fmt.type;
+ vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
+ vbq->ops = &imgu_vb2_ops;
+ vbq->mem_ops = &vb2_dma_sg_memops;
+ if (imgu->buf_struct_size <= 0)
+ imgu->buf_struct_size =
+ sizeof(struct imgu_vb2_buffer);
+ vbq->buf_struct_size = imgu->buf_struct_size;
+ vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ /* can streamon w/o buffers */
+ vbq->min_queued_buffers = 0;
+ vbq->drv_priv = imgu;
+ vbq->lock = &node->lock;
+ r = vb2_queue_init(vbq);
+ if (r) {
+ dev_err(dev, "failed to initialize video queue (%d)", r);
+ media_entity_cleanup(&vdev->entity);
+ return r;
+ }
+
+ /* Initialize vdev */
+ snprintf(vdev->name, sizeof(vdev->name), "%s %u %s",
+ IMGU_NAME, pipe, node->name);
+ vdev->release = video_device_release_empty;
+ vdev->fops = &imgu_v4l2_fops;
+ vdev->lock = &node->lock;
+ vdev->v4l2_dev = &imgu->v4l2_dev;
+ vdev->queue = &node->vbq;
+ vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
+ video_set_drvdata(vdev, imgu);
+ r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (r) {
+ dev_err(dev, "failed to register video device (%d)", r);
+ media_entity_cleanup(&vdev->entity);
+ return r;
+ }
+
+ /* Create link between video node and the subdev pad */
+ flags = 0;
+ if (node->enabled)
+ flags |= MEDIA_LNK_FL_ENABLED;
+ if (node->output) {
+ r = media_create_pad_link(&vdev->entity, 0, &sd->entity,
+ node_num, flags);
+ } else {
+ if (node->id == IMGU_NODE_OUT) {
+ flags |= MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+ node->enabled = true;
+ }
+
+ r = media_create_pad_link(&sd->entity, node_num, &vdev->entity,
+ 0, flags);
+ }
+ if (r) {
+ dev_err(dev, "failed to create pad link (%d)", r);
+ video_unregister_device(vdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void imgu_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
+ unsigned int pipe, int node)
+{
+ int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < node; i++) {
+ video_unregister_device(&imgu_pipe->nodes[i].vdev);
+ media_entity_cleanup(&imgu_pipe->nodes[i].vdev.entity);
+ mutex_destroy(&imgu_pipe->nodes[i].lock);
+ }
+}
+
+static int imgu_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
+{
+ int i;
+
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ int r = imgu_v4l2_node_setup(imgu, pipe, i);
+
+ if (r) {
+ imgu_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
+ return r;
+ }
+ }
+ return 0;
+}
+
+static void imgu_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
+{
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[i];
+
+ v4l2_device_unregister_subdev(&imgu_pipe->imgu_sd.subdev);
+ v4l2_ctrl_handler_free(imgu_pipe->imgu_sd.subdev.ctrl_handler);
+ media_entity_cleanup(&imgu_pipe->imgu_sd.subdev.entity);
+}
+
+static void imgu_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe; i++) {
+ imgu_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM);
+ imgu_v4l2_subdev_cleanup(imgu, i);
+ }
+}
+
+static int imgu_v4l2_register_pipes(struct imgu_device *imgu)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ int i, r;
+
+ for (i = 0; i < IMGU_MAX_PIPE_NUM; i++) {
+ imgu_pipe = &imgu->imgu_pipe[i];
+ r = imgu_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register subdev%u ret (%d)\n", i, r);
+ goto pipes_cleanup;
+ }
+ r = imgu_v4l2_nodes_setup_pipe(imgu, i);
+ if (r) {
+ imgu_v4l2_subdev_cleanup(imgu, i);
+ goto pipes_cleanup;
+ }
+ }
+
+ return 0;
+
+pipes_cleanup:
+ imgu_v4l2_cleanup_pipes(imgu, i);
+ return r;
+}
+
+int imgu_v4l2_register(struct imgu_device *imgu)
+{
+ int r;
+
+ /* Initialize miscellaneous variables */
+ imgu->streaming = false;
+
+ /* Set up media device */
+ media_device_pci_init(&imgu->media_dev, imgu->pci_dev, IMGU_NAME);
+
+ /* Set up v4l2 device */
+ imgu->v4l2_dev.mdev = &imgu->media_dev;
+ imgu->v4l2_dev.ctrl_handler = NULL;
+ r = v4l2_device_register(&imgu->pci_dev->dev, &imgu->v4l2_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register V4L2 device (%d)\n", r);
+ goto fail_v4l2_dev;
+ }
+
+ r = imgu_v4l2_register_pipes(imgu);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register pipes (%d)\n", r);
+ goto fail_v4l2_pipes;
+ }
+
+ r = v4l2_device_register_subdev_nodes(&imgu->v4l2_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register subdevs (%d)\n", r);
+ goto fail_subdevs;
+ }
+
+ r = media_device_register(&imgu->media_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register media device (%d)\n", r);
+ goto fail_subdevs;
+ }
+
+ return 0;
+
+fail_subdevs:
+ imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+fail_v4l2_pipes:
+ v4l2_device_unregister(&imgu->v4l2_dev);
+fail_v4l2_dev:
+ media_device_cleanup(&imgu->media_dev);
+
+ return r;
+}
+
+int imgu_v4l2_unregister(struct imgu_device *imgu)
+{
+ media_device_unregister(&imgu->media_dev);
+ imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+ v4l2_device_unregister(&imgu->v4l2_dev);
+ media_device_cleanup(&imgu->media_dev);
+
+ return 0;
+}
+
+void imgu_v4l2_buffer_done(struct vb2_buffer *vb,
+ enum vb2_buffer_state state)
+{
+ struct imgu_vb2_buffer *b =
+ container_of(vb, struct imgu_vb2_buffer, vbb.vb2_buf);
+
+ list_del(&b->list);
+ vb2_buffer_done(&b->vbb.vb2_buf, state);
+}
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
new file mode 100644
index 000000000000..bdf5a457752b
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 - 2018 Intel Corporation
+ * Copyright 2017 Google LLC
+ *
+ * Based on Intel IPU4 driver.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu3.h"
+#include "ipu3-css-fw.h"
+#include "ipu3-dmamap.h"
+#include "ipu3-mmu.h"
+
+#define IMGU_PCI_ID 0x1919
+#define IMGU_PCI_BAR 0
+#define IMGU_DMA_MASK DMA_BIT_MASK(39)
+#define IMGU_MAX_QUEUE_DEPTH (2 + 2)
+
+/*
+ * Pre-allocated buffer sizes for IMGU dummy buffers. These values should be
+ * tuned large enough to avoid buffer re-allocation while streaming, which in
+ * turn lowers streaming latency.
+ */
+#define CSS_QUEUE_IN_BUF_SIZE 0
+#define CSS_QUEUE_PARAMS_BUF_SIZE 0
+#define CSS_QUEUE_OUT_BUF_SIZE (4160 * 3120 * 12 / 8)
+#define CSS_QUEUE_VF_BUF_SIZE (1920 * 1080 * 12 / 8)
+#define CSS_QUEUE_STAT_3A_BUF_SIZE sizeof(struct ipu3_uapi_stats_3a)
+
+static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
+ [IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
+ [IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
+ [IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
+ [IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
+ [IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
+};
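
For a sense of scale, the pre-allocated sizes above work out as follows; this is simple arithmetic on the definitions, shown here as a comment.

/*
 * CSS_QUEUE_OUT_BUF_SIZE = 4160 * 3120 * 12 / 8 = 19,468,800 bytes (~18.6 MiB)
 * CSS_QUEUE_VF_BUF_SIZE  = 1920 * 1080 * 12 / 8 =  3,110,400 bytes (~3.0 MiB)
 * The IN and PARAMS queues are not pre-allocated (size 0); the 3A statistics
 * queue uses sizeof(struct ipu3_uapi_stats_3a).
 */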
+
+static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
+ [IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
+ [IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
+ [IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
+ [IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
+ [IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
+};
+
+unsigned int imgu_node_to_queue(unsigned int node)
+{
+ return imgu_node_map[node].css_queue;
+}
+
+unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ if (imgu_node_map[i].css_queue == css_queue)
+ break;
+
+ return i;
+}
+
+/**************** Dummy buffers ****************/
+
+static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
+{
+ unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < IPU3_CSS_QUEUES; i++)
+ imgu_dmamap_free(imgu,
+ &imgu_pipe->queues[i].dmap);
+}
+
+static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
+ unsigned int pipe)
+{
+ unsigned int i;
+ size_t size;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ size = css_queue_buf_size_map[i];
+		/*
+		 * Do not enable dummy buffers for the master queue;
+		 * always require that real buffers from the user are
+		 * available.
+		 */
+ if (i == IMGU_QUEUE_MASTER || size == 0)
+ continue;
+
+ if (!imgu_dmamap_alloc(imgu,
+ &imgu_pipe->queues[i].dmap, size)) {
+ imgu_dummybufs_cleanup(imgu, pipe);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
+{
+ const struct v4l2_pix_format_mplane *mpix;
+ const struct v4l2_meta_format *meta;
+ unsigned int i, k, node;
+ size_t size;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* Allocate a dummy buffer for each queue where buffer is optional */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ node = imgu_map_node(imgu, i);
+ if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
+ continue;
+
+ if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
+ i == IPU3_CSS_QUEUE_VF)
+ /*
+ * Do not enable dummy buffers for VF if it is not
+ * requested by the user.
+ */
+ continue;
+
+ meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
+ mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;
+
+ if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
+ size = meta->buffersize;
+ else
+ size = mpix->plane_fmt[0].sizeimage;
+
+ if (imgu_css_dma_buffer_resize(imgu,
+ &imgu_pipe->queues[i].dmap,
+ size)) {
+ imgu_dummybufs_cleanup(imgu, pipe);
+ return -ENOMEM;
+ }
+
+ for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
+ imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
+ imgu_pipe->queues[i].dmap.daddr);
+ }
+
+ return 0;
+}
+
+/* May be called from atomic context */
+static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
+ int queue, unsigned int pipe)
+{
+ unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* dummybufs are not allocated for master q */
+ if (queue == IPU3_CSS_QUEUE_IN)
+ return NULL;
+
+ if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
+ /* Buffer should not be allocated here */
+ return NULL;
+
+ for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
+ if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
+ IPU3_CSS_BUFFER_QUEUED)
+ break;
+
+ if (i == IMGU_MAX_QUEUE_DEPTH)
+ return NULL;
+
+ imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
+ imgu_pipe->queues[queue].dmap.daddr);
+
+ return &imgu_pipe->queues[queue].dummybufs[i];
+}
+
+/* Check if given buffer is a dummy buffer */
+static bool imgu_dummybufs_check(struct imgu_device *imgu,
+ struct imgu_css_buffer *buf,
+ unsigned int pipe)
+{
+ unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
+ if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
+ break;
+
+ return i < IMGU_MAX_QUEUE_DEPTH;
+}
+
+static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
+ enum vb2_buffer_state state)
+{
+ mutex_lock(&imgu->lock);
+ imgu_v4l2_buffer_done(vb, state);
+ mutex_unlock(&imgu->lock);
+}
+
+static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
+ unsigned int node,
+ unsigned int pipe)
+{
+ struct imgu_buffer *buf;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ if (WARN_ON(node >= IMGU_NODE_NUM))
+ return NULL;
+
+ /* Find first free buffer from the node */
+ list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
+ if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
+ return &buf->css_buf;
+ }
+
+ /* There were no free buffers, try to return a dummy buffer */
+ return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
+}
+
+/*
+ * Queue as many buffers to CSS as possible. If not all buffers fit into the
+ * CSS buffer queues, the rest remain unqueued and will be queued later.
+ */
+int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
+{
+ unsigned int node;
+ int r = 0;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ if (!imgu_css_is_streaming(&imgu->css))
+ return 0;
+
+ dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
+ mutex_lock(&imgu->lock);
+
+ if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
+ mutex_unlock(&imgu->lock);
+ return 0;
+ }
+
+	/* A buffer set is queued to the FW only when an input buffer is ready */
+ for (node = IMGU_NODE_NUM - 1;
+ imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
+ node = node ? node - 1 : IMGU_NODE_NUM - 1) {
+ if (node == IMGU_NODE_VF &&
+ !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
+ dev_warn(&imgu->pci_dev->dev,
+ "Vf not enabled, ignore queue");
+ continue;
+ } else if (node == IMGU_NODE_PARAMS &&
+ imgu_pipe->nodes[node].enabled) {
+ struct vb2_buffer *vb;
+ struct imgu_vb2_buffer *ivb;
+
+ /* No parameters for this frame */
+ if (list_empty(&imgu_pipe->nodes[node].buffers))
+ continue;
+
+ ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
+ struct imgu_vb2_buffer, list);
+ list_del(&ivb->list);
+ vb = &ivb->vbb.vb2_buf;
+ r = imgu_css_set_parameters(&imgu->css, pipe,
+ vb2_plane_vaddr(vb, 0));
+ if (r) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ dev_warn(&imgu->pci_dev->dev,
+ "set parameters failed.");
+ continue;
+ }
+
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ dev_dbg(&imgu->pci_dev->dev,
+ "queue user parameters %d to css.", vb->index);
+ } else if (imgu_pipe->queue_enabled[node]) {
+ struct imgu_css_buffer *buf =
+ imgu_queue_getbuf(imgu, node, pipe);
+ struct imgu_buffer *ibuf = NULL;
+ bool dummy;
+
+ if (!buf)
+ break;
+
+ r = imgu_css_buf_queue(&imgu->css, pipe, buf);
+ if (r)
+ break;
+ dummy = imgu_dummybufs_check(imgu, buf, pipe);
+ if (!dummy)
+ ibuf = container_of(buf, struct imgu_buffer,
+ css_buf);
+ dev_dbg(&imgu->pci_dev->dev,
+ "queue %s %s buffer %u to css da: 0x%08x\n",
+ dummy ? "dummy" : "user",
+ imgu_node_map[node].name,
+ dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
+ (u32)buf->daddr);
+ }
+ }
+ mutex_unlock(&imgu->lock);
+
+ if (r && r != -EBUSY)
+ goto failed;
+
+ return 0;
+
+failed:
+	/*
+	 * On error, mark as failed all buffers that are not
+	 * yet queued to CSS.
+	 */
+ dev_err(&imgu->pci_dev->dev,
+ "failed to queue buffer to CSS on queue %i (%d)\n",
+ node, r);
+
+ if (initial)
+ /* If we were called from streamon(), no need to finish bufs */
+ return r;
+
+ for (node = 0; node < IMGU_NODE_NUM; node++) {
+ struct imgu_buffer *buf, *buf0;
+
+ if (!imgu_pipe->queue_enabled[node])
+ continue; /* Skip disabled queues */
+
+ mutex_lock(&imgu->lock);
+ list_for_each_entry_safe(buf, buf0,
+ &imgu_pipe->nodes[node].buffers,
+ vid_buf.list) {
+ if (imgu_css_buf_state(&buf->css_buf) ==
+ IPU3_CSS_BUFFER_QUEUED)
+ continue; /* Was already queued, skip */
+
+ imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ mutex_unlock(&imgu->lock);
+ }
+
+ return r;
+}
+
+static int imgu_powerup(struct imgu_device *imgu)
+{
+ int r;
+ unsigned int pipe;
+ unsigned int freq = 200;
+ struct v4l2_mbus_framefmt *fmt;
+
+	/* if input is larger than 2048x1152, ask imgu to run at the higher frequency */
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
+ dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
+ pipe, fmt->width, fmt->height);
+ if ((fmt->width * fmt->height) >= (2048 * 1152))
+ freq = 450;
+ }
+
+ r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
+ if (r)
+ return r;
+
+ imgu_mmu_resume(imgu->mmu);
+ return 0;
+}
+
+static void imgu_powerdown(struct imgu_device *imgu)
+{
+ imgu_mmu_suspend(imgu->mmu);
+ imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
+}
+
+int imgu_s_stream(struct imgu_device *imgu, int enable)
+{
+ struct device *dev = &imgu->pci_dev->dev;
+ int r, pipe;
+
+ if (!enable) {
+ /* Stop streaming */
+ dev_dbg(dev, "stream off\n");
+		/* Block new buffers from being queued to CSS. */
+ atomic_set(&imgu->qbuf_barrier, 1);
+ imgu_css_stop_streaming(&imgu->css);
+ synchronize_irq(imgu->pci_dev->irq);
+ atomic_set(&imgu->qbuf_barrier, 0);
+ imgu_powerdown(imgu);
+ pm_runtime_put(&imgu->pci_dev->dev);
+
+ return 0;
+ }
+
+ /* Set Power */
+ r = pm_runtime_resume_and_get(dev);
+ if (r < 0) {
+ dev_err(dev, "failed to set imgu power\n");
+ return r;
+ }
+
+ r = imgu_powerup(imgu);
+ if (r) {
+ dev_err(dev, "failed to power up imgu\n");
+ pm_runtime_put(dev);
+ return r;
+ }
+
+ /* Start CSS streaming */
+ r = imgu_css_start_streaming(&imgu->css);
+ if (r) {
+ dev_err(dev, "failed to start css streaming (%d)", r);
+ goto fail_start_streaming;
+ }
+
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ /* Initialize dummy buffers */
+ r = imgu_dummybufs_init(imgu, pipe);
+ if (r) {
+ dev_err(dev, "failed to initialize dummy buffers (%d)", r);
+ goto fail_dummybufs;
+ }
+
+		/* Queue as many buffers from the video queues as possible */
+ r = imgu_queue_buffers(imgu, true, pipe);
+ if (r) {
+ dev_err(dev, "failed to queue initial buffers (%d)", r);
+ goto fail_queueing;
+ }
+ }
+
+ return 0;
+fail_queueing:
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
+ imgu_dummybufs_cleanup(imgu, pipe);
+fail_dummybufs:
+ imgu_css_stop_streaming(&imgu->css);
+fail_start_streaming:
+ pm_runtime_put(dev);
+
+ return r;
+}
+
+static void imgu_video_nodes_exit(struct imgu_device *imgu)
+{
+ int i;
+
+ for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
+ imgu_dummybufs_cleanup(imgu, i);
+
+ imgu_v4l2_unregister(imgu);
+}
+
+static int imgu_video_nodes_init(struct imgu_device *imgu)
+{
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
+ struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
+ struct imgu_media_pipe *imgu_pipe;
+ unsigned int i, j;
+ int r;
+
+ imgu->buf_struct_size = sizeof(struct imgu_buffer);
+
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
+ imgu_pipe = &imgu->imgu_pipe[j];
+
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_pipe->nodes[i].name = imgu_node_map[i].name;
+ imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
+ imgu_pipe->nodes[i].enabled = false;
+
+ if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
+ fmts[imgu_node_map[i].css_queue] =
+ &imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
+ atomic_set(&imgu_pipe->nodes[i].sequence, 0);
+ }
+ }
+
+ r = imgu_v4l2_register(imgu);
+ if (r)
+ return r;
+
+ /* Set initial formats and initialize formats of video nodes */
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
+ imgu_pipe = &imgu->imgu_pipe[j];
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
+ imgu_css_fmt_set(&imgu->css, fmts, rects, j);
+
+ /* Pre-allocate dummy buffers */
+ r = imgu_dummybufs_preallocate(imgu, j);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to pre-allocate dummy buffers (%d)", r);
+ goto out_cleanup;
+ }
+ }
+
+ return 0;
+
+out_cleanup:
+ imgu_video_nodes_exit(imgu);
+
+ return r;
+}
+
+/**************** PCI interface ****************/
+
+static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
+{
+ struct imgu_device *imgu = imgu_ptr;
+ struct imgu_media_pipe *imgu_pipe;
+ int p;
+
+ /* Dequeue / queue buffers */
+ do {
+ u64 ns = ktime_get_ns();
+ struct imgu_css_buffer *b;
+ struct imgu_buffer *buf = NULL;
+ unsigned int node, pipe;
+ bool dummy;
+
+ do {
+ mutex_lock(&imgu->lock);
+ b = imgu_css_buf_dequeue(&imgu->css);
+ mutex_unlock(&imgu->lock);
+ } while (PTR_ERR(b) == -EAGAIN);
+
+ if (IS_ERR(b)) {
+ if (PTR_ERR(b) != -EBUSY) /* All done */
+ dev_err(&imgu->pci_dev->dev,
+ "failed to dequeue buffers (%pe)\n", b);
+ break;
+ }
+
+ node = imgu_map_node(imgu, b->queue);
+ pipe = b->pipe;
+ dummy = imgu_dummybufs_check(imgu, b, pipe);
+ if (!dummy)
+ buf = container_of(b, struct imgu_buffer, css_buf);
+ dev_dbg(&imgu->pci_dev->dev,
+ "dequeue %s %s buffer %d daddr 0x%x from css\n",
+ dummy ? "dummy" : "user",
+ imgu_node_map[node].name,
+ dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
+ (u32)b->daddr);
+
+ if (dummy)
+ /* It was a dummy buffer, skip it */
+ continue;
+
+ /* Fill vb2 buffer entries and tell it's ready */
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ if (!imgu_pipe->nodes[node].output) {
+ buf->vid_buf.vbb.vb2_buf.timestamp = ns;
+ buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
+ buf->vid_buf.vbb.sequence =
+ atomic_inc_return(
+ &imgu_pipe->nodes[node].sequence);
+ dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
+ buf->vid_buf.vbb.sequence);
+ }
+ imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
+ imgu_css_buf_state(&buf->css_buf) ==
+ IPU3_CSS_BUFFER_DONE ?
+ VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR);
+ mutex_lock(&imgu->lock);
+ if (imgu_css_queue_empty(&imgu->css))
+ wake_up_all(&imgu->buf_drain_wq);
+ mutex_unlock(&imgu->lock);
+ } while (1);
+
+	/*
+	 * Try to queue more buffers to CSS.
+	 * qbuf_barrier is used to prevent new buffers
+	 * from being queued to CSS.
+	 */
+ if (!atomic_read(&imgu->qbuf_barrier))
+ for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
+ imgu_queue_buffers(imgu, false, p);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
+{
+ struct imgu_device *imgu = imgu_ptr;
+
+	/* acknowledge the interrupt */
+ if (imgu_css_irq_ack(&imgu->css) < 0)
+ return IRQ_NONE;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int imgu_pci_config_setup(struct pci_dev *dev)
+{
+ u16 pci_command;
+ int r = pci_enable_msi(dev);
+
+ if (r) {
+ dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
+ return r;
+ }
+
+ pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+ pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(dev, PCI_COMMAND, pci_command);
+
+ return 0;
+}
+
+static int imgu_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct imgu_device *imgu;
+ phys_addr_t phys;
+ unsigned long phys_len;
+ void __iomem *const *iomap;
+ int r;
+
+ imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
+ if (!imgu)
+ return -ENOMEM;
+
+ imgu->pci_dev = pci_dev;
+
+ r = pcim_enable_device(pci_dev);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
+ return r;
+ }
+
+ dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
+ pci_dev->device, pci_dev->revision);
+
+ phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
+ phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);
+
+ r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
+ return r;
+ }
+ dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
+ &phys, phys_len);
+
+ iomap = pcim_iomap_table(pci_dev);
+ if (!iomap) {
+ dev_err(&pci_dev->dev, "failed to iomap table\n");
+ return -ENODEV;
+ }
+
+ imgu->base = iomap[IMGU_PCI_BAR];
+
+ pci_set_drvdata(pci_dev, imgu);
+
+ pci_set_master(pci_dev);
+
+ r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
+ return -ENODEV;
+ }
+
+ r = imgu_pci_config_setup(pci_dev);
+ if (r)
+ return r;
+
+ mutex_init(&imgu->lock);
+ mutex_init(&imgu->streaming_lock);
+ atomic_set(&imgu->qbuf_barrier, 0);
+ init_waitqueue_head(&imgu->buf_drain_wq);
+
+ r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
+ if (r) {
+ dev_err(&pci_dev->dev,
+ "failed to power up CSS (%d)\n", r);
+ goto out_mutex_destroy;
+ }
+
+ imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
+ if (IS_ERR(imgu->mmu)) {
+ r = PTR_ERR(imgu->mmu);
+ dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
+ goto out_css_powerdown;
+ }
+
+ r = imgu_dmamap_init(imgu);
+ if (r) {
+ dev_err(&pci_dev->dev,
+ "failed to initialize DMA mapping (%d)\n", r);
+ goto out_mmu_exit;
+ }
+
+ /* ISP programming */
+ r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
+ goto out_dmamap_exit;
+ }
+
+ /* v4l2 sub-device registration */
+ r = imgu_video_nodes_init(imgu);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
+ r);
+ goto out_css_cleanup;
+ }
+
+ r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
+ imgu_isr, imgu_isr_threaded,
+ IRQF_SHARED, IMGU_NAME, imgu);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
+ goto out_video_exit;
+ }
+
+ pm_runtime_put_noidle(&pci_dev->dev);
+ pm_runtime_allow(&pci_dev->dev);
+
+ return 0;
+
+out_video_exit:
+ imgu_video_nodes_exit(imgu);
+out_css_cleanup:
+ imgu_css_cleanup(&imgu->css);
+out_dmamap_exit:
+ imgu_dmamap_exit(imgu);
+out_mmu_exit:
+ imgu_mmu_exit(imgu->mmu);
+out_css_powerdown:
+ imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
+out_mutex_destroy:
+ mutex_destroy(&imgu->streaming_lock);
+ mutex_destroy(&imgu->lock);
+
+ return r;
+}
+
+static void imgu_pci_remove(struct pci_dev *pci_dev)
+{
+ struct imgu_device *imgu = pci_get_drvdata(pci_dev);
+
+ pm_runtime_forbid(&pci_dev->dev);
+ pm_runtime_get_noresume(&pci_dev->dev);
+
+ imgu_video_nodes_exit(imgu);
+ imgu_css_cleanup(&imgu->css);
+ imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
+ imgu_dmamap_exit(imgu);
+ imgu_mmu_exit(imgu->mmu);
+ mutex_destroy(&imgu->streaming_lock);
+ mutex_destroy(&imgu->lock);
+}
+
+static int __maybe_unused imgu_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct imgu_device *imgu = pci_get_drvdata(pci_dev);
+
+ imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
+ if (!imgu->suspend_in_stream)
+ goto out;
+	/* Block new buffers from being queued to CSS. */
+ atomic_set(&imgu->qbuf_barrier, 1);
+	/*
+	 * Wait for the currently running IRQ handler to finish so that
+	 * no new buffers will be queued to the FW afterwards.
+	 */
+ synchronize_irq(pci_dev->irq);
+ /* Wait until all buffers in CSS are done. */
+ if (!wait_event_timeout(imgu->buf_drain_wq,
+ imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
+ dev_err(dev, "wait buffer drain timeout.\n");
+
+ imgu_css_stop_streaming(&imgu->css);
+ atomic_set(&imgu->qbuf_barrier, 0);
+ imgu_powerdown(imgu);
+ pm_runtime_force_suspend(dev);
+out:
+ return 0;
+}
+
+static int __maybe_unused imgu_resume(struct device *dev)
+{
+ struct imgu_device *imgu = dev_get_drvdata(dev);
+ int r = 0;
+ unsigned int pipe;
+
+ if (!imgu->suspend_in_stream)
+ goto out;
+
+ pm_runtime_force_resume(dev);
+
+ r = imgu_powerup(imgu);
+ if (r) {
+ dev_err(dev, "failed to power up imgu\n");
+ goto out;
+ }
+
+ /* Start CSS streaming */
+ r = imgu_css_start_streaming(&imgu->css);
+ if (r) {
+ dev_err(dev, "failed to resume css streaming (%d)", r);
+ goto out;
+ }
+
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_queue_buffers(imgu, true, pipe);
+ if (r)
+ dev_err(dev, "failed to queue buffers to pipe %d (%d)",
+ pipe, r);
+ }
+
+out:
+ return r;
+}
+
+/*
+ * The PCI runtime PM framework checks for the existence of driver runtime PM
+ * callbacks. Place a dummy callback here to keep runtime PM from going into
+ * an error state.
+ */
+static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops imgu_pm_ops = {
+ SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
+};
+
+static const struct pci_device_id imgu_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);
+
+static struct pci_driver imgu_pci_driver = {
+ .name = IMGU_NAME,
+ .id_table = imgu_pci_tbl,
+ .probe = imgu_pci_probe,
+ .remove = imgu_pci_remove,
+ .driver = {
+ .pm = &imgu_pm_ops,
+ },
+};
+
+module_pci_driver(imgu_pci_driver);
+
+MODULE_AUTHOR("Tuukka Toivonen");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Jian Xu Zheng");
+MODULE_AUTHOR("Yuning Pu");
+MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");
+MODULE_FIRMWARE(IMGU_FW_NAME);
+MODULE_FIRMWARE(IMGU_FW_NAME_20161208);
+MODULE_FIRMWARE(IMGU_FW_NAME_IPU_20161208);
diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h
new file mode 100644
index 000000000000..7b2a1eee2c7f
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_H
+#define __IPU3_H
+
+#include <linux/iova.h>
+#include <linux/pci.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "ipu3-css.h"
+
+#define IMGU_NAME "ipu3-imgu"
+
+/*
+ * The driver's semantics are that whenever a buffer is available in the
+ * master queue, the driver also queues a buffer to all other active nodes.
+ * If user space has not provided a buffer to one of those nodes first,
+ * the driver takes an internal dummy buffer and queues that instead.
+ */
+#define IMGU_QUEUE_MASTER IPU3_CSS_QUEUE_IN
+#define IMGU_QUEUE_FIRST_INPUT IPU3_CSS_QUEUE_OUT
+#define IMGU_MAX_QUEUE_DEPTH (2 + 2)
+
+#define IMGU_NODE_IN 0 /* Input RAW image */
+#define IMGU_NODE_PARAMS 1 /* Input parameters */
+#define IMGU_NODE_OUT 2 /* Main output for still or video */
+#define IMGU_NODE_VF 3 /* Preview */
+#define IMGU_NODE_STAT_3A 4 /* 3A statistics */
+#define IMGU_NODE_NUM 5
+
+#define file_to_intel_imgu_node(__file) \
+ container_of(video_devdata(__file), struct imgu_video_device, vdev)
+
+#define IPU3_INPUT_MIN_WIDTH 0U
+#define IPU3_INPUT_MIN_HEIGHT 0U
+#define IPU3_INPUT_MAX_WIDTH 5120U
+#define IPU3_INPUT_MAX_HEIGHT 38404U
+#define IPU3_OUTPUT_MIN_WIDTH 2U
+#define IPU3_OUTPUT_MIN_HEIGHT 2U
+#define IPU3_OUTPUT_MAX_WIDTH 4480U
+#define IPU3_OUTPUT_MAX_HEIGHT 34004U
+
+struct imgu_vb2_buffer {
+ /* Public fields */
+ struct vb2_v4l2_buffer vbb; /* Must be the first field */
+
+ /* Private fields */
+ struct list_head list;
+};
+
+struct imgu_buffer {
+ struct imgu_vb2_buffer vid_buf; /* Must be the first field */
+ struct imgu_css_buffer css_buf;
+ struct imgu_css_map map;
+};
+
+struct imgu_node_mapping {
+ unsigned int css_queue;
+ const char *name;
+};
+
+struct imgu_video_device {
+ const char *name;
+ bool output;
+ bool enabled;
+ struct v4l2_format vdev_fmt; /* Currently set format */
+
+ /* Private fields */
+ struct video_device vdev;
+ struct media_pad vdev_pad;
+ struct v4l2_mbus_framefmt pad_fmt;
+ struct vb2_queue vbq;
+ struct list_head buffers;
+	/* Protect vb2_queue and vdev structs */
+ struct mutex lock;
+ atomic_t sequence;
+ unsigned int id;
+ unsigned int pipe;
+};
+
+struct imgu_v4l2_subdev {
+ unsigned int pipe;
+ struct v4l2_subdev subdev;
+ struct media_pad subdev_pads[IMGU_NODE_NUM];
+ struct {
+ struct v4l2_rect eff; /* effective resolution */
+		struct v4l2_rect bds; /* bayer-domain scaled resolution */
+ struct v4l2_rect gdc; /* gdc output resolution */
+ } rect;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl;
+ atomic_t running_mode;
+ bool active;
+};
+
+struct imgu_media_pipe {
+ unsigned int pipe;
+
+ /* Internally enabled queues */
+ struct {
+ struct imgu_css_map dmap;
+ struct imgu_css_buffer dummybufs[IMGU_MAX_QUEUE_DEPTH];
+ } queues[IPU3_CSS_QUEUES];
+ struct imgu_video_device nodes[IMGU_NODE_NUM];
+ bool queue_enabled[IMGU_NODE_NUM];
+ struct media_pipeline pipeline;
+ struct imgu_v4l2_subdev imgu_sd;
+};
+
+/*
+ * imgu_device -- ImgU (Imaging Unit) driver
+ */
+struct imgu_device {
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+
+ /* Public fields, fill before registering */
+ unsigned int buf_struct_size;
+ bool streaming; /* Public read only */
+
+ struct imgu_media_pipe imgu_pipe[IMGU_MAX_PIPE_NUM];
+
+ /* Private fields */
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+
+ /* MMU driver for css */
+ struct imgu_mmu_info *mmu;
+ struct iova_domain iova_domain;
+
+ /* css - Camera Sub-System */
+ struct imgu_css css;
+
+ /*
+ * Coarse-grained lock to protect
+ * vid_buf.list and css->queue
+ */
+ struct mutex lock;
+
+ /* Lock to protect writes to streaming flag in this struct */
+ struct mutex streaming_lock;
+
+ /* Forbid streaming and buffer queuing during system suspend. */
+ atomic_t qbuf_barrier;
+	/* Indicates whether system suspend took place while imgu was streaming. */
+ bool suspend_in_stream;
+ /* Used to wait for FW buffer queue drain. */
+ wait_queue_head_t buf_drain_wq;
+};
+
+unsigned int imgu_node_to_queue(unsigned int node);
+unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue);
+int imgu_queue_buffers(struct imgu_device *imgu, bool initial,
+ unsigned int pipe);
+
+int imgu_v4l2_register(struct imgu_device *dev);
+int imgu_v4l2_unregister(struct imgu_device *dev);
+void imgu_v4l2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
+
+int imgu_s_stream(struct imgu_device *imgu, int enable);
+
+static inline u32 imgu_bytesperline(const unsigned int width,
+ enum imgu_abi_frame_format frame_format)
+{
+ if (frame_format == IMGU_ABI_FRAME_FORMAT_NV12)
+ return ALIGN(width, IPU3_UAPI_ISP_VEC_ELEMS);
+	/*
+	 * 64 bytes for every 50 pixels; the line length
+	 * in bytes is a multiple of 64 (line end alignment).
+	 */
+ return DIV_ROUND_UP(width, 50) * 64;
+}
+
+#endif
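
As a quick worked example of imgu_bytesperline() above, using the 1920x1080 packed-RAW default set up in imgu_v4l2_node_setup(); the values follow directly from the definitions in this patch.

/*
 * Packed RAW branch: DIV_ROUND_UP(1920, 50) = 39 blocks of 50 pixels,
 * 64 bytes each:
 *
 *   bytesperline = 39 * 64     = 2496 bytes
 *   sizeimage    = 1080 * 2496 = 2,695,680 bytes
 *
 * The NV12 branch would instead return ALIGN(width, IPU3_UAPI_ISP_VEC_ELEMS).
 */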
diff --git a/drivers/staging/media/ipu7/Kconfig b/drivers/staging/media/ipu7/Kconfig
new file mode 100644
index 000000000000..7d831ba7501d
--- /dev/null
+++ b/drivers/staging/media/ipu7/Kconfig
@@ -0,0 +1,19 @@
+config VIDEO_INTEL_IPU7
+ tristate "Intel IPU7 driver"
+ depends on ACPI || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on X86 && HAS_DMA
+ depends on IPU_BRIDGE || !IPU_BRIDGE
+ depends on PCI
+ select AUXILIARY_BUS
+ select IOMMU_IOVA
+ select VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ select VIDEOBUF2_DMA_SG
+ select V4L2_FWNODE
+ help
+ This is the 7th Gen Intel Image Processing Unit, found in Intel SoCs
+ and used for capturing images and video from camera sensors.
+
+	  To compile this driver, say Y here. It contains two modules:
+	  intel_ipu7 and intel_ipu7_isys.
diff --git a/drivers/staging/media/ipu7/Makefile b/drivers/staging/media/ipu7/Makefile
new file mode 100644
index 000000000000..6d2aec219e65
--- /dev/null
+++ b/drivers/staging/media/ipu7/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2017 - 2025 Intel Corporation.
+
+intel-ipu7-objs += ipu7.o \
+ ipu7-bus.o \
+ ipu7-dma.o \
+ ipu7-mmu.o \
+ ipu7-buttress.o \
+ ipu7-cpd.o \
+ ipu7-syscom.o \
+ ipu7-boot.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7.o
+
+intel-ipu7-isys-objs += ipu7-isys.o \
+ ipu7-isys-csi2.o \
+ ipu7-isys-csi-phy.o \
+ ipu7-fw-isys.o \
+ ipu7-isys-video.o \
+ ipu7-isys-queue.o \
+ ipu7-isys-subdev.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7-isys.o
diff --git a/drivers/staging/media/ipu7/TODO b/drivers/staging/media/ipu7/TODO
new file mode 100644
index 000000000000..7fbc37059adf
--- /dev/null
+++ b/drivers/staging/media/ipu7/TODO
@@ -0,0 +1,28 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+
+- ABI headers cleanup
+  Clean up the firmware ABI headers.
+
+- Add metadata capture support
+  The IPU7 hardware should support metadata capture, but this has not been
+  fully verified against the IPU7 firmware ABI so far; metadata capture
+  support still needs to be added.
+
+- Refine CSI2 PHY code
+  Refine ipu7-isys-csi2-phy.c: move the hardware-specific variant data into
+  a structure, and clarify and document the PHY registers to make the code
+  more readable.
+
+- Work with the common IPU module
+  Sakari commented that much of the driver code is the same as in the IPU6
+  driver. The IPU7 driver is expected to work with the common IPU module in
+  the future.
+
+- Register definition cleanup
+  Clean up the register definitions - remove some unnecessary definitions,
+  remove the 'U' suffix from hexadecimal and decimal values, and add an IPU7
+  prefix to IPU7-specific registers.
+  Some ISYS IO sub-block register definitions are offsets from a specific
+  sub-block base, which is neither clear nor well suited for the driver to
+  use; the register definitions need to be updated to make them clearer and
+  more readable.
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h
new file mode 100644
index 000000000000..a1519c4fe661
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_BOOT_ABI_H
+#define IPU7_FW_BOOT_ABI_H
+
+#include "ipu7_fw_common_abi.h"
+#include "ipu7_fw_syscom_abi.h"
+
+#define IA_GOFO_FWLOG_SEVERITY_CRIT (0U)
+#define IA_GOFO_FWLOG_SEVERITY_ERROR (1U)
+#define IA_GOFO_FWLOG_SEVERITY_WARNING (2U)
+#define IA_GOFO_FWLOG_SEVERITY_INFO (3U)
+#define IA_GOFO_FWLOG_SEVERITY_DEBUG (4U)
+#define IA_GOFO_FWLOG_SEVERITY_VERBOSE (5U)
+#define IA_GOFO_FWLOG_MAX_LOGGER_SOURCES (64U)
+
+#define LOGGER_CONFIG_CHANNEL_ENABLE_HWPRINTF_BITMASK BIT(0)
+#define LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK BIT(1)
+#define LOGGER_CONFIG_CHANNEL_ENABLE_ALL_BITMASK \
+ (LOGGER_CONFIG_CHANNEL_ENABLE_HWPRINTF_BITMASK | \
+ LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK)
+
+struct ia_gofo_logger_config {
+ u8 use_source_severity;
+ u8 source_severity[IA_GOFO_FWLOG_MAX_LOGGER_SOURCES];
+ u8 use_channels_enable_bitmask;
+ u8 channels_enable_bitmask;
+ u8 padding[1];
+ ia_gofo_addr_t hw_printf_buffer_base_addr;
+ u32 hw_printf_buffer_size_bytes;
+};
+
+#pragma pack(push, 1)
+
+#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP \
+ ((u32)IA_GOFO_FW_BOOT_ID_MAX)
+#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET (0U)
+#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PS_OFFSET \
+ ((IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET) + \
+ (u32)(IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP))
+#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PRIMARY_OFFSET (0U)
+#define IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET (0x3000U / 4U)
+#define IA_GOFO_HKR_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET \
+ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 2U)
+#define IA_GOFO_HKR_HIF_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET \
+ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP)
+#define IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX \
+ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 4U)
+#define IA_GOFO_HKR_IPU_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX \
+ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 4U)
+
+#define IA_GOFO_BOOT_RESERVED_SIZE (58U)
+#define IA_GOFO_BOOT_SECONDARY_RESERVED_SIZE (IA_GOFO_BOOT_RESERVED_SIZE)
+#define IA_GOFO_BOOT_SECONDARY_RESERVED_FIELDS \
+ (sizeof(ia_gofo_addr_t) + sizeof(ia_gofo_addr_t) + sizeof(u32))
+
+enum ia_gofo_buttress_reg_id {
+ IA_GOFO_FW_BOOT_CONFIG_ID = 0,
+ IA_GOFO_FW_BOOT_STATE_ID = 1,
+ IA_GOFO_FW_BOOT_RESERVED1_ID = IA_GOFO_FW_BOOT_STATE_ID,
+ IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID = 2,
+ IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID = 3,
+ IA_GOFO_FW_BOOT_RESERVED0_ID = IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID,
+ IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID = 4,
+ IA_GOFO_FW_BOOT_ID_MAX
+};
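
Given IA_GOFO_FW_BOOT_ID_MAX = 5 from this enum, the boot-parameter offset macros above expand as follows; a worked example derived from the definitions.

/*
 * IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP      = 5
 * IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PS_OFFSET                = 0 + 5      = 5
 * IA_GOFO_HKR_HIF_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET = 5
 * IA_GOFO_HKR_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET = 5 * 2      = 10
 * IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET = 0x3000 / 4 = 3072
 * IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX      = 5 * 4      = 20
 */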
+
+enum ia_gofo_boot_uc_tile_frequency_units {
+ IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_MHZ = 0,
+ IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_HZ = 1,
+ IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_N
+};
+
+#define IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(boot_state) \
+ (0xdead0000U == ((boot_state) & 0xffff0000U))
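
The macro recognizes critical boot states by their 0xdead prefix; a brief check against values defined later in this header.

/*
 * Example: IA_GOFO_WDT_TIMEOUT_ERR (0xdead0401U) & 0xffff0000U == 0xdead0000U,
 * so IA_GOFO_FW_BOOT_STATE_IS_CRITICAL() evaluates to true for it, while
 * IA_GOFO_FW_BOOT_STATE_READY (0x57a7e100U) & 0xffff0000U == 0x57a70000U,
 * so the macro is false for normal boot states.
 */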
+
+struct ia_gofo_boot_config {
+ u32 length;
+ struct ia_gofo_version_s config_version;
+ struct ia_gofo_msg_version_list client_version_support;
+ ia_gofo_addr_t pkg_dir;
+ ia_gofo_addr_t subsys_config;
+ u32 uc_tile_frequency;
+ u16 checksum;
+ u8 uc_tile_frequency_units;
+ u8 padding[1];
+ u32 reserved[IA_GOFO_BOOT_RESERVED_SIZE];
+ struct syscom_config_s syscom_context_config;
+};
+
+struct ia_gofo_secondary_boot_config {
+ u32 length;
+ struct ia_gofo_version_s config_version;
+ struct ia_gofo_msg_version_list client_version_support;
+ u8 reserved1[IA_GOFO_BOOT_SECONDARY_RESERVED_FIELDS];
+ u16 checksum;
+ u8 padding[2];
+ u32 reserved2[IA_GOFO_BOOT_SECONDARY_RESERVED_SIZE];
+ struct syscom_config_s syscom_context_config;
+};
+
+#pragma pack(pop)
+
+#define IA_GOFO_WDT_TIMEOUT_ERR 0xdead0401U
+#define IA_GOFO_MEM_FATAL_DME_ERR 0xdead0801U
+#define IA_GOFO_MEM_UNCORRECTABLE_LOCAL_ERR 0xdead0802U
+#define IA_GOFO_MEM_UNCORRECTABLE_DIRTY_ERR 0xdead0803U
+#define IA_GOFO_MEM_UNCORRECTABLE_DTAG_ERR 0xdead0804U
+#define IA_GOFO_MEM_UNCORRECTABLE_CACHE_ERR 0xdead0805U
+#define IA_GOFO_DOUBLE_EXCEPTION_ERR 0xdead0806U
+#define IA_GOFO_BIST_DMEM_FAULT_DETECTION_ERR 0xdead1000U
+#define IA_GOFO_BIST_DATA_INTEGRITY_FAILURE 0xdead1010U
+
+enum ia_gofo_boot_state {
+ IA_GOFO_FW_BOOT_STATE_SECONDARY_BOOT_CONFIG_READY = 0x57a7b000U,
+ IA_GOFO_FW_BOOT_STATE_UNINIT = 0x57a7e000U,
+ IA_GOFO_FW_BOOT_STATE_STARTING_0 = 0x57a7d000U,
+ IA_GOFO_FW_BOOT_STATE_CACHE_INIT_DONE = 0x57a7d010U,
+ IA_GOFO_FW_BOOT_STATE_MEM_INIT_DONE = 0x57a7d020U,
+ IA_GOFO_FW_BOOT_STATE_STACK_INIT_DONE = 0x57a7d030U,
+ IA_GOFO_FW_BOOT_STATE_EARLY_BOOT_DONE = 0x57a7d100U,
+ IA_GOFO_FW_BOOT_STATE_BOOT_CONFIG_START = 0x57a7d200U,
+ IA_GOFO_FW_BOOT_STATE_QUEUE_INIT_DONE = 0x57a7d300U,
+ IA_GOFO_FW_BOOT_STATE_READY = 0x57a7e100U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_UNSPECIFIED = 0xdead0001U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_CFG_PTR = 0xdead0101U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_CFG_VERSION = 0xdead0201U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_MSG_VERSION = 0xdead0301U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_WDT_TIMEOUT = IA_GOFO_WDT_TIMEOUT_ERR,
+ IA_GOFO_FW_BOOT_STATE_WRONG_DATA_SECTION_UNPACKING = 0xdead0501U,
+ IA_GOFO_FW_BOOT_STATE_WRONG_RO_DATA_SECTION_UNPACKING = 0xdead0601U,
+ IA_GOFO_FW_BOOT_STATE_INVALID_UNTRUSTED_ADDR_MIN = 0xdead0701U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_MEM_FATAL_DME = IA_GOFO_MEM_FATAL_DME_ERR,
+ IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_LOCAL =
+ IA_GOFO_MEM_UNCORRECTABLE_LOCAL_ERR,
+ IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_DIRTY =
+ IA_GOFO_MEM_UNCORRECTABLE_DIRTY_ERR,
+ IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_DTAG =
+ IA_GOFO_MEM_UNCORRECTABLE_DTAG_ERR,
+ IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_CACHE =
+ IA_GOFO_MEM_UNCORRECTABLE_CACHE_ERR,
+ IA_GOFO_FW_BOOT_STATE_CRIT_DOUBLE_EXCEPTION =
+ IA_GOFO_DOUBLE_EXCEPTION_ERR,
+ IA_GOFO_FW_BOOT_STATE_CRIT_BIST_DMEM_FAULT_DETECTION_ERR =
+ IA_GOFO_BIST_DMEM_FAULT_DETECTION_ERR,
+ IA_GOFO_FW_BOOT_STATE_CRIT_DATA_INTEGRITY_FAILURE = 0xdead1010U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_STACK_CHK_FAILURE = 0xdead1011U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_SYSCOM_CONTEXT_INTEGRITY_FAILURE =
+ 0xdead1012U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_MPU_CONFIG_FAILURE = 0xdead1013U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_SHARED_BUFFER_FAILURE = 0xdead1014U,
+ IA_GOFO_FW_BOOT_STATE_CRIT_CMEM_FAILURE = 0xdead1015U,
+ IA_GOFO_FW_BOOT_STATE_SHUTDOWN_CMD = 0x57a7f001U,
+ IA_GOFO_FW_BOOT_STATE_SHUTDOWN_START = 0x57a7e200U,
+ IA_GOFO_FW_BOOT_STATE_INACTIVE = 0x57a7e300U,
+ IA_GOFO_FW_BOOT_HW_CMD_ACK_TIMEOUT = 0x57a7e400U,
+ IA_GOFO_FW_BOOT_SYSTEM_CYCLES_ERROR = 0x57a7e500U
+};
+
+#endif
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h
new file mode 100644
index 000000000000..7bb6fac585a3
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_COMMOM_ABI_H
+#define IPU7_FW_COMMOM_ABI_H
+
+#include <linux/types.h>
+
+#pragma pack(push, 1)
+typedef u32 ia_gofo_addr_t;
+
+#define IA_GOFO_ADDR_NULL (0U)
+
+struct ia_gofo_version_s {
+ u8 patch;
+ u8 subminor;
+ u8 minor;
+ u8 major;
+};
+
+#define IA_GOFO_MSG_VERSION_INIT(major_val, minor_val, subminor_val, patch_val)\
+ {.major = (major_val), .minor = (minor_val), .subminor = \
+ (subminor_val), .patch = (patch_val)}
+
+#define IA_GOFO_MSG_VERSION_LIST_MAX_ENTRIES (3U)
+#define IA_GOFO_MSG_RESERVED_SIZE (3U)
+
+struct ia_gofo_msg_version_list {
+ u8 num_versions;
+ u8 reserved[IA_GOFO_MSG_RESERVED_SIZE];
+ struct ia_gofo_version_s versions[IA_GOFO_MSG_VERSION_LIST_MAX_ENTRIES];
+};
+
+#pragma pack(pop)
+
+#define TLV_TYPE_PADDING (0U)
+
+#pragma pack(push, 1)
+
+#define IA_GOFO_ABI_BITS_PER_BYTE (8U)
+
+struct ia_gofo_tlv_header {
+ u16 tlv_type;
+ u16 tlv_len32;
+};
+
+struct ia_gofo_tlv_list {
+ u16 num_elems;
+ u16 head_offset;
+};
+
+#define TLV_ITEM_ALIGNMENT ((u32)sizeof(u32))
+#define TLV_MSG_ALIGNMENT ((u32)sizeof(u64))
+#define TLV_LIST_ALIGNMENT TLV_ITEM_ALIGNMENT
+#pragma pack(pop)
+
+#define IA_GOFO_MODULO(dividend, divisor) ((dividend) % (divisor))
+
+#define IA_GOFO_MSG_ERR_MAX_DETAILS (4U)
+#define IA_GOFO_MSG_ERR_OK (0U)
+#define IA_GOFO_MSG_ERR_UNSPECIFED (0xffffffffU)
+#define IA_GOFO_MSG_ERR_GROUP_UNSPECIFIED (0U)
+#define IA_GOFO_MSG_ERR_IS_OK(err) (IA_GOFO_MSG_ERR_OK == (err).err_code)
+
+#pragma pack(push, 1)
+struct ia_gofo_msg_err {
+ u32 err_group;
+ u32 err_code;
+ u32 err_detail[IA_GOFO_MSG_ERR_MAX_DETAILS];
+};
+
+#pragma pack(pop)
+
+#define IA_GOFO_MSG_ERR_GROUP_APP_EXT_START (16U)
+#define IA_GOFO_MSG_ERR_GROUP_MAX (31U)
+#define IA_GOFO_MSG_ERR_GROUP_INTERNAL_START (IA_GOFO_MSG_ERR_GROUP_MAX + 1U)
+#define IA_GOFO_MSG_ERR_GROUP_RESERVED IA_GOFO_MSG_ERR_GROUP_UNSPECIFIED
+#define IA_GOFO_MSG_ERR_GROUP_GENERAL 1
+
+enum ia_gofo_msg_err_general {
+ IA_GOFO_MSG_ERR_GENERAL_OK = IA_GOFO_MSG_ERR_OK,
+ IA_GOFO_MSG_ERR_GENERAL_MSG_TOO_SMALL = 1,
+ IA_GOFO_MSG_ERR_GENERAL_MSG_TOO_LARGE = 2,
+ IA_GOFO_MSG_ERR_GENERAL_DEVICE_STATE = 3,
+ IA_GOFO_MSG_ERR_GENERAL_ALIGNMENT = 4,
+ IA_GOFO_MSG_ERR_GENERAL_INDIRECT_REF_PTR_INVALID = 5,
+ IA_GOFO_MSG_ERR_GENERAL_INVALID_MSG_TYPE = 6,
+ IA_GOFO_MSG_ERR_GENERAL_SYSCOM_FAIL = 7,
+ IA_GOFO_MSG_ERR_GENERAL_N
+};
+
+#pragma pack(push, 1)
+#define IA_GOFO_MSG_TYPE_RESERVED 0
+#define IA_GOFO_MSG_TYPE_INDIRECT 1
+#define IA_GOFO_MSG_TYPE_LOG 2
+#define IA_GOFO_MSG_TYPE_GENERAL_ERR 3
+
+struct ia_gofo_msg_header {
+ struct ia_gofo_tlv_header tlv_header;
+ struct ia_gofo_tlv_list msg_options;
+ u64 user_token;
+};
+
+struct ia_gofo_msg_header_ack {
+ struct ia_gofo_msg_header header;
+ struct ia_gofo_msg_err err;
+};
+
+struct ia_gofo_msg_general_err {
+ struct ia_gofo_msg_header_ack header;
+};
+
+#pragma pack(pop)
+
+#pragma pack(push, 1)
+enum ia_gofo_msg_link_streaming_mode {
+ IA_GOFO_MSG_LINK_STREAMING_MODE_SOFF = 0,
+ IA_GOFO_MSG_LINK_STREAMING_MODE_DOFF = 1,
+ IA_GOFO_MSG_LINK_STREAMING_MODE_BCLM = 2,
+ IA_GOFO_MSG_LINK_STREAMING_MODE_BCSM_FIX = 3,
+ IA_GOFO_MSG_LINK_STREAMING_MODE_N
+};
+
+enum ia_gofo_soc_pbk_instance_id {
+ IA_GOFO_SOC_PBK_ID0 = 0,
+ IA_GOFO_SOC_PBK_ID1 = 1,
+ IA_GOFO_SOC_PBK_ID_N
+};
+
+#define IA_GOFO_MSG_LINK_PBK_MAX_SLOTS (2U)
+
+struct ia_gofo_msg_indirect {
+ struct ia_gofo_msg_header header;
+ struct ia_gofo_tlv_header ref_header;
+ ia_gofo_addr_t ref_msg_ptr;
+};
+
+#pragma pack(pop)
+
+#pragma pack(push, 1)
+#define IA_GOFO_MSG_LOG_MAX_PARAMS (4U)
+#define IA_GOFO_MSG_LOG_DOC_FMT_ID_MIN (0U)
+
+#define IA_GOFO_MSG_LOG_DOC_FMT_ID_MAX (4095U)
+#define IA_GOFO_MSG_LOG_FMT_ID_INVALID (0xfffffffU)
+
+struct ia_gofo_msg_log_info {
+ u16 log_counter;
+ u8 msg_parameter_types;
+ /* [0:0] is_out_of_order, [1:3] logger_channel, [4:7] reserved */
+ u8 logger_opts;
+ u32 fmt_id;
+ u32 params[IA_GOFO_MSG_LOG_MAX_PARAMS];
+};
+
+struct ia_gofo_msg_log_info_ts {
+ u64 msg_ts;
+ struct ia_gofo_msg_log_info log_info;
+};
+
+struct ia_gofo_msg_log {
+ struct ia_gofo_msg_header header;
+ struct ia_gofo_msg_log_info_ts log_info_ts;
+};
+
+#pragma pack(pop)
+
+#define IA_GOFO_MSG_ABI_OUT_ACK_QUEUE_ID (0U)
+#define IA_GOFO_MSG_ABI_OUT_LOG_QUEUE_ID (1U)
+#define IA_GOFO_MSG_ABI_IN_DEV_QUEUE_ID (2U)
+
+#endif
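
For context, a brief illustration of how the definitions above fit together. The first initializer is a direct use of IA_GOFO_MSG_VERSION_INIT(); the sizing helper is only a sketch that assumes tlv_len32 counts 32-bit words including the header and that items are padded to TLV_ITEM_ALIGNMENT, which this header does not spell out, so treat those semantics as an assumption rather than the firmware's definition.

/* Direct use of the version initializer macro defined above. */
static const struct ia_gofo_version_s example_abi_version =
	IA_GOFO_MSG_VERSION_INIT(1U, 0U, 0U, 0U);

/*
 * Sketch only: size a TLV item, assuming tlv_len32 is expressed in
 * 32-bit words, includes the header, and is padded to
 * TLV_ITEM_ALIGNMENT.  These semantics are assumed, not documented here.
 */
static u16 example_tlv_len32(size_t payload_bytes)
{
	size_t total = sizeof(struct ia_gofo_tlv_header) + payload_bytes;

	return (u16)(ALIGN(total, TLV_ITEM_ALIGNMENT) / sizeof(u32));
}
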
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h
new file mode 100644
index 000000000000..c3f62aaedd86
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_CONFIG_ABI_H
+#define IPU7_FW_CONFIG_ABI_H
+
+#include <linux/types.h>
+
+#define IPU_CONFIG_ABI_WDT_TIMER_DISABLED 0U
+#define IPU_CONFIG_ABI_CMD_TIMER_DISABLED 0U
+
+struct ipu7_wdt_abi {
+ u32 wdt_timer1_us;
+ u32 wdt_timer2_us;
+};
+
+#endif
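
As a small, hedged illustration of the macros above, a watchdog configuration that leaves both timers disabled; the struct and macro come from this header, while the variable name is invented for the example.

static const struct ipu7_wdt_abi example_wdt_disabled = {
	.wdt_timer1_us = IPU_CONFIG_ABI_WDT_TIMER_DISABLED,
	.wdt_timer2_us = IPU_CONFIG_ABI_WDT_TIMER_DISABLED,
};
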
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h
new file mode 100644
index 000000000000..f161a605c500
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_INSYS_CONFIG_ABI_H
+#define IPU7_FW_INSYS_CONFIG_ABI_H
+
+#include "ipu7_fw_boot_abi.h"
+#include "ipu7_fw_config_abi.h"
+#include "ipu7_fw_isys_abi.h"
+
+struct ipu7_insys_config {
+ u32 timeout_val_ms;
+ struct ia_gofo_logger_config logger_config;
+ struct ipu7_wdt_abi wdt_config;
+};
+
+#endif
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h
new file mode 100644
index 000000000000..c42d0b7a2627
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_ISYS_ABI_H
+#define IPU7_FW_ISYS_ABI_H
+
+#include "ipu7_fw_common_abi.h"
+#include "ipu7_fw_isys_abi.h"
+
+#define IPU_INSYS_MAX_OUTPUT_QUEUES (3U)
+#define IPU_INSYS_STREAM_ID_MAX (16U)
+
+#define IPU_INSYS_MAX_INPUT_QUEUES (IPU_INSYS_STREAM_ID_MAX + 1U)
+#define IPU_INSYS_OUTPUT_FIRST_QUEUE (0U)
+#define IPU_INSYS_OUTPUT_LAST_QUEUE (IPU_INSYS_MAX_OUTPUT_QUEUES - 1U)
+#define IPU_INSYS_OUTPUT_MSG_QUEUE (IPU_INSYS_OUTPUT_FIRST_QUEUE)
+#define IPU_INSYS_OUTPUT_LOG_QUEUE (IPU_INSYS_OUTPUT_FIRST_QUEUE + 1U)
+#define IPU_INSYS_OUTPUT_RESERVED_QUEUE (IPU_INSYS_OUTPUT_LAST_QUEUE)
+#define IPU_INSYS_INPUT_FIRST_QUEUE (IPU_INSYS_MAX_OUTPUT_QUEUES)
+#define IPU_INSYS_INPUT_LAST_QUEUE \
+ (IPU_INSYS_INPUT_FIRST_QUEUE + IPU_INSYS_MAX_INPUT_QUEUES - 1U)
+#define IPU_INSYS_INPUT_DEV_QUEUE (IPU_INSYS_INPUT_FIRST_QUEUE)
+#define IPU_INSYS_INPUT_MSG_QUEUE (IPU_INSYS_INPUT_FIRST_QUEUE + 1U)
+#define IPU_INSYS_INPUT_MSG_MAX_QUEUE (IPU_INSYS_MAX_INPUT_QUEUES - 1U)
+
+#define MAX_OPINS_FOR_SINGLE_IPINS (3U)
+#define DEV_SEND_QUEUE_SIZE (IPU_INSYS_STREAM_ID_MAX)
+
+#define PIN_PLANES_MAX (4U)
+
+#define INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES_INPUT \
+ INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES
+
+typedef u64 ipu7_insys_return_token;
+
+enum ipu7_insys_resp_type {
+ IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE = 0,
+ IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK = 1,
+ IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK = 2,
+ IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK = 3,
+ IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK = 4,
+ IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK = 5,
+ IPU_INSYS_RESP_TYPE_PIN_DATA_READY = 6,
+ IPU_INSYS_RESP_TYPE_FRAME_SOF = 7,
+ IPU_INSYS_RESP_TYPE_FRAME_EOF = 8,
+ IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE = 9,
+ IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE = 10,
+ IPU_INSYS_RESP_TYPE_PWM_IRQ = 11,
+ N_IPU_INSYS_RESP_TYPE
+};
+
+enum ipu7_insys_send_type {
+ IPU_INSYS_SEND_TYPE_STREAM_OPEN = 0,
+ IPU_INSYS_SEND_TYPE_STREAM_START_AND_CAPTURE = 1,
+ IPU_INSYS_SEND_TYPE_STREAM_CAPTURE = 2,
+ IPU_INSYS_SEND_TYPE_STREAM_ABORT = 3,
+ IPU_INSYS_SEND_TYPE_STREAM_FLUSH = 4,
+ IPU_INSYS_SEND_TYPE_STREAM_CLOSE = 5,
+ N_IPU_INSYS_SEND_TYPE
+};
+
+enum ipu7_insys_mipi_vc {
+ IPU_INSYS_MIPI_VC_0 = 0,
+ IPU_INSYS_MIPI_VC_1 = 1,
+ IPU_INSYS_MIPI_VC_2 = 2,
+ IPU_INSYS_MIPI_VC_3 = 3,
+ IPU_INSYS_MIPI_VC_4 = 4,
+ IPU_INSYS_MIPI_VC_5 = 5,
+ IPU_INSYS_MIPI_VC_6 = 6,
+ IPU_INSYS_MIPI_VC_7 = 7,
+ IPU_INSYS_MIPI_VC_8 = 8,
+ IPU_INSYS_MIPI_VC_9 = 9,
+ IPU_INSYS_MIPI_VC_10 = 10,
+ IPU_INSYS_MIPI_VC_11 = 11,
+ IPU_INSYS_MIPI_VC_12 = 12,
+ IPU_INSYS_MIPI_VC_13 = 13,
+ IPU_INSYS_MIPI_VC_14 = 14,
+ IPU_INSYS_MIPI_VC_15 = 15,
+ N_IPU_INSYS_MIPI_VC
+};
+
+enum ipu7_insys_mipi_port {
+ IPU_INSYS_MIPI_PORT_0 = 0,
+ IPU_INSYS_MIPI_PORT_1 = 1,
+ IPU_INSYS_MIPI_PORT_2 = 2,
+ IPU_INSYS_MIPI_PORT_3 = 3,
+ IPU_INSYS_MIPI_PORT_4 = 4,
+ IPU_INSYS_MIPI_PORT_5 = 5,
+ NA_IPU_INSYS_MIPI_PORT
+};
+
+enum ipu7_insys_frame_format_type {
+ IPU_INSYS_FRAME_FORMAT_NV11 = 0,
+ IPU_INSYS_FRAME_FORMAT_NV12 = 1,
+ IPU_INSYS_FRAME_FORMAT_NV12_16 = 2,
+ IPU_INSYS_FRAME_FORMAT_NV12_TILEY = 3,
+ IPU_INSYS_FRAME_FORMAT_NV16 = 4,
+ IPU_INSYS_FRAME_FORMAT_NV21 = 5,
+ IPU_INSYS_FRAME_FORMAT_NV61 = 6,
+ IPU_INSYS_FRAME_FORMAT_YV12 = 7,
+ IPU_INSYS_FRAME_FORMAT_YV16 = 8,
+ IPU_INSYS_FRAME_FORMAT_YUV420 = 9,
+ IPU_INSYS_FRAME_FORMAT_YUV420_10 = 10,
+ IPU_INSYS_FRAME_FORMAT_YUV420_12 = 11,
+ IPU_INSYS_FRAME_FORMAT_YUV420_14 = 12,
+ IPU_INSYS_FRAME_FORMAT_YUV420_16 = 13,
+ IPU_INSYS_FRAME_FORMAT_YUV422 = 14,
+ IPU_INSYS_FRAME_FORMAT_YUV422_16 = 15,
+ IPU_INSYS_FRAME_FORMAT_UYVY = 16,
+ IPU_INSYS_FRAME_FORMAT_YUYV = 17,
+ IPU_INSYS_FRAME_FORMAT_YUV444 = 18,
+ IPU_INSYS_FRAME_FORMAT_YUV_LINE = 19,
+ IPU_INSYS_FRAME_FORMAT_RAW8 = 20,
+ IPU_INSYS_FRAME_FORMAT_RAW10 = 21,
+ IPU_INSYS_FRAME_FORMAT_RAW12 = 22,
+ IPU_INSYS_FRAME_FORMAT_RAW14 = 23,
+ IPU_INSYS_FRAME_FORMAT_RAW16 = 24,
+ IPU_INSYS_FRAME_FORMAT_RGB565 = 25,
+ IPU_INSYS_FRAME_FORMAT_PLANAR_RGB888 = 26,
+ IPU_INSYS_FRAME_FORMAT_RGBA888 = 27,
+ IPU_INSYS_FRAME_FORMAT_QPLANE6 = 28,
+ IPU_INSYS_FRAME_FORMAT_BINARY_8 = 29,
+ IPU_INSYS_FRAME_FORMAT_Y_8 = 30,
+ IPU_INSYS_FRAME_FORMAT_ARGB888 = 31,
+ IPU_INSYS_FRAME_FORMAT_BGRA888 = 32,
+ IPU_INSYS_FRAME_FORMAT_ABGR888 = 33,
+ N_IPU_INSYS_FRAME_FORMAT
+};
+
+#define IPU_INSYS_FRAME_FORMAT_RAW (IPU_INSYS_FRAME_FORMAT_RAW16)
+#define N_IPU_INSYS_MIPI_DATA_TYPE 0x40
+
+enum ipu7_insys_mipi_dt_rename_mode {
+ IPU_INSYS_MIPI_DT_NO_RENAME = 0,
+ IPU_INSYS_MIPI_DT_RENAMED_MODE = 1,
+ N_IPU_INSYS_MIPI_DT_MODE
+};
+
+#define IPU_INSYS_SEND_MSG_ENABLED 1U
+#define IPU_INSYS_SEND_MSG_DISABLED 0U
+
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF BIT(0)
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF BIT(1)
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF BIT(2)
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF BIT(3)
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED BIT(4)
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF_DISCARDED BIT(5)
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED BIT(6)
+#define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF_DISCARDED BIT(7)
+#define IPU_INSYS_STREAM_SYNC_MSG_ENABLE_MSG_SEND_RESP ( \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF | \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF | \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED | \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF_DISCARDED)
+#define IPU_INSYS_STREAM_SYNC_MSG_ENABLE_MSG_SEND_IRQ ( \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF | \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF | \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED | \
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF_DISCARDED)
+
+#define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_OPEN_DONE BIT(0)
+#define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_OPEN_DONE BIT(1)
+#define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_START_ACK BIT(2)
+#define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_START_ACK BIT(3)
+#define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_CLOSE_ACK BIT(4)
+#define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_CLOSE_ACK BIT(5)
+#define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_FLUSH_ACK BIT(6)
+#define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_FLUSH_ACK BIT(7)
+#define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_ABORT_ACK BIT(8)
+#define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_ABORT_ACK BIT(9)
+#define IPU_INSYS_STREAM_ENABLE_MSG_SEND_RESP ( \
+ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_OPEN_DONE | \
+ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_START_ACK | \
+ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_CLOSE_ACK | \
+ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_FLUSH_ACK | \
+ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_ABORT_ACK)
+#define IPU_INSYS_STREAM_ENABLE_MSG_SEND_IRQ ( \
+ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_OPEN_DONE | \
+ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_START_ACK | \
+ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_CLOSE_ACK | \
+ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_FLUSH_ACK | \
+ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_ABORT_ACK)
+
+#define IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_ACK BIT(0)
+#define IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_ACK BIT(1)
+#define IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_DONE BIT(2)
+#define IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_DONE BIT(3)
+#define IPU_INSYS_FRAME_MSG_SEND_RESP_PIN_DATA_READY BIT(4)
+#define IPU_INSYS_FRAME_MSG_SEND_IRQ_PIN_DATA_READY BIT(5)
+#define IPU_INSYS_FRAME_ENABLE_MSG_SEND_RESP ( \
+ IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_ACK | \
+ IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_DONE | \
+ IPU_INSYS_FRAME_MSG_SEND_RESP_PIN_DATA_READY)
+#define IPU_INSYS_FRAME_ENABLE_MSG_SEND_IRQ ( \
+ IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_ACK | \
+ IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_DONE | \
+ IPU_INSYS_FRAME_MSG_SEND_IRQ_PIN_DATA_READY)
+
+enum ipu7_insys_output_link_dest {
+ IPU_INSYS_OUTPUT_LINK_DEST_MEM = 0,
+ IPU_INSYS_OUTPUT_LINK_DEST_PSYS = 1,
+ IPU_INSYS_OUTPUT_LINK_DEST_IPU_EXTERNAL = 2
+};
+
+enum ipu7_insys_dpcm_type {
+ IPU_INSYS_DPCM_TYPE_DISABLED = 0,
+ IPU_INSYS_DPCM_TYPE_10_8_10 = 1,
+ IPU_INSYS_DPCM_TYPE_12_8_12 = 2,
+ IPU_INSYS_DPCM_TYPE_12_10_12 = 3,
+ N_IPU_INSYS_DPCM_TYPE
+};
+
+enum ipu7_insys_dpcm_predictor {
+ IPU_INSYS_DPCM_PREDICTOR_1 = 0,
+ IPU_INSYS_DPCM_PREDICTOR_2 = 1,
+ N_IPU_INSYS_DPCM_PREDICTOR
+};
+
+enum ipu7_insys_send_queue_token_flag {
+ IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_NONE = 0,
+ IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_FLUSH_FORCE = 1
+};
+
+#pragma pack(push, 1)
+struct ipu7_insys_resolution {
+ u32 width;
+ u32 height;
+};
+
+struct ipu7_insys_capture_output_pin_payload {
+ u64 user_token;
+ ia_gofo_addr_t addr;
+ u8 pad[4];
+};
+
+struct ipu7_insys_output_link {
+ u32 buffer_lines;
+ u16 foreign_key;
+ u16 granularity_pointer_update;
+ u8 msg_link_streaming_mode;
+ u8 pbk_id;
+ u8 pbk_slot_id;
+ u8 dest;
+ u8 use_sw_managed;
+ u8 is_snoop;
+ u8 pad[2];
+};
+
+struct ipu7_insys_output_cropping {
+ u16 line_top;
+ u16 line_bottom;
+};
+
+struct ipu7_insys_output_dpcm {
+ u8 enable;
+ u8 type;
+ u8 predictor;
+ u8 pad;
+};
+
+struct ipu7_insys_output_pin {
+ struct ipu7_insys_output_link link;
+ struct ipu7_insys_output_cropping crop;
+ struct ipu7_insys_output_dpcm dpcm;
+ u32 stride;
+ u16 ft;
+ u8 send_irq;
+ u8 input_pin_id;
+ u8 early_ack_en;
+ u8 pad[3];
+};
+
+struct ipu7_insys_input_pin {
+ struct ipu7_insys_resolution input_res;
+ u16 sync_msg_map;
+ u8 dt;
+ u8 disable_mipi_unpacking;
+ u8 dt_rename_mode;
+ u8 mapped_dt;
+ u8 pad[2];
+};
+
+struct ipu7_insys_stream_cfg {
+ struct ipu7_insys_input_pin input_pins[4];
+ struct ipu7_insys_output_pin output_pins[4];
+ u16 stream_msg_map;
+ u8 port_id;
+ u8 vc;
+ u8 nof_input_pins;
+ u8 nof_output_pins;
+ u8 pad[2];
+};
+
+struct ipu7_insys_buffset {
+ struct ipu7_insys_capture_output_pin_payload output_pins[4];
+ u8 capture_msg_map;
+ u8 frame_id;
+ u8 skip_frame;
+ u8 pad[5];
+};
+
+struct ipu7_insys_resp {
+ u64 buf_id;
+ struct ipu7_insys_capture_output_pin_payload pin;
+ struct ia_gofo_msg_err error_info;
+ u32 timestamp[2];
+ u8 type;
+ u8 msg_link_streaming_mode;
+ u8 stream_id;
+ u8 pin_id;
+ u8 frame_id;
+ u8 skip_frame;
+ u8 pad[2];
+};
+
+struct ipu7_insys_resp_queue_token {
+ struct ipu7_insys_resp resp_info;
+};
+
+struct ipu7_insys_send_queue_token {
+ u64 buf_handle;
+ ia_gofo_addr_t addr;
+ u16 stream_id;
+ u8 send_type;
+ u8 flag;
+};
+
+#pragma pack(pop)
+
+enum insys_msg_err_stream {
+ INSYS_MSG_ERR_STREAM_OK = IA_GOFO_MSG_ERR_OK,
+ INSYS_MSG_ERR_STREAM_STREAM_ID = 1,
+ INSYS_MSG_ERR_STREAM_MAX_OPINS = 2,
+ INSYS_MSG_ERR_STREAM_MAX_IPINS = 3,
+ INSYS_MSG_ERR_STREAM_STREAM_MESSAGES_MAP = 4,
+ INSYS_MSG_ERR_STREAM_SYNC_MESSAGES_MAP = 5,
+ INSYS_MSG_ERR_STREAM_SENSOR_TYPE = 6,
+ INSYS_MSG_ERR_STREAM_FOREIGN_KEY = 7,
+ INSYS_MSG_ERR_STREAM_STREAMING_MODE = 8,
+ INSYS_MSG_ERR_STREAM_DPCM_EN = 9,
+ INSYS_MSG_ERR_STREAM_DPCM_TYPE = 10,
+ INSYS_MSG_ERR_STREAM_DPCM_PREDICTOR = 11,
+ INSYS_MSG_ERR_STREAM_GRANULARITY_POINTER_UPDATE = 12,
+ INSYS_MSG_ERR_STREAM_MPF_LUT_ENTRY_RESOURCES_BUSY = 13,
+ INSYS_MSG_ERR_STREAM_MPF_DEV_ID = 14,
+ INSYS_MSG_ERR_STREAM_BUFFER_LINES = 15,
+ INSYS_MSG_ERR_STREAM_IPIN_ID = 16,
+ INSYS_MSG_ERR_STREAM_DATA_TYPE = 17,
+ INSYS_MSG_ERR_STREAM_STREAMING_PROTOCOL_STATE = 18,
+ INSYS_MSG_ERR_STREAM_SYSCOM_FLUSH = 19,
+ INSYS_MSG_ERR_STREAM_MIPI_VC = 20,
+ INSYS_MSG_ERR_STREAM_STREAM_SRC = 21,
+ INSYS_MSG_ERR_STREAM_PBK_ID = 22,
+ INSYS_MSG_ERR_STREAM_CMD_QUEUE_DEALLOCATE = 23,
+ INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES = 24,
+ INSYS_MSG_ERR_STREAM_IPIN_CONFIGURATION = 25,
+ INSYS_MSG_ERR_STREAM_INVALID_STATE = 26,
+ INSYS_MSG_ERR_STREAM_SW_MANAGED = 27,
+ INSYS_MSG_ERR_STREAM_PBK_SLOT_ID = 28,
+ INSYS_MSG_ERR_STREAM_FLUSH_TIMEOUT = 29,
+ INSYS_MSG_ERR_STREAM_IPIN_WIDTH = 30,
+ INSYS_MSG_ERR_STREAM_IPIN_HEIGHT = 31,
+ INSYS_MSG_ERR_STREAM_OUTPUT_PIN_EARLY_ACK_EN = 32,
+ INSYS_MSG_ERR_STREAM_INCONSISTENT_PARAMS = 33,
+ INSYS_MSG_ERR_STREAM_PLANE_COUNT = 34,
+ INSYS_MSG_ERR_STREAM_FRAME_FORMAT_TYPE = 35,
+ INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES_OUTPUT = 36,
+ INSYS_MSG_ERR_STREAM_WIDTH_OUTPUT_SIZE = 37,
+ INSYS_MSG_ERR_STREAM_CLOSED = 38,
+ INSYS_MSG_ERR_STREAM_N
+};
+
+enum insys_msg_err_capture {
+ INSYS_MSG_ERR_CAPTURE_OK = IA_GOFO_MSG_ERR_OK,
+ INSYS_MSG_ERR_CAPTURE_STREAM_ID = 1,
+ INSYS_MSG_ERR_CAPTURE_PAYLOAD_PTR = 2,
+ INSYS_MSG_ERR_CAPTURE_MEM_SLOT = 3,
+ INSYS_MSG_ERR_CAPTURE_STREAMING_MODE = 4,
+ INSYS_MSG_ERR_CAPTURE_AVAILABLE_CMD_SLOT = 5,
+ INSYS_MSG_ERR_CAPTURE_CONSUMED_CMD_SLOT = 6,
+ INSYS_MSG_ERR_CAPTURE_CMD_SLOT_PAYLOAD_PTR = 7,
+ INSYS_MSG_ERR_CAPTURE_CMD_PREPARE = 8,
+ INSYS_MSG_ERR_CAPTURE_OUTPUT_PIN = 9,
+ INSYS_MSG_ERR_CAPTURE_SYNC_FRAME_DROP = 10,
+ INSYS_MSG_ERR_CAPTURE_FRAME_MESSAGES_MAP = 11,
+ INSYS_MSG_ERR_CAPTURE_TIMEOUT = 12,
+ INSYS_MSG_ERR_CAPTURE_INVALID_STREAM_STATE = 13,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_MULTIBIT_PH_ERROR_DETECTED = 14,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_PAYLOAD_CRC_ERROR = 15,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_INPUT_DATA_LOSS_ELASTIC_FIFO_OVFL = 16,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_PIXEL_BUFFER_OVERFLOW = 17,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_BAD_FRAME_DIM = 18,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_PHY_SYNC_ERR = 19,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_SECURE_TOUCH = 20,
+ INSYS_MSG_ERR_CAPTURE_HW_ERR_MASTER_SLAVE_SYNC_ERR = 21,
+ INSYS_MSG_ERR_CAPTURE_FRAME_SKIP_ERR = 22,
+ INSYS_MSG_ERR_CAPTURE_FE_INPUT_FIFO_OVERFLOW_ERR = 23,
+ INSYS_MSG_ERR_CAPTURE_CMD_SUBMIT_TO_HW = 24,
+ INSYS_MSG_ERR_CAPTURE_N
+};
+
+enum insys_msg_err_groups {
+ INSYS_MSG_ERR_GROUP_RESERVED = IA_GOFO_MSG_ERR_GROUP_RESERVED,
+ INSYS_MSG_ERR_GROUP_GENERAL = IA_GOFO_MSG_ERR_GROUP_GENERAL,
+ INSYS_MSG_ERR_GROUP_STREAM = 2,
+ INSYS_MSG_ERR_GROUP_CAPTURE = 3,
+ INSYS_MSG_ERR_GROUP_N,
+};
+
+#endif
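
To show how the SEND_RESP/SEND_IRQ flags above are meant to be combined, here is a rough sketch of filling the message maps of a stream configuration. It assumes stream_msg_map and sync_msg_map are plain bitmasks of those flags, which the header only implies through the macro and field names, so the helper is illustrative and not taken from the driver.

static void example_fill_msg_maps(struct ipu7_insys_stream_cfg *cfg)
{
	/* All stream response messages, plus an IRQ on stream open. */
	cfg->stream_msg_map = IPU_INSYS_STREAM_ENABLE_MSG_SEND_RESP |
			      IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_OPEN_DONE;

	/* SOF/EOF responses without interrupts on the first input pin. */
	cfg->input_pins[0].sync_msg_map =
		IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF |
		IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF;
}
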
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h
new file mode 100644
index 000000000000..8a78dd0936df
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_MSG_ABI_H
+#define IPU7_FW_MSG_ABI_H
+
+#include "ipu7_fw_common_abi.h"
+
+#pragma pack(push, 1)
+enum ipu7_msg_type {
+ IPU_MSG_TYPE_RESERVED = IA_GOFO_MSG_TYPE_RESERVED,
+ IPU_MSG_TYPE_INDIRECT = IA_GOFO_MSG_TYPE_INDIRECT,
+ IPU_MSG_TYPE_DEV_LOG = IA_GOFO_MSG_TYPE_LOG,
+ IPU_MSG_TYPE_GENERAL_ERR = IA_GOFO_MSG_TYPE_GENERAL_ERR,
+ IPU_MSG_TYPE_DEV_OPEN = 4,
+ IPU_MSG_TYPE_DEV_OPEN_ACK = 5,
+ IPU_MSG_TYPE_GRAPH_OPEN = 6,
+ IPU_MSG_TYPE_GRAPH_OPEN_ACK = 7,
+ IPU_MSG_TYPE_TASK_REQ = 8,
+ IPU_MSG_TYPE_TASK_DONE = 9,
+ IPU_MSG_TYPE_GRAPH_CLOSE = 10,
+ IPU_MSG_TYPE_GRAPH_CLOSE_ACK = 11,
+ IPU_MSG_TYPE_DEV_CLOSE = 12,
+ IPU_MSG_TYPE_DEV_CLOSE_ACK = 13,
+ IPU_MSG_TYPE_TERM_EVENT = 14,
+ IPU_MSG_TYPE_N,
+};
+
+#define IPU_MSG_MAX_NODE_TERMS (64U)
+#define IPU_MSG_MAX_FRAGS (7U)
+
+enum ipu7_msg_node_type {
+ IPU_MSG_NODE_TYPE_PAD = 0,
+ IPU_MSG_NODE_TYPE_BASE,
+ IPU_MSG_NODE_TYPE_N
+};
+
+#define IPU_MSG_NODE_MAX_DEVICES (128U)
+#define DEB_NUM_UINT32 (IPU_MSG_NODE_MAX_DEVICES / (sizeof(u32) * 8U))
+
+typedef u32 ipu7_msg_teb_t[2];
+typedef u32 ipu7_msg_deb_t[DEB_NUM_UINT32];
+
+#define IPU_MSG_NODE_MAX_ROUTE_ENABLES (128U)
+#define RBM_NUM_UINT32 (IPU_MSG_NODE_MAX_ROUTE_ENABLES / (sizeof(u32) * 8U))
+
+typedef u32 ipu7_msg_rbm_t[RBM_NUM_UINT32];
+
+enum ipu7_msg_node_profile_type {
+ IPU_MSG_NODE_PROFILE_TYPE_PAD = 0,
+ IPU_MSG_NODE_PROFILE_TYPE_BASE,
+ IPU_MSG_NODE_PROFILE_TYPE_CB,
+ IPU_MSG_NODE_PROFILE_TYPE_N
+};
+
+struct ipu7_msg_node_profile {
+ struct ia_gofo_tlv_header tlv_header;
+ ipu7_msg_teb_t teb;
+};
+
+struct ipu7_msg_cb_profile {
+ struct ipu7_msg_node_profile profile_base;
+ ipu7_msg_deb_t deb;
+ ipu7_msg_rbm_t rbm;
+ ipu7_msg_rbm_t reb;
+};
+
+#define IPU_MSG_NODE_MAX_PROFILES (2U)
+#define IPU_MSG_NODE_DEF_PROFILE_IDX (0U)
+#define IPU_MSG_NODE_RSRC_ID_EXT_IP (0xffU)
+
+#define IPU_MSG_NODE_DONT_CARE_TEB_HI (0xffffffffU)
+#define IPU_MSG_NODE_DONT_CARE_TEB_LO (0xffffffffU)
+#define IPU_MSG_NODE_RSRC_ID_IS (0xfeU)
+
+struct ipu7_msg_node {
+ struct ia_gofo_tlv_header tlv_header;
+ u8 node_rsrc_id;
+ u8 node_ctx_id;
+ u8 num_frags;
+ u8 reserved[1];
+ struct ia_gofo_tlv_list profiles_list;
+ struct ia_gofo_tlv_list terms_list;
+ struct ia_gofo_tlv_list node_options;
+};
+
+enum ipu7_msg_node_option_types {
+ IPU_MSG_NODE_OPTION_TYPES_PADDING = 0,
+ IPU_MSG_NODE_OPTION_TYPES_N
+};
+
+#pragma pack(pop)
+
+#pragma pack(push, 1)
+
+enum ipu7_msg_link_type {
+ IPU_MSG_LINK_TYPE_PAD = 0,
+ IPU_MSG_LINK_TYPE_GENERIC = 1,
+ IPU_MSG_LINK_TYPE_N
+};
+
+enum ipu7_msg_link_option_types {
+ IPU_MSG_LINK_OPTION_TYPES_PADDING = 0,
+ IPU_MSG_LINK_OPTION_TYPES_CMPRS = 1,
+ IPU_MSG_LINK_OPTION_TYPES_N
+};
+
+enum ipu7_msg_link_cmprs_option_bit_depth {
+ IPU_MSG_LINK_CMPRS_OPTION_8BPP = 0,
+ IPU_MSG_LINK_CMPRS_OPTION_10BPP = 1,
+ IPU_MSG_LINK_CMPRS_OPTION_12BPP = 2,
+};
+
+#define IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM (128U)
+#define IPU_MSG_LINK_CMPRS_LOSSY_CFG_PAYLOAD_SIZE (5U)
+#define IPU_MSG_LINK_CMPRS_SPACE_SAVING_NUM_MAX \
+ (IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM - 1U)
+
+struct ipu7_msg_link_cmprs_plane_desc {
+ u8 plane_enable;
+ u8 cmprs_enable;
+ u8 encoder_plane_id;
+ u8 decoder_plane_id;
+ u8 cmprs_is_lossy;
+ u8 cmprs_is_footprint;
+ u8 bit_depth;
+ u8 space_saving_numerator;
+ u32 pixels_offset;
+ u32 ts_offset;
+ u32 tile_row_to_tile_row_stride;
+ u32 rows_of_tiles;
+ u32 lossy_cfg[IPU_MSG_LINK_CMPRS_LOSSY_CFG_PAYLOAD_SIZE];
+};
+
+#define IPU_MSG_LINK_CMPRS_MAX_PLANES (2U)
+#define IPU_MSG_LINK_CMPRS_NO_ALIGN_INTERVAL (0U)
+#define IPU_MSG_LINK_CMPRS_MIN_ALIGN_INTERVAL (16U)
+#define IPU_MSG_LINK_CMPRS_MAX_ALIGN_INTERVAL (1024U)
+struct ipu7_msg_link_cmprs_option {
+ struct ia_gofo_tlv_header header;
+ u32 cmprs_buf_size;
+ u16 align_interval;
+ u8 reserved[2];
+ struct ipu7_msg_link_cmprs_plane_desc plane_descs[2];
+};
+
+struct ipu7_msg_link_ep {
+ u8 node_ctx_id;
+ u8 term_id;
+};
+
+struct ipu7_msg_link_ep_pair {
+ struct ipu7_msg_link_ep ep_src;
+ struct ipu7_msg_link_ep ep_dst;
+};
+
+#define IPU_MSG_LINK_FOREIGN_KEY_NONE (65535U)
+#define IPU_MSG_LINK_FOREIGN_KEY_MAX (64U)
+#define IPU_MSG_LINK_PBK_ID_DONT_CARE (255U)
+#define IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE (255U)
+#define IPU_MSG_LINK_TERM_ID_DONT_CARE (0xffU)
+
+struct ipu7_msg_link {
+ struct ia_gofo_tlv_header tlv_header;
+ struct ipu7_msg_link_ep_pair endpoints;
+ u16 foreign_key;
+ u8 streaming_mode;
+ u8 pbk_id;
+ u8 pbk_slot_id;
+ u8 delayed_link;
+ u8 reserved[2];
+ struct ia_gofo_tlv_list link_options;
+};
+
+#pragma pack(pop)
+
+enum ipu7_msg_dev_state {
+ IPU_MSG_DEV_STATE_CLOSED = 0,
+ IPU_MSG_DEV_STATE_OPEN_WAIT = 1,
+ IPU_MSG_DEV_STATE_OPEN = 2,
+ IPU_MSG_DEV_STATE_CLOSE_WAIT = 3,
+ IPU_MSG_DEV_STATE_N
+};
+
+enum ipu7_msg_graph_state {
+ IPU_MSG_GRAPH_STATE_CLOSED = 0,
+ IPU_MSG_GRAPH_STATE_OPEN_WAIT = 1,
+ IPU_MSG_GRAPH_STATE_OPEN = 2,
+ IPU_MSG_GRAPH_STATE_CLOSE_WAIT = 3,
+ IPU_MSG_GRAPH_STATE_N
+};
+
+enum ipu7_msg_task_state {
+ IPU_MSG_TASK_STATE_DONE = 0,
+ IPU_MSG_TASK_STATE_WAIT_DONE = 1,
+ IPU_MSG_TASK_STATE_N
+};
+
+enum ipu7_msg_err_groups {
+ IPU_MSG_ERR_GROUP_RESERVED = IA_GOFO_MSG_ERR_GROUP_RESERVED,
+ IPU_MSG_ERR_GROUP_GENERAL = IA_GOFO_MSG_ERR_GROUP_GENERAL,
+ IPU_MSG_ERR_GROUP_DEVICE = 2,
+ IPU_MSG_ERR_GROUP_GRAPH = 3,
+ IPU_MSG_ERR_GROUP_TASK = 4,
+ IPU_MSG_ERR_GROUP_N,
+};
+
+#pragma pack(push, 1)
+struct ipu7_msg_task {
+ struct ia_gofo_msg_header header;
+ u8 graph_id;
+ u8 profile_idx;
+ u8 node_ctx_id;
+ u8 frame_id;
+ u8 frag_id;
+ u8 req_done_msg;
+ u8 req_done_irq;
+ u8 reserved[1];
+ ipu7_msg_teb_t payload_reuse_bm;
+ ia_gofo_addr_t term_buffers[IPU_MSG_MAX_NODE_TERMS];
+};
+
+struct ipu7_msg_task_done {
+ struct ia_gofo_msg_header_ack header;
+ u8 graph_id;
+ u8 frame_id;
+ u8 node_ctx_id;
+ u8 profile_idx;
+ u8 frag_id;
+ u8 reserved[3];
+};
+
+enum ipu7_msg_err_task {
+ IPU_MSG_ERR_TASK_OK = IA_GOFO_MSG_ERR_OK,
+ IPU_MSG_ERR_TASK_GRAPH_ID = 1,
+ IPU_MSG_ERR_TASK_NODE_CTX_ID = 2,
+ IPU_MSG_ERR_TASK_PROFILE_IDX = 3,
+ IPU_MSG_ERR_TASK_CTX_MEMORY_TASK = 4,
+ IPU_MSG_ERR_TASK_TERM_PAYLOAD_PTR = 5,
+ IPU_MSG_ERR_TASK_FRAME_ID = 6,
+ IPU_MSG_ERR_TASK_FRAG_ID = 7,
+ IPU_MSG_ERR_TASK_EXEC_EXT = 8,
+ IPU_MSG_ERR_TASK_EXEC_SBX = 9,
+ IPU_MSG_ERR_TASK_EXEC_INT = 10,
+ IPU_MSG_ERR_TASK_EXEC_UNKNOWN = 11,
+ IPU_MSG_ERR_TASK_PRE_EXEC = 12,
+ IPU_MSG_ERR_TASK_N
+};
+
+#pragma pack(pop)
+
+#pragma pack(push, 1)
+enum ipu7_msg_term_type {
+ IPU_MSG_TERM_TYPE_PAD = 0,
+ IPU_MSG_TERM_TYPE_BASE,
+ IPU_MSG_TERM_TYPE_N,
+};
+
+#define IPU_MSG_TERM_EVENT_TYPE_NONE 0U
+#define IPU_MSG_TERM_EVENT_TYPE_PROGRESS 1U
+#define IPU_MSG_TERM_EVENT_TYPE_N (IPU_MSG_TERM_EVENT_TYPE_PROGRESS + 1U)
+
+struct ipu7_msg_term {
+ struct ia_gofo_tlv_header tlv_header;
+ u8 term_id;
+ u8 event_req_bm;
+ u8 reserved[2];
+ u32 payload_size;
+ struct ia_gofo_tlv_list term_options;
+};
+
+enum ipu7_msg_term_option_types {
+ IPU_MSG_TERM_OPTION_TYPES_PADDING = 0,
+ IPU_MSG_TERM_OPTION_TYPES_N
+};
+
+struct ipu7_msg_term_event {
+ struct ia_gofo_msg_header header;
+ u8 graph_id;
+ u8 frame_id;
+ u8 node_ctx_id;
+ u8 profile_idx;
+ u8 frag_id;
+ u8 term_id;
+ u8 event_type;
+ u8 reserved[1];
+ u64 event_ts;
+};
+
+#pragma pack(pop)
+
+#pragma pack(push, 1)
+#define IPU_MSG_DEVICE_SEND_MSG_ENABLED 1U
+#define IPU_MSG_DEVICE_SEND_MSG_DISABLED 0U
+
+#define IPU_MSG_DEVICE_OPEN_SEND_RESP BIT(0)
+#define IPU_MSG_DEVICE_OPEN_SEND_IRQ BIT(1)
+
+#define IPU_MSG_DEVICE_CLOSE_SEND_RESP BIT(0)
+#define IPU_MSG_DEVICE_CLOSE_SEND_IRQ BIT(1)
+
+struct ipu7_msg_dev_open {
+ struct ia_gofo_msg_header header;
+ u32 max_graphs;
+ u8 dev_msg_map;
+ u8 enable_power_gating;
+ u8 reserved[2];
+};
+
+struct ipu7_msg_dev_open_ack {
+ struct ia_gofo_msg_header_ack header;
+};
+
+struct ipu7_msg_dev_close {
+ struct ia_gofo_msg_header header;
+ u8 dev_msg_map;
+ u8 reserved[7];
+};
+
+struct ipu7_msg_dev_close_ack {
+ struct ia_gofo_msg_header_ack header;
+};
+
+enum ipu7_msg_err_device {
+ IPU_MSG_ERR_DEVICE_OK = IA_GOFO_MSG_ERR_OK,
+ IPU_MSG_ERR_DEVICE_MAX_GRAPHS = 1,
+ IPU_MSG_ERR_DEVICE_MSG_MAP = 2,
+ IPU_MSG_ERR_DEVICE_N
+};
+
+#pragma pack(pop)
+
+#pragma pack(push, 1)
+#define IPU_MSG_GRAPH_ID_UNKNOWN (0xffU)
+#define IPU_MSG_GRAPH_SEND_MSG_ENABLED 1U
+#define IPU_MSG_GRAPH_SEND_MSG_DISABLED 0U
+
+#define IPU_MSG_GRAPH_OPEN_SEND_RESP BIT(0)
+#define IPU_MSG_GRAPH_OPEN_SEND_IRQ BIT(1)
+
+#define IPU_MSG_GRAPH_CLOSE_SEND_RESP BIT(0)
+#define IPU_MSG_GRAPH_CLOSE_SEND_IRQ BIT(1)
+
+struct ipu7_msg_graph_open {
+ struct ia_gofo_msg_header header;
+ struct ia_gofo_tlv_list nodes;
+ struct ia_gofo_tlv_list links;
+ u8 graph_id;
+ u8 graph_msg_map;
+ u8 reserved[6];
+};
+
+enum ipu7_msg_graph_ack_option_types {
+ IPU_MSG_GRAPH_ACK_OPTION_TYPES_PADDING = 0,
+ IPU_MSG_GRAPH_ACK_TASK_Q_INFO,
+ IPU_MSG_GRAPH_ACK_OPTION_TYPES_N
+};
+
+struct ipu7_msg_graph_open_ack_task_q_info {
+ struct ia_gofo_tlv_header header;
+ u8 node_ctx_id;
+ u8 q_id;
+ u8 reserved[2];
+};
+
+struct ipu7_msg_graph_open_ack {
+ struct ia_gofo_msg_header_ack header;
+ u8 graph_id;
+ u8 reserved[7];
+};
+
+struct ipu7_msg_graph_close {
+ struct ia_gofo_msg_header header;
+ u8 graph_id;
+ u8 graph_msg_map;
+ u8 reserved[6];
+};
+
+struct ipu7_msg_graph_close_ack {
+ struct ia_gofo_msg_header_ack header;
+ u8 graph_id;
+ u8 reserved[7];
+};
+
+enum ipu7_msg_err_graph {
+ IPU_MSG_ERR_GRAPH_OK = IA_GOFO_MSG_ERR_OK,
+ IPU_MSG_ERR_GRAPH_GRAPH_STATE = 1,
+ IPU_MSG_ERR_GRAPH_MAX_GRAPHS = 2,
+ IPU_MSG_ERR_GRAPH_GRAPH_ID = 3,
+ IPU_MSG_ERR_GRAPH_NODE_CTX_ID = 4,
+ IPU_MSG_ERR_GRAPH_NODE_RSRC_ID = 5,
+ IPU_MSG_ERR_GRAPH_PROFILE_IDX = 6,
+ IPU_MSG_ERR_GRAPH_TERM_ID = 7,
+ IPU_MSG_ERR_GRAPH_TERM_PAYLOAD_SIZE = 8,
+ IPU_MSG_ERR_GRAPH_LINK_NODE_CTX_ID = 9,
+ IPU_MSG_ERR_GRAPH_LINK_TERM_ID = 10,
+ IPU_MSG_ERR_GRAPH_PROFILE_TYPE = 11,
+ IPU_MSG_ERR_GRAPH_NUM_FRAGS = 12,
+ IPU_MSG_ERR_GRAPH_QUEUE_ID_USAGE = 13,
+ IPU_MSG_ERR_GRAPH_QUEUE_OPEN = 14,
+ IPU_MSG_ERR_GRAPH_QUEUE_CLOSE = 15,
+ IPU_MSG_ERR_GRAPH_QUEUE_ID_TASK_REQ_MISMATCH = 16,
+ IPU_MSG_ERR_GRAPH_CTX_MEMORY_FGRAPH = 17,
+ IPU_MSG_ERR_GRAPH_CTX_MEMORY_NODE = 18,
+ IPU_MSG_ERR_GRAPH_CTX_MEMORY_NODE_PROFILE = 19,
+ IPU_MSG_ERR_GRAPH_CTX_MEMORY_TERM = 20,
+ IPU_MSG_ERR_GRAPH_CTX_MEMORY_LINK = 21,
+ IPU_MSG_ERR_GRAPH_CTX_MSG_MAP = 22,
+ IPU_MSG_ERR_GRAPH_CTX_FOREIGN_KEY = 23,
+ IPU_MSG_ERR_GRAPH_CTX_STREAMING_MODE = 24,
+ IPU_MSG_ERR_GRAPH_CTX_PBK_RSRC = 25,
+ IPU_MSG_ERR_GRAPH_UNSUPPORTED_EVENT_TYPE = 26,
+ IPU_MSG_ERR_GRAPH_TOO_MANY_EVENTS = 27,
+ IPU_MSG_ERR_GRAPH_CTX_MEMORY_CMPRS = 28,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_ALIGN_INTERVAL = 29,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_PLANE_ID = 30,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_UNSUPPORTED_MODE = 31,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_BIT_DEPTH = 32,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_STRIDE_ALIGNMENT = 33,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_SUB_BUFFER_ALIGNMENT = 34,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_LAYOUT_ORDER = 35,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_LAYOUT_OVERLAP = 36,
+ IPU_MSG_ERR_GRAPH_CTX_CMPRS_BUFFER_TOO_SMALL = 37,
+ IPU_MSG_ERR_GRAPH_CTX_DELAYED_LINK = 38,
+ IPU_MSG_ERR_GRAPH_N
+};
+
+#pragma pack(pop)
+
+#define FWPS_MSG_ABI_MAX_INPUT_QUEUES (60U)
+#define FWPS_MSG_ABI_MAX_OUTPUT_QUEUES (2U)
+#define FWPS_MSG_ABI_MAX_QUEUES \
+ (FWPS_MSG_ABI_MAX_OUTPUT_QUEUES + FWPS_MSG_ABI_MAX_INPUT_QUEUES)
+
+#define FWPS_MSG_ABI_OUT_ACK_QUEUE_ID (IA_GOFO_MSG_ABI_OUT_ACK_QUEUE_ID)
+#define FWPS_MSG_ABI_OUT_LOG_QUEUE_ID (IA_GOFO_MSG_ABI_OUT_LOG_QUEUE_ID)
+#if (FWPS_MSG_ABI_OUT_LOG_QUEUE_ID >= FWPS_MSG_ABI_MAX_OUTPUT_QUEUES)
+#error "Maximum output queues configuration is too small to fit ACK and LOG \
+queues"
+#endif
+#define FWPS_MSG_ABI_IN_DEV_QUEUE_ID (IA_GOFO_MSG_ABI_IN_DEV_QUEUE_ID)
+#define FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID (3U)
+#define FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID \
+ (FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID + 1U)
+
+#if (FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID >= FWPS_MSG_ABI_MAX_INPUT_QUEUES)
+#error "Maximum queues configuration is too small to fit minimum number of \
+useful queues"
+#endif
+
+#define FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID (FWPS_MSG_ABI_MAX_QUEUES - 1U)
+#define FWPS_MSG_ABI_IN_MAX_TASK_QUEUES \
+ (FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID - \
+ FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID + 1U)
+#define FWPS_MSG_ABI_OUT_FIRST_QUEUE_ID (FWPS_MSG_ABI_OUT_ACK_QUEUE_ID)
+#define FWPS_MSG_ABI_OUT_LAST_QUEUE_ID (FWPS_MSG_ABI_MAX_OUTPUT_QUEUES - 1U)
+#define FWPS_MSG_ABI_IN_FIRST_QUEUE_ID (FWPS_MSG_ABI_IN_DEV_QUEUE_ID)
+#define FWPS_MSG_ABI_IN_LAST_QUEUE_ID (FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID)
+
+#define FWPS_MSG_HOST2FW_MAX_SIZE (2U * 1024U)
+#define FWPS_MSG_FW2HOST_MAX_SIZE (256U)
+
+#endif
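
Working the PSYS queue layout out from the definitions above together with the queue IDs in ipu7_fw_common_abi.h (ACK = 0, LOG = 1, DEV = 2): the reserved input queue is ID 3, so task queues occupy IDs 4 through 61, which gives 58 task queues out of 62 queues in total. The checks below only restate that arithmetic and assume the kernel's static_assert() from <linux/build_bug.h>.

static_assert(FWPS_MSG_ABI_MAX_QUEUES == 62U);
static_assert(FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID == 4U);
static_assert(FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID == 61U);
static_assert(FWPS_MSG_ABI_IN_MAX_TASK_QUEUES == 58U);
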
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h
new file mode 100644
index 000000000000..0af04c8c6a88
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_PSYS_CONFIG_ABI_H_INCLUDED__
+#define IPU7_PSYS_CONFIG_ABI_H_INCLUDED__
+
+#include <linux/types.h>
+
+#include "ipu7_fw_boot_abi.h"
+#include "ipu7_fw_config_abi.h"
+
+struct ipu7_psys_config {
+ u32 use_debug_manifest;
+ u32 timeout_val_ms;
+ u32 compression_support_enabled;
+ struct ia_gofo_logger_config logger_config;
+ struct ipu7_wdt_abi wdt_config;
+ u8 ipu_psys_debug_bitmask;
+ u8 padding[3];
+};
+
+#endif
diff --git a/drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h b/drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h
new file mode 100644
index 000000000000..bfa5258d5b97
--- /dev/null
+++ b/drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_SYSCOM_ABI_H
+#define IPU7_FW_SYSCOM_ABI_H
+
+#include <linux/types.h>
+
+#include "ipu7_fw_common_abi.h"
+
+#pragma pack(push, 1)
+#define SYSCOM_QUEUE_MIN_CAPACITY 2U
+
+struct syscom_queue_params_config {
+ ia_gofo_addr_t token_array_mem;
+ u16 token_size_in_bytes;
+ u16 max_capacity;
+};
+
+struct syscom_config_s {
+ u16 max_output_queues;
+ u16 max_input_queues;
+};
+
+#pragma pack(pop)
+
+static inline struct syscom_queue_params_config *
+syscom_config_get_queue_configs(struct syscom_config_s *config)
+{
+ return (struct syscom_queue_params_config *)(&config[1]);
+}
+
+static inline const struct syscom_queue_params_config *
+syscom_config_get_queue_configs_const(const struct syscom_config_s *config)
+{
+ return (const struct syscom_queue_params_config *)(&config[1]);
+}
+
+#pragma pack(push, 1)
+struct syscom_queue_indices_s {
+ u32 read_index;
+ u32 write_index;
+};
+
+#pragma pack(pop)
+
+#endif
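
The two accessors above rely on the queue parameter array being placed immediately after the fixed struct syscom_config_s in one contiguous allocation, which is what &config[1] points at. A minimal sketch of sizing such a blob follows; the function name is invented for illustration. ipu7_boot_init_boot_config() further down sizes its boot blob on the same assumption, with struct ia_gofo_boot_config as the fixed part.

static size_t example_syscom_blob_size(u16 num_queues)
{
	return sizeof(struct syscom_config_s) +
	       (size_t)num_queues * sizeof(struct syscom_queue_params_config);
}
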
diff --git a/drivers/staging/media/ipu7/ipu7-boot.c b/drivers/staging/media/ipu7/ipu7-boot.c
new file mode 100644
index 000000000000..d7901ff78b38
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-boot.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 - 2025 Intel Corporation
+ */
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "abi/ipu7_fw_boot_abi.h"
+
+#include "ipu7.h"
+#include "ipu7-boot.h"
+#include "ipu7-bus.h"
+#include "ipu7-buttress-regs.h"
+#include "ipu7-dma.h"
+#include "ipu7-platform-regs.h"
+#include "ipu7-syscom.h"
+
+#define IPU_FW_START_STOP_TIMEOUT 2000
+#define IPU_BOOT_CELL_RESET_TIMEOUT (2 * USEC_PER_SEC)
+#define BOOT_STATE_IS_CRITICAL(s) IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(s)
+#define BOOT_STATE_IS_READY(s) ((s) == IA_GOFO_FW_BOOT_STATE_READY)
+#define BOOT_STATE_IS_INACTIVE(s) ((s) == IA_GOFO_FW_BOOT_STATE_INACTIVE)
+
+struct ipu7_boot_context {
+ u32 base;
+ u32 dmem_address;
+ u32 status_ctrl_reg;
+ u32 fw_start_address_reg;
+ u32 fw_code_base_reg;
+};
+
+static const struct ipu7_boot_context contexts[IPU_SUBSYS_NUM] = {
+ {
+ /* ISYS */
+ .dmem_address = IPU_ISYS_DMEM_OFFSET,
+ .status_ctrl_reg = BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS,
+ .fw_start_address_reg = BUTTRESS_REG_DRV_IS_UCX_START_ADDR,
+ .fw_code_base_reg = IS_UC_CTRL_BASE
+ },
+ {
+ /* PSYS */
+ .dmem_address = IPU_PSYS_DMEM_OFFSET,
+ .status_ctrl_reg = BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS,
+ .fw_start_address_reg = BUTTRESS_REG_DRV_PS_UCX_START_ADDR,
+ .fw_code_base_reg = PS_UC_CTRL_BASE
+ }
+};
+
+static u32 get_fw_boot_reg_addr(const struct ipu7_bus_device *adev,
+ enum ia_gofo_buttress_reg_id reg)
+{
+ u32 base = (adev->subsys == IPU_IS) ? 0U : (u32)IA_GOFO_FW_BOOT_ID_MAX;
+
+ return BUTTRESS_FW_BOOT_PARAMS_ENTRY(base + (u32)reg);
+}
+
+static void write_fw_boot_param(const struct ipu7_bus_device *adev,
+ enum ia_gofo_buttress_reg_id reg,
+ u32 val)
+{
+ void __iomem *base = adev->isp->base;
+
+ dev_dbg(&adev->auxdev.dev,
+ "write boot param reg: %d addr: %x val: 0x%x\n",
+ reg, get_fw_boot_reg_addr(adev, reg), val);
+ writel(val, base + get_fw_boot_reg_addr(adev, reg));
+}
+
+static u32 read_fw_boot_param(const struct ipu7_bus_device *adev,
+ enum ia_gofo_buttress_reg_id reg)
+{
+ void __iomem *base = adev->isp->base;
+
+ return readl(base + get_fw_boot_reg_addr(adev, reg));
+}
+
+static int ipu7_boot_cell_reset(const struct ipu7_bus_device *adev)
+{
+ const struct ipu7_boot_context *ctx = &contexts[adev->subsys];
+ const struct device *dev = &adev->auxdev.dev;
+ u32 ucx_ctrl_status = ctx->status_ctrl_reg;
+ u32 timeout = IPU_BOOT_CELL_RESET_TIMEOUT;
+ void __iomem *base = adev->isp->base;
+ u32 val, val2;
+ int ret;
+
+ dev_dbg(dev, "cell enter reset...\n");
+ val = readl(base + ucx_ctrl_status);
+ dev_dbg(dev, "cell_ctrl_reg addr = 0x%x, val = 0x%x\n",
+ ucx_ctrl_status, val);
+
+ dev_dbg(dev, "force cell reset...\n");
+ val |= UCX_CTL_RESET;
+ val &= ~UCX_CTL_RUN;
+
+ dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n",
+ ucx_ctrl_status, val);
+ writel(val, base + ucx_ctrl_status);
+
+ ret = readl_poll_timeout(base + ucx_ctrl_status, val2,
+ (val2 & 0x3U) == (val & 0x3U), 100, timeout);
+ if (ret) {
+ dev_err(dev, "cell enter reset timeout. status: 0x%x\n", val2);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "cell exit reset...\n");
+ val = readl(base + ucx_ctrl_status);
+ WARN((!(val & UCX_CTL_RESET) || val & UCX_CTL_RUN),
+ "cell status 0x%x", val);
+
+ val &= ~(UCX_CTL_RESET | UCX_CTL_RUN);
+ dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n",
+ ucx_ctrl_status, val);
+ writel(val, base + ucx_ctrl_status);
+
+ ret = readl_poll_timeout(base + ucx_ctrl_status, val2,
+ (val2 & 0x3U) == (val & 0x3U), 100, timeout);
+ if (ret) {
+ dev_err(dev, "cell exit reset timeout. status: 0x%x\n", val2);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void ipu7_boot_cell_start(const struct ipu7_bus_device *adev)
+{
+ const struct ipu7_boot_context *ctx = &contexts[adev->subsys];
+ void __iomem *base = adev->isp->base;
+ const struct device *dev = &adev->auxdev.dev;
+ u32 val;
+
+ dev_dbg(dev, "starting cell...\n");
+ val = readl(base + ctx->status_ctrl_reg);
+ WARN_ON(val & (UCX_CTL_RESET | UCX_CTL_RUN));
+
+ val &= ~UCX_CTL_RESET;
+ val |= UCX_CTL_RUN;
+ dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n",
+ ctx->status_ctrl_reg, val);
+ writel(val, base + ctx->status_ctrl_reg);
+}
+
+static void ipu7_boot_cell_stop(const struct ipu7_bus_device *adev)
+{
+ const struct ipu7_boot_context *ctx = &contexts[adev->subsys];
+ void __iomem *base = adev->isp->base;
+ const struct device *dev = &adev->auxdev.dev;
+ u32 val;
+
+ dev_dbg(dev, "stopping cell...\n");
+
+ val = readl(base + ctx->status_ctrl_reg);
+ val &= ~UCX_CTL_RUN;
+ dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n",
+ ctx->status_ctrl_reg, val);
+ writel(val, base + ctx->status_ctrl_reg);
+
+	/* Wait for uC transactions to complete */
+ usleep_range(10, 20);
+
+ val = readl(base + ctx->status_ctrl_reg);
+ val |= UCX_CTL_RESET;
+ dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n",
+ ctx->status_ctrl_reg, val);
+ writel(val, base + ctx->status_ctrl_reg);
+}
+
+static int ipu7_boot_cell_init(const struct ipu7_bus_device *adev)
+{
+ const struct ipu7_boot_context *ctx = &contexts[adev->subsys];
+ void __iomem *base = adev->isp->base;
+
+ dev_dbg(&adev->auxdev.dev, "write fw_start_address_reg(0x%x) to 0x%x\n",
+ ctx->fw_start_address_reg, adev->fw_entry);
+ writel(adev->fw_entry, base + ctx->fw_start_address_reg);
+
+ return ipu7_boot_cell_reset(adev);
+}
+
+static void init_boot_config(struct ia_gofo_boot_config *boot_config,
+ u32 length, u8 major)
+{
+ /* syscom version, new syscom2 version */
+ boot_config->length = length;
+ boot_config->config_version.major = 1U;
+ boot_config->config_version.minor = 0U;
+ boot_config->config_version.subminor = 0U;
+ boot_config->config_version.patch = 0U;
+
+ /* msg version for task interface */
+ boot_config->client_version_support.num_versions = 1U;
+ boot_config->client_version_support.versions[0].major = major;
+ boot_config->client_version_support.versions[0].minor = 0U;
+ boot_config->client_version_support.versions[0].subminor = 0U;
+ boot_config->client_version_support.versions[0].patch = 0U;
+}
+
+int ipu7_boot_init_boot_config(struct ipu7_bus_device *adev,
+ struct syscom_queue_config *qconfigs,
+ int num_queues, u32 uc_freq,
+ dma_addr_t subsys_config, u8 major)
+{
+ u32 total_queue_size_aligned = 0;
+ struct ipu7_syscom_context *syscom = adev->syscom;
+ struct ia_gofo_boot_config *boot_config;
+ struct syscom_queue_params_config *cfgs;
+ struct device *dev = &adev->auxdev.dev;
+ struct syscom_config_s *syscfg;
+ dma_addr_t queue_mem_dma_ptr;
+ void *queue_mem_ptr;
+ unsigned int i;
+
+ dev_dbg(dev, "boot config queues_nr: %d freq: %u sys_conf: 0x%pad\n",
+ num_queues, uc_freq, &subsys_config);
+ /* Allocate boot config. */
+ adev->boot_config_size =
+ sizeof(*cfgs) * num_queues + sizeof(*boot_config);
+ adev->boot_config = ipu7_dma_alloc(adev, adev->boot_config_size,
+ &adev->boot_config_dma_addr,
+ GFP_KERNEL, 0);
+ if (!adev->boot_config) {
+ dev_err(dev, "Failed to allocate boot config.\n");
+ return -ENOMEM;
+ }
+
+ boot_config = adev->boot_config;
+ memset(boot_config, 0, sizeof(struct ia_gofo_boot_config));
+ init_boot_config(boot_config, adev->boot_config_size, major);
+ boot_config->subsys_config = subsys_config;
+
+ boot_config->uc_tile_frequency = uc_freq;
+ boot_config->uc_tile_frequency_units =
+ IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_MHZ;
+ boot_config->syscom_context_config.max_output_queues =
+ syscom->num_output_queues;
+ boot_config->syscom_context_config.max_input_queues =
+ syscom->num_input_queues;
+
+ ipu7_dma_sync_single(adev, adev->boot_config_dma_addr,
+ adev->boot_config_size);
+
+ for (i = 0; i < num_queues; i++) {
+ u32 queue_size = qconfigs[i].max_capacity *
+ qconfigs[i].token_size_in_bytes;
+
+ queue_size = ALIGN(queue_size, 64U);
+ total_queue_size_aligned += queue_size;
+ qconfigs[i].queue_size = queue_size;
+ }
+
+ /* Allocate queue memory */
+ syscom->queue_mem = ipu7_dma_alloc(adev, total_queue_size_aligned,
+ &syscom->queue_mem_dma_addr,
+ GFP_KERNEL, 0);
+ if (!syscom->queue_mem) {
+ dev_err(dev, "Failed to allocate queue memory.\n");
+ return -ENOMEM;
+ }
+ syscom->queue_mem_size = total_queue_size_aligned;
+
+ syscfg = &boot_config->syscom_context_config;
+ cfgs = ipu7_syscom_get_queue_config(syscfg);
+ queue_mem_ptr = syscom->queue_mem;
+ queue_mem_dma_ptr = syscom->queue_mem_dma_addr;
+ for (i = 0; i < num_queues; i++) {
+ cfgs[i].token_array_mem = queue_mem_dma_ptr;
+ cfgs[i].max_capacity = qconfigs[i].max_capacity;
+ cfgs[i].token_size_in_bytes = qconfigs[i].token_size_in_bytes;
+ qconfigs[i].token_array_mem = queue_mem_ptr;
+ queue_mem_dma_ptr += qconfigs[i].queue_size;
+ queue_mem_ptr += qconfigs[i].queue_size;
+ }
+
+ ipu7_dma_sync_single(adev, syscom->queue_mem_dma_addr,
+ total_queue_size_aligned);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_boot_init_boot_config, "INTEL_IPU7");
+
+void ipu7_boot_release_boot_config(struct ipu7_bus_device *adev)
+{
+ struct ipu7_syscom_context *syscom = adev->syscom;
+
+ if (syscom->queue_mem) {
+ ipu7_dma_free(adev, syscom->queue_mem_size,
+ syscom->queue_mem,
+ syscom->queue_mem_dma_addr, 0);
+ syscom->queue_mem = NULL;
+ syscom->queue_mem_dma_addr = 0;
+ }
+
+ if (adev->boot_config) {
+ ipu7_dma_free(adev, adev->boot_config_size,
+ adev->boot_config,
+ adev->boot_config_dma_addr, 0);
+ adev->boot_config = NULL;
+ adev->boot_config_dma_addr = 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_boot_release_boot_config, "INTEL_IPU7");
+
+int ipu7_boot_start_fw(const struct ipu7_bus_device *adev)
+{
+ const struct device *dev = &adev->auxdev.dev;
+ u32 timeout = IPU_FW_START_STOP_TIMEOUT;
+ void __iomem *base = adev->isp->base;
+ u32 boot_state, last_boot_state;
+ u32 indices_addr, msg_ver, id;
+ int ret;
+
+ ret = ipu7_boot_cell_init(adev);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "start booting fw...\n");
+ /* store "uninit" state to syscom/boot state reg */
+ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID,
+ IA_GOFO_FW_BOOT_STATE_UNINIT);
+ /*
+ * Set registers to zero
+ * (not strictly required, but recommended for diagnostics)
+ */
+ write_fw_boot_param(adev,
+ IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID, 0);
+ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID, 0);
+ /* store firmware configuration address */
+ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_CONFIG_ID,
+ adev->boot_config_dma_addr);
+
+ /* Kick uC, then wait for boot complete */
+ ipu7_boot_cell_start(adev);
+
+ last_boot_state = IA_GOFO_FW_BOOT_STATE_UNINIT;
+ while (timeout--) {
+ boot_state = read_fw_boot_param(adev,
+ IA_GOFO_FW_BOOT_STATE_ID);
+ if (boot_state != last_boot_state) {
+ dev_dbg(dev, "boot state changed from 0x%x to 0x%x\n",
+ last_boot_state, boot_state);
+ last_boot_state = boot_state;
+ }
+ if (BOOT_STATE_IS_CRITICAL(boot_state) ||
+ BOOT_STATE_IS_READY(boot_state))
+ break;
+ usleep_range(1000, 1200);
+ }
+
+ if (BOOT_STATE_IS_CRITICAL(boot_state)) {
+ ipu7_dump_fw_error_log(adev);
+ dev_err(dev, "critical boot state error 0x%x\n", boot_state);
+ return -EINVAL;
+ } else if (!BOOT_STATE_IS_READY(boot_state)) {
+ dev_err(dev, "fw boot timeout. state: 0x%x\n", boot_state);
+ return -ETIMEDOUT;
+ }
+ dev_dbg(dev, "fw boot done.\n");
+
+ /* Get FW syscom queue indices addr */
+ id = IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID;
+ indices_addr = read_fw_boot_param(adev, id);
+ adev->syscom->queue_indices = base + indices_addr;
+ dev_dbg(dev, "fw queue indices offset is 0x%x\n", indices_addr);
+
+ /* Get message version. */
+ msg_ver = read_fw_boot_param(adev,
+ IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID);
+ dev_dbg(dev, "ipu message version is 0x%08x\n", msg_ver);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_boot_start_fw, "INTEL_IPU7");
+
+int ipu7_boot_stop_fw(const struct ipu7_bus_device *adev)
+{
+ const struct device *dev = &adev->auxdev.dev;
+ u32 timeout = IPU_FW_START_STOP_TIMEOUT;
+ u32 boot_state;
+
+ boot_state = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID);
+ if (BOOT_STATE_IS_CRITICAL(boot_state) ||
+ !BOOT_STATE_IS_READY(boot_state)) {
+ dev_err(dev, "fw not ready for shutdown, state 0x%x\n",
+ boot_state);
+ return -EBUSY;
+ }
+
+ /* Issue shutdown to start shutdown process */
+ dev_dbg(dev, "stopping fw...\n");
+ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID,
+ IA_GOFO_FW_BOOT_STATE_SHUTDOWN_CMD);
+ while (timeout--) {
+ boot_state = read_fw_boot_param(adev,
+ IA_GOFO_FW_BOOT_STATE_ID);
+ if (BOOT_STATE_IS_CRITICAL(boot_state) ||
+ BOOT_STATE_IS_INACTIVE(boot_state))
+ break;
+ usleep_range(1000, 1200);
+ }
+
+ if (BOOT_STATE_IS_CRITICAL(boot_state)) {
+ ipu7_dump_fw_error_log(adev);
+ dev_err(dev, "critical boot state error 0x%x\n", boot_state);
+ return -EINVAL;
+ } else if (!BOOT_STATE_IS_INACTIVE(boot_state)) {
+ dev_err(dev, "stop fw timeout. state: 0x%x\n", boot_state);
+ return -ETIMEDOUT;
+ }
+
+ ipu7_boot_cell_stop(adev);
+ dev_dbg(dev, "stop fw done.\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_boot_stop_fw, "INTEL_IPU7");
+
+u32 ipu7_boot_get_boot_state(const struct ipu7_bus_device *adev)
+{
+ return read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_boot_get_boot_state, "INTEL_IPU7");
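
The exported helpers above are consumed by sub-drivers outside this file, so the bring-up sketch below is only an assumption about their intended ordering, derived from the function semantics: build the boot configuration, start the firmware, and release the configuration if the start fails.

static int example_bring_up_fw(struct ipu7_bus_device *adev,
			       struct syscom_queue_config *qcfgs,
			       int nr_queues, u32 uc_freq,
			       dma_addr_t subsys_cfg, u8 msg_abi_major)
{
	int ret;

	ret = ipu7_boot_init_boot_config(adev, qcfgs, nr_queues, uc_freq,
					 subsys_cfg, msg_abi_major);
	if (ret)
		return ret;

	ret = ipu7_boot_start_fw(adev);
	if (ret)
		ipu7_boot_release_boot_config(adev);

	return ret;
}
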
diff --git a/drivers/staging/media/ipu7/ipu7-boot.h b/drivers/staging/media/ipu7/ipu7-boot.h
new file mode 100644
index 000000000000..5600be849931
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-boot.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_BOOT_H
+#define IPU7_BOOT_H
+
+#include <linux/types.h>
+
+struct ipu7_bus_device;
+struct syscom_queue_config;
+
+#define FW_QUEUE_CONFIG_SIZE(num_queues) \
+ (sizeof(struct syscom_queue_config) * (num_queues))
+
+int ipu7_boot_init_boot_config(struct ipu7_bus_device *adev,
+ struct syscom_queue_config *qconfigs,
+ int num_queues, u32 uc_freq,
+ dma_addr_t subsys_config, u8 major);
+void ipu7_boot_release_boot_config(struct ipu7_bus_device *adev);
+int ipu7_boot_start_fw(const struct ipu7_bus_device *adev);
+int ipu7_boot_stop_fw(const struct ipu7_bus_device *adev);
+u32 ipu7_boot_get_boot_state(const struct ipu7_bus_device *adev);
+#endif
diff --git a/drivers/staging/media/ipu7/ipu7-bus.c b/drivers/staging/media/ipu7/ipu7-bus.c
new file mode 100644
index 000000000000..7da44fde002a
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-bus.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "ipu7.h"
+#include "ipu7-bus.h"
+#include "ipu7-boot.h"
+#include "ipu7-dma.h"
+
+static int bus_pm_runtime_suspend(struct device *dev)
+{
+ struct ipu7_bus_device *adev = to_ipu7_bus_device(dev);
+ int ret;
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = ipu_buttress_powerdown(dev, adev->ctrl);
+ if (!ret)
+ return 0;
+
+ dev_err(dev, "power down failed!\n");
+
+ /* Powering down failed, attempt to resume device now */
+ ret = pm_generic_runtime_resume(dev);
+ if (!ret)
+ return -EBUSY;
+
+ return -EIO;
+}
+
+static int bus_pm_runtime_resume(struct device *dev)
+{
+ struct ipu7_bus_device *adev = to_ipu7_bus_device(dev);
+ int ret;
+
+ ret = ipu_buttress_powerup(dev, adev->ctrl);
+ if (ret)
+ return ret;
+
+ ret = pm_generic_runtime_resume(dev);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ipu_buttress_powerdown(dev, adev->ctrl);
+
+ return -EBUSY;
+}
+
+static struct dev_pm_domain ipu7_bus_pm_domain = {
+ .ops = {
+ .runtime_suspend = bus_pm_runtime_suspend,
+ .runtime_resume = bus_pm_runtime_resume,
+ },
+};
+
+static DEFINE_MUTEX(ipu7_bus_mutex);
+static void ipu7_bus_release(struct device *dev)
+{
+ struct ipu7_bus_device *adev = to_ipu7_bus_device(dev);
+
+ kfree(adev->pdata);
+ kfree(adev);
+}
+
+struct ipu7_bus_device *
+ipu7_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+ void *pdata, const struct ipu_buttress_ctrl *ctrl,
+ const char *name)
+{
+ struct auxiliary_device *auxdev;
+ struct ipu7_bus_device *adev;
+ struct ipu7_device *isp = pci_get_drvdata(pdev);
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ adev->isp = isp;
+ adev->ctrl = ctrl;
+ adev->pdata = pdata;
+ auxdev = &adev->auxdev;
+ auxdev->name = name;
+ auxdev->id = (pci_domain_nr(pdev->bus) << 16) |
+ PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+ auxdev->dev.parent = parent;
+ auxdev->dev.release = ipu7_bus_release;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n",
+ ret);
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ dev_pm_domain_set(&auxdev->dev, &ipu7_bus_pm_domain);
+
+ pm_runtime_forbid(&adev->auxdev.dev);
+ pm_runtime_enable(&adev->auxdev.dev);
+
+ return adev;
+}
+
+int ipu7_bus_add_device(struct ipu7_bus_device *adev)
+{
+ struct auxiliary_device *auxdev = &adev->auxdev;
+ int ret;
+
+ ret = auxiliary_device_add(auxdev);
+ if (ret) {
+ auxiliary_device_uninit(auxdev);
+ return ret;
+ }
+
+ mutex_lock(&ipu7_bus_mutex);
+ list_add(&adev->list, &adev->isp->devices);
+ mutex_unlock(&ipu7_bus_mutex);
+
+ pm_runtime_allow(&auxdev->dev);
+
+ return 0;
+}
+
+void ipu7_bus_del_devices(struct pci_dev *pdev)
+{
+ struct ipu7_device *isp = pci_get_drvdata(pdev);
+ struct ipu7_bus_device *adev, *save;
+
+ mutex_lock(&ipu7_bus_mutex);
+
+ list_for_each_entry_safe(adev, save, &isp->devices, list) {
+ pm_runtime_disable(&adev->auxdev.dev);
+ list_del(&adev->list);
+ auxiliary_device_delete(&adev->auxdev);
+ auxiliary_device_uninit(&adev->auxdev);
+ }
+
+ mutex_unlock(&ipu7_bus_mutex);
+}
diff --git a/drivers/staging/media/ipu7/ipu7-bus.h b/drivers/staging/media/ipu7/ipu7-bus.h
new file mode 100644
index 000000000000..45157df16e90
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-bus.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_BUS_H
+#define IPU7_BUS_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include "abi/ipu7_fw_boot_abi.h"
+
+#include "ipu7-syscom.h"
+
+struct pci_dev;
+struct ipu_buttress_ctrl;
+struct ipu7_mmu;
+struct ipu7_device;
+
+enum ipu7_subsys {
+ IPU_IS = 0,
+ IPU_PS = 1,
+ IPU_SUBSYS_NUM = 2,
+};
+
+struct ipu7_bus_device {
+ struct auxiliary_device auxdev;
+ const struct auxiliary_driver *auxdrv;
+ const struct ipu7_auxdrv_data *auxdrv_data;
+ struct list_head list;
+ enum ipu7_subsys subsys;
+ void *pdata;
+ struct ipu7_mmu *mmu;
+ struct ipu7_device *isp;
+ const struct ipu_buttress_ctrl *ctrl;
+ u64 dma_mask;
+ struct sg_table fw_sgt;
+ u32 fw_entry;
+ struct ipu7_syscom_context *syscom;
+ struct ia_gofo_boot_config *boot_config;
+ dma_addr_t boot_config_dma_addr;
+ u32 boot_config_size;
+};
+
+struct ipu7_auxdrv_data {
+ irqreturn_t (*isr)(struct ipu7_bus_device *adev);
+ irqreturn_t (*isr_threaded)(struct ipu7_bus_device *adev);
+ bool wake_isr_thread;
+};
+
+#define to_ipu7_bus_device(_dev) \
+ container_of(to_auxiliary_dev(_dev), struct ipu7_bus_device, auxdev)
+#define auxdev_to_adev(_auxdev) \
+ container_of(_auxdev, struct ipu7_bus_device, auxdev)
+#define ipu7_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev)
+
+struct ipu7_bus_device *
+ipu7_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
+ void *pdata, const struct ipu_buttress_ctrl *ctrl,
+ const char *name);
+int ipu7_bus_add_device(struct ipu7_bus_device *adev);
+void ipu7_bus_del_devices(struct pci_dev *pdev);
+#endif
diff --git a/drivers/staging/media/ipu7/ipu7-buttress-regs.h b/drivers/staging/media/ipu7/ipu7-buttress-regs.h
new file mode 100644
index 000000000000..3eafd6a3813d
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-buttress-regs.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_BUTTRESS_REGS_H
+#define IPU7_BUTTRESS_REGS_H
+
+#define BUTTRESS_REG_IRQ_STATUS 0x2000
+#define BUTTRESS_REG_IRQ_STATUS_UNMASKED 0x2004
+#define BUTTRESS_REG_IRQ_ENABLE 0x2008
+#define BUTTRESS_REG_IRQ_CLEAR 0x200c
+#define BUTTRESS_REG_IRQ_MASK 0x2010
+#define BUTTRESS_REG_TSC_CMD 0x2014
+#define BUTTRESS_REG_TSC_CTL 0x2018
+#define BUTTRESS_REG_TSC_LO 0x201c
+#define BUTTRESS_REG_TSC_HI 0x2020
+
+/* valid for PTL */
+#define BUTTRESS_REG_PB_TIMESTAMP_LO 0x2030
+#define BUTTRESS_REG_PB_TIMESTAMP_HI 0x2034
+#define BUTTRESS_REG_PB_TIMESTAMP_VALID 0x2038
+
+#define BUTTRESS_REG_PS_WORKPOINT_REQ 0x2100
+#define BUTTRESS_REG_IS_WORKPOINT_REQ 0x2104
+#define BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ 0x2108
+#define BUTTRESS_REG_PS_DOMAINS_STATUS 0x2110
+#define BUTTRESS_REG_PWR_STATUS 0x2114
+#define BUTTRESS_REG_PS_WORKPOINT_REQ_SHADOW 0x2120
+#define BUTTRESS_REG_IS_WORKPOINT_REQ_SHADOW 0x2124
+#define BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ_SHADOW 0x2128
+#define BUTTRESS_REG_ISPS_WORKPOINT_DOWNLOAD 0x212c
+#define BUTTRESS_REG_PG_FLOW_OVERRIDE 0x2180
+#define BUTTRESS_REG_GLOBAL_OVERRIDE_UNGATE_CTL 0x2184
+#define BUTTRESS_REG_PWR_FSM_CTL 0x2188
+#define BUTTRESS_REG_IDLE_WDT 0x218c
+#define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_EN 0x2190
+#define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_ADDR 0x2194
+#define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_DATA 0x2198
+#define BUTTRESS_REG_POWER_EN_DELAY 0x219c
+#define IPU7_BUTTRESS_REG_LTR_CONTROL 0x21a0
+#define IPU7_BUTTRESS_REG_NDE_CONTROL 0x21a4
+#define IPU7_BUTTRESS_REG_INT_FRM_PUNIT 0x21a8
+#define IPU8_BUTTRESS_REG_LTR_CONTROL 0x21a4
+#define IPU8_BUTTRESS_REG_NDE_CONTROL 0x21a8
+#define IPU8_BUTTRESS_REG_INT_FRM_PUNIT 0x21ac
+#define BUTTRESS_REG_SLEEP_LEVEL_CFG 0x21b0
+#define BUTTRESS_REG_SLEEP_LEVEL_STS 0x21b4
+#define BUTTRESS_REG_DVFS_FSM_STATUS 0x21b8
+#define BUTTRESS_REG_PS_PLL_ENABLE 0x21bc
+#define BUTTRESS_REG_D2D_CTL 0x21d4
+#define BUTTRESS_REG_IB_CLK_CTL 0x21d8
+#define BUTTRESS_REG_IB_CRO_CLK_CTL 0x21dc
+#define BUTTRESS_REG_FUNC_FUSES 0x21e0
+#define BUTTRESS_REG_ISOCH_CTL 0x21e4
+#define BUTTRESS_REG_WORKPOINT_CTL 0x21f0
+#define BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS 0x2200
+#define BUTTRESS_REG_DRV_IS_UCX_START_ADDR 0x2204
+#define BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS 0x2208
+#define BUTTRESS_REG_DRV_PS_UCX_START_ADDR 0x220c
+#define BUTTRESS_REG_DRV_UCX_RESET_CFG 0x2210
+
+/* configured by CSE */
+#define BUTTRESS_REG_CSE_IS_UCX_CONTROL_STATUS 0x2300
+#define BUTTRESS_REG_CSE_IS_UCX_START_ADDR 0x2304
+#define BUTTRESS_REG_CSE_PS_UCX_CONTROL_STATUS 0x2308
+#define BUTTRESS_REG_CSE_PS_UCX_START_ADDR 0x230c
+
+#define BUTTRESS_REG_CAMERA_MASK 0x2310
+#define BUTTRESS_REG_FW_CTL 0x2314
+#define BUTTRESS_REG_SECURITY_CTL 0x2318
+#define BUTTRESS_REG_FUNCTIONAL_FW_SETUP 0x231c
+#define BUTTRESS_REG_FW_BASE 0x2320
+#define BUTTRESS_REG_FW_BASE_LIMIT 0x2324
+#define BUTTRESS_REG_FW_SCRATCH_BASE 0x2328
+#define BUTTRESS_REG_FW_SCRATCH_LIMIT 0x232c
+#define BUTTRESS_REG_CSE_ACTION 0x2330
+
+/* configured by SW */
+#define BUTTRESS_REG_FW_RESET_CTL 0x2334
+#define BUTTRESS_REG_FW_SOURCE_SIZE 0x2338
+#define BUTTRESS_REG_FW_SOURCE_BASE 0x233c
+
+#define BUTTRESS_REG_IPU_SEC_CP_LSB 0x2400
+#define BUTTRESS_REG_IPU_SEC_CP_MSB 0x2404
+#define BUTTRESS_REG_IPU_SEC_WAC_LSB 0x2408
+#define BUTTRESS_REG_IPU_SEC_WAC_MSB 0x240c
+#define BUTTRESS_REG_IPU_SEC_RAC_LSB 0x2410
+#define BUTTRESS_REG_IPU_SEC_RAC_MSB 0x2414
+#define BUTTRESS_REG_IPU_DRV_CP_LSB 0x2418
+#define BUTTRESS_REG_IPU_DRV_CP_MSB 0x241c
+#define BUTTRESS_REG_IPU_DRV_WAC_LSB 0x2420
+#define BUTTRESS_REG_IPU_DRV_WAC_MSB 0x2424
+#define BUTTRESS_REG_IPU_DRV_RAC_LSB 0x2428
+#define BUTTRESS_REG_IPU_DRV_RAC_MSB 0x242c
+#define BUTTRESS_REG_IPU_FW_CP_LSB 0x2430
+#define BUTTRESS_REG_IPU_FW_CP_MSB 0x2434
+#define BUTTRESS_REG_IPU_FW_WAC_LSB 0x2438
+#define BUTTRESS_REG_IPU_FW_WAC_MSB 0x243c
+#define BUTTRESS_REG_IPU_FW_RAC_LSB 0x2440
+#define BUTTRESS_REG_IPU_FW_RAC_MSB 0x2444
+#define BUTTRESS_REG_IPU_BIOS_SEC_CP_LSB 0x2448
+#define BUTTRESS_REG_IPU_BIOS_SEC_CP_MSB 0x244c
+#define BUTTRESS_REG_IPU_BIOS_SEC_WAC_LSB 0x2450
+#define BUTTRESS_REG_IPU_BIOS_SEC_WAC_MSB 0x2454
+#define BUTTRESS_REG_IPU_BIOS_SEC_RAC_LSB 0x2458
+#define BUTTRESS_REG_IPU_BIOS_SEC_RAC_MSB 0x245c
+#define BUTTRESS_REG_IPU_DFD_CP_LSB 0x2460
+#define BUTTRESS_REG_IPU_DFD_CP_MSB 0x2464
+#define BUTTRESS_REG_IPU_DFD_WAC_LSB 0x2468
+#define BUTTRESS_REG_IPU_DFD_WAC_MSB 0x246c
+#define BUTTRESS_REG_IPU_DFD_RAC_LSB 0x2470
+#define BUTTRESS_REG_IPU_DFD_RAC_MSB 0x2474
+#define BUTTRESS_REG_CSE2IUDB0 0x2500
+#define BUTTRESS_REG_CSE2IUDATA0 0x2504
+#define BUTTRESS_REG_CSE2IUCSR 0x2508
+#define BUTTRESS_REG_IU2CSEDB0 0x250c
+#define BUTTRESS_REG_IU2CSEDATA0 0x2510
+#define BUTTRESS_REG_IU2CSECSR 0x2514
+#define BUTTRESS_REG_CSE2IUDB0_CR_SHADOW 0x2520
+#define BUTTRESS_REG_CSE2IUDATA0_CR_SHADOW 0x2524
+#define BUTTRESS_REG_CSE2IUCSR_CR_SHADOW 0x2528
+#define BUTTRESS_REG_IU2CSEDB0_CR_SHADOW 0x252c
+#define BUTTRESS_REG_DVFS_FSM_SURVIVABILITY 0x2900
+#define BUTTRESS_REG_FLOWS_FSM_SURVIVABILITY 0x2904
+#define BUTTRESS_REG_FABRICS_FSM_SURVIVABILITY 0x2908
+#define BUTTRESS_REG_PS_SUB1_PM_FSM_SURVIVABILITY 0x290c
+#define BUTTRESS_REG_PS_SUB0_PM_FSM_SURVIVABILITY 0x2910
+#define BUTTRESS_REG_PS_PM_FSM_SURVIVABILITY 0x2914
+#define BUTTRESS_REG_IS_PM_FSM_SURVIVABILITY 0x2918
+#define BUTTRESS_REG_FLR_RST_FSM_SURVIVABILITY 0x291c
+#define BUTTRESS_REG_FW_RST_FSM_SURVIVABILITY 0x2920
+#define BUTTRESS_REG_RESETPREP_FSM_SURVIVABILITY 0x2924
+#define BUTTRESS_REG_POWER_FSM_DOMAIN_STATUS 0x3000
+#define BUTTRESS_REG_IDLEREQ_STATUS1 0x3004
+#define BUTTRESS_REG_POWER_FSM_STATUS_IS_PS 0x3008
+#define BUTTRESS_REG_POWER_ACK_B_STATUS 0x300c
+#define BUTTRESS_REG_DOMAIN_RETENTION_CTL 0x3010
+#define BUTTRESS_REG_CG_CTRL_BITS 0x3014
+#define BUTTRESS_REG_IS_IFC_STATUS0 0x3018
+#define BUTTRESS_REG_IS_IFC_STATUS1 0x301c
+#define BUTTRESS_REG_PS_IFC_STATUS0 0x3020
+#define BUTTRESS_REG_PS_IFC_STATUS1 0x3024
+#define BUTTRESS_REG_BTRS_IFC_STATUS0 0x3028
+#define BUTTRESS_REG_BTRS_IFC_STATUS1 0x302c
+#define BUTTRESS_REG_IPU_SKU 0x3030
+#define BUTTRESS_REG_PS_IDLEACK 0x3034
+#define BUTTRESS_REG_IS_IDLEACK 0x3038
+#define BUTTRESS_REG_SPARE_REGS_0 0x303c
+#define BUTTRESS_REG_SPARE_REGS_1 0x3040
+#define BUTTRESS_REG_SPARE_REGS_2 0x3044
+#define BUTTRESS_REG_SPARE_REGS_3 0x3048
+#define BUTTRESS_REG_IUNIT_ACV 0x304c
+#define BUTTRESS_REG_CHICKEN_BITS 0x3050
+#define BUTTRESS_REG_SBENDPOINT_CFG 0x3054
+#define BUTTRESS_REG_ECC_ERR_LOG 0x3058
+#define BUTTRESS_REG_POWER_FSM_STATUS 0x3070
+#define BUTTRESS_REG_RESET_FSM_STATUS 0x3074
+#define BUTTRESS_REG_IDLE_STATUS 0x3078
+#define BUTTRESS_REG_IDLEACK_STATUS 0x307c
+#define BUTTRESS_REG_IPU_DEBUG 0x3080
+
+#define BUTTRESS_REG_FW_BOOT_PARAMS0 0x4000
+#define BUTTRESS_REG_FW_BOOT_PARAMS1 0x4004
+#define BUTTRESS_REG_FW_BOOT_PARAMS2 0x4008
+#define BUTTRESS_REG_FW_BOOT_PARAMS3 0x400c
+#define BUTTRESS_REG_FW_BOOT_PARAMS4 0x4010
+#define BUTTRESS_REG_FW_BOOT_PARAMS5 0x4014
+#define BUTTRESS_REG_FW_BOOT_PARAMS6 0x4018
+#define BUTTRESS_REG_FW_BOOT_PARAMS7 0x401c
+#define BUTTRESS_REG_FW_BOOT_PARAMS8 0x4020
+#define BUTTRESS_REG_FW_BOOT_PARAMS9 0x4024
+#define BUTTRESS_REG_FW_BOOT_PARAMS10 0x4028
+#define BUTTRESS_REG_FW_BOOT_PARAMS11 0x402c
+#define BUTTRESS_REG_FW_BOOT_PARAMS12 0x4030
+#define BUTTRESS_REG_FW_BOOT_PARAMS13 0x4034
+#define BUTTRESS_REG_FW_BOOT_PARAMS14 0x4038
+#define BUTTRESS_REG_FW_BOOT_PARAMS15 0x403c
+
+#define BUTTRESS_FW_BOOT_PARAMS_ENTRY(i) \
+ (BUTTRESS_REG_FW_BOOT_PARAMS0 + ((i) * 4U))
+#define BUTTRESS_REG_FW_GP(i) (0x4040 + 0x4 * (i))
+#define BUTTRESS_REG_FPGA_SUPPORT(i) (0x40c0 + 0x4 * (i))
+
+#define BUTTRESS_REG_FW_GP8 0x4060
+#define BUTTRESS_REG_FW_GP24 0x40a0
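+/*
+ * For reference, the fixed offsets above match the indexed forms:
+ * BUTTRESS_REG_FW_GP(8) == BUTTRESS_REG_FW_GP8 (0x4060),
+ * BUTTRESS_REG_FW_GP(24) == BUTTRESS_REG_FW_GP24 (0x40a0) and
+ * BUTTRESS_FW_BOOT_PARAMS_ENTRY(7) == BUTTRESS_REG_FW_BOOT_PARAMS7 (0x401c).
+ */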
+
+#define BUTTRESS_REG_GPIO_0_PADCFG_ADDR_CR 0x4100
+#define BUTTRESS_REG_GPIO_1_PADCFG_ADDR_CR 0x4104
+#define BUTTRESS_REG_GPIO_2_PADCFG_ADDR_CR 0x4108
+#define BUTTRESS_REG_GPIO_3_PADCFG_ADDR_CR 0x410c
+#define BUTTRESS_REG_GPIO_4_PADCFG_ADDR_CR 0x4110
+#define BUTTRESS_REG_GPIO_5_PADCFG_ADDR_CR 0x4114
+#define BUTTRESS_REG_GPIO_6_PADCFG_ADDR_CR 0x4118
+#define BUTTRESS_REG_GPIO_7_PADCFG_ADDR_CR 0x411c
+#define BUTTRESS_REG_GPIO_ENABLE 0x4140
+#define BUTTRESS_REG_GPIO_VALUE_CR 0x4144
+
+#define BUTTRESS_REG_IS_MEM_CORRECTABLE_ERROR_STATUS 0x5000
+#define BUTTRESS_REG_IS_MEM_FATAL_ERROR_STATUS 0x5004
+#define BUTTRESS_REG_IS_MEM_NON_FATAL_ERROR_STATUS 0x5008
+#define BUTTRESS_REG_IS_MEM_CHECK_PASSED 0x500c
+#define BUTTRESS_REG_IS_MEM_ERROR_INJECT 0x5010
+#define BUTTRESS_REG_IS_MEM_ERROR_CLEAR 0x5014
+#define BUTTRESS_REG_PS_MEM_CORRECTABLE_ERROR_STATUS 0x5040
+#define BUTTRESS_REG_PS_MEM_FATAL_ERROR_STATUS 0x5044
+#define BUTTRESS_REG_PS_MEM_NON_FATAL_ERROR_STATUS 0x5048
+#define BUTTRESS_REG_PS_MEM_CHECK_PASSED 0x504c
+#define BUTTRESS_REG_PS_MEM_ERROR_INJECT 0x5050
+#define BUTTRESS_REG_PS_MEM_ERROR_CLEAR 0x5054
+
+#define BUTTRESS_REG_IS_AB_REGION_MIN_ADDRESS(i) (0x6000 + 0x8 * (i))
+#define BUTTRESS_REG_IS_AB_REGION_MAX_ADDRESS(i) (0x6004 + 0x8 * (i))
+#define BUTTRESS_REG_IS_AB_VIOLATION_LOG0 0x6080
+#define BUTTRESS_REG_IS_AB_VIOLATION_LOG1 0x6084
+#define BUTTRESS_REG_PS_AB_REGION_MIN_ADDRESS(i) (0x6100 + 0x8 * (i))
+#define BUTTRESS_REG_PS_AB_REGION_MAX_ADDRESS(i) (0x6104 + 0x8 * (i))
+#define BUTTRESS_REG_PS_AB_VIOLATION_LOG0 0x6180
+#define BUTTRESS_REG_PS_AB_VIOLATION_LOG1 0x6184
+#define BUTTRESS_REG_PS_DEBUG_AB_VIOLATION_LOG0 0x6200
+#define BUTTRESS_REG_PS_DEBUG_AB_VIOLATION_LOG1 0x6204
+#define BUTTRESS_REG_IS_DEBUG_AB_VIOLATION_LOG0 0x6208
+#define BUTTRESS_REG_IS_DEBUG_AB_VIOLATION_LOG1 0x620c
+#define BUTTRESS_REG_IB_DVP_AB_VIOLATION_LOG0 0x6210
+#define BUTTRESS_REG_IB_DVP_AB_VIOLATION_LOG1 0x6214
+#define BUTTRESS_REG_IB_ATB2DTF_AB_VIOLATION_LOG0 0x6218
+#define BUTTRESS_REG_IB_ATB2DTF_AB_VIOLATION_LOG1 0x621c
+#define BUTTRESS_REG_AB_ENABLE 0x6220
+#define BUTTRESS_REG_AB_DEFAULT_ACCESS 0x6230
+
+/* Indicates CSE has received an IPU driver IPC transaction */
+#define BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE BIT(0)
+/* Indicates an IPC transaction from CSE has arrived */
+#define BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING BIT(1)
+/* Indicates a CSR update from CSE has arrived */
+#define BUTTRESS_IRQ_CSE_CSR_SET BIT(2)
+/* Indicates an interrupt set by Punit (not in use at this time) */
+#define BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ BIT(3)
+/* Indicates an SAI violation was detected on access to IB registers */
+#define BUTTRESS_IRQ_SAI_VIOLATION BIT(4)
+/* Indicates a transaction to IS was not able to pass the access blocker */
+#define BUTTRESS_IRQ_IS_AB_VIOLATION BIT(5)
+/* Indicates a transaction to PS was not able to pass the access blocker */
+#define BUTTRESS_IRQ_PS_AB_VIOLATION BIT(6)
+/* Indicates an error response was detected by the IB config NoC */
+#define BUTTRESS_IRQ_IB_CFG_NOC_ERR_IRQ BIT(7)
+/* Indicates an error response was detected by the IB data NoC */
+#define BUTTRESS_IRQ_IB_DATA_NOC_ERR_IRQ BIT(8)
+/* Transaction to DVP regs was not able to pass the access blocker */
+#define BUTTRESS_IRQ_IB_DVP_AB_VIOLATION BIT(9)
+/* Transaction to ATB2DTF regs was not able to pass the access blocker */
+#define BUTTRESS_IRQ_ATB2DTF_AB_VIOLATION BIT(10)
+/* Transaction to IS debug regs was not able to pass the access blocker */
+#define BUTTRESS_IRQ_IS_DEBUG_AB_VIOLATION BIT(11)
+/* Transaction to PS debug regs was not able to pass the access blocker */
+#define BUTTRESS_IRQ_PS_DEBUG_AB_VIOLATION BIT(12)
+/* Indicates timeout occurred waiting for a response from a target */
+#define BUTTRESS_IRQ_IB_CFG_NOC_TIMEOUT_IRQ BIT(13)
+/* Set when any correctable ECC error input wire to buttress is set */
+#define BUTTRESS_IRQ_ECC_CORRECTABLE BIT(14)
+/* Any noncorrectable-nonfatal ECC error input wire to buttress is set */
+#define BUTTRESS_IRQ_ECC_NONCORRECTABLE_NONFATAL BIT(15)
+/* Set when any noncorrectable-fatal ECC error input wire to buttress is set */
+#define BUTTRESS_IRQ_ECC_NONCORRECTABLE_FATAL BIT(16)
+/* Set when timeout occurred waiting for a response from a target */
+#define BUTTRESS_IRQ_IS_CFG_NOC_TIMEOUT_IRQ BIT(17)
+#define BUTTRESS_IRQ_PS_CFG_NOC_TIMEOUT_IRQ BIT(18)
+#define BUTTRESS_IRQ_LB_CFG_NOC_TIMEOUT_IRQ BIT(19)
+/* IS FW double exception event */
+#define BUTTRESS_IRQ_IS_UC_PFATAL_ERROR BIT(26)
+/* PS FW double exception event */
+#define BUTTRESS_IRQ_PS_UC_PFATAL_ERROR BIT(27)
+/* IS FW watchdog event */
+#define BUTTRESS_IRQ_IS_WATCHDOG BIT(28)
+/* PS FW watchdog event */
+#define BUTTRESS_IRQ_PS_WATCHDOG BIT(29)
+/* IS IRC irq out */
+#define BUTTRESS_IRQ_IS_IRQ BIT(30)
+/* PS IRC irq out */
+#define BUTTRESS_IRQ_PS_IRQ BIT(31)
+
+/* buttress power status HH states */
+#define BUTTRESS_PWR_STATUS_HH_STATE_IDLE 0U
+#define BUTTRESS_PWR_STATUS_HH_STATE_IN_PRGS 1U
+#define BUTTRESS_PWR_STATUS_HH_STATE_DONE 2U
+#define BUTTRESS_PWR_STATUS_HH_STATE_ERR 3U
+
+#define BUTTRESS_TSC_CMD_START_TSC_SYNC BIT(0)
+#define BUTTRESS_PWR_STATUS_HH_STATUS_SHIFT 11
+#define BUTTRESS_PWR_STATUS_HH_STATUS_MASK (0x3U << 11)
+#define BUTTRESS_TSW_WA_SOFT_RESET BIT(8)
+/* new for PTL */
+#define BUTTRESS_SEL_PB_TIMESTAMP BIT(9)
+#define BUTTRESS_IRQS (BUTTRESS_IRQ_IS_IRQ | \
+ BUTTRESS_IRQ_PS_IRQ | \
+ BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING | \
+ BUTTRESS_IRQ_CSE_CSR_SET | \
+ BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE | \
+ BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ)
+
+/* Iunit to CSE regs */
+#define BUTTRESS_IU2CSEDB0_BUSY BIT(31)
+#define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT 27
+#define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT 10
+#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2
+
+#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3
+#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16
+
+#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0)
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1)
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2)
+#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4)
+
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5)
+
+/* 0x20 == NACK, 0xf == unknown command */
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff
+
+/* IS/PS freq control */
+#define BUTTRESS_IS_FREQ_CTL_RATIO_MASK 0xffU
+#define BUTTRESS_PS_FREQ_CTL_RATIO_MASK 0xffU
+
+#define IPU7_IS_FREQ_MAX 450
+#define IPU7_IS_FREQ_MIN 50
+#define IPU7_PS_FREQ_MAX 750
+#define BUTTRESS_PS_FREQ_RATIO_STEP 25U
+/* valid for IPU8 */
+#define BUTTRESS_IS_FREQ_RATIO_STEP 25U
+
+/* IS: 400 MHz, PS: 500 MHz */
+#define IPU7_IS_FREQ_CTL_DEFAULT_RATIO 0x1b
+#define IPU7_PS_FREQ_CTL_DEFAULT_RATIO 0x14
+/* IS: 400 MHz, PS: 400 MHz */
+#define IPU8_IS_FREQ_CTL_DEFAULT_RATIO 0x10
+#define IPU8_PS_FREQ_CTL_DEFAULT_RATIO 0x10
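+/*
+ * For example, with the 25 MHz PS ratio step above, the IPU7 PS default
+ * ratio 0x14 maps to 20 * 25 MHz = 500 MHz, and the IPU8 PS default
+ * ratio 0x10 to 16 * 25 MHz = 400 MHz.
+ */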
+
+#define IPU_FREQ_CTL_CDYN 0x80
+#define IPU_FREQ_CTL_RATIO_SHIFT 0x0
+#define IPU_FREQ_CTL_CDYN_SHIFT 0x8
+
+/* buttress power status */
+#define IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 0
+#define IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK \
+ (0x3U << IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT)
+
+#define IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 4
+#define IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK \
+ (0x3U << IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT)
+
+#define IPU_BUTTRESS_PWR_STATE_DN_DONE 0x0
+#define IPU_BUTTRESS_PWR_STATE_UP_PROCESS 0x1
+#define IPU_BUTTRESS_PWR_STATE_DN_PROCESS 0x2
+#define IPU_BUTTRESS_PWR_STATE_UP_DONE 0x3
+
+#define BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3
+#define BUTTRESS_PWR_STATE_IS_PWR_MASK (0x3 << 3)
+
+#define BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6
+#define BUTTRESS_PWR_STATE_PS_PWR_MASK (0x3 << 6)
+
+#define PS_FSM_CG BIT(3)
+
+#define BUTTRESS_OVERRIDE_IS_CLK BIT(1)
+#define BUTTRESS_OVERRIDE_PS_CLK BIT(2)
+/* ps_pll only valid for ipu8 */
+#define BUTTRESS_OWN_ACK_PS_PLL BIT(8)
+#define BUTTRESS_OWN_ACK_IS_CLK BIT(9)
+#define BUTTRESS_OWN_ACK_PS_CLK BIT(10)
+
+/* FW reset ctrl */
+#define BUTTRESS_FW_RESET_CTL_START BIT(0)
+#define BUTTRESS_FW_RESET_CTL_DONE BIT(1)
+
+/* security */
+#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16)
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0)
+
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0)
+#define BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1)
+#define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3)
+
+/* D2D */
+#define BUTTRESS_D2D_PWR_EN BIT(0)
+#define BUTTRESS_D2D_PWR_ACK BIT(4)
+
+/* NDE */
+#define NDE_VAL_MASK GENMASK(9, 0)
+#define NDE_SCALE_MASK GENMASK(12, 10)
+#define NDE_VALID_MASK BIT(13)
+#define NDE_RESVEC_MASK GENMASK(19, 16)
+#define NDE_IN_VBLANK_DIS_MASK BIT(31)
+
+#define BUTTRESS_NDE_VAL_ACTIVE 48
+#define BUTTRESS_NDE_SCALE_ACTIVE 2
+#define BUTTRESS_NDE_VALID_ACTIVE 1
+
+#define BUTTRESS_NDE_VAL_DEFAULT 1023
+#define BUTTRESS_NDE_SCALE_DEFAULT 2
+#define BUTTRESS_NDE_VALID_DEFAULT 0
+
+/* IS and PS UCX control */
+#define UCX_CTL_RESET BIT(0)
+#define UCX_CTL_RUN BIT(1)
+#define UCX_CTL_WAKEUP BIT(2)
+#define UCX_CTL_SPARE GENMASK(7, 3)
+#define UCX_STS_PWR GENMASK(17, 16)
+#define UCX_STS_SLEEPING BIT(18)
+
+/* offset from PHY base */
+#define PHY_CSI_CFG 0xc0
+#define PHY_CSI_RCOMP_CONTROL 0xc8
+#define PHY_CSI_BSCAN_EXCLUDE 0xd8
+
+#define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x))
+#define PHY_DPHY_DLL_OVRD(x) (0x14c + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL2(x) (0x114 + 0x100 * (x))
+#define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x))
+#define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x))
+
+/* PB registers */
+#define INTERRUPT_STATUS 0x0
+#define BTRS_LOCAL_INTERRUPT_MASK 0x4
+#define GLOBAL_INTERRUPT_MASK 0x8
+#define HM_ATS 0xc
+#define ATS_ERROR_LOG1 0x10
+#define ATS_ERROR_LOG2 0x14
+#define ATS_ERROR_CLEAR 0x18
+#define CFI_0_ERROR_LOG 0x1c
+#define CFI_0_ERROR_CLEAR 0x20
+#define HASH_CONFIG 0x2c
+#define TLBID_HASH_ENABLE_31_0 0x30
+#define TLBID_HASH_ENABLE_63_32 0x34
+#define TLBID_HASH_ENABLE_95_64 0x38
+#define TLBID_HASH_ENABLE_127_96 0x3c
+#define CFI_1_ERROR_LOGGING 0x40
+#define CFI_1_ERROR_CLEAR 0x44
+#define IMR_ERROR_LOGGING_LOW 0x48
+#define IMR_ERROR_LOGGING_HIGH 0x4c
+#define IMR_ERROR_CLEAR 0x50
+#define PORT_ARBITRATION_WEIGHTS 0x54
+#define IMR_ERROR_LOGGING_CFI_1_LOW 0x58
+#define IMR_ERROR_LOGGING_CFI_1_HIGH 0x5c
+#define IMR_ERROR_CLEAR_CFI_1 0x60
+#define BAR2_MISC_CONFIG 0x64
+#define RSP_ID_CONFIG_AXI2CFI_0 0x68
+#define RSP_ID_CONFIG_AXI2CFI_1 0x6c
+#define PB_DRIVER_PCODE_MAILBOX_STATUS 0x70
+#define PB_DRIVER_PCODE_MAILBOX_INTERFACE 0x74
+#define PORT_ARBITRATION_WEIGHTS_ATS 0x78
+
+#endif /* IPU7_BUTTRESS_REGS_H */
diff --git a/drivers/staging/media/ipu7/ipu7-buttress.c b/drivers/staging/media/ipu7/ipu7-buttress.c
new file mode 100644
index 000000000000..e5707f5e300b
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-buttress.c
@@ -0,0 +1,1192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <asm/cpu_device_id.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include "ipu7.h"
+#include "ipu7-bus.h"
+#include "ipu7-buttress.h"
+#include "ipu7-buttress-regs.h"
+
+#define BOOTLOADER_STATUS_OFFSET BUTTRESS_REG_FW_BOOT_PARAMS7
+
+#define BOOTLOADER_MAGIC_KEY 0xb00710adU
+
+#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
+#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
+#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE
+
+#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10U
+
+#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC)
+
+#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC)
+#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC)
+#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC)
+
+#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC
+#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC
+#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC)
+#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC)
+
+#define BUTTRESS_IPC_RESET_RETRY 2000U
+#define BUTTRESS_CSE_IPC_RESET_RETRY 4U
+#define BUTTRESS_IPC_CMD_SEND_RETRY 1U
+
+struct ipu7_ipc_buttress_msg {
+ u32 cmd;
+ u32 expected_resp;
+ bool require_resp;
+ u8 cmd_size;
+};
+
+static const u32 ipu7_adev_irq_mask[2] = {
+ BUTTRESS_IRQ_IS_IRQ,
+ BUTTRESS_IRQ_PS_IRQ
+};
+
+int ipu_buttress_ipc_reset(struct ipu7_device *isp,
+ struct ipu_buttress_ipc *ipc)
+{
+ unsigned int retries = BUTTRESS_IPC_RESET_RETRY;
+ struct ipu_buttress *b = &isp->buttress;
+ struct device *dev = &isp->pdev->dev;
+ u32 val = 0, csr_in_clr;
+
+ if (!isp->secure_mode) {
+ dev_dbg(dev, "Skip IPC reset for non-secure mode\n");
+ return 0;
+ }
+
+ mutex_lock(&b->ipc_mutex);
+
+ /* Clear-by-1 all CSR bits to reset the corresponding internal states. */
+ val = readl(isp->base + ipc->csr_in);
+ writel(val, isp->base + ipc->csr_in);
+
+ /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
+ writel(ENTRY, isp->base + ipc->csr_out);
+ /*
+ * Clear-by-1 all CSR bits EXCEPT following
+ * bits:
+ * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * C. Possibly custom bits, depending on
+ * their role.
+ */
+ csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
+ BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
+ BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;
+
+ do {
+ usleep_range(400, 500);
+ val = readl(isp->base + ipc->csr_in);
+ switch (val) {
+ case ENTRY | EXIT:
+ case ENTRY | EXIT | QUERY:
+ /*
+ * 1) Clear-by-1 CSR bits
+ * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
+ * 2) Set peer CSR bit
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+ */
+ writel(ENTRY | EXIT, isp->base + ipc->csr_in);
+ writel(QUERY, isp->base + ipc->csr_out);
+ break;
+ case ENTRY:
+ case ENTRY | QUERY:
+ /*
+ * 1) Clear-by-1 CSR bits
+ * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
+ * 2) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ */
+ writel(ENTRY | QUERY, isp->base + ipc->csr_in);
+ writel(ENTRY, isp->base + ipc->csr_out);
+ break;
+ case EXIT:
+ case EXIT | QUERY:
+ /*
+ * Clear-by-1 CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * 1) Clear incoming doorbell.
+ * 2) Clear-by-1 all CSR bits EXCEPT following
+ * bits:
+ * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * C. Possibly custom bits, depending on
+ * their role.
+ * 3) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ */
+ writel(EXIT, isp->base + ipc->csr_in);
+ writel(0, isp->base + ipc->db0_in);
+ writel(csr_in_clr, isp->base + ipc->csr_in);
+ writel(EXIT, isp->base + ipc->csr_out);
+
+ /*
+ * Read csr_in again to check whether RST_PHASE2 is done.
+ * If csr_in is QUERY, it should be handled again.
+ */
+ usleep_range(200, 300);
+ val = readl(isp->base + ipc->csr_in);
+ if (val & QUERY) {
+ dev_dbg(dev,
+ "RST_PHASE2 retry csr_in = %x\n", val);
+ break;
+ }
+ mutex_unlock(&b->ipc_mutex);
+ return 0;
+ case QUERY:
+ /*
+ * 1) Clear-by-1 CSR bit
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+ * 2) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE1
+ */
+ writel(QUERY, isp->base + ipc->csr_in);
+ writel(ENTRY, isp->base + ipc->csr_out);
+ break;
+ default:
+ dev_dbg_ratelimited(dev, "Unexpected CSR 0x%x\n", val);
+ break;
+ }
+ } while (retries--);
+
+ mutex_unlock(&b->ipc_mutex);
+ dev_err(dev, "Timed out while waiting for CSE\n");
+
+ return -ETIMEDOUT;
+}
+
+static void ipu_buttress_ipc_validity_close(struct ipu7_device *isp,
+ struct ipu_buttress_ipc *ipc)
+{
+ writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
+ isp->base + ipc->csr_out);
+}
+
+static int
+ipu_buttress_ipc_validity_open(struct ipu7_device *isp,
+ struct ipu_buttress_ipc *ipc)
+{
+ unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
+ void __iomem *addr;
+ int ret;
+ u32 val;
+
+ writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
+ isp->base + ipc->csr_out);
+
+ addr = isp->base + ipc->csr_in;
+ ret = readl_poll_timeout(addr, val, val & mask, 200,
+ BUTTRESS_IPC_VALIDITY_TIMEOUT_US);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
+ ipu_buttress_ipc_validity_close(isp, ipc);
+ }
+
+ return ret;
+}
+
+static void ipu_buttress_ipc_recv(struct ipu7_device *isp,
+ struct ipu_buttress_ipc *ipc, u32 *ipc_msg)
+{
+ if (ipc_msg)
+ *ipc_msg = readl(isp->base + ipc->data0_in);
+ writel(0, isp->base + ipc->db0_in);
+}
+
+static int ipu_buttress_ipc_send_msg(struct ipu7_device *isp,
+ struct ipu7_ipc_buttress_msg *msg)
+{
+ unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
+ unsigned int retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+ struct ipu_buttress *b = &isp->buttress;
+ struct ipu_buttress_ipc *ipc = &b->cse;
+ struct device *dev = &isp->pdev->dev;
+ int tout;
+ u32 val;
+ int ret;
+
+ mutex_lock(&b->ipc_mutex);
+
+ ret = ipu_buttress_ipc_validity_open(isp, ipc);
+ if (ret) {
+ dev_err(dev, "IPC validity open failed\n");
+ goto out;
+ }
+
+ tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS);
+ rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS);
+
+try:
+ reinit_completion(&ipc->send_complete);
+ if (msg->require_resp)
+ reinit_completion(&ipc->recv_complete);
+
+ dev_dbg(dev, "IPC command: 0x%x\n", msg->cmd);
+ writel(msg->cmd, isp->base + ipc->data0_out);
+ val = BUTTRESS_IU2CSEDB0_BUSY | msg->cmd_size;
+ writel(val, isp->base + ipc->db0_out);
+
+ tout = wait_for_completion_timeout(&ipc->send_complete,
+ tx_timeout_jiffies);
+ if (!tout) {
+ dev_err(dev, "send IPC response timeout\n");
+ if (!retry--) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Try again if CSE is not responding on first try */
+ writel(0, isp->base + ipc->db0_out);
+ goto try;
+ }
+
+ if (!msg->require_resp) {
+ ret = 0;
+ goto out;
+ }
+
+ tout = wait_for_completion_timeout(&ipc->recv_complete,
+ rx_timeout_jiffies);
+ if (!tout) {
+ dev_err(dev, "recv IPC response timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (ipc->nack_mask &&
+ (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
+ dev_err(dev, "IPC NACK for cmd 0x%x\n", msg->cmd);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ipc->recv_data != msg->expected_resp) {
+ dev_err(dev,
+ "expected resp: 0x%x, IPC response: 0x%x\n",
+ msg->expected_resp, ipc->recv_data);
+ ret = -EIO;
+ goto out;
+ }
+
+ dev_dbg(dev, "IPC commands done\n");
+
+out:
+ ipu_buttress_ipc_validity_close(isp, ipc);
+ mutex_unlock(&b->ipc_mutex);
+
+ return ret;
+}
+
+static int ipu_buttress_ipc_send(struct ipu7_device *isp,
+ u32 ipc_msg, u32 size, bool require_resp,
+ u32 expected_resp)
+{
+ struct ipu7_ipc_buttress_msg msg = {
+ .cmd = ipc_msg,
+ .cmd_size = size,
+ .require_resp = require_resp,
+ .expected_resp = expected_resp,
+ };
+
+ return ipu_buttress_ipc_send_msg(isp, &msg);
+}
+
+static irqreturn_t ipu_buttress_call_isr(struct ipu7_bus_device *adev)
+{
+ irqreturn_t ret = IRQ_WAKE_THREAD;
+
+ if (!adev || !adev->auxdrv || !adev->auxdrv_data)
+ return IRQ_NONE;
+
+ if (adev->auxdrv_data->isr)
+ ret = adev->auxdrv_data->isr(adev);
+
+ if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded)
+ ret = IRQ_NONE;
+
+ return ret;
+}
+
+irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr)
+{
+ struct ipu7_device *isp = isp_ptr;
+ struct ipu7_bus_device *adev[] = { isp->isys, isp->psys };
+ struct ipu_buttress *b = &isp->buttress;
+ struct device *dev = &isp->pdev->dev;
+ irqreturn_t ret = IRQ_NONE;
+ u32 pb_irq, pb_local_irq;
+ u32 disable_irqs = 0;
+ u32 irq_status;
+ unsigned int i;
+
+ pm_runtime_get_noresume(dev);
+
+ pb_irq = readl(isp->pb_base + INTERRUPT_STATUS);
+ writel(pb_irq, isp->pb_base + INTERRUPT_STATUS);
+
+ /* check btrs ATS, CFI and IMR errors, BIT(0) is unused for IPU */
+ pb_local_irq = readl(isp->pb_base + BTRS_LOCAL_INTERRUPT_MASK);
+ if (pb_local_irq & ~BIT(0)) {
+ dev_warn(dev, "PB interrupt status 0x%x local 0x%x\n", pb_irq,
+ pb_local_irq);
+ dev_warn(dev, "Details: %x %x %x %x %x %x %x %x\n",
+ readl(isp->pb_base + ATS_ERROR_LOG1),
+ readl(isp->pb_base + ATS_ERROR_LOG2),
+ readl(isp->pb_base + CFI_0_ERROR_LOG),
+ readl(isp->pb_base + CFI_1_ERROR_LOGGING),
+ readl(isp->pb_base + IMR_ERROR_LOGGING_LOW),
+ readl(isp->pb_base + IMR_ERROR_LOGGING_HIGH),
+ readl(isp->pb_base + IMR_ERROR_LOGGING_CFI_1_LOW),
+ readl(isp->pb_base + IMR_ERROR_LOGGING_CFI_1_HIGH));
+ }
+
+ irq_status = readl(isp->base + BUTTRESS_REG_IRQ_STATUS);
+ if (!irq_status) {
+ pm_runtime_put_noidle(dev);
+ return IRQ_NONE;
+ }
+
+ do {
+ writel(irq_status, isp->base + BUTTRESS_REG_IRQ_CLEAR);
+
+ for (i = 0; i < ARRAY_SIZE(ipu7_adev_irq_mask); i++) {
+ irqreturn_t r = ipu_buttress_call_isr(adev[i]);
+
+ if (!(irq_status & ipu7_adev_irq_mask[i]))
+ continue;
+
+ if (r == IRQ_WAKE_THREAD) {
+ ret = IRQ_WAKE_THREAD;
+ disable_irqs |= ipu7_adev_irq_mask[i];
+ } else if (ret == IRQ_NONE && r == IRQ_HANDLED) {
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ if (irq_status & (BUTTRESS_IRQS | BUTTRESS_IRQ_SAI_VIOLATION) &&
+ ret == IRQ_NONE)
+ ret = IRQ_HANDLED;
+
+ if (irq_status & BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING) {
+ dev_dbg(dev, "BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING\n");
+ ipu_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
+ complete(&b->cse.recv_complete);
+ }
+
+ if (irq_status & BUTTRESS_IRQ_CSE_CSR_SET)
+ dev_dbg(dev, "BUTTRESS_IRQ_CSE_CSR_SET\n");
+
+ if (irq_status & BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE) {
+ dev_dbg(dev, "BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE\n");
+ complete(&b->cse.send_complete);
+ }
+
+ if (irq_status & BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ)
+ dev_dbg(dev, "BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ\n");
+
+ if (irq_status & BUTTRESS_IRQ_SAI_VIOLATION &&
+ ipu_buttress_get_secure_mode(isp))
+ dev_err(dev, "BUTTRESS_IRQ_SAI_VIOLATION\n");
+
+ irq_status = readl(isp->base + BUTTRESS_REG_IRQ_STATUS);
+ } while (irq_status);
+
+ if (disable_irqs)
+ writel(BUTTRESS_IRQS & ~disable_irqs,
+ isp->base + BUTTRESS_REG_IRQ_ENABLE);
+
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr)
+{
+ struct ipu7_device *isp = isp_ptr;
+ struct ipu7_bus_device *adev[] = { isp->isys, isp->psys };
+ const struct ipu7_auxdrv_data *drv_data = NULL;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipu7_adev_irq_mask) && adev[i]; i++) {
+ drv_data = adev[i]->auxdrv_data;
+ if (!drv_data)
+ continue;
+
+ if (drv_data->wake_isr_thread &&
+ drv_data->isr_threaded(adev[i]) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_ENABLE);
+
+ return ret;
+}
+
+static int isys_d2d_power(struct device *dev, bool on)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+ int ret = 0;
+ u32 target = on ? BUTTRESS_D2D_PWR_ACK : 0U;
+ u32 val;
+
+ dev_dbg(dev, "power %s isys d2d.\n", on ? "UP" : "DOWN");
+ val = readl(isp->base + BUTTRESS_REG_D2D_CTL);
+ if ((val & BUTTRESS_D2D_PWR_ACK) == target) {
+ dev_info(dev, "d2d already in %s state.\n",
+ on ? "UP" : "DOWN");
+ return 0;
+ }
+
+ val = on ? val | BUTTRESS_D2D_PWR_EN : val & (~BUTTRESS_D2D_PWR_EN);
+ writel(val, isp->base + BUTTRESS_REG_D2D_CTL);
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_D2D_CTL,
+ val, (val & BUTTRESS_D2D_PWR_ACK) == target,
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret)
+ dev_err(dev, "power %s d2d timeout. status: 0x%x\n",
+ on ? "UP" : "DOWN", val);
+
+ return ret;
+}
+
+static void isys_nde_control(struct device *dev, bool on)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+ u32 val, value, scale, valid, resvec;
+ u32 nde_reg;
+
+ if (on) {
+ value = BUTTRESS_NDE_VAL_ACTIVE;
+ scale = BUTTRESS_NDE_SCALE_ACTIVE;
+ valid = BUTTRESS_NDE_VALID_ACTIVE;
+ } else {
+ value = BUTTRESS_NDE_VAL_DEFAULT;
+ scale = BUTTRESS_NDE_SCALE_DEFAULT;
+ valid = BUTTRESS_NDE_VALID_DEFAULT;
+ }
+
+ /* only set the fabrics resource ownership for ipu8 */
+ nde_reg = is_ipu8(isp->hw_ver) ? IPU8_BUTTRESS_REG_NDE_CONTROL :
+ IPU7_BUTTRESS_REG_NDE_CONTROL;
+ resvec = is_ipu8(isp->hw_ver) ? 0x2 : 0xe;
+ val = FIELD_PREP(NDE_VAL_MASK, value) |
+ FIELD_PREP(NDE_SCALE_MASK, scale) |
+ FIELD_PREP(NDE_VALID_MASK, valid) |
+ FIELD_PREP(NDE_RESVEC_MASK, resvec);
+
+ writel(val, isp->base + nde_reg);
+}
+
+static int ipu7_buttress_powerup(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+ u32 val, exp_sts;
+ int ret = 0;
+
+ if (!ctrl)
+ return 0;
+
+ mutex_lock(&isp->buttress.power_mutex);
+
+ exp_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+ if (ctrl->subsys_id == IPU_IS) {
+ ret = isys_d2d_power(dev, true);
+ if (ret)
+ goto out_power;
+ isys_nde_control(dev, true);
+ }
+
+ /* request clock resource ownership */
+ val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG);
+ val |= ctrl->ovrd_clk;
+ writel(val, isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG);
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SLEEP_LEVEL_STS,
+ val, (val & ctrl->own_clk_ack),
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret)
+ dev_warn(dev, "request clk ownership timeout. status 0x%x\n",
+ val);
+
+ val = ctrl->ratio << ctrl->ratio_shift | ctrl->cdyn << ctrl->cdyn_shift;
+
+ dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val,
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS");
+ writel(val, isp->base + ctrl->freq_ctl);
+
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS,
+ val, ((val & ctrl->pwr_sts_mask) == exp_sts),
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "%s power up timeout with status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+ goto out_power;
+ }
+
+ dev_dbg(dev, "%s power up successfully. status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+
+ /* release clock resource ownership */
+ val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG);
+ val &= ~ctrl->ovrd_clk;
+ writel(val, isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG);
+
+out_power:
+ mutex_unlock(&isp->buttress.power_mutex);
+
+ return ret;
+}
+
+static int ipu7_buttress_powerdown(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+ u32 val, exp_sts;
+ int ret = 0;
+
+ if (!ctrl)
+ return 0;
+
+ mutex_lock(&isp->buttress.power_mutex);
+
+ exp_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+ val = 0x8U << ctrl->ratio_shift;
+
+ dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val,
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS");
+ writel(val, isp->base + ctrl->freq_ctl);
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS,
+ val, ((val & ctrl->pwr_sts_mask) == exp_sts),
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "%s power down timeout with status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+ goto out_power;
+ }
+
+ dev_dbg(dev, "%s power down successfully. status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+out_power:
+ if (ctrl->subsys_id == IPU_IS && !ret) {
+ isys_d2d_power(dev, false);
+ isys_nde_control(dev, false);
+ }
+
+ mutex_unlock(&isp->buttress.power_mutex);
+
+ return ret;
+}
+
+static int ipu8_buttress_powerup(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+ u32 sleep_level_reg = BUTTRESS_REG_SLEEP_LEVEL_STS;
+ u32 val, exp_sts;
+ int ret = 0;
+
+ if (!ctrl)
+ return 0;
+
+ mutex_lock(&isp->buttress.power_mutex);
+ exp_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+ if (ctrl->subsys_id == IPU_IS) {
+ ret = isys_d2d_power(dev, true);
+ if (ret)
+ goto out_power;
+ isys_nde_control(dev, true);
+ }
+
+ /* request ps_pll when psys freq > 400 MHz */
+ if (ctrl->subsys_id == IPU_PS && ctrl->ratio > 0x10) {
+ writel(1, isp->base + BUTTRESS_REG_PS_PLL_ENABLE);
+ ret = readl_poll_timeout(isp->base + sleep_level_reg,
+ val, (val & ctrl->own_clk_ack),
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret)
+ dev_warn(dev, "ps_pll req ack timeout. status 0x%x\n",
+ val);
+ }
+
+ val = ctrl->ratio << ctrl->ratio_shift | ctrl->cdyn << ctrl->cdyn_shift;
+ dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val,
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS");
+ writel(val, isp->base + ctrl->freq_ctl);
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS,
+ val, ((val & ctrl->pwr_sts_mask) == exp_sts),
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "%s power up timeout with status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+ goto out_power;
+ }
+
+ dev_dbg(dev, "%s power up successfully. status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+out_power:
+ mutex_unlock(&isp->buttress.power_mutex);
+
+ return ret;
+}
+
+static int ipu8_buttress_powerdown(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+ u32 val, exp_sts;
+ int ret = 0;
+
+ if (!ctrl)
+ return 0;
+
+ mutex_lock(&isp->buttress.power_mutex);
+ exp_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+
+ if (ctrl->subsys_id == IPU_PS)
+ val = 0x10U << ctrl->ratio_shift;
+ else
+ val = 0x8U << ctrl->ratio_shift;
+
+ dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val,
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS");
+ writel(val, isp->base + ctrl->freq_ctl);
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS,
+ val, ((val & ctrl->pwr_sts_mask) == exp_sts),
+ 100, BUTTRESS_POWER_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "%s power down timeout with status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+ goto out_power;
+ }
+
+ dev_dbg(dev, "%s power down successfully. status: 0x%x\n",
+ ctrl->subsys_id == IPU_IS ? "IS" : "PS", val);
+out_power:
+ if (ctrl->subsys_id == IPU_IS && !ret) {
+ isys_d2d_power(dev, false);
+ isys_nde_control(dev, false);
+ }
+
+ if (ctrl->subsys_id == IPU_PS) {
+ val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_STS);
+ if (val & ctrl->own_clk_ack)
+ writel(0, isp->base + BUTTRESS_REG_PS_PLL_ENABLE);
+ }
+ mutex_unlock(&isp->buttress.power_mutex);
+
+ return ret;
+}
+
+int ipu_buttress_powerup(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+
+ if (is_ipu8(isp->hw_ver))
+ return ipu8_buttress_powerup(dev, ctrl);
+
+ return ipu7_buttress_powerup(dev, ctrl);
+}
+
+int ipu_buttress_powerdown(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl)
+{
+ struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp;
+
+ if (is_ipu8(isp->hw_ver))
+ return ipu8_buttress_powerdown(dev, ctrl);
+
+ return ipu7_buttress_powerdown(dev, ctrl);
+}
+
+bool ipu_buttress_get_secure_mode(struct ipu7_device *isp)
+{
+ u32 val;
+
+ val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+ return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+}
+
+bool ipu_buttress_auth_done(struct ipu7_device *isp)
+{
+ u32 val;
+
+ if (!isp->secure_mode)
+ return true;
+
+ val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+ val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val);
+
+ return val == BUTTRESS_SECURITY_CTL_AUTH_DONE;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_auth_done, "INTEL_IPU7");
+
+int ipu_buttress_get_isys_freq(struct ipu7_device *isp, u32 *freq)
+{
+ u32 reg_val;
+ int ret;
+
+ ret = pm_runtime_get_sync(&isp->isys->auxdev.dev);
+ if (ret < 0) {
+ pm_runtime_put(&isp->isys->auxdev.dev);
+ dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret);
+ return ret;
+ }
+
+ reg_val = readl(isp->base + BUTTRESS_REG_IS_WORKPOINT_REQ);
+
+ pm_runtime_put(&isp->isys->auxdev.dev);
+
+ if (is_ipu8(isp->hw_ver))
+ *freq = (reg_val & BUTTRESS_IS_FREQ_CTL_RATIO_MASK) * 25;
+ else
+ *freq = (reg_val & BUTTRESS_IS_FREQ_CTL_RATIO_MASK) * 50 / 3;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_get_isys_freq, "INTEL_IPU7");
+
+int ipu_buttress_get_psys_freq(struct ipu7_device *isp, u32 *freq)
+{
+ u32 reg_val;
+ int ret;
+
+ ret = pm_runtime_get_sync(&isp->psys->auxdev.dev);
+ if (ret < 0) {
+ pm_runtime_put(&isp->psys->auxdev.dev);
+ dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret);
+ return ret;
+ }
+
+ reg_val = readl(isp->base + BUTTRESS_REG_PS_WORKPOINT_REQ);
+
+ pm_runtime_put(&isp->psys->auxdev.dev);
+
+ reg_val &= BUTTRESS_PS_FREQ_CTL_RATIO_MASK;
+ *freq = BUTTRESS_PS_FREQ_RATIO_STEP * reg_val;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_get_psys_freq, "INTEL_IPU7");
+
+int ipu_buttress_reset_authentication(struct ipu7_device *isp)
+{
+ struct device *dev = &isp->pdev->dev;
+ int ret;
+ u32 val;
+
+ if (!isp->secure_mode) {
+ dev_dbg(dev, "Skip auth for non-secure mode\n");
+ return 0;
+ }
+
+ writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
+ BUTTRESS_REG_FW_RESET_CTL);
+
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
+ val & BUTTRESS_FW_RESET_CTL_DONE, 500,
+ BUTTRESS_CSE_FWRESET_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Time out while resetting authentication state\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "FW reset for authentication done\n");
+ writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
+ /* leave some time for HW restore */
+ usleep_range(800, 1000);
+
+ return 0;
+}
+
+int ipu_buttress_authenticate(struct ipu7_device *isp)
+{
+ struct ipu_buttress *b = &isp->buttress;
+ struct device *dev = &isp->pdev->dev;
+ u32 data, mask, done, fail;
+ int ret;
+
+ if (!isp->secure_mode) {
+ dev_dbg(dev, "Skip auth for non-secure mode\n");
+ return 0;
+ }
+
+ mutex_lock(&b->auth_mutex);
+
+ if (ipu_buttress_auth_done(isp)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ /*
+ * BUTTRESS_REG_FW_SOURCE_BASE needs to be set with FW CPD
+ * package address for secure mode.
+ */
+
+ writel(isp->cpd_fw->size, isp->base + BUTTRESS_REG_FW_SOURCE_SIZE);
+ writel(sg_dma_address(isp->psys->fw_sgt.sgl),
+ isp->base + BUTTRESS_REG_FW_SOURCE_BASE);
+
+ /*
+ * Write boot_load into IU2CSEDATA0
+ * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+ * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY
+ */
+ dev_info(dev, "Sending BOOT_LOAD to CSE\n");
+ ret = ipu_buttress_ipc_send(isp, BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
+ 1, true,
+ BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
+ if (ret) {
+ dev_err(dev, "CSE boot_load failed\n");
+ goto out_unlock;
+ }
+
+ mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
+ done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
+ fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+ ((data & mask) == done ||
+ (data & mask) == fail), 500,
+ BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "CSE boot_load timeout\n");
+ goto out_unlock;
+ }
+
+ if ((data & mask) == fail) {
+ dev_err(dev, "CSE auth failed\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = readl_poll_timeout(isp->base + BOOTLOADER_STATUS_OFFSET,
+ data, data == BOOTLOADER_MAGIC_KEY, 500,
+ BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Unexpected magic number 0x%x\n", data);
+ goto out_unlock;
+ }
+
+ /*
+ * Write authenticate_run into IU2CSEDATA0
+ * Write sizeof(authenticate_run) | 0x2 << CLIENT_ID to
+ * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY
+ */
+ dev_info(dev, "Sending AUTHENTICATE_RUN to CSE\n");
+ ret = ipu_buttress_ipc_send(isp, BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
+ 1, true,
+ BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
+ if (ret) {
+ dev_err(dev, "CSE authenticate_run failed\n");
+ goto out_unlock;
+ }
+
+ done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+ ((data & mask) == done ||
+ (data & mask) == fail), 500,
+ BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "CSE authenticate timeout\n");
+ goto out_unlock;
+ }
+
+ if ((data & mask) == fail) {
+ dev_err(dev, "CSE boot_load failed\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ dev_info(dev, "CSE authenticate_run done\n");
+
+out_unlock:
+ mutex_unlock(&b->auth_mutex);
+
+ return ret;
+}
+
+static int ipu_buttress_send_tsc_request(struct ipu7_device *isp)
+{
+ u32 val, mask, done;
+ int ret;
+
+ mask = BUTTRESS_PWR_STATUS_HH_STATUS_MASK;
+
+ writel(BUTTRESS_TSC_CMD_START_TSC_SYNC,
+ isp->base + BUTTRESS_REG_TSC_CMD);
+
+ val = readl(isp->base + BUTTRESS_REG_PWR_STATUS);
+ val = FIELD_GET(mask, val);
+ if (val == BUTTRESS_PWR_STATUS_HH_STATE_ERR) {
+ dev_err(&isp->pdev->dev, "Start tsc sync failed\n");
+ return -EINVAL;
+ }
+
+ done = BUTTRESS_PWR_STATUS_HH_STATE_DONE;
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val,
+ FIELD_GET(mask, val) == done, 500,
+ BUTTRESS_TSC_SYNC_TIMEOUT_US);
+ if (ret)
+ dev_err(&isp->pdev->dev, "Start tsc sync timeout\n");
+
+ return ret;
+}
+
+int ipu_buttress_start_tsc_sync(struct ipu7_device *isp)
+{
+ void __iomem *base = isp->base;
+ unsigned int i;
+ u32 val;
+
+ if (is_ipu8(isp->hw_ver)) {
+ for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+ val = readl(base + BUTTRESS_REG_PB_TIMESTAMP_VALID);
+ if (val == 1)
+ return 0;
+ usleep_range(40, 50);
+ }
+
+ dev_err(&isp->pdev->dev, "PB HH sync failed (valid %u)\n", val);
+ return -ETIMEDOUT;
+ }
+
+ if (is_ipu7p5(isp->hw_ver)) {
+ val = readl(base + BUTTRESS_REG_TSC_CTL);
+ val |= BUTTRESS_SEL_PB_TIMESTAMP;
+ writel(val, base + BUTTRESS_REG_TSC_CTL);
+
+ for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+ val = readl(base + BUTTRESS_REG_PB_TIMESTAMP_VALID);
+ if (val == 1)
+ return 0;
+ usleep_range(40, 50);
+ }
+
+ dev_err(&isp->pdev->dev, "PB HH sync failed (valid %u)\n", val);
+
+ return -ETIMEDOUT;
+ }
+
+ for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+ int ret;
+
+ ret = ipu_buttress_send_tsc_request(isp);
+ if (ret != -ETIMEDOUT)
+ return ret;
+
+ val = readl(base + BUTTRESS_REG_TSC_CTL);
+ val = val | BUTTRESS_TSW_WA_SOFT_RESET;
+ writel(val, base + BUTTRESS_REG_TSC_CTL);
+ val = val & (~BUTTRESS_TSW_WA_SOFT_RESET);
+ writel(val, base + BUTTRESS_REG_TSC_CTL);
+ }
+
+ dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n");
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_start_tsc_sync, "INTEL_IPU7");
+
+void ipu_buttress_tsc_read(struct ipu7_device *isp, u64 *val)
+{
+ unsigned long flags;
+ u32 tsc_hi, tsc_lo;
+
+ local_irq_save(flags);
+ if (is_ipu7(isp->hw_ver)) {
+ tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO);
+ tsc_hi = readl(isp->base + BUTTRESS_REG_TSC_HI);
+ } else {
+ tsc_lo = readl(isp->base + BUTTRESS_REG_PB_TIMESTAMP_LO);
+ tsc_hi = readl(isp->base + BUTTRESS_REG_PB_TIMESTAMP_HI);
+ }
+ *val = (u64)tsc_hi << 32 | tsc_lo;
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_tsc_read, "INTEL_IPU7");
+
+u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu7_device *isp)
+{
+ u64 ns = ticks * 10000;
+
+ /*
+ * Convert a TSC tick count to nanoseconds.
+ * Example (TSC clock frequency of 19.2 MHz):
+ * ns = ticks * 1000000000 / 19200000
+ *    = ticks * 10000 / 192
+ */
+ return div_u64(ns, isp->buttress.ref_clk);
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_tsc_ticks_to_ns, "INTEL_IPU7");
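+
+/*
+ * For example, ipu_buttress_init() programs b->ref_clk = 384; following the
+ * 19.2 MHz / 192 example above, that appears to correspond to a 38.4 MHz TSC
+ * clock, so the conversion reduces to ns = ticks * 10000 / 384.
+ */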
+
+/* trigger uc control to wakeup fw */
+void ipu_buttress_wakeup_is_uc(const struct ipu7_device *isp)
+{
+ u32 val;
+
+ val = readl(isp->base + BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS);
+ val |= UCX_CTL_WAKEUP;
+ writel(val, isp->base + BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS);
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_wakeup_is_uc, "INTEL_IPU7");
+
+void ipu_buttress_wakeup_ps_uc(const struct ipu7_device *isp)
+{
+ u32 val;
+
+ val = readl(isp->base + BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS);
+ val |= UCX_CTL_WAKEUP;
+ writel(val, isp->base + BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS);
+}
+EXPORT_SYMBOL_NS_GPL(ipu_buttress_wakeup_ps_uc, "INTEL_IPU7");
+
+static const struct x86_cpu_id ipu_misc_cfg_exclusion[] = {
+ X86_MATCH_VFM_STEPS(INTEL_PANTHERLAKE_L, 0x1, 0x1, 0),
+ {},
+};
+
+static void ipu_buttress_setup(struct ipu7_device *isp)
+{
+ struct device *dev = &isp->pdev->dev;
+ u32 val;
+
+ /* program PB BAR */
+#define WRXREQOP_OVRD_VAL_MASK GENMASK(22, 19)
+ writel(0, isp->pb_base + GLOBAL_INTERRUPT_MASK);
+ val = readl(isp->pb_base + BAR2_MISC_CONFIG);
+ if (is_ipu7(isp->hw_ver) || x86_match_cpu(ipu_misc_cfg_exclusion))
+ val |= 0x100U;
+ else
+ val |= FIELD_PREP(WRXREQOP_OVRD_VAL_MASK, 0xf) |
+ BIT(18) | 0x100U;
+
+ writel(val, isp->pb_base + BAR2_MISC_CONFIG);
+ val = readl(isp->pb_base + BAR2_MISC_CONFIG);
+
+ if (is_ipu8(isp->hw_ver)) {
+ writel(BIT(13), isp->pb_base + TLBID_HASH_ENABLE_63_32);
+ writel(BIT(9), isp->pb_base + TLBID_HASH_ENABLE_95_64);
+ dev_dbg(dev, "IPU8 TLBID_HASH %x %x\n",
+ readl(isp->pb_base + TLBID_HASH_ENABLE_63_32),
+ readl(isp->pb_base + TLBID_HASH_ENABLE_95_64));
+ } else if (is_ipu7p5(isp->hw_ver)) {
+ writel(BIT(14), isp->pb_base + TLBID_HASH_ENABLE_63_32);
+ writel(BIT(9), isp->pb_base + TLBID_HASH_ENABLE_95_64);
+ dev_dbg(dev, "IPU7P5 TLBID_HASH %x %x\n",
+ readl(isp->pb_base + TLBID_HASH_ENABLE_63_32),
+ readl(isp->pb_base + TLBID_HASH_ENABLE_95_64));
+ } else {
+ writel(BIT(22), isp->pb_base + TLBID_HASH_ENABLE_63_32);
+ writel(BIT(1), isp->pb_base + TLBID_HASH_ENABLE_127_96);
+ dev_dbg(dev, "TLBID_HASH %x %x\n",
+ readl(isp->pb_base + TLBID_HASH_ENABLE_63_32),
+ readl(isp->pb_base + TLBID_HASH_ENABLE_127_96));
+ }
+
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_CLEAR);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_MASK);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_ENABLE);
+ /* LNL SW workaround for PS PD hang with PS sub-domains during PD */
+ writel(PS_FSM_CG, isp->base + BUTTRESS_REG_CG_CTRL_BITS);
+}
+
+void ipu_buttress_restore(struct ipu7_device *isp)
+{
+ struct ipu_buttress *b = &isp->buttress;
+
+ ipu_buttress_setup(isp);
+
+ writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_IDLE_WDT);
+}
+
+int ipu_buttress_init(struct ipu7_device *isp)
+{
+ int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
+ struct ipu_buttress *b = &isp->buttress;
+ struct device *dev = &isp->pdev->dev;
+ u32 val;
+
+ mutex_init(&b->power_mutex);
+ mutex_init(&b->auth_mutex);
+ mutex_init(&b->cons_mutex);
+ mutex_init(&b->ipc_mutex);
+ init_completion(&b->cse.send_complete);
+ init_completion(&b->cse.recv_complete);
+
+ b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
+ b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
+ b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
+ b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
+ b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
+ b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
+ b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
+ b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;
+
+ isp->secure_mode = ipu_buttress_get_secure_mode(isp);
+ val = readl(isp->base + BUTTRESS_REG_IPU_SKU);
+ dev_info(dev, "IPU%u SKU %u in %s mode mask 0x%x\n", val & 0xfU,
+ (val >> 4) & 0x7U, isp->secure_mode ? "secure" : "non-secure",
+ readl(isp->base + BUTTRESS_REG_CAMERA_MASK));
+ b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_IDLE_WDT);
+ b->ref_clk = 384;
+
+ ipu_buttress_setup(isp);
+
+ /* Retry a couple of times in case CSE initialization is delayed */
+ do {
+ ret = ipu_buttress_ipc_reset(isp, &b->cse);
+ if (ret) {
+ dev_warn(dev, "IPC reset protocol failed, retrying\n");
+ } else {
+ dev_dbg(dev, "IPC reset done\n");
+ return 0;
+ }
+ } while (ipc_reset_retry--);
+
+ dev_err(dev, "IPC reset protocol failed\n");
+
+ mutex_destroy(&b->power_mutex);
+ mutex_destroy(&b->auth_mutex);
+ mutex_destroy(&b->cons_mutex);
+ mutex_destroy(&b->ipc_mutex);
+
+ return ret;
+}
+
+void ipu_buttress_exit(struct ipu7_device *isp)
+{
+ struct ipu_buttress *b = &isp->buttress;
+
+ writel(0, isp->base + BUTTRESS_REG_IRQ_ENABLE);
+ mutex_destroy(&b->power_mutex);
+ mutex_destroy(&b->auth_mutex);
+ mutex_destroy(&b->cons_mutex);
+ mutex_destroy(&b->ipc_mutex);
+}
diff --git a/drivers/staging/media/ipu7/ipu7-buttress.h b/drivers/staging/media/ipu7/ipu7-buttress.h
new file mode 100644
index 000000000000..8da7dd612575
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-buttress.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_BUTTRESS_H
+#define IPU7_BUTTRESS_H
+
+#include <linux/completion.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct device;
+struct ipu7_device;
+
+struct ipu_buttress_ctrl {
+ u32 subsys_id;
+ u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
+ u32 ratio;
+ u32 ratio_shift;
+ u32 cdyn;
+ u32 cdyn_shift;
+ u32 ovrd_clk;
+ u32 own_clk_ack;
+};
+
+struct ipu_buttress_ipc {
+ struct completion send_complete;
+ struct completion recv_complete;
+ u32 nack;
+ u32 nack_mask;
+ u32 recv_data;
+ u32 csr_out;
+ u32 csr_in;
+ u32 db0_in;
+ u32 db0_out;
+ u32 data0_out;
+ u32 data0_in;
+};
+
+struct ipu_buttress {
+ struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex;
+ struct ipu_buttress_ipc cse;
+ u32 psys_min_freq;
+ u32 wdt_cached_value;
+ u8 psys_force_ratio;
+ bool force_suspend;
+ u32 ref_clk;
+};
+
+int ipu_buttress_ipc_reset(struct ipu7_device *isp,
+ struct ipu_buttress_ipc *ipc);
+int ipu_buttress_powerup(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl);
+int ipu_buttress_powerdown(struct device *dev,
+ const struct ipu_buttress_ctrl *ctrl);
+bool ipu_buttress_get_secure_mode(struct ipu7_device *isp);
+int ipu_buttress_authenticate(struct ipu7_device *isp);
+int ipu_buttress_reset_authentication(struct ipu7_device *isp);
+bool ipu_buttress_auth_done(struct ipu7_device *isp);
+int ipu_buttress_get_isys_freq(struct ipu7_device *isp, u32 *freq);
+int ipu_buttress_get_psys_freq(struct ipu7_device *isp, u32 *freq);
+int ipu_buttress_start_tsc_sync(struct ipu7_device *isp);
+void ipu_buttress_tsc_read(struct ipu7_device *isp, u64 *val);
+u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu7_device *isp);
+
+irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr);
+irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr);
+int ipu_buttress_init(struct ipu7_device *isp);
+void ipu_buttress_exit(struct ipu7_device *isp);
+void ipu_buttress_csi_port_config(struct ipu7_device *isp,
+ u32 legacy, u32 combo);
+void ipu_buttress_restore(struct ipu7_device *isp);
+void ipu_buttress_wakeup_is_uc(const struct ipu7_device *isp);
+void ipu_buttress_wakeup_ps_uc(const struct ipu7_device *isp);
+#endif /* IPU7_BUTTRESS_H */
diff --git a/drivers/staging/media/ipu7/ipu7-cpd.c b/drivers/staging/media/ipu7/ipu7-cpd.c
new file mode 100644
index 000000000000..4f49fb57eae4
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-cpd.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 - 2025 Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/gfp_types.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ipu7.h"
+#include "ipu7-cpd.h"
+
+/* $CPD */
+#define CPD_HDR_MARK 0x44504324
+
+/* Maximum size is 4K DWORDs or 16KB */
+#define MAX_MANIFEST_SIZE (SZ_4K * sizeof(u32))
+
+#define CPD_MANIFEST_IDX 0
+#define CPD_BINARY_START_IDX 1U
+#define CPD_METADATA_START_IDX 2U
+#define CPD_BINARY_NUM 2U /* ISYS + PSYS */
+/*
+ * Entries include:
+ * 1 manifest entry.
+ * 1 metadata entry for each subsystem (ISYS and PSYS).
+ * 1 binary entry for each subsystem (ISYS and PSYS).
+ */
+#define CPD_ENTRY_NUM (CPD_BINARY_NUM * 2U + 1U)
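+/*
+ * Assuming index 0 is ISYS and index 1 is PSYS (per the ISYS + PSYS note
+ * above), the index macros above yield the following entry layout:
+ * entry 0: manifest
+ * entry 1: ISYS binary, entry 2: ISYS metadata
+ * entry 3: PSYS binary, entry 4: PSYS metadata
+ */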
+
+#define CPD_METADATA_ATTR 0xa
+#define CPD_METADATA_IPL 0x1c
+#define ONLINE_METADATA_SIZE 128U
+#define ONLINE_METADATA_LINES 6U
+
+struct ipu7_cpd_hdr {
+ u32 hdr_mark;
+ u32 ent_cnt;
+ u8 hdr_ver;
+ u8 ent_ver;
+ u8 hdr_len;
+ u8 rsvd;
+ u8 partition_name[4];
+ u32 crc32;
+} __packed;
+
+struct ipu7_cpd_ent {
+ u8 name[12];
+ u32 offset;
+ u32 len;
+ u8 rsvd[4];
+} __packed;
+
+struct ipu7_cpd_metadata_hdr {
+ u32 type;
+ u32 len;
+} __packed;
+
+struct ipu7_cpd_metadata_attr {
+ struct ipu7_cpd_metadata_hdr hdr;
+ u8 compression_type;
+ u8 encryption_type;
+ u8 rsvd[2];
+ u32 uncompressed_size;
+ u32 compressed_size;
+ u32 module_id;
+ u8 hash[48];
+} __packed;
+
+struct ipu7_cpd_metadata_ipl {
+ struct ipu7_cpd_metadata_hdr hdr;
+ u32 param[4];
+ u8 rsvd[8];
+} __packed;
+
+struct ipu7_cpd_metadata {
+ struct ipu7_cpd_metadata_attr attr;
+ struct ipu7_cpd_metadata_ipl ipl;
+} __packed;
+
+static inline struct ipu7_cpd_ent *ipu7_cpd_get_entry(const void *cpd, int idx)
+{
+ const struct ipu7_cpd_hdr *cpd_hdr = cpd;
+
+ return ((struct ipu7_cpd_ent *)((u8 *)cpd + cpd_hdr->hdr_len)) + idx;
+}
+
+#define ipu7_cpd_get_manifest(cpd) ipu7_cpd_get_entry(cpd, 0)
+
+static struct ipu7_cpd_metadata *ipu7_cpd_get_metadata(const void *cpd, int idx)
+{
+ struct ipu7_cpd_ent *cpd_ent =
+ ipu7_cpd_get_entry(cpd, CPD_METADATA_START_IDX + idx * 2);
+
+ return (struct ipu7_cpd_metadata *)((u8 *)cpd + cpd_ent->offset);
+}
+
+static int ipu7_cpd_validate_cpd(struct ipu7_device *isp,
+ const void *cpd, unsigned long data_size)
+{
+ const struct ipu7_cpd_hdr *cpd_hdr = cpd;
+ struct device *dev = &isp->pdev->dev;
+ struct ipu7_cpd_ent *ent;
+ unsigned int i;
+ u8 len;
+
+ len = cpd_hdr->hdr_len;
+
+ /* Ensure cpd hdr is within moduledata */
+ if (data_size < len) {
+ dev_err(dev, "Invalid CPD moduledata size\n");
+ return -EINVAL;
+ }
+
+ /* Check for CPD file marker */
+ if (cpd_hdr->hdr_mark != CPD_HDR_MARK) {
+ dev_err(dev, "Invalid CPD header marker\n");
+ return -EINVAL;
+ }
+
+ /* Sanity check for CPD entry header */
+ if (cpd_hdr->ent_cnt != CPD_ENTRY_NUM) {
+ dev_err(dev, "Invalid CPD entry number %d\n",
+ cpd_hdr->ent_cnt);
+ return -EINVAL;
+ }
+ if ((data_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) {
+ dev_err(dev, "Invalid CPD entry headers\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that all entries are within moduledata */
+ ent = (struct ipu7_cpd_ent *)(((u8 *)cpd_hdr) + len);
+ for (i = 0; i < cpd_hdr->ent_cnt; i++) {
+ if (data_size < ent->offset ||
+ data_size - ent->offset < ent->len) {
+ dev_err(dev, "Invalid CPD entry %d\n", i);
+ return -EINVAL;
+ }
+ ent++;
+ }
+
+ return 0;
+}
+
+static int ipu7_cpd_validate_metadata(struct ipu7_device *isp,
+ const void *cpd, int idx)
+{
+ const struct ipu7_cpd_ent *cpd_ent =
+ ipu7_cpd_get_entry(cpd, CPD_METADATA_START_IDX + idx * 2);
+ const struct ipu7_cpd_metadata *metadata =
+ ipu7_cpd_get_metadata(cpd, idx);
+ struct device *dev = &isp->pdev->dev;
+
+ /* Sanity check for metadata size */
+ if (cpd_ent->len != sizeof(struct ipu7_cpd_metadata)) {
+ dev_err(dev, "Invalid metadata size\n");
+ return -EINVAL;
+ }
+
+ /* Validate type and length of metadata sections */
+ if (metadata->attr.hdr.type != CPD_METADATA_ATTR) {
+ dev_err(dev, "Invalid metadata attr type (%d)\n",
+ metadata->attr.hdr.type);
+ return -EINVAL;
+ }
+ if (metadata->attr.hdr.len != sizeof(struct ipu7_cpd_metadata_attr)) {
+ dev_err(dev, "Invalid metadata attr size (%d)\n",
+ metadata->attr.hdr.len);
+ return -EINVAL;
+ }
+ if (metadata->ipl.hdr.type != CPD_METADATA_IPL) {
+ dev_err(dev, "Invalid metadata ipl type (%d)\n",
+ metadata->ipl.hdr.type);
+ return -EINVAL;
+ }
+ if (metadata->ipl.hdr.len != sizeof(struct ipu7_cpd_metadata_ipl)) {
+ dev_err(dev, "Invalid metadata ipl size (%d)\n",
+ metadata->ipl.hdr.len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ipu7_cpd_validate_cpd_file(struct ipu7_device *isp, const void *cpd_file,
+ unsigned long cpd_file_size)
+{
+ struct device *dev = &isp->pdev->dev;
+ struct ipu7_cpd_ent *ent;
+ unsigned int i;
+ int ret;
+ char *buf;
+
+ ret = ipu7_cpd_validate_cpd(isp, cpd_file, cpd_file_size);
+ if (ret) {
+ dev_err(dev, "Invalid CPD in file\n");
+ return -EINVAL;
+ }
+
+ /* Sanity check for manifest size */
+ ent = ipu7_cpd_get_manifest(cpd_file);
+ if (ent->len > MAX_MANIFEST_SIZE) {
+ dev_err(dev, "Invalid manifest size\n");
+ return -EINVAL;
+ }
+
+ /* Validate metadata */
+ for (i = 0; i < CPD_BINARY_NUM; i++) {
+ ret = ipu7_cpd_validate_metadata(isp, cpd_file, i);
+ if (ret) {
+ dev_err(dev, "Invalid metadata%d\n", i);
+ return ret;
+ }
+ }
+
+ /* Get fw binary version. */
+ buf = kmalloc(ONLINE_METADATA_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
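+	/*
+	 * The last ONLINE_METADATA_SIZE bytes of each binary hold
+	 * newline-separated build info; lines 1-4 carry the name, version,
+	 * timestamp and commit logged below.
+	 */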
+ for (i = 0; i < CPD_BINARY_NUM; i++) {
+ char *lines[ONLINE_METADATA_LINES];
+ char *info = buf;
+ unsigned int l;
+
+ ent = ipu7_cpd_get_entry(cpd_file,
+ CPD_BINARY_START_IDX + i * 2U);
+ memcpy(info, (u8 *)cpd_file + ent->offset + ent->len -
+ ONLINE_METADATA_SIZE, ONLINE_METADATA_SIZE);
+ for (l = 0; l < ONLINE_METADATA_LINES; l++) {
+ lines[l] = strsep((char **)&info, "\n");
+ if (!lines[l])
+ break;
+ }
+ if (l < ONLINE_METADATA_LINES) {
+ dev_err(dev, "Failed to parse fw binary%d info.\n", i);
+ continue;
+ }
+ dev_info(dev, "FW binary%d info:\n", i);
+ dev_info(dev, "Name: %s\n", lines[1]);
+ dev_info(dev, "Version: %s\n", lines[2]);
+ dev_info(dev, "Timestamp: %s\n", lines[3]);
+ dev_info(dev, "Commit: %s\n", lines[4]);
+ }
+ kfree(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_cpd_validate_cpd_file, "INTEL_IPU7");
+
+int ipu7_cpd_copy_binary(const void *cpd, const char *name,
+ void *code_region, u32 *entry)
+{
+ unsigned int i;
+
+ for (i = 0; i < CPD_BINARY_NUM; i++) {
+ const struct ipu7_cpd_ent *binary =
+ ipu7_cpd_get_entry(cpd, CPD_BINARY_START_IDX + i * 2U);
+ const struct ipu7_cpd_metadata *metadata =
+ ipu7_cpd_get_metadata(cpd, i);
+
+ if (!strncmp(binary->name, name, sizeof(binary->name))) {
+ memcpy(code_region + metadata->ipl.param[0],
+ cpd + binary->offset, binary->len);
+ *entry = metadata->ipl.param[2];
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_cpd_copy_binary, "INTEL_IPU7");
diff --git a/drivers/staging/media/ipu7/ipu7-cpd.h b/drivers/staging/media/ipu7/ipu7-cpd.h
new file mode 100644
index 000000000000..b4178848c6b9
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-cpd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_CPD_H
+#define IPU7_CPD_H
+
+struct ipu7_device;
+
+int ipu7_cpd_validate_cpd_file(struct ipu7_device *isp,
+ const void *cpd_file,
+ unsigned long cpd_file_size);
+int ipu7_cpd_copy_binary(const void *cpd, const char *name,
+ void *code_region, u32 *entry);
+#endif /* IPU7_CPD_H */
diff --git a/drivers/staging/media/ipu7/ipu7-dma.c b/drivers/staging/media/ipu7/ipu7-dma.c
new file mode 100644
index 000000000000..a118b41b2f34
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-dma.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <linux/iova.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ipu7.h"
+#include "ipu7-bus.h"
+#include "ipu7-dma.h"
+#include "ipu7-mmu.h"
+
+struct vm_info {
+ struct list_head list;
+ struct page **pages;
+ dma_addr_t ipu7_iova;
+ void *vaddr;
+ unsigned long size;
+};
+
+static struct vm_info *get_vm_info(struct ipu7_mmu *mmu, dma_addr_t iova)
+{
+ struct vm_info *info, *save;
+
+ list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
+ if (iova >= info->ipu7_iova &&
+ iova < (info->ipu7_iova + info->size))
+ return info;
+ }
+
+ return NULL;
+}
+
+static void __clear_buffer(struct page *page, size_t size, unsigned long attrs)
+{
+ void *ptr;
+
+ if (!page)
+ return;
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+ memset(ptr, 0, size);
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ clflush_cache_range(ptr, size);
+}
+
+static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs)
+{
+ unsigned int count = PHYS_PFN(size);
+ unsigned int array_size = count * sizeof(struct page *);
+ struct page **pages;
+ int i = 0;
+
+ pages = kvzalloc(array_size, GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ gfp |= __GFP_NOWARN;
+
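+	/*
+	 * Allocate the largest power-of-two block that fits, falling back to
+	 * smaller orders on failure, then split it so each page can later be
+	 * freed individually in __free_buffer().
+	 */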
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1U << order;
+ while (j--)
+ pages[i + j] = pages[i] + j;
+ }
+
+ __clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ i += 1U << order;
+ count -= 1U << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ kvfree(pages);
+ return NULL;
+}
+
+static void __free_buffer(struct page **pages, size_t size, unsigned long attrs)
+{
+ unsigned int count = PHYS_PFN(size);
+ unsigned int i;
+
+ for (i = 0; i < count && pages[i]; i++) {
+ __clear_buffer(pages[i], PAGE_SIZE, attrs);
+ __free_pages(pages[i], 0);
+ }
+
+ kvfree(pages);
+}
+
+void ipu7_dma_sync_single(struct ipu7_bus_device *sys, dma_addr_t dma_handle,
+ size_t size)
+{
+ void *vaddr;
+ u32 offset;
+ struct vm_info *info;
+ struct ipu7_mmu *mmu = sys->mmu;
+
+ info = get_vm_info(mmu, dma_handle);
+ if (WARN_ON(!info))
+ return;
+
+ offset = dma_handle - info->ipu7_iova;
+ if (WARN_ON(size > (info->size - offset)))
+ return;
+
+ vaddr = info->vaddr + offset;
+ clflush_cache_range(vaddr, size);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_single, "INTEL_IPU7");
+
+void ipu7_dma_sync_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist,
+ unsigned int nents)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ clflush_cache_range(sg_virt(sg), sg->length);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_sg, "INTEL_IPU7");
+
+void ipu7_dma_sync_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt)
+{
+ ipu7_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_sgtable, "INTEL_IPU7");
+
+void *ipu7_dma_alloc(struct ipu7_bus_device *sys, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct device *dev = &sys->auxdev.dev;
+ struct pci_dev *pdev = sys->isp->pdev;
+ dma_addr_t pci_dma_addr, ipu7_iova;
+ struct ipu7_mmu *mmu = sys->mmu;
+ struct vm_info *info;
+ unsigned long count;
+ struct page **pages;
+ struct iova *iova;
+ unsigned int i;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = PHYS_PFN(size);
+
+ iova = alloc_iova(&mmu->dmap->iovad, count,
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
+ if (!iova)
+ goto out_kfree;
+
+ pages = __alloc_buffer(size, gfp, attrs);
+ if (!pages)
+ goto out_free_iova;
+
+ dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n",
+ size, iova->pfn_lo, iova->pfn_hi);
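+	/*
+	 * Map each page at two levels: through the PCI DMA API to get a PCI
+	 * DMA address, then into the IPU MMU so the device sees a contiguous
+	 * IOVA range starting at iova->pfn_lo.
+	 */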
+ for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
+ pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
+ dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
+ &pci_dma_addr);
+ if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
+ dev_err(dev, "pci_dma_mapping for page[%d] failed", i);
+ goto out_unmap;
+ }
+
+ ret = ipu7_mmu_map(mmu->dmap->mmu_info,
+ PFN_PHYS(iova->pfn_lo + i), pci_dma_addr,
+ PAGE_SIZE);
+ if (ret) {
+ dev_err(dev, "ipu7_mmu_map for pci_dma[%d] %pad failed",
+ i, &pci_dma_addr);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
+ goto out_unmap;
+ }
+ }
+
+ info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+ if (!info->vaddr)
+ goto out_unmap;
+
+ *dma_handle = PFN_PHYS(iova->pfn_lo);
+
+ info->pages = pages;
+ info->ipu7_iova = *dma_handle;
+ info->size = size;
+ list_add(&info->list, &mmu->vma_list);
+
+ return info->vaddr;
+
+out_unmap:
+ while (i--) {
+ ipu7_iova = PFN_PHYS(iova->pfn_lo + i);
+ pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ ipu7_iova);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
+
+ ipu7_mmu_unmap(mmu->dmap->mmu_info, ipu7_iova, PAGE_SIZE);
+ }
+
+ __free_buffer(pages, size, attrs);
+
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+out_kfree:
+ kfree(info);
+
+ return NULL;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_alloc, "INTEL_IPU7");
+
+void ipu7_dma_free(struct ipu7_bus_device *sys, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ struct ipu7_mmu *mmu = sys->mmu;
+ struct pci_dev *pdev = sys->isp->pdev;
+ struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
+ dma_addr_t pci_dma_addr, ipu7_iova;
+ struct vm_info *info;
+ struct page **pages;
+ unsigned int i;
+
+ if (WARN_ON(!iova))
+ return;
+
+ info = get_vm_info(mmu, dma_handle);
+ if (WARN_ON(!info))
+ return;
+
+ if (WARN_ON(!info->vaddr))
+ return;
+
+ if (WARN_ON(!info->pages))
+ return;
+
+ list_del(&info->list);
+
+ size = PAGE_ALIGN(size);
+
+ pages = info->pages;
+
+ vunmap(vaddr);
+
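+	/*
+	 * Undo the two-level mapping set up in ipu7_dma_alloc(): unmap each
+	 * page from the PCI DMA API, then tear down the IPU MMU range and
+	 * free the pages.
+	 */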
+ for (i = 0; i < PHYS_PFN(size); i++) {
+ ipu7_iova = PFN_PHYS(iova->pfn_lo + i);
+ pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ ipu7_iova);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
+ }
+
+ ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+
+ __free_buffer(pages, size, attrs);
+
+ mmu->tlb_invalidate(mmu);
+
+ __free_iova(&mmu->dmap->iovad, iova);
+
+ kfree(info);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_free, "INTEL_IPU7");
+
+int ipu7_dma_mmap(struct ipu7_bus_device *sys, struct vm_area_struct *vma,
+ void *addr, dma_addr_t iova, size_t size,
+ unsigned long attrs)
+{
+ struct ipu7_mmu *mmu = sys->mmu;
+ size_t count = PFN_UP(size);
+ struct vm_info *info;
+ size_t i;
+ int ret;
+
+ info = get_vm_info(mmu, iova);
+ if (!info)
+ return -EFAULT;
+
+ if (!info->vaddr)
+ return -EFAULT;
+
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (size > info->size)
+ return -EFAULT;
+
+ for (i = 0; i < count; i++) {
+ ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i),
+ info->pages[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void ipu7_dma_unmap_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct device *dev = &sys->auxdev.dev;
+ struct ipu7_mmu *mmu = sys->mmu;
+ struct iova *iova = find_iova(&mmu->dmap->iovad,
+ PHYS_PFN(sg_dma_address(sglist)));
+ struct scatterlist *sg;
+ dma_addr_t pci_dma_addr;
+ unsigned int i;
+
+ if (!nents)
+ return;
+
+ if (WARN_ON(!iova))
+ return;
+
+	/*
+	 * Before the IPU7 MMU unmap, restore the PCI DMA address in each sg
+	 * entry; nents is assumed to be no larger than orig_nents, since the
+	 * smallest mapping granule is one 4 KiB page.
+	 */
+ dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents);
+ for_each_sg(sglist, sg, nents, i) {
+ dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i,
+ &sg_dma_address(sg), sg_dma_len(sg));
+ pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ sg_dma_address(sg));
+ dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
+ &pci_dma_addr, i);
+ sg_dma_address(sg) = pci_dma_addr;
+ }
+
+ dev_dbg(dev, "ipu7_mmu_unmap low pfn %lu high pfn %lu\n",
+ iova->pfn_lo, iova->pfn_hi);
+ ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+
+ mmu->tlb_invalidate(mmu);
+ __free_iova(&mmu->dmap->iovad, iova);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_unmap_sg, "INTEL_IPU7");
+
+int ipu7_dma_map_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct device *dev = &sys->auxdev.dev;
+ struct ipu7_mmu *mmu = sys->mmu;
+ struct scatterlist *sg;
+ struct iova *iova;
+ size_t npages = 0;
+ unsigned long iova_addr;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (sg->offset) {
+ dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n",
+ i, sg->offset);
+ return -EFAULT;
+ }
+ }
+
+ for_each_sg(sglist, sg, nents, i)
+ npages += PFN_UP(sg_dma_len(sg));
+
+ dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n",
+ nents, npages);
+
+ if (attrs & DMA_ATTR_RESERVE_REGION) {
+		/*
+		 * Reserve an iova range aligned to IPU_FW_CODE_REGION_SIZE.
+		 * This applies only in non-secure mode.
+		 */
+ unsigned long lo, hi;
+
+ lo = iova_pfn(&mmu->dmap->iovad, IPU_FW_CODE_REGION_START);
+ hi = iova_pfn(&mmu->dmap->iovad, IPU_FW_CODE_REGION_END) - 1U;
+ iova = reserve_iova(&mmu->dmap->iovad, lo, hi);
+ if (!iova) {
+ dev_err(dev, "Reserve iova[%lx:%lx] failed.\n", lo, hi);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "iova[%lx:%lx] reserved for FW code.\n", lo, hi);
+ } else {
+ iova = alloc_iova(&mmu->dmap->iovad, npages,
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end),
+ 0);
+ if (!iova)
+ return 0;
+ }
+
+ dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
+ iova->pfn_hi);
+
+ iova_addr = iova->pfn_lo;
+ for_each_sg(sglist, sg, nents, i) {
+ phys_addr_t iova_pa;
+ int ret;
+
+ iova_pa = PFN_PHYS(iova_addr);
+ dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n",
+ i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg));
+
+ ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
+ sg_dma_address(sg),
+ PAGE_ALIGN(sg_dma_len(sg)));
+ if (ret)
+ goto out_fail;
+
+ sg_dma_address(sg) = PFN_PHYS(iova_addr);
+
+ iova_addr += PFN_UP(sg_dma_len(sg));
+ }
+
+ dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages);
+
+ return nents;
+
+out_fail:
+ ipu7_dma_unmap_sg(sys, sglist, i, dir, attrs);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_map_sg, "INTEL_IPU7");
+
+int ipu7_dma_map_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ int nents;
+
+ nents = ipu7_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
+ if (nents < 0)
+ return nents;
+
+ sgt->nents = nents;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_map_sgtable, "INTEL_IPU7");
+
+void ipu7_dma_unmap_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ ipu7_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dma_unmap_sgtable, "INTEL_IPU7");
diff --git a/drivers/staging/media/ipu7/ipu7-dma.h b/drivers/staging/media/ipu7/ipu7-dma.h
new file mode 100644
index 000000000000..fe789af5e664
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-dma.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013--2025 Intel Corporation */
+
+#ifndef IPU7_DMA_H
+#define IPU7_DMA_H
+
+#include <linux/dma-map-ops.h>
+#include <linux/dma-mapping.h>
+#include <linux/iova.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include "ipu7-bus.h"
+
+#define DMA_ATTR_RESERVE_REGION BIT(31)
+struct ipu7_mmu_info;
+
+struct ipu7_dma_mapping {
+ struct ipu7_mmu_info *mmu_info;
+ struct iova_domain iovad;
+};
+
+void ipu7_dma_sync_single(struct ipu7_bus_device *sys, dma_addr_t dma_handle,
+ size_t size);
+void ipu7_dma_sync_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist,
+ unsigned int nents);
+void ipu7_dma_sync_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt);
+void *ipu7_dma_alloc(struct ipu7_bus_device *sys, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs);
+void ipu7_dma_free(struct ipu7_bus_device *sys, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs);
+int ipu7_dma_mmap(struct ipu7_bus_device *sys, struct vm_area_struct *vma,
+ void *addr, dma_addr_t iova, size_t size,
+ unsigned long attrs);
+int ipu7_dma_map_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs);
+void ipu7_dma_unmap_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs);
+int ipu7_dma_map_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
+void ipu7_dma_unmap_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
+#endif /* IPU7_DMA_H */
diff --git a/drivers/staging/media/ipu7/ipu7-fw-isys.c b/drivers/staging/media/ipu7/ipu7-fw-isys.c
new file mode 100644
index 000000000000..e4b9c364572b
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-fw-isys.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "abi/ipu7_fw_insys_config_abi.h"
+#include "abi/ipu7_fw_isys_abi.h"
+
+#include "ipu7.h"
+#include "ipu7-boot.h"
+#include "ipu7-bus.h"
+#include "ipu7-dma.h"
+#include "ipu7-fw-isys.h"
+#include "ipu7-isys.h"
+#include "ipu7-platform-regs.h"
+#include "ipu7-syscom.h"
+
+static const char * const send_msg_types[N_IPU_INSYS_SEND_TYPE] = {
+ "STREAM_OPEN",
+ "STREAM_START_AND_CAPTURE",
+ "STREAM_CAPTURE",
+ "STREAM_ABORT",
+ "STREAM_FLUSH",
+ "STREAM_CLOSE"
+};
+
+int ipu7_fw_isys_complex_cmd(struct ipu7_isys *isys,
+ const unsigned int stream_handle,
+ void *cpu_mapped_buf,
+ dma_addr_t dma_mapped_buf,
+ size_t size, u16 send_type)
+{
+ struct ipu7_syscom_context *ctx = isys->adev->syscom;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu7_insys_send_queue_token *token;
+
+ if (send_type >= N_IPU_INSYS_SEND_TYPE)
+ return -EINVAL;
+
+ dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]);
+
+	/*
+	 * Flush the cache if the message carries a payload; not all
+	 * messages do.
+	 */
+ if (cpu_mapped_buf)
+ clflush_cache_range(cpu_mapped_buf, size);
+
+ token = ipu7_syscom_get_token(ctx, stream_handle +
+ IPU_INSYS_INPUT_MSG_QUEUE);
+ if (!token)
+ return -EBUSY;
+
+ token->addr = dma_mapped_buf;
+ token->buf_handle = (unsigned long)cpu_mapped_buf;
+ token->send_type = send_type;
+ token->stream_id = stream_handle;
+ token->flag = IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_NONE;
+
+ ipu7_syscom_put_token(ctx, stream_handle + IPU_INSYS_INPUT_MSG_QUEUE);
+	/* now wake up the FW */
+ ipu_buttress_wakeup_is_uc(isys->adev->isp);
+
+ return 0;
+}
+
+int ipu7_fw_isys_simple_cmd(struct ipu7_isys *isys,
+ const unsigned int stream_handle, u16 send_type)
+{
+ return ipu7_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0,
+ send_type);
+}
+
+int ipu7_fw_isys_init(struct ipu7_isys *isys)
+{
+ struct syscom_queue_config *queue_configs;
+ struct ipu7_bus_device *adev = isys->adev;
+ struct device *dev = &adev->auxdev.dev;
+ struct ipu7_insys_config *isys_config;
+ struct ipu7_syscom_context *syscom;
+ dma_addr_t isys_config_dma_addr;
+ unsigned int i, num_queues;
+ u32 freq;
+ u8 major;
+ int ret;
+
+ /* Allocate and init syscom context. */
+ syscom = devm_kzalloc(dev, sizeof(struct ipu7_syscom_context),
+ GFP_KERNEL);
+ if (!syscom)
+ return -ENOMEM;
+
+ adev->syscom = syscom;
+ syscom->num_input_queues = IPU_INSYS_MAX_INPUT_QUEUES;
+ syscom->num_output_queues = IPU_INSYS_MAX_OUTPUT_QUEUES;
+ num_queues = syscom->num_input_queues + syscom->num_output_queues;
+ queue_configs = devm_kzalloc(dev, FW_QUEUE_CONFIG_SIZE(num_queues),
+ GFP_KERNEL);
+ if (!queue_configs) {
+ ipu7_fw_isys_release(isys);
+ return -ENOMEM;
+ }
+ syscom->queue_configs = queue_configs;
+ queue_configs[IPU_INSYS_OUTPUT_MSG_QUEUE].max_capacity =
+ IPU_ISYS_SIZE_RECV_QUEUE;
+ queue_configs[IPU_INSYS_OUTPUT_MSG_QUEUE].token_size_in_bytes =
+ sizeof(struct ipu7_insys_resp);
+ queue_configs[IPU_INSYS_OUTPUT_LOG_QUEUE].max_capacity =
+ IPU_ISYS_SIZE_LOG_QUEUE;
+ queue_configs[IPU_INSYS_OUTPUT_LOG_QUEUE].token_size_in_bytes =
+ sizeof(struct ipu7_insys_resp);
+ queue_configs[IPU_INSYS_OUTPUT_RESERVED_QUEUE].max_capacity = 0;
+ queue_configs[IPU_INSYS_OUTPUT_RESERVED_QUEUE].token_size_in_bytes = 0;
+
+ queue_configs[IPU_INSYS_INPUT_DEV_QUEUE].max_capacity =
+ IPU_ISYS_MAX_STREAMS;
+ queue_configs[IPU_INSYS_INPUT_DEV_QUEUE].token_size_in_bytes =
+ sizeof(struct ipu7_insys_send_queue_token);
+
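+	/* The remaining input queues are the per-stream message queues. */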
+ for (i = IPU_INSYS_INPUT_MSG_QUEUE; i < num_queues; i++) {
+ queue_configs[i].max_capacity = IPU_ISYS_SIZE_SEND_QUEUE;
+ queue_configs[i].token_size_in_bytes =
+ sizeof(struct ipu7_insys_send_queue_token);
+ }
+
+ /* Allocate ISYS subsys config. */
+ isys_config = ipu7_dma_alloc(adev, sizeof(struct ipu7_insys_config),
+ &isys_config_dma_addr, GFP_KERNEL, 0);
+ if (!isys_config) {
+ dev_err(dev, "Failed to allocate isys subsys config.\n");
+ ipu7_fw_isys_release(isys);
+ return -ENOMEM;
+ }
+ isys->subsys_config = isys_config;
+ isys->subsys_config_dma_addr = isys_config_dma_addr;
+ memset(isys_config, 0, sizeof(struct ipu7_insys_config));
+ isys_config->logger_config.use_source_severity = 0;
+ isys_config->logger_config.use_channels_enable_bitmask = 1;
+ isys_config->logger_config.channels_enable_bitmask =
+ LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK;
+ isys_config->logger_config.hw_printf_buffer_base_addr = 0U;
+ isys_config->logger_config.hw_printf_buffer_size_bytes = 0U;
+ isys_config->wdt_config.wdt_timer1_us = 0;
+ isys_config->wdt_config.wdt_timer2_us = 0;
+ ret = ipu_buttress_get_isys_freq(adev->isp, &freq);
+ if (ret) {
+ dev_err(dev, "Failed to get ISYS frequency.\n");
+ ipu7_fw_isys_release(isys);
+ return ret;
+ }
+
+ ipu7_dma_sync_single(adev, isys_config_dma_addr,
+ sizeof(struct ipu7_insys_config));
+
+ major = is_ipu8(adev->isp->hw_ver) ? 2U : 1U;
+ ret = ipu7_boot_init_boot_config(adev, queue_configs, num_queues,
+ freq, isys_config_dma_addr, major);
+ if (ret)
+ ipu7_fw_isys_release(isys);
+
+ return ret;
+}
+
+void ipu7_fw_isys_release(struct ipu7_isys *isys)
+{
+ struct ipu7_bus_device *adev = isys->adev;
+
+ ipu7_boot_release_boot_config(adev);
+ if (isys->subsys_config) {
+ ipu7_dma_free(adev,
+ sizeof(struct ipu7_insys_config),
+ isys->subsys_config,
+ isys->subsys_config_dma_addr, 0);
+ isys->subsys_config = NULL;
+ isys->subsys_config_dma_addr = 0;
+ }
+}
+
+int ipu7_fw_isys_open(struct ipu7_isys *isys)
+{
+ return ipu7_boot_start_fw(isys->adev);
+}
+
+int ipu7_fw_isys_close(struct ipu7_isys *isys)
+{
+ return ipu7_boot_stop_fw(isys->adev);
+}
+
+struct ipu7_insys_resp *ipu7_fw_isys_get_resp(struct ipu7_isys *isys)
+{
+ return (struct ipu7_insys_resp *)
+ ipu7_syscom_get_token(isys->adev->syscom,
+ IPU_INSYS_OUTPUT_MSG_QUEUE);
+}
+
+void ipu7_fw_isys_put_resp(struct ipu7_isys *isys)
+{
+ ipu7_syscom_put_token(isys->adev->syscom, IPU_INSYS_OUTPUT_MSG_QUEUE);
+}
+
+void ipu7_fw_isys_dump_stream_cfg(struct device *dev,
+ struct ipu7_insys_stream_cfg *cfg)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "---------------------------\n");
+ dev_dbg(dev, "IPU_FW_ISYS_STREAM_CFG_DATA\n");
+
+ dev_dbg(dev, ".port id %d\n", cfg->port_id);
+ dev_dbg(dev, ".vc %d\n", cfg->vc);
+ dev_dbg(dev, ".nof_input_pins = %d\n", cfg->nof_input_pins);
+ dev_dbg(dev, ".nof_output_pins = %d\n", cfg->nof_output_pins);
+ dev_dbg(dev, ".stream_msg_map = 0x%x\n", cfg->stream_msg_map);
+
+ for (i = 0; i < cfg->nof_input_pins; i++) {
+ dev_dbg(dev, ".input_pin[%d]:\n", i);
+ dev_dbg(dev, "\t.dt = 0x%0x\n",
+ cfg->input_pins[i].dt);
+ dev_dbg(dev, "\t.disable_mipi_unpacking = %d\n",
+ cfg->input_pins[i].disable_mipi_unpacking);
+ dev_dbg(dev, "\t.dt_rename_mode = %d\n",
+ cfg->input_pins[i].dt_rename_mode);
+ dev_dbg(dev, "\t.mapped_dt = 0x%0x\n",
+ cfg->input_pins[i].mapped_dt);
+ dev_dbg(dev, "\t.input_res = %d x %d\n",
+ cfg->input_pins[i].input_res.width,
+ cfg->input_pins[i].input_res.height);
+ dev_dbg(dev, "\t.sync_msg_map = 0x%x\n",
+ cfg->input_pins[i].sync_msg_map);
+ }
+
+ for (i = 0; i < cfg->nof_output_pins; i++) {
+ dev_dbg(dev, ".output_pin[%d]:\n", i);
+ dev_dbg(dev, "\t.input_pin_id = %d\n",
+ cfg->output_pins[i].input_pin_id);
+ dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride);
+ dev_dbg(dev, "\t.send_irq = %d\n",
+ cfg->output_pins[i].send_irq);
+ dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft);
+
+ dev_dbg(dev, "\t.link.buffer_lines = %d\n",
+ cfg->output_pins[i].link.buffer_lines);
+ dev_dbg(dev, "\t.link.foreign_key = %d\n",
+ cfg->output_pins[i].link.foreign_key);
+ dev_dbg(dev, "\t.link.granularity_pointer_update = %d\n",
+ cfg->output_pins[i].link.granularity_pointer_update);
+ dev_dbg(dev, "\t.link.msg_link_streaming_mode = %d\n",
+ cfg->output_pins[i].link.msg_link_streaming_mode);
+ dev_dbg(dev, "\t.link.pbk_id = %d\n",
+ cfg->output_pins[i].link.pbk_id);
+ dev_dbg(dev, "\t.link.pbk_slot_id = %d\n",
+ cfg->output_pins[i].link.pbk_slot_id);
+ dev_dbg(dev, "\t.link.dest = %d\n",
+ cfg->output_pins[i].link.dest);
+ dev_dbg(dev, "\t.link.use_sw_managed = %d\n",
+ cfg->output_pins[i].link.use_sw_managed);
+ dev_dbg(dev, "\t.link.is_snoop = %d\n",
+ cfg->output_pins[i].link.is_snoop);
+
+ dev_dbg(dev, "\t.crop.line_top = %d\n",
+ cfg->output_pins[i].crop.line_top);
+ dev_dbg(dev, "\t.crop.line_bottom = %d\n",
+ cfg->output_pins[i].crop.line_bottom);
+
+ dev_dbg(dev, "\t.dpcm_enable = %d\n",
+ cfg->output_pins[i].dpcm.enable);
+ dev_dbg(dev, "\t.dpcm.type = %d\n",
+ cfg->output_pins[i].dpcm.type);
+ dev_dbg(dev, "\t.dpcm.predictor = %d\n",
+ cfg->output_pins[i].dpcm.predictor);
+ }
+ dev_dbg(dev, "---------------------------\n");
+}
+
+void ipu7_fw_isys_dump_frame_buff_set(struct device *dev,
+ struct ipu7_insys_buffset *buf,
+ unsigned int outputs)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "--------------------------\n");
+ dev_dbg(dev, "IPU_ISYS_BUFF_SET\n");
+ dev_dbg(dev, ".capture_msg_map = %d\n", buf->capture_msg_map);
+ dev_dbg(dev, ".frame_id = %d\n", buf->frame_id);
+ dev_dbg(dev, ".skip_frame = %d\n", buf->skip_frame);
+
+ for (i = 0; i < outputs; i++) {
+ dev_dbg(dev, ".output_pin[%d]:\n", i);
+ dev_dbg(dev, "\t.user_token = %llx\n",
+ buf->output_pins[i].user_token);
+ dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr);
+ }
+ dev_dbg(dev, "---------------------------\n");
+}
diff --git a/drivers/staging/media/ipu7/ipu7-fw-isys.h b/drivers/staging/media/ipu7/ipu7-fw-isys.h
new file mode 100644
index 000000000000..b556feda6b08
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-fw-isys.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_FW_ISYS_H
+#define IPU7_FW_ISYS_H
+
+#include <linux/types.h>
+
+#include "abi/ipu7_fw_isys_abi.h"
+
+struct device;
+struct ipu7_insys_buffset;
+struct ipu7_insys_stream_cfg;
+struct ipu7_isys;
+
+/* From here on, type definitions that do not come from the ISYSAPI interface */
+
+int ipu7_fw_isys_init(struct ipu7_isys *isys);
+void ipu7_fw_isys_release(struct ipu7_isys *isys);
+int ipu7_fw_isys_open(struct ipu7_isys *isys);
+int ipu7_fw_isys_close(struct ipu7_isys *isys);
+
+void ipu7_fw_isys_dump_stream_cfg(struct device *dev,
+ struct ipu7_insys_stream_cfg *cfg);
+void ipu7_fw_isys_dump_frame_buff_set(struct device *dev,
+ struct ipu7_insys_buffset *buf,
+ unsigned int outputs);
+int ipu7_fw_isys_simple_cmd(struct ipu7_isys *isys,
+ const unsigned int stream_handle, u16 send_type);
+int ipu7_fw_isys_complex_cmd(struct ipu7_isys *isys,
+ const unsigned int stream_handle,
+ void *cpu_mapped_buf,
+ dma_addr_t dma_mapped_buf,
+ size_t size, u16 send_type);
+struct ipu7_insys_resp *ipu7_fw_isys_get_resp(struct ipu7_isys *isys);
+void ipu7_fw_isys_put_resp(struct ipu7_isys *isys);
+#endif
diff --git a/drivers/staging/media/ipu7/ipu7-isys-csi-phy.c b/drivers/staging/media/ipu7/ipu7-isys-csi-phy.c
new file mode 100644
index 000000000000..2d5717883518
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-csi-phy.c
@@ -0,0 +1,1034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-device.h>
+
+#include "ipu7.h"
+#include "ipu7-bus.h"
+#include "ipu7-buttress.h"
+#include "ipu7-isys.h"
+#include "ipu7-isys-csi2.h"
+#include "ipu7-isys-csi2-regs.h"
+#include "ipu7-platform-regs.h"
+#include "ipu7-isys-csi-phy.h"
+
+#define PORT_A 0U
+#define PORT_B 1U
+#define PORT_C 2U
+#define PORT_D 3U
+
+#define N_DATA_IDS 8U
+static DECLARE_BITMAP(data_ids, N_DATA_IDS);
+
+struct ddlcal_counter_ref_s {
+ u16 min_mbps;
+ u16 max_mbps;
+
+ u16 ddlcal_counter_ref;
+};
+
+struct ddlcal_params {
+ u16 min_mbps;
+ u16 max_mbps;
+ u16 oa_lanex_hsrx_cdphy_sel_fast;
+ u16 ddlcal_max_phase;
+ u16 phase_bound;
+ u16 ddlcal_dll_fbk;
+ u16 ddlcal_ddl_coarse_bank;
+ u16 fjump_deskew;
+ u16 min_eye_opening_deskew;
+};
+
+struct i_thssettle_params {
+ u16 min_mbps;
+ u16 max_mbps;
+ u16 i_thssettle;
+};
+
+/* lane2 for 4l3t, lane1 for 2l2t */
+struct oa_lane_clk_div_params {
+ u16 min_mbps;
+ u16 max_mbps;
+ u16 oa_lane_hsrx_hs_clk_div;
+};
+
+struct cdr_fbk_cap_prog_params {
+ u16 min_mbps;
+ u16 max_mbps;
+ u16 val;
+};
+
+static const struct ddlcal_counter_ref_s table0[] = {
+ { 1500, 1999, 118 },
+ { 2000, 2499, 157 },
+ { 2500, 3499, 196 },
+ { 3500, 4499, 274 },
+ { 4500, 4500, 352 },
+ { }
+};
+
+static const struct ddlcal_params table1[] = {
+ { 1500, 1587, 0, 143, 167, 17, 3, 4, 29 },
+ { 1588, 1687, 0, 135, 167, 15, 3, 4, 27 },
+ { 1688, 1799, 0, 127, 135, 15, 2, 4, 26 },
+ { 1800, 1928, 0, 119, 135, 13, 2, 3, 24 },
+ { 1929, 2076, 0, 111, 135, 13, 2, 3, 23 },
+ { 2077, 2249, 0, 103, 135, 11, 2, 3, 21 },
+ { 2250, 2454, 0, 95, 103, 11, 1, 3, 19 },
+ { 2455, 2699, 0, 87, 103, 9, 1, 3, 18 },
+ { 2700, 2999, 0, 79, 103, 9, 1, 2, 16 },
+ { 3000, 3229, 0, 71, 71, 7, 1, 2, 15 },
+ { 3230, 3599, 1, 87, 103, 9, 1, 3, 18 },
+ { 3600, 3999, 1, 79, 103, 9, 1, 2, 16 },
+ { 4000, 4499, 1, 71, 103, 7, 1, 2, 15 },
+ { 4500, 4500, 1, 63, 71, 7, 0, 2, 13 },
+ { }
+};
+
+static const struct i_thssettle_params table2[] = {
+ { 80, 124, 24 },
+ { 125, 249, 20 },
+ { 250, 499, 16 },
+ { 500, 749, 14 },
+ { 750, 1499, 13 },
+ { 1500, 4500, 12 },
+ { }
+};
+
+static const struct oa_lane_clk_div_params table6[] = {
+ { 80, 159, 0x1 },
+ { 160, 319, 0x2 },
+ { 320, 639, 0x3 },
+ { 640, 1279, 0x4 },
+ { 1280, 2560, 0x5 },
+ { 2561, 4500, 0x6 },
+ { }
+};
+
+static const struct cdr_fbk_cap_prog_params table7[] = {
+ { 80, 919, 0 },
+ { 920, 1029, 1 },
+ { 1030, 1169, 2 },
+ { 1170, 1349, 3 },
+ { 1350, 1589, 4 },
+ { 1590, 1949, 5 },
+ { 1950, 2499, 6 },
+ { }
+};
+
+static void dwc_phy_write(struct ipu7_isys *isys, u32 id, u32 addr, u16 data)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IS_IO_CDPHY_BASE(id);
+
+ dev_dbg(&isys->adev->auxdev.dev, "phy write: reg 0x%zx = data 0x%04x",
+ base + addr - isys_base, data);
+ writew(data, base + addr);
+}
+
+static u16 dwc_phy_read(struct ipu7_isys *isys, u32 id, u32 addr)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IS_IO_CDPHY_BASE(id);
+ u16 data;
+
+ data = readw(base + addr);
+ dev_dbg(&isys->adev->auxdev.dev, "phy read: reg 0x%zx = data 0x%04x",
+ base + addr - isys_base, data);
+
+ return data;
+}
+
+static void dwc_csi_write(struct ipu7_isys *isys, u32 id, u32 addr, u32 data)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IS_IO_CSI2_HOST_BASE(id);
+ struct device *dev = &isys->adev->auxdev.dev;
+
+ dev_dbg(dev, "csi write: reg 0x%zx = data 0x%08x",
+ base + addr - isys_base, data);
+ writel(data, base + addr);
+ dev_dbg(dev, "csi read: reg 0x%zx = data 0x%08x",
+ base + addr - isys_base, readl(base + addr));
+}
+
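+/* Each CSI-2 port has its own 0x1000-byte block of GP registers. */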
+static void gpreg_write(struct ipu7_isys *isys, u32 id, u32 addr, u32 data)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ u32 gpreg = isys->pdata->ipdata->csi2.gpreg;
+ void __iomem *base = isys_base + gpreg + 0x1000 * id;
+ struct device *dev = &isys->adev->auxdev.dev;
+
+ dev_dbg(dev, "gpreg write: reg 0x%zx = data 0x%08x",
+ base + addr - isys_base, data);
+ writel(data, base + addr);
+ dev_dbg(dev, "gpreg read: reg 0x%zx = data 0x%08x",
+ base + addr - isys_base, readl(base + addr));
+}
+
+static u32 dwc_csi_read(struct ipu7_isys *isys, u32 id, u32 addr)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ void __iomem *base = isys_base + IS_IO_CSI2_HOST_BASE(id);
+ u32 data;
+
+ data = readl(base + addr);
+ dev_dbg(&isys->adev->auxdev.dev, "csi read: reg 0x%zx = data 0x%x",
+ base + addr - isys_base, data);
+
+ return data;
+}
+
+static void dwc_phy_write_mask(struct ipu7_isys *isys, u32 id, u32 addr,
+ u16 val, u8 lo, u8 hi)
+{
+ u32 temp, mask;
+
+ WARN_ON(lo > hi);
+ WARN_ON(hi > 15);
+
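+	/*
+	 * Build a mask covering bits [lo, hi]: the first term clears the bits
+	 * below lo, the second clears the bits above hi. E.g. lo = 3, hi = 5
+	 * gives 0x38.
+	 */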
+ mask = ((~0U - (1U << lo) + 1U)) & (~0U >> (31 - hi));
+ temp = dwc_phy_read(isys, id, addr);
+ temp &= ~mask;
+ temp |= (val << lo) & mask;
+ dwc_phy_write(isys, id, addr, temp);
+}
+
+static void dwc_csi_write_mask(struct ipu7_isys *isys, u32 id, u32 addr,
+ u32 val, u8 hi, u8 lo)
+{
+ u32 temp, mask;
+
+ WARN_ON(lo > hi);
+
+ mask = ((~0U - (1U << lo) + 1U)) & (~0U >> (31 - hi));
+ temp = dwc_csi_read(isys, id, addr);
+ temp &= ~mask;
+ temp |= (val << lo) & mask;
+ dwc_csi_write(isys, id, addr, temp);
+}
+
+static void ipu7_isys_csi_ctrl_cfg(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_isys *isys = csi2->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ u32 id, lanes, phy_mode;
+ u32 val;
+
+ id = csi2->port;
+ lanes = csi2->nlanes;
+ phy_mode = csi2->phy_mode;
+ dev_dbg(dev, "csi-%d controller init with %u lanes, phy mode %u",
+ id, lanes, phy_mode);
+
+ val = dwc_csi_read(isys, id, VERSION);
+ dev_dbg(dev, "csi-%d controller version = 0x%x", id, val);
+
+ /* num of active data lanes */
+ dwc_csi_write(isys, id, N_LANES, lanes - 1);
+ dwc_csi_write(isys, id, CDPHY_MODE, phy_mode);
+ dwc_csi_write(isys, id, VC_EXTENSION, 0);
+
+ /* only mask PHY_FATAL and PKT_FATAL interrupts */
+ dwc_csi_write(isys, id, INT_MSK_PHY_FATAL, 0xff);
+ dwc_csi_write(isys, id, INT_MSK_PKT_FATAL, 0x3);
+ dwc_csi_write(isys, id, INT_MSK_PHY, 0x0);
+ dwc_csi_write(isys, id, INT_MSK_LINE, 0x0);
+ dwc_csi_write(isys, id, INT_MSK_BNDRY_FRAME_FATAL, 0x0);
+ dwc_csi_write(isys, id, INT_MSK_SEQ_FRAME_FATAL, 0x0);
+ dwc_csi_write(isys, id, INT_MSK_CRC_FRAME_FATAL, 0x0);
+ dwc_csi_write(isys, id, INT_MSK_PLD_CRC_FATAL, 0x0);
+ dwc_csi_write(isys, id, INT_MSK_DATA_ID, 0x0);
+ dwc_csi_write(isys, id, INT_MSK_ECC_CORRECTED, 0x0);
+}
+
+static void ipu7_isys_csi_phy_reset(struct ipu7_isys *isys, u32 id)
+{
+ dwc_csi_write(isys, id, PHY_SHUTDOWNZ, 0);
+ dwc_csi_write(isys, id, DPHY_RSTZ, 0);
+ dwc_csi_write(isys, id, CSI2_RESETN, 0);
+ gpreg_write(isys, id, PHY_RESET, 0);
+ gpreg_write(isys, id, PHY_SHUTDOWN, 0);
+}
+
+/* 8 Data ID monitors; each Data ID is composed of a VC and data type pair */
+static int __dids_config(struct ipu7_isys_csi2 *csi2, u32 id, u8 vc, u8 dt)
+{
+ struct ipu7_isys *isys = csi2->isys;
+ u32 reg, n;
+ u8 lo, hi;
+ int ret;
+
+ dev_dbg(&isys->adev->auxdev.dev, "config CSI-%u with vc:%u dt:0x%02x\n",
+ id, vc, dt);
+
+ dwc_csi_write(isys, id, VC_EXTENSION, 0x0);
+ n = find_first_zero_bit(data_ids, N_DATA_IDS);
+ if (n == N_DATA_IDS)
+ return -ENOSPC;
+
+ ret = test_and_set_bit(n, data_ids);
+ if (ret)
+ return -EBUSY;
+
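+	/*
+	 * Monitors 0-3 live in the *_1 registers and 4-7 in the *_2
+	 * registers; each monitor occupies an 8-bit slot, with 5 bits for
+	 * the VC and 6 bits for the data type.
+	 */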
+ reg = n < 4 ? DATA_IDS_VC_1 : DATA_IDS_VC_2;
+ lo = (n % 4) * 8;
+ hi = lo + 4;
+ dwc_csi_write_mask(isys, id, reg, vc & GENMASK(4, 0), hi, lo);
+
+ reg = n < 4 ? DATA_IDS_1 : DATA_IDS_2;
+ lo = (n % 4) * 8;
+ hi = lo + 5;
+ dwc_csi_write_mask(isys, id, reg, dt & GENMASK(5, 0), hi, lo);
+
+ return 0;
+}
+
+static int ipu7_isys_csi_ctrl_dids_config(struct ipu7_isys_csi2 *csi2, u32 id)
+{
+ struct v4l2_mbus_frame_desc_entry *desc_entry = NULL;
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+ struct v4l2_mbus_frame_desc desc;
+ struct v4l2_subdev *ext_sd;
+ struct media_pad *pad;
+ unsigned int i;
+ int ret;
+
+ pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
+ if (IS_ERR(pad)) {
+ dev_warn(dev, "can't get remote source pad of %s (%pe)\n",
+ csi2->asd.sd.name, pad);
+ return PTR_ERR(pad);
+ }
+
+ ext_sd = media_entity_to_v4l2_subdev(pad->entity);
+ if (WARN(!ext_sd, "Failed to get subdev for entity %s\n",
+ pad->entity->name))
+ return -ENODEV;
+
+ ret = v4l2_subdev_call(ext_sd, pad, get_frame_desc, pad->index, &desc);
+ if (ret)
+ return ret;
+
+ if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+ dev_warn(dev, "Unsupported frame descriptor type\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < desc.num_entries; i++) {
+ desc_entry = &desc.entry[i];
+ if (desc_entry->bus.csi2.vc < IPU7_NR_OF_CSI2_VC) {
+ ret = __dids_config(csi2, id, desc_entry->bus.csi2.vc,
+ desc_entry->bus.csi2.dt);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#define CDPHY_TIMEOUT 5000000U
+static int ipu7_isys_phy_ready(struct ipu7_isys *isys, u32 id)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ u32 gpreg_offset = isys->pdata->ipdata->csi2.gpreg;
+ void __iomem *gpreg = isys_base + gpreg_offset + 0x1000 * id;
+ struct device *dev = &isys->adev->auxdev.dev;
+ unsigned int i;
+ u32 phy_ready;
+ u32 reg, rext;
+ int ret;
+
+ dev_dbg(dev, "waiting phy ready...\n");
+ ret = readl_poll_timeout(gpreg + PHY_READY, phy_ready,
+ phy_ready & BIT(0) && phy_ready != ~0U,
+ 100, CDPHY_TIMEOUT);
+ dev_dbg(dev, "phy %u ready = 0x%08x\n", id, readl(gpreg + PHY_READY));
+ dev_dbg(dev, "csi %u PHY_RX = 0x%08x\n", id,
+ dwc_csi_read(isys, id, PHY_RX));
+ dev_dbg(dev, "csi %u PHY_STOPSTATE = 0x%08x\n", id,
+ dwc_csi_read(isys, id, PHY_STOPSTATE));
+ dev_dbg(dev, "csi %u PHY_CAL = 0x%08x\n", id,
+ dwc_csi_read(isys, id, PHY_CAL));
+ for (i = 0; i < 4U; i++) {
+ reg = CORE_DIG_DLANE_0_R_HS_RX_0 + (i * 0x400U);
+ dev_dbg(dev, "phy %u DLANE%u skewcal = 0x%04x\n",
+ id, i, dwc_phy_read(isys, id, reg));
+ }
+ dev_dbg(dev, "phy %u DDLCAL = 0x%04x\n", id,
+ dwc_phy_read(isys, id, PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_5));
+ dev_dbg(dev, "phy %u TERMCAL = 0x%04x\n", id,
+ dwc_phy_read(isys, id, PPI_R_TERMCAL_DEBUG_0));
+ dev_dbg(dev, "phy %u LPDCOCAL = 0x%04x\n", id,
+ dwc_phy_read(isys, id, PPI_R_LPDCOCAL_DEBUG_RB));
+ dev_dbg(dev, "phy %u HSDCOCAL = 0x%04x\n", id,
+ dwc_phy_read(isys, id, PPI_R_HSDCOCAL_DEBUG_RB));
+ dev_dbg(dev, "phy %u LPDCOCAL_VT = 0x%04x\n", id,
+ dwc_phy_read(isys, id, PPI_R_LPDCOCAL_DEBUG_VT));
+
+ if (!ret) {
+ if (id) {
+ dev_dbg(dev, "ignore phy %u rext\n", id);
+ return 0;
+ }
+
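+		/*
+		 * Only PHY 0 provides the rext calibration value; cache it so
+		 * the other PHYs can reuse it in the PHY config (see the
+		 * "force and override rext" handling).
+		 */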
+ rext = dwc_phy_read(isys, id,
+ CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_15) & 0xfU;
+ dev_dbg(dev, "phy %u rext value = %u\n", id, rext);
+ isys->phy_rext_cal = (rext ? rext : 5);
+
+ return 0;
+ }
+
+ dev_err(dev, "wait phy ready timeout!\n");
+
+ return ret;
+}
+
+static int lookup_table1(u64 mbps)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(table1); i++) {
+ if (mbps >= table1[i].min_mbps && mbps <= table1[i].max_mbps)
+ return i;
+ }
+
+ return -ENXIO;
+}
+
+static const u16 deskew_fine_mem[] = {
+ 0x0404, 0x040c, 0x0414, 0x041c,
+ 0x0423, 0x0429, 0x0430, 0x043a,
+ 0x0445, 0x044a, 0x0450, 0x045a,
+ 0x0465, 0x0469, 0x0472, 0x047a,
+ 0x0485, 0x0489, 0x0490, 0x049a,
+ 0x04a4, 0x04ac, 0x04b4, 0x04bc,
+ 0x04c4, 0x04cc, 0x04d4, 0x04dc,
+ 0x04e4, 0x04ec, 0x04f4, 0x04fc,
+ 0x0504, 0x050c, 0x0514, 0x051c,
+ 0x0523, 0x0529, 0x0530, 0x053a,
+ 0x0545, 0x054a, 0x0550, 0x055a,
+ 0x0565, 0x0569, 0x0572, 0x057a,
+ 0x0585, 0x0589, 0x0590, 0x059a,
+ 0x05a4, 0x05ac, 0x05b4, 0x05bc,
+ 0x05c4, 0x05cc, 0x05d4, 0x05dc,
+ 0x05e4, 0x05ec, 0x05f4, 0x05fc,
+ 0x0604, 0x060c, 0x0614, 0x061c,
+ 0x0623, 0x0629, 0x0632, 0x063a,
+ 0x0645, 0x064a, 0x0650, 0x065a,
+ 0x0665, 0x0669, 0x0672, 0x067a,
+ 0x0685, 0x0689, 0x0690, 0x069a,
+ 0x06a4, 0x06ac, 0x06b4, 0x06bc,
+ 0x06c4, 0x06cc, 0x06d4, 0x06dc,
+ 0x06e4, 0x06ec, 0x06f4, 0x06fc,
+ 0x0704, 0x070c, 0x0714, 0x071c,
+ 0x0723, 0x072a, 0x0730, 0x073a,
+ 0x0745, 0x074a, 0x0750, 0x075a,
+ 0x0765, 0x0769, 0x0772, 0x077a,
+ 0x0785, 0x0789, 0x0790, 0x079a,
+ 0x07a4, 0x07ac, 0x07b4, 0x07bc,
+ 0x07c4, 0x07cc, 0x07d4, 0x07dc,
+ 0x07e4, 0x07ec, 0x07f4, 0x07fc,
+};
+
+static void ipu7_isys_dphy_config(struct ipu7_isys *isys, u8 id, u8 lanes,
+ bool aggregation, u64 mbps)
+{
+ u16 hsrxval0 = 0;
+ u16 hsrxval1 = 0;
+ u16 hsrxval2 = 0;
+ int index;
+ u16 reg;
+ u16 val;
+ u32 i;
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_7, 0, 0, 9);
+ if (mbps > 1500)
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_7,
+ 40, 0, 7);
+ else
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_7,
+ 104, 0, 7);
+
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_8, 80, 0, 7);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_0, 191, 0, 9);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_7, 34, 7, 12);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_1, 38, 8, 15);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 4, 12, 15);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 2, 10, 11);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 1, 8, 8);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 38, 0, 7);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 1, 9, 9);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_4, 10, 0, 9);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_6, 20, 0, 9);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_7, 19, 0, 6);
+
+ for (i = 0; i < ARRAY_SIZE(table0); i++) {
+ if (mbps >= table0[i].min_mbps && mbps <= table0[i].max_mbps) {
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_3,
+ table0[i].ddlcal_counter_ref,
+ 0, 9);
+ break;
+ }
+ }
+
+ index = lookup_table1(mbps);
+ if (index >= 0) {
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_1,
+ table1[index].phase_bound, 0, 7);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_5,
+ table1[index].ddlcal_dll_fbk, 4, 9);
+ dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_5,
+ table1[index].ddlcal_ddl_coarse_bank, 0, 3);
+
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_8;
+ val = table1[index].oa_lanex_hsrx_cdphy_sel_fast;
+ for (i = 0; i < lanes + 1; i++)
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), val,
+ 12, 12);
+ }
+
+ reg = CORE_DIG_DLANE_0_RW_LP_0;
+ for (i = 0; i < lanes; i++)
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 6, 8, 11);
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2,
+ 0, 0, 0);
+ if (!is_ipu7(isys->adev->isp->hw_ver) ||
+ id == PORT_B || id == PORT_C) {
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2,
+ 1, 0, 0);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2,
+ 0, 0, 0);
+ } else {
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2,
+ 0, 0, 0);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2,
+ 1, 0, 0);
+ }
+
+ if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) {
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2,
+ 0, 0, 0);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2,
+ 0, 0, 0);
+ }
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_6, 1, 0, 2);
+ dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_6, 1, 3, 5);
+
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_12;
+ val = (mbps > 1500) ? 0 : 1;
+ for (i = 0; i < lanes + 1; i++) {
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1);
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), !val, 3, 3);
+ }
+
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_13;
+ val = (mbps > 1500) ? 0 : 1;
+ for (i = 0; i < lanes + 1; i++) {
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1);
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 3, 3);
+ }
+
+ if (!is_ipu7(isys->adev->isp->hw_ver) || id == PORT_B || id == PORT_C)
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9;
+ else
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9;
+
+ for (i = 0; i < ARRAY_SIZE(table6); i++) {
+ if (mbps >= table6[i].min_mbps && mbps <= table6[i].max_mbps) {
+ dwc_phy_write_mask(isys, id, reg,
+ table6[i].oa_lane_hsrx_hs_clk_div,
+ 5, 7);
+ break;
+ }
+ }
+
+ if (aggregation) {
+ dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_0, 1,
+ 1, 1);
+
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15;
+ dwc_phy_write_mask(isys, id, reg, 3, 3, 4);
+
+ val = (id == PORT_A) ? 3 : 0;
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15;
+ dwc_phy_write_mask(isys, id, reg, val, 3, 4);
+
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15;
+ dwc_phy_write_mask(isys, id, reg, 3, 3, 4);
+ }
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_HS_RX_0, 28, 0, 7);
+ dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_HS_RX_7, 6, 0, 7);
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_0;
+ for (i = 0; i < ARRAY_SIZE(table2); i++) {
+ if (mbps >= table2[i].min_mbps && mbps <= table2[i].max_mbps) {
+ u8 j;
+
+ for (j = 0; j < lanes; j++)
+ dwc_phy_write_mask(isys, id, reg + (j * 0x400),
+ table2[i].i_thssettle,
+ 8, 15);
+ break;
+ }
+ }
+
+ /* deskew */
+ for (i = 0; i < lanes; i++) {
+ reg = CORE_DIG_DLANE_0_RW_CFG_1;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400),
+ ((mbps > 1500) ? 0x1 : 0x2), 2, 3);
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_2;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400),
+ ((mbps > 2500) ? 0 : 1), 15, 15);
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 13, 13);
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 7, 9, 12);
+
+ reg = CORE_DIG_DLANE_0_RW_LP_0;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 12, 15);
+
+ reg = CORE_DIG_DLANE_0_RW_LP_2;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 0, 0);
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_1;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 16, 0, 7);
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_3;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 2, 0, 2);
+ index = lookup_table1(mbps);
+ if (index >= 0) {
+ val = table1[index].fjump_deskew;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), val,
+ 3, 8);
+ }
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_4;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 150, 0, 15);
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_5;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 0, 7);
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 8, 15);
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_6;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 2, 0, 7);
+ index = lookup_table1(mbps);
+ if (index >= 0) {
+ val = table1[index].min_eye_opening_deskew;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), val,
+ 8, 15);
+ }
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_7;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 13, 13);
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 15, 15);
+
+ reg = CORE_DIG_DLANE_0_RW_HS_RX_9;
+ index = lookup_table1(mbps);
+ if (index >= 0) {
+ val = table1[index].ddlcal_max_phase;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400),
+ val, 0, 7);
+ }
+ }
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_LP_0, 1, 12, 15);
+ dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_LP_2, 0, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(deskew_fine_mem); i++)
+ dwc_phy_write_mask(isys, id, CORE_DIG_COMMON_RW_DESKEW_FINE_MEM,
+ deskew_fine_mem[i], 0, 15);
+
+ if (mbps > 1500) {
+ hsrxval0 = 4;
+ hsrxval2 = 3;
+ }
+
+ if (mbps > 2500)
+ hsrxval1 = 2;
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9,
+ hsrxval0, 0, 2);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9,
+ hsrxval0, 0, 2);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9,
+ hsrxval0, 0, 2);
+ if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) {
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9,
+ hsrxval0, 0, 2);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9,
+ hsrxval0, 0, 2);
+ }
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9,
+ hsrxval1, 3, 4);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9,
+ hsrxval1, 3, 4);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9,
+ hsrxval1, 3, 4);
+ if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) {
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9,
+ hsrxval1, 3, 4);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9,
+ hsrxval1, 3, 4);
+ }
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15,
+ hsrxval2, 0, 2);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15,
+ hsrxval2, 0, 2);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15,
+ hsrxval2, 0, 2);
+ if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) {
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_15,
+ hsrxval2, 0, 2);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_15,
+ hsrxval2, 0, 2);
+ }
+
+ /* force and override rext */
+ if (isys->phy_rext_cal && id) {
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_8,
+ isys->phy_rext_cal, 0, 3);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7,
+ 1, 11, 11);
+ }
+}
+
+static void ipu7_isys_cphy_config(struct ipu7_isys *isys, u8 id, u8 lanes,
+ bool aggregation, u64 mbps)
+{
+ u8 trios = 2;
+ u16 coarse_target;
+ u16 deass_thresh;
+ u16 delay_thresh;
+ u16 reset_thresh;
+ u16 cap_prog = 6U;
+ u16 reg;
+ u16 val;
+ u32 i;
+ u64 r64;
+ u32 r;
+
+ if (is_ipu7p5(isys->adev->isp->hw_ver))
+ val = 0x15;
+ else
+ val = 0x155;
+
+ if (is_ipu7(isys->adev->isp->hw_ver))
+ trios = 3;
+
+ dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_7, val, 0, 9);
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_7, 104, 0, 7);
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_8, 16, 0, 7);
+
+ reg = CORE_DIG_CLANE_0_RW_LP_0;
+ for (i = 0; i < trios; i++)
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 6, 8, 11);
+
+ val = (mbps > 900U) ? 1U : 0U;
+ for (i = 0; i < trios; i++) {
+ reg = CORE_DIG_CLANE_0_RW_HS_RX_0;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 0, 0);
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1);
+
+ reg = CORE_DIG_CLANE_0_RW_HS_RX_1;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 38, 0, 15);
+
+ reg = CORE_DIG_CLANE_0_RW_HS_RX_5;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 38, 0, 15);
+
+ reg = CORE_DIG_CLANE_0_RW_HS_RX_6;
+ dwc_phy_write_mask(isys, id, reg + (i * 0x400), 10, 0, 15);
+ }
+
+	/*
+	 * Below 900 Msps, always use the same value.
+	 * The formula is suitable for data rates of 80-3500 Msps.
+	 * Timebase (us) = 1, DIV = 32, TDDL (UI) = 0.5
+	 */
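+	/* E.g. 2500 Msps: DIV_ROUND_UP(2500, 16) - 1 = 156. */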
+ if (mbps >= 80U)
+ coarse_target = DIV_ROUND_UP_ULL(mbps, 16) - 1;
+ else
+ coarse_target = 56;
+
+ for (i = 0; i < trios; i++) {
+ reg = CORE_DIG_CLANE_0_RW_HS_RX_2 + i * 0x400;
+ dwc_phy_write_mask(isys, id, reg, coarse_target, 0, 15);
+ }
+
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2, 1, 0, 0);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2, 0, 0, 0);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2, 1, 0, 0);
+
+ if (!is_ipu7p5(isys->adev->isp->hw_ver) && lanes == 4) {
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2,
+ 1, 0, 0);
+ dwc_phy_write_mask(isys, id,
+ CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2,
+ 0, 0, 0);
+ }
+
+ for (i = 0; i < trios; i++) {
+ reg = CORE_DIG_RW_TRIO0_0 + i * 0x400;
+ dwc_phy_write_mask(isys, id, reg, 1, 6, 8);
+ dwc_phy_write_mask(isys, id, reg, 1, 3, 5);
+ dwc_phy_write_mask(isys, id, reg, 2, 0, 2);
+ }
+
+ deass_thresh = (u16)div64_u64_rem(7 * 1000 * 6, mbps * 5U, &r64) + 1;
+ if (r64 != 0)
+ deass_thresh++;
+
+ reg = CORE_DIG_RW_TRIO0_2;
+ for (i = 0; i < trios; i++)
+ dwc_phy_write_mask(isys, id, reg + 0x400 * i,
+ deass_thresh, 0, 7);
+
+ delay_thresh = div64_u64((224U - (9U * 7U)) * 1000U, 5U * mbps) - 7u;
+
+ if (delay_thresh < 1)
+ delay_thresh = 1;
+
+ reg = CORE_DIG_RW_TRIO0_1;
+ for (i = 0; i < trios; i++)
+ dwc_phy_write_mask(isys, id, reg + 0x400 * i,
+ delay_thresh, 0, 15);
+
+ reset_thresh = (u16)div_u64_rem(2U * 5U * mbps, 7U * 1000U, &r);
+ if (!r)
+ reset_thresh--;
+
+ if (reset_thresh < 1)
+ reset_thresh = 1;
+
+ reg = CORE_DIG_RW_TRIO0_0;
+ for (i = 0; i < trios; i++)
+ dwc_phy_write_mask(isys, id, reg + 0x400 * i,
+ reset_thresh, 9, 11);
+
+ reg = CORE_DIG_CLANE_0_RW_LP_0;
+ for (i = 0; i < trios; i++)
+ dwc_phy_write_mask(isys, id, reg + 0x400 * i, 1, 12, 15);
+
+ reg = CORE_DIG_CLANE_0_RW_LP_2;
+ for (i = 0; i < trios; i++)
+ dwc_phy_write_mask(isys, id, reg + 0x400 * i, 0, 0, 0);
+
+ reg = CORE_DIG_CLANE_0_RW_HS_RX_0;
+ for (i = 0; i < trios; i++)
+ dwc_phy_write_mask(isys, id, reg + 0x400 * i, 12, 2, 6);
+
+ for (i = 0; i < ARRAY_SIZE(table7); i++) {
+ if (mbps >= table7[i].min_mbps && mbps <= table7[i].max_mbps) {
+ cap_prog = table7[i].val;
+ break;
+ }
+ }
+
+ for (i = 0; i < (lanes + 1); i++) {
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9 + 0x400 * i;
+ dwc_phy_write_mask(isys, id, reg, 4U, 0, 2);
+ dwc_phy_write_mask(isys, id, reg, 0U, 3, 4);
+
+ reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_7 + 0x400 * i;
+ dwc_phy_write_mask(isys, id, reg, cap_prog, 10, 12);
+ }
+}
+
+static int ipu7_isys_phy_config(struct ipu7_isys *isys, u8 id, u8 lanes,
+ bool aggregation)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ u32 phy_mode;
+ s64 link_freq;
+ u64 mbps;
+
+ if (aggregation)
+ link_freq = ipu7_isys_csi2_get_link_freq(&isys->csi2[0]);
+ else
+ link_freq = ipu7_isys_csi2_get_link_freq(&isys->csi2[id]);
+
+ if (link_freq < 0) {
+ dev_err(dev, "get link freq failed (%lld)\n", link_freq);
+ return link_freq;
+ }
+
+ mbps = div_u64(link_freq, 500000);
+ dev_dbg(dev, "config phy %u with lanes %u aggregation %d mbps %lld\n",
+ id, lanes, aggregation, mbps);
+
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_10, 48, 0, 7);
+ dwc_phy_write_mask(isys, id, CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_2,
+ 1, 12, 13);
+ dwc_phy_write_mask(isys, id, CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_0,
+ 63, 2, 7);
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_STARTUP_1_1,
+ 563, 0, 11);
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_2, 5, 0, 7);
+ /* bypass the RCAL state (bit6) */
+ if (aggregation && id != PORT_A)
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_2, 0x45,
+ 0, 7);
+
+ dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_6, 39, 0, 7);
+ dwc_phy_write_mask(isys, id, PPI_CALIBCTRL_RW_COMMON_BG_0, 500, 0, 8);
+ dwc_phy_write_mask(isys, id, PPI_RW_TERMCAL_CFG_0, 38, 0, 6);
+ dwc_phy_write_mask(isys, id, PPI_RW_OFFSETCAL_CFG_0, 7, 0, 4);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TIMEBASE, 153, 0, 9);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_NREF, 800, 0, 10);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_NREF_RANGE, 27, 0, 4);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TWAIT_CONFIG, 47, 0, 8);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TWAIT_CONFIG, 127, 9, 15);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 47, 7, 15);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 27, 2, 6);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 3, 0, 1);
+ dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_COARSE_CFG, 1, 0, 1);
+ dwc_phy_write_mask(isys, id, PPI_RW_COMMON_CFG, 3, 0, 1);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0,
+ 0, 10, 10);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1,
+ 1, 10, 10);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1,
+ 0, 15, 15);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_3,
+ 3, 8, 9);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0,
+ 0, 15, 15);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_6,
+ 7, 12, 14);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7,
+ 0, 8, 10);
+ dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_5,
+ 0, 8, 8);
+
+ if (aggregation)
+ phy_mode = isys->csi2[0].phy_mode;
+ else
+ phy_mode = isys->csi2[id].phy_mode;
+
+ if (phy_mode == PHY_MODE_DPHY) {
+ ipu7_isys_dphy_config(isys, id, lanes, aggregation, mbps);
+ } else if (phy_mode == PHY_MODE_CPHY) {
+ ipu7_isys_cphy_config(isys, id, lanes, aggregation, mbps);
+ } else {
+ dev_err(dev, "unsupported phy mode %d!\n",
+ isys->csi2[id].phy_mode);
+ }
+
+ return 0;
+}
+
+int ipu7_isys_csi_phy_powerup(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_isys *isys = csi2->isys;
+ u32 lanes = csi2->nlanes;
+ bool aggregation = false;
+ u32 id = csi2->port;
+ int ret;
+
+	/* lane remapping for aggregation (port AB) mode */
+ if (!is_ipu7(isys->adev->isp->hw_ver) && lanes > 2 && id == PORT_A) {
+ aggregation = true;
+ lanes = 2;
+ }
+
+ ipu7_isys_csi_phy_reset(isys, id);
+ gpreg_write(isys, id, PHY_CLK_LANE_CONTROL, 0x1);
+ gpreg_write(isys, id, PHY_CLK_LANE_FORCE_CONTROL, 0x2);
+ gpreg_write(isys, id, PHY_LANE_CONTROL_EN, (1U << lanes) - 1U);
+ gpreg_write(isys, id, PHY_LANE_FORCE_CONTROL, 0xf);
+ gpreg_write(isys, id, PHY_MODE, csi2->phy_mode);
+
+	/* configure PORT_B in aggregation mode */
+ if (aggregation) {
+ ipu7_isys_csi_phy_reset(isys, PORT_B);
+ gpreg_write(isys, PORT_B, PHY_CLK_LANE_CONTROL, 0x0);
+ gpreg_write(isys, PORT_B, PHY_LANE_CONTROL_EN, 0x3);
+ gpreg_write(isys, PORT_B, PHY_CLK_LANE_FORCE_CONTROL, 0x2);
+ gpreg_write(isys, PORT_B, PHY_LANE_FORCE_CONTROL, 0xf);
+ gpreg_write(isys, PORT_B, PHY_MODE, csi2->phy_mode);
+ }
+
+ ipu7_isys_csi_ctrl_cfg(csi2);
+ ipu7_isys_csi_ctrl_dids_config(csi2, id);
+
+ ret = ipu7_isys_phy_config(isys, id, lanes, aggregation);
+ if (ret < 0)
+ return ret;
+
+ gpreg_write(isys, id, PHY_RESET, 1);
+ gpreg_write(isys, id, PHY_SHUTDOWN, 1);
+ dwc_csi_write(isys, id, DPHY_RSTZ, 1);
+ dwc_csi_write(isys, id, PHY_SHUTDOWNZ, 1);
+ dwc_csi_write(isys, id, CSI2_RESETN, 1);
+
+ ret = ipu7_isys_phy_ready(isys, id);
+ if (ret < 0)
+ return ret;
+
+ gpreg_write(isys, id, PHY_LANE_FORCE_CONTROL, 0);
+ gpreg_write(isys, id, PHY_CLK_LANE_FORCE_CONTROL, 0);
+
+ /* config PORT_B if aggregation mode */
+ if (aggregation) {
+ ret = ipu7_isys_phy_config(isys, PORT_B, 2, aggregation);
+ if (ret < 0)
+ return ret;
+
+ gpreg_write(isys, PORT_B, PHY_RESET, 1);
+ gpreg_write(isys, PORT_B, PHY_SHUTDOWN, 1);
+ dwc_csi_write(isys, PORT_B, DPHY_RSTZ, 1);
+ dwc_csi_write(isys, PORT_B, PHY_SHUTDOWNZ, 1);
+ dwc_csi_write(isys, PORT_B, CSI2_RESETN, 1);
+ ret = ipu7_isys_phy_ready(isys, PORT_B);
+ if (ret < 0)
+ return ret;
+
+ gpreg_write(isys, PORT_B, PHY_LANE_FORCE_CONTROL, 0);
+ gpreg_write(isys, PORT_B, PHY_CLK_LANE_FORCE_CONTROL, 0);
+ }
+
+ return 0;
+}
+
+void ipu7_isys_csi_phy_powerdown(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_isys *isys = csi2->isys;
+
+ ipu7_isys_csi_phy_reset(isys, csi2->port);
+ if (!is_ipu7(isys->adev->isp->hw_ver) &&
+ csi2->nlanes > 2U && csi2->port == PORT_A)
+ ipu7_isys_csi_phy_reset(isys, PORT_B);
+}
diff --git a/drivers/staging/media/ipu7/ipu7-isys-csi-phy.h b/drivers/staging/media/ipu7/ipu7-isys-csi-phy.h
new file mode 100644
index 000000000000..dfdcb61540c4
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-csi-phy.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_ISYS_CSI_PHY_H
+#define IPU7_ISYS_CSI_PHY_H
+
+struct ipu7_isys;
+struct ipu7_isys_csi2;
+
+#define PHY_MODE_DPHY 0U
+#define PHY_MODE_CPHY 1U
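+
+/*
+ * These values are written verbatim to the per-port PHY_MODE GP register
+ * and matched against csi2->phy_mode during PHY bring-up, see
+ * ipu7_isys_csi_phy_powerup() and ipu7_isys_phy_config().
+ */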
+
+int ipu7_isys_csi_phy_powerup(struct ipu7_isys_csi2 *csi2);
+void ipu7_isys_csi_phy_powerdown(struct ipu7_isys_csi2 *csi2);
+#endif
diff --git a/drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h b/drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h
new file mode 100644
index 000000000000..aad52c44a005
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h
@@ -0,0 +1,1197 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_ISYS_CSI2_REG_H
+#define IPU7_ISYS_CSI2_REG_H
+
+/* IS main regs base */
+#define IS_MAIN_BASE 0x240000
+#define IS_MAIN_S2B_BASE (IS_MAIN_BASE + 0x22000)
+#define IS_MAIN_B2O_BASE (IS_MAIN_BASE + 0x26000)
+#define IS_MAIN_ISD_M0_BASE (IS_MAIN_BASE + 0x2b000)
+#define IS_MAIN_ISD_M1_BASE (IS_MAIN_BASE + 0x2b100)
+#define IS_MAIN_ISD_INT_BASE (IS_MAIN_BASE + 0x2b200)
+#define IS_MAIN_GDA_BASE (IS_MAIN_BASE + 0x32000)
+#define IS_MAIN_GPREGS_MAIN_BASE (IS_MAIN_BASE + 0x32500)
+#define IS_MAIN_IRQ_CTRL_BASE (IS_MAIN_BASE + 0x32700)
+#define IS_MAIN_PWM_CTRL_BASE (IS_MAIN_BASE + 0x32b00)
+
+#define S2B_IRQ_COMMON_0_CTL_STATUS (IS_MAIN_S2B_BASE + 0x1c)
+#define S2B_IRQ_COMMON_0_CTL_CLEAR (IS_MAIN_S2B_BASE + 0x20)
+#define S2B_IRQ_COMMON_0_CTL_ENABLE (IS_MAIN_S2B_BASE + 0x24)
+#define S2B_IID_IRQ_CTL_STATUS(iid) (IS_MAIN_S2B_BASE + 0x94 + \
+ 0x100 * (iid))
+
+#define B2O_IRQ_COMMON_0_CTL_STATUS (IS_MAIN_B2O_BASE + 0x30)
+#define B2O_IRQ_COMMON_0_CTL_CLEAR (IS_MAIN_B2O_BASE + 0x34)
+#define B2O_IRQ_COMMON_0_CTL_ENABLE (IS_MAIN_B2O_BASE + 0x38)
+#define B2O_IID_IRQ_CTL_STATUS(oid) (IS_MAIN_B2O_BASE + 0x3dc + \
+ 0x200 * (oid))
+
+#define ISD_M0_IRQ_CTL_STATUS (IS_MAIN_ISD_M0_BASE + 0x1c)
+#define ISD_M0_IRQ_CTL_CLEAR (IS_MAIN_ISD_M0_BASE + 0x20)
+#define ISD_M0_IRQ_CTL_ENABLE (IS_MAIN_ISD_M0_BASE + 0x24)
+
+#define ISD_M1_IRQ_CTL_STATUS (IS_MAIN_ISD_M1_BASE + 0x1c)
+#define ISD_M1_IRQ_CTL_CLEAR (IS_MAIN_ISD_M1_BASE + 0x20)
+#define ISD_M1_IRQ_CTL_ENABLE (IS_MAIN_ISD_M1_BASE + 0x24)
+
+#define ISD_INT_IRQ_CTL_STATUS (IS_MAIN_ISD_INT_BASE + 0x1c)
+#define ISD_INT_IRQ_CTL_CLEAR (IS_MAIN_ISD_INT_BASE + 0x20)
+#define ISD_INT_IRQ_CTL_ENABLE (IS_MAIN_ISD_INT_BASE + 0x24)
+
+#define GDA_IRQ_CTL_STATUS (IS_MAIN_GDA_BASE + 0x1c)
+#define GDA_IRQ_CTL_CLEAR (IS_MAIN_GDA_BASE + 0x20)
+#define GDA_IRQ_CTL_ENABLE (IS_MAIN_GDA_BASE + 0x24)
+
+#define IS_MAIN_IRQ_CTL_EDGE IS_MAIN_IRQ_CTRL_BASE
+#define IS_MAIN_IRQ_CTL_MASK (IS_MAIN_IRQ_CTRL_BASE + 0x4)
+#define IS_MAIN_IRQ_CTL_STATUS (IS_MAIN_IRQ_CTRL_BASE + 0x8)
+#define IS_MAIN_IRQ_CTL_CLEAR (IS_MAIN_IRQ_CTRL_BASE + 0xc)
+#define IS_MAIN_IRQ_CTL_ENABLE (IS_MAIN_IRQ_CTRL_BASE + 0x10)
+#define IS_MAIN_IRQ_CTL_LEVEL_NOT_PULSE (IS_MAIN_IRQ_CTRL_BASE + 0x14)
+
+/* IS IO regs base */
+#define IS_PHY_NUM 4U
+#define IS_IO_BASE 0x280000
+
+/* dwc csi cdphy registers */
+#define IS_IO_CDPHY_BASE(i) (IS_IO_BASE + 0x10000 * (i))
+#define PPI_STARTUP_RW_COMMON_DPHY_0 0x1800
+#define PPI_STARTUP_RW_COMMON_DPHY_1 0x1802
+#define PPI_STARTUP_RW_COMMON_DPHY_2 0x1804
+#define PPI_STARTUP_RW_COMMON_DPHY_3 0x1806
+#define PPI_STARTUP_RW_COMMON_DPHY_4 0x1808
+#define PPI_STARTUP_RW_COMMON_DPHY_5 0x180a
+#define PPI_STARTUP_RW_COMMON_DPHY_6 0x180c
+#define PPI_STARTUP_RW_COMMON_DPHY_7 0x180e
+#define PPI_STARTUP_RW_COMMON_DPHY_8 0x1810
+#define PPI_STARTUP_RW_COMMON_DPHY_9 0x1812
+#define PPI_STARTUP_RW_COMMON_DPHY_A 0x1814
+#define PPI_STARTUP_RW_COMMON_DPHY_10 0x1820
+#define PPI_STARTUP_RW_COMMON_STARTUP_1_1 0x1822
+#define PPI_STARTUP_RW_COMMON_STARTUP_1_2 0x1824
+#define PPI_CALIBCTRL_RW_COMMON_CALIBCTRL_2_0 0x1840
+#define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_1 0x1842
+#define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_2 0x1844
+#define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_3 0x1846
+#define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_4 0x1848
+#define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_5 0x184a
+#define PPI_CALIBCTRL_RW_COMMON_BG_0 0x184c
+#define PPI_CALIBCTRL_RW_COMMON_CALIBCTRL_2_7 0x184e
+#define PPI_CALIBCTRL_RW_ADC_CFG_0 0x1850
+#define PPI_CALIBCTRL_RW_ADC_CFG_1 0x1852
+#define PPI_CALIBCTRL_R_ADC_DEBUG 0x1854
+#define PPI_RW_LPDCOCAL_TOP_OVERRIDE 0x1c00
+#define PPI_RW_LPDCOCAL_TIMEBASE 0x1c02
+#define PPI_RW_LPDCOCAL_NREF 0x1c04
+#define PPI_RW_LPDCOCAL_NREF_RANGE 0x1c06
+#define PPI_RW_LPDCOCAL_NREF_TRIGGER_MAN 0x1c08
+#define PPI_RW_LPDCOCAL_TWAIT_CONFIG 0x1c0a
+#define PPI_RW_LPDCOCAL_VT_CONFIG 0x1c0c
+#define PPI_R_LPDCOCAL_DEBUG_RB 0x1c0e
+#define PPI_RW_LPDCOCAL_COARSE_CFG 0x1c10
+#define PPI_R_LPDCOCAL_DEBUG_COARSE_RB 0x1c12
+#define PPI_R_LPDCOCAL_DEBUG_COARSE_MEAS_0_RB 0x1c14
+#define PPI_R_LPDCOCAL_DEBUG_COARSE_MEAS_1_RB 0x1c16
+#define PPI_R_LPDCOCAL_DEBUG_COARSE_FWORD_RB 0x1c18
+#define PPI_R_LPDCOCAL_DEBUG_MEASURE_CURR_ERROR 0x1c1a
+#define PPI_R_LPDCOCAL_DEBUG_MEASURE_LAST_ERROR 0x1c1c
+#define PPI_R_LPDCOCAL_DEBUG_VT 0x1c1e
+#define PPI_RW_LB_TIMEBASE_CONFIG 0x1c20
+#define PPI_RW_LB_STARTCMU_CONFIG 0x1c22
+#define PPI_R_LBPULSE_COUNTER_RB 0x1c24
+#define PPI_R_LB_START_CMU_RB 0x1c26
+#define PPI_RW_LB_DPHY_BURST_START 0x1c28
+#define PPI_RW_LB_CPHY_BURST_START 0x1c2a
+#define PPI_RW_DDLCAL_CFG_0 0x1c40
+#define PPI_RW_DDLCAL_CFG_1 0x1c42
+#define PPI_RW_DDLCAL_CFG_2 0x1c44
+#define PPI_RW_DDLCAL_CFG_3 0x1c46
+#define PPI_RW_DDLCAL_CFG_4 0x1c48
+#define PPI_RW_DDLCAL_CFG_5 0x1c4a
+#define PPI_RW_DDLCAL_CFG_6 0x1c4c
+#define PPI_RW_DDLCAL_CFG_7 0x1c4e
+#define PPI_R_DDLCAL_DEBUG_0 0x1c50
+#define PPI_R_DDLCAL_DEBUG_1 0x1c52
+#define PPI_RW_PARITY_TEST 0x1c60
+#define PPI_RW_STARTUP_OVR_0 0x1c62
+#define PPI_RW_STARTUP_STATE_OVR_1 0x1c64
+#define PPI_RW_DTB_SELECTOR 0x1c66
+#define PPI_RW_DPHY_CLK_SPARE 0x1c6a
+#define PPI_RW_COMMON_CFG 0x1c6c
+#define PPI_RW_TERMCAL_CFG_0 0x1c80
+#define PPI_R_TERMCAL_DEBUG_0 0x1c82
+#define PPI_RW_TERMCAL_CTRL_0 0x1c84
+#define PPI_RW_OFFSETCAL_CFG_0 0x1ca0
+#define PPI_R_OFFSETCAL_DEBUG_LANE0 0x1ca2
+#define PPI_R_OFFSETCAL_DEBUG_LANE1 0x1ca4
+#define PPI_R_OFFSETCAL_DEBUG_LANE2 0x1ca6
+#define PPI_R_OFFSETCAL_DEBUG_LANE3 0x1ca8
+#define PPI_R_OFFSETCAL_DEBUG_LANE4 0x1caa
+#define PPI_RW_HSDCOCAL_CFG_O 0x1d00
+#define PPI_RW_HSDCOCAL_CFG_1 0x1d02
+#define PPI_RW_HSDCOCAL_CFG_2 0x1d04
+#define PPI_RW_HSDCOCAL_CFG_3 0x1d06
+#define PPI_RW_HSDCOCAL_CFG_4 0x1d08
+#define PPI_RW_HSDCOCAL_CFG_5 0x1d0a
+#define PPI_RW_HSDCOCAL_CFG_6 0x1d0c
+#define PPI_RW_HSDCOCAL_CFG_7 0x1d0e
+#define PPI_RW_HSDCOCAL_CFG_8 0x1d10
+#define PPI_R_HSDCOCAL_DEBUG_RB 0x1d12
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_0 0x2000
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_1 0x2002
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_2 0x2004
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_3 0x2006
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_4 0x2008
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_5 0x200a
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_6 0x200c
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_7 0x200e
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_8 0x2010
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_9 0x2012
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_10 0x2014
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_11 0x2016
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_12 0x2018
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_13 0x201a
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_14 0x201c
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_15 0x201e
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_0 0x2020
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_1 0x2022
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_2 0x2024
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_3 0x2026
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_4 0x2028
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_5 0x202a
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_6 0x202c
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_7 0x202e
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_8 0x2030
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_9 0x2032
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_10 0x2034
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_11 0x2036
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_12 0x2038
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_13 0x203a
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_14 0x203c
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_15 0x203e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_0 0x2040
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_1 0x2042
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2 0x2044
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_3 0x2046
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_4 0x2048
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_5 0x204a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_6 0x204c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_7 0x204e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_8 0x2050
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9 0x2052
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_10 0x2054
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_11 0x2056
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_12 0x2058
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_13 0x205a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_14 0x205c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15 0x205e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_0 0x2060
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_1 0x2062
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_2 0x2064
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_3 0x2066
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_4 0x2068
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_5 0x206a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_6 0x206c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_7 0x206e
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_8 0x2070
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_9 0x2072
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_10 0x2074
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_11 0x2076
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_12 0x2078
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_13 0x207a
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_14 0x207c
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_15 0x207e
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_0 0x2080
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_1 0x2082
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_2 0x2084
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_3 0x2086
+#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_4 0x2088
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_0 0x20a0
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_1 0x20a2
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_5_2 0x20a4
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_5_3 0x20a6
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_4 0x20a8
+#define CORE_DIG_RW_TRIO0_0 0x2100
+#define CORE_DIG_RW_TRIO0_1 0x2102
+#define CORE_DIG_RW_TRIO0_2 0x2104
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_0 0x2400
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_1 0x2402
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_2 0x2404
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_3 0x2406
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_4 0x2408
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_5 0x240a
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_6 0x240c
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_7 0x240e
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_8 0x2410
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_9 0x2412
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_10 0x2414
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_11 0x2416
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_12 0x2418
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_13 0x241a
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_14 0x241c
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_15 0x241e
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_0 0x2420
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_1 0x2422
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_2 0x2424
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_3 0x2426
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_4 0x2428
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_5 0x242a
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_6 0x242c
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_7 0x242e
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_8 0x2430
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_9 0x2432
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_10 0x2434
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_11 0x2436
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_12 0x2438
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_13 0x243a
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_14 0x243c
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_15 0x243e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_0 0x2440
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_1 0x2442
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2 0x2444
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_3 0x2446
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_4 0x2448
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_5 0x244a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_6 0x244c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_7 0x244e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_8 0x2450
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9 0x2452
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_10 0x2454
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_11 0x2456
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_12 0x2458
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_13 0x245a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_14 0x245c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15 0x245e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_0 0x2460
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_1 0x2462
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_2 0x2464
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_3 0x2466
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_4 0x2468
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_5 0x246a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_6 0x246c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_7 0x246e
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_8 0x2470
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_9 0x2472
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_10 0x2474
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_11 0x2476
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_12 0x2478
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_13 0x247a
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_14 0x247c
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_15 0x247e
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_0 0x2480
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_1 0x2482
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_2 0x2484
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_3 0x2486
+#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_4 0x2488
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_0 0x24a0
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_1 0x24a2
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_5_2 0x24a4
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_5_3 0x24a6
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_4 0x24a8
+#define CORE_DIG_RW_TRIO1_0 0x2500
+#define CORE_DIG_RW_TRIO1_1 0x2502
+#define CORE_DIG_RW_TRIO1_2 0x2504
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_0 0x2800
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_1 0x2802
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_2 0x2804
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_3 0x2806
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_4 0x2808
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_5 0x280a
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_6 0x280c
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_7 0x280e
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_8 0x2810
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_9 0x2812
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_10 0x2814
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_11 0x2816
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_12 0x2818
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_13 0x281a
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_14 0x281c
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_15 0x281e
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_0 0x2820
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_1 0x2822
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_2 0x2824
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_3 0x2826
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_4 0x2828
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_5 0x282a
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_6 0x282c
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_7 0x282e
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_8 0x2830
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_9 0x2832
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_10 0x2834
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_11 0x2836
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_12 0x2838
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_13 0x283a
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_14 0x283c
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_15 0x283e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_0 0x2840
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_1 0x2842
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2 0x2844
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_3 0x2846
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_4 0x2848
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_5 0x284a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_6 0x284c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_7 0x284e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_8 0x2850
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9 0x2852
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_10 0x2854
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_11 0x2856
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_12 0x2858
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_13 0x285a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_14 0x285c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15 0x285e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_0 0x2860
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_1 0x2862
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_2 0x2864
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_3 0x2866
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_4 0x2868
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_5 0x286a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_6 0x286c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_7 0x286e
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_8 0x2870
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_9 0x2872
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_10 0x2874
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_11 0x2876
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_12 0x2878
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_13 0x287a
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_14 0x287c
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_15 0x287e
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_0 0x2880
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_1 0x2882
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_2 0x2884
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_3 0x2886
+#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_4 0x2888
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_0 0x28a0
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_1 0x28a2
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_5_2 0x28a4
+#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_5_3 0x28a6
+#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_4 0x28a8
+#define CORE_DIG_RW_TRIO2_0 0x2900
+#define CORE_DIG_RW_TRIO2_1 0x2902
+#define CORE_DIG_RW_TRIO2_2 0x2904
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_0 0x2c00
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_1 0x2c02
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_2 0x2c04
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_3 0x2c06
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_4 0x2c08
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_5 0x2c0a
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_6 0x2c0c
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_7 0x2c0e
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_8 0x2c10
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_9 0x2c12
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_10 0x2c14
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_11 0x2c16
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_12 0x2c18
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_13 0x2c1a
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_14 0x2c1c
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_15 0x2c1e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_0 0x2c40
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_1 0x2c42
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2 0x2c44
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_3 0x2c46
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_4 0x2c48
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_5 0x2c4a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_6 0x2c4c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_7 0x2c4e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_8 0x2c50
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9 0x2c52
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_10 0x2c54
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_11 0x2c56
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_12 0x2c58
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_13 0x2c5a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_14 0x2c5c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_15 0x2c5e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_0 0x2c60
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_1 0x2c62
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_2 0x2c64
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_3 0x2c66
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_4 0x2c68
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_5 0x2c6a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_6 0x2c6c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_7 0x2c6e
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_8 0x2c70
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_9 0x2c72
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_10 0x2c74
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_11 0x2c76
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_12 0x2c78
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_13 0x2c7a
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_14 0x2c7c
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_15 0x2c7e
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_0 0x2c80
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_1 0x2c82
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_2 0x2c84
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_3 0x2c86
+#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_4 0x2c88
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_0 0x3040
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_1 0x3042
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2 0x3044
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_3 0x3046
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_4 0x3048
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_5 0x304a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_6 0x304c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_7 0x304e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_8 0x3050
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9 0x3052
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_10 0x3054
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_11 0x3056
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_12 0x3058
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_13 0x305a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_14 0x305c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_15 0x305e
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_0 0x3060
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_1 0x3062
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_2 0x3064
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_3 0x3066
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_4 0x3068
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_5 0x306a
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_6 0x306c
+#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_7 0x306e
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_8 0x3070
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_9 0x3072
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_10 0x3074
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_11 0x3076
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_12 0x3078
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_13 0x307a
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_14 0x307c
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_15 0x307e
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_0 0x3080
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_1 0x3082
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_2 0x3084
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_3 0x3086
+#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_4 0x3088
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_CLK_OVR_0_0 0x3400
+#define CORE_DIG_IOCTRL_RW_DPHY_PPI_CLK_OVR_0_1 0x3402
+#define CORE_DIG_IOCTRL_R_DPHY_PPI_CLK_OVR_0_2 0x3404
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_0 0x3800
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_1 0x3802
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_2 0x3804
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_3 0x3806
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_4 0x3808
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_5 0x380a
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_6 0x380c
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_7 0x380e
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_8 0x3810
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_9 0x3812
+#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_10 0x3814
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_11 0x3816
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_12 0x3818
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_13 0x381a
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_14 0x381c
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_15 0x381e
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_0 0x3820
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_1 0x3822
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_2 0x3824
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_3 0x3826
+#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_4 0x3828
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0 0x3840
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1 0x3842
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_2 0x3844
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_3 0x3846
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_4 0x3848
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_5 0x384a
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_6 0x384c
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7 0x384e
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_8 0x3850
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_9 0x3852
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_10 0x3854
+#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_11 0x3856
+#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_12 0x3858
+#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_13 0x385a
+#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_14 0x385c
+#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_15 0x385e
+#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_3_0 0x3860
+#define CORE_DIG_RW_COMMON_0 0x3880
+#define CORE_DIG_RW_COMMON_1 0x3882
+#define CORE_DIG_RW_COMMON_2 0x3884
+#define CORE_DIG_RW_COMMON_3 0x3886
+#define CORE_DIG_RW_COMMON_4 0x3888
+#define CORE_DIG_RW_COMMON_5 0x388a
+#define CORE_DIG_RW_COMMON_6 0x388c
+#define CORE_DIG_RW_COMMON_7 0x388e
+#define CORE_DIG_RW_COMMON_8 0x3890
+#define CORE_DIG_RW_COMMON_9 0x3892
+#define CORE_DIG_RW_COMMON_10 0x3894
+#define CORE_DIG_RW_COMMON_11 0x3896
+#define CORE_DIG_RW_COMMON_12 0x3898
+#define CORE_DIG_RW_COMMON_13 0x389a
+#define CORE_DIG_RW_COMMON_14 0x389c
+#define CORE_DIG_RW_COMMON_15 0x389e
+#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_0 0x39e0
+#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_1 0x39e2
+#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_2 0x39e4
+#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_3 0x39e6
+#define CORE_DIG_COMMON_RW_DESKEW_FINE_MEM 0x3fe0
+#define CORE_DIG_COMMON_R_DESKEW_FINE_MEM 0x3fe2
+#define PPI_RW_DPHY_LANE0_LBERT_0 0x4000
+#define PPI_RW_DPHY_LANE0_LBERT_1 0x4002
+#define PPI_R_DPHY_LANE0_LBERT_0 0x4004
+#define PPI_R_DPHY_LANE0_LBERT_1 0x4006
+#define PPI_RW_DPHY_LANE0_SPARE 0x4008
+#define PPI_RW_DPHY_LANE1_LBERT_0 0x4400
+#define PPI_RW_DPHY_LANE1_LBERT_1 0x4402
+#define PPI_R_DPHY_LANE1_LBERT_0 0x4404
+#define PPI_R_DPHY_LANE1_LBERT_1 0x4406
+#define PPI_RW_DPHY_LANE1_SPARE 0x4408
+#define PPI_RW_DPHY_LANE2_LBERT_0 0x4800
+#define PPI_RW_DPHY_LANE2_LBERT_1 0x4802
+#define PPI_R_DPHY_LANE2_LBERT_0 0x4804
+#define PPI_R_DPHY_LANE2_LBERT_1 0x4806
+#define PPI_RW_DPHY_LANE2_SPARE 0x4808
+#define PPI_RW_DPHY_LANE3_LBERT_0 0x4c00
+#define PPI_RW_DPHY_LANE3_LBERT_1 0x4c02
+#define PPI_R_DPHY_LANE3_LBERT_0 0x4c04
+#define PPI_R_DPHY_LANE3_LBERT_1 0x4c06
+#define PPI_RW_DPHY_LANE3_SPARE 0x4c08
+#define CORE_DIG_DLANE_0_RW_CFG_0 0x6000
+#define CORE_DIG_DLANE_0_RW_CFG_1 0x6002
+#define CORE_DIG_DLANE_0_RW_CFG_2 0x6004
+#define CORE_DIG_DLANE_0_RW_LP_0 0x6080
+#define CORE_DIG_DLANE_0_RW_LP_1 0x6082
+#define CORE_DIG_DLANE_0_RW_LP_2 0x6084
+#define CORE_DIG_DLANE_0_R_LP_0 0x60a0
+#define CORE_DIG_DLANE_0_R_LP_1 0x60a2
+#define CORE_DIG_DLANE_0_R_HS_TX_0 0x60e0
+#define CORE_DIG_DLANE_0_RW_HS_RX_0 0x6100
+#define CORE_DIG_DLANE_0_RW_HS_RX_1 0x6102
+#define CORE_DIG_DLANE_0_RW_HS_RX_2 0x6104
+#define CORE_DIG_DLANE_0_RW_HS_RX_3 0x6106
+#define CORE_DIG_DLANE_0_RW_HS_RX_4 0x6108
+#define CORE_DIG_DLANE_0_RW_HS_RX_5 0x610a
+#define CORE_DIG_DLANE_0_RW_HS_RX_6 0x610c
+#define CORE_DIG_DLANE_0_RW_HS_RX_7 0x610e
+#define CORE_DIG_DLANE_0_RW_HS_RX_8 0x6110
+#define CORE_DIG_DLANE_0_RW_HS_RX_9 0x6112
+#define CORE_DIG_DLANE_0_R_HS_RX_0 0x6120
+#define CORE_DIG_DLANE_0_R_HS_RX_1 0x6122
+#define CORE_DIG_DLANE_0_R_HS_RX_2 0x6124
+#define CORE_DIG_DLANE_0_R_HS_RX_3 0x6126
+#define CORE_DIG_DLANE_0_R_HS_RX_4 0x6128
+#define CORE_DIG_DLANE_0_RW_HS_TX_0 0x6200
+#define CORE_DIG_DLANE_0_RW_HS_TX_1 0x6202
+#define CORE_DIG_DLANE_0_RW_HS_TX_2 0x6204
+#define CORE_DIG_DLANE_0_RW_HS_TX_3 0x6206
+#define CORE_DIG_DLANE_0_RW_HS_TX_4 0x6208
+#define CORE_DIG_DLANE_0_RW_HS_TX_5 0x620a
+#define CORE_DIG_DLANE_0_RW_HS_TX_6 0x620c
+#define CORE_DIG_DLANE_0_RW_HS_TX_7 0x620e
+#define CORE_DIG_DLANE_0_RW_HS_TX_8 0x6210
+#define CORE_DIG_DLANE_0_RW_HS_TX_9 0x6212
+#define CORE_DIG_DLANE_0_RW_HS_TX_10 0x6214
+#define CORE_DIG_DLANE_0_RW_HS_TX_11 0x6216
+#define CORE_DIG_DLANE_0_RW_HS_TX_12 0x6218
+#define CORE_DIG_DLANE_1_RW_CFG_0 0x6400
+#define CORE_DIG_DLANE_1_RW_CFG_1 0x6402
+#define CORE_DIG_DLANE_1_RW_CFG_2 0x6404
+#define CORE_DIG_DLANE_1_RW_LP_0 0x6480
+#define CORE_DIG_DLANE_1_RW_LP_1 0x6482
+#define CORE_DIG_DLANE_1_RW_LP_2 0x6484
+#define CORE_DIG_DLANE_1_R_LP_0 0x64a0
+#define CORE_DIG_DLANE_1_R_LP_1 0x64a2
+#define CORE_DIG_DLANE_1_R_HS_TX_0 0x64e0
+#define CORE_DIG_DLANE_1_RW_HS_RX_0 0x6500
+#define CORE_DIG_DLANE_1_RW_HS_RX_1 0x6502
+#define CORE_DIG_DLANE_1_RW_HS_RX_2 0x6504
+#define CORE_DIG_DLANE_1_RW_HS_RX_3 0x6506
+#define CORE_DIG_DLANE_1_RW_HS_RX_4 0x6508
+#define CORE_DIG_DLANE_1_RW_HS_RX_5 0x650a
+#define CORE_DIG_DLANE_1_RW_HS_RX_6 0x650c
+#define CORE_DIG_DLANE_1_RW_HS_RX_7 0x650e
+#define CORE_DIG_DLANE_1_RW_HS_RX_8 0x6510
+#define CORE_DIG_DLANE_1_RW_HS_RX_9 0x6512
+#define CORE_DIG_DLANE_1_R_HS_RX_0 0x6520
+#define CORE_DIG_DLANE_1_R_HS_RX_1 0x6522
+#define CORE_DIG_DLANE_1_R_HS_RX_2 0x6524
+#define CORE_DIG_DLANE_1_R_HS_RX_3 0x6526
+#define CORE_DIG_DLANE_1_R_HS_RX_4 0x6528
+#define CORE_DIG_DLANE_1_RW_HS_TX_0 0x6600
+#define CORE_DIG_DLANE_1_RW_HS_TX_1 0x6602
+#define CORE_DIG_DLANE_1_RW_HS_TX_2 0x6604
+#define CORE_DIG_DLANE_1_RW_HS_TX_3 0x6606
+#define CORE_DIG_DLANE_1_RW_HS_TX_4 0x6608
+#define CORE_DIG_DLANE_1_RW_HS_TX_5 0x660a
+#define CORE_DIG_DLANE_1_RW_HS_TX_6 0x660c
+#define CORE_DIG_DLANE_1_RW_HS_TX_7 0x660e
+#define CORE_DIG_DLANE_1_RW_HS_TX_8 0x6610
+#define CORE_DIG_DLANE_1_RW_HS_TX_9 0x6612
+#define CORE_DIG_DLANE_1_RW_HS_TX_10 0x6614
+#define CORE_DIG_DLANE_1_RW_HS_TX_11 0x6616
+#define CORE_DIG_DLANE_1_RW_HS_TX_12 0x6618
+#define CORE_DIG_DLANE_2_RW_CFG_0 0x6800
+#define CORE_DIG_DLANE_2_RW_CFG_1 0x6802
+#define CORE_DIG_DLANE_2_RW_CFG_2 0x6804
+#define CORE_DIG_DLANE_2_RW_LP_0 0x6880
+#define CORE_DIG_DLANE_2_RW_LP_1 0x6882
+#define CORE_DIG_DLANE_2_RW_LP_2 0x6884
+#define CORE_DIG_DLANE_2_R_LP_0 0x68a0
+#define CORE_DIG_DLANE_2_R_LP_1 0x68a2
+#define CORE_DIG_DLANE_2_R_HS_TX_0 0x68e0
+#define CORE_DIG_DLANE_2_RW_HS_RX_0 0x6900
+#define CORE_DIG_DLANE_2_RW_HS_RX_1 0x6902
+#define CORE_DIG_DLANE_2_RW_HS_RX_2 0x6904
+#define CORE_DIG_DLANE_2_RW_HS_RX_3 0x6906
+#define CORE_DIG_DLANE_2_RW_HS_RX_4 0x6908
+#define CORE_DIG_DLANE_2_RW_HS_RX_5 0x690a
+#define CORE_DIG_DLANE_2_RW_HS_RX_6 0x690c
+#define CORE_DIG_DLANE_2_RW_HS_RX_7 0x690e
+#define CORE_DIG_DLANE_2_RW_HS_RX_8 0x6910
+#define CORE_DIG_DLANE_2_RW_HS_RX_9 0x6912
+#define CORE_DIG_DLANE_2_R_HS_RX_0 0x6920
+#define CORE_DIG_DLANE_2_R_HS_RX_1 0x6922
+#define CORE_DIG_DLANE_2_R_HS_RX_2 0x6924
+#define CORE_DIG_DLANE_2_R_HS_RX_3 0x6926
+#define CORE_DIG_DLANE_2_R_HS_RX_4 0x6928
+#define CORE_DIG_DLANE_2_RW_HS_TX_0 0x6a00
+#define CORE_DIG_DLANE_2_RW_HS_TX_1 0x6a02
+#define CORE_DIG_DLANE_2_RW_HS_TX_2 0x6a04
+#define CORE_DIG_DLANE_2_RW_HS_TX_3 0x6a06
+#define CORE_DIG_DLANE_2_RW_HS_TX_4 0x6a08
+#define CORE_DIG_DLANE_2_RW_HS_TX_5 0x6a0a
+#define CORE_DIG_DLANE_2_RW_HS_TX_6 0x6a0c
+#define CORE_DIG_DLANE_2_RW_HS_TX_7 0x6a0e
+#define CORE_DIG_DLANE_2_RW_HS_TX_8 0x6a10
+#define CORE_DIG_DLANE_2_RW_HS_TX_9 0x6a12
+#define CORE_DIG_DLANE_2_RW_HS_TX_10 0x6a14
+#define CORE_DIG_DLANE_2_RW_HS_TX_11 0x6a16
+#define CORE_DIG_DLANE_2_RW_HS_TX_12 0x6a18
+#define CORE_DIG_DLANE_3_RW_CFG_0 0x6c00
+#define CORE_DIG_DLANE_3_RW_CFG_1 0x6c02
+#define CORE_DIG_DLANE_3_RW_CFG_2 0x6c04
+#define CORE_DIG_DLANE_3_RW_LP_0 0x6c80
+#define CORE_DIG_DLANE_3_RW_LP_1 0x6c82
+#define CORE_DIG_DLANE_3_RW_LP_2 0x6c84
+#define CORE_DIG_DLANE_3_R_LP_0 0x6ca0
+#define CORE_DIG_DLANE_3_R_LP_1 0x6ca2
+#define CORE_DIG_DLANE_3_R_HS_TX_0 0x6ce0
+#define CORE_DIG_DLANE_3_RW_HS_RX_0 0x6d00
+#define CORE_DIG_DLANE_3_RW_HS_RX_1 0x6d02
+#define CORE_DIG_DLANE_3_RW_HS_RX_2 0x6d04
+#define CORE_DIG_DLANE_3_RW_HS_RX_3 0x6d06
+#define CORE_DIG_DLANE_3_RW_HS_RX_4 0x6d08
+#define CORE_DIG_DLANE_3_RW_HS_RX_5 0x6d0a
+#define CORE_DIG_DLANE_3_RW_HS_RX_6 0x6d0c
+#define CORE_DIG_DLANE_3_RW_HS_RX_7 0x6d0e
+#define CORE_DIG_DLANE_3_RW_HS_RX_8 0x6d10
+#define CORE_DIG_DLANE_3_RW_HS_RX_9 0x6d12
+#define CORE_DIG_DLANE_3_R_HS_RX_0 0x6d20
+#define CORE_DIG_DLANE_3_R_HS_RX_1 0x6d22
+#define CORE_DIG_DLANE_3_R_HS_RX_2 0x6d24
+#define CORE_DIG_DLANE_3_R_HS_RX_3 0x6d26
+#define CORE_DIG_DLANE_3_R_HS_RX_4 0x6d28
+#define CORE_DIG_DLANE_3_RW_HS_TX_0 0x6e00
+#define CORE_DIG_DLANE_3_RW_HS_TX_1 0x6e02
+#define CORE_DIG_DLANE_3_RW_HS_TX_2 0x6e04
+#define CORE_DIG_DLANE_3_RW_HS_TX_3 0x6e06
+#define CORE_DIG_DLANE_3_RW_HS_TX_4 0x6e08
+#define CORE_DIG_DLANE_3_RW_HS_TX_5 0x6e0a
+#define CORE_DIG_DLANE_3_RW_HS_TX_6 0x6e0c
+#define CORE_DIG_DLANE_3_RW_HS_TX_7 0x6e0e
+#define CORE_DIG_DLANE_3_RW_HS_TX_8 0x6e10
+#define CORE_DIG_DLANE_3_RW_HS_TX_9 0x6e12
+#define CORE_DIG_DLANE_3_RW_HS_TX_10 0x6e14
+#define CORE_DIG_DLANE_3_RW_HS_TX_11 0x6e16
+#define CORE_DIG_DLANE_3_RW_HS_TX_12 0x6e18
+#define CORE_DIG_DLANE_CLK_RW_CFG_0 0x7000
+#define CORE_DIG_DLANE_CLK_RW_CFG_1 0x7002
+#define CORE_DIG_DLANE_CLK_RW_CFG_2 0x7004
+#define CORE_DIG_DLANE_CLK_RW_LP_0 0x7080
+#define CORE_DIG_DLANE_CLK_RW_LP_1 0x7082
+#define CORE_DIG_DLANE_CLK_RW_LP_2 0x7084
+#define CORE_DIG_DLANE_CLK_R_LP_0 0x70a0
+#define CORE_DIG_DLANE_CLK_R_LP_1 0x70a2
+#define CORE_DIG_DLANE_CLK_R_HS_TX_0 0x70e0
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_0 0x7100
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_1 0x7102
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_2 0x7104
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_3 0x7106
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_4 0x7108
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_5 0x710a
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_6 0x710c
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_7 0x710e
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_8 0x7110
+#define CORE_DIG_DLANE_CLK_RW_HS_RX_9 0x7112
+#define CORE_DIG_DLANE_CLK_R_HS_RX_0 0x7120
+#define CORE_DIG_DLANE_CLK_R_HS_RX_1 0x7122
+#define CORE_DIG_DLANE_CLK_R_HS_RX_2 0x7124
+#define CORE_DIG_DLANE_CLK_R_HS_RX_3 0x7126
+#define CORE_DIG_DLANE_CLK_R_HS_RX_4 0x7128
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_0 0x7200
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_1 0x7202
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_2 0x7204
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_3 0x7206
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_4 0x7208
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_5 0x720a
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_6 0x720c
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_7 0x720e
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_8 0x7210
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_9 0x7212
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_10 0x7214
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_11 0x7216
+#define CORE_DIG_DLANE_CLK_RW_HS_TX_12 0x7218
+#define PPI_RW_CPHY_TRIO0_LBERT_0 0x8000
+#define PPI_RW_CPHY_TRIO0_LBERT_1 0x8002
+#define PPI_R_CPHY_TRIO0_LBERT_0 0x8004
+#define PPI_R_CPHY_TRIO0_LBERT_1 0x8006
+#define PPI_RW_CPHY_TRIO0_SPARE 0x8008
+#define PPI_RW_CPHY_TRIO1_LBERT_0 0x8400
+#define PPI_RW_CPHY_TRIO1_LBERT_1 0x8402
+#define PPI_R_CPHY_TRIO1_LBERT_0 0x8404
+#define PPI_R_CPHY_TRIO1_LBERT_1 0x8406
+#define PPI_RW_CPHY_TRIO1_SPARE 0x8408
+#define PPI_RW_CPHY_TRIO2_LBERT_0 0x8800
+#define PPI_RW_CPHY_TRIO2_LBERT_1 0x8802
+#define PPI_R_CPHY_TRIO2_LBERT_0 0x8804
+#define PPI_R_CPHY_TRIO2_LBERT_1 0x8806
+#define PPI_RW_CPHY_TRIO2_SPARE 0x8808
+#define CORE_DIG_CLANE_0_RW_CFG_0 0xa000
+#define CORE_DIG_CLANE_0_RW_CFG_2 0xa004
+#define CORE_DIG_CLANE_0_RW_LP_0 0xa080
+#define CORE_DIG_CLANE_0_RW_LP_1 0xa082
+#define CORE_DIG_CLANE_0_RW_LP_2 0xa084
+#define CORE_DIG_CLANE_0_R_LP_0 0xa0a0
+#define CORE_DIG_CLANE_0_R_LP_1 0xa0a2
+#define CORE_DIG_CLANE_0_RW_HS_RX_0 0xa100
+#define CORE_DIG_CLANE_0_RW_HS_RX_1 0xa102
+#define CORE_DIG_CLANE_0_RW_HS_RX_2 0xa104
+#define CORE_DIG_CLANE_0_RW_HS_RX_3 0xa106
+#define CORE_DIG_CLANE_0_RW_HS_RX_4 0xa108
+#define CORE_DIG_CLANE_0_RW_HS_RX_5 0xa10a
+#define CORE_DIG_CLANE_0_RW_HS_RX_6 0xa10c
+#define CORE_DIG_CLANE_0_R_RX_0 0xa120
+#define CORE_DIG_CLANE_0_R_RX_1 0xa122
+#define CORE_DIG_CLANE_0_R_TX_0 0xa124
+#define CORE_DIG_CLANE_0_R_RX_2 0xa126
+#define CORE_DIG_CLANE_0_R_RX_3 0xa128
+#define CORE_DIG_CLANE_0_RW_HS_TX_0 0xa200
+#define CORE_DIG_CLANE_0_RW_HS_TX_1 0xa202
+#define CORE_DIG_CLANE_0_RW_HS_TX_2 0xa204
+#define CORE_DIG_CLANE_0_RW_HS_TX_3 0xa206
+#define CORE_DIG_CLANE_0_RW_HS_TX_4 0xa208
+#define CORE_DIG_CLANE_0_RW_HS_TX_5 0xa20a
+#define CORE_DIG_CLANE_0_RW_HS_TX_6 0xa20c
+#define CORE_DIG_CLANE_0_RW_HS_TX_7 0xa20e
+#define CORE_DIG_CLANE_0_RW_HS_TX_8 0xa210
+#define CORE_DIG_CLANE_0_RW_HS_TX_9 0xa212
+#define CORE_DIG_CLANE_0_RW_HS_TX_10 0xa214
+#define CORE_DIG_CLANE_0_RW_HS_TX_11 0xa216
+#define CORE_DIG_CLANE_0_RW_HS_TX_12 0xa218
+#define CORE_DIG_CLANE_0_RW_HS_TX_13 0xa21a
+#define CORE_DIG_CLANE_1_RW_CFG_0 0xa400
+#define CORE_DIG_CLANE_1_RW_CFG_2 0xa404
+#define CORE_DIG_CLANE_1_RW_LP_0 0xa480
+#define CORE_DIG_CLANE_1_RW_LP_1 0xa482
+#define CORE_DIG_CLANE_1_RW_LP_2 0xa484
+#define CORE_DIG_CLANE_1_R_LP_0 0xa4a0
+#define CORE_DIG_CLANE_1_R_LP_1 0xa4a2
+#define CORE_DIG_CLANE_1_RW_HS_RX_0 0xa500
+#define CORE_DIG_CLANE_1_RW_HS_RX_1 0xa502
+#define CORE_DIG_CLANE_1_RW_HS_RX_2 0xa504
+#define CORE_DIG_CLANE_1_RW_HS_RX_3 0xa506
+#define CORE_DIG_CLANE_1_RW_HS_RX_4 0xa508
+#define CORE_DIG_CLANE_1_RW_HS_RX_5 0xa50a
+#define CORE_DIG_CLANE_1_RW_HS_RX_6 0xa50c
+#define CORE_DIG_CLANE_1_R_RX_0 0xa520
+#define CORE_DIG_CLANE_1_R_RX_1 0xa522
+#define CORE_DIG_CLANE_1_R_TX_0 0xa524
+#define CORE_DIG_CLANE_1_R_RX_2 0xa526
+#define CORE_DIG_CLANE_1_R_RX_3 0xa528
+#define CORE_DIG_CLANE_1_RW_HS_TX_0 0xa600
+#define CORE_DIG_CLANE_1_RW_HS_TX_1 0xa602
+#define CORE_DIG_CLANE_1_RW_HS_TX_2 0xa604
+#define CORE_DIG_CLANE_1_RW_HS_TX_3 0xa606
+#define CORE_DIG_CLANE_1_RW_HS_TX_4 0xa608
+#define CORE_DIG_CLANE_1_RW_HS_TX_5 0xa60a
+#define CORE_DIG_CLANE_1_RW_HS_TX_6 0xa60c
+#define CORE_DIG_CLANE_1_RW_HS_TX_7 0xa60e
+#define CORE_DIG_CLANE_1_RW_HS_TX_8 0xa610
+#define CORE_DIG_CLANE_1_RW_HS_TX_9 0xa612
+#define CORE_DIG_CLANE_1_RW_HS_TX_10 0xa614
+#define CORE_DIG_CLANE_1_RW_HS_TX_11 0xa616
+#define CORE_DIG_CLANE_1_RW_HS_TX_12 0xa618
+#define CORE_DIG_CLANE_1_RW_HS_TX_13 0xa61a
+#define CORE_DIG_CLANE_2_RW_CFG_0 0xa800
+#define CORE_DIG_CLANE_2_RW_CFG_2 0xa804
+#define CORE_DIG_CLANE_2_RW_LP_0 0xa880
+#define CORE_DIG_CLANE_2_RW_LP_1 0xa882
+#define CORE_DIG_CLANE_2_RW_LP_2 0xa884
+#define CORE_DIG_CLANE_2_R_LP_0 0xa8a0
+#define CORE_DIG_CLANE_2_R_LP_1 0xa8a2
+#define CORE_DIG_CLANE_2_RW_HS_RX_0 0xa900
+#define CORE_DIG_CLANE_2_RW_HS_RX_1 0xa902
+#define CORE_DIG_CLANE_2_RW_HS_RX_2 0xa904
+#define CORE_DIG_CLANE_2_RW_HS_RX_3 0xa906
+#define CORE_DIG_CLANE_2_RW_HS_RX_4 0xa908
+#define CORE_DIG_CLANE_2_RW_HS_RX_5 0xa90a
+#define CORE_DIG_CLANE_2_RW_HS_RX_6 0xa90c
+#define CORE_DIG_CLANE_2_R_RX_0 0xa920
+#define CORE_DIG_CLANE_2_R_RX_1 0xa922
+#define CORE_DIG_CLANE_2_R_TX_0 0xa924
+#define CORE_DIG_CLANE_2_R_RX_2 0xa926
+#define CORE_DIG_CLANE_2_R_RX_3 0xa928
+#define CORE_DIG_CLANE_2_RW_HS_TX_0 0xaa00
+#define CORE_DIG_CLANE_2_RW_HS_TX_1 0xaa02
+#define CORE_DIG_CLANE_2_RW_HS_TX_2 0xaa04
+#define CORE_DIG_CLANE_2_RW_HS_TX_3 0xaa06
+#define CORE_DIG_CLANE_2_RW_HS_TX_4 0xaa08
+#define CORE_DIG_CLANE_2_RW_HS_TX_5 0xaa0a
+#define CORE_DIG_CLANE_2_RW_HS_TX_6 0xaa0c
+#define CORE_DIG_CLANE_2_RW_HS_TX_7 0xaa0e
+#define CORE_DIG_CLANE_2_RW_HS_TX_8 0xaa10
+#define CORE_DIG_CLANE_2_RW_HS_TX_9 0xaa12
+#define CORE_DIG_CLANE_2_RW_HS_TX_10 0xaa14
+#define CORE_DIG_CLANE_2_RW_HS_TX_11 0xaa16
+#define CORE_DIG_CLANE_2_RW_HS_TX_12 0xaa18
+#define CORE_DIG_CLANE_2_RW_HS_TX_13 0xaa1a
+
+/* dwc csi host controller registers */
+#define IS_IO_CSI2_HOST_BASE(i) (IS_IO_BASE + 0x40000 + \
+ 0x2000 * (i))
+#define VERSION 0x0
+#define N_LANES 0x4
+#define CSI2_RESETN 0x8
+#define INT_ST_MAIN 0xc
+#define DATA_IDS_1 0x10
+#define DATA_IDS_2 0x14
+#define CDPHY_MODE 0x1c
+#define DATA_IDS_VC_1 0x30
+#define DATA_IDS_VC_2 0x34
+#define PHY_SHUTDOWNZ 0x40
+#define DPHY_RSTZ 0x44
+#define PHY_RX 0x48
+#define PHY_STOPSTATE 0x4c
+#define PHY_TEST_CTRL0 0x50
+#define PHY_TEST_CTRL1 0x54
+#define PPI_PG_PATTERN_VRES 0x60
+#define PPI_PG_PATTERN_HRES 0x64
+#define PPI_PG_CONFIG 0x68
+#define PPI_PG_ENABLE 0x6c
+#define PPI_PG_STATUS 0x70
+#define VC_EXTENSION 0xc8
+#define PHY_CAL 0xcc
+#define INT_ST_PHY_FATAL 0xe0
+#define INT_MSK_PHY_FATAL 0xe4
+#define INT_FORCE_PHY_FATAL 0xe8
+#define INT_ST_PKT_FATAL 0xf0
+#define INT_MSK_PKT_FATAL 0xf4
+#define INT_FORCE_PKT_FATAL 0xf8
+#define INT_ST_PHY 0x110
+#define INT_MSK_PHY 0x114
+#define INT_FORCE_PHY 0x118
+#define INT_ST_LINE 0x130
+#define INT_MSK_LINE 0x134
+#define INT_FORCE_LINE 0x138
+#define INT_ST_BNDRY_FRAME_FATAL 0x280
+#define INT_MSK_BNDRY_FRAME_FATAL 0x284
+#define INT_FORCE_BNDRY_FRAME_FATAL 0x288
+#define INT_ST_SEQ_FRAME_FATAL 0x290
+#define INT_MSK_SEQ_FRAME_FATAL 0x294
+#define INT_FORCE_SEQ_FRAME_FATAL 0x298
+#define INT_ST_CRC_FRAME_FATAL 0x2a0
+#define INT_MSK_CRC_FRAME_FATAL 0x2a4
+#define INT_FORCE_CRC_FRAME_FATAL 0x2a8
+#define INT_ST_PLD_CRC_FATAL 0x2b0
+#define INT_MSK_PLD_CRC_FATAL 0x2b4
+#define INT_FORCE_PLD_CRC_FATAL 0x2b8
+#define INT_ST_DATA_ID 0x2c0
+#define INT_MSK_DATA_ID 0x2c4
+#define INT_FORCE_DATA_ID 0x2c8
+#define INT_ST_ECC_CORRECTED 0x2d0
+#define INT_MSK_ECC_CORRECTED 0x2d4
+#define INT_FORCE_ECC_CORRECTED 0x2d8
+#define SCRAMBLING 0x300
+#define SCRAMBLING_SEED1 0x304
+#define SCRAMBLING_SEED2 0x308
+#define SCRAMBLING_SEED3 0x30c
+#define SCRAMBLING_SEED4 0x310
+
+#define IS_IO_CSI2_ADPL_PORT_BASE(i) (IS_IO_BASE + 0x40800 + \
+ 0x2000 * (i))
+#define CSI2_ADPL_INPUT_MODE 0x0
+#define CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_EN 0x4
+#define CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_ADDR 0x8
+#define CSI2_ADPL_CSI_RX_ERR_IRQ_STATUS 0xc
+#define CSI2_ADPL_IRQ_CTL_COMMON_STATUS 0xa4
+#define CSI2_ADPL_IRQ_CTL_COMMON_CLEAR 0xa8
+#define CSI2_ADPL_IRQ_CTL_COMMON_ENABLE 0xac
+#define CSI2_ADPL_IRQ_CTL_FS_STATUS 0xbc
+#define CSI2_ADPL_IRQ_CTL_FS_CLEAR 0xc0
+#define CSI2_ADPL_IRQ_CTL_FS_ENABLE 0xc4
+#define CSI2_ADPL_IRQ_CTL_FE_STATUS 0xc8
+#define CSI2_ADPL_IRQ_CTL_FE_CLEAR 0xcc
+#define CSI2_ADPL_IRQ_CTL_FE_ENABLE 0xd0
+
+/* software control of the legacy CSI IRQs */
+#define IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(i) (IS_IO_BASE + 0x40c00 + \
+ 0x2000 * (i))
+#define IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(i) (IS_IO_BASE + 0x40d00 + \
+ 0x2000 * (i))
+#define IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE (IS_IO_BASE + 0x49000)
+#define IS_IO_CSI2_IRQ_CTRL_BASE (IS_IO_BASE + 0x4e100)
+
+#define IRQ_CTL_EDGE 0x0
+#define IRQ_CTL_MASK 0x4
+#define IRQ_CTL_STATUS 0x8
+#define IRQ_CTL_CLEAR 0xc
+#define IRQ_CTL_ENABLE 0x10
+/* FE irq for PTL */
+#define IRQ1_CTL_MASK 0x14
+#define IRQ1_CTL_STATUS 0x18
+#define IRQ1_CTL_CLEAR 0x1c
+#define IRQ1_CTL_ENABLE 0x20
+
+/* software sets the clock gate to select between the CSI port and the MGC */
+#define IS_IO_GPREGS_BASE (IS_IO_BASE + 0x49200)
+#define SRST_PORT_ARB 0x0
+#define SRST_MGC 0x4
+#define SRST_WIDTH_CONV 0x8
+#define SRST_CSI_IRQ 0xc
+#define SRST_CSI_LEGACY_IRQ 0x10
+#define CLK_EN_TXCLKESC 0x14
+#define CLK_DIV_FACTOR_IS_CLK 0x18
+#define CLK_DIV_FACTOR_APB_CLK 0x1c
+#define CSI_PORT_CLK_GATE 0x20
+#define CSI_PORTAB_AGGREGATION 0x24
+#define MGC_CLK_GATE 0x2c
+#define CG_CTRL_BITS 0x3c
+#define SPARE_RW 0xf8
+#define SPARE_RO 0xfc
+
+#define IS_IO_CSI2_MPF_PORT_BASE(i) (IS_IO_BASE + 0x53000 + \
+ 0x1000 * (i))
+#define MPF_16_IRQ_CNTRL_STATUS 0x238
+#define MPF_16_IRQ_CNTRL_CLEAR 0x23c
+#define MPF_16_IRQ_CNTRL_ENABLE 0x240
+
+/* software configuration of the PHY */
+#define IS_IO_CSI2_GPREGS_BASE (IS_IO_BASE + 0x53400)
+#define IPU8_IS_IO_CSI2_GPREGS_BASE (IS_IO_BASE + 0x40e00)
+#define CSI_ADAPT_LAYER_SRST 0x0
+#define MPF_SRST_RST 0x4
+#define CSI_ERR_IRQ_CTRL_SRST 0x8
+#define CSI_SYNC_RC_SRST 0xc
+#define CSI_CG_CTRL_BITS 0x10
+#define SOC_CSI2HOST_SELECT 0x14
+#define PHY_RESET 0x18
+#define PHY_SHUTDOWN 0x1c
+#define PHY_MODE 0x20
+#define PHY_READY 0x24
+#define PHY_CLK_LANE_FORCE_CONTROL 0x28
+#define PHY_CLK_LANE_CONTROL 0x2c
+#define PHY_CLK_LANE_STATUS 0x30
+#define PHY_LANE_RX_ESC_REQ 0x34
+#define PHY_LANE_RX_ESC_DATA 0x38
+#define PHY_LANE_TURNDISABLE 0x3c
+#define PHY_LANE_DIRECTION 0x40
+#define PHY_LANE_FORCE_CONTROL 0x44
+#define PHY_LANE_CONTROL_EN 0x48
+#define PHY_LANE_CONTROL_DATAWIDTH 0x4c
+#define PHY_LANE_STATUS 0x50
+#define PHY_LANE_ERR 0x54
+#define PHY_LANE_RXALP 0x58
+#define PHY_LANE_RXALP_NIBBLE 0x5c
+#define PHY_PARITY_ERROR 0x60
+#define PHY_DEBUG_REGS_CLK_GATE_EN 0x64
+#define SPARE_RW 0xf8
+#define SPARE_RO 0xfc
+
+/* not touched by software */
+#define PORT_ARB_BASE (IS_IO_BASE + 0x4e000)
+#define PORT_ARB_IRQ_CTL_STATUS 0x4
+#define PORT_ARB_IRQ_CTL_CLEAR 0x8
+#define PORT_ARB_IRQ_CTL_ENABLE 0xc
+
+#define MGC_PPC 4U
+#define MGC_DTYPE_RAW(i) (((i) - 8) / 2)
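+/* e.g. (assuming (i) is the RAW bit depth) RAW8 -> 0, RAW10 -> 1, RAW12 -> 2 */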
+#define IS_IO_MGC_BASE (IS_IO_BASE + 0x48000)
+#define MGC_KICK 0x0
+#define MGC_ASYNC_STOP 0x4
+#define MGC_PORT_OFFSET 0x100
+#define MGC_CSI_PORT_MAP(i) (0x8 + (i) * 0x4)
+#define MGC_MG_PORT(i) (IS_IO_MGC_BASE + \
+ (i) * MGC_PORT_OFFSET)
+/* per mgc instance */
+#define MGC_MG_CSI_ADAPT_LAYER_TYPE 0x28
+#define MGC_MG_MODE 0x2c
+#define MGC_MG_INIT_COUNTER 0x30
+#define MGC_MG_MIPI_VC 0x34
+#define MGC_MG_MIPI_DTYPES 0x38
+#define MGC_MG_MULTI_DTYPES_MODE 0x3c
+#define MGC_MG_NOF_FRAMES 0x40
+#define MGC_MG_FRAME_DIM 0x44
+#define MGC_MG_HBLANK 0x48
+#define MGC_MG_VBLANK 0x4c
+#define MGC_MG_TPG_MODE 0x50
+#define MGC_MG_TPG_R0 0x54
+#define MGC_MG_TPG_G0 0x58
+#define MGC_MG_TPG_B0 0x5c
+#define MGC_MG_TPG_R1 0x60
+#define MGC_MG_TPG_G1 0x64
+#define MGC_MG_TPG_B1 0x68
+#define MGC_MG_TPG_FACTORS 0x6c
+#define MGC_MG_TPG_MASKS 0x70
+#define MGC_MG_TPG_XY_MASK 0x74
+#define MGC_MG_TPG_TILE_DIM 0x78
+#define MGC_MG_PRBS_LFSR_INIT_0 0x7c
+#define MGC_MG_PRBS_LFSR_INIT_1 0x80
+#define MGC_MG_SYNC_STOP_POINT 0x84
+#define MGC_MG_SYNC_STOP_POINT_LOC 0x88
+#define MGC_MG_ERR_INJECT 0x8c
+#define MGC_MG_ERR_LOCATION 0x90
+#define MGC_MG_DTO_SPEED_CTRL_EN 0x94
+#define MGC_MG_DTO_SPEED_CTRL_INCR_VAL 0x98
+#define MGC_MG_HOR_LOC_STTS 0x9c
+#define MGC_MG_VER_LOC_STTS 0xa0
+#define MGC_MG_FRAME_NUM_STTS 0xa4
+#define MGC_MG_BUSY_STTS 0xa8
+#define MGC_MG_STOPPED_STTS 0xac
+/* tile width and height in pixels for the chessboard and color palette patterns */
+#define MGC_TPG_TILE_WIDTH 64U
+#define MGC_TPG_TILE_HEIGHT 64U
+
+#define IPU_CSI_PORT_A_ADDR_OFFSET 0x0
+#define IPU_CSI_PORT_B_ADDR_OFFSET 0x0
+#define IPU_CSI_PORT_C_ADDR_OFFSET 0x0
+#define IPU_CSI_PORT_D_ADDR_OFFSET 0x0
+
+/*
+ * 0 - CSI RX Port 0 interrupt;
+ * 1 - MPF Port 0 interrupt;
+ * 2 - CSI RX Port 1 interrupt;
+ * 3 - MPF Port 1 interrupt;
+ * 4 - CSI RX Port 2 interrupt;
+ * 5 - MPF Port 2 interrupt;
+ * 6 - CSI RX Port 3 interrupt;
+ * 7 - MPF Port 3 interrupt;
+ * 8 - Port ARB FIFO 0 overflow;
+ * 9 - Port ARB FIFO 1 overflow;
+ * 10 - Port ARB FIFO 2 overflow;
+ * 11 - Port ARB FIFO 3 overflow;
+ * 12 - isys_cfgnoc_err_probe_intl;
+ * 13-15 - reserved
+ */
+#define IPU7_CSI_IS_IO_IRQ_MASK 0xffff
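+
+/*
+ * Illustrative decode of the layout above (a sketch, not driver code):
+ * even bits 0/2/4/6 carry the CSI RX interrupts for ports 0..3 and the
+ * odd bits 1/3/5/7 the matching MPF interrupts, i.e. for a given port:
+ *
+ *	csi_rx_pending = status & BIT(2 * port);
+ *	mpf_pending    = status & BIT(2 * port + 1);
+ */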
+
+/* Adapter layer irq */
+#define IPU7_CSI_ADPL_IRQ_MASK 0xffff
+
+/*
+ * SW IRQs from the legacy IRQ control, legacy IRQ status bits:
+ * IPU7
+ * 0 - CSI Port 0 error interrupt
+ * 1 - CSI Port 0 sync interrupt
+ * 2 - CSI Port 1 error interrupt
+ * 3 - CSI Port 1 sync interrupt
+ * 4 - CSI Port 2 error interrupt
+ * 5 - CSI Port 2 sync interrupt
+ * 6 - CSI Port 3 error interrupt
+ * 7 - CSI Port 3 sync interrupt
+ * IPU7P5
+ * 0 - CSI Port 0 error interrupt
+ * 1 - CSI Port 0 fs interrupt
+ * 2 - CSI Port 0 fe interrupt
+ * 3 - CSI Port 1 error interrupt
+ * 4 - CSI Port 1 fs interrupt
+ * 5 - CSI Port 1 fe interrupt
+ * 6 - CSI Port 2 error interrupt
+ * 7 - CSI Port 2 fs interrupt
+ * 8 - CSI Port 2 fe interrupt
+ */
+#define IPU7_CSI_RX_LEGACY_IRQ_MASK 0x1ff
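+
+/*
+ * Illustrative decode of the legacy status bits above (a sketch, not
+ * driver code): IPU7 uses two bits per port (error, sync) while IPU7P5
+ * uses three (error, fs, fe), e.g. on IPU7P5:
+ *
+ *	err = status & BIT(3 * port);
+ *	fs  = status & BIT(3 * port + 1);
+ *	fe  = status & BIT(3 * port + 2);
+ */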
+
+/*
+ * legacy error status per port:
+ * 0 - Error handler FIFO full;
+ * 1 - Reserved Short Packet encoding detected;
+ * 2 - Reserved Long Packet encoding detected;
+ * 3 - Received packet is too short (fewer data words than specified in header);
+ * 4 - Received packet is too long (more data words than specified in header);
+ * 5 - Short packet discarded due to errors;
+ * 6 - Long packet discarded due to errors;
+ * 7 - CSI Combo Rx interrupt;
+ * 8 - IDI CDC FIFO overflow;
+ * remaining bits are reserved and tied to 0
+ */
+#define IPU7_CSI_RX_ERROR_IRQ_MASK 0xfff
+
+/*
+ * 0 - VC0 frame start received
+ * 1 - VC0 frame end received
+ * 2 - VC1 frame start received
+ * 3 - VC1 frame end received
+ * 4 - VC2 frame start received
+ * 5 - VC2 frame end received
+ * 6 - VC3 frame start received
+ * 7 - VC3 frame end received
+ * 8 - VC4 frame start received
+ * 9 - VC4 frame end received
+ * 10 - VC5 frame start received
+ * 11 - VC5 frame end received
+ * 12 - VC6 frame start received
+ * 13 - VC6 frame end received
+ * 14 - VC7 frame start received
+ * 15 - VC7 frame end received
+ * 16 - VC8 frame start received
+ * 17 - VC8 frame end received
+ * 18 - VC9 frame start received
+ * 19 - VC9 frame end received
+ * 20 - VC10 frame start received
+ * 21 - VC10 frame end received
+ * 22 - VC11 frame start received
+ * 23 - VC11 frame end received
+ * 24 - VC12 frame start received
+ * 25 - VC12 frame end received
+ * 26 - VC13 frame start received
+ * 27 - VC13 frame end received
+ * 28 - VC14 frame start received
+ * 29 - VC14 frame end received
+ * 30 - VC15 frame start received
+ * 31 - VC15 frame end received
+ */
+
+#define IPU7_CSI_RX_SYNC_IRQ_MASK 0x0
+#define IPU7P5_CSI_RX_SYNC_FE_IRQ_MASK 0x0
+
+#define CSI_RX_NUM_ERRORS_IN_IRQ 12U
+#define CSI_RX_NUM_SYNC_IN_IRQ 32U
+
+enum CSI_FE_MODE_TYPE {
+ CSI_FE_DPHY_MODE = 0,
+ CSI_FE_CPHY_MODE = 1,
+};
+
+enum CSI_FE_INPUT_MODE {
+ CSI_SENSOR_INPUT = 0,
+ CSI_MIPIGEN_INPUT = 1,
+};
+
+enum MGC_CSI_ADPL_TYPE {
+ MGC_MAPPED_2_LANES = 0,
+ MGC_MAPPED_4_LANES = 1,
+};
+
+enum CSI2HOST_SELECTION {
+ CSI2HOST_SEL_SOC = 0,
+ CSI2HOST_SEL_CSI2HOST = 1,
+};
+
+#define IPU7_ISYS_LEGACY_IRQ_CSI2(port) (0x3 << (port))
+#define IPU7P5_ISYS_LEGACY_IRQ_CSI2(port) (0x7 << (port))
+
+/* ---------------------------------------------------------------- */
+#define CSI_REG_BASE 0x220000
+#define CSI_REG_BASE_PORT(id) ((id) * 0x1000)
+
+/* CSI Port General Purpose Registers */
+#define CSI_REG_PORT_GPREG_SRST 0x0
+#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4
+#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8
+
+#define CSI_RX_NUM_IRQ 32U
+
+#define IPU7_CSI_RX_SYNC_FS_VC 0x55555555
+#define IPU7_CSI_RX_SYNC_FE_VC 0xaaaaaaaa
+#define IPU7P5_CSI_RX_SYNC_FS_VC 0xffff
+#define IPU7P5_CSI_RX_SYNC_FE_VC 0xffff
+
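+/*
+ * Address computation example, assuming the dwc_phy_*() helpers add the
+ * per-port CDPHY base to the register offsets defined above:
+ *
+ *	IS_IO_CDPHY_BASE(1) + PPI_STARTUP_RW_COMMON_DPHY_10
+ *	= 0x280000 + 0x10000 + 0x1820 = 0x291820
+ */
+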
+#endif /* IPU7_ISYS_CSI2_REG_H */
diff --git a/drivers/staging/media/ipu7/ipu7-isys-csi2.c b/drivers/staging/media/ipu7/ipu7-isys-csi2.c
new file mode 100644
index 000000000000..f34eabfe8a98
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-csi2.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "ipu7.h"
+#include "ipu7-bus.h"
+#include "ipu7-isys.h"
+#include "ipu7-isys-csi2.h"
+#include "ipu7-isys-csi2-regs.h"
+#include "ipu7-isys-csi-phy.h"
+
+static const u32 csi2_supported_codes[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ 0,
+};
+
+s64 ipu7_isys_csi2_get_link_freq(struct ipu7_isys_csi2 *csi2)
+{
+ struct media_pad *src_pad;
+
+ src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
+ if (IS_ERR(src_pad)) {
+ dev_err(&csi2->isys->adev->auxdev.dev,
+ "can't get source pad of %s (%pe)\n",
+ csi2->asd.sd.name, src_pad);
+ return PTR_ERR(src_pad);
+ }
+
+ return v4l2_get_link_freq(src_pad, 0, 0);
+}
+
+static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
+ struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd);
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+
+ dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n",
+ sub->type, sub->id);
+
+ switch (sub->type) {
+ case V4L2_EVENT_FRAME_SYNC:
+ return v4l2_event_subscribe(fh, sub, 10, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
+ .subscribe_event = csi2_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static void csi2_irq_enable(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_device *isp = csi2->isys->adev->isp;
+ unsigned int offset, mask;
+
+ /* enable CSI2 legacy error irq */
+ offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port);
+ mask = IPU7_CSI_RX_ERROR_IRQ_MASK;
+ writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
+ writel(mask, csi2->base + offset + IRQ_CTL_MASK);
+ writel(mask, csi2->base + offset + IRQ_CTL_ENABLE);
+
+ /* enable CSI2 legacy sync irq */
+ offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port);
+ mask = IPU7_CSI_RX_SYNC_IRQ_MASK;
+ writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
+ writel(mask, csi2->base + offset + IRQ_CTL_MASK);
+ writel(mask, csi2->base + offset + IRQ_CTL_ENABLE);
+
+ mask = IPU7P5_CSI_RX_SYNC_FE_IRQ_MASK;
+ if (!is_ipu7(isp->hw_ver)) {
+ writel(mask, csi2->base + offset + IRQ1_CTL_CLEAR);
+ writel(mask, csi2->base + offset + IRQ1_CTL_MASK);
+ writel(mask, csi2->base + offset + IRQ1_CTL_ENABLE);
+ }
+}
+
+static void csi2_irq_disable(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_device *isp = csi2->isys->adev->isp;
+ unsigned int offset, mask;
+
+ /* disable CSI2 legacy error irq */
+ offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port);
+ mask = IPU7_CSI_RX_ERROR_IRQ_MASK;
+ writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
+ writel(0, csi2->base + offset + IRQ_CTL_MASK);
+ writel(0, csi2->base + offset + IRQ_CTL_ENABLE);
+
+ /* disable CSI2 legacy sync irq */
+ offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port);
+ mask = IPU7_CSI_RX_SYNC_IRQ_MASK;
+ writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
+ writel(0, csi2->base + offset + IRQ_CTL_MASK);
+ writel(0, csi2->base + offset + IRQ_CTL_ENABLE);
+
+ if (!is_ipu7(isp->hw_ver)) {
+ writel(mask, csi2->base + offset + IRQ1_CTL_CLEAR);
+ writel(0, csi2->base + offset + IRQ1_CTL_MASK);
+ writel(0, csi2->base + offset + IRQ1_CTL_ENABLE);
+ }
+}
+
+static void ipu7_isys_csi2_disable_stream(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_isys *isys = csi2->isys;
+ void __iomem *isys_base = isys->pdata->base;
+
+ ipu7_isys_csi_phy_powerdown(csi2);
+
+ writel(0x4, isys_base + IS_IO_GPREGS_BASE + CLK_DIV_FACTOR_APB_CLK);
+ csi2_irq_disable(csi2);
+}
+
+static int ipu7_isys_csi2_enable_stream(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_isys *isys = csi2->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *isys_base = isys->pdata->base;
+ unsigned int port, nlanes, offset;
+ int ret;
+
+ port = csi2->port;
+ nlanes = csi2->nlanes;
+
+ offset = IS_IO_GPREGS_BASE;
+ writel(0x2, isys_base + offset + CLK_DIV_FACTOR_APB_CLK);
+ dev_dbg(dev, "port %u CLK_GATE = 0x%04x DIV_FACTOR_APB_CLK=0x%04x\n",
+ port, readl(isys_base + offset + CSI_PORT_CLK_GATE),
+ readl(isys_base + offset + CLK_DIV_FACTOR_APB_CLK));
+ if (port == 0U && nlanes == 4U && !is_ipu7(isys->adev->isp->hw_ver)) {
+ dev_dbg(dev, "CSI port %u in aggregation mode\n", port);
+ writel(0x1, isys_base + offset + CSI_PORTAB_AGGREGATION);
+ }
+
+ /* input is coming from CSI receiver (sensor) */
+ offset = IS_IO_CSI2_ADPL_PORT_BASE(port);
+ writel(CSI_SENSOR_INPUT, isys_base + offset + CSI2_ADPL_INPUT_MODE);
+ writel(1, isys_base + offset + CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_EN);
+
+ ret = ipu7_isys_csi_phy_powerup(csi2);
+ if (ret) {
+ dev_err(dev, "CSI-%d PHY power up failed %d\n", port, ret);
+ return ret;
+ }
+
+ csi2_irq_enable(csi2);
+
+ return 0;
+}
+
+static int ipu7_isys_csi2_set_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
+ struct device *dev = &asd->isys->adev->auxdev.dev;
+ struct v4l2_mbus_framefmt *sink_ffmt;
+ struct v4l2_mbus_framefmt *src_ffmt;
+ struct v4l2_rect *crop;
+
+ if (sel->pad == IPU7_CSI2_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state,
+ sel->pad,
+ sel->stream);
+ if (!sink_ffmt)
+ return -EINVAL;
+
+ src_ffmt = v4l2_subdev_state_get_format(state, sel->pad, sel->stream);
+ if (!src_ffmt)
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream);
+ if (!crop)
+ return -EINVAL;
+
+ /* Only vertical cropping is supported */
+ sel->r.left = 0;
+ sel->r.width = sink_ffmt->width;
+ /* Non-bayer formats can't be single line cropped */
+ if (!ipu7_isys_is_bayer_format(sink_ffmt->code))
+ sel->r.top &= ~1U;
+ sel->r.height = clamp(sel->r.height & ~1U, IPU_ISYS_MIN_HEIGHT,
+ sink_ffmt->height - sel->r.top);
+ *crop = sel->r;
+
+ /* update source pad format */
+ src_ffmt->width = sel->r.width;
+ src_ffmt->height = sel->r.height;
+ if (ipu7_isys_is_bayer_format(sink_ffmt->code))
+ src_ffmt->code = ipu7_isys_convert_bayer_order(sink_ffmt->code,
+ sel->r.left,
+ sel->r.top);
+ dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n",
+ sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ src_ffmt->code);
+
+ return 0;
+}
+
+static int ipu7_isys_csi2_get_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *sink_ffmt;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK)
+ return -EINVAL;
+
+ sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state,
+ sel->pad,
+ sel->stream);
+ if (!sink_ffmt)
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream);
+ if (!crop)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = sink_ffmt->width;
+ sel->r.height = sink_ffmt->height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * The maximum stream ID is 63 for now, as a u64 bitmask is used to
+ * represent a set of streams.
+ */
+#define CSI2_SUBDEV_MAX_STREAM_ID 63
+
+static int ipu7_isys_csi2_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
+ struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd);
+ struct v4l2_subdev *r_sd;
+ struct media_pad *rp;
+ u32 sink_pad, sink_stream;
+ int ret, i;
+
+ if (!csi2->stream_count) {
+ dev_dbg(&csi2->isys->adev->auxdev.dev,
+ "stream on CSI2-%u with %u lanes\n", csi2->port,
+ csi2->nlanes);
+ ret = ipu7_isys_csi2_enable_stream(csi2);
+ if (ret)
+ return ret;
+ }
+
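+	/*
+	 * Use the lowest stream ID set in streams_mask to look up the
+	 * corresponding sink pad and sink stream in the routing table.
+	 */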
+ for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) {
+ if (streams_mask & BIT_ULL(i))
+ break;
+ }
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i,
+ &sink_pad, &sink_stream);
+ if (ret)
+ return ret;
+
+ rp = media_pad_remote_pad_first(&sd->entity.pads[IPU7_CSI2_PAD_SINK]);
+ r_sd = media_entity_to_v4l2_subdev(rp->entity);
+
+ ret = v4l2_subdev_enable_streams(r_sd, rp->index,
+ BIT_ULL(sink_stream));
+ if (!ret) {
+ csi2->stream_count++;
+ return 0;
+ }
+
+ if (!csi2->stream_count)
+ ipu7_isys_csi2_disable_stream(csi2);
+
+ return ret;
+}
+
+static int ipu7_isys_csi2_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
+ struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd);
+ struct v4l2_subdev *r_sd;
+ struct media_pad *rp;
+ u32 sink_pad, sink_stream;
+ int ret, i;
+
+ for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) {
+ if (streams_mask & BIT_ULL(i))
+ break;
+ }
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i,
+ &sink_pad, &sink_stream);
+ if (ret)
+ return ret;
+
+ rp = media_pad_remote_pad_first(&sd->entity.pads[IPU7_CSI2_PAD_SINK]);
+ r_sd = media_entity_to_v4l2_subdev(rp->entity);
+
+ v4l2_subdev_disable_streams(r_sd, rp->index, BIT_ULL(sink_stream));
+
+ if (--csi2->stream_count)
+ return 0;
+
+ dev_dbg(&csi2->isys->adev->auxdev.dev,
+ "stream off CSI2-%u with %u lanes\n", csi2->port, csi2->nlanes);
+
+ ipu7_isys_csi2_disable_stream(csi2);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = {
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ipu7_isys_subdev_set_fmt,
+ .get_selection = ipu7_isys_csi2_get_sel,
+ .set_selection = ipu7_isys_csi2_set_sel,
+ .enum_mbus_code = ipu7_isys_subdev_enum_mbus_code,
+ .enable_streams = ipu7_isys_csi2_enable_streams,
+ .disable_streams = ipu7_isys_csi2_disable_streams,
+ .set_routing = ipu7_isys_subdev_set_routing,
+};
+
+static const struct v4l2_subdev_ops csi2_sd_ops = {
+ .core = &csi2_sd_core_ops,
+ .pad = &csi2_sd_pad_ops,
+};
+
+static const struct media_entity_operations csi2_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_pad_interdep = v4l2_subdev_has_pad_interdep,
+};
+
+void ipu7_isys_csi2_cleanup(struct ipu7_isys_csi2 *csi2)
+{
+ if (!csi2->isys)
+ return;
+
+ v4l2_device_unregister_subdev(&csi2->asd.sd);
+ v4l2_subdev_cleanup(&csi2->asd.sd);
+ ipu7_isys_subdev_cleanup(&csi2->asd);
+ csi2->isys = NULL;
+}
+
+int ipu7_isys_csi2_init(struct ipu7_isys_csi2 *csi2,
+ struct ipu7_isys *isys,
+ void __iomem *base, unsigned int index)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ int ret;
+
+ csi2->isys = isys;
+ csi2->base = base;
+ csi2->port = index;
+
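+	/*
+	 * Each CSI-2 port owns a group of consecutive bits in the legacy
+	 * IRQ status: three bits per port on non-IPU7 hardware, two bits
+	 * per port on IPU7.
+	 */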
+ if (!is_ipu7(isys->adev->isp->hw_ver))
+ csi2->legacy_irq_mask = 0x7U << (index * 3U);
+ else
+ csi2->legacy_irq_mask = 0x3U << (index * 2U);
+
+ dev_dbg(dev, "csi-%d legacy irq mask = 0x%x\n", index,
+ csi2->legacy_irq_mask);
+
+ csi2->asd.sd.entity.ops = &csi2_entity_ops;
+ csi2->asd.isys = isys;
+
+ ret = ipu7_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0,
+ IPU7_NR_OF_CSI2_SINK_PADS,
+ IPU7_NR_OF_CSI2_SRC_PADS);
+ if (ret)
+ return ret;
+
+ csi2->asd.source = (int)index;
+ csi2->asd.supported_codes = csi2_supported_codes;
+ snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name),
+ IPU_ISYS_ENTITY_PREFIX " CSI2 %u", index);
+ v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd);
+
+ ret = v4l2_subdev_init_finalize(&csi2->asd.sd);
+ if (ret) {
+ dev_err(dev, "failed to init v4l2 subdev (%d)\n", ret);
+ goto isys_subdev_cleanup;
+ }
+
+ ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd);
+ if (ret) {
+ dev_err(dev, "failed to register v4l2 subdev (%d)\n", ret);
+ goto v4l2_subdev_cleanup;
+ }
+
+ return 0;
+
+v4l2_subdev_cleanup:
+ v4l2_subdev_cleanup(&csi2->asd.sd);
+isys_subdev_cleanup:
+ ipu7_isys_subdev_cleanup(&csi2->asd);
+
+ return ret;
+}
+
+void ipu7_isys_csi2_sof_event_by_stream(struct ipu7_isys_stream *stream)
+{
+ struct ipu7_isys_csi2 *csi2 = ipu7_isys_subdev_to_csi2(stream->asd);
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct video_device *vdev = csi2->asd.sd.devnode;
+ struct v4l2_event ev = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+
+ ev.id = stream->vc;
+ ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence);
+ v4l2_event_queue(vdev, &ev);
+
+ dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n",
+ csi2->port, ev.u.frame_sync.frame_sequence, stream->vc);
+}
+
+void ipu7_isys_csi2_eof_event_by_stream(struct ipu7_isys_stream *stream)
+{
+ struct ipu7_isys_csi2 *csi2 = ipu7_isys_subdev_to_csi2(stream->asd);
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ u32 frame_sequence = atomic_read(&stream->sequence);
+
+ dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n",
+ csi2->port, frame_sequence);
+}
+
+int ipu7_isys_csi2_get_remote_desc(u32 source_stream,
+ struct ipu7_isys_csi2 *csi2,
+ struct media_entity *source_entity,
+ struct v4l2_mbus_frame_desc_entry *entry,
+ int *nr_queues)
+{
+ struct v4l2_mbus_frame_desc_entry *desc_entry = NULL;
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+ struct v4l2_mbus_frame_desc desc;
+ struct v4l2_subdev *source;
+ struct media_pad *pad;
+ unsigned int i;
+ int ret;
+
+ source = media_entity_to_v4l2_subdev(source_entity);
+ if (!source)
+ return -EPIPE;
+
+ pad = media_pad_remote_pad_first(&csi2->asd.pad[IPU7_CSI2_PAD_SINK]);
+ if (!pad)
+ return -EPIPE;
+
+ ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc);
+ if (ret)
+ return ret;
+
+ if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+ dev_err(dev, "Unsupported frame descriptor type\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < desc.num_entries; i++) {
+ if (source_stream == desc.entry[i].stream) {
+ desc_entry = &desc.entry[i];
+ break;
+ }
+ }
+
+ if (!desc_entry) {
+ dev_err(dev, "Failed to find stream %u from remote subdev\n",
+ source_stream);
+ return -EINVAL;
+ }
+
+ if (desc_entry->bus.csi2.vc >= IPU7_NR_OF_CSI2_VC) {
+ dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc);
+ return -EINVAL;
+ }
+
+ *entry = *desc_entry;
+
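+	/* Count the descriptor entries sharing the matched entry's VC. */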
+ for (i = 0; i < desc.num_entries; i++) {
+ if (desc_entry->bus.csi2.vc == desc.entry[i].bus.csi2.vc)
+ (*nr_queues)++;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/ipu7/ipu7-isys-csi2.h b/drivers/staging/media/ipu7/ipu7-isys-csi2.h
new file mode 100644
index 000000000000..6c23b80f92a2
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-csi2.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_ISYS_CSI2_H
+#define IPU7_ISYS_CSI2_H
+
+#include <linux/container_of.h>
+#include <linux/types.h>
+
+#include "ipu7-isys-subdev.h"
+#include "ipu7-isys-video.h"
+
+struct ipu7_isys;
+struct ipu7_isys_csi2_pdata;
+struct ipu7_isys_stream;
+
+#define IPU7_NR_OF_CSI2_VC 16U
+#define INVALID_VC_ID -1
+#define IPU7_NR_OF_CSI2_SINK_PADS 1U
+#define IPU7_CSI2_PAD_SINK 0U
+#define IPU7_NR_OF_CSI2_SRC_PADS 8U
+#define IPU7_CSI2_PAD_SRC 1U
+#define IPU7_NR_OF_CSI2_PADS (IPU7_NR_OF_CSI2_SINK_PADS + \
+ IPU7_NR_OF_CSI2_SRC_PADS)
+
+/*
+ * struct ipu7_isys_csi2
+ *
+ * @nlanes: number of lanes in the receiver
+ */
+struct ipu7_isys_csi2 {
+ struct ipu7_isys_subdev asd;
+ struct ipu7_isys_csi2_pdata *pdata;
+ struct ipu7_isys *isys;
+ struct ipu7_isys_video av[IPU7_NR_OF_CSI2_SRC_PADS];
+
+ void __iomem *base;
+ u32 receiver_errors;
+ u32 legacy_irq_mask;
+ unsigned int nlanes;
+ unsigned int port;
+ unsigned int phy_mode;
+ unsigned int stream_count;
+};
+
+#define ipu7_isys_subdev_to_csi2(__sd) \
+ container_of(__sd, struct ipu7_isys_csi2, asd)
+
+#define to_ipu7_isys_csi2(__asd) container_of(__asd, struct ipu7_isys_csi2, asd)
+
+s64 ipu7_isys_csi2_get_link_freq(struct ipu7_isys_csi2 *csi2);
+int ipu7_isys_csi2_init(struct ipu7_isys_csi2 *csi2, struct ipu7_isys *isys,
+ void __iomem *base, unsigned int index);
+void ipu7_isys_csi2_cleanup(struct ipu7_isys_csi2 *csi2);
+void ipu7_isys_csi2_sof_event_by_stream(struct ipu7_isys_stream *stream);
+void ipu7_isys_csi2_eof_event_by_stream(struct ipu7_isys_stream *stream);
+int ipu7_isys_csi2_get_remote_desc(u32 source_stream,
+ struct ipu7_isys_csi2 *csi2,
+ struct media_entity *source_entity,
+ struct v4l2_mbus_frame_desc_entry *entry,
+ int *nr_queues);
+#endif /* IPU7_ISYS_CSI2_H */
diff --git a/drivers/staging/media/ipu7/ipu7-isys-queue.c b/drivers/staging/media/ipu7/ipu7-isys-queue.c
new file mode 100644
index 000000000000..434d9d9c7158
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-queue.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "abi/ipu7_fw_isys_abi.h"
+
+#include "ipu7-bus.h"
+#include "ipu7-dma.h"
+#include "ipu7-fw-isys.h"
+#include "ipu7-isys.h"
+#include "ipu7-isys-csi2-regs.h"
+#include "ipu7-isys-video.h"
+#include "ipu7-platform-regs.h"
+
+#define IPU_MAX_FRAME_COUNTER (U8_MAX + 1)
+
+static int ipu7_isys_buf_init(struct vb2_buffer *vb)
+{
+ struct ipu7_isys *isys = vb2_get_drv_priv(vb->vb2_queue);
+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu7_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu7_isys_video_buffer(vvb);
+ int ret;
+
+ ret = ipu7_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0);
+ if (ret)
+ return ret;
+
+ ivb->dma_addr = sg_dma_address(sg->sgl);
+
+ return 0;
+}
+
+static void ipu7_isys_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct ipu7_isys *isys = vb2_get_drv_priv(vb->vb2_queue);
+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu7_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu7_isys_video_buffer(vvb);
+
+ ivb->dma_addr = 0;
+ ipu7_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0);
+}
+
+static int ipu7_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q);
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ u32 size = av->pix_fmt.sizeimage;
+
+ /* num_planes == 0: we're being called through VIDIOC_REQBUFS */
+ if (!*num_planes) {
+ sizes[0] = size;
+ } else if (sizes[0] < size) {
+ dev_dbg(dev, "%s: queue setup: size %u < %u\n",
+ av->vdev.name, sizes[0], size);
+ return -EINVAL;
+ }
+
+ *num_planes = 1;
+
+ return 0;
+}
+
+static int ipu7_isys_buf_prepare(struct vb2_buffer *vb)
+{
+ struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ u32 bytesperline = av->pix_fmt.bytesperline;
+ u32 height = av->pix_fmt.height;
+
+ dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n",
+ av->vdev.name, av->pix_fmt.sizeimage, vb2_plane_size(vb, 0));
+
+ if (av->pix_fmt.sizeimage > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ dev_dbg(dev, "buffer: %s: bytesperline %u, height %u\n",
+ av->vdev.name, bytesperline, height);
+ vb2_set_plane_payload(vb, 0, bytesperline * height);
+
+ return 0;
+}
+
+/*
+ * Queue a buffer list back to incoming or active queues. The buffers
+ * are removed from the buffer list.
+ */
+void ipu7_isys_buffer_list_queue(struct ipu7_isys_buffer_list *bl,
+ unsigned long op_flags,
+ enum vb2_buffer_state state)
+{
+ struct ipu7_isys_buffer *ib, *ib_safe;
+ unsigned long flags;
+ bool first = true;
+
+ if (!bl)
+ return;
+
+ WARN_ON_ONCE(!bl->nbufs);
+ WARN_ON_ONCE(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE &&
+ op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING);
+
+ list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
+ struct ipu7_isys_video *av;
+
+ struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib);
+ struct ipu7_isys_queue *aq =
+ vb2_queue_to_isys_queue(vb->vb2_queue);
+
+ av = ipu7_isys_queue_to_video(aq);
+ spin_lock_irqsave(&aq->lock, flags);
+ list_del(&ib->head);
+ if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
+ list_add(&ib->head, &aq->active);
+ else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
+ list_add_tail(&ib->head, &aq->incoming);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE)
+ vb2_buffer_done(vb, state);
+
+ if (first) {
+ dev_dbg(&av->isys->adev->auxdev.dev,
+ "queue buf list %p flags %lx, s %d, %d bufs\n",
+ bl, op_flags, state, bl->nbufs);
+ first = false;
+ }
+
+ bl->nbufs--;
+ }
+
+ WARN_ON(bl->nbufs);
+}
+
+/*
+ * flush_firmware_streamon_fail() - Flush buffers in cases where requests
+ * may already have been queued to firmware and the firmware streamon
+ * fails for one reason or another.
+ */
+static void flush_firmware_streamon_fail(struct ipu7_isys_stream *stream)
+{
+ struct ipu7_isys_queue *aq;
+ unsigned long flags;
+
+ lockdep_assert_held(&stream->mutex);
+
+ list_for_each_entry(aq, &stream->queues, node) {
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu7_isys_buffer *ib, *ib_safe;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ list_for_each_entry_safe(ib, ib_safe, &aq->active, head) {
+ struct vb2_buffer *vb =
+ ipu7_isys_buffer_to_vb2_buffer(ib);
+
+ list_del(&ib->head);
+ if (av->streaming) {
+ dev_dbg(dev,
+ "%s: queue buffer %u back to incoming\n",
+ av->vdev.name, vb->index);
+ /* Queue already streaming, return to driver. */
+ list_add(&ib->head, &aq->incoming);
+ continue;
+ }
+ /* Queue not yet streaming, return to user. */
+ dev_dbg(dev, "%s: return %u back to videobuf2\n",
+ av->vdev.name, vb->index);
+ vb2_buffer_done(ipu7_isys_buffer_to_vb2_buffer(ib),
+ VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&aq->lock, flags);
+ }
+}
+
+/*
+ * Attempt to obtain a buffer list from the incoming queues: a list that
+ * contains one buffer from each video buffer queue. If a buffer can't be
+ * obtained from every queue, the buffers already taken are returned to
+ * their queues.
+ */
+static int buffer_list_get(struct ipu7_isys_stream *stream,
+ struct ipu7_isys_buffer_list *bl)
+{
+ unsigned long buf_flag = IPU_ISYS_BUFFER_LIST_FL_INCOMING;
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu7_isys_queue *aq;
+ unsigned long flags;
+
+ bl->nbufs = 0;
+ INIT_LIST_HEAD(&bl->head);
+
+ list_for_each_entry(aq, &stream->queues, node) {
+ struct ipu7_isys_buffer *ib;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ if (list_empty(&aq->incoming)) {
+ spin_unlock_irqrestore(&aq->lock, flags);
+ if (!list_empty(&bl->head))
+ ipu7_isys_buffer_list_queue(bl, buf_flag, 0);
+ return -ENODATA;
+ }
+
+ ib = list_last_entry(&aq->incoming,
+ struct ipu7_isys_buffer, head);
+
+ dev_dbg(dev, "buffer: %s: buffer %u\n",
+ ipu7_isys_queue_to_video(aq)->vdev.name,
+ ipu7_isys_buffer_to_vb2_buffer(ib)->index);
+ list_del(&ib->head);
+ list_add(&ib->head, &bl->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ bl->nbufs++;
+ }
+
+ dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs);
+
+ return 0;
+}
+
+static void ipu7_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb,
+ struct ipu7_insys_buffset *set)
+{
+ struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu7_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu7_isys_video_buffer(vvb);
+
+ set->output_pins[aq->fw_output].addr = ivb->dma_addr;
+ set->output_pins[aq->fw_output].user_token = (uintptr_t)set;
+}
+
+/*
+ * Convert a buffer list to an isys fw ABI framebuffer set. The
+ * buffer list is not modified.
+ */
+#define IPU_ISYS_FRAME_NUM_THRESHOLD (30)
+void ipu7_isys_buffer_to_fw_frame_buff(struct ipu7_insys_buffset *set,
+ struct ipu7_isys_stream *stream,
+ struct ipu7_isys_buffer_list *bl)
+{
+ struct ipu7_isys_buffer *ib;
+ u32 buf_id;
+
+ WARN_ON(!bl->nbufs);
+
+ set->skip_frame = 0;
+ set->capture_msg_map = IPU_INSYS_FRAME_ENABLE_MSG_SEND_RESP |
+ IPU_INSYS_FRAME_ENABLE_MSG_SEND_IRQ;
+
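+	/*
+	 * frame_id is an 8-bit rolling counter (IPU_MAX_FRAME_COUNTER is
+	 * U8_MAX + 1) derived from the monotonically increasing buf_id.
+	 */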
+ buf_id = atomic_fetch_inc(&stream->buf_id);
+ set->frame_id = buf_id % IPU_MAX_FRAME_COUNTER;
+
+ list_for_each_entry(ib, &bl->head, head) {
+ struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib);
+
+ ipu7_isys_buf_to_fw_frame_buf_pin(vb, set);
+ }
+}
+
+/* Start streaming for real. The buffer list must be available. */
+static int ipu7_isys_stream_start(struct ipu7_isys_video *av,
+ struct ipu7_isys_buffer_list *bl, bool error)
+{
+ struct ipu7_isys_stream *stream = av->stream;
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu7_isys_buffer_list __bl;
+ int ret;
+
+ mutex_lock(&stream->isys->stream_mutex);
+
+ ret = ipu7_isys_video_set_streaming(av, 1, bl);
+ mutex_unlock(&stream->isys->stream_mutex);
+ if (ret)
+ goto out_requeue;
+
+ stream->streaming = 1;
+
+ bl = &__bl;
+
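+	/* Queue every complete buffer list we can assemble to firmware. */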
+ do {
+ struct ipu7_insys_buffset *buf = NULL;
+ struct isys_fw_msgs *msg;
+ enum ipu7_insys_send_type send_type =
+ IPU_INSYS_SEND_TYPE_STREAM_CAPTURE;
+
+ ret = buffer_list_get(stream, bl);
+ if (ret < 0)
+ break;
+
+ msg = ipu7_get_fw_msg_buf(stream);
+ if (!msg)
+ return -ENOMEM;
+
+ buf = &msg->fw_msg.frame;
+
+ ipu7_isys_buffer_to_fw_frame_buff(buf, stream, bl);
+
+ ipu7_fw_isys_dump_frame_buff_set(dev, buf,
+ stream->nr_output_pins);
+
+ ipu7_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE,
+ 0);
+
+ ret = ipu7_fw_isys_complex_cmd(stream->isys,
+ stream->stream_handle, buf,
+ msg->dma_addr, sizeof(*buf),
+ send_type);
+ } while (!WARN_ON(ret));
+
+ return 0;
+
+out_requeue:
+ if (bl && bl->nbufs)
+ ipu7_isys_buffer_list_queue(bl,
+ IPU_ISYS_BUFFER_LIST_FL_INCOMING |
+ (error ?
+ IPU_ISYS_BUFFER_LIST_FL_SET_STATE :
+ 0), error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
+ flush_firmware_streamon_fail(stream);
+
+ return ret;
+}
+
+static void buf_queue(struct vb2_buffer *vb)
+{
+ struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb);
+ struct ipu7_isys_video_buffer *ivb =
+ vb2_buffer_to_ipu7_isys_video_buffer(vvb);
+ struct media_pipeline *media_pipe =
+ media_entity_pipeline(&av->vdev.entity);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu7_isys_stream *stream = av->stream;
+ struct ipu7_isys_buffer *ib = &ivb->ib;
+ struct ipu7_insys_buffset *buf = NULL;
+ struct ipu7_isys_buffer_list bl;
+ struct isys_fw_msgs *msg;
+ unsigned long flags;
+ dma_addr_t dma;
+ int ret;
+
+ dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name);
+
+ dma = ivb->dma_addr;
+ dev_dbg(dev, "iova: iova %pad\n", &dma);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ list_add(&ib->head, &aq->incoming);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (!media_pipe || !vb->vb2_queue->start_streaming_called) {
+ dev_dbg(dev, "media pipeline is not ready for %s\n",
+ av->vdev.name);
+ return;
+ }
+
+ mutex_lock(&stream->mutex);
+
+ if (stream->nr_streaming != stream->nr_queues) {
+ dev_dbg(dev, "not streaming yet, adding to incoming\n");
+ goto out;
+ }
+
+ /*
+	 * We just added one buffer to the incoming list of this queue
+	 * (above). Check whether every queue in the pipeline now has a
+	 * buffer available.
+ */
+ ret = buffer_list_get(stream, &bl);
+ if (ret < 0) {
+ dev_dbg(dev, "No buffers available\n");
+ goto out;
+ }
+
+ msg = ipu7_get_fw_msg_buf(stream);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ buf = &msg->fw_msg.frame;
+
+ ipu7_isys_buffer_to_fw_frame_buff(buf, stream, &bl);
+
+ ipu7_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins);
+
+ if (!stream->streaming) {
+ ret = ipu7_isys_stream_start(av, &bl, true);
+ if (ret)
+ dev_err(dev, "stream start failed.\n");
+ goto out;
+ }
+
+ /*
+ * We must queue the buffers in the buffer list to the
+ * appropriate video buffer queues BEFORE passing them to the
+ * firmware since we could get a buffer event back before we
+ * have queued them ourselves to the active queue.
+ */
+ ipu7_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+ ret = ipu7_fw_isys_complex_cmd(stream->isys, stream->stream_handle,
+ buf, msg->dma_addr, sizeof(*buf),
+ IPU_INSYS_SEND_TYPE_STREAM_CAPTURE);
+ if (ret < 0)
+ dev_err(dev, "send stream capture failed\n");
+
+out:
+ mutex_unlock(&stream->mutex);
+}
+
+static int ipu7_isys_link_fmt_validate(struct ipu7_isys_queue *aq)
+{
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct media_pad *remote_pad =
+ media_pad_remote_pad_first(av->vdev.entity.pads);
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_subdev *sd;
+ u32 r_stream = 0, code;
+ int ret;
+
+ if (!remote_pad)
+ return -ENOTCONN;
+
+ sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+
+ ret = ipu7_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream,
+ &format);
+ if (ret) {
+ dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n",
+ sd->entity.name, remote_pad->index, r_stream);
+ return ret;
+ }
+
+ if (format.width != av->pix_fmt.width ||
+ format.height != av->pix_fmt.height) {
+ dev_dbg(dev, "wrong width or height %ux%u (%ux%u expected)\n",
+ av->pix_fmt.width, av->pix_fmt.height, format.width,
+ format.height);
+ return -EINVAL;
+ }
+
+ code = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat)->code;
+ if (format.code != code) {
+ dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n",
+ code, format.code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void return_buffers(struct ipu7_isys_queue *aq,
+ enum vb2_buffer_state state)
+{
+ struct ipu7_isys_buffer *ib;
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ /*
+	 * Something went wrong (FW crash / HW hang / not all buffers
+	 * returned from isys) if there are still buffers queued on the
+	 * active queue. Clean them up here.
+ */
+ while (!list_empty(&aq->active)) {
+ ib = list_last_entry(&aq->active, struct ipu7_isys_buffer,
+ head);
+ vb = ipu7_isys_buffer_to_vb2_buffer(ib);
+
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ vb2_buffer_done(vb, state);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ }
+
+ while (!list_empty(&aq->incoming)) {
+ ib = list_last_entry(&aq->incoming, struct ipu7_isys_buffer,
+ head);
+ vb = ipu7_isys_buffer_to_vb2_buffer(ib);
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ vb2_buffer_done(vb, state);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&aq->lock, flags);
+}
+
+static void ipu7_isys_stream_cleanup(struct ipu7_isys_video *av)
+{
+ video_device_pipeline_stop(&av->vdev);
+ ipu7_isys_put_stream(av->stream);
+ av->stream = NULL;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q);
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ const struct ipu7_isys_pixelformat *pfmt =
+ ipu7_isys_get_isys_format(av->pix_fmt.pixelformat);
+ struct ipu7_isys_buffer_list __bl, *bl = NULL;
+ struct ipu7_isys_stream *stream;
+ struct media_entity *source_entity = NULL;
+ int nr_queues, ret;
+
+ dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n",
+ av->vdev.name, av->pix_fmt.width, av->pix_fmt.height,
+ pfmt->css_pixelformat);
+
+ ret = ipu7_isys_setup_video(av, &source_entity, &nr_queues);
+ if (ret < 0) {
+ dev_dbg(dev, "failed to setup video\n");
+ goto out_return_buffers;
+ }
+
+ ret = ipu7_isys_link_fmt_validate(aq);
+ if (ret) {
+ dev_dbg(dev,
+ "%s: link format validation failed (%d)\n",
+ av->vdev.name, ret);
+ goto out_pipeline_stop;
+ }
+
+ stream = av->stream;
+ mutex_lock(&stream->mutex);
+ if (!stream->nr_streaming) {
+ ret = ipu7_isys_video_prepare_stream(av, source_entity,
+ nr_queues);
+ if (ret) {
+ mutex_unlock(&stream->mutex);
+ goto out_pipeline_stop;
+ }
+ }
+
+ stream->nr_streaming++;
+ dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming,
+ stream->nr_queues);
+
+ list_add(&aq->node, &stream->queues);
+
+ if (stream->nr_streaming != stream->nr_queues)
+ goto out;
+
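+	/*
+	 * All video nodes of the stream are now streaming: gather one
+	 * buffer from each queue and start the firmware stream.
+	 */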
+ bl = &__bl;
+ ret = buffer_list_get(stream, bl);
+ if (ret < 0) {
+ dev_warn(dev, "no buffer available, DRIVER BUG?\n");
+ goto out;
+ }
+
+ ret = ipu7_isys_fw_open(av->isys);
+ if (ret)
+ goto out_stream_start;
+
+ ipu7_isys_setup_hw(av->isys);
+
+ ret = ipu7_isys_stream_start(av, bl, false);
+ if (ret)
+ goto out_isys_fw_close;
+
+out:
+ mutex_unlock(&stream->mutex);
+
+ return 0;
+
+out_isys_fw_close:
+ ipu7_isys_fw_close(av->isys);
+
+out_stream_start:
+ list_del(&aq->node);
+ stream->nr_streaming--;
+ mutex_unlock(&stream->mutex);
+
+out_pipeline_stop:
+ ipu7_isys_stream_cleanup(av);
+
+out_return_buffers:
+ return_buffers(aq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q);
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct ipu7_isys_stream *stream = av->stream;
+
+ mutex_lock(&stream->mutex);
+ mutex_lock(&av->isys->stream_mutex);
+ if (stream->nr_streaming == stream->nr_queues && stream->streaming)
+ ipu7_isys_video_set_streaming(av, 0, NULL);
+ mutex_unlock(&av->isys->stream_mutex);
+
+ stream->nr_streaming--;
+ list_del(&aq->node);
+ stream->streaming = 0;
+
+ mutex_unlock(&stream->mutex);
+
+ ipu7_isys_stream_cleanup(av);
+
+ return_buffers(aq, VB2_BUF_STATE_ERROR);
+
+ ipu7_isys_fw_close(av->isys);
+}
+
+static unsigned int
+get_sof_sequence_by_timestamp(struct ipu7_isys_stream *stream, u64 time)
+{
+ struct ipu7_isys *isys = stream->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ unsigned int i;
+
+ /*
+	 * Some FPGA platforms have no TSC, so the timestamp is invalid;
+	 * in that case get the sequence directly from the pipeline.
+ */
+ if (time == 0)
+ return atomic_read(&stream->sequence) - 1;
+
+ for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+ if (time == stream->seq[i].timestamp) {
+ dev_dbg(dev, "SOF: using seq nr %u for ts %llu\n",
+ stream->seq[i].sequence, time);
+ return stream->seq[i].sequence;
+ }
+
+ dev_dbg(dev, "SOF: looking for %llu\n", time);
+ for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+ dev_dbg(dev, "SOF: sequence %u, timestamp value %llu\n",
+ stream->seq[i].sequence, stream->seq[i].timestamp);
+ dev_dbg(dev, "SOF sequence number not found\n");
+
+ return atomic_read(&stream->sequence) - 1;
+}
+
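+/*
+ * Return how many nanoseconds ago the SOF timestamp @time was taken,
+ * measured against the current buttress TSC value.
+ */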
+static u64 get_sof_ns_delta(struct ipu7_isys_video *av, u64 time)
+{
+ struct ipu7_bus_device *adev = av->isys->adev;
+ struct ipu7_device *isp = adev->isp;
+ u64 delta, tsc_now;
+
+ ipu_buttress_tsc_read(isp, &tsc_now);
+ if (!tsc_now)
+ return 0;
+
+ delta = tsc_now - time;
+
+ return ipu_buttress_tsc_ticks_to_ns(delta, isp);
+}
+
+static void ipu7_isys_buf_calc_sequence_time(struct ipu7_isys_buffer *ib,
+ u64 time)
+{
+ struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu7_isys_stream *stream = av->stream;
+ u64 ns;
+ u32 sequence;
+
+ ns = ktime_get_ns() - get_sof_ns_delta(av, time);
+ sequence = get_sof_sequence_by_timestamp(stream, time);
+
+ vbuf->vb2_buf.timestamp = ns;
+ vbuf->sequence = sequence;
+
+ dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n",
+ av->vdev.name, ktime_get_ns(), sequence);
+ dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index,
+ vbuf->vb2_buf.timestamp);
+}
+
+static void ipu7_isys_queue_buf_done(struct ipu7_isys_buffer *ib)
+{
+ struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib);
+
+ if (atomic_read(&ib->str2mmio_flag)) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ /*
+		 * The operation on the buffer ended with an error; it will
+		 * be reported to userspace when the buffer is dequeued.
+ */
+ atomic_set(&ib->str2mmio_flag, 0);
+ } else {
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+}
+
+void ipu7_isys_queue_buf_ready(struct ipu7_isys_stream *stream,
+ struct ipu7_insys_resp *info)
+{
+ struct ipu7_isys_queue *aq = stream->output_pins[info->pin_id].aq;
+ u64 time = ((u64)info->timestamp[1] << 32 | info->timestamp[0]);
+ struct ipu7_isys *isys = stream->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct ipu7_isys_buffer *ib;
+ struct vb2_buffer *vb;
+ unsigned long flags;
+ bool first = true;
+ struct vb2_v4l2_buffer *buf;
+
+ dev_dbg(dev, "buffer: %s: received buffer %8.8x %d\n",
+ ipu7_isys_queue_to_video(aq)->vdev.name, info->pin.addr,
+ info->frame_id);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ if (list_empty(&aq->active)) {
+ spin_unlock_irqrestore(&aq->lock, flags);
+ dev_err(dev, "active queue empty\n");
+ return;
+ }
+
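+	/*
+	 * Find the active buffer whose DMA address matches the pin address
+	 * reported by firmware, starting from the tail (oldest buffer).
+	 */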
+ list_for_each_entry_reverse(ib, &aq->active, head) {
+ struct ipu7_isys_video_buffer *ivb;
+ struct vb2_v4l2_buffer *vvb;
+ dma_addr_t addr;
+
+ vb = ipu7_isys_buffer_to_vb2_buffer(ib);
+ vvb = to_vb2_v4l2_buffer(vb);
+ ivb = vb2_buffer_to_ipu7_isys_video_buffer(vvb);
+ addr = ivb->dma_addr;
+
+ if (info->pin.addr != addr) {
+ if (first)
+ dev_err(dev, "Unexpected buffer address %pad\n",
+ &addr);
+
+ first = false;
+ continue;
+ }
+
+ dev_dbg(dev, "buffer: found buffer %pad\n", &addr);
+
+ buf = to_vb2_v4l2_buffer(vb);
+ buf->field = V4L2_FIELD_NONE;
+
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ ipu7_isys_buf_calc_sequence_time(ib, time);
+
+ ipu7_isys_queue_buf_done(ib);
+
+ return;
+ }
+
+ dev_err(dev, "Failed to find a matching video buffer\n");
+
+ spin_unlock_irqrestore(&aq->lock, flags);
+}
+
+static const struct vb2_ops ipu7_isys_queue_ops = {
+ .queue_setup = ipu7_isys_queue_setup,
+ .buf_init = ipu7_isys_buf_init,
+ .buf_prepare = ipu7_isys_buf_prepare,
+ .buf_cleanup = ipu7_isys_buf_cleanup,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .buf_queue = buf_queue,
+};
+
+int ipu7_isys_queue_init(struct ipu7_isys_queue *aq)
+{
+ struct ipu7_isys *isys = ipu7_isys_queue_to_video(aq)->isys;
+ struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
+ struct ipu7_bus_device *adev = isys->adev;
+ int ret;
+
+ if (!aq->vbq.io_modes)
+ aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF;
+
+ aq->vbq.drv_priv = isys;
+ aq->vbq.ops = &ipu7_isys_queue_ops;
+ aq->vbq.lock = &av->mutex;
+ aq->vbq.mem_ops = &vb2_dma_sg_memops;
+ aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ aq->vbq.min_queued_buffers = 1;
+ aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ ret = vb2_queue_init(&aq->vbq);
+ if (ret)
+ return ret;
+
+ aq->dev = &adev->auxdev.dev;
+ aq->vbq.dev = &adev->isp->pdev->dev;
+ spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->active);
+ INIT_LIST_HEAD(&aq->incoming);
+
+ return 0;
+}
diff --git a/drivers/staging/media/ipu7/ipu7-isys-queue.h b/drivers/staging/media/ipu7/ipu7-isys-queue.h
new file mode 100644
index 000000000000..0cb08a38f756
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-queue.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_ISYS_QUEUE_H
+#define IPU7_ISYS_QUEUE_H
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+
+#include <media/videobuf2-v4l2.h>
+
+struct device;
+struct ipu7_isys_stream;
+struct ipu7_insys_resp;
+struct ipu7_insys_buffset;
+
+struct ipu7_isys_queue {
+ struct vb2_queue vbq;
+ struct list_head node;
+ struct device *dev;
+ spinlock_t lock;
+ struct list_head active;
+ struct list_head incoming;
+ unsigned int fw_output;
+};
+
+struct ipu7_isys_buffer {
+ struct list_head head;
+ atomic_t str2mmio_flag;
+};
+
+struct ipu7_isys_video_buffer {
+ struct vb2_v4l2_buffer vb_v4l2;
+ struct ipu7_isys_buffer ib;
+ dma_addr_t dma_addr;
+};
+
+#define IPU_ISYS_BUFFER_LIST_FL_INCOMING BIT(0)
+#define IPU_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1)
+#define IPU_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2)
+
+struct ipu7_isys_buffer_list {
+ struct list_head head;
+ unsigned int nbufs;
+};
+
+#define vb2_queue_to_isys_queue(__vb2) \
+ container_of(__vb2, struct ipu7_isys_queue, vbq)
+
+#define ipu7_isys_to_isys_video_buffer(__ib) \
+ container_of(__ib, struct ipu7_isys_video_buffer, ib)
+
+#define vb2_buffer_to_ipu7_isys_video_buffer(__vvb) \
+ container_of(__vvb, struct ipu7_isys_video_buffer, vb_v4l2)
+
+#define ipu7_isys_buffer_to_vb2_buffer(__ib) \
+ (&ipu7_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf)
+
+void ipu7_isys_buffer_list_queue(struct ipu7_isys_buffer_list *bl,
+ unsigned long op_flags,
+ enum vb2_buffer_state state);
+void ipu7_isys_buffer_to_fw_frame_buff(struct ipu7_insys_buffset *set,
+ struct ipu7_isys_stream *stream,
+ struct ipu7_isys_buffer_list *bl);
+void ipu7_isys_queue_buf_ready(struct ipu7_isys_stream *stream,
+ struct ipu7_insys_resp *info);
+int ipu7_isys_queue_init(struct ipu7_isys_queue *aq);
+#endif /* IPU7_ISYS_QUEUE_H */
diff --git a/drivers/staging/media/ipu7/ipu7-isys-subdev.c b/drivers/staging/media/ipu7/ipu7-isys-subdev.c
new file mode 100644
index 000000000000..67a776033d5b
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-subdev.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+
+#include <media/media-entity.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include <uapi/linux/media-bus-format.h>
+
+#include "ipu7-bus.h"
+#include "ipu7-isys.h"
+#include "ipu7-isys-subdev.h"
+
+unsigned int ipu7_isys_mbus_code_to_mipi(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ return MIPI_CSI2_DT_RGB565;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return MIPI_CSI2_DT_RGB888;
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ return MIPI_CSI2_DT_YUV422_10B;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ return MIPI_CSI2_DT_YUV422_8B;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ return MIPI_CSI2_DT_RAW12;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ return MIPI_CSI2_DT_RAW10;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return MIPI_CSI2_DT_RAW8;
+ default:
+ WARN_ON(1);
+ return 0xff;
+ }
+}
+
+bool ipu7_isys_is_bayer_format(u32 code)
+{
+ switch (ipu7_isys_mbus_code_to_mipi(code)) {
+ case MIPI_CSI2_DT_RAW8:
+ case MIPI_CSI2_DT_RAW10:
+ case MIPI_CSI2_DT_RAW12:
+ case MIPI_CSI2_DT_RAW14:
+ case MIPI_CSI2_DT_RAW16:
+ case MIPI_CSI2_DT_RAW20:
+ case MIPI_CSI2_DT_RAW24:
+ case MIPI_CSI2_DT_RAW28:
+ return true;
+ default:
+ return false;
+ }
+}
+
+u32 ipu7_isys_convert_bayer_order(u32 code, int x, int y)
+{
+ static const u32 code_map[] = {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(code_map); i++)
+ if (code_map[i] == code)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(code_map)))
+ return code;
+
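+	/*
+	 * code_map lists each bit depth as four entries in the order RGGB,
+	 * GRBG, GBRG, BGGR. Shifting the crop origin by one pixel in x
+	 * and/or y toggles the Bayer phase, which corresponds to XORing
+	 * the two low bits of the index.
+	 */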
+ return code_map[i ^ ((((u32)y & 1U) << 1U) | ((u32)x & 1U))];
+}
+
+int ipu7_isys_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
+ u32 code = asd->supported_codes[0];
+ struct v4l2_mbus_framefmt *fmt;
+ u32 other_pad, other_stream;
+ struct v4l2_rect *crop;
+ unsigned int i;
+ int ret;
+
+	/* No transcoding; source and sink formats must match. */
+ if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) &&
+ sd->entity.num_pads > 1)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ format->format.width = clamp(format->format.width, IPU_ISYS_MIN_WIDTH,
+ IPU_ISYS_MAX_WIDTH);
+ format->format.height = clamp(format->format.height,
+ IPU_ISYS_MIN_HEIGHT,
+ IPU_ISYS_MAX_HEIGHT);
+
+ for (i = 0; asd->supported_codes[i]; i++) {
+ if (asd->supported_codes[i] == format->format.code) {
+ code = asd->supported_codes[i];
+ break;
+ }
+ }
+ format->format.code = code;
+ format->format.field = V4L2_FIELD_NONE;
+
+ /* Store the format and propagate it to the source pad. */
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK))
+ return 0;
+
+ /* propagate format to following source pad */
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ format->pad,
+ format->stream,
+ &other_pad,
+ &other_stream);
+ if (ret)
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(state, other_pad, other_stream);
+ /* reset crop */
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->width;
+ crop->height = fmt->height;
+
+ return 0;
+}
+
+int ipu7_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
+ const u32 *supported_codes = asd->supported_codes;
+ u32 index;
+
+ for (index = 0; supported_codes[index]; index++) {
+ if (index == code->index) {
+ code->code = supported_codes[index];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ static const struct v4l2_mbus_framefmt fmt = {
+ .width = 4096,
+ .height = 3072,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .field = V4L2_FIELD_NONE,
+ };
+ struct v4l2_subdev_route *route;
+ int ret;
+
+ ret = v4l2_subdev_routing_validate(sd, routing,
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
+ V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);
+ if (ret)
+ return ret;
+
+ /*
+	 * The device doesn't support source multiplexing; set all source
+	 * streams to 0 to simplify stream handling throughout the driver.
+ */
+ for_each_active_route(routing, route)
+ route->source_stream = 0;
+
+ return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &fmt);
+}
+
+int ipu7_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (!sd || !format)
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ fmt = v4l2_subdev_state_get_format(state, pad, stream);
+ if (fmt)
+ *format = *fmt;
+ v4l2_subdev_unlock_state(state);
+
+ return fmt ? 0 : -EINVAL;
+}
+
+static int ipu7_isys_subdev_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route route = {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = 1,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ };
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = 1,
+ .routes = &route,
+ };
+
+ return subdev_set_routing(sd, state, &routing);
+}
+
+int ipu7_isys_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ return subdev_set_routing(sd, state, routing);
+}
+
+static const struct v4l2_subdev_internal_ops ipu7_isys_subdev_internal_ops = {
+ .init_state = ipu7_isys_subdev_init_state,
+};
+
+int ipu7_isys_subdev_init(struct ipu7_isys_subdev *asd,
+ const struct v4l2_subdev_ops *ops,
+ unsigned int nr_ctrls,
+ unsigned int num_sink_pads,
+ unsigned int num_source_pads)
+{
+ unsigned int num_pads = num_sink_pads + num_source_pads;
+ unsigned int i;
+ int ret;
+
+ v4l2_subdev_init(&asd->sd, ops);
+
+ asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS |
+ V4L2_SUBDEV_FL_STREAMS;
+ asd->sd.owner = THIS_MODULE;
+ asd->sd.dev = &asd->isys->adev->auxdev.dev;
+ asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ asd->sd.internal_ops = &ipu7_isys_subdev_internal_ops;
+
+ asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads,
+ sizeof(*asd->pad), GFP_KERNEL);
+ if (!asd->pad)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sink_pads; i++)
+ asd->pad[i].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+
+ for (i = num_sink_pads; i < num_pads; i++)
+ asd->pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad);
+ if (ret) {
+ pr_err("isys subdev init failed %d.\n", ret);
+ return ret;
+ }
+
+ if (asd->ctrl_init) {
+ ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls);
+ if (ret)
+ goto out_media_entity_cleanup;
+
+ asd->ctrl_init(&asd->sd);
+ if (asd->ctrl_handler.error) {
+ ret = asd->ctrl_handler.error;
+ goto out_v4l2_ctrl_handler_free;
+ }
+
+ asd->sd.ctrl_handler = &asd->ctrl_handler;
+ }
+
+ asd->source = -1;
+
+ return 0;
+
+out_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+out_media_entity_cleanup:
+ media_entity_cleanup(&asd->sd.entity);
+
+ return ret;
+}
+
+void ipu7_isys_subdev_cleanup(struct ipu7_isys_subdev *asd)
+{
+ media_entity_cleanup(&asd->sd.entity);
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+}
diff --git a/drivers/staging/media/ipu7/ipu7-isys-subdev.h b/drivers/staging/media/ipu7/ipu7-isys-subdev.h
new file mode 100644
index 000000000000..faa50031cf24
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-subdev.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_ISYS_SUBDEV_H
+#define IPU7_ISYS_SUBDEV_H
+
+#include <linux/container_of.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+struct ipu7_isys;
+
+struct ipu7_isys_subdev {
+ struct v4l2_subdev sd;
+ struct ipu7_isys *isys;
+ u32 const *supported_codes;
+ struct media_pad *pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ void (*ctrl_init)(struct v4l2_subdev *sd);
+ int source; /* SSI stream source; -1 if unset */
+};
+
+#define to_ipu7_isys_subdev(__sd) \
+ container_of(__sd, struct ipu7_isys_subdev, sd)
+unsigned int ipu7_isys_mbus_code_to_mipi(u32 code);
+bool ipu7_isys_is_bayer_format(u32 code);
+u32 ipu7_isys_convert_bayer_order(u32 code, int x, int y);
+
+int ipu7_isys_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format);
+int ipu7_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum
+ *code);
+int ipu7_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
+ struct v4l2_mbus_framefmt *format);
+int ipu7_isys_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing);
+int ipu7_isys_subdev_init(struct ipu7_isys_subdev *asd,
+ const struct v4l2_subdev_ops *ops,
+ unsigned int nr_ctrls,
+ unsigned int num_sink_pads,
+ unsigned int num_source_pads);
+void ipu7_isys_subdev_cleanup(struct ipu7_isys_subdev *asd);
+#endif /* IPU7_ISYS_SUBDEV_H */
diff --git a/drivers/staging/media/ipu7/ipu7-isys-video.c b/drivers/staging/media/ipu7/ipu7-isys-video.c
new file mode 100644
index 000000000000..8c6730833f24
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-video.c
@@ -0,0 +1,1078 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/align.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "abi/ipu7_fw_isys_abi.h"
+
+#include "ipu7.h"
+#include "ipu7-bus.h"
+#include "ipu7-buttress-regs.h"
+#include "ipu7-fw-isys.h"
+#include "ipu7-isys.h"
+#include "ipu7-isys-video.h"
+#include "ipu7-platform-regs.h"
+
+const struct ipu7_isys_pixelformat ipu7_isys_pfmts[] = {
+ {V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8,
+ IPU_INSYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ IPU_INSYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8,
+ IPU_INSYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ IPU_INSYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU_INSYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ IPU_INSYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16,
+ IPU_INSYS_FRAME_FORMAT_UYVY},
+ {V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16,
+ IPU_INSYS_FRAME_FORMAT_YUYV},
+ {V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16,
+ IPU_INSYS_FRAME_FORMAT_RGB565},
+ {V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24,
+ IPU_INSYS_FRAME_FORMAT_RGBA888},
+};
+
+const struct ipu7_isys_pixelformat *ipu7_isys_get_isys_format(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) {
+ const struct ipu7_isys_pixelformat *pfmt = &ipu7_isys_pfmts[i];
+
+ if (pfmt->pixelformat == pixelformat)
+ return pfmt;
+ }
+
+ return &ipu7_isys_pfmts[0];
+}
+
+static int ipu7_isys_vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct ipu7_isys_video *av = video_drvdata(file);
+
+ strscpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver));
+ strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card));
+
+ return 0;
+}
+
+static int ipu7_isys_vidioc_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ unsigned int i, num_found;
+
+ for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) {
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ continue;
+
+ if (f->mbus_code && f->mbus_code != ipu7_isys_pfmts[i].code)
+ continue;
+
+ if (num_found < f->index) {
+ num_found++;
+ continue;
+ }
+
+ f->flags = 0;
+ f->pixelformat = ipu7_isys_pfmts[i].pixelformat;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ipu7_isys_vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ unsigned int i;
+
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) {
+ if (fsize->pixel_format != ipu7_isys_pfmts[i].pixelformat)
+ continue;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = IPU_ISYS_MIN_WIDTH;
+ fsize->stepwise.max_width = IPU_ISYS_MAX_WIDTH;
+ fsize->stepwise.min_height = IPU_ISYS_MIN_HEIGHT;
+ fsize->stepwise.max_height = IPU_ISYS_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ipu7_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu7_isys_video *av = video_drvdata(file);
+
+ f->fmt.pix = av->pix_fmt;
+
+ return 0;
+}
+
+static void ipu7_isys_try_fmt_cap(struct ipu7_isys_video *av, u32 type,
+ u32 *format, u32 *width, u32 *height,
+ u32 *bytesperline, u32 *sizeimage)
+{
+ const struct ipu7_isys_pixelformat *pfmt =
+ ipu7_isys_get_isys_format(*format);
+
+ *format = pfmt->pixelformat;
+ *width = clamp(*width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH);
+ *height = clamp(*height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT);
+
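+	/*
+	 * When bpp differs from bpp_packed the samples are stored unpacked,
+	 * a whole number of bytes per pixel; otherwise the line is packed
+	 * tightly and only rounded up to a byte boundary.
+	 */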
+ if (pfmt->bpp != pfmt->bpp_packed)
+ *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE);
+ else
+ *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE);
+
+ *bytesperline = ALIGN(*bytesperline, 64U);
+
+ /*
+	 * Allocate (height + 1) * bytesperline due to a hardware issue: the
+	 * DMA unit size is a power of two and a line should be transferred
+	 * in as few units as possible, so up to one extra line of data may
+	 * be written to memory after the image. The GDA allocation unit
+	 * size is another limit and gives the bigger number at low
+	 * resolutions. Use the larger of the two to avoid memory corruption.
+ */
+ *sizeimage = *bytesperline * *height +
+ max(*bytesperline,
+ av->isys->pdata->ipdata->isys_dma_overshoot);
+}
+
+static void __ipu_isys_vidioc_try_fmt_vid_cap(struct ipu7_isys_video *av,
+ struct v4l2_format *f)
+{
+ ipu7_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat,
+ &f->fmt.pix.width, &f->fmt.pix.height,
+ &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage);
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int ipu7_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu7_isys_video *av = video_drvdata(file);
+
+ if (vb2_is_busy(&av->aq.vbq))
+ return -EBUSY;
+
+ __ipu_isys_vidioc_try_fmt_vid_cap(av, f);
+
+ return 0;
+}
+
+static int ipu7_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu7_isys_video *av = video_drvdata(file);
+
+ ipu7_isys_vidioc_try_fmt_vid_cap(file, fh, f);
+ av->pix_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int ipu7_isys_vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct ipu7_isys_video *av = video_drvdata(file);
+ int ret;
+
+ av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type);
+ av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type);
+
+ ret = vb2_queue_change_type(&av->aq.vbq, p->type);
+ if (ret)
+ return ret;
+
+ return vb2_ioctl_reqbufs(file, priv, p);
+}
+
+static int ipu7_isys_vidioc_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p)
+{
+ struct ipu7_isys_video *av = video_drvdata(file);
+ int ret;
+
+ av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type);
+ av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type);
+
+ ret = vb2_queue_change_type(&av->aq.vbq, p->format.type);
+ if (ret)
+ return ret;
+
+ return vb2_ioctl_create_bufs(file, priv, p);
+}
+
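+/*
+ * Check that the connected source pad produces the same width, height and
+ * media bus code as the format configured on this video node.
+ */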
+static int link_validate(struct media_link *link)
+{
+ struct ipu7_isys_video *av =
+ container_of(link->sink, struct ipu7_isys_video, pad);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct v4l2_subdev_state *s_state;
+ struct v4l2_mbus_framefmt *s_fmt;
+ struct v4l2_subdev *s_sd;
+ struct media_pad *s_pad;
+ u32 s_stream = 0, code;
+ int ret = -EPIPE;
+
+ if (!link->source->entity)
+ return ret;
+
+ s_sd = media_entity_to_v4l2_subdev(link->source->entity);
+ s_state = v4l2_subdev_get_unlocked_active_state(s_sd);
+ if (!s_state)
+ return ret;
+
+ dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name);
+
+ s_pad = media_pad_remote_pad_first(&av->pad);
+
+ v4l2_subdev_lock_state(s_state);
+
+ s_fmt = v4l2_subdev_state_get_format(s_state, s_pad->index, s_stream);
+ if (!s_fmt) {
+ dev_err(dev, "failed to get source pad format\n");
+ goto unlock;
+ }
+
+ code = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat)->code;
+
+ if (s_fmt->width != av->pix_fmt.width ||
+ s_fmt->height != av->pix_fmt.height || s_fmt->code != code) {
+ dev_dbg(dev, "format mismatch %dx%d,%x != %dx%d,%x\n",
+ s_fmt->width, s_fmt->height, s_fmt->code,
+ av->pix_fmt.width, av->pix_fmt.height, code);
+ goto unlock;
+ }
+
+ v4l2_subdev_unlock_state(s_state);
+
+ return 0;
+unlock:
+ v4l2_subdev_unlock_state(s_state);
+
+ return ret;
+}
+
+static void get_stream_opened(struct ipu7_isys_video *av)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->streams_lock, flags);
+ av->isys->stream_opened++;
+ spin_unlock_irqrestore(&av->isys->streams_lock, flags);
+}
+
+static void put_stream_opened(struct ipu7_isys_video *av)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->streams_lock, flags);
+ av->isys->stream_opened--;
+ spin_unlock_irqrestore(&av->isys->streams_lock, flags);
+}
+
+static int ipu7_isys_fw_pin_cfg(struct ipu7_isys_video *av,
+ struct ipu7_insys_stream_cfg *cfg)
+{
+ struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad);
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity);
+ struct ipu7_isys_stream *stream = av->stream;
+ const struct ipu7_isys_pixelformat *pfmt =
+ ipu7_isys_get_isys_format(av->pix_fmt.pixelformat);
+ struct ipu7_insys_output_pin *output_pin;
+ struct ipu7_insys_input_pin *input_pin;
+ int input_pins = cfg->nof_input_pins++;
+ struct ipu7_isys_queue *aq = &av->aq;
+ struct ipu7_isys *isys = av->isys;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct v4l2_mbus_framefmt fmt;
+ int output_pins;
+ u32 src_stream = 0;
+ int ret;
+
+ ret = ipu7_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream,
+ &fmt);
+ if (ret < 0) {
+ dev_err(dev, "can't get stream format (%d)\n", ret);
+ return ret;
+ }
+
+ input_pin = &cfg->input_pins[input_pins];
+ input_pin->input_res.width = fmt.width;
+ input_pin->input_res.height = fmt.height;
+ input_pin->dt = av->dt;
+ input_pin->disable_mipi_unpacking = 0;
+ pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat);
+ if (pfmt->bpp == pfmt->bpp_packed && pfmt->bpp % BITS_PER_BYTE)
+ input_pin->disable_mipi_unpacking = 1;
+ input_pin->mapped_dt = N_IPU_INSYS_MIPI_DATA_TYPE;
+ input_pin->dt_rename_mode = IPU_INSYS_MIPI_DT_NO_RENAME;
+	/*
+	 * If polling of the isys interrupt is enabled, the following
+	 * values may be set.
+	 */
+ input_pin->sync_msg_map = IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF |
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED |
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF |
+ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED;
+
+ output_pins = cfg->nof_output_pins++;
+ aq->fw_output = output_pins;
+ stream->output_pins[output_pins].pin_ready = ipu7_isys_queue_buf_ready;
+ stream->output_pins[output_pins].aq = aq;
+
+ output_pin = &cfg->output_pins[output_pins];
+ /* output pin msg link */
+ output_pin->link.buffer_lines = 0;
+ output_pin->link.foreign_key = IPU_MSG_LINK_FOREIGN_KEY_NONE;
+ output_pin->link.granularity_pointer_update = 0;
+ output_pin->link.msg_link_streaming_mode =
+ IA_GOFO_MSG_LINK_STREAMING_MODE_SOFF;
+
+ output_pin->link.pbk_id = IPU_MSG_LINK_PBK_ID_DONT_CARE;
+ output_pin->link.pbk_slot_id = IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE;
+ output_pin->link.dest = IPU_INSYS_OUTPUT_LINK_DEST_MEM;
+ output_pin->link.use_sw_managed = 1;
+ /* TODO: set the snoop bit for metadata capture */
+ output_pin->link.is_snoop = 0;
+
+ /* output pin crop */
+ output_pin->crop.line_top = 0;
+ output_pin->crop.line_bottom = 0;
+
+ /* output de-compression */
+ output_pin->dpcm.enable = 0;
+
+ /* frame format type */
+ pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat);
+ output_pin->ft = (u16)pfmt->css_pixelformat;
+
+ /* stride in bytes */
+ output_pin->stride = av->pix_fmt.bytesperline;
+ output_pin->send_irq = 1;
+ output_pin->early_ack_en = 0;
+
+ /* input pin id */
+ output_pin->input_pin_id = input_pins;
+
+ return 0;
+}
+
+/* Create stream and start it using the CSS FW ABI. */
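+/*
+ * The sequence is: configure the input and output pins for every queue of
+ * the stream, send STREAM_OPEN and wait for its ack, then submit the first
+ * buffer set with STREAM_START_AND_CAPTURE.  A failure after the open is
+ * rolled back with STREAM_CLOSE.
+ */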
+static int start_stream_firmware(struct ipu7_isys_video *av,
+ struct ipu7_isys_buffer_list *bl)
+{
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu7_isys_stream *stream = av->stream;
+ struct ipu7_insys_stream_cfg *stream_cfg;
+ struct ipu7_insys_buffset *buf = NULL;
+ struct isys_fw_msgs *msg = NULL;
+ struct ipu7_isys_queue *aq;
+ int ret, retout, tout;
+ u16 send_type;
+
+ if (WARN_ON(!bl))
+ return -EIO;
+
+ msg = ipu7_get_fw_msg_buf(stream);
+ if (!msg)
+ return -ENOMEM;
+
+ stream_cfg = &msg->fw_msg.stream;
+ stream_cfg->port_id = stream->stream_source;
+ stream_cfg->vc = stream->vc;
+ stream_cfg->stream_msg_map = IPU_INSYS_STREAM_ENABLE_MSG_SEND_RESP |
+ IPU_INSYS_STREAM_ENABLE_MSG_SEND_IRQ;
+
+ list_for_each_entry(aq, &stream->queues, node) {
+ struct ipu7_isys_video *__av = ipu7_isys_queue_to_video(aq);
+
+ ret = ipu7_isys_fw_pin_cfg(__av, stream_cfg);
+ if (ret < 0) {
+ ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg);
+ return ret;
+ }
+ }
+
+ ipu7_fw_isys_dump_stream_cfg(dev, stream_cfg);
+
+ stream->nr_output_pins = stream_cfg->nof_output_pins;
+
+ reinit_completion(&stream->stream_open_completion);
+
+ ret = ipu7_fw_isys_complex_cmd(av->isys, stream->stream_handle,
+ stream_cfg, msg->dma_addr,
+ sizeof(*stream_cfg),
+ IPU_INSYS_SEND_TYPE_STREAM_OPEN);
+ if (ret < 0) {
+ dev_err(dev, "can't open stream (%d)\n", ret);
+ ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg);
+ return ret;
+ }
+
+ get_stream_opened(av);
+
+ tout = wait_for_completion_timeout(&stream->stream_open_completion,
+ FW_CALL_TIMEOUT_JIFFIES);
+
+ ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg);
+
+ if (!tout) {
+ dev_err(dev, "stream open time out\n");
+ ret = -ETIMEDOUT;
+ goto out_put_stream_opened;
+ }
+ if (stream->error) {
+ dev_err(dev, "stream open error: %d\n", stream->error);
+ ret = -EIO;
+ goto out_put_stream_opened;
+ }
+ dev_dbg(dev, "start stream: open complete\n");
+
+ msg = ipu7_get_fw_msg_buf(stream);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out_put_stream_opened;
+ }
+ buf = &msg->fw_msg.frame;
+
+ ipu7_isys_buffer_to_fw_frame_buff(buf, stream, bl);
+ ipu7_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+ reinit_completion(&stream->stream_start_completion);
+
+ send_type = IPU_INSYS_SEND_TYPE_STREAM_START_AND_CAPTURE;
+ ipu7_fw_isys_dump_frame_buff_set(dev, buf,
+ stream_cfg->nof_output_pins);
+ ret = ipu7_fw_isys_complex_cmd(av->isys, stream->stream_handle, buf,
+ msg->dma_addr, sizeof(*buf),
+ send_type);
+ if (ret < 0) {
+ dev_err(dev, "can't start streaming (%d)\n", ret);
+ goto out_stream_close;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_start_completion,
+ FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout) {
+ dev_err(dev, "stream start time out\n");
+ ret = -ETIMEDOUT;
+ goto out_stream_close;
+ }
+ if (stream->error) {
+ dev_err(dev, "stream start error: %d\n", stream->error);
+ ret = -EIO;
+ goto out_stream_close;
+ }
+ dev_dbg(dev, "start stream: complete\n");
+
+ return 0;
+
+out_stream_close:
+ reinit_completion(&stream->stream_close_completion);
+
+ retout = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+ IPU_INSYS_SEND_TYPE_STREAM_CLOSE);
+ if (retout < 0) {
+ dev_dbg(dev, "can't close stream (%d)\n", retout);
+ goto out_put_stream_opened;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_close_completion,
+ FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_err(dev, "stream close time out with error %d\n",
+ stream->error);
+ else
+ dev_dbg(dev, "stream close complete\n");
+
+out_put_stream_opened:
+ put_stream_opened(av);
+
+ return ret;
+}
+
+static void stop_streaming_firmware(struct ipu7_isys_video *av)
+{
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu7_isys_stream *stream = av->stream;
+ int ret, tout;
+
+ reinit_completion(&stream->stream_stop_completion);
+
+ ret = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+ IPU_INSYS_SEND_TYPE_STREAM_FLUSH);
+ if (ret < 0) {
+ dev_err(dev, "can't stop stream (%d)\n", ret);
+ return;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_stop_completion,
+ FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_warn(dev, "stream stop time out\n");
+ else if (stream->error)
+ dev_warn(dev, "stream stop error: %d\n", stream->error);
+ else
+ dev_dbg(dev, "stop stream: complete\n");
+}
+
+static void close_streaming_firmware(struct ipu7_isys_video *av)
+{
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct ipu7_isys_stream *stream = av->stream;
+ int ret, tout;
+
+ reinit_completion(&stream->stream_close_completion);
+
+ ret = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle,
+ IPU_INSYS_SEND_TYPE_STREAM_CLOSE);
+ if (ret < 0) {
+ dev_err(dev, "can't close stream (%d)\n", ret);
+ return;
+ }
+
+ tout = wait_for_completion_timeout(&stream->stream_close_completion,
+ FW_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_warn(dev, "stream close time out\n");
+ else if (stream->error)
+ dev_warn(dev, "stream close error: %d\n", stream->error);
+ else
+ dev_dbg(dev, "close stream: complete\n");
+
+ put_stream_opened(av);
+}
+
+int ipu7_isys_video_prepare_stream(struct ipu7_isys_video *av,
+ struct media_entity *source_entity,
+ int nr_queues)
+{
+ struct ipu7_isys_stream *stream = av->stream;
+ struct ipu7_isys_csi2 *csi2;
+
+ if (WARN_ON(stream->nr_streaming))
+ return -EINVAL;
+
+ stream->nr_queues = nr_queues;
+ atomic_set(&stream->sequence, 0);
+ atomic_set(&stream->buf_id, 0);
+
+ stream->seq_index = 0;
+ memset(stream->seq, 0, sizeof(stream->seq));
+
+ if (WARN_ON(!list_empty(&stream->queues)))
+ return -EINVAL;
+
+ stream->stream_source = stream->asd->source;
+
+ csi2 = ipu7_isys_subdev_to_csi2(stream->asd);
+ csi2->receiver_errors = 0;
+ stream->source_entity = source_entity;
+
+ dev_dbg(&av->isys->adev->auxdev.dev,
+ "prepare stream: external entity %s\n",
+ stream->source_entity->name);
+
+ return 0;
+}
+
+void ipu7_isys_put_stream(struct ipu7_isys_stream *stream)
+{
+ unsigned long flags;
+ struct device *dev;
+ unsigned int i;
+
+ if (!stream) {
+ pr_err("ipu7-isys: no available stream\n");
+ return;
+ }
+
+ dev = &stream->isys->adev->auxdev.dev;
+
+ spin_lock_irqsave(&stream->isys->streams_lock, flags);
+ for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+ if (&stream->isys->streams[i] == stream) {
+ if (stream->isys->streams_ref_count[i] > 0)
+ stream->isys->streams_ref_count[i]--;
+ else
+ dev_warn(dev, "invalid stream %d\n", i);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&stream->isys->streams_lock, flags);
+}
+
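+/*
+ * Find an existing stream matching the subdev and virtual channel, or claim
+ * a free slot.  The reference is dropped by ipu7_isys_put_stream().
+ */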
+static struct ipu7_isys_stream *
+ipu7_isys_get_stream(struct ipu7_isys_video *av, struct ipu7_isys_subdev *asd)
+{
+ struct ipu7_isys_stream *stream = NULL;
+ struct ipu7_isys *isys = av->isys;
+ unsigned long flags;
+ unsigned int i;
+ u8 vc = av->vc;
+
+ if (!isys)
+ return NULL;
+
+ spin_lock_irqsave(&isys->streams_lock, flags);
+ for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+ if (isys->streams_ref_count[i] && isys->streams[i].vc == vc &&
+ isys->streams[i].asd == asd) {
+ isys->streams_ref_count[i]++;
+ stream = &isys->streams[i];
+ break;
+ }
+ }
+
+ if (!stream) {
+ for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+ if (!isys->streams_ref_count[i]) {
+ isys->streams_ref_count[i]++;
+ stream = &isys->streams[i];
+ stream->vc = vc;
+ stream->asd = asd;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+ return stream;
+}
+
+struct ipu7_isys_stream *
+ipu7_isys_query_stream_by_handle(struct ipu7_isys *isys, u8 stream_handle)
+{
+ unsigned long flags;
+ struct ipu7_isys_stream *stream = NULL;
+
+ if (!isys)
+ return NULL;
+
+ if (stream_handle >= IPU_ISYS_MAX_STREAMS) {
+ dev_err(&isys->adev->auxdev.dev,
+ "stream_handle %d is invalid\n", stream_handle);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&isys->streams_lock, flags);
+ if (isys->streams_ref_count[stream_handle] > 0) {
+ isys->streams_ref_count[stream_handle]++;
+ stream = &isys->streams[stream_handle];
+ }
+ spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+ return stream;
+}
+
+struct ipu7_isys_stream *
+ipu7_isys_query_stream_by_source(struct ipu7_isys *isys, int source, u8 vc)
+{
+ struct ipu7_isys_stream *stream = NULL;
+ unsigned long flags;
+ unsigned int i;
+
+ if (!isys)
+ return NULL;
+
+ if (source < 0) {
+ dev_err(&isys->adev->auxdev.dev,
+ "query stream with invalid port number\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&isys->streams_lock, flags);
+ for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+ if (!isys->streams_ref_count[i])
+ continue;
+
+ if (isys->streams[i].stream_source == source &&
+ isys->streams[i].vc == vc) {
+ stream = &isys->streams[i];
+ isys->streams_ref_count[i]++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&isys->streams_lock, flags);
+
+ return stream;
+}
+
+int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, int state,
+ struct ipu7_isys_buffer_list *bl)
+{
+ struct ipu7_isys_stream *stream = av->stream;
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct media_pad *r_pad;
+ struct v4l2_subdev *sd;
+ u32 r_stream = 0;
+ int ret = 0;
+
+ dev_dbg(dev, "set stream: %d\n", state);
+
+ if (WARN(!stream->source_entity, "No source entity for stream\n"))
+ return -ENODEV;
+
+ sd = &stream->asd->sd;
+ r_pad = media_pad_remote_pad_first(&av->pad);
+ if (!state) {
+ stop_streaming_firmware(av);
+
+ /* stop sub-device which connects with video */
+ dev_dbg(dev, "disable streams %s pad:%d mask:0x%llx\n",
+ sd->name, r_pad->index, BIT_ULL(r_stream));
+ ret = v4l2_subdev_disable_streams(sd, r_pad->index,
+ BIT_ULL(r_stream));
+ if (ret) {
+ dev_err(dev, "disable streams %s failed with %d\n",
+ sd->name, ret);
+ return ret;
+ }
+
+ close_streaming_firmware(av);
+ } else {
+ ret = start_stream_firmware(av, bl);
+ if (ret) {
+ dev_err(dev, "start stream of firmware failed\n");
+ return ret;
+ }
+
+ /* start sub-device which connects with video */
+ dev_dbg(dev, "enable streams %s pad: %d mask:0x%llx\n",
+ sd->name, r_pad->index, BIT_ULL(r_stream));
+ ret = v4l2_subdev_enable_streams(sd, r_pad->index,
+ BIT_ULL(r_stream));
+ if (ret) {
+ dev_err(dev, "enable streams %s failed with %d\n",
+ sd->name, ret);
+ goto out_media_entity_stop_streaming_firmware;
+ }
+ }
+
+ av->streaming = state;
+
+ return 0;
+
+out_media_entity_stop_streaming_firmware:
+ stop_streaming_firmware(av);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = {
+ .vidioc_querycap = ipu7_isys_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = ipu7_isys_vidioc_enum_fmt,
+ .vidioc_enum_framesizes = ipu7_isys_vidioc_enum_framesizes,
+ .vidioc_g_fmt_vid_cap = ipu7_isys_vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ipu7_isys_vidioc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ipu7_isys_vidioc_try_fmt_vid_cap,
+ .vidioc_reqbufs = ipu7_isys_vidioc_reqbufs,
+ .vidioc_create_bufs = ipu7_isys_vidioc_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+};
+
+static const struct media_entity_operations entity_ops = {
+ .link_validate = link_validate,
+};
+
+static const struct v4l2_file_operations isys_fops = {
+ .owner = THIS_MODULE,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+};
+
+int ipu7_isys_fw_open(struct ipu7_isys *isys)
+{
+ struct ipu7_bus_device *adev = isys->adev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&adev->auxdev.dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&isys->mutex);
+
+ if (isys->ref_count++)
+ goto unlock;
+
+	/*
+	 * Buffers could have been left in the wrong queue at the last
+	 * closure.  Move them back to the empty buffer queue now.
+	 */
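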
+ ipu7_cleanup_fw_msg_bufs(isys);
+
+ ret = ipu7_fw_isys_open(isys);
+ if (ret < 0)
+ goto out;
+
+unlock:
+ mutex_unlock(&isys->mutex);
+
+ return 0;
+out:
+ isys->ref_count--;
+ mutex_unlock(&isys->mutex);
+ pm_runtime_put(&adev->auxdev.dev);
+
+ return ret;
+}
+
+void ipu7_isys_fw_close(struct ipu7_isys *isys)
+{
+ mutex_lock(&isys->mutex);
+
+ isys->ref_count--;
+
+ if (!isys->ref_count)
+ ipu7_fw_isys_close(isys);
+
+ mutex_unlock(&isys->mutex);
+ pm_runtime_put(&isys->adev->auxdev.dev);
+}
+
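+/*
+ * Resolve the connected CSI-2 subdev, the active route and the virtual
+ * channel / data type of this video node, start the media pipeline and
+ * claim a firmware stream slot.
+ */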
+int ipu7_isys_setup_video(struct ipu7_isys_video *av,
+ struct media_entity **source_entity, int *nr_queues)
+{
+ const struct ipu7_isys_pixelformat *pfmt =
+ ipu7_isys_get_isys_format(av->pix_fmt.pixelformat);
+ struct device *dev = &av->isys->adev->auxdev.dev;
+ struct media_pad *source_pad, *remote_pad;
+ struct v4l2_mbus_frame_desc_entry entry;
+ struct v4l2_subdev_route *route = NULL;
+ struct v4l2_subdev_route *r;
+ struct v4l2_subdev_state *state;
+ struct ipu7_isys_subdev *asd;
+ struct v4l2_subdev *remote_sd;
+ struct media_pipeline *pipeline;
+ int ret = -EINVAL;
+
+ *nr_queues = 0;
+
+ remote_pad = media_pad_remote_pad_unique(&av->pad);
+ if (IS_ERR(remote_pad)) {
+ dev_dbg(dev, "failed to get remote pad\n");
+ return PTR_ERR(remote_pad);
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+ asd = to_ipu7_isys_subdev(remote_sd);
+
+ source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]);
+ if (!source_pad) {
+ dev_dbg(dev, "No external source entity\n");
+ return -ENODEV;
+ }
+
+ *source_entity = source_pad->entity;
+
+ state = v4l2_subdev_lock_and_get_active_state(remote_sd);
+ for_each_active_route(&state->routing, r) {
+ if (r->source_pad == remote_pad->index)
+ route = r;
+ }
+
+ if (!route) {
+ v4l2_subdev_unlock_state(state);
+ dev_dbg(dev, "Failed to find route\n");
+ return -ENODEV;
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ ret = ipu7_isys_csi2_get_remote_desc(route->sink_stream,
+ to_ipu7_isys_csi2(asd),
+ *source_entity, &entry,
+ nr_queues);
+ if (ret == -ENOIOCTLCMD) {
+ av->vc = 0;
+ av->dt = ipu7_isys_mbus_code_to_mipi(pfmt->code);
+ if (av->dt == 0xff)
+ return -EINVAL;
+ *nr_queues = 1;
+ } else if (*nr_queues && !ret) {
+ dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n",
+ entry.stream, entry.length, entry.bus.csi2.vc,
+ entry.bus.csi2.dt);
+
+ av->vc = entry.bus.csi2.vc;
+ av->dt = entry.bus.csi2.dt;
+ } else {
+ dev_err(dev, "failed to get remote frame desc\n");
+ return ret;
+ }
+
+ pipeline = media_entity_pipeline(&av->vdev.entity);
+ if (!pipeline)
+ ret = video_device_pipeline_alloc_start(&av->vdev);
+ else
+ ret = video_device_pipeline_start(&av->vdev, pipeline);
+ if (ret < 0) {
+ dev_dbg(dev, "media pipeline start failed\n");
+ return ret;
+ }
+
+ av->stream = ipu7_isys_get_stream(av, asd);
+ if (!av->stream) {
+ video_device_pipeline_stop(&av->vdev);
+ dev_err(dev, "no available stream for firmware\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialise everything related to the video buffer queue, the video node
+ * and the related media entity.  The caller is expected to assign the isys
+ * field and to set the name of the video device.
+ */
+int ipu7_isys_video_init(struct ipu7_isys_video *av)
+{
+ struct v4l2_format format = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = 1920,
+ .height = 1080,
+ },
+ };
+ int ret;
+
+ mutex_init(&av->mutex);
+ av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC |
+ V4L2_CAP_VIDEO_CAPTURE;
+ av->vdev.vfl_dir = VFL_DIR_RX;
+
+ ret = ipu7_isys_queue_init(&av->aq);
+ if (ret)
+ goto out_mutex_destroy;
+
+ av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+ ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad);
+ if (ret)
+ goto out_vb2_queue_cleanup;
+
+ av->vdev.entity.ops = &entity_ops;
+ av->vdev.release = video_device_release_empty;
+ av->vdev.fops = &isys_fops;
+ av->vdev.v4l2_dev = &av->isys->v4l2_dev;
+ av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev;
+ av->vdev.ioctl_ops = &ipu7_v4l2_ioctl_ops;
+ av->vdev.queue = &av->aq.vbq;
+ av->vdev.lock = &av->mutex;
+
+ __ipu_isys_vidioc_try_fmt_vid_cap(av, &format);
+ av->pix_fmt = format.fmt.pix;
+
+ video_set_drvdata(&av->vdev, av);
+
+ ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ goto out_media_entity_cleanup;
+
+ return ret;
+
+out_media_entity_cleanup:
+ vb2_video_unregister_device(&av->vdev);
+ media_entity_cleanup(&av->vdev.entity);
+
+out_vb2_queue_cleanup:
+ vb2_queue_release(&av->aq.vbq);
+
+out_mutex_destroy:
+ mutex_destroy(&av->mutex);
+
+ return ret;
+}
+
+void ipu7_isys_video_cleanup(struct ipu7_isys_video *av)
+{
+ vb2_video_unregister_device(&av->vdev);
+ media_entity_cleanup(&av->vdev.entity);
+ mutex_destroy(&av->mutex);
+}
diff --git a/drivers/staging/media/ipu7/ipu7-isys-video.h b/drivers/staging/media/ipu7/ipu7-isys-video.h
new file mode 100644
index 000000000000..1ac1787fabef
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys-video.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_ISYS_VIDEO_H
+#define IPU7_ISYS_VIDEO_H
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+
+#include "ipu7-isys-queue.h"
+
+#define IPU_INSYS_OUTPUT_PINS 11U
+#define IPU_ISYS_MAX_PARALLEL_SOF 2U
+
+struct file;
+struct ipu7_isys;
+struct ipu7_isys_csi2;
+struct ipu7_insys_stream_cfg;
+struct ipu7_isys_subdev;
+
+struct ipu7_isys_pixelformat {
+ u32 pixelformat;
+ u32 bpp;
+ u32 bpp_packed;
+ u32 code;
+ u32 css_pixelformat;
+};
+
+struct sequence_info {
+ unsigned int sequence;
+ u64 timestamp;
+};
+
+struct output_pin_data {
+ void (*pin_ready)(struct ipu7_isys_stream *stream,
+ struct ipu7_insys_resp *info);
+ struct ipu7_isys_queue *aq;
+};
+
+/*
+ * Aligned with a firmware stream.  Each stream represents a CSI-2 virtual
+ * channel and may map to multiple video devices.
+ */
+struct ipu7_isys_stream {
+ struct mutex mutex;
+ struct media_entity *source_entity;
+ atomic_t sequence;
+ atomic_t buf_id;
+ unsigned int seq_index;
+ struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF];
+ int stream_source;
+ int stream_handle;
+ unsigned int nr_output_pins;
+ struct ipu7_isys_subdev *asd;
+
+ int nr_queues; /* Number of capture queues */
+ int nr_streaming;
+ int streaming;
+ struct list_head queues;
+ struct completion stream_open_completion;
+ struct completion stream_close_completion;
+ struct completion stream_start_completion;
+ struct completion stream_stop_completion;
+ struct ipu7_isys *isys;
+
+ struct output_pin_data output_pins[IPU_INSYS_OUTPUT_PINS];
+ int error;
+ u8 vc;
+};
+
+struct ipu7_isys_video {
+ struct ipu7_isys_queue aq;
+ /* Serialise access to other fields in the struct. */
+ struct mutex mutex;
+ struct media_pad pad;
+ struct video_device vdev;
+ struct v4l2_pix_format pix_fmt;
+ struct ipu7_isys *isys;
+ struct ipu7_isys_csi2 *csi2;
+ struct ipu7_isys_stream *stream;
+ unsigned int streaming;
+ u8 vc;
+ u8 dt;
+};
+
+#define ipu7_isys_queue_to_video(__aq) \
+ container_of(__aq, struct ipu7_isys_video, aq)
+
+extern const struct ipu7_isys_pixelformat ipu7_isys_pfmts[];
+
+const struct ipu7_isys_pixelformat *ipu7_isys_get_isys_format(u32 pixelformat);
+int ipu7_isys_video_prepare_stream(struct ipu7_isys_video *av,
+ struct media_entity *source_entity,
+ int nr_queues);
+int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, int state,
+ struct ipu7_isys_buffer_list *bl);
+int ipu7_isys_fw_open(struct ipu7_isys *isys);
+void ipu7_isys_fw_close(struct ipu7_isys *isys);
+int ipu7_isys_setup_video(struct ipu7_isys_video *av,
+ struct media_entity **source_entity, int *nr_queues);
+int ipu7_isys_video_init(struct ipu7_isys_video *av);
+void ipu7_isys_video_cleanup(struct ipu7_isys_video *av);
+void ipu7_isys_put_stream(struct ipu7_isys_stream *stream);
+struct ipu7_isys_stream *
+ipu7_isys_query_stream_by_handle(struct ipu7_isys *isys,
+ u8 stream_handle);
+struct ipu7_isys_stream *
+ipu7_isys_query_stream_by_source(struct ipu7_isys *isys, int source, u8 vc);
+#endif /* IPU7_ISYS_VIDEO_H */
diff --git a/drivers/staging/media/ipu7/ipu7-isys.c b/drivers/staging/media/ipu7/ipu7-isys.c
new file mode 100644
index 000000000000..cb2f49f3e0fa
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys.c
@@ -0,0 +1,1166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <media/ipu-bridge.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include "abi/ipu7_fw_isys_abi.h"
+
+#include "ipu7-bus.h"
+#include "ipu7-buttress-regs.h"
+#include "ipu7-cpd.h"
+#include "ipu7-dma.h"
+#include "ipu7-fw-isys.h"
+#include "ipu7-mmu.h"
+#include "ipu7-isys.h"
+#include "ipu7-isys-csi2.h"
+#include "ipu7-isys-csi-phy.h"
+#include "ipu7-isys-csi2-regs.h"
+#include "ipu7-isys-video.h"
+#include "ipu7-platform-regs.h"
+
+#define ISYS_PM_QOS_VALUE 300
+
+static int
+isys_complete_ext_device_registration(struct ipu7_isys *isys,
+ struct v4l2_subdev *sd,
+ struct ipu7_isys_csi2_config *csi2)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ unsigned int i;
+ int ret;
+
+ v4l2_set_subdev_hostdata(sd, csi2);
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+
+ if (i == sd->entity.num_pads) {
+ dev_warn(dev, "no source pad in external entity\n");
+ ret = -ENOENT;
+ goto skip_unregister_subdev;
+ }
+
+ ret = media_create_pad_link(&sd->entity, i,
+ &isys->csi2[csi2->port].asd.sd.entity,
+ 0, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_warn(dev, "can't create link\n");
+ goto skip_unregister_subdev;
+ }
+
+ isys->csi2[csi2->port].nlanes = csi2->nlanes;
+ if (csi2->bus_type == V4L2_MBUS_CSI2_DPHY)
+ isys->csi2[csi2->port].phy_mode = PHY_MODE_DPHY;
+ else
+ isys->csi2[csi2->port].phy_mode = PHY_MODE_CPHY;
+
+ return 0;
+
+skip_unregister_subdev:
+ v4l2_device_unregister_subdev(sd);
+ return ret;
+}
+
+static void isys_stream_init(struct ipu7_isys *isys)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+ mutex_init(&isys->streams[i].mutex);
+ init_completion(&isys->streams[i].stream_open_completion);
+ init_completion(&isys->streams[i].stream_close_completion);
+ init_completion(&isys->streams[i].stream_start_completion);
+ init_completion(&isys->streams[i].stream_stop_completion);
+ INIT_LIST_HEAD(&isys->streams[i].queues);
+ isys->streams[i].isys = isys;
+ isys->streams[i].stream_handle = i;
+ isys->streams[i].vc = INVALID_VC_ID;
+ }
+}
+
+static int isys_fw_log_init(struct ipu7_isys *isys)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct isys_fw_log *fw_log;
+ void *log_buf;
+
+ if (isys->fw_log)
+ return 0;
+
+ fw_log = devm_kzalloc(dev, sizeof(*fw_log), GFP_KERNEL);
+ if (!fw_log)
+ return -ENOMEM;
+
+ mutex_init(&fw_log->mutex);
+
+ log_buf = devm_kzalloc(dev, FW_LOG_BUF_SIZE, GFP_KERNEL);
+ if (!log_buf)
+ return -ENOMEM;
+
+ fw_log->head = log_buf;
+ fw_log->addr = log_buf;
+ fw_log->count = 0;
+ fw_log->size = 0;
+
+ isys->fw_log = fw_log;
+
+ return 0;
+}
+
+/* The .bound() notifier callback when a match is found */
+static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct ipu7_isys *isys = container_of(notifier,
+ struct ipu7_isys, notifier);
+ struct sensor_async_sd *s_asd =
+ container_of(asc, struct sensor_async_sd, asc);
+ struct device *dev = &isys->adev->auxdev.dev;
+ int ret;
+
+ ret = ipu_bridge_instantiate_vcm(sd->dev);
+ if (ret) {
+ dev_err(dev, "instantiate vcm failed\n");
+ return ret;
+ }
+
+ dev_info(dev, "bind %s nlanes is %d port is %d\n",
+ sd->name, s_asd->csi2.nlanes, s_asd->csi2.port);
+ isys_complete_ext_device_registration(isys, sd, &s_asd->csi2);
+
+ return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static int isys_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct ipu7_isys *isys = container_of(notifier,
+ struct ipu7_isys, notifier);
+
+ dev_info(&isys->adev->auxdev.dev,
+ "All sensor registration completed.\n");
+
+ return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations isys_async_ops = {
+ .bound = isys_notifier_bound,
+ .complete = isys_notifier_complete,
+};
+
+static int isys_notifier_init(struct ipu7_isys *isys)
+{
+ const struct ipu7_isys_internal_csi2_pdata *csi2 =
+ &isys->pdata->ipdata->csi2;
+ struct ipu7_device *isp = isys->adev->isp;
+ struct device *dev = &isp->pdev->dev;
+ unsigned int i;
+ int ret;
+
+ v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev);
+
+ for (i = 0; i < csi2->nports; i++) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_UNKNOWN
+ };
+ struct sensor_async_sd *s_asd;
+ struct fwnode_handle *ep;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ continue;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ if (vep.bus_type != V4L2_MBUS_CSI2_DPHY &&
+ vep.bus_type != V4L2_MBUS_CSI2_CPHY) {
+ ret = -EINVAL;
+ dev_err(dev, "unsupported bus type %d!\n",
+ vep.bus_type);
+ goto err_parse;
+ }
+
+ s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep,
+ struct
+ sensor_async_sd);
+ if (IS_ERR(s_asd)) {
+ ret = PTR_ERR(s_asd);
+ goto err_parse;
+ }
+
+ s_asd->csi2.port = vep.base.port;
+ s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes;
+ s_asd->csi2.bus_type = vep.bus_type;
+
+ fwnode_handle_put(ep);
+
+ continue;
+
+err_parse:
+ fwnode_handle_put(ep);
+ return ret;
+ }
+
+ if (list_empty(&isys->notifier.waiting_list)) {
+ /* isys probe could continue with async subdevs missing */
+ dev_warn(dev, "no subdev found in graph\n");
+ return 0;
+ }
+
+ isys->notifier.ops = &isys_async_ops;
+ ret = v4l2_async_nf_register(&isys->notifier);
+ if (ret) {
+ dev_err(dev, "failed to register async notifier(%d)\n", ret);
+ v4l2_async_nf_cleanup(&isys->notifier);
+ }
+
+ return ret;
+}
+
+static void isys_notifier_cleanup(struct ipu7_isys *isys)
+{
+ v4l2_async_nf_unregister(&isys->notifier);
+ v4l2_async_nf_cleanup(&isys->notifier);
+}
+
+static void isys_unregister_video_devices(struct ipu7_isys *isys)
+{
+ const struct ipu7_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i, j;
+
+ for (i = 0; i < csi2_pdata->nports; i++)
+ for (j = 0; j < IPU7_NR_OF_CSI2_SRC_PADS; j++)
+ ipu7_isys_video_cleanup(&isys->csi2[i].av[j]);
+}
+
+static int isys_register_video_devices(struct ipu7_isys *isys)
+{
+ const struct ipu7_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i, j;
+ int ret;
+
+ for (i = 0; i < csi2_pdata->nports; i++) {
+ for (j = 0; j < IPU7_NR_OF_CSI2_SRC_PADS; j++) {
+ struct ipu7_isys_video *av = &isys->csi2[i].av[j];
+
+ snprintf(av->vdev.name, sizeof(av->vdev.name),
+ IPU_ISYS_ENTITY_PREFIX " ISYS Capture %u",
+ i * IPU7_NR_OF_CSI2_SRC_PADS + j);
+ av->isys = isys;
+ av->aq.vbq.buf_struct_size =
+ sizeof(struct ipu7_isys_video_buffer);
+
+ ret = ipu7_isys_video_init(av);
+ if (ret)
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ i = i + 1U;
+ while (i--) {
+ while (j--)
+ ipu7_isys_video_cleanup(&isys->csi2[i].av[j]);
+ j = IPU7_NR_OF_CSI2_SRC_PADS;
+ }
+
+ return ret;
+}
+
+static void isys_csi2_unregister_subdevices(struct ipu7_isys *isys)
+{
+ const struct ipu7_isys_internal_csi2_pdata *csi2 =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i;
+
+ for (i = 0; i < csi2->nports; i++)
+ ipu7_isys_csi2_cleanup(&isys->csi2[i]);
+}
+
+static int isys_csi2_register_subdevices(struct ipu7_isys *isys)
+{
+ const struct ipu7_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < csi2_pdata->nports; i++) {
+ ret = ipu7_isys_csi2_init(&isys->csi2[i], isys,
+ isys->pdata->base +
+ csi2_pdata->offsets[i], i);
+ if (ret)
+ goto fail;
+ }
+
+ isys->isr_csi2_mask = IPU7_CSI_RX_LEGACY_IRQ_MASK;
+
+ return 0;
+
+fail:
+ while (i--)
+ ipu7_isys_csi2_cleanup(&isys->csi2[i]);
+
+ return ret;
+}
+
+static int isys_csi2_create_media_links(struct ipu7_isys *isys)
+{
+ const struct ipu7_isys_internal_csi2_pdata *csi2_pdata =
+ &isys->pdata->ipdata->csi2;
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct media_entity *sd;
+ unsigned int i, j;
+ int ret;
+
+ for (i = 0; i < csi2_pdata->nports; i++) {
+ sd = &isys->csi2[i].asd.sd.entity;
+
+ for (j = 0; j < IPU7_NR_OF_CSI2_SRC_PADS; j++) {
+ struct ipu7_isys_video *av = &isys->csi2[i].av[j];
+
+ ret = media_create_pad_link(sd, IPU7_CSI2_PAD_SRC + j,
+ &av->vdev.entity, 0, 0);
+ if (ret) {
+ dev_err(dev, "CSI2 can't create link\n");
+ return ret;
+ }
+
+ av->csi2 = &isys->csi2[i];
+ }
+ }
+
+ return 0;
+}
+
+static int isys_register_devices(struct ipu7_isys *isys)
+{
+ struct device *dev = &isys->adev->auxdev.dev;
+ struct pci_dev *pdev = isys->adev->isp->pdev;
+ int ret;
+
+ media_device_pci_init(&isys->media_dev,
+ pdev, IPU_MEDIA_DEV_MODEL_NAME);
+
+ strscpy(isys->v4l2_dev.name, isys->media_dev.model,
+ sizeof(isys->v4l2_dev.name));
+
+ ret = media_device_register(&isys->media_dev);
+ if (ret < 0)
+ goto out_media_device_unregister;
+
+ isys->v4l2_dev.mdev = &isys->media_dev;
+ isys->v4l2_dev.ctrl_handler = NULL;
+
+ ret = v4l2_device_register(dev, &isys->v4l2_dev);
+ if (ret < 0)
+ goto out_media_device_unregister;
+
+ ret = isys_register_video_devices(isys);
+ if (ret)
+ goto out_v4l2_device_unregister;
+
+ ret = isys_csi2_register_subdevices(isys);
+ if (ret)
+ goto out_video_unregister_device;
+
+ ret = isys_csi2_create_media_links(isys);
+ if (ret)
+ goto out_csi2_unregister_subdevices;
+
+ ret = isys_notifier_init(isys);
+ if (ret)
+ goto out_csi2_unregister_subdevices;
+
+ return 0;
+
+out_csi2_unregister_subdevices:
+ isys_csi2_unregister_subdevices(isys);
+
+out_video_unregister_device:
+ isys_unregister_video_devices(isys);
+
+out_v4l2_device_unregister:
+ v4l2_device_unregister(&isys->v4l2_dev);
+
+out_media_device_unregister:
+ media_device_unregister(&isys->media_dev);
+ media_device_cleanup(&isys->media_dev);
+
+ dev_err(dev, "failed to register isys devices\n");
+
+ return ret;
+}
+
+static void isys_unregister_devices(struct ipu7_isys *isys)
+{
+ isys_unregister_video_devices(isys);
+ isys_csi2_unregister_subdevices(isys);
+ v4l2_device_unregister(&isys->v4l2_dev);
+ media_device_unregister(&isys->media_dev);
+ media_device_cleanup(&isys->media_dev);
+}
+
+static void enable_csi2_legacy_irq(struct ipu7_isys *isys, bool enable)
+{
+ u32 offset = IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE;
+ void __iomem *base = isys->pdata->base;
+ u32 mask = isys->isr_csi2_mask;
+
+ if (!enable) {
+ writel(mask, base + offset + IRQ_CTL_CLEAR);
+ writel(0, base + offset + IRQ_CTL_ENABLE);
+ return;
+ }
+
+ writel(mask, base + offset + IRQ_CTL_EDGE);
+ writel(mask, base + offset + IRQ_CTL_CLEAR);
+ writel(mask, base + offset + IRQ_CTL_MASK);
+ writel(mask, base + offset + IRQ_CTL_ENABLE);
+}
+
+static void enable_to_sw_irq(struct ipu7_isys *isys, bool enable)
+{
+ void __iomem *base = isys->pdata->base;
+ u32 mask = IS_UC_TO_SW_IRQ_MASK;
+ u32 offset = IS_UC_CTRL_BASE;
+
+ if (!enable) {
+ writel(0, base + offset + TO_SW_IRQ_CNTL_ENABLE);
+ return;
+ }
+
+ writel(mask, base + offset + TO_SW_IRQ_CNTL_CLEAR);
+ writel(mask, base + offset + TO_SW_IRQ_CNTL_MASK_N);
+ writel(mask, base + offset + TO_SW_IRQ_CNTL_ENABLE);
+}
+
+void ipu7_isys_setup_hw(struct ipu7_isys *isys)
+{
+ u32 offset;
+ void __iomem *base = isys->pdata->base;
+
+ /* soft reset */
+ offset = IS_IO_GPREGS_BASE;
+
+ writel(0x0, base + offset + CLK_EN_TXCLKESC);
+	/* Update if ISYS frequency changes (0: 400/1, 1: 400/2, 63: 400/64) */
+ writel(0x0, base + offset + CLK_DIV_FACTOR_IS_CLK);
+ /* correct the initial printf configuration */
+ writel(0x200, base + IS_UC_CTRL_BASE + PRINTF_AXI_CNTL);
+
+ enable_to_sw_irq(isys, 1);
+ enable_csi2_legacy_irq(isys, 1);
+}
+
+static void isys_cleanup_hw(struct ipu7_isys *isys)
+{
+ enable_csi2_legacy_irq(isys, 0);
+ enable_to_sw_irq(isys, 0);
+}
+
+static int isys_runtime_pm_resume(struct device *dev)
+{
+ struct ipu7_bus_device *adev = to_ipu7_bus_device(dev);
+ struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev);
+ struct ipu7_device *isp = adev->isp;
+ unsigned long flags;
+ int ret;
+
+ if (!isys)
+ return 0;
+
+ ret = ipu7_mmu_hw_init(adev->mmu);
+ if (ret)
+ return ret;
+
+ cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE);
+
+ ret = ipu_buttress_start_tsc_sync(isp);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->power = 1;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+
+ return 0;
+}
+
+static int isys_runtime_pm_suspend(struct device *dev)
+{
+ struct ipu7_bus_device *adev = to_ipu7_bus_device(dev);
+ struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev);
+ unsigned long flags;
+
+ if (!isys)
+ return 0;
+
+ isys_cleanup_hw(isys);
+
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->power = 0;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+
+ cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ ipu7_mmu_hw_cleanup(adev->mmu);
+
+ return 0;
+}
+
+static int isys_suspend(struct device *dev)
+{
+ struct ipu7_isys *isys = dev_get_drvdata(dev);
+
+ /* If stream is open, refuse to suspend */
+ if (isys->stream_opened)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int isys_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops isys_pm_ops = {
+ .runtime_suspend = isys_runtime_pm_suspend,
+ .runtime_resume = isys_runtime_pm_resume,
+ .suspend = isys_suspend,
+ .resume = isys_resume,
+};
+
+static void isys_remove(struct auxiliary_device *auxdev)
+{
+ struct ipu7_isys *isys = dev_get_drvdata(&auxdev->dev);
+ struct isys_fw_msgs *fwmsg, *safe;
+ struct ipu7_bus_device *adev = auxdev_to_adev(auxdev);
+
+ for (int i = 0; i < IPU_ISYS_MAX_STREAMS; i++)
+ mutex_destroy(&isys->streams[i].mutex);
+
+ list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
+ ipu7_dma_free(adev, sizeof(struct isys_fw_msgs),
+ fwmsg, fwmsg->dma_addr, 0);
+
+ list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
+ ipu7_dma_free(adev, sizeof(struct isys_fw_msgs),
+ fwmsg, fwmsg->dma_addr, 0);
+
+ isys_notifier_cleanup(isys);
+ isys_unregister_devices(isys);
+
+ cpu_latency_qos_remove_request(&isys->pm_qos);
+
+ mutex_destroy(&isys->stream_mutex);
+ mutex_destroy(&isys->mutex);
+}
+
+static int alloc_fw_msg_bufs(struct ipu7_isys *isys, int amount)
+{
+ struct ipu7_bus_device *adev = isys->adev;
+ struct isys_fw_msgs *addr;
+ dma_addr_t dma_addr;
+ unsigned long flags;
+ unsigned int i;
+
+ for (i = 0; i < amount; i++) {
+ addr = ipu7_dma_alloc(adev, sizeof(struct isys_fw_msgs),
+ &dma_addr, GFP_KERNEL, 0);
+ if (!addr)
+ break;
+ addr->dma_addr = dma_addr;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ list_add(&addr->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ }
+
+ if (i == amount)
+ return 0;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ while (!list_empty(&isys->framebuflist)) {
+ addr = list_first_entry(&isys->framebuflist,
+ struct isys_fw_msgs, head);
+ list_del(&addr->head);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ ipu7_dma_free(adev, sizeof(struct isys_fw_msgs),
+ addr, addr->dma_addr, 0);
+ spin_lock_irqsave(&isys->listlock, flags);
+ }
+ spin_unlock_irqrestore(&isys->listlock, flags);
+
+ return -ENOMEM;
+}
+
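+/*
+ * Take a free firmware message buffer from the pool, growing the pool by a
+ * few entries if the free list has run dry.
+ */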
+struct isys_fw_msgs *ipu7_get_fw_msg_buf(struct ipu7_isys_stream *stream)
+{
+ struct device *dev = &stream->isys->adev->auxdev.dev;
+ struct ipu7_isys *isys = stream->isys;
+ struct isys_fw_msgs *msg;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ if (list_empty(&isys->framebuflist)) {
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ dev_dbg(dev, "Frame buffer list empty\n");
+
+ ret = alloc_fw_msg_bufs(isys, 5);
+ if (ret < 0)
+ return NULL;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ if (list_empty(&isys->framebuflist)) {
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ dev_err(dev, "Frame list empty\n");
+ return NULL;
+ }
+ }
+ msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head);
+ list_move(&msg->head, &isys->framebuflist_fw);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ memset(&msg->fw_msg, 0, sizeof(msg->fw_msg));
+
+ return msg;
+}
+
+void ipu7_cleanup_fw_msg_bufs(struct ipu7_isys *isys)
+{
+ struct isys_fw_msgs *fwmsg, *fwmsg0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head)
+ list_move(&fwmsg->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+void ipu7_put_fw_msg_buf(struct ipu7_isys *isys, uintptr_t data)
+{
+ struct isys_fw_msgs *msg;
+ void *ptr = (void *)data;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(!ptr))
+ return;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy);
+ list_move(&msg->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+static int isys_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *auxdev_id)
+{
+ const struct ipu7_isys_internal_csi2_pdata *csi2_pdata;
+ struct ipu7_bus_device *adev = auxdev_to_adev(auxdev);
+ struct ipu7_device *isp = adev->isp;
+ struct ipu7_isys *isys;
+ int ret = 0;
+
+ if (!isp->ipu7_bus_ready_to_probe)
+ return -EPROBE_DEFER;
+
+ isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL);
+ if (!isys)
+ return -ENOMEM;
+
+ ret = pm_runtime_resume_and_get(&auxdev->dev);
+ if (ret < 0)
+ return ret;
+
+ adev->auxdrv_data =
+ (const struct ipu7_auxdrv_data *)auxdev_id->driver_data;
+ adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver);
+ isys->adev = adev;
+ isys->pdata = adev->pdata;
+
+ INIT_LIST_HEAD(&isys->requests);
+ csi2_pdata = &isys->pdata->ipdata->csi2;
+
+ isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports,
+ sizeof(*isys->csi2), GFP_KERNEL);
+ if (!isys->csi2) {
+ ret = -ENOMEM;
+ goto out_runtime_put;
+ }
+
+ ret = ipu7_mmu_hw_init(adev->mmu);
+ if (ret)
+ goto out_runtime_put;
+
+ spin_lock_init(&isys->streams_lock);
+ spin_lock_init(&isys->power_lock);
+ isys->power = 0;
+
+ mutex_init(&isys->mutex);
+ mutex_init(&isys->stream_mutex);
+
+ spin_lock_init(&isys->listlock);
+ INIT_LIST_HEAD(&isys->framebuflist);
+ INIT_LIST_HEAD(&isys->framebuflist_fw);
+
+ dev_set_drvdata(&auxdev->dev, isys);
+
+ isys->icache_prefetch = 0;
+ isys->phy_rext_cal = 0;
+
+ isys_stream_init(isys);
+
+ cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+ ret = alloc_fw_msg_bufs(isys, 20);
+ if (ret < 0)
+ goto out_cleanup_isys;
+
+ ret = ipu7_fw_isys_init(isys);
+ if (ret)
+ goto out_cleanup_isys;
+
+ ret = isys_register_devices(isys);
+ if (ret)
+ goto out_cleanup_fw;
+
+ ret = isys_fw_log_init(isys);
+ if (ret)
+ goto out_cleanup;
+
+ ipu7_mmu_hw_cleanup(adev->mmu);
+ pm_runtime_put(&auxdev->dev);
+
+ return 0;
+
+out_cleanup:
+ isys_unregister_devices(isys);
+out_cleanup_fw:
+ ipu7_fw_isys_release(isys);
+out_cleanup_isys:
+ cpu_latency_qos_remove_request(&isys->pm_qos);
+
+ for (unsigned int i = 0; i < IPU_ISYS_MAX_STREAMS; i++)
+ mutex_destroy(&isys->streams[i].mutex);
+
+ mutex_destroy(&isys->mutex);
+ mutex_destroy(&isys->stream_mutex);
+
+ ipu7_mmu_hw_cleanup(adev->mmu);
+
+out_runtime_put:
+ pm_runtime_put(&auxdev->dev);
+
+ return ret;
+}
+
+struct ipu7_csi2_error {
+ const char *error_string;
+ bool is_info_only;
+};
+
+/*
+ * Strings corresponding to CSI-2 receiver errors are here.
+ * Corresponding macros are defined in the header file.
+ */
+static const struct ipu7_csi2_error dphy_rx_errors[] = {
+ { "Error handler FIFO full", false },
+ { "Reserved Short Packet encoding detected", true },
+ { "Reserved Long Packet encoding detected", true },
+ { "Received packet is too short", false},
+ { "Received packet is too long", false},
+ { "Short packet discarded due to errors", false },
+ { "Long packet discarded due to errors", false },
+ { "CSI Combo Rx interrupt", false },
+ { "IDI CDC FIFO overflow(remaining bits are reserved as 0)", false },
+ { "Received NULL packet", true },
+ { "Received blanking packet", true },
+ { "Tie to 0", true },
+ { }
+};
+
+static void ipu7_isys_register_errors(struct ipu7_isys_csi2 *csi2)
+{
+ u32 offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port);
+ u32 status = readl(csi2->base + offset + IRQ_CTL_STATUS);
+ u32 mask = IPU7_CSI_RX_ERROR_IRQ_MASK;
+
+ if (!status)
+ return;
+
+ dev_dbg(&csi2->isys->adev->auxdev.dev, "csi2-%u error status 0x%08x\n",
+ csi2->port, status);
+
+ writel(status & mask, csi2->base + offset + IRQ_CTL_CLEAR);
+ csi2->receiver_errors |= status & mask;
+}
+
+static void ipu7_isys_csi2_error(struct ipu7_isys_csi2 *csi2)
+{
+ struct ipu7_csi2_error const *errors;
+ unsigned int i;
+ u32 status;
+
+	/* Register errors once more in case error interrupts are disabled */
+ ipu7_isys_register_errors(csi2);
+ status = csi2->receiver_errors;
+ csi2->receiver_errors = 0;
+ errors = dphy_rx_errors;
+
+ for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) {
+ if (status & BIT(i))
+ dev_err_ratelimited(&csi2->isys->adev->auxdev.dev,
+ "csi2-%i error: %s\n",
+ csi2->port,
+ errors[i].error_string);
+ }
+}
+
+struct resp_to_msg {
+ enum ipu7_insys_resp_type type;
+ const char *msg;
+};
+
+static const struct resp_to_msg is_fw_msg[] = {
+ {IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE,
+ "IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE"},
+ {IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+ "IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK"},
+ {IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK,
+ "IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK"},
+ {IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK,
+ "IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK"},
+ {IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK,
+ "IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK"},
+ {IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK,
+ "IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK"},
+ {IPU_INSYS_RESP_TYPE_PIN_DATA_READY,
+ "IPU_INSYS_RESP_TYPE_PIN_DATA_READY"},
+ {IPU_INSYS_RESP_TYPE_FRAME_SOF, "IPU_INSYS_RESP_TYPE_FRAME_SOF"},
+ {IPU_INSYS_RESP_TYPE_FRAME_EOF, "IPU_INSYS_RESP_TYPE_FRAME_EOF"},
+ {IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+ "IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE"},
+ {IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE,
+ "IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE"},
+ {N_IPU_INSYS_RESP_TYPE, "N_IPU_INSYS_RESP_TYPE"},
+};
+
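+/*
+ * Handle a single response from the firmware receive queue.  Returns 0 when
+ * a valid response was processed, non-zero otherwise.
+ */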
+int isys_isr_one(struct ipu7_bus_device *adev)
+{
+ struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev);
+ struct ipu7_isys_stream *stream = NULL;
+ struct device *dev = &adev->auxdev.dev;
+ struct ipu7_isys_csi2 *csi2 = NULL;
+ struct ia_gofo_msg_err err_info;
+ struct ipu7_insys_resp *resp;
+ u64 ts;
+
+ if (!isys->adev->syscom)
+ return 1;
+
+ resp = ipu7_fw_isys_get_resp(isys);
+ if (!resp)
+ return 1;
+ if (resp->type >= N_IPU_INSYS_RESP_TYPE) {
+ dev_err(dev, "Unknown response type %u stream %u\n",
+ resp->type, resp->stream_id);
+ ipu7_fw_isys_put_resp(isys);
+ return 1;
+ }
+
+ err_info = resp->error_info;
+ ts = ((u64)resp->timestamp[1] << 32) | resp->timestamp[0];
+ if (err_info.err_group == INSYS_MSG_ERR_GROUP_CAPTURE &&
+ err_info.err_code == INSYS_MSG_ERR_CAPTURE_SYNC_FRAME_DROP) {
+		/*
+		 * A short packet was received without a command, so the
+		 * firmware dropped it.
+		 */
+ dev_dbg(dev, "FRAME DROP: %02u %s stream %u\n",
+ resp->type, is_fw_msg[resp->type].msg,
+ resp->stream_id);
+ dev_dbg(dev, "\tpin %u buf_id %llx frame %u\n",
+ resp->pin_id, resp->buf_id, resp->frame_id);
+ dev_dbg(dev, "\terror group %u code %u details [%u %u]\n",
+ err_info.err_group, err_info.err_code,
+ err_info.err_detail[0], err_info.err_detail[1]);
+ } else if (!IA_GOFO_MSG_ERR_IS_OK(err_info)) {
+ dev_err(dev, "%02u %s stream %u pin %u buf_id %llx frame %u\n",
+ resp->type, is_fw_msg[resp->type].msg, resp->stream_id,
+ resp->pin_id, resp->buf_id, resp->frame_id);
+ dev_err(dev, "\terror group %u code %u details [%u %u]\n",
+ err_info.err_group, err_info.err_code,
+ err_info.err_detail[0], err_info.err_detail[1]);
+ } else {
+ dev_dbg(dev, "%02u %s stream %u pin %u buf_id %llx frame %u\n",
+ resp->type, is_fw_msg[resp->type].msg, resp->stream_id,
+ resp->pin_id, resp->buf_id, resp->frame_id);
+ dev_dbg(dev, "\tts %llu\n", ts);
+ }
+
+ if (resp->stream_id >= IPU_ISYS_MAX_STREAMS) {
+ dev_err(dev, "bad stream handle %u\n",
+ resp->stream_id);
+ goto leave;
+ }
+
+ stream = ipu7_isys_query_stream_by_handle(isys, resp->stream_id);
+ if (!stream) {
+ dev_err(dev, "stream of stream_handle %u is unused\n",
+ resp->stream_id);
+ goto leave;
+ }
+
+ stream->error = err_info.err_code;
+
+ if (stream->asd)
+ csi2 = ipu7_isys_subdev_to_csi2(stream->asd);
+
+ switch (resp->type) {
+ case IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE:
+ complete(&stream->stream_open_completion);
+ break;
+ case IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK:
+ complete(&stream->stream_close_completion);
+ break;
+ case IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK:
+ complete(&stream->stream_start_completion);
+ break;
+ case IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK:
+ complete(&stream->stream_stop_completion);
+ break;
+ case IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK:
+ complete(&stream->stream_stop_completion);
+ break;
+ case IPU_INSYS_RESP_TYPE_PIN_DATA_READY:
+		/*
+		 * The firmware releases the capture message only after
+		 * software has received the pin_data_ready event.
+		 */
+ ipu7_put_fw_msg_buf(ipu7_bus_get_drvdata(adev), resp->buf_id);
+ if (resp->pin_id < IPU_INSYS_OUTPUT_PINS &&
+ stream->output_pins[resp->pin_id].pin_ready)
+ stream->output_pins[resp->pin_id].pin_ready(stream,
+ resp);
+ else
+ dev_err(dev, "No handler for pin %u ready\n",
+ resp->pin_id);
+ if (csi2)
+ ipu7_isys_csi2_error(csi2);
+
+ break;
+ case IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK:
+ break;
+ case IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE:
+ case IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE:
+ break;
+ case IPU_INSYS_RESP_TYPE_FRAME_SOF:
+ if (csi2)
+ ipu7_isys_csi2_sof_event_by_stream(stream);
+
+ stream->seq[stream->seq_index].sequence =
+ atomic_read(&stream->sequence) - 1U;
+ stream->seq[stream->seq_index].timestamp = ts;
+ dev_dbg(dev,
+ "SOF: stream %u frame %u (index %u), ts 0x%16.16llx\n",
+ resp->stream_id, resp->frame_id,
+ stream->seq[stream->seq_index].sequence, ts);
+ stream->seq_index = (stream->seq_index + 1U)
+ % IPU_ISYS_MAX_PARALLEL_SOF;
+ break;
+ case IPU_INSYS_RESP_TYPE_FRAME_EOF:
+ if (csi2)
+ ipu7_isys_csi2_eof_event_by_stream(stream);
+
+ dev_dbg(dev, "eof: stream %d(index %u) ts 0x%16.16llx\n",
+ resp->stream_id,
+ stream->seq[stream->seq_index].sequence, ts);
+ break;
+ default:
+ dev_err(dev, "Unknown response type %u stream %u\n",
+ resp->type, resp->stream_id);
+ break;
+ }
+
+ ipu7_isys_put_stream(stream);
+leave:
+ ipu7_fw_isys_put_resp(isys);
+
+ return 0;
+}
+
+static void ipu7_isys_csi2_isr(struct ipu7_isys_csi2 *csi2)
+{
+ struct device *dev = &csi2->isys->adev->auxdev.dev;
+ struct ipu7_device *isp = csi2->isys->adev->isp;
+ struct ipu7_isys_stream *s;
+ u32 sync, offset;
+ u32 fe = 0;
+ u8 vc;
+
+ ipu7_isys_register_errors(csi2);
+
+ offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port);
+ sync = readl(csi2->base + offset + IRQ_CTL_STATUS);
+ writel(sync, csi2->base + offset + IRQ_CTL_CLEAR);
+ dev_dbg(dev, "csi2-%u sync status 0x%08x\n", csi2->port, sync);
+
+ if (!is_ipu7(isp->hw_ver)) {
+ fe = readl(csi2->base + offset + IRQ1_CTL_STATUS);
+ writel(fe, csi2->base + offset + IRQ1_CTL_CLEAR);
+ dev_dbg(dev, "csi2-%u FE status 0x%08x\n", csi2->port, fe);
+ }
+
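+	/*
+	 * Dispatch SOF/EOF events per virtual channel.  On IPU7 the FS and
+	 * FE bits are interleaved in the sync status (two bits per VC),
+	 * while newer hardware reports FE through a separate register.
+	 */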
+ for (vc = 0; vc < IPU7_NR_OF_CSI2_VC && (sync || fe); vc++) {
+ s = ipu7_isys_query_stream_by_source(csi2->isys,
+ csi2->asd.source, vc);
+ if (!s)
+ continue;
+
+ if (!is_ipu7(isp->hw_ver)) {
+ if (sync & IPU7P5_CSI_RX_SYNC_FS_VC & (1U << vc))
+ ipu7_isys_csi2_sof_event_by_stream(s);
+
+ if (fe & IPU7P5_CSI_RX_SYNC_FE_VC & (1U << vc))
+ ipu7_isys_csi2_eof_event_by_stream(s);
+ } else {
+ if (sync & IPU7_CSI_RX_SYNC_FS_VC & (1U << (vc * 2)))
+ ipu7_isys_csi2_sof_event_by_stream(s);
+
+ if (sync & IPU7_CSI_RX_SYNC_FE_VC & (2U << (vc * 2)))
+ ipu7_isys_csi2_eof_event_by_stream(s);
+ }
+ }
+}
+
+static irqreturn_t isys_isr(struct ipu7_bus_device *adev)
+{
+ struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev);
+ u32 status_csi, status_sw, csi_offset, sw_offset;
+ struct device *dev = &isys->adev->auxdev.dev;
+ void __iomem *base = isys->pdata->base;
+
+ spin_lock(&isys->power_lock);
+ if (!isys->power) {
+ spin_unlock(&isys->power_lock);
+ return IRQ_NONE;
+ }
+
+ csi_offset = IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE;
+ sw_offset = IS_BASE;
+
+ status_csi = readl(base + csi_offset + IRQ_CTL_STATUS);
+ status_sw = readl(base + sw_offset + TO_SW_IRQ_CNTL_STATUS);
+ if (!status_csi && !status_sw) {
+ spin_unlock(&isys->power_lock);
+ return IRQ_NONE;
+ }
+
+ if (status_csi)
+ dev_dbg(dev, "status csi 0x%08x\n", status_csi);
+ if (status_sw)
+ dev_dbg(dev, "status to_sw 0x%08x\n", status_sw);
+
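+	/*
+	 * Keep clearing and servicing the CSI-2 legacy and firmware TO_SW
+	 * interrupt sources until both status registers read back clear, so
+	 * that events raised while handling are not lost.
+	 */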
+ do {
+ writel(status_sw, base + sw_offset + TO_SW_IRQ_CNTL_CLEAR);
+ writel(status_csi, base + csi_offset + IRQ_CTL_CLEAR);
+
+ if (isys->isr_csi2_mask & status_csi) {
+ unsigned int i;
+
+ for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) {
+				/* IRQ from a port that is not enabled */
+ if (!isys->csi2[i].base)
+ continue;
+ if (status_csi & isys->csi2[i].legacy_irq_mask)
+ ipu7_isys_csi2_isr(&isys->csi2[i]);
+ }
+ }
+
+ if (!isys_isr_one(adev))
+ status_sw = TO_SW_IRQ_FW;
+ else
+ status_sw = 0;
+
+ status_csi = readl(base + csi_offset + IRQ_CTL_STATUS);
+ status_sw |= readl(base + sw_offset + TO_SW_IRQ_CNTL_STATUS);
+ } while ((status_csi & isys->isr_csi2_mask) ||
+ (status_sw & TO_SW_IRQ_FW));
+
+ writel(TO_SW_IRQ_MASK, base + sw_offset + TO_SW_IRQ_CNTL_MASK_N);
+
+ spin_unlock(&isys->power_lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct ipu7_auxdrv_data ipu7_isys_auxdrv_data = {
+ .isr = isys_isr,
+ .isr_threaded = NULL,
+ .wake_isr_thread = false,
+};
+
+static const struct auxiliary_device_id ipu7_isys_id_table[] = {
+ {
+ .name = "intel_ipu7.isys",
+ .driver_data = (kernel_ulong_t)&ipu7_isys_auxdrv_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, ipu7_isys_id_table);
+
+static struct auxiliary_driver isys_driver = {
+ .name = IPU_ISYS_NAME,
+ .probe = isys_probe,
+ .remove = isys_remove,
+ .id_table = ipu7_isys_id_table,
+ .driver = {
+ .pm = &isys_pm_ops,
+ },
+};
+
+module_auxiliary_driver(isys_driver);
+
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu7 input system driver");
+MODULE_IMPORT_NS("INTEL_IPU7");
+MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
diff --git a/drivers/staging/media/ipu7/ipu7-isys.h b/drivers/staging/media/ipu7/ipu7-isys.h
new file mode 100644
index 000000000000..ef1ab1b42f6c
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-isys.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_ISYS_H
+#define IPU7_ISYS_H
+
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+
+#include "abi/ipu7_fw_msg_abi.h"
+#include "abi/ipu7_fw_isys_abi.h"
+
+#include "ipu7.h"
+#include "ipu7-isys-csi2.h"
+#include "ipu7-isys-video.h"
+
+#define IPU_ISYS_ENTITY_PREFIX "Intel IPU7"
+
+/* FW supports max 16 streams */
+#define IPU_ISYS_MAX_STREAMS 16U
+
+/*
+ * Current message queue configuration. These must be big enough
+ * so that they never get full. Queues are located in system memory.
+ */
+#define IPU_ISYS_SIZE_RECV_QUEUE 40U
+#define IPU_ISYS_SIZE_LOG_QUEUE 256U
+#define IPU_ISYS_SIZE_SEND_QUEUE 40U
+#define IPU_ISYS_NUM_RECV_QUEUE 1U
+
+#define IPU_ISYS_MIN_WIDTH 2U
+#define IPU_ISYS_MIN_HEIGHT 2U
+#define IPU_ISYS_MAX_WIDTH 8160U
+#define IPU_ISYS_MAX_HEIGHT 8190U
+
+#define FW_CALL_TIMEOUT_JIFFIES \
+ msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS)
+
+struct isys_fw_log {
+ struct mutex mutex; /* protect whole struct */
+ void *head;
+ void *addr;
+ u32 count; /* running counter of log */
+ u32 size; /* actual size of log content, in bits */
+};
+
+/*
+ * struct ipu7_isys
+ *
+ * @media_dev: Media device
+ * @v4l2_dev: V4L2 device
+ * @adev: ISYS bus device
+ * @power: Is ISYS powered on or not?
+ * @isr_csi2_mask: Which CSI-2 IRQ bits does the ISR handle?
+ * @power_lock: Serialise access to power (power state in general)
+ * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers
+ * @streams_lock: serialise access to streams
+ * @streams: streams per firmware stream ID
+ * @ref_count: total number of callers that have opened the fw
+ * @mutex: serialise access to isys video open/release related operations
+ * @stream_mutex: serialise stream start and stop, queueing requests
+ * @pdata: platform data pointer
+ * @csi2: CSI-2 receivers
+ */
+struct ipu7_isys {
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct ipu7_bus_device *adev;
+
+ int power;
+ spinlock_t power_lock; /* Serialise access to power */
+ u32 isr_csi2_mask;
+ u32 csi2_rx_ctrl_cached;
+ spinlock_t streams_lock;
+ struct ipu7_isys_stream streams[IPU_ISYS_MAX_STREAMS];
+ int streams_ref_count[IPU_ISYS_MAX_STREAMS];
+ u32 phy_rext_cal;
+ bool icache_prefetch;
+ bool csi2_cse_ipc_not_supported;
+ unsigned int ref_count;
+ unsigned int stream_opened;
+
+ struct mutex mutex; /* Serialise isys video open/release related */
+ struct mutex stream_mutex; /* Stream start, stop, queueing reqs */
+
+ struct ipu7_isys_pdata *pdata;
+
+ struct ipu7_isys_csi2 *csi2;
+ struct isys_fw_log *fw_log;
+
+ struct list_head requests;
+ struct pm_qos_request pm_qos;
+ spinlock_t listlock; /* Protect framebuflist */
+ struct list_head framebuflist;
+ struct list_head framebuflist_fw;
+ struct v4l2_async_notifier notifier;
+
+ struct ipu7_insys_config *subsys_config;
+ dma_addr_t subsys_config_dma_addr;
+};
+
+struct isys_fw_msgs {
+ union {
+ u64 dummy;
+ struct ipu7_insys_buffset frame;
+ struct ipu7_insys_stream_cfg stream;
+ } fw_msg;
+ struct list_head head;
+ dma_addr_t dma_addr;
+};
+
+struct ipu7_isys_csi2_config {
+ unsigned int nlanes;
+ unsigned int port;
+ enum v4l2_mbus_type bus_type;
+};
+
+struct sensor_async_sd {
+ struct v4l2_async_connection asc;
+ struct ipu7_isys_csi2_config csi2;
+};
+
+struct isys_fw_msgs *ipu7_get_fw_msg_buf(struct ipu7_isys_stream *stream);
+void ipu7_put_fw_msg_buf(struct ipu7_isys *isys, uintptr_t data);
+void ipu7_cleanup_fw_msg_bufs(struct ipu7_isys *isys);
+int isys_isr_one(struct ipu7_bus_device *adev);
+void ipu7_isys_setup_hw(struct ipu7_isys *isys);
+#endif /* IPU7_ISYS_H */
diff --git a/drivers/staging/media/ipu7/ipu7-mmu.c b/drivers/staging/media/ipu7/ipu7-mmu.c
new file mode 100644
index 000000000000..ded1986eb8ba
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-mmu.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <asm/barrier.h>
+
+#include <linux/align.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/iopoll.h>
+#include <linux/iova.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/pci.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "ipu7.h"
+#include "ipu7-dma.h"
+#include "ipu7-mmu.h"
+#include "ipu7-platform-regs.h"
+
+#define ISP_PAGE_SHIFT 12
+#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT)
+#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1U))
+
+#define ISP_L1PT_SHIFT 22
+#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1))
+
+#define ISP_L2PT_SHIFT 12
+#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES 1024U
+#define ISP_L2PT_PTES 1024U
+
+#define ISP_PADDR_SHIFT 12
+
+#define REG_L1_PHYS 0x0004 /* 27-bit pfn */
+#define REG_INFO 0x0008
+
+#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT)
+
+#define MMU_TLB_INVALIDATE_TIMEOUT 2000
+
+static __maybe_unused void mmu_irq_handler(struct ipu7_mmu *mmu)
+{
+ unsigned int i;
+ u32 irq_cause;
+
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ irq_cause = readl(mmu->mmu_hw[i].base + MMU_REG_IRQ_CAUSE);
+		pr_info("mmu %s irq_cause = 0x%x\n", mmu->mmu_hw[i].name,
+ irq_cause);
+ writel(0x1ffff, mmu->mmu_hw[i].base + MMU_REG_IRQ_CLEAR);
+ }
+}
+
+static void tlb_invalidate(struct ipu7_mmu *mmu)
+{
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+ u32 val;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ if (!mmu->ready) {
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ writel(0xffffffffU, mmu->mmu_hw[i].base +
+ MMU_REG_INVALIDATE_0);
+
+		/* Need to check with HW whether to use l1streams or l2streams */
+ if (mmu->mmu_hw[i].nr_l2streams > 32)
+ writel(0xffffffffU, mmu->mmu_hw[i].base +
+ MMU_REG_INVALIDATE_1);
+
+ /*
+		 * TLB invalidation takes a "single cycle" (in IOMMU clock
+		 * cycles) once the MMIO write reaches the IPU TLB invalidate
+		 * register. The wmb() forces that write out before the CPU
+		 * goes on to update the IOMMU page tables again.
+ */
+ wmb();
+
+		/* wait for the invalidation to complete */
+ ret = readl_poll_timeout_atomic(mmu->mmu_hw[i].base +
+ MMU_REG_INVALIDATION_STATUS,
+ val, !(val & 0x1U), 500,
+ MMU_TLB_INVALIDATE_TIMEOUT);
+ if (ret)
+ dev_err(mmu->dev, "MMU[%u] TLB invalidate failed\n", i);
+ }
+
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+
+static dma_addr_t map_single(struct ipu7_mmu_info *mmu_info, void *ptr)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu_info->dev, dma))
+ return 0;
+
+ return dma;
+}
+
+static int get_dummy_page(struct ipu7_mmu_info *mmu_info)
+{
+ void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+
+ if (!pt)
+ return -ENOMEM;
+
+	dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() = %p\n", pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map dummy page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->dummy_page = pt;
+ mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_page(struct ipu7_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_page);
+}
+
+static int alloc_dummy_l2_pt(struct ipu7_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+ unsigned int i;
+
+ if (!pt)
+ return -ENOMEM;
+
+ dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+ goto err_free_page;
+ }
+
+ for (i = 0; i < ISP_L2PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ mmu_info->dummy_l2_pt = pt;
+ mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_l2_pt(struct ipu7_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+}
+
+static u32 *alloc_l1_pt(struct ipu7_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+ unsigned int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);
+
+ for (i = 0; i < ISP_L1PT_PTES; i++)
+ pt[i] = mmu_info->dummy_l2_pteval;
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l1pt page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
+ dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma);
+
+ return pt;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return NULL;
+}
+
+static u32 *alloc_l2_pt(struct ipu7_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ unsigned int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);
+
+ for (i = 0; i < ISP_L1PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ return pt;
+}
+
+static void l2_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t dummy, size_t size)
+{
+ unsigned int l2_entries;
+ unsigned int l2_idx;
+ unsigned long flags;
+ u32 l1_idx;
+ u32 *l2_pt;
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ for (l1_idx = iova >> ISP_L1PT_SHIFT;
+ size > 0U && l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ dev_dbg(mmu_info->dev,
+ "unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
+ l1_idx, iova);
+
+ if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+ dev_err(mmu_info->dev,
+ "unmap not mapped iova 0x%8.8lx l1 index %u\n",
+ iova, l1_idx);
+ continue;
+ }
+ l2_pt = mmu_info->l2_pts[l1_idx];
+
+ l2_entries = 0;
+ for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+ size > 0U && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
+
+ dev_dbg(mmu_info->dev,
+ "unmap l2 index %u with pteval 0x%p\n",
+ l2_idx, &pteval);
+ l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+ iova += ISP_PAGE_SIZE;
+ size -= ISP_PAGE_SIZE;
+
+ l2_entries++;
+ }
+
+ WARN_ON_ONCE(!l2_entries);
+ clflush_cache_range(&l2_pt[l2_idx - l2_entries],
+ sizeof(l2_pt[0]) * l2_entries);
+ }
+
+ WARN_ON_ONCE(size);
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+}
+
+static int l2_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ struct device *dev = mmu_info->dev;
+ unsigned int l2_entries;
+ u32 *l2_pt, *l2_virt;
+ unsigned int l2_idx;
+ unsigned long flags;
+ size_t mapped = 0;
+ dma_addr_t dma;
+ u32 l1_entry;
+ u32 l1_idx;
+ int err = 0;
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+
+ paddr = ALIGN(paddr, ISP_PAGE_SIZE);
+ for (l1_idx = iova >> ISP_L1PT_SHIFT;
+ size && l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ dev_dbg(dev,
+ "mapping l2 page table for l1 index %u (iova %8.8x)\n",
+ l1_idx, (u32)iova);
+
+ l1_entry = mmu_info->l1_pt[l1_idx];
+ if (l1_entry == mmu_info->dummy_l2_pteval) {
+ l2_virt = mmu_info->l2_pts[l1_idx];
+ if (likely(!l2_virt)) {
+ l2_virt = alloc_l2_pt(mmu_info);
+ if (!l2_virt) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ dma = map_single(mmu_info, l2_virt);
+ if (!dma) {
+ dev_err(dev, "Failed to map l2pt page\n");
+ free_page((unsigned long)l2_virt);
+ err = -EINVAL;
+ goto error;
+ }
+
+ l1_entry = dma >> ISP_PADDR_SHIFT;
+
+ dev_dbg(dev, "page for l1_idx %u %p allocated\n",
+ l1_idx, l2_virt);
+ mmu_info->l1_pt[l1_idx] = l1_entry;
+ mmu_info->l2_pts[l1_idx] = l2_virt;
+
+ clflush_cache_range(&mmu_info->l1_pt[l1_idx],
+ sizeof(mmu_info->l1_pt[l1_idx]));
+ }
+
+ l2_pt = mmu_info->l2_pts[l1_idx];
+ l2_entries = 0;
+
+ for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+ size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;
+
+ dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
+ l2_pt[l2_idx]);
+
+ iova += ISP_PAGE_SIZE;
+ paddr += ISP_PAGE_SIZE;
+ mapped += ISP_PAGE_SIZE;
+ size -= ISP_PAGE_SIZE;
+
+ l2_entries++;
+ }
+
+ WARN_ON_ONCE(!l2_entries);
+ clflush_cache_range(&l2_pt[l2_idx - l2_entries],
+ sizeof(l2_pt[0]) * l2_entries);
+ }
+
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return 0;
+
+error:
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ /* unroll mapping in case something went wrong */
+ if (mapped)
+ l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped);
+
+ return err;
+}
+
+static int __ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
+ u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
+
+ dev_dbg(mmu_info->dev,
+ "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
+ iova_start, iova_end, size, &paddr);
+
+ return l2_map(mmu_info, iova_start, paddr, size);
+}
+
+static void __ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info,
+ unsigned long iova, size_t size)
+{
+ l2_unmap(mmu_info, iova, 0, size);
+}
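For orientation, a minimal sketch of how an IOVA is decomposed by the two helpers above, using only the ISP_* shift/mask definitions from the top of this file (illustrative only, not part of the patch; the function name and pr_debug() are made up):

/* Split an IOVA into its L1 index, L2 index and page offset. */
static void show_iova_split(unsigned long iova)
{
	u32 l1_idx = iova >> ISP_L1PT_SHIFT;			/* bits 31..22 */
	u32 l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;	/* bits 21..12 */
	u32 offset = iova & ~ISP_PAGE_MASK;			/* bits 11..0 */

	pr_debug("iova 0x%lx -> l1 %u, l2 %u, offset 0x%x\n",
		 iova, l1_idx, l2_idx, offset);
}

With 1024 entries per table and 4KB pages this covers a 32-bit IOVA space: each L1 entry spans 4MB and each L2 entry one page, which is exactly the walk l2_map() and l2_unmap() perform.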
+
+static int allocate_trash_buffer(struct ipu7_mmu *mmu)
+{
+ unsigned int n_pages = PFN_UP(IPU_MMUV2_TRASH_RANGE);
+ unsigned long iova_addr;
+ struct iova *iova;
+ unsigned int i;
+ dma_addr_t dma;
+ int ret;
+
+ /* Allocate 8MB in iova range */
+ iova = alloc_iova(&mmu->dmap->iovad, n_pages,
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
+ if (!iova) {
+ dev_err(mmu->dev, "cannot allocate iova range for trash\n");
+ return -ENOMEM;
+ }
+
+ dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
+ dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
+ ret = -ENOMEM;
+ goto out_free_iova;
+ }
+
+ mmu->pci_trash_page = dma;
+
+ /*
+ * Map the 8MB iova address range to the same physical trash page
+	 * mmu->trash_page, which was already reserved at probe time.
+ */
+ iova_addr = iova->pfn_lo;
+ for (i = 0; i < n_pages; i++) {
+ ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
+ mmu->pci_trash_page, PAGE_SIZE);
+ if (ret) {
+ dev_err(mmu->dev,
+ "mapping trash buffer range failed\n");
+ goto out_unmap;
+ }
+
+ iova_addr++;
+ }
+
+ mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
+ dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
+ mmu->mmid, (unsigned int)mmu->iova_trash_page);
+ return 0;
+
+out_unmap:
+ ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+ dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+ return ret;
+}
+
+static void __mmu_at_init(struct ipu7_mmu *mmu)
+{
+ struct ipu7_mmu_info *mmu_info;
+ unsigned int i;
+
+ mmu_info = mmu->dmap->mmu_info;
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+ unsigned int j;
+
+ /* Write page table address per MMU */
+ writel((phys_addr_t)mmu_info->l1_pt_dma,
+ mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR);
+ dev_dbg(mmu->dev, "mmu %s base was set as %x\n", mmu_hw->name,
+ readl(mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR));
+
+ /* Set info bits and axi_refill per MMU */
+ writel(mmu_hw->info_bits,
+ mmu_hw->base + MMU_REG_USER_INFO_BITS);
+ writel(mmu_hw->refill, mmu_hw->base + MMU_REG_AXI_REFILL_IF_ID);
+ writel(mmu_hw->collapse_en_bitmap,
+ mmu_hw->base + MMU_REG_COLLAPSE_ENABLE_BITMAP);
+
+ dev_dbg(mmu->dev, "mmu %s info_bits was set as %x\n",
+ mmu_hw->name,
+ readl(mmu_hw->base + MMU_REG_USER_INFO_BITS));
+
+ if (mmu_hw->at_sp_arb_cfg)
+ writel(mmu_hw->at_sp_arb_cfg,
+ mmu_hw->base + MMU_REG_AT_SP_ARB_CFG);
+
+ /* default irq configuration */
+ writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_MASK);
+ writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_ENABLE);
+
+		/* Configure the MMU TLB L1/L2 block numbers for each stream */
+ for (j = 0; j < mmu_hw->nr_l1streams; j++) {
+ writel(mmu_hw->l1_block_sz[j], mmu_hw->base +
+ mmu_hw->l1_block + 4U * j);
+ }
+
+ for (j = 0; j < mmu_hw->nr_l2streams; j++) {
+ writel(mmu_hw->l2_block_sz[j], mmu_hw->base +
+ mmu_hw->l2_block + 4U * j);
+ }
+
+ for (j = 0; j < mmu_hw->uao_p_num; j++) {
+ if (!mmu_hw->uao_p2tlb[j])
+ continue;
+ writel(mmu_hw->uao_p2tlb[j], mmu_hw->uao_base + 4U * j);
+ }
+ }
+}
+
+static void __mmu_zlx_init(struct ipu7_mmu *mmu)
+{
+ unsigned int i;
+
+ dev_dbg(mmu->dev, "mmu zlx init\n");
+
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+ unsigned int j;
+
+ dev_dbg(mmu->dev, "mmu %s zlx init\n", mmu_hw->name);
+ for (j = 0; j < IPU_ZLX_POOL_NUM; j++) {
+ if (!mmu_hw->zlx_axi_pool[j])
+ continue;
+ writel(mmu_hw->zlx_axi_pool[j],
+ mmu_hw->zlx_base + ZLX_REG_AXI_POOL + j * 0x4U);
+ }
+
+ for (j = 0; j < mmu_hw->zlx_nr; j++) {
+ if (!mmu_hw->zlx_conf[j])
+ continue;
+
+ writel(mmu_hw->zlx_conf[j],
+ mmu_hw->zlx_base + ZLX_REG_CONF + j * 0x8U);
+ }
+
+ for (j = 0; j < mmu_hw->zlx_nr; j++) {
+ if (!mmu_hw->zlx_en[j])
+ continue;
+
+ writel(mmu_hw->zlx_en[j],
+ mmu_hw->zlx_base + ZLX_REG_EN + j * 0x8U);
+ }
+ }
+}
+
+int ipu7_mmu_hw_init(struct ipu7_mmu *mmu)
+{
+ unsigned long flags;
+
+ dev_dbg(mmu->dev, "IPU mmu hardware init\n");
+
+	/* Initialise each MMU and ZLX */
+ __mmu_at_init(mmu);
+ __mmu_zlx_init(mmu);
+
+ if (!mmu->trash_page) {
+ int ret;
+
+ mmu->trash_page = alloc_page(GFP_KERNEL);
+ if (!mmu->trash_page) {
+ dev_err(mmu->dev, "insufficient memory for trash buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = allocate_trash_buffer(mmu);
+ if (ret) {
+ __free_page(mmu->trash_page);
+ mmu->trash_page = NULL;
+ dev_err(mmu->dev, "trash buffer allocation failed\n");
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = true;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_init, "INTEL_IPU7");
+
+static struct ipu7_mmu_info *ipu7_mmu_alloc(struct ipu7_device *isp)
+{
+ struct ipu7_mmu_info *mmu_info;
+ int ret;
+
+ mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
+ if (!mmu_info)
+ return NULL;
+
+ if (isp->secure_mode) {
+ mmu_info->aperture_start = IPU_FW_CODE_REGION_END;
+ mmu_info->aperture_end =
+ (dma_addr_t)DMA_BIT_MASK(IPU_MMU_ADDR_BITS);
+ } else {
+ mmu_info->aperture_start = IPU_FW_CODE_REGION_START;
+ mmu_info->aperture_end =
+ (dma_addr_t)DMA_BIT_MASK(IPU_MMU_ADDR_BITS_NON_SECURE);
+ }
+
+ mmu_info->pgsize_bitmap = SZ_4K;
+ mmu_info->dev = &isp->pdev->dev;
+
+ ret = get_dummy_page(mmu_info);
+ if (ret)
+ goto err_free_info;
+
+ ret = alloc_dummy_l2_pt(mmu_info);
+ if (ret)
+ goto err_free_dummy_page;
+
+ mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
+ if (!mmu_info->l2_pts)
+ goto err_free_dummy_l2_pt;
+
+ /*
+	 * We always map the L1 page table (a single page), as well as
+	 * the L2 page tables.
+ */
+ mmu_info->l1_pt = alloc_l1_pt(mmu_info);
+ if (!mmu_info->l1_pt)
+ goto err_free_l2_pts;
+
+ spin_lock_init(&mmu_info->lock);
+
+ dev_dbg(mmu_info->dev, "domain initialised\n");
+
+ return mmu_info;
+
+err_free_l2_pts:
+ vfree(mmu_info->l2_pts);
+err_free_dummy_l2_pt:
+ free_dummy_l2_pt(mmu_info);
+err_free_dummy_page:
+ free_dummy_page(mmu_info);
+err_free_info:
+ kfree(mmu_info);
+
+ return NULL;
+}
+
+void ipu7_mmu_hw_cleanup(struct ipu7_mmu *mmu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = false;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_cleanup, "INTEL_IPU7");
+
+static struct ipu7_dma_mapping *alloc_dma_mapping(struct ipu7_device *isp)
+{
+ struct ipu7_dma_mapping *dmap;
+ unsigned long base_pfn;
+
+ dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
+ if (!dmap)
+ return NULL;
+
+ dmap->mmu_info = ipu7_mmu_alloc(isp);
+ if (!dmap->mmu_info) {
+ kfree(dmap);
+ return NULL;
+ }
+
+	/* The range 0 - 64MB is forbidden for the uctile controller */
+ base_pfn = max_t(unsigned long, 1,
+ PFN_DOWN(dmap->mmu_info->aperture_start));
+ init_iova_domain(&dmap->iovad, SZ_4K, base_pfn);
+ dmap->mmu_info->dmap = dmap;
+
+ dev_dbg(&isp->pdev->dev, "alloc mapping\n");
+
+ iova_cache_get();
+
+ return dmap;
+}
+
+phys_addr_t ipu7_mmu_iova_to_phys(struct ipu7_mmu_info *mmu_info,
+ dma_addr_t iova)
+{
+ phys_addr_t phy_addr;
+ unsigned long flags;
+ u32 *l2_pt;
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
+ phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
+ phy_addr <<= ISP_PAGE_SHIFT;
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return phy_addr;
+}
+
+void ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova,
+ size_t size)
+{
+ unsigned int min_pagesz;
+
+ dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1U << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * The virtual address and the size of the mapping must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ dev_err(mmu_info->dev,
+ "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, min_pagesz);
+ return;
+ }
+
+ __ipu7_mmu_unmap(mmu_info, iova, size);
+}
+
+int ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ unsigned int min_pagesz;
+
+ if (mmu_info->pgsize_bitmap == 0UL)
+ return -ENODEV;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1U << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ dev_err(mmu_info->dev,
+ "unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
+ iova, &paddr, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
+
+ return __ipu7_mmu_map(mmu_info, iova, paddr, size);
+}
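A minimal usage sketch for the map/unmap pair above, assuming the caller already holds a valid mmu_info (typically mmu->dmap->mmu_info) and a page-aligned physical address; the function name and fixed IOVA below are hypothetical:

static int example_map_one_page(struct ipu7_mmu_info *mmu_info,
				phys_addr_t paddr)
{
	unsigned long iova = 0x10000000UL;	/* hypothetical, page aligned */
	int ret;

	ret = ipu7_mmu_map(mmu_info, iova, paddr, PAGE_SIZE);
	if (ret)
		return ret;

	/* The translation can be checked with the lookup helper above. */
	WARN_ON(ipu7_mmu_iova_to_phys(mmu_info, iova) != paddr);

	ipu7_mmu_unmap(mmu_info, iova, PAGE_SIZE);
	return 0;
}

In the driver the real callers sit in the ipu7-dma layer, which also allocates the IOVA from the iova domain and is expected to trigger mmu->tlb_invalidate() afterwards.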
+
+static void ipu7_mmu_destroy(struct ipu7_mmu *mmu)
+{
+ struct ipu7_dma_mapping *dmap = mmu->dmap;
+ struct ipu7_mmu_info *mmu_info = dmap->mmu_info;
+ struct iova *iova;
+ u32 l1_idx;
+
+ if (mmu->iova_trash_page) {
+ iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
+ if (iova) {
+ /* unmap and free the trash buffer iova */
+ ipu7_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
+ PFN_PHYS(iova_size(iova)));
+ __free_iova(&dmap->iovad, iova);
+ } else {
+ dev_err(mmu->dev, "trash buffer iova not found.\n");
+ }
+
+ mmu->iova_trash_page = 0;
+ dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ mmu->pci_trash_page = 0;
+ __free_page(mmu->trash_page);
+ }
+
+ for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
+ }
+ }
+
+ vfree(mmu_info->l2_pts);
+ free_dummy_page(mmu_info);
+ dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+ free_page((unsigned long)mmu_info->l1_pt);
+ kfree(mmu_info);
+}
+
+struct ipu7_mmu *ipu7_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu7_hw_variants *hw)
+{
+ struct ipu7_device *isp = pci_get_drvdata(to_pci_dev(dev));
+ struct ipu7_mmu_pdata *pdata;
+ struct ipu7_mmu *mmu;
+ unsigned int i;
+
+ if (hw->nr_mmus > IPU_MMU_MAX_NUM)
+ return ERR_PTR(-EINVAL);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < hw->nr_mmus; i++) {
+ struct ipu7_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
+ const struct ipu7_mmu_hw *src_mmu = &hw->mmu_hw[i];
+
+ if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
+ src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
+ return ERR_PTR(-EINVAL);
+
+ *pdata_mmu = *src_mmu;
+ pdata_mmu->base = base + src_mmu->offset;
+ pdata_mmu->zlx_base = base + src_mmu->zlx_offset;
+ pdata_mmu->uao_base = base + src_mmu->uao_offset;
+ }
+
+ mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return ERR_PTR(-ENOMEM);
+
+ mmu->mmid = mmid;
+ mmu->mmu_hw = pdata->mmu_hw;
+ mmu->nr_mmus = hw->nr_mmus;
+ mmu->tlb_invalidate = tlb_invalidate;
+ mmu->ready = false;
+ INIT_LIST_HEAD(&mmu->vma_list);
+ spin_lock_init(&mmu->ready_lock);
+
+ mmu->dmap = alloc_dma_mapping(isp);
+ if (!mmu->dmap) {
+ dev_err(dev, "can't alloc dma mapping\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mmu;
+}
+
+void ipu7_mmu_cleanup(struct ipu7_mmu *mmu)
+{
+ struct ipu7_dma_mapping *dmap = mmu->dmap;
+
+ ipu7_mmu_destroy(mmu);
+ mmu->dmap = NULL;
+ iova_cache_put();
+ put_iova_domain(&dmap->iovad);
+ kfree(dmap);
+}
diff --git a/drivers/staging/media/ipu7/ipu7-mmu.h b/drivers/staging/media/ipu7/ipu7-mmu.h
new file mode 100644
index 000000000000..d85bb8ffc711
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-mmu.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_MMU_H
+#define IPU7_MMU_H
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+struct device;
+struct page;
+struct ipu7_hw_variants;
+struct ipu7_mmu;
+struct ipu7_mmu_info;
+
+#define ISYS_MMID 0x1
+#define PSYS_MMID 0x0
+
+/* IPU7 for LNL */
+/* IS MMU Cmd RD */
+#define IPU7_IS_MMU_FW_RD_OFFSET 0x274000
+#define IPU7_IS_MMU_FW_RD_STREAM_NUM 3
+#define IPU7_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54
+#define IPU7_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60
+
+/* IS MMU Cmd WR */
+#define IPU7_IS_MMU_FW_WR_OFFSET 0x275000
+#define IPU7_IS_MMU_FW_WR_STREAM_NUM 3
+#define IPU7_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54
+#define IPU7_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60
+
+/* IS MMU Data WR Snoop */
+#define IPU7_IS_MMU_M0_OFFSET 0x276000
+#define IPU7_IS_MMU_M0_STREAM_NUM 8
+#define IPU7_IS_MMU_M0_L1_BLOCKNR_REG 0x54
+#define IPU7_IS_MMU_M0_L2_BLOCKNR_REG 0x74
+
+/* IS MMU Data WR ISOC */
+#define IPU7_IS_MMU_M1_OFFSET 0x277000
+#define IPU7_IS_MMU_M1_STREAM_NUM 16
+#define IPU7_IS_MMU_M1_L1_BLOCKNR_REG 0x54
+#define IPU7_IS_MMU_M1_L2_BLOCKNR_REG 0x94
+
+/* PS MMU FW RD */
+#define IPU7_PS_MMU_FW_RD_OFFSET 0x148000
+#define IPU7_PS_MMU_FW_RD_STREAM_NUM 20
+#define IPU7_PS_MMU_FW_RD_L1_BLOCKNR_REG 0x54
+#define IPU7_PS_MMU_FW_RD_L2_BLOCKNR_REG 0xa4
+
+/* PS MMU FW WR */
+#define IPU7_PS_MMU_FW_WR_OFFSET 0x149000
+#define IPU7_PS_MMU_FW_WR_STREAM_NUM 10
+#define IPU7_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54
+#define IPU7_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x7c
+
+/* PS MMU FW Data RD VC0 */
+#define IPU7_PS_MMU_SRT_RD_OFFSET 0x14a000
+#define IPU7_PS_MMU_SRT_RD_STREAM_NUM 40
+#define IPU7_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54
+#define IPU7_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xf4
+
+/* PS MMU FW Data WR VC0 */
+#define IPU7_PS_MMU_SRT_WR_OFFSET 0x14b000
+#define IPU7_PS_MMU_SRT_WR_STREAM_NUM 40
+#define IPU7_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54
+#define IPU7_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xf4
+
+/* IS UAO UC RD */
+#define IPU7_IS_UAO_UC_RD_OFFSET 0x27c000
+#define IPU7_IS_UAO_UC_RD_PLANENUM 4
+
+/* IS UAO UC WR */
+#define IPU7_IS_UAO_UC_WR_OFFSET 0x27d000
+#define IPU7_IS_UAO_UC_WR_PLANENUM 4
+
+/* IS UAO M0 WR */
+#define IPU7_IS_UAO_M0_WR_OFFSET 0x27e000
+#define IPU7_IS_UAO_M0_WR_PLANENUM 8
+
+/* IS UAO M1 WR */
+#define IPU7_IS_UAO_M1_WR_OFFSET 0x27f000
+#define IPU7_IS_UAO_M1_WR_PLANENUM 16
+
+/* PS UAO FW RD */
+#define IPU7_PS_UAO_FW_RD_OFFSET 0x156000
+#define IPU7_PS_UAO_FW_RD_PLANENUM 20
+
+/* PS UAO FW WR */
+#define IPU7_PS_UAO_FW_WR_OFFSET 0x157000
+#define IPU7_PS_UAO_FW_WR_PLANENUM 16
+
+/* PS UAO SRT RD */
+#define IPU7_PS_UAO_SRT_RD_OFFSET 0x154000
+#define IPU7_PS_UAO_SRT_RD_PLANENUM 40
+
+/* PS UAO SRT WR */
+#define IPU7_PS_UAO_SRT_WR_OFFSET 0x155000
+#define IPU7_PS_UAO_SRT_WR_PLANENUM 40
+
+#define IPU7_IS_ZLX_UC_RD_OFFSET 0x278000
+#define IPU7_IS_ZLX_UC_WR_OFFSET 0x279000
+#define IPU7_IS_ZLX_M0_OFFSET 0x27a000
+#define IPU7_IS_ZLX_M1_OFFSET 0x27b000
+#define IPU7_IS_ZLX_UC_RD_NUM 4
+#define IPU7_IS_ZLX_UC_WR_NUM 4
+#define IPU7_IS_ZLX_M0_NUM 8
+#define IPU7_IS_ZLX_M1_NUM 16
+
+#define IPU7_PS_ZLX_DATA_RD_OFFSET 0x14e000
+#define IPU7_PS_ZLX_DATA_WR_OFFSET 0x14f000
+#define IPU7_PS_ZLX_FW_RD_OFFSET 0x150000
+#define IPU7_PS_ZLX_FW_WR_OFFSET 0x151000
+#define IPU7_PS_ZLX_DATA_RD_NUM 32
+#define IPU7_PS_ZLX_DATA_WR_NUM 32
+#define IPU7_PS_ZLX_FW_RD_NUM 16
+#define IPU7_PS_ZLX_FW_WR_NUM 10
+
+/* IPU7P5 for PTL */
+/* IS MMU Cmd RD */
+#define IPU7P5_IS_MMU_FW_RD_OFFSET 0x274000
+#define IPU7P5_IS_MMU_FW_RD_STREAM_NUM 3
+#define IPU7P5_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54
+#define IPU7P5_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60
+
+/* IS MMU Cmd WR */
+#define IPU7P5_IS_MMU_FW_WR_OFFSET 0x275000
+#define IPU7P5_IS_MMU_FW_WR_STREAM_NUM 3
+#define IPU7P5_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54
+#define IPU7P5_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60
+
+/* IS MMU Data WR Snoop */
+#define IPU7P5_IS_MMU_M0_OFFSET 0x276000
+#define IPU7P5_IS_MMU_M0_STREAM_NUM 16
+#define IPU7P5_IS_MMU_M0_L1_BLOCKNR_REG 0x54
+#define IPU7P5_IS_MMU_M0_L2_BLOCKNR_REG 0x94
+
+/* IS MMU Data WR ISOC */
+#define IPU7P5_IS_MMU_M1_OFFSET 0x277000
+#define IPU7P5_IS_MMU_M1_STREAM_NUM 16
+#define IPU7P5_IS_MMU_M1_L1_BLOCKNR_REG 0x54
+#define IPU7P5_IS_MMU_M1_L2_BLOCKNR_REG 0x94
+
+/* PS MMU FW RD */
+#define IPU7P5_PS_MMU_FW_RD_OFFSET 0x148000
+#define IPU7P5_PS_MMU_FW_RD_STREAM_NUM 16
+#define IPU7P5_PS_MMU_FW_RD_L1_BLOCKNR_REG 0x54
+#define IPU7P5_PS_MMU_FW_RD_L2_BLOCKNR_REG 0x94
+
+/* PS MMU FW WR */
+#define IPU7P5_PS_MMU_FW_WR_OFFSET 0x149000
+#define IPU7P5_PS_MMU_FW_WR_STREAM_NUM 10
+#define IPU7P5_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54
+#define IPU7P5_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x7c
+
+/* PS MMU FW Data RD VC0 */
+#define IPU7P5_PS_MMU_SRT_RD_OFFSET 0x14a000
+#define IPU7P5_PS_MMU_SRT_RD_STREAM_NUM 22
+#define IPU7P5_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54
+#define IPU7P5_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xac
+
+/* PS MMU FW Data WR VC0 */
+#define IPU7P5_PS_MMU_SRT_WR_OFFSET 0x14b000
+#define IPU7P5_PS_MMU_SRT_WR_STREAM_NUM 32
+#define IPU7P5_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54
+#define IPU7P5_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xd4
+
+/* IS UAO UC RD */
+#define IPU7P5_IS_UAO_UC_RD_OFFSET 0x27c000
+#define IPU7P5_IS_UAO_UC_RD_PLANENUM 4
+
+/* IS UAO UC WR */
+#define IPU7P5_IS_UAO_UC_WR_OFFSET 0x27d000
+#define IPU7P5_IS_UAO_UC_WR_PLANENUM 4
+
+/* IS UAO M0 WR */
+#define IPU7P5_IS_UAO_M0_WR_OFFSET 0x27e000
+#define IPU7P5_IS_UAO_M0_WR_PLANENUM 16
+
+/* IS UAO M1 WR */
+#define IPU7P5_IS_UAO_M1_WR_OFFSET 0x27f000
+#define IPU7P5_IS_UAO_M1_WR_PLANENUM 16
+
+/* PS UAO FW RD */
+#define IPU7P5_PS_UAO_FW_RD_OFFSET 0x156000
+#define IPU7P5_PS_UAO_FW_RD_PLANENUM 16
+
+/* PS UAO FW WR */
+#define IPU7P5_PS_UAO_FW_WR_OFFSET 0x157000
+#define IPU7P5_PS_UAO_FW_WR_PLANENUM 10
+
+/* PS UAO SRT RD */
+#define IPU7P5_PS_UAO_SRT_RD_OFFSET 0x154000
+#define IPU7P5_PS_UAO_SRT_RD_PLANENUM 22
+
+/* PS UAO SRT WR */
+#define IPU7P5_PS_UAO_SRT_WR_OFFSET 0x155000
+#define IPU7P5_PS_UAO_SRT_WR_PLANENUM 32
+
+#define IPU7P5_IS_ZLX_UC_RD_OFFSET 0x278000
+#define IPU7P5_IS_ZLX_UC_WR_OFFSET 0x279000
+#define IPU7P5_IS_ZLX_M0_OFFSET 0x27a000
+#define IPU7P5_IS_ZLX_M1_OFFSET 0x27b000
+#define IPU7P5_IS_ZLX_UC_RD_NUM 4
+#define IPU7P5_IS_ZLX_UC_WR_NUM 4
+#define IPU7P5_IS_ZLX_M0_NUM 16
+#define IPU7P5_IS_ZLX_M1_NUM 16
+
+#define IPU7P5_PS_ZLX_DATA_RD_OFFSET 0x14e000
+#define IPU7P5_PS_ZLX_DATA_WR_OFFSET 0x14f000
+#define IPU7P5_PS_ZLX_FW_RD_OFFSET 0x150000
+#define IPU7P5_PS_ZLX_FW_WR_OFFSET 0x151000
+#define IPU7P5_PS_ZLX_DATA_RD_NUM 22
+#define IPU7P5_PS_ZLX_DATA_WR_NUM 32
+#define IPU7P5_PS_ZLX_FW_RD_NUM 16
+#define IPU7P5_PS_ZLX_FW_WR_NUM 10
+
+/* IS MMU Cmd RD */
+#define IPU8_IS_MMU_FW_RD_OFFSET 0x270000
+#define IPU8_IS_MMU_FW_RD_STREAM_NUM 3
+#define IPU8_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54
+#define IPU8_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60
+
+/* IS MMU Cmd WR */
+#define IPU8_IS_MMU_FW_WR_OFFSET 0x271000
+#define IPU8_IS_MMU_FW_WR_STREAM_NUM 3
+#define IPU8_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54
+#define IPU8_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60
+
+/* IS MMU Data WR Snoop */
+#define IPU8_IS_MMU_M0_OFFSET 0x272000
+#define IPU8_IS_MMU_M0_STREAM_NUM 16
+#define IPU8_IS_MMU_M0_L1_BLOCKNR_REG 0x54
+#define IPU8_IS_MMU_M0_L2_BLOCKNR_REG 0x94
+
+/* IS MMU Data WR ISOC */
+#define IPU8_IS_MMU_M1_OFFSET 0x273000
+#define IPU8_IS_MMU_M1_STREAM_NUM 16
+#define IPU8_IS_MMU_M1_L1_BLOCKNR_REG 0x54
+#define IPU8_IS_MMU_M1_L2_BLOCKNR_REG 0x94
+
+/* IS MMU UPIPE ISOC */
+#define IPU8_IS_MMU_UPIPE_OFFSET 0x274000
+#define IPU8_IS_MMU_UPIPE_STREAM_NUM 6
+#define IPU8_IS_MMU_UPIPE_L1_BLOCKNR_REG 0x54
+#define IPU8_IS_MMU_UPIPE_L2_BLOCKNR_REG 0x6c
+
+/* PS MMU FW RD */
+#define IPU8_PS_MMU_FW_RD_OFFSET 0x148000
+#define IPU8_PS_MMU_FW_RD_STREAM_NUM 12
+#define IPU8_PS_MMU_FW_RD_L1_BLOCKNR_REG 0x54
+#define IPU8_PS_MMU_FW_RD_L2_BLOCKNR_REG 0x84
+
+/* PS MMU FW WR */
+#define IPU8_PS_MMU_FW_WR_OFFSET 0x149000
+#define IPU8_PS_MMU_FW_WR_STREAM_NUM 8
+#define IPU8_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54
+#define IPU8_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x74
+
+/* PS MMU FW Data RD VC0 */
+#define IPU8_PS_MMU_SRT_RD_OFFSET 0x14a000
+#define IPU8_PS_MMU_SRT_RD_STREAM_NUM 26
+#define IPU8_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54
+#define IPU8_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xbc
+
+/* PS MMU FW Data WR VC0 */
+#define IPU8_PS_MMU_SRT_WR_OFFSET 0x14b000
+#define IPU8_PS_MMU_SRT_WR_STREAM_NUM 26
+#define IPU8_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54
+#define IPU8_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xbc
+
+/* IS UAO UC RD */
+#define IPU8_IS_UAO_UC_RD_OFFSET 0x27a000
+#define IPU8_IS_UAO_UC_RD_PLANENUM 4
+
+/* IS UAO UC WR */
+#define IPU8_IS_UAO_UC_WR_OFFSET 0x27b000
+#define IPU8_IS_UAO_UC_WR_PLANENUM 4
+
+/* IS UAO M0 WR */
+#define IPU8_IS_UAO_M0_WR_OFFSET 0x27c000
+#define IPU8_IS_UAO_M0_WR_PLANENUM 16
+
+/* IS UAO M1 WR */
+#define IPU8_IS_UAO_M1_WR_OFFSET 0x27d000
+#define IPU8_IS_UAO_M1_WR_PLANENUM 16
+
+/* IS UAO UPIPE */
+#define IPU8_IS_UAO_UPIPE_OFFSET 0x27e000
+#define IPU8_IS_UAO_UPIPE_PLANENUM 6
+
+/* PS UAO FW RD */
+#define IPU8_PS_UAO_FW_RD_OFFSET 0x156000
+#define IPU8_PS_UAO_FW_RD_PLANENUM 12
+
+/* PS UAO FW WR */
+#define IPU8_PS_UAO_FW_WR_OFFSET 0x157000
+#define IPU8_PS_UAO_FW_WR_PLANENUM 8
+
+/* PS UAO SRT RD */
+#define IPU8_PS_UAO_SRT_RD_OFFSET 0x154000
+#define IPU8_PS_UAO_SRT_RD_PLANENUM 26
+
+/* PS UAO SRT WR */
+#define IPU8_PS_UAO_SRT_WR_OFFSET 0x155000
+#define IPU8_PS_UAO_SRT_WR_PLANENUM 26
+
+#define IPU8_IS_ZLX_UC_RD_OFFSET 0x275000
+#define IPU8_IS_ZLX_UC_WR_OFFSET 0x276000
+#define IPU8_IS_ZLX_M0_OFFSET 0x277000
+#define IPU8_IS_ZLX_M1_OFFSET 0x278000
+#define IPU8_IS_ZLX_UPIPE_OFFSET 0x279000
+#define IPU8_IS_ZLX_UC_RD_NUM 4
+#define IPU8_IS_ZLX_UC_WR_NUM 4
+#define IPU8_IS_ZLX_M0_NUM 16
+#define IPU8_IS_ZLX_M1_NUM 16
+#define IPU8_IS_ZLX_UPIPE_NUM 6
+
+#define IPU8_PS_ZLX_DATA_RD_OFFSET 0x14e000
+#define IPU8_PS_ZLX_DATA_WR_OFFSET 0x14f000
+#define IPU8_PS_ZLX_FW_RD_OFFSET 0x150000
+#define IPU8_PS_ZLX_FW_WR_OFFSET 0x151000
+#define IPU8_PS_ZLX_DATA_RD_NUM 26
+#define IPU8_PS_ZLX_DATA_WR_NUM 26
+#define IPU8_PS_ZLX_FW_RD_NUM 12
+#define IPU8_PS_ZLX_FW_WR_NUM 8
+
+#define MMU_REG_INVALIDATE_0 0x00
+#define MMU_REG_INVALIDATE_1 0x04
+#define MMU_REG_PAGE_TABLE_BASE_ADDR 0x08
+#define MMU_REG_USER_INFO_BITS 0x0c
+#define MMU_REG_AXI_REFILL_IF_ID 0x10
+#define MMU_REG_PW_EN_BITMAP 0x14
+#define MMU_REG_COLLAPSE_ENABLE_BITMAP 0x18
+#define MMU_REG_GENERAL_REG 0x1c
+#define MMU_REG_AT_SP_ARB_CFG 0x20
+#define MMU_REG_INVALIDATION_STATUS 0x24
+#define MMU_REG_IRQ_LEVEL_NO_PULSE 0x28
+#define MMU_REG_IRQ_MASK 0x2c
+#define MMU_REG_IRQ_ENABLE 0x30
+#define MMU_REG_IRQ_EDGE 0x34
+#define MMU_REG_IRQ_CLEAR 0x38
+#define MMU_REG_IRQ_CAUSE 0x3c
+#define MMU_REG_CG_CTRL_BITS 0x40
+#define MMU_REG_RD_FIFOS_STATUS 0x44
+#define MMU_REG_WR_FIFOS_STATUS 0x48
+#define MMU_REG_COMMON_FIFOS_STATUS 0x4c
+#define MMU_REG_FSM_STATUS 0x50
+
+#define ZLX_REG_AXI_POOL 0x0
+#define ZLX_REG_EN 0x20
+#define ZLX_REG_CONF 0x24
+#define ZLX_REG_CG_CTRL 0x900
+#define ZLX_REG_FORCE_BYPASS 0x904
+
+struct ipu7_mmu_info {
+ struct device *dev;
+
+ u32 *l1_pt;
+ u32 l1_pt_dma;
+ u32 **l2_pts;
+
+ u32 *dummy_l2_pt;
+ u32 dummy_l2_pteval;
+ void *dummy_page;
+ u32 dummy_page_pteval;
+
+ dma_addr_t aperture_start;
+ dma_addr_t aperture_end;
+ unsigned long pgsize_bitmap;
+
+	spinlock_t lock;	/* Serialize page table access */
+ struct ipu7_dma_mapping *dmap;
+};
+
+struct ipu7_mmu {
+ struct list_head node;
+
+ struct ipu7_mmu_hw *mmu_hw;
+ unsigned int nr_mmus;
+ unsigned int mmid;
+
+ phys_addr_t pgtbl;
+ struct device *dev;
+
+ struct ipu7_dma_mapping *dmap;
+ struct list_head vma_list;
+
+ struct page *trash_page;
+ dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */
+ dma_addr_t iova_trash_page; /* IOVA for IPU child nodes to use */
+
+ bool ready;
+ spinlock_t ready_lock; /* Serialize access to bool ready */
+
+ void (*tlb_invalidate)(struct ipu7_mmu *mmu);
+};
+
+struct ipu7_mmu *ipu7_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu7_hw_variants *hw);
+void ipu7_mmu_cleanup(struct ipu7_mmu *mmu);
+int ipu7_mmu_hw_init(struct ipu7_mmu *mmu);
+void ipu7_mmu_hw_cleanup(struct ipu7_mmu *mmu);
+int ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size);
+void ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova,
+ size_t size);
+phys_addr_t ipu7_mmu_iova_to_phys(struct ipu7_mmu_info *mmu_info,
+ dma_addr_t iova);
+#endif
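A rough lifecycle sketch for this interface: ipu7_mmu_init() only builds the page tables and the iova domain, while ipu7_mmu_hw_init() programs the hardware and sets the ready flag that tlb_invalidate() checks, so it must run before any invalidation is expected to take effect. The probe-style flow and names outside this header are assumptions:

static int example_mmu_lifecycle(struct device *dev, void __iomem *base,
				 const struct ipu7_hw_variants *hw)
{
	struct ipu7_mmu *mmu;
	int ret;

	mmu = ipu7_mmu_init(dev, base, ISYS_MMID, hw);
	if (IS_ERR(mmu))
		return PTR_ERR(mmu);

	ret = ipu7_mmu_hw_init(mmu);	/* page table base, ZLX, UAO, trash page */
	if (ret)
		goto out_cleanup;

	/* ... ipu7_mmu_map()/ipu7_mmu_unmap() on mmu->dmap->mmu_info ... */

	ipu7_mmu_hw_cleanup(mmu);	/* clears the ready flag */
out_cleanup:
	ipu7_mmu_cleanup(mmu);		/* frees page tables and the iova domain */
	return ret;
}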
diff --git a/drivers/staging/media/ipu7/ipu7-platform-regs.h b/drivers/staging/media/ipu7/ipu7-platform-regs.h
new file mode 100644
index 000000000000..eeadc886a8cf
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-platform-regs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_PLATFORM_REGS_H
+#define IPU7_PLATFORM_REGS_H
+
+#define IS_BASE 0x230000
+#define IS_UC_CTRL_BASE (IS_BASE + 0x0)
+
+#define PS_BASE 0x130000
+#define PS_UC_CTRL_BASE (PS_BASE + 0x0)
+
+/*
+ * bit 0: IRQ from FW,
+ * bits 1, 2 and 3: IRQ from HW
+ */
+#define TO_SW_IRQ_MASK 0xf
+#define TO_SW_IRQ_FW BIT(0)
+
+#define FW_CODE_BASE 0x0
+#define FW_DATA_BASE 0x4
+#define PRINTF_EN_THROUGH_TRACE 0x3004
+#define PRINTF_EN_DIRECTLY_TO_DDR 0x3008
+#define PRINTF_DDR_BASE_ADDR 0x300c
+#define PRINTF_DDR_SIZE 0x3010
+#define PRINTF_DDR_NEXT_ADDR 0x3014
+#define PRINTF_STATUS 0x3018
+#define PRINTF_AXI_CNTL 0x301c
+#define PRINTF_MSG_LENGTH 0x3020
+#define TO_SW_IRQ_CNTL_EDGE 0x4000
+#define TO_SW_IRQ_CNTL_MASK_N 0x4004
+#define TO_SW_IRQ_CNTL_STATUS 0x4008
+#define TO_SW_IRQ_CNTL_CLEAR 0x400c
+#define TO_SW_IRQ_CNTL_ENABLE 0x4010
+#define TO_SW_IRQ_CNTL_LEVEL_NOT_PULSE 0x4014
+#define ERR_IRQ_CNTL_EDGE 0x4018
+#define ERR_IRQ_CNTL_MASK_N 0x401c
+#define ERR_IRQ_CNTL_STATUS 0x4020
+#define ERR_IRQ_CNTL_CLEAR 0x4024
+#define ERR_IRQ_CNTL_ENABLE 0x4028
+#define ERR_IRQ_CNTL_LEVEL_NOT_PULSE 0x402c
+#define LOCAL_DMEM_BASE_ADDR 0x1300000
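The TO_SW_* offsets above are applied on top of IS_BASE (or PS_BASE for the processing system). A condensed sketch of how isys_isr() in ipu7-isys.c drives them, kept only to show how the offsets compose (illustrative; no locking or re-check loop):

/* Read, acknowledge and re-arm the IS "to SW" interrupt. */
static u32 example_ack_to_sw_irq(void __iomem *base)
{
	u32 status = readl(base + IS_BASE + TO_SW_IRQ_CNTL_STATUS);

	writel(status, base + IS_BASE + TO_SW_IRQ_CNTL_CLEAR);
	writel(TO_SW_IRQ_MASK, base + IS_BASE + TO_SW_IRQ_CNTL_MASK_N);

	return status & TO_SW_IRQ_FW;	/* non-zero: the firmware raised the IRQ */
}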
+
+/*
+ * IS_UC_TO_SW IRQs
+ * bit 0: IRQ from local FW
+ * bits 1-3: IRQ from HW
+ */
+#define IS_UC_TO_SW_IRQ_MASK 0xf
+
+#define IPU_ISYS_SPC_OFFSET 0x210000
+#define IPU7_PSYS_SPC_OFFSET 0x118000
+#define IPU_ISYS_DMEM_OFFSET 0x200000
+#define IPU_PSYS_DMEM_OFFSET 0x100000
+
+#define IPU7_ISYS_CSI_PORT_NUM 4
+
+/* IRQ-related registers in PSYS */
+#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_EDGE 0x134000
+#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_MASK 0x134004
+#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_STATUS 0x134008
+#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_CLEAR 0x13400c
+#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_ENABLE 0x134010
+#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_LEVEL_NOT_PULSE 0x134014
+#define IRQ_FROM_LOCAL_FW BIT(0)
+
+/*
+ * PSYS subdomain power request registers
+ */
+enum ipu7_device_buttress_psys_domain_pos {
+ IPU_PSYS_SUBDOMAIN_LB = 0,
+ IPU_PSYS_SUBDOMAIN_BB = 1,
+};
+
+#define IPU7_PSYS_DOMAIN_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_LB) | \
+ BIT(IPU_PSYS_SUBDOMAIN_BB))
+#define IPU8_PSYS_DOMAIN_POWER_MASK BIT(IPU_PSYS_SUBDOMAIN_LB)
+#define IPU_PSYS_DOMAIN_POWER_IN_PROGRESS BIT(31)
+
+#endif /* IPU7_PLATFORM_REGS_H */
diff --git a/drivers/staging/media/ipu7/ipu7-syscom.c b/drivers/staging/media/ipu7/ipu7-syscom.c
new file mode 100644
index 000000000000..3f9f9c5c3cce
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-syscom.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/export.h>
+#include <linux/io.h>
+
+#include "abi/ipu7_fw_syscom_abi.h"
+
+#include "ipu7.h"
+#include "ipu7-syscom.h"
+
+static void __iomem *ipu7_syscom_get_indices(struct ipu7_syscom_context *ctx,
+ u32 q)
+{
+ return ctx->queue_indices + (q * sizeof(struct syscom_queue_indices_s));
+}
+
+void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q)
+{
+ struct syscom_queue_config *queue_params = &ctx->queue_configs[q];
+ void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q);
+ u32 write_index = readl(queue_indices +
+ offsetof(struct syscom_queue_indices_s,
+ write_index));
+ u32 read_index = readl(queue_indices +
+ offsetof(struct syscom_queue_indices_s,
+ read_index));
+ void *token = NULL;
+
+ if (q < ctx->num_output_queues) {
+ /* Output queue */
+ bool empty = (write_index == read_index);
+
+ if (!empty)
+ token = queue_params->token_array_mem +
+ read_index *
+ queue_params->token_size_in_bytes;
+ } else {
+ /* Input queue */
+ bool full = (read_index == ((write_index + 1U) %
+ (u32)queue_params->max_capacity));
+
+ if (!full)
+ token = queue_params->token_array_mem +
+ write_index * queue_params->token_size_in_bytes;
+ }
+ return token;
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_token, "INTEL_IPU7");
+
+void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q)
+{
+ struct syscom_queue_config *queue_params = &ctx->queue_configs[q];
+ void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q);
+ u32 offset, index;
+
+ if (q < ctx->num_output_queues)
+ /* Output queue */
+ offset = offsetof(struct syscom_queue_indices_s, read_index);
+
+ else
+ /* Input queue */
+ offset = offsetof(struct syscom_queue_indices_s, write_index);
+
+ index = readl(queue_indices + offset);
+ writel((index + 1U) % queue_params->max_capacity,
+ queue_indices + offset);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_syscom_put_token, "INTEL_IPU7");
+
+struct syscom_queue_params_config *
+ipu7_syscom_get_queue_config(struct syscom_config_s *config)
+{
+ return (struct syscom_queue_params_config *)(&config[1]);
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_queue_config, "INTEL_IPU7");
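The two token helpers above implement a classic one-slot-free ring buffer over the indices kept in queue_indices: with max_capacity slots the queue reads as full when read_index == (write_index + 1) % max_capacity, so at most max_capacity - 1 tokens are ever in flight. A minimal sketch of the host-side send path, with a hypothetical function name and payload (the real callers are in the fw/isys glue, not in this file):

/* Queue one token on an input (host to firmware) queue. */
static int example_send_token(struct ipu7_syscom_context *ctx, int q,
			      const void *payload, size_t size)
{
	void *token = ipu7_syscom_get_token(ctx, q);

	if (!token)			/* input queue is full */
		return -EBUSY;

	memcpy(token, payload, size);	/* size <= token_size_in_bytes */
	ipu7_syscom_put_token(ctx, q);	/* publish: advance write_index */

	return 0;
}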
diff --git a/drivers/staging/media/ipu7/ipu7-syscom.h b/drivers/staging/media/ipu7/ipu7-syscom.h
new file mode 100644
index 000000000000..e1fbe3b7914e
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7-syscom.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_SYSCOM_H
+#define IPU7_SYSCOM_H
+
+#include <linux/types.h>
+
+struct syscom_config_s;
+struct syscom_queue_params_config;
+
+struct syscom_queue_config {
+ void *token_array_mem;
+ u32 queue_size;
+ u16 token_size_in_bytes;
+ u16 max_capacity;
+};
+
+struct ipu7_syscom_context {
+ u16 num_input_queues;
+ u16 num_output_queues;
+ struct syscom_queue_config *queue_configs;
+ void __iomem *queue_indices;
+ dma_addr_t queue_mem_dma_addr;
+ void *queue_mem;
+ u32 queue_mem_size;
+};
+
+void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q);
+void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q);
+struct syscom_queue_params_config *
+ipu7_syscom_get_queue_config(struct syscom_config_s *config);
+#endif
diff --git a/drivers/staging/media/ipu7/ipu7.c b/drivers/staging/media/ipu7/ipu7.c
new file mode 100644
index 000000000000..5cddc09c72bf
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7.c
@@ -0,0 +1,2778 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include <media/ipu-bridge.h>
+
+#include "abi/ipu7_fw_common_abi.h"
+
+#include "ipu7.h"
+#include "ipu7-bus.h"
+#include "ipu7-buttress.h"
+#include "ipu7-buttress-regs.h"
+#include "ipu7-cpd.h"
+#include "ipu7-dma.h"
+#include "ipu7-isys-csi2-regs.h"
+#include "ipu7-mmu.h"
+#include "ipu7-platform-regs.h"
+
+#define IPU_PCI_BAR 0
+#define IPU_PCI_PBBAR 4
+
+static const unsigned int ipu7_csi_offsets[] = {
+ IPU_CSI_PORT_A_ADDR_OFFSET,
+ IPU_CSI_PORT_B_ADDR_OFFSET,
+ IPU_CSI_PORT_C_ADDR_OFFSET,
+ IPU_CSI_PORT_D_ADDR_OFFSET,
+};
+
+static struct ipu_isys_internal_pdata ipu7p5_isys_ipdata = {
+ .csi2 = {
+ .gpreg = IS_IO_CSI2_GPREGS_BASE,
+ },
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = IPU7P5_IS_MMU_NUM,
+ .mmu_hw = {
+ {
+ .name = "IS_FW_RD",
+ .offset = IPU7P5_IS_MMU_FW_RD_OFFSET,
+ .zlx_offset = IPU7P5_IS_ZLX_UC_RD_OFFSET,
+ .uao_offset = IPU7P5_IS_UAO_UC_RD_OFFSET,
+ .info_bits = 0x20005101,
+ .refill = 0x00002726,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_IS_MMU_FW_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_IS_MMU_FW_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_IS_MMU_FW_RD_STREAM_NUM,
+ .nr_l2streams = IPU7P5_IS_MMU_FW_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x8, 0xa,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4,
+ },
+ .zlx_nr = IPU7P5_IS_ZLX_UC_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 0, 1, 0, 0
+ },
+ .zlx_conf = {
+ 0x0,
+ },
+ .uao_p_num = IPU7P5_IS_UAO_UC_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000049,
+ 0x0000004c,
+ 0x0000004d,
+ 0x00000000,
+ },
+ },
+ {
+ .name = "IS_FW_WR",
+ .offset = IPU7P5_IS_MMU_FW_WR_OFFSET,
+ .zlx_offset = IPU7P5_IS_ZLX_UC_WR_OFFSET,
+ .uao_offset = IPU7P5_IS_UAO_UC_WR_OFFSET,
+ .info_bits = 0x20005001,
+ .refill = 0x00002524,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_IS_MMU_FW_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_IS_MMU_FW_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_IS_MMU_FW_WR_STREAM_NUM,
+ .nr_l2streams = IPU7P5_IS_MMU_FW_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x8, 0xa,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4,
+ },
+ .zlx_nr = IPU7P5_IS_ZLX_UC_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 0, 1, 1, 0,
+ },
+ .zlx_conf = {
+ 0x0,
+ 0x00010101,
+ 0x00010101,
+ 0x0,
+ },
+ .uao_p_num = IPU7P5_IS_UAO_UC_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000049,
+ 0x0000004a,
+ 0x0000004b,
+ 0x00000000,
+ },
+ },
+ {
+ .name = "IS_DATA_WR_ISOC",
+ .offset = IPU7P5_IS_MMU_M0_OFFSET,
+ .zlx_offset = IPU7P5_IS_ZLX_M0_OFFSET,
+ .uao_offset = IPU7P5_IS_UAO_M0_WR_OFFSET,
+ .info_bits = 0x20004e01,
+ .refill = 0x00002120,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_IS_MMU_M0_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_IS_MMU_M0_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_IS_MMU_M0_STREAM_NUM,
+ .nr_l2streams = IPU7P5_IS_MMU_M0_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .zlx_nr = IPU7P5_IS_ZLX_M0_NUM,
+ .zlx_axi_pool = {
+ 0x00000f10,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ },
+ .zlx_conf = {
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ },
+ .uao_p_num = IPU7P5_IS_UAO_M0_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000041,
+ 0x00000042,
+ 0x00000043,
+ 0x00000044,
+ 0x00000041,
+ 0x00000042,
+ 0x00000043,
+ 0x00000044,
+ 0x00000041,
+ 0x00000042,
+ 0x00000043,
+ 0x00000044,
+ 0x00000041,
+ 0x00000042,
+ 0x00000043,
+ 0x00000044,
+ },
+ },
+ {
+ .name = "IS_DATA_WR_SNOOP",
+ .offset = IPU7P5_IS_MMU_M1_OFFSET,
+ .zlx_offset = IPU7P5_IS_ZLX_M1_OFFSET,
+ .uao_offset = IPU7P5_IS_UAO_M1_WR_OFFSET,
+ .info_bits = 0x20004f01,
+ .refill = 0x00002322,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_IS_MMU_M1_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_IS_MMU_M1_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_IS_MMU_M1_STREAM_NUM,
+ .nr_l2streams = IPU7P5_IS_MMU_M1_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .zlx_nr = IPU7P5_IS_ZLX_M1_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ },
+ .zlx_conf = {
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ },
+ .uao_p_num = IPU7P5_IS_UAO_M1_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000045,
+ 0x00000046,
+ 0x00000047,
+ 0x00000048,
+ 0x00000045,
+ 0x00000046,
+ 0x00000047,
+ 0x00000048,
+ 0x00000045,
+ 0x00000046,
+ 0x00000047,
+ 0x00000048,
+ 0x00000045,
+ 0x00000046,
+ 0x00000047,
+ 0x00000048,
+ },
+ },
+ },
+ .cdc_fifos = 3,
+ .cdc_fifo_threshold = {6, 8, 2},
+ .dmem_offset = IPU_ISYS_DMEM_OFFSET,
+ .spc_offset = IPU_ISYS_SPC_OFFSET,
+ },
+ .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN,
+};
+
+static struct ipu_psys_internal_pdata ipu7p5_psys_ipdata = {
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = IPU7P5_PS_MMU_NUM,
+ .mmu_hw = {
+ {
+ .name = "PS_FW_RD",
+ .offset = IPU7P5_PS_MMU_FW_RD_OFFSET,
+ .zlx_offset = IPU7P5_PS_ZLX_FW_RD_OFFSET,
+ .uao_offset = IPU7P5_PS_UAO_FW_RD_OFFSET,
+ .info_bits = 0x20004001,
+ .refill = 0x00002726,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_PS_MMU_FW_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_PS_MMU_FW_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_PS_MMU_FW_RD_STREAM_NUM,
+ .nr_l2streams = IPU7P5_PS_MMU_FW_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000d,
+ 0x0000000f,
+ 0x00000011,
+ 0x00000012,
+ 0x00000013,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x00000019,
+ 0x0000001a,
+ 0x0000001a,
+ 0x0000001a,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .zlx_nr = IPU7P5_PS_ZLX_FW_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 0, 1, 0, 0, 1, 1, 0, 0,
+ 0, 1, 1, 0, 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x00000000,
+ 0x00010101,
+ 0x00000000,
+ 0x00000000,
+ 0x00010101,
+ 0x00010101,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00010101,
+ 0x00010101,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ },
+ .uao_p_num = IPU7P5_PS_UAO_FW_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x0000002e,
+ 0x00000035,
+ 0x00000036,
+ 0x00000031,
+ 0x00000037,
+ 0x00000038,
+ 0x00000039,
+ 0x00000032,
+ 0x00000033,
+ 0x0000003a,
+ 0x0000003b,
+ 0x0000003c,
+ 0x00000034,
+ 0x0,
+ 0x0,
+ 0x0,
+ },
+ },
+ {
+ .name = "PS_FW_WR",
+ .offset = IPU7P5_PS_MMU_FW_WR_OFFSET,
+ .zlx_offset = IPU7P5_PS_ZLX_FW_WR_OFFSET,
+ .uao_offset = IPU7P5_PS_UAO_FW_WR_OFFSET,
+ .info_bits = 0x20003e01,
+ .refill = 0x00002322,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_PS_MMU_FW_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_PS_MMU_FW_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_PS_MMU_FW_WR_STREAM_NUM,
+ .nr_l2streams = IPU7P5_PS_MMU_FW_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000d,
+ 0x0000000e,
+ 0x0000000f,
+ 0x00000010,
+ 0x00000010,
+ 0x00000010,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ },
+ .zlx_nr = IPU7P5_PS_ZLX_FW_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x00000000,
+ 0x00010101,
+ 0x00010101,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ },
+ .uao_p_num = IPU7P5_PS_UAO_FW_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x0000002e,
+ 0x0000002f,
+ 0x00000030,
+ 0x00000031,
+ 0x00000032,
+ 0x00000033,
+ 0x00000034,
+ 0x0,
+ 0x0,
+ 0x0,
+ },
+ },
+ {
+ .name = "PS_DATA_RD",
+ .offset = IPU7P5_PS_MMU_SRT_RD_OFFSET,
+ .zlx_offset = IPU7P5_PS_ZLX_DATA_RD_OFFSET,
+ .uao_offset = IPU7P5_PS_UAO_SRT_RD_OFFSET,
+ .info_bits = 0x20003f01,
+ .refill = 0x00002524,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_PS_MMU_SRT_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_PS_MMU_SRT_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_PS_MMU_SRT_RD_STREAM_NUM,
+ .nr_l2streams = IPU7P5_PS_MMU_SRT_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000b,
+ 0x0000000d,
+ 0x0000000f,
+ 0x00000013,
+ 0x00000017,
+ 0x00000019,
+ 0x0000001b,
+ 0x0000001d,
+ 0x0000001f,
+ 0x0000002b,
+ 0x00000033,
+ 0x0000003f,
+ 0x00000047,
+ 0x00000049,
+ 0x0000004b,
+ 0x0000004c,
+ 0x0000004d,
+ 0x0000004e,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ 0x00000020,
+ 0x00000022,
+ 0x00000024,
+ 0x00000026,
+ 0x00000028,
+ 0x0000002a,
+ },
+ .zlx_nr = IPU7P5_PS_ZLX_DATA_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x00030303,
+ 0x00010101,
+ 0x00010101,
+ 0x00030202,
+ 0x00010101,
+ 0x00010101,
+ 0x00030303,
+ 0x00030303,
+ 0x00010101,
+ 0x00030800,
+ 0x00030500,
+ 0x00020101,
+ 0x00042000,
+ 0x00031000,
+ 0x00042000,
+ 0x00031000,
+ 0x00020400,
+ 0x00010101,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ },
+ .uao_p_num = IPU7P5_PS_UAO_SRT_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x0000001c,
+ 0x0000001d,
+ 0x0000001e,
+ 0x0000001f,
+ 0x00000020,
+ 0x00000021,
+ 0x00000022,
+ 0x00000023,
+ 0x00000024,
+ 0x00000025,
+ 0x00000026,
+ 0x00000027,
+ 0x00000028,
+ 0x00000029,
+ 0x0000002a,
+ 0x0000002b,
+ 0x0000002c,
+ 0x0000002d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ },
+ },
+ {
+ .name = "PS_DATA_WR",
+ .offset = IPU7P5_PS_MMU_SRT_WR_OFFSET,
+ .zlx_offset = IPU7P5_PS_ZLX_DATA_WR_OFFSET,
+ .uao_offset = IPU7P5_PS_UAO_SRT_WR_OFFSET,
+ .info_bits = 0x20003d01,
+ .refill = 0x00002120,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU7P5_PS_MMU_SRT_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU7P5_PS_MMU_SRT_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7P5_PS_MMU_SRT_WR_STREAM_NUM,
+ .nr_l2streams = IPU7P5_PS_MMU_SRT_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000006,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ 0x00000020,
+ 0x00000022,
+ 0x00000024,
+ 0x00000028,
+ 0x0000002a,
+ 0x00000036,
+ 0x0000003e,
+ 0x00000040,
+ 0x00000042,
+ 0x0000004e,
+ 0x00000056,
+ 0x0000005c,
+ 0x00000068,
+ 0x00000070,
+ 0x00000076,
+ 0x00000077,
+ 0x00000078,
+ 0x00000079,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000006,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ 0x00000020,
+ 0x00000022,
+ 0x00000024,
+ 0x00000028,
+ 0x0000002a,
+ 0x00000036,
+ 0x0000003e,
+ 0x00000040,
+ 0x00000042,
+ 0x0000004e,
+ 0x00000056,
+ 0x0000005c,
+ 0x00000068,
+ 0x00000070,
+ 0x00000076,
+ 0x00000077,
+ 0x00000078,
+ 0x00000079,
+ },
+ .zlx_nr = IPU7P5_PS_ZLX_DATA_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f50,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x00010102,
+ 0x00030103,
+ 0x00030103,
+ 0x00010101,
+ 0x00010101,
+ 0x00030101,
+ 0x00010101,
+ 0x38010101,
+ 0x00000000,
+ 0x00000000,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x00030303,
+ 0x00010101,
+ 0x00042000,
+ 0x00031000,
+ 0x00010101,
+ 0x00010101,
+ 0x00042000,
+ 0x00031000,
+ 0x00031000,
+ 0x00042000,
+ 0x00031000,
+ 0x00031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ },
+ .uao_p_num = IPU7P5_PS_UAO_SRT_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000000,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005,
+ 0x00000006,
+ 0x00000007,
+ 0x00000008,
+ 0x00000009,
+ 0x0000000a,
+ 0x0000000b,
+ 0x0000000c,
+ 0x0000000d,
+ 0x0000000e,
+ 0x0000000f,
+ 0x00000010,
+ 0x00000011,
+ 0x00000012,
+ 0x00000013,
+ 0x00000014,
+ 0x00000015,
+ 0x00000016,
+ 0x00000017,
+ 0x00000018,
+ 0x00000019,
+ 0x0000001a,
+ 0x0000001b,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ },
+ },
+ },
+ .dmem_offset = IPU_PSYS_DMEM_OFFSET,
+ },
+};
+
+static struct ipu_isys_internal_pdata ipu7_isys_ipdata = {
+ .csi2 = {
+ .gpreg = IS_IO_CSI2_GPREGS_BASE,
+ },
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = IPU7_IS_MMU_NUM,
+ .mmu_hw = {
+ {
+ .name = "IS_FW_RD",
+ .offset = IPU7_IS_MMU_FW_RD_OFFSET,
+ .zlx_offset = IPU7_IS_ZLX_UC_RD_OFFSET,
+ .uao_offset = IPU7_IS_UAO_UC_RD_OFFSET,
+ .info_bits = 0x20006701,
+ .refill = 0x00002726,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_IS_MMU_FW_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU7_IS_MMU_FW_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_IS_MMU_FW_RD_STREAM_NUM,
+ .nr_l2streams = IPU7_IS_MMU_FW_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x8, 0xa,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4,
+ },
+ .zlx_nr = IPU7_IS_ZLX_UC_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 0, 0, 0, 0
+ },
+ .zlx_conf = {
+ 0x0, 0x0, 0x0, 0x0,
+ },
+ .uao_p_num = IPU7_IS_UAO_UC_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000061,
+ 0x00000064,
+ 0x00000065,
+ },
+ },
+ {
+ .name = "IS_FW_WR",
+ .offset = IPU7_IS_MMU_FW_WR_OFFSET,
+ .zlx_offset = IPU7_IS_ZLX_UC_WR_OFFSET,
+ .uao_offset = IPU7_IS_UAO_UC_WR_OFFSET,
+ .info_bits = 0x20006801,
+ .refill = 0x00002524,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_IS_MMU_FW_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU7_IS_MMU_FW_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_IS_MMU_FW_WR_STREAM_NUM,
+ .nr_l2streams = IPU7_IS_MMU_FW_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x8, 0xa,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4,
+ },
+ .zlx_nr = IPU7_IS_ZLX_UC_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 0, 1, 1, 0,
+ },
+ .zlx_conf = {
+ 0x0,
+ 0x00010101,
+ 0x00010101,
+ },
+ .uao_p_num = IPU7_IS_UAO_UC_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000061,
+ 0x00000062,
+ 0x00000063,
+ },
+ },
+ {
+ .name = "IS_DATA_WR_ISOC",
+ .offset = IPU7_IS_MMU_M0_OFFSET,
+ .zlx_offset = IPU7_IS_ZLX_M0_OFFSET,
+ .uao_offset = IPU7_IS_UAO_M0_WR_OFFSET,
+ .info_bits = 0x20006601,
+ .refill = 0x00002120,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_IS_MMU_M0_L1_BLOCKNR_REG,
+ .l2_block = IPU7_IS_MMU_M0_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_IS_MMU_M0_STREAM_NUM,
+ .nr_l2streams = IPU7_IS_MMU_M0_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x3, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe,
+ },
+ .zlx_nr = IPU7_IS_ZLX_M0_NUM,
+ .zlx_axi_pool = {
+ 0x00000f10,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ },
+ .zlx_conf = {
+ 0x00010103,
+ 0x00010103,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ },
+ .uao_p_num = IPU7_IS_UAO_M0_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000049,
+ 0x0000004a,
+ 0x0000004b,
+ 0x0000004c,
+ 0x0000004d,
+ 0x0000004e,
+ 0x0000004f,
+ 0x00000050,
+ },
+ },
+ {
+ .name = "IS_DATA_WR_SNOOP",
+ .offset = IPU7_IS_MMU_M1_OFFSET,
+ .zlx_offset = IPU7_IS_ZLX_M1_OFFSET,
+ .uao_offset = IPU7_IS_UAO_M1_WR_OFFSET,
+ .info_bits = 0x20006901,
+ .refill = 0x00002322,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_IS_MMU_M1_L1_BLOCKNR_REG,
+ .l2_block = IPU7_IS_MMU_M1_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_IS_MMU_M1_STREAM_NUM,
+ .nr_l2streams = IPU7_IS_MMU_M1_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x3, 0x6, 0x9, 0xc,
+ 0xe, 0x10, 0x12, 0x14, 0x16,
+ 0x18, 0x1a, 0x1c, 0x1e, 0x20,
+ 0x22,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4, 0x6, 0x8,
+ 0xa, 0xc, 0xe, 0x10, 0x12,
+ 0x14, 0x16, 0x18, 0x1a, 0x1c,
+ 0x1e,
+ },
+ .zlx_nr = IPU7_IS_ZLX_M1_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ },
+ .zlx_conf = {
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010103,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ },
+ .uao_p_num = IPU7_IS_UAO_M1_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000051,
+ 0x00000052,
+ 0x00000053,
+ 0x00000054,
+ 0x00000055,
+ 0x00000056,
+ 0x00000057,
+ 0x00000058,
+ 0x00000059,
+ 0x0000005a,
+ 0x0000005b,
+ 0x0000005c,
+ 0x0000005d,
+ 0x0000005e,
+ 0x0000005f,
+ 0x00000060,
+ },
+ },
+ },
+ .cdc_fifos = 3,
+ .cdc_fifo_threshold = {6, 8, 2},
+ .dmem_offset = IPU_ISYS_DMEM_OFFSET,
+ .spc_offset = IPU_ISYS_SPC_OFFSET,
+ },
+ .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN,
+};
+
+static struct ipu_psys_internal_pdata ipu7_psys_ipdata = {
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = IPU7_PS_MMU_NUM,
+ .mmu_hw = {
+ {
+ .name = "PS_FW_RD",
+ .offset = IPU7_PS_MMU_FW_RD_OFFSET,
+ .zlx_offset = IPU7_PS_ZLX_FW_RD_OFFSET,
+ .uao_offset = IPU7_PS_UAO_FW_RD_OFFSET,
+ .info_bits = 0x20004801,
+ .refill = 0x00002726,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_PS_MMU_FW_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU7_PS_MMU_FW_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_PS_MMU_FW_RD_STREAM_NUM,
+ .nr_l2streams = IPU7_PS_MMU_FW_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0, 0x8, 0xa, 0xc, 0xd,
+ 0xf, 0x11, 0x12, 0x13, 0x14,
+ 0x16, 0x18, 0x19, 0x1a, 0x1a,
+ 0x1a, 0x1a, 0x1a, 0x1a, 0x1a,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4, 0x6, 0x8,
+ 0xa, 0xc, 0xe, 0x10, 0x12,
+ 0x14, 0x16, 0x18, 0x1a, 0x1c,
+ 0x1e, 0x20, 0x22, 0x24, 0x26,
+ },
+ .zlx_nr = IPU7_PS_ZLX_FW_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x0,
+ },
+ .uao_p_num = IPU7_PS_UAO_FW_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000036,
+ 0x0000003d,
+ 0x0000003e,
+ 0x00000039,
+ 0x0000003f,
+ 0x00000040,
+ 0x00000041,
+ 0x0000003a,
+ 0x0000003b,
+ 0x00000042,
+ 0x00000043,
+ 0x00000044,
+ 0x0000003c,
+ },
+ },
+ {
+ .name = "PS_FW_WR",
+ .offset = IPU7_PS_MMU_FW_WR_OFFSET,
+ .zlx_offset = IPU7_PS_ZLX_FW_WR_OFFSET,
+ .uao_offset = IPU7_PS_UAO_FW_WR_OFFSET,
+ .info_bits = 0x20004601,
+ .refill = 0x00002322,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_PS_MMU_FW_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU7_PS_MMU_FW_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_PS_MMU_FW_WR_STREAM_NUM,
+ .nr_l2streams = IPU7_PS_MMU_FW_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0, 0x8, 0xa, 0xc, 0xd,
+ 0xe, 0xf, 0x10, 0x10, 0x10,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4, 0x6, 0x8,
+ 0xa, 0xc, 0xe, 0x10, 0x12,
+ },
+ .zlx_nr = IPU7_PS_ZLX_FW_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 0, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0,
+ },
+ .zlx_conf = {
+ 0x0,
+ 0x00010101,
+ 0x00010101,
+ },
+ .uao_p_num = IPU7_PS_UAO_FW_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000036,
+ 0x00000037,
+ 0x00000038,
+ 0x00000039,
+ 0x0000003a,
+ 0x0000003b,
+ 0x0000003c,
+ },
+ },
+ {
+ .name = "PS_DATA_RD",
+ .offset = IPU7_PS_MMU_SRT_RD_OFFSET,
+ .zlx_offset = IPU7_PS_ZLX_DATA_RD_OFFSET,
+ .uao_offset = IPU7_PS_UAO_SRT_RD_OFFSET,
+ .info_bits = 0x20004701,
+ .refill = 0x00002120,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_PS_MMU_SRT_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU7_PS_MMU_SRT_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_PS_MMU_SRT_RD_STREAM_NUM,
+ .nr_l2streams = IPU7_PS_MMU_SRT_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x4, 0x6, 0x8, 0xb,
+ 0xd, 0xf, 0x11, 0x13, 0x15,
+ 0x17, 0x23, 0x2b, 0x37, 0x3f,
+ 0x41, 0x43, 0x44, 0x45, 0x46,
+ 0x47, 0x48, 0x49, 0x4a, 0x4b,
+ 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
+ 0x51, 0x52, 0x53, 0x55, 0x57,
+ 0x59, 0x5b, 0x5d, 0x5f, 0x61,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4, 0x6, 0x8,
+ 0xa, 0xc, 0xe, 0x10, 0x12,
+ 0x14, 0x16, 0x18, 0x1a, 0x1c,
+ 0x1e, 0x20, 0x22, 0x24, 0x26,
+ 0x28, 0x2a, 0x2c, 0x2e, 0x30,
+ 0x32, 0x34, 0x36, 0x38, 0x3a,
+ 0x3c, 0x3e, 0x40, 0x42, 0x44,
+ 0x46, 0x48, 0x4a, 0x4c, 0x4e,
+ },
+ .zlx_nr = IPU7_PS_ZLX_DATA_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x00030303,
+ 0x00010101,
+ 0x00010101,
+ 0x00030202,
+ 0x00010101,
+ 0x00010101,
+ 0x00010101,
+ 0x00030800,
+ 0x00030500,
+ 0x00020101,
+ 0x00042000,
+ 0x00031000,
+ 0x00042000,
+ 0x00031000,
+ 0x00020400,
+ 0x00010101,
+ },
+ .uao_p_num = IPU7_PS_UAO_SRT_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000022,
+ 0x00000023,
+ 0x00000024,
+ 0x00000025,
+ 0x00000026,
+ 0x00000027,
+ 0x00000028,
+ 0x00000029,
+ 0x0000002a,
+ 0x0000002b,
+ 0x0000002c,
+ 0x0000002d,
+ 0x0000002e,
+ 0x0000002f,
+ 0x00000030,
+ 0x00000031,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0000001e,
+ 0x0000001f,
+ 0x00000020,
+ 0x00000021,
+ 0x00000032,
+ 0x00000033,
+ 0x00000034,
+ 0x00000035,
+ },
+ },
+ {
+ .name = "PS_DATA_WR",
+ .offset = IPU7_PS_MMU_SRT_WR_OFFSET,
+ .zlx_offset = IPU7_PS_ZLX_DATA_WR_OFFSET,
+ .uao_offset = IPU7_PS_UAO_SRT_WR_OFFSET,
+ .info_bits = 0x20004501,
+ .refill = 0x00002120,
+ .collapse_en_bitmap = 0x0,
+ .l1_block = IPU7_PS_MMU_SRT_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU7_PS_MMU_SRT_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU7_PS_MMU_SRT_WR_STREAM_NUM,
+ .nr_l2streams = IPU7_PS_MMU_SRT_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x2, 0x6, 0xa, 0xc,
+ 0xe, 0x10, 0x12, 0x14, 0x16,
+ 0x18, 0x1a, 0x1c, 0x1e, 0x20,
+ 0x22, 0x24, 0x26, 0x32, 0x3a,
+ 0x3c, 0x3e, 0x4a, 0x52, 0x58,
+ 0x64, 0x6c, 0x72, 0x7e, 0x86,
+ 0x8c, 0x8d, 0x8e, 0x8f, 0x90,
+ 0x91, 0x92, 0x94, 0x96, 0x98,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4, 0x6, 0x8,
+ 0xa, 0xc, 0xe, 0x10, 0x12,
+ 0x14, 0x16, 0x18, 0x1a, 0x1c,
+ 0x1e, 0x20, 0x22, 0x24, 0x26,
+ 0x28, 0x2a, 0x2c, 0x2e, 0x30,
+ 0x32, 0x34, 0x36, 0x38, 0x3a,
+ 0x3c, 0x3e, 0x40, 0x42, 0x44,
+ 0x46, 0x48, 0x4a, 0x4c, 0x4e,
+ },
+ .zlx_nr = IPU7_PS_ZLX_DATA_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f50,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 0, 0,
+ },
+ .zlx_conf = {
+ 0x00010102,
+ 0x00030103,
+ 0x00030103,
+ 0x00010101,
+ 0x00010101,
+ 0x00030101,
+ 0x00010101,
+ 0x38010101,
+ 0x0,
+ 0x0,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x38010101,
+ 0x00010101,
+ 0x00042000,
+ 0x00031000,
+ 0x00010101,
+ 0x00010101,
+ 0x00042000,
+ 0x00031000,
+ 0x00031000,
+ 0x00042000,
+ 0x00031000,
+ 0x00031000,
+ 0x00042000,
+ 0x00031000,
+ 0x00031000,
+ 0x0,
+ 0x0,
+ },
+ .uao_p_num = IPU7_PS_UAO_SRT_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000000,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005,
+ 0x00000006,
+ 0x00000007,
+ 0x00000008,
+ 0x00000009,
+ 0x0000000a,
+ 0x0000000b,
+ 0x0000000c,
+ 0x0000000d,
+ 0x0000000e,
+ 0x0000000f,
+ 0x00000010,
+ 0x00000011,
+ 0x00000012,
+ 0x00000013,
+ 0x00000014,
+ 0x00000015,
+ 0x00000016,
+ 0x00000017,
+ 0x00000018,
+ 0x00000019,
+ 0x0000001a,
+ 0x0000001b,
+ 0x0000001c,
+ 0x0000001d,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0000001e,
+ 0x0000001f,
+ 0x00000020,
+ 0x00000021,
+ },
+ },
+ },
+ .dmem_offset = IPU_PSYS_DMEM_OFFSET,
+ },
+};
+
+static struct ipu_isys_internal_pdata ipu8_isys_ipdata = {
+ .csi2 = {
+ .gpreg = IPU8_IS_IO_CSI2_GPREGS_BASE,
+ },
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = IPU8_IS_MMU_NUM,
+ .mmu_hw = {
+ {
+ .name = "IS_FW_RD",
+ .offset = IPU8_IS_MMU_FW_RD_OFFSET,
+ .zlx_offset = IPU8_IS_ZLX_UC_RD_OFFSET,
+ .uao_offset = IPU8_IS_UAO_UC_RD_OFFSET,
+ .info_bits = 0x20005101,
+ .refill = 0x00002726,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_IS_MMU_FW_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU8_IS_MMU_FW_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_IS_MMU_FW_RD_STREAM_NUM,
+ .nr_l2streams = IPU8_IS_MMU_FW_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x8, 0xa,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4,
+ },
+ .zlx_nr = IPU8_IS_ZLX_UC_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 0, 1, 0, 0
+ },
+ .zlx_conf = {
+ 0, 2, 0, 0
+ },
+ .uao_p_num = IPU8_IS_UAO_UC_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000049,
+ 0x0000004c,
+ 0x0000004d,
+ 0x00000000,
+ },
+ },
+ {
+ .name = "IS_FW_WR",
+ .offset = IPU8_IS_MMU_FW_WR_OFFSET,
+ .zlx_offset = IPU8_IS_ZLX_UC_WR_OFFSET,
+ .uao_offset = IPU8_IS_UAO_UC_WR_OFFSET,
+ .info_bits = 0x20005001,
+ .refill = 0x00002524,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_IS_MMU_FW_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU8_IS_MMU_FW_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_IS_MMU_FW_WR_STREAM_NUM,
+ .nr_l2streams = IPU8_IS_MMU_FW_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x0, 0x8, 0xa,
+ },
+ .l2_block_sz = {
+ 0x0, 0x2, 0x4,
+ },
+ .zlx_nr = IPU8_IS_ZLX_UC_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 0, 1, 1, 0,
+ },
+ .zlx_conf = {
+ 0x0,
+ 0x2,
+ 0x2,
+ 0x0,
+ },
+ .uao_p_num = IPU8_IS_UAO_UC_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000049,
+ 0x0000004a,
+ 0x0000004b,
+ 0x00000000,
+ },
+ },
+ {
+ .name = "IS_DATA_WR_ISOC",
+ .offset = IPU8_IS_MMU_M0_OFFSET,
+ .zlx_offset = IPU8_IS_ZLX_M0_OFFSET,
+ .uao_offset = IPU8_IS_UAO_M0_WR_OFFSET,
+ .info_bits = 0x20004e01,
+ .refill = 0x00002120,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_IS_MMU_M0_L1_BLOCKNR_REG,
+ .l2_block = IPU8_IS_MMU_M0_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_IS_MMU_M0_STREAM_NUM,
+ .nr_l2streams = IPU8_IS_MMU_M0_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .zlx_nr = IPU8_IS_ZLX_M0_NUM,
+ .zlx_axi_pool = {
+ 0x00000f10,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ },
+ .zlx_conf = {
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ },
+ .uao_p_num = IPU8_IS_UAO_M0_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x0000003b,
+ 0x0000003c,
+ 0x0000003d,
+ 0x0000003e,
+ 0x0000003b,
+ 0x0000003c,
+ 0x0000003d,
+ 0x0000003e,
+ 0x0000003b,
+ 0x0000003c,
+ 0x0000003d,
+ 0x0000003e,
+ 0x0000003b,
+ 0x0000003c,
+ 0x0000003d,
+ 0x0000003e,
+ },
+ },
+ {
+ .name = "IS_DATA_WR_SNOOP",
+ .offset = IPU8_IS_MMU_M1_OFFSET,
+ .zlx_offset = IPU8_IS_ZLX_M1_OFFSET,
+ .uao_offset = IPU8_IS_UAO_M1_WR_OFFSET,
+ .info_bits = 0x20004f01,
+ .refill = 0x00002322,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_IS_MMU_M1_L1_BLOCKNR_REG,
+ .l2_block = IPU8_IS_MMU_M1_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_IS_MMU_M1_STREAM_NUM,
+ .nr_l2streams = IPU8_IS_MMU_M1_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ },
+ .zlx_nr = IPU8_IS_ZLX_M1_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ },
+ .zlx_conf = {
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ },
+ .uao_p_num = IPU8_IS_UAO_M1_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x0000003f,
+ 0x00000040,
+ 0x00000041,
+ 0x00000042,
+ 0x0000003f,
+ 0x00000040,
+ 0x00000041,
+ 0x00000042,
+ 0x0000003f,
+ 0x00000040,
+ 0x00000041,
+ 0x00000042,
+ 0x0000003f,
+ 0x00000040,
+ 0x00000041,
+ 0x00000042,
+ },
+ },
+ {
+ .name = "IS_UPIPE",
+ .offset = IPU8_IS_MMU_UPIPE_OFFSET,
+ .zlx_offset = IPU8_IS_ZLX_UPIPE_OFFSET,
+ .uao_offset = IPU8_IS_UAO_UPIPE_OFFSET,
+ .info_bits = 0x20005201,
+ .refill = 0x00002928,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_IS_MMU_UPIPE_L1_BLOCKNR_REG,
+ .l2_block = IPU8_IS_MMU_UPIPE_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_IS_MMU_UPIPE_STREAM_NUM,
+ .nr_l2streams = IPU8_IS_MMU_UPIPE_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ },
+ .zlx_nr = IPU8_IS_ZLX_UPIPE_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1,
+ },
+ .zlx_conf = {
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ 0x3,
+ },
+ .uao_p_num = IPU8_IS_UAO_UPIPE_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000043,
+ 0x00000044,
+ 0x00000045,
+ 0x00000046,
+ 0x00000047,
+ 0x00000048,
+ },
+ },
+ },
+ .cdc_fifos = 3,
+ .cdc_fifo_threshold = {6, 8, 2},
+ .dmem_offset = IPU_ISYS_DMEM_OFFSET,
+ .spc_offset = IPU_ISYS_SPC_OFFSET,
+ },
+ .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN,
+};
+
+static struct ipu_psys_internal_pdata ipu8_psys_ipdata = {
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = IPU8_PS_MMU_NUM,
+ .mmu_hw = {
+ {
+ .name = "PS_FW_RD",
+ .offset = IPU8_PS_MMU_FW_RD_OFFSET,
+ .zlx_offset = IPU8_PS_ZLX_FW_RD_OFFSET,
+ .uao_offset = IPU8_PS_UAO_FW_RD_OFFSET,
+ .info_bits = 0x20003a01,
+ .refill = 0x00002726,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_PS_MMU_FW_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU8_PS_MMU_FW_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_PS_MMU_FW_RD_STREAM_NUM,
+ .nr_l2streams = IPU8_PS_MMU_FW_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x00000018,
+ 0x00000018,
+ 0x00000018,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ },
+ .zlx_nr = IPU8_PS_ZLX_FW_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 0, 1, 0, 0, 1, 1, 0, 0,
+ 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x0,
+ 0x2,
+ 0x0,
+ 0x0,
+ 0x2,
+ 0x2,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ },
+ .uao_p_num = IPU8_PS_UAO_FW_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x0000002d,
+ 0x00000032,
+ 0x00000033,
+ 0x00000030,
+ 0x00000034,
+ 0x00000035,
+ 0x00000036,
+ 0x00000031,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ },
+ },
+ {
+ .name = "PS_FW_WR",
+ .offset = IPU8_PS_MMU_FW_WR_OFFSET,
+ .zlx_offset = IPU8_PS_ZLX_FW_WR_OFFSET,
+ .uao_offset = IPU8_PS_UAO_FW_WR_OFFSET,
+ .info_bits = 0x20003901,
+ .refill = 0x00002524,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_PS_MMU_FW_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU8_PS_MMU_FW_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_PS_MMU_FW_WR_STREAM_NUM,
+ .nr_l2streams = IPU8_PS_MMU_FW_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000010,
+ 0x00000010,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ },
+ .zlx_nr = IPU8_PS_ZLX_FW_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f20,
+ },
+ .zlx_en = {
+ 0, 1, 1, 0, 0, 0, 0, 0,
+ },
+ .zlx_conf = {
+ 0x0, 0x2, 0x2, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ },
+ .uao_p_num = IPU8_PS_UAO_FW_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x0000002d,
+ 0x0000002e,
+ 0x0000002f,
+ 0x00000030,
+ 0x00000031,
+ 0x0,
+ 0x0,
+ 0x0,
+ },
+ },
+ {
+ .name = "PS_DATA_RD",
+ .offset = IPU8_PS_MMU_SRT_RD_OFFSET,
+ .zlx_offset = IPU8_PS_ZLX_DATA_RD_OFFSET,
+ .uao_offset = IPU8_PS_UAO_SRT_RD_OFFSET,
+ .info_bits = 0x20003801,
+ .refill = 0x00002322,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_PS_MMU_SRT_RD_L1_BLOCKNR_REG,
+ .l2_block = IPU8_PS_MMU_SRT_RD_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_PS_MMU_SRT_RD_STREAM_NUM,
+ .nr_l2streams = IPU8_PS_MMU_SRT_RD_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000014,
+ 0x00000018,
+ 0x0000001c,
+ 0x0000001e,
+ 0x00000022,
+ 0x00000024,
+ 0x00000026,
+ 0x00000028,
+ 0x0000002a,
+ 0x0000002c,
+ 0x0000002e,
+ 0x00000030,
+ 0x00000032,
+ 0x00000036,
+ 0x0000003a,
+ 0x0000003c,
+ 0x0000003c,
+ 0x0000003c,
+ 0x0000003c,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ 0x00000020,
+ 0x00000022,
+ 0x00000024,
+ 0x00000026,
+ 0x00000028,
+ 0x0000002a,
+ 0x0000002c,
+ 0x0000002e,
+ 0x00000030,
+ 0x00000032,
+ },
+ .zlx_nr = IPU8_PS_ZLX_DATA_RD_NUM,
+ .zlx_axi_pool = {
+ 0x00000f30,
+ },
+ .zlx_en = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 0,
+ },
+ .zlx_conf = {
+ 0x6, 0x3, 0x3, 0x6,
+ 0x2, 0x2, 0x6, 0x6,
+ 0x6, 0x3, 0x6, 0x3,
+ 0x3, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x6,
+ 0x6, 0x3, 0x0, 0x0,
+ 0x0, 0x0,
+ },
+ .uao_p_num = IPU8_PS_UAO_SRT_RD_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000017,
+ 0x00000018,
+ 0x00000019,
+ 0x0000001a,
+ 0x0000001b,
+ 0x0000001c,
+ 0x0000001d,
+ 0x0000001e,
+ 0x0000001f,
+ 0x00000020,
+ 0x00000021,
+ 0x00000022,
+ 0x00000023,
+ 0x00000024,
+ 0x00000025,
+ 0x00000026,
+ 0x00000027,
+ 0x00000028,
+ 0x00000029,
+ 0x0000002a,
+ 0x0000002b,
+ 0x0000002c,
+ 0x0,
+ 0x0,
+ 0x0,
+ 0x0,
+ },
+ },
+ {
+ .name = "PS_DATA_WR",
+ .offset = IPU8_PS_MMU_SRT_WR_OFFSET,
+ .zlx_offset = IPU8_PS_ZLX_DATA_WR_OFFSET,
+ .uao_offset = IPU8_PS_UAO_SRT_WR_OFFSET,
+ .info_bits = 0x20003701,
+ .refill = 0x00002120,
+ .collapse_en_bitmap = 0x1,
+ .at_sp_arb_cfg = 0x1,
+ .l1_block = IPU8_PS_MMU_SRT_WR_L1_BLOCKNR_REG,
+ .l2_block = IPU8_PS_MMU_SRT_WR_L2_BLOCKNR_REG,
+ .nr_l1streams = IPU8_PS_MMU_SRT_WR_STREAM_NUM,
+ .nr_l2streams = IPU8_PS_MMU_SRT_WR_STREAM_NUM,
+ .l1_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001c,
+ 0x0000001e,
+ 0x00000022,
+ 0x00000024,
+ 0x00000028,
+ 0x0000002a,
+ 0x0000002e,
+ 0x00000030,
+ 0x00000032,
+ 0x00000036,
+ 0x00000038,
+ 0x0000003a,
+ 0x0000003a,
+ 0x0000003a,
+ },
+ .l2_block_sz = {
+ 0x00000000,
+ 0x00000002,
+ 0x00000004,
+ 0x00000006,
+ 0x00000008,
+ 0x0000000a,
+ 0x0000000c,
+ 0x0000000e,
+ 0x00000010,
+ 0x00000012,
+ 0x00000014,
+ 0x00000016,
+ 0x00000018,
+ 0x0000001a,
+ 0x0000001c,
+ 0x0000001e,
+ 0x00000020,
+ 0x00000022,
+ 0x00000024,
+ 0x00000026,
+ 0x00000028,
+ 0x0000002a,
+ 0x0000002c,
+ 0x0000002e,
+ 0x00000030,
+ 0x00000032,
+ },
+ .zlx_nr = IPU8_PS_ZLX_DATA_WR_NUM,
+ .zlx_axi_pool = {
+ 0x00000f50,
+ },
+ .zlx_en = {
+ 1, 1, 1, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0,
+ 0, 0,
+ },
+ .zlx_conf = {
+ 0x3,
+ 0x6,
+ 0x38000002,
+ 0x38000000,
+ 0x3,
+ 0x38000002,
+ 0x38000002,
+ 0x38000002,
+ 0x38000002,
+ 0x38000002,
+ 0x38000002,
+ 0x6,
+ 0x3,
+ 0x6,
+ 0x3,
+ 0x6,
+ 0x3,
+ 0x6,
+ 0x3,
+ 0x3,
+ 0x6,
+ 0x3,
+ 0x3,
+ 0x0,
+ 0x0,
+ 0x0,
+ },
+ .uao_p_num = IPU8_PS_UAO_SRT_WR_PLANENUM,
+ .uao_p2tlb = {
+ 0x00000000,
+ 0x00000001,
+ 0x00000002,
+ 0x00000003,
+ 0x00000004,
+ 0x00000005,
+ 0x00000006,
+ 0x00000007,
+ 0x00000008,
+ 0x00000009,
+ 0x0000000a,
+ 0x0000000b,
+ 0x0000000c,
+ 0x0000000d,
+ 0x0000000e,
+ 0x0000000f,
+ 0x00000010,
+ 0x00000011,
+ 0x00000012,
+ 0x00000013,
+ 0x00000014,
+ 0x00000015,
+ 0x00000016,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ },
+ },
+ },
+ .dmem_offset = IPU_PSYS_DMEM_OFFSET,
+ },
+};
+
+static const struct ipu_buttress_ctrl ipu7_isys_buttress_ctrl = {
+ .subsys_id = IPU_IS,
+ .ratio = IPU7_IS_FREQ_CTL_DEFAULT_RATIO,
+ .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT,
+ .cdyn = IPU_FREQ_CTL_CDYN,
+ .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT,
+ .freq_ctl = BUTTRESS_REG_IS_WORKPOINT_REQ,
+ .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
+ .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK,
+ .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+ .ovrd_clk = BUTTRESS_OVERRIDE_IS_CLK,
+ .own_clk_ack = BUTTRESS_OWN_ACK_IS_CLK,
+};
+
+static const struct ipu_buttress_ctrl ipu7_psys_buttress_ctrl = {
+ .subsys_id = IPU_PS,
+ .ratio = IPU7_PS_FREQ_CTL_DEFAULT_RATIO,
+ .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT,
+ .cdyn = IPU_FREQ_CTL_CDYN,
+ .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT,
+ .freq_ctl = BUTTRESS_REG_PS_WORKPOINT_REQ,
+ .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
+ .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK,
+ .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+ .ovrd_clk = BUTTRESS_OVERRIDE_PS_CLK,
+ .own_clk_ack = BUTTRESS_OWN_ACK_PS_CLK,
+};
+
+static const struct ipu_buttress_ctrl ipu8_isys_buttress_ctrl = {
+ .subsys_id = IPU_IS,
+ .ratio = IPU8_IS_FREQ_CTL_DEFAULT_RATIO,
+ .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT,
+ .cdyn = IPU_FREQ_CTL_CDYN,
+ .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT,
+ .freq_ctl = BUTTRESS_REG_IS_WORKPOINT_REQ,
+ .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
+ .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK,
+ .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+static const struct ipu_buttress_ctrl ipu8_psys_buttress_ctrl = {
+ .subsys_id = IPU_PS,
+ .ratio = IPU8_PS_FREQ_CTL_DEFAULT_RATIO,
+ .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT,
+ .cdyn = IPU_FREQ_CTL_CDYN,
+ .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT,
+ .freq_ctl = BUTTRESS_REG_PS_WORKPOINT_REQ,
+ .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
+ .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK,
+ .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+ .own_clk_ack = BUTTRESS_OWN_ACK_PS_PLL,
+};
+
+void ipu_internal_pdata_init(struct ipu_isys_internal_pdata *isys_ipdata,
+ struct ipu_psys_internal_pdata *psys_ipdata)
+{
+ isys_ipdata->csi2.nports = ARRAY_SIZE(ipu7_csi_offsets);
+ isys_ipdata->csi2.offsets = ipu7_csi_offsets;
+ isys_ipdata->num_parallel_streams = IPU7_ISYS_NUM_STREAMS;
+ psys_ipdata->hw_variant.spc_offset = IPU7_PSYS_SPC_OFFSET;
+}
+
+static int ipu7_isys_check_fwnode_graph(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *endpoint;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (endpoint) {
+ fwnode_handle_put(endpoint);
+ return 0;
+ }
+
+ return ipu7_isys_check_fwnode_graph(fwnode->secondary);
+}
+
+static struct ipu7_bus_device *
+ipu7_isys_init(struct pci_dev *pdev, struct device *parent,
+ const struct ipu_buttress_ctrl *ctrl, void __iomem *base,
+ const struct ipu_isys_internal_pdata *ipdata,
+ unsigned int nr)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ struct ipu7_bus_device *isys_adev;
+ struct device *dev = &pdev->dev;
+ struct ipu7_isys_pdata *pdata;
+ int ret;
+
+ ret = ipu7_isys_check_fwnode_graph(fwnode);
+ if (ret) {
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
+ dev_err(dev,
+ "fwnode graph has no endpoints connection\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
+ if (ret) {
+ dev_err_probe(dev, ret, "IPU bridge init failed\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ pdata->ipdata = ipdata;
+
+ isys_adev = ipu7_bus_initialize_device(pdev, parent, pdata, ctrl,
+ IPU_ISYS_NAME);
+ if (IS_ERR(isys_adev)) {
+ dev_err_probe(dev, PTR_ERR(isys_adev),
+ "ipu7_bus_initialize_device isys failed\n");
+ kfree(pdata);
+ return ERR_CAST(isys_adev);
+ }
+
+ isys_adev->mmu = ipu7_mmu_init(dev, base, ISYS_MMID,
+ &ipdata->hw_variant);
+ if (IS_ERR(isys_adev->mmu)) {
+ dev_err_probe(dev, PTR_ERR(isys_adev->mmu),
+ "ipu7_mmu_init(isys_adev->mmu) failed\n");
+ put_device(&isys_adev->auxdev.dev);
+ kfree(pdata);
+ return ERR_CAST(isys_adev->mmu);
+ }
+
+ isys_adev->mmu->dev = &isys_adev->auxdev.dev;
+ isys_adev->subsys = IPU_IS;
+
+ ret = ipu7_bus_add_device(isys_adev);
+ if (ret) {
+ kfree(pdata);
+ return ERR_PTR(ret);
+ }
+
+ return isys_adev;
+}
+
+static struct ipu7_bus_device *
+ipu7_psys_init(struct pci_dev *pdev, struct device *parent,
+ const struct ipu_buttress_ctrl *ctrl, void __iomem *base,
+ const struct ipu_psys_internal_pdata *ipdata, unsigned int nr)
+{
+ struct ipu7_bus_device *psys_adev;
+ struct ipu7_psys_pdata *pdata;
+ int ret;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ pdata->ipdata = ipdata;
+
+ psys_adev = ipu7_bus_initialize_device(pdev, parent, pdata, ctrl,
+ IPU_PSYS_NAME);
+ if (IS_ERR(psys_adev)) {
+ dev_err_probe(&pdev->dev, PTR_ERR(psys_adev),
+ "ipu7_bus_initialize_device psys failed\n");
+ kfree(pdata);
+ return ERR_CAST(psys_adev);
+ }
+
+ psys_adev->mmu = ipu7_mmu_init(&pdev->dev, base, PSYS_MMID,
+ &ipdata->hw_variant);
+ if (IS_ERR(psys_adev->mmu)) {
+ dev_err_probe(&pdev->dev, PTR_ERR(psys_adev->mmu),
+ "ipu7_mmu_init(psys_adev->mmu) failed\n");
+ put_device(&psys_adev->auxdev.dev);
+ kfree(pdata);
+ return ERR_CAST(psys_adev->mmu);
+ }
+
+ psys_adev->mmu->dev = &psys_adev->auxdev.dev;
+ psys_adev->subsys = IPU_PS;
+
+ ret = ipu7_bus_add_device(psys_adev);
+ if (ret) {
+ kfree(pdata);
+ return ERR_PTR(ret);
+ }
+
+ return psys_adev;
+}
+
+static struct ia_gofo_msg_log_info_ts fw_error_log[IPU_SUBSYS_NUM];
+void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev)
+{
+ void __iomem *reg = adev->isp->base + ((adev->subsys == IPU_IS) ?
+ BUTTRESS_REG_FW_GP24 :
+ BUTTRESS_REG_FW_GP8);
+
+ memcpy_fromio(&fw_error_log[adev->subsys], reg,
+ sizeof(fw_error_log[adev->subsys]));
+}
+EXPORT_SYMBOL_NS_GPL(ipu7_dump_fw_error_log, "INTEL_IPU7");
+
+static void ipu7_pci_config_setup(struct pci_dev *dev)
+{
+ u16 pci_command;
+
+ pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+ pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ pci_write_config_word(dev, PCI_COMMAND, pci_command);
+}
+
+static int ipu7_map_fw_code_region(struct ipu7_bus_device *sys,
+ void *data, size_t size)
+{
+ struct device *dev = &sys->auxdev.dev;
+ struct ipu7_bus_device *adev = to_ipu7_bus_device(dev);
+ struct sg_table *sgt = &sys->fw_sgt;
+ struct ipu7_device *isp = adev->isp;
+ struct pci_dev *pdev = isp->pdev;
+ unsigned long n_pages, i;
+ unsigned long attr = 0;
+ struct page **pages;
+ int ret;
+
+ n_pages = PFN_UP(size);
+
+ pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
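+	/* Collect the struct page pointers backing the vmalloc()ed FW buffer. */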
+ for (i = 0; i < n_pages; i++) {
+ struct page *p = vmalloc_to_page(data);
+
+ if (!p) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pages[i] = p;
+ data += PAGE_SIZE;
+ }
+
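+	/*
+	 * Build an sg table from the pages and map it both for the PCI device
+	 * and through the IPU MMU, then sync so the FW sees a consistent image.
+	 */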
+ ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
+ GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!isp->secure_mode)
+ attr |= DMA_ATTR_RESERVE_REGION;
+
+ ret = dma_map_sgtable(&pdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret < 0) {
+ dev_err(dev, "map fw code[%lu pages %u nents] failed\n",
+ n_pages, sgt->nents);
+ ret = -ENOMEM;
+ sg_free_table(sgt);
+ goto out;
+ }
+
+ ret = ipu7_dma_map_sgtable(sys, sgt, DMA_BIDIRECTIONAL, attr);
+ if (ret) {
+ dma_unmap_sgtable(&pdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(sgt);
+ goto out;
+ }
+
+ ipu7_dma_sync_sgtable(sys, sgt);
+
+ dev_dbg(dev, "fw code region mapped at 0x%pad entries %d\n",
+ &sgt->sgl->dma_address, sgt->nents);
+
+out:
+ kfree(pages);
+
+ return ret;
+}
+
+static void ipu7_unmap_fw_code_region(struct ipu7_bus_device *sys)
+{
+ struct pci_dev *pdev = sys->isp->pdev;
+ struct sg_table *sgt = &sys->fw_sgt;
+
+ ipu7_dma_unmap_sgtable(sys, sgt, DMA_BIDIRECTIONAL, 0);
+ dma_unmap_sgtable(&pdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(sgt);
+}
+
+static int ipu7_init_fw_code_region_by_sys(struct ipu7_bus_device *sys,
+ const char *sys_name)
+{
+ struct device *dev = &sys->auxdev.dev;
+ struct ipu7_device *isp = sys->isp;
+ int ret;
+
+	/* Copy the FW binaries to their dedicated location. */
+ ret = ipu7_cpd_copy_binary(isp->cpd_fw->data, sys_name,
+ isp->fw_code_region, &sys->fw_entry);
+ if (ret) {
+ dev_err(dev, "%s binary not found.\n", sys_name);
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime PM\n");
+ return ret;
+ }
+
+ ret = ipu7_mmu_hw_init(sys->mmu);
+ if (ret) {
+ dev_err(dev, "Failed to set mmu hw\n");
+ pm_runtime_put(dev);
+ return ret;
+ }
+
+ /* Map code region. */
+ ret = ipu7_map_fw_code_region(sys, isp->fw_code_region,
+ IPU_FW_CODE_REGION_SIZE);
+ if (ret)
+ dev_err(dev, "Failed to map fw code region for %s.\n",
+ sys_name);
+
+ ipu7_mmu_hw_cleanup(sys->mmu);
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int ipu7_init_fw_code_region(struct ipu7_device *isp)
+{
+ int ret;
+
+ /*
+ * Allocate and map memory for FW execution.
+ * Not required in secure mode, in which FW runs in IMR.
+ */
+ isp->fw_code_region = vmalloc(IPU_FW_CODE_REGION_SIZE);
+ if (!isp->fw_code_region)
+ return -ENOMEM;
+
+ ret = ipu7_init_fw_code_region_by_sys(isp->isys, "isys");
+ if (ret)
+ goto fail_init;
+
+ ret = ipu7_init_fw_code_region_by_sys(isp->psys, "psys");
+ if (ret)
+ goto fail_init;
+
+ return 0;
+
+fail_init:
+ vfree(isp->fw_code_region);
+
+ return ret;
+}
+
+static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ipu_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ const struct ipu_buttress_ctrl *isys_buttress_ctrl;
+ const struct ipu_buttress_ctrl *psys_buttress_ctrl;
+ struct ipu_isys_internal_pdata *isys_ipdata;
+ struct ipu_psys_internal_pdata *psys_ipdata;
+ unsigned int dma_mask = IPU_DMA_MASK;
+ struct device *dev = &pdev->dev;
+ void __iomem *isys_base = NULL;
+ void __iomem *psys_base = NULL;
+ phys_addr_t phys, pb_phys;
+ struct ipu7_device *isp;
+ u32 is_es;
+ int ret;
+
+ if (!fwnode || fwnode_property_read_u32(fwnode, "is_es", &is_es))
+ is_es = 0;
+
+ isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->pdev = pdev;
+ INIT_LIST_HEAD(&isp->devices);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Enable PCI device failed\n");
+
+ dev_info(dev, "Device 0x%x (rev: 0x%x)\n",
+ pdev->device, pdev->revision);
+
+ phys = pci_resource_start(pdev, IPU_PCI_BAR);
+ pb_phys = pci_resource_start(pdev, IPU_PCI_PBBAR);
+ dev_info(dev, "IPU7 PCI BAR0 base %pap BAR2 base %pap\n",
+ &phys, &pb_phys);
+
+ isp->base = pcim_iomap_region(pdev, IPU_PCI_BAR, IPU_NAME);
+ if (IS_ERR(isp->base))
+ return dev_err_probe(dev, PTR_ERR(isp->base),
+ "Failed to I/O memory remapping bar %u\n",
+ IPU_PCI_BAR);
+
+ isp->pb_base = pcim_iomap_region(pdev, IPU_PCI_PBBAR, IPU_NAME);
+ if (IS_ERR(isp->pb_base))
+ return dev_err_probe(dev, PTR_ERR(isp->pb_base),
+ "Failed to I/O memory remapping bar %u\n",
+ IPU_PCI_PBBAR);
+
+ dev_info(dev, "IPU7 PCI BAR0 mapped at %p\n BAR2 mapped at %p\n",
+ isp->base, isp->pb_base);
+
+ pci_set_drvdata(pdev, isp);
+ pci_set_master(pdev);
+
+ switch (id->device) {
+ case IPU7_PCI_ID:
+ isp->hw_ver = IPU_VER_7;
+ isp->cpd_fw_name = IPU7_FIRMWARE_NAME;
+ isys_ipdata = &ipu7_isys_ipdata;
+ psys_ipdata = &ipu7_psys_ipdata;
+ isys_buttress_ctrl = &ipu7_isys_buttress_ctrl;
+ psys_buttress_ctrl = &ipu7_psys_buttress_ctrl;
+ break;
+ case IPU7P5_PCI_ID:
+ isp->hw_ver = IPU_VER_7P5;
+ isp->cpd_fw_name = IPU7P5_FIRMWARE_NAME;
+ isys_ipdata = &ipu7p5_isys_ipdata;
+ psys_ipdata = &ipu7p5_psys_ipdata;
+ isys_buttress_ctrl = &ipu7_isys_buttress_ctrl;
+ psys_buttress_ctrl = &ipu7_psys_buttress_ctrl;
+ break;
+ case IPU8_PCI_ID:
+ isp->hw_ver = IPU_VER_8;
+ isp->cpd_fw_name = IPU8_FIRMWARE_NAME;
+ isys_ipdata = &ipu8_isys_ipdata;
+ psys_ipdata = &ipu8_psys_ipdata;
+ isys_buttress_ctrl = &ipu8_isys_buttress_ctrl;
+ psys_buttress_ctrl = &ipu8_psys_buttress_ctrl;
+ break;
+ default:
+ WARN(1, "Unsupported IPU device");
+ return -ENODEV;
+ }
+
+ ipu_internal_pdata_init(isys_ipdata, psys_ipdata);
+
+ isys_base = isp->base + isys_ipdata->hw_variant.offset;
+ psys_base = isp->base + psys_ipdata->hw_variant.offset;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ ipu7_pci_config_setup(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to alloc irq vector\n");
+
+ ret = ipu_buttress_init(isp);
+ if (ret)
+ goto pci_irq_free;
+
+ dev_info(dev, "firmware cpd file: %s\n", isp->cpd_fw_name);
+
+ ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "Requesting signed firmware %s failed\n",
+ isp->cpd_fw_name);
+ goto buttress_exit;
+ }
+
+ ret = ipu7_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
+ isp->cpd_fw->size);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to validate cpd\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ isys_ctrl = devm_kmemdup(dev, isys_buttress_ctrl,
+ sizeof(*isys_buttress_ctrl), GFP_KERNEL);
+ if (!isys_ctrl) {
+ ret = -ENOMEM;
+ goto out_ipu_bus_del_devices;
+ }
+
+ isp->isys = ipu7_isys_init(pdev, dev, isys_ctrl, isys_base,
+ isys_ipdata, 0);
+ if (IS_ERR(isp->isys)) {
+ ret = PTR_ERR(isp->isys);
+ goto out_ipu_bus_del_devices;
+ }
+
+ psys_ctrl = devm_kmemdup(dev, psys_buttress_ctrl,
+ sizeof(*psys_buttress_ctrl), GFP_KERNEL);
+ if (!psys_ctrl) {
+ ret = -ENOMEM;
+ goto out_ipu_bus_del_devices;
+ }
+
+ isp->psys = ipu7_psys_init(pdev, &isp->isys->auxdev.dev,
+ psys_ctrl, psys_base,
+ psys_ipdata, 0);
+ if (IS_ERR(isp->psys)) {
+ ret = PTR_ERR(isp->psys);
+ goto out_ipu_bus_del_devices;
+ }
+
+ ret = devm_request_threaded_irq(dev, pdev->irq,
+ ipu_buttress_isr,
+ ipu_buttress_isr_threaded,
+ IRQF_SHARED, IPU_NAME, isp);
+ if (ret)
+ goto out_ipu_bus_del_devices;
+
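+	/*
+	 * In non-secure mode the driver allocates and maps its own FW code
+	 * region; in secure mode the FW runs from IMR, so only the FW image is
+	 * mapped and then authenticated through the buttress.
+	 */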
+ if (!isp->secure_mode) {
+ ret = ipu7_init_fw_code_region(isp);
+ if (ret)
+ goto out_ipu_bus_del_devices;
+ } else {
+ ret = pm_runtime_get_sync(&isp->psys->auxdev.dev);
+ if (ret < 0) {
+ dev_err(&isp->psys->auxdev.dev,
+ "Failed to get runtime PM\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ ret = ipu7_mmu_hw_init(isp->psys->mmu);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "Failed to init MMU hardware\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ ret = ipu7_map_fw_code_region(isp->psys,
+ (void *)isp->cpd_fw->data,
+ isp->cpd_fw->size);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "failed to map fw image\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ ret = ipu_buttress_authenticate(isp);
+ if (ret) {
+ dev_err_probe(&isp->pdev->dev, ret,
+ "FW authentication failed\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ ipu7_mmu_hw_cleanup(isp->psys->mmu);
+ pm_runtime_put(&isp->psys->auxdev.dev);
+ }
+
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
+
+ isp->ipu7_bus_ready_to_probe = true;
+
+ return 0;
+
+out_ipu_bus_del_devices:
+ if (!IS_ERR_OR_NULL(isp->isys) && isp->isys->fw_sgt.nents)
+ ipu7_unmap_fw_code_region(isp->isys);
+ if (!IS_ERR_OR_NULL(isp->psys) && isp->psys->fw_sgt.nents)
+ ipu7_unmap_fw_code_region(isp->psys);
+ if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
+ ipu7_mmu_cleanup(isp->psys->mmu);
+ if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
+ ipu7_mmu_cleanup(isp->isys->mmu);
+ if (!IS_ERR_OR_NULL(isp->psys))
+ pm_runtime_put(&isp->psys->auxdev.dev);
+ ipu7_bus_del_devices(pdev);
+ release_firmware(isp->cpd_fw);
+buttress_exit:
+ ipu_buttress_exit(isp);
+pci_irq_free:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
+}
+
+static void ipu7_pci_remove(struct pci_dev *pdev)
+{
+ struct ipu7_device *isp = pci_get_drvdata(pdev);
+
+ if (!IS_ERR_OR_NULL(isp->isys) && isp->isys->fw_sgt.nents)
+ ipu7_unmap_fw_code_region(isp->isys);
+ if (!IS_ERR_OR_NULL(isp->psys) && isp->psys->fw_sgt.nents)
+ ipu7_unmap_fw_code_region(isp->psys);
+
+ if (!IS_ERR_OR_NULL(isp->fw_code_region))
+ vfree(isp->fw_code_region);
+
+ ipu7_mmu_cleanup(isp->isys->mmu);
+ ipu7_mmu_cleanup(isp->psys->mmu);
+
+ ipu7_bus_del_devices(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ ipu_buttress_exit(isp);
+
+ release_firmware(isp->cpd_fw);
+}
+
+static void ipu7_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct ipu7_device *isp = pci_get_drvdata(pdev);
+
+ dev_warn(&pdev->dev, "FLR prepare\n");
+ pm_runtime_forbid(&isp->pdev->dev);
+}
+
+static void ipu7_pci_reset_done(struct pci_dev *pdev)
+{
+ struct ipu7_device *isp = pci_get_drvdata(pdev);
+
+ ipu_buttress_restore(isp);
+ if (isp->secure_mode)
+ ipu_buttress_reset_authentication(isp);
+
+ isp->ipc_reinit = true;
+ pm_runtime_allow(&isp->pdev->dev);
+
+ dev_warn(&pdev->dev, "FLR completed\n");
+}
+
+/*
+ * The PCI base driver code requires the driver to provide these callbacks
+ * to enable PCI device level PM state transitions (D0<->D3).
+ */
+static int ipu7_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int ipu7_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ipu7_device *isp = pci_get_drvdata(pdev);
+ struct ipu_buttress *b = &isp->buttress;
+ int ret;
+
+ isp->secure_mode = ipu_buttress_get_secure_mode(isp);
+ dev_info(dev, "IPU7 in %s mode\n",
+ isp->secure_mode ? "secure" : "non-secure");
+
+ ipu_buttress_restore(isp);
+
+ ret = ipu_buttress_ipc_reset(isp, &b->cse);
+ if (ret)
+ dev_err(dev, "IPC reset protocol failed!\n");
+
+ ret = pm_runtime_get_sync(&isp->psys->auxdev.dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime PM\n");
+ return 0;
+ }
+
+ ret = ipu_buttress_authenticate(isp);
+ if (ret)
+ dev_err(dev, "FW authentication failed(%d)\n", ret);
+
+ pm_runtime_put(&isp->psys->auxdev.dev);
+
+ return 0;
+}
+
+static int ipu7_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ipu7_device *isp = pci_get_drvdata(pdev);
+ int ret;
+
+ ipu_buttress_restore(isp);
+
+ if (isp->ipc_reinit) {
+ struct ipu_buttress *b = &isp->buttress;
+
+ isp->ipc_reinit = false;
+ ret = ipu_buttress_ipc_reset(isp, &b->cse);
+ if (ret)
+ dev_err(dev, "IPC reset protocol failed!\n");
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipu7_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(&ipu7_suspend, &ipu7_resume)
+ RUNTIME_PM_OPS(&ipu7_suspend, &ipu7_runtime_resume, NULL)
+};
+
+static const struct pci_device_id ipu7_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU7_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU7P5_PCI_ID)},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, ipu7_pci_tbl);
+
+static const struct pci_error_handlers pci_err_handlers = {
+ .reset_prepare = ipu7_pci_reset_prepare,
+ .reset_done = ipu7_pci_reset_done,
+};
+
+static struct pci_driver ipu7_pci_driver = {
+ .name = IPU_NAME,
+ .id_table = ipu7_pci_tbl,
+ .probe = ipu7_pci_probe,
+ .remove = ipu7_pci_remove,
+ .driver = {
+ .pm = &ipu7_pm_ops,
+ },
+ .err_handler = &pci_err_handlers,
+};
+
+module_pci_driver(ipu7_pci_driver);
+
+MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
+MODULE_AUTHOR("Intel");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu7 pci driver");
diff --git a/drivers/staging/media/ipu7/ipu7.h b/drivers/staging/media/ipu7/ipu7.h
new file mode 100644
index 000000000000..ac8ac0689468
--- /dev/null
+++ b/drivers/staging/media/ipu7/ipu7.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 - 2025 Intel Corporation
+ */
+
+#ifndef IPU7_H
+#define IPU7_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "ipu7-buttress.h"
+
+struct ipu7_bus_device;
+struct pci_dev;
+struct firmware;
+
+#define IPU_NAME "intel-ipu7"
+#define IPU_MEDIA_DEV_MODEL_NAME "ipu7"
+
+#define IPU7_FIRMWARE_NAME "intel/ipu/ipu7_fw.bin"
+#define IPU7P5_FIRMWARE_NAME "intel/ipu/ipu7ptl_fw.bin"
+#define IPU8_FIRMWARE_NAME "intel/ipu/ipu8_fw.bin"
+
+#define IPU7_ISYS_NUM_STREAMS 12
+
+#define IPU7_PCI_ID 0x645d
+#define IPU7P5_PCI_ID 0xb05d
+#define IPU8_PCI_ID 0xd719
+
+#define FW_LOG_BUF_SIZE (2 * 1024 * 1024)
+
+enum ipu_version {
+ IPU_VER_INVALID = 0,
+ IPU_VER_7 = 1,
+ IPU_VER_7P5 = 2,
+ IPU_VER_8 = 3,
+};
+
+static inline bool is_ipu7p5(u8 hw_ver)
+{
+ return hw_ver == IPU_VER_7P5;
+}
+
+static inline bool is_ipu7(u8 hw_ver)
+{
+ return hw_ver == IPU_VER_7;
+}
+
+static inline bool is_ipu8(u8 hw_ver)
+{
+ return hw_ver == IPU_VER_8;
+}
+
+#define IPU_UNIFIED_OFFSET 0
+
+/*
+ * ISYS DMA can overshoot. For higher resolutions the over-allocation is one
+ * line, but it must be at least 1024 bytes. The value can differ between
+ * versions / generations, so it is provided via platform data.
+ */
+#define IPU_ISYS_OVERALLOC_MIN 1024
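+/*
+ * For example, the IPU7 and IPU8 isys platform data earlier in this patch
+ * simply use the minimum: .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN.
+ */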
+
+#define IPU_FW_CODE_REGION_SIZE 0x1000000 /* 16MB */
+#define IPU_FW_CODE_REGION_START 0x4000000 /* 64MB */
+#define IPU_FW_CODE_REGION_END (IPU_FW_CODE_REGION_START + \
+ IPU_FW_CODE_REGION_SIZE) /* 80MB */
+
+struct ipu7_device {
+ struct pci_dev *pdev;
+ struct list_head devices;
+ struct ipu7_bus_device *isys;
+ struct ipu7_bus_device *psys;
+ struct ipu_buttress buttress;
+
+ const struct firmware *cpd_fw;
+ const char *cpd_fw_name;
+ /* Only for non-secure mode. */
+ void *fw_code_region;
+
+ void __iomem *base;
+ void __iomem *pb_base;
+ u8 hw_ver;
+ bool ipc_reinit;
+ bool secure_mode;
+ bool ipu7_bus_ready_to_probe;
+};
+
+#define IPU_DMA_MASK 39
+#define IPU_LIB_CALL_TIMEOUT_MS 2000
+#define IPU_PSYS_CMD_TIMEOUT_MS 2000
+#define IPU_PSYS_OPEN_CLOSE_TIMEOUT_US 50
+#define IPU_PSYS_OPEN_CLOSE_RETRY (10000 / IPU_PSYS_OPEN_CLOSE_TIMEOUT_US)
+
+#define IPU_ISYS_NAME "isys"
+#define IPU_PSYS_NAME "psys"
+
+#define IPU_MMU_ADDR_BITS 32
+/* FW is accessible within the first 2 GiB only in non-secure mode. */
+#define IPU_MMU_ADDR_BITS_NON_SECURE 31
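+/*
+ * That is, a 32-bit (4 GiB) IOVA space, with FW restricted to the low
+ * 2 GiB (31 bits) in non-secure mode.
+ */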
+
+#define IPU7_IS_MMU_NUM 4U
+#define IPU7_PS_MMU_NUM 4U
+#define IPU7P5_IS_MMU_NUM 4U
+#define IPU7P5_PS_MMU_NUM 4U
+#define IPU8_IS_MMU_NUM 5U
+#define IPU8_PS_MMU_NUM 4U
+#define IPU_MMU_MAX_NUM 5U /* max(IS, PS) */
+#define IPU_MMU_MAX_TLB_L1_STREAMS 40U
+#define IPU_MMU_MAX_TLB_L2_STREAMS 40U
+#define IPU_ZLX_MAX_NUM 32U
+#define IPU_ZLX_POOL_NUM 8U
+#define IPU_UAO_PLANE_MAX_NUM 64U
+
+/*
+ * To maximize IOSF utilization, the IPU needs to send requests in bursts.
+ * At the DMA interface with the buttress, there are CDC FIFOs with burst
+ * collection capability. The CDC FIFO burst collectors have a configurable
+ * threshold, which is set based on the outcome of performance measurements.
+ *
+ * isys has 3 ports with an IOSF interface for VC0, VC1 and VC2.
+ * psys has 4 ports with an IOSF interface for VC0, VC1w, VC1r and VC2.
+ *
+ * Threshold values are pre-defined and are arrived at after performance
+ * evaluations on each type of IPU.
+ */
+#define IPU_MAX_VC_IOSF_PORTS 4
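+/*
+ * For example, the IPU7 and IPU8 isys variants earlier in this patch program
+ * three CDC FIFOs (.cdc_fifos = 3) with thresholds {6, 8, 2}.
+ */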
+
+/*
+ * The IPU must configure the correct arbitration mechanism for the IOSF VC
+ * requests. There are two options for VC0 and VC1: 0 means rearbitrate on
+ * stall and 1 means stall until the request is completed.
+ */
+#define IPU_BTRS_ARB_MODE_TYPE_REARB 0
+#define IPU_BTRS_ARB_MODE_TYPE_STALL 1
+
+/* Currently chosen arbitration mechanism for VC0 */
+#define IPU_BTRS_ARB_STALL_MODE_VC0 IPU_BTRS_ARB_MODE_TYPE_REARB
+
+/* Currently chosen arbitration mechanism for VC1 */
+#define IPU_BTRS_ARB_STALL_MODE_VC1 IPU_BTRS_ARB_MODE_TYPE_REARB
+
+/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
+#define IPU_MMUV2_L2_RANGE (1024 * PAGE_SIZE)
+/* Max L2 blocks per stream */
+#define IPU_MMUV2_MAX_L2_BLOCKS 2
+/* Max L1 blocks per stream */
+#define IPU_MMUV2_MAX_L1_BLOCKS 16
+#define IPU_MMUV2_TRASH_RANGE (IPU_MMUV2_L2_RANGE * \
+ IPU_MMUV2_MAX_L2_BLOCKS)
+/* Entries per L1 block */
+#define MMUV2_ENTRIES_PER_L1_BLOCK 16
+#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE)
+#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU_MMUV2_L2_RANGE
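+/*
+ * With 4 KiB pages this works out to: one L2 block covers 1024 * 4 KiB = 4 MiB,
+ * one L1 block covers 16 * 4 KiB = 64 KiB, and the trash range spans
+ * 2 * 4 MiB = 8 MiB.
+ */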
+
+struct ipu7_mmu_hw {
+ char name[32];
+
+ void __iomem *base;
+ void __iomem *zlx_base;
+ void __iomem *uao_base;
+
+ u32 offset;
+ u32 zlx_offset;
+ u32 uao_offset;
+
+ u32 info_bits;
+ u32 refill;
+ u32 collapse_en_bitmap;
+ u32 at_sp_arb_cfg;
+
+ u32 l1_block;
+ u32 l2_block;
+
+ u8 nr_l1streams;
+ u8 nr_l2streams;
+ u32 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS];
+ u32 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS];
+
+ u8 zlx_nr;
+ u32 zlx_axi_pool[IPU_ZLX_POOL_NUM];
+ u32 zlx_en[IPU_ZLX_MAX_NUM];
+ u32 zlx_conf[IPU_ZLX_MAX_NUM];
+
+ u32 uao_p_num;
+ u32 uao_p2tlb[IPU_UAO_PLANE_MAX_NUM];
+};
+
+struct ipu7_mmu_pdata {
+ u32 nr_mmus;
+ struct ipu7_mmu_hw mmu_hw[IPU_MMU_MAX_NUM];
+ int mmid;
+};
+
+struct ipu7_isys_csi2_pdata {
+ void __iomem *base;
+};
+
+struct ipu7_isys_internal_csi2_pdata {
+ u32 nports;
+ u32 const *offsets;
+ u32 gpreg;
+};
+
+struct ipu7_hw_variants {
+ unsigned long offset;
+ u32 nr_mmus;
+ struct ipu7_mmu_hw mmu_hw[IPU_MMU_MAX_NUM];
+ u8 cdc_fifos;
+ u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS];
+ u32 dmem_offset;
+ u32 spc_offset; /* SPC offset from psys base */
+};
+
+struct ipu_isys_internal_pdata {
+ struct ipu7_isys_internal_csi2_pdata csi2;
+ struct ipu7_hw_variants hw_variant;
+ u32 num_parallel_streams;
+ u32 isys_dma_overshoot;
+};
+
+struct ipu7_isys_pdata {
+ void __iomem *base;
+ const struct ipu_isys_internal_pdata *ipdata;
+};
+
+struct ipu_psys_internal_pdata {
+ struct ipu7_hw_variants hw_variant;
+};
+
+struct ipu7_psys_pdata {
+ void __iomem *base;
+ const struct ipu_psys_internal_pdata *ipdata;
+};
+
+int request_cpd_fw(const struct firmware **firmware_p, const char *name,
+ struct device *device);
+void ipu_internal_pdata_init(struct ipu_isys_internal_pdata *isys_ipdata,
+ struct ipu_psys_internal_pdata *psys_ipdata);
+void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev);
+#endif /* IPU7_H */
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
deleted file mode 100644
index 6879c4651b46..000000000000
--- a/drivers/staging/media/lirc/Kconfig
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# LIRC driver(s) configuration
-#
-menuconfig LIRC_STAGING
- bool "Linux Infrared Remote Control IR receiver/transmitter drivers"
- depends on LIRC
- help
- Say Y here, and all supported Linux Infrared Remote Control IR and
- RF receiver and transmitter drivers will be displayed. When paired
- with a remote control and the lirc daemon, the receiver drivers
- allow control of your Linux system via remote control.
-
-if LIRC_STAGING
-
-config LIRC_BT829
- tristate "BT829 based hardware"
- depends on LIRC && PCI
- help
- Driver for the IR interface on BT829-based hardware
-
-config LIRC_IMON
- tristate "Legacy SoundGraph iMON Receiver and Display"
- depends on LIRC && USB
- help
- Driver for the original SoundGraph iMON IR Receiver and Display
-
- Current generation iMON devices use the input layer imon driver.
-
-config LIRC_PARALLEL
- tristate "Homebrew Parallel Port Receiver"
- depends on LIRC && PARPORT
- help
- Driver for Homebrew Parallel Port Receivers
-
-config LIRC_SASEM
- tristate "Sasem USB IR Remote"
- depends on LIRC && USB
- help
- Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module
-
-config LIRC_SERIAL
- tristate "Homebrew Serial Port Receiver"
- depends on LIRC
- help
- Driver for Homebrew Serial Port Receivers
-
-config LIRC_SERIAL_TRANSMITTER
- bool "Serial Port Transmitter"
- default y
- depends on LIRC_SERIAL
- help
- Serial Port Transmitter support
-
-config LIRC_SIR
- tristate "Built-in SIR IrDA port"
- depends on LIRC
- help
- Driver for the SIR IrDA port
-
-config LIRC_ZILOG
- tristate "Zilog/Hauppauge IR Transmitter"
- depends on LIRC && I2C
- help
- Driver for the Zilog/Hauppauge IR Transmitter, found on
- PVR-150/500, HVR-1200/1250/1700/1800, HD-PVR and other cards
-endif
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
deleted file mode 100644
index 5430adf0475d..000000000000
--- a/drivers/staging/media/lirc/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Makefile for the lirc drivers.
-#
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o
-obj-$(CONFIG_LIRC_IMON) += lirc_imon.o
-obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o
-obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
-obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o
-obj-$(CONFIG_LIRC_SIR) += lirc_sir.o
-obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o
diff --git a/drivers/staging/media/lirc/TODO b/drivers/staging/media/lirc/TODO
deleted file mode 100644
index cbea5d84fed3..000000000000
--- a/drivers/staging/media/lirc/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-- All drivers should either be ported to ir-core, or dropped entirely
- (see drivers/media/IR/mceusb.c vs. lirc_mceusb.c in lirc cvs for an
- example of a previously completed port).
-
-- lirc_bt829 uses registers on a Mach64 VT, which has a separate kernel
- framebuffer driver (atyfb) and userland X driver (mach64). It can't
- simply be converted to a normal PCI driver, but ideally it should be
- coordinated with the other drivers.
-
-Please send patches to:
-Jarod Wilson <jarod@wilsonet.com>
-Greg Kroah-Hartman <greg@kroah.com>
-
diff --git a/drivers/staging/media/lirc/TODO.lirc_zilog b/drivers/staging/media/lirc/TODO.lirc_zilog
deleted file mode 100644
index a97800a8e127..000000000000
--- a/drivers/staging/media/lirc/TODO.lirc_zilog
+++ /dev/null
@@ -1,36 +0,0 @@
-1. Both ir-kbd-i2c and lirc_zilog provide support for RX events for
-the chips supported by lirc_zilog. Before moving lirc_zilog out of staging:
-
-a. ir-kbd-i2c needs a module parameter added to allow the user to tell
- ir-kbd-i2c to ignore Z8 IR units.
-
-b. lirc_zilog should provide Rx key presses to the rc core like ir-kbd-i2c
- does.
-
-
-2. lirc_zilog module ref-counting need examination. It has not been
-verified that cdev and lirc_dev will take the proper module references on
-lirc_zilog to prevent removal of lirc_zilog when the /dev/lircN device node
-is open.
-
-(The good news is ref-counting of lirc_zilog internal structures appears to be
-complete. Testing has shown the cx18 module can be unloaded out from under
-irw + lircd + lirc_dev, with the /dev/lirc0 device node open, with no adverse
-effects. The cx18 module could then be reloaded and irw properly began
-receiving button presses again and ir_send worked without error.)
-
-
-3. Bridge drivers, if able, should provide a chip reset() callback
-to lirc_zilog via struct IR_i2c_init_data. cx18 and ivtv already have routines
-to perform Z8 chip resets via GPIO manipulations. This would allow lirc_zilog
-to bring the chip back to normal when it hangs, in the same places the
-original lirc_pvr150 driver code does. This is not strictly needed, so it
-is not required to move lirc_zilog out of staging.
-
-Note: Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
-and installed on Hauppauge products. When working on either module, developers
-must consider at least the following bridge drivers which mention an IR Rx unit
-at address 0x71 (indicative of a Z8):
-
- ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
-
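For reference, the kind of module reference that item 2 asks to have verified can be shown with a minimal standalone sketch. This is kernel-style C with hypothetical names (provider_owner, sketch_lirc_*), not code from lirc_zilog or lirc_dev: an open() path pins the module providing the file operations with try_module_get() and drops it on release, so the provider cannot be unloaded while /dev/lircN is open.

/* Minimal sketch, hypothetical names: pinning a provider module across open/release. */
#include <linux/fs.h>
#include <linux/module.h>

static struct module *provider_owner;	/* e.g. the IR driver's THIS_MODULE */

static int sketch_lirc_open(struct inode *inode, struct file *file)
{
	/* Fail the open rather than allow rmmod of a busy provider. */
	if (!try_module_get(provider_owner))
		return -ENODEV;
	return 0;
}

static int sketch_lirc_release(struct inode *inode, struct file *file)
{
	module_put(provider_owner);	/* drop the reference taken in open() */
	return 0;
}

static const struct file_operations sketch_lirc_fops = {
	.owner   = THIS_MODULE,
	.open    = sketch_lirc_open,
	.release = sketch_lirc_release,
};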
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
deleted file mode 100644
index 44f565547179..000000000000
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Remote control driver for the TV-card based on bt829
- *
- * by Leonid Froenchenko <lfroen@galileo.co.il>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include <media/lirc_dev.h>
-
-static int poll_main(void);
-static int atir_init_start(void);
-
-static void write_index(unsigned char index, unsigned int value);
-static unsigned int read_index(unsigned char index);
-
-static void do_i2c_start(void);
-static void do_i2c_stop(void);
-
-static void seems_wr_byte(unsigned char al);
-static unsigned char seems_rd_byte(void);
-
-static unsigned int read_index(unsigned char al);
-static void write_index(unsigned char ah, unsigned int edx);
-
-static void cycle_delay(int cycle);
-
-static void do_set_bits(unsigned char bl);
-static unsigned char do_get_bits(void);
-
-#define DATA_PCI_OFF 0x7FFC00
-#define WAIT_CYCLE 20
-
-#define DRIVER_NAME "lirc_bt829"
-
-static bool debug;
-
-static int atir_minor;
-static phys_addr_t pci_addr_phys;
-static unsigned char __iomem *pci_addr_lin;
-
-static struct lirc_driver atir_driver;
-
-static struct pci_dev *do_pci_probe(void)
-{
- struct pci_dev *my_dev;
-
- my_dev = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_264VT, NULL);
- if (my_dev) {
- pr_err("Using device: %s\n", pci_name(my_dev));
- pci_addr_phys = 0;
- if (my_dev->resource[0].flags & IORESOURCE_MEM) {
- pci_addr_phys = my_dev->resource[0].start;
- pr_info("memory at %pa\n", &pci_addr_phys);
- }
- if (pci_addr_phys == 0) {
- pr_err("no memory resource ?\n");
- pci_dev_put(my_dev);
- return NULL;
- }
- } else {
- pr_err("pci_probe failed\n");
- return NULL;
- }
- return my_dev;
-}
-
-static int atir_add_to_buf(void *data, struct lirc_buffer *buf)
-{
- unsigned char key;
- int status;
-
- status = poll_main();
- key = (status >> 8) & 0xFF;
- if (status & 0xFF) {
- dev_dbg(atir_driver.dev, "reading key %02X\n", key);
- lirc_buffer_write(buf, &key);
- return 0;
- }
- return -ENODATA;
-}
-
-static int atir_set_use_inc(void *data)
-{
- dev_dbg(atir_driver.dev, "driver is opened\n");
- return 0;
-}
-
-static void atir_set_use_dec(void *data)
-{
- dev_dbg(atir_driver.dev, "driver is closed\n");
-}
-
-int init_module(void)
-{
- struct pci_dev *pdev;
- int rc;
-
- pdev = do_pci_probe();
- if (pdev == NULL)
- return -ENODEV;
-
- rc = pci_enable_device(pdev);
- if (rc)
- goto err_put_dev;
-
- if (!atir_init_start()) {
- rc = -ENODEV;
- goto err_disable;
- }
-
- strcpy(atir_driver.name, "ATIR");
- atir_driver.minor = -1;
- atir_driver.code_length = 8;
- atir_driver.sample_rate = 10;
- atir_driver.data = NULL;
- atir_driver.add_to_buf = atir_add_to_buf;
- atir_driver.set_use_inc = atir_set_use_inc;
- atir_driver.set_use_dec = atir_set_use_dec;
- atir_driver.dev = &pdev->dev;
- atir_driver.owner = THIS_MODULE;
-
- atir_minor = lirc_register_driver(&atir_driver);
- if (atir_minor < 0) {
- pr_err("failed to register driver!\n");
- rc = atir_minor;
- goto err_unmap;
- }
- dev_dbg(atir_driver.dev, "driver is registered on minor %d\n",
- atir_minor);
-
- return 0;
-
-err_unmap:
- iounmap(pci_addr_lin);
-err_disable:
- pci_disable_device(pdev);
-err_put_dev:
- pci_dev_put(pdev);
- return rc;
-}
-
-
-void cleanup_module(void)
-{
- struct pci_dev *pdev = to_pci_dev(atir_driver.dev);
-
- lirc_unregister_driver(atir_minor);
- iounmap(pci_addr_lin);
- pci_disable_device(pdev);
- pci_dev_put(pdev);
-}
-
-
-static int atir_init_start(void)
-{
- pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400);
- if (!pci_addr_lin) {
- pr_info("pci mem must be mapped\n");
- return 0;
- }
- return 1;
-}
-
-static void cycle_delay(int cycle)
-{
- udelay(WAIT_CYCLE*cycle);
-}
-
-
-static int poll_main(void)
-{
- unsigned char status_high, status_low;
-
- do_i2c_start();
-
- seems_wr_byte(0xAA);
- seems_wr_byte(0x01);
-
- do_i2c_start();
-
- seems_wr_byte(0xAB);
-
- status_low = seems_rd_byte();
- status_high = seems_rd_byte();
-
- do_i2c_stop();
-
- return (status_high << 8) | status_low;
-}
-
-static void do_i2c_start(void)
-{
- do_set_bits(3);
- cycle_delay(4);
-
- do_set_bits(1);
- cycle_delay(7);
-
- do_set_bits(0);
- cycle_delay(2);
-}
-
-static void do_i2c_stop(void)
-{
- unsigned char bits;
-
- bits = do_get_bits() & 0xFD;
- do_set_bits(bits);
- cycle_delay(1);
-
- bits |= 1;
- do_set_bits(bits);
- cycle_delay(2);
-
- bits |= 2;
- do_set_bits(bits);
- bits = 3;
- do_set_bits(bits);
- cycle_delay(2);
-}
-
-static void seems_wr_byte(unsigned char value)
-{
- int i;
- unsigned char reg;
-
- reg = do_get_bits();
- for (i = 0; i < 8; i++) {
- if (value & 0x80)
- reg |= 0x02;
- else
- reg &= 0xFD;
-
- do_set_bits(reg);
- cycle_delay(1);
-
- reg |= 1;
- do_set_bits(reg);
- cycle_delay(1);
-
- reg &= 0xFE;
- do_set_bits(reg);
- cycle_delay(1);
- value <<= 1;
- }
- cycle_delay(2);
-
- reg |= 2;
- do_set_bits(reg);
-
- reg |= 1;
- do_set_bits(reg);
-
- cycle_delay(1);
- do_get_bits();
-
- reg &= 0xFE;
- do_set_bits(reg);
- cycle_delay(3);
-}
-
-static unsigned char seems_rd_byte(void)
-{
- int i;
- int rd_byte;
- unsigned char bits_2, bits_1;
-
- bits_1 = do_get_bits() | 2;
- do_set_bits(bits_1);
-
- rd_byte = 0;
- for (i = 0; i < 8; i++) {
- bits_1 &= 0xFE;
- do_set_bits(bits_1);
- cycle_delay(2);
-
- bits_1 |= 1;
- do_set_bits(bits_1);
- cycle_delay(1);
-
- bits_2 = do_get_bits();
- if (bits_2 & 2)
- rd_byte |= 1;
-
- rd_byte <<= 1;
- }
-
- bits_1 = 0;
- if (bits_2 == 0)
- bits_1 |= 2;
-
- do_set_bits(bits_1);
- cycle_delay(2);
-
- bits_1 |= 1;
- do_set_bits(bits_1);
- cycle_delay(3);
-
- bits_1 &= 0xFE;
- do_set_bits(bits_1);
- cycle_delay(2);
-
- rd_byte >>= 1;
- rd_byte &= 0xFF;
- return rd_byte;
-}
-
-static void do_set_bits(unsigned char new_bits)
-{
- int reg_val;
-
- reg_val = read_index(0x34);
- if (new_bits & 2) {
- reg_val &= 0xFFFFFFDF;
- reg_val |= 1;
- } else {
- reg_val &= 0xFFFFFFFE;
- reg_val |= 0x20;
- }
- reg_val |= 0x10;
- write_index(0x34, reg_val);
-
- reg_val = read_index(0x31);
- if (new_bits & 1)
- reg_val |= 0x1000000;
- else
- reg_val &= 0xFEFFFFFF;
-
- reg_val |= 0x8000000;
- write_index(0x31, reg_val);
-}
-
-static unsigned char do_get_bits(void)
-{
- unsigned char bits;
- int reg_val;
-
- reg_val = read_index(0x34);
- reg_val |= 0x10;
- reg_val &= 0xFFFFFFDF;
- write_index(0x34, reg_val);
-
- reg_val = read_index(0x34);
- bits = 0;
- if (reg_val & 8)
- bits |= 2;
- else
- bits &= 0xFD;
-
- reg_val = read_index(0x31);
- if (reg_val & 0x1000000)
- bits |= 1;
- else
- bits &= 0xFE;
-
- return bits;
-}
-
-static unsigned int read_index(unsigned char index)
-{
- unsigned char __iomem *addr;
- /* addr = pci_addr_lin + DATA_PCI_OFF + ((index & 0xFF) << 2); */
- addr = pci_addr_lin + ((index & 0xFF) << 2);
- return readl(addr);
-}
-
-static void write_index(unsigned char index, unsigned int reg_val)
-{
- unsigned char __iomem *addr;
-
- addr = pci_addr_lin + ((index & 0xFF) << 2);
- writel(reg_val, addr);
-}
-
-MODULE_AUTHOR("Froenchenko Leonid");
-MODULE_DESCRIPTION("IR remote driver for bt829 based TV cards");
-MODULE_LICENSE("GPL");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
deleted file mode 100644
index 534b8103ae80..000000000000
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ /dev/null
@@ -1,983 +0,0 @@
-/*
- * lirc_imon.c: LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD
- * including the iMON PAD model
- *
- * Copyright(C) 2004 Venky Raju(dev@venky.ws)
- * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com>
- *
- * lirc_imon is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-
-#define MOD_AUTHOR "Venky Raju <dev@venky.ws>"
-#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
-#define MOD_NAME "lirc_imon"
-#define MOD_VERSION "0.8"
-
-#define DISPLAY_MINOR_BASE 144
-#define DEVICE_NAME "lcd%d"
-
-#define BUF_CHUNK_SIZE 4
-#define BUF_SIZE 128
-
-#define BIT_DURATION 250 /* each bit received is 250us */
-
-/*** P R O T O T Y P E S ***/
-
-/* USB Callback prototypes */
-static int imon_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void imon_disconnect(struct usb_interface *interface);
-static void usb_rx_callback(struct urb *urb);
-static void usb_tx_callback(struct urb *urb);
-
-/* suspend/resume support */
-static int imon_resume(struct usb_interface *intf);
-static int imon_suspend(struct usb_interface *intf, pm_message_t message);
-
-/* Display file_operations function prototypes */
-static int display_open(struct inode *inode, struct file *file);
-static int display_close(struct inode *inode, struct file *file);
-
-/* VFD write operation */
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos);
-
-/* LIRC driver function prototypes */
-static int ir_open(void *data);
-static void ir_close(void *data);
-
-/*** G L O B A L S ***/
-#define IMON_DATA_BUF_SZ 35
-
-struct imon_context {
- struct usb_device *usbdev;
- /* Newer devices have two interfaces */
- int display; /* not all controllers do */
- int display_isopen; /* display port has been opened */
- int ir_isopen; /* IR port open */
- int dev_present; /* USB device presence */
- struct mutex ctx_lock; /* to lock this object */
- wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
-
- int vfd_proto_6p; /* some VFD require a 6th packet */
-
- struct lirc_driver *driver;
- struct usb_endpoint_descriptor *rx_endpoint;
- struct usb_endpoint_descriptor *tx_endpoint;
- struct urb *rx_urb;
- struct urb *tx_urb;
- unsigned char usb_rx_buf[8];
- unsigned char usb_tx_buf[8];
-
- struct rx_data {
- int count; /* length of 0 or 1 sequence */
- int prev_bit; /* logic level of sequence */
- int initial_space; /* initial space flag */
- } rx;
-
- struct tx_t {
- unsigned char data_buf[IMON_DATA_BUF_SZ]; /* user data buffer */
- struct completion finished; /* wait for write to finish */
- atomic_t busy; /* write in progress */
- int status; /* status of tx completion */
- } tx;
-};
-
-static const struct file_operations display_fops = {
- .owner = THIS_MODULE,
- .open = &display_open,
- .write = &vfd_write,
- .release = &display_close,
- .llseek = noop_llseek,
-};
-
-/*
- * USB Device ID for iMON USB Control Boards
- *
- * The Windows drivers contain 6 different inf files, more or less one for
- * each new device until the 0x0034-0x0046 devices, which all use the same
- * driver. Some of the devices in the 34-46 range haven't been definitively
- * identified yet. Early devices have either a TriGem Computer, Inc. or a
- * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
- * devices use the SoundGraph vendor ID (0x15c2).
- */
-static struct usb_device_id imon_usb_id_table[] = {
- /* TriGem iMON (IR only) -- TG_iMON.inf */
- { USB_DEVICE(0x0aa8, 0x8001) },
-
- /* SoundGraph iMON (IR only) -- sg_imon.inf */
- { USB_DEVICE(0x04e8, 0xff30) },
-
- /* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */
- { USB_DEVICE(0x0aa8, 0xffda) },
-
- /* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */
- { USB_DEVICE(0x15c2, 0xffda) },
-
- {}
-};
-
-/* Some iMON VFD models require a 6th packet for VFD writes */
-static struct usb_device_id vfd_proto_6p_list[] = {
- { USB_DEVICE(0x15c2, 0xffda) },
- {}
-};
-
-/* Some iMON devices have no lcd/vfd, don't set one up */
-static struct usb_device_id ir_only_list[] = {
- { USB_DEVICE(0x0aa8, 0x8001) },
- { USB_DEVICE(0x04e8, 0xff30) },
- {}
-};
-
-/* USB Device data */
-static struct usb_driver imon_driver = {
- .name = MOD_NAME,
- .probe = imon_probe,
- .disconnect = imon_disconnect,
- .suspend = imon_suspend,
- .resume = imon_resume,
- .id_table = imon_usb_id_table,
-};
-
-static struct usb_class_driver imon_class = {
- .name = DEVICE_NAME,
- .fops = &display_fops,
- .minor_base = DISPLAY_MINOR_BASE,
-};
-
-/* to prevent races between open() and disconnect(), probing, etc */
-static DEFINE_MUTEX(driver_lock);
-
-static int debug;
-
-/*** M O D U L E C O D E ***/
-
-MODULE_AUTHOR(MOD_AUTHOR);
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_VERSION(MOD_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)");
-
-static void free_imon_context(struct imon_context *context)
-{
- struct device *dev = context->driver->dev;
-
- usb_free_urb(context->tx_urb);
- usb_free_urb(context->rx_urb);
- lirc_buffer_free(context->driver->rbuf);
- kfree(context->driver->rbuf);
- kfree(context->driver);
- kfree(context);
-
- dev_dbg(dev, "%s: iMON context freed\n", __func__);
-}
-
-static void deregister_from_lirc(struct imon_context *context)
-{
- int retval;
- int minor = context->driver->minor;
-
- retval = lirc_unregister_driver(minor);
- if (retval)
- dev_err(&context->usbdev->dev,
- "unable to deregister from lirc(%d)", retval);
- else
- dev_info(&context->usbdev->dev,
- "Deregistered iMON driver (minor:%d)\n", minor);
-
-}
-
-/**
- * Called when the Display device (e.g. /dev/lcd0)
- * is opened by the application.
- */
-static int display_open(struct inode *inode, struct file *file)
-{
- struct usb_interface *interface;
- struct imon_context *context = NULL;
- int subminor;
- int retval = 0;
-
- /* prevent races with disconnect */
- mutex_lock(&driver_lock);
-
- subminor = iminor(inode);
- interface = usb_find_interface(&imon_driver, subminor);
- if (!interface) {
- pr_err("%s: could not find interface for minor %d\n",
- __func__, subminor);
- retval = -ENODEV;
- goto exit;
- }
- context = usb_get_intfdata(interface);
-
- if (!context) {
- dev_err(&interface->dev, "no context found for minor %d\n",
- subminor);
- retval = -ENODEV;
- goto exit;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->display) {
- dev_err(&interface->dev,
- "%s: display not supported by device\n", __func__);
- retval = -ENODEV;
- } else if (context->display_isopen) {
- dev_err(&interface->dev,
- "%s: display port is already open\n", __func__);
- retval = -EBUSY;
- } else {
- context->display_isopen = 1;
- file->private_data = context;
- dev_info(context->driver->dev, "display port opened\n");
- }
-
- mutex_unlock(&context->ctx_lock);
-
-exit:
- mutex_unlock(&driver_lock);
- return retval;
-}
-
-/**
- * Called when the display device (e.g. /dev/lcd0)
- * is closed by the application.
- */
-static int display_close(struct inode *inode, struct file *file)
-{
- struct imon_context *context = NULL;
- int retval = 0;
-
- context = file->private_data;
-
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->display) {
- dev_err(&context->usbdev->dev,
- "%s: display not supported by device\n", __func__);
- retval = -ENODEV;
- } else if (!context->display_isopen) {
- dev_err(&context->usbdev->dev,
- "%s: display is not open\n", __func__);
- retval = -EIO;
- } else {
- context->display_isopen = 0;
- dev_info(context->driver->dev, "display port closed\n");
- if (!context->dev_present && !context->ir_isopen) {
- /*
- * Device disconnected before close and IR port is not
- * open. If IR port is open, context will be deleted by
- * ir_close.
- */
- mutex_unlock(&context->ctx_lock);
- free_imon_context(context);
- return retval;
- }
- }
-
- mutex_unlock(&context->ctx_lock);
- return retval;
-}
-
-/**
- * Sends a packet to the device -- this function must be called
- * with context->ctx_lock held.
- */
-static int send_packet(struct imon_context *context)
-{
- unsigned int pipe;
- int interval = 0;
- int retval = 0;
-
- /* Check if we need to use control or interrupt urb */
- pipe = usb_sndintpipe(context->usbdev,
- context->tx_endpoint->bEndpointAddress);
- interval = context->tx_endpoint->bInterval;
-
- usb_fill_int_urb(context->tx_urb, context->usbdev, pipe,
- context->usb_tx_buf,
- sizeof(context->usb_tx_buf),
- usb_tx_callback, context, interval);
-
- context->tx_urb->actual_length = 0;
-
- init_completion(&context->tx.finished);
- atomic_set(&context->tx.busy, 1);
-
- retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
- if (retval) {
- atomic_set(&context->tx.busy, 0);
- dev_err(&context->usbdev->dev, "error submitting urb(%d)\n",
- retval);
- } else {
- /* Wait for transmission to complete (or abort) */
- mutex_unlock(&context->ctx_lock);
- retval = wait_for_completion_interruptible(
- &context->tx.finished);
- if (retval)
- dev_err(&context->usbdev->dev,
- "%s: task interrupted\n", __func__);
- mutex_lock(&context->ctx_lock);
-
- retval = context->tx.status;
- if (retval)
- dev_err(&context->usbdev->dev,
- "packet tx failed (%d)\n", retval);
- }
-
- return retval;
-}
-
-/**
- * Writes data to the VFD. The iMON VFD is 2x16 characters
- * and requires data in 5 consecutive USB interrupt packets,
- * each packet but the last carrying 7 bytes.
- *
- * I don't know if the VFD board supports features such as
- * scrolling, clearing rows, blanking, etc., so the caller
- * must provide a full screen of data. If fewer than
- * 32 bytes are provided, spaces will be appended to
- * generate a full screen.
- */
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos)
-{
- int i;
- int offset;
- int seq;
- int retval = 0;
- struct imon_context *context;
- const unsigned char vfd_packet6[] = {
- 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
- int *data_buf = NULL;
-
- context = file->private_data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->dev_present) {
- dev_err(&context->usbdev->dev,
- "%s: no iMON device present\n", __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- if (n_bytes <= 0 || n_bytes > IMON_DATA_BUF_SZ - 3) {
- dev_err(&context->usbdev->dev,
- "%s: invalid payload size\n", __func__);
- retval = -EINVAL;
- goto exit;
- }
-
- data_buf = memdup_user(buf, n_bytes);
- if (IS_ERR(data_buf)) {
- retval = PTR_ERR(data_buf);
- data_buf = NULL;
- goto exit;
- }
-
- memcpy(context->tx.data_buf, data_buf, n_bytes);
-
- /* Pad with spaces */
- for (i = n_bytes; i < IMON_DATA_BUF_SZ - 3; ++i)
- context->tx.data_buf[i] = ' ';
-
- for (i = IMON_DATA_BUF_SZ - 3; i < IMON_DATA_BUF_SZ; ++i)
- context->tx.data_buf[i] = 0xFF;
-
- offset = 0;
- seq = 0;
-
- do {
- memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7);
- context->usb_tx_buf[7] = (unsigned char) seq;
-
- retval = send_packet(context);
- if (retval) {
- dev_err(&context->usbdev->dev,
- "send packet failed for packet #%d\n",
- seq / 2);
- goto exit;
- } else {
- seq += 2;
- offset += 7;
- }
-
- } while (offset < IMON_DATA_BUF_SZ);
-
- if (context->vfd_proto_6p) {
- /* Send packet #6 */
- memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
- context->usb_tx_buf[7] = (unsigned char) seq;
- retval = send_packet(context);
- if (retval)
- dev_err(&context->usbdev->dev,
- "send packet failed for packet #%d\n",
- seq / 2);
- }
-
-exit:
- mutex_unlock(&context->ctx_lock);
- kfree(data_buf);
-
- return (!retval) ? n_bytes : retval;
-}
-
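As a standalone reference for the framing vfd_write() implements, the sketch below (plain userspace C, not driver code) walks the same 35-byte buffer: 32 screen bytes padded with spaces plus three trailing 0xFF bytes, cut into five 7-byte chunks, each sent with a sequence byte that advances by two.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char screen[35], packet[8];
	int offset = 0, seq = 0;

	memset(screen, ' ', 32);	/* a blank 2x16 display */
	memset(screen + 32, 0xFF, 3);	/* trailing 0xFF bytes, as in the driver */

	while (offset < 35) {
		memcpy(packet, screen + offset, 7);
		packet[7] = (unsigned char)seq;	/* sequence byte */
		printf("packet #%d: 7 data bytes, seq=%d\n", seq / 2, seq);
		offset += 7;
		seq += 2;
	}
	return 0;
}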
-/**
- * Callback function for USB core API: transmit data
- */
-static void usb_tx_callback(struct urb *urb)
-{
- struct imon_context *context;
-
- if (!urb)
- return;
- context = (struct imon_context *)urb->context;
- if (!context)
- return;
-
- context->tx.status = urb->status;
-
- /* notify waiters that write has finished */
- atomic_set(&context->tx.busy, 0);
- complete(&context->tx.finished);
-}
-
-/**
- * Called by lirc_dev when the application opens /dev/lirc
- */
-static int ir_open(void *data)
-{
- struct imon_context *context;
-
- /* prevent races with disconnect */
- mutex_lock(&driver_lock);
-
- context = data;
-
- /* initial IR protocol decode variables */
- context->rx.count = 0;
- context->rx.initial_space = 1;
- context->rx.prev_bit = 0;
-
- context->ir_isopen = 1;
- dev_info(context->driver->dev, "IR port opened\n");
-
- mutex_unlock(&driver_lock);
- return 0;
-}
-
-/**
- * Called by lirc_dev when the application closes /dev/lirc
- */
-static void ir_close(void *data)
-{
- struct imon_context *context;
-
- context = data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return;
- }
-
- mutex_lock(&context->ctx_lock);
-
- context->ir_isopen = 0;
- dev_info(context->driver->dev, "IR port closed\n");
-
- if (!context->dev_present) {
- /*
- * Device disconnected while IR port was still open. Driver
- * was not deregistered at disconnect time, so do it now.
- */
- deregister_from_lirc(context);
-
- if (!context->display_isopen) {
- mutex_unlock(&context->ctx_lock);
- free_imon_context(context);
- return;
- }
- /*
- * If display port is open, context will be deleted by
- * display_close
- */
- }
-
- mutex_unlock(&context->ctx_lock);
-}
-
-/**
- * Convert bit count to time duration (in us) and submit
- * the value to lirc_dev.
- */
-static void submit_data(struct imon_context *context)
-{
- unsigned char buf[4];
- int value = context->rx.count;
- int i;
-
- dev_dbg(context->driver->dev, "submitting data to LIRC\n");
-
- value *= BIT_DURATION;
- value &= PULSE_MASK;
- if (context->rx.prev_bit)
- value |= PULSE_BIT;
-
- for (i = 0; i < 4; ++i)
- buf[i] = value>>(i*8);
-
- lirc_buffer_write(context->driver->rbuf, buf);
- wake_up(&context->driver->rbuf->wait_poll);
-}
-
-/**
- * Process the incoming packet
- */
-static void imon_incoming_packet(struct imon_context *context,
- struct urb *urb, int intf)
-{
- int len = urb->actual_length;
- unsigned char *buf = urb->transfer_buffer;
- struct device *dev = context->driver->dev;
- int octet, bit;
- unsigned char mask;
-
- /*
- * just bail out if no listening IR client
- */
- if (!context->ir_isopen)
- return;
-
- if (len != 8) {
- dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
- __func__, len, intf);
- return;
- }
-
- if (debug)
- dev_info(dev, "raw packet: %*ph\n", len, buf);
- /*
- * Translate received data to pulse and space lengths.
- * Received data is active low, i.e. pulses are 0 and
- * spaces are 1.
- *
- * My original algorithm was essentially similar to
- * Changwoo Ryu's with the exception that he switched
- * the incoming bits to active high and also fed an
- * initial space to LIRC at the start of a new sequence
- * if the previous bit was a pulse.
- *
- * I've decided to adopt his algorithm.
- */
-
- if (buf[7] == 1 && context->rx.initial_space) {
- /* LIRC requires a leading space */
- context->rx.prev_bit = 0;
- context->rx.count = 4;
- submit_data(context);
- context->rx.count = 0;
- }
-
- for (octet = 0; octet < 5; ++octet) {
- mask = 0x80;
- for (bit = 0; bit < 8; ++bit) {
- int curr_bit = !(buf[octet] & mask);
-
- if (curr_bit != context->rx.prev_bit) {
- if (context->rx.count) {
- submit_data(context);
- context->rx.count = 0;
- }
- context->rx.prev_bit = curr_bit;
- }
- ++context->rx.count;
- mask >>= 1;
- }
- }
-
- if (buf[7] == 10) {
- if (context->rx.count) {
- submit_data(context);
- context->rx.count = 0;
- }
- context->rx.initial_space = context->rx.prev_bit;
- }
-}
-
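The run-length decode performed above (and turned into LIRC values by submit_data()) can also be shown in isolation. The sketch below is standalone userspace C; the PULSE_BIT/PULSE_MASK values mirror the lirc.h definitions the driver uses, and the example run lengths are arbitrary.

#include <stdio.h>

#define BIT_DURATION	250		/* us represented by each sampled bit */
#define PULSE_BIT	0x01000000
#define PULSE_MASK	0x00FFFFFF

/* A run of identical sample bits becomes one pulse/space duration. */
static unsigned int run_to_lirc_value(int run_length, int is_pulse)
{
	unsigned int value = (run_length * BIT_DURATION) & PULSE_MASK;

	return is_pulse ? value | PULSE_BIT : value;
}

int main(void)
{
	printf("pulse: 0x%08x (%u us)\n", run_to_lirc_value(36, 1),
	       run_to_lirc_value(36, 1) & PULSE_MASK);
	printf("space: 0x%08x (%u us)\n", run_to_lirc_value(18, 0),
	       run_to_lirc_value(18, 0) & PULSE_MASK);
	return 0;
}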
-/**
- * Callback function for USB core API: receive data
- */
-static void usb_rx_callback(struct urb *urb)
-{
- struct imon_context *context;
- int intfnum = 0;
-
- if (!urb)
- return;
-
- context = (struct imon_context *)urb->context;
- if (!context)
- return;
-
- switch (urb->status) {
- case -ENOENT: /* usbcore unlink successful! */
- return;
-
- case 0:
- imon_incoming_packet(context, urb, intfnum);
- break;
-
- default:
- dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n",
- __func__, urb->status);
- break;
- }
-
- usb_submit_urb(context->rx_urb, GFP_ATOMIC);
-}
-
-/**
- * Callback function for USB core API: Probe
- */
-static int imon_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *usbdev = NULL;
- struct usb_host_interface *iface_desc = NULL;
- struct usb_endpoint_descriptor *rx_endpoint = NULL;
- struct usb_endpoint_descriptor *tx_endpoint = NULL;
- struct urb *rx_urb = NULL;
- struct urb *tx_urb = NULL;
- struct lirc_driver *driver = NULL;
- struct lirc_buffer *rbuf = NULL;
- struct device *dev = &interface->dev;
- int ifnum;
- int lirc_minor = 0;
- int num_endpts;
- int retval = -ENOMEM;
- int display_ep_found = 0;
- int ir_ep_found = 0;
- int vfd_proto_6p = 0;
- struct imon_context *context = NULL;
- int i;
- u16 vendor, product;
-
- /* prevent races probing devices w/multiple interfaces */
- mutex_lock(&driver_lock);
-
- context = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
- if (!context)
- goto driver_unlock;
-
- /*
- * Try to auto-detect the type of display if the user hasn't set
- * it by hand via the display_type modparam. Default is VFD.
- */
- if (usb_match_id(interface, ir_only_list))
- context->display = 0;
- else
- context->display = 1;
-
- usbdev = usb_get_dev(interface_to_usbdev(interface));
- iface_desc = interface->cur_altsetting;
- num_endpts = iface_desc->desc.bNumEndpoints;
- ifnum = iface_desc->desc.bInterfaceNumber;
- vendor = le16_to_cpu(usbdev->descriptor.idVendor);
- product = le16_to_cpu(usbdev->descriptor.idProduct);
-
- dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
- __func__, vendor, product, ifnum);
-
- /*
- * Scan the endpoint list and set:
- * first input endpoint = IR endpoint
- * first output endpoint = display endpoint
- */
- for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
- struct usb_endpoint_descriptor *ep;
- int ep_dir;
- int ep_type;
-
- ep = &iface_desc->endpoint[i].desc;
- ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep_type = usb_endpoint_type(ep);
-
- if (!ir_ep_found &&
- ep_dir == USB_DIR_IN &&
- ep_type == USB_ENDPOINT_XFER_INT) {
-
- rx_endpoint = ep;
- ir_ep_found = 1;
- dev_dbg(dev, "%s: found IR endpoint\n", __func__);
-
- } else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
- ep_type == USB_ENDPOINT_XFER_INT) {
- tx_endpoint = ep;
- display_ep_found = 1;
- dev_dbg(dev, "%s: found display endpoint\n", __func__);
- }
- }
-
- /*
- * Some iMON receivers have no display. Unfortunately, it seems
- * that SoundGraph recycles device IDs between devices both with
- * and without... :\
- */
- if (context->display == 0) {
- display_ep_found = 0;
- dev_dbg(dev, "%s: device has no display\n", __func__);
- }
-
- /* Input endpoint is mandatory */
- if (!ir_ep_found) {
- dev_err(dev, "%s: no valid input (IR) endpoint found.\n",
- __func__);
- retval = -ENODEV;
- goto free_context;
- }
-
- /* Determine if display requires 6 packets */
- if (display_ep_found) {
- if (usb_match_id(interface, vfd_proto_6p_list))
- vfd_proto_6p = 1;
-
- dev_dbg(dev, "%s: vfd_proto_6p: %d\n",
- __func__, vfd_proto_6p);
- }
-
- driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!driver)
- goto free_context;
-
- rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!rbuf)
- goto free_driver;
-
- if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
- dev_err(dev, "%s: lirc_buffer_init failed\n", __func__);
- goto free_rbuf;
- }
- rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rx_urb) {
- dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__);
- goto free_lirc_buf;
- }
- tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_urb) {
- dev_err(dev, "%s: usb_alloc_urb failed for display urb\n",
- __func__);
- goto free_rx_urb;
- }
-
- mutex_init(&context->ctx_lock);
- context->vfd_proto_6p = vfd_proto_6p;
-
- strcpy(driver->name, MOD_NAME);
- driver->minor = -1;
- driver->code_length = BUF_CHUNK_SIZE * 8;
- driver->sample_rate = 0;
- driver->features = LIRC_CAN_REC_MODE2;
- driver->data = context;
- driver->rbuf = rbuf;
- driver->set_use_inc = ir_open;
- driver->set_use_dec = ir_close;
- driver->dev = &interface->dev;
- driver->owner = THIS_MODULE;
-
- mutex_lock(&context->ctx_lock);
-
- context->driver = driver;
- /* start out in keyboard mode */
-
- lirc_minor = lirc_register_driver(driver);
- if (lirc_minor < 0) {
- dev_err(dev, "%s: lirc_register_driver failed\n", __func__);
- goto free_tx_urb;
- }
-
- dev_info(dev, "Registered iMON driver (lirc minor: %d)\n",
- lirc_minor);
-
- /* Needed while unregistering! */
- driver->minor = lirc_minor;
-
- context->usbdev = usbdev;
- context->dev_present = 1;
- context->rx_endpoint = rx_endpoint;
- context->rx_urb = rx_urb;
-
- /*
- * tx is used to send characters to lcd/vfd, associate RF
- * remotes, set IR protocol, and maybe more...
- */
- context->tx_endpoint = tx_endpoint;
- context->tx_urb = tx_urb;
-
- if (display_ep_found)
- context->display = 1;
-
- usb_fill_int_urb(context->rx_urb, context->usbdev,
- usb_rcvintpipe(context->usbdev,
- context->rx_endpoint->bEndpointAddress),
- context->usb_rx_buf, sizeof(context->usb_rx_buf),
- usb_rx_callback, context,
- context->rx_endpoint->bInterval);
-
- retval = usb_submit_urb(context->rx_urb, GFP_KERNEL);
- if (retval) {
- dev_err(dev, "usb_submit_urb failed for intf0 (%d)\n", retval);
- goto unregister_lirc;
- }
-
- usb_set_intfdata(interface, context);
-
- if (context->display && ifnum == 0) {
- dev_dbg(dev, "%s: Registering iMON display with sysfs\n",
- __func__);
-
- if (usb_register_dev(interface, &imon_class)) {
- /* Not a fatal error, so ignore */
- dev_info(dev, "%s: could not get a minor number for display\n",
- __func__);
- }
- }
-
- dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
- vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum);
-
- /* Everything went fine. Just unlock and return retval (which is 0) */
- goto driver_unlock;
-
-unregister_lirc:
- lirc_unregister_driver(driver->minor);
-
-free_tx_urb:
- usb_free_urb(tx_urb);
-
-free_rx_urb:
- usb_free_urb(rx_urb);
-
-free_lirc_buf:
- lirc_buffer_free(rbuf);
-
-free_rbuf:
- kfree(rbuf);
-
-free_driver:
- kfree(driver);
-free_context:
- kfree(context);
- context = NULL;
-
-driver_unlock:
- mutex_unlock(&driver_lock);
-
- return retval;
-}
-
-/**
- * Callback function for USB core API: disconnect
- */
-static void imon_disconnect(struct usb_interface *interface)
-{
- struct imon_context *context;
- int ifnum;
-
- /* prevent races with ir_open()/display_open() */
- mutex_lock(&driver_lock);
-
- context = usb_get_intfdata(interface);
- ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
-
- mutex_lock(&context->ctx_lock);
-
- usb_set_intfdata(interface, NULL);
-
- /* Abort ongoing write */
- if (atomic_read(&context->tx.busy)) {
- usb_kill_urb(context->tx_urb);
- complete_all(&context->tx.finished);
- }
-
- context->dev_present = 0;
- usb_kill_urb(context->rx_urb);
- if (context->display)
- usb_deregister_dev(interface, &imon_class);
-
- if (!context->ir_isopen && !context->dev_present) {
- deregister_from_lirc(context);
- mutex_unlock(&context->ctx_lock);
- if (!context->display_isopen)
- free_imon_context(context);
- } else
- mutex_unlock(&context->ctx_lock);
-
- mutex_unlock(&driver_lock);
-
- dev_info(&interface->dev, "%s: iMON device (intf%d) disconnected\n",
- __func__, ifnum);
-}
-
-static int imon_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct imon_context *context = usb_get_intfdata(intf);
-
- usb_kill_urb(context->rx_urb);
-
- return 0;
-}
-
-static int imon_resume(struct usb_interface *intf)
-{
- struct imon_context *context = usb_get_intfdata(intf);
-
- usb_fill_int_urb(context->rx_urb, context->usbdev,
- usb_rcvintpipe(context->usbdev,
- context->rx_endpoint->bEndpointAddress),
- context->usb_rx_buf, sizeof(context->usb_rx_buf),
- usb_rx_callback, context,
- context->rx_endpoint->bInterval);
-
- return usb_submit_urb(context->rx_urb, GFP_ATOMIC);
-}
-
-module_usb_driver(imon_driver);
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
deleted file mode 100644
index c1408342b1d0..000000000000
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ /dev/null
@@ -1,744 +0,0 @@
-/*
- * lirc_parallel.c
- *
- * lirc_parallel - device driver for infra-red signal receiving and
- * transmitting unit built by the author
- *
- * Copyright (C) 1998 Christoph Bartelmus <lirc@bartelmus.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-/*** Includes ***/
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/time.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/uaccess.h>
-#include <asm/div64.h>
-
-#include <linux/poll.h>
-#include <linux/parport.h>
-#include <linux/platform_device.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#include "lirc_parallel.h"
-
-#define LIRC_DRIVER_NAME "lirc_parallel"
-
-#ifndef LIRC_IRQ
-#define LIRC_IRQ 7
-#endif
-#ifndef LIRC_PORT
-#define LIRC_PORT 0x378
-#endif
-#ifndef LIRC_TIMER
-#define LIRC_TIMER 65536
-#endif
-
-/*** Global Variables ***/
-
-static bool debug;
-static bool check_pselecd;
-
-static unsigned int irq = LIRC_IRQ;
-static unsigned int io = LIRC_PORT;
-#ifdef LIRC_TIMER
-static unsigned int timer;
-static unsigned int default_timer = LIRC_TIMER;
-#endif
-
-#define RBUF_SIZE (256) /* this must be a power of 2 larger than 1 */
-
-static int rbuf[RBUF_SIZE];
-
-static DECLARE_WAIT_QUEUE_HEAD(lirc_wait);
-
-static unsigned int rptr;
-static unsigned int wptr;
-static unsigned int lost_irqs;
-static int is_open;
-
-static struct parport *pport;
-static struct pardevice *ppdevice;
-static int is_claimed;
-
-static unsigned int tx_mask = 1;
-
-/*** Internal Functions ***/
-
-static unsigned int in(int offset)
-{
- switch (offset) {
- case LIRC_LP_BASE:
- return parport_read_data(pport);
- case LIRC_LP_STATUS:
- return parport_read_status(pport);
- case LIRC_LP_CONTROL:
- return parport_read_control(pport);
- }
- return 0; /* make compiler happy */
-}
-
-static void out(int offset, int value)
-{
- switch (offset) {
- case LIRC_LP_BASE:
- parport_write_data(pport, value);
- break;
- case LIRC_LP_CONTROL:
- parport_write_control(pport, value);
- break;
- case LIRC_LP_STATUS:
- pr_info("attempt to write to status register\n");
- break;
- }
-}
-
-static unsigned int lirc_get_timer(void)
-{
- return in(LIRC_PORT_TIMER) & LIRC_PORT_TIMER_BIT;
-}
-
-static unsigned int lirc_get_signal(void)
-{
- return in(LIRC_PORT_SIGNAL) & LIRC_PORT_SIGNAL_BIT;
-}
-
-static void lirc_on(void)
-{
- out(LIRC_PORT_DATA, tx_mask);
-}
-
-static void lirc_off(void)
-{
- out(LIRC_PORT_DATA, 0);
-}
-
-static unsigned int init_lirc_timer(void)
-{
- struct timeval tv, now;
- unsigned int level, newlevel, timeelapsed, newtimer;
- int count = 0;
-
- do_gettimeofday(&tv);
- tv.tv_sec++; /* wait max. 1 sec. */
- level = lirc_get_timer();
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- count++;
- level = newlevel;
- do_gettimeofday(&now);
- } while (count < 1000 && (now.tv_sec < tv.tv_sec
- || (now.tv_sec == tv.tv_sec
- && now.tv_usec < tv.tv_usec)));
-
- timeelapsed = (now.tv_sec + 1 - tv.tv_sec)*1000000
- + (now.tv_usec - tv.tv_usec);
- if (count >= 1000 && timeelapsed > 0) {
- if (default_timer == 0) {
- /* autodetect timer */
- newtimer = (1000000*count)/timeelapsed;
- pr_info("%u Hz timer detected\n", newtimer);
- return newtimer;
- }
- newtimer = (1000000*count)/timeelapsed;
- if (abs(newtimer - default_timer) > default_timer/10) {
- /* bad timer */
- pr_notice("bad timer: %u Hz\n", newtimer);
- pr_notice("using default timer: %u Hz\n",
- default_timer);
- return default_timer;
- }
- pr_info("%u Hz timer detected\n", newtimer);
- return newtimer; /* use detected value */
- }
-
- pr_notice("no timer detected\n");
- return 0;
-}
-
-static int lirc_claim(void)
-{
- if (parport_claim(ppdevice) != 0) {
- pr_warn("could not claim port\n");
- pr_warn("waiting for port becoming available\n");
- if (parport_claim_or_block(ppdevice) < 0) {
- pr_notice("could not claim port, giving up\n");
- return 0;
- }
- }
- out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP);
- is_claimed = 1;
- return 1;
-}
-
-/*** interrupt handler ***/
-
-static void rbuf_write(int signal)
-{
- unsigned int nwptr;
-
- nwptr = (wptr + 1) & (RBUF_SIZE - 1);
- if (nwptr == rptr) {
- /* no new signals will be accepted */
- lost_irqs++;
- pr_notice("buffer overrun\n");
- return;
- }
- rbuf[wptr] = signal;
- wptr = nwptr;
-}
-
-static void lirc_lirc_irq_handler(void *blah)
-{
- struct timeval tv;
- static struct timeval lasttv;
- static int init;
- long signal;
- int data;
- unsigned int level, newlevel;
- unsigned int timeout;
-
- if (!is_open)
- return;
-
- if (!is_claimed)
- return;
-
-#if 0
- /* disable interrupt */
- disable_irq(irq);
- out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ) & (~LP_PINTEN));
-#endif
- if (check_pselecd && (in(1) & LP_PSELECD))
- return;
-
-#ifdef LIRC_TIMER
- if (init) {
- do_gettimeofday(&tv);
-
- signal = tv.tv_sec - lasttv.tv_sec;
- if (signal > 15)
- /* really long time */
- data = PULSE_MASK;
- else
- data = (int) (signal*1000000 +
- tv.tv_usec - lasttv.tv_usec +
- LIRC_SFH506_DELAY);
-
- rbuf_write(data); /* space */
- } else {
- if (timer == 0) {
- /*
- * wake up; we'll lose this signal, but it will be
- * garbage if the device is turned on anyway
- */
- timer = init_lirc_timer();
- /* enable_irq(irq); */
- return;
- }
- init = 1;
- }
-
- timeout = timer/10; /* timeout after 1/10 sec. */
- signal = 1;
- level = lirc_get_timer();
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- signal++;
- level = newlevel;
-
- /* giving up */
- if (signal > timeout
- || (check_pselecd && (in(1) & LP_PSELECD))) {
- signal = 0;
- pr_notice("timeout\n");
- break;
- }
- } while (lirc_get_signal());
-
- if (signal != 0) {
- /* adjust value to usecs */
- __u64 helper;
-
- helper = ((__u64) signal)*1000000;
- do_div(helper, timer);
- signal = (long) helper;
-
- if (signal > LIRC_SFH506_DELAY)
- data = signal - LIRC_SFH506_DELAY;
- else
- data = 1;
- rbuf_write(PULSE_BIT|data); /* pulse */
- }
- do_gettimeofday(&lasttv);
-#else
- /* add your code here */
-#endif
-
- wake_up_interruptible(&lirc_wait);
-
- /* enable interrupt */
- /*
- enable_irq(irq);
- out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN);
- */
-}
-
-/*** file operations ***/
-
-static loff_t lirc_lseek(struct file *filep, loff_t offset, int orig)
-{
- return -ESPIPE;
-}
-
-static ssize_t lirc_read(struct file *filep, char __user *buf, size_t n,
- loff_t *ppos)
-{
- int result = 0;
- int count = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- if (n % sizeof(int))
- return -EINVAL;
-
- add_wait_queue(&lirc_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- while (count < n) {
- if (rptr != wptr) {
- if (copy_to_user(buf+count, &rbuf[rptr],
- sizeof(int))) {
- result = -EFAULT;
- break;
- }
- rptr = (rptr + 1) & (RBUF_SIZE - 1);
- count += sizeof(int);
- } else {
- if (filep->f_flags & O_NONBLOCK) {
- result = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- result = -ERESTARTSYS;
- break;
- }
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- }
- remove_wait_queue(&lirc_wait, &wait);
- set_current_state(TASK_RUNNING);
- return count ? count : result;
-}
-
-static ssize_t lirc_write(struct file *filep, const char __user *buf, size_t n,
- loff_t *ppos)
-{
- int count;
- unsigned int i;
- unsigned int level, newlevel;
- unsigned long flags;
- int counttimer;
- int *wbuf;
- ssize_t ret;
-
- if (!is_claimed)
- return -EBUSY;
-
- count = n / sizeof(int);
-
- if (n % sizeof(int) || count % 2 == 0)
- return -EINVAL;
-
- wbuf = memdup_user(buf, n);
- if (IS_ERR(wbuf))
- return PTR_ERR(wbuf);
-
-#ifdef LIRC_TIMER
- if (timer == 0) {
- /* try again if device is ready */
- timer = init_lirc_timer();
- if (timer == 0) {
- ret = -EIO;
- goto out;
- }
- }
-
- /* adjust values from usecs */
- for (i = 0; i < count; i++) {
- __u64 helper;
-
- helper = ((__u64) wbuf[i])*timer;
- do_div(helper, 1000000);
- wbuf[i] = (int) helper;
- }
-
- local_irq_save(flags);
- i = 0;
- while (i < count) {
- level = lirc_get_timer();
- counttimer = 0;
- lirc_on();
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- counttimer++;
- level = newlevel;
- if (check_pselecd && (in(1) & LP_PSELECD)) {
- lirc_off();
- local_irq_restore(flags);
- ret = -EIO;
- goto out;
- }
- } while (counttimer < wbuf[i]);
- i++;
-
- lirc_off();
- if (i == count)
- break;
- counttimer = 0;
- do {
- newlevel = lirc_get_timer();
- if (level == 0 && newlevel != 0)
- counttimer++;
- level = newlevel;
- if (check_pselecd && (in(1) & LP_PSELECD)) {
- local_irq_restore(flags);
- ret = -EIO;
- goto out;
- }
- } while (counttimer < wbuf[i]);
- i++;
- }
- local_irq_restore(flags);
-#else
- /* place code that handles write without external timer here */
-#endif
- ret = n;
-out:
- kfree(wbuf);
-
- return ret;
-}
-
-static unsigned int lirc_poll(struct file *file, poll_table *wait)
-{
- poll_wait(file, &lirc_wait, wait);
- if (rptr != wptr)
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- int result;
- u32 __user *uptr = (u32 __user *)arg;
- u32 features = LIRC_CAN_SET_TRANSMITTER_MASK |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2;
- u32 mode;
- u32 value;
-
- switch (cmd) {
- case LIRC_GET_FEATURES:
- result = put_user(features, uptr);
- if (result)
- return result;
- break;
- case LIRC_GET_SEND_MODE:
- result = put_user(LIRC_MODE_PULSE, uptr);
- if (result)
- return result;
- break;
- case LIRC_GET_REC_MODE:
- result = put_user(LIRC_MODE_MODE2, uptr);
- if (result)
- return result;
- break;
- case LIRC_SET_SEND_MODE:
- result = get_user(mode, uptr);
- if (result)
- return result;
- if (mode != LIRC_MODE_PULSE)
- return -EINVAL;
- break;
- case LIRC_SET_REC_MODE:
- result = get_user(mode, uptr);
- if (result)
- return result;
- if (mode != LIRC_MODE_MODE2)
- return -ENOSYS;
- break;
- case LIRC_SET_TRANSMITTER_MASK:
- result = get_user(value, uptr);
- if (result)
- return result;
- if ((value & LIRC_PARALLEL_TRANSMITTER_MASK) != value)
- return LIRC_PARALLEL_MAX_TRANSMITTERS;
- tx_mask = value;
- break;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static int lirc_open(struct inode *node, struct file *filep)
-{
- if (is_open || !lirc_claim())
- return -EBUSY;
-
- parport_enable_irq(pport);
-
- /* init read ptr */
- rptr = 0;
- wptr = 0;
- lost_irqs = 0;
-
- is_open = 1;
- return 0;
-}
-
-static int lirc_close(struct inode *node, struct file *filep)
-{
- if (is_claimed) {
- is_claimed = 0;
- parport_release(ppdevice);
- }
- is_open = 0;
- return 0;
-}
-
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .llseek = lirc_lseek,
- .read = lirc_read,
- .write = lirc_write,
- .poll = lirc_poll,
- .unlocked_ioctl = lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_ioctl,
-#endif
- .open = lirc_open,
- .release = lirc_close
-};
-
-static int set_use_inc(void *data)
-{
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
-}
-
-static struct lirc_driver driver = {
- .name = LIRC_DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .sample_rate = 0,
- .data = NULL,
- .add_to_buf = NULL,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .dev = NULL,
- .owner = THIS_MODULE,
-};
-
-static struct platform_device *lirc_parallel_dev;
-
-static int lirc_parallel_probe(struct platform_device *dev)
-{
- return 0;
-}
-
-static int lirc_parallel_remove(struct platform_device *dev)
-{
- return 0;
-}
-
-static int lirc_parallel_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return 0;
-}
-
-static int lirc_parallel_resume(struct platform_device *dev)
-{
- return 0;
-}
-
-static struct platform_driver lirc_parallel_driver = {
- .probe = lirc_parallel_probe,
- .remove = lirc_parallel_remove,
- .suspend = lirc_parallel_suspend,
- .resume = lirc_parallel_resume,
- .driver = {
- .name = LIRC_DRIVER_NAME,
- },
-};
-
-static int pf(void *handle)
-{
- parport_disable_irq(pport);
- is_claimed = 0;
- return 0;
-}
-
-static void kf(void *handle)
-{
- if (!is_open)
- return;
- if (!lirc_claim())
- return;
- parport_enable_irq(pport);
- lirc_off();
- /* this is a bit annoying when you actually print...*/
- /*
- printk(KERN_INFO "%s: reclaimed port\n", LIRC_DRIVER_NAME);
- */
-}
-
-/*** module initialization and cleanup ***/
-
-static int __init lirc_parallel_init(void)
-{
- int result;
-
- result = platform_driver_register(&lirc_parallel_driver);
- if (result) {
- pr_notice("platform_driver_register returned %d\n", result);
- return result;
- }
-
- lirc_parallel_dev = platform_device_alloc(LIRC_DRIVER_NAME, 0);
- if (!lirc_parallel_dev) {
- result = -ENOMEM;
- goto exit_driver_unregister;
- }
-
- result = platform_device_add(lirc_parallel_dev);
- if (result)
- goto exit_device_put;
-
- pport = parport_find_base(io);
- if (pport == NULL) {
- pr_notice("no port at %x found\n", io);
- result = -ENXIO;
- goto exit_device_put;
- }
- ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME,
- pf, kf, lirc_lirc_irq_handler, 0,
- NULL);
- parport_put_port(pport);
- if (ppdevice == NULL) {
- pr_notice("parport_register_device() failed\n");
- result = -ENXIO;
- goto exit_device_put;
- }
- if (parport_claim(ppdevice) != 0)
- goto skip_init;
- is_claimed = 1;
- out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP);
-
-#ifdef LIRC_TIMER
- if (debug)
- out(LIRC_PORT_DATA, tx_mask);
-
- timer = init_lirc_timer();
-
-#if 0 /* continue even if device is offline */
- if (timer == 0) {
- is_claimed = 0;
- parport_release(pport);
- parport_unregister_device(ppdevice);
- result = -EIO;
- goto exit_device_put;
- }
-
-#endif
- if (debug)
- out(LIRC_PORT_DATA, 0);
-#endif
-
- is_claimed = 0;
- parport_release(ppdevice);
- skip_init:
- driver.dev = &lirc_parallel_dev->dev;
- driver.minor = lirc_register_driver(&driver);
- if (driver.minor < 0) {
- pr_notice("register_chrdev() failed\n");
- parport_unregister_device(ppdevice);
- result = -EIO;
- goto exit_device_put;
- }
- pr_info("installed using port 0x%04x irq %d\n", io, irq);
- return 0;
-
-exit_device_put:
- platform_device_put(lirc_parallel_dev);
-exit_driver_unregister:
- platform_driver_unregister(&lirc_parallel_driver);
- return result;
-}
-
-static void __exit lirc_parallel_exit(void)
-{
- parport_unregister_device(ppdevice);
- lirc_unregister_driver(driver.minor);
-
- platform_device_unregister(lirc_parallel_dev);
- platform_driver_unregister(&lirc_parallel_driver);
-}
-
-module_init(lirc_parallel_init);
-module_exit(lirc_parallel_exit);
-
-MODULE_DESCRIPTION("Infrared receiver driver for parallel ports.");
-MODULE_AUTHOR("Christoph Bartelmus");
-MODULE_LICENSE("GPL");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3bc, 0x378 or 0x278)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (7 or 5)");
-
-module_param(tx_mask, int, S_IRUGO);
-MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
-
-module_param(check_pselecd, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(check_pselecd, "Check for printer (default: 0)");
diff --git a/drivers/staging/media/lirc/lirc_parallel.h b/drivers/staging/media/lirc/lirc_parallel.h
deleted file mode 100644
index 4bed6afe0632..000000000000
--- a/drivers/staging/media/lirc/lirc_parallel.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* lirc_parallel.h */
-
-#ifndef _LIRC_PARALLEL_H
-#define _LIRC_PARALLEL_H
-
-#include <linux/lp.h>
-
-#define LIRC_PORT_LEN 3
-
-#define LIRC_LP_BASE 0
-#define LIRC_LP_STATUS 1
-#define LIRC_LP_CONTROL 2
-
-#define LIRC_PORT_DATA LIRC_LP_BASE /* base */
-#define LIRC_PORT_TIMER LIRC_LP_STATUS /* status port */
-#define LIRC_PORT_TIMER_BIT LP_PBUSY /* busy signal */
-#define LIRC_PORT_SIGNAL LIRC_LP_STATUS /* status port */
-#define LIRC_PORT_SIGNAL_BIT LP_PACK /* ack signal */
-#define LIRC_PORT_IRQ LIRC_LP_CONTROL /* control port */
-
-#define LIRC_SFH506_DELAY 0 /* delay t_phl in usecs */
-
-#define LIRC_PARALLEL_MAX_TRANSMITTERS 8
-#define LIRC_PARALLEL_TRANSMITTER_MASK ((1<<LIRC_PARALLEL_MAX_TRANSMITTERS) - 1)
-
-#endif
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
deleted file mode 100644
index b247649a99eb..000000000000
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ /dev/null
@@ -1,921 +0,0 @@
-/*
- * lirc_sasem.c - USB remote support for LIRC
- * Version 0.5
- *
- * Copyright (C) 2004-2005 Oliver Stabel <oliver.stabel@gmx.de>
- * Tim Davies <tim@opensystems.net.au>
- *
- * This driver was derived from:
- * Venky Raju <dev@venky.ws>
- * "lirc_imon - "LIRC/VFD driver for Ahanix/Soundgraph IMON IR/VFD"
- * Paul Miller <pmiller9@users.sourceforge.net>'s 2003-2004
- * "lirc_atiusb - USB remote support for LIRC"
- * Culver Consulting Services <henry@culcon.com>'s 2003
- * "Sasem OnAir VFD/IR USB driver"
- *
- *
- * NOTE - The LCDproc iMon driver should work with this module. More info at
- * http://www.frogstorm.info/sasem
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-
-#define MOD_AUTHOR "Oliver Stabel <oliver.stabel@gmx.de>, " \
- "Tim Davies <tim@opensystems.net.au>"
-#define MOD_DESC "USB Driver for Sasem Remote Controller V1.1"
-#define MOD_NAME "lirc_sasem"
-#define MOD_VERSION "0.5"
-
-#define VFD_MINOR_BASE 144 /* Same as LCD */
-#define DEVICE_NAME "lcd%d"
-
-#define BUF_CHUNK_SIZE 8
-#define BUF_SIZE 128
-
-#define IOCTL_LCD_CONTRAST 1
-
-/*** P R O T O T Y P E S ***/
-
-/* USB Callback prototypes */
-static int sasem_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void sasem_disconnect(struct usb_interface *interface);
-static void usb_rx_callback(struct urb *urb);
-static void usb_tx_callback(struct urb *urb);
-
-/* VFD file_operations function prototypes */
-static int vfd_open(struct inode *inode, struct file *file);
-static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg);
-static int vfd_close(struct inode *inode, struct file *file);
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos);
-
-/* LIRC driver function prototypes */
-static int ir_open(void *data);
-static void ir_close(void *data);
-
-/*** G L O B A L S ***/
-#define SASEM_DATA_BUF_SZ 32
-
-struct sasem_context {
-
- struct usb_device *dev;
- int vfd_isopen; /* VFD port has been opened */
- unsigned int vfd_contrast; /* VFD contrast */
- int ir_isopen; /* IR port has been opened */
- int dev_present; /* USB device presence */
- struct mutex ctx_lock; /* to lock this object */
- wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
-
- struct lirc_driver *driver;
- struct usb_endpoint_descriptor *rx_endpoint;
- struct usb_endpoint_descriptor *tx_endpoint;
- struct urb *rx_urb;
- struct urb *tx_urb;
- unsigned char usb_rx_buf[8];
- unsigned char usb_tx_buf[8];
-
- struct tx_t {
- unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
- * buffer */
- struct completion finished; /* wait for write to finish */
- atomic_t busy; /* write in progress */
- int status; /* status of tx completion */
- } tx;
-
- /* for dealing with repeat codes (wish there was a toggle bit!) */
- struct timeval presstime;
- char lastcode[8];
- int codesaved;
-};
-
-/* VFD file operations */
-static const struct file_operations vfd_fops = {
- .owner = THIS_MODULE,
- .open = &vfd_open,
- .write = vfd_write,
- .unlocked_ioctl = &vfd_ioctl,
- .release = &vfd_close,
- .llseek = noop_llseek,
-};
-
-/* USB Device ID for Sasem USB Control Board */
-static struct usb_device_id sasem_usb_id_table[] = {
- /* Sasem USB Control Board */
- { USB_DEVICE(0x11ba, 0x0101) },
- /* Terminating entry */
- {}
-};
-
-/* USB Device data */
-static struct usb_driver sasem_driver = {
- .name = MOD_NAME,
- .probe = sasem_probe,
- .disconnect = sasem_disconnect,
- .id_table = sasem_usb_id_table,
-};
-
-static struct usb_class_driver sasem_class = {
- .name = DEVICE_NAME,
- .fops = &vfd_fops,
- .minor_base = VFD_MINOR_BASE,
-};
-
-/* to prevent races between open() and disconnect() */
-static DEFINE_MUTEX(disconnect_lock);
-
-static int debug;
-
-
-/*** M O D U L E C O D E ***/
-
-MODULE_AUTHOR(MOD_AUTHOR);
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL");
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
-
-static void delete_context(struct sasem_context *context)
-{
- usb_free_urb(context->tx_urb); /* VFD */
- usb_free_urb(context->rx_urb); /* IR */
- lirc_buffer_free(context->driver->rbuf);
- kfree(context->driver->rbuf);
- kfree(context->driver);
- kfree(context);
-}
-
-static void deregister_from_lirc(struct sasem_context *context)
-{
- int retval;
- int minor = context->driver->minor;
-
- retval = lirc_unregister_driver(minor);
- if (retval)
- dev_err(&context->dev->dev,
- "%s: unable to deregister from lirc (%d)\n",
- __func__, retval);
- else
- dev_info(&context->dev->dev,
- "Deregistered Sasem driver (minor:%d)\n", minor);
-
-}
-
-/**
- * Called when the VFD device (e.g. /dev/usb/lcd)
- * is opened by the application.
- */
-static int vfd_open(struct inode *inode, struct file *file)
-{
- struct usb_interface *interface;
- struct sasem_context *context = NULL;
- int subminor;
- int retval = 0;
-
- /* prevent races with disconnect */
- mutex_lock(&disconnect_lock);
-
- subminor = iminor(inode);
- interface = usb_find_interface(&sasem_driver, subminor);
- if (!interface) {
- pr_err("%s: could not find interface for minor %d\n",
- __func__, subminor);
- retval = -ENODEV;
- goto exit;
- }
- context = usb_get_intfdata(interface);
-
- if (!context) {
- dev_err(&interface->dev, "no context found for minor %d\n",
- subminor);
- retval = -ENODEV;
- goto exit;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (context->vfd_isopen) {
- dev_err(&interface->dev,
- "%s: VFD port is already open", __func__);
- retval = -EBUSY;
- } else {
- context->vfd_isopen = 1;
- file->private_data = context;
- dev_info(&interface->dev, "VFD port opened\n");
- }
-
- mutex_unlock(&context->ctx_lock);
-
-exit:
- mutex_unlock(&disconnect_lock);
- return retval;
-}
-
-/**
- * Called when the application issues an ioctl on the VFD device
- * (e.g. /dev/usb/lcd); currently only IOCTL_LCD_CONTRAST is handled.
- */
-static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
- struct sasem_context *context = NULL;
-
- context = (struct sasem_context *) file->private_data;
-
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- switch (cmd) {
- case IOCTL_LCD_CONTRAST:
- if (arg > 1000)
- arg = 1000;
- context->vfd_contrast = (unsigned int)arg;
- break;
- default:
- pr_info("Unknown IOCTL command\n");
- mutex_unlock(&context->ctx_lock);
- return -ENOIOCTLCMD; /* not supported */
- }
-
- mutex_unlock(&context->ctx_lock);
- return 0;
-}
-
-/**
- * Called when the VFD device (e.g. /dev/usb/lcd)
- * is closed by the application.
- */
-static int vfd_close(struct inode *inode, struct file *file)
-{
- struct sasem_context *context = NULL;
- int retval = 0;
-
- context = (struct sasem_context *) file->private_data;
-
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->vfd_isopen) {
- dev_err(&context->dev->dev, "%s: VFD is not open\n", __func__);
- retval = -EIO;
- } else {
- context->vfd_isopen = 0;
- dev_info(&context->dev->dev, "VFD port closed\n");
- if (!context->dev_present && !context->ir_isopen) {
-
- /* Device disconnected before close and IR port is
- * not open. If IR port is open, context will be
- * deleted by ir_close. */
- mutex_unlock(&context->ctx_lock);
- delete_context(context);
- return retval;
- }
- }
-
- mutex_unlock(&context->ctx_lock);
- return retval;
-}
-
-/**
- * Sends a packet to the VFD.
- */
-static int send_packet(struct sasem_context *context)
-{
- unsigned int pipe;
- int interval = 0;
- int retval = 0;
-
- pipe = usb_sndintpipe(context->dev,
- context->tx_endpoint->bEndpointAddress);
- interval = context->tx_endpoint->bInterval;
-
- usb_fill_int_urb(context->tx_urb, context->dev, pipe,
- context->usb_tx_buf, sizeof(context->usb_tx_buf),
- usb_tx_callback, context, interval);
-
- context->tx_urb->actual_length = 0;
-
- init_completion(&context->tx.finished);
- atomic_set(&context->tx.busy, 1);
-
- retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
- if (retval) {
- atomic_set(&context->tx.busy, 0);
- dev_err(&context->dev->dev, "error submitting urb (%d)\n",
- retval);
- } else {
- /* Wait for transmission to complete (or abort) */
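- /*
- * ctx_lock is dropped across the wait below, presumably so that
- * sasem_disconnect() can take the lock and abort the transfer if
- * the device disappears mid-write.
- */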
- mutex_unlock(&context->ctx_lock);
- wait_for_completion(&context->tx.finished);
- mutex_lock(&context->ctx_lock);
-
- retval = context->tx.status;
- if (retval)
- dev_err(&context->dev->dev,
- "packet tx failed (%d)\n", retval);
- }
-
- return retval;
-}
-
-/**
- * Writes data to the VFD. The Sasem VFD is 2x16 characters
- * and requires data in 9 consecutive USB interrupt packets,
- * each packet carrying 8 bytes.
- */
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos)
-{
- int i;
- int retval = 0;
- struct sasem_context *context;
- int *data_buf = NULL;
-
- context = (struct sasem_context *) file->private_data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->dev_present) {
- pr_err("%s: no Sasem device present\n", __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- if (n_bytes <= 0 || n_bytes > SASEM_DATA_BUF_SZ) {
- dev_err(&context->dev->dev, "%s: invalid payload size\n",
- __func__);
- retval = -EINVAL;
- goto exit;
- }
-
- data_buf = memdup_user(buf, n_bytes);
- if (IS_ERR(data_buf)) {
- retval = PTR_ERR(data_buf);
- data_buf = NULL;
- goto exit;
- }
-
- memcpy(context->tx.data_buf, data_buf, n_bytes);
-
- /* Pad with spaces */
- for (i = n_bytes; i < SASEM_DATA_BUF_SZ; ++i)
- context->tx.data_buf[i] = ' ';
-
- /* Nine 8 byte packets to be sent */
- /* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0"
- * will clear the VFD */
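- /*
- * Packet layout, as derived from the switch below: packet 0 sets
- * the contrast, packets 1/2 and 5/6 appear to select display line
- * 1 and line 2, and packets 3/4 and 7/8 carry the 16 characters
- * for each line.
- */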
- for (i = 0; i < 9; i++) {
- switch (i) {
- case 0:
- memcpy(context->usb_tx_buf, "\x07\0\0\0\0\0\0\0", 8);
- context->usb_tx_buf[1] = (context->vfd_contrast) ?
- (0x2B - (context->vfd_contrast - 1) / 250)
- : 0x2B;
- break;
- case 1:
- memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8);
- break;
- case 2:
- memcpy(context->usb_tx_buf, "\x0b\x01\0\0\0\0\0\0", 8);
- break;
- case 3:
- memcpy(context->usb_tx_buf, context->tx.data_buf, 8);
- break;
- case 4:
- memcpy(context->usb_tx_buf,
- context->tx.data_buf + 8, 8);
- break;
- case 5:
- memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8);
- break;
- case 6:
- memcpy(context->usb_tx_buf, "\x0b\x02\0\0\0\0\0\0", 8);
- break;
- case 7:
- memcpy(context->usb_tx_buf,
- context->tx.data_buf + 16, 8);
- break;
- case 8:
- memcpy(context->usb_tx_buf,
- context->tx.data_buf + 24, 8);
- break;
- }
- retval = send_packet(context);
- if (retval) {
- dev_err(&context->dev->dev,
- "send packet failed for packet #%d\n", i);
- goto exit;
- }
- }
-exit:
-
- mutex_unlock(&context->ctx_lock);
- kfree(data_buf);
-
- return (!retval) ? n_bytes : retval;
-}
-
-/**
- * Callback function for USB core API: transmit data
- */
-static void usb_tx_callback(struct urb *urb)
-{
- struct sasem_context *context;
-
- if (!urb)
- return;
- context = (struct sasem_context *) urb->context;
- if (!context)
- return;
-
- context->tx.status = urb->status;
-
- /* notify waiters that write has finished */
- atomic_set(&context->tx.busy, 0);
- complete(&context->tx.finished);
-}
-
-/**
- * Called by lirc_dev when the application opens /dev/lirc
- */
-static int ir_open(void *data)
-{
- int retval = 0;
- struct sasem_context *context;
-
- /* prevent races with disconnect */
- mutex_lock(&disconnect_lock);
-
- context = data;
-
- mutex_lock(&context->ctx_lock);
-
- if (context->ir_isopen) {
- dev_err(&context->dev->dev, "%s: IR port is already open\n",
- __func__);
- retval = -EBUSY;
- goto exit;
- }
-
- usb_fill_int_urb(context->rx_urb, context->dev,
- usb_rcvintpipe(context->dev,
- context->rx_endpoint->bEndpointAddress),
- context->usb_rx_buf, sizeof(context->usb_rx_buf),
- usb_rx_callback, context, context->rx_endpoint->bInterval);
-
- retval = usb_submit_urb(context->rx_urb, GFP_KERNEL);
-
- if (retval)
- dev_err(&context->dev->dev,
- "usb_submit_urb failed for ir_open (%d)\n", retval);
- else {
- context->ir_isopen = 1;
- dev_info(&context->dev->dev, "IR port opened\n");
- }
-
-exit:
- mutex_unlock(&context->ctx_lock);
-
- mutex_unlock(&disconnect_lock);
- return retval;
-}
-
-/**
- * Called by lirc_dev when the application closes /dev/lirc
- */
-static void ir_close(void *data)
-{
- struct sasem_context *context;
-
- context = data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return;
- }
-
- mutex_lock(&context->ctx_lock);
-
- usb_kill_urb(context->rx_urb);
- context->ir_isopen = 0;
- pr_info("IR port closed\n");
-
- if (!context->dev_present) {
-
- /*
- * Device disconnected while IR port was
- * still open. Driver was not deregistered
- * at disconnect time, so do it now.
- */
- deregister_from_lirc(context);
-
- if (!context->vfd_isopen) {
-
- mutex_unlock(&context->ctx_lock);
- delete_context(context);
- return;
- }
- /* If VFD port is open, context will be deleted by vfd_close */
- }
-
- mutex_unlock(&context->ctx_lock);
-}
-
-/**
- * Process the incoming packet
- */
-static void incoming_packet(struct sasem_context *context,
- struct urb *urb)
-{
- int len = urb->actual_length;
- unsigned char *buf = urb->transfer_buffer;
- long ms;
- struct timeval tv;
-
- if (len != 8) {
- dev_warn(&context->dev->dev,
- "%s: invalid incoming packet size (%d)\n",
- __func__, len);
- return;
- }
-
- if (debug)
- dev_info(&context->dev->dev, "Incoming data: %*ph\n", len, buf);
- /*
- * Lirc could deal with the repeat code, but we really need to block it
- * if it arrives too late. Otherwise we could repeat the wrong code.
- */
-
- /* get the time since the last button press */
- do_gettimeofday(&tv);
- ms = (tv.tv_sec - context->presstime.tv_sec) * 1000 +
- (tv.tv_usec - context->presstime.tv_usec) / 1000;
-
- if (memcmp(buf, "\x08\0\0\0\0\0\0\0", 8) == 0) {
- /*
- * the repeat code is being sent, so we copy
- * the old code to LIRC
- */
-
- /*
- * NOTE: Only if the last code was less than 250ms ago
- * - no one should be able to push another (undetected) button
- * in that time and then get a false repeat of the previous
- * press but it is long enough for a genuine repeat
- */
- if ((ms < 250) && (context->codesaved != 0)) {
- memcpy(buf, &context->lastcode, 8);
- context->presstime.tv_sec = tv.tv_sec;
- context->presstime.tv_usec = tv.tv_usec;
- }
- } else {
- /* save the current valid code for repeats */
- memcpy(&context->lastcode, buf, 8);
- /*
-		 * set flag to signal a valid code was saved;
- * just for safety reasons
- */
- context->codesaved = 1;
- context->presstime.tv_sec = tv.tv_sec;
- context->presstime.tv_usec = tv.tv_usec;
- }
-
- lirc_buffer_write(context->driver->rbuf, buf);
- wake_up(&context->driver->rbuf->wait_poll);
-}
-
-/**
- * Callback function for USB core API: receive data
- */
-static void usb_rx_callback(struct urb *urb)
-{
- struct sasem_context *context;
-
- if (!urb)
- return;
- context = (struct sasem_context *) urb->context;
- if (!context)
- return;
-
- switch (urb->status) {
-
- case -ENOENT: /* usbcore unlink successful! */
- return;
-
- case 0:
- if (context->ir_isopen)
- incoming_packet(context, urb);
- break;
-
- default:
- dev_warn(&urb->dev->dev, "%s: status (%d): ignored",
- __func__, urb->status);
- break;
- }
-
- usb_submit_urb(context->rx_urb, GFP_ATOMIC);
-}
-
-
-
-/**
- * Callback function for USB core API: Probe
- */
-static int sasem_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = NULL;
- struct usb_host_interface *iface_desc = NULL;
- struct usb_endpoint_descriptor *rx_endpoint = NULL;
- struct usb_endpoint_descriptor *tx_endpoint = NULL;
- struct urb *rx_urb = NULL;
- struct urb *tx_urb = NULL;
- struct lirc_driver *driver = NULL;
- struct lirc_buffer *rbuf = NULL;
- int lirc_minor = 0;
- int num_endpoints;
- int retval = 0;
- int vfd_ep_found;
- int ir_ep_found;
- int alloc_status;
- struct sasem_context *context = NULL;
- int i;
-
- dev_info(&interface->dev, "%s: found Sasem device\n", __func__);
-
-
- dev = usb_get_dev(interface_to_usbdev(interface));
- iface_desc = interface->cur_altsetting;
- num_endpoints = iface_desc->desc.bNumEndpoints;
-
- /*
- * Scan the endpoint list and set:
- * first input endpoint = IR endpoint
- * first output endpoint = VFD endpoint
- */
-
- ir_ep_found = 0;
- vfd_ep_found = 0;
-
- for (i = 0; i < num_endpoints && !(ir_ep_found && vfd_ep_found); ++i) {
-
- struct usb_endpoint_descriptor *ep;
- int ep_dir;
- int ep_type;
-
-		ep = &iface_desc->endpoint[i].desc;
- ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
-
- if (!ir_ep_found &&
- ep_dir == USB_DIR_IN &&
- ep_type == USB_ENDPOINT_XFER_INT) {
-
- rx_endpoint = ep;
- ir_ep_found = 1;
- if (debug)
- dev_info(&interface->dev,
- "%s: found IR endpoint\n", __func__);
-
- } else if (!vfd_ep_found &&
- ep_dir == USB_DIR_OUT &&
- ep_type == USB_ENDPOINT_XFER_INT) {
-
- tx_endpoint = ep;
- vfd_ep_found = 1;
- if (debug)
- dev_info(&interface->dev,
- "%s: found VFD endpoint\n", __func__);
- }
- }
-
- /* Input endpoint is mandatory */
- if (!ir_ep_found) {
- dev_err(&interface->dev,
- "%s: no valid input (IR) endpoint found.\n", __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- if (!vfd_ep_found)
- dev_info(&interface->dev,
- "%s: no valid output (VFD) endpoint found.\n",
- __func__);
-
-
- /* Allocate memory */
- alloc_status = 0;
-
- context = kzalloc(sizeof(struct sasem_context), GFP_KERNEL);
- if (!context) {
- alloc_status = 1;
- goto alloc_status_switch;
- }
- driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!driver) {
- alloc_status = 2;
- goto alloc_status_switch;
- }
- rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!rbuf) {
- alloc_status = 3;
- goto alloc_status_switch;
- }
- if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
- dev_err(&interface->dev,
- "%s: lirc_buffer_init failed\n", __func__);
- alloc_status = 4;
- goto alloc_status_switch;
- }
- rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rx_urb) {
- dev_err(&interface->dev,
- "%s: usb_alloc_urb failed for IR urb\n", __func__);
- alloc_status = 5;
- goto alloc_status_switch;
- }
- if (vfd_ep_found) {
- tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_urb) {
- dev_err(&interface->dev,
- "%s: usb_alloc_urb failed for VFD urb",
- __func__);
- alloc_status = 6;
- goto alloc_status_switch;
- }
- }
-
- mutex_init(&context->ctx_lock);
-
- strcpy(driver->name, MOD_NAME);
- driver->minor = -1;
- driver->code_length = 64;
- driver->sample_rate = 0;
- driver->features = LIRC_CAN_REC_LIRCCODE;
- driver->data = context;
- driver->rbuf = rbuf;
- driver->set_use_inc = ir_open;
- driver->set_use_dec = ir_close;
- driver->dev = &interface->dev;
- driver->owner = THIS_MODULE;
-
- mutex_lock(&context->ctx_lock);
-
- lirc_minor = lirc_register_driver(driver);
- if (lirc_minor < 0) {
- dev_err(&interface->dev,
- "%s: lirc_register_driver failed\n", __func__);
- alloc_status = 7;
- retval = lirc_minor;
- goto unlock;
- } else
- dev_info(&interface->dev,
- "%s: Registered Sasem driver (minor:%d)\n",
- __func__, lirc_minor);
-
- /* Needed while unregistering! */
- driver->minor = lirc_minor;
-
- context->dev = dev;
- context->dev_present = 1;
- context->rx_endpoint = rx_endpoint;
- context->rx_urb = rx_urb;
- if (vfd_ep_found) {
- context->tx_endpoint = tx_endpoint;
- context->tx_urb = tx_urb;
- context->vfd_contrast = 1000; /* range 0 - 1000 */
- }
- context->driver = driver;
-
- usb_set_intfdata(interface, context);
-
- if (vfd_ep_found) {
-
- if (debug)
- dev_info(&interface->dev,
- "Registering VFD with sysfs\n");
- if (usb_register_dev(interface, &sasem_class))
- /* Not a fatal error, so ignore */
- dev_info(&interface->dev,
- "%s: could not get a minor number for VFD\n",
- __func__);
- }
-
- dev_info(&interface->dev,
- "%s: Sasem device on usb<%d:%d> initialized\n",
- __func__, dev->bus->busnum, dev->devnum);
-unlock:
- mutex_unlock(&context->ctx_lock);
-
-alloc_status_switch:
- switch (alloc_status) {
-
- case 7:
- if (vfd_ep_found)
- usb_free_urb(tx_urb);
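-		/* fall-through */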
- case 6:
- usb_free_urb(rx_urb);
- /* fall-through */
- case 5:
- lirc_buffer_free(rbuf);
- /* fall-through */
- case 4:
- kfree(rbuf);
- /* fall-through */
- case 3:
- kfree(driver);
- /* fall-through */
- case 2:
- kfree(context);
- context = NULL;
- /* fall-through */
- case 1:
- if (retval == 0)
- retval = -ENOMEM;
- }
-
-exit:
- return retval;
-}
-
-/**
- * Callback function for USB core API: disconnect
- */
-static void sasem_disconnect(struct usb_interface *interface)
-{
- struct sasem_context *context;
-
- /* prevent races with ir_open()/vfd_open() */
- mutex_lock(&disconnect_lock);
-
- context = usb_get_intfdata(interface);
- mutex_lock(&context->ctx_lock);
-
- dev_info(&interface->dev, "%s: Sasem device disconnected\n",
- __func__);
-
- usb_set_intfdata(interface, NULL);
- context->dev_present = 0;
-
- /* Stop reception */
- usb_kill_urb(context->rx_urb);
-
- /* Abort ongoing write */
- if (atomic_read(&context->tx.busy)) {
-
- usb_kill_urb(context->tx_urb);
- wait_for_completion(&context->tx.finished);
- }
-
- /* De-register from lirc_dev if IR port is not open */
- if (!context->ir_isopen)
- deregister_from_lirc(context);
-
- usb_deregister_dev(interface, &sasem_class);
-
- mutex_unlock(&context->ctx_lock);
-
- if (!context->ir_isopen && !context->vfd_isopen)
- delete_context(context);
-
- mutex_unlock(&disconnect_lock);
-}
-
-module_usb_driver(sasem_driver);
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
deleted file mode 100644
index 465796a686c4..000000000000
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ /dev/null
@@ -1,1158 +0,0 @@
-/*
- * lirc_serial.c
- *
- * lirc_serial - Device driver that records pulse- and pause-lengths
- * (space-lengths) between DDCD events on a serial port.
- *
- * Copyright (C) 1996,97 Ralph Metzler <rjkm@thp.uni-koeln.de>
- * Copyright (C) 1998 Trent Piepho <xyzzy@u.washington.edu>
- * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
- * Copyright (C) 1999 Christoph Bartelmus <lirc@bartelmus.de>
- * Copyright (C) 2007 Andrei Tanas <andrei@tanas.ca> (suspend/resume support)
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-/*
- * Steve's changes to improve transmission fidelity:
- * - for systems with the rdtsc instruction and the clock counter, a
- *   send_pulse that times the pulses directly using the counter.
- * This means that the LIRC_SERIAL_TRANSMITTER_LATENCY fudge is
- * not needed. Measurement shows very stable waveform, even where
- * PCI activity slows the access to the UART, which trips up other
- * versions.
- * - For other systems, non-integer-microsecond pulse/space lengths,
- * done using fixed point binary. So, much more accurate carrier
- * frequency.
- * - fine tuned transmitter latency, taking advantage of fractional
- * microseconds in previous change
- * - Fixed bug in the way transmitter latency was accounted for by
- * tuning the pulse lengths down - the send_pulse routine ignored
- * this overhead as it timed the overall pulse length - so the
- * pulse frequency was right but overall pulse length was too
- * long. Fixed by accounting for latency on each pulse/space
- * iteration.
- *
- * Steve Davies <steve@daviesfam.org> July 2001
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/time.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/fcntl.h>
-#include <linux/spinlock.h>
-
-/* From Intel IXP42X Developer's Manual (#252480-005): */
-/* ftp://download.intel.com/design/network/manuals/25248005.pdf */
-#define UART_IE_IXP42X_UUE 0x40 /* IXP42X UART Unit enable */
-#define UART_IE_IXP42X_RTOIE 0x10 /* IXP42X Receiver Data Timeout int.enable */
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#define LIRC_DRIVER_NAME "lirc_serial"
-
-struct lirc_serial {
- int signal_pin;
- int signal_pin_change;
- u8 on;
- u8 off;
- long (*send_pulse)(unsigned long length);
- void (*send_space)(long length);
- int features;
- spinlock_t lock;
-};
-
-#define LIRC_HOMEBREW 0
-#define LIRC_IRDEO 1
-#define LIRC_IRDEO_REMOTE 2
-#define LIRC_ANIMAX 3
-#define LIRC_IGOR 4
-#define LIRC_NSLU2 5
-
-/*** module parameters ***/
-static int type;
-static int io;
-static int irq;
-static bool iommap;
-static int ioshift;
-static bool softcarrier = true;
-static bool share_irq;
-static bool debug;
-static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
-static bool txsense; /* 0 = active high, 1 = active low */
-
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \
- fmt, ## args); \
- } while (0)
-
-/* forward declarations */
-static long send_pulse_irdeo(unsigned long length);
-static long send_pulse_homebrew(unsigned long length);
-static void send_space_irdeo(long length);
-static void send_space_homebrew(long length);
-
-static struct lirc_serial hardware[] = {
- [LIRC_HOMEBREW] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
- .signal_pin = UART_MSR_DCD,
- .signal_pin_change = UART_MSR_DDCD,
- .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
- .off = (UART_MCR_RTS | UART_MCR_OUT2),
- .send_pulse = send_pulse_homebrew,
- .send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SET_SEND_CARRIER |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
- .features = LIRC_CAN_REC_MODE2
-#endif
- },
-
- [LIRC_IRDEO] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = UART_MCR_OUT2,
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = send_pulse_irdeo,
- .send_space = send_space_irdeo,
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
- },
-
- [LIRC_IRDEO_REMOTE] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = send_pulse_irdeo,
- .send_space = send_space_irdeo,
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
- },
-
- [LIRC_ANIMAX] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
- .signal_pin = UART_MSR_DCD,
- .signal_pin_change = UART_MSR_DDCD,
- .on = 0,
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = NULL,
- .send_space = NULL,
- .features = LIRC_CAN_REC_MODE2
- },
-
- [LIRC_IGOR] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
- .off = (UART_MCR_RTS | UART_MCR_OUT2),
- .send_pulse = send_pulse_homebrew,
- .send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SET_SEND_CARRIER |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
- .features = LIRC_CAN_REC_MODE2
-#endif
- },
-};
-
-#define RS_ISR_PASS_LIMIT 256
-
-/*
- * A long pulse code from a remote might take up to 300 bytes. The
- * daemon should read the bytes as soon as they are generated, so take
- * the number of keys you think you can push before the daemon runs
- * and multiply by 300. The driver will warn you if you overrun this
- * buffer. If you have a slow computer or non-busmastering IDE disks,
- * maybe you will need to increase this.
- */
-
-/* This MUST be a power of two! It has to be larger than 1 as well. */
-
-#define RBUF_LEN 256
-
-static struct timeval lasttv = {0, 0};
-
-static struct lirc_buffer rbuf;
-
-static unsigned int freq = 38000;
-static unsigned int duty_cycle = 50;
-
-/* Initialized in init_timing_params() */
-static unsigned long period;
-static unsigned long pulse_width;
-static unsigned long space_width;
-
-#if defined(__i386__)
-/*
- * From:
- * Linux I/O port programming mini-HOWTO
- * Author: Riku Saikkonen <Riku.Saikkonen@hut.fi>
- * v, 28 December 1997
- *
- * [...]
- * Actually, a port I/O instruction on most ports in the 0-0x3ff range
- * takes almost exactly 1 microsecond, so if you're, for example, using
- * the parallel port directly, just do additional inb()s from that port
- * to delay.
- * [...]
- */
-/* transmitter latency 1.5625us 0x1.90 - this figure arrived at from
- * comment above plus trimming to match actual measured frequency.
- * This will be sensitive to cpu speed, though hopefully most of the 1.5us
- * is spent in the uart access. Still - for reference test machine was a
- * 1.13GHz Athlon system - Steve
- */
-
-/*
- * changed from 400 to 450 as this works better on slower machines;
- * faster machines will use the rdtsc code anyway
- */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 450
-
-#else
-
-/* does anybody have information on other platforms ? */
-/* 256 = 1<<8 */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 256
-
-#endif /* __i386__ */
-/*
- * FIXME: should we be using hrtimers instead of this
- * LIRC_SERIAL_TRANSMITTER_LATENCY nonsense?
- */
-
-/* fetch serial input packet (1 byte) from register offset */
-static u8 sinp(int offset)
-{
- if (iommap)
- /* the register is memory-mapped */
- offset <<= ioshift;
-
- return inb(io + offset);
-}
-
-/* write serial output packet (1 byte) of value to register offset */
-static void soutp(int offset, u8 value)
-{
- if (iommap)
- /* the register is memory-mapped */
- offset <<= ioshift;
-
- outb(value, io + offset);
-}
-
-static void on(void)
-{
- if (txsense)
- soutp(UART_MCR, hardware[type].off);
- else
- soutp(UART_MCR, hardware[type].on);
-}
-
-static void off(void)
-{
- if (txsense)
- soutp(UART_MCR, hardware[type].on);
- else
- soutp(UART_MCR, hardware[type].off);
-}
-
-#ifndef MAX_UDELAY_MS
-#define MAX_UDELAY_US 5000
-#else
-#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
-#endif
-
-static void safe_udelay(unsigned long usecs)
-{
- while (usecs > MAX_UDELAY_US) {
- udelay(MAX_UDELAY_US);
- usecs -= MAX_UDELAY_US;
- }
- udelay(usecs);
-}
-
-#ifdef USE_RDTSC
-/*
- * This is an overflow/precision juggle, complicated in that we can't
- * do long long divide in the kernel
- */
-
-/*
- * When we use the rdtsc instruction to measure clocks, we keep the
- * pulse and space widths as clock cycles. As this is CPU speed
- * dependent, the widths must be calculated in init_port and ioctl
- * time
- */
-
-static int init_timing_params(unsigned int new_duty_cycle,
- unsigned int new_freq)
-{
- __u64 loops_per_sec, work;
-
- duty_cycle = new_duty_cycle;
- freq = new_freq;
-
-	loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
- loops_per_sec *= HZ;
-
- /* How many clocks in a microsecond?, avoiding long long divide */
- work = loops_per_sec;
- work *= 4295; /* 4295 = 2^32 / 1e6 */
-
- /*
- * Carrier period in clocks, approach good up to 32GHz clock,
- * gets carrier frequency within 8Hz
- */
- period = loops_per_sec >> 3;
- period /= (freq >> 3);
-
- /* Derive pulse and space from the period */
- pulse_width = period * duty_cycle / 100;
- space_width = period - pulse_width;
- dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
- "clk/jiffy=%ld, pulse=%ld, space=%ld\n",
- freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
- pulse_width, space_width);
- return 0;
-}
-#else /* ! USE_RDTSC */
-static int init_timing_params(unsigned int new_duty_cycle,
- unsigned int new_freq)
-{
-/*
- * period, pulse/space width are kept with 8 binary places -
- * IE multiplied by 256.
- */
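-/*
- * Example with the defaults freq=38000 and duty_cycle=50:
- * period = 256 * 1000000 / 38000 = 6736 (~26.3us in 8.8 fixed point),
- * pulse_width = space_width = 3368.
- */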
- if (256 * 1000000L / new_freq * new_duty_cycle / 100 <=
- LIRC_SERIAL_TRANSMITTER_LATENCY)
- return -EINVAL;
- if (256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <=
- LIRC_SERIAL_TRANSMITTER_LATENCY)
- return -EINVAL;
- duty_cycle = new_duty_cycle;
- freq = new_freq;
- period = 256 * 1000000L / freq;
- pulse_width = period * duty_cycle / 100;
- space_width = period - pulse_width;
- dprintk("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
- freq, pulse_width, space_width);
- return 0;
-}
-#endif /* USE_RDTSC */
-
-
-/* return value: space length delta */
-
-static long send_pulse_irdeo(unsigned long length)
-{
- long rawbits, ret;
- int i;
- unsigned char output;
- unsigned char chunk, shifted;
-
- /* how many bits have to be sent ? */
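- /*
- * At 115200 baud one bit lasts ~8.68us, so
- * bits = usecs * 115200 / 1000000 = usecs * 1152 / 10000.
- */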
- rawbits = length * 1152 / 10000;
- if (duty_cycle > 50)
- chunk = 3;
- else
- chunk = 1;
- for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
- shifted = chunk << (i * 3);
- shifted >>= 1;
- output &= (~shifted);
- i++;
- if (i == 3) {
- soutp(UART_TX, output);
- while (!(sinp(UART_LSR) & UART_LSR_THRE))
- ;
- output = 0x7f;
- i = 0;
- }
- }
- if (i != 0) {
- soutp(UART_TX, output);
- while (!(sinp(UART_LSR) & UART_LSR_TEMT))
- ;
- }
-
- if (i == 0)
- ret = (-rawbits) * 10000 / 1152;
- else
- ret = (3 - i) * 3 * 10000 / 1152 + (-rawbits) * 10000 / 1152;
-
- return ret;
-}
-
-/* Version using udelay() */
-
-/*
- * here we use fixed point arithmetic, with 8
- * fractional bits. that gets us within 0.1% or so of the right average
- * frequency, albeit with some jitter in pulse length - Steve
- *
- * This should use ndelay instead.
- */
-
-/* To match 8 fractional bits used for pulse/space length */
-
-static long send_pulse_homebrew_softcarrier(unsigned long length)
-{
- int flag;
- unsigned long actual, target, d;
-
- length <<= 8;
-
-	actual = 0;
-	target = 0;
-	flag = 0;
- while (actual < length) {
- if (flag) {
- off();
- target += space_width;
- } else {
- on();
- target += pulse_width;
- }
- d = (target - actual -
- LIRC_SERIAL_TRANSMITTER_LATENCY + 128) >> 8;
- /*
- * Note - we've checked in ioctl that the pulse/space
- * widths are big enough so that d is > 0
- */
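- /*
- * The "+ 128" rounds the 8.8 fixed-point value to the nearest
- * whole microsecond before the >> 8; the latency fudge is added
- * back into "actual" below.
- */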
- udelay(d);
- actual += (d << 8) + LIRC_SERIAL_TRANSMITTER_LATENCY;
- flag = !flag;
- }
- return (actual-length) >> 8;
-}
-
-static long send_pulse_homebrew(unsigned long length)
-{
- if (length <= 0)
- return 0;
-
- if (softcarrier)
- return send_pulse_homebrew_softcarrier(length);
-
- on();
- safe_udelay(length);
- return 0;
-}
-
-static void send_space_irdeo(long length)
-{
- if (length <= 0)
- return;
-
- safe_udelay(length);
-}
-
-static void send_space_homebrew(long length)
-{
- off();
- if (length <= 0)
- return;
- safe_udelay(length);
-}
-
-static void rbwrite(int l)
-{
- if (lirc_buffer_full(&rbuf)) {
- /* no new signals will be accepted */
- dprintk("Buffer overrun\n");
- return;
- }
- lirc_buffer_write(&rbuf, (void *)&l);
-}
-
-static void frbwrite(int l)
-{
- /* simple noise filter */
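- /*
- * After a long (>20000us) space, short pulses are held back and
- * accumulated: if they never add up to more than 250us before the
- * next long space arrives they are folded into that space as
- * noise, otherwise the held space and the combined pulse are
- * flushed (a short space while held also forces a flush).
- */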
- static int pulse, space;
- static unsigned int ptr;
-
- if (ptr > 0 && (l & PULSE_BIT)) {
- pulse += l & PULSE_MASK;
- if (pulse > 250) {
- rbwrite(space);
- rbwrite(pulse | PULSE_BIT);
- ptr = 0;
- pulse = 0;
- }
- return;
- }
- if (!(l & PULSE_BIT)) {
- if (ptr == 0) {
- if (l > 20000) {
- space = l;
- ptr++;
- return;
- }
- } else {
- if (l > 20000) {
- space += pulse;
- if (space > PULSE_MASK)
- space = PULSE_MASK;
- space += l;
- if (space > PULSE_MASK)
- space = PULSE_MASK;
- pulse = 0;
- return;
- }
- rbwrite(space);
- rbwrite(pulse | PULSE_BIT);
- ptr = 0;
- pulse = 0;
- }
- }
- rbwrite(l);
-}
-
-static irqreturn_t lirc_irq_handler(int i, void *blah)
-{
- struct timeval tv;
- int counter, dcd;
- u8 status;
- long deltv;
- int data;
- static int last_dcd = -1;
-
- if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
- /* not our interrupt */
- return IRQ_NONE;
- }
-
- counter = 0;
- do {
- counter++;
- status = sinp(UART_MSR);
- if (counter > RS_ISR_PASS_LIMIT) {
- pr_warn("AIEEEE: We're caught!\n");
- break;
- }
- if ((status & hardware[type].signal_pin_change)
- && sense != -1) {
- /* get current time */
- do_gettimeofday(&tv);
-
- /* New mode, written by Trent Piepho
- <xyzzy@u.washington.edu>. */
-
- /*
- * The old format was not very portable.
- * We now use an int to pass pulses
- * and spaces to user space.
- *
- * If PULSE_BIT is set a pulse has been
- * received, otherwise a space has been
- * received. The driver needs to know if your
- * receiver is active high or active low, or
- * the space/pulse sense could be
- * inverted. The bits denoted by PULSE_MASK are
- * the length in microseconds. Lengths greater
- * than or equal to 16 seconds are clamped to
- * PULSE_MASK. All other bits are unused.
- * This is a much simpler interface for user
- * programs, as well as eliminating "out of
- * phase" errors with space/pulse
- * autodetection.
- */
-
- /* calc time since last interrupt in microseconds */
- dcd = (status & hardware[type].signal_pin) ? 1 : 0;
-
- if (dcd == last_dcd) {
- pr_warn("ignoring spike: %d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- (unsigned long)tv.tv_usec,
- (unsigned long)lasttv.tv_usec);
- continue;
- }
-
- deltv = tv.tv_sec-lasttv.tv_sec;
- if (tv.tv_sec < lasttv.tv_sec ||
- (tv.tv_sec == lasttv.tv_sec &&
- tv.tv_usec < lasttv.tv_usec)) {
- pr_warn("AIEEEE: your clock just jumped backwards\n");
- pr_warn("%d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- (unsigned long)tv.tv_usec,
- (unsigned long)lasttv.tv_usec);
- data = PULSE_MASK;
- } else if (deltv > 15) {
- data = PULSE_MASK; /* really long time */
- if (!(dcd^sense)) {
- /* sanity check */
- pr_warn("AIEEEE: %d %d %lx %lx %lx %lx\n",
- dcd, sense,
- tv.tv_sec, lasttv.tv_sec,
- (unsigned long)tv.tv_usec,
- (unsigned long)lasttv.tv_usec);
- /*
- * detecting pulse while this
- * MUST be a space!
- */
- sense = sense ? 0 : 1;
- }
- } else
- data = (int) (deltv*1000000 +
- tv.tv_usec -
- lasttv.tv_usec);
- frbwrite(dcd^sense ? data : (data|PULSE_BIT));
- lasttv = tv;
- last_dcd = dcd;
- wake_up_interruptible(&rbuf.wait_poll);
- }
- } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
- return IRQ_HANDLED;
-}
-
-
-static int hardware_init_port(void)
-{
- u8 scratch, scratch2, scratch3;
-
- /*
- * This is a simple port existence test, borrowed from the autoconfig
- * function in drivers/serial/8250.c
- */
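- /*
- * Write 0x00 and then 0x0f to the IER low nibble and read each
- * value back; a working UART returns exactly what was written.
- */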
- scratch = sinp(UART_IER);
- soutp(UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, 0x0f);
-#ifdef __i386__
- outb(0x00, 0x080);
-#endif
- scratch3 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0f) {
- /* we fail, there's nothing here */
- pr_err("port existence test failed, cannot continue\n");
- return -ENODEV;
- }
-
-
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Clear registers. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- /* Set line for power source */
- off();
-
- /* Clear registers again to be sure. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- switch (type) {
- case LIRC_IRDEO:
- case LIRC_IRDEO_REMOTE:
- /* setup port to 7N1 @ 115200 Baud */
- /* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */
-
- /* Set DLAB 1. */
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
- /* Set DLAB 0 + 7N1 */
- soutp(UART_LCR, UART_LCR_WLEN7);
- /* THR interrupt already disabled at this point */
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int lirc_serial_probe(struct platform_device *dev)
-{
- int i, nlow, nhigh, result;
-
- result = devm_request_irq(&dev->dev, irq, lirc_irq_handler,
- (share_irq ? IRQF_SHARED : 0),
- LIRC_DRIVER_NAME, &hardware);
- if (result < 0) {
- if (result == -EBUSY)
- dev_err(&dev->dev, "IRQ %d busy\n", irq);
- else if (result == -EINVAL)
- dev_err(&dev->dev, "Bad irq number or handler\n");
- return result;
- }
-
- /* Reserve io region. */
- /*
- * Future MMAP-Developers: Attention!
- * For memory mapped I/O you *might* need to use ioremap() first,
- * for the NSLU2 it's done in boot code.
- */
- if (((iommap)
- && (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
- LIRC_DRIVER_NAME) == NULL))
- || ((!iommap)
- && (devm_request_region(&dev->dev, io, 8,
- LIRC_DRIVER_NAME) == NULL))) {
- dev_err(&dev->dev, "port %04x already in use\n", io);
- dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
- dev_warn(&dev->dev,
- "or compile the serial port driver as module and\n");
- dev_warn(&dev->dev, "make sure this module is loaded first\n");
- return -EBUSY;
- }
-
- result = hardware_init_port();
- if (result < 0)
- return result;
-
- /* Initialize pulse/space widths */
- init_timing_params(duty_cycle, freq);
-
- /* If pin is high, then this must be an active low receiver. */
- if (sense == -1) {
- /* wait 1/2 sec for the power supply */
- msleep(500);
-
- /*
- * probe 9 times every 0.04s, collect "votes" for
- * active high/low
- */
- nlow = 0;
- nhigh = 0;
- for (i = 0; i < 9; i++) {
- if (sinp(UART_MSR) & hardware[type].signal_pin)
- nlow++;
- else
- nhigh++;
- msleep(40);
- }
- sense = nlow >= nhigh ? 1 : 0;
- dev_info(&dev->dev, "auto-detected active %s receiver\n",
- sense ? "low" : "high");
- } else
- dev_info(&dev->dev, "Manually using active %s receiver\n",
- sense ? "low" : "high");
-
- dprintk("Interrupt %d, port %04x obtained\n", irq, io);
- return 0;
-}
-
-static int set_use_inc(void *data)
-{
- unsigned long flags;
-
- /* initialize timestamp */
- do_gettimeofday(&lasttv);
-
- spin_lock_irqsave(&hardware[type].lock, flags);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
-
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
-	unsigned long flags;
-
- spin_lock_irqsave(&hardware[type].lock, flags);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-}
-
-static ssize_t lirc_write(struct file *file, const char __user *buf,
- size_t n, loff_t *ppos)
-{
- int i, count;
- unsigned long flags;
- long delta = 0;
- int *wbuf;
-
- if (!(hardware[type].features & LIRC_CAN_SEND_PULSE))
- return -EPERM;
-
- count = n / sizeof(int);
- if (n % sizeof(int) || count % 2 == 0)
- return -EINVAL;
- wbuf = memdup_user(buf, n);
- if (IS_ERR(wbuf))
- return PTR_ERR(wbuf);
- spin_lock_irqsave(&hardware[type].lock, flags);
- if (type == LIRC_IRDEO) {
- /* DTR, RTS down */
- on();
- }
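- /*
- * wbuf holds alternating pulse/space lengths in microseconds;
- * send_pulse() returns how far it overran its slot and that
- * overshoot (delta) is subtracted from the following space.
- */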
- for (i = 0; i < count; i++) {
- if (i%2)
- hardware[type].send_space(wbuf[i] - delta);
- else
- delta = hardware[type].send_pulse(wbuf[i]);
- }
- off();
- spin_unlock_irqrestore(&hardware[type].lock, flags);
- kfree(wbuf);
- return n;
-}
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- int result;
- u32 __user *uptr = (u32 __user *)arg;
- u32 value;
-
- switch (cmd) {
- case LIRC_GET_SEND_MODE:
- if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
- return -ENOIOCTLCMD;
-
- result = put_user(LIRC_SEND2MODE
- (hardware[type].features&LIRC_CAN_SEND_MASK),
- uptr);
- if (result)
- return result;
- break;
-
- case LIRC_SET_SEND_MODE:
- if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- /* only LIRC_MODE_PULSE supported */
- if (value != LIRC_MODE_PULSE)
- return -EINVAL;
- break;
-
- case LIRC_GET_LENGTH:
- return -ENOIOCTLCMD;
-
- case LIRC_SET_SEND_DUTY_CYCLE:
- dprintk("SET_SEND_DUTY_CYCLE\n");
- if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- if (value <= 0 || value > 100)
- return -EINVAL;
- return init_timing_params(value, freq);
-
- case LIRC_SET_SEND_CARRIER:
- dprintk("SET_SEND_CARRIER\n");
- if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- if (value > 500000 || value < 20000)
- return -EINVAL;
- return init_timing_params(duty_cycle, value);
-
- default:
- return lirc_dev_fop_ioctl(filep, cmd, arg);
- }
- return 0;
-}
-
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .write = lirc_write,
- .unlocked_ioctl = lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_ioctl,
-#endif
- .read = lirc_dev_fop_read,
- .poll = lirc_dev_fop_poll,
- .open = lirc_dev_fop_open,
- .release = lirc_dev_fop_close,
- .llseek = no_llseek,
-};
-
-static struct lirc_driver driver = {
- .name = LIRC_DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .sample_rate = 0,
- .data = NULL,
- .add_to_buf = NULL,
- .rbuf = &rbuf,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .dev = NULL,
- .owner = THIS_MODULE,
-};
-
-static struct platform_device *lirc_serial_dev;
-
-static int lirc_serial_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* Disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Clear registers. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- return 0;
-}
-
-/* twisty maze... need a forward-declaration here... */
-static void lirc_serial_exit(void);
-
-static int lirc_serial_resume(struct platform_device *dev)
-{
- unsigned long flags;
- int result;
-
- result = hardware_init_port();
- if (result < 0)
- return result;
-
- spin_lock_irqsave(&hardware[type].lock, flags);
- /* Enable Interrupt */
- do_gettimeofday(&lasttv);
- soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
- off();
-
- lirc_buffer_clear(&rbuf);
-
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- return 0;
-}
-
-static struct platform_driver lirc_serial_driver = {
- .probe = lirc_serial_probe,
- .suspend = lirc_serial_suspend,
- .resume = lirc_serial_resume,
- .driver = {
- .name = "lirc_serial",
- },
-};
-
-static int __init lirc_serial_init(void)
-{
- int result;
-
- /* Init read buffer. */
- result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN);
- if (result < 0)
- return result;
-
- result = platform_driver_register(&lirc_serial_driver);
- if (result) {
- printk("lirc register returned %d\n", result);
- goto exit_buffer_free;
- }
-
- lirc_serial_dev = platform_device_alloc("lirc_serial", 0);
- if (!lirc_serial_dev) {
- result = -ENOMEM;
- goto exit_driver_unregister;
- }
-
- result = platform_device_add(lirc_serial_dev);
- if (result)
- goto exit_device_put;
-
- return 0;
-
-exit_device_put:
- platform_device_put(lirc_serial_dev);
-exit_driver_unregister:
- platform_driver_unregister(&lirc_serial_driver);
-exit_buffer_free:
- lirc_buffer_free(&rbuf);
- return result;
-}
-
-static void lirc_serial_exit(void)
-{
- platform_device_unregister(lirc_serial_dev);
- platform_driver_unregister(&lirc_serial_driver);
- lirc_buffer_free(&rbuf);
-}
-
-static int __init lirc_serial_init_module(void)
-{
- int result;
-
- switch (type) {
- case LIRC_HOMEBREW:
- case LIRC_IRDEO:
- case LIRC_IRDEO_REMOTE:
- case LIRC_ANIMAX:
- case LIRC_IGOR:
- /* if nothing specified, use ttyS0/com1 and irq 4 */
- io = io ? io : 0x3f8;
- irq = irq ? irq : 4;
- break;
- default:
- return -EINVAL;
- }
- if (!softcarrier) {
- switch (type) {
- case LIRC_HOMEBREW:
- case LIRC_IGOR:
- hardware[type].features &=
- ~(LIRC_CAN_SET_SEND_DUTY_CYCLE|
- LIRC_CAN_SET_SEND_CARRIER);
- break;
- }
- }
-
- /* make sure sense is either -1, 0, or 1 */
- if (sense != -1)
- sense = !!sense;
-
- result = lirc_serial_init();
- if (result)
- return result;
-
- driver.features = hardware[type].features;
- driver.dev = &lirc_serial_dev->dev;
- driver.minor = lirc_register_driver(&driver);
- if (driver.minor < 0) {
- pr_err("register_chrdev failed!\n");
- lirc_serial_exit();
- return driver.minor;
- }
- return 0;
-}
-
-static void __exit lirc_serial_exit_module(void)
-{
- lirc_unregister_driver(driver.minor);
- lirc_serial_exit();
- dprintk("cleaned up module\n");
-}
-
-
-module_init(lirc_serial_init_module);
-module_exit(lirc_serial_exit_module);
-
-MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
-MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, "
- "Christoph Bartelmus, Andrei Tanas");
-MODULE_LICENSE("GPL");
-
-module_param(type, int, S_IRUGO);
-MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo,"
- " 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug,"
- " 5 = NSLU2 RX:CTS2/TX:GreenLED)");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-/* some architectures (e.g. intel xscale) have memory mapped registers */
-module_param(iommap, bool, S_IRUGO);
-MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O"
- " (0 = no memory mapped io)");
-
-/*
- * some architectures (e.g. intel xscale) align the 8bit serial registers
- * on 32bit word boundaries.
- * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
- */
-module_param(ioshift, int, S_IRUGO);
-MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(share_irq, bool, S_IRUGO);
-MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
-
-module_param(sense, int, S_IRUGO);
-MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
- " (0 = active high, 1 = active low )");
-
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
-module_param(txsense, bool, S_IRUGO);
-MODULE_PARM_DESC(txsense, "Sense of transmitter circuit"
- " (0 = active high, 1 = active low )");
-#endif
-
-module_param(softcarrier, bool, S_IRUGO);
-MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
deleted file mode 100644
index 4f326e97ad75..000000000000
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ /dev/null
@@ -1,999 +0,0 @@
-/*
- * LIRC SIR driver, (C) 2000 Milan Pikula <www@fornax.sk>
- *
- * lirc_sir - Device driver for use with SIR (serial infra red)
- * mode of IrDA on many notebooks.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * 2000/09/16 Frank Przybylski <mail@frankprzybylski.de> :
- * added timeout and relaxed pulse detection, removed gap bug
- *
- * 2000/12/15 Christoph Bartelmus <lirc@bartelmus.de> :
- * added support for Tekram Irmate 210 (sending does not work yet,
- * kind of disappointing that nobody was able to implement that
- * before),
- * major clean-up
- *
- * 2001/02/27 Christoph Bartelmus <lirc@bartelmus.de> :
- * added support for StrongARM SA1100 embedded microprocessor
- * parts cut'n'pasted from sa1100_ir.c (C) 2000 Russell King
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/io.h>
-#include <asm/irq.h>
-#include <linux/fcntl.h>
-#include <linux/platform_device.h>
-
-#include <linux/timer.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-/* SECTION: Definitions */
-
-/*** Tekram dongle ***/
-#ifdef LIRC_SIR_TEKRAM
-/* stolen from kernel source */
-/* definitions for Tekram dongle */
-#define TEKRAM_115200 0x00
-#define TEKRAM_57600 0x01
-#define TEKRAM_38400 0x02
-#define TEKRAM_19200 0x03
-#define TEKRAM_9600 0x04
-#define TEKRAM_2400 0x08
-
-#define TEKRAM_PW 0x10 /* Pulse select bit */
-
-/* 10 bits * 1s/115200 bits, in microseconds: ~87us */
-#define TIME_CONST (10000000ul/115200ul)
-
-#endif
-
-#ifdef LIRC_SIR_ACTISYS_ACT200L
-static void init_act200(void);
-#elif defined(LIRC_SIR_ACTISYS_ACT220L)
-static void init_act220(void);
-#endif
-
-#define RBUF_LEN 1024
-#define WBUF_LEN 1024
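-/* RBUF_LEN must be a power of two: indices wrap with & (RBUF_LEN - 1) */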
-
-#define LIRC_DRIVER_NAME "lirc_sir"
-
-#define PULSE '['
-
-#ifndef LIRC_SIR_TEKRAM
-/* 9 bits * 1s/115200 bits, in microseconds: 78.125us */
-#define TIME_CONST (9000000ul/115200ul)
-#endif
-
-
-/* timeout for sequences in jiffies (=5/100s), must be longer than TIME_CONST */
-#define SIR_TIMEOUT (HZ*5/100)
-
-#ifndef LIRC_ON_SA1100
-#ifndef LIRC_IRQ
-#define LIRC_IRQ 4
-#endif
-#ifndef LIRC_PORT
-/* for external dongles, default to com1 */
-#if defined(LIRC_SIR_ACTISYS_ACT200L) || \
- defined(LIRC_SIR_ACTISYS_ACT220L) || \
- defined(LIRC_SIR_TEKRAM)
-#define LIRC_PORT 0x3f8
-#else
-/* onboard sir ports are typically com3 */
-#define LIRC_PORT 0x3e8
-#endif
-#endif
-
-static int io = LIRC_PORT;
-static int irq = LIRC_IRQ;
-static int threshold = 3;
-#endif
-
-static DEFINE_SPINLOCK(timer_lock);
-static struct timer_list timerlist;
-/* time of last signal change detected */
-static ktime_t last;
-/* time of last UART data ready interrupt */
-static ktime_t last_intr_time;
-static int last_value;
-
-static DECLARE_WAIT_QUEUE_HEAD(lirc_read_queue);
-
-static DEFINE_SPINLOCK(hardware_lock);
-
-static int rx_buf[RBUF_LEN];
-static unsigned int rx_tail, rx_head;
-
-static bool debug;
-
-/* SECTION: Prototypes */
-
-/* Communication with user-space */
-static unsigned int lirc_poll(struct file *file, poll_table *wait);
-static ssize_t lirc_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos);
-static ssize_t lirc_write(struct file *file, const char __user *buf, size_t n,
- loff_t *pos);
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
-static void add_read_queue(int flag, unsigned long val);
-static int init_chrdev(void);
-static void drop_chrdev(void);
-/* Hardware */
-static irqreturn_t sir_interrupt(int irq, void *dev_id);
-static void send_space(unsigned long len);
-static void send_pulse(unsigned long len);
-static int init_hardware(void);
-static void drop_hardware(void);
-/* Initialisation */
-static int init_port(void);
-static void drop_port(void);
-
-static inline unsigned int sinp(int offset)
-{
- return inb(io + offset);
-}
-
-static inline void soutp(int offset, int value)
-{
- outb(value, io + offset);
-}
-
-#ifndef MAX_UDELAY_MS
-#define MAX_UDELAY_US 5000
-#else
-#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
-#endif
-
-static void safe_udelay(unsigned long usecs)
-{
- while (usecs > MAX_UDELAY_US) {
- udelay(MAX_UDELAY_US);
- usecs -= MAX_UDELAY_US;
- }
- udelay(usecs);
-}
-
-/* SECTION: Communication with user-space */
-
-static unsigned int lirc_poll(struct file *file, poll_table *wait)
-{
- poll_wait(file, &lirc_read_queue, wait);
- if (rx_head != rx_tail)
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-static ssize_t lirc_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- int n = 0;
- int retval = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- if (count % sizeof(int))
- return -EINVAL;
-
- add_wait_queue(&lirc_read_queue, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- while (n < count) {
- if (rx_head != rx_tail) {
- if (copy_to_user(buf + n,
- rx_buf + rx_head,
- sizeof(int))) {
- retval = -EFAULT;
- break;
- }
- rx_head = (rx_head + 1) & (RBUF_LEN - 1);
- n += sizeof(int);
- } else {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- }
- remove_wait_queue(&lirc_read_queue, &wait);
- set_current_state(TASK_RUNNING);
- return n ? n : retval;
-}
-static ssize_t lirc_write(struct file *file, const char __user *buf, size_t n,
- loff_t *pos)
-{
- unsigned long flags;
- int i, count;
- int *tx_buf;
-
- count = n / sizeof(int);
- if (n % sizeof(int) || count % 2 == 0)
- return -EINVAL;
- tx_buf = memdup_user(buf, n);
- if (IS_ERR(tx_buf))
- return PTR_ERR(tx_buf);
- i = 0;
- local_irq_save(flags);
- while (1) {
- if (i >= count)
- break;
- if (tx_buf[i])
- send_pulse(tx_buf[i]);
- i++;
- if (i >= count)
- break;
- if (tx_buf[i])
- send_space(tx_buf[i]);
- i++;
- }
- local_irq_restore(flags);
- kfree(tx_buf);
- return count;
-}
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- u32 __user *uptr = (u32 __user *)arg;
- int retval = 0;
- u32 value = 0;
-
- if (cmd == LIRC_GET_FEATURES)
- value = LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2;
- else if (cmd == LIRC_GET_SEND_MODE)
- value = LIRC_MODE_PULSE;
- else if (cmd == LIRC_GET_REC_MODE)
- value = LIRC_MODE_MODE2;
-
- switch (cmd) {
- case LIRC_GET_FEATURES:
- case LIRC_GET_SEND_MODE:
- case LIRC_GET_REC_MODE:
- retval = put_user(value, uptr);
- break;
-
- case LIRC_SET_SEND_MODE:
- case LIRC_SET_REC_MODE:
- retval = get_user(value, uptr);
- break;
- default:
- retval = -ENOIOCTLCMD;
-
- }
-
- if (retval)
- return retval;
- if (cmd == LIRC_SET_REC_MODE) {
- if (value != LIRC_MODE_MODE2)
- retval = -ENOSYS;
- } else if (cmd == LIRC_SET_SEND_MODE) {
- if (value != LIRC_MODE_PULSE)
- retval = -ENOSYS;
- }
-
- return retval;
-}
-
-static void add_read_queue(int flag, unsigned long val)
-{
- unsigned int new_rx_tail;
- int newval;
-
- pr_debug("add flag %d with val %lu\n", flag, val);
-
- newval = val & PULSE_MASK;
-
- /*
- * statistically, pulses are ~TIME_CONST/2 too long. we could
- * maybe make this more exact, but this is good enough
- */
- if (flag) {
- /* pulse */
- if (newval > TIME_CONST/2)
- newval -= TIME_CONST/2;
- else /* should not ever happen */
- newval = 1;
- newval |= PULSE_BIT;
- } else {
- newval += TIME_CONST/2;
- }
- new_rx_tail = (rx_tail + 1) & (RBUF_LEN - 1);
- if (new_rx_tail == rx_head) {
- pr_debug("Buffer overrun.\n");
- return;
- }
- rx_buf[rx_tail] = newval;
- rx_tail = new_rx_tail;
- wake_up_interruptible(&lirc_read_queue);
-}
-
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .read = lirc_read,
- .write = lirc_write,
- .poll = lirc_poll,
- .unlocked_ioctl = lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_ioctl,
-#endif
- .open = lirc_dev_fop_open,
- .release = lirc_dev_fop_close,
- .llseek = no_llseek,
-};
-
-static int set_use_inc(void *data)
-{
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
-}
-
-static struct lirc_driver driver = {
- .name = LIRC_DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .sample_rate = 0,
- .data = NULL,
- .add_to_buf = NULL,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .dev = NULL,
- .owner = THIS_MODULE,
-};
-
-static struct platform_device *lirc_sir_dev;
-
-static int init_chrdev(void)
-{
- driver.dev = &lirc_sir_dev->dev;
- driver.minor = lirc_register_driver(&driver);
- if (driver.minor < 0) {
- pr_err("init_chrdev() failed.\n");
- return -EIO;
- }
- return 0;
-}
-
-static void drop_chrdev(void)
-{
- lirc_unregister_driver(driver.minor);
-}
-
-/* SECTION: Hardware */
-static void sir_timeout(unsigned long data)
-{
- /*
- * if last received signal was a pulse, but receiving stopped
- * within the 9 bit frame, we need to finish this pulse and
-	 * simulate a signal change from pulse to space. Otherwise
- * upper layers will receive two sequences next time.
- */
-
- unsigned long flags;
- unsigned long pulse_end;
-
- /* avoid interference with interrupt */
- spin_lock_irqsave(&timer_lock, flags);
- if (last_value) {
- /* clear unread bits in UART and restart */
- outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
- /* determine 'virtual' pulse end: */
- pulse_end = min_t(unsigned long,
- ktime_us_delta(last, last_intr_time),
- PULSE_MASK);
- dev_dbg(driver.dev, "timeout add %d for %lu usec\n",
- last_value, pulse_end);
- add_read_queue(last_value, pulse_end);
- last_value = 0;
- last = last_intr_time;
- }
- spin_unlock_irqrestore(&timer_lock, flags);
-}
-
-static irqreturn_t sir_interrupt(int irq, void *dev_id)
-{
- unsigned char data;
- ktime_t curr_time;
- static unsigned long delt;
- unsigned long deltintr;
- unsigned long flags;
- int iir, lsr;
-
- while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
-		switch (iir&UART_IIR_ID) { /* FIXME: this needs to be cleaned up */
- case UART_IIR_MSI:
- (void) inb(io + UART_MSR);
- break;
- case UART_IIR_RLSI:
- (void) inb(io + UART_LSR);
- break;
- case UART_IIR_THRI:
-#if 0
- if (lsr & UART_LSR_THRE) /* FIFO is empty */
- outb(data, io + UART_TX)
-#endif
- break;
- case UART_IIR_RDI:
- /* avoid interference with timer */
- spin_lock_irqsave(&timer_lock, flags);
- do {
- del_timer(&timerlist);
- data = inb(io + UART_RX);
- curr_time = ktime_get();
- delt = min_t(unsigned long,
- ktime_us_delta(last, curr_time),
- PULSE_MASK);
- deltintr = min_t(unsigned long,
- ktime_us_delta(last_intr_time,
- curr_time),
- PULSE_MASK);
- dev_dbg(driver.dev, "t %lu, d %d\n",
- deltintr, (int)data);
- /*
-				 * if nothing came in during the last X cycles,
-				 * it was a gap
- */
- if (deltintr > TIME_CONST * threshold) {
- if (last_value) {
- dev_dbg(driver.dev, "GAP\n");
- /* simulate signal change */
- add_read_queue(last_value,
- delt -
- deltintr);
- last_value = 0;
- last = last_intr_time;
- delt = deltintr;
- }
- }
- data = 1;
- if (data ^ last_value) {
- /*
- * deltintr > 2*TIME_CONST, remember?
- * the other case is timeout
- */
- add_read_queue(last_value,
- delt-TIME_CONST);
- last_value = data;
- last = curr_time;
- last = ktime_sub_us(last,
- TIME_CONST);
- }
- last_intr_time = curr_time;
- if (data) {
- /*
- * start timer for end of
- * sequence detection
- */
- timerlist.expires = jiffies +
- SIR_TIMEOUT;
- add_timer(&timerlist);
- }
-
- lsr = inb(io + UART_LSR);
- } while (lsr & UART_LSR_DR); /* data ready */
- spin_unlock_irqrestore(&timer_lock, flags);
- break;
- default:
- break;
- }
- }
- return IRQ_RETVAL(IRQ_HANDLED);
-}
-
-static void send_space(unsigned long len)
-{
- safe_udelay(len);
-}
-
-static void send_pulse(unsigned long len)
-{
- long bytes_out = len / TIME_CONST;
-
- if (bytes_out == 0)
- bytes_out++;
-
- while (bytes_out--) {
- outb(PULSE, io + UART_TX);
-		/* FIXME: needs proper waiting, as in char/serial.c */
- while (!(inb(io + UART_LSR) & UART_LSR_THRE))
- ;
- }
-}
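/*
 * Sketch of the timing arithmetic in send_pulse(): at 115200 baud with
 * 7N1 framing, one UART frame (start bit, 7 data bits, stop bit) lasts
 * roughly TIME_CONST microseconds, so a pulse of 'len' usec is
 * approximated by len / TIME_CONST frames, with a minimum of one. The
 * constant below is an approximation for illustration only.
 */
#include <stdio.h>

#define TIME_CONST 78	/* ~9 bit times at 115200 baud, in usec */

static long frames_for_pulse(long len_us)
{
	long frames = len_us / TIME_CONST;

	return frames ? frames : 1;
}

int main(void)
{
	printf("600 usec pulse -> %ld UART frames\n", frames_for_pulse(600));
	return 0;
}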
-
-static int init_hardware(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
- /* reset UART */
-#if defined(LIRC_SIR_TEKRAM)
- /* disable FIFO */
- soutp(UART_FCR,
- UART_FCR_CLEAR_RCVR|
- UART_FCR_CLEAR_XMIT|
- UART_FCR_TRIGGER_1);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Set DLAB 1. */
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
-
- /* Set divisor to 12 => 9600 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 12);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* power supply */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- safe_udelay(50*1000);
-
- /* -DTR low -> reset PIC */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2);
- udelay(1*1000);
-
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(100);
-
-
- /* -RTS low -> send control byte */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2);
- udelay(7);
- soutp(UART_TX, TEKRAM_115200|TEKRAM_PW);
-
- /* one byte takes ~1042 usec to transmit at 9600,8N1 */
- udelay(1500);
-
- /* back to normal operation */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(50);
-
- udelay(1500);
-
- /* read previous control byte */
- pr_info("0x%02x\n", sinp(UART_RX));
-
- /* Set DLAB 1. */
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
-
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
-
- /* Set DLAB 0, 8 Bit */
- soutp(UART_LCR, UART_LCR_WLEN8);
- /* enable interrupts */
- soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI);
-#else
- outb(0, io + UART_MCR);
- outb(0, io + UART_IER);
- /* init UART */
- /* set DLAB, speed = 115200 */
- outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR);
- outb(1, io + UART_DLL); outb(0, io + UART_DLM);
- /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */
- outb(UART_LCR_WLEN7, io + UART_LCR);
- /* FIFO operation */
- outb(UART_FCR_ENABLE_FIFO, io + UART_FCR);
- /* interrupts */
- /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */
- outb(UART_IER_RDI, io + UART_IER);
- /* turn on UART */
- outb(UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2, io + UART_MCR);
-#ifdef LIRC_SIR_ACTISYS_ACT200L
- init_act200();
-#elif defined(LIRC_SIR_ACTISYS_ACT220L)
- init_act220();
-#endif
-#endif
- spin_unlock_irqrestore(&hardware_lock, flags);
- return 0;
-}
-
-static void drop_hardware(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /* turn off interrupts */
- outb(0, io + UART_IER);
-
- spin_unlock_irqrestore(&hardware_lock, flags);
-}
-
-/* SECTION: Initialisation */
-
-static int init_port(void)
-{
- int retval;
-
- /* get I/O port access and IRQ line */
- if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) {
- pr_err("i/o port 0x%.4x already in use.\n", io);
- return -EBUSY;
- }
- retval = request_irq(irq, sir_interrupt, 0,
- LIRC_DRIVER_NAME, NULL);
- if (retval < 0) {
- release_region(io, 8);
- pr_err("IRQ %d already in use.\n", irq);
- return retval;
- }
- pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
-
- setup_timer(&timerlist, sir_timeout, 0);
-
- return 0;
-}
-
-static void drop_port(void)
-{
- free_irq(irq, NULL);
- del_timer_sync(&timerlist);
- release_region(io, 8);
-}
-
-#ifdef LIRC_SIR_ACTISYS_ACT200L
-/* Crystal/Cirrus CS8130 IR transceiver, used in Actisys Act200L dongle */
-/* some code borrowed from Linux IRDA driver */
-
-/* Register 0: Control register #1 */
-#define ACT200L_REG0 0x00
-#define ACT200L_TXEN 0x01 /* Enable transmitter */
-#define ACT200L_RXEN 0x02 /* Enable receiver */
-#define ACT200L_ECHO 0x08 /* Echo control chars */
-
-/* Register 1: Control register #2 */
-#define ACT200L_REG1 0x10
-#define ACT200L_LODB 0x01 /* Load new baud rate count value */
-#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
-
-/* Register 3: Transmit mode register #2 */
-#define ACT200L_REG3 0x30
-#define ACT200L_B0 0x01 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */
-#define ACT200L_B1 0x02 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */
-#define ACT200L_CHSY 0x04 /* StartBit Synced 0=bittime, 1=startbit */
-
-/* Register 4: Output Power register */
-#define ACT200L_REG4 0x40
-#define ACT200L_OP0 0x01 /* Enable LED1C output */
-#define ACT200L_OP1 0x02 /* Enable LED2C output */
-#define ACT200L_BLKR 0x04
-
-/* Register 5: Receive Mode register */
-#define ACT200L_REG5 0x50
-#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
- /*.. other various IRDA bit modes, and TV remote modes..*/
-
-/* Register 6: Receive Sensitivity register #1 */
-#define ACT200L_REG6 0x60
-#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
-#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
-
-/* Register 7: Receive Sensitivity register #2 */
-#define ACT200L_REG7 0x70
-#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
-
-/* Register 8,9: Baud Rate Divider register #1,#2 */
-#define ACT200L_REG8 0x80
-#define ACT200L_REG9 0x90
-
-#define ACT200L_2400 0x5f
-#define ACT200L_9600 0x17
-#define ACT200L_19200 0x0b
-#define ACT200L_38400 0x05
-#define ACT200L_57600 0x03
-#define ACT200L_115200 0x01
-
-/* Register 13: Control register #3 */
-#define ACT200L_REG13 0xd0
-#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
-
-/* Register 15: Status register */
-#define ACT200L_REG15 0xf0
-
-/* Register 21: Control register #4 */
-#define ACT200L_REG21 0x50
-#define ACT200L_EXCK 0x02 /* Disable clock output driver */
-#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
-
-static void init_act200(void)
-{
- int i;
- __u8 control[] = {
- ACT200L_REG15,
- ACT200L_REG13 | ACT200L_SHDW,
- ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
- ACT200L_REG13,
- ACT200L_REG7 | ACT200L_ENPOS,
- ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
- ACT200L_REG5 | ACT200L_RWIDL,
- ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
- ACT200L_REG3 | ACT200L_B0,
- ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN,
- ACT200L_REG8 | (ACT200L_115200 & 0x0f),
- ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f),
- ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE
- };
-
- /* Set DLAB 1. */
- soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN8);
-
- /* Set divisor to 12 => 9600 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 12);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, UART_LCR_WLEN8);
- /* Set divisor to 12 => 9600 Baud */
-
- /* power supply */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- for (i = 0; i < 50; i++)
- safe_udelay(1000);
-
- /* Reset the dongle : set RTS low for 25 ms */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2);
- for (i = 0; i < 25; i++)
- udelay(1000);
-
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(100);
-
- /* Clear DTR and set RTS to enter command mode */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2);
- udelay(7);
-
- /* send out the control register settings for 115K 7N1 SIR operation */
- for (i = 0; i < sizeof(control); i++) {
- soutp(UART_TX, control[i]);
- /* one byte takes ~1042 usec to transmit at 9600,8N1 */
- udelay(1500);
- }
-
- /* back to normal operation */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(50);
-
- udelay(1500);
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
-
- /* Set DLAB 1. */
- soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7);
-
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* Set DLAB 0, 7 Bit */
- soutp(UART_LCR, UART_LCR_WLEN7);
-
- /* enable interrupts */
- soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI);
-}
-#endif
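/*
 * Standalone sketch of the CS8130 command-byte encoding used in the
 * control[] table above: as far as the register definitions suggest,
 * the high nibble selects the register (shadow registers are reached
 * via the SHDW bit first) and the low nibble carries the data. Values
 * below are examples only.
 */
#include <stdio.h>

static unsigned char cs8130_cmd(unsigned int reg, unsigned int val)
{
	return (unsigned char)(((reg & 0x0f) << 4) | (val & 0x0f));
}

int main(void)
{
	/* register 8 with the low nibble of the 115200-baud divider */
	printf("0x%02x\n", cs8130_cmd(8, 0x01));	/* prints 0x81 */
	return 0;
}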
-
-#ifdef LIRC_SIR_ACTISYS_ACT220L
-/*
- * Derived from linux IrDA driver (net/irda/actisys.c)
- * Drop me a mail for any kind of comment: maxx@spaceboyz.net
- */
-
-void init_act220(void)
-{
- int i;
-
- /* DLAB 1 */
- soutp(UART_LCR, UART_LCR_DLAB|UART_LCR_WLEN7);
-
- /* 9600 baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 12);
-
- /* DLAB 0 */
- soutp(UART_LCR, UART_LCR_WLEN7);
-
- /* reset the dongle, set DTR low for 10us */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2);
- udelay(10);
-
- /* back to normal (still 9600) */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2);
-
- /*
- * send RTS pulses until we reach 115200
-	 * I hope this is really the same for act220l/act220l+
- */
- for (i = 0; i < 3; i++) {
- udelay(10);
- /* set RTS low for 10 us */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2);
- udelay(10);
- /* set RTS high for 10 us */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- }
-
- /* back to normal operation */
- udelay(1500); /* better safe than sorry ;) */
-
- /* Set DLAB 1. */
- soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7);
-
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
-
- /* Set DLAB 0, 7 Bit */
- /* The dongle doesn't seem to have any problems with operation at 7N1 */
- soutp(UART_LCR, UART_LCR_WLEN7);
-
- /* enable interrupts */
- soutp(UART_IER, UART_IER_RDI);
-}
-#endif
-
-static int init_lirc_sir(void)
-{
- int retval;
-
- init_waitqueue_head(&lirc_read_queue);
- retval = init_port();
- if (retval < 0)
- return retval;
- init_hardware();
- pr_info("Installed.\n");
- return 0;
-}
-
-static int lirc_sir_probe(struct platform_device *dev)
-{
- return 0;
-}
-
-static int lirc_sir_remove(struct platform_device *dev)
-{
- return 0;
-}
-
-static struct platform_driver lirc_sir_driver = {
- .probe = lirc_sir_probe,
- .remove = lirc_sir_remove,
- .driver = {
- .name = "lirc_sir",
- },
-};
-
-static int __init lirc_sir_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&lirc_sir_driver);
- if (retval) {
- pr_err("Platform driver register failed!\n");
- return -ENODEV;
- }
-
- lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
- if (!lirc_sir_dev) {
- pr_err("Platform device alloc failed!\n");
- retval = -ENOMEM;
- goto pdev_alloc_fail;
- }
-
- retval = platform_device_add(lirc_sir_dev);
- if (retval) {
- pr_err("Platform device add failed!\n");
- retval = -ENODEV;
- goto pdev_add_fail;
- }
-
- retval = init_chrdev();
- if (retval < 0)
- goto fail;
-
- retval = init_lirc_sir();
- if (retval) {
- drop_chrdev();
- goto fail;
- }
-
- return 0;
-
-fail:
- platform_device_del(lirc_sir_dev);
-pdev_add_fail:
- platform_device_put(lirc_sir_dev);
-pdev_alloc_fail:
- platform_driver_unregister(&lirc_sir_driver);
- return retval;
-}
-
-static void __exit lirc_sir_exit(void)
-{
- drop_hardware();
- drop_chrdev();
- drop_port();
- platform_device_unregister(lirc_sir_dev);
- platform_driver_unregister(&lirc_sir_driver);
- pr_info("Uninstalled.\n");
-}
-
-module_init(lirc_sir_init);
-module_exit(lirc_sir_exit);
-
-#ifdef LIRC_SIR_TEKRAM
-MODULE_DESCRIPTION("Infrared receiver driver for Tekram Irmate 210");
-MODULE_AUTHOR("Christoph Bartelmus");
-#elif defined(LIRC_SIR_ACTISYS_ACT200L)
-MODULE_DESCRIPTION("LIRC driver for Actisys Act200L");
-MODULE_AUTHOR("Karl Bongers");
-#elif defined(LIRC_SIR_ACTISYS_ACT220L)
-MODULE_DESCRIPTION("LIRC driver for Actisys Act220L(+)");
-MODULE_AUTHOR("Jan Roemisch");
-#else
-MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports");
-MODULE_AUTHOR("Milan Pikula");
-#endif
-MODULE_LICENSE("GPL");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(threshold, int, S_IRUGO);
-MODULE_PARM_DESC(threshold, "space detection threshold (3)");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
deleted file mode 100644
index ce3b5f230e2e..000000000000
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ /dev/null
@@ -1,1697 +0,0 @@
-/*
- * i2c IR lirc driver for devices with zilog IR processors
- *
- * Copyright (c) 2000 Gerd Knorr <kraxel@goldbach.in-berlin.de>
- * modified for PixelView (BT878P+W/FM) by
- * Michal Kochanowicz <mkochano@pld.org.pl>
- * Christoph Bartelmus <lirc@bartelmus.de>
- * modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by
- * Ulrich Mueller <ulrich.mueller42@web.de>
- * modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by
- * Stefan Jahn <stefan@lkcc.org>
- * modified for inclusion into kernel sources by
- * Jerome Brock <jbrock@users.sourceforge.net>
- * modified for Leadtek Winfast PVR2000 by
- * Thomas Reitmayr (treitmayr@yahoo.com)
- * modified for Hauppauge PVR-150 IR TX device by
- * Mark Weaver <mark@npsl.co.uk>
- * changed name from lirc_pvr150 to lirc_zilog, works on more than pvr-150
- * Jarod Wilson <jarod@redhat.com>
- *
- * parts are cut&pasted from the lirc_i2c.c driver
- *
- * Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
- * Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/firmware.h>
-#include <linux/vmalloc.h>
-
-#include <linux/mutex.h>
-#include <linux/kthread.h>
-
-#include <media/lirc_dev.h>
-#include <media/lirc.h>
-
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
-struct IR;
-
-struct IR_rx {
- struct kref ref;
- struct IR *ir;
-
- /* RX device */
- struct mutex client_lock;
- struct i2c_client *c;
-
- /* RX polling thread data */
- struct task_struct *task;
-
- /* RX read data */
- unsigned char b[3];
- bool hdpvr_data_fmt;
-};
-
-struct IR_tx {
- struct kref ref;
- struct IR *ir;
-
- /* TX device */
- struct mutex client_lock;
- struct i2c_client *c;
-
- /* TX additional actions needed */
- int need_boot;
- bool post_tx_ready_poll;
-};
-
-struct IR {
- struct kref ref;
- struct list_head list;
-
- /* FIXME spinlock access to l.features */
- struct lirc_driver l;
- struct lirc_buffer rbuf;
-
- struct mutex ir_lock;
- atomic_t open_count;
-
- struct i2c_adapter *adapter;
-
- spinlock_t rx_ref_lock; /* struct IR_rx kref get()/put() */
- struct IR_rx *rx;
-
- spinlock_t tx_ref_lock; /* struct IR_tx kref get()/put() */
- struct IR_tx *tx;
-};
-
-/* IR transceiver instance object list */
-/*
- * This lock is used for the following:
- * a. ir_devices_list access, insertions, deletions
- * b. struct IR kref get()s and put()s
- * c. serialization of ir_probe() for the two i2c_clients for a Z8
- */
-static DEFINE_MUTEX(ir_devices_lock);
-static LIST_HEAD(ir_devices_list);
-
-/* Block size for IR transmitter */
-#define TX_BLOCK_SIZE 99
-
-/* Hauppauge IR transmitter data */
-struct tx_data_struct {
- /* Boot block */
- unsigned char *boot_data;
-
- /* Start of binary data block */
- unsigned char *datap;
-
- /* End of binary data block */
- unsigned char *endp;
-
- /* Number of installed codesets */
- unsigned int num_code_sets;
-
- /* Pointers to codesets */
- unsigned char **code_sets;
-
- /* Global fixed data template */
- int fixed[TX_BLOCK_SIZE];
-};
-
-static struct tx_data_struct *tx_data;
-static struct mutex tx_data_lock;
-
-
-/* module parameters */
-static bool debug; /* debug output */
-static bool tx_only; /* only handle the IR Tx function */
-static int minor = -1; /* minor number */
-
-
-/* struct IR reference counting */
-static struct IR *get_ir_device(struct IR *ir, bool ir_devices_lock_held)
-{
- if (ir_devices_lock_held) {
- kref_get(&ir->ref);
- } else {
- mutex_lock(&ir_devices_lock);
- kref_get(&ir->ref);
- mutex_unlock(&ir_devices_lock);
- }
- return ir;
-}
-
-static void release_ir_device(struct kref *ref)
-{
- struct IR *ir = container_of(ref, struct IR, ref);
-
- /*
- * Things should be in this state by now:
- * ir->rx set to NULL and deallocated - happens before ir->rx->ir put()
- * ir->rx->task kthread stopped - happens before ir->rx->ir put()
- * ir->tx set to NULL and deallocated - happens before ir->tx->ir put()
- * ir->open_count == 0 - happens on final close()
- * ir_lock, tx_ref_lock, rx_ref_lock, all released
- */
- if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
- lirc_unregister_driver(ir->l.minor);
- ir->l.minor = MAX_IRCTL_DEVICES;
- }
- if (kfifo_initialized(&ir->rbuf.fifo))
- lirc_buffer_free(&ir->rbuf);
- list_del(&ir->list);
- kfree(ir);
-}
-
-static int put_ir_device(struct IR *ir, bool ir_devices_lock_held)
-{
- int released;
-
- if (ir_devices_lock_held)
- return kref_put(&ir->ref, release_ir_device);
-
- mutex_lock(&ir_devices_lock);
- released = kref_put(&ir->ref, release_ir_device);
- mutex_unlock(&ir_devices_lock);
-
- return released;
-}
-
-/* struct IR_rx reference counting */
-static struct IR_rx *get_ir_rx(struct IR *ir)
-{
- struct IR_rx *rx;
-
- spin_lock(&ir->rx_ref_lock);
- rx = ir->rx;
- if (rx != NULL)
- kref_get(&rx->ref);
- spin_unlock(&ir->rx_ref_lock);
- return rx;
-}
-
-static void destroy_rx_kthread(struct IR_rx *rx, bool ir_devices_lock_held)
-{
- /* end up polling thread */
- if (!IS_ERR_OR_NULL(rx->task)) {
- kthread_stop(rx->task);
- rx->task = NULL;
- /* Put the ir ptr that ir_probe() gave to the rx poll thread */
- put_ir_device(rx->ir, ir_devices_lock_held);
- }
-}
-
-static void release_ir_rx(struct kref *ref)
-{
- struct IR_rx *rx = container_of(ref, struct IR_rx, ref);
- struct IR *ir = rx->ir;
-
- /*
- * This release function can't do all the work, as we want
- * to keep the rx_ref_lock a spinlock, and killing the poll thread
- * and releasing the ir reference can cause a sleep. That work is
- * performed by put_ir_rx()
- */
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
- /* Don't put_ir_device(rx->ir) here; lock can't be freed yet */
- ir->rx = NULL;
- /* Don't do the kfree(rx) here; we still need to kill the poll thread */
-}
-
-static int put_ir_rx(struct IR_rx *rx, bool ir_devices_lock_held)
-{
- int released;
- struct IR *ir = rx->ir;
-
- spin_lock(&ir->rx_ref_lock);
- released = kref_put(&rx->ref, release_ir_rx);
- spin_unlock(&ir->rx_ref_lock);
- /* Destroy the rx kthread while not holding the spinlock */
- if (released) {
- destroy_rx_kthread(rx, ir_devices_lock_held);
- kfree(rx);
- /* Make sure we're not still in a poll_table somewhere */
- wake_up_interruptible(&ir->rbuf.wait_poll);
- }
- /* Do a reference put() for the rx->ir reference, if we released rx */
- if (released)
- put_ir_device(ir, ir_devices_lock_held);
- return released;
-}
-
-/* struct IR_tx reference counting */
-static struct IR_tx *get_ir_tx(struct IR *ir)
-{
- struct IR_tx *tx;
-
- spin_lock(&ir->tx_ref_lock);
- tx = ir->tx;
- if (tx != NULL)
- kref_get(&tx->ref);
- spin_unlock(&ir->tx_ref_lock);
- return tx;
-}
-
-static void release_ir_tx(struct kref *ref)
-{
- struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
- struct IR *ir = tx->ir;
-
- ir->l.features &= ~LIRC_CAN_SEND_PULSE;
- /* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
- ir->tx = NULL;
- kfree(tx);
-}
-
-static int put_ir_tx(struct IR_tx *tx, bool ir_devices_lock_held)
-{
- int released;
- struct IR *ir = tx->ir;
-
- spin_lock(&ir->tx_ref_lock);
- released = kref_put(&tx->ref, release_ir_tx);
- spin_unlock(&ir->tx_ref_lock);
- /* Do a reference put() for the tx->ir reference, if we released tx */
- if (released)
- put_ir_device(ir, ir_devices_lock_held);
- return released;
-}
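/*
 * Self-contained sketch of the get()/put() pattern used for struct IR,
 * struct IR_rx and struct IR_tx above: the reference count is adjusted
 * under a lock, but destruction (which may sleep) is done only after
 * the lock has been dropped. Uses pthreads in place of kref/spinlock;
 * all names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	pthread_mutex_t lock;
};

static struct obj *obj_get(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->refs++;
	pthread_mutex_unlock(&o->lock);
	return o;
}

static void obj_put(struct obj *o)
{
	int release;

	pthread_mutex_lock(&o->lock);
	release = (--o->refs == 0);
	pthread_mutex_unlock(&o->lock);

	/* tear down outside the lock, mirroring put_ir_rx()/put_ir_tx() */
	if (release) {
		printf("releasing object\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	pthread_mutex_init(&o->lock, NULL);
	o->refs = 1;			/* initial reference */
	obj_get(o);
	obj_put(o);
	obj_put(o);			/* last put frees the object */
	return 0;
}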
-
-static int add_to_buf(struct IR *ir)
-{
- __u16 code;
- unsigned char codes[2];
- unsigned char keybuf[6];
- int got_data = 0;
- int ret;
- int failures = 0;
- unsigned char sendbuf[1] = { 0 };
- struct lirc_buffer *rbuf = ir->l.rbuf;
- struct IR_rx *rx;
- struct IR_tx *tx;
-
- if (lirc_buffer_full(rbuf)) {
- dev_dbg(ir->l.dev, "buffer overflow\n");
- return -EOVERFLOW;
- }
-
- rx = get_ir_rx(ir);
- if (rx == NULL)
- return -ENXIO;
-
- /* Ensure our rx->c i2c_client remains valid for the duration */
- mutex_lock(&rx->client_lock);
- if (rx->c == NULL) {
- mutex_unlock(&rx->client_lock);
- put_ir_rx(rx, false);
- return -ENXIO;
- }
-
- tx = get_ir_tx(ir);
-
- /*
- * service the device as long as it is returning
- * data and we have space
- */
- do {
- if (kthread_should_stop()) {
- ret = -ENODATA;
- break;
- }
-
- /*
- * Lock i2c bus for the duration. RX/TX chips interfere so
- * this is worth it
- */
- mutex_lock(&ir->ir_lock);
-
- if (kthread_should_stop()) {
- mutex_unlock(&ir->ir_lock);
- ret = -ENODATA;
- break;
- }
-
- /*
- * Send random "poll command" (?) Windows driver does this
- * and it is a good point to detect chip failure.
- */
- ret = i2c_master_send(rx->c, sendbuf, 1);
- if (ret != 1) {
- dev_err(ir->l.dev, "i2c_master_send failed with %d\n",
- ret);
- if (failures >= 3) {
- mutex_unlock(&ir->ir_lock);
- dev_err(ir->l.dev,
- "unable to read from the IR chip after 3 resets, giving up\n");
- break;
- }
-
- /* Looks like the chip crashed, reset it */
- dev_err(ir->l.dev,
- "polling the IR receiver chip failed, trying reset\n");
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (kthread_should_stop()) {
- mutex_unlock(&ir->ir_lock);
- ret = -ENODATA;
- break;
- }
- schedule_timeout((100 * HZ + 999) / 1000);
- if (tx != NULL)
- tx->need_boot = 1;
-
- ++failures;
- mutex_unlock(&ir->ir_lock);
- ret = 0;
- continue;
- }
-
- if (kthread_should_stop()) {
- mutex_unlock(&ir->ir_lock);
- ret = -ENODATA;
- break;
- }
- ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
- mutex_unlock(&ir->ir_lock);
- if (ret != sizeof(keybuf)) {
- dev_err(ir->l.dev,
- "i2c_master_recv failed with %d -- keeping last read buffer\n",
- ret);
- } else {
- rx->b[0] = keybuf[3];
- rx->b[1] = keybuf[4];
- rx->b[2] = keybuf[5];
- dev_dbg(ir->l.dev,
- "key (0x%02x/0x%02x)\n",
- rx->b[0], rx->b[1]);
- }
-
- /* key pressed ? */
- if (rx->hdpvr_data_fmt) {
- if (got_data && (keybuf[0] == 0x80)) {
- ret = 0;
- break;
- } else if (got_data && (keybuf[0] == 0x00)) {
- ret = -ENODATA;
- break;
- }
- } else if ((rx->b[0] & 0x80) == 0) {
- ret = got_data ? 0 : -ENODATA;
- break;
- }
-
- /* look what we have */
- code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
-
- codes[0] = (code >> 8) & 0xff;
- codes[1] = code & 0xff;
-
- /* return it */
- lirc_buffer_write(rbuf, codes);
- ++got_data;
- ret = 0;
- } while (!lirc_buffer_full(rbuf));
-
- mutex_unlock(&rx->client_lock);
- if (tx != NULL)
- put_ir_tx(tx, false);
- put_ir_rx(rx, false);
- return ret;
-}
-
-/*
- * Main function of the polling thread -- from lirc_dev.
- * We don't fit the LIRC model at all anymore. This is horrible, but
- * basically we have a single RX/TX device with a nasty failure mode
- * that needs to be accounted for across the pair. lirc lets us provide
- * fops, but prevents us from using the internal polling, etc. if we do
- * so. Hence the replication. Might be neater to extend the LIRC model
- * to account for this but I'd think it's a very special case of seriously
- * messed up hardware.
- */
-static int lirc_thread(void *arg)
-{
- struct IR *ir = arg;
- struct lirc_buffer *rbuf = ir->l.rbuf;
-
- dev_dbg(ir->l.dev, "poll thread started\n");
-
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- /* if device not opened, we can sleep half a second */
- if (atomic_read(&ir->open_count) == 0) {
- schedule_timeout(HZ/2);
- continue;
- }
-
- /*
- * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
- * We use this interval as the chip resets every time you poll
- * it (bad!). This is therefore just sufficient to catch all
- * of the button presses. It makes the remote much more
- * responsive. You can see the difference by running irw and
- * holding down a button. With 100ms, the old polling
- * interval, you'll notice breaks in the repeat sequence
- * corresponding to lost keypresses.
- */
- schedule_timeout((260 * HZ) / 1000);
- if (kthread_should_stop())
- break;
- if (!add_to_buf(ir))
- wake_up_interruptible(&rbuf->wait_poll);
- }
-
- dev_dbg(ir->l.dev, "poll thread ended\n");
- return 0;
-}
-
-static int set_use_inc(void *data)
-{
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
-}
-
-/* safe read of a uint32 (always network byte order) */
-static int read_uint32(unsigned char **data,
- unsigned char *endp, unsigned int *val)
-{
- if (*data + 4 > endp)
- return 0;
- *val = ((*data)[0] << 24) | ((*data)[1] << 16) |
- ((*data)[2] << 8) | (*data)[3];
- *data += 4;
- return 1;
-}
-
-/* safe read of a uint8 */
-static int read_uint8(unsigned char **data,
- unsigned char *endp, unsigned char *val)
-{
- if (*data + 1 > endp)
- return 0;
- *val = *((*data)++);
- return 1;
-}
-
-/* safe skipping of N bytes */
-static int skip(unsigned char **data,
- unsigned char *endp, unsigned int distance)
-{
- if (*data + distance > endp)
- return 0;
- *data += distance;
- return 1;
-}
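/*
 * Usage sketch for the bounds-checked helpers above: every read advances
 * the cursor only after verifying it stays within 'endp', so a truncated
 * or corrupt firmware image fails the parse instead of reading past the
 * end of the buffer. Standalone example with a made-up five-byte blob.
 */
#include <stdio.h>

static int read_u8(unsigned char **data, unsigned char *endp,
		   unsigned char *val)
{
	if (*data + 1 > endp)
		return 0;
	*val = *(*data)++;
	return 1;
}

static int read_u32(unsigned char **data, unsigned char *endp,
		    unsigned int *val)
{
	if (*data + 4 > endp)
		return 0;
	*val = ((unsigned int)(*data)[0] << 24) | ((*data)[1] << 16) |
	       ((*data)[2] << 8) | (*data)[3];
	*data += 4;
	return 1;
}

int main(void)
{
	unsigned char blob[] = { 0x01, 0x00, 0x00, 0x00, 0x2a };
	unsigned char *p = blob, *end = blob + sizeof(blob);
	unsigned char version;
	unsigned int value;

	if (!read_u8(&p, end, &version) || !read_u32(&p, end, &value)) {
		fprintf(stderr, "corrupt blob\n");
		return 1;
	}
	printf("version %u, value %u\n", version, value);
	return 0;
}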
-
-/* decompress key data into the given buffer */
-static int get_key_data(unsigned char *buf,
- unsigned int codeset, unsigned int key)
-{
- unsigned char *data, *endp, *diffs, *key_block;
- unsigned char keys, ndiffs, id;
- unsigned int base, lim, pos, i;
-
- /* Binary search for the codeset */
- for (base = 0, lim = tx_data->num_code_sets; lim; lim >>= 1) {
- pos = base + (lim >> 1);
- data = tx_data->code_sets[pos];
-
- if (!read_uint32(&data, tx_data->endp, &i))
- goto corrupt;
-
- if (i == codeset)
- break;
- else if (codeset > i) {
- base = pos + 1;
- --lim;
- }
- }
- /* Not found? */
- if (!lim)
- return -EPROTO;
-
- /* Set end of data block */
- endp = pos < tx_data->num_code_sets - 1 ?
- tx_data->code_sets[pos + 1] : tx_data->endp;
-
- /* Read the block header */
- if (!read_uint8(&data, endp, &keys) ||
- !read_uint8(&data, endp, &ndiffs) ||
- ndiffs > TX_BLOCK_SIZE || keys == 0)
- goto corrupt;
-
- /* Save diffs & skip */
- diffs = data;
- if (!skip(&data, endp, ndiffs))
- goto corrupt;
-
- /* Read the id of the first key */
- if (!read_uint8(&data, endp, &id))
- goto corrupt;
-
- /* Unpack the first key's data */
- for (i = 0; i < TX_BLOCK_SIZE; ++i) {
- if (tx_data->fixed[i] == -1) {
- if (!read_uint8(&data, endp, &buf[i]))
- goto corrupt;
- } else {
- buf[i] = (unsigned char)tx_data->fixed[i];
- }
- }
-
- /* Early out key found/not found */
- if (key == id)
- return 0;
- if (keys == 1)
- return -EPROTO;
-
- /* Sanity check */
- key_block = data;
- if (!skip(&data, endp, (keys - 1) * (ndiffs + 1)))
- goto corrupt;
-
- /* Binary search for the key */
- for (base = 0, lim = keys - 1; lim; lim >>= 1) {
- /* Seek to block */
- unsigned char *key_data;
-
- pos = base + (lim >> 1);
- key_data = key_block + (ndiffs + 1) * pos;
-
- if (*key_data == key) {
- /* skip key id */
- ++key_data;
-
- /* found, so unpack the diffs */
- for (i = 0; i < ndiffs; ++i) {
- unsigned char val;
-
- if (!read_uint8(&key_data, endp, &val) ||
- diffs[i] >= TX_BLOCK_SIZE)
- goto corrupt;
- buf[diffs[i]] = val;
- }
-
- return 0;
- } else if (key > *key_data) {
- base = pos + 1;
- --lim;
- }
- }
- /* Key not found */
- return -EPROTO;
-
-corrupt:
- pr_err("firmware is corrupt\n");
- return -EFAULT;
-}
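/*
 * The codeset and key lookups above use the classic "base/lim" binary
 * search loop: halve 'lim' every pass and move 'base' past the probe
 * when the target is larger. A minimal standalone version over a sorted
 * array of ints, for illustration only.
 */
#include <stdio.h>

static int bsearch_pos(const int *arr, unsigned int n, int key)
{
	unsigned int base = 0, lim, pos;

	for (lim = n; lim; lim >>= 1) {
		pos = base + (lim >> 1);
		if (arr[pos] == key)
			return (int)pos;
		if (key > arr[pos]) {
			base = pos + 1;
			--lim;
		}
	}
	return -1;	/* not found */
}

int main(void)
{
	int codes[] = { 3, 7, 19, 42, 77, 100 };

	printf("42 found at index %d\n", bsearch_pos(codes, 6, 42));
	printf("5 found at index %d\n", bsearch_pos(codes, 6, 5));
	return 0;
}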
-
-/* send a block of data to the IR TX device */
-static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
-{
- int i, j, ret;
- unsigned char buf[5];
-
- for (i = 0; i < TX_BLOCK_SIZE;) {
- int tosend = TX_BLOCK_SIZE - i;
-
- if (tosend > 4)
- tosend = 4;
- buf[0] = (unsigned char)(i + 1);
- for (j = 0; j < tosend; ++j)
- buf[1 + j] = data_block[i + j];
- dev_dbg(tx->ir->l.dev, "%*ph", 5, buf);
- ret = i2c_master_send(tx->c, buf, tosend + 1);
- if (ret != tosend + 1) {
- dev_err(tx->ir->l.dev,
- "i2c_master_send failed with %d\n", ret);
- return ret < 0 ? ret : -EFAULT;
- }
- i += tosend;
- }
- return 0;
-}
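/*
 * Standalone sketch of the chunking done by send_data_block(): the
 * 99-byte block is pushed to the chip in I2C writes of at most five
 * bytes, where the first byte is the 1-based offset of the chunk and up
 * to four payload bytes follow. The actual i2c_master_send() is replaced
 * by a printf for this illustration.
 */
#include <stdio.h>

#define BLOCK_SIZE 99

static void send_block(const unsigned char *block)
{
	unsigned char buf[5];
	int i, j, tosend = 0;

	for (i = 0; i < BLOCK_SIZE; i += tosend) {
		tosend = BLOCK_SIZE - i;
		if (tosend > 4)
			tosend = 4;
		buf[0] = (unsigned char)(i + 1);	/* 1-based offset */
		for (j = 0; j < tosend; j++)
			buf[1 + j] = block[i + j];
		printf("i2c write: offset %u, %d payload byte(s)\n",
		       buf[0], tosend);
	}
}

int main(void)
{
	unsigned char block[BLOCK_SIZE] = { 0 };

	send_block(block);
	return 0;
}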
-
-/* send boot data to the IR TX device */
-static int send_boot_data(struct IR_tx *tx)
-{
- int ret, i;
- unsigned char buf[4];
-
- /* send the boot block */
- ret = send_data_block(tx, tx_data->boot_data);
- if (ret != 0)
- return ret;
-
- /* Hit the go button to activate the new boot data */
- buf[0] = 0x00;
- buf[1] = 0x20;
- ret = i2c_master_send(tx->c, buf, 2);
- if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
- return ret < 0 ? ret : -EFAULT;
- }
-
- /*
-	 * Wait for the Zilog to settle after sending "go" for the boot block upload.
- * Without this delay, the HD-PVR and HVR-1950 both return an -EIO
- * upon attempting to get firmware revision, and tx probe thus fails.
- */
- for (i = 0; i < 10; i++) {
- ret = i2c_master_send(tx->c, buf, 1);
- if (ret == 1)
- break;
- udelay(100);
- }
-
- if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
- return ret < 0 ? ret : -EFAULT;
- }
-
- /* Here comes the firmware version... (hopefully) */
- ret = i2c_master_recv(tx->c, buf, 4);
- if (ret != 4) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
- return 0;
- }
- if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
- dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n",
- buf[0]);
- return 0;
- }
- dev_notice(tx->ir->l.dev,
- "Zilog/Hauppauge IR blaster firmware version %d.%d.%d loaded\n",
- buf[1], buf[2], buf[3]);
-
- return 0;
-}
-
-/* unload "firmware", lock held */
-static void fw_unload_locked(void)
-{
- if (tx_data) {
- vfree(tx_data->code_sets);
-
- vfree(tx_data->datap);
-
- vfree(tx_data);
- tx_data = NULL;
- pr_debug("successfully unloaded IR blaster firmware\n");
- }
-}
-
-/* unload "firmware" for the IR TX device */
-static void fw_unload(void)
-{
- mutex_lock(&tx_data_lock);
- fw_unload_locked();
- mutex_unlock(&tx_data_lock);
-}
-
-/* load "firmware" for the IR TX device */
-static int fw_load(struct IR_tx *tx)
-{
- int ret;
- unsigned int i;
- unsigned char *data, version, num_global_fixed;
- const struct firmware *fw_entry;
-
- /* Already loaded? */
- mutex_lock(&tx_data_lock);
- if (tx_data) {
- ret = 0;
- goto out;
- }
-
- /* Request codeset data file */
- ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
- if (ret != 0) {
- dev_err(tx->ir->l.dev,
- "firmware haup-ir-blaster.bin not available (%d)\n",
- ret);
- ret = ret < 0 ? ret : -EFAULT;
- goto out;
- }
- dev_dbg(tx->ir->l.dev, "firmware of size %zu loaded\n", fw_entry->size);
-
- /* Parse the file */
- tx_data = vmalloc(sizeof(*tx_data));
- if (tx_data == NULL) {
- release_firmware(fw_entry);
- ret = -ENOMEM;
- goto out;
- }
- tx_data->code_sets = NULL;
-
-	/* Copy the data so hotplug doesn't get confused and time out */
- tx_data->datap = vmalloc(fw_entry->size);
- if (tx_data->datap == NULL) {
- release_firmware(fw_entry);
- vfree(tx_data);
- ret = -ENOMEM;
- goto out;
- }
- memcpy(tx_data->datap, fw_entry->data, fw_entry->size);
- tx_data->endp = tx_data->datap + fw_entry->size;
- release_firmware(fw_entry); fw_entry = NULL;
-
- /* Check version */
- data = tx_data->datap;
- if (!read_uint8(&data, tx_data->endp, &version))
- goto corrupt;
- if (version != 1) {
- dev_err(tx->ir->l.dev,
- "unsupported code set file version (%u, expected 1) -- please upgrade to a newer driver\n",
- version);
- fw_unload_locked();
- ret = -EFAULT;
- goto out;
- }
-
- /* Save boot block for later */
- tx_data->boot_data = data;
- if (!skip(&data, tx_data->endp, TX_BLOCK_SIZE))
- goto corrupt;
-
- if (!read_uint32(&data, tx_data->endp,
- &tx_data->num_code_sets))
- goto corrupt;
-
- dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
- tx_data->num_code_sets);
-
- tx_data->code_sets = vmalloc(
- tx_data->num_code_sets * sizeof(char *));
- if (tx_data->code_sets == NULL) {
- fw_unload_locked();
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < TX_BLOCK_SIZE; ++i)
- tx_data->fixed[i] = -1;
-
- /* Read global fixed data template */
- if (!read_uint8(&data, tx_data->endp, &num_global_fixed) ||
- num_global_fixed > TX_BLOCK_SIZE)
- goto corrupt;
- for (i = 0; i < num_global_fixed; ++i) {
- unsigned char pos, val;
-
- if (!read_uint8(&data, tx_data->endp, &pos) ||
- !read_uint8(&data, tx_data->endp, &val) ||
- pos >= TX_BLOCK_SIZE)
- goto corrupt;
- tx_data->fixed[pos] = (int)val;
- }
-
- /* Filch out the position of each code set */
- for (i = 0; i < tx_data->num_code_sets; ++i) {
- unsigned int id;
- unsigned char keys;
- unsigned char ndiffs;
-
- /* Save the codeset position */
- tx_data->code_sets[i] = data;
-
- /* Read header */
- if (!read_uint32(&data, tx_data->endp, &id) ||
- !read_uint8(&data, tx_data->endp, &keys) ||
- !read_uint8(&data, tx_data->endp, &ndiffs) ||
- ndiffs > TX_BLOCK_SIZE || keys == 0)
- goto corrupt;
-
- /* skip diff positions */
- if (!skip(&data, tx_data->endp, ndiffs))
- goto corrupt;
-
- /*
- * After the diffs we have the first key id + data -
- * global fixed
- */
- if (!skip(&data, tx_data->endp,
- 1 + TX_BLOCK_SIZE - num_global_fixed))
- goto corrupt;
-
- /* Then we have keys-1 blocks of key id+diffs */
- if (!skip(&data, tx_data->endp,
- (ndiffs + 1) * (keys - 1)))
- goto corrupt;
- }
- ret = 0;
- goto out;
-
-corrupt:
- dev_err(tx->ir->l.dev, "firmware is corrupt\n");
- fw_unload_locked();
- ret = -EFAULT;
-
-out:
- mutex_unlock(&tx_data_lock);
- return ret;
-}
-
-/* copied from lirc_dev */
-static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
- loff_t *ppos)
-{
- struct IR *ir = filep->private_data;
- struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
- int ret = 0, written = 0, retries = 0;
- unsigned int m;
- DECLARE_WAITQUEUE(wait, current);
-
- dev_dbg(ir->l.dev, "read called\n");
- if (n % rbuf->chunk_size) {
- dev_dbg(ir->l.dev, "read result = -EINVAL\n");
- return -EINVAL;
- }
-
- rx = get_ir_rx(ir);
- if (rx == NULL)
- return -ENXIO;
-
- /*
-	 * we add ourselves to the wait queue before checking the buffer,
-	 * to avoid losing a scan code (in case the queue is woken somewhere
-	 * between the while-condition check and the call to schedule())
- */
- add_wait_queue(&rbuf->wait_poll, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- /*
- * while we didn't provide 'length' bytes, device is opened in blocking
- * mode and 'copy_to_user' is happy, wait for data.
- */
- while (written < n && ret == 0) {
- if (lirc_buffer_empty(rbuf)) {
- /*
- * According to the read(2) man page, 'written' can be
- * returned as less than 'n', instead of blocking
- * again, returning -EWOULDBLOCK, or returning
- * -ERESTARTSYS
- */
- if (written)
- break;
- if (filep->f_flags & O_NONBLOCK) {
- ret = -EWOULDBLOCK;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- } else {
- unsigned char buf[MAX_XFER_SIZE];
-
- if (rbuf->chunk_size > sizeof(buf)) {
- dev_err(ir->l.dev,
- "chunk_size is too big (%d)!\n",
- rbuf->chunk_size);
- ret = -EINVAL;
- break;
- }
- m = lirc_buffer_read(rbuf, buf);
- if (m == rbuf->chunk_size) {
- ret = copy_to_user(outbuf + written, buf,
- rbuf->chunk_size);
- written += rbuf->chunk_size;
- } else {
- retries++;
- }
- if (retries >= 5) {
- dev_err(ir->l.dev, "Buffer read failed!\n");
- ret = -EIO;
- }
- }
- }
-
- remove_wait_queue(&rbuf->wait_poll, &wait);
- put_ir_rx(rx, false);
- set_current_state(TASK_RUNNING);
-
- dev_dbg(ir->l.dev, "read result = %d (%s)\n", ret,
- ret ? "Error" : "OK");
-
- return ret ? ret : written;
-}
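/*
 * The read() loop above registers on the wait queue *before* testing the
 * buffer, so a wakeup arriving between the emptiness check and the sleep
 * is not lost. A rough user-space analogue with a pthread condition
 * variable, where holding the mutex across the check plays the role of
 * the early wait-queue registration; names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int available;

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	available = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	while (!available)		/* re-check after every wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("data available, would copy it to the caller here\n");
	pthread_join(t, NULL);
	return 0;
}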
-
-/* send a keypress to the IR TX device */
-static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
-{
- unsigned char data_block[TX_BLOCK_SIZE];
- unsigned char buf[2];
- int i, ret;
-
- /* Get data for the codeset/key */
- ret = get_key_data(data_block, code, key);
-
- if (ret == -EPROTO) {
- dev_err(tx->ir->l.dev,
- "failed to get data for code %u, key %u -- check lircd.conf entries\n",
- code, key);
- return ret;
- } else if (ret != 0)
- return ret;
-
- /* Send the data block */
- ret = send_data_block(tx, data_block);
- if (ret != 0)
- return ret;
-
- /* Send data block length? */
- buf[0] = 0x00;
- buf[1] = 0x40;
- ret = i2c_master_send(tx->c, buf, 2);
- if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
- return ret < 0 ? ret : -EFAULT;
- }
-
- /* Give the z8 a moment to process data block */
- for (i = 0; i < 10; i++) {
- ret = i2c_master_send(tx->c, buf, 1);
- if (ret == 1)
- break;
- udelay(100);
- }
-
- if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
- return ret < 0 ? ret : -EFAULT;
- }
-
- /* Send finished download? */
- ret = i2c_master_recv(tx->c, buf, 1);
- if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
- return ret < 0 ? ret : -EFAULT;
- }
- if (buf[0] != 0xA0) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #1: %02x\n",
- buf[0]);
- return -EFAULT;
- }
-
- /* Send prepare command? */
- buf[0] = 0x00;
- buf[1] = 0x80;
- ret = i2c_master_send(tx->c, buf, 2);
- if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
- return ret < 0 ? ret : -EFAULT;
- }
-
- /*
- * The sleep bits aren't necessary on the HD PVR, and in fact, the
- * last i2c_master_recv always fails with a -5, so for now, we're
- * going to skip this whole mess and say we're done on the HD PVR
- */
- if (!tx->post_tx_ready_poll) {
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
- return 0;
- }
-
- /*
- * This bit NAKs until the device is ready, so we retry it
- * sleeping a bit each time. This seems to be what the windows
- * driver does, approximately.
- * Try for up to 1s.
- */
- for (i = 0; i < 20; ++i) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((50 * HZ + 999) / 1000);
- ret = i2c_master_send(tx->c, buf, 1);
- if (ret == 1)
- break;
- dev_dbg(tx->ir->l.dev,
- "NAK expected: i2c_master_send failed with %d (try %d)\n",
- ret, i+1);
- }
- if (ret != 1) {
- dev_err(tx->ir->l.dev,
- "IR TX chip never got ready: last i2c_master_send failed with %d\n",
- ret);
- return ret < 0 ? ret : -EFAULT;
- }
-
- /* Seems to be an 'ok' response */
- i = i2c_master_recv(tx->c, buf, 1);
- if (i != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
- return -EFAULT;
- }
- if (buf[0] != 0x80) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n",
- buf[0]);
- return -EFAULT;
- }
-
- /* Oh good, it worked */
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
- return 0;
-}
-
-/*
- * Write a code to the device. We take in a 32-bit number (an int) and then
- * decode this to a codeset/key index. The key data is then decompressed and
- * sent to the device. We have a spin lock as per i2c documentation to prevent
- * multiple concurrent sends which would probably cause the device to explode.
- */
-static ssize_t write(struct file *filep, const char __user *buf, size_t n,
- loff_t *ppos)
-{
- struct IR *ir = filep->private_data;
- struct IR_tx *tx;
- size_t i;
- int failures = 0;
-
- /* Validate user parameters */
- if (n % sizeof(int))
- return -EINVAL;
-
- /* Get a struct IR_tx reference */
- tx = get_ir_tx(ir);
- if (tx == NULL)
- return -ENXIO;
-
- /* Ensure our tx->c i2c_client remains valid for the duration */
- mutex_lock(&tx->client_lock);
- if (tx->c == NULL) {
- mutex_unlock(&tx->client_lock);
- put_ir_tx(tx, false);
- return -ENXIO;
- }
-
- /* Lock i2c bus for the duration */
- mutex_lock(&ir->ir_lock);
-
- /* Send each keypress */
- for (i = 0; i < n;) {
- int ret = 0;
- int command;
-
- if (copy_from_user(&command, buf + i, sizeof(command))) {
- mutex_unlock(&ir->ir_lock);
- mutex_unlock(&tx->client_lock);
- put_ir_tx(tx, false);
- return -EFAULT;
- }
-
- /* Send boot data first if required */
- if (tx->need_boot == 1) {
- /* Make sure we have the 'firmware' loaded, first */
- ret = fw_load(tx);
- if (ret != 0) {
- mutex_unlock(&ir->ir_lock);
- mutex_unlock(&tx->client_lock);
- put_ir_tx(tx, false);
- if (ret != -ENOMEM)
- ret = -EIO;
- return ret;
- }
- /* Prep the chip for transmitting codes */
- ret = send_boot_data(tx);
- if (ret == 0)
- tx->need_boot = 0;
- }
-
- /* Send the code */
- if (ret == 0) {
- ret = send_code(tx, (unsigned)command >> 16,
- (unsigned)command & 0xFFFF);
- if (ret == -EPROTO) {
- mutex_unlock(&ir->ir_lock);
- mutex_unlock(&tx->client_lock);
- put_ir_tx(tx, false);
- return ret;
- }
- }
-
- /*
- * Hmm, a failure. If we've had a few then give up, otherwise
- * try a reset
- */
- if (ret != 0) {
- /* Looks like the chip crashed, reset it */
- dev_err(tx->ir->l.dev,
- "sending to the IR transmitter chip failed, trying reset\n");
-
- if (failures >= 3) {
- dev_err(tx->ir->l.dev,
- "unable to send to the IR chip after 3 resets, giving up\n");
- mutex_unlock(&ir->ir_lock);
- mutex_unlock(&tx->client_lock);
- put_ir_tx(tx, false);
- return ret;
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((100 * HZ + 999) / 1000);
- tx->need_boot = 1;
- ++failures;
- } else
- i += sizeof(int);
- }
-
- /* Release i2c bus */
- mutex_unlock(&ir->ir_lock);
-
- mutex_unlock(&tx->client_lock);
-
- /* Give back our struct IR_tx reference */
- put_ir_tx(tx, false);
-
- /* All looks good */
- return n;
-}
-
-/* copied from lirc_dev */
-static unsigned int poll(struct file *filep, poll_table *wait)
-{
- struct IR *ir = filep->private_data;
- struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
- unsigned int ret;
-
- dev_dbg(ir->l.dev, "poll called\n");
-
- rx = get_ir_rx(ir);
- if (rx == NULL) {
- /*
- * Revisit this, if our poll function ever reports writeable
- * status for Tx
- */
- dev_dbg(ir->l.dev, "poll result = POLLERR\n");
- return POLLERR;
- }
-
- /*
- * Add our lirc_buffer's wait_queue to the poll_table. A wake up on
- * that buffer's wait queue indicates we may have a new poll status.
- */
- poll_wait(filep, &rbuf->wait_poll, wait);
-
- /* Indicate what ops could happen immediately without blocking */
- ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);
-
- dev_dbg(ir->l.dev, "poll result = %s\n",
- ret ? "POLLIN|POLLRDNORM" : "none");
- return ret;
-}
-
-static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- struct IR *ir = filep->private_data;
- unsigned long __user *uptr = (unsigned long __user *)arg;
- int result;
- unsigned long mode, features;
-
- features = ir->l.features;
-
- switch (cmd) {
- case LIRC_GET_LENGTH:
- result = put_user(13UL, uptr);
- break;
- case LIRC_GET_FEATURES:
- result = put_user(features, uptr);
- break;
- case LIRC_GET_REC_MODE:
- if (!(features&LIRC_CAN_REC_MASK))
- return -ENOSYS;
-
- result = put_user(LIRC_REC2MODE
- (features&LIRC_CAN_REC_MASK),
- uptr);
- break;
- case LIRC_SET_REC_MODE:
- if (!(features&LIRC_CAN_REC_MASK))
- return -ENOSYS;
-
- result = get_user(mode, uptr);
- if (!result && !(LIRC_MODE2REC(mode) & features))
- result = -EINVAL;
- break;
- case LIRC_GET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
- return -ENOSYS;
-
- result = put_user(LIRC_MODE_PULSE, uptr);
- break;
- case LIRC_SET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
- return -ENOSYS;
-
- result = get_user(mode, uptr);
- if (!result && mode != LIRC_MODE_PULSE)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
- return result;
-}
-
-static struct IR *get_ir_device_by_minor(unsigned int minor)
-{
- struct IR *ir;
- struct IR *ret = NULL;
-
- mutex_lock(&ir_devices_lock);
-
- if (!list_empty(&ir_devices_list)) {
- list_for_each_entry(ir, &ir_devices_list, list) {
- if (ir->l.minor == minor) {
- ret = get_ir_device(ir, true);
- break;
- }
- }
- }
-
- mutex_unlock(&ir_devices_lock);
- return ret;
-}
-
-/*
- * Open the IR device. Get hold of our IR structure and
- * stash it in private_data for the file
- */
-static int open(struct inode *node, struct file *filep)
-{
- struct IR *ir;
- unsigned int minor = MINOR(node->i_rdev);
-
- /* find our IR struct */
- ir = get_ir_device_by_minor(minor);
-
- if (ir == NULL)
- return -ENODEV;
-
- atomic_inc(&ir->open_count);
-
- /* stash our IR struct */
- filep->private_data = ir;
-
- nonseekable_open(node, filep);
- return 0;
-}
-
-/* Close the IR device */
-static int close(struct inode *node, struct file *filep)
-{
- /* find our IR struct */
- struct IR *ir = filep->private_data;
-
- if (ir == NULL) {
- pr_err("ir: close: no private_data attached to the file!\n");
- return -ENODEV;
- }
-
- atomic_dec(&ir->open_count);
-
- put_ir_device(ir, false);
- return 0;
-}
-
-static int ir_remove(struct i2c_client *client);
-static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
-
-#define ID_FLAG_TX 0x01
-#define ID_FLAG_HDPVR 0x02
-
-static const struct i2c_device_id ir_transceiver_id[] = {
- { "ir_tx_z8f0811_haup", ID_FLAG_TX },
- { "ir_rx_z8f0811_haup", 0 },
- { "ir_tx_z8f0811_hdpvr", ID_FLAG_HDPVR | ID_FLAG_TX },
- { "ir_rx_z8f0811_hdpvr", ID_FLAG_HDPVR },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ir_transceiver_id);
-
-static struct i2c_driver driver = {
- .driver = {
- .name = "Zilog/Hauppauge i2c IR",
- },
- .probe = ir_probe,
- .remove = ir_remove,
- .id_table = ir_transceiver_id,
-};
-
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = read,
- .write = write,
- .poll = poll,
- .unlocked_ioctl = ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ioctl,
-#endif
- .open = open,
- .release = close
-};
-
-static struct lirc_driver lirc_template = {
- .name = "lirc_zilog",
- .minor = -1,
- .code_length = 13,
- .buffer_size = BUFLEN / 2,
- .sample_rate = 0, /* tell lirc_dev to not start its own kthread */
- .chunk_size = 2,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .owner = THIS_MODULE,
-};
-
-static int ir_remove(struct i2c_client *client)
-{
- if (strncmp("ir_tx_z8", client->name, 8) == 0) {
- struct IR_tx *tx = i2c_get_clientdata(client);
-
- if (tx != NULL) {
- mutex_lock(&tx->client_lock);
- tx->c = NULL;
- mutex_unlock(&tx->client_lock);
- put_ir_tx(tx, false);
- }
- } else if (strncmp("ir_rx_z8", client->name, 8) == 0) {
- struct IR_rx *rx = i2c_get_clientdata(client);
-
- if (rx != NULL) {
- mutex_lock(&rx->client_lock);
- rx->c = NULL;
- mutex_unlock(&rx->client_lock);
- put_ir_rx(rx, false);
- }
- }
- return 0;
-}
-
-
-/* ir_devices_lock must be held */
-static struct IR *get_ir_device_by_adapter(struct i2c_adapter *adapter)
-{
- struct IR *ir;
-
- if (list_empty(&ir_devices_list))
- return NULL;
-
- list_for_each_entry(ir, &ir_devices_list, list)
- if (ir->adapter == adapter) {
- get_ir_device(ir, true);
- return ir;
- }
-
- return NULL;
-}
-
-static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
-{
- struct IR *ir;
- struct IR_tx *tx;
- struct IR_rx *rx;
- struct i2c_adapter *adap = client->adapter;
- int ret;
- bool tx_probe = false;
-
- dev_dbg(&client->dev, "%s: %s on i2c-%d (%s), client addr=0x%02x\n",
- __func__, id->name, adap->nr, adap->name, client->addr);
-
- /*
- * The IR receiver is at i2c address 0x71.
- * The IR transmitter is at i2c address 0x70.
- */
-
- if (id->driver_data & ID_FLAG_TX)
- tx_probe = true;
- else if (tx_only) /* module option */
- return -ENXIO;
-
- pr_info("probing IR %s on %s (i2c-%d)\n",
- tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
-
- mutex_lock(&ir_devices_lock);
-
- /* Use a single struct IR instance for both the Rx and Tx functions */
- ir = get_ir_device_by_adapter(adap);
- if (ir == NULL) {
- ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
- if (ir == NULL) {
- ret = -ENOMEM;
- goto out_no_ir;
- }
- kref_init(&ir->ref);
-
- /* store for use in ir_probe() again, and open() later on */
- INIT_LIST_HEAD(&ir->list);
- list_add_tail(&ir->list, &ir_devices_list);
-
- ir->adapter = adap;
- mutex_init(&ir->ir_lock);
- atomic_set(&ir->open_count, 0);
- spin_lock_init(&ir->tx_ref_lock);
- spin_lock_init(&ir->rx_ref_lock);
-
- /* set lirc_dev stuff */
- memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
- /*
- * FIXME this is a pointer reference to us, but no refcount.
- *
-		 * This is OK for now, since lirc_dev currently won't touch this
- * buffer as we provide our own lirc_fops.
- *
- * Currently our own lirc_fops rely on this ir->l.rbuf pointer
- */
- ir->l.rbuf = &ir->rbuf;
- ir->l.dev = &adap->dev;
- ret = lirc_buffer_init(ir->l.rbuf,
- ir->l.chunk_size, ir->l.buffer_size);
- if (ret)
- goto out_put_ir;
- }
-
- if (tx_probe) {
- /* Get the IR_rx instance for later, if already allocated */
- rx = get_ir_rx(ir);
-
- /* Set up a struct IR_tx instance */
- tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
- if (tx == NULL) {
- ret = -ENOMEM;
- goto out_put_xx;
- }
- kref_init(&tx->ref);
- ir->tx = tx;
-
- ir->l.features |= LIRC_CAN_SEND_PULSE;
- mutex_init(&tx->client_lock);
- tx->c = client;
- tx->need_boot = 1;
- tx->post_tx_ready_poll =
- (id->driver_data & ID_FLAG_HDPVR) ? false : true;
-
- /* An ir ref goes to the struct IR_tx instance */
- tx->ir = get_ir_device(ir, true);
-
- /* A tx ref goes to the i2c_client */
- i2c_set_clientdata(client, get_ir_tx(ir));
-
- /*
- * Load the 'firmware'. We do this before registering with
- * lirc_dev, so the first firmware load attempt does not happen
- * after a open() or write() call on the device.
- *
- * Failure here is not deemed catastrophic, so the receiver will
- * still be usable. Firmware load will be retried in write(),
- * if it is needed.
- */
- fw_load(tx);
-
- /* Proceed only if the Rx client is also ready or not needed */
- if (rx == NULL && !tx_only) {
- dev_info(tx->ir->l.dev,
- "probe of IR Tx on %s (i2c-%d) done. Waiting on IR Rx.\n",
- adap->name, adap->nr);
- goto out_ok;
- }
- } else {
- /* Get the IR_tx instance for later, if already allocated */
- tx = get_ir_tx(ir);
-
- /* Set up a struct IR_rx instance */
- rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
- if (rx == NULL) {
- ret = -ENOMEM;
- goto out_put_xx;
- }
- kref_init(&rx->ref);
- ir->rx = rx;
-
- ir->l.features |= LIRC_CAN_REC_LIRCCODE;
- mutex_init(&rx->client_lock);
- rx->c = client;
- rx->hdpvr_data_fmt =
- (id->driver_data & ID_FLAG_HDPVR) ? true : false;
-
- /* An ir ref goes to the struct IR_rx instance */
- rx->ir = get_ir_device(ir, true);
-
- /* An rx ref goes to the i2c_client */
- i2c_set_clientdata(client, get_ir_rx(ir));
-
- /*
- * Start the polling thread.
- * It will only perform an empty loop around schedule_timeout()
- * until we register with lirc_dev and the first user open()
- */
- /* An ir ref goes to the new rx polling kthread */
- rx->task = kthread_run(lirc_thread, get_ir_device(ir, true),
- "zilog-rx-i2c-%d", adap->nr);
- if (IS_ERR(rx->task)) {
- ret = PTR_ERR(rx->task);
- dev_err(tx->ir->l.dev,
- "%s: could not start IR Rx polling thread\n",
- __func__);
- /* Failed kthread, so put back the ir ref */
- put_ir_device(ir, true);
- /* Failure exit, so put back rx ref from i2c_client */
- i2c_set_clientdata(client, NULL);
- put_ir_rx(rx, true);
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
- goto out_put_xx;
- }
-
- /* Proceed only if the Tx client is also ready */
- if (tx == NULL) {
- pr_info("probe of IR Rx on %s (i2c-%d) done. Waiting on IR Tx.\n",
- adap->name, adap->nr);
- goto out_ok;
- }
- }
-
- /* register with lirc */
- ir->l.minor = minor; /* module option: user requested minor number */
- ir->l.minor = lirc_register_driver(&ir->l);
- if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
- dev_err(tx->ir->l.dev,
- "%s: \"minor\" must be between 0 and %d (%d)!\n",
- __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
- ret = -EBADRQC;
- goto out_put_xx;
- }
- dev_info(ir->l.dev,
- "IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
- adap->name, adap->nr, ir->l.minor);
-
-out_ok:
- if (rx != NULL)
- put_ir_rx(rx, true);
- if (tx != NULL)
- put_ir_tx(tx, true);
- put_ir_device(ir, true);
- dev_info(ir->l.dev,
- "probe of IR %s on %s (i2c-%d) done\n",
- tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
- mutex_unlock(&ir_devices_lock);
- return 0;
-
-out_put_xx:
- if (rx != NULL)
- put_ir_rx(rx, true);
- if (tx != NULL)
- put_ir_tx(tx, true);
-out_put_ir:
- put_ir_device(ir, true);
-out_no_ir:
- dev_err(&client->dev,
- "%s: probing IR %s on %s (i2c-%d) failed with %d\n",
- __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr, ret);
- mutex_unlock(&ir_devices_lock);
- return ret;
-}
-
-static int __init zilog_init(void)
-{
- int ret;
-
- pr_notice("Zilog/Hauppauge IR driver initializing\n");
-
- mutex_init(&tx_data_lock);
-
- request_module("firmware_class");
-
- ret = i2c_add_driver(&driver);
- if (ret)
- pr_err("initialization failed\n");
- else
- pr_notice("initialization complete\n");
-
- return ret;
-}
-
-static void __exit zilog_exit(void)
-{
- i2c_del_driver(&driver);
- /* if loaded */
- fw_unload();
- pr_notice("Zilog/Hauppauge IR driver unloaded\n");
-}
-
-module_init(zilog_init);
-module_exit(zilog_exit);
-
-MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
-MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
- "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
- "Andy Walls");
-MODULE_LICENSE("GPL");
-/* for compat with old name, which isn't all that accurate anymore */
-MODULE_ALIAS("lirc_pvr150");
-
-module_param(minor, int, 0444);
-MODULE_PARM_DESC(minor, "Preferred minor device number");
-
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
-
-module_param(tx_only, bool, 0644);
-MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
diff --git a/drivers/staging/media/max96712/Kconfig b/drivers/staging/media/max96712/Kconfig
new file mode 100644
index 000000000000..117fadf81bd0
--- /dev/null
+++ b/drivers/staging/media/max96712/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_MAX96712
+ tristate "Maxim MAX96712 Quad GMSL2 Deserializer support"
+ depends on I2C
+ depends on OF_GPIO
+ depends on VIDEO_DEV
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ help
+ This driver supports the Maxim MAX96712 Quad GMSL2 Deserializer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max96712.
diff --git a/drivers/staging/media/max96712/Makefile b/drivers/staging/media/max96712/Makefile
new file mode 100644
index 000000000000..70c1974ce3f0
--- /dev/null
+++ b/drivers/staging/media/max96712/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_MAX96712) += max96712.o
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
new file mode 100644
index 000000000000..0751b2e04895
--- /dev/null
+++ b/drivers/staging/media/max96712/max96712.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Maxim MAX96712 Quad GMSL2 Deserializer Driver
+ *
+ * Copyright (C) 2021 Renesas Electronics Corporation
+ * Copyright (C) 2021 Niklas Söderlund
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define DEBUG_EXTRA_REG 0x09
+#define DEBUG_EXTRA_PCLK_25MHZ 0x00
+#define DEBUG_EXTRA_PCLK_75MHZ 0x01
+
+enum max96712_pattern {
+ MAX96712_PATTERN_CHECKERBOARD = 0,
+ MAX96712_PATTERN_GRADIENT,
+};
+
+struct max96712_info {
+ unsigned int dpllfreq;
+ bool have_debug_extra;
+};
+
+struct max96712_priv {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct gpio_desc *gpiod_pwdn;
+
+ const struct max96712_info *info;
+
+ bool cphy;
+ struct v4l2_mbus_config_mipi_csi2 mipi;
+
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct media_pad pads[1];
+
+ enum max96712_pattern pattern;
+};
+
+static int max96712_write(struct max96712_priv *priv, unsigned int reg, u8 val)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(&priv->client->dev, "write 0x%04x failed\n", reg);
+
+ return ret;
+}
+
+static int max96712_update_bits(struct max96712_priv *priv, unsigned int reg,
+ u8 mask, u8 val)
+{
+ int ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret)
+ dev_err(&priv->client->dev, "update 0x%04x failed\n", reg);
+
+ return ret;
+}
+
+static int max96712_write_bulk(struct max96712_priv *priv, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(priv->regmap, reg, val, val_count);
+ if (ret)
+ dev_err(&priv->client->dev, "bulk write 0x%04x failed\n", reg);
+
+ return ret;
+}
+
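+/*
+ * Write an integer value to a run of consecutive registers, most significant
+ * byte first. val_count must not exceed 4 (the size of the scratch buffer).
+ */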
+static int max96712_write_bulk_value(struct max96712_priv *priv,
+ unsigned int reg, unsigned int val,
+ size_t val_count)
+{
+ unsigned int i;
+ u8 values[4];
+
+ for (i = 1; i <= val_count; i++)
+ values[i - 1] = (val >> ((val_count - i) * 8)) & 0xff;
+
+ return max96712_write_bulk(priv, reg, &values, val_count);
+}
+
+static void max96712_reset(struct max96712_priv *priv)
+{
+ max96712_update_bits(priv, 0x13, 0x40, 0x40);
+ msleep(20);
+}
+
+static void max96712_mipi_enable(struct max96712_priv *priv, bool enable)
+{
+ if (enable) {
+ max96712_update_bits(priv, 0x40b, 0x02, 0x02);
+ max96712_update_bits(priv, 0x8a0, 0x80, 0x80);
+ } else {
+ max96712_update_bits(priv, 0x8a0, 0x80, 0x00);
+ max96712_update_bits(priv, 0x40b, 0x02, 0x00);
+ }
+}
+
+static void max96712_mipi_configure(struct max96712_priv *priv)
+{
+ unsigned int i;
+ u8 phy5 = 0;
+
+ max96712_mipi_enable(priv, false);
+
+ /* Select 2x4 mode. */
+ max96712_write(priv, 0x8a0, 0x04);
+
+ /* TODO: Add support for 2-lane and 1-lane configurations. */
+ if (priv->cphy) {
+ /* Configure a 3-lane C-PHY using PHY0 and PHY1. */
+ max96712_write(priv, 0x94a, 0xa0);
+
+ /* Configure C-PHY timings. */
+ max96712_write(priv, 0x8ad, 0x3f);
+ max96712_write(priv, 0x8ae, 0x7d);
+ } else {
+ /* Configure a 4-lane D-PHY using PHY0 and PHY1. */
+ max96712_write(priv, 0x94a, 0xc0);
+ }
+
+ /* Configure lane mapping for PHY0 and PHY1. */
+ /* TODO: Add support for lane swapping. */
+ max96712_write(priv, 0x8a3, 0xe4);
+
+ /* Configure lane polarity for PHY0 and PHY1. */
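+	/* Index 0 is the clock lane (bit 5); data lanes map to bits 0-1 and 3-4. */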
+ for (i = 0; i < priv->mipi.num_data_lanes + 1; i++)
+ if (priv->mipi.lane_polarities[i])
+ phy5 |= BIT(i == 0 ? 5 : i < 3 ? i - 1 : i);
+ max96712_write(priv, 0x8a5, phy5);
+
+ /* Set link frequency for PHY0 and PHY1. */
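+	/* e.g. dpllfreq = 1000 (MAX96712): (1000 / 100) & 0x1f = 0x0a, with bit 5 set. */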
+ max96712_update_bits(priv, 0x415, 0x3f,
+ ((priv->info->dpllfreq / 100) & 0x1f) | BIT(5));
+ max96712_update_bits(priv, 0x418, 0x3f,
+ ((priv->info->dpllfreq / 100) & 0x1f) | BIT(5));
+
+ /* Enable PHY0 and PHY1 */
+ max96712_update_bits(priv, 0x8a2, 0xf0, 0x30);
+}
+
+static void max96712_pattern_enable(struct max96712_priv *priv, bool enable)
+{
+ const u32 h_active = 1920;
+ const u32 h_fp = 88;
+ const u32 h_sw = 44;
+ const u32 h_bp = 148;
+ const u32 h_tot = h_active + h_fp + h_sw + h_bp;
+
+ const u32 v_active = 1080;
+ const u32 v_fp = 4;
+ const u32 v_sw = 5;
+ const u32 v_bp = 36;
+ const u32 v_tot = v_active + v_fp + v_sw + v_bp;
+
+ if (!enable) {
+ max96712_write(priv, 0x1051, 0x00);
+ return;
+ }
+
+	/* Set PCLK to 75 MHz if the device has a DEBUG_EXTRA register. */
+ if (priv->info->have_debug_extra)
+ max96712_write(priv, DEBUG_EXTRA_REG, DEBUG_EXTRA_PCLK_75MHZ);
+
+ /* Configure Video Timing Generator for 1920x1080 @ 30 fps. */
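+	/* h_tot = 2200 and v_tot = 1125: 2200 * 1125 * 30 fps = 74.25 MHz pixel clock. */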
+ max96712_write_bulk_value(priv, 0x1052, 0, 3);
+ max96712_write_bulk_value(priv, 0x1055, v_sw * h_tot, 3);
+ max96712_write_bulk_value(priv, 0x1058,
+				  (v_active + v_fp + v_bp) * h_tot, 3);
+ max96712_write_bulk_value(priv, 0x105b, 0, 3);
+ max96712_write_bulk_value(priv, 0x105e, h_sw, 2);
+ max96712_write_bulk_value(priv, 0x1060, h_active + h_fp + h_bp, 2);
+ max96712_write_bulk_value(priv, 0x1062, v_tot, 2);
+ max96712_write_bulk_value(priv, 0x1064,
+ h_tot * (v_sw + v_bp) + (h_sw + h_bp), 3);
+ max96712_write_bulk_value(priv, 0x1067, h_active, 2);
+ max96712_write_bulk_value(priv, 0x1069, h_fp + h_sw + h_bp, 2);
+ max96712_write_bulk_value(priv, 0x106b, v_active, 2);
+
+ /* Generate VS, HS and DE in free-running mode. */
+ max96712_write(priv, 0x1050, 0xfb);
+
+ /* Configure Video Pattern Generator. */
+ if (priv->pattern == MAX96712_PATTERN_CHECKERBOARD) {
+ /* Set checkerboard pattern size. */
+ max96712_write(priv, 0x1074, 0x3c);
+ max96712_write(priv, 0x1075, 0x3c);
+ max96712_write(priv, 0x1076, 0x3c);
+
+ /* Set checkerboard pattern colors. */
+ max96712_write_bulk_value(priv, 0x106e, 0xfecc00, 3);
+ max96712_write_bulk_value(priv, 0x1071, 0x006aa7, 3);
+
+ /* Generate checkerboard pattern. */
+ max96712_write(priv, 0x1051, 0x10);
+ } else {
+ /* Set gradient increment. */
+ max96712_write(priv, 0x106d, 0x10);
+
+ /* Generate gradient pattern. */
+ max96712_write(priv, 0x1051, 0x20);
+ }
+}
+
+static int max96712_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct max96712_priv *priv = v4l2_get_subdevdata(sd);
+
+ if (enable) {
+ max96712_pattern_enable(priv, true);
+ max96712_mipi_enable(priv, true);
+ } else {
+ max96712_mipi_enable(priv, false);
+ max96712_pattern_enable(priv, false);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops max96712_video_ops = {
+ .s_stream = max96712_s_stream,
+};
+
+static int max96712_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ static const struct v4l2_mbus_framefmt default_fmt = {
+ .width = 1920,
+ .height = 1080,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+ };
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_state_get_format(state, 0);
+ *fmt = default_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops max96712_internal_ops = {
+ .init_state = max96712_init_state,
+};
+
+static const struct v4l2_subdev_pad_ops max96712_pad_ops = {
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = v4l2_subdev_get_fmt,
+};
+
+static const struct v4l2_subdev_ops max96712_subdev_ops = {
+ .video = &max96712_video_ops,
+ .pad = &max96712_pad_ops,
+};
+
+static const char * const max96712_test_pattern[] = {
+ "Checkerboard",
+ "Gradient",
+};
+
+static int max96712_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct max96712_priv *priv =
+ container_of(ctrl->handler, struct max96712_priv, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ priv->pattern = ctrl->val ?
+ MAX96712_PATTERN_GRADIENT :
+ MAX96712_PATTERN_CHECKERBOARD;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops max96712_ctrl_ops = {
+ .s_ctrl = max96712_s_ctrl,
+};
+
+static int max96712_v4l2_register(struct max96712_priv *priv)
+{
+ long pixel_rate;
+ int ret;
+
+ priv->sd.internal_ops = &max96712_internal_ops;
+ v4l2_i2c_subdev_init(&priv->sd, priv->client, &max96712_subdev_ops);
+ priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ v4l2_ctrl_handler_init(&priv->ctrl_handler, 2);
+
+ /*
+ * TODO: Once V4L2_CID_LINK_FREQ is changed from a menu control to an
+ * INT64 control it should be used here instead of V4L2_CID_PIXEL_RATE.
+ */
+ pixel_rate = priv->info->dpllfreq / priv->mipi.num_data_lanes * 1000000;
+ v4l2_ctrl_new_std(&priv->ctrl_handler, NULL, V4L2_CID_PIXEL_RATE,
+ pixel_rate, pixel_rate, 1, pixel_rate);
+
+ v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler, &max96712_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(max96712_test_pattern) - 1,
+ 0, 0, max96712_test_pattern);
+
+ priv->sd.ctrl_handler = &priv->ctrl_handler;
+ ret = priv->ctrl_handler.error;
+ if (ret)
+ goto error;
+
+ priv->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&priv->sd.entity, 1, priv->pads);
+ if (ret)
+ goto error;
+
+ v4l2_set_subdevdata(&priv->sd, priv);
+
+ priv->sd.state_lock = priv->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&priv->sd);
+ if (ret)
+ goto error;
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret < 0) {
+ dev_err(&priv->client->dev, "Unable to register subdevice\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+
+ return ret;
+}
+
+static int max96712_parse_dt(struct max96712_priv *priv)
+{
+ struct fwnode_handle *ep;
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_UNKNOWN,
+ };
+ unsigned int supported_lanes;
+ int ret;
+
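+	/* The CSI-2 output endpoint is looked up on port 4, endpoint 0. */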
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(&priv->client->dev), 4,
+ 0, 0);
+ if (!ep) {
+ dev_err(&priv->client->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(&priv->client->dev, "Could not parse v4l2 endpoint\n");
+ return -EINVAL;
+ }
+
+ switch (v4l2_ep.bus_type) {
+ case V4L2_MBUS_CSI2_DPHY:
+ supported_lanes = 4;
+ priv->cphy = false;
+ break;
+ case V4L2_MBUS_CSI2_CPHY:
+ supported_lanes = 3;
+ priv->cphy = true;
+ break;
+ default:
+ dev_err(&priv->client->dev, "Unsupported bus-type %u\n",
+ v4l2_ep.bus_type);
+ return -EINVAL;
+ }
+
+ if (v4l2_ep.bus.mipi_csi2.num_data_lanes != supported_lanes) {
+ dev_err(&priv->client->dev, "Only %u data lanes supported\n",
+ supported_lanes);
+ return -EINVAL;
+ }
+
+ priv->mipi = v4l2_ep.bus.mipi_csi2;
+
+ return 0;
+}
+
+static const struct regmap_config max96712_i2c_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x1f00,
+};
+
+static int max96712_probe(struct i2c_client *client)
+{
+ struct max96712_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->info = of_device_get_match_data(&client->dev);
+
+ priv->client = client;
+
+ priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->gpiod_pwdn = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_pwdn))
+ return PTR_ERR(priv->gpiod_pwdn);
+
+ gpiod_set_consumer_name(priv->gpiod_pwdn, "max96712-pwdn");
+ gpiod_set_value_cansleep(priv->gpiod_pwdn, 1);
+
+ if (priv->gpiod_pwdn)
+ usleep_range(4000, 5000);
+
+ max96712_reset(priv);
+
+ ret = max96712_parse_dt(priv);
+ if (ret)
+ return ret;
+
+ max96712_mipi_configure(priv);
+
+ return max96712_v4l2_register(priv);
+}
+
+static void max96712_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct max96712_priv *priv = container_of(sd, struct max96712_priv, sd);
+
+ v4l2_async_unregister_subdev(&priv->sd);
+
+ gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
+}
+
+static const struct max96712_info max96712_info_max96712 = {
+ .dpllfreq = 1000,
+ .have_debug_extra = true,
+};
+
+static const struct max96712_info max96712_info_max96724 = {
+ .dpllfreq = 1200,
+};
+
+static const struct of_device_id max96712_of_table[] = {
+ { .compatible = "maxim,max96712", .data = &max96712_info_max96712 },
+ { .compatible = "maxim,max96724", .data = &max96712_info_max96724 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max96712_of_table);
+
+static struct i2c_driver max96712_i2c_driver = {
+ .driver = {
+ .name = "max96712",
+ .of_match_table = of_match_ptr(max96712_of_table),
+ },
+ .probe = max96712_probe,
+ .remove = max96712_remove,
+};
+
+module_i2c_driver(max96712_i2c_driver);
+
+MODULE_DESCRIPTION("Maxim MAX96712 Quad GMSL2 Deserializer Driver");
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/meson/vdec/Kconfig b/drivers/staging/media/meson/vdec/Kconfig
new file mode 100644
index 000000000000..19ffea987b89
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_MESON_VDEC
+ tristate "Amlogic video decoder driver"
+ depends on VIDEO_DEV && HAS_DMA
+ depends on ARCH_MESON || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select MESON_CANVAS
+ help
+ Support for the video decoder found in gxbb/gxl/gxm chips.
diff --git a/drivers/staging/media/meson/vdec/Makefile b/drivers/staging/media/meson/vdec/Makefile
new file mode 100644
index 000000000000..6e726af84ac9
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Amlogic meson video decoder driver
+
+meson-vdec-objs = esparser.o vdec.o vdec_helpers.o vdec_platform.o
+meson-vdec-objs += vdec_1.o vdec_hevc.o
+meson-vdec-objs += codec_mpeg12.o codec_h264.o codec_hevc_common.o codec_vp9.o
+
+obj-$(CONFIG_VIDEO_MESON_VDEC) += meson-vdec.o
diff --git a/drivers/staging/media/meson/vdec/TODO b/drivers/staging/media/meson/vdec/TODO
new file mode 100644
index 000000000000..70ae990cf13b
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/TODO
@@ -0,0 +1,8 @@
+This driver is in staging until the V4L2 documentation about stateful video
+decoders and the corresponding compliance tests are finalized.
+
+At the moment it is not guaranteed to work properly with a userspace stack
+that follows the latest version of the specification, especially with
+compression standards like MPEG1/2, where the driver does not support dynamic
+resolution switching, including the initial resolution change used to
+determine the coded resolution.
diff --git a/drivers/staging/media/meson/vdec/codec_h264.c b/drivers/staging/media/meson/vdec/codec_h264.c
new file mode 100644
index 000000000000..c61128fc4bb9
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_h264.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec_helpers.h"
+#include "dos_regs.h"
+#include "codec_h264.h"
+
+#define SIZE_EXT_FW (20 * SZ_1K)
+#define SIZE_WORKSPACE 0x1ee000
+#define SIZE_SEI (8 * SZ_1K)
+
+/*
+ * Offset added by the firmware, which must be subtracted
+ * from the workspace physical address
+ */
+#define WORKSPACE_BUF_OFFSET 0x1000000
+
+/* ISR status */
+#define CMD_MASK GENMASK(7, 0)
+#define CMD_SRC_CHANGE 1
+#define CMD_FRAMES_READY 2
+#define CMD_FATAL_ERROR 6
+#define CMD_BAD_WIDTH 7
+#define CMD_BAD_HEIGHT 8
+
+#define SEI_DATA_READY BIT(15)
+
+/* Picture type */
+#define PIC_TOP_BOT 5
+#define PIC_BOT_TOP 6
+
+/* Size of Motion Vector per macroblock */
+#define MB_MV_SIZE 96
+
+/* Frame status data */
+#define PIC_STRUCT_BIT 5
+#define PIC_STRUCT_MASK GENMASK(2, 0)
+#define BUF_IDX_MASK GENMASK(4, 0)
+#define ERROR_FLAG BIT(9)
+#define OFFSET_BIT 16
+#define OFFSET_MASK GENMASK(15, 0)
+
+/* Bitstream parsed data */
+#define MB_TOTAL_BIT 8
+#define MB_TOTAL_MASK GENMASK(15, 0)
+#define MB_WIDTH_MASK GENMASK(7, 0)
+#define MAX_REF_BIT 24
+#define MAX_REF_MASK GENMASK(6, 0)
+#define AR_IDC_BIT 16
+#define AR_IDC_MASK GENMASK(7, 0)
+#define AR_PRESENT_FLAG BIT(0)
+#define AR_EXTEND 0xff
+
+/*
+ * Buffer to send to the ESPARSER to signal End Of Stream for H.264.
+ * This is a 16x16 encoded picture that will trigger drain firmware-side.
+ * There is no known alternative.
+ */
+static const u8 eos_sequence[SZ_4K] = {
+ 0x00, 0x00, 0x00, 0x01, 0x06, 0x05, 0xff, 0xe4, 0xdc, 0x45, 0xe9, 0xbd,
+ 0xe6, 0xd9, 0x48, 0xb7, 0x96, 0x2c, 0xd8, 0x20, 0xd9, 0x23, 0xee, 0xef,
+ 0x78, 0x32, 0x36, 0x34, 0x20, 0x2d, 0x20, 0x63, 0x6f, 0x72, 0x65, 0x20,
+ 0x36, 0x37, 0x20, 0x72, 0x31, 0x31, 0x33, 0x30, 0x20, 0x38, 0x34, 0x37,
+ 0x35, 0x39, 0x37, 0x37, 0x20, 0x2d, 0x20, 0x48, 0x2e, 0x32, 0x36, 0x34,
+ 0x2f, 0x4d, 0x50, 0x45, 0x47, 0x2d, 0x34, 0x20, 0x41, 0x56, 0x43, 0x20,
+ 0x63, 0x6f, 0x64, 0x65, 0x63, 0x20, 0x2d, 0x20, 0x43, 0x6f, 0x70, 0x79,
+ 0x6c, 0x65, 0x66, 0x74, 0x20, 0x32, 0x30, 0x30, 0x33, 0x2d, 0x32, 0x30,
+ 0x30, 0x39, 0x20, 0x2d, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x78, 0x32, 0x36, 0x34, 0x2e, 0x68, 0x74,
+ 0x6d, 0x6c, 0x20, 0x2d, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x3a, 0x20, 0x63, 0x61, 0x62, 0x61, 0x63, 0x3d, 0x31, 0x20, 0x72, 0x65,
+ 0x66, 0x3d, 0x31, 0x20, 0x64, 0x65, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x3d,
+ 0x31, 0x3a, 0x30, 0x3a, 0x30, 0x20, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x73,
+ 0x65, 0x3d, 0x30, 0x78, 0x31, 0x3a, 0x30, 0x78, 0x31, 0x31, 0x31, 0x20,
+ 0x6d, 0x65, 0x3d, 0x68, 0x65, 0x78, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x65,
+ 0x3d, 0x36, 0x20, 0x70, 0x73, 0x79, 0x5f, 0x72, 0x64, 0x3d, 0x31, 0x2e,
+ 0x30, 0x3a, 0x30, 0x2e, 0x30, 0x20, 0x6d, 0x69, 0x78, 0x65, 0x64, 0x5f,
+ 0x72, 0x65, 0x66, 0x3d, 0x30, 0x20, 0x6d, 0x65, 0x5f, 0x72, 0x61, 0x6e,
+ 0x67, 0x65, 0x3d, 0x31, 0x36, 0x20, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x61,
+ 0x5f, 0x6d, 0x65, 0x3d, 0x31, 0x20, 0x74, 0x72, 0x65, 0x6c, 0x6c, 0x69,
+ 0x73, 0x3d, 0x30, 0x20, 0x38, 0x78, 0x38, 0x64, 0x63, 0x74, 0x3d, 0x30,
+ 0x20, 0x63, 0x71, 0x6d, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x61, 0x64, 0x7a,
+ 0x6f, 0x6e, 0x65, 0x3d, 0x32, 0x31, 0x2c, 0x31, 0x31, 0x20, 0x63, 0x68,
+ 0x72, 0x6f, 0x6d, 0x61, 0x5f, 0x71, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x3d, 0x2d, 0x32, 0x20, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64,
+ 0x73, 0x3d, 0x31, 0x20, 0x6e, 0x72, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x63,
+ 0x69, 0x6d, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x20, 0x6d, 0x62, 0x61, 0x66,
+ 0x66, 0x3d, 0x30, 0x20, 0x62, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x3d,
+ 0x30, 0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x3d, 0x32, 0x35, 0x30,
+ 0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x3d,
+ 0x32, 0x35, 0x20, 0x73, 0x63, 0x65, 0x6e, 0x65, 0x63, 0x75, 0x74, 0x3d,
+ 0x34, 0x30, 0x20, 0x72, 0x63, 0x3d, 0x61, 0x62, 0x72, 0x20, 0x62, 0x69,
+ 0x74, 0x72, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x30, 0x20, 0x72, 0x61, 0x74,
+ 0x65, 0x74, 0x6f, 0x6c, 0x3d, 0x31, 0x2e, 0x30, 0x20, 0x71, 0x63, 0x6f,
+ 0x6d, 0x70, 0x3d, 0x30, 0x2e, 0x36, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x69,
+ 0x6e, 0x3d, 0x31, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x61, 0x78, 0x3d, 0x35,
+ 0x31, 0x20, 0x71, 0x70, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x34, 0x20, 0x69,
+ 0x70, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x3d, 0x31, 0x2e, 0x34, 0x30,
+ 0x20, 0x61, 0x71, 0x3d, 0x31, 0x3a, 0x31, 0x2e, 0x30, 0x30, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x4d, 0x40, 0x0a, 0x9a, 0x74, 0xf4, 0x20,
+ 0x00, 0x00, 0x03, 0x00, 0x20, 0x00, 0x00, 0x06, 0x51, 0xe2, 0x44, 0xd4,
+ 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x32, 0xc8, 0x00, 0x00, 0x00, 0x01,
+ 0x65, 0x88, 0x80, 0x20, 0x00, 0x08, 0x7f, 0xea, 0x6a, 0xe2, 0x99, 0xb6,
+ 0x57, 0xae, 0x49, 0x30, 0xf5, 0xfe, 0x5e, 0x46, 0x0b, 0x72, 0x44, 0xc4,
+ 0xe1, 0xfc, 0x62, 0xda, 0xf1, 0xfb, 0xa2, 0xdb, 0xd6, 0xbe, 0x5c, 0xd7,
+ 0x24, 0xa3, 0xf5, 0xb9, 0x2f, 0x57, 0x16, 0x49, 0x75, 0x47, 0x77, 0x09,
+ 0x5c, 0xa1, 0xb4, 0xc3, 0x4f, 0x60, 0x2b, 0xb0, 0x0c, 0xc8, 0xd6, 0x66,
+ 0xba, 0x9b, 0x82, 0x29, 0x33, 0x92, 0x26, 0x99, 0x31, 0x1c, 0x7f, 0x9b,
+	0x00, 0x00, 0x01, 0xff,
+};
+
+static const u8 *codec_h264_eos_sequence(u32 *len)
+{
+ *len = ARRAY_SIZE(eos_sequence);
+ return eos_sequence;
+}
+
+struct codec_h264 {
+ /* H.264 decoder requires an extended firmware */
+ void *ext_fw_vaddr;
+ dma_addr_t ext_fw_paddr;
+
+ /* Buffer for the H.264 Workspace */
+ void *workspace_vaddr;
+ dma_addr_t workspace_paddr;
+
+ /* Buffer for the H.264 references MV */
+ void *ref_vaddr;
+ dma_addr_t ref_paddr;
+ u32 ref_size;
+
+ /* Buffer for parsed SEI data */
+ void *sei_vaddr;
+ dma_addr_t sei_paddr;
+
+ u32 mb_width;
+ u32 mb_height;
+ u32 max_refs;
+};
+
+static int codec_h264_can_recycle(struct amvdec_core *core)
+{
+ return !amvdec_read_dos(core, AV_SCRATCH_7) ||
+ !amvdec_read_dos(core, AV_SCRATCH_8);
+}
+
+static void codec_h264_recycle(struct amvdec_core *core, u32 buf_idx)
+{
+ /*
+ * Tell the firmware it can recycle this buffer.
+ * AV_SCRATCH_8 serves the same purpose.
+ */
+ if (!amvdec_read_dos(core, AV_SCRATCH_7))
+ amvdec_write_dos(core, AV_SCRATCH_7, buf_idx + 1);
+ else
+ amvdec_write_dos(core, AV_SCRATCH_8, buf_idx + 1);
+}
+
+static int codec_h264_start(struct amvdec_session *sess)
+{
+ u32 workspace_offset;
+ struct amvdec_core *core = sess->core;
+ struct codec_h264 *h264 = sess->priv;
+
+ /* Allocate some memory for the H.264 decoder's state */
+ h264->workspace_vaddr =
+ dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
+ &h264->workspace_paddr, GFP_KERNEL);
+ if (!h264->workspace_vaddr)
+ return -ENOMEM;
+
+ /* Allocate some memory for the H.264 SEI dump */
+ h264->sei_vaddr = dma_alloc_coherent(core->dev, SIZE_SEI,
+ &h264->sei_paddr, GFP_KERNEL);
+ if (!h264->sei_vaddr)
+ return -ENOMEM;
+
+ amvdec_write_dos_bits(core, POWER_CTL_VLD, BIT(9) | BIT(6));
+
+ workspace_offset = h264->workspace_paddr - WORKSPACE_BUF_OFFSET;
+ amvdec_write_dos(core, AV_SCRATCH_1, workspace_offset);
+ amvdec_write_dos(core, AV_SCRATCH_G, h264->ext_fw_paddr);
+ amvdec_write_dos(core, AV_SCRATCH_I, h264->sei_paddr -
+ workspace_offset);
+
+ /* Enable "error correction" */
+ amvdec_write_dos(core, AV_SCRATCH_F,
+ (amvdec_read_dos(core, AV_SCRATCH_F) & 0xffffffc3) |
+ BIT(4) | BIT(7));
+
+ amvdec_write_dos(core, MDEC_PIC_DC_THRESH, 0x404038aa);
+
+ return 0;
+}
+
+static int codec_h264_stop(struct amvdec_session *sess)
+{
+ struct codec_h264 *h264 = sess->priv;
+ struct amvdec_core *core = sess->core;
+
+ if (h264->ext_fw_vaddr)
+ dma_free_coherent(core->dev, SIZE_EXT_FW,
+ h264->ext_fw_vaddr, h264->ext_fw_paddr);
+
+ if (h264->workspace_vaddr)
+ dma_free_coherent(core->dev, SIZE_WORKSPACE,
+ h264->workspace_vaddr, h264->workspace_paddr);
+
+ if (h264->ref_vaddr)
+ dma_free_coherent(core->dev, h264->ref_size,
+ h264->ref_vaddr, h264->ref_paddr);
+
+ if (h264->sei_vaddr)
+ dma_free_coherent(core->dev, SIZE_SEI,
+ h264->sei_vaddr, h264->sei_paddr);
+
+ return 0;
+}
+
+static int codec_h264_load_extended_firmware(struct amvdec_session *sess,
+ const u8 *data, u32 len)
+{
+ struct codec_h264 *h264;
+ struct amvdec_core *core = sess->core;
+
+ if (len < SIZE_EXT_FW)
+ return -EINVAL;
+
+ h264 = kzalloc(sizeof(*h264), GFP_KERNEL);
+ if (!h264)
+ return -ENOMEM;
+
+ h264->ext_fw_vaddr = dma_alloc_coherent(core->dev, SIZE_EXT_FW,
+ &h264->ext_fw_paddr,
+ GFP_KERNEL);
+ if (!h264->ext_fw_vaddr) {
+ kfree(h264);
+ return -ENOMEM;
+ }
+
+ memcpy(h264->ext_fw_vaddr, data, SIZE_EXT_FW);
+ sess->priv = h264;
+
+ return 0;
+}
+
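+/* Pixel aspect ratios from H.264 Table E-1, indexed by aspect_ratio_idc */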
+static const struct v4l2_fract par_table[] = {
+ { 1, 1 }, { 1, 1 }, { 12, 11 }, { 10, 11 },
+ { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 },
+ { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 },
+ { 64, 33 }, { 160, 99 }, { 4, 3 }, { 3, 2 },
+ { 2, 1 }
+};
+
+static void codec_h264_set_par(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 seq_info = amvdec_read_dos(core, AV_SCRATCH_2);
+ u32 ar_idc = (seq_info >> AR_IDC_BIT) & AR_IDC_MASK;
+
+ if (!(seq_info & AR_PRESENT_FLAG))
+ return;
+
+ if (ar_idc == AR_EXTEND) {
+ u32 ar_info = amvdec_read_dos(core, AV_SCRATCH_3);
+
+ sess->pixelaspect.numerator = ar_info & 0xffff;
+ sess->pixelaspect.denominator = (ar_info >> 16) & 0xffff;
+ return;
+ }
+
+ if (ar_idc >= ARRAY_SIZE(par_table))
+ return;
+
+ sess->pixelaspect = par_table[ar_idc];
+}
+
+static void codec_h264_resume(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_h264 *h264 = sess->priv;
+ u32 mb_width, mb_height, mb_total;
+
+ amvdec_set_canvases(sess,
+ (u32[]){ ANC0_CANVAS_ADDR, 0 },
+ (u32[]){ 24, 0 });
+
+ dev_dbg(core->dev, "max_refs = %u; actual_dpb_size = %u\n",
+ h264->max_refs, sess->num_dst_bufs);
+
+ /* Align to a multiple of 4 macroblocks */
+ mb_width = ALIGN(h264->mb_width, 4);
+ mb_height = ALIGN(h264->mb_height, 4);
+ mb_total = mb_width * mb_height;
+
+ h264->ref_size = mb_total * MB_MV_SIZE * h264->max_refs;
+ h264->ref_vaddr = dma_alloc_coherent(core->dev, h264->ref_size,
+ &h264->ref_paddr, GFP_KERNEL);
+ if (!h264->ref_vaddr) {
+ amvdec_abort(sess);
+ return;
+ }
+
+ /* Address to store the references' MVs */
+ amvdec_write_dos(core, AV_SCRATCH_1, h264->ref_paddr);
+ /* End of ref MV */
+ amvdec_write_dos(core, AV_SCRATCH_4, h264->ref_paddr + h264->ref_size);
+
+ amvdec_write_dos(core, AV_SCRATCH_0, (h264->max_refs << 24) |
+ (sess->num_dst_bufs << 16) |
+ ((h264->max_refs - 1) << 8));
+}
+
+/*
+ * Configure the H.264 decoder when the parser detected a parameter set change
+ */
+static void codec_h264_src_change(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_h264 *h264 = sess->priv;
+ u32 parsed_info, mb_total;
+ u32 crop_infor, crop_bottom, crop_right;
+ u32 frame_width, frame_height;
+
+ sess->keyframe_found = 1;
+
+ parsed_info = amvdec_read_dos(core, AV_SCRATCH_1);
+
+ /* Total number of 16x16 macroblocks */
+ mb_total = (parsed_info >> MB_TOTAL_BIT) & MB_TOTAL_MASK;
+ /* Number of macroblocks per line */
+ h264->mb_width = parsed_info & MB_WIDTH_MASK;
+ /* Number of macroblock lines */
+ h264->mb_height = mb_total / h264->mb_width;
+
+ h264->max_refs = ((parsed_info >> MAX_REF_BIT) & MAX_REF_MASK) + 1;
+
+ crop_infor = amvdec_read_dos(core, AV_SCRATCH_6);
+ crop_bottom = (crop_infor & 0xff);
+ crop_right = (crop_infor >> 16) & 0xff;
+
+ frame_width = h264->mb_width * 16 - crop_right;
+ frame_height = h264->mb_height * 16 - crop_bottom;
+
+ dev_dbg(core->dev, "frame: %ux%u; crop: %u %u\n",
+ frame_width, frame_height, crop_right, crop_bottom);
+
+ codec_h264_set_par(sess);
+ amvdec_src_change(sess, frame_width, frame_height, h264->max_refs + 5);
+}
+
+/*
+ * The bitstream offset is split in half across two registers.
+ * Fetch its MSB here; its location depends on the frame number.
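+ * For example, frame 0 takes the low 16 bits of AV_SCRATCH_A, frame 1 the
+ * high 16 bits, frames 2 and 3 use AV_SCRATCH_A + 4, and so on.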
+ */
+static u32 get_offset_msb(struct amvdec_core *core, int frame_num)
+{
+ int take_msb = frame_num % 2;
+ int reg_offset = (frame_num / 2) * 4;
+ u32 offset_msb = amvdec_read_dos(core, AV_SCRATCH_A + reg_offset);
+
+ if (take_msb)
+ return offset_msb & 0xffff0000;
+
+ return (offset_msb & 0x0000ffff) << 16;
+}
+
+static void codec_h264_frames_ready(struct amvdec_session *sess, u32 status)
+{
+ struct amvdec_core *core = sess->core;
+ int error_count;
+ int num_frames;
+ int i;
+
+ error_count = amvdec_read_dos(core, AV_SCRATCH_D);
+ num_frames = (status >> 8) & 0xff;
+ if (error_count) {
+ dev_warn(core->dev,
+ "decoder error(s) happened, count %d\n", error_count);
+ amvdec_write_dos(core, AV_SCRATCH_D, 0);
+ }
+
+ for (i = 0; i < num_frames; i++) {
+ u32 frame_status = amvdec_read_dos(core, AV_SCRATCH_1 + i * 4);
+ u32 buffer_index = frame_status & BUF_IDX_MASK;
+ u32 pic_struct = (frame_status >> PIC_STRUCT_BIT) &
+ PIC_STRUCT_MASK;
+ u32 offset = (frame_status >> OFFSET_BIT) & OFFSET_MASK;
+ u32 field = V4L2_FIELD_NONE;
+
+ /*
+ * A buffer decode error means it was decoded,
+ * but part of the picture will have artifacts.
+ * Typical reason is a temporarily corrupted bitstream
+ */
+ if (frame_status & ERROR_FLAG)
+ dev_dbg(core->dev, "Buffer %d decode error\n",
+ buffer_index);
+
+ if (pic_struct == PIC_TOP_BOT)
+ field = V4L2_FIELD_INTERLACED_TB;
+ else if (pic_struct == PIC_BOT_TOP)
+ field = V4L2_FIELD_INTERLACED_BT;
+
+ offset |= get_offset_msb(core, i);
+ amvdec_dst_buf_done_idx(sess, buffer_index, offset, field);
+ }
+}
+
+static irqreturn_t codec_h264_threaded_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 status;
+ u32 size;
+ u8 cmd;
+
+ status = amvdec_read_dos(core, AV_SCRATCH_0);
+ cmd = status & CMD_MASK;
+
+ switch (cmd) {
+ case CMD_SRC_CHANGE:
+ codec_h264_src_change(sess);
+ break;
+ case CMD_FRAMES_READY:
+ codec_h264_frames_ready(sess, status);
+ break;
+ case CMD_FATAL_ERROR:
+ dev_err(core->dev, "H.264 decoder fatal error\n");
+ goto abort;
+ case CMD_BAD_WIDTH:
+ size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16;
+ dev_err(core->dev, "Unsupported video width: %u\n", size);
+ goto abort;
+ case CMD_BAD_HEIGHT:
+ size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16;
+ dev_err(core->dev, "Unsupported video height: %u\n", size);
+ goto abort;
+ case 0: /* Unused but not worth printing for */
+ case 9:
+ break;
+ default:
+ dev_info(core->dev, "Unexpected H264 ISR: %08X\n", cmd);
+ break;
+ }
+
+ if (cmd && cmd != CMD_SRC_CHANGE)
+ amvdec_write_dos(core, AV_SCRATCH_0, 0);
+
+	/* The decoder has some SEI data for us; ignore it */
+ if (amvdec_read_dos(core, AV_SCRATCH_J) & SEI_DATA_READY)
+ amvdec_write_dos(core, AV_SCRATCH_J, 0);
+
+ return IRQ_HANDLED;
+abort:
+ amvdec_abort(sess);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t codec_h264_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+
+ return IRQ_WAKE_THREAD;
+}
+
+struct amvdec_codec_ops codec_h264_ops = {
+ .start = codec_h264_start,
+ .stop = codec_h264_stop,
+ .load_extended_firmware = codec_h264_load_extended_firmware,
+ .isr = codec_h264_isr,
+ .threaded_isr = codec_h264_threaded_isr,
+ .can_recycle = codec_h264_can_recycle,
+ .recycle = codec_h264_recycle,
+ .eos_sequence = codec_h264_eos_sequence,
+ .resume = codec_h264_resume,
+};
diff --git a/drivers/staging/media/meson/vdec/codec_h264.h b/drivers/staging/media/meson/vdec/codec_h264.h
new file mode 100644
index 000000000000..7cb4fb86ff36
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_h264.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CODEC_H264_H_
+#define __MESON_VDEC_CODEC_H264_H_
+
+#include "vdec.h"
+
+extern struct amvdec_codec_ops codec_h264_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.c b/drivers/staging/media/meson/vdec/codec_hevc_common.c
new file mode 100644
index 000000000000..0315cc0911cd
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_hevc_common.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "codec_hevc_common.h"
+#include "vdec_helpers.h"
+#include "hevc_regs.h"
+
+#define MMU_COMPRESS_HEADER_SIZE 0x48000
+#define MMU_MAP_SIZE 0x4800
+
+const u16 vdec_hevc_parser_cmd[] = {
+ 0x0401, 0x8401, 0x0800, 0x0402,
+ 0x9002, 0x1423, 0x8CC3, 0x1423,
+ 0x8804, 0x9825, 0x0800, 0x04FE,
+ 0x8406, 0x8411, 0x1800, 0x8408,
+ 0x8409, 0x8C2A, 0x9C2B, 0x1C00,
+ 0x840F, 0x8407, 0x8000, 0x8408,
+ 0x2000, 0xA800, 0x8410, 0x04DE,
+ 0x840C, 0x840D, 0xAC00, 0xA000,
+ 0x08C0, 0x08E0, 0xA40E, 0xFC00,
+ 0x7C00
+};
+
+/* Configure decode head read mode */
+void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ u32 body_size = amvdec_am21c_body_size(sess->width, sess->height);
+ u32 head_size = amvdec_am21c_head_size(sess->width, sess->height);
+
+ if (!codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) {
+ /* Enable 2-plane reference read mode */
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(31));
+ return;
+ }
+
+ if (codec_hevc_use_mmu(core->platform->revision,
+ sess->pixfmt_cap, is_10bit))
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(4));
+ else
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, 0);
+
+ if (core->platform->revision < VDEC_REVISION_SM1)
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL2, body_size / 32);
+ amvdec_write_dos(core, HEVC_CM_BODY_LENGTH, body_size);
+ amvdec_write_dos(core, HEVC_CM_HEADER_OFFSET, body_size);
+ amvdec_write_dos(core, HEVC_CM_HEADER_LENGTH, head_size);
+}
+EXPORT_SYMBOL_GPL(codec_hevc_setup_decode_head);
+
+static void codec_hevc_setup_buffers_gxbb(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ struct v4l2_m2m_buffer *buf;
+ u32 buf_num = v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
+ dma_addr_t buf_y_paddr = 0;
+ dma_addr_t buf_uv_paddr = 0;
+ u32 idx = 0;
+ u32 val;
+ int i;
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ struct vb2_buffer *vb = &buf->vb.vb2_buf;
+
+ idx = vb->index;
+
+ if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit))
+ buf_y_paddr = comm->fbc_buffer_paddr[idx];
+ else
+ buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) {
+ val = buf_y_paddr | (idx << 8) | 1;
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
+ val);
+ } else {
+ buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ val = buf_y_paddr | ((idx * 2) << 8) | 1;
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
+ val);
+ val = buf_uv_paddr | ((idx * 2 + 1) << 8) | 1;
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
+ val);
+ }
+ }
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit))
+ val = buf_y_paddr | (idx << 8) | 1;
+ else
+ val = buf_y_paddr | ((idx * 2) << 8) | 1;
+
+ /* Fill the remaining unused slots with the last buffer's Y addr */
+ for (i = buf_num; i < MAX_REF_PIC_NUM; ++i)
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, val);
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
+ for (i = 0; i < 32; ++i)
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
+}
+
+static void codec_hevc_setup_buffers_gxl(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ struct v4l2_m2m_buffer *buf;
+ u32 revision = core->platform->revision;
+ u32 pixfmt_cap = sess->pixfmt_cap;
+ int i;
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR,
+ BIT(2) | BIT(1));
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ struct vb2_buffer *vb = &buf->vb.vb2_buf;
+ dma_addr_t buf_y_paddr = 0;
+ dma_addr_t buf_uv_paddr = 0;
+ u32 idx = vb->index;
+
+ if (codec_hevc_use_mmu(revision, pixfmt_cap, is_10bit))
+ buf_y_paddr = comm->mmu_header_paddr[idx];
+ else if (codec_hevc_use_downsample(pixfmt_cap, is_10bit))
+ buf_y_paddr = comm->fbc_buffer_paddr[idx];
+ else
+ buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA,
+ buf_y_paddr >> 5);
+
+ if (!codec_hevc_use_fbc(pixfmt_cap, is_10bit)) {
+ buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA,
+ buf_uv_paddr >> 5);
+ }
+ }
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
+ for (i = 0; i < 32; ++i)
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
+}
+
+void codec_hevc_free_fbc_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
+ int i;
+
+ for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
+ if (comm->fbc_buffer_vaddr[i]) {
+ dma_free_coherent(dev, am21_size,
+ comm->fbc_buffer_vaddr[i],
+ comm->fbc_buffer_paddr[i]);
+ comm->fbc_buffer_vaddr[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(codec_hevc_free_fbc_buffers);
+
+static int codec_hevc_alloc_fbc_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ struct v4l2_m2m_buffer *buf;
+ u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ u32 idx = buf->vb.vb2_buf.index;
+ dma_addr_t paddr;
+ void *vaddr = dma_alloc_coherent(dev, am21_size, &paddr,
+ GFP_KERNEL);
+ if (!vaddr) {
+ codec_hevc_free_fbc_buffers(sess, comm);
+ return -ENOMEM;
+ }
+
+ comm->fbc_buffer_vaddr[idx] = vaddr;
+ comm->fbc_buffer_paddr[idx] = paddr;
+ }
+
+ return 0;
+}
+
+void codec_hevc_free_mmu_headers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ int i;
+
+ for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
+ if (comm->mmu_header_vaddr[i]) {
+ dma_free_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
+ comm->mmu_header_vaddr[i],
+ comm->mmu_header_paddr[i]);
+ comm->mmu_header_vaddr[i] = NULL;
+ }
+ }
+
+ if (comm->mmu_map_vaddr) {
+ dma_free_coherent(dev, MMU_MAP_SIZE,
+ comm->mmu_map_vaddr,
+ comm->mmu_map_paddr);
+ comm->mmu_map_vaddr = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(codec_hevc_free_mmu_headers);
+
+static int codec_hevc_alloc_mmu_headers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ struct v4l2_m2m_buffer *buf;
+
+ comm->mmu_map_vaddr = dma_alloc_coherent(dev, MMU_MAP_SIZE,
+ &comm->mmu_map_paddr,
+ GFP_KERNEL);
+ if (!comm->mmu_map_vaddr)
+ return -ENOMEM;
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ u32 idx = buf->vb.vb2_buf.index;
+ dma_addr_t paddr;
+ void *vaddr = dma_alloc_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
+ &paddr, GFP_KERNEL);
+ if (!vaddr) {
+ codec_hevc_free_mmu_headers(sess, comm);
+ return -ENOMEM;
+ }
+
+ comm->mmu_header_vaddr[idx] = vaddr;
+ comm->mmu_header_paddr[idx] = paddr;
+ }
+
+ return 0;
+}
+
+int codec_hevc_setup_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ int ret;
+
+ if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit)) {
+ ret = codec_hevc_alloc_fbc_buffers(sess, comm);
+ if (ret)
+ return ret;
+ }
+
+ if (codec_hevc_use_mmu(core->platform->revision,
+ sess->pixfmt_cap, is_10bit)) {
+ ret = codec_hevc_alloc_mmu_headers(sess, comm);
+ if (ret) {
+ codec_hevc_free_fbc_buffers(sess, comm);
+ return ret;
+ }
+ }
+
+ if (core->platform->revision == VDEC_REVISION_GXBB)
+ codec_hevc_setup_buffers_gxbb(sess, comm, is_10bit);
+ else
+ codec_hevc_setup_buffers_gxl(sess, comm, is_10bit);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(codec_hevc_setup_buffers);
+
+void codec_hevc_fill_mmu_map(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ struct vb2_buffer *vb)
+{
+ u32 size = amvdec_am21c_size(sess->width, sess->height);
+ u32 nb_pages = size / PAGE_SIZE;
+ u32 *mmu_map = comm->mmu_map_vaddr;
+ u32 first_page;
+ u32 i;
+
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
+ first_page = comm->fbc_buffer_paddr[vb->index] >> PAGE_SHIFT;
+ else
+ first_page = vb2_dma_contig_plane_dma_addr(vb, 0) >> PAGE_SHIFT;
+
+ for (i = 0; i < nb_pages; ++i)
+ mmu_map[i] = first_page + i;
+}
+EXPORT_SYMBOL_GPL(codec_hevc_fill_mmu_map);
diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.h b/drivers/staging/media/meson/vdec/codec_hevc_common.h
new file mode 100644
index 000000000000..cf072b8a9da2
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_hevc_common.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_HEVC_COMMON_H_
+#define __MESON_VDEC_HEVC_COMMON_H_
+
+#include "vdec.h"
+
+#define PARSER_CMD_SKIP_CFG_0 0x0000090b
+#define PARSER_CMD_SKIP_CFG_1 0x1b14140f
+#define PARSER_CMD_SKIP_CFG_2 0x001b1910
+
+#define VDEC_HEVC_PARSER_CMD_LEN 37
+extern const u16 vdec_hevc_parser_cmd[VDEC_HEVC_PARSER_CMD_LEN];
+
+#define MAX_REF_PIC_NUM 24
+
+struct codec_hevc_common {
+ void *fbc_buffer_vaddr[MAX_REF_PIC_NUM];
+ dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM];
+
+ void *mmu_header_vaddr[MAX_REF_PIC_NUM];
+ dma_addr_t mmu_header_paddr[MAX_REF_PIC_NUM];
+
+ void *mmu_map_vaddr;
+ dma_addr_t mmu_map_paddr;
+};
+
+/* Returns 1 if we must use framebuffer compression */
+static inline int codec_hevc_use_fbc(u32 pixfmt, int is_10bit)
+{
+ /* TOFIX: Handle Amlogic Compressed buffer for 8bit also */
+ return is_10bit;
+}
+
+/* Returns 1 if we are decoding 10-bit but outputting 8-bit NV12 */
+static inline int codec_hevc_use_downsample(u32 pixfmt, int is_10bit)
+{
+ return is_10bit;
+}
+
+/* Returns 1 if we are decoding using the IOMMU */
+static inline int codec_hevc_use_mmu(u32 revision, u32 pixfmt, int is_10bit)
+{
+ return revision >= VDEC_REVISION_G12A &&
+ codec_hevc_use_fbc(pixfmt, is_10bit);
+}
+
+/* Configure decode head read mode */
+void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit);
+
+void codec_hevc_free_fbc_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm);
+
+void codec_hevc_free_mmu_headers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm);
+
+int codec_hevc_setup_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit);
+
+void codec_hevc_fill_mmu_map(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ struct vb2_buffer *vb);
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/codec_mpeg12.c b/drivers/staging/media/meson/vdec/codec_mpeg12.c
new file mode 100644
index 000000000000..48869cc3d973
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_mpeg12.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "codec_mpeg12.h"
+#include "dos_regs.h"
+#include "vdec_helpers.h"
+
+#define SIZE_WORKSPACE SZ_128K
+/* Offset subtracted by the firmware from the workspace paddr */
+#define WORKSPACE_OFFSET (5 * SZ_1K)
+
+/* map firmware registers to known MPEG1/2 functions */
+#define MREG_SEQ_INFO AV_SCRATCH_4
+ #define MPEG2_SEQ_DAR_MASK GENMASK(3, 0)
+ #define MPEG2_DAR_4_3 2
+ #define MPEG2_DAR_16_9 3
+ #define MPEG2_DAR_221_100 4
+#define MREG_PIC_INFO AV_SCRATCH_5
+#define MREG_PIC_WIDTH AV_SCRATCH_6
+#define MREG_PIC_HEIGHT AV_SCRATCH_7
+#define MREG_BUFFERIN AV_SCRATCH_8
+#define MREG_BUFFEROUT AV_SCRATCH_9
+#define MREG_CMD AV_SCRATCH_A
+#define MREG_CO_MV_START AV_SCRATCH_B
+#define MREG_ERROR_COUNT AV_SCRATCH_C
+#define MREG_FRAME_OFFSET AV_SCRATCH_D
+#define MREG_WAIT_BUFFER AV_SCRATCH_E
+#define MREG_FATAL_ERROR AV_SCRATCH_F
+
+#define PICINFO_PROG 0x00008000
+#define PICINFO_TOP_FIRST 0x00002000
+
+struct codec_mpeg12 {
+ /* Buffer for the MPEG1/2 Workspace */
+ void *workspace_vaddr;
+ dma_addr_t workspace_paddr;
+};
+
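+/* 0x00 0x00 0x01 0xb7 is the MPEG-1/2 sequence_end_code */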
+static const u8 eos_sequence[SZ_1K] = { 0x00, 0x00, 0x01, 0xB7 };
+
+static const u8 *codec_mpeg12_eos_sequence(u32 *len)
+{
+ *len = ARRAY_SIZE(eos_sequence);
+ return eos_sequence;
+}
+
+static int codec_mpeg12_can_recycle(struct amvdec_core *core)
+{
+ return !amvdec_read_dos(core, MREG_BUFFERIN);
+}
+
+static void codec_mpeg12_recycle(struct amvdec_core *core, u32 buf_idx)
+{
+ amvdec_write_dos(core, MREG_BUFFERIN, buf_idx + 1);
+}
+
+static int codec_mpeg12_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_mpeg12 *mpeg12;
+ int ret;
+
+ mpeg12 = kzalloc(sizeof(*mpeg12), GFP_KERNEL);
+ if (!mpeg12)
+ return -ENOMEM;
+
+ /* Allocate some memory for the MPEG1/2 decoder's state */
+ mpeg12->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
+ &mpeg12->workspace_paddr,
+ GFP_KERNEL);
+ if (!mpeg12->workspace_vaddr) {
+ dev_err(core->dev, "Failed to request MPEG 1/2 Workspace\n");
+ ret = -ENOMEM;
+ goto free_mpeg12;
+ }
+
+ ret = amvdec_set_canvases(sess, (u32[]){ AV_SCRATCH_0, 0 },
+ (u32[]){ 8, 0 });
+ if (ret)
+ goto free_workspace;
+
+ amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
+ amvdec_write_dos(core, MREG_CO_MV_START,
+ mpeg12->workspace_paddr + WORKSPACE_OFFSET);
+
+ amvdec_write_dos(core, MPEG1_2_REG, 0);
+ amvdec_write_dos(core, PSCALE_CTRL, 0);
+ amvdec_write_dos(core, PIC_HEAD_INFO, 0x380);
+ amvdec_write_dos(core, M4_CONTROL_REG, 0);
+ amvdec_write_dos(core, MREG_BUFFERIN, 0);
+ amvdec_write_dos(core, MREG_BUFFEROUT, 0);
+ amvdec_write_dos(core, MREG_CMD, (sess->width << 16) | sess->height);
+ amvdec_write_dos(core, MREG_ERROR_COUNT, 0);
+ amvdec_write_dos(core, MREG_FATAL_ERROR, 0);
+ amvdec_write_dos(core, MREG_WAIT_BUFFER, 0);
+
+ sess->keyframe_found = 1;
+ sess->priv = mpeg12;
+
+ return 0;
+
+free_workspace:
+ dma_free_coherent(core->dev, SIZE_WORKSPACE, mpeg12->workspace_vaddr,
+ mpeg12->workspace_paddr);
+free_mpeg12:
+ kfree(mpeg12);
+
+ return ret;
+}
+
+static int codec_mpeg12_stop(struct amvdec_session *sess)
+{
+ struct codec_mpeg12 *mpeg12 = sess->priv;
+ struct amvdec_core *core = sess->core;
+
+ if (mpeg12->workspace_vaddr)
+ dma_free_coherent(core->dev, SIZE_WORKSPACE,
+ mpeg12->workspace_vaddr,
+ mpeg12->workspace_paddr);
+
+ return 0;
+}
+
+static void codec_mpeg12_update_dar(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 seq = amvdec_read_dos(core, MREG_SEQ_INFO);
+ u32 ar = seq & MPEG2_SEQ_DAR_MASK;
+
+ switch (ar) {
+ case MPEG2_DAR_4_3:
+ amvdec_set_par_from_dar(sess, 4, 3);
+ break;
+ case MPEG2_DAR_16_9:
+ amvdec_set_par_from_dar(sess, 16, 9);
+ break;
+ case MPEG2_DAR_221_100:
+ amvdec_set_par_from_dar(sess, 221, 100);
+ break;
+ default:
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+ break;
+ }
+}
+
+static irqreturn_t codec_mpeg12_threaded_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 reg;
+ u32 pic_info;
+ u32 is_progressive;
+ u32 buffer_index;
+ u32 field = V4L2_FIELD_NONE;
+ u32 offset;
+
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+ reg = amvdec_read_dos(core, MREG_FATAL_ERROR);
+ if (reg == 1) {
+ dev_err(core->dev, "MPEG1/2 fatal error\n");
+ amvdec_abort(sess);
+ return IRQ_HANDLED;
+ }
+
+ reg = amvdec_read_dos(core, MREG_BUFFEROUT);
+ if (!reg)
+ return IRQ_HANDLED;
+
+ /* Unclear what this means */
+ if ((reg & GENMASK(23, 17)) == GENMASK(23, 17))
+ goto end;
+
+ pic_info = amvdec_read_dos(core, MREG_PIC_INFO);
+ is_progressive = pic_info & PICINFO_PROG;
+
+ if (!is_progressive)
+ field = (pic_info & PICINFO_TOP_FIRST) ?
+ V4L2_FIELD_INTERLACED_TB :
+ V4L2_FIELD_INTERLACED_BT;
+
+ codec_mpeg12_update_dar(sess);
+ buffer_index = ((reg & 0xf) - 1) & 7;
+ offset = amvdec_read_dos(core, MREG_FRAME_OFFSET);
+ amvdec_dst_buf_done_idx(sess, buffer_index, offset, field);
+
+end:
+ amvdec_write_dos(core, MREG_BUFFEROUT, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t codec_mpeg12_isr(struct amvdec_session *sess)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+struct amvdec_codec_ops codec_mpeg12_ops = {
+ .start = codec_mpeg12_start,
+ .stop = codec_mpeg12_stop,
+ .isr = codec_mpeg12_isr,
+ .threaded_isr = codec_mpeg12_threaded_isr,
+ .can_recycle = codec_mpeg12_can_recycle,
+ .recycle = codec_mpeg12_recycle,
+ .eos_sequence = codec_mpeg12_eos_sequence,
+};
diff --git a/drivers/staging/media/meson/vdec/codec_mpeg12.h b/drivers/staging/media/meson/vdec/codec_mpeg12.h
new file mode 100644
index 000000000000..43cab5f39ca0
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_mpeg12.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CODEC_MPEG12_H_
+#define __MESON_VDEC_CODEC_MPEG12_H_
+
+#include "vdec.h"
+
+extern struct amvdec_codec_ops codec_mpeg12_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/codec_vp9.c b/drivers/staging/media/meson/vdec/codec_vp9.c
new file mode 100644
index 000000000000..394df5761556
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_vp9.c
@@ -0,0 +1,2170 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Maxime Jourdan <mjourdan@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "dos_regs.h"
+#include "hevc_regs.h"
+#include "codec_vp9.h"
+#include "vdec_helpers.h"
+#include "codec_hevc_common.h"
+
+/* HEVC reg mapping */
+#define VP9_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0
+ #define VP9_10B_DECODE_SLICE 5
+ #define VP9_HEAD_PARSER_DONE 0xf0
+#define VP9_RPM_BUFFER HEVC_ASSIST_SCRATCH_1
+#define VP9_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2
+#define VP9_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3
+#define VP9_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4
+#define VP9_PPS_BUFFER HEVC_ASSIST_SCRATCH_5
+#define VP9_SAO_UP HEVC_ASSIST_SCRATCH_6
+#define VP9_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7
+#define VP9_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8
+#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9
+#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A
+#define VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B
+#define VP9_SCALELUT HEVC_ASSIST_SCRATCH_D
+#define VP9_WAIT_FLAG HEVC_ASSIST_SCRATCH_E
+#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F
+#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I
+#define VP9_DECODE_MODE HEVC_ASSIST_SCRATCH_J
+ #define DECODE_MODE_SINGLE 0
+#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K
+#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M
+#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N
+
+/* VP9 Constants */
+#define LCU_SIZE 64
+#define MAX_REF_PIC_NUM 24
+#define REFS_PER_FRAME 3
+#define REF_FRAMES 8
+#define MV_MEM_UNIT 0x240
+#define ADAPT_PROB_SIZE 0xf80
+
+enum FRAME_TYPE {
+ KEY_FRAME = 0,
+ INTER_FRAME = 1,
+ FRAME_TYPES,
+};
+
+/* VP9 Workspace layout */
+#define MPRED_MV_BUF_SIZE 0x120000
+
+#define IPP_SIZE 0x4000
+#define SAO_ABV_SIZE 0x30000
+#define SAO_VB_SIZE 0x30000
+#define SH_TM_RPS_SIZE 0x800
+#define VPS_SIZE 0x800
+#define SPS_SIZE 0x800
+#define PPS_SIZE 0x2000
+#define SAO_UP_SIZE 0x2800
+#define SWAP_BUF_SIZE 0x800
+#define SWAP_BUF2_SIZE 0x800
+#define SCALELUT_SIZE 0x8000
+#define DBLK_PARA_SIZE 0x80000
+#define DBLK_DATA_SIZE 0x80000
+#define SEG_MAP_SIZE 0xd800
+#define PROB_SIZE 0x5000
+#define COUNT_SIZE 0x3000
+#define MMU_VBH_SIZE 0x5000
+#define MPRED_ABV_SIZE 0x10000
+#define MPRED_MV_SIZE (MPRED_MV_BUF_SIZE * MAX_REF_PIC_NUM)
+#define RPM_BUF_SIZE 0x100
+#define LMEM_SIZE 0x800
+
+#define IPP_OFFSET 0x00
+#define SAO_ABV_OFFSET (IPP_OFFSET + IPP_SIZE)
+#define SAO_VB_OFFSET (SAO_ABV_OFFSET + SAO_ABV_SIZE)
+#define SH_TM_RPS_OFFSET (SAO_VB_OFFSET + SAO_VB_SIZE)
+#define VPS_OFFSET (SH_TM_RPS_OFFSET + SH_TM_RPS_SIZE)
+#define SPS_OFFSET (VPS_OFFSET + VPS_SIZE)
+#define PPS_OFFSET (SPS_OFFSET + SPS_SIZE)
+#define SAO_UP_OFFSET (PPS_OFFSET + PPS_SIZE)
+#define SWAP_BUF_OFFSET (SAO_UP_OFFSET + SAO_UP_SIZE)
+#define SWAP_BUF2_OFFSET (SWAP_BUF_OFFSET + SWAP_BUF_SIZE)
+#define SCALELUT_OFFSET (SWAP_BUF2_OFFSET + SWAP_BUF2_SIZE)
+#define DBLK_PARA_OFFSET (SCALELUT_OFFSET + SCALELUT_SIZE)
+#define DBLK_DATA_OFFSET (DBLK_PARA_OFFSET + DBLK_PARA_SIZE)
+#define SEG_MAP_OFFSET (DBLK_DATA_OFFSET + DBLK_DATA_SIZE)
+#define PROB_OFFSET (SEG_MAP_OFFSET + SEG_MAP_SIZE)
+#define COUNT_OFFSET (PROB_OFFSET + PROB_SIZE)
+#define MMU_VBH_OFFSET (COUNT_OFFSET + COUNT_SIZE)
+#define MPRED_ABV_OFFSET (MMU_VBH_OFFSET + MMU_VBH_SIZE)
+#define MPRED_MV_OFFSET (MPRED_ABV_OFFSET + MPRED_ABV_SIZE)
+#define RPM_OFFSET (MPRED_MV_OFFSET + MPRED_MV_SIZE)
+#define LMEM_OFFSET (RPM_OFFSET + RPM_BUF_SIZE)
+
+#define SIZE_WORKSPACE ALIGN(LMEM_OFFSET + LMEM_SIZE, 64 * SZ_1K)
+
+#define NONE -1
+#define INTRA_FRAME 0
+#define LAST_FRAME 1
+#define GOLDEN_FRAME 2
+#define ALTREF_FRAME 3
+#define MAX_REF_FRAMES 4
+
+/*
+ * Defines, declarations and sub-functions for the VP9 de-block loop
+ * filter Thr/Lvl table update:
+ * - struct segmentation is for the loop filter only (some fields removed)
+ * - vp9_loop_filter_init() and vp9_loop_filter_frame_init() will be
+ *   instantiated in C_Entry
+ * - vp9_loop_filter_init() runs once before decoding starts
+ * - vp9_loop_filter_frame_init() runs before every frame is decoded
+ * - the video format is set to VP9 in vp9_loop_filter_init()
+ */
+#define MAX_LOOP_FILTER 63
+#define MAX_REF_LF_DELTAS 4
+#define MAX_MODE_LF_DELTAS 2
+#define SEGMENT_DELTADATA 0
+#define SEGMENT_ABSDATA 1
+#define MAX_SEGMENTS 8
+
+/* VP9 PROB processing defines */
+#define VP9_PARTITION_START 0
+#define VP9_PARTITION_SIZE_STEP (3 * 4)
+#define VP9_PARTITION_ONE_SIZE (4 * VP9_PARTITION_SIZE_STEP)
+#define VP9_PARTITION_KEY_START 0
+#define VP9_PARTITION_P_START VP9_PARTITION_ONE_SIZE
+#define VP9_PARTITION_SIZE (2 * VP9_PARTITION_ONE_SIZE)
+#define VP9_SKIP_START (VP9_PARTITION_START + VP9_PARTITION_SIZE)
+#define VP9_SKIP_SIZE 4 /* only use 3*/
+#define VP9_TX_MODE_START (VP9_SKIP_START + VP9_SKIP_SIZE)
+#define VP9_TX_MODE_8_0_OFFSET 0
+#define VP9_TX_MODE_8_1_OFFSET 1
+#define VP9_TX_MODE_16_0_OFFSET 2
+#define VP9_TX_MODE_16_1_OFFSET 4
+#define VP9_TX_MODE_32_0_OFFSET 6
+#define VP9_TX_MODE_32_1_OFFSET 9
+#define VP9_TX_MODE_SIZE 12
+#define VP9_COEF_START (VP9_TX_MODE_START + VP9_TX_MODE_SIZE)
+#define VP9_COEF_BAND_0_OFFSET 0
+#define VP9_COEF_BAND_1_OFFSET (VP9_COEF_BAND_0_OFFSET + 3 * 3 + 1)
+#define VP9_COEF_BAND_2_OFFSET (VP9_COEF_BAND_1_OFFSET + 6 * 3)
+#define VP9_COEF_BAND_3_OFFSET (VP9_COEF_BAND_2_OFFSET + 6 * 3)
+#define VP9_COEF_BAND_4_OFFSET (VP9_COEF_BAND_3_OFFSET + 6 * 3)
+#define VP9_COEF_BAND_5_OFFSET (VP9_COEF_BAND_4_OFFSET + 6 * 3)
+#define VP9_COEF_SIZE_ONE_SET 100 /* ((3 + 5 * 6) * 3 + 1 padding)*/
+#define VP9_COEF_4X4_START (VP9_COEF_START + 0 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_8X8_START (VP9_COEF_START + 4 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_16X16_START (VP9_COEF_START + 8 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_32X32_START (VP9_COEF_START + 12 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_SIZE_PLANE (2 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_SIZE (4 * 2 * 2 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_INTER_MODE_START (VP9_COEF_START + VP9_COEF_SIZE)
+#define VP9_INTER_MODE_SIZE 24 /* only use 21 (# * 7)*/
+#define VP9_INTERP_START (VP9_INTER_MODE_START + VP9_INTER_MODE_SIZE)
+#define VP9_INTERP_SIZE 8
+#define VP9_INTRA_INTER_START (VP9_INTERP_START + VP9_INTERP_SIZE)
+#define VP9_INTRA_INTER_SIZE 4
+#define VP9_INTERP_INTRA_INTER_START VP9_INTERP_START
+#define VP9_INTERP_INTRA_INTER_SIZE (VP9_INTERP_SIZE + VP9_INTRA_INTER_SIZE)
+#define VP9_COMP_INTER_START \
+ (VP9_INTERP_INTRA_INTER_START + VP9_INTERP_INTRA_INTER_SIZE)
+#define VP9_COMP_INTER_SIZE 5
+#define VP9_COMP_REF_START (VP9_COMP_INTER_START + VP9_COMP_INTER_SIZE)
+#define VP9_COMP_REF_SIZE 5
+#define VP9_SINGLE_REF_START (VP9_COMP_REF_START + VP9_COMP_REF_SIZE)
+#define VP9_SINGLE_REF_SIZE 10
+#define VP9_REF_MODE_START VP9_COMP_INTER_START
+#define VP9_REF_MODE_SIZE \
+ (VP9_COMP_INTER_SIZE + VP9_COMP_REF_SIZE + VP9_SINGLE_REF_SIZE)
+#define VP9_IF_Y_MODE_START (VP9_REF_MODE_START + VP9_REF_MODE_SIZE)
+#define VP9_IF_Y_MODE_SIZE 36
+#define VP9_IF_UV_MODE_START (VP9_IF_Y_MODE_START + VP9_IF_Y_MODE_SIZE)
+#define VP9_IF_UV_MODE_SIZE 92 /* only use 90 */
+#define VP9_MV_JOINTS_START (VP9_IF_UV_MODE_START + VP9_IF_UV_MODE_SIZE)
+#define VP9_MV_JOINTS_SIZE 3
+#define VP9_MV_SIGN_0_START (VP9_MV_JOINTS_START + VP9_MV_JOINTS_SIZE)
+#define VP9_MV_SIGN_0_SIZE 1
+#define VP9_MV_CLASSES_0_START (VP9_MV_SIGN_0_START + VP9_MV_SIGN_0_SIZE)
+#define VP9_MV_CLASSES_0_SIZE 10
+#define VP9_MV_CLASS0_0_START \
+ (VP9_MV_CLASSES_0_START + VP9_MV_CLASSES_0_SIZE)
+#define VP9_MV_CLASS0_0_SIZE 1
+#define VP9_MV_BITS_0_START (VP9_MV_CLASS0_0_START + VP9_MV_CLASS0_0_SIZE)
+#define VP9_MV_BITS_0_SIZE 10
+#define VP9_MV_SIGN_1_START (VP9_MV_BITS_0_START + VP9_MV_BITS_0_SIZE)
+#define VP9_MV_SIGN_1_SIZE 1
+#define VP9_MV_CLASSES_1_START \
+ (VP9_MV_SIGN_1_START + VP9_MV_SIGN_1_SIZE)
+#define VP9_MV_CLASSES_1_SIZE 10
+#define VP9_MV_CLASS0_1_START \
+ (VP9_MV_CLASSES_1_START + VP9_MV_CLASSES_1_SIZE)
+#define VP9_MV_CLASS0_1_SIZE 1
+#define VP9_MV_BITS_1_START \
+ (VP9_MV_CLASS0_1_START + VP9_MV_CLASS0_1_SIZE)
+#define VP9_MV_BITS_1_SIZE 10
+#define VP9_MV_CLASS0_FP_0_START \
+ (VP9_MV_BITS_1_START + VP9_MV_BITS_1_SIZE)
+#define VP9_MV_CLASS0_FP_0_SIZE 9
+#define VP9_MV_CLASS0_FP_1_START \
+ (VP9_MV_CLASS0_FP_0_START + VP9_MV_CLASS0_FP_0_SIZE)
+#define VP9_MV_CLASS0_FP_1_SIZE 9
+#define VP9_MV_CLASS0_HP_0_START \
+ (VP9_MV_CLASS0_FP_1_START + VP9_MV_CLASS0_FP_1_SIZE)
+#define VP9_MV_CLASS0_HP_0_SIZE 2
+#define VP9_MV_CLASS0_HP_1_START \
+ (VP9_MV_CLASS0_HP_0_START + VP9_MV_CLASS0_HP_0_SIZE)
+#define VP9_MV_CLASS0_HP_1_SIZE 2
+#define VP9_MV_START VP9_MV_JOINTS_START
+#define VP9_MV_SIZE 72 /* only use 69 */
+
+#define VP9_TOTAL_SIZE (VP9_MV_START + VP9_MV_SIZE)
+
+/* VP9 COUNT mem processing defines */
+#define VP9_COEF_COUNT_START 0
+#define VP9_COEF_COUNT_BAND_0_OFFSET 0
+#define VP9_COEF_COUNT_BAND_1_OFFSET \
+ (VP9_COEF_COUNT_BAND_0_OFFSET + 3 * 5)
+#define VP9_COEF_COUNT_BAND_2_OFFSET \
+ (VP9_COEF_COUNT_BAND_1_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_BAND_3_OFFSET \
+ (VP9_COEF_COUNT_BAND_2_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_BAND_4_OFFSET \
+ (VP9_COEF_COUNT_BAND_3_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_BAND_5_OFFSET \
+ (VP9_COEF_COUNT_BAND_4_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_SIZE_ONE_SET 165 /* (3 + 5 * 6) * 5 */
+#define VP9_COEF_COUNT_4X4_START \
+ (VP9_COEF_COUNT_START + 0 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_8X8_START \
+ (VP9_COEF_COUNT_START + 4 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_16X16_START \
+ (VP9_COEF_COUNT_START + 8 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_32X32_START \
+ (VP9_COEF_COUNT_START + 12 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_SIZE_PLANE (2 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_SIZE (4 * 2 * 2 * VP9_COEF_COUNT_SIZE_ONE_SET)
+
+#define VP9_INTRA_INTER_COUNT_START \
+ (VP9_COEF_COUNT_START + VP9_COEF_COUNT_SIZE)
+#define VP9_INTRA_INTER_COUNT_SIZE (4 * 2)
+#define VP9_COMP_INTER_COUNT_START \
+ (VP9_INTRA_INTER_COUNT_START + VP9_INTRA_INTER_COUNT_SIZE)
+#define VP9_COMP_INTER_COUNT_SIZE (5 * 2)
+#define VP9_COMP_REF_COUNT_START \
+ (VP9_COMP_INTER_COUNT_START + VP9_COMP_INTER_COUNT_SIZE)
+#define VP9_COMP_REF_COUNT_SIZE (5 * 2)
+#define VP9_SINGLE_REF_COUNT_START \
+ (VP9_COMP_REF_COUNT_START + VP9_COMP_REF_COUNT_SIZE)
+#define VP9_SINGLE_REF_COUNT_SIZE (10 * 2)
+#define VP9_TX_MODE_COUNT_START \
+ (VP9_SINGLE_REF_COUNT_START + VP9_SINGLE_REF_COUNT_SIZE)
+#define VP9_TX_MODE_COUNT_SIZE (12 * 2)
+#define VP9_SKIP_COUNT_START \
+ (VP9_TX_MODE_COUNT_START + VP9_TX_MODE_COUNT_SIZE)
+#define VP9_SKIP_COUNT_SIZE (3 * 2)
+#define VP9_MV_SIGN_0_COUNT_START \
+ (VP9_SKIP_COUNT_START + VP9_SKIP_COUNT_SIZE)
+#define VP9_MV_SIGN_0_COUNT_SIZE (1 * 2)
+#define VP9_MV_SIGN_1_COUNT_START \
+ (VP9_MV_SIGN_0_COUNT_START + VP9_MV_SIGN_0_COUNT_SIZE)
+#define VP9_MV_SIGN_1_COUNT_SIZE (1 * 2)
+#define VP9_MV_BITS_0_COUNT_START \
+ (VP9_MV_SIGN_1_COUNT_START + VP9_MV_SIGN_1_COUNT_SIZE)
+#define VP9_MV_BITS_0_COUNT_SIZE (10 * 2)
+#define VP9_MV_BITS_1_COUNT_START \
+ (VP9_MV_BITS_0_COUNT_START + VP9_MV_BITS_0_COUNT_SIZE)
+#define VP9_MV_BITS_1_COUNT_SIZE (10 * 2)
+#define VP9_MV_CLASS0_HP_0_COUNT_START \
+ (VP9_MV_BITS_1_COUNT_START + VP9_MV_BITS_1_COUNT_SIZE)
+#define VP9_MV_CLASS0_HP_0_COUNT_SIZE (2 * 2)
+#define VP9_MV_CLASS0_HP_1_COUNT_START \
+ (VP9_MV_CLASS0_HP_0_COUNT_START + VP9_MV_CLASS0_HP_0_COUNT_SIZE)
+#define VP9_MV_CLASS0_HP_1_COUNT_SIZE (2 * 2)
+
+/* Start merge_tree */
+#define VP9_INTER_MODE_COUNT_START \
+ (VP9_MV_CLASS0_HP_1_COUNT_START + VP9_MV_CLASS0_HP_1_COUNT_SIZE)
+#define VP9_INTER_MODE_COUNT_SIZE (7 * 4)
+#define VP9_IF_Y_MODE_COUNT_START \
+ (VP9_INTER_MODE_COUNT_START + VP9_INTER_MODE_COUNT_SIZE)
+#define VP9_IF_Y_MODE_COUNT_SIZE (10 * 4)
+#define VP9_IF_UV_MODE_COUNT_START \
+ (VP9_IF_Y_MODE_COUNT_START + VP9_IF_Y_MODE_COUNT_SIZE)
+#define VP9_IF_UV_MODE_COUNT_SIZE (10 * 10)
+#define VP9_PARTITION_P_COUNT_START \
+ (VP9_IF_UV_MODE_COUNT_START + VP9_IF_UV_MODE_COUNT_SIZE)
+#define VP9_PARTITION_P_COUNT_SIZE (4 * 4 * 4)
+#define VP9_INTERP_COUNT_START \
+ (VP9_PARTITION_P_COUNT_START + VP9_PARTITION_P_COUNT_SIZE)
+#define VP9_INTERP_COUNT_SIZE (4 * 3)
+#define VP9_MV_JOINTS_COUNT_START \
+ (VP9_INTERP_COUNT_START + VP9_INTERP_COUNT_SIZE)
+#define VP9_MV_JOINTS_COUNT_SIZE (1 * 4)
+#define VP9_MV_CLASSES_0_COUNT_START \
+ (VP9_MV_JOINTS_COUNT_START + VP9_MV_JOINTS_COUNT_SIZE)
+#define VP9_MV_CLASSES_0_COUNT_SIZE (1 * 11)
+#define VP9_MV_CLASS0_0_COUNT_START \
+ (VP9_MV_CLASSES_0_COUNT_START + VP9_MV_CLASSES_0_COUNT_SIZE)
+#define VP9_MV_CLASS0_0_COUNT_SIZE (1 * 2)
+#define VP9_MV_CLASSES_1_COUNT_START \
+ (VP9_MV_CLASS0_0_COUNT_START + VP9_MV_CLASS0_0_COUNT_SIZE)
+#define VP9_MV_CLASSES_1_COUNT_SIZE (1 * 11)
+#define VP9_MV_CLASS0_1_COUNT_START \
+ (VP9_MV_CLASSES_1_COUNT_START + VP9_MV_CLASSES_1_COUNT_SIZE)
+#define VP9_MV_CLASS0_1_COUNT_SIZE (1 * 2)
+#define VP9_MV_CLASS0_FP_0_COUNT_START \
+ (VP9_MV_CLASS0_1_COUNT_START + VP9_MV_CLASS0_1_COUNT_SIZE)
+#define VP9_MV_CLASS0_FP_0_COUNT_SIZE (3 * 4)
+#define VP9_MV_CLASS0_FP_1_COUNT_START \
+ (VP9_MV_CLASS0_FP_0_COUNT_START + VP9_MV_CLASS0_FP_0_COUNT_SIZE)
+#define VP9_MV_CLASS0_FP_1_COUNT_SIZE (3 * 4)
+
+#define DC_PRED 0 /* Average of above and left pixels */
+#define V_PRED 1 /* Vertical */
+#define H_PRED 2 /* Horizontal */
+#define D45_PRED 3 /* Directional 45 deg = round(arctan(1/1) * 180/pi) */
+#define D135_PRED 4 /* Directional 135 deg = 180 - 45 */
+#define D117_PRED 5 /* Directional 117 deg = 180 - 63 */
+#define D153_PRED 6 /* Directional 153 deg = 180 - 27 */
+#define D207_PRED 7 /* Directional 207 deg = 180 + 27 */
+#define D63_PRED 8 /* Directional 63 deg = round(arctan(2/1) * 180/pi) */
+#define TM_PRED 9 /* True-motion */
+
+/* Use a static inline to avoid possible side effects from num being reused */
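+/* e.g. round_power_of_two(300, 8) == (300 + 128) >> 8 == 1 */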
+static inline int round_power_of_two(int value, int num)
+{
+ return (value + (1 << (num - 1))) >> num;
+}
+
+#define MODE_MV_COUNT_SAT 20
+static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = {
+ 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64,
+ 70, 76, 83, 89, 96, 102, 108, 115, 121, 128
+};
+
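+/* Raw 16-bit word view and parsed header-field view of the same RPM storage */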
+union rpm_param {
+ struct {
+ u16 data[RPM_BUF_SIZE];
+ } l;
+ struct {
+ u16 profile;
+ u16 show_existing_frame;
+ u16 frame_to_show_idx;
+ u16 frame_type; /*1 bit*/
+ u16 show_frame; /*1 bit*/
+ u16 error_resilient_mode; /*1 bit*/
+ u16 intra_only; /*1 bit*/
+ u16 display_size_present; /*1 bit*/
+ u16 reset_frame_context;
+ u16 refresh_frame_flags;
+ u16 width;
+ u16 height;
+ u16 display_width;
+ u16 display_height;
+ u16 ref_info;
+ u16 same_frame_size;
+ u16 mode_ref_delta_enabled;
+ u16 ref_deltas[4];
+ u16 mode_deltas[2];
+ u16 filter_level;
+ u16 sharpness_level;
+ u16 bit_depth;
+ u16 seg_quant_info[8];
+ u16 seg_enabled;
+ u16 seg_abs_delta;
+ /* bit 15: feature enabled; bit 8, sign; bit[5:0], data */
+ u16 seg_lf_info[8];
+ } p;
+};
+
+enum SEG_LVL_FEATURES {
+ SEG_LVL_ALT_Q = 0, /* Use alternate Quantizer */
+ SEG_LVL_ALT_LF = 1, /* Use alternate loop filter value */
+ SEG_LVL_REF_FRAME = 2, /* Optional Segment reference frame */
+ SEG_LVL_SKIP = 3, /* Optional Segment (0,0) + skip mode */
+ SEG_LVL_MAX = 4 /* Number of features supported */
+};
+
+struct segmentation {
+ u8 enabled;
+ u8 update_map;
+ u8 update_data;
+ u8 abs_delta;
+ u8 temporal_update;
+ s16 feature_data[MAX_SEGMENTS][SEG_LVL_MAX];
+ unsigned int feature_mask[MAX_SEGMENTS];
+};
+
+struct loop_filter_thresh {
+ u8 mblim;
+ u8 lim;
+ u8 hev_thr;
+};
+
+struct loop_filter_info_n {
+ struct loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1];
+ u8 lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS];
+};
+
+struct loopfilter {
+ int filter_level;
+
+ int sharpness_level;
+ int last_sharpness_level;
+
+ u8 mode_ref_delta_enabled;
+ u8 mode_ref_delta_update;
+
+ /*0 = Intra, Last, GF, ARF*/
+ signed char ref_deltas[MAX_REF_LF_DELTAS];
+ signed char last_ref_deltas[MAX_REF_LF_DELTAS];
+
+ /*0 = ZERO_MV, MV*/
+ signed char mode_deltas[MAX_MODE_LF_DELTAS];
+ signed char last_mode_deltas[MAX_MODE_LF_DELTAS];
+};
+
+struct vp9_frame {
+ struct list_head list;
+ struct vb2_v4l2_buffer *vbuf;
+ int index;
+ int intra_only;
+ int show;
+ int type;
+ int done;
+ unsigned int width;
+ unsigned int height;
+};
+
+struct codec_vp9 {
+ /* VP9 context lock */
+ struct mutex lock;
+
+ /* Common part with the HEVC decoder */
+ struct codec_hevc_common common;
+
+ /* Buffer for the VP9 Workspace */
+ void *workspace_vaddr;
+ dma_addr_t workspace_paddr;
+
+	/* Contains information parsed from the bitstream */
+ union rpm_param rpm_param;
+
+ /* Whether we detected the bitstream as 10-bit */
+ int is_10bit;
+
+ /* Coded resolution reported by the hardware */
+ u32 width, height;
+
+ /* All ref frames used by the HW at a given time */
+ struct list_head ref_frames_list;
+ u32 frames_num;
+
+ /* In case of downsampling (decoding with FBC but outputting in NV12M),
+ * we need to allocate additional buffers for FBC.
+ */
+ void *fbc_buffer_vaddr[MAX_REF_PIC_NUM];
+ dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM];
+
+ int ref_frame_map[REF_FRAMES];
+ int next_ref_frame_map[REF_FRAMES];
+ struct vp9_frame *frame_refs[REFS_PER_FRAME];
+
+ u32 lcu_total;
+
+ /* loop filter */
+ int default_filt_lvl;
+ struct loop_filter_info_n lfi;
+ struct loopfilter lf;
+ struct segmentation seg_4lf;
+
+ struct vp9_frame *cur_frame;
+ struct vp9_frame *prev_frame;
+};
+
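+/* Signed 64-bit division helper; the quotient is truncated to an int */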
+static int div_r32(s64 m, int n)
+{
+ s64 qu = div_s64(m, n);
+
+ return (int)qu;
+}
+
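+/* VP9 probabilities are 8-bit and must stay within [1, 255] */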
+static int clip_prob(int p)
+{
+ return clamp_val(p, 1, 255);
+}
+
+static int segfeature_active(struct segmentation *seg, int segment_id,
+ enum SEG_LVL_FEATURES feature_id)
+{
+ return seg->enabled &&
+ (seg->feature_mask[segment_id] & (1 << feature_id));
+}
+
+static int get_segdata(struct segmentation *seg, int segment_id,
+ enum SEG_LVL_FEATURES feature_id)
+{
+ return seg->feature_data[segment_id][feature_id];
+}
+
+static void vp9_update_sharpness(struct loop_filter_info_n *lfi,
+ int sharpness_lvl)
+{
+ int lvl;
+
+	/* For each possible loop filter value, fill out the limits */
+ for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) {
+		/* Set loop filter parameters that control sharpness. */
+ int block_inside_limit = lvl >> ((sharpness_lvl > 0) +
+ (sharpness_lvl > 4));
+
+ if (sharpness_lvl > 0) {
+ if (block_inside_limit > (9 - sharpness_lvl))
+ block_inside_limit = (9 - sharpness_lvl);
+ }
+
+ if (block_inside_limit < 1)
+ block_inside_limit = 1;
+
+ lfi->lfthr[lvl].lim = (u8)block_inside_limit;
+ lfi->lfthr[lvl].mblim = (u8)(2 * (lvl + 2) +
+ block_inside_limit);
+ }
+}
+
+/* Called once when decoding starts */
+static void
+vp9_loop_filter_init(struct amvdec_core *core, struct codec_vp9 *vp9)
+{
+ struct loop_filter_info_n *lfi = &vp9->lfi;
+ struct loopfilter *lf = &vp9->lf;
+ struct segmentation *seg_4lf = &vp9->seg_4lf;
+ int i;
+
+ memset(lfi, 0, sizeof(struct loop_filter_info_n));
+ memset(lf, 0, sizeof(struct loopfilter));
+ memset(seg_4lf, 0, sizeof(struct segmentation));
+ lf->sharpness_level = 0;
+ vp9_update_sharpness(lfi, lf->sharpness_level);
+ lf->last_sharpness_level = lf->sharpness_level;
+
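+	/* Pack the mblim/lim thresholds of two filter levels per register write */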
+ for (i = 0; i < 32; i++) {
+ unsigned int thr;
+
+ thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2 + 1].mblim & 0xff);
+ thr = (thr << 16) | ((lfi->lfthr[i * 2].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2].mblim & 0xff);
+
+ amvdec_write_dos(core, HEVC_DBLK_CFG9, thr);
+ }
+
+ if (core->platform->revision >= VDEC_REVISION_SM1)
+ amvdec_write_dos(core, HEVC_DBLK_CFGB,
+ (0x3 << 14) | /* dw fifo thres r and b */
+ (0x3 << 12) | /* dw fifo thres r or b */
+ (0x3 << 10) | /* dw fifo thres not r/b */
+ BIT(0)); /* VP9 video format */
+ else if (core->platform->revision >= VDEC_REVISION_G12A)
+ /* VP9 video format */
+ amvdec_write_dos(core, HEVC_DBLK_CFGB, (0x54 << 8) | BIT(0));
+ else
+ amvdec_write_dos(core, HEVC_DBLK_CFGB, 0x40400001);
+}
+
+static void
+vp9_loop_filter_frame_init(struct amvdec_core *core, struct segmentation *seg,
+ struct loop_filter_info_n *lfi,
+ struct loopfilter *lf, int default_filt_lvl)
+{
+ int i;
+ int seg_id;
+
+	/*
+	 * n_shift is the multiplier for lf_deltas.
+	 * The multiplier is:
+	 * - 1 when filter_lvl is between 0 and 31
+	 * - 2 when filter_lvl is between 32 and 63
+	 */
+ const int scale = 1 << (default_filt_lvl >> 5);
+
+ /* update limits if sharpness has changed */
+ if (lf->last_sharpness_level != lf->sharpness_level) {
+ vp9_update_sharpness(lfi, lf->sharpness_level);
+ lf->last_sharpness_level = lf->sharpness_level;
+
+ /* Write to register */
+ for (i = 0; i < 32; i++) {
+ unsigned int thr;
+
+ thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2 + 1].mblim & 0xff);
+ thr = (thr << 16) |
+ ((lfi->lfthr[i * 2].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2].mblim & 0xff);
+
+ amvdec_write_dos(core, HEVC_DBLK_CFG9, thr);
+ }
+ }
+
+ for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
+ int lvl_seg = default_filt_lvl;
+
+ if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
+ const int data = get_segdata(seg, seg_id,
+ SEG_LVL_ALT_LF);
+ lvl_seg = clamp_t(int,
+ seg->abs_delta == SEGMENT_ABSDATA ?
+ data : default_filt_lvl + data,
+ 0, MAX_LOOP_FILTER);
+ }
+
+ if (!lf->mode_ref_delta_enabled) {
+ /*
+ * We could get rid of this if we assume that deltas
+ * are set to zero when not in use.
+			 * The encoder always uses deltas.
+ */
+ memset(lfi->lvl[seg_id], lvl_seg,
+ sizeof(lfi->lvl[seg_id]));
+ } else {
+ int ref, mode;
+ const int intra_lvl =
+ lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
+ lfi->lvl[seg_id][INTRA_FRAME][0] =
+ clamp_val(intra_lvl, 0, MAX_LOOP_FILTER);
+
+ for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) {
+ for (mode = 0; mode < MAX_MODE_LF_DELTAS;
+ ++mode) {
+ const int inter_lvl =
+ lvl_seg +
+ lf->ref_deltas[ref] * scale +
+ lf->mode_deltas[mode] * scale;
+ lfi->lvl[seg_id][ref][mode] =
+ clamp_val(inter_lvl, 0,
+ MAX_LOOP_FILTER);
+ }
+ }
+ }
+ }
+
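+	/* Pack the filter levels of the four reference types into each write */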
+ for (i = 0; i < 16; i++) {
+ unsigned int level;
+
+ level = ((lfi->lvl[i >> 1][3][i & 1] & 0x3f) << 24) |
+ ((lfi->lvl[i >> 1][2][i & 1] & 0x3f) << 16) |
+ ((lfi->lvl[i >> 1][1][i & 1] & 0x3f) << 8) |
+ (lfi->lvl[i >> 1][0][i & 1] & 0x3f);
+ if (!default_filt_lvl)
+ level = 0;
+
+ amvdec_write_dos(core, HEVC_DBLK_CFGA, level);
+ }
+}
+
+static void codec_vp9_flush_output(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ struct vp9_frame *tmp, *n;
+
+ mutex_lock(&vp9->lock);
+ list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) {
+ if (!tmp->done) {
+ if (tmp->show)
+ amvdec_dst_buf_done(sess, tmp->vbuf,
+ V4L2_FIELD_NONE);
+ else
+ v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf);
+
+ vp9->frames_num--;
+ }
+
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ mutex_unlock(&vp9->lock);
+}
+
+static u32 codec_vp9_num_pending_bufs(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+
+ if (!vp9)
+ return 0;
+
+ return vp9->frames_num;
+}
+
+static int codec_vp9_alloc_workspace(struct amvdec_core *core,
+ struct codec_vp9 *vp9)
+{
+ /* Allocate some memory for the VP9 decoder's state */
+ vp9->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
+ &vp9->workspace_paddr,
+ GFP_KERNEL);
+ if (!vp9->workspace_vaddr) {
+ dev_err(core->dev, "Failed to allocate VP9 Workspace\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void codec_vp9_setup_workspace(struct amvdec_session *sess,
+ struct codec_vp9 *vp9)
+{
+ struct amvdec_core *core = sess->core;
+ u32 revision = core->platform->revision;
+ dma_addr_t wkaddr = vp9->workspace_paddr;
+
+ amvdec_write_dos(core, HEVCD_IPP_LINEBUFF_BASE, wkaddr + IPP_OFFSET);
+ amvdec_write_dos(core, VP9_RPM_BUFFER, wkaddr + RPM_OFFSET);
+ amvdec_write_dos(core, VP9_SHORT_TERM_RPS, wkaddr + SH_TM_RPS_OFFSET);
+ amvdec_write_dos(core, VP9_PPS_BUFFER, wkaddr + PPS_OFFSET);
+ amvdec_write_dos(core, VP9_SAO_UP, wkaddr + SAO_UP_OFFSET);
+
+ amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER,
+ wkaddr + SWAP_BUF_OFFSET);
+ amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER2,
+ wkaddr + SWAP_BUF2_OFFSET);
+ amvdec_write_dos(core, VP9_SCALELUT, wkaddr + SCALELUT_OFFSET);
+
+ if (core->platform->revision >= VDEC_REVISION_G12A)
+ amvdec_write_dos(core, HEVC_DBLK_CFGE,
+ wkaddr + DBLK_PARA_OFFSET);
+
+ amvdec_write_dos(core, HEVC_DBLK_CFG4, wkaddr + DBLK_PARA_OFFSET);
+ amvdec_write_dos(core, HEVC_DBLK_CFG5, wkaddr + DBLK_DATA_OFFSET);
+ amvdec_write_dos(core, VP9_SEG_MAP_BUFFER, wkaddr + SEG_MAP_OFFSET);
+ amvdec_write_dos(core, VP9_PROB_SWAP_BUFFER, wkaddr + PROB_OFFSET);
+ amvdec_write_dos(core, VP9_COUNT_SWAP_BUFFER, wkaddr + COUNT_OFFSET);
+ amvdec_write_dos(core, LMEM_DUMP_ADR, wkaddr + LMEM_OFFSET);
+
+ if (codec_hevc_use_mmu(revision, sess->pixfmt_cap, vp9->is_10bit)) {
+ amvdec_write_dos(core, HEVC_SAO_MMU_VH0_ADDR,
+ wkaddr + MMU_VBH_OFFSET);
+ amvdec_write_dos(core, HEVC_SAO_MMU_VH1_ADDR,
+ wkaddr + MMU_VBH_OFFSET + (MMU_VBH_SIZE / 2));
+
+ if (revision >= VDEC_REVISION_G12A)
+ amvdec_write_dos(core, HEVC_ASSIST_MMU_MAP_ADDR,
+ vp9->common.mmu_map_paddr);
+ else
+ amvdec_write_dos(core, VP9_MMU_MAP_BUFFER,
+ vp9->common.mmu_map_paddr);
+ }
+}
+
+static int codec_vp9_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9;
+ u32 val;
+ int i;
+ int ret;
+
+ vp9 = kzalloc(sizeof(*vp9), GFP_KERNEL);
+ if (!vp9)
+ return -ENOMEM;
+
+ ret = codec_vp9_alloc_workspace(core, vp9);
+ if (ret)
+ goto free_vp9;
+
+ codec_vp9_setup_workspace(sess, vp9);
+ amvdec_write_dos_bits(core, HEVC_STREAM_CONTROL, BIT(0));
+ /* stream_fifo_hole */
+ if (core->platform->revision >= VDEC_REVISION_G12A)
+ amvdec_write_dos_bits(core, HEVC_STREAM_FIFO_CTL, BIT(29));
+
+ val = amvdec_read_dos(core, HEVC_PARSER_INT_CONTROL) & 0x7fffffff;
+ val |= (3 << 29) | BIT(24) | BIT(22) | BIT(7) | BIT(4) | BIT(0);
+ amvdec_write_dos(core, HEVC_PARSER_INT_CONTROL, val);
+ amvdec_write_dos_bits(core, HEVC_SHIFT_STATUS, BIT(0));
+ amvdec_write_dos(core, HEVC_SHIFT_CONTROL, BIT(10) | BIT(9) |
+ (3 << 6) | BIT(5) | BIT(2) | BIT(1) | BIT(0));
+ amvdec_write_dos(core, HEVC_CABAC_CONTROL, BIT(0));
+ amvdec_write_dos(core, HEVC_PARSER_CORE_CONTROL, BIT(0));
+ amvdec_write_dos(core, HEVC_SHIFT_STARTCODE, 0x00000001);
+
+ amvdec_write_dos(core, VP9_DEC_STATUS_REG, 0);
+
+ amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE, BIT(16));
+ for (i = 0; i < ARRAY_SIZE(vdec_hevc_parser_cmd); ++i)
+ amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE,
+ vdec_hevc_parser_cmd[i]);
+
+ amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0);
+ amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1);
+ amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2);
+ amvdec_write_dos(core, HEVC_PARSER_IF_CONTROL,
+ BIT(5) | BIT(2) | BIT(0));
+
+ amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(0));
+ amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(1));
+
+ amvdec_write_dos(core, VP9_WAIT_FLAG, 1);
+
+ /* clear mailbox interrupt */
+ amvdec_write_dos(core, HEVC_ASSIST_MBOX1_CLR_REG, 1);
+ /* enable mailbox interrupt */
+ amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 1);
+ /* disable PSCALE for hardware sharing */
+ amvdec_write_dos(core, HEVC_PSCALE_CTRL, 0);
+ /* Let the uCode do all the parsing */
+ amvdec_write_dos(core, NAL_SEARCH_CTL, 0x8);
+
+ amvdec_write_dos(core, DECODE_STOP_POS, 0);
+ amvdec_write_dos(core, VP9_DECODE_MODE, DECODE_MODE_SINGLE);
+
+ pr_debug("decode_count: %u; decode_size: %u\n",
+ amvdec_read_dos(core, HEVC_DECODE_COUNT),
+ amvdec_read_dos(core, HEVC_DECODE_SIZE));
+
+ vp9_loop_filter_init(core, vp9);
+
+ INIT_LIST_HEAD(&vp9->ref_frames_list);
+ mutex_init(&vp9->lock);
+ memset(&vp9->ref_frame_map, -1, sizeof(vp9->ref_frame_map));
+ memset(&vp9->next_ref_frame_map, -1, sizeof(vp9->next_ref_frame_map));
+ for (i = 0; i < REFS_PER_FRAME; ++i)
+ vp9->frame_refs[i] = NULL;
+ sess->priv = vp9;
+
+ return 0;
+
+free_vp9:
+ kfree(vp9);
+ return ret;
+}
+
+static int codec_vp9_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+
+ mutex_lock(&vp9->lock);
+ if (vp9->workspace_vaddr)
+ dma_free_coherent(core->dev, SIZE_WORKSPACE,
+ vp9->workspace_vaddr,
+ vp9->workspace_paddr);
+
+ codec_hevc_free_fbc_buffers(sess, &vp9->common);
+ mutex_unlock(&vp9->lock);
+
+ return 0;
+}
+
+/*
+ * Program LAST & GOLDEN frames into the motion compensation reference cache
+ * controller
+ */
+static void codec_vp9_set_mcrcc(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+ u32 val;
+
+ /* Reset mcrcc */
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0x2);
+ /* Disable on I-frame */
+ if (vp9->cur_frame->type == KEY_FRAME || vp9->cur_frame->intra_only) {
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0x0);
+ return;
+ }
+
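+	/* LAST then GOLDEN canvas index, duplicated in both 16-bit halves */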
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, BIT(1));
+ val = amvdec_read_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff;
+ val |= (val << 16);
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL2, val);
+ val = amvdec_read_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR) & 0xffff;
+ val |= (val << 16);
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL3, val);
+
+ /* Enable mcrcc progressive-mode */
+ amvdec_write_dos(core, HEVCD_MCRCC_CTL1, 0xff0);
+}
+
+static void codec_vp9_set_sao(struct amvdec_session *sess,
+ struct vb2_buffer *vb)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+
+ dma_addr_t buf_y_paddr;
+ dma_addr_t buf_u_v_paddr;
+ u32 val;
+
+ if (codec_hevc_use_downsample(sess->pixfmt_cap, vp9->is_10bit))
+ buf_y_paddr =
+ vp9->common.fbc_buffer_paddr[vb->index];
+ else
+ buf_y_paddr =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) {
+ val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0200;
+ amvdec_write_dos(core, HEVC_SAO_CTRL5, val);
+ amvdec_write_dos(core, HEVC_CM_BODY_START_ADDR, buf_y_paddr);
+ }
+
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M) {
+ buf_y_paddr =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf_u_v_paddr =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ amvdec_write_dos(core, HEVC_SAO_Y_START_ADDR, buf_y_paddr);
+ amvdec_write_dos(core, HEVC_SAO_C_START_ADDR, buf_u_v_paddr);
+ amvdec_write_dos(core, HEVC_SAO_Y_WPTR, buf_y_paddr);
+ amvdec_write_dos(core, HEVC_SAO_C_WPTR, buf_u_v_paddr);
+ }
+
+ if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
+ vp9->is_10bit)) {
+ amvdec_write_dos(core, HEVC_CM_HEADER_START_ADDR,
+ vp9->common.mmu_header_paddr[vb->index]);
+ /* use HEVC_CM_HEADER_START_ADDR */
+ amvdec_write_dos_bits(core, HEVC_SAO_CTRL5, BIT(10));
+ }
+
+ amvdec_write_dos(core, HEVC_SAO_Y_LENGTH,
+ amvdec_get_output_size(sess));
+ amvdec_write_dos(core, HEVC_SAO_C_LENGTH,
+ (amvdec_get_output_size(sess) / 2));
+
+ if (core->platform->revision >= VDEC_REVISION_G12A) {
+ amvdec_clear_dos_bits(core, HEVC_DBLK_CFGB,
+ BIT(4) | BIT(5) | BIT(8) | BIT(9));
+ /* enable first, compressed write */
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
+ amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(8));
+
+ /* enable second, uncompressed write */
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
+ amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(9));
+
+ /* dblk pipeline mode=1 for performance */
+ if (sess->width >= 1280)
+ amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(4));
+
+ pr_debug("HEVC_DBLK_CFGB: %08X\n",
+ amvdec_read_dos(core, HEVC_DBLK_CFGB));
+ }
+
+ val = amvdec_read_dos(core, HEVC_SAO_CTRL1) & ~0x3ff0;
+	val |= 0xff0; /* Set endianness for 2-byte swaps (nv12) */
+ if (core->platform->revision < VDEC_REVISION_G12A) {
+ val &= ~0x3;
+ if (!codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
+ val |= BIT(0); /* disable cm compression */
+ /* TOFIX: Handle Amlogic Framebuffer compression */
+ }
+
+ amvdec_write_dos(core, HEVC_SAO_CTRL1, val);
+ pr_debug("HEVC_SAO_CTRL1: %08X\n", val);
+
+ /* no downscale for NV12 */
+ val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0000;
+ amvdec_write_dos(core, HEVC_SAO_CTRL5, val);
+
+ val = amvdec_read_dos(core, HEVCD_IPP_AXIIF_CONFIG) & ~0x30;
+ val |= 0xf;
+ val &= ~BIT(12); /* NV12 */
+ amvdec_write_dos(core, HEVCD_IPP_AXIIF_CONFIG, val);
+}
+
+static dma_addr_t codec_vp9_get_frame_mv_paddr(struct codec_vp9 *vp9,
+ struct vp9_frame *frame)
+{
+ return vp9->workspace_paddr + MPRED_MV_OFFSET +
+ (frame->index * MPRED_MV_BUF_SIZE);
+}
+
+static void codec_vp9_set_mpred_mv(struct amvdec_core *core,
+ struct codec_vp9 *vp9)
+{
+ int mpred_mv_rd_end_addr;
+ int use_prev_frame_mvs = vp9->prev_frame->width ==
+ vp9->cur_frame->width &&
+ vp9->prev_frame->height ==
+ vp9->cur_frame->height &&
+ !vp9->prev_frame->intra_only &&
+ vp9->prev_frame->show &&
+ vp9->prev_frame->type != KEY_FRAME;
+
+ amvdec_write_dos(core, HEVC_MPRED_CTRL3, 0x24122412);
+ amvdec_write_dos(core, HEVC_MPRED_ABV_START_ADDR,
+ vp9->workspace_paddr + MPRED_ABV_OFFSET);
+
+ amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
+ if (use_prev_frame_mvs)
+ amvdec_write_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
+
+ amvdec_write_dos(core, HEVC_MPRED_MV_WR_START_ADDR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame));
+ amvdec_write_dos(core, HEVC_MPRED_MV_WPTR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame));
+
+ amvdec_write_dos(core, HEVC_MPRED_MV_RD_START_ADDR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame));
+ amvdec_write_dos(core, HEVC_MPRED_MV_RPTR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame));
+
+ mpred_mv_rd_end_addr =
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame) +
+ (vp9->lcu_total * MV_MEM_UNIT);
+ amvdec_write_dos(core, HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr);
+}
+
+static void codec_vp9_update_next_ref(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ u32 buf_idx = vp9->cur_frame->index;
+ int ref_index = 0;
+ int refresh_frame_flags;
+ int mask;
+
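+	/* A key frame refreshes every reference slot */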
+ refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ?
+ 0xff : param->p.refresh_frame_flags;
+
+ for (mask = refresh_frame_flags; mask; mask >>= 1) {
+ pr_debug("mask=%08X; ref_index=%d\n", mask, ref_index);
+ if (mask & 1)
+ vp9->next_ref_frame_map[ref_index] = buf_idx;
+ else
+ vp9->next_ref_frame_map[ref_index] =
+ vp9->ref_frame_map[ref_index];
+
+ ++ref_index;
+ }
+
+ for (; ref_index < REF_FRAMES; ++ref_index)
+ vp9->next_ref_frame_map[ref_index] =
+ vp9->ref_frame_map[ref_index];
+}
+
+static void codec_vp9_save_refs(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int i;
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ const int ref = (param->p.ref_info >>
+ (((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7;
+
+ if (vp9->ref_frame_map[ref] < 0)
+ continue;
+
+ pr_warn("%s: FIXME, would need to save ref %d\n",
+ __func__, vp9->ref_frame_map[ref]);
+ }
+}
+
+static void codec_vp9_update_ref(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int ref_index = 0;
+ int mask;
+ int refresh_frame_flags;
+
+ if (!vp9->cur_frame)
+ return;
+
+ refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ?
+ 0xff : param->p.refresh_frame_flags;
+
+ for (mask = refresh_frame_flags; mask; mask >>= 1) {
+ vp9->ref_frame_map[ref_index] =
+ vp9->next_ref_frame_map[ref_index];
+ ++ref_index;
+ }
+
+ if (param->p.show_existing_frame)
+ return;
+
+ for (; ref_index < REF_FRAMES; ++ref_index)
+ vp9->ref_frame_map[ref_index] =
+ vp9->next_ref_frame_map[ref_index];
+}
+
+static struct vp9_frame *codec_vp9_get_frame_by_idx(struct codec_vp9 *vp9,
+ int idx)
+{
+ struct vp9_frame *frame;
+
+ list_for_each_entry(frame, &vp9->ref_frames_list, list) {
+ if (frame->index == idx)
+ return frame;
+ }
+
+ return NULL;
+}
+
+static void codec_vp9_sync_ref(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int i;
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ const int ref = (param->p.ref_info >>
+ (((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7;
+ const int idx = vp9->ref_frame_map[ref];
+
+ vp9->frame_refs[i] = codec_vp9_get_frame_by_idx(vp9, idx);
+ if (!vp9->frame_refs[i])
+ pr_warn("%s: couldn't find VP9 ref %d\n", __func__,
+ idx);
+ }
+}
+
+static void codec_vp9_set_refs(struct amvdec_session *sess,
+ struct codec_vp9 *vp9)
+{
+ struct amvdec_core *core = sess->core;
+ int i;
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ struct vp9_frame *frame = vp9->frame_refs[i];
+ int id_y;
+ int id_u_v;
+
+ if (!frame)
+ continue;
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) {
+ id_y = frame->index;
+ id_u_v = id_y;
+ } else {
+ id_y = frame->index * 2;
+ id_u_v = id_y + 1;
+ }
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR,
+ (id_u_v << 16) | (id_u_v << 8) | id_y);
+ }
+}
+
+static void codec_vp9_set_mc(struct amvdec_session *sess,
+ struct codec_vp9 *vp9)
+{
+ struct amvdec_core *core = sess->core;
+ u32 scale = 0;
+ u32 sz;
+ int i;
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
+ codec_vp9_set_refs(sess, vp9);
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
+ (16 << 8) | 1);
+ codec_vp9_set_refs(sess, vp9);
+
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_TBL_ACCCONFIG, BIT(2));
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ if (!vp9->frame_refs[i])
+ continue;
+
+ if (vp9->frame_refs[i]->width != vp9->width ||
+ vp9->frame_refs[i]->height != vp9->height)
+ scale = 1;
+
+ sz = amvdec_am21c_body_size(vp9->frame_refs[i]->width,
+ vp9->frame_refs[i]->height);
+
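+		/* Ref width/height, Q14 width/height scale ratios, body size >> 5 */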
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ vp9->frame_refs[i]->width);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ vp9->frame_refs[i]->height);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ (vp9->frame_refs[i]->width << 14) /
+ vp9->width);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ (vp9->frame_refs[i]->height << 14) /
+ vp9->height);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, sz >> 5);
+ }
+
+ amvdec_write_dos(core, VP9D_MPP_REF_SCALE_ENBL, scale);
+}
+
+static struct vp9_frame *codec_vp9_get_new_frame(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ union rpm_param *param = &vp9->rpm_param;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vp9_frame *new_frame;
+
+ new_frame = kzalloc(sizeof(*new_frame), GFP_KERNEL);
+ if (!new_frame)
+ return NULL;
+
+ vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx);
+ if (!vbuf) {
+ dev_err(sess->core->dev, "No dst buffer available\n");
+ kfree(new_frame);
+ return NULL;
+ }
+
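+	/* Re-queue buffers whose index is still tracked in the frame list */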
+ while (codec_vp9_get_frame_by_idx(vp9, vbuf->vb2_buf.index)) {
+ struct vb2_v4l2_buffer *old_vbuf = vbuf;
+
+ vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx);
+ v4l2_m2m_buf_queue(sess->m2m_ctx, old_vbuf);
+ if (!vbuf) {
+ dev_err(sess->core->dev, "No dst buffer available\n");
+ kfree(new_frame);
+ return NULL;
+ }
+ }
+
+ new_frame->vbuf = vbuf;
+ new_frame->index = vbuf->vb2_buf.index;
+ new_frame->intra_only = param->p.intra_only;
+ new_frame->show = param->p.show_frame;
+ new_frame->type = param->p.frame_type;
+ new_frame->width = vp9->width;
+ new_frame->height = vp9->height;
+ list_add_tail(&new_frame->list, &vp9->ref_frames_list);
+ vp9->frames_num++;
+
+ return new_frame;
+}
+
+static void codec_vp9_show_existing_frame(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+
+ if (!param->p.show_existing_frame)
+ return;
+
+ pr_debug("showing frame %u\n", param->p.frame_to_show_idx);
+}
+
+static void codec_vp9_rm_noshow_frame(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ struct vp9_frame *tmp;
+
+ list_for_each_entry(tmp, &vp9->ref_frames_list, list) {
+ if (tmp->show)
+ continue;
+
+ pr_debug("rm noshow: %u\n", tmp->index);
+ v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf);
+ list_del(&tmp->list);
+ kfree(tmp);
+ vp9->frames_num--;
+ return;
+ }
+}
+
+static void codec_vp9_process_frame(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+ union rpm_param *param = &vp9->rpm_param;
+ int intra_only;
+
+ if (!param->p.show_frame)
+ codec_vp9_rm_noshow_frame(sess);
+
+ vp9->cur_frame = codec_vp9_get_new_frame(sess);
+ if (!vp9->cur_frame)
+ return;
+
+ pr_debug("frame %d: type: %08X; show_exist: %u; show: %u, intra_only: %u\n",
+ vp9->cur_frame->index,
+ param->p.frame_type, param->p.show_existing_frame,
+ param->p.show_frame, param->p.intra_only);
+
+ if (param->p.frame_type != KEY_FRAME)
+ codec_vp9_sync_ref(vp9);
+ codec_vp9_update_next_ref(vp9);
+ codec_vp9_show_existing_frame(vp9);
+
+ if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
+ vp9->is_10bit))
+ codec_hevc_fill_mmu_map(sess, &vp9->common,
+ &vp9->cur_frame->vbuf->vb2_buf);
+
+ intra_only = param->p.show_frame ? 0 : param->p.intra_only;
+
+	/* Set up motion prediction, or clear it for key/intra-only frames */
+ if (param->p.frame_type != KEY_FRAME && !intra_only) {
+ codec_vp9_set_mc(sess, vp9);
+ codec_vp9_set_mpred_mv(core, vp9);
+ } else {
+ amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
+ }
+
+ amvdec_write_dos(core, HEVC_PARSER_PICTURE_SIZE,
+ (vp9->height << 16) | vp9->width);
+ codec_vp9_set_mcrcc(sess);
+ codec_vp9_set_sao(sess, &vp9->cur_frame->vbuf->vb2_buf);
+
+ vp9_loop_filter_frame_init(core, &vp9->seg_4lf,
+ &vp9->lfi, &vp9->lf,
+ vp9->default_filt_lvl);
+
+ /* ask uCode to start decoding */
+ amvdec_write_dos(core, VP9_DEC_STATUS_REG, VP9_10B_DECODE_SLICE);
+}
+
+static void codec_vp9_process_lf(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int i;
+
+ vp9->lf.mode_ref_delta_enabled = param->p.mode_ref_delta_enabled;
+ vp9->lf.sharpness_level = param->p.sharpness_level;
+ vp9->default_filt_lvl = param->p.filter_level;
+ vp9->seg_4lf.enabled = param->p.seg_enabled;
+ vp9->seg_4lf.abs_delta = param->p.seg_abs_delta;
+
+ for (i = 0; i < 4; i++)
+ vp9->lf.ref_deltas[i] = param->p.ref_deltas[i];
+
+ for (i = 0; i < 2; i++)
+ vp9->lf.mode_deltas[i] = param->p.mode_deltas[i];
+
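+	/* seg_lf_info: bit 15 = enabled, bit 8 = sign, bits [5:0] = magnitude */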
+ for (i = 0; i < MAX_SEGMENTS; i++)
+ vp9->seg_4lf.feature_mask[i] =
+ (param->p.seg_lf_info[i] & 0x8000) ?
+ (1 << SEG_LVL_ALT_LF) : 0;
+
+ for (i = 0; i < MAX_SEGMENTS; i++)
+ vp9->seg_4lf.feature_data[i][SEG_LVL_ALT_LF] =
+ (param->p.seg_lf_info[i] & 0x100) ?
+ -(param->p.seg_lf_info[i] & 0x3f)
+ : (param->p.seg_lf_info[i] & 0x3f);
+}
+
+static void codec_vp9_resume(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+
+ mutex_lock(&vp9->lock);
+ if (codec_hevc_setup_buffers(sess, &vp9->common, vp9->is_10bit)) {
+ mutex_unlock(&vp9->lock);
+ amvdec_abort(sess);
+ return;
+ }
+
+ codec_vp9_setup_workspace(sess, vp9);
+ codec_hevc_setup_decode_head(sess, vp9->is_10bit);
+ codec_vp9_process_lf(vp9);
+ codec_vp9_process_frame(sess);
+
+ mutex_unlock(&vp9->lock);
+}
+
+/*
+ * The RPM section within the workspace contains
+ * information regarding the parsed bitstream
+ */
+static void codec_vp9_fetch_rpm(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ u16 *rpm_vaddr = vp9->workspace_vaddr + RPM_OFFSET;
+ int i, j;
+
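+	/* RPM words come in groups of 4 in reverse order; swap while copying */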
+ for (i = 0; i < RPM_BUF_SIZE; i += 4)
+ for (j = 0; j < 4; j++)
+ vp9->rpm_param.l.data[i + j] = rpm_vaddr[i + 3 - j];
+}
+
+static int codec_vp9_process_rpm(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int src_changed = 0;
+ int is_10bit = 0;
+ int pic_width_64 = ALIGN(param->p.width, 64);
+ int pic_height_32 = ALIGN(param->p.height, 32);
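+	/* Number of whole LCUs covering the aligned picture (rounded up) */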
+ int pic_width_lcu = (pic_width_64 % LCU_SIZE) ?
+ pic_width_64 / LCU_SIZE + 1
+ : pic_width_64 / LCU_SIZE;
+ int pic_height_lcu = (pic_height_32 % LCU_SIZE) ?
+ pic_height_32 / LCU_SIZE + 1
+ : pic_height_32 / LCU_SIZE;
+ vp9->lcu_total = pic_width_lcu * pic_height_lcu;
+
+ if (param->p.bit_depth == 10)
+ is_10bit = 1;
+
+ if (vp9->width != param->p.width || vp9->height != param->p.height ||
+ vp9->is_10bit != is_10bit)
+ src_changed = 1;
+
+ vp9->width = param->p.width;
+ vp9->height = param->p.height;
+ vp9->is_10bit = is_10bit;
+
+ pr_debug("width: %u; height: %u; is_10bit: %d; src_changed: %d\n",
+ vp9->width, vp9->height, is_10bit, src_changed);
+
+ return src_changed;
+}
+
+static bool codec_vp9_is_ref(struct codec_vp9 *vp9, struct vp9_frame *frame)
+{
+ int i;
+
+ for (i = 0; i < REF_FRAMES; ++i)
+ if (vp9->ref_frame_map[i] == frame->index)
+ return true;
+
+ return false;
+}
+
+static void codec_vp9_show_frame(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ struct vp9_frame *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) {
+ if (!tmp->show || tmp == vp9->cur_frame)
+ continue;
+
+ if (!tmp->done) {
+			pr_debug("Done %u\n", tmp->index);
+ amvdec_dst_buf_done(sess, tmp->vbuf, V4L2_FIELD_NONE);
+ tmp->done = 1;
+ vp9->frames_num--;
+ }
+
+ if (codec_vp9_is_ref(vp9, tmp) || tmp == vp9->prev_frame)
+ continue;
+
+ pr_debug("deleting %d\n", tmp->index);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
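+/*
+ * Merge the previous probability of a tree node with its measured
+ * left/right branch counts:
+ * new = (pre_prob * (256 - factor) + get_prob * factor) / 256,
+ * where factor grows with the number of observed counts.
+ */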
+static void vp9_tree_merge_probs(unsigned int *prev_prob,
+ unsigned int *cur_prob,
+ int coef_node_start, int tree_left,
+ int tree_right,
+ int tree_i, int node)
+{
+ int prob_32, prob_res, prob_shift;
+ int pre_prob, new_prob;
+ int den, m_count, get_prob, factor;
+
+ prob_32 = prev_prob[coef_node_start / 4 * 2];
+ prob_res = coef_node_start & 3;
+ prob_shift = prob_res * 8;
+ pre_prob = (prob_32 >> prob_shift) & 0xff;
+
+ den = tree_left + tree_right;
+
+ if (den == 0) {
+ new_prob = pre_prob;
+ } else {
+ m_count = min(den, MODE_MV_COUNT_SAT);
+ get_prob =
+ clip_prob(div_r32(((int64_t)tree_left * 256 +
+ (den >> 1)),
+ den));
+
+ /* weighted_prob */
+ factor = count_to_update_factor[m_count];
+ new_prob = round_power_of_two(pre_prob * (256 - factor) +
+ get_prob * factor, 8);
+ }
+
+ cur_prob[coef_node_start / 4 * 2] =
+ (cur_prob[coef_node_start / 4 * 2] & (~(0xff << prob_shift))) |
+ (new_prob << prob_shift);
+}
+
+static void adapt_coef_probs_cxt(unsigned int *prev_prob,
+ unsigned int *cur_prob,
+ unsigned int *count,
+ int update_factor,
+ int cxt_num,
+ int coef_cxt_start,
+ int coef_count_cxt_start)
+{
+ int prob_32, prob_res, prob_shift;
+ int pre_prob, new_prob;
+ int num, den, m_count, get_prob, factor;
+ int node, coef_node_start;
+ int count_sat = 24;
+ int cxt;
+
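+	/* 5 counters per context (n0, n1, n2, eob, !eob) give 3 binary branches */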
+ for (cxt = 0; cxt < cxt_num; cxt++) {
+ const int n0 = count[coef_count_cxt_start];
+ const int n1 = count[coef_count_cxt_start + 1];
+ const int n2 = count[coef_count_cxt_start + 2];
+ const int neob = count[coef_count_cxt_start + 3];
+ const int nneob = count[coef_count_cxt_start + 4];
+ const unsigned int branch_ct[3][2] = {
+ { neob, nneob },
+ { n0, n1 + n2 },
+ { n1, n2 }
+ };
+
+ coef_node_start = coef_cxt_start;
+ for (node = 0 ; node < 3 ; node++) {
+ prob_32 = prev_prob[coef_node_start / 4 * 2];
+ prob_res = coef_node_start & 3;
+ prob_shift = prob_res * 8;
+ pre_prob = (prob_32 >> prob_shift) & 0xff;
+
+ /* get binary prob */
+ num = branch_ct[node][0];
+ den = branch_ct[node][0] + branch_ct[node][1];
+ m_count = min(den, count_sat);
+
+ get_prob = (den == 0) ?
+ 128u :
+ clip_prob(div_r32(((int64_t)num * 256 +
+ (den >> 1)), den));
+
+ factor = update_factor * m_count / count_sat;
+ new_prob =
+ round_power_of_two(pre_prob * (256 - factor) +
+ get_prob * factor, 8);
+
+ cur_prob[coef_node_start / 4 * 2] =
+ (cur_prob[coef_node_start / 4 * 2] &
+ (~(0xff << prob_shift))) |
+ (new_prob << prob_shift);
+
+ coef_node_start += 1;
+ }
+
+ coef_cxt_start = coef_cxt_start + 3;
+ coef_count_cxt_start = coef_count_cxt_start + 5;
+ }
+}
+
+static void adapt_coef_probs(int prev_kf, int cur_kf, int pre_fc,
+ unsigned int *prev_prob, unsigned int *cur_prob,
+ unsigned int *count)
+{
+ int tx_size, coef_tx_size_start, coef_count_tx_size_start;
+ int plane, coef_plane_start, coef_count_plane_start;
+ int type, coef_type_start, coef_count_type_start;
+ int band, coef_band_start, coef_count_band_start;
+ int cxt_num;
+ int coef_cxt_start, coef_count_cxt_start;
+ int node, coef_node_start, coef_count_node_start;
+
+ int tree_i, tree_left, tree_right;
+ int mvd_i;
+
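+	/* 128 on the first inter frame after a key frame, 112 otherwise */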
+ int update_factor = cur_kf ? 112 : (prev_kf ? 128 : 112);
+
+ int prob_32;
+ int prob_res;
+ int prob_shift;
+ int pre_prob;
+
+ int den;
+ int get_prob;
+ int m_count;
+ int factor;
+
+ int new_prob;
+
+ for (tx_size = 0 ; tx_size < 4 ; tx_size++) {
+ coef_tx_size_start = VP9_COEF_START +
+ tx_size * 4 * VP9_COEF_SIZE_ONE_SET;
+ coef_count_tx_size_start = VP9_COEF_COUNT_START +
+ tx_size * 4 * VP9_COEF_COUNT_SIZE_ONE_SET;
+ coef_plane_start = coef_tx_size_start;
+ coef_count_plane_start = coef_count_tx_size_start;
+
+ for (plane = 0 ; plane < 2 ; plane++) {
+ coef_type_start = coef_plane_start;
+ coef_count_type_start = coef_count_plane_start;
+
+ for (type = 0 ; type < 2 ; type++) {
+ coef_band_start = coef_type_start;
+ coef_count_band_start = coef_count_type_start;
+
+ for (band = 0 ; band < 6 ; band++) {
+ if (band == 0)
+ cxt_num = 3;
+ else
+ cxt_num = 6;
+ coef_cxt_start = coef_band_start;
+ coef_count_cxt_start =
+ coef_count_band_start;
+
+ adapt_coef_probs_cxt(prev_prob,
+ cur_prob,
+ count,
+ update_factor,
+ cxt_num,
+ coef_cxt_start,
+ coef_count_cxt_start);
+
+ if (band == 0) {
+ coef_band_start += 10;
+ coef_count_band_start += 15;
+ } else {
+ coef_band_start += 18;
+ coef_count_band_start += 30;
+ }
+ }
+ coef_type_start += VP9_COEF_SIZE_ONE_SET;
+ coef_count_type_start +=
+ VP9_COEF_COUNT_SIZE_ONE_SET;
+ }
+
+ coef_plane_start += 2 * VP9_COEF_SIZE_ONE_SET;
+ coef_count_plane_start +=
+ 2 * VP9_COEF_COUNT_SIZE_ONE_SET;
+ }
+ }
+
+ if (cur_kf == 0) {
+ /* mode_mv_merge_probs - merge_intra_inter_prob */
+ for (coef_count_node_start = VP9_INTRA_INTER_COUNT_START;
+ coef_count_node_start < (VP9_MV_CLASS0_HP_1_COUNT_START +
+ VP9_MV_CLASS0_HP_1_COUNT_SIZE);
+ coef_count_node_start += 2) {
+ if (coef_count_node_start ==
+ VP9_INTRA_INTER_COUNT_START)
+ coef_node_start = VP9_INTRA_INTER_START;
+ else if (coef_count_node_start ==
+ VP9_COMP_INTER_COUNT_START)
+ coef_node_start = VP9_COMP_INTER_START;
+ else if (coef_count_node_start ==
+ VP9_TX_MODE_COUNT_START)
+ coef_node_start = VP9_TX_MODE_START;
+ else if (coef_count_node_start ==
+ VP9_SKIP_COUNT_START)
+ coef_node_start = VP9_SKIP_START;
+ else if (coef_count_node_start ==
+ VP9_MV_SIGN_0_COUNT_START)
+ coef_node_start = VP9_MV_SIGN_0_START;
+ else if (coef_count_node_start ==
+ VP9_MV_SIGN_1_COUNT_START)
+ coef_node_start = VP9_MV_SIGN_1_START;
+ else if (coef_count_node_start ==
+ VP9_MV_BITS_0_COUNT_START)
+ coef_node_start = VP9_MV_BITS_0_START;
+ else if (coef_count_node_start ==
+ VP9_MV_BITS_1_COUNT_START)
+ coef_node_start = VP9_MV_BITS_1_START;
+ else /* node_start == VP9_MV_CLASS0_HP_0_COUNT_START */
+ coef_node_start = VP9_MV_CLASS0_HP_0_START;
+
+ den = count[coef_count_node_start] +
+ count[coef_count_node_start + 1];
+
+ prob_32 = prev_prob[coef_node_start / 4 * 2];
+ prob_res = coef_node_start & 3;
+ prob_shift = prob_res * 8;
+ pre_prob = (prob_32 >> prob_shift) & 0xff;
+
+ if (den == 0) {
+ new_prob = pre_prob;
+ } else {
+ m_count = min(den, MODE_MV_COUNT_SAT);
+ get_prob =
+ clip_prob(div_r32(((int64_t)
+ count[coef_count_node_start] * 256 +
+ (den >> 1)),
+ den));
+
+ /* weighted prob */
+ factor = count_to_update_factor[m_count];
+ new_prob =
+ round_power_of_two(pre_prob *
+ (256 - factor) +
+ get_prob * factor,
+ 8);
+ }
+
+ cur_prob[coef_node_start / 4 * 2] =
+ (cur_prob[coef_node_start / 4 * 2] &
+ (~(0xff << prob_shift))) |
+ (new_prob << prob_shift);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_node_start = VP9_INTER_MODE_START;
+ coef_count_node_start = VP9_INTER_MODE_COUNT_START;
+ for (tree_i = 0 ; tree_i < 7 ; tree_i++) {
+ for (node = 0 ; node < 3 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 2:
+ tree_left = count[start + 1];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 2];
+ tree_right = count[start + 0] +
+ count[start + 1] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_count_node_start = coef_count_node_start + 4;
+ }
+
+ coef_node_start = VP9_IF_Y_MODE_START;
+ coef_count_node_start = VP9_IF_Y_MODE_COUNT_START;
+ for (tree_i = 0 ; tree_i < 14 ; tree_i++) {
+ for (node = 0 ; node < 9 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 8:
+ tree_left =
+ count[start + D153_PRED];
+ tree_right =
+ count[start + D207_PRED];
+ break;
+ case 7:
+ tree_left =
+ count[start + D63_PRED];
+ tree_right =
+ count[start + D207_PRED] +
+ count[start + D153_PRED];
+ break;
+ case 6:
+ tree_left =
+ count[start + D45_PRED];
+ tree_right =
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ case 5:
+ tree_left =
+ count[start + D135_PRED];
+ tree_right =
+ count[start + D117_PRED];
+ break;
+ case 4:
+ tree_left =
+ count[start + H_PRED];
+ tree_right =
+ count[start + D117_PRED] +
+ count[start + D135_PRED];
+ break;
+ case 3:
+ tree_left =
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED];
+ tree_right =
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ case 2:
+ tree_left =
+ count[start + V_PRED];
+ tree_right =
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED] +
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ case 1:
+ tree_left =
+ count[start + TM_PRED];
+ tree_right =
+ count[start + V_PRED] +
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED] +
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ default:
+ tree_left =
+ count[start + DC_PRED];
+ tree_right =
+ count[start + TM_PRED] +
+ count[start + V_PRED] +
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED] +
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start = coef_count_node_start + 10;
+ }
+
+ coef_node_start = VP9_PARTITION_P_START;
+ coef_count_node_start = VP9_PARTITION_P_COUNT_START;
+ for (tree_i = 0 ; tree_i < 16 ; tree_i++) {
+ for (node = 0 ; node < 3 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 2:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_count_node_start = coef_count_node_start + 4;
+ }
+
+ coef_node_start = VP9_INTERP_START;
+ coef_count_node_start = VP9_INTERP_COUNT_START;
+ for (tree_i = 0 ; tree_i < 4 ; tree_i++) {
+ for (node = 0 ; node < 2 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start = coef_count_node_start + 3;
+ }
+
+ coef_node_start = VP9_MV_JOINTS_START;
+ coef_count_node_start = VP9_MV_JOINTS_COUNT_START;
+ for (tree_i = 0 ; tree_i < 1 ; tree_i++) {
+ for (node = 0 ; node < 3 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 2:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start = coef_count_node_start + 4;
+ }
+
+ for (mvd_i = 0 ; mvd_i < 2 ; mvd_i++) {
+ coef_node_start = mvd_i ? VP9_MV_CLASSES_1_START :
+ VP9_MV_CLASSES_0_START;
+ coef_count_node_start = mvd_i ?
+ VP9_MV_CLASSES_1_COUNT_START :
+ VP9_MV_CLASSES_0_COUNT_START;
+ tree_i = 0;
+ for (node = 0; node < 10; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 9:
+ tree_left = count[start + 9];
+ tree_right = count[start + 10];
+ break;
+ case 8:
+ tree_left = count[start + 7];
+ tree_right = count[start + 8];
+ break;
+ case 7:
+ tree_left = count[start + 7] +
+ count[start + 8];
+ tree_right = count[start + 9] +
+ count[start + 10];
+ break;
+ case 6:
+ tree_left = count[start + 6];
+ tree_right = count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ case 5:
+ tree_left = count[start + 4];
+ tree_right = count[start + 5];
+ break;
+ case 4:
+ tree_left = count[start + 4] +
+ count[start + 5];
+ tree_right = count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ case 3:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 2:
+ tree_left = count[start + 2] +
+ count[start + 3];
+ tree_right = count[start + 4] +
+ count[start + 5] +
+ count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3] +
+ count[start + 4] +
+ count[start + 5] +
+ count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3] +
+ count[start + 4] +
+ count[start + 5] +
+ count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_node_start = mvd_i ? VP9_MV_CLASS0_1_START :
+ VP9_MV_CLASS0_0_START;
+ coef_count_node_start = mvd_i ?
+ VP9_MV_CLASS0_1_COUNT_START :
+ VP9_MV_CLASS0_0_COUNT_START;
+ tree_i = 0;
+ node = 0;
+ tree_left = count[coef_count_node_start + 0];
+ tree_right = count[coef_count_node_start + 1];
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+ coef_node_start = mvd_i ? VP9_MV_CLASS0_FP_1_START :
+ VP9_MV_CLASS0_FP_0_START;
+ coef_count_node_start = mvd_i ?
+ VP9_MV_CLASS0_FP_1_COUNT_START :
+ VP9_MV_CLASS0_FP_0_COUNT_START;
+
+ for (tree_i = 0; tree_i < 3; tree_i++) {
+ for (node = 0; node < 3; node++) {
+ unsigned int start =
+ coef_count_node_start;
+ switch (node) {
+ case 2:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob,
+ cur_prob,
+ coef_node_start,
+ tree_left,
+ tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start =
+ coef_count_node_start + 4;
+ }
+ }
+ }
+}
+
+static irqreturn_t codec_vp9_threaded_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+ u32 dec_status = amvdec_read_dos(core, VP9_DEC_STATUS_REG);
+ u32 prob_status = amvdec_read_dos(core, VP9_ADAPT_PROB_REG);
+ int i;
+
+ if (!vp9)
+ return IRQ_HANDLED;
+
+ mutex_lock(&vp9->lock);
+ if (dec_status != VP9_HEAD_PARSER_DONE) {
+ dev_err(core->dev_dec, "Unrecognized dec_status: %08X\n",
+ dec_status);
+ amvdec_abort(sess);
+ goto unlock;
+ }
+
+ pr_debug("ISR: %08X;%08X\n", dec_status, prob_status);
+ sess->keyframe_found = 1;
+
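+	/* 0xfd requests prob adaptation; the upper bits select the prob buffer */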
+ if ((prob_status & 0xff) == 0xfd && vp9->cur_frame) {
+ /* VP9_REQ_ADAPT_PROB */
+ u8 *prev_prob_b = ((u8 *)vp9->workspace_vaddr +
+ PROB_OFFSET) +
+ ((prob_status >> 8) * 0x1000);
+ u8 *cur_prob_b = ((u8 *)vp9->workspace_vaddr +
+ PROB_OFFSET) + 0x4000;
+ u8 *count_b = (u8 *)vp9->workspace_vaddr +
+ COUNT_OFFSET;
+ int last_frame_type = vp9->prev_frame ?
+ vp9->prev_frame->type :
+ KEY_FRAME;
+
+ adapt_coef_probs(last_frame_type == KEY_FRAME,
+ vp9->cur_frame->type == KEY_FRAME ? 1 : 0,
+ prob_status >> 8,
+ (unsigned int *)prev_prob_b,
+ (unsigned int *)cur_prob_b,
+ (unsigned int *)count_b);
+
+ memcpy(prev_prob_b, cur_prob_b, ADAPT_PROB_SIZE);
+ amvdec_write_dos(core, VP9_ADAPT_PROB_REG, 0);
+ }
+
+ /* Invalidate first 3 refs */
+	for (i = 0; i < REFS_PER_FRAME; ++i)
+ vp9->frame_refs[i] = NULL;
+
+ vp9->prev_frame = vp9->cur_frame;
+ codec_vp9_update_ref(vp9);
+
+ codec_vp9_fetch_rpm(sess);
+ if (codec_vp9_process_rpm(vp9)) {
+ amvdec_src_change(sess, vp9->width, vp9->height, 16);
+
+ /* No frame is actually processed */
+ vp9->cur_frame = NULL;
+
+ /* Show the remaining frame */
+ codec_vp9_show_frame(sess);
+
+ /* FIXME: Save refs for resized frame */
+ if (vp9->frames_num)
+ codec_vp9_save_refs(vp9);
+
+ goto unlock;
+ }
+
+ codec_vp9_process_lf(vp9);
+ codec_vp9_process_frame(sess);
+ codec_vp9_show_frame(sess);
+
+unlock:
+ mutex_unlock(&vp9->lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t codec_vp9_isr(struct amvdec_session *sess)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+struct amvdec_codec_ops codec_vp9_ops = {
+ .start = codec_vp9_start,
+ .stop = codec_vp9_stop,
+ .isr = codec_vp9_isr,
+ .threaded_isr = codec_vp9_threaded_isr,
+ .num_pending_bufs = codec_vp9_num_pending_bufs,
+ .drain = codec_vp9_flush_output,
+ .resume = codec_vp9_resume,
+};
diff --git a/drivers/staging/media/meson/vdec/codec_vp9.h b/drivers/staging/media/meson/vdec/codec_vp9.h
new file mode 100644
index 000000000000..62db65a2b939
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_vp9.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr>
+ */
+
+#ifndef __MESON_VDEC_CODEC_VP9_H_
+#define __MESON_VDEC_CODEC_VP9_H_
+
+#include "vdec.h"
+
+extern struct amvdec_codec_ops codec_vp9_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/dos_regs.h b/drivers/staging/media/meson/vdec/dos_regs.h
new file mode 100644
index 000000000000..abd810542dbb
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/dos_regs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_DOS_REGS_H_
+#define __MESON_VDEC_DOS_REGS_H_
+
+/* DOS registers */
+#define VDEC_ASSIST_AMR1_INT8 0x00b4
+
+#define ASSIST_MBOX1_CLR_REG 0x01d4
+#define ASSIST_MBOX1_MASK 0x01d8
+
+#define MPSR 0x0c04
+#define MCPU_INTR_MSK 0x0c10
+#define CPSR 0x0c84
+
+#define IMEM_DMA_CTRL 0x0d00
+#define IMEM_DMA_ADR 0x0d04
+#define IMEM_DMA_COUNT 0x0d08
+#define LMEM_DMA_CTRL 0x0d40
+
+#define MC_STATUS0 0x2424
+#define MC_CTRL1 0x242c
+
+#define PSCALE_RST 0x2440
+#define PSCALE_CTRL 0x2444
+#define PSCALE_BMEM_ADDR 0x247c
+#define PSCALE_BMEM_DAT 0x2480
+
+#define DBLK_CTRL 0x2544
+#define DBLK_STATUS 0x254c
+
+#define GCLK_EN 0x260c
+#define MDEC_PIC_DC_CTRL 0x2638
+#define MDEC_PIC_DC_STATUS 0x263c
+#define ANC0_CANVAS_ADDR 0x2640
+#define MDEC_PIC_DC_THRESH 0x26e0
+
+/* Firmware interface registers */
+#define AV_SCRATCH_0 0x2700
+#define AV_SCRATCH_1 0x2704
+#define AV_SCRATCH_2 0x2708
+#define AV_SCRATCH_3 0x270c
+#define AV_SCRATCH_4 0x2710
+#define AV_SCRATCH_5 0x2714
+#define AV_SCRATCH_6 0x2718
+#define AV_SCRATCH_7 0x271c
+#define AV_SCRATCH_8 0x2720
+#define AV_SCRATCH_9 0x2724
+#define AV_SCRATCH_A 0x2728
+#define AV_SCRATCH_B 0x272c
+#define AV_SCRATCH_C 0x2730
+#define AV_SCRATCH_D 0x2734
+#define AV_SCRATCH_E 0x2738
+#define AV_SCRATCH_F 0x273c
+#define AV_SCRATCH_G 0x2740
+#define AV_SCRATCH_H 0x2744
+#define AV_SCRATCH_I 0x2748
+#define AV_SCRATCH_J 0x274c
+#define AV_SCRATCH_K 0x2750
+#define AV_SCRATCH_L 0x2754
+
+#define MPEG1_2_REG 0x3004
+#define PIC_HEAD_INFO 0x300c
+#define POWER_CTL_VLD 0x3020
+#define M4_CONTROL_REG 0x30a4
+
+/* Stream Buffer (stbuf) regs */
+#define VLD_MEM_VIFIFO_START_PTR 0x3100
+#define VLD_MEM_VIFIFO_CURR_PTR 0x3104
+#define VLD_MEM_VIFIFO_END_PTR 0x3108
+#define VLD_MEM_VIFIFO_CONTROL 0x3110
+ #define MEM_FIFO_CNT_BIT 16
+ #define MEM_FILL_ON_LEVEL BIT(10)
+ #define MEM_CTRL_EMPTY_EN BIT(2)
+ #define MEM_CTRL_FILL_EN BIT(1)
+#define VLD_MEM_VIFIFO_WP 0x3114
+#define VLD_MEM_VIFIFO_RP 0x3118
+#define VLD_MEM_VIFIFO_LEVEL 0x311c
+#define VLD_MEM_VIFIFO_BUF_CNTL 0x3120
+ #define MEM_BUFCTRL_MANUAL BIT(1)
+#define VLD_MEM_VIFIFO_WRAP_COUNT 0x3144
+
+#define DCAC_DMA_CTRL 0x3848
+
+#define DOS_SW_RESET0 0xfc00
+#define DOS_GCLK_EN0 0xfc04
+#define DOS_GEN_CTRL0 0xfc08
+#define DOS_MEM_PD_VDEC 0xfcc0
+#define DOS_MEM_PD_HEVC 0xfccc
+#define DOS_SW_RESET3 0xfcd0
+#define DOS_GCLK_EN3 0xfcd4
+#define DOS_VDEC_MCRCC_STALL_CTRL 0xfd00
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
new file mode 100644
index 000000000000..4632346f04a9
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ *
+ * The Elementary Stream Parser is a HW bitstream parser.
+ * It reads bitstream buffers and feeds them to the VIFIFO.
+ */
+
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/reset.h>
+#include <linux/interrupt.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "dos_regs.h"
+#include "esparser.h"
+#include "vdec_helpers.h"
+
+/* PARSER REGS (CBUS) */
+#define PARSER_CONTROL 0x00
+ #define ES_PACK_SIZE_BIT 8
+ #define ES_WRITE BIT(5)
+ #define ES_SEARCH BIT(1)
+ #define ES_PARSER_START BIT(0)
+#define PARSER_FETCH_ADDR 0x4
+#define PARSER_FETCH_CMD 0x8
+#define PARSER_CONFIG 0x14
+ #define PS_CFG_MAX_FETCH_CYCLE_BIT 0
+ #define PS_CFG_STARTCODE_WID_24_BIT 10
+ #define PS_CFG_MAX_ES_WR_CYCLE_BIT 12
+ #define PS_CFG_PFIFO_EMPTY_CNT_BIT 16
+#define PFIFO_WR_PTR 0x18
+#define PFIFO_RD_PTR 0x1c
+#define PARSER_SEARCH_PATTERN 0x24
+ #define ES_START_CODE_PATTERN 0x00000100
+#define PARSER_SEARCH_MASK 0x28
+ #define ES_START_CODE_MASK 0xffffff00
+ #define FETCH_ENDIAN_BIT 27
+#define PARSER_INT_ENABLE 0x2c
+ #define PARSER_INT_HOST_EN_BIT 8
+#define PARSER_INT_STATUS 0x30
+ #define PARSER_INTSTAT_SC_FOUND 1
+#define PARSER_ES_CONTROL 0x5c
+#define PARSER_VIDEO_START_PTR 0x80
+#define PARSER_VIDEO_END_PTR 0x84
+#define PARSER_VIDEO_WP 0x88
+#define PARSER_VIDEO_HOLE 0x90
+
+#define SEARCH_PATTERN_LEN 512
+#define VP9_HEADER_SIZE 16
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+static int search_done;
+
+static irqreturn_t esparser_isr(int irq, void *dev)
+{
+ int int_status;
+ struct amvdec_core *core = dev;
+
+ int_status = amvdec_read_parser(core, PARSER_INT_STATUS);
+ amvdec_write_parser(core, PARSER_INT_STATUS, int_status);
+
+ if (int_status & PARSER_INTSTAT_SC_FOUND) {
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+ search_done = 1;
+ wake_up_interruptible(&wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Each VP9 frame in the bitstream needs to be prefixed with a 16-byte
+ * Amlogic custom header.
+ */
+static int vp9_update_header(struct amvdec_core *core, struct vb2_buffer *buf)
+{
+ u8 *dp;
+ u8 marker;
+ int dsize;
+ int num_frames, cur_frame;
+ int cur_mag, mag, mag_ptr;
+ int frame_size[8], tot_frame_size[8];
+ int total_datasize = 0;
+ int new_frame_size;
+ unsigned char *old_header = NULL;
+
+ dp = (uint8_t *)vb2_plane_vaddr(buf, 0);
+ dsize = vb2_get_plane_payload(buf, 0);
+
+ if (dsize == vb2_plane_size(buf, 0)) {
+ dev_warn(core->dev, "%s: unable to update header\n", __func__);
+ return 0;
+ }
+
+ marker = dp[dsize - 1];
+ if ((marker & 0xe0) == 0xc0) {
+ num_frames = (marker & 0x7) + 1;
+ mag = ((marker >> 3) & 0x3) + 1;
+ mag_ptr = dsize - mag * num_frames - 2;
+ if (dp[mag_ptr] != marker)
+ return 0;
+
+ mag_ptr++;
+ for (cur_frame = 0; cur_frame < num_frames; cur_frame++) {
+ frame_size[cur_frame] = 0;
+ for (cur_mag = 0; cur_mag < mag; cur_mag++) {
+ frame_size[cur_frame] |=
+ (dp[mag_ptr] << (cur_mag * 8));
+ mag_ptr++;
+ }
+ if (cur_frame == 0)
+ tot_frame_size[cur_frame] =
+ frame_size[cur_frame];
+ else
+ tot_frame_size[cur_frame] =
+ tot_frame_size[cur_frame - 1] +
+ frame_size[cur_frame];
+ total_datasize += frame_size[cur_frame];
+ }
+ } else {
+ num_frames = 1;
+ frame_size[0] = dsize;
+ tot_frame_size[0] = dsize;
+ total_datasize = dsize;
+ }
+
+ new_frame_size = total_datasize + num_frames * VP9_HEADER_SIZE;
+
+ if (new_frame_size >= vb2_plane_size(buf, 0)) {
+ dev_warn(core->dev, "%s: unable to update header\n", __func__);
+ return 0;
+ }
+
+ for (cur_frame = num_frames - 1; cur_frame >= 0; cur_frame--) {
+ int framesize = frame_size[cur_frame];
+ int framesize_header = framesize + 4;
+ int oldframeoff = tot_frame_size[cur_frame] - framesize;
+ int outheaderoff = oldframeoff + cur_frame * VP9_HEADER_SIZE;
+ u8 *fdata = dp + outheaderoff;
+ u8 *old_framedata = dp + oldframeoff;
+
+ memmove(fdata + VP9_HEADER_SIZE, old_framedata, framesize);
+
+ fdata[0] = (framesize_header >> 24) & 0xff;
+ fdata[1] = (framesize_header >> 16) & 0xff;
+ fdata[2] = (framesize_header >> 8) & 0xff;
+ fdata[3] = (framesize_header >> 0) & 0xff;
+ fdata[4] = ((framesize_header >> 24) & 0xff) ^ 0xff;
+ fdata[5] = ((framesize_header >> 16) & 0xff) ^ 0xff;
+ fdata[6] = ((framesize_header >> 8) & 0xff) ^ 0xff;
+ fdata[7] = ((framesize_header >> 0) & 0xff) ^ 0xff;
+ fdata[8] = 0;
+ fdata[9] = 0;
+ fdata[10] = 0;
+ fdata[11] = 1;
+ fdata[12] = 'A';
+ fdata[13] = 'M';
+ fdata[14] = 'L';
+ fdata[15] = 'V';
+
+ if (!old_header) {
+ /* nothing */
+ } else if (old_header > fdata + 16 + framesize) {
+ dev_dbg(core->dev, "%s: data has gaps, setting to 0\n",
+ __func__);
+ memset(fdata + 16 + framesize, 0,
+			       old_header - (fdata + 16 + framesize));
+ } else if (old_header < fdata + 16 + framesize) {
+ dev_err(core->dev, "%s: data overwritten\n", __func__);
+ }
+ old_header = fdata;
+ }
+
+ return new_frame_size;
+}
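For illustration only, a worked example of the header this function prepends to each frame, using a hypothetical single-frame payload of 1000 bytes (none of these values come from a real stream): framesize_header = 1000 + 4 = 1004 = 0x3ec, so the 16 bytes written in front of the frame data are:

	/*
	 * 00 00 03 ec   framesize_header, big-endian
	 * ff ff fc 13   the same four bytes XORed with 0xff
	 * 00 00 00 01   fixed
	 * 41 4d 4c 56   "AMLV" magic
	 */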
+
+/* Pad the packet to at least 4 KiB, otherwise the VDEC unit won't trigger
+ * ISRs.
+ * Also append a start code 000001ff at the end to trigger
+ * the ESPARSER interrupt.
+ */
+static u32 esparser_pad_start_code(struct amvdec_core *core,
+ struct vb2_buffer *vb,
+ u32 payload_size)
+{
+ u32 pad_size = 0;
+ u8 *vaddr = vb2_plane_vaddr(vb, 0);
+
+ if (payload_size < ESPARSER_MIN_PACKET_SIZE) {
+ pad_size = ESPARSER_MIN_PACKET_SIZE - payload_size;
+ memset(vaddr + payload_size, 0, pad_size);
+ }
+
+ if ((payload_size + pad_size + SEARCH_PATTERN_LEN) >
+ vb2_plane_size(vb, 0)) {
+ dev_warn(core->dev, "%s: unable to pad start code\n", __func__);
+ return pad_size;
+ }
+
+ memset(vaddr + payload_size + pad_size, 0, SEARCH_PATTERN_LEN);
+ vaddr[payload_size + pad_size] = 0x00;
+ vaddr[payload_size + pad_size + 1] = 0x00;
+ vaddr[payload_size + pad_size + 2] = 0x01;
+ vaddr[payload_size + pad_size + 3] = 0xff;
+
+ return pad_size;
+}
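A minimal numeric sketch of the padding rule above, with a hypothetical 1000-byte payload and a plane large enough to hold the padding and search pattern:

	/*
	 * payload_size = 1000
	 * pad_size     = ESPARSER_MIN_PACKET_SIZE - 1000 = 3096
	 * start code 00 00 01 ff is written at offset 1000 + 3096 = 4096,
	 * and esparser_write_data() later fetches 4096 + SEARCH_PATTERN_LEN
	 * bytes.
	 */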
+
+static int
+esparser_write_data(struct amvdec_core *core, dma_addr_t addr, u32 size)
+{
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+ amvdec_write_parser(core, PARSER_CONTROL,
+ ES_WRITE |
+ ES_PARSER_START |
+ ES_SEARCH |
+ (size << ES_PACK_SIZE_BIT));
+
+ amvdec_write_parser(core, PARSER_FETCH_ADDR, addr);
+ amvdec_write_parser(core, PARSER_FETCH_CMD,
+ (7 << FETCH_ENDIAN_BIT) |
+ (size + SEARCH_PATTERN_LEN));
+
+ search_done = 0;
+ return wait_event_interruptible_timeout(wq, search_done, (HZ / 5));
+}
+
+static u32 esparser_vififo_get_free_space(struct amvdec_session *sess)
+{
+ u32 vififo_usage;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+ struct amvdec_core *core = sess->core;
+
+ vififo_usage = vdec_ops->vififo_level(sess);
+ vififo_usage += amvdec_read_parser(core, PARSER_VIDEO_HOLE);
+	vififo_usage += (6 * SZ_1K); /* 6 KiB internal fifo */
+
+ if (vififo_usage > sess->vififo_size) {
+ dev_warn(sess->core->dev,
+ "VIFIFO usage (%u) > VIFIFO size (%u)\n",
+ vififo_usage, sess->vififo_size);
+ return 0;
+ }
+
+ return sess->vififo_size - vififo_usage;
+}
+
+int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len)
+{
+ struct device *dev = core->dev;
+ void *eos_vaddr;
+ dma_addr_t eos_paddr;
+ int ret;
+
+ eos_vaddr = dma_alloc_coherent(dev, len + SEARCH_PATTERN_LEN,
+ &eos_paddr, GFP_KERNEL);
+ if (!eos_vaddr)
+ return -ENOMEM;
+
+ memcpy(eos_vaddr, data, len);
+ ret = esparser_write_data(core, eos_paddr, len);
+ dma_free_coherent(dev, len + SEARCH_PATTERN_LEN,
+ eos_vaddr, eos_paddr);
+
+ return ret;
+}
+
+static u32 esparser_get_offset(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 offset = amvdec_read_parser(core, PARSER_VIDEO_WP) -
+ sess->vififo_paddr;
+
+ if (offset < sess->last_offset)
+ sess->wrap_count++;
+
+ sess->last_offset = offset;
+ offset += (sess->wrap_count * sess->vififo_size);
+
+ return offset;
+}
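A hedged example of the wrap accounting, assuming vififo_size is 16 MiB and purely hypothetical pointer values:

	/*
	 * Previous call: last_offset = 0x00ffff00.
	 * This call: PARSER_VIDEO_WP - vififo_paddr = 0x100, which is below
	 * last_offset, so wrap_count becomes 1 and the function returns
	 * 0x100 + 1 * 0x01000000 = 0x01000100, keeping the offsets handed to
	 * amvdec_add_ts() monotonically increasing across VIFIFO wraps.
	 */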
+
+static int
+esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
+{
+ int ret;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ u32 payload_size = vb2_get_plane_payload(vb, 0);
+ dma_addr_t phy = vb2_dma_contig_plane_dma_addr(vb, 0);
+ u32 num_dst_bufs = 0;
+ u32 offset;
+ u32 pad_size;
+
+	/*
+	 * Because VP9 can hold the maximum number of reference frames,
+	 * subtract 3 from the available dst buffers to prevent a shortage
+	 * of CAPTURE buffers on the decoder side.
+	 * A future enhancement could be to notify the decoding modules when
+	 * new capture buffers are queued, so that they pause when no capture
+	 * buffer is available and resume on that notification.
+	 */
+ if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) {
+ if (codec_ops->num_pending_bufs)
+ num_dst_bufs = codec_ops->num_pending_bufs(sess);
+
+ num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
+ num_dst_bufs -= 3;
+
+ if (esparser_vififo_get_free_space(sess) < payload_size ||
+ atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs)
+ return -EAGAIN;
+ } else if (esparser_vififo_get_free_space(sess) < payload_size) {
+ return -EAGAIN;
+ }
+
+ v4l2_m2m_src_buf_remove_by_buf(sess->m2m_ctx, vbuf);
+
+ offset = esparser_get_offset(sess);
+
+ ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
+ if (ret) {
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ return ret;
+ }
+
+ dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n",
+ vb->timestamp, payload_size, offset, vbuf->flags);
+
+ vbuf->flags = 0;
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = sess->sequence_out++;
+
+ if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) {
+ payload_size = vp9_update_header(core, vb);
+
+ /* If unable to alter buffer to add headers */
+ if (payload_size == 0) {
+ amvdec_remove_ts(sess, vb->timestamp);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+
+ return 0;
+ }
+ }
+
+ pad_size = esparser_pad_start_code(core, vb, payload_size);
+ ret = esparser_write_data(core, phy, payload_size + pad_size);
+
+ if (ret <= 0) {
+ dev_warn(core->dev, "esparser: input parsing error\n");
+ amvdec_remove_ts(sess, vb->timestamp);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ amvdec_write_parser(core, PARSER_FETCH_CMD, 0);
+
+ return 0;
+ }
+
+ atomic_inc(&sess->esparser_queued_bufs);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+
+ return 0;
+}
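A quick sketch of the VP9 back-pressure arithmetic described above, with made-up counts:

	/*
	 * 8 CAPTURE buffers ready + 2 pending in the codec - 3 held back = 7,
	 * so esparser_queue() returns -EAGAIN once 7 src buffers are in
	 * flight (or earlier if the VIFIFO cannot hold the payload).
	 */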
+
+void esparser_queue_all_src(struct work_struct *work)
+{
+ struct v4l2_m2m_buffer *buf, *n;
+ struct amvdec_session *sess =
+ container_of(work, struct amvdec_session, esparser_queue_work);
+
+ mutex_lock(&sess->lock);
+ v4l2_m2m_for_each_src_buf_safe(sess->m2m_ctx, buf, n) {
+ if (sess->should_stop)
+ break;
+
+ if (esparser_queue(sess, &buf->vb) < 0)
+ break;
+ }
+ mutex_unlock(&sess->lock);
+}
+
+int esparser_power_up(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+
+ reset_control_reset(core->esparser_reset);
+ amvdec_write_parser(core, PARSER_CONFIG,
+ (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
+ (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
+ (16 << PS_CFG_MAX_FETCH_CYCLE_BIT));
+
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+
+ amvdec_write_parser(core, PARSER_SEARCH_PATTERN,
+ ES_START_CODE_PATTERN);
+ amvdec_write_parser(core, PARSER_SEARCH_MASK, ES_START_CODE_MASK);
+
+ amvdec_write_parser(core, PARSER_CONFIG,
+ (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
+ (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
+ (16 << PS_CFG_MAX_FETCH_CYCLE_BIT) |
+ (2 << PS_CFG_STARTCODE_WID_24_BIT));
+
+ amvdec_write_parser(core, PARSER_CONTROL,
+ (ES_SEARCH | ES_PARSER_START));
+
+ amvdec_write_parser(core, PARSER_VIDEO_START_PTR, sess->vififo_paddr);
+ amvdec_write_parser(core, PARSER_VIDEO_END_PTR,
+ sess->vififo_paddr + sess->vififo_size - 8);
+ amvdec_write_parser(core, PARSER_ES_CONTROL,
+ amvdec_read_parser(core, PARSER_ES_CONTROL) & ~1);
+
+ if (vdec_ops->conf_esparser)
+ vdec_ops->conf_esparser(sess);
+
+ amvdec_write_parser(core, PARSER_INT_STATUS, 0xffff);
+ amvdec_write_parser(core, PARSER_INT_ENABLE,
+ BIT(PARSER_INT_HOST_EN_BIT));
+
+ return 0;
+}
+
+int esparser_init(struct platform_device *pdev, struct amvdec_core *core)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "esparser");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, esparser_isr, IRQF_SHARED,
+ "esparserirq", core);
+ if (ret) {
+ dev_err(dev, "Failed requesting ESPARSER IRQ\n");
+ return ret;
+ }
+
+ core->esparser_reset =
+ devm_reset_control_get_exclusive(dev, "esparser");
+ if (IS_ERR(core->esparser_reset)) {
+ dev_err(dev, "Failed to get esparser_reset\n");
+ return PTR_ERR(core->esparser_reset);
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/meson/vdec/esparser.h b/drivers/staging/media/meson/vdec/esparser.h
new file mode 100644
index 000000000000..9351e62c70e6
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/esparser.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_ESPARSER_H_
+#define __MESON_VDEC_ESPARSER_H_
+
+#include <linux/platform_device.h>
+
+#include "vdec.h"
+
+int esparser_init(struct platform_device *pdev, struct amvdec_core *core);
+int esparser_power_up(struct amvdec_session *sess);
+
+/**
+ * esparser_queue_eos() - write End Of Stream sequence to the ESPARSER
+ *
+ * @core: vdec core struct
+ * @data: EOS sequence
+ * @len: length of EOS sequence
+ */
+int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len);
+
+/**
+ * esparser_queue_all_src() - work handler that writes as many src buffers
+ * as possible to the ESPARSER
+ *
+ * @work: work struct
+ */
+void esparser_queue_all_src(struct work_struct *work);
+
+#define ESPARSER_MIN_PACKET_SIZE SZ_4K
+
+#endif
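A minimal usage sketch for esparser_queue_eos(), assuming a session whose codec provides an eos_sequence() callback; it mirrors the drain path in vdec.c, and the helper name is made up for illustration:

	/* hypothetical helper, not part of the patch */
	static void example_queue_eos(struct amvdec_session *sess)
	{
		struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
		const u8 *data;
		u32 len;

		if (!codec_ops->eos_sequence)
			return;

		data = codec_ops->eos_sequence(&len);
		esparser_queue_eos(sess->core, data, len);
	}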
diff --git a/drivers/staging/media/meson/vdec/hevc_regs.h b/drivers/staging/media/meson/vdec/hevc_regs.h
new file mode 100644
index 000000000000..0392f41a1eed
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/hevc_regs.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __MESON_VDEC_HEVC_REGS_H_
+#define __MESON_VDEC_HEVC_REGS_H_
+
+#define HEVC_ASSIST_MMU_MAP_ADDR 0xc024
+
+#define HEVC_ASSIST_MBOX1_CLR_REG 0xc1d4
+#define HEVC_ASSIST_MBOX1_MASK 0xc1d8
+
+#define HEVC_ASSIST_SCRATCH_0 0xc300
+#define HEVC_ASSIST_SCRATCH_1 0xc304
+#define HEVC_ASSIST_SCRATCH_2 0xc308
+#define HEVC_ASSIST_SCRATCH_3 0xc30c
+#define HEVC_ASSIST_SCRATCH_4 0xc310
+#define HEVC_ASSIST_SCRATCH_5 0xc314
+#define HEVC_ASSIST_SCRATCH_6 0xc318
+#define HEVC_ASSIST_SCRATCH_7 0xc31c
+#define HEVC_ASSIST_SCRATCH_8 0xc320
+#define HEVC_ASSIST_SCRATCH_9 0xc324
+#define HEVC_ASSIST_SCRATCH_A 0xc328
+#define HEVC_ASSIST_SCRATCH_B 0xc32c
+#define HEVC_ASSIST_SCRATCH_C 0xc330
+#define HEVC_ASSIST_SCRATCH_D 0xc334
+#define HEVC_ASSIST_SCRATCH_E 0xc338
+#define HEVC_ASSIST_SCRATCH_F 0xc33c
+#define HEVC_ASSIST_SCRATCH_G 0xc340
+#define HEVC_ASSIST_SCRATCH_H 0xc344
+#define HEVC_ASSIST_SCRATCH_I 0xc348
+#define HEVC_ASSIST_SCRATCH_J 0xc34c
+#define HEVC_ASSIST_SCRATCH_K 0xc350
+#define HEVC_ASSIST_SCRATCH_L 0xc354
+#define HEVC_ASSIST_SCRATCH_M 0xc358
+#define HEVC_ASSIST_SCRATCH_N 0xc35c
+
+#define HEVC_PARSER_VERSION 0xc400
+#define HEVC_STREAM_CONTROL 0xc404
+#define HEVC_STREAM_START_ADDR 0xc408
+#define HEVC_STREAM_END_ADDR 0xc40c
+#define HEVC_STREAM_WR_PTR 0xc410
+#define HEVC_STREAM_RD_PTR 0xc414
+#define HEVC_STREAM_LEVEL 0xc418
+#define HEVC_STREAM_FIFO_CTL 0xc41c
+#define HEVC_SHIFT_CONTROL 0xc420
+#define HEVC_SHIFT_STARTCODE 0xc424
+#define HEVC_SHIFT_EMULATECODE 0xc428
+#define HEVC_SHIFT_STATUS 0xc42c
+#define HEVC_SHIFTED_DATA 0xc430
+#define HEVC_SHIFT_BYTE_COUNT 0xc434
+#define HEVC_SHIFT_COMMAND 0xc438
+#define HEVC_ELEMENT_RESULT 0xc43c
+#define HEVC_CABAC_CONTROL 0xc440
+#define HEVC_PARSER_SLICE_INFO 0xc444
+#define HEVC_PARSER_CMD_WRITE 0xc448
+#define HEVC_PARSER_CORE_CONTROL 0xc44c
+#define HEVC_PARSER_CMD_FETCH 0xc450
+#define HEVC_PARSER_CMD_STATUS 0xc454
+#define HEVC_PARSER_LCU_INFO 0xc458
+#define HEVC_PARSER_HEADER_INFO 0xc45c
+#define HEVC_PARSER_INT_CONTROL 0xc480
+#define HEVC_PARSER_INT_STATUS 0xc484
+#define HEVC_PARSER_IF_CONTROL 0xc488
+#define HEVC_PARSER_PICTURE_SIZE 0xc48c
+#define HEVC_PARSER_LCU_START 0xc490
+#define HEVC_PARSER_HEADER_INFO2 0xc494
+#define HEVC_PARSER_QUANT_READ 0xc498
+#define HEVC_PARSER_RESERVED_27 0xc49c
+#define HEVC_PARSER_CMD_SKIP_0 0xc4a0
+#define HEVC_PARSER_CMD_SKIP_1 0xc4a4
+#define HEVC_PARSER_CMD_SKIP_2 0xc4a8
+#define HEVC_SAO_IF_STATUS 0xc4c0
+#define HEVC_SAO_IF_DATA_Y 0xc4c4
+#define HEVC_SAO_IF_DATA_U 0xc4c8
+#define HEVC_SAO_IF_DATA_V 0xc4cc
+#define HEVC_STREAM_SWAP_ADDR 0xc4d0
+#define HEVC_STREAM_SWAP_CTRL 0xc4d4
+#define HEVC_IQIT_IF_WAIT_CNT 0xc4d8
+#define HEVC_MPRED_IF_WAIT_CNT 0xc4dc
+#define HEVC_SAO_IF_WAIT_CNT 0xc4e0
+
+#define HEVC_MPRED_VERSION 0xc800
+#define HEVC_MPRED_CTRL0 0xc804
+ #define MPRED_CTRL0_NEW_PIC BIT(2)
+ #define MPRED_CTRL0_NEW_TILE BIT(3)
+ #define MPRED_CTRL0_NEW_SLI_SEG BIT(4)
+ #define MPRED_CTRL0_TMVP BIT(5)
+ #define MPRED_CTRL0_LDC BIT(6)
+ #define MPRED_CTRL0_COL_FROM_L0 BIT(7)
+ #define MPRED_CTRL0_ABOVE_EN BIT(9)
+ #define MPRED_CTRL0_MV_WR_EN BIT(10)
+ #define MPRED_CTRL0_MV_RD_EN BIT(11)
+ #define MPRED_CTRL0_BUF_LINEAR BIT(13)
+#define HEVC_MPRED_CTRL1 0xc808
+#define HEVC_MPRED_INT_EN 0xc80c
+#define HEVC_MPRED_INT_STATUS 0xc810
+#define HEVC_MPRED_PIC_SIZE 0xc814
+#define HEVC_MPRED_PIC_SIZE_LCU 0xc818
+#define HEVC_MPRED_TILE_START 0xc81c
+#define HEVC_MPRED_TILE_SIZE_LCU 0xc820
+#define HEVC_MPRED_REF_NUM 0xc824
+#define HEVC_MPRED_REF_EN_L0 0xc830
+#define HEVC_MPRED_REF_EN_L1 0xc834
+#define HEVC_MPRED_COLREF_EN_L0 0xc838
+#define HEVC_MPRED_COLREF_EN_L1 0xc83c
+#define HEVC_MPRED_AXI_WCTRL 0xc840
+#define HEVC_MPRED_AXI_RCTRL 0xc844
+#define HEVC_MPRED_ABV_START_ADDR 0xc848
+#define HEVC_MPRED_MV_WR_START_ADDR 0xc84c
+#define HEVC_MPRED_MV_RD_START_ADDR 0xc850
+#define HEVC_MPRED_MV_WPTR 0xc854
+#define HEVC_MPRED_MV_RPTR 0xc858
+#define HEVC_MPRED_MV_WR_ROW_JUMP 0xc85c
+#define HEVC_MPRED_MV_RD_ROW_JUMP 0xc860
+#define HEVC_MPRED_CURR_LCU 0xc864
+#define HEVC_MPRED_ABV_WPTR 0xc868
+#define HEVC_MPRED_ABV_RPTR 0xc86c
+#define HEVC_MPRED_CTRL2 0xc870
+#define HEVC_MPRED_CTRL3 0xc874
+#define HEVC_MPRED_L0_REF00_POC 0xc880
+#define HEVC_MPRED_L1_REF00_POC 0xc8c0
+
+#define HEVC_MPRED_CTRL4 0xc930
+
+#define HEVC_MPRED_CUR_POC 0xc980
+#define HEVC_MPRED_COL_POC 0xc984
+#define HEVC_MPRED_MV_RD_END_ADDR 0xc988
+
+#define HEVC_MSP 0xcc00
+#define HEVC_MPSR 0xcc04
+#define HEVC_MCPU_INTR_MSK 0xcc10
+#define HEVC_MCPU_INTR_REQ 0xcc14
+#define HEVC_CPSR 0xcc84
+
+#define HEVC_IMEM_DMA_CTRL 0xcd00
+#define HEVC_IMEM_DMA_ADR 0xcd04
+#define HEVC_IMEM_DMA_COUNT 0xcd08
+
+#define HEVCD_IPP_TOP_CNTL 0xd000
+#define HEVCD_IPP_LINEBUFF_BASE 0xd024
+#define HEVCD_IPP_AXIIF_CONFIG 0xd02c
+
+#define VP9D_MPP_REF_SCALE_ENBL 0xd104
+#define VP9D_MPP_REFINFO_TBL_ACCCONFIG 0xd108
+#define VP9D_MPP_REFINFO_DATA 0xd10c
+
+#define HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR 0xd180
+#define HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR 0xd184
+#define HEVCD_MPP_ANC2AXI_TBL_DATA 0xd190
+
+#define HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR 0xd300
+#define HEVCD_MPP_ANC_CANVAS_DATA_ADDR 0xd304
+#define HEVCD_MPP_DECOMP_CTL1 0xd308
+#define HEVCD_MPP_DECOMP_CTL2 0xd30c
+#define HEVCD_MCRCC_CTL1 0xd3c0
+#define HEVCD_MCRCC_CTL2 0xd3c4
+#define HEVCD_MCRCC_CTL3 0xd3c8
+
+#define HEVC_DBLK_CFG0 0xd400
+#define HEVC_DBLK_CFG1 0xd404
+#define HEVC_DBLK_CFG2 0xd408
+#define HEVC_DBLK_CFG3 0xd40c
+#define HEVC_DBLK_CFG4 0xd410
+#define HEVC_DBLK_CFG5 0xd414
+#define HEVC_DBLK_CFG6 0xd418
+#define HEVC_DBLK_CFG7 0xd41c
+#define HEVC_DBLK_CFG8 0xd420
+#define HEVC_DBLK_CFG9 0xd424
+#define HEVC_DBLK_CFGA 0xd428
+#define HEVC_DBLK_STS0 0xd42c
+#define HEVC_DBLK_CFGB 0xd42c
+#define HEVC_DBLK_STS1 0xd430
+#define HEVC_DBLK_CFGE 0xd438
+
+#define HEVC_SAO_VERSION 0xd800
+#define HEVC_SAO_CTRL0 0xd804
+#define HEVC_SAO_CTRL1 0xd808
+#define HEVC_SAO_PIC_SIZE 0xd814
+#define HEVC_SAO_PIC_SIZE_LCU 0xd818
+#define HEVC_SAO_TILE_START 0xd81c
+#define HEVC_SAO_TILE_SIZE_LCU 0xd820
+#define HEVC_SAO_Y_START_ADDR 0xd82c
+#define HEVC_SAO_Y_LENGTH 0xd830
+#define HEVC_SAO_C_START_ADDR 0xd834
+#define HEVC_SAO_C_LENGTH 0xd838
+#define HEVC_SAO_Y_WPTR 0xd83c
+#define HEVC_SAO_C_WPTR 0xd840
+#define HEVC_SAO_ABV_START_ADDR 0xd844
+#define HEVC_SAO_VB_WR_START_ADDR 0xd848
+#define HEVC_SAO_VB_RD_START_ADDR 0xd84c
+#define HEVC_SAO_ABV_WPTR 0xd850
+#define HEVC_SAO_ABV_RPTR 0xd854
+#define HEVC_SAO_VB_WPTR 0xd858
+#define HEVC_SAO_VB_RPTR 0xd85c
+#define HEVC_SAO_CTRL2 0xd880
+#define HEVC_SAO_CTRL3 0xd884
+#define HEVC_SAO_CTRL4 0xd888
+#define HEVC_SAO_CTRL5 0xd88c
+#define HEVC_SAO_CTRL6 0xd890
+#define HEVC_SAO_CTRL7 0xd894
+#define HEVC_CM_BODY_START_ADDR 0xd898
+#define HEVC_CM_BODY_LENGTH 0xd89c
+#define HEVC_CM_HEADER_START_ADDR 0xd8a0
+#define HEVC_CM_HEADER_LENGTH 0xd8a4
+#define HEVC_CM_HEADER_OFFSET 0xd8ac
+#define HEVC_SAO_MMU_VH0_ADDR 0xd8e8
+#define HEVC_SAO_MMU_VH1_ADDR 0xd8ec
+
+#define HEVC_IQIT_CLK_RST_CTRL 0xdc00
+#define HEVC_IQIT_SCALELUT_WR_ADDR 0xdc08
+#define HEVC_IQIT_SCALELUT_RD_ADDR 0xdc0c
+#define HEVC_IQIT_SCALELUT_DATA 0xdc10
+
+#define HEVC_PSCALE_CTRL 0xe444
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
new file mode 100644
index 000000000000..49e497a32973
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -0,0 +1,1121 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec.h"
+#include "esparser.h"
+#include "vdec_helpers.h"
+
+struct dummy_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+/* 16 MiB for parsed bitstream swap exchange */
+#define SIZE_VIFIFO SZ_16M
+
+static u32 get_output_size(u32 width, u32 height)
+{
+ return ALIGN(width * height, SZ_64K);
+}
+
+u32 amvdec_get_output_size(struct amvdec_session *sess)
+{
+ return get_output_size(sess->width, sess->height);
+}
+EXPORT_SYMBOL_GPL(amvdec_get_output_size);
+
+static int vdec_codec_needs_recycle(struct amvdec_session *sess)
+{
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ return codec_ops->can_recycle && codec_ops->recycle;
+}
+
+static int vdec_recycle_thread(void *data)
+{
+ struct amvdec_session *sess = data;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct amvdec_buffer *tmp, *n;
+
+ while (!kthread_should_stop()) {
+ mutex_lock(&sess->bufs_recycle_lock);
+ list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
+ if (!codec_ops->can_recycle(core))
+ break;
+
+ codec_ops->recycle(core, tmp->vb->index);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ mutex_unlock(&sess->bufs_recycle_lock);
+
+ usleep_range(5000, 10000);
+ }
+
+ return 0;
+}
+
+static int vdec_poweron(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+
+ ret = clk_prepare_enable(sess->core->dos_parser_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sess->core->dos_clk);
+ if (ret)
+ goto disable_dos_parser;
+
+ ret = vdec_ops->start(sess);
+ if (ret)
+ goto disable_dos;
+
+ esparser_power_up(sess);
+
+ return 0;
+
+disable_dos:
+ clk_disable_unprepare(sess->core->dos_clk);
+disable_dos_parser:
+ clk_disable_unprepare(sess->core->dos_parser_clk);
+
+ return ret;
+}
+
+static void vdec_wait_inactive(struct amvdec_session *sess)
+{
+ /* We consider 50ms with no IRQ to be inactive. */
+ while (time_is_after_jiffies64(sess->last_irq_jiffies +
+ msecs_to_jiffies(50)))
+ msleep(25);
+}
+
+static void vdec_poweroff(struct amvdec_session *sess)
+{
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ sess->should_stop = 1;
+ vdec_wait_inactive(sess);
+ if (codec_ops->drain)
+ codec_ops->drain(sess);
+
+ vdec_ops->stop(sess);
+ clk_disable_unprepare(sess->core->dos_clk);
+ clk_disable_unprepare(sess->core->dos_parser_clk);
+}
+
+static void
+vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
+{
+ struct amvdec_buffer *new_buf;
+
+ new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
+ if (!new_buf)
+ return;
+ new_buf->vb = vb;
+
+ mutex_lock(&sess->bufs_recycle_lock);
+ list_add_tail(&new_buf->list, &sess->bufs_recycle);
+ mutex_unlock(&sess->bufs_recycle_lock);
+}
+
+static void vdec_m2m_device_run(void *priv)
+{
+ struct amvdec_session *sess = priv;
+
+ schedule_work(&sess->esparser_queue_work);
+}
+
+static void vdec_m2m_job_abort(void *priv)
+{
+ struct amvdec_session *sess = priv;
+
+ v4l2_m2m_job_finish(sess->m2m_dev, sess->m2m_ctx);
+}
+
+static const struct v4l2_m2m_ops vdec_m2m_ops = {
+ .device_run = vdec_m2m_device_run,
+ .job_abort = vdec_m2m_job_abort,
+};
+
+static void process_num_buffers(struct vb2_queue *q,
+ struct amvdec_session *sess,
+ unsigned int *num_buffers,
+ bool is_reqbufs)
+{
+ const struct amvdec_format *fmt_out = sess->fmt_out;
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
+ unsigned int buffers_total = q_num_bufs + *num_buffers;
+ u32 min_buf_capture = v4l2_ctrl_g_ctrl(sess->ctrl_min_buf_capture);
+
+ if (q_num_bufs + *num_buffers < min_buf_capture)
+ *num_buffers = min_buf_capture - q_num_bufs;
+ if (is_reqbufs && buffers_total < fmt_out->min_buffers)
+ *num_buffers = fmt_out->min_buffers - q_num_bufs;
+ if (buffers_total > fmt_out->max_buffers)
+ *num_buffers = fmt_out->max_buffers - q_num_bufs;
+
+ /* We need to program the complete CAPTURE buffer list
+ * in registers during start_streaming, and the firmwares
+ * are free to choose any of them to write frames to. As such,
+	 * we need all of them to be queued into the driver.
+ */
+ sess->num_dst_bufs = q_num_bufs + *num_buffers;
+ q->min_queued_buffers = max(fmt_out->min_buffers, sess->num_dst_bufs);
+}
+
+static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ u32 output_size = amvdec_get_output_size(sess);
+
+ if (*num_planes) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (*num_planes != 1 ||
+ sizes[0] < sess->src_buffer_size)
+ return -EINVAL;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ if (*num_planes != 2 ||
+ sizes[0] < output_size ||
+ sizes[1] < output_size / 2)
+ return -EINVAL;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ if (*num_planes != 3 ||
+ sizes[0] < output_size ||
+ sizes[1] < output_size / 4 ||
+ sizes[2] < output_size / 4)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ process_num_buffers(q, sess, num_buffers, false);
+ break;
+ }
+
+ return 0;
+ }
+
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ sizes[0] = sess->src_buffer_size;
+ *num_planes = 1;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ sizes[0] = output_size;
+ sizes[1] = output_size / 2;
+ *num_planes = 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ sizes[0] = output_size;
+ sizes[1] = output_size / 4;
+ sizes[2] = output_size / 4;
+ *num_planes = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ process_num_buffers(q, sess, num_buffers, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sess->changed_format = 1;
+ return 0;
+}
+
+static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct amvdec_session *sess = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = sess->m2m_ctx;
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ if (!sess->streamon_out)
+ return;
+
+ if (sess->streamon_cap &&
+ vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vdec_codec_needs_recycle(sess))
+ vdec_queue_recycle(sess, vb);
+
+ schedule_work(&sess->esparser_queue_work);
+}
+
+static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct amvdec_core *core = sess->core;
+ struct vb2_v4l2_buffer *buf;
+ int ret;
+
+ if (core->cur_sess && core->cur_sess != sess) {
+ ret = -EBUSY;
+ goto bufs_done;
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->streamon_out = 1;
+ else
+ sess->streamon_cap = 1;
+
+ if (!sess->streamon_out)
+ return 0;
+
+ if (sess->status == STATUS_NEEDS_RESUME &&
+ q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ sess->changed_format) {
+ codec_ops->resume(sess);
+ sess->status = STATUS_RUNNING;
+ return 0;
+ }
+
+ if (sess->status == STATUS_RUNNING ||
+ sess->status == STATUS_NEEDS_RESUME ||
+ sess->status == STATUS_INIT)
+ return 0;
+
+ sess->vififo_size = SIZE_VIFIFO;
+ sess->vififo_vaddr =
+ dma_alloc_coherent(sess->core->dev, sess->vififo_size,
+ &sess->vififo_paddr, GFP_KERNEL);
+ if (!sess->vififo_vaddr) {
+ dev_err(sess->core->dev, "Failed to request VIFIFO buffer\n");
+ ret = -ENOMEM;
+ goto bufs_done;
+ }
+
+ sess->should_stop = 0;
+ sess->keyframe_found = 0;
+ sess->last_offset = 0;
+ sess->wrap_count = 0;
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+ atomic_set(&sess->esparser_queued_bufs, 0);
+ v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, 1);
+
+ ret = vdec_poweron(sess);
+ if (ret)
+ goto vififo_free;
+
+ sess->sequence_cap = 0;
+ sess->sequence_out = 0;
+ if (vdec_codec_needs_recycle(sess))
+ sess->recycle_thread = kthread_run(vdec_recycle_thread, sess,
+ "vdec_recycle");
+
+ sess->status = STATUS_INIT;
+ core->cur_sess = sess;
+ schedule_work(&sess->esparser_queue_work);
+ return 0;
+
+vififo_free:
+ dma_free_coherent(sess->core->dev, sess->vififo_size,
+ sess->vififo_vaddr, sess->vififo_paddr);
+bufs_done:
+ while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->streamon_out = 0;
+ else
+ sess->streamon_cap = 0;
+
+ return ret;
+}
+
+static void vdec_free_canvas(struct amvdec_session *sess)
+{
+ int i;
+
+ for (i = 0; i < sess->canvas_num; ++i)
+ meson_canvas_free(sess->core->canvas, sess->canvas_alloc[i]);
+
+ sess->canvas_num = 0;
+}
+
+static void vdec_reset_timestamps(struct amvdec_session *sess)
+{
+ struct amvdec_timestamp *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void vdec_reset_bufs_recycle(struct amvdec_session *sess)
+{
+ struct amvdec_buffer *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void vdec_stop_streaming(struct vb2_queue *q)
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct amvdec_core *core = sess->core;
+ struct vb2_v4l2_buffer *buf;
+
+ if (sess->status == STATUS_RUNNING ||
+ sess->status == STATUS_INIT ||
+ (sess->status == STATUS_NEEDS_RESUME &&
+ (!sess->streamon_out || !sess->streamon_cap))) {
+ if (vdec_codec_needs_recycle(sess))
+ kthread_stop(sess->recycle_thread);
+
+ vdec_poweroff(sess);
+ vdec_free_canvas(sess);
+ dma_free_coherent(sess->core->dev, sess->vififo_size,
+ sess->vififo_vaddr, sess->vififo_paddr);
+ vdec_reset_timestamps(sess);
+ vdec_reset_bufs_recycle(sess);
+ kfree(sess->priv);
+ sess->priv = NULL;
+ core->cur_sess = NULL;
+ sess->status = STATUS_STOPPED;
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+
+ sess->streamon_out = 0;
+ } else {
+ /* Drain remaining refs if was still running */
+ if (sess->status >= STATUS_RUNNING && codec_ops->drain)
+ codec_ops->drain(sess);
+
+ while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+
+ sess->streamon_cap = 0;
+ }
+}
+
+static int vdec_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static const struct vb2_ops vdec_vb2_ops = {
+ .queue_setup = vdec_queue_setup,
+ .start_streaming = vdec_start_streaming,
+ .stop_streaming = vdec_stop_streaming,
+ .buf_queue = vdec_vb2_buf_queue,
+ .buf_prepare = vdec_vb2_buf_prepare,
+};
+
+static int
+vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "meson-vdec", sizeof(cap->driver));
+ strscpy(cap->card, "Amlogic Video Decoder", sizeof(cap->card));
+ strscpy(cap->bus_info, "platform:meson-vdec", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static const struct amvdec_format *
+find_format(const struct amvdec_format *fmts, u32 size, u32 pixfmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmts[i].pixfmt == pixfmt)
+ return &fmts[i];
+ }
+
+ return NULL;
+}
+
+static unsigned int
+vdec_supports_pixfmt_cap(const struct amvdec_format *fmt_out, u32 pixfmt_cap)
+{
+ int i;
+
+ for (i = 0; fmt_out->pixfmts_cap[i]; i++)
+ if (fmt_out->pixfmts_cap[i] == pixfmt_cap)
+ return 1;
+
+ return 0;
+}
+
+static const struct amvdec_format *
+vdec_try_fmt_common(struct amvdec_session *sess, u32 size,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
+ const struct amvdec_format *fmts = sess->core->platform->formats;
+ const struct amvdec_format *fmt_out = NULL;
+ u32 output_size = 0;
+
+ memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ fmt_out = find_format(fmts, size, pixmp->pixelformat);
+ if (!fmt_out) {
+ pixmp->pixelformat = V4L2_PIX_FMT_MPEG2;
+ fmt_out = find_format(fmts, size, pixmp->pixelformat);
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ fmt_out = sess->fmt_out;
+ break;
+ default:
+ return NULL;
+ }
+
+ pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width);
+ pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height);
+ output_size = get_output_size(pixmp->width, pixmp->height);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!pfmt[0].sizeimage)
+ pfmt[0].sizeimage = sess->src_buffer_size;
+ pfmt[0].bytesperline = 0;
+ pixmp->num_planes = 1;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt_out = sess->fmt_out;
+ if (!vdec_supports_pixfmt_cap(fmt_out, pixmp->pixelformat))
+ pixmp->pixelformat = fmt_out->pixfmts_cap[0];
+
+ memset(pfmt[1].reserved, 0, sizeof(pfmt[1].reserved));
+ if (pixmp->pixelformat == V4L2_PIX_FMT_NV12M) {
+ pfmt[0].sizeimage = output_size;
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 32);
+
+ pfmt[1].sizeimage = output_size / 2;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 32);
+ pixmp->num_planes = 2;
+ } else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420M) {
+ pfmt[0].sizeimage = output_size;
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 32);
+
+ pfmt[1].sizeimage = output_size / 4;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 32) / 2;
+
+ pfmt[2].sizeimage = output_size / 2;
+ pfmt[2].bytesperline = ALIGN(pixmp->width, 32) / 2;
+ pixmp->num_planes = 3;
+ }
+ }
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+
+ return fmt_out;
+}
+
+static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+
+ vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
+
+ return 0;
+}
+
+static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->pixelformat = sess->pixfmt_cap;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->pixelformat = sess->fmt_out->pixfmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixmp->width = sess->width;
+ pixmp->height = sess->height;
+ pixmp->colorspace = sess->colorspace;
+ pixmp->ycbcr_enc = sess->ycbcr_enc;
+ pixmp->quantization = sess->quantization;
+ pixmp->xfer_func = sess->xfer_func;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = sess->width;
+ pixmp->height = sess->height;
+ }
+
+ vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
+
+ return 0;
+}
+
+static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ u32 num_formats = sess->core->platform->num_formats;
+ const struct amvdec_format *fmt_out;
+ struct v4l2_pix_format_mplane orig_pixmp;
+ struct v4l2_format format;
+ u32 pixfmt_out = 0, pixfmt_cap = 0;
+
+ orig_pixmp = *pixmp;
+
+ fmt_out = vdec_try_fmt_common(sess, num_formats, f);
+ if (!fmt_out)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixfmt_out = pixmp->pixelformat;
+ pixfmt_cap = sess->pixfmt_cap;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixfmt_cap = pixmp->pixelformat;
+ pixfmt_out = sess->fmt_out->pixfmt;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_out;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(sess, num_formats, &format);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ sess->width = format.fmt.pix_mp.width;
+ sess->height = format.fmt.pix_mp.height;
+ sess->colorspace = pixmp->colorspace;
+ sess->ycbcr_enc = pixmp->ycbcr_enc;
+ sess->quantization = pixmp->quantization;
+ sess->xfer_func = pixmp->xfer_func;
+ sess->src_buffer_size = pixmp->plane_fmt[0].sizeimage;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_cap;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(sess, num_formats, &format);
+
+ sess->width = format.fmt.pix_mp.width;
+ sess->height = format.fmt.pix_mp.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->fmt_out = fmt_out;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ sess->pixfmt_cap = format.fmt.pix_mp.pixelformat;
+
+ return 0;
+}
+
+static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+ const struct vdec_platform *platform = sess->core->platform;
+ const struct amvdec_format *fmt_out;
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (f->index >= platform->num_formats)
+ return -EINVAL;
+
+ fmt_out = &platform->formats[f->index];
+ f->pixelformat = fmt_out->pixfmt;
+ f->flags = fmt_out->flags;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt_out = sess->fmt_out;
+ if (f->index >= 4 || !fmt_out->pixfmts_cap[f->index])
+ return -EINVAL;
+
+ f->pixelformat = fmt_out->pixfmts_cap[f->index];
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vdec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+ const struct amvdec_format *formats = sess->core->platform->formats;
+ const struct amvdec_format *fmt;
+ u32 num_formats = sess->core->platform->num_formats;
+
+ fmt = find_format(formats, num_formats, fsize->pixel_format);
+ if (!fmt || fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+
+ fsize->stepwise.min_width = 256;
+ fsize->stepwise.max_width = fmt->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = 144;
+ fsize->stepwise.max_height = fmt->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int
+vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct device *dev = sess->core->dev;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ if (!(sess->streamon_out & sess->streamon_cap))
+ return 0;
+
+ if (cmd->cmd == V4L2_DEC_CMD_START) {
+ v4l2_m2m_clear_state(sess->m2m_ctx);
+ sess->should_stop = 0;
+ return 0;
+ }
+
+ /* Should not happen */
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ dev_dbg(dev, "Received V4L2_DEC_CMD_STOP\n");
+
+ sess->should_stop = 1;
+
+ v4l2_m2m_mark_stopped(sess->m2m_ctx);
+
+ if (codec_ops->drain) {
+ vdec_wait_inactive(sess);
+ codec_ops->drain(sess);
+ } else if (codec_ops->eos_sequence) {
+ u32 len;
+ const u8 *data = codec_ops->eos_sequence(&len);
+
+ esparser_queue_eos(sess->core, data, len);
+ vdec_wait_inactive(sess);
+ }
+
+ return ret;
+}
+
+static int vdec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vdec_g_pixelaspect(struct file *file, void *fh, int type,
+ struct v4l2_fract *f)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ *f = sess->pixelaspect;
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
+ .vidioc_querycap = vdec_querycap,
+ .vidioc_enum_fmt_vid_cap = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_out = vdec_enum_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_enum_framesizes = vdec_enum_framesizes,
+ .vidioc_subscribe_event = vdec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = vdec_decoder_cmd,
+ .vidioc_g_pixelaspect = vdec_g_pixelaspect,
+};
+
+static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct amvdec_session *sess = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = sess;
+ src_vq->buf_struct_size = sizeof(struct dummy_buf);
+ src_vq->min_queued_buffers = 1;
+ src_vq->dev = sess->core->dev;
+ src_vq->lock = &sess->lock;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = sess;
+ dst_vq->buf_struct_size = sizeof(struct dummy_buf);
+ dst_vq->min_queued_buffers = 1;
+ dst_vq->dev = sess->core->dev;
+ dst_vq->lock = &sess->lock;
+ return vb2_queue_init(dst_vq);
+}
+
+static int vdec_init_ctrls(struct amvdec_session *sess)
+{
+ struct v4l2_ctrl_handler *ctrl_handler = &sess->ctrl_handler;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(ctrl_handler, 1);
+ if (ret)
+ return ret;
+
+ sess->ctrl_min_buf_capture =
+ v4l2_ctrl_new_std(ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1,
+ 1);
+
+ ret = ctrl_handler->error;
+ if (ret) {
+ v4l2_ctrl_handler_free(ctrl_handler);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_open(struct file *file)
+{
+ struct amvdec_core *core = video_drvdata(file);
+ struct device *dev = core->dev;
+ const struct amvdec_format *formats = core->platform->formats;
+ struct amvdec_session *sess;
+ int ret;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ return -ENOMEM;
+
+ sess->core = core;
+
+ sess->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
+ if (IS_ERR(sess->m2m_dev)) {
+		dev_err(dev, "v4l2_m2m_init failed\n");
+ ret = PTR_ERR(sess->m2m_dev);
+ goto err_free_sess;
+ }
+
+ sess->m2m_ctx = v4l2_m2m_ctx_init(sess->m2m_dev, sess, m2m_queue_init);
+ if (IS_ERR(sess->m2m_ctx)) {
+		dev_err(dev, "v4l2_m2m_ctx_init failed\n");
+ ret = PTR_ERR(sess->m2m_ctx);
+ goto err_m2m_release;
+ }
+
+ ret = vdec_init_ctrls(sess);
+ if (ret)
+ goto err_m2m_release;
+
+ sess->pixfmt_cap = formats[0].pixfmts_cap[0];
+ sess->fmt_out = &formats[0];
+ sess->width = 1280;
+ sess->height = 720;
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+ sess->src_buffer_size = SZ_1M;
+
+ INIT_LIST_HEAD(&sess->timestamps);
+ INIT_LIST_HEAD(&sess->bufs_recycle);
+ INIT_WORK(&sess->esparser_queue_work, esparser_queue_all_src);
+ mutex_init(&sess->lock);
+ mutex_init(&sess->bufs_recycle_lock);
+ spin_lock_init(&sess->ts_spinlock);
+
+ v4l2_fh_init(&sess->fh, core->vdev_dec);
+ sess->fh.ctrl_handler = &sess->ctrl_handler;
+ v4l2_fh_add(&sess->fh, file);
+ sess->fh.m2m_ctx = sess->m2m_ctx;
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(sess->m2m_dev);
+err_free_sess:
+ kfree(sess);
+ return ret;
+}
+
+static int vdec_close(struct file *file)
+{
+ struct amvdec_session *sess = file_to_amvdec_session(file);
+
+ v4l2_m2m_ctx_release(sess->m2m_ctx);
+ v4l2_m2m_release(sess->m2m_dev);
+ v4l2_fh_del(&sess->fh, file);
+ v4l2_fh_exit(&sess->fh);
+
+ mutex_destroy(&sess->lock);
+ mutex_destroy(&sess->bufs_recycle_lock);
+
+ kfree(sess);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vdec_fops = {
+ .owner = THIS_MODULE,
+ .open = vdec_open,
+ .release = vdec_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static irqreturn_t vdec_isr(int irq, void *data)
+{
+ struct amvdec_core *core = data;
+ struct amvdec_session *sess = core->cur_sess;
+
+ sess->last_irq_jiffies = get_jiffies_64();
+
+ return sess->fmt_out->codec_ops->isr(sess);
+}
+
+static irqreturn_t vdec_threaded_isr(int irq, void *data)
+{
+ struct amvdec_core *core = data;
+ struct amvdec_session *sess = core->cur_sess;
+
+ return sess->fmt_out->codec_ops->threaded_isr(sess);
+}
+
+static const struct of_device_id vdec_dt_match[] = {
+ { .compatible = "amlogic,gxbb-vdec",
+ .data = &vdec_platform_gxbb },
+ { .compatible = "amlogic,gxm-vdec",
+ .data = &vdec_platform_gxm },
+ { .compatible = "amlogic,gxl-vdec",
+ .data = &vdec_platform_gxl },
+ { .compatible = "amlogic,gxlx-vdec",
+ .data = &vdec_platform_gxlx },
+ { .compatible = "amlogic,g12a-vdec",
+ .data = &vdec_platform_g12a },
+ { .compatible = "amlogic,sm1-vdec",
+ .data = &vdec_platform_sm1 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vdec_dt_match);
+
+static int vdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct video_device *vdev;
+ struct amvdec_core *core;
+ const struct of_device_id *of_id;
+ int irq;
+ int ret;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->dev = dev;
+ platform_set_drvdata(pdev, core);
+
+ core->dos_base = devm_platform_ioremap_resource_byname(pdev, "dos");
+ if (IS_ERR(core->dos_base))
+ return PTR_ERR(core->dos_base);
+
+ core->esparser_base = devm_platform_ioremap_resource_byname(pdev, "esparser");
+ if (IS_ERR(core->esparser_base))
+ return PTR_ERR(core->esparser_base);
+
+ core->regmap_ao =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,ao-sysctrl");
+ if (IS_ERR(core->regmap_ao)) {
+		dev_err(dev, "Couldn't get the AO sysctrl regmap\n");
+ return PTR_ERR(core->regmap_ao);
+ }
+
+ core->canvas = meson_canvas_get(dev);
+ if (IS_ERR(core->canvas))
+ return PTR_ERR(core->canvas);
+
+ of_id = of_match_node(vdec_dt_match, dev->of_node);
+ core->platform = of_id->data;
+
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1) {
+ core->vdec_hevcf_clk = devm_clk_get(dev, "vdec_hevcf");
+ if (IS_ERR(core->vdec_hevcf_clk))
+ return -EPROBE_DEFER;
+ }
+
+ core->dos_parser_clk = devm_clk_get(dev, "dos_parser");
+ if (IS_ERR(core->dos_parser_clk))
+ return -EPROBE_DEFER;
+
+ core->dos_clk = devm_clk_get(dev, "dos");
+ if (IS_ERR(core->dos_clk))
+ return -EPROBE_DEFER;
+
+ core->vdec_1_clk = devm_clk_get(dev, "vdec_1");
+ if (IS_ERR(core->vdec_1_clk))
+ return -EPROBE_DEFER;
+
+ core->vdec_hevc_clk = devm_clk_get(dev, "vdec_hevc");
+ if (IS_ERR(core->vdec_hevc_clk))
+ return -EPROBE_DEFER;
+
+ irq = platform_get_irq_byname(pdev, "vdec");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(core->dev, irq, vdec_isr,
+ vdec_threaded_isr, IRQF_ONESHOT,
+ "vdec", core);
+ if (ret)
+ return ret;
+
+ ret = esparser_init(pdev, core);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register(dev, &core->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "Couldn't register v4l2 device\n");
+		return ret;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto err_vdev_release;
+ }
+
+ core->vdev_dec = vdev;
+ core->dev_dec = dev;
+ mutex_init(&core->lock);
+
+ strscpy(vdev->name, "meson-video-decoder", sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vdec_fops;
+ vdev->ioctl_ops = &vdec_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->lock = &core->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ video_set_drvdata(vdev, core);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(dev, "Failed registering video device\n");
+ goto err_vdev_release;
+ }
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+ v4l2_device_unregister(&core->v4l2_dev);
+ return ret;
+}
+
+static void vdec_remove(struct platform_device *pdev)
+{
+ struct amvdec_core *core = platform_get_drvdata(pdev);
+
+ video_unregister_device(core->vdev_dec);
+ v4l2_device_unregister(&core->v4l2_dev);
+}
+
+static struct platform_driver meson_vdec_driver = {
+ .probe = vdec_probe,
+ .remove = vdec_remove,
+ .driver = {
+ .name = "meson-vdec",
+ .of_match_table = vdec_dt_match,
+ },
+};
+module_platform_driver(meson_vdec_driver);
+
+MODULE_DESCRIPTION("Meson video decoder driver for GXBB/GXL/GXM/G12/SM1");
+MODULE_AUTHOR("Maxime Jourdan <mjourdan@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
new file mode 100644
index 000000000000..7a5d8e871d70
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CORE_H_
+#define __MESON_VDEC_CORE_H_
+
+#include <linux/irqreturn.h>
+#include <linux/regmap.h>
+#include <linux/list.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/soc/amlogic/meson-canvas.h>
+
+#include "vdec_platform.h"
+
+/* 32 buffers in 3-plane YUV420 */
+#define MAX_CANVAS (32 * 3)
+
+struct amvdec_buffer {
+ struct list_head list;
+ struct vb2_buffer *vb;
+};
+
+/**
+ * struct amvdec_timestamp - stores a src timestamp along with a VIFIFO offset
+ *
+ * @list: used to make lists out of this struct
+ * @tc: timecode from the v4l2 buffer
+ * @ts: timestamp from the VB2 buffer
+ * @offset: offset in the VIFIFO where the associated packet was written
+ * @flags: flags from the v4l2 buffer
+ * @used_count: times this timestamp was checked for a match with a dst buffer
+ */
+struct amvdec_timestamp {
+ struct list_head list;
+ struct v4l2_timecode tc;
+ u64 ts;
+ u32 offset;
+ u32 flags;
+ u32 used_count;
+};
+
+struct amvdec_session;
+
+/**
+ * struct amvdec_core - device parameters, singleton
+ *
+ * @dos_base: DOS memory base address
+ * @esparser_base: PARSER memory base address
+ * @regmap_ao: regmap for the AO bus
+ * @dev: core device
+ * @dev_dec: decoder device
+ * @platform: platform-specific data
+ * @canvas: canvas provider reference
+ * @dos_parser_clk: DOS_PARSER clock
+ * @dos_clk: DOS clock
+ * @vdec_1_clk: VDEC_1 clock
+ * @vdec_hevc_clk: VDEC_HEVC clock
+ * @vdec_hevcf_clk: VDEC_HEVCF clock
+ * @esparser_reset: RESET for the PARSER
+ * @vdev_dec: video device for the decoder
+ * @v4l2_dev: v4l2 device
+ * @cur_sess: current decoding session
+ * @lock: video device lock
+ */
+struct amvdec_core {
+ void __iomem *dos_base;
+ void __iomem *esparser_base;
+ struct regmap *regmap_ao;
+
+ struct device *dev;
+ struct device *dev_dec;
+ const struct vdec_platform *platform;
+
+ struct meson_canvas *canvas;
+
+ struct clk *dos_parser_clk;
+ struct clk *dos_clk;
+ struct clk *vdec_1_clk;
+ struct clk *vdec_hevc_clk;
+ struct clk *vdec_hevcf_clk;
+
+ struct reset_control *esparser_reset;
+
+ struct video_device *vdev_dec;
+ struct v4l2_device v4l2_dev;
+
+ struct amvdec_session *cur_sess;
+ struct mutex lock;
+};
+
+/**
+ * struct amvdec_ops - vdec operations
+ *
+ * @start: mandatory call when the vdec needs to initialize
+ * @stop: mandatory call when the vdec needs to stop
+ * @conf_esparser: mandatory call to let the vdec configure the ESPARSER
+ * @vififo_level: mandatory call to get the current amount of data
+ * in the VIFIFO
+ */
+struct amvdec_ops {
+ int (*start)(struct amvdec_session *sess);
+ int (*stop)(struct amvdec_session *sess);
+ void (*conf_esparser)(struct amvdec_session *sess);
+ u32 (*vififo_level)(struct amvdec_session *sess);
+};
+
+/**
+ * struct amvdec_codec_ops - codec operations
+ *
+ * @start: mandatory call when the codec needs to initialize
+ * @stop: mandatory call when the codec needs to stop
+ * @load_extended_firmware: optional call to load additional firmware bits
+ * @num_pending_bufs: optional call to get the number of dst buffers on hold
+ * @can_recycle: optional call to know if the codec is ready to recycle
+ * a dst buffer
+ * @recycle: optional call to tell the codec to recycle a dst buffer. Must go
+ * in pair with @can_recycle
+ * @drain: optional call if the codec has a custom way of draining
+ * @resume: optional call to resume after a resolution change
+ * @eos_sequence: optional call to get an end sequence to send to esparser
+ * for flush. Mutually exclusive with @drain.
+ * @isr: mandatory call when the ISR triggers
+ * @threaded_isr: mandatory call for the threaded ISR
+ */
+struct amvdec_codec_ops {
+ int (*start)(struct amvdec_session *sess);
+ int (*stop)(struct amvdec_session *sess);
+ int (*load_extended_firmware)(struct amvdec_session *sess,
+ const u8 *data, u32 len);
+ u32 (*num_pending_bufs)(struct amvdec_session *sess);
+ int (*can_recycle)(struct amvdec_core *core);
+ void (*recycle)(struct amvdec_core *core, u32 buf_idx);
+ void (*drain)(struct amvdec_session *sess);
+ void (*resume)(struct amvdec_session *sess);
+ const u8 * (*eos_sequence)(u32 *len);
+ irqreturn_t (*isr)(struct amvdec_session *sess);
+ irqreturn_t (*threaded_isr)(struct amvdec_session *sess);
+};
+
+/**
+ * struct amvdec_format - describes one of the supported OUTPUT (src) formats
+ *
+ * @pixfmt: V4L2 pixel format
+ * @min_buffers: minimum amount of CAPTURE (dst) buffers
+ * @max_buffers: maximum amount of CAPTURE (dst) buffers
+ * @max_width: maximum picture width supported
+ * @max_height: maximum picture height supported
+ * @flags: enum flags associated with this pixfmt
+ * @vdec_ops: the VDEC operations that support this format
+ * @codec_ops: the codec operations that support this format
+ * @firmware_path: Path to the firmware that supports this format
+ * @pixfmts_cap: list of CAPTURE pixel formats available with pixfmt
+ */
+struct amvdec_format {
+ u32 pixfmt;
+ u32 min_buffers;
+ u32 max_buffers;
+ u32 max_width;
+ u32 max_height;
+ u32 flags;
+
+ struct amvdec_ops *vdec_ops;
+ struct amvdec_codec_ops *codec_ops;
+
+ char *firmware_path;
+ u32 pixfmts_cap[4];
+};
+
+enum amvdec_status {
+ STATUS_STOPPED,
+ STATUS_INIT,
+ STATUS_RUNNING,
+ STATUS_NEEDS_RESUME,
+};
+
+/**
+ * struct amvdec_session - decoding session parameters
+ *
+ * @core: reference to the vdec core struct
+ * @fh: v4l2 file handle
+ * @m2m_dev: v4l2 m2m device
+ * @m2m_ctx: v4l2 m2m context
+ * @ctrl_handler: V4L2 control handler
+ * @ctrl_min_buf_capture: V4L2 control V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
+ * @lock: cap & out queues lock
+ * @fmt_out: vdec pixel format for the OUTPUT queue
+ * @pixfmt_cap: V4L2 pixel format for the CAPTURE queue
+ * @src_buffer_size: size in bytes of the OUTPUT buffers' only plane
+ * @width: current picture width
+ * @height: current picture height
+ * @colorspace: current colorspace
+ * @ycbcr_enc: current ycbcr_enc
+ * @quantization: current quantization
+ * @xfer_func: current transfer function
+ * @pixelaspect: Pixel Aspect Ratio reported by the decoder
+ * @esparser_queued_bufs: number of buffers currently queued into ESPARSER
+ * @esparser_queue_work: work struct for the ESPARSER to process src buffers
+ * @streamon_cap: stream on flag for capture queue
+ * @streamon_out: stream on flag for output queue
+ * @sequence_cap: capture sequence counter
+ * @sequence_out: output sequence counter
+ * @should_stop: flag set if userspace signaled EOS via command
+ * or empty buffer
+ * @keyframe_found: flag set once a keyframe has been parsed
+ * @num_dst_bufs: number of destination buffers
+ * @changed_format: the format changed
+ * @canvas_alloc: array of all the canvas IDs allocated
+ * @canvas_num: number of canvas IDs allocated
+ * @vififo_vaddr: virtual address for the VIFIFO
+ * @vififo_paddr: physical address for the VIFIFO
+ * @vififo_size: size of the VIFIFO dma alloc
+ * @bufs_recycle: list of buffers that need to be recycled
+ * @bufs_recycle_lock: lock for the bufs_recycle list
+ * @recycle_thread: task struct for the recycling thread
+ * @timestamps: chronological list of src timestamps
+ * @ts_spinlock: spinlock for the timestamps list
+ * @last_irq_jiffies: tracks last time the vdec triggered an IRQ
+ * @last_offset: tracks last offset of vififo
+ * @wrap_count: number of times the vififo wrapped around
+ * @fw_idx_to_vb2_idx: firmware buffer index to vb2 buffer index
+ * @status: current decoding status
+ * @priv: codec private data
+ */
+struct amvdec_session {
+ struct amvdec_core *core;
+
+ struct v4l2_fh fh;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_min_buf_capture;
+ struct mutex lock;
+
+ const struct amvdec_format *fmt_out;
+ u32 pixfmt_cap;
+ u32 src_buffer_size;
+
+ u32 width;
+ u32 height;
+ u32 colorspace;
+ u8 ycbcr_enc;
+ u8 quantization;
+ u8 xfer_func;
+
+ struct v4l2_fract pixelaspect;
+
+ atomic_t esparser_queued_bufs;
+ struct work_struct esparser_queue_work;
+
+ unsigned int streamon_cap, streamon_out;
+ unsigned int sequence_cap, sequence_out;
+ unsigned int should_stop;
+ unsigned int keyframe_found;
+ unsigned int num_dst_bufs;
+ unsigned int changed_format;
+
+ u8 canvas_alloc[MAX_CANVAS];
+ u32 canvas_num;
+
+ void *vififo_vaddr;
+ dma_addr_t vififo_paddr;
+ u32 vififo_size;
+
+ struct list_head bufs_recycle;
+ struct mutex bufs_recycle_lock; /* bufs_recycle list lock */
+ struct task_struct *recycle_thread;
+
+ struct list_head timestamps;
+ spinlock_t ts_spinlock; /* timestamp list lock */
+
+ u64 last_irq_jiffies;
+ u32 last_offset;
+ u32 wrap_count;
+ u32 fw_idx_to_vb2_idx[32];
+
+ enum amvdec_status status;
+ void *priv;
+};
+
+static inline struct amvdec_session *file_to_amvdec_session(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct amvdec_session, fh);
+}
+
+u32 amvdec_get_output_size(struct amvdec_session *sess);
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c
new file mode 100644
index 000000000000..a65cb4959446
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_1.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ *
+ * VDEC_1 is a video decoding block that allows decoding of
+ * MPEG 1/2/4, H.263, H.264, MJPEG, VC1
+ */
+
+#include <linux/firmware.h>
+#include <linux/clk.h>
+
+#include "vdec_1.h"
+#include "vdec_helpers.h"
+#include "dos_regs.h"
+
+/* AO Registers */
+#define AO_RTI_GEN_PWR_SLEEP0 0xe8
+#define AO_RTI_GEN_PWR_ISO0 0xec
+ #define GEN_PWR_VDEC_1 (BIT(3) | BIT(2))
+ #define GEN_PWR_VDEC_1_SM1 (BIT(1))
+
+#define MC_SIZE (4096 * 4)
+
+static int
+vdec_1_load_firmware(struct amvdec_session *sess, const char *fwname)
+{
+ const struct firmware *fw;
+ struct amvdec_core *core = sess->core;
+ struct device *dev = core->dev_dec;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ static void *mc_addr;
+ static dma_addr_t mc_addr_map;
+ int ret;
+ u32 i = 1000;
+
+ ret = request_firmware(&fw, fwname, dev);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (fw->size < MC_SIZE) {
+ dev_err(dev, "Firmware size %zu is too small. Expected %u.\n",
+ fw->size, MC_SIZE);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ mc_addr = dma_alloc_coherent(core->dev, MC_SIZE,
+ &mc_addr_map, GFP_KERNEL);
+ if (!mc_addr) {
+ ret = -ENOMEM;
+ goto release_firmware;
+ }
+
+ memcpy(mc_addr, fw->data, MC_SIZE);
+
+ amvdec_write_dos(core, MPSR, 0);
+ amvdec_write_dos(core, CPSR, 0);
+
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
+
+ amvdec_write_dos(core, IMEM_DMA_ADR, mc_addr_map);
+ amvdec_write_dos(core, IMEM_DMA_COUNT, MC_SIZE / 4);
+ amvdec_write_dos(core, IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
+
+ while (--i && amvdec_read_dos(core, IMEM_DMA_CTRL) & 0x8000);
+
+ if (i == 0) {
+ dev_err(dev, "Firmware load fail (DMA hang?)\n");
+ ret = -EINVAL;
+ goto free_mc;
+ }
+
+ if (codec_ops->load_extended_firmware)
+ ret = codec_ops->load_extended_firmware(sess,
+ fw->data + MC_SIZE,
+ fw->size - MC_SIZE);
+
+free_mc:
+ dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map);
+release_firmware:
+ release_firmware(fw);
+ return ret;
+}
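
For illustration, the load path above copies only the first MC_SIZE (16 KiB) of the requested blob into the decoder instruction memory and hands any remainder to the codec's optional load_extended_firmware() callback. A minimal userspace C sketch of that split, with a placeholder file name (not driver code):

/* Sketch of the core/extended firmware split (illustration only). */
#include <stdio.h>

#define MC_SIZE (4096 * 4)

int main(void)
{
	FILE *f = fopen("gxl_h264.bin", "rb"); /* placeholder name */
	long size;

	if (!f)
		return 1;
	if (fseek(f, 0, SEEK_END) || (size = ftell(f)) < 0) {
		fclose(f);
		return 1;
	}
	fclose(f);

	if (size < MC_SIZE) {
		fprintf(stderr, "firmware too small: %ld < %d\n", size, MC_SIZE);
		return 1;
	}
	printf("core blob: %d bytes, extended blob: %ld bytes\n",
	       MC_SIZE, size - MC_SIZE);
	return 0;
}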
+
+static int vdec_1_stbuf_power_up(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_CONTROL, 0);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_WRAP_COUNT, 0);
+ amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_START_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_CURR_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_END_PTR,
+ sess->vififo_paddr + sess->vififo_size - 8);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_MANUAL);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_WP, sess->vififo_paddr);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL,
+ (0x11 << MEM_FIFO_CNT_BIT) | MEM_FILL_ON_LEVEL |
+ MEM_CTRL_FILL_EN | MEM_CTRL_EMPTY_EN);
+
+ return 0;
+}
+
+static void vdec_1_conf_esparser(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+	/* VDEC_1 specific ESPARSER configuration */
+ amvdec_write_dos(core, DOS_GEN_CTRL0, 0);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+}
+
+static u32 vdec_1_vififo_level(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ return amvdec_read_dos(core, VLD_MEM_VIFIFO_LEVEL);
+}
+
+static void __vdec_1_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ amvdec_write_dos(core, MPSR, 0);
+ amvdec_write_dos(core, CPSR, 0);
+ amvdec_write_dos(core, ASSIST_MBOX1_MASK, 0);
+
+ amvdec_write_dos(core, DOS_SW_RESET0, BIT(12) | BIT(11));
+ amvdec_write_dos(core, DOS_SW_RESET0, 0);
+ amvdec_read_dos(core, DOS_SW_RESET0);
+
+ /* enable vdec1 isolation */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
+ else
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
+ /* power off vdec1 memories */
+ amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0xffffffff);
+ /* power off vdec1 */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
+
+ if (sess->priv)
+ codec_ops->stop(sess);
+}
+
+static int vdec_1_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ __vdec_1_stop(sess);
+
+ clk_disable_unprepare(core->vdec_1_clk);
+
+ return 0;
+}
+
+static int vdec_1_start(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ /* Configure the vdec clk to the maximum available */
+ clk_set_rate(core->vdec_1_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_1_clk);
+ if (ret)
+ return ret;
+
+ /* Enable power for VDEC_1 */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1_SM1, 0);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, 0);
+ usleep_range(10, 20);
+
+ /* Reset VDEC1 */
+ amvdec_write_dos(core, DOS_SW_RESET0, 0xfffffffc);
+ amvdec_write_dos(core, DOS_SW_RESET0, 0x00000000);
+
+ amvdec_write_dos(core, DOS_GCLK_EN0, 0x3ff);
+
+ /* enable VDEC Memories */
+ amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0);
+ /* Remove VDEC1 Isolation */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_1_SM1, 0);
+ else
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
+ /* Reset DOS top registers */
+ amvdec_write_dos(core, DOS_VDEC_MCRCC_STALL_CTRL, 0);
+
+ amvdec_write_dos(core, GCLK_EN, 0x3ff);
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
+
+ vdec_1_stbuf_power_up(sess);
+
+ ret = vdec_1_load_firmware(sess, sess->fmt_out->firmware_path);
+ if (ret)
+ goto stop;
+
+ ret = codec_ops->start(sess);
+ if (ret)
+ goto stop;
+
+ /* Enable IRQ */
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+ amvdec_write_dos(core, ASSIST_MBOX1_MASK, 1);
+
+ /* Enable 2-plane output */
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
+ amvdec_write_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
+ else
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
+
+ /* Enable firmware processor */
+ amvdec_write_dos(core, MPSR, 1);
+ /* Let the firmware settle */
+ usleep_range(10, 20);
+
+ return 0;
+
+stop:
+ __vdec_1_stop(sess);
+ clk_disable_unprepare(core->vdec_1_clk);
+ return ret;
+}
+
+struct amvdec_ops vdec_1_ops = {
+ .start = vdec_1_start,
+ .stop = vdec_1_stop,
+ .conf_esparser = vdec_1_conf_esparser,
+ .vififo_level = vdec_1_vififo_level,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_1.h b/drivers/staging/media/meson/vdec/vdec_1.h
new file mode 100644
index 000000000000..042d930c40d7
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_1.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_VDEC_1_H_
+#define __MESON_VDEC_VDEC_1_H_
+
+#include "vdec.h"
+
+extern struct amvdec_ops vdec_1_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
new file mode 100644
index 000000000000..7d2a75653250
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <linux/gcd.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec_helpers.h"
+
+#define NUM_CANVAS_NV12 2
+#define NUM_CANVAS_YUV420 3
+
+u32 amvdec_read_dos(struct amvdec_core *core, u32 reg)
+{
+ return readl_relaxed(core->dos_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_read_dos);
+
+void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val)
+{
+ writel_relaxed(val, core->dos_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_dos);
+
+void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
+{
+ amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) | val);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_dos_bits);
+
+void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
+{
+ amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) & ~val);
+}
+EXPORT_SYMBOL_GPL(amvdec_clear_dos_bits);
+
+u32 amvdec_read_parser(struct amvdec_core *core, u32 reg)
+{
+ return readl_relaxed(core->esparser_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_read_parser);
+
+void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val)
+{
+ writel_relaxed(val, core->esparser_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_parser);
+
+/* 4 KiB per 64x32 block */
+u32 amvdec_am21c_body_size(u32 width, u32 height)
+{
+ u32 width_64 = ALIGN(width, 64) / 64;
+ u32 height_32 = ALIGN(height, 32) / 32;
+
+ return SZ_4K * width_64 * height_32;
+}
+EXPORT_SYMBOL_GPL(amvdec_am21c_body_size);
+
+/* 32 bytes per 128x64 block */
+u32 amvdec_am21c_head_size(u32 width, u32 height)
+{
+ u32 width_128 = ALIGN(width, 128) / 128;
+ u32 height_64 = ALIGN(height, 64) / 64;
+
+ return 32 * width_128 * height_64;
+}
+EXPORT_SYMBOL_GPL(amvdec_am21c_head_size);
+
+u32 amvdec_am21c_size(u32 width, u32 height)
+{
+ return ALIGN(amvdec_am21c_body_size(width, height) +
+ amvdec_am21c_head_size(width, height), SZ_64K);
+}
+EXPORT_SYMBOL_GPL(amvdec_am21c_size);
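
As the comments note, the compressed (AM21C) buffer is sized at 4 KiB per 64x32 body block plus 32 bytes per 128x64 header block, rounded up to 64 KiB. A standalone userspace C sketch of the same arithmetic, with 3840x2160 purely as an example:

/* Userspace sketch of the AM21C size math above (illustration only). */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))
#define SZ_4K		4096u
#define SZ_64K		65536u

static uint32_t am21c_body_size(uint32_t w, uint32_t h)
{
	/* 4 KiB per 64x32 block */
	return SZ_4K * (ALIGN_UP(w, 64) / 64) * (ALIGN_UP(h, 32) / 32);
}

static uint32_t am21c_head_size(uint32_t w, uint32_t h)
{
	/* 32 bytes per 128x64 block */
	return 32 * (ALIGN_UP(w, 128) / 128) * (ALIGN_UP(h, 64) / 64);
}

int main(void)
{
	uint32_t w = 3840, h = 2160;
	uint32_t total = ALIGN_UP(am21c_body_size(w, h) +
				  am21c_head_size(w, h), SZ_64K);

	printf("%ux%u -> body %u, head %u, total %u bytes\n",
	       w, h, am21c_body_size(w, h), am21c_head_size(w, h), total);
	return 0;
}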
+
+static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id)
+{
+ int ret;
+
+ if (sess->canvas_num >= MAX_CANVAS) {
+ dev_err(sess->core->dev, "Reached max number of canvas\n");
+ return -ENOMEM;
+ }
+
+ ret = meson_canvas_alloc(sess->core->canvas, canvas_id);
+ if (ret)
+ return ret;
+
+ sess->canvas_alloc[sess->canvas_num++] = *canvas_id;
+ return 0;
+}
+
+static int set_canvas_yuv420m(struct amvdec_session *sess,
+ struct vb2_buffer *vb, u32 width,
+ u32 height, u32 reg)
+{
+ struct amvdec_core *core = sess->core;
+ u8 canvas_id[NUM_CANVAS_YUV420]; /* Y U V */
+ dma_addr_t buf_paddr[NUM_CANVAS_YUV420]; /* Y U V */
+ int ret, i;
+
+ for (i = 0; i < NUM_CANVAS_YUV420; ++i) {
+ ret = canvas_alloc(sess, &canvas_id[i]);
+ if (ret)
+ return ret;
+
+ buf_paddr[i] =
+ vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+
+ /* Y plane */
+ meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
+ width, height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* U plane */
+ meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
+ width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* V plane */
+ meson_canvas_config(core->canvas, canvas_id[2], buf_paddr[2],
+ width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ amvdec_write_dos(core, reg,
+ ((canvas_id[2]) << 16) |
+ ((canvas_id[1]) << 8) |
+ (canvas_id[0]));
+
+ return 0;
+}
+
+static int set_canvas_nv12m(struct amvdec_session *sess,
+ struct vb2_buffer *vb, u32 width,
+ u32 height, u32 reg)
+{
+ struct amvdec_core *core = sess->core;
+ u8 canvas_id[NUM_CANVAS_NV12]; /* Y U/V */
+ dma_addr_t buf_paddr[NUM_CANVAS_NV12]; /* Y U/V */
+ int ret, i;
+
+ for (i = 0; i < NUM_CANVAS_NV12; ++i) {
+ ret = canvas_alloc(sess, &canvas_id[i]);
+ if (ret)
+ return ret;
+
+ buf_paddr[i] =
+ vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+
+ /* Y plane */
+ meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
+ width, height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* U/V plane */
+ meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
+ width, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ amvdec_write_dos(core, reg,
+ ((canvas_id[1]) << 16) |
+ ((canvas_id[1]) << 8) |
+ (canvas_id[0]));
+
+ return 0;
+}
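
Both helpers pack up to three 8-bit canvas indexes into one 32-bit register: the luma canvas in bits 0-7 and the chroma canvases in bits 8-15 and 16-23 (for NV12M the single U/V canvas index is written to both upper fields). A userspace sketch of that packing, for illustration only:

/* Illustration of the canvas-index packing used above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_canvases(uint8_t y, uint8_t u, uint8_t v)
{
	/* bits 0-7: Y canvas, 8-15: U (or U/V), 16-23: V */
	return ((uint32_t)v << 16) | ((uint32_t)u << 8) | y;
}

int main(void)
{
	uint32_t reg = pack_canvases(0x10, 0x11, 0x12);

	printf("canvas register value: 0x%08x\n", reg); /* 0x00121110 */
	return 0;
}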
+
+int amvdec_set_canvases(struct amvdec_session *sess,
+ u32 reg_base[], u32 reg_num[])
+{
+ struct v4l2_m2m_buffer *buf;
+ u32 pixfmt = sess->pixfmt_cap;
+ u32 width = ALIGN(sess->width, 32);
+ u32 height = ALIGN(sess->height, 32);
+ u32 reg_cur;
+ u32 reg_num_cur = 0;
+ u32 reg_base_cur = 0;
+ int i = 0;
+ int ret;
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ if (!reg_base[reg_base_cur])
+ return -EINVAL;
+
+ reg_cur = reg_base[reg_base_cur] + reg_num_cur * 4;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12M:
+ ret = set_canvas_nv12m(sess, &buf->vb.vb2_buf, width,
+ height, reg_cur);
+ if (ret)
+ return ret;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ ret = set_canvas_yuv420m(sess, &buf->vb.vb2_buf, width,
+ height, reg_cur);
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(sess->core->dev, "Unsupported pixfmt %08X\n",
+ pixfmt);
+ return -EINVAL;
+ }
+
+ reg_num_cur++;
+ if (reg_num_cur >= reg_num[reg_base_cur]) {
+ reg_base_cur++;
+ reg_num_cur = 0;
+ }
+
+ sess->fw_idx_to_vb2_idx[i++] = buf->vb.vb2_buf.index;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amvdec_set_canvases);
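
amvdec_set_canvases() hands out one 32-bit register per CAPTURE buffer, walking reg_base[]/reg_num[] and moving to the next base once a group is exhausted. A userspace sketch of that mapping with invented table values (the real tables come from the codec code, not shown here):

/* Userspace sketch of the reg_base[]/reg_num[] walk above. Returns the
 * register offset for the n-th CAPTURE buffer, or 0 when the tables are
 * exhausted (the driver returns -EINVAL in that case).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t canvas_reg_for_buf(const uint32_t *reg_base,
				   const uint32_t *reg_num, unsigned int n)
{
	unsigned int i = 0;

	while (reg_base[i]) {
		if (n < reg_num[i])
			return reg_base[i] + n * 4; /* one u32 per buffer */
		n -= reg_num[i];
		i++;
	}
	return 0;
}

int main(void)
{
	/* hypothetical layout: 4 registers at 0x1000, then 4 at 0x2000 */
	const uint32_t reg_base[] = { 0x1000, 0x2000, 0 };
	const uint32_t reg_num[]  = { 4, 4 };

	printf("buf 5 -> reg 0x%04x\n",
	       canvas_reg_for_buf(reg_base, reg_num, 5));
	return 0;
}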
+
+int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
+{
+ struct amvdec_timestamp *new_ts;
+ unsigned long flags;
+
+ new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL);
+ if (!new_ts)
+ return -ENOMEM;
+
+ new_ts->ts = ts;
+ new_ts->tc = tc;
+ new_ts->offset = offset;
+ new_ts->flags = vbuf_flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+ list_add_tail(&new_ts->list, &sess->timestamps);
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amvdec_add_ts);
+
+void amvdec_remove_ts(struct amvdec_session *sess, u64 ts)
+{
+ struct amvdec_timestamp *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+ list_for_each_entry(tmp, &sess->timestamps, list) {
+ if (tmp->ts == ts) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ goto unlock;
+ }
+ }
+ dev_warn(sess->core->dev_dec,
+ "Couldn't remove buffer with timestamp %llu from list\n", ts);
+
+unlock:
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(amvdec_remove_ts);
+
+static void dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 field, u64 timestamp,
+ struct v4l2_timecode timecode, u32 flags)
+{
+ struct device *dev = sess->core->dev_dec;
+ u32 output_size = amvdec_get_output_size(sess);
+
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 2);
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 4);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 2, output_size / 4);
+ break;
+ }
+
+ vbuf->vb2_buf.timestamp = timestamp;
+ vbuf->sequence = sess->sequence_cap++;
+ vbuf->flags = flags;
+ vbuf->timecode = timecode;
+
+ if (sess->should_stop &&
+ atomic_read(&sess->esparser_queued_bufs) <= 1) {
+ const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
+
+ dev_dbg(dev, "Signaling EOS, sequence_cap = %u\n",
+ sess->sequence_cap - 1);
+ v4l2_event_queue_fh(&sess->fh, &ev);
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ } else if (sess->status == STATUS_NEEDS_RESUME) {
+ /* Mark LAST for drained show frames during a source change */
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ sess->sequence_cap = 0;
+ } else if (sess->should_stop)
+ dev_dbg(dev, "should_stop, %u bufs remain\n",
+ atomic_read(&sess->esparser_queued_bufs));
+
+ dev_dbg(dev, "Buffer %u done, ts = %llu, flags = %08X\n",
+ vbuf->vb2_buf.index, timestamp, flags);
+ vbuf->field = field;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+
+ /* Buffer done probably means the vififo got freed */
+ schedule_work(&sess->esparser_queue_work);
+}
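
dst_buf_done() derives all plane payloads from a single output_size value: NV12M gets one half-size chroma plane, YUV420M two quarter-size planes. Assuming output_size is the luma plane size (amvdec_get_output_size() is not part of this hunk), the split looks like this userspace sketch:

/* Sketch of the per-plane payload split (output_size assumed luma-sized). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t w = 1920, h = 1088;		/* 32-aligned height, example */
	uint32_t output_size = w * h;

	printf("NV12M:   %u + %u\n", output_size, output_size / 2);
	printf("YUV420M: %u + %u + %u\n", output_size,
	       output_size / 4, output_size / 4);
	return 0;
}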
+
+void amvdec_dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf, u32 field)
+{
+ struct device *dev = sess->core->dev_dec;
+ struct amvdec_timestamp *tmp;
+ struct list_head *timestamps = &sess->timestamps;
+ struct v4l2_timecode timecode;
+ u64 timestamp;
+ u32 vbuf_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+ if (list_empty(timestamps)) {
+ dev_err(dev, "Buffer %u done but list is empty\n",
+ vbuf->vb2_buf.index);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+ return;
+ }
+
+ tmp = list_first_entry(timestamps, struct amvdec_timestamp, list);
+ timestamp = tmp->ts;
+ timecode = tmp->tc;
+ vbuf_flags = tmp->flags;
+ list_del(&tmp->list);
+ kfree(tmp);
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+
+ dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
+ atomic_dec(&sess->esparser_queued_bufs);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done);
+
+void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 offset, u32 field, bool allow_drop)
+{
+ struct device *dev = sess->core->dev_dec;
+ struct amvdec_timestamp *match = NULL;
+ struct amvdec_timestamp *tmp, *n;
+ struct v4l2_timecode timecode = { 0 };
+ u64 timestamp = 0;
+ u32 vbuf_flags = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+
+ /* Look for our vififo offset to get the corresponding timestamp. */
+ list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
+ if (tmp->offset > offset) {
+ /*
+ * Delete any record that remained unused for 32 match
+ * checks
+ */
+ if (tmp->used_count++ >= 32) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+
+ match = tmp;
+ }
+
+ if (!match) {
+ dev_err(dev, "Buffer %u done but can't match offset (%08X)\n",
+ vbuf->vb2_buf.index, offset);
+ } else {
+ timestamp = match->ts;
+ timecode = match->tc;
+ vbuf_flags = match->flags;
+ list_del(&match->list);
+ kfree(match);
+ }
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+
+ dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
+ if (match)
+ atomic_dec(&sess->esparser_queued_bufs);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_offset);
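
The loop above picks the newest recorded timestamp whose VIFIFO offset has not gone past the offset reported for the decoded picture, and ages out records that stayed unmatched for 32 checks. A simplified userspace sketch of the matching part, without the aging:

/* Userspace sketch of the offset -> timestamp matching (values invented). */
#include <stdio.h>
#include <stdint.h>

struct ts_rec {
	uint32_t offset;
	uint64_t ts;
};

static const struct ts_rec *match_offset(const struct ts_rec *recs,
					 unsigned int n, uint32_t offset)
{
	const struct ts_rec *match = NULL;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (recs[i].offset > offset)
			break;		/* records are kept in VIFIFO order */
		match = &recs[i];
	}
	return match;
}

int main(void)
{
	const struct ts_rec recs[] = {
		{ 0x0000, 1000 }, { 0x4000, 2000 }, { 0x8000, 3000 },
	};
	const struct ts_rec *m = match_offset(recs, 3, 0x5000);

	if (m)
		printf("offset 0x5000 -> ts %llu\n",
		       (unsigned long long)m->ts);
	return 0;
}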
+
+void amvdec_dst_buf_done_idx(struct amvdec_session *sess,
+ u32 buf_idx, u32 offset, u32 field)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct device *dev = sess->core->dev_dec;
+
+ vbuf = v4l2_m2m_dst_buf_remove_by_idx(sess->m2m_ctx,
+ sess->fw_idx_to_vb2_idx[buf_idx]);
+
+ if (!vbuf) {
+ dev_err(dev,
+ "Buffer %u done but it doesn't exist in m2m_ctx\n",
+ buf_idx);
+ return;
+ }
+
+ if (offset != -1)
+ amvdec_dst_buf_done_offset(sess, vbuf, offset, field, true);
+ else
+ amvdec_dst_buf_done(sess, vbuf, field);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_idx);
+
+void amvdec_set_par_from_dar(struct amvdec_session *sess,
+ u32 dar_num, u32 dar_den)
+{
+ u32 div;
+
+ sess->pixelaspect.numerator = sess->height * dar_num;
+ sess->pixelaspect.denominator = sess->width * dar_den;
+ div = gcd(sess->pixelaspect.numerator, sess->pixelaspect.denominator);
+ sess->pixelaspect.numerator /= div;
+ sess->pixelaspect.denominator /= div;
+}
+EXPORT_SYMBOL_GPL(amvdec_set_par_from_dar);
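
The helper reduces PAR = (height * dar_num) : (width * dar_den) with gcd(). A userspace sketch, using 1920x1080 with a 16:9 DAR purely as an example (which reduces to square pixels):

/* Userspace sketch of the DAR -> PAR reduction above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
	while (b) {
		uint32_t t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	uint32_t w = 1920, h = 1080, dar_num = 16, dar_den = 9;
	uint32_t num = h * dar_num, den = w * dar_den;
	uint32_t d = gcd_u32(num, den);

	printf("PAR = %u:%u\n", num / d, den / d); /* 1:1 for this case */
	return 0;
}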
+
+void amvdec_src_change(struct amvdec_session *sess, u32 width,
+ u32 height, u32 dpb_size)
+{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
+
+ v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, dpb_size);
+
+ /*
+ * Check if the capture queue is already configured well for our
+	 * use case. If so, keep decoding with it and do not send the event.
+ */
+ if (sess->streamon_cap &&
+ sess->width == width &&
+ sess->height == height &&
+ dpb_size <= sess->num_dst_bufs) {
+ sess->fmt_out->codec_ops->resume(sess);
+ return;
+ }
+
+ sess->changed_format = 0;
+ sess->width = width;
+ sess->height = height;
+ sess->status = STATUS_NEEDS_RESUME;
+
+ dev_dbg(sess->core->dev, "Res. changed (%ux%u), DPB size %u\n",
+ width, height, dpb_size);
+ v4l2_event_queue_fh(&sess->fh, &ev);
+}
+EXPORT_SYMBOL_GPL(amvdec_src_change);
+
+void amvdec_abort(struct amvdec_session *sess)
+{
+ dev_info(sess->core->dev, "Aborting decoding session!\n");
+ vb2_queue_error(&sess->m2m_ctx->cap_q_ctx.q);
+ vb2_queue_error(&sess->m2m_ctx->out_q_ctx.q);
+}
+EXPORT_SYMBOL_GPL(amvdec_abort);
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
new file mode 100644
index 000000000000..4bf3e61d081b
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_HELPERS_H_
+#define __MESON_VDEC_HELPERS_H_
+
+#include "vdec.h"
+
+/**
+ * amvdec_set_canvases() - Map VB2 buffers to canvases
+ *
+ * @sess: current session
+ * @reg_base: register base addresses where the canvas indexes are written
+ * @reg_num: number of contiguous registers after each reg_base (including it)
+ */
+int amvdec_set_canvases(struct amvdec_session *sess,
+ u32 reg_base[], u32 reg_num[]);
+
+/* Helpers to read/write to the various IPs (DOS, PARSER) */
+u32 amvdec_read_dos(struct amvdec_core *core, u32 reg);
+void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val);
+void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val);
+void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val);
+u32 amvdec_read_parser(struct amvdec_core *core, u32 reg);
+void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val);
+
+u32 amvdec_am21c_body_size(u32 width, u32 height);
+u32 amvdec_am21c_head_size(u32 width, u32 height);
+u32 amvdec_am21c_size(u32 width, u32 height);
+
+/**
+ * amvdec_dst_buf_done_idx() - Signal that a buffer is done decoding
+ *
+ * @sess: current session
+ * @buf_idx: hardware buffer index
+ * @offset: VIFIFO bitstream offset corresponding to the buffer
+ * @field: V4L2 interlaced field
+ */
+void amvdec_dst_buf_done_idx(struct amvdec_session *sess, u32 buf_idx,
+ u32 offset, u32 field);
+void amvdec_dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf, u32 field);
+void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 offset, u32 field, bool allow_drop);
+
+/**
+ * amvdec_add_ts() - Add a timestamp to the list
+ *
+ * @sess: current session
+ * @ts: timestamp to add
+ * @tc: timecode to add
+ * @offset: offset in the VIFIFO where the associated packet was written
+ * @flags: the vb2_v4l2_buffer flags
+ */
+int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 flags);
+void amvdec_remove_ts(struct amvdec_session *sess, u64 ts);
+
+/**
+ * amvdec_set_par_from_dar() - Set Pixel Aspect Ratio from Display Aspect Ratio
+ *
+ * @sess: current session
+ * @dar_num: numerator of the DAR
+ * @dar_den: denominator of the DAR
+ */
+void amvdec_set_par_from_dar(struct amvdec_session *sess,
+ u32 dar_num, u32 dar_den);
+
+/**
+ * amvdec_src_change() - Notify new resolution/DPB size to the core
+ *
+ * @sess: current session
+ * @width: picture width detected by the hardware
+ * @height: picture height detected by the hardware
+ * @dpb_size: Decoded Picture Buffer size (= amount of buffers for decoding)
+ */
+void amvdec_src_change(struct amvdec_session *sess, u32 width,
+ u32 height, u32 dpb_size);
+
+/**
+ * amvdec_abort() - Abort the current decoding session
+ *
+ * @sess: current session
+ */
+void amvdec_abort(struct amvdec_session *sess);
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c
new file mode 100644
index 000000000000..1939c47def58
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr>
+ *
+ * VDEC_HEVC is a video decoding block that allows decoding of
+ * HEVC, VP9
+ */
+
+#include <linux/firmware.h>
+#include <linux/clk.h>
+
+#include "vdec_1.h"
+#include "vdec_helpers.h"
+#include "vdec_hevc.h"
+#include "hevc_regs.h"
+#include "dos_regs.h"
+
+/* AO Registers */
+#define AO_RTI_GEN_PWR_SLEEP0 0xe8
+#define AO_RTI_GEN_PWR_ISO0 0xec
+ #define GEN_PWR_VDEC_HEVC (BIT(7) | BIT(6))
+ #define GEN_PWR_VDEC_HEVC_SM1 (BIT(2))
+
+#define MC_SIZE (4096 * 4)
+
+static int vdec_hevc_load_firmware(struct amvdec_session *sess,
+ const char *fwname)
+{
+ struct amvdec_core *core = sess->core;
+ struct device *dev = core->dev_dec;
+ const struct firmware *fw;
+ static void *mc_addr;
+ static dma_addr_t mc_addr_map;
+ int ret;
+ u32 i = 100;
+
+ ret = request_firmware(&fw, fwname, dev);
+ if (ret < 0) {
+ dev_err(dev, "Unable to request firmware %s\n", fwname);
+ return ret;
+ }
+
+ if (fw->size < MC_SIZE) {
+ dev_err(dev, "Firmware size %zu is too small. Expected %u.\n",
+ fw->size, MC_SIZE);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ mc_addr = dma_alloc_coherent(core->dev, MC_SIZE, &mc_addr_map,
+ GFP_KERNEL);
+ if (!mc_addr) {
+ ret = -ENOMEM;
+ goto release_firmware;
+ }
+
+ memcpy(mc_addr, fw->data, MC_SIZE);
+
+ amvdec_write_dos(core, HEVC_MPSR, 0);
+ amvdec_write_dos(core, HEVC_CPSR, 0);
+
+ amvdec_write_dos(core, HEVC_IMEM_DMA_ADR, mc_addr_map);
+ amvdec_write_dos(core, HEVC_IMEM_DMA_COUNT, MC_SIZE / 4);
+ amvdec_write_dos(core, HEVC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
+
+ while (i && (readl(core->dos_base + HEVC_IMEM_DMA_CTRL) & 0x8000))
+ i--;
+
+ if (i == 0) {
+ dev_err(dev, "Firmware load fail (DMA hang?)\n");
+ ret = -ENODEV;
+ }
+
+ dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map);
+release_firmware:
+ release_firmware(fw);
+ return ret;
+}
+
+static void vdec_hevc_stbuf_init(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ amvdec_write_dos(core, HEVC_STREAM_CONTROL,
+ amvdec_read_dos(core, HEVC_STREAM_CONTROL) & ~1);
+ amvdec_write_dos(core, HEVC_STREAM_START_ADDR, sess->vififo_paddr);
+ amvdec_write_dos(core, HEVC_STREAM_END_ADDR,
+ sess->vififo_paddr + sess->vififo_size);
+ amvdec_write_dos(core, HEVC_STREAM_RD_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, HEVC_STREAM_WR_PTR, sess->vififo_paddr);
+}
+
+/* VDEC_HEVC specific ESPARSER configuration */
+static void vdec_hevc_conf_esparser(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ /* set vififo_vbuf_rp_sel=>vdec_hevc */
+ amvdec_write_dos(core, DOS_GEN_CTRL0, 3 << 1);
+ amvdec_write_dos(core, HEVC_STREAM_CONTROL,
+ amvdec_read_dos(core, HEVC_STREAM_CONTROL) | BIT(3));
+ amvdec_write_dos(core, HEVC_STREAM_CONTROL,
+ amvdec_read_dos(core, HEVC_STREAM_CONTROL) | 1);
+ amvdec_write_dos(core, HEVC_STREAM_FIFO_CTL,
+ amvdec_read_dos(core, HEVC_STREAM_FIFO_CTL) | BIT(29));
+}
+
+static u32 vdec_hevc_vififo_level(struct amvdec_session *sess)
+{
+ return readl_relaxed(sess->core->dos_base + HEVC_STREAM_LEVEL);
+}
+
+static void __vdec_hevc_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ /* Disable interrupt */
+ amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 0);
+ /* Disable firmware processor */
+ amvdec_write_dos(core, HEVC_MPSR, 0);
+
+ if (sess->priv)
+ codec_ops->stop(sess);
+
+ /* Enable VDEC_HEVC Isolation */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_HEVC_SM1,
+ GEN_PWR_VDEC_HEVC_SM1);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ 0xc00, 0xc00);
+
+ /* VDEC_HEVC Memories */
+ amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0xffffffffUL);
+
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC_SM1,
+ GEN_PWR_VDEC_HEVC_SM1);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC, GEN_PWR_VDEC_HEVC);
+}
+
+static int vdec_hevc_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ __vdec_hevc_stop(sess);
+
+ clk_disable_unprepare(core->vdec_hevc_clk);
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
+
+ return 0;
+}
+
+static int __vdec_hevc_start(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ clk_set_rate(core->vdec_hevc_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_hevc_clk);
+ if (ret) {
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
+ return ret;
+ }
+
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC_SM1, 0);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC, 0);
+ usleep_range(10, 20);
+
+ /* Reset VDEC_HEVC*/
+ amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff);
+ amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000);
+
+ amvdec_write_dos(core, DOS_GCLK_EN3, 0xffffffff);
+
+ /* VDEC_HEVC Memories */
+ amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0x00000000);
+
+ /* Remove VDEC_HEVC Isolation */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_HEVC_SM1, 0);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ 0xc00, 0);
+
+ amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff);
+ amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000);
+
+ vdec_hevc_stbuf_init(sess);
+
+ ret = vdec_hevc_load_firmware(sess, sess->fmt_out->firmware_path);
+ if (ret)
+ goto stop;
+
+ ret = codec_ops->start(sess);
+ if (ret)
+ goto stop;
+
+ amvdec_write_dos(core, DOS_SW_RESET3, BIT(12) | BIT(11));
+ amvdec_write_dos(core, DOS_SW_RESET3, 0);
+ amvdec_read_dos(core, DOS_SW_RESET3);
+
+ amvdec_write_dos(core, HEVC_MPSR, 1);
+ /* Let the firmware settle */
+ usleep_range(10, 20);
+
+ return 0;
+
+stop:
+ __vdec_hevc_stop(sess);
+ clk_disable_unprepare(core->vdec_hevc_clk);
+ return ret;
+}
+
+static int vdec_hevc_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ int ret;
+
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1) {
+ clk_set_rate(core->vdec_hevcf_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_hevcf_clk);
+ if (ret)
+ return ret;
+
+ ret = __vdec_hevc_start(sess);
+ if (ret)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
+ return ret;
+ }
+
+ return __vdec_hevc_start(sess);
+}
+
+struct amvdec_ops vdec_hevc_ops = {
+ .start = vdec_hevc_start,
+ .stop = vdec_hevc_stop,
+ .conf_esparser = vdec_hevc_conf_esparser,
+ .vififo_level = vdec_hevc_vififo_level,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.h b/drivers/staging/media/meson/vdec/vdec_hevc.h
new file mode 100644
index 000000000000..cd576a73a966
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr>
+ */
+
+#ifndef __MESON_VDEC_VDEC_HEVC_H_
+#define __MESON_VDEC_VDEC_HEVC_H_
+
+#include "vdec.h"
+
+extern struct amvdec_ops vdec_hevc_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
new file mode 100644
index 000000000000..66bb307db85a
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include "vdec_platform.h"
+#include "vdec.h"
+
+#include "vdec_1.h"
+#include "vdec_hevc.h"
+#include "codec_mpeg12.h"
+#include "codec_h264.h"
+#include "codec_vp9.h"
+
+static const struct amvdec_format vdec_formats_gxbb[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxbb_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
+static const struct amvdec_format vdec_formats_gxl[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/gxl_vp9.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxl_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
+static const struct amvdec_format vdec_formats_gxlx[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxl_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
+static const struct amvdec_format vdec_formats_gxm[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/gxl_vp9.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxm_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
+static const struct amvdec_format vdec_formats_g12a[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/g12a_vp9.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/g12a_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
+static const struct amvdec_format vdec_formats_sm1[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/sm1_vp9_mmu.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/g12a_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
+const struct vdec_platform vdec_platform_gxbb = {
+ .formats = vdec_formats_gxbb,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxbb),
+ .revision = VDEC_REVISION_GXBB,
+};
+
+const struct vdec_platform vdec_platform_gxl = {
+ .formats = vdec_formats_gxl,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxl),
+ .revision = VDEC_REVISION_GXL,
+};
+
+const struct vdec_platform vdec_platform_gxlx = {
+ .formats = vdec_formats_gxlx,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxlx),
+ .revision = VDEC_REVISION_GXLX,
+};
+
+const struct vdec_platform vdec_platform_gxm = {
+ .formats = vdec_formats_gxm,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxm),
+ .revision = VDEC_REVISION_GXM,
+};
+
+const struct vdec_platform vdec_platform_g12a = {
+ .formats = vdec_formats_g12a,
+ .num_formats = ARRAY_SIZE(vdec_formats_g12a),
+ .revision = VDEC_REVISION_G12A,
+};
+
+const struct vdec_platform vdec_platform_sm1 = {
+ .formats = vdec_formats_sm1,
+ .num_formats = ARRAY_SIZE(vdec_formats_sm1),
+ .revision = VDEC_REVISION_SM1,
+};
+
+MODULE_FIRMWARE("meson/vdec/g12a_h264.bin");
+MODULE_FIRMWARE("meson/vdec/g12a_vp9.bin");
+MODULE_FIRMWARE("meson/vdec/gxbb_h264.bin");
+MODULE_FIRMWARE("meson/vdec/gxl_h264.bin");
+MODULE_FIRMWARE("meson/vdec/gxl_mpeg12.bin");
+MODULE_FIRMWARE("meson/vdec/gxl_vp9.bin");
+MODULE_FIRMWARE("meson/vdec/gxm_h264.bin");
+MODULE_FIRMWARE("meson/vdec/sm1_vp9_mmu.bin");
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h
new file mode 100644
index 000000000000..88ca4a9db8a8
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_platform.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_PLATFORM_H_
+#define __MESON_VDEC_PLATFORM_H_
+
+#include "vdec.h"
+
+struct amvdec_format;
+
+enum vdec_revision {
+ VDEC_REVISION_GXBB,
+ VDEC_REVISION_GXL,
+ VDEC_REVISION_GXLX,
+ VDEC_REVISION_GXM,
+ VDEC_REVISION_G12A,
+ VDEC_REVISION_SM1,
+};
+
+struct vdec_platform {
+ const struct amvdec_format *formats;
+ const u32 num_formats;
+ enum vdec_revision revision;
+};
+
+extern const struct vdec_platform vdec_platform_gxbb;
+extern const struct vdec_platform vdec_platform_gxm;
+extern const struct vdec_platform vdec_platform_gxl;
+extern const struct vdec_platform vdec_platform_gxlx;
+extern const struct vdec_platform vdec_platform_g12a;
+extern const struct vdec_platform vdec_platform_sm1;
+
+#endif
diff --git a/drivers/staging/media/mn88472/Kconfig b/drivers/staging/media/mn88472/Kconfig
deleted file mode 100644
index a85c90a60bce..000000000000
--- a/drivers/staging/media/mn88472/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config DVB_MN88472
- tristate "Panasonic MN88472"
- depends on DVB_CORE && I2C
- select REGMAP_I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
diff --git a/drivers/staging/media/mn88472/Makefile b/drivers/staging/media/mn88472/Makefile
deleted file mode 100644
index 5987b7e6d82a..000000000000
--- a/drivers/staging/media/mn88472/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_DVB_MN88472) += mn88472.o
-
-ccflags-y += -Idrivers/media/dvb-core/
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/mn88472/TODO b/drivers/staging/media/mn88472/TODO
deleted file mode 100644
index b90a14be3beb..000000000000
--- a/drivers/staging/media/mn88472/TODO
+++ /dev/null
@@ -1,21 +0,0 @@
-Driver general quality is not good enough for mainline. Also, other
-device drivers (USB-bridge, tuner) needed for Astrometa receiver in
-question could need some changes. However, if that driver is mainlined
-due to some other device than Astrometa, unrelated TODOs could be
-skipped. In that case rtl28xxu driver needs module parameter to prevent
-driver loading.
-
-Required TODOs:
-* missing lock flags
-* I2C errors
-* tuner sensitivity
-
-*Do not* send any patch fixing checkpatch.pl issues. Currently it passes
-checkpatch.pl tests. I don't want waste my time to review this kind of
-trivial stuff. *Do not* add missing register I/O error checks. Those are
-missing for the reason it is much easier to compare I2C data sniffs when
-there is less lines. Those error checks are about the last thing to be added.
-
-Patches should be submitted to:
-linux-media@vger.kernel.org and Antti Palosaari <crope@iki.fi>
-
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
deleted file mode 100644
index cf2e96bcf395..000000000000
--- a/drivers/staging/media/mn88472/mn88472.c
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * Panasonic MN88472 DVB-T/T2/C demodulator driver
- *
- * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mn88472_priv.h"
-
-static int mn88472_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *s)
-{
- s->min_delay_ms = 800;
- return 0;
-}
-
-static int mn88472_set_frontend(struct dvb_frontend *fe)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88472_dev *dev = i2c_get_clientdata(client);
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
- u32 if_frequency = 0;
- u64 tmp;
- u8 delivery_system_val, if_val[3], bw_val[7], bw_val2;
-
- dev_dbg(&client->dev,
- "delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d\n",
- c->delivery_system, c->modulation,
- c->frequency, c->symbol_rate, c->inversion);
-
- if (!dev->warm) {
- ret = -EAGAIN;
- goto err;
- }
-
- switch (c->delivery_system) {
- case SYS_DVBT:
- delivery_system_val = 0x02;
- break;
- case SYS_DVBT2:
- delivery_system_val = 0x03;
- break;
- case SYS_DVBC_ANNEX_A:
- delivery_system_val = 0x04;
- break;
- default:
- ret = -EINVAL;
- goto err;
- }
-
- if (c->bandwidth_hz <= 5000000) {
- memcpy(bw_val, "\xe5\x99\x9a\x1b\xa9\x1b\xa9", 7);
- bw_val2 = 0x03;
- } else if (c->bandwidth_hz <= 6000000) {
- /* IF 3570000 Hz, BW 6000000 Hz */
- memcpy(bw_val, "\xbf\x55\x55\x15\x6b\x15\x6b", 7);
- bw_val2 = 0x02;
- } else if (c->bandwidth_hz <= 7000000) {
- /* IF 4570000 Hz, BW 7000000 Hz */
- memcpy(bw_val, "\xa4\x00\x00\x0f\x2c\x0f\x2c", 7);
- bw_val2 = 0x01;
- } else if (c->bandwidth_hz <= 8000000) {
- /* IF 4570000 Hz, BW 8000000 Hz */
- memcpy(bw_val, "\x8f\x80\x00\x08\xee\x08\xee", 7);
- bw_val2 = 0x00;
- } else {
- ret = -EINVAL;
- goto err;
- }
-
- /* program tuner */
- if (fe->ops.tuner_ops.set_params) {
- ret = fe->ops.tuner_ops.set_params(fe);
- if (ret)
- goto err;
- }
-
- if (fe->ops.tuner_ops.get_if_frequency) {
- ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
- if (ret)
- goto err;
-
- dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
- }
-
- /* Calculate IF registers ( (1<<24)*IF / Xtal ) */
- tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
- dev->xtal);
- if_val[0] = ((tmp >> 16) & 0xff);
- if_val[1] = ((tmp >> 8) & 0xff);
- if_val[2] = ((tmp >> 0) & 0xff);
-
- ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
- ret = regmap_write(dev->regmap[2], 0xef, 0x13);
- ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap[2], 0x00, 0x66);
- if (ret)
- goto err;
- ret = regmap_write(dev->regmap[2], 0x01, 0x00);
- if (ret)
- goto err;
- ret = regmap_write(dev->regmap[2], 0x02, 0x01);
- if (ret)
- goto err;
- ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
- if (ret)
- goto err;
- ret = regmap_write(dev->regmap[2], 0x04, bw_val2);
- if (ret)
- goto err;
-
- for (i = 0; i < sizeof(if_val); i++) {
- ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
- if (ret)
- goto err;
- }
-
- for (i = 0; i < sizeof(bw_val); i++) {
- ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
- if (ret)
- goto err;
- }
-
- switch (c->delivery_system) {
- case SYS_DVBT:
- ret = regmap_write(dev->regmap[0], 0x07, 0x26);
- ret = regmap_write(dev->regmap[0], 0xb0, 0x0a);
- ret = regmap_write(dev->regmap[0], 0xb4, 0x00);
- ret = regmap_write(dev->regmap[0], 0xcd, 0x1f);
- ret = regmap_write(dev->regmap[0], 0xd4, 0x0a);
- ret = regmap_write(dev->regmap[0], 0xd6, 0x48);
- ret = regmap_write(dev->regmap[0], 0x00, 0xba);
- ret = regmap_write(dev->regmap[0], 0x01, 0x13);
- if (ret)
- goto err;
- break;
- case SYS_DVBT2:
- ret = regmap_write(dev->regmap[2], 0x2b, 0x13);
- ret = regmap_write(dev->regmap[2], 0x4f, 0x05);
- ret = regmap_write(dev->regmap[1], 0xf6, 0x05);
- ret = regmap_write(dev->regmap[0], 0xb0, 0x0a);
- ret = regmap_write(dev->regmap[0], 0xb4, 0xf6);
- ret = regmap_write(dev->regmap[0], 0xcd, 0x01);
- ret = regmap_write(dev->regmap[0], 0xd4, 0x09);
- ret = regmap_write(dev->regmap[0], 0xd6, 0x46);
- ret = regmap_write(dev->regmap[2], 0x30, 0x80);
- ret = regmap_write(dev->regmap[2], 0x32, 0x00);
- if (ret)
- goto err;
- break;
- case SYS_DVBC_ANNEX_A:
- ret = regmap_write(dev->regmap[0], 0xb0, 0x0b);
- ret = regmap_write(dev->regmap[0], 0xb4, 0x00);
- ret = regmap_write(dev->regmap[0], 0xcd, 0x17);
- ret = regmap_write(dev->regmap[0], 0xd4, 0x09);
- ret = regmap_write(dev->regmap[0], 0xd6, 0x48);
- ret = regmap_write(dev->regmap[1], 0x00, 0xb0);
- if (ret)
- goto err;
- break;
- default:
- ret = -EINVAL;
- goto err;
- }
-
- ret = regmap_write(dev->regmap[0], 0x46, 0x00);
- ret = regmap_write(dev->regmap[0], 0xae, 0x00);
-
- switch (dev->ts_mode) {
- case SERIAL_TS_MODE:
- ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
- break;
- case PARALLEL_TS_MODE:
- ret = regmap_write(dev->regmap[2], 0x08, 0x00);
- break;
- default:
- dev_dbg(&client->dev, "ts_mode error: %d\n", dev->ts_mode);
- ret = -EINVAL;
- goto err;
- }
-
- switch (dev->ts_clock) {
- case VARIABLE_TS_CLOCK:
- ret = regmap_write(dev->regmap[0], 0xd9, 0xe3);
- break;
- case FIXED_TS_CLOCK:
- ret = regmap_write(dev->regmap[0], 0xd9, 0xe1);
- break;
- default:
- dev_dbg(&client->dev, "ts_clock error: %d\n", dev->ts_clock);
- ret = -EINVAL;
- goto err;
- }
-
- /* Reset demod */
- ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
- if (ret)
- goto err;
-
- dev->delivery_system = c->delivery_system;
-
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
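For context on the IF programming removed above: both set_frontend paths (this one and the MN88473 one further down) derive three IF register bytes as (1 << 24) * IF / Xtal, rounded to the nearest integer by adding Xtal / 2 before the division, and write them to registers 0x10..0x12. The following is a minimal user-space sketch of that fixed-point conversion; the IF and crystal values in it are illustrative assumptions, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only -- not taken from the driver above. */
	uint64_t if_frequency = 5070000;	/* IF in Hz (hypothetical) */
	uint64_t xtal = 20500000;		/* crystal in Hz (hypothetical) */
	uint64_t tmp;
	uint8_t if_val[3];

	/* (1 << 24) * IF / Xtal, with Xtal / 2 added to round to nearest */
	tmp = (if_frequency * (1ULL << 24) + xtal / 2) / xtal;

	if_val[0] = (tmp >> 16) & 0xff;	/* written to register 0x10 */
	if_val[1] = (tmp >> 8) & 0xff;	/* written to register 0x11 */
	if_val[2] = (tmp >> 0) & 0xff;	/* written to register 0x12 */

	printf("IF registers: 0x%02x 0x%02x 0x%02x\n",
	       if_val[0], if_val[1], if_val[2]);
	return 0;
}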
-static int mn88472_read_status(struct dvb_frontend *fe, enum fe_status *status)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88472_dev *dev = i2c_get_clientdata(client);
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret;
- unsigned int utmp;
- int lock = 0;
-
- *status = 0;
-
- if (!dev->warm) {
- ret = -EAGAIN;
- goto err;
- }
-
- switch (c->delivery_system) {
- case SYS_DVBT:
- ret = regmap_read(dev->regmap[0], 0x7F, &utmp);
- if (ret)
- goto err;
- if ((utmp & 0xF) >= 0x09)
- lock = 1;
- break;
- case SYS_DVBT2:
- ret = regmap_read(dev->regmap[2], 0x92, &utmp);
- if (ret)
- goto err;
- if ((utmp & 0xF) >= 0x07)
- *status |= FE_HAS_SIGNAL;
- if ((utmp & 0xF) >= 0x0a)
- *status |= FE_HAS_CARRIER;
- if ((utmp & 0xF) >= 0x0d)
- *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
- break;
- case SYS_DVBC_ANNEX_A:
- ret = regmap_read(dev->regmap[1], 0x84, &utmp);
- if (ret)
- goto err;
- if ((utmp & 0xF) >= 0x08)
- lock = 1;
- break;
- default:
- ret = -EINVAL;
- goto err;
- }
-
- if (lock)
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
- FE_HAS_SYNC | FE_HAS_LOCK;
-
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int mn88472_init(struct dvb_frontend *fe)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88472_dev *dev = i2c_get_clientdata(client);
- int ret, len, remaining;
- const struct firmware *fw = NULL;
- u8 *fw_file = MN88472_FIRMWARE;
- unsigned int tmp;
-
- dev_dbg(&client->dev, "\n");
-
- /* set cold state by default */
- dev->warm = false;
-
- /* power on */
- ret = regmap_write(dev->regmap[2], 0x05, 0x00);
- if (ret)
- goto err;
-
- ret = regmap_bulk_write(dev->regmap[2], 0x0b, "\x00\x00", 2);
- if (ret)
- goto err;
-
- /* check if firmware is already running */
- ret = regmap_read(dev->regmap[0], 0xf5, &tmp);
- if (ret)
- goto err;
-
- if (!(tmp & 0x1)) {
- dev_info(&client->dev, "firmware already running\n");
- dev->warm = true;
- return 0;
- }
-
- /* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, &client->dev);
- if (ret) {
-		dev_err(&client->dev, "firmware file '%s' not found\n",
- fw_file);
- goto err;
- }
-
- dev_info(&client->dev, "downloading firmware from file '%s'\n",
- fw_file);
-
- ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
- if (ret)
- goto firmware_release;
-
- for (remaining = fw->size; remaining > 0;
- remaining -= (dev->i2c_wr_max - 1)) {
- len = remaining;
- if (len > (dev->i2c_wr_max - 1))
- len = dev->i2c_wr_max - 1;
-
- ret = regmap_bulk_write(dev->regmap[0], 0xf6,
- &fw->data[fw->size - remaining], len);
- if (ret) {
- dev_err(&client->dev,
- "firmware download failed=%d\n", ret);
- goto firmware_release;
- }
- }
-
- /* parity check of firmware */
- ret = regmap_read(dev->regmap[0], 0xf8, &tmp);
- if (ret) {
- dev_err(&client->dev,
- "parity reg read failed=%d\n", ret);
- goto firmware_release;
- }
- if (tmp & 0x10) {
- dev_err(&client->dev,
- "firmware parity check failed=0x%x\n", tmp);
- goto firmware_release;
- }
-	dev_dbg(&client->dev, "firmware parity check succeeded=0x%x\n", tmp);
-
- ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
- if (ret)
- goto firmware_release;
-
- release_firmware(fw);
- fw = NULL;
-
- /* warm state */
- dev->warm = true;
-
- return 0;
-firmware_release:
- release_firmware(fw);
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
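The firmware download loops in mn88472_init() above and mn88473_init() further down step through the image with a decreasing remaining counter and index the data at fw->size - remaining, writing at most i2c_wr_max - 1 bytes per transfer to register 0xf6. A behaviour-equivalent, offset-based sketch is shown here for clarity; the helper name is invented and only the chunking logic is taken from the code above.

#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Sketch only: max_chunk corresponds to i2c_wr_max - 1 in the driver. */
static int demod_write_firmware(struct regmap *map, const u8 *data,
				size_t size, size_t max_chunk)
{
	size_t offset, len;
	int ret;

	for (offset = 0; offset < size; offset += len) {
		len = min(size - offset, max_chunk);
		ret = regmap_bulk_write(map, 0xf6, data + offset, len);
		if (ret)
			return ret;
	}

	return 0;
}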
-static int mn88472_sleep(struct dvb_frontend *fe)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88472_dev *dev = i2c_get_clientdata(client);
- int ret;
-
- dev_dbg(&client->dev, "\n");
-
- /* power off */
- ret = regmap_write(dev->regmap[2], 0x0b, 0x30);
-
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
- if (ret)
- goto err;
-
- dev->delivery_system = SYS_UNDEFINED;
-
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static struct dvb_frontend_ops mn88472_ops = {
- .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
- .info = {
- .name = "Panasonic MN88472",
- .symbol_rate_min = 1000000,
- .symbol_rate_max = 7200000,
- .caps = FE_CAN_FEC_1_2 |
- FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 |
- FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_QAM_16 |
- FE_CAN_QAM_32 |
- FE_CAN_QAM_64 |
- FE_CAN_QAM_128 |
- FE_CAN_QAM_256 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO |
- FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION |
- FE_CAN_MULTISTREAM
- },
-
- .get_tune_settings = mn88472_get_tune_settings,
-
- .init = mn88472_init,
- .sleep = mn88472_sleep,
-
- .set_frontend = mn88472_set_frontend,
-
- .read_status = mn88472_read_status,
-};
-
-static int mn88472_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct mn88472_config *config = client->dev.platform_data;
- struct mn88472_dev *dev;
- int ret;
- unsigned int utmp;
- static const struct regmap_config regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- };
-
- dev_dbg(&client->dev, "\n");
-
-	/* Caller really needs to provide a pointer for the frontend we create. */
- if (config->fe == NULL) {
- dev_err(&client->dev, "frontend pointer not defined\n");
- ret = -EINVAL;
- goto err;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- dev->i2c_wr_max = config->i2c_wr_max;
- dev->xtal = config->xtal;
- dev->ts_mode = config->ts_mode;
- dev->ts_clock = config->ts_clock;
- dev->client[0] = client;
- dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
- if (IS_ERR(dev->regmap[0])) {
- ret = PTR_ERR(dev->regmap[0]);
- goto err_kfree;
- }
-
- /* check demod answers to I2C */
- ret = regmap_read(dev->regmap[0], 0x00, &utmp);
- if (ret)
- goto err_regmap_0_regmap_exit;
-
- /*
- * Chip has three I2C addresses for different register pages. Used
- * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
-	 * 0x1a and 0x1c, so that each register page gets its own I2C client.
- */
- dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
- if (dev->client[1] == NULL) {
- ret = -ENODEV;
- dev_err(&client->dev, "I2C registration failed\n");
- if (ret)
- goto err_regmap_0_regmap_exit;
- }
- dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
- if (IS_ERR(dev->regmap[1])) {
- ret = PTR_ERR(dev->regmap[1]);
- goto err_client_1_i2c_unregister_device;
- }
- i2c_set_clientdata(dev->client[1], dev);
-
- dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
- if (dev->client[2] == NULL) {
- ret = -ENODEV;
- dev_err(&client->dev, "2nd I2C registration failed\n");
- if (ret)
- goto err_regmap_1_regmap_exit;
- }
- dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
- if (IS_ERR(dev->regmap[2])) {
- ret = PTR_ERR(dev->regmap[2]);
- goto err_client_2_i2c_unregister_device;
- }
- i2c_set_clientdata(dev->client[2], dev);
-
- /* create dvb_frontend */
- memcpy(&dev->fe.ops, &mn88472_ops, sizeof(struct dvb_frontend_ops));
- dev->fe.demodulator_priv = client;
- *config->fe = &dev->fe;
- i2c_set_clientdata(client, dev);
-
- dev_info(&client->dev, "Panasonic MN88472 successfully attached\n");
- return 0;
-
-err_client_2_i2c_unregister_device:
- i2c_unregister_device(dev->client[2]);
-err_regmap_1_regmap_exit:
- regmap_exit(dev->regmap[1]);
-err_client_1_i2c_unregister_device:
- i2c_unregister_device(dev->client[1]);
-err_regmap_0_regmap_exit:
- regmap_exit(dev->regmap[0]);
-err_kfree:
- kfree(dev);
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int mn88472_remove(struct i2c_client *client)
-{
- struct mn88472_dev *dev = i2c_get_clientdata(client);
-
- dev_dbg(&client->dev, "\n");
-
- regmap_exit(dev->regmap[2]);
- i2c_unregister_device(dev->client[2]);
-
- regmap_exit(dev->regmap[1]);
- i2c_unregister_device(dev->client[1]);
-
- regmap_exit(dev->regmap[0]);
-
- kfree(dev);
-
- return 0;
-}
-
-static const struct i2c_device_id mn88472_id_table[] = {
- {"mn88472", 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, mn88472_id_table);
-
-static struct i2c_driver mn88472_driver = {
- .driver = {
- .name = "mn88472",
- },
- .probe = mn88472_probe,
- .remove = mn88472_remove,
- .id_table = mn88472_id_table,
-};
-
-module_i2c_driver(mn88472_driver);
-
-MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
-MODULE_DESCRIPTION("Panasonic MN88472 DVB-T/T2/C demodulator driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(MN88472_FIRMWARE);
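Because mn88472_probe() above takes its configuration from platform data, a bridge driver would have had to fill in a struct mn88472_config and create the I2C device itself. The fragment below is only a sketch of that wiring, inferred from the fields the probe reads (fe, i2c_wr_max, xtal, ts_mode, ts_clock); the numeric values and the helper function are illustrative, not taken from any real board.

#include <linux/i2c.h>
#include "mn88472.h"	/* assumed to declare struct mn88472_config and the TS mode/clock constants */

static struct dvb_frontend *fe;	/* filled in by mn88472_probe() via *config->fe */

static struct mn88472_config demod_cfg = {
	.fe = &fe,
	.i2c_wr_max = 22,		/* illustrative bridge I2C write limit */
	.xtal = 20500000,		/* illustrative crystal frequency */
	.ts_mode = SERIAL_TS_MODE,
	.ts_clock = VARIABLE_TS_CLOCK,
};

/* Sketch: register the demod at its main address, per the 0x18 register-page comment above. */
static struct i2c_client *attach_demod(struct i2c_adapter *adapter)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("mn88472", 0x18),
		.platform_data = &demod_cfg,
	};

	return i2c_new_device(adapter, &info);
}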
diff --git a/drivers/staging/media/mn88472/mn88472_priv.h b/drivers/staging/media/mn88472/mn88472_priv.h
deleted file mode 100644
index 1a0de9e46b66..000000000000
--- a/drivers/staging/media/mn88472/mn88472_priv.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Panasonic MN88472 DVB-T/T2/C demodulator driver
- *
- * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MN88472_PRIV_H
-#define MN88472_PRIV_H
-
-#include "dvb_frontend.h"
-#include "mn88472.h"
-#include <linux/firmware.h>
-#include <linux/regmap.h>
-
-#define MN88472_FIRMWARE "dvb-demod-mn88472-02.fw"
-
-struct mn88472_dev {
- struct i2c_client *client[3];
- struct regmap *regmap[3];
- struct dvb_frontend fe;
- u16 i2c_wr_max;
- enum fe_delivery_system delivery_system;
- bool warm; /* FW running */
- u32 xtal;
- int ts_mode;
- int ts_clock;
-};
-
-#endif
diff --git a/drivers/staging/media/mn88473/Kconfig b/drivers/staging/media/mn88473/Kconfig
deleted file mode 100644
index 6c9ebf51c2c7..000000000000
--- a/drivers/staging/media/mn88473/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config DVB_MN88473
- tristate "Panasonic MN88473"
- depends on DVB_CORE && I2C
- select REGMAP_I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
diff --git a/drivers/staging/media/mn88473/Makefile b/drivers/staging/media/mn88473/Makefile
deleted file mode 100644
index fac55410ce55..000000000000
--- a/drivers/staging/media/mn88473/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_DVB_MN88473) += mn88473.o
-
-ccflags-y += -Idrivers/media/dvb-core/
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/mn88473/TODO b/drivers/staging/media/mn88473/TODO
deleted file mode 100644
index b90a14be3beb..000000000000
--- a/drivers/staging/media/mn88473/TODO
+++ /dev/null
@@ -1,21 +0,0 @@
-The driver's general quality is not yet good enough for mainline. Also, the
-other device drivers (USB bridge, tuner) needed for the Astrometa receiver in
-question may need some changes. However, if this driver is mainlined
-for some device other than Astrometa, the unrelated TODOs can be
-skipped. In that case the rtl28xxu driver needs a module parameter to prevent
-driver loading.
-
-Required TODOs:
-* missing lock flags
-* I2C errors
-* tuner sensitivity
-
-*Do not* send any patch fixing checkpatch.pl issues. Currently it passes
-checkpatch.pl tests. I don't want to waste my time reviewing this kind of
-trivial stuff. *Do not* add missing register I/O error checks. Those are
-missing because it is much easier to compare I2C data sniffs when there
-are fewer lines. Those error checks are about the last thing to be added.
-
-Patches should be submitted to:
-linux-media@vger.kernel.org and Antti Palosaari <crope@iki.fi>
-
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/staging/media/mn88473/mn88473.c
deleted file mode 100644
index a222e99935d2..000000000000
--- a/drivers/staging/media/mn88473/mn88473.c
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * Panasonic MN88473 DVB-T/T2/C demodulator driver
- *
- * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mn88473_priv.h"
-
-static int mn88473_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *s)
-{
- s->min_delay_ms = 1000;
- return 0;
-}
-
-static int mn88473_set_frontend(struct dvb_frontend *fe)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88473_dev *dev = i2c_get_clientdata(client);
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
- u32 if_frequency;
- u64 tmp;
- u8 delivery_system_val, if_val[3], bw_val[7];
-
- dev_dbg(&client->dev,
- "delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%d stream_id=%d\n",
- c->delivery_system,
- c->modulation,
- c->frequency,
- c->bandwidth_hz,
- c->symbol_rate,
- c->inversion,
- c->stream_id);
-
- if (!dev->warm) {
- ret = -EAGAIN;
- goto err;
- }
-
- switch (c->delivery_system) {
- case SYS_DVBT:
- delivery_system_val = 0x02;
- break;
- case SYS_DVBT2:
- delivery_system_val = 0x03;
- break;
- case SYS_DVBC_ANNEX_A:
- delivery_system_val = 0x04;
- break;
- default:
- ret = -EINVAL;
- goto err;
- }
-
- if (c->bandwidth_hz <= 6000000) {
- memcpy(bw_val, "\xe9\x55\x55\x1c\x29\x1c\x29", 7);
- } else if (c->bandwidth_hz <= 7000000) {
- memcpy(bw_val, "\xc8\x00\x00\x17\x0a\x17\x0a", 7);
- } else if (c->bandwidth_hz <= 8000000) {
- memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
- } else {
- ret = -EINVAL;
- goto err;
- }
-
- /* program tuner */
- if (fe->ops.tuner_ops.set_params) {
- ret = fe->ops.tuner_ops.set_params(fe);
- if (ret)
- goto err;
- }
-
- if (fe->ops.tuner_ops.get_if_frequency) {
- ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
- if (ret)
- goto err;
-
- dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
- } else {
- if_frequency = 0;
- }
-
- /* Calculate IF registers ( (1<<24)*IF / Xtal ) */
- tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
- dev->xtal);
- if_val[0] = ((tmp >> 16) & 0xff);
- if_val[1] = ((tmp >> 8) & 0xff);
- if_val[2] = ((tmp >> 0) & 0xff);
-
- ret = regmap_write(dev->regmap[2], 0x05, 0x00);
- ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
- ret = regmap_write(dev->regmap[2], 0xef, 0x13);
- ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
- ret = regmap_write(dev->regmap[2], 0x00, 0x18);
- ret = regmap_write(dev->regmap[2], 0x01, 0x01);
- ret = regmap_write(dev->regmap[2], 0x02, 0x21);
- ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
- ret = regmap_write(dev->regmap[2], 0x0b, 0x00);
-
- for (i = 0; i < sizeof(if_val); i++) {
- ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
- if (ret)
- goto err;
- }
-
- for (i = 0; i < sizeof(bw_val); i++) {
- ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
- if (ret)
- goto err;
- }
-
- ret = regmap_write(dev->regmap[2], 0x2d, 0x3b);
- ret = regmap_write(dev->regmap[2], 0x2e, 0x00);
- ret = regmap_write(dev->regmap[2], 0x56, 0x0d);
- ret = regmap_write(dev->regmap[0], 0x01, 0xba);
- ret = regmap_write(dev->regmap[0], 0x02, 0x13);
- ret = regmap_write(dev->regmap[0], 0x03, 0x80);
- ret = regmap_write(dev->regmap[0], 0x04, 0xba);
- ret = regmap_write(dev->regmap[0], 0x05, 0x91);
- ret = regmap_write(dev->regmap[0], 0x07, 0xe7);
- ret = regmap_write(dev->regmap[0], 0x08, 0x28);
- ret = regmap_write(dev->regmap[0], 0x0a, 0x1a);
- ret = regmap_write(dev->regmap[0], 0x13, 0x1f);
- ret = regmap_write(dev->regmap[0], 0x19, 0x03);
- ret = regmap_write(dev->regmap[0], 0x1d, 0xb0);
- ret = regmap_write(dev->regmap[0], 0x2a, 0x72);
- ret = regmap_write(dev->regmap[0], 0x2d, 0x00);
- ret = regmap_write(dev->regmap[0], 0x3c, 0x00);
- ret = regmap_write(dev->regmap[0], 0x3f, 0xf8);
- ret = regmap_write(dev->regmap[0], 0x40, 0xf4);
- ret = regmap_write(dev->regmap[0], 0x41, 0x08);
- ret = regmap_write(dev->regmap[0], 0xd2, 0x29);
- ret = regmap_write(dev->regmap[0], 0xd4, 0x55);
- ret = regmap_write(dev->regmap[1], 0x10, 0x10);
- ret = regmap_write(dev->regmap[1], 0x11, 0xab);
- ret = regmap_write(dev->regmap[1], 0x12, 0x0d);
- ret = regmap_write(dev->regmap[1], 0x13, 0xae);
- ret = regmap_write(dev->regmap[1], 0x14, 0x1d);
- ret = regmap_write(dev->regmap[1], 0x15, 0x9d);
- ret = regmap_write(dev->regmap[1], 0xbe, 0x08);
- ret = regmap_write(dev->regmap[2], 0x09, 0x08);
- ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
- ret = regmap_write(dev->regmap[0], 0xb2, 0x37);
- ret = regmap_write(dev->regmap[0], 0xd7, 0x04);
- ret = regmap_write(dev->regmap[2], 0x32, 0x80);
- ret = regmap_write(dev->regmap[2], 0x36, 0x00);
- ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
- if (ret)
- goto err;
-
- dev->delivery_system = c->delivery_system;
-
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88473_dev *dev = i2c_get_clientdata(client);
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret;
- unsigned int utmp;
- int lock = 0;
-
- *status = 0;
-
- if (!dev->warm) {
- ret = -EAGAIN;
- goto err;
- }
-
- switch (c->delivery_system) {
- case SYS_DVBT:
- ret = regmap_read(dev->regmap[0], 0x62, &utmp);
- if (ret)
- goto err;
- if (!(utmp & 0xA0)) {
- if ((utmp & 0xF) >= 0x03)
- *status |= FE_HAS_SIGNAL;
- if ((utmp & 0xF) >= 0x09)
- lock = 1;
- }
- break;
- case SYS_DVBT2:
- ret = regmap_read(dev->regmap[2], 0x8B, &utmp);
- if (ret)
- goto err;
- if (!(utmp & 0x40)) {
- if ((utmp & 0xF) >= 0x07)
- *status |= FE_HAS_SIGNAL;
- if ((utmp & 0xF) >= 0x0a)
- *status |= FE_HAS_CARRIER;
- if ((utmp & 0xF) >= 0x0d)
- *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
- }
- break;
- case SYS_DVBC_ANNEX_A:
- ret = regmap_read(dev->regmap[1], 0x85, &utmp);
- if (ret)
- goto err;
- if (!(utmp & 0x40)) {
- ret = regmap_read(dev->regmap[1], 0x89, &utmp);
- if (ret)
- goto err;
- if (utmp & 0x01)
- lock = 1;
- }
- break;
- default:
- ret = -EINVAL;
- goto err;
- }
-
- if (lock)
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
- FE_HAS_SYNC | FE_HAS_LOCK;
-
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int mn88473_init(struct dvb_frontend *fe)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88473_dev *dev = i2c_get_clientdata(client);
- int ret, len, remaining;
- const struct firmware *fw = NULL;
- u8 *fw_file = MN88473_FIRMWARE;
- unsigned int tmp;
-
- dev_dbg(&client->dev, "\n");
-
- /* set cold state by default */
- dev->warm = false;
-
- /* check if firmware is already running */
- ret = regmap_read(dev->regmap[0], 0xf5, &tmp);
- if (ret)
- goto err;
-
- if (!(tmp & 0x1)) {
- dev_info(&client->dev, "firmware already running\n");
- dev->warm = true;
- return 0;
- }
-
- /* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, &client->dev);
- if (ret) {
-		dev_err(&client->dev, "firmware file '%s' not found\n", fw_file);
- goto err_request_firmware;
- }
-
- dev_info(&client->dev, "downloading firmware from file '%s'\n",
- fw_file);
-
- ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
- if (ret)
- goto err;
-
- for (remaining = fw->size; remaining > 0;
- remaining -= (dev->i2c_wr_max - 1)) {
- len = remaining;
- if (len > (dev->i2c_wr_max - 1))
- len = dev->i2c_wr_max - 1;
-
- ret = regmap_bulk_write(dev->regmap[0], 0xf6,
- &fw->data[fw->size - remaining], len);
- if (ret) {
- dev_err(&client->dev, "firmware download failed=%d\n",
- ret);
- goto err;
- }
- }
-
- /* parity check of firmware */
- ret = regmap_read(dev->regmap[0], 0xf8, &tmp);
- if (ret) {
- dev_err(&client->dev,
- "parity reg read failed=%d\n", ret);
- goto err;
- }
- if (tmp & 0x10) {
- dev_err(&client->dev,
- "firmware parity check failed=0x%x\n", tmp);
- goto err;
- }
-	dev_dbg(&client->dev, "firmware parity check succeeded=0x%x\n", tmp);
-
- ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
- if (ret)
- goto err;
-
- release_firmware(fw);
- fw = NULL;
-
- /* warm state */
- dev->warm = true;
-
- return 0;
-
-err:
- release_firmware(fw);
-err_request_firmware:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int mn88473_sleep(struct dvb_frontend *fe)
-{
- struct i2c_client *client = fe->demodulator_priv;
- struct mn88473_dev *dev = i2c_get_clientdata(client);
- int ret;
-
- dev_dbg(&client->dev, "\n");
-
- ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
- if (ret)
- goto err;
-
- dev->delivery_system = SYS_UNDEFINED;
-
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static struct dvb_frontend_ops mn88473_ops = {
- .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_AC},
- .info = {
- .name = "Panasonic MN88473",
- .symbol_rate_min = 1000000,
- .symbol_rate_max = 7200000,
- .caps = FE_CAN_FEC_1_2 |
- FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 |
- FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_QAM_16 |
- FE_CAN_QAM_32 |
- FE_CAN_QAM_64 |
- FE_CAN_QAM_128 |
- FE_CAN_QAM_256 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO |
- FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION |
- FE_CAN_MULTISTREAM
- },
-
- .get_tune_settings = mn88473_get_tune_settings,
-
- .init = mn88473_init,
- .sleep = mn88473_sleep,
-
- .set_frontend = mn88473_set_frontend,
-
- .read_status = mn88473_read_status,
-};
-
-static int mn88473_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct mn88473_config *config = client->dev.platform_data;
- struct mn88473_dev *dev;
- int ret;
- unsigned int utmp;
- static const struct regmap_config regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- };
-
- dev_dbg(&client->dev, "\n");
-
-	/* Caller really needs to provide a pointer for the frontend we create. */
- if (config->fe == NULL) {
- dev_err(&client->dev, "frontend pointer not defined\n");
- ret = -EINVAL;
- goto err;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- dev->i2c_wr_max = config->i2c_wr_max;
- if (!config->xtal)
- dev->xtal = 25000000;
- else
- dev->xtal = config->xtal;
- dev->client[0] = client;
- dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
- if (IS_ERR(dev->regmap[0])) {
- ret = PTR_ERR(dev->regmap[0]);
- goto err_kfree;
- }
-
- /* check demod answers to I2C */
- ret = regmap_read(dev->regmap[0], 0x00, &utmp);
- if (ret)
- goto err_regmap_0_regmap_exit;
-
- /*
- * Chip has three I2C addresses for different register pages. Used
- * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
-	 * 0x1a and 0x1c, so that each register page gets its own I2C client.
- */
- dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
- if (dev->client[1] == NULL) {
- ret = -ENODEV;
- dev_err(&client->dev, "I2C registration failed\n");
- if (ret)
- goto err_regmap_0_regmap_exit;
- }
- dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
- if (IS_ERR(dev->regmap[1])) {
- ret = PTR_ERR(dev->regmap[1]);
- goto err_client_1_i2c_unregister_device;
- }
- i2c_set_clientdata(dev->client[1], dev);
-
- dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
- if (dev->client[2] == NULL) {
- ret = -ENODEV;
- dev_err(&client->dev, "2nd I2C registration failed\n");
- if (ret)
- goto err_regmap_1_regmap_exit;
- }
- dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
- if (IS_ERR(dev->regmap[2])) {
- ret = PTR_ERR(dev->regmap[2]);
- goto err_client_2_i2c_unregister_device;
- }
- i2c_set_clientdata(dev->client[2], dev);
-
- /* create dvb_frontend */
- memcpy(&dev->fe.ops, &mn88473_ops, sizeof(struct dvb_frontend_ops));
- dev->fe.demodulator_priv = client;
- *config->fe = &dev->fe;
- i2c_set_clientdata(client, dev);
-
- dev_info(&dev->client[0]->dev, "Panasonic MN88473 successfully attached\n");
- return 0;
-
-err_client_2_i2c_unregister_device:
- i2c_unregister_device(dev->client[2]);
-err_regmap_1_regmap_exit:
- regmap_exit(dev->regmap[1]);
-err_client_1_i2c_unregister_device:
- i2c_unregister_device(dev->client[1]);
-err_regmap_0_regmap_exit:
- regmap_exit(dev->regmap[0]);
-err_kfree:
- kfree(dev);
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int mn88473_remove(struct i2c_client *client)
-{
- struct mn88473_dev *dev = i2c_get_clientdata(client);
-
- dev_dbg(&client->dev, "\n");
-
- regmap_exit(dev->regmap[2]);
- i2c_unregister_device(dev->client[2]);
-
- regmap_exit(dev->regmap[1]);
- i2c_unregister_device(dev->client[1]);
-
- regmap_exit(dev->regmap[0]);
-
- kfree(dev);
-
- return 0;
-}
-
-static const struct i2c_device_id mn88473_id_table[] = {
- {"mn88473", 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, mn88473_id_table);
-
-static struct i2c_driver mn88473_driver = {
- .driver = {
- .name = "mn88473",
- },
- .probe = mn88473_probe,
- .remove = mn88473_remove,
- .id_table = mn88473_id_table,
-};
-
-module_i2c_driver(mn88473_driver);
-
-MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
-MODULE_DESCRIPTION("Panasonic MN88473 DVB-T/T2/C demodulator driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(MN88473_FIRMWARE);
diff --git a/drivers/staging/media/mn88473/mn88473_priv.h b/drivers/staging/media/mn88473/mn88473_priv.h
deleted file mode 100644
index 54beb4241ccf..000000000000
--- a/drivers/staging/media/mn88473/mn88473_priv.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Panasonic MN88473 DVB-T/T2/C demodulator driver
- *
- * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MN88473_PRIV_H
-#define MN88473_PRIV_H
-
-#include "dvb_frontend.h"
-#include "mn88473.h"
-#include <linux/firmware.h>
-#include <linux/regmap.h>
-
-#define MN88473_FIRMWARE "dvb-demod-mn88473-01.fw"
-
-struct mn88473_dev {
- struct i2c_client *client[3];
- struct regmap *regmap[3];
- struct dvb_frontend fe;
- u16 i2c_wr_max;
- enum fe_delivery_system delivery_system;
- bool warm; /* FW running */
- u32 xtal;
-};
-
-#endif
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
deleted file mode 100644
index 8d4e3bd1bfe1..000000000000
--- a/drivers/staging/media/omap4iss/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-config VIDEO_OMAP4
- tristate "OMAP 4 Camera support"
- depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
- depends on HAS_DMA
- select MFD_SYSCON
- select VIDEOBUF2_DMA_CONTIG
- ---help---
- Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/media/omap4iss/Makefile b/drivers/staging/media/omap4iss/Makefile
deleted file mode 100644
index a716ce936cf6..000000000000
--- a/drivers/staging/media/omap4iss/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Makefile for OMAP4 ISS driver
-
-omap4-iss-objs += \
- iss.o iss_csi2.o iss_csiphy.o iss_ipipeif.o iss_ipipe.o iss_resizer.o iss_video.o
-
-obj-$(CONFIG_VIDEO_OMAP4) += omap4-iss.o
diff --git a/drivers/staging/media/omap4iss/TODO b/drivers/staging/media/omap4iss/TODO
deleted file mode 100644
index 4d220ef82653..000000000000
--- a/drivers/staging/media/omap4iss/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-* Fix FIFO/buffer overflows and underflows
-* Replace dummy resizer code with a real implementation
-* Fix checkpatch errors and warnings
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
deleted file mode 100644
index 9bfb725b9986..000000000000
--- a/drivers/staging/media/omap4iss/iss.c
+++ /dev/null
@@ -1,1512 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver
- *
- * Copyright (C) 2012, Texas Instruments
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/vmalloc.h>
-
-#include <media/v4l2-common.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-
-#include "iss.h"
-#include "iss_regs.h"
-
-#define ISS_PRINT_REGISTER(iss, name)\
- dev_dbg(iss->dev, "###ISS " #name "=0x%08x\n", \
- iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_##name))
-
-static void iss_print_status(struct iss_device *iss)
-{
- dev_dbg(iss->dev, "-------------ISS HL Register dump-------------\n");
-
- ISS_PRINT_REGISTER(iss, HL_REVISION);
- ISS_PRINT_REGISTER(iss, HL_SYSCONFIG);
- ISS_PRINT_REGISTER(iss, HL_IRQSTATUS(5));
- ISS_PRINT_REGISTER(iss, HL_IRQENABLE_SET(5));
- ISS_PRINT_REGISTER(iss, HL_IRQENABLE_CLR(5));
- ISS_PRINT_REGISTER(iss, CTRL);
- ISS_PRINT_REGISTER(iss, CLKCTRL);
- ISS_PRINT_REGISTER(iss, CLKSTAT);
-
- dev_dbg(iss->dev, "-----------------------------------------------\n");
-}
-
-/*
- * omap4iss_flush - Post pending L3 bus writes by doing a register readback
- * @iss: OMAP4 ISS device
- *
- * In order to force posting of pending writes, we need to write and
- * readback the same register, in this case the revision register.
- *
- * See this link for reference:
- * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
- */
-void omap4iss_flush(struct iss_device *iss)
-{
- iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION, 0);
- iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
-}
-
-/*
- * iss_isp_enable_interrupts - Enable ISS ISP interrupts.
- * @iss: OMAP4 ISS device
- */
-static void omap4iss_isp_enable_interrupts(struct iss_device *iss)
-{
- static const u32 isp_irq = ISP5_IRQ_OCP_ERR |
- ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
- ISP5_IRQ_RSZ_FIFO_OVF |
- ISP5_IRQ_RSZ_INT_DMA |
- ISP5_IRQ_ISIF_INT(0);
-
- /* Enable ISP interrupts */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0), isp_irq);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_SET(0),
- isp_irq);
-}
-
-/*
- * iss_isp_disable_interrupts - Disable ISS interrupts.
- * @iss: OMAP4 ISS device
- */
-static void omap4iss_isp_disable_interrupts(struct iss_device *iss)
-{
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_CLR(0), ~0);
-}
-
-/*
- * iss_enable_interrupts - Enable ISS interrupts.
- * @iss: OMAP4 ISS device
- */
-static void iss_enable_interrupts(struct iss_device *iss)
-{
- static const u32 hl_irq = ISS_HL_IRQ_CSIA | ISS_HL_IRQ_CSIB
- | ISS_HL_IRQ_ISP(0);
-
- /* Enable HL interrupts */
- iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), hl_irq);
- iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_SET(5), hl_irq);
-
- if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
- omap4iss_isp_enable_interrupts(iss);
-}
-
-/*
- * iss_disable_interrupts - Disable ISS interrupts.
- * @iss: OMAP4 ISS device
- */
-static void iss_disable_interrupts(struct iss_device *iss)
-{
- if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
- omap4iss_isp_disable_interrupts(iss);
-
- iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_CLR(5), ~0);
-}
-
-int omap4iss_get_external_info(struct iss_pipeline *pipe,
- struct media_link *link)
-{
- struct iss_device *iss =
- container_of(pipe, struct iss_video, pipe)->iss;
- struct v4l2_subdev_format fmt;
- struct v4l2_ctrl *ctrl;
- int ret;
-
- if (!pipe->external)
- return 0;
-
- if (pipe->external_rate)
- return 0;
-
- memset(&fmt, 0, sizeof(fmt));
-
- fmt.pad = link->source->index;
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(link->sink->entity),
- pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return -EPIPE;
-
- pipe->external_bpp = omap4iss_video_format_info(fmt.format.code)->bpp;
-
- ctrl = v4l2_ctrl_find(pipe->external->ctrl_handler,
- V4L2_CID_PIXEL_RATE);
- if (ctrl == NULL) {
- dev_warn(iss->dev, "no pixel rate control in subdev %s\n",
- pipe->external->name);
- return -EPIPE;
- }
-
- pipe->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
-
- return 0;
-}
-
-/*
- * Configure the bridge. Valid inputs are
- *
- * IPIPEIF_INPUT_CSI2A: CSI2a receiver
- * IPIPEIF_INPUT_CSI2B: CSI2b receiver
- *
- * The bridge and lane shifter are configured according to the selected input
- * and the ISP platform data.
- */
-void omap4iss_configure_bridge(struct iss_device *iss,
- enum ipipeif_input_entity input)
-{
- u32 issctrl_val;
- u32 isp5ctrl_val;
-
- issctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL);
- issctrl_val &= ~ISS_CTRL_INPUT_SEL_MASK;
- issctrl_val &= ~ISS_CTRL_CLK_DIV_MASK;
-
- isp5ctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL);
-
- switch (input) {
- case IPIPEIF_INPUT_CSI2A:
- issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2A;
- break;
-
- case IPIPEIF_INPUT_CSI2B:
- issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2B;
- break;
-
- default:
- return;
- }
-
- issctrl_val |= ISS_CTRL_SYNC_DETECT_VS_RAISING;
-
- isp5ctrl_val |= ISP5_CTRL_VD_PULSE_EXT | ISP5_CTRL_PSYNC_CLK_SEL |
- ISP5_CTRL_SYNC_ENABLE;
-
- iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL, issctrl_val);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, isp5ctrl_val);
-}
-
-#ifdef ISS_ISR_DEBUG
-static void iss_isr_dbg(struct iss_device *iss, u32 irqstatus)
-{
- static const char * const name[] = {
- "ISP_0",
- "ISP_1",
- "ISP_2",
- "ISP_3",
- "CSIA",
- "CSIB",
- "CCP2_0",
- "CCP2_1",
- "CCP2_2",
- "CCP2_3",
- "CBUFF",
- "BTE",
- "SIMCOP_0",
- "SIMCOP_1",
- "SIMCOP_2",
- "SIMCOP_3",
- "CCP2_8",
- "HS_VS",
- "18",
- "19",
- "20",
- "21",
- "22",
- "23",
- "24",
- "25",
- "26",
- "27",
- "28",
- "29",
- "30",
- "31",
- };
- unsigned int i;
-
- dev_dbg(iss->dev, "ISS IRQ: ");
-
- for (i = 0; i < ARRAY_SIZE(name); i++) {
- if ((1 << i) & irqstatus)
- pr_cont("%s ", name[i]);
- }
- pr_cont("\n");
-}
-
-static void iss_isp_isr_dbg(struct iss_device *iss, u32 irqstatus)
-{
- static const char * const name[] = {
- "ISIF_0",
- "ISIF_1",
- "ISIF_2",
- "ISIF_3",
- "IPIPEREQ",
- "IPIPELAST_PIX",
- "IPIPEDMA",
- "IPIPEBSC",
- "IPIPEHST",
- "IPIPEIF",
- "AEW",
- "AF",
- "H3A",
- "RSZ_REG",
- "RSZ_LAST_PIX",
- "RSZ_DMA",
- "RSZ_CYC_RZA",
- "RSZ_CYC_RZB",
- "RSZ_FIFO_OVF",
- "RSZ_FIFO_IN_BLK_ERR",
- "20",
- "21",
- "RSZ_EOF0",
- "RSZ_EOF1",
- "H3A_EOF",
- "IPIPE_EOF",
- "26",
- "IPIPE_DPC_INI",
- "IPIPE_DPC_RNEW0",
- "IPIPE_DPC_RNEW1",
- "30",
- "OCP_ERR",
- };
- unsigned int i;
-
- dev_dbg(iss->dev, "ISP IRQ: ");
-
- for (i = 0; i < ARRAY_SIZE(name); i++) {
- if ((1 << i) & irqstatus)
- pr_cont("%s ", name[i]);
- }
- pr_cont("\n");
-}
-#endif
-
-/*
- * iss_isr - Interrupt Service Routine for ISS module.
- * @irq: Not used currently.
- * @_iss: Pointer to the OMAP4 ISS device
- *
- * Handles the corresponding callback if plugged in.
- *
- * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
- * IRQ wasn't handled.
- */
-static irqreturn_t iss_isr(int irq, void *_iss)
-{
- static const u32 ipipeif_events = ISP5_IRQ_IPIPEIF_IRQ |
- ISP5_IRQ_ISIF_INT(0);
- static const u32 resizer_events = ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
- ISP5_IRQ_RSZ_FIFO_OVF |
- ISP5_IRQ_RSZ_INT_DMA;
- struct iss_device *iss = _iss;
- u32 irqstatus;
-
- irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5));
- iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), irqstatus);
-
- if (irqstatus & ISS_HL_IRQ_CSIA)
- omap4iss_csi2_isr(&iss->csi2a);
-
- if (irqstatus & ISS_HL_IRQ_CSIB)
- omap4iss_csi2_isr(&iss->csi2b);
-
- if (irqstatus & ISS_HL_IRQ_ISP(0)) {
- u32 isp_irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1,
- ISP5_IRQSTATUS(0));
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0),
- isp_irqstatus);
-
- if (isp_irqstatus & ISP5_IRQ_OCP_ERR)
- dev_dbg(iss->dev, "ISP5 OCP Error!\n");
-
- if (isp_irqstatus & ipipeif_events) {
- omap4iss_ipipeif_isr(&iss->ipipeif,
- isp_irqstatus & ipipeif_events);
- }
-
- if (isp_irqstatus & resizer_events)
- omap4iss_resizer_isr(&iss->resizer,
- isp_irqstatus & resizer_events);
-
-#ifdef ISS_ISR_DEBUG
- iss_isp_isr_dbg(iss, isp_irqstatus);
-#endif
- }
-
- omap4iss_flush(iss);
-
-#ifdef ISS_ISR_DEBUG
- iss_isr_dbg(iss, irqstatus);
-#endif
-
- return IRQ_HANDLED;
-}
-
-/* -----------------------------------------------------------------------------
- * Pipeline power management
- *
- * Entities must be powered up when part of a pipeline that contains at least
- * one open video device node.
- *
- * To achieve this use the entity use_count field to track the number of users.
- * For entities corresponding to video device nodes the use_count field stores
- * the users count of the node. For entities corresponding to subdevs the
- * use_count field stores the total number of users of all video device nodes
- * in the pipeline.
- *
- * The omap4iss_pipeline_pm_use() function must be called in the open() and
- * close() handlers of video device nodes. It increments or decrements the use
- * count of all subdev entities in the pipeline.
- *
- * To react to link management on powered pipelines, the link setup notification
- * callback updates the use count of all entities in the source and sink sides
- * of the link.
- */
-
-/*
- * iss_pipeline_pm_use_count - Count the number of users of a pipeline
- * @entity: The entity
- *
- * Return the total number of users of all video device nodes in the pipeline.
- */
-static int iss_pipeline_pm_use_count(struct media_entity *entity)
-{
- struct media_entity_graph graph;
- int use = 0;
-
- media_entity_graph_walk_start(&graph, entity);
-
- while ((entity = media_entity_graph_walk_next(&graph))) {
- if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
- use += entity->use_count;
- }
-
- return use;
-}
-
-/*
- * iss_pipeline_pm_power_one - Apply power change to an entity
- * @entity: The entity
- * @change: Use count change
- *
- * Change the entity use count by @change. If the entity is a subdev update its
- * power state by calling the core::s_power operation when the use count goes
- * from 0 to != 0 or from != 0 to 0.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int iss_pipeline_pm_power_one(struct media_entity *entity, int change)
-{
- struct v4l2_subdev *subdev;
-
- subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
- ? media_entity_to_v4l2_subdev(entity) : NULL;
-
- if (entity->use_count == 0 && change > 0 && subdev != NULL) {
- int ret;
-
- ret = v4l2_subdev_call(subdev, core, s_power, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- if (entity->use_count == 0 && change < 0 && subdev != NULL)
- v4l2_subdev_call(subdev, core, s_power, 0);
-
- return 0;
-}
-
-/*
- * iss_pipeline_pm_power - Apply power change to all entities in a pipeline
- * @entity: The entity
- * @change: Use count change
- *
- * Walk the pipeline to update the use count and the power state of all non-node
- * entities.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int iss_pipeline_pm_power(struct media_entity *entity, int change)
-{
- struct media_entity_graph graph;
- struct media_entity *first = entity;
- int ret = 0;
-
- if (!change)
- return 0;
-
- media_entity_graph_walk_start(&graph, entity);
-
- while (!ret && (entity = media_entity_graph_walk_next(&graph)))
- if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
- ret = iss_pipeline_pm_power_one(entity, change);
-
- if (!ret)
- return 0;
-
- media_entity_graph_walk_start(&graph, first);
-
- while ((first = media_entity_graph_walk_next(&graph))
- && first != entity)
- if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
- iss_pipeline_pm_power_one(first, -change);
-
- return ret;
-}
-
-/*
- * omap4iss_pipeline_pm_use - Update the use count of an entity
- * @entity: The entity
- * @use: Use (1) or stop using (0) the entity
- *
- * Update the use count of all entities in the pipeline and power entities on or
- * off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. No failure can occur when the use parameter is
- * set to 0.
- */
-int omap4iss_pipeline_pm_use(struct media_entity *entity, int use)
-{
- int change = use ? 1 : -1;
- int ret;
-
- mutex_lock(&entity->parent->graph_mutex);
-
- /* Apply use count to node. */
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- /* Apply power change to connected non-nodes. */
- ret = iss_pipeline_pm_power(entity, change);
- if (ret < 0)
- entity->use_count -= change;
-
- mutex_unlock(&entity->parent->graph_mutex);
-
- return ret;
-}
-
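As the comment block above notes, omap4iss_pipeline_pm_use() is intended to be called from the open() and close() handlers of the video device nodes. A hedged sketch of such a call site follows; the handler names and surrounding structure are placeholders, and only the omap4iss_pipeline_pm_use() signature is taken from the code above.

#include <media/media-entity.h>
#include "iss.h"	/* assumed to declare omap4iss_pipeline_pm_use() */

/* Sketch: a video node open handler takes a pipeline power reference. */
static int video_node_open_sketch(struct media_entity *entity)
{
	int ret;

	ret = omap4iss_pipeline_pm_use(entity, 1);	/* power connected entities up */
	if (ret < 0)
		return ret;

	/* ... set up the file handle ... */
	return 0;
}

static void video_node_release_sketch(struct media_entity *entity)
{
	/* Dropping the last user powers the connected entities back down. */
	omap4iss_pipeline_pm_use(entity, 0);
}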
-/*
- * iss_pipeline_link_notify - Link management notification callback
- * @link: The link
- * @flags: New link flags that will be applied
- *
- * React to link management on powered pipelines by updating the use count of
- * all entities in the source and sink sides of the link. Entities are powered
- * on or off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. This function will not fail for disconnection
- * events.
- */
-static int iss_pipeline_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct media_entity *source = link->source->entity;
- struct media_entity *sink = link->sink->entity;
- int source_use = iss_pipeline_pm_use_count(source);
- int sink_use = iss_pipeline_pm_use_count(sink);
- int ret;
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- !(link->flags & MEDIA_LNK_FL_ENABLED)) {
- /* Powering off entities is assumed to never fail. */
- iss_pipeline_pm_power(source, -sink_use);
- iss_pipeline_pm_power(sink, -source_use);
- return 0;
- }
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- (flags & MEDIA_LNK_FL_ENABLED)) {
- ret = iss_pipeline_pm_power(source, sink_use);
- if (ret < 0)
- return ret;
-
- ret = iss_pipeline_pm_power(sink, source_use);
- if (ret < 0)
- iss_pipeline_pm_power(source, -sink_use);
-
- return ret;
- }
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * Pipeline stream management
- */
-
-/*
- * iss_pipeline_disable - Disable streaming on a pipeline
- * @pipe: ISS pipeline
- * @until: entity at which to stop pipeline walk
- *
- * Walk the entities chain starting at the pipeline output video node and stop
- * all modules in the chain. Wait synchronously for the modules to be stopped if
- * necessary.
- *
- * If the until argument isn't NULL, stop the pipeline walk when reaching the
- * until entity. This is used to disable a partially started pipeline due to a
- * subdev start error.
- */
-static int iss_pipeline_disable(struct iss_pipeline *pipe,
- struct media_entity *until)
-{
- struct iss_device *iss = pipe->output->iss;
- struct media_entity *entity;
- struct media_pad *pad;
- struct v4l2_subdev *subdev;
- int failure = 0;
- int ret;
-
- entity = &pipe->output->video.entity;
- while (1) {
- pad = &entity->pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- pad = media_entity_remote_pad(pad);
- if (pad == NULL ||
- media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
- break;
-
- entity = pad->entity;
- if (entity == until)
- break;
-
- subdev = media_entity_to_v4l2_subdev(entity);
- ret = v4l2_subdev_call(subdev, video, s_stream, 0);
- if (ret < 0) {
- dev_dbg(iss->dev, "%s: module stop timeout.\n",
- subdev->name);
-			/* If the entity failed to stop, assume it has
-			 * crashed. Mark it as such; the ISS will be reset when
-			 * applications release it.
-			 */
- iss->crashed |= 1U << subdev->entity.id;
- failure = -ETIMEDOUT;
- }
- }
-
- return failure;
-}
-
-/*
- * iss_pipeline_enable - Enable streaming on a pipeline
- * @pipe: ISS pipeline
- * @mode: Stream mode (single shot or continuous)
- *
- * Walk the entities chain starting at the pipeline output video node and start
- * all modules in the chain in the given mode.
- *
- * Return 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise.
- */
-static int iss_pipeline_enable(struct iss_pipeline *pipe,
- enum iss_pipeline_stream_state mode)
-{
- struct iss_device *iss = pipe->output->iss;
- struct media_entity *entity;
- struct media_pad *pad;
- struct v4l2_subdev *subdev;
- unsigned long flags;
- int ret;
-
- /* If one of the entities in the pipeline has crashed it will not work
- * properly. Refuse to start streaming in that case. This check must be
- * performed before the loop below to avoid starting entities if the
- * pipeline won't start anyway (those entities would then likely fail to
- * stop, making the problem worse).
- */
- if (pipe->entities & iss->crashed)
- return -EIO;
-
- spin_lock_irqsave(&pipe->lock, flags);
- pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
- spin_unlock_irqrestore(&pipe->lock, flags);
-
- pipe->do_propagation = false;
-
- entity = &pipe->output->video.entity;
- while (1) {
- pad = &entity->pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- pad = media_entity_remote_pad(pad);
- if (pad == NULL ||
- media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
- break;
-
- entity = pad->entity;
- subdev = media_entity_to_v4l2_subdev(entity);
-
- ret = v4l2_subdev_call(subdev, video, s_stream, mode);
- if (ret < 0 && ret != -ENOIOCTLCMD) {
- iss_pipeline_disable(pipe, entity);
- return ret;
- }
-
- if (subdev == &iss->csi2a.subdev ||
- subdev == &iss->csi2b.subdev)
- pipe->do_propagation = true;
- }
-
- iss_print_status(pipe->output->iss);
- return 0;
-}
-
-/*
- * omap4iss_pipeline_set_stream - Enable/disable streaming on a pipeline
- * @pipe: ISS pipeline
- * @state: Stream state (stopped, single shot or continuous)
- *
- * Set the pipeline to the given stream state. Pipelines can be started in
- * single-shot or continuous mode.
- *
- * Return 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise. The pipeline state is not updated when the operation
- * fails, except when stopping the pipeline.
- */
-int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
- enum iss_pipeline_stream_state state)
-{
- int ret;
-
- if (state == ISS_PIPELINE_STREAM_STOPPED)
- ret = iss_pipeline_disable(pipe, NULL);
- else
- ret = iss_pipeline_enable(pipe, state);
-
- if (ret == 0 || state == ISS_PIPELINE_STREAM_STOPPED)
- pipe->stream_state = state;
-
- return ret;
-}
-
-/*
- * omap4iss_pipeline_cancel_stream - Cancel stream on a pipeline
- * @pipe: ISS pipeline
- *
- * Cancelling a stream marks all buffers on all video nodes in the pipeline as
- * erroneous and makes sure no new buffers can be queued. This function is called
- * when a fatal error that prevents any further operation on the pipeline
- * occurs.
- */
-void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe)
-{
- if (pipe->input)
- omap4iss_video_cancel_stream(pipe->input);
- if (pipe->output)
- omap4iss_video_cancel_stream(pipe->output);
-}
-
-/*
- * iss_pipeline_is_last - Verify if entity has an enabled link to the output
- * video node
- * @me: ISS module's media entity
- *
- * Returns 1 if the entity has an enabled link to the output video node or 0
- * otherwise. This holds only as long as the pipeline has no more than one
- * output node.
- */
-static int iss_pipeline_is_last(struct media_entity *me)
-{
- struct iss_pipeline *pipe;
- struct media_pad *pad;
-
- if (!me->pipe)
- return 0;
- pipe = to_iss_pipeline(me);
- if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
- pad = media_entity_remote_pad(&pipe->output->pad);
- return pad->entity == me;
-}
-
-static int iss_reset(struct iss_device *iss)
-{
- unsigned int timeout;
-
- iss_reg_set(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG,
- ISS_HL_SYSCONFIG_SOFTRESET);
-
- timeout = iss_poll_condition_timeout(
- !(iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG) &
- ISS_HL_SYSCONFIG_SOFTRESET), 1000, 10, 100);
- if (timeout) {
- dev_err(iss->dev, "ISS reset timeout\n");
- return -ETIMEDOUT;
- }
-
- iss->crashed = 0;
- return 0;
-}
-
-static int iss_isp_reset(struct iss_device *iss)
-{
- unsigned int timeout;
-
-	/* First, ensure that the ISP is IDLE (no transactions happening) */
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
- ISP5_SYSCONFIG_STANDBYMODE_MASK,
- ISP5_SYSCONFIG_STANDBYMODE_SMART);
-
- iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, ISP5_CTRL_MSTANDBY);
-
- timeout = iss_poll_condition_timeout(
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL) &
- ISP5_CTRL_MSTANDBY_WAIT, 1000000, 1000, 1500);
- if (timeout) {
- dev_err(iss->dev, "ISP5 standby timeout\n");
- return -ETIMEDOUT;
- }
-
- /* Now finally, do the reset */
- iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
- ISP5_SYSCONFIG_SOFTRESET);
-
- timeout = iss_poll_condition_timeout(
- !(iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG) &
- ISP5_SYSCONFIG_SOFTRESET), 1000000, 1000, 1500);
- if (timeout) {
- dev_err(iss->dev, "ISP5 reset timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/*
- * iss_module_sync_idle - Helper to sync module with its idle state
- * @me: ISS submodule's media entity
- * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
- * @stopping: flag which tells module wants to stop
- *
- * This function checks if the ISS submodule needs to wait for the next
- * interrupt. If so, it makes the caller sleep while waiting for that event.
- */
-int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
- atomic_t *stopping)
-{
- struct iss_pipeline *pipe = to_iss_pipeline(me);
- struct iss_video *video = pipe->output;
- unsigned long flags;
-
- if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED ||
- (pipe->stream_state == ISS_PIPELINE_STREAM_SINGLESHOT &&
- !iss_pipeline_ready(pipe)))
- return 0;
-
- /*
- * atomic_set() doesn't include memory barrier on ARM platform for SMP
- * scenario. We'll call it here to avoid race conditions.
- */
- atomic_set(stopping, 1);
- smp_wmb();
-
- /*
-	 * If the module is the last one, it is writing to memory. In this
-	 * case, it is necessary to check whether the module is already paused
-	 * due to a DMA queue underrun or whether it has to wait for the next
-	 * interrupt to become idle.
-	 * If it isn't the last one, the function won't sleep, but *stopping
-	 * will still be set so that the submodule's interrupt handler knows
-	 * the module wants to be idle.
- */
- if (!iss_pipeline_is_last(me))
- return 0;
-
- spin_lock_irqsave(&video->qlock, flags);
- if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
- spin_unlock_irqrestore(&video->qlock, flags);
- atomic_set(stopping, 0);
- smp_wmb();
- return 0;
- }
- spin_unlock_irqrestore(&video->qlock, flags);
- if (!wait_event_timeout(*wait, !atomic_read(stopping),
- msecs_to_jiffies(1000))) {
- atomic_set(stopping, 0);
- smp_wmb();
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/*
- * omap4iss_module_sync_is_stopping - Helper to verify if module was stopping
- * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
- * @stopping: flag which tells module wants to stop
- *
- * This function checks if the ISS submodule was stopping. If so, it
- * notifies the caller by setting stopping to 0 and waking up the wait queue.
- * Returns 1 if it was stopping or 0 otherwise.
- */
-int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
- atomic_t *stopping)
-{
- if (atomic_cmpxchg(stopping, 1, 0)) {
- wake_up(wait);
- return 1;
- }
-
- return 0;
-}
-
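The two helpers above are used as a pair: a submodule's stream-off path waits through omap4iss_module_sync_idle() while its interrupt handler acknowledges the stop through omap4iss_module_sync_is_stopping(). The sketch below illustrates that pairing under the assumption of a submodule carrying a wait queue and a stopping flag; the structure and function names are placeholders, only the helper signatures come from the code above.

#include <linux/atomic.h>
#include <linux/wait.h>
#include <media/media-entity.h>
#include "iss.h"	/* assumed to declare the omap4iss_module_sync_* helpers */

/* Sketch only: a stand-in for an ISS submodule (resizer, ipipeif, ...). */
struct iss_submodule_sketch {
	struct media_entity entity;
	wait_queue_head_t wait;
	atomic_t stopping;
};

static int submodule_stream_off_sketch(struct iss_submodule_sketch *mod)
{
	/* Sleep until the current frame is written out, or time out. */
	return omap4iss_module_sync_idle(&mod->entity, &mod->wait,
					 &mod->stopping);
}

static void submodule_isr_sketch(struct iss_submodule_sketch *mod)
{
	/* If a stop was requested, wake the waiter and skip normal handling. */
	if (omap4iss_module_sync_is_stopping(&mod->wait, &mod->stopping))
		return;

	/* ... normal per-frame buffer handling ... */
}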
-/* --------------------------------------------------------------------------
- * Clock management
- */
-
-#define ISS_CLKCTRL_MASK (ISS_CLKCTRL_CSI2_A |\
- ISS_CLKCTRL_CSI2_B |\
- ISS_CLKCTRL_ISP)
-
-static int __iss_subclk_update(struct iss_device *iss)
-{
- u32 clk = 0;
- int ret = 0, timeout = 1000;
-
- if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_A)
- clk |= ISS_CLKCTRL_CSI2_A;
-
- if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_B)
- clk |= ISS_CLKCTRL_CSI2_B;
-
- if (iss->subclk_resources & OMAP4_ISS_SUBCLK_ISP)
- clk |= ISS_CLKCTRL_ISP;
-
- iss_reg_update(iss, OMAP4_ISS_MEM_TOP, ISS_CLKCTRL,
- ISS_CLKCTRL_MASK, clk);
-
- /* Wait for HW assertion */
- while (--timeout > 0) {
- udelay(1);
- if ((iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CLKSTAT) &
- ISS_CLKCTRL_MASK) == clk)
- break;
- }
-
- if (!timeout)
- ret = -EBUSY;
-
- return ret;
-}
-
-int omap4iss_subclk_enable(struct iss_device *iss,
- enum iss_subclk_resource res)
-{
- iss->subclk_resources |= res;
-
- return __iss_subclk_update(iss);
-}
-
-int omap4iss_subclk_disable(struct iss_device *iss,
- enum iss_subclk_resource res)
-{
- iss->subclk_resources &= ~res;
-
- return __iss_subclk_update(iss);
-}
-
-#define ISS_ISP5_CLKCTRL_MASK (ISP5_CTRL_BL_CLK_ENABLE |\
- ISP5_CTRL_ISIF_CLK_ENABLE |\
- ISP5_CTRL_H3A_CLK_ENABLE |\
- ISP5_CTRL_RSZ_CLK_ENABLE |\
- ISP5_CTRL_IPIPE_CLK_ENABLE |\
- ISP5_CTRL_IPIPEIF_CLK_ENABLE)
-
-static void __iss_isp_subclk_update(struct iss_device *iss)
-{
- u32 clk = 0;
-
- if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_ISIF)
- clk |= ISP5_CTRL_ISIF_CLK_ENABLE;
-
- if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_H3A)
- clk |= ISP5_CTRL_H3A_CLK_ENABLE;
-
- if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_RSZ)
- clk |= ISP5_CTRL_RSZ_CLK_ENABLE;
-
- if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPE)
- clk |= ISP5_CTRL_IPIPE_CLK_ENABLE;
-
- if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPEIF)
- clk |= ISP5_CTRL_IPIPEIF_CLK_ENABLE;
-
- if (clk)
- clk |= ISP5_CTRL_BL_CLK_ENABLE;
-
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL,
- ISS_ISP5_CLKCTRL_MASK, clk);
-}
-
-void omap4iss_isp_subclk_enable(struct iss_device *iss,
- enum iss_isp_subclk_resource res)
-{
- iss->isp_subclk_resources |= res;
-
- __iss_isp_subclk_update(iss);
-}
-
-void omap4iss_isp_subclk_disable(struct iss_device *iss,
- enum iss_isp_subclk_resource res)
-{
- iss->isp_subclk_resources &= ~res;
-
- __iss_isp_subclk_update(iss);
-}
-
-/*
- * iss_enable_clocks - Enable ISS clocks
- * @iss: OMAP4 ISS device
- *
- * Return 0 if successful, or the clk_enable return value if any of them fails.
- */
-static int iss_enable_clocks(struct iss_device *iss)
-{
- int ret;
-
- ret = clk_enable(iss->iss_fck);
- if (ret) {
- dev_err(iss->dev, "clk_enable iss_fck failed\n");
- return ret;
- }
-
- ret = clk_enable(iss->iss_ctrlclk);
- if (ret) {
- dev_err(iss->dev, "clk_enable iss_ctrlclk failed\n");
- clk_disable(iss->iss_fck);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * iss_disable_clocks - Disable ISS clocks
- * @iss: OMAP4 ISS device
- */
-static void iss_disable_clocks(struct iss_device *iss)
-{
- clk_disable(iss->iss_ctrlclk);
- clk_disable(iss->iss_fck);
-}
-
-static int iss_get_clocks(struct iss_device *iss)
-{
- iss->iss_fck = devm_clk_get(iss->dev, "iss_fck");
- if (IS_ERR(iss->iss_fck)) {
- dev_err(iss->dev, "Unable to get iss_fck clock info\n");
- return PTR_ERR(iss->iss_fck);
- }
-
- iss->iss_ctrlclk = devm_clk_get(iss->dev, "iss_ctrlclk");
- if (IS_ERR(iss->iss_ctrlclk)) {
- dev_err(iss->dev, "Unable to get iss_ctrlclk clock info\n");
- return PTR_ERR(iss->iss_ctrlclk);
- }
-
- return 0;
-}
-
-/*
- * omap4iss_get - Acquire the ISS resource.
- *
- * Initializes the clocks for the first acquire.
- *
- * Increment the reference count on the ISS. If the first reference is taken,
- * enable clocks and power-up all submodules.
- *
- * Return a pointer to the ISS device structure, or NULL if an error occurred.
- */
-struct iss_device *omap4iss_get(struct iss_device *iss)
-{
- struct iss_device *__iss = iss;
-
- if (iss == NULL)
- return NULL;
-
- mutex_lock(&iss->iss_mutex);
- if (iss->ref_count > 0)
- goto out;
-
- if (iss_enable_clocks(iss) < 0) {
- __iss = NULL;
- goto out;
- }
-
- iss_enable_interrupts(iss);
-
-out:
- if (__iss != NULL)
- iss->ref_count++;
- mutex_unlock(&iss->iss_mutex);
-
- return __iss;
-}
-
-/*
- * omap4iss_put - Release the ISS
- *
- * Decrement the reference count on the ISS. If the last reference is released,
- * power-down all submodules, disable clocks and free temporary buffers.
- */
-void omap4iss_put(struct iss_device *iss)
-{
- if (iss == NULL)
- return;
-
- mutex_lock(&iss->iss_mutex);
- BUG_ON(iss->ref_count == 0);
- if (--iss->ref_count == 0) {
- iss_disable_interrupts(iss);
- /* Reset the ISS if an entity has failed to stop. This is the
- * only way to recover from such conditions, although it would
-		 * be worth investigating whether resetting only the ISP could
-		 * fix the problem in some cases.
- */
- if (iss->crashed)
- iss_reset(iss);
- iss_disable_clocks(iss);
- }
- mutex_unlock(&iss->iss_mutex);
-}
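
The pair of functions above implements a simple clock reference count: the first omap4iss_get() enables the ISS clocks and interrupts, and the last omap4iss_put() disables them again, resetting the hardware first if an entity crashed. A minimal caller sketch of that pattern follows; the helper name and error code are invented for illustration.

/* Hypothetical caller showing the omap4iss_get()/omap4iss_put() pattern. */
static int example_touch_registers(struct iss_device *iss)
{
	/* The first reference powers the ISS up (clocks and interrupts). */
	if (!omap4iss_get(iss))
		return -EBUSY;

	/* ... ISS registers may be accessed here ... */

	/* The last reference powers the ISS down again. */
	omap4iss_put(iss);
	return 0;
}
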
-
-static int iss_map_mem_resource(struct platform_device *pdev,
- struct iss_device *iss,
- enum iss_mem_resources res)
-{
- struct resource *mem;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
-
- iss->regs[res] = devm_ioremap_resource(iss->dev, mem);
-
- return PTR_ERR_OR_ZERO(iss->regs[res]);
-}
-
-static void iss_unregister_entities(struct iss_device *iss)
-{
- omap4iss_resizer_unregister_entities(&iss->resizer);
- omap4iss_ipipe_unregister_entities(&iss->ipipe);
- omap4iss_ipipeif_unregister_entities(&iss->ipipeif);
- omap4iss_csi2_unregister_entities(&iss->csi2a);
- omap4iss_csi2_unregister_entities(&iss->csi2b);
-
- v4l2_device_unregister(&iss->v4l2_dev);
- media_device_unregister(&iss->media_dev);
-}
-
-/*
- * iss_register_subdev_group - Register a group of subdevices
- * @iss: OMAP4 ISS device
- * @board_info: I2C subdevs board information array
- *
- * Register all I2C subdevices in the board_info array. The array must be
- * terminated by a NULL entry, and the first entry must be the sensor.
- *
- * Return a pointer to the sensor media entity if it has been successfully
- * registered, or NULL otherwise.
- */
-static struct v4l2_subdev *
-iss_register_subdev_group(struct iss_device *iss,
- struct iss_subdev_i2c_board_info *board_info)
-{
- struct v4l2_subdev *sensor = NULL;
- unsigned int first;
-
- if (board_info->board_info == NULL)
- return NULL;
-
- for (first = 1; board_info->board_info; ++board_info, first = 0) {
- struct v4l2_subdev *subdev;
- struct i2c_adapter *adapter;
-
- adapter = i2c_get_adapter(board_info->i2c_adapter_id);
- if (adapter == NULL) {
- dev_err(iss->dev,
- "%s: Unable to get I2C adapter %d for device %s\n",
- __func__, board_info->i2c_adapter_id,
- board_info->board_info->type);
- continue;
- }
-
- subdev = v4l2_i2c_new_subdev_board(&iss->v4l2_dev, adapter,
- board_info->board_info, NULL);
- if (subdev == NULL) {
- dev_err(iss->dev, "Unable to register subdev %s\n",
- board_info->board_info->type);
- continue;
- }
-
- if (first)
- sensor = subdev;
- }
-
- return sensor;
-}
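
A hypothetical board-file sketch of the layout iss_register_subdev_group() expects: a NULL-terminated array whose first entry is the sensor. The sensor name, I2C address and adapter number are invented for the example; real values come from the board's platform data (struct iss_subdev_i2c_board_info).

static struct i2c_board_info example_sensor_info = {
	I2C_BOARD_INFO("example-sensor", 0x10),	/* made-up name and address */
};

static struct iss_subdev_i2c_board_info example_camera_subdevs[] = {
	{
		.board_info	= &example_sensor_info,	/* sensor comes first */
		.i2c_adapter_id	= 2,			/* made-up adapter number */
	},
	{ /* board_info == NULL terminates the group */ },
};
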
-
-static int iss_register_entities(struct iss_device *iss)
-{
- struct iss_platform_data *pdata = iss->pdata;
- struct iss_v4l2_subdevs_group *subdevs;
- int ret;
-
- iss->media_dev.dev = iss->dev;
- strlcpy(iss->media_dev.model, "TI OMAP4 ISS",
- sizeof(iss->media_dev.model));
- iss->media_dev.hw_revision = iss->revision;
- iss->media_dev.link_notify = iss_pipeline_link_notify;
- ret = media_device_register(&iss->media_dev);
- if (ret < 0) {
- dev_err(iss->dev, "Media device registration failed (%d)\n",
- ret);
- return ret;
- }
-
- iss->v4l2_dev.mdev = &iss->media_dev;
- ret = v4l2_device_register(iss->dev, &iss->v4l2_dev);
- if (ret < 0) {
- dev_err(iss->dev, "V4L2 device registration failed (%d)\n",
- ret);
- goto done;
- }
-
- /* Register internal entities */
- ret = omap4iss_csi2_register_entities(&iss->csi2a, &iss->v4l2_dev);
- if (ret < 0)
- goto done;
-
- ret = omap4iss_csi2_register_entities(&iss->csi2b, &iss->v4l2_dev);
- if (ret < 0)
- goto done;
-
- ret = omap4iss_ipipeif_register_entities(&iss->ipipeif, &iss->v4l2_dev);
- if (ret < 0)
- goto done;
-
- ret = omap4iss_ipipe_register_entities(&iss->ipipe, &iss->v4l2_dev);
- if (ret < 0)
- goto done;
-
- ret = omap4iss_resizer_register_entities(&iss->resizer, &iss->v4l2_dev);
- if (ret < 0)
- goto done;
-
- /* Register external entities */
- for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
- struct v4l2_subdev *sensor;
- struct media_entity *input;
- unsigned int flags;
- unsigned int pad;
-
- sensor = iss_register_subdev_group(iss, subdevs->subdevs);
- if (sensor == NULL)
- continue;
-
- sensor->host_priv = subdevs;
-
- /* Connect the sensor to the correct interface module.
- * CSI2a receiver through CSIPHY1, or
- * CSI2b receiver through CSIPHY2
- */
- switch (subdevs->interface) {
- case ISS_INTERFACE_CSI2A_PHY1:
- input = &iss->csi2a.subdev.entity;
- pad = CSI2_PAD_SINK;
- flags = MEDIA_LNK_FL_IMMUTABLE
- | MEDIA_LNK_FL_ENABLED;
- break;
-
- case ISS_INTERFACE_CSI2B_PHY2:
- input = &iss->csi2b.subdev.entity;
- pad = CSI2_PAD_SINK;
- flags = MEDIA_LNK_FL_IMMUTABLE
- | MEDIA_LNK_FL_ENABLED;
- break;
-
- default:
- dev_err(iss->dev, "invalid interface type %u\n",
- subdevs->interface);
- ret = -EINVAL;
- goto done;
- }
-
- ret = media_entity_create_link(&sensor->entity, 0, input, pad,
- flags);
- if (ret < 0)
- goto done;
- }
-
- ret = v4l2_device_register_subdev_nodes(&iss->v4l2_dev);
-
-done:
- if (ret < 0)
- iss_unregister_entities(iss);
-
- return ret;
-}
-
-static void iss_cleanup_modules(struct iss_device *iss)
-{
- omap4iss_csi2_cleanup(iss);
- omap4iss_ipipeif_cleanup(iss);
- omap4iss_ipipe_cleanup(iss);
- omap4iss_resizer_cleanup(iss);
-}
-
-static int iss_initialize_modules(struct iss_device *iss)
-{
- int ret;
-
- ret = omap4iss_csiphy_init(iss);
- if (ret < 0) {
- dev_err(iss->dev, "CSI PHY initialization failed\n");
- goto error_csiphy;
- }
-
- ret = omap4iss_csi2_init(iss);
- if (ret < 0) {
- dev_err(iss->dev, "CSI2 initialization failed\n");
- goto error_csi2;
- }
-
- ret = omap4iss_ipipeif_init(iss);
- if (ret < 0) {
- dev_err(iss->dev, "ISP IPIPEIF initialization failed\n");
- goto error_ipipeif;
- }
-
- ret = omap4iss_ipipe_init(iss);
- if (ret < 0) {
- dev_err(iss->dev, "ISP IPIPE initialization failed\n");
- goto error_ipipe;
- }
-
- ret = omap4iss_resizer_init(iss);
- if (ret < 0) {
- dev_err(iss->dev, "ISP RESIZER initialization failed\n");
- goto error_resizer;
- }
-
- /* Connect the submodules. */
- ret = media_entity_create_link(
- &iss->csi2a.subdev.entity, CSI2_PAD_SOURCE,
- &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
- if (ret < 0)
- goto error_link;
-
- ret = media_entity_create_link(
- &iss->csi2b.subdev.entity, CSI2_PAD_SOURCE,
- &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
- if (ret < 0)
- goto error_link;
-
- ret = media_entity_create_link(
- &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
- &iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
- if (ret < 0)
- goto error_link;
-
- ret = media_entity_create_link(
- &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
- &iss->ipipe.subdev.entity, IPIPE_PAD_SINK, 0);
- if (ret < 0)
- goto error_link;
-
- ret = media_entity_create_link(
- &iss->ipipe.subdev.entity, IPIPE_PAD_SOURCE_VP,
- &iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
- if (ret < 0)
- goto error_link;
-
- return 0;
-
-error_link:
- omap4iss_resizer_cleanup(iss);
-error_resizer:
- omap4iss_ipipe_cleanup(iss);
-error_ipipe:
- omap4iss_ipipeif_cleanup(iss);
-error_ipipeif:
- omap4iss_csi2_cleanup(iss);
-error_csi2:
-error_csiphy:
- return ret;
-}
-
-static int iss_probe(struct platform_device *pdev)
-{
- struct iss_platform_data *pdata = pdev->dev.platform_data;
- struct iss_device *iss;
- unsigned int i;
- int ret;
-
- if (pdata == NULL)
- return -EINVAL;
-
- iss = devm_kzalloc(&pdev->dev, sizeof(*iss), GFP_KERNEL);
- if (!iss)
- return -ENOMEM;
-
- mutex_init(&iss->iss_mutex);
-
- iss->dev = &pdev->dev;
- iss->pdata = pdata;
-
- iss->raw_dmamask = DMA_BIT_MASK(32);
- iss->dev->dma_mask = &iss->raw_dmamask;
- iss->dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
- platform_set_drvdata(pdev, iss);
-
- /*
- * TODO: When implementing DT support switch to syscon regmap lookup by
- * phandle.
- */
- iss->syscon = syscon_regmap_lookup_by_compatible("syscon");
- if (IS_ERR(iss->syscon)) {
- ret = PTR_ERR(iss->syscon);
- goto error;
- }
-
- /* Clocks */
- ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
- if (ret < 0)
- goto error;
-
- ret = iss_get_clocks(iss);
- if (ret < 0)
- goto error;
-
- if (omap4iss_get(iss) == NULL)
- goto error;
-
- ret = iss_reset(iss);
- if (ret < 0)
- goto error_iss;
-
- iss->revision = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
- dev_info(iss->dev, "Revision %08x found\n", iss->revision);
-
- for (i = 1; i < OMAP4_ISS_MEM_LAST; i++) {
- ret = iss_map_mem_resource(pdev, iss, i);
- if (ret)
- goto error_iss;
- }
-
- /* Configure BTE BW_LIMITER field to max recommended value (1 GB) */
- iss_reg_update(iss, OMAP4_ISS_MEM_BTE, BTE_CTRL,
- BTE_CTRL_BW_LIMITER_MASK,
- 18 << BTE_CTRL_BW_LIMITER_SHIFT);
-
- /* Perform ISP reset */
- ret = omap4iss_subclk_enable(iss, OMAP4_ISS_SUBCLK_ISP);
- if (ret < 0)
- goto error_iss;
-
- ret = iss_isp_reset(iss);
- if (ret < 0)
- goto error_iss;
-
- dev_info(iss->dev, "ISP Revision %08x found\n",
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_REVISION));
-
- /* Interrupt */
- iss->irq_num = platform_get_irq(pdev, 0);
- if (iss->irq_num <= 0) {
- dev_err(iss->dev, "No IRQ resource\n");
- ret = -ENODEV;
- goto error_iss;
- }
-
- if (devm_request_irq(iss->dev, iss->irq_num, iss_isr, IRQF_SHARED,
- "OMAP4 ISS", iss)) {
- dev_err(iss->dev, "Unable to request IRQ\n");
- ret = -EINVAL;
- goto error_iss;
- }
-
- /* Entities */
- ret = iss_initialize_modules(iss);
- if (ret < 0)
- goto error_iss;
-
- ret = iss_register_entities(iss);
- if (ret < 0)
- goto error_modules;
-
- omap4iss_put(iss);
-
- return 0;
-
-error_modules:
- iss_cleanup_modules(iss);
-error_iss:
- omap4iss_put(iss);
-error:
- platform_set_drvdata(pdev, NULL);
-
- mutex_destroy(&iss->iss_mutex);
-
- return ret;
-}
-
-static int iss_remove(struct platform_device *pdev)
-{
- struct iss_device *iss = platform_get_drvdata(pdev);
-
- iss_unregister_entities(iss);
- iss_cleanup_modules(iss);
-
- return 0;
-}
-
-static const struct platform_device_id omap4iss_id_table[] = {
- { "omap4iss", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(platform, omap4iss_id_table);
-
-static struct platform_driver iss_driver = {
- .probe = iss_probe,
- .remove = iss_remove,
- .id_table = omap4iss_id_table,
- .driver = {
- .name = "omap4iss",
- },
-};
-
-module_platform_driver(iss_driver);
-
-MODULE_DESCRIPTION("TI OMAP4 ISS driver");
-MODULE_AUTHOR("Sergio Aguirre <sergio.a.aguirre@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(ISS_VIDEO_DRIVER_VERSION);
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
deleted file mode 100644
index 35df8b4709e6..000000000000
--- a/drivers/staging/media/omap4iss/iss.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver
- *
- * Copyright (C) 2012 Texas Instruments.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _OMAP4_ISS_H_
-#define _OMAP4_ISS_H_
-
-#include <media/v4l2-device.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/wait.h>
-
-#include <media/omap4iss.h>
-
-#include "iss_regs.h"
-#include "iss_csiphy.h"
-#include "iss_csi2.h"
-#include "iss_ipipeif.h"
-#include "iss_ipipe.h"
-#include "iss_resizer.h"
-
-struct regmap;
-
-#define to_iss_device(ptr_module) \
- container_of(ptr_module, struct iss_device, ptr_module)
-#define to_device(ptr_module) \
- (to_iss_device(ptr_module)->dev)
-
-enum iss_mem_resources {
- OMAP4_ISS_MEM_TOP,
- OMAP4_ISS_MEM_CSI2_A_REGS1,
- OMAP4_ISS_MEM_CAMERARX_CORE1,
- OMAP4_ISS_MEM_CSI2_B_REGS1,
- OMAP4_ISS_MEM_CAMERARX_CORE2,
- OMAP4_ISS_MEM_BTE,
- OMAP4_ISS_MEM_ISP_SYS1,
- OMAP4_ISS_MEM_ISP_RESIZER,
- OMAP4_ISS_MEM_ISP_IPIPE,
- OMAP4_ISS_MEM_ISP_ISIF,
- OMAP4_ISS_MEM_ISP_IPIPEIF,
- OMAP4_ISS_MEM_LAST,
-};
-
-enum iss_subclk_resource {
- OMAP4_ISS_SUBCLK_SIMCOP = (1 << 0),
- OMAP4_ISS_SUBCLK_ISP = (1 << 1),
- OMAP4_ISS_SUBCLK_CSI2_A = (1 << 2),
- OMAP4_ISS_SUBCLK_CSI2_B = (1 << 3),
- OMAP4_ISS_SUBCLK_CCP2 = (1 << 4),
-};
-
-enum iss_isp_subclk_resource {
- OMAP4_ISS_ISP_SUBCLK_BL = (1 << 0),
- OMAP4_ISS_ISP_SUBCLK_ISIF = (1 << 1),
- OMAP4_ISS_ISP_SUBCLK_H3A = (1 << 2),
- OMAP4_ISS_ISP_SUBCLK_RSZ = (1 << 3),
- OMAP4_ISS_ISP_SUBCLK_IPIPE = (1 << 4),
- OMAP4_ISS_ISP_SUBCLK_IPIPEIF = (1 << 5),
-};
-
-/*
- * struct iss_reg - Structure for ISS register values.
- * @reg: 32-bit Register address.
- * @val: 32-bit Register value.
- */
-struct iss_reg {
- enum iss_mem_resources mmio_range;
- u32 reg;
- u32 val;
-};
-
-/*
- * struct iss_device - ISS device structure.
- * @syscon: Regmap for the syscon register space
- * @crashed: Bitmask of crashed entities (indexed by entity ID)
- */
-struct iss_device {
- struct v4l2_device v4l2_dev;
- struct media_device media_dev;
- struct device *dev;
- u32 revision;
-
- /* platform HW resources */
- struct iss_platform_data *pdata;
- unsigned int irq_num;
-
- struct resource *res[OMAP4_ISS_MEM_LAST];
- void __iomem *regs[OMAP4_ISS_MEM_LAST];
- struct regmap *syscon;
-
- u64 raw_dmamask;
-
- struct mutex iss_mutex; /* For handling ref_count field */
- unsigned int crashed;
- int has_context;
- int ref_count;
-
- struct clk *iss_fck;
- struct clk *iss_ctrlclk;
-
- /* ISS modules */
- struct iss_csi2_device csi2a;
- struct iss_csi2_device csi2b;
- struct iss_csiphy csiphy1;
- struct iss_csiphy csiphy2;
- struct iss_ipipeif_device ipipeif;
- struct iss_ipipe_device ipipe;
- struct iss_resizer_device resizer;
-
- unsigned int subclk_resources;
- unsigned int isp_subclk_resources;
-};
-
-#define v4l2_dev_to_iss_device(dev) \
- container_of(dev, struct iss_device, v4l2_dev)
-
-int omap4iss_get_external_info(struct iss_pipeline *pipe,
- struct media_link *link);
-
-int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
- atomic_t *stopping);
-
-int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
- atomic_t *stopping);
-
-int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
- enum iss_pipeline_stream_state state);
-void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe);
-
-void omap4iss_configure_bridge(struct iss_device *iss,
- enum ipipeif_input_entity input);
-
-struct iss_device *omap4iss_get(struct iss_device *iss);
-void omap4iss_put(struct iss_device *iss);
-int omap4iss_subclk_enable(struct iss_device *iss,
- enum iss_subclk_resource res);
-int omap4iss_subclk_disable(struct iss_device *iss,
- enum iss_subclk_resource res);
-void omap4iss_isp_subclk_enable(struct iss_device *iss,
- enum iss_isp_subclk_resource res);
-void omap4iss_isp_subclk_disable(struct iss_device *iss,
- enum iss_isp_subclk_resource res);
-
-int omap4iss_pipeline_pm_use(struct media_entity *entity, int use);
-
-int omap4iss_register_entities(struct platform_device *pdev,
- struct v4l2_device *v4l2_dev);
-void omap4iss_unregister_entities(struct platform_device *pdev);
-
-/*
- * iss_reg_read - Read the value of an OMAP4 ISS register
- * @iss: the ISS device
- * @res: memory resource in which the register is located
- * @offset: register offset in the memory resource
- *
- * Return the register value.
- */
-static inline
-u32 iss_reg_read(struct iss_device *iss, enum iss_mem_resources res,
- u32 offset)
-{
- return readl(iss->regs[res] + offset);
-}
-
-/*
- * iss_reg_write - Write a value to an OMAP4 ISS register
- * @iss: the ISS device
- * @res: memory resource in which the register is located
- * @offset: register offset in the memory resource
- * @value: value to be written
- */
-static inline
-void iss_reg_write(struct iss_device *iss, enum iss_mem_resources res,
- u32 offset, u32 value)
-{
- writel(value, iss->regs[res] + offset);
-}
-
-/*
- * iss_reg_clr - Clear bits in an OMAP4 ISS register
- * @iss: the ISS device
- * @res: memory resource in which the register is located
- * @offset: register offset in the memory resource
- * @clr: bit mask to be cleared
- */
-static inline
-void iss_reg_clr(struct iss_device *iss, enum iss_mem_resources res,
- u32 offset, u32 clr)
-{
- u32 v = iss_reg_read(iss, res, offset);
-
- iss_reg_write(iss, res, offset, v & ~clr);
-}
-
-/*
- * iss_reg_set - Set bits in an OMAP4 ISS register
- * @iss: the ISS device
- * @res: memory resource in which the register is located
- * @offset: register offset in the memory resource
- * @set: bit mask to be set
- */
-static inline
-void iss_reg_set(struct iss_device *iss, enum iss_mem_resources res,
- u32 offset, u32 set)
-{
- u32 v = iss_reg_read(iss, res, offset);
-
- iss_reg_write(iss, res, offset, v | set);
-}
-
-/*
- * iss_reg_update - Clear and set bits in an OMAP4 ISS register
- * @iss: the ISS device
- * @res: memory resource in which the register is located
- * @offset: register offset in the memory resource
- * @clr: bit mask to be cleared
- * @set: bit mask to be set
- *
- * Clear the clr mask first and then set the set mask.
- */
-static inline
-void iss_reg_update(struct iss_device *iss, enum iss_mem_resources res,
- u32 offset, u32 clr, u32 set)
-{
- u32 v = iss_reg_read(iss, res, offset);
-
- iss_reg_write(iss, res, offset, (v & ~clr) | set);
-}
-
-#define iss_poll_condition_timeout(cond, timeout, min_ival, max_ival) \
-({ \
- unsigned long __timeout = jiffies + usecs_to_jiffies(timeout); \
- unsigned int __min_ival = (min_ival); \
- unsigned int __max_ival = (max_ival); \
- bool __cond; \
- while (!(__cond = (cond))) { \
- if (time_after(jiffies, __timeout)) \
- break; \
- usleep_range(__min_ival, __max_ival); \
- } \
- !__cond; \
-})
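
Note the macro's return convention: the statement expression ends with !__cond, so it evaluates to nonzero exactly when the condition was still false at the deadline, i.e. on timeout. A minimal usage sketch; the flag polled here is a placeholder, not a real ISS register read.

/* Poll a placeholder condition for up to 1000 us, sleeping 10-20 us between
 * evaluations; a nonzero result means the deadline expired.
 */
if (iss_poll_condition_timeout(some_ready_flag, 1000, 10, 20))
	return -ETIMEDOUT;
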
-
-#endif /* _OMAP4_ISS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
deleted file mode 100644
index bc83f8246101..000000000000
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ /dev/null
@@ -1,1357 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - CSI2 module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <media/v4l2-common.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/mm.h>
-
-#include "iss.h"
-#include "iss_regs.h"
-#include "iss_csi2.h"
-
-/*
- * csi2_if_enable - Enable CSI2 Receiver interface.
- * @enable: enable flag
- *
- */
-static void csi2_if_enable(struct iss_csi2_device *csi2, u8 enable)
-{
- struct iss_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
-
- iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTRL, CSI2_CTRL_IF_EN,
- enable ? CSI2_CTRL_IF_EN : 0);
-
- currctrl->if_enable = enable;
-}
-
-/*
- * csi2_recv_config - CSI2 receiver module configuration.
- * @currctrl: iss_csi2_ctrl_cfg structure
- *
- */
-static void csi2_recv_config(struct iss_csi2_device *csi2,
- struct iss_csi2_ctrl_cfg *currctrl)
-{
- u32 reg = 0;
-
- if (currctrl->frame_mode)
- reg |= CSI2_CTRL_FRAME;
- else
- reg &= ~CSI2_CTRL_FRAME;
-
- if (currctrl->vp_clk_enable)
- reg |= CSI2_CTRL_VP_CLK_EN;
- else
- reg &= ~CSI2_CTRL_VP_CLK_EN;
-
- if (currctrl->vp_only_enable)
- reg |= CSI2_CTRL_VP_ONLY_EN;
- else
- reg &= ~CSI2_CTRL_VP_ONLY_EN;
-
- reg &= ~CSI2_CTRL_VP_OUT_CTRL_MASK;
- reg |= currctrl->vp_out_ctrl << CSI2_CTRL_VP_OUT_CTRL_SHIFT;
-
- if (currctrl->ecc_enable)
- reg |= CSI2_CTRL_ECC_EN;
- else
- reg &= ~CSI2_CTRL_ECC_EN;
-
- /*
- * Set MFlag assertion boundaries to:
- * Low: 4/8 of FIFO size
- * High: 6/8 of FIFO size
- */
- reg &= ~(CSI2_CTRL_MFLAG_LEVH_MASK | CSI2_CTRL_MFLAG_LEVL_MASK);
- reg |= (2 << CSI2_CTRL_MFLAG_LEVH_SHIFT) |
- (4 << CSI2_CTRL_MFLAG_LEVL_SHIFT);
-
- /* Generation of 16x64-bit bursts (Recommended) */
- reg |= CSI2_CTRL_BURST_SIZE_EXPAND;
-
- /* Do Non-Posted writes (Recommended) */
- reg |= CSI2_CTRL_NON_POSTED_WRITE;
-
- /*
- * Enforce Little endian for all formats, including:
- * YUV4:2:2 8-bit and YUV4:2:0 Legacy
- */
- reg |= CSI2_CTRL_ENDIANNESS;
-
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTRL, reg);
-}
-
-static const unsigned int csi2_input_fmts[] = {
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8,
- MEDIA_BUS_FMT_UYVY8_1X16,
- MEDIA_BUS_FMT_YUYV8_1X16,
-};
-
-/* Setting the format on the CSI2 requires a mapping function that takes
- * the following inputs:
- * - 3 different formats (at this time)
- * - 2 destinations (mem, vp+mem) (vp only handled separately)
- * - 2 decompression options (on, off)
- * Output should be CSI2 frame format code
- * Array indices as follows: [format][dest][decompr]
- * Not all combinations are valid. 0 means invalid.
- */
-static const u16 __csi2_fmt_map[][2][2] = {
- /* RAW10 formats */
- {
- /* Output to memory */
- {
- /* No DPCM decompression */
- CSI2_PIX_FMT_RAW10_EXP16,
- /* DPCM decompression */
- 0,
- },
- /* Output to both */
- {
- /* No DPCM decompression */
- CSI2_PIX_FMT_RAW10_EXP16_VP,
- /* DPCM decompression */
- 0,
- },
- },
- /* RAW10 DPCM8 formats */
- {
- /* Output to memory */
- {
- /* No DPCM decompression */
- CSI2_USERDEF_8BIT_DATA1,
- /* DPCM decompression */
- CSI2_USERDEF_8BIT_DATA1_DPCM10,
- },
- /* Output to both */
- {
- /* No DPCM decompression */
- CSI2_PIX_FMT_RAW8_VP,
- /* DPCM decompression */
- CSI2_USERDEF_8BIT_DATA1_DPCM10_VP,
- },
- },
- /* RAW8 formats */
- {
- /* Output to memory */
- {
- /* No DPCM decompression */
- CSI2_PIX_FMT_RAW8,
- /* DPCM decompression */
- 0,
- },
- /* Output to both */
- {
- /* No DPCM decompression */
- CSI2_PIX_FMT_RAW8_VP,
- /* DPCM decompression */
- 0,
- },
- },
- /* YUV422 formats */
- {
- /* Output to memory */
- {
- /* No DPCM decompression */
- CSI2_PIX_FMT_YUV422_8BIT,
- /* DPCM decompression */
- 0,
- },
- /* Output to both */
- {
- /* No DPCM decompression */
- CSI2_PIX_FMT_YUV422_8BIT_VP16,
- /* DPCM decompression */
- 0,
- },
- },
-};
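
As a concrete reading of the table above (an illustrative lookup, not code from the driver): a RAW10 DPCM8 stream routed to both memory and the video port, with DPCM decompression enabled, resolves as follows.

/* [format][dest][decompr]: RAW10 DPCM8, memory + video port, decompression on */
u16 fmt_id = __csi2_fmt_map[1][1][1];	/* == CSI2_USERDEF_8BIT_DATA1_DPCM10_VP */
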
-
-/*
- * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
- * @csi2: ISS CSI2 device
- *
- * Returns CSI2 physical format id
- */
-static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
-{
- const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
- int fmtidx, destidx;
-
- switch (fmt->code) {
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- fmtidx = 0;
- break;
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
- fmtidx = 1;
- break;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_SGBRG8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- case MEDIA_BUS_FMT_SRGGB8_1X8:
- fmtidx = 2;
- break;
- case MEDIA_BUS_FMT_UYVY8_1X16:
- case MEDIA_BUS_FMT_YUYV8_1X16:
- fmtidx = 3;
- break;
- default:
- WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
- fmt->code);
- return 0;
- }
-
- if (!(csi2->output & CSI2_OUTPUT_IPIPEIF) &&
- !(csi2->output & CSI2_OUTPUT_MEMORY)) {
- /* Neither output enabled is a valid combination */
- return CSI2_PIX_FMT_OTHERS;
- }
-
-	/* If we need to skip frames at the beginning of the stream, disable the
- * video port to avoid sending the skipped frames to the IPIPEIF.
- */
- destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
-
- return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress];
-}
-
-/*
- * csi2_set_outaddr - Set memory address to save output image
- * @csi2: Pointer to ISS CSI2a device.
- * @addr: 32-bit memory address aligned on 32 byte boundary.
- *
- * Sets the memory address where the output will be saved.
- *
- * The address must be aligned on a 32 byte boundary; both the ping and pong
- * addresses of context 0 are programmed with the same value.
- */
-static void csi2_set_outaddr(struct iss_csi2_device *csi2, u32 addr)
-{
- struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[0];
-
- ctx->ping_addr = addr;
- ctx->pong_addr = addr;
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
- ctx->ping_addr);
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
- ctx->pong_addr);
-}
-
-/*
- * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
- * be enabled by CSI2.
- * @format_id: mapped format id
- *
- */
-static inline int is_usr_def_mapping(u32 format_id)
-{
- return (format_id & 0xf0) == 0x40 ? 1 : 0;
-}
-
-/*
- * csi2_ctx_enable - Enable specified CSI2 context
- * @ctxnum: Context number; valid values are 0 to 7.
- * @enable: enable
- *
- */
-static void csi2_ctx_enable(struct iss_csi2_device *csi2, u8 ctxnum, u8 enable)
-{
- struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
- u32 reg;
-
- reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum));
-
- if (enable) {
- unsigned int skip = 0;
-
- if (csi2->frame_skip)
- skip = csi2->frame_skip;
- else if (csi2->output & CSI2_OUTPUT_MEMORY)
- skip = 1;
-
- reg &= ~CSI2_CTX_CTRL1_COUNT_MASK;
- reg |= CSI2_CTX_CTRL1_COUNT_UNLOCK
- | (skip << CSI2_CTX_CTRL1_COUNT_SHIFT)
- | CSI2_CTX_CTRL1_CTX_EN;
- } else {
- reg &= ~CSI2_CTX_CTRL1_CTX_EN;
- }
-
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum), reg);
- ctx->enabled = enable;
-}
-
-/*
- * csi2_ctx_config - CSI2 context configuration.
- * @ctx: context configuration
- *
- */
-static void csi2_ctx_config(struct iss_csi2_device *csi2,
- struct iss_csi2_ctx_cfg *ctx)
-{
- u32 reg = 0;
-
- ctx->frame = 0;
-
- /* Set up CSI2_CTx_CTRL1 */
- if (ctx->eof_enabled)
- reg = CSI2_CTX_CTRL1_EOF_EN;
-
- if (ctx->eol_enabled)
- reg |= CSI2_CTX_CTRL1_EOL_EN;
-
- if (ctx->checksum_enabled)
- reg |= CSI2_CTX_CTRL1_CS_EN;
-
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctx->ctxnum), reg);
-
- /* Set up CSI2_CTx_CTRL2 */
- reg = ctx->virtual_id << CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
- reg |= ctx->format_id << CSI2_CTX_CTRL2_FORMAT_SHIFT;
-
- if (ctx->dpcm_decompress && ctx->dpcm_predictor)
- reg |= CSI2_CTX_CTRL2_DPCM_PRED;
-
- if (is_usr_def_mapping(ctx->format_id))
- reg |= 2 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
-
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL2(ctx->ctxnum), reg);
-
- /* Set up CSI2_CTx_CTRL3 */
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL3(ctx->ctxnum),
- ctx->alpha << CSI2_CTX_CTRL3_ALPHA_SHIFT);
-
- /* Set up CSI2_CTx_DAT_OFST */
- iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTX_DAT_OFST(ctx->ctxnum),
- CSI2_CTX_DAT_OFST_MASK, ctx->data_offset);
-
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
- ctx->ping_addr);
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
- ctx->pong_addr);
-}
-
-/*
- * csi2_timing_config - CSI2 timing configuration.
- * @timing: csi2_timing_cfg structure
- */
-static void csi2_timing_config(struct iss_csi2_device *csi2,
- struct iss_csi2_timing_cfg *timing)
-{
- u32 reg;
-
- reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_TIMING);
-
- if (timing->force_rx_mode)
- reg |= CSI2_TIMING_FORCE_RX_MODE_IO1;
- else
- reg &= ~CSI2_TIMING_FORCE_RX_MODE_IO1;
-
- if (timing->stop_state_16x)
- reg |= CSI2_TIMING_STOP_STATE_X16_IO1;
- else
- reg &= ~CSI2_TIMING_STOP_STATE_X16_IO1;
-
- if (timing->stop_state_4x)
- reg |= CSI2_TIMING_STOP_STATE_X4_IO1;
- else
- reg &= ~CSI2_TIMING_STOP_STATE_X4_IO1;
-
- reg &= ~CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK;
- reg |= timing->stop_state_counter <<
- CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT;
-
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_TIMING, reg);
-}
-
-/*
- * csi2_irq_ctx_set - Enables CSI2 Context IRQs.
- * @enable: Enable/disable CSI2 Context interrupts
- */
-static void csi2_irq_ctx_set(struct iss_csi2_device *csi2, int enable)
-{
- const u32 mask = CSI2_CTX_IRQ_FE | CSI2_CTX_IRQ_FS;
- int i;
-
- for (i = 0; i < 8; i++) {
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(i),
- mask);
- if (enable)
- iss_reg_set(csi2->iss, csi2->regs1,
- CSI2_CTX_IRQENABLE(i), mask);
- else
- iss_reg_clr(csi2->iss, csi2->regs1,
- CSI2_CTX_IRQENABLE(i), mask);
- }
-}
-
-/*
- * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
- * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
- */
-static void csi2_irq_complexio1_set(struct iss_csi2_device *csi2, int enable)
-{
- u32 reg;
-
- reg = CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT |
- CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER |
- CSI2_COMPLEXIO_IRQ_STATEULPM5 |
- CSI2_COMPLEXIO_IRQ_ERRCONTROL5 |
- CSI2_COMPLEXIO_IRQ_ERRESC5 |
- CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 |
- CSI2_COMPLEXIO_IRQ_ERRSOTHS5 |
- CSI2_COMPLEXIO_IRQ_STATEULPM4 |
- CSI2_COMPLEXIO_IRQ_ERRCONTROL4 |
- CSI2_COMPLEXIO_IRQ_ERRESC4 |
- CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 |
- CSI2_COMPLEXIO_IRQ_ERRSOTHS4 |
- CSI2_COMPLEXIO_IRQ_STATEULPM3 |
- CSI2_COMPLEXIO_IRQ_ERRCONTROL3 |
- CSI2_COMPLEXIO_IRQ_ERRESC3 |
- CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 |
- CSI2_COMPLEXIO_IRQ_ERRSOTHS3 |
- CSI2_COMPLEXIO_IRQ_STATEULPM2 |
- CSI2_COMPLEXIO_IRQ_ERRCONTROL2 |
- CSI2_COMPLEXIO_IRQ_ERRESC2 |
- CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 |
- CSI2_COMPLEXIO_IRQ_ERRSOTHS2 |
- CSI2_COMPLEXIO_IRQ_STATEULPM1 |
- CSI2_COMPLEXIO_IRQ_ERRCONTROL1 |
- CSI2_COMPLEXIO_IRQ_ERRESC1 |
- CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 |
- CSI2_COMPLEXIO_IRQ_ERRSOTHS1;
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS, reg);
- if (enable)
- iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
- reg);
- else
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
- 0);
-}
-
-/*
- * csi2_irq_status_set - Enables CSI2 Status IRQs.
- * @enable: Enable/disable CSI2 Status interrupts
- */
-static void csi2_irq_status_set(struct iss_csi2_device *csi2, int enable)
-{
- u32 reg;
-
- reg = CSI2_IRQ_OCP_ERR |
- CSI2_IRQ_SHORT_PACKET |
- CSI2_IRQ_ECC_CORRECTION |
- CSI2_IRQ_ECC_NO_CORRECTION |
- CSI2_IRQ_COMPLEXIO_ERR |
- CSI2_IRQ_FIFO_OVF |
- CSI2_IRQ_CONTEXT0;
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, reg);
- if (enable)
- iss_reg_set(csi2->iss, csi2->regs1, CSI2_IRQENABLE, reg);
- else
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQENABLE, 0);
-}
-
-/*
- * omap4iss_csi2_reset - Resets the CSI2 module.
- *
- * Must be called with the phy lock held.
- *
- * Returns 0 if successful, or -EBUSY if power command didn't respond.
- */
-int omap4iss_csi2_reset(struct iss_csi2_device *csi2)
-{
- unsigned int timeout;
-
- if (!csi2->available)
- return -ENODEV;
-
- if (csi2->phy->phy_in_use)
- return -EBUSY;
-
- iss_reg_set(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
- CSI2_SYSCONFIG_SOFT_RESET);
-
- timeout = iss_poll_condition_timeout(
- iss_reg_read(csi2->iss, csi2->regs1, CSI2_SYSSTATUS) &
- CSI2_SYSSTATUS_RESET_DONE, 500, 100, 200);
- if (timeout) {
- dev_err(csi2->iss->dev, "CSI2: Soft reset timeout!\n");
- return -EBUSY;
- }
-
- iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_CFG,
- CSI2_COMPLEXIO_CFG_RESET_CTRL);
-
- timeout = iss_poll_condition_timeout(
- iss_reg_read(csi2->iss, csi2->phy->phy_regs, REGISTER1) &
- REGISTER1_RESET_DONE_CTRLCLK, 10000, 100, 500);
- if (timeout) {
- dev_err(csi2->iss->dev, "CSI2: CSI2_96M_FCLK reset timeout!\n");
- return -EBUSY;
- }
-
- iss_reg_update(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
- CSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
- CSI2_SYSCONFIG_AUTO_IDLE,
- CSI2_SYSCONFIG_MSTANDBY_MODE_NO);
-
- return 0;
-}
-
-static int csi2_configure(struct iss_csi2_device *csi2)
-{
- const struct iss_v4l2_subdevs_group *pdata;
- struct iss_csi2_timing_cfg *timing = &csi2->timing[0];
- struct v4l2_subdev *sensor;
- struct media_pad *pad;
-
- /*
-	 * CSI2 fields that could be updated while the context or the
-	 * interface is enabled are not updated dynamically at the
-	 * moment, so do not allow reconfiguration once either of them
-	 * has been enabled.
- */
- if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
- return -EBUSY;
-
- pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
- sensor = media_entity_to_v4l2_subdev(pad->entity);
- pdata = sensor->host_priv;
-
- csi2->frame_skip = 0;
- v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
-
- csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
- csi2->ctrl.frame_mode = ISS_CSI2_FRAME_IMMEDIATE;
- csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
-
- timing->force_rx_mode = 1;
- timing->stop_state_16x = 1;
- timing->stop_state_4x = 1;
- timing->stop_state_counter = 0x1ff;
-
- /*
- * The CSI2 receiver can't do any format conversion except DPCM
- * decompression, so every set_format call configures both pads
- * and enables DPCM decompression as a special case:
- */
- if (csi2->formats[CSI2_PAD_SINK].code !=
- csi2->formats[CSI2_PAD_SOURCE].code)
- csi2->dpcm_decompress = true;
- else
- csi2->dpcm_decompress = false;
-
- csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);
-
- if (csi2->video_out.bpl_padding == 0)
- csi2->contexts[0].data_offset = 0;
- else
- csi2->contexts[0].data_offset = csi2->video_out.bpl_value;
-
- /*
- * Enable end of frame and end of line signals generation for
- * context 0. These signals are generated from CSI2 receiver to
- * qualify the last pixel of a frame and the last pixel of a line.
- * Without enabling the signals CSI2 receiver writes data to memory
- * beyond buffer size and/or data line offset is not handled correctly.
- */
- csi2->contexts[0].eof_enabled = 1;
- csi2->contexts[0].eol_enabled = 1;
-
- csi2_irq_complexio1_set(csi2, 1);
- csi2_irq_ctx_set(csi2, 1);
- csi2_irq_status_set(csi2, 1);
-
- /* Set configuration (timings, format and links) */
- csi2_timing_config(csi2, timing);
- csi2_recv_config(csi2, &csi2->ctrl);
- csi2_ctx_config(csi2, &csi2->contexts[0]);
-
- return 0;
-}
-
-/*
- * csi2_print_status - Prints CSI2 debug information.
- */
-#define CSI2_PRINT_REGISTER(iss, regs, name)\
- dev_dbg(iss->dev, "###CSI2 " #name "=0x%08x\n", \
- iss_reg_read(iss, regs, CSI2_##name))
-
-static void csi2_print_status(struct iss_csi2_device *csi2)
-{
- struct iss_device *iss = csi2->iss;
-
- if (!csi2->available)
- return;
-
- dev_dbg(iss->dev, "-------------CSI2 Register dump-------------\n");
-
- CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSCONFIG);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSSTATUS);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQENABLE);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQSTATUS);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTRL);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_H);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_CFG);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQSTATUS);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, SHORT_PACKET);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQENABLE);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_P);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, TIMING);
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL1(0));
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL2(0));
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_DAT_OFST(0));
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PING_ADDR(0));
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PONG_ADDR(0));
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQENABLE(0));
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQSTATUS(0));
- CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL3(0));
-
- dev_dbg(iss->dev, "--------------------------------------------\n");
-}
-
-/* -----------------------------------------------------------------------------
- * Interrupt handling
- */
-
-/*
- * csi2_isr_buffer - Does buffer handling at end-of-frame
- * when writing to memory.
- */
-static void csi2_isr_buffer(struct iss_csi2_device *csi2)
-{
- struct iss_buffer *buffer;
-
- csi2_ctx_enable(csi2, 0, 0);
-
- buffer = omap4iss_video_buffer_next(&csi2->video_out);
-
- /*
- * Let video queue operation restart engine if there is an underrun
- * condition.
- */
- if (buffer == NULL)
- return;
-
- csi2_set_outaddr(csi2, buffer->iss_addr);
- csi2_ctx_enable(csi2, 0, 1);
-}
-
-static void csi2_isr_ctx(struct iss_csi2_device *csi2,
- struct iss_csi2_ctx_cfg *ctx)
-{
- unsigned int n = ctx->ctxnum;
- u32 status;
-
- status = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n));
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n), status);
-
- /* Propagate frame number */
- if (status & CSI2_CTX_IRQ_FS) {
- struct iss_pipeline *pipe =
- to_iss_pipeline(&csi2->subdev.entity);
- u16 frame;
- u16 delta;
-
- frame = iss_reg_read(csi2->iss, csi2->regs1,
- CSI2_CTX_CTRL2(ctx->ctxnum))
- >> CSI2_CTX_CTRL2_FRAME_SHIFT;
-
- if (frame == 0) {
- /* A zero value means that the counter isn't implemented
- * by the source. Increment the frame number in software
- * in that case.
- */
- atomic_inc(&pipe->frame_number);
- } else {
- /* Extend the 16 bit frame number to 32 bits by
- * computing the delta between two consecutive CSI2
- * frame numbers and adding it to the software frame
- * number. The hardware counter starts at 1 and wraps
- * from 0xffff to 1 without going through 0, so subtract
- * 1 when the counter wraps.
- */
- delta = frame - ctx->frame;
- if (frame < ctx->frame)
- delta--;
- ctx->frame = frame;
-
- atomic_add(delta, &pipe->frame_number);
- }
- }
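
As a worked check of the wrap handling above (values chosen purely for the example): if ctx->frame is 0xfffe and the hardware now reports 0x0002, the 16-bit subtraction gives delta = 4; because frame < ctx->frame the wrap branch subtracts 1, so 3 is added to pipe->frame_number, matching the actual sequence 0xffff, 0x0001, 0x0002.
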
-
- if (!(status & CSI2_CTX_IRQ_FE))
- return;
-
- /* Skip interrupts until we reach the frame skip count. The CSI2 will be
- * automatically disabled, as the frame skip count has been programmed
- * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
- *
- * It would have been nice to rely on the FRAME_NUMBER interrupt instead
- * but it turned out that the interrupt is only generated when the CSI2
- * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
- * correctly and reaches 0 when data is forwarded to the video port only
- * but no interrupt arrives). Maybe a CSI2 hardware bug.
- */
- if (csi2->frame_skip) {
- csi2->frame_skip--;
- if (csi2->frame_skip == 0) {
- ctx->format_id = csi2_ctx_map_format(csi2);
- csi2_ctx_config(csi2, ctx);
- csi2_ctx_enable(csi2, n, 1);
- }
- return;
- }
-
- if (csi2->output & CSI2_OUTPUT_MEMORY)
- csi2_isr_buffer(csi2);
-}
-
-/*
- * omap4iss_csi2_isr - CSI2 interrupt handling.
- */
-void omap4iss_csi2_isr(struct iss_csi2_device *csi2)
-{
- struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
- u32 csi2_irqstatus, cpxio1_irqstatus;
- struct iss_device *iss = csi2->iss;
-
- if (!csi2->available)
- return;
-
- csi2_irqstatus = iss_reg_read(csi2->iss, csi2->regs1, CSI2_IRQSTATUS);
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, csi2_irqstatus);
-
- /* Failure Cases */
- if (csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR) {
- cpxio1_irqstatus = iss_reg_read(csi2->iss, csi2->regs1,
- CSI2_COMPLEXIO_IRQSTATUS);
- iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS,
- cpxio1_irqstatus);
- dev_dbg(iss->dev, "CSI2: ComplexIO Error IRQ %x\n",
- cpxio1_irqstatus);
- pipe->error = true;
- }
-
- if (csi2_irqstatus & (CSI2_IRQ_OCP_ERR |
- CSI2_IRQ_SHORT_PACKET |
- CSI2_IRQ_ECC_NO_CORRECTION |
- CSI2_IRQ_COMPLEXIO_ERR |
- CSI2_IRQ_FIFO_OVF)) {
- dev_dbg(iss->dev,
- "CSI2 Err: OCP:%d SHORT:%d ECC:%d CPXIO:%d OVF:%d\n",
- csi2_irqstatus & CSI2_IRQ_OCP_ERR ? 1 : 0,
- csi2_irqstatus & CSI2_IRQ_SHORT_PACKET ? 1 : 0,
- csi2_irqstatus & CSI2_IRQ_ECC_NO_CORRECTION ? 1 : 0,
- csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR ? 1 : 0,
- csi2_irqstatus & CSI2_IRQ_FIFO_OVF ? 1 : 0);
- pipe->error = true;
- }
-
- if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
- return;
-
- /* Successful cases */
- if (csi2_irqstatus & CSI2_IRQ_CONTEXT0)
- csi2_isr_ctx(csi2, &csi2->contexts[0]);
-
- if (csi2_irqstatus & CSI2_IRQ_ECC_CORRECTION)
- dev_dbg(iss->dev, "CSI2: ECC correction done\n");
-}
-
-/* -----------------------------------------------------------------------------
- * ISS video operations
- */
-
-/*
- * csi2_queue - Queues the first buffer when using memory output
- * @video: The video node
- * @buffer: buffer to queue
- */
-static int csi2_queue(struct iss_video *video, struct iss_buffer *buffer)
-{
- struct iss_csi2_device *csi2 = container_of(video,
- struct iss_csi2_device, video_out);
-
- csi2_set_outaddr(csi2, buffer->iss_addr);
-
- /*
- * If streaming was enabled before there was a buffer queued
- * or underrun happened in the ISR, the hardware was not enabled
- * and DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
- * Enable it now.
- */
- if (csi2->video_out.dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
- /* Enable / disable context 0 and IRQs */
- csi2_if_enable(csi2, 1);
- csi2_ctx_enable(csi2, 0, 1);
- iss_video_dmaqueue_flags_clr(&csi2->video_out);
- }
-
- return 0;
-}
-
-static const struct iss_video_operations csi2_issvideo_ops = {
- .queue = csi2_queue,
-};
-
-/* -----------------------------------------------------------------------------
- * V4L2 subdev operations
- */
-
-static struct v4l2_mbus_framefmt *
-__csi2_get_format(struct iss_csi2_device *csi2,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
-
- return &csi2->formats[pad];
-}
-
-static void
-csi2_try_format(struct iss_csi2_device *csi2,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- u32 pixelcode;
- struct v4l2_mbus_framefmt *format;
- const struct iss_format_info *info;
- unsigned int i;
-
- switch (pad) {
- case CSI2_PAD_SINK:
- /* Clamp the width and height to valid range (1-8191). */
- for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
- if (fmt->code == csi2_input_fmts[i])
- break;
- }
-
- /* If not found, use SGRBG10 as default */
- if (i >= ARRAY_SIZE(csi2_input_fmts))
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
-
- fmt->width = clamp_t(u32, fmt->width, 1, 8191);
- fmt->height = clamp_t(u32, fmt->height, 1, 8191);
- break;
-
- case CSI2_PAD_SOURCE:
- /* Source format same as sink format, except for DPCM
- * compression.
- */
- pixelcode = fmt->code;
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
- memcpy(fmt, format, sizeof(*fmt));
-
- /*
-		 * Only allow DPCM decompression, and check that the
- * pattern is preserved
- */
- info = omap4iss_video_format_info(fmt->code);
- if (info->uncompressed == pixelcode)
- fmt->code = pixelcode;
- break;
- }
-
- /* RGB, non-interlaced */
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->field = V4L2_FIELD_NONE;
-}
-
-/*
- * csi2_enum_mbus_code - Handle pixel format enumeration
- * @sd : pointer to v4l2 subdev structure
- * @cfg : V4L2 subdev pad config
- * @code : pointer to v4l2_subdev_mbus_code_enum structure
- * return -EINVAL or zero on success
- */
-static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
- const struct iss_format_info *info;
-
- if (code->pad == CSI2_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(csi2_input_fmts))
- return -EINVAL;
-
- code->code = csi2_input_fmts[code->index];
- } else {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
- code->which);
- switch (code->index) {
- case 0:
- /* Passthrough sink pad code */
- code->code = format->code;
- break;
- case 1:
- /* Uncompressed code */
- info = omap4iss_video_format_info(format->code);
- if (info->uncompressed == format->code)
- return -EINVAL;
-
- code->code = info->uncompressed;
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int csi2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * csi2_get_format - Handle get format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int csi2_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- fmt->format = *format;
- return 0;
-}
-
-/*
- * csi2_set_format - Handle set format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int csi2_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- /* Propagate the format from sink to source */
- if (fmt->pad == CSI2_PAD_SINK) {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
- fmt->which);
- *format = fmt->format;
- csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
- }
-
- return 0;
-}
-
-static int csi2_link_validate(struct v4l2_subdev *sd, struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt)
-{
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
- int rval;
-
- pipe->external = media_entity_to_v4l2_subdev(link->source->entity);
- rval = omap4iss_get_external_info(pipe, link);
- if (rval < 0)
- return rval;
-
- return v4l2_subdev_link_validate_default(sd, link, source_fmt,
- sink_fmt);
-}
-
-/*
- * csi2_init_formats - Initialize formats on all pads
- * @sd: ISS CSI2 V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
- */
-static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = CSI2_PAD_SINK;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
- format.format.width = 4096;
- format.format.height = 4096;
- csi2_set_format(sd, fh ? fh->pad : NULL, &format);
-
- return 0;
-}
-
-/*
- * csi2_set_stream - Enable/Disable streaming on the CSI2 module
- * @sd: ISS CSI2 V4L2 subdevice
- * @enable: ISS pipeline stream state
- *
- * Return 0 on success or a negative error code otherwise.
- */
-static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct iss_device *iss = csi2->iss;
- struct iss_video *video_out = &csi2->video_out;
- int ret = 0;
-
- if (csi2->state == ISS_PIPELINE_STREAM_STOPPED) {
- if (enable == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
-
- omap4iss_subclk_enable(iss, csi2->subclk);
- }
-
- switch (enable) {
- case ISS_PIPELINE_STREAM_CONTINUOUS: {
- ret = omap4iss_csiphy_config(iss, sd);
- if (ret < 0)
- return ret;
-
- if (omap4iss_csiphy_acquire(csi2->phy) < 0)
- return -ENODEV;
- csi2_configure(csi2);
- csi2_print_status(csi2);
-
- /*
- * When outputting to memory with no buffer available, let the
- * buffer queue handler start the hardware. A DMA queue flag
- * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
- * a buffer available.
- */
- if (csi2->output & CSI2_OUTPUT_MEMORY &&
- !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
- break;
- /* Enable context 0 and IRQs */
- atomic_set(&csi2->stopping, 0);
- csi2_ctx_enable(csi2, 0, 1);
- csi2_if_enable(csi2, 1);
- iss_video_dmaqueue_flags_clr(video_out);
- break;
- }
- case ISS_PIPELINE_STREAM_STOPPED:
- if (csi2->state == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
- if (omap4iss_module_sync_idle(&sd->entity, &csi2->wait,
- &csi2->stopping))
- ret = -ETIMEDOUT;
- csi2_ctx_enable(csi2, 0, 0);
- csi2_if_enable(csi2, 0);
- csi2_irq_ctx_set(csi2, 0);
- omap4iss_csiphy_release(csi2->phy);
- omap4iss_subclk_disable(iss, csi2->subclk);
- iss_video_dmaqueue_flags_clr(video_out);
- break;
- }
-
- csi2->state = enable;
- return ret;
-}
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops csi2_video_ops = {
- .s_stream = csi2_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
- .enum_mbus_code = csi2_enum_mbus_code,
- .enum_frame_size = csi2_enum_frame_size,
- .get_fmt = csi2_get_format,
- .set_fmt = csi2_set_format,
- .link_validate = csi2_link_validate,
-};
-
-/* subdev operations */
-static const struct v4l2_subdev_ops csi2_ops = {
- .video = &csi2_video_ops,
- .pad = &csi2_pad_ops,
-};
-
-/* subdev internal operations */
-static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
- .open = csi2_init_formats,
-};
-
-/* -----------------------------------------------------------------------------
- * Media entity operations
- */
-
-/*
- * csi2_link_setup - Setup CSI2 connections.
- * @entity : Pointer to media entity structure
- * @local : Pointer to local pad array
- * @remote : Pointer to remote pad array
- * @flags : Link flags
- * return -EINVAL or zero on success
- */
-static int csi2_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct iss_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
-
- /*
- * The ISS core doesn't support pipelines with multiple video outputs.
-	 * Revisit this when such support is implemented; return -EBUSY for now.
- */
-
- switch (local->index | media_entity_type(remote->entity)) {
- case CSI2_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (csi2->output & ~CSI2_OUTPUT_MEMORY)
- return -EBUSY;
- csi2->output |= CSI2_OUTPUT_MEMORY;
- } else {
- csi2->output &= ~CSI2_OUTPUT_MEMORY;
- }
- break;
-
- case CSI2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (csi2->output & ~CSI2_OUTPUT_IPIPEIF)
- return -EBUSY;
- csi2->output |= CSI2_OUTPUT_IPIPEIF;
- } else {
- csi2->output &= ~CSI2_OUTPUT_IPIPEIF;
- }
- break;
-
- default:
- /* Link from camera to CSI2 is fixed... */
- return -EINVAL;
- }
-
- ctrl->vp_only_enable = csi2->output & CSI2_OUTPUT_MEMORY ? false : true;
- ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
-
- return 0;
-}
-
-/* media operations */
-static const struct media_entity_operations csi2_media_ops = {
- .link_setup = csi2_link_setup,
- .link_validate = v4l2_subdev_link_validate,
-};
-
-void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2)
-{
- v4l2_device_unregister_subdev(&csi2->subdev);
- omap4iss_video_unregister(&csi2->video_out);
-}
-
-int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video nodes. */
- ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap4iss_video_register(&csi2->video_out, vdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap4iss_csi2_unregister_entities(csi2);
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
- * ISS CSI2 initialisation and cleanup
- */
-
-/*
- * csi2_init_entities - Initialize subdev and media entity.
- * @csi2: Pointer to csi2 structure.
- * return -ENOMEM or zero on success
- */
-static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
-{
- struct v4l2_subdev *sd = &csi2->subdev;
- struct media_pad *pads = csi2->pads;
- struct media_entity *me = &sd->entity;
- int ret;
- char name[V4L2_SUBDEV_NAME_SIZE];
-
- v4l2_subdev_init(sd, &csi2_ops);
- sd->internal_ops = &csi2_internal_ops;
- snprintf(name, sizeof(name), "CSI2%s", subname);
- snprintf(sd->name, sizeof(sd->name), "OMAP4 ISS %s", name);
-
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
- v4l2_set_subdevdata(sd, csi2);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
-
- me->ops = &csi2_media_ops;
- ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
- if (ret < 0)
- return ret;
-
- csi2_init_formats(sd, NULL);
-
- /* Video device node */
- csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- csi2->video_out.ops = &csi2_issvideo_ops;
- csi2->video_out.bpl_alignment = 32;
- csi2->video_out.bpl_zero_padding = 1;
- csi2->video_out.bpl_max = 0x1ffe0;
- csi2->video_out.iss = csi2->iss;
- csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
-
- ret = omap4iss_video_init(&csi2->video_out, name);
- if (ret < 0)
- goto error_video;
-
- /* Connect the CSI2 subdev to the video node. */
- ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
- &csi2->video_out.video.entity, 0, 0);
- if (ret < 0)
- goto error_link;
-
- return 0;
-
-error_link:
- omap4iss_video_cleanup(&csi2->video_out);
-error_video:
- media_entity_cleanup(&csi2->subdev.entity);
- return ret;
-}
-
-/*
- * omap4iss_csi2_init - Routine for module driver init
- */
-int omap4iss_csi2_init(struct iss_device *iss)
-{
- struct iss_csi2_device *csi2a = &iss->csi2a;
- struct iss_csi2_device *csi2b = &iss->csi2b;
- int ret;
-
- csi2a->iss = iss;
- csi2a->available = 1;
- csi2a->regs1 = OMAP4_ISS_MEM_CSI2_A_REGS1;
- csi2a->phy = &iss->csiphy1;
- csi2a->subclk = OMAP4_ISS_SUBCLK_CSI2_A;
- csi2a->state = ISS_PIPELINE_STREAM_STOPPED;
- init_waitqueue_head(&csi2a->wait);
-
- ret = csi2_init_entities(csi2a, "a");
- if (ret < 0)
- return ret;
-
- csi2b->iss = iss;
- csi2b->available = 1;
- csi2b->regs1 = OMAP4_ISS_MEM_CSI2_B_REGS1;
- csi2b->phy = &iss->csiphy2;
- csi2b->subclk = OMAP4_ISS_SUBCLK_CSI2_B;
- csi2b->state = ISS_PIPELINE_STREAM_STOPPED;
- init_waitqueue_head(&csi2b->wait);
-
- ret = csi2_init_entities(csi2b, "b");
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/*
- * omap4iss_csi2_cleanup - Routine for module driver cleanup
- */
-void omap4iss_csi2_cleanup(struct iss_device *iss)
-{
- struct iss_csi2_device *csi2a = &iss->csi2a;
- struct iss_csi2_device *csi2b = &iss->csi2b;
-
- omap4iss_video_cleanup(&csi2a->video_out);
- media_entity_cleanup(&csi2a->subdev.entity);
-
- omap4iss_video_cleanup(&csi2b->video_out);
- media_entity_cleanup(&csi2b->subdev.entity);
-}
diff --git a/drivers/staging/media/omap4iss/iss_csi2.h b/drivers/staging/media/omap4iss/iss_csi2.h
deleted file mode 100644
index 3b37978a3bdf..000000000000
--- a/drivers/staging/media/omap4iss/iss_csi2.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - CSI2 module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef OMAP4_ISS_CSI2_H
-#define OMAP4_ISS_CSI2_H
-
-#include <linux/types.h>
-#include <linux/videodev2.h>
-
-#include "iss_video.h"
-
-struct iss_csiphy;
-
-/* This is not an exhaustive list */
-enum iss_csi2_pix_formats {
- CSI2_PIX_FMT_OTHERS = 0,
- CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
- CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
- CSI2_PIX_FMT_YUV422_8BIT_VP16 = 0xde,
- CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
- CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
- CSI2_PIX_FMT_RAW8 = 0x2a,
- CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
- CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
- CSI2_PIX_FMT_RAW8_VP = 0x12a,
- CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
- CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
- CSI2_USERDEF_8BIT_DATA1 = 0x40,
-};
-
-enum iss_csi2_irqevents {
- OCP_ERR_IRQ = 0x4000,
- SHORT_PACKET_IRQ = 0x2000,
- ECC_CORRECTION_IRQ = 0x1000,
- ECC_NO_CORRECTION_IRQ = 0x800,
- COMPLEXIO2_ERR_IRQ = 0x400,
- COMPLEXIO1_ERR_IRQ = 0x200,
- FIFO_OVF_IRQ = 0x100,
- CONTEXT7 = 0x80,
- CONTEXT6 = 0x40,
- CONTEXT5 = 0x20,
- CONTEXT4 = 0x10,
- CONTEXT3 = 0x8,
- CONTEXT2 = 0x4,
- CONTEXT1 = 0x2,
- CONTEXT0 = 0x1,
-};
-
-enum iss_csi2_ctx_irqevents {
- CTX_ECC_CORRECTION = 0x100,
- CTX_LINE_NUMBER = 0x80,
- CTX_FRAME_NUMBER = 0x40,
- CTX_CS = 0x20,
- CTX_LE = 0x8,
- CTX_LS = 0x4,
- CTX_FE = 0x2,
- CTX_FS = 0x1,
-};
-
-enum iss_csi2_frame_mode {
- ISS_CSI2_FRAME_IMMEDIATE,
- ISS_CSI2_FRAME_AFTERFEC,
-};
-
-#define ISS_CSI2_MAX_CTX_NUM 7
-
-struct iss_csi2_ctx_cfg {
- u8 ctxnum; /* context number 0 - 7 */
- u8 dpcm_decompress;
-
- /* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
- u8 virtual_id;
- u16 format_id; /* as in CSI2_CTx_CTRL2[9:0] */
- u8 dpcm_predictor; /* 1: simple, 0: advanced */
- u16 frame;
-
- /* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
- u16 alpha;
- u16 data_offset;
- u32 ping_addr;
- u32 pong_addr;
- u8 eof_enabled;
- u8 eol_enabled;
- u8 checksum_enabled;
- u8 enabled;
-};
-
-struct iss_csi2_timing_cfg {
- u8 ionum; /* IO1 or IO2 as in CSI2_TIMING */
- unsigned force_rx_mode:1;
- unsigned stop_state_16x:1;
- unsigned stop_state_4x:1;
- u16 stop_state_counter;
-};
-
-struct iss_csi2_ctrl_cfg {
- bool vp_clk_enable;
- bool vp_only_enable;
- u8 vp_out_ctrl;
- enum iss_csi2_frame_mode frame_mode;
- bool ecc_enable;
- bool if_enable;
-};
-
-#define CSI2_PAD_SINK 0
-#define CSI2_PAD_SOURCE 1
-#define CSI2_PADS_NUM 2
-
-#define CSI2_OUTPUT_IPIPEIF (1 << 0)
-#define CSI2_OUTPUT_MEMORY (1 << 1)
-
-struct iss_csi2_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[CSI2_PADS_NUM];
- struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
-
- struct iss_video video_out;
- struct iss_device *iss;
-
- u8 available; /* Is the IP present on the silicon? */
-
- /* memory resources, as defined in enum iss_mem_resources */
- unsigned int regs1;
- unsigned int regs2;
- /* ISP subclock, as defined in enum iss_isp_subclk_resource */
- unsigned int subclk;
-
- u32 output; /* output to IPIPEIF, memory or both? */
- bool dpcm_decompress;
- unsigned int frame_skip;
-
- struct iss_csiphy *phy;
- struct iss_csi2_ctx_cfg contexts[ISS_CSI2_MAX_CTX_NUM + 1];
- struct iss_csi2_timing_cfg timing[2];
- struct iss_csi2_ctrl_cfg ctrl;
- enum iss_pipeline_stream_state state;
- wait_queue_head_t wait;
- atomic_t stopping;
-};
-
-void omap4iss_csi2_isr(struct iss_csi2_device *csi2);
-int omap4iss_csi2_reset(struct iss_csi2_device *csi2);
-int omap4iss_csi2_init(struct iss_device *iss);
-void omap4iss_csi2_cleanup(struct iss_device *iss);
-void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2);
-int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
- struct v4l2_device *vdev);
-#endif /* OMAP4_ISS_CSI2_H */
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c
deleted file mode 100644
index 748607f8918f..000000000000
--- a/drivers/staging/media/omap4iss/iss_csiphy.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - CSI PHY module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/regmap.h>
-
-#include "../../../../arch/arm/mach-omap2/control.h"
-
-#include "iss.h"
-#include "iss_regs.h"
-#include "iss_csiphy.h"
-
-/*
- * csiphy_lanes_config - Configuration of CSIPHY lanes.
- *
- * Updates HW configuration.
- * Called with phy->mutex taken.
- */
-static void csiphy_lanes_config(struct iss_csiphy *phy)
-{
- unsigned int i;
- u32 reg;
-
- reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG);
-
- for (i = 0; i < phy->max_data_lanes; i++) {
- reg &= ~(CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) |
- CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i + 1));
- reg |= (phy->lanes.data[i].pol ?
- CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) : 0);
- reg |= (phy->lanes.data[i].pos <<
- CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i + 1));
- }
-
- reg &= ~(CSI2_COMPLEXIO_CFG_CLOCK_POL |
- CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK);
- reg |= phy->lanes.clk.pol ? CSI2_COMPLEXIO_CFG_CLOCK_POL : 0;
- reg |= phy->lanes.clk.pos << CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT;
-
- iss_reg_write(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG, reg);
-}
-
-/*
- * csiphy_set_power
- * @power: Power state to be set.
- *
- * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
- */
-static int csiphy_set_power(struct iss_csiphy *phy, u32 power)
-{
- u32 reg;
- u8 retry_count;
-
- iss_reg_update(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG,
- CSI2_COMPLEXIO_CFG_PWD_CMD_MASK,
- power | CSI2_COMPLEXIO_CFG_PWR_AUTO);
-
- retry_count = 0;
- do {
- udelay(1);
- reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG)
- & CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK;
-
- if (reg != power >> 2)
- retry_count++;
-
- } while ((reg != power >> 2) && (retry_count < 250));
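-	/* Illustrative note: the polling loop above waits up to roughly 250 us
-	 * (at most 250 mismatched reads with a 1 us delay each) before giving up.
-	 */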
-
- if (retry_count == 250) {
- dev_err(phy->iss->dev, "CSI2 CIO set power failed!\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-/*
- * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
- *
- * Called with phy->mutex taken.
- */
-static void csiphy_dphy_config(struct iss_csiphy *phy)
-{
- u32 reg;
-
- /* Set up REGISTER0 */
- reg = phy->dphy.ths_term << REGISTER0_THS_TERM_SHIFT;
- reg |= phy->dphy.ths_settle << REGISTER0_THS_SETTLE_SHIFT;
-
- iss_reg_write(phy->iss, phy->phy_regs, REGISTER0, reg);
-
- /* Set up REGISTER1 */
- reg = phy->dphy.tclk_term << REGISTER1_TCLK_TERM_SHIFT;
- reg |= phy->dphy.tclk_miss << REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT;
- reg |= phy->dphy.tclk_settle << REGISTER1_TCLK_SETTLE_SHIFT;
- reg |= 0xb8 << REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT;
-
- iss_reg_write(phy->iss, phy->phy_regs, REGISTER1, reg);
-}
-
-/*
- * TCLK values are OK at their reset values
- */
-#define TCLK_TERM 0
-#define TCLK_MISS 1
-#define TCLK_SETTLE 14
-
-int omap4iss_csiphy_config(struct iss_device *iss,
- struct v4l2_subdev *csi2_subdev)
-{
- struct iss_csi2_device *csi2 = v4l2_get_subdevdata(csi2_subdev);
- struct iss_pipeline *pipe = to_iss_pipeline(&csi2_subdev->entity);
- struct iss_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
- struct iss_csiphy_dphy_cfg csi2phy;
- int csi2_ddrclk_khz;
- struct iss_csiphy_lanes_cfg *lanes;
- unsigned int used_lanes = 0;
- u32 cam_rx_ctrl;
- unsigned int i;
-
- lanes = &subdevs->bus.csi2.lanecfg;
-
- /*
- * SCM.CONTROL_CAMERA_RX
- * - bit [31] : CSIPHY2 lane 2 enable (4460+ only)
- * - bit [30:29] : CSIPHY2 per-lane enable (1 to 0)
- * - bit [28:24] : CSIPHY1 per-lane enable (4 to 0)
- * - bit [21] : CSIPHY2 CTRLCLK enable
- * - bit [20:19] : CSIPHY2 config: 00 d-phy, 01/10 ccp2
- * - bit [18] : CSIPHY1 CTRLCLK enable
- * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
- */
- /*
- * TODO: When implementing DT support specify the CONTROL_CAMERA_RX
- * register offset in the syscon property instead of hardcoding it.
- */
- regmap_read(iss->syscon, 0x68, &cam_rx_ctrl);
-
- if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
- cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
- OMAP4_CAMERARX_CSI21_CAMMODE_MASK);
- /* NOTE: Leave CSIPHY1 config to 0x0: D-PHY mode */
- /* Enable all lanes for now */
- cam_rx_ctrl |=
- 0x1f << OMAP4_CAMERARX_CSI21_LANEENABLE_SHIFT;
- /* Enable CTRLCLK */
- cam_rx_ctrl |= OMAP4_CAMERARX_CSI21_CTRLCLKEN_MASK;
- }
-
- if (subdevs->interface == ISS_INTERFACE_CSI2B_PHY2) {
- cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI22_LANEENABLE_MASK |
- OMAP4_CAMERARX_CSI22_CAMMODE_MASK);
- /* NOTE: Leave CSIPHY2 config to 0x0: D-PHY mode */
- /* Enable all lanes for now */
- cam_rx_ctrl |=
- 0x3 << OMAP4_CAMERARX_CSI22_LANEENABLE_SHIFT;
- /* Enable CTRLCLK */
- cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
- }
-
- regmap_write(iss->syscon, 0x68, cam_rx_ctrl);
-
- /* Reset used lane count */
- csi2->phy->used_data_lanes = 0;
-
- /* Clock and data lanes verification */
- for (i = 0; i < csi2->phy->max_data_lanes; i++) {
- if (lanes->data[i].pos == 0)
- continue;
-
- if (lanes->data[i].pol > 1 ||
- lanes->data[i].pos > (csi2->phy->max_data_lanes + 1))
- return -EINVAL;
-
- if (used_lanes & (1 << lanes->data[i].pos))
- return -EINVAL;
-
- used_lanes |= 1 << lanes->data[i].pos;
- csi2->phy->used_data_lanes++;
- }
-
- if (lanes->clk.pol > 1 ||
- lanes->clk.pos > (csi2->phy->max_data_lanes + 1))
- return -EINVAL;
-
- if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
- return -EINVAL;
-
- csi2_ddrclk_khz = pipe->external_rate / 1000
- / (2 * csi2->phy->used_data_lanes)
- * pipe->external_bpp;
-
- /*
- * THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1.
- * THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3.
- */
- csi2phy.ths_term = DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1;
- csi2phy.ths_settle = DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3;
- csi2phy.tclk_term = TCLK_TERM;
- csi2phy.tclk_miss = TCLK_MISS;
- csi2phy.tclk_settle = TCLK_SETTLE;
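-	/*
-	 * Illustrative example (assumed clock, not a driver value): with
-	 * csi2_ddrclk_khz = 400000, i.e. a 400 MHz DDR clock and a 2.5 ns
-	 * period, the formulas above give ths_term = ceil(12.5 / 2.5) - 1 = 4
-	 * and ths_settle = ceil(90 / 2.5) + 3 = 39.
-	 */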
-
- mutex_lock(&csi2->phy->mutex);
- csi2->phy->dphy = csi2phy;
- csi2->phy->lanes = *lanes;
- mutex_unlock(&csi2->phy->mutex);
-
- return 0;
-}
-
-int omap4iss_csiphy_acquire(struct iss_csiphy *phy)
-{
- int rval;
-
- mutex_lock(&phy->mutex);
-
- rval = omap4iss_csi2_reset(phy->csi2);
- if (rval)
- goto done;
-
- csiphy_dphy_config(phy);
- csiphy_lanes_config(phy);
-
- rval = csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_ON);
- if (rval)
- goto done;
-
- phy->phy_in_use = 1;
-
-done:
- mutex_unlock(&phy->mutex);
- return rval;
-}
-
-void omap4iss_csiphy_release(struct iss_csiphy *phy)
-{
- mutex_lock(&phy->mutex);
- if (phy->phy_in_use) {
- csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_OFF);
- phy->phy_in_use = 0;
- }
- mutex_unlock(&phy->mutex);
-}
-
-/*
- * omap4iss_csiphy_init - Initialize the CSI PHY frontends
- */
-int omap4iss_csiphy_init(struct iss_device *iss)
-{
- struct iss_csiphy *phy1 = &iss->csiphy1;
- struct iss_csiphy *phy2 = &iss->csiphy2;
-
- phy1->iss = iss;
- phy1->csi2 = &iss->csi2a;
- phy1->max_data_lanes = ISS_CSIPHY1_NUM_DATA_LANES;
- phy1->used_data_lanes = 0;
- phy1->cfg_regs = OMAP4_ISS_MEM_CSI2_A_REGS1;
- phy1->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE1;
- mutex_init(&phy1->mutex);
-
- phy2->iss = iss;
- phy2->csi2 = &iss->csi2b;
- phy2->max_data_lanes = ISS_CSIPHY2_NUM_DATA_LANES;
- phy2->used_data_lanes = 0;
- phy2->cfg_regs = OMAP4_ISS_MEM_CSI2_B_REGS1;
- phy2->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE2;
- mutex_init(&phy2->mutex);
-
- return 0;
-}
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.h b/drivers/staging/media/omap4iss/iss_csiphy.h
deleted file mode 100644
index e9ca43955654..000000000000
--- a/drivers/staging/media/omap4iss/iss_csiphy.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - CSI PHY module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef OMAP4_ISS_CSI_PHY_H
-#define OMAP4_ISS_CSI_PHY_H
-
-#include <media/omap4iss.h>
-
-struct iss_csi2_device;
-
-struct iss_csiphy_dphy_cfg {
- u8 ths_term;
- u8 ths_settle;
- u8 tclk_term;
- unsigned tclk_miss:1;
- u8 tclk_settle;
-};
-
-struct iss_csiphy {
- struct iss_device *iss;
- struct mutex mutex; /* serialize csiphy configuration */
- u8 phy_in_use;
- struct iss_csi2_device *csi2;
-
- /* memory resources, as defined in enum iss_mem_resources */
- unsigned int cfg_regs;
- unsigned int phy_regs;
-
- u8 max_data_lanes; /* number of CSI2 Data Lanes supported */
- u8 used_data_lanes; /* number of CSI2 Data Lanes used */
- struct iss_csiphy_lanes_cfg lanes;
- struct iss_csiphy_dphy_cfg dphy;
-};
-
-int omap4iss_csiphy_config(struct iss_device *iss,
- struct v4l2_subdev *csi2_subdev);
-int omap4iss_csiphy_acquire(struct iss_csiphy *phy);
-void omap4iss_csiphy_release(struct iss_csiphy *phy);
-int omap4iss_csiphy_init(struct iss_device *iss);
-
-#endif /* OMAP4_ISS_CSI_PHY_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
deleted file mode 100644
index f94a59299a83..000000000000
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ /dev/null
@@ -1,578 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-#include "iss.h"
-#include "iss_regs.h"
-#include "iss_ipipe.h"
-
-static struct v4l2_mbus_framefmt *
-__ipipe_get_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad,
- enum v4l2_subdev_format_whence which);
-
-static const unsigned int ipipe_fmts[] = {
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
-};
-
-/*
- * ipipe_print_status - Print current IPIPE Module register values.
- * @ipipe: Pointer to ISS ISP IPIPE device.
- *
- * Also prints other debug information stored in the IPIPE module.
- */
-#define IPIPE_PRINT_REGISTER(iss, name)\
- dev_dbg(iss->dev, "###IPIPE " #name "=0x%08x\n", \
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_##name))
-
-static void ipipe_print_status(struct iss_ipipe_device *ipipe)
-{
- struct iss_device *iss = to_iss_device(ipipe);
-
- dev_dbg(iss->dev, "-------------IPIPE Register dump-------------\n");
-
- IPIPE_PRINT_REGISTER(iss, SRC_EN);
- IPIPE_PRINT_REGISTER(iss, SRC_MODE);
- IPIPE_PRINT_REGISTER(iss, SRC_FMT);
- IPIPE_PRINT_REGISTER(iss, SRC_COL);
- IPIPE_PRINT_REGISTER(iss, SRC_VPS);
- IPIPE_PRINT_REGISTER(iss, SRC_VSZ);
- IPIPE_PRINT_REGISTER(iss, SRC_HPS);
- IPIPE_PRINT_REGISTER(iss, SRC_HSZ);
- IPIPE_PRINT_REGISTER(iss, GCK_MMR);
- IPIPE_PRINT_REGISTER(iss, YUV_PHS);
-
- dev_dbg(iss->dev, "-----------------------------------------------\n");
-}
-
-/*
- * ipipe_enable - Enable/Disable IPIPE.
- * @enable: enable flag
- *
- */
-static void ipipe_enable(struct iss_ipipe_device *ipipe, u8 enable)
-{
- struct iss_device *iss = to_iss_device(ipipe);
-
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_EN,
- IPIPE_SRC_EN_EN, enable ? IPIPE_SRC_EN_EN : 0);
-}
-
-/* -----------------------------------------------------------------------------
- * Format- and pipeline-related configuration helpers
- */
-
-static void ipipe_configure(struct iss_ipipe_device *ipipe)
-{
- struct iss_device *iss = to_iss_device(ipipe);
- struct v4l2_mbus_framefmt *format;
-
- /* IPIPE_PAD_SINK */
- format = &ipipe->formats[IPIPE_PAD_SINK];
-
- /* NOTE: Currently just supporting pipeline IN: RGB, OUT: YUV422 */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_FMT,
- IPIPE_SRC_FMT_RAW2YUV);
-
- /* Enable YUV444 -> YUV422 conversion */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_YUV_PHS,
- IPIPE_YUV_PHS_LPF);
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VPS, 0);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HPS, 0);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VSZ,
- (format->height - 2) & IPIPE_SRC_VSZ_MASK);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HSZ,
- (format->width - 1) & IPIPE_SRC_HSZ_MASK);
-
- /* Ignore ipipeif_wrt signal, and operate on-the-fly. */
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_MODE,
- IPIPE_SRC_MODE_WRT | IPIPE_SRC_MODE_OST);
-
- /* HACK: Values tuned for Ducati SW (OV) */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_COL,
- IPIPE_SRC_COL_EE_B | IPIPE_SRC_COL_EO_GB |
- IPIPE_SRC_COL_OE_GR | IPIPE_SRC_COL_OO_R);
-
- /* IPIPE_PAD_SOURCE_VP */
- format = &ipipe->formats[IPIPE_PAD_SOURCE_VP];
- /* Do nothing? */
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 subdev operations
- */
-
-/*
- * ipipe_set_stream - Enable/Disable streaming on the IPIPE module
- * @sd: ISP IPIPE V4L2 subdevice
- * @enable: Enable/disable stream
- */
-static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct iss_device *iss = to_iss_device(ipipe);
- int ret = 0;
-
- if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED) {
- if (enable == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
-
- omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
-
- /* Enable clk_arm_g0 */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_MMR,
- IPIPE_GCK_MMR_REG);
-
- /* Enable clk_pix_g[3:0] */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_PIX,
- IPIPE_GCK_PIX_G3 | IPIPE_GCK_PIX_G2 |
- IPIPE_GCK_PIX_G1 | IPIPE_GCK_PIX_G0);
- }
-
- switch (enable) {
- case ISS_PIPELINE_STREAM_CONTINUOUS:
-
- ipipe_configure(ipipe);
- ipipe_print_status(ipipe);
-
- atomic_set(&ipipe->stopping, 0);
- ipipe_enable(ipipe, 1);
- break;
-
- case ISS_PIPELINE_STREAM_STOPPED:
- if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
- if (omap4iss_module_sync_idle(&sd->entity, &ipipe->wait,
- &ipipe->stopping))
- ret = -ETIMEDOUT;
-
- ipipe_enable(ipipe, 0);
- omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
- break;
- }
-
- ipipe->state = enable;
- return ret;
-}
-
-static struct v4l2_mbus_framefmt *
-__ipipe_get_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipe->subdev, cfg, pad);
-
- return &ipipe->formats[pad];
-}
-
-/*
- * ipipe_try_format - Try video format on a pad
- * @ipipe: ISS IPIPE device
- * @cfg: V4L2 subdev pad config
- * @pad: Pad number
- * @fmt: Format
- */
-static void
-ipipe_try_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- struct v4l2_mbus_framefmt *format;
- unsigned int width = fmt->width;
- unsigned int height = fmt->height;
- unsigned int i;
-
- switch (pad) {
- case IPIPE_PAD_SINK:
- for (i = 0; i < ARRAY_SIZE(ipipe_fmts); i++) {
- if (fmt->code == ipipe_fmts[i])
- break;
- }
-
- /* If not found, use SGRBG10 as default */
- if (i >= ARRAY_SIZE(ipipe_fmts))
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
-
- /* Clamp the input size. */
- fmt->width = clamp_t(u32, width, 1, 8192);
- fmt->height = clamp_t(u32, height, 1, 8192);
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- break;
-
- case IPIPE_PAD_SOURCE_VP:
- format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SINK, which);
- memcpy(fmt, format, sizeof(*fmt));
-
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
- fmt->width = clamp_t(u32, width, 32, fmt->width);
- fmt->height = clamp_t(u32, height, 32, fmt->height);
- fmt->colorspace = V4L2_COLORSPACE_JPEG;
- break;
- }
-
- fmt->field = V4L2_FIELD_NONE;
-}
-
-/*
- * ipipe_enum_mbus_code - Handle pixel format enumeration
- * @sd : pointer to v4l2 subdev structure
- * @cfg : V4L2 subdev pad config
- * @code : pointer to v4l2_subdev_mbus_code_enum structure
- * return -EINVAL or zero on success
- */
-static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- switch (code->pad) {
- case IPIPE_PAD_SINK:
- if (code->index >= ARRAY_SIZE(ipipe_fmts))
- return -EINVAL;
-
- code->code = ipipe_fmts[code->index];
- break;
-
- case IPIPE_PAD_SOURCE_VP:
- /* FIXME: Forced format conversion inside IPIPE ? */
- if (code->index != 0)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_UYVY8_1X16;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * ipipe_get_format - Retrieve the video format on a pad
- * @sd : ISP IPIPE V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: Format
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int ipipe_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- fmt->format = *format;
- return 0;
-}
-
-/*
- * ipipe_set_format - Set the video format on a pad
- * @sd : ISP IPIPE V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: Format
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int ipipe_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- ipipe_try_format(ipipe, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- /* Propagate the format from sink to source */
- if (fmt->pad == IPIPE_PAD_SINK) {
- format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP,
- fmt->which);
- *format = fmt->format;
- ipipe_try_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP, format,
- fmt->which);
- }
-
- return 0;
-}
-
-static int ipipe_link_validate(struct v4l2_subdev *sd, struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt)
-{
- /* Check if the two ends match */
- if (source_fmt->format.width != sink_fmt->format.width ||
- source_fmt->format.height != sink_fmt->format.height)
- return -EPIPE;
-
- if (source_fmt->format.code != sink_fmt->format.code)
- return -EPIPE;
-
- return 0;
-}
-
-/*
- * ipipe_init_formats - Initialize formats on all pads
- * @sd: ISP IPIPE V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
- */
-static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPE_PAD_SINK;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
- format.format.width = 4096;
- format.format.height = 4096;
- ipipe_set_format(sd, fh ? fh->pad : NULL, &format);
-
- return 0;
-}
-
-/* V4L2 subdev video operations */
-static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
- .s_stream = ipipe_set_stream,
-};
-
-/* V4L2 subdev pad operations */
-static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
- .enum_mbus_code = ipipe_enum_mbus_code,
- .enum_frame_size = ipipe_enum_frame_size,
- .get_fmt = ipipe_get_format,
- .set_fmt = ipipe_set_format,
- .link_validate = ipipe_link_validate,
-};
-
-/* V4L2 subdev operations */
-static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
- .video = &ipipe_v4l2_video_ops,
- .pad = &ipipe_v4l2_pad_ops,
-};
-
-/* V4L2 subdev internal operations */
-static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
- .open = ipipe_init_formats,
-};
-
-/* -----------------------------------------------------------------------------
- * Media entity operations
- */
-
-/*
- * ipipe_link_setup - Setup IPIPE connections
- * @entity: IPIPE media entity
- * @local: Pad at the local end of the link
- * @remote: Pad at the remote end of the link
- * @flags: Link flags
- *
- * return -EINVAL or zero on success
- */
-static int ipipe_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct iss_device *iss = to_iss_device(ipipe);
-
- switch (local->index | media_entity_type(remote->entity)) {
- case IPIPE_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- /* Read from IPIPEIF. */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipe->input = IPIPE_INPUT_NONE;
- break;
- }
-
- if (ipipe->input != IPIPE_INPUT_NONE)
- return -EBUSY;
-
- if (remote->entity == &iss->ipipeif.subdev.entity)
- ipipe->input = IPIPE_INPUT_IPIPEIF;
-
- break;
-
- case IPIPE_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
- /* Send to RESIZER */
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (ipipe->output & ~IPIPE_OUTPUT_VP)
- return -EBUSY;
- ipipe->output |= IPIPE_OUTPUT_VP;
- } else {
- ipipe->output &= ~IPIPE_OUTPUT_VP;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* media operations */
-static const struct media_entity_operations ipipe_media_ops = {
- .link_setup = ipipe_link_setup,
- .link_validate = v4l2_subdev_link_validate,
-};
-
-/*
- * ipipe_init_entities - Initialize V4L2 subdev and media entity
- * @ipipe: ISS ISP IPIPE module
- *
- * Return 0 on success and a negative error code on failure.
- */
-static int ipipe_init_entities(struct iss_ipipe_device *ipipe)
-{
- struct v4l2_subdev *sd = &ipipe->subdev;
- struct media_pad *pads = ipipe->pads;
- struct media_entity *me = &sd->entity;
- int ret;
-
- ipipe->input = IPIPE_INPUT_NONE;
-
- v4l2_subdev_init(sd, &ipipe_v4l2_ops);
- sd->internal_ops = &ipipe_v4l2_internal_ops;
- strlcpy(sd->name, "OMAP4 ISS ISP IPIPE", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
- v4l2_set_subdevdata(sd, ipipe);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[IPIPE_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
-
- me->ops = &ipipe_media_ops;
- ret = media_entity_init(me, IPIPE_PADS_NUM, pads, 0);
- if (ret < 0)
- return ret;
-
- ipipe_init_formats(sd, NULL);
-
- return 0;
-}
-
-void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe)
-{
- v4l2_device_unregister_subdev(&ipipe->subdev);
-}
-
-int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video node. */
- ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap4iss_ipipe_unregister_entities(ipipe);
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
- * ISP IPIPE initialisation and cleanup
- */
-
-/*
- * omap4iss_ipipe_init - IPIPE module initialization.
- * @iss: Device pointer specific to the OMAP4 ISS.
- *
- * TODO: Get the initialisation values from platform data.
- *
- * Return 0 on success or a negative error code otherwise.
- */
-int omap4iss_ipipe_init(struct iss_device *iss)
-{
- struct iss_ipipe_device *ipipe = &iss->ipipe;
-
- ipipe->state = ISS_PIPELINE_STREAM_STOPPED;
- init_waitqueue_head(&ipipe->wait);
-
- return ipipe_init_entities(ipipe);
-}
-
-/*
- * omap4iss_ipipe_cleanup - IPIPE module cleanup.
- * @iss: Device pointer specific to the OMAP4 ISS.
- */
-void omap4iss_ipipe_cleanup(struct iss_device *iss)
-{
- struct iss_ipipe_device *ipipe = &iss->ipipe;
-
- media_entity_cleanup(&ipipe->subdev.entity);
-}
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.h b/drivers/staging/media/omap4iss/iss_ipipe.h
deleted file mode 100644
index c22d9041f2a5..000000000000
--- a/drivers/staging/media/omap4iss/iss_ipipe.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef OMAP4_ISS_IPIPE_H
-#define OMAP4_ISS_IPIPE_H
-
-#include "iss_video.h"
-
-enum ipipe_input_entity {
- IPIPE_INPUT_NONE,
- IPIPE_INPUT_IPIPEIF,
-};
-
-#define IPIPE_OUTPUT_VP (1 << 0)
-
-/* Sink and source IPIPE pads */
-#define IPIPE_PAD_SINK 0
-#define IPIPE_PAD_SOURCE_VP 1
-#define IPIPE_PADS_NUM 2
-
-/*
- * struct iss_ipipe_device - Structure for the IPIPE module to store its own
- * information
- * @subdev: V4L2 subdevice
- * @pads: Sink and source media entity pads
- * @formats: Active video formats
- * @input: Active input
- * @output: Active outputs
- * @error: A hardware error occurred during capture
- * @state: Streaming state
- * @wait: Wait queue used to stop the module
- * @stopping: Stopping state
- */
-struct iss_ipipe_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[IPIPE_PADS_NUM];
- struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
-
- enum ipipe_input_entity input;
- unsigned int output;
- unsigned int error;
-
- enum iss_pipeline_stream_state state;
- wait_queue_head_t wait;
- atomic_t stopping;
-};
-
-struct iss_device;
-
-int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
- struct v4l2_device *vdev);
-void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe);
-
-int omap4iss_ipipe_init(struct iss_device *iss);
-void omap4iss_ipipe_cleanup(struct iss_device *iss);
-
-#endif /* OMAP4_ISS_IPIPE_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
deleted file mode 100644
index c0da13d55865..000000000000
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ /dev/null
@@ -1,832 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-#include "iss.h"
-#include "iss_regs.h"
-#include "iss_ipipeif.h"
-
-static const unsigned int ipipeif_fmts[] = {
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_UYVY8_1X16,
- MEDIA_BUS_FMT_YUYV8_1X16,
-};
-
-/*
- * ipipeif_print_status - Print current IPIPEIF Module register values.
- * @ipipeif: Pointer to ISS ISP IPIPEIF device.
- *
- * Also prints other debug information stored in the IPIPEIF module.
- */
-#define IPIPEIF_PRINT_REGISTER(iss, name)\
- dev_dbg(iss->dev, "###IPIPEIF " #name "=0x%08x\n", \
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_##name))
-
-#define ISIF_PRINT_REGISTER(iss, name)\
- dev_dbg(iss->dev, "###ISIF " #name "=0x%08x\n", \
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_##name))
-
-#define ISP5_PRINT_REGISTER(iss, name)\
- dev_dbg(iss->dev, "###ISP5 " #name "=0x%08x\n", \
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_##name))
-
-static void ipipeif_print_status(struct iss_ipipeif_device *ipipeif)
-{
- struct iss_device *iss = to_iss_device(ipipeif);
-
- dev_dbg(iss->dev, "-------------IPIPEIF Register dump-------------\n");
-
- IPIPEIF_PRINT_REGISTER(iss, CFG1);
- IPIPEIF_PRINT_REGISTER(iss, CFG2);
-
- ISIF_PRINT_REGISTER(iss, SYNCEN);
- ISIF_PRINT_REGISTER(iss, CADU);
- ISIF_PRINT_REGISTER(iss, CADL);
- ISIF_PRINT_REGISTER(iss, MODESET);
- ISIF_PRINT_REGISTER(iss, CCOLP);
- ISIF_PRINT_REGISTER(iss, SPH);
- ISIF_PRINT_REGISTER(iss, LNH);
- ISIF_PRINT_REGISTER(iss, LNV);
- ISIF_PRINT_REGISTER(iss, VDINT(0));
- ISIF_PRINT_REGISTER(iss, HSIZE);
-
- ISP5_PRINT_REGISTER(iss, SYSCONFIG);
- ISP5_PRINT_REGISTER(iss, CTRL);
- ISP5_PRINT_REGISTER(iss, IRQSTATUS(0));
- ISP5_PRINT_REGISTER(iss, IRQENABLE_SET(0));
- ISP5_PRINT_REGISTER(iss, IRQENABLE_CLR(0));
-
- dev_dbg(iss->dev, "-----------------------------------------------\n");
-}
-
-static void ipipeif_write_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
-{
- struct iss_device *iss = to_iss_device(ipipeif);
-
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
- ISIF_SYNCEN_DWEN, enable ? ISIF_SYNCEN_DWEN : 0);
-}
-
-/*
- * ipipeif_enable - Enable/Disable IPIPEIF.
- * @enable: enable flag
- *
- */
-static void ipipeif_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
-{
- struct iss_device *iss = to_iss_device(ipipeif);
-
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
- ISIF_SYNCEN_SYEN, enable ? ISIF_SYNCEN_SYEN : 0);
-}
-
-/* -----------------------------------------------------------------------------
- * Format- and pipeline-related configuration helpers
- */
-
-/*
- * ipipeif_set_outaddr - Set memory address to save output image
- * @ipipeif: Pointer to ISP IPIPEIF device.
- * @addr: 32-bit memory address aligned on 32 byte boundary.
- *
- * Sets the memory address where the output will be saved.
- */
-static void ipipeif_set_outaddr(struct iss_ipipeif_device *ipipeif, u32 addr)
-{
- struct iss_device *iss = to_iss_device(ipipeif);
-
-	/* Save the address split into Base Address H & L */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADU,
- (addr >> (16 + 5)) & ISIF_CADU_MASK);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADL,
- (addr >> 5) & ISIF_CADL_MASK);
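-	/*
-	 * Illustrative example (arbitrary address): for the 32-byte aligned
-	 * addr 0x823456e0, CADU receives addr >> 21 = 0x411 and CADL receives
-	 * addr >> 5 = 0x411a2b7, each masked to its register width.
-	 */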
-}
-
-static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
-{
- struct iss_device *iss = to_iss_device(ipipeif);
- const struct iss_format_info *info;
- struct v4l2_mbus_framefmt *format;
- u32 isif_ccolp = 0;
-
- omap4iss_configure_bridge(iss, ipipeif->input);
-
- /* IPIPEIF_PAD_SINK */
- format = &ipipeif->formats[IPIPEIF_PAD_SINK];
-
- /* IPIPEIF with YUV422 input from ISIF */
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG1,
- IPIPEIF_CFG1_INPSRC1_MASK | IPIPEIF_CFG1_INPSRC2_MASK);
-
- /* Select ISIF/IPIPEIF input format */
- switch (format->code) {
- case MEDIA_BUS_FMT_UYVY8_1X16:
- case MEDIA_BUS_FMT_YUYV8_1X16:
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
- ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
- ISIF_MODESET_CCDW_MASK,
- ISIF_MODESET_INPMOD_YCBCR16);
-
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
- IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16);
-
- break;
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- isif_ccolp = ISIF_CCOLP_CP0_F0_GR |
- ISIF_CCOLP_CP1_F0_R |
- ISIF_CCOLP_CP2_F0_B |
- ISIF_CCOLP_CP3_F0_GB;
- goto cont_raw;
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- isif_ccolp = ISIF_CCOLP_CP0_F0_R |
- ISIF_CCOLP_CP1_F0_GR |
- ISIF_CCOLP_CP2_F0_GB |
- ISIF_CCOLP_CP3_F0_B;
- goto cont_raw;
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- isif_ccolp = ISIF_CCOLP_CP0_F0_B |
- ISIF_CCOLP_CP1_F0_GB |
- ISIF_CCOLP_CP2_F0_GR |
- ISIF_CCOLP_CP3_F0_R;
- goto cont_raw;
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- isif_ccolp = ISIF_CCOLP_CP0_F0_GB |
- ISIF_CCOLP_CP1_F0_B |
- ISIF_CCOLP_CP2_F0_R |
- ISIF_CCOLP_CP3_F0_GR;
-cont_raw:
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
- IPIPEIF_CFG2_YUV16);
-
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
- ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
- ISIF_MODESET_CCDW_MASK, ISIF_MODESET_INPMOD_RAW |
- ISIF_MODESET_CCDW_2BIT);
-
- info = omap4iss_video_format_info(format->code);
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CGAMMAWD,
- ISIF_CGAMMAWD_GWDI_MASK,
- ISIF_CGAMMAWD_GWDI(info->bpp));
-
- /* Set RAW Bayer pattern */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CCOLP,
- isif_ccolp);
- break;
- }
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SPH, 0 & ISIF_SPH_MASK);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNH,
- (format->width - 1) & ISIF_LNH_MASK);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNV,
- (format->height - 1) & ISIF_LNV_MASK);
-
- /* Generate ISIF0 on the last line of the image */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_VDINT(0),
- format->height - 1);
-
- /* IPIPEIF_PAD_SOURCE_ISIF_SF */
- format = &ipipeif->formats[IPIPEIF_PAD_SOURCE_ISIF_SF];
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_HSIZE,
- (ipipeif->video_out.bpl_value >> 5) &
- ISIF_HSIZE_HSIZE_MASK);
-
- /* IPIPEIF_PAD_SOURCE_VP */
- /* Do nothing? */
-}
-
-/* -----------------------------------------------------------------------------
- * Interrupt handling
- */
-
-static void ipipeif_isr_buffer(struct iss_ipipeif_device *ipipeif)
-{
- struct iss_buffer *buffer;
-
-	/* The ISIF generates VD0 interrupts even when writes are disabled
-	 * (so we have to deal with them anyway). Disabling the ISIF when no
-	 * buffer is available is thus not enough; we need to handle the
-	 * situation explicitly.
- */
- if (list_empty(&ipipeif->video_out.dmaqueue))
- return;
-
- ipipeif_write_enable(ipipeif, 0);
-
- buffer = omap4iss_video_buffer_next(&ipipeif->video_out);
- if (buffer == NULL)
- return;
-
- ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
-
- ipipeif_write_enable(ipipeif, 1);
-}
-
-/*
- * omap4iss_ipipeif_isr - Configure ipipeif during interframe time.
- * @ipipeif: Pointer to ISP IPIPEIF device.
- * @events: IPIPEIF events
- */
-void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events)
-{
- if (omap4iss_module_sync_is_stopping(&ipipeif->wait,
- &ipipeif->stopping))
- return;
-
- if ((events & ISP5_IRQ_ISIF_INT(0)) &&
- (ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
- ipipeif_isr_buffer(ipipeif);
-}
-
-/* -----------------------------------------------------------------------------
- * ISP video operations
- */
-
-static int ipipeif_video_queue(struct iss_video *video,
- struct iss_buffer *buffer)
-{
- struct iss_ipipeif_device *ipipeif = container_of(video,
- struct iss_ipipeif_device, video_out);
-
- if (!(ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
- return -ENODEV;
-
- ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
-
- /*
-	 * If streaming was enabled before there was a buffer queued, or an
-	 * underrun happened in the ISR, the hardware was not enabled and the
-	 * DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
-	 * Enable it now.
- */
- if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
- if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
- ipipeif_write_enable(ipipeif, 1);
- ipipeif_enable(ipipeif, 1);
- iss_video_dmaqueue_flags_clr(video);
- }
-
- return 0;
-}
-
-static const struct iss_video_operations ipipeif_video_ops = {
- .queue = ipipeif_video_queue,
-};
-
-/* -----------------------------------------------------------------------------
- * V4L2 subdev operations
- */
-
-#define IPIPEIF_DRV_SUBCLK_MASK (OMAP4_ISS_ISP_SUBCLK_IPIPEIF |\
- OMAP4_ISS_ISP_SUBCLK_ISIF)
-/*
- * ipipeif_set_stream - Enable/Disable streaming on the IPIPEIF module
- * @sd: ISP IPIPEIF V4L2 subdevice
- * @enable: Enable/disable stream
- */
-static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct iss_device *iss = to_iss_device(ipipeif);
- struct iss_video *video_out = &ipipeif->video_out;
- int ret = 0;
-
- if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED) {
- if (enable == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
-
- omap4iss_isp_subclk_enable(iss, IPIPEIF_DRV_SUBCLK_MASK);
- }
-
- switch (enable) {
- case ISS_PIPELINE_STREAM_CONTINUOUS:
-
- ipipeif_configure(ipipeif);
- ipipeif_print_status(ipipeif);
-
- /*
- * When outputting to memory with no buffer available, let the
- * buffer queue handler start the hardware. A DMA queue flag
- * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
- * a buffer available.
- */
- if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY &&
- !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
- break;
-
- atomic_set(&ipipeif->stopping, 0);
- if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
- ipipeif_write_enable(ipipeif, 1);
- ipipeif_enable(ipipeif, 1);
- iss_video_dmaqueue_flags_clr(video_out);
- break;
-
- case ISS_PIPELINE_STREAM_STOPPED:
- if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
- if (omap4iss_module_sync_idle(&sd->entity, &ipipeif->wait,
- &ipipeif->stopping))
- ret = -ETIMEDOUT;
-
- if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
- ipipeif_write_enable(ipipeif, 0);
- ipipeif_enable(ipipeif, 0);
- omap4iss_isp_subclk_disable(iss, IPIPEIF_DRV_SUBCLK_MASK);
- iss_video_dmaqueue_flags_clr(video_out);
- break;
- }
-
- ipipeif->state = enable;
- return ret;
-}
-
-static struct v4l2_mbus_framefmt *
-__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
- return &ipipeif->formats[pad];
-}
-
-/*
- * ipipeif_try_format - Try video format on a pad
- * @ipipeif: ISS IPIPEIF device
- * @cfg: V4L2 subdev pad config
- * @pad: Pad number
- * @fmt: Format
- */
-static void
-ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- struct v4l2_mbus_framefmt *format;
- unsigned int width = fmt->width;
- unsigned int height = fmt->height;
- unsigned int i;
-
- switch (pad) {
- case IPIPEIF_PAD_SINK:
- /* TODO: If the IPIPEIF output formatter pad is connected
- * directly to the resizer, only YUV formats can be used.
- */
- for (i = 0; i < ARRAY_SIZE(ipipeif_fmts); i++) {
- if (fmt->code == ipipeif_fmts[i])
- break;
- }
-
- /* If not found, use SGRBG10 as default */
- if (i >= ARRAY_SIZE(ipipeif_fmts))
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
-
- /* Clamp the input size. */
- fmt->width = clamp_t(u32, width, 1, 8192);
- fmt->height = clamp_t(u32, height, 1, 8192);
- break;
-
- case IPIPEIF_PAD_SOURCE_ISIF_SF:
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
- which);
- memcpy(fmt, format, sizeof(*fmt));
-
- /* The data formatter truncates the number of horizontal output
- * pixels to a multiple of 16. To avoid clipping data, allow
- * callers to request an output size bigger than the input size
- * up to the nearest multiple of 16.
- */
- fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
- fmt->width &= ~15;
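-	/*
-	 * Illustrative example (assumed sizes): with a 1000-pixel wide sink
-	 * format, a requested width of 1008 is kept as-is (the upper clamp is
-	 * (1000 + 15) & ~15 = 1008), while a request of 1000 is truncated to
-	 * 992.
-	 */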
- fmt->height = clamp_t(u32, height, 32, fmt->height);
- break;
-
- case IPIPEIF_PAD_SOURCE_VP:
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
- which);
- memcpy(fmt, format, sizeof(*fmt));
-
- fmt->width = clamp_t(u32, width, 32, fmt->width);
- fmt->height = clamp_t(u32, height, 32, fmt->height);
- break;
- }
-
-	/* Data is written to memory unpacked; each 10-bit or 12-bit pixel is
-	 * stored in 2 bytes.
- */
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->field = V4L2_FIELD_NONE;
-}
-
-/*
- * ipipeif_enum_mbus_code - Handle pixel format enumeration
- * @sd : pointer to v4l2 subdev structure
- * @cfg : V4L2 subdev pad config
- * @code : pointer to v4l2_subdev_mbus_code_enum structure
- * return -EINVAL or zero on success
- */
-static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- switch (code->pad) {
- case IPIPEIF_PAD_SINK:
- if (code->index >= ARRAY_SIZE(ipipeif_fmts))
- return -EINVAL;
-
- code->code = ipipeif_fmts[code->index];
- break;
-
- case IPIPEIF_PAD_SOURCE_ISIF_SF:
- case IPIPEIF_PAD_SOURCE_VP:
- /* No format conversion inside IPIPEIF */
- if (code->index != 0)
- return -EINVAL;
-
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
- code->which);
-
- code->code = format->code;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * ipipeif_get_format - Retrieve the video format on a pad
- * @sd : ISP IPIPEIF V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: Format
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int ipipeif_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- fmt->format = *format;
- return 0;
-}
-
-/*
- * ipipeif_set_format - Set the video format on a pad
- * @sd : ISP IPIPEIF V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: Format
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int ipipeif_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- /* Propagate the format from sink to source */
- if (fmt->pad == IPIPEIF_PAD_SINK) {
- format = __ipipeif_get_format(ipipeif, cfg,
- IPIPEIF_PAD_SOURCE_ISIF_SF,
- fmt->which);
- *format = fmt->format;
- ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_ISIF_SF,
- format, fmt->which);
-
- format = __ipipeif_get_format(ipipeif, cfg,
- IPIPEIF_PAD_SOURCE_VP,
- fmt->which);
- *format = fmt->format;
- ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_VP, format,
- fmt->which);
- }
-
- return 0;
-}
-
-static int ipipeif_link_validate(struct v4l2_subdev *sd,
- struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt)
-{
- /* Check if the two ends match */
- if (source_fmt->format.width != sink_fmt->format.width ||
- source_fmt->format.height != sink_fmt->format.height)
- return -EPIPE;
-
- if (source_fmt->format.code != sink_fmt->format.code)
- return -EPIPE;
-
- return 0;
-}
-
-/*
- * ipipeif_init_formats - Initialize formats on all pads
- * @sd: ISP IPIPEIF V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
- */
-static int ipipeif_init_formats(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPEIF_PAD_SINK;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
- format.format.width = 4096;
- format.format.height = 4096;
- ipipeif_set_format(sd, fh ? fh->pad : NULL, &format);
-
- return 0;
-}
-
-/* V4L2 subdev video operations */
-static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
- .s_stream = ipipeif_set_stream,
-};
-
-/* V4L2 subdev pad operations */
-static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
- .enum_mbus_code = ipipeif_enum_mbus_code,
- .enum_frame_size = ipipeif_enum_frame_size,
- .get_fmt = ipipeif_get_format,
- .set_fmt = ipipeif_set_format,
- .link_validate = ipipeif_link_validate,
-};
-
-/* V4L2 subdev operations */
-static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
- .video = &ipipeif_v4l2_video_ops,
- .pad = &ipipeif_v4l2_pad_ops,
-};
-
-/* V4L2 subdev internal operations */
-static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
- .open = ipipeif_init_formats,
-};
-
-/* -----------------------------------------------------------------------------
- * Media entity operations
- */
-
-/*
- * ipipeif_link_setup - Setup IPIPEIF connections
- * @entity: IPIPEIF media entity
- * @local: Pad at the local end of the link
- * @remote: Pad at the remote end of the link
- * @flags: Link flags
- *
- * return -EINVAL or zero on success
- */
-static int ipipeif_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct iss_device *iss = to_iss_device(ipipeif);
-
- switch (local->index | media_entity_type(remote->entity)) {
- case IPIPEIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- /* Read from the sensor CSI2a or CSI2b. */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipeif->input = IPIPEIF_INPUT_NONE;
- break;
- }
-
- if (ipipeif->input != IPIPEIF_INPUT_NONE)
- return -EBUSY;
-
- if (remote->entity == &iss->csi2a.subdev.entity)
- ipipeif->input = IPIPEIF_INPUT_CSI2A;
- else if (remote->entity == &iss->csi2b.subdev.entity)
- ipipeif->input = IPIPEIF_INPUT_CSI2B;
-
- break;
-
- case IPIPEIF_PAD_SOURCE_ISIF_SF | MEDIA_ENT_T_DEVNODE:
- /* Write to memory */
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (ipipeif->output & ~IPIPEIF_OUTPUT_MEMORY)
- return -EBUSY;
- ipipeif->output |= IPIPEIF_OUTPUT_MEMORY;
- } else {
- ipipeif->output &= ~IPIPEIF_OUTPUT_MEMORY;
- }
- break;
-
- case IPIPEIF_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
- /* Send to IPIPE/RESIZER */
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (ipipeif->output & ~IPIPEIF_OUTPUT_VP)
- return -EBUSY;
- ipipeif->output |= IPIPEIF_OUTPUT_VP;
- } else {
- ipipeif->output &= ~IPIPEIF_OUTPUT_VP;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* media operations */
-static const struct media_entity_operations ipipeif_media_ops = {
- .link_setup = ipipeif_link_setup,
- .link_validate = v4l2_subdev_link_validate,
-};
-
-/*
- * ipipeif_init_entities - Initialize V4L2 subdev and media entity
- * @ipipeif: ISS ISP IPIPEIF module
- *
- * Return 0 on success and a negative error code on failure.
- */
-static int ipipeif_init_entities(struct iss_ipipeif_device *ipipeif)
-{
- struct v4l2_subdev *sd = &ipipeif->subdev;
- struct media_pad *pads = ipipeif->pads;
- struct media_entity *me = &sd->entity;
- int ret;
-
- ipipeif->input = IPIPEIF_INPUT_NONE;
-
- v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
- sd->internal_ops = &ipipeif_v4l2_internal_ops;
- strlcpy(sd->name, "OMAP4 ISS ISP IPIPEIF", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
- v4l2_set_subdevdata(sd, ipipeif);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[IPIPEIF_PAD_SOURCE_ISIF_SF].flags = MEDIA_PAD_FL_SOURCE;
- pads[IPIPEIF_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
-
- me->ops = &ipipeif_media_ops;
- ret = media_entity_init(me, IPIPEIF_PADS_NUM, pads, 0);
- if (ret < 0)
- return ret;
-
- ipipeif_init_formats(sd, NULL);
-
- ipipeif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ipipeif->video_out.ops = &ipipeif_video_ops;
- ipipeif->video_out.iss = to_iss_device(ipipeif);
- ipipeif->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
- ipipeif->video_out.bpl_alignment = 32;
- ipipeif->video_out.bpl_zero_padding = 1;
- ipipeif->video_out.bpl_max = 0x1ffe0;
-
- ret = omap4iss_video_init(&ipipeif->video_out, "ISP IPIPEIF");
- if (ret < 0)
- return ret;
-
- /* Connect the IPIPEIF subdev to the video node. */
- ret = media_entity_create_link(&ipipeif->subdev.entity,
- IPIPEIF_PAD_SOURCE_ISIF_SF,
- &ipipeif->video_out.video.entity, 0, 0);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif)
-{
- v4l2_device_unregister_subdev(&ipipeif->subdev);
- omap4iss_video_unregister(&ipipeif->video_out);
-}
-
-int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video node. */
- ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap4iss_video_register(&ipipeif->video_out, vdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap4iss_ipipeif_unregister_entities(ipipeif);
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
- * ISP IPIPEIF initialisation and cleanup
- */
-
-/*
- * omap4iss_ipipeif_init - IPIPEIF module initialization.
- * @iss: Device pointer specific to the OMAP4 ISS.
- *
- * TODO: Get the initialisation values from platform data.
- *
- * Return 0 on success or a negative error code otherwise.
- */
-int omap4iss_ipipeif_init(struct iss_device *iss)
-{
- struct iss_ipipeif_device *ipipeif = &iss->ipipeif;
-
- ipipeif->state = ISS_PIPELINE_STREAM_STOPPED;
- init_waitqueue_head(&ipipeif->wait);
-
- return ipipeif_init_entities(ipipeif);
-}
-
-/*
- * omap4iss_ipipeif_cleanup - IPIPEIF module cleanup.
- * @iss: Device pointer specific to the OMAP4 ISS.
- */
-void omap4iss_ipipeif_cleanup(struct iss_device *iss)
-{
- struct iss_ipipeif_device *ipipeif = &iss->ipipeif;
-
- media_entity_cleanup(&ipipeif->subdev.entity);
-}
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.h b/drivers/staging/media/omap4iss/iss_ipipeif.h
deleted file mode 100644
index cbdccb982eee..000000000000
--- a/drivers/staging/media/omap4iss/iss_ipipeif.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef OMAP4_ISS_IPIPEIF_H
-#define OMAP4_ISS_IPIPEIF_H
-
-#include "iss_video.h"
-
-enum ipipeif_input_entity {
- IPIPEIF_INPUT_NONE,
- IPIPEIF_INPUT_CSI2A,
- IPIPEIF_INPUT_CSI2B
-};
-
-#define IPIPEIF_OUTPUT_MEMORY (1 << 0)
-#define IPIPEIF_OUTPUT_VP (1 << 1)
-
-/* Sink and source IPIPEIF pads */
-#define IPIPEIF_PAD_SINK 0
-#define IPIPEIF_PAD_SOURCE_ISIF_SF 1
-#define IPIPEIF_PAD_SOURCE_VP 2
-#define IPIPEIF_PADS_NUM 3
-
-/*
- * struct iss_ipipeif_device - Structure for the IPIPEIF module to store its own
- * information
- * @subdev: V4L2 subdevice
- * @pads: Sink and source media entity pads
- * @formats: Active video formats
- * @input: Active input
- * @output: Active outputs
- * @video_out: Output video node
- * @error: A hardware error occurred during capture
- * @state: Streaming state
- * @wait: Wait queue used to stop the module
- * @stopping: Stopping state
- */
-struct iss_ipipeif_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[IPIPEIF_PADS_NUM];
- struct v4l2_mbus_framefmt formats[IPIPEIF_PADS_NUM];
-
- enum ipipeif_input_entity input;
- unsigned int output;
- struct iss_video video_out;
- unsigned int error;
-
- enum iss_pipeline_stream_state state;
- wait_queue_head_t wait;
- atomic_t stopping;
-};
-
-struct iss_device;
-
-int omap4iss_ipipeif_init(struct iss_device *iss);
-void omap4iss_ipipeif_cleanup(struct iss_device *iss);
-int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
- struct v4l2_device *vdev);
-void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif);
-
-int omap4iss_ipipeif_busy(struct iss_ipipeif_device *ipipeif);
-void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events);
-void omap4iss_ipipeif_restore_context(struct iss_device *iss);
-void omap4iss_ipipeif_max_rate(struct iss_ipipeif_device *ipipeif,
- unsigned int *max_rate);
-
-#endif /* OMAP4_ISS_IPIPEIF_H */
diff --git a/drivers/staging/media/omap4iss/iss_regs.h b/drivers/staging/media/omap4iss/iss_regs.h
deleted file mode 100644
index d2b6b6ae9174..000000000000
--- a/drivers/staging/media/omap4iss/iss_regs.h
+++ /dev/null
@@ -1,903 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - Register defines
- *
- * Copyright (C) 2012 Texas Instruments.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _OMAP4_ISS_REGS_H_
-#define _OMAP4_ISS_REGS_H_
-
-/* ISS */
-#define ISS_HL_REVISION 0x0
-
-#define ISS_HL_SYSCONFIG 0x10
-#define ISS_HL_SYSCONFIG_IDLEMODE_SHIFT 2
-#define ISS_HL_SYSCONFIG_IDLEMODE_FORCEIDLE 0x0
-#define ISS_HL_SYSCONFIG_IDLEMODE_NOIDLE 0x1
-#define ISS_HL_SYSCONFIG_IDLEMODE_SMARTIDLE 0x2
-#define ISS_HL_SYSCONFIG_SOFTRESET (1 << 0)
-
-#define ISS_HL_IRQSTATUS_RAW(i) (0x20 + (0x10 * (i)))
-#define ISS_HL_IRQSTATUS(i) (0x24 + (0x10 * (i)))
-#define ISS_HL_IRQENABLE_SET(i) (0x28 + (0x10 * (i)))
-#define ISS_HL_IRQENABLE_CLR(i) (0x2c + (0x10 * (i)))
-
-#define ISS_HL_IRQ_HS_VS (1 << 17)
-#define ISS_HL_IRQ_SIMCOP(i) (1 << (12 + (i)))
-#define ISS_HL_IRQ_BTE (1 << 11)
-#define ISS_HL_IRQ_CBUFF (1 << 10)
-#define ISS_HL_IRQ_CCP2(i) (1 << ((i) > 3 ? 16 : 14 + (i)))
-#define ISS_HL_IRQ_CSIB (1 << 5)
-#define ISS_HL_IRQ_CSIA (1 << 4)
-#define ISS_HL_IRQ_ISP(i) (1 << (i))
-
-#define ISS_CTRL 0x80
-#define ISS_CTRL_CLK_DIV_MASK (3 << 4)
-#define ISS_CTRL_INPUT_SEL_MASK (3 << 2)
-#define ISS_CTRL_INPUT_SEL_CSI2A (0 << 2)
-#define ISS_CTRL_INPUT_SEL_CSI2B (1 << 2)
-#define ISS_CTRL_SYNC_DETECT_VS_RAISING (3 << 0)
-
-#define ISS_CLKCTRL 0x84
-#define ISS_CLKCTRL_VPORT2_CLK (1 << 30)
-#define ISS_CLKCTRL_VPORT1_CLK (1 << 29)
-#define ISS_CLKCTRL_VPORT0_CLK (1 << 28)
-#define ISS_CLKCTRL_CCP2 (1 << 4)
-#define ISS_CLKCTRL_CSI2_B (1 << 3)
-#define ISS_CLKCTRL_CSI2_A (1 << 2)
-#define ISS_CLKCTRL_ISP (1 << 1)
-#define ISS_CLKCTRL_SIMCOP (1 << 0)
-
-#define ISS_CLKSTAT 0x88
-#define ISS_CLKSTAT_VPORT2_CLK (1 << 30)
-#define ISS_CLKSTAT_VPORT1_CLK (1 << 29)
-#define ISS_CLKSTAT_VPORT0_CLK (1 << 28)
-#define ISS_CLKSTAT_CCP2 (1 << 4)
-#define ISS_CLKSTAT_CSI2_B (1 << 3)
-#define ISS_CLKSTAT_CSI2_A (1 << 2)
-#define ISS_CLKSTAT_ISP (1 << 1)
-#define ISS_CLKSTAT_SIMCOP (1 << 0)
-
-#define ISS_PM_STATUS 0x8c
-#define ISS_PM_STATUS_CBUFF_PM_MASK (3 << 12)
-#define ISS_PM_STATUS_BTE_PM_MASK (3 << 10)
-#define ISS_PM_STATUS_SIMCOP_PM_MASK (3 << 8)
-#define ISS_PM_STATUS_ISP_PM_MASK (3 << 6)
-#define ISS_PM_STATUS_CCP2_PM_MASK (3 << 4)
-#define ISS_PM_STATUS_CSI2_B_PM_MASK (3 << 2)
-#define ISS_PM_STATUS_CSI2_A_PM_MASK (3 << 0)
-
-#define REGISTER0 0x0
-#define REGISTER0_HSCLOCKCONFIG (1 << 24)
-#define REGISTER0_THS_TERM_MASK (0xff << 8)
-#define REGISTER0_THS_TERM_SHIFT 8
-#define REGISTER0_THS_SETTLE_MASK (0xff << 0)
-#define REGISTER0_THS_SETTLE_SHIFT 0
-
-#define REGISTER1 0x4
-#define REGISTER1_RESET_DONE_CTRLCLK (1 << 29)
-#define REGISTER1_CLOCK_MISS_DETECTOR_STATUS (1 << 25)
-#define REGISTER1_TCLK_TERM_MASK (0x3f << 18)
-#define REGISTER1_TCLK_TERM_SHIFT 18
-#define REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT 10
-#define REGISTER1_CTRLCLK_DIV_FACTOR_MASK (0x3 << 8)
-#define REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT 8
-#define REGISTER1_TCLK_SETTLE_MASK (0xff << 0)
-#define REGISTER1_TCLK_SETTLE_SHIFT 0
-
-#define REGISTER2 0x8
-
-#define CSI2_SYSCONFIG 0x10
-#define CSI2_SYSCONFIG_MSTANDBY_MODE_MASK (3 << 12)
-#define CSI2_SYSCONFIG_MSTANDBY_MODE_FORCE (0 << 12)
-#define CSI2_SYSCONFIG_MSTANDBY_MODE_NO (1 << 12)
-#define CSI2_SYSCONFIG_MSTANDBY_MODE_SMART (2 << 12)
-#define CSI2_SYSCONFIG_SOFT_RESET (1 << 1)
-#define CSI2_SYSCONFIG_AUTO_IDLE (1 << 0)
-
-#define CSI2_SYSSTATUS 0x14
-#define CSI2_SYSSTATUS_RESET_DONE (1 << 0)
-
-#define CSI2_IRQSTATUS 0x18
-#define CSI2_IRQENABLE 0x1c
-
-/* Shared bits across CSI2_IRQENABLE and IRQSTATUS */
-
-#define CSI2_IRQ_OCP_ERR (1 << 14)
-#define CSI2_IRQ_SHORT_PACKET (1 << 13)
-#define CSI2_IRQ_ECC_CORRECTION (1 << 12)
-#define CSI2_IRQ_ECC_NO_CORRECTION (1 << 11)
-#define CSI2_IRQ_COMPLEXIO_ERR (1 << 9)
-#define CSI2_IRQ_FIFO_OVF (1 << 8)
-#define CSI2_IRQ_CONTEXT0 (1 << 0)
-
-#define CSI2_CTRL 0x40
-#define CSI2_CTRL_MFLAG_LEVH_MASK (7 << 20)
-#define CSI2_CTRL_MFLAG_LEVH_SHIFT 20
-#define CSI2_CTRL_MFLAG_LEVL_MASK (7 << 17)
-#define CSI2_CTRL_MFLAG_LEVL_SHIFT 17
-#define CSI2_CTRL_BURST_SIZE_EXPAND (1 << 16)
-#define CSI2_CTRL_VP_CLK_EN (1 << 15)
-#define CSI2_CTRL_NON_POSTED_WRITE (1 << 13)
-#define CSI2_CTRL_VP_ONLY_EN (1 << 11)
-#define CSI2_CTRL_VP_OUT_CTRL_MASK (3 << 8)
-#define CSI2_CTRL_VP_OUT_CTRL_SHIFT 8
-#define CSI2_CTRL_DBG_EN (1 << 7)
-#define CSI2_CTRL_BURST_SIZE_MASK (3 << 5)
-#define CSI2_CTRL_ENDIANNESS (1 << 4)
-#define CSI2_CTRL_FRAME (1 << 3)
-#define CSI2_CTRL_ECC_EN (1 << 2)
-#define CSI2_CTRL_IF_EN (1 << 0)
-
-#define CSI2_DBG_H 0x44
-
-#define CSI2_COMPLEXIO_CFG 0x50
-#define CSI2_COMPLEXIO_CFG_RESET_CTRL (1 << 30)
-#define CSI2_COMPLEXIO_CFG_RESET_DONE (1 << 29)
-#define CSI2_COMPLEXIO_CFG_PWD_CMD_MASK (3 << 27)
-#define CSI2_COMPLEXIO_CFG_PWD_CMD_OFF (0 << 27)
-#define CSI2_COMPLEXIO_CFG_PWD_CMD_ON (1 << 27)
-#define CSI2_COMPLEXIO_CFG_PWD_CMD_ULP (2 << 27)
-#define CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK (3 << 25)
-#define CSI2_COMPLEXIO_CFG_PWD_STATUS_OFF (0 << 25)
-#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ON (1 << 25)
-#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ULP (2 << 25)
-#define CSI2_COMPLEXIO_CFG_PWR_AUTO (1 << 24)
-#define CSI2_COMPLEXIO_CFG_DATA_POL(i) (1 << (((i) * 4) + 3))
-#define CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i) (7 << ((i) * 4))
-#define CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i) ((i) * 4)
-#define CSI2_COMPLEXIO_CFG_CLOCK_POL (1 << 3)
-#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK (7 << 0)
-#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT 0
-
-#define CSI2_COMPLEXIO_IRQSTATUS 0x54
-
-#define CSI2_SHORT_PACKET 0x5c
-
-#define CSI2_COMPLEXIO_IRQENABLE 0x60
-
-/* Shared bits across CSI2_COMPLEXIO_IRQENABLE and IRQSTATUS */
-#define CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT (1 << 26)
-#define CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER (1 << 25)
-#define CSI2_COMPLEXIO_IRQ_STATEULPM5 (1 << 24)
-#define CSI2_COMPLEXIO_IRQ_STATEULPM4 (1 << 23)
-#define CSI2_COMPLEXIO_IRQ_STATEULPM3 (1 << 22)
-#define CSI2_COMPLEXIO_IRQ_STATEULPM2 (1 << 21)
-#define CSI2_COMPLEXIO_IRQ_STATEULPM1 (1 << 20)
-#define CSI2_COMPLEXIO_IRQ_ERRCONTROL5 (1 << 19)
-#define CSI2_COMPLEXIO_IRQ_ERRCONTROL4 (1 << 18)
-#define CSI2_COMPLEXIO_IRQ_ERRCONTROL3 (1 << 17)
-#define CSI2_COMPLEXIO_IRQ_ERRCONTROL2 (1 << 16)
-#define CSI2_COMPLEXIO_IRQ_ERRCONTROL1 (1 << 15)
-#define CSI2_COMPLEXIO_IRQ_ERRESC5 (1 << 14)
-#define CSI2_COMPLEXIO_IRQ_ERRESC4 (1 << 13)
-#define CSI2_COMPLEXIO_IRQ_ERRESC3 (1 << 12)
-#define CSI2_COMPLEXIO_IRQ_ERRESC2 (1 << 11)
-#define CSI2_COMPLEXIO_IRQ_ERRESC1 (1 << 10)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 (1 << 9)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 (1 << 8)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 (1 << 7)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 (1 << 6)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 (1 << 5)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTHS5 (1 << 4)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTHS4 (1 << 3)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTHS3 (1 << 2)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTHS2 (1 << 1)
-#define CSI2_COMPLEXIO_IRQ_ERRSOTHS1 (1 << 0)
-
-#define CSI2_DBG_P 0x68
-
-#define CSI2_TIMING 0x6c
-#define CSI2_TIMING_FORCE_RX_MODE_IO1 (1 << 15)
-#define CSI2_TIMING_STOP_STATE_X16_IO1 (1 << 14)
-#define CSI2_TIMING_STOP_STATE_X4_IO1 (1 << 13)
-#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK (0x1fff << 0)
-#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT 0
-
-#define CSI2_CTX_CTRL1(i) (0x70 + (0x20 * i))
-#define CSI2_CTX_CTRL1_GENERIC (1 << 30)
-#define CSI2_CTX_CTRL1_TRANSCODE (0xf << 24)
-#define CSI2_CTX_CTRL1_FEC_NUMBER_MASK (0xff << 16)
-#define CSI2_CTX_CTRL1_COUNT_MASK (0xff << 8)
-#define CSI2_CTX_CTRL1_COUNT_SHIFT 8
-#define CSI2_CTX_CTRL1_EOF_EN (1 << 7)
-#define CSI2_CTX_CTRL1_EOL_EN (1 << 6)
-#define CSI2_CTX_CTRL1_CS_EN (1 << 5)
-#define CSI2_CTX_CTRL1_COUNT_UNLOCK (1 << 4)
-#define CSI2_CTX_CTRL1_PING_PONG (1 << 3)
-#define CSI2_CTX_CTRL1_CTX_EN (1 << 0)
-
-#define CSI2_CTX_CTRL2(i) (0x74 + (0x20 * i))
-#define CSI2_CTX_CTRL2_FRAME_MASK (0xffff << 16)
-#define CSI2_CTX_CTRL2_FRAME_SHIFT 16
-#define CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT 13
-#define CSI2_CTX_CTRL2_USER_DEF_MAP_MASK \
- (0x3 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
-#define CSI2_CTX_CTRL2_VIRTUAL_ID_MASK (3 << 11)
-#define CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT 11
-#define CSI2_CTX_CTRL2_DPCM_PRED (1 << 10)
-#define CSI2_CTX_CTRL2_FORMAT_MASK (0x3ff << 0)
-#define CSI2_CTX_CTRL2_FORMAT_SHIFT 0
-
-#define CSI2_CTX_DAT_OFST(i) (0x78 + (0x20 * i))
-#define CSI2_CTX_DAT_OFST_MASK (0xfff << 5)
-
-#define CSI2_CTX_PING_ADDR(i) (0x7c + (0x20 * i))
-#define CSI2_CTX_PING_ADDR_MASK 0xffffffe0
-
-#define CSI2_CTX_PONG_ADDR(i) (0x80 + (0x20 * i))
-#define CSI2_CTX_PONG_ADDR_MASK CSI2_CTX_PING_ADDR_MASK
-
-#define CSI2_CTX_IRQENABLE(i) (0x84 + (0x20 * i))
-#define CSI2_CTX_IRQSTATUS(i) (0x88 + (0x20 * i))
-
-#define CSI2_CTX_CTRL3(i) (0x8c + (0x20 * i))
-#define CSI2_CTX_CTRL3_ALPHA_SHIFT 5
-#define CSI2_CTX_CTRL3_ALPHA_MASK \
- (0x3fff << CSI2_CTX_CTRL3_ALPHA_SHIFT)
-
-/* Shared bits across CSI2_CTX_IRQENABLE and IRQSTATUS */
-#define CSI2_CTX_IRQ_ECC_CORRECTION (1 << 8)
-#define CSI2_CTX_IRQ_LINE_NUMBER (1 << 7)
-#define CSI2_CTX_IRQ_FRAME_NUMBER (1 << 6)
-#define CSI2_CTX_IRQ_CS (1 << 5)
-#define CSI2_CTX_IRQ_LE (1 << 3)
-#define CSI2_CTX_IRQ_LS (1 << 2)
-#define CSI2_CTX_IRQ_FE (1 << 1)
-#define CSI2_CTX_IRQ_FS (1 << 0)
-
-/* ISS BTE */
-#define BTE_CTRL (0x0030)
-#define BTE_CTRL_BW_LIMITER_MASK (0x3ff << 22)
-#define BTE_CTRL_BW_LIMITER_SHIFT 22
-
-/* ISS ISP_SYS1 */
-#define ISP5_REVISION (0x0000)
-#define ISP5_SYSCONFIG (0x0010)
-#define ISP5_SYSCONFIG_STANDBYMODE_MASK (3 << 4)
-#define ISP5_SYSCONFIG_STANDBYMODE_FORCE (0 << 4)
-#define ISP5_SYSCONFIG_STANDBYMODE_NO (1 << 4)
-#define ISP5_SYSCONFIG_STANDBYMODE_SMART (2 << 4)
-#define ISP5_SYSCONFIG_SOFTRESET (1 << 1)
-
-#define ISP5_IRQSTATUS(i) (0x0028 + (0x10 * (i)))
-#define ISP5_IRQENABLE_SET(i) (0x002c + (0x10 * (i)))
-#define ISP5_IRQENABLE_CLR(i) (0x0030 + (0x10 * (i)))
-
-/* Bits shared for ISP5_IRQ* registers */
-#define ISP5_IRQ_OCP_ERR (1 << 31)
-#define ISP5_IRQ_IPIPE_INT_DPC_RNEW1 (1 << 29)
-#define ISP5_IRQ_IPIPE_INT_DPC_RNEW0 (1 << 28)
-#define ISP5_IRQ_IPIPE_INT_DPC_INIT (1 << 27)
-#define ISP5_IRQ_IPIPE_INT_EOF (1 << 25)
-#define ISP5_IRQ_H3A_INT_EOF (1 << 24)
-#define ISP5_IRQ_RSZ_INT_EOF1 (1 << 23)
-#define ISP5_IRQ_RSZ_INT_EOF0 (1 << 22)
-#define ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR (1 << 19)
-#define ISP5_IRQ_RSZ_FIFO_OVF (1 << 18)
-#define ISP5_IRQ_RSZ_INT_CYC_RSZB (1 << 17)
-#define ISP5_IRQ_RSZ_INT_CYC_RSZA (1 << 16)
-#define ISP5_IRQ_RSZ_INT_DMA (1 << 15)
-#define ISP5_IRQ_RSZ_INT_LAST_PIX (1 << 14)
-#define ISP5_IRQ_RSZ_INT_REG (1 << 13)
-#define ISP5_IRQ_H3A_INT (1 << 12)
-#define ISP5_IRQ_AF_INT (1 << 11)
-#define ISP5_IRQ_AEW_INT (1 << 10)
-#define ISP5_IRQ_IPIPEIF_IRQ (1 << 9)
-#define ISP5_IRQ_IPIPE_INT_HST (1 << 8)
-#define ISP5_IRQ_IPIPE_INT_BSC (1 << 7)
-#define ISP5_IRQ_IPIPE_INT_DMA (1 << 6)
-#define ISP5_IRQ_IPIPE_INT_LAST_PIX (1 << 5)
-#define ISP5_IRQ_IPIPE_INT_REG (1 << 4)
-#define ISP5_IRQ_ISIF_INT(i) (1 << (i))
-
-#define ISP5_CTRL (0x006c)
-#define ISP5_CTRL_MSTANDBY (1 << 24)
-#define ISP5_CTRL_VD_PULSE_EXT (1 << 23)
-#define ISP5_CTRL_MSTANDBY_WAIT (1 << 20)
-#define ISP5_CTRL_BL_CLK_ENABLE (1 << 15)
-#define ISP5_CTRL_ISIF_CLK_ENABLE (1 << 14)
-#define ISP5_CTRL_H3A_CLK_ENABLE (1 << 13)
-#define ISP5_CTRL_RSZ_CLK_ENABLE (1 << 12)
-#define ISP5_CTRL_IPIPE_CLK_ENABLE (1 << 11)
-#define ISP5_CTRL_IPIPEIF_CLK_ENABLE (1 << 10)
-#define ISP5_CTRL_SYNC_ENABLE (1 << 9)
-#define ISP5_CTRL_PSYNC_CLK_SEL (1 << 8)
-
-/* ISS ISP ISIF register offsets */
-#define ISIF_SYNCEN (0x0000)
-#define ISIF_SYNCEN_DWEN (1 << 1)
-#define ISIF_SYNCEN_SYEN (1 << 0)
-
-#define ISIF_MODESET (0x0004)
-#define ISIF_MODESET_INPMOD_MASK (3 << 12)
-#define ISIF_MODESET_INPMOD_RAW (0 << 12)
-#define ISIF_MODESET_INPMOD_YCBCR16 (1 << 12)
-#define ISIF_MODESET_INPMOD_YCBCR8 (2 << 12)
-#define ISIF_MODESET_CCDW_MASK (7 << 8)
-#define ISIF_MODESET_CCDW_2BIT (2 << 8)
-#define ISIF_MODESET_CCDMD (1 << 7)
-#define ISIF_MODESET_SWEN (1 << 5)
-#define ISIF_MODESET_HDPOL (1 << 3)
-#define ISIF_MODESET_VDPOL (1 << 2)
-
-#define ISIF_SPH (0x0018)
-#define ISIF_SPH_MASK (0x7fff)
-
-#define ISIF_LNH (0x001c)
-#define ISIF_LNH_MASK (0x7fff)
-
-#define ISIF_LNV (0x0028)
-#define ISIF_LNV_MASK (0x7fff)
-
-#define ISIF_HSIZE (0x0034)
-#define ISIF_HSIZE_ADCR (1 << 12)
-#define ISIF_HSIZE_HSIZE_MASK (0xfff)
-
-#define ISIF_CADU (0x003c)
-#define ISIF_CADU_MASK (0x7ff)
-
-#define ISIF_CADL (0x0040)
-#define ISIF_CADL_MASK (0xffff)
-
-#define ISIF_CCOLP (0x004c)
-#define ISIF_CCOLP_CP0_F0_R (0 << 6)
-#define ISIF_CCOLP_CP0_F0_GR (1 << 6)
-#define ISIF_CCOLP_CP0_F0_B (3 << 6)
-#define ISIF_CCOLP_CP0_F0_GB (2 << 6)
-#define ISIF_CCOLP_CP1_F0_R (0 << 4)
-#define ISIF_CCOLP_CP1_F0_GR (1 << 4)
-#define ISIF_CCOLP_CP1_F0_B (3 << 4)
-#define ISIF_CCOLP_CP1_F0_GB (2 << 4)
-#define ISIF_CCOLP_CP2_F0_R (0 << 2)
-#define ISIF_CCOLP_CP2_F0_GR (1 << 2)
-#define ISIF_CCOLP_CP2_F0_B (3 << 2)
-#define ISIF_CCOLP_CP2_F0_GB (2 << 2)
-#define ISIF_CCOLP_CP3_F0_R (0 << 0)
-#define ISIF_CCOLP_CP3_F0_GR (1 << 0)
-#define ISIF_CCOLP_CP3_F0_B (3 << 0)
-#define ISIF_CCOLP_CP3_F0_GB (2 << 0)
-
-#define ISIF_VDINT(i) (0x0070 + (i) * 4)
-#define ISIF_VDINT_MASK (0x7fff)
-
-#define ISIF_CGAMMAWD (0x0080)
-#define ISIF_CGAMMAWD_GWDI_MASK (0xf << 1)
-#define ISIF_CGAMMAWD_GWDI(bpp) ((16 - (bpp)) << 1)
-
-#define ISIF_CCDCFG (0x0088)
-#define ISIF_CCDCFG_Y8POS (1 << 11)
-
-/* ISS ISP IPIPEIF register offsets */
-#define IPIPEIF_ENABLE (0x0000)
-
-#define IPIPEIF_CFG1 (0x0004)
-#define IPIPEIF_CFG1_INPSRC1_MASK (3 << 14)
-#define IPIPEIF_CFG1_INPSRC1_VPORT_RAW (0 << 14)
-#define IPIPEIF_CFG1_INPSRC1_SDRAM_RAW (1 << 14)
-#define IPIPEIF_CFG1_INPSRC1_ISIF_DARKFM (2 << 14)
-#define IPIPEIF_CFG1_INPSRC1_SDRAM_YUV (3 << 14)
-#define IPIPEIF_CFG1_INPSRC2_MASK (3 << 2)
-#define IPIPEIF_CFG1_INPSRC2_ISIF (0 << 2)
-#define IPIPEIF_CFG1_INPSRC2_SDRAM_RAW (1 << 2)
-#define IPIPEIF_CFG1_INPSRC2_ISIF_DARKFM (2 << 2)
-#define IPIPEIF_CFG1_INPSRC2_SDRAM_YUV (3 << 2)
-
-#define IPIPEIF_CFG2 (0x0030)
-#define IPIPEIF_CFG2_YUV8P (1 << 7)
-#define IPIPEIF_CFG2_YUV8 (1 << 6)
-#define IPIPEIF_CFG2_YUV16 (1 << 3)
-#define IPIPEIF_CFG2_VDPOL (1 << 2)
-#define IPIPEIF_CFG2_HDPOL (1 << 1)
-#define IPIPEIF_CFG2_INTSW (1 << 0)
-
-#define IPIPEIF_CLKDIV (0x0040)
-
-/* ISS ISP IPIPE register offsets */
-#define IPIPE_SRC_EN (0x0000)
-#define IPIPE_SRC_EN_EN (1 << 0)
-
-#define IPIPE_SRC_MODE (0x0004)
-#define IPIPE_SRC_MODE_WRT (1 << 1)
-#define IPIPE_SRC_MODE_OST (1 << 0)
-
-#define IPIPE_SRC_FMT (0x0008)
-#define IPIPE_SRC_FMT_RAW2YUV (0 << 0)
-#define IPIPE_SRC_FMT_RAW2RAW (1 << 0)
-#define IPIPE_SRC_FMT_RAW2STATS (2 << 0)
-#define IPIPE_SRC_FMT_YUV2YUV (3 << 0)
-
-#define IPIPE_SRC_COL (0x000c)
-#define IPIPE_SRC_COL_OO_R (0 << 6)
-#define IPIPE_SRC_COL_OO_GR (1 << 6)
-#define IPIPE_SRC_COL_OO_B (3 << 6)
-#define IPIPE_SRC_COL_OO_GB (2 << 6)
-#define IPIPE_SRC_COL_OE_R (0 << 4)
-#define IPIPE_SRC_COL_OE_GR (1 << 4)
-#define IPIPE_SRC_COL_OE_B (3 << 4)
-#define IPIPE_SRC_COL_OE_GB (2 << 4)
-#define IPIPE_SRC_COL_EO_R (0 << 2)
-#define IPIPE_SRC_COL_EO_GR (1 << 2)
-#define IPIPE_SRC_COL_EO_B (3 << 2)
-#define IPIPE_SRC_COL_EO_GB (2 << 2)
-#define IPIPE_SRC_COL_EE_R (0 << 0)
-#define IPIPE_SRC_COL_EE_GR (1 << 0)
-#define IPIPE_SRC_COL_EE_B (3 << 0)
-#define IPIPE_SRC_COL_EE_GB (2 << 0)
-
-#define IPIPE_SRC_VPS (0x0010)
-#define IPIPE_SRC_VPS_MASK (0xffff)
-
-#define IPIPE_SRC_VSZ (0x0014)
-#define IPIPE_SRC_VSZ_MASK (0x1fff)
-
-#define IPIPE_SRC_HPS (0x0018)
-#define IPIPE_SRC_HPS_MASK (0xffff)
-
-#define IPIPE_SRC_HSZ (0x001c)
-#define IPIPE_SRC_HSZ_MASK (0x1ffe)
-
-#define IPIPE_SEL_SBU (0x0020)
-
-#define IPIPE_SRC_STA (0x0024)
-
-#define IPIPE_GCK_MMR (0x0028)
-#define IPIPE_GCK_MMR_REG (1 << 0)
-
-#define IPIPE_GCK_PIX (0x002c)
-#define IPIPE_GCK_PIX_G3 (1 << 3)
-#define IPIPE_GCK_PIX_G2 (1 << 2)
-#define IPIPE_GCK_PIX_G1 (1 << 1)
-#define IPIPE_GCK_PIX_G0 (1 << 0)
-
-#define IPIPE_DPC_LUT_EN (0x0034)
-#define IPIPE_DPC_LUT_SEL (0x0038)
-#define IPIPE_DPC_LUT_ADR (0x003c)
-#define IPIPE_DPC_LUT_SIZ (0x0040)
-
-#define IPIPE_DPC_OTF_EN (0x0044)
-#define IPIPE_DPC_OTF_TYP (0x0048)
-#define IPIPE_DPC_OTF_2_D_THR_R (0x004c)
-#define IPIPE_DPC_OTF_2_D_THR_GR (0x0050)
-#define IPIPE_DPC_OTF_2_D_THR_GB (0x0054)
-#define IPIPE_DPC_OTF_2_D_THR_B (0x0058)
-#define IPIPE_DPC_OTF_2_C_THR_R (0x005c)
-#define IPIPE_DPC_OTF_2_C_THR_GR (0x0060)
-#define IPIPE_DPC_OTF_2_C_THR_GB (0x0064)
-#define IPIPE_DPC_OTF_2_C_THR_B (0x0068)
-#define IPIPE_DPC_OTF_3_SHF (0x006c)
-#define IPIPE_DPC_OTF_3_D_THR (0x0070)
-#define IPIPE_DPC_OTF_3_D_SPL (0x0074)
-#define IPIPE_DPC_OTF_3_D_MIN (0x0078)
-#define IPIPE_DPC_OTF_3_D_MAX (0x007c)
-#define IPIPE_DPC_OTF_3_C_THR (0x0080)
-#define IPIPE_DPC_OTF_3_C_SLP (0x0084)
-#define IPIPE_DPC_OTF_3_C_MIN (0x0088)
-#define IPIPE_DPC_OTF_3_C_MAX (0x008c)
-
-#define IPIPE_LSC_VOFT (0x0090)
-#define IPIPE_LSC_VA2 (0x0094)
-#define IPIPE_LSC_VA1 (0x0098)
-#define IPIPE_LSC_VS (0x009c)
-#define IPIPE_LSC_HOFT (0x00a0)
-#define IPIPE_LSC_HA2 (0x00a4)
-#define IPIPE_LSC_HA1 (0x00a8)
-#define IPIPE_LSC_HS (0x00ac)
-#define IPIPE_LSC_GAN_R (0x00b0)
-#define IPIPE_LSC_GAN_GR (0x00b4)
-#define IPIPE_LSC_GAN_GB (0x00b8)
-#define IPIPE_LSC_GAN_B (0x00bc)
-#define IPIPE_LSC_OFT_R (0x00c0)
-#define IPIPE_LSC_OFT_GR (0x00c4)
-#define IPIPE_LSC_OFT_GB (0x00c8)
-#define IPIPE_LSC_OFT_B (0x00cc)
-#define IPIPE_LSC_SHF (0x00d0)
-#define IPIPE_LSC_MAX (0x00d4)
-
-#define IPIPE_D2F_1ST_EN (0x00d8)
-#define IPIPE_D2F_1ST_TYP (0x00dc)
-#define IPIPE_D2F_1ST_THR_00 (0x00e0)
-#define IPIPE_D2F_1ST_THR_01 (0x00e4)
-#define IPIPE_D2F_1ST_THR_02 (0x00e8)
-#define IPIPE_D2F_1ST_THR_03 (0x00ec)
-#define IPIPE_D2F_1ST_THR_04 (0x00f0)
-#define IPIPE_D2F_1ST_THR_05 (0x00f4)
-#define IPIPE_D2F_1ST_THR_06 (0x00f8)
-#define IPIPE_D2F_1ST_THR_07 (0x00fc)
-#define IPIPE_D2F_1ST_STR_00 (0x0100)
-#define IPIPE_D2F_1ST_STR_01 (0x0104)
-#define IPIPE_D2F_1ST_STR_02 (0x0108)
-#define IPIPE_D2F_1ST_STR_03 (0x010c)
-#define IPIPE_D2F_1ST_STR_04 (0x0110)
-#define IPIPE_D2F_1ST_STR_05 (0x0114)
-#define IPIPE_D2F_1ST_STR_06 (0x0118)
-#define IPIPE_D2F_1ST_STR_07 (0x011c)
-#define IPIPE_D2F_1ST_SPR_00 (0x0120)
-#define IPIPE_D2F_1ST_SPR_01 (0x0124)
-#define IPIPE_D2F_1ST_SPR_02 (0x0128)
-#define IPIPE_D2F_1ST_SPR_03 (0x012c)
-#define IPIPE_D2F_1ST_SPR_04 (0x0130)
-#define IPIPE_D2F_1ST_SPR_05 (0x0134)
-#define IPIPE_D2F_1ST_SPR_06 (0x0138)
-#define IPIPE_D2F_1ST_SPR_07 (0x013c)
-#define IPIPE_D2F_1ST_EDG_MIN (0x0140)
-#define IPIPE_D2F_1ST_EDG_MAX (0x0144)
-#define IPIPE_D2F_2ND_EN (0x0148)
-#define IPIPE_D2F_2ND_TYP (0x014c)
-#define IPIPE_D2F_2ND_THR00 (0x0150)
-#define IPIPE_D2F_2ND_THR01 (0x0154)
-#define IPIPE_D2F_2ND_THR02 (0x0158)
-#define IPIPE_D2F_2ND_THR03 (0x015c)
-#define IPIPE_D2F_2ND_THR04 (0x0160)
-#define IPIPE_D2F_2ND_THR05 (0x0164)
-#define IPIPE_D2F_2ND_THR06 (0x0168)
-#define IPIPE_D2F_2ND_THR07 (0x016c)
-#define IPIPE_D2F_2ND_STR_00 (0x0170)
-#define IPIPE_D2F_2ND_STR_01 (0x0174)
-#define IPIPE_D2F_2ND_STR_02 (0x0178)
-#define IPIPE_D2F_2ND_STR_03 (0x017c)
-#define IPIPE_D2F_2ND_STR_04 (0x0180)
-#define IPIPE_D2F_2ND_STR_05 (0x0184)
-#define IPIPE_D2F_2ND_STR_06 (0x0188)
-#define IPIPE_D2F_2ND_STR_07 (0x018c)
-#define IPIPE_D2F_2ND_SPR_00 (0x0190)
-#define IPIPE_D2F_2ND_SPR_01 (0x0194)
-#define IPIPE_D2F_2ND_SPR_02 (0x0198)
-#define IPIPE_D2F_2ND_SPR_03 (0x019c)
-#define IPIPE_D2F_2ND_SPR_04 (0x01a0)
-#define IPIPE_D2F_2ND_SPR_05 (0x01a4)
-#define IPIPE_D2F_2ND_SPR_06 (0x01a8)
-#define IPIPE_D2F_2ND_SPR_07 (0x01ac)
-#define IPIPE_D2F_2ND_EDG_MIN (0x01b0)
-#define IPIPE_D2F_2ND_EDG_MAX (0x01b4)
-
-#define IPIPE_GIC_EN (0x01b8)
-#define IPIPE_GIC_TYP (0x01bc)
-#define IPIPE_GIC_GAN (0x01c0)
-#define IPIPE_GIC_NFGAIN (0x01c4)
-#define IPIPE_GIC_THR (0x01c8)
-#define IPIPE_GIC_SLP (0x01cc)
-
-#define IPIPE_WB2_OFT_R (0x01d0)
-#define IPIPE_WB2_OFT_GR (0x01d4)
-#define IPIPE_WB2_OFT_GB (0x01d8)
-#define IPIPE_WB2_OFT_B (0x01dc)
-
-#define IPIPE_WB2_WGN_R (0x01e0)
-#define IPIPE_WB2_WGN_GR (0x01e4)
-#define IPIPE_WB2_WGN_GB (0x01e8)
-#define IPIPE_WB2_WGN_B (0x01ec)
-
-#define IPIPE_CFA_MODE (0x01f0)
-#define IPIPE_CFA_2DIR_HPF_THR (0x01f4)
-#define IPIPE_CFA_2DIR_HPF_SLP (0x01f8)
-#define IPIPE_CFA_2DIR_MIX_THR (0x01fc)
-#define IPIPE_CFA_2DIR_MIX_SLP (0x0200)
-#define IPIPE_CFA_2DIR_DIR_TRH (0x0204)
-#define IPIPE_CFA_2DIR_DIR_SLP (0x0208)
-#define IPIPE_CFA_2DIR_NDWT (0x020c)
-#define IPIPE_CFA_MONO_HUE_FRA (0x0210)
-#define IPIPE_CFA_MONO_EDG_THR (0x0214)
-#define IPIPE_CFA_MONO_THR_MIN (0x0218)
-
-#define IPIPE_CFA_MONO_THR_SLP (0x021c)
-#define IPIPE_CFA_MONO_SLP_MIN (0x0220)
-#define IPIPE_CFA_MONO_SLP_SLP (0x0224)
-#define IPIPE_CFA_MONO_LPWT (0x0228)
-
-#define IPIPE_RGB1_MUL_RR (0x022c)
-#define IPIPE_RGB1_MUL_GR (0x0230)
-#define IPIPE_RGB1_MUL_BR (0x0234)
-#define IPIPE_RGB1_MUL_RG (0x0238)
-#define IPIPE_RGB1_MUL_GG (0x023c)
-#define IPIPE_RGB1_MUL_BG (0x0240)
-#define IPIPE_RGB1_MUL_RB (0x0244)
-#define IPIPE_RGB1_MUL_GB (0x0248)
-#define IPIPE_RGB1_MUL_BB (0x024c)
-#define IPIPE_RGB1_OFT_OR (0x0250)
-#define IPIPE_RGB1_OFT_OG (0x0254)
-#define IPIPE_RGB1_OFT_OB (0x0258)
-#define IPIPE_GMM_CFG (0x025c)
-#define IPIPE_RGB2_MUL_RR (0x0260)
-#define IPIPE_RGB2_MUL_GR (0x0264)
-#define IPIPE_RGB2_MUL_BR (0x0268)
-#define IPIPE_RGB2_MUL_RG (0x026c)
-#define IPIPE_RGB2_MUL_GG (0x0270)
-#define IPIPE_RGB2_MUL_BG (0x0274)
-#define IPIPE_RGB2_MUL_RB (0x0278)
-#define IPIPE_RGB2_MUL_GB (0x027c)
-#define IPIPE_RGB2_MUL_BB (0x0280)
-#define IPIPE_RGB2_OFT_OR (0x0284)
-#define IPIPE_RGB2_OFT_OG (0x0288)
-#define IPIPE_RGB2_OFT_OB (0x028c)
-
-#define IPIPE_YUV_ADJ (0x0294)
-#define IPIPE_YUV_MUL_RY (0x0298)
-#define IPIPE_YUV_MUL_GY (0x029c)
-#define IPIPE_YUV_MUL_BY (0x02a0)
-#define IPIPE_YUV_MUL_RCB (0x02a4)
-#define IPIPE_YUV_MUL_GCB (0x02a8)
-#define IPIPE_YUV_MUL_BCB (0x02ac)
-#define IPIPE_YUV_MUL_RCR (0x02b0)
-#define IPIPE_YUV_MUL_GCR (0x02b4)
-#define IPIPE_YUV_MUL_BCR (0x02b8)
-#define IPIPE_YUV_OFT_Y (0x02bc)
-#define IPIPE_YUV_OFT_CB (0x02c0)
-#define IPIPE_YUV_OFT_CR (0x02c4)
-
-#define IPIPE_YUV_PHS (0x02c8)
-#define IPIPE_YUV_PHS_LPF (1 << 1)
-#define IPIPE_YUV_PHS_POS (1 << 0)
-
-#define IPIPE_YEE_EN (0x02d4)
-#define IPIPE_YEE_TYP (0x02d8)
-#define IPIPE_YEE_SHF (0x02dc)
-#define IPIPE_YEE_MUL_00 (0x02e0)
-#define IPIPE_YEE_MUL_01 (0x02e4)
-#define IPIPE_YEE_MUL_02 (0x02e8)
-#define IPIPE_YEE_MUL_10 (0x02ec)
-#define IPIPE_YEE_MUL_11 (0x02f0)
-#define IPIPE_YEE_MUL_12 (0x02f4)
-#define IPIPE_YEE_MUL_20 (0x02f8)
-#define IPIPE_YEE_MUL_21 (0x02fc)
-#define IPIPE_YEE_MUL_22 (0x0300)
-#define IPIPE_YEE_THR (0x0304)
-#define IPIPE_YEE_E_GAN (0x0308)
-#define IPIPE_YEE_E_THR_1 (0x030c)
-#define IPIPE_YEE_E_THR_2 (0x0310)
-#define IPIPE_YEE_G_GAN (0x0314)
-#define IPIPE_YEE_G_OFT (0x0318)
-
-#define IPIPE_CAR_EN (0x031c)
-#define IPIPE_CAR_TYP (0x0320)
-#define IPIPE_CAR_SW (0x0324)
-#define IPIPE_CAR_HPF_TYP (0x0328)
-#define IPIPE_CAR_HPF_SHF (0x032c)
-#define IPIPE_CAR_HPF_THR (0x0330)
-#define IPIPE_CAR_GN1_GAN (0x0334)
-#define IPIPE_CAR_GN1_SHF (0x0338)
-#define IPIPE_CAR_GN1_MIN (0x033c)
-#define IPIPE_CAR_GN2_GAN (0x0340)
-#define IPIPE_CAR_GN2_SHF (0x0344)
-#define IPIPE_CAR_GN2_MIN (0x0348)
-#define IPIPE_CGS_EN (0x034c)
-#define IPIPE_CGS_GN1_L_THR (0x0350)
-#define IPIPE_CGS_GN1_L_GAIN (0x0354)
-#define IPIPE_CGS_GN1_L_SHF (0x0358)
-#define IPIPE_CGS_GN1_L_MIN (0x035c)
-#define IPIPE_CGS_GN1_H_THR (0x0360)
-#define IPIPE_CGS_GN1_H_GAIN (0x0364)
-#define IPIPE_CGS_GN1_H_SHF (0x0368)
-#define IPIPE_CGS_GN1_H_MIN (0x036c)
-#define IPIPE_CGS_GN2_L_THR (0x0370)
-#define IPIPE_CGS_GN2_L_GAIN (0x0374)
-#define IPIPE_CGS_GN2_L_SHF (0x0378)
-#define IPIPE_CGS_GN2_L_MIN (0x037c)
-
-#define IPIPE_BOX_EN (0x0380)
-#define IPIPE_BOX_MODE (0x0384)
-#define IPIPE_BOX_TYP (0x0388)
-#define IPIPE_BOX_SHF (0x038c)
-#define IPIPE_BOX_SDR_SAD_H (0x0390)
-#define IPIPE_BOX_SDR_SAD_L (0x0394)
-
-#define IPIPE_HST_EN (0x039c)
-#define IPIPE_HST_MODE (0x03a0)
-#define IPIPE_HST_SEL (0x03a4)
-#define IPIPE_HST_PARA (0x03a8)
-#define IPIPE_HST_0_VPS (0x03ac)
-#define IPIPE_HST_0_VSZ (0x03b0)
-#define IPIPE_HST_0_HPS (0x03b4)
-#define IPIPE_HST_0_HSZ (0x03b8)
-#define IPIPE_HST_1_VPS (0x03bc)
-#define IPIPE_HST_1_VSZ (0x03c0)
-#define IPIPE_HST_1_HPS (0x03c4)
-#define IPIPE_HST_1_HSZ (0x03c8)
-#define IPIPE_HST_2_VPS (0x03cc)
-#define IPIPE_HST_2_VSZ (0x03d0)
-#define IPIPE_HST_2_HPS (0x03d4)
-#define IPIPE_HST_2_HSZ (0x03d8)
-#define IPIPE_HST_3_VPS (0x03dc)
-#define IPIPE_HST_3_VSZ (0x03e0)
-#define IPIPE_HST_3_HPS (0x03e4)
-#define IPIPE_HST_3_HSZ (0x03e8)
-#define IPIPE_HST_TBL (0x03ec)
-#define IPIPE_HST_MUL_R (0x03f0)
-#define IPIPE_HST_MUL_GR (0x03f4)
-#define IPIPE_HST_MUL_GB (0x03f8)
-#define IPIPE_HST_MUL_B (0x03fc)
-
-#define IPIPE_BSC_EN (0x0400)
-#define IPIPE_BSC_MODE (0x0404)
-#define IPIPE_BSC_TYP (0x0408)
-#define IPIPE_BSC_ROW_VCT (0x040c)
-#define IPIPE_BSC_ROW_SHF (0x0410)
-#define IPIPE_BSC_ROW_VPO (0x0414)
-#define IPIPE_BSC_ROW_VNU (0x0418)
-#define IPIPE_BSC_ROW_VSKIP (0x041c)
-#define IPIPE_BSC_ROW_HPO (0x0420)
-#define IPIPE_BSC_ROW_HNU (0x0424)
-#define IPIPE_BSC_ROW_HSKIP (0x0428)
-#define IPIPE_BSC_COL_VCT (0x042c)
-#define IPIPE_BSC_COL_SHF (0x0430)
-#define IPIPE_BSC_COL_VPO (0x0434)
-#define IPIPE_BSC_COL_VNU (0x0438)
-#define IPIPE_BSC_COL_VSKIP (0x043c)
-#define IPIPE_BSC_COL_HPO (0x0440)
-#define IPIPE_BSC_COL_HNU (0x0444)
-#define IPIPE_BSC_COL_HSKIP (0x0448)
-
-
-/* ISS ISP Resizer register offsets */
-#define RSZ_REVISION (0x0000)
-#define RSZ_SYSCONFIG (0x0004)
-#define RSZ_SYSCONFIG_RSZB_CLK_EN (1 << 9)
-#define RSZ_SYSCONFIG_RSZA_CLK_EN (1 << 8)
-
-#define RSZ_IN_FIFO_CTRL (0x000c)
-#define RSZ_IN_FIFO_CTRL_THRLD_LOW_MASK (0x1ff << 16)
-#define RSZ_IN_FIFO_CTRL_THRLD_LOW_SHIFT 16
-#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_MASK (0x1ff << 0)
-#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_SHIFT 0
-
-#define RSZ_FRACDIV (0x0008)
-#define RSZ_FRACDIV_MASK (0xffff)
-
-#define RSZ_SRC_EN (0x0020)
-#define RSZ_SRC_EN_SRC_EN (1 << 0)
-
-#define RSZ_SRC_MODE (0x0024)
-#define RSZ_SRC_MODE_OST (1 << 0)
-#define RSZ_SRC_MODE_WRT (1 << 1)
-
-#define RSZ_SRC_FMT0 (0x0028)
-#define RSZ_SRC_FMT0_BYPASS (1 << 1)
-#define RSZ_SRC_FMT0_SEL (1 << 0)
-
-#define RSZ_SRC_FMT1 (0x002c)
-#define RSZ_SRC_FMT1_IN420 (1 << 1)
-
-#define RSZ_SRC_VPS (0x0030)
-#define RSZ_SRC_VSZ (0x0034)
-#define RSZ_SRC_HPS (0x0038)
-#define RSZ_SRC_HSZ (0x003c)
-#define RSZ_DMA_RZA (0x0040)
-#define RSZ_DMA_RZB (0x0044)
-#define RSZ_DMA_STA (0x0048)
-#define RSZ_GCK_MMR (0x004c)
-#define RSZ_GCK_MMR_MMR (1 << 0)
-
-#define RSZ_GCK_SDR (0x0054)
-#define RSZ_GCK_SDR_CORE (1 << 0)
-
-#define RSZ_IRQ_RZA (0x0058)
-#define RSZ_IRQ_RZA_MASK (0x1fff)
-
-#define RSZ_IRQ_RZB (0x005c)
-#define RSZ_IRQ_RZB_MASK (0x1fff)
-
-#define RSZ_YUV_Y_MIN (0x0060)
-#define RSZ_YUV_Y_MAX (0x0064)
-#define RSZ_YUV_C_MIN (0x0068)
-#define RSZ_YUV_C_MAX (0x006c)
-
-#define RSZ_SEQ (0x0074)
-#define RSZ_SEQ_HRVB (1 << 2)
-#define RSZ_SEQ_HRVA (1 << 0)
-
-#define RZA_EN (0x0078)
-#define RZA_MODE (0x007c)
-#define RZA_MODE_ONE_SHOT (1 << 0)
-
-#define RZA_420 (0x0080)
-#define RZA_I_VPS (0x0084)
-#define RZA_I_HPS (0x0088)
-#define RZA_O_VSZ (0x008c)
-#define RZA_O_HSZ (0x0090)
-#define RZA_V_PHS_Y (0x0094)
-#define RZA_V_PHS_C (0x0098)
-#define RZA_V_DIF (0x009c)
-#define RZA_V_TYP (0x00a0)
-#define RZA_V_LPF (0x00a4)
-#define RZA_H_PHS (0x00a8)
-#define RZA_H_DIF (0x00b0)
-#define RZA_H_TYP (0x00b4)
-#define RZA_H_LPF (0x00b8)
-#define RZA_DWN_EN (0x00bc)
-#define RZA_SDR_Y_BAD_H (0x00d0)
-#define RZA_SDR_Y_BAD_L (0x00d4)
-#define RZA_SDR_Y_SAD_H (0x00d8)
-#define RZA_SDR_Y_SAD_L (0x00dc)
-#define RZA_SDR_Y_OFT (0x00e0)
-#define RZA_SDR_Y_PTR_S (0x00e4)
-#define RZA_SDR_Y_PTR_E (0x00e8)
-#define RZA_SDR_C_BAD_H (0x00ec)
-#define RZA_SDR_C_BAD_L (0x00f0)
-#define RZA_SDR_C_SAD_H (0x00f4)
-#define RZA_SDR_C_SAD_L (0x00f8)
-#define RZA_SDR_C_OFT (0x00fc)
-#define RZA_SDR_C_PTR_S (0x0100)
-#define RZA_SDR_C_PTR_E (0x0104)
-
-#define RZB_EN (0x0108)
-#define RZB_MODE (0x010c)
-#define RZB_420 (0x0110)
-#define RZB_I_VPS (0x0114)
-#define RZB_I_HPS (0x0118)
-#define RZB_O_VSZ (0x011c)
-#define RZB_O_HSZ (0x0120)
-
-#define RZB_V_DIF (0x012c)
-#define RZB_V_TYP (0x0130)
-#define RZB_V_LPF (0x0134)
-
-#define RZB_H_DIF (0x0140)
-#define RZB_H_TYP (0x0144)
-#define RZB_H_LPF (0x0148)
-
-#define RZB_SDR_Y_BAD_H (0x0160)
-#define RZB_SDR_Y_BAD_L (0x0164)
-#define RZB_SDR_Y_SAD_H (0x0168)
-#define RZB_SDR_Y_SAD_L (0x016c)
-#define RZB_SDR_Y_OFT (0x0170)
-#define RZB_SDR_Y_PTR_S (0x0174)
-#define RZB_SDR_Y_PTR_E (0x0178)
-#define RZB_SDR_C_BAD_H (0x017c)
-#define RZB_SDR_C_BAD_L (0x0180)
-#define RZB_SDR_C_SAD_H (0x0184)
-#define RZB_SDR_C_SAD_L (0x0188)
-
-#define RZB_SDR_C_PTR_S (0x0190)
-#define RZB_SDR_C_PTR_E (0x0194)
-
-/* Shared Bitmasks between RZA & RZB */
-#define RSZ_EN_EN (1 << 0)
-
-#define RSZ_420_CEN (1 << 1)
-#define RSZ_420_YEN (1 << 0)
-
-#define RSZ_I_VPS_MASK (0x1fff)
-
-#define RSZ_I_HPS_MASK (0x1fff)
-
-#define RSZ_O_VSZ_MASK (0x1fff)
-
-#define RSZ_O_HSZ_MASK (0x1ffe)
-
-#define RSZ_V_PHS_Y_MASK (0x3fff)
-
-#define RSZ_V_PHS_C_MASK (0x3fff)
-
-#define RSZ_V_DIF_MASK (0x3fff)
-
-#define RSZ_V_TYP_C (1 << 1)
-#define RSZ_V_TYP_Y (1 << 0)
-
-#define RSZ_V_LPF_C_MASK (0x3f << 6)
-#define RSZ_V_LPF_C_SHIFT 6
-#define RSZ_V_LPF_Y_MASK (0x3f << 0)
-#define RSZ_V_LPF_Y_SHIFT 0
-
-#define RSZ_H_PHS_MASK (0x3fff)
-
-#define RSZ_H_DIF_MASK (0x3fff)
-
-#define RSZ_H_TYP_C (1 << 1)
-#define RSZ_H_TYP_Y (1 << 0)
-
-#define RSZ_H_LPF_C_MASK (0x3f << 6)
-#define RSZ_H_LPF_C_SHIFT 6
-#define RSZ_H_LPF_Y_MASK (0x3f << 0)
-#define RSZ_H_LPF_Y_SHIFT 0
-
-#define RSZ_DWN_EN_DWN_EN (1 << 0)
-
-#endif /* _OMAP4_ISS_REGS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
deleted file mode 100644
index 5030cf3cd34c..000000000000
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ /dev/null
@@ -1,876 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-#include "iss.h"
-#include "iss_regs.h"
-#include "iss_resizer.h"
-
-static const unsigned int resizer_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_1X16,
- MEDIA_BUS_FMT_YUYV8_1X16,
-};
-
-/*
- * resizer_print_status - Print current RESIZER Module register values.
- * @resizer: Pointer to ISS ISP RESIZER device.
- *
- * Also prints other debug information stored in the RESIZER module.
- */
-#define RSZ_PRINT_REGISTER(iss, name)\
- dev_dbg(iss->dev, "###RSZ " #name "=0x%08x\n", \
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_##name))
-
-#define RZA_PRINT_REGISTER(iss, name)\
- dev_dbg(iss->dev, "###RZA " #name "=0x%08x\n", \
- iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_##name))
-
-static void resizer_print_status(struct iss_resizer_device *resizer)
-{
- struct iss_device *iss = to_iss_device(resizer);
-
- dev_dbg(iss->dev, "-------------RESIZER Register dump-------------\n");
-
- RSZ_PRINT_REGISTER(iss, SYSCONFIG);
- RSZ_PRINT_REGISTER(iss, IN_FIFO_CTRL);
- RSZ_PRINT_REGISTER(iss, FRACDIV);
- RSZ_PRINT_REGISTER(iss, SRC_EN);
- RSZ_PRINT_REGISTER(iss, SRC_MODE);
- RSZ_PRINT_REGISTER(iss, SRC_FMT0);
- RSZ_PRINT_REGISTER(iss, SRC_FMT1);
- RSZ_PRINT_REGISTER(iss, SRC_VPS);
- RSZ_PRINT_REGISTER(iss, SRC_VSZ);
- RSZ_PRINT_REGISTER(iss, SRC_HPS);
- RSZ_PRINT_REGISTER(iss, SRC_HSZ);
- RSZ_PRINT_REGISTER(iss, DMA_RZA);
- RSZ_PRINT_REGISTER(iss, DMA_RZB);
- RSZ_PRINT_REGISTER(iss, DMA_STA);
- RSZ_PRINT_REGISTER(iss, GCK_MMR);
- RSZ_PRINT_REGISTER(iss, GCK_SDR);
- RSZ_PRINT_REGISTER(iss, IRQ_RZA);
- RSZ_PRINT_REGISTER(iss, IRQ_RZB);
- RSZ_PRINT_REGISTER(iss, YUV_Y_MIN);
- RSZ_PRINT_REGISTER(iss, YUV_Y_MAX);
- RSZ_PRINT_REGISTER(iss, YUV_C_MIN);
- RSZ_PRINT_REGISTER(iss, YUV_C_MAX);
- RSZ_PRINT_REGISTER(iss, SEQ);
-
- RZA_PRINT_REGISTER(iss, EN);
- RZA_PRINT_REGISTER(iss, MODE);
- RZA_PRINT_REGISTER(iss, 420);
- RZA_PRINT_REGISTER(iss, I_VPS);
- RZA_PRINT_REGISTER(iss, I_HPS);
- RZA_PRINT_REGISTER(iss, O_VSZ);
- RZA_PRINT_REGISTER(iss, O_HSZ);
- RZA_PRINT_REGISTER(iss, V_PHS_Y);
- RZA_PRINT_REGISTER(iss, V_PHS_C);
- RZA_PRINT_REGISTER(iss, V_DIF);
- RZA_PRINT_REGISTER(iss, V_TYP);
- RZA_PRINT_REGISTER(iss, V_LPF);
- RZA_PRINT_REGISTER(iss, H_PHS);
- RZA_PRINT_REGISTER(iss, H_DIF);
- RZA_PRINT_REGISTER(iss, H_TYP);
- RZA_PRINT_REGISTER(iss, H_LPF);
- RZA_PRINT_REGISTER(iss, DWN_EN);
- RZA_PRINT_REGISTER(iss, SDR_Y_BAD_H);
- RZA_PRINT_REGISTER(iss, SDR_Y_BAD_L);
- RZA_PRINT_REGISTER(iss, SDR_Y_SAD_H);
- RZA_PRINT_REGISTER(iss, SDR_Y_SAD_L);
- RZA_PRINT_REGISTER(iss, SDR_Y_OFT);
- RZA_PRINT_REGISTER(iss, SDR_Y_PTR_S);
- RZA_PRINT_REGISTER(iss, SDR_Y_PTR_E);
- RZA_PRINT_REGISTER(iss, SDR_C_BAD_H);
- RZA_PRINT_REGISTER(iss, SDR_C_BAD_L);
- RZA_PRINT_REGISTER(iss, SDR_C_SAD_H);
- RZA_PRINT_REGISTER(iss, SDR_C_SAD_L);
- RZA_PRINT_REGISTER(iss, SDR_C_OFT);
- RZA_PRINT_REGISTER(iss, SDR_C_PTR_S);
- RZA_PRINT_REGISTER(iss, SDR_C_PTR_E);
-
- dev_dbg(iss->dev, "-----------------------------------------------\n");
-}
-
-/*
- * resizer_enable - Enable/Disable RESIZER.
- * @resizer: Pointer to ISS ISP RESIZER device.
- * @enable: enable flag
- */
-static void resizer_enable(struct iss_resizer_device *resizer, u8 enable)
-{
- struct iss_device *iss = to_iss_device(resizer);
-
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_EN,
- RSZ_SRC_EN_SRC_EN, enable ? RSZ_SRC_EN_SRC_EN : 0);
-
- /* TODO: Enable RSZB */
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_EN, RSZ_EN_EN,
- enable ? RSZ_EN_EN : 0);
-}
-
-/* -----------------------------------------------------------------------------
- * Format- and pipeline-related configuration helpers
- */
-
-/*
- * resizer_set_outaddr - Set memory address to save output image
- * @resizer: Pointer to ISP RESIZER device.
- * @addr: 32-bit memory address aligned on 32 byte boundary.
- *
- * Sets the memory address where the output will be saved.
- */
-static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr)
-{
- struct iss_device *iss = to_iss_device(resizer);
- struct v4l2_mbus_framefmt *informat, *outformat;
-
- informat = &resizer->formats[RESIZER_PAD_SINK];
- outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
-
-	/* Save address split into Base Address H & L */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_H,
- (addr >> 16) & 0xffff);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_L,
- addr & 0xffff);
-
- /* SAD = BAD */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_H,
- (addr >> 16) & 0xffff);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_L,
- addr & 0xffff);
-
- /* Program UV buffer address... Hardcoded to be contiguous! */
- if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
- (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
- u32 c_addr = addr + (resizer->video_out.bpl_value *
- (outformat->height - 1));
-
- /* Ensure Y_BAD_L[6:0] = C_BAD_L[6:0]*/
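-		/*
-		 * Round the chroma address up to the next 128-byte boundary
-		 * and copy the luma offset bits whenever the two addresses
-		 * disagree in their 7 LSBs.
-		 */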
- if ((c_addr ^ addr) & 0x7f) {
- c_addr &= ~0x7f;
- c_addr += 0x80;
- c_addr |= addr & 0x7f;
- }
-
-		/* Save address split into Base Address H & L */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_H,
- (c_addr >> 16) & 0xffff);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_L,
- c_addr & 0xffff);
-
- /* SAD = BAD */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_H,
- (c_addr >> 16) & 0xffff);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_L,
- c_addr & 0xffff);
- }
-}
-
-static void resizer_configure(struct iss_resizer_device *resizer)
-{
- struct iss_device *iss = to_iss_device(resizer);
- struct v4l2_mbus_framefmt *informat, *outformat;
-
- informat = &resizer->formats[RESIZER_PAD_SINK];
- outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
-
-	/* Disable pass-through mode. Despite its name, the BYPASS bit controls
- * pass-through mode, not bypass mode.
- */
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
- RSZ_SRC_FMT0_BYPASS);
-
- /* Select RSZ input */
- iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
- RSZ_SRC_FMT0_SEL,
- resizer->input == RESIZER_INPUT_IPIPEIF ?
- RSZ_SRC_FMT0_SEL : 0);
-
- /* RSZ ignores WEN signal from IPIPE/IPIPEIF */
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
- RSZ_SRC_MODE_WRT);
-
- /* Set Resizer in free-running mode */
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
- RSZ_SRC_MODE_OST);
-
- /* Init Resizer A */
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_MODE,
- RZA_MODE_ONE_SHOT);
-
- /* Set size related things now */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VPS, 0);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HPS, 0);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VSZ,
- informat->height - 2);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HSZ,
- informat->width - 1);
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_VPS, 0);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_HPS, 0);
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_VSZ,
- outformat->height - 2);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_HSZ,
- outformat->width - 1);
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_V_DIF, 0x100);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_H_DIF, 0x100);
-
- /* Buffer output settings */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_S, 0);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_E,
- outformat->height - 1);
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_OFT,
- resizer->video_out.bpl_value);
-
- /* UYVY -> NV12 conversion */
- if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
- (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420,
- RSZ_420_CEN | RSZ_420_YEN);
-
- /* UV Buffer output settings */
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_S,
- 0);
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_E,
- outformat->height - 1);
-
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_OFT,
- resizer->video_out.bpl_value);
- } else {
- iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420, 0);
- }
-}
-
-/* -----------------------------------------------------------------------------
- * Interrupt handling
- */
-
-static void resizer_isr_buffer(struct iss_resizer_device *resizer)
-{
- struct iss_buffer *buffer;
-
- /* The whole resizer needs to be stopped. Disabling RZA only produces
- * input FIFO overflows, most probably when the next frame is received.
- */
- resizer_enable(resizer, 0);
-
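-	/*
-	 * Fetch the next queued buffer. If none is available, leave the
-	 * resizer disabled; resizer_video_queue() restarts it when a new
-	 * buffer gets queued.
-	 */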
- buffer = omap4iss_video_buffer_next(&resizer->video_out);
- if (buffer == NULL)
- return;
-
- resizer_set_outaddr(resizer, buffer->iss_addr);
-
- resizer_enable(resizer, 1);
-}
-
-/*
- * omap4iss_resizer_isr - Configure resizer during interframe time.
- * @resizer: Pointer to ISP RESIZER device.
- * @events: RESIZER events
- */
-void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events)
-{
- struct iss_device *iss = to_iss_device(resizer);
- struct iss_pipeline *pipe =
- to_iss_pipeline(&resizer->subdev.entity);
-
- if (events & (ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
- ISP5_IRQ_RSZ_FIFO_OVF)) {
- dev_dbg(iss->dev, "RSZ Err: FIFO_IN_BLK:%d, FIFO_OVF:%d\n",
- events & ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR ? 1 : 0,
- events & ISP5_IRQ_RSZ_FIFO_OVF ? 1 : 0);
- omap4iss_pipeline_cancel_stream(pipe);
- }
-
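-	/*
-	 * If a stop request is pending, wake up the stop wait queue and
-	 * skip buffer handling for this frame.
-	 */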
- if (omap4iss_module_sync_is_stopping(&resizer->wait,
- &resizer->stopping))
- return;
-
- if (events & ISP5_IRQ_RSZ_INT_DMA)
- resizer_isr_buffer(resizer);
-}
-
-/* -----------------------------------------------------------------------------
- * ISS video operations
- */
-
-static int resizer_video_queue(struct iss_video *video,
- struct iss_buffer *buffer)
-{
- struct iss_resizer_device *resizer = container_of(video,
- struct iss_resizer_device, video_out);
-
- if (!(resizer->output & RESIZER_OUTPUT_MEMORY))
- return -ENODEV;
-
- resizer_set_outaddr(resizer, buffer->iss_addr);
-
- /*
-	 * If streaming was enabled before a buffer was queued, or if an
-	 * underrun occurred in the ISR, the hardware was not enabled and
-	 * the DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
-	 * Enable the hardware now.
- */
- if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
- resizer_enable(resizer, 1);
- iss_video_dmaqueue_flags_clr(video);
- }
-
- return 0;
-}
-
-static const struct iss_video_operations resizer_video_ops = {
- .queue = resizer_video_queue,
-};
-
-/* -----------------------------------------------------------------------------
- * V4L2 subdev operations
- */
-
-/*
- * resizer_set_stream - Enable/Disable streaming on the RESIZER module
- * @sd: ISP RESIZER V4L2 subdevice
- * @enable: Enable/disable stream
- */
-static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct iss_device *iss = to_iss_device(resizer);
- struct iss_video *video_out = &resizer->video_out;
- int ret = 0;
-
- if (resizer->state == ISS_PIPELINE_STREAM_STOPPED) {
- if (enable == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
-
- omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
-
- iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
- RSZ_GCK_MMR_MMR);
- iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
- RSZ_GCK_SDR_CORE);
-
- /* FIXME: Enable RSZB also */
- iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
- RSZ_SYSCONFIG_RSZA_CLK_EN);
- }
-
- switch (enable) {
- case ISS_PIPELINE_STREAM_CONTINUOUS:
-
- resizer_configure(resizer);
- resizer_print_status(resizer);
-
- /*
- * When outputting to memory with no buffer available, let the
- * buffer queue handler start the hardware. A DMA queue flag
- * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
- * a buffer available.
- */
- if (resizer->output & RESIZER_OUTPUT_MEMORY &&
- !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
- break;
-
- atomic_set(&resizer->stopping, 0);
- resizer_enable(resizer, 1);
- iss_video_dmaqueue_flags_clr(video_out);
- break;
-
- case ISS_PIPELINE_STREAM_STOPPED:
- if (resizer->state == ISS_PIPELINE_STREAM_STOPPED)
- return 0;
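-		/*
-		 * Wait for the current frame to complete. If the module
-		 * fails to idle in time, report a timeout but still shut
-		 * down the hardware and clocks below.
-		 */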
- if (omap4iss_module_sync_idle(&sd->entity, &resizer->wait,
- &resizer->stopping))
- ret = -ETIMEDOUT;
-
- resizer_enable(resizer, 0);
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
- RSZ_SYSCONFIG_RSZA_CLK_EN);
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
- RSZ_GCK_SDR_CORE);
- iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
- RSZ_GCK_MMR_MMR);
- omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
- iss_video_dmaqueue_flags_clr(video_out);
- break;
- }
-
- resizer->state = enable;
- return ret;
-}
-
-static struct v4l2_mbus_framefmt *
-__resizer_get_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&resizer->subdev, cfg, pad);
- return &resizer->formats[pad];
-}
-
-/*
- * resizer_try_format - Try video format on a pad
- * @resizer: ISS RESIZER device
- * @cfg: V4L2 subdev pad config
- * @pad: Pad number
- * @fmt: Format
- * @which: Format to modify, try or active
- */
-static void
-resizer_try_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- u32 pixelcode;
- struct v4l2_mbus_framefmt *format;
- unsigned int width = fmt->width;
- unsigned int height = fmt->height;
- unsigned int i;
-
- switch (pad) {
- case RESIZER_PAD_SINK:
- for (i = 0; i < ARRAY_SIZE(resizer_fmts); i++) {
- if (fmt->code == resizer_fmts[i])
- break;
- }
-
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(resizer_fmts))
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
-
- /* Clamp the input size. */
- fmt->width = clamp_t(u32, width, 1, 8192);
- fmt->height = clamp_t(u32, height, 1, 8192);
- break;
-
- case RESIZER_PAD_SOURCE_MEM:
- pixelcode = fmt->code;
- format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
- which);
- memcpy(fmt, format, sizeof(*fmt));
-
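-		/*
-		 * The memory output format defaults to the sink format. The
-		 * only supported conversion is from packed UYVY input to the
-		 * downsampled YUYV8_1_5X8 (YUV 4:2:0) output.
-		 */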
- if ((pixelcode == MEDIA_BUS_FMT_YUYV8_1_5X8) &&
- (fmt->code == MEDIA_BUS_FMT_UYVY8_1X16))
- fmt->code = pixelcode;
-
- /* The data formatter truncates the number of horizontal output
- * pixels to a multiple of 16. To avoid clipping data, allow
- * callers to request an output size bigger than the input size
- * up to the nearest multiple of 16.
- */
- fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
- fmt->width &= ~15;
- fmt->height = clamp_t(u32, height, 32, fmt->height);
- break;
-
- }
-
- fmt->colorspace = V4L2_COLORSPACE_JPEG;
- fmt->field = V4L2_FIELD_NONE;
-}
-
-/*
- * resizer_enum_mbus_code - Handle pixel format enumeration
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @code : pointer to v4l2_subdev_mbus_code_enum structure
- * return -EINVAL or zero on success
- */
-static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- switch (code->pad) {
- case RESIZER_PAD_SINK:
- if (code->index >= ARRAY_SIZE(resizer_fmts))
- return -EINVAL;
-
- code->code = resizer_fmts[code->index];
- break;
-
- case RESIZER_PAD_SOURCE_MEM:
- format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
- code->which);
-
- if (code->index == 0) {
- code->code = format->code;
- break;
- }
-
- switch (format->code) {
- case MEDIA_BUS_FMT_UYVY8_1X16:
- if (code->index == 1)
- code->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
- else
- return -EINVAL;
- break;
- default:
- if (code->index != 0)
- return -EINVAL;
- }
-
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
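-	/*
-	 * Probe the minimum size by letting resizer_try_format() clamp a
-	 * 1x1 request, then the maximum size by requesting an oversized
-	 * frame (-1 wraps around to the largest possible value).
-	 */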
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * resizer_get_format - Retrieve the video format on a pad
- * @sd : ISP RESIZER V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: Format
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int resizer_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- fmt->format = *format;
- return 0;
-}
-
-/*
- * resizer_set_format - Set the video format on a pad
- * @sd : ISP RESIZER V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: Format
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int resizer_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- resizer_try_format(resizer, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- /* Propagate the format from sink to source */
- if (fmt->pad == RESIZER_PAD_SINK) {
- format = __resizer_get_format(resizer, cfg,
- RESIZER_PAD_SOURCE_MEM,
- fmt->which);
- *format = fmt->format;
- resizer_try_format(resizer, cfg, RESIZER_PAD_SOURCE_MEM, format,
- fmt->which);
- }
-
- return 0;
-}
-
-static int resizer_link_validate(struct v4l2_subdev *sd,
- struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt)
-{
- /* Check if the two ends match */
- if (source_fmt->format.width != sink_fmt->format.width ||
- source_fmt->format.height != sink_fmt->format.height)
- return -EPIPE;
-
- if (source_fmt->format.code != sink_fmt->format.code)
- return -EPIPE;
-
- return 0;
-}
-
-/*
- * resizer_init_formats - Initialize formats on all pads
- * @sd: ISP RESIZER V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. If fh is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
- */
-static int resizer_init_formats(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SINK;
- format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
- format.format.width = 4096;
- format.format.height = 4096;
- resizer_set_format(sd, fh ? fh->pad : NULL, &format);
-
- return 0;
-}
-
-/* V4L2 subdev video operations */
-static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
- .s_stream = resizer_set_stream,
-};
-
-/* V4L2 subdev pad operations */
-static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
- .enum_mbus_code = resizer_enum_mbus_code,
- .enum_frame_size = resizer_enum_frame_size,
- .get_fmt = resizer_get_format,
- .set_fmt = resizer_set_format,
- .link_validate = resizer_link_validate,
-};
-
-/* V4L2 subdev operations */
-static const struct v4l2_subdev_ops resizer_v4l2_ops = {
- .video = &resizer_v4l2_video_ops,
- .pad = &resizer_v4l2_pad_ops,
-};
-
-/* V4L2 subdev internal operations */
-static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
- .open = resizer_init_formats,
-};
-
-/* -----------------------------------------------------------------------------
- * Media entity operations
- */
-
-/*
- * resizer_link_setup - Setup RESIZER connections
- * @entity: RESIZER media entity
- * @local: Pad at the local end of the link
- * @remote: Pad at the remote end of the link
- * @flags: Link flags
- *
- * return -EINVAL or zero on success
- */
-static int resizer_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct iss_device *iss = to_iss_device(resizer);
-
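-	/*
-	 * Combine the local pad index with the type of the remote entity
-	 * so that a single switch statement can match both ends of the
-	 * link.
-	 */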
- switch (local->index | media_entity_type(remote->entity)) {
- case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
- /* Read from IPIPE or IPIPEIF. */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->input = RESIZER_INPUT_NONE;
- break;
- }
-
- if (resizer->input != RESIZER_INPUT_NONE)
- return -EBUSY;
-
- if (remote->entity == &iss->ipipeif.subdev.entity)
- resizer->input = RESIZER_INPUT_IPIPEIF;
- else if (remote->entity == &iss->ipipe.subdev.entity)
- resizer->input = RESIZER_INPUT_IPIPE;
-
- break;
-
- case RESIZER_PAD_SOURCE_MEM | MEDIA_ENT_T_DEVNODE:
- /* Write to memory */
- if (flags & MEDIA_LNK_FL_ENABLED) {
- if (resizer->output & ~RESIZER_OUTPUT_MEMORY)
- return -EBUSY;
- resizer->output |= RESIZER_OUTPUT_MEMORY;
- } else {
- resizer->output &= ~RESIZER_OUTPUT_MEMORY;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* media operations */
-static const struct media_entity_operations resizer_media_ops = {
- .link_setup = resizer_link_setup,
- .link_validate = v4l2_subdev_link_validate,
-};
-
-/*
- * resizer_init_entities - Initialize V4L2 subdev and media entity
- * @resizer: ISS ISP RESIZER module
- *
- * Return 0 on success and a negative error code on failure.
- */
-static int resizer_init_entities(struct iss_resizer_device *resizer)
-{
- struct v4l2_subdev *sd = &resizer->subdev;
- struct media_pad *pads = resizer->pads;
- struct media_entity *me = &sd->entity;
- int ret;
-
- resizer->input = RESIZER_INPUT_NONE;
-
- v4l2_subdev_init(sd, &resizer_v4l2_ops);
- sd->internal_ops = &resizer_v4l2_internal_ops;
- strlcpy(sd->name, "OMAP4 ISS ISP resizer", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
- v4l2_set_subdevdata(sd, resizer);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[RESIZER_PAD_SOURCE_MEM].flags = MEDIA_PAD_FL_SOURCE;
-
- me->ops = &resizer_media_ops;
- ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
- if (ret < 0)
- return ret;
-
- resizer_init_formats(sd, NULL);
-
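-	/* Set up the video capture node backing the memory output pad. */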
- resizer->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- resizer->video_out.ops = &resizer_video_ops;
- resizer->video_out.iss = to_iss_device(resizer);
- resizer->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
- resizer->video_out.bpl_alignment = 32;
- resizer->video_out.bpl_zero_padding = 1;
- resizer->video_out.bpl_max = 0x1ffe0;
-
- ret = omap4iss_video_init(&resizer->video_out, "ISP resizer a");
- if (ret < 0)
- return ret;
-
- /* Connect the RESIZER subdev to the video node. */
- ret = media_entity_create_link(&resizer->subdev.entity,
- RESIZER_PAD_SOURCE_MEM,
- &resizer->video_out.video.entity, 0, 0);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer)
-{
- v4l2_device_unregister_subdev(&resizer->subdev);
- omap4iss_video_unregister(&resizer->video_out);
-}
-
-int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev and video node. */
- ret = v4l2_device_register_subdev(vdev, &resizer->subdev);
- if (ret < 0)
- goto error;
-
- ret = omap4iss_video_register(&resizer->video_out, vdev);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- omap4iss_resizer_unregister_entities(resizer);
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
- * ISP RESIZER initialisation and cleanup
- */
-
-/*
- * omap4iss_resizer_init - RESIZER module initialization.
- * @iss: Device pointer specific to the OMAP4 ISS.
- *
- * TODO: Get the initialisation values from platform data.
- *
- * Return 0 on success or a negative error code otherwise.
- */
-int omap4iss_resizer_init(struct iss_device *iss)
-{
- struct iss_resizer_device *resizer = &iss->resizer;
-
- resizer->state = ISS_PIPELINE_STREAM_STOPPED;
- init_waitqueue_head(&resizer->wait);
-
- return resizer_init_entities(resizer);
-}
-
-/*
- * omap4iss_resizer_cleanup - RESIZER module cleanup.
- * @iss: Device pointer specific to the OMAP4 ISS.
- */
-void omap4iss_resizer_cleanup(struct iss_device *iss)
-{
- struct iss_resizer_device *resizer = &iss->resizer;
-
- media_entity_cleanup(&resizer->subdev.entity);
-}
diff --git a/drivers/staging/media/omap4iss/iss_resizer.h b/drivers/staging/media/omap4iss/iss_resizer.h
deleted file mode 100644
index 3727498b06a3..000000000000
--- a/drivers/staging/media/omap4iss/iss_resizer.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef OMAP4_ISS_RESIZER_H
-#define OMAP4_ISS_RESIZER_H
-
-#include "iss_video.h"
-
-enum resizer_input_entity {
- RESIZER_INPUT_NONE,
- RESIZER_INPUT_IPIPE,
- RESIZER_INPUT_IPIPEIF
-};
-
-#define RESIZER_OUTPUT_MEMORY (1 << 0)
-
-/* Sink and source RESIZER pads */
-#define RESIZER_PAD_SINK 0
-#define RESIZER_PAD_SOURCE_MEM 1
-#define RESIZER_PADS_NUM 2
-
-/*
- * struct iss_resizer_device - Structure for the RESIZER module to store its own
- * information
- * @subdev: V4L2 subdevice
- * @pads: Sink and source media entity pads
- * @formats: Active video formats
- * @input: Active input
- * @output: Active outputs
- * @video_out: Output video node
- * @error: A hardware error occurred during capture
- * @state: Streaming state
- * @wait: Wait queue used to stop the module
- * @stopping: Stopping state
- */
-struct iss_resizer_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[RESIZER_PADS_NUM];
- struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
-
- enum resizer_input_entity input;
- unsigned int output;
- struct iss_video video_out;
- unsigned int error;
-
- enum iss_pipeline_stream_state state;
- wait_queue_head_t wait;
- atomic_t stopping;
-};
-
-struct iss_device;
-
-int omap4iss_resizer_init(struct iss_device *iss);
-void omap4iss_resizer_cleanup(struct iss_device *iss);
-int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
- struct v4l2_device *vdev);
-void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer);
-
-int omap4iss_resizer_busy(struct iss_resizer_device *resizer);
-void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events);
-void omap4iss_resizer_restore_context(struct iss_device *iss);
-void omap4iss_resizer_max_rate(struct iss_resizer_device *resizer,
- unsigned int *max_rate);
-
-#endif /* OMAP4_ISS_RESIZER_H */
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
deleted file mode 100644
index 40405d8710a6..000000000000
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ /dev/null
@@ -1,1159 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - Generic video node
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <asm/cacheflush.h>
-#include <linux/clk.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-ioctl.h>
-
-#include "iss_video.h"
-#include "iss.h"
-
-/* -----------------------------------------------------------------------------
- * Helper functions
- */
-
-static struct iss_format_info formats[] = {
- { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
- V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", },
- { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
- V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", },
- { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
- V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", },
- { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
- V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", },
- { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
- V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", },
- { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
- V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", },
- { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
- V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", },
- { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGRBG10_1X10, 0,
- V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8", },
- { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
- V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", },
- { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
- V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", },
- { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
- V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", },
- { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
- V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", },
- { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
- V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", },
- { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
- V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", },
- { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
- V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", },
- { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
- V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", },
- { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
- MEDIA_BUS_FMT_UYVY8_1X16, 0,
- V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", },
- { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
- MEDIA_BUS_FMT_YUYV8_1X16, 0,
- V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", },
- { MEDIA_BUS_FMT_YUYV8_1_5X8, MEDIA_BUS_FMT_YUYV8_1_5X8,
- MEDIA_BUS_FMT_YUYV8_1_5X8, 0,
- V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", },
-};
-
-const struct iss_format_info *
-omap4iss_video_format_info(u32 code)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- if (formats[i].code == code)
- return &formats[i];
- }
-
- return NULL;
-}
-
-/*
- * iss_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
- * @video: ISS video instance
- * @mbus: v4l2_mbus_framefmt format (input)
- * @pix: v4l2_pix_format format (output)
- *
- * Fill the output pix structure with information from the input mbus format.
- * The bytesperline and sizeimage fields are computed from the requested bytes
- * per line value in the pix format and information from the video instance.
- *
- * Return the number of padding bytes at end of line.
- */
-static unsigned int iss_video_mbus_to_pix(const struct iss_video *video,
- const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix)
-{
- unsigned int bpl = pix->bytesperline;
- unsigned int min_bpl;
- unsigned int i;
-
- memset(pix, 0, sizeof(*pix));
- pix->width = mbus->width;
- pix->height = mbus->height;
-
- /* Skip the last format in the loop so that it will be selected if no
- * match is found.
- */
- for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
- if (formats[i].code == mbus->code)
- break;
- }
-
- min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
-
- /* Clamp the requested bytes per line value. If the maximum bytes per
- * line value is zero, the module doesn't support user configurable line
- * sizes. Override the requested value with the minimum in that case.
- */
- if (video->bpl_max)
- bpl = clamp(bpl, min_bpl, video->bpl_max);
- else
- bpl = min_bpl;
-
- if (!video->bpl_zero_padding || bpl != min_bpl)
- bpl = ALIGN(bpl, video->bpl_alignment);
-
- pix->pixelformat = formats[i].pixelformat;
- pix->bytesperline = bpl;
- pix->sizeimage = pix->bytesperline * pix->height;
- pix->colorspace = mbus->colorspace;
- pix->field = mbus->field;
-
- /* FIXME: Special case for NV12! We should make this nicer... */
- if (pix->pixelformat == V4L2_PIX_FMT_NV12)
- pix->sizeimage += (pix->bytesperline * pix->height) / 2;
-
- return bpl - min_bpl;
-}
-
-static void iss_video_pix_to_mbus(const struct v4l2_pix_format *pix,
- struct v4l2_mbus_framefmt *mbus)
-{
- unsigned int i;
-
- memset(mbus, 0, sizeof(*mbus));
- mbus->width = pix->width;
- mbus->height = pix->height;
-
- /* Skip the last format in the loop so that it will be selected if no
- * match is found.
- */
- for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
- if (formats[i].pixelformat == pix->pixelformat)
- break;
- }
-
- mbus->code = formats[i].code;
- mbus->colorspace = pix->colorspace;
- mbus->field = pix->field;
-}
-
-static struct v4l2_subdev *
-iss_video_remote_subdev(struct iss_video *video, u32 *pad)
-{
- struct media_pad *remote;
-
- remote = media_entity_remote_pad(&video->pad);
-
- if (remote == NULL ||
- media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
- return NULL;
-
- if (pad)
- *pad = remote->index;
-
- return media_entity_to_v4l2_subdev(remote->entity);
-}
-
-/* Return a pointer to the ISS video instance at the far end of the pipeline. */
-static struct iss_video *
-iss_video_far_end(struct iss_video *video)
-{
- struct media_entity_graph graph;
- struct media_entity *entity = &video->video.entity;
- struct media_device *mdev = entity->parent;
- struct iss_video *far_end = NULL;
-
- mutex_lock(&mdev->graph_mutex);
- media_entity_graph_walk_start(&graph, entity);
-
- while ((entity = media_entity_graph_walk_next(&graph))) {
- if (entity == &video->video.entity)
- continue;
-
- if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
- continue;
-
- far_end = to_iss_video(media_entity_to_video_device(entity));
- if (far_end->type != video->type)
- break;
-
- far_end = NULL;
- }
-
- mutex_unlock(&mdev->graph_mutex);
- return far_end;
-}
-
-static int
-__iss_video_get_format(struct iss_video *video,
- struct v4l2_mbus_framefmt *format)
-{
- struct v4l2_subdev_format fmt;
- struct v4l2_subdev *subdev;
- u32 pad;
- int ret;
-
- subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
- return -EINVAL;
-
- memset(&fmt, 0, sizeof(fmt));
- fmt.pad = pad;
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-
- mutex_lock(&video->mutex);
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
- mutex_unlock(&video->mutex);
-
- if (ret)
- return ret;
-
- *format = fmt.format;
- return 0;
-}
-
-static int
-iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
-{
- struct v4l2_mbus_framefmt format;
- struct v4l2_pix_format pixfmt;
- int ret;
-
- ret = __iss_video_get_format(video, &format);
- if (ret < 0)
- return ret;
-
- pixfmt.bytesperline = 0;
- ret = iss_video_mbus_to_pix(video, &format, &pixfmt);
-
- if (vfh->format.fmt.pix.pixelformat != pixfmt.pixelformat ||
- vfh->format.fmt.pix.height != pixfmt.height ||
- vfh->format.fmt.pix.width != pixfmt.width ||
- vfh->format.fmt.pix.bytesperline != pixfmt.bytesperline ||
- vfh->format.fmt.pix.sizeimage != pixfmt.sizeimage)
- return -EINVAL;
-
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
- * Video queue operations
- */
-
-static int iss_video_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
- unsigned int *count, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
-{
- struct iss_video_fh *vfh = vb2_get_drv_priv(vq);
- struct iss_video *video = vfh->video;
-
- /* Revisit multi-planar support for NV12 */
- *num_planes = 1;
-
- sizes[0] = vfh->format.fmt.pix.sizeimage;
- if (sizes[0] == 0)
- return -EINVAL;
-
- alloc_ctxs[0] = video->alloc_ctx;
-
- *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
-
- return 0;
-}
-
-static void iss_video_buf_cleanup(struct vb2_buffer *vb)
-{
- struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
-
- if (buffer->iss_addr)
- buffer->iss_addr = 0;
-}
-
-static int iss_video_buf_prepare(struct vb2_buffer *vb)
-{
- struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
- struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
- struct iss_video *video = vfh->video;
- unsigned long size = vfh->format.fmt.pix.sizeimage;
- dma_addr_t addr;
-
- if (vb2_plane_size(vb, 0) < size)
- return -ENOBUFS;
-
- addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- if (!IS_ALIGNED(addr, 32)) {
- dev_dbg(video->iss->dev,
- "Buffer address must be aligned to 32 bytes boundary.\n");
- return -EINVAL;
- }
-
- vb2_set_plane_payload(vb, 0, size);
- buffer->iss_addr = addr;
- return 0;
-}
-
-static void iss_video_buf_queue(struct vb2_buffer *vb)
-{
- struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
- struct iss_video *video = vfh->video;
- struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
- struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
- unsigned long flags;
- bool empty;
-
- spin_lock_irqsave(&video->qlock, flags);
-
-	/* Mark the buffer as faulty and give it back to the queue immediately
- * if the video node has registered an error. vb2 will perform the same
- * check when preparing the buffer, but that is inherently racy, so we
- * need to handle the race condition with an authoritative check here.
- */
- if (unlikely(video->error)) {
- vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
- spin_unlock_irqrestore(&video->qlock, flags);
- return;
- }
-
- empty = list_empty(&video->dmaqueue);
- list_add_tail(&buffer->list, &video->dmaqueue);
-
- spin_unlock_irqrestore(&video->qlock, flags);
-
- if (empty) {
- enum iss_pipeline_state state;
- unsigned int start;
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- state = ISS_PIPELINE_QUEUE_OUTPUT;
- else
- state = ISS_PIPELINE_QUEUE_INPUT;
-
- spin_lock_irqsave(&pipe->lock, flags);
- pipe->state |= state;
- video->ops->queue(video, buffer);
- video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_QUEUED;
-
- start = iss_pipeline_ready(pipe);
- if (start)
- pipe->state |= ISS_PIPELINE_STREAM;
- spin_unlock_irqrestore(&pipe->lock, flags);
-
- if (start)
- omap4iss_pipeline_set_stream(pipe,
- ISS_PIPELINE_STREAM_SINGLESHOT);
- }
-}
-
-static const struct vb2_ops iss_video_vb2ops = {
- .queue_setup = iss_video_queue_setup,
- .buf_prepare = iss_video_buf_prepare,
- .buf_queue = iss_video_buf_queue,
- .buf_cleanup = iss_video_buf_cleanup,
-};
-
-/*
- * omap4iss_video_buffer_next - Complete the current buffer and return the next
- * @video: ISS video object
- *
- * Remove the current video buffer from the DMA queue and fill its timestamp,
- * field count and state fields before waking up its completion handler.
- *
- * For capture video nodes, the buffer state is set to VB2_BUF_STATE_DONE if no
- * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
- *
- * The DMA queue is expected to contain at least one buffer.
- *
- * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
- * empty.
- */
-struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
-{
- struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
- enum iss_pipeline_state state;
- struct iss_buffer *buf;
- unsigned long flags;
- struct timespec ts;
-
- spin_lock_irqsave(&video->qlock, flags);
- if (WARN_ON(list_empty(&video->dmaqueue))) {
- spin_unlock_irqrestore(&video->qlock, flags);
- return NULL;
- }
-
- buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
- list);
- list_del(&buf->list);
- spin_unlock_irqrestore(&video->qlock, flags);
-
- ktime_get_ts(&ts);
- buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
- buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
-
- /* Do frame number propagation only if this is the output video node.
- * Frame number either comes from the CSI receivers or it gets
- * incremented here if H3A is not active.
- * Note: There is no guarantee that the output buffer will finish
- * first, so the input number might lag behind by 1 in some cases.
- */
- if (video == pipe->output && !pipe->do_propagation)
- buf->vb.v4l2_buf.sequence =
- atomic_inc_return(&pipe->frame_number);
- else
- buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
-
- vb2_buffer_done(&buf->vb, pipe->error ?
- VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
- pipe->error = false;
-
- spin_lock_irqsave(&video->qlock, flags);
- if (list_empty(&video->dmaqueue)) {
- spin_unlock_irqrestore(&video->qlock, flags);
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- state = ISS_PIPELINE_QUEUE_OUTPUT
- | ISS_PIPELINE_STREAM;
- else
- state = ISS_PIPELINE_QUEUE_INPUT
- | ISS_PIPELINE_STREAM;
-
- spin_lock_irqsave(&pipe->lock, flags);
- pipe->state &= ~state;
- if (video->pipe.stream_state == ISS_PIPELINE_STREAM_CONTINUOUS)
- video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
- spin_unlock_irqrestore(&pipe->lock, flags);
- return NULL;
- }
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
- spin_lock(&pipe->lock);
- pipe->state &= ~ISS_PIPELINE_STREAM;
- spin_unlock(&pipe->lock);
- }
-
- buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
- list);
- spin_unlock_irqrestore(&video->qlock, flags);
- buf->vb.state = VB2_BUF_STATE_ACTIVE;
- return buf;
-}
-
-/*
- * omap4iss_video_cancel_stream - Cancel stream on a video node
- * @video: ISS video object
- *
- * Cancelling a stream marks all buffers on the video node as erroneous and
- * makes sure no new buffer can be queued.
- */
-void omap4iss_video_cancel_stream(struct iss_video *video)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&video->qlock, flags);
-
- while (!list_empty(&video->dmaqueue)) {
- struct iss_buffer *buf;
-
- buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
- list);
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
- }
-
- vb2_queue_error(video->queue);
- video->error = true;
-
- spin_unlock_irqrestore(&video->qlock, flags);
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 ioctls
- */
-
-static int
-iss_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
-{
- struct iss_video *video = video_drvdata(file);
-
- strlcpy(cap->driver, ISS_VIDEO_DRIVER_NAME, sizeof(cap->driver));
- strlcpy(cap->card, video->video.name, sizeof(cap->card));
- strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
-
- cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
- | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
-
- return 0;
-}
-
-static int
-iss_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
-{
- struct iss_video *video = video_drvdata(file);
- struct v4l2_mbus_framefmt format;
- unsigned int index = f->index;
- unsigned int i;
- int ret;
-
- if (f->type != video->type)
- return -EINVAL;
-
- ret = __iss_video_get_format(video, &format);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- const struct iss_format_info *info = &formats[i];
-
- if (format.code != info->code)
- continue;
-
- if (index == 0) {
- f->pixelformat = info->pixelformat;
- strlcpy(f->description, info->description,
- sizeof(f->description));
- return 0;
- }
-
- index--;
- }
-
- return -EINVAL;
-}
-
-static int
-iss_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
- struct iss_video *video = video_drvdata(file);
-
- if (format->type != video->type)
- return -EINVAL;
-
- mutex_lock(&video->mutex);
- *format = vfh->format;
- mutex_unlock(&video->mutex);
-
- return 0;
-}
-
-static int
-iss_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
- struct iss_video *video = video_drvdata(file);
- struct v4l2_mbus_framefmt fmt;
-
- if (format->type != video->type)
- return -EINVAL;
-
- mutex_lock(&video->mutex);
-
- /* Fill the bytesperline and sizeimage fields by converting to media bus
- * format and back to pixel format.
- */
- iss_video_pix_to_mbus(&format->fmt.pix, &fmt);
- iss_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
-
- vfh->format = *format;
-
- mutex_unlock(&video->mutex);
- return 0;
-}
-
-static int
-iss_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
-{
- struct iss_video *video = video_drvdata(file);
- struct v4l2_subdev_format fmt;
- struct v4l2_subdev *subdev;
- u32 pad;
- int ret;
-
- if (format->type != video->type)
- return -EINVAL;
-
- subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
- return -EINVAL;
-
- iss_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
-
- fmt.pad = pad;
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
- if (ret)
- return ret;
-
- iss_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
- return 0;
-}
-
-static int
-iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
- struct iss_video *video = video_drvdata(file);
-
- if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
- video->type != a->type)
- return -EINVAL;
-
- memset(a, 0, sizeof(*a));
- a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
- a->parm.output.timeperframe = vfh->timeperframe;
-
- return 0;
-}
-
-static int
-iss_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
- struct iss_video *video = video_drvdata(file);
-
- if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
- video->type != a->type)
- return -EINVAL;
-
- if (a->parm.output.timeperframe.denominator == 0)
- a->parm.output.timeperframe.denominator = 1;
-
- vfh->timeperframe = a->parm.output.timeperframe;
-
- return 0;
-}
-
-static int
-iss_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
-
- return vb2_reqbufs(&vfh->queue, rb);
-}
-
-static int
-iss_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
-
- return vb2_querybuf(&vfh->queue, b);
-}
-
-static int
-iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
-
- return vb2_qbuf(&vfh->queue, b);
-}
-
-static int
-iss_video_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *e)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
-
- return vb2_expbuf(&vfh->queue, e);
-}
-
-static int
-iss_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
-
- return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
-}
-
-/*
- * Stream management
- *
- * Every ISS pipeline has a single input and a single output. The input can be
- * either a sensor or a video node. The output is always a video node.
- *
- * As every pipeline has an output video node, the ISS video object at the
- * pipeline output stores the pipeline state. It tracks the streaming state of
- * both the input and output, as well as the availability of buffers.
- *
- * In sensor-to-memory mode, frames are always available at the pipeline input.
- * Starting the sensor usually requires I2C transfers and must be done in
- * interruptible context. The pipeline is started and stopped synchronously
- * to the stream on/off commands. All modules in the pipeline will get their
- * subdev set stream handler called. The module at the end of the pipeline must
- * delay starting the hardware until buffers are available at its output.
- *
- * In memory-to-memory mode, starting/stopping the stream requires
- * synchronization between the input and output. ISS modules can't be stopped
- * in the middle of a frame, and at least some of the modules seem to become
- * busy as soon as they're started, even if they don't receive a frame start
- * event. For that reason frames need to be processed in single-shot mode. The
- * driver needs to wait until a frame is completely processed and written to
- * memory before restarting the pipeline for the next frame. Pipelined
- * processing might be possible but requires more testing.
- *
- * Stream start must be delayed until buffers are available at both the input
- * and output. The pipeline must be started in the videobuf queue callback with
- * the buffers queue spinlock held. The modules' subdev set stream operation must
- * not sleep.
- */
-static int
-iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
- struct iss_video *video = video_drvdata(file);
- struct media_entity_graph graph;
- struct media_entity *entity;
- enum iss_pipeline_state state;
- struct iss_pipeline *pipe;
- struct iss_video *far_end;
- unsigned long flags;
- int ret;
-
- if (type != video->type)
- return -EINVAL;
-
- mutex_lock(&video->stream_lock);
-
- /* Start streaming on the pipeline. No link touching an entity in the
- * pipeline can be activated or deactivated once streaming is started.
- */
- pipe = video->video.entity.pipe
- ? to_iss_pipeline(&video->video.entity) : &video->pipe;
- pipe->external = NULL;
- pipe->external_rate = 0;
- pipe->external_bpp = 0;
- pipe->entities = 0;
-
- if (video->iss->pdata->set_constraints)
- video->iss->pdata->set_constraints(video->iss, true);
-
- ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
- if (ret < 0)
- goto err_media_entity_pipeline_start;
-
- entity = &video->video.entity;
- media_entity_graph_walk_start(&graph, entity);
- while ((entity = media_entity_graph_walk_next(&graph)))
- pipe->entities |= 1 << entity->id;
-
- /* Verify that the currently configured format matches the output of
- * the connected subdev.
- */
- ret = iss_video_check_format(video, vfh);
- if (ret < 0)
- goto err_iss_video_check_format;
-
- video->bpl_padding = ret;
- video->bpl_value = vfh->format.fmt.pix.bytesperline;
-
- /* Find the ISS video node connected at the far end of the pipeline and
- * update the pipeline.
- */
- far_end = iss_video_far_end(video);
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- state = ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_IDLE_OUTPUT;
- pipe->input = far_end;
- pipe->output = video;
- } else {
- if (far_end == NULL) {
- ret = -EPIPE;
- goto err_iss_video_check_format;
- }
-
- state = ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_IDLE_INPUT;
- pipe->input = video;
- pipe->output = far_end;
- }
-
- spin_lock_irqsave(&pipe->lock, flags);
- pipe->state &= ~ISS_PIPELINE_STREAM;
- pipe->state |= state;
- spin_unlock_irqrestore(&pipe->lock, flags);
-
- /* Set the maximum time per frame as the value requested by userspace.
- * This is a soft limit that can be overridden if the hardware doesn't
-	 * support the requested limit.
- */
- if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- pipe->max_timeperframe = vfh->timeperframe;
-
- video->queue = &vfh->queue;
- INIT_LIST_HEAD(&video->dmaqueue);
- video->error = false;
- atomic_set(&pipe->frame_number, -1);
-
- ret = vb2_streamon(&vfh->queue, type);
- if (ret < 0)
- goto err_iss_video_check_format;
-
- /* In sensor-to-memory mode, the stream can be started synchronously
- * to the stream on command. In memory-to-memory mode, it will be
- * started when buffers are queued on both the input and output.
- */
- if (pipe->input == NULL) {
- unsigned long flags;
-
- ret = omap4iss_pipeline_set_stream(pipe,
- ISS_PIPELINE_STREAM_CONTINUOUS);
- if (ret < 0)
- goto err_omap4iss_set_stream;
- spin_lock_irqsave(&video->qlock, flags);
- if (list_empty(&video->dmaqueue))
- video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
- spin_unlock_irqrestore(&video->qlock, flags);
- }
-
- mutex_unlock(&video->stream_lock);
- return 0;
-
-err_omap4iss_set_stream:
- vb2_streamoff(&vfh->queue, type);
-err_iss_video_check_format:
- media_entity_pipeline_stop(&video->video.entity);
-err_media_entity_pipeline_start:
- if (video->iss->pdata->set_constraints)
- video->iss->pdata->set_constraints(video->iss, false);
- video->queue = NULL;
-
- mutex_unlock(&video->stream_lock);
- return ret;
-}
-
-static int
-iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(fh);
- struct iss_video *video = video_drvdata(file);
- struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
- enum iss_pipeline_state state;
- unsigned long flags;
-
- if (type != video->type)
- return -EINVAL;
-
- mutex_lock(&video->stream_lock);
-
- if (!vb2_is_streaming(&vfh->queue))
- goto done;
-
- /* Update the pipeline state. */
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- state = ISS_PIPELINE_STREAM_OUTPUT
- | ISS_PIPELINE_QUEUE_OUTPUT;
- else
- state = ISS_PIPELINE_STREAM_INPUT
- | ISS_PIPELINE_QUEUE_INPUT;
-
- spin_lock_irqsave(&pipe->lock, flags);
- pipe->state &= ~state;
- spin_unlock_irqrestore(&pipe->lock, flags);
-
- /* Stop the stream. */
- omap4iss_pipeline_set_stream(pipe, ISS_PIPELINE_STREAM_STOPPED);
- vb2_streamoff(&vfh->queue, type);
- video->queue = NULL;
-
- if (video->iss->pdata->set_constraints)
- video->iss->pdata->set_constraints(video->iss, false);
- media_entity_pipeline_stop(&video->video.entity);
-
-done:
- mutex_unlock(&video->stream_lock);
- return 0;
-}
-
-static int
-iss_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
-{
- if (input->index > 0)
- return -EINVAL;
-
- strlcpy(input->name, "camera", sizeof(input->name));
- input->type = V4L2_INPUT_TYPE_CAMERA;
-
- return 0;
-}
-
-static int
-iss_video_g_input(struct file *file, void *fh, unsigned int *input)
-{
- *input = 0;
-
- return 0;
-}
-
-static int
-iss_video_s_input(struct file *file, void *fh, unsigned int input)
-{
- return input == 0 ? 0 : -EINVAL;
-}
-
-static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
- .vidioc_querycap = iss_video_querycap,
- .vidioc_enum_fmt_vid_cap = iss_video_enum_format,
- .vidioc_g_fmt_vid_cap = iss_video_get_format,
- .vidioc_s_fmt_vid_cap = iss_video_set_format,
- .vidioc_try_fmt_vid_cap = iss_video_try_format,
- .vidioc_g_fmt_vid_out = iss_video_get_format,
- .vidioc_s_fmt_vid_out = iss_video_set_format,
- .vidioc_try_fmt_vid_out = iss_video_try_format,
- .vidioc_g_parm = iss_video_get_param,
- .vidioc_s_parm = iss_video_set_param,
- .vidioc_reqbufs = iss_video_reqbufs,
- .vidioc_querybuf = iss_video_querybuf,
- .vidioc_qbuf = iss_video_qbuf,
- .vidioc_expbuf = iss_video_expbuf,
- .vidioc_dqbuf = iss_video_dqbuf,
- .vidioc_streamon = iss_video_streamon,
- .vidioc_streamoff = iss_video_streamoff,
- .vidioc_enum_input = iss_video_enum_input,
- .vidioc_g_input = iss_video_g_input,
- .vidioc_s_input = iss_video_s_input,
-};
-
-/* -----------------------------------------------------------------------------
- * V4L2 file operations
- */
-
-static int iss_video_open(struct file *file)
-{
- struct iss_video *video = video_drvdata(file);
- struct iss_video_fh *handle;
- struct vb2_queue *q;
- int ret = 0;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (handle == NULL)
- return -ENOMEM;
-
- v4l2_fh_init(&handle->vfh, &video->video);
- v4l2_fh_add(&handle->vfh);
-
- /* If this is the first user, initialise the pipeline. */
- if (omap4iss_get(video->iss) == NULL) {
- ret = -EBUSY;
- goto done;
- }
-
- ret = omap4iss_pipeline_pm_use(&video->video.entity, 1);
- if (ret < 0) {
- omap4iss_put(video->iss);
- goto done;
- }
-
- video->alloc_ctx = vb2_dma_contig_init_ctx(video->iss->dev);
- if (IS_ERR(video->alloc_ctx)) {
- ret = PTR_ERR(video->alloc_ctx);
- omap4iss_put(video->iss);
- goto done;
- }
-
- q = &handle->queue;
-
- q->type = video->type;
- q->io_modes = VB2_MMAP | VB2_DMABUF;
- q->drv_priv = handle;
- q->ops = &iss_video_vb2ops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->buf_struct_size = sizeof(struct iss_buffer);
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-
- ret = vb2_queue_init(q);
- if (ret) {
- omap4iss_put(video->iss);
- goto done;
- }
-
- memset(&handle->format, 0, sizeof(handle->format));
- handle->format.type = video->type;
- handle->timeperframe.denominator = 1;
-
- handle->video = video;
- file->private_data = &handle->vfh;
-
-done:
- if (ret < 0) {
- v4l2_fh_del(&handle->vfh);
- kfree(handle);
- }
-
- return ret;
-}
-
-static int iss_video_release(struct file *file)
-{
- struct iss_video *video = video_drvdata(file);
- struct v4l2_fh *vfh = file->private_data;
- struct iss_video_fh *handle = to_iss_video_fh(vfh);
-
- /* Disable streaming and free the buffers queue resources. */
- iss_video_streamoff(file, vfh, video->type);
-
- omap4iss_pipeline_pm_use(&video->video.entity, 0);
-
- /* Release the videobuf2 queue */
- vb2_queue_release(&handle->queue);
-
- /* Release the file handle. */
- v4l2_fh_del(vfh);
- kfree(handle);
- file->private_data = NULL;
-
- omap4iss_put(video->iss);
-
- return 0;
-}
-
-static unsigned int iss_video_poll(struct file *file, poll_table *wait)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
-
- return vb2_poll(&vfh->queue, file, wait);
-}
-
-static int iss_video_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
-
- return vb2_mmap(&vfh->queue, vma);
-}
-
-static struct v4l2_file_operations iss_video_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
- .open = iss_video_open,
- .release = iss_video_release,
- .poll = iss_video_poll,
- .mmap = iss_video_mmap,
-};
-
-/* -----------------------------------------------------------------------------
- * ISS video core
- */
-
-static const struct iss_video_operations iss_video_dummy_ops = {
-};
-
-int omap4iss_video_init(struct iss_video *video, const char *name)
-{
- const char *direction;
- int ret;
-
- switch (video->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- direction = "output";
- video->pad.flags = MEDIA_PAD_FL_SINK;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- direction = "input";
- video->pad.flags = MEDIA_PAD_FL_SOURCE;
- break;
-
- default:
- return -EINVAL;
- }
-
- ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
- if (ret < 0)
- return ret;
-
- spin_lock_init(&video->qlock);
- mutex_init(&video->mutex);
- atomic_set(&video->active, 0);
-
- spin_lock_init(&video->pipe.lock);
- mutex_init(&video->stream_lock);
-
- /* Initialize the video device. */
- if (video->ops == NULL)
- video->ops = &iss_video_dummy_ops;
-
- video->video.fops = &iss_video_fops;
- snprintf(video->video.name, sizeof(video->video.name),
- "OMAP4 ISS %s %s", name, direction);
- video->video.vfl_type = VFL_TYPE_GRABBER;
- video->video.release = video_device_release_empty;
- video->video.ioctl_ops = &iss_video_ioctl_ops;
- video->pipe.stream_state = ISS_PIPELINE_STREAM_STOPPED;
-
- video_set_drvdata(&video->video, video);
-
- return 0;
-}
-
-void omap4iss_video_cleanup(struct iss_video *video)
-{
- media_entity_cleanup(&video->video.entity);
- mutex_destroy(&video->stream_lock);
- mutex_destroy(&video->mutex);
-}
-
-int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
-{
- int ret;
-
- video->video.v4l2_dev = vdev;
-
- ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
- if (ret < 0)
- dev_err(video->iss->dev,
- "could not register video device (%d)\n", ret);
-
- return ret;
-}
-
-void omap4iss_video_unregister(struct iss_video *video)
-{
- video_unregister_device(&video->video);
-}
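As a worked sketch of the bytesperline/sizeimage computation performed by iss_video_mbus_to_pix() above, assume a 1280x720 NV12 request with no user-supplied bytesperline and the 32-byte bpl_alignment used by the ISS video nodes; the helper name is illustrative only:

	/* NV12: 8 bpp luma plane followed by a half-height chroma plane. */
	static unsigned int nv12_sizeimage_1280x720(void)
	{
		unsigned int min_bpl = 1280 * ALIGN(8, 8) / 8;	/* 1280 bytes */
		unsigned int bpl = ALIGN(min_bpl, 32);		/* still 1280 */
		unsigned int luma = bpl * 720;			/* 921600 bytes */

		return luma + luma / 2;				/* 1382400 bytes */
	}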
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
deleted file mode 100644
index f11fce2cb977..000000000000
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * TI OMAP4 ISS V4L2 Driver - Generic video node
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef OMAP4_ISS_VIDEO_H
-#define OMAP4_ISS_VIDEO_H
-
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-fh.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-contig.h>
-
-#define ISS_VIDEO_DRIVER_NAME "issvideo"
-#define ISS_VIDEO_DRIVER_VERSION "0.0.2"
-
-struct iss_device;
-struct iss_video;
-struct v4l2_mbus_framefmt;
-struct v4l2_pix_format;
-
-/*
- * struct iss_format_info - ISS media bus format information
- * @code: V4L2 media bus format code
- * @truncated: V4L2 media bus format code for the same format truncated to 10
- * bits. Identical to @code if the format is 10 bits wide or less.
- * @uncompressed: V4L2 media bus format code for the corresponding uncompressed
- * format. Identical to @code if the format is not DPCM compressed.
- * @flavor: V4L2 media bus format code for the same pixel layout but
- * shifted to be 8 bits per pixel. =0 if format is not shiftable.
- * @pixelformat: V4L2 pixel format FCC identifier
- * @bpp: Bits per pixel
- * @description: Human-readable format description
- */
-struct iss_format_info {
- u32 code;
- u32 truncated;
- u32 uncompressed;
- u32 flavor;
- u32 pixelformat;
- unsigned int bpp;
- const char *description;
-};
-
-enum iss_pipeline_stream_state {
- ISS_PIPELINE_STREAM_STOPPED = 0,
- ISS_PIPELINE_STREAM_CONTINUOUS = 1,
- ISS_PIPELINE_STREAM_SINGLESHOT = 2,
-};
-
-enum iss_pipeline_state {
- /* The stream has been started on the input video node. */
- ISS_PIPELINE_STREAM_INPUT = 1,
- /* The stream has been started on the output video node. */
- ISS_PIPELINE_STREAM_OUTPUT = (1 << 1),
- /* At least one buffer is queued on the input video node. */
- ISS_PIPELINE_QUEUE_INPUT = (1 << 2),
- /* At least one buffer is queued on the output video node. */
- ISS_PIPELINE_QUEUE_OUTPUT = (1 << 3),
- /* The input entity is idle, ready to be started. */
- ISS_PIPELINE_IDLE_INPUT = (1 << 4),
- /* The output entity is idle, ready to be started. */
- ISS_PIPELINE_IDLE_OUTPUT = (1 << 5),
- /* The pipeline is currently streaming. */
- ISS_PIPELINE_STREAM = (1 << 6),
-};
-
-/*
- * struct iss_pipeline - An OMAP4 ISS hardware pipeline
- * @entities: Bitmask of entities in the pipeline (indexed by entity ID)
- * @error: A hardware error occurred during capture
- */
-struct iss_pipeline {
- struct media_pipeline pipe;
- spinlock_t lock; /* Pipeline state and queue flags */
- unsigned int state;
- enum iss_pipeline_stream_state stream_state;
- struct iss_video *input;
- struct iss_video *output;
- unsigned int entities;
- atomic_t frame_number;
- bool do_propagation; /* of frame number */
- bool error;
- struct v4l2_fract max_timeperframe;
- struct v4l2_subdev *external;
- unsigned int external_rate;
- int external_bpp;
-};
-
-#define to_iss_pipeline(__e) \
- container_of((__e)->pipe, struct iss_pipeline, pipe)
-
-static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
-{
- return pipe->state == (ISS_PIPELINE_STREAM_INPUT |
- ISS_PIPELINE_STREAM_OUTPUT |
- ISS_PIPELINE_QUEUE_INPUT |
- ISS_PIPELINE_QUEUE_OUTPUT |
- ISS_PIPELINE_IDLE_INPUT |
- ISS_PIPELINE_IDLE_OUTPUT);
-}
-
-/*
- * struct iss_buffer - ISS buffer
- * @buffer: ISS video buffer
- * @iss_addr: Physical address of the buffer.
- */
-struct iss_buffer {
- /* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
- struct list_head list;
- dma_addr_t iss_addr;
-};
-
-#define to_iss_buffer(buf) container_of(buf, struct iss_buffer, buffer)
-
-enum iss_video_dmaqueue_flags {
- /* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */
- ISS_VIDEO_DMAQUEUE_UNDERRUN = (1 << 0),
- /* Set when queuing buffer to an empty DMA queue */
- ISS_VIDEO_DMAQUEUE_QUEUED = (1 << 1),
-};
-
-#define iss_video_dmaqueue_flags_clr(video) \
- ({ (video)->dmaqueue_flags = 0; })
-
-/*
- * struct iss_video_operations - ISS video operations
- * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
- * if there was no buffer previously queued.
- */
-struct iss_video_operations {
- int (*queue)(struct iss_video *video, struct iss_buffer *buffer);
-};
-
-struct iss_video {
- struct video_device video;
- enum v4l2_buf_type type;
- struct media_pad pad;
-
- struct mutex mutex; /* format and crop settings */
- atomic_t active;
-
- struct iss_device *iss;
-
- unsigned int capture_mem;
- unsigned int bpl_alignment; /* alignment value */
- unsigned int bpl_zero_padding; /* whether the alignment is optional */
- unsigned int bpl_max; /* maximum bytes per line value */
- unsigned int bpl_value; /* bytes per line value */
- unsigned int bpl_padding; /* padding at end of line */
-
- /* Pipeline state */
- struct iss_pipeline pipe;
- struct mutex stream_lock; /* pipeline and stream states */
- bool error;
-
- /* Video buffers queue */
- struct vb2_queue *queue;
- spinlock_t qlock; /* protects dmaqueue and error */
- struct list_head dmaqueue;
- enum iss_video_dmaqueue_flags dmaqueue_flags;
- struct vb2_alloc_ctx *alloc_ctx;
-
- const struct iss_video_operations *ops;
-};
-
-#define to_iss_video(vdev) container_of(vdev, struct iss_video, video)
-
-struct iss_video_fh {
- struct v4l2_fh vfh;
- struct iss_video *video;
- struct vb2_queue queue;
- struct v4l2_format format;
- struct v4l2_fract timeperframe;
-};
-
-#define to_iss_video_fh(fh) container_of(fh, struct iss_video_fh, vfh)
-#define iss_video_queue_to_iss_video_fh(q) \
- container_of(q, struct iss_video_fh, queue)
-
-int omap4iss_video_init(struct iss_video *video, const char *name);
-void omap4iss_video_cleanup(struct iss_video *video);
-int omap4iss_video_register(struct iss_video *video,
- struct v4l2_device *vdev);
-void omap4iss_video_unregister(struct iss_video *video);
-struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video);
-void omap4iss_video_cancel_stream(struct iss_video *video);
-struct media_pad *omap4iss_video_remote_pad(struct iss_video *video);
-
-const struct iss_format_info *
-omap4iss_video_format_info(u32 code);
-
-#endif /* OMAP4_ISS_VIDEO_H */
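The stream management rules documented in iss_video.c reduce to the single bitmask test in iss_pipeline_ready() above: the pipeline may start only when the input and output stream, queue and idle bits are all set while ISS_PIPELINE_STREAM is still clear. A minimal sketch using only the ISS_PIPELINE_* flags from this header; the mask macro and helper name are illustrative:

	#define ISS_PIPELINE_READY_MASK	(ISS_PIPELINE_STREAM_INPUT | \
					 ISS_PIPELINE_STREAM_OUTPUT | \
					 ISS_PIPELINE_QUEUE_INPUT | \
					 ISS_PIPELINE_QUEUE_OUTPUT | \
					 ISS_PIPELINE_IDLE_INPUT | \
					 ISS_PIPELINE_IDLE_OUTPUT)

	static inline bool pipeline_can_start(unsigned int state)
	{
		/* Strict equality: a missing bit or an extra bit such as
		 * ISS_PIPELINE_STREAM keeps the pipeline stopped.
		 */
		return state == ISS_PIPELINE_READY_MASK;
	}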
diff --git a/drivers/staging/media/starfive/Kconfig b/drivers/staging/media/starfive/Kconfig
new file mode 100644
index 000000000000..34727cf56072
--- /dev/null
+++ b/drivers/staging/media/starfive/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "StarFive media platform drivers"
+
+source "drivers/staging/media/starfive/camss/Kconfig"
diff --git a/drivers/staging/media/starfive/Makefile b/drivers/staging/media/starfive/Makefile
new file mode 100644
index 000000000000..4639fa1bca32
--- /dev/null
+++ b/drivers/staging/media/starfive/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += camss/
diff --git a/drivers/staging/media/starfive/camss/Kconfig b/drivers/staging/media/starfive/camss/Kconfig
new file mode 100644
index 000000000000..9ea5708fe409
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STARFIVE_CAMSS
+ tristate "Starfive Camera Subsystem driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on HAS_DMA
+ depends on PM
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+	  Enable this to add support for the StarFive Camera Subsystem
+	  found on the StarFive JH7110 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called starfive-camss.
diff --git a/drivers/staging/media/starfive/camss/Makefile b/drivers/staging/media/starfive/camss/Makefile
new file mode 100644
index 000000000000..005790202e7b
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for StarFive Camera Subsystem driver
+#
+
+starfive-camss-objs += \
+ stf-camss.o \
+ stf-capture.o \
+ stf-isp.o \
+ stf-isp-hw-ops.o \
+ stf-video.o
+
+obj-$(CONFIG_VIDEO_STARFIVE_CAMSS) += starfive-camss.o
diff --git a/drivers/staging/media/starfive/camss/TODO.txt b/drivers/staging/media/starfive/camss/TODO.txt
new file mode 100644
index 000000000000..7d459f4f09d0
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/TODO.txt
@@ -0,0 +1,4 @@
+Unstaging requirements:
+- Add userspace support that demonstrates the ability to receive statistics and
+  adapt the hardware module configuration accordingly;
+- Add documentation describing the statistics data structures;
diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c
new file mode 100644
index 000000000000..259aaad010d2
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-camss.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf_camss.c
+ *
+ * Starfive Camera Subsystem driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Jack Zhu <jack.zhu@starfivetech.com>
+ * Author: Changhuang Liang <changhuang.liang@starfivetech.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+
+#include "stf-camss.h"
+
+static const char * const stfcamss_clocks[] = {
+ "wrapper_clk_c",
+ "ispcore_2x",
+ "isp_axi",
+};
+
+static const char * const stfcamss_resets[] = {
+ "wrapper_p",
+ "wrapper_c",
+ "axiwr",
+ "isp_top_n",
+ "isp_top_axi",
+};
+
+static const struct stf_isr_data stf_isrs[] = {
+ {"wr_irq", stf_wr_irq_handler},
+ {"isp_irq", stf_isp_irq_handler},
+ {"line_irq", stf_line_irq_handler},
+};
+
+static int stfcamss_get_mem_res(struct stfcamss *stfcamss)
+{
+ struct platform_device *pdev = to_platform_device(stfcamss->dev);
+
+ stfcamss->syscon_base =
+ devm_platform_ioremap_resource_byname(pdev, "syscon");
+ if (IS_ERR(stfcamss->syscon_base))
+ return PTR_ERR(stfcamss->syscon_base);
+
+ stfcamss->isp_base = devm_platform_ioremap_resource_byname(pdev, "isp");
+ if (IS_ERR(stfcamss->isp_base))
+ return PTR_ERR(stfcamss->isp_base);
+
+ return 0;
+}
+
+/*
+ * stfcamss_of_parse_endpoint_node - Parse port endpoint node
+ * @stfcamss: STFCAMSS device
+ * @node: Device node to be parsed
+ * @csd: Parsed data from port endpoint node
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int stfcamss_of_parse_endpoint_node(struct stfcamss *stfcamss,
+ struct device_node *node,
+ struct stfcamss_async_subdev *csd)
+{
+ struct v4l2_fwnode_endpoint vep = { { 0 } };
+ int ret;
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+ if (ret) {
+ dev_err(stfcamss->dev, "endpoint not defined at %pOF\n", node);
+ return ret;
+ }
+
+ csd->port = vep.base.port;
+
+ return 0;
+}
+
+/*
+ * stfcamss_of_parse_ports - Parse ports node
+ * @stfcamss: STFCAMSS device
+ *
+ * Return number of "port" nodes found in "ports" node
+ */
+static int stfcamss_of_parse_ports(struct stfcamss *stfcamss)
+{
+ struct device_node *node = NULL;
+ int ret, num_subdevs = 0;
+
+ for_each_endpoint_of_node(stfcamss->dev->of_node, node) {
+ struct stfcamss_async_subdev *csd;
+
+ if (!of_device_is_available(node))
+ continue;
+
+ csd = v4l2_async_nf_add_fwnode_remote(&stfcamss->notifier,
+ of_fwnode_handle(node),
+ struct stfcamss_async_subdev);
+ if (IS_ERR(csd)) {
+ ret = PTR_ERR(csd);
+ dev_err(stfcamss->dev, "failed to add async notifier\n");
+ goto err_cleanup;
+ }
+
+ ret = stfcamss_of_parse_endpoint_node(stfcamss, node, csd);
+ if (ret)
+ goto err_cleanup;
+
+ num_subdevs++;
+ }
+
+ return num_subdevs;
+
+err_cleanup:
+ of_node_put(node);
+ return ret;
+}
+
+static int stfcamss_register_devs(struct stfcamss *stfcamss)
+{
+ struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV];
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+ int ret;
+
+ ret = stf_isp_register(isp_dev, &stfcamss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(stfcamss->dev,
+ "failed to register stf isp%d entity: %d\n", 0, ret);
+ return ret;
+ }
+
+ ret = stf_capture_register(stfcamss, &stfcamss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(stfcamss->dev,
+ "failed to register capture: %d\n", ret);
+ goto err_isp_unregister;
+ }
+
+ ret = media_create_pad_link(&isp_dev->subdev.entity, STF_ISP_PAD_SRC,
+ &cap_yuv->video.vdev.entity, 0, 0);
+ if (ret)
+ goto err_cap_unregister;
+
+ cap_yuv->video.source_subdev = &isp_dev->subdev;
+
+ return ret;
+
+err_cap_unregister:
+ stf_capture_unregister(stfcamss);
+err_isp_unregister:
+ stf_isp_unregister(&stfcamss->isp_dev);
+
+ return ret;
+}
+
+static void stfcamss_unregister_devs(struct stfcamss *stfcamss)
+{
+ struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV];
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+
+ media_entity_remove_links(&isp_dev->subdev.entity);
+ media_entity_remove_links(&cap_yuv->video.vdev.entity);
+
+ stf_isp_unregister(&stfcamss->isp_dev);
+ stf_capture_unregister(stfcamss);
+}
+
+static int stfcamss_subdev_notifier_bound(struct v4l2_async_notifier *async,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct stfcamss *stfcamss =
+ container_of(async, struct stfcamss, notifier);
+ struct stfcamss_async_subdev *csd =
+ container_of(asc, struct stfcamss_async_subdev, asd);
+ enum stf_port_num port = csd->port;
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+ struct stf_capture *cap_raw = &stfcamss->captures[STF_CAPTURE_RAW];
+ struct media_pad *pad;
+ int ret;
+
+ if (port == STF_PORT_CSI2RX) {
+ pad = &isp_dev->pads[STF_ISP_PAD_SINK];
+ } else {
+ dev_err(stfcamss->dev, "not support port %d\n", port);
+ return -EPERM;
+ }
+
+ ret = v4l2_create_fwnode_links_to_pad(subdev, pad, 0);
+ if (ret)
+ return ret;
+
+ ret = media_create_pad_link(&subdev->entity, 1,
+ &cap_raw->video.vdev.entity, 0, 0);
+ if (ret)
+ return ret;
+
+ isp_dev->source_subdev = subdev;
+ cap_raw->video.source_subdev = subdev;
+
+ return 0;
+}
+
+static int stfcamss_subdev_notifier_complete(struct v4l2_async_notifier *ntf)
+{
+ struct stfcamss *stfcamss =
+ container_of(ntf, struct stfcamss, notifier);
+
+ return v4l2_device_register_subdev_nodes(&stfcamss->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations
+stfcamss_subdev_notifier_ops = {
+ .bound = stfcamss_subdev_notifier_bound,
+ .complete = stfcamss_subdev_notifier_complete,
+};
+
+static void stfcamss_mc_init(struct platform_device *pdev,
+ struct stfcamss *stfcamss)
+{
+ stfcamss->media_dev.dev = stfcamss->dev;
+ strscpy(stfcamss->media_dev.model, "Starfive Camera Subsystem",
+ sizeof(stfcamss->media_dev.model));
+ media_device_init(&stfcamss->media_dev);
+
+ stfcamss->v4l2_dev.mdev = &stfcamss->media_dev;
+}
+
+/*
+ * stfcamss_probe - Probe STFCAMSS platform device
+ * @pdev: Pointer to STFCAMSS platform device
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int stfcamss_probe(struct platform_device *pdev)
+{
+ struct stfcamss *stfcamss;
+ struct device *dev = &pdev->dev;
+ int ret, num_subdevs;
+ unsigned int i;
+
+ stfcamss = devm_kzalloc(dev, sizeof(*stfcamss), GFP_KERNEL);
+ if (!stfcamss)
+ return -ENOMEM;
+
+ stfcamss->dev = dev;
+
+ for (i = 0; i < ARRAY_SIZE(stf_isrs); ++i) {
+ int irq;
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(stfcamss->dev, irq, stf_isrs[i].isr, 0,
+ stf_isrs[i].name, stfcamss);
+ if (ret) {
+ dev_err(dev, "request irq failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ stfcamss->nclks = ARRAY_SIZE(stfcamss->sys_clk);
+ for (i = 0; i < stfcamss->nclks; ++i)
+ stfcamss->sys_clk[i].id = stfcamss_clocks[i];
+ ret = devm_clk_bulk_get(dev, stfcamss->nclks, stfcamss->sys_clk);
+ if (ret) {
+ dev_err(dev, "Failed to get clk controls\n");
+ return ret;
+ }
+
+ stfcamss->nrsts = ARRAY_SIZE(stfcamss->sys_rst);
+ for (i = 0; i < stfcamss->nrsts; ++i)
+ stfcamss->sys_rst[i].id = stfcamss_resets[i];
+ ret = devm_reset_control_bulk_get_shared(dev, stfcamss->nrsts,
+ stfcamss->sys_rst);
+ if (ret) {
+ dev_err(dev, "Failed to get reset controls\n");
+ return ret;
+ }
+
+ ret = stfcamss_get_mem_res(stfcamss);
+ if (ret) {
+ dev_err(dev, "Could not map registers\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, stfcamss);
+
+ v4l2_async_nf_init(&stfcamss->notifier, &stfcamss->v4l2_dev);
+
+ num_subdevs = stfcamss_of_parse_ports(stfcamss);
+ if (num_subdevs < 0) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get sub devices: %d\n", ret);
+ goto err_cleanup_notifier;
+ }
+
+ ret = stf_isp_init(stfcamss);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init isp: %d\n", ret);
+ goto err_cleanup_notifier;
+ }
+
+ stfcamss_mc_init(pdev, stfcamss);
+
+ ret = v4l2_device_register(stfcamss->dev, &stfcamss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+ goto err_cleanup_media_device;
+ }
+
+ ret = media_device_register(&stfcamss->media_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register media device: %d\n", ret);
+ goto err_unregister_device;
+ }
+
+ ret = stfcamss_register_devs(stfcamss);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdevice: %d\n", ret);
+ goto err_unregister_media_dev;
+ }
+
+ pm_runtime_enable(dev);
+
+ stfcamss->notifier.ops = &stfcamss_subdev_notifier_ops;
+ ret = v4l2_async_nf_register(&stfcamss->notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register async subdev nodes: %d\n",
+ ret);
+ pm_runtime_disable(dev);
+ goto err_unregister_subdevs;
+ }
+
+ return 0;
+
+err_unregister_subdevs:
+ stfcamss_unregister_devs(stfcamss);
+err_unregister_media_dev:
+ media_device_unregister(&stfcamss->media_dev);
+err_unregister_device:
+ v4l2_device_unregister(&stfcamss->v4l2_dev);
+err_cleanup_media_device:
+ media_device_cleanup(&stfcamss->media_dev);
+err_cleanup_notifier:
+ v4l2_async_nf_cleanup(&stfcamss->notifier);
+ return ret;
+}
+
+/*
+ * stfcamss_remove - Remove STFCAMSS platform device
+ * @pdev: Pointer to STFCAMSS platform device
+ */
+static void stfcamss_remove(struct platform_device *pdev)
+{
+ struct stfcamss *stfcamss = platform_get_drvdata(pdev);
+
+ stfcamss_unregister_devs(stfcamss);
+ v4l2_device_unregister(&stfcamss->v4l2_dev);
+ media_device_cleanup(&stfcamss->media_dev);
+ v4l2_async_nf_cleanup(&stfcamss->notifier);
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct of_device_id stfcamss_of_match[] = {
+ { .compatible = "starfive,jh7110-camss" },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, stfcamss_of_match);
+
+static int __maybe_unused stfcamss_runtime_suspend(struct device *dev)
+{
+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_bulk_assert(stfcamss->nrsts, stfcamss->sys_rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed\n");
+ return ret;
+ }
+
+ clk_bulk_disable_unprepare(stfcamss->nclks, stfcamss->sys_clk);
+
+ return 0;
+}
+
+static int __maybe_unused stfcamss_runtime_resume(struct device *dev)
+{
+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(stfcamss->nclks, stfcamss->sys_clk);
+ if (ret) {
+ dev_err(dev, "clock prepare enable failed\n");
+ return ret;
+ }
+
+ ret = reset_control_bulk_deassert(stfcamss->nrsts, stfcamss->sys_rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ clk_bulk_disable_unprepare(stfcamss->nclks, stfcamss->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops stfcamss_pm_ops = {
+ SET_RUNTIME_PM_OPS(stfcamss_runtime_suspend,
+ stfcamss_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver stfcamss_driver = {
+ .probe = stfcamss_probe,
+ .remove = stfcamss_remove,
+ .driver = {
+ .name = "starfive-camss",
+ .pm = &stfcamss_pm_ops,
+ .of_match_table = stfcamss_of_match,
+ },
+};
+
+module_platform_driver(stfcamss_driver);
+
+MODULE_AUTHOR("Jack Zhu <jack.zhu@starfivetech.com>");
+MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>");
+MODULE_DESCRIPTION("StarFive Camera Subsystem driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/starfive/camss/stf-camss.h b/drivers/staging/media/starfive/camss/stf-camss.h
new file mode 100644
index 000000000000..e2b0cfb437bd
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-camss.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf_camss.h
+ *
+ * StarFive Camera Subsystem driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_CAMSS_H
+#define STF_CAMSS_H
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+
+#include "stf-isp.h"
+#include "stf-capture.h"
+
+enum stf_port_num {
+ STF_PORT_DVP = 0,
+ STF_PORT_CSI2RX
+};
+
+enum stf_clk {
+ STF_CLK_WRAPPER_CLK_C = 0,
+ STF_CLK_ISPCORE_2X,
+ STF_CLK_ISP_AXI,
+ STF_CLK_NUM
+};
+
+enum stf_rst {
+ STF_RST_WRAPPER_P = 0,
+ STF_RST_WRAPPER_C,
+ STF_RST_AXIWR,
+ STF_RST_ISP_TOP_N,
+ STF_RST_ISP_TOP_AXI,
+ STF_RST_NUM
+};
+
+struct stf_isr_data {
+ const char *name;
+ irqreturn_t (*isr)(int irq, void *priv);
+};
+
+struct stfcamss {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct media_pipeline pipe;
+ struct device *dev;
+ struct stf_isp_dev isp_dev;
+ struct stf_capture captures[STF_CAPTURE_NUM];
+ struct v4l2_async_notifier notifier;
+ void __iomem *syscon_base;
+ void __iomem *isp_base;
+ struct clk_bulk_data sys_clk[STF_CLK_NUM];
+ int nclks;
+ struct reset_control_bulk_data sys_rst[STF_RST_NUM];
+ int nrsts;
+};
+
+struct stfcamss_async_subdev {
+ struct v4l2_async_connection asd; /* must be first */
+ enum stf_port_num port;
+};
+
+static inline u32 stf_isp_reg_read(struct stfcamss *stfcamss, u32 reg)
+{
+ return ioread32(stfcamss->isp_base + reg);
+}
+
+static inline void stf_isp_reg_write(struct stfcamss *stfcamss,
+ u32 reg, u32 val)
+{
+ iowrite32(val, stfcamss->isp_base + reg);
+}
+
+static inline void stf_isp_reg_write_delay(struct stfcamss *stfcamss,
+ u32 reg, u32 val, u32 delay)
+{
+ iowrite32(val, stfcamss->isp_base + reg);
+ usleep_range(1000 * delay, 1000 * delay + 100);
+}
+
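+/* Read-modify-write: only the register bits selected by @mask are updated. */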
+static inline void stf_isp_reg_set_bit(struct stfcamss *stfcamss,
+ u32 reg, u32 mask, u32 val)
+{
+ u32 value;
+
+ value = ioread32(stfcamss->isp_base + reg) & ~mask;
+ val &= mask;
+ val |= value;
+ iowrite32(val, stfcamss->isp_base + reg);
+}
+
+static inline void stf_isp_reg_set(struct stfcamss *stfcamss, u32 reg, u32 mask)
+{
+ iowrite32(ioread32(stfcamss->isp_base + reg) | mask,
+ stfcamss->isp_base + reg);
+}
+
+static inline u32 stf_syscon_reg_read(struct stfcamss *stfcamss, u32 reg)
+{
+ return ioread32(stfcamss->syscon_base + reg);
+}
+
+static inline void stf_syscon_reg_write(struct stfcamss *stfcamss,
+ u32 reg, u32 val)
+{
+ iowrite32(val, stfcamss->syscon_base + reg);
+}
+
+static inline void stf_syscon_reg_set_bit(struct stfcamss *stfcamss,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(stfcamss->syscon_base + reg);
+ iowrite32(value | bit_mask, stfcamss->syscon_base + reg);
+}
+
+static inline void stf_syscon_reg_clear_bit(struct stfcamss *stfcamss,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(stfcamss->syscon_base + reg);
+ iowrite32(value & ~bit_mask, stfcamss->syscon_base + reg);
+}
+#endif /* STF_CAMSS_H */
diff --git a/drivers/staging/media/starfive/camss/stf-capture.c b/drivers/staging/media/starfive/camss/stf-capture.c
new file mode 100644
index 000000000000..e15d2e97eb0b
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-capture.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf_capture.c
+ *
+ * StarFive Camera Subsystem - capture device
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#include "stf-camss.h"
+
+static const char * const stf_cap_names[] = {
+ "capture_raw",
+ "capture_yuv",
+};
+
+static const struct stfcamss_format_info stf_wr_fmts[] = {
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+};
+
+static const struct stfcamss_format_info stf_isp_fmts[] = {
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .planes = 2,
+ .vsub = { 1, 2 },
+ .bpp = 8,
+ },
+};
+
+static inline struct stf_capture *to_stf_capture(struct stfcamss_video *video)
+{
+ return container_of(video, struct stf_capture, video);
+}
+
+static void stf_set_raw_addr(struct stfcamss *stfcamss, dma_addr_t addr)
+{
+ stf_syscon_reg_write(stfcamss, VIN_START_ADDR_O, (long)addr);
+ stf_syscon_reg_write(stfcamss, VIN_START_ADDR_N, (long)addr);
+}
+
+static void stf_set_yuv_addr(struct stfcamss *stfcamss,
+ dma_addr_t y_addr, dma_addr_t uv_addr)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_Y_PLANE_START_ADDR, y_addr);
+ stf_isp_reg_write(stfcamss, ISP_REG_UV_PLANE_START_ADDR, uv_addr);
+}
+
+static void stf_init_addrs(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+ dma_addr_t addr0, addr1;
+
+ output->active_buf = 0;
+
+ if (!output->buf[0])
+ return;
+
+ addr0 = output->buf[0]->addr[0];
+ addr1 = output->buf[0]->addr[1];
+
+ if (cap->type == STF_CAPTURE_RAW)
+ stf_set_raw_addr(video->stfcamss, addr0);
+ else if (cap->type == STF_CAPTURE_YUV)
+ stf_set_yuv_addr(video->stfcamss, addr0, addr1);
+}
+
+static struct stfcamss_buffer *stf_buf_get_pending(struct stf_v_buf *output)
+{
+ struct stfcamss_buffer *buffer = NULL;
+
+ if (!list_empty(&output->pending_bufs)) {
+ buffer = list_first_entry(&output->pending_bufs,
+ struct stfcamss_buffer,
+ queue);
+ list_del(&buffer->queue);
+ }
+
+ return buffer;
+}
+
+static void stf_cap_s_cfg(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ output->state = STF_OUTPUT_IDLE;
+ output->buf[0] = stf_buf_get_pending(output);
+
+ if (!output->buf[0] && output->buf[1]) {
+ output->buf[0] = output->buf[1];
+ output->buf[1] = NULL;
+ }
+
+ if (output->buf[0])
+ output->state = STF_OUTPUT_SINGLE;
+
+ output->sequence = 0;
+ stf_init_addrs(video);
+
+ spin_unlock_irqrestore(&output->lock, flags);
+}
+
+static int stf_cap_s_cleanup(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ output->state = STF_OUTPUT_OFF;
+
+ spin_unlock_irqrestore(&output->lock, flags);
+
+ return 0;
+}
+
+static void stf_wr_data_en(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+
+ stf_syscon_reg_set_bit(stfcamss, VIN_CHANNEL_SEL_EN, U0_VIN_AXIWR0_EN);
+}
+
+static void stf_wr_irq_enable(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+
+ stf_syscon_reg_clear_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_M);
+}
+
+static void stf_wr_irq_disable(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+
+ stf_syscon_reg_set_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+ stf_syscon_reg_clear_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+ stf_syscon_reg_set_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_M);
+}
+
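+/*
+ * Route the incoming data to the selected capture path: for RAW capture
+ * program the VIN write channel (channel select and pixel count per
+ * line), for YUV capture program the MIPI byte/channel routing into
+ * the ISP.
+ */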
+static void stf_channel_set(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+ u32 val;
+
+ if (cap->type == STF_CAPTURE_RAW) {
+ const struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix;
+
+ val = stf_syscon_reg_read(stfcamss, VIN_CHANNEL_SEL_EN);
+ val &= ~U0_VIN_CHANNEL_SEL_MASK;
+ val |= CHANNEL(0);
+ stf_syscon_reg_write(stfcamss, VIN_CHANNEL_SEL_EN, val);
+
+ val = stf_syscon_reg_read(stfcamss, VIN_INRT_PIX_CFG);
+ val &= ~U0_VIN_PIX_CT_MASK;
+ val |= PIX_CT(1);
+
+ val &= ~U0_VIN_PIXEL_HEIGH_BIT_SEL_MAKS;
+ val |= PIXEL_HEIGH_BIT_SEL(0);
+
+ val &= ~U0_VIN_PIX_CNT_END_MASK;
+ val |= PIX_CNT_END(pix->width / 4 - 1);
+
+ stf_syscon_reg_write(stfcamss, VIN_INRT_PIX_CFG, val);
+ } else if (cap->type == STF_CAPTURE_YUV) {
+ val = stf_syscon_reg_read(stfcamss, VIN_CFG_REG);
+ val &= ~U0_VIN_MIPI_BYTE_EN_ISP0_MASK;
+ val |= U0_VIN_MIPI_BYTE_EN_ISP0(0);
+
+ val &= ~U0_VIN_MIPI_CHANNEL_SEL0_MASK;
+ val |= U0_VIN_MIPI_CHANNEL_SEL0(0);
+
+ val &= ~U0_VIN_PIX_NUM_MASK;
+ val |= U0_VIN_PIX_NUM(0);
+
+ val &= ~U0_VIN_P_I_MIPI_HAEDER_EN0_MASK;
+ val |= U0_VIN_P_I_MIPI_HAEDER_EN0(1);
+
+ stf_syscon_reg_write(stfcamss, VIN_CFG_REG, val);
+ }
+}
+
+static void stf_capture_start(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+
+ stf_channel_set(video);
+ if (cap->type == STF_CAPTURE_RAW) {
+ stf_wr_irq_enable(video);
+ stf_wr_data_en(video);
+ }
+
+ stf_cap_s_cfg(video);
+}
+
+static void stf_capture_stop(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+
+ if (cap->type == STF_CAPTURE_RAW)
+ stf_wr_irq_disable(video);
+
+ stf_cap_s_cleanup(video);
+}
+
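+/*
+ * Set the per-capture-device defaults: empty buffer queues, a few frames
+ * of initial skip, and the format table / bytes-per-line alignment that
+ * matches the RAW or YUV path.
+ */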
+static void stf_capture_init(struct stfcamss *stfcamss, struct stf_capture *cap)
+{
+ cap->buffers.state = STF_OUTPUT_OFF;
+ cap->buffers.buf[0] = NULL;
+ cap->buffers.buf[1] = NULL;
+ cap->buffers.active_buf = 0;
+ atomic_set(&cap->buffers.frame_skip, 4);
+ INIT_LIST_HEAD(&cap->buffers.pending_bufs);
+ INIT_LIST_HEAD(&cap->buffers.ready_bufs);
+ spin_lock_init(&cap->buffers.lock);
+
+ cap->video.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ cap->video.stfcamss = stfcamss;
+ cap->video.bpl_alignment = 16 * 8;
+
+ if (cap->type == STF_CAPTURE_RAW) {
+ cap->video.formats = stf_wr_fmts;
+ cap->video.nformats = ARRAY_SIZE(stf_wr_fmts);
+ cap->video.bpl_alignment = 8;
+ } else if (cap->type == STF_CAPTURE_YUV) {
+ cap->video.formats = stf_isp_fmts;
+ cap->video.nformats = ARRAY_SIZE(stf_isp_fmts);
+ cap->video.bpl_alignment = 1;
+ }
+}
+
+static void stf_buf_add_ready(struct stf_v_buf *output,
+ struct stfcamss_buffer *buffer)
+{
+ INIT_LIST_HEAD(&buffer->queue);
+ list_add_tail(&buffer->queue, &output->ready_bufs);
+}
+
+static struct stfcamss_buffer *stf_buf_get_ready(struct stf_v_buf *output)
+{
+ struct stfcamss_buffer *buffer = NULL;
+
+ if (!list_empty(&output->ready_bufs)) {
+ buffer = list_first_entry(&output->ready_bufs,
+ struct stfcamss_buffer,
+ queue);
+ list_del(&buffer->queue);
+ }
+
+ return buffer;
+}
+
+static void stf_buf_add_pending(struct stf_v_buf *output,
+ struct stfcamss_buffer *buffer)
+{
+ INIT_LIST_HEAD(&buffer->queue);
+ list_add_tail(&buffer->queue, &output->pending_bufs);
+}
+
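+/*
+ * Output buffer state machine (see enum stf_v_state): buf[0] and buf[1]
+ * are the two hardware address slots used in ping-pong fashion, with
+ * active_buf selecting the slot currently being written.
+ * stf_buf_update_on_last() is used when no pending buffer is left to
+ * program, stf_buf_update_on_next() when a pending buffer has been
+ * programmed for the next frame, and stf_buf_update_on_new() when
+ * userspace queues a new buffer.
+ */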
+static void stf_buf_update_on_last(struct stf_v_buf *output)
+{
+ switch (output->state) {
+ case STF_OUTPUT_CONTINUOUS:
+ output->state = STF_OUTPUT_SINGLE;
+ output->active_buf = !output->active_buf;
+ break;
+ case STF_OUTPUT_SINGLE:
+ output->state = STF_OUTPUT_STOPPING;
+ break;
+ default:
+ break;
+ }
+}
+
+static void stf_buf_update_on_next(struct stf_v_buf *output)
+{
+ switch (output->state) {
+ case STF_OUTPUT_CONTINUOUS:
+ output->active_buf = !output->active_buf;
+ break;
+ case STF_OUTPUT_SINGLE:
+ default:
+ break;
+ }
+}
+
+static void stf_buf_update_on_new(struct stfcamss_video *video,
+ struct stfcamss_buffer *new_buf)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+
+ switch (output->state) {
+ case STF_OUTPUT_SINGLE:
+ stf_buf_add_pending(output, new_buf);
+ break;
+ case STF_OUTPUT_IDLE:
+ if (!output->buf[0]) {
+ output->buf[0] = new_buf;
+ stf_init_addrs(video);
+ output->state = STF_OUTPUT_SINGLE;
+ } else {
+ stf_buf_add_pending(output, new_buf);
+ }
+ break;
+ case STF_OUTPUT_STOPPING:
+ if (output->last_buffer) {
+ output->buf[output->active_buf] = output->last_buffer;
+ output->last_buffer = NULL;
+ }
+
+ output->state = STF_OUTPUT_SINGLE;
+ stf_buf_add_pending(output, new_buf);
+ break;
+ case STF_OUTPUT_CONTINUOUS:
+ default:
+ stf_buf_add_pending(output, new_buf);
+ break;
+ }
+}
+
+static void stf_buf_flush(struct stf_v_buf *output, enum vb2_buffer_state state)
+{
+ struct stfcamss_buffer *buf;
+ struct stfcamss_buffer *t;
+
+ list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ list_for_each_entry_safe(buf, t, &output->ready_bufs, queue) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+}
+
+static void stf_buf_done(struct stf_v_buf *output)
+{
+ struct stfcamss_buffer *ready_buf;
+ u64 ts = ktime_get_ns();
+ unsigned long flags;
+
+ if (output->state == STF_OUTPUT_OFF ||
+ output->state == STF_OUTPUT_RESERVED)
+ return;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ while ((ready_buf = stf_buf_get_ready(output))) {
+ ready_buf->vb.vb2_buf.timestamp = ts;
+ ready_buf->vb.sequence = output->sequence++;
+
+ vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ spin_unlock_irqrestore(&output->lock, flags);
+}
+
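+/*
+ * Called from the frame interrupt handlers: take the slot that has just
+ * been filled, program the next pending buffer into it (or reuse the
+ * current one if none is pending) and move the completed buffer to the
+ * ready list so stf_buf_done() can return it to userspace.  While
+ * stopping, the completed buffer is parked in last_buffer instead.
+ */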
+static void stf_change_buffer(struct stf_v_buf *output)
+{
+ struct stf_capture *cap = container_of(output, struct stf_capture,
+ buffers);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+ struct stfcamss_buffer *ready_buf;
+ dma_addr_t *new_addr;
+ unsigned long flags;
+ u32 active_index;
+
+ if (output->state == STF_OUTPUT_OFF ||
+ output->state == STF_OUTPUT_STOPPING ||
+ output->state == STF_OUTPUT_RESERVED ||
+ output->state == STF_OUTPUT_IDLE)
+ return;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ active_index = output->active_buf;
+
+ ready_buf = output->buf[active_index];
+ if (!ready_buf) {
+ dev_dbg(stfcamss->dev, "missing ready buf %d %d.\n",
+ active_index, output->state);
+ active_index = !active_index;
+ ready_buf = output->buf[active_index];
+ if (!ready_buf) {
+ dev_dbg(stfcamss->dev,
+ "no ready buffer in either slot (slot %d, state %d)\n",
+ active_index, output->state);
+ goto out_unlock;
+ }
+ }
+
+ /* Get next buffer */
+ output->buf[active_index] = stf_buf_get_pending(output);
+ if (!output->buf[active_index]) {
+ new_addr = ready_buf->addr;
+ stf_buf_update_on_last(output);
+ } else {
+ new_addr = output->buf[active_index]->addr;
+ stf_buf_update_on_next(output);
+ }
+
+ if (output->state == STF_OUTPUT_STOPPING) {
+ output->last_buffer = ready_buf;
+ } else {
+ if (cap->type == STF_CAPTURE_RAW)
+ stf_set_raw_addr(stfcamss, new_addr[0]);
+ else if (cap->type == STF_CAPTURE_YUV)
+ stf_set_yuv_addr(stfcamss, new_addr[0], new_addr[1]);
+
+ stf_buf_add_ready(output, ready_buf);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&output->lock, flags);
+}
+
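+/*
+ * VIN write channel (RAW path) frame interrupt: once the initial frames
+ * counted by frame_skip have been skipped, rotate the buffers and
+ * complete the finished ones, then clear the interrupt by pulsing
+ * U0_VIN_INTR_CLEAN.
+ */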
+irqreturn_t stf_wr_irq_handler(int irq, void *priv)
+{
+ struct stfcamss *stfcamss = priv;
+ struct stf_capture *cap = &stfcamss->captures[STF_CAPTURE_RAW];
+
+ if (atomic_dec_if_positive(&cap->buffers.frame_skip) < 0) {
+ stf_change_buffer(&cap->buffers);
+ stf_buf_done(&cap->buffers);
+ }
+
+ stf_syscon_reg_set_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+ stf_syscon_reg_clear_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t stf_isp_irq_handler(int irq, void *priv)
+{
+ struct stfcamss *stfcamss = priv;
+ struct stf_capture *cap = &stfcamss->captures[STF_CAPTURE_YUV];
+ u32 status;
+
+ status = stf_isp_reg_read(stfcamss, ISP_REG_ISP_CTRL_0);
+ if (status & ISPC_ISP) {
+ if (status & ISPC_ENUO)
+ stf_buf_done(&cap->buffers);
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ISP_CTRL_0,
+ (status & ~ISPC_INT_ALL_MASK) |
+ ISPC_ISP | ISPC_CSI | ISPC_SC);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t stf_line_irq_handler(int irq, void *priv)
+{
+ struct stfcamss *stfcamss = priv;
+ struct stf_capture *cap = &stfcamss->captures[STF_CAPTURE_YUV];
+ u32 status;
+
+ status = stf_isp_reg_read(stfcamss, ISP_REG_ISP_CTRL_0);
+ if (status & ISPC_LINE) {
+ if (atomic_dec_if_positive(&cap->buffers.frame_skip) < 0) {
+ if ((status & ISPC_ENUO))
+ stf_change_buffer(&cap->buffers);
+ }
+
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_CSIINTS,
+ CSI_INTS_MASK, CSI_INTS(0x3));
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_IESHD,
+ SHAD_UP_M | SHAD_UP_EN, 0x3);
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ISP_CTRL_0,
+ (status & ~ISPC_INT_ALL_MASK) | ISPC_LINE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int stf_queue_buffer(struct stfcamss_video *video,
+ struct stfcamss_buffer *buf)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *v_bufs = &cap->buffers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&v_bufs->lock, flags);
+ stf_buf_update_on_new(video, buf);
+ spin_unlock_irqrestore(&v_bufs->lock, flags);
+
+ return 0;
+}
+
+static int stf_flush_buffers(struct stfcamss_video *video,
+ enum vb2_buffer_state state)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *v_bufs = &cap->buffers;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&v_bufs->lock, flags);
+
+ stf_buf_flush(v_bufs, state);
+
+ for (i = 0; i < ARRAY_SIZE(v_bufs->buf); i++) {
+ if (v_bufs->buf[i])
+ vb2_buffer_done(&v_bufs->buf[i]->vb.vb2_buf, state);
+
+ v_bufs->buf[i] = NULL;
+ }
+
+ if (v_bufs->last_buffer) {
+ vb2_buffer_done(&v_bufs->last_buffer->vb.vb2_buf, state);
+ v_bufs->last_buffer = NULL;
+ }
+
+ spin_unlock_irqrestore(&v_bufs->lock, flags);
+ return 0;
+}
+
+static const struct stfcamss_video_ops stf_capture_ops = {
+ .queue_buffer = stf_queue_buffer,
+ .flush_buffers = stf_flush_buffers,
+ .start_streaming = stf_capture_start,
+ .stop_streaming = stf_capture_stop,
+};
+
+static void stf_capture_unregister_one(struct stf_capture *cap)
+{
+ if (!video_is_registered(&cap->video.vdev))
+ return;
+
+ media_entity_cleanup(&cap->video.vdev.entity);
+ vb2_video_unregister_device(&cap->video.vdev);
+}
+
+void stf_capture_unregister(struct stfcamss *stfcamss)
+{
+ struct stf_capture *cap_raw = &stfcamss->captures[STF_CAPTURE_RAW];
+ struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV];
+
+ stf_capture_unregister_one(cap_raw);
+ stf_capture_unregister_one(cap_yuv);
+}
+
+int stf_capture_register(struct stfcamss *stfcamss,
+ struct v4l2_device *v4l2_dev)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(stfcamss->captures); i++) {
+ struct stf_capture *capture = &stfcamss->captures[i];
+
+ capture->type = i;
+ capture->video.ops = &stf_capture_ops;
+ stf_capture_init(stfcamss, capture);
+
+ ret = stf_video_register(&capture->video, v4l2_dev,
+ stf_cap_names[i]);
+ if (ret < 0) {
+ dev_err(stfcamss->dev,
+ "Failed to register video node: %d\n", ret);
+ stf_capture_unregister(stfcamss);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/starfive/camss/stf-capture.h b/drivers/staging/media/starfive/camss/stf-capture.h
new file mode 100644
index 000000000000..2f9740b7e500
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-capture.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf_capture.h
+ *
+ * StarFive Camera Subsystem driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_CAPTURE_H
+#define STF_CAPTURE_H
+
+#include "stf-video.h"
+
+#define VIN_CHANNEL_SEL_EN 0x14
+#define VIN_START_ADDR_N 0x18
+#define VIN_INRT_PIX_CFG 0x1c
+#define VIN_START_ADDR_O 0x20
+#define VIN_CFG_REG 0x24
+
+#define U0_VIN_CNFG_AXI_DVP_EN BIT(2)
+
+#define U0_VIN_CHANNEL_SEL_MASK GENMASK(3, 0)
+#define U0_VIN_AXIWR0_EN BIT(4)
+#define CHANNEL(x) ((x) << 0)
+
+#define U0_VIN_INTR_CLEAN BIT(0)
+#define U0_VIN_INTR_M BIT(1)
+#define U0_VIN_PIX_CNT_END_MASK GENMASK(12, 2)
+#define U0_VIN_PIX_CT_MASK GENMASK(14, 13)
+#define U0_VIN_PIXEL_HEIGH_BIT_SEL_MAKS GENMASK(16, 15)
+
+#define PIX_CNT_END(x) ((x) << 2)
+#define PIX_CT(x) ((x) << 13)
+#define PIXEL_HEIGH_BIT_SEL(x) ((x) << 15)
+
+#define U0_VIN_CNFG_DVP_HS_POS BIT(1)
+#define U0_VIN_CNFG_DVP_SWAP_EN BIT(2)
+#define U0_VIN_CNFG_DVP_VS_POS BIT(3)
+#define U0_VIN_CNFG_GEN_EN_AXIRD BIT(4)
+#define U0_VIN_CNFG_ISP_DVP_EN0 BIT(5)
+#define U0_VIN_MIPI_BYTE_EN_ISP0(n) ((n) << 6)
+#define U0_VIN_MIPI_CHANNEL_SEL0(n) ((n) << 8)
+#define U0_VIN_P_I_MIPI_HAEDER_EN0(n) ((n) << 12)
+#define U0_VIN_PIX_NUM(n) ((n) << 13)
+#define U0_VIN_MIPI_BYTE_EN_ISP0_MASK GENMASK(7, 6)
+#define U0_VIN_MIPI_CHANNEL_SEL0_MASK GENMASK(11, 8)
+#define U0_VIN_P_I_MIPI_HAEDER_EN0_MASK BIT(12)
+#define U0_VIN_PIX_NUM_MASK GENMASK(16, 13)
+
+enum stf_v_state {
+ STF_OUTPUT_OFF,
+ STF_OUTPUT_RESERVED,
+ STF_OUTPUT_SINGLE,
+ STF_OUTPUT_CONTINUOUS,
+ STF_OUTPUT_IDLE,
+ STF_OUTPUT_STOPPING
+};
+
+struct stf_v_buf {
+ int active_buf;
+ struct stfcamss_buffer *buf[2];
+ struct stfcamss_buffer *last_buffer;
+ struct list_head pending_bufs;
+ struct list_head ready_bufs;
+ enum stf_v_state state;
+ unsigned int sequence;
+ /* protects the above member variables */
+ spinlock_t lock;
+ atomic_t frame_skip;
+};
+
+struct stf_capture {
+ struct stfcamss_video video;
+ struct stf_v_buf buffers;
+ enum stf_capture_type type;
+};
+
+irqreturn_t stf_wr_irq_handler(int irq, void *priv);
+irqreturn_t stf_isp_irq_handler(int irq, void *priv);
+irqreturn_t stf_line_irq_handler(int irq, void *priv);
+int stf_capture_register(struct stfcamss *stfcamss,
+ struct v4l2_device *v4l2_dev);
+void stf_capture_unregister(struct stfcamss *stfcamss);
+
+#endif
diff --git a/drivers/staging/media/starfive/camss/stf-isp-hw-ops.c b/drivers/staging/media/starfive/camss/stf-isp-hw-ops.c
new file mode 100644
index 000000000000..c34631ff9422
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-isp-hw-ops.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf_isp_hw_ops.c
+ *
+ * Register interface file for StarFive ISP driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#include "stf-camss.h"
+
+static void stf_isp_config_obc(struct stfcamss *stfcamss)
+{
+ u32 reg_val, reg_add;
+
+ stf_isp_reg_write(stfcamss, ISP_REG_OBC_CFG, OBC_W_H(11) | OBC_W_W(11));
+
+ reg_val = GAIN_D_POINT(0x40) | GAIN_C_POINT(0x40) |
+ GAIN_B_POINT(0x40) | GAIN_A_POINT(0x40);
+ for (reg_add = ISP_REG_OBCG_CFG_0; reg_add <= ISP_REG_OBCG_CFG_3;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+
+ reg_val = OFFSET_D_POINT(0) | OFFSET_C_POINT(0) |
+ OFFSET_B_POINT(0) | OFFSET_A_POINT(0);
+ for (reg_add = ISP_REG_OBCO_CFG_0; reg_add <= ISP_REG_OBCO_CFG_3;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+}
+
+static void stf_isp_config_oecf(struct stfcamss *stfcamss)
+{
+ u32 reg_add, par_val;
+ u16 par_h, par_l;
+
+ par_h = 0x10; par_l = 0;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG0; reg_add <= ISP_REG_OECF_Y3_CFG0;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x40; par_l = 0x20;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG1; reg_add <= ISP_REG_OECF_Y3_CFG1;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x80; par_l = 0x60;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG2; reg_add <= ISP_REG_OECF_Y3_CFG2;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0xc0; par_l = 0xa0;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG3; reg_add <= ISP_REG_OECF_Y3_CFG3;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x100; par_l = 0xe0;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG4; reg_add <= ISP_REG_OECF_Y3_CFG4;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x200; par_l = 0x180;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG5; reg_add <= ISP_REG_OECF_Y3_CFG5;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x300; par_l = 0x280;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG6; reg_add <= ISP_REG_OECF_Y3_CFG6;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x3fe; par_l = 0x380;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG7; reg_add <= ISP_REG_OECF_Y3_CFG7;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x80; par_l = 0x80;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_S0_CFG0; reg_add <= ISP_REG_OECF_S3_CFG7;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 4;
+ }
+}
+
+static void stf_isp_config_lccf(struct stfcamss *stfcamss)
+{
+ u32 reg_add;
+
+ stf_isp_reg_write(stfcamss, ISP_REG_LCCF_CFG_0,
+ Y_DISTANCE(0x21C) | X_DISTANCE(0x3C0));
+ stf_isp_reg_write(stfcamss, ISP_REG_LCCF_CFG_1, LCCF_MAX_DIS(0xb));
+
+ for (reg_add = ISP_REG_LCCF_CFG_2; reg_add <= ISP_REG_LCCF_CFG_5;) {
+ stf_isp_reg_write(stfcamss, reg_add,
+ LCCF_F2_PAR(0x0) | LCCF_F1_PAR(0x0));
+ reg_add += 4;
+ }
+}
+
+static void stf_isp_config_awb(struct stfcamss *stfcamss)
+{
+ u32 reg_val, reg_add;
+ u16 symbol_h, symbol_l;
+
+ symbol_h = 0x0; symbol_l = 0x0;
+ reg_val = AWB_X_SYMBOL_H(symbol_h) | AWB_X_SYMBOL_L(symbol_l);
+
+ for (reg_add = ISP_REG_AWB_X0_CFG_0; reg_add <= ISP_REG_AWB_X3_CFG_1;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+
+ symbol_h = 0x0; symbol_l = 0x0;
+ reg_val = AWB_Y_SYMBOL_H(symbol_h) | AWB_Y_SYMBOL_L(symbol_l);
+
+ for (reg_add = ISP_REG_AWB_Y0_CFG_0; reg_add <= ISP_REG_AWB_Y3_CFG_1;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+
+ symbol_h = 0x80; symbol_l = 0x80;
+ reg_val = AWB_S_SYMBOL_H(symbol_h) | AWB_S_SYMBOL_L(symbol_l);
+
+ for (reg_add = ISP_REG_AWB_S0_CFG_0; reg_add <= ISP_REG_AWB_S3_CFG_1;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+}
+
+static void stf_isp_config_grgb(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_ICTC,
+ GF_MODE(1) | MAXGT(0x140) | MINGT(0x40));
+ stf_isp_reg_write(stfcamss, ISP_REG_IDBC, BADGT(0x200) | BADXT(0x200));
+}
+
+static void stf_isp_config_cfa(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_RAW_FORMAT_CFG,
+ SMY13(0) | SMY12(1) | SMY11(0) | SMY10(1) | SMY3(2) |
+ SMY2(3) | SMY1(2) | SMY0(3));
+ stf_isp_reg_write(stfcamss, ISP_REG_ICFAM, CROSS_COV(3) | HV_W(2));
+}
+
+static void stf_isp_config_ccm(struct stfcamss *stfcamss)
+{
+ u32 reg_add;
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ICAMD_0, DNRM_F(6) | CCM_M_DAT(0));
+
+ for (reg_add = ISP_REG_ICAMD_12; reg_add <= ISP_REG_ICAMD_20;) {
+ stf_isp_reg_write(stfcamss, reg_add, CCM_M_DAT(0x80));
+ reg_add += 0x10;
+ }
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ICAMD_24, CCM_M_DAT(0x700));
+ stf_isp_reg_write(stfcamss, ISP_REG_ICAMD_25, CCM_M_DAT(0x200));
+}
+
+static void stf_isp_config_gamma(struct stfcamss *stfcamss)
+{
+ u32 reg_val, reg_add;
+ u16 gamma_slope_v, gamma_v;
+
+ gamma_slope_v = 0x2400; gamma_v = 0x0;
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, ISP_REG_GAMMA_VAL0, reg_val);
+
+ gamma_slope_v = 0x800; gamma_v = 0x20;
+ for (reg_add = ISP_REG_GAMMA_VAL1; reg_add <= ISP_REG_GAMMA_VAL7;) {
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ gamma_v += 0x20;
+ }
+
+ gamma_v = 0x100;
+ for (reg_add = ISP_REG_GAMMA_VAL8; reg_add <= ISP_REG_GAMMA_VAL13;) {
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ gamma_v += 0x80;
+ }
+
+ gamma_v = 0x3fe;
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, ISP_REG_GAMMA_VAL14, reg_val);
+}
+
+static void stf_isp_config_r2y(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_0, 0x4C);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_1, 0x97);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_2, 0x1d);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_3, 0x1d5);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_4, 0x1ac);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_5, 0x80);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_6, 0x80);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_7, 0x194);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_8, 0x1ec);
+}
+
+static void stf_isp_config_y_curve(struct stfcamss *stfcamss)
+{
+ u32 reg_add;
+ u16 y_curve;
+
+ y_curve = 0x0;
+ for (reg_add = ISP_REG_YCURVE_0; reg_add <= ISP_REG_YCURVE_63;) {
+ stf_isp_reg_write(stfcamss, reg_add, y_curve);
+ reg_add += 4;
+ y_curve += 0x10;
+ }
+}
+
+static void stf_isp_config_sharpen(struct stfcamss *sc)
+{
+ u32 reg_add;
+
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN0, S_DELTA(0x7) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN1, S_DELTA(0x18) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN2, S_DELTA(0x80) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN3, S_DELTA(0x100) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN4, S_DELTA(0x10) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN5, S_DELTA(0x60) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN6, S_DELTA(0x100) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN7, S_DELTA(0x190) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN8, S_DELTA(0x0) | S_WEIGHT(0xf));
+
+ for (reg_add = ISP_REG_SHARPEN9; reg_add <= ISP_REG_SHARPEN14;) {
+ stf_isp_reg_write(sc, reg_add, S_WEIGHT(0xf));
+ reg_add += 4;
+ }
+
+ for (reg_add = ISP_REG_SHARPEN_FS0; reg_add <= ISP_REG_SHARPEN_FS5;) {
+ stf_isp_reg_write(sc, reg_add, S_FACTOR(0x10) | S_SLOPE(0x0));
+ reg_add += 4;
+ }
+
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN_WN,
+ PDIRF(0x8) | NDIRF(0x8) | WSUM(0xd7c));
+ stf_isp_reg_write(sc, ISP_REG_IUVS1, UVDIFF2(0xC0) | UVDIFF1(0x40));
+ stf_isp_reg_write(sc, ISP_REG_IUVS2, UVF(0xff) | UVSLOPE(0x0));
+ stf_isp_reg_write(sc, ISP_REG_IUVCKS1,
+ UVCKDIFF2(0xa0) | UVCKDIFF1(0x40));
+}
+
+static void stf_isp_config_dnyuv(struct stfcamss *stfcamss)
+{
+ u32 reg_val;
+
+ reg_val = YUVSW5(7) | YUVSW4(7) | YUVSW3(7) | YUVSW2(7) |
+ YUVSW1(7) | YUVSW0(7);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YSWR0, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CSWR0, reg_val);
+
+ reg_val = YUVSW3(7) | YUVSW2(7) | YUVSW1(7) | YUVSW0(7);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YSWR1, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CSWR1, reg_val);
+
+ reg_val = CURVE_D_H(0x60) | CURVE_D_L(0x40);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YDR0, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CDR0, reg_val);
+
+ reg_val = CURVE_D_H(0xd8) | CURVE_D_L(0x90);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YDR1, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CDR1, reg_val);
+
+ reg_val = CURVE_D_H(0x1e6) | CURVE_D_L(0x144);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YDR2, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CDR2, reg_val);
+}
+
+static void stf_isp_config_sat(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_GAIN, CMAD(0x0) | CMAB(0x100));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_THRESHOLD, CMD(0x1f) | CMB(0x1));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_OFFSET, VOFF(0x0) | UOFF(0x0));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_HUE_F, SIN(0x0) | COS(0x100));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_SCALE, 0x8);
+ stf_isp_reg_write(stfcamss, ISP_REG_YADJ0, YOIR(0x401) | YIMIN(0x1));
+ stf_isp_reg_write(stfcamss, ISP_REG_YADJ1, YOMAX(0x3ff) | YOMIN(0x1));
+}
+
+int stf_isp_reset(struct stf_isp_dev *isp_dev)
+{
+ stf_isp_reg_set_bit(isp_dev->stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_RST_MASK, ISPC_RST);
+ stf_isp_reg_set_bit(isp_dev->stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_RST_MASK, 0);
+
+ return 0;
+}
+
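+/*
+ * Load default parameters into every ISP processing block (OBC, OECF,
+ * LCCF, AWB, GRGB, CFA, CCM, gamma, R2Y, Y curve, sharpen, YUV denoise
+ * and saturation) and enable the corresponding modules before streaming
+ * starts.
+ */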
+void stf_isp_init_cfg(struct stf_isp_dev *isp_dev)
+{
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_DC_CFG_1, DC_AXI_ID(0x0));
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_DEC_CFG,
+ DEC_V_KEEP(0x0) |
+ DEC_V_PERIOD(0x0) |
+ DEC_H_KEEP(0x0) |
+ DEC_H_PERIOD(0x0));
+
+ stf_isp_config_obc(isp_dev->stfcamss);
+ stf_isp_config_oecf(isp_dev->stfcamss);
+ stf_isp_config_lccf(isp_dev->stfcamss);
+ stf_isp_config_awb(isp_dev->stfcamss);
+ stf_isp_config_grgb(isp_dev->stfcamss);
+ stf_isp_config_cfa(isp_dev->stfcamss);
+ stf_isp_config_ccm(isp_dev->stfcamss);
+ stf_isp_config_gamma(isp_dev->stfcamss);
+ stf_isp_config_r2y(isp_dev->stfcamss);
+ stf_isp_config_y_curve(isp_dev->stfcamss);
+ stf_isp_config_sharpen(isp_dev->stfcamss);
+ stf_isp_config_dnyuv(isp_dev->stfcamss);
+ stf_isp_config_sat(isp_dev->stfcamss);
+
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_CSI_MODULE_CFG,
+ CSI_DUMP_EN | CSI_SC_EN | CSI_AWB_EN |
+ CSI_LCCF_EN | CSI_OECF_EN | CSI_OBC_EN | CSI_DEC_EN);
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_ISP_CTRL_1,
+ CTRL_SAT(1) | CTRL_DBC | CTRL_CTC | CTRL_YHIST |
+ CTRL_YCURVE | CTRL_BIYUV | CTRL_SCE | CTRL_EE |
+ CTRL_CCE | CTRL_RGE | CTRL_CME | CTRL_AE | CTRL_CE);
+}
+
+static void stf_isp_config_crop(struct stfcamss *stfcamss,
+ struct v4l2_rect *crop)
+{
+ u32 bpp = stfcamss->isp_dev.current_fmt->bpp;
+ u32 val;
+
+ val = VSTART_CAP(crop->top) | HSTART_CAP(crop->left);
+ stf_isp_reg_write(stfcamss, ISP_REG_PIC_CAPTURE_START_CFG, val);
+
+ val = VEND_CAP(crop->height + crop->top - 1) |
+ HEND_CAP(crop->width + crop->left - 1);
+ stf_isp_reg_write(stfcamss, ISP_REG_PIC_CAPTURE_END_CFG, val);
+
+ val = H_ACT_CAP(crop->height) | W_ACT_CAP(crop->width);
+ stf_isp_reg_write(stfcamss, ISP_REG_PIPELINE_XY_SIZE, val);
+
+ val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_8);
+ stf_isp_reg_write(stfcamss, ISP_REG_STRIDE, val);
+}
+
+static void stf_isp_config_raw_fmt(struct stfcamss *stfcamss, u32 mcode)
+{
+ u32 val, val1;
+
+ switch (mcode) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ /* 3 2 3 2 1 0 1 0 B Gb B Gb Gr R Gr R */
+ val = SMY13(3) | SMY12(2) | SMY11(3) | SMY10(2) |
+ SMY3(1) | SMY2(0) | SMY1(1) | SMY0(0);
+ val1 = CTRL_SAT(0x0);
+ break;
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ /* 2 3 2 3 0 1 0 1, Gb B Gb B R Gr R Gr */
+ val = SMY13(2) | SMY12(3) | SMY11(2) | SMY10(3) |
+ SMY3(0) | SMY2(1) | SMY1(0) | SMY0(1);
+ val1 = CTRL_SAT(0x2);
+ break;
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ /* 1 0 1 0 3 2 3 2, Gr R Gr R B Gb B Gb */
+ val = SMY13(1) | SMY12(0) | SMY11(1) | SMY10(0) |
+ SMY3(3) | SMY2(2) | SMY1(3) | SMY0(2);
+ val1 = CTRL_SAT(0x3);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ /* 0 1 0 1 2 3 2 3 R Gr R Gr Gb B Gb B */
+ val = SMY13(0) | SMY12(1) | SMY11(0) | SMY10(1) |
+ SMY3(2) | SMY2(3) | SMY1(2) | SMY0(3);
+ val1 = CTRL_SAT(0x1);
+ break;
+ default:
+ val = SMY13(0) | SMY12(1) | SMY11(0) | SMY10(1) |
+ SMY3(2) | SMY2(3) | SMY1(2) | SMY0(3);
+ val1 = CTRL_SAT(0x1);
+ break;
+ }
+ stf_isp_reg_write(stfcamss, ISP_REG_RAW_FORMAT_CFG, val);
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_ISP_CTRL_1, CTRL_SAT_MASK, val1);
+}
+
+void stf_isp_settings(struct stf_isp_dev *isp_dev,
+ struct v4l2_rect *crop, u32 mcode)
+{
+ struct stfcamss *stfcamss = isp_dev->stfcamss;
+
+ stf_isp_config_crop(stfcamss, crop);
+ stf_isp_config_raw_fmt(stfcamss, mcode);
+
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_DUMP_CFG_1,
+ DUMP_BURST_LEN_MASK | DUMP_SD_MASK,
+ DUMP_BURST_LEN(3));
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ITIIWSR,
+ ITI_HSIZE(IMAGE_MAX_HEIGH) |
+ ITI_WSIZE(IMAGE_MAX_WIDTH));
+ stf_isp_reg_write(stfcamss, ISP_REG_ITIDWLSR, 0x960);
+ stf_isp_reg_write(stfcamss, ISP_REG_ITIDRLSR, 0x960);
+ stf_isp_reg_write(stfcamss, ISP_REG_SENSOR, IMAGER_SEL(1));
+}
+
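+/*
+ * Streaming start sequence: pulse ISPC_RST while keeping ISPC_ENUO and
+ * ISPC_ENLS set, arm the shadow register update (SHAD_UP_M), enable the
+ * ISP core (ISPC_EN), program the CSI interrupt/shadow settings and
+ * finally enable the CSI input (CSI_EN_S), with a short delay after
+ * each step.
+ */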
+void stf_isp_stream_set(struct stf_isp_dev *isp_dev)
+{
+ struct stfcamss *stfcamss = isp_dev->stfcamss;
+
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_ENUO | ISPC_ENLS | ISPC_RST, 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_ENUO | ISPC_ENLS, 10);
+ stf_isp_reg_write(stfcamss, ISP_REG_IESHD, SHAD_UP_M);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_ENUO | ISPC_ENLS | ISPC_EN, 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_CSIINTS,
+ CSI_INTS(1) | CSI_SHA_M(4), 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_CSIINTS,
+ CSI_INTS(2) | CSI_SHA_M(4), 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_CSI_INPUT_EN_AND_STATUS,
+ CSI_EN_S, 10);
+}
diff --git a/drivers/staging/media/starfive/camss/stf-isp.c b/drivers/staging/media/starfive/camss/stf-isp.c
new file mode 100644
index 000000000000..df7a903fbb1b
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-isp.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf_isp.c
+ *
+ * StarFive Camera Subsystem - ISP Module
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+#include <media/v4l2-rect.h>
+
+#include "stf-camss.h"
+
+static int isp_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel);
+
+static const struct stf_isp_format isp_formats_sink[] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+};
+
+static const struct stf_isp_format isp_formats_source[] = {
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, 8 },
+};
+
+static const struct stf_isp_format_table isp_formats_st7110[] = {
+ { isp_formats_sink, ARRAY_SIZE(isp_formats_sink) },
+ { isp_formats_source, ARRAY_SIZE(isp_formats_source) },
+};
+
+static const struct stf_isp_format *
+stf_g_fmt_by_mcode(const struct stf_isp_format_table *fmt_table, u32 mcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < fmt_table->nfmts; i++) {
+ if (fmt_table->fmts[i].code == mcode)
+ return &fmt_table->fmts[i];
+ }
+
+ return NULL;
+}
+
+int stf_isp_init(struct stfcamss *stfcamss)
+{
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+
+ isp_dev->stfcamss = stfcamss;
+ isp_dev->formats = isp_formats_st7110;
+ isp_dev->nformats = ARRAY_SIZE(isp_formats_st7110);
+ isp_dev->current_fmt = &isp_formats_source[0];
+
+ return 0;
+}
+
+static int isp_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_state *sd_state;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+ fmt = v4l2_subdev_state_get_format(sd_state, STF_ISP_PAD_SINK);
+ crop = v4l2_subdev_state_get_crop(sd_state, STF_ISP_PAD_SRC);
+
+ if (enable) {
+ stf_isp_reset(isp_dev);
+ stf_isp_init_cfg(isp_dev);
+ stf_isp_settings(isp_dev, crop, fmt->code);
+ stf_isp_stream_set(isp_dev);
+ }
+
+ v4l2_subdev_call(isp_dev->source_subdev, video, s_stream, enable);
+
+ v4l2_subdev_unlock_state(sd_state);
+ return 0;
+}
+
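+/*
+ * Clamp the requested size to the supported frame limits, force an even
+ * height, and fall back to the first media bus code of the pad if the
+ * requested code is not supported.
+ */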
+static void isp_try_format(struct stf_isp_dev *isp_dev,
+ struct v4l2_subdev_state *state,
+ unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ const struct stf_isp_format_table *formats;
+
+ if (pad >= STF_ISP_PAD_MAX) {
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ return;
+ }
+
+ formats = &isp_dev->formats[pad];
+
+ fmt->width = clamp_t(u32, fmt->width, STFCAMSS_FRAME_MIN_WIDTH,
+ STFCAMSS_FRAME_MAX_WIDTH);
+ fmt->height = clamp_t(u32, fmt->height, STFCAMSS_FRAME_MIN_HEIGHT,
+ STFCAMSS_FRAME_MAX_HEIGHT);
+ fmt->height &= ~0x1;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->flags = 0;
+
+ if (!stf_g_fmt_by_mcode(formats, fmt->code))
+ fmt->code = formats->fmts[0].code;
+}
+
+static int isp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ const struct stf_isp_format_table *formats;
+
+ if (code->pad == STF_ISP_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(isp_formats_sink))
+ return -EINVAL;
+
+ formats = &isp_dev->formats[code->pad];
+ code->code = formats->fmts[code->index].code;
+ } else {
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (code->index >= ARRAY_SIZE(isp_formats_source))
+ return -EINVAL;
+
+ sink_fmt = v4l2_subdev_state_get_format(state,
+ STF_ISP_PAD_SRC);
+
+ code->code = sink_fmt->code;
+ if (!code->code)
+ return -EINVAL;
+ }
+ code->flags = 0;
+
+ return 0;
+}
+
+static int isp_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_state_get_format(state, fmt->pad);
+ if (!format)
+ return -EINVAL;
+
+ isp_try_format(isp_dev, state, fmt->pad, &fmt->format);
+ *format = fmt->format;
+
+ isp_dev->current_fmt = stf_g_fmt_by_mcode(&isp_dev->formats[fmt->pad],
+ fmt->format.code);
+
+ /* Propagate the new sink format to the sink crop rectangle */
+ if (fmt->pad == STF_ISP_PAD_SINK) {
+ struct v4l2_subdev_selection sel = { 0 };
+
+ /* Reset the sink pad crop selection */
+ sel.which = fmt->which;
+ sel.pad = STF_ISP_PAD_SINK;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r.width = fmt->format.width;
+ sel.r.height = fmt->format.height;
+ isp_set_selection(sd, state, &sel);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_rect stf_frame_min_crop = {
+ .width = STFCAMSS_FRAME_MIN_WIDTH,
+ .height = STFCAMSS_FRAME_MIN_HEIGHT,
+ .top = 0,
+ .left = 0,
+};
+
+static void isp_try_crop(struct stf_isp_dev *isp_dev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_rect *crop)
+{
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_state_get_format(state, STF_ISP_PAD_SINK);
+
+ const struct v4l2_rect bounds = {
+ .width = fmt->width,
+ .height = fmt->height,
+ .left = 0,
+ .top = 0,
+ };
+
+ v4l2_rect_set_min_size(crop, &stf_frame_min_crop);
+ v4l2_rect_map_inside(crop, &bounds);
+}
+
+static int isp_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_subdev_format fmt = { 0 };
+ struct v4l2_rect *rect;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (sel->pad == STF_ISP_PAD_SINK) {
+ fmt.format = *v4l2_subdev_state_get_format(state,
+ sel->pad);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = fmt.format.width;
+ sel->r.height = fmt.format.height;
+ } else if (sel->pad == STF_ISP_PAD_SRC) {
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ sel->r = *rect;
+ }
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ if (!rect)
+ return -EINVAL;
+
+ sel->r = *rect;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
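+/*
+ * Setting the sink crop propagates the rectangle to the source crop,
+ * which in turn resets the source pad format to the crop size, so the
+ * whole pipeline stays consistent.
+ */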
+static int isp_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *rect;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ if (sel->pad == STF_ISP_PAD_SINK) {
+ struct v4l2_subdev_selection crop = { 0 };
+
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ if (!rect)
+ return -EINVAL;
+
+ isp_try_crop(isp_dev, state, &sel->r);
+ *rect = sel->r;
+
+ /* Reset source crop selection */
+ crop.which = sel->which;
+ crop.pad = STF_ISP_PAD_SRC;
+ crop.target = V4L2_SEL_TGT_CROP;
+ crop.r = *rect;
+ isp_set_selection(sd, state, &crop);
+ } else if (sel->pad == STF_ISP_PAD_SRC) {
+ struct v4l2_subdev_format fmt = { 0 };
+
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ if (!rect)
+ return -EINVAL;
+
+ isp_try_crop(isp_dev, state, &sel->r);
+ *rect = sel->r;
+
+ /* Reset source pad format width and height */
+ fmt.which = sel->which;
+ fmt.pad = STF_ISP_PAD_SRC;
+ fmt.format.width = rect->width;
+ fmt.format.height = rect->height;
+ isp_set_format(sd, state, &fmt);
+ }
+
+ dev_dbg(isp_dev->stfcamss->dev, "pad: %d sel(%d,%d)/%ux%u\n",
+ sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ return 0;
+}
+
+static int isp_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_subdev_format format = {
+ .pad = STF_ISP_PAD_SINK,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .width = 1920,
+ .height = 1080
+ }
+ };
+
+ return isp_set_format(sd, sd_state, &format);
+}
+
+static const struct v4l2_subdev_video_ops isp_video_ops = {
+ .s_stream = isp_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops isp_pad_ops = {
+ .enum_mbus_code = isp_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = isp_set_format,
+ .get_selection = isp_get_selection,
+ .set_selection = isp_set_selection,
+};
+
+static const struct v4l2_subdev_ops isp_v4l2_ops = {
+ .video = &isp_video_ops,
+ .pad = &isp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops isp_internal_ops = {
+ .init_state = isp_init_formats,
+};
+
+static const struct media_entity_operations isp_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+int stf_isp_register(struct stf_isp_dev *isp_dev, struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd = &isp_dev->subdev;
+ struct media_pad *pads = isp_dev->pads;
+ int ret;
+
+ v4l2_subdev_init(sd, &isp_v4l2_ops);
+ sd->internal_ops = &isp_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, ARRAY_SIZE(sd->name), "stf_isp");
+ v4l2_set_subdevdata(sd, isp_dev);
+
+ pads[STF_ISP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[STF_ISP_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
+ sd->entity.ops = &isp_media_ops;
+ ret = media_entity_pads_init(&sd->entity, STF_ISP_PAD_MAX, pads);
+ if (ret) {
+ dev_err(isp_dev->stfcamss->dev,
+ "Failed to init media entity: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret) {
+ dev_err(isp_dev->stfcamss->dev,
+ "Failed to register subdev: %d\n", ret);
+ goto err_subdev_cleanup;
+ }
+
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+int stf_isp_unregister(struct stf_isp_dev *isp_dev)
+{
+ v4l2_device_unregister_subdev(&isp_dev->subdev);
+ v4l2_subdev_cleanup(&isp_dev->subdev);
+ media_entity_cleanup(&isp_dev->subdev.entity);
+
+ return 0;
+}
diff --git a/drivers/staging/media/starfive/camss/stf-isp.h b/drivers/staging/media/starfive/camss/stf-isp.h
new file mode 100644
index 000000000000..955cbb048363
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-isp.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf_isp.h
+ *
+ * StarFive Camera Subsystem - ISP Module
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_ISP_H
+#define STF_ISP_H
+
+#include <media/v4l2-subdev.h>
+
+#include "stf-video.h"
+
+#define ISP_RAW_DATA_BITS 12
+#define SCALER_RATIO_MAX 1
+#define STF_ISP_REG_OFFSET_MAX 0x0fff
+#define STF_ISP_REG_DELAY_MAX 100
+
+/* isp registers */
+#define ISP_REG_CSI_INPUT_EN_AND_STATUS 0x000
+#define CSI_SCD_ERR BIT(6)
+#define CSI_ITU656_ERR BIT(4)
+#define CSI_ITU656_F BIT(3)
+#define CSI_SCD_DONE BIT(2)
+#define CSI_BUSY_S BIT(1)
+#define CSI_EN_S BIT(0)
+
+#define ISP_REG_CSIINTS 0x008
+#define CSI_INTS(n) ((n) << 16)
+#define CSI_SHA_M(n) ((n) << 0)
+#define CSI_INTS_MASK GENMASK(17, 16)
+
+#define ISP_REG_CSI_MODULE_CFG 0x010
+#define CSI_DUMP_EN BIT(19)
+#define CSI_VS_EN BIT(18)
+#define CSI_SC_EN BIT(17)
+#define CSI_OBA_EN BIT(16)
+#define CSI_AWB_EN BIT(7)
+#define CSI_LCCF_EN BIT(6)
+#define CSI_OECFHM_EN BIT(5)
+#define CSI_OECF_EN BIT(4)
+#define CSI_LCBQ_EN BIT(3)
+#define CSI_OBC_EN BIT(2)
+#define CSI_DEC_EN BIT(1)
+#define CSI_DC_EN BIT(0)
+
+#define ISP_REG_SENSOR 0x014
+#define DVP_SYNC_POL(n) ((n) << 2)
+#define ITU656_EN(n) ((n) << 1)
+#define IMAGER_SEL(n) ((n) << 0)
+
+#define ISP_REG_RAW_FORMAT_CFG 0x018
+#define SMY13(n) ((n) << 14)
+#define SMY12(n) ((n) << 12)
+#define SMY11(n) ((n) << 10)
+#define SMY10(n) ((n) << 8)
+#define SMY3(n) ((n) << 6)
+#define SMY2(n) ((n) << 4)
+#define SMY1(n) ((n) << 2)
+#define SMY0(n) ((n) << 0)
+
+#define ISP_REG_PIC_CAPTURE_START_CFG 0x01c
+#define VSTART_CAP(n) ((n) << 16)
+#define HSTART_CAP(n) ((n) << 0)
+
+#define ISP_REG_PIC_CAPTURE_END_CFG 0x020
+#define VEND_CAP(n) ((n) << 16)
+#define HEND_CAP(n) ((n) << 0)
+
+#define ISP_REG_DUMP_CFG_0 0x024
+#define ISP_REG_DUMP_CFG_1 0x028
+#define DUMP_ID(n) ((n) << 24)
+#define DUMP_SHT(n) ((n) << 20)
+#define DUMP_BURST_LEN(n) ((n) << 16)
+#define DUMP_SD(n) ((n) << 0)
+#define DUMP_BURST_LEN_MASK GENMASK(17, 16)
+#define DUMP_SD_MASK GENMASK(15, 0)
+
+#define ISP_REG_DEC_CFG 0x030
+#define DEC_V_KEEP(n) ((n) << 24)
+#define DEC_V_PERIOD(n) ((n) << 16)
+#define DEC_H_KEEP(n) ((n) << 8)
+#define DEC_H_PERIOD(n) ((n) << 0)
+
+#define ISP_REG_OBC_CFG 0x034
+#define OBC_W_H(y) ((y) << 4)
+#define OBC_W_W(x) ((x) << 0)
+
+#define ISP_REG_DC_CFG_1 0x044
+#define DC_AXI_ID(n) ((n) << 0)
+
+#define ISP_REG_LCCF_CFG_0 0x050
+#define Y_DISTANCE(y) ((y) << 16)
+#define X_DISTANCE(x) ((x) << 0)
+
+#define ISP_REG_LCCF_CFG_1 0x058
+#define LCCF_MAX_DIS(n) ((n) << 0)
+
+#define ISP_REG_LCBQ_CFG_0 0x074
+#define H_LCBQ(y) ((y) << 12)
+#define W_LCBQ(x) ((x) << 8)
+
+#define ISP_REG_LCBQ_CFG_1 0x07c
+#define Y_COOR(y) ((y) << 16)
+#define X_COOR(x) ((x) << 0)
+
+#define ISP_REG_LCCF_CFG_2 0x0e0
+#define ISP_REG_LCCF_CFG_3 0x0e4
+#define ISP_REG_LCCF_CFG_4 0x0e8
+#define ISP_REG_LCCF_CFG_5 0x0ec
+#define LCCF_F2_PAR(n) ((n) << 16)
+#define LCCF_F1_PAR(n) ((n) << 0)
+
+#define ISP_REG_OECF_X0_CFG0 0x100
+#define ISP_REG_OECF_X0_CFG1 0x104
+#define ISP_REG_OECF_X0_CFG2 0x108
+#define ISP_REG_OECF_X0_CFG3 0x10c
+#define ISP_REG_OECF_X0_CFG4 0x110
+#define ISP_REG_OECF_X0_CFG5 0x114
+#define ISP_REG_OECF_X0_CFG6 0x118
+#define ISP_REG_OECF_X0_CFG7 0x11c
+
+#define ISP_REG_OECF_Y3_CFG0 0x1e0
+#define ISP_REG_OECF_Y3_CFG1 0x1e4
+#define ISP_REG_OECF_Y3_CFG2 0x1e8
+#define ISP_REG_OECF_Y3_CFG3 0x1ec
+#define ISP_REG_OECF_Y3_CFG4 0x1f0
+#define ISP_REG_OECF_Y3_CFG5 0x1f4
+#define ISP_REG_OECF_Y3_CFG6 0x1f8
+#define ISP_REG_OECF_Y3_CFG7 0x1fc
+
+#define ISP_REG_OECF_S0_CFG0 0x200
+#define ISP_REG_OECF_S3_CFG7 0x27c
+#define OCEF_PAR_H(n) ((n) << 16)
+#define OCEF_PAR_L(n) ((n) << 0)
+
+#define ISP_REG_AWB_X0_CFG_0 0x280
+#define ISP_REG_AWB_X0_CFG_1 0x284
+#define ISP_REG_AWB_X1_CFG_0 0x288
+#define ISP_REG_AWB_X1_CFG_1 0x28c
+#define ISP_REG_AWB_X2_CFG_0 0x290
+#define ISP_REG_AWB_X2_CFG_1 0x294
+#define ISP_REG_AWB_X3_CFG_0 0x298
+#define ISP_REG_AWB_X3_CFG_1 0x29c
+#define AWB_X_SYMBOL_H(n) ((n) << 16)
+#define AWB_X_SYMBOL_L(n) ((n) << 0)
+
+#define ISP_REG_AWB_Y0_CFG_0 0x2a0
+#define ISP_REG_AWB_Y0_CFG_1 0x2a4
+#define ISP_REG_AWB_Y1_CFG_0 0x2a8
+#define ISP_REG_AWB_Y1_CFG_1 0x2ac
+#define ISP_REG_AWB_Y2_CFG_0 0x2b0
+#define ISP_REG_AWB_Y2_CFG_1 0x2b4
+#define ISP_REG_AWB_Y3_CFG_0 0x2b8
+#define ISP_REG_AWB_Y3_CFG_1 0x2bc
+#define AWB_Y_SYMBOL_H(n) ((n) << 16)
+#define AWB_Y_SYMBOL_L(n) ((n) << 0)
+
+#define ISP_REG_AWB_S0_CFG_0 0x2c0
+#define ISP_REG_AWB_S0_CFG_1 0x2c4
+#define ISP_REG_AWB_S1_CFG_0 0x2c8
+#define ISP_REG_AWB_S1_CFG_1 0x2cc
+#define ISP_REG_AWB_S2_CFG_0 0x2d0
+#define ISP_REG_AWB_S2_CFG_1 0x2d4
+#define ISP_REG_AWB_S3_CFG_0 0x2d8
+#define ISP_REG_AWB_S3_CFG_1 0x2dc
+#define AWB_S_SYMBOL_H(n) ((n) << 16)
+#define AWB_S_SYMBOL_L(n) ((n) << 0)
+
+#define ISP_REG_OBCG_CFG_0 0x2e0
+#define ISP_REG_OBCG_CFG_1 0x2e4
+#define ISP_REG_OBCG_CFG_2 0x2e8
+#define ISP_REG_OBCG_CFG_3 0x2ec
+#define ISP_REG_OBCO_CFG_0 0x2f0
+#define ISP_REG_OBCO_CFG_1 0x2f4
+#define ISP_REG_OBCO_CFG_2 0x2f8
+#define ISP_REG_OBCO_CFG_3 0x2fc
+#define GAIN_D_POINT(x) ((x) << 24)
+#define GAIN_C_POINT(x) ((x) << 16)
+#define GAIN_B_POINT(x) ((x) << 8)
+#define GAIN_A_POINT(x) ((x) << 0)
+#define OFFSET_D_POINT(x) ((x) << 24)
+#define OFFSET_C_POINT(x) ((x) << 16)
+#define OFFSET_B_POINT(x) ((x) << 8)
+#define OFFSET_A_POINT(x) ((x) << 0)
+
+#define ISP_REG_ISP_CTRL_0 0xa00
+#define ISPC_LINE BIT(27)
+#define ISPC_SC BIT(26)
+#define ISPC_CSI BIT(25)
+#define ISPC_ISP BIT(24)
+#define ISPC_ENUO BIT(20)
+#define ISPC_ENLS BIT(17)
+#define ISPC_ENSS1 BIT(12)
+#define ISPC_ENSS0 BIT(11)
+#define ISPC_RST BIT(1)
+#define ISPC_EN BIT(0)
+#define ISPC_RST_MASK BIT(1)
+#define ISPC_INT_ALL_MASK GENMASK(27, 24)
+
+#define ISP_REG_ISP_CTRL_1 0xa08
+#define CTRL_SAT(n) ((n) << 28)
+#define CTRL_DBC BIT(22)
+#define CTRL_CTC BIT(21)
+#define CTRL_YHIST BIT(20)
+#define CTRL_YCURVE BIT(19)
+#define CTRL_CTM BIT(18)
+#define CTRL_BIYUV BIT(17)
+#define CTRL_SCE BIT(8)
+#define CTRL_EE BIT(7)
+#define CTRL_CCE BIT(5)
+#define CTRL_RGE BIT(4)
+#define CTRL_CME BIT(3)
+#define CTRL_AE BIT(2)
+#define CTRL_CE BIT(1)
+#define CTRL_SAT_MASK GENMASK(31, 28)
+
+#define ISP_REG_PIPELINE_XY_SIZE 0xa0c
+#define H_ACT_CAP(n) ((n) << 16)
+#define W_ACT_CAP(n) ((n) << 0)
+
+#define ISP_REG_ICTC 0xa10
+#define GF_MODE(n) ((n) << 30)
+#define MAXGT(n) ((n) << 16)
+#define MINGT(n) ((n) << 0)
+
+#define ISP_REG_IDBC 0xa14
+#define BADGT(n) ((n) << 16)
+#define BADXT(n) ((n) << 0)
+
+#define ISP_REG_ICFAM 0xa1c
+#define CROSS_COV(n) ((n) << 4)
+#define HV_W(n) ((n) << 0)
+
+#define ISP_REG_CS_GAIN 0xa30
+#define CMAD(n) ((n) << 16)
+#define CMAB(n) ((n) << 0)
+
+#define ISP_REG_CS_THRESHOLD 0xa34
+#define CMD(n) ((n) << 16)
+#define CMB(n) ((n) << 0)
+
+#define ISP_REG_CS_OFFSET 0xa38
+#define VOFF(n) ((n) << 16)
+#define UOFF(n) ((n) << 0)
+
+#define ISP_REG_CS_HUE_F 0xa3c
+#define SIN(n) ((n) << 16)
+#define COS(n) ((n) << 0)
+
+#define ISP_REG_CS_SCALE 0xa40
+
+#define ISP_REG_IESHD 0xa50
+#define SHAD_UP_M BIT(1)
+#define SHAD_UP_EN BIT(0)
+
+#define ISP_REG_YADJ0 0xa54
+#define YOIR(n) ((n) << 16)
+#define YIMIN(n) ((n) << 0)
+
+#define ISP_REG_YADJ1 0xa58
+#define YOMAX(n) ((n) << 16)
+#define YOMIN(n) ((n) << 0)
+
+#define ISP_REG_Y_PLANE_START_ADDR 0xa80
+#define ISP_REG_UV_PLANE_START_ADDR 0xa84
+#define ISP_REG_STRIDE 0xa88
+
+#define ISP_REG_ITIIWSR 0xb20
+#define ITI_HSIZE(n) ((n) << 16)
+#define ITI_WSIZE(n) ((n) << 0)
+
+#define ISP_REG_ITIDWLSR 0xb24
+#define ISP_REG_ITIPDFR 0xb38
+#define ISP_REG_ITIDRLSR 0xb3c
+
+#define ISP_REG_DNYUV_YSWR0 0xc00
+#define ISP_REG_DNYUV_YSWR1 0xc04
+#define ISP_REG_DNYUV_CSWR0 0xc08
+#define ISP_REG_DNYUV_CSWR1 0xc0c
+#define YUVSW5(n) ((n) << 20)
+#define YUVSW4(n) ((n) << 16)
+#define YUVSW3(n) ((n) << 12)
+#define YUVSW2(n) ((n) << 8)
+#define YUVSW1(n) ((n) << 4)
+#define YUVSW0(n) ((n) << 0)
+
+#define ISP_REG_DNYUV_YDR0 0xc10
+#define ISP_REG_DNYUV_YDR1 0xc14
+#define ISP_REG_DNYUV_YDR2 0xc18
+#define ISP_REG_DNYUV_CDR0 0xc1c
+#define ISP_REG_DNYUV_CDR1 0xc20
+#define ISP_REG_DNYUV_CDR2 0xc24
+#define CURVE_D_H(n) ((n) << 16)
+#define CURVE_D_L(n) ((n) << 0)
+
+#define ISP_REG_ICAMD_0 0xc40
+#define ISP_REG_ICAMD_12 0xc70
+#define ISP_REG_ICAMD_20 0xc90
+#define ISP_REG_ICAMD_24 0xca0
+#define ISP_REG_ICAMD_25 0xca4
+#define DNRM_F(n) ((n) << 16)
+#define CCM_M_DAT(n) ((n) << 0)
+
+#define ISP_REG_GAMMA_VAL0 0xe00
+#define ISP_REG_GAMMA_VAL1 0xe04
+#define ISP_REG_GAMMA_VAL2 0xe08
+#define ISP_REG_GAMMA_VAL3 0xe0c
+#define ISP_REG_GAMMA_VAL4 0xe10
+#define ISP_REG_GAMMA_VAL5 0xe14
+#define ISP_REG_GAMMA_VAL6 0xe18
+#define ISP_REG_GAMMA_VAL7 0xe1c
+#define ISP_REG_GAMMA_VAL8 0xe20
+#define ISP_REG_GAMMA_VAL9 0xe24
+#define ISP_REG_GAMMA_VAL10 0xe28
+#define ISP_REG_GAMMA_VAL11 0xe2c
+#define ISP_REG_GAMMA_VAL12 0xe30
+#define ISP_REG_GAMMA_VAL13 0xe34
+#define ISP_REG_GAMMA_VAL14 0xe38
+#define GAMMA_S_VAL(n) ((n) << 16)
+#define GAMMA_VAL(n) ((n) << 0)
+
+#define ISP_REG_R2Y_0 0xe40
+#define ISP_REG_R2Y_1 0xe44
+#define ISP_REG_R2Y_2 0xe48
+#define ISP_REG_R2Y_3 0xe4c
+#define ISP_REG_R2Y_4 0xe50
+#define ISP_REG_R2Y_5 0xe54
+#define ISP_REG_R2Y_6 0xe58
+#define ISP_REG_R2Y_7 0xe5c
+#define ISP_REG_R2Y_8 0xe60
+
+#define ISP_REG_SHARPEN0 0xe80
+#define ISP_REG_SHARPEN1 0xe84
+#define ISP_REG_SHARPEN2 0xe88
+#define ISP_REG_SHARPEN3 0xe8c
+#define ISP_REG_SHARPEN4 0xe90
+#define ISP_REG_SHARPEN5 0xe94
+#define ISP_REG_SHARPEN6 0xe98
+#define ISP_REG_SHARPEN7 0xe9c
+#define ISP_REG_SHARPEN8 0xea0
+#define ISP_REG_SHARPEN9 0xea4
+#define ISP_REG_SHARPEN10 0xea8
+#define ISP_REG_SHARPEN11 0xeac
+#define ISP_REG_SHARPEN12 0xeb0
+#define ISP_REG_SHARPEN13 0xeb4
+#define ISP_REG_SHARPEN14 0xeb8
+#define S_DELTA(n) ((n) << 16)
+#define S_WEIGHT(n) ((n) << 8)
+
+#define ISP_REG_SHARPEN_FS0 0xebc
+#define ISP_REG_SHARPEN_FS1 0xec0
+#define ISP_REG_SHARPEN_FS2 0xec4
+#define ISP_REG_SHARPEN_FS3 0xec8
+#define ISP_REG_SHARPEN_FS4 0xecc
+#define ISP_REG_SHARPEN_FS5 0xed0
+#define S_FACTOR(n) ((n) << 24)
+#define S_SLOPE(n) ((n) << 0)
+
+#define ISP_REG_SHARPEN_WN 0xed4
+#define PDIRF(n) ((n) << 28)
+#define NDIRF(n) ((n) << 24)
+#define WSUM(n) ((n) << 0)
+
+#define ISP_REG_IUVS1 0xed8
+#define UVDIFF2(n) ((n) << 16)
+#define UVDIFF1(n) ((n) << 0)
+
+#define ISP_REG_IUVS2 0xedc
+#define UVF(n) ((n) << 24)
+#define UVSLOPE(n) ((n) << 0)
+
+#define ISP_REG_IUVCKS1 0xee0
+#define UVCKDIFF2(n) ((n) << 16)
+#define UVCKDIFF1(n) ((n) << 0)
+
+#define ISP_REG_IUVCKS2 0xee4
+
+#define ISP_REG_ISHRPET 0xee8
+#define TH(n) ((n) << 8)
+#define EN(n) ((n) << 0)
+
+#define ISP_REG_YCURVE_0 0xf00
+#define ISP_REG_YCURVE_63 0xffc
+
+#define IMAGE_MAX_WIDTH 1920
+#define IMAGE_MAX_HEIGH 1080
+
+/* pad id for media framework */
+enum stf_isp_pad_id {
+ STF_ISP_PAD_SINK = 0,
+ STF_ISP_PAD_SRC,
+ STF_ISP_PAD_MAX
+};
+
+struct stf_isp_format {
+ u32 code;
+ u8 bpp;
+};
+
+struct stf_isp_format_table {
+ const struct stf_isp_format *fmts;
+ int nfmts;
+};
+
+struct stf_isp_dev {
+ struct stfcamss *stfcamss;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[STF_ISP_PAD_MAX];
+ const struct stf_isp_format_table *formats;
+ unsigned int nformats;
+ struct v4l2_subdev *source_subdev;
+ const struct stf_isp_format *current_fmt;
+};
+
+int stf_isp_reset(struct stf_isp_dev *isp_dev);
+void stf_isp_init_cfg(struct stf_isp_dev *isp_dev);
+void stf_isp_settings(struct stf_isp_dev *isp_dev,
+ struct v4l2_rect *crop, u32 mcode);
+void stf_isp_stream_set(struct stf_isp_dev *isp_dev);
+int stf_isp_init(struct stfcamss *stfcamss);
+int stf_isp_register(struct stf_isp_dev *isp_dev, struct v4l2_device *v4l2_dev);
+int stf_isp_unregister(struct stf_isp_dev *isp_dev);
+
+#endif /* STF_ISP_H */
diff --git a/drivers/staging/media/starfive/camss/stf-video.c b/drivers/staging/media/starfive/camss/stf-video.c
new file mode 100644
index 000000000000..a0420eb6a0aa
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-video.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf_video.c
+ *
+ * StarFive Camera Subsystem - V4L2 device node
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "stf-camss.h"
+#include "stf-video.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static inline struct stfcamss_buffer *
+to_stfcamss_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct stfcamss_buffer, vb);
+}
+
+static const struct stfcamss_format_info *
+video_g_fi_by_index(struct stfcamss_video *video, int index)
+{
+ if (index >= video->nformats)
+ return NULL;
+
+ return &video->formats[index];
+}
+
+static const struct stfcamss_format_info *
+video_g_fi_by_mcode(struct stfcamss_video *video, u32 mcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].code == mcode)
+ return &video->formats[i];
+ }
+
+ return NULL;
+}
+
+static const struct stfcamss_format_info *
+video_g_fi_by_pfmt(struct stfcamss_video *video, u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].pixelformat == pixelformat)
+ return &video->formats[i];
+ }
+
+ return NULL;
+}
+
+static int __video_try_fmt(struct stfcamss_video *video, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ const struct stfcamss_format_info *fi;
+ u32 width, height;
+ u32 bpl;
+ unsigned int i;
+
+ fi = video_g_fi_by_pfmt(video, pix->pixelformat);
+ if (!fi)
+ fi = &video->formats[0]; /* default format */
+
+ width = pix->width;
+ height = pix->height;
+
+ memset(pix, 0, sizeof(*pix));
+
+ pix->pixelformat = fi->pixelformat;
+ pix->width = clamp_t(u32, width, STFCAMSS_FRAME_MIN_WIDTH,
+ STFCAMSS_FRAME_MAX_WIDTH);
+ pix->height = clamp_t(u32, height, STFCAMSS_FRAME_MIN_HEIGHT,
+ STFCAMSS_FRAME_MAX_HEIGHT);
+ bpl = pix->width * fi->bpp / 8;
+ bpl = ALIGN(bpl, video->bpl_alignment);
+ pix->bytesperline = bpl;
+
+ for (i = 0; i < fi->planes; ++i)
+ pix->sizeimage += bpl * pix->height / fi->vsub[i];
+
+ pix->field = V4L2_FIELD_NONE;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->flags = 0;
+ pix->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ pix->colorspace,
+ pix->ycbcr_enc);
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+
+ return 0;
+}
+
+static int stf_video_init_format(struct stfcamss_video *video)
+{
+ int ret;
+ struct v4l2_format format = {
+ .type = video->type,
+ .fmt.pix = {
+ .width = 1920,
+ .height = 1080,
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ },
+ };
+
+ ret = __video_try_fmt(video, &format);
+
+ if (ret < 0)
+ return ret;
+
+ video->active_fmt = format;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static int video_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct stfcamss_video *video = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format *format = &video->active_fmt.fmt.pix;
+
+ if (*num_planes) {
+ if (*num_planes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < format->sizeimage)
+ return -EINVAL;
+ } else {
+ *num_planes = 1;
+ sizes[0] = format->sizeimage;
+ }
+
+ if (!sizes[0]) {
+		dev_dbg(video->stfcamss->dev,
+			"%s: image size is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(video->stfcamss->dev, "planes = %d, size = %d\n",
+ *num_planes, sizes[0]);
+
+ return 0;
+}
+
+static int video_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct stfcamss_buffer *buffer = to_stfcamss_buffer(vbuf);
+ const struct v4l2_pix_format *fmt = &video->active_fmt.fmt.pix;
+ dma_addr_t *paddr;
+
+ paddr = vb2_plane_cookie(vb, 0);
+ buffer->addr[0] = *paddr;
+
+ if (fmt->pixelformat == V4L2_PIX_FMT_NV12)
+ buffer->addr[1] =
+ buffer->addr[0] + fmt->bytesperline * fmt->height;
+
+ return 0;
+}
+
+static int video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ const struct v4l2_pix_format *fmt = &video->active_fmt.fmt.pix;
+
+ if (fmt->sizeimage > vb2_plane_size(vb, 0)) {
+ dev_dbg(video->stfcamss->dev,
+ "sizeimage = %u, plane size = %u\n",
+ fmt->sizeimage, (unsigned int)vb2_plane_size(vb, 0));
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, fmt->sizeimage);
+
+ vbuf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static void video_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct stfcamss_buffer *buffer = to_stfcamss_buffer(vbuf);
+
+ video->ops->queue_buffer(video, buffer);
+}
+
+static int video_get_subdev_format(struct stfcamss_video *video,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ struct media_entity *entity;
+ int ret;
+
+ entity = &video->vdev.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_pad_remote_pad_first(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ fmt->pad = pad->index;
+
+ ret = v4l2_subdev_call_state_active(subdev, pad, get_fmt, fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ else if (!ret)
+ break;
+ }
+
+ return 0;
+}
+
+static int stf_video_check_format(struct stfcamss_video *video)
+{
+ struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix;
+ const struct stfcamss_format_info *fi;
+ int ret;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ ret = video_get_subdev_format(video, &sd_fmt);
+ if (ret < 0)
+ return ret;
+
+ fi = video_g_fi_by_mcode(video, sd_fmt.format.code);
+ if (!fi)
+ return -EINVAL;
+
+ if (pix->pixelformat != fi->pixelformat ||
+ pix->height != sd_fmt.format.height ||
+ pix->width != sd_fmt.format.width ||
+ pix->field != sd_fmt.format.field)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct stfcamss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+ int ret;
+
+ ret = video_device_pipeline_start(vdev, &video->stfcamss->pipe);
+ if (ret < 0) {
+		dev_err(video->stfcamss->dev,
+			"Failed to start media pipeline: %d\n", ret);
+ goto err_ret_buffers;
+ }
+
+ ret = pm_runtime_resume_and_get(video->stfcamss->dev);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev, "power up failed %d\n", ret);
+ goto err_pipeline_stop;
+ }
+
+ video->ops->start_streaming(video);
+
+ ret = v4l2_subdev_call(video->source_subdev, video, s_stream, true);
+ if (ret) {
+ dev_err(video->stfcamss->dev, "stream on failed\n");
+ goto err_pm_put;
+ }
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put(video->stfcamss->dev);
+err_pipeline_stop:
+ video_device_pipeline_stop(vdev);
+err_ret_buffers:
+ video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void video_stop_streaming(struct vb2_queue *q)
+{
+ struct stfcamss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+
+ video->ops->stop_streaming(video);
+
+ v4l2_subdev_call(video->source_subdev, video, s_stream, false);
+
+ pm_runtime_put(video->stfcamss->dev);
+
+ video_device_pipeline_stop(vdev);
+ video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops stf_video_vb2_q_ops = {
+ .queue_setup = video_queue_setup,
+ .buf_init = video_buf_init,
+ .buf_prepare = video_buf_prepare,
+ .buf_queue = video_buf_queue,
+ .start_streaming = video_start_streaming,
+ .stop_streaming = video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "starfive-camss", sizeof(cap->driver));
+ strscpy(cap->card, "Starfive Camera Subsystem", sizeof(cap->card));
+
+ return 0;
+}
+
+static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+ const struct stfcamss_format_info *fi;
+
+ if (f->index >= video->nformats)
+ return -EINVAL;
+
+ if (f->mbus_code) {
+ /* Each entry in formats[] table has unique mbus_code */
+ if (f->index > 0)
+ return -EINVAL;
+
+ fi = video_g_fi_by_mcode(video, f->mbus_code);
+ } else {
+ fi = video_g_fi_by_index(video, f->index);
+ }
+
+ if (!fi)
+ return -EINVAL;
+
+ f->pixelformat = fi->pixelformat;
+
+ return 0;
+}
+
+static int video_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+ unsigned int i;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].pixelformat == fsize->pixel_format)
+ break;
+ }
+
+ if (i == video->nformats)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = STFCAMSS_FRAME_MIN_WIDTH;
+ fsize->stepwise.max_width = STFCAMSS_FRAME_MAX_WIDTH;
+ fsize->stepwise.min_height = STFCAMSS_FRAME_MIN_HEIGHT;
+ fsize->stepwise.max_height = STFCAMSS_FRAME_MAX_HEIGHT;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+
+ *f = video->active_fmt;
+
+ return 0;
+}
+
+static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+ int ret;
+
+ if (vb2_is_busy(&video->vb2_q))
+ return -EBUSY;
+
+ ret = __video_try_fmt(video, f);
+ if (ret < 0)
+ return ret;
+
+ video->active_fmt = *f;
+
+ return 0;
+}
+
+static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+
+ return __video_try_fmt(video, f);
+}
+
+static const struct v4l2_ioctl_ops stf_vid_ioctl_ops = {
+ .vidioc_querycap = video_querycap,
+ .vidioc_enum_fmt_vid_cap = video_enum_fmt,
+ .vidioc_enum_framesizes = video_enum_framesizes,
+ .vidioc_g_fmt_vid_cap = video_g_fmt,
+ .vidioc_s_fmt_vid_cap = video_s_fmt,
+ .vidioc_try_fmt_vid_cap = video_try_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static const struct v4l2_file_operations stf_vid_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+/* -----------------------------------------------------------------------------
+ * STFCAMSS video core
+ */
+
+static int stf_link_validate(struct media_link *link)
+{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct stfcamss_video *video = video_get_drvdata(vdev);
+ int ret;
+
+ ret = stf_video_check_format(video);
+
+ return ret;
+}
+
+static const struct media_entity_operations stf_media_ops = {
+ .link_validate = stf_link_validate,
+};
+
+static void stf_video_release(struct video_device *vdev)
+{
+ struct stfcamss_video *video = video_get_drvdata(vdev);
+
+ media_entity_cleanup(&vdev->entity);
+
+ mutex_destroy(&video->q_lock);
+ mutex_destroy(&video->lock);
+}
+
+int stf_video_register(struct stfcamss_video *video,
+ struct v4l2_device *v4l2_dev, const char *name)
+{
+ struct video_device *vdev = &video->vdev;
+ struct vb2_queue *q;
+ struct media_pad *pad = &video->pad;
+ int ret;
+
+ mutex_init(&video->q_lock);
+ mutex_init(&video->lock);
+
+ q = &video->vb2_q;
+ q->drv_priv = video;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->ops = &stf_video_vb2_q_ops;
+ q->type = video->type;
+ q->io_modes = VB2_DMABUF | VB2_MMAP;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->buf_struct_size = sizeof(struct stfcamss_buffer);
+ q->dev = video->stfcamss->dev;
+ q->lock = &video->q_lock;
+ q->min_queued_buffers = STFCAMSS_MIN_BUFFERS;
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to init vb2 queue: %d\n", ret);
+ goto err_mutex_destroy;
+ }
+
+ pad->flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, pad);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to init video entity: %d\n", ret);
+ goto err_mutex_destroy;
+ }
+
+ ret = stf_video_init_format(video);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to init format: %d\n", ret);
+ goto err_media_cleanup;
+ }
+
+ vdev->fops = &stf_vid_fops;
+ vdev->ioctl_ops = &stf_vid_ioctl_ops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->entity.ops = &stf_media_ops;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->release = stf_video_release;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->queue = &video->vb2_q;
+ vdev->lock = &video->lock;
+ strscpy(vdev->name, name, sizeof(vdev->name));
+
+ video_set_drvdata(vdev, video);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to register video device: %d\n", ret);
+ goto err_media_cleanup;
+ }
+
+ return 0;
+
+err_media_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_mutex_destroy:
+ mutex_destroy(&video->lock);
+ mutex_destroy(&video->q_lock);
+ return ret;
+}
+
+void stf_video_unregister(struct stfcamss_video *video)
+{
+ vb2_video_unregister_device(&video->vdev);
+}
diff --git a/drivers/staging/media/starfive/camss/stf-video.h b/drivers/staging/media/starfive/camss/stf-video.h
new file mode 100644
index 000000000000..8052b77e3ad8
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-video.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf_video.h
+ *
+ * StarFive Camera Subsystem - V4L2 device node
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_VIDEO_H
+#define STF_VIDEO_H
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+
+#define STFCAMSS_FRAME_MIN_WIDTH 64
+#define STFCAMSS_FRAME_MAX_WIDTH 1920
+#define STFCAMSS_FRAME_MIN_HEIGHT 64
+#define STFCAMSS_FRAME_MAX_HEIGHT 1080
+#define STFCAMSS_FRAME_WIDTH_ALIGN_8 8
+#define STFCAMSS_FRAME_WIDTH_ALIGN_128 128
+#define STFCAMSS_MIN_BUFFERS 2
+
+#define STFCAMSS_MAX_ENTITY_NAME_LEN 27
+
+enum stf_v_line_id {
+ STF_V_LINE_WR = 0,
+ STF_V_LINE_ISP,
+ STF_V_LINE_MAX,
+};
+
+enum stf_capture_type {
+ STF_CAPTURE_RAW = 0,
+ STF_CAPTURE_YUV,
+ STF_CAPTURE_NUM,
+};
+
+struct stfcamss_buffer {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t addr[2];
+ struct list_head queue;
+};
+
+struct fract {
+ u8 numerator;
+ u8 denominator;
+};
+
+/*
+ * struct stfcamss_format_info - ISP media bus format information
+ * @code: V4L2 media bus format code
+ * @pixelformat: V4L2 pixel format FCC identifier
+ * @planes: Number of planes
+ * @vsub: Vertical subsampling (for each plane)
+ * @bpp: Bits per pixel when stored in memory (same for all planes)
+ */
+struct stfcamss_format_info {
+ u32 code;
+ u32 pixelformat;
+ u8 planes;
+ u8 vsub[3];
+ u8 bpp;
+};
+
+struct stfcamss_video {
+ struct stfcamss *stfcamss;
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct media_pad pad;
+ struct v4l2_format active_fmt;
+ enum v4l2_buf_type type;
+ const struct stfcamss_video_ops *ops;
+ struct mutex lock; /* serialize device access */
+ struct mutex q_lock; /* protects the queue */
+ unsigned int bpl_alignment;
+ const struct stfcamss_format_info *formats;
+ unsigned int nformats;
+ struct v4l2_subdev *source_subdev;
+};
+
+struct stfcamss_video_ops {
+ int (*queue_buffer)(struct stfcamss_video *video,
+ struct stfcamss_buffer *buf);
+ int (*flush_buffers)(struct stfcamss_video *video,
+ enum vb2_buffer_state state);
+ void (*start_streaming)(struct stfcamss_video *video);
+ void (*stop_streaming)(struct stfcamss_video *video);
+};
+
+int stf_video_register(struct stfcamss_video *video,
+ struct v4l2_device *v4l2_dev, const char *name);
+
+void stf_video_unregister(struct stfcamss_video *video);
+
+#endif /* STF_VIDEO_H */
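
Note on the format description above: the bytesperline/sizeimage arithmetic that __video_try_fmt() derives from @planes, @vsub and @bpp can be checked in isolation. The standalone sketch below recomputes it for a 1920x1080 NV12 capture, assuming an NV12 table entry with planes = 2, vsub = {1, 2}, bpp = 8 and an 8-byte line alignment; those values are illustrative assumptions, the driver's real ones live in its format tables and in video->bpl_alignment.

/* Illustrative, standalone recomputation of the __video_try_fmt() size math. */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	/* Assumed NV12 entry: 2 planes, vsub = {1, 2}, 8 bits per pixel, 8-byte alignment. */
	unsigned int width = 1920, height = 1080, bpp = 8, bpl_align = 8;
	unsigned int vsub[2] = { 1, 2 };
	unsigned int bpl = ALIGN_UP(width * bpp / 8, bpl_align);
	unsigned int sizeimage = 0;
	unsigned int i;

	for (i = 0; i < 2; i++)
		sizeimage += bpl * height / vsub[i];

	/* Expected: bpl = 1920, sizeimage = 3110400 (2073600 luma + 1036800 chroma). */
	printf("bpl = %u, sizeimage = %u\n", bpl, sizeimage);
	return 0;
}
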
diff --git a/drivers/staging/media/sunxi/Kconfig b/drivers/staging/media/sunxi/Kconfig
new file mode 100644
index 000000000000..62a486aba88b
--- /dev/null
+++ b/drivers/staging/media/sunxi/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_SUNXI
+ bool "Allwinner sunXi family Video Devices"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ If you have an Allwinner SoC based on the sunXi family, say Y.
+
+ Note that this option doesn't include new drivers in the
+ kernel: saying N will just cause Kconfig to skip all the
+ questions about Allwinner media devices.
+
+if VIDEO_SUNXI
+
+source "drivers/staging/media/sunxi/cedrus/Kconfig"
+source "drivers/staging/media/sunxi/sun6i-isp/Kconfig"
+
+endif
diff --git a/drivers/staging/media/sunxi/Makefile b/drivers/staging/media/sunxi/Makefile
new file mode 100644
index 000000000000..3d20b2f0e644
--- /dev/null
+++ b/drivers/staging/media/sunxi/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += cedrus/
+obj-$(CONFIG_VIDEO_SUN6I_ISP) += sun6i-isp/
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
new file mode 100644
index 000000000000..cb07a343c9c2
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_SUNXI_CEDRUS
+ tristate "Allwinner Cedrus VPU driver"
+ depends on VIDEO_DEV
+ depends on RESET_CONTROLLER
+ depends on HAS_DMA
+ depends on OF
+ select MEDIA_CONTROLLER
+ select SUNXI_SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the VPU found in Allwinner SoCs, also known as the Cedar
+ video engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunxi-cedrus.
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
new file mode 100644
index 000000000000..a647b3690bf8
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
+
+sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o \
+ cedrus_mpeg2.o cedrus_h264.o cedrus_h265.o \
+ cedrus_vp8.o
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO
new file mode 100644
index 000000000000..00aa304a7e36
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/TODO
@@ -0,0 +1,16 @@
+This driver suffers from a bad initial design that leaves various aspects
+tightly intertwined, making it difficult to scale to new codecs and to add
+encoding support in the future.
+
+Before leaving the staging area, it should be reworked to clearly distinguish
+between different aspects:
+- platform, with resources management, interrupt handler, watchdog,
+ v4l2 and m2m devices registration;
+- proc, with video device registration and related operations;
+- context, with m2m context, queue and controls management;
+- engine, with each individual codec job execution and codec-specific
+ operation callbacks;
+
+This will make it possible to register two different procs (decoder and
+encoder) while sharing significant common infrastructure, common v4l2 and m2m
+devices but exposing distinct video devices.
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
new file mode 100644
index 000000000000..bff42ea1871f
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_video.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+static int cedrus_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
+ const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
+
+ if (sps->chroma_format_idc != 1)
+ /* Only 4:2:0 is supported */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
+ const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
+ struct cedrus_ctx *ctx = container_of(ctrl->handler, struct cedrus_ctx, hdl);
+ unsigned int bit_depth, max_depth;
+ struct vb2_queue *vq;
+
+ if (sps->chroma_format_idc != 1)
+ /* Only 4:2:0 is supported */
+ return -EINVAL;
+
+ bit_depth = max(sps->bit_depth_luma_minus8,
+ sps->bit_depth_chroma_minus8) + 8;
+
+ if (cedrus_is_capable(ctx, CEDRUS_CAPABILITY_H265_10_DEC))
+ max_depth = 10;
+ else
+ max_depth = 8;
+
+ if (bit_depth > max_depth)
+ return -EINVAL;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /*
+ * Bit depth can't be higher than currently set once
+ * buffers are allocated.
+ */
+ if (vb2_is_busy(vq)) {
+ if (ctx->bit_depth < bit_depth)
+ return -EINVAL;
+ } else {
+ ctx->bit_depth = bit_depth;
+ cedrus_reset_cap_format(ctx);
+ }
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops cedrus_ctrl_ops = {
+ .try_ctrl = cedrus_try_ctrl,
+};
+
+static const struct cedrus_control cedrus_controls[] = {
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
+ },
+ .capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
+ },
+ .capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
+ },
+ .capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SPS,
+ .ops = &cedrus_ctrl_ops,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .max = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_START_CODE,
+ .max = V4L2_STATELESS_H264_START_CODE_NONE,
+ .def = V4L2_STATELESS_H264_START_CODE_NONE,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+	/*
+	 * We only expose the supported profiles, not the levels, as it
+	 * is not clear which levels each hardware/core version
+	 * supports.
+	 * In any case, TRY/S_FMT will clamp the format resolution
+	 * to the maximum supported.
+	 */
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ },
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
+ .ops = &cedrus_ctrl_ops,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_PPS,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
+ /* The driver can only handle 1 entry per slice for now */
+ .dims = { 1 },
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
+ /* maximum 256 entry point offsets per slice */
+ .dims = { 256 },
+ .max = 0xffffffff,
+ .step = 1,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .max = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .max = V4L2_STATELESS_HEVC_START_CODE_NONE,
+ .def = V4L2_STATELESS_HEVC_START_CODE_NONE,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP8_FRAME,
+ },
+ .capabilities = CEDRUS_CAPABILITY_VP8_DEC,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
+ },
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+};
+
+#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
+
+void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; ctx->ctrls[i]; i++)
+ if (ctx->ctrls[i]->id == id)
+ return ctx->ctrls[i]->p_cur.p;
+
+ return NULL;
+}
+
+u32 cedrus_get_num_of_controls(struct cedrus_ctx *ctx, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; ctx->ctrls[i]; i++)
+ if (ctx->ctrls[i]->id == id)
+ return ctx->ctrls[i]->elems;
+
+ return 0;
+}
+
+static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *hdl = &ctx->hdl;
+ struct v4l2_ctrl *ctrl;
+ unsigned int ctrl_size;
+ unsigned int i, j;
+
+ v4l2_ctrl_handler_init(hdl, CEDRUS_CONTROLS_COUNT);
+ if (hdl->error) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize control handler: %d\n",
+ hdl->error);
+ return hdl->error;
+ }
+
+	ctrl_size = sizeof(ctrl) * (CEDRUS_CONTROLS_COUNT + 1);
+
+ ctx->ctrls = kzalloc(ctrl_size, GFP_KERNEL);
+ if (!ctx->ctrls)
+ return -ENOMEM;
+
+ j = 0;
+ for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
+ if (!cedrus_is_capable(ctx, cedrus_controls[i].capabilities))
+ continue;
+
+ ctrl = v4l2_ctrl_new_custom(hdl, &cedrus_controls[i].cfg,
+ NULL);
+ if (hdl->error) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to create %s control: %d\n",
+ v4l2_ctrl_get_name(cedrus_controls[i].cfg.id),
+ hdl->error);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx->ctrls);
+ ctx->ctrls = NULL;
+ return hdl->error;
+ }
+
+ ctx->ctrls[j++] = ctrl;
+ }
+
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ return 0;
+}
+
+static int cedrus_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ struct cedrus_ctx *ctx = NULL;
+ unsigned int count;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ struct vb2_buffer *vb;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ break;
+ }
+ }
+
+ if (!ctx)
+ return -ENOENT;
+
+ count = vb2_request_buffer_cnt(req);
+ if (!count) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "No buffer was provided with the request\n");
+ return -ENOENT;
+ } else if (count > 1) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "More than one buffer was provided with the request\n");
+ return -EINVAL;
+ }
+
+ return vb2_request_validate(req);
+}
+
+static int cedrus_open(struct file *file)
+{
+ struct cedrus_dev *dev = video_drvdata(file);
+ struct cedrus_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ ctx->dev = dev;
+ ctx->bit_depth = 8;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &cedrus_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_free;
+ }
+
+ cedrus_reset_out_format(ctx);
+
+ ret = cedrus_init_ctrls(dev, ctx);
+ if (ret)
+ goto err_m2m_release;
+
+ v4l2_fh_add(&ctx->fh, file);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_free:
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int cedrus_release(struct file *file)
+{
+ struct cedrus_dev *dev = video_drvdata(file);
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_fh_del(&ctx->fh, file);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx->ctrls);
+
+ v4l2_fh_exit(&ctx->fh);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations cedrus_fops = {
+ .owner = THIS_MODULE,
+ .open = cedrus_open,
+ .release = cedrus_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device cedrus_video_device = {
+ .name = CEDRUS_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &cedrus_fops,
+ .ioctl_ops = &cedrus_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops cedrus_m2m_ops = {
+ .device_run = cedrus_device_run,
+};
+
+static const struct media_device_ops cedrus_m2m_media_ops = {
+ .req_validate = cedrus_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static int cedrus_probe(struct platform_device *pdev)
+{
+ struct cedrus_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->vfd = cedrus_video_device;
+ dev->dev = &pdev->dev;
+ dev->pdev = pdev;
+
+ ret = cedrus_hw_probe(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to probe hardware\n");
+ return ret;
+ }
+
+ mutex_init(&dev->dev_mutex);
+
+ INIT_DELAYED_WORK(&dev->watchdog_work, cedrus_watchdog);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register V4L2 device\n");
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s", cedrus_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&cedrus_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_v4l2;
+ }
+
+ dev->mdev.dev = &pdev->dev;
+ strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
+ sizeof(dev->mdev.bus_info));
+
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &cedrus_m2m_media_ops;
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_m2m;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M media controller\n");
+ goto err_video;
+ }
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register media device\n");
+ goto err_m2m_mc;
+ }
+
+ return 0;
+
+err_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+err_video:
+ video_unregister_device(&dev->vfd);
+err_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static void cedrus_remove(struct platform_device *pdev)
+{
+ struct cedrus_dev *dev = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&dev->watchdog_work);
+ if (media_devnode_is_registered(dev->mdev.devnode)) {
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ media_device_cleanup(&dev->mdev);
+ }
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ cedrus_hw_remove(dev);
+}
+
+static const struct cedrus_variant sun4i_a10_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 320000000,
+};
+
+static const struct cedrus_variant sun5i_a13_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 320000000,
+};
+
+static const struct cedrus_variant sun7i_a20_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 320000000,
+};
+
+static const struct cedrus_variant sun8i_a33_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 320000000,
+};
+
+static const struct cedrus_variant sun8i_h3_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_H265_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 402000000,
+};
+
+static const struct cedrus_variant sun8i_v3s_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H264_DEC,
+ .mod_rate = 297000000,
+};
+
+static const struct cedrus_variant sun8i_r40_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 297000000,
+};
+
+static const struct cedrus_variant sun20i_d1_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_H265_DEC,
+ .mod_rate = 432000000,
+};
+
+static const struct cedrus_variant sun50i_a64_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_H265_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 402000000,
+};
+
+static const struct cedrus_variant sun50i_h5_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_H265_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 402000000,
+};
+
+static const struct cedrus_variant sun50i_h6_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_H265_DEC |
+ CEDRUS_CAPABILITY_H265_10_DEC |
+ CEDRUS_CAPABILITY_VP8_DEC,
+ .mod_rate = 600000000,
+};
+
+static const struct of_device_id cedrus_dt_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-video-engine",
+ .data = &sun4i_a10_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-video-engine",
+ .data = &sun5i_a13_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-video-engine",
+ .data = &sun7i_a20_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-a33-video-engine",
+ .data = &sun8i_a33_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-video-engine",
+ .data = &sun8i_h3_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-v3s-video-engine",
+ .data = &sun8i_v3s_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-r40-video-engine",
+ .data = &sun8i_r40_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun20i-d1-video-engine",
+ .data = &sun20i_d1_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-video-engine",
+ .data = &sun50i_a64_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-h5-video-engine",
+ .data = &sun50i_h5_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-h6-video-engine",
+ .data = &sun50i_h6_cedrus_variant,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cedrus_dt_match);
+
+static const struct dev_pm_ops cedrus_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(cedrus_hw_suspend,
+ cedrus_hw_resume, NULL)
+};
+
+static struct platform_driver cedrus_driver = {
+ .probe = cedrus_probe,
+ .remove = cedrus_remove,
+ .driver = {
+ .name = CEDRUS_NAME,
+ .of_match_table = cedrus_dt_match,
+ .pm = &cedrus_dev_pm_ops,
+ },
+};
+module_platform_driver(cedrus_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Florent Revest <florent.revest@free-electrons.com>");
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_DESCRIPTION("Cedrus VPU driver");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
new file mode 100644
index 000000000000..c4b1217c14b6
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_H_
+#define _CEDRUS_H_
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define CEDRUS_NAME "cedrus"
+
+#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+#define CEDRUS_CAPABILITY_H265_DEC BIT(1)
+#define CEDRUS_CAPABILITY_H264_DEC BIT(2)
+#define CEDRUS_CAPABILITY_MPEG2_DEC BIT(3)
+#define CEDRUS_CAPABILITY_VP8_DEC BIT(4)
+#define CEDRUS_CAPABILITY_H265_10_DEC BIT(5)
+
+enum cedrus_irq_status {
+ CEDRUS_IRQ_NONE,
+ CEDRUS_IRQ_ERROR,
+ CEDRUS_IRQ_OK,
+};
+
+enum cedrus_h264_pic_type {
+ CEDRUS_H264_PIC_TYPE_FRAME = 0,
+ CEDRUS_H264_PIC_TYPE_FIELD,
+ CEDRUS_H264_PIC_TYPE_MBAFF,
+};
+
+struct cedrus_control {
+ struct v4l2_ctrl_config cfg;
+ unsigned int capabilities;
+};
+
+struct cedrus_h264_run {
+ const struct v4l2_ctrl_h264_decode_params *decode_params;
+ const struct v4l2_ctrl_h264_pps *pps;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
+ const struct v4l2_ctrl_h264_slice_params *slice_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pred_weights *pred_weights;
+};
+
+struct cedrus_mpeg2_run {
+ const struct v4l2_ctrl_mpeg2_sequence *sequence;
+ const struct v4l2_ctrl_mpeg2_picture *picture;
+ const struct v4l2_ctrl_mpeg2_quantisation *quantisation;
+};
+
+struct cedrus_h265_run {
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling_matrix;
+ const u32 *entry_points;
+ u32 entry_points_count;
+};
+
+struct cedrus_vp8_run {
+ const struct v4l2_ctrl_vp8_frame *frame_params;
+};
+
+struct cedrus_run {
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+
+ union {
+ struct cedrus_h264_run h264;
+ struct cedrus_mpeg2_run mpeg2;
+ struct cedrus_h265_run h265;
+ struct cedrus_vp8_run vp8;
+ };
+};
+
+struct cedrus_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+
+ union {
+ struct {
+ unsigned int position;
+ enum cedrus_h264_pic_type pic_type;
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_dma;
+ ssize_t mv_col_buf_size;
+ } h264;
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_dma;
+ ssize_t mv_col_buf_size;
+ } h265;
+ } codec;
+};
+
+struct cedrus_ctx {
+ struct v4l2_fh fh;
+ struct cedrus_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+ struct cedrus_dec_ops *current_codec;
+ unsigned int bit_depth;
+
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl **ctrls;
+
+ union {
+ struct {
+ void *pic_info_buf;
+ dma_addr_t pic_info_buf_dma;
+ ssize_t pic_info_buf_size;
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_dma;
+ void *deblk_buf;
+ dma_addr_t deblk_buf_dma;
+ ssize_t deblk_buf_size;
+ void *intra_pred_buf;
+ dma_addr_t intra_pred_buf_dma;
+ ssize_t intra_pred_buf_size;
+ } h264;
+ struct {
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_addr;
+ void *entry_points_buf;
+ dma_addr_t entry_points_buf_addr;
+ } h265;
+ struct {
+ unsigned int last_frame_p_type;
+ unsigned int last_filter_type;
+ unsigned int last_sharpness_level;
+
+ u8 *entropy_probs_buf;
+ dma_addr_t entropy_probs_buf_dma;
+ } vp8;
+ } codec;
+};
+
+static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
+{
+ return container_of(file_to_v4l2_fh(file), struct cedrus_ctx, fh);
+}
+
+struct cedrus_dec_ops {
+ void (*irq_clear)(struct cedrus_ctx *ctx);
+ void (*irq_disable)(struct cedrus_ctx *ctx);
+ enum cedrus_irq_status (*irq_status)(struct cedrus_ctx *ctx);
+ int (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run);
+ int (*start)(struct cedrus_ctx *ctx);
+ void (*stop)(struct cedrus_ctx *ctx);
+ void (*trigger)(struct cedrus_ctx *ctx);
+ unsigned int (*extra_cap_size)(struct cedrus_ctx *ctx,
+ struct v4l2_pix_format *pix_fmt);
+};
+
+struct cedrus_variant {
+ unsigned int capabilities;
+ unsigned int mod_rate;
+};
+
+struct cedrus_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct media_device mdev;
+ struct media_pad pad[2];
+ struct platform_device *pdev;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+
+ void __iomem *base;
+
+ struct clk *mod_clk;
+ struct clk *ahb_clk;
+ struct clk *ram_clk;
+
+ struct reset_control *rstc;
+
+ unsigned int capabilities;
+
+ struct delayed_work watchdog_work;
+};
+
+extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
+extern struct cedrus_dec_ops cedrus_dec_ops_h264;
+extern struct cedrus_dec_ops cedrus_dec_ops_h265;
+extern struct cedrus_dec_ops cedrus_dec_ops_vp8;
+
+static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
+{
+ writel(val, dev->base + reg);
+}
+
+static inline u32 cedrus_read(struct cedrus_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
+static inline u32 cedrus_wait_for(struct cedrus_dev *dev, u32 reg, u32 flag)
+{
+ u32 value;
+
+ return readl_poll_timeout_atomic(dev->base + reg, value,
+ (value & flag) == 0, 10, 1000);
+}
+
+static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
+ struct v4l2_pix_format *pix_fmt,
+ unsigned int plane)
+{
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(buf, 0);
+
+ return addr + (pix_fmt ? (dma_addr_t)pix_fmt->bytesperline *
+ pix_fmt->height * plane : 0);
+}
+
+static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
+ struct vb2_buffer *buf,
+ unsigned int plane)
+{
+ return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
+}
+
+static inline void cedrus_write_ref_buf_addr(struct cedrus_ctx *ctx,
+ struct vb2_queue *q,
+ u64 timestamp,
+ u32 luma_reg,
+ u32 chroma_reg)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ struct vb2_buffer *buf = vb2_find_buffer(q, timestamp);
+
+ cedrus_write(dev, luma_reg, cedrus_dst_buf_addr(ctx, buf, 0));
+ cedrus_write(dev, chroma_reg, cedrus_dst_buf_addr(ctx, buf, 1));
+}
+
+static inline struct cedrus_buffer *
+vb2_v4l2_to_cedrus_buffer(const struct vb2_v4l2_buffer *p)
+{
+ return container_of(p, struct cedrus_buffer, m2m_buf.vb);
+}
+
+static inline struct cedrus_buffer *
+vb2_to_cedrus_buffer(const struct vb2_buffer *p)
+{
+ return vb2_v4l2_to_cedrus_buffer(to_vb2_v4l2_buffer(p));
+}
+
+static inline bool
+cedrus_is_capable(struct cedrus_ctx *ctx, unsigned int capabilities)
+{
+ return (ctx->dev->capabilities & capabilities) == capabilities;
+}
+
+void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id);
+u32 cedrus_get_num_of_controls(struct cedrus_ctx *ctx, u32 id);
+
+#endif
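
One detail worth spelling out from the inline helpers above: cedrus_buf_addr() maps plane n of a destination buffer onto a single contiguous DMA allocation by offsetting the base address by n * bytesperline * height. The standalone sketch below simply replays that arithmetic for an assumed 1920x1088 destination format with bytesperline = 1920 and a made-up base address, so the luma/chroma split is easy to see; none of these numbers come from the driver.

/* Illustrative, standalone replay of the cedrus_buf_addr() plane offset math. */
#include <stdio.h>

int main(void)
{
	/* Assumed values: 1920x1088 destination, bytesperline = 1920, hypothetical base. */
	unsigned long base = 0x40000000UL;
	unsigned int bytesperline = 1920, height = 1088;
	unsigned int plane;

	for (plane = 0; plane < 2; plane++) {
		unsigned long addr = base + (unsigned long)bytesperline * height * plane;

		/* Plane 0 (luma) sits at the base, plane 1 (chroma) 2088960 bytes later. */
		printf("plane %u: 0x%lx\n", plane, addr);
	}
	return 0;
}
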
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
new file mode 100644
index 000000000000..9f8b0555b7dc
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+void cedrus_device_run(void *priv)
+{
+ struct cedrus_ctx *ctx = priv;
+ struct cedrus_dev *dev = ctx->dev;
+ struct cedrus_run run = {};
+ struct media_request *src_req;
+ int error;
+
+ run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Apply request(s) controls if needed. */
+ src_req = run.src->vb2_buf.req_obj.req;
+
+ if (src_req)
+ v4l2_ctrl_request_setup(src_req, &ctx->hdl);
+
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ run.mpeg2.sequence = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ run.mpeg2.picture = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
+ run.mpeg2.quantisation = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_MPEG2_QUANTISATION);
+ break;
+
+ case V4L2_PIX_FMT_H264_SLICE:
+ run.h264.decode_params = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ run.h264.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_H264_PPS);
+ run.h264.scaling_matrix = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_H264_SCALING_MATRIX);
+ run.h264.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_H264_SLICE_PARAMS);
+ run.h264.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_H264_SPS);
+ run.h264.pred_weights = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_H264_PRED_WEIGHTS);
+ break;
+
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ run.h265.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_HEVC_SPS);
+ run.h265.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_HEVC_PPS);
+ run.h265.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_HEVC_SLICE_PARAMS);
+ run.h265.decode_params = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
+ run.h265.scaling_matrix = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
+ run.h265.entry_points = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS);
+ run.h265.entry_points_count = cedrus_get_num_of_controls(ctx,
+ V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS);
+ break;
+
+ case V4L2_PIX_FMT_VP8_FRAME:
+ run.vp8.frame_params = cedrus_find_control_data(ctx,
+ V4L2_CID_STATELESS_VP8_FRAME);
+ break;
+
+ default:
+ break;
+ }
+
+ v4l2_m2m_buf_copy_metadata(run.src, run.dst);
+
+ cedrus_dst_format_set(dev, &ctx->dst_fmt);
+
+ error = ctx->current_codec->setup(ctx, &run);
+ if (error)
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Failed to setup decoding job: %d\n", error);
+
+ /* Complete request(s) controls if needed. */
+
+ if (src_req)
+ v4l2_ctrl_request_complete(src_req, &ctx->hdl);
+
+ /* Trigger decoding if setup went well, bail out otherwise. */
+ if (!error) {
+ /* Start the watchdog timer. */
+ schedule_delayed_work(&dev->watchdog_work,
+ msecs_to_jiffies(2000));
+
+ ctx->current_codec->trigger(ctx);
+ } else {
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev,
+ ctx->fh.m2m_ctx,
+ VB2_BUF_STATE_ERROR);
+ }
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.h b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
new file mode 100644
index 000000000000..d1ae7903677b
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_DEC_H_
+#define _CEDRUS_DEC_H_
+
+void cedrus_device_run(void *priv);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
new file mode 100644
index 000000000000..3e2843ef6cce
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (c) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (c) 2018 Bootlin
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+enum cedrus_h264_sram_off {
+ CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE = 0x000,
+ CEDRUS_SRAM_H264_FRAMEBUFFER_LIST = 0x100,
+ CEDRUS_SRAM_H264_REF_LIST_0 = 0x190,
+ CEDRUS_SRAM_H264_REF_LIST_1 = 0x199,
+ CEDRUS_SRAM_H264_SCALING_LIST_8x8_0 = 0x200,
+ CEDRUS_SRAM_H264_SCALING_LIST_8x8_1 = 0x210,
+ CEDRUS_SRAM_H264_SCALING_LIST_4x4 = 0x220,
+};
+
+struct cedrus_h264_sram_ref_pic {
+ __le32 top_field_order_cnt;
+ __le32 bottom_field_order_cnt;
+ __le32 frame_info;
+ __le32 luma_ptr;
+ __le32 chroma_ptr;
+ __le32 mv_col_top_ptr;
+ __le32 mv_col_bot_ptr;
+ __le32 reserved;
+} __packed;
+
+#define CEDRUS_H264_FRAME_NUM 18
+
+#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K)
+#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
+
+static void cedrus_h264_write_sram(struct cedrus_dev *dev,
+ enum cedrus_h264_sram_off off,
+ const void *data, size_t len)
+{
+ const u32 *buffer = data;
+ size_t count = DIV_ROUND_UP(len, 4);
+
+ cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET, off << 2);
+
+ while (count--)
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, *buffer++);
+}
+
+static dma_addr_t cedrus_h264_mv_col_buf_addr(struct cedrus_buffer *buf,
+ unsigned int field)
+{
+ dma_addr_t addr = buf->codec.h264.mv_col_buf_dma;
+
+ /* Adjust for the field */
+ addr += field * buf->codec.h264.mv_col_buf_size / 2;
+
+ return addr;
+}
+
+static void cedrus_fill_ref_pic(struct cedrus_ctx *ctx,
+ struct cedrus_buffer *buf,
+ unsigned int top_field_order_cnt,
+ unsigned int bottom_field_order_cnt,
+ struct cedrus_h264_sram_ref_pic *pic)
+{
+ struct vb2_buffer *vbuf = &buf->m2m_buf.vb.vb2_buf;
+
+ pic->top_field_order_cnt = cpu_to_le32(top_field_order_cnt);
+ pic->bottom_field_order_cnt = cpu_to_le32(bottom_field_order_cnt);
+ pic->frame_info = cpu_to_le32(buf->codec.h264.pic_type << 8);
+
+ pic->luma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 0));
+ pic->chroma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 1));
+ pic->mv_col_top_ptr = cpu_to_le32(cedrus_h264_mv_col_buf_addr(buf, 0));
+ pic->mv_col_bot_ptr = cpu_to_le32(cedrus_h264_mv_col_buf_addr(buf, 1));
+}
+
+static int cedrus_write_frame_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_h264_sram_ref_pic pic_list[CEDRUS_H264_FRAME_NUM];
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+ struct vb2_queue *cap_q;
+ struct cedrus_buffer *output_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned long used_dpbs = 0;
+ unsigned int position;
+ int output = -1;
+ unsigned int i;
+
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ memset(pic_list, 0, sizeof(pic_list));
+
+ for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *dpb = &decode->dpb[i];
+ struct cedrus_buffer *cedrus_buf;
+ struct vb2_buffer *buf;
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
+ continue;
+
+ buf = vb2_find_buffer(cap_q, dpb->reference_ts);
+ if (!buf)
+ continue;
+
+ cedrus_buf = vb2_to_cedrus_buffer(buf);
+ position = cedrus_buf->codec.h264.position;
+ used_dpbs |= BIT(position);
+
+ if (run->dst->vb2_buf.timestamp == dpb->reference_ts) {
+ output = position;
+ continue;
+ }
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ cedrus_fill_ref_pic(ctx, cedrus_buf,
+ dpb->top_field_order_cnt,
+ dpb->bottom_field_order_cnt,
+ &pic_list[position]);
+ }
+
+ if (output >= 0)
+ position = output;
+ else
+ position = find_first_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM);
+
+ output_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
+ output_buf->codec.h264.position = position;
+
+ if (!output_buf->codec.h264.mv_col_buf_size) {
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+ unsigned int field_size;
+
+ field_size = DIV_ROUND_UP(ctx->src_fmt.width, 16) *
+ DIV_ROUND_UP(ctx->src_fmt.height, 16) * 16;
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE))
+ field_size = field_size * 2;
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
+ field_size = field_size * 2;
+
+ output_buf->codec.h264.mv_col_buf_size = field_size * 2;
+ /* Buffer is never accessed by CPU, so we can skip kernel mapping. */
+ output_buf->codec.h264.mv_col_buf =
+ dma_alloc_attrs(dev->dev,
+ output_buf->codec.h264.mv_col_buf_size,
+ &output_buf->codec.h264.mv_col_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+
+ if (!output_buf->codec.h264.mv_col_buf) {
+ output_buf->codec.h264.mv_col_buf_size = 0;
+ return -ENOMEM;
+ }
+ }
+
+ if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FIELD;
+ else if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_MBAFF;
+ else
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FRAME;
+
+ cedrus_fill_ref_pic(ctx, output_buf,
+ decode->top_field_order_cnt,
+ decode->bottom_field_order_cnt,
+ &pic_list[position]);
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_FRAMEBUFFER_LIST,
+ pic_list, sizeof(pic_list));
+
+ cedrus_write(dev, VE_H264_OUTPUT_FRAME_IDX, position);
+
+ return 0;
+}
+
+#define CEDRUS_MAX_REF_IDX 32
+
+static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run,
+ const struct v4l2_h264_reference *ref_list,
+ u8 num_ref, enum cedrus_h264_sram_off sram)
+{
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ struct vb2_queue *cap_q;
+ struct cedrus_dev *dev = ctx->dev;
+ u8 sram_array[CEDRUS_MAX_REF_IDX];
+ unsigned int i;
+ size_t size;
+
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ memset(sram_array, 0, sizeof(sram_array));
+
+ for (i = 0; i < num_ref; i++) {
+ const struct v4l2_h264_dpb_entry *dpb;
+ const struct cedrus_buffer *cedrus_buf;
+ unsigned int position;
+ struct vb2_buffer *buf;
+ u8 dpb_idx;
+
+ dpb_idx = ref_list[i].index;
+ dpb = &decode->dpb[dpb_idx];
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ buf = vb2_find_buffer(cap_q, dpb->reference_ts);
+ if (!buf)
+ continue;
+
+ cedrus_buf = vb2_to_cedrus_buffer(buf);
+ position = cedrus_buf->codec.h264.position;
+
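+ /*
+ * Each reference entry encodes the frame list position in the upper
+ * bits, with bit 0 flagging a bottom-field reference.
+ */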
+ sram_array[i] |= position << 1;
+ if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
+ sram_array[i] |= BIT(0);
+ }
+
+ size = min_t(size_t, ALIGN(num_ref, 4), sizeof(sram_array));
+ cedrus_h264_write_sram(dev, sram, &sram_array, size);
+}
+
+static void cedrus_write_ref_list0(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+
+ _cedrus_write_ref_list(ctx, run,
+ slice->ref_pic_list0,
+ slice->num_ref_idx_l0_active_minus1 + 1,
+ CEDRUS_SRAM_H264_REF_LIST_0);
+}
+
+static void cedrus_write_ref_list1(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+
+ _cedrus_write_ref_list(ctx, run,
+ slice->ref_pic_list1,
+ slice->num_ref_idx_l1_active_minus1 + 1,
+ CEDRUS_SRAM_H264_REF_LIST_1);
+}
+
+static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling =
+ run->h264.scaling_matrix;
+ const struct v4l2_ctrl_h264_pps *pps = run->h264.pps;
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
+ return;
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_0,
+ scaling->scaling_list_8x8[0],
+ sizeof(scaling->scaling_list_8x8[0]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
+ scaling->scaling_list_8x8[1],
+ sizeof(scaling->scaling_list_8x8[1]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
+ scaling->scaling_list_4x4,
+ sizeof(scaling->scaling_list_4x4));
+}
+
+static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_pred_weights *pred_weight =
+ run->h264.pred_weights;
+ struct cedrus_dev *dev = ctx->dev;
+ int i, j, k;
+
+ cedrus_write(dev, VE_H264_SHS_WP,
+ ((pred_weight->chroma_log2_weight_denom & 0x7) << 4) |
+ ((pred_weight->luma_log2_weight_denom & 0x7) << 0));
+
+ cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET,
+ CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE << 2);
+
+ for (i = 0; i < ARRAY_SIZE(pred_weight->weight_factors); i++) {
+ const struct v4l2_h264_weight_factors *factors =
+ &pred_weight->weight_factors[i];
+
+ for (j = 0; j < ARRAY_SIZE(factors->luma_weight); j++) {
+ u32 val;
+
+ val = (((u32)factors->luma_offset[j] & 0x1ff) << 16) |
+ (factors->luma_weight[j] & 0x1ff);
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
+ }
+
+ for (j = 0; j < ARRAY_SIZE(factors->chroma_weight); j++) {
+ for (k = 0; k < ARRAY_SIZE(factors->chroma_weight[0]); k++) {
+ u32 val;
+
+ val = (((u32)factors->chroma_offset[j][k] & 0x1ff) << 16) |
+ (factors->chroma_weight[j][k] & 0x1ff);
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
+ }
+ }
+ }
+}
+
+/*
+ * It turns out that using VE_H264_VLD_OFFSET to skip bits is not reliable. In
+ * rare cases the frame is not decoded correctly. However, setting the offset
+ * to 0 and skipping the appropriate number of bits with the flush-bits
+ * trigger always works.
+ */
+static void cedrus_skip_bits(struct cedrus_dev *dev, int num)
+{
+ int count = 0;
+
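+ /* The flush-bits trigger handles at most 32 bits per request. */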
+ while (count < num) {
+ int tmp = min(num - count, 32);
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_FLUSH_BITS |
+ VE_H264_TRIGGER_TYPE_N_BITS(tmp));
+ while (cedrus_read(dev, VE_H264_STATUS) & VE_H264_STATUS_VLD_BUSY)
+ udelay(1);
+
+ count += tmp;
+ }
+}
+
+static void cedrus_set_params(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+ const struct v4l2_ctrl_h264_pps *pps = run->h264.pps;
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+ struct vb2_buffer *src_buf = &run->src->vb2_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t src_buf_addr;
+ size_t slice_bytes = vb2_get_plane_payload(src_buf, 0);
+ unsigned int pic_width_in_mbs;
+ bool mbaff_pic;
+ u32 reg;
+
+ cedrus_write(dev, VE_H264_VLD_LEN, slice_bytes * 8);
+ cedrus_write(dev, VE_H264_VLD_OFFSET, 0);
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ cedrus_write(dev, VE_H264_VLD_END, src_buf_addr + slice_bytes);
+ cedrus_write(dev, VE_H264_VLD_ADDR,
+ VE_H264_VLD_ADDR_VAL(src_buf_addr) |
+ VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
+ VE_H264_VLD_ADDR_LAST);
+
+ if (ctx->src_fmt.width > 2048) {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_MIXED_RAM |
+ VE_BUF_CTRL_DBLK_MIXED_RAM);
+ cedrus_write(dev, VE_DBLK_DRAM_BUF_ADDR,
+ ctx->codec.h264.deblk_buf_dma);
+ cedrus_write(dev, VE_INTRAPRED_DRAM_BUF_ADDR,
+ ctx->codec.h264.intra_pred_buf_dma);
+ } else {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_INT_SRAM |
+ VE_BUF_CTRL_DBLK_INT_SRAM);
+ }
+
+ /*
+ * FIXME: Since the bitstream parsing is done in software, and
+ * in userspace, this shouldn't be needed anymore. But it
+ * turns out that removing it breaks the decoding process,
+ * without any clear indication why.
+ */
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+
+ cedrus_skip_bits(dev, slice->header_bit_size);
+
+ if (V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice))
+ cedrus_write_pred_weight_table(ctx, run);
+
+ if ((slice->slice_type == V4L2_H264_SLICE_TYPE_P) ||
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_SP) ||
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_B))
+ cedrus_write_ref_list0(ctx, run);
+
+ if (slice->slice_type == V4L2_H264_SLICE_TYPE_B)
+ cedrus_write_ref_list1(ctx, run);
+
+ // picture parameters
+ reg = 0;
+ /*
+ * FIXME: the kernel headers allow the default value to be
+ * passed, but libva doesn't give us that.
+ */
+ reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 10;
+ reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 5;
+ reg |= (pps->weighted_bipred_idc & 0x3) << 2;
+ if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
+ reg |= VE_H264_PPS_ENTROPY_CODING_MODE;
+ if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
+ reg |= VE_H264_PPS_WEIGHTED_PRED;
+ if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
+ reg |= VE_H264_PPS_CONSTRAINED_INTRA_PRED;
+ if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
+ reg |= VE_H264_PPS_TRANSFORM_8X8_MODE;
+ cedrus_write(dev, VE_H264_PPS, reg);
+
+ // sequence parameters
+ reg = 0;
+ reg |= (sps->chroma_format_idc & 0x7) << 19;
+ reg |= (sps->pic_width_in_mbs_minus1 & 0xff) << 8;
+ reg |= sps->pic_height_in_map_units_minus1 & 0xff;
+ if (sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
+ reg |= VE_H264_SPS_MBS_ONLY;
+ if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ reg |= VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD;
+ if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
+ reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
+ cedrus_write(dev, VE_H264_SPS, reg);
+
+ mbaff_pic = !(decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
+
+ // slice parameters
+ reg = 0;
+ reg |= ((slice->first_mb_in_slice % pic_width_in_mbs) & 0xff) << 24;
+ reg |= (((slice->first_mb_in_slice / pic_width_in_mbs) *
+ (mbaff_pic + 1)) & 0xff) << 16;
+ reg |= decode->nal_ref_idc ? BIT(12) : 0;
+ reg |= (slice->slice_type & 0xf) << 8;
+ reg |= slice->cabac_init_idc & 0x3;
+ if (ctx->fh.m2m_ctx->new_frame)
+ reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
+ if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
+ reg |= VE_H264_SHS_FIELD_PIC;
+ if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ reg |= VE_H264_SHS_BOTTOM_FIELD;
+ if (slice->flags & V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED)
+ reg |= VE_H264_SHS_DIRECT_SPATIAL_MV_PRED;
+ cedrus_write(dev, VE_H264_SHS, reg);
+
+ reg = 0;
+ reg |= VE_H264_SHS2_NUM_REF_IDX_ACTIVE_OVRD;
+ reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 24;
+ reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 16;
+ reg |= (slice->disable_deblocking_filter_idc & 0x3) << 8;
+ reg |= (slice->slice_alpha_c0_offset_div2 & 0xf) << 4;
+ reg |= slice->slice_beta_offset_div2 & 0xf;
+ cedrus_write(dev, VE_H264_SHS2, reg);
+
+ reg = 0;
+ reg |= (pps->second_chroma_qp_index_offset & 0x3f) << 16;
+ reg |= (pps->chroma_qp_index_offset & 0x3f) << 8;
+ reg |= (pps->pic_init_qp_minus26 + 26 + slice->slice_qp_delta) & 0x3f;
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
+ reg |= VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT;
+ cedrus_write(dev, VE_H264_SHS_QP, reg);
+
+ // clear status flags
+ cedrus_write(dev, VE_H264_STATUS, cedrus_read(dev, VE_H264_STATUS));
+
+ // enable interrupts
+ cedrus_write(dev, VE_H264_CTRL,
+ VE_H264_CTRL_SLICE_DECODE_INT |
+ VE_H264_CTRL_DECODE_ERR_INT |
+ VE_H264_CTRL_VLD_DATA_REQ_INT);
+}
+
+static enum cedrus_irq_status
+cedrus_h264_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_STATUS);
+
+ if (reg & (VE_H264_STATUS_DECODE_ERR_INT |
+ VE_H264_STATUS_VLD_DATA_REQ_INT))
+ return CEDRUS_IRQ_ERROR;
+
+ if (reg & VE_H264_CTRL_SLICE_DECODE_INT)
+ return CEDRUS_IRQ_OK;
+
+ return CEDRUS_IRQ_NONE;
+}
+
+static void cedrus_h264_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_H264_STATUS,
+ VE_H264_STATUS_INT_MASK);
+}
+
+static void cedrus_h264_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_CTRL);
+
+ cedrus_write(dev, VE_H264_CTRL,
+ reg & ~VE_H264_CTRL_INT_MASK);
+}
+
+static int cedrus_h264_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ int ret;
+
+ cedrus_engine_enable(ctx);
+
+ cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
+ cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
+ ctx->codec.h264.pic_info_buf_dma);
+ cedrus_write(dev, VE_H264_EXTRA_BUFFER2,
+ ctx->codec.h264.neighbor_info_buf_dma);
+
+ cedrus_write_scaling_lists(ctx, run);
+ ret = cedrus_write_frame_list(ctx, run);
+ if (ret)
+ return ret;
+
+ cedrus_set_params(ctx, run);
+
+ return 0;
+}
+
+static int cedrus_h264_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned int pic_info_size;
+ int ret;
+
+ /*
+ * NOTE: All buffers allocated here are only used by the hardware,
+ * so we can pass the DMA_ATTR_NO_KERNEL_MAPPING flag when
+ * allocating them.
+ */
+
+ /* The picture buffer size formula is taken from the CedarX source. */
+
+ if (ctx->src_fmt.width > 2048)
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x4000;
+ else
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x1000;
+
+ /*
+ * FIXME: If V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is set,
+ * there is no need to multiply by 2.
+ */
+ pic_info_size += ctx->src_fmt.height * 2 * 64;
+
+ if (pic_info_size < CEDRUS_MIN_PIC_INFO_BUF_SIZE)
+ pic_info_size = CEDRUS_MIN_PIC_INFO_BUF_SIZE;
+
+ ctx->codec.h264.pic_info_buf_size = pic_info_size;
+ ctx->codec.h264.pic_info_buf =
+ dma_alloc_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ &ctx->codec.h264.pic_info_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!ctx->codec.h264.pic_info_buf)
+ return -ENOMEM;
+
+ /*
+ * That buffer is supposed to be 16 KiB in size and aligned on
+ * 16 KiB as well. However, dma_alloc_attrs guarantees that the
+ * DMA address is aligned on the smallest page order that is
+ * greater than or equal to the requested size, so we don't
+ * have to overallocate.
+ */
+ ctx->codec.h264.neighbor_info_buf =
+ dma_alloc_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h264.neighbor_info_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!ctx->codec.h264.neighbor_info_buf) {
+ ret = -ENOMEM;
+ goto err_pic_buf;
+ }
+
+ if (ctx->src_fmt.width > 2048) {
+ /*
+ * Formulas for deblock and intra prediction buffer sizes
+ * are taken from CedarX source.
+ */
+
+ ctx->codec.h264.deblk_buf_size =
+ ALIGN(ctx->src_fmt.width, 32) * 12;
+ ctx->codec.h264.deblk_buf =
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.deblk_buf_size,
+ &ctx->codec.h264.deblk_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!ctx->codec.h264.deblk_buf) {
+ ret = -ENOMEM;
+ goto err_neighbor_buf;
+ }
+
+ /*
+ * NOTE: Multiplying by two deviates from the CedarX logic, but
+ * it is, for some unknown reason, needed for H264 4K decoding
+ * on the H6.
+ */
+ ctx->codec.h264.intra_pred_buf_size =
+ ALIGN(ctx->src_fmt.width, 64) * 5 * 2;
+ ctx->codec.h264.intra_pred_buf =
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.intra_pred_buf_size,
+ &ctx->codec.h264.intra_pred_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!ctx->codec.h264.intra_pred_buf) {
+ ret = -ENOMEM;
+ goto err_deblk_buf;
+ }
+ }
+
+ return 0;
+
+err_deblk_buf:
+ dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+
+err_neighbor_buf:
+ dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+
+err_pic_buf:
+ dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ return ret;
+}
+
+static void cedrus_h264_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ struct cedrus_buffer *buf;
+ struct vb2_queue *vq;
+ unsigned int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ for (i = 0; i < vb2_get_num_buffers(vq); i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(vq, i);
+
+ if (!vb)
+ continue;
+
+ buf = vb2_to_cedrus_buffer(vb);
+
+ if (buf->codec.h264.mv_col_buf_size > 0) {
+ dma_free_attrs(dev->dev,
+ buf->codec.h264.mv_col_buf_size,
+ buf->codec.h264.mv_col_buf,
+ buf->codec.h264.mv_col_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+
+ buf->codec.h264.mv_col_buf_size = 0;
+ }
+ }
+
+ dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (ctx->codec.h264.deblk_buf_size)
+ dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (ctx->codec.h264.intra_pred_buf_size)
+ dma_free_attrs(dev->dev, ctx->codec.h264.intra_pred_buf_size,
+ ctx->codec.h264.intra_pred_buf,
+ ctx->codec.h264.intra_pred_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
+static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h264 = {
+ .irq_clear = cedrus_h264_irq_clear,
+ .irq_disable = cedrus_h264_irq_disable,
+ .irq_status = cedrus_h264_irq_status,
+ .setup = cedrus_h264_setup,
+ .start = cedrus_h264_start,
+ .stop = cedrus_h264_stop,
+ .trigger = cedrus_h264_trigger,
+};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
new file mode 100644
index 000000000000..780da4a8b5af
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+/*
+ * These are the sizes for side buffers required by the hardware for storing
+ * internal decoding metadata. They match the values used by the early BSP
+ * implementations, which were initially exposed in libvdpau-sunxi.
+ * Subsequent BSP implementations seem to double the neighbor info buffer size
+ * for the H6 SoC, which may be related to 10-bit H265 support.
+ */
+#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K)
+#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
+#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
+
+struct cedrus_h265_sram_frame_info {
+ __le32 top_pic_order_cnt;
+ __le32 bottom_pic_order_cnt;
+ __le32 top_mv_col_buf_addr;
+ __le32 bottom_mv_col_buf_addr;
+ __le32 luma_addr;
+ __le32 chroma_addr;
+} __packed;
+
+struct cedrus_h265_sram_pred_weight {
+ __s8 delta_weight;
+ __s8 offset;
+} __packed;
+
+static unsigned int cedrus_h265_2bit_size(unsigned int width,
+ unsigned int height)
+{
+ /*
+ * The vendor library additionally aligns width and height to 16,
+ * but all capture formats are already aligned to that anyway,
+ * so we can skip it here. All formats are also one form of
+ * YUV 4:2:0 or another, so we can safely assume a multiplication
+ * factor of 1.5.
+ */
+ return ALIGN(width / 4, 32) * height * 3 / 2;
+}
+
+static enum cedrus_irq_status cedrus_h265_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_H265_STATUS);
+ reg &= VE_DEC_H265_STATUS_CHECK_MASK;
+
+ if (reg & VE_DEC_H265_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_H265_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_h265_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_CHECK_MASK);
+}
+
+static void cedrus_h265_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_H265_CTRL);
+
+ reg &= ~VE_DEC_H265_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_H265_CTRL, reg);
+}
+
+static void cedrus_h265_sram_write_offset(struct cedrus_dev *dev, u32 offset)
+{
+ cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET, offset);
+}
+
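+/*
+ * Write @data to the H265 SRAM data port one 32-bit word at a time; trailing
+ * bytes that do not form a whole word are not written.
+ */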
+static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
+ unsigned int size)
+{
+ u32 *word = data;
+
+ while (size >= sizeof(u32)) {
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, *word++);
+ size -= sizeof(u32);
+ }
+}
+
+static inline dma_addr_t
+cedrus_h265_frame_info_mv_col_buf_addr(struct vb2_buffer *buf,
+ unsigned int field)
+{
+ struct cedrus_buffer *cedrus_buf = vb2_to_cedrus_buffer(buf);
+
+ return cedrus_buf->codec.h265.mv_col_buf_dma +
+ field * cedrus_buf->codec.h265.mv_col_buf_size / 2;
+}
+
+static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
+ unsigned int index,
+ bool field_pic,
+ u32 pic_order_cnt[],
+ struct vb2_buffer *buf)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buf, 0);
+ dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buf, 1);
+ dma_addr_t mv_col_buf_addr[2] = {
+ cedrus_h265_frame_info_mv_col_buf_addr(buf, 0),
+ cedrus_h265_frame_info_mv_col_buf_addr(buf, field_pic ? 1 : 0)
+ };
+ u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
+ VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
+ struct cedrus_h265_sram_frame_info frame_info = {
+ .top_pic_order_cnt = cpu_to_le32(pic_order_cnt[0]),
+ .bottom_pic_order_cnt = cpu_to_le32(field_pic ?
+ pic_order_cnt[1] :
+ pic_order_cnt[0]),
+ .top_mv_col_buf_addr =
+ cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .bottom_mv_col_buf_addr = cpu_to_le32(field_pic ?
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[1]) :
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .luma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_luma_addr)),
+ .chroma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_chroma_addr)),
+ };
+
+ cedrus_h265_sram_write_offset(dev, offset);
+ cedrus_h265_sram_write_data(dev, &frame_info, sizeof(frame_info));
+}
+
+static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ u8 num_active_dpb_entries)
+{
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ unsigned int i;
+
+ for (i = 0; i < num_active_dpb_entries; i++) {
+ struct vb2_buffer *buf = vb2_find_buffer(vq, dpb[i].timestamp);
+ u32 pic_order_cnt[2] = {
+ dpb[i].pic_order_cnt_val,
+ dpb[i].pic_order_cnt_val
+ };
+
+ if (!buf)
+ continue;
+
+ cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
+ pic_order_cnt,
+ buf);
+ }
+}
+
+static void cedrus_h265_ref_pic_list_write(struct cedrus_dev *dev,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ const u8 list[],
+ u8 num_ref_idx_active,
+ u32 sram_offset)
+{
+ unsigned int i;
+ u32 word = 0;
+
+ cedrus_h265_sram_write_offset(dev, sram_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int shift = (i % 4) * 8;
+ unsigned int index = list[i];
+ u8 value = list[i];
+
+ if (dpb[index].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
+ value |= VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF;
+
+ /* Each SRAM word gathers up to 4 references. */
+ word |= value << shift;
+
+ /* Write the word to SRAM and clear it for the next batch. */
+ if ((i % 4) == 3 || i == (num_ref_idx_active - 1)) {
+ cedrus_h265_sram_write_data(dev, &word, sizeof(word));
+ word = 0;
+ }
+ }
+}
+
+static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
+ const s8 delta_luma_weight[],
+ const s8 luma_offset[],
+ const s8 delta_chroma_weight[][2],
+ const s8 chroma_offset[][2],
+ u8 num_ref_idx_active,
+ u32 sram_luma_offset,
+ u32 sram_chroma_offset)
+{
+ struct cedrus_h265_sram_pred_weight pred_weight[2] = { { 0 } };
+ unsigned int i, j;
+
+ cedrus_h265_sram_write_offset(dev, sram_luma_offset);
+
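+ /*
+ * Luma (delta weight, offset) pairs are packed two per SRAM word, so
+ * the word is flushed after every second entry or at the end of the
+ * list.
+ */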
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int index = i % 2;
+
+ pred_weight[index].delta_weight = delta_luma_weight[i];
+ pred_weight[index].offset = luma_offset[i];
+
+ if (index == 1 || i == (num_ref_idx_active - 1))
+ cedrus_h265_sram_write_data(dev, (u32 *)&pred_weight,
+ sizeof(pred_weight));
+ }
+
+ cedrus_h265_sram_write_offset(dev, sram_chroma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ for (j = 0; j < 2; j++) {
+ pred_weight[j].delta_weight = delta_chroma_weight[i][j];
+ pred_weight[j].offset = chroma_offset[i][j];
+ }
+
+ cedrus_h265_sram_write_data(dev, &pred_weight,
+ sizeof(pred_weight));
+ }
+}
+
+static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
+{
+ int count = 0;
+
+ while (count < num) {
+ int tmp = min(num - count, 32);
+
+ cedrus_write(dev, VE_DEC_H265_TRIGGER,
+ VE_DEC_H265_TRIGGER_FLUSH_BITS |
+ VE_DEC_H265_TRIGGER_TYPE_N_BITS(tmp));
+
+ if (cedrus_wait_for(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_VLD_BUSY))
+ dev_err_ratelimited(dev->dev, "timed out waiting to skip bits\n");
+
+ count += tmp;
+ }
+}
+
+static u32 cedrus_h265_show_bits(struct cedrus_dev *dev, int num)
+{
+ cedrus_write(dev, VE_DEC_H265_TRIGGER,
+ VE_DEC_H265_TRIGGER_SHOW_BITS |
+ VE_DEC_H265_TRIGGER_TYPE_N_BITS(num));
+
+ cedrus_wait_for(dev, VE_DEC_H265_STATUS,
+ VE_DEC_H265_STATUS_VLD_BUSY);
+
+ return cedrus_read(dev, VE_DEC_H265_BITS_READ);
+}
+
+static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
+ struct cedrus_dev *dev = ctx->dev;
+ u32 i, j, k, val;
+
+ scaling = run->h265.scaling_matrix;
+
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF0,
+ (scaling->scaling_list_dc_coef_32x32[1] << 24) |
+ (scaling->scaling_list_dc_coef_32x32[0] << 16) |
+ (scaling->scaling_list_dc_coef_16x16[1] << 8) |
+ (scaling->scaling_list_dc_coef_16x16[0] << 0));
+
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF1,
+ (scaling->scaling_list_dc_coef_16x16[5] << 24) |
+ (scaling->scaling_list_dc_coef_16x16[4] << 16) |
+ (scaling->scaling_list_dc_coef_16x16[3] << 8) |
+ (scaling->scaling_list_dc_coef_16x16[2] << 0));
+
+ cedrus_h265_sram_write_offset(dev, VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS);
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_8x8[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_8x8[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_8x8[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_8x8[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_32x32[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_32x32[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_32x32[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_32x32[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_16x16[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_16x16[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_16x16[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_16x16[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++) {
+ val = ((u32)scaling->scaling_list_4x4[i][j + 12] << 24) |
+ ((u32)scaling->scaling_list_4x4[i][j + 8] << 16) |
+ ((u32)scaling->scaling_list_4x4[i][j + 4] << 8) |
+ scaling->scaling_list_4x4[i][j];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+}
+
+static int cedrus_h265_is_low_delay(struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_hevc_dpb_entry *dpb;
+ s32 poc;
+ int i;
+
+ slice_params = run->h265.slice_params;
+ poc = run->h265.decode_params->pic_order_cnt_val;
+ dpb = run->h265.decode_params->dpb;
+
+ for (i = 0; i < slice_params->num_ref_idx_l0_active_minus1 + 1; i++)
+ if (dpb[slice_params->ref_idx_l0[i]].pic_order_cnt_val > poc)
+ return 1;
+
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_B)
+ return 0;
+
+ for (i = 0; i < slice_params->num_ref_idx_l1_active_minus1 + 1; i++)
+ if (dpb[slice_params->ref_idx_l1[i]].pic_order_cnt_val > poc)
+ return 1;
+
+ return 0;
+}
+
+static void cedrus_h265_write_tiles(struct cedrus_ctx *ctx,
+ struct cedrus_run *run,
+ unsigned int ctb_addr_x,
+ unsigned int ctb_addr_y)
+{
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ struct cedrus_dev *dev = ctx->dev;
+ const u32 *entry_points;
+ u32 *entry_points_buf;
+ int i, x, tx, y, ty;
+
+ pps = run->h265.pps;
+ slice_params = run->h265.slice_params;
+ entry_points = run->h265.entry_points;
+ entry_points_buf = ctx->codec.h265.entry_points_buf;
+
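+ /*
+ * Locate the tile column (tx) and row (ty) containing the first CTB
+ * of this slice.
+ */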
+ for (x = 0, tx = 0; tx < pps->num_tile_columns_minus1 + 1; tx++) {
+ if (x + pps->column_width_minus1[tx] + 1 > ctb_addr_x)
+ break;
+
+ x += pps->column_width_minus1[tx] + 1;
+ }
+
+ for (y = 0, ty = 0; ty < pps->num_tile_rows_minus1 + 1; ty++) {
+ if (y + pps->row_height_minus1[ty] + 1 > ctb_addr_y)
+ break;
+
+ y += pps->row_height_minus1[ty] + 1;
+ }
+
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, (y << 16) | (x << 0));
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB,
+ ((y + pps->row_height_minus1[ty]) << 16) |
+ ((x + pps->column_width_minus1[tx]) << 0));
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED) {
+ for (i = 0; i < slice_params->num_entry_point_offsets; i++)
+ entry_points_buf[i] = entry_points[i];
+ } else {
+ for (i = 0; i < slice_params->num_entry_point_offsets; i++) {
+ if (tx + 1 >= pps->num_tile_columns_minus1 + 1) {
+ x = 0;
+ tx = 0;
+ y += pps->row_height_minus1[ty++] + 1;
+ } else {
+ x += pps->column_width_minus1[tx++] + 1;
+ }
+
+ entry_points_buf[i * 4 + 0] = entry_points[i];
+ entry_points_buf[i * 4 + 1] = 0x0;
+ entry_points_buf[i * 4 + 2] = (y << 16) | (x << 0);
+ entry_points_buf[i * 4 + 3] =
+ ((y + pps->row_height_minus1[ty]) << 16) |
+ ((x + pps->column_width_minus1[tx]) << 0);
+ }
+ }
+}
+
+static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_hevc_pred_weight_table *pred_weight_table;
+ unsigned int width_in_ctb_luma, ctb_size_luma;
+ unsigned int log2_max_luma_coding_block_size;
+ unsigned int ctb_addr_x, ctb_addr_y;
+ struct cedrus_buffer *cedrus_buf;
+ dma_addr_t src_buf_addr;
+ u32 chroma_log2_weight_denom;
+ u32 num_entry_point_offsets;
+ u32 output_pic_list_index;
+ u32 pic_order_cnt[2];
+ size_t slice_bytes;
+ u8 padding;
+ int count;
+ u32 reg;
+
+ sps = run->h265.sps;
+ pps = run->h265.pps;
+ slice_params = run->h265.slice_params;
+ decode_params = run->h265.decode_params;
+ pred_weight_table = &slice_params->pred_weight_table;
+ num_entry_point_offsets = slice_params->num_entry_point_offsets;
+ cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
+ slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0);
+
+ /*
+ * If entry point offsets are present, we should receive exactly
+ * the expected number of them.
+ */
+ if (num_entry_point_offsets &&
+ num_entry_point_offsets != run->h265.entry_points_count)
+ return -ERANGE;
+
+ log2_max_luma_coding_block_size =
+ sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ ctb_size_luma = 1UL << log2_max_luma_coding_block_size;
+ width_in_ctb_luma =
+ DIV_ROUND_UP(sps->pic_width_in_luma_samples, ctb_size_luma);
+
+ /* MV column buffer size and allocation. */
+ if (!cedrus_buf->codec.h265.mv_col_buf_size) {
+ /*
+ * Each CTB requires an MV col buffer with a specific unit size.
+ * Since the address is given with missing LSB bits, 1 KiB is
+ * added to each buffer to ensure proper alignment.
+ */
+ cedrus_buf->codec.h265.mv_col_buf_size =
+ DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
+ DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
+ CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
+
+ /* Buffer is never accessed by CPU, so we can skip kernel mapping. */
+ cedrus_buf->codec.h265.mv_col_buf =
+ dma_alloc_attrs(dev->dev,
+ cedrus_buf->codec.h265.mv_col_buf_size,
+ &cedrus_buf->codec.h265.mv_col_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!cedrus_buf->codec.h265.mv_col_buf) {
+ cedrus_buf->codec.h265.mv_col_buf_size = 0;
+ return -ENOMEM;
+ }
+ }
+
+ /* Activate H265 engine. */
+ cedrus_engine_enable(ctx);
+
+ /* Source offset and length in bits. */
+
+ cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
+
+ reg = slice_bytes * 8;
+ cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_H265_BITS_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA;
+
+ cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
+
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes);
+ cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
+
+ /* Coding tree block address */
+ ctb_addr_x = slice_params->slice_segment_addr % width_in_ctb_luma;
+ ctb_addr_y = slice_params->slice_segment_addr / width_in_ctb_luma;
+ reg = VE_DEC_H265_DEC_CTB_ADDR_X(ctb_addr_x);
+ reg |= VE_DEC_H265_DEC_CTB_ADDR_Y(ctb_addr_y);
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg);
+
+ if ((pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED) ||
+ (pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED)) {
+ cedrus_h265_write_tiles(ctx, run, ctb_addr_x, ctb_addr_y);
+ } else {
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
+ }
+
+ /* Clear the number of correctly-decoded coding tree blocks. */
+ if (ctx->fh.m2m_ctx->new_frame)
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_NUM, 0);
+
+ /* Initialize bitstream access. */
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
+
+ /*
+ * Cedrus expects the bitstream pointer to be at the end of the slice header
+ * rather than at the start of the slice data. The padding is at most 8 bits
+ * (one bit set to 1 and at most seven bits set to 0), so we only have to
+ * inspect the single byte preceding the slice data.
+ */
+
+ if (slice_params->data_byte_offset == 0)
+ return -EOPNOTSUPP;
+
+ cedrus_h265_skip_bits(dev, (slice_params->data_byte_offset - 1) * 8);
+
+ padding = cedrus_h265_show_bits(dev, 8);
+
+ /* At least one bit must be set in that byte. */
+ if (padding == 0)
+ return -EINVAL;
+
+ for (count = 0; count < 8; count++)
+ if (padding & (1 << count))
+ break;
+
+ /* Include the one bit. */
+ count++;
+
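+ /*
+ * Skip the remaining header bits in this byte, leaving the pointer at
+ * the end of the slice header, right before the padding.
+ */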
+ cedrus_h265_skip_bits(dev, 8 - count);
+
+ /* Bitstream parameters. */
+
+ reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
+ VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(slice_params->nuh_temporal_id_plus1);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_NAL_HDR, reg);
+
+ /* SPS. */
+
+ reg = VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(sps->max_transform_hierarchy_depth_intra) |
+ VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(sps->max_transform_hierarchy_depth_inter) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(sps->log2_diff_max_min_luma_transform_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(sps->log2_min_luma_transform_block_size_minus2) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
+ VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(sps->bit_depth_luma_minus8) |
+ VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
+ V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_AMP_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE,
+ V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SPS_HDR, reg);
+
+ reg = VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_pcm_luma_coding_block_size) |
+ VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_pcm_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(sps->pcm_sample_bit_depth_chroma_minus1) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(sps->pcm_sample_bit_depth_luma_minus1);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PCM_CTRL, reg);
+
+ /* PPS. */
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(pps->pps_cr_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(pps->pps_cb_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(pps->init_qp_minus26) |
+ VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(pps->diff_cu_qp_delta_depth);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED,
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED,
+ V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED,
+ V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED,
+ pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL0, reg);
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(pps->log2_parallel_merge_level_minus2);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TILES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED, pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED, pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL1, reg);
+
+ /* Slice Parameters. */
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(slice_params->pic_struct) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(slice_params->five_minus_max_num_merge_cand) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(slice_params->num_ref_idx_l1_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(slice_params->num_ref_idx_l0_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(slice_params->collocated_ref_idx) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(slice_params->colour_plane_id) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(slice_params->slice_type);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT,
+ slice_params->flags);
+
+ if (ctx->fh.m2m_ctx->new_frame)
+ reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO0, reg);
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ slice_params->flags);
+
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I && !cedrus_h265_is_low_delay(run))
+ reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_NOT_LOW_DELAY;
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg);
+
+ chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom +
+ pred_weight_table->delta_chroma_log2_weight_denom;
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(num_entry_point_offsets) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg);
+
+ cedrus_write(dev, VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR,
+ ctx->codec.h265.entry_points_buf_addr >> 8);
+
+ /* Decoded picture size. */
+
+ reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) |
+ VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PIC_SIZE, reg);
+
+ /* Scaling list. */
+
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED) {
+ cedrus_h265_write_scaling_list(ctx, run);
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED;
+ } else {
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ }
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
+
+ /* Neighbor information address. */
+ reg = VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(ctx->codec.h265.neighbor_info_buf_addr);
+ cedrus_write(dev, VE_DEC_H265_NEIGHBOR_INFO_ADDR, reg);
+
+ /* Write decoded picture buffer in pic list. */
+ cedrus_h265_frame_info_write_dpb(ctx, decode_params->dpb,
+ decode_params->num_active_dpb_entries);
+
+ /* Output frame. */
+
+ output_pic_list_index = V4L2_HEVC_DPB_ENTRIES_NUM_MAX;
+ pic_order_cnt[0] = slice_params->slice_pic_order_cnt;
+ pic_order_cnt[1] = slice_params->slice_pic_order_cnt;
+
+ cedrus_h265_frame_info_write_single(ctx, output_pic_list_index,
+ slice_params->pic_struct != 0,
+ pic_order_cnt,
+ &run->dst->vb2_buf);
+
+ cedrus_write(dev, VE_DEC_H265_OUTPUT_FRAME_IDX, output_pic_list_index);
+
+ /* Reference picture list 0 (for P/B frames). */
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I) {
+ cedrus_h265_ref_pic_list_write(dev, decode_params->dpb,
+ slice_params->ref_idx_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
+
+ if ((pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED) ||
+ (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED))
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l0,
+ pred_weight_table->luma_offset_l0,
+ pred_weight_table->delta_chroma_weight_l0,
+ pred_weight_table->chroma_offset_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0);
+ }
+
+ /* Reference picture list 1 (for B frames). */
+ if (slice_params->slice_type == V4L2_HEVC_SLICE_TYPE_B) {
+ cedrus_h265_ref_pic_list_write(dev, decode_params->dpb,
+ slice_params->ref_idx_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1);
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED)
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l1,
+ pred_weight_table->luma_offset_l1,
+ pred_weight_table->delta_chroma_weight_l1,
+ pred_weight_table->chroma_offset_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1);
+ }
+
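+ /*
+ * For 10-bit output, the lower two bits of each sample are stored in a
+ * separate region at the end of the capture buffer; program its offset
+ * and stride.
+ */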
+ if (ctx->bit_depth > 8) {
+ unsigned int stride = ALIGN(ctx->dst_fmt.width / 4, 32);
+
+ reg = ctx->dst_fmt.sizeimage -
+ cedrus_h265_2bit_size(ctx->dst_fmt.width,
+ ctx->dst_fmt.height);
+ cedrus_write(dev, VE_DEC_H265_OFFSET_ADDR_FIRST_OUT, reg);
+
+ reg = VE_DEC_H265_10BIT_CONFIGURE_FIRST_2BIT_STRIDE(stride);
+ cedrus_write(dev, VE_DEC_H265_10BIT_CONFIGURE, reg);
+ }
+
+ /* Enable the appropriate interrupts. */
+ cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK);
+
+ return 0;
+}
+
+static int cedrus_h265_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ /* Buffer is never accessed by CPU, so we can skip kernel mapping. */
+ ctx->codec.h265.neighbor_info_buf =
+ dma_alloc_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h265.neighbor_info_buf_addr,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!ctx->codec.h265.neighbor_info_buf)
+ return -ENOMEM;
+
+ ctx->codec.h265.entry_points_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE,
+ &ctx->codec.h265.entry_points_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.entry_points_buf) {
+ dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void cedrus_h265_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ struct cedrus_buffer *buf;
+ struct vb2_queue *vq;
+ unsigned int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ for (i = 0; i < vb2_get_num_buffers(vq); i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(vq, i);
+
+ if (!vb)
+ continue;
+
+ buf = vb2_to_cedrus_buffer(vb);
+
+ if (buf->codec.h265.mv_col_buf_size > 0) {
+ dma_free_attrs(dev->dev,
+ buf->codec.h265.mv_col_buf_size,
+ buf->codec.h265.mv_col_buf,
+ buf->codec.h265.mv_col_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+
+ buf->codec.h265.mv_col_buf_size = 0;
+ }
+ }
+
+ dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ dma_free_coherent(dev->dev, CEDRUS_H265_ENTRY_POINTS_BUF_SIZE,
+ ctx->codec.h265.entry_points_buf,
+ ctx->codec.h265.entry_points_buf_addr);
+}
+
+static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_DEC_SLICE);
+}
+
+static unsigned int cedrus_h265_extra_cap_size(struct cedrus_ctx *ctx,
+ struct v4l2_pix_format *pix_fmt)
+{
+ if (ctx->bit_depth > 8)
+ return cedrus_h265_2bit_size(pix_fmt->width, pix_fmt->height);
+
+ return 0;
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h265 = {
+ .irq_clear = cedrus_h265_irq_clear,
+ .irq_disable = cedrus_h265_irq_disable,
+ .irq_status = cedrus_h265_irq_status,
+ .setup = cedrus_h265_setup,
+ .start = cedrus_h265_start,
+ .stop = cedrus_h265_stop,
+ .trigger = cedrus_h265_trigger,
+ .extra_cap_size = cedrus_h265_extra_cap_size,
+};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
new file mode 100644
index 000000000000..444fb53878d1
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+int cedrus_engine_enable(struct cedrus_ctx *ctx)
+{
+ u32 reg = 0;
+
+ /*
+ * FIXME: This is only valid on 32-bit DDR; we should test it
+ * on the A13/A33.
+ */
+ reg |= VE_MODE_REC_WR_MODE_2MB;
+ reg |= VE_MODE_DDR_MODE_BW_128;
+
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ reg |= VE_MODE_DEC_MPEG;
+ break;
+
+ /* H.264 and VP8 both use the same decoding mode bit. */
+ case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ reg |= VE_MODE_DEC_H264;
+ break;
+
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ reg |= VE_MODE_DEC_H265;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (ctx->src_fmt.width == 4096)
+ reg |= VE_MODE_PIC_WIDTH_IS_4096;
+ if (ctx->src_fmt.width > 2048)
+ reg |= VE_MODE_PIC_WIDTH_MORE_2048;
+
+ cedrus_write(ctx->dev, VE_MODE, reg);
+
+ return 0;
+}
+
+void cedrus_engine_disable(struct cedrus_dev *dev)
+{
+ cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
+}
+
+void cedrus_dst_format_set(struct cedrus_dev *dev,
+ struct v4l2_pix_format *fmt)
+{
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ u32 chroma_size;
+ u32 reg;
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ reg = VE_PRIMARY_OUT_FMT_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ reg = VE_PRIMARY_OUT_FMT_NV21;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ reg = VE_PRIMARY_OUT_FMT_YU12;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ default:
+ reg = VE_PRIMARY_OUT_FMT_YV12;
+ break;
+ }
+ cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
+
+ reg = chroma_size / 2;
+ cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
+
+ reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
+ VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
+ cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
+
+ break;
+ case V4L2_PIX_FMT_NV12_32L32:
+ default:
+ reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
+ cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
+
+ reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
+ cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
+
+ break;
+ }
+}
+
+static irqreturn_t cedrus_irq(int irq, void *data)
+{
+ struct cedrus_dev *dev = data;
+ struct cedrus_ctx *ctx;
+ enum vb2_buffer_state state;
+ enum cedrus_irq_status status;
+
+ /*
+ * If cancel_delayed_work() returns false, it means the watchdog
+ * has already executed and finished the job.
+ */
+ if (!cancel_delayed_work(&dev->watchdog_work))
+ return IRQ_HANDLED;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_NONE;
+ }
+
+ status = ctx->current_codec->irq_status(ctx);
+ if (status == CEDRUS_IRQ_NONE)
+ return IRQ_NONE;
+
+ ctx->current_codec->irq_disable(ctx);
+ ctx->current_codec->irq_clear(ctx);
+
+ if (status == CEDRUS_IRQ_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ state);
+
+ return IRQ_HANDLED;
+}
+
+void cedrus_watchdog(struct work_struct *work)
+{
+ struct cedrus_dev *dev;
+ struct cedrus_ctx *ctx;
+
+ dev = container_of(to_delayed_work(work),
+ struct cedrus_dev, watchdog_work);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx)
+ return;
+
+ v4l2_err(&dev->v4l2_dev, "frame processing timed out!\n");
+ reset_control_reset(dev->rstc);
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ VB2_BUF_STATE_ERROR);
+}
+
+int cedrus_hw_suspend(struct device *device)
+{
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->ahb_clk);
+
+ reset_control_assert(dev->rstc);
+
+ return 0;
+}
+
+int cedrus_hw_resume(struct device *device)
+{
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = reset_control_reset(dev->rstc);
+ if (ret) {
+ dev_err(dev->dev, "Failed to apply reset\n");
+
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev->ahb_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable AHB clock\n");
+
+ goto err_rst;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable MOD clock\n");
+
+ goto err_ahb_clk;
+ }
+
+ ret = clk_prepare_enable(dev->ram_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable RAM clock\n");
+
+ goto err_mod_clk;
+ }
+
+ return 0;
+
+err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+err_ahb_clk:
+ clk_disable_unprepare(dev->ahb_clk);
+err_rst:
+ reset_control_assert(dev->rstc);
+
+ return ret;
+}
+
+int cedrus_hw_probe(struct cedrus_dev *dev)
+{
+ const struct cedrus_variant *variant;
+ int irq_dec;
+ int ret;
+
+ variant = of_device_get_match_data(dev->dev);
+ if (!variant)
+ return -EINVAL;
+
+ dev->capabilities = variant->capabilities;
+
+ irq_dec = platform_get_irq(dev->pdev, 0);
+ if (irq_dec <= 0)
+ return irq_dec;
+ ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
+ 0, dev_name(dev->dev), dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
+ ret = of_reserved_mem_device_init(dev->dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev->dev, "Failed to reserve memory\n");
+
+ return ret;
+ }
+
+ ret = sunxi_sram_claim(dev->dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to claim SRAM\n");
+
+ goto err_mem;
+ }
+
+ dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
+ if (IS_ERR(dev->ahb_clk)) {
+ dev_err(dev->dev, "Failed to get AHB clock\n");
+
+ ret = PTR_ERR(dev->ahb_clk);
+ goto err_sram;
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ dev_err(dev->dev, "Failed to get MOD clock\n");
+
+ ret = PTR_ERR(dev->mod_clk);
+ goto err_sram;
+ }
+
+ dev->ram_clk = devm_clk_get(dev->dev, "ram");
+ if (IS_ERR(dev->ram_clk)) {
+ dev_err(dev->dev, "Failed to get RAM clock\n");
+
+ ret = PTR_ERR(dev->ram_clk);
+ goto err_sram;
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ dev_err(dev->dev, "Failed to get reset control\n");
+
+ ret = PTR_ERR(dev->rstc);
+ goto err_sram;
+ }
+
+ dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
+ if (IS_ERR(dev->base)) {
+ dev_err(dev->dev, "Failed to map registers\n");
+
+ ret = PTR_ERR(dev->base);
+ goto err_sram;
+ }
+
+ ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
+ if (ret) {
+ dev_err(dev->dev, "Failed to set clock rate\n");
+
+ goto err_sram;
+ }
+
+ pm_runtime_enable(dev->dev);
+ if (!pm_runtime_enabled(dev->dev)) {
+ ret = cedrus_hw_resume(dev->dev);
+ if (ret)
+ goto err_pm;
+ }
+
+ return 0;
+
+err_pm:
+ pm_runtime_disable(dev->dev);
+err_sram:
+ sunxi_sram_release(dev->dev);
+err_mem:
+ of_reserved_mem_device_release(dev->dev);
+
+ return ret;
+}
+
+void cedrus_hw_remove(struct cedrus_dev *dev)
+{
+ pm_runtime_disable(dev->dev);
+ if (!pm_runtime_status_suspended(dev->dev))
+ cedrus_hw_suspend(dev->dev);
+
+ sunxi_sram_release(dev->dev);
+
+ of_reserved_mem_device_release(dev->dev);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
new file mode 100644
index 000000000000..6f1e701b1ea8
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_HW_H_
+#define _CEDRUS_HW_H_
+
+int cedrus_engine_enable(struct cedrus_ctx *ctx);
+void cedrus_engine_disable(struct cedrus_dev *dev);
+
+void cedrus_dst_format_set(struct cedrus_dev *dev,
+ struct v4l2_pix_format *fmt);
+
+int cedrus_hw_suspend(struct device *device);
+int cedrus_hw_resume(struct device *device);
+
+int cedrus_hw_probe(struct cedrus_dev *dev);
+void cedrus_hw_remove(struct cedrus_dev *dev);
+
+void cedrus_watchdog(struct work_struct *work);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
new file mode 100644
index 000000000000..10e98f08aafc
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+static enum cedrus_irq_status cedrus_mpeg2_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_MPEG_STATUS);
+ reg &= VE_DEC_MPEG_STATUS_CHECK_MASK;
+
+ if (!reg)
+ return CEDRUS_IRQ_NONE;
+
+ if (reg & VE_DEC_MPEG_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_MPEG_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_mpeg2_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_MPEG_STATUS, VE_DEC_MPEG_STATUS_CHECK_MASK);
+}
+
+static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_MPEG_CTRL);
+
+ reg &= ~VE_DEC_MPEG_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
+}
+
+static int cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
+ const struct v4l2_ctrl_mpeg2_quantisation *quantisation;
+ dma_addr_t src_buf_addr, dst_luma_addr, dst_chroma_addr;
+ struct cedrus_dev *dev = ctx->dev;
+ struct vb2_queue *vq;
+ const u8 *matrix;
+ unsigned int i;
+ u32 reg;
+
+ seq = run->mpeg2.sequence;
+ pic = run->mpeg2.picture;
+
+ quantisation = run->mpeg2.quantisation;
+
+ /* Activate MPEG engine. */
+ cedrus_engine_enable(ctx);
+
+ /* Set intra quantisation matrix. */
+ matrix = quantisation->intra_quantiser_matrix;
+ for (i = 0; i < 64; i++) {
+ reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
+ reg |= VE_DEC_MPEG_IQMINPUT_FLAG_INTRA;
+
+ cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
+ }
+
+ /* Set non-intra quantisation matrix. */
+ matrix = quantisation->non_intra_quantiser_matrix;
+ for (i = 0; i < 64; i++) {
+ reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
+ reg |= VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA;
+
+ cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
+ }
+
+ /* Set MPEG picture header. */
+
+ reg = VE_DEC_MPEG_MP12HDR_SLICE_TYPE(pic->picture_coding_type);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 0, pic->f_code[0][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 1, pic->f_code[0][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 0, pic->f_code[1][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 1, pic->f_code[1][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(pic->intra_dc_precision);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(pic->picture_structure);
+ reg |= VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
+ reg |= VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
+ reg |= VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV);
+ reg |= VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC);
+ reg |= VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN);
+ reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(0);
+ reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(0);
+
+ cedrus_write(dev, VE_DEC_MPEG_MP12HDR, reg);
+
+ /* Set frame dimensions. */
+
+ reg = VE_DEC_MPEG_PICCODEDSIZE_WIDTH(seq->horizontal_size);
+ reg |= VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(seq->vertical_size);
+
+ cedrus_write(dev, VE_DEC_MPEG_PICCODEDSIZE, reg);
+
+ reg = VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(ctx->src_fmt.width);
+ reg |= VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_MPEG_PICBOUNDSIZE, reg);
+
+ /* Forward and backward prediction reference buffers. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ cedrus_write_ref_buf_addr(ctx, vq, pic->forward_ref_ts,
+ VE_DEC_MPEG_FWD_REF_LUMA_ADDR,
+ VE_DEC_MPEG_FWD_REF_CHROMA_ADDR);
+ cedrus_write_ref_buf_addr(ctx, vq, pic->backward_ref_ts,
+ VE_DEC_MPEG_BWD_REF_LUMA_ADDR,
+ VE_DEC_MPEG_BWD_REF_CHROMA_ADDR);
+
+ /* Destination luma and chroma buffers. */
+
+ dst_luma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 0);
+ dst_chroma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 1);
+
+ cedrus_write(dev, VE_DEC_MPEG_REC_LUMA, dst_luma_addr);
+ cedrus_write(dev, VE_DEC_MPEG_REC_CHROMA, dst_chroma_addr);
+
+ /* Source offset and length in bits. */
+
+ cedrus_write(dev, VE_DEC_MPEG_VLD_OFFSET, 0);
+
+ reg = vb2_get_plane_payload(&run->src->vb2_buf, 0) * 8;
+ cedrus_write(dev, VE_DEC_MPEG_VLD_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_MPEG_VLD_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA;
+ reg |= VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA;
+ reg |= VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA;
+
+ cedrus_write(dev, VE_DEC_MPEG_VLD_ADDR, reg);
+
+ reg = src_buf_addr + vb2_get_plane_payload(&run->src->vb2_buf, 0);
+ cedrus_write(dev, VE_DEC_MPEG_VLD_END_ADDR, reg);
+
+ /* Macroblock address: start at the beginning. */
+ reg = VE_DEC_MPEG_MBADDR_Y(0) | VE_DEC_MPEG_MBADDR_X(0);
+ cedrus_write(dev, VE_DEC_MPEG_MBADDR, reg);
+
+ /* Clear previous errors. */
+ cedrus_write(dev, VE_DEC_MPEG_ERROR, 0);
+
+ /* Clear correct macroblocks register. */
+ cedrus_write(dev, VE_DEC_MPEG_CRTMBADDR, 0);
+
+	/* Enable appropriate interrupts and components. */
+
+ reg = VE_DEC_MPEG_CTRL_IRQ_MASK | VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK |
+ VE_DEC_MPEG_CTRL_MC_CACHE_EN;
+
+ cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
+
+ return 0;
+}
+
+static void cedrus_mpeg2_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ /* Trigger MPEG engine. */
+ reg = VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD | VE_DEC_MPEG_TRIGGER_MPEG2 |
+ VE_DEC_MPEG_TRIGGER_MB_BOUNDARY;
+
+ cedrus_write(dev, VE_DEC_MPEG_TRIGGER, reg);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_mpeg2 = {
+ .irq_clear = cedrus_mpeg2_irq_clear,
+ .irq_disable = cedrus_mpeg2_irq_disable,
+ .irq_status = cedrus_mpeg2_irq_status,
+ .setup = cedrus_mpeg2_setup,
+ .trigger = cedrus_mpeg2_trigger,
+};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
new file mode 100644
index 000000000000..05e6cbc548ab
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -0,0 +1,717 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (c) 2013-2016 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _CEDRUS_REGS_H_
+#define _CEDRUS_REGS_H_
+
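+/*
+ * Shift a value up to bit position l and mask it to the register field
+ * spanning bits h (high) down to l (low), so that out-of-range values
+ * cannot spill into neighbouring fields.
+ */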
+#define SHIFT_AND_MASK_BITS(v, h, l) \
+ (((unsigned long)(v) << (l)) & GENMASK(h, l))
+
+/*
+ * Common acronyms and contractions used in register descriptions:
+ * * VLD: Variable-Length Decoder
+ * * IQ: Inverse Quantization
+ * * IDCT: Inverse Discrete Cosine Transform
+ * * MC: Motion Compensation
+ * * STCD: Start Code Detect
+ * * SDRT: Scale Down and Rotate
+ * * WB: Writeback
+ * * BITS/BS: Bitstream
+ * * MB: Macroblock
+ * * CTU: Coding Tree Unit
+ * * CTB: Coding Tree Block
+ * * IDX: Index
+ */
+
+#define VE_ENGINE_DEC_MPEG 0x100
+#define VE_ENGINE_DEC_H264 0x200
+#define VE_ENGINE_DEC_H265 0x500
+
+#define VE_MODE 0x00
+
+#define VE_MODE_PIC_WIDTH_IS_4096 BIT(22)
+#define VE_MODE_PIC_WIDTH_MORE_2048 BIT(21)
+#define VE_MODE_REC_WR_MODE_2MB (0x01 << 20)
+#define VE_MODE_REC_WR_MODE_1MB (0x00 << 20)
+#define VE_MODE_DDR_MODE_BW_128 (0x03 << 16)
+#define VE_MODE_DDR_MODE_BW_256 (0x02 << 16)
+#define VE_MODE_DISABLED (0x07 << 0)
+#define VE_MODE_DEC_H265 (0x04 << 0)
+#define VE_MODE_DEC_H264 (0x01 << 0)
+#define VE_MODE_DEC_MPEG (0x00 << 0)
+
+#define VE_BUF_CTRL 0x50
+
+#define VE_BUF_CTRL_INTRAPRED_EXT_RAM (0x02 << 2)
+#define VE_BUF_CTRL_INTRAPRED_MIXED_RAM (0x01 << 2)
+#define VE_BUF_CTRL_INTRAPRED_INT_SRAM (0x00 << 2)
+#define VE_BUF_CTRL_DBLK_EXT_RAM (0x02 << 0)
+#define VE_BUF_CTRL_DBLK_MIXED_RAM (0x01 << 0)
+#define VE_BUF_CTRL_DBLK_INT_SRAM (0x00 << 0)
+
+#define VE_DBLK_DRAM_BUF_ADDR 0x54
+#define VE_INTRAPRED_DRAM_BUF_ADDR 0x58
+#define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
+#define VE_PRIMARY_FB_LINE_STRIDE 0xc8
+
+#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) SHIFT_AND_MASK_BITS(s, 31, 16)
+#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) SHIFT_AND_MASK_BITS(s, 15, 0)
+
+#define VE_CHROMA_BUF_LEN 0xe8
+
+#define VE_SECONDARY_OUT_FMT_TILED_32_NV12 (0x00 << 30)
+#define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
+#define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
+#define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
+#define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0)
+
+#define VE_PRIMARY_OUT_FMT 0xec
+
+#define VE_PRIMARY_OUT_FMT_TILED_32_NV12 (0x00 << 4)
+#define VE_PRIMARY_OUT_FMT_TILED_128_NV12 (0x01 << 4)
+#define VE_PRIMARY_OUT_FMT_YU12 (0x02 << 4)
+#define VE_PRIMARY_OUT_FMT_YV12 (0x03 << 4)
+#define VE_PRIMARY_OUT_FMT_NV12 (0x04 << 4)
+#define VE_PRIMARY_OUT_FMT_NV21 (0x05 << 4)
+#define VE_SECONDARY_OUT_FMT_EXT_TILED_32_NV12 (0x00 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_TILED_128_NV12 (0x01 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_YU12 (0x02 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_YV12 (0x03 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_NV12 (0x04 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_NV21 (0x05 << 0)
+
+#define VE_VERSION 0xf0
+
+#define VE_VERSION_SHIFT 16
+
+#define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
+
+#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) SHIFT_AND_MASK_BITS(t, 30, 28)
+#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
+#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
+ (((unsigned long)(__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+
+#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
+ SHIFT_AND_MASK_BITS(p, 11, 10)
+#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
+ SHIFT_AND_MASK_BITS(s, 9, 8)
+#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
+ ((v) ? BIT(7) : 0)
+#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
+ ((v) ? BIT(6) : 0)
+#define VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(v) \
+ ((v) ? BIT(5) : 0)
+#define VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(v) \
+ ((v) ? BIT(4) : 0)
+#define VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(v) \
+ ((v) ? BIT(3) : 0)
+#define VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(v) \
+ ((v) ? BIT(2) : 0)
+#define VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(v) \
+ ((v) ? BIT(1) : 0)
+#define VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(v) \
+ ((v) ? BIT(0) : 0)
+
+#define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
+
+#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(w, 16), 15, 8)
+#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(h, 16), 7, 0)
+
+#define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
+
+#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16)
+#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0)
+
+#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
+
+#define VE_DEC_MPEG_MBADDR_X(w) SHIFT_AND_MASK_BITS(w, 15, 8)
+#define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0)
+
+#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
+
+#define VE_DEC_MPEG_CTRL_MC_CACHE_EN BIT(31)
+#define VE_DEC_MPEG_CTRL_SW_VLD BIT(27)
+#define VE_DEC_MPEG_CTRL_SW_IQ_IS BIT(17)
+#define VE_DEC_MPEG_CTRL_QP_AC_DC_OUT_EN BIT(14)
+#define VE_DEC_MPEG_CTRL_ROTATE_SCALE_OUT_EN BIT(8)
+#define VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK BIT(7)
+#define VE_DEC_MPEG_CTRL_ROTATE_IRQ_EN BIT(6)
+#define VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN BIT(5)
+#define VE_DEC_MPEG_CTRL_ERROR_IRQ_EN BIT(4)
+#define VE_DEC_MPEG_CTRL_FINISH_IRQ_EN BIT(3)
+#define VE_DEC_MPEG_CTRL_IRQ_MASK \
+ (VE_DEC_MPEG_CTRL_FINISH_IRQ_EN | VE_DEC_MPEG_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_MPEG_TRIGGER (VE_ENGINE_DEC_MPEG + 0x18)
+
+#define VE_DEC_MPEG_TRIGGER_MB_BOUNDARY BIT(31)
+
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_420 (0x00 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_411 (0x01 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422 (0x02 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_444 (0x03 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422T (0x04 << 27)
+
+#define VE_DEC_MPEG_TRIGGER_MPEG1 (0x01 << 24)
+#define VE_DEC_MPEG_TRIGGER_MPEG2 (0x02 << 24)
+#define VE_DEC_MPEG_TRIGGER_JPEG (0x03 << 24)
+#define VE_DEC_MPEG_TRIGGER_MPEG4 (0x04 << 24)
+#define VE_DEC_MPEG_TRIGGER_VP62 (0x05 << 24)
+
+#define VE_DEC_MPEG_TRIGGER_VP62_AC_GET_BITS BIT(7)
+
+#define VE_DEC_MPEG_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_MPEG_TRIGGER_STCD_MPEG2 (0x01 << 4)
+#define VE_DEC_MPEG_TRIGGER_STCD_AVC (0x00 << 4)
+
+#define VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD (0x0f << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_JPEG_VLD (0x0e << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_MB (0x0d << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_ROTATE (0x0c << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_VP6_VLD (0x0b << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_MAF (0x0a << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_STCD_END (0x09 << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_STCD_BEGIN (0x08 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_MC (0x07 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_IQ (0x06 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_IDCT (0x05 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_SCALE (0x04 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_VP6 (0x03 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_VP62_AC_GET_BITS (0x02 << 0)
+
+#define VE_DEC_MPEG_STATUS (VE_ENGINE_DEC_MPEG + 0x1c)
+
+#define VE_DEC_MPEG_STATUS_START_DETECT_BUSY BIT(27)
+#define VE_DEC_MPEG_STATUS_VP6_BIT BIT(26)
+#define VE_DEC_MPEG_STATUS_VP6_BIT_BUSY BIT(25)
+#define VE_DEC_MPEG_STATUS_MAF_BUSY BIT(23)
+#define VE_DEC_MPEG_STATUS_VP6_MVP_BUSY BIT(22)
+#define VE_DEC_MPEG_STATUS_JPEG_BIT_END BIT(21)
+#define VE_DEC_MPEG_STATUS_JPEG_RESTART_ERROR BIT(20)
+#define VE_DEC_MPEG_STATUS_JPEG_MARKER BIT(19)
+#define VE_DEC_MPEG_STATUS_ROTATE_BUSY BIT(18)
+#define VE_DEC_MPEG_STATUS_DEBLOCKING_BUSY BIT(17)
+#define VE_DEC_MPEG_STATUS_SCALE_DOWN_BUSY BIT(16)
+#define VE_DEC_MPEG_STATUS_IQIS_BUF_EMPTY BIT(15)
+#define VE_DEC_MPEG_STATUS_IDCT_BUF_EMPTY BIT(14)
+#define VE_DEC_MPEG_STATUS_VE_BUSY BIT(13)
+#define VE_DEC_MPEG_STATUS_MC_BUSY BIT(12)
+#define VE_DEC_MPEG_STATUS_IDCT_BUSY BIT(11)
+#define VE_DEC_MPEG_STATUS_IQIS_BUSY BIT(10)
+#define VE_DEC_MPEG_STATUS_DCAC_BUSY BIT(9)
+#define VE_DEC_MPEG_STATUS_VLD_BUSY BIT(8)
+#define VE_DEC_MPEG_STATUS_ROTATE_SUCCESS BIT(3)
+#define VE_DEC_MPEG_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_MPEG_STATUS_ERROR BIT(1)
+#define VE_DEC_MPEG_STATUS_SUCCESS BIT(0)
+#define VE_DEC_MPEG_STATUS_CHECK_MASK \
+ (VE_DEC_MPEG_STATUS_SUCCESS | VE_DEC_MPEG_STATUS_ERROR | \
+ VE_DEC_MPEG_STATUS_VLD_DATA_REQ)
+#define VE_DEC_MPEG_STATUS_CHECK_ERROR \
+ (VE_DEC_MPEG_STATUS_ERROR | VE_DEC_MPEG_STATUS_VLD_DATA_REQ)
+
+#define VE_DEC_MPEG_VLD_ADDR (VE_ENGINE_DEC_MPEG + 0x28)
+
+#define VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA BIT(30)
+#define VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA BIT(29)
+#define VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA BIT(28)
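+/*
+ * The bitstream base address is programmed with bits 27:4 kept in place
+ * and bits 31:28 folded down into bits 3:0.
+ */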
+#define VE_DEC_MPEG_VLD_ADDR_BASE(a) \
+ ({ \
+ u32 _tmp = (a); \
+ u32 _lo = _tmp & GENMASK(27, 4); \
+ u32 _hi = (_tmp >> 28) & GENMASK(3, 0); \
+ (_lo | _hi); \
+ })
+
+#define VE_DEC_MPEG_VLD_OFFSET (VE_ENGINE_DEC_MPEG + 0x2c)
+#define VE_DEC_MPEG_VLD_LEN (VE_ENGINE_DEC_MPEG + 0x30)
+#define VE_DEC_MPEG_VLD_END_ADDR (VE_ENGINE_DEC_MPEG + 0x34)
+
+#define VE_DEC_MPEG_REC_LUMA (VE_ENGINE_DEC_MPEG + 0x48)
+#define VE_DEC_MPEG_REC_CHROMA (VE_ENGINE_DEC_MPEG + 0x4c)
+#define VE_DEC_MPEG_FWD_REF_LUMA_ADDR (VE_ENGINE_DEC_MPEG + 0x50)
+#define VE_DEC_MPEG_FWD_REF_CHROMA_ADDR (VE_ENGINE_DEC_MPEG + 0x54)
+#define VE_DEC_MPEG_BWD_REF_LUMA_ADDR (VE_ENGINE_DEC_MPEG + 0x58)
+#define VE_DEC_MPEG_BWD_REF_CHROMA_ADDR (VE_ENGINE_DEC_MPEG + 0x5c)
+
+#define VE_DEC_MPEG_IQMINPUT (VE_ENGINE_DEC_MPEG + 0x80)
+
+#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
+#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
+#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
+ (SHIFT_AND_MASK_BITS(i, 13, 8) | SHIFT_AND_MASK_BITS(v, 7, 0))
+
+#define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
+#define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
+#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
+#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+
+#define VE_DEC_H265_DEC_NAL_HDR (VE_ENGINE_DEC_H265 + 0x00)
+
+#define VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 0)
+
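+/* Translate a V4L2 control flag into the corresponding hardware register flag. */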
+#define VE_DEC_H265_FLAG(reg_flag, ctrl_flag, flags) \
+ (((flags) & (ctrl_flag)) ? reg_flag : 0)
+
+#define VE_DEC_H265_DEC_SPS_HDR (VE_ENGINE_DEC_H265 + 0x04)
+
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE BIT(26)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED BIT(25)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED BIT(24)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED BIT(23)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE BIT(2)
+
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(v) \
+ SHIFT_AND_MASK_BITS(v, 22, 20)
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 17)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 16, 15)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 13)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 11)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 9)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 3)
+#define VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(v) \
+ SHIFT_AND_MASK_BITS(v, 1, 0)
+
+#define VE_DEC_H265_DEC_PIC_SIZE (VE_ENGINE_DEC_H265 + 0x08)
+
+#define VE_DEC_H265_DEC_PIC_SIZE_WIDTH(w) (((w) << 0) & GENMASK(13, 0))
+#define VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(h) (((h) << 16) & GENMASK(29, 16))
+
+#define VE_DEC_H265_DEC_PCM_CTRL (VE_ENGINE_DEC_H265 + 0x0c)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED BIT(15)
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED BIT(14)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 11, 10)
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 9, 8)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 7, 4)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0 (VE_ENGINE_DEC_H265 + 0x10)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 24)
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 16)
+#define VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 8)
+#define VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1 (VE_ENGINE_DEC_H265 + 0x14)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(6)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED BIT(5)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED BIT(4)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 8)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0 (VE_ENGINE_DEC_H265 + 0x18)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED BIT(31)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_SRAM (0 << 30)
+#define VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT (1 << 30)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0 (VE_ENGINE_DEC_H265 + 0x20)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0 BIT(11)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT BIT(10)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO BIT(9)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA BIT(8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA BIT(7)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE BIT(6)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT BIT(1)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC BIT(0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(v) \
+ SHIFT_AND_MASK_BITS(v, 26, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 23, 20)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(v) \
+ SHIFT_AND_MASK_BITS(v, 15, 12)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 2)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1 (VE_ENGINE_DEC_H265 + 0x24)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED BIT(23)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(22)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_NOT_LOW_DELAY BIT(21)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 31, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 27, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 20, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2 (VE_ENGINE_DEC_H265 + 0x28)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 2, 0)
+
+#define VE_DEC_H265_DEC_CTB_ADDR (VE_ENGINE_DEC_H265 + 0x2c)
+
+#define VE_DEC_H265_DEC_CTB_ADDR_Y(y) SHIFT_AND_MASK_BITS(y, 25, 16)
+#define VE_DEC_H265_DEC_CTB_ADDR_X(x) SHIFT_AND_MASK_BITS(x, 9, 0)
+
+#define VE_DEC_H265_CTRL (VE_ENGINE_DEC_H265 + 0x30)
+
+#define VE_DEC_H265_CTRL_DDR_CONSISTENCY_EN BIT(31)
+#define VE_DEC_H265_CTRL_STCD_EN BIT(25)
+#define VE_DEC_H265_CTRL_EPTB_DEC_BYPASS_EN BIT(24)
+#define VE_DEC_H265_CTRL_TQ_BYPASS_EN BIT(12)
+#define VE_DEC_H265_CTRL_VLD_BYPASS_EN BIT(11)
+#define VE_DEC_H265_CTRL_NCRI_CACHE_DISABLE BIT(10)
+#define VE_DEC_H265_CTRL_ROTATE_SCALE_OUT_EN BIT(9)
+#define VE_DEC_H265_CTRL_MC_NO_WRITEBACK BIT(8)
+#define VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN BIT(2)
+#define VE_DEC_H265_CTRL_ERROR_IRQ_EN BIT(1)
+#define VE_DEC_H265_CTRL_FINISH_IRQ_EN BIT(0)
+#define VE_DEC_H265_CTRL_IRQ_MASK \
+ (VE_DEC_H265_CTRL_FINISH_IRQ_EN | VE_DEC_H265_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_H265_TRIGGER (VE_ENGINE_DEC_H265 + 0x34)
+
+#define VE_DEC_H265_TRIGGER_TYPE_N_BITS(x) (((x) & 0x3f) << 8)
+#define VE_DEC_H265_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_AVS (0x01 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_HEVC (0x00 << 4)
+#define VE_DEC_H265_TRIGGER_DEC_SLICE (0x08 << 0)
+#define VE_DEC_H265_TRIGGER_INIT_SWDEC (0x07 << 0)
+#define VE_DEC_H265_TRIGGER_BYTE_ALIGN (0x06 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCUE (0x05 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCSE (0x04 << 0)
+#define VE_DEC_H265_TRIGGER_FLUSH_BITS (0x03 << 0)
+#define VE_DEC_H265_TRIGGER_GET_BITS (0x02 << 0)
+#define VE_DEC_H265_TRIGGER_SHOW_BITS (0x01 << 0)
+
+#define VE_DEC_H265_STATUS (VE_ENGINE_DEC_H265 + 0x38)
+
+#define VE_DEC_H265_STATUS_STCD BIT(24)
+#define VE_DEC_H265_STATUS_STCD_BUSY BIT(21)
+#define VE_DEC_H265_STATUS_WB_BUSY BIT(20)
+#define VE_DEC_H265_STATUS_BS_DMA_BUSY BIT(19)
+#define VE_DEC_H265_STATUS_IT_BUSY BIT(18)
+#define VE_DEC_H265_STATUS_INTER_BUSY BIT(17)
+#define VE_DEC_H265_STATUS_MORE_DATA BIT(16)
+#define VE_DEC_H265_STATUS_DBLK_BUSY BIT(15)
+#define VE_DEC_H265_STATUS_IREC_BUSY BIT(14)
+#define VE_DEC_H265_STATUS_INTRA_BUSY BIT(13)
+#define VE_DEC_H265_STATUS_MCRI_BUSY BIT(12)
+#define VE_DEC_H265_STATUS_IQIT_BUSY BIT(11)
+#define VE_DEC_H265_STATUS_MVP_BUSY BIT(10)
+#define VE_DEC_H265_STATUS_IS_BUSY BIT(9)
+#define VE_DEC_H265_STATUS_VLD_BUSY BIT(8)
+#define VE_DEC_H265_STATUS_OVER_TIME BIT(3)
+#define VE_DEC_H265_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_H265_STATUS_ERROR BIT(1)
+#define VE_DEC_H265_STATUS_SUCCESS BIT(0)
+#define VE_DEC_H265_STATUS_STCD_TYPE_MASK GENMASK(23, 22)
+#define VE_DEC_H265_STATUS_CHECK_MASK \
+ (VE_DEC_H265_STATUS_SUCCESS | VE_DEC_H265_STATUS_ERROR | \
+ VE_DEC_H265_STATUS_VLD_DATA_REQ)
+#define VE_DEC_H265_STATUS_CHECK_ERROR \
+ (VE_DEC_H265_STATUS_ERROR | VE_DEC_H265_STATUS_VLD_DATA_REQ)
+
+#define VE_DEC_H265_DEC_CTB_NUM (VE_ENGINE_DEC_H265 + 0x3c)
+
+#define VE_DEC_H265_BITS_ADDR (VE_ENGINE_DEC_H265 + 0x40)
+
+#define VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA BIT(30)
+#define VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA BIT(29)
+#define VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA BIT(28)
+#define VE_DEC_H265_BITS_ADDR_BASE(a) (((a) >> 8) & GENMASK(27, 0))
+
+#define VE_DEC_H265_BITS_OFFSET (VE_ENGINE_DEC_H265 + 0x44)
+#define VE_DEC_H265_BITS_LEN (VE_ENGINE_DEC_H265 + 0x48)
+
+#define VE_DEC_H265_BITS_END_ADDR (VE_ENGINE_DEC_H265 + 0x4c)
+
+#define VE_DEC_H265_BITS_END_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_SDRT_CTRL (VE_ENGINE_DEC_H265 + 0x50)
+#define VE_DEC_H265_SDRT_LUMA_ADDR (VE_ENGINE_DEC_H265 + 0x54)
+#define VE_DEC_H265_SDRT_CHROMA_ADDR (VE_ENGINE_DEC_H265 + 0x58)
+
+#define VE_DEC_H265_OUTPUT_FRAME_IDX (VE_ENGINE_DEC_H265 + 0x5c)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR (VE_ENGINE_DEC_H265 + 0x60)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR (VE_ENGINE_DEC_H265 + 0x64)
+#define VE_DEC_H265_TILE_START_CTB (VE_ENGINE_DEC_H265 + 0x68)
+#define VE_DEC_H265_TILE_END_CTB (VE_ENGINE_DEC_H265 + 0x6c)
+#define VE_DEC_H265_SCALING_LIST_DC_COEF0 (VE_ENGINE_DEC_H265 + 0x78)
+#define VE_DEC_H265_SCALING_LIST_DC_COEF1 (VE_ENGINE_DEC_H265 + 0x7c)
+
+#define VE_DEC_H265_LOW_ADDR (VE_ENGINE_DEC_H265 + 0x80)
+
+#define VE_DEC_H265_OFFSET_ADDR_FIRST_OUT (VE_ENGINE_DEC_H265 + 0x84)
+#define VE_DEC_H265_OFFSET_ADDR_SECOND_OUT (VE_ENGINE_DEC_H265 + 0x88)
+
+#define VE_DEC_H265_SECOND_OUT_FMT_8BIT_PLUS_2BIT 0
+#define VE_DEC_H265_SECOND_OUT_FMT_P010 1
+#define VE_DEC_H265_SECOND_OUT_FMT_10BIT_4x4_TILED 2
+
+#define VE_DEC_H265_10BIT_CONFIGURE_SECOND_OUT_FMT(v) \
+ SHIFT_AND_MASK_BITS(v, 24, 23)
+#define VE_DEC_H265_10BIT_CONFIGURE_SECOND_2BIT_ENABLE BIT(22)
+#define VE_DEC_H265_10BIT_CONFIGURE_SECOND_2BIT_STRIDE(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 11)
+#define VE_DEC_H265_10BIT_CONFIGURE_FIRST_2BIT_STRIDE(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 0)
+#define VE_DEC_H265_10BIT_CONFIGURE (VE_ENGINE_DEC_H265 + 0x8c)
+
+#define VE_DEC_H265_LOW_ADDR_PRIMARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 31, 24)
+#define VE_DEC_H265_LOW_ADDR_SECONDARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 23, 16)
+#define VE_DEC_H265_LOW_ADDR_ENTRY_POINTS_BUF(a) \
+ SHIFT_AND_MASK_BITS(a, 7, 0)
+
+#define VE_DEC_H265_BITS_READ (VE_ENGINE_DEC_H265 + 0xdc)
+
+#define VE_DEC_H265_SRAM_OFFSET (VE_ENGINE_DEC_H265 + 0xe0)
+
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0 0x00
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0 0x20
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1 0x60
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1 0x80
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO 0x400
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT 0x20
+#define VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS 0x800
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0 0xc00
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1 0xc10
+
+#define VE_DEC_H265_SRAM_DATA (VE_ENGINE_DEC_H265 + 0xe4)
+
+#define VE_DEC_H265_SRAM_DATA_ADDR_BASE(a) ((a) >> 8)
+#define VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF BIT(7)
+
+#define VE_H264_SPS 0x200
+#define VE_H264_SPS_MBS_ONLY BIT(18)
+#define VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD BIT(17)
+#define VE_H264_SPS_DIRECT_8X8_INFERENCE BIT(16)
+
+#define VE_H264_PPS 0x204
+#define VE_H264_PPS_ENTROPY_CODING_MODE BIT(15)
+#define VE_H264_PPS_WEIGHTED_PRED BIT(4)
+#define VE_H264_PPS_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_H264_PPS_TRANSFORM_8X8_MODE BIT(0)
+
+#define VE_H264_SHS 0x208
+#define VE_H264_SHS_FIRST_SLICE_IN_PIC BIT(5)
+#define VE_H264_SHS_FIELD_PIC BIT(4)
+#define VE_H264_SHS_BOTTOM_FIELD BIT(3)
+#define VE_H264_SHS_DIRECT_SPATIAL_MV_PRED BIT(2)
+
+#define VE_H264_SHS2 0x20c
+#define VE_H264_SHS2_NUM_REF_IDX_ACTIVE_OVRD BIT(12)
+
+#define VE_H264_SHS_WP 0x210
+
+#define VE_H264_SHS_QP 0x21c
+#define VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT BIT(24)
+
+#define VE_H264_CTRL 0x220
+#define VE_H264_CTRL_VP8 BIT(29)
+#define VE_H264_CTRL_VLD_DATA_REQ_INT BIT(2)
+#define VE_H264_CTRL_DECODE_ERR_INT BIT(1)
+#define VE_H264_CTRL_SLICE_DECODE_INT BIT(0)
+
+#define VE_H264_CTRL_INT_MASK (VE_H264_CTRL_VLD_DATA_REQ_INT | \
+ VE_H264_CTRL_DECODE_ERR_INT | \
+ VE_H264_CTRL_SLICE_DECODE_INT)
+
+#define VE_H264_TRIGGER_TYPE 0x224
+#define VE_H264_TRIGGER_TYPE_PROBABILITY(x) SHIFT_AND_MASK_BITS(x, 31, 24)
+#define VE_H264_TRIGGER_TYPE_BIN_LENS(x) SHIFT_AND_MASK_BITS((x) - 1, 18, 16)
+#define VE_H264_TRIGGER_TYPE_N_BITS(x) (((x) & 0x3f) << 8)
+#define VE_H264_TRIGGER_TYPE_VP8_GET_BITS (15 << 0)
+#define VE_H264_TRIGGER_TYPE_VP8_UPDATE_COEF (14 << 0)
+#define VE_H264_TRIGGER_TYPE_VP8_SLICE_DECODE (10 << 0)
+#define VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE (8 << 0)
+#define VE_H264_TRIGGER_TYPE_INIT_SWDEC (7 << 0)
+#define VE_H264_TRIGGER_TYPE_FLUSH_BITS (3 << 0)
+
+#define VE_H264_STATUS 0x228
+#define VE_H264_STATUS_VLD_DATA_REQ_INT VE_H264_CTRL_VLD_DATA_REQ_INT
+#define VE_H264_STATUS_DECODE_ERR_INT VE_H264_CTRL_DECODE_ERR_INT
+#define VE_H264_STATUS_SLICE_DECODE_INT VE_H264_CTRL_SLICE_DECODE_INT
+#define VE_H264_STATUS_VLD_BUSY BIT(8)
+#define VE_H264_STATUS_VP8_UPPROB_BUSY BIT(17)
+
+#define VE_H264_STATUS_INT_MASK VE_H264_CTRL_INT_MASK
+
+#define VE_H264_CUR_MB_NUM 0x22c
+
+#define VE_H264_VLD_ADDR 0x230
+#define VE_H264_VLD_ADDR_FIRST BIT(30)
+#define VE_H264_VLD_ADDR_LAST BIT(29)
+#define VE_H264_VLD_ADDR_VALID BIT(28)
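+/* Bits 27:4 of the address stay in place, bits 31:28 are folded into bits 3:0. */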
+#define VE_H264_VLD_ADDR_VAL(x) (((x) & 0x0ffffff0) | ((x) >> 28))
+
+#define VE_H264_VLD_OFFSET 0x234
+#define VE_H264_VLD_LEN 0x238
+#define VE_H264_VLD_END 0x23c
+#define VE_H264_SDROT_CTRL 0x240
+#define VE_H264_OUTPUT_FRAME_IDX 0x24c
+#define VE_H264_EXTRA_BUFFER1 0x250
+#define VE_H264_EXTRA_BUFFER2 0x254
+#define VE_H264_MB_ADDR 0x260
+#define VE_H264_ERROR_CASE 0x2b8
+#define VE_H264_BASIC_BITS 0x2dc
+#define VE_AVC_SRAM_PORT_OFFSET 0x2e0
+#define VE_AVC_SRAM_PORT_DATA 0x2e4
+
+#define VE_VP8_PPS 0x214
+#define VE_VP8_PPS_PIC_TYPE_P_FRAME BIT(31)
+#define VE_VP8_PPS_LAST_SHARPNESS_LEVEL(v) SHIFT_AND_MASK_BITS(v, 30, 28)
+#define VE_VP8_PPS_LAST_PIC_TYPE_P_FRAME BIT(27)
+#define VE_VP8_PPS_ALTREF_SIGN_BIAS BIT(26)
+#define VE_VP8_PPS_GOLDEN_SIGN_BIAS BIT(25)
+#define VE_VP8_PPS_RELOAD_ENTROPY_PROBS BIT(24)
+#define VE_VP8_PPS_REFRESH_ENTROPY_PROBS BIT(23)
+#define VE_VP8_PPS_MB_NO_COEFF_SKIP BIT(22)
+#define VE_VP8_PPS_TOKEN_PARTITION(v) SHIFT_AND_MASK_BITS(v, 21, 20)
+#define VE_VP8_PPS_MODE_REF_LF_DELTA_UPDATE BIT(19)
+#define VE_VP8_PPS_MODE_REF_LF_DELTA_ENABLE BIT(18)
+#define VE_VP8_PPS_LOOP_FILTER_LEVEL(v) SHIFT_AND_MASK_BITS(v, 17, 12)
+#define VE_VP8_PPS_LOOP_FILTER_SIMPLE BIT(11)
+#define VE_VP8_PPS_SHARPNESS_LEVEL(v) SHIFT_AND_MASK_BITS(v, 10, 8)
+#define VE_VP8_PPS_LAST_LOOP_FILTER_SIMPLE BIT(7)
+#define VE_VP8_PPS_SEGMENTATION_ENABLE BIT(6)
+#define VE_VP8_PPS_MB_SEGMENT_ABS_DELTA BIT(5)
+#define VE_VP8_PPS_UPDATE_MB_SEGMENTATION_MAP BIT(4)
+#define VE_VP8_PPS_FULL_PIXEL BIT(3)
+#define VE_VP8_PPS_BILINEAR_MC_FILTER BIT(2)
+#define VE_VP8_PPS_FILTER_TYPE_SIMPLE BIT(1)
+#define VE_VP8_PPS_LPF_DISABLE BIT(0)
+
+#define VE_VP8_QP_INDEX_DELTA 0x218
+#define VE_VP8_QP_INDEX_DELTA_UVAC(v) SHIFT_AND_MASK_BITS(v, 31, 27)
+#define VE_VP8_QP_INDEX_DELTA_UVDC(v) SHIFT_AND_MASK_BITS(v, 26, 22)
+#define VE_VP8_QP_INDEX_DELTA_Y2AC(v) SHIFT_AND_MASK_BITS(v, 21, 17)
+#define VE_VP8_QP_INDEX_DELTA_Y2DC(v) SHIFT_AND_MASK_BITS(v, 16, 12)
+#define VE_VP8_QP_INDEX_DELTA_Y1DC(v) SHIFT_AND_MASK_BITS(v, 11, 7)
+#define VE_VP8_QP_INDEX_DELTA_BASE_QINDEX(v) SHIFT_AND_MASK_BITS(v, 6, 0)
+
+#define VE_VP8_PART_SIZE_OFFSET 0x21c
+#define VE_VP8_ENTROPY_PROBS_ADDR 0x250
+#define VE_VP8_FIRST_DATA_PART_LEN 0x254
+
+#define VE_VP8_FSIZE 0x258
+#define VE_VP8_FSIZE_WIDTH(w) \
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(w, 16), 15, 8)
+#define VE_VP8_FSIZE_HEIGHT(h) \
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(h, 16), 7, 0)
+
+#define VE_VP8_PICSIZE 0x25c
+#define VE_VP8_PICSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16)
+#define VE_VP8_PICSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0)
+
+#define VE_VP8_REC_LUMA 0x2ac
+#define VE_VP8_FWD_LUMA 0x2b0
+#define VE_VP8_BWD_LUMA 0x2b4
+#define VE_VP8_REC_CHROMA 0x2d0
+#define VE_VP8_FWD_CHROMA 0x2d4
+#define VE_VP8_BWD_CHROMA 0x2d8
+#define VE_VP8_ALT_LUMA 0x2e8
+#define VE_VP8_ALT_CHROMA 0x2ec
+
+#define VE_VP8_SEGMENT_FEAT_MB_LV0 0x2f0
+#define VE_VP8_SEGMENT_FEAT_MB_LV1 0x2f4
+
+#define VE_VP8_SEGMENT3(v) SHIFT_AND_MASK_BITS(v, 31, 24)
+#define VE_VP8_SEGMENT2(v) SHIFT_AND_MASK_BITS(v, 23, 16)
+#define VE_VP8_SEGMENT1(v) SHIFT_AND_MASK_BITS(v, 15, 8)
+#define VE_VP8_SEGMENT0(v) SHIFT_AND_MASK_BITS(v, 7, 0)
+
+#define VE_VP8_REF_LF_DELTA 0x2f8
+#define VE_VP8_MODE_LF_DELTA 0x2fc
+
+#define VE_VP8_LF_DELTA3(v) SHIFT_AND_MASK_BITS(v, 30, 24)
+#define VE_VP8_LF_DELTA2(v) SHIFT_AND_MASK_BITS(v, 22, 16)
+#define VE_VP8_LF_DELTA1(v) SHIFT_AND_MASK_BITS(v, 14, 8)
+#define VE_VP8_LF_DELTA0(v) SHIFT_AND_MASK_BITS(v, 6, 0)
+
+#define VE_ISP_INPUT_SIZE 0xa00
+#define VE_ISP_INPUT_STRIDE 0xa04
+#define VE_ISP_CTRL 0xa08
+#define VE_ISP_INPUT_LUMA 0xa78
+#define VE_ISP_INPUT_CHROMA 0xa7c
+
+#define VE_AVC_PARAM 0xb04
+#define VE_AVC_QP 0xb08
+#define VE_AVC_MOTION_EST 0xb10
+#define VE_AVC_CTRL 0xb14
+#define VE_AVC_TRIGGER 0xb18
+#define VE_AVC_STATUS 0xb1c
+#define VE_AVC_BASIC_BITS 0xb20
+#define VE_AVC_UNK_BUF 0xb60
+#define VE_AVC_VLE_ADDR 0xb80
+#define VE_AVC_VLE_END 0xb84
+#define VE_AVC_VLE_OFFSET 0xb88
+#define VE_AVC_VLE_MAX 0xb8c
+#define VE_AVC_VLE_LENGTH 0xb90
+#define VE_AVC_REF_LUMA 0xba0
+#define VE_AVC_REF_CHROMA 0xba4
+#define VE_AVC_REC_LUMA 0xbb0
+#define VE_AVC_REC_CHROMA 0xbb4
+#define VE_AVC_REF_SLUMA 0xbb8
+#define VE_AVC_REC_SLUMA 0xbbc
+#define VE_AVC_MB_INFO 0xbc0
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
new file mode 100644
index 000000000000..ad4ec3490775
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_video.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+#define CEDRUS_DECODE_SRC BIT(0)
+#define CEDRUS_DECODE_DST BIT(1)
+
+#define CEDRUS_MIN_WIDTH 16U
+#define CEDRUS_MIN_HEIGHT 16U
+#define CEDRUS_MAX_WIDTH 4096U
+#define CEDRUS_MAX_HEIGHT 2304U
+
+static struct cedrus_format cedrus_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_MPEG2_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_MPEG2_DEC,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_H264_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_H264_DEC,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_HEVC_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_VP8_FRAME,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_VP8_DEC,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .directions = CEDRUS_DECODE_DST,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12_32L32,
+ .directions = CEDRUS_DECODE_DST,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .directions = CEDRUS_DECODE_DST,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .directions = CEDRUS_DECODE_DST,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .directions = CEDRUS_DECODE_DST,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ },
+};
+
+#define CEDRUS_FORMATS_COUNT ARRAY_SIZE(cedrus_formats)
+
+static struct cedrus_format *cedrus_find_format(struct cedrus_ctx *ctx,
+ u32 pixelformat, u32 directions)
+{
+ struct cedrus_format *first_valid_fmt = NULL;
+ struct cedrus_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
+ fmt = &cedrus_formats[i];
+
+ if (!cedrus_is_capable(ctx, fmt->capabilities) ||
+ !(fmt->directions & directions))
+ continue;
+
+ if (fmt->pixelformat == pixelformat)
+ break;
+
+ if (!first_valid_fmt)
+ first_valid_fmt = fmt;
+ }
+
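+	/* No exact match: fall back to the first format usable for this direction, if any. */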
+ if (i == CEDRUS_FORMATS_COUNT)
+ return first_valid_fmt;
+
+ return &cedrus_formats[i];
+}
+
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int width = pix_fmt->width;
+ unsigned int height = pix_fmt->height;
+ unsigned int sizeimage = pix_fmt->sizeimage;
+ unsigned int bytesperline = pix_fmt->bytesperline;
+
+ pix_fmt->field = V4L2_FIELD_NONE;
+
+ /* Limit to hardware min/max. */
+ width = clamp(width, CEDRUS_MIN_WIDTH, CEDRUS_MAX_WIDTH);
+ height = clamp(height, CEDRUS_MIN_HEIGHT, CEDRUS_MAX_HEIGHT);
+
+ switch (pix_fmt->pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ /* Zero bytes per line for encoded source. */
+ bytesperline = 0;
+		/* Choose some minimum size since this can't be 0. */
+ sizeimage = max_t(u32, SZ_1K, sizeimage);
+ break;
+
+ case V4L2_PIX_FMT_NV12_32L32:
+ /* 32-aligned stride. */
+ bytesperline = ALIGN(width, 32);
+
+ /* 32-aligned height. */
+ height = ALIGN(height, 32);
+
+ /* Luma plane size. */
+ sizeimage = bytesperline * height;
+
+ /* Chroma plane size. */
+ sizeimage += bytesperline * ALIGN(height, 64) / 2;
+
+ break;
+
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ /* 16-aligned stride. */
+ bytesperline = ALIGN(width, 16);
+
+ /* 16-aligned height. */
+ height = ALIGN(height, 16);
+
+ /* Luma plane size. */
+ sizeimage = bytesperline * height;
+
+ /* Chroma plane size. */
+ sizeimage += bytesperline * height / 2;
+
+ break;
+ }
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+
+ pix_fmt->bytesperline = bytesperline;
+ pix_fmt->sizeimage = sizeimage;
+}
+
+static int cedrus_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, CEDRUS_NAME, sizeof(cap->driver));
+ strscpy(cap->card, CEDRUS_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", CEDRUS_NAME);
+
+ return 0;
+}
+
+static int cedrus_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
+ u32 direction)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ unsigned int i, index;
+
+ /* Index among formats that match the requested direction. */
+ index = 0;
+
+ for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
+ if (!cedrus_is_capable(ctx, cedrus_formats[i].capabilities))
+ continue;
+
+ if (!(cedrus_formats[i].directions & direction))
+ continue;
+
+ if (index == f->index)
+ break;
+
+ index++;
+ }
+
+ /* Matched format. */
+ if (i < CEDRUS_FORMATS_COUNT) {
+ f->pixelformat = cedrus_formats[i].pixelformat;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cedrus_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return cedrus_enum_fmt(file, f, CEDRUS_DECODE_DST);
+}
+
+static int cedrus_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return cedrus_enum_fmt(file, f, CEDRUS_DECODE_SRC);
+}
+
+static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+
+ f->fmt.pix = ctx->dst_fmt;
+ return 0;
+}
+
+static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+
+ f->fmt.pix = ctx->src_fmt;
+ return 0;
+}
+
+static int cedrus_try_fmt_vid_cap_p(struct cedrus_ctx *ctx,
+ struct v4l2_pix_format *pix_fmt)
+{
+ struct cedrus_format *fmt =
+ cedrus_find_format(ctx, pix_fmt->pixelformat,
+ CEDRUS_DECODE_DST);
+
+ if (!fmt)
+ return -EINVAL;
+
+ pix_fmt->pixelformat = fmt->pixelformat;
+ pix_fmt->width = ctx->src_fmt.width;
+ pix_fmt->height = ctx->src_fmt.height;
+ cedrus_prepare_format(pix_fmt);
+
+ if (ctx->current_codec->extra_cap_size)
+ pix_fmt->sizeimage +=
+ ctx->current_codec->extra_cap_size(ctx, pix_fmt);
+
+ return 0;
+}
+
+static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return cedrus_try_fmt_vid_cap_p(cedrus_file2ctx(file), &f->fmt.pix);
+}
+
+static int cedrus_try_fmt_vid_out_p(struct cedrus_ctx *ctx,
+ struct v4l2_pix_format *pix_fmt)
+{
+ struct cedrus_format *fmt =
+ cedrus_find_format(ctx, pix_fmt->pixelformat,
+ CEDRUS_DECODE_SRC);
+
+ if (!fmt)
+ return -EINVAL;
+
+ pix_fmt->pixelformat = fmt->pixelformat;
+ cedrus_prepare_format(pix_fmt);
+
+ return 0;
+}
+
+static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return cedrus_try_fmt_vid_out_p(cedrus_file2ctx(file), &f->fmt.pix);
+}
+
+static int cedrus_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ret = cedrus_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+void cedrus_reset_cap_format(struct cedrus_ctx *ctx)
+{
+ ctx->dst_fmt.pixelformat = 0;
+ cedrus_try_fmt_vid_cap_p(ctx, &ctx->dst_fmt);
+}
+
+static int cedrus_s_fmt_vid_out_p(struct cedrus_ctx *ctx,
+ struct v4l2_pix_format *pix_fmt)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = cedrus_try_fmt_vid_out_p(ctx, pix_fmt);
+ if (ret)
+ return ret;
+
+ ctx->src_fmt = *pix_fmt;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ vq->subsystem_flags |=
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ default:
+ vq->subsystem_flags &=
+ ~VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ }
+
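+	/*
+	 * cedrus_try_fmt_vid_out_p() already coerced the pixelformat to one
+	 * of the supported bitstream formats, so every case is covered here.
+	 */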
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ ctx->current_codec = &cedrus_dec_ops_mpeg2;
+ break;
+ case V4L2_PIX_FMT_H264_SLICE:
+ ctx->current_codec = &cedrus_dec_ops_h264;
+ break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ ctx->current_codec = &cedrus_dec_ops_h265;
+ break;
+ case V4L2_PIX_FMT_VP8_FRAME:
+ ctx->current_codec = &cedrus_dec_ops_vp8;
+ break;
+ }
+
+ /* Propagate format information to capture. */
+ ctx->dst_fmt.colorspace = pix_fmt->colorspace;
+ ctx->dst_fmt.xfer_func = pix_fmt->xfer_func;
+ ctx->dst_fmt.ycbcr_enc = pix_fmt->ycbcr_enc;
+ ctx->dst_fmt.quantization = pix_fmt->quantization;
+ cedrus_reset_cap_format(ctx);
+
+ return 0;
+}
+
+void cedrus_reset_out_format(struct cedrus_ctx *ctx)
+{
+ ctx->src_fmt.pixelformat = 0;
+ cedrus_s_fmt_vid_out_p(ctx, &ctx->src_fmt);
+}
+
+static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct vb2_queue *vq;
+ struct vb2_queue *peer_vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ /*
+	 * In order to support dynamic resolution changes, the decoder
+	 * admits a resolution change as long as the pixelformat remains
+	 * the same. This can't be done while streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ f->fmt.pix.pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
+	 * Since a format change on the OUTPUT queue will reset
+	 * the CAPTURE queue, we can't allow one while the CAPTURE
+	 * queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+
+ return cedrus_s_fmt_vid_out_p(cedrus_file2ctx(file), &f->fmt.pix);
+}
+
+const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
+ .vidioc_querycap = cedrus_querycap,
+
+ .vidioc_enum_fmt_vid_cap = cedrus_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cedrus_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cedrus_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cedrus_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = cedrus_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = cedrus_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = cedrus_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = cedrus_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
+static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+
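+	/* Return every pending buffer in the given state and complete its control request. */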
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vbuf)
+ return;
+
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->hdl);
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
+static int cedrus_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static int cedrus_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ /*
+	 * The buffer's bytesused must be written by the driver for CAPTURE
+	 * buffers. (For OUTPUT buffers, if userspace passes 0 as bytesused,
+	 * the v4l2 core sets it to the buffer length.)
+ */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type))
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+ int ret = 0;
+
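+	/* Power the device up and start the codec only when the bitstream (OUTPUT) queue starts. */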
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ ret = pm_runtime_resume_and_get(dev->dev);
+ if (ret < 0)
+ goto err_cleanup;
+
+ if (ctx->current_codec->start) {
+ ret = ctx->current_codec->start(ctx);
+ if (ret)
+ goto err_pm;
+ }
+ }
+
+ return 0;
+
+err_pm:
+ pm_runtime_put(dev->dev);
+err_cleanup:
+ cedrus_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void cedrus_stop_streaming(struct vb2_queue *vq)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (ctx->current_codec->stop)
+ ctx->current_codec->stop(ctx);
+
+ pm_runtime_put(dev->dev);
+ }
+
+ cedrus_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static void cedrus_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void cedrus_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
+}
+
+static const struct vb2_ops cedrus_qops = {
+ .queue_setup = cedrus_queue_setup,
+ .buf_prepare = cedrus_buf_prepare,
+ .buf_queue = cedrus_buf_queue,
+ .buf_out_validate = cedrus_buf_out_validate,
+ .buf_request_complete = cedrus_buf_request_complete,
+ .start_streaming = cedrus_start_streaming,
+ .stop_streaming = cedrus_stop_streaming,
+};
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct cedrus_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct cedrus_buffer);
+ src_vq->ops = &cedrus_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
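+	/*
+	 * Stateless decoding: every OUTPUT buffer must be queued as part of
+	 * a media request carrying its codec controls.
+	 */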
+ src_vq->supports_requests = true;
+ src_vq->requires_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct cedrus_buffer);
+ dst_vq->ops = &cedrus_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.h b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
new file mode 100644
index 000000000000..8e1afc16a6a1
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_VIDEO_H_
+#define _CEDRUS_VIDEO_H_
+
+struct cedrus_format {
+ u32 pixelformat;
+ u32 directions;
+ unsigned int capabilities;
+};
+
+extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt);
+void cedrus_reset_cap_format(struct cedrus_ctx *ctx);
+void cedrus_reset_out_format(struct cedrus_ctx *ctx);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
new file mode 100644
index 000000000000..969677a3bbf9
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (c) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+/*
+ * VP8 in Cedrus shares the same engine as H264.
+ *
+ * Note that it seems necessary to call the bitstream parsing functions
+ * to parse the frame header, otherwise the decoded image is garbage. This
+ * is contrary to what the driver is supposed to do. However, the parsed
+ * values are not really used, so this might be acceptable. It's possible
+ * that the bitstream parsing functions set some internal VPU state which
+ * is later necessary for proper decoding. The biggest suspect is the
+ * "VP8 probs update" trigger.
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+#define CEDRUS_ENTROPY_PROBS_SIZE 0x2400
+#define VP8_PROB_HALF 128
+#define QUANT_DELTA_COUNT 5
+
+/*
+ * This table comes from the concatenation of k_coeff_entropy_update_probs,
+ * kf_ymode_prob, default_mv_context, etc. It is provided in this form in
+ * order to avoid computing it every time the driver is initialised, and is
+ * suitable for direct consumption by the hardware.
+ */
+static const u8 prob_table_init[] = {
+ /* k_coeff_entropy_update_probs */
+ /* block 0 */
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xB0, 0xF6, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xDF, 0xF1, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF9, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xF4, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xF6, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xF8, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFB, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFB, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFE, 0xFD, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0xFF, 0xFE, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* block 1 */
+ 0xD9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xE1, 0xFC, 0xF1, 0xFD, 0xFF, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0xFA, 0xF1, 0xFA, 0xFD, 0xFF, 0xFD, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xDF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEE, 0xFD, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xF8, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF9, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* block 2 */
+ 0xBA, 0xFB, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0xFB, 0xF4, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFB, 0xFB, 0xF3, 0xFD, 0xFE, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEC, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFB, 0xFD, 0xFD, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* block 3 */
+ 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0xFE, 0xFC, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF8, 0xFE, 0xF9, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF6, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFC, 0xFE, 0xFB, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFE, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF8, 0xFE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFD, 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF5, 0xFB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFD, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFC, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF9, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* kf_y_mode_probs */
+ 0x91, 0x9C, 0xA3, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* split_mv_probs */
+ 0x6E, 0x6F, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* bmode_prob */
+ 0x78, 0x5A, 0x4F, 0x85, 0x57, 0x55, 0x50, 0x6F,
+ 0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* sub_mv_ref_prob */
+ 0x93, 0x88, 0x12, 0x00,
+ 0x6A, 0x91, 0x01, 0x00,
+ 0xB3, 0x79, 0x01, 0x00,
+ 0xDF, 0x01, 0x22, 0x00,
+ 0xD0, 0x01, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* mv_counts_to_probs */
+ 0x07, 0x01, 0x01, 0x8F,
+ 0x0E, 0x12, 0x0E, 0x6B,
+ 0x87, 0x40, 0x39, 0x44,
+ 0x3C, 0x38, 0x80, 0x41,
+ 0x9F, 0x86, 0x80, 0x22,
+ 0xEA, 0xBC, 0x80, 0x1C,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* kf_y_mode_tree */
+ 0x84, 0x02, 0x04, 0x06, 0x80, 0x81, 0x82, 0x83,
+
+ /* y_mode_tree */
+ 0x80, 0x02, 0x04, 0x06, 0x81, 0x82, 0x83, 0x84,
+
+ /* uv_mode_tree */
+ 0x80, 0x02, 0x81, 0x04, 0x82, 0x83, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+
+ /* small_mv_tree */
+ 0x02, 0x08, 0x04, 0x06, 0x80, 0x81, 0x82, 0x83,
+ 0x0A, 0x0C, 0x84, 0x85, 0x86, 0x87, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* small_mv_tree again */
+ 0x02, 0x08, 0x04, 0x06, 0x80, 0x81, 0x82, 0x83,
+ 0x0A, 0x0C, 0x84, 0x85, 0x86, 0x87, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* split_mv_tree */
+ 0x83, 0x02, 0x82, 0x04, 0x80, 0x81, 0x00, 0x00,
+
+ /* b_mode_tree */
+ 0x80, 0x02, 0x81, 0x04, 0x82, 0x06, 0x08, 0x0C,
+ 0x83, 0x0A, 0x85, 0x86, 0x84, 0x0E, 0x87, 0x10,
+ 0x88, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* submv_ref_tree */
+ 0x8A, 0x02, 0x8B, 0x04, 0x8C, 0x8D, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* mv_ref_tree */
+ 0x87, 0x02, 0x85, 0x04, 0x86, 0x06, 0x88, 0x89,
+};
+
+/*
+ * This table is a copy of k_mv_entropy_update_probs from the VP8
+ * specification.
+ *
+ * FIXME: If any other driver uses it, we can consider moving
+ * this table so it can be shared.
+ */
+static const u8 k_mv_entropy_update_probs[2][V4L2_VP8_MV_PROB_CNT] = {
+ { 237, 246, 253, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 250, 250, 252, 254, 254 },
+ { 231, 243, 245, 253, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 251, 251, 254, 254, 254 }
+};
+
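+/*
+ * Read bits_count bits from the bitstream through the VPU's VP8 boolean
+ * decoder, using the given probability, and return the decoded value.
+ */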
+static uint8_t read_bits(struct cedrus_dev *dev, unsigned int bits_count,
+ unsigned int probability)
+{
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_VP8_GET_BITS |
+ VE_H264_TRIGGER_TYPE_BIN_LENS(bits_count) |
+ VE_H264_TRIGGER_TYPE_PROBABILITY(probability));
+
+ cedrus_wait_for(dev, VE_H264_STATUS, VE_H264_STATUS_VLD_BUSY);
+
+ return cedrus_read(dev, VE_H264_BASIC_BITS);
+}
+
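+/* Skip an optional quantizer delta: a presence flag, then magnitude and sign. */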
+static void get_delta_q(struct cedrus_dev *dev)
+{
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 4, VP8_PROB_HALF);
+ read_bits(dev, 1, VP8_PROB_HALF);
+ }
+}
+
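+/*
+ * Parse (and discard) the segmentation header: per-segment quantizer and
+ * loop filter updates, followed by the segment map probabilities.
+ */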
+static void process_segmentation_info(struct cedrus_dev *dev)
+{
+ int update, i;
+
+ update = read_bits(dev, 1, VP8_PROB_HALF);
+
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 1, VP8_PROB_HALF);
+
+ for (i = 0; i < 4; i++)
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 7, VP8_PROB_HALF);
+ read_bits(dev, 1, VP8_PROB_HALF);
+ }
+
+ for (i = 0; i < 4; i++)
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 6, VP8_PROB_HALF);
+ read_bits(dev, 1, VP8_PROB_HALF);
+ }
+ }
+
+ if (update)
+ for (i = 0; i < 3; i++)
+ if (read_bits(dev, 1, VP8_PROB_HALF))
+ read_bits(dev, 8, VP8_PROB_HALF);
+}
+
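+/*
+ * Parse (and discard) the reference frame and macroblock mode loop filter
+ * delta updates (a 6-bit magnitude and a sign bit each).
+ */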
+static void process_ref_lf_delta_info(struct cedrus_dev *dev)
+{
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ int i;
+
+ for (i = 0; i < 4; i++)
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 6, VP8_PROB_HALF);
+ read_bits(dev, 1, VP8_PROB_HALF);
+ }
+
+ for (i = 0; i < 4; i++)
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 6, VP8_PROB_HALF);
+ read_bits(dev, 1, VP8_PROB_HALF);
+ }
+ }
+}
+
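+/*
+ * Parse (and discard) the inter frame reference handling bits: golden and
+ * altref refresh flags, buffer copy flags and sign bias flags.
+ */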
+static void process_ref_frame_info(struct cedrus_dev *dev)
+{
+ u8 refresh_golden_frame = read_bits(dev, 1, VP8_PROB_HALF);
+ u8 refresh_alt_ref_frame = read_bits(dev, 1, VP8_PROB_HALF);
+
+ if (!refresh_golden_frame)
+ read_bits(dev, 2, VP8_PROB_HALF);
+
+ if (!refresh_alt_ref_frame)
+ read_bits(dev, 2, VP8_PROB_HALF);
+
+ read_bits(dev, 1, VP8_PROB_HALF);
+ read_bits(dev, 1, VP8_PROB_HALF);
+}
+
+static void cedrus_irq_clear(struct cedrus_dev *dev)
+{
+ cedrus_write(dev, VE_H264_STATUS,
+ VE_H264_STATUS_INT_MASK);
+}
+
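+/*
+ * Walk the VP8 frame header with the VPU bit reader. The parsed values are
+ * discarded; this appears to be needed only to bring the VPU into the right
+ * internal state for decoding (see the comment at the top of this file).
+ */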
+static void cedrus_read_header(struct cedrus_dev *dev,
+ const struct v4l2_ctrl_vp8_frame *slice)
+{
+ int i, j;
+
+ if (V4L2_VP8_FRAME_IS_KEY_FRAME(slice)) {
+ read_bits(dev, 1, VP8_PROB_HALF);
+ read_bits(dev, 1, VP8_PROB_HALF);
+ }
+
+ if (read_bits(dev, 1, VP8_PROB_HALF))
+ process_segmentation_info(dev);
+
+ read_bits(dev, 1, VP8_PROB_HALF);
+ read_bits(dev, 6, VP8_PROB_HALF);
+ read_bits(dev, 3, VP8_PROB_HALF);
+
+ if (read_bits(dev, 1, VP8_PROB_HALF))
+ process_ref_lf_delta_info(dev);
+
+ read_bits(dev, 2, VP8_PROB_HALF);
+
+ /* y_ac_qi */
+ read_bits(dev, 7, VP8_PROB_HALF);
+
+ /* Parses y_dc_delta, y2_dc_delta, etc. */
+ for (i = 0; i < QUANT_DELTA_COUNT; i++)
+ get_delta_q(dev);
+
+ if (!V4L2_VP8_FRAME_IS_KEY_FRAME(slice))
+ process_ref_frame_info(dev);
+
+ read_bits(dev, 1, VP8_PROB_HALF);
+
+ if (!V4L2_VP8_FRAME_IS_KEY_FRAME(slice))
+ read_bits(dev, 1, VP8_PROB_HALF);
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE, VE_H264_TRIGGER_TYPE_VP8_UPDATE_COEF);
+ cedrus_wait_for(dev, VE_H264_STATUS, VE_H264_STATUS_VP8_UPPROB_BUSY);
+ cedrus_irq_clear(dev);
+
+ if (read_bits(dev, 1, VP8_PROB_HALF))
+ read_bits(dev, 8, VP8_PROB_HALF);
+
+ if (!V4L2_VP8_FRAME_IS_KEY_FRAME(slice)) {
+ read_bits(dev, 8, VP8_PROB_HALF);
+ read_bits(dev, 8, VP8_PROB_HALF);
+ read_bits(dev, 8, VP8_PROB_HALF);
+
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 8, VP8_PROB_HALF);
+ read_bits(dev, 8, VP8_PROB_HALF);
+ read_bits(dev, 8, VP8_PROB_HALF);
+ read_bits(dev, 8, VP8_PROB_HALF);
+ }
+
+ if (read_bits(dev, 1, VP8_PROB_HALF)) {
+ read_bits(dev, 8, VP8_PROB_HALF);
+ read_bits(dev, 8, VP8_PROB_HALF);
+ read_bits(dev, 8, VP8_PROB_HALF);
+ }
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < V4L2_VP8_MV_PROB_CNT; j++)
+ if (read_bits(dev, 1, k_mv_entropy_update_probs[i][j]))
+ read_bits(dev, 7, VP8_PROB_HALF);
+ }
+}
+
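+/*
+ * Copy the entropy and segmentation probabilities from the frame parameters
+ * into the hardware probability table buffer, at the offsets the VPU expects.
+ */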
+static void cedrus_vp8_update_probs(const struct v4l2_ctrl_vp8_frame *slice,
+ u8 *prob_table)
+{
+ int i, j, k;
+
+ memcpy(&prob_table[0x1008], slice->entropy.y_mode_probs,
+ sizeof(slice->entropy.y_mode_probs));
+ memcpy(&prob_table[0x1010], slice->entropy.uv_mode_probs,
+ sizeof(slice->entropy.uv_mode_probs));
+
+ memcpy(&prob_table[0x1018], slice->segment.segment_probs,
+ sizeof(slice->segment.segment_probs));
+
+ prob_table[0x101c] = slice->prob_skip_false;
+ prob_table[0x101d] = slice->prob_intra;
+ prob_table[0x101e] = slice->prob_last;
+ prob_table[0x101f] = slice->prob_gf;
+
+ memcpy(&prob_table[0x1020], slice->entropy.mv_probs[0],
+ V4L2_VP8_MV_PROB_CNT);
+ memcpy(&prob_table[0x1040], slice->entropy.mv_probs[1],
+ V4L2_VP8_MV_PROB_CNT);
+
+ for (i = 0; i < 4; ++i)
+ for (j = 0; j < 8; ++j)
+ for (k = 0; k < 3; ++k)
+ memcpy(&prob_table[i * 512 + j * 64 + k * 16],
+ slice->entropy.coeff_probs[i][j][k], 11);
+}
+
+static enum cedrus_irq_status
+cedrus_vp8_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_STATUS);
+
+ if (reg & (VE_H264_STATUS_DECODE_ERR_INT |
+ VE_H264_STATUS_VLD_DATA_REQ_INT))
+ return CEDRUS_IRQ_ERROR;
+
+ if (reg & VE_H264_CTRL_SLICE_DECODE_INT)
+ return CEDRUS_IRQ_OK;
+
+ return CEDRUS_IRQ_NONE;
+}
+
+static void cedrus_vp8_irq_clear(struct cedrus_ctx *ctx)
+{
+ cedrus_irq_clear(ctx->dev);
+}
+
+static void cedrus_vp8_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_CTRL);
+
+ cedrus_write(dev, VE_H264_CTRL,
+ reg & ~VE_H264_CTRL_INT_MASK);
+}
+
+static int cedrus_vp8_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_vp8_frame *slice = run->vp8.frame_params;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_buffer *src_buf = &run->src->vb2_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t luma_addr, chroma_addr;
+ dma_addr_t src_buf_addr;
+ int header_size;
+ u32 reg;
+
+ cedrus_engine_enable(ctx);
+
+ cedrus_write(dev, VE_H264_CTRL, VE_H264_CTRL_VP8);
+
+ cedrus_vp8_update_probs(slice, ctx->codec.vp8.entropy_probs_buf);
+
+ reg = slice->first_part_size * 8;
+ cedrus_write(dev, VE_VP8_FIRST_DATA_PART_LEN, reg);
+
+ header_size = V4L2_VP8_FRAME_IS_KEY_FRAME(slice) ? 10 : 3;
+
+ reg = slice->first_part_size + header_size;
+ cedrus_write(dev, VE_VP8_PART_SIZE_OFFSET, reg);
+
+ reg = vb2_plane_size(src_buf, 0) * 8;
+ cedrus_write(dev, VE_H264_VLD_LEN, reg);
+
+ /*
+	 * FIXME: There is a problem if the frame header is skipped (by adding
+	 * first_part_header_bits to the offset). It seems that the bitstream
+	 * parsing functions change the internal state of the VPU in some way
+	 * that can't otherwise be set. Maybe this can be bypassed by somehow
+	 * fixing up the probability table buffer?
+ */
+ reg = header_size * 8;
+ cedrus_write(dev, VE_H264_VLD_OFFSET, reg);
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ cedrus_write(dev, VE_H264_VLD_END,
+ src_buf_addr + vb2_get_plane_payload(src_buf, 0));
+ cedrus_write(dev, VE_H264_VLD_ADDR,
+ VE_H264_VLD_ADDR_VAL(src_buf_addr) |
+ VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
+ VE_H264_VLD_ADDR_LAST);
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+
+ cedrus_write(dev, VE_VP8_ENTROPY_PROBS_ADDR,
+ ctx->codec.vp8.entropy_probs_buf_dma);
+
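+	/* Build the VP8 PPS register from the frame header parameters. */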
+ reg = 0;
+ switch (slice->version) {
+ case 1:
+ reg |= VE_VP8_PPS_FILTER_TYPE_SIMPLE;
+ reg |= VE_VP8_PPS_BILINEAR_MC_FILTER;
+ break;
+ case 2:
+ reg |= VE_VP8_PPS_LPF_DISABLE;
+ reg |= VE_VP8_PPS_BILINEAR_MC_FILTER;
+ break;
+ case 3:
+ reg |= VE_VP8_PPS_LPF_DISABLE;
+ reg |= VE_VP8_PPS_FULL_PIXEL;
+ break;
+ }
+ if (slice->segment.flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
+ reg |= VE_VP8_PPS_UPDATE_MB_SEGMENTATION_MAP;
+ if (!(slice->segment.flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE))
+ reg |= VE_VP8_PPS_MB_SEGMENT_ABS_DELTA;
+ if (slice->segment.flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)
+ reg |= VE_VP8_PPS_SEGMENTATION_ENABLE;
+ if (ctx->codec.vp8.last_filter_type)
+ reg |= VE_VP8_PPS_LAST_LOOP_FILTER_SIMPLE;
+ reg |= VE_VP8_PPS_SHARPNESS_LEVEL(slice->lf.sharpness_level);
+ if (slice->lf.flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
+ reg |= VE_VP8_PPS_LOOP_FILTER_SIMPLE;
+ reg |= VE_VP8_PPS_LOOP_FILTER_LEVEL(slice->lf.level);
+ if (slice->lf.flags & V4L2_VP8_LF_ADJ_ENABLE)
+ reg |= VE_VP8_PPS_MODE_REF_LF_DELTA_ENABLE;
+ if (slice->lf.flags & V4L2_VP8_LF_DELTA_UPDATE)
+ reg |= VE_VP8_PPS_MODE_REF_LF_DELTA_UPDATE;
+ reg |= VE_VP8_PPS_TOKEN_PARTITION(ilog2(slice->num_dct_parts));
+ if (slice->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF)
+ reg |= VE_VP8_PPS_MB_NO_COEFF_SKIP;
+ reg |= VE_VP8_PPS_RELOAD_ENTROPY_PROBS;
+ if (slice->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
+ reg |= VE_VP8_PPS_GOLDEN_SIGN_BIAS;
+ if (slice->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
+ reg |= VE_VP8_PPS_ALTREF_SIGN_BIAS;
+ if (ctx->codec.vp8.last_frame_p_type)
+ reg |= VE_VP8_PPS_LAST_PIC_TYPE_P_FRAME;
+ reg |= VE_VP8_PPS_LAST_SHARPNESS_LEVEL(ctx->codec.vp8.last_sharpness_level);
+ if (!(slice->flags & V4L2_VP8_FRAME_FLAG_KEY_FRAME))
+ reg |= VE_VP8_PPS_PIC_TYPE_P_FRAME;
+ cedrus_write(dev, VE_VP8_PPS, reg);
+
+ cedrus_read_header(dev, slice);
+
+ /* reset registers changed by HW */
+ cedrus_write(dev, VE_H264_CUR_MB_NUM, 0);
+ cedrus_write(dev, VE_H264_MB_ADDR, 0);
+ cedrus_write(dev, VE_H264_ERROR_CASE, 0);
+
+ reg = 0;
+ reg |= VE_VP8_QP_INDEX_DELTA_UVAC(slice->quant.uv_ac_delta);
+ reg |= VE_VP8_QP_INDEX_DELTA_UVDC(slice->quant.uv_dc_delta);
+ reg |= VE_VP8_QP_INDEX_DELTA_Y2AC(slice->quant.y2_ac_delta);
+ reg |= VE_VP8_QP_INDEX_DELTA_Y2DC(slice->quant.y2_dc_delta);
+ reg |= VE_VP8_QP_INDEX_DELTA_Y1DC(slice->quant.y_dc_delta);
+ reg |= VE_VP8_QP_INDEX_DELTA_BASE_QINDEX(slice->quant.y_ac_qi);
+ cedrus_write(dev, VE_VP8_QP_INDEX_DELTA, reg);
+
+ reg = 0;
+ reg |= VE_VP8_FSIZE_WIDTH(slice->width);
+ reg |= VE_VP8_FSIZE_HEIGHT(slice->height);
+ cedrus_write(dev, VE_VP8_FSIZE, reg);
+
+ reg = 0;
+ reg |= VE_VP8_PICSIZE_WIDTH(slice->width);
+ reg |= VE_VP8_PICSIZE_HEIGHT(slice->height);
+ cedrus_write(dev, VE_VP8_PICSIZE, reg);
+
+ reg = 0;
+ reg |= VE_VP8_SEGMENT3(slice->segment.quant_update[3]);
+ reg |= VE_VP8_SEGMENT2(slice->segment.quant_update[2]);
+ reg |= VE_VP8_SEGMENT1(slice->segment.quant_update[1]);
+ reg |= VE_VP8_SEGMENT0(slice->segment.quant_update[0]);
+ cedrus_write(dev, VE_VP8_SEGMENT_FEAT_MB_LV0, reg);
+
+ reg = 0;
+ reg |= VE_VP8_SEGMENT3(slice->segment.lf_update[3]);
+ reg |= VE_VP8_SEGMENT2(slice->segment.lf_update[2]);
+ reg |= VE_VP8_SEGMENT1(slice->segment.lf_update[1]);
+ reg |= VE_VP8_SEGMENT0(slice->segment.lf_update[0]);
+ cedrus_write(dev, VE_VP8_SEGMENT_FEAT_MB_LV1, reg);
+
+ reg = 0;
+ reg |= VE_VP8_LF_DELTA3(slice->lf.ref_frm_delta[3]);
+ reg |= VE_VP8_LF_DELTA2(slice->lf.ref_frm_delta[2]);
+ reg |= VE_VP8_LF_DELTA1(slice->lf.ref_frm_delta[1]);
+ reg |= VE_VP8_LF_DELTA0(slice->lf.ref_frm_delta[0]);
+ cedrus_write(dev, VE_VP8_REF_LF_DELTA, reg);
+
+ reg = 0;
+ reg |= VE_VP8_LF_DELTA3(slice->lf.mb_mode_delta[3]);
+ reg |= VE_VP8_LF_DELTA2(slice->lf.mb_mode_delta[2]);
+ reg |= VE_VP8_LF_DELTA1(slice->lf.mb_mode_delta[1]);
+ reg |= VE_VP8_LF_DELTA0(slice->lf.mb_mode_delta[0]);
+ cedrus_write(dev, VE_VP8_MODE_LF_DELTA, reg);
+
+ luma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 0);
+ chroma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 1);
+ cedrus_write(dev, VE_VP8_REC_LUMA, luma_addr);
+ cedrus_write(dev, VE_VP8_REC_CHROMA, chroma_addr);
+
+ cedrus_write_ref_buf_addr(ctx, cap_q, slice->last_frame_ts,
+ VE_VP8_FWD_LUMA, VE_VP8_FWD_CHROMA);
+ cedrus_write_ref_buf_addr(ctx, cap_q, slice->golden_frame_ts,
+ VE_VP8_BWD_LUMA, VE_VP8_BWD_CHROMA);
+ cedrus_write_ref_buf_addr(ctx, cap_q, slice->alt_frame_ts,
+ VE_VP8_ALT_LUMA, VE_VP8_ALT_CHROMA);
+
+ cedrus_write(dev, VE_H264_CTRL, VE_H264_CTRL_VP8 |
+ VE_H264_CTRL_DECODE_ERR_INT |
+ VE_H264_CTRL_SLICE_DECODE_INT);
+
+ if (slice->lf.level) {
+ ctx->codec.vp8.last_filter_type =
+ !!(slice->lf.flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE);
+ ctx->codec.vp8.last_frame_p_type =
+ !V4L2_VP8_FRAME_IS_KEY_FRAME(slice);
+ ctx->codec.vp8.last_sharpness_level =
+ slice->lf.sharpness_level;
+ }
+
+ return 0;
+}
+
+static int cedrus_vp8_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ ctx->codec.vp8.entropy_probs_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_ENTROPY_PROBS_SIZE,
+ &ctx->codec.vp8.entropy_probs_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.vp8.entropy_probs_buf)
+ return -ENOMEM;
+
+ /*
+	 * This offset was discovered by reverse engineering; we don't know
+	 * what it actually means.
+ */
+ memcpy(&ctx->codec.vp8.entropy_probs_buf[2048],
+ prob_table_init, sizeof(prob_table_init));
+
+ return 0;
+}
+
+static void cedrus_vp8_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_engine_disable(dev);
+
+ dma_free_coherent(dev->dev, CEDRUS_ENTROPY_PROBS_SIZE,
+ ctx->codec.vp8.entropy_probs_buf,
+ ctx->codec.vp8.entropy_probs_buf_dma);
+}
+
+static void cedrus_vp8_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_VP8_SLICE_DECODE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_vp8 = {
+ .irq_clear = cedrus_vp8_irq_clear,
+ .irq_disable = cedrus_vp8_irq_disable,
+ .irq_status = cedrus_vp8_irq_status,
+ .setup = cedrus_vp8_setup,
+ .start = cedrus_vp8_start,
+ .stop = cedrus_vp8_stop,
+ .trigger = cedrus_vp8_trigger,
+};
diff --git a/drivers/staging/media/sunxi/sun6i-isp/Kconfig b/drivers/staging/media/sunxi/sun6i-isp/Kconfig
new file mode 100644
index 000000000000..68dcae9cd7d7
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_SUN6I_ISP
+ tristate "Allwinner A31 Image Signal Processor (ISP) Driver"
+ depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on PM && COMMON_CLK && HAS_DMA
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_FWNODE
+ select REGMAP_MMIO
+ help
+ Support for the Allwinner A31 Image Signal Processor (ISP), also
+ found on other platforms such as the A80, A83T or V3/V3s.
diff --git a/drivers/staging/media/sunxi/sun6i-isp/Makefile b/drivers/staging/media/sunxi/sun6i-isp/Makefile
new file mode 100644
index 000000000000..da1034785144
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sun6i-isp-y += sun6i_isp.o sun6i_isp_proc.o sun6i_isp_capture.o sun6i_isp_params.o
+
+obj-$(CONFIG_VIDEO_SUN6I_ISP) += sun6i-isp.o
diff --git a/drivers/staging/media/sunxi/sun6i-isp/TODO.txt b/drivers/staging/media/sunxi/sun6i-isp/TODO.txt
new file mode 100644
index 000000000000..1e3236edc1ab
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/TODO.txt
@@ -0,0 +1,6 @@
+Unstaging requirements:
+- Add uAPI support and documentation for the configuration of all the hardware
+ modules and description of the statistics data structures;
+- Add support for statistics reporting;
+- Add userspace support in libcamera which demonstrates the ability to receive
+ statistics and adapt hardware modules configuration accordingly;
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
new file mode 100644
index 000000000000..6877f2beee8c
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mc.h>
+
+#include "sun6i_isp.h"
+#include "sun6i_isp_capture.h"
+#include "sun6i_isp_params.h"
+#include "sun6i_isp_proc.h"
+#include "sun6i_isp_reg.h"
+
+/* Helpers */
+
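+/*
+ * The load buffer mirrors the hardware registers and is accessed as an
+ * array of 32-bit words indexed by register offset.
+ */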
+u32 sun6i_isp_load_read(struct sun6i_isp_device *isp_dev, u32 offset)
+{
+ u32 *data = (u32 *)(isp_dev->tables.load.data + offset);
+
+ return *data;
+}
+
+void sun6i_isp_load_write(struct sun6i_isp_device *isp_dev, u32 offset,
+ u32 value)
+{
+ u32 *data = (u32 *)(isp_dev->tables.load.data + offset);
+
+ *data = value;
+}
+
+/* State */
+
+/*
+ * The ISP works with a load buffer, which gets copied to the actual registers
+ * by the hardware before processing a frame when a specific flag is set.
+ * This is represented by tracking the ISP state in the different parts of
+ * the code with explicit sync points:
+ * - state update: to update the load buffer for the next frame if necessary;
+ * - state complete: to indicate that the state update was applied.
+ */
+
+static void sun6i_isp_state_ready(struct sun6i_isp_device *isp_dev)
+{
+ struct regmap *regmap = isp_dev->regmap;
+ u32 value;
+
+ regmap_read(regmap, SUN6I_ISP_FE_CTRL_REG, &value);
+ value |= SUN6I_ISP_FE_CTRL_PARA_READY;
+ regmap_write(regmap, SUN6I_ISP_FE_CTRL_REG, value);
+}
+
+static void sun6i_isp_state_complete(struct sun6i_isp_device *isp_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp_dev->state_lock, flags);
+
+ sun6i_isp_capture_state_complete(isp_dev);
+ sun6i_isp_params_state_complete(isp_dev);
+
+ spin_unlock_irqrestore(&isp_dev->state_lock, flags);
+}
+
+void sun6i_isp_state_update(struct sun6i_isp_device *isp_dev, bool ready_hold)
+{
+ bool update = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp_dev->state_lock, flags);
+
+ sun6i_isp_capture_state_update(isp_dev, &update);
+ sun6i_isp_params_state_update(isp_dev, &update);
+
+ if (update && !ready_hold)
+ sun6i_isp_state_ready(isp_dev);
+
+ spin_unlock_irqrestore(&isp_dev->state_lock, flags);
+}
+
+/* Tables */
+
+static int sun6i_isp_table_setup(struct sun6i_isp_device *isp_dev,
+ struct sun6i_isp_table *table)
+{
+ table->data = dma_alloc_coherent(isp_dev->dev, table->size,
+ &table->address, GFP_KERNEL);
+ if (!table->data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sun6i_isp_table_cleanup(struct sun6i_isp_device *isp_dev,
+ struct sun6i_isp_table *table)
+{
+ dma_free_coherent(isp_dev->dev, table->size, table->data,
+ table->address);
+}
+
+void sun6i_isp_tables_configure(struct sun6i_isp_device *isp_dev)
+{
+ struct regmap *regmap = isp_dev->regmap;
+
+ regmap_write(regmap, SUN6I_ISP_REG_LOAD_ADDR_REG,
+ SUN6I_ISP_ADDR_VALUE(isp_dev->tables.load.address));
+
+ regmap_write(regmap, SUN6I_ISP_REG_SAVE_ADDR_REG,
+ SUN6I_ISP_ADDR_VALUE(isp_dev->tables.save.address));
+
+ regmap_write(regmap, SUN6I_ISP_LUT_TABLE_ADDR_REG,
+ SUN6I_ISP_ADDR_VALUE(isp_dev->tables.lut.address));
+
+ regmap_write(regmap, SUN6I_ISP_DRC_TABLE_ADDR_REG,
+ SUN6I_ISP_ADDR_VALUE(isp_dev->tables.drc.address));
+
+ regmap_write(regmap, SUN6I_ISP_STATS_ADDR_REG,
+ SUN6I_ISP_ADDR_VALUE(isp_dev->tables.stats.address));
+}
+
+static int sun6i_isp_tables_setup(struct sun6i_isp_device *isp_dev,
+ const struct sun6i_isp_variant *variant)
+{
+ struct sun6i_isp_tables *tables = &isp_dev->tables;
+ int ret;
+
+ tables->load.size = variant->table_load_save_size;
+ ret = sun6i_isp_table_setup(isp_dev, &tables->load);
+ if (ret)
+ return ret;
+
+ tables->save.size = variant->table_load_save_size;
+ ret = sun6i_isp_table_setup(isp_dev, &tables->save);
+ if (ret)
+ return ret;
+
+ tables->lut.size = variant->table_lut_size;
+ ret = sun6i_isp_table_setup(isp_dev, &tables->lut);
+ if (ret)
+ return ret;
+
+ tables->drc.size = variant->table_drc_size;
+ ret = sun6i_isp_table_setup(isp_dev, &tables->drc);
+ if (ret)
+ return ret;
+
+ tables->stats.size = variant->table_stats_size;
+ ret = sun6i_isp_table_setup(isp_dev, &tables->stats);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sun6i_isp_tables_cleanup(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_tables *tables = &isp_dev->tables;
+
+ sun6i_isp_table_cleanup(isp_dev, &tables->stats);
+ sun6i_isp_table_cleanup(isp_dev, &tables->drc);
+ sun6i_isp_table_cleanup(isp_dev, &tables->lut);
+ sun6i_isp_table_cleanup(isp_dev, &tables->save);
+ sun6i_isp_table_cleanup(isp_dev, &tables->load);
+}
+
+/* Media */
+
+static const struct media_device_ops sun6i_isp_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
+/* V4L2 */
+
+static int sun6i_isp_v4l2_setup(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_v4l2 *v4l2 = &isp_dev->v4l2;
+ struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev;
+ struct media_device *media_dev = &v4l2->media_dev;
+ struct device *dev = isp_dev->dev;
+ int ret;
+
+ /* Media Device */
+
+ strscpy(media_dev->model, SUN6I_ISP_DESCRIPTION,
+ sizeof(media_dev->model));
+ media_dev->ops = &sun6i_isp_media_ops;
+ media_dev->hw_revision = 0;
+ media_dev->dev = dev;
+
+ media_device_init(media_dev);
+
+ ret = media_device_register(media_dev);
+ if (ret) {
+ dev_err(dev, "failed to register media device\n");
+ return ret;
+ }
+
+ /* V4L2 Device */
+
+ v4l2_dev->mdev = media_dev;
+
+ ret = v4l2_device_register(dev, v4l2_dev);
+ if (ret) {
+ dev_err(dev, "failed to register v4l2 device\n");
+ goto error_media;
+ }
+
+ return 0;
+
+error_media:
+ media_device_unregister(media_dev);
+ media_device_cleanup(media_dev);
+
+ return ret;
+}
+
+static void sun6i_isp_v4l2_cleanup(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_v4l2 *v4l2 = &isp_dev->v4l2;
+
+ media_device_unregister(&v4l2->media_dev);
+ v4l2_device_unregister(&v4l2->v4l2_dev);
+ media_device_cleanup(&v4l2->media_dev);
+}
+
+/* Platform */
+
+static irqreturn_t sun6i_isp_interrupt(int irq, void *private)
+{
+ struct sun6i_isp_device *isp_dev = private;
+ struct regmap *regmap = isp_dev->regmap;
+ u32 status = 0, enable = 0;
+
+ regmap_read(regmap, SUN6I_ISP_FE_INT_STA_REG, &status);
+ regmap_read(regmap, SUN6I_ISP_FE_INT_EN_REG, &enable);
+
+ if (!status)
+ return IRQ_NONE;
+ else if (!(status & enable))
+ goto complete;
+
+ /*
+	 * The ISP working cycle starts with a params-load, which makes the
+	 * state from the load buffer active. The ISP then processes the frame
+	 * and raises a finish interrupt. Soon after that, the next state from
+	 * the load buffer is applied for the next frame, raising a params-load
+	 * interrupt as well.
+	 *
+	 * Because the frame-finish and params-load interrupts arrive almost at
+	 * the same time (in a single ISR call), handle them in chronological
+	 * order.
+ */
+
+ if (status & SUN6I_ISP_FE_INT_STA_FINISH)
+ sun6i_isp_capture_finish(isp_dev);
+
+ if (status & SUN6I_ISP_FE_INT_STA_PARA_LOAD) {
+ sun6i_isp_state_complete(isp_dev);
+ sun6i_isp_state_update(isp_dev, false);
+ }
+
+complete:
+ regmap_write(regmap, SUN6I_ISP_FE_INT_STA_REG, status);
+
+ return IRQ_HANDLED;
+}
+
+static int sun6i_isp_suspend(struct device *dev)
+{
+ struct sun6i_isp_device *isp_dev = dev_get_drvdata(dev);
+
+ reset_control_assert(isp_dev->reset);
+ clk_disable_unprepare(isp_dev->clock_ram);
+ clk_disable_unprepare(isp_dev->clock_mod);
+
+ return 0;
+}
+
+static int sun6i_isp_resume(struct device *dev)
+{
+ struct sun6i_isp_device *isp_dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(isp_dev->reset);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(isp_dev->clock_mod);
+ if (ret) {
+ dev_err(dev, "failed to enable module clock\n");
+ goto error_reset;
+ }
+
+ ret = clk_prepare_enable(isp_dev->clock_ram);
+ if (ret) {
+ dev_err(dev, "failed to enable ram clock\n");
+ goto error_clock_mod;
+ }
+
+ return 0;
+
+error_clock_mod:
+ clk_disable_unprepare(isp_dev->clock_mod);
+
+error_reset:
+ reset_control_assert(isp_dev->reset);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sun6i_isp_pm_ops = {
+ .runtime_suspend = sun6i_isp_suspend,
+ .runtime_resume = sun6i_isp_resume,
+};
+
+static const struct regmap_config sun6i_isp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x400,
+};
+
+static int sun6i_isp_resources_setup(struct sun6i_isp_device *isp_dev,
+ struct platform_device *platform_dev)
+{
+ struct device *dev = isp_dev->dev;
+ void __iomem *io_base;
+ int irq;
+ int ret;
+
+ /* Registers */
+
+ io_base = devm_platform_ioremap_resource(platform_dev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ isp_dev->regmap = devm_regmap_init_mmio_clk(dev, "bus", io_base,
+ &sun6i_isp_regmap_config);
+ if (IS_ERR(isp_dev->regmap)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(isp_dev->regmap);
+ }
+
+ /* Clocks */
+
+ isp_dev->clock_mod = devm_clk_get(dev, "mod");
+ if (IS_ERR(isp_dev->clock_mod)) {
+ dev_err(dev, "failed to acquire module clock\n");
+ return PTR_ERR(isp_dev->clock_mod);
+ }
+
+ isp_dev->clock_ram = devm_clk_get(dev, "ram");
+ if (IS_ERR(isp_dev->clock_ram)) {
+ dev_err(dev, "failed to acquire ram clock\n");
+ return PTR_ERR(isp_dev->clock_ram);
+ }
+
+ ret = clk_set_rate_exclusive(isp_dev->clock_mod, 297000000);
+ if (ret) {
+ dev_err(dev, "failed to set mod clock rate\n");
+ return ret;
+ }
+
+ /* Reset */
+
+ isp_dev->reset = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(isp_dev->reset)) {
+ dev_err(dev, "failed to acquire reset\n");
+ ret = PTR_ERR(isp_dev->reset);
+ goto error_clock_rate_exclusive;
+ }
+
+ /* Interrupt */
+
+ irq = platform_get_irq(platform_dev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error_clock_rate_exclusive;
+ }
+
+ ret = devm_request_irq(dev, irq, sun6i_isp_interrupt, IRQF_SHARED,
+ SUN6I_ISP_NAME, isp_dev);
+ if (ret) {
+ dev_err(dev, "failed to request interrupt\n");
+ goto error_clock_rate_exclusive;
+ }
+
+ /* Runtime PM */
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+error_clock_rate_exclusive:
+ clk_rate_exclusive_put(isp_dev->clock_mod);
+
+ return ret;
+}
+
+static void sun6i_isp_resources_cleanup(struct sun6i_isp_device *isp_dev)
+{
+ struct device *dev = isp_dev->dev;
+
+ pm_runtime_disable(dev);
+ clk_rate_exclusive_put(isp_dev->clock_mod);
+}
+
+static int sun6i_isp_probe(struct platform_device *platform_dev)
+{
+ struct sun6i_isp_device *isp_dev;
+ struct device *dev = &platform_dev->dev;
+ const struct sun6i_isp_variant *variant;
+ int ret;
+
+ variant = of_device_get_match_data(dev);
+ if (!variant)
+ return -EINVAL;
+
+ isp_dev = devm_kzalloc(dev, sizeof(*isp_dev), GFP_KERNEL);
+ if (!isp_dev)
+ return -ENOMEM;
+
+ isp_dev->dev = dev;
+ platform_set_drvdata(platform_dev, isp_dev);
+
+ spin_lock_init(&isp_dev->state_lock);
+
+ ret = sun6i_isp_resources_setup(isp_dev, platform_dev);
+ if (ret)
+ return ret;
+
+ ret = sun6i_isp_tables_setup(isp_dev, variant);
+ if (ret) {
+ dev_err(dev, "failed to setup tables\n");
+ goto error_resources;
+ }
+
+ ret = sun6i_isp_v4l2_setup(isp_dev);
+ if (ret) {
+ dev_err(dev, "failed to setup v4l2\n");
+ goto error_tables;
+ }
+
+ ret = sun6i_isp_proc_setup(isp_dev);
+ if (ret) {
+ dev_err(dev, "failed to setup proc\n");
+ goto error_v4l2;
+ }
+
+ ret = sun6i_isp_capture_setup(isp_dev);
+ if (ret) {
+ dev_err(dev, "failed to setup capture\n");
+ goto error_proc;
+ }
+
+ ret = sun6i_isp_params_setup(isp_dev);
+ if (ret) {
+ dev_err(dev, "failed to setup params\n");
+ goto error_capture;
+ }
+
+ return 0;
+
+error_capture:
+ sun6i_isp_capture_cleanup(isp_dev);
+
+error_proc:
+ sun6i_isp_proc_cleanup(isp_dev);
+
+error_v4l2:
+ sun6i_isp_v4l2_cleanup(isp_dev);
+
+error_tables:
+ sun6i_isp_tables_cleanup(isp_dev);
+
+error_resources:
+ sun6i_isp_resources_cleanup(isp_dev);
+
+ return ret;
+}
+
+static void sun6i_isp_remove(struct platform_device *platform_dev)
+{
+ struct sun6i_isp_device *isp_dev = platform_get_drvdata(platform_dev);
+
+ sun6i_isp_params_cleanup(isp_dev);
+ sun6i_isp_capture_cleanup(isp_dev);
+ sun6i_isp_proc_cleanup(isp_dev);
+ sun6i_isp_v4l2_cleanup(isp_dev);
+ sun6i_isp_tables_cleanup(isp_dev);
+ sun6i_isp_resources_cleanup(isp_dev);
+}
+
+/*
+ * History of sun6i-isp:
+ * - sun4i-a10-isp: initial ISP tied to the CSI0 controller,
+ * apparently unused in software implementations;
+ * - sun6i-a31-isp: separate ISP loosely based on sun4i-a10-isp,
+ * adding extra modules and features;
+ * - sun9i-a80-isp: based on sun6i-a31-isp with some register offset changes
+ * and new modules like saturation and cnr;
+ * - sun8i-a23-isp/sun8i-h3-isp: based on sun9i-a80-isp with most modules
+ * related to raw removed;
+ * - sun8i-a83t-isp: based on sun9i-a80-isp with some register offset changes;
+ * - sun8i-v3s-isp: based on sun8i-a83t-isp with a new disc module;
+ */
+
+static const struct sun6i_isp_variant sun8i_v3s_isp_variant = {
+ .table_load_save_size = 0x1000,
+ .table_lut_size = 0xe00,
+ .table_drc_size = 0x600,
+ .table_stats_size = 0x2100,
+};
+
+static const struct of_device_id sun6i_isp_of_match[] = {
+ {
+ .compatible = "allwinner,sun8i-v3s-isp",
+ .data = &sun8i_v3s_isp_variant,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, sun6i_isp_of_match);
+
+static struct platform_driver sun6i_isp_platform_driver = {
+ .probe = sun6i_isp_probe,
+ .remove = sun6i_isp_remove,
+ .driver = {
+ .name = SUN6I_ISP_NAME,
+ .of_match_table = sun6i_isp_of_match,
+ .pm = &sun6i_isp_pm_ops,
+ },
+};
+
+module_platform_driver(sun6i_isp_platform_driver);
+
+MODULE_DESCRIPTION("Allwinner A31 Image Signal Processor driver");
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.h b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.h
new file mode 100644
index 000000000000..0e5f188319ff
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN6I_ISP_H_
+#define _SUN6I_ISP_H_
+
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "sun6i_isp_capture.h"
+#include "sun6i_isp_params.h"
+#include "sun6i_isp_proc.h"
+
+#define SUN6I_ISP_NAME "sun6i-isp"
+#define SUN6I_ISP_DESCRIPTION "Allwinner A31 ISP Device"
+
+enum sun6i_isp_port {
+ SUN6I_ISP_PORT_CSI0 = 0,
+ SUN6I_ISP_PORT_CSI1 = 1,
+};
+
+struct sun6i_isp_buffer {
+ struct vb2_v4l2_buffer v4l2_buffer;
+ struct list_head list;
+};
+
+struct sun6i_isp_v4l2 {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+};
+
+struct sun6i_isp_table {
+ void *data;
+ dma_addr_t address;
+ unsigned int size;
+};
+
+struct sun6i_isp_tables {
+ struct sun6i_isp_table load;
+ struct sun6i_isp_table save;
+
+ struct sun6i_isp_table lut;
+ struct sun6i_isp_table drc;
+ struct sun6i_isp_table stats;
+};
+
+struct sun6i_isp_device {
+ struct device *dev;
+
+ struct sun6i_isp_tables tables;
+
+ struct sun6i_isp_v4l2 v4l2;
+ struct sun6i_isp_proc proc;
+ struct sun6i_isp_capture capture;
+ struct sun6i_isp_params params;
+
+ struct regmap *regmap;
+ struct clk *clock_mod;
+ struct clk *clock_ram;
+ struct reset_control *reset;
+
+ spinlock_t state_lock; /* State helpers lock. */
+};
+
+struct sun6i_isp_variant {
+ unsigned int table_load_save_size;
+ unsigned int table_lut_size;
+ unsigned int table_drc_size;
+ unsigned int table_stats_size;
+};
+
+/* Helpers */
+
+u32 sun6i_isp_load_read(struct sun6i_isp_device *isp_dev, u32 offset);
+void sun6i_isp_load_write(struct sun6i_isp_device *isp_dev, u32 offset,
+ u32 value);
+u32 sun6i_isp_address_value(dma_addr_t address);
+
+/* State */
+
+void sun6i_isp_state_update(struct sun6i_isp_device *isp_dev, bool ready_hold);
+
+/* Tables */
+
+void sun6i_isp_tables_configure(struct sun6i_isp_device *isp_dev);
+
+#endif
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
new file mode 100644
index 000000000000..e7b99cee63d6
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "sun6i_isp.h"
+#include "sun6i_isp_capture.h"
+#include "sun6i_isp_proc.h"
+#include "sun6i_isp_reg.h"
+
+/* Helpers */
+
+void sun6i_isp_capture_dimensions(struct sun6i_isp_device *isp_dev,
+ unsigned int *width, unsigned int *height)
+{
+ if (width)
+ *width = isp_dev->capture.format.fmt.pix.width;
+ if (height)
+ *height = isp_dev->capture.format.fmt.pix.height;
+}
+
+void sun6i_isp_capture_format(struct sun6i_isp_device *isp_dev,
+ u32 *pixelformat)
+{
+ if (pixelformat)
+ *pixelformat = isp_dev->capture.format.fmt.pix.pixelformat;
+}
+
+/* Format */
+
+static const struct sun6i_isp_capture_format sun6i_isp_capture_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .output_format = SUN6I_ISP_OUTPUT_FMT_YUV420SP,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .output_format = SUN6I_ISP_OUTPUT_FMT_YVU420SP,
+ },
+};
+
+const struct sun6i_isp_capture_format *
+sun6i_isp_capture_format_find(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun6i_isp_capture_formats); i++)
+ if (sun6i_isp_capture_formats[i].pixelformat == pixelformat)
+ return &sun6i_isp_capture_formats[i];
+
+ return NULL;
+}
+
+/* Capture */
+
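+/*
+ * Write the buffer plane DMA addresses to the load buffer, deriving the
+ * chroma plane offsets from the pixel format and the aligned width.
+ */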
+static void
+sun6i_isp_capture_buffer_configure(struct sun6i_isp_device *isp_dev,
+ struct sun6i_isp_buffer *isp_buffer)
+{
+ const struct v4l2_format_info *info;
+ struct vb2_buffer *vb2_buffer;
+ unsigned int width, height;
+ unsigned int width_aligned;
+ dma_addr_t address;
+ u32 pixelformat;
+
+ vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
+ address = vb2_dma_contig_plane_dma_addr(vb2_buffer, 0);
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_Y_ADDR0_REG,
+ SUN6I_ISP_ADDR_VALUE(address));
+
+ sun6i_isp_capture_dimensions(isp_dev, &width, &height);
+ sun6i_isp_capture_format(isp_dev, &pixelformat);
+
+ info = v4l2_format_info(pixelformat);
+ if (WARN_ON(!info))
+ return;
+
+ /* Stride needs to be aligned to 4. */
+ width_aligned = ALIGN(width, 2);
+
+ if (info->comp_planes > 1) {
+ address += info->bpp[0] * width_aligned * height;
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_U_ADDR0_REG,
+ SUN6I_ISP_ADDR_VALUE(address));
+ }
+
+ if (info->comp_planes > 2) {
+ address += info->bpp[1] *
+ DIV_ROUND_UP(width_aligned, info->hdiv) *
+ DIV_ROUND_UP(height, info->vdiv);
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_V_ADDR0_REG,
+ SUN6I_ISP_ADDR_VALUE(address));
+ }
+}
+
+void sun6i_isp_capture_configure(struct sun6i_isp_device *isp_dev)
+{
+ unsigned int width, height;
+ unsigned int stride_luma, stride_chroma;
+ unsigned int stride_luma_div4, stride_chroma_div4 = 0;
+ const struct sun6i_isp_capture_format *format;
+ const struct v4l2_format_info *info;
+ u32 pixelformat;
+
+ sun6i_isp_capture_dimensions(isp_dev, &width, &height);
+ sun6i_isp_capture_format(isp_dev, &pixelformat);
+
+ format = sun6i_isp_capture_format_find(pixelformat);
+ if (WARN_ON(!format))
+ return;
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_SIZE_CFG_REG,
+ SUN6I_ISP_MCH_SIZE_CFG_WIDTH(width) |
+ SUN6I_ISP_MCH_SIZE_CFG_HEIGHT(height));
+
+ info = v4l2_format_info(pixelformat);
+ if (WARN_ON(!info))
+ return;
+
+ stride_luma = width * info->bpp[0];
+ stride_luma_div4 = DIV_ROUND_UP(stride_luma, 4);
+
+ if (info->comp_planes > 1) {
+ stride_chroma = width * info->bpp[1] / info->hdiv;
+ stride_chroma_div4 = DIV_ROUND_UP(stride_chroma, 4);
+ }
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MCH_CFG_REG,
+ SUN6I_ISP_MCH_CFG_EN |
+ SUN6I_ISP_MCH_CFG_OUTPUT_FMT(format->output_format) |
+ SUN6I_ISP_MCH_CFG_STRIDE_Y_DIV4(stride_luma_div4) |
+ SUN6I_ISP_MCH_CFG_STRIDE_UV_DIV4(stride_chroma_div4));
+}
+
+/* State */
+
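+/*
+ * Return every buffer known to the state (pending, current, complete and
+ * queued) to vb2, either as errored or as simply re-queued.
+ */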
+static void sun6i_isp_capture_state_cleanup(struct sun6i_isp_device *isp_dev,
+ bool error)
+{
+ struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
+ struct sun6i_isp_buffer **isp_buffer_states[] = {
+ &state->pending, &state->current, &state->complete,
+ };
+ struct sun6i_isp_buffer *isp_buffer;
+ struct vb2_buffer *vb2_buffer;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&state->lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(isp_buffer_states); i++) {
+ isp_buffer = *isp_buffer_states[i];
+ if (!isp_buffer)
+ continue;
+
+ vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
+ vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
+
+ *isp_buffer_states[i] = NULL;
+ }
+
+ list_for_each_entry(isp_buffer, &state->queue, list) {
+ vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
+ vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
+ }
+
+ INIT_LIST_HEAD(&state->queue);
+
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
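+/*
+ * Pick the next queued buffer, program its addresses into the load buffer
+ * and make it the pending buffer, flagging an update for the caller.
+ */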
+void sun6i_isp_capture_state_update(struct sun6i_isp_device *isp_dev,
+ bool *update)
+{
+ struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
+ struct sun6i_isp_buffer *isp_buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+
+ if (list_empty(&state->queue))
+ goto complete;
+
+ if (state->pending)
+ goto complete;
+
+ isp_buffer = list_first_entry(&state->queue, struct sun6i_isp_buffer,
+ list);
+
+ sun6i_isp_capture_buffer_configure(isp_dev, isp_buffer);
+
+ list_del(&isp_buffer->list);
+
+ state->pending = isp_buffer;
+
+ if (update)
+ *update = true;
+
+complete:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+void sun6i_isp_capture_state_complete(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+
+ if (!state->pending)
+ goto complete;
+
+ state->complete = state->current;
+ state->current = state->pending;
+ state->pending = NULL;
+
+ if (state->complete) {
+ struct sun6i_isp_buffer *isp_buffer = state->complete;
+ struct vb2_buffer *vb2_buffer =
+ &isp_buffer->v4l2_buffer.vb2_buf;
+
+ vb2_buffer->timestamp = ktime_get_ns();
+ isp_buffer->v4l2_buffer.sequence = state->sequence;
+
+ vb2_buffer_done(vb2_buffer, VB2_BUF_STATE_DONE);
+
+ state->complete = NULL;
+ }
+
+complete:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+void sun6i_isp_capture_finish(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+ state->sequence++;
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+/* Queue */
+
+static int sun6i_isp_capture_queue_setup(struct vb2_queue *queue,
+ unsigned int *buffers_count,
+ unsigned int *planes_count,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
+ unsigned int size = isp_dev->capture.format.fmt.pix.sizeimage;
+
+ if (*planes_count)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *planes_count = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int sun6i_isp_capture_buffer_prepare(struct vb2_buffer *vb2_buffer)
+{
+ struct sun6i_isp_device *isp_dev =
+ vb2_get_drv_priv(vb2_buffer->vb2_queue);
+ struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
+ unsigned int size = isp_dev->capture.format.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb2_buffer, 0) < size) {
+ v4l2_err(v4l2_dev, "buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb2_buffer, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb2_buffer, 0, size);
+
+ return 0;
+}
+
+static void sun6i_isp_capture_buffer_queue(struct vb2_buffer *vb2_buffer)
+{
+ struct sun6i_isp_device *isp_dev =
+ vb2_get_drv_priv(vb2_buffer->vb2_queue);
+ struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
+ struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(vb2_buffer);
+ struct sun6i_isp_buffer *isp_buffer =
+ container_of(v4l2_buffer, struct sun6i_isp_buffer, v4l2_buffer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+ list_add_tail(&isp_buffer->list, &state->queue);
+ spin_unlock_irqrestore(&state->lock, flags);
+
+ /* Update the state to schedule our buffer as soon as possible. */
+ if (state->streaming)
+ sun6i_isp_state_update(isp_dev, false);
+}
+
+static int sun6i_isp_capture_start_streaming(struct vb2_queue *queue,
+ unsigned int count)
+{
+ struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
+ struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
+ struct video_device *video_dev = &isp_dev->capture.video_dev;
+ struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
+ int ret;
+
+ state->sequence = 0;
+
+ ret = video_device_pipeline_alloc_start(video_dev);
+ if (ret < 0)
+ goto error_state;
+
+ state->streaming = true;
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto error_streaming;
+
+ return 0;
+
+error_streaming:
+ state->streaming = false;
+
+ video_device_pipeline_stop(video_dev);
+
+error_state:
+ sun6i_isp_capture_state_cleanup(isp_dev, false);
+
+ return ret;
+}
+
+static void sun6i_isp_capture_stop_streaming(struct vb2_queue *queue)
+{
+ struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
+ struct sun6i_isp_capture_state *state = &isp_dev->capture.state;
+ struct video_device *video_dev = &isp_dev->capture.video_dev;
+ struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
+
+ v4l2_subdev_call(subdev, video, s_stream, 0);
+
+ state->streaming = false;
+
+ video_device_pipeline_stop(video_dev);
+
+ sun6i_isp_capture_state_cleanup(isp_dev, true);
+}
+
+static const struct vb2_ops sun6i_isp_capture_queue_ops = {
+ .queue_setup = sun6i_isp_capture_queue_setup,
+ .buf_prepare = sun6i_isp_capture_buffer_prepare,
+ .buf_queue = sun6i_isp_capture_buffer_queue,
+ .start_streaming = sun6i_isp_capture_start_streaming,
+ .stop_streaming = sun6i_isp_capture_stop_streaming,
+};
+
+/* Video Device */
+
+static void sun6i_isp_capture_format_prepare(struct v4l2_format *format)
+{
+ struct v4l2_pix_format *pix_format = &format->fmt.pix;
+ const struct v4l2_format_info *info;
+ unsigned int width, height;
+ unsigned int width_aligned;
+ unsigned int i;
+
+ v4l_bound_align_image(&pix_format->width, SUN6I_ISP_CAPTURE_WIDTH_MIN,
+ SUN6I_ISP_CAPTURE_WIDTH_MAX, 1,
+ &pix_format->height, SUN6I_ISP_CAPTURE_HEIGHT_MIN,
+ SUN6I_ISP_CAPTURE_HEIGHT_MAX, 1, 0);
+
+ if (!sun6i_isp_capture_format_find(pix_format->pixelformat))
+ pix_format->pixelformat =
+ sun6i_isp_capture_formats[0].pixelformat;
+
+ info = v4l2_format_info(pix_format->pixelformat);
+ if (WARN_ON(!info))
+ return;
+
+ width = pix_format->width;
+ height = pix_format->height;
+
+ /* Stride needs to be aligned to 4. */
+ width_aligned = ALIGN(width, 2);
+
+ pix_format->bytesperline = width_aligned * info->bpp[0];
+ pix_format->sizeimage = 0;
+
+ for (i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+
+ pix_format->sizeimage += info->bpp[i] *
+ DIV_ROUND_UP(width_aligned, hdiv) *
+ DIV_ROUND_UP(height, vdiv);
+ }
+
+ pix_format->field = V4L2_FIELD_NONE;
+
+ pix_format->colorspace = V4L2_COLORSPACE_RAW;
+ pix_format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix_format->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
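For a concrete sense of the arithmetic above, here is a small host-side sketch of the same bytesperline/sizeimage computation, assuming a hypothetical semi-planar YUV 4:2:0 entry in sun6i_isp_capture_formats (v4l2_format_info values of bpp = {1, 2}, hdiv = vdiv = 2, two component planes) at the 1280x720 default size; the real format table is defined earlier in this file and may differ:

#include <stdio.h>

/* Mirrors the per-plane loop in sun6i_isp_capture_format_prepare(). */
int main(void)
{
	/* Assumed v4l2_format_info values for a semi-planar YUV 4:2:0 format. */
	unsigned int bpp[2] = { 1, 2 };
	unsigned int comp_planes = 2, hdiv = 2, vdiv = 2;
	unsigned int width = 1280, height = 720;
	unsigned int width_aligned = (width + 1) & ~1U;	/* ALIGN(width, 2) */
	unsigned int bytesperline = width_aligned * bpp[0];
	unsigned int sizeimage = 0, i;

	for (i = 0; i < comp_planes; i++) {
		unsigned int h = (i == 0) ? 1 : hdiv;
		unsigned int v = (i == 0) ? 1 : vdiv;

		/* DIV_ROUND_UP() spelled out for a host build. */
		sizeimage += bpp[i] * ((width_aligned + h - 1) / h) *
			     ((height + v - 1) / v);
	}

	/* Expected: bytesperline = 1280, sizeimage = 921600 + 460800 = 1382400. */
	printf("bytesperline=%u sizeimage=%u\n", bytesperline, sizeimage);

	return 0;
}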
+
+static int sun6i_isp_capture_querycap(struct file *file, void *priv,
+ struct v4l2_capability *capability)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+ struct video_device *video_dev = &isp_dev->capture.video_dev;
+
+ strscpy(capability->driver, SUN6I_ISP_NAME, sizeof(capability->driver));
+ strscpy(capability->card, video_dev->name, sizeof(capability->card));
+ snprintf(capability->bus_info, sizeof(capability->bus_info),
+ "platform:%s", dev_name(isp_dev->dev));
+
+ return 0;
+}
+
+static int sun6i_isp_capture_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmtdesc)
+{
+ u32 index = fmtdesc->index;
+
+ if (index >= ARRAY_SIZE(sun6i_isp_capture_formats))
+ return -EINVAL;
+
+ fmtdesc->pixelformat = sun6i_isp_capture_formats[index].pixelformat;
+
+ return 0;
+}
+
+static int sun6i_isp_capture_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *format)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+
+ *format = isp_dev->capture.format;
+
+ return 0;
+}
+
+static int sun6i_isp_capture_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *format)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+
+ if (vb2_is_busy(&isp_dev->capture.queue))
+ return -EBUSY;
+
+ sun6i_isp_capture_format_prepare(format);
+
+ isp_dev->capture.format = *format;
+
+ return 0;
+}
+
+static int sun6i_isp_capture_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *format)
+{
+ sun6i_isp_capture_format_prepare(format);
+
+ return 0;
+}
+
+static int sun6i_isp_capture_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ strscpy(input->name, "Camera", sizeof(input->name));
+
+ return 0;
+}
+
+static int sun6i_isp_capture_g_input(struct file *file, void *priv,
+ unsigned int *index)
+{
+ *index = 0;
+
+ return 0;
+}
+
+static int sun6i_isp_capture_s_input(struct file *file, void *priv,
+ unsigned int index)
+{
+ if (index != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops sun6i_isp_capture_ioctl_ops = {
+ .vidioc_querycap = sun6i_isp_capture_querycap,
+
+ .vidioc_enum_fmt_vid_cap = sun6i_isp_capture_enum_fmt,
+ .vidioc_g_fmt_vid_cap = sun6i_isp_capture_g_fmt,
+ .vidioc_s_fmt_vid_cap = sun6i_isp_capture_s_fmt,
+ .vidioc_try_fmt_vid_cap = sun6i_isp_capture_try_fmt,
+
+ .vidioc_enum_input = sun6i_isp_capture_enum_input,
+ .vidioc_g_input = sun6i_isp_capture_g_input,
+ .vidioc_s_input = sun6i_isp_capture_s_input,
+
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static int sun6i_isp_capture_open(struct file *file)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+ struct video_device *video_dev = &isp_dev->capture.video_dev;
+ struct mutex *lock = &isp_dev->capture.lock;
+ int ret;
+
+ if (mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_pipeline_pm_get(&video_dev->entity);
+ if (ret)
+ goto error_mutex;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ goto error_pipeline;
+
+ mutex_unlock(lock);
+
+ return 0;
+
+error_pipeline:
+ v4l2_pipeline_pm_put(&video_dev->entity);
+
+error_mutex:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int sun6i_isp_capture_release(struct file *file)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+ struct video_device *video_dev = &isp_dev->capture.video_dev;
+ struct mutex *lock = &isp_dev->capture.lock;
+
+ mutex_lock(lock);
+
+ _vb2_fop_release(file, NULL);
+ v4l2_pipeline_pm_put(&video_dev->entity);
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations sun6i_isp_capture_fops = {
+ .owner = THIS_MODULE,
+ .open = sun6i_isp_capture_open,
+ .release = sun6i_isp_capture_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/* Media Entity */
+
+static int sun6i_isp_capture_link_validate(struct media_link *link)
+{
+ struct video_device *video_dev =
+ media_entity_to_video_device(link->sink->entity);
+ struct sun6i_isp_device *isp_dev = video_get_drvdata(video_dev);
+ struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
+ unsigned int capture_width, capture_height;
+ unsigned int proc_width, proc_height;
+
+ sun6i_isp_capture_dimensions(isp_dev, &capture_width, &capture_height);
+ sun6i_isp_proc_dimensions(isp_dev, &proc_width, &proc_height);
+
+ /* No cropping/scaling is supported (yet). */
+ if (capture_width != proc_width || capture_height != proc_height) {
+ v4l2_err(v4l2_dev,
+ "invalid input/output dimensions: %ux%u/%ux%u\n",
+ proc_width, proc_height, capture_width,
+ capture_height);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations sun6i_isp_capture_entity_ops = {
+ .link_validate = sun6i_isp_capture_link_validate,
+};
+
+/* Capture */
+
+int sun6i_isp_capture_setup(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_capture *capture = &isp_dev->capture;
+ struct sun6i_isp_capture_state *state = &capture->state;
+ struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
+ struct v4l2_subdev *proc_subdev = &isp_dev->proc.subdev;
+ struct video_device *video_dev = &capture->video_dev;
+ struct vb2_queue *queue = &capture->queue;
+ struct media_pad *pad = &capture->pad;
+ struct v4l2_format *format = &capture->format;
+ struct v4l2_pix_format *pix_format = &format->fmt.pix;
+ int ret;
+
+ /* State */
+
+ INIT_LIST_HEAD(&state->queue);
+ spin_lock_init(&state->lock);
+
+ /* Media Entity */
+
+ video_dev->entity.ops = &sun6i_isp_capture_entity_ops;
+
+ /* Media Pads */
+
+ pad->flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+
+ ret = media_entity_pads_init(&video_dev->entity, 1, pad);
+ if (ret)
+ goto error_mutex;
+
+ /* Queue */
+
+ mutex_init(&capture->lock);
+
+ queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ queue->io_modes = VB2_MMAP | VB2_DMABUF;
+ queue->buf_struct_size = sizeof(struct sun6i_isp_buffer);
+ queue->ops = &sun6i_isp_capture_queue_ops;
+ queue->mem_ops = &vb2_dma_contig_memops;
+ queue->min_queued_buffers = 2;
+ queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ queue->lock = &capture->lock;
+ queue->dev = isp_dev->dev;
+ queue->drv_priv = isp_dev;
+
+ ret = vb2_queue_init(queue);
+ if (ret) {
+ v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret);
+ goto error_media_entity;
+ }
+
+ /* V4L2 Format */
+
+ format->type = queue->type;
+ pix_format->pixelformat = sun6i_isp_capture_formats[0].pixelformat;
+ pix_format->width = 1280;
+ pix_format->height = 720;
+
+ sun6i_isp_capture_format_prepare(format);
+
+ /* Video Device */
+
+ strscpy(video_dev->name, SUN6I_ISP_CAPTURE_NAME,
+ sizeof(video_dev->name));
+ video_dev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ video_dev->vfl_dir = VFL_DIR_RX;
+ video_dev->release = video_device_release_empty;
+ video_dev->fops = &sun6i_isp_capture_fops;
+ video_dev->ioctl_ops = &sun6i_isp_capture_ioctl_ops;
+ video_dev->v4l2_dev = v4l2_dev;
+ video_dev->queue = queue;
+ video_dev->lock = &capture->lock;
+
+ video_set_drvdata(video_dev, isp_dev);
+
+ ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(v4l2_dev, "failed to register video device: %d\n",
+ ret);
+ goto error_media_entity;
+ }
+
+ /* Media Pad Link */
+
+ ret = media_create_pad_link(&proc_subdev->entity,
+ SUN6I_ISP_PROC_PAD_SOURCE,
+ &video_dev->entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "failed to create %s:%u -> %s:%u link\n",
+ proc_subdev->entity.name, SUN6I_ISP_PROC_PAD_SOURCE,
+ video_dev->entity.name, 0);
+ goto error_video_device;
+ }
+
+ return 0;
+
+error_video_device:
+ vb2_video_unregister_device(video_dev);
+
+error_media_entity:
+ media_entity_cleanup(&video_dev->entity);
+
+error_mutex:
+ mutex_destroy(&capture->lock);
+
+ return ret;
+}
+
+void sun6i_isp_capture_cleanup(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_capture *capture = &isp_dev->capture;
+ struct video_device *video_dev = &capture->video_dev;
+
+ vb2_video_unregister_device(video_dev);
+ media_entity_cleanup(&video_dev->entity);
+ mutex_destroy(&capture->lock);
+}
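The buffer hand-off implemented by sun6i_isp_capture_state_update() and sun6i_isp_capture_state_complete() is easier to follow in isolation. The sketch below is a simplified host-side model only (buffers reduced to integers, no locking, no VB2), not driver code; it shows that a buffer stays "current" in hardware until a newer one replaces it, at which point it moves to "complete" and is returned:

#include <stdio.h>

/*
 * Simplified model of the capture rotation: update() promotes the head of
 * the queue to "pending" (programmed for the next frame), complete() shifts
 * pending -> current -> complete and hands the completed buffer back.
 */
struct state {
	int queue[8];
	int queue_len;
	int pending, current_buf, complete;	/* 0 means "no buffer" */
};

static void state_update(struct state *s)
{
	int i;

	if (!s->queue_len || s->pending)
		return;

	s->pending = s->queue[0];
	for (i = 1; i < s->queue_len; i++)
		s->queue[i - 1] = s->queue[i];
	s->queue_len--;
}

static void state_complete(struct state *s)
{
	if (!s->pending)
		return;

	s->complete = s->current_buf;
	s->current_buf = s->pending;
	s->pending = 0;

	if (s->complete) {
		printf("buffer %d done\n", s->complete);
		s->complete = 0;
	}
}

int main(void)
{
	struct state s = { .queue = { 1, 2, 3 }, .queue_len = 3 };
	int frame;

	/* One update/complete pair per hardware frame. */
	for (frame = 0; frame < 4; frame++) {
		state_update(&s);
		state_complete(&s);
	}

	return 0;
}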
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.h b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.h
new file mode 100644
index 000000000000..0e3e4fa7a0f4
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN6I_ISP_CAPTURE_H_
+#define _SUN6I_ISP_CAPTURE_H_
+
+#include <media/v4l2-device.h>
+
+#define SUN6I_ISP_CAPTURE_NAME "sun6i-isp-capture"
+
+#define SUN6I_ISP_CAPTURE_WIDTH_MIN 16
+#define SUN6I_ISP_CAPTURE_WIDTH_MAX 3264
+#define SUN6I_ISP_CAPTURE_HEIGHT_MIN 16
+#define SUN6I_ISP_CAPTURE_HEIGHT_MAX 2448
+
+struct sun6i_isp_device;
+
+struct sun6i_isp_capture_format {
+ u32 pixelformat;
+ u8 output_format;
+};
+
+/*
+ * The kernel normally defines 'current' as a macro (get_current()), which
+ * would clash with the buffer pointer member named below.
+ */
+#undef current
+struct sun6i_isp_capture_state {
+ struct list_head queue;
+ spinlock_t lock; /* Queue and buffers lock. */
+
+ struct sun6i_isp_buffer *pending;
+ struct sun6i_isp_buffer *current;
+ struct sun6i_isp_buffer *complete;
+
+ unsigned int sequence;
+ bool streaming;
+};
+
+struct sun6i_isp_capture {
+ struct sun6i_isp_capture_state state;
+
+ struct video_device video_dev;
+ struct vb2_queue queue;
+ struct mutex lock; /* Queue lock. */
+ struct media_pad pad;
+
+ struct v4l2_format format;
+};
+
+/* Helpers */
+
+void sun6i_isp_capture_dimensions(struct sun6i_isp_device *isp_dev,
+ unsigned int *width, unsigned int *height);
+void sun6i_isp_capture_format(struct sun6i_isp_device *isp_dev,
+ u32 *pixelformat);
+
+/* Format */
+
+const struct sun6i_isp_capture_format *
+sun6i_isp_capture_format_find(u32 pixelformat);
+
+/* Capture */
+
+void sun6i_isp_capture_configure(struct sun6i_isp_device *isp_dev);
+
+/* State */
+
+void sun6i_isp_capture_state_update(struct sun6i_isp_device *isp_dev,
+ bool *update);
+void sun6i_isp_capture_state_complete(struct sun6i_isp_device *isp_dev);
+void sun6i_isp_capture_finish(struct sun6i_isp_device *isp_dev);
+
+/* Capture */
+
+int sun6i_isp_capture_setup(struct sun6i_isp_device *isp_dev);
+void sun6i_isp_capture_cleanup(struct sun6i_isp_device *isp_dev);
+
+#endif
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
new file mode 100644
index 000000000000..77c2d06c0436
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "sun6i_isp.h"
+#include "sun6i_isp_params.h"
+#include "sun6i_isp_reg.h"
+#include "uapi/sun6i-isp-config.h"
+
+/* Params */
+
+static const struct sun6i_isp_params_config sun6i_isp_params_config_default = {
+ .modules_used = SUN6I_ISP_MODULE_BAYER,
+
+ .bayer = {
+ .offset_r = 32,
+ .offset_gr = 32,
+ .offset_gb = 32,
+ .offset_b = 32,
+
+ .gain_r = 256,
+ .gain_gr = 256,
+ .gain_gb = 256,
+ .gain_b = 256,
+ },
+
+ .bdnf = {
+ .in_dis_min = 8,
+ .in_dis_max = 16,
+
+ .coefficients_g = { 15, 4, 1 },
+ .coefficients_rb = { 15, 4 },
+ },
+};
+
+static void sun6i_isp_params_configure_ob(struct sun6i_isp_device *isp_dev)
+{
+ unsigned int width, height;
+
+ sun6i_isp_proc_dimensions(isp_dev, &width, &height);
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_OB_SIZE_REG,
+ SUN6I_ISP_OB_SIZE_WIDTH(width) |
+ SUN6I_ISP_OB_SIZE_HEIGHT(height));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_OB_VALID_REG,
+ SUN6I_ISP_OB_VALID_WIDTH(width) |
+ SUN6I_ISP_OB_VALID_HEIGHT(height));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_OB_SRC0_VALID_START_REG,
+ SUN6I_ISP_OB_SRC0_VALID_START_HORZ(0) |
+ SUN6I_ISP_OB_SRC0_VALID_START_VERT(0));
+}
+
+static void sun6i_isp_params_configure_ae(struct sun6i_isp_device *isp_dev)
+{
+ /* These are default values that need to be set to get an output. */
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_AE_CFG_REG,
+ SUN6I_ISP_AE_CFG_LOW_BRI_TH(0xff) |
+ SUN6I_ISP_AE_CFG_HORZ_NUM(8) |
+ SUN6I_ISP_AE_CFG_HIGH_BRI_TH(0xf00) |
+ SUN6I_ISP_AE_CFG_VERT_NUM(8));
+}
+
+static void
+sun6i_isp_params_configure_bayer(struct sun6i_isp_device *isp_dev,
+ const struct sun6i_isp_params_config *config)
+{
+ const struct sun6i_isp_params_config_bayer *bayer = &config->bayer;
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_OFFSET0_REG,
+ SUN6I_ISP_BAYER_OFFSET0_R(bayer->offset_r) |
+ SUN6I_ISP_BAYER_OFFSET0_GR(bayer->offset_gr));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_OFFSET1_REG,
+ SUN6I_ISP_BAYER_OFFSET1_GB(bayer->offset_gb) |
+ SUN6I_ISP_BAYER_OFFSET1_B(bayer->offset_b));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_GAIN0_REG,
+ SUN6I_ISP_BAYER_GAIN0_R(bayer->gain_r) |
+ SUN6I_ISP_BAYER_GAIN0_GR(bayer->gain_gr));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_BAYER_GAIN1_REG,
+ SUN6I_ISP_BAYER_GAIN1_GB(bayer->gain_gb) |
+ SUN6I_ISP_BAYER_GAIN1_B(bayer->gain_b));
+}
+
+static void sun6i_isp_params_configure_wb(struct sun6i_isp_device *isp_dev)
+{
+ /* These are default values that need to be set to get an output. */
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_WB_GAIN0_REG,
+ SUN6I_ISP_WB_GAIN0_R(256) |
+ SUN6I_ISP_WB_GAIN0_GR(256));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_WB_GAIN1_REG,
+ SUN6I_ISP_WB_GAIN1_GB(256) |
+ SUN6I_ISP_WB_GAIN1_B(256));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_WB_CFG_REG,
+ SUN6I_ISP_WB_CFG_CLIP(0xfff));
+}
+
+static void sun6i_isp_params_configure_base(struct sun6i_isp_device *isp_dev)
+{
+ sun6i_isp_params_configure_ae(isp_dev);
+ sun6i_isp_params_configure_ob(isp_dev);
+ sun6i_isp_params_configure_wb(isp_dev);
+}
+
+static void
+sun6i_isp_params_configure_bdnf(struct sun6i_isp_device *isp_dev,
+ const struct sun6i_isp_params_config *config)
+{
+ const struct sun6i_isp_params_config_bdnf *bdnf = &config->bdnf;
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_BDNF_CFG_REG,
+ SUN6I_ISP_BDNF_CFG_IN_DIS_MIN(bdnf->in_dis_min) |
+ SUN6I_ISP_BDNF_CFG_IN_DIS_MAX(bdnf->in_dis_max));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_BDNF_COEF_RB_REG,
+ SUN6I_ISP_BDNF_COEF_RB(0, bdnf->coefficients_rb[0]) |
+ SUN6I_ISP_BDNF_COEF_RB(1, bdnf->coefficients_rb[1]) |
+ SUN6I_ISP_BDNF_COEF_RB(2, bdnf->coefficients_rb[2]) |
+ SUN6I_ISP_BDNF_COEF_RB(3, bdnf->coefficients_rb[3]) |
+ SUN6I_ISP_BDNF_COEF_RB(4, bdnf->coefficients_rb[4]));
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_BDNF_COEF_G_REG,
+ SUN6I_ISP_BDNF_COEF_G(0, bdnf->coefficients_g[0]) |
+ SUN6I_ISP_BDNF_COEF_G(1, bdnf->coefficients_g[1]) |
+ SUN6I_ISP_BDNF_COEF_G(2, bdnf->coefficients_g[2]) |
+ SUN6I_ISP_BDNF_COEF_G(3, bdnf->coefficients_g[3]) |
+ SUN6I_ISP_BDNF_COEF_G(4, bdnf->coefficients_g[4]) |
+ SUN6I_ISP_BDNF_COEF_G(5, bdnf->coefficients_g[5]) |
+ SUN6I_ISP_BDNF_COEF_G(6, bdnf->coefficients_g[6]));
+}
+
+static void
+sun6i_isp_params_configure_modules(struct sun6i_isp_device *isp_dev,
+ const struct sun6i_isp_params_config *config)
+{
+ u32 value;
+
+ if (config->modules_used & SUN6I_ISP_MODULE_BDNF)
+ sun6i_isp_params_configure_bdnf(isp_dev, config);
+
+ if (config->modules_used & SUN6I_ISP_MODULE_BAYER)
+ sun6i_isp_params_configure_bayer(isp_dev, config);
+
+ value = sun6i_isp_load_read(isp_dev, SUN6I_ISP_MODULE_EN_REG);
+ /* Clear all modules but keep input configuration. */
+ value &= SUN6I_ISP_MODULE_EN_SRC0 | SUN6I_ISP_MODULE_EN_SRC1;
+
+ if (config->modules_used & SUN6I_ISP_MODULE_BDNF)
+ value |= SUN6I_ISP_MODULE_EN_BDNF;
+
+ /* Bayer stage is always enabled. */
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MODULE_EN_REG, value);
+}
+
+void sun6i_isp_params_configure(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_params_state *state = &isp_dev->params.state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+
+ sun6i_isp_params_configure_base(isp_dev);
+
+ /* Default config is only applied at the very first stream start. */
+ if (state->configured)
+ goto complete;
+
+ sun6i_isp_params_configure_modules(isp_dev,
+ &sun6i_isp_params_config_default);
+
+ state->configured = true;
+
+complete:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+/* State */
+
+static void sun6i_isp_params_state_cleanup(struct sun6i_isp_device *isp_dev,
+ bool error)
+{
+ struct sun6i_isp_params_state *state = &isp_dev->params.state;
+ struct sun6i_isp_buffer *isp_buffer;
+ struct vb2_buffer *vb2_buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+
+ if (state->pending) {
+ vb2_buffer = &state->pending->v4l2_buffer.vb2_buf;
+ vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
+
+ state->pending = NULL;
+ }
+
+ list_for_each_entry(isp_buffer, &state->queue, list) {
+ vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
+ vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
+ }
+
+ INIT_LIST_HEAD(&state->queue);
+
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+void sun6i_isp_params_state_update(struct sun6i_isp_device *isp_dev,
+ bool *update)
+{
+ struct sun6i_isp_params_state *state = &isp_dev->params.state;
+ struct sun6i_isp_buffer *isp_buffer;
+ struct vb2_buffer *vb2_buffer;
+ const struct sun6i_isp_params_config *config;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+
+ if (list_empty(&state->queue))
+ goto complete;
+
+ if (state->pending)
+ goto complete;
+
+ isp_buffer = list_first_entry(&state->queue, struct sun6i_isp_buffer,
+ list);
+
+ vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
+ config = vb2_plane_vaddr(vb2_buffer, 0);
+
+ sun6i_isp_params_configure_modules(isp_dev, config);
+
+ list_del(&isp_buffer->list);
+
+ state->pending = isp_buffer;
+
+ if (update)
+ *update = true;
+
+complete:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+void sun6i_isp_params_state_complete(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_params_state *state = &isp_dev->params.state;
+ struct sun6i_isp_buffer *isp_buffer;
+ struct vb2_buffer *vb2_buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+
+ if (!state->pending)
+ goto complete;
+
+ isp_buffer = state->pending;
+ vb2_buffer = &isp_buffer->v4l2_buffer.vb2_buf;
+
+ vb2_buffer->timestamp = ktime_get_ns();
+
+ /* Parameters will be applied starting from the next frame. */
+ isp_buffer->v4l2_buffer.sequence = isp_dev->capture.state.sequence + 1;
+
+ vb2_buffer_done(vb2_buffer, VB2_BUF_STATE_DONE);
+
+ state->pending = NULL;
+
+complete:
+ spin_unlock_irqrestore(&state->lock, flags);
+}
+
+/* Queue */
+
+static int sun6i_isp_params_queue_setup(struct vb2_queue *queue,
+ unsigned int *buffers_count,
+ unsigned int *planes_count,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
+ unsigned int size = isp_dev->params.format.fmt.meta.buffersize;
+
+ if (*planes_count)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *planes_count = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int sun6i_isp_params_buffer_prepare(struct vb2_buffer *vb2_buffer)
+{
+ struct sun6i_isp_device *isp_dev =
+ vb2_get_drv_priv(vb2_buffer->vb2_queue);
+ struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
+ unsigned int size = isp_dev->params.format.fmt.meta.buffersize;
+
+ if (vb2_plane_size(vb2_buffer, 0) < size) {
+ v4l2_err(v4l2_dev, "buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb2_buffer, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb2_buffer, 0, size);
+
+ return 0;
+}
+
+static void sun6i_isp_params_buffer_queue(struct vb2_buffer *vb2_buffer)
+{
+ struct sun6i_isp_device *isp_dev =
+ vb2_get_drv_priv(vb2_buffer->vb2_queue);
+ struct sun6i_isp_params_state *state = &isp_dev->params.state;
+ struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(vb2_buffer);
+ struct sun6i_isp_buffer *isp_buffer =
+ container_of(v4l2_buffer, struct sun6i_isp_buffer, v4l2_buffer);
+ bool capture_streaming = isp_dev->capture.state.streaming;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->lock, flags);
+ list_add_tail(&isp_buffer->list, &state->queue);
+ spin_unlock_irqrestore(&state->lock, flags);
+
+ if (state->streaming && capture_streaming)
+ sun6i_isp_state_update(isp_dev, false);
+}
+
+static int sun6i_isp_params_start_streaming(struct vb2_queue *queue,
+ unsigned int count)
+{
+ struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
+ struct sun6i_isp_params_state *state = &isp_dev->params.state;
+ bool capture_streaming = isp_dev->capture.state.streaming;
+
+ state->streaming = true;
+
+ /*
+ * Update the state as soon as possible if capture is streaming,
+ * otherwise it will be applied when capture starts streaming.
+ */
+
+ if (capture_streaming)
+ sun6i_isp_state_update(isp_dev, false);
+
+ return 0;
+}
+
+static void sun6i_isp_params_stop_streaming(struct vb2_queue *queue)
+{
+ struct sun6i_isp_device *isp_dev = vb2_get_drv_priv(queue);
+ struct sun6i_isp_params_state *state = &isp_dev->params.state;
+
+ state->streaming = false;
+ sun6i_isp_params_state_cleanup(isp_dev, true);
+}
+
+static const struct vb2_ops sun6i_isp_params_queue_ops = {
+ .queue_setup = sun6i_isp_params_queue_setup,
+ .buf_prepare = sun6i_isp_params_buffer_prepare,
+ .buf_queue = sun6i_isp_params_buffer_queue,
+ .start_streaming = sun6i_isp_params_start_streaming,
+ .stop_streaming = sun6i_isp_params_stop_streaming,
+};
+
+/* Video Device */
+
+static int sun6i_isp_params_querycap(struct file *file, void *priv,
+ struct v4l2_capability *capability)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+ struct video_device *video_dev = &isp_dev->params.video_dev;
+
+ strscpy(capability->driver, SUN6I_ISP_NAME, sizeof(capability->driver));
+ strscpy(capability->card, video_dev->name, sizeof(capability->card));
+ snprintf(capability->bus_info, sizeof(capability->bus_info),
+ "platform:%s", dev_name(isp_dev->dev));
+
+ return 0;
+}
+
+static int sun6i_isp_params_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmtdesc)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+ struct v4l2_meta_format *params_format =
+ &isp_dev->params.format.fmt.meta;
+
+ if (fmtdesc->index > 0)
+ return -EINVAL;
+
+ fmtdesc->pixelformat = params_format->dataformat;
+
+ return 0;
+}
+
+static int sun6i_isp_params_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *format)
+{
+ struct sun6i_isp_device *isp_dev = video_drvdata(file);
+
+ *format = isp_dev->params.format;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops sun6i_isp_params_ioctl_ops = {
+ .vidioc_querycap = sun6i_isp_params_querycap,
+
+ .vidioc_enum_fmt_meta_out = sun6i_isp_params_enum_fmt,
+ .vidioc_g_fmt_meta_out = sun6i_isp_params_g_fmt,
+ .vidioc_s_fmt_meta_out = sun6i_isp_params_g_fmt,
+ .vidioc_try_fmt_meta_out = sun6i_isp_params_g_fmt,
+
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations sun6i_isp_params_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+/* Params */
+
+int sun6i_isp_params_setup(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_params *params = &isp_dev->params;
+ struct sun6i_isp_params_state *state = &params->state;
+ struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
+ struct v4l2_subdev *proc_subdev = &isp_dev->proc.subdev;
+ struct video_device *video_dev = &params->video_dev;
+ struct vb2_queue *queue = &isp_dev->params.queue;
+ struct media_pad *pad = &isp_dev->params.pad;
+ struct v4l2_format *format = &isp_dev->params.format;
+ struct v4l2_meta_format *params_format = &format->fmt.meta;
+ int ret;
+
+ /* State */
+
+ INIT_LIST_HEAD(&state->queue);
+ spin_lock_init(&state->lock);
+
+ /* Media Pads */
+
+ pad->flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT;
+
+ ret = media_entity_pads_init(&video_dev->entity, 1, pad);
+ if (ret)
+ goto error_mutex;
+
+ /* Queue */
+
+ mutex_init(&params->lock);
+
+ queue->type = V4L2_BUF_TYPE_META_OUTPUT;
+ queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ queue->buf_struct_size = sizeof(struct sun6i_isp_buffer);
+ queue->ops = &sun6i_isp_params_queue_ops;
+ queue->mem_ops = &vb2_vmalloc_memops;
+ queue->min_queued_buffers = 1;
+ queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ queue->lock = &params->lock;
+ queue->dev = isp_dev->dev;
+ queue->drv_priv = isp_dev;
+
+ ret = vb2_queue_init(queue);
+ if (ret) {
+ v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret);
+ goto error_media_entity;
+ }
+
+ /* V4L2 Format */
+
+ format->type = queue->type;
+ params_format->dataformat = V4L2_META_FMT_SUN6I_ISP_PARAMS;
+ params_format->buffersize = sizeof(struct sun6i_isp_params_config);
+
+ /* Video Device */
+
+ strscpy(video_dev->name, SUN6I_ISP_PARAMS_NAME,
+ sizeof(video_dev->name));
+ video_dev->device_caps = V4L2_CAP_META_OUTPUT | V4L2_CAP_STREAMING;
+ video_dev->vfl_dir = VFL_DIR_TX;
+ video_dev->release = video_device_release_empty;
+ video_dev->fops = &sun6i_isp_params_fops;
+ video_dev->ioctl_ops = &sun6i_isp_params_ioctl_ops;
+ video_dev->v4l2_dev = v4l2_dev;
+ video_dev->queue = queue;
+ video_dev->lock = &params->lock;
+
+ video_set_drvdata(video_dev, isp_dev);
+
+ ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(v4l2_dev, "failed to register video device: %d\n",
+ ret);
+ goto error_media_entity;
+ }
+
+ /* Media Pad Link */
+
+ ret = media_create_pad_link(&video_dev->entity, 0,
+ &proc_subdev->entity,
+ SUN6I_ISP_PROC_PAD_SINK_PARAMS,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "failed to create %s:%u -> %s:%u link\n",
+ video_dev->entity.name, 0, proc_subdev->entity.name,
+ SUN6I_ISP_PROC_PAD_SINK_PARAMS);
+ goto error_video_device;
+ }
+
+ return 0;
+
+error_video_device:
+ vb2_video_unregister_device(video_dev);
+
+error_media_entity:
+ media_entity_cleanup(&video_dev->entity);
+
+error_mutex:
+ mutex_destroy(&params->lock);
+
+ return ret;
+}
+
+void sun6i_isp_params_cleanup(struct sun6i_isp_device *isp_dev)
+{
+ struct sun6i_isp_params *params = &isp_dev->params;
+ struct video_device *video_dev = &params->video_dev;
+
+ vb2_video_unregister_device(video_dev);
+ media_entity_cleanup(&video_dev->entity);
+ mutex_destroy(&params->lock);
+}
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.h b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.h
new file mode 100644
index 000000000000..50f10f879c42
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN6I_ISP_PARAMS_H_
+#define _SUN6I_ISP_PARAMS_H_
+
+#include <media/v4l2-device.h>
+
+#define SUN6I_ISP_PARAMS_NAME "sun6i-isp-params"
+
+struct sun6i_isp_device;
+
+struct sun6i_isp_params_state {
+ struct list_head queue;
+ spinlock_t lock; /* Queue and buffers lock. */
+
+ struct sun6i_isp_buffer *pending;
+
+ bool configured;
+ bool streaming;
+};
+
+struct sun6i_isp_params {
+ struct sun6i_isp_params_state state;
+
+ struct video_device video_dev;
+ struct vb2_queue queue;
+ struct mutex lock; /* Queue lock. */
+ struct media_pad pad;
+
+ struct v4l2_format format;
+};
+
+/* Params */
+
+void sun6i_isp_params_configure(struct sun6i_isp_device *isp_dev);
+
+/* State */
+
+void sun6i_isp_params_state_update(struct sun6i_isp_device *isp_dev,
+ bool *update);
+void sun6i_isp_params_state_complete(struct sun6i_isp_device *isp_dev);
+
+/* Params */
+
+int sun6i_isp_params_setup(struct sun6i_isp_device *isp_dev);
+void sun6i_isp_params_cleanup(struct sun6i_isp_device *isp_dev);
+
+#endif
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
new file mode 100644
index 000000000000..46a334b602f1
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "sun6i_isp.h"
+#include "sun6i_isp_capture.h"
+#include "sun6i_isp_params.h"
+#include "sun6i_isp_proc.h"
+#include "sun6i_isp_reg.h"
+
+/* Helpers */
+
+void sun6i_isp_proc_dimensions(struct sun6i_isp_device *isp_dev,
+ unsigned int *width, unsigned int *height)
+{
+ if (width)
+ *width = isp_dev->proc.mbus_format.width;
+ if (height)
+ *height = isp_dev->proc.mbus_format.height;
+}
+
+/* Format */
+
+static const struct sun6i_isp_proc_format sun6i_isp_proc_formats[] = {
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_BGGR,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_GBRG,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_GRBG,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_RGGB,
+ },
+
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_BGGR,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_GBRG,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_GRBG,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .input_format = SUN6I_ISP_INPUT_FMT_RAW_RGGB,
+ },
+};
+
+const struct sun6i_isp_proc_format *sun6i_isp_proc_format_find(u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun6i_isp_proc_formats); i++)
+ if (sun6i_isp_proc_formats[i].mbus_code == mbus_code)
+ return &sun6i_isp_proc_formats[i];
+
+ return NULL;
+}
+
+/* Processor */
+
+static void sun6i_isp_proc_irq_enable(struct sun6i_isp_device *isp_dev)
+{
+ struct regmap *regmap = isp_dev->regmap;
+
+ regmap_write(regmap, SUN6I_ISP_FE_INT_EN_REG,
+ SUN6I_ISP_FE_INT_EN_FINISH |
+ SUN6I_ISP_FE_INT_EN_START |
+ SUN6I_ISP_FE_INT_EN_PARA_SAVE |
+ SUN6I_ISP_FE_INT_EN_PARA_LOAD |
+ SUN6I_ISP_FE_INT_EN_SRC0_FIFO |
+ SUN6I_ISP_FE_INT_EN_ROT_FINISH);
+}
+
+static void sun6i_isp_proc_irq_disable(struct sun6i_isp_device *isp_dev)
+{
+ struct regmap *regmap = isp_dev->regmap;
+
+ regmap_write(regmap, SUN6I_ISP_FE_INT_EN_REG, 0);
+}
+
+static void sun6i_isp_proc_irq_clear(struct sun6i_isp_device *isp_dev)
+{
+ struct regmap *regmap = isp_dev->regmap;
+
+ regmap_write(regmap, SUN6I_ISP_FE_INT_EN_REG, 0);
+ regmap_write(regmap, SUN6I_ISP_FE_INT_STA_REG,
+ SUN6I_ISP_FE_INT_STA_CLEAR);
+}
+
+static void sun6i_isp_proc_enable(struct sun6i_isp_device *isp_dev,
+ struct sun6i_isp_proc_source *source)
+{
+ struct sun6i_isp_proc *proc = &isp_dev->proc;
+ struct regmap *regmap = isp_dev->regmap;
+ u8 mode;
+
+ /* Frontend */
+
+ if (source == &proc->source_csi0)
+ mode = SUN6I_ISP_SRC_MODE_CSI(0);
+ else
+ mode = SUN6I_ISP_SRC_MODE_CSI(1);
+
+ regmap_write(regmap, SUN6I_ISP_FE_CFG_REG,
+ SUN6I_ISP_FE_CFG_EN | SUN6I_ISP_FE_CFG_SRC0_MODE(mode));
+
+ regmap_write(regmap, SUN6I_ISP_FE_CTRL_REG,
+ SUN6I_ISP_FE_CTRL_VCAP_EN | SUN6I_ISP_FE_CTRL_PARA_READY);
+}
+
+static void sun6i_isp_proc_disable(struct sun6i_isp_device *isp_dev)
+{
+ struct regmap *regmap = isp_dev->regmap;
+
+ /* Frontend */
+
+ regmap_write(regmap, SUN6I_ISP_FE_CTRL_REG, 0);
+ regmap_write(regmap, SUN6I_ISP_FE_CFG_REG, 0);
+}
+
+static void sun6i_isp_proc_configure(struct sun6i_isp_device *isp_dev)
+{
+ struct v4l2_mbus_framefmt *mbus_format = &isp_dev->proc.mbus_format;
+ const struct sun6i_isp_proc_format *format;
+ u32 value;
+
+ /* Module */
+
+ value = sun6i_isp_load_read(isp_dev, SUN6I_ISP_MODULE_EN_REG);
+ value |= SUN6I_ISP_MODULE_EN_SRC0;
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MODULE_EN_REG, value);
+
+ /* Input */
+
+ format = sun6i_isp_proc_format_find(mbus_format->code);
+ if (WARN_ON(!format))
+ return;
+
+ sun6i_isp_load_write(isp_dev, SUN6I_ISP_MODE_REG,
+ SUN6I_ISP_MODE_INPUT_FMT(format->input_format) |
+ SUN6I_ISP_MODE_INPUT_YUV_SEQ(format->input_yuv_seq) |
+ SUN6I_ISP_MODE_SHARP(1) |
+ SUN6I_ISP_MODE_HIST(2));
+}
+
+/* V4L2 Subdev */
+
+static int sun6i_isp_proc_s_stream(struct v4l2_subdev *subdev, int on)
+{
+ struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
+ struct sun6i_isp_proc *proc = &isp_dev->proc;
+ struct media_pad *local_pad = &proc->pads[SUN6I_ISP_PROC_PAD_SINK_CSI];
+ struct device *dev = isp_dev->dev;
+ struct sun6i_isp_proc_source *source;
+ struct v4l2_subdev *source_subdev;
+ struct media_pad *remote_pad;
+ int ret;
+
+ /* Source */
+
+ remote_pad = media_pad_remote_pad_unique(local_pad);
+ if (IS_ERR(remote_pad)) {
+ dev_err(dev,
+ "zero or more than a single source connected to the bridge\n");
+ return PTR_ERR(remote_pad);
+ }
+
+ source_subdev = media_entity_to_v4l2_subdev(remote_pad->entity);
+
+ if (source_subdev == proc->source_csi0.subdev)
+ source = &proc->source_csi0;
+ else
+ source = &proc->source_csi1;
+
+ if (!on) {
+ sun6i_isp_proc_irq_disable(isp_dev);
+ v4l2_subdev_call(source_subdev, video, s_stream, 0);
+ ret = 0;
+ goto disable;
+ }
+
+ /* PM */
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Clear */
+
+ sun6i_isp_proc_irq_clear(isp_dev);
+
+ /* Configure */
+
+ sun6i_isp_tables_configure(isp_dev);
+ sun6i_isp_params_configure(isp_dev);
+ sun6i_isp_proc_configure(isp_dev);
+ sun6i_isp_capture_configure(isp_dev);
+
+ /* State Update */
+
+ sun6i_isp_state_update(isp_dev, true);
+
+ /* Enable */
+
+ sun6i_isp_proc_irq_enable(isp_dev);
+ sun6i_isp_proc_enable(isp_dev, source);
+
+ ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ sun6i_isp_proc_irq_disable(isp_dev);
+ goto disable;
+ }
+
+ return 0;
+
+disable:
+ sun6i_isp_proc_disable(isp_dev);
+
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops sun6i_isp_proc_video_ops = {
+ .s_stream = sun6i_isp_proc_s_stream,
+};
+
+static void
+sun6i_isp_proc_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
+{
+ if (!sun6i_isp_proc_format_find(mbus_format->code))
+ mbus_format->code = sun6i_isp_proc_formats[0].mbus_code;
+
+ mbus_format->field = V4L2_FIELD_NONE;
+ mbus_format->colorspace = V4L2_COLORSPACE_RAW;
+ mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int sun6i_isp_proc_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
+{
+ struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
+ unsigned int pad = SUN6I_ISP_PROC_PAD_SINK_CSI;
+ struct v4l2_mbus_framefmt *mbus_format =
+ v4l2_subdev_state_get_format(state, pad);
+ struct mutex *lock = &isp_dev->proc.lock;
+
+ mutex_lock(lock);
+
+ mbus_format->code = sun6i_isp_proc_formats[0].mbus_code;
+ mbus_format->width = 1280;
+ mbus_format->height = 720;
+
+ sun6i_isp_proc_mbus_format_prepare(mbus_format);
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static int
+sun6i_isp_proc_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code_enum)
+{
+ if (code_enum->index >= ARRAY_SIZE(sun6i_isp_proc_formats))
+ return -EINVAL;
+
+ code_enum->code = sun6i_isp_proc_formats[code_enum->index].mbus_code;
+
+ return 0;
+}
+
+static int sun6i_isp_proc_get_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ struct mutex *lock = &isp_dev->proc.lock;
+
+ mutex_lock(lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *mbus_format = *v4l2_subdev_state_get_format(state,
+ format->pad);
+ else
+ *mbus_format = isp_dev->proc.mbus_format;
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static int sun6i_isp_proc_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
+ struct v4l2_mbus_framefmt *mbus_format = &format->format;
+ struct mutex *lock = &isp_dev->proc.lock;
+
+ mutex_lock(lock);
+
+ sun6i_isp_proc_mbus_format_prepare(mbus_format);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ *v4l2_subdev_state_get_format(state, format->pad) =
+ *mbus_format;
+ else
+ isp_dev->proc.mbus_format = *mbus_format;
+
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops sun6i_isp_proc_pad_ops = {
+ .enum_mbus_code = sun6i_isp_proc_enum_mbus_code,
+ .get_fmt = sun6i_isp_proc_get_fmt,
+ .set_fmt = sun6i_isp_proc_set_fmt,
+};
+
+static const struct v4l2_subdev_ops sun6i_isp_proc_subdev_ops = {
+ .video = &sun6i_isp_proc_video_ops,
+ .pad = &sun6i_isp_proc_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops sun6i_isp_proc_internal_ops = {
+ .init_state = sun6i_isp_proc_init_state,
+};
+
+/* Media Entity */
+
+static const struct media_entity_operations sun6i_isp_proc_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* V4L2 Async */
+
+static int sun6i_isp_proc_link(struct sun6i_isp_device *isp_dev,
+ int sink_pad_index,
+ struct v4l2_subdev *remote_subdev, bool enabled)
+{
+ struct device *dev = isp_dev->dev;
+ struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
+ struct media_entity *sink_entity = &subdev->entity;
+ struct media_entity *source_entity = &remote_subdev->entity;
+ int source_pad_index;
+ int ret;
+
+ /* Get the first remote source pad. */
+ ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "missing source pad in external entity %s\n",
+ source_entity->name);
+ return -EINVAL;
+ }
+
+ source_pad_index = ret;
+
+ dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
+ source_pad_index, sink_entity->name, sink_pad_index);
+
+ ret = media_create_pad_link(source_entity, source_pad_index,
+ sink_entity, sink_pad_index,
+ enabled ? MEDIA_LNK_FL_ENABLED : 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
+ source_entity->name, source_pad_index,
+ sink_entity->name, sink_pad_index);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sun6i_isp_proc_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *remote_subdev,
+ struct v4l2_async_connection *async_subdev)
+{
+ struct sun6i_isp_device *isp_dev =
+ container_of(notifier, struct sun6i_isp_device, proc.notifier);
+ struct sun6i_isp_proc_async_subdev *proc_async_subdev =
+ container_of(async_subdev, struct sun6i_isp_proc_async_subdev,
+ async_subdev);
+ struct sun6i_isp_proc *proc = &isp_dev->proc;
+ struct sun6i_isp_proc_source *source = proc_async_subdev->source;
+ bool enabled;
+
+ switch (source->endpoint.base.port) {
+ case SUN6I_ISP_PORT_CSI0:
+ source = &proc->source_csi0;
+ enabled = true;
+ break;
+ case SUN6I_ISP_PORT_CSI1:
+ source = &proc->source_csi1;
+ enabled = !proc->source_csi0.expected;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ source->subdev = remote_subdev;
+
+ return sun6i_isp_proc_link(isp_dev, SUN6I_ISP_PROC_PAD_SINK_CSI,
+ remote_subdev, enabled);
+}
+
+static int
+sun6i_isp_proc_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct sun6i_isp_device *isp_dev =
+ container_of(notifier, struct sun6i_isp_device, proc.notifier);
+ struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
+ int ret;
+
+ ret = v4l2_device_register_subdev_nodes(v4l2_dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations
+sun6i_isp_proc_notifier_ops = {
+ .bound = sun6i_isp_proc_notifier_bound,
+ .complete = sun6i_isp_proc_notifier_complete,
+};
+
+/* Processor */
+
+static int sun6i_isp_proc_source_setup(struct sun6i_isp_device *isp_dev,
+ struct sun6i_isp_proc_source *source,
+ u32 port)
+{
+ struct device *dev = isp_dev->dev;
+ struct v4l2_async_notifier *notifier = &isp_dev->proc.notifier;
+ struct v4l2_fwnode_endpoint *endpoint = &source->endpoint;
+ struct sun6i_isp_proc_async_subdev *proc_async_subdev;
+ struct fwnode_handle *handle = NULL;
+ int ret;
+
+ handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), port, 0, 0);
+ if (!handle)
+ return -ENODEV;
+
+ ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
+ if (ret)
+ goto complete;
+
+ proc_async_subdev =
+ v4l2_async_nf_add_fwnode_remote(notifier, handle,
+ struct
+ sun6i_isp_proc_async_subdev);
+ if (IS_ERR(proc_async_subdev)) {
+ ret = PTR_ERR(proc_async_subdev);
+ goto complete;
+ }
+
+ proc_async_subdev->source = source;
+
+ source->expected = true;
+
+complete:
+ fwnode_handle_put(handle);
+
+ return ret;
+}
+
+int sun6i_isp_proc_setup(struct sun6i_isp_device *isp_dev)
+{
+ struct device *dev = isp_dev->dev;
+ struct sun6i_isp_proc *proc = &isp_dev->proc;
+ struct v4l2_device *v4l2_dev = &isp_dev->v4l2.v4l2_dev;
+ struct v4l2_async_notifier *notifier = &proc->notifier;
+ struct v4l2_subdev *subdev = &proc->subdev;
+ struct media_pad *pads = proc->pads;
+ int ret;
+
+ mutex_init(&proc->lock);
+
+ /* V4L2 Subdev */
+
+ v4l2_subdev_init(subdev, &sun6i_isp_proc_subdev_ops);
+ subdev->internal_ops = &sun6i_isp_proc_internal_ops;
+ strscpy(subdev->name, SUN6I_ISP_PROC_NAME, sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->owner = THIS_MODULE;
+ subdev->dev = dev;
+
+ v4l2_set_subdevdata(subdev, isp_dev);
+
+ /* Media Entity */
+
+ subdev->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
+ subdev->entity.ops = &sun6i_isp_proc_entity_ops;
+
+ /* Media Pads */
+
+ pads[SUN6I_ISP_PROC_PAD_SINK_CSI].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+ pads[SUN6I_ISP_PROC_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+ pads[SUN6I_ISP_PROC_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity, SUN6I_ISP_PROC_PAD_COUNT,
+ pads);
+ if (ret)
+ return ret;
+
+ /* V4L2 Subdev */
+
+ ret = v4l2_device_register_subdev(v4l2_dev, subdev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "failed to register v4l2 subdev: %d\n", ret);
+ goto error_media_entity;
+ }
+
+ /* V4L2 Async */
+
+ v4l2_async_nf_init(notifier, v4l2_dev);
+ notifier->ops = &sun6i_isp_proc_notifier_ops;
+
+ sun6i_isp_proc_source_setup(isp_dev, &proc->source_csi0,
+ SUN6I_ISP_PORT_CSI0);
+ sun6i_isp_proc_source_setup(isp_dev, &proc->source_csi1,
+ SUN6I_ISP_PORT_CSI1);
+
+ ret = v4l2_async_nf_register(notifier);
+ if (ret) {
+ v4l2_err(v4l2_dev,
+ "failed to register v4l2 async notifier: %d\n", ret);
+ goto error_v4l2_async_notifier;
+ }
+
+ return 0;
+
+error_v4l2_async_notifier:
+ v4l2_async_nf_cleanup(notifier);
+
+ v4l2_device_unregister_subdev(subdev);
+
+error_media_entity:
+ media_entity_cleanup(&subdev->entity);
+
+ return ret;
+}
+
+void sun6i_isp_proc_cleanup(struct sun6i_isp_device *isp_dev)
+{
+ struct v4l2_async_notifier *notifier = &isp_dev->proc.notifier;
+ struct v4l2_subdev *subdev = &isp_dev->proc.subdev;
+
+ v4l2_async_nf_unregister(notifier);
+ v4l2_async_nf_cleanup(notifier);
+
+ v4l2_device_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+}
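As a usage note for the processor subdev registered above, the sketch below sets the active format on its CSI sink pad from userspace. The /dev/v4l-subdev0 path and the 1280x720 SBGGR10 format are illustrative assumptions (the actual subdev node index depends on probe order), and error handling is reduced to early returns:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/media-bus-format.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	/* Node index is system-dependent: look for the "sun6i-isp-proc" subdev. */
	int fd = open("/dev/v4l-subdev0", O_RDWR);
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,	/* SUN6I_ISP_PROC_PAD_SINK_CSI */
	};

	if (fd < 0)
		return 1;

	/* Match the media bus format produced by the connected CSI controller. */
	format.format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
	format.format.width = 1280;
	format.format.height = 720;

	if (ioctl(fd, VIDIOC_SUBDEV_S_FMT, &format) < 0)
		return 1;

	return 0;
}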
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h
new file mode 100644
index 000000000000..db6738a39147
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN6I_ISP_PROC_H_
+#define _SUN6I_ISP_PROC_H_
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define SUN6I_ISP_PROC_NAME "sun6i-isp-proc"
+
+enum sun6i_isp_proc_pad {
+ SUN6I_ISP_PROC_PAD_SINK_CSI = 0,
+ SUN6I_ISP_PROC_PAD_SINK_PARAMS = 1,
+ SUN6I_ISP_PROC_PAD_SOURCE = 2,
+ SUN6I_ISP_PROC_PAD_COUNT = 3,
+};
+
+struct sun6i_isp_device;
+
+struct sun6i_isp_proc_format {
+ u32 mbus_code;
+ u8 input_format;
+ u8 input_yuv_seq;
+};
+
+struct sun6i_isp_proc_source {
+ struct v4l2_subdev *subdev;
+ struct v4l2_fwnode_endpoint endpoint;
+ bool expected;
+};
+
+struct sun6i_isp_proc_async_subdev {
+ struct v4l2_async_connection async_subdev;
+ struct sun6i_isp_proc_source *source;
+};
+
+struct sun6i_isp_proc {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[3];
+ struct v4l2_async_notifier notifier;
+ struct v4l2_mbus_framefmt mbus_format;
+ struct mutex lock; /* Mbus format lock. */
+
+ struct sun6i_isp_proc_source source_csi0;
+ struct sun6i_isp_proc_source source_csi1;
+};
+
+/* Helpers */
+
+void sun6i_isp_proc_dimensions(struct sun6i_isp_device *isp_dev,
+ unsigned int *width, unsigned int *height);
+
+/* Format */
+
+const struct sun6i_isp_proc_format *sun6i_isp_proc_format_find(u32 mbus_code);
+
+/* Proc */
+
+int sun6i_isp_proc_setup(struct sun6i_isp_device *isp_dev);
+void sun6i_isp_proc_cleanup(struct sun6i_isp_device *isp_dev);
+
+#endif
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_reg.h b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_reg.h
new file mode 100644
index 000000000000..83b9cdab2134
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_reg.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021-2022 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _SUN6I_ISP_REG_H_
+#define _SUN6I_ISP_REG_H_
+
+#include <linux/kernel.h>
+
+#define SUN6I_ISP_ADDR_VALUE(a) ((a) >> 2)
+
+/* Frontend */
+
+#define SUN6I_ISP_SRC_MODE_DRAM 0
+#define SUN6I_ISP_SRC_MODE_CSI(n) (1 + (n))
+
+#define SUN6I_ISP_FE_CFG_REG 0x0
+#define SUN6I_ISP_FE_CFG_EN BIT(0)
+#define SUN6I_ISP_FE_CFG_SRC0_MODE(v) (((v) << 8) & GENMASK(9, 8))
+#define SUN6I_ISP_FE_CFG_SRC1_MODE(v) (((v) << 16) & GENMASK(17, 16))
+
+#define SUN6I_ISP_FE_CTRL_REG 0x4
+#define SUN6I_ISP_FE_CTRL_SCAP_EN BIT(0)
+#define SUN6I_ISP_FE_CTRL_VCAP_EN BIT(1)
+#define SUN6I_ISP_FE_CTRL_PARA_READY BIT(2)
+#define SUN6I_ISP_FE_CTRL_LUT_UPDATE BIT(3)
+#define SUN6I_ISP_FE_CTRL_LENS_UPDATE BIT(4)
+#define SUN6I_ISP_FE_CTRL_GAMMA_UPDATE BIT(5)
+#define SUN6I_ISP_FE_CTRL_DRC_UPDATE BIT(6)
+#define SUN6I_ISP_FE_CTRL_DISC_UPDATE BIT(7)
+#define SUN6I_ISP_FE_CTRL_OUTPUT_SPEED_CTRL(v) (((v) << 16) & GENMASK(17, 16))
+#define SUN6I_ISP_FE_CTRL_VCAP_READ_START BIT(31)
+
+#define SUN6I_ISP_FE_INT_EN_REG 0x8
+#define SUN6I_ISP_FE_INT_EN_FINISH BIT(0)
+#define SUN6I_ISP_FE_INT_EN_START BIT(1)
+#define SUN6I_ISP_FE_INT_EN_PARA_SAVE BIT(2)
+#define SUN6I_ISP_FE_INT_EN_PARA_LOAD BIT(3)
+#define SUN6I_ISP_FE_INT_EN_SRC0_FIFO BIT(4)
+#define SUN6I_ISP_FE_INT_EN_SRC1_FIFO BIT(5)
+#define SUN6I_ISP_FE_INT_EN_ROT_FINISH BIT(6)
+#define SUN6I_ISP_FE_INT_EN_LINE_NUM_START BIT(7)
+
+#define SUN6I_ISP_FE_INT_STA_REG 0xc
+#define SUN6I_ISP_FE_INT_STA_CLEAR 0xff
+#define SUN6I_ISP_FE_INT_STA_FINISH BIT(0)
+#define SUN6I_ISP_FE_INT_STA_START BIT(1)
+#define SUN6I_ISP_FE_INT_STA_PARA_SAVE BIT(2)
+#define SUN6I_ISP_FE_INT_STA_PARA_LOAD BIT(3)
+#define SUN6I_ISP_FE_INT_STA_SRC0_FIFO BIT(4)
+#define SUN6I_ISP_FE_INT_STA_SRC1_FIFO BIT(5)
+#define SUN6I_ISP_FE_INT_STA_ROT_FINISH BIT(6)
+#define SUN6I_ISP_FE_INT_STA_LINE_NUM_START BIT(7)
+
+/* Only since sun9i-a80-isp. */
+#define SUN6I_ISP_FE_INT_LINE_NUM_REG 0x18
+#define SUN6I_ISP_FE_ROT_OF_CFG_REG 0x1c
+
+/* Buffers/tables */
+
+#define SUN6I_ISP_REG_LOAD_ADDR_REG 0x20
+#define SUN6I_ISP_REG_SAVE_ADDR_REG 0x24
+
+#define SUN6I_ISP_LUT_TABLE_ADDR_REG 0x28
+#define SUN6I_ISP_DRC_TABLE_ADDR_REG 0x2c
+#define SUN6I_ISP_STATS_ADDR_REG 0x30
+
+/* SRAM */
+
+#define SUN6I_ISP_SRAM_RW_OFFSET_REG 0x38
+#define SUN6I_ISP_SRAM_RW_DATA_REG 0x3c
+
+/* Global */
+
+#define SUN6I_ISP_MODULE_EN_REG 0x40
+#define SUN6I_ISP_MODULE_EN_AE BIT(0)
+#define SUN6I_ISP_MODULE_EN_OBC BIT(1)
+#define SUN6I_ISP_MODULE_EN_DPC_LUT BIT(2)
+#define SUN6I_ISP_MODULE_EN_DPC_OTF BIT(3)
+#define SUN6I_ISP_MODULE_EN_BDNF BIT(4)
+#define SUN6I_ISP_MODULE_EN_AWB BIT(6)
+#define SUN6I_ISP_MODULE_EN_WB BIT(7)
+#define SUN6I_ISP_MODULE_EN_LSC BIT(8)
+#define SUN6I_ISP_MODULE_EN_BGC BIT(9)
+#define SUN6I_ISP_MODULE_EN_SAP BIT(10)
+#define SUN6I_ISP_MODULE_EN_AF BIT(11)
+#define SUN6I_ISP_MODULE_EN_RGB2RGB BIT(12)
+#define SUN6I_ISP_MODULE_EN_RGB_DRC BIT(13)
+#define SUN6I_ISP_MODULE_EN_TDNF BIT(15)
+#define SUN6I_ISP_MODULE_EN_AFS BIT(16)
+#define SUN6I_ISP_MODULE_EN_HIST BIT(17)
+#define SUN6I_ISP_MODULE_EN_YUV_GAIN_OFFSET BIT(18)
+#define SUN6I_ISP_MODULE_EN_YUV_DRC BIT(19)
+#define SUN6I_ISP_MODULE_EN_TG BIT(20)
+#define SUN6I_ISP_MODULE_EN_ROT BIT(21)
+#define SUN6I_ISP_MODULE_EN_CONTRAST BIT(22)
+#define SUN6I_ISP_MODULE_EN_SATU BIT(24)
+#define SUN6I_ISP_MODULE_EN_SRC1 BIT(30)
+#define SUN6I_ISP_MODULE_EN_SRC0 BIT(31)
+
+#define SUN6I_ISP_MODE_REG 0x44
+#define SUN6I_ISP_MODE_INPUT_FMT(v) ((v) & GENMASK(2, 0))
+#define SUN6I_ISP_MODE_INPUT_YUV_SEQ(v) (((v) << 3) & GENMASK(4, 3))
+#define SUN6I_ISP_MODE_OTF_DPC(v) (((v) << 16) & BIT(16))
+#define SUN6I_ISP_MODE_SHARP(v) (((v) << 17) & BIT(17))
+#define SUN6I_ISP_MODE_HIST(v) (((v) << 20) & GENMASK(21, 20))
+
+#define SUN6I_ISP_INPUT_FMT_YUV420 0
+#define SUN6I_ISP_INPUT_FMT_YUV422 1
+#define SUN6I_ISP_INPUT_FMT_RAW_BGGR 4
+#define SUN6I_ISP_INPUT_FMT_RAW_RGGB 5
+#define SUN6I_ISP_INPUT_FMT_RAW_GBRG 6
+#define SUN6I_ISP_INPUT_FMT_RAW_GRBG 7
+
+#define SUN6I_ISP_INPUT_YUV_SEQ_YUYV 0
+#define SUN6I_ISP_INPUT_YUV_SEQ_YVYU 1
+#define SUN6I_ISP_INPUT_YUV_SEQ_UYVY 2
+#define SUN6I_ISP_INPUT_YUV_SEQ_VYUY 3
+
+#define SUN6I_ISP_IN_CFG_REG 0x48
+#define SUN6I_ISP_IN_CFG_STRIDE_DIV16(v) ((v) & GENMASK(10, 0))
+
+#define SUN6I_ISP_IN_LUMA_RGB_ADDR0_REG 0x4c
+#define SUN6I_ISP_IN_CHROMA_ADDR0_REG 0x50
+#define SUN6I_ISP_IN_LUMA_RGB_ADDR1_REG 0x54
+#define SUN6I_ISP_IN_CHROMA_ADDR1_REG 0x58
+
+/* AE */
+
+#define SUN6I_ISP_AE_CFG_REG 0x60
+#define SUN6I_ISP_AE_CFG_LOW_BRI_TH(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_AE_CFG_HORZ_NUM(v) (((v) << 12) & GENMASK(15, 12))
+#define SUN6I_ISP_AE_CFG_HIGH_BRI_TH(v) (((v) << 16) & GENMASK(27, 16))
+#define SUN6I_ISP_AE_CFG_VERT_NUM(v) (((v) << 28) & GENMASK(31, 28))
+
+#define SUN6I_ISP_AE_SIZE_REG 0x64
+#define SUN6I_ISP_AE_SIZE_WIDTH(v) ((v) & GENMASK(10, 0))
+#define SUN6I_ISP_AE_SIZE_HEIGHT(v) (((v) << 16) & GENMASK(26, 16))
+
+#define SUN6I_ISP_AE_POS_REG 0x68
+#define SUN6I_ISP_AE_POS_HORZ_START(v) ((v) & GENMASK(10, 0))
+#define SUN6I_ISP_AE_POS_VERT_START(v) (((v) << 16) & GENMASK(26, 16))
+
+/* OB */
+
+#define SUN6I_ISP_OB_SIZE_REG 0x78
+#define SUN6I_ISP_OB_SIZE_WIDTH(v) ((v) & GENMASK(13, 0))
+#define SUN6I_ISP_OB_SIZE_HEIGHT(v) (((v) << 16) & GENMASK(29, 16))
+
+#define SUN6I_ISP_OB_VALID_REG 0x7c
+#define SUN6I_ISP_OB_VALID_WIDTH(v) ((v) & GENMASK(12, 0))
+#define SUN6I_ISP_OB_VALID_HEIGHT(v) (((v) << 16) & GENMASK(28, 16))
+
+#define SUN6I_ISP_OB_SRC0_VALID_START_REG 0x80
+#define SUN6I_ISP_OB_SRC0_VALID_START_HORZ(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_OB_SRC0_VALID_START_VERT(v) (((v) << 16) & GENMASK(27, 16))
+
+#define SUN6I_ISP_OB_SRC1_VALID_START_REG 0x84
+#define SUN6I_ISP_OB_SRC1_VALID_START_HORZ(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_OB_SRC1_VALID_START_VERT(v) (((v) << 16) & GENMASK(27, 16))
+
+#define SUN6I_ISP_OB_SPRITE_REG 0x88
+#define SUN6I_ISP_OB_SPRITE_WIDTH(v) ((v) & GENMASK(12, 0))
+#define SUN6I_ISP_OB_SPRITE_HEIGHT(v) (((v) << 16) & GENMASK(28, 16))
+
+#define SUN6I_ISP_OB_SPRITE_START_REG 0x8c
+#define SUN6I_ISP_OB_SPRITE_START_HORZ(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_OB_SPRITE_START_VERT(v) (((v) << 16) & GENMASK(27, 16))
+
+#define SUN6I_ISP_OB_CFG_REG 0x90
+#define SUN6I_ISP_OB_HORZ_POS_REG 0x94
+#define SUN6I_ISP_OB_VERT_PARA_REG 0x98
+#define SUN6I_ISP_OB_OFFSET_FIXED_REG 0x9c
+
+/* BDNF */
+
+#define SUN6I_ISP_BDNF_CFG_REG 0xcc
+#define SUN6I_ISP_BDNF_CFG_IN_DIS_MIN(v) ((v) & GENMASK(7, 0))
+#define SUN6I_ISP_BDNF_CFG_IN_DIS_MAX(v) (((v) << 16) & GENMASK(23, 16))
+
+#define SUN6I_ISP_BDNF_COEF_RB_REG 0xd0
+#define SUN6I_ISP_BDNF_COEF_RB(i, v) (((v) << (4 * (i))) & \
+ GENMASK(4 * (i) + 3, 4 * (i)))
+
+#define SUN6I_ISP_BDNF_COEF_G_REG 0xd4
+#define SUN6I_ISP_BDNF_COEF_G(i, v) (((v) << (4 * (i))) & \
+ GENMASK(4 * (i) + 3, 4 * (i)))
+
+/* Bayer */
+
+#define SUN6I_ISP_BAYER_OFFSET0_REG 0xe0
+#define SUN6I_ISP_BAYER_OFFSET0_R(v) ((v) & GENMASK(12, 0))
+#define SUN6I_ISP_BAYER_OFFSET0_GR(v) (((v) << 16) & GENMASK(28, 16))
+
+#define SUN6I_ISP_BAYER_OFFSET1_REG 0xe4
+#define SUN6I_ISP_BAYER_OFFSET1_GB(v) ((v) & GENMASK(12, 0))
+#define SUN6I_ISP_BAYER_OFFSET1_B(v) (((v) << 16) & GENMASK(28, 16))
+
+#define SUN6I_ISP_BAYER_GAIN0_REG 0xe8
+#define SUN6I_ISP_BAYER_GAIN0_R(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_BAYER_GAIN0_GR(v) (((v) << 16) & GENMASK(27, 16))
+
+#define SUN6I_ISP_BAYER_GAIN1_REG 0xec
+#define SUN6I_ISP_BAYER_GAIN1_GB(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_BAYER_GAIN1_B(v) (((v) << 16) & GENMASK(27, 16))
+
+/* WB */
+
+#define SUN6I_ISP_WB_GAIN0_REG 0x140
+#define SUN6I_ISP_WB_GAIN0_R(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_WB_GAIN0_GR(v) (((v) << 16) & GENMASK(27, 16))
+
+#define SUN6I_ISP_WB_GAIN1_REG 0x144
+#define SUN6I_ISP_WB_GAIN1_GB(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_WB_GAIN1_B(v) (((v) << 16) & GENMASK(27, 16))
+
+#define SUN6I_ISP_WB_CFG_REG 0x148
+#define SUN6I_ISP_WB_CFG_CLIP(v) ((v) & GENMASK(11, 0))
+
+/* Global */
+
+#define SUN6I_ISP_MCH_SIZE_CFG_REG 0x1e0
+#define SUN6I_ISP_MCH_SIZE_CFG_WIDTH(v) ((v) & GENMASK(12, 0))
+#define SUN6I_ISP_MCH_SIZE_CFG_HEIGHT(v) (((v) << 16) & GENMASK(28, 16))
+
+#define SUN6I_ISP_MCH_SCALE_CFG_REG 0x1e4
+#define SUN6I_ISP_MCH_SCALE_CFG_X_RATIO(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_MCH_SCALE_CFG_Y_RATIO(v) (((v) << 16) & GENMASK(27, 16))
+#define SUN6I_ISP_MCH_SCALE_CFG_WEIGHT_SHIFT(v) (((v) << 28) & GENMASK(31, 28))
+
+#define SUN6I_ISP_SCH_SIZE_CFG_REG 0x1e8
+#define SUN6I_ISP_SCH_SIZE_CFG_WIDTH(v) ((v) & GENMASK(12, 0))
+#define SUN6I_ISP_SCH_SIZE_CFG_HEIGHT(v) (((v) << 16) & GENMASK(28, 16))
+
+#define SUN6I_ISP_SCH_SCALE_CFG_REG 0x1ec
+#define SUN6I_ISP_SCH_SCALE_CFG_X_RATIO(v) ((v) & GENMASK(11, 0))
+#define SUN6I_ISP_SCH_SCALE_CFG_Y_RATIO(v) (((v) << 16) & GENMASK(27, 16))
+#define SUN6I_ISP_SCH_SCALE_CFG_WEIGHT_SHIFT(v) (((v) << 28) & GENMASK(31, 28))
+
+#define SUN6I_ISP_MCH_CFG_REG 0x1f0
+#define SUN6I_ISP_MCH_CFG_EN BIT(0)
+#define SUN6I_ISP_MCH_CFG_SCALE_EN BIT(1)
+#define SUN6I_ISP_MCH_CFG_OUTPUT_FMT(v) (((v) << 2) & GENMASK(4, 2))
+#define SUN6I_ISP_MCH_CFG_MIRROR_EN BIT(5)
+#define SUN6I_ISP_MCH_CFG_FLIP_EN BIT(6)
+#define SUN6I_ISP_MCH_CFG_STRIDE_Y_DIV4(v) (((v) << 8) & GENMASK(18, 8))
+#define SUN6I_ISP_MCH_CFG_STRIDE_UV_DIV4(v) (((v) << 20) & GENMASK(30, 20))
+
+#define SUN6I_ISP_OUTPUT_FMT_YUV420SP 0
+#define SUN6I_ISP_OUTPUT_FMT_YUV422SP 1
+#define SUN6I_ISP_OUTPUT_FMT_YVU420SP 2
+#define SUN6I_ISP_OUTPUT_FMT_YVU422SP 3
+#define SUN6I_ISP_OUTPUT_FMT_YUV420P 4
+#define SUN6I_ISP_OUTPUT_FMT_YUV422P 5
+#define SUN6I_ISP_OUTPUT_FMT_YVU420P 6
+#define SUN6I_ISP_OUTPUT_FMT_YVU422P 7
+
+#define SUN6I_ISP_SCH_CFG_REG 0x1f4
+
+#define SUN6I_ISP_MCH_Y_ADDR0_REG 0x1f8
+#define SUN6I_ISP_MCH_U_ADDR0_REG 0x1fc
+#define SUN6I_ISP_MCH_V_ADDR0_REG 0x200
+#define SUN6I_ISP_MCH_Y_ADDR1_REG 0x204
+#define SUN6I_ISP_MCH_U_ADDR1_REG 0x208
+#define SUN6I_ISP_MCH_V_ADDR1_REG 0x20c
+#define SUN6I_ISP_SCH_Y_ADDR0_REG 0x210
+#define SUN6I_ISP_SCH_U_ADDR0_REG 0x214
+#define SUN6I_ISP_SCH_V_ADDR0_REG 0x218
+#define SUN6I_ISP_SCH_Y_ADDR1_REG 0x21c
+#define SUN6I_ISP_SCH_U_ADDR1_REG 0x220
+#define SUN6I_ISP_SCH_V_ADDR1_REG 0x224
+
+#endif
diff --git a/drivers/staging/media/sunxi/sun6i-isp/uapi/sun6i-isp-config.h b/drivers/staging/media/sunxi/sun6i-isp/uapi/sun6i-isp-config.h
new file mode 100644
index 000000000000..19c24c5fd322
--- /dev/null
+++ b/drivers/staging/media/sunxi/sun6i-isp/uapi/sun6i-isp-config.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR MIT) */
+/*
+ * Allwinner A31 ISP Configuration
+ */
+
+#ifndef _UAPI_SUN6I_ISP_CONFIG_H
+#define _UAPI_SUN6I_ISP_CONFIG_H
+
+#include <linux/types.h>
+
+#define V4L2_META_FMT_SUN6I_ISP_PARAMS v4l2_fourcc('S', '6', 'I', 'P') /* Allwinner A31 ISP Parameters */
+
+#define SUN6I_ISP_MODULE_BAYER (1U << 0)
+#define SUN6I_ISP_MODULE_BDNF (1U << 1)
+
+struct sun6i_isp_params_config_bayer {
+ __u16 offset_r;
+ __u16 offset_gr;
+ __u16 offset_gb;
+ __u16 offset_b;
+
+ __u16 gain_r;
+ __u16 gain_gr;
+ __u16 gain_gb;
+ __u16 gain_b;
+};
+
+struct sun6i_isp_params_config_bdnf {
+ __u8 in_dis_min;
+ __u8 in_dis_max;
+
+ __u8 coefficients_g[7];
+ __u8 coefficients_rb[5];
+};
+
+struct sun6i_isp_params_config {
+ __u32 modules_used;
+
+ struct sun6i_isp_params_config_bayer bayer;
+ struct sun6i_isp_params_config_bdnf bdnf;
+};
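+
+/*
+ * Illustrative sketch (not part of the ABI): userspace fills this structure
+ * and flags the blocks it wants applied in modules_used, e.g.:
+ *
+ *	struct sun6i_isp_params_config config = {
+ *		.modules_used = SUN6I_ISP_MODULE_BAYER,
+ *		.bayer = {
+ *			.offset_r  = 32, .offset_gr = 32,
+ *			.offset_gb = 32, .offset_b  = 32,
+ *			.gain_r  = 256, .gain_gr = 256,
+ *			.gain_gb = 256, .gain_b  = 256,
+ *		},
+ *	};
+ *
+ * The numeric values above are arbitrary examples; the structure is assumed
+ * to be exchanged through buffers using the V4L2_META_FMT_SUN6I_ISP_PARAMS
+ * format declared above.
+ */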
+
+#endif /* _UAPI_SUN6I_ISP_CONFIG_H */
diff --git a/drivers/staging/media/tegra-video/Kconfig b/drivers/staging/media/tegra-video/Kconfig
new file mode 100644
index 000000000000..c53441822fdf
--- /dev/null
+++ b/drivers/staging/media/tegra-video/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_TEGRA
+ tristate "NVIDIA Tegra VI driver"
+ depends on TEGRA_HOST1X
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ Choose this option if you have an NVIDIA Tegra SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tegra-video.
+
+config VIDEO_TEGRA_TPG
+ bool "NVIDIA Tegra VI driver TPG mode"
+ depends on VIDEO_TEGRA
+ depends on ARCH_TEGRA_210_SOC
+ help
+	  Say yes here to enable the Tegra internal TPG mode.
diff --git a/drivers/staging/media/tegra-video/Makefile b/drivers/staging/media/tegra-video/Makefile
new file mode 100644
index 000000000000..6c7552e05109
--- /dev/null
+++ b/drivers/staging/media/tegra-video/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+tegra-video-objs := \
+ video.o \
+ vi.o \
+ vip.o \
+ csi.o
+
+tegra-video-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20.o
+tegra-video-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
+obj-$(CONFIG_VIDEO_TEGRA) += tegra-video.o
diff --git a/drivers/staging/media/tegra-video/TODO b/drivers/staging/media/tegra-video/TODO
new file mode 100644
index 000000000000..c82108166894
--- /dev/null
+++ b/drivers/staging/media/tegra-video/TODO
@@ -0,0 +1,5 @@
+TODO list
+* Add support for Ganged mode.
+* Add RAW10 packed video format support to Tegra210 video formats.
+* Add support for suspend and resume.
+* Make sure v4l2-compliance tests pass with all of the above implementations.
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
new file mode 100644
index 000000000000..604185c00a1a
--- /dev/null
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -0,0 +1,862 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/device.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-fwnode.h>
+
+#include "csi.h"
+#include "video.h"
+
+#define MHZ 1000000
+
+static inline struct tegra_csi *
+host1x_client_to_csi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_csi, client);
+}
+
+static inline struct tegra_csi_channel *to_csi_chan(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct tegra_csi_channel, subdev);
+}
+
+/*
+ * CSI is a separate subdevice which has 6 source pads to generate
+ * test patterns. The CSI subdevice pad ops are used only for TPG and
+ * allow the TPG formats listed below.
+ */
+static const struct v4l2_mbus_framefmt tegra_csi_tpg_fmts[] = {
+ {
+ TEGRA_DEF_WIDTH,
+ TEGRA_DEF_HEIGHT,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ V4L2_FIELD_NONE,
+ V4L2_COLORSPACE_SRGB
+ },
+ {
+ TEGRA_DEF_WIDTH,
+ TEGRA_DEF_HEIGHT,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ V4L2_FIELD_NONE,
+ V4L2_COLORSPACE_SRGB
+ },
+};
+
+static const struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = {
+ { 1280, 720 },
+ { 1920, 1080 },
+ { 3840, 2160 },
+};
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+static int csi_enum_bus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
+ if (code->index >= ARRAY_SIZE(tegra_csi_tpg_fmts))
+ return -EINVAL;
+
+ code->code = tegra_csi_tpg_fmts[code->index].code;
+
+ return 0;
+}
+
+static int csi_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
+ fmt->format = csi_chan->format;
+
+ return 0;
+}
+
+static int csi_get_frmrate_table_index(struct tegra_csi *csi, u32 code,
+ u32 width, u32 height)
+{
+ const struct tpg_framerate *frmrate;
+ unsigned int i;
+
+ frmrate = csi->soc->tpg_frmrate_table;
+ for (i = 0; i < csi->soc->tpg_frmrate_table_size; i++) {
+ if (frmrate[i].code == code &&
+ frmrate[i].frmsize.width == width &&
+ frmrate[i].frmsize.height == height) {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void csi_chan_update_blank_intervals(struct tegra_csi_channel *csi_chan,
+ u32 code, u32 width, u32 height)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
+ int index;
+
+ index = csi_get_frmrate_table_index(csi_chan->csi, code,
+ width, height);
+ if (index >= 0) {
+ csi_chan->h_blank = frmrate[index].h_blank;
+ csi_chan->v_blank = frmrate[index].v_blank;
+ csi_chan->framerate = frmrate[index].framerate;
+ }
+}
+
+static int csi_enum_framesizes(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
+ if (fse->index >= ARRAY_SIZE(tegra_csi_tpg_sizes))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
+ if (fse->code == tegra_csi_tpg_fmts[i].code)
+ break;
+
+ if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
+ return -EINVAL;
+
+ fse->min_width = tegra_csi_tpg_sizes[fse->index].width;
+ fse->max_width = tegra_csi_tpg_sizes[fse->index].width;
+ fse->min_height = tegra_csi_tpg_sizes[fse->index].height;
+ fse->max_height = tegra_csi_tpg_sizes[fse->index].height;
+
+ return 0;
+}
+
+static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
+ int index;
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
+ /* one framerate per format and resolution */
+ if (fie->index > 0)
+ return -EINVAL;
+
+ index = csi_get_frmrate_table_index(csi_chan->csi, fie->code,
+ fie->width, fie->height);
+ if (index < 0)
+ return -EINVAL;
+
+ fie->interval.numerator = 1;
+ fie->interval.denominator = frmrate[index].framerate;
+
+ return 0;
+}
+
+static int csi_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct v4l2_mbus_framefmt *format = &fmt->format;
+ const struct v4l2_frmsize_discrete *sizes;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
+ sizes = v4l2_find_nearest_size(tegra_csi_tpg_sizes,
+ ARRAY_SIZE(tegra_csi_tpg_sizes),
+ width, height,
+				       format->width, format->height);
+ format->width = sizes->width;
+ format->height = sizes->height;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
+ if (format->code == tegra_csi_tpg_fmts[i].code)
+ break;
+
+ if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
+ i = 0;
+
+ format->code = tegra_csi_tpg_fmts[i].code;
+ format->field = V4L2_FIELD_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ /* update blanking intervals from frame rate table and format */
+ csi_chan_update_blank_intervals(csi_chan, format->code,
+ format->width, format->height);
+ csi_chan->format = *format;
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+static int tegra_csi_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *vfi)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (vfi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ vfi->interval.numerator = 1;
+ vfi->interval.denominator = csi_chan->framerate;
+
+ return 0;
+}
+
+static unsigned int csi_get_pixel_rate(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_vi_channel *chan;
+ struct v4l2_subdev *src_subdev;
+ struct v4l2_ctrl *ctrl;
+
+ chan = v4l2_get_subdev_hostdata(&csi_chan->subdev);
+ src_subdev = tegra_channel_get_remote_source_subdev(chan);
+ ctrl = v4l2_ctrl_find(src_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (ctrl)
+ return v4l2_ctrl_g_ctrl_int64(ctrl);
+
+ return 0;
+}
+
+void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
+ u8 csi_port_num,
+ u8 *clk_settle_time,
+ u8 *ths_settle_time)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int cil_clk_mhz;
+ unsigned int pix_clk_mhz;
+ int clk_idx = (csi_port_num >> 1) + 1;
+
+ cil_clk_mhz = clk_get_rate(csi->clks[clk_idx].clk) / MHZ;
+ pix_clk_mhz = csi_get_pixel_rate(csi_chan) / MHZ;
+
+ /*
+ * CLK Settle time is the interval during which HS receiver should
+ * ignore any clock lane HS transitions, starting from the beginning
+ * of T-CLK-PREPARE.
+	 * Per DPHY specification, T-CLK-SETTLE should be between 95 ns and 300 ns.
+ *
+ * 95ns < (clk-settle-programmed + 7) * lp clk period < 300ns
+ * midpoint = 197.5 ns
+ */
+ *clk_settle_time = ((95 + 300) * cil_clk_mhz - 14000) / 2000;
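+
+	/*
+	 * Illustrative check (hypothetical clock value, not from the TRM):
+	 * with the LP clock period being 1000 / cil_clk_mhz ns, solving the
+	 * midpoint (95 + 300) / 2 = (prog + 7) * 1000 / cil_clk_mhz for prog
+	 * gives the expression above. For cil_clk_mhz = 102 this yields
+	 * (395 * 102 - 14000) / 2000 = 13, and (13 + 7) * 1000 / 102 is about
+	 * 196 ns, well inside the 95-300 ns window.
+	 */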
+
+ /*
+ * THS Settle time is the interval during which HS receiver should
+ * ignore any data lane HS transitions, starting from the beginning
+ * of THS-PREPARE.
+ *
+	 * Per DPHY specification, T-HS-SETTLE should be between 85 ns + 6 UI
+	 * and 145 ns + 10 UI.
+ * 85ns + 6UI < (Ths-settle-prog + 5) * lp_clk_period < 145ns + 10UI
+ * midpoint = 115ns + 8UI
+ */
+ if (pix_clk_mhz)
+ *ths_settle_time = (115 * cil_clk_mhz + 8000 * cil_clk_mhz
+ / (2 * pix_clk_mhz) - 5000) / 1000;
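+
+	/*
+	 * Illustrative check (hypothetical clock values): with 1 UI taken
+	 * here as 1000 / (2 * pix_clk_mhz) ns, solving
+	 * 115 + 8 UI = (prog + 5) * 1000 / cil_clk_mhz for prog gives the
+	 * expression above. For cil_clk_mhz = 102 and pix_clk_mhz = 182 this
+	 * yields (115 * 102 + 8000 * 102 / 364 - 5000) / 1000 = 8, which
+	 * keeps the settle time inside the 85 ns + 6 UI to 145 ns + 10 UI
+	 * window.
+	 */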
+}
+
+static int tegra_csi_enable_stream(struct v4l2_subdev *subdev)
+{
+ struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ int ret, err;
+
+ ret = pm_runtime_resume_and_get(csi->dev);
+ if (ret < 0) {
+ dev_err(csi->dev, "failed to get runtime PM: %d\n", ret);
+ return ret;
+ }
+
+ if (csi_chan->mipi) {
+ ret = tegra_mipi_enable(csi_chan->mipi);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to enable MIPI pads: %d\n", ret);
+ goto rpm_put;
+ }
+
+ /*
+ * CSI MIPI pads PULLUP, PULLDN and TERM impedances need to
+ * be calibrated after power on.
+		 * So, trigger the calibration start here; the results will
+		 * be latched and applied to the pads when the link is in LP11
+		 * state at the start of sensor streaming.
+ */
+ ret = tegra_mipi_start_calibration(csi_chan->mipi);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to start MIPI calibration: %d\n", ret);
+ goto disable_mipi;
+ }
+ }
+
+ csi_chan->pg_mode = chan->pg_mode;
+
+ /*
+ * Tegra CSI receiver can detect the first LP to HS transition.
+ * So, start the CSI stream-on prior to sensor stream-on and
+ * vice-versa for stream-off.
+ */
+ ret = csi->ops->csi_start_streaming(csi_chan);
+ if (ret < 0)
+ goto finish_calibration;
+
+ if (csi_chan->mipi) {
+ struct v4l2_subdev *src_subdev;
+ /*
+		 * The TRM incorrectly documents waiting for the done status
+		 * from the calibration logic right after CSI interface power-on.
+		 * As per the design, calibration results are latched and applied
+		 * to the pads only when the link is in LP11 state, which happens
+		 * during sensor stream-on.
+		 * CSI subdev stream-on triggers the start of MIPI pads calibration,
+		 * so wait for calibration to finish here, after the sensor subdev
+		 * stream-on.
+ */
+ src_subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(src_subdev, video, s_stream, true);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto disable_csi_stream;
+
+ err = tegra_mipi_finish_calibration(csi_chan->mipi);
+ if (err < 0)
+ dev_warn(csi->dev, "MIPI calibration failed: %d\n", err);
+ }
+
+ return 0;
+
+disable_csi_stream:
+ csi->ops->csi_stop_streaming(csi_chan);
+finish_calibration:
+ if (csi_chan->mipi)
+ tegra_mipi_finish_calibration(csi_chan->mipi);
+disable_mipi:
+ if (csi_chan->mipi) {
+ err = tegra_mipi_disable(csi_chan->mipi);
+ if (err < 0)
+ dev_err(csi->dev,
+ "failed to disable MIPI pads: %d\n", err);
+ }
+
+rpm_put:
+ pm_runtime_put(csi->dev);
+ return ret;
+}
+
+static int tegra_csi_disable_stream(struct v4l2_subdev *subdev)
+{
+ struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ int err;
+
+ /*
+	 * Stream off subdevices in the reverse order of stream-on.
+	 * In TPG mode the remote source subdev is the same as the CSI subdev.
+ */
+ if (csi_chan->mipi) {
+ struct v4l2_subdev *src_subdev;
+
+ src_subdev = tegra_channel_get_remote_source_subdev(chan);
+ err = v4l2_subdev_call(src_subdev, video, s_stream, false);
+ if (err < 0 && err != -ENOIOCTLCMD)
+ dev_err_probe(csi->dev, err, "source subdev stream off failed\n");
+ }
+
+ csi->ops->csi_stop_streaming(csi_chan);
+
+ if (csi_chan->mipi) {
+ err = tegra_mipi_disable(csi_chan->mipi);
+ if (err < 0)
+ dev_err(csi->dev,
+ "failed to disable MIPI pads: %d\n", err);
+ }
+
+ pm_runtime_put(csi->dev);
+
+ return 0;
+}
+
+static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ int ret;
+
+ if (enable)
+ ret = tegra_csi_enable_stream(subdev);
+ else
+ ret = tegra_csi_disable_stream(subdev);
+
+ return ret;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+static const struct v4l2_subdev_video_ops tegra_csi_video_ops = {
+ .s_stream = tegra_csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
+ .enum_mbus_code = csi_enum_bus_code,
+ .enum_frame_size = csi_enum_framesizes,
+ .enum_frame_interval = csi_enum_frameintervals,
+ .get_fmt = csi_get_format,
+ .set_fmt = csi_set_format,
+ .get_frame_interval = tegra_csi_get_frame_interval,
+ .set_frame_interval = tegra_csi_get_frame_interval,
+};
+
+static const struct v4l2_subdev_ops tegra_csi_ops = {
+ .video = &tegra_csi_video_ops,
+ .pad = &tegra_csi_pad_ops,
+};
+
+static int tegra_csi_channel_alloc(struct tegra_csi *csi,
+ struct device_node *node,
+ unsigned int port_num, unsigned int lanes,
+ unsigned int num_pads)
+{
+ struct tegra_csi_channel *chan;
+ int ret = 0, i;
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ list_add_tail(&chan->list, &csi->csi_chans);
+ chan->csi = csi;
+ /*
+	 * Each CSI brick has a maximum of 4 lanes.
+	 * For more than 4 lanes, gang up multiple adjacent CSI bricks.
+ */
+ if (lanes <= CSI_LANES_PER_BRICK) {
+ chan->numlanes = lanes;
+ chan->numgangports = 1;
+ } else {
+ chan->numlanes = CSI_LANES_PER_BRICK;
+ chan->numgangports = lanes / CSI_LANES_PER_BRICK;
+ }
+
+ for (i = 0; i < chan->numgangports; i++)
+ chan->csi_port_nums[i] = port_num + i * CSI_PORTS_PER_BRICK;
+
+ chan->of_node = of_node_get(node);
+ chan->numpads = num_pads;
+ if (num_pads & 0x2) {
+ chan->pads[0].flags = MEDIA_PAD_FL_SINK;
+ chan->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ } else {
+ chan->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ }
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return 0;
+
+ chan->mipi = tegra_mipi_request(csi->dev, node);
+ if (IS_ERR(chan->mipi)) {
+ ret = PTR_ERR(chan->mipi);
+ chan->mipi = NULL;
+ dev_err(csi->dev, "failed to get mipi device: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int tegra_csi_tpg_channels_alloc(struct tegra_csi *csi)
+{
+ struct device_node *node = csi->dev->of_node;
+ unsigned int port_num;
+ unsigned int tpg_channels = csi->soc->csi_max_channels;
+ int ret;
+
+	/* allocate a CSI channel for each CSI x2 port */
+ for (port_num = 0; port_num < tpg_channels; port_num++) {
+ ret = tegra_csi_channel_alloc(csi, node, port_num, 2, 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_csi_channels_alloc(struct tegra_csi *csi)
+{
+ struct device_node *node = csi->dev->of_node;
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *fwh;
+ struct device_node *channel;
+ struct device_node *ep;
+ unsigned int lanes, portno, num_pads;
+ int ret;
+
+ for_each_child_of_node(node, channel) {
+ if (!of_node_name_eq(channel, "channel"))
+ continue;
+
+ ret = of_property_read_u32(channel, "reg", &portno);
+ if (ret < 0)
+ continue;
+
+ if (portno >= csi->soc->csi_max_channels) {
+ dev_err(csi->dev, "invalid port num %d for %pOF\n",
+ portno, channel);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ ep = of_graph_get_endpoint_by_regs(channel, 0, 0);
+ if (!ep)
+ continue;
+
+ fwh = of_fwnode_handle(ep);
+ ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
+ of_node_put(ep);
+ if (ret) {
+ dev_err(csi->dev,
+ "failed to parse v4l2 endpoint for %pOF: %d\n",
+ channel, ret);
+ goto err_node_put;
+ }
+
+ lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ /*
+		 * Each CSI brick has a maximum of 4 data lanes.
+		 * For more than 4 lanes, validate the lane count so that
+		 * multiple consecutive CSI bricks can be ganged up for
+		 * streaming.
+ */
+ if (!lanes || ((lanes & (lanes - 1)) != 0) ||
+ (lanes > CSI_LANES_PER_BRICK && ((portno & 1) != 0))) {
+ dev_err(csi->dev, "invalid data-lanes %d for %pOF\n",
+ lanes, channel);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ num_pads = of_graph_get_endpoint_count(channel);
+ if (num_pads == TEGRA_CSI_PADS_NUM) {
+ ret = tegra_csi_channel_alloc(csi, channel, portno,
+ lanes, num_pads);
+ if (ret < 0)
+ goto err_node_put;
+ }
+ }
+
+ return 0;
+
+err_node_put:
+ of_node_put(channel);
+ return ret;
+}
+
+static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
+{
+ struct tegra_csi *csi = chan->csi;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ /* initialize the default format */
+ chan->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.colorspace = V4L2_COLORSPACE_SRGB;
+ chan->format.width = TEGRA_DEF_WIDTH;
+ chan->format.height = TEGRA_DEF_HEIGHT;
+ csi_chan_update_blank_intervals(chan, chan->format.code,
+ chan->format.width,
+ chan->format.height);
+ /* initialize V4L2 subdevice and media entity */
+ subdev = &chan->subdev;
+ v4l2_subdev_init(subdev, &tegra_csi_ops);
+ subdev->dev = csi->dev;
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ snprintf(subdev->name, sizeof(subdev->name), "%s-%d", "tpg",
+ chan->csi_port_nums[0]);
+ else
+ snprintf(subdev->name, sizeof(subdev->name), "%s",
+ kbasename(chan->of_node->full_name));
+
+ v4l2_set_subdevdata(subdev, chan);
+ subdev->fwnode = of_fwnode_handle(chan->of_node);
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ /* initialize media entity pads */
+ ret = media_entity_pads_init(&subdev->entity, chan->numpads,
+ chan->pads);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to initialize media entity: %d\n", ret);
+ subdev->dev = NULL;
+ return ret;
+ }
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to register subdev: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void tegra_csi_error_recover(struct v4l2_subdev *sd)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(sd);
+ struct tegra_csi *csi = csi_chan->csi;
+
+ /* stop streaming during error recovery */
+ csi->ops->csi_stop_streaming(csi_chan);
+ csi->ops->csi_err_recover(csi_chan);
+ csi->ops->csi_start_streaming(csi_chan);
+}
+
+static int tegra_csi_channels_init(struct tegra_csi *csi)
+{
+ struct tegra_csi_channel *chan;
+ int ret;
+
+ list_for_each_entry(chan, &csi->csi_chans, list) {
+ ret = tegra_csi_channel_init(chan);
+ if (ret) {
+ dev_err(csi->dev,
+ "failed to initialize channel-%d: %d\n",
+ chan->csi_port_nums[0], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_csi_channels_cleanup(struct tegra_csi *csi)
+{
+ struct v4l2_subdev *subdev;
+ struct tegra_csi_channel *chan, *tmp;
+
+ list_for_each_entry_safe(chan, tmp, &csi->csi_chans, list) {
+ if (chan->mipi)
+ tegra_mipi_free(chan->mipi);
+
+ subdev = &chan->subdev;
+ if (subdev->dev) {
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ }
+
+ of_node_put(chan->of_node);
+ list_del(&chan->list);
+ kfree(chan);
+ }
+}
+
+static int __maybe_unused csi_runtime_suspend(struct device *dev)
+{
+ struct tegra_csi *csi = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(csi->soc->num_clks, csi->clks);
+
+ return 0;
+}
+
+static int __maybe_unused csi_runtime_resume(struct device *dev)
+{
+ struct tegra_csi *csi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(csi->soc->num_clks, csi->clks);
+ if (ret < 0) {
+ dev_err(csi->dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_csi_init(struct host1x_client *client)
+{
+ struct tegra_csi *csi = host1x_client_to_csi(client);
+ struct tegra_video_device *vid = dev_get_drvdata(client->host);
+ int ret;
+
+ INIT_LIST_HEAD(&csi->csi_chans);
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ ret = tegra_csi_tpg_channels_alloc(csi);
+ else
+ ret = tegra_csi_channels_alloc(csi);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to allocate channels: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = tegra_csi_channels_init(csi);
+ if (ret < 0)
+ goto cleanup;
+
+ vid->csi = csi;
+
+ return 0;
+
+cleanup:
+ tegra_csi_channels_cleanup(csi);
+ return ret;
+}
+
+static int tegra_csi_exit(struct host1x_client *client)
+{
+ struct tegra_csi *csi = host1x_client_to_csi(client);
+
+ tegra_csi_channels_cleanup(csi);
+
+ return 0;
+}
+
+static const struct host1x_client_ops csi_client_ops = {
+ .init = tegra_csi_init,
+ .exit = tegra_csi_exit,
+};
+
+static int tegra_csi_probe(struct platform_device *pdev)
+{
+ struct tegra_csi *csi;
+ unsigned int i;
+ int ret;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi->iomem))
+ return PTR_ERR(csi->iomem);
+
+ csi->soc = of_device_get_match_data(&pdev->dev);
+
+ csi->clks = devm_kcalloc(&pdev->dev, csi->soc->num_clks,
+ sizeof(*csi->clks), GFP_KERNEL);
+ if (!csi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < csi->soc->num_clks; i++)
+ csi->clks[i].id = csi->soc->clk_names[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, csi->soc->num_clks, csi->clks);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get the clocks: %d\n", ret);
+ return ret;
+ }
+
+ if (!pdev->dev.pm_domain) {
+ ret = -ENOENT;
+ dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
+ return ret;
+ }
+
+ csi->dev = &pdev->dev;
+ csi->ops = csi->soc->ops;
+ platform_set_drvdata(pdev, csi);
+ pm_runtime_enable(&pdev->dev);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&csi->client.list);
+ csi->client.ops = &csi_client_ops;
+ csi->client.dev = &pdev->dev;
+
+ ret = host1x_client_register(&csi->client);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to register host1x client: %d\n", ret);
+ goto rpm_disable;
+ }
+
+ return 0;
+
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static void tegra_csi_remove(struct platform_device *pdev)
+{
+ struct tegra_csi *csi = platform_get_drvdata(pdev);
+
+ host1x_client_unregister(&csi->client);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_csi_soc tegra210_csi_soc;
+#endif
+
+static const struct of_device_id tegra_csi_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-csi", .data = &tegra210_csi_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_csi_of_id_table);
+
+static const struct dev_pm_ops tegra_csi_pm_ops = {
+ SET_RUNTIME_PM_OPS(csi_runtime_suspend, csi_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_csi_driver = {
+ .driver = {
+ .name = "tegra-csi",
+ .of_match_table = tegra_csi_of_id_table,
+ .pm = &tegra_csi_pm_ops,
+ },
+ .probe = tegra_csi_probe,
+ .remove = tegra_csi_remove,
+};
diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h
new file mode 100644
index 000000000000..3e6e5ee1bb1e
--- /dev/null
+++ b/drivers/staging/media/tegra-video/csi.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_CSI_H__
+#define __TEGRA_CSI_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * Each CSI brick supports a maximum of 4 lanes, which can be used either
+ * as one x4 port using both the CILA and CILB partitions of a CSI brick,
+ * or as two x2 ports with one x2 from CILA and the other x2 from CILB.
+ */
+#define CSI_PORTS_PER_BRICK 2
+#define CSI_LANES_PER_BRICK 4
+
+/* Maximum 2 CSI x4 ports can be ganged up for streaming */
+#define GANG_PORTS_MAX 2
+
+/* each CSI channel can have one sink and one source pads */
+#define TEGRA_CSI_PADS_NUM 2
+
+enum tegra_csi_cil_port {
+ PORT_A = 0,
+ PORT_B,
+};
+
+enum tegra_csi_block {
+ CSI_CIL_AB = 0,
+ CSI_CIL_CD,
+ CSI_CIL_EF,
+};
+
+struct tegra_csi;
+
+/**
+ * struct tegra_csi_channel - Tegra CSI channel
+ *
+ * @list: list head for this entry
+ * @subdev: V4L2 subdevice associated with this channel
+ * @pads: media pads for the subdevice entity
+ * @numpads: number of pads.
+ * @csi: Tegra CSI device structure
+ * @of_node: csi device tree node
+ * @numgangports: number of immediate ports ganged up to meet the
+ * channel bus-width
+ * @numlanes: number of lanes used per port
+ * @csi_port_nums: CSI channel port numbers
+ * @pg_mode: test pattern generator mode for channel
+ * @format: active format of the channel
+ * @framerate: active framerate for TPG
+ * @h_blank: horizontal blanking for TPG active format
+ * @v_blank: vertical blanking for TPG active format
+ * @mipi: mipi device for corresponding csi channel pads, or NULL if not applicable (TPG, error)
+ * @pixel_rate: active pixel rate from the sensor on this channel
+ */
+struct tegra_csi_channel {
+ struct list_head list;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[TEGRA_CSI_PADS_NUM];
+ unsigned int numpads;
+ struct tegra_csi *csi;
+ struct device_node *of_node;
+ u8 numgangports;
+ unsigned int numlanes;
+ u8 csi_port_nums[GANG_PORTS_MAX];
+ u8 pg_mode;
+ struct v4l2_mbus_framefmt format;
+ unsigned int framerate;
+ unsigned int h_blank;
+ unsigned int v_blank;
+ struct tegra_mipi_device *mipi;
+ unsigned int pixel_rate;
+};
+
+/**
+ * struct tpg_framerate - Tegra CSI TPG framerate configuration
+ *
+ * @frmsize: frame resolution
+ * @code: media bus format code
+ * @h_blank: horizontal blanking used for TPG
+ * @v_blank: vertical blanking interval used for TPG
+ * @framerate: framerate achieved with the corresponding blanking intervals,
+ * format and resolution.
+ */
+struct tpg_framerate {
+ struct v4l2_frmsize_discrete frmsize;
+ u32 code;
+ unsigned int h_blank;
+ unsigned int v_blank;
+ unsigned int framerate;
+};
+
+/**
+ * struct tegra_csi_ops - Tegra CSI operations
+ *
+ * @csi_start_streaming: programs csi hardware to enable streaming.
+ * @csi_stop_streaming: programs csi hardware to disable streaming.
+ * @csi_err_recover: csi hardware block recovery in case of any capture errors
+ * due to missing source stream or due to improper csi input from
+ * the external source.
+ */
+struct tegra_csi_ops {
+ int (*csi_start_streaming)(struct tegra_csi_channel *csi_chan);
+ void (*csi_stop_streaming)(struct tegra_csi_channel *csi_chan);
+ void (*csi_err_recover)(struct tegra_csi_channel *csi_chan);
+};
+
+/**
+ * struct tegra_csi_soc - NVIDIA Tegra CSI SoC structure
+ *
+ * @ops: csi hardware operations
+ * @csi_max_channels: supported max streaming channels
+ * @clk_names: csi and cil clock names
+ * @num_clks: total clocks count
+ * @tpg_frmrate_table: csi tpg frame rate table with blanking intervals
+ * @tpg_frmrate_table_size: size of frame rate table
+ */
+struct tegra_csi_soc {
+ const struct tegra_csi_ops *ops;
+ unsigned int csi_max_channels;
+ const char * const *clk_names;
+ unsigned int num_clks;
+ const struct tpg_framerate *tpg_frmrate_table;
+ unsigned int tpg_frmrate_table_size;
+};
+
+/**
+ * struct tegra_csi - NVIDIA Tegra CSI device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @iomem: register base
+ * @clks: clock for CSI and CIL
+ * @soc: pointer to SoC data structure
+ * @ops: csi operations
+ * @csi_chans: list head for CSI channels
+ */
+struct tegra_csi {
+ struct device *dev;
+ struct host1x_client client;
+ void __iomem *iomem;
+ struct clk_bulk_data *clks;
+ const struct tegra_csi_soc *soc;
+ const struct tegra_csi_ops *ops;
+ struct list_head csi_chans;
+};
+
+void tegra_csi_error_recover(struct v4l2_subdev *subdev);
+void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
+ u8 csi_port_num,
+ u8 *clk_settle_time,
+ u8 *ths_settle_time);
+#endif
diff --git a/drivers/staging/media/tegra-video/tegra20.c b/drivers/staging/media/tegra-video/tegra20.c
new file mode 100644
index 000000000000..aa9ff7fec4f9
--- /dev/null
+++ b/drivers/staging/media/tegra-video/tegra20.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Tegra20-specific VI implementation
+ *
+ * Copyright (C) 2023 SKIDATA GmbH
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+/*
+ * This source file contains Tegra20 supported video formats,
+ * VI and VIP SoC specific data, operations and register accessors.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/v4l2-mediabus.h>
+
+#include "vip.h"
+#include "vi.h"
+
+#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
+
+/* These are just good-sense numbers. The actual min/max is not documented. */
+#define TEGRA20_MIN_WIDTH 32U
+#define TEGRA20_MIN_HEIGHT 32U
+#define TEGRA20_MAX_WIDTH 2048U
+#define TEGRA20_MAX_HEIGHT 2048U
+
+/* --------------------------------------------------------------------------
+ * Registers
+ */
+
+#define TEGRA_VI_CONT_SYNCPT_OUT_1 0x0060
+#define VI_CONT_SYNCPT_OUT_1_CONTINUOUS_SYNCPT BIT(8)
+#define VI_CONT_SYNCPT_OUT_1_SYNCPT_IDX_SFT 0
+
+#define TEGRA_VI_VI_INPUT_CONTROL 0x0088
+#define VI_INPUT_FIELD_DETECT BIT(27)
+#define VI_INPUT_BT656 BIT(25)
+#define VI_INPUT_YUV_INPUT_FORMAT_SFT 8 /* bits [9:8] */
+#define VI_INPUT_YUV_INPUT_FORMAT_UYVY (0 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_YUV_INPUT_FORMAT_VYUY BIT(VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_YUV_INPUT_FORMAT_YUYV (2 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_YUV_INPUT_FORMAT_YVYU (3 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_INPUT_FORMAT_SFT 2 /* bits [5:2] */
+#define VI_INPUT_INPUT_FORMAT_YUV422 (0 << VI_INPUT_INPUT_FORMAT_SFT)
+#define VI_INPUT_VIP_INPUT_ENABLE BIT(1)
+
+#define TEGRA_VI_VI_CORE_CONTROL 0x008c
+#define VI_VI_CORE_CONTROL_PLANAR_CONV_IN_SEL_EXT BIT(31)
+#define VI_VI_CORE_CONTROL_CSC_INPUT_SEL_EXT BIT(30)
+#define VI_VI_CORE_CONTROL_INPUT_TO_ALT_MUX_SFT 27
+#define VI_VI_CORE_CONTROL_INPUT_TO_CORE_EXT_SFT 24
+#define VI_VI_CORE_CONTROL_OUTPUT_TO_ISP_EXT_SFT 21
+#define VI_VI_CORE_CONTROL_ISP_HOST_STALL_OFF BIT(20)
+#define VI_VI_CORE_CONTROL_V_DOWNSCALING BIT(19)
+#define VI_VI_CORE_CONTROL_V_AVERAGING BIT(18)
+#define VI_VI_CORE_CONTROL_H_DOWNSCALING BIT(17)
+#define VI_VI_CORE_CONTROL_H_AVERAGING BIT(16)
+#define VI_VI_CORE_CONTROL_CSC_INPUT_SEL BIT(11)
+#define VI_VI_CORE_CONTROL_PLANAR_CONV_INPUT_SEL BIT(10)
+#define VI_VI_CORE_CONTROL_INPUT_TO_CORE_SFT 8
+#define VI_VI_CORE_CONTROL_ISP_DOWNSAMPLE_SFT 5
+#define VI_VI_CORE_CONTROL_OUTPUT_TO_EPP_SFT 2
+#define VI_VI_CORE_CONTROL_OUTPUT_TO_ISP_SFT 0
+
+#define TEGRA_VI_VI_FIRST_OUTPUT_CONTROL 0x0090
+#define VI_OUTPUT_FORMAT_EXT BIT(22)
+#define VI_OUTPUT_V_DIRECTION BIT(20)
+#define VI_OUTPUT_H_DIRECTION BIT(19)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT 17
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY (0 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY BIT(VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YUYV (2 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YVYU (3 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_OUTPUT_BYTE_SWAP BIT(16)
+#define VI_OUTPUT_LAST_PIXEL_DUPLICATION BIT(8)
+#define VI_OUTPUT_OUTPUT_FORMAT_SFT 0
+#define VI_OUTPUT_OUTPUT_FORMAT_YUV422POST (3 << VI_OUTPUT_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_OUTPUT_FORMAT_YUV420PLANAR (6 << VI_OUTPUT_OUTPUT_FORMAT_SFT)
+
+#define TEGRA_VI_VIP_H_ACTIVE 0x00a4
+#define VI_VIP_H_ACTIVE_PERIOD_SFT 16 /* active pixels/line, must be even */
+#define VI_VIP_H_ACTIVE_START_SFT 0
+
+#define TEGRA_VI_VIP_V_ACTIVE 0x00a8
+#define VI_VIP_V_ACTIVE_PERIOD_SFT 16 /* active lines */
+#define VI_VIP_V_ACTIVE_START_SFT 0
+
+#define TEGRA_VI_VB0_START_ADDRESS_FIRST 0x00c4
+#define TEGRA_VI_VB0_BASE_ADDRESS_FIRST 0x00c8
+#define TEGRA_VI_VB0_START_ADDRESS_U 0x00cc
+#define TEGRA_VI_VB0_BASE_ADDRESS_U 0x00d0
+#define TEGRA_VI_VB0_START_ADDRESS_V 0x00d4
+#define TEGRA_VI_VB0_BASE_ADDRESS_V 0x00d8
+
+#define TEGRA_VI_FIRST_OUTPUT_FRAME_SIZE 0x00e0
+#define VI_FIRST_OUTPUT_FRAME_HEIGHT_SFT 16
+#define VI_FIRST_OUTPUT_FRAME_WIDTH_SFT 0
+
+#define TEGRA_VI_VB0_COUNT_FIRST 0x00e4
+
+#define TEGRA_VI_VB0_SIZE_FIRST 0x00e8
+#define VI_VB0_SIZE_FIRST_V_SFT 16
+#define VI_VB0_SIZE_FIRST_H_SFT 0
+
+#define TEGRA_VI_VB0_BUFFER_STRIDE_FIRST 0x00ec
+#define VI_VB0_BUFFER_STRIDE_FIRST_CHROMA_SFT 30
+#define VI_VB0_BUFFER_STRIDE_FIRST_LUMA_SFT 0
+
+#define TEGRA_VI_H_LPF_CONTROL 0x0108
+#define VI_H_LPF_CONTROL_CHROMA_SFT 16
+#define VI_H_LPF_CONTROL_LUMA_SFT 0
+
+#define TEGRA_VI_H_DOWNSCALE_CONTROL 0x010c
+#define TEGRA_VI_V_DOWNSCALE_CONTROL 0x0110
+
+#define TEGRA_VI_VIP_INPUT_STATUS 0x0144
+
+#define TEGRA_VI_VI_DATA_INPUT_CONTROL 0x0168
+#define VI_DATA_INPUT_SFT 0 /* [11:0] = mask pin inputs to VI core */
+
+#define TEGRA_VI_PIN_INPUT_ENABLE 0x016c
+#define VI_PIN_INPUT_VSYNC BIT(14)
+#define VI_PIN_INPUT_HSYNC BIT(13)
+#define VI_PIN_INPUT_VD_SFT 0 /* [11:0] = data bin N input enable */
+
+#define TEGRA_VI_PIN_INVERSION 0x0174
+#define VI_PIN_INVERSION_VSYNC_ACTIVE_HIGH BIT(1)
+#define VI_PIN_INVERSION_HSYNC_ACTIVE_HIGH BIT(0)
+
+#define TEGRA_VI_CAMERA_CONTROL 0x01a0
+#define VI_CAMERA_CONTROL_STOP_CAPTURE BIT(2)
+#define VI_CAMERA_CONTROL_TEST_MODE BIT(1)
+#define VI_CAMERA_CONTROL_VIP_ENABLE BIT(0)
+
+#define TEGRA_VI_VI_ENABLE 0x01a4
+#define VI_VI_ENABLE_SW_FLOW_CONTROL_OUT1 BIT(1)
+#define VI_VI_ENABLE_FIRST_OUTPUT_TO_MEM_DISABLE BIT(0)
+
+#define TEGRA_VI_VI_RAISE 0x01ac
+#define VI_VI_RAISE_ON_EDGE BIT(0)
+
+/* --------------------------------------------------------------------------
+ * VI
+ */
+
+static void tegra20_vi_write(struct tegra_vi_channel *chan, unsigned int addr, u32 val)
+{
+ writel(val, chan->vi->iomem + addr);
+}
+
+/*
+ * Get the main input format (YUV/RGB...) and the YUV variant as values to
+ * be written into registers for the current VI input mbus code.
+ */
+static void tegra20_vi_get_input_formats(struct tegra_vi_channel *chan,
+ unsigned int *main_input_format,
+ unsigned int *yuv_input_format)
+{
+ unsigned int input_mbus_code = chan->fmtinfo->code;
+
+ (*main_input_format) = VI_INPUT_INPUT_FORMAT_YUV422;
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_UYVY;
+
+ switch (input_mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_VYUY;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ (*yuv_input_format) = VI_INPUT_YUV_INPUT_FORMAT_YVYU;
+ break;
+ }
+}
+
+/*
+ * Get the main output format (YUV/RGB...) and the YUV variant as values to
+ * be written into registers for the current VI output pixel format.
+ */
+static void tegra20_vi_get_output_formats(struct tegra_vi_channel *chan,
+ unsigned int *main_output_format,
+ unsigned int *yuv_output_format)
+{
+ u32 output_fourcc = chan->format.pixelformat;
+
+ /* Default to YUV422 non-planar (U8Y8V8Y8) after downscaling */
+ (*main_output_format) = VI_OUTPUT_OUTPUT_FORMAT_YUV422POST;
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY;
+
+ switch (output_fourcc) {
+ case V4L2_PIX_FMT_UYVY:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_YUYV;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ (*yuv_output_format) = VI_OUTPUT_YUV_OUTPUT_FORMAT_YVYU;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ (*main_output_format) = VI_OUTPUT_OUTPUT_FORMAT_YUV420PLANAR;
+ break;
+ }
+}
+
+/*
+ * Make the VI accessible (needed on Tegra20).
+ *
+ * This function writes an unknown bit into an unknown register. The code
+ * comes from a downstream 3.1 kernel that has a working VIP driver for
+ * Tegra20, and removing it makes the VI completely inaccessible. It should
+ * be rewritten and possibly moved elsewhere, but the appropriate location
+ * and implementation are unknown due to a total lack of documentation.
+ */
+static int tegra20_vi_enable(struct tegra_vi *vi, bool on)
+{
+ /* from arch/arm/mach-tegra/iomap.h */
+ const phys_addr_t TEGRA_APB_MISC_BASE = 0x70000000;
+ const unsigned long reg_offset = 0x42c;
+ void __iomem *apb_misc;
+ u32 val;
+
+ apb_misc = ioremap(TEGRA_APB_MISC_BASE, PAGE_SIZE);
+ if (!apb_misc)
+ apb_misc = ERR_PTR(-ENOENT);
+ if (IS_ERR(apb_misc))
+ return dev_err_probe(vi->dev, PTR_ERR(apb_misc), "cannot access APB_MISC");
+
+ val = readl(apb_misc + reg_offset);
+ val &= ~BIT(0);
+ val |= on ? BIT(0) : 0;
+ writel(val, apb_misc + reg_offset);
+ iounmap(apb_misc);
+
+ return 0;
+}
+
+static int tegra20_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct host1x_syncpt *out_sp;
+
+ out_sp = host1x_syncpt_request(&vi->client, HOST1X_SYNCPT_CLIENT_MANAGED);
+ if (!out_sp)
+ return -ENOMEM;
+
+ chan->mw_ack_sp[0] = out_sp;
+
+ return 0;
+}
+
+static void tegra20_channel_host1x_syncpt_free(struct tegra_vi_channel *chan)
+{
+ host1x_syncpt_put(chan->mw_ack_sp[0]);
+}
+
+static void tegra20_fmt_align(struct v4l2_pix_format *pix, unsigned int bpp)
+{
+ pix->width = clamp(pix->width, TEGRA20_MIN_WIDTH, TEGRA20_MAX_WIDTH);
+ pix->height = clamp(pix->height, TEGRA20_MIN_HEIGHT, TEGRA20_MAX_HEIGHT);
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ pix->bytesperline = roundup(pix->width, 2) * 2;
+ pix->sizeimage = roundup(pix->width, 2) * 2 * pix->height;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ pix->bytesperline = roundup(pix->width, 8);
+ pix->sizeimage = roundup(pix->width, 8) * pix->height * 3 / 2;
+ break;
+ }
+}
+
+/*
+ * Compute buffer offsets once per stream so that
+ * tegra20_channel_vi_buffer_setup() only has to do very simple maths for
+ * each buffer.
+ */
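+/*
+ * Worked example (hypothetical numbers, for illustration only): for a
+ * 640x480 V4L2_PIX_FMT_YUV420 buffer with bytesperline = 640 and no
+ * flipping, addr_offset_u = 640 * 480 = 307200 and
+ * addr_offset_v = 307200 + 640 * 480 / 4 = 384000. With vflip enabled,
+ * start_offset moves to 640 * 479 = 306560 and start_offset_u/v each
+ * increase by (640 / 2) * (480 / 2 - 1) = 76480 bytes over their plane
+ * offsets.
+ */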
+static void tegra20_channel_queue_setup(struct tegra_vi_channel *chan)
+{
+ unsigned int stride = chan->format.bytesperline;
+ unsigned int height = chan->format.height;
+
+ chan->start_offset = 0;
+
+ switch (chan->format.pixelformat) {
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ if (chan->vflip)
+ chan->start_offset += stride * (height - 1);
+ if (chan->hflip)
+ chan->start_offset += stride - 1;
+ break;
+
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ chan->addr_offset_u = stride * height;
+ chan->addr_offset_v = chan->addr_offset_u + stride * height / 4;
+
+ /* For YVU420, we swap the locations of the U and V planes. */
+ if (chan->format.pixelformat == V4L2_PIX_FMT_YVU420)
+ swap(chan->addr_offset_u, chan->addr_offset_v);
+
+ chan->start_offset_u = chan->addr_offset_u;
+ chan->start_offset_v = chan->addr_offset_v;
+
+ if (chan->vflip) {
+ chan->start_offset += stride * (height - 1);
+ chan->start_offset_u += (stride / 2) * ((height / 2) - 1);
+ chan->start_offset_v += (stride / 2) * ((height / 2) - 1);
+ }
+ if (chan->hflip) {
+ chan->start_offset += stride - 1;
+ chan->start_offset_u += (stride / 2) - 1;
+ chan->start_offset_v += (stride / 2) - 1;
+ }
+ break;
+ }
+}
+
+static void release_buffer(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb = &buf->buf;
+
+ vb->sequence = chan->sequence++;
+ vb->field = V4L2_FIELD_NONE;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vb->vb2_buf, state);
+}
+
+static void tegra20_channel_vi_buffer_setup(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ dma_addr_t base = buf->addr;
+
+ switch (chan->fmtinfo->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_U, base + chan->addr_offset_u);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_U, base + chan->start_offset_u);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_V, base + chan->addr_offset_v);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_V, base + chan->start_offset_v);
+ fallthrough;
+
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BASE_ADDRESS_FIRST, base);
+ tegra20_vi_write(chan, TEGRA_VI_VB0_START_ADDRESS_FIRST, base + chan->start_offset);
+ break;
+ }
+}
+
+static int tegra20_channel_capture_frame(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ int err;
+
+ chan->next_out_sp_idx++;
+
+ tegra20_channel_vi_buffer_setup(chan, buf);
+
+ tegra20_vi_write(chan, TEGRA_VI_CAMERA_CONTROL, VI_CAMERA_CONTROL_VIP_ENABLE);
+
+ /* Wait for syncpt counter to reach frame start event threshold */
+ err = host1x_syncpt_wait(chan->mw_ack_sp[0], chan->next_out_sp_idx,
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, NULL);
+ if (err) {
+ host1x_syncpt_incr(chan->mw_ack_sp[0]);
+ dev_err_ratelimited(&chan->video.dev, "frame start syncpt timeout: %d\n", err);
+ release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
+ return err;
+ }
+
+ tegra20_vi_write(chan, TEGRA_VI_CAMERA_CONTROL,
+ VI_CAMERA_CONTROL_STOP_CAPTURE | VI_CAMERA_CONTROL_VIP_ENABLE);
+
+ release_buffer(chan, buf, VB2_BUF_STATE_DONE);
+
+ return 0;
+}
+
+static int tegra20_chan_capture_kthread_start(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+ unsigned int retries = 0;
+ int err = 0;
+
+ while (1) {
+ /*
+		 * The source is not streaming if err is non-zero.
+		 * So, do not dequeue buffers on error and let the thread sleep
+		 * until the kthread stop signal is received.
+ */
+ wait_event_interruptible(chan->start_wait,
+ kthread_should_stop() ||
+ (!list_empty(&chan->capture) && !err));
+
+ if (kthread_should_stop())
+ break;
+
+ /* dequeue the buffer and start capture */
+ spin_lock(&chan->start_lock);
+ if (list_empty(&chan->capture)) {
+ spin_unlock(&chan->start_lock);
+ continue;
+ }
+
+ buf = list_first_entry(&chan->capture, struct tegra_channel_buffer, queue);
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->start_lock);
+
+ err = tegra20_channel_capture_frame(chan, buf);
+ if (!err) {
+ retries = 0;
+ continue;
+ }
+
+ if (retries++ > chan->syncpt_timeout_retry)
+ vb2_queue_error(&chan->queue);
+ else
+ err = 0;
+ }
+
+ return 0;
+}
+
+static void tegra20_camera_capture_setup(struct tegra_vi_channel *chan)
+{
+ u32 output_fourcc = chan->format.pixelformat;
+ int width = chan->format.width;
+ int height = chan->format.height;
+ int stride_l = chan->format.bytesperline;
+ int stride_c = (output_fourcc == V4L2_PIX_FMT_YUV420 ||
+ output_fourcc == V4L2_PIX_FMT_YVU420) ? 1 : 0;
+ int main_output_format;
+ int yuv_output_format;
+
+ tegra20_vi_get_output_formats(chan, &main_output_format, &yuv_output_format);
+
+ /*
+ * Set up low pass filter. Use 0x240 for chromaticity and 0x240
+ * for luminance, which is the default and means not to touch
+ * anything.
+ */
+ tegra20_vi_write(chan, TEGRA_VI_H_LPF_CONTROL,
+ 0x0240 << VI_H_LPF_CONTROL_LUMA_SFT |
+ 0x0240 << VI_H_LPF_CONTROL_CHROMA_SFT);
+
+ /* Set up raise-on-edge, so we get an interrupt on end of frame. */
+ tegra20_vi_write(chan, TEGRA_VI_VI_RAISE, VI_VI_RAISE_ON_EDGE);
+
+ tegra20_vi_write(chan, TEGRA_VI_VI_FIRST_OUTPUT_CONTROL,
+ (chan->vflip ? VI_OUTPUT_V_DIRECTION : 0) |
+ (chan->hflip ? VI_OUTPUT_H_DIRECTION : 0) |
+ yuv_output_format << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT |
+ main_output_format << VI_OUTPUT_OUTPUT_FORMAT_SFT);
+
+ /* Set up frame size */
+ tegra20_vi_write(chan, TEGRA_VI_FIRST_OUTPUT_FRAME_SIZE,
+ height << VI_FIRST_OUTPUT_FRAME_HEIGHT_SFT |
+ width << VI_FIRST_OUTPUT_FRAME_WIDTH_SFT);
+
+ /* First output memory enabled */
+ tegra20_vi_write(chan, TEGRA_VI_VI_ENABLE, 0);
+
+ /* Set the number of frames in the buffer */
+ tegra20_vi_write(chan, TEGRA_VI_VB0_COUNT_FIRST, 1);
+
+ /* Set up buffer frame size */
+ tegra20_vi_write(chan, TEGRA_VI_VB0_SIZE_FIRST,
+ height << VI_VB0_SIZE_FIRST_V_SFT |
+ width << VI_VB0_SIZE_FIRST_H_SFT);
+
+ tegra20_vi_write(chan, TEGRA_VI_VB0_BUFFER_STRIDE_FIRST,
+ stride_l << VI_VB0_BUFFER_STRIDE_FIRST_LUMA_SFT |
+ stride_c << VI_VB0_BUFFER_STRIDE_FIRST_CHROMA_SFT);
+
+ tegra20_vi_write(chan, TEGRA_VI_VI_ENABLE, 0);
+}
+
+static int tegra20_vi_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ struct media_pipeline *pipe = &chan->video.pipe;
+ int err;
+
+ chan->next_out_sp_idx = host1x_syncpt_read(chan->mw_ack_sp[0]);
+
+ err = video_device_pipeline_start(&chan->video, pipe);
+ if (err)
+ goto error_pipeline_start;
+
+ tegra20_camera_capture_setup(chan);
+
+ err = tegra_channel_set_stream(chan, true);
+ if (err)
+ goto error_set_stream;
+
+ chan->sequence = 0;
+
+ chan->kthread_start_capture = kthread_run(tegra20_chan_capture_kthread_start,
+ chan, "%s:0", chan->video.name);
+ if (IS_ERR(chan->kthread_start_capture)) {
+ err = PTR_ERR(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ dev_err_probe(&chan->video.dev, err, "failed to run capture kthread\n");
+ goto error_kthread_start;
+ }
+
+ return 0;
+
+error_kthread_start:
+ tegra_channel_set_stream(chan, false);
+error_set_stream:
+ video_device_pipeline_stop(&chan->video);
+error_pipeline_start:
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
+
+ return err;
+}
+
+static void tegra20_vi_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (chan->kthread_start_capture) {
+ kthread_stop(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ }
+
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
+ tegra_channel_set_stream(chan, false);
+ video_device_pipeline_stop(&chan->video);
+}
+
+static const struct tegra_vi_ops tegra20_vi_ops = {
+ .vi_enable = tegra20_vi_enable,
+ .channel_host1x_syncpt_init = tegra20_channel_host1x_syncpt_init,
+ .channel_host1x_syncpt_free = tegra20_channel_host1x_syncpt_free,
+ .vi_fmt_align = tegra20_fmt_align,
+ .channel_queue_setup = tegra20_channel_queue_setup,
+ .vi_start_streaming = tegra20_vi_start_streaming,
+ .vi_stop_streaming = tegra20_vi_stop_streaming,
+};
+
+#define TEGRA20_VIDEO_FMT(MBUS_CODE, BPP, FOURCC) \
+{ \
+ .code = MEDIA_BUS_FMT_##MBUS_CODE, \
+ .bpp = BPP, \
+ .fourcc = V4L2_PIX_FMT_##FOURCC, \
+}
+
+static const struct tegra_video_format tegra20_video_formats[] = {
+ TEGRA20_VIDEO_FMT(UYVY8_2X8, 2, UYVY),
+ TEGRA20_VIDEO_FMT(VYUY8_2X8, 2, VYUY),
+ TEGRA20_VIDEO_FMT(YUYV8_2X8, 2, YUYV),
+ TEGRA20_VIDEO_FMT(YVYU8_2X8, 2, YVYU),
+ TEGRA20_VIDEO_FMT(UYVY8_2X8, 1, YUV420),
+ TEGRA20_VIDEO_FMT(UYVY8_2X8, 1, YVU420),
+};
+
+const struct tegra_vi_soc tegra20_vi_soc = {
+ .video_formats = tegra20_video_formats,
+ .nformats = ARRAY_SIZE(tegra20_video_formats),
+ .default_video_format = &tegra20_video_formats[0],
+ .ops = &tegra20_vi_ops,
+ .vi_max_channels = 1, /* parallel input (VIP) */
+ .vi_max_clk_hz = 150000000,
+ .has_h_v_flip = true,
+};
+
+/* --------------------------------------------------------------------------
+ * VIP
+ */
+
+/*
+ * VIP-specific configuration for stream start.
+ *
+ * Whatever is common among VIP and CSI is done by the VI component (see
+ * tegra20_vi_start_streaming()). Here we do what is VIP-specific.
+ */
+static int tegra20_vip_start_streaming(struct tegra_vip_channel *vip_chan)
+{
+ struct tegra_vi_channel *vi_chan = v4l2_get_subdev_hostdata(&vip_chan->subdev);
+ int width = vi_chan->format.width;
+ int height = vi_chan->format.height;
+
+ unsigned int main_input_format;
+ unsigned int yuv_input_format;
+
+ tegra20_vi_get_input_formats(vi_chan, &main_input_format, &yuv_input_format);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_VI_CORE_CONTROL, 0);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_VI_INPUT_CONTROL,
+ VI_INPUT_VIP_INPUT_ENABLE | main_input_format | yuv_input_format);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_V_DOWNSCALE_CONTROL, 0);
+ tegra20_vi_write(vi_chan, TEGRA_VI_H_DOWNSCALE_CONTROL, 0);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_VIP_V_ACTIVE, height << VI_VIP_V_ACTIVE_PERIOD_SFT);
+ tegra20_vi_write(vi_chan, TEGRA_VI_VIP_H_ACTIVE,
+ roundup(width, 2) << VI_VIP_H_ACTIVE_PERIOD_SFT);
+
+ /*
+ * For VIP, D9..D2 is mapped to the video decoder's P7..P0.
+ * Disable/mask out the other Dn wires. When not in BT656
+ * mode we also need the V/H sync.
+ */
+ tegra20_vi_write(vi_chan, TEGRA_VI_PIN_INPUT_ENABLE,
+ GENMASK(9, 2) << VI_PIN_INPUT_VD_SFT |
+ VI_PIN_INPUT_HSYNC | VI_PIN_INPUT_VSYNC);
+ tegra20_vi_write(vi_chan, TEGRA_VI_VI_DATA_INPUT_CONTROL,
+ GENMASK(9, 2) << VI_DATA_INPUT_SFT);
+ tegra20_vi_write(vi_chan, TEGRA_VI_PIN_INVERSION, 0);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_CONT_SYNCPT_OUT_1,
+ VI_CONT_SYNCPT_OUT_1_CONTINUOUS_SYNCPT |
+ host1x_syncpt_id(vi_chan->mw_ack_sp[0])
+ << VI_CONT_SYNCPT_OUT_1_SYNCPT_IDX_SFT);
+
+ tegra20_vi_write(vi_chan, TEGRA_VI_CAMERA_CONTROL, VI_CAMERA_CONTROL_STOP_CAPTURE);
+
+ return 0;
+}
+
+static const struct tegra_vip_ops tegra20_vip_ops = {
+ .vip_start_streaming = tegra20_vip_start_streaming,
+};
+
+const struct tegra_vip_soc tegra20_vip_soc = {
+ .ops = &tegra20_vip_ops,
+};
diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c
new file mode 100644
index 000000000000..da99f19a39e7
--- /dev/null
+++ b/drivers/staging/media/tegra-video/tegra210.c
@@ -0,0 +1,1221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+/*
+ * This source file contains Tegra210 supported video formats,
+ * VI and CSI SoC specific data, operations and register accessors.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/kthread.h>
+
+#include "csi.h"
+#include "vi.h"
+
+#define TEGRA210_MIN_WIDTH 32U
+#define TEGRA210_MAX_WIDTH 32768U
+#define TEGRA210_MIN_HEIGHT 32U
+#define TEGRA210_MAX_HEIGHT 32768U
+
+#define SURFACE_ALIGN_BYTES 64
+
+#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
+
+/* Tegra210 VI registers */
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT 0x000
+#define VI_CFG_VI_INCR_SYNCPT_COND(x) (((x) & 0xff) << 8)
+#define VI_CSI_PP_FRAME_START(port) (5 + (port) * 4)
+#define VI_CSI_MW_ACK_DONE(port) (7 + (port) * 4)
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL 0x004
+#define VI_INCR_SYNCPT_NO_STALL BIT(8)
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x008
+#define TEGRA_VI_CFG_CG_CTRL 0x0b8
+#define VI_CG_2ND_LEVEL_EN 0x1
+
+/* Tegra210 VI CSI registers */
+#define TEGRA_VI_CSI_SW_RESET 0x000
+#define TEGRA_VI_CSI_SINGLE_SHOT 0x004
+#define SINGLE_SHOT_CAPTURE 0x1
+#define TEGRA_VI_CSI_IMAGE_DEF 0x00c
+#define BYPASS_PXL_TRANSFORM_OFFSET 24
+#define IMAGE_DEF_FORMAT_OFFSET 16
+#define IMAGE_DEF_DEST_MEM 0x1
+#define TEGRA_VI_CSI_IMAGE_SIZE 0x018
+#define IMAGE_SIZE_HEIGHT_OFFSET 16
+#define TEGRA_VI_CSI_IMAGE_SIZE_WC 0x01c
+#define TEGRA_VI_CSI_IMAGE_DT 0x020
+#define TEGRA_VI_CSI_SURFACE0_OFFSET_MSB 0x024
+#define TEGRA_VI_CSI_SURFACE0_OFFSET_LSB 0x028
+#define TEGRA_VI_CSI_SURFACE1_OFFSET_MSB 0x02c
+#define TEGRA_VI_CSI_SURFACE1_OFFSET_LSB 0x030
+#define TEGRA_VI_CSI_SURFACE2_OFFSET_MSB 0x034
+#define TEGRA_VI_CSI_SURFACE2_OFFSET_LSB 0x038
+#define TEGRA_VI_CSI_SURFACE0_STRIDE 0x054
+#define TEGRA_VI_CSI_SURFACE1_STRIDE 0x058
+#define TEGRA_VI_CSI_SURFACE2_STRIDE 0x05c
+#define TEGRA_VI_CSI_SURFACE_HEIGHT0 0x060
+#define TEGRA_VI_CSI_ERROR_STATUS 0x084
+
+/* Tegra210 CSI Pixel Parser registers: Starts from 0x838, offset 0x0 */
+#define TEGRA_CSI_INPUT_STREAM_CONTROL 0x000
+#define CSI_SKIP_PACKET_THRESHOLD_OFFSET 16
+#define TEGRA_CSI_PIXEL_STREAM_CONTROL0 0x004
+#define CSI_PP_PACKET_HEADER_SENT BIT(4)
+#define CSI_PP_DATA_IDENTIFIER_ENABLE BIT(5)
+#define CSI_PP_WORD_COUNT_SELECT_HEADER BIT(6)
+#define CSI_PP_CRC_CHECK_ENABLE BIT(7)
+#define CSI_PP_WC_CHECK BIT(8)
+#define CSI_PP_OUTPUT_FORMAT_STORE (0x3 << 16)
+#define CSI_PPA_PAD_LINE_NOPAD (0x2 << 24)
+#define CSI_PP_HEADER_EC_DISABLE (0x1 << 27)
+#define CSI_PPA_PAD_FRAME_NOPAD (0x2 << 28)
+#define TEGRA_CSI_PIXEL_STREAM_CONTROL1 0x008
+#define CSI_PP_TOP_FIELD_FRAME_OFFSET 0
+#define CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET 4
+#define TEGRA_CSI_PIXEL_STREAM_GAP 0x00c
+#define PP_FRAME_MIN_GAP_OFFSET 16
+#define TEGRA_CSI_PIXEL_STREAM_PP_COMMAND 0x010
+#define CSI_PP_ENABLE 0x1
+#define CSI_PP_DISABLE 0x2
+#define CSI_PP_RST 0x3
+#define CSI_PP_SINGLE_SHOT_ENABLE (0x1 << 2)
+#define CSI_PP_START_MARKER_FRAME_MAX_OFFSET 12
+#define TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME 0x014
+#define TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK 0x018
+#define TEGRA_CSI_PIXEL_PARSER_STATUS 0x01c
+
+/* Tegra210 CSI PHY registers */
+/* CSI_PHY_CIL_COMMAND_0 offset 0x0d0 from TEGRA_CSI_PIXEL_PARSER_0_BASE */
+#define TEGRA_CSI_PHY_CIL_COMMAND 0x0d0
+#define CSI_A_PHY_CIL_NOP 0x0
+#define CSI_A_PHY_CIL_ENABLE 0x1
+#define CSI_A_PHY_CIL_DISABLE 0x2
+#define CSI_A_PHY_CIL_ENABLE_MASK 0x3
+#define CSI_B_PHY_CIL_NOP (0x0 << 8)
+#define CSI_B_PHY_CIL_ENABLE (0x1 << 8)
+#define CSI_B_PHY_CIL_DISABLE (0x2 << 8)
+#define CSI_B_PHY_CIL_ENABLE_MASK (0x3 << 8)
+
+#define TEGRA_CSI_CIL_PAD_CONFIG0 0x000
+#define BRICK_CLOCK_A_4X (0x1 << 16)
+#define BRICK_CLOCK_B_4X (0x2 << 16)
+#define TEGRA_CSI_CIL_PAD_CONFIG1 0x004
+#define TEGRA_CSI_CIL_PHY_CONTROL 0x008
+#define CLK_SETTLE_MASK GENMASK(13, 8)
+#define THS_SETTLE_MASK GENMASK(5, 0)
+#define TEGRA_CSI_CIL_INTERRUPT_MASK 0x00c
+#define TEGRA_CSI_CIL_STATUS 0x010
+#define TEGRA_CSI_CILX_STATUS 0x014
+#define TEGRA_CSI_CIL_SW_SENSOR_RESET 0x020
+
+#define TEGRA_CSI_PATTERN_GENERATOR_CTRL 0x000
+#define PG_MODE_OFFSET 2
+#define PG_ENABLE 0x1
+#define PG_DISABLE 0x0
+#define TEGRA_CSI_PG_BLANK 0x004
+#define PG_VBLANK_OFFSET 16
+#define TEGRA_CSI_PG_PHASE 0x008
+#define TEGRA_CSI_PG_RED_FREQ 0x00c
+#define PG_RED_VERT_INIT_FREQ_OFFSET 16
+#define PG_RED_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_RED_FREQ_RATE 0x010
+#define TEGRA_CSI_PG_GREEN_FREQ 0x014
+#define PG_GREEN_VERT_INIT_FREQ_OFFSET 16
+#define PG_GREEN_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_GREEN_FREQ_RATE 0x018
+#define TEGRA_CSI_PG_BLUE_FREQ 0x01c
+#define PG_BLUE_VERT_INIT_FREQ_OFFSET 16
+#define PG_BLUE_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_BLUE_FREQ_RATE 0x020
+#define TEGRA_CSI_PG_AOHDR 0x024
+#define TEGRA_CSI_CSI_SW_STATUS_RESET 0x214
+#define TEGRA_CSI_CLKEN_OVERRIDE 0x218
+
+#define TEGRA210_CSI_PORT_OFFSET 0x34
+#define TEGRA210_CSI_CIL_OFFSET 0x0f4
+#define TEGRA210_CSI_TPG_OFFSET 0x18c
+
+#define CSI_PP_OFFSET(block) ((block) * 0x800)
+#define TEGRA210_VI_CSI_BASE(x) (0x100 + (x) * 0x100)
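+
+/*
+ * Address map illustration: each VI channel owns a 0x100 register window,
+ * so TEGRA210_VI_CSI_BASE(0) is 0x100 and TEGRA210_VI_CSI_BASE(2) is 0x300.
+ * On the CSI side, CSI_PP_OFFSET() selects a 0x800-sized block per brick of
+ * two ports, e.g. CSI_PP_OFFSET(1) = 0x800 for ports 2 and 3.
+ */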
+
+/* Tegra210 VI register accessors */
+static void tegra_vi_write(struct tegra_vi_channel *chan, unsigned int addr,
+ u32 val)
+{
+ writel_relaxed(val, chan->vi->iomem + addr);
+}
+
+static u32 tegra_vi_read(struct tegra_vi_channel *chan, unsigned int addr)
+{
+ return readl_relaxed(chan->vi->iomem + addr);
+}
+
+/* Tegra210 VI_CSI register accessors */
+static void vi_csi_write(struct tegra_vi_channel *chan, u8 portno,
+ unsigned int addr, u32 val)
+{
+ void __iomem *vi_csi_base;
+
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(portno);
+
+ writel_relaxed(val, vi_csi_base + addr);
+}
+
+static u32 vi_csi_read(struct tegra_vi_channel *chan, u8 portno,
+ unsigned int addr)
+{
+ void __iomem *vi_csi_base;
+
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(portno);
+
+ return readl_relaxed(vi_csi_base + addr);
+}
+
+/*
+ * Tegra210 VI channel capture operations
+ */
+
+static int tegra210_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
+ struct host1x_syncpt *fs_sp;
+ struct host1x_syncpt *mw_sp;
+ int ret, i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ fs_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!fs_sp) {
+ dev_err(vi->dev, "failed to request frame start syncpoint\n");
+ ret = -ENOMEM;
+ goto free_syncpts;
+ }
+
+ mw_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!mw_sp) {
+ dev_err(vi->dev, "failed to request memory ack syncpoint\n");
+ host1x_syncpt_put(fs_sp);
+ ret = -ENOMEM;
+ goto free_syncpts;
+ }
+
+ chan->frame_start_sp[i] = fs_sp;
+ chan->mw_ack_sp[i] = mw_sp;
+ spin_lock_init(&chan->sp_incr_lock[i]);
+ }
+
+ return 0;
+
+free_syncpts:
+ for (i = 0; i < chan->numgangports; i++) {
+ host1x_syncpt_put(chan->mw_ack_sp[i]);
+ host1x_syncpt_put(chan->frame_start_sp[i]);
+ }
+ return ret;
+}
+
+static void tegra210_channel_host1x_syncpt_free(struct tegra_vi_channel *chan)
+{
+ int i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ host1x_syncpt_put(chan->mw_ack_sp[i]);
+ host1x_syncpt_put(chan->frame_start_sp[i]);
+ }
+}
+
+static void tegra210_fmt_align(struct v4l2_pix_format *pix, unsigned int bpp)
+{
+ unsigned int min_bpl;
+ unsigned int max_bpl;
+ unsigned int bpl;
+
+ /*
+ * The transfer alignment requirements are expressed in bytes.
+ * Clamp the requested width and height to the limits.
+ */
+ pix->width = clamp(pix->width, TEGRA210_MIN_WIDTH, TEGRA210_MAX_WIDTH);
+ pix->height = clamp(pix->height, TEGRA210_MIN_HEIGHT, TEGRA210_MAX_HEIGHT);
+
+	/*
+	 * Clamp the requested bytes per line value. The requested value is
+	 * first aligned to the surface alignment and then clamped between
+	 * the minimum line size for the requested width and the maximum
+	 * supported line size.
+	 */
+ min_bpl = pix->width * bpp;
+ max_bpl = rounddown(TEGRA210_MAX_WIDTH, SURFACE_ALIGN_BYTES);
+ bpl = roundup(pix->bytesperline, SURFACE_ALIGN_BYTES);
+
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->bytesperline * pix->height;
+ if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ pix->sizeimage *= 2;
+}
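+
+/*
+ * Illustration of the alignment above: for a 1920x1080 request with a
+ * 2 bytes-per-pixel format (e.g. RAW10 stored as T_R16_I), min_bpl is
+ * 1920 * 2 = 3840, already a multiple of SURFACE_ALIGN_BYTES (64), so a
+ * zero or too-small bytesperline is clamped up to 3840 and sizeimage
+ * becomes 3840 * 1080 = 4147200 bytes (doubled for V4L2_PIX_FMT_NV16 to
+ * cover the chroma plane).
+ */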
+
+static int tegra_channel_capture_setup(struct tegra_vi_channel *chan,
+ u8 portno)
+{
+ u32 height = chan->format.height;
+ u32 width = chan->format.width;
+ u32 format = chan->fmtinfo->img_fmt;
+ u32 data_type = chan->fmtinfo->img_dt;
+ u32 word_count = (width * chan->fmtinfo->bit_width) / 8;
+ u32 bypass_pixel_transform = BIT(BYPASS_PXL_TRANSFORM_OFFSET);
+
+ /*
+	 * The VI pixel transformation unit converts the source pixel data
+	 * format into the selected destination pixel format and aligns it
+	 * properly while interfacing with the memory packer.
+	 * Pixel transformation should be enabled for YUV and RGB formats
+	 * and bypassed for RAW formats, as RAW formats only support
+	 * direct-to-memory writes.
+ */
+ if (chan->pg_mode || data_type == TEGRA_IMAGE_DT_YUV422_8 ||
+ data_type == TEGRA_IMAGE_DT_RGB888)
+ bypass_pixel_transform = 0;
+
+ /*
+	 * For x8 source streaming, the source image is split across two x4
+	 * ports, with the left half going to the first x4 port and the right
+	 * half to the second. So, use the split width and the corresponding
+	 * word count for each x4 port.
+ */
+ if (chan->numgangports > 1) {
+ width = width >> 1;
+ word_count = (width * chan->fmtinfo->bit_width) / 8;
+ }
+
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_ERROR_STATUS, 0xffffffff);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_DEF,
+ bypass_pixel_transform |
+ (format << IMAGE_DEF_FORMAT_OFFSET) |
+ IMAGE_DEF_DEST_MEM);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_DT, data_type);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_SIZE_WC, word_count);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_IMAGE_SIZE,
+ (height << IMAGE_SIZE_HEIGHT_OFFSET) | width);
+ return 0;
+}
+
+static void tegra_channel_vi_soft_reset(struct tegra_vi_channel *chan,
+ u8 portno)
+{
+ /* disable clock gating to enable continuous clock */
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, 0);
+ /*
+	 * Soft reset the memory client interface, pixel format logic, sensor
+	 * control logic, and shadow copy logic to bring the VI to a clean state.
+ */
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SW_RESET, 0xf);
+ usleep_range(100, 200);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SW_RESET, 0x0);
+
+	/* re-enable VI clock gating */
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
+}
+
+static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan,
+ u8 portno)
+{
+ struct v4l2_subdev *subdev;
+ u32 val;
+
+ /*
+	 * Recover the VI and CSI hardware blocks when frame start events go
+	 * missing because the source is not streaming or the CSI inputs are
+	 * noisy, or when too many outstanding frame start or MW_ACK_DONE
+	 * events hang the CSI and VI hardware.
+	 * This helps to get a clean capture for the next frame.
+ */
+ val = vi_csi_read(chan, portno, TEGRA_VI_CSI_ERROR_STATUS);
+ dev_dbg(&chan->video.dev, "TEGRA_VI_CSI_ERROR_STATUS 0x%08x\n", val);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_ERROR_STATUS, val);
+
+ val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
+ dev_dbg(&chan->video.dev,
+ "TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x%08x\n", val);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
+
+ /* recover VI by issuing software reset and re-setup for capture */
+ tegra_channel_vi_soft_reset(chan, portno);
+ tegra_channel_capture_setup(chan, portno);
+
+ /* recover CSI block */
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ tegra_csi_error_recover(subdev);
+}
+
+static struct tegra_channel_buffer *
+dequeue_buf_done(struct tegra_vi_channel *chan)
+{
+ struct tegra_channel_buffer *buf = NULL;
+
+ spin_lock(&chan->done_lock);
+ if (list_empty(&chan->done)) {
+ spin_unlock(&chan->done_lock);
+ return NULL;
+ }
+
+ buf = list_first_entry(&chan->done,
+ struct tegra_channel_buffer, queue);
+ if (buf)
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->done_lock);
+
+ return buf;
+}
+
+static void release_buffer(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb = &buf->buf;
+
+ vb->sequence = chan->sequence++;
+ vb->field = V4L2_FIELD_NONE;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vb->vb2_buf, state);
+}
+
+static void tegra_channel_vi_buffer_setup(struct tegra_vi_channel *chan,
+ u8 portno, u32 buf_offset,
+ struct tegra_channel_buffer *buf)
+{
+ int bytesperline = chan->format.bytesperline;
+ u32 sizeimage = chan->format.sizeimage;
+
+ /* program buffer address by using surface 0 */
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_OFFSET_MSB,
+ ((u64)buf->addr + buf_offset) >> 32);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_OFFSET_LSB,
+ buf->addr + buf_offset);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE0_STRIDE, bytesperline);
+
+ if (chan->fmtinfo->fourcc != V4L2_PIX_FMT_NV16)
+ return;
+	/*
+	 * Program surface 1 for the UV plane with an offset of
+	 * sizeimage / 2 from the Y plane.
+	 */
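+	/*
+	 * For example, with a 1920x1080 NV16 buffer (bytesperline = 1920,
+	 * sizeimage = 1920 * 1080 * 2), the chroma plane is programmed to
+	 * start 1920 * 1080 bytes after buf->addr.
+	 */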
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_OFFSET_MSB,
+ (((u64)buf->addr + sizeimage / 2) + buf_offset) >> 32);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_OFFSET_LSB,
+ buf->addr + sizeimage / 2 + buf_offset);
+ vi_csi_write(chan, portno, TEGRA_VI_CSI_SURFACE1_STRIDE, bytesperline);
+}
+
+static int tegra_channel_capture_frame(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ u32 thresh, value, frame_start, mw_ack_done;
+ u32 fs_thresh[GANG_PORTS_MAX];
+ u8 *portnos = chan->portnos;
+ int gang_bpl = (chan->format.width >> 1) * chan->fmtinfo->bpp;
+ u32 buf_offset;
+ bool capture_timedout = false;
+ int err, i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ /*
+		 * Place the buffers for consecutive ganged x4 ports side by
+		 * side, using a bytes-per-line value based on the split
+		 * source width.
+ */
+ buf_offset = i * roundup(gang_bpl, SURFACE_ALIGN_BYTES);
+ tegra_channel_vi_buffer_setup(chan, portnos[i], buf_offset,
+ buf);
+
+ /*
+		 * The Tegra VI block interacts with host1x syncpoints to
+		 * synchronize the programmed conditions with the hardware
+		 * capture operations. The frame start and memory write
+		 * acknowledge syncpoints each have their own FIFO of depth 2.
+		 *
+		 * Syncpoint trigger conditions set through the VI_INCR_SYNCPT
+		 * register are added to the HW syncpt FIFO. When the hardware
+		 * event occurs, the condition is removed from the FIFO and
+		 * the counter at the syncpoint index is incremented by the
+		 * hardware; software can then wait for the counter to reach
+		 * the threshold to synchronize frame capture with the
+		 * hardware capture events.
+ */
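+		/*
+		 * Concretely: host1x_syncpt_incr_max() reserves the next
+		 * expected syncpoint value and returns it as the wait
+		 * threshold, the TEGRA_VI_CFG_VI_INCR_SYNCPT write below
+		 * queues the trigger condition, and host1x_syncpt_wait()
+		 * later blocks until the hardware has incremented the
+		 * counter to that threshold (or the wait times out).
+		 */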
+
+ /* increase channel syncpoint threshold for FRAME_START */
+ thresh = host1x_syncpt_incr_max(chan->frame_start_sp[i], 1);
+ fs_thresh[i] = thresh;
+
+ /* Program FRAME_START trigger condition syncpt request */
+ frame_start = VI_CSI_PP_FRAME_START(portnos[i]);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(frame_start) |
+ host1x_syncpt_id(chan->frame_start_sp[i]);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+
+ /* increase channel syncpoint threshold for MW_ACK_DONE */
+ thresh = host1x_syncpt_incr_max(chan->mw_ack_sp[i], 1);
+ buf->mw_ack_sp_thresh[i] = thresh;
+
+ /* Program MW_ACK_DONE trigger condition syncpt request */
+ mw_ack_done = VI_CSI_MW_ACK_DONE(portnos[i]);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
+ host1x_syncpt_id(chan->mw_ack_sp[i]);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+ }
+
+ /* enable single shot capture after all ganged ports are ready */
+ for (i = 0; i < chan->numgangports; i++)
+ vi_csi_write(chan, portnos[i], TEGRA_VI_CSI_SINGLE_SHOT,
+ SINGLE_SHOT_CAPTURE);
+
+ for (i = 0; i < chan->numgangports; i++) {
+ /*
+ * Wait for syncpt counter to reach frame start event threshold
+ */
+ err = host1x_syncpt_wait(chan->frame_start_sp[i], fs_thresh[i],
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (err) {
+ capture_timedout = true;
+			/* increment syncpoint counters for the timed-out events */
+ host1x_syncpt_incr(chan->frame_start_sp[i]);
+ spin_lock(&chan->sp_incr_lock[i]);
+ host1x_syncpt_incr(chan->mw_ack_sp[i]);
+ spin_unlock(&chan->sp_incr_lock[i]);
+ /* clear errors and recover */
+ tegra_channel_capture_error_recover(chan, portnos[i]);
+ }
+ }
+
+ if (capture_timedout) {
+ dev_err_ratelimited(&chan->video.dev,
+ "frame start syncpt timeout: %d\n", err);
+ release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
+ return err;
+ }
+
+ /* move buffer to capture done queue */
+ spin_lock(&chan->done_lock);
+ list_add_tail(&buf->queue, &chan->done);
+ spin_unlock(&chan->done_lock);
+
+	/* wake up the kthread handling capture done */
+ wake_up_interruptible(&chan->done_wait);
+
+ return 0;
+}
+
+static void tegra_channel_capture_done(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ u32 value;
+ bool capture_timedout = false;
+ int ret, i;
+
+ for (i = 0; i < chan->numgangports; i++) {
+ /*
+ * Wait for syncpt counter to reach MW_ACK_DONE event threshold
+ */
+ ret = host1x_syncpt_wait(chan->mw_ack_sp[i],
+ buf->mw_ack_sp_thresh[i],
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (ret) {
+ capture_timedout = true;
+ state = VB2_BUF_STATE_ERROR;
+			/* increment syncpoint counter for the timed-out event */
+ spin_lock(&chan->sp_incr_lock[i]);
+ host1x_syncpt_incr(chan->mw_ack_sp[i]);
+ spin_unlock(&chan->sp_incr_lock[i]);
+ }
+ }
+
+ if (capture_timedout)
+ dev_err_ratelimited(&chan->video.dev,
+ "MW_ACK_DONE syncpt timeout: %d\n", ret);
+ release_buffer(chan, buf, state);
+}
+
+static int chan_capture_kthread_start(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+ unsigned int retries = 0;
+ int err = 0;
+
+ while (1) {
+ /*
+		 * A non-zero err means the source is not streaming, so do
+		 * not dequeue buffers on error; let the thread sleep until
+		 * the kthread stop signal is received.
+ */
+ wait_event_interruptible(chan->start_wait,
+ kthread_should_stop() ||
+ (!list_empty(&chan->capture) &&
+ !err));
+
+ if (kthread_should_stop())
+ break;
+
+ /* dequeue the buffer and start capture */
+ spin_lock(&chan->start_lock);
+ if (list_empty(&chan->capture)) {
+ spin_unlock(&chan->start_lock);
+ continue;
+ }
+
+ buf = list_first_entry(&chan->capture,
+ struct tegra_channel_buffer, queue);
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->start_lock);
+
+ err = tegra_channel_capture_frame(chan, buf);
+ if (!err) {
+ retries = 0;
+ continue;
+ }
+
+ if (retries++ > chan->syncpt_timeout_retry)
+ vb2_queue_error(&chan->queue);
+ else
+ err = 0;
+ }
+
+ return 0;
+}
+
+static int chan_capture_kthread_finish(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+
+ while (1) {
+ wait_event_interruptible(chan->done_wait,
+ !list_empty(&chan->done) ||
+ kthread_should_stop());
+
+ /* dequeue buffers and finish capture */
+ buf = dequeue_buf_done(chan);
+ while (buf) {
+ tegra_channel_capture_done(chan, buf);
+ buf = dequeue_buf_done(chan);
+ }
+
+ if (kthread_should_stop())
+ break;
+ }
+
+ return 0;
+}
+
+static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ struct media_pipeline *pipe = &chan->video.pipe;
+ u32 val;
+ u8 *portnos = chan->portnos;
+ int ret, i;
+
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
+
+ /* clear syncpt errors */
+ val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
+
+ /*
+	 * A full sync point FIFO stalls the host interface.
+	 * Setting NO_STALL instead drops INCR_SYNCPT methods when the FIFOs
+	 * are full and sets the corresponding condition bits in the
+	 * INCR_SYNCPT_ERROR register.
+	 * This allows software to perform error recovery.
+ */
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL,
+ VI_INCR_SYNCPT_NO_STALL);
+
+ /* start the pipeline */
+ ret = video_device_pipeline_start(&chan->video, pipe);
+ if (ret < 0)
+ goto error_pipeline_start;
+
+	/* clear CSI errors and do capture setup for all ports in gang mode */
+ for (i = 0; i < chan->numgangports; i++) {
+ val = vi_csi_read(chan, portnos[i], TEGRA_VI_CSI_ERROR_STATUS);
+ vi_csi_write(chan, portnos[i], TEGRA_VI_CSI_ERROR_STATUS, val);
+
+ tegra_channel_capture_setup(chan, portnos[i]);
+ }
+
+ ret = tegra_channel_set_stream(chan, true);
+ if (ret < 0)
+ goto error_set_stream;
+
+ chan->sequence = 0;
+
+	/* start kthreads to capture frames into buffers and return them */
+ chan->kthread_start_capture = kthread_run(chan_capture_kthread_start,
+ chan, "%s:0",
+ chan->video.name);
+ if (IS_ERR(chan->kthread_start_capture)) {
+ ret = PTR_ERR(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ dev_err(&chan->video.dev,
+ "failed to run capture start kthread: %d\n", ret);
+ goto error_kthread_start;
+ }
+
+ chan->kthread_finish_capture = kthread_run(chan_capture_kthread_finish,
+ chan, "%s:1",
+ chan->video.name);
+ if (IS_ERR(chan->kthread_finish_capture)) {
+ ret = PTR_ERR(chan->kthread_finish_capture);
+ chan->kthread_finish_capture = NULL;
+ dev_err(&chan->video.dev,
+ "failed to run capture finish kthread: %d\n", ret);
+ goto error_kthread_done;
+ }
+
+ return 0;
+
+error_kthread_done:
+ kthread_stop(chan->kthread_start_capture);
+error_kthread_start:
+ tegra_channel_set_stream(chan, false);
+error_set_stream:
+ video_device_pipeline_stop(&chan->video);
+error_pipeline_start:
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void tegra210_vi_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (chan->kthread_start_capture) {
+ kthread_stop(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ }
+
+ if (chan->kthread_finish_capture) {
+ kthread_stop(chan->kthread_finish_capture);
+ chan->kthread_finish_capture = NULL;
+ }
+
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
+ tegra_channel_set_stream(chan, false);
+ video_device_pipeline_stop(&chan->video);
+}
+
+/*
+ * Tegra210 VI Pixel memory format enum.
+ * These format enum values get programmed into the corresponding Tegra VI
+ * channel register bits.
+ */
+enum tegra210_image_format {
+ TEGRA210_IMAGE_FORMAT_T_L8 = 16,
+
+ TEGRA210_IMAGE_FORMAT_T_R16_I = 32,
+ TEGRA210_IMAGE_FORMAT_T_B5G6R5,
+ TEGRA210_IMAGE_FORMAT_T_R5G6B5,
+ TEGRA210_IMAGE_FORMAT_T_A1B5G5R5,
+ TEGRA210_IMAGE_FORMAT_T_A1R5G5B5,
+ TEGRA210_IMAGE_FORMAT_T_B5G5R5A1,
+ TEGRA210_IMAGE_FORMAT_T_R5G5B5A1,
+ TEGRA210_IMAGE_FORMAT_T_A4B4G4R4,
+ TEGRA210_IMAGE_FORMAT_T_A4R4G4B4,
+ TEGRA210_IMAGE_FORMAT_T_B4G4R4A4,
+ TEGRA210_IMAGE_FORMAT_T_R4G4B4A4,
+
+ TEGRA210_IMAGE_FORMAT_T_A8B8G8R8 = 64,
+ TEGRA210_IMAGE_FORMAT_T_A8R8G8B8,
+ TEGRA210_IMAGE_FORMAT_T_B8G8R8A8,
+ TEGRA210_IMAGE_FORMAT_T_R8G8B8A8,
+ TEGRA210_IMAGE_FORMAT_T_A2B10G10R10,
+ TEGRA210_IMAGE_FORMAT_T_A2R10G10B10,
+ TEGRA210_IMAGE_FORMAT_T_B10G10R10A2,
+ TEGRA210_IMAGE_FORMAT_T_R10G10B10A2,
+
+ TEGRA210_IMAGE_FORMAT_T_A8Y8U8V8 = 193,
+ TEGRA210_IMAGE_FORMAT_T_V8U8Y8A8,
+
+ TEGRA210_IMAGE_FORMAT_T_A2Y10U10V10 = 197,
+ TEGRA210_IMAGE_FORMAT_T_V10U10Y10A2,
+ TEGRA210_IMAGE_FORMAT_T_Y8_U8__Y8_V8,
+ TEGRA210_IMAGE_FORMAT_T_Y8_V8__Y8_U8,
+ TEGRA210_IMAGE_FORMAT_T_U8_Y8__V8_Y8,
+ TEGRA210_IMAGE_FORMAT_T_V8_Y8__U8_Y8,
+
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N444 = 224,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N444,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N444,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N420,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N420,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N420,
+ TEGRA210_IMAGE_FORMAT_T_X2LC10LB10LA10,
+ TEGRA210_IMAGE_FORMAT_T_A2R6R6R6R6R6,
+};
+
+#define TEGRA210_VIDEO_FMT(DATA_TYPE, BIT_WIDTH, MBUS_CODE, BPP, \
+ FORMAT, FOURCC) \
+{ \
+ TEGRA_IMAGE_DT_##DATA_TYPE, \
+ BIT_WIDTH, \
+ MEDIA_BUS_FMT_##MBUS_CODE, \
+ BPP, \
+ TEGRA210_IMAGE_FORMAT_##FORMAT, \
+ V4L2_PIX_FMT_##FOURCC, \
+}
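+
+/*
+ * For illustration, the first RAW10 entry in the table below expands to:
+ * { TEGRA_IMAGE_DT_RAW10, 10, MEDIA_BUS_FMT_SRGGB10_1X10, 2,
+ *   TEGRA210_IMAGE_FORMAT_T_R16_I, V4L2_PIX_FMT_SRGGB10 }
+ */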
+
+/* Tegra210 supported video formats */
+static const struct tegra_video_format tegra210_video_formats[] = {
+ /* RAW 8 */
+ TEGRA210_VIDEO_FMT(RAW8, 8, SRGGB8_1X8, 1, T_L8, SRGGB8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SGRBG8_1X8, 1, T_L8, SGRBG8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SGBRG8_1X8, 1, T_L8, SGBRG8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SBGGR8_1X8, 1, T_L8, SBGGR8),
+ /* RAW 10 */
+ TEGRA210_VIDEO_FMT(RAW10, 10, SRGGB10_1X10, 2, T_R16_I, SRGGB10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SGRBG10_1X10, 2, T_R16_I, SGRBG10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SGBRG10_1X10, 2, T_R16_I, SGBRG10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SBGGR10_1X10, 2, T_R16_I, SBGGR10),
+ /* RAW 12 */
+ TEGRA210_VIDEO_FMT(RAW12, 12, SRGGB12_1X12, 2, T_R16_I, SRGGB12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SGRBG12_1X12, 2, T_R16_I, SGRBG12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SGBRG12_1X12, 2, T_R16_I, SGBRG12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SBGGR12_1X12, 2, T_R16_I, SBGGR12),
+ /* RGB888 */
+ TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X24, 4, T_A8R8G8B8, XBGR32),
+ TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X32_PADHI, 4, T_A8B8G8R8,
+ RGBX32),
+ /* YUV422 */
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 2, T_U8_Y8__V8_Y8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_1X16, 2, T_V8_Y8__U8_Y8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_1X16, 2, T_Y8_U8__Y8_V8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_1X16, 2, T_Y8_V8__Y8_U8, UYVY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 1, T_Y8__V8U8_N422, NV16),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_2X8, 2, T_U8_Y8__V8_Y8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_2X8, 2, T_V8_Y8__U8_Y8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_2X8, 2, T_Y8_U8__Y8_V8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_2X8, 2, T_Y8_V8__Y8_U8, UYVY),
+};
+
+/* Tegra210 VI operations */
+static const struct tegra_vi_ops tegra210_vi_ops = {
+ .channel_host1x_syncpt_init = tegra210_channel_host1x_syncpt_init,
+ .channel_host1x_syncpt_free = tegra210_channel_host1x_syncpt_free,
+ .vi_fmt_align = tegra210_fmt_align,
+ .vi_start_streaming = tegra210_vi_start_streaming,
+ .vi_stop_streaming = tegra210_vi_stop_streaming,
+};
+
+/* Tegra210 VI SoC data */
+const struct tegra_vi_soc tegra210_vi_soc = {
+ .video_formats = tegra210_video_formats,
+ .nformats = ARRAY_SIZE(tegra210_video_formats),
+ .ops = &tegra210_vi_ops,
+ .hw_revision = 3,
+ .vi_max_channels = 6,
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
+ .default_video_format = &tegra210_video_formats[0],
+ .vi_max_clk_hz = 499200000,
+#else
+ .default_video_format = &tegra210_video_formats[4],
+ .vi_max_clk_hz = 998400000,
+#endif
+};
+
+/* Tegra210 CSI PHY register accessor */
+static void csi_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
+ u32 val)
+{
+ void __iomem *csi_pp_base;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+
+ writel_relaxed(val, csi_pp_base + addr);
+}
+
+/* Tegra210 CSI pixel parser register accessors */
+static void pp_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ writel_relaxed(val, csi_pp_base + offset + addr);
+}
+
+static u32 pp_read(struct tegra_csi *csi, u8 portno, u32 addr)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ return readl_relaxed(csi_pp_base + offset + addr);
+}
+
+/* Tegra210 CSI CIL A/B port register accessors */
+static void cil_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
+{
+ void __iomem *csi_cil_base;
+ unsigned int offset;
+
+ csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
+ TEGRA210_CSI_CIL_OFFSET;
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ writel_relaxed(val, csi_cil_base + offset + addr);
+}
+
+static u32 cil_read(struct tegra_csi *csi, u8 portno, u32 addr)
+{
+ void __iomem *csi_cil_base;
+ unsigned int offset;
+
+ csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
+ TEGRA210_CSI_CIL_OFFSET;
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ return readl_relaxed(csi_cil_base + offset + addr);
+}
+
+/* Tegra210 CSI test pattern generator register accessor */
+static void tpg_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
+ u32 val)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET +
+ TEGRA210_CSI_TPG_OFFSET;
+
+ writel_relaxed(val, csi_pp_base + offset + addr);
+}
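+
+/*
+ * Putting the accessors together (assuming CSI_PORTS_PER_BRICK is 2, as
+ * implied by the A/B CIL layout): for port 3, i.e. the second port of
+ * brick 1, pixel parser registers are accessed at iomem + 0x800 + 0x34,
+ * CIL registers at iomem + 0x800 + 0x0f4 + 0x34, and TPG registers at
+ * iomem + 0x800 + 0x34 + 0x18c.
+ */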
+
+/*
+ * Tegra210 CSI operations
+ */
+static void tegra210_csi_port_recover(struct tegra_csi_channel *csi_chan,
+ u8 portno)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ u32 val;
+
+ /*
+	 * Recover the CSI hardware from capture errors by issuing a software
+	 * reset to the CSI CIL sensor logic and pixel parser, and clear the
+	 * errors so that the next streaming starts with a clean capture.
+ */
+ val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
+
+ if (csi_chan->numlanes == 4) {
+ /* reset CSI CIL sensor */
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ /*
+		 * SW_STATUS_RESET resets all status bits of the PPA, PPB,
+		 * CILA and CILB status registers as well as the debug
+		 * counters, so it can be used only when the CSI brick is
+		 * in x4 mode.
+ */
+ csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x1);
+
+ /* sleep for 20 clock cycles to drain the FIFO */
+ usleep_range(10, 20);
+
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+ csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x0);
+ } else {
+		/* reset CSI CIL sensor */
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ usleep_range(10, 20);
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+
+ /* clear the errors */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS,
+ 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ }
+}
+
+static void tegra210_csi_error_recover(struct tegra_csi_channel *csi_chan)
+{
+ u8 *portnos = csi_chan->csi_port_nums;
+ int i;
+
+ for (i = 0; i < csi_chan->numgangports; i++)
+ tegra210_csi_port_recover(csi_chan, portnos[i]);
+}
+
+static int
+tegra210_csi_port_start_streaming(struct tegra_csi_channel *csi_chan,
+ u8 portno)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ u8 clk_settle_time = 0;
+ u8 ths_settle_time = 10;
+ u32 val;
+
+ if (!csi_chan->pg_mode)
+ tegra_csi_calc_settle_time(csi_chan, portno, &clk_settle_time,
+ &ths_settle_time);
+
+ csi_write(csi, portno, TEGRA_CSI_CLKEN_OVERRIDE, 0);
+
+ /* clean up status */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+
+ /* CIL PHY registers setup */
+ cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
+ cil_write(csi, portno, TEGRA_CSI_CIL_PHY_CONTROL,
+ FIELD_PREP(CLK_SETTLE_MASK, clk_settle_time) |
+ FIELD_PREP(THS_SETTLE_MASK, ths_settle_time));
+
+ /*
+	 * The CSI unit supports connection of up to six cameras and is
+	 * organized as three identical instances (bricks) of two MIPI
+	 * support blocks. Each brick has a 4-lane interface that can be
+	 * configured either as a single camera using all 4 lanes or as
+	 * two cameras with 2 lanes each.
+ */
+ if (csi_chan->numlanes == 4) {
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno + 1, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+
+ cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0,
+ BRICK_CLOCK_A_4X);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_PHY_CONTROL,
+ FIELD_PREP(CLK_SETTLE_MASK, clk_settle_time) |
+ FIELD_PREP(THS_SETTLE_MASK, ths_settle_time));
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
+ CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_ENABLE);
+ } else {
+ val = ((portno & 1) == PORT_A) ?
+ CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_NOP :
+ CSI_B_PHY_CIL_ENABLE | CSI_A_PHY_CIL_NOP;
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
+ }
+
+ /* CSI pixel parser registers setup */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_RST);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK, 0x0);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL0,
+ CSI_PP_PACKET_HEADER_SENT |
+ CSI_PP_DATA_IDENTIFIER_ENABLE |
+ CSI_PP_WORD_COUNT_SELECT_HEADER |
+ CSI_PP_CRC_CHECK_ENABLE | CSI_PP_WC_CHECK |
+ CSI_PP_OUTPUT_FORMAT_STORE | CSI_PPA_PAD_LINE_NOPAD |
+ CSI_PP_HEADER_EC_DISABLE | CSI_PPA_PAD_FRAME_NOPAD |
+ (portno & 1));
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL1,
+ (0x1 << CSI_PP_TOP_FIELD_FRAME_OFFSET) |
+ (0x1 << CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET));
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_GAP,
+ 0x14 << PP_FRAME_MIN_GAP_OFFSET);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME, 0x0);
+ pp_write(csi, portno, TEGRA_CSI_INPUT_STREAM_CONTROL,
+ (0x3f << CSI_SKIP_PACKET_THRESHOLD_OFFSET) |
+ (csi_chan->numlanes - 1));
+
+ /* TPG setup */
+ if (csi_chan->pg_mode) {
+ tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
+ ((csi_chan->pg_mode - 1) << PG_MODE_OFFSET) |
+ PG_ENABLE);
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLANK,
+ csi_chan->v_blank << PG_VBLANK_OFFSET |
+ csi_chan->h_blank);
+ tpg_write(csi, portno, TEGRA_CSI_PG_PHASE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ,
+ (0x10 << PG_RED_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_RED_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ_RATE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ,
+ (0x10 << PG_GREEN_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_GREEN_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ_RATE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ,
+ (0x10 << PG_BLUE_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_BLUE_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ_RATE, 0x0);
+ }
+
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_ENABLE);
+
+ return 0;
+}
+
+static void
+tegra210_csi_port_stop_streaming(struct tegra_csi_channel *csi_chan, u8 portno)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ u32 val;
+
+ val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
+
+ dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, val);
+
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_DISABLE);
+
+ if (csi_chan->pg_mode) {
+ tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
+ PG_DISABLE);
+ return;
+ }
+
+ if (csi_chan->numlanes == 4) {
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
+ CSI_A_PHY_CIL_DISABLE |
+ CSI_B_PHY_CIL_DISABLE);
+ } else {
+ val = ((portno & 1) == PORT_A) ?
+ CSI_A_PHY_CIL_DISABLE | CSI_B_PHY_CIL_NOP :
+ CSI_B_PHY_CIL_DISABLE | CSI_A_PHY_CIL_NOP;
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
+ }
+}
+
+static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
+{
+ u8 *portnos = csi_chan->csi_port_nums;
+ int ret, i;
+
+ for (i = 0; i < csi_chan->numgangports; i++) {
+ ret = tegra210_csi_port_start_streaming(csi_chan, portnos[i]);
+ if (ret)
+ goto stream_start_fail;
+ }
+
+ return 0;
+
+stream_start_fail:
+ for (i = i - 1; i >= 0; i--)
+ tegra210_csi_port_stop_streaming(csi_chan, portnos[i]);
+
+ return ret;
+}
+
+static void tegra210_csi_stop_streaming(struct tegra_csi_channel *csi_chan)
+{
+ u8 *portnos = csi_chan->csi_port_nums;
+ int i;
+
+ for (i = 0; i < csi_chan->numgangports; i++)
+ tegra210_csi_port_stop_streaming(csi_chan, portnos[i]);
+}
+
+/*
+ * Tegra210 CSI TPG frame rate table with horizontal and vertical
+ * blanking intervals for the corresponding format and resolution.
+ * The blanking intervals are values tuned by the design team for the
+ * maximum TPG clock rate.
+ */
+static const struct tpg_framerate tegra210_tpg_frmrate_table[] = {
+ {
+ .frmsize = { 1280, 720 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 120,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1920, 1080 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 60,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 3840, 2160 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 20,
+ .h_blank = 8,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1280, 720 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 60,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1920, 1080 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 30,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 3840, 2160 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 8,
+ .h_blank = 8,
+ .v_blank = 8,
+ },
+};
+
+static const char * const tegra210_csi_cil_clks[] = {
+ "csi",
+ "cilab",
+ "cilcd",
+ "cile",
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
+ "csi_tpg",
+#endif
+};
+
+/* Tegra210 CSI operations */
+static const struct tegra_csi_ops tegra210_csi_ops = {
+ .csi_start_streaming = tegra210_csi_start_streaming,
+ .csi_stop_streaming = tegra210_csi_stop_streaming,
+ .csi_err_recover = tegra210_csi_error_recover,
+};
+
+/* Tegra210 CSI SoC data */
+const struct tegra_csi_soc tegra210_csi_soc = {
+ .ops = &tegra210_csi_ops,
+ .csi_max_channels = 6,
+ .clk_names = tegra210_csi_cil_clks,
+ .num_clks = ARRAY_SIZE(tegra210_csi_cil_clks),
+ .tpg_frmrate_table = tegra210_tpg_frmrate_table,
+ .tpg_frmrate_table_size = ARRAY_SIZE(tegra210_tpg_frmrate_table),
+};
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
new file mode 100644
index 000000000000..c9276ff76157
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -0,0 +1,1981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "vi.h"
+#include "video.h"
+
+#define MAX_CID_CONTROLS 3
+
+/**
+ * struct tegra_vi_graph_entity - Entity in the video graph
+ *
+ * @asd: subdev asynchronous registration information
+ * @entity: media entity from the corresponding V4L2 subdev
+ * @subdev: V4L2 subdev
+ */
+struct tegra_vi_graph_entity {
+ struct v4l2_async_connection asd;
+ struct media_entity *entity;
+ struct v4l2_subdev *subdev;
+};
+
+static inline struct tegra_vi *
+host1x_client_to_vi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_vi, client);
+}
+
+static inline struct tegra_channel_buffer *
+to_tegra_channel_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct tegra_channel_buffer, buf);
+}
+
+static inline struct tegra_vi_graph_entity *
+to_tegra_vi_graph_entity(struct v4l2_async_connection *asd)
+{
+ return container_of(asd, struct tegra_vi_graph_entity, asd);
+}
+
+static int tegra_get_format_idx_by_code(struct tegra_vi *vi,
+ unsigned int code,
+ unsigned int offset)
+{
+ unsigned int i;
+
+ for (i = offset; i < vi->soc->nformats; ++i) {
+ if (vi->soc->video_formats[i].code == code)
+ return i;
+ }
+
+ return -1;
+}
+
+static u32 tegra_get_format_fourcc_by_idx(struct tegra_vi *vi,
+ unsigned int index)
+{
+ if (index >= vi->soc->nformats)
+ return -EINVAL;
+
+ return vi->soc->video_formats[index].fourcc;
+}
+
+static const struct tegra_video_format *
+tegra_get_format_by_fourcc(struct tegra_vi *vi, u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vi->soc->nformats; ++i) {
+ if (vi->soc->video_formats[i].fourcc == fourcc)
+ return &vi->soc->video_formats[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * videobuf2 queue operations
+ */
+
+static int tegra_channel_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (*nplanes)
+ return sizes[0] < chan->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = chan->format.sizeimage;
+ alloc_devs[0] = chan->vi->dev;
+
+ if (chan->vi->ops->channel_queue_setup)
+ chan->vi->ops->channel_queue_setup(chan);
+
+ return 0;
+}
+
+static int tegra_channel_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
+ unsigned long size = chan->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(chan->video.v4l2_dev,
+ "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ buf->chan = chan;
+ buf->addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ return 0;
+}
+
+static void tegra_channel_buffer_queue(struct vb2_buffer *vb)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
+
+ /* put buffer into the capture queue */
+ spin_lock(&chan->start_lock);
+ list_add_tail(&buf->queue, &chan->capture);
+ spin_unlock(&chan->start_lock);
+
+	/* wake up the capture kthread */
+ wake_up_interruptible(&chan->start_wait);
+}
+
+struct v4l2_subdev *
+tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan)
+{
+ struct media_pad *pad;
+
+ pad = media_pad_remote_pad_first(&chan->pad);
+ if (!pad)
+ return NULL;
+
+ return media_entity_to_v4l2_subdev(pad->entity);
+}
+
+/*
+ * Walk up the chain until the initial source (e.g. an image sensor) is found.
+ */
+struct v4l2_subdev *
+tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan)
+{
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ struct media_entity *entity;
+
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ if (!subdev)
+ return NULL;
+
+ pad = &subdev->entity.pads[0];
+ while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
+ pad = media_pad_remote_pad_first(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+ entity = pad->entity;
+ pad = &entity->pads[0];
+ subdev = media_entity_to_v4l2_subdev(entity);
+ }
+
+ return subdev;
+}
+
+static int tegra_channel_enable_stream(struct tegra_vi_channel *chan)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ ret = v4l2_subdev_call(subdev, video, s_stream, true);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ return 0;
+}
+
+static int tegra_channel_disable_stream(struct tegra_vi_channel *chan)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ ret = v4l2_subdev_call(subdev, video, s_stream, false);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ return 0;
+}
+
+int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on)
+{
+ int ret;
+
+ if (on)
+ ret = tegra_channel_enable_stream(chan);
+ else
+ ret = tegra_channel_disable_stream(chan);
+
+ return ret;
+}
+
+void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
+ enum vb2_buffer_state state)
+{
+ struct tegra_channel_buffer *buf, *nbuf;
+
+ spin_lock(&chan->start_lock);
+ list_for_each_entry_safe(buf, nbuf, &chan->capture, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ spin_unlock(&chan->start_lock);
+
+ spin_lock(&chan->done_lock);
+ list_for_each_entry_safe(buf, nbuf, &chan->done, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ spin_unlock(&chan->done_lock);
+}
+
+static int tegra_channel_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(chan->vi->dev);
+ if (ret < 0) {
+ dev_err(chan->vi->dev, "failed to get runtime PM: %d\n", ret);
+ return ret;
+ }
+
+ ret = chan->vi->ops->vi_start_streaming(vq, count);
+ if (ret < 0)
+ pm_runtime_put(chan->vi->dev);
+
+ return ret;
+}
+
+static void tegra_channel_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ chan->vi->ops->vi_stop_streaming(vq);
+ pm_runtime_put(chan->vi->dev);
+}
+
+static const struct vb2_ops tegra_channel_queue_qops = {
+ .queue_setup = tegra_channel_queue_setup,
+ .buf_prepare = tegra_channel_buffer_prepare,
+ .buf_queue = tegra_channel_buffer_queue,
+ .start_streaming = tegra_channel_start_streaming,
+ .stop_streaming = tegra_channel_stop_streaming,
+};
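+
+/*
+ * Buffer lifecycle with the ops above: queue_setup sizes the planes,
+ * buf_prepare validates each buffer and records its DMA address,
+ * buf_queue adds the buffer to the channel capture list and wakes the
+ * start kthread, and start/stop_streaming hand off to the SoC-specific
+ * vi_start_streaming()/vi_stop_streaming() implementations.
+ */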
+
+/*
+ * V4L2 ioctl operations
+ */
+static int tegra_channel_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ strscpy(cap->driver, "tegra-video", sizeof(cap->driver));
+ strscpy(cap->card, chan->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(chan->vi->dev));
+
+ return 0;
+}
+
+static int tegra_channel_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ return v4l2_g_parm_cap(&chan->video, subdev, a);
+}
+
+static int tegra_channel_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ return v4l2_s_parm_cap(&chan->video, subdev, a);
+}
+
+static int tegra_channel_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *sizes)
+{
+ int ret;
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = sizes->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, sizes->pixel_format);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fse.code = fmtinfo->code;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
+ sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ sizes->discrete.width = fse.max_width;
+ sizes->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int tegra_channel_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *ivals)
+{
+ int ret;
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = ivals->index,
+ .width = ivals->width,
+ .height = ivals->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, ivals->pixel_format);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fie.code = fmtinfo->code;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ ivals->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ ivals->discrete.numerator = fie.interval.numerator;
+ ivals->discrete.denominator = fie.interval.denominator;
+
+ return 0;
+}
+
+static int tegra_channel_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ unsigned int index = 0, i;
+ unsigned long *fmts_bitmap = chan->tpg_fmts_bitmap;
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ fmts_bitmap = chan->fmts_bitmap;
+
+ if (f->index >= bitmap_weight(fmts_bitmap, MAX_FORMAT_NUM))
+ return -EINVAL;
+
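+	/*
+	 * Walk the bitmap of supported formats: after f->index + 1
+	 * iterations of the loop below, index - 1 is the bit position of
+	 * the (f->index + 1)-th supported format, i.e. the requested entry.
+	 */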
+ for (i = 0; i < f->index + 1; i++, index++)
+ index = find_next_bit(fmts_bitmap, MAX_FORMAT_NUM, index);
+
+ f->pixelformat = tegra_get_format_fourcc_by_idx(chan->vi, index - 1);
+
+ return 0;
+}
+
+static int tegra_channel_get_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ format->fmt.pix = chan->format;
+
+ return 0;
+}
+
+static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ struct v4l2_pix_format *pix)
+{
+ const struct tegra_video_format *fmtinfo;
+ static struct lock_class_key key;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_subdev_state *sd_state;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ struct v4l2_rect *try_crop;
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+ return -ENODEV;
+
+ /*
+ * FIXME: Drop this call, drivers are not supposed to use
+ * __v4l2_subdev_state_alloc().
+ */
+ sd_state = __v4l2_subdev_state_alloc(subdev, "tegra:state->lock",
+ &key);
+ if (IS_ERR(sd_state))
+ return PTR_ERR(sd_state);
+ /*
+	 * Retrieve the format information and, if the requested format isn't
+	 * supported, keep the current format.
+ */
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
+ if (!fmtinfo) {
+ pix->pixelformat = chan->format.pixelformat;
+ pix->colorspace = chan->format.colorspace;
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi,
+ pix->pixelformat);
+ }
+
+ pix->field = V4L2_FIELD_NONE;
+ fmt.pad = 0;
+ v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
+
+ /*
+	 * Attempt to obtain the frame size from the subdev.
+	 * If not available, try to get the crop boundary from the subdev.
+ */
+ try_crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ fse.code = fmtinfo->code;
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, sd_state, &fse);
+ if (ret) {
+ if (!v4l2_subdev_has_op(subdev, pad, get_selection)) {
+ try_crop->width = 0;
+ try_crop->height = 0;
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+ if (ret)
+ return -EINVAL;
+
+ try_crop->width = sdsel.r.width;
+ try_crop->height = sdsel.r.height;
+ }
+ } else {
+ try_crop->width = fse.max_width;
+ try_crop->height = fse.max_height;
+ }
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+ chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
+
+ __v4l2_subdev_state_free(sd_state);
+
+ return 0;
+}
+
+static int tegra_channel_try_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ return __tegra_channel_try_format(chan, &format->fmt.pix);
+}
+
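+/*
+ * Captures wider than 1920 pixels use all of the channel's ports ganged
+ * together (e.g. an x8 source split across two x4 ports, as set up in the
+ * SoC-specific capture code), while narrower captures use a single port.
+ */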
+static void tegra_channel_update_gangports(struct tegra_vi_channel *chan)
+{
+ if (chan->format.width <= 1920)
+ chan->numgangports = 1;
+ else
+ chan->numgangports = chan->totalports;
+}
+
+static int tegra_channel_set_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_subdev *subdev;
+ struct v4l2_pix_format *pix = &format->fmt.pix;
+ int ret;
+
+ if (vb2_is_busy(&chan->queue))
+ return -EBUSY;
+
+ /* get supported format by try_fmt */
+ ret = __tegra_channel_try_format(chan, pix);
+ if (ret)
+ return ret;
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
+
+ fmt.pad = 0;
+ v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+ chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
+
+ chan->format = *pix;
+ chan->fmtinfo = fmtinfo;
+ tegra_channel_update_gangports(chan);
+
+ return 0;
+}
+
+static int tegra_channel_set_subdev_active_fmt(struct tegra_vi_channel *chan)
+{
+ int ret, index;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ /*
+	 * Initialize the channel format to the sub-device active format if
+	 * there is a corresponding match in the Tegra supported video formats.
+ */
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ index = tegra_get_format_idx_by_code(chan->vi, fmt.format.code, 0);
+ if (index < 0)
+ return -EINVAL;
+
+ chan->fmtinfo = &chan->vi->soc->video_formats[index];
+ v4l2_fill_pix_format(&chan->format, &fmt.format);
+ chan->format.pixelformat = chan->fmtinfo->fourcc;
+ chan->format.bytesperline = chan->format.width * chan->fmtinfo->bpp;
+ chan->format.sizeimage = chan->format.bytesperline *
+ chan->format.height;
+ chan->vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
+ tegra_channel_update_gangports(chan);
+
+ return 0;
+}
+
+static int
+tegra_channel_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_event_subscribe(fh, sub, 4, NULL);
+ }
+
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static int tegra_channel_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ };
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, get_selection))
+ return -ENOTTY;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ /*
+	 * Try the get_selection operation and fall back to get_fmt if it is
+	 * not implemented.
+ */
+ ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+ if (!ret)
+ sel->r = sdsel.r;
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = fmt.format.width;
+ sel->r.height = fmt.format.height;
+
+ return 0;
+}
+
+static int tegra_channel_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ int ret;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, set_selection))
+ return -ENOTTY;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vb2_is_busy(&chan->queue))
+ return -EBUSY;
+
+ ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
+ if (!ret) {
+ sel->r = sdsel.r;
+ /*
+		 * The subdev active format resolution may have changed during
+		 * the set_selection operation, so update the channel format
+		 * to the sub-device active format.
+ */
+ return tegra_channel_set_subdev_active_fmt(chan);
+ }
+
+ return ret;
+}
+
+static int tegra_channel_g_edid(struct file *file, void *fh,
+ struct v4l2_edid *edid)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, get_edid))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, get_edid, edid);
+}
+
+static int tegra_channel_s_edid(struct file *file, void *fh,
+ struct v4l2_edid *edid)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, set_edid))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, set_edid, edid);
+}
+
+static int tegra_channel_g_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, g_dv_timings))
+ return -ENOTTY;
+
+ return v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
+ pad, g_dv_timings, 0, timings);
+}
+
+static int tegra_channel_s_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ struct v4l2_bt_timings *bt = &timings->bt;
+ struct v4l2_dv_timings curr_timings;
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, s_dv_timings))
+ return -ENOTTY;
+
+ ret = tegra_channel_g_dv_timings(file, fh, &curr_timings);
+ if (ret)
+ return ret;
+
+ if (v4l2_match_dv_timings(timings, &curr_timings, 0, false))
+ return 0;
+
+ if (vb2_is_busy(&chan->queue))
+ return -EBUSY;
+
+ ret = v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
+ pad, s_dv_timings, 0, timings);
+ if (ret)
+ return ret;
+
+ chan->format.width = bt->width;
+ chan->format.height = bt->height;
+ chan->format.bytesperline = bt->width * chan->fmtinfo->bpp;
+ chan->format.sizeimage = chan->format.bytesperline * bt->height;
+ chan->vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
+ tegra_channel_update_gangports(chan);
+
+ return 0;
+}
+
+static int tegra_channel_query_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, query_dv_timings))
+ return -ENOTTY;
+
+ return v4l2_device_call_until_err(chan->video.v4l2_dev, 0,
+ pad, query_dv_timings, 0, timings);
+}
+
+static int tegra_channel_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, enum_dv_timings))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, enum_dv_timings, timings);
+}
+
+static int tegra_channel_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, dv_timings_cap))
+ return -ENOTTY;
+
+ return v4l2_subdev_call(subdev, pad, dv_timings_cap, cap);
+}
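
For illustration only (not part of this patch), the DV-timings handlers above are what a capture application exercises when it detects and applies the incoming signal timings. A minimal userspace sketch might look as follows; the "/dev/video0" node name is an assumption and error handling is kept to the bare minimum:

/* hedged userspace sketch: query the detected timings, then program them */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
    struct v4l2_dv_timings timings;
    int fd = open("/dev/video0", O_RDWR);

    if (fd < 0)
        return 1;

    memset(&timings, 0, sizeof(timings));
    /* ask the source subdev (e.g. an HDMI receiver) what it currently sees */
    if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) < 0) {
        perror("VIDIOC_QUERY_DV_TIMINGS");
        close(fd);
        return 1;
    }

    /* program the detected timings; the driver updates the active format */
    if (ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings) < 0) {
        perror("VIDIOC_S_DV_TIMINGS");
        close(fd);
        return 1;
    }

    printf("timings applied: %ux%u\n", timings.bt.width, timings.bt.height);
    close(fd);
    return 0;
}
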
+
+static int tegra_channel_log_status(struct file *file, void *fh)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ v4l2_device_call_all(chan->video.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
+static int tegra_channel_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ if (inp->index)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ strscpy(inp->name, subdev->name, sizeof(inp->name));
+ if (v4l2_subdev_has_op(subdev, pad, dv_timings_cap))
+ inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+
+ return 0;
+}
+
+static int tegra_channel_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int tegra_channel_s_input(struct file *file, void *priv,
+ unsigned int input)
+{
+ if (input > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
+ .vidioc_querycap = tegra_channel_querycap,
+ .vidioc_g_parm = tegra_channel_g_parm,
+ .vidioc_s_parm = tegra_channel_s_parm,
+ .vidioc_enum_framesizes = tegra_channel_enum_framesizes,
+ .vidioc_enum_frameintervals = tegra_channel_enum_frameintervals,
+ .vidioc_enum_fmt_vid_cap = tegra_channel_enum_format,
+ .vidioc_g_fmt_vid_cap = tegra_channel_get_format,
+ .vidioc_s_fmt_vid_cap = tegra_channel_set_format,
+ .vidioc_try_fmt_vid_cap = tegra_channel_try_format,
+ .vidioc_enum_input = tegra_channel_enum_input,
+ .vidioc_g_input = tegra_channel_g_input,
+ .vidioc_s_input = tegra_channel_s_input,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_subscribe_event = tegra_channel_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_selection = tegra_channel_g_selection,
+ .vidioc_s_selection = tegra_channel_s_selection,
+ .vidioc_g_edid = tegra_channel_g_edid,
+ .vidioc_s_edid = tegra_channel_s_edid,
+ .vidioc_g_dv_timings = tegra_channel_g_dv_timings,
+ .vidioc_s_dv_timings = tegra_channel_s_dv_timings,
+ .vidioc_query_dv_timings = tegra_channel_query_dv_timings,
+ .vidioc_enum_dv_timings = tegra_channel_enum_dv_timings,
+ .vidioc_dv_timings_cap = tegra_channel_dv_timings_cap,
+ .vidioc_log_status = tegra_channel_log_status,
+};
+
+/*
+ * V4L2 file operations
+ */
+static const struct v4l2_file_operations tegra_channel_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
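
As a hedged sketch (not part of this patch) of how the vb2-backed ioctl and file operations above are consumed, the following userspace MMAP streaming loop requests buffers, queues them, and dequeues a few frames. The "/dev/video0" node name and the frame count are assumptions, and most error handling is omitted for brevity:

/* hedged userspace sketch: minimal MMAP streaming against the ops above */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    struct v4l2_requestbuffers req = {
        .count = 4,
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .memory = V4L2_MEMORY_MMAP,
    };
    int fd = open("/dev/video0", O_RDWR);
    unsigned int i;

    if (fd < 0 || ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
        return 1;

    /* map and queue every allocated buffer */
    for (i = 0; i < req.count; i++) {
        struct v4l2_buffer buf = {
            .index = i,
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .memory = V4L2_MEMORY_MMAP,
        };

        if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
            return 1;
        if (mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
                 fd, buf.m.offset) == MAP_FAILED)
            return 1;
        if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
            return 1;
    }

    if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
        return 1;

    /* capture a handful of frames and recycle the buffers */
    for (i = 0; i < 10; i++) {
        struct v4l2_buffer buf = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .memory = V4L2_MEMORY_MMAP,
        };

        if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
            break;
        printf("frame %u: %u bytes\n", buf.sequence, buf.bytesused);
        ioctl(fd, VIDIOC_QBUF, &buf);
    }

    ioctl(fd, VIDIOC_STREAMOFF, &type);
    close(fd);
    return 0;
}
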
+
+/*
+ * V4L2 control operations
+ */
+static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tegra_vi_channel *chan = container_of(ctrl->handler,
+ struct tegra_vi_channel,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ /* pattern change takes effect on next stream */
+ chan->pg_mode = ctrl->val + 1;
+ break;
+ case V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY:
+ chan->syncpt_timeout_retry = ctrl->val;
+ break;
+ case V4L2_CID_HFLIP:
+ chan->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ chan->vflip = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vi_ctrl_ops = {
+ .s_ctrl = vi_s_ctrl,
+};
+
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
+static const char *const vi_pattern_strings[] = {
+ "Black/White Direct Mode",
+ "Color Patch Mode",
+};
+#else
+static const struct v4l2_ctrl_config syncpt_timeout_ctrl = {
+ .ops = &vi_ctrl_ops,
+ .id = V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY,
+ .name = "Syncpt timeout retry",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 10000,
+ .step = 1,
+ .def = 5,
+};
+#endif
+
+static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
+{
+ int ret;
+
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
+ /* add test pattern control handler to v4l2 device */
+ v4l2_ctrl_new_std_menu_items(&chan->ctrl_handler, &vi_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(vi_pattern_strings) - 1,
+ 0, 0, vi_pattern_strings);
+ if (chan->ctrl_handler.error) {
+ dev_err(chan->vi->dev, "failed to add TPG ctrl handler: %d\n",
+ chan->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ return chan->ctrl_handler.error;
+ }
+#else
+ struct v4l2_subdev *subdev;
+
+ /* custom control */
+ v4l2_ctrl_new_custom(&chan->ctrl_handler, &syncpt_timeout_ctrl, NULL);
+ if (chan->ctrl_handler.error) {
+ dev_err(chan->vi->dev, "failed to add %s ctrl handler: %d\n",
+ syncpt_timeout_ctrl.name,
+ chan->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ return chan->ctrl_handler.error;
+ }
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+ return -ENODEV;
+
+ ret = v4l2_ctrl_add_handler(&chan->ctrl_handler, subdev->ctrl_handler,
+ NULL, true);
+ if (ret < 0) {
+ dev_err(chan->vi->dev,
+ "failed to add subdev %s ctrl handler: %d\n",
+ subdev->name, ret);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ return ret;
+ }
+
+ if (chan->vi->soc->has_h_v_flip) {
+ v4l2_ctrl_new_std(&chan->ctrl_handler, &vi_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&chan->ctrl_handler, &vi_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
+
+#endif
+
+ /* setup the controls */
+ ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler);
+ if (ret < 0) {
+ dev_err(chan->vi->dev,
+ "failed to setup v4l2 ctrl handler: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
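
For illustration only (not part of this patch), the custom "Syncpt timeout retry" control registered above (in the non-TPG build) is set from userspace through the standard control ioctls. The CID value below mirrors the definition in vi.h, and "/dev/video0" is an assumed node name:

/* hedged userspace sketch: adjust the syncpt timeout retry count */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

#define V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY (V4L2_CTRL_CLASS_CAMERA | 0x1001)

int main(void)
{
    struct v4l2_control ctrl = {
        .id = V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY,
        .value = 5,     /* the control accepts values in the range 1..10000 */
    };
    int fd = open("/dev/video0", O_RDWR);
    int ret;

    if (fd < 0)
        return 1;

    ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
    close(fd);
    return ret ? 1 : 0;
}
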
+
+/* VI only supports 2 formats in TPG mode */
+static void vi_tpg_fmts_bitmap_init(struct tegra_vi_channel *chan)
+{
+ int index;
+
+ bitmap_zero(chan->tpg_fmts_bitmap, MAX_FORMAT_NUM);
+
+ index = tegra_get_format_idx_by_code(chan->vi,
+ MEDIA_BUS_FMT_SRGGB10_1X10, 0);
+ bitmap_set(chan->tpg_fmts_bitmap, index, 1);
+
+ index = tegra_get_format_idx_by_code(chan->vi,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ 0);
+ bitmap_set(chan->tpg_fmts_bitmap, index, 1);
+}
+
+static int vi_fmts_bitmap_init(struct tegra_vi_channel *chan)
+{
+ int index, ret, match_code = 0;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ bitmap_zero(chan->fmts_bitmap, MAX_FORMAT_NUM);
+
+ /*
+	 * Set the bitmap bits for all formats matched between the available
+	 * media bus formats of the sub-device and the pre-defined
+	 * Tegra-supported video formats.
+ */
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ while (1) {
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &code);
+ if (ret < 0)
+ break;
+
+ index = tegra_get_format_idx_by_code(chan->vi, code.code, 0);
+ while (index >= 0) {
+ bitmap_set(chan->fmts_bitmap, index, 1);
+ if (!match_code)
+ match_code = code.code;
+ /* look for other formats with same mbus code */
+ index = tegra_get_format_idx_by_code(chan->vi,
+ code.code,
+ index + 1);
+ }
+
+ code.index++;
+ }
+
+ /*
+	 * Set the bitmap bit corresponding to the default Tegra video format
+	 * if there are no matching formats.
+ */
+ if (!match_code) {
+ match_code = chan->vi->soc->default_video_format->code;
+ index = tegra_get_format_idx_by_code(chan->vi, match_code, 0);
+ if (WARN_ON(index < 0))
+ return -EINVAL;
+
+ bitmap_set(chan->fmts_bitmap, index, 1);
+ }
+
+ /* initialize channel format to the sub-device active format */
+ tegra_channel_set_subdev_active_fmt(chan);
+
+ return 0;
+}
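
The formats whose bits are set in this bitmap are what the enum-format handler later reports to applications. As a hedged sketch (not part of this patch), an application can list them with the standard ioctl; "/dev/video0" is an assumed node name:

/* hedged userspace sketch: enumerate the capture formats exposed by the bitmap */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
    struct v4l2_fmtdesc fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
    int fd = open("/dev/video0", O_RDWR);

    if (fd < 0)
        return 1;

    /* walk the format list until the driver returns an error (EINVAL) */
    while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
        printf("%u: %.4s (%s)\n", fmt.index,
               (const char *)&fmt.pixelformat,
               (const char *)fmt.description);
        fmt.index++;
    }

    close(fd);
    return 0;
}
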
+
+static void tegra_channel_cleanup(struct tegra_vi_channel *chan)
+{
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ media_entity_cleanup(&chan->video.entity);
+ chan->vi->ops->channel_host1x_syncpt_free(chan);
+ mutex_destroy(&chan->video_lock);
+}
+
+void tegra_channels_cleanup(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan, *tmp;
+
+ if (!vi)
+ return;
+
+ list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
+ tegra_channel_cleanup(chan);
+ list_del(&chan->list);
+ kfree(chan);
+ }
+}
+
+static int tegra_channel_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct tegra_video_device *vid = dev_get_drvdata(vi->client.host);
+ int ret;
+
+ mutex_init(&chan->video_lock);
+ INIT_LIST_HEAD(&chan->capture);
+ INIT_LIST_HEAD(&chan->done);
+ spin_lock_init(&chan->start_lock);
+ spin_lock_init(&chan->done_lock);
+ init_waitqueue_head(&chan->start_wait);
+ init_waitqueue_head(&chan->done_wait);
+
+ /* initialize the video format */
+ chan->fmtinfo = chan->vi->soc->default_video_format;
+ chan->format.pixelformat = chan->fmtinfo->fourcc;
+ chan->format.colorspace = V4L2_COLORSPACE_SRGB;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.width = TEGRA_DEF_WIDTH;
+ chan->format.height = TEGRA_DEF_HEIGHT;
+ chan->format.bytesperline = TEGRA_DEF_WIDTH * chan->fmtinfo->bpp;
+ chan->format.sizeimage = chan->format.bytesperline * TEGRA_DEF_HEIGHT;
+ vi->ops->vi_fmt_align(&chan->format, chan->fmtinfo->bpp);
+
+ ret = vi->ops->channel_host1x_syncpt_init(chan);
+ if (ret)
+ return ret;
+
+ /* initialize the media entity */
+ chan->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&chan->video.entity, 1, &chan->pad);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize media entity: %d\n", ret);
+ goto free_syncpts;
+ }
+
+ ret = v4l2_ctrl_handler_init(&chan->ctrl_handler, MAX_CID_CONTROLS);
+ if (chan->ctrl_handler.error) {
+ dev_err(vi->dev,
+ "failed to initialize v4l2 ctrl handler: %d\n", ret);
+ goto cleanup_media;
+ }
+
+ /* initialize the video_device */
+ chan->video.fops = &tegra_channel_fops;
+ chan->video.v4l2_dev = &vid->v4l2_dev;
+ chan->video.release = video_device_release_empty;
+ chan->video.queue = &chan->queue;
+ snprintf(chan->video.name, sizeof(chan->video.name), "%s-%s-%u",
+ dev_name(vi->dev), "output", chan->portnos[0]);
+ chan->video.vfl_type = VFL_TYPE_VIDEO;
+ chan->video.vfl_dir = VFL_DIR_RX;
+ chan->video.ioctl_ops = &tegra_channel_ioctl_ops;
+ chan->video.ctrl_handler = &chan->ctrl_handler;
+ chan->video.lock = &chan->video_lock;
+ chan->video.device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(&chan->video, chan);
+
+ chan->queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ chan->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ chan->queue.lock = &chan->video_lock;
+ chan->queue.drv_priv = chan;
+ chan->queue.buf_struct_size = sizeof(struct tegra_channel_buffer);
+ chan->queue.ops = &tegra_channel_queue_qops;
+ chan->queue.mem_ops = &vb2_dma_contig_memops;
+ chan->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ chan->queue.min_queued_buffers = 2;
+ chan->queue.dev = vi->dev;
+ ret = vb2_queue_init(&chan->queue);
+ if (ret < 0) {
+ dev_err(vi->dev, "failed to initialize vb2 queue: %d\n", ret);
+ goto free_v4l2_ctrl_hdl;
+ }
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ v4l2_async_nf_init(&chan->notifier, &vid->v4l2_dev);
+
+ return 0;
+
+free_v4l2_ctrl_hdl:
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+cleanup_media:
+ media_entity_cleanup(&chan->video.entity);
+free_syncpts:
+ vi->ops->channel_host1x_syncpt_free(chan);
+ return ret;
+}
+
+static int tegra_vi_channel_alloc(struct tegra_vi *vi, unsigned int port_num,
+ struct device_node *node, unsigned int lanes)
+{
+ struct tegra_vi_channel *chan;
+ unsigned int i;
+
+ /*
+	 * Do not use devm_kzalloc, as that memory would be freed immediately
+	 * when the device instance is unbound, while an application might
+	 * still be holding the device node open. Channel memory allocated
+	 * with kzalloc is freed from the video device release callback.
+ */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->vi = vi;
+ chan->portnos[0] = port_num;
+ /*
+	 * When more data lanes are used than the maximum CSI lanes per brick,
+	 * multiple x4 ports are ganged together for simultaneous capture.
+ */
+ if (lanes <= CSI_LANES_PER_BRICK)
+ chan->totalports = 1;
+ else
+ chan->totalports = lanes / CSI_LANES_PER_BRICK;
+ chan->numgangports = chan->totalports;
+
+ for (i = 1; i < chan->totalports; i++)
+ chan->portnos[i] = chan->portnos[0] + i * CSI_PORTS_PER_BRICK;
+
+ chan->of_node = node;
+ list_add_tail(&chan->list, &vi->vi_chans);
+
+ return 0;
+}
+
+static int tegra_vi_tpg_channels_alloc(struct tegra_vi *vi)
+{
+ unsigned int port_num;
+ unsigned int nchannels = vi->soc->vi_max_channels;
+ int ret;
+
+ for (port_num = 0; port_num < nchannels; port_num++) {
+ ret = tegra_vi_channel_alloc(vi, port_num,
+ vi->dev->of_node, 2);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_vi_channels_alloc(struct tegra_vi *vi)
+{
+ struct device_node *node = vi->dev->of_node;
+ struct device_node *ep = NULL;
+ struct device_node *ports;
+ struct device_node *port = NULL;
+ unsigned int port_num;
+ struct device_node *parent;
+ struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
+ unsigned int lanes;
+ int ret = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ return dev_err_probe(vi->dev, -ENODEV, "%pOF: missing 'ports' node\n", node);
+
+ for_each_child_of_node(ports, port) {
+ if (!of_node_name_eq(port, "port"))
+ continue;
+
+ ret = of_property_read_u32(port, "reg", &port_num);
+ if (ret < 0)
+ continue;
+
+ if (port_num > vi->soc->vi_max_channels) {
+ dev_err(vi->dev, "invalid port num %d for %pOF\n",
+ port_num, port);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ep = of_get_child_by_name(port, "endpoint");
+ if (!ep)
+ continue;
+
+ parent = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!parent)
+ continue;
+
+ ep = of_graph_get_endpoint_by_regs(parent, 0, 0);
+ of_node_put(parent);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep),
+ &v4l2_ep);
+ of_node_put(ep);
+ if (ret)
+ continue;
+
+ lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ ret = tegra_vi_channel_alloc(vi, port_num, port, lanes);
+ if (ret < 0)
+ goto cleanup;
+ }
+
+cleanup:
+ of_node_put(port);
+ of_node_put(ports);
+ return ret;
+}
+
+static int tegra_vi_channels_init(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+ int ret;
+
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ ret = tegra_channel_init(chan);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize channel-%d: %d\n",
+ chan->portnos[0], ret);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ list_for_each_entry_continue_reverse(chan, &vi->vi_chans, list)
+ tegra_channel_cleanup(chan);
+
+ return ret;
+}
+
+void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid)
+{
+ struct tegra_vi *vi = vid->vi;
+ struct tegra_csi *csi = vid->csi;
+ struct tegra_csi_channel *csi_chan;
+ struct tegra_vi_channel *chan;
+
+ list_for_each_entry(chan, &vi->vi_chans, list)
+ vb2_video_unregister_device(&chan->video);
+
+ list_for_each_entry(csi_chan, &csi->csi_chans, list)
+ v4l2_device_unregister_subdev(&csi_chan->subdev);
+}
+
+int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid)
+{
+ struct tegra_vi *vi = vid->vi;
+ struct tegra_csi *csi = vid->csi;
+ struct tegra_vi_channel *vi_chan;
+ struct tegra_csi_channel *csi_chan;
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ int ret;
+
+ if (!vi || !csi)
+ return -ENODEV;
+
+ csi_chan = list_first_entry(&csi->csi_chans,
+ struct tegra_csi_channel, list);
+
+ list_for_each_entry(vi_chan, &vi->vi_chans, list) {
+ struct media_entity *source = &csi_chan->subdev.entity;
+ struct media_entity *sink = &vi_chan->video.entity;
+ struct media_pad *source_pad = csi_chan->pads;
+ struct media_pad *sink_pad = &vi_chan->pad;
+
+ ret = v4l2_device_register_subdev(&vid->v4l2_dev,
+ &csi_chan->subdev);
+ if (ret) {
+ dev_err(vi->dev,
+ "failed to register subdev: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = video_register_device(&vi_chan->video,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to register video device: %d\n", ret);
+ goto cleanup;
+ }
+
+ dev_dbg(vi->dev, "creating %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+
+ ret = media_create_pad_link(source, source_pad->index,
+ sink, sink_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to create %s:%u -> %s:%u link: %d\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index, ret);
+ goto cleanup;
+ }
+
+ ret = tegra_channel_setup_ctrl_handler(vi_chan);
+ if (ret < 0)
+ goto cleanup;
+
+ v4l2_set_subdev_hostdata(&csi_chan->subdev, vi_chan);
+ vi_tpg_fmts_bitmap_init(vi_chan);
+ csi_chan = list_next_entry(csi_chan, list);
+ }
+
+ return 0;
+
+cleanup:
+ tegra_v4l2_nodes_cleanup_tpg(vid);
+ return ret;
+}
+
+static int __maybe_unused vi_runtime_resume(struct device *dev)
+{
+ struct tegra_vi *vi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(vi->vdd);
+ if (ret) {
+ dev_err(dev, "failed to enable VDD supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(vi->clk, vi->soc->vi_max_clk_hz);
+ if (ret) {
+ dev_err(dev, "failed to set vi clock rate: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ ret = clk_prepare_enable(vi->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable vi clock: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ return 0;
+
+disable_vdd:
+ regulator_disable(vi->vdd);
+ return ret;
+}
+
+static int __maybe_unused vi_runtime_suspend(struct device *dev)
+{
+ struct tegra_vi *vi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vi->clk);
+
+ regulator_disable(vi->vdd);
+
+ return 0;
+}
+
+/*
+ * Find the entity matching a given fwnode in a v4l2_async_notifier list
+ */
+static struct tegra_vi_graph_entity *
+tegra_vi_graph_find_entity(struct list_head *list,
+ const struct fwnode_handle *fwnode)
+{
+ struct tegra_vi_graph_entity *entity;
+ struct v4l2_async_connection *asd;
+
+ list_for_each_entry(asd, list, asc_entry) {
+ entity = to_tegra_vi_graph_entity(asd);
+
+ if (entity->asd.match.fwnode == fwnode)
+ return entity;
+ }
+
+ return NULL;
+}
+
+static int tegra_vi_graph_build(struct tegra_vi_channel *chan,
+ struct tegra_vi_graph_entity *entity)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct tegra_vi_graph_entity *ent;
+ struct fwnode_handle *ep = NULL;
+ struct v4l2_fwnode_link link;
+ struct media_entity *local = entity->entity;
+ struct media_entity *remote;
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ int ret = 0;
+
+ dev_dbg(vi->dev, "creating links for entity %s\n", local->name);
+
+ while (1) {
+ ep = fwnode_graph_get_next_endpoint(entity->asd.match.fwnode,
+ ep);
+ if (!ep)
+ break;
+
+ ret = v4l2_fwnode_parse_link(ep, &link);
+ if (ret < 0) {
+ dev_err(vi->dev, "failed to parse link for %pOF: %d\n",
+ to_of_node(ep), ret);
+ continue;
+ }
+
+ if (link.local_port >= local->num_pads) {
+ dev_err(vi->dev, "invalid port number %u on %pOF\n",
+ link.local_port, to_of_node(link.local_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ local_pad = &local->pads[link.local_port];
+		/*
+		 * The remote node is the VI node, so use the channel video
+		 * entity and pad as the remote/sink.
+		 */
+ if (link.remote_node == of_fwnode_handle(vi->dev->of_node)) {
+ remote = &chan->video.entity;
+ remote_pad = &chan->pad;
+ goto create_link;
+ }
+
+ /*
+ * Skip sink ports, they will be processed from the other end
+ * of the link.
+ */
+ if (local_pad->flags & MEDIA_PAD_FL_SINK) {
+ dev_dbg(vi->dev, "skipping sink port %pOF:%u\n",
+ to_of_node(link.local_node), link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* find the remote entity from notifier list */
+ ent = tegra_vi_graph_find_entity(&chan->notifier.done_list,
+ link.remote_node);
+ if (!ent) {
+ dev_err(vi->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ remote = ent->entity;
+ if (link.remote_port >= remote->num_pads) {
+ dev_err(vi->dev, "invalid port number %u on %pOF\n",
+ link.remote_port,
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ remote_pad = &remote->pads[link.remote_port];
+
+create_link:
+ dev_dbg(vi->dev, "creating %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+
+ ret = media_create_pad_link(local, local_pad->index,
+ remote, remote_pad->index,
+ link_flags);
+ v4l2_fwnode_put_link(&link);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to create %s:%u -> %s:%u link: %d\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index, ret);
+ break;
+ }
+ }
+
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct tegra_vi_graph_entity *entity;
+ struct v4l2_async_connection *asd;
+ struct v4l2_subdev *subdev;
+ struct tegra_vi_channel *chan;
+ struct tegra_vi *vi;
+ int ret;
+
+ chan = container_of(notifier, struct tegra_vi_channel, notifier);
+ vi = chan->vi;
+
+ dev_dbg(vi->dev, "notify complete, all subdevs registered\n");
+
+ /*
+	 * The video device node should be created at the end of all the
+	 * device-related initialization/setup.
+	 * The current video_register_device() both initializes and registers
+	 * the video device in the same API.
+	 *
+	 * TODO: Update the v4l2-dev driver to split initialization and
+	 * registration into separate APIs, then update the Tegra video driver
+	 * to initialize the video device first, do all video-device-related
+	 * setup, and only then register the video device.
+ */
+ ret = video_register_device(&chan->video, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to register video device: %d\n", ret);
+ goto unregister_video;
+ }
+
+ /* create links between the entities */
+ list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) {
+ entity = to_tegra_vi_graph_entity(asd);
+ ret = tegra_vi_graph_build(chan, entity);
+ if (ret < 0)
+ goto unregister_video;
+ }
+
+ ret = tegra_channel_setup_ctrl_handler(chan);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to setup channel controls: %d\n", ret);
+ goto unregister_video;
+ }
+
+ ret = vi_fmts_bitmap_init(chan);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize formats bitmap: %d\n", ret);
+ goto unregister_video;
+ }
+
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ if (!subdev) {
+ ret = -ENODEV;
+ dev_err(vi->dev,
+ "failed to get remote csi subdev: %d\n", ret);
+ goto unregister_video;
+ }
+
+ v4l2_set_subdev_hostdata(subdev, chan);
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ v4l2_set_subdev_hostdata(subdev, chan);
+
+ return 0;
+
+unregister_video:
+ vb2_video_unregister_device(&chan->video);
+ return ret;
+}
+
+static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct tegra_vi_graph_entity *entity;
+ struct tegra_vi *vi;
+ struct tegra_vi_channel *chan;
+
+ chan = container_of(notifier, struct tegra_vi_channel, notifier);
+ vi = chan->vi;
+
+ /*
+ * Locate the entity corresponding to the bound subdev and store the
+ * subdev pointer.
+ */
+ entity = tegra_vi_graph_find_entity(&chan->notifier.waiting_list,
+ subdev->fwnode);
+ if (!entity) {
+ dev_err(vi->dev, "no entity for subdev %s\n", subdev->name);
+ return -EINVAL;
+ }
+
+ if (entity->subdev) {
+ dev_err(vi->dev, "duplicate subdev for node %pOF\n",
+ to_of_node(entity->asd.match.fwnode));
+ return -EINVAL;
+ }
+
+ dev_dbg(vi->dev, "subdev %s bound\n", subdev->name);
+ entity->entity = &subdev->entity;
+ entity->subdev = subdev;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations tegra_vi_async_ops = {
+ .bound = tegra_vi_graph_notify_bound,
+ .complete = tegra_vi_graph_notify_complete,
+};
+
+static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
+ struct fwnode_handle *fwnode)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct fwnode_handle *ep = NULL;
+ struct fwnode_handle *remote = NULL;
+ struct tegra_vi_graph_entity *tvge;
+ struct device_node *node = NULL;
+ int ret;
+
+ dev_dbg(vi->dev, "parsing node %pOF\n", to_of_node(fwnode));
+
+ /* parse all the remote entities and put them into the list */
+ for_each_endpoint_of_node(to_of_node(fwnode), node) {
+ ep = of_fwnode_handle(node);
+ remote = fwnode_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_err(vi->dev,
+ "remote device at %pOF not found\n", node);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* skip entities that are already processed */
+ if (device_match_fwnode(vi->dev, remote) ||
+ tegra_vi_graph_find_entity(&chan->notifier.waiting_list,
+ remote)) {
+ fwnode_handle_put(remote);
+ continue;
+ }
+
+ tvge = v4l2_async_nf_add_fwnode(&chan->notifier, remote,
+ struct tegra_vi_graph_entity);
+ if (IS_ERR(tvge)) {
+ ret = PTR_ERR(tvge);
+ dev_err(vi->dev,
+ "failed to add subdev to notifier: %d\n", ret);
+ fwnode_handle_put(remote);
+ goto cleanup;
+ }
+
+ ret = tegra_vi_graph_parse_one(chan, remote);
+ if (ret < 0) {
+ fwnode_handle_put(remote);
+ goto cleanup;
+ }
+
+ fwnode_handle_put(remote);
+ }
+
+ return 0;
+
+cleanup:
+ dev_err(vi->dev, "failed parsing the graph: %d\n", ret);
+ v4l2_async_nf_cleanup(&chan->notifier);
+ of_node_put(node);
+ return ret;
+}
+
+static int tegra_vi_graph_init(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+ struct fwnode_handle *fwnode = dev_fwnode(vi->dev);
+ int ret;
+
+ /*
+ * Walk the links to parse the full graph. Each channel will have
+ * one endpoint of the composite node. Start by parsing the
+ * composite node and parse the remote entities in turn.
+	 * Each channel registers its own v4l2 async notifier to keep the
+	 * graphs of the channels independent, so if parsing the graph of one
+	 * channel fails we can skip that channel and continue with the next
+	 * ones.
+ */
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ struct fwnode_handle *ep, *remote;
+
+ ep = fwnode_graph_get_endpoint_by_id(fwnode,
+ chan->portnos[0], 0, 0);
+ if (!ep)
+ continue;
+
+ remote = fwnode_graph_get_remote_port_parent(ep);
+ fwnode_handle_put(ep);
+
+ ret = tegra_vi_graph_parse_one(chan, remote);
+ fwnode_handle_put(remote);
+ if (ret < 0 || list_empty(&chan->notifier.waiting_list))
+ continue;
+
+ chan->notifier.ops = &tegra_vi_async_ops;
+ ret = v4l2_async_nf_register(&chan->notifier);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to register channel %d notifier: %d\n",
+ chan->portnos[0], ret);
+ v4l2_async_nf_cleanup(&chan->notifier);
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_vi_graph_cleanup(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ vb2_video_unregister_device(&chan->video);
+ v4l2_async_nf_unregister(&chan->notifier);
+ v4l2_async_nf_cleanup(&chan->notifier);
+ }
+}
+
+static int tegra_vi_init(struct host1x_client *client)
+{
+ struct tegra_video_device *vid = dev_get_drvdata(client->host);
+ struct tegra_vi *vi = host1x_client_to_vi(client);
+ struct tegra_vi_channel *chan, *tmp;
+ int ret;
+
+ vid->media_dev.hw_revision = vi->soc->hw_revision;
+ snprintf(vid->media_dev.bus_info, sizeof(vid->media_dev.bus_info),
+ "platform:%s", dev_name(vi->dev));
+
+ INIT_LIST_HEAD(&vi->vi_chans);
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ ret = tegra_vi_tpg_channels_alloc(vi);
+ else
+ ret = tegra_vi_channels_alloc(vi);
+ if (ret < 0)
+ goto free_chans;
+
+ ret = tegra_vi_channels_init(vi);
+ if (ret < 0)
+ goto free_chans;
+
+ vid->vi = vi;
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
+ ret = tegra_vi_graph_init(vi);
+ if (ret < 0)
+ goto cleanup_chans;
+ }
+
+ return 0;
+
+cleanup_chans:
+ list_for_each_entry(chan, &vi->vi_chans, list)
+ tegra_channel_cleanup(chan);
+free_chans:
+ list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
+ list_del(&chan->list);
+ kfree(chan);
+ }
+
+ return ret;
+}
+
+static int tegra_vi_exit(struct host1x_client *client)
+{
+ struct tegra_vi *vi = host1x_client_to_vi(client);
+
+ /*
+	 * Do not clean up the channels here, as an application might still be
+	 * holding video device nodes open. Channel cleanup happens in the
+	 * v4l2_device release callback, which is called after all video
+	 * device nodes are released.
+ */
+
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ tegra_vi_graph_cleanup(vi);
+
+ return 0;
+}
+
+static const struct host1x_client_ops vi_client_ops = {
+ .init = tegra_vi_init,
+ .exit = tegra_vi_exit,
+};
+
+static int tegra_vi_probe(struct platform_device *pdev)
+{
+ struct tegra_vi *vi;
+ int ret;
+
+ vi = devm_kzalloc(&pdev->dev, sizeof(*vi), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vi->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vi->iomem))
+ return PTR_ERR(vi->iomem);
+
+ vi->soc = of_device_get_match_data(&pdev->dev);
+
+ vi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vi->clk)) {
+ ret = PTR_ERR(vi->clk);
+ dev_err(&pdev->dev, "failed to get vi clock: %d\n", ret);
+ return ret;
+ }
+
+ vi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
+ if (IS_ERR(vi->vdd)) {
+ ret = PTR_ERR(vi->vdd);
+ dev_err(&pdev->dev, "failed to get VDD supply: %d\n", ret);
+ return ret;
+ }
+
+ if (!pdev->dev.pm_domain) {
+ ret = -ENOENT;
+ dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to populate vi child device: %d\n", ret);
+ return ret;
+ }
+
+ vi->dev = &pdev->dev;
+ vi->ops = vi->soc->ops;
+ platform_set_drvdata(pdev, vi);
+ pm_runtime_enable(&pdev->dev);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&vi->client.list);
+ vi->client.ops = &vi_client_ops;
+ vi->client.dev = &pdev->dev;
+
+ if (vi->ops->vi_enable)
+ vi->ops->vi_enable(vi, true);
+
+ ret = host1x_client_register(&vi->client);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to register host1x client: %d\n", ret);
+ goto rpm_disable;
+ }
+
+ return 0;
+
+rpm_disable:
+ if (vi->ops->vi_enable)
+ vi->ops->vi_enable(vi, false);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static void tegra_vi_remove(struct platform_device *pdev)
+{
+ struct tegra_vi *vi = platform_get_drvdata(pdev);
+
+ host1x_client_unregister(&vi->client);
+
+ if (vi->ops->vi_enable)
+ vi->ops->vi_enable(vi, false);
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct of_device_id tegra_vi_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ { .compatible = "nvidia,tegra20-vi", .data = &tegra20_vi_soc },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-vi", .data = &tegra210_vi_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_vi_of_id_table);
+
+static const struct dev_pm_ops tegra_vi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vi_runtime_suspend, vi_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_vi_driver = {
+ .driver = {
+ .name = "tegra-vi",
+ .of_match_table = tegra_vi_of_id_table,
+ .pm = &tegra_vi_pm_ops,
+ },
+ .probe = tegra_vi_probe,
+ .remove = tegra_vi_remove,
+};
diff --git a/drivers/staging/media/tegra-video/vi.h b/drivers/staging/media/tegra-video/vi.h
new file mode 100644
index 000000000000..1e6a5caa7082
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vi.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_VI_H__
+#define __TEGRA_VI_H__
+
+#include <linux/host1x.h>
+#include <linux/list.h>
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "csi.h"
+
+#define V4L2_CID_TEGRA_SYNCPT_TIMEOUT_RETRY (V4L2_CTRL_CLASS_CAMERA | 0x1001)
+
+#define TEGRA_DEF_WIDTH 1920
+#define TEGRA_DEF_HEIGHT 1080
+#define TEGRA_IMAGE_FORMAT_DEF 32
+
+#define MAX_FORMAT_NUM 64
+
+enum tegra_vi_pg_mode {
+ TEGRA_VI_PG_DISABLED = 0,
+ TEGRA_VI_PG_DIRECT,
+ TEGRA_VI_PG_PATCH,
+};
+
+struct tegra_vi;
+struct tegra_vi_channel;
+
+/**
+ * struct tegra_vi_ops - Tegra VI operations
+ * @vi_enable: soc-specific operations needed to enable/disable the VI peripheral
+ * @channel_host1x_syncpt_init: initialize synchronization points
+ * @channel_host1x_syncpt_free: free all synchronization points
+ * @vi_fmt_align: modify `pix` to fit the hardware alignment
+ * requirements and fill image geometry
+ * @channel_queue_setup: additional operations at the end of vb2_ops::queue_setup
+ * @vi_start_streaming: starts the media pipeline and subdevice streaming, sets
+ *		up the VI for capture, and runs the capture-start and
+ *		capture-finish kthreads that capture frames into buffers and
+ *		return them.
+ * @vi_stop_streaming: stops the media pipeline and subdevice streaming and
+ *		returns any queued buffers.
+ */
+struct tegra_vi_ops {
+ int (*vi_enable)(struct tegra_vi *vi, bool on);
+ int (*channel_host1x_syncpt_init)(struct tegra_vi_channel *chan);
+ void (*channel_host1x_syncpt_free)(struct tegra_vi_channel *chan);
+ void (*vi_fmt_align)(struct v4l2_pix_format *pix, unsigned int bpp);
+ void (*channel_queue_setup)(struct tegra_vi_channel *chan);
+ int (*vi_start_streaming)(struct vb2_queue *vq, u32 count);
+ void (*vi_stop_streaming)(struct vb2_queue *vq);
+};
+
+/**
+ * struct tegra_vi_soc - NVIDIA Tegra Video Input SoC structure
+ *
+ * @video_formats: supported video formats
+ * @nformats: total video formats
+ * @default_video_format: default video format (pointer to a @video_formats item)
+ * @ops: vi operations
+ * @hw_revision: VI hw_revision
+ * @vi_max_channels: supported max streaming channels
+ * @vi_max_clk_hz: VI clock max frequency
+ * @has_h_v_flip: the chip can do H and V flip, and the driver implements it
+ */
+struct tegra_vi_soc {
+ const struct tegra_video_format *video_formats;
+ const unsigned int nformats;
+ const struct tegra_video_format *default_video_format;
+ const struct tegra_vi_ops *ops;
+ u32 hw_revision;
+ unsigned int vi_max_channels;
+ unsigned int vi_max_clk_hz;
+ bool has_h_v_flip:1;
+};
+
+/**
+ * struct tegra_vi - NVIDIA Tegra Video Input device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @iomem: register base
+ * @clk: main clock for VI block
+ * @vdd: vdd regulator for VI hardware, normally it is avdd_dsi_csi
+ * @soc: pointer to SoC data structure
+ * @ops: vi operations
+ * @vi_chans: list head for VI channels
+ */
+struct tegra_vi {
+ struct device *dev;
+ struct host1x_client client;
+ void __iomem *iomem;
+ struct clk *clk;
+ struct regulator *vdd;
+ const struct tegra_vi_soc *soc;
+ const struct tegra_vi_ops *ops;
+ struct list_head vi_chans;
+};
+
+/**
+ * struct tegra_vi_channel - Tegra video channel
+ *
+ * @list: list head for this entry
+ * @video: V4L2 video device associated with the video channel
+ * @video_lock: protects the @format and @queue fields
+ * @pad: media pad for the video device entity
+ *
+ * @vi: Tegra video input device structure
+ * @frame_start_sp: host1x syncpoint pointer to synchronize programmed capture
+ * start condition with hardware frame start events through host1x
+ * syncpoint counters. (Tegra210)
+ * @mw_ack_sp: host1x syncpoint pointer to synchronize programmed memory write
+ * ack trigger condition with hardware memory write done at end of
+ * frame through host1x syncpoint counters (On Tegra20 used for the
+ * OUT_1 syncpt)
+ * @sp_incr_lock: protects cpu syncpoint increment.
+ * @next_out_sp_idx: next expected value for mw_ack_sp[0], i.e. OUT_1 (Tegra20)
+ *
+ * @kthread_start_capture: kthread to start capture of a single frame when a
+ *		vb2 buffer is available. This thread programs the VI CSI
+ *		hardware for a single-frame capture and waits for the
+ *		frame-start event from the hardware. On receiving it, the
+ *		thread wakes up the kthread_finish_capture thread to wait for
+ *		the frame data write to memory to finish. If the frame-start
+ *		event is missed, this thread returns the buffer to vb2 with
+ *		VB2_BUF_STATE_ERROR.
+ * @start_wait: waitqueue for starting frame capture when buffer is available.
+ * @kthread_finish_capture: kthread to finish the buffer capture and return
+ *		the buffer. This thread is woken up by kthread_start_capture
+ *		on receiving the frame-start event from the hardware and then
+ *		waits for the MW_ACK_DONE event, which indicates that writing
+ *		the frame data to memory has completed. On receiving
+ *		MW_ACK_DONE, the buffer is returned to vb2 with
+ *		VB2_BUF_STATE_DONE; if the event is missed, it is returned
+ *		with VB2_BUF_STATE_ERROR.
+ * @done_wait: waitqueue for finishing capture data writes to memory.
+ *
+ * @format: active V4L2 pixel format
+ * @fmtinfo: format information corresponding to the active @format
+ * @queue: vb2 buffers queue
+ * @sequence: V4L2 buffers sequence number
+ *
+ * @addr_offset_u: U plane base address, relative to buffer base address (only for planar)
+ * @addr_offset_v: V plane base address, relative to buffer base address (only for planar)
+ * @start_offset: 1st Y byte to write, relative to buffer base address (for H/V flip)
+ * @start_offset_u: 1st U byte to write, relative to buffer base address (for H/V flip)
+ * @start_offset_v: 1st V byte to write, relative to buffer base address (for H/V flip)
+ *
+ * @capture: list of queued buffers for capture
+ * @start_lock: protects the capture queued list
+ * @done: list of capture done queued buffers
+ * @done_lock: protects the capture done queue list
+ *
+ * @portnos: VI channel port numbers
+ * @totalports: total number of ports used for this channel
+ * @numgangports: number of ports combined together as a gang for capture
+ * @of_node: device node of VI channel
+ *
+ * @ctrl_handler: V4L2 control handler of this video channel
+ * @syncpt_timeout_retry: syncpt timeout retry count for the capture
+ * @fmts_bitmap: a bitmap for supported formats matching v4l2 subdev formats
+ * @tpg_fmts_bitmap: a bitmap for supported TPG formats
+ * @pg_mode: test pattern generator mode (disabled/direct/patch)
+ * @notifier: V4L2 asynchronous subdevs notifier
+ *
+ * @hflip: Horizontal flip is enabled
+ * @vflip: Vertical flip is enabled
+ */
+struct tegra_vi_channel {
+ struct list_head list;
+ struct video_device video;
+ /* protects the @format and @queue fields */
+ struct mutex video_lock;
+ struct media_pad pad;
+
+ struct tegra_vi *vi;
+ struct host1x_syncpt *frame_start_sp[GANG_PORTS_MAX];
+ struct host1x_syncpt *mw_ack_sp[GANG_PORTS_MAX];
+ /* protects the cpu syncpoint increment */
+ spinlock_t sp_incr_lock[GANG_PORTS_MAX];
+ u32 next_out_sp_idx;
+
+ struct task_struct *kthread_start_capture;
+ wait_queue_head_t start_wait;
+ struct task_struct *kthread_finish_capture;
+ wait_queue_head_t done_wait;
+
+ struct v4l2_pix_format format;
+ const struct tegra_video_format *fmtinfo;
+ struct vb2_queue queue;
+ u32 sequence;
+
+ unsigned int addr_offset_u;
+ unsigned int addr_offset_v;
+ unsigned int start_offset;
+ unsigned int start_offset_u;
+ unsigned int start_offset_v;
+
+ struct list_head capture;
+ /* protects the capture queued list */
+ spinlock_t start_lock;
+ struct list_head done;
+ /* protects the capture done queue list */
+ spinlock_t done_lock;
+
+ unsigned char portnos[GANG_PORTS_MAX];
+ u8 totalports;
+ u8 numgangports;
+ struct device_node *of_node;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ unsigned int syncpt_timeout_retry;
+ DECLARE_BITMAP(fmts_bitmap, MAX_FORMAT_NUM);
+ DECLARE_BITMAP(tpg_fmts_bitmap, MAX_FORMAT_NUM);
+ enum tegra_vi_pg_mode pg_mode;
+
+ struct v4l2_async_notifier notifier;
+
+ bool hflip:1;
+ bool vflip:1;
+};
+
+/**
+ * struct tegra_channel_buffer - video channel buffer
+ *
+ * @buf: vb2 buffer base object
+ * @queue: buffer list entry in the channel queued buffers list
+ * @chan: channel that uses the buffer
+ * @addr: Tegra IOVA buffer address for VI output
+ * @mw_ack_sp_thresh: MW_ACK_DONE syncpoint threshold corresponding
+ * to the capture buffer.
+ */
+struct tegra_channel_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ struct tegra_vi_channel *chan;
+ dma_addr_t addr;
+ u32 mw_ack_sp_thresh[GANG_PORTS_MAX];
+};
+
+/*
+ * VI channel input data type enum.
+ * These data type enum values get programmed into the corresponding Tegra VI
+ * channel register bits.
+ */
+enum tegra_image_dt {
+ TEGRA_IMAGE_DT_YUV420_8 = 24,
+ TEGRA_IMAGE_DT_YUV420_10,
+
+ TEGRA_IMAGE_DT_YUV420CSPS_8 = 28,
+ TEGRA_IMAGE_DT_YUV420CSPS_10,
+ TEGRA_IMAGE_DT_YUV422_8,
+ TEGRA_IMAGE_DT_YUV422_10,
+ TEGRA_IMAGE_DT_RGB444,
+ TEGRA_IMAGE_DT_RGB555,
+ TEGRA_IMAGE_DT_RGB565,
+ TEGRA_IMAGE_DT_RGB666,
+ TEGRA_IMAGE_DT_RGB888,
+
+ TEGRA_IMAGE_DT_RAW6 = 40,
+ TEGRA_IMAGE_DT_RAW7,
+ TEGRA_IMAGE_DT_RAW8,
+ TEGRA_IMAGE_DT_RAW10,
+ TEGRA_IMAGE_DT_RAW12,
+ TEGRA_IMAGE_DT_RAW14,
+};
+
+/**
+ * struct tegra_video_format - Tegra video format description
+ *
+ * @img_dt: MIPI CSI-2 data type (for CSI-2 only)
+ * @bit_width: format width in bits per component (for CSI/Tegra210 only)
+ * @code: media bus format code
+ * @bpp: bytes per pixel (when stored in memory)
+ * @img_fmt: image format (for CSI/Tegra210 only)
+ * @fourcc: V4L2 pixel format FCC identifier
+ */
+struct tegra_video_format {
+ enum tegra_image_dt img_dt;
+ unsigned int bit_width;
+ unsigned int code;
+ unsigned int bpp;
+ u32 img_fmt;
+ u32 fourcc;
+};
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+extern const struct tegra_vi_soc tegra20_vi_soc;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_vi_soc tegra210_vi_soc;
+#endif
+
+struct v4l2_subdev *
+tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan);
+struct v4l2_subdev *
+tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan);
+int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on);
+void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
+ enum vb2_buffer_state state);
+void tegra_channels_cleanup(struct tegra_vi *vi);
+#endif
diff --git a/drivers/staging/media/tegra-video/video.c b/drivers/staging/media/tegra-video/video.c
new file mode 100644
index 000000000000..074ad0dc56ca
--- /dev/null
+++ b/drivers/staging/media/tegra-video/video.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-event.h>
+
+#include "video.h"
+
+static void tegra_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct tegra_video_device *vid;
+
+ vid = container_of(v4l2_dev, struct tegra_video_device, v4l2_dev);
+
+	/* clean up channels here, since all video device nodes have been released */
+ tegra_channels_cleanup(vid->vi);
+
+ v4l2_device_unregister(v4l2_dev);
+ media_device_unregister(&vid->media_dev);
+ media_device_cleanup(&vid->media_dev);
+ kfree(vid);
+}
+
+static void tegra_v4l2_dev_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ struct tegra_vi_channel *chan;
+ const struct v4l2_event *ev = arg;
+
+ if (notification != V4L2_DEVICE_NOTIFY_EVENT)
+ return;
+
+ chan = v4l2_get_subdev_hostdata(sd);
+ v4l2_event_queue(&chan->video, arg);
+ if (ev->type == V4L2_EVENT_SOURCE_CHANGE && vb2_is_streaming(&chan->queue))
+ vb2_queue_error(&chan->queue);
+}
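
The notify handler above forwards subdev events (such as a resolution change on the connected bridge) to the channel's video node and flags the queue with an error. As a hedged sketch only (not part of this patch), an application picks these up with the standard event ioctls; "/dev/video0" is an assumed node name:

/* hedged userspace sketch: wait for a source-change event from the driver */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
    struct v4l2_event_subscription sub = {
        .type = V4L2_EVENT_SOURCE_CHANGE,
    };
    struct v4l2_event ev;
    int fd = open("/dev/video0", O_RDWR);

    if (fd < 0)
        return 1;

    if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
        close(fd);
        return 1;
    }

    /* blocks until the connected subdev reports a change on the source */
    if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
        ev.type == V4L2_EVENT_SOURCE_CHANGE)
        printf("source changed, restart streaming with the new timings\n");

    close(fd);
    return 0;
}
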
+
+static int host1x_video_probe(struct host1x_device *dev)
+{
+ struct tegra_video_device *vid;
+ int ret;
+
+ vid = kzalloc(sizeof(*vid), GFP_KERNEL);
+ if (!vid)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, vid);
+
+ vid->media_dev.dev = &dev->dev;
+ strscpy(vid->media_dev.model, "NVIDIA Tegra Video Input Device",
+ sizeof(vid->media_dev.model));
+
+ media_device_init(&vid->media_dev);
+ ret = media_device_register(&vid->media_dev);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "failed to register media device: %d\n", ret);
+ goto cleanup;
+ }
+
+ vid->v4l2_dev.mdev = &vid->media_dev;
+ vid->v4l2_dev.release = tegra_v4l2_dev_release;
+ vid->v4l2_dev.notify = tegra_v4l2_dev_notify;
+ ret = v4l2_device_register(&dev->dev, &vid->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "V4L2 device registration failed: %d\n", ret);
+ goto unregister_media;
+ }
+
+ ret = host1x_device_init(dev);
+ if (ret < 0)
+ goto unregister_v4l2;
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
+ /*
+ * Both vi and csi channels are available now.
+ * Register v4l2 nodes and create media links for TPG.
+ */
+ ret = tegra_v4l2_nodes_setup_tpg(vid);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "failed to setup tpg graph: %d\n", ret);
+ goto device_exit;
+ }
+ }
+
+ return 0;
+
+device_exit:
+ host1x_device_exit(dev);
+	/* the vi exit op does not clean up channels, so clean them up here */
+ tegra_channels_cleanup(vid->vi);
+unregister_v4l2:
+ v4l2_device_unregister(&vid->v4l2_dev);
+unregister_media:
+ media_device_unregister(&vid->media_dev);
+cleanup:
+ media_device_cleanup(&vid->media_dev);
+ kfree(vid);
+ return ret;
+}
+
+static int host1x_video_remove(struct host1x_device *dev)
+{
+ struct tegra_video_device *vid = dev_get_drvdata(&dev->dev);
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ tegra_v4l2_nodes_cleanup_tpg(vid);
+
+ host1x_device_exit(dev);
+
+ /* This calls v4l2_dev release callback on last reference */
+ v4l2_device_put(&vid->v4l2_dev);
+
+ return 0;
+}
+
+static const struct of_device_id host1x_video_subdevs[] = {
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ { .compatible = "nvidia,tegra20-vip", },
+ { .compatible = "nvidia,tegra20-vi", },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-csi", },
+ { .compatible = "nvidia,tegra210-vi", },
+#endif
+ { }
+};
+
+static struct host1x_driver host1x_video_driver = {
+ .driver = {
+ .name = "tegra-video",
+ },
+ .probe = host1x_video_probe,
+ .remove = host1x_video_remove,
+ .subdevs = host1x_video_subdevs,
+};
+
+static struct platform_driver * const drivers[] = {
+ &tegra_csi_driver,
+ &tegra_vip_driver,
+ &tegra_vi_driver,
+};
+
+static int __init host1x_video_init(void)
+{
+ int err;
+
+ err = host1x_driver_register(&host1x_video_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ if (err < 0)
+ goto unregister_host1x;
+
+ return 0;
+
+unregister_host1x:
+ host1x_driver_unregister(&host1x_video_driver);
+ return err;
+}
+module_init(host1x_video_init);
+
+static void __exit host1x_video_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+ host1x_driver_unregister(&host1x_video_driver);
+}
+module_exit(host1x_video_exit);
+
+MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra Host1x Video driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/tegra-video/video.h b/drivers/staging/media/tegra-video/video.h
new file mode 100644
index 000000000000..7275affa6558
--- /dev/null
+++ b/drivers/staging/media/tegra-video/video.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_VIDEO_H__
+#define __TEGRA_VIDEO_H__
+
+#include <linux/host1x.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+
+#include "vi.h"
+
+struct tegra_video_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct tegra_vi *vi;
+ struct tegra_csi *csi;
+};
+
+int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid);
+void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid);
+
+extern struct platform_driver tegra_vi_driver;
+extern struct platform_driver tegra_vip_driver;
+extern struct platform_driver tegra_csi_driver;
+#endif
diff --git a/drivers/staging/media/tegra-video/vip.c b/drivers/staging/media/tegra-video/vip.c
new file mode 100644
index 000000000000..5ec717f3afd5
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vip.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Parallel video capture module (VIP) for the Tegra VI.
+ *
+ * This file implements the VIP-specific infrastructure.
+ *
+ * Copyright (C) 2023 SKIDATA GmbH
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-fwnode.h>
+
+#include "vip.h"
+#include "video.h"
+
+static inline struct tegra_vip *host1x_client_to_vip(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_vip, client);
+}
+
+static inline struct tegra_vip_channel *subdev_to_vip_channel(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct tegra_vip_channel, subdev);
+}
+
+static inline struct tegra_vip *vip_channel_to_vip(struct tegra_vip_channel *chan)
+{
+ return container_of(chan, struct tegra_vip, chan);
+}
+
+/* Find the previous subdev in the pipeline (i.e. the one connected to our sink pad) */
+static struct v4l2_subdev *tegra_vip_channel_get_prev_subdev(struct tegra_vip_channel *chan)
+{
+ struct media_pad *remote_pad;
+
+ remote_pad = media_pad_remote_pad_first(&chan->pads[TEGRA_VIP_PAD_SINK]);
+ if (!remote_pad)
+ return NULL;
+
+ return media_entity_to_v4l2_subdev(remote_pad->entity);
+}
+
+static int tegra_vip_enable_stream(struct v4l2_subdev *subdev)
+{
+ struct tegra_vip_channel *vip_chan = subdev_to_vip_channel(subdev);
+ struct tegra_vip *vip = vip_channel_to_vip(vip_chan);
+ struct v4l2_subdev *prev_subdev = tegra_vip_channel_get_prev_subdev(vip_chan);
+ int err;
+
+ err = pm_runtime_resume_and_get(vip->dev);
+ if (err)
+ return dev_err_probe(vip->dev, err, "failed to get runtime PM\n");
+
+ err = vip->soc->ops->vip_start_streaming(vip_chan);
+ if (err < 0)
+ goto err_start_streaming;
+
+ err = v4l2_subdev_call(prev_subdev, video, s_stream, true);
+ if (err < 0 && err != -ENOIOCTLCMD)
+ goto err_prev_subdev_start_stream;
+
+ return 0;
+
+err_prev_subdev_start_stream:
+err_start_streaming:
+ pm_runtime_put(vip->dev);
+ return err;
+}
+
+static int tegra_vip_disable_stream(struct v4l2_subdev *subdev)
+{
+ struct tegra_vip_channel *vip_chan = subdev_to_vip_channel(subdev);
+ struct tegra_vip *vip = vip_channel_to_vip(vip_chan);
+ struct v4l2_subdev *prev_subdev = tegra_vip_channel_get_prev_subdev(vip_chan);
+
+ v4l2_subdev_call(prev_subdev, video, s_stream, false);
+
+ pm_runtime_put(vip->dev);
+
+ return 0;
+}
+
+static int tegra_vip_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ int err;
+
+ if (enable)
+ err = tegra_vip_enable_stream(subdev);
+ else
+ err = tegra_vip_disable_stream(subdev);
+
+ return err;
+}
+
+static const struct v4l2_subdev_video_ops tegra_vip_video_ops = {
+ .s_stream = tegra_vip_s_stream,
+};
+
+static const struct v4l2_subdev_ops tegra_vip_ops = {
+ .video = &tegra_vip_video_ops,
+};
+
+static int tegra_vip_channel_of_parse(struct tegra_vip *vip)
+{
+ struct device *dev = vip->dev;
+ struct device_node *np = dev->of_node;
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
+ struct fwnode_handle *fwh;
+ struct device_node *ep;
+ unsigned int num_pads;
+ int err;
+
+ dev_dbg(dev, "Parsing %pOF", np);
+
+ ep = of_graph_get_endpoint_by_regs(np, 0, 0);
+ if (!ep) {
+ err = -EINVAL;
+ dev_err_probe(dev, err, "%pOF: error getting endpoint node\n", np);
+ goto err_node_put;
+ }
+
+ fwh = of_fwnode_handle(ep);
+ err = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
+ of_node_put(ep);
+ if (err) {
+ dev_err_probe(dev, err, "%pOF: failed to parse v4l2 endpoint\n", np);
+ goto err_node_put;
+ }
+
+ num_pads = of_graph_get_endpoint_count(np);
+ if (num_pads != TEGRA_VIP_PADS_NUM) {
+ err = -EINVAL;
+ dev_err_probe(dev, err, "%pOF: need 2 pads, got %d\n", np, num_pads);
+ goto err_node_put;
+ }
+
+ vip->chan.of_node = of_node_get(np);
+ vip->chan.pads[TEGRA_VIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ vip->chan.pads[TEGRA_VIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ return 0;
+
+err_node_put:
+ of_node_put(np);
+ return err;
+}
+
+static int tegra_vip_channel_init(struct tegra_vip *vip)
+{
+ struct v4l2_subdev *subdev;
+ int err;
+
+ subdev = &vip->chan.subdev;
+ v4l2_subdev_init(subdev, &tegra_vip_ops);
+ subdev->dev = vip->dev;
+ snprintf(subdev->name, sizeof(subdev->name), "%s",
+ kbasename(vip->chan.of_node->full_name));
+
+ v4l2_set_subdevdata(subdev, &vip->chan);
+ subdev->fwnode = of_fwnode_handle(vip->chan.of_node);
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ err = media_entity_pads_init(&subdev->entity, TEGRA_VIP_PADS_NUM, vip->chan.pads);
+ if (err)
+ return dev_err_probe(vip->dev, err, "failed to initialize media entity\n");
+
+ err = v4l2_async_register_subdev(subdev);
+ if (err) {
+ dev_err_probe(vip->dev, err, "failed to register subdev\n");
+ goto err_register_subdev;
+ }
+
+ return 0;
+
+err_register_subdev:
+ media_entity_cleanup(&subdev->entity);
+ return err;
+}
+
+static int tegra_vip_init(struct host1x_client *client)
+{
+ struct tegra_vip *vip = host1x_client_to_vip(client);
+ int err;
+
+ err = tegra_vip_channel_of_parse(vip);
+ if (err)
+ return err;
+
+ err = tegra_vip_channel_init(vip);
+ if (err)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ of_node_put(vip->chan.of_node);
+ return err;
+}
+
+static int tegra_vip_exit(struct host1x_client *client)
+{
+ struct tegra_vip *vip = host1x_client_to_vip(client);
+ struct v4l2_subdev *subdev = &vip->chan.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ of_node_put(vip->chan.of_node);
+
+ return 0;
+}
+
+static const struct host1x_client_ops vip_client_ops = {
+ .init = tegra_vip_init,
+ .exit = tegra_vip_exit,
+};
+
+static int tegra_vip_probe(struct platform_device *pdev)
+{
+ struct tegra_vip *vip;
+ int err;
+
+ dev_dbg(&pdev->dev, "Probing VIP \"%s\" from %pOF\n", pdev->name, pdev->dev.of_node);
+
+ vip = devm_kzalloc(&pdev->dev, sizeof(*vip), GFP_KERNEL);
+ if (!vip)
+ return -ENOMEM;
+
+ vip->soc = of_device_get_match_data(&pdev->dev);
+
+ vip->dev = &pdev->dev;
+ platform_set_drvdata(pdev, vip);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&vip->client.list);
+ vip->client.ops = &vip_client_ops;
+ vip->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&vip->client);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to register host1x client\n");
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+}
+
+static void tegra_vip_remove(struct platform_device *pdev)
+{
+ struct tegra_vip *vip = platform_get_drvdata(pdev);
+
+ host1x_client_unregister(&vip->client);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+extern const struct tegra_vip_soc tegra20_vip_soc;
+#endif
+
+static const struct of_device_id tegra_vip_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+ { .compatible = "nvidia,tegra20-vip", .data = &tegra20_vip_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_vip_of_id_table);
+
+struct platform_driver tegra_vip_driver = {
+ .driver = {
+ .name = "tegra-vip",
+ .of_match_table = tegra_vip_of_id_table,
+ },
+ .probe = tegra_vip_probe,
+ .remove = tegra_vip_remove,
+};
diff --git a/drivers/staging/media/tegra-video/vip.h b/drivers/staging/media/tegra-video/vip.h
new file mode 100644
index 000000000000..32ceaaccbba2
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vip.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 SKIDATA GmbH
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#ifndef __TEGRA_VIP_H__
+#define __TEGRA_VIP_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+enum {
+ TEGRA_VIP_PAD_SINK,
+ TEGRA_VIP_PAD_SOURCE,
+ TEGRA_VIP_PADS_NUM,
+};
+
+struct tegra_vip;
+
+/**
+ * struct tegra_vip_channel - Tegra VIP (parallel video capture) channel
+ *
+ * @subdev: V4L2 subdevice associated with this channel
+ * @pads: media pads for the subdevice entity
+ * @of_node: vip device tree node
+ */
+struct tegra_vip_channel {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[TEGRA_VIP_PADS_NUM];
+ struct device_node *of_node;
+};
+
+/**
+ * struct tegra_vip_ops - Tegra VIP operations
+ *
+ * @vip_start_streaming: programs vip hardware to enable streaming.
+ */
+struct tegra_vip_ops {
+ int (*vip_start_streaming)(struct tegra_vip_channel *vip_chan);
+};
+
+/**
+ * struct tegra_vip_soc - NVIDIA Tegra VIP SoC structure
+ *
+ * @ops: vip hardware operations
+ */
+struct tegra_vip_soc {
+ const struct tegra_vip_ops *ops;
+};
+
+/**
+ * struct tegra_vip - NVIDIA Tegra VIP device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @soc: pointer to SoC data structure
+ * @chan: the VIP channel
+ */
+struct tegra_vip {
+ struct device *dev;
+ struct host1x_client client;
+ const struct tegra_vip_soc *soc;
+ struct tegra_vip_channel chan;
+};
+
+#endif
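
For reference, the tegra20_vip_soc instance referenced by vip.c's match table above is expected to wire a tegra_vip_ops implementation into these structures. The snippet below is an illustrative sketch only, not the in-tree Tegra20 implementation; the function body is a placeholder for the hardware-specific register programming.

/* Sketch only -- assumes the vip.h shown above; not the actual tegra20 code. */
#include "vip.h"

static int tegra20_vip_start_streaming(struct tegra_vip_channel *vip_chan)
{
	/* Program the parallel video input hardware here. */
	return 0;
}

static const struct tegra_vip_ops tegra20_vip_ops = {
	.vip_start_streaming = tegra20_vip_start_streaming,
};

const struct tegra_vip_soc tegra20_vip_soc = {
	.ops = &tegra20_vip_ops,
};
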
diff --git a/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
index 380c137089d0..48aa45acc953 100644
--- a/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
+++ b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
@@ -47,10 +47,144 @@ Date: June 2015
KernelVersion: 4.3
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates the type of peripherial interface the current device
+ Indicates the type of peripheral interface the current device
uses.
Users:
+What: /sys/class/most/mostcore/devices/<mdev>/dci
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ If the network interface controller is attached via USB, a dci
+ directory is created that allows applications to use the
+ controller's direct communication interface (DCI) to exchange
+ information.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/arb_address
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to set an arbitrary DCI register address an
+ application wants to read from or write to.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/arb_value
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to read from or write to the arbitrary DCI register
+ whose address is stored in arb_address.
+Users:
+
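+		For example, an application could access an arbitrary DCI
+		register through these two attributes roughly as follows
+		(device name, register address and value are placeholders):
+
+		$ cd /sys/class/most/mostcore/devices/mdev0/dci
+		$ echo 0x1000 > arb_address    # select the register
+		$ cat arb_value                # read it
+		$ echo 0x0001 > arb_value      # or write it
+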
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_eui48_hi
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_eui48_lo
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_eui48_mi
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_filter
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP filter address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash0
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash1
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash2
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash3
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/ni_state
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current network interface state.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/node_address
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current node address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/node_position
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current node position.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/packet_bandwidth
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the configured packet bandwidth.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/sync_ep
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Triggers the controller's synchronization process for a certain
+ endpoint.
+Users:
+
What: /sys/class/most/mostcore/devices/<mdev>/<channel>/
Date: June 2015
KernelVersion: 4.3
diff --git a/drivers/staging/most/Documentation/driver_usage.txt b/drivers/staging/most/Documentation/driver_usage.txt
index a4dc0c348fbc..2fa8dea1da4d 100644
--- a/drivers/staging/most/Documentation/driver_usage.txt
+++ b/drivers/staging/most/Documentation/driver_usage.txt
@@ -23,20 +23,29 @@ audio/video streaming. Therefore, the driver perfectly fits to the mission
of Automotive Grade Linux to create open source software solutions for
automotive applications.
-The driver consists basically of three layers. The hardware layer, the
-core layer and the application layer. The core layer consists of the core
-module only. This module handles the communication flow through all three
-layers, the configuration of the driver, the configuration interface
-representation in sysfs, and the buffer management.
-For each of the other two layers a selection of modules is provided. These
-modules can arbitrarily be combined to meet the needs of the desired
-system architecture. A module of the hardware layer is referred to as an
-HDM (hardware dependent module). Each module of this layer handles exactly
-one of the peripheral interfaces of a network interface controller (e.g.
-USB, MediaLB, I2C). A module of the application layer is referred to as an
-AIM (application interfacing module). The modules of this layer give access
-to MOST via one the following ways: character devices, ALSA, Networking or
-V4L2.
+The MOST driver uses module stacking to divide the associated modules into
+three layers. From bottom up these layers are: the adapter layer, the core
+layer and the application layer. The core layer implements the MOST
+subsystem and consists basically of the module core.c and its API. It
+registers the MOST bus with the kernel's device model, handles the data
+routing through all three layers, the configuration of the driver, the
+representation of the configuration interface in sysfs and the buffer
+management.
+
+For each of the other two layers a set of modules is provided. Those can be
+arbitrarily combined with the core to meet the connectivity needs of the desired
+system architecture.
+
+A module of the adapter layer is basically a device driver for a different
+subsystem. It is registered with the core to connect the MOST subsystem to
+the attached network interface controller hardware. Hence, a given module
+of this layer is designed to handle exactly one of the peripheral
+interfaces (e.g. USB, MediaLB, I2C) the hardware provides.
+
+A module of the application layer is referred to as a core component,
+which extends the core by providing connectivity to user space.
+Applications can then access a MOST network via character devices, an
+ALSA sound card, a network adapter or a V4L2 capture device.
To physically access MOST, an Intelligent Network Interface Controller
(INIC) is needed. For more information on available controllers visit:
@@ -44,15 +53,14 @@ www.microchip.com
- Section 1.1 Hardware Layer
+ Section 1.1 Adapter Layer
-The hardware layer contains so called hardware dependent modules (HDM). For each
-peripheral interface the hardware supports the driver has a suitable module
-that handles the interface.
-
-The HDMs encapsulate the peripheral interface specific knowledge of the driver
-and provides an easy way of extending the number of supported interfaces.
-Currently the following HDMs are available:
+The adapter layer contains a pool of device drivers. For each peripheral
+interface the hardware supports there is one suitable module that handles
+the interface. Adapter drivers encapsulate the peripheral interface
+specific knowledge of the MOST driver stack and provide an easy way of
+extending the number of supported interfaces. Currently the following
+interfaces are available:
1) MediaLB (DIM2)
Host wants to communicate with hardware via MediaLB.
@@ -63,26 +71,34 @@ Currently the following HDMs are available:
3) USB
Host wants to communicate with the hardware via USB.
+Once an adapter driver recognizes a MOST device being attached, it
+registers it with the core, which, in turn, assigns the necessary members
+of the embedded struct device (e.g. the bus this device belongs to and
+attribute groups) and registers it with the kernel's device model.
- Section 1.2 Core Layer
-
-The core layer contains the mostcore module only, which processes the driver
-configuration via sysfs, buffer management and data forwarding.
+ Section 1.2 Core Layer
+This layer implements the MOST subsystem. It contains the core module and
+the header file most.h that exposes the API of the core. When inserted in
+the kernel, it registers the MOST bus_type with the kernel's device model
+and registers itself as a device driver for this bus. Besides these meta
+tasks the core populates the configuration directory for a registered MOST
+device (represented by struct most_interface) in sysfs and processes the
+configuration of the device's interface. The core layer also handles the
+buffer management and the data/message routing.
- Section 1.2 Application Layer
-The application layer contains so called application interfacing modules (AIM).
-Depending on how the driver should interface to the application, one or more
-suitable modules can be selected.
+ Section 1.3 Application Layer
-The AIMs encapsulate the application interface specific knowledge of the driver
-and provides access to user space or other kernel subsystems.
-Currently the following AIMs are available
+This layer contains a pool of device drivers that are components of the
+core designed to make up the userspace experience of the MOST driver stack.
+Depending on how an application is meant to interface with the driver, one or
+more modules of this pool can be registered with the core. Currently the
+following components are available:
1) Character Device
- Applications can access the driver by means of character devices.
+ Userspace can access the driver by means of character devices.
2) Networking
 	Standard networking applications (e.g. iperf) can be used to access
@@ -97,28 +113,103 @@ Currently the following AIMs are available
used to access the driver via the ALSA subsystem.
+ Section 2 Usage of the MOST Driver
+
+ Section 2.1 Configuration and Data Link
+
+The driver is to be configured via configfs. Each loaded component kernel
+object (see section 1.3) registers a subsystem with configfs, which is used to
+configure and establish communication pathways (links) to attached devices on
+the bus. To do so, the user has to descend into the component's configuration
+directory and create a new directory (a child config item). The name of this
+directory will be used as a reference for the link and it will contain the
+following attributes:
+
+ - buffer_size
+ configure the buffer size for this channel
+ - subbuffer_size
+ configure the sub-buffer size for this channel (needed for
+	synchronous and isochronous data)
+ - num_buffers
+ configure number of buffers used for this channel
+ - datatype
+ configure type of data that will travel over this channel
+ - direction
+ configure whether this link will be an input or output
+ - dbr_size
+ configure DBR data buffer size (this is used for MediaLB communication
+ only)
+ - packets_per_xact
+ configure the number of packets that will be collected from the
+ network before being transmitted via USB (this is used for USB
+ communication only)
+ - device
+ name of the device the link is to be attached to
+ - channel
+ name of the channel the link is to be attached to
+ - comp_params
+ pass parameters needed by some components
+ - create_link
+ write '1' to this attribute to trigger the creation of the link. In
+	case of speculative configuration, the creation is postponed until
+	a physical device is attached to the bus.
+ - destroy_link
+ write '1' to this attribute to destroy an already established link
+
+
+See ABI/sysfs-bus-most.txt and ABI/configfs-most.txt
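+
+For illustration, creating a link for the character device component could
+look roughly like the session below. The configfs directory name, the device
+and channel names and all attribute values are placeholders; the exact value
+strings accepted by the attributes are described in the ABI files referenced
+above.
+
+	$ cd /sys/kernel/config/most_cdev
+	$ mkdir my_rx_channel && cd my_rx_channel
+	$ echo 1024 > buffer_size
+	$ echo 16 > num_buffers
+	$ echo rx > direction
+	$ echo control > datatype
+	$ echo mdev0 > device
+	$ echo ep_81 > channel
+	$ echo 1 > create_link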
+
+
+ Section 2.2 Configure a Sound Card
+
+Setting up synchronous channels to be mapped as an ALSA sound adapter is a
+two-step process. Firstly, a directory (a child config group) has to be
+created inside most_sound's configuration directory. This adapter dir will
+represent the sound adapter. The name of the directory is for user reference
+only and has no further influence, as all sound adapters will be given a static
+name in ALSA. The sound adapter will have the following attribute:
+
+ - create_card
+ write '1' to this attribute to trigger the registration of the card
+ with the ALSA subsystem.
+	In case of speculative configuration, the creation is postponed
+	until a physical device is attached to the bus.
+
+Secondly, links will have to be created inside the adapter dir as described in
+section 2.1. These links will become the PCM devices of the sound card. The
+name of a PCM device will be inherited from the directory name. When all
+channels have been configured and created, the sound card itself can be created
+by writing '1' to the create_card attribute.
+
+The sound component needs an additional parameter to determine the audio
+resolution that is going to be used.
+The following audio formats are available:
- Section 2 Configuration
-
-See ABI/sysfs-class-most.txt
-
+ - "1x8" (Mono)
+ - "2x16" (16-bit stereo)
+ - "2x24" (24-bit stereo)
+ - "2x32" (32-bit stereo)
+ - "6x16" (16-bit surround 5.1)
+The resolution string has to be written to the link directory's comp_params
+attribute.
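+
+Putting it together, a hypothetical sound card setup (all names and values
+are placeholders) could look like this:
+
+	$ cd /sys/kernel/config/most_sound
+	$ mkdir adapter0 && cd adapter0
+	$ mkdir audio_rx && cd audio_rx
+	$ echo mdev0 > device
+	$ echo ep_81 > channel
+	$ echo 2x16 > comp_params
+	$ # ...remaining link attributes as in section 2.1...
+	$ echo 1 > create_link
+	$ cd .. && echo 1 > create_card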
- Section 3 USB Padding
+ Section 2.3 USB Padding
-When transceiving synchronous or isochronous data, the number of packets per USB
-transaction and the sub-buffer size need to be configured. These values
-are needed for the driver to process buffer padding, as expected by hardware,
-which is for performance optimization purposes of the USB transmission.
+When transceiving synchronous or isochronous data, the number of packets
+per USB transaction and the sub-buffer size need to be configured. These
+values are needed for the driver to process buffer padding, as expected by
+hardware, which is for performance optimization purposes of the USB
+transmission.
When transmitting synchronous data the allocated channel width needs to be
-written to 'set_subbuffer_size'. Additionally, the number of MOST frames that
+written to 'subbuffer_size'. Additionally, the number of MOST frames that
should travel to the host within one USB transaction need to be written to
'packets_per_xact'.
-Internally the synchronous threshold is calculated as follows:
+The driver, then, calculates the synchronous threshold as follows:
- frame_size = set_subbuffer_size * packets_per_xact
+ frame_size = subbuffer_size * packets_per_xact
In case 'packets_per_xact' is set to 0xFF the maximum number of packets,
allocated within one MOST frame, is calculated that fit into _one_ 512 byte
@@ -126,55 +217,21 @@ USB full packet.
frame_size = floor(MTU_USB / bandwidth_sync) * bandwidth_sync
-This frame_size is the number of synchronous data within an USB transaction,
-which renders MTU_USB - frame_size bytes for padding.
+This frame_size is the number of synchronous data bytes within a USB
+transaction, which renders MTU_USB - frame_size bytes for padding.
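+
+As a quick illustration with hypothetical values, subbuffer_size = 4 and
+packets_per_xact = 32 give
+
+	frame_size = 4 * 32 = 128
+
+so, taking the 512 byte USB full packet mentioned above as MTU_USB, each
+transaction carries 512 - 128 = 384 bytes of padding.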
When transmitting isochronous AVP data the desired packet size needs to be
-written to 'set_subbuffer_size' and hardware will always expect two isochronous
+written to 'subbuffer_size' and hardware will always expect two isochronous
packets within one USB transaction. This renders
- MTU_USB - (2 * set_subbuffer_size)
+ MTU_USB - (2 * subbuffer_size)
bytes for padding.
-Note that at least 2 times set_subbuffer_size bytes for isochronous data or
-set_subbuffer_size times packts_per_xact bytes for synchronous data need to be
-put in the transmission buffer and passed to the driver.
-
-Since HDMs are allowed to change a chosen configuration to best fit its
-constraints, it is recommended to always double check the configuration and read
-back the previously written files.
-
-
-
- Section 4 Routing Channels
-
-To connect a channel that has been configured as outlined above to an AIM and
-make it accessible to user space applications, the attribute file 'add_link' is
-used. To actually bind a channel to the AIM a string needs to be written to the
-file that complies with the following syntax:
-
- "most_device:channel_name:link_name[.param]"
-
-The example above links the channel "channel_name" of the device "most_device"
-to the AIM. In case the AIM interfaces the VFS this would also create a device
-node "link_name" in the /dev directory. The parameter "param" is an AIM dependent
-string, which can be omitted in case the used AIM does not make any use of it.
-
-Cdev AIM example:
- $ echo "mdev0:ep_81:my_rx_channel" >add_link
- $ echo "mdev0:ep_81" >add_link
-
-
-Sound/ALSA AIM example:
-
-The sound/ALSA AIM needs an additional parameter to determine the audio resolution
-that is going to be used. The following strings can be used:
-
- - "1x8" (Mono)
- - "2x16" (16-bit stereo)
- - "2x24" (24-bit stereo)
- - "2x32" (32-bit stereo)
+Note that at least (2 * subbuffer_size) bytes for isochronous data or
+(subbuffer_size * packets_per_xact) bytes for synchronous data need to
+be put in the transmission buffer and passed to the driver.
- $ echo "mdev0:ep_81:audio_rx.2x16" >add_link
- $ echo "mdev0:ep_81" >add_link
+Since adapter drivers are allowed to change a chosen configuration to best
+fit their constraints, it is recommended to always double-check the
+configuration and read back the previously written files.
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 0b9b9b539f70..e89658df6f12 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,31 +1,27 @@
-menuconfig MOST
- tristate "MOST driver"
- depends on HAS_DMA
- select MOSTCORE
- default n
- ---help---
- This option allows you to enable support for MOST Network transceivers.
+# SPDX-License-Identifier: GPL-2.0
+menuconfig MOST_COMPONENTS
+ tristate "MOST support"
+ depends on HAS_DMA && CONFIGFS_FS && MOST
+ default n
+ help
+ Say Y here if you want to enable MOST support.
+ This driver needs at least one additional component to enable the
+ desired access from userspace (e.g. character devices) and one that
+ matches the network controller's hardware interface (e.g. USB).
- If in doubt, say N here.
+ To compile this driver as a module, choose M here: the
+ module will be called most_core.
+ If in doubt, say N here.
-if MOST
-source "drivers/staging/most/mostcore/Kconfig"
+if MOST_COMPONENTS
-source "drivers/staging/most/aim-cdev/Kconfig"
+source "drivers/staging/most/net/Kconfig"
-source "drivers/staging/most/aim-network/Kconfig"
+source "drivers/staging/most/video/Kconfig"
-source "drivers/staging/most/aim-sound/Kconfig"
-
-source "drivers/staging/most/aim-v4l2/Kconfig"
-
-source "drivers/staging/most/hdm-dim2/Kconfig"
-
-source "drivers/staging/most/hdm-i2c/Kconfig"
-
-source "drivers/staging/most/hdm-usb/Kconfig"
+source "drivers/staging/most/dim2/Kconfig"
endif
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 9ee981c7786b..e45084df7803 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,8 +1,5 @@
-obj-$(CONFIG_MOSTCORE) += mostcore/
-obj-$(CONFIG_AIM_CDEV) += aim-cdev/
-obj-$(CONFIG_AIM_NETWORK) += aim-network/
-obj-$(CONFIG_AIM_SOUND) += aim-sound/
-obj-$(CONFIG_AIM_V4L2) += aim-v4l2/
-obj-$(CONFIG_HDM_DIM2) += hdm-dim2/
-obj-$(CONFIG_HDM_I2C) += hdm-i2c/
-obj-$(CONFIG_HDM_USB) += hdm-usb/
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_MOST_NET) += net/
+obj-$(CONFIG_MOST_VIDEO) += video/
+obj-$(CONFIG_MOST_DIM2) += dim2/
diff --git a/drivers/staging/most/TODO b/drivers/staging/most/TODO
index 4fa11a9d2cf7..a6448a05ed46 100644
--- a/drivers/staging/most/TODO
+++ b/drivers/staging/most/TODO
@@ -1,8 +1 @@
* Get through code review with Greg Kroah-Hartman
-
-Contact:
-To:
-Christian Gromm <christian.gromm@microchip.com>
-Cc:
-Michael Fabry <Michael.Fabry@microchip.com>
-Christian Gromm <chris@engineersdelight.de>
diff --git a/drivers/staging/most/aim-cdev/Kconfig b/drivers/staging/most/aim-cdev/Kconfig
deleted file mode 100644
index 3c59f1bac127..000000000000
--- a/drivers/staging/most/aim-cdev/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# MOST Cdev configuration
-#
-
-config AIM_CDEV
- tristate "Cdev AIM"
-
- ---help---
- Say Y here if you want to commumicate via character devices.
-
- To compile this driver as a module, choose M here: the
- module will be called aim_cdev. \ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/Makefile b/drivers/staging/most/aim-cdev/Makefile
deleted file mode 100644
index 0bcc6c637b75..000000000000
--- a/drivers/staging/most/aim-cdev/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_CDEV) += aim_cdev.o
-
-aim_cdev-objs := cdev.o
-ccflags-y += -Idrivers/staging/most/mostcore/ \ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
deleted file mode 100644
index 930ada0922f3..000000000000
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ /dev/null
@@ -1,554 +0,0 @@
-/*
- * cdev.c - Application interfacing module for character devices
- *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/cdev.h>
-#include <linux/poll.h>
-#include <linux/kfifo.h>
-#include <linux/uaccess.h>
-#include <linux/idr.h>
-#include "mostcore.h"
-
-static dev_t aim_devno;
-static struct class *aim_class;
-static struct ida minor_id;
-static unsigned int major;
-static struct most_aim cdev_aim;
-
-struct aim_channel {
- wait_queue_head_t wq;
- wait_queue_head_t poll_wq;
- struct cdev cdev;
- struct device *dev;
- struct mutex io_mutex;
- struct most_interface *iface;
- struct most_channel_config *cfg;
- unsigned int channel_id;
- dev_t devno;
- bool keep_mbo;
- unsigned int mbo_offs;
- struct mbo *stacked_mbo;
- DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
- atomic_t access_ref;
- struct list_head list;
-};
-#define to_channel(d) container_of(d, struct aim_channel, cdev)
-static struct list_head channel_list;
-static spinlock_t ch_list_lock;
-
-static struct aim_channel *get_channel(struct most_interface *iface, int id)
-{
- struct aim_channel *channel, *tmp;
- unsigned long flags;
- int found_channel = 0;
-
- spin_lock_irqsave(&ch_list_lock, flags);
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- if ((channel->iface == iface) && (channel->channel_id == id)) {
- found_channel = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&ch_list_lock, flags);
- if (!found_channel)
- return NULL;
- return channel;
-}
-
-/**
- * aim_open - implements the syscall to open the device
- * @inode: inode pointer
- * @filp: file pointer
- *
- * This stores the channel pointer in the private data field of
- * the file structure and activates the channel within the core.
- */
-static int aim_open(struct inode *inode, struct file *filp)
-{
- struct aim_channel *channel;
- int ret;
-
- channel = to_channel(inode->i_cdev);
- filp->private_data = channel;
-
- if (((channel->cfg->direction == MOST_CH_RX) &&
- ((filp->f_flags & O_ACCMODE) != O_RDONLY))
- || ((channel->cfg->direction == MOST_CH_TX) &&
- ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
- pr_info("WARN: Access flags mismatch\n");
- return -EACCES;
- }
- if (!atomic_inc_and_test(&channel->access_ref)) {
- pr_info("WARN: Device is busy\n");
- atomic_dec(&channel->access_ref);
- return -EBUSY;
- }
-
- ret = most_start_channel(channel->iface, channel->channel_id, &cdev_aim);
- if (ret)
- atomic_dec(&channel->access_ref);
- return ret;
-}
-
-/**
- * aim_close - implements the syscall to close the device
- * @inode: inode pointer
- * @filp: file pointer
- *
- * This stops the channel within the core.
- */
-static int aim_close(struct inode *inode, struct file *filp)
-{
- int ret;
- struct mbo *mbo;
- struct aim_channel *channel = to_channel(inode->i_cdev);
-
- mutex_lock(&channel->io_mutex);
- if (!channel->dev) {
- mutex_unlock(&channel->io_mutex);
- atomic_dec(&channel->access_ref);
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- wake_up_interruptible(&channel->wq);
- kfree(channel);
- return 0;
- }
- mutex_unlock(&channel->io_mutex);
-
- while (0 != kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1))
- most_put_mbo(mbo);
- if (channel->keep_mbo)
- most_put_mbo(channel->stacked_mbo);
- ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
- atomic_dec(&channel->access_ref);
- wake_up_interruptible(&channel->wq);
- return ret;
-}
-
-/**
- * aim_write - implements the syscall to write to the device
- * @filp: file pointer
- * @buf: pointer to user buffer
- * @count: number of bytes to write
- * @offset: offset from where to start writing
- */
-static ssize_t aim_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *offset)
-{
- int ret, err;
- size_t actual_len = 0;
- size_t max_len = 0;
- ssize_t retval;
- struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
-
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
- return -EPIPE;
- }
- mutex_unlock(&channel->io_mutex);
-
- mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
-
- if (!mbo) {
- if ((filp->f_flags & O_NONBLOCK))
- return -EAGAIN;
- if (wait_event_interruptible(
- channel->wq,
- (mbo = most_get_mbo(channel->iface,
- channel->channel_id,
- &cdev_aim)) ||
- (!channel->dev)))
- return -ERESTARTSYS;
- }
-
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
- err = -EPIPE;
- goto error;
- }
- mutex_unlock(&channel->io_mutex);
-
- max_len = channel->cfg->buffer_size;
- actual_len = min(count, max_len);
- mbo->buffer_length = actual_len;
-
- retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
- if (retval) {
- err = -EIO;
- goto error;
- }
-
- ret = most_submit_mbo(mbo);
- if (ret) {
- pr_info("submitting MBO to core failed\n");
- err = ret;
- goto error;
- }
- return actual_len - retval;
-error:
- if (mbo)
- most_put_mbo(mbo);
- return err;
-}
-
-/**
- * aim_read - implements the syscall to read from the device
- * @filp: file pointer
- * @buf: pointer to user buffer
- * @count: number of bytes to read
- * @offset: offset from where to start reading
- */
-static ssize_t
-aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
-{
- ssize_t retval;
- size_t not_copied, proc_len;
- struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
-
- if (channel->keep_mbo) {
- mbo = channel->stacked_mbo;
- channel->keep_mbo = false;
- goto start_copy;
- }
- while ((0 == kfifo_out(&channel->fifo, &mbo, 1))
- && (channel->dev)) {
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- if (wait_event_interruptible(channel->wq,
- (!kfifo_is_empty(&channel->fifo) ||
- (!channel->dev))))
- return -ERESTARTSYS;
- }
-
-start_copy:
- /* make sure we don't submit to gone devices */
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
- return -EIO;
- }
-
- if (count < mbo->processed_length)
- channel->keep_mbo = true;
-
- proc_len = min((int)count,
- (int)(mbo->processed_length - channel->mbo_offs));
-
- not_copied = copy_to_user(buf,
- mbo->virt_address + channel->mbo_offs,
- proc_len);
-
- retval = not_copied ? proc_len - not_copied : proc_len;
-
- if (channel->keep_mbo) {
- channel->mbo_offs = retval;
- channel->stacked_mbo = mbo;
- } else {
- most_put_mbo(mbo);
- channel->mbo_offs = 0;
- }
- mutex_unlock(&channel->io_mutex);
- return retval;
-}
-
-static inline bool __must_check IS_ERR_OR_FALSE(int x)
-{
- return x <= 0;
-}
-
-static unsigned int aim_poll(struct file *filp, poll_table *wait)
-{
- struct aim_channel *c = filp->private_data;
- unsigned int mask = 0;
-
- poll_wait(filp, &c->poll_wq, wait);
-
- if (c->cfg->direction == MOST_CH_RX) {
- if (!kfifo_is_empty(&c->fifo))
- mask |= POLLIN | POLLRDNORM;
- } else {
- if (!IS_ERR_OR_FALSE(channel_has_mbo(c->iface, c->channel_id)))
- mask |= POLLOUT | POLLWRNORM;
- }
- return mask;
-}
-
-/**
- * Initialization of struct file_operations
- */
-static const struct file_operations channel_fops = {
- .owner = THIS_MODULE,
- .read = aim_read,
- .write = aim_write,
- .open = aim_open,
- .release = aim_close,
- .poll = aim_poll,
-};
-
-/**
- * aim_disconnect_channel - disconnect a channel
- * @iface: pointer to interface instance
- * @channel_id: channel index
- *
- * This frees allocated memory and removes the cdev that represents this
- * channel in user space.
- */
-static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
-{
- struct aim_channel *channel;
- unsigned long flags;
-
- if (!iface) {
- pr_info("Bad interface pointer\n");
- return -EINVAL;
- }
-
- channel = get_channel(iface, channel_id);
- if (!channel)
- return -ENXIO;
-
- mutex_lock(&channel->io_mutex);
- channel->dev = NULL;
- mutex_unlock(&channel->io_mutex);
-
- if (atomic_read(&channel->access_ref)) {
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- spin_lock_irqsave(&ch_list_lock, flags);
- list_del(&channel->list);
- spin_unlock_irqrestore(&ch_list_lock, flags);
- kfree(channel);
- } else {
- wake_up_interruptible(&channel->wq);
- }
- return 0;
-}
-
-/**
- * aim_rx_completion - completion handler for rx channels
- * @mbo: pointer to buffer object that has completed
- *
- * This searches for the channel linked to this MBO and stores it in the local
- * fifo buffer.
- */
-static int aim_rx_completion(struct mbo *mbo)
-{
- struct aim_channel *channel;
-
- if (!mbo)
- return -EINVAL;
-
- channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
- if (!channel)
- return -ENXIO;
-
- kfifo_in(&channel->fifo, &mbo, 1);
-#ifdef DEBUG_MESG
- if (kfifo_is_full(&channel->fifo))
- pr_info("WARN: Fifo is full\n");
-#endif
- wake_up_interruptible(&channel->wq);
- return 0;
-}
-
-/**
- * aim_tx_completion - completion handler for tx channels
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- *
- * This wakes sleeping processes in the wait-queue.
- */
-static int aim_tx_completion(struct most_interface *iface, int channel_id)
-{
- struct aim_channel *channel;
-
- if (!iface) {
- pr_info("Bad interface pointer\n");
- return -EINVAL;
- }
- if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
- pr_info("Channel ID out of range\n");
- return -EINVAL;
- }
-
- channel = get_channel(iface, channel_id);
- if (!channel)
- return -ENXIO;
- wake_up_interruptible(&channel->wq);
- return 0;
-}
-
-static struct most_aim cdev_aim;
-
-/**
- * aim_probe - probe function of the driver module
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- * @cfg: pointer to actual channel configuration
- * @parent: pointer to kobject (needed for sysfs hook-up)
- * @name: name of the device to be created
- *
- * This allocates achannel object and creates the device node in /dev
- *
- * Returns 0 on success or error code otherwise.
- */
-static int aim_probe(struct most_interface *iface, int channel_id,
- struct most_channel_config *cfg,
- struct kobject *parent, char *name)
-{
- struct aim_channel *channel;
- unsigned long cl_flags;
- int retval;
- int current_minor;
-
- if ((!iface) || (!cfg) || (!parent) || (!name)) {
- pr_info("Probing AIM with bad arguments");
- return -EINVAL;
- }
- channel = get_channel(iface, channel_id);
- if (channel)
- return -EEXIST;
-
- current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
- if (current_minor < 0)
- return current_minor;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel) {
- retval = -ENOMEM;
- goto error_alloc_channel;
- }
-
- channel->devno = MKDEV(major, current_minor);
- cdev_init(&channel->cdev, &channel_fops);
- channel->cdev.owner = THIS_MODULE;
- cdev_add(&channel->cdev, channel->devno, 1);
- channel->iface = iface;
- channel->cfg = cfg;
- channel->channel_id = channel_id;
- channel->mbo_offs = 0;
- atomic_set(&channel->access_ref, -1);
- INIT_KFIFO(channel->fifo);
- retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
- if (retval) {
- pr_info("failed to alloc channel kfifo");
- goto error_alloc_kfifo;
- }
- init_waitqueue_head(&channel->wq);
- init_waitqueue_head(&channel->poll_wq);
- mutex_init(&channel->io_mutex);
- spin_lock_irqsave(&ch_list_lock, cl_flags);
- list_add_tail(&channel->list, &channel_list);
- spin_unlock_irqrestore(&ch_list_lock, cl_flags);
- channel->dev = device_create(aim_class,
- NULL,
- channel->devno,
- NULL,
- "%s", name);
-
- retval = IS_ERR(channel->dev);
- if (retval) {
- pr_info("failed to create new device node %s\n", name);
- goto error_create_device;
- }
- kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
- return 0;
-
-error_create_device:
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
-error_alloc_kfifo:
- cdev_del(&channel->cdev);
- kfree(channel);
-error_alloc_channel:
- ida_simple_remove(&minor_id, current_minor);
- return retval;
-}
-
-static struct most_aim cdev_aim = {
- .name = "cdev",
- .probe_channel = aim_probe,
- .disconnect_channel = aim_disconnect_channel,
- .rx_completion = aim_rx_completion,
- .tx_completion = aim_tx_completion,
-};
-
-static int __init mod_init(void)
-{
- pr_info("init()\n");
-
- INIT_LIST_HEAD(&channel_list);
- spin_lock_init(&ch_list_lock);
- ida_init(&minor_id);
-
- if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
- return -EIO;
- major = MAJOR(aim_devno);
-
- aim_class = class_create(THIS_MODULE, "most_cdev_aim");
- if (IS_ERR(aim_class)) {
- pr_err("no udev support\n");
- goto free_cdev;
- }
-
- if (most_register_aim(&cdev_aim))
- goto dest_class;
- return 0;
-
-dest_class:
- class_destroy(aim_class);
-free_cdev:
- unregister_chrdev_region(aim_devno, 1);
- return -EIO;
-}
-
-static void __exit mod_exit(void)
-{
- struct aim_channel *channel, *tmp;
-
- pr_info("exit module\n");
-
- most_deregister_aim(&cdev_aim);
-
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- kfree(channel);
- }
- class_destroy(aim_class);
- unregister_chrdev_region(aim_devno, 1);
- ida_destroy(&minor_id);
-}
-
-module_init(mod_init);
-module_exit(mod_exit);
-MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("character device AIM for mostcore");
diff --git a/drivers/staging/most/aim-network/Kconfig b/drivers/staging/most/aim-network/Kconfig
deleted file mode 100644
index 4c66b24cf73c..000000000000
--- a/drivers/staging/most/aim-network/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# MOST Networking configuration
-#
-
-config AIM_NETWORK
- tristate "Networking AIM"
- depends on NET
-
- ---help---
- Say Y here if you want to commumicate via a networking device.
-
- To compile this driver as a module, choose M here: the
- module will be called aim_network.
diff --git a/drivers/staging/most/aim-network/Makefile b/drivers/staging/most/aim-network/Makefile
deleted file mode 100644
index 840c1dd94873..000000000000
--- a/drivers/staging/most/aim-network/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_NETWORK) += aim_network.o
-
-aim_network-objs := networking.o
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
deleted file mode 100644
index f0d9a439544e..000000000000
--- a/drivers/staging/most/aim-network/networking.c
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- * Networking AIM - Networking Application Interface Module for MostCore
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/kobject.h>
-#include "mostcore.h"
-#include "networking.h"
-
-
-#define MEP_HDR_LEN 8
-#define MDP_HDR_LEN 16
-#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)
-
-#define PMHL 5
-
-#define PMS_TELID_UNSEGM_MAMAC 0x0A
-#define PMS_FIFONO_MDP 0x01
-#define PMS_FIFONO_MEP 0x04
-#define PMS_MSGTYPE_DATA 0x04
-#define PMS_DEF_PRIO 0
-#define MEP_DEF_RETRY 15
-
-#define PMS_FIFONO_MASK 0x07
-#define PMS_FIFONO_SHIFT 3
-#define PMS_RETRY_SHIFT 4
-#define PMS_TELID_MASK 0x0F
-#define PMS_TELID_SHIFT 4
-
-#define HB(value) ((u8)((u16)(value) >> 8))
-#define LB(value) ((u8)(value))
-
-
-
-#define EXTRACT_BIT_SET(bitset_name, value) \
- (((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)
-
-#define PMS_IS_MEP(buf, len) \
- ((len) > MEP_HDR_LEN && \
- EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
-
-#define PMS_IS_MAMAC(buf, len) \
- ((len) > MDP_HDR_LEN && \
- EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
- EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
-
-struct net_dev_channel {
- bool linked;
- int ch_id;
-};
-
-struct net_dev_context {
- struct most_interface *iface;
- bool channels_opened;
- bool is_mamac;
- unsigned char link_stat;
- struct net_device *dev;
- struct net_dev_channel rx;
- struct net_dev_channel tx;
- struct list_head list;
-};
-
-static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
-static struct spinlock list_lock;
-static struct most_aim aim;
-
-
-static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
-{
- u8 *buff = mbo->virt_address;
- const u8 broadcast[] = { 0x03, 0xFF };
- const u8 *dest_addr = skb->data + 4;
- const u8 *eth_type = skb->data + 12;
- unsigned int payload_len = skb->len - ETH_HLEN;
- unsigned int mdp_len = payload_len + MDP_HDR_LEN;
-
- if (mbo->buffer_length < mdp_len) {
- pr_err("drop: too small buffer! (%d for %d)\n",
- mbo->buffer_length, mdp_len);
- return -EINVAL;
- }
-
- if (skb->len < ETH_HLEN) {
- pr_err("drop: too small packet! (%d)\n", skb->len);
- return -EINVAL;
- }
-
- if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
- dest_addr = broadcast;
-
- *buff++ = HB(mdp_len - 2);
- *buff++ = LB(mdp_len - 2);
-
- *buff++ = PMHL;
- *buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
- *buff++ = PMS_DEF_PRIO;
- *buff++ = dest_addr[0];
- *buff++ = dest_addr[1];
- *buff++ = 0x00;
-
- *buff++ = HB(payload_len + 6);
- *buff++ = LB(payload_len + 6);
-
- /* end of FPH here */
-
- *buff++ = eth_type[0];
- *buff++ = eth_type[1];
- *buff++ = 0;
- *buff++ = 0;
-
- *buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
- *buff++ = LB(payload_len);
-
- memcpy(buff, skb->data + ETH_HLEN, payload_len);
- mbo->buffer_length = mdp_len;
- return 0;
-}
-
-static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
-{
- u8 *buff = mbo->virt_address;
- unsigned int mep_len = skb->len + MEP_HDR_LEN;
-
- if (mbo->buffer_length < mep_len) {
- pr_err("drop: too small buffer! (%d for %d)\n",
- mbo->buffer_length, mep_len);
- return -EINVAL;
- }
-
- *buff++ = HB(mep_len - 2);
- *buff++ = LB(mep_len - 2);
-
- *buff++ = PMHL;
- *buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
- *buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
- *buff++ = 0;
- *buff++ = 0;
- *buff++ = 0;
-
- memcpy(buff, skb->data, skb->len);
- mbo->buffer_length = mep_len;
- return 0;
-}
-
-static int most_nd_set_mac_address(struct net_device *dev, void *p)
-{
- struct net_dev_context *nd = dev->ml_priv;
- int err = eth_mac_addr(dev, p);
-
- if (err)
- return err;
-
- BUG_ON(nd->dev != dev);
-
- nd->is_mamac =
- (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
- dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
-
- /*
- * Set default MTU for the given packet type.
- * It is still possible to change MTU using ip tools afterwards.
- */
- dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
-
- return 0;
-}
-
-static int most_nd_open(struct net_device *dev)
-{
- struct net_dev_context *nd = dev->ml_priv;
-
- netdev_info(dev, "open net device\n");
-
- BUG_ON(nd->dev != dev);
-
- if (nd->channels_opened)
- return -EFAULT;
-
- BUG_ON(!nd->tx.linked || !nd->rx.linked);
-
- if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
- netdev_err(dev, "most_start_channel() failed\n");
- return -EBUSY;
- }
-
- if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
- netdev_err(dev, "most_start_channel() failed\n");
- most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
- return -EBUSY;
- }
-
- nd->channels_opened = true;
-
- if (nd->is_mamac) {
- nd->link_stat = 1;
- netif_wake_queue(dev);
- } else {
- nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
- }
-
- return 0;
-}
-
-static int most_nd_stop(struct net_device *dev)
-{
- struct net_dev_context *nd = dev->ml_priv;
-
- netdev_info(dev, "stop net device\n");
-
- BUG_ON(nd->dev != dev);
- netif_stop_queue(dev);
-
- if (nd->channels_opened) {
- most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
- most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
- nd->channels_opened = false;
- }
-
- return 0;
-}
-
-static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_dev_context *nd = dev->ml_priv;
- struct mbo *mbo;
- int ret;
-
- BUG_ON(nd->dev != dev);
-
- mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
-
- if (!mbo) {
- netif_stop_queue(dev);
- dev->stats.tx_fifo_errors++;
- return NETDEV_TX_BUSY;
- }
-
- if (nd->is_mamac)
- ret = skb_to_mamac(skb, mbo);
- else
- ret = skb_to_mep(skb, mbo);
-
- if (ret) {
- most_put_mbo(mbo);
- dev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- most_submit_mbo(mbo);
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops most_nd_ops = {
- .ndo_open = most_nd_open,
- .ndo_stop = most_nd_stop,
- .ndo_start_xmit = most_nd_start_xmit,
- .ndo_set_mac_address = most_nd_set_mac_address,
-};
-
-static void most_nd_setup(struct net_device *dev)
-{
- netdev_info(dev, "setup net device\n");
- ether_setup(dev);
- dev->netdev_ops = &most_nd_ops;
-}
-
-static void most_net_rm_netdev_safe(struct net_dev_context *nd)
-{
- if (!nd->dev)
- return;
-
- pr_info("remove net device %p\n", nd->dev);
-
- unregister_netdev(nd->dev);
- free_netdev(nd->dev);
- nd->dev = NULL;
-}
-
-static struct net_dev_context *get_net_dev_context(
- struct most_interface *iface)
-{
- struct net_dev_context *nd, *tmp;
-
- spin_lock(&list_lock);
- list_for_each_entry_safe(nd, tmp, &net_devices, list) {
- if (nd->iface == iface) {
- spin_unlock(&list_lock);
- return nd;
- }
- }
- spin_unlock(&list_lock);
- return NULL;
-}
-
-static int aim_probe_channel(struct most_interface *iface, int channel_idx,
- struct most_channel_config *ccfg,
- struct kobject *parent, char *name)
-{
- struct net_dev_context *nd;
- struct net_dev_channel *ch;
-
- if (!iface)
- return -EINVAL;
-
- if (ccfg->data_type != MOST_CH_ASYNC)
- return -EINVAL;
-
- nd = get_net_dev_context(iface);
-
- if (!nd) {
- nd = kzalloc(sizeof(*nd), GFP_KERNEL);
- if (!nd)
- return -ENOMEM;
-
- nd->iface = iface;
-
- spin_lock(&list_lock);
- list_add(&nd->list, &net_devices);
- spin_unlock(&list_lock);
- }
-
- ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
- if (ch->linked) {
- pr_err("only one channel per instance & direction allowed\n");
- return -EINVAL;
- }
-
- if (nd->tx.linked || nd->rx.linked) {
- struct net_device *dev =
- alloc_netdev(0, "meth%d", NET_NAME_UNKNOWN, most_nd_setup);
-
- if (!dev) {
- pr_err("no memory for net_device\n");
- return -ENOMEM;
- }
-
- nd->dev = dev;
- ch->ch_id = channel_idx;
- ch->linked = true;
-
- dev->ml_priv = nd;
- if (register_netdev(dev)) {
- pr_err("registering net device failed\n");
- ch->linked = false;
- free_netdev(dev);
- return -EINVAL;
- }
- }
-
- ch->ch_id = channel_idx;
- ch->linked = true;
-
- return 0;
-}
-
-static int aim_disconnect_channel(struct most_interface *iface,
- int channel_idx)
-{
- struct net_dev_context *nd;
- struct net_dev_channel *ch;
-
- nd = get_net_dev_context(iface);
- if (!nd)
- return -EINVAL;
-
- if (nd->rx.linked && channel_idx == nd->rx.ch_id)
- ch = &nd->rx;
- else if (nd->tx.linked && channel_idx == nd->tx.ch_id)
- ch = &nd->tx;
- else
- return -EINVAL;
-
- ch->linked = false;
-
- /*
- * do not call most_stop_channel() here, because channels are
- * going to be closed in ndo_stop() after unregister_netdev()
- */
- most_net_rm_netdev_safe(nd);
-
- if (!nd->rx.linked && !nd->tx.linked) {
- spin_lock(&list_lock);
- list_del(&nd->list);
- spin_unlock(&list_lock);
- kfree(nd);
- }
-
- return 0;
-}
-
-static int aim_resume_tx_channel(struct most_interface *iface,
- int channel_idx)
-{
- struct net_dev_context *nd;
-
- nd = get_net_dev_context(iface);
- if (!nd || !nd->channels_opened || nd->tx.ch_id != channel_idx)
- return 0;
-
- if (!nd->dev)
- return 0;
-
- netif_wake_queue(nd->dev);
- return 0;
-}
-
-static int aim_rx_data(struct mbo *mbo)
-{
- const u32 zero = 0;
- struct net_dev_context *nd;
- char *buf = mbo->virt_address;
- uint32_t len = mbo->processed_length;
- struct sk_buff *skb;
- struct net_device *dev;
-
- nd = get_net_dev_context(mbo->ifp);
- if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
- return -EIO;
-
- dev = nd->dev;
- if (!dev) {
- pr_err_once("drop packet: missing net_device\n");
- return -EIO;
- }
-
- if (nd->is_mamac) {
- if (!PMS_IS_MAMAC(buf, len))
- return -EIO;
-
- skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
- } else {
- if (!PMS_IS_MEP(buf, len))
- return -EIO;
-
- skb = dev_alloc_skb(len - MEP_HDR_LEN);
- }
-
- if (!skb) {
- dev->stats.rx_dropped++;
- pr_err_once("drop packet: no memory for skb\n");
- goto out;
- }
-
- skb->dev = dev;
-
- if (nd->is_mamac) {
- /* dest */
- memcpy(skb_put(skb, ETH_ALEN), dev->dev_addr, ETH_ALEN);
-
- /* src */
- memcpy(skb_put(skb, 4), &zero, 4);
- memcpy(skb_put(skb, 2), buf + 5, 2);
-
- /* eth type */
- memcpy(skb_put(skb, 2), buf + 10, 2);
-
- buf += MDP_HDR_LEN;
- len -= MDP_HDR_LEN;
- } else {
- buf += MEP_HDR_LEN;
- len -= MEP_HDR_LEN;
- }
-
- memcpy(skb_put(skb, len), buf, len);
- skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
-
-out:
- most_put_mbo(mbo);
- return 0;
-}
-
-static struct most_aim aim = {
- .name = "networking",
- .probe_channel = aim_probe_channel,
- .disconnect_channel = aim_disconnect_channel,
- .tx_completion = aim_resume_tx_channel,
- .rx_completion = aim_rx_data,
-};
-
-static int __init most_net_init(void)
-{
- pr_info("most_net_init()\n");
- spin_lock_init(&list_lock);
- return most_register_aim(&aim);
-}
-
-static void __exit most_net_exit(void)
-{
- struct net_dev_context *nd, *tmp;
-
- spin_lock(&list_lock);
- list_for_each_entry_safe(nd, tmp, &net_devices, list) {
- list_del(&nd->list);
- spin_unlock(&list_lock);
- /*
- * do not call most_stop_channel() here, because channels are
- * going to be closed in ndo_stop() after unregister_netdev()
- */
- most_net_rm_netdev_safe(nd);
- kfree(nd);
- spin_lock(&list_lock);
- }
- spin_unlock(&list_lock);
-
- most_deregister_aim(&aim);
- pr_info("most_net_exit()\n");
-}
-
-/**
- * most_deliver_netinfo - callback for HDM to be informed about HW's MAC
- * @param iface - most interface instance
- * @param link_stat - link status
- * @param mac_addr - MAC address
- */
-void most_deliver_netinfo(struct most_interface *iface,
- unsigned char link_stat, unsigned char *mac_addr)
-{
- struct net_dev_context *nd;
- struct net_device *dev;
-
- pr_info("Received netinfo from %s\n", iface->description);
-
- nd = get_net_dev_context(iface);
- if (!nd)
- return;
-
- dev = nd->dev;
- if (!dev)
- return;
-
- if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
-
- if (nd->link_stat != link_stat) {
- nd->link_stat = link_stat;
- if (nd->link_stat)
- netif_wake_queue(dev);
- else
- netif_stop_queue(dev);
- }
-}
-EXPORT_SYMBOL(most_deliver_netinfo);
-
-module_init(most_net_init);
-module_exit(most_net_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
-MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-network/networking.h b/drivers/staging/most/aim-network/networking.h
deleted file mode 100644
index 1b8b434fabb0..000000000000
--- a/drivers/staging/most/aim-network/networking.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Networking AIM - Networking Application Interface Module for MostCore
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-#ifndef _NETWORKING_H_
-#define _NETWORKING_H_
-
-#include "mostcore.h"
-
-
-void most_deliver_netinfo(struct most_interface *iface,
- unsigned char link_stat, unsigned char *mac_addr);
-
-
-#endif
diff --git a/drivers/staging/most/aim-sound/Kconfig b/drivers/staging/most/aim-sound/Kconfig
deleted file mode 100644
index 3194c219ff14..000000000000
--- a/drivers/staging/most/aim-sound/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# MOST ALSA configuration
-#
-
-config AIM_SOUND
- tristate "ALSA AIM"
- depends on SND
- select SND_PCM
- ---help---
- Say Y here if you want to commumicate via ALSA/sound devices.
-
- To compile this driver as a module, choose M here: the
- module will be called aim_sound.
diff --git a/drivers/staging/most/aim-sound/Makefile b/drivers/staging/most/aim-sound/Makefile
deleted file mode 100644
index beba9586fd28..000000000000
--- a/drivers/staging/most/aim-sound/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_SOUND) += aim_sound.o
-
-aim_sound-objs := sound.o
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c
deleted file mode 100644
index f08545c56110..000000000000
--- a/drivers/staging/most/aim-sound/sound.c
+++ /dev/null
@@ -1,769 +0,0 @@
-/*
- * sound.c - Audio Application Interface Module for Mostcore
- *
- * Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <mostcore.h>
-
-#define DRIVER_NAME "sound"
-
-static struct list_head dev_list;
-static struct most_aim audio_aim;
-
-/**
- * struct channel - private structure to keep channel specific data
- * @substream: stores the substream structure
- * @iface: interface for which the channel belongs to
- * @cfg: channel configuration
- * @card: registered sound card
- * @list: list for private use
- * @id: channel index
- * @period_pos: current period position (ring buffer)
- * @buffer_pos: current buffer position (ring buffer)
- * @is_stream_running: identifies whether a stream is running or not
- * @opened: set when the stream is opened
- * @playback_task: playback thread
- * @playback_waitq: waitq used by playback thread
- */
-struct channel {
- struct snd_pcm_substream *substream;
- struct snd_pcm_hardware pcm_hardware;
- struct most_interface *iface;
- struct most_channel_config *cfg;
- struct snd_card *card;
- struct list_head list;
- int id;
- unsigned int period_pos;
- unsigned int buffer_pos;
- bool is_stream_running;
-
- struct task_struct *playback_task;
- wait_queue_head_t playback_waitq;
-
- void (*copy_fn)(void *alsa, void *most, unsigned int bytes);
-};
-
-#define MOST_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
- SNDRV_PCM_INFO_MMAP_VALID | \
- SNDRV_PCM_INFO_BATCH | \
- SNDRV_PCM_INFO_INTERLEAVED | \
- SNDRV_PCM_INFO_BLOCK_TRANSFER)
-
-#define swap16(val) ( \
- (((u16)(val) << 8) & (u16)0xFF00) | \
- (((u16)(val) >> 8) & (u16)0x00FF))
-
-#define swap32(val) ( \
- (((u32)(val) << 24) & (u32)0xFF000000) | \
- (((u32)(val) << 8) & (u32)0x00FF0000) | \
- (((u32)(val) >> 8) & (u32)0x0000FF00) | \
- (((u32)(val) >> 24) & (u32)0x000000FF))
-
-static void swap_copy16(u16 *dest, const u16 *source, unsigned int bytes)
-{
- unsigned int i = 0;
-
- while (i < (bytes / 2)) {
- dest[i] = swap16(source[i]);
- i++;
- }
-}
-
-static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
-{
- unsigned int i = 0;
-
- while (i < bytes - 2) {
- dest[i] = source[i + 2];
- dest[i + 1] = source[i + 1];
- dest[i + 2] = source[i];
- i += 3;
- }
-}
-
-static void swap_copy32(u32 *dest, const u32 *source, unsigned int bytes)
-{
- unsigned int i = 0;
-
- while (i < bytes / 4) {
- dest[i] = swap32(source[i]);
- i++;
- }
-}
-
-static void alsa_to_most_memcpy(void *alsa, void *most, unsigned int bytes)
-{
- memcpy(most, alsa, bytes);
-}
-
-static void alsa_to_most_copy16(void *alsa, void *most, unsigned int bytes)
-{
- swap_copy16(most, alsa, bytes);
-}
-
-static void alsa_to_most_copy24(void *alsa, void *most, unsigned int bytes)
-{
- swap_copy24(most, alsa, bytes);
-}
-
-static void alsa_to_most_copy32(void *alsa, void *most, unsigned int bytes)
-{
- swap_copy32(most, alsa, bytes);
-}
-
-static void most_to_alsa_memcpy(void *alsa, void *most, unsigned int bytes)
-{
- memcpy(alsa, most, bytes);
-}
-
-static void most_to_alsa_copy16(void *alsa, void *most, unsigned int bytes)
-{
- swap_copy16(alsa, most, bytes);
-}
-
-static void most_to_alsa_copy24(void *alsa, void *most, unsigned int bytes)
-{
- swap_copy24(alsa, most, bytes);
-}
-
-static void most_to_alsa_copy32(void *alsa, void *most, unsigned int bytes)
-{
- swap_copy32(alsa, most, bytes);
-}
-
-/**
- * get_channel - get pointer to channel
- * @iface: interface structure
- * @channel_id: channel ID
- *
- * This traverses the channel list and returns the channel matching the
- * ID and interface.
- *
- * Returns pointer to channel on success or NULL otherwise.
- */
-static struct channel *get_channel(struct most_interface *iface,
- int channel_id)
-{
- struct channel *channel, *tmp;
-
- list_for_each_entry_safe(channel, tmp, &dev_list, list) {
- if ((channel->iface == iface) && (channel->id == channel_id))
- return channel;
- }
-
- return NULL;
-}
-
-/**
- * copy_data - implements data copying function
- * @channel: channel
- * @mbo: MBO from core
- *
- * Copy data from/to ring buffer to/from MBO and update the buffer position
- */
-static bool copy_data(struct channel *channel, struct mbo *mbo)
-{
- struct snd_pcm_runtime *const runtime = channel->substream->runtime;
- unsigned int const frame_bytes = channel->cfg->subbuffer_size;
- unsigned int const buffer_size = runtime->buffer_size;
- unsigned int frames;
- unsigned int fr0;
-
- if (channel->cfg->direction & MOST_CH_RX)
- frames = mbo->processed_length / frame_bytes;
- else
- frames = mbo->buffer_length / frame_bytes;
- fr0 = min(buffer_size - channel->buffer_pos, frames);
-
- channel->copy_fn(runtime->dma_area + channel->buffer_pos * frame_bytes,
- mbo->virt_address,
- fr0 * frame_bytes);
-
- if (frames > fr0) {
- /* wrap around at end of ring buffer */
- channel->copy_fn(runtime->dma_area,
- mbo->virt_address + fr0 * frame_bytes,
- (frames - fr0) * frame_bytes);
- }
-
- channel->buffer_pos += frames;
- if (channel->buffer_pos >= buffer_size)
- channel->buffer_pos -= buffer_size;
- channel->period_pos += frames;
- if (channel->period_pos >= runtime->period_size) {
- channel->period_pos -= runtime->period_size;
- return true;
- }
-
- return false;
-}
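For illustration, the wrap-around split computed by copy_data() above (fr0 = min(buffer_size - buffer_pos, frames)) can be traced with a small user-space sketch; the frame counts below are invented:

#include <stdio.h>

int main(void)
{
    unsigned int buffer_size = 1024;   /* frames in the ALSA ring buffer */
    unsigned int buffer_pos  = 1000;   /* current position in the ring buffer */
    unsigned int frames      = 48;     /* frames carried by one MBO */

    unsigned int fr0 = buffer_size - buffer_pos;
    if (frames < fr0)
        fr0 = frames;

    printf("first chunk  : %u frames at offset %u\n", fr0, buffer_pos);
    if (frames > fr0)
        printf("wrapped chunk: %u frames at offset 0\n", frames - fr0);

    buffer_pos += frames;
    if (buffer_pos >= buffer_size)
        buffer_pos -= buffer_size;
    printf("new buffer_pos: %u\n", buffer_pos);   /* 24 */
    return 0;
}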
-
-/**
- * playback_thread - function implements the playback thread
- * @data: private data
- *
- * Thread that implements the playback functionality in a loop. It waits for a
- * free MBO from mostcore for a particular channel, copies the data from the
- * ring buffer to the MBO and submits the MBO back to mostcore.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int playback_thread(void *data)
-{
- struct channel *const channel = data;
-
- while (!kthread_should_stop()) {
- struct mbo *mbo = NULL;
- bool period_elapsed = false;
- int ret;
-
- wait_event_interruptible(
- channel->playback_waitq,
- kthread_should_stop() ||
- (channel->is_stream_running &&
- (mbo = most_get_mbo(channel->iface, channel->id,
- &audio_aim))));
- if (!mbo)
- continue;
-
- if (channel->is_stream_running)
- period_elapsed = copy_data(channel, mbo);
- else
- memset(mbo->virt_address, 0, mbo->buffer_length);
-
- ret = most_submit_mbo(mbo);
- if (ret)
- channel->is_stream_running = false;
-
- if (period_elapsed)
- snd_pcm_period_elapsed(channel->substream);
- }
-
- return 0;
-}
-
-/**
- * pcm_open - implements open callback function for PCM middle layer
- * @substream: pointer to ALSA PCM substream
- *
- * This is called when a PCM substream is opened. At a minimum, the function
- * should initialize the runtime->hw record.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_open(struct snd_pcm_substream *substream)
-{
- struct channel *channel = substream->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct most_channel_config *cfg = channel->cfg;
-
- channel->substream = substream;
-
- if (cfg->direction == MOST_CH_TX) {
- channel->playback_task = kthread_run(&playback_thread, channel,
- "most_audio_playback");
- if (IS_ERR(channel->playback_task)) {
- pr_err("Couldn't start thread\n");
- return PTR_ERR(channel->playback_task);
- }
- }
-
- if (most_start_channel(channel->iface, channel->id, &audio_aim)) {
- pr_err("most_start_channel() failed!\n");
- if (cfg->direction == MOST_CH_TX)
- kthread_stop(channel->playback_task);
- return -EBUSY;
- }
-
- runtime->hw = channel->pcm_hardware;
- return 0;
-}
-
-/**
- * pcm_close - implements close callback function for PCM middle layer
- * @substream: sub-stream pointer
- *
- * Obviously, this is called when a PCM substream is closed. Any private
- * instance for a PCM substream allocated in the open callback will be
- * released here.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_close(struct snd_pcm_substream *substream)
-{
- struct channel *channel = substream->private_data;
-
- if (channel->cfg->direction == MOST_CH_TX)
- kthread_stop(channel->playback_task);
- most_stop_channel(channel->iface, channel->id, &audio_aim);
-
- return 0;
-}
-
-/**
- * pcm_hw_params - implements hw_params callback function for PCM middle layer
- * @substream: sub-stream pointer
- * @hw_params: contains the hardware parameters set by the application
- *
- * This is called when the hardware parameters are set by the application, that
- * is, once when the buffer size, the period size, the format, etc. are defined
- * for the PCM substream. Many hardware setups should be done in this callback,
- * including the allocation of buffers.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct channel *channel = substream->private_data;
-
- if ((params_channels(hw_params) > channel->pcm_hardware.channels_max) ||
- (params_channels(hw_params) < channel->pcm_hardware.channels_min)) {
- pr_err("Requested number of channels not supported.\n");
- return -EINVAL;
- }
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
-}
-
-/**
- * pcm_hw_free - implements hw_free callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This is called to release the resources allocated via hw_params.
- * This function will be always called before the close callback is called.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
-}
-
-/**
- * pcm_prepare - implements prepare callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This callback is called when the PCM is "prepared". Format rate, sample rate,
- * etc., can be set here. This callback can be called many times at each setup.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct channel *channel = substream->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct most_channel_config *cfg = channel->cfg;
- int width = snd_pcm_format_physical_width(runtime->format);
-
- channel->copy_fn = NULL;
-
- if (cfg->direction == MOST_CH_TX) {
- if (snd_pcm_format_big_endian(runtime->format) || width == 8)
- channel->copy_fn = alsa_to_most_memcpy;
- else if (width == 16)
- channel->copy_fn = alsa_to_most_copy16;
- else if (width == 24)
- channel->copy_fn = alsa_to_most_copy24;
- else if (width == 32)
- channel->copy_fn = alsa_to_most_copy32;
- } else {
- if (snd_pcm_format_big_endian(runtime->format) || width == 8)
- channel->copy_fn = most_to_alsa_memcpy;
- else if (width == 16)
- channel->copy_fn = most_to_alsa_copy16;
- else if (width == 24)
- channel->copy_fn = most_to_alsa_copy24;
- else if (width == 32)
- channel->copy_fn = most_to_alsa_copy32;
- }
-
- if (!channel->copy_fn) {
- pr_err("unsupported format\n");
- return -EINVAL;
- }
-
- channel->period_pos = 0;
- channel->buffer_pos = 0;
-
- return 0;
-}
-
-/**
- * pcm_trigger - implements trigger callback function for PCM middle layer
- * @substream: substream pointer
- * @cmd: action to perform
- *
- * This is called when the PCM is started, stopped or paused. The action will be
- * specified in the second argument, SNDRV_PCM_TRIGGER_XXX
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct channel *channel = substream->private_data;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- channel->is_stream_running = true;
- wake_up_interruptible(&channel->playback_waitq);
- return 0;
-
- case SNDRV_PCM_TRIGGER_STOP:
- channel->is_stream_running = false;
- return 0;
-
- default:
- pr_info("pcm_trigger(), invalid\n");
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * pcm_pointer - implements pointer callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This callback is called when the PCM middle layer inquires the current
- * hardware position on the buffer. The position must be returned in frames,
- * ranging from 0 to buffer_size-1.
- */
-static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
-{
- struct channel *channel = substream->private_data;
-
- return channel->buffer_pos;
-}
-
-/**
- * Initialization of struct snd_pcm_ops
- */
-static struct snd_pcm_ops pcm_ops = {
- .open = pcm_open,
- .close = pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pcm_hw_params,
- .hw_free = pcm_hw_free,
- .prepare = pcm_prepare,
- .trigger = pcm_trigger,
- .pointer = pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
-};
-
-
-static int split_arg_list(char *buf, char **card_name, char **pcm_format)
-{
- *card_name = strsep(&buf, ".");
- if (!*card_name)
- return -EIO;
- *pcm_format = strsep(&buf, ".\n");
- if (!*pcm_format)
- return -EIO;
- return 0;
-}
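split_arg_list() expects the per-channel argument in the form "<card_name>.<pcm_format>"; a user-space sketch of the same strsep() tokenization (the sample string is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char arg[] = "MostAudio.2x16\n";          /* hypothetical arg_list value */
    char *buf = arg;
    char *card_name = strsep(&buf, ".");
    char *pcm_format = strsep(&buf, ".\n");

    if (!card_name || !pcm_format)
        return 1;
    printf("card: '%s', format: '%s'\n", card_name, pcm_format);
    return 0;
}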
-
-static int audio_set_hw_params(struct snd_pcm_hardware *pcm_hw,
- char *pcm_format,
- struct most_channel_config *cfg)
-{
- pcm_hw->info = MOST_PCM_INFO;
- pcm_hw->rates = SNDRV_PCM_RATE_48000;
- pcm_hw->rate_min = 48000;
- pcm_hw->rate_max = 48000;
- pcm_hw->buffer_bytes_max = cfg->num_buffers * cfg->buffer_size;
- pcm_hw->period_bytes_min = cfg->buffer_size;
- pcm_hw->period_bytes_max = cfg->buffer_size;
- pcm_hw->periods_min = 1;
- pcm_hw->periods_max = cfg->num_buffers;
-
- if (!strcmp(pcm_format, "1x8")) {
- if (cfg->subbuffer_size != 1)
- goto error;
- pr_info("PCM format is 8-bit mono\n");
- pcm_hw->channels_min = 1;
- pcm_hw->channels_max = 1;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S8;
- } else if (!strcmp(pcm_format, "2x16")) {
- if (cfg->subbuffer_size != 4)
- goto error;
- pr_info("PCM format is 16-bit stereo\n");
- pcm_hw->channels_min = 2;
- pcm_hw->channels_max = 2;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S16_BE;
- } else if (!strcmp(pcm_format, "2x24")) {
- if (cfg->subbuffer_size != 6)
- goto error;
- pr_info("PCM format is 24-bit stereo\n");
- pcm_hw->channels_min = 2;
- pcm_hw->channels_max = 2;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S24_3LE |
- SNDRV_PCM_FMTBIT_S24_3BE;
- } else if (!strcmp(pcm_format, "2x32")) {
- if (cfg->subbuffer_size != 8)
- goto error;
- pr_info("PCM format is 32-bit stereo\n");
- pcm_hw->channels_min = 2;
- pcm_hw->channels_max = 2;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S32_LE |
- SNDRV_PCM_FMTBIT_S32_BE;
- } else if (!strcmp(pcm_format, "6x16")) {
- if (cfg->subbuffer_size != 12)
- goto error;
- pr_info("PCM format is 16-bit 5.1 multi channel\n");
- pcm_hw->channels_min = 6;
- pcm_hw->channels_max = 6;
- pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S16_BE;
- } else {
- pr_err("PCM format %s not supported\n", pcm_format);
- return -EIO;
- }
- return 0;
-error:
- pr_err("Audio resolution doesn't fit subbuffer size\n");
- return -EINVAL;
-}
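The format strings accepted above map one-to-one to a channel count and a required sub-buffer size; the sketch below restates that mapping as a standalone lookup (struct and variable names are illustrative only):

#include <stdio.h>
#include <string.h>

struct pcm_fmt_map {
    const char *token;     /* pcm_format suffix from the module argument */
    unsigned int channels; /* ALSA channel count */
    unsigned int sub_size; /* required cfg->subbuffer_size in bytes */
};

static const struct pcm_fmt_map fmt_map[] = {
    { "1x8",  1,  1 },  /* 8-bit mono */
    { "2x16", 2,  4 },  /* 16-bit stereo */
    { "2x24", 2,  6 },  /* 24-bit (3-byte) stereo */
    { "2x32", 2,  8 },  /* 32-bit stereo */
    { "6x16", 6, 12 },  /* 16-bit 5.1 multi-channel */
};

int main(void)
{
    const char *fmt = "2x24";
    unsigned int i;

    for (i = 0; i < sizeof(fmt_map) / sizeof(fmt_map[0]); i++)
        if (!strcmp(fmt, fmt_map[i].token))
            printf("%s: %u channels, %u bytes per sub-buffer\n",
                   fmt, fmt_map[i].channels, fmt_map[i].sub_size);
    return 0;
}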
-
-/**
- * audio_probe_channel - probe function of the driver module
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- * @cfg: pointer to actual channel configuration
- * @parent: pointer to kobject (needed for sysfs hook-up)
- * @arg_list: string that provides the name of the device to be created in /dev
- * plus the desired audio resolution
- *
- * Creates sound card, pcm device, sets pcm ops and registers sound card.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_probe_channel(struct most_interface *iface, int channel_id,
- struct most_channel_config *cfg,
- struct kobject *parent, char *arg_list)
-{
- struct channel *channel;
- struct snd_card *card;
- struct snd_pcm *pcm;
- int playback_count = 0;
- int capture_count = 0;
- int ret;
- int direction;
- char *card_name;
- char *pcm_format;
-
- if (!iface)
- return -EINVAL;
-
- if (cfg->data_type != MOST_CH_SYNC) {
- pr_err("Incompatible channel type\n");
- return -EINVAL;
- }
-
- if (get_channel(iface, channel_id)) {
- pr_err("channel (%s:%d) is already linked\n",
- iface->description, channel_id);
- return -EINVAL;
- }
-
- if (cfg->direction == MOST_CH_TX) {
- playback_count = 1;
- direction = SNDRV_PCM_STREAM_PLAYBACK;
- } else {
- capture_count = 1;
- direction = SNDRV_PCM_STREAM_CAPTURE;
- }
-
- ret = split_arg_list(arg_list, &card_name, &pcm_format);
- if (ret < 0) {
- pr_info("PCM format missing\n");
- return ret;
- }
-
- ret = snd_card_new(NULL, -1, card_name, THIS_MODULE,
- sizeof(*channel), &card);
- if (ret < 0)
- return ret;
-
- channel = card->private_data;
- channel->card = card;
- channel->cfg = cfg;
- channel->iface = iface;
- channel->id = channel_id;
- init_waitqueue_head(&channel->playback_waitq);
-
- if (audio_set_hw_params(&channel->pcm_hardware, pcm_format, cfg))
- goto err_free_card;
-
- snprintf(card->driver, sizeof(card->driver), "%s", DRIVER_NAME);
- snprintf(card->shortname, sizeof(card->shortname), "Microchip MOST:%d",
- card->number);
- snprintf(card->longname, sizeof(card->longname), "%s at %s, ch %d",
- card->shortname, iface->description, channel_id);
-
- ret = snd_pcm_new(card, card_name, 0, playback_count,
- capture_count, &pcm);
- if (ret < 0)
- goto err_free_card;
-
- pcm->private_data = channel;
-
- snd_pcm_set_ops(pcm, direction, &pcm_ops);
-
- ret = snd_card_register(card);
- if (ret < 0)
- goto err_free_card;
-
- list_add_tail(&channel->list, &dev_list);
-
- return 0;
-
-err_free_card:
- snd_card_free(card);
- return ret;
-}
-
-/**
- * audio_disconnect_channel - function to disconnect a channel
- * @iface: pointer to interface instance
- * @channel_id: channel index
- *
- * This frees allocated memory and removes the sound card from ALSA
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_disconnect_channel(struct most_interface *iface,
- int channel_id)
-{
- struct channel *channel;
-
- channel = get_channel(iface, channel_id);
- if (!channel) {
- pr_err("sound_disconnect_channel(), invalid channel %d\n",
- channel_id);
- return -EINVAL;
- }
-
- list_del(&channel->list);
- snd_card_free(channel->card);
-
- return 0;
-}
-
-/**
- * audio_rx_completion - completion handler for rx channels
- * @mbo: pointer to buffer object that has completed
- *
- * This searches for the channel this MBO belongs to and copies the data from
- * the MBO to the ring buffer.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_rx_completion(struct mbo *mbo)
-{
- struct channel *channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
- bool period_elapsed = false;
-
- if (!channel) {
- pr_err("sound_rx_completion(), invalid channel %d\n",
- mbo->hdm_channel_id);
- return -EINVAL;
- }
-
- if (channel->is_stream_running)
- period_elapsed = copy_data(channel, mbo);
-
- most_put_mbo(mbo);
-
- if (period_elapsed)
- snd_pcm_period_elapsed(channel->substream);
-
- return 0;
-}
-
-/**
- * audio_tx_completion - completion handler for tx channels
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- *
- * This searches for the channel that belongs to this combination of interface
- * pointer and channel ID and wakes a process sitting in the wait queue of
- * this channel.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_tx_completion(struct most_interface *iface, int channel_id)
-{
- struct channel *channel = get_channel(iface, channel_id);
-
- if (!channel) {
- pr_err("sound_tx_completion(), invalid channel %d\n",
- channel_id);
- return -EINVAL;
- }
-
- wake_up_interruptible(&channel->playback_waitq);
-
- return 0;
-}
-
-/**
- * Initialization of the struct most_aim
- */
-static struct most_aim audio_aim = {
- .name = DRIVER_NAME,
- .probe_channel = audio_probe_channel,
- .disconnect_channel = audio_disconnect_channel,
- .rx_completion = audio_rx_completion,
- .tx_completion = audio_tx_completion,
-};
-
-static int __init audio_init(void)
-{
- pr_info("init()\n");
-
- INIT_LIST_HEAD(&dev_list);
-
- return most_register_aim(&audio_aim);
-}
-
-static void __exit audio_exit(void)
-{
- struct channel *channel, *tmp;
-
- pr_info("exit()\n");
-
- list_for_each_entry_safe(channel, tmp, &dev_list, list) {
- list_del(&channel->list);
- snd_card_free(channel->card);
- }
-
- most_deregister_aim(&audio_aim);
-}
-
-module_init(audio_init);
-module_exit(audio_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
-MODULE_DESCRIPTION("Audio Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-v4l2/Kconfig b/drivers/staging/most/aim-v4l2/Kconfig
deleted file mode 100644
index d70eaaf0936c..000000000000
--- a/drivers/staging/most/aim-v4l2/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# MOST V4L2 configuration
-#
-
-config AIM_V4L2
- tristate "V4L2 AIM"
- depends on VIDEO_V4L2
- ---help---
- Say Y here if you want to communicate via Video 4 Linux.
-
- To compile this driver as a module, choose M here: the
- module will be called aim_v4l2. \ No newline at end of file
diff --git a/drivers/staging/most/aim-v4l2/Makefile b/drivers/staging/most/aim-v4l2/Makefile
deleted file mode 100644
index 69a7524b466c..000000000000
--- a/drivers/staging/most/aim-v4l2/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_AIM_V4L2) += aim_v4l2.o
-
-aim_v4l2-objs := video.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
deleted file mode 100644
index 345a82437311..000000000000
--- a/drivers/staging/most/aim-v4l2/video.c
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * V4L2 AIM - V4L2 Application Interface Module for MostCore
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/suspend.h>
-#include <linux/videodev2.h>
-#include <linux/mutex.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-
-#include "mostcore.h"
-
-
-#define V4L2_AIM_MAX_INPUT 1
-
-static struct most_aim aim_info;
-
-struct most_video_dev {
- struct most_interface *iface;
- int ch_idx;
- struct list_head list;
- bool mute;
-
- struct list_head pending_mbos;
- spinlock_t list_lock;
-
- struct v4l2_device v4l2_dev;
- atomic_t access_ref;
- struct video_device *vdev;
- unsigned int ctrl_input;
-
- struct mutex lock;
-
- wait_queue_head_t wait_data;
-};
-
-struct aim_fh {
- /* must be the first field of this struct! */
- struct v4l2_fh fh;
- struct most_video_dev *mdev;
- u32 offs;
-};
-
-
-static struct list_head video_devices = LIST_HEAD_INIT(video_devices);
-static struct spinlock list_lock;
-
-
-static inline bool data_ready(struct most_video_dev *mdev)
-{
- return !list_empty(&mdev->pending_mbos);
-}
-
-static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
-{
- return list_first_entry(&mdev->pending_mbos, struct mbo, list);
-}
-
-
-static int aim_vdev_open(struct file *filp)
-{
- int ret;
- struct video_device *vdev = video_devdata(filp);
- struct most_video_dev *mdev = video_drvdata(filp);
- struct aim_fh *fh;
-
- pr_info("aim_vdev_open()\n");
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- break;
- default:
- return -EINVAL;
- }
-
- fh = kzalloc(sizeof(struct aim_fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
-
- if (!atomic_inc_and_test(&mdev->access_ref)) {
- pr_err("too many clients\n");
- ret = -EBUSY;
- goto err_dec;
- }
-
- fh->mdev = mdev;
- v4l2_fh_init(&fh->fh, vdev);
- filp->private_data = fh;
-
- v4l2_fh_add(&fh->fh);
-
- ret = most_start_channel(mdev->iface, mdev->ch_idx, &aim_info);
- if (ret) {
- pr_err("most_start_channel() failed\n");
- goto err_rm;
- }
-
- return 0;
-
-err_rm:
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
-
-err_dec:
- atomic_dec(&mdev->access_ref);
- kfree(fh);
- return ret;
-}
-
-static int aim_vdev_close(struct file *filp)
-{
- struct aim_fh *fh = filp->private_data;
- struct most_video_dev *mdev = fh->mdev;
- struct mbo *mbo, *tmp;
-
- pr_info("aim_vdev_close()\n");
-
- /*
- * We need to put MBOs back before we call most_stop_channel()
- * to deallocate MBOs.
- * On the other hand, mostcore keeps calling rx_completion()
- * to deliver MBOs until most_stop_channel() is called.
- * Use mute to work around this issue.
- * This must be implemented in core.
- */
-
- spin_lock(&mdev->list_lock);
- mdev->mute = true;
- list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
- list_del(&mbo->list);
- spin_unlock(&mdev->list_lock);
- most_put_mbo(mbo);
- spin_lock(&mdev->list_lock);
- }
- spin_unlock(&mdev->list_lock);
- most_stop_channel(mdev->iface, mdev->ch_idx, &aim_info);
- mdev->mute = false;
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
-
- atomic_dec(&mdev->access_ref);
- kfree(fh);
- return 0;
-}
-
-static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct aim_fh *fh = filp->private_data;
- struct most_video_dev *mdev = fh->mdev;
- int ret = 0;
-
- if (*pos)
- return -ESPIPE;
-
- if (!mdev)
- return -ENODEV;
-
- /* wait for the first buffer */
- if (!(filp->f_flags & O_NONBLOCK)) {
- if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
- return -ERESTARTSYS;
- }
-
- if (!data_ready(mdev))
- return -EAGAIN;
-
- while (count > 0 && data_ready(mdev)) {
- struct mbo *const mbo = get_top_mbo(mdev);
- int const rem = mbo->processed_length - fh->offs;
- int const cnt = rem < count ? rem : count;
-
- if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
- pr_err("read: copy_to_user failed\n");
- if (!ret)
- ret = -EFAULT;
- return ret;
- }
-
- fh->offs += cnt;
- count -= cnt;
- buf += cnt;
- ret += cnt;
-
- if (cnt >= rem) {
- fh->offs = 0;
- spin_lock(&mdev->list_lock);
- list_del(&mbo->list);
- spin_unlock(&mdev->list_lock);
- most_put_mbo(mbo);
- }
- }
- return ret;
-}
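aim_vdev_read() drains one MBO across potentially several read() calls by tracking an offset in the file handle; a user-space model of that bookkeeping (payload and sizes invented):

#include <stdio.h>

int main(void)
{
    const char payload[] = "0123456789";      /* stands in for mbo->virt_address */
    size_t processed_length = sizeof(payload) - 1;
    size_t offs = 0;                          /* fh->offs */
    size_t count = 4;                         /* bytes requested per read() */

    while (processed_length - offs > 0) {
        size_t rem = processed_length - offs;
        size_t cnt = rem < count ? rem : count;

        printf("read %zu bytes: %.*s\n", cnt, (int)cnt, payload + offs);
        offs += cnt;
        if (cnt >= rem) {
            /* MBO fully consumed: reset the offset and put the MBO back */
            offs = 0;
            break;
        }
    }
    return 0;
}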
-
-static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
-{
- struct aim_fh *fh = filp->private_data;
- struct most_video_dev *mdev = fh->mdev;
- unsigned int mask = 0;
-
- /* only wait if no data is available */
- if (!data_ready(mdev))
- poll_wait(filp, &mdev->wait_data, wait);
- if (data_ready(mdev))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-
-static void aim_set_format_struct(struct v4l2_format *f)
-{
- f->fmt.pix.width = 8;
- f->fmt.pix.height = 8;
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = 188 * 2;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
- f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.priv = 0;
-}
-
-static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
- struct v4l2_format *format)
-{
-#if 0
- u32 const pixfmt = format->fmt.pix.pixelformat;
- const char *fmt;
-
- if (pixfmt != V4L2_PIX_FMT_MPEG) {
- if (cmd == VIDIOC_TRY_FMT)
- fmt = KERN_ERR "try %c%c%c%c failed\n";
- else
- fmt = KERN_ERR "set %c%c%c%c failed\n";
- } else {
- if (cmd == VIDIOC_TRY_FMT)
- fmt = KERN_ERR "try %c%c%c%c\n";
- else
- fmt = KERN_ERR "set %c%c%c%c\n";
- }
- printk(fmt,
- (pixfmt) & 255,
- (pixfmt >> 8) & 255,
- (pixfmt >> 16) & 255,
- (pixfmt >> 24) & 255);
-#endif
-
- if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
- return -EINVAL;
-
- if (cmd == VIDIOC_TRY_FMT)
- return 0;
-
- aim_set_format_struct(format);
-
- return 0;
-}
-
-
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct aim_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- pr_info("vidioc_querycap()\n");
-
- strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
- strlcpy(cap->card, "my_card", sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "%s", mdev->iface->description);
-
- cap->capabilities =
- V4L2_CAP_READWRITE |
- V4L2_CAP_TUNER |
- V4L2_CAP_VIDEO_CAPTURE;
- return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- pr_info("vidioc_enum_fmt_vid_cap() %d\n", f->index);
-
- if (f->index)
- return -EINVAL;
-
- strcpy(f->description, "MPEG");
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
- f->pixelformat = V4L2_PIX_FMT_MPEG;
-
- return 0;
-}
-
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- pr_info("vidioc_g_fmt_vid_cap()\n");
-
- aim_set_format_struct(f);
- return 0;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct aim_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
-}
-
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct aim_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- return aim_set_format(mdev, VIDIOC_S_FMT, f);
-}
-
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
-{
- pr_info("vidioc_g_std()\n");
-
- *norm = V4L2_STD_UNKNOWN;
- return 0;
-}
-
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *input)
-{
- struct aim_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- if (input->index >= V4L2_AIM_MAX_INPUT)
- return -EINVAL;
-
- strcpy(input->name, "MOST Video");
- input->type |= V4L2_INPUT_TYPE_CAMERA;
- input->audioset = 0;
-
- input->std = mdev->vdev->tvnorms;
-
- return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
-{
- struct aim_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
- *i = mdev->ctrl_input;
- return 0;
-}
-
-static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
-{
- struct aim_fh *fh = priv;
- struct most_video_dev *mdev = fh->mdev;
-
- pr_info("vidioc_s_input(%d)\n", index);
-
- if (index >= V4L2_AIM_MAX_INPUT)
- return -EINVAL;
- mdev->ctrl_input = index;
- return 0;
-}
-
-static struct v4l2_file_operations aim_fops = {
- .owner = THIS_MODULE,
- .open = aim_vdev_open,
- .release = aim_vdev_close,
- .read = aim_vdev_read,
- .poll = aim_vdev_poll,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_g_std = vidioc_g_std,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
-};
-
-static const struct video_device aim_videodev_template = {
- .fops = &aim_fops,
- .release = video_device_release,
- .ioctl_ops = &video_ioctl_ops,
- .tvnorms = V4L2_STD_UNKNOWN,
-};
-
-/**************************************************************************/
-
-static struct most_video_dev *get_aim_dev(
- struct most_interface *iface, int channel_idx)
-{
- struct most_video_dev *mdev, *tmp;
-
- spin_lock(&list_lock);
- list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
- if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
- spin_unlock(&list_lock);
- return mdev;
- }
- }
- spin_unlock(&list_lock);
- return NULL;
-}
-
-static int aim_rx_data(struct mbo *mbo)
-{
- struct most_video_dev *mdev =
- get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
-
- if (!mdev)
- return -EIO;
-
- spin_lock(&mdev->list_lock);
- if (unlikely(mdev->mute)) {
- spin_unlock(&mdev->list_lock);
- return -EIO;
- }
-
- list_add_tail(&mbo->list, &mdev->pending_mbos);
- spin_unlock(&mdev->list_lock);
- wake_up_interruptible(&mdev->wait_data);
- return 0;
-}
-
-static int aim_register_videodev(struct most_video_dev *mdev)
-{
- int retval = -ENOMEM;
- int ret;
-
- pr_info("aim_register_videodev()\n");
-
- init_waitqueue_head(&mdev->wait_data);
-
- /* allocate and fill v4l2 video struct */
- mdev->vdev = video_device_alloc();
- if (!mdev->vdev)
- return -ENOMEM;
-
- /* Fill the video capture device struct */
- *mdev->vdev = aim_videodev_template;
- mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
- mdev->vdev->lock = &mdev->lock;
- strcpy(mdev->vdev->name, "most v4l2 aim video");
-
- /* Register the v4l2 device */
- video_set_drvdata(mdev->vdev, mdev);
- retval = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
- if (retval != 0) {
- pr_err("video_register_device failed (%d)\n", retval);
- ret = -ENODEV;
- goto err_vbi_dev;
- }
-
- return 0;
-
-err_vbi_dev:
- video_device_release(mdev->vdev);
- return ret;
-}
-
-static void aim_unregister_videodev(struct most_video_dev *mdev)
-{
- pr_info("aim_unregister_videodev()\n");
-
- video_unregister_device(mdev->vdev);
-}
-
-
-static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
-{
- struct most_video_dev *mdev =
- container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
-
- v4l2_device_unregister(v4l2_dev);
- kfree(mdev);
-}
-
-static int aim_probe_channel(struct most_interface *iface, int channel_idx,
- struct most_channel_config *ccfg,
- struct kobject *parent, char *name)
-{
- int ret;
- struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
-
- pr_info("aim_probe_channel()\n");
-
- if (mdev) {
- pr_err("channel already linked\n");
- return -EEXIST;
- }
-
- if (ccfg->direction != MOST_CH_RX) {
- pr_err("wrong direction, expect rx\n");
- return -EINVAL;
- }
-
- if (ccfg->data_type != MOST_CH_SYNC &&
- ccfg->data_type != MOST_CH_ISOC_AVP) {
- pr_err("wrong channel type, expect sync or isoc_avp\n");
- return -EINVAL;
- }
-
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev)
- return -ENOMEM;
-
- mutex_init(&mdev->lock);
- atomic_set(&mdev->access_ref, -1);
- spin_lock_init(&mdev->list_lock);
- INIT_LIST_HEAD(&mdev->pending_mbos);
- mdev->iface = iface;
- mdev->ch_idx = channel_idx;
- mdev->v4l2_dev.release = aim_v4l2_dev_release;
-
- /* Create the v4l2_device */
- strlcpy(mdev->v4l2_dev.name, "most_video_device",
- sizeof(mdev->v4l2_dev.name));
- ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
- if (ret) {
- pr_err("v4l2_device_register() failed\n");
- kfree(mdev);
- return ret;
- }
-
- ret = aim_register_videodev(mdev);
- if (ret)
- goto err_unreg;
-
- spin_lock(&list_lock);
- list_add(&mdev->list, &video_devices);
- spin_unlock(&list_lock);
- return 0;
-
-err_unreg:
- v4l2_device_disconnect(&mdev->v4l2_dev);
- v4l2_device_put(&mdev->v4l2_dev);
- return ret;
-}
-
-static int aim_disconnect_channel(struct most_interface *iface,
- int channel_idx)
-{
- struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
-
- pr_info("aim_disconnect_channel()\n");
-
- if (!mdev) {
- pr_err("no such channel is linked\n");
- return -ENOENT;
- }
-
- spin_lock(&list_lock);
- list_del(&mdev->list);
- spin_unlock(&list_lock);
-
- aim_unregister_videodev(mdev);
- v4l2_device_disconnect(&mdev->v4l2_dev);
- v4l2_device_put(&mdev->v4l2_dev);
- return 0;
-}
-
-static struct most_aim aim_info = {
- .name = "v4l",
- .probe_channel = aim_probe_channel,
- .disconnect_channel = aim_disconnect_channel,
- .rx_completion = aim_rx_data,
-};
-
-static int __init aim_init(void)
-{
- spin_lock_init(&list_lock);
- return most_register_aim(&aim_info);
-}
-
-static void __exit aim_exit(void)
-{
- struct most_video_dev *mdev, *tmp;
-
- /*
- * As the mostcore currently doesn't call disconnect_channel()
- * for linked channels while we call most_deregister_aim()
- * we simulate this call here.
- * This must be fixed in core.
- */
- spin_lock(&list_lock);
- list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
- list_del(&mdev->list);
- spin_unlock(&list_lock);
-
- aim_unregister_videodev(mdev);
- v4l2_device_disconnect(&mdev->v4l2_dev);
- v4l2_device_put(&mdev->v4l2_dev);
- spin_lock(&list_lock);
- }
- spin_unlock(&list_lock);
-
- most_deregister_aim(&aim_info);
- BUG_ON(!list_empty(&video_devices));
-}
-
-module_init(aim_init);
-module_exit(aim_exit);
-
-MODULE_DESCRIPTION("V4L2 Application Interface Module for MostCore");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/dim2/Kconfig b/drivers/staging/most/dim2/Kconfig
new file mode 100644
index 000000000000..c211f0d9139c
--- /dev/null
+++ b/drivers/staging/most/dim2/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MediaLB configuration
+#
+
+config MOST_DIM2
+ tristate "DIM2"
+ depends on HAS_IOMEM && OF
+
+ help
+ Say Y here if you want to connect via MediaLB to a network transceiver.
+ This device driver is platform dependent and needs an additional
+ platform driver to be installed. For more information, contact the
+ maintainer of this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called most_dim2.
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
new file mode 100644
index 000000000000..5f9612af3fa3
--- /dev/null
+++ b/drivers/staging/most/dim2/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MOST_DIM2) += most_dim2.o
+
+most_dim2-objs := dim2.o hal.o
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
new file mode 100644
index 000000000000..dad2abe6c0c9
--- /dev/null
+++ b/drivers/staging/most/dim2/dim2.c
@@ -0,0 +1,1105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dim2.c - MediaLB DIM2 Hardware Dependent Module
+ *
+ * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/most.h>
+#include <linux/of.h>
+#include "hal.h"
+#include "errors.h"
+#include "sysfs.h"
+
+#define DMA_CHANNELS (32 - 1) /* channel 0 is a system channel */
+
+#define MAX_BUFFERS_PACKET 32
+#define MAX_BUFFERS_STREAMING 32
+#define MAX_BUF_SIZE_PACKET 2048
+#define MAX_BUF_SIZE_STREAMING (8 * 1024)
+
+/*
+ * The parameter representing the number of frames per sub-buffer for
+ * synchronous channels. Valid values: [0 .. 6].
+ *
+ * The values 0, 1, 2, 3, 4, 5, 6 represent corresponding number of frames per
+ * sub-buffer 1, 2, 4, 8, 16, 32, 64.
+ */
+static u8 fcnt = 4; /* (1 << fcnt) frames per subbuffer */
+module_param(fcnt, byte, 0000);
+MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
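The fcnt parameter encodes the frames per sub-buffer as a power of two, so the default of 4 corresponds to 16 frames; a one-line sketch of the mapping over the valid range [0..6]:

#include <stdio.h>

int main(void)
{
    unsigned char fcnt;

    for (fcnt = 0; fcnt <= 6; fcnt++)
        printf("fcnt=%u -> %u frames per sub-buffer\n",
               (unsigned int)fcnt, 1u << fcnt);
    return 0;
}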
+
+static DEFINE_SPINLOCK(dim_lock);
+
+/**
+ * struct hdm_channel - private structure to keep channel specific data
+ * @name: channel name
+ * @is_initialized: identifier to know whether the channel is initialized
+ * @ch: HAL specific channel data
+ * @reset_dbr_size: reset DBR data buffer size
+ * @pending_list: list to keep MBO's before starting transfer
+ * @started_list: list to keep MBO's after starting transfer
+ * @direction: channel direction (TX or RX)
+ * @data_type: channel data type
+ */
+struct hdm_channel {
+ char name[sizeof "caNNN"];
+ bool is_initialized;
+ struct dim_channel ch;
+ u16 *reset_dbr_size;
+ struct list_head pending_list; /* before dim_enqueue_buffer() */
+ struct list_head started_list; /* after dim_enqueue_buffer() */
+ enum most_channel_direction direction;
+ enum most_channel_data_type data_type;
+};
+
+/*
+ * struct dim2_hdm - private structure to keep interface specific data
+ * @hch: an array of channel specific data
+ * @most_iface: most interface structure
+ * @capabilities: an array of channel capability data
+ * @io_base: I/O register base address
+ * @netinfo_task: thread to deliver network status
+ * @netinfo_waitq: waitq for the thread to sleep
+ * @deliver_netinfo: to identify whether network status received
+ * @mac_addrs: INIC mac address
+ * @link_state: network link state
+ * @atx_idx: index of async tx channel
+ */
+struct dim2_hdm {
+ struct device dev;
+ struct hdm_channel hch[DMA_CHANNELS];
+ struct most_channel_capability capabilities[DMA_CHANNELS];
+ struct most_interface most_iface;
+ char name[16 + sizeof "dim2-"];
+ void __iomem *io_base;
+ u8 clk_speed;
+ struct clk *clk;
+ struct clk *clk_pll;
+ struct task_struct *netinfo_task;
+ wait_queue_head_t netinfo_waitq;
+ int deliver_netinfo;
+ unsigned char mac_addrs[6];
+ unsigned char link_state;
+ int atx_idx;
+ struct medialb_bus bus;
+ void (*on_netinfo)(struct most_interface *most_iface,
+ unsigned char link_state, unsigned char *addrs);
+ void (*disable_platform)(struct platform_device *pdev);
+};
+
+struct dim2_platform_data {
+ int (*enable)(struct platform_device *pdev);
+ void (*disable)(struct platform_device *pdev);
+ u8 fcnt;
+};
+
+static inline struct dim2_hdm *iface_to_hdm(struct most_interface *iface)
+{
+ return container_of(iface, struct dim2_hdm, most_iface);
+}
+
+/* Macro to identify a network status message */
+#define PACKET_IS_NET_INFO(p) \
+ (((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
+ ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ bool state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ state = dim_get_lock_state();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return sysfs_emit(buf, "%s\n", state ? "locked" : "");
+}
+
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *dim2_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(dim2);
+
+/**
+ * dimcb_on_error - callback from HAL to report miscommunication between
+ * HDM and HAL
+ * @error_id: Error ID
+ * @error_message: Error message. Some text in a free format
+ */
+void dimcb_on_error(u8 error_id, const char *error_message)
+{
+ pr_err("%s: error_id - %d, error_message - %s\n", __func__, error_id,
+ error_message);
+}
+
+/**
+ * try_start_dim_transfer - try to transfer a buffer on a channel
+ * @hdm_ch: channel specific data
+ *
+ * Transfer a buffer from pending_list if the channel is ready
+ */
+static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
+{
+ u16 buf_size;
+ struct list_head *head = &hdm_ch->pending_list;
+ struct mbo *mbo;
+ unsigned long flags;
+ struct dim_ch_state st;
+
+ BUG_ON(!hdm_ch);
+ BUG_ON(!hdm_ch->is_initialized);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
+ if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
+ mbo = list_first_entry(head, struct mbo, list);
+ buf_size = mbo->buffer_length;
+
+ if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
+ BUG_ON(mbo->bus_address == 0);
+ if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ mbo->complete(mbo);
+ return -EFAULT;
+ }
+
+ list_move_tail(head->next, &hdm_ch->started_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * deliver_netinfo_thread - thread to deliver network status to mostcore
+ * @data: private data
+ *
+ * Wait for network status and deliver it to mostcore once it is received
+ */
+static int deliver_netinfo_thread(void *data)
+{
+ struct dim2_hdm *dev = data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(dev->netinfo_waitq,
+ dev->deliver_netinfo ||
+ kthread_should_stop());
+
+ if (dev->deliver_netinfo) {
+ dev->deliver_netinfo--;
+ if (dev->on_netinfo) {
+ dev->on_netinfo(&dev->most_iface,
+ dev->link_state,
+ dev->mac_addrs);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * retrieve_netinfo - retrieve network status from received buffer
+ * @dev: private data
+ * @mbo: received MBO
+ *
+ * Parse the message in the buffer and extract the node address, link state
+ * and MAC address. Wake up a thread to deliver this status to mostcore.
+ */
+static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
+{
+ u8 *data = mbo->virt_address;
+
+ pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
+ dev->link_state = data[18];
+ pr_info("NIState: %d\n", dev->link_state);
+ memcpy(dev->mac_addrs, data + 19, 6);
+ dev->deliver_netinfo++;
+ wake_up_interruptible(&dev->netinfo_waitq);
+}
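retrieve_netinfo() pulls the node address from bytes 16..17 (big-endian), the link state from byte 18 and the MAC address from bytes 19..24 of the received buffer; a user-space sketch of that parsing with an invented sample buffer:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint8_t data[32] = { 0 };
    uint8_t mac[6];

    data[16] = 0x01;                                     /* node address, high byte */
    data[17] = 0xAB;                                     /* node address, low byte  */
    data[18] = 0x02;                                     /* link state (NIState)    */
    memcpy(&data[19], "\x02\x04\x06\x08\x0a\x0c", 6);    /* MAC address */

    printf("Node Address: 0x%03x\n", (uint16_t)data[16] << 8 | data[17]);
    printf("NIState: %d\n", data[18]);
    memcpy(mac, data + 19, 6);
    printf("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}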
+
+/**
+ * service_done_flag - handle completed buffers
+ * @dev: private data
+ * @ch_idx: channel index
+ *
+ * Return the completed buffers to mostcore using the completion callback.
+ */
+static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
+{
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ struct dim_ch_state st;
+ struct list_head *head;
+ struct mbo *mbo;
+ int done_buffers;
+ unsigned long flags;
+ u8 *data;
+
+ BUG_ON(!hdm_ch);
+ BUG_ON(!hdm_ch->is_initialized);
+
+ spin_lock_irqsave(&dim_lock, flags);
+
+ done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
+ if (!done_buffers) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return;
+ }
+
+ if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ head = &hdm_ch->started_list;
+
+ while (done_buffers) {
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
+ break;
+ }
+
+ mbo = list_first_entry(head, struct mbo, list);
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ data = mbo->virt_address;
+
+ if (hdm_ch->data_type == MOST_CH_ASYNC &&
+ hdm_ch->direction == MOST_CH_RX &&
+ PACKET_IS_NET_INFO(data)) {
+ retrieve_netinfo(dev, mbo);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ list_add_tail(&mbo->list, &hdm_ch->pending_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+ } else {
+ if (hdm_ch->data_type == MOST_CH_CONTROL ||
+ hdm_ch->data_type == MOST_CH_ASYNC) {
+ u32 const data_size =
+ (u32)data[0] * 256 + data[1] + 2;
+
+ mbo->processed_length =
+ min_t(u32, data_size,
+ mbo->buffer_length);
+ } else {
+ mbo->processed_length = mbo->buffer_length;
+ }
+ mbo->status = MBO_SUCCESS;
+ mbo->complete(mbo);
+ }
+
+ done_buffers--;
+ }
+}
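For control and asynchronous packets, service_done_flag() derives the processed length from the first two payload bytes, interpreted as a big-endian length plus two bytes for the length field itself, clamped to the buffer length; a small sketch of that computation (the sample bytes reuse the command built in request_netinfo() below):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t data[] = { 0x00, 0x03, 0x02, 0x08, 0x40 };
    uint32_t buffer_length = 2048;

    uint32_t data_size = (uint32_t)data[0] * 256 + data[1] + 2;     /* = 5 */
    uint32_t processed = data_size < buffer_length ? data_size : buffer_length;

    printf("data_size=%u processed_length=%u\n", data_size, processed);
    return 0;
}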
+
+static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
+ struct dim_channel **buffer)
+{
+ int idx = 0;
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+ if (dev->hch[ch_idx].is_initialized)
+ buffer[idx++] = &dev->hch[ch_idx].ch;
+ }
+ buffer[idx++] = NULL;
+
+ return buffer;
+}
+
+static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
+{
+ struct dim2_hdm *dev = _dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_service_mlb_int_irq();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
+ while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
+ continue;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dim2_task_irq(int irq, void *_dev)
+{
+ struct dim2_hdm *dev = _dev;
+ unsigned long flags;
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+ if (!dev->hch[ch_idx].is_initialized)
+ continue;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_service_channel(&dev->hch[ch_idx].ch);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ service_done_flag(dev, ch_idx);
+ while (!try_start_dim_transfer(dev->hch + ch_idx))
+ continue;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * dim2_ahb_isr - interrupt service routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Acknowledge the interrupt and service each initialized channel,
+ * if needed, in task context.
+ */
+static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
+{
+ struct dim2_hdm *dev = _dev;
+ struct dim_channel *buffer[DMA_CHANNELS + 1];
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_service_ahb_int_irq(get_active_channels(dev, buffer));
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * complete_all_mbos - complete MBO's in a list
+ * @head: list head
+ *
+ * Delete all the entries in the list and return the MBOs to mostcore using
+ * the completion callback.
+ */
+static void complete_all_mbos(struct list_head *head)
+{
+ unsigned long flags;
+ struct mbo *mbo;
+
+ for (;;) {
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ break;
+ }
+
+ mbo = list_first_entry(head, struct mbo, list);
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_CLOSE;
+ mbo->complete(mbo);
+ }
+}
+
+/**
+ * configure_channel - initialize a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: channel index to be configured
+ * @ccfg: structure that holds the configuration information
+ *
+ * Receives configuration information from mostcore and initializes
+ * the corresponding channel. Return 0 on success, negative on failure.
+ */
+static int configure_channel(struct most_interface *most_iface, int ch_idx,
+ struct most_channel_config *ccfg)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ bool const is_tx = ccfg->direction == MOST_CH_TX;
+ u16 const sub_size = ccfg->subbuffer_size;
+ u16 const buf_size = ccfg->buffer_size;
+ u16 new_size;
+ unsigned long flags;
+ u8 hal_ret;
+ int const ch_addr = ch_idx * 2 + 2;
+ struct hdm_channel *const hdm_ch = dev->hch + ch_idx;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (hdm_ch->is_initialized)
+ return -EPERM;
+
+ /* do not reset if the property was set by user, see poison_channel */
+ hdm_ch->reset_dbr_size = ccfg->dbr_size ? NULL : &ccfg->dbr_size;
+
+ /* zero value is default dbr_size, see dim2 hal */
+ hdm_ch->ch.dbr_size = ccfg->dbr_size;
+
+ switch (ccfg->data_type) {
+ case MOST_CH_CONTROL:
+ new_size = dim_norm_ctrl_async_buffer_size(buf_size);
+ if (new_size == 0) {
+ pr_err("%s: too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
+ is_tx ? new_size * 2 : new_size);
+ break;
+ case MOST_CH_ASYNC:
+ new_size = dim_norm_ctrl_async_buffer_size(buf_size);
+ if (new_size == 0) {
+ pr_err("%s: too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
+ is_tx ? new_size * 2 : new_size);
+ break;
+ case MOST_CH_ISOC:
+ new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
+ if (new_size == 0) {
+ pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+ hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+ break;
+ case MOST_CH_SYNC:
+ new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
+ if (new_size == 0) {
+ pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+ hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+ break;
+ default:
+ pr_err("%s: configure failed, bad channel type: %d\n",
+ hdm_ch->name, ccfg->data_type);
+ return -EINVAL;
+ }
+
+ if (hal_ret != DIM_NO_ERROR) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
+ hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
+ return -ENODEV;
+ }
+
+ hdm_ch->data_type = ccfg->data_type;
+ hdm_ch->direction = ccfg->direction;
+ hdm_ch->is_initialized = true;
+
+ if (hdm_ch->data_type == MOST_CH_ASYNC &&
+ hdm_ch->direction == MOST_CH_TX &&
+ dev->atx_idx < 0)
+ dev->atx_idx = ch_idx;
+
+ spin_unlock_irqrestore(&dim_lock, flags);
+ ccfg->dbr_size = hdm_ch->ch.dbr_size;
+
+ return 0;
+}
+
+/**
+ * enqueue - enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: ID of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Push the buffer into pending_list and try to transfer one buffer from
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int enqueue(struct most_interface *most_iface, int ch_idx,
+ struct mbo *mbo)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ unsigned long flags;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (!hdm_ch->is_initialized)
+ return -EPERM;
+
+ if (mbo->bus_address == 0)
+ return -EFAULT;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ list_add_tail(&mbo->list, &hdm_ch->pending_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ (void)try_start_dim_transfer(hdm_ch);
+
+ return 0;
+}
+
+/**
+ * request_netinfo - triggers retrieving of network info
+ * @most_iface: pointer to the interface
+ * @ch_idx: corresponding channel ID
+ * @on_netinfo: call-back used to deliver network status to mostcore
+ *
+ * Send a command to the INIC that triggers retrieval of network info by means
+ * of "Message exchange over MDP/MEP".
+ */
+static void request_netinfo(struct most_interface *most_iface, int ch_idx,
+ void (*on_netinfo)(struct most_interface *,
+ unsigned char, unsigned char *))
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct mbo *mbo;
+ u8 *data;
+
+ dev->on_netinfo = on_netinfo;
+ if (!on_netinfo)
+ return;
+
+ if (dev->atx_idx < 0) {
+ pr_err("Async Tx Not initialized\n");
+ return;
+ }
+
+ mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
+ if (!mbo)
+ return;
+
+ mbo->buffer_length = 5;
+
+ data = mbo->virt_address;
+
+ data[0] = 0x00; /* PML High byte */
+ data[1] = 0x03; /* PML Low byte */
+ data[2] = 0x02; /* PMHL */
+ data[3] = 0x08; /* FPH */
+ data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */
+
+ most_submit_mbo(mbo);
+}
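As a consistency check, the 5-byte command built above carries PML = 3 in its first two bytes, i.e. the number of bytes following the length field, which matches the buffer_length of 5 and the "PML + 2" computation used in service_done_flag() above; a trivial sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t cmd[5] = { 0x00, 0x03, 0x02, 0x08, 0x40 };
    unsigned int pml = (unsigned int)cmd[0] << 8 | cmd[1];

    printf("PML=%u, PML + 2 = %u (matches buffer_length = 5)\n", pml, pml + 2);
    return 0;
}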
+
+/**
+ * poison_channel - poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel ID
+ *
+ * Destroy a channel and complete all the buffers in both started_list &
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int poison_channel(struct most_interface *most_iface, int ch_idx)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ unsigned long flags;
+ u8 hal_ret;
+ int ret = 0;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (!hdm_ch->is_initialized)
+ return -EPERM;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_destroy_channel(&hdm_ch->ch);
+ hdm_ch->is_initialized = false;
+ if (ch_idx == dev->atx_idx)
+ dev->atx_idx = -1;
+ spin_unlock_irqrestore(&dim_lock, flags);
+ if (hal_ret != DIM_NO_ERROR) {
+ pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
+ ret = -EFAULT;
+ }
+
+ complete_all_mbos(&hdm_ch->started_list);
+ complete_all_mbos(&hdm_ch->pending_list);
+ if (hdm_ch->reset_dbr_size)
+ *hdm_ch->reset_dbr_size = 0;
+
+ return ret;
+}
+
+static void *dma_alloc(struct mbo *mbo, u32 size)
+{
+ struct device *dev = mbo->ifp->driver_dev;
+
+ return dma_alloc_coherent(dev, size, &mbo->bus_address, GFP_KERNEL);
+}
+
+static void dma_free(struct mbo *mbo, u32 size)
+{
+ struct device *dev = mbo->ifp->driver_dev;
+
+ dma_free_coherent(dev, size, mbo->virt_address, mbo->bus_address);
+}
+
+static const struct of_device_id dim2_of_match[];
+
+static struct {
+ const char *clock_speed;
+ u8 clk_speed;
+} clk_mt[] = {
+ { "256fs", CLK_256FS },
+ { "512fs", CLK_512FS },
+ { "1024fs", CLK_1024FS },
+ { "2048fs", CLK_2048FS },
+ { "3072fs", CLK_3072FS },
+ { "4096fs", CLK_4096FS },
+ { "6144fs", CLK_6144FS },
+ { "8192fs", CLK_8192FS },
+};
+
+/**
+ * get_dim2_clk_speed - converts string to DIM2 clock speed value
+ *
+ * @clock_speed: string in the format "{NUMBER}fs"
+ * @val: pointer to get one of the CLK_{NUMBER}FS values
+ *
+ * On success, stores one of the CLK_{NUMBER}FS values in *val and returns 0;
+ * otherwise returns -EINVAL.
+ */
+static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clk_mt); i++) {
+ if (!strcmp(clock_speed, clk_mt[i].clock_speed)) {
+ *val = clk_mt[i].clk_speed;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static void dim2_release(struct device *d)
+{
+ struct dim2_hdm *dev = container_of(d, struct dim2_hdm, dev);
+ unsigned long flags;
+
+ kthread_stop(dev->netinfo_task);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_shutdown();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ if (dev->disable_platform)
+ dev->disable_platform(to_platform_device(d->parent));
+
+ kfree(dev);
+}
+
+/*
+ * dim2_probe - dim2 probe handler
+ * @pdev: platform device structure
+ *
+ * Register the dim2 interface with mostcore and initialize it.
+ * Return 0 on success, negative on failure.
+ */
+static int dim2_probe(struct platform_device *pdev)
+{
+ const struct dim2_platform_data *pdata;
+ const struct of_device_id *of_id;
+ const char *clock_speed;
+ struct dim2_hdm *dev;
+ struct resource *res;
+ int ret, i;
+ u8 hal_ret;
+ u8 dev_fcnt = fcnt;
+ int irq;
+
+ enum { MLB_INT_IDX, AHB0_INT_IDX };
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->atx_idx = -1;
+
+ platform_set_drvdata(pdev, dev);
+
+ ret = of_property_read_string(pdev->dev.of_node,
+ "microchip,clock-speed", &clock_speed);
+ if (ret) {
+ dev_err(&pdev->dev, "missing dt property clock-speed\n");
+ goto err_free_dev;
+ }
+
+ ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
+ if (ret) {
+ dev_err(&pdev->dev, "bad dt property clock-speed\n");
+ goto err_free_dev;
+ }
+
+ dev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(dev->io_base)) {
+ ret = PTR_ERR(dev->io_base);
+ goto err_free_dev;
+ }
+
+ of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
+ pdata = of_id->data;
+ if (pdata) {
+ if (pdata->enable) {
+ ret = pdata->enable(pdev);
+ if (ret)
+ goto err_free_dev;
+ }
+ dev->disable_platform = pdata->disable;
+ if (pdata->fcnt)
+ dev_fcnt = pdata->fcnt;
+ }
+
+ dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n",
+ dev_fcnt);
+ hal_ret = dim_startup(dev->io_base, dev->clk_speed, dev_fcnt);
+ if (hal_ret != DIM_NO_ERROR) {
+ dev_err(&pdev->dev, "dim_startup failed: %d\n", hal_ret);
+ ret = -ENODEV;
+ goto err_disable_platform;
+ }
+
+ irq = platform_get_irq(pdev, AHB0_INT_IDX);
+ if (irq < 0) {
+ ret = irq;
+ goto err_shutdown_dim;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, dim2_ahb_isr,
+ dim2_task_irq, 0, "dim2_ahb0_int", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
+ goto err_shutdown_dim;
+ }
+
+ irq = platform_get_irq(pdev, MLB_INT_IDX);
+ if (irq < 0) {
+ ret = irq;
+ goto err_shutdown_dim;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
+ "dim2_mlb_int", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
+ goto err_shutdown_dim;
+ }
+
+ init_waitqueue_head(&dev->netinfo_waitq);
+ dev->deliver_netinfo = 0;
+ dev->netinfo_task = kthread_run(&deliver_netinfo_thread, dev,
+ "dim2_netinfo");
+ if (IS_ERR(dev->netinfo_task)) {
+ ret = PTR_ERR(dev->netinfo_task);
+ goto err_shutdown_dim;
+ }
+
+ for (i = 0; i < DMA_CHANNELS; i++) {
+ struct most_channel_capability *cap = dev->capabilities + i;
+ struct hdm_channel *hdm_ch = dev->hch + i;
+
+ INIT_LIST_HEAD(&hdm_ch->pending_list);
+ INIT_LIST_HEAD(&hdm_ch->started_list);
+ hdm_ch->is_initialized = false;
+ snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);
+
+ cap->name_suffix = hdm_ch->name;
+ cap->direction = MOST_CH_RX | MOST_CH_TX;
+ cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+ MOST_CH_ISOC | MOST_CH_SYNC;
+ cap->num_buffers_packet = MAX_BUFFERS_PACKET;
+ cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
+ cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
+ cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
+ }
+
+ {
+ const char *fmt;
+
+ if (sizeof(res->start) == sizeof(long long))
+ fmt = "dim2-%016llx";
+ else if (sizeof(res->start) == sizeof(long))
+ fmt = "dim2-%016lx";
+ else
+ fmt = "dim2-%016x";
+
+ snprintf(dev->name, sizeof(dev->name), fmt, res->start);
+ }
+
+ dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
+ dev->most_iface.description = dev->name;
+ dev->most_iface.num_channels = DMA_CHANNELS;
+ dev->most_iface.channel_vector = dev->capabilities;
+ dev->most_iface.configure = configure_channel;
+ dev->most_iface.enqueue = enqueue;
+ dev->most_iface.dma_alloc = dma_alloc;
+ dev->most_iface.dma_free = dma_free;
+ dev->most_iface.poison_channel = poison_channel;
+ dev->most_iface.request_netinfo = request_netinfo;
+ dev->most_iface.driver_dev = &pdev->dev;
+ dev->most_iface.dev = &dev->dev;
+ dev->dev.init_name = dev->name;
+ dev->dev.parent = &pdev->dev;
+ dev->dev.release = dim2_release;
+
+ return most_register_interface(&dev->most_iface);
+
+err_shutdown_dim:
+ dim_shutdown();
+err_disable_platform:
+ if (dev->disable_platform)
+ dev->disable_platform(pdev);
+err_free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+/**
+ * dim2_remove - dim2 remove handler
+ * @pdev: platform device structure
+ *
+ * Unregister the interface from mostcore
+ */
+static void dim2_remove(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+
+ most_deregister_interface(&dev->most_iface);
+}
+
+/* platform specific functions [[ */
+
+static int fsl_mx6_enable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ int ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, "mlb");
+ if (IS_ERR_OR_NULL(dev->clk)) {
+ dev_err(&pdev->dev, "unable to get mlb clock\n");
+ return -EFAULT;
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
+ return ret;
+ }
+
+ if (dev->clk_speed >= CLK_2048FS) {
+ /* enable pll */
+ dev->clk_pll = devm_clk_get(&pdev->dev, "pll8_mlb");
+ if (IS_ERR_OR_NULL(dev->clk_pll)) {
+ dev_err(&pdev->dev, "unable to get mlb pll clock\n");
+ clk_disable_unprepare(dev->clk);
+ return -EFAULT;
+ }
+
+ writel(0x888, dev->io_base + 0x38);
+ clk_prepare_enable(dev->clk_pll);
+ }
+
+ return 0;
+}
+
+static void fsl_mx6_disable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+
+ if (dev->clk_speed >= CLK_2048FS)
+ clk_disable_unprepare(dev->clk_pll);
+
+ clk_disable_unprepare(dev->clk);
+}
+
+static int rcar_gen2_enable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ int ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(dev->clk);
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
+ return ret;
+ }
+
+ if (dev->clk_speed >= CLK_2048FS) {
+ /* enable MLP pll and LVDS drivers */
+ writel(0x03, dev->io_base + 0x600);
+ /* set bias */
+ writel(0x888, dev->io_base + 0x38);
+ } else {
+ /* PLL */
+ writel(0x04, dev->io_base + 0x600);
+ }
+
+ /* BBCR = 0b11 */
+ writel(0x03, dev->io_base + 0x500);
+ writel(0x0002FF02, dev->io_base + 0x508);
+
+ return 0;
+}
+
+static void rcar_gen2_disable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(dev->clk);
+
+ /* disable PLLs and LVDS drivers */
+ writel(0x0, dev->io_base + 0x600);
+}
+
+static int rcar_gen3_enable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ u32 enable_512fs = dev->clk_speed == CLK_512FS;
+ int ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(dev->clk);
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
+ return ret;
+ }
+
+ /* PLL */
+ writel(0x04, dev->io_base + 0x600);
+
+ writel(enable_512fs, dev->io_base + 0x604);
+
+ /* BBCR = 0b11 */
+ writel(0x03, dev->io_base + 0x500);
+ writel(0x0002FF02, dev->io_base + 0x508);
+
+ return 0;
+}
+
+static void rcar_gen3_disable(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(dev->clk);
+
+ /* disable PLLs and LVDS drivers */
+ writel(0x0, dev->io_base + 0x600);
+}
+
+/* ]] platform specific functions */
+
+enum dim2_platforms { FSL_MX6, RCAR_GEN2, RCAR_GEN3 };
+
+static struct dim2_platform_data plat_data[] = {
+ [FSL_MX6] = {
+ .enable = fsl_mx6_enable,
+ .disable = fsl_mx6_disable,
+ },
+ [RCAR_GEN2] = {
+ .enable = rcar_gen2_enable,
+ .disable = rcar_gen2_disable,
+ },
+ [RCAR_GEN3] = {
+ .enable = rcar_gen3_enable,
+ .disable = rcar_gen3_disable,
+ .fcnt = 3,
+ },
+};
+
+static const struct of_device_id dim2_of_match[] = {
+ {
+ .compatible = "fsl,imx6q-mlb150",
+ .data = plat_data + FSL_MX6
+ },
+ {
+ .compatible = "renesas,mlp",
+ .data = plat_data + RCAR_GEN2
+ },
+ {
+ .compatible = "renesas,rcar-gen3-mlp",
+ .data = plat_data + RCAR_GEN3
+ },
+ {
+ .compatible = "xlnx,axi4-os62420_3pin-1.00.a",
+ },
+ {
+ .compatible = "xlnx,axi4-os62420_6pin-1.00.a",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dim2_of_match);
+
+static struct platform_driver dim2_driver = {
+ .probe = dim2_probe,
+ .remove = dim2_remove,
+ .driver = {
+ .name = "hdm_dim2",
+ .of_match_table = dim2_of_match,
+ .dev_groups = dim2_groups,
+ },
+};
+
+module_platform_driver(dim2_driver);
+
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/dim2/errors.h b/drivers/staging/most/dim2/errors.h
new file mode 100644
index 000000000000..268332e5735e
--- /dev/null
+++ b/drivers/staging/most/dim2/errors.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * errors.h - Definitions of errors for DIM2 HAL API
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#ifndef _MOST_DIM_ERRORS_H
+#define _MOST_DIM_ERRORS_H
+
+/**
+ * MOST DIM errors.
+ */
+enum dim_errors_t {
+ /** Not an error */
+ DIM_NO_ERROR = 0,
+
+ /** Bad base address for DIM2 IP */
+ DIM_INIT_ERR_DIM_ADDR = 0x10,
+
+ /** Bad MediaLB clock */
+ DIM_INIT_ERR_MLB_CLOCK,
+
+ /** Bad channel address */
+ DIM_INIT_ERR_CHANNEL_ADDRESS,
+
+ /** Out of DBR memory */
+ DIM_INIT_ERR_OUT_OF_MEMORY,
+
+ /** DIM API is called while DIM is not initialized successfully */
+ DIM_ERR_DRIVER_NOT_INITIALIZED = 0x20,
+
+ /**
+ * Configuration does not respect hardware limitations
+ * for isochronous or synchronous channels
+ */
+ DIM_ERR_BAD_CONFIG,
+
+ /**
+ * Buffer size does not respect hardware limitations
+ * for isochronous or synchronous channels
+ */
+ DIM_ERR_BAD_BUFFER_SIZE,
+
+ DIM_ERR_UNDERFLOW,
+
+ DIM_ERR_OVERFLOW,
+};
+
+#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/dim2/hal.c b/drivers/staging/most/dim2/hal.c
new file mode 100644
index 000000000000..6abe3ab2b2cf
--- /dev/null
+++ b/drivers/staging/most/dim2/hal.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * hal.c - DIM2 HAL implementation
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#include "hal.h"
+#include "errors.h"
+#include "reg.h"
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+/*
+ * Size factor for isochronous DBR buffer.
+ * Minimal value is 3.
+ */
+#define ISOC_DBR_FACTOR 3u
+
+/*
+ * Number of 32-bit units for DBR map.
+ *
+ * 1: block size is 512, max allocation is 16K
+ * 2: block size is 256, max allocation is 8K
+ * 4: block size is 128, max allocation is 4K
+ * 8: block size is 64, max allocation is 2K
+ *
+ * Min allocated space is block size.
+ * Max possible allocated space is 32 blocks.
+ */
+#define DBR_MAP_SIZE 2
+
+/* -------------------------------------------------------------------------- */
+/* not configurable area */
+
+#define CDT 0x00
+#define ADT 0x40
+#define MLB_CAT 0x80
+#define AHB_CAT 0x88
+
+#define DBR_SIZE (16 * 1024) /* specified by IP */
+#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)
+
+#define ROUND_UP_TO(x, d) (DIV_ROUND_UP(x, (d)) * (d))
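As a quick cross-check of the geometry these defines imply, the block size and map coverage can be verified at compile time. This is only an illustrative sketch; the helper name below is hypothetical and not part of the patch.

static inline void dbr_geometry_sanity_check(void)
{
	/* 16 KiB of DBR split into 32 * DBR_MAP_SIZE blocks of 256 bytes */
	BUILD_BUG_ON(DBR_BLOCK_SIZE != 256);
	/* one 32-bit map word covers 32 blocks, i.e. 8 KiB */
	BUILD_BUG_ON(32 * DBR_BLOCK_SIZE != 8 * 1024);
	/* the two map words together cover exactly the 16 KiB DBR */
	BUILD_BUG_ON(DBR_MAP_SIZE * 32 * DBR_BLOCK_SIZE != DBR_SIZE);
}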
+
+/* -------------------------------------------------------------------------- */
+/* generic helper functions and macros */
+
+static inline u32 bit_mask(u8 position)
+{
+ return (u32)1 << position;
+}
+
+static inline bool dim_on_error(u8 error_id, const char *error_message)
+{
+ dimcb_on_error(error_id, error_message);
+ return false;
+}
+
+/* -------------------------------------------------------------------------- */
+/* types and local variables */
+
+struct async_tx_dbr {
+ u8 ch_addr;
+ u16 rpc;
+ u16 wpc;
+ u16 rest_size;
+ u16 sz_queue[CDT0_RPC_MASK + 1];
+};
+
+struct lld_global_vars_t {
+ bool dim_is_initialized;
+ bool mcm_is_initialized;
+ struct dim2_regs __iomem *dim2; /* DIM2 core base address */
+ struct async_tx_dbr atx_dbr;
+ u32 fcnt;
+ u32 dbr_map[DBR_MAP_SIZE];
+};
+
+static struct lld_global_vars_t g = { false };
+
+/* -------------------------------------------------------------------------- */
+
+static int dbr_get_mask_size(u16 size)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (size <= (DBR_BLOCK_SIZE << i))
+ return 1 << i;
+ return 0;
+}
+
+/**
+ * alloc_dbr() - Allocates DBR memory.
+ * @size: Size of the memory to allocate.
+ * Returns: Offset in DBR memory on success, or DBR_SIZE if out of memory.
+ */
+static int alloc_dbr(u16 size)
+{
+ int mask_size;
+ int i, block_idx = 0;
+
+ if (size <= 0)
+ return DBR_SIZE; /* out of memory */
+
+ mask_size = dbr_get_mask_size(size);
+ if (mask_size == 0)
+ return DBR_SIZE; /* out of memory */
+
+ for (i = 0; i < DBR_MAP_SIZE; i++) {
+ u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
+ u32 mask = ~((~(u32)0) << blocks);
+
+ do {
+ if ((g.dbr_map[i] & mask) == 0) {
+ g.dbr_map[i] |= mask;
+ return block_idx * DBR_BLOCK_SIZE;
+ }
+ block_idx += mask_size;
+ /* shift left in two steps to handle the case mask_size == 32 */
+ mask <<= mask_size - 1;
+ } while ((mask <<= 1) != 0);
+ }
+
+ return DBR_SIZE; /* out of memory */
+}
+
+static void free_dbr(int offs, int size)
+{
+ int block_idx = offs / DBR_BLOCK_SIZE;
+ u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
+ u32 mask = ~((~(u32)0) << blocks);
+
+ mask <<= block_idx % 32;
+ g.dbr_map[block_idx / 32] &= ~mask;
+}
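To make the first-fit search above concrete, here is a hypothetical walk-through; the numbers assume the DBR_BLOCK_SIZE of 256 bytes defined above and are commentary only, not driver code.

/*
 * alloc_dbr(700):
 *   blocks    = DIV_ROUND_UP(700, 256) = 3   ->  mask = 0b111
 *   mask_size = dbr_get_mask_size(700) = 4   ->  step between candidates
 * The loop tries block offsets 0, 4, 8, ... in each map word; at the first
 * offset whose 3 bits are clear it sets them (reserving 768 bytes) and
 * returns block_idx * DBR_BLOCK_SIZE as the byte offset into the DBR.
 * free_dbr() later rebuilds the same 3-bit mask and clears it.
 */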
+
+/* -------------------------------------------------------------------------- */
+
+static void dim2_transfer_madr(u32 val)
+{
+ writel(val, &g.dim2->MADR);
+
+ /* wait for transfer completion */
+ while ((readl(&g.dim2->MCTL) & 1) != 1)
+ continue;
+
+ writel(0, &g.dim2->MCTL); /* clear transfer complete */
+}
+
+static void dim2_clear_dbr(u16 addr, u16 size)
+{
+ enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };
+
+ u16 const end_addr = addr + size;
+ u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);
+
+ writel(0, &g.dim2->MCTL); /* clear transfer complete */
+ writel(0, &g.dim2->MDAT0);
+
+ for (; addr < end_addr; addr++)
+ dim2_transfer_madr(cmd | addr);
+}
+
+static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
+{
+ dim2_transfer_madr(ctr_addr);
+
+ return readl((&g.dim2->MDAT0) + mdat_idx);
+}
+
+static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
+{
+ enum { MADR_WNR_BIT = 31 };
+
+ writel(0, &g.dim2->MCTL); /* clear transfer complete */
+
+ if (mask[0] != 0)
+ writel(value[0], &g.dim2->MDAT0);
+ if (mask[1] != 0)
+ writel(value[1], &g.dim2->MDAT1);
+ if (mask[2] != 0)
+ writel(value[2], &g.dim2->MDAT2);
+ if (mask[3] != 0)
+ writel(value[3], &g.dim2->MDAT3);
+
+ writel(mask[0], &g.dim2->MDWE0);
+ writel(mask[1], &g.dim2->MDWE1);
+ writel(mask[2], &g.dim2->MDWE2);
+ writel(mask[3], &g.dim2->MDWE3);
+
+ dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
+}
+
+static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
+{
+ u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static inline void dim2_clear_ctr(u32 ctr_addr)
+{
+ u32 const value[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(ctr_addr, value);
+}
+
+static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
+ bool read_not_write)
+{
+ bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
+ bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
+ u16 const cat =
+ (read_not_write << CAT_RNW_BIT) |
+ (ch_type << CAT_CT_SHIFT) |
+ (ch_addr << CAT_CL_SHIFT) |
+ (isoc_fce << CAT_FCE_BIT) |
+ (sync_mfe << CAT_MFE_BIT) |
+ (false << CAT_MT_BIT) |
+ (true << CAT_CE_BIT);
+ u8 const ctr_addr = cat_base + ch_addr / 8;
+ u8 const idx = (ch_addr % 8) / 2;
+ u8 const shift = (ch_addr % 2) * 16;
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 value[4] = { 0, 0, 0, 0 };
+
+ mask[idx] = (u32)0xFFFF << shift;
+ value[idx] = cat << shift;
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
+{
+ u8 const ctr_addr = cat_base + ch_addr / 8;
+ u8 const idx = (ch_addr % 8) / 2;
+ u8 const shift = (ch_addr % 2) * 16;
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 value[4] = { 0, 0, 0, 0 };
+
+ mask[idx] = (u32)0xFFFF << shift;
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
+ u16 packet_length)
+{
+ u32 cdt[4] = { 0, 0, 0, 0 };
+
+ if (packet_length)
+ cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
+
+ cdt[3] =
+ ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
+ (dbr_address << CDT3_BA_SHIFT);
+ dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static u16 dim2_rpc(u8 ch_addr)
+{
+ u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);
+
+ return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
+}
+
+static void dim2_clear_cdt(u8 ch_addr)
+{
+ u32 cdt[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_configure_adt(u8 ch_addr)
+{
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ adt[0] =
+ (true << ADT0_CE_BIT) |
+ (true << ADT0_LE_BIT) |
+ (0 << ADT0_PG_BIT);
+
+ dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_clear_adt(u8 ch_addr)
+{
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
+ u16 buffer_size)
+{
+ u8 const shift = idx * 16;
+
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_PS_BIT + shift) |
+ bit_mask(ADT1_RDY_BIT + shift) |
+ (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+ adt[1] =
+ (true << (ADT1_PS_BIT + shift)) |
+ (true << (ADT1_RDY_BIT + shift)) |
+ ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+ mask[idx + 2] = 0xFFFFFFFF;
+ adt[idx + 2] = buf_addr;
+
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
+ u16 buffer_size)
+{
+ u8 const shift = idx * 16;
+
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_RDY_BIT + shift) |
+ (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+ adt[1] =
+ (true << (ADT1_RDY_BIT + shift)) |
+ ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+ mask[idx + 2] = 0xFFFFFFFF;
+ adt[idx + 2] = buf_addr;
+
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_clear_ctram(void)
+{
+ u32 ctr_addr;
+
+ for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
+ dim2_clear_ctr(ctr_addr);
+}
+
+static void dim2_configure_channel(u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address,
+ u16 hw_buffer_size, u16 packet_length)
+{
+ dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
+ dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);
+
+ dim2_configure_adt(ch_addr);
+ dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);
+
+ /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
+ writel(readl(&g.dim2->ACMR0) | bit_mask(ch_addr), &g.dim2->ACMR0);
+}
+
+static void dim2_clear_channel(u8 ch_addr)
+{
+ /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
+ writel(readl(&g.dim2->ACMR0) & ~bit_mask(ch_addr), &g.dim2->ACMR0);
+
+ dim2_clear_cat(AHB_CAT, ch_addr);
+ dim2_clear_adt(ch_addr);
+
+ dim2_clear_cat(MLB_CAT, ch_addr);
+ dim2_clear_cdt(ch_addr);
+
+ /* clear channel status bit */
+ writel(bit_mask(ch_addr), &g.dim2->ACSR0);
+}
+
+/* -------------------------------------------------------------------------- */
+/* trace async tx dbr fill state */
+
+static inline u16 norm_pc(u16 pc)
+{
+ return pc & CDT0_RPC_MASK;
+}
+
+static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
+{
+ g.atx_dbr.rest_size = dbr_size;
+ g.atx_dbr.rpc = dim2_rpc(ch_addr);
+ g.atx_dbr.wpc = g.atx_dbr.rpc;
+}
+
+static void dbrcnt_enq(int buf_sz)
+{
+ g.atx_dbr.rest_size -= buf_sz;
+ g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
+ g.atx_dbr.wpc++;
+}
+
+u16 dim_dbr_space(struct dim_channel *ch)
+{
+ u16 cur_rpc;
+ struct async_tx_dbr *dbr = &g.atx_dbr;
+
+ if (ch->addr != dbr->ch_addr)
+ return 0xFFFF;
+
+ cur_rpc = dim2_rpc(ch->addr);
+
+ while (norm_pc(dbr->rpc) != cur_rpc) {
+ dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
+ dbr->rpc++;
+ }
+
+ if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
+ return 0;
+
+ return dbr->rest_size;
+}
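The rpc/wpc pair above mirrors the hardware read packet counter so that dim_dbr_space() can report how much DBR is still free for async TX. A short hypothetical sequence (buffer sizes are arbitrary):

/*
 * dbrcnt_init(ch->addr, 1024);  rest_size = 1024, wpc == rpc
 * dbrcnt_enq(256);              rest_size = 768, sz_queue[wpc] = 256, wpc++
 * dbrcnt_enq(512);              rest_size = 256
 * ...the hardware transmits the first buffer, its RPC advances by one...
 * dim_dbr_space(ch);            credits 256 bytes back -> returns 512
 */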
+
+/* -------------------------------------------------------------------------- */
+/* channel state helpers */
+
+static void state_init(struct int_ch_state *state)
+{
+ state->request_counter = 0;
+ state->service_counter = 0;
+
+ state->idx1 = 0;
+ state->idx2 = 0;
+ state->level = 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* macro helper functions */
+
+static inline bool check_channel_address(u32 ch_address)
+{
+ return ch_address > 0 && (ch_address % 2) == 0 &&
+ (ch_address / 2) <= (u32)CAT_CL_MASK;
+}
+
+static inline bool check_packet_length(u32 packet_length)
+{
+ u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
+
+ if (packet_length <= 0)
+ return false; /* too small */
+
+ if (packet_length > max_size)
+ return false; /* too big */
+
+ if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
+ return false; /* too big */
+
+ return true;
+}
+
+static inline bool check_bytes_per_frame(u32 bytes_per_frame)
+{
+ u16 const bd_factor = g.fcnt + 2;
+ u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
+
+ if (bytes_per_frame <= 0)
+ return false; /* too small */
+
+ if (bytes_per_frame > max_size)
+ return false; /* too big */
+
+ return true;
+}
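As a worked example of this limit, taking fcnt = 3 (the value the R-Car Gen3 platform data above uses): bd_factor = 3 + 2 = 5, so max_size = (CDT3_BD_MASK + 1) >> 5 = 4096 >> 5 = 128, i.e. a synchronous channel may carry at most 128 bytes per frame, and dim_init_sync() below then allocates (by default) bytes_per_frame << 5 bytes of DBR for it.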
+
+u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
+{
+ u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
+
+ if (buf_size > max_size)
+ return max_size;
+
+ return buf_size;
+}
+
+static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+ u16 n;
+ u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+
+ if (buf_size > max_size)
+ buf_size = max_size;
+
+ n = buf_size / packet_length;
+
+ if (n < 2u)
+ return 0; /* too small buffer for given packet_length */
+
+ return packet_length * n;
+}
+
+static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+ u16 n;
+ u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+ u32 const unit = bytes_per_frame << g.fcnt;
+
+ if (buf_size > max_size)
+ buf_size = max_size;
+
+ n = buf_size / unit;
+
+ if (n < 1u)
+ return 0; /* too small buffer for given bytes_per_frame */
+
+ return unit * n;
+}
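A few hypothetical numbers illustrate the rounding (again with fcnt = 3):

/*
 * norm_isoc_buffer_size(2048, 188) -> n = 10 -> 1880 (ten whole packets)
 * norm_sync_buffer_size(1000, 4)   -> unit = 4 << 3 = 32, n = 31 -> 992
 * norm_sync_buffer_size(20, 4)     -> n = 0 -> 0 (buffer too small)
 */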
+
+static void dim2_cleanup(void)
+{
+ /* disable MediaLB */
+ writel(false << MLBC0_MLBEN_BIT, &g.dim2->MLBC0);
+
+ dim2_clear_ctram();
+
+ /* disable mlb_int interrupt */
+ writel(0, &g.dim2->MIEN);
+
+ /* clear status for all dma channels */
+ writel(0xFFFFFFFF, &g.dim2->ACSR0);
+ writel(0xFFFFFFFF, &g.dim2->ACSR1);
+
+ /* mask interrupts for all channels */
+ writel(0, &g.dim2->ACMR0);
+ writel(0, &g.dim2->ACMR1);
+}
+
+static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
+{
+ dim2_cleanup();
+
+ /* configure and enable MediaLB */
+ writel(enable_6pin << MLBC0_MLBPEN_BIT |
+ mlb_clock << MLBC0_MLBCLK_SHIFT |
+ g.fcnt << MLBC0_FCNT_SHIFT |
+ true << MLBC0_MLBEN_BIT,
+ &g.dim2->MLBC0);
+
+ /* activate all HBI channels */
+ writel(0xFFFFFFFF, &g.dim2->HCMR0);
+ writel(0xFFFFFFFF, &g.dim2->HCMR1);
+
+ /* enable HBI */
+ writel(bit_mask(HCTL_EN_BIT), &g.dim2->HCTL);
+
+ /* configure DMA */
+ writel(ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
+ true << ACTL_SCE_BIT, &g.dim2->ACTL);
+}
+
+static bool dim2_is_mlb_locked(void)
+{
+ u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
+ u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
+ bit_mask(MLBC1_LOCKERR_BIT);
+ u32 const c1 = readl(&g.dim2->MLBC1);
+ u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
+
+ writel(c1 & nda_mask, &g.dim2->MLBC1);
+ return (readl(&g.dim2->MLBC1) & mask1) == 0 &&
+ (readl(&g.dim2->MLBC0) & mask0) != 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel help routines */
+
+static inline bool service_channel(u8 ch_addr, u8 idx)
+{
+ u8 const shift = idx * 16;
+ u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt_w[4] = { 0, 0, 0, 0 };
+
+ if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
+ return false;
+
+ mask[1] =
+ bit_mask(ADT1_DNE_BIT + shift) |
+ bit_mask(ADT1_ERR_BIT + shift) |
+ bit_mask(ADT1_RDY_BIT + shift);
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
+
+ /* clear channel status bit */
+ writel(bit_mask(ch_addr), &g.dim2->ACSR0);
+
+ return true;
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel init routines */
+
+static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = packet_length;
+ ch->bytes_per_frame = 0;
+ ch->done_sw_buffers_number = 0;
+}
+
+static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = 0;
+ ch->bytes_per_frame = bytes_per_frame;
+ ch->done_sw_buffers_number = 0;
+}
+
+static void channel_init(struct dim_channel *ch, u8 ch_addr)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = 0;
+ ch->bytes_per_frame = 0;
+ ch->done_sw_buffers_number = 0;
+}
+
+/* returns true if channel interrupt state is cleared */
+static bool channel_service_interrupt(struct dim_channel *ch)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (!service_channel(ch->addr, state->idx2))
+ return false;
+
+ state->idx2 ^= 1;
+ state->request_counter++;
+ return true;
+}
+
+static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (buf_size <= 0)
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
+
+ if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
+ buf_size != dim_norm_ctrl_async_buffer_size(buf_size))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad control/async buffer size");
+
+ if (ch->packet_length &&
+ buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad isochronous buffer size");
+
+ if (ch->bytes_per_frame &&
+ buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad synchronous buffer size");
+
+ if (state->level >= 2u)
+ return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
+
+ ++state->level;
+
+ if (ch->addr == g.atx_dbr.ch_addr)
+ dbrcnt_enq(buf_size);
+
+ if (ch->packet_length || ch->bytes_per_frame)
+ dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
+ else
+ dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
+ buf_size);
+ state->idx1 ^= 1;
+
+ return true;
+}
+
+static u8 channel_service(struct dim_channel *ch)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (state->service_counter != state->request_counter) {
+ state->service_counter++;
+ if (state->level == 0)
+ return DIM_ERR_UNDERFLOW;
+
+ --state->level;
+ ch->done_sw_buffers_number++;
+ }
+
+ return DIM_NO_ERROR;
+}
+
+static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+ if (buffers_number > ch->done_sw_buffers_number)
+ return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
+
+ ch->done_sw_buffers_number -= buffers_number;
+ return true;
+}
+
+/* -------------------------------------------------------------------------- */
+/* API */
+
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
+ u32 fcnt)
+{
+ g.dim_is_initialized = false;
+
+ if (!dim_base_address)
+ return DIM_INIT_ERR_DIM_ADDR;
+
+ /*
+ * MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs,
+ * 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs
+ */
+ if (mlb_clock >= 8)
+ return DIM_INIT_ERR_MLB_CLOCK;
+
+ if (fcnt > MLBC0_FCNT_MAX_VAL)
+ return DIM_INIT_ERR_MLB_CLOCK;
+
+ g.dim2 = dim_base_address;
+ g.fcnt = fcnt;
+ g.dbr_map[0] = 0;
+ g.dbr_map[1] = 0;
+
+ dim2_initialize(mlb_clock >= 3, mlb_clock);
+
+ g.dim_is_initialized = true;
+
+ return DIM_NO_ERROR;
+}
+
+void dim_shutdown(void)
+{
+ g.dim_is_initialized = false;
+ dim2_cleanup();
+}
+
+bool dim_get_lock_state(void)
+{
+ return dim2_is_mlb_locked();
+}
+
+static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
+ u16 ch_address, u16 hw_buffer_size)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ if (!ch->dbr_size)
+ ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ channel_init(ch, ch_address / 2);
+
+ dim2_configure_channel(ch->addr, type, is_tx,
+ ch->dbr_addr, ch->dbr_size, 0);
+
+ return DIM_NO_ERROR;
+}
+
+void dim_service_mlb_int_irq(void)
+{
+ writel(0, &g.dim2->MS0);
+ writel(0, &g.dim2->MS1);
+}
+
+/*
+ * Retrieves the maximal possible correct buffer size for the isochronous
+ * data type that conforms to the given packet length and is not bigger
+ * than the given buffer size.
+ *
+ * Returns a non-zero correct buffer size, or zero on error.
+ */
+u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+ if (!check_packet_length(packet_length))
+ return 0;
+
+ return norm_isoc_buffer_size(buf_size, packet_length);
+}
+
+/*
+ * Retrieves the maximal possible correct buffer size for the synchronous
+ * data type that conforms to the given bytes per frame and is not bigger
+ * than the given buffer size.
+ *
+ * Returns a non-zero correct buffer size, or zero on error.
+ */
+u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+ if (!check_bytes_per_frame(bytes_per_frame))
+ return 0;
+
+ return norm_sync_buffer_size(buf_size, bytes_per_frame);
+}
+
+u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size)
+{
+ return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
+ max_buffer_size);
+}
+
+u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size)
+{
+ u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
+ max_buffer_size);
+
+ if (is_tx && !g.atx_dbr.ch_addr) {
+ g.atx_dbr.ch_addr = ch->addr;
+ dbrcnt_init(ch->addr, ch->dbr_size);
+ writel(bit_mask(20), &g.dim2->MIEN);
+ }
+
+ return ret;
+}
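The bare bit_mask(20) above corresponds to MIEN_ATX_DONE_BIT in reg.h, i.e. only the asynchronous-TX-done interrupt is unmasked for the DBR fill tracking. An equivalent, self-describing form of the same write would be:

	writel(bit_mask(MIEN_ATX_DONE_BIT), &g.dim2->MIEN);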
+
+u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 packet_length)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ if (!check_packet_length(packet_length))
+ return DIM_ERR_BAD_CONFIG;
+
+ if (!ch->dbr_size)
+ ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ isoc_init(ch, ch_address / 2, packet_length);
+
+ dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
+ ch->dbr_size, packet_length);
+
+ return DIM_NO_ERROR;
+}
+
+u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 bytes_per_frame)
+{
+ u16 bd_factor = g.fcnt + 2;
+
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ if (!check_bytes_per_frame(bytes_per_frame))
+ return DIM_ERR_BAD_CONFIG;
+
+ if (!ch->dbr_size)
+ ch->dbr_size = bytes_per_frame << bd_factor;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ sync_init(ch, ch_address / 2, bytes_per_frame);
+
+ dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
+ dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
+ ch->dbr_addr, ch->dbr_size, 0);
+
+ return DIM_NO_ERROR;
+}
+
+u8 dim_destroy_channel(struct dim_channel *ch)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (ch->addr == g.atx_dbr.ch_addr) {
+ writel(0, &g.dim2->MIEN);
+ g.atx_dbr.ch_addr = 0;
+ }
+
+ dim2_clear_channel(ch->addr);
+ if (ch->dbr_addr < DBR_SIZE)
+ free_dbr(ch->dbr_addr, ch->dbr_size);
+ ch->dbr_addr = DBR_SIZE;
+
+ return DIM_NO_ERROR;
+}
+
+void dim_service_ahb_int_irq(struct dim_channel *const *channels)
+{
+ bool state_changed;
+
+ if (!g.dim_is_initialized) {
+ dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+ "DIM is not initialized");
+ return;
+ }
+
+ if (!channels) {
+ dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
+ return;
+ }
+
+ /*
+ * Use a while loop and a flag to make sure the age is changed back at
+ * least once; otherwise the interrupt may never come if the CPU
+ * generates the interrupt only on an age change.
+ * This loop runs no more times than the number of channels, because
+ * the channel_service_interrupt() routine does not start the channel
+ * again.
+ */
+ do {
+ struct dim_channel *const *ch = channels;
+
+ state_changed = false;
+
+ while (*ch) {
+ state_changed |= channel_service_interrupt(*ch);
+ ++ch;
+ }
+ } while (state_changed);
+}
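A hypothetical caller (in the driver above this is the threaded AHB0 interrupt handler) passes a NULL-terminated array of the channels it has initialized; DMA_CHANNELS here is the HDM-side constant seen in dim2_probe():

	struct dim_channel *channels[DMA_CHANNELS + 1] = { NULL };

	/* ...fill channels[0..n-1] with the currently initialized channels... */

	dim_service_ahb_int_irq(channels);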
+
+u8 dim_service_channel(struct dim_channel *ch)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ return channel_service(ch);
+}
+
+struct dim_ch_state *dim_get_channel_state(struct dim_channel *ch,
+ struct dim_ch_state *state_ptr)
+{
+ if (!ch || !state_ptr)
+ return NULL;
+
+ state_ptr->ready = ch->state.level < 2;
+ state_ptr->done_buffers = ch->done_sw_buffers_number;
+
+ return state_ptr;
+}
+
+bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
+ u16 buffer_size)
+{
+ if (!ch)
+ return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+ "Bad channel");
+
+ return channel_start(ch, buffer_addr, buffer_size);
+}
+
+bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+ if (!ch)
+ return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+ "Bad channel");
+
+ return channel_detach_buffers(ch, buffers_number);
+}
diff --git a/drivers/staging/most/dim2/hal.h b/drivers/staging/most/dim2/hal.h
new file mode 100644
index 000000000000..ef10a8741c10
--- /dev/null
+++ b/drivers/staging/most/dim2/hal.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * hal.h - DIM2 HAL interface
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#ifndef _DIM2_HAL_H
+#define _DIM2_HAL_H
+
+#include <linux/types.h>
+#include "reg.h"
+
+/*
+ * The values below are specified in the hardware specification.
+ * So, they should not be changed until the hardware specification changes.
+ */
+enum mlb_clk_speed {
+ CLK_256FS = 0,
+ CLK_512FS = 1,
+ CLK_1024FS = 2,
+ CLK_2048FS = 3,
+ CLK_3072FS = 4,
+ CLK_4096FS = 5,
+ CLK_6144FS = 6,
+ CLK_8192FS = 7,
+};
+
+struct dim_ch_state {
+ bool ready; /* Shows readiness to enqueue next buffer */
+ u16 done_buffers; /* Number of completed buffers */
+};
+
+struct int_ch_state {
+ /* changed only in interrupt context */
+ volatile int request_counter;
+
+ /* changed only in task context */
+ volatile int service_counter;
+
+ u8 idx1;
+ u8 idx2;
+ u8 level; /* [0..2], buffering level */
+};
+
+struct dim_channel {
+ struct int_ch_state state;
+ u8 addr;
+ u16 dbr_addr;
+ u16 dbr_size;
+ u16 packet_length; /* Isochronous packet length in bytes. */
+ u16 bytes_per_frame; /* Synchronous bytes per frame. */
+ u16 done_sw_buffers_number; /* Number of completed software buffers. */
+};
+
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
+ u32 fcnt);
+
+void dim_shutdown(void);
+
+bool dim_get_lock_state(void);
+
+u16 dim_norm_ctrl_async_buffer_size(u16 buf_size);
+
+u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length);
+
+u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame);
+
+u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size);
+
+u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size);
+
+u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 packet_length);
+
+u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 bytes_per_frame);
+
+u8 dim_destroy_channel(struct dim_channel *ch);
+
+void dim_service_mlb_int_irq(void);
+
+void dim_service_ahb_int_irq(struct dim_channel *const *channels);
+
+u8 dim_service_channel(struct dim_channel *ch);
+
+struct dim_ch_state *dim_get_channel_state(struct dim_channel *ch,
+ struct dim_ch_state *state_ptr);
+
+u16 dim_dbr_space(struct dim_channel *ch);
+
+bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
+ u16 buffer_size);
+
+bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
+
+void dimcb_on_error(u8 error_id, const char *error_message);
+
+#endif /* _DIM2_HAL_H */
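To tie the interface together, here is a minimal sketch of the HAL call sequence for a control RX channel. Everything except the dim_* API and the CLK_* constants is hypothetical: the function name, the DMA address handed in by the caller, and the assumption that the surrounding HDM provides the dimcb_on_error() callback declared above and calls the IRQ service routines from interrupt context.

static u8 example_control_rx(struct dim2_regs __iomem *base, u32 dma_addr)
{
	struct dim_channel ch = { 0 };
	struct dim_channel *list[2] = { &ch, NULL };
	struct dim_ch_state st;
	u16 buf_size = dim_norm_ctrl_async_buffer_size(2048);
	u8 ret;

	ret = dim_startup(base, CLK_1024FS, 3); /* fcnt 3 -> 8 frames/sub-buffer */
	if (ret != DIM_NO_ERROR)
		return ret;

	ret = dim_init_control(&ch, 0 /* rx */, 2 /* MediaLB address */, buf_size);
	if (ret != DIM_NO_ERROR)
		goto shutdown;

	if (dim_get_channel_state(&ch, &st) && st.ready)
		dim_enqueue_buffer(&ch, dma_addr, buf_size);

	/* normally run from the AHB0 interrupt handler */
	dim_service_ahb_int_irq(list);

	/* normally run from task context afterwards */
	ret = dim_service_channel(&ch);
	if (dim_get_channel_state(&ch, &st) && st.done_buffers)
		dim_detach_buffers(&ch, st.done_buffers);

	dim_destroy_channel(&ch);
shutdown:
	dim_shutdown();
	return ret;
}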
diff --git a/drivers/staging/most/dim2/reg.h b/drivers/staging/most/dim2/reg.h
new file mode 100644
index 000000000000..b0f36c208a57
--- /dev/null
+++ b/drivers/staging/most/dim2/reg.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * reg.h - Definitions for registers of DIM2
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#ifndef DIM2_OS62420_H
+#define DIM2_OS62420_H
+
+#include <linux/types.h>
+
+struct dim2_regs {
+ u32 MLBC0; /* 0x00 */
+ u32 rsvd0[1]; /* 0x01 */
+ u32 MLBPC0; /* 0x02 */
+ u32 MS0; /* 0x03 */
+ u32 rsvd1[1]; /* 0x04 */
+ u32 MS1; /* 0x05 */
+ u32 rsvd2[2]; /* 0x06 */
+ u32 MSS; /* 0x08 */
+ u32 MSD; /* 0x09 */
+ u32 rsvd3[1]; /* 0x0A */
+ u32 MIEN; /* 0x0B */
+ u32 rsvd4[1]; /* 0x0C */
+ u32 MLBPC2; /* 0x0D */
+ u32 MLBPC1; /* 0x0E */
+ u32 MLBC1; /* 0x0F */
+ u32 rsvd5[0x10]; /* 0x10 */
+ u32 HCTL; /* 0x20 */
+ u32 rsvd6[1]; /* 0x21 */
+ u32 HCMR0; /* 0x22 */
+ u32 HCMR1; /* 0x23 */
+ u32 HCER0; /* 0x24 */
+ u32 HCER1; /* 0x25 */
+ u32 HCBR0; /* 0x26 */
+ u32 HCBR1; /* 0x27 */
+ u32 rsvd7[8]; /* 0x28 */
+ u32 MDAT0; /* 0x30 */
+ u32 MDAT1; /* 0x31 */
+ u32 MDAT2; /* 0x32 */
+ u32 MDAT3; /* 0x33 */
+ u32 MDWE0; /* 0x34 */
+ u32 MDWE1; /* 0x35 */
+ u32 MDWE2; /* 0x36 */
+ u32 MDWE3; /* 0x37 */
+ u32 MCTL; /* 0x38 */
+ u32 MADR; /* 0x39 */
+ u32 rsvd8[0xb6]; /* 0x3A */
+ u32 ACTL; /* 0xF0 */
+ u32 rsvd9[3]; /* 0xF1 */
+ u32 ACSR0; /* 0xF4 */
+ u32 ACSR1; /* 0xF5 */
+ u32 ACMR0; /* 0xF6 */
+ u32 ACMR1; /* 0xF7 */
+};
+
+#define DIM2_MASK(n) (~((~(u32)0) << (n)))
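As a quick reading aid for the field definitions that follow:

/* DIM2_MASK(5) == 0x1F (CDT0_RPC_MASK), DIM2_MASK(12) == 0xFFF (CDT3_BD_MASK) */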
+
+enum {
+ MLBC0_MLBLK_BIT = 7,
+
+ MLBC0_MLBPEN_BIT = 5,
+
+ MLBC0_MLBCLK_SHIFT = 2,
+ MLBC0_MLBCLK_VAL_256FS = 0,
+ MLBC0_MLBCLK_VAL_512FS = 1,
+ MLBC0_MLBCLK_VAL_1024FS = 2,
+ MLBC0_MLBCLK_VAL_2048FS = 3,
+
+ MLBC0_FCNT_SHIFT = 15,
+ MLBC0_FCNT_MASK = 7,
+ MLBC0_FCNT_MAX_VAL = 6,
+
+ MLBC0_MLBEN_BIT = 0,
+
+ MIEN_CTX_BREAK_BIT = 29,
+ MIEN_CTX_PE_BIT = 28,
+ MIEN_CTX_DONE_BIT = 27,
+
+ MIEN_CRX_BREAK_BIT = 26,
+ MIEN_CRX_PE_BIT = 25,
+ MIEN_CRX_DONE_BIT = 24,
+
+ MIEN_ATX_BREAK_BIT = 22,
+ MIEN_ATX_PE_BIT = 21,
+ MIEN_ATX_DONE_BIT = 20,
+
+ MIEN_ARX_BREAK_BIT = 19,
+ MIEN_ARX_PE_BIT = 18,
+ MIEN_ARX_DONE_BIT = 17,
+
+ MIEN_SYNC_PE_BIT = 16,
+
+ MIEN_ISOC_BUFO_BIT = 1,
+ MIEN_ISOC_PE_BIT = 0,
+
+ MLBC1_NDA_SHIFT = 8,
+ MLBC1_NDA_MASK = 0xFF,
+
+ MLBC1_CLKMERR_BIT = 7,
+ MLBC1_LOCKERR_BIT = 6,
+
+ ACTL_DMA_MODE_BIT = 2,
+ ACTL_DMA_MODE_VAL_DMA_MODE_0 = 0,
+ ACTL_DMA_MODE_VAL_DMA_MODE_1 = 1,
+ ACTL_SCE_BIT = 0,
+
+ HCTL_EN_BIT = 15
+};
+
+enum {
+ CDT0_RPC_SHIFT = 16 + 11,
+ CDT0_RPC_MASK = DIM2_MASK(5),
+
+ CDT1_BS_ISOC_SHIFT = 0,
+ CDT1_BS_ISOC_MASK = DIM2_MASK(9),
+
+ CDT3_BD_SHIFT = 0,
+ CDT3_BD_MASK = DIM2_MASK(12),
+ CDT3_BD_ISOC_MASK = DIM2_MASK(13),
+ CDT3_BA_SHIFT = 16,
+
+ ADT0_CE_BIT = 15,
+ ADT0_LE_BIT = 14,
+ ADT0_PG_BIT = 13,
+
+ ADT1_RDY_BIT = 15,
+ ADT1_DNE_BIT = 14,
+ ADT1_ERR_BIT = 13,
+ ADT1_PS_BIT = 12,
+ ADT1_MEP_BIT = 11,
+ ADT1_BD_SHIFT = 0,
+ ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
+ ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
+
+ CAT_FCE_BIT = 14,
+ CAT_MFE_BIT = 14,
+
+ CAT_MT_BIT = 13,
+
+ CAT_RNW_BIT = 12,
+
+ CAT_CE_BIT = 11,
+
+ CAT_CT_SHIFT = 8,
+ CAT_CT_VAL_SYNC = 0,
+ CAT_CT_VAL_CONTROL = 1,
+ CAT_CT_VAL_ASYNC = 2,
+ CAT_CT_VAL_ISOC = 3,
+
+ CAT_CL_SHIFT = 0,
+ CAT_CL_MASK = DIM2_MASK(6)
+};
+
+#endif /* DIM2_OS62420_H */
diff --git a/drivers/staging/most/dim2/sysfs.h b/drivers/staging/most/dim2/sysfs.h
new file mode 100644
index 000000000000..09115cf4ed00
--- /dev/null
+++ b/drivers/staging/most/dim2/sysfs.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sysfs.h - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#ifndef DIM2_SYSFS_H
+#define DIM2_SYSFS_H
+
+#include <linux/kobject.h>
+
+struct medialb_bus {
+ struct kobject kobj_group;
+};
+
+#endif /* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
deleted file mode 100644
index fc548769479b..000000000000
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# MediaLB configuration
-#
-
-config HDM_DIM2
- tristate "DIM2 HDM"
- depends on AIM_NETWORK
- depends on HAS_IOMEM
-
- ---help---
- Say Y here if you want to connect via MediaLB to network transceiver.
- This device driver is platform dependent and needs an addtional
- platform driver to be installed. For more information contact
- maintainer of this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called hdm_dim2.
diff --git a/drivers/staging/most/hdm-dim2/Makefile b/drivers/staging/most/hdm-dim2/Makefile
deleted file mode 100644
index 6bbee879a8ea..000000000000
--- a/drivers/staging/most/hdm-dim2/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_HDM_DIM2) += hdm_dim2.o
-
-hdm_dim2-objs := dim2_hdm.o dim2_hal.o dim2_sysfs.o
-ccflags-y += -Idrivers/staging/most/mostcore/
-ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
deleted file mode 100644
index 314f7de2be73..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_errors.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * dim2_errors.h - Definitions of errors for DIM2 HAL API
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef _MOST_DIM_ERRORS_H
-#define _MOST_DIM_ERRORS_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- * MOST DIM errors.
- */
-enum dim_errors_t {
- /** Not an error */
- DIM_NO_ERROR = 0,
-
- /** Bad base address for DIM2 IP */
- DIM_INIT_ERR_DIM_ADDR = 0x10,
-
- /**< Bad MediaLB clock */
- DIM_INIT_ERR_MLB_CLOCK,
-
- /** Bad channel address */
- DIM_INIT_ERR_CHANNEL_ADDRESS,
-
- /** Out of DBR memory */
- DIM_INIT_ERR_OUT_OF_MEMORY,
-
- /** DIM API is called while DIM is not initialized successfully */
- DIM_ERR_DRIVER_NOT_INITIALIZED = 0x20,
-
- /**
- * Configuration does not respect hardware limitations
- * for isochronous or synchronous channels
- */
- DIM_ERR_BAD_CONFIG,
-
- /**
- * Buffer size does not respect hardware limitations
- * for isochronous or synchronous channels
- */
- DIM_ERR_BAD_BUFFER_SIZE,
-
- DIM_ERR_UNDERFLOW,
-
- DIM_ERR_OVERFLOW,
-};
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
deleted file mode 100644
index 0e548301e3bc..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ /dev/null
@@ -1,914 +0,0 @@
-/*
- * dim2_hal.c - DIM2 HAL implementation
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
-
-#include "dim2_hal.h"
-#include "dim2_errors.h"
-#include "dim2_reg.h"
-#include <linux/stddef.h>
-
-
-/*
- * The number of frames per sub-buffer for synchronous channels.
- * Allowed values: 1, 2, 4, 8, 16, 32, 64.
- */
-#define FRAMES_PER_SUBBUFF 16
-
-/*
- * Size factor for synchronous DBR buffer.
- * Minimal value is 4*FRAMES_PER_SUBBUFF.
- */
-#define SYNC_DBR_FACTOR (4u * (u16)FRAMES_PER_SUBBUFF)
-
-/*
- * Size factor for isochronous DBR buffer.
- * Minimal value is 3.
- */
-#define ISOC_DBR_FACTOR 3u
-
-/*
- * Number of 32-bit units for DBR map.
- *
- * 1: block size is 512, max allocation is 16K
- * 2: block size is 256, max allocation is 8K
- * 4: block size is 128, max allocation is 4K
- * 8: block size is 64, max allocation is 2K
- *
- * Min allocated space is block size.
- * Max possible allocated space is 32 blocks.
- */
-#define DBR_MAP_SIZE 2
-
-
-/* -------------------------------------------------------------------------- */
-/* not configurable area */
-
-#define CDT 0x00
-#define ADT 0x40
-#define MLB_CAT 0x80
-#define AHB_CAT 0x88
-
-#define DBR_SIZE (16*1024) /* specified by IP */
-#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)
-
-
-/* -------------------------------------------------------------------------- */
-/* generic helper functions and macros */
-
-#define MLBC0_FCNT_VAL_MACRO(n) MLBC0_FCNT_VAL_ ## n ## FPSB
-#define MLBC0_FCNT_VAL(fpsb) MLBC0_FCNT_VAL_MACRO(fpsb)
-
-static inline u32 bit_mask(u8 position)
-{
- return (u32)1 << position;
-}
-
-static inline bool dim_on_error(u8 error_id, const char *error_message)
-{
- DIMCB_OnError(error_id, error_message);
- return false;
-}
-
-
-/* -------------------------------------------------------------------------- */
-/* types and local variables */
-
-struct lld_global_vars_t {
- bool dim_is_initialized;
- bool mcm_is_initialized;
- struct dim2_regs *dim2; /* DIM2 core base address */
- u32 dbr_map[DBR_MAP_SIZE];
-};
-
-static struct lld_global_vars_t g = { false };
-
-
-/* -------------------------------------------------------------------------- */
-
-static int dbr_get_mask_size(u16 size)
-{
- int i;
-
- for (i = 0; i < 6; i++)
- if (size <= (DBR_BLOCK_SIZE << i))
- return 1 << i;
- return 0;
-}
-
-/**
- * Allocates DBR memory.
- * @param size Allocating memory size.
- * @return Offset in DBR memory by success or DBR_SIZE if out of memory.
- */
-static int alloc_dbr(u16 size)
-{
- int mask_size;
- int i, block_idx = 0;
-
- if (size <= 0)
- return DBR_SIZE; /* out of memory */
-
- mask_size = dbr_get_mask_size(size);
- if (mask_size == 0)
- return DBR_SIZE; /* out of memory */
-
- for (i = 0; i < DBR_MAP_SIZE; i++) {
- u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
- u32 mask = ~((~(u32)0) << blocks);
-
- do {
- if ((g.dbr_map[i] & mask) == 0) {
- g.dbr_map[i] |= mask;
- return block_idx * DBR_BLOCK_SIZE;
- }
- block_idx += mask_size;
- /* do shift left with 2 steps for case mask_size == 32 */
- mask <<= mask_size - 1;
- } while ((mask <<= 1) != 0);
- }
-
- return DBR_SIZE; /* out of memory */
-}
-
-static void free_dbr(int offs, int size)
-{
- int block_idx = offs / DBR_BLOCK_SIZE;
- u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
- u32 mask = ~((~(u32)0) << blocks);
-
- mask <<= block_idx % 32;
- g.dbr_map[block_idx / 32] &= ~mask;
-}
-
-/* -------------------------------------------------------------------------- */
-
-static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
-{
- DIMCB_IoWrite(&g.dim2->MADR, ctr_addr);
-
- /* wait till transfer is completed */
- while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
- continue;
-
- DIMCB_IoWrite(&g.dim2->MCTL, 0); /* clear transfer complete */
-
- return DIMCB_IoRead((&g.dim2->MDAT0) + mdat_idx);
-}
-
-static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
-{
- enum { MADR_WNR_BIT = 31 };
-
- DIMCB_IoWrite(&g.dim2->MCTL, 0); /* clear transfer complete */
-
- if (mask[0] != 0)
- DIMCB_IoWrite(&g.dim2->MDAT0, value[0]);
- if (mask[1] != 0)
- DIMCB_IoWrite(&g.dim2->MDAT1, value[1]);
- if (mask[2] != 0)
- DIMCB_IoWrite(&g.dim2->MDAT2, value[2]);
- if (mask[3] != 0)
- DIMCB_IoWrite(&g.dim2->MDAT3, value[3]);
-
- DIMCB_IoWrite(&g.dim2->MDWE0, mask[0]);
- DIMCB_IoWrite(&g.dim2->MDWE1, mask[1]);
- DIMCB_IoWrite(&g.dim2->MDWE2, mask[2]);
- DIMCB_IoWrite(&g.dim2->MDWE3, mask[3]);
-
- DIMCB_IoWrite(&g.dim2->MADR, bit_mask(MADR_WNR_BIT) | ctr_addr);
-
- /* wait till transfer is completed */
- while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
- continue;
-
- DIMCB_IoWrite(&g.dim2->MCTL, 0); /* clear transfer complete */
-}
-
-static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
-{
- u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
-
- dim2_write_ctr_mask(ctr_addr, mask, value);
-}
-
-static inline void dim2_clear_ctr(u32 ctr_addr)
-{
- u32 const value[4] = { 0, 0, 0, 0 };
-
- dim2_write_ctr(ctr_addr, value);
-}
-
-static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
- bool read_not_write, bool sync_mfe)
-{
- u16 const cat =
- (read_not_write << CAT_RNW_BIT) |
- (ch_type << CAT_CT_SHIFT) |
- (ch_addr << CAT_CL_SHIFT) |
- (sync_mfe << CAT_MFE_BIT) |
- (false << CAT_MT_BIT) |
- (true << CAT_CE_BIT);
- u8 const ctr_addr = cat_base + ch_addr / 8;
- u8 const idx = (ch_addr % 8) / 2;
- u8 const shift = (ch_addr % 2) * 16;
- u32 mask[4] = { 0, 0, 0, 0 };
- u32 value[4] = { 0, 0, 0, 0 };
-
- mask[idx] = (u32)0xFFFF << shift;
- value[idx] = cat << shift;
- dim2_write_ctr_mask(ctr_addr, mask, value);
-}
-
-static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
-{
- u8 const ctr_addr = cat_base + ch_addr / 8;
- u8 const idx = (ch_addr % 8) / 2;
- u8 const shift = (ch_addr % 2) * 16;
- u32 mask[4] = { 0, 0, 0, 0 };
- u32 value[4] = { 0, 0, 0, 0 };
-
- mask[idx] = (u32)0xFFFF << shift;
- dim2_write_ctr_mask(ctr_addr, mask, value);
-}
-
-static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
- u16 packet_length)
-{
- u32 cdt[4] = { 0, 0, 0, 0 };
-
- if (packet_length)
- cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
-
- cdt[3] =
- ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
- (dbr_address << CDT3_BA_SHIFT);
- dim2_write_ctr(CDT + ch_addr, cdt);
-}
-
-static void dim2_clear_cdt(u8 ch_addr)
-{
- u32 cdt[4] = { 0, 0, 0, 0 };
-
- dim2_write_ctr(CDT + ch_addr, cdt);
-}
-
-static void dim2_configure_adt(u8 ch_addr)
-{
- u32 adt[4] = { 0, 0, 0, 0 };
-
- adt[0] =
- (true << ADT0_CE_BIT) |
- (true << ADT0_LE_BIT) |
- (0 << ADT0_PG_BIT);
-
- dim2_write_ctr(ADT + ch_addr, adt);
-}
-
-static void dim2_clear_adt(u8 ch_addr)
-{
- u32 adt[4] = { 0, 0, 0, 0 };
-
- dim2_write_ctr(ADT + ch_addr, adt);
-}
-
-static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
- u16 buffer_size)
-{
- u8 const shift = idx * 16;
-
- u32 mask[4] = { 0, 0, 0, 0 };
- u32 adt[4] = { 0, 0, 0, 0 };
-
- mask[1] =
- bit_mask(ADT1_PS_BIT + shift) |
- bit_mask(ADT1_RDY_BIT + shift) |
- (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
- adt[1] =
- (true << (ADT1_PS_BIT + shift)) |
- (true << (ADT1_RDY_BIT + shift)) |
- ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
-
- mask[idx + 2] = 0xFFFFFFFF;
- adt[idx + 2] = buf_addr;
-
- dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
-}
-
-static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
- u16 buffer_size)
-{
- u8 const shift = idx * 16;
-
- u32 mask[4] = { 0, 0, 0, 0 };
- u32 adt[4] = { 0, 0, 0, 0 };
-
- mask[1] =
- bit_mask(ADT1_RDY_BIT + shift) |
- (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
- adt[1] =
- (true << (ADT1_RDY_BIT + shift)) |
- ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
-
- mask[idx + 2] = 0xFFFFFFFF;
- adt[idx + 2] = buf_addr;
-
- dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
-}
-
-
-static void dim2_clear_ctram(void)
-{
- u32 ctr_addr;
-
- for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
- dim2_clear_ctr(ctr_addr);
-}
-
-static void dim2_configure_channel(
- u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
- u16 packet_length, bool sync_mfe)
-{
- dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
- dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0, sync_mfe);
-
- dim2_configure_adt(ch_addr);
- dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1, sync_mfe);
-
- /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
- DIMCB_IoWrite(&g.dim2->ACMR0,
- DIMCB_IoRead(&g.dim2->ACMR0) | bit_mask(ch_addr));
-}
-
-static void dim2_clear_channel(u8 ch_addr)
-{
- /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
- DIMCB_IoWrite(&g.dim2->ACMR0,
- DIMCB_IoRead(&g.dim2->ACMR0) & ~bit_mask(ch_addr));
-
- dim2_clear_cat(AHB_CAT, ch_addr);
- dim2_clear_adt(ch_addr);
-
- dim2_clear_cat(MLB_CAT, ch_addr);
- dim2_clear_cdt(ch_addr);
-}
-
-/* -------------------------------------------------------------------------- */
-/* channel state helpers */
-
-static void state_init(struct int_ch_state *state)
-{
- state->request_counter = 0;
- state->service_counter = 0;
-
- state->idx1 = 0;
- state->idx2 = 0;
- state->level = 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* macro helper functions */
-
-static inline bool check_channel_address(u32 ch_address)
-{
- return ch_address > 0 && (ch_address % 2) == 0 &&
- (ch_address / 2) <= (u32)CAT_CL_MASK;
-}
-
-static inline bool check_packet_length(u32 packet_length)
-{
- u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
-
- if (packet_length <= 0)
- return false; /* too small */
-
- if (packet_length > max_size)
- return false; /* too big */
-
- if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
- return false; /* too big */
-
- return true;
-}
-
-static inline bool check_bytes_per_frame(u32 bytes_per_frame)
-{
- u16 const max_size = ((u16)CDT3_BD_MASK + 1u) / SYNC_DBR_FACTOR;
-
- if (bytes_per_frame <= 0)
- return false; /* too small */
-
- if (bytes_per_frame > max_size)
- return false; /* too big */
-
- return true;
-}
-
-static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
-{
- u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
-
- if (buf_size > max_size)
- return max_size;
-
- return buf_size;
-}
-
-static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
-{
- u16 n;
- u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
-
- if (buf_size > max_size)
- buf_size = max_size;
-
- n = buf_size / packet_length;
-
- if (n < 2u)
- return 0; /* too small buffer for given packet_length */
-
- return packet_length * n;
-}
-
-static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
-{
- u16 n;
- u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
- u32 const unit = bytes_per_frame * (u16)FRAMES_PER_SUBBUFF;
-
- if (buf_size > max_size)
- buf_size = max_size;
-
- n = buf_size / unit;
-
- if (n < 1u)
- return 0; /* too small buffer for given bytes_per_frame */
-
- return unit * n;
-}
-
-static void dim2_cleanup(void)
-{
- /* disable MediaLB */
- DIMCB_IoWrite(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);
-
- dim2_clear_ctram();
-
- /* disable mlb_int interrupt */
- DIMCB_IoWrite(&g.dim2->MIEN, 0);
-
- /* clear status for all dma channels */
- DIMCB_IoWrite(&g.dim2->ACSR0, 0xFFFFFFFF);
- DIMCB_IoWrite(&g.dim2->ACSR1, 0xFFFFFFFF);
-
- /* mask interrupts for all channels */
- DIMCB_IoWrite(&g.dim2->ACMR0, 0);
- DIMCB_IoWrite(&g.dim2->ACMR1, 0);
-}
-
-static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
-{
- dim2_cleanup();
-
- /* configure and enable MediaLB */
- DIMCB_IoWrite(&g.dim2->MLBC0,
- enable_6pin << MLBC0_MLBPEN_BIT |
- mlb_clock << MLBC0_MLBCLK_SHIFT |
- MLBC0_FCNT_VAL(FRAMES_PER_SUBBUFF) << MLBC0_FCNT_SHIFT |
- true << MLBC0_MLBEN_BIT);
-
- /* activate all HBI channels */
- DIMCB_IoWrite(&g.dim2->HCMR0, 0xFFFFFFFF);
- DIMCB_IoWrite(&g.dim2->HCMR1, 0xFFFFFFFF);
-
- /* enable HBI */
- DIMCB_IoWrite(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));
-
- /* configure DMA */
- DIMCB_IoWrite(&g.dim2->ACTL,
- ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
- true << ACTL_SCE_BIT);
-
-#if 0
- DIMCB_IoWrite(&g.dim2->MIEN,
- bit_mask(MIEN_CTX_BREAK_BIT) |
- bit_mask(MIEN_CTX_PE_BIT) |
- bit_mask(MIEN_CTX_DONE_BIT) |
- bit_mask(MIEN_CRX_BREAK_BIT) |
- bit_mask(MIEN_CRX_PE_BIT) |
- bit_mask(MIEN_CRX_DONE_BIT) |
- bit_mask(MIEN_ATX_BREAK_BIT) |
- bit_mask(MIEN_ATX_PE_BIT) |
- bit_mask(MIEN_ATX_DONE_BIT) |
- bit_mask(MIEN_ARX_BREAK_BIT) |
- bit_mask(MIEN_ARX_PE_BIT) |
- bit_mask(MIEN_ARX_DONE_BIT));
-#endif
-}
-
-static bool dim2_is_mlb_locked(void)
-{
- u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
- u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
- bit_mask(MLBC1_LOCKERR_BIT);
- u32 const c1 = DIMCB_IoRead(&g.dim2->MLBC1);
- u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
-
- DIMCB_IoWrite(&g.dim2->MLBC1, c1 & nda_mask);
- return (DIMCB_IoRead(&g.dim2->MLBC1) & mask1) == 0 &&
- (DIMCB_IoRead(&g.dim2->MLBC0) & mask0) != 0;
-}
-
-
-/* -------------------------------------------------------------------------- */
-/* channel help routines */
-
-static inline bool service_channel(u8 ch_addr, u8 idx)
-{
- u8 const shift = idx * 16;
- u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
-
- if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
- return false;
-
- {
- u32 mask[4] = { 0, 0, 0, 0 };
- u32 adt_w[4] = { 0, 0, 0, 0 };
-
- mask[1] =
- bit_mask(ADT1_DNE_BIT + shift) |
- bit_mask(ADT1_ERR_BIT + shift) |
- bit_mask(ADT1_RDY_BIT + shift);
- dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
- }
-
- /* clear channel status bit */
- DIMCB_IoWrite(&g.dim2->ACSR0, bit_mask(ch_addr));
-
- return true;
-}
-
-
-/* -------------------------------------------------------------------------- */
-/* channel init routines */
-
-static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
-{
- state_init(&ch->state);
-
- ch->addr = ch_addr;
-
- ch->packet_length = packet_length;
- ch->bytes_per_frame = 0;
- ch->done_sw_buffers_number = 0;
-}
-
-static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
-{
- state_init(&ch->state);
-
- ch->addr = ch_addr;
-
- ch->packet_length = 0;
- ch->bytes_per_frame = bytes_per_frame;
- ch->done_sw_buffers_number = 0;
-}
-
-static void channel_init(struct dim_channel *ch, u8 ch_addr)
-{
- state_init(&ch->state);
-
- ch->addr = ch_addr;
-
- ch->packet_length = 0;
- ch->bytes_per_frame = 0;
- ch->done_sw_buffers_number = 0;
-}
-
-/* returns true if channel interrupt state is cleared */
-static bool channel_service_interrupt(struct dim_channel *ch)
-{
- struct int_ch_state *const state = &ch->state;
-
- if (!service_channel(ch->addr, state->idx2))
- return false;
-
- state->idx2 ^= 1;
- state->request_counter++;
- return true;
-}
-
-static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
-{
- struct int_ch_state *const state = &ch->state;
-
- if (buf_size <= 0)
- return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
-
- if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
- buf_size != norm_ctrl_async_buffer_size(buf_size))
- return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
- "Bad control/async buffer size");
-
- if (ch->packet_length &&
- buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
- return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
- "Bad isochronous buffer size");
-
- if (ch->bytes_per_frame &&
- buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
- return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
- "Bad synchronous buffer size");
-
- if (state->level >= 2u)
- return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
-
- ++state->level;
-
- if (ch->packet_length || ch->bytes_per_frame)
- dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
- else
- dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr, buf_size);
- state->idx1 ^= 1;
-
- return true;
-}
-
-static u8 channel_service(struct dim_channel *ch)
-{
- struct int_ch_state *const state = &ch->state;
-
- if (state->service_counter != state->request_counter) {
- state->service_counter++;
- if (state->level == 0)
- return DIM_ERR_UNDERFLOW;
-
- --state->level;
- ch->done_sw_buffers_number++;
- }
-
- return DIM_NO_ERROR;
-}
-
-static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
-{
- if (buffers_number > ch->done_sw_buffers_number)
- return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
-
- ch->done_sw_buffers_number -= buffers_number;
- return true;
-}
-
-
-/* -------------------------------------------------------------------------- */
-/* API */
-
-u8 DIM_Startup(void *dim_base_address, u32 mlb_clock)
-{
- g.dim_is_initialized = false;
-
- if (!dim_base_address)
- return DIM_INIT_ERR_DIM_ADDR;
-
- /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
- /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
- if (mlb_clock >= 8)
- return DIM_INIT_ERR_MLB_CLOCK;
-
- g.dim2 = dim_base_address;
- g.dbr_map[0] = g.dbr_map[1] = 0;
-
- dim2_initialize(mlb_clock >= 3, mlb_clock);
-
- g.dim_is_initialized = true;
-
- return DIM_NO_ERROR;
-}
-
-void DIM_Shutdown(void)
-{
- g.dim_is_initialized = false;
- dim2_cleanup();
-}
-
-bool DIM_GetLockState(void)
-{
- return dim2_is_mlb_locked();
-}
-
-static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
- u16 ch_address, u16 hw_buffer_size)
-{
- if (!g.dim_is_initialized || !ch)
- return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
- if (!check_channel_address(ch_address))
- return DIM_INIT_ERR_CHANNEL_ADDRESS;
-
- ch->dbr_size = hw_buffer_size;
- ch->dbr_addr = alloc_dbr(ch->dbr_size);
- if (ch->dbr_addr >= DBR_SIZE)
- return DIM_INIT_ERR_OUT_OF_MEMORY;
-
- channel_init(ch, ch_address / 2);
-
- dim2_configure_channel(ch->addr, type, is_tx,
- ch->dbr_addr, ch->dbr_size, 0, false);
-
- return DIM_NO_ERROR;
-}
-
-u16 DIM_NormCtrlAsyncBufferSize(u16 buf_size)
-{
- return norm_ctrl_async_buffer_size(buf_size);
-}
-
-/**
- * Retrieves the maximal possible correct buffer size for the isochronous data
- * type that conforms to the given packet length and is not bigger than the
- * given buffer size.
- *
- * Returns a non-zero correct buffer size, or zero on error.
- */
-u16 DIM_NormIsocBufferSize(u16 buf_size, u16 packet_length)
-{
- if (!check_packet_length(packet_length))
- return 0;
-
- return norm_isoc_buffer_size(buf_size, packet_length);
-}
-
-/**
- * Retrieves the maximal possible correct buffer size for the synchronous data
- * type that conforms to the given bytes per frame and is not bigger than the
- * given buffer size.
- *
- * Returns a non-zero correct buffer size, or zero on error.
- */
-u16 DIM_NormSyncBufferSize(u16 buf_size, u16 bytes_per_frame)
-{
- if (!check_bytes_per_frame(bytes_per_frame))
- return 0;
-
- return norm_sync_buffer_size(buf_size, bytes_per_frame);
-}
-
-u8 DIM_InitControl(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 max_buffer_size)
-{
- return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
- max_buffer_size);
-}
-
-u8 DIM_InitAsync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 max_buffer_size)
-{
- return init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
- max_buffer_size);
-}
-
-u8 DIM_InitIsoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 packet_length)
-{
- if (!g.dim_is_initialized || !ch)
- return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
- if (!check_channel_address(ch_address))
- return DIM_INIT_ERR_CHANNEL_ADDRESS;
-
- if (!check_packet_length(packet_length))
- return DIM_ERR_BAD_CONFIG;
-
- ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
- ch->dbr_addr = alloc_dbr(ch->dbr_size);
- if (ch->dbr_addr >= DBR_SIZE)
- return DIM_INIT_ERR_OUT_OF_MEMORY;
-
- isoc_init(ch, ch_address / 2, packet_length);
-
- dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
- ch->dbr_size, packet_length, false);
-
- return DIM_NO_ERROR;
-}
-
-u8 DIM_InitSync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 bytes_per_frame)
-{
- if (!g.dim_is_initialized || !ch)
- return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
- if (!check_channel_address(ch_address))
- return DIM_INIT_ERR_CHANNEL_ADDRESS;
-
- if (!check_bytes_per_frame(bytes_per_frame))
- return DIM_ERR_BAD_CONFIG;
-
- ch->dbr_size = bytes_per_frame * SYNC_DBR_FACTOR;
- ch->dbr_addr = alloc_dbr(ch->dbr_size);
- if (ch->dbr_addr >= DBR_SIZE)
- return DIM_INIT_ERR_OUT_OF_MEMORY;
-
- sync_init(ch, ch_address / 2, bytes_per_frame);
-
- dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
- ch->dbr_addr, ch->dbr_size, 0, true);
-
- return DIM_NO_ERROR;
-}
-
-u8 DIM_DestroyChannel(struct dim_channel *ch)
-{
- if (!g.dim_is_initialized || !ch)
- return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
- dim2_clear_channel(ch->addr);
- if (ch->dbr_addr < DBR_SIZE)
- free_dbr(ch->dbr_addr, ch->dbr_size);
- ch->dbr_addr = DBR_SIZE;
-
- return DIM_NO_ERROR;
-}
-
-void DIM_ServiceIrq(struct dim_channel *const *channels)
-{
- bool state_changed;
-
- if (!g.dim_is_initialized) {
- dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
- "DIM is not initialized");
- return;
- }
-
- if (!channels) {
- dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
- return;
- }
-
- /*
- * Use a while-loop and a flag to make sure the age is changed back at least
- * once, otherwise the interrupt may never come if the CPU generates the
- * interrupt on changing the age.
- *
- * This loop runs no more than the number of channels times, because
- * channel_service_interrupt() does not start the channel again.
- */
- do {
- struct dim_channel *const *ch = channels;
-
- state_changed = false;
-
- while (*ch) {
- state_changed |= channel_service_interrupt(*ch);
- ++ch;
- }
- } while (state_changed);
-
- /* clear pending Interrupts */
- DIMCB_IoWrite(&g.dim2->MS0, 0);
- DIMCB_IoWrite(&g.dim2->MS1, 0);
-}
-
-u8 DIM_ServiceChannel(struct dim_channel *ch)
-{
- if (!g.dim_is_initialized || !ch)
- return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
- return channel_service(ch);
-}
-
-struct dim_ch_state_t *DIM_GetChannelState(struct dim_channel *ch,
- struct dim_ch_state_t *state_ptr)
-{
- if (!ch || !state_ptr)
- return NULL;
-
- state_ptr->ready = ch->state.level < 2;
- state_ptr->done_buffers = ch->done_sw_buffers_number;
-
- return state_ptr;
-}
-
-bool DIM_EnqueueBuffer(struct dim_channel *ch, u32 buffer_addr, u16 buffer_size)
-{
- if (!ch)
- return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channel");
-
- return channel_start(ch, buffer_addr, buffer_size);
-}
-
-bool DIM_DetachBuffers(struct dim_channel *ch, u16 buffers_number)
-{
- if (!ch)
- return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channel");
-
- return channel_detach_buffers(ch, buffers_number);
-}
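Taken together, the deleted HAL above exposes a small life cycle: start the IP, create a channel, keep at most two buffers queued per channel, and reap completions from the interrupt path. A minimal, hypothetical sketch of that call sequence (locking and most error handling elided; io_base, buf_bus_addr and buf_size are caller-supplied placeholders, not values from this driver):

/* Hypothetical bring-up sketch for the DIM2 HAL above (no locking). */
static struct dim_channel ctrl_ch;

static void example_dim2_bring_up(void *io_base, u32 buf_bus_addr, u16 buf_size)
{
        struct dim_channel *chans[] = { &ctrl_ch, NULL };
        struct dim_ch_state_t st;
        u16 norm_size = DIM_NormCtrlAsyncBufferSize(buf_size);

        if (!norm_size)
                return;

        if (DIM_Startup(io_base, 4 /* 3072 fs */) != DIM_NO_ERROR)
                return;

        /* MediaLB channel address 2 is the first non-system channel */
        if (DIM_InitControl(&ctrl_ch, 1 /* is_tx */, 2, norm_size) != DIM_NO_ERROR)
                return;

        /* the HAL keeps at most two hardware buffers in flight per channel */
        while (DIM_GetChannelState(&ctrl_ch, &st)->ready)
                DIM_EnqueueBuffer(&ctrl_ch, buf_bus_addr, norm_size);

        /* later, from the interrupt path: acknowledge, then reap completions */
        DIM_ServiceIrq(chans);
        DIM_ServiceChannel(&ctrl_ch);
        DIM_DetachBuffers(&ctrl_ch, DIM_GetChannelState(&ctrl_ch, &st)->done_buffers);
}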
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
deleted file mode 100644
index ebb7d87a45fc..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * dim2_hal.h - DIM2 HAL interface
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef _DIM2_HAL_H
-#define _DIM2_HAL_H
-
-#include <linux/types.h>
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * The values below are specified in the hardware specification.
- * So, they should not be changed until the hardware specification changes.
- */
-enum mlb_clk_speed {
- CLK_256FS = 0,
- CLK_512FS = 1,
- CLK_1024FS = 2,
- CLK_2048FS = 3,
- CLK_3072FS = 4,
- CLK_4096FS = 5,
- CLK_6144FS = 6,
- CLK_8192FS = 7,
-};
-
-struct dim_ch_state_t {
- bool ready; /* Shows readiness to enqueue next buffer */
- u16 done_buffers; /* Number of completed buffers */
-};
-
-typedef int atomic_counter_t;
-
-struct int_ch_state {
- /* changed only in interrupt context */
- volatile atomic_counter_t request_counter;
-
- /* changed only in task context */
- volatile atomic_counter_t service_counter;
-
- u8 idx1;
- u8 idx2;
- u8 level; /* [0..2], buffering level */
-};
-
-struct dim_channel {
- struct int_ch_state state;
- u8 addr;
- u16 dbr_addr;
- u16 dbr_size;
- u16 packet_length; /* Isochronous packet length in bytes. */
- u16 bytes_per_frame; /* Synchronous bytes per frame. */
- u16 done_sw_buffers_number; /* Number of completed software buffers. */
-};
-
-
-u8 DIM_Startup(void *dim_base_address, u32 mlb_clock);
-
-void DIM_Shutdown(void);
-
-bool DIM_GetLockState(void);
-
-u16 DIM_NormCtrlAsyncBufferSize(u16 buf_size);
-
-u16 DIM_NormIsocBufferSize(u16 buf_size, u16 packet_length);
-
-u16 DIM_NormSyncBufferSize(u16 buf_size, u16 bytes_per_frame);
-
-u8 DIM_InitControl(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 max_buffer_size);
-
-u8 DIM_InitAsync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 max_buffer_size);
-
-u8 DIM_InitIsoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 packet_length);
-
-u8 DIM_InitSync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
- u16 bytes_per_frame);
-
-u8 DIM_DestroyChannel(struct dim_channel *ch);
-
-void DIM_ServiceIrq(struct dim_channel *const *channels);
-
-u8 DIM_ServiceChannel(struct dim_channel *ch);
-
-struct dim_ch_state_t *DIM_GetChannelState(struct dim_channel *ch,
- struct dim_ch_state_t *dim_ch_state_ptr);
-
-bool DIM_EnqueueBuffer(struct dim_channel *ch, u32 buffer_addr,
- u16 buffer_size);
-
-bool DIM_DetachBuffers(struct dim_channel *ch, u16 buffers_number);
-
-u32 DIMCB_IoRead(u32 *ptr32);
-
-void DIMCB_IoWrite(u32 *ptr32, u32 value);
-
-void DIMCB_OnError(u8 error_id, const char *error_message);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
deleted file mode 100644
index 1bb70b738155..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ /dev/null
@@ -1,953 +0,0 @@
-/*
- * dim2_hdm.c - MediaLB DIM2 Hardware Dependent Module
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-
-#include <mostcore.h>
-#include <networking.h>
-#include "dim2_hal.h"
-#include "dim2_hdm.h"
-#include "dim2_errors.h"
-#include "dim2_sysfs.h"
-
-#define DMA_CHANNELS (32 - 1) /* channel 0 is a system channel */
-
-#define MAX_BUFFERS_PACKET 32
-#define MAX_BUFFERS_STREAMING 32
-#define MAX_BUF_SIZE_PACKET 2048
-#define MAX_BUF_SIZE_STREAMING (8*1024)
-
-/* command line parameter to select clock speed */
-static char *clock_speed;
-module_param(clock_speed, charp, 0);
-MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
-
-/*
- * #############################################################################
- *
- * The define below activates a utility function used by HAL-simu
- * for calling the DIM interrupt handler.
- * It is used for test purposes only and shall stay commented out in releases.
- *
- * #############################################################################
- */
-/* #define ENABLE_HDM_TEST */
-
-static DEFINE_SPINLOCK(dim_lock);
-
-static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
-
-/**
- * struct hdm_channel - private structure to keep channel specific data
- * @is_initialized: identifier to know whether the channel is initialized
- * @ch: HAL specific channel data
- * @pending_list: list to keep MBOs before starting transfer
- * @started_list: list to keep MBOs after starting transfer
- * @direction: channel direction (TX or RX)
- * @data_type: channel data type
- */
-struct hdm_channel {
- char name[sizeof "caNNN"];
- bool is_initialized;
- struct dim_channel ch;
- struct list_head pending_list; /* before DIM_EnqueueBuffer() */
- struct list_head started_list; /* after DIM_EnqueueBuffer() */
- enum most_channel_direction direction;
- enum most_channel_data_type data_type;
-};
-
-/**
- * struct dim2_hdm - private structure to keep interface specific data
- * @hch: an array of channel specific data
- * @most_iface: most interface structure
- * @capabilities: an array of channel capability data
- * @io_base: I/O register base address
- * @irq_ahb0: dim2 AHB0 irq number
- * @clk_speed: user selectable (through command line parameter) clock speed
- * @netinfo_task: thread to deliver network status
- * @netinfo_waitq: waitq for the thread to sleep
- * @deliver_netinfo: to identify whether network status received
- * @mac_addrs: INIC mac address
- * @link_state: network link state
- * @atx_idx: index of async tx channel
- */
-struct dim2_hdm {
- struct hdm_channel hch[DMA_CHANNELS];
- struct most_channel_capability capabilities[DMA_CHANNELS];
- struct most_interface most_iface;
- char name[16 + sizeof "dim2-"];
- void *io_base;
- unsigned int irq_ahb0;
- int clk_speed;
- struct task_struct *netinfo_task;
- wait_queue_head_t netinfo_waitq;
- int deliver_netinfo;
- unsigned char mac_addrs[6];
- unsigned char link_state;
- int atx_idx;
- struct medialb_bus bus;
-};
-
-#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
-
-/* Macro to identify a network status message */
-#define PACKET_IS_NET_INFO(p) \
- (((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
- ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
-
-#if defined(ENABLE_HDM_TEST)
-static struct dim2_hdm *test_dev;
-#endif
-
-bool dim2_sysfs_get_state_cb(void)
-{
- bool state;
- unsigned long flags;
-
- spin_lock_irqsave(&dim_lock, flags);
- state = DIM_GetLockState();
- spin_unlock_irqrestore(&dim_lock, flags);
-
- return state;
-}
-
-/**
- * DIMCB_IoRead - callback from HAL to read an I/O register
- * @ptr32: register address
- */
-u32 DIMCB_IoRead(u32 *ptr32)
-{
- return __raw_readl(ptr32);
-}
-
-/**
- * DIMCB_IoWrite - callback from HAL to write value to an I/O register
- * @ptr32: register address
- * @value: value to write
- */
-void DIMCB_IoWrite(u32 *ptr32, u32 value)
-{
- __raw_writel(value, ptr32);
-}
-
-/**
- * DIMCB_OnError - callback from HAL to report miscommunication between
- * HDM and HAL
- * @error_id: Error ID
- * @error_message: Error message. Some text in a free format
- */
-void DIMCB_OnError(u8 error_id, const char *error_message)
-{
- pr_err("DIMCB_OnError: error_id - %d, error_message - %s\n", error_id,
- error_message);
-}
-
-/**
- * startup_dim - initialize the dim2 interface
- * @pdev: platform device
- *
- * Get the value of command line parameter "clock_speed" if given or use the
- * default value, enable the clock and PLL, and initialize the dim2 interface.
- */
-static int startup_dim(struct platform_device *pdev)
-{
- struct dim2_hdm *dev = platform_get_drvdata(pdev);
- struct dim2_platform_data *pdata = pdev->dev.platform_data;
- u8 hal_ret;
-
- dev->clk_speed = -1;
-
- if (clock_speed) {
- if (!strcmp(clock_speed, "256fs"))
- dev->clk_speed = CLK_256FS;
- else if (!strcmp(clock_speed, "512fs"))
- dev->clk_speed = CLK_512FS;
- else if (!strcmp(clock_speed, "1024fs"))
- dev->clk_speed = CLK_1024FS;
- else if (!strcmp(clock_speed, "2048fs"))
- dev->clk_speed = CLK_2048FS;
- else if (!strcmp(clock_speed, "3072fs"))
- dev->clk_speed = CLK_3072FS;
- else if (!strcmp(clock_speed, "4096fs"))
- dev->clk_speed = CLK_4096FS;
- else if (!strcmp(clock_speed, "6144fs"))
- dev->clk_speed = CLK_6144FS;
- else if (!strcmp(clock_speed, "8192fs"))
- dev->clk_speed = CLK_8192FS;
- }
-
- if (dev->clk_speed == -1) {
- pr_info("Bad or missing clock speed parameter, using default value: 3072fs\n");
- dev->clk_speed = CLK_3072FS;
- } else
- pr_info("Selected clock speed: %s\n", clock_speed);
-
- if (pdata && pdata->init) {
- int ret = pdata->init(pdata, dev->io_base, dev->clk_speed);
-
- if (ret)
- return ret;
- }
-
- hal_ret = DIM_Startup(dev->io_base, dev->clk_speed);
- if (hal_ret != DIM_NO_ERROR) {
- pr_err("DIM_Startup failed: %d\n", hal_ret);
- if (pdata && pdata->destroy)
- pdata->destroy(pdata);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * try_start_dim_transfer - try to transfer a buffer on a channel
- * @hdm_ch: channel specific data
- *
- * Transfer a buffer from pending_list if the channel is ready
- */
-static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
-{
- u16 buf_size;
- struct list_head *head = &hdm_ch->pending_list;
- struct mbo *mbo;
- unsigned long flags;
- struct dim_ch_state_t st;
-
- BUG_ON(!hdm_ch);
- BUG_ON(!hdm_ch->is_initialized);
-
- spin_lock_irqsave(&dim_lock, flags);
- if (list_empty(head)) {
- spin_unlock_irqrestore(&dim_lock, flags);
- return -EAGAIN;
- }
-
- if (!DIM_GetChannelState(&hdm_ch->ch, &st)->ready) {
- spin_unlock_irqrestore(&dim_lock, flags);
- return -EAGAIN;
- }
-
- mbo = list_entry(head->next, struct mbo, list);
- buf_size = mbo->buffer_length;
-
- BUG_ON(mbo->bus_address == 0);
- if (!DIM_EnqueueBuffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
- list_del(head->next);
- spin_unlock_irqrestore(&dim_lock, flags);
- mbo->processed_length = 0;
- mbo->status = MBO_E_INVAL;
- mbo->complete(mbo);
- return -EFAULT;
- }
-
- list_move_tail(head->next, &hdm_ch->started_list);
- spin_unlock_irqrestore(&dim_lock, flags);
-
- return 0;
-}
-
-/**
- * deliver_netinfo_thread - thread to deliver network status to mostcore
- * @data: private data
- *
- * Wait for network status and deliver it to mostcore once it is received
- */
-static int deliver_netinfo_thread(void *data)
-{
- struct dim2_hdm *dev = (struct dim2_hdm *)data;
-
- while (!kthread_should_stop()) {
- wait_event_interruptible(dev->netinfo_waitq,
- dev->deliver_netinfo ||
- kthread_should_stop());
-
- if (dev->deliver_netinfo) {
- dev->deliver_netinfo--;
- most_deliver_netinfo(&dev->most_iface, dev->link_state,
- dev->mac_addrs);
- }
- }
-
- return 0;
-}
-
-/**
- * retrieve_netinfo - retrieve network status from received buffer
- * @dev: private data
- * @mbo: received MBO
- *
- * Parse the message in the buffer and get the node address, link state and
- * MAC address. Wake up a thread to deliver this status to mostcore.
- */
-static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
-{
- u8 *data = mbo->virt_address;
- u8 *mac = dev->mac_addrs;
-
- pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
- dev->link_state = data[18];
- pr_info("NIState: %d\n", dev->link_state);
- memcpy(mac, data + 19, 6);
- pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- dev->deliver_netinfo++;
- wake_up_interruptible(&dev->netinfo_waitq);
-}
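The byte offsets parsed above are easy to miss; a purely illustrative set of names for them (offsets copied from the code above, the enum itself is not part of the original driver):

/* Illustrative layout of the network-status payload parsed by retrieve_netinfo() */
enum {
        NETINFO_NODE_ADDR_HI = 16,      /* data[16..17]: node address, big endian */
        NETINFO_NODE_ADDR_LO = 17,
        NETINFO_NI_STATE     = 18,      /* data[18]: network interface (link) state */
        NETINFO_MAC_OFFSET   = 19,      /* data[19..24]: MAC address */
        NETINFO_MAC_LEN      = 6,
};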
-
-/**
- * service_done_flag - handle completed buffers
- * @dev: private data
- * @ch_idx: channel index
- *
- * Return the completed buffers to mostcore using the completion callback
- */
-static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
-{
- struct hdm_channel *hdm_ch = dev->hch + ch_idx;
- struct dim_ch_state_t st;
- struct list_head *head;
- struct mbo *mbo;
- int done_buffers;
- unsigned long flags;
- u8 *data;
-
- BUG_ON(!hdm_ch);
- BUG_ON(!hdm_ch->is_initialized);
-
- spin_lock_irqsave(&dim_lock, flags);
-
- done_buffers = DIM_GetChannelState(&hdm_ch->ch, &st)->done_buffers;
- if (!done_buffers) {
- spin_unlock_irqrestore(&dim_lock, flags);
- return;
- }
-
- if (!DIM_DetachBuffers(&hdm_ch->ch, done_buffers)) {
- spin_unlock_irqrestore(&dim_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&dim_lock, flags);
-
- head = &hdm_ch->started_list;
-
- while (done_buffers) {
- spin_lock_irqsave(&dim_lock, flags);
- if (list_empty(head)) {
- spin_unlock_irqrestore(&dim_lock, flags);
- pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
- break;
- }
-
- mbo = list_entry(head->next, struct mbo, list);
- list_del(head->next);
- spin_unlock_irqrestore(&dim_lock, flags);
-
- data = mbo->virt_address;
-
- if (hdm_ch->data_type == MOST_CH_ASYNC &&
- hdm_ch->direction == MOST_CH_RX &&
- PACKET_IS_NET_INFO(data)) {
-
- retrieve_netinfo(dev, mbo);
-
- spin_lock_irqsave(&dim_lock, flags);
- list_add_tail(&mbo->list, &hdm_ch->pending_list);
- spin_unlock_irqrestore(&dim_lock, flags);
- } else {
- if (hdm_ch->data_type == MOST_CH_CONTROL ||
- hdm_ch->data_type == MOST_CH_ASYNC) {
-
- u32 const data_size =
- (u32)data[0] * 256 + data[1] + 2;
-
- mbo->processed_length =
- min(data_size, (u32)mbo->buffer_length);
- } else {
- mbo->processed_length = mbo->buffer_length;
- }
- mbo->status = MBO_SUCCESS;
- mbo->complete(mbo);
- }
-
- done_buffers--;
- }
-}
-
-static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
- struct dim_channel **buffer)
-{
- int idx = 0;
- int ch_idx;
-
- for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
- if (dev->hch[ch_idx].is_initialized)
- buffer[idx++] = &dev->hch[ch_idx].ch;
- }
- buffer[idx++] = NULL;
-
- return buffer;
-}
-
-/**
- * dim2_tasklet_fn - tasklet function
- * @data: private data
- *
- * Service each initialized channel, if needed
- */
-static void dim2_tasklet_fn(unsigned long data)
-{
- struct dim2_hdm *dev = (struct dim2_hdm *)data;
- unsigned long flags;
- int ch_idx;
-
- for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
- if (!dev->hch[ch_idx].is_initialized)
- continue;
-
- spin_lock_irqsave(&dim_lock, flags);
- DIM_ServiceChannel(&(dev->hch[ch_idx].ch));
- spin_unlock_irqrestore(&dim_lock, flags);
-
- service_done_flag(dev, ch_idx);
- while (!try_start_dim_transfer(dev->hch + ch_idx))
- continue;
- }
-}
-
-/**
- * dim2_ahb_isr - interrupt service routine
- * @irq: irq number
- * @_dev: private data
- *
- * Acknowledge the interrupt and schedule a tasklet to service channels.
- * Return IRQ_HANDLED.
- */
-static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
-{
- struct dim2_hdm *dev = (struct dim2_hdm *)_dev;
- struct dim_channel *buffer[DMA_CHANNELS + 1];
- unsigned long flags;
-
- spin_lock_irqsave(&dim_lock, flags);
- DIM_ServiceIrq(get_active_channels(dev, buffer));
- spin_unlock_irqrestore(&dim_lock, flags);
-
-#if !defined(ENABLE_HDM_TEST)
- dim2_tasklet.data = (unsigned long)dev;
- tasklet_schedule(&dim2_tasklet);
-#else
- dim2_tasklet_fn((unsigned long)dev);
-#endif
- return IRQ_HANDLED;
-}
-
-#if defined(ENABLE_HDM_TEST)
-
-/*
- * Utility function used by HAL-simu for calling the DIM interrupt handler.
- * It is used for test purposes only.
- */
-void raise_dim_interrupt(void)
-{
- (void)dim2_ahb_isr(0, test_dev);
-}
-#endif
-
-/**
- * complete_all_mbos - complete MBOs in a list
- * @head: list head
- *
- * Delete all entries from the list and return the MBOs to mostcore using the
- * completion callback.
- */
-static void complete_all_mbos(struct list_head *head)
-{
- unsigned long flags;
- struct mbo *mbo;
-
- for (;;) {
- spin_lock_irqsave(&dim_lock, flags);
- if (list_empty(head)) {
- spin_unlock_irqrestore(&dim_lock, flags);
- break;
- }
-
- mbo = list_entry(head->next, struct mbo, list);
- list_del(head->next);
- spin_unlock_irqrestore(&dim_lock, flags);
-
- mbo->processed_length = 0;
- mbo->status = MBO_E_CLOSE;
- mbo->complete(mbo);
- }
-}
-
-/**
- * configure_channel - initialize a channel
- * @most_iface: interface the channel belongs to
- * @ch_idx: index of the channel to be configured
- * @ccfg: structure that holds the configuration information
- *
- * Receives configuration information from mostcore and initializes
- * the corresponding channel. Return 0 on success, negative on failure.
- */
-static int configure_channel(struct most_interface *most_iface, int ch_idx,
- struct most_channel_config *ccfg)
-{
- struct dim2_hdm *dev = iface_to_hdm(most_iface);
- bool const is_tx = ccfg->direction == MOST_CH_TX;
- u16 const sub_size = ccfg->subbuffer_size;
- u16 const buf_size = ccfg->buffer_size;
- u16 new_size;
- unsigned long flags;
- u8 hal_ret;
- int const ch_addr = ch_idx * 2 + 2;
- struct hdm_channel *const hdm_ch = dev->hch + ch_idx;
-
- BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
-
- if (hdm_ch->is_initialized)
- return -EPERM;
-
- switch (ccfg->data_type) {
- case MOST_CH_CONTROL:
- new_size = DIM_NormCtrlAsyncBufferSize(buf_size);
- if (new_size == 0) {
- pr_err("%s: too small buffer size\n", hdm_ch->name);
- return -EINVAL;
- }
- ccfg->buffer_size = new_size;
- if (new_size != buf_size)
- pr_warn("%s: fixed buffer size (%d -> %d)\n",
- hdm_ch->name, buf_size, new_size);
- spin_lock_irqsave(&dim_lock, flags);
- hal_ret = DIM_InitControl(&hdm_ch->ch, is_tx, ch_addr, buf_size);
- break;
- case MOST_CH_ASYNC:
- new_size = DIM_NormCtrlAsyncBufferSize(buf_size);
- if (new_size == 0) {
- pr_err("%s: too small buffer size\n", hdm_ch->name);
- return -EINVAL;
- }
- ccfg->buffer_size = new_size;
- if (new_size != buf_size)
- pr_warn("%s: fixed buffer size (%d -> %d)\n",
- hdm_ch->name, buf_size, new_size);
- spin_lock_irqsave(&dim_lock, flags);
- hal_ret = DIM_InitAsync(&hdm_ch->ch, is_tx, ch_addr, buf_size);
- break;
- case MOST_CH_ISOC_AVP:
- new_size = DIM_NormIsocBufferSize(buf_size, sub_size);
- if (new_size == 0) {
- pr_err("%s: invalid sub-buffer size or "
- "too small buffer size\n", hdm_ch->name);
- return -EINVAL;
- }
- ccfg->buffer_size = new_size;
- if (new_size != buf_size)
- pr_warn("%s: fixed buffer size (%d -> %d)\n",
- hdm_ch->name, buf_size, new_size);
- spin_lock_irqsave(&dim_lock, flags);
- hal_ret = DIM_InitIsoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
- break;
- case MOST_CH_SYNC:
- new_size = DIM_NormSyncBufferSize(buf_size, sub_size);
- if (new_size == 0) {
- pr_err("%s: invalid sub-buffer size or "
- "too small buffer size\n", hdm_ch->name);
- return -EINVAL;
- }
- ccfg->buffer_size = new_size;
- if (new_size != buf_size)
- pr_warn("%s: fixed buffer size (%d -> %d)\n",
- hdm_ch->name, buf_size, new_size);
- spin_lock_irqsave(&dim_lock, flags);
- hal_ret = DIM_InitSync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
- break;
- default:
- pr_err("%s: configure failed, bad channel type: %d\n",
- hdm_ch->name, ccfg->data_type);
- return -EINVAL;
- }
-
- if (hal_ret != DIM_NO_ERROR) {
- spin_unlock_irqrestore(&dim_lock, flags);
- pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
- hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
- return -ENODEV;
- }
-
- hdm_ch->data_type = ccfg->data_type;
- hdm_ch->direction = ccfg->direction;
- hdm_ch->is_initialized = true;
-
- if (hdm_ch->data_type == MOST_CH_ASYNC &&
- hdm_ch->direction == MOST_CH_TX &&
- dev->atx_idx < 0)
- dev->atx_idx = ch_idx;
-
- spin_unlock_irqrestore(&dim_lock, flags);
-
- return 0;
-}
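Each case above repeats the same normalize/warn/assign pattern before taking the spinlock; a hedged sketch of how that duplication could be factored out (hypothetical helper, not part of the original driver):

/* Hypothetical helper factoring out the repeated buffer-size fix-up above. */
static int fixup_buffer_size(struct most_channel_config *ccfg, u16 new_size,
                             const char *ch_name)
{
        if (new_size == 0) {
                pr_err("%s: invalid sub-buffer size or too small buffer size\n",
                       ch_name);
                return -EINVAL;
        }
        if (new_size != ccfg->buffer_size)
                pr_warn("%s: fixed buffer size (%d -> %d)\n",
                        ch_name, ccfg->buffer_size, new_size);
        ccfg->buffer_size = new_size;
        return 0;
}

Each case would then only compute new_size with the matching DIM_Norm*BufferSize() call and bail out if fixup_buffer_size() fails.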
-
-/**
- * enqueue - enqueue a buffer for data transfer
- * @most_iface: intended interface
- * @ch_idx: index of the channel the buffer is intended for
- * @mbo: pointer to the buffer object
- *
- * Push the buffer into pending_list and try to transfer one buffer from
- * pending_list. Return 0 on success, negative on failure.
- */
-static int enqueue(struct most_interface *most_iface, int ch_idx,
- struct mbo *mbo)
-{
- struct dim2_hdm *dev = iface_to_hdm(most_iface);
- struct hdm_channel *hdm_ch = dev->hch + ch_idx;
- unsigned long flags;
-
- BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
-
- if (!hdm_ch->is_initialized)
- return -EPERM;
-
- if (mbo->bus_address == 0)
- return -EFAULT;
-
- spin_lock_irqsave(&dim_lock, flags);
- list_add_tail(&mbo->list, &hdm_ch->pending_list);
- spin_unlock_irqrestore(&dim_lock, flags);
-
- (void)try_start_dim_transfer(hdm_ch);
-
- return 0;
-}
-
-/**
- * request_netinfo - triggers retrieving of network info
- * @most_iface: pointer to the interface
- * @ch_idx: corresponding channel index
- *
- * Send a command to the INIC which triggers retrieving of network info by
- * means of "Message exchange over MDP/MEP".
- */
-static void request_netinfo(struct most_interface *most_iface, int ch_idx)
-{
- struct dim2_hdm *dev = iface_to_hdm(most_iface);
- struct mbo *mbo;
- u8 *data;
-
- if (dev->atx_idx < 0) {
- pr_err("Async Tx Not initialized\n");
- return;
- }
-
- mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
- if (!mbo)
- return;
-
- mbo->buffer_length = 5;
-
- data = mbo->virt_address;
-
- data[0] = 0x00; /* PML High byte */
- data[1] = 0x03; /* PML Low byte */
- data[2] = 0x02; /* PMHL */
- data[3] = 0x08; /* FPH */
- data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */
-
- most_submit_mbo(mbo);
-}
-
-/**
- * poison_channel - poison buffers of a channel
- * @most_iface: pointer to the interface the channel to be poisoned belongs to
- * @ch_idx: corresponding channel index
- *
- * Destroy a channel and complete all the buffers in both started_list &
- * pending_list. Return 0 on success, negative on failure.
- */
-static int poison_channel(struct most_interface *most_iface, int ch_idx)
-{
- struct dim2_hdm *dev = iface_to_hdm(most_iface);
- struct hdm_channel *hdm_ch = dev->hch + ch_idx;
- unsigned long flags;
- u8 hal_ret;
- int ret = 0;
-
- BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
-
- if (!hdm_ch->is_initialized)
- return -EPERM;
-
- spin_lock_irqsave(&dim_lock, flags);
- hal_ret = DIM_DestroyChannel(&hdm_ch->ch);
- hdm_ch->is_initialized = false;
- if (ch_idx == dev->atx_idx)
- dev->atx_idx = -1;
- spin_unlock_irqrestore(&dim_lock, flags);
- if (hal_ret != DIM_NO_ERROR) {
- pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
- ret = -EFAULT;
- }
-
- complete_all_mbos(&hdm_ch->started_list);
- complete_all_mbos(&hdm_ch->pending_list);
-
- return ret;
-}
-
-/*
- * dim2_probe - dim2 probe handler
- * @pdev: platform device structure
- *
- * Register the dim2 interface with mostcore and initialize it.
- * Return 0 on success, negative on failure.
- */
-static int dim2_probe(struct platform_device *pdev)
-{
- struct dim2_hdm *dev;
- struct resource *res;
- int ret, i;
- struct kobject *kobj;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->atx_idx = -1;
-
- platform_set_drvdata(pdev, dev);
-#if defined(ENABLE_HDM_TEST)
- test_dev = dev;
-#else
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- pr_err("no memory region defined\n");
- ret = -ENOENT;
- goto err_free_dev;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- pr_err("failed to request mem region\n");
- ret = -EBUSY;
- goto err_free_dev;
- }
-
- dev->io_base = ioremap(res->start, resource_size(res));
- if (!dev->io_base) {
- pr_err("failed to ioremap\n");
- ret = -ENOMEM;
- goto err_release_mem;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- pr_err("failed to get irq\n");
- goto err_unmap_io;
- }
- dev->irq_ahb0 = ret;
-
- ret = request_irq(dev->irq_ahb0, dim2_ahb_isr, 0, "mlb_ahb0", dev);
- if (ret) {
- pr_err("failed to request IRQ: %d, err: %d\n", dev->irq_ahb0, ret);
- goto err_unmap_io;
- }
-#endif
- init_waitqueue_head(&dev->netinfo_waitq);
- dev->deliver_netinfo = 0;
- dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
- "dim2_netinfo");
- if (IS_ERR(dev->netinfo_task)) {
- ret = PTR_ERR(dev->netinfo_task);
- goto err_free_irq;
- }
-
- for (i = 0; i < DMA_CHANNELS; i++) {
- struct most_channel_capability *cap = dev->capabilities + i;
- struct hdm_channel *hdm_ch = dev->hch + i;
-
- INIT_LIST_HEAD(&hdm_ch->pending_list);
- INIT_LIST_HEAD(&hdm_ch->started_list);
- hdm_ch->is_initialized = false;
- snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);
-
- cap->name_suffix = hdm_ch->name;
- cap->direction = MOST_CH_RX | MOST_CH_TX;
- cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
- MOST_CH_ISOC_AVP | MOST_CH_SYNC;
- cap->num_buffers_packet = MAX_BUFFERS_PACKET;
- cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
- cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
- cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
- }
-
- {
- const char *fmt;
-
- if (sizeof(res->start) == sizeof(long long))
- fmt = "dim2-%016llx";
- else if (sizeof(res->start) == sizeof(long))
- fmt = "dim2-%016lx";
- else
- fmt = "dim2-%016x";
-
- snprintf(dev->name, sizeof(dev->name), fmt, res->start);
- }
-
- dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
- dev->most_iface.description = dev->name;
- dev->most_iface.num_channels = DMA_CHANNELS;
- dev->most_iface.channel_vector = dev->capabilities;
- dev->most_iface.configure = configure_channel;
- dev->most_iface.enqueue = enqueue;
- dev->most_iface.poison_channel = poison_channel;
- dev->most_iface.request_netinfo = request_netinfo;
-
- kobj = most_register_interface(&dev->most_iface);
- if (IS_ERR(kobj)) {
- ret = PTR_ERR(kobj);
- pr_err("failed to register MOST interface\n");
- goto err_stop_thread;
- }
-
- ret = dim2_sysfs_probe(&dev->bus, kobj);
- if (ret)
- goto err_unreg_iface;
-
- ret = startup_dim(pdev);
- if (ret) {
- pr_err("failed to initialize DIM2\n");
- goto err_destroy_bus;
- }
-
- return 0;
-
-err_destroy_bus:
- dim2_sysfs_destroy(&dev->bus);
-err_unreg_iface:
- most_deregister_interface(&dev->most_iface);
-err_stop_thread:
- kthread_stop(dev->netinfo_task);
-err_free_irq:
-#if !defined(ENABLE_HDM_TEST)
- free_irq(dev->irq_ahb0, dev);
-err_unmap_io:
- iounmap(dev->io_base);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
-err_free_dev:
-#endif
- kfree(dev);
-
- return ret;
-}
-
-/**
- * dim2_remove - dim2 remove handler
- * @pdev: platform device structure
- *
- * Unregister the interface from mostcore
- */
-static int dim2_remove(struct platform_device *pdev)
-{
- struct dim2_hdm *dev = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct dim2_platform_data *pdata = pdev->dev.platform_data;
- unsigned long flags;
-
- spin_lock_irqsave(&dim_lock, flags);
- DIM_Shutdown();
- spin_unlock_irqrestore(&dim_lock, flags);
-
- if (pdata && pdata->destroy)
- pdata->destroy(pdata);
-
- dim2_sysfs_destroy(&dev->bus);
- most_deregister_interface(&dev->most_iface);
- kthread_stop(dev->netinfo_task);
-#if !defined(ENABLE_HDM_TEST)
- free_irq(dev->irq_ahb0, dev);
- iounmap(dev->io_base);
- release_mem_region(res->start, resource_size(res));
-#endif
- kfree(dev);
- platform_set_drvdata(pdev, NULL);
-
- /*
- * break link to local platform_device_id struct
- * to prevent crash by unload platform device module
- */
- pdev->id_entry = NULL;
-
- return 0;
-}
-
-static struct platform_device_id dim2_id[] = {
- { "medialb_dim2" },
- { }, /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(platform, dim2_id);
-
-static struct platform_driver dim2_driver = {
- .probe = dim2_probe,
- .remove = dim2_remove,
- .id_table = dim2_id,
- .driver = {
- .name = "hdm_dim2",
- },
-};
-
-/**
- * dim2_hdm_init - Driver Registration Routine
- */
-static int __init dim2_hdm_init(void)
-{
- pr_info("dim2_hdm_init()\n");
- return platform_driver_register(&dim2_driver);
-}
-
-/**
- * dim2_hdm_exit - Driver Cleanup Routine
- **/
-static void __exit dim2_hdm_exit(void)
-{
- pr_info("dim2_hdm_exit()\n");
- platform_driver_unregister(&dim2_driver);
-}
-
-module_init(dim2_hdm_init);
-module_exit(dim2_hdm_exit);
-
-MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
-MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h
deleted file mode 100644
index 6e6883232809..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * dim2_hdm.h - MediaLB DIM2 HDM Header
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef DIM2_HDM_H
-#define DIM2_HDM_H
-
-struct device;
-
-/* platform dependent data for dim2 interface */
-struct dim2_platform_data {
- int (*init)(struct dim2_platform_data *pd, void *io_base, int clk_speed);
- void (*destroy)(struct dim2_platform_data *pd);
- void *priv;
-};
-
-#endif /* DIM2_HDM_H */
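A board that used this hook would have supplied the init/destroy callbacks through platform data; a hypothetical (board-specific) example, with the clock/PLL handling left as a stub:

/* Hypothetical board-side glue for the dim2_platform_data hooks above. */
static int my_board_dim2_init(struct dim2_platform_data *pd, void *io_base,
                              int clk_speed)
{
        /* enable MediaLB clocks / PLL for the requested speed here */
        return 0;
}

static void my_board_dim2_destroy(struct dim2_platform_data *pd)
{
        /* disable the clocks again */
}

static struct dim2_platform_data my_board_dim2_pdata = {
        .init    = my_board_dim2_init,
        .destroy = my_board_dim2_destroy,
};
/* attached as dev.platform_data of the "medialb_dim2" platform device */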
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
deleted file mode 100644
index 476f66f4c566..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * dim2_reg.h - Definitions for registers of DIM2
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef DIM2_OS62420_H
-#define DIM2_OS62420_H
-
-#include <linux/types.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-struct dim2_regs {
- /* 0x00 */ u32 MLBC0;
- /* 0x01 */ u32 rsvd0[1];
- /* 0x02 */ u32 MLBPC0;
- /* 0x03 */ u32 MS0;
- /* 0x04 */ u32 rsvd1[1];
- /* 0x05 */ u32 MS1;
- /* 0x06 */ u32 rsvd2[2];
- /* 0x08 */ u32 MSS;
- /* 0x09 */ u32 MSD;
- /* 0x0A */ u32 rsvd3[1];
- /* 0x0B */ u32 MIEN;
- /* 0x0C */ u32 rsvd4[1];
- /* 0x0D */ u32 MLBPC2;
- /* 0x0E */ u32 MLBPC1;
- /* 0x0F */ u32 MLBC1;
- /* 0x10 */ u32 rsvd5[0x10];
- /* 0x20 */ u32 HCTL;
- /* 0x21 */ u32 rsvd6[1];
- /* 0x22 */ u32 HCMR0;
- /* 0x23 */ u32 HCMR1;
- /* 0x24 */ u32 HCER0;
- /* 0x25 */ u32 HCER1;
- /* 0x26 */ u32 HCBR0;
- /* 0x27 */ u32 HCBR1;
- /* 0x28 */ u32 rsvd7[8];
- /* 0x30 */ u32 MDAT0;
- /* 0x31 */ u32 MDAT1;
- /* 0x32 */ u32 MDAT2;
- /* 0x33 */ u32 MDAT3;
- /* 0x34 */ u32 MDWE0;
- /* 0x35 */ u32 MDWE1;
- /* 0x36 */ u32 MDWE2;
- /* 0x37 */ u32 MDWE3;
- /* 0x38 */ u32 MCTL;
- /* 0x39 */ u32 MADR;
- /* 0x3A */ u32 rsvd8[0xB6];
- /* 0xF0 */ u32 ACTL;
- /* 0xF1 */ u32 rsvd9[3];
- /* 0xF4 */ u32 ACSR0;
- /* 0xF5 */ u32 ACSR1;
- /* 0xF6 */ u32 ACMR0;
- /* 0xF7 */ u32 ACMR1;
-};
-
-
-#define DIM2_MASK(n) (~((~(u32)0)<<(n)))
-
-enum {
- MLBC0_MLBLK_BIT = 7,
-
- MLBC0_MLBPEN_BIT = 5,
-
- MLBC0_MLBCLK_SHIFT = 2,
- MLBC0_MLBCLK_VAL_256FS = 0,
- MLBC0_MLBCLK_VAL_512FS = 1,
- MLBC0_MLBCLK_VAL_1024FS = 2,
- MLBC0_MLBCLK_VAL_2048FS = 3,
-
- MLBC0_FCNT_SHIFT = 15,
- MLBC0_FCNT_MASK = 7,
- MLBC0_FCNT_VAL_1FPSB = 0,
- MLBC0_FCNT_VAL_2FPSB = 1,
- MLBC0_FCNT_VAL_4FPSB = 2,
- MLBC0_FCNT_VAL_8FPSB = 3,
- MLBC0_FCNT_VAL_16FPSB = 4,
- MLBC0_FCNT_VAL_32FPSB = 5,
- MLBC0_FCNT_VAL_64FPSB = 6,
-
- MLBC0_MLBEN_BIT = 0,
-
- MIEN_CTX_BREAK_BIT = 29,
- MIEN_CTX_PE_BIT = 28,
- MIEN_CTX_DONE_BIT = 27,
-
- MIEN_CRX_BREAK_BIT = 26,
- MIEN_CRX_PE_BIT = 25,
- MIEN_CRX_DONE_BIT = 24,
-
- MIEN_ATX_BREAK_BIT = 22,
- MIEN_ATX_PE_BIT = 21,
- MIEN_ATX_DONE_BIT = 20,
-
- MIEN_ARX_BREAK_BIT = 19,
- MIEN_ARX_PE_BIT = 18,
- MIEN_ARX_DONE_BIT = 17,
-
- MIEN_SYNC_PE_BIT = 16,
-
- MIEN_ISOC_BUFO_BIT = 1,
- MIEN_ISOC_PE_BIT = 0,
-
- MLBC1_NDA_SHIFT = 8,
- MLBC1_NDA_MASK = 0xFF,
-
- MLBC1_CLKMERR_BIT = 7,
- MLBC1_LOCKERR_BIT = 6,
-
- ACTL_DMA_MODE_BIT = 2,
- ACTL_DMA_MODE_VAL_DMA_MODE_0 = 0,
- ACTL_DMA_MODE_VAL_DMA_MODE_1 = 1,
- ACTL_SCE_BIT = 0,
-
- HCTL_EN_BIT = 15
-};
-
-enum {
- CDT1_BS_ISOC_SHIFT = 0,
- CDT1_BS_ISOC_MASK = DIM2_MASK(9),
-
- CDT3_BD_SHIFT = 0,
- CDT3_BD_MASK = DIM2_MASK(12),
- CDT3_BD_ISOC_MASK = DIM2_MASK(13),
- CDT3_BA_SHIFT = 16,
-
- ADT0_CE_BIT = 15,
- ADT0_LE_BIT = 14,
- ADT0_PG_BIT = 13,
-
- ADT1_RDY_BIT = 15,
- ADT1_DNE_BIT = 14,
- ADT1_ERR_BIT = 13,
- ADT1_PS_BIT = 12,
- ADT1_MEP_BIT = 11,
- ADT1_BD_SHIFT = 0,
- ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
- ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
-
- CAT_MFE_BIT = 14,
-
- CAT_MT_BIT = 13,
-
- CAT_RNW_BIT = 12,
-
- CAT_CE_BIT = 11,
-
- CAT_CT_SHIFT = 8,
- CAT_CT_VAL_SYNC = 0,
- CAT_CT_VAL_CONTROL = 1,
- CAT_CT_VAL_ASYNC = 2,
- CAT_CT_VAL_ISOC = 3,
-
- CAT_CL_SHIFT = 0,
- CAT_CL_MASK = DIM2_MASK(6)
-};
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* DIM2_OS62420_H */
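The *_BIT, *_SHIFT and *_MASK definitions above compose register values in the usual way; a purely illustrative example (not a claim about how the HAL actually programs MLBC0):

/* Illustrative: composing an MLBC0 value from the definitions above. */
static inline u32 example_mlbc0(u32 mlb_clock_val, u32 fcnt_val)
{
        return (1u << MLBC0_MLBEN_BIT) |
               (1u << MLBC0_MLBPEN_BIT) |
               (mlb_clock_val << MLBC0_MLBCLK_SHIFT) |
               ((fcnt_val & MLBC0_FCNT_MASK) << MLBC0_FCNT_SHIFT);
}
/* e.g. example_mlbc0(MLBC0_MLBCLK_VAL_1024FS, MLBC0_FCNT_VAL_64FPSB) */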
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
deleted file mode 100644
index 8e331a286fc3..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * dim2_sysfs.c - MediaLB sysfs information
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include "dim2_sysfs.h"
-
-struct bus_attr {
- struct attribute attr;
- ssize_t (*show)(struct medialb_bus *bus, char *buf);
- ssize_t (*store)(struct medialb_bus *bus, const char *buf, size_t count);
-};
-
-static ssize_t state_show(struct medialb_bus *bus, char *buf)
-{
- bool state = dim2_sysfs_get_state_cb();
-
- return sprintf(buf, "%s\n", state ? "locked" : "");
-}
-
-static struct bus_attr state_attr = __ATTR_RO(state);
-
-static struct attribute *bus_default_attrs[] = {
- &state_attr.attr,
- NULL,
-};
-
-static struct attribute_group bus_attr_group = {
- .attrs = bus_default_attrs,
-};
-
-static void bus_kobj_release(struct kobject *kobj)
-{
-}
-
-static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct medialb_bus *bus =
- container_of(kobj, struct medialb_bus, kobj_group);
- struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
-
- if (!xattr->show)
- return -EIO;
-
- return xattr->show(bus, buf);
-}
-
-static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- ssize_t ret;
- struct medialb_bus *bus =
- container_of(kobj, struct medialb_bus, kobj_group);
- struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
-
- if (!xattr->store)
- return -EIO;
-
- ret = xattr->store(bus, buf, count);
- return ret;
-}
-
-static struct sysfs_ops const bus_kobj_sysfs_ops = {
- .show = bus_kobj_attr_show,
- .store = bus_kobj_attr_store,
-};
-
-static struct kobj_type bus_ktype = {
- .release = bus_kobj_release,
- .sysfs_ops = &bus_kobj_sysfs_ops,
-};
-
-int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj)
-{
- int err;
-
- kobject_init(&bus->kobj_group, &bus_ktype);
- err = kobject_add(&bus->kobj_group, parent_kobj, "bus");
- if (err) {
- pr_err("kobject_add() failed: %d\n", err);
- goto err_kobject_add;
- }
-
- err = sysfs_create_group(&bus->kobj_group, &bus_attr_group);
- if (err) {
- pr_err("sysfs_create_group() failed: %d\n", err);
- goto err_create_group;
- }
-
- return 0;
-
-err_create_group:
- kobject_put(&bus->kobj_group);
-
-err_kobject_add:
- return err;
-}
-
-void dim2_sysfs_destroy(struct medialb_bus *bus)
-{
- kobject_put(&bus->kobj_group);
-}
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.h b/drivers/staging/most/hdm-dim2/dim2_sysfs.h
deleted file mode 100644
index e719691035b0..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * dim2_sysfs.h - MediaLB sysfs information
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
-
-#ifndef DIM2_SYSFS_H
-#define DIM2_SYSFS_H
-
-
-#include <linux/kobject.h>
-
-
-struct medialb_bus {
- struct kobject kobj_group;
-};
-
-struct dim2_hdm;
-
-int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj);
-void dim2_sysfs_destroy(struct medialb_bus *bus);
-
-/*
- * callback,
- * must deliver MediaLB state as true if locked or false if unlocked
- */
-bool dim2_sysfs_get_state_cb(void);
-
-
-#endif /* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/hdm-i2c/Kconfig b/drivers/staging/most/hdm-i2c/Kconfig
deleted file mode 100644
index 6fd7983668ad..000000000000
--- a/drivers/staging/most/hdm-i2c/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# MOST I2C configuration
-#
-
-config HDM_I2C
- tristate "I2C HDM"
- depends on I2C
- ---help---
- Say Y here if you want to connect via I2C to a network transceiver.
-
- To compile this driver as a module, choose M here: the
- module will be called hdm_i2c.
diff --git a/drivers/staging/most/hdm-i2c/Makefile b/drivers/staging/most/hdm-i2c/Makefile
deleted file mode 100644
index 03a4a59b1f9f..000000000000
--- a/drivers/staging/most/hdm-i2c/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_HDM_I2C) += hdm_i2c.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/hdm-i2c/hdm_i2c.c b/drivers/staging/most/hdm-i2c/hdm_i2c.c
deleted file mode 100644
index cffea40d09d0..000000000000
--- a/drivers/staging/most/hdm-i2c/hdm_i2c.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * hdm_i2c.c - Hardware Dependent Module for I2C Interface
- *
- * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-
-#include <mostcore.h>
-
-enum { CH_RX, CH_TX, NUM_CHANNELS };
-
-#define MAX_BUFFERS_CONTROL 32
-#define MAX_BUF_SIZE_CONTROL 256
-
-/**
- * list_first_mbo - get the first mbo from a list
- * @ptr: the list head to take the mbo from.
- */
-#define list_first_mbo(ptr) \
- list_first_entry(ptr, struct mbo, list)
-
-
-/* IRQ / Polling option */
-static bool polling_req;
-module_param(polling_req, bool, S_IRUGO);
-MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
-
-/* Polling Rate */
-static int scan_rate = 100;
-module_param(scan_rate, int, 0644);
-MODULE_PARM_DESC(scan_rate, "Polling rate in times/sec. Default = 100");
-
-struct hdm_i2c {
- bool is_open[NUM_CHANNELS];
- bool polling_mode;
- struct most_interface most_iface;
- struct most_channel_capability capabilities[NUM_CHANNELS];
- struct i2c_client *client;
- struct rx {
- struct delayed_work dwork;
- wait_queue_head_t waitq;
- struct list_head list;
- struct mutex list_mutex;
- } rx;
- char name[64];
-};
-
-#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)
-
-/**
- * configure_channel - called from MOST core to configure a channel
- * @iface: interface the channel belongs to
- * @channel: channel to be configured
- * @channel_config: structure that holds the configuration information
- *
- * Return 0 on success, negative on failure.
- *
- * Receives configuration information from MOST core and initializes the
- * corresponding channel.
- */
-static int configure_channel(struct most_interface *most_iface,
- int ch_idx,
- struct most_channel_config *channel_config)
-{
- struct hdm_i2c *dev = to_hdm(most_iface);
-
- BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
- BUG_ON(dev->is_open[ch_idx]);
-
- if (channel_config->data_type != MOST_CH_CONTROL) {
- pr_err("bad data type for channel %d\n", ch_idx);
- return -EPERM;
- }
-
- if (channel_config->direction != dev->capabilities[ch_idx].direction) {
- pr_err("bad direction for channel %d\n", ch_idx);
- return -EPERM;
- }
-
- if (channel_config->direction == MOST_CH_RX) {
- if (dev->polling_mode)
- schedule_delayed_work(&dev->rx.dwork,
- msecs_to_jiffies(MSEC_PER_SEC / 4));
- }
- dev->is_open[ch_idx] = true;
-
- return 0;
-}
-
-/**
- * enqueue - called from MOST core to enqueue a buffer for data transfer
- * @iface: intended interface
- * @channel: ID of the channel the buffer is intended for
- * @mbo: pointer to the buffer object
- *
- * Return 0 on success, negative on failure.
- *
- * Transmit the data over I2C if it is a "write" request, or push the buffer
- * into the list if it is a "read" request.
- */
-static int enqueue(struct most_interface *most_iface,
- int ch_idx, struct mbo *mbo)
-{
- struct hdm_i2c *dev = to_hdm(most_iface);
- int ret;
-
- BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
- BUG_ON(!dev->is_open[ch_idx]);
-
- if (ch_idx == CH_RX) {
- /* RX */
- mutex_lock(&dev->rx.list_mutex);
- list_add_tail(&mbo->list, &dev->rx.list);
- mutex_unlock(&dev->rx.list_mutex);
- wake_up_interruptible(&dev->rx.waitq);
- } else {
- /* TX */
- ret = i2c_master_send(dev->client, mbo->virt_address,
- mbo->buffer_length);
- if (ret <= 0) {
- mbo->processed_length = 0;
- mbo->status = MBO_E_INVAL;
- } else {
- mbo->processed_length = mbo->buffer_length;
- mbo->status = MBO_SUCCESS;
- }
- mbo->complete(mbo);
- }
-
- return 0;
-}
-
-/**
- * poison_channel - called from MOST core to poison buffers of a channel
- * @iface: pointer to the interface the channel to be poisoned belongs to
- * @channel_id: corresponding channel ID
- *
- * Return 0 on success, negative on failure.
- *
- * If channel direction is RX, complete the buffers in list with
- * status MBO_E_CLOSE
- */
-static int poison_channel(struct most_interface *most_iface,
- int ch_idx)
-{
- struct hdm_i2c *dev = to_hdm(most_iface);
- struct mbo *mbo;
-
- BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
- BUG_ON(!dev->is_open[ch_idx]);
-
- dev->is_open[ch_idx] = false;
-
- if (ch_idx == CH_RX) {
- mutex_lock(&dev->rx.list_mutex);
- while (!list_empty(&dev->rx.list)) {
- mbo = list_first_mbo(&dev->rx.list);
- list_del(&mbo->list);
- mutex_unlock(&dev->rx.list_mutex);
-
- mbo->processed_length = 0;
- mbo->status = MBO_E_CLOSE;
- mbo->complete(mbo);
-
- mutex_lock(&dev->rx.list_mutex);
- }
- mutex_unlock(&dev->rx.list_mutex);
- wake_up_interruptible(&dev->rx.waitq);
- }
-
- return 0;
-}
-
-static void request_netinfo(struct most_interface *most_iface,
- int ch_idx)
-{
- pr_info("request_netinfo()\n");
-}
-
-static void do_rx_work(struct hdm_i2c *dev)
-{
- struct mbo *mbo;
- unsigned char msg[MAX_BUF_SIZE_CONTROL];
- int ret, ch_idx = CH_RX;
- uint16_t pml, data_size;
-
- /* Read PML (2 bytes) */
- ret = i2c_master_recv(dev->client, msg, 2);
- if (ret <= 0) {
- pr_err("Failed to receive PML\n");
- return;
- }
-
- pml = (msg[0] << 8) | msg[1];
- if (!pml)
- return;
-
- data_size = pml + 2;
-
- /* Read the whole message, including PML */
- ret = i2c_master_recv(dev->client, msg, data_size);
- if (ret <= 0) {
- pr_err("Failed to receive a Port Message\n");
- return;
- }
-
- for (;;) {
- /* Conditions to wait for: poisoned channel or free buffer
- available for reading */
- if (wait_event_interruptible(dev->rx.waitq,
- !dev->is_open[ch_idx] ||
- !list_empty(&dev->rx.list))) {
- pr_err("wait_event_interruptible() failed\n");
- return;
- }
-
- if (!dev->is_open[ch_idx])
- return;
-
- mutex_lock(&dev->rx.list_mutex);
-
- /* list may be empty if poison or remove is called */
- if (!list_empty(&dev->rx.list))
- break;
-
- mutex_unlock(&dev->rx.list_mutex);
- }
-
- mbo = list_first_mbo(&dev->rx.list);
- list_del(&mbo->list);
- mutex_unlock(&dev->rx.list_mutex);
-
- mbo->processed_length = min(data_size, mbo->buffer_length);
- memcpy(mbo->virt_address, msg, mbo->processed_length);
- mbo->status = MBO_SUCCESS;
- mbo->complete(mbo);
-}
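The framing assumed by do_rx_work() above is a two-byte big-endian Port Message Length (PML) followed by the message body, so a full read is PML + 2 bytes; a tiny helper stating that explicitly (illustrative only, not part of the original driver):

/* Illustrative: total transfer length for an I2C Port Message, per do_rx_work(). */
static inline u16 port_msg_total_len(const u8 *hdr)
{
        u16 pml = ((u16)hdr[0] << 8) | hdr[1];  /* big-endian PML */

        return pml ? pml + 2 : 0;               /* body plus 2-byte length field */
}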
-
-/**
- * pending_rx_work - Read pending messages through I2C
- * @work: definition of this work item
- *
- * Scheduled by the interrupt service routine most_irq_handler() or, in
- * polling mode, rescheduled from itself.
- */
-static void pending_rx_work(struct work_struct *work)
-{
- struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
-
- do_rx_work(dev);
-
- if (dev->polling_mode) {
- if (dev->is_open[CH_RX])
- schedule_delayed_work(&dev->rx.dwork,
- msecs_to_jiffies(MSEC_PER_SEC
- / scan_rate));
- } else
- enable_irq(dev->client->irq);
-}
-
-/*
- * most_irq_handler - Interrupt Service Routine
- * @irq: irq number
- * @_dev: private data
- *
- * Schedules a delayed work
- *
- * By default the interrupt line behavior is active low. Once the device has
- * generated an interrupt, it keeps the interrupt line low until the driver
- * clears the interrupt by reading the PMP message. Since the I2C read is done
- * in a work queue, the interrupt line must be disabled temporarily to avoid
- * the ISR being called repeatedly. The interrupt is re-enabled in the work
- * queue after the message has been read.
- *
- * Note: If we use the interrupt line in Falling edge mode, there is a
- * possibility to miss interrupts when ISR is getting executed.
- *
- */
-static irqreturn_t most_irq_handler(int irq, void *_dev)
-{
- struct hdm_i2c *dev = _dev;
-
- disable_irq_nosync(irq);
-
- schedule_delayed_work(&dev->rx.dwork, 0);
-
- return IRQ_HANDLED;
-}
-
-/*
- * i2c_probe - i2c probe handler
- * @client: i2c client device structure
- * @id: i2c client device id
- *
- * Return 0 on success, negative on failure.
- *
- * Register the i2c client device as a MOST interface
- */
-static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
-{
- struct hdm_i2c *dev;
- int ret, i;
- struct kobject *kobj;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- /* ID format: i2c-<bus>-<address> */
- snprintf(dev->name, sizeof(dev->name), "i2c-%d-%04x",
- client->adapter->nr, client->addr);
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- dev->is_open[i] = false;
- dev->capabilities[i].data_type = MOST_CH_CONTROL;
- dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
- dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
- }
- dev->capabilities[CH_RX].direction = MOST_CH_RX;
- dev->capabilities[CH_RX].name_suffix = "rx";
- dev->capabilities[CH_TX].direction = MOST_CH_TX;
- dev->capabilities[CH_TX].name_suffix = "tx";
-
- dev->most_iface.interface = ITYPE_I2C;
- dev->most_iface.description = dev->name;
- dev->most_iface.num_channels = NUM_CHANNELS;
- dev->most_iface.channel_vector = dev->capabilities;
- dev->most_iface.configure = configure_channel;
- dev->most_iface.enqueue = enqueue;
- dev->most_iface.poison_channel = poison_channel;
- dev->most_iface.request_netinfo = request_netinfo;
-
- INIT_LIST_HEAD(&dev->rx.list);
- mutex_init(&dev->rx.list_mutex);
- init_waitqueue_head(&dev->rx.waitq);
-
- INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);
-
- dev->client = client;
- i2c_set_clientdata(client, dev);
-
- kobj = most_register_interface(&dev->most_iface);
- if (IS_ERR(kobj)) {
- pr_err("Failed to register i2c as a MOST interface\n");
- kfree(dev);
- return PTR_ERR(kobj);
- }
-
- dev->polling_mode = polling_req || client->irq <= 0;
- if (!dev->polling_mode) {
- pr_info("Requesting IRQ: %d\n", client->irq);
- ret = request_irq(client->irq, most_irq_handler, 0,
- client->name, dev);
- if (ret) {
- pr_info("IRQ request failed: %d, "
- "falling back to polling\n", ret);
- dev->polling_mode = true;
- }
- }
-
- if (dev->polling_mode)
- pr_info("Using polling at rate: %d times/sec\n", scan_rate);
-
- return 0;
-}
-
-/*
- * i2c_remove - i2c remove handler
- * @client: i2c client device structure
- *
- * Return 0 on success.
- *
- * Unregister the i2c client device as a MOST interface
- */
-static int i2c_remove(struct i2c_client *client)
-{
- struct hdm_i2c *dev = i2c_get_clientdata(client);
- int i;
-
- if (!dev->polling_mode)
- free_irq(client->irq, dev);
-
- most_deregister_interface(&dev->most_iface);
-
- for (i = 0 ; i < NUM_CHANNELS; i++)
- if (dev->is_open[i])
- poison_channel(&dev->most_iface, i);
- cancel_delayed_work_sync(&dev->rx.dwork);
- kfree(dev);
-
- return 0;
-}
-
-static const struct i2c_device_id i2c_id[] = {
- { "most_i2c", 0 },
- { }, /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(i2c, i2c_id);
-
-static struct i2c_driver i2c_driver = {
- .driver = {
- .name = "hdm_i2c",
- .owner = THIS_MODULE,
- },
- .probe = i2c_probe,
- .remove = i2c_remove,
- .id_table = i2c_id,
-};
-
-module_i2c_driver(i2c_driver);
-
-MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
-MODULE_DESCRIPTION("I2C Hardware Dependent Module");
-MODULE_LICENSE("GPL");
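For this driver to bind, the transceiver had to be instantiated as an I2C client named "most_i2c"; a hypothetical board-file declaration (bus number, slave address and IRQ are placeholders):

/* Hypothetical instantiation of the "most_i2c" client from a board file. */
#include <linux/i2c.h>

static struct i2c_board_info most_i2c_board_info = {
        I2C_BOARD_INFO("most_i2c", 0x20),       /* slave address is a placeholder */
        .irq = 42,                              /* 0 or absent forces polling mode */
};

/* registered during board init with:
 *      i2c_register_board_info(1, &most_i2c_board_info, 1);
 */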
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
deleted file mode 100644
index ec1546312ee6..000000000000
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# MOST USB configuration
-#
-
-config HDM_USB
- tristate "USB HDM"
- depends on USB && NET
- select AIM_NETWORK
- ---help---
- Say Y here if you want to connect via USB to a network transceiver.
- This device driver depends on the networking AIM.
-
- To compile this driver as a module, choose M here: the
- module will be called hdm_usb.
diff --git a/drivers/staging/most/hdm-usb/Makefile b/drivers/staging/most/hdm-usb/Makefile
deleted file mode 100644
index 6bbacb41e94b..000000000000
--- a/drivers/staging/most/hdm-usb/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_HDM_USB) += hdm_usb.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
-ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
deleted file mode 100644
index fcd75590146f..000000000000
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ /dev/null
@@ -1,1433 +0,0 @@
-/*
- * hdm_usb.c - Hardware dependent module for USB
- *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/uaccess.h>
-#include "mostcore.h"
-#include "networking.h"
-
-#define USB_MTU 512
-#define NO_ISOCHRONOUS_URB 0
-#define AV_PACKETS_PER_XACT 2
-#define BUF_CHAIN_SIZE 0xFFFF
-#define MAX_NUM_ENDPOINTS 30
-#define MAX_SUFFIX_LEN 10
-#define MAX_STRING_LEN 80
-#define MAX_BUF_SIZE 0xFFFF
-#define CEILING(x, y) (((x) + (y) - 1) / (y))
-
-#define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
-#define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
-#define USB_DEV_ID_INIC 0xCF18 /* PID: USB INIC */
-#define HW_RESYNC 0x0000
-/* DRCI Addresses */
-#define DRCI_REG_NI_STATE 0x0100
-#define DRCI_REG_PACKET_BW 0x0101
-#define DRCI_REG_NODE_ADDR 0x0102
-#define DRCI_REG_NODE_POS 0x0103
-#define DRCI_REG_MEP_FILTER 0x0140
-#define DRCI_REG_HASH_TBL0 0x0141
-#define DRCI_REG_HASH_TBL1 0x0142
-#define DRCI_REG_HASH_TBL2 0x0143
-#define DRCI_REG_HASH_TBL3 0x0144
-#define DRCI_REG_HW_ADDR_HI 0x0145
-#define DRCI_REG_HW_ADDR_MI 0x0146
-#define DRCI_REG_HW_ADDR_LO 0x0147
-#define DRCI_REG_BASE 0x1100
-#define DRCI_COMMAND 0x02
-#define DRCI_READ_REQ 0xA0
-#define DRCI_WRITE_REQ 0xA1
-
-/**
- * struct buf_anchor - used to create a list of pending URBs
- * @urb: pointer to USB request block
- * @clear_work_obj: work item used to clear a halted endpoint
- * @list: list head for the channel's anchor list
- * @urb_compl: completion object signalled when the URB is given back
- */
-struct buf_anchor {
- struct urb *urb;
- struct work_struct clear_work_obj;
- struct list_head list;
- struct completion urb_compl;
-};
-#define to_buf_anchor(w) container_of(w, struct buf_anchor, clear_work_obj)
-
-/**
- * struct most_dci_obj - Direct Communication Interface
- * @kobj: position in sysfs
- * @usb_device: pointer to the usb device
- */
-struct most_dci_obj {
- struct kobject kobj;
- struct usb_device *usb_device;
-};
-#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
-
-/**
- * struct most_dev - holds all usb interface specific stuff
- * @parent: parent object in sysfs
- * @usb_device: pointer to usb device
- * @iface: hardware interface
- * @cap: channel capabilities
- * @conf: channel configuration
- * @dci: direct communication interface of hardware
- * @hw_addr: MAC address of hardware
- * @ep_address: endpoint address table
- * @link_stat: link status of hardware
- * @description: device description
- * @suffix: suffix for channel name
- * @anchor_list_lock: locks list access
- * @padding_active: indicates channel uses padding
- * @is_channel_healthy: health status table of each channel
- * @anchor_list: list of anchored items
- * @io_mutex: synchronize I/O with disconnect
- * @link_stat_timer: timer for link status reports
- * @poll_work_obj: work for polling link status
- */
-struct most_dev {
- struct kobject *parent;
- struct usb_device *usb_device;
- struct most_interface iface;
- struct most_channel_capability *cap;
- struct most_channel_config *conf;
- struct most_dci_obj *dci;
- u8 hw_addr[6];
- u8 *ep_address;
- u16 link_stat;
- char description[MAX_STRING_LEN];
- char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
- spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
- bool padding_active[MAX_NUM_ENDPOINTS];
- bool is_channel_healthy[MAX_NUM_ENDPOINTS];
- struct list_head *anchor_list;
- struct mutex io_mutex;
- struct timer_list link_stat_timer;
- struct work_struct poll_work_obj;
-};
-#define to_mdev(d) container_of(d, struct most_dev, iface)
-#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
-
-static struct workqueue_struct *schedule_usb_work;
-static void wq_clear_halt(struct work_struct *wq_obj);
-static void wq_netinfo(struct work_struct *wq_obj);
-
-/**
- * drci_rd_reg - read a DCI register
- * @dev: usb device
- * @reg: register address
- * @buf: buffer to store data
- *
- * This reads data from the INIC's direct register communication interface
- */
-static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
-{
- int retval;
- u16 *dma_buf = kzalloc(sizeof(u16), GFP_KERNEL);
- u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-
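- /* usb_control_msg() needs a kmalloc'ed, DMA-able bounce buffer; stack memory must not be used */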
- if (!dma_buf)
- return -ENOMEM;
-
- retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- DRCI_READ_REQ, req_type,
- 0x0000,
- reg, dma_buf, sizeof(u16), 5 * HZ);
- *buf = le16_to_cpu(*dma_buf);
- kfree(dma_buf);
-
- return retval;
-}
-
-/**
- * drci_wr_reg - write a DCI register
- * @dev: usb device
- * @reg: register address
- * @data: data to write
- *
- * This writes data to the INIC's direct register communication interface
- */
-static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
-{
- return usb_control_msg(dev,
- usb_sndctrlpipe(dev, 0),
- DRCI_WRITE_REQ,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- data,
- reg,
- NULL,
- 0,
- 5 * HZ);
-}
-
-/**
- * free_anchored_buffers - free device's anchored items
- * @mdev: the device
- * @channel: channel ID
- */
-static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
-{
- struct mbo *mbo;
- struct buf_anchor *anchor, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel], list) {
- struct urb *urb = anchor->urb;
-
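- /* drop the lock here: usb_kill_urb() may sleep and the URB completion handlers take this lock */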
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- if (likely(urb)) {
- mbo = urb->context;
- if (!irqs_disabled()) {
- usb_kill_urb(urb);
- } else {
- usb_unlink_urb(urb);
- wait_for_completion(&anchor->urb_compl);
- }
- if ((mbo) && (mbo->complete)) {
- mbo->status = MBO_E_CLOSE;
- mbo->processed_length = 0;
- mbo->complete(mbo);
- }
- usb_free_urb(urb);
- }
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- kfree(anchor);
- }
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
-}
-
-/**
- * get_stream_frame_size - calculate frame size of current configuration
- * @cfg: channel configuration
- */
-static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
-{
- unsigned int frame_size = 0;
- unsigned int sub_size = cfg->subbuffer_size;
-
- if (!sub_size) {
- pr_warn("Misconfig: Subbuffer size zero.\n");
- return frame_size;
- }
- switch (cfg->data_type) {
- case MOST_CH_ISOC_AVP:
- frame_size = AV_PACKETS_PER_XACT * sub_size;
- break;
- case MOST_CH_SYNC:
- if (cfg->packets_per_xact == 0) {
- pr_warn("Misconfig: Packets per XACT zero\n");
- frame_size = 0;
- } else if (cfg->packets_per_xact == 0xFF)
- frame_size = (USB_MTU / sub_size) * sub_size;
- else
- frame_size = cfg->packets_per_xact * sub_size;
- break;
- default:
- pr_warn("Query frame size of non-streaming channel\n");
- break;
- }
- return frame_size;
-}
-
-/**
- * hdm_poison_channel - mark buffers of this channel as invalid
- * @iface: pointer to the interface
- * @channel: channel ID
- *
- * This unlinks all URBs submitted to the HCD,
- * calls the associated completion function of the core and removes
- * them from the list.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int hdm_poison_channel(struct most_interface *iface, int channel)
-{
- struct most_dev *mdev;
-
- if (unlikely(!iface)) {
- pr_warn("Poison: Bad interface.\n");
- return -EIO;
- }
- mdev = to_mdev(iface);
- if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
- dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
- return -ECHRNG;
- }
-
- mdev->is_channel_healthy[channel] = false;
-
- mutex_lock(&mdev->io_mutex);
- free_anchored_buffers(mdev, channel);
- if (mdev->padding_active[channel])
- mdev->padding_active[channel] = false;
-
- if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
- del_timer_sync(&mdev->link_stat_timer);
- cancel_work_sync(&mdev->poll_work_obj);
- }
- mutex_unlock(&mdev->io_mutex);
- return 0;
-}
-
-/**
- * hdm_add_padding - add padding bytes
- * @mdev: most device
- * @channel: channel ID
- * @mbo: buffer object
- *
- * This inserts the INIC hardware specific padding bytes into a streaming
- * channel's buffer
- */
-static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
-{
- struct most_channel_config *conf = &mdev->conf[channel];
- unsigned int j, num_frames, frame_size;
- u16 rd_addr, wr_addr;
-
- frame_size = get_stream_frame_size(conf);
- if (!frame_size)
- return -EIO;
- num_frames = mbo->buffer_length / frame_size;
-
- if (num_frames < 1) {
- dev_err(&mdev->usb_device->dev,
- "Missed minimal transfer unit.\n");
- return -EIO;
- }
-
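- /* spread the frames out to USB_MTU-sized slots, last frame first, so not-yet-moved frames are never overwritten */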
- for (j = 1; j < num_frames; j++) {
- wr_addr = (num_frames - j) * USB_MTU;
- rd_addr = (num_frames - j) * frame_size;
- memmove(mbo->virt_address + wr_addr,
- mbo->virt_address + rd_addr,
- frame_size);
- }
- mbo->buffer_length = num_frames * USB_MTU;
- return 0;
-}
-
-/**
- * hdm_remove_padding - remove padding bytes
- * @mdev: most device
- * @channel: channel ID
- * @mbo: buffer object
- *
- * This takes the INIC hardware specific padding bytes off a streaming
- * channel's buffer.
- */
-static int hdm_remove_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
-{
- unsigned int j, num_frames, frame_size;
- struct most_channel_config *const conf = &mdev->conf[channel];
-
- frame_size = get_stream_frame_size(conf);
- if (!frame_size)
- return -EIO;
- num_frames = mbo->processed_length / USB_MTU;
-
- for (j = 1; j < num_frames; j++)
- memmove(mbo->virt_address + frame_size * j,
- mbo->virt_address + USB_MTU * j,
- frame_size);
-
- mbo->processed_length = frame_size * num_frames;
- return 0;
-}
-
-/**
- * hdm_write_completion - completion function for submitted Tx URBs
- * @urb: the URB that has been completed
- *
- * This checks the status of the completed URB. In case the URB has been
- * unlinked before, it is immediately freed. On any other error the MBO
- * transfer flag is set. On success it frees allocated resources and calls
- * the completion function.
- *
- * Context: interrupt!
- */
-static void hdm_write_completion(struct urb *urb)
-{
- struct mbo *mbo;
- struct buf_anchor *anchor;
- struct most_dev *mdev;
- struct device *dev;
- unsigned int channel;
- unsigned long flags;
-
- mbo = urb->context;
- anchor = mbo->priv;
- mdev = to_mdev(mbo->ifp);
- channel = mbo->hdm_channel_id;
- dev = &mdev->usb_device->dev;
-
- if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
- (!mdev->is_channel_healthy[channel])) {
- complete(&anchor->urb_compl);
- return;
- }
-
- if (unlikely(urb->status && !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN))) {
- mbo->processed_length = 0;
- switch (urb->status) {
- case -EPIPE:
- dev_warn(dev, "Broken OUT pipe detected\n");
- most_stop_enqueue(&mdev->iface, channel);
- mbo->status = MBO_E_INVAL;
- usb_unlink_urb(urb);
- INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- queue_work(schedule_usb_work, &anchor->clear_work_obj);
- return;
- case -ENODEV:
- case -EPROTO:
- mbo->status = MBO_E_CLOSE;
- break;
- default:
- mbo->status = MBO_E_INVAL;
- break;
- }
- } else {
- mbo->status = MBO_SUCCESS;
- mbo->processed_length = urb->actual_length;
- }
-
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- kfree(anchor);
-
- if (likely(mbo->complete))
- mbo->complete(mbo);
- usb_free_urb(urb);
-}
-
-/**
- * hdm_read_completion - completion function for submitted Rx URBs
- * @urb: the URB that has been completed
- *
- * This checks the status of the completed URB. In case the URB has been
- * unlinked before, it is immediately freed. On any other error the MBO transfer
- * flag is set. On success it frees allocated resources, removes the
- * padding bytes, if necessary, and calls the completion function.
- *
- * Context: interrupt!
- *
- * **************************************************************************
- * Error codes returned by in urb->status
- * or in iso_frame_desc[n].status (for ISO)
- * *************************************************************************
- *
- * USB device drivers may only test urb status values in completion handlers.
- * This is because otherwise there would be a race between HCDs updating
- * these values on one CPU, and device drivers testing them on another CPU.
- *
- * A transfer's actual_length may be positive even when an error has been
- * reported. That's because transfers often involve several packets, so that
- * one or more packets could finish before an error stops further endpoint I/O.
- *
- * For isochronous URBs, the urb status value is non-zero only if the URB is
- * unlinked, the device is removed, the host controller is disabled or the total
- * transferred length is less than the requested length and the URB_SHORT_NOT_OK
- * flag is set. Completion handlers for isochronous URBs should only see
- * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
- * Individual frame descriptor status fields may report more status codes.
- *
- *
- * 0 Transfer completed successfully
- *
- * -ENOENT URB was synchronously unlinked by usb_unlink_urb
- *
- * -EINPROGRESS URB still pending, no results yet
- * (That is, if drivers see this it's a bug.)
- *
- * -EPROTO (*, **) a) bitstuff error
- * b) no response packet received within the
- * prescribed bus turn-around time
- * c) unknown USB error
- *
- * -EILSEQ (*, **) a) CRC mismatch
- * b) no response packet received within the
- * prescribed bus turn-around time
- * c) unknown USB error
- *
- * Note that often the controller hardware does not
- * distinguish among cases a), b), and c), so a
- * driver cannot tell whether there was a protocol
- * error, a failure to respond (often caused by
- * device disconnect), or some other fault.
- *
- * -ETIME (**) No response packet received within the prescribed
- * bus turn-around time. This error may instead be
- * reported as -EPROTO or -EILSEQ.
- *
- * -ETIMEDOUT Synchronous USB message functions use this code
- * to indicate timeout expired before the transfer
- * completed, and no other error was reported by HC.
- *
- * -EPIPE (**) Endpoint stalled. For non-control endpoints,
- * reset this status with usb_clear_halt().
- *
- * -ECOMM During an IN transfer, the host controller
- * received data from an endpoint faster than it
- * could be written to system memory
- *
- * -ENOSR During an OUT transfer, the host controller
- * could not retrieve data from system memory fast
- * enough to keep up with the USB data rate
- *
- * -EOVERFLOW (*) The amount of data returned by the endpoint was
- * greater than either the max packet size of the
- * endpoint or the remaining buffer size. "Babble".
- *
- * -EREMOTEIO The data read from the endpoint did not fill the
- * specified buffer, and URB_SHORT_NOT_OK was set in
- * urb->transfer_flags.
- *
- * -ENODEV Device was removed. Often preceded by a burst of
- * other errors, since the hub driver doesn't detect
- * device removal events immediately.
- *
- * -EXDEV ISO transfer only partially completed
- * (only set in iso_frame_desc[n].status, not urb->status)
- *
- * -EINVAL ISO madness, if this happens: Log off and go home
- *
- * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
- *
- * -ESHUTDOWN The device or host controller has been disabled due
- * to some problem that could not be worked around,
- * such as a physical disconnect.
- *
- *
- * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
- * hardware problems such as bad devices (including firmware) or cables.
- *
- * (**) This is also one of several codes that different kinds of host
- * controller use to indicate a transfer has failed because of device
- * disconnect. In the interval before the hub driver starts disconnect
- * processing, devices may receive such fault reports for every request.
- *
- * See <https://www.kernel.org/doc/Documentation/usb/error-codes.txt>
- */
-static void hdm_read_completion(struct urb *urb)
-{
- struct mbo *mbo;
- struct buf_anchor *anchor;
- struct most_dev *mdev;
- struct device *dev;
- unsigned long flags;
- unsigned int channel;
-
- mbo = urb->context;
- anchor = mbo->priv;
- mdev = to_mdev(mbo->ifp);
- channel = mbo->hdm_channel_id;
- dev = &mdev->usb_device->dev;
-
- if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
- (!mdev->is_channel_healthy[channel])) {
- complete(&anchor->urb_compl);
- return;
- }
-
- if (unlikely(urb->status && !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN))) {
- mbo->processed_length = 0;
- switch (urb->status) {
- case -EPIPE:
- dev_warn(dev, "Broken IN pipe detected\n");
- mbo->status = MBO_E_INVAL;
- usb_unlink_urb(urb);
- INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- queue_work(schedule_usb_work, &anchor->clear_work_obj);
- return;
- case -ENODEV:
- case -EPROTO:
- mbo->status = MBO_E_CLOSE;
- break;
- case -EOVERFLOW:
- dev_warn(dev, "Babble on IN pipe detected\n");
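- /* fall through: a babbling endpoint also invalidates the MBO */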
- default:
- mbo->status = MBO_E_INVAL;
- break;
- }
- } else {
- mbo->processed_length = urb->actual_length;
- if (!mdev->padding_active[channel]) {
- mbo->status = MBO_SUCCESS;
- } else {
- if (hdm_remove_padding(mdev, channel, mbo)) {
- mbo->processed_length = 0;
- mbo->status = MBO_E_INVAL;
- } else {
- mbo->status = MBO_SUCCESS;
- }
- }
- }
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- kfree(anchor);
-
- if (likely(mbo->complete))
- mbo->complete(mbo);
- usb_free_urb(urb);
-}
-
-/**
- * hdm_enqueue - receive a buffer to be used for data transfer
- * @iface: interface to enqueue to
- * @channel: ID of the channel
- * @mbo: pointer to the buffer object
- *
- * This allocates a new URB and fills it according to the channel
- * that is being used for transmission of data. Before the URB is
- * submitted it is stored in the private anchor list.
- *
- * Returns 0 on success. On any error the URB is freed and an error code
- * is returned.
- *
- * Context: Could in _some_ cases be interrupt!
- */
-static int hdm_enqueue(struct most_interface *iface, int channel, struct mbo *mbo)
-{
- struct most_dev *mdev;
- struct buf_anchor *anchor;
- struct most_channel_config *conf;
- struct device *dev;
- int retval = 0;
- struct urb *urb;
- unsigned long flags;
- unsigned long length;
- void *virt_address;
-
- if (unlikely(!iface || !mbo))
- return -EIO;
- if (unlikely(iface->num_channels <= channel) || (channel < 0))
- return -ECHRNG;
-
- mdev = to_mdev(iface);
- conf = &mdev->conf[channel];
- dev = &mdev->usb_device->dev;
-
- if (!mdev->usb_device)
- return -ENODEV;
-
- urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
- if (!urb) {
- dev_err(dev, "Failed to allocate URB\n");
- return -ENOMEM;
- }
-
- anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
- if (!anchor) {
- retval = -ENOMEM;
- goto _error;
- }
-
- anchor->urb = urb;
- init_completion(&anchor->urb_compl);
- mbo->priv = anchor;
-
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
-
- if ((mdev->padding_active[channel]) &&
- (conf->direction & MOST_CH_TX))
- if (hdm_add_padding(mdev, channel, mbo)) {
- retval = -EIO;
- goto _error_1;
- }
-
- urb->transfer_dma = mbo->bus_address;
- virt_address = mbo->virt_address;
- length = mbo->buffer_length;
-
- if (conf->direction & MOST_CH_TX) {
- usb_fill_bulk_urb(urb, mdev->usb_device,
- usb_sndbulkpipe(mdev->usb_device,
- mdev->ep_address[channel]),
- virt_address,
- length,
- hdm_write_completion,
- mbo);
- if (conf->data_type != MOST_CH_ISOC_AVP)
- urb->transfer_flags |= URB_ZERO_PACKET;
- } else {
- usb_fill_bulk_urb(urb, mdev->usb_device,
- usb_rcvbulkpipe(mdev->usb_device,
- mdev->ep_address[channel]),
- virt_address,
- length + conf->extra_len,
- hdm_read_completion,
- mbo);
- }
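- /* the core supplies a coherently mapped buffer, so skip the HCD's own DMA mapping */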
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- retval = usb_submit_urb(urb, GFP_KERNEL);
- if (retval) {
- dev_err(dev, "URB submit failed with error %d.\n", retval);
- goto _error_1;
- }
- return 0;
-
-_error_1:
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- kfree(anchor);
-_error:
- usb_free_urb(urb);
- return retval;
-}
-
-/**
- * hdm_configure_channel - receive channel configuration from core
- * @iface: interface
- * @channel: channel ID
- * @conf: structure that holds the configuration information
- */
-static int hdm_configure_channel(struct most_interface *iface, int channel,
- struct most_channel_config *conf)
-{
- unsigned int num_frames;
- unsigned int frame_size;
- unsigned int temp_size;
- unsigned int tail_space;
- struct most_dev *mdev;
- struct device *dev;
-
- if (unlikely(!iface || !conf)) {
- pr_err("Bad interface or config pointer.\n");
- return -EINVAL;
- }
- mdev = to_mdev(iface);
- dev = &mdev->usb_device->dev;
-
- if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
- dev_err(dev, "Channel ID out of range.\n");
- return -EINVAL;
- }
- mdev->is_channel_healthy[channel] = true;
-
- if ((!conf->num_buffers) || (!conf->buffer_size)) {
- dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
- return -EINVAL;
- }
-
- if (!(conf->data_type == MOST_CH_SYNC) &&
- !((conf->data_type == MOST_CH_ISOC_AVP) &&
- (conf->packets_per_xact != 0xFF))) {
- mdev->padding_active[channel] = false;
- goto exit;
- }
-
- mdev->padding_active[channel] = true;
- temp_size = conf->buffer_size;
-
- frame_size = get_stream_frame_size(conf);
- if ((frame_size == 0) || (frame_size > USB_MTU)) {
- dev_warn(dev, "Misconfig: frame size wrong\n");
- return -EINVAL;
- }
-
- if (conf->buffer_size % frame_size) {
- u16 tmp_val;
-
- tmp_val = conf->buffer_size / frame_size;
- conf->buffer_size = tmp_val * frame_size;
- dev_notice(dev,
- "Channel %d - rounding buffer size to %d bytes, channel config says %d bytes\n",
- channel, conf->buffer_size, temp_size);
- }
-
- num_frames = conf->buffer_size / frame_size;
- tail_space = num_frames * (USB_MTU - frame_size);
- temp_size += tail_space;
-
- /* calculate extra length to comply w/ HW padding */
- conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
- - conf->buffer_size;
-exit:
- mdev->conf[channel] = *conf;
- return 0;
-}
-
-/**
- * hdm_update_netinfo - retrieve latest networking information
- * @mdev: device interface
- *
- * This triggers the USB vendor requests to read the hardware address and
- * the current link status of the attached device.
- */
-static int hdm_update_netinfo(struct most_dev *mdev)
-{
- struct usb_device *usb_device = mdev->usb_device;
- struct device *dev = &usb_device->dev;
- u16 hi, mi, lo, link;
-
- if (!is_valid_ether_addr(mdev->hw_addr)) {
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
- return -1;
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
- return -1;
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
- dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
- return -1;
- }
-
- mutex_lock(&mdev->io_mutex);
- mdev->hw_addr[0] = hi >> 8;
- mdev->hw_addr[1] = hi;
- mdev->hw_addr[2] = mi >> 8;
- mdev->hw_addr[3] = mi;
- mdev->hw_addr[4] = lo >> 8;
- mdev->hw_addr[5] = lo;
- mutex_unlock(&mdev->io_mutex);
- }
-
- if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
- dev_err(dev, "Vendor request \"link status\" failed\n");
- return -1;
- }
-
- mutex_lock(&mdev->io_mutex);
- mdev->link_stat = link;
- mutex_unlock(&mdev->io_mutex);
- return 0;
-}
-
-/**
- * hdm_request_netinfo - request network information
- * @iface: pointer to interface
- * @channel: channel ID
- *
- * This is used as a trigger to set up the link status timer that
- * polls for the NI state of the INIC every 2 seconds.
- */
-static void hdm_request_netinfo(struct most_interface *iface, int channel)
-{
- struct most_dev *mdev;
-
- BUG_ON(!iface);
- mdev = to_mdev(iface);
- mdev->link_stat_timer.expires = jiffies + HZ;
- mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
-}
-
-/**
- * link_stat_timer_handler - add work to link_stat work queue
- * @data: pointer to USB device instance
- *
- * The handler runs in interrupt context. That's why we need to defer the
- * tasks to a work queue.
- */
-static void link_stat_timer_handler(unsigned long data)
-{
- struct most_dev *mdev = (struct most_dev *)data;
-
- queue_work(schedule_usb_work, &mdev->poll_work_obj);
- mdev->link_stat_timer.expires = jiffies + (2 * HZ);
- add_timer(&mdev->link_stat_timer);
-}
-
-/**
- * wq_netinfo - work queue function
- * @wq_obj: object that holds data for our deferred work to do
- *
- * This retrieves the network interface status of the USB INIC
- * and compares it with the current status. If the status has
- * changed, it updates the status of the core.
- */
-static void wq_netinfo(struct work_struct *wq_obj)
-{
- struct most_dev *mdev;
- int i, prev_link_stat;
- u8 prev_hw_addr[6];
-
- mdev = to_mdev_from_work(wq_obj);
- prev_link_stat = mdev->link_stat;
-
- for (i = 0; i < 6; i++)
- prev_hw_addr[i] = mdev->hw_addr[i];
-
- if (hdm_update_netinfo(mdev) < 0)
- return;
- if ((prev_link_stat != mdev->link_stat) ||
- (prev_hw_addr[0] != mdev->hw_addr[0]) ||
- (prev_hw_addr[1] != mdev->hw_addr[1]) ||
- (prev_hw_addr[2] != mdev->hw_addr[2]) ||
- (prev_hw_addr[3] != mdev->hw_addr[3]) ||
- (prev_hw_addr[4] != mdev->hw_addr[4]) ||
- (prev_hw_addr[5] != mdev->hw_addr[5]))
- most_deliver_netinfo(&mdev->iface, mdev->link_stat,
- &mdev->hw_addr[0]);
-}
-
-/**
- * wq_clear_halt - work queue function
- * @wq_obj: work_struct object to execute
- *
- * This sends a clear_halt to the given USB pipe.
- */
-static void wq_clear_halt(struct work_struct *wq_obj)
-{
- struct buf_anchor *anchor;
- struct most_dev *mdev;
- struct mbo *mbo;
- struct urb *urb;
- unsigned int channel;
- unsigned long flags;
-
- anchor = to_buf_anchor(wq_obj);
- urb = anchor->urb;
- mbo = urb->context;
- mdev = to_mdev(mbo->ifp);
- channel = mbo->hdm_channel_id;
-
- if (usb_clear_halt(urb->dev, urb->pipe))
- dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
-
- usb_free_urb(urb);
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
-
- if (likely(mbo->complete))
- mbo->complete(mbo);
- if (mdev->conf[channel].direction & MOST_CH_TX)
- most_resume_enqueue(&mdev->iface, channel);
-
- kfree(anchor);
-}
-
-/**
- * hdm_usb_fops - file operation table for USB driver
- */
-static const struct file_operations hdm_usb_fops = {
- .owner = THIS_MODULE,
-};
-
-/**
- * usb_device_id - ID table for HCD device probing
- */
-static struct usb_device_id usbid[] = {
- { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
- { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_INIC), },
- { } /* Terminating entry */
-};
-
-#define MOST_DCI_RO_ATTR(_name) \
- struct most_dci_attribute most_dci_attr_##_name = \
- __ATTR(_name, S_IRUGO, show_value, NULL)
-
-#define MOST_DCI_ATTR(_name) \
- struct most_dci_attribute most_dci_attr_##_name = \
- __ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
-
-/**
- * struct most_dci_attribute - to access the attributes of a dci object
- * @attr: attributes of a dci object
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_dci_attribute {
- struct attribute attr;
- ssize_t (*show)(struct most_dci_obj *d,
- struct most_dci_attribute *attr,
- char *buf);
- ssize_t (*store)(struct most_dci_obj *d,
- struct most_dci_attribute *attr,
- const char *buf,
- size_t count);
-};
-#define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
-
-
-/**
- * dci_attr_show - show function for dci object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct most_dci_attribute *dci_attr = to_dci_attr(attr);
- struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
- if (!dci_attr->show)
- return -EIO;
-
- return dci_attr->show(dci_obj, dci_attr, buf);
-}
-
-/**
- * dci_attr_store - store function for dci object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t dci_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_dci_attribute *dci_attr = to_dci_attr(attr);
- struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
- if (!dci_attr->store)
- return -EIO;
-
- return dci_attr->store(dci_obj, dci_attr, buf, len);
-}
-
-static const struct sysfs_ops most_dci_sysfs_ops = {
- .show = dci_attr_show,
- .store = dci_attr_store,
-};
-
-/**
- * most_dci_release - release function for dci object
- * @kobj: pointer to kobject
- *
- * This frees the memory allocated for the dci object
- */
-static void most_dci_release(struct kobject *kobj)
-{
- struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
- kfree(dci_obj);
-}
-
-static ssize_t show_value(struct most_dci_obj *dci_obj,
- struct most_dci_attribute *attr, char *buf)
-{
- u16 tmp_val;
- u16 reg_addr;
- int err;
-
- if (!strcmp(attr->attr.name, "ni_state"))
- reg_addr = DRCI_REG_NI_STATE;
- else if (!strcmp(attr->attr.name, "packet_bandwidth"))
- reg_addr = DRCI_REG_PACKET_BW;
- else if (!strcmp(attr->attr.name, "node_address"))
- reg_addr = DRCI_REG_NODE_ADDR;
- else if (!strcmp(attr->attr.name, "node_position"))
- reg_addr = DRCI_REG_NODE_POS;
- else if (!strcmp(attr->attr.name, "mep_filter"))
- reg_addr = DRCI_REG_MEP_FILTER;
- else if (!strcmp(attr->attr.name, "mep_hash0"))
- reg_addr = DRCI_REG_HASH_TBL0;
- else if (!strcmp(attr->attr.name, "mep_hash1"))
- reg_addr = DRCI_REG_HASH_TBL1;
- else if (!strcmp(attr->attr.name, "mep_hash2"))
- reg_addr = DRCI_REG_HASH_TBL2;
- else if (!strcmp(attr->attr.name, "mep_hash3"))
- reg_addr = DRCI_REG_HASH_TBL3;
- else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
- reg_addr = DRCI_REG_HW_ADDR_HI;
- else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
- reg_addr = DRCI_REG_HW_ADDR_MI;
- else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
- reg_addr = DRCI_REG_HW_ADDR_LO;
- else
- return -EIO;
-
- err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
- if (err < 0)
- return err;
-
- return snprintf(buf, PAGE_SIZE, "%04x\n", tmp_val);
-}
-
-static ssize_t store_value(struct most_dci_obj *dci_obj,
- struct most_dci_attribute *attr,
- const char *buf, size_t count)
-{
- u16 val;
- u16 reg_addr;
- int err;
-
- if (!strcmp(attr->attr.name, "mep_filter"))
- reg_addr = DRCI_REG_MEP_FILTER;
- else if (!strcmp(attr->attr.name, "mep_hash0"))
- reg_addr = DRCI_REG_HASH_TBL0;
- else if (!strcmp(attr->attr.name, "mep_hash1"))
- reg_addr = DRCI_REG_HASH_TBL1;
- else if (!strcmp(attr->attr.name, "mep_hash2"))
- reg_addr = DRCI_REG_HASH_TBL2;
- else if (!strcmp(attr->attr.name, "mep_hash3"))
- reg_addr = DRCI_REG_HASH_TBL3;
- else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
- reg_addr = DRCI_REG_HW_ADDR_HI;
- else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
- reg_addr = DRCI_REG_HW_ADDR_MI;
- else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
- reg_addr = DRCI_REG_HW_ADDR_LO;
- else
- return -EIO;
-
- err = kstrtou16(buf, 16, &val);
- if (err)
- return err;
-
- err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
- if (err < 0)
- return err;
-
- return count;
-}
-
-static MOST_DCI_RO_ATTR(ni_state);
-static MOST_DCI_RO_ATTR(packet_bandwidth);
-static MOST_DCI_RO_ATTR(node_address);
-static MOST_DCI_RO_ATTR(node_position);
-static MOST_DCI_ATTR(mep_filter);
-static MOST_DCI_ATTR(mep_hash0);
-static MOST_DCI_ATTR(mep_hash1);
-static MOST_DCI_ATTR(mep_hash2);
-static MOST_DCI_ATTR(mep_hash3);
-static MOST_DCI_ATTR(mep_eui48_hi);
-static MOST_DCI_ATTR(mep_eui48_mi);
-static MOST_DCI_ATTR(mep_eui48_lo);
-
-/**
- * most_dci_def_attrs - array of default attribute files of the dci object
- */
-static struct attribute *most_dci_def_attrs[] = {
- &most_dci_attr_ni_state.attr,
- &most_dci_attr_packet_bandwidth.attr,
- &most_dci_attr_node_address.attr,
- &most_dci_attr_node_position.attr,
- &most_dci_attr_mep_filter.attr,
- &most_dci_attr_mep_hash0.attr,
- &most_dci_attr_mep_hash1.attr,
- &most_dci_attr_mep_hash2.attr,
- &most_dci_attr_mep_hash3.attr,
- &most_dci_attr_mep_eui48_hi.attr,
- &most_dci_attr_mep_eui48_mi.attr,
- &most_dci_attr_mep_eui48_lo.attr,
- NULL,
-};
-
-/**
- * DCI ktype
- */
-static struct kobj_type most_dci_ktype = {
- .sysfs_ops = &most_dci_sysfs_ops,
- .release = most_dci_release,
- .default_attrs = most_dci_def_attrs,
-};
-
-/**
- * create_most_dci_obj - allocates a dci object
- * @parent: parent kobject
- *
- * This creates a dci object and registers it with sysfs.
- * Returns a pointer to the object or NULL when something went wrong.
- */
-static struct most_dci_obj *
-create_most_dci_obj(struct kobject *parent)
-{
- struct most_dci_obj *most_dci;
- int retval;
-
- most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
- if (!most_dci)
- return NULL;
-
- retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
- "dci");
- if (retval) {
- kobject_put(&most_dci->kobj);
- return NULL;
- }
- return most_dci;
-}
-
-/**
- * destroy_most_dci_obj - DCI object release function
- * @p: pointer to dci object
- */
-static void destroy_most_dci_obj(struct most_dci_obj *p)
-{
- kobject_put(&p->kobj);
-}
-
-/**
- * hdm_probe - probe function of USB device driver
- * @interface: Interface of the attached USB device
- * @id: Pointer to the USB ID table.
- *
- * This allocates and initializes the device instance, adds the new
- * entry to the internal list, scans the USB descriptors and registers
- * the interface with the core.
- * Additionally, the DCI objects are created and the hardware is sync'd.
- *
- * Return 0 on success. In case of an error a negative number is returned.
- */
-static int
-hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
-{
- unsigned int i;
- unsigned int num_endpoints;
- struct most_channel_capability *tmp_cap;
- struct most_dev *mdev;
- struct usb_device *usb_dev;
- struct device *dev;
- struct usb_host_interface *usb_iface_desc;
- struct usb_endpoint_descriptor *ep_desc;
- int ret = 0;
- int err;
-
- usb_iface_desc = interface->cur_altsetting;
- usb_dev = interface_to_usbdev(interface);
- dev = &usb_dev->dev;
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev)
- goto exit_ENOMEM;
-
- usb_set_intfdata(interface, mdev);
- num_endpoints = usb_iface_desc->desc.bNumEndpoints;
- mutex_init(&mdev->io_mutex);
- INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
- init_timer(&mdev->link_stat_timer);
-
- mdev->usb_device = usb_dev;
- mdev->link_stat_timer.function = link_stat_timer_handler;
- mdev->link_stat_timer.data = (unsigned long)mdev;
- mdev->link_stat_timer.expires = jiffies + (2 * HZ);
-
- mdev->iface.mod = hdm_usb_fops.owner;
- mdev->iface.interface = ITYPE_USB;
- mdev->iface.configure = hdm_configure_channel;
- mdev->iface.request_netinfo = hdm_request_netinfo;
- mdev->iface.enqueue = hdm_enqueue;
- mdev->iface.poison_channel = hdm_poison_channel;
- mdev->iface.description = mdev->description;
- mdev->iface.num_channels = num_endpoints;
-
- snprintf(mdev->description, sizeof(mdev->description),
- "usb_device %d-%s:%d.%d",
- usb_dev->bus->busnum,
- usb_dev->devpath,
- usb_dev->config->desc.bConfigurationValue,
- usb_iface_desc->desc.bInterfaceNumber);
-
- mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
- if (!mdev->conf)
- goto exit_free;
-
- mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
- if (!mdev->cap)
- goto exit_free1;
-
- mdev->iface.channel_vector = mdev->cap;
- mdev->iface.priv = NULL;
-
- mdev->ep_address =
- kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
- if (!mdev->ep_address)
- goto exit_free2;
-
- mdev->anchor_list =
- kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
- if (!mdev->anchor_list)
- goto exit_free3;
-
- tmp_cap = mdev->cap;
- for (i = 0; i < num_endpoints; i++) {
- ep_desc = &usb_iface_desc->endpoint[i].desc;
- mdev->ep_address[i] = ep_desc->bEndpointAddress;
- mdev->padding_active[i] = false;
- mdev->is_channel_healthy[i] = true;
-
- snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
- mdev->ep_address[i]);
-
- tmp_cap->name_suffix = &mdev->suffix[i][0];
- tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
- tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
- tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
- tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
- tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
- MOST_CH_ISOC_AVP | MOST_CH_SYNC;
- if (ep_desc->bEndpointAddress & USB_DIR_IN)
- tmp_cap->direction = MOST_CH_RX;
- else
- tmp_cap->direction = MOST_CH_TX;
- tmp_cap++;
- INIT_LIST_HEAD(&mdev->anchor_list[i]);
- spin_lock_init(&mdev->anchor_list_lock[i]);
- err = drci_wr_reg(usb_dev,
- DRCI_REG_BASE + DRCI_COMMAND +
- ep_desc->bEndpointAddress * 16,
- 1);
- if (err < 0)
- pr_warn("DCI Sync for EP %02x failed",
- ep_desc->bEndpointAddress);
- }
- dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
- le16_to_cpu(usb_dev->descriptor.idVendor),
- le16_to_cpu(usb_dev->descriptor.idProduct),
- usb_dev->bus->busnum,
- usb_dev->devnum);
-
- dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
- usb_dev->bus->busnum,
- usb_dev->devpath,
- usb_dev->config->desc.bConfigurationValue,
- usb_iface_desc->desc.bInterfaceNumber);
-
- mdev->parent = most_register_interface(&mdev->iface);
- if (IS_ERR(mdev->parent)) {
- ret = PTR_ERR(mdev->parent);
- goto exit_free4;
- }
-
- mutex_lock(&mdev->io_mutex);
- if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_INIC) {
- /* this increments the reference count of the instance
- * object of the core
- */
- mdev->dci = create_most_dci_obj(mdev->parent);
- if (!mdev->dci) {
- mutex_unlock(&mdev->io_mutex);
- most_deregister_interface(&mdev->iface);
- ret = -ENOMEM;
- goto exit_free4;
- }
-
- kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
- mdev->dci->usb_device = mdev->usb_device;
- }
- mutex_unlock(&mdev->io_mutex);
- return 0;
-
-exit_free4:
- kfree(mdev->anchor_list);
-exit_free3:
- kfree(mdev->ep_address);
-exit_free2:
- kfree(mdev->cap);
-exit_free1:
- kfree(mdev->conf);
-exit_free:
- kfree(mdev);
-exit_ENOMEM:
- if (ret == 0 || ret == -ENOMEM) {
- ret = -ENOMEM;
- dev_err(dev, "out of memory\n");
- }
- return ret;
-}
-
-/**
- * hdm_disconnect - disconnect function of USB device driver
- * @interface: Interface of the attached USB device
- *
- * This deregisters the interface with the core, removes the kernel timer
- * and frees resources.
- *
- * Context: hub kernel thread
- */
-static void hdm_disconnect(struct usb_interface *interface)
-{
- struct most_dev *mdev;
-
- mdev = usb_get_intfdata(interface);
- mutex_lock(&mdev->io_mutex);
- usb_set_intfdata(interface, NULL);
- mdev->usb_device = NULL;
- mutex_unlock(&mdev->io_mutex);
-
- del_timer_sync(&mdev->link_stat_timer);
- cancel_work_sync(&mdev->poll_work_obj);
-
- destroy_most_dci_obj(mdev->dci);
- most_deregister_interface(&mdev->iface);
-
- kfree(mdev->anchor_list);
- kfree(mdev->cap);
- kfree(mdev->conf);
- kfree(mdev->ep_address);
- kfree(mdev);
-}
-
-static struct usb_driver hdm_usb = {
- .name = "hdm_usb",
- .id_table = usbid,
- .probe = hdm_probe,
- .disconnect = hdm_disconnect,
-};
-
-static int __init hdm_usb_init(void)
-{
- pr_info("hdm_usb_init()\n");
- if (usb_register(&hdm_usb)) {
- pr_err("could not register hdm_usb driver\n");
- return -EIO;
- }
- schedule_usb_work = create_workqueue("hdmu_work");
- if (schedule_usb_work == NULL) {
- pr_err("could not create workqueue\n");
- usb_deregister(&hdm_usb);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void __exit hdm_usb_exit(void)
-{
- pr_info("hdm_usb_exit()\n");
- destroy_workqueue(schedule_usb_work);
- usb_deregister(&hdm_usb);
-}
-
-module_init(hdm_usb_init);
-module_exit(hdm_usb_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
-MODULE_DESCRIPTION("HDM_4_USB");
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
deleted file mode 100644
index 47172546d728..000000000000
--- a/drivers/staging/most/mostcore/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# MOSTCore configuration
-#
-
-config MOSTCORE
- tristate "MOST Core"
- depends on HAS_DMA
-
- ---help---
- Say Y here if you want to enable MOST support.
- This device driver needs at least an additional AIM and HDM to work.
-
- To compile this driver as a module, choose M here: the
- module will be called mostcore.
diff --git a/drivers/staging/most/mostcore/Makefile b/drivers/staging/most/mostcore/Makefile
deleted file mode 100644
index a078f01cf7c2..000000000000
--- a/drivers/staging/most/mostcore/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_MOSTCORE) += mostcore.o
-
-mostcore-objs := core.o
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
deleted file mode 100644
index 0045c10413eb..000000000000
--- a/drivers/staging/most/mostcore/core.c
+++ /dev/null
@@ -1,1993 +0,0 @@
-/*
- * core.c - Implementation of core module of MOST Linux driver stack
- *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/kobject.h>
-#include <linux/mutex.h>
-#include <linux/completion.h>
-#include <linux/sysfs.h>
-#include <linux/kthread.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-#include "mostcore.h"
-
-#define MAX_CHANNELS 64
-#define STRING_SIZE 80
-
-static struct class *most_class;
-static struct device *class_glue_dir;
-static struct ida mdev_id;
-static int modref;
-static int dummy_num_buffers;
-
-struct most_c_aim_obj {
- struct most_aim *ptr;
- int refs;
- int num_buffers;
-};
-
-struct most_c_obj {
- struct kobject kobj;
- struct completion cleanup;
- atomic_t mbo_ref;
- atomic_t mbo_nq_level;
- uint16_t channel_id;
- bool is_poisoned;
- struct mutex start_mutex;
- int is_starving;
- struct most_interface *iface;
- struct most_inst_obj *inst;
- struct most_channel_config cfg;
- bool keep_mbo;
- bool enqueue_halt;
- struct list_head fifo;
- spinlock_t fifo_lock;
- struct list_head halt_fifo;
- struct list_head list;
- struct most_c_aim_obj aim0;
- struct most_c_aim_obj aim1;
- struct list_head trash_fifo;
- struct task_struct *hdm_enqueue_task;
- struct mutex stop_task_mutex;
- wait_queue_head_t hdm_fifo_wq;
-};
-#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
-
-struct most_inst_obj {
- int dev_id;
- atomic_t tainted;
- struct most_interface *iface;
- struct list_head channel_list;
- struct most_c_obj *channel[MAX_CHANNELS];
- struct kobject kobj;
- struct list_head list;
-};
-#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
-
-/**
- * list_pop_mbo - retrieves the first MBO of the list and removes it
- * @ptr: the list head to grab the MBO from.
- */
-#define list_pop_mbo(ptr) \
-({ \
- struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
- list_del(&_mbo->list); \
- _mbo; \
-})
-
-static struct mutex deregister_mutex;
-
-/* ___ ___
- * ___C H A N N E L___
- */
-
-/**
- * struct most_c_attr - to access the attributes of a channel object
- * @attr: attributes of a channel
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_c_attr {
- struct attribute attr;
- ssize_t (*show)(struct most_c_obj *d,
- struct most_c_attr *attr,
- char *buf);
- ssize_t (*store)(struct most_c_obj *d,
- struct most_c_attr *attr,
- const char *buf,
- size_t count);
-};
-#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
-
-#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
- struct most_c_attr most_chnl_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-
-/**
- * channel_attr_show - show function of channel object
- * @kobj: pointer to its kobject
- * @attr: pointer to its attributes
- * @buf: buffer
- */
-static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct most_c_attr *channel_attr = to_channel_attr(attr);
- struct most_c_obj *c_obj = to_c_obj(kobj);
-
- if (!channel_attr->show)
- return -EIO;
-
- return channel_attr->show(c_obj, channel_attr, buf);
-}
-
-/**
- * channel_attr_store - store function of channel object
- * @kobj: pointer to its kobject
- * @attr: pointer to its attributes
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t channel_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_c_attr *channel_attr = to_channel_attr(attr);
- struct most_c_obj *c_obj = to_c_obj(kobj);
-
- if (!channel_attr->store)
- return -EIO;
- return channel_attr->store(c_obj, channel_attr, buf, len);
-}
-
-static const struct sysfs_ops most_channel_sysfs_ops = {
- .show = channel_attr_show,
- .store = channel_attr_store,
-};
-
-/**
- * most_free_mbo_coherent - free an MBO and its coherent buffer
- * @mbo: buffer to be released
- *
- */
-static void most_free_mbo_coherent(struct mbo *mbo)
-{
- struct most_c_obj *c = mbo->context;
- u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
-
- dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
- mbo->bus_address);
- kfree(mbo);
- if (atomic_sub_and_test(1, &c->mbo_ref))
- complete(&c->cleanup);
-}
-
-/**
- * flush_channel_fifos - clear the channel fifos
- * @c: pointer to channel object
- */
-static void flush_channel_fifos(struct most_c_obj *c)
-{
- unsigned long flags, hf_flags;
- struct mbo *mbo, *tmp;
-
- if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
- return;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
- list_del(&mbo->list);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- most_free_mbo_coherent(mbo);
- spin_lock_irqsave(&c->fifo_lock, flags);
- }
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-
- spin_lock_irqsave(&c->fifo_lock, hf_flags);
- list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
- list_del(&mbo->list);
- spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
- most_free_mbo_coherent(mbo);
- spin_lock_irqsave(&c->fifo_lock, hf_flags);
- }
- spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
-
- if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
- pr_info("WARN: fifo | halt fifo not empty\n");
-}
-
-/**
- * flush_trash_fifo - clear the trash fifo
- * @c: pointer to channel object
- */
-static int flush_trash_fifo(struct most_c_obj *c)
-{
- struct mbo *mbo, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
- list_del(&mbo->list);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- most_free_mbo_coherent(mbo);
- spin_lock_irqsave(&c->fifo_lock, flags);
- }
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return 0;
-}
-
-/**
- * most_channel_release - release function of channel object
- * @kobj: pointer to channel's kobject
- */
-static void most_channel_release(struct kobject *kobj)
-{
- struct most_c_obj *c = to_c_obj(kobj);
-
- kfree(c);
-}
-
-static ssize_t show_available_directions(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- strcpy(buf, "");
- if (c->iface->channel_vector[i].direction & MOST_CH_RX)
- strcat(buf, "dir_rx ");
- if (c->iface->channel_vector[i].direction & MOST_CH_TX)
- strcat(buf, "dir_tx ");
- strcat(buf, "\n");
- return strlen(buf) + 1;
-}
-
-static ssize_t show_available_datatypes(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- strcpy(buf, "");
- if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
- strcat(buf, "control ");
- if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
- strcat(buf, "async ");
- if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
- strcat(buf, "sync ");
- if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
- strcat(buf, "isoc_avp ");
- strcat(buf, "\n");
- return strlen(buf) + 1;
-}
-
-static
-ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].num_buffers_packet);
-}
-
-static
-ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].num_buffers_streaming);
-}
-
-static
-ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].buffer_size_packet);
-}
-
-static
-ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].buffer_size_streaming);
-}
-
-static ssize_t show_channel_starving(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
-}
-
-
-#define create_show_channel_attribute(val) \
- static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
-
-create_show_channel_attribute(available_directions);
-create_show_channel_attribute(available_datatypes);
-create_show_channel_attribute(number_of_packet_buffers);
-create_show_channel_attribute(number_of_stream_buffers);
-create_show_channel_attribute(size_of_stream_buffer);
-create_show_channel_attribute(size_of_packet_buffer);
-create_show_channel_attribute(channel_starving);
-
-static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
-}
-
-static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
-
- if (ret)
- return ret;
- return count;
-}
-
-static ssize_t show_set_buffer_size(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
-}
-
-static ssize_t store_set_buffer_size(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
-
- if (ret)
- return ret;
- return count;
-}
-
-static ssize_t show_set_direction(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- if (c->cfg.direction & MOST_CH_TX)
- return snprintf(buf, PAGE_SIZE, "dir_tx\n");
- else if (c->cfg.direction & MOST_CH_RX)
- return snprintf(buf, PAGE_SIZE, "dir_rx\n");
- return snprintf(buf, PAGE_SIZE, "unconfigured\n");
-}
-
-static ssize_t store_set_direction(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- if (!strcmp(buf, "dir_rx\n"))
- c->cfg.direction = MOST_CH_RX;
- else if (!strcmp(buf, "dir_tx\n"))
- c->cfg.direction = MOST_CH_TX;
- else {
- pr_info("WARN: invalid attribute settings\n");
- return -EINVAL;
- }
- return count;
-}
-
-static ssize_t show_set_datatype(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- if (c->cfg.data_type & MOST_CH_CONTROL)
- return snprintf(buf, PAGE_SIZE, "control\n");
- else if (c->cfg.data_type & MOST_CH_ASYNC)
- return snprintf(buf, PAGE_SIZE, "async\n");
- else if (c->cfg.data_type & MOST_CH_SYNC)
- return snprintf(buf, PAGE_SIZE, "sync\n");
- else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
- return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
- return snprintf(buf, PAGE_SIZE, "unconfigured\n");
-}
-
-static ssize_t store_set_datatype(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- if (!strcmp(buf, "control\n"))
- c->cfg.data_type = MOST_CH_CONTROL;
- else if (!strcmp(buf, "async\n"))
- c->cfg.data_type = MOST_CH_ASYNC;
- else if (!strcmp(buf, "sync\n"))
- c->cfg.data_type = MOST_CH_SYNC;
- else if (!strcmp(buf, "isoc_avp\n"))
- c->cfg.data_type = MOST_CH_ISOC_AVP;
- else {
- pr_info("WARN: invalid attribute settings\n");
- return -EINVAL;
- }
- return count;
-}
-
-static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
-}
-
-static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
-
- if (ret)
- return ret;
- return count;
-}
-
-static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
-}
-
-static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
-
- if (ret)
- return ret;
- return count;
-}
-
-#define create_channel_attribute(value) \
- static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
- show_##value, \
- store_##value)
-
-create_channel_attribute(set_buffer_size);
-create_channel_attribute(set_number_of_buffers);
-create_channel_attribute(set_direction);
-create_channel_attribute(set_datatype);
-create_channel_attribute(set_subbuffer_size);
-create_channel_attribute(set_packets_per_xact);
-
-
-/**
- * most_channel_def_attrs - array of default attributes of channel object
- */
-static struct attribute *most_channel_def_attrs[] = {
- &most_chnl_attr_available_directions.attr,
- &most_chnl_attr_available_datatypes.attr,
- &most_chnl_attr_number_of_packet_buffers.attr,
- &most_chnl_attr_number_of_stream_buffers.attr,
- &most_chnl_attr_size_of_packet_buffer.attr,
- &most_chnl_attr_size_of_stream_buffer.attr,
- &most_chnl_attr_set_number_of_buffers.attr,
- &most_chnl_attr_set_buffer_size.attr,
- &most_chnl_attr_set_direction.attr,
- &most_chnl_attr_set_datatype.attr,
- &most_chnl_attr_set_subbuffer_size.attr,
- &most_chnl_attr_set_packets_per_xact.attr,
- &most_chnl_attr_channel_starving.attr,
- NULL,
-};
-
-static struct kobj_type most_channel_ktype = {
- .sysfs_ops = &most_channel_sysfs_ops,
- .release = most_channel_release,
- .default_attrs = most_channel_def_attrs,
-};
-
-static struct kset *most_channel_kset;
-
-/**
- * create_most_c_obj - allocates a channel object
- * @name: name of the channel object
- * @parent: parent kobject
- *
- * This creates a channel object and registers it with sysfs.
- * Returns a pointer to the object or NULL when something went wrong.
- */
-static struct most_c_obj *
-create_most_c_obj(const char *name, struct kobject *parent)
-{
- struct most_c_obj *c;
- int retval;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return NULL;
- c->kobj.kset = most_channel_kset;
- retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
- "%s", name);
- if (retval) {
- kobject_put(&c->kobj);
- return NULL;
- }
- kobject_uevent(&c->kobj, KOBJ_ADD);
- return c;
-}
-
-/**
- * destroy_most_c_obj - channel release function
- * @c: pointer to channel object
- *
- * This decrements the reference counter of the channel object.
- * If the reference count turns zero, its release function is called.
- */
-static void destroy_most_c_obj(struct most_c_obj *c)
-{
- if (c->aim0.ptr)
- c->aim0.ptr->disconnect_channel(c->iface, c->channel_id);
- if (c->aim1.ptr)
- c->aim1.ptr->disconnect_channel(c->iface, c->channel_id);
- c->aim0.ptr = NULL;
- c->aim1.ptr = NULL;
-
- mutex_lock(&deregister_mutex);
- flush_trash_fifo(c);
- flush_channel_fifos(c);
- mutex_unlock(&deregister_mutex);
- kobject_put(&c->kobj);
-}
-
-/* ___ ___
- * ___I N S T A N C E___
- */
-#define MOST_INST_ATTR(_name, _mode, _show, _store) \
- struct most_inst_attribute most_inst_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-
-static struct list_head instance_list;
-
-/**
- * struct most_inst_attribute - to access the attributes of instance object
- * @attr: attributes of an instance
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_inst_attribute {
- struct attribute attr;
- ssize_t (*show)(struct most_inst_obj *d,
- struct most_inst_attribute *attr,
- char *buf);
- ssize_t (*store)(struct most_inst_obj *d,
- struct most_inst_attribute *attr,
- const char *buf,
- size_t count);
-};
-#define to_instance_attr(a) \
- container_of(a, struct most_inst_attribute, attr)
-
-/**
- * instance_attr_show - show function for an instance object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t instance_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct most_inst_attribute *instance_attr;
- struct most_inst_obj *instance_obj;
-
- instance_attr = to_instance_attr(attr);
- instance_obj = to_inst_obj(kobj);
-
- if (!instance_attr->show)
- return -EIO;
-
- return instance_attr->show(instance_obj, instance_attr, buf);
-}
-
-/**
- * instance_attr_store - store function for an instance object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t instance_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_inst_attribute *instance_attr;
- struct most_inst_obj *instance_obj;
-
- instance_attr = to_instance_attr(attr);
- instance_obj = to_inst_obj(kobj);
-
- if (!instance_attr->store)
- return -EIO;
-
- return instance_attr->store(instance_obj, instance_attr, buf, len);
-}
-
-static const struct sysfs_ops most_inst_sysfs_ops = {
- .show = instance_attr_show,
- .store = instance_attr_store,
-};
-
-/**
- * most_inst_release - release function for instance object
- * @kobj: pointer to instance's kobject
- *
- * This frees the allocated memory for the instance object
- */
-static void most_inst_release(struct kobject *kobj)
-{
- struct most_inst_obj *inst = to_inst_obj(kobj);
-
- kfree(inst);
-}
-
-static ssize_t show_description(struct most_inst_obj *instance_obj,
- struct most_inst_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n",
- instance_obj->iface->description);
-}
-
-static ssize_t show_interface(struct most_inst_obj *instance_obj,
- struct most_inst_attribute *attr,
- char *buf)
-{
- switch (instance_obj->iface->interface) {
- case ITYPE_LOOPBACK:
- return snprintf(buf, PAGE_SIZE, "loopback\n");
- case ITYPE_I2C:
- return snprintf(buf, PAGE_SIZE, "i2c\n");
- case ITYPE_I2S:
- return snprintf(buf, PAGE_SIZE, "i2s\n");
- case ITYPE_TSI:
- return snprintf(buf, PAGE_SIZE, "tsi\n");
- case ITYPE_HBI:
- return snprintf(buf, PAGE_SIZE, "hbi\n");
- case ITYPE_MEDIALB_DIM:
- return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
- case ITYPE_MEDIALB_DIM2:
- return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
- case ITYPE_USB:
- return snprintf(buf, PAGE_SIZE, "usb\n");
- case ITYPE_PCIE:
- return snprintf(buf, PAGE_SIZE, "pcie\n");
- }
- return snprintf(buf, PAGE_SIZE, "unknown\n");
-}
-
-#define create_inst_attribute(value) \
- static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
-
-create_inst_attribute(description);
-create_inst_attribute(interface);
-
-static struct attribute *most_inst_def_attrs[] = {
- &most_inst_attr_description.attr,
- &most_inst_attr_interface.attr,
- NULL,
-};
-
-static struct kobj_type most_inst_ktype = {
- .sysfs_ops = &most_inst_sysfs_ops,
- .release = most_inst_release,
- .default_attrs = most_inst_def_attrs,
-};
-
-static struct kset *most_inst_kset;
-
-
-/**
- * create_most_inst_obj - creates an instance object
- * @name: name of the object to be created
- *
- * This allocates memory for an instance structure, assigns the proper kset
- * and registers it with sysfs.
- *
- * Returns a pointer to the instance object or NULL when something went wrong.
- */
-static struct most_inst_obj *create_most_inst_obj(const char *name)
-{
- struct most_inst_obj *inst;
- int retval;
-
- inst = kzalloc(sizeof(*inst), GFP_KERNEL);
- if (!inst)
- return NULL;
- inst->kobj.kset = most_inst_kset;
- retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
- "%s", name);
- if (retval) {
- kobject_put(&inst->kobj);
- return NULL;
- }
- kobject_uevent(&inst->kobj, KOBJ_ADD);
- return inst;
-}
-
-/**
- * destroy_most_inst_obj - MOST instance release function
- * @inst: pointer to the instance object
- *
- * This decrements the reference counter of the instance object.
- * If the reference count turns zero, its release function is called
- */
-static void destroy_most_inst_obj(struct most_inst_obj *inst)
-{
- struct most_c_obj *c, *tmp;
-
- /* need to destroy channels first, since
- * each channel incremented the
- * reference count of the inst->kobj
- */
- list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
- destroy_most_c_obj(c);
- }
- kobject_put(&inst->kobj);
-}
-
-/* ___ ___
- * ___A I M___
- */
-struct most_aim_obj {
- struct kobject kobj;
- struct list_head list;
- struct most_aim *driver;
- char add_link[STRING_SIZE];
- char remove_link[STRING_SIZE];
-};
-#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
-
-static struct list_head aim_list;
-
-
-/**
- * struct most_aim_attribute - to access the attributes of AIM object
- * @attr: attributes of an AIM
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_aim_attribute {
- struct attribute attr;
- ssize_t (*show)(struct most_aim_obj *d,
- struct most_aim_attribute *attr,
- char *buf);
- ssize_t (*store)(struct most_aim_obj *d,
- struct most_aim_attribute *attr,
- const char *buf,
- size_t count);
-};
-#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
-
-/**
- * aim_attr_show - show function of an AIM object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t aim_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct most_aim_attribute *aim_attr;
- struct most_aim_obj *aim_obj;
-
- aim_attr = to_aim_attr(attr);
- aim_obj = to_aim_obj(kobj);
-
- if (!aim_attr->show)
- return -EIO;
-
- return aim_attr->show(aim_obj, aim_attr, buf);
-}
-
-/**
- * aim_attr_store - store function of an AIM object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t aim_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_aim_attribute *aim_attr;
- struct most_aim_obj *aim_obj;
-
- aim_attr = to_aim_attr(attr);
- aim_obj = to_aim_obj(kobj);
-
- if (!aim_attr->store)
- return -EIO;
- return aim_attr->store(aim_obj, aim_attr, buf, len);
-}
-
-static const struct sysfs_ops most_aim_sysfs_ops = {
- .show = aim_attr_show,
- .store = aim_attr_store,
-};
-
-/**
- * most_aim_release - AIM release function
- * @kobj: pointer to AIM's kobject
- */
-static void most_aim_release(struct kobject *kobj)
-{
- struct most_aim_obj *aim_obj = to_aim_obj(kobj);
-
- kfree(aim_obj);
-}
-
-static ssize_t show_add_link(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
-}
-
-/**
- * split_string - parses and modifies the string in the buffer buf and
- * splits it into two mandatory substrings and one optional substring.
- *
- * @buf: complete string from attribute 'add_channel'
- * @a: address of pointer to 1st substring (=instance name)
- * @b: address of pointer to 2nd substring (=channel name)
- * @c: optional address of pointer to 3rd substring (=user defined name)
- *
- * Examples:
- *
- * Input: "mdev0:ch0@ep_81:my_channel\n" or
- * "mdev0:ch0@ep_81:my_channel"
- *
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
- *
- * Input: "mdev0:ch0@ep_81\n"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
- *
- * Input: "mdev0:ch0@ep_81"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
- */
-static int split_string(char *buf, char **a, char **b, char **c)
-{
- *a = strsep(&buf, ":");
- if (!*a)
- return -EIO;
-
- *b = strsep(&buf, ":\n");
- if (!*b)
- return -EIO;
-
- if (c)
- *c = strsep(&buf, ":\n");
-
- return 0;
-}
-
-/**
- * get_channel_by_name - get pointer to channel object
- * @mdev: name of the device instance
- * @mdev_ch: name of the respective channel
- *
- * This retrieves the pointer to a channel object.
- */
-static struct
-most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
-{
- struct most_c_obj *c, *tmp;
- struct most_inst_obj *i, *i_tmp;
- int found = 0;
-
- list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
- if (!strcmp(kobject_name(&i->kobj), mdev)) {
- found++;
- break;
- }
- }
- if (unlikely(!found))
- return ERR_PTR(-EIO);
-
- list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
- if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
- found++;
- break;
- }
- }
- if (unlikely(2 > found))
- return ERR_PTR(-EIO);
- return c;
-}
-
-/**
- * store_add_link - store() function for add_link attribute
- * @aim_obj: pointer to AIM object
- * @attr: its attributes
- * @buf: buffer
- * @len: buffer length
- *
- * This parses the string given by buf and splits it into
- * three substrings. Note: the third substring is optional. In case a cdev
- * AIM is loaded, the optional third substring will make up the name of the
- * device node in the /dev directory. If omitted, the device node will
- * inherit the channel's name within sysfs.
- *
- * Searches for a pair of device and channel and probes the AIM
- *
- * Example:
- * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
- * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
- *
- * (1) would create the device node /dev/my_rxchannel
- * (2) would create the device node /dev/mdev0-ch0@ep_81
- */
-static ssize_t store_add_link(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_c_obj *c;
- struct most_aim **aim_ptr;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- char *mdev_devnod;
- char devnod_buf[STRING_SIZE];
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
- strlcpy(buffer, buf, max_len);
- strlcpy(aim_obj->add_link, buf, max_len);
-
- ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
- if (ret)
- return ret;
-
- if (!mdev_devnod || *mdev_devnod == 0) {
- snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev, mdev_ch);
- mdev_devnod = devnod_buf;
- }
-
- c = get_channel_by_name(mdev, mdev_ch);
- if (IS_ERR(c))
- return -ENODEV;
-
- if (!c->aim0.ptr)
- aim_ptr = &c->aim0.ptr;
- else if (!c->aim1.ptr)
- aim_ptr = &c->aim1.ptr;
- else
- return -ENOSPC;
-
- ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
- &c->cfg, &c->kobj, mdev_devnod);
- if (ret)
- return ret;
- *aim_ptr = aim_obj->driver;
- return len;
-}
-
-static struct most_aim_attribute most_aim_attr_add_link =
- __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
-
-static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
-}
-
-/**
- * store_remove_link - store function for remove_link attribute
- * @aim_obj: pointer to AIM object
- * @attr: its attributes
- * @buf: buffer
- * @len: buffer length
- *
- * Example:
- * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
- */
-static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_c_obj *c;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
- strlcpy(buffer, buf, max_len);
- strlcpy(aim_obj->remove_link, buf, max_len);
- ret = split_string(buffer, &mdev, &mdev_ch, NULL);
- if (ret)
- return ret;
-
- c = get_channel_by_name(mdev, mdev_ch);
- if (IS_ERR(c))
- return -ENODEV;
-
- if (c->aim0.ptr == aim_obj->driver)
- c->aim0.ptr = NULL;
- if (c->aim1.ptr == aim_obj->driver)
- c->aim1.ptr = NULL;
- if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
- return -EIO;
- return len;
-}
-
-static struct most_aim_attribute most_aim_attr_remove_link =
- __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link, store_remove_link);
-
-static struct attribute *most_aim_def_attrs[] = {
- &most_aim_attr_add_link.attr,
- &most_aim_attr_remove_link.attr,
- NULL,
-};
-
-static struct kobj_type most_aim_ktype = {
- .sysfs_ops = &most_aim_sysfs_ops,
- .release = most_aim_release,
- .default_attrs = most_aim_def_attrs,
-};
-
-static struct kset *most_aim_kset;
-
-/**
- * create_most_aim_obj - creates an AIM object
- * @name: name of the AIM
- *
- * This creates an AIM object, assigns the proper kset and registers
- * it with sysfs.
- * Returns a pointer to the object or NULL if something went wrong.
- */
-static struct most_aim_obj *create_most_aim_obj(const char *name)
-{
- struct most_aim_obj *most_aim;
- int retval;
-
- most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
- if (!most_aim)
- return NULL;
- most_aim->kobj.kset = most_aim_kset;
- retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
- NULL, "%s", name);
- if (retval) {
- kobject_put(&most_aim->kobj);
- return NULL;
- }
- kobject_uevent(&most_aim->kobj, KOBJ_ADD);
- return most_aim;
-}
-
-/**
- * destroy_most_aim_obj - AIM release function
- * @p: pointer to AIM object
- *
- * This decrements the reference counter of the AIM object. If the
- * reference count turns zero, its release function will be called.
- */
-static void destroy_most_aim_obj(struct most_aim_obj *p)
-{
- kobject_put(&p->kobj);
-}
-
-
-/* ___ ___
- * ___C O R E___
- */
-
-/**
- * Instantiation of the MOST bus
- */
-static struct bus_type most_bus = {
- .name = "most",
-};
-
-/**
- * Instantiation of the core driver
- */
-static struct device_driver mostcore = {
- .name = "mostcore",
- .bus = &most_bus,
-};
-
-static inline void trash_mbo(struct mbo *mbo)
-{
- unsigned long flags;
- struct most_c_obj *c = mbo->context;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_add(&mbo->list, &c->trash_fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-}
-
-static struct mbo *get_hdm_mbo(struct most_c_obj *c)
-{
- unsigned long flags;
- struct mbo *mbo;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- if (c->enqueue_halt || list_empty(&c->halt_fifo))
- mbo = NULL;
- else
- mbo = list_pop_mbo(&c->halt_fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return mbo;
-}
-
-static void nq_hdm_mbo(struct mbo *mbo)
-{
- unsigned long flags;
- struct most_c_obj *c = mbo->context;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_add_tail(&mbo->list, &c->halt_fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- wake_up_interruptible(&c->hdm_fifo_wq);
-}
-
-static int hdm_enqueue_thread(void *data)
-{
- struct most_c_obj *c = data;
- struct mbo *mbo;
- typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
-
- while (likely(!kthread_should_stop())) {
- wait_event_interruptible(c->hdm_fifo_wq,
- (mbo = get_hdm_mbo(c))
- || kthread_should_stop());
-
- if (unlikely(!mbo))
- continue;
-
- if (c->cfg.direction == MOST_CH_RX)
- mbo->buffer_length = c->cfg.buffer_size;
-
- if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
- pr_err("hdm enqueue failed\n");
- nq_hdm_mbo(mbo);
- c->hdm_enqueue_task = NULL;
- return 0;
- }
- }
-
- return 0;
-}
-
-static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
-{
- struct task_struct *task =
- kthread_run(&hdm_enqueue_thread, c, "hdm_fifo_%d", channel_id);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- c->hdm_enqueue_task = task;
- return 0;
-}
-
-/**
- * arm_mbo - recycle MBO for further usage
- * @mbo: buffer object
- *
- * This puts an MBO back to the list to have it ready for upcoming
- * tx transactions.
- *
- * In case the MBO belongs to a channel that recently has been
- * poisoned, the MBO is scheduled to be trashed.
- * Calls the completion handler of an attached AIM.
- */
-static void arm_mbo(struct mbo *mbo)
-{
- unsigned long flags;
- struct most_c_obj *c;
-
- BUG_ON((!mbo) || (!mbo->context));
- c = mbo->context;
-
- if (c->is_poisoned) {
- trash_mbo(mbo);
- return;
- }
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- ++*mbo->num_buffers_ptr;
- list_add_tail(&mbo->list, &c->fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-
- if (c->aim0.refs && c->aim0.ptr->tx_completion)
- c->aim0.ptr->tx_completion(c->iface, c->channel_id);
-
- if (c->aim1.refs && c->aim1.ptr->tx_completion)
- c->aim1.ptr->tx_completion(c->iface, c->channel_id);
-}
-
-/**
- * arm_mbo_chain - helper function that arms an MBO chain for the HDM
- * @c: pointer to interface channel
- * @dir: direction of the channel
- * @compl: pointer to completion function
- *
- * This allocates buffer objects including the containing DMA coherent
- * buffer and puts them in the fifo.
- * Buffers of Rx channels are put in the kthread fifo, hence immediately
- * submitted to the HDM.
- *
- * Returns the number of allocated and enqueued MBOs.
- */
-static int arm_mbo_chain(struct most_c_obj *c, int dir,
- void (*compl)(struct mbo *))
-{
- unsigned int i;
- int retval;
- struct mbo *mbo;
- u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
-
- atomic_set(&c->mbo_nq_level, 0);
-
- for (i = 0; i < c->cfg.num_buffers; i++) {
- mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
- if (!mbo) {
- pr_info("WARN: Allocation of MBO failed.\n");
- retval = i;
- goto _exit;
- }
- mbo->context = c;
- mbo->ifp = c->iface;
- mbo->hdm_channel_id = c->channel_id;
- mbo->virt_address = dma_alloc_coherent(NULL,
- coherent_buf_size,
- &mbo->bus_address,
- GFP_KERNEL);
- if (!mbo->virt_address) {
- pr_info("WARN: No DMA coherent buffer.\n");
- retval = i;
- goto _error1;
- }
- mbo->complete = compl;
- mbo->num_buffers_ptr = &dummy_num_buffers;
- if (dir == MOST_CH_RX) {
- nq_hdm_mbo(mbo);
- atomic_inc(&c->mbo_nq_level);
- } else {
- arm_mbo(mbo);
- }
- }
- return i;
-
-_error1:
- kfree(mbo);
-_exit:
- return retval;
-}
-
-/**
- * most_submit_mbo - submits an MBO to fifo
- * @mbo: pointer to the MBO
- *
- */
-int most_submit_mbo(struct mbo *mbo)
-{
- struct most_c_obj *c;
- struct most_inst_obj *i;
-
- if (unlikely((!mbo) || (!mbo->context))) {
- pr_err("Bad MBO or missing channel reference\n");
- return -EINVAL;
- }
- c = mbo->context;
- i = c->inst;
-
- if (unlikely(atomic_read(&i->tainted)))
- return -ENODEV;
-
- nq_hdm_mbo(mbo);
- return 0;
-}
-EXPORT_SYMBOL_GPL(most_submit_mbo);
-
-/**
- * most_write_completion - write completion handler
- * @mbo: pointer to MBO
- *
- * This recycles the MBO for further usage. In case the channel has been
- * poisoned, the MBO is scheduled to be trashed.
- */
-static void most_write_completion(struct mbo *mbo)
-{
- struct most_c_obj *c;
-
- BUG_ON((!mbo) || (!mbo->context));
-
- c = mbo->context;
- if (mbo->status == MBO_E_INVAL)
- pr_info("WARN: Tx MBO status: invalid\n");
- if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
- trash_mbo(mbo);
- else
- arm_mbo(mbo);
-}
-
-/**
- * get_channel_by_iface - get pointer to channel object
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This retrieves a pointer to a channel of the given interface and channel ID.
- */
-static struct
-most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
-{
- struct most_inst_obj *i;
-
- if (unlikely(!iface)) {
- pr_err("Bad interface\n");
- return NULL;
- }
- if (unlikely((id < 0) || (id >= iface->num_channels))) {
- pr_err("Channel index (%d) out of range\n", id);
- return NULL;
- }
- i = iface->priv;
- if (unlikely(!i)) {
- pr_err("interface is not registered\n");
- return NULL;
- }
- return i->channel[id];
-}
-
-int channel_has_mbo(struct most_interface *iface, int id)
-{
- struct most_c_obj *c = get_channel_by_iface(iface, id);
- unsigned long flags;
- int empty;
-
- if (unlikely(!c))
- return -EINVAL;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- empty = list_empty(&c->fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return !empty;
-}
-EXPORT_SYMBOL_GPL(channel_has_mbo);
-
-/**
- * most_get_mbo - get pointer to an MBO of pool
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This attempts to get a free buffer out of the channel fifo.
- * Returns a pointer to MBO on success or NULL otherwise.
- */
-struct mbo *most_get_mbo(struct most_interface *iface, int id,
- struct most_aim *aim)
-{
- struct mbo *mbo;
- struct most_c_obj *c;
- unsigned long flags;
- int *num_buffers_ptr;
-
- c = get_channel_by_iface(iface, id);
- if (unlikely(!c))
- return NULL;
-
- if (c->aim0.refs && c->aim1.refs &&
- ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
- (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
- return NULL;
-
- if (aim == c->aim0.ptr)
- num_buffers_ptr = &c->aim0.num_buffers;
- else if (aim == c->aim1.ptr)
- num_buffers_ptr = &c->aim1.num_buffers;
- else
- num_buffers_ptr = &dummy_num_buffers;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- if (list_empty(&c->fifo)) {
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return NULL;
- }
- mbo = list_pop_mbo(&c->fifo);
- --*num_buffers_ptr;
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-
- mbo->num_buffers_ptr = num_buffers_ptr;
- mbo->buffer_length = c->cfg.buffer_size;
- return mbo;
-}
-EXPORT_SYMBOL_GPL(most_get_mbo);
-
-
-/**
- * most_put_mbo - return buffer to pool
- * @mbo: buffer object
- */
-void most_put_mbo(struct mbo *mbo)
-{
- struct most_c_obj *c;
- struct most_inst_obj *i;
-
- c = mbo->context;
- i = c->inst;
-
- if (unlikely(atomic_read(&i->tainted))) {
- mbo->status = MBO_E_CLOSE;
- trash_mbo(mbo);
- return;
- }
- if (c->cfg.direction == MOST_CH_TX) {
- arm_mbo(mbo);
- return;
- }
- nq_hdm_mbo(mbo);
- atomic_inc(&c->mbo_nq_level);
-}
-EXPORT_SYMBOL_GPL(most_put_mbo);
-
-/**
- * most_read_completion - read completion handler
- * @mbo: pointer to MBO
- *
- * This function is called by the HDM when data has been received from the
- * hardware and copied to the buffer of the MBO.
- *
- * In case the channel has been poisoned it puts the buffer in the trash queue.
- * Otherwise, it passes the buffer to an AIM for further processing.
- */
-static void most_read_completion(struct mbo *mbo)
-{
- struct most_c_obj *c = mbo->context;
-
- if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
- trash_mbo(mbo);
- return;
- }
-
- if (mbo->status == MBO_E_INVAL) {
- nq_hdm_mbo(mbo);
- atomic_inc(&c->mbo_nq_level);
- return;
- }
-
- if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
- pr_info("WARN: rx device out of buffers\n");
- c->is_starving = 1;
- }
-
- if (c->aim0.refs && c->aim0.ptr->rx_completion &&
- c->aim0.ptr->rx_completion(mbo) == 0)
- return;
-
- if (c->aim1.refs && c->aim1.ptr->rx_completion &&
- c->aim1.ptr->rx_completion(mbo) == 0)
- return;
-
- most_put_mbo(mbo);
-}
-
-/**
- * most_start_channel - prepares a channel for communication
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This prepares the channel for usage. Cross-checks whether the
- * channel's been properly configured.
- *
- * Returns 0 on success or error code otherwise.
- */
-int most_start_channel(struct most_interface *iface, int id,
- struct most_aim *aim)
-{
- int num_buffer;
- int ret;
- struct most_c_obj *c = get_channel_by_iface(iface, id);
-
- if (unlikely(!c))
- return -EINVAL;
-
- mutex_lock(&c->start_mutex);
- if (c->aim0.refs + c->aim1.refs > 0)
- goto out; /* already started by other aim */
-
- if (!try_module_get(iface->mod)) {
- pr_info("failed to acquire HDM lock\n");
- mutex_unlock(&c->start_mutex);
- return -ENOLCK;
- }
- modref++;
-
- c->cfg.extra_len = 0;
- if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
- pr_info("channel configuration failed. Go check settings...\n");
- ret = -EINVAL;
- goto error;
- }
-
- init_waitqueue_head(&c->hdm_fifo_wq);
-
- if (c->cfg.direction == MOST_CH_RX)
- num_buffer = arm_mbo_chain(c, c->cfg.direction,
- most_read_completion);
- else
- num_buffer = arm_mbo_chain(c, c->cfg.direction,
- most_write_completion);
- if (unlikely(0 == num_buffer)) {
- pr_info("failed to allocate memory\n");
- ret = -ENOMEM;
- goto error;
- }
-
- ret = run_enqueue_thread(c, id);
- if (ret)
- goto error;
-
- c->is_starving = 0;
- c->aim0.num_buffers = c->cfg.num_buffers / 2;
- c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
- atomic_set(&c->mbo_ref, num_buffer);
-
-out:
- if (aim == c->aim0.ptr)
- c->aim0.refs++;
- if (aim == c->aim1.ptr)
- c->aim1.refs++;
- mutex_unlock(&c->start_mutex);
- return 0;
-
-error:
- if (iface->mod)
- module_put(iface->mod);
- modref--;
- mutex_unlock(&c->start_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(most_start_channel);
-
-/**
- * most_stop_channel - stops a running channel
- * @iface: pointer to interface instance
- * @id: channel ID
- */
-int most_stop_channel(struct most_interface *iface, int id,
- struct most_aim *aim)
-{
- struct most_c_obj *c;
-
- if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
- pr_err("Bad interface or index out of range\n");
- return -EINVAL;
- }
- c = get_channel_by_iface(iface, id);
- if (unlikely(!c))
- return -EINVAL;
-
- mutex_lock(&c->start_mutex);
- if (c->aim0.refs + c->aim1.refs >= 2)
- goto out;
-
- mutex_lock(&c->stop_task_mutex);
- if (c->hdm_enqueue_task)
- kthread_stop(c->hdm_enqueue_task);
- c->hdm_enqueue_task = NULL;
- mutex_unlock(&c->stop_task_mutex);
-
- mutex_lock(&deregister_mutex);
- if (atomic_read(&c->inst->tainted)) {
- mutex_unlock(&deregister_mutex);
- mutex_unlock(&c->start_mutex);
- return -ENODEV;
- }
- mutex_unlock(&deregister_mutex);
-
- if (iface->mod && modref) {
- module_put(iface->mod);
- modref--;
- }
-
- c->is_poisoned = true;
- if (c->iface->poison_channel(c->iface, c->channel_id)) {
- pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
- c->iface->description);
- mutex_unlock(&c->start_mutex);
- return -EAGAIN;
- }
- flush_trash_fifo(c);
- flush_channel_fifos(c);
-
-#ifdef CMPL_INTERRUPTIBLE
- if (wait_for_completion_interruptible(&c->cleanup)) {
- pr_info("Interrupted while clean up ch %d\n", c->channel_id);
- mutex_unlock(&c->start_mutex);
- return -EINTR;
- }
-#else
- wait_for_completion(&c->cleanup);
-#endif
- c->is_poisoned = false;
-
-out:
- if (aim == c->aim0.ptr)
- c->aim0.refs--;
- if (aim == c->aim1.ptr)
- c->aim1.refs--;
- mutex_unlock(&c->start_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(most_stop_channel);
-
-/**
- * most_register_aim - registers an AIM (driver) with the core
- * @aim: instance of AIM to be registered
- */
-int most_register_aim(struct most_aim *aim)
-{
- struct most_aim_obj *aim_obj;
-
- if (!aim) {
- pr_err("Bad driver\n");
- return -EINVAL;
- }
- aim_obj = create_most_aim_obj(aim->name);
- if (!aim_obj) {
- pr_info("failed to alloc driver object\n");
- return -ENOMEM;
- }
- aim_obj->driver = aim;
- aim->context = aim_obj;
- pr_info("registered new application interfacing module %s\n",
- aim->name);
- list_add_tail(&aim_obj->list, &aim_list);
- return 0;
-}
-EXPORT_SYMBOL_GPL(most_register_aim);
-
-/**
- * most_deregister_aim - deregisters an AIM (driver) with the core
- * @aim: AIM to be removed
- */
-int most_deregister_aim(struct most_aim *aim)
-{
- struct most_aim_obj *aim_obj;
- struct most_c_obj *c, *tmp;
- struct most_inst_obj *i, *i_tmp;
-
- if (!aim) {
- pr_err("Bad driver\n");
- return -EINVAL;
- }
-
- aim_obj = aim->context;
- if (!aim_obj) {
- pr_info("driver not registered.\n");
- return -EINVAL;
- }
- list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
- list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
- if (c->aim0.ptr == aim || c->aim1.ptr == aim)
- aim->disconnect_channel(
- c->iface, c->channel_id);
- if (c->aim0.ptr == aim)
- c->aim0.ptr = NULL;
- if (c->aim1.ptr == aim)
- c->aim1.ptr = NULL;
- }
- }
- list_del(&aim_obj->list);
- destroy_most_aim_obj(aim_obj);
- pr_info("deregistering application interfacing module %s\n", aim->name);
- return 0;
-}
-EXPORT_SYMBOL_GPL(most_deregister_aim);
-
-/**
- * most_register_interface - registers an interface with core
- * @iface: pointer to the instance of the interface description.
- *
- * Allocates and initializes a new interface instance and all of its channels.
- * Returns a pointer to kobject or an error pointer.
- */
-struct kobject *most_register_interface(struct most_interface *iface)
-{
- unsigned int i;
- int id;
- char name[STRING_SIZE];
- char channel_name[STRING_SIZE];
- struct most_c_obj *c;
- struct most_inst_obj *inst;
-
- if (!iface || !iface->enqueue || !iface->configure ||
- !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
- pr_err("Bad interface or channel overflow\n");
- return ERR_PTR(-EINVAL);
- }
-
- id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
- if (id < 0) {
- pr_info("Failed to alloc mdev ID\n");
- return ERR_PTR(id);
- }
- snprintf(name, STRING_SIZE, "mdev%d", id);
-
- inst = create_most_inst_obj(name);
- if (!inst) {
- pr_info("Failed to allocate interface instance\n");
- return ERR_PTR(-ENOMEM);
- }
-
- iface->priv = inst;
- INIT_LIST_HEAD(&inst->channel_list);
- inst->iface = iface;
- inst->dev_id = id;
- atomic_set(&inst->tainted, 0);
- list_add_tail(&inst->list, &instance_list);
-
- for (i = 0; i < iface->num_channels; i++) {
- const char *name_suffix = iface->channel_vector[i].name_suffix;
-
- if (!name_suffix)
- snprintf(channel_name, STRING_SIZE, "ch%d", i);
- else if (name_suffix[0] == '@')
- snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
- name_suffix);
- else
- snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
-
- /* this increments the reference count of this instance */
- c = create_most_c_obj(channel_name, &inst->kobj);
- if (!c)
- goto free_instance;
- inst->channel[i] = c;
- c->is_starving = 0;
- c->iface = iface;
- c->inst = inst;
- c->channel_id = i;
- c->keep_mbo = false;
- c->enqueue_halt = false;
- c->is_poisoned = false;
- c->cfg.direction = 0;
- c->cfg.data_type = 0;
- c->cfg.num_buffers = 0;
- c->cfg.buffer_size = 0;
- c->cfg.subbuffer_size = 0;
- c->cfg.packets_per_xact = 0;
- spin_lock_init(&c->fifo_lock);
- INIT_LIST_HEAD(&c->fifo);
- INIT_LIST_HEAD(&c->trash_fifo);
- INIT_LIST_HEAD(&c->halt_fifo);
- init_completion(&c->cleanup);
- atomic_set(&c->mbo_ref, 0);
- mutex_init(&c->start_mutex);
- mutex_init(&c->stop_task_mutex);
- list_add_tail(&c->list, &inst->channel_list);
- }
- pr_info("registered new MOST device mdev%d (%s)\n",
- inst->dev_id, iface->description);
- return &inst->kobj;
-
-free_instance:
- pr_info("Failed allocate channel(s)\n");
- list_del(&inst->list);
- destroy_most_inst_obj(inst);
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL_GPL(most_register_interface);
-
-/**
- * most_deregister_interface - deregisters an interface with core
- * @iface: pointer to the interface instance description.
- *
- * Before removing an interface instance from the list, all running
- * channels are stopped and poisoned.
- */
-void most_deregister_interface(struct most_interface *iface)
-{
- struct most_inst_obj *i = iface->priv;
- struct most_c_obj *c;
-
- mutex_lock(&deregister_mutex);
- if (unlikely(!i)) {
- pr_info("Bad Interface\n");
- mutex_unlock(&deregister_mutex);
- return;
- }
- pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
- iface->description);
-
- atomic_set(&i->tainted, 1);
- mutex_unlock(&deregister_mutex);
-
- while (modref) {
- if (iface->mod && modref)
- module_put(iface->mod);
- modref--;
- }
-
- list_for_each_entry(c, &i->channel_list, list) {
- if (c->aim0.refs + c->aim1.refs <= 0)
- continue;
-
- mutex_lock(&c->stop_task_mutex);
- if (c->hdm_enqueue_task)
- kthread_stop(c->hdm_enqueue_task);
- c->hdm_enqueue_task = NULL;
- mutex_unlock(&c->stop_task_mutex);
-
- if (iface->poison_channel(iface, c->channel_id))
- pr_err("Can't poison channel %d\n", c->channel_id);
- }
- ida_simple_remove(&mdev_id, i->dev_id);
- list_del(&i->list);
- destroy_most_inst_obj(i);
-}
-EXPORT_SYMBOL_GPL(most_deregister_interface);
-
-/**
- * most_stop_enqueue - prevents core from enqueueing MBOs
- * @iface: pointer to interface
- * @id: channel id
- *
- * This is called by an HDM that _cannot_ attend to its duties and
- * is about to be overrun by the core. The core will not enqueue
- * any further packets until the flagging HDM calls
- * most_resume_enqueue().
- */
-void most_stop_enqueue(struct most_interface *iface, int id)
-{
- struct most_c_obj *c = get_channel_by_iface(iface, id);
-
- if (likely(c))
- c->enqueue_halt = true;
-}
-EXPORT_SYMBOL_GPL(most_stop_enqueue);
-
-/**
- * most_resume_enqueue - allow core to enqueue MBOs again
- * @iface: pointer to interface
- * @id: channel id
- *
- * This clears the enqueue halt flag and enqueues all MBOs currently
- * sitting in the wait fifo.
- */
-void most_resume_enqueue(struct most_interface *iface, int id)
-{
- struct most_c_obj *c = get_channel_by_iface(iface, id);
-
- if (unlikely(!c))
- return;
- c->enqueue_halt = false;
-
- wake_up_interruptible(&c->hdm_fifo_wq);
-}
-EXPORT_SYMBOL_GPL(most_resume_enqueue);
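/*
 * Editor's illustrative sketch (not part of the original file): how a
 * hypothetical HDM might pair the two calls above for flow control when
 * its hardware Tx FIFO fills up and later drains again.  The example_*
 * function names are made up.
 */
static void example_hdm_tx_fifo_full(struct most_interface *iface, int id)
{
	most_stop_enqueue(iface, id);	/* core stops handing MBOs to enqueue() */
}

static void example_hdm_tx_fifo_drained(struct most_interface *iface, int id)
{
	most_resume_enqueue(iface, id);	/* core flushes its wait fifo again */
}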
-
-static int __init most_init(void)
-{
- pr_info("init()\n");
- INIT_LIST_HEAD(&instance_list);
- INIT_LIST_HEAD(&aim_list);
- mutex_init(&deregister_mutex);
- ida_init(&mdev_id);
-
- if (bus_register(&most_bus)) {
- pr_info("Cannot register most bus\n");
- goto exit;
- }
-
- most_class = class_create(THIS_MODULE, "most");
- if (IS_ERR(most_class)) {
- pr_info("No udev support.\n");
- goto exit_bus;
- }
- if (driver_register(&mostcore)) {
- pr_info("Cannot register core driver\n");
- goto exit_class;
- }
-
- class_glue_dir =
- device_create(most_class, NULL, 0, NULL, "mostcore");
- if (!class_glue_dir)
- goto exit_driver;
-
- most_aim_kset =
- kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
- if (!most_aim_kset)
- goto exit_class_container;
-
- most_inst_kset =
- kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
- if (!most_inst_kset)
- goto exit_driver_kset;
-
- return 0;
-
-exit_driver_kset:
- kset_unregister(most_aim_kset);
-exit_class_container:
- device_destroy(most_class, 0);
-exit_driver:
- driver_unregister(&mostcore);
-exit_class:
- class_destroy(most_class);
-exit_bus:
- bus_unregister(&most_bus);
-exit:
- return -ENOMEM;
-}
-
-static void __exit most_exit(void)
-{
- struct most_inst_obj *i, *i_tmp;
- struct most_aim_obj *d, *d_tmp;
-
- pr_info("exit core module\n");
- list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
- destroy_most_aim_obj(d);
- }
-
- list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
- list_del(&i->list);
- destroy_most_inst_obj(i);
- }
- kset_unregister(most_inst_kset);
- kset_unregister(most_aim_kset);
- device_destroy(most_class, 0);
- driver_unregister(&mostcore);
- class_destroy(most_class);
- bus_unregister(&most_bus);
- ida_destroy(&mdev_id);
-}
-
-module_init(most_init);
-module_exit(most_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
-MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
deleted file mode 100644
index 9bd4c779f1b6..000000000000
--- a/drivers/staging/most/mostcore/mostcore.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * mostcore.h - Interface between MostCore,
- * Hardware Dependent Module (HDM) and Application Interface Module (AIM).
- *
- * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/*
- * Authors:
- * Andrey Shvetsov <andrey.shvetsov@k2l.de>
- * Christian Gromm <christian.gromm@microchip.com>
- * Sebastian Graf
- */
-
-#ifndef __MOST_CORE_H__
-#define __MOST_CORE_H__
-
-#include <linux/types.h>
-
-struct kobject;
-struct module;
-
-/**
- * Interface type
- */
-enum most_interface_type {
- ITYPE_LOOPBACK = 1,
- ITYPE_I2C,
- ITYPE_I2S,
- ITYPE_TSI,
- ITYPE_HBI,
- ITYPE_MEDIALB_DIM,
- ITYPE_MEDIALB_DIM2,
- ITYPE_USB,
- ITYPE_PCIE
-};
-
-/**
- * Channel direction.
- */
-enum most_channel_direction {
- MOST_CH_RX = 1 << 0,
- MOST_CH_TX = 1 << 1,
-};
-
-/**
- * Channel data type.
- */
-enum most_channel_data_type {
- MOST_CH_CONTROL = 1 << 0,
- MOST_CH_ASYNC = 1 << 1,
- MOST_CH_ISOC_AVP = 1 << 2,
- MOST_CH_SYNC = 1 << 5,
-};
-
-
-enum mbo_status_flags {
- /* MBO was processed successfully (data was sent or received) */
- MBO_SUCCESS = 0,
- /* The MBO contains wrong or missing information. */
- MBO_E_INVAL,
- /* MBO was completed as HDM Channel will be closed */
- MBO_E_CLOSE,
-};
-
-/**
- * struct most_channel_capability - Channel capability
- * @direction: Supported channel directions.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_direction. Zero is allowed value and means
- * "channel may not be used".
- * @data_type: Supported channel data types.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_data_type. Zero is allowed value and means
- * "channel may not be used".
- * @num_buffers_packet: Maximum number of buffers supported by this channel
- * for packet data types (Async,Control,QoS)
- * @buffer_size_packet: Maximum buffer size supported by this channel
- * for packet data types (Async,Control,QoS)
- * @num_buffers_streaming: Maximum number of buffers supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @buffer_size_streaming: Maximum buffer size supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @name_suffix: Optional suffix provided by an HDM that is attached to the
- * regular channel name.
- *
- * Describes the capabilities of a MostCore channel like supported Data Types
- * and directions. This information is provided by an HDM for the MostCore.
- *
- * The Core creates read only sysfs attribute files in
- * /sys/devices/virtual/most/mostcore/devices/mdev-#/mdev#-ch#/ with the
- * following attributes:
- * -available_directions
- * -available_datatypes
- * -number_of_packet_buffers
- * -number_of_stream_buffers
- * -size_of_packet_buffer
- * -size_of_stream_buffer
- * where the content of each file is a string listing all supported properties
- * of that channel attribute.
- */
-struct most_channel_capability {
- u16 direction;
- u16 data_type;
- u16 num_buffers_packet;
- u16 buffer_size_packet;
- u16 num_buffers_streaming;
- u16 buffer_size_streaming;
- char *name_suffix;
-};
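/*
 * Editor's illustrative sketch (not part of the original header): how a
 * hypothetical HDM might fill in one packet-capable channel capability.
 * All numeric values and the "@ep_81" suffix are made-up assumptions;
 * real limits come from the hardware.
 */
static struct most_channel_capability example_caps[] = {
	{
		.direction = MOST_CH_RX | MOST_CH_TX,
		.data_type = MOST_CH_CONTROL | MOST_CH_ASYNC,
		.num_buffers_packet = 32,
		.buffer_size_packet = 2048,
		.num_buffers_streaming = 0,
		.buffer_size_streaming = 0,
		.name_suffix = "@ep_81",	/* optional; may be NULL */
	},
};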
-
-/**
- * struct most_channel_config - stores channel configuration
- * @direction: direction of the channel
- * @data_type: data type travelling over this channel
- * @num_buffers: number of buffers
- * @buffer_size: size of a buffer for the AIM.
- * The buffer size may be cut down by the HDM in its configure callback
- * to match a given interface and channel type.
- * @extra_len: additional buffer space for internal HDM purposes like padding.
- * May be set by HDM in a configure callback if needed.
- * @subbuffer_size: size of a subbuffer
- * @packets_per_xact: number of MOST frames that are packed inside one USB
- * packet. This is USB-specific.
- *
- * Describes the configuration for a MostCore channel. This information is
- * provided by the MostCore to an HDM (like the Medusa PCIe Interface) as a
- * parameter of the "configure" function call.
- */
-struct most_channel_config {
- enum most_channel_direction direction;
- enum most_channel_data_type data_type;
- u16 num_buffers;
- u16 buffer_size;
- u16 extra_len;
- u16 subbuffer_size;
- u16 packets_per_xact;
-};
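/*
 * Editor's illustrative sketch (not part of the original header): a
 * hypothetical HDM configure() callback, matching the ->configure
 * signature declared further down, that caps buffer_size at an assumed
 * hardware limit and requests padding via extra_len, as the field
 * descriptions above allow.  The limit and padding values are made up.
 */
static int example_hdm_configure(struct most_interface *iface, int ch_idx,
				 struct most_channel_config *cfg)
{
	if (cfg->buffer_size > 2048)	/* assumed hardware maximum */
		cfg->buffer_size = 2048;
	cfg->extra_len = 4;		/* assumed internal padding need */
	return 0;
}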
-
-/*
- * struct mbo - MOST Buffer Object.
- * @context: context for core completion handler
- * @priv: private data for HDM
- *
- * public: documented fields that are used for the communications
- * between MostCore and HDMs
- *
- * @list: list head for use by the mbo's current owner
- * @ifp: (in) associated interface instance
- * @hdm_channel_id: (in) HDM channel instance
- * @virt_address: (in) kernel virtual address of the buffer
- * @bus_address: (in) bus address of the buffer
- * @buffer_length: (in) buffer payload length
- * @processed_length: (out) processed length
- * @status: (out) transfer status
- * @complete: (in) completion routine
- *
- * The MostCore allocates and initializes the MBO.
- *
- * The HDM receives MBO for transfer from MostCore with the call to enqueue().
- * The HDM copies the data to or from the buffer depending on the configured
- * channel direction, sets "processed_length" and "status", and completes
- * the transfer procedure by calling the completion routine.
- *
- * At the end the MostCore deallocates the MBO or recycles it for further
- * transfers for the same or different HDM.
- *
- * Directions of usage:
- *
- * I.
- * The core driver should never access any MBO fields (even if marked
- * as "public") while the MBO is owned by an HDM. The ownership starts with
- * the call of enqueue() and ends with the call of its complete() routine.
- *
- * II.
- * Every HDM attached to the core driver _must_ ensure that it returns any MBO
- * it owns (due to a previous call to enqueue() by the core driver) before it
- * de-registers an interface or gets unloaded from the kernel. If this direction
- * is violated memory leaks will occur, since the core driver does _not_ track
- * MBOs it is currently not in control of.
- *
- */
-struct mbo {
- void *context;
- void *priv;
- struct list_head list;
- struct most_interface *ifp;
- int *num_buffers_ptr;
- u16 hdm_channel_id;
- void *virt_address;
- dma_addr_t bus_address;
- u16 buffer_length;
- u16 processed_length;
- enum mbo_status_flags status;
- void (*complete)(struct mbo *);
-};
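/*
 * Editor's illustrative sketch (not part of the original header): how a
 * hypothetical HDM might hand a received MBO back to the core, following
 * the ownership rules described above.  'bytes_received' is assumed to
 * come from the hardware.
 */
static void example_hdm_complete_rx(struct mbo *mbo, u16 bytes_received)
{
	mbo->processed_length = bytes_received;	/* (out) fields set by the HDM */
	mbo->status = MBO_SUCCESS;
	mbo->complete(mbo);			/* ownership returns to the core */
}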
-
-/**
- * Interface instance description.
- *
- * Describes one instance of an interface like Medusa PCIe or Vantage USB.
- * This structure is allocated and initialized in the HDM. MostCore may not
- * modify this structure.
- *
- * @interface Interface type. \sa most_interface_type.
- * @description PRELIMINARY.
- * Unique description of the device instance from the point of view of the
- * interface, in free text form (ASCII).
- * It may be a hexadecimal representation of the memory address for the MediaLB
- * IP, the USB device ID with USB properties for a USB interface, etc.
- * @num_channels Number of channels and size of the channel_vector.
- * @channel_vector Properties of the channels.
- * The array index is used as the channel ID by the driver.
- * @configure Callback to change the data type for a channel of the
- * interface instance. May be zero if the instance of the interface is not
- * configurable. The channel_config parameter describes the direction and
- * data type for the channel, as configured by the higher level.
- * @enqueue Delivers an MBO to the HDM for processing.
- * After the HDM completes the Rx or Tx operation, the processed MBO shall
- * be returned to the MostCore using the completion routine.
- * The reason an MBO may be delivered by the MostCore after the channel
- * has been poisoned is the re-opening of the channel by the application.
- * In this case the HDM shall hold the MBOs and service the channel as usual.
- * The HDM must be able to hold at least one MBO for each channel.
- * The callback returns a negative value on error, otherwise 0.
- * @poison_channel Informs the HDM that the channel is being closed. The HDM
- * shall cancel all transfers and synchronously or asynchronously return
- * all MBOs enqueued for this channel using the completion routine.
- * The callback returns a negative value on error, otherwise 0.
- * @request_netinfo: triggers retrieving of network info from the HDM by
- * means of "Message exchange over MDP/MEP"
- * @priv Private field used by mostcore to store context information.
- */
-struct most_interface {
- struct module *mod;
- enum most_interface_type interface;
- const char *description;
- int num_channels;
- struct most_channel_capability *channel_vector;
- int (*configure)(struct most_interface *iface, int channel_idx,
- struct most_channel_config *channel_config);
- int (*enqueue)(struct most_interface *iface, int channel_idx,
- struct mbo *mbo);
- int (*poison_channel)(struct most_interface *iface, int channel_idx);
- void (*request_netinfo)(struct most_interface *iface, int channel_idx);
- void *priv;
-};
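/*
 * Editor's illustrative sketch (not part of the original header): a
 * hypothetical HDM describing itself to the core.  example_caps and
 * example_hdm_configure are the sketches above; example_enqueue and
 * example_poison are assumed to be provided elsewhere by the HDM, and
 * the description string is made up.
 */
static struct most_interface example_iface = {
	.mod = THIS_MODULE,
	.interface = ITYPE_USB,
	.description = "usb-example-device",
	.num_channels = ARRAY_SIZE(example_caps),
	.channel_vector = example_caps,
	.configure = example_hdm_configure,
	.enqueue = example_enqueue,
	.poison_channel = example_poison,
};

/* In the HDM's probe path: kobj = most_register_interface(&example_iface); */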
-
-/**
- * struct most_aim - identifies MOST device driver to mostcore
- * @name: Driver name
- * @probe_channel: function for core to notify driver about channel connection
- * @disconnect_channel: notification that a certain channel isn't available anymore
- * @rx_completion: completion handler for received packets
- * @tx_completion: completion handler for transmitted packets
- * @context: context pointer to be used by mostcore
- */
-struct most_aim {
- const char *name;
- int (*probe_channel)(struct most_interface *iface, int channel_idx,
- struct most_channel_config *cfg,
- struct kobject *parent, char *name);
- int (*disconnect_channel)(struct most_interface *iface,
- int channel_idx);
- int (*rx_completion)(struct mbo *mbo);
- int (*tx_completion)(struct most_interface *iface, int channel_idx);
- void *context;
-};
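/*
 * Editor's illustrative sketch (not part of the original header): a
 * hypothetical AIM handing its callbacks to the core.  The four
 * example_* callbacks are assumed to be implemented by the AIM; the
 * name is made up.
 */
static struct most_aim example_aim = {
	.name = "example-aim",
	.probe_channel = example_probe_channel,
	.disconnect_channel = example_disconnect_channel,
	.rx_completion = example_rx_completion,
	.tx_completion = example_tx_completion,
};

/* In the AIM's module_init(): most_register_aim(&example_aim); */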
-
-/**
- * most_register_interface - Registers instance of the interface.
- * @iface: Pointer to the interface instance description.
- *
- * Returns a pointer to the kobject of the generated instance.
- *
- * Note: HDM has to ensure that any reference held on the kobj is
- * released before deregistering the interface.
- */
-struct kobject *most_register_interface(struct most_interface *iface);
-
-/**
- * Deregisters an instance of the interface.
- * @iface Pointer to the interface instance description.
- */
-void most_deregister_interface(struct most_interface *iface);
-int most_submit_mbo(struct mbo *mbo);
-
-/**
- * most_stop_enqueue - prevents the core from enqueueing MBOs
- * @iface: pointer to interface
- * @channel_idx: channel index
- */
-void most_stop_enqueue(struct most_interface *iface, int channel_idx);
-
-/**
- * most_resume_enqueue - allow core to enqueue MBOs again
- * @iface: pointer to interface
- * @channel_idx: channel index
- *
- * This clears the enqueue halt flag and enqueues all MBOs currently
- * in wait fifo.
- */
-void most_resume_enqueue(struct most_interface *iface, int channel_idx);
-int most_register_aim(struct most_aim *aim);
-int most_deregister_aim(struct most_aim *aim);
-struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
- struct most_aim *);
-void most_put_mbo(struct mbo *mbo);
-int channel_has_mbo(struct most_interface *iface, int channel_idx);
-int most_start_channel(struct most_interface *iface, int channel_idx,
- struct most_aim *);
-int most_stop_channel(struct most_interface *iface, int channel_idx,
- struct most_aim *);
-
-
-#endif /* __MOST_CORE_H__ */
diff --git a/drivers/staging/most/net/Kconfig b/drivers/staging/most/net/Kconfig
new file mode 100644
index 000000000000..ed8ac7e076d1
--- /dev/null
+++ b/drivers/staging/most/net/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MOST Networking configuration
+#
+
+config MOST_NET
+ tristate "Net"
+ depends on NET
+
+ help
+ Say Y here if you want to communicate via a networking device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called most_net.
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
new file mode 100644
index 000000000000..1582c97eb204
--- /dev/null
+++ b/drivers/staging/most/net/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MOST_NET) += most_net.o
+
+most_net-objs := net.o
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
new file mode 100644
index 000000000000..1d1fe8bff7ee
--- /dev/null
+++ b/drivers/staging/most/net/net.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * net.c - Networking component for Mostcore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include <linux/most.h>
+
+#define MEP_HDR_LEN 8
+#define MDP_HDR_LEN 16
+#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)
+
+#define PMHL 5
+
+#define PMS_TELID_UNSEGM_MAMAC 0x0A
+#define PMS_FIFONO_MDP 0x01
+#define PMS_FIFONO_MEP 0x04
+#define PMS_MSGTYPE_DATA 0x04
+#define PMS_DEF_PRIO 0
+#define MEP_DEF_RETRY 15
+
+#define PMS_FIFONO_MASK 0x07
+#define PMS_FIFONO_SHIFT 3
+#define PMS_RETRY_SHIFT 4
+#define PMS_TELID_MASK 0x0F
+#define PMS_TELID_SHIFT 4
+
+#define HB(value) ((u8)((u16)(value) >> 8))
+#define LB(value) ((u8)(value))
+
+#define EXTRACT_BIT_SET(bitset_name, value) \
+ (((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)
+
+#define PMS_IS_MEP(buf, len) \
+ ((len) > MEP_HDR_LEN && \
+ EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
+
+static inline bool pms_is_mamac(char *buf, u32 len)
+{
+ return (len > MDP_HDR_LEN &&
+ EXTRACT_BIT_SET(PMS_FIFONO, buf[3]) == PMS_FIFONO_MDP &&
+ EXTRACT_BIT_SET(PMS_TELID, buf[14]) == PMS_TELID_UNSEGM_MAMAC);
+}
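/*
 * Editor's note (illustrative, not part of the original patch): for an MDP
 * frame built by skb_to_mamac() below, byte 3 of the buffer is
 * (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA = 0x0c, and
 * EXTRACT_BIT_SET(PMS_FIFONO, 0x0c) = (0x0c >> 3) & 0x07 = 0x01, i.e.
 * PMS_FIFONO_MDP again; a MEP frame carries 0x24 there and extracts to
 * PMS_FIFONO_MEP (0x04).
 */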
+
+struct net_dev_channel {
+ bool linked;
+ int ch_id;
+};
+
+struct net_dev_context {
+ struct most_interface *iface;
+ bool is_mamac;
+ struct net_device *dev;
+ struct net_dev_channel rx;
+ struct net_dev_channel tx;
+ struct list_head list;
+};
+
+static LIST_HEAD(net_devices);
+static DEFINE_MUTEX(probe_disc_mt); /* ch->linked = true, most_nd_open */
+static DEFINE_SPINLOCK(list_lock); /* list_head, ch->linked = false, dev_hold */
+static struct most_component comp;
+
+static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
+{
+ u8 *buff = mbo->virt_address;
+ static const u8 broadcast[] = { 0x03, 0xFF };
+ const u8 *dest_addr = skb->data + 4;
+ const u8 *eth_type = skb->data + 12;
+ unsigned int payload_len = skb->len - ETH_HLEN;
+ unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+
+ if (mdp_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
+ if (mbo->buffer_length < mdp_len) {
+ pr_err("drop: too small buffer! (%d for %d)\n",
+ mbo->buffer_length, mdp_len);
+ return -EINVAL;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ pr_err("drop: too small packet! (%d)\n", skb->len);
+ return -EINVAL;
+ }
+
+ if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
+ dest_addr = broadcast;
+
+ *buff++ = HB(mdp_len - 2);
+ *buff++ = LB(mdp_len - 2);
+
+ *buff++ = PMHL;
+ *buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+ *buff++ = PMS_DEF_PRIO;
+ *buff++ = dest_addr[0];
+ *buff++ = dest_addr[1];
+ *buff++ = 0x00;
+
+ *buff++ = HB(payload_len + 6);
+ *buff++ = LB(payload_len + 6);
+
+ /* end of FPH here */
+
+ *buff++ = eth_type[0];
+ *buff++ = eth_type[1];
+ *buff++ = 0;
+ *buff++ = 0;
+
+ *buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
+ *buff++ = LB(payload_len);
+
+ memcpy(buff, skb->data + ETH_HLEN, payload_len);
+ mbo->buffer_length = mdp_len;
+ return 0;
+}
+
+static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
+{
+ u8 *buff = mbo->virt_address;
+ unsigned int mep_len = skb->len + MEP_HDR_LEN;
+
+ if (mep_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
+ if (mbo->buffer_length < mep_len) {
+ pr_err("drop: too small buffer! (%d for %d)\n",
+ mbo->buffer_length, mep_len);
+ return -EINVAL;
+ }
+
+ *buff++ = HB(mep_len - 2);
+ *buff++ = LB(mep_len - 2);
+
+ *buff++ = PMHL;
+ *buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+ *buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
+ *buff++ = 0;
+ *buff++ = 0;
+ *buff++ = 0;
+
+ memcpy(buff, skb->data, skb->len);
+ mbo->buffer_length = mep_len;
+ return 0;
+}
+
+static int most_nd_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_dev_context *nd = netdev_priv(dev);
+ int err = eth_mac_addr(dev, p);
+
+ if (err)
+ return err;
+
+ nd->is_mamac =
+ (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
+ dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
+
+ /*
+ * Set default MTU for the given packet type.
+ * It is still possible to change MTU using ip tools afterwards.
+ */
+ dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
+
+ return 0;
+}
+
+static void on_netinfo(struct most_interface *iface,
+ unsigned char link_stat, unsigned char *mac_addr);
+
+static int most_nd_open(struct net_device *dev)
+{
+ struct net_dev_context *nd = netdev_priv(dev);
+ int ret = 0;
+
+ mutex_lock(&probe_disc_mt);
+
+ if (most_start_channel(nd->iface, nd->rx.ch_id, &comp)) {
+ netdev_err(dev, "most_start_channel() failed\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (most_start_channel(nd->iface, nd->tx.ch_id, &comp)) {
+ netdev_err(dev, "most_start_channel() failed\n");
+ most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ netif_carrier_off(dev);
+ if (is_valid_ether_addr(dev->dev_addr))
+ netif_dormant_off(dev);
+ else
+ netif_dormant_on(dev);
+ netif_wake_queue(dev);
+ if (nd->iface->request_netinfo)
+ nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);
+
+unlock:
+ mutex_unlock(&probe_disc_mt);
+ return ret;
+}
+
+static int most_nd_stop(struct net_device *dev)
+{
+ struct net_dev_context *nd = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ if (nd->iface->request_netinfo)
+ nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
+ most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
+ most_stop_channel(nd->iface, nd->tx.ch_id, &comp);
+
+ return 0;
+}
+
+static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_dev_context *nd = netdev_priv(dev);
+ struct mbo *mbo;
+ int ret;
+
+ mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &comp);
+
+ if (!mbo) {
+ netif_stop_queue(dev);
+ dev->stats.tx_fifo_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ if (nd->is_mamac)
+ ret = skb_to_mamac(skb, mbo);
+ else
+ ret = skb_to_mep(skb, mbo);
+
+ if (ret) {
+ most_put_mbo(mbo);
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ most_submit_mbo(mbo);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops most_nd_ops = {
+ .ndo_open = most_nd_open,
+ .ndo_stop = most_nd_stop,
+ .ndo_start_xmit = most_nd_start_xmit,
+ .ndo_set_mac_address = most_nd_set_mac_address,
+};
+
+static void most_nd_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->netdev_ops = &most_nd_ops;
+}
+
+static struct net_dev_context *get_net_dev(struct most_interface *iface)
+{
+ struct net_dev_context *nd;
+
+ list_for_each_entry(nd, &net_devices, list)
+ if (nd->iface == iface)
+ return nd;
+ return NULL;
+}
+
+static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
+{
+ struct net_dev_context *nd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ nd = get_net_dev(iface);
+ if (nd && nd->rx.linked && nd->tx.linked)
+ dev_hold(nd->dev);
+ else
+ nd = NULL;
+ spin_unlock_irqrestore(&list_lock, flags);
+ return nd;
+}
+
+static int comp_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg, char *name,
+ char *args)
+{
+ struct net_dev_context *nd;
+ struct net_dev_channel *ch;
+ struct net_device *dev;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!iface)
+ return -EINVAL;
+
+ if (ccfg->data_type != MOST_CH_ASYNC)
+ return -EINVAL;
+
+ mutex_lock(&probe_disc_mt);
+ nd = get_net_dev(iface);
+ if (!nd) {
+ dev = alloc_netdev(sizeof(struct net_dev_context), "meth%d",
+ NET_NAME_UNKNOWN, most_nd_setup);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ nd = netdev_priv(dev);
+ nd->iface = iface;
+ nd->dev = dev;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_add(&nd->list, &net_devices);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+ } else {
+ ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+ if (ch->linked) {
+ pr_err("direction is allocated\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (register_netdev(nd->dev)) {
+ pr_err("register_netdev() failed\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ }
+ ch->ch_id = channel_idx;
+ ch->linked = true;
+
+unlock:
+ mutex_unlock(&probe_disc_mt);
+ return ret;
+}
+
+static int comp_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct net_dev_context *nd;
+ struct net_dev_channel *ch;
+ unsigned long flags;
+ int ret = 0;
+
+ mutex_lock(&probe_disc_mt);
+ nd = get_net_dev(iface);
+ if (!nd) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
+ ch = &nd->rx;
+ } else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
+ ch = &nd->tx;
+ } else {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (nd->rx.linked && nd->tx.linked) {
+ spin_lock_irqsave(&list_lock, flags);
+ ch->linked = false;
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ /*
+ * do not call most_stop_channel() here, because channels are
+ * going to be closed in ndo_stop() after unregister_netdev()
+ */
+ unregister_netdev(nd->dev);
+ } else {
+ spin_lock_irqsave(&list_lock, flags);
+ list_del(&nd->list);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ free_netdev(nd->dev);
+ }
+
+unlock:
+ mutex_unlock(&probe_disc_mt);
+ return ret;
+}
+
+static int comp_resume_tx_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct net_dev_context *nd;
+
+ nd = get_net_dev_hold(iface);
+ if (!nd)
+ return 0;
+
+ if (nd->tx.ch_id != channel_idx)
+ goto put_nd;
+
+ netif_wake_queue(nd->dev);
+
+put_nd:
+ dev_put(nd->dev);
+ return 0;
+}
+
+static int comp_rx_data(struct mbo *mbo)
+{
+ const u32 zero = 0;
+ struct net_dev_context *nd;
+ char *buf = mbo->virt_address;
+ u32 len = mbo->processed_length;
+ struct sk_buff *skb;
+ struct net_device *dev;
+ unsigned int skb_len;
+ int ret = 0;
+
+ nd = get_net_dev_hold(mbo->ifp);
+ if (!nd)
+ return -EIO;
+
+ if (nd->rx.ch_id != mbo->hdm_channel_id) {
+ ret = -EIO;
+ goto put_nd;
+ }
+
+ dev = nd->dev;
+
+ if (nd->is_mamac) {
+ if (!pms_is_mamac(buf, len)) {
+ ret = -EIO;
+ goto put_nd;
+ }
+
+ skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
+ } else {
+ if (!PMS_IS_MEP(buf, len)) {
+ ret = -EIO;
+ goto put_nd;
+ }
+
+ skb = dev_alloc_skb(len - MEP_HDR_LEN);
+ }
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ pr_err_once("drop packet: no memory for skb\n");
+ goto out;
+ }
+
+ skb->dev = dev;
+
+ if (nd->is_mamac) {
+ /* dest */
+ ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);
+
+ /* src */
+ skb_put_data(skb, &zero, 4);
+ skb_put_data(skb, buf + 5, 2);
+
+ /* eth type */
+ skb_put_data(skb, buf + 10, 2);
+
+ buf += MDP_HDR_LEN;
+ len -= MDP_HDR_LEN;
+ } else {
+ buf += MEP_HDR_LEN;
+ len -= MEP_HDR_LEN;
+ }
+
+ skb_put_data(skb, buf, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb_len;
+ } else {
+ dev->stats.rx_dropped++;
+ }
+
+out:
+ most_put_mbo(mbo);
+
+put_nd:
+ dev_put(nd->dev);
+ return ret;
+}
+
+static struct most_component comp = {
+ .mod = THIS_MODULE,
+ .name = "net",
+ .probe_channel = comp_probe_channel,
+ .disconnect_channel = comp_disconnect_channel,
+ .tx_completion = comp_resume_tx_channel,
+ .rx_completion = comp_rx_data,
+};
+
+static int __init most_net_init(void)
+{
+ int err;
+
+ err = most_register_component(&comp);
+ if (err)
+ return err;
+ err = most_register_configfs_subsys(&comp);
+ if (err) {
+ most_deregister_component(&comp);
+ return err;
+ }
+ return 0;
+}
+
+static void __exit most_net_exit(void)
+{
+ most_deregister_configfs_subsys(&comp);
+ most_deregister_component(&comp);
+}
+
+/**
+ * on_netinfo - callback used by the HDM to report link status and the HW's MAC
+ * @iface: most interface instance
+ * @link_stat: link status
+ * @mac_addr: MAC address
+ */
+static void on_netinfo(struct most_interface *iface,
+ unsigned char link_stat, unsigned char *mac_addr)
+{
+ struct net_dev_context *nd;
+ struct net_device *dev;
+ const u8 *m = mac_addr;
+
+ nd = get_net_dev_hold(iface);
+ if (!nd)
+ return;
+
+ dev = nd->dev;
+
+ if (link_stat)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ if (m && is_valid_ether_addr(m)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ netdev_info(dev, "set mac %pM\n", m);
+ eth_hw_addr_set(dev, m);
+ netif_dormant_off(dev);
+ } else if (!ether_addr_equal(dev->dev_addr, m)) {
+ netdev_warn(dev, "reject mac %pM\n", m);
+ }
+ }
+
+ dev_put(nd->dev);
+}
+
+module_init(most_net_init);
+module_exit(most_net_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("Networking Component Module for Mostcore");
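A minimal user-space sketch (not part of the patch) of how the meth%d interface registered above might be configured. The interface name "meth0", the example MAC, and the MTU value are assumptions for illustration only; the ioctl calls are interchangeable with the ip tools mentioned in most_nd_set_mac_address().

/* Configure the MOST network interface from user space (illustrative only). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_arp.h>

int main(void)
{
	/* A MAC whose first four bytes are zero selects MAMAC framing. */
	const unsigned char mac[6] = { 0x00, 0x00, 0x00, 0x00, 0x12, 0x34 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "meth0", IFNAMSIZ - 1);
	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
	memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
	if (ioctl(fd, SIOCSIFHWADDR, &ifr) < 0)
		perror("SIOCSIFHWADDR");

	/* most_nd_set_mac_address() already picked a matching default MTU;
	 * it may still be overridden here if needed. */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "meth0", IFNAMSIZ - 1);
	ifr.ifr_mtu = 1500;
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
		perror("SIOCSIFMTU");

	close(fd);
	return 0;
}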
diff --git a/drivers/staging/most/video/Kconfig b/drivers/staging/most/video/Kconfig
new file mode 100644
index 000000000000..e16cc5e104b7
--- /dev/null
+++ b/drivers/staging/most/video/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MOST V4L2 configuration
+#
+
+config MOST_VIDEO
+ tristate "Video"
+ depends on VIDEO_DEV
+ help
+	  Say Y here if you want to communicate via Video4Linux.
+
+ To compile this driver as a module, choose M here: the
+ module will be called most_video.
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
new file mode 100644
index 000000000000..856175fec8b6
--- /dev/null
+++ b/drivers/staging/most/video/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MOST_VIDEO) += most_video.o
+
+most_video-objs := video.o
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
new file mode 100644
index 000000000000..32f71d9a9cf7
--- /dev/null
+++ b/drivers/staging/most/video/video.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * video.c - V4L2 component for Mostcore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <linux/most.h>
+
+#define V4L2_CMP_MAX_INPUT 1
+
+static struct most_component comp;
+
+struct most_video_dev {
+ struct most_interface *iface;
+ int ch_idx;
+ struct list_head list;
+ bool mute;
+
+ struct list_head pending_mbos;
+ spinlock_t list_lock;
+
+ struct v4l2_device v4l2_dev;
+ atomic_t access_ref;
+ struct video_device *vdev;
+ unsigned int ctrl_input;
+
+ struct mutex lock;
+
+ wait_queue_head_t wait_data;
+};
+
+struct comp_fh {
+ /* must be the first field of this struct! */
+ struct v4l2_fh fh;
+ struct most_video_dev *mdev;
+ u32 offs;
+};
+
+static inline struct comp_fh *to_comp_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct comp_fh, fh);
+}
+
+static LIST_HEAD(video_devices);
+static DEFINE_SPINLOCK(list_lock);
+
+static inline bool data_ready(struct most_video_dev *mdev)
+{
+ return !list_empty(&mdev->pending_mbos);
+}
+
+static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
+{
+ return list_first_entry(&mdev->pending_mbos, struct mbo, list);
+}
+
+static int comp_vdev_open(struct file *filp)
+{
+ int ret;
+ struct video_device *vdev = video_devdata(filp);
+ struct most_video_dev *mdev = video_drvdata(filp);
+ struct comp_fh *fh;
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_VIDEO:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+
+ if (!atomic_inc_and_test(&mdev->access_ref)) {
+ v4l2_err(&mdev->v4l2_dev, "too many clients\n");
+ ret = -EBUSY;
+ goto err_dec;
+ }
+
+ fh->mdev = mdev;
+ v4l2_fh_init(&fh->fh, vdev);
+ v4l2_fh_add(&fh->fh, filp);
+
+ ret = most_start_channel(mdev->iface, mdev->ch_idx, &comp);
+ if (ret) {
+ v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
+ goto err_rm;
+ }
+
+ return 0;
+
+err_rm:
+ v4l2_fh_del(&fh->fh, filp);
+ v4l2_fh_exit(&fh->fh);
+
+err_dec:
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return ret;
+}
+
+static int comp_vdev_close(struct file *filp)
+{
+ struct comp_fh *fh = to_comp_fh(filp);
+ struct most_video_dev *mdev = fh->mdev;
+ struct mbo *mbo, *tmp;
+
+ /*
+	 * We need to put the MBOs back before calling most_stop_channel(),
+	 * which deallocates them.
+	 * On the other hand, mostcore keeps calling rx_completion() to
+	 * deliver MBOs until most_stop_channel() is called.
+	 * Use the mute flag to work around this issue.
+	 * This should be handled in the core.
+ */
+
+ spin_lock_irq(&mdev->list_lock);
+ mdev->mute = true;
+ list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
+ list_del(&mbo->list);
+ spin_unlock_irq(&mdev->list_lock);
+ most_put_mbo(mbo);
+ spin_lock_irq(&mdev->list_lock);
+ }
+ spin_unlock_irq(&mdev->list_lock);
+ most_stop_channel(mdev->iface, mdev->ch_idx, &comp);
+ mdev->mute = false;
+
+ v4l2_fh_del(&fh->fh, filp);
+ v4l2_fh_exit(&fh->fh);
+
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return 0;
+}
+
+static ssize_t comp_vdev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct comp_fh *fh = to_comp_fh(filp);
+ struct most_video_dev *mdev = fh->mdev;
+ int ret = 0;
+
+ if (*pos)
+ return -ESPIPE;
+
+ if (!mdev)
+ return -ENODEV;
+
+ /* wait for the first buffer */
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
+ return -ERESTARTSYS;
+ }
+
+ if (!data_ready(mdev))
+ return -EAGAIN;
+
+ while (count > 0 && data_ready(mdev)) {
+ struct mbo *const mbo = get_top_mbo(mdev);
+ int const rem = mbo->processed_length - fh->offs;
+ int const cnt = rem < count ? rem : count;
+
+ if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
+ v4l2_err(&mdev->v4l2_dev, "read: copy_to_user failed\n");
+ if (!ret)
+ ret = -EFAULT;
+ return ret;
+ }
+
+ fh->offs += cnt;
+ count -= cnt;
+ buf += cnt;
+ ret += cnt;
+
+ if (cnt >= rem) {
+ fh->offs = 0;
+ spin_lock_irq(&mdev->list_lock);
+ list_del(&mbo->list);
+ spin_unlock_irq(&mdev->list_lock);
+ most_put_mbo(mbo);
+ }
+ }
+ return ret;
+}
+
+static __poll_t comp_vdev_poll(struct file *filp, poll_table *wait)
+{
+ struct comp_fh *fh = to_comp_fh(filp);
+ struct most_video_dev *mdev = fh->mdev;
+ __poll_t mask = 0;
+
+ /* only wait if no data is available */
+ if (!data_ready(mdev))
+ poll_wait(filp, &mdev->wait_data, wait);
+ if (data_ready(mdev))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+}
+
+static void comp_set_format_struct(struct v4l2_format *f)
+{
+ f->fmt.pix.width = 8;
+ f->fmt.pix.height = 8;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = 188 * 2;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.priv = 0;
+}
+
+static int comp_set_format(struct most_video_dev *mdev, unsigned int cmd,
+ struct v4l2_format *format)
+{
+ if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
+ return -EINVAL;
+
+ if (cmd == VIDIOC_TRY_FMT)
+ return 0;
+
+ comp_set_format_struct(format);
+
+ return 0;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ strscpy(cap->driver, "v4l2_component", sizeof(cap->driver));
+ strscpy(cap->card, "MOST", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "%s", mdev->iface->description);
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ strscpy(f->description, "MPEG", sizeof(f->description));
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED;
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ comp_set_format_struct(f);
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return comp_set_format(mdev, VIDIOC_TRY_FMT, f);
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return comp_set_format(mdev, VIDIOC_S_FMT, f);
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+{
+ *norm = V4L2_STD_UNKNOWN;
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ if (input->index >= V4L2_CMP_MAX_INPUT)
+ return -EINVAL;
+
+ strscpy(input->name, "MOST Video", sizeof(input->name));
+ input->type |= V4L2_INPUT_TYPE_CAMERA;
+ input->audioset = 0;
+
+ input->std = mdev->vdev->tvnorms;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+ *i = mdev->ctrl_input;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ if (index >= V4L2_CMP_MAX_INPUT)
+ return -EINVAL;
+ mdev->ctrl_input = index;
+ return 0;
+}
+
+static const struct v4l2_file_operations comp_fops = {
+ .owner = THIS_MODULE,
+ .open = comp_vdev_open,
+ .release = comp_vdev_close,
+ .read = comp_vdev_read,
+ .poll = comp_vdev_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+};
+
+static const struct video_device comp_videodev_template = {
+ .fops = &comp_fops,
+ .release = video_device_release,
+ .ioctl_ops = &video_ioctl_ops,
+ .tvnorms = V4L2_STD_UNKNOWN,
+ .device_caps = V4L2_CAP_READWRITE | V4L2_CAP_VIDEO_CAPTURE,
+};
+
+/**************************************************************************/
+
+static struct most_video_dev *get_comp_dev(struct most_interface *iface, int channel_idx)
+{
+ struct most_video_dev *mdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(mdev, &video_devices, list) {
+ if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ return mdev;
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+ return NULL;
+}
+
+static int comp_rx_data(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_video_dev *mdev =
+ get_comp_dev(mbo->ifp, mbo->hdm_channel_id);
+
+ if (!mdev)
+ return -EIO;
+
+ spin_lock_irqsave(&mdev->list_lock, flags);
+ if (unlikely(mdev->mute)) {
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
+ return -EIO;
+ }
+
+ list_add_tail(&mbo->list, &mdev->pending_mbos);
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
+ wake_up_interruptible(&mdev->wait_data);
+ return 0;
+}
+
+static int comp_register_videodev(struct most_video_dev *mdev)
+{
+ int ret;
+
+ init_waitqueue_head(&mdev->wait_data);
+
+ /* allocate and fill v4l2 video struct */
+ mdev->vdev = video_device_alloc();
+ if (!mdev->vdev)
+ return -ENOMEM;
+
+ /* Fill the video capture device struct */
+ *mdev->vdev = comp_videodev_template;
+ mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
+ mdev->vdev->lock = &mdev->lock;
+ snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
+ mdev->v4l2_dev.name);
+
+ /* Register the v4l2 device */
+ video_set_drvdata(mdev->vdev, mdev);
+ ret = video_register_device(mdev->vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
+ ret);
+ video_device_release(mdev->vdev);
+ }
+
+ return ret;
+}
+
+static void comp_unregister_videodev(struct most_video_dev *mdev)
+{
+ video_unregister_device(mdev->vdev);
+}
+
+static void comp_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct most_video_dev *mdev =
+ container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ kfree(mdev);
+}
+
+static int comp_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg, char *name,
+ char *args)
+{
+ int ret;
+ struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
+
+ if (mdev) {
+ pr_err("Channel already linked\n");
+ return -EEXIST;
+ }
+
+ if (ccfg->direction != MOST_CH_RX) {
+ pr_err("Wrong direction, expected rx\n");
+ return -EINVAL;
+ }
+
+ if (ccfg->data_type != MOST_CH_SYNC &&
+ ccfg->data_type != MOST_CH_ISOC) {
+ pr_err("Wrong channel type, expected sync or isoc\n");
+ return -EINVAL;
+ }
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mutex_init(&mdev->lock);
+ atomic_set(&mdev->access_ref, -1);
+ spin_lock_init(&mdev->list_lock);
+ INIT_LIST_HEAD(&mdev->pending_mbos);
+ mdev->iface = iface;
+ mdev->ch_idx = channel_idx;
+ mdev->v4l2_dev.release = comp_v4l2_dev_release;
+
+ /* Create the v4l2_device */
+ strscpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
+ ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
+ if (ret) {
+ pr_err("v4l2_device_register() failed\n");
+ kfree(mdev);
+ return ret;
+ }
+
+ ret = comp_register_videodev(mdev);
+ if (ret)
+ goto err_unreg;
+
+ spin_lock_irq(&list_lock);
+ list_add(&mdev->list, &video_devices);
+ spin_unlock_irq(&list_lock);
+ return 0;
+
+err_unreg:
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return ret;
+}
+
+static int comp_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
+
+ if (!mdev) {
+ pr_err("no such channel is linked\n");
+ return -ENOENT;
+ }
+
+ spin_lock_irq(&list_lock);
+ list_del(&mdev->list);
+ spin_unlock_irq(&list_lock);
+
+ comp_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return 0;
+}
+
+static struct most_component comp = {
+ .mod = THIS_MODULE,
+ .name = "video",
+ .probe_channel = comp_probe_channel,
+ .disconnect_channel = comp_disconnect_channel,
+ .rx_completion = comp_rx_data,
+};
+
+static int __init comp_init(void)
+{
+ int err;
+
+ err = most_register_component(&comp);
+ if (err)
+ return err;
+ err = most_register_configfs_subsys(&comp);
+ if (err) {
+ most_deregister_component(&comp);
+ return err;
+ }
+ return 0;
+}
+
+static void __exit comp_exit(void)
+{
+ struct most_video_dev *mdev, *tmp;
+
+ /*
+	 * Since mostcore currently does not call disconnect_channel()
+	 * for linked channels when most_deregister_component() is called,
+	 * we simulate that call here.
+	 * This should be fixed in the core.
+ */
+ spin_lock_irq(&list_lock);
+ list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+ list_del(&mdev->list);
+ spin_unlock_irq(&list_lock);
+
+ comp_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ spin_lock_irq(&list_lock);
+ }
+ spin_unlock_irq(&list_lock);
+
+ most_deregister_configfs_subsys(&comp);
+ most_deregister_component(&comp);
+ BUG_ON(!list_empty(&video_devices));
+}
+
+module_init(comp_init);
+module_exit(comp_exit);
+
+MODULE_DESCRIPTION("V4L2 Component Module for Mostcore");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_LICENSE("GPL");
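A minimal user-space sketch (not part of the patch) showing how the read()-based capture node registered by this component might be consumed. The device path /dev/video0 and the buffer size are assumptions for illustration; the format is fixed to MPEG-TS by comp_set_format_struct() above.

/* Query and read the MOST V4L2 capture device (illustrative only). */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	unsigned char buf[188 * 2];	/* matches the advertised sizeimage */
	ssize_t n;
	int fd = open("/dev/video0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver: %s, card: %s\n",
		       (char *)cap.driver, (char *)cap.card);

	/* The component supports read() only (V4L2_CAP_READWRITE). */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes of MPEG-TS data\n", n);

	close(fd);
	return 0;
}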
diff --git a/drivers/staging/mt29f_spinand/Kconfig b/drivers/staging/mt29f_spinand/Kconfig
deleted file mode 100644
index f3f9cb3b5c35..000000000000
--- a/drivers/staging/mt29f_spinand/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config MTD_SPINAND_MT29F
- tristate "SPINAND Device Support for Micron"
- depends on MTD_NAND && SPI
- help
- This enables support for accessing Micron SPI NAND flash
- devices.
- If you have Micron SPI NAND chip say yes.
-
- If unsure, say no here.
-
-config MTD_SPINAND_ONDIEECC
- bool "Use SPINAND internal ECC"
- depends on MTD_SPINAND_MT29F
- help
- Internal ECC.
- Enables Hardware ECC support for Micron SPI NAND.
diff --git a/drivers/staging/mt29f_spinand/Makefile b/drivers/staging/mt29f_spinand/Makefile
deleted file mode 100644
index e47af0f7fda9..000000000000
--- a/drivers/staging/mt29f_spinand/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand.o
diff --git a/drivers/staging/mt29f_spinand/TODO b/drivers/staging/mt29f_spinand/TODO
deleted file mode 100644
index a2209b72d371..000000000000
--- a/drivers/staging/mt29f_spinand/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO:
- - Tested on XLP platform, needs to be tested on other platforms.
- - Checkpatch.pl cleanups
- - Sparce fixes.
- - Clean up coding style to meet kernel standard.
-
-Please send patches
-To:
-Kamlakant Patel <kamlakant.patel@broadcom.com>
-Cc:
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Mona Anonuevo <manonuevo@micron.com>
-linux-mtd@lists.infradead.org
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
deleted file mode 100644
index 1aa449e5fecf..000000000000
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ /dev/null
@@ -1,963 +0,0 @@
-/*
- * Copyright (c) 2003-2013 Broadcom Corporation
- *
- * Copyright (c) 2009-2010 Micron Technology, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand.h>
-#include <linux/spi/spi.h>
-
-#include "mt29f_spinand.h"
-
-#define BUFSIZE (10 * 64 * 2048)
-#define CACHE_BUF 2112
-/*
- * OOB area specification layout: Total 32 available free bytes.
- */
-
-static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
-{
- struct nand_chip *chip = (struct nand_chip *)mtd->priv;
- struct spinand_info *info = (struct spinand_info *)chip->priv;
- struct spinand_state *state = (struct spinand_state *)info->priv;
-
- return state;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-static int enable_hw_ecc;
-static int enable_read_hw_ecc;
-
-static struct nand_ecclayout spinand_oob_64 = {
- .eccbytes = 24,
- .eccpos = {
- 1, 2, 3, 4, 5, 6,
- 17, 18, 19, 20, 21, 22,
- 33, 34, 35, 36, 37, 38,
- 49, 50, 51, 52, 53, 54, },
- .oobavail = 32,
- .oobfree = {
- {.offset = 8,
- .length = 8},
- {.offset = 24,
- .length = 8},
- {.offset = 40,
- .length = 8},
- {.offset = 56,
- .length = 8},
- }
-};
-#endif
-
-/*
- * spinand_cmd - to process a command to send to the SPI Nand
- * Description:
- * Set up the command buffer to send to the SPI controller.
- * The command buffer has to initialized to 0.
- */
-
-static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd)
-{
- struct spi_message message;
- struct spi_transfer x[4];
- u8 dummy = 0xff;
-
- spi_message_init(&message);
- memset(x, 0, sizeof(x));
-
- x[0].len = 1;
- x[0].tx_buf = &cmd->cmd;
- spi_message_add_tail(&x[0], &message);
-
- if (cmd->n_addr) {
- x[1].len = cmd->n_addr;
- x[1].tx_buf = cmd->addr;
- spi_message_add_tail(&x[1], &message);
- }
-
- if (cmd->n_dummy) {
- x[2].len = cmd->n_dummy;
- x[2].tx_buf = &dummy;
- spi_message_add_tail(&x[2], &message);
- }
-
- if (cmd->n_tx) {
- x[3].len = cmd->n_tx;
- x[3].tx_buf = cmd->tx_buf;
- spi_message_add_tail(&x[3], &message);
- }
-
- if (cmd->n_rx) {
- x[3].len = cmd->n_rx;
- x[3].rx_buf = cmd->rx_buf;
- spi_message_add_tail(&x[3], &message);
- }
-
- return spi_sync(spi, &message);
-}
-
-/*
- * spinand_read_id- Read SPI Nand ID
- * Description:
- * Read ID: read two ID bytes from the SPI Nand device
- */
-static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
-{
- int retval;
- u8 nand_id[3];
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_READ_ID;
- cmd.n_rx = 3;
- cmd.rx_buf = &nand_id[0];
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "error %d reading id\n", retval);
- return retval;
- }
- id[0] = nand_id[1];
- id[1] = nand_id[2];
- return retval;
-}
-
-/*
- * spinand_read_status- send command 0xf to the SPI Nand status register
- * Description:
- * After read, write, or erase, the Nand device is expected to set the
- * busy status.
- * This function is to allow reading the status of the command: read,
- * write, and erase.
- * Once the status turns to be ready, the other status bits also are
- * valid status bits.
- */
-static int spinand_read_status(struct spi_device *spi_nand, uint8_t *status)
-{
- struct spinand_cmd cmd = {0};
- int ret;
-
- cmd.cmd = CMD_READ_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_STATUS;
- cmd.n_rx = 1;
- cmd.rx_buf = status;
-
- ret = spinand_cmd(spi_nand, &cmd);
- if (ret < 0)
- dev_err(&spi_nand->dev, "err: %d read status register\n", ret);
-
- return ret;
-}
-
-#define MAX_WAIT_JIFFIES (40 * HZ)
-static int wait_till_ready(struct spi_device *spi_nand)
-{
- unsigned long deadline;
- int retval;
- u8 stat = 0;
-
- deadline = jiffies + MAX_WAIT_JIFFIES;
- do {
- retval = spinand_read_status(spi_nand, &stat);
- if (retval < 0)
- return -1;
- else if (!(stat & 0x1))
- break;
-
- cond_resched();
- } while (!time_after_eq(jiffies, deadline));
-
- if ((stat & 0x1) == 0)
- return 0;
-
- return -1;
-}
-/**
- * spinand_get_otp- send command 0xf to read the SPI Nand OTP register
- * Description:
- * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
- * Enable chip internal ECC, set the bit to 1
- * Disable chip internal ECC, clear the bit to 0
- */
-static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
-{
- struct spinand_cmd cmd = {0};
- int retval;
-
- cmd.cmd = CMD_READ_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_OTP;
- cmd.n_rx = 1;
- cmd.rx_buf = otp;
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0)
- dev_err(&spi_nand->dev, "error %d get otp\n", retval);
- return retval;
-}
-
-/**
- * spinand_set_otp- send command 0x1f to write the SPI Nand OTP register
- * Description:
- * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
- * Enable chip internal ECC, set the bit to 1
- * Disable chip internal ECC, clear the bit to 0
- */
-static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
-{
- int retval;
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_WRITE_REG,
- cmd.n_addr = 1,
- cmd.addr[0] = REG_OTP,
- cmd.n_tx = 1,
- cmd.tx_buf = otp,
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0)
- dev_err(&spi_nand->dev, "error %d set otp\n", retval);
-
- return retval;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-/**
- * spinand_enable_ecc- send command 0x1f to write the SPI Nand OTP register
- * Description:
- * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
- * Enable chip internal ECC, set the bit to 1
- * Disable chip internal ECC, clear the bit to 0
- */
-static int spinand_enable_ecc(struct spi_device *spi_nand)
-{
- int retval;
- u8 otp = 0;
-
- retval = spinand_get_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
-
- if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK)
- return 0;
- otp |= OTP_ECC_MASK;
- retval = spinand_set_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
- return spinand_get_otp(spi_nand, &otp);
-}
-#endif
-
-static int spinand_disable_ecc(struct spi_device *spi_nand)
-{
- int retval;
- u8 otp = 0;
-
- retval = spinand_get_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
-
- if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
- otp &= ~OTP_ECC_MASK;
- retval = spinand_set_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
- return spinand_get_otp(spi_nand, &otp);
- }
- return 0;
-}
-
-/**
- * spinand_write_enable- send command 0x06 to enable write or erase the
- * Nand cells
- * Description:
- * Before write and erase the Nand cells, the write enable has to be set.
- * After the write or erase, the write enable bit is automatically
- * cleared (status register bit 2)
- * Set the bit 2 of the status register has the same effect
- */
-static int spinand_write_enable(struct spi_device *spi_nand)
-{
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_WR_ENABLE;
- return spinand_cmd(spi_nand, &cmd);
-}
-
-static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = page_id;
- cmd.cmd = CMD_READ;
- cmd.n_addr = 3;
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/*
- * spinand_read_from_cache- send command 0x03 to read out the data from the
- * cache register(2112 bytes max)
- * Description:
- * The read can specify 1 to 2112 bytes of data read at the corresponding
- * locations.
- * No tRd delay.
- */
-static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id,
- u16 byte_id, u16 len, u8 *rbuf)
-{
- struct spinand_cmd cmd = {0};
- u16 column;
-
- column = byte_id;
- cmd.cmd = CMD_READ_RDM;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((column & 0xff00) >> 8);
- cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
- cmd.addr[1] = (u8)(column & 0x00ff);
- cmd.addr[2] = (u8)(0xff);
- cmd.n_dummy = 0;
- cmd.n_rx = len;
- cmd.rx_buf = rbuf;
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/*
- * spinand_read_page-to read a page with:
- * @page_id: the physical page number
- * @offset: the location from 0 to 2111
- * @len: number of bytes to read
- * @rbuf: read buffer to hold @len bytes
- *
- * Description:
- * The read includes two commands to the Nand: 0x13 and 0x03 commands
- * Poll to read status to wait for tRD time.
- */
-static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
- u16 offset, u16 len, u8 *rbuf)
-{
- int ret;
- u8 status = 0;
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_read_hw_ecc) {
- if (spinand_enable_ecc(spi_nand) < 0)
- dev_err(&spi_nand->dev, "enable HW ECC failed!");
- }
-#endif
- ret = spinand_read_page_to_cache(spi_nand, page_id);
- if (ret < 0)
- return ret;
-
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "WAIT timedout!!!\n");
-
- while (1) {
- ret = spinand_read_status(spi_nand, &status);
- if (ret < 0) {
- dev_err(&spi_nand->dev,
- "err %d read status register\n", ret);
- return ret;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
- dev_err(&spi_nand->dev, "ecc error, page=%d\n",
- page_id);
- return 0;
- }
- break;
- }
- }
-
- ret = spinand_read_from_cache(spi_nand, page_id, offset, len, rbuf);
- if (ret < 0) {
- dev_err(&spi_nand->dev, "read from cache failed!!\n");
- return ret;
- }
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_read_hw_ecc) {
- ret = spinand_disable_ecc(spi_nand);
- if (ret < 0) {
- dev_err(&spi_nand->dev, "disable ecc failed!!\n");
- return ret;
- }
- enable_read_hw_ecc = 0;
- }
-#endif
- return ret;
-}
-
-/*
- * spinand_program_data_to_cache--to write a page to cache with:
- * @byte_id: the location to write to the cache
- * @len: number of bytes to write
- * @rbuf: read buffer to hold @len bytes
- *
- * Description:
- * The write command used here is 0x84--indicating that the cache is
- * not cleared first.
- * Since it is writing the data to cache, there is no tPROG time.
- */
-static int spinand_program_data_to_cache(struct spi_device *spi_nand,
- u16 page_id, u16 byte_id, u16 len, u8 *wbuf)
-{
- struct spinand_cmd cmd = {0};
- u16 column;
-
- column = byte_id;
- cmd.cmd = CMD_PROG_PAGE_CLRCACHE;
- cmd.n_addr = 2;
- cmd.addr[0] = (u8)((column & 0xff00) >> 8);
- cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
- cmd.addr[1] = (u8)(column & 0x00ff);
- cmd.n_tx = len;
- cmd.tx_buf = wbuf;
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_program_execute--to write a page from cache to the Nand array with
- * @page_id: the physical page location to write the page.
- *
- * Description:
- * The write command used here is 0x10--indicating the cache is writing to
- * the Nand array.
- * Need to wait for tPROG time to finish the transaction.
- */
-static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = page_id;
- cmd.cmd = CMD_PROG_PAGE_EXC;
- cmd.n_addr = 3;
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_program_page--to write a page with:
- * @page_id: the physical page location to write the page.
- * @offset: the location from the cache starting from 0 to 2111
- * @len: the number of bytes to write
- * @wbuf: the buffer to hold the number of bytes
- *
- * Description:
- * The commands used here are 0x06, 0x84, and 0x10--indicating that
- * the write enable is first sent, the write cache command, and the
- * write execute command.
- * Poll to wait for the tPROG time to finish the transaction.
- */
-static int spinand_program_page(struct spi_device *spi_nand,
- u16 page_id, u16 offset, u16 len, u8 *buf)
-{
- int retval;
- u8 status = 0;
- uint8_t *wbuf;
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- unsigned int i, j;
-
- enable_read_hw_ecc = 0;
- wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
- spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
-
- for (i = offset, j = 0; i < len; i++, j++)
- wbuf[i] &= buf[j];
-
- if (enable_hw_ecc) {
- retval = spinand_enable_ecc(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "enable ecc failed!!\n");
- return retval;
- }
- }
-#else
- wbuf = buf;
-#endif
- retval = spinand_write_enable(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "write enable failed!!\n");
- return retval;
- }
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!!!\n");
-
- retval = spinand_program_data_to_cache(spi_nand, page_id,
- offset, len, wbuf);
- if (retval < 0)
- return retval;
- retval = spinand_program_execute(spi_nand, page_id);
- if (retval < 0)
- return retval;
- while (1) {
- retval = spinand_read_status(spi_nand, &status);
- if (retval < 0) {
- dev_err(&spi_nand->dev,
- "error %d reading status register\n",
- retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
- dev_err(&spi_nand->dev,
- "program error, page %d\n", page_id);
- return -1;
- }
- break;
- }
- }
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_hw_ecc) {
- retval = spinand_disable_ecc(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "disable ecc failed!!\n");
- return retval;
- }
- enable_hw_ecc = 0;
- }
-#endif
-
- return 0;
-}
-
-/**
- * spinand_erase_block_erase--to erase a page with:
- * @block_id: the physical block location to erase.
- *
- * Description:
- * The command used here is 0xd8--indicating an erase command to erase
- * one block--64 pages
- * Need to wait for tERS.
- */
-static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = block_id;
- cmd.cmd = CMD_ERASE_BLK;
- cmd.n_addr = 3;
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_erase_block--to erase a page with:
- * @block_id: the physical block location to erase.
- *
- * Description:
- * The commands used here are 0x06 and 0xd8--indicating an erase
- * command to erase one block--64 pages
- * It will first to enable the write enable bit (0x06 command),
- * and then send the 0xd8 erase command
- * Poll to wait for the tERS time to complete the tranaction.
- */
-static int spinand_erase_block(struct spi_device *spi_nand, u16 block_id)
-{
- int retval;
- u8 status = 0;
-
- retval = spinand_write_enable(spi_nand);
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!!!\n");
-
- retval = spinand_erase_block_erase(spi_nand, block_id);
- while (1) {
- retval = spinand_read_status(spi_nand, &status);
- if (retval < 0) {
- dev_err(&spi_nand->dev,
- "error %d reading status register\n",
- (int) retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
- dev_err(&spi_nand->dev,
- "erase error, block %d\n", block_id);
- return -1;
- }
- break;
- }
- }
- return 0;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-static int spinand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required)
-{
- const uint8_t *p = buf;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
-
- enable_hw_ecc = 1;
- chip->write_buf(mtd, p, eccsize * eccsteps);
- return 0;
-}
-
-static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
-{
- int retval;
- u8 status;
- uint8_t *p = buf;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
- struct spinand_info *info = (struct spinand_info *)chip->priv;
-
- enable_read_hw_ecc = 1;
-
- chip->read_buf(mtd, p, eccsize * eccsteps);
- if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- while (1) {
- retval = spinand_read_status(info->spi, &status);
- if (retval < 0) {
- dev_err(&mtd->dev,
- "error %d reading status register\n",
- retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
- pr_info("spinand: ECC error\n");
- mtd->ecc_stats.failed++;
- } else if ((status & STATUS_ECC_MASK) ==
- STATUS_ECC_1BIT_CORRECTED)
- mtd->ecc_stats.corrected++;
- break;
- }
- }
- return 0;
-
-}
-#endif
-
-static void spinand_select_chip(struct mtd_info *mtd, int dev)
-{
-}
-
-static uint8_t spinand_read_byte(struct mtd_info *mtd)
-{
- struct spinand_state *state = mtd_to_state(mtd);
- u8 data;
-
- data = state->buf[state->buf_ptr];
- state->buf_ptr++;
- return data;
-}
-
-
-static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
- struct spinand_info *info = (struct spinand_info *)chip->priv;
-
- unsigned long timeo = jiffies;
- int retval, state = chip->state;
- u8 status;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
-
- while (time_before(jiffies, timeo)) {
- retval = spinand_read_status(info->spi, &status);
- if (retval < 0) {
- dev_err(&mtd->dev,
- "error %d reading status register\n",
- retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY)
- return 0;
-
- cond_resched();
- }
- return 0;
-}
-
-static void spinand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-
- struct spinand_state *state = mtd_to_state(mtd);
-
- memcpy(state->buf + state->buf_ptr, buf, len);
- state->buf_ptr += len;
-}
-
-static void spinand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
- struct spinand_state *state = mtd_to_state(mtd);
-
- memcpy(buf, state->buf + state->buf_ptr, len);
- state->buf_ptr += len;
-}
-
-/*
- * spinand_reset- send RESET command "0xff" to the Nand device.
- */
-static void spinand_reset(struct spi_device *spi_nand)
-{
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_RESET;
-
- if (spinand_cmd(spi_nand, &cmd) < 0)
- pr_info("spinand reset failed!\n");
-
- /* elapse 1ms before issuing any other command */
- udelay(1000);
-
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!\n");
-}
-
-static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command,
- int column, int page)
-{
- struct nand_chip *chip = (struct nand_chip *)mtd->priv;
- struct spinand_info *info = (struct spinand_info *)chip->priv;
- struct spinand_state *state = (struct spinand_state *)info->priv;
-
- switch (command) {
- /*
- * READ0 - read in first 0x800 bytes
- */
- case NAND_CMD_READ1:
- case NAND_CMD_READ0:
- state->buf_ptr = 0;
- spinand_read_page(info->spi, page, 0x0, 0x840, state->buf);
- break;
- /* READOOB reads only the OOB because no ECC is performed. */
- case NAND_CMD_READOOB:
- state->buf_ptr = 0;
- spinand_read_page(info->spi, page, 0x800, 0x40, state->buf);
- break;
- case NAND_CMD_RNDOUT:
- state->buf_ptr = column;
- break;
- case NAND_CMD_READID:
- state->buf_ptr = 0;
- spinand_read_id(info->spi, state->buf);
- break;
- case NAND_CMD_PARAM:
- state->buf_ptr = 0;
- break;
- /* ERASE1 stores the block and page address */
- case NAND_CMD_ERASE1:
- spinand_erase_block(info->spi, page);
- break;
- /* ERASE2 uses the block and page address from ERASE1 */
- case NAND_CMD_ERASE2:
- break;
- /* SEQIN sets up the addr buffer and all registers except the length */
- case NAND_CMD_SEQIN:
- state->col = column;
- state->row = page;
- state->buf_ptr = 0;
- break;
- /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
- case NAND_CMD_PAGEPROG:
- spinand_program_page(info->spi, state->row, state->col,
- state->buf_ptr, state->buf);
- break;
- case NAND_CMD_STATUS:
- spinand_get_otp(info->spi, state->buf);
- if (!(state->buf[0] & 0x80))
- state->buf[0] = 0x80;
- state->buf_ptr = 0;
- break;
- /* RESET command */
- case NAND_CMD_RESET:
- if (wait_till_ready(info->spi))
- dev_err(&info->spi->dev, "WAIT timedout!!!\n");
- /* a minimum of 250us must elapse before issuing RESET cmd*/
- udelay(250);
- spinand_reset(info->spi);
- break;
- default:
- dev_err(&mtd->dev, "Unknown CMD: 0x%x\n", command);
- }
-}
-
-/**
- * spinand_lock_block- send write register 0x1f command to the Nand device
- *
- * Description:
- * After power up, all the Nand blocks are locked. This function allows
- * one to unlock the blocks, and so it can be written or erased.
- */
-static int spinand_lock_block(struct spi_device *spi_nand, u8 lock)
-{
- struct spinand_cmd cmd = {0};
- int ret;
- u8 otp = 0;
-
- ret = spinand_get_otp(spi_nand, &otp);
-
- cmd.cmd = CMD_WRITE_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_BLOCK_LOCK;
- cmd.n_tx = 1;
- cmd.tx_buf = &lock;
-
- ret = spinand_cmd(spi_nand, &cmd);
- if (ret < 0)
- dev_err(&spi_nand->dev, "error %d lock block\n", ret);
-
- return ret;
-}
-/*
- * spinand_probe - [spinand Interface]
- * @spi_nand: registered device driver.
- *
- * Description:
- * To set up the device driver parameters to make the device available.
- */
-static int spinand_probe(struct spi_device *spi_nand)
-{
- struct mtd_info *mtd;
- struct nand_chip *chip;
- struct spinand_info *info;
- struct spinand_state *state;
- struct mtd_part_parser_data ppdata;
-
- info = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->spi = spi_nand;
-
- spinand_lock_block(spi_nand, BL_ALL_UNLOCKED);
-
- state = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_state),
- GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- info->priv = state;
- state->buf_ptr = 0;
- state->buf = devm_kzalloc(&spi_nand->dev, BUFSIZE, GFP_KERNEL);
- if (!state->buf)
- return -ENOMEM;
-
- chip = devm_kzalloc(&spi_nand->dev, sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = 0x200;
- chip->ecc.bytes = 0x6;
- chip->ecc.steps = 0x4;
-
- chip->ecc.strength = 1;
- chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
- chip->ecc.layout = &spinand_oob_64;
- chip->ecc.read_page = spinand_read_page_hwecc;
- chip->ecc.write_page = spinand_write_page_hwecc;
-#else
- chip->ecc.mode = NAND_ECC_SOFT;
- if (spinand_disable_ecc(spi_nand) < 0)
- pr_info("%s: disable ecc failed!\n", __func__);
-#endif
-
- chip->priv = info;
- chip->read_buf = spinand_read_buf;
- chip->write_buf = spinand_write_buf;
- chip->read_byte = spinand_read_byte;
- chip->cmdfunc = spinand_cmdfunc;
- chip->waitfunc = spinand_wait;
- chip->options |= NAND_CACHEPRG;
- chip->select_chip = spinand_select_chip;
-
- mtd = devm_kzalloc(&spi_nand->dev, sizeof(struct mtd_info), GFP_KERNEL);
- if (!mtd)
- return -ENOMEM;
-
- dev_set_drvdata(&spi_nand->dev, mtd);
-
- mtd->priv = chip;
- mtd->name = dev_name(&spi_nand->dev);
- mtd->owner = THIS_MODULE;
- mtd->oobsize = 64;
-
- if (nand_scan(mtd, 1))
- return -ENXIO;
-
- ppdata.of_node = spi_nand->dev.of_node;
- return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
-}
-
-/*
- * spinand_remove: Remove the device driver
- * @spi: the spi device.
- *
- * Description:
- * To remove the device driver parameters and free up allocated memories.
- */
-static int spinand_remove(struct spi_device *spi)
-{
- mtd_device_unregister(dev_get_drvdata(&spi->dev));
-
- return 0;
-}
-
-static const struct of_device_id spinand_dt[] = {
- { .compatible = "spinand,mt29f", },
- {}
-};
-MODULE_DEVICE_TABLE(of, spinand_dt);
-
-/*
- * Device name structure description
- */
-static struct spi_driver spinand_driver = {
- .driver = {
- .name = "mt29f",
- .owner = THIS_MODULE,
- .of_match_table = spinand_dt,
- },
- .probe = spinand_probe,
- .remove = spinand_remove,
-};
-
-module_spi_driver(spinand_driver);
-
-MODULE_DESCRIPTION("SPI NAND driver for Micron");
-MODULE_AUTHOR("Henry Pan <hspan@micron.com>, Kamlakant Patel <kamlakant.patel@broadcom.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
deleted file mode 100644
index 6c8e413b5b63..000000000000
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*-
- * Copyright 2013 Broadcom Corporation
- *
- * Copyright (c) 2009-2010 Micron Technology, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Henry Pan <hspan@micron.com>
- *
- * based on nand.h
- */
-#ifndef __LINUX_MTD_SPI_NAND_H
-#define __LINUX_MTD_SPI_NAND_H
-
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/mtd/mtd.h>
-
-/* cmd */
-#define CMD_READ 0x13
-#define CMD_READ_RDM 0x03
-#define CMD_PROG_PAGE_CLRCACHE 0x02
-#define CMD_PROG_PAGE 0x84
-#define CMD_PROG_PAGE_EXC 0x10
-#define CMD_ERASE_BLK 0xd8
-#define CMD_WR_ENABLE 0x06
-#define CMD_WR_DISABLE 0x04
-#define CMD_READ_ID 0x9f
-#define CMD_RESET 0xff
-#define CMD_READ_REG 0x0f
-#define CMD_WRITE_REG 0x1f
-
-/* feature/ status reg */
-#define REG_BLOCK_LOCK 0xa0
-#define REG_OTP 0xb0
-#define REG_STATUS 0xc0/* timing */
-
-/* status */
-#define STATUS_OIP_MASK 0x01
-#define STATUS_READY (0 << 0)
-#define STATUS_BUSY (1 << 0)
-
-#define STATUS_E_FAIL_MASK 0x04
-#define STATUS_E_FAIL (1 << 2)
-
-#define STATUS_P_FAIL_MASK 0x08
-#define STATUS_P_FAIL (1 << 3)
-
-#define STATUS_ECC_MASK 0x30
-#define STATUS_ECC_1BIT_CORRECTED (1 << 4)
-#define STATUS_ECC_ERROR (2 << 4)
-#define STATUS_ECC_RESERVED (3 << 4)
-
-/*ECC enable defines*/
-#define OTP_ECC_MASK 0x10
-#define OTP_ECC_OFF 0
-#define OTP_ECC_ON 1
-
-#define ECC_DISABLED
-#define ECC_IN_NAND
-#define ECC_SOFT
-
-/* block lock */
-#define BL_ALL_LOCKED 0x38
-#define BL_1_2_LOCKED 0x30
-#define BL_1_4_LOCKED 0x28
-#define BL_1_8_LOCKED 0x20
-#define BL_1_16_LOCKED 0x18
-#define BL_1_32_LOCKED 0x10
-#define BL_1_64_LOCKED 0x08
-#define BL_ALL_UNLOCKED 0
-
-struct spinand_info {
- struct nand_ecclayout *ecclayout;
- struct spi_device *spi;
- void *priv;
-};
-
-struct spinand_state {
- uint32_t col;
- uint32_t row;
- int buf_ptr;
- u8 *buf;
-};
-
-struct spinand_cmd {
- u8 cmd;
- u32 n_addr; /* Number of address */
- u8 addr[3]; /* Reg Offset */
- u32 n_dummy; /* Dummy use */
- u32 n_tx; /* Number of tx bytes */
- u8 *tx_buf; /* Tx buf */
- u32 n_rx; /* Number of rx bytes */
- u8 *rx_buf; /* Rx buf */
-};
-
-int spinand_mtd(struct mtd_info *mtd);
-void spinand_mtd_release(struct mtd_info *mtd);
-
-#endif /* __LINUX_MTD_SPI_NAND_H */
diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig
deleted file mode 100644
index d660de51b541..000000000000
--- a/drivers/staging/netlogic/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config NETLOGIC_XLR_NET
- tristate "Netlogic XLR/XLS network device"
- depends on CPU_XLR
- select PHYLIB
- ---help---
- This driver support Netlogic XLR/XLS on chip gigabit
- Ethernet.
diff --git a/drivers/staging/netlogic/Makefile b/drivers/staging/netlogic/Makefile
deleted file mode 100644
index f7355e3e9c4c..000000000000
--- a/drivers/staging/netlogic/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_NETLOGIC_XLR_NET) += xlr_net.o platform_net.o
diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO
deleted file mode 100644
index 8f172b017b94..000000000000
--- a/drivers/staging/netlogic/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
-* Implementing 64bit stat counter in software
-* All memory allocation should be changed to DMA allocations
-* Changing comments in to linux standred format
-
-Please send patches
-To:
-Ganesan Ramalingam <ganesanr@broadcom.com>
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Cc:
-Jayachandran Chandrashekaran Nair <jchandra@broadcom.com>
-
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
deleted file mode 100644
index e914147d7379..000000000000
--- a/drivers/staging/netlogic/platform_net.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * 1. Redistributions of source code must retain the above copyright
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/resource.h>
-#include <linux/phy.h>
-
-#include <asm/netlogic/haldefs.h>
-#include <asm/netlogic/common.h>
-#include <asm/netlogic/xlr/fmn.h>
-#include <asm/netlogic/xlr/xlr.h>
-#include <asm/netlogic/psb-bootinfo.h>
-#include <asm/netlogic/xlr/pic.h>
-#include <asm/netlogic/xlr/iomap.h>
-
-#include "platform_net.h"
-
-/* Linux Net */
-#define MAX_NUM_GMAC 8
-#define MAX_NUM_XLS_GMAC 8
-#define MAX_NUM_XLR_GMAC 4
-
-
-static u32 xlr_gmac_offsets[] = {
- NETLOGIC_IO_GMAC_0_OFFSET, NETLOGIC_IO_GMAC_1_OFFSET,
- NETLOGIC_IO_GMAC_2_OFFSET, NETLOGIC_IO_GMAC_3_OFFSET,
- NETLOGIC_IO_GMAC_4_OFFSET, NETLOGIC_IO_GMAC_5_OFFSET,
- NETLOGIC_IO_GMAC_6_OFFSET, NETLOGIC_IO_GMAC_7_OFFSET
-};
-
-static u32 xlr_gmac_irqs[] = { PIC_GMAC_0_IRQ, PIC_GMAC_1_IRQ,
- PIC_GMAC_2_IRQ, PIC_GMAC_3_IRQ,
- PIC_GMAC_4_IRQ, PIC_GMAC_5_IRQ,
- PIC_GMAC_6_IRQ, PIC_GMAC_7_IRQ
-};
-
-static struct resource xlr_net0_res[8];
-static struct resource xlr_net1_res[8];
-static u32 __iomem *gmac4_addr;
-static u32 __iomem *gpio_addr;
-
-static void xlr_resource_init(struct resource *res, int offset, int irq)
-{
- res->name = "gmac";
-
- res->start = CPHYSADDR(nlm_mmio_base(offset));
- res->end = res->start + 0xfff;
- res->flags = IORESOURCE_MEM;
-
- res++;
- res->name = "gmac";
- res->start = res->end = irq;
- res->flags = IORESOURCE_IRQ;
-}
-
-static struct platform_device *gmac_controller2_init(void *gmac0_addr)
-{
- int mac;
- static struct xlr_net_data ndata1 = {
- .phy_interface = PHY_INTERFACE_MODE_SGMII,
- .rfr_station = FMN_STNID_GMAC1_FR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[1],
- };
-
- static struct platform_device xlr_net_dev1 = {
- .name = "xlr-net",
- .id = 1,
- .dev.platform_data = &ndata1,
- };
-
- gmac4_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)), 0xfff);
- ndata1.serdes_addr = gmac4_addr;
- ndata1.pcs_addr = gmac4_addr;
- ndata1.mii_addr = gmac0_addr;
- ndata1.gpio_addr = gpio_addr;
- ndata1.cpu_mask = nlm_current_node()->coremask;
-
- xlr_net_dev1.resource = xlr_net1_res;
-
- for (mac = 0; mac < 4; mac++) {
- ndata1.tx_stnid[mac] = FMN_STNID_GMAC1_TX0 + mac;
- ndata1.phy_addr[mac] = mac + 4 + 0x10;
-
- xlr_resource_init(&xlr_net1_res[mac * 2],
- xlr_gmac_offsets[mac + 4],
- xlr_gmac_irqs[mac + 4]);
- }
- xlr_net_dev1.num_resources = 8;
-
- return &xlr_net_dev1;
-}
-
-static void xls_gmac_init(void)
-{
- int mac;
- struct platform_device *xlr_net_dev1;
- void __iomem *gmac0_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), 0xfff);
-
- static struct xlr_net_data ndata0 = {
- .rfr_station = FMN_STNID_GMACRFR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[0],
- };
-
- static struct platform_device xlr_net_dev0 = {
- .name = "xlr-net",
- .id = 0,
- };
- xlr_net_dev0.dev.platform_data = &ndata0;
- ndata0.serdes_addr = gmac0_addr;
- ndata0.pcs_addr = gmac0_addr;
- ndata0.mii_addr = gmac0_addr;
-
- /* Passing GPIO base for serdes init. Only needed on sgmii ports */
- gpio_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)), 0xfff);
- ndata0.gpio_addr = gpio_addr;
- ndata0.cpu_mask = nlm_current_node()->coremask;
-
- xlr_net_dev0.resource = xlr_net0_res;
-
- switch (nlm_prom_info.board_major_version) {
- case 12:
- /* first block RGMII or XAUI, use RGMII */
- ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII;
- ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0;
- ndata0.phy_addr[0] = 0;
-
- xlr_net_dev0.num_resources = 2;
-
- xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0],
- xlr_gmac_irqs[0]);
- platform_device_register(&xlr_net_dev0);
-
- /* second block is XAUI, not supported yet */
- break;
- default:
- /* default XLS config, all ports SGMII */
- ndata0.phy_interface = PHY_INTERFACE_MODE_SGMII;
- for (mac = 0; mac < 4; mac++) {
- ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
- ndata0.phy_addr[mac] = mac + 0x10;
-
- xlr_resource_init(&xlr_net0_res[mac * 2],
- xlr_gmac_offsets[mac],
- xlr_gmac_irqs[mac]);
- }
- xlr_net_dev0.num_resources = 8;
- platform_device_register(&xlr_net_dev0);
-
- xlr_net_dev1 = gmac_controller2_init(gmac0_addr);
- platform_device_register(xlr_net_dev1);
- }
-}
-
-static void xlr_gmac_init(void)
-{
- int mac;
-
- /* assume all GMACs for now */
- static struct xlr_net_data ndata0 = {
- .phy_interface = PHY_INTERFACE_MODE_RGMII,
- .serdes_addr = NULL,
- .pcs_addr = NULL,
- .rfr_station = FMN_STNID_GMACRFR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[0],
- .gpio_addr = NULL,
- };
-
- static struct platform_device xlr_net_dev0 = {
- .name = "xlr-net",
- .id = 0,
- .dev.platform_data = &ndata0,
- };
- ndata0.mii_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), 0xfff);
-
- ndata0.cpu_mask = nlm_current_node()->coremask;
-
- for (mac = 0; mac < MAX_NUM_XLR_GMAC; mac++) {
- ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
- ndata0.phy_addr[mac] = mac;
- xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac],
- xlr_gmac_irqs[mac]);
- }
- xlr_net_dev0.num_resources = 8;
- xlr_net_dev0.resource = xlr_net0_res;
-
- platform_device_register(&xlr_net_dev0);
-}
-
-static int __init xlr_net_init(void)
-{
- if (nlm_chip_is_xls())
- xls_gmac_init();
- else
- xlr_gmac_init();
-
- return 0;
-}
-
-arch_initcall(xlr_net_init);
diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h
deleted file mode 100644
index e1b27f649590..000000000000
--- a/drivers/staging/netlogic/platform_net.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define PORTS_PER_CONTROLLER 4
-
-struct xlr_net_data {
- int cpu_mask;
- u32 __iomem *mii_addr;
- u32 __iomem *serdes_addr;
- u32 __iomem *pcs_addr;
- u32 __iomem *gpio_addr;
- int phy_interface;
- int rfr_station;
- int tx_stnid[PORTS_PER_CONTROLLER];
- int *bucket_size;
- int phy_addr[PORTS_PER_CONTROLLER];
- struct xlr_fmn_info *gmac_fmn_info;
-};
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
deleted file mode 100644
index 8ae01753b011..000000000000
--- a/drivers/staging/netlogic/xlr_net.c
+++ /dev/null
@@ -1,1144 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/phy.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/smp.h>
-#include <linux/ethtool.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <asm/mipsregs.h>
-/*
- * fmn.h - For FMN credit configuration and registering fmn_handler.
- * The FMN is the communication mechanism that allows processing agents
- * within the XLR/XLS to communicate with each other.
- */
-#include <asm/netlogic/xlr/fmn.h>
-
-#include "platform_net.h"
-#include "xlr_net.h"
-
-/*
- * The readl/writel implementation byteswaps on XLR/XLS, so
- * we need to use __raw_ IO to read the NAE registers
- * because they are in the big-endian MMIO area on the SoC.
- */
-static inline void xlr_nae_wreg(u32 __iomem *base, unsigned int reg, u32 val)
-{
- __raw_writel(val, base + reg);
-}
-
-static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg)
-{
- return __raw_readl(base + reg);
-}
-
-static inline void xlr_reg_update(u32 *base_addr,
- u32 off, u32 val, u32 mask)
-{
- u32 tmp;
-
- tmp = xlr_nae_rdreg(base_addr, off);
- xlr_nae_wreg(base_addr, off, (tmp & ~mask) | (val & mask));
-}
-
-#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
-
-static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr)
-{
- struct nlm_fmn_msg msg;
- int ret = 0, num_try = 0, stnid;
- unsigned long paddr, mflags;
-
- paddr = virt_to_bus(addr);
- msg.msg0 = (u64)paddr & 0xffffffffe0ULL;
- msg.msg1 = 0;
- msg.msg2 = 0;
- msg.msg3 = 0;
- stnid = priv->nd->rfr_station;
- do {
- mflags = nlm_cop2_enable_irqsave();
- ret = nlm_fmn_send(1, 0, stnid, &msg);
- nlm_cop2_disable_irqrestore(mflags);
- if (ret == 0)
- return 0;
- } while (++num_try < 10000);
-
- pr_err("Send to RFR failed in RX path\n");
- return ret;
-}
-
-static inline unsigned char *xlr_alloc_skb(void)
-{
- struct sk_buff *skb;
- int buf_len = sizeof(struct sk_buff *);
- unsigned char *skb_data;
-
- /* skb->data is cache aligned */
- skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC);
- if (!skb)
- return NULL;
- skb_data = skb->data;
- skb_put(skb, MAC_SKB_BACK_PTR_SIZE);
- skb_pull(skb, MAC_SKB_BACK_PTR_SIZE);
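-	/*
-	 * Stash the skb pointer at the original buffer head so the FMN
-	 * handler can recover the skb from the DMA address.
-	 */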
- memcpy(skb_data, &skb, buf_len);
-
- return skb->data;
-}
-
-static void xlr_net_fmn_handler(int bkt, int src_stnid, int size,
- int code, struct nlm_fmn_msg *msg, void *arg)
-{
- struct sk_buff *skb;
- void *skb_data = NULL;
- struct net_device *ndev;
- struct xlr_net_priv *priv;
- u32 port, length;
- unsigned char *addr;
- struct xlr_adapter *adapter = (struct xlr_adapter *) arg;
-
- length = (msg->msg0 >> 40) & 0x3fff;
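-	/*
-	 * A zero-length message is a free-back descriptor for a transmitted
-	 * buffer; anything else is a received frame.
-	 */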
- if (length == 0) {
- addr = bus_to_virt(msg->msg0 & 0xffffffffffULL);
- addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *) *(unsigned long *)addr;
- dev_kfree_skb_any((struct sk_buff *)addr);
- } else {
- addr = (unsigned char *)
- bus_to_virt(msg->msg0 & 0xffffffffe0ULL);
- length = length - BYTE_OFFSET - MAC_CRC_LEN;
- port = ((int)msg->msg0) & 0x0f;
- addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *) *(unsigned long *)addr;
- skb->dev = adapter->netdev[port];
- if (skb->dev == NULL)
- return;
- ndev = skb->dev;
- priv = netdev_priv(ndev);
-
- /* 16 byte IP header align */
- skb_reserve(skb, BYTE_OFFSET);
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->dev->last_rx = jiffies;
- netif_rx(skb);
- /* Fill rx ring */
- skb_data = xlr_alloc_skb();
- if (skb_data)
- send_to_rfr_fifo(priv, skb_data);
- }
-}
-
-/*
- * Ethtool operation
- */
-static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
-
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_gset(phydev, ecmd);
-}
-
-static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
-
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
-}
-
-static struct ethtool_ops xlr_ethtool_ops = {
- .get_settings = xlr_get_settings,
- .set_settings = xlr_set_settings,
-};
-
-/*
- * Net operations
- */
-static int xlr_net_fill_rx_ring(struct net_device *ndev)
-{
- void *skb_data;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < MAX_FRIN_SPILL/4; i++) {
- skb_data = xlr_alloc_skb();
- if (!skb_data) {
- pr_err("SKB allocation failed\n");
- return -ENOMEM;
- }
- send_to_rfr_fifo(priv, skb_data);
- }
- pr_info("Rx ring setup done\n");
- return 0;
-}
-
-static int xlr_net_open(struct net_device *ndev)
-{
- u32 err;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
-
- /* schedule a link state check */
- phy_start(phydev);
-
- err = phy_start_aneg(phydev);
- if (err) {
- pr_err("Autoneg failed\n");
- return err;
- }
-	/* Set the PHY speed in the internal reg */
- xlr_set_gmac_speed(priv);
-
- netif_tx_start_all_queues(ndev);
-
- return 0;
-}
-
-static int xlr_net_stop(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
-
- phy_stop(phydev);
- netif_tx_stop_all_queues(ndev);
- return 0;
-}
-
-static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
- struct sk_buff *skb)
-{
- unsigned long physkb = virt_to_phys(skb);
- int cpu_core = nlm_core_id();
- int fr_stn_id = cpu_core * 8 + XLR_FB_STN; /* FB to 6th bucket */
-
- msg->msg0 = (((u64)1 << 63) | /* End of packet descriptor */
- ((u64)127 << 54) | /* No Free back */
- (u64)skb->len << 40 | /* Length of data */
- ((u64)addr));
- msg->msg1 = (((u64)1 << 63) |
- ((u64)fr_stn_id << 54) | /* Free back id */
- (u64)0 << 40 | /* Set len to 0 */
- ((u64)physkb & 0xffffffff)); /* 32bit address */
- msg->msg2 = msg->msg3 = 0;
-}
-
-static void __maybe_unused xlr_wakeup_queue(unsigned long dev)
-{
- struct net_device *ndev = (struct net_device *) dev;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
-
- if (phydev->link)
- netif_tx_wake_queue(netdev_get_tx_queue(ndev, priv->wakeup_q));
-}
-
-static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
-{
- struct nlm_fmn_msg msg;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- int ret;
- u32 flags;
-
- xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb);
- flags = nlm_cop2_enable_irqsave();
- ret = nlm_fmn_send(2, 0, priv->tx_stnid, &msg);
- nlm_cop2_disable_irqrestore(flags);
- if (ret)
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return (u16)smp_processor_id();
-}
-
-static void xlr_hw_set_mac_addr(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
-
- /* set mac station address */
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0,
- ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
- (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1,
- ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
-
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
-
- xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG,
- (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
-
- if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
- xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
-}
-
-static int xlr_net_set_mac_addr(struct net_device *ndev, void *data)
-{
- int err;
-
- err = eth_mac_addr(ndev, data);
- if (err)
- return err;
- xlr_hw_set_mac_addr(ndev);
- return 0;
-}
-
-static void xlr_set_rx_mode(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- u32 regval;
-
- regval = xlr_nae_rdreg(priv->base_addr, R_MAC_FILTER_CONFIG);
-
- if (ndev->flags & IFF_PROMISC) {
- regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
- } else {
- regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
- }
-
- xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, regval);
-}
-
-static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
-
- stats->rx_packets = xlr_nae_rdreg(priv->base_addr, RX_PACKET_COUNTER);
- stats->tx_packets = xlr_nae_rdreg(priv->base_addr, TX_PACKET_COUNTER);
- stats->rx_bytes = xlr_nae_rdreg(priv->base_addr, RX_BYTE_COUNTER);
- stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER);
- stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER);
- stats->rx_dropped = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->tx_dropped = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
-
- stats->multicast = xlr_nae_rdreg(priv->base_addr,
- RX_MULTICAST_PACKET_COUNTER);
- stats->collisions = xlr_nae_rdreg(priv->base_addr,
- TX_TOTAL_COLLISION_COUNTER);
-
- stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FRAME_LENGTH_ERROR_COUNTER);
- stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FCS_ERROR_COUNTER);
- stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr,
- RX_ALIGNMENT_ERROR_COUNTER);
-
- stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr,
- RX_CARRIER_SENSE_ERROR_COUNTER);
-
- stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_fifo_errors +
- stats->rx_missed_errors);
-
- stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr,
- TX_EXCESSIVE_COLLISION_PACKET_COUNTER);
- stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
- stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
-}
-
-static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
-{
- xlr_stats(ndev, stats);
- return stats;
-}
-
-static struct net_device_ops xlr_netdev_ops = {
- .ndo_open = xlr_net_open,
- .ndo_stop = xlr_net_stop,
- .ndo_start_xmit = xlr_net_start_xmit,
- .ndo_select_queue = xlr_net_select_queue,
- .ndo_set_mac_address = xlr_net_set_mac_addr,
- .ndo_set_rx_mode = xlr_set_rx_mode,
- .ndo_get_stats64 = xlr_get_stats64,
-};
-
-/*
- * Gmac init
- */
-static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
- int reg_start_1, int reg_size, int size)
-{
- void *spill;
- u32 *base;
- unsigned long phys_addr;
- u32 spill_size;
-
- base = priv->base_addr;
- spill_size = size;
- spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_ATOMIC);
- if (!spill)
- pr_err("Unable to allocate memory for spill area!\n");
-
- spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
- phys_addr = virt_to_phys(spill);
- dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n",
- size, phys_addr);
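-	/*
-	 * Program the spill area base: bits 36:5 of the physical address go
-	 * in the START_0 register, bits 39:37 in START_1.
-	 */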
- xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
- xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07);
- xlr_nae_wreg(base, reg_size, spill_size);
-
- return spill;
-}
-
-/*
- * Configure the 6 FIFOs that are used by the network accelerator to
- * communicate with the rest of the XLx device. 4 of the FIFOs are for
- * packets from NA --> cpu (called Class FIFOs) and 2 are for feeding
- * the NA with free descriptors.
- */
-static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
-{
- priv->frin_spill = xlr_config_spill(priv,
- R_REG_FRIN_SPILL_MEM_START_0,
- R_REG_FRIN_SPILL_MEM_START_1,
- R_REG_FRIN_SPILL_MEM_SIZE,
- MAX_FRIN_SPILL *
- sizeof(u64));
- priv->frout_spill = xlr_config_spill(priv,
- R_FROUT_SPILL_MEM_START_0,
- R_FROUT_SPILL_MEM_START_1,
- R_FROUT_SPILL_MEM_SIZE,
- MAX_FROUT_SPILL *
- sizeof(u64));
- priv->class_0_spill = xlr_config_spill(priv,
- R_CLASS0_SPILL_MEM_START_0,
- R_CLASS0_SPILL_MEM_START_1,
- R_CLASS0_SPILL_MEM_SIZE,
- MAX_CLASS_0_SPILL *
- sizeof(u64));
- priv->class_1_spill = xlr_config_spill(priv,
- R_CLASS1_SPILL_MEM_START_0,
- R_CLASS1_SPILL_MEM_START_1,
- R_CLASS1_SPILL_MEM_SIZE,
- MAX_CLASS_1_SPILL *
- sizeof(u64));
- priv->class_2_spill = xlr_config_spill(priv,
- R_CLASS2_SPILL_MEM_START_0,
- R_CLASS2_SPILL_MEM_START_1,
- R_CLASS2_SPILL_MEM_SIZE,
- MAX_CLASS_2_SPILL *
- sizeof(u64));
- priv->class_3_spill = xlr_config_spill(priv,
- R_CLASS3_SPILL_MEM_START_0,
- R_CLASS3_SPILL_MEM_START_1,
- R_CLASS3_SPILL_MEM_SIZE,
- MAX_CLASS_3_SPILL *
- sizeof(u64));
-}
-
-/*
- * Configure the PDE for Round-Robin distribution of packets to the
- * available cpus
- */
-static void xlr_config_pde(struct xlr_net_priv *priv)
-{
- int i = 0;
- u64 bkt_map = 0;
-
-	/* Each core has 8 buckets (stations) */
- for (i = 0; i < hweight32(priv->nd->cpu_mask); i++)
- bkt_map |= (0xff << (i * 8));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-}
-
-/*
- * Set up the message ring credits, bucket sizes and other
- * common configuration
- */
-static int xlr_config_common(struct xlr_net_priv *priv)
-{
- struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info;
- int start_stn_id = gmac->start_stn_id;
- int end_stn_id = gmac->end_stn_id;
- int *bucket_size = priv->nd->bucket_size;
- int i, j, err;
-
- /* Setting non-core MsgBktSize(0x321 - 0x325) */
- for (i = start_stn_id; i <= end_stn_id; i++) {
- xlr_nae_wreg(priv->base_addr,
- R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
- bucket_size[i]);
- }
-
-	/*
-	 * Set the non-core credit counter registers,
-	 * distributing the GMAC's credits to the CPUs
-	 */
- for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++)
- xlr_nae_wreg(priv->base_addr,
- (R_CC_CPU0_0 + (i * 8)) + j,
- gmac->credit_config[(i * 8) + j]);
- }
-
- xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3);
- xlr_nae_wreg(priv->base_addr, R_DMACR0, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR1, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR2, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR3, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_FREEQCARVE, 0);
-
- err = xlr_net_fill_rx_ring(priv->ndev);
- if (err)
- return err;
- nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler,
- priv->adapter);
- return 0;
-}
-
-static void xlr_config_translate_table(struct xlr_net_priv *priv)
-{
- u32 cpu_mask;
- u32 val;
- int bkts[32]; /* one bucket is assumed for each cpu */
- int b1, b2, c1, c2, i, j, k;
- int use_bkt;
-
- use_bkt = 0;
- cpu_mask = priv->nd->cpu_mask;
-
- pr_info("Using %s-based distribution\n",
- (use_bkt) ? "bucket" : "class");
- j = 0;
- for (i = 0; i < 32; i++) {
- if ((1 << i) & cpu_mask) {
-			/* for each cpu, use bucket (core * 8 + thread) */
- bkts[j] = ((i / 4) * 8) + (i % 4);
- j++;
- }
- }
-
-	/* Configure the 128 * 9 translation table to send to the available buckets */
- k = 0;
- c1 = 3;
- c2 = 0;
- for (i = 0; i < 64; i++) {
-		/*
-		 * When use_bkt is set, the bucket fields (b1, b2) are used
-		 * directly; otherwise the 4 classes are used. The logic here
-		 * distributes the packets to the buckets equally or based on
-		 * the class.
-		 */
- c1 = (c1 + 1) & 3;
- c2 = (c1 + 1) & 3;
- b1 = bkts[k];
- k = (k + 1) % j;
- b2 = bkts[k];
- k = (k + 1) % j;
-
- val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
- (c2 << 7) | (b2 << 1) | (use_bkt << 0));
- dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n",
- i, b1, b2, c1, c2);
- xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val);
- c1 = c2;
- }
-}
-
-static void xlr_config_parser(struct xlr_net_priv *priv)
-{
- u32 val;
-
- /* Mark it as ETHERNET type */
- xlr_nae_wreg(priv->base_addr, R_L2TYPE_0, 0x01);
-
-	/* Use a 7-bit CRC hash for flow classification with 127 as the CRC polynomial */
- xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG,
- ((0x7f << 8) | (1 << 1)));
-
- /* configure the parser : L2 Type is configured in the bootloader */
- /* extract IP: src, dest protocol */
- xlr_nae_wreg(priv->base_addr, R_L3CTABLE,
- (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
- (0x0800 << 0));
- xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1,
- (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
- (16 << 4) | 4);
-
- /* Configure to extract SRC port and Dest port for TCP and UDP pkts */
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6);
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 2, 17);
- val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7));
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 1, val);
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 3, val);
-
- xlr_config_translate_table(priv);
-}
-
-static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val)
-{
- unsigned long timeout, stoptime, checktime;
- int timedout;
-
-	/* 100ms timeout */
- timeout = msecs_to_jiffies(100);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum);
-
- /* Write the data which starts the write cycle */
- xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32) val);
-
-	/* poll for the write cycle to complete */
- while (!timedout) {
- checktime = jiffies;
- if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
- break;
- timedout = time_after(checktime, stoptime);
- }
- if (timedout) {
-		pr_info("Phy device write err: device busy\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum)
-{
- unsigned long timeout, stoptime, checktime;
- int timedout;
-
-	/* 100ms timeout */
- timeout = msecs_to_jiffies(100);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- /* setup the phy reg to be used */
- xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS,
- (phy_addr << 8) | (regnum << 0));
-
- /* Issue the read command */
- xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND,
- (1 << O_MII_MGMT_COMMAND__rstat));
-
- /* poll for the read cycle to complete */
- while (!timedout) {
- checktime = jiffies;
- if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
- break;
- timedout = time_after(checktime, stoptime);
- }
- if (timedout) {
-		pr_info("Phy device read err: device busy\n");
- return -EBUSY;
- }
-
- /* clear the read cycle */
- xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, 0);
-
- /* Read the data */
- return xlr_nae_rdreg(base_addr, R_MII_MGMT_STATUS);
-}
-
-static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val)
-{
- struct xlr_net_priv *priv = bus->priv;
- int ret;
-
- ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val);
- dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n",
- phy_addr, regnum, val, ret);
- return ret;
-}
-
-static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct xlr_net_priv *priv = bus->priv;
- int ret;
-
- ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum);
- dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n",
- phy_addr, regnum, ret);
- return ret;
-}
-
-/*
- * XLR ports are RGMII. XLS ports are mostly SGMII, except for port0,
- * which can be configured as either SGMII or RGMII and is considered
- * SGMII by default; if the board is set up for RGMII, the port_type
- * needs to be set accordingly. The serdes and PCS layers need to be
- * configured for SGMII.
- */
-static void xlr_sgmii_init(struct xlr_net_priv *priv)
-{
- int phy;
-
- xlr_phy_write(priv->serdes_addr, 26, 0, 0x6DB0);
- xlr_phy_write(priv->serdes_addr, 26, 1, 0xFFFF);
- xlr_phy_write(priv->serdes_addr, 26, 2, 0xB6D0);
- xlr_phy_write(priv->serdes_addr, 26, 3, 0x00FF);
- xlr_phy_write(priv->serdes_addr, 26, 4, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 5, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 6, 0x0005);
- xlr_phy_write(priv->serdes_addr, 26, 7, 0x0001);
- xlr_phy_write(priv->serdes_addr, 26, 8, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 9, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 10, 0x0000);
-
- /* program GPIO values for serdes init parameters */
- xlr_nae_wreg(priv->gpio_addr, 0x20, 0x7e6802);
- xlr_nae_wreg(priv->gpio_addr, 0x10, 0x7104);
-
- xlr_nae_wreg(priv->gpio_addr, 0x22, 0x7e6802);
- xlr_nae_wreg(priv->gpio_addr, 0x21, 0x7104);
-
- /* enable autoneg - more magic */
- phy = priv->phy_addr % 4 + 27;
- xlr_phy_write(priv->pcs_addr, phy, 0, 0x1000);
- xlr_phy_write(priv->pcs_addr, phy, 0, 0x0200);
-}
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv)
-{
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
- int speed;
-
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
- xlr_sgmii_init(priv);
-
- if (phydev->speed != priv->phy_speed) {
- speed = phydev->speed;
- if (speed == SPEED_1000) {
- /* Set interface to Byte mode */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
- priv->phy_speed = speed;
- } else if (speed == SPEED_100 || speed == SPEED_10) {
- /* Set interface to Nibble mode */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117);
- priv->phy_speed = speed;
- }
- /* Set SGMII speed in Interface control reg */
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- if (speed == SPEED_10)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL, SGMII_SPEED_10);
- if (speed == SPEED_100)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL, SGMII_SPEED_100);
- if (speed == SPEED_1000)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL, SGMII_SPEED_1000);
- }
- if (speed == SPEED_10)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2);
- if (speed == SPEED_100)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x1);
- if (speed == SPEED_1000)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x0);
- }
- pr_info("gmac%d : %dMbps\n", priv->port_id, priv->phy_speed);
-}
-
-static void xlr_gmac_link_adjust(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
- u32 intreg;
-
- intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG);
- if (phydev->link) {
- if (phydev->speed != priv->phy_speed) {
- xlr_set_gmac_speed(priv);
- pr_info("gmac%d : Link up\n", priv->port_id);
- }
- } else {
- xlr_set_gmac_speed(priv);
- pr_info("gmac%d : Link down\n", priv->port_id);
- }
-}
-
-static int xlr_mii_probe(struct xlr_net_priv *priv)
-{
- struct phy_device *phydev = priv->mii_bus->phy_map[priv->phy_addr];
-
- if (!phydev) {
- pr_err("no PHY found on phy_addr %d\n", priv->phy_addr);
- return -ENODEV;
- }
-
- /* Attach MAC to PHY */
- phydev = phy_connect(priv->ndev, dev_name(&phydev->dev),
- &xlr_gmac_link_adjust, priv->nd->phy_interface);
-
- if (IS_ERR(phydev)) {
- pr_err("could not attach PHY\n");
- return PTR_ERR(phydev);
- }
- phydev->supported &= (ADVERTISED_10baseT_Full
- | ADVERTISED_10baseT_Half
- | ADVERTISED_100baseT_Full
- | ADVERTISED_100baseT_Half
- | ADVERTISED_1000baseT_Full
- | ADVERTISED_Autoneg
- | ADVERTISED_MII);
-
- phydev->advertising = phydev->supported;
-	pr_info("attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
- phydev->drv->name, dev_name(&phydev->dev));
- return 0;
-}
-
-static int xlr_setup_mdio(struct xlr_net_priv *priv,
- struct platform_device *pdev)
-{
- int err;
-
- priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- pr_err("mdiobus alloc failed\n");
- return -ENOMEM;
- }
-
- priv->mii_bus->priv = priv;
- priv->mii_bus->name = "xlr-mdio";
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
- priv->mii_bus->name, priv->port_id);
- priv->mii_bus->read = xlr_mii_read;
- priv->mii_bus->write = xlr_mii_write;
- priv->mii_bus->parent = &pdev->dev;
- priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- if (priv->mii_bus->irq == NULL) {
- pr_err("irq alloc failed\n");
- mdiobus_free(priv->mii_bus);
- return -ENOMEM;
- }
-
- priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
-
- /* Scan only the enabled address */
- priv->mii_bus->phy_mask = ~(1 << priv->phy_addr);
-
- /* setting clock divisor to 54 */
- xlr_nae_wreg(priv->base_addr, R_MII_MGMT_CONFIG, 0x7);
-
- err = mdiobus_register(priv->mii_bus);
- if (err) {
- mdiobus_free(priv->mii_bus);
- pr_err("mdio bus registration failed\n");
- return err;
- }
-
- pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id);
- err = xlr_mii_probe(priv);
- if (err) {
- mdiobus_free(priv->mii_bus);
- return err;
- }
- return 0;
-}
-
-static void xlr_port_enable(struct xlr_net_priv *priv)
-{
- u32 prid = (read_c0_prid() & 0xf000);
-
- /* Setup MAC_CONFIG reg if (xls & rgmii) */
- if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) &&
- priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- (1 << O_RX_CONTROL__RGMII), (1 << O_RX_CONTROL__RGMII));
-
- /* Rx Tx enable */
- xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)),
- ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)));
-
- /* Setup tx control reg */
- xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TxEnable) |
- (512 << O_TX_CONTROL__TxThreshold)), 0x3fff);
-
- /* Setup rx control reg */
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RxEnable, 1 << O_RX_CONTROL__RxEnable);
-}
-
-static void xlr_port_disable(struct xlr_net_priv *priv)
-{
- /* Setup MAC_CONFIG reg */
- /* Rx Tx disable*/
- xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)),
- 0x0);
-
- /* Setup tx control reg */
- xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TxEnable) |
- (512 << O_TX_CONTROL__TxThreshold)), 0);
-
- /* Setup rx control reg */
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RxEnable, 0);
-}
-
-/*
- * Initialization of gmac
- */
-static int xlr_gmac_init(struct xlr_net_priv *priv,
- struct platform_device *pdev)
-{
- int ret;
-
- pr_info("Initializing the gmac%d\n", priv->port_id);
-
- xlr_port_disable(priv);
-
- xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL,
- (1 << O_DESC_PACK_CTRL__MaxEntry)
- | (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset)
- | (1600 << O_DESC_PACK_CTRL__RegularSize));
-
- ret = xlr_setup_mdio(priv, pdev);
- if (ret)
- return ret;
- xlr_port_enable(priv);
-
- /* Enable Full-duplex/1000Mbps/CRC */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
-	/* speed 2.5 MHz */
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02);
- /* Setup Interrupt mask reg */
- xlr_nae_wreg(priv->base_addr, R_INTMASK,
- (1 << O_INTMASK__TxIllegal) |
- (1 << O_INTMASK__MDInt) |
- (1 << O_INTMASK__TxFetchError) |
- (1 << O_INTMASK__P2PSpillEcc) |
- (1 << O_INTMASK__TagFull) |
- (1 << O_INTMASK__Underrun) |
- (1 << O_INTMASK__Abort)
- );
-
- /* Clear all stats */
- xlr_reg_update(priv->base_addr, R_STATCTRL,
- 0, 1 << O_STATCTRL__ClrCnt);
- xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2,
- 1 << 2);
- return 0;
-}
-
-static int xlr_net_probe(struct platform_device *pdev)
-{
- struct xlr_net_priv *priv = NULL;
- struct net_device *ndev;
- struct resource *res;
- struct xlr_adapter *adapter;
- int err, port;
-
- pr_info("XLR/XLS Ethernet Driver controller %d\n", pdev->id);
- /*
- * Allocate our adapter data structure and attach it to the device.
- */
- adapter = (struct xlr_adapter *)
- devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- return err;
- }
-
-	/*
-	 * XLR and XLS have 1 and 2 NAE controllers respectively.
-	 * Each controller has 4 gmac ports, and each controller is mapped
-	 * under one parent device, so the 4 gmac ports sit under one device.
-	 * Each port has a MEM + IRQ resource pair, hence num_resources / 2.
-	 */
- for (port = 0; port < pdev->num_resources/2; port++) {
- ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32);
- if (!ndev) {
- pr_err("Allocation of Ethernet device failed\n");
- return -ENOMEM;
- }
-
- priv = netdev_priv(ndev);
- priv->pdev = pdev;
- priv->ndev = ndev;
- priv->port_id = (pdev->id * 4) + port;
- priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, port);
-
- if (res == NULL) {
- pr_err("No memory resource for MAC %d\n",
- priv->port_id);
- err = -ENODEV;
- goto err_gmac;
- }
- priv->base_addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->base_addr)) {
- err = PTR_ERR(priv->base_addr);
- goto err_gmac;
- }
- priv->adapter = adapter;
- adapter->netdev[port] = ndev;
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, port);
- if (res == NULL) {
- pr_err("No irq resource for MAC %d\n", priv->port_id);
- err = -ENODEV;
- goto err_gmac;
- }
-
- ndev->irq = res->start;
-
- priv->phy_addr = priv->nd->phy_addr[port];
- priv->tx_stnid = priv->nd->tx_stnid[port];
- priv->mii_addr = priv->nd->mii_addr;
- priv->serdes_addr = priv->nd->serdes_addr;
- priv->pcs_addr = priv->nd->pcs_addr;
- priv->gpio_addr = priv->nd->gpio_addr;
-
- ndev->netdev_ops = &xlr_netdev_ops;
- ndev->watchdog_timeo = HZ;
-
- /* Setup Mac address and Rx mode */
- eth_hw_addr_random(ndev);
- xlr_hw_set_mac_addr(ndev);
- xlr_set_rx_mode(ndev);
-
- priv->num_rx_desc += MAX_NUM_DESC_SPILL;
- ndev->ethtool_ops = &xlr_ethtool_ops;
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- xlr_config_fifo_spill_area(priv);
- /* Configure PDE to Round-Robin pkt distribution */
- xlr_config_pde(priv);
- xlr_config_parser(priv);
-
- /* Call init with respect to port */
- if (strcmp(res->name, "gmac") == 0) {
- err = xlr_gmac_init(priv, pdev);
- if (err) {
- pr_err("gmac%d init failed\n", priv->port_id);
- goto err_gmac;
- }
- }
-
- if (priv->port_id == 0 || priv->port_id == 4) {
- err = xlr_config_common(priv);
- if (err)
- goto err_netdev;
- }
-
- err = register_netdev(ndev);
- if (err) {
- pr_err("Registering netdev failed for gmac%d\n",
- priv->port_id);
- goto err_netdev;
- }
- platform_set_drvdata(pdev, priv);
- }
-
- return 0;
-
-err_netdev:
- mdiobus_free(priv->mii_bus);
-err_gmac:
- free_netdev(ndev);
- return err;
-}
-
-static int xlr_net_remove(struct platform_device *pdev)
-{
- struct xlr_net_priv *priv = platform_get_drvdata(pdev);
-
- unregister_netdev(priv->ndev);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
- free_netdev(priv->ndev);
- return 0;
-}
-
-static struct platform_driver xlr_net_driver = {
- .probe = xlr_net_probe,
- .remove = xlr_net_remove,
- .driver = {
- .name = "xlr-net",
- },
-};
-
-module_platform_driver(xlr_net_driver);
-
-MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>");
-MODULE_DESCRIPTION("Ethernet driver for Netlogic XLR/XLS");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("platform:xlr-net");
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
deleted file mode 100644
index 2f65ec5a615c..000000000000
--- a/drivers/staging/netlogic/xlr_net.h
+++ /dev/null
@@ -1,1105 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/* #define MAC_SPLIT_MODE */
-
-#define MAC_SPACING 0x400
-#define XGMAC_SPACING 0x400
-
-/* PE-MCXMAC register and bit field definitions */
-#define R_MAC_CONFIG_1 0x00
-#define O_MAC_CONFIG_1__srst 31
-#define O_MAC_CONFIG_1__simr 30
-#define O_MAC_CONFIG_1__hrrmc 18
-#define W_MAC_CONFIG_1__hrtmc 2
-#define O_MAC_CONFIG_1__hrrfn 16
-#define W_MAC_CONFIG_1__hrtfn 2
-#define O_MAC_CONFIG_1__intlb 8
-#define O_MAC_CONFIG_1__rxfc 5
-#define O_MAC_CONFIG_1__txfc 4
-#define O_MAC_CONFIG_1__srxen 3
-#define O_MAC_CONFIG_1__rxen 2
-#define O_MAC_CONFIG_1__stxen 1
-#define O_MAC_CONFIG_1__txen 0
-#define R_MAC_CONFIG_2 0x01
-#define O_MAC_CONFIG_2__prlen 12
-#define W_MAC_CONFIG_2__prlen 4
-#define O_MAC_CONFIG_2__speed 8
-#define W_MAC_CONFIG_2__speed 2
-#define O_MAC_CONFIG_2__hugen 5
-#define O_MAC_CONFIG_2__flchk 4
-#define O_MAC_CONFIG_2__crce 1
-#define O_MAC_CONFIG_2__fulld 0
-#define R_IPG_IFG 0x02
-#define O_IPG_IFG__ipgr1 24
-#define W_IPG_IFG__ipgr1 7
-#define O_IPG_IFG__ipgr2 16
-#define W_IPG_IFG__ipgr2 7
-#define O_IPG_IFG__mifg 8
-#define W_IPG_IFG__mifg 8
-#define O_IPG_IFG__ipgt 0
-#define W_IPG_IFG__ipgt 7
-#define R_HALF_DUPLEX 0x03
-#define O_HALF_DUPLEX__abebt 24
-#define W_HALF_DUPLEX__abebt 4
-#define O_HALF_DUPLEX__abebe 19
-#define O_HALF_DUPLEX__bpnb 18
-#define O_HALF_DUPLEX__nobo 17
-#define O_HALF_DUPLEX__edxsdfr 16
-#define O_HALF_DUPLEX__retry 12
-#define W_HALF_DUPLEX__retry 4
-#define O_HALF_DUPLEX__lcol 0
-#define W_HALF_DUPLEX__lcol 10
-#define R_MAXIMUM_FRAME_LENGTH 0x04
-#define O_MAXIMUM_FRAME_LENGTH__maxf 0
-#define W_MAXIMUM_FRAME_LENGTH__maxf 16
-#define R_TEST 0x07
-#define O_TEST__mbof 3
-#define O_TEST__rthdf 2
-#define O_TEST__tpause 1
-#define O_TEST__sstct 0
-#define R_MII_MGMT_CONFIG 0x08
-#define O_MII_MGMT_CONFIG__scinc 5
-#define O_MII_MGMT_CONFIG__spre 4
-#define O_MII_MGMT_CONFIG__clks 3
-#define W_MII_MGMT_CONFIG__clks 3
-#define R_MII_MGMT_COMMAND 0x09
-#define O_MII_MGMT_COMMAND__scan 1
-#define O_MII_MGMT_COMMAND__rstat 0
-#define R_MII_MGMT_ADDRESS 0x0A
-#define O_MII_MGMT_ADDRESS__fiad 8
-#define W_MII_MGMT_ADDRESS__fiad 5
-#define O_MII_MGMT_ADDRESS__fgad 5
-#define W_MII_MGMT_ADDRESS__fgad 0
-#define R_MII_MGMT_WRITE_DATA 0x0B
-#define O_MII_MGMT_WRITE_DATA__ctld 0
-#define W_MII_MGMT_WRITE_DATA__ctld 16
-#define R_MII_MGMT_STATUS 0x0C
-#define R_MII_MGMT_INDICATORS 0x0D
-#define O_MII_MGMT_INDICATORS__nvalid 2
-#define O_MII_MGMT_INDICATORS__scan 1
-#define O_MII_MGMT_INDICATORS__busy 0
-#define R_INTERFACE_CONTROL 0x0E
-#define O_INTERFACE_CONTROL__hrstint 31
-#define O_INTERFACE_CONTROL__tbimode 27
-#define O_INTERFACE_CONTROL__ghdmode 26
-#define O_INTERFACE_CONTROL__lhdmode 25
-#define O_INTERFACE_CONTROL__phymod 24
-#define O_INTERFACE_CONTROL__hrrmi 23
-#define O_INTERFACE_CONTROL__rspd 16
-#define O_INTERFACE_CONTROL__hr100 15
-#define O_INTERFACE_CONTROL__frcq 10
-#define O_INTERFACE_CONTROL__nocfr 9
-#define O_INTERFACE_CONTROL__dlfct 8
-#define O_INTERFACE_CONTROL__enjab 0
-#define R_INTERFACE_STATUS 0x0F
-#define O_INTERFACE_STATUS__xsdfr 9
-#define O_INTERFACE_STATUS__ssrr 8
-#define W_INTERFACE_STATUS__ssrr 5
-#define O_INTERFACE_STATUS__miilf 3
-#define O_INTERFACE_STATUS__locar 2
-#define O_INTERFACE_STATUS__sqerr 1
-#define O_INTERFACE_STATUS__jabber 0
-#define R_STATION_ADDRESS_LS 0x10
-#define R_STATION_ADDRESS_MS 0x11
-
-/* A-XGMAC register and bit field definitions */
-#define R_XGMAC_CONFIG_0 0x00
-#define O_XGMAC_CONFIG_0__hstmacrst 31
-#define O_XGMAC_CONFIG_0__hstrstrctl 23
-#define O_XGMAC_CONFIG_0__hstrstrfn 22
-#define O_XGMAC_CONFIG_0__hstrsttctl 18
-#define O_XGMAC_CONFIG_0__hstrsttfn 17
-#define O_XGMAC_CONFIG_0__hstrstmiim 16
-#define O_XGMAC_CONFIG_0__hstloopback 8
-#define R_XGMAC_CONFIG_1 0x01
-#define O_XGMAC_CONFIG_1__hsttctlen 31
-#define O_XGMAC_CONFIG_1__hsttfen 30
-#define O_XGMAC_CONFIG_1__hstrctlen 29
-#define O_XGMAC_CONFIG_1__hstrfen 28
-#define O_XGMAC_CONFIG_1__tfen 26
-#define O_XGMAC_CONFIG_1__rfen 24
-#define O_XGMAC_CONFIG_1__hstrctlshrtp 12
-#define O_XGMAC_CONFIG_1__hstdlyfcstx 10
-#define W_XGMAC_CONFIG_1__hstdlyfcstx 2
-#define O_XGMAC_CONFIG_1__hstdlyfcsrx 8
-#define W_XGMAC_CONFIG_1__hstdlyfcsrx 2
-#define O_XGMAC_CONFIG_1__hstppen 7
-#define O_XGMAC_CONFIG_1__hstbytswp 6
-#define O_XGMAC_CONFIG_1__hstdrplt64 5
-#define O_XGMAC_CONFIG_1__hstprmscrx 4
-#define O_XGMAC_CONFIG_1__hstlenchk 3
-#define O_XGMAC_CONFIG_1__hstgenfcs 2
-#define O_XGMAC_CONFIG_1__hstpadmode 0
-#define W_XGMAC_CONFIG_1__hstpadmode 2
-#define R_XGMAC_CONFIG_2 0x02
-#define O_XGMAC_CONFIG_2__hsttctlfrcp 31
-#define O_XGMAC_CONFIG_2__hstmlnkflth 27
-#define O_XGMAC_CONFIG_2__hstalnkflth 26
-#define O_XGMAC_CONFIG_2__rflnkflt 24
-#define W_XGMAC_CONFIG_2__rflnkflt 2
-#define O_XGMAC_CONFIG_2__hstipgextmod 16
-#define W_XGMAC_CONFIG_2__hstipgextmod 5
-#define O_XGMAC_CONFIG_2__hstrctlfrcp 15
-#define O_XGMAC_CONFIG_2__hstipgexten 5
-#define O_XGMAC_CONFIG_2__hstmipgext 0
-#define W_XGMAC_CONFIG_2__hstmipgext 5
-#define R_XGMAC_CONFIG_3 0x03
-#define O_XGMAC_CONFIG_3__hstfltrfrm 31
-#define W_XGMAC_CONFIG_3__hstfltrfrm 16
-#define O_XGMAC_CONFIG_3__hstfltrfrmdc 15
-#define W_XGMAC_CONFIG_3__hstfltrfrmdc 16
-#define R_XGMAC_STATION_ADDRESS_LS 0x04
-#define O_XGMAC_STATION_ADDRESS_LS__hstmacadr0 0
-#define W_XGMAC_STATION_ADDRESS_LS__hstmacadr0 32
-#define R_XGMAC_STATION_ADDRESS_MS 0x05
-#define R_XGMAC_MAX_FRAME_LEN 0x08
-#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 16
-#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 14
-#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 0
-#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 16
-#define R_XGMAC_REV_LEVEL 0x0B
-#define O_XGMAC_REV_LEVEL__revlvl 0
-#define W_XGMAC_REV_LEVEL__revlvl 15
-#define R_XGMAC_MIIM_COMMAND 0x10
-#define O_XGMAC_MIIM_COMMAND__hstldcmd 3
-#define O_XGMAC_MIIM_COMMAND__hstmiimcmd 0
-#define W_XGMAC_MIIM_COMMAND__hstmiimcmd 3
-#define R_XGMAC_MIIM_FILED 0x11
-#define O_XGMAC_MIIM_FILED__hststfield 30
-#define W_XGMAC_MIIM_FILED__hststfield 2
-#define O_XGMAC_MIIM_FILED__hstopfield 28
-#define W_XGMAC_MIIM_FILED__hstopfield 2
-#define O_XGMAC_MIIM_FILED__hstphyadx 23
-#define W_XGMAC_MIIM_FILED__hstphyadx 5
-#define O_XGMAC_MIIM_FILED__hstregadx 18
-#define W_XGMAC_MIIM_FILED__hstregadx 5
-#define O_XGMAC_MIIM_FILED__hsttafield 16
-#define W_XGMAC_MIIM_FILED__hsttafield 2
-#define O_XGMAC_MIIM_FILED__miimrddat 0
-#define W_XGMAC_MIIM_FILED__miimrddat 16
-#define R_XGMAC_MIIM_CONFIG 0x12
-#define O_XGMAC_MIIM_CONFIG__hstnopram 7
-#define O_XGMAC_MIIM_CONFIG__hstclkdiv 0
-#define W_XGMAC_MIIM_CONFIG__hstclkdiv 7
-#define R_XGMAC_MIIM_LINK_FAIL_VECTOR 0x13
-#define O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 0
-#define W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 32
-#define R_XGMAC_MIIM_INDICATOR 0x14
-#define O_XGMAC_MIIM_INDICATOR__miimphylf 4
-#define O_XGMAC_MIIM_INDICATOR__miimmoncplt 3
-#define O_XGMAC_MIIM_INDICATOR__miimmonvld 2
-#define O_XGMAC_MIIM_INDICATOR__miimmon 1
-#define O_XGMAC_MIIM_INDICATOR__miimbusy 0
-
-/* GMAC stats registers */
-#define R_RBYT 0x27
-#define R_RPKT 0x28
-#define R_RFCS 0x29
-#define R_RMCA 0x2A
-#define R_RBCA 0x2B
-#define R_RXCF 0x2C
-#define R_RXPF 0x2D
-#define R_RXUO 0x2E
-#define R_RALN 0x2F
-#define R_RFLR 0x30
-#define R_RCDE 0x31
-#define R_RCSE 0x32
-#define R_RUND 0x33
-#define R_ROVR 0x34
-#define R_TBYT 0x38
-#define R_TPKT 0x39
-#define R_TMCA 0x3A
-#define R_TBCA 0x3B
-#define R_TXPF 0x3C
-#define R_TDFR 0x3D
-#define R_TEDF 0x3E
-#define R_TSCL 0x3F
-#define R_TMCL 0x40
-#define R_TLCL 0x41
-#define R_TXCL 0x42
-#define R_TNCL 0x43
-#define R_TJBR 0x46
-#define R_TFCS 0x47
-#define R_TXCF 0x48
-#define R_TOVR 0x49
-#define R_TUND 0x4A
-#define R_TFRG 0x4B
-
-/* Glue logic register and bit field definitions */
-#define R_MAC_ADDR0 0x50
-#define R_MAC_ADDR1 0x52
-#define R_MAC_ADDR2 0x54
-#define R_MAC_ADDR3 0x56
-#define R_MAC_ADDR_MASK2 0x58
-#define R_MAC_ADDR_MASK3 0x5A
-#define R_MAC_FILTER_CONFIG 0x5C
-#define O_MAC_FILTER_CONFIG__BROADCAST_EN 10
-#define O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN 9
-#define O_MAC_FILTER_CONFIG__ALL_MCAST_EN 8
-#define O_MAC_FILTER_CONFIG__ALL_UCAST_EN 7
-#define O_MAC_FILTER_CONFIG__HASH_MCAST_EN 6
-#define O_MAC_FILTER_CONFIG__HASH_UCAST_EN 5
-#define O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC 4
-#define O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID 3
-#define O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID 2
-#define O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID 1
-#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0
-#define R_HASH_TABLE_VECTOR 0x30
-#define R_TX_CONTROL 0x0A0
-#define O_TX_CONTROL__Tx15Halt 31
-#define O_TX_CONTROL__Tx14Halt 30
-#define O_TX_CONTROL__Tx13Halt 29
-#define O_TX_CONTROL__Tx12Halt 28
-#define O_TX_CONTROL__Tx11Halt 27
-#define O_TX_CONTROL__Tx10Halt 26
-#define O_TX_CONTROL__Tx9Halt 25
-#define O_TX_CONTROL__Tx8Halt 24
-#define O_TX_CONTROL__Tx7Halt 23
-#define O_TX_CONTROL__Tx6Halt 22
-#define O_TX_CONTROL__Tx5Halt 21
-#define O_TX_CONTROL__Tx4Halt 20
-#define O_TX_CONTROL__Tx3Halt 19
-#define O_TX_CONTROL__Tx2Halt 18
-#define O_TX_CONTROL__Tx1Halt 17
-#define O_TX_CONTROL__Tx0Halt 16
-#define O_TX_CONTROL__TxIdle 15
-#define O_TX_CONTROL__TxEnable 14
-#define O_TX_CONTROL__TxThreshold 0
-#define W_TX_CONTROL__TxThreshold 14
-#define R_RX_CONTROL 0x0A1
-#define O_RX_CONTROL__RGMII 10
-#define O_RX_CONTROL__SoftReset 2
-#define O_RX_CONTROL__RxHalt 1
-#define O_RX_CONTROL__RxEnable 0
-#define R_DESC_PACK_CTRL 0x0A2
-#define O_DESC_PACK_CTRL__ByteOffset 17
-#define W_DESC_PACK_CTRL__ByteOffset 3
-#define O_DESC_PACK_CTRL__PrePadEnable 16
-#define O_DESC_PACK_CTRL__MaxEntry 14
-#define W_DESC_PACK_CTRL__MaxEntry 2
-#define O_DESC_PACK_CTRL__RegularSize 0
-#define W_DESC_PACK_CTRL__RegularSize 14
-#define R_STATCTRL 0x0A3
-#define O_STATCTRL__OverFlowEn 4
-#define O_STATCTRL__GIG 3
-#define O_STATCTRL__Sten 2
-#define O_STATCTRL__ClrCnt 1
-#define O_STATCTRL__AutoZ 0
-#define R_L2ALLOCCTRL 0x0A4
-#define O_L2ALLOCCTRL__TxL2Allocate 9
-#define W_L2ALLOCCTRL__TxL2Allocate 9
-#define O_L2ALLOCCTRL__RxL2Allocate 0
-#define W_L2ALLOCCTRL__RxL2Allocate 9
-#define R_INTMASK 0x0A5
-#define O_INTMASK__Spi4TxError 28
-#define O_INTMASK__Spi4RxError 27
-#define O_INTMASK__RGMIIHalfDupCollision 27
-#define O_INTMASK__Abort 26
-#define O_INTMASK__Underrun 25
-#define O_INTMASK__DiscardPacket 24
-#define O_INTMASK__AsyncFifoFull 23
-#define O_INTMASK__TagFull 22
-#define O_INTMASK__Class3Full 21
-#define O_INTMASK__C3EarlyFull 20
-#define O_INTMASK__Class2Full 19
-#define O_INTMASK__C2EarlyFull 18
-#define O_INTMASK__Class1Full 17
-#define O_INTMASK__C1EarlyFull 16
-#define O_INTMASK__Class0Full 15
-#define O_INTMASK__C0EarlyFull 14
-#define O_INTMASK__RxDataFull 13
-#define O_INTMASK__RxEarlyFull 12
-#define O_INTMASK__RFreeEmpty 9
-#define O_INTMASK__RFEarlyEmpty 8
-#define O_INTMASK__P2PSpillEcc 7
-#define O_INTMASK__FreeDescFull 5
-#define O_INTMASK__FreeEarlyFull 4
-#define O_INTMASK__TxFetchError 3
-#define O_INTMASK__StatCarry 2
-#define O_INTMASK__MDInt 1
-#define O_INTMASK__TxIllegal 0
-#define R_INTREG 0x0A6
-#define O_INTREG__Spi4TxError 28
-#define O_INTREG__Spi4RxError 27
-#define O_INTREG__RGMIIHalfDupCollision 27
-#define O_INTREG__Abort 26
-#define O_INTREG__Underrun 25
-#define O_INTREG__DiscardPacket 24
-#define O_INTREG__AsyncFifoFull 23
-#define O_INTREG__TagFull 22
-#define O_INTREG__Class3Full 21
-#define O_INTREG__C3EarlyFull 20
-#define O_INTREG__Class2Full 19
-#define O_INTREG__C2EarlyFull 18
-#define O_INTREG__Class1Full 17
-#define O_INTREG__C1EarlyFull 16
-#define O_INTREG__Class0Full 15
-#define O_INTREG__C0EarlyFull 14
-#define O_INTREG__RxDataFull 13
-#define O_INTREG__RxEarlyFull 12
-#define O_INTREG__RFreeEmpty 9
-#define O_INTREG__RFEarlyEmpty 8
-#define O_INTREG__P2PSpillEcc 7
-#define O_INTREG__FreeDescFull 5
-#define O_INTREG__FreeEarlyFull 4
-#define O_INTREG__TxFetchError 3
-#define O_INTREG__StatCarry 2
-#define O_INTREG__MDInt 1
-#define O_INTREG__TxIllegal 0
-#define R_TXRETRY 0x0A7
-#define O_TXRETRY__CollisionRetry 6
-#define O_TXRETRY__BusErrorRetry 5
-#define O_TXRETRY__UnderRunRetry 4
-#define O_TXRETRY__Retries 0
-#define W_TXRETRY__Retries 4
-#define R_CORECONTROL 0x0A8
-#define O_CORECONTROL__ErrorThread 4
-#define W_CORECONTROL__ErrorThread 7
-#define O_CORECONTROL__Shutdown 2
-#define O_CORECONTROL__Speed 0
-#define W_CORECONTROL__Speed 2
-#define R_BYTEOFFSET0 0x0A9
-#define R_BYTEOFFSET1 0x0AA
-#define R_L2TYPE_0 0x0F0
-#define O_L2TYPE__ExtraHdrProtoSize 26
-#define W_L2TYPE__ExtraHdrProtoSize 5
-#define O_L2TYPE__ExtraHdrProtoOffset 20
-#define W_L2TYPE__ExtraHdrProtoOffset 6
-#define O_L2TYPE__ExtraHeaderSize 14
-#define W_L2TYPE__ExtraHeaderSize 6
-#define O_L2TYPE__ProtoOffset 8
-#define W_L2TYPE__ProtoOffset 6
-#define O_L2TYPE__L2HdrOffset 2
-#define W_L2TYPE__L2HdrOffset 6
-#define O_L2TYPE__L2Proto 0
-#define W_L2TYPE__L2Proto 2
-#define R_L2TYPE_1 0xF0
-#define R_L2TYPE_2 0xF0
-#define R_L2TYPE_3 0xF0
-#define R_PARSERCONFIGREG 0x100
-#define O_PARSERCONFIGREG__CRCHashPoly 8
-#define W_PARSERCONFIGREG__CRCHashPoly 7
-#define O_PARSERCONFIGREG__PrePadOffset 4
-#define W_PARSERCONFIGREG__PrePadOffset 4
-#define O_PARSERCONFIGREG__UseCAM 2
-#define O_PARSERCONFIGREG__UseHASH 1
-#define O_PARSERCONFIGREG__UseProto 0
-#define R_L3CTABLE 0x140
-#define O_L3CTABLE__Offset0 25
-#define W_L3CTABLE__Offset0 7
-#define O_L3CTABLE__Len0 21
-#define W_L3CTABLE__Len0 4
-#define O_L3CTABLE__Offset1 14
-#define W_L3CTABLE__Offset1 7
-#define O_L3CTABLE__Len1 10
-#define W_L3CTABLE__Len1 4
-#define O_L3CTABLE__Offset2 4
-#define W_L3CTABLE__Offset2 6
-#define O_L3CTABLE__Len2 0
-#define W_L3CTABLE__Len2 4
-#define O_L3CTABLE__L3HdrOffset 26
-#define W_L3CTABLE__L3HdrOffset 6
-#define O_L3CTABLE__L4ProtoOffset 20
-#define W_L3CTABLE__L4ProtoOffset 6
-#define O_L3CTABLE__IPChksumCompute 19
-#define O_L3CTABLE__L4Classify 18
-#define O_L3CTABLE__L2Proto 16
-#define W_L3CTABLE__L2Proto 2
-#define O_L3CTABLE__L3ProtoKey 0
-#define W_L3CTABLE__L3ProtoKey 16
-#define R_L4CTABLE 0x160
-#define O_L4CTABLE__Offset0 21
-#define W_L4CTABLE__Offset0 6
-#define O_L4CTABLE__Len0 17
-#define W_L4CTABLE__Len0 4
-#define O_L4CTABLE__Offset1 11
-#define W_L4CTABLE__Offset1 6
-#define O_L4CTABLE__Len1 7
-#define W_L4CTABLE__Len1 4
-#define O_L4CTABLE__TCPChksumEnable 0
-#define R_CAM4X128TABLE 0x172
-#define O_CAM4X128TABLE__ClassId 7
-#define W_CAM4X128TABLE__ClassId 2
-#define O_CAM4X128TABLE__BucketId 1
-#define W_CAM4X128TABLE__BucketId 6
-#define O_CAM4X128TABLE__UseBucket 0
-#define R_CAM4X128KEY 0x180
-#define R_TRANSLATETABLE 0x1A0
-#define R_DMACR0 0x200
-#define O_DMACR0__Data0WrMaxCr 27
-#define W_DMACR0__Data0WrMaxCr 3
-#define O_DMACR0__Data0RdMaxCr 24
-#define W_DMACR0__Data0RdMaxCr 3
-#define O_DMACR0__Data1WrMaxCr 21
-#define W_DMACR0__Data1WrMaxCr 3
-#define O_DMACR0__Data1RdMaxCr 18
-#define W_DMACR0__Data1RdMaxCr 3
-#define O_DMACR0__Data2WrMaxCr 15
-#define W_DMACR0__Data2WrMaxCr 3
-#define O_DMACR0__Data2RdMaxCr 12
-#define W_DMACR0__Data2RdMaxCr 3
-#define O_DMACR0__Data3WrMaxCr 9
-#define W_DMACR0__Data3WrMaxCr 3
-#define O_DMACR0__Data3RdMaxCr 6
-#define W_DMACR0__Data3RdMaxCr 3
-#define O_DMACR0__Data4WrMaxCr 3
-#define W_DMACR0__Data4WrMaxCr 3
-#define O_DMACR0__Data4RdMaxCr 0
-#define W_DMACR0__Data4RdMaxCr 3
-#define R_DMACR1 0x201
-#define O_DMACR1__Data5WrMaxCr 27
-#define W_DMACR1__Data5WrMaxCr 3
-#define O_DMACR1__Data5RdMaxCr 24
-#define W_DMACR1__Data5RdMaxCr 3
-#define O_DMACR1__Data6WrMaxCr 21
-#define W_DMACR1__Data6WrMaxCr 3
-#define O_DMACR1__Data6RdMaxCr 18
-#define W_DMACR1__Data6RdMaxCr 3
-#define O_DMACR1__Data7WrMaxCr 15
-#define W_DMACR1__Data7WrMaxCr 3
-#define O_DMACR1__Data7RdMaxCr 12
-#define W_DMACR1__Data7RdMaxCr 3
-#define O_DMACR1__Data8WrMaxCr 9
-#define W_DMACR1__Data8WrMaxCr 3
-#define O_DMACR1__Data8RdMaxCr 6
-#define W_DMACR1__Data8RdMaxCr 3
-#define O_DMACR1__Data9WrMaxCr 3
-#define W_DMACR1__Data9WrMaxCr 3
-#define O_DMACR1__Data9RdMaxCr 0
-#define W_DMACR1__Data9RdMaxCr 3
-#define R_DMACR2 0x202
-#define O_DMACR2__Data10WrMaxCr 27
-#define W_DMACR2__Data10WrMaxCr 3
-#define O_DMACR2__Data10RdMaxCr 24
-#define W_DMACR2__Data10RdMaxCr 3
-#define O_DMACR2__Data11WrMaxCr 21
-#define W_DMACR2__Data11WrMaxCr 3
-#define O_DMACR2__Data11RdMaxCr 18
-#define W_DMACR2__Data11RdMaxCr 3
-#define O_DMACR2__Data12WrMaxCr 15
-#define W_DMACR2__Data12WrMaxCr 3
-#define O_DMACR2__Data12RdMaxCr 12
-#define W_DMACR2__Data12RdMaxCr 3
-#define O_DMACR2__Data13WrMaxCr 9
-#define W_DMACR2__Data13WrMaxCr 3
-#define O_DMACR2__Data13RdMaxCr 6
-#define W_DMACR2__Data13RdMaxCr 3
-#define O_DMACR2__Data14WrMaxCr 3
-#define W_DMACR2__Data14WrMaxCr 3
-#define O_DMACR2__Data14RdMaxCr 0
-#define W_DMACR2__Data14RdMaxCr 3
-#define R_DMACR3 0x203
-#define O_DMACR3__Data15WrMaxCr 27
-#define W_DMACR3__Data15WrMaxCr 3
-#define O_DMACR3__Data15RdMaxCr 24
-#define W_DMACR3__Data15RdMaxCr 3
-#define O_DMACR3__SpClassWrMaxCr 21
-#define W_DMACR3__SpClassWrMaxCr 3
-#define O_DMACR3__SpClassRdMaxCr 18
-#define W_DMACR3__SpClassRdMaxCr 3
-#define O_DMACR3__JumFrInWrMaxCr 15
-#define W_DMACR3__JumFrInWrMaxCr 3
-#define O_DMACR3__JumFrInRdMaxCr 12
-#define W_DMACR3__JumFrInRdMaxCr 3
-#define O_DMACR3__RegFrInWrMaxCr 9
-#define W_DMACR3__RegFrInWrMaxCr 3
-#define O_DMACR3__RegFrInRdMaxCr 6
-#define W_DMACR3__RegFrInRdMaxCr 3
-#define O_DMACR3__FrOutWrMaxCr 3
-#define W_DMACR3__FrOutWrMaxCr 3
-#define O_DMACR3__FrOutRdMaxCr 0
-#define W_DMACR3__FrOutRdMaxCr 3
-#define R_REG_FRIN_SPILL_MEM_START_0 0x204
-#define O_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 0
-#define W_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 32
-#define R_REG_FRIN_SPILL_MEM_START_1 0x205
-#define O_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 0
-#define W_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 3
-#define R_REG_FRIN_SPILL_MEM_SIZE 0x206
-#define O_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 0
-#define W_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 32
-#define R_FROUT_SPILL_MEM_START_0 0x207
-#define O_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 0
-#define W_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 32
-#define R_FROUT_SPILL_MEM_START_1 0x208
-#define O_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 0
-#define W_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 3
-#define R_FROUT_SPILL_MEM_SIZE 0x209
-#define O_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 0
-#define W_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 32
-#define R_CLASS0_SPILL_MEM_START_0 0x20A
-#define O_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 0
-#define W_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 32
-#define R_CLASS0_SPILL_MEM_START_1 0x20B
-#define O_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 0
-#define W_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 3
-#define R_CLASS0_SPILL_MEM_SIZE 0x20C
-#define O_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 0
-#define W_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 32
-#define R_JUMFRIN_SPILL_MEM_START_0 0x20D
-#define O_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 0
-#define W_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 32
-#define R_JUMFRIN_SPILL_MEM_START_1 0x20E
-#define O_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 0
-#define W_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 3
-#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F
-#define O_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 0
-#define W_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 32
-#define R_CLASS1_SPILL_MEM_START_0 0x210
-#define O_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 0
-#define W_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 32
-#define R_CLASS1_SPILL_MEM_START_1 0x211
-#define O_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 0
-#define W_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 3
-#define R_CLASS1_SPILL_MEM_SIZE 0x212
-#define O_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 0
-#define W_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 32
-#define R_CLASS2_SPILL_MEM_START_0 0x213
-#define O_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 0
-#define W_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 32
-#define R_CLASS2_SPILL_MEM_START_1 0x214
-#define O_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 0
-#define W_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 3
-#define R_CLASS2_SPILL_MEM_SIZE 0x215
-#define O_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 0
-#define W_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 32
-#define R_CLASS3_SPILL_MEM_START_0 0x216
-#define O_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 0
-#define W_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 32
-#define R_CLASS3_SPILL_MEM_START_1 0x217
-#define O_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 0
-#define W_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 3
-#define R_CLASS3_SPILL_MEM_SIZE 0x218
-#define O_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 0
-#define W_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 32
-#define R_REG_FRIN1_SPILL_MEM_START_0 0x219
-#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a
-#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b
-#define R_SPIHNGY0 0x219
-#define O_SPIHNGY0__EG_HNGY_THRESH_0 24
-#define W_SPIHNGY0__EG_HNGY_THRESH_0 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_1 16
-#define W_SPIHNGY0__EG_HNGY_THRESH_1 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_2 8
-#define W_SPIHNGY0__EG_HNGY_THRESH_2 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_3 0
-#define W_SPIHNGY0__EG_HNGY_THRESH_3 7
-#define R_SPIHNGY1 0x21A
-#define O_SPIHNGY1__EG_HNGY_THRESH_4 24
-#define W_SPIHNGY1__EG_HNGY_THRESH_4 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_5 16
-#define W_SPIHNGY1__EG_HNGY_THRESH_5 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_6 8
-#define W_SPIHNGY1__EG_HNGY_THRESH_6 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_7 0
-#define W_SPIHNGY1__EG_HNGY_THRESH_7 7
-#define R_SPIHNGY2 0x21B
-#define O_SPIHNGY2__EG_HNGY_THRESH_8 24
-#define W_SPIHNGY2__EG_HNGY_THRESH_8 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_9 16
-#define W_SPIHNGY2__EG_HNGY_THRESH_9 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_10 8
-#define W_SPIHNGY2__EG_HNGY_THRESH_10 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_11 0
-#define W_SPIHNGY2__EG_HNGY_THRESH_11 7
-#define R_SPIHNGY3 0x21C
-#define O_SPIHNGY3__EG_HNGY_THRESH_12 24
-#define W_SPIHNGY3__EG_HNGY_THRESH_12 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_13 16
-#define W_SPIHNGY3__EG_HNGY_THRESH_13 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_14 8
-#define W_SPIHNGY3__EG_HNGY_THRESH_14 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_15 0
-#define W_SPIHNGY3__EG_HNGY_THRESH_15 7
-#define R_SPISTRV0 0x21D
-#define O_SPISTRV0__EG_STRV_THRESH_0 24
-#define W_SPISTRV0__EG_STRV_THRESH_0 7
-#define O_SPISTRV0__EG_STRV_THRESH_1 16
-#define W_SPISTRV0__EG_STRV_THRESH_1 7
-#define O_SPISTRV0__EG_STRV_THRESH_2 8
-#define W_SPISTRV0__EG_STRV_THRESH_2 7
-#define O_SPISTRV0__EG_STRV_THRESH_3 0
-#define W_SPISTRV0__EG_STRV_THRESH_3 7
-#define R_SPISTRV1 0x21E
-#define O_SPISTRV1__EG_STRV_THRESH_4 24
-#define W_SPISTRV1__EG_STRV_THRESH_4 7
-#define O_SPISTRV1__EG_STRV_THRESH_5 16
-#define W_SPISTRV1__EG_STRV_THRESH_5 7
-#define O_SPISTRV1__EG_STRV_THRESH_6 8
-#define W_SPISTRV1__EG_STRV_THRESH_6 7
-#define O_SPISTRV1__EG_STRV_THRESH_7 0
-#define W_SPISTRV1__EG_STRV_THRESH_7 7
-#define R_SPISTRV2 0x21F
-#define O_SPISTRV2__EG_STRV_THRESH_8 24
-#define W_SPISTRV2__EG_STRV_THRESH_8 7
-#define O_SPISTRV2__EG_STRV_THRESH_9 16
-#define W_SPISTRV2__EG_STRV_THRESH_9 7
-#define O_SPISTRV2__EG_STRV_THRESH_10 8
-#define W_SPISTRV2__EG_STRV_THRESH_10 7
-#define O_SPISTRV2__EG_STRV_THRESH_11 0
-#define W_SPISTRV2__EG_STRV_THRESH_11 7
-#define R_SPISTRV3 0x220
-#define O_SPISTRV3__EG_STRV_THRESH_12 24
-#define W_SPISTRV3__EG_STRV_THRESH_12 7
-#define O_SPISTRV3__EG_STRV_THRESH_13 16
-#define W_SPISTRV3__EG_STRV_THRESH_13 7
-#define O_SPISTRV3__EG_STRV_THRESH_14 8
-#define W_SPISTRV3__EG_STRV_THRESH_14 7
-#define O_SPISTRV3__EG_STRV_THRESH_15 0
-#define W_SPISTRV3__EG_STRV_THRESH_15 7
-#define R_TXDATAFIFO0 0x221
-#define O_TXDATAFIFO0__Tx0DataFifoStart 24
-#define W_TXDATAFIFO0__Tx0DataFifoStart 7
-#define O_TXDATAFIFO0__Tx0DataFifoSize 16
-#define W_TXDATAFIFO0__Tx0DataFifoSize 7
-#define O_TXDATAFIFO0__Tx1DataFifoStart 8
-#define W_TXDATAFIFO0__Tx1DataFifoStart 7
-#define O_TXDATAFIFO0__Tx1DataFifoSize 0
-#define W_TXDATAFIFO0__Tx1DataFifoSize 7
-#define R_TXDATAFIFO1 0x222
-#define O_TXDATAFIFO1__Tx2DataFifoStart 24
-#define W_TXDATAFIFO1__Tx2DataFifoStart 7
-#define O_TXDATAFIFO1__Tx2DataFifoSize 16
-#define W_TXDATAFIFO1__Tx2DataFifoSize 7
-#define O_TXDATAFIFO1__Tx3DataFifoStart 8
-#define W_TXDATAFIFO1__Tx3DataFifoStart 7
-#define O_TXDATAFIFO1__Tx3DataFifoSize 0
-#define W_TXDATAFIFO1__Tx3DataFifoSize 7
-#define R_TXDATAFIFO2 0x223
-#define O_TXDATAFIFO2__Tx4DataFifoStart 24
-#define W_TXDATAFIFO2__Tx4DataFifoStart 7
-#define O_TXDATAFIFO2__Tx4DataFifoSize 16
-#define W_TXDATAFIFO2__Tx4DataFifoSize 7
-#define O_TXDATAFIFO2__Tx5DataFifoStart 8
-#define W_TXDATAFIFO2__Tx5DataFifoStart 7
-#define O_TXDATAFIFO2__Tx5DataFifoSize 0
-#define W_TXDATAFIFO2__Tx5DataFifoSize 7
-#define R_TXDATAFIFO3 0x224
-#define O_TXDATAFIFO3__Tx6DataFifoStart 24
-#define W_TXDATAFIFO3__Tx6DataFifoStart 7
-#define O_TXDATAFIFO3__Tx6DataFifoSize 16
-#define W_TXDATAFIFO3__Tx6DataFifoSize 7
-#define O_TXDATAFIFO3__Tx7DataFifoStart 8
-#define W_TXDATAFIFO3__Tx7DataFifoStart 7
-#define O_TXDATAFIFO3__Tx7DataFifoSize 0
-#define W_TXDATAFIFO3__Tx7DataFifoSize 7
-#define R_TXDATAFIFO4 0x225
-#define O_TXDATAFIFO4__Tx8DataFifoStart 24
-#define W_TXDATAFIFO4__Tx8DataFifoStart 7
-#define O_TXDATAFIFO4__Tx8DataFifoSize 16
-#define W_TXDATAFIFO4__Tx8DataFifoSize 7
-#define O_TXDATAFIFO4__Tx9DataFifoStart 8
-#define W_TXDATAFIFO4__Tx9DataFifoStart 7
-#define O_TXDATAFIFO4__Tx9DataFifoSize 0
-#define W_TXDATAFIFO4__Tx9DataFifoSize 7
-#define R_TXDATAFIFO5 0x226
-#define O_TXDATAFIFO5__Tx10DataFifoStart 24
-#define W_TXDATAFIFO5__Tx10DataFifoStart 7
-#define O_TXDATAFIFO5__Tx10DataFifoSize 16
-#define W_TXDATAFIFO5__Tx10DataFifoSize 7
-#define O_TXDATAFIFO5__Tx11DataFifoStart 8
-#define W_TXDATAFIFO5__Tx11DataFifoStart 7
-#define O_TXDATAFIFO5__Tx11DataFifoSize 0
-#define W_TXDATAFIFO5__Tx11DataFifoSize 7
-#define R_TXDATAFIFO6 0x227
-#define O_TXDATAFIFO6__Tx12DataFifoStart 24
-#define W_TXDATAFIFO6__Tx12DataFifoStart 7
-#define O_TXDATAFIFO6__Tx12DataFifoSize 16
-#define W_TXDATAFIFO6__Tx12DataFifoSize 7
-#define O_TXDATAFIFO6__Tx13DataFifoStart 8
-#define W_TXDATAFIFO6__Tx13DataFifoStart 7
-#define O_TXDATAFIFO6__Tx13DataFifoSize 0
-#define W_TXDATAFIFO6__Tx13DataFifoSize 7
-#define R_TXDATAFIFO7 0x228
-#define O_TXDATAFIFO7__Tx14DataFifoStart 24
-#define W_TXDATAFIFO7__Tx14DataFifoStart 7
-#define O_TXDATAFIFO7__Tx14DataFifoSize 16
-#define W_TXDATAFIFO7__Tx14DataFifoSize 7
-#define O_TXDATAFIFO7__Tx15DataFifoStart 8
-#define W_TXDATAFIFO7__Tx15DataFifoStart 7
-#define O_TXDATAFIFO7__Tx15DataFifoSize 0
-#define W_TXDATAFIFO7__Tx15DataFifoSize 7
-#define R_RXDATAFIFO0 0x229
-#define O_RXDATAFIFO0__Rx0DataFifoStart 24
-#define W_RXDATAFIFO0__Rx0DataFifoStart 7
-#define O_RXDATAFIFO0__Rx0DataFifoSize 16
-#define W_RXDATAFIFO0__Rx0DataFifoSize 7
-#define O_RXDATAFIFO0__Rx1DataFifoStart 8
-#define W_RXDATAFIFO0__Rx1DataFifoStart 7
-#define O_RXDATAFIFO0__Rx1DataFifoSize 0
-#define W_RXDATAFIFO0__Rx1DataFifoSize 7
-#define R_RXDATAFIFO1 0x22A
-#define O_RXDATAFIFO1__Rx2DataFifoStart 24
-#define W_RXDATAFIFO1__Rx2DataFifoStart 7
-#define O_RXDATAFIFO1__Rx2DataFifoSize 16
-#define W_RXDATAFIFO1__Rx2DataFifoSize 7
-#define O_RXDATAFIFO1__Rx3DataFifoStart 8
-#define W_RXDATAFIFO1__Rx3DataFifoStart 7
-#define O_RXDATAFIFO1__Rx3DataFifoSize 0
-#define W_RXDATAFIFO1__Rx3DataFifoSize 7
-#define R_RXDATAFIFO2 0x22B
-#define O_RXDATAFIFO2__Rx4DataFifoStart 24
-#define W_RXDATAFIFO2__Rx4DataFifoStart 7
-#define O_RXDATAFIFO2__Rx4DataFifoSize 16
-#define W_RXDATAFIFO2__Rx4DataFifoSize 7
-#define O_RXDATAFIFO2__Rx5DataFifoStart 8
-#define W_RXDATAFIFO2__Rx5DataFifoStart 7
-#define O_RXDATAFIFO2__Rx5DataFifoSize 0
-#define W_RXDATAFIFO2__Rx5DataFifoSize 7
-#define R_RXDATAFIFO3 0x22C
-#define O_RXDATAFIFO3__Rx6DataFifoStart 24
-#define W_RXDATAFIFO3__Rx6DataFifoStart 7
-#define O_RXDATAFIFO3__Rx6DataFifoSize 16
-#define W_RXDATAFIFO3__Rx6DataFifoSize 7
-#define O_RXDATAFIFO3__Rx7DataFifoStart 8
-#define W_RXDATAFIFO3__Rx7DataFifoStart 7
-#define O_RXDATAFIFO3__Rx7DataFifoSize 0
-#define W_RXDATAFIFO3__Rx7DataFifoSize 7
-#define R_RXDATAFIFO4 0x22D
-#define O_RXDATAFIFO4__Rx8DataFifoStart 24
-#define W_RXDATAFIFO4__Rx8DataFifoStart 7
-#define O_RXDATAFIFO4__Rx8DataFifoSize 16
-#define W_RXDATAFIFO4__Rx8DataFifoSize 7
-#define O_RXDATAFIFO4__Rx9DataFifoStart 8
-#define W_RXDATAFIFO4__Rx9DataFifoStart 7
-#define O_RXDATAFIFO4__Rx9DataFifoSize 0
-#define W_RXDATAFIFO4__Rx9DataFifoSize 7
-#define R_RXDATAFIFO5 0x22E
-#define O_RXDATAFIFO5__Rx10DataFifoStart 24
-#define W_RXDATAFIFO5__Rx10DataFifoStart 7
-#define O_RXDATAFIFO5__Rx10DataFifoSize 16
-#define W_RXDATAFIFO5__Rx10DataFifoSize 7
-#define O_RXDATAFIFO5__Rx11DataFifoStart 8
-#define W_RXDATAFIFO5__Rx11DataFifoStart 7
-#define O_RXDATAFIFO5__Rx11DataFifoSize 0
-#define W_RXDATAFIFO5__Rx11DataFifoSize 7
-#define R_RXDATAFIFO6 0x22F
-#define O_RXDATAFIFO6__Rx12DataFifoStart 24
-#define W_RXDATAFIFO6__Rx12DataFifoStart 7
-#define O_RXDATAFIFO6__Rx12DataFifoSize 16
-#define W_RXDATAFIFO6__Rx12DataFifoSize 7
-#define O_RXDATAFIFO6__Rx13DataFifoStart 8
-#define W_RXDATAFIFO6__Rx13DataFifoStart 7
-#define O_RXDATAFIFO6__Rx13DataFifoSize 0
-#define W_RXDATAFIFO6__Rx13DataFifoSize 7
-#define R_RXDATAFIFO7 0x230
-#define O_RXDATAFIFO7__Rx14DataFifoStart 24
-#define W_RXDATAFIFO7__Rx14DataFifoStart 7
-#define O_RXDATAFIFO7__Rx14DataFifoSize 16
-#define W_RXDATAFIFO7__Rx14DataFifoSize 7
-#define O_RXDATAFIFO7__Rx15DataFifoStart 8
-#define W_RXDATAFIFO7__Rx15DataFifoStart 7
-#define O_RXDATAFIFO7__Rx15DataFifoSize 0
-#define W_RXDATAFIFO7__Rx15DataFifoSize 7
-#define R_XGMACPADCALIBRATION 0x231
-#define R_FREEQCARVE 0x233
-#define R_SPI4STATICDELAY0 0x240
-#define O_SPI4STATICDELAY0__DataLine7 28
-#define W_SPI4STATICDELAY0__DataLine7 4
-#define O_SPI4STATICDELAY0__DataLine6 24
-#define W_SPI4STATICDELAY0__DataLine6 4
-#define O_SPI4STATICDELAY0__DataLine5 20
-#define W_SPI4STATICDELAY0__DataLine5 4
-#define O_SPI4STATICDELAY0__DataLine4 16
-#define W_SPI4STATICDELAY0__DataLine4 4
-#define O_SPI4STATICDELAY0__DataLine3 12
-#define W_SPI4STATICDELAY0__DataLine3 4
-#define O_SPI4STATICDELAY0__DataLine2 8
-#define W_SPI4STATICDELAY0__DataLine2 4
-#define O_SPI4STATICDELAY0__DataLine1 4
-#define W_SPI4STATICDELAY0__DataLine1 4
-#define O_SPI4STATICDELAY0__DataLine0 0
-#define W_SPI4STATICDELAY0__DataLine0 4
-#define R_SPI4STATICDELAY1 0x241
-#define O_SPI4STATICDELAY1__DataLine15 28
-#define W_SPI4STATICDELAY1__DataLine15 4
-#define O_SPI4STATICDELAY1__DataLine14 24
-#define W_SPI4STATICDELAY1__DataLine14 4
-#define O_SPI4STATICDELAY1__DataLine13 20
-#define W_SPI4STATICDELAY1__DataLine13 4
-#define O_SPI4STATICDELAY1__DataLine12 16
-#define W_SPI4STATICDELAY1__DataLine12 4
-#define O_SPI4STATICDELAY1__DataLine11 12
-#define W_SPI4STATICDELAY1__DataLine11 4
-#define O_SPI4STATICDELAY1__DataLine10 8
-#define W_SPI4STATICDELAY1__DataLine10 4
-#define O_SPI4STATICDELAY1__DataLine9 4
-#define W_SPI4STATICDELAY1__DataLine9 4
-#define O_SPI4STATICDELAY1__DataLine8 0
-#define W_SPI4STATICDELAY1__DataLine8 4
-#define R_SPI4STATICDELAY2 0x242
-#define O_SPI4STATICDELAY0__TxStat1 8
-#define W_SPI4STATICDELAY0__TxStat1 4
-#define O_SPI4STATICDELAY0__TxStat0 4
-#define W_SPI4STATICDELAY0__TxStat0 4
-#define O_SPI4STATICDELAY0__RxControl 0
-#define W_SPI4STATICDELAY0__RxControl 4
-#define R_SPI4CONTROL 0x243
-#define O_SPI4CONTROL__StaticDelay 2
-#define O_SPI4CONTROL__LVDS_LVTTL 1
-#define O_SPI4CONTROL__SPI4Enable 0
-#define R_CLASSWATERMARKS 0x244
-#define O_CLASSWATERMARKS__Class0Watermark 24
-#define W_CLASSWATERMARKS__Class0Watermark 5
-#define O_CLASSWATERMARKS__Class1Watermark 16
-#define W_CLASSWATERMARKS__Class1Watermark 5
-#define O_CLASSWATERMARKS__Class3Watermark 0
-#define W_CLASSWATERMARKS__Class3Watermark 5
-#define R_RXWATERMARKS1 0x245
-#define O_RXWATERMARKS__Rx0DataWatermark 24
-#define W_RXWATERMARKS__Rx0DataWatermark 7
-#define O_RXWATERMARKS__Rx1DataWatermark 16
-#define W_RXWATERMARKS__Rx1DataWatermark 7
-#define O_RXWATERMARKS__Rx3DataWatermark 0
-#define W_RXWATERMARKS__Rx3DataWatermark 7
-#define R_RXWATERMARKS2 0x246
-#define O_RXWATERMARKS__Rx4DataWatermark 24
-#define W_RXWATERMARKS__Rx4DataWatermark 7
-#define O_RXWATERMARKS__Rx5DataWatermark 16
-#define W_RXWATERMARKS__Rx5DataWatermark 7
-#define O_RXWATERMARKS__Rx6DataWatermark 8
-#define W_RXWATERMARKS__Rx6DataWatermark 7
-#define O_RXWATERMARKS__Rx7DataWatermark 0
-#define W_RXWATERMARKS__Rx7DataWatermark 7
-#define R_RXWATERMARKS3 0x247
-#define O_RXWATERMARKS__Rx8DataWatermark 24
-#define W_RXWATERMARKS__Rx8DataWatermark 7
-#define O_RXWATERMARKS__Rx9DataWatermark 16
-#define W_RXWATERMARKS__Rx9DataWatermark 7
-#define O_RXWATERMARKS__Rx10DataWatermark 8
-#define W_RXWATERMARKS__Rx10DataWatermark 7
-#define O_RXWATERMARKS__Rx11DataWatermark 0
-#define W_RXWATERMARKS__Rx11DataWatermark 7
-#define R_RXWATERMARKS4 0x248
-#define O_RXWATERMARKS__Rx12DataWatermark 24
-#define W_RXWATERMARKS__Rx12DataWatermark 7
-#define O_RXWATERMARKS__Rx13DataWatermark 16
-#define W_RXWATERMARKS__Rx13DataWatermark 7
-#define O_RXWATERMARKS__Rx14DataWatermark 8
-#define W_RXWATERMARKS__Rx14DataWatermark 7
-#define O_RXWATERMARKS__Rx15DataWatermark 0
-#define W_RXWATERMARKS__Rx15DataWatermark 7
-#define R_FREEWATERMARKS 0x249
-#define O_FREEWATERMARKS__FreeOutWatermark 16
-#define W_FREEWATERMARKS__FreeOutWatermark 16
-#define O_FREEWATERMARKS__JumFrWatermark 8
-#define W_FREEWATERMARKS__JumFrWatermark 7
-#define O_FREEWATERMARKS__RegFrWatermark 0
-#define W_FREEWATERMARKS__RegFrWatermark 7
-#define R_EGRESSFIFOCARVINGSLOTS 0x24a
-
-#define CTRL_RES0 0
-#define CTRL_RES1 1
-#define CTRL_REG_FREE 2
-#define CTRL_JUMBO_FREE 3
-#define CTRL_CONT 4
-#define CTRL_EOP 5
-#define CTRL_START 6
-#define CTRL_SNGL 7
-
-#define CTRL_B0_NOT_EOP 0
-#define CTRL_B0_EOP 1
-
-#define R_ROUND_ROBIN_TABLE 0
-#define R_PDE_CLASS_0 0x300
-#define R_PDE_CLASS_1 0x302
-#define R_PDE_CLASS_2 0x304
-#define R_PDE_CLASS_3 0x306
-
-#define R_MSG_TX_THRESHOLD 0x308
-
-#define R_GMAC_JFR0_BUCKET_SIZE 0x320
-#define R_GMAC_RFR0_BUCKET_SIZE 0x321
-#define R_GMAC_TX0_BUCKET_SIZE 0x322
-#define R_GMAC_TX1_BUCKET_SIZE 0x323
-#define R_GMAC_TX2_BUCKET_SIZE 0x324
-#define R_GMAC_TX3_BUCKET_SIZE 0x325
-#define R_GMAC_JFR1_BUCKET_SIZE 0x326
-#define R_GMAC_RFR1_BUCKET_SIZE 0x327
-
-#define R_XGS_TX0_BUCKET_SIZE 0x320
-#define R_XGS_TX1_BUCKET_SIZE 0x321
-#define R_XGS_TX2_BUCKET_SIZE 0x322
-#define R_XGS_TX3_BUCKET_SIZE 0x323
-#define R_XGS_TX4_BUCKET_SIZE 0x324
-#define R_XGS_TX5_BUCKET_SIZE 0x325
-#define R_XGS_TX6_BUCKET_SIZE 0x326
-#define R_XGS_TX7_BUCKET_SIZE 0x327
-#define R_XGS_TX8_BUCKET_SIZE 0x328
-#define R_XGS_TX9_BUCKET_SIZE 0x329
-#define R_XGS_TX10_BUCKET_SIZE 0x32A
-#define R_XGS_TX11_BUCKET_SIZE 0x32B
-#define R_XGS_TX12_BUCKET_SIZE 0x32C
-#define R_XGS_TX13_BUCKET_SIZE 0x32D
-#define R_XGS_TX14_BUCKET_SIZE 0x32E
-#define R_XGS_TX15_BUCKET_SIZE 0x32F
-#define R_XGS_JFR_BUCKET_SIZE 0x330
-#define R_XGS_RFR_BUCKET_SIZE 0x331
-
-#define R_CC_CPU0_0 0x380
-#define R_CC_CPU1_0 0x388
-#define R_CC_CPU2_0 0x390
-#define R_CC_CPU3_0 0x398
-#define R_CC_CPU4_0 0x3a0
-#define R_CC_CPU5_0 0x3a8
-#define R_CC_CPU6_0 0x3b0
-#define R_CC_CPU7_0 0x3b8
-
-#define XLR_GMAC_BLK_SZ (XLR_IO_GMAC_1_OFFSET - \
- XLR_IO_GMAC_0_OFFSET)
-
-/* Constants used for configuring the devices */
-
-#define XLR_FB_STN 6 /* Bucket used for Tx freeback */
-
-#define MAC_B2B_IPG 88
-
-#define XLR_NET_PREPAD_LEN 32
-
-/* frame sizes need to be cacheline aligned */
-#define MAX_FRAME_SIZE (1536 + XLR_NET_PREPAD_LEN)
-#define MAX_FRAME_SIZE_JUMBO 9216
-
-#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
-#define MAC_PREPAD 0
-#define BYTE_OFFSET 2
-#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE + BYTE_OFFSET + \
- MAC_PREPAD + MAC_SKB_BACK_PTR_SIZE + SMP_CACHE_BYTES)
-#define MAC_CRC_LEN 4
-#define MAX_NUM_MSGRNG_STN_CC 128
-#define MAX_MSG_SND_ATTEMPTS 100 /* 13 stns x 4 entry msg/stn +
- headroom */
-
-#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
-
-#define MAX_NUM_DESC_SPILL 1024
-#define MAX_FRIN_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_FROUT_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_0_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_1_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_2_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_3_SPILL (MAX_NUM_DESC_SPILL << 2)
-
-enum {
- SGMII_SPEED_10 = 0x00000000,
- SGMII_SPEED_100 = 0x02000000,
- SGMII_SPEED_1000 = 0x04000000,
-};
-
-enum tsv_rsv_reg {
- TX_RX_64_BYTE_FRAME = 0x20,
- TX_RX_64_127_BYTE_FRAME,
- TX_RX_128_255_BYTE_FRAME,
- TX_RX_256_511_BYTE_FRAME,
- TX_RX_512_1023_BYTE_FRAME,
- TX_RX_1024_1518_BYTE_FRAME,
- TX_RX_1519_1522_VLAN_BYTE_FRAME,
-
- RX_BYTE_COUNTER = 0x27,
- RX_PACKET_COUNTER,
- RX_FCS_ERROR_COUNTER,
- RX_MULTICAST_PACKET_COUNTER,
- RX_BROADCAST_PACKET_COUNTER,
- RX_CONTROL_FRAME_PACKET_COUNTER,
- RX_PAUSE_FRAME_PACKET_COUNTER,
- RX_UNKNOWN_OP_CODE_COUNTER,
- RX_ALIGNMENT_ERROR_COUNTER,
- RX_FRAME_LENGTH_ERROR_COUNTER,
- RX_CODE_ERROR_COUNTER,
- RX_CARRIER_SENSE_ERROR_COUNTER,
- RX_UNDERSIZE_PACKET_COUNTER,
- RX_OVERSIZE_PACKET_COUNTER,
- RX_FRAGMENTS_COUNTER,
- RX_JABBER_COUNTER,
- RX_DROP_PACKET_COUNTER,
-
- TX_BYTE_COUNTER = 0x38,
- TX_PACKET_COUNTER,
- TX_MULTICAST_PACKET_COUNTER,
- TX_BROADCAST_PACKET_COUNTER,
- TX_PAUSE_CONTROL_FRAME_COUNTER,
- TX_DEFERRAL_PACKET_COUNTER,
- TX_EXCESSIVE_DEFERRAL_PACKET_COUNTER,
- TX_SINGLE_COLLISION_PACKET_COUNTER,
- TX_MULTI_COLLISION_PACKET_COUNTER,
- TX_LATE_COLLISION_PACKET_COUNTER,
- TX_EXCESSIVE_COLLISION_PACKET_COUNTER,
- TX_TOTAL_COLLISION_COUNTER,
- TX_PAUSE_FRAME_HONERED_COUNTER,
- TX_DROP_FRAME_COUNTER,
- TX_JABBER_FRAME_COUNTER,
- TX_FCS_ERROR_COUNTER,
- TX_CONTROL_FRAME_COUNTER,
- TX_OVERSIZE_FRAME_COUNTER,
- TX_UNDERSIZE_FRAME_COUNTER,
- TX_FRAGMENT_FRAME_COUNTER,
-
- CARRY_REG_1 = 0x4c,
- CARRY_REG_2 = 0x4d,
-};
-
-struct xlr_adapter {
- struct net_device *netdev[4];
-};
-
-struct xlr_net_priv {
- u32 __iomem *base_addr;
- struct net_device *ndev;
- struct xlr_adapter *adapter;
- struct mii_bus *mii_bus;
- int num_rx_desc;
- int phy_addr; /* PHY addr on MDIO bus */
- int pcs_id; /* PCS id on MDIO bus */
- int port_id; /* Port(gmac/xgmac) number, i.e 0-7 */
- int tx_stnid;
- u32 __iomem *mii_addr;
- u32 __iomem *serdes_addr;
- u32 __iomem *pcs_addr;
- u32 __iomem *gpio_addr;
- int phy_speed;
- int port_type;
- struct timer_list queue_timer;
- int wakeup_q;
- struct platform_device *pdev;
- struct xlr_net_data *nd;
-
- u64 *frin_spill;
- u64 *frout_spill;
- u64 *class_0_spill;
- u64 *class_1_spill;
- u64 *class_2_spill;
- u64 *class_3_spill;
-};
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv);
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index e3a89fb1a4a2..9fa98c16f1d9 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config MFD_NVEC
tristate "NV Tegra Embedded Controller SMBus Interface"
depends on I2C && GPIOLIB && ARCH_TEGRA
@@ -7,7 +8,7 @@ config MFD_NVEC
controller.
To compile this driver as a module, say M here: the module will be
- called mfd-nvec
+ called mfd-nvec
config KEYBOARD_NVEC
tristate "Keyboard on nVidia compliant EC"
@@ -17,7 +18,7 @@ config KEYBOARD_NVEC
a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called keyboard-nvec
+ called keyboard-nvec
config SERIO_NVEC_PS2
tristate "PS2 on nVidia EC"
@@ -27,7 +28,7 @@ config SERIO_NVEC_PS2
to a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called serio-nvec-ps2
+ called serio-nvec-ps2
config NVEC_POWER
@@ -38,7 +39,7 @@ config NVEC_POWER
nVidia compliant embedded controllers.
To compile this driver as a module, say M here: the module will be
- called nvec-power
+ called nvec-power
config NVEC_PAZ00
@@ -49,5 +50,5 @@ config NVEC_PAZ00
 devices, e.g. Toshiba AC100 and Dynabook AZ netbooks.
To compile this driver as a module, say M here: the module will be
- called nvec-paz00
+ called nvec-paz00
diff --git a/drivers/staging/nvec/Makefile b/drivers/staging/nvec/Makefile
index 0db0e1f43337..f0cff8f9fdf6 100644
--- a/drivers/staging/nvec/Makefile
+++ b/drivers/staging/nvec/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SERIO_NVEC_PS2) += nvec_ps2.o
obj-$(CONFIG_MFD_NVEC) += nvec.o
obj-$(CONFIG_NVEC_POWER) += nvec_power.o
diff --git a/drivers/staging/nvec/README b/drivers/staging/nvec/README
index 9a320b7fdbe6..510e6933f402 100644
--- a/drivers/staging/nvec/README
+++ b/drivers/staging/nvec/README
@@ -1,4 +1,4 @@
-NVEC: An NVidia compliant Embedded Controller Protocol Implemenation
+NVEC: An NVidia compliant Embedded Controller Protocol Implementation
This is an implementation of the NVEC protocol used to communicate with an
embedded controller (EC) via I2C bus. The EC is an I2C master while the host
@@ -10,5 +10,5 @@ but the source code[1] of the published nvec reference drivers can be a guide.
This driver is currently only used by the AC100 project[2], but it is likely,
that other Tegra boards (not yet mainlined, if ever) also use it.
-[1] e.g. http://nv-tegra.nvidia.com/gitweb/?p=linux-2.6.git;a=tree;f=arch/arm/mach-tegra/nvec;hb=android-tegra-2.6.32
+[1] e.g. https://nv-tegra.nvidia.com/gitweb/?p=linux-2.6.git;a=tree;f=arch/arm/mach-tegra/nvec;hb=android-tegra-2.6.32
[2] http://gitorious.org/ac100, http://launchpad.net/ac100
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index e5ae42a0b44a..33f9ebe6d59b 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -1,8 +1,4 @@
ToDo list (incomplete, unordered)
- - add compile as module support
- - move half of the nvec init stuff to i2c-tegra.c
- - move event handling to nvec_events
+ - move the driver to the new i2c slave framework
- finish suspend/resume support
- - modifiy the sync_write method to return the received
- message in a variable (and return the error code).
- - add support for more device implementations
+ - add atomic ops in order to fix shutoff/reboot problems
diff --git a/drivers/staging/nvec/nvec-keytable.h b/drivers/staging/nvec/nvec-keytable.h
index 1dc22cb8812a..ac58e87e6a4e 100644
--- a/drivers/staging/nvec/nvec-keytable.h
+++ b/drivers/staging/nvec/nvec-keytable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* drivers/input/keyboard/tegra-nvec.c
*
@@ -5,20 +6,6 @@
* embedded controller
*
* Copyright (c) 2009, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
static unsigned short code_tab_102us[] = {
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 164634d61ac5..263774e6a78c 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NVEC: NVIDIA compliant embedded controller interface
*
@@ -7,15 +8,8 @@
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
* Julian Andres Klode <jak@jak-linux.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
-/* #define DEBUG */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
@@ -23,12 +17,11 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
@@ -40,18 +33,18 @@
#include "nvec.h"
#define I2C_CNFG 0x00
-#define I2C_CNFG_PACKET_MODE_EN (1<<10)
-#define I2C_CNFG_NEW_MASTER_SFM (1<<11)
+#define I2C_CNFG_PACKET_MODE_EN BIT(10)
+#define I2C_CNFG_NEW_MASTER_SFM BIT(11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
#define I2C_SL_CNFG 0x20
-#define I2C_SL_NEWSL (1<<2)
-#define I2C_SL_NACK (1<<1)
-#define I2C_SL_RESP (1<<0)
-#define I2C_SL_IRQ (1<<3)
-#define END_TRANS (1<<4)
-#define RCVD (1<<2)
-#define RNW (1<<1)
+#define I2C_SL_NEWSL BIT(2)
+#define I2C_SL_NACK BIT(1)
+#define I2C_SL_RESP BIT(0)
+#define I2C_SL_IRQ BIT(3)
+#define END_TRANS BIT(4)
+#define RCVD BIT(2)
+#define RNW BIT(1)
#define I2C_SL_RCVD 0x24
#define I2C_SL_STATUS 0x28
@@ -106,6 +99,7 @@ static const struct mfd_cell nvec_devices[] = {
* nvec_register_notifier - Register a notifier with nvec
* @nvec: A &struct nvec_chip
* @nb: The notifier block to register
+ * @events: Unused
*
* Registers a notifier with @nvec. The notifier will be added to an atomic
* notifier chain that is called for all received messages except those that
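A minimal sketch of how a sub-driver typically consumes this notifier interface, modeled on the keyboard and PS/2 notifiers later in this diff; the callback name, notifier_block name and log message below are hypothetical, not part of the patch:

	static int example_notifier(struct notifier_block *nb,
				    unsigned long event_type, void *data)
	{
		unsigned char *msg = data;

		if (event_type != NVEC_SYS)
			return NOTIFY_DONE;

		/* msg[1] holds the payload length, the payload starts at msg[2] */
		pr_info("nvec system event %02x, %u payload bytes\n",
			msg[0], msg[1]);
		return NOTIFY_STOP;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_notifier,
	};

	/* in a sub-driver's probe, after obtaining the parent nvec_chip: */
	nvec_register_notifier(nvec, &example_nb, 0);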
@@ -132,7 +126,7 @@ int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
-/**
+/*
* nvec_status_notifier - The final notifier
*
* Prints a message about control events not handled in the notifier
@@ -143,14 +137,14 @@ static int nvec_status_notifier(struct notifier_block *nb,
{
struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
nvec_status_notifier);
- unsigned char *msg = (unsigned char *)data;
+ unsigned char *msg = data;
if (event_type != NVEC_CNTL)
return NOTIFY_DONE;
dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
- msg, msg[1] + 2, true);
+ msg, msg[1] + 2, true);
return NOTIFY_OK;
}
@@ -181,7 +175,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
}
}
- dev_err(nvec->dev, "could not allocate %s buffer\n",
+ dev_err(nvec->dev, "Could not allocate %s buffer\n",
(category == NVEC_MSG_TX) ? "TX" : "RX");
return NULL;
@@ -242,8 +236,8 @@ static size_t nvec_msg_size(struct nvec_msg *msg)
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
- gpio_get_value(nvec->gpio), value);
- gpio_set_value(nvec->gpio, value);
+ gpiod_get_value(nvec->gpiod), value);
+ gpiod_set_value(nvec->gpiod, value);
}
/**
@@ -259,14 +253,14 @@ static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
* occurred, the nvec driver may print an error.
*/
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
- short size)
+ short size)
{
struct nvec_msg *msg;
unsigned long flags;
msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
- if (msg == NULL)
+ if (!msg)
return -ENOMEM;
msg->data[0] = size;
@@ -288,46 +282,54 @@ EXPORT_SYMBOL(nvec_write_async);
* @nvec: An &struct nvec_chip
* @data: The data to write
* @size: The size of @data
+ * @msg: The response message received
*
* This is similar to nvec_write_async(), but waits for the
* request to be answered before returning. This function
* uses a mutex and can thus not be called from e.g.
* interrupt handlers.
*
- * Returns: A pointer to the response message on success,
- * %NULL on failure. Free with nvec_msg_free() once no longer
- * used.
+ * Returns: 0 on success, a negative error code on failure.
+ * The response message is returned in @msg. Shall be freed
+ * with nvec_msg_free() once no longer used.
+ *
*/
-struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
- const unsigned char *data, short size)
+int nvec_write_sync(struct nvec_chip *nvec,
+ const unsigned char *data, short size,
+ struct nvec_msg **msg)
{
- struct nvec_msg *msg;
-
mutex_lock(&nvec->sync_write_mutex);
+ if (msg)
+ *msg = NULL;
+
nvec->sync_write_pending = (data[1] << 8) + data[0];
if (nvec_write_async(nvec, data, size) < 0) {
mutex_unlock(&nvec->sync_write_mutex);
- return NULL;
+ return -ENOMEM;
}
dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
- nvec->sync_write_pending);
+ nvec->sync_write_pending);
if (!(wait_for_completion_timeout(&nvec->sync_write,
- msecs_to_jiffies(2000)))) {
- dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
+ msecs_to_jiffies(2000)))) {
+ dev_warn(nvec->dev,
+ "Timeout waiting for sync write to complete\n");
mutex_unlock(&nvec->sync_write_mutex);
- return NULL;
+ return -ETIMEDOUT;
}
dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
- msg = nvec->last_sync_msg;
+ if (msg)
+ *msg = nvec->last_sync_msg;
+ else
+ nvec_msg_free(nvec, nvec->last_sync_msg);
mutex_unlock(&nvec->sync_write_mutex);
- return msg;
+ return 0;
}
EXPORT_SYMBOL(nvec_write_sync);
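For clarity, a minimal hypothetical caller of the reworked synchronous write above, mirroring the firmware-version query done in tegra_nvec_probe(); only the helper name is made up, the rest follows the new contract (0 or a negative error code is returned, the response arrives via the msg pointer and must be released with nvec_msg_free(), and passing NULL for msg lets the reply be freed internally):

	static int example_query_firmware(struct nvec_chip *nvec)
	{
		char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
		struct nvec_msg *msg;
		int err;

		/* Blocks until the EC answers or the 2 second timeout expires. */
		err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
		if (err)
			return err;	/* -ENOMEM or -ETIMEDOUT */

		dev_info(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
			 msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

		/* The response belongs to the caller and must be released. */
		nvec_msg_free(nvec, msg);

		return 0;
	}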
@@ -347,8 +349,8 @@ static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
/**
* nvec_event_mask - fill the command string with event bitfield
- * ev: points to event command string
- * mask: bit to insert into the event mask
+ * @ev: points to event command string
+ * @mask: bit to insert into the event mask
*
* Configure event command expects a 32 bit bitfield which describes
* which events to enable. The bitfield has the following structure
@@ -386,11 +388,11 @@ static void nvec_request_master(struct work_struct *work)
msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
spin_unlock_irqrestore(&nvec->tx_lock, flags);
nvec_gpio_set_value(nvec, 0);
- err = wait_for_completion_interruptible_timeout(
- &nvec->ec_transfer, msecs_to_jiffies(5000));
+ err = wait_for_completion_interruptible_timeout(&nvec->ec_transfer,
+ msecs_to_jiffies(5000));
if (err == 0) {
- dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
+ dev_warn(nvec->dev, "Timeout waiting for ec transfer\n");
nvec_gpio_set_value(nvec, 1);
msg->pos = 0;
}
@@ -422,8 +424,8 @@ static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
print_hex_dump(KERN_WARNING, "ec system event ",
- DUMP_PREFIX_NONE, 16, 1, msg->data,
- msg->data[1] + 2, true);
+ DUMP_PREFIX_NONE, 16, 1, msg->data,
+ msg->data[1] + 2, true);
atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
msg->data);
@@ -452,7 +454,7 @@ static void nvec_dispatch(struct work_struct *work)
if (nvec->sync_write_pending ==
(msg->data[2] << 8) + msg->data[0]) {
- dev_dbg(nvec->dev, "sync write completed!\n");
+ dev_dbg(nvec->dev, "Sync write completed!\n");
nvec->sync_write_pending = 0;
nvec->last_sync_msg = msg;
complete(&nvec->sync_write);
@@ -475,7 +477,7 @@ static void nvec_tx_completed(struct nvec_chip *nvec)
{
/* We got an END_TRANS, let's skip this, maybe there's an event */
if (nvec->tx->pos != nvec->tx->size) {
- dev_err(nvec->dev, "premature END_TRANS, resending\n");
+ dev_err(nvec->dev, "Premature END_TRANS, resending\n");
nvec->tx->pos = 0;
nvec_gpio_set_value(nvec, 0);
} else {
@@ -493,8 +495,8 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
{
if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
- (uint) nvec_msg_size(nvec->rx),
- (uint) nvec->rx->pos);
+ (uint)nvec_msg_size(nvec->rx),
+ (uint)nvec->rx->pos);
nvec_msg_free(nvec, nvec->rx);
nvec->state = 0;
@@ -508,8 +510,10 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
spin_lock(&nvec->rx_lock);
- /* add the received data to the work list
- and move the ring buffer pointer to the next entry */
+ /*
+ * Add the received data to the work list and move the ring buffer
+ * pointer to the next entry.
+ */
list_add_tail(&nvec->rx->node, &nvec->rx_data);
spin_unlock(&nvec->rx_lock);
@@ -567,6 +571,22 @@ static void nvec_tx_set(struct nvec_chip *nvec)
}
/**
+ * tegra_i2c_writel - safely write to an I2C client controller register
+ * @val: value to be written
+ * @reg: register to write to
+ *
+ * A write to an I2C controller register needs to be read back to make sure
+ * that the value has arrived.
+ */
+static void tegra_i2c_writel(u32 val, void *reg)
+{
+ writel_relaxed(val, reg);
+
+ /* read back register to make sure that register writes completed */
+ readl_relaxed(reg);
+}
+
+/**
* nvec_interrupt - Interrupt handler
* @irq: The IRQ
* @dev: The nvec device
@@ -588,7 +608,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
/* Filter out some errors */
if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
- dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
+ dev_err(nvec->dev, "Unexpected irq mask %lx\n", status);
return IRQ_HANDLED;
}
if ((status & I2C_SL_IRQ) == 0) {
@@ -600,7 +620,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
if ((status & RNW) == 0) {
received = readl(nvec->base + I2C_SL_RCVD);
if (status & RCVD)
- writel(0, nvec->base + I2C_SL_RCVD);
+ tegra_i2c_writel(0, nvec->base + I2C_SL_RCVD);
}
if (status == (I2C_SL_IRQ | RCVD))
@@ -611,13 +631,13 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
if (status != (I2C_SL_IRQ | RCVD))
nvec_invalid_flags(nvec, status, false);
break;
- case 1: /* command byte */
+ case 1: /* Command byte */
if (status != I2C_SL_IRQ) {
nvec_invalid_flags(nvec, status, true);
} else {
nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
/* Should not happen in a normal world */
- if (unlikely(nvec->rx == NULL)) {
+ if (unlikely(!nvec->rx)) {
nvec->state = 0;
break;
}
@@ -638,11 +658,9 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
nvec_msg_free(nvec, nvec->rx);
nvec->state = 3;
nvec_tx_set(nvec);
- BUG_ON(nvec->tx->size < 1);
to_send = nvec->tx->data[0];
nvec->tx->pos = 1;
} else if (status == (I2C_SL_IRQ)) {
- BUG_ON(nvec->rx == NULL);
nvec->rx->data[1] = received;
nvec->rx->pos = 2;
nvec->state = 4;
@@ -658,10 +676,11 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
to_send = nvec->tx->data[nvec->tx->pos++];
} else {
- dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
+ dev_err(nvec->dev,
+ "tx buffer underflow on %p (%u > %u)\n",
nvec->tx,
- (uint) (nvec->tx ? nvec->tx->pos : 0),
- (uint) (nvec->tx ? nvec->tx->size : 0));
+ (uint)(nvec->tx ? nvec->tx->pos : 0),
+ (uint)(nvec->tx ? nvec->tx->size : 0));
nvec->state = 0;
}
break;
@@ -686,14 +705,14 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
if ((status & (RCVD | RNW)) == RCVD) {
if (received != nvec->i2c_addr)
dev_err(nvec->dev,
- "received address 0x%02x, expected 0x%02x\n",
- received, nvec->i2c_addr);
+ "received address 0x%02x, expected 0x%02x\n",
+ received, nvec->i2c_addr);
nvec->state = 1;
}
/* Send data if requested, but not on end of transmission */
if ((status & (RNW | END_TRANS)) == RNW)
- writel(to_send, nvec->base + I2C_SL_RCVD);
+ tegra_i2c_writel(to_send, nvec->base + I2C_SL_RCVD);
 /* If we have sent the first byte */
if (status == (I2C_SL_IRQ | RNW | RCVD))
@@ -710,15 +729,6 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
status & RCVD ? " RCVD" : "",
status & RNW ? " RNW" : "");
-
- /*
- * TODO: A correct fix needs to be found for this.
- *
- * We experience less incomplete messages with this delay than without
- * it, but we don't know why. Help is appreciated.
- */
- udelay(100);
-
return IRQ_HANDLED;
}
@@ -734,15 +744,15 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
- writel(val, nvec->base + I2C_CNFG);
+ tegra_i2c_writel(val, nvec->base + I2C_CNFG);
clk_set_rate(nvec->i2c_clk, 8 * 80000);
- writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
- writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
+ tegra_i2c_writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
+ tegra_i2c_writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
- writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
- writel(0, nvec->base + I2C_SL_ADDR2);
+ tegra_i2c_writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
+ tegra_i2c_writel(0, nvec->base + I2C_SL_ADDR2);
enable_irq(nvec->irq);
}
@@ -751,7 +761,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
disable_irq(nvec->irq);
- writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
+ tegra_i2c_writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
clk_disable_unprepare(nvec->i2c_clk);
}
#endif
@@ -764,75 +774,52 @@ static void nvec_power_off(void)
nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}
-/*
- * Parse common device tree data
- */
-static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
-{
- nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
-
- if (nvec->gpio < 0) {
- dev_err(nvec->dev, "no gpio specified");
- return -ENODEV;
- }
-
- if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
- &nvec->i2c_addr)) {
- dev_err(nvec->dev, "no i2c address specified");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int tegra_nvec_probe(struct platform_device *pdev)
{
int err, ret;
struct clk *i2c_clk;
+ struct device *dev = &pdev->dev;
struct nvec_chip *nvec;
struct nvec_msg *msg;
- struct resource *res;
void __iomem *base;
char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
- if (!pdev->dev.of_node) {
- dev_err(&pdev->dev, "must be instantiated using device tree\n");
+ if (!dev->of_node) {
+ dev_err(dev, "must be instantiated using device tree\n");
return -ENODEV;
}
- nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
+ nvec = devm_kzalloc(dev, sizeof(struct nvec_chip), GFP_KERNEL);
if (!nvec)
return -ENOMEM;
platform_set_drvdata(pdev, nvec);
- nvec->dev = &pdev->dev;
+ nvec->dev = dev;
- err = nvec_i2c_parse_dt_pdata(nvec);
- if (err < 0)
- return err;
+ if (of_property_read_u32(dev->of_node, "slave-addr", &nvec->i2c_addr)) {
+ dev_err(dev, "no i2c address specified");
+ return -ENODEV;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
nvec->irq = platform_get_irq(pdev, 0);
- if (nvec->irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
+ if (nvec->irq < 0)
return -ENODEV;
- }
- i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
+ i2c_clk = devm_clk_get(dev, "div-clk");
if (IS_ERR(i2c_clk)) {
- dev_err(nvec->dev, "failed to get controller clock\n");
+ dev_err(dev, "failed to get controller clock\n");
return -ENODEV;
}
- nvec->rst = devm_reset_control_get(&pdev->dev, "i2c");
+ nvec->rst = devm_reset_control_get_exclusive(dev, "i2c");
if (IS_ERR(nvec->rst)) {
- dev_err(nvec->dev, "failed to get controller reset\n");
+ dev_err(dev, "failed to get controller reset\n");
return PTR_ERR(nvec->rst);
}
@@ -852,20 +839,18 @@ static int tegra_nvec_probe(struct platform_device *pdev)
INIT_WORK(&nvec->rx_work, nvec_dispatch);
INIT_WORK(&nvec->tx_work, nvec_request_master);
- err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
- "nvec gpio");
- if (err < 0) {
- dev_err(nvec->dev, "couldn't request gpio\n");
- return -ENODEV;
+ nvec->gpiod = devm_gpiod_get(dev, "request", GPIOD_OUT_HIGH);
+ if (IS_ERR(nvec->gpiod)) {
+ dev_err(dev, "couldn't request gpio\n");
+ return PTR_ERR(nvec->gpiod);
}
- err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
- "nvec", nvec);
+ err = devm_request_irq(dev, nvec->irq, nvec_interrupt, IRQF_NO_AUTOEN,
+ "nvec", nvec);
if (err) {
- dev_err(nvec->dev, "couldn't request irq\n");
+ dev_err(dev, "couldn't request irq\n");
return -ENODEV;
}
- disable_irq(nvec->irq);
tegra_init_i2c_slave(nvec);
@@ -879,19 +864,21 @@ static int tegra_nvec_probe(struct platform_device *pdev)
pm_power_off = nvec_power_off;
/* Get Firmware Version */
- msg = nvec_write_sync(nvec, get_firmware_version, 2);
+ err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
- if (msg) {
- dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
- msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
+ if (!err) {
+ dev_warn(dev,
+ "ec firmware version %02x.%02x.%02x / %02x\n",
+ msg->data[4], msg->data[5],
+ msg->data[6], msg->data[7]);
nvec_msg_free(nvec, msg);
}
- ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
+ ret = mfd_add_devices(dev, 0, nvec_devices,
ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
if (ret)
- dev_err(nvec->dev, "error adding subdevices\n");
+ dev_err(dev, "error adding subdevices\n");
/* unmute speakers? */
nvec_write_async(nvec, unmute_speakers, 4);
@@ -907,7 +894,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_nvec_remove(struct platform_device *pdev)
+static void tegra_nvec_remove(struct platform_device *pdev)
{
struct nvec_chip *nvec = platform_get_drvdata(pdev);
@@ -918,15 +905,13 @@ static int tegra_nvec_remove(struct platform_device *pdev)
cancel_work_sync(&nvec->tx_work);
/* FIXME: needs check whether nvec is responsible for power off */
pm_power_off = NULL;
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct nvec_chip *nvec = platform_get_drvdata(pdev);
+ int err;
+ struct nvec_chip *nvec = dev_get_drvdata(dev);
struct nvec_msg *msg;
char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };
@@ -935,8 +920,9 @@ static int nvec_suspend(struct device *dev)
/* keep these sync or you'll break suspend */
nvec_toggle_global_events(nvec, false);
- msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
- nvec_msg_free(nvec, msg);
+ err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
+ if (!err)
+ nvec_msg_free(nvec, msg);
nvec_disable_i2c_slave(nvec);
@@ -945,8 +931,7 @@ static int nvec_suspend(struct device *dev)
static int nvec_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct nvec_chip *nvec = platform_get_drvdata(pdev);
+ struct nvec_chip *nvec = dev_get_drvdata(dev);
dev_dbg(nvec->dev, "resuming\n");
tegra_init_i2c_slave(nvec);
@@ -967,7 +952,7 @@ MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
static struct platform_driver nvec_device_driver = {
.probe = tegra_nvec_probe,
- .remove = tegra_nvec_remove,
+ .remove = tegra_nvec_remove,
.driver = {
.name = "nvec",
.pm = &nvec_pm_ops,
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index 2ec9de906ca3..80c0353f141c 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NVEC: NVIDIA compliant embedded controller interface
*
@@ -7,11 +8,6 @@
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
* Julian Andres Klode <jak@jak-linux.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#ifndef __LINUX_MFD_NVEC
@@ -136,9 +132,9 @@ struct nvec_msg {
*/
struct nvec_chip {
struct device *dev;
- int gpio;
+ struct gpio_desc *gpiod;
int irq;
- int i2c_addr;
+ u32 i2c_addr;
void __iomem *base;
struct clk *i2c_clk;
struct reset_control *rst;
@@ -168,8 +164,9 @@ struct nvec_chip {
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
short size);
-struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
- const unsigned char *data, short size);
+int nvec_write_sync(struct nvec_chip *nvec,
+ const unsigned char *data, short size,
+ struct nvec_msg **msg);
int nvec_register_notifier(struct nvec_chip *nvec,
struct notifier_block *nb,
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index e881e6b26a4c..d2b91318f151 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_kbd: keyboard driver for a NVIDIA compliant embedded controller
*
@@ -5,11 +6,6 @@
*
* Authors: Pierre-Hugues Husson <phhusson@free.fr>
* Marc Dietrich <marvin24@gmx.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
@@ -58,7 +54,7 @@ static int nvec_keys_notifier(struct notifier_block *nb,
unsigned long event_type, void *data)
{
int code, state;
- unsigned char *msg = (unsigned char *)data;
+ unsigned char *msg = data;
if (event_type == NVEC_KB_EVT) {
int _size = (msg[0] & (3 << 5)) >> 5;
@@ -127,6 +123,8 @@ static int nvec_kbd_probe(struct platform_device *pdev)
keycodes[j++] = extcode_tab_us102[i];
idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
idev->name = "nvec keyboard";
idev->phys = "nvec";
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_LED);
@@ -150,20 +148,21 @@ static int nvec_kbd_probe(struct platform_device *pdev)
nvec_register_notifier(nvec, &keys_dev.notifier, 0);
/* Enable keyboard */
- nvec_write_async(nvec, enable_kbd, 2);
+ nvec_write_sync(nvec, enable_kbd, 2, NULL);
/* configures wake on special keys */
- nvec_write_async(nvec, cnfg_wake, 4);
+ nvec_write_sync(nvec, cnfg_wake, 4, NULL);
+
/* enable wake key reporting */
- nvec_write_async(nvec, cnfg_wake_key_reporting, 3);
+ nvec_write_sync(nvec, cnfg_wake_key_reporting, 3, NULL);
/* Disable caps lock LED */
- nvec_write_async(nvec, clear_leds, sizeof(clear_leds));
+ nvec_write_sync(nvec, clear_leds, sizeof(clear_leds), NULL);
return 0;
}
-static int nvec_kbd_remove(struct platform_device *pdev)
+static void nvec_kbd_remove(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
char disable_kbd[] = { NVEC_KBD, DISABLE_KBD },
@@ -172,8 +171,6 @@ static int nvec_kbd_remove(struct platform_device *pdev)
nvec_write_async(nvec, uncnfg_wake_key_reporting, 3);
nvec_write_async(nvec, disable_kbd, 2);
nvec_unregister_notifier(nvec, &keys_dev.notifier);
-
- return 0;
}
static struct platform_driver nvec_kbd_driver = {
diff --git a/drivers/staging/nvec/nvec_paz00.c b/drivers/staging/nvec/nvec_paz00.c
index 68146bfee2b3..55d59840fca4 100644
--- a/drivers/staging/nvec/nvec_paz00.c
+++ b/drivers/staging/nvec/nvec_paz00.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_paz00: OEM specific driver for Compal PAZ00 based devices
*
* Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
*
* Authors: Ilya Petrov <ilya.muromec@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
@@ -18,9 +14,6 @@
#include <linux/platform_device.h>
#include "nvec.h"
-#define to_nvec_led(led_cdev) \
- container_of(led_cdev, struct nvec_led, cdev)
-
#define NVEC_LED_REQ {'\x0d', '\x10', '\x45', '\x10', '\x00'}
#define NVEC_LED_MAX 8
@@ -33,7 +26,7 @@ struct nvec_led {
static void nvec_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
- struct nvec_led *led = to_nvec_led(led_cdev);
+ struct nvec_led *led = container_of(led_cdev, struct nvec_led, cdev);
unsigned char buf[] = NVEC_LED_REQ;
buf[4] = value;
@@ -41,7 +34,6 @@ static void nvec_led_brightness_set(struct led_classdev *led_cdev,
nvec_write_async(led->nvec, buf, sizeof(buf));
led->cdev.brightness = value;
-
}
static int nvec_paz00_probe(struct platform_device *pdev)
@@ -63,7 +55,7 @@ static int nvec_paz00_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, led);
- ret = led_classdev_register(&pdev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
@@ -73,18 +65,8 @@ static int nvec_paz00_probe(struct platform_device *pdev)
return 0;
}
-static int nvec_paz00_remove(struct platform_device *pdev)
-{
- struct nvec_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->cdev);
-
- return 0;
-}
-
static struct platform_driver nvec_paz00_driver = {
.probe = nvec_paz00_probe,
- .remove = nvec_paz00_remove,
.driver = {
.name = "nvec-paz00",
},
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index 04a7402ae2df..2faab9fdedef 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_power: power supply driver for a NVIDIA compliant embedded controller
*
@@ -5,11 +6,6 @@
*
* Authors: Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
@@ -90,7 +86,7 @@ static int nvec_power_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
if (event_type != NVEC_SYS)
return NOTIFY_DONE;
@@ -126,7 +122,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
int status_changed = 0;
if (event_type != NVEC_BAT)
@@ -198,7 +194,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
break;
case MANUFACTURER:
memcpy(power->bat_manu, &res->plc, res->length - 2);
- power->bat_model[res->length - 2] = '\0';
+ power->bat_manu[res->length - 2] = '\0';
break;
case MODEL:
memcpy(power->bat_model, &res->plc, res->length - 2);
@@ -207,8 +203,10 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
case TYPE:
memcpy(power->bat_type, &res->plc, res->length - 2);
power->bat_type[res->length - 2] = '\0';
- /* this differs a little from the spec
- fill in more if you find some */
+ /*
+ * This differs a little from the spec fill in more if you find
+ * some.
+ */
if (!strncmp(power->bat_type, "Li", 30))
power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_LION;
else
@@ -340,7 +338,7 @@ static const struct power_supply_desc nvec_psy_desc = {
};
static int counter;
-static int const bat_iter[] = {
+static const int bat_iter[] = {
SLOT_STATUS, VOLTAGE, CURRENT, CAPACITY_REMAINING,
#ifdef EC_FULL_DIAG
AVERAGE_CURRENT, TEMPERATURE, TIME_REMAINING,
@@ -356,12 +354,14 @@ static void nvec_power_poll(struct work_struct *work)
if (counter >= ARRAY_SIZE(bat_iter))
counter = 0;
-/* AC status via sys req */
+ /* AC status via sys req */
nvec_write_async(power->nvec, buf, 2);
msleep(100);
-/* select a battery request function via round robin
- doing it all at once seems to overload the power supply */
+ /*
+ * Select a battery request function via round robin doing it all at
+ * once seems to overload the power supply.
+ */
buf[0] = NVEC_BAT;
buf[1] = bat_iter[counter++];
nvec_write_async(power->nvec, buf, 2);
@@ -416,7 +416,7 @@ static int nvec_power_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(*psy);
}
-static int nvec_power_remove(struct platform_device *pdev)
+static void nvec_power_remove(struct platform_device *pdev)
{
struct nvec_power *power = platform_get_drvdata(pdev);
@@ -429,8 +429,6 @@ static int nvec_power_remove(struct platform_device *pdev)
case BAT:
power_supply_unregister(nvec_bat_psy);
}
-
- return 0;
}
static struct platform_driver nvec_power_driver = {
@@ -438,7 +436,7 @@ static struct platform_driver nvec_power_driver = {
.remove = nvec_power_remove,
.driver = {
.name = "nvec-power",
- }
+ }
};
module_platform_driver(nvec_power_driver);
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 0922dd3a08d3..2db57795ea2f 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_ps2: mouse driver for a NVIDIA compliant embedded controller
*
@@ -6,11 +7,6 @@
* Authors: Pierre-Hugues Husson <phhusson@free.fr>
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
@@ -27,14 +23,6 @@
#define DISABLE_MOUSE 0xf5
#define PSMOUSE_RST 0xff
-#ifdef NVEC_PS2_DEBUG
-#define NVEC_PHD(str, buf, len) \
- print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_NONE, \
- 16, 1, buf, len, false)
-#else
-#define NVEC_PHD(str, buf, len)
-#endif
-
enum ps2_subcmds {
SEND_COMMAND = 1,
RECEIVE_N,
@@ -64,61 +52,67 @@ static void ps2_stopstreaming(struct serio *ser_dev)
nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
-static int ps2_sendcommand(struct serio *ser_dev, unsigned char cmd)
-{
- unsigned char buf[] = { NVEC_PS2, SEND_COMMAND, ENABLE_MOUSE, 1 };
-
- buf[2] = cmd & 0xff;
-
- dev_dbg(&ser_dev->dev, "Sending ps2 cmd %02x\n", cmd);
- return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
-}
-
static int nvec_ps2_notifier(struct notifier_block *nb,
unsigned long event_type, void *data)
{
int i;
- unsigned char *msg = (unsigned char *)data;
+ unsigned char *msg = data;
switch (event_type) {
case NVEC_PS2_EVT:
for (i = 0; i < msg[1]; i++)
serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0);
- NVEC_PHD("ps/2 mouse event: ", &msg[2], msg[1]);
return NOTIFY_STOP;
case NVEC_PS2:
if (msg[2] == 1) {
for (i = 0; i < (msg[1] - 2); i++)
serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0);
- NVEC_PHD("ps/2 mouse reply: ", &msg[4], msg[1] - 2);
}
- else if (msg[1] != 2) /* !ack */
- NVEC_PHD("unhandled mouse event: ", msg, msg[1] + 2);
return NOTIFY_STOP;
}
return NOTIFY_DONE;
}
+static int ps2_sendcommand(struct serio *ser_dev, unsigned char cmd)
+{
+ unsigned char buf[] = { NVEC_PS2, SEND_COMMAND, ENABLE_MOUSE, 1 };
+ struct nvec_msg *msg;
+ int ret;
+
+ buf[2] = cmd & 0xff;
+
+ dev_dbg(&ser_dev->dev, "Sending ps2 cmd %02x\n", cmd);
+
+ ret = nvec_write_sync(ps2_dev.nvec, buf, sizeof(buf), &msg);
+ if (ret < 0)
+ return ret;
+
+ nvec_ps2_notifier(NULL, NVEC_PS2, msg->data);
+
+ nvec_msg_free(ps2_dev.nvec, msg);
+
+ return 0;
+}
+
static int nvec_mouse_probe(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev;
- char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
- ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+ ser_dev = kzalloc(sizeof(*ser_dev), GFP_KERNEL);
if (!ser_dev)
return -ENOMEM;
- ser_dev->id.type = SERIO_PS_PSTHRU;
+ ser_dev->id.type = SERIO_8042;
ser_dev->write = ps2_sendcommand;
ser_dev->start = ps2_startstreaming;
ser_dev->stop = ps2_stopstreaming;
- strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
- strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
+ strscpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
+ strscpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
ps2_dev.ser_dev = ser_dev;
ps2_dev.notifier.notifier_call = nvec_ps2_notifier;
@@ -127,13 +121,10 @@ static int nvec_mouse_probe(struct platform_device *pdev)
serio_register_port(ser_dev);
- /* mouse reset */
- nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
-
return 0;
}
-static int nvec_mouse_remove(struct platform_device *pdev)
+static void nvec_mouse_remove(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
@@ -141,8 +132,6 @@ static int nvec_mouse_remove(struct platform_device *pdev)
ps2_stopstreaming(ps2_dev.ser_dev);
nvec_unregister_notifier(nvec, &ps2_dev.notifier);
serio_unregister_port(ps2_dev.ser_dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
deleted file mode 100644
index 16ea17ff3fd2..000000000000
--- a/drivers/staging/octeon-usb/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config OCTEON_USB
- tristate "Cavium Networks Octeon USB support"
- depends on CAVIUM_OCTEON_SOC && USB
- help
-	  This driver supports the USB host controller on some Cavium
- Networks' products in the Octeon family.
-
- To compile this driver as a module, choose M here. The module
- will be called octeon-usb.
-
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
deleted file mode 100644
index 5588be395f2a..000000000000
--- a/drivers/staging/octeon-usb/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
deleted file mode 100644
index cc58a7e88baf..000000000000
--- a/drivers/staging/octeon-usb/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
-This driver is functional and has been tested on EdgeRouter Lite with
-USB mass storage.
-
-TODO:
- - kernel coding style
- - checkpatch warnings
- - dead code elimination
- - device tree bindings
- - possibly eliminate the extra "hardware abstraction layer"
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
deleted file mode 100644
index 61940284311c..000000000000
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ /dev/null
@@ -1,3777 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Cavium Networks
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/prefetch.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/usb.h>
-
-#include <linux/time.h>
-#include <linux/delay.h>
-
-#include <asm/octeon/cvmx.h>
-#include <asm/octeon/cvmx-iob-defs.h>
-
-#include <linux/usb/hcd.h>
-
-#include <linux/err.h>
-
-#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-sysinfo.h>
-#include <asm/octeon/cvmx-helper-board.h>
-
-#include "octeon-hcd.h"
-
-/**
- * enum cvmx_usb_speed - the possible USB device speeds
- *
- * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
- * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
- * @CVMX_USB_SPEED_LOW: Device is operating at 1.5Mbps
- */
-enum cvmx_usb_speed {
- CVMX_USB_SPEED_HIGH = 0,
- CVMX_USB_SPEED_FULL = 1,
- CVMX_USB_SPEED_LOW = 2,
-};
-
-/**
- * enum cvmx_usb_transfer - the possible USB transfer types
- *
- * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
- * transfers
- * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
- * priority periodic transfers
- * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
- * transfers
- * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
- * periodic transfers
- */
-enum cvmx_usb_transfer {
- CVMX_USB_TRANSFER_CONTROL = 0,
- CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
- CVMX_USB_TRANSFER_BULK = 2,
- CVMX_USB_TRANSFER_INTERRUPT = 3,
-};
-
-/**
- * enum cvmx_usb_direction - the transfer directions
- *
- * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
- * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
- */
-enum cvmx_usb_direction {
- CVMX_USB_DIRECTION_OUT,
- CVMX_USB_DIRECTION_IN,
-};
-
-/**
- * enum cvmx_usb_complete - possible callback function status codes
- *
- * @CVMX_USB_COMPLETE_SUCCESS: The transaction / operation finished without
- * any errors
- * @CVMX_USB_COMPLETE_SHORT: FIXME: This is currently not implemented
- * @CVMX_USB_COMPLETE_CANCEL: The transaction was canceled while in flight
- * by a user call to cvmx_usb_cancel
- * @CVMX_USB_COMPLETE_ERROR: The transaction aborted with an unexpected
- * error status
- * @CVMX_USB_COMPLETE_STALL: The transaction received a USB STALL response
- * from the device
- * @CVMX_USB_COMPLETE_XACTERR: The transaction failed with an error from the
- * device even after a number of retries
- * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle
- * error even after a number of retries
- * @CVMX_USB_COMPLETE_BABBLEERR: The transaction failed with a babble error
- * @CVMX_USB_COMPLETE_FRAMEERR: The transaction failed with a frame error
- * even after a number of retries
- */
-enum cvmx_usb_complete {
- CVMX_USB_COMPLETE_SUCCESS,
- CVMX_USB_COMPLETE_SHORT,
- CVMX_USB_COMPLETE_CANCEL,
- CVMX_USB_COMPLETE_ERROR,
- CVMX_USB_COMPLETE_STALL,
- CVMX_USB_COMPLETE_XACTERR,
- CVMX_USB_COMPLETE_DATATGLERR,
- CVMX_USB_COMPLETE_BABBLEERR,
- CVMX_USB_COMPLETE_FRAMEERR,
-};
-
-/**
- * struct cvmx_usb_port_status - the USB port status information
- *
- * @port_enabled: 1 = USB port is enabled, 0 = disabled
- * @port_over_current: 1 = Over current detected, 0 = Over current not
- * detected. Octeon doesn't support over current detection.
- * @port_powered: 1 = Port power is being supplied to the device, 0 =
- * power is off. Octeon doesn't support turning port power
- * off.
- * @port_speed: Current port speed.
- * @connected: 1 = A device is connected to the port, 0 = No device is
- * connected.
- * @connect_change: 1 = Device connected state changed since the last set
- * status call.
- */
-struct cvmx_usb_port_status {
- uint32_t reserved : 25;
- uint32_t port_enabled : 1;
- uint32_t port_over_current : 1;
- uint32_t port_powered : 1;
- enum cvmx_usb_speed port_speed : 2;
- uint32_t connected : 1;
- uint32_t connect_change : 1;
-};
-
-/**
- * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
- *
- * @offset: This is the offset in bytes into the main buffer where this data
- * is stored.
- * @length: This is the length in bytes of the data.
- * @status: This is the status of this individual packet transfer.
- */
-struct cvmx_usb_iso_packet {
- int offset;
- int length;
- enum cvmx_usb_complete status;
-};
-
-/**
- * enum cvmx_usb_initialize_flags - flags used by the initialization function
- *
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
- * as clock source at USB_XO and
- * USB_XI.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
- * board clock source at USB_XO.
- * USB_XI should be tied to GND.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
- * crystal
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and use polled I/O for
- * data transfers on the USB port
- */
-enum cvmx_usb_initialize_flags {
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
- /* Bits 3-4 used to encode the clock frequency */
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
-};
-
-/**
- * enum cvmx_usb_pipe_flags - internal flags for a pipe.
- *
- * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
- * actively using hardware.
- * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed
- * pipe is in the ping state.
- */
-enum cvmx_usb_pipe_flags {
- CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
- CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
-};
-
-/* Maximum number of times to retry failed transactions */
-#define MAX_RETRIES 3
-
-/* Maximum number of hardware channels supported by the USB block */
-#define MAX_CHANNELS 8
-
-/*
- * The low level hardware can transfer a maximum of this number of bytes in each
- * transfer. The field is 19 bits wide
- */
-#define MAX_TRANSFER_BYTES ((1<<19)-1)
-
-/*
- * The low level hardware can transfer a maximum of this number of packets in
- * each transfer. The field is 10 bits wide
- */
-#define MAX_TRANSFER_PACKETS ((1<<10)-1)
-
-/**
- * Logical transactions may take numerous low level
- * transactions, especially when splits are concerned. This
- * enum represents all of the possible stages a transaction can
- * be in. Note that split completes are always odd. This is so
- * the NAK handler can back up to the previous low level
- * transaction with a simple clearing of bit 0.
- */
-enum cvmx_usb_stage {
- CVMX_USB_STAGE_NON_CONTROL,
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
- CVMX_USB_STAGE_SETUP,
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
- CVMX_USB_STAGE_DATA,
- CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
- CVMX_USB_STAGE_STATUS,
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
-};
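
Given the ordering above, every *_SPLIT_COMPLETE stage is odd and sits one past its start-split stage, so backing up only needs bit 0 cleared (the same stage & 1 test is used later to detect the start-split phase). A tiny standalone illustration, not driver code:

#include <assert.h>
#include <stdio.h>

enum stage {
	STAGE_SETUP = 2,		/* start split: even */
	STAGE_SETUP_SPLIT_COMPLETE = 3,	/* split complete: odd */
};

int main(void)
{
	int s = STAGE_SETUP_SPLIT_COMPLETE;

	s &= ~1;			/* back up to the start split */
	assert(s == STAGE_SETUP);
	printf("backed up to stage %d\n", s);
	return 0;
}
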
-
-/**
- * struct cvmx_usb_transaction - describes each pending USB transaction
- * regardless of type. These are linked together
- * to form a list of pending requests for a pipe.
- *
- * @node: List node for transactions in the pipe.
- * @type: Type of transaction, duplicated from the pipe.
- * @flags: State flags for this transaction.
- * @buffer: User's physical buffer address to read/write.
- * @buffer_length: Size of the user's buffer in bytes.
- * @control_header: For control transactions, physical address of the 8
- * byte standard header.
- * @iso_start_frame: For ISO transactions, the starting frame number.
- * @iso_number_packets: For ISO transactions, the number of packets in the
- * request.
- * @iso_packets: For ISO transactions, the sub packets in the request.
- * @actual_bytes: Actual bytes transferred for this transaction.
- * @stage: For control transactions, the current stage.
- * @urb: URB.
- */
-struct cvmx_usb_transaction {
- struct list_head node;
- enum cvmx_usb_transfer type;
- uint64_t buffer;
- int buffer_length;
- uint64_t control_header;
- int iso_start_frame;
- int iso_number_packets;
- struct cvmx_usb_iso_packet *iso_packets;
- int xfersize;
- int pktcnt;
- int retries;
- int actual_bytes;
- enum cvmx_usb_stage stage;
- struct urb *urb;
-};
-
-/**
- * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
- * and some USB device. It contains a list of pending
- * requests to the device.
- *
- * @node: List node for pipe list
- * @next: Pipe after this one in the list
- * @transactions: List of pending transactions
- * @interval: For periodic pipes, the interval between packets in
- * frames
- * @next_tx_frame: The next frame this pipe is allowed to transmit on
- * @flags: State flags for this pipe
- * @device_speed: Speed of device connected to this pipe
- * @transfer_type: Type of transaction supported by this pipe
- * @transfer_dir: IN or OUT. Ignored for Control
- * @multi_count: Max packets in a row for the device
- * @max_packet: The device's maximum packet size in bytes
- * @device_addr: USB device address at other end of pipe
- * @endpoint_num: USB endpoint number at other end of pipe
- * @hub_device_addr: Hub address this device is connected to
- * @hub_port: Hub port this device is connected to
- * @pid_toggle: This toggles between 0/1 on every packet send to track
- * the data pid needed
- * @channel: Hardware DMA channel for this pipe
- * @split_sc_frame: The low order bits of the frame number the split
- * complete should be sent on
- */
-struct cvmx_usb_pipe {
- struct list_head node;
- struct list_head transactions;
- uint64_t interval;
- uint64_t next_tx_frame;
- enum cvmx_usb_pipe_flags flags;
- enum cvmx_usb_speed device_speed;
- enum cvmx_usb_transfer transfer_type;
- enum cvmx_usb_direction transfer_dir;
- int multi_count;
- uint16_t max_packet;
- uint8_t device_addr;
- uint8_t endpoint_num;
- uint8_t hub_device_addr;
- uint8_t hub_port;
- uint8_t pid_toggle;
- uint8_t channel;
- int8_t split_sc_frame;
-};
-
-struct cvmx_usb_tx_fifo {
- struct {
- int channel;
- int size;
- uint64_t address;
- } entry[MAX_CHANNELS+1];
- int head;
- int tail;
-};
-
-/**
- * struct cvmx_usb_state - the state of the USB block
- *
- * init_flags: Flags passed to initialize.
- * index: Which USB block this is for.
- * idle_hardware_channels: Bit set for every idle hardware channel.
- * usbcx_hprt: Stored port status so we don't need to read a CSR to
- * determine splits.
- * pipe_for_channel: Map channels to pipes.
- * pipe: Storage for pipes.
- * indent: Used by debug output to indent functions.
- * port_status: Last port status used for change notification.
- * idle_pipes: List of open pipes that have no transactions.
- * active_pipes: Active pipes indexed by transfer type.
- * frame_number: Increments every SOF interrupt for time keeping.
- * active_split: Points to the current active split, or NULL.
- */
-struct cvmx_usb_state {
- int init_flags;
- int index;
- int idle_hardware_channels;
- union cvmx_usbcx_hprt usbcx_hprt;
- struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
- int indent;
- struct cvmx_usb_port_status port_status;
- struct list_head idle_pipes;
- struct list_head active_pipes[4];
- uint64_t frame_number;
- struct cvmx_usb_transaction *active_split;
- struct cvmx_usb_tx_fifo periodic;
- struct cvmx_usb_tx_fifo nonperiodic;
-};
-
-struct octeon_hcd {
- spinlock_t lock;
- struct cvmx_usb_state usb;
-};
-
-/* This macro spins on a register waiting for it to reach a condition. */
-#define CVMX_WAIT_FOR_FIELD32(address, _union, cond, timeout_usec) \
- ({int result; \
- do { \
- uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
- octeon_get_clock_rate() / 1000000; \
- union _union c; \
- \
- while (1) { \
- c.u32 = cvmx_usb_read_csr32(usb, address); \
- \
- if (cond) { \
- result = 0; \
- break; \
- } else if (cvmx_get_cycle() > done) { \
- result = -1; \
- break; \
- } else \
- cvmx_wait(100); \
- } \
- } while (0); \
- result; })
-
-/*
- * This macro logically sets a single field in a CSR. It does the sequence
- * read, modify, and write
- */
-#define USB_SET_FIELD32(address, _union, field, value) \
- do { \
- union _union c; \
- \
- c.u32 = cvmx_usb_read_csr32(usb, address); \
- c.s.field = value; \
- cvmx_usb_write_csr32(usb, address, c.u32); \
- } while (0)
-
-/* Returns the IO address used to push/pop data to/from the FIFOs */
-#define USB_FIFO_ADDRESS(channel, usb_index) \
- (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
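
The helper macros above cover the driver's two CSR access patterns: poll a field until a condition holds or a timeout expires, and read-modify-write a single field. The same patterns against a plain variable, as a standalone sketch (the register layout here is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Stand-in "CSR": a ready flag at bit 5 and an 8-bit divisor field at bits 0-7. */
static uint32_t fake_csr;

static int wait_for_ready(unsigned int spins)
{
	while (spins--) {
		if (fake_csr & (1u << 5))
			return 0;	/* condition met */
	}
	return -1;			/* timed out */
}

static void set_divisor(uint32_t value)
{
	uint32_t c = fake_csr;		/* read */

	c &= ~0xffu;			/* modify only the field of interest */
	c |= value & 0xffu;
	fake_csr = c;			/* write back */
}

int main(void)
{
	set_divisor(4);
	fake_csr |= 1u << 5;
	printf("ready=%d divisor=%u\n", !wait_for_ready(1000),
	       (unsigned int)(fake_csr & 0xffu));
	return 0;
}
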
-
-/**
- * struct octeon_temp_buffer - a bounce buffer for USB transfers
- * @orig_buffer: the original buffer passed by the USB stack
- * @data: the newly allocated temporary buffer (excluding meta-data)
- *
- * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If
- * the buffer length is not a whole number of 32-bit words, we need to allocate
- * a temporary one, and this struct represents it.
- */
-struct octeon_temp_buffer {
- void *orig_buffer;
- u8 data[0];
-};
-
-static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
-{
- return container_of(p, struct octeon_hcd, usb);
-}
-
-static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
-{
- return container_of((void *)p, struct usb_hcd, hcd_priv);
-}
-
-/**
- * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
- * (if needed)
- * @urb: URB.
- * @mem_flags: Memory allocation flags.
- *
- * This function allocates a temporary bounce buffer whenever it's needed
- * due to HW limitations.
- */
-static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
-{
- struct octeon_temp_buffer *temp;
-
- if (urb->num_sgs || urb->sg ||
- (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
- !(urb->transfer_buffer_length % sizeof(u32)))
- return 0;
-
- temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
- sizeof(*temp), mem_flags);
- if (!temp)
- return -ENOMEM;
-
- temp->orig_buffer = urb->transfer_buffer;
- if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
- urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
- urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
-
- return 0;
-}
-
-/**
- * octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
- * @urb: URB.
- *
- * Frees a buffer allocated by octeon_alloc_temp_buffer().
- */
-static void octeon_free_temp_buffer(struct urb *urb)
-{
- struct octeon_temp_buffer *temp;
- size_t length;
-
- if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
- return;
-
- temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
- data);
- if (usb_urb_dir_in(urb)) {
- if (usb_pipeisoc(urb->pipe))
- length = urb->transfer_buffer_length;
- else
- length = urb->actual_length;
-
- memcpy(temp->orig_buffer, urb->transfer_buffer, length);
- }
- urb->transfer_buffer = temp->orig_buffer;
- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
- kfree(temp);
-}
-
-/**
- * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
- * @hcd: USB HCD structure.
- * @urb: URB.
- * @mem_flags: Memory allocation flags.
- */
-static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
- gfp_t mem_flags)
-{
- int ret;
-
- ret = octeon_alloc_temp_buffer(urb, mem_flags);
- if (ret)
- return ret;
-
- ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
- if (ret)
- octeon_free_temp_buffer(urb);
-
- return ret;
-}
-
-/**
- * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
- * @hcd: USB HCD structure.
- * @urb: URB.
- */
-static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- usb_hcd_unmap_urb_for_dma(hcd, urb);
- octeon_free_temp_buffer(urb);
-}
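
The four helpers above implement a bounce buffer: when the URB length is not a whole number of 32-bit words, a padded copy is substituted before DMA mapping, data is copied back for IN transfers, and the original pointer is restored on unmap. A minimal user-space sketch of the same idea, assuming a device that only moves whole words:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	unsigned char user_buf[5] = "abcd";		/* 5 bytes: not a word multiple */
	size_t len = sizeof(user_buf);
	size_t padded = (len + 3) & ~(size_t)3;		/* round up to 32-bit words */
	unsigned char *bounce = calloc(1, padded);

	if (!bounce)
		return 1;
	memcpy(bounce, user_buf, len);	/* OUT direction: stage before the transfer */
	/* ... transfer 'padded' bytes from/to 'bounce' ... */
	memcpy(user_buf, bounce, len);	/* IN direction: copy back afterwards */
	free(bounce);
	printf("moved %zu bytes through a %zu-byte bounce buffer\n", len, padded);
	return 0;
}
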
-
-/**
- * Read a USB 32bit CSR. It performs the necessary address swizzle
- * for 32bit CSRs and logs the value in a readable format if
- * debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to read
- *
- * Returns: Result of the read
- */
-static inline uint32_t cvmx_usb_read_csr32(struct cvmx_usb_state *usb,
- uint64_t address)
-{
- uint32_t result = cvmx_read64_uint32(address ^ 4);
- return result;
-}
-
-
-/**
- * Write a USB 32bit CSR. It performs the necessary address
- * swizzle for 32bit CSRs and logs the value in a readable format
- * if debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to write
- * @value: Value to write
- */
-static inline void cvmx_usb_write_csr32(struct cvmx_usb_state *usb,
- uint64_t address, uint32_t value)
-{
- cvmx_write64_uint32(address ^ 4, value);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
-}
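
Both accessors XOR the address with 4, i.e. they flip byte-address bit 2 so the access lands in the other 32-bit half of the 64-bit register window (the "address swizzle" the comments refer to). A standalone illustration with a made-up base address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x10000;	/* invented window address, not a real CSR */

	for (int word = 0; word < 4; word++) {
		uint64_t cpu_addr = base + (uint64_t)word * 4;

		printf("32-bit CSR %d: nominal 0x%llx, swizzled 0x%llx\n",
		       word,
		       (unsigned long long)cpu_addr,
		       (unsigned long long)(cpu_addr ^ 4));
	}
	return 0;
}
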
-
-/**
- * Return non zero if this pipe connects to a non HIGH speed
- * device through a high speed hub.
- *
- * @usb: USB block this access is for
- * @pipe: Pipe to check
- *
- * Returns: Non zero if we need to do split transactions
- */
-static inline int cvmx_usb_pipe_needs_split(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe)
-{
- return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
- usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
-}
-
-
-/**
- * Trivial utility function to return the correct PID for a pipe
- *
- * @pipe: pipe to check
- *
- * Returns: PID for pipe
- */
-static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
-{
- if (pipe->pid_toggle)
- return 2; /* Data1 */
- return 0; /* Data0 */
-}
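
cvmx_usb_get_data_pid() maps the pipe's single toggle bit onto the hardware's PID encoding (0 = DATA0, 2 = DATA1); per the pipe documentation the toggle itself flips after every packet sent. A tiny standalone sketch of that sequence:

#include <stdio.h>

int main(void)
{
	int pid_toggle = 0;

	for (int packet = 0; packet < 4; packet++) {
		int pid = pid_toggle ? 2 /* DATA1 */ : 0 /* DATA0 */;

		printf("packet %d uses PID encoding %d\n", packet, pid);
		pid_toggle ^= 1;	/* flip after each successful packet */
	}
	return 0;
}
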
-
-static void cvmx_fifo_setup(struct cvmx_usb_state *usb)
-{
- union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
- union cvmx_usbcx_gnptxfsiz npsiz;
- union cvmx_usbcx_hptxfsiz psiz;
-
- usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GHWCFG3(usb->index));
-
- /*
- * Program the USBC_GRXFSIZ register to select the size of the receive
- * FIFO (25%).
- */
- USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz,
- rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
-
- /*
- * Program the USBC_GNPTXFSIZ register to select the size and the start
- * address of the non-periodic transmit FIFO for nonperiodic
- * transactions (50%).
- */
- npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
- npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
- npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32);
-
- /*
- * Program the USBC_HPTXFSIZ register to select the size and start
- * address of the periodic transmit FIFO for periodic transactions
- * (25%).
- */
- psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
- psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
- psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32);
-
- /* Flush all FIFOs */
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, txfnum, 0x10);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, txfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, c.s.txfflsh == 0, 100);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, rxfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, c.s.rxfflsh == 0, 100);
-}
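
cvmx_fifo_setup() splits the shared data FIFO 25/50/25: the RX FIFO takes the first quarter, the non-periodic TX FIFO the next half, and the periodic TX FIFO the last quarter. The same arithmetic as a standalone sketch, assuming a 4096-word dfifodepth (the driver reads the real value from GHWCFG3):

#include <stdio.h>

int main(void)
{
	unsigned int dfifodepth = 4096;		/* assumed; hardware-reported in the driver */
	unsigned int rx_depth = dfifodepth / 4;
	unsigned int nptx_depth = dfifodepth / 2;
	unsigned int nptx_start = dfifodepth / 4;
	unsigned int ptx_depth = dfifodepth / 4;
	unsigned int ptx_start = 3 * dfifodepth / 4;

	printf("rx fifo:         %u words at 0\n", rx_depth);
	printf("non-periodic tx: %u words at %u\n", nptx_depth, nptx_start);
	printf("periodic tx:     %u words at %u\n", ptx_depth, ptx_start);
	return 0;
}
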
-
-/**
- * Shutdown a USB port after a call to cvmx_usb_initialize().
- * The port should be disabled with all pipes closed when this
- * function is called.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_shutdown(struct cvmx_usb_state *usb)
-{
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
-
- /* Make sure all pipes are closed */
- if (!list_empty(&usb->idle_pipes) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK]))
- return -EBUSY;
-
- /* Disable the clocks and put them in power on reset */
- usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.enable = 1;
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hclk_rst = 1;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hrst = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- return 0;
-}
-
-/**
- * Initialize a USB port for use. This must be called before any
- * other access to the Octeon USB port is made. The port starts
- * off in the disabled state.
- *
- * @dev: Pointer to struct device for logging purposes.
- * @usb: Pointer to struct cvmx_usb_state.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_initialize(struct device *dev,
- struct cvmx_usb_state *usb)
-{
- int channel;
- int divisor;
- int retries = 0;
- union cvmx_usbcx_hcfg usbcx_hcfg;
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
- union cvmx_usbcx_gintsts usbc_gintsts;
- union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
- union cvmx_usbcx_gintmsk usbcx_gintmsk;
- union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
- union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
-
-retry:
- /*
- * Power On Reset and PHY Initialization
- *
- * 1. Wait for DCOK to assert (nothing to do)
- *
- * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
- * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
- */
- usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hrst = 0;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hclk_rst = 0;
- usbn_clk_ctl.s.enable = 0;
- /*
- * 2b. Select the USB reference clock/crystal parameters by writing
- * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
- /*
- * The USB port uses 12/24/48MHz 2.5V board clock
- * source at USB_XO. USB_XI should be tied to GND.
- * Most Octeon evaluation boards require this setting
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
- OCTEON_IS_MODEL(OCTEON_CN56XX) ||
- OCTEON_IS_MODEL(OCTEON_CN50XX))
- /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */
- usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */
- else
- /* From CN52XX manual */
- usbn_clk_ctl.s.p_rtype = 1;
-
- switch (usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
- usbn_clk_ctl.s.p_c_sel = 0;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
- usbn_clk_ctl.s.p_c_sel = 1;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
- usbn_clk_ctl.s.p_c_sel = 2;
- break;
- }
- } else {
- /*
- * The USB port uses a 12MHz crystal as clock source
- * at USB_XO and USB_XI
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
- /* From CN31XX,CN30XX manual */
- usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */
- else
- /* From CN56XX,CN52XX,CN50XX manuals. */
- usbn_clk_ctl.s.p_rtype = 0;
-
- usbn_clk_ctl.s.p_c_sel = 0;
- }
- /*
- * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
- * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
- * such that USB is as close as possible to 125Mhz
- */
- divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
- /* Lower than 4 doesn't seem to work properly */
- if (divisor < 4)
- divisor = 4;
- usbn_clk_ctl.s.divide = divisor;
- usbn_clk_ctl.s.divide2 = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-
- /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
- usbn_clk_ctl.s.hclk_rst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
- cvmx_wait(64);
- /*
- * 3. Program the power-on reset field in the USBN clock-control
- * register:
- * USBN_CLK_CTL[POR] = 0
- */
- usbn_clk_ctl.s.por = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 4. Wait 1 ms for PHY clock to start */
- mdelay(1);
- /*
- * 5. Program the Reset input from automatic test equipment field in the
- * USBP control and status register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
- */
- usbn_usbp_ctl_status.u64 =
- cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
- usbn_usbp_ctl_status.s.ate_reset = 1;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 6. Wait 10 cycles */
- cvmx_wait(10);
- /*
- * 7. Clear ATE_RESET field in the USBN clock-control register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
- */
- usbn_usbp_ctl_status.s.ate_reset = 0;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /*
- * 8. Program the PHY reset field in the USBN clock-control register:
- * USBN_CLK_CTL[PRST] = 1
- */
- usbn_clk_ctl.s.prst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /*
- * 9. Program the USBP control and status register to select host or
- * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
- * device
- */
- usbn_usbp_ctl_status.s.hst_mode = 0;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 10. Wait 1 us */
- udelay(1);
- /*
- * 11. Program the hreset_n field in the USBN clock-control register:
- * USBN_CLK_CTL[HRST] = 1
- */
- usbn_clk_ctl.s.hrst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 12. Proceed to USB core initialization */
- usbn_clk_ctl.s.enable = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- udelay(1);
-
- /*
- * USB Core Initialization
- *
- * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
- * determine USB core configuration parameters.
- *
- * Nothing needed
- *
- * 2. Program the following fields in the global AHB configuration
- * register (USBC_GAHBCFG)
- * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
- * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
- * Nonperiodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[NPTXFEMPLVL]
- * Periodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[PTXFEMPLVL]
- * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
- */
- usbcx_gahbcfg.u32 = 0;
- usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
- usbcx_gahbcfg.s.hbstlen = 0;
- usbcx_gahbcfg.s.nptxfemplvl = 1;
- usbcx_gahbcfg.s.ptxfemplvl = 1;
- usbcx_gahbcfg.s.glblintrmsk = 1;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
- usbcx_gahbcfg.u32);
-
- /*
- * 3. Program the following fields in USBC_GUSBCFG register.
- * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
- * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
- * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
- * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
- */
- usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GUSBCFG(usb->index));
- usbcx_gusbcfg.s.toutcal = 0;
- usbcx_gusbcfg.s.ddrsel = 0;
- usbcx_gusbcfg.s.usbtrdtim = 0x5;
- usbcx_gusbcfg.s.phylpwrclksel = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
- usbcx_gusbcfg.u32);
-
- /*
- * 4. The software must unmask the following bits in the USBC_GINTMSK
- * register.
- * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
- * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
- */
- usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTMSK(usb->index));
- usbcx_gintmsk.s.otgintmsk = 1;
- usbcx_gintmsk.s.modemismsk = 1;
- usbcx_gintmsk.s.hchintmsk = 1;
- usbcx_gintmsk.s.sofmsk = 0;
- /* We need RX FIFO interrupts if we don't have DMA */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- usbcx_gintmsk.s.rxflvlmsk = 1;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
- usbcx_gintmsk.u32);
-
- /*
- * Disable all channel interrupts. We'll enable them per channel later.
- */
- for (channel = 0; channel < 8; channel++)
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- 0);
-
- /*
- * Host Port Initialization
- *
- * 1. Program the host-port interrupt-mask field to unmask,
- * USBC_GINTMSK[PRTINT] = 1
- */
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, prtintmsk, 1);
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, disconnintmsk, 1);
-
- /*
- * 2. Program the USBC_HCFG register to select full-speed host
- * or high-speed host.
- */
- usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
- usbcx_hcfg.s.fslssupp = 0;
- usbcx_hcfg.s.fslspclksel = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
-
- cvmx_fifo_setup(usb);
-
- /*
- * If the controller is getting port events right after the reset, it
- * means the initialization failed. Try resetting the controller again
- * in such case. This is seen to happen after cold boot on DSR-1000N.
- */
- usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTSTS(usb->index));
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
- usbc_gintsts.u32);
- dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32);
- if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint)
- return 0;
- if (retries++ >= 5)
- return -EAGAIN;
- dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n",
- (int)usbc_gintsts.u32);
- msleep(50);
- cvmx_usb_shutdown(usb);
- msleep(50);
- goto retry;
-}
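
Step 2c of the sequence above derives the HCLK divider by rounding the core clock up against a 125 MHz target, with 4 as the practical minimum. A worked example of that calculation for a few plausible core clocks:

#include <stddef.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Example rates only; the driver queries the real core clock at run time. */
	unsigned long long rates[] = { 500000000ull, 750000000ull, 1000000000ull };

	for (size_t i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned int divisor = DIV_ROUND_UP(rates[i], 125000000ull);

		if (divisor < 4)	/* lower than 4 doesn't work reliably */
			divisor = 4;
		printf("%llu Hz -> divide by %u -> %llu Hz\n",
		       rates[i], divisor, rates[i] / divisor);
	}
	return 0;
}
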
-
-/**
- * Reset a USB port. After this call succeeds, the USB port is
- * online and servicing requests.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_reset_port(struct cvmx_usb_state *usb)
-{
- usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
-
- /* Program the port reset bit to start the reset process */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtrst, 1);
-
- /*
- * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
- * process to complete.
- */
- mdelay(50);
-
- /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtrst, 0);
-
- /*
- * Read the port speed field to get the enumerated speed,
- * USBC_HPRT[PRTSPD].
- */
- usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
-}
-
-
-/**
- * Disable a USB port. After this call the USB port will not
- * generate data transfers and will not generate events.
- * Transactions in process will fail and call their
- * associated callbacks.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_disable(struct cvmx_usb_state *usb)
-{
- /* Disable the port */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtena, 1);
- return 0;
-}
-
-
-/**
- * Get the current state of the USB port. Use this call to
- * determine if the usb port has anything connected, is enabled,
- * or has some sort of error condition. The return value of this
- * call has "changed" bits to signal if the value of some fields
- * has changed between calls.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: Port status information
- */
-static struct cvmx_usb_port_status cvmx_usb_get_status(
- struct cvmx_usb_state *usb)
-{
- union cvmx_usbcx_hprt usbc_hprt;
- struct cvmx_usb_port_status result;
-
- memset(&result, 0, sizeof(result));
-
- usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- result.port_enabled = usbc_hprt.s.prtena;
- result.port_over_current = usbc_hprt.s.prtovrcurract;
- result.port_powered = usbc_hprt.s.prtpwr;
- result.port_speed = usbc_hprt.s.prtspd;
- result.connected = usbc_hprt.s.prtconnsts;
- result.connect_change =
- (result.connected != usb->port_status.connected);
-
- return result;
-}
-
-/**
- * Open a virtual pipe between the host and a USB device. A pipe
- * must be opened before data can be transferred between a device
- * and Octeon.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @device_addr:
- * USB device address to open the pipe to
- * (0-127).
- * @endpoint_num:
- * USB endpoint number to open the pipe to
- * (0-15).
- * @device_speed:
- * The speed of the device the pipe is going
- * to. This must match the device's speed,
- * which may be different than the port speed.
- * @max_packet: The maximum packet length the device can
- * transmit/receive (low speed=0-8, full
- * speed=0-1023, high speed=0-1024). This value
- * comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <10:0>.
- * @transfer_type:
- * The type of transfer this pipe is for.
- * @transfer_dir:
- * The direction the pipe is in. This is not
- * used for control pipes.
- * @interval: For ISOCHRONOUS and INTERRUPT transfers,
- * this is how often the transfer is scheduled
- * for. All other transfers should specify
- * zero. The units are in frames (8000/sec at
- * high speed, 1000/sec for full speed).
- * @multi_count:
- * For high speed devices, this is the maximum
- * allowed number of packets per microframe.
- * Specify zero for non high speed devices. This
- * value comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <12:11>.
- * @hub_device_addr:
- * Hub device address this device is connected
- * to. Devices connected directly to Octeon
- * use zero. This is only used when the device
- * is full/low speed behind a high speed hub.
- * The address will be of the high speed hub,
- * not any full speed hubs after it.
- * @hub_port: Which port on the hub the device is
- * connected. Use zero for devices connected
- * directly to Octeon. Like hub_device_addr,
- * this is only used for full/low speed
- * devices behind a high speed hub.
- *
- * Returns: A non-NULL value is a pipe. NULL means an error.
- */
-static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
- int device_addr,
- int endpoint_num,
- enum cvmx_usb_speed
- device_speed,
- int max_packet,
- enum cvmx_usb_transfer
- transfer_type,
- enum cvmx_usb_direction
- transfer_dir,
- int interval, int multi_count,
- int hub_device_addr,
- int hub_port)
-{
- struct cvmx_usb_pipe *pipe;
-
- pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC);
- if (!pipe)
- return NULL;
- if ((device_speed == CVMX_USB_SPEED_HIGH) &&
- (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (transfer_type == CVMX_USB_TRANSFER_BULK))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
- pipe->device_addr = device_addr;
- pipe->endpoint_num = endpoint_num;
- pipe->device_speed = device_speed;
- pipe->max_packet = max_packet;
- pipe->transfer_type = transfer_type;
- pipe->transfer_dir = transfer_dir;
- INIT_LIST_HEAD(&pipe->transactions);
-
- /*
- * All pipes use interval to rate limit NAK processing. Force an
- * interval if one wasn't supplied
- */
- if (!interval)
- interval = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- pipe->interval = interval*8;
-		/* Force start splits to be scheduled on uFrame 0 */
- pipe->next_tx_frame = ((usb->frame_number+7)&~7) +
- pipe->interval;
- } else {
- pipe->interval = interval;
- pipe->next_tx_frame = usb->frame_number + pipe->interval;
- }
- pipe->multi_count = multi_count;
- pipe->hub_device_addr = hub_device_addr;
- pipe->hub_port = hub_port;
- pipe->pid_toggle = 0;
- pipe->split_sc_frame = -1;
- list_add_tail(&pipe->node, &usb->idle_pipes);
-
- /*
- * We don't need to tell the hardware about this pipe yet since
- * it doesn't have any submitted requests
- */
-
- return pipe;
-}
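
For pipes that will need split transactions, cvmx_usb_open_pipe() keeps the interval in microframes (interval * 8) and rounds the first transmit frame up to the next uFrame-0 boundary before adding the interval. The scheduling arithmetic in isolation, with an assumed current frame counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t frame_number = 1234;		/* assumed current microframe counter */
	unsigned int interval_frames = 2;	/* endpoint interval in frames */

	uint64_t interval = (uint64_t)interval_frames * 8;		  /* frames -> microframes */
	uint64_t next_tx_frame = ((frame_number + 7) & ~7ull) + interval; /* start on uFrame 0 */

	printf("interval=%llu uframes, first start split scheduled at %llu\n",
	       (unsigned long long)interval,
	       (unsigned long long)next_tx_frame);
	return 0;
}
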
-
-
-/**
- * Poll the RX FIFOs and remove data as needed. This function is only used
- * in non DMA mode. It is very important that this function be called quickly
- * enough to prevent FIFO overflow.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
-{
- union cvmx_usbcx_grxstsph rx_status;
- int channel;
- int bytes;
- uint64_t address;
- uint32_t *ptr;
-
- rx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GRXSTSPH(usb->index));
- /* Only read data if IN data is there */
- if (rx_status.s.pktsts != 2)
- return;
- /* Check if no data is available */
- if (!rx_status.s.bcnt)
- return;
-
- channel = rx_status.s.chnum;
- bytes = rx_status.s.bcnt;
- if (!bytes)
- return;
-
- /* Get where the DMA engine would have written this data */
- address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) +
- channel * 8);
-
- ptr = cvmx_phys_to_ptr(address);
- cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8,
- address + bytes);
-
- /* Loop writing the FIFO data for this packet into memory */
- while (bytes > 0) {
- *ptr++ = cvmx_usb_read_csr32(usb,
- USB_FIFO_ADDRESS(channel, usb->index));
- bytes -= 4;
- }
- CVMX_SYNCW;
-}
-
-
-/**
- * Fill the TX hardware fifo with data out of the software
- * fifos
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @fifo: Software fifo to use
- * @available: Amount of space in the hardware fifo
- *
- * Returns: Non zero if the hardware fifo was too small and needs
- * to be serviced again.
- */
-static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
- struct cvmx_usb_tx_fifo *fifo, int available)
-{
- /*
-	 * We're done either when there isn't any more space or the software FIFO
- * is empty
- */
- while (available && (fifo->head != fifo->tail)) {
- int i = fifo->tail;
- const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
- usb->index) ^ 4;
- int words = available;
-
- /* Limit the amount of data to what the SW fifo has */
- if (fifo->entry[i].size <= available) {
- words = fifo->entry[i].size;
- fifo->tail++;
- if (fifo->tail > MAX_CHANNELS)
- fifo->tail = 0;
- }
-
- /* Update the next locations and counts */
- available -= words;
- fifo->entry[i].address += words * 4;
- fifo->entry[i].size -= words;
-
- /*
- * Write the HW fifo data. The read every three writes is due
-		 * to an erratum on CN3XXX chips
- */
- while (words > 3) {
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(
- CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- words -= 3;
- }
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words) {
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words)
- cvmx_write64_uint32(csr_address, *ptr++);
- }
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- }
- return fifo->head != fifo->tail;
-}
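
The software FIFO drained above is a ring of MAX_CHANNELS + 1 entries: head and tail each wrap after passing MAX_CHANNELS, and equal indices mean the ring is empty (it cannot overflow because at most one entry per hardware channel is outstanding). A minimal sketch of that ring discipline:

#include <stdio.h>

#define MAX_CHANNELS	8

struct ring {
	int entry[MAX_CHANNELS + 1];
	int head;	/* producer index */
	int tail;	/* consumer index */
};

static void push(struct ring *r, int v)
{
	r->entry[r->head++] = v;
	if (r->head > MAX_CHANNELS)
		r->head = 0;
}

static int pop(struct ring *r, int *v)
{
	if (r->head == r->tail)
		return 0;		/* empty */
	*v = r->entry[r->tail++];
	if (r->tail > MAX_CHANNELS)
		r->tail = 0;
	return 1;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };
	int v;

	push(&r, 3);
	push(&r, 5);
	while (pop(&r, &v))
		printf("service channel %d\n", v);
	return 0;
}
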
-
-
-/**
- * Check the hardware FIFOs and fill them as needed
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb)
-{
- if (usb->periodic.head != usb->periodic.tail) {
- union cvmx_usbcx_hptxsts tx_status;
-
- tx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPTXSTS(usb->index));
- if (cvmx_usb_fill_tx_hw(usb, &usb->periodic,
- tx_status.s.ptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, ptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, ptxfempmsk, 0);
- }
-
- if (usb->nonperiodic.head != usb->nonperiodic.tail) {
- union cvmx_usbcx_gnptxsts tx_status;
-
- tx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GNPTXSTS(usb->index));
- if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
- tx_status.s.nptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, nptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, nptxfempmsk, 0);
- }
-}
-
-
-/**
- * Fill the TX FIFO with an outgoing packet
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel number to get packet from
- */
-static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
-{
- union cvmx_usbcx_hccharx hcchar;
- union cvmx_usbcx_hcspltx usbc_hcsplt;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- struct cvmx_usb_tx_fifo *fifo;
-
- /* We only need to fill data on outbound channels */
- hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
- if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
- return;
-
- /* OUT Splits only have data on the start and not the complete */
- usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCSPLTX(channel, usb->index));
- if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
- return;
-
- /*
- * Find out how many bytes we need to fill and convert it into 32bit
- * words.
- */
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
- if (!usbc_hctsiz.s.xfersize)
- return;
-
- if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
- (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
- fifo = &usb->periodic;
- else
- fifo = &usb->nonperiodic;
-
- fifo->entry[fifo->head].channel = channel;
- fifo->entry[fifo->head].address =
- cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
- channel * 8);
- fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
- fifo->head++;
- if (fifo->head > MAX_CHANNELS)
- fifo->head = 0;
-
- cvmx_usb_poll_tx_fifo(usb);
-}
-
-/**
- * Perform channel specific setup for Control transactions. All
- * the generic stuff will already have been done in cvmx_usb_start_channel().
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe for control transaction
- */
-static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
- int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
- struct usb_hcd *hcd = octeon_to_hcd(priv);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_transaction *transaction =
- list_first_entry(&pipe->transactions, typeof(*transaction),
- node);
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- int bytes_to_transfer = transaction->buffer_length -
- transaction->actual_bytes;
- int packets_to_transfer;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
-
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
- break;
- case CVMX_USB_STAGE_SETUP:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = sizeof(*header);
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- CVMX_USB_DIRECTION_OUT);
- /*
-		 * Setup sends the control header instead of the buffer data. The
- * buffer data will be used in the next stage
- */
- cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
- channel * 8,
- transaction->control_header);
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = 0;
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- CVMX_USB_DIRECTION_OUT);
-
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_DATA:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (header->bRequestType & USB_DIR_IN)
- bytes_to_transfer = 0;
- else if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
- }
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- if (!(header->bRequestType & USB_DIR_IN))
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_STATUS:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the hardware.
- * Further bytes will be sent as continued transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /*
- * Calculate the number of packets to transfer. If the length is zero
- * we still need to transfer one packet
- */
- packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
- pipe->max_packet);
- if (packets_to_transfer == 0)
- packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) &&
- (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must be
- * restarted between every packet for IN transactions, so there
- * is no reason to do multiple packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
-		 * Limit the number of packets and data transferred to what the
- * hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
- usbc_hctsiz.u32);
-}
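
The tail of cvmx_usb_start_channel_control() (and the equivalent block in cvmx_usb_start_channel()) clamps the request to the hardware's 19-bit byte count and 10-bit packet count, keeping the byte count a whole number of max-packet-size packets. A worked example of the clamping for a deliberately oversized request:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_TRANSFER_BYTES	((1 << 19) - 1)		/* 19-bit xfersize field */
#define MAX_TRANSFER_PACKETS	((1 << 10) - 1)		/* 10-bit pktcnt field */

int main(void)
{
	int max_packet = 512;
	int bytes = 4 * 1024 * 1024;	/* 4 MiB request, far above the limit */
	int packets;

	if (bytes > MAX_TRANSFER_BYTES) {
		bytes = MAX_TRANSFER_BYTES / max_packet;	/* whole packets only */
		bytes *= max_packet;
	}
	packets = DIV_ROUND_UP(bytes, max_packet);
	if (packets == 0)
		packets = 1;
	else if (packets > MAX_TRANSFER_PACKETS) {
		packets = MAX_TRANSFER_PACKETS;
		bytes = packets * max_packet;
	}
	printf("program xfersize=%d pktcnt=%d\n", bytes, packets);	/* 523776 and 1023 */
	return 0;
}
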
-
-
-/**
- * Start a channel to perform the pipe's head transaction
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe to start
- */
-static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction =
- list_first_entry(&pipe->transactions, typeof(*transaction),
- node);
-
- /* Make sure all writes to the DMA region get flushed */
- CVMX_SYNCW;
-
- /* Attach the channel to the pipe */
- usb->pipe_for_channel[channel] = pipe;
- pipe->channel = channel;
- pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /* Mark this channel as in use */
- usb->idle_hardware_channels &= ~(1<<channel);
-
- /* Enable the channel interrupt bits */
- {
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hcintmskx usbc_hcintmsk;
- union cvmx_usbcx_haintmsk usbc_haintmsk;
-
- /* Clear all channel status bits */
- usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index));
-
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index),
- usbc_hcint.u32);
-
- usbc_hcintmsk.u32 = 0;
- usbc_hcintmsk.s.chhltdmsk = 1;
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /*
- * Channels need these extra interrupts when we aren't
- * in DMA mode.
- */
- usbc_hcintmsk.s.datatglerrmsk = 1;
- usbc_hcintmsk.s.frmovrunmsk = 1;
- usbc_hcintmsk.s.bblerrmsk = 1;
- usbc_hcintmsk.s.xacterrmsk = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * Splits don't generate xfercompl, so we need
- * ACK and NYET.
- */
- usbc_hcintmsk.s.nyetmsk = 1;
- usbc_hcintmsk.s.ackmsk = 1;
- }
- usbc_hcintmsk.s.nakmsk = 1;
- usbc_hcintmsk.s.stallmsk = 1;
- usbc_hcintmsk.s.xfercomplmsk = 1;
- }
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- usbc_hcintmsk.u32);
-
- /* Enable the channel interrupt to propagate */
- usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HAINTMSK(usb->index));
- usbc_haintmsk.s.haintmsk |= 1<<channel;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
- usbc_haintmsk.u32);
- }
-
- /* Setup the location the DMA engine uses. */
- {
- uint64_t reg;
- uint64_t dma_address = transaction->buffer +
- transaction->actual_bytes;
-
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- dma_address = transaction->buffer +
- transaction->iso_packets[0].offset +
- transaction->actual_bytes;
-
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
- reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index);
- else
- reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index);
- cvmx_write64_uint64(reg + channel * 8, dma_address);
- }
-
- /* Setup both the size of the transfer and the SPLIT characteristics */
- {
- union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
- int packets_to_transfer;
- int bytes_to_transfer = transaction->buffer_length -
- transaction->actual_bytes;
-
- /*
- * ISOCHRONOUS transactions store each individual transfer size
- * in the packet structure, not the global buffer_length
- */
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- bytes_to_transfer =
- transaction->iso_packets[0].length -
- transaction->actual_bytes;
-
- /*
- * We need to do split transactions when we are talking to non
- * high speed devices that are behind a high speed hub
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * On the start split phase (stage is even) record the
- * frame number we will need to send the split complete.
- * We only store the lower two bits since the time ahead
- * can only be two frames
- */
- if ((transaction->stage&1) == 0) {
- if (transaction->type == CVMX_USB_TRANSFER_BULK)
- pipe->split_sc_frame =
- (usb->frame_number + 1) & 0x7f;
- else
- pipe->split_sc_frame =
- (usb->frame_number + 2) & 0x7f;
- } else
- pipe->split_sc_frame = -1;
-
- usbc_hcsplt.s.spltena = 1;
- usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
- usbc_hcsplt.s.prtaddr = pipe->hub_port;
- usbc_hcsplt.s.compsplt = (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
-
- /*
- * SPLIT transactions can only ever transmit one data
- * packet so limit the transfer size to the max packet
- * size
- */
- if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
-
- /*
- * ISOCHRONOUS OUT splits are unique in that they limit
- * data transfers to 188 byte chunks representing the
-			 * begin/middle/end of the data, or all of it
- */
- if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir ==
- CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type ==
- CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /*
- * Clear the split complete frame number as
- * there isn't going to be a split complete
- */
- pipe->split_sc_frame = -1;
- /*
- * See if we've started this transfer and sent
- * data
- */
- if (transaction->actual_bytes == 0) {
- /*
- * Nothing sent yet, this is either a
- * begin or the entire payload
- */
- if (bytes_to_transfer <= 188)
- /* Entire payload in one go */
- usbc_hcsplt.s.xactpos = 3;
- else
- /* First part of payload */
- usbc_hcsplt.s.xactpos = 2;
- } else {
- /*
- * Continuing the previous data, we must
- * either be in the middle or at the end
- */
- if (bytes_to_transfer <= 188)
- /* End of payload */
- usbc_hcsplt.s.xactpos = 1;
- else
- /* Middle of payload */
- usbc_hcsplt.s.xactpos = 0;
- }
- /*
- * Again, the transfer size is limited to 188
- * bytes
- */
- if (bytes_to_transfer > 188)
- bytes_to_transfer = 188;
- }
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the
- * hardware. Further bytes will be sent as continued
- * transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /*
- * Round MAX_TRANSFER_BYTES to a multiple of out packet
- * size
- */
- bytes_to_transfer = MAX_TRANSFER_BYTES /
- pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /*
- * Calculate the number of packets to transfer. If the length is
- * zero we still need to transfer one packet
- */
- packets_to_transfer =
- DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
- if (packets_to_transfer == 0)
- packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) &&
- (usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must
- * be restarted between every packet for IN
- * transactions, so there is no reason to do multiple
- * packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer *
- pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
- * Limit the number of packet and data transferred to
- * what the hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer *
- pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- /* Update the DATA0/DATA1 toggle */
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- /*
- * High speed pipes may need a hardware ping before they start
- */
- if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING)
- usbc_hctsiz.s.dopng = 1;
-
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCSPLTX(channel, usb->index),
- usbc_hcsplt.u32);
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index),
- usbc_hctsiz.u32);
- }
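	/*
	 * Worked example of the clamping above (illustrative numbers only;
	 * max_packet and the byte count are made up):
	 *
	 *   max_packet = 512, 1500 bytes left in the transaction:
	 *     packets_to_transfer = DIV_ROUND_UP(1500, 512) = 3
	 *     with DMA:     xfersize = 1500, pktcnt = 3
	 *     without DMA (CVMX_USB_INITIALIZE_FLAGS_NO_DMA):
	 *                   packets_to_transfer = 1, bytes_to_transfer = 512
	 *   0 bytes left (zero-length packet): pktcnt is still forced to 1
	 */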
-
- /* Setup the Host Channel Characteristics Register */
- {
- union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
-
- /*
- * Set the startframe odd/even properly. This is only used for
-		 * periodic transfers
- */
- usbc_hcchar.s.oddfrm = usb->frame_number&1;
-
- /*
- * Set the number of back to back packets allowed by this
- * endpoint. Split transactions interpret "ec" as the number of
-		 * immediate retries after a failure. These retries happen too
- * quickly, so we disable these entirely for splits
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count < 1)
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count > 3)
- usbc_hcchar.s.ec = 3;
- else
- usbc_hcchar.s.ec = pipe->multi_count;
-
- /* Set the rest of the endpoint specific settings */
- usbc_hcchar.s.devaddr = pipe->device_addr;
- usbc_hcchar.s.eptype = transaction->type;
- usbc_hcchar.s.lspddev =
- (pipe->device_speed == CVMX_USB_SPEED_LOW);
- usbc_hcchar.s.epdir = pipe->transfer_dir;
- usbc_hcchar.s.epnum = pipe->endpoint_num;
- usbc_hcchar.s.mps = pipe->max_packet;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index),
- usbc_hcchar.u32);
- }
-
- /* Do transaction type specific fixups as needed */
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- cvmx_usb_start_channel_control(usb, channel, pipe);
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISO transactions require different PIDs depending on
- * direction and how many packets are needed
- */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- if (pipe->multi_count < 2) /* Need DATA0 */
- USB_SET_FIELD32(
- CVMX_USBCX_HCTSIZX(channel,
- usb->index),
- cvmx_usbcx_hctsizx, pid, 0);
- else /* Need MDATA */
- USB_SET_FIELD32(
- CVMX_USBCX_HCTSIZX(channel,
- usb->index),
- cvmx_usbcx_hctsizx, pid, 3);
- }
- }
- break;
- }
- {
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 =
- cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index))};
- transaction->xfersize = usbc_hctsiz.s.xfersize;
- transaction->pktcnt = usbc_hctsiz.s.pktcnt;
- }
- /* Remember when we start a split transaction */
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- usb->active_split = transaction;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, chena, 1);
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_fill_tx_fifo(usb, channel);
-}
-
-
-/**
- * Find a pipe that is ready to be scheduled to hardware.
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @list: Pipe list to search
- * @current_frame:
- * Frame counter to use as a time reference.
- *
- * Returns: Pipe or NULL if none are ready
- */
-static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
- struct cvmx_usb_state *usb,
- struct list_head *list,
- uint64_t current_frame)
-{
- struct cvmx_usb_pipe *pipe;
-
- list_for_each_entry(pipe, list, node) {
- struct cvmx_usb_transaction *t =
- list_first_entry(&pipe->transactions, typeof(*t),
- node);
- if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
- (pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) ||
- ((((int)current_frame - (int)pipe->split_sc_frame)
- & 0x7f) < 0x40)) &&
- (!usb->active_split || (usb->active_split == t))) {
- prefetch(t);
- return pipe;
- }
- }
- return NULL;
-}
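The split-complete window part of the test above relies on 7-bit modular
frame arithmetic. A minimal standalone sketch of the same comparison follows;
the helper name and types are illustrative, not from the driver.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper mirroring the split-complete test above: the start
 * split recorded only the low 7 bits of the frame number, so the distance
 * to the current frame is computed modulo 128, and the pipe is treated as
 * ready only while that distance is still in the first half of the window.
 */
static bool sketch_split_window_ok(int split_sc_frame, uint64_t current_frame)
{
	if (split_sc_frame == -1)
		return true;	/* no split complete is pending on this pipe */

	return (((int)current_frame - split_sc_frame) & 0x7f) < 0x40;
}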
-
-
-/**
- * Called whenever a pipe might need to be scheduled to the
- * hardware.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @is_sof: True if this schedule was called on a SOF interrupt.
- */
-static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
-{
- int channel;
- struct cvmx_usb_pipe *pipe;
- int need_sof;
- enum cvmx_usb_transfer ttype;
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /*
- * Without DMA we need to be careful to not schedule something
- * at the end of a frame and cause an overrun.
- */
- union cvmx_usbcx_hfnum hfnum = {
- .u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HFNUM(usb->index))
- };
-
- union cvmx_usbcx_hfir hfir = {
- .u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HFIR(usb->index))
- };
-
- if (hfnum.s.frrem < hfir.s.frint/4)
- goto done;
- }
-
- while (usb->idle_hardware_channels) {
- /* Find an idle channel */
- channel = __fls(usb->idle_hardware_channels);
- if (unlikely(channel > 7))
- break;
-
- /* Find a pipe needing service */
- pipe = NULL;
- if (is_sof) {
- /*
- * Only process periodic pipes on SOF interrupts. This
- * way we are sure that the periodic data is sent in the
- * beginning of the frame
- */
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_ISOCHRONOUS,
- usb->frame_number);
- if (likely(!pipe))
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_INTERRUPT,
- usb->frame_number);
- }
- if (likely(!pipe)) {
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_CONTROL,
- usb->frame_number);
- if (likely(!pipe))
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_BULK,
- usb->frame_number);
- }
- if (!pipe)
- break;
-
- cvmx_usb_start_channel(usb, channel, pipe);
- }
-
-done:
- /*
- * Only enable SOF interrupts when we have transactions pending in the
- * future that might need to be scheduled
- */
- need_sof = 0;
- for (ttype = CVMX_USB_TRANSFER_CONTROL;
- ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
- list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
- if (pipe->next_tx_frame > usb->frame_number) {
- need_sof = 1;
- break;
- }
- }
- }
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, sofmsk, need_sof);
-}
-
-static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
- enum cvmx_usb_complete status,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction
- *transaction,
- int bytes_transferred,
- struct urb *urb)
-{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
- struct usb_hcd *hcd = octeon_to_hcd(priv);
- struct device *dev = hcd->self.controller;
-
- if (likely(status == CVMX_USB_COMPLETE_SUCCESS))
- urb->actual_length = bytes_transferred;
- else
- urb->actual_length = 0;
-
- urb->hcpriv = NULL;
-
-	/*
-	 * For Isochronous transactions we need to update the URB packet
-	 * status list from data in our private copy.
-	 */
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- int i;
- /*
- * The pointer to the private list is stored in the setup_packet
- * field.
- */
- struct cvmx_usb_iso_packet *iso_packet =
- (struct cvmx_usb_iso_packet *) urb->setup_packet;
- /* Recalculate the transfer size by adding up each packet */
- urb->actual_length = 0;
- for (i = 0; i < urb->number_of_packets; i++) {
- if (iso_packet[i].status ==
- CVMX_USB_COMPLETE_SUCCESS) {
- urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length =
- iso_packet[i].length;
- urb->actual_length +=
- urb->iso_frame_desc[i].actual_length;
- } else {
- dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
- i, urb->number_of_packets,
- iso_packet[i].status, pipe,
- transaction, iso_packet[i].length);
- urb->iso_frame_desc[i].status = -EREMOTEIO;
- }
- }
- /* Free the private list now that we don't need it anymore */
- kfree(iso_packet);
- urb->setup_packet = NULL;
- }
-
- switch (status) {
- case CVMX_USB_COMPLETE_SUCCESS:
- urb->status = 0;
- break;
- case CVMX_USB_COMPLETE_CANCEL:
- if (urb->status == 0)
- urb->status = -ENOENT;
- break;
- case CVMX_USB_COMPLETE_STALL:
- dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EPIPE;
- break;
- case CVMX_USB_COMPLETE_BABBLEERR:
- dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EPIPE;
- break;
- case CVMX_USB_COMPLETE_SHORT:
- dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EREMOTEIO;
- break;
- case CVMX_USB_COMPLETE_ERROR:
- case CVMX_USB_COMPLETE_XACTERR:
- case CVMX_USB_COMPLETE_DATATGLERR:
- case CVMX_USB_COMPLETE_FRAMEERR:
- dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
- status, pipe, transaction, bytes_transferred);
- urb->status = -EPROTO;
- break;
- }
- usb_hcd_unlink_urb_from_ep(octeon_to_hcd(priv), urb);
- spin_unlock(&priv->lock);
- usb_hcd_giveback_urb(octeon_to_hcd(priv), urb, urb->status);
- spin_lock(&priv->lock);
-}
-
-/**
- * Signal the completion of a transaction and free it. The
- * transaction will be removed from the pipe transaction list.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe the transaction is on
- * @transaction:
- * Transaction that completed
- * @complete_code:
- * Completion code
- */
-static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_complete complete_code)
-{
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
-
- /*
- * Isochronous transactions need extra processing as they might not be
- * done after a single data transfer
- */
- if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /* Update the number of bytes transferred in this ISO packet */
- transaction->iso_packets[0].length = transaction->actual_bytes;
- transaction->iso_packets[0].status = complete_code;
-
- /*
- * If there are more ISOs pending and we succeeded, schedule the
- * next one
- */
- if ((transaction->iso_number_packets > 1) &&
- (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
- /* No bytes transferred for this packet as of yet */
- transaction->actual_bytes = 0;
- /* One less ISO waiting to transfer */
- transaction->iso_number_packets--;
- /* Increment to the next location in our packet array */
- transaction->iso_packets++;
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- return;
- }
- }
-
- /* Remove the transaction from the pipe list */
- list_del(&transaction->node);
- if (list_empty(&pipe->transactions))
- list_move_tail(&pipe->node, &usb->idle_pipes);
- octeon_usb_urb_complete_callback(usb, complete_code, pipe,
- transaction,
- transaction->actual_bytes,
- transaction->urb);
- kfree(transaction);
-}
-
-
-/**
- * Submit a usb transaction to a pipe. Called for all types
- * of transactions.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Which pipe to submit to.
- * @type: Transaction type
- * @buffer: User buffer for the transaction
- * @buffer_length:
- * User buffer's length in bytes
- * @control_header:
- * For control transactions, the 8 byte standard header
- * @iso_start_frame:
- * For ISO transactions, the start frame
- * @iso_number_packets:
- *	For ISO, the number of packets in the transaction.
- * @iso_packets:
- * A description of each ISO packet
- * @urb: URB for the callback
- *
- * Returns: Transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
- struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- enum cvmx_usb_transfer type,
- uint64_t buffer,
- int buffer_length,
- uint64_t control_header,
- int iso_start_frame,
- int iso_number_packets,
- struct cvmx_usb_iso_packet *iso_packets,
- struct urb *urb)
-{
- struct cvmx_usb_transaction *transaction;
-
- if (unlikely(pipe->transfer_type != type))
- return NULL;
-
- transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC);
- if (unlikely(!transaction))
- return NULL;
-
- transaction->type = type;
- transaction->buffer = buffer;
- transaction->buffer_length = buffer_length;
- transaction->control_header = control_header;
- /* FIXME: This is not used, implement it. */
- transaction->iso_start_frame = iso_start_frame;
- transaction->iso_number_packets = iso_number_packets;
- transaction->iso_packets = iso_packets;
- transaction->urb = urb;
- if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
- transaction->stage = CVMX_USB_STAGE_SETUP;
- else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-
- if (!list_empty(&pipe->transactions)) {
- list_add_tail(&transaction->node, &pipe->transactions);
- } else {
- list_add_tail(&transaction->node, &pipe->transactions);
- list_move_tail(&pipe->node,
- &usb->active_pipes[pipe->transfer_type]);
-
- /*
- * We may need to schedule the pipe if this was the head of the
- * pipe.
- */
- cvmx_usb_schedule(usb, 0);
- }
-
- return transaction;
-}
-
-
-/**
- * Call to submit a USB Bulk transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
- struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-
-/**
- * Call to submit a USB Interrupt transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
- struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_INTERRUPT,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-
-/**
- * Call to submit a USB Control transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_control(
- struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- int buffer_length = urb->transfer_buffer_length;
- uint64_t control_header = urb->setup_dma;
- struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
-
- if ((header->bRequestType & USB_DIR_IN) == 0)
- buffer_length = le16_to_cpu(header->wLength);
-
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_CONTROL,
- urb->transfer_dma, buffer_length,
- control_header,
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-
-/**
- * Call to submit a USB Isochronous transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
- struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- struct cvmx_usb_iso_packet *packets;
-
- packets = (struct cvmx_usb_iso_packet *) urb->setup_packet;
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_ISOCHRONOUS,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- urb->start_frame,
- urb->number_of_packets,
- packets, urb);
-}
-
-
-/**
- * Cancel one outstanding request in a pipe. Canceling a request
- * can fail if the transaction has already completed before cancel
- * is called. Even after a successful cancel call, it may take
- * a frame or two for the cvmx_usb_poll() function to call the
- * associated callback.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to cancel requests in.
- * @transaction: Transaction to cancel, returned by the submit function.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction)
-{
- /*
-	 * If the transaction is at the head of the queue and scheduled, we
-	 * need to treat it specially
- */
- if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
- transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
- union cvmx_usbcx_hccharx usbc_hcchar;
-
- usb->pipe_for_channel[pipe->channel] = NULL;
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- CVMX_SYNCW;
-
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
- /*
- * If the channel isn't enabled then the transaction already
- * completed.
- */
- if (usbc_hcchar.s.chena) {
- usbc_hcchar.s.chdis = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(pipe->channel,
- usb->index),
- usbc_hcchar.u32);
- }
- }
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_CANCEL);
- return 0;
-}
-
-
-/**
- * Cancel all outstanding requests in a pipe. Logically all this
- * does is call cvmx_usb_cancel() in a loop.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to cancel requests in.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction, *next;
-
- /* Simply loop through and attempt to cancel each transaction */
- list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
- int result = cvmx_usb_cancel(usb, pipe, transaction);
-
- if (unlikely(result != 0))
- return result;
- }
- return 0;
-}
-
-
-/**
- * Close a pipe created with cvmx_usb_open_pipe().
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to close.
- *
- * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
- * outstanding transfers.
- */
-static int cvmx_usb_close_pipe(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe)
-{
- /* Fail if the pipe has pending transactions */
- if (!list_empty(&pipe->transactions))
- return -EBUSY;
-
- list_del(&pipe->node);
- kfree(pipe);
-
- return 0;
-}
-
-/**
- * Get the current USB protocol level frame number. The frame
- * number is always in the range of 0-0x7ff.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: USB frame number
- */
-static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
-{
- int frame_number;
- union cvmx_usbcx_hfnum usbc_hfnum;
-
- usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- frame_number = usbc_hfnum.s.frnum;
-
- return frame_number;
-}
-
-
-/**
- * Poll a channel for status
- *
- * @usb: USB device
- * @channel: Channel to poll
- *
- * Returns: Zero on success
- */
-static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
-{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
- struct usb_hcd *hcd = octeon_to_hcd(priv);
- struct device *dev = hcd->self.controller;
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- union cvmx_usbcx_hccharx usbc_hcchar;
- struct cvmx_usb_pipe *pipe;
- struct cvmx_usb_transaction *transaction;
- int bytes_this_transfer;
- int bytes_in_last_packet;
- int packets_processed;
- int buffer_space_left;
-
- /* Read the interrupt status bits for the channel */
- usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index));
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
-
- if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
- /*
- * There seems to be a bug in CN31XX which can cause
- * interrupt IN transfers to get stuck until we do a
- * write of HCCHARX without changing things
- */
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel,
- usb->index),
- usbc_hcchar.u32);
- return 0;
- }
-
- /*
- * In non DMA mode the channels don't halt themselves. We need
- * to manually disable channels that are left running
- */
- if (!usbc_hcint.s.chhltd) {
- if (usbc_hcchar.s.chena) {
- union cvmx_usbcx_hcintmskx hcintmsk;
- /* Disable all interrupts except CHHLTD */
- hcintmsk.u32 = 0;
- hcintmsk.s.chhltdmsk = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel,
- usb->index),
- hcintmsk.u32);
- usbc_hcchar.s.chdis = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel,
- usb->index),
- usbc_hcchar.u32);
- return 0;
- } else if (usbc_hcint.s.xfercompl) {
- /*
- * Successful IN/OUT with transfer complete.
- * Channel halt isn't needed.
- */
- } else {
- dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
- usb->index, channel);
- return 0;
- }
- }
- } else {
- /*
-		 * There are no interrupts that we need to process while the
-		 * channel is still running
- */
- if (!usbc_hcint.s.chhltd)
- return 0;
- }
-
- /* Disable the channel interrupts now that it is done */
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- usb->idle_hardware_channels |= (1<<channel);
-
- /* Make sure this channel is tied to a valid pipe */
- pipe = usb->pipe_for_channel[channel];
- prefetch(pipe);
- if (!pipe)
- return 0;
- transaction = list_first_entry(&pipe->transactions,
- typeof(*transaction),
- node);
- prefetch(transaction);
-
- /*
- * Disconnect this pipe from the HW channel. Later the schedule
- * function will figure out which pipe needs to go
- */
- usb->pipe_for_channel[channel] = NULL;
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /*
- * Read the channel config info so we can figure out how much data
- * transferred
- */
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- /*
- * Calculating the number of bytes successfully transferred is dependent
- * on the transfer direction
- */
- packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
- if (usbc_hcchar.s.epdir) {
- /*
- * IN transactions are easy. For every byte received the
- * hardware decrements xfersize. All we need to do is subtract
- * the current value of xfersize from its starting value and we
- * know how many bytes were written to the buffer
- */
- bytes_this_transfer = transaction->xfersize -
- usbc_hctsiz.s.xfersize;
- } else {
- /*
-		 * OUT transactions don't decrement xfersize. Instead pktcnt is
-		 * decremented on every successful packet send. The hardware
-		 * does this when it receives an ACK or NYET. If it doesn't
-		 * receive one of these responses, pktcnt doesn't change
- */
- bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
- /*
- * The last packet may not be a full transfer if we didn't have
- * enough data
- */
- if (bytes_this_transfer > transaction->xfersize)
- bytes_this_transfer = transaction->xfersize;
- }
- /* Figure out how many bytes were in the last packet of the transfer */
- if (packets_processed)
- bytes_in_last_packet = bytes_this_transfer -
- (packets_processed - 1) * usbc_hcchar.s.mps;
- else
- bytes_in_last_packet = bytes_this_transfer;
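	/*
	 * Worked example of the accounting above (illustrative register
	 * values; mps and the programmed sizes are made up). Transfer was
	 * programmed with xfersize = 200, pktcnt = 4, mps = 64:
	 *
	 *   IN, hardware leaves xfersize = 40, pktcnt = 1:
	 *     bytes_this_transfer  = 200 - 40 = 160
	 *     packets_processed    = 4 - 1    = 3
	 *     bytes_in_last_packet = 160 - 2 * 64 = 32  (short packet)
	 *   OUT, hardware leaves pktcnt = 1 (three packets ACKed):
	 *     bytes_this_transfer  = 3 * 64 = 192 (capped at xfersize if larger)
	 *     bytes_in_last_packet = 192 - 2 * 64 = 64
	 */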
-
- /*
- * As a special case, setup transactions output the setup header, not
- * the user's data. For this reason we don't count setup data as bytes
- * transferred
- */
- if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
- (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
- bytes_this_transfer = 0;
-
- /*
- * Add the bytes transferred to the running total. It is important that
- * bytes_this_transfer doesn't count any data that needs to be
- * retransmitted
- */
- transaction->actual_bytes += bytes_this_transfer;
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- buffer_space_left = transaction->iso_packets[0].length -
- transaction->actual_bytes;
- else
- buffer_space_left = transaction->buffer_length -
- transaction->actual_bytes;
-
- /*
- * We need to remember the PID toggle state for the next transaction.
- * The hardware already updated it for the next transaction
- */
- pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
-
- /*
- * For high speed bulk out, assume the next transaction will need to do
- * a ping before proceeding. If this isn't true the ACK processing below
- * will clear this flag
- */
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- if (unlikely(WARN_ON_ONCE(bytes_this_transfer < 0))) {
- /*
- * In some rare cases the DMA engine seems to get stuck and
-		 * keeps subtracting the same byte count over and over again.
-		 * In such a case we just need to fail every transaction.
- */
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_ERROR);
- return 0;
- }
-
- if (usbc_hcint.s.stall) {
- /*
- * STALL as a response means this transaction cannot be
- * completed because the device can't process transactions. Tell
-		 * the user. Any data that was transferred will be counted in
-		 * the actual bytes transferred
- */
- pipe->pid_toggle = 0;
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_STALL);
- } else if (usbc_hcint.s.xacterr) {
- /*
- * XactErr as a response means the device signaled
- * something wrong with the transfer. For example, PID
- * toggle errors cause these.
- */
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_XACTERR);
- } else if (usbc_hcint.s.bblerr) {
- /* Babble Error (BblErr) */
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_BABBLEERR);
- } else if (usbc_hcint.s.datatglerr) {
- /* Data toggle error */
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_DATATGLERR);
- } else if (usbc_hcint.s.nyet) {
- /*
- * NYET as a response is only allowed in three cases: as a
- * response to a ping, as a response to a split transaction, and
- * as a response to a bulk out. The ping case is handled by
- * hardware, so we only have splits and bulk out
- */
- if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->retries = 0;
- /*
- * If there is more data to go then we need to try
- * again. Otherwise this transaction is complete
- */
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet))
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- } else {
- /*
- * Split transactions retry the split complete 4 times
- * then rewind to the start split and do the entire
- * transactions again
- */
- transaction->retries++;
- if ((transaction->retries & 0x3) == 0) {
- /*
- * Rewind to the beginning of the transaction by
- * anding off the split complete bit
- */
- transaction->stage &= ~1;
- pipe->split_sc_frame = -1;
- }
- }
- } else if (usbc_hcint.s.ack) {
- transaction->retries = 0;
- /*
- * The ACK bit can only be checked after the other error bits.
- * This is because a multi packet transfer may succeed in a
- * number of packets and then get a different response on the
- * last packet. In this case both ACK and the last response bit
- * will be set. If none of the other response bits is set, then
- * the last packet must have been an ACK
- *
- * Since we got an ACK, we know we don't need to do a ping on
- * this pipe
- */
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- /* This should be impossible */
- cvmx_usb_perform_complete(usb, pipe,
- transaction, CVMX_USB_COMPLETE_ERROR);
- break;
- case CVMX_USB_STAGE_SETUP:
- pipe->pid_toggle = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage =
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
- else {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage =
- CVMX_USB_STAGE_DATA;
- else
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage =
- CVMX_USB_STAGE_DATA;
- else
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA:
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage =
- CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
- /*
- * For setup OUT data that are splits,
- * the hardware doesn't appear to count
- * transferred data. Here we manually
- * update the data transferred
- */
- if (!usbc_hcchar.s.epdir) {
- if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes +=
- buffer_space_left;
- else
- transaction->actual_bytes +=
- pipe->max_packet;
- }
- } else if ((buffer_space_left == 0) ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- } else {
- transaction->stage =
- CVMX_USB_STAGE_DATA;
- }
- break;
- case CVMX_USB_STAGE_STATUS:
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage =
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
- else
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- break;
- }
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- /*
- * The only time a bulk transfer isn't complete when it
- * finishes with an ACK is during a split transaction.
- * For splits we need to continue the transfer if more
- * data is needed
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- else {
- if (buffer_space_left &&
- (bytes_in_last_packet ==
- pipe->max_packet))
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL;
- else {
- if (transaction->type ==
- CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(
- usb,
- pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- } else {
- if ((pipe->device_speed ==
- CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type ==
- CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir ==
- CVMX_USB_DIRECTION_OUT) &&
- (usbc_hcint.s.nak))
- pipe->flags |=
- CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- if (transaction->type ==
- CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISOCHRONOUS OUT splits don't require a
- * complete split stage. Instead they use a
- * sequence of begin OUT splits to transfer the
- * data 188 bytes at a time. Once the transfer
- * is complete, the pipe sleeps until the next
- * schedule interval
- */
- if (pipe->transfer_dir ==
- CVMX_USB_DIRECTION_OUT) {
- /*
- * If no space left or this wasn't a max
- * size packet then this transfer is
- * complete. Otherwise start it again to
- * send the next 188 bytes
- */
- if (!buffer_space_left ||
- (bytes_this_transfer < 188)) {
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(usb,
- pipe, transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- } else {
- if (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
- /*
- * We are in the incoming data
- * phase. Keep getting data
- * until we run out of space or
- * get a small packet
- */
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(
- usb,
- pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- } else
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- }
- } else {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- break;
- }
- } else if (usbc_hcint.s.nak) {
- /*
- * If this was a split then clear our split in progress marker.
- */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
- /*
- * NAK as a response means the device couldn't accept the
- * transaction, but it should be retried in the future. Rewind
- * to the beginning of the transaction by anding off the split
- * complete bit. Retry in the next interval
- */
- transaction->retries = 0;
- transaction->stage &= ~1;
- pipe->next_tx_frame += pipe->interval;
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number +
- pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) %
- pipe->interval;
- } else {
- struct cvmx_usb_port_status port;
-
- port = cvmx_usb_get_status(usb);
- if (port.port_enabled) {
- /* We'll retry the exact same transaction again */
- transaction->retries++;
- } else {
- /*
- * We get channel halted interrupts with no result bits
-			 * set when the cable is unplugged
- */
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_ERROR);
- }
- }
- return 0;
-}
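The NAK path above reschedules the pipe for a future frame while preserving
its phase. A hedged standalone sketch of that catch-up arithmetic follows;
the helper name and types are made up for illustration.

#include <stdint.h>

/*
 * Sketch of the catch-up step in the NAK handler: advance by one interval
 * and, if the pipe has fallen behind the current frame counter, jump to the
 * next slot after "now" that keeps the same phase modulo the interval.
 */
static uint64_t sketch_next_tx_frame(uint64_t next_tx_frame, uint64_t now,
				     uint64_t interval)
{
	next_tx_frame += interval;
	if (next_tx_frame < now)
		next_tx_frame = now + interval -
				(now - next_tx_frame) % interval;
	return next_tx_frame;
}

For example, next_tx_frame = 100, now = 1000, interval = 8 yields 1004: the
first frame after 1000 that keeps the pipe's phase (1004 and 108 are both
congruent to 4 modulo 8).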
-
-static void octeon_usb_port_callback(struct cvmx_usb_state *usb)
-{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
-
- spin_unlock(&priv->lock);
- usb_hcd_poll_rh_status(octeon_to_hcd(priv));
- spin_lock(&priv->lock);
-}
-
-/**
- * Poll the USB block for status and call all needed callback
- * handlers. This function is meant to be called in the interrupt
- * handler for the USB controller. It can also be called
- * periodically in a loop for non-interrupt based operation.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_poll(struct cvmx_usb_state *usb)
-{
- union cvmx_usbcx_hfnum usbc_hfnum;
- union cvmx_usbcx_gintsts usbc_gintsts;
-
- prefetch_range(usb, sizeof(*usb));
-
- /* Update the frame counter */
- usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
- usb->frame_number += 0x4000;
- usb->frame_number &= ~0x3fffull;
- usb->frame_number |= usbc_hfnum.s.frnum;
-
- /* Read the pending interrupts */
- usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTSTS(usb->index));
-
- /* Clear the interrupts now that we know about them */
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
- usbc_gintsts.u32);
-
- if (usbc_gintsts.s.rxflvl) {
- /*
- * RxFIFO Non-Empty (RxFLvl)
- * Indicates that there is at least one packet pending to be
- * read from the RxFIFO.
- *
- * In DMA mode this is handled by hardware
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_poll_rx_fifo(usb);
- }
- if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
- /* Fill the Tx FIFOs when not in DMA mode */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_poll_tx_fifo(usb);
- }
- if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
- union cvmx_usbcx_hprt usbc_hprt;
- /*
- * Disconnect Detected Interrupt (DisconnInt)
- * Asserted when a device disconnect is detected.
- *
- * Host Port Interrupt (PrtInt)
- * The core sets this bit to indicate a change in port status of
- * one of the O2P USB core ports in Host mode. The application
- * must read the Host Port Control and Status (HPRT) register to
- * determine the exact event that caused this interrupt. The
- * application must clear the appropriate status bit in the Host
- * Port Control and Status register to clear this bit.
- *
- * Call the user's port callback
- */
- octeon_usb_port_callback(usb);
- /* Clear the port change bits */
- usbc_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
- usbc_hprt.s.prtena = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
- usbc_hprt.u32);
- }
- if (usbc_gintsts.s.hchint) {
- /*
- * Host Channels Interrupt (HChInt)
- * The core sets this bit to indicate that an interrupt is
- * pending on one of the channels of the core (in Host mode).
- * The application must read the Host All Channels Interrupt
- * (HAINT) register to determine the exact number of the channel
- * on which the interrupt occurred, and then read the
- * corresponding Host Channel-n Interrupt (HCINTn) register to
- * determine the exact cause of the interrupt. The application
- * must clear the appropriate status bit in the HCINTn register
- * to clear this bit.
- */
- union cvmx_usbcx_haint usbc_haint;
-
- usbc_haint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HAINT(usb->index));
- while (usbc_haint.u32) {
- int channel;
-
- channel = __fls(usbc_haint.u32);
- cvmx_usb_poll_channel(usb, channel);
- usbc_haint.u32 ^= 1<<channel;
- }
- }
-
- cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
-
- return 0;
-}
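The frame-counter update at the top of cvmx_usb_poll() extends the 14-bit
hardware counter into the 64-bit software copy. A minimal sketch of that
wrap handling, with a hypothetical helper name:

#include <stdint.h>

/*
 * Sketch of the frame-counter extension: when the 14-bit hardware value is
 * smaller than the low bits of the software copy, the hardware counter has
 * wrapped, so the high bits are bumped by one wrap (0x4000) before the low
 * bits are replaced.
 */
static uint64_t sketch_extend_frame_number(uint64_t soft, uint32_t hw_frnum)
{
	if ((soft & 0x3fff) > hw_frnum)
		soft += 0x4000;			/* 14-bit counter wrapped */
	return (soft & ~0x3fffull) | hw_frnum;
}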
-
-/* convert between an HCD pointer and the corresponding struct octeon_hcd */
-static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
-{
- return (struct octeon_hcd *)(hcd->hcd_priv);
-}
-
-static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
-{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_poll(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-}
-
-static int octeon_usb_start(struct usb_hcd *hcd)
-{
- hcd->state = HC_STATE_RUNNING;
- return 0;
-}
-
-static void octeon_usb_stop(struct usb_hcd *hcd)
-{
- hcd->state = HC_STATE_HALT;
-}
-
-static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
-{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
-
- return cvmx_usb_get_frame_number(&priv->usb);
-}
-
-static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
- struct urb *urb,
- gfp_t mem_flags)
-{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_transaction *transaction = NULL;
- struct cvmx_usb_pipe *pipe;
- unsigned long flags;
- struct cvmx_usb_iso_packet *iso_packet;
- struct usb_host_endpoint *ep = urb->ep;
- int rc;
-
- urb->status = 0;
- spin_lock_irqsave(&priv->lock, flags);
-
- rc = usb_hcd_link_urb_to_ep(hcd, urb);
- if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
- }
-
- if (!ep->hcpriv) {
- enum cvmx_usb_transfer transfer_type;
- enum cvmx_usb_speed speed;
- int split_device = 0;
- int split_port = 0;
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS;
- break;
- case PIPE_INTERRUPT:
- transfer_type = CVMX_USB_TRANSFER_INTERRUPT;
- break;
- case PIPE_CONTROL:
- transfer_type = CVMX_USB_TRANSFER_CONTROL;
- break;
- default:
- transfer_type = CVMX_USB_TRANSFER_BULK;
- break;
- }
- switch (urb->dev->speed) {
- case USB_SPEED_LOW:
- speed = CVMX_USB_SPEED_LOW;
- break;
- case USB_SPEED_FULL:
- speed = CVMX_USB_SPEED_FULL;
- break;
- default:
- speed = CVMX_USB_SPEED_HIGH;
- break;
- }
- /*
- * For slow devices on high speed ports we need to find the hub
- * that does the speed translation so we know where to send the
- * split transactions.
- */
- if (speed != CVMX_USB_SPEED_HIGH) {
- /*
- * Start at this device and work our way up the usb
- * tree.
- */
- struct usb_device *dev = urb->dev;
-
- while (dev->parent) {
- /*
-				 * If our parent is high speed then it will
-				 * receive the splits.
- */
- if (dev->parent->speed == USB_SPEED_HIGH) {
- split_device = dev->parent->devnum;
- split_port = dev->portnum;
- break;
- }
- /*
- * Move up the tree one level. If we make it all
- * the way up the tree, then the port must not
- * be in high speed mode and we don't need a
- * split.
- */
- dev = dev->parent;
- }
- }
- pipe = cvmx_usb_open_pipe(&priv->usb, usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe), speed,
- le16_to_cpu(ep->desc.wMaxPacketSize)
- & 0x7ff,
- transfer_type,
- usb_pipein(urb->pipe) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT,
- urb->interval,
- (le16_to_cpu(ep->desc.wMaxPacketSize)
- >> 11) & 0x3,
- split_device, split_port);
- if (!pipe) {
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_dbg(dev, "Failed to create pipe\n");
- return -ENOMEM;
- }
- ep->hcpriv = pipe;
- } else {
- pipe = ep->hcpriv;
- }
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- dev_dbg(dev, "Submit isochronous to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- /*
- * Allocate a structure to use for our private list of
- * isochronous packets.
- */
- iso_packet = kmalloc(urb->number_of_packets *
- sizeof(struct cvmx_usb_iso_packet),
- GFP_ATOMIC);
- if (iso_packet) {
- int i;
- /* Fill the list with the data from the URB */
- for (i = 0; i < urb->number_of_packets; i++) {
- iso_packet[i].offset =
- urb->iso_frame_desc[i].offset;
- iso_packet[i].length =
- urb->iso_frame_desc[i].length;
- iso_packet[i].status =
- CVMX_USB_COMPLETE_ERROR;
- }
- /*
- * Store a pointer to the list in the URB setup_packet
- * field. We know this currently isn't being used and
- * this saves us a bunch of logic.
- */
- urb->setup_packet = (char *)iso_packet;
- transaction = cvmx_usb_submit_isochronous(&priv->usb,
- pipe, urb);
- /*
- * If submit failed we need to free our private packet
- * list.
- */
- if (!transaction) {
- urb->setup_packet = NULL;
- kfree(iso_packet);
- }
- }
- break;
- case PIPE_INTERRUPT:
- dev_dbg(dev, "Submit interrupt to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_interrupt(&priv->usb, pipe, urb);
- break;
- case PIPE_CONTROL:
- dev_dbg(dev, "Submit control to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_control(&priv->usb, pipe, urb);
- break;
- case PIPE_BULK:
- dev_dbg(dev, "Submit bulk to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_bulk(&priv->usb, pipe, urb);
- break;
- }
- if (!transaction) {
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_dbg(dev, "Failed to submit\n");
- return -ENOMEM;
- }
- urb->hcpriv = transaction;
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
-}
-
-static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
- struct urb *urb,
- int status)
-{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- unsigned long flags;
- int rc;
-
- if (!urb->dev)
- return -EINVAL;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rc = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (rc)
- goto out;
-
- urb->status = status;
- cvmx_usb_cancel(&priv->usb, urb->ep->hcpriv, urb->hcpriv);
-
-out:
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return rc;
-}
-
-static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep)
-{
- struct device *dev = hcd->self.controller;
-
- if (ep->hcpriv) {
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- struct cvmx_usb_pipe *pipe = ep->hcpriv;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_cancel_all(&priv->usb, pipe);
- if (cvmx_usb_close_pipe(&priv->usb, pipe))
- dev_dbg(dev, "Closing pipe %p failed\n", pipe);
- spin_unlock_irqrestore(&priv->lock, flags);
- ep->hcpriv = NULL;
- }
-}
-
-static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
-{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- struct cvmx_usb_port_status port_status;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- port_status = cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- buf[0] = 0;
- buf[0] = port_status.connect_change << 1;
-
- return buf[0] != 0;
-}
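The buffer filled above is the standard hub status-change bitmap. A small
sketch of the same encoding, with a made-up helper name:

#include <stdbool.h>

/*
 * Sketch of the root-hub status bitmap built above: bit 0 refers to the hub
 * itself and bit N to port N, so the single root port reports through bit 1.
 * A non-zero byte tells usbcore to follow up with GetPortStatus.
 */
static int sketch_hub_status_data(bool connect_change, char *buf)
{
	buf[0] = connect_change ? (1 << 1) : 0;	/* change on port 1 */
	return buf[0] != 0;
}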
-
-static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
- u16 wIndex, char *buf, u16 wLength)
-{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_port_status usb_port_status;
- struct cvmx_usb_state *usb = &priv->usb;
- int port_status;
- struct usb_hub_descriptor *desc;
- unsigned long flags;
-
- switch (typeReq) {
- case ClearHubFeature:
- dev_dbg(dev, "ClearHubFeature\n");
- switch (wValue) {
- case C_HUB_LOCAL_POWER:
- case C_HUB_OVER_CURRENT:
- /* Nothing required here */
- break;
- default:
- return -EINVAL;
- }
- break;
- case ClearPortFeature:
- dev_dbg(dev, "ClearPortFeature\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- switch (wValue) {
- case USB_PORT_FEAT_ENABLE:
- dev_dbg(dev, " ENABLE\n");
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_disable(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(dev, " SUSPEND\n");
- /* Not supported on Octeon */
- break;
- case USB_PORT_FEAT_POWER:
- dev_dbg(dev, " POWER\n");
- /* Not supported on Octeon */
- break;
- case USB_PORT_FEAT_INDICATOR:
- dev_dbg(dev, " INDICATOR\n");
-			/* Port indicator not supported */
- break;
- case USB_PORT_FEAT_C_CONNECTION:
- dev_dbg(dev, " C_CONNECTION\n");
-			/*
-			 * Clears the driver's internal connect status change
-			 * flag.
-			 */
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- case USB_PORT_FEAT_C_RESET:
- dev_dbg(dev, " C_RESET\n");
- /*
- * Clears the driver's internal Port Reset Change flag.
- */
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- case USB_PORT_FEAT_C_ENABLE:
- dev_dbg(dev, " C_ENABLE\n");
- /*
- * Clears the driver's internal Port Enable/Disable
- * Change flag.
- */
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- dev_dbg(dev, " C_SUSPEND\n");
- /*
- * Clears the driver's internal Port Suspend Change
- * flag, which is set when resume signaling on the host
- * port is complete.
- */
- break;
- case USB_PORT_FEAT_C_OVER_CURRENT:
- dev_dbg(dev, " C_OVER_CURRENT\n");
-			/* Clears the driver's overcurrent change flag */
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- default:
- dev_dbg(dev, " UNKNOWN\n");
- return -EINVAL;
- }
- break;
- case GetHubDescriptor:
- dev_dbg(dev, "GetHubDescriptor\n");
- desc = (struct usb_hub_descriptor *)buf;
- desc->bDescLength = 9;
- desc->bDescriptorType = 0x29;
- desc->bNbrPorts = 1;
- desc->wHubCharacteristics = cpu_to_le16(0x08);
- desc->bPwrOn2PwrGood = 1;
- desc->bHubContrCurrent = 0;
- desc->u.hs.DeviceRemovable[0] = 0;
- desc->u.hs.DeviceRemovable[1] = 0xff;
- break;
- case GetHubStatus:
- dev_dbg(dev, "GetHubStatus\n");
- *(__le32 *) buf = 0;
- break;
- case GetPortStatus:
- dev_dbg(dev, "GetPortStatus\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
- usb_port_status = cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- port_status = 0;
-
- if (usb_port_status.connect_change) {
- port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
- dev_dbg(dev, " C_CONNECTION\n");
- }
-
- if (usb_port_status.port_enabled) {
- port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
- dev_dbg(dev, " C_ENABLE\n");
- }
-
- if (usb_port_status.connected) {
- port_status |= (1 << USB_PORT_FEAT_CONNECTION);
- dev_dbg(dev, " CONNECTION\n");
- }
-
- if (usb_port_status.port_enabled) {
- port_status |= (1 << USB_PORT_FEAT_ENABLE);
- dev_dbg(dev, " ENABLE\n");
- }
-
- if (usb_port_status.port_over_current) {
- port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
- dev_dbg(dev, " OVER_CURRENT\n");
- }
-
- if (usb_port_status.port_powered) {
- port_status |= (1 << USB_PORT_FEAT_POWER);
- dev_dbg(dev, " POWER\n");
- }
-
- if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
- port_status |= USB_PORT_STAT_HIGH_SPEED;
- dev_dbg(dev, " HIGHSPEED\n");
- } else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
- port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
- dev_dbg(dev, " LOWSPEED\n");
- }
-
- *((__le32 *) buf) = cpu_to_le32(port_status);
- break;
- case SetHubFeature:
- dev_dbg(dev, "SetHubFeature\n");
- /* No HUB features supported */
- break;
- case SetPortFeature:
- dev_dbg(dev, "SetPortFeature\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- switch (wValue) {
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(dev, " SUSPEND\n");
- return -EINVAL;
- case USB_PORT_FEAT_POWER:
- dev_dbg(dev, " POWER\n");
- /*
- * Program the port power bit to drive VBUS on the USB.
- */
- spin_lock_irqsave(&priv->lock, flags);
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
- cvmx_usbcx_hprt, prtpwr, 1);
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- case USB_PORT_FEAT_RESET:
- dev_dbg(dev, " RESET\n");
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_reset_port(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- case USB_PORT_FEAT_INDICATOR:
- dev_dbg(dev, " INDICATOR\n");
- /* Not supported */
- break;
- default:
- dev_dbg(dev, " UNKNOWN\n");
- return -EINVAL;
- }
- break;
- default:
- dev_dbg(dev, "Unknown root hub request\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct hc_driver octeon_hc_driver = {
- .description = "Octeon USB",
- .product_desc = "Octeon Host Controller",
- .hcd_priv_size = sizeof(struct octeon_hcd),
- .irq = octeon_usb_irq,
- .flags = HCD_MEMORY | HCD_USB2,
- .start = octeon_usb_start,
- .stop = octeon_usb_stop,
- .urb_enqueue = octeon_usb_urb_enqueue,
- .urb_dequeue = octeon_usb_urb_dequeue,
- .endpoint_disable = octeon_usb_endpoint_disable,
- .get_frame_number = octeon_usb_get_frame_number,
- .hub_status_data = octeon_usb_hub_status_data,
- .hub_control = octeon_usb_hub_control,
- .map_urb_for_dma = octeon_map_urb_for_dma,
- .unmap_urb_for_dma = octeon_unmap_urb_for_dma,
-};
-
-static int octeon_usb_probe(struct platform_device *pdev)
-{
- int status;
- int initialize_flags;
- int usb_num;
- struct resource *res_mem;
- struct device_node *usbn_node;
- int irq = platform_get_irq(pdev, 0);
- struct device *dev = &pdev->dev;
- struct octeon_hcd *priv;
- struct usb_hcd *hcd;
- u32 clock_rate = 48000000;
- bool is_crystal_clock = false;
- const char *clock_type;
- int i;
-
- if (dev->of_node == NULL) {
- dev_err(dev, "Error: empty of_node\n");
- return -ENXIO;
- }
- usbn_node = dev->of_node->parent;
-
- i = of_property_read_u32(usbn_node,
- "refclk-frequency", &clock_rate);
- if (i) {
- dev_err(dev, "No USBN \"refclk-frequency\"\n");
- return -ENXIO;
- }
- switch (clock_rate) {
- case 12000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case 24000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case 48000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
-		dev_err(dev, "Illegal USBN \"refclk-frequency\" %u\n",
- clock_rate);
- return -ENXIO;
-
- }
-
- i = of_property_read_string(usbn_node,
- "refclk-type", &clock_type);
-
- if (!i && strcmp("crystal", clock_type) == 0)
- is_crystal_clock = true;
-
- if (is_crystal_clock)
- initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- else
- initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(dev, "found no memory resource\n");
- return -ENXIO;
- }
- usb_num = (res_mem->start >> 44) & 1;
-
- if (irq < 0) {
- /* Defective device tree, but we know how to fix it. */
- irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
-
- irq = irq_create_mapping(NULL, hwirq);
- }
-
- /*
-	 * Set the DMA mask to 64 bits so we get buffers already translated for
- * DMA.
- */
- dev->coherent_dma_mask = ~0;
- dev->dma_mask = &dev->coherent_dma_mask;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
- if (!hcd) {
- dev_dbg(dev, "Failed to allocate memory for HCD\n");
- return -1;
- }
- hcd->uses_new_polling = 1;
- priv = (struct octeon_hcd *)hcd->hcd_priv;
-
- spin_lock_init(&priv->lock);
-
- priv->usb.init_flags = initialize_flags;
-
- /* Initialize the USB state structure */
- priv->usb.index = usb_num;
- INIT_LIST_HEAD(&priv->usb.idle_pipes);
- for (i = 0; i < ARRAY_SIZE(priv->usb.active_pipes); i++)
- INIT_LIST_HEAD(&priv->usb.active_pipes[i]);
-
- /* Due to an errata, CN31XX doesn't support DMA */
- if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
- priv->usb.init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
- /* Only use one channel with non DMA */
- priv->usb.idle_hardware_channels = 0x1;
- } else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
- /* CN5XXX have an errata with channel 3 */
- priv->usb.idle_hardware_channels = 0xf7;
- } else {
- priv->usb.idle_hardware_channels = 0xff;
- }
-
- status = cvmx_usb_initialize(dev, &priv->usb);
- if (status) {
- dev_dbg(dev, "USB initialization failed with %d\n", status);
- kfree(hcd);
- return -1;
- }
-
- status = usb_add_hcd(hcd, irq, 0);
- if (status) {
- dev_dbg(dev, "USB add HCD failed with %d\n", status);
- kfree(hcd);
- return -1;
- }
- device_wakeup_enable(hcd->self.controller);
-
- dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
-
- return 0;
-}
-
-static int octeon_usb_remove(struct platform_device *pdev)
-{
- int status;
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
- unsigned long flags;
-
- usb_remove_hcd(hcd);
- spin_lock_irqsave(&priv->lock, flags);
- status = cvmx_usb_shutdown(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
- if (status)
- dev_dbg(dev, "USB shutdown failed with %d\n", status);
-
- kfree(hcd);
-
- return 0;
-}
-
-static const struct of_device_id octeon_usb_match[] = {
- {
- .compatible = "cavium,octeon-5750-usbc",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, octeon_usb_match);
-
-static struct platform_driver octeon_usb_driver = {
- .driver = {
- .name = "OcteonUSB",
- .of_match_table = octeon_usb_match,
- },
- .probe = octeon_usb_probe,
- .remove = octeon_usb_remove,
-};
-
-static int __init octeon_usb_driver_init(void)
-{
- if (usb_disabled())
- return 0;
-
- return platform_driver_register(&octeon_usb_driver);
-}
-module_init(octeon_usb_driver_init);
-
-static void __exit octeon_usb_driver_exit(void)
-{
- if (usb_disabled())
- return;
-
- platform_driver_unregister(&octeon_usb_driver);
-}
-module_exit(octeon_usb_driver_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
-MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
deleted file mode 100644
index 70e7fa5e37d9..000000000000
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ /dev/null
@@ -1,1846 +0,0 @@
-/*
- * Octeon HCD hardware register definitions.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-
-#ifndef __OCTEON_HCD_H__
-#define __OCTEON_HCD_H__
-
-#include <asm/bitfield.h>
-
-#define CVMX_USBCXBASE 0x00016F0010000000ull
-#define CVMX_USBCXREG1(reg, bid) \
- (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
- ((bid) & 1) * 0x100000000000ull)
-#define CVMX_USBCXREG2(reg, bid, off) \
- (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
- (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32)
-
-#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid)
-#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid)
-#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid)
-#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid)
-#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid)
-#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid)
-#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid)
-#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid)
-#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid)
-#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid)
-#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid)
-#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid)
-#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid)
-#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off)
-#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid)
-#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off)
-#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off)
-#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off)
-#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off)
-#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid)
-#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid)
-#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid)
-#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid)
-#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid)
-
-#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
-#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
-
-#define CVMX_USBNXREG1(reg, bid) \
- (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
-#define CVMX_USBNXREG2(reg, bid) \
- (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
-
-#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
-#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
-#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
-#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
-
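/*
 * Illustrative sketch, not part of the deleted file: how the CVMX_USBCXREG1()
 * helper above resolves to a CSR address, assuming the usual Octeon
 * CVMX_ADD_IO_SEG() definition that folds the I/O segment bits into the
 * 64-bit address.
 *
 *   CVMX_USBCX_GINTSTS(0)
 *     -> CVMX_USBCXREG1(0x014, 0)
 *     -> CVMX_ADD_IO_SEG(0x00016F0010000000ull | 0x014)
 *          + ((0) & 1) * 0x100000000000ull
 *     -> <I/O segment base> | 0x00016F0010000014ull
 *
 * Controller 1 (bid = 1) lands 0x100000000000 bytes higher, and the
 * per-channel CVMX_USBCXREG2() variant adds (off & 7) * 32 on top of the
 * same per-controller stride.
 */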
-/**
- * cvmx_usbc#_gahbcfg
- *
- * Core AHB Configuration Register (GAHBCFG)
- *
- * This register can be used to configure the core after power-on or a change in
- * mode of operation. This register mainly contains AHB system-related
- * configuration parameters. The AHB is the processor interface to the O2P USB
- * core. In general, software need not know about this interface except to
- * program the values as specified.
- *
- * The application must program this register as part of the O2P USB core
- * initialization. Do not change this register after the initial programming.
- */
-union cvmx_usbcx_gahbcfg {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_gahbcfg_s
- * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
- * Software should set this bit to 0x1.
- * Indicates when the Periodic TxFIFO Empty Interrupt bit in the
- * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
- * bit is used only in Slave mode.
- * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
- * TxFIFO is half empty
- * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
- * TxFIFO is completely empty
- * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
- * Software should set this bit to 0x1.
- * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
- * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
- * This bit is used only in Slave mode.
- * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
- * Periodic TxFIFO is half empty
- * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
- * Periodic TxFIFO is completely empty
- * @dmaen: DMA Enable (DMAEn)
- * * 1'b0: Core operates in Slave mode
- * * 1'b1: Core operates in a DMA mode
- * @hbstlen: Burst Length/Type (HBstLen)
-	 *	This field has no effect and should be left as 0x0.
- * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk)
- * Software should set this field to 0x1.
- * The application uses this bit to mask or unmask the interrupt
- * line assertion to itself. Irrespective of this bit's setting,
- * the interrupt status registers are updated by the core.
- * * 1'b0: Mask the interrupt assertion to the application.
- * * 1'b1: Unmask the interrupt assertion to the application.
- */
- struct cvmx_usbcx_gahbcfg_s {
- __BITFIELD_FIELD(uint32_t reserved_9_31 : 23,
- __BITFIELD_FIELD(uint32_t ptxfemplvl : 1,
- __BITFIELD_FIELD(uint32_t nptxfemplvl : 1,
- __BITFIELD_FIELD(uint32_t reserved_6_6 : 1,
- __BITFIELD_FIELD(uint32_t dmaen : 1,
- __BITFIELD_FIELD(uint32_t hbstlen : 4,
- __BITFIELD_FIELD(uint32_t glblintrmsk : 1,
- ;)))))))
- } s;
-};
-
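/*
 * Illustrative sketch, not from the original file: the __BITFIELD_FIELD()
 * nesting above lists the fields MSB-first and lets <asm/bitfield.h> reorder
 * them on little-endian builds, so the union can be accessed either as a
 * whole 32-bit word or per field.  A hypothetical read-modify-write is shown
 * below; the cvmx_read64_uint32()/cvmx_write64_uint32() accessors are
 * assumed here purely for illustration.
 */
static void example_enable_dma(int bid)
{
	union cvmx_usbcx_gahbcfg gahbcfg;

	/* Read the whole 32-bit register into the union. */
	gahbcfg.u32 = cvmx_read64_uint32(CVMX_USBCX_GAHBCFG(bid));
	gahbcfg.s.dmaen = 1;		/* core operates in DMA mode */
	gahbcfg.s.glblintrmsk = 1;	/* unmask the interrupt line */
	/* Write the modified word back. */
	cvmx_write64_uint32(CVMX_USBCX_GAHBCFG(bid), gahbcfg.u32);
}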
-/**
- * cvmx_usbc#_ghwcfg3
- *
- * User HW Config3 Register (GHWCFG3)
- *
- * This register contains the configuration options of the O2P USB core.
- */
-union cvmx_usbcx_ghwcfg3 {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_ghwcfg3_s
- * @dfifodepth: DFIFO Depth (DfifoDepth)
- * This value is in terms of 32-bit words.
- * * Minimum value is 32
- * * Maximum value is 32768
- * @ahbphysync: AHB and PHY Synchronous (AhbPhySync)
- * Indicates whether AHB and PHY clocks are synchronous to
- * each other.
- * * 1'b0: No
- * * 1'b1: Yes
- * This bit is tied to 1.
- * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType)
- * * 1'b0: Asynchronous reset is used in the core
- * * 1'b1: Synchronous reset is used in the core
- * @optfeature: Optional Features Removed (OptFeature)
- * Indicates whether the User ID register, GPIO interface ports,
- * and SOF toggle and counter ports were removed for gate count
- * optimization.
- * @vendor_control_interface_support: Vendor Control Interface Support
- * * 1'b0: Vendor Control Interface is not available on the core.
- * * 1'b1: Vendor Control Interface is available.
- * @i2c_selection: I2C Selection
- * * 1'b0: I2C Interface is not available on the core.
- * * 1'b1: I2C Interface is available on the core.
- * @otgen: OTG Function Enabled (OtgEn)
- * The application uses this bit to indicate the O2P USB core's
- * OTG capabilities.
- * * 1'b0: Not OTG capable
- * * 1'b1: OTG Capable
- * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth)
- * * 3'b000: 4 bits
- * * 3'b001: 5 bits
- * * 3'b010: 6 bits
- * * 3'b011: 7 bits
- * * 3'b100: 8 bits
- * * 3'b101: 9 bits
- * * 3'b110: 10 bits
- * * Others: Reserved
- * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth)
- * * 4'b0000: 11 bits
- * * 4'b0001: 12 bits
- * - ...
- * * 4'b1000: 19 bits
- * * Others: Reserved
- */
- struct cvmx_usbcx_ghwcfg3_s {
- __BITFIELD_FIELD(uint32_t dfifodepth : 16,
- __BITFIELD_FIELD(uint32_t reserved_13_15 : 3,
- __BITFIELD_FIELD(uint32_t ahbphysync : 1,
- __BITFIELD_FIELD(uint32_t rsttype : 1,
- __BITFIELD_FIELD(uint32_t optfeature : 1,
- __BITFIELD_FIELD(uint32_t vendor_control_interface_support : 1,
- __BITFIELD_FIELD(uint32_t i2c_selection : 1,
- __BITFIELD_FIELD(uint32_t otgen : 1,
- __BITFIELD_FIELD(uint32_t pktsizewidth : 3,
- __BITFIELD_FIELD(uint32_t xfersizewidth : 4,
- ;))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_gintmsk
- *
- * Core Interrupt Mask Register (GINTMSK)
- *
- * This register works with the Core Interrupt register to interrupt the
- * application. When an interrupt bit is masked, the interrupt associated with
- * that bit will not be generated. However, the Core Interrupt (GINTSTS)
- * register bit corresponding to that interrupt will still be set.
- * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_gintmsk {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_gintmsk_s
- * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
- * (WkUpIntMsk)
- * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask
- * (SessReqIntMsk)
- * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk)
- * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk)
- * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk)
- * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk)
- * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk)
- * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk)
- * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk)
- * Incomplete Isochronous OUT Transfer Mask
- * (incompISOOUTMsk)
- * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask
- * (incompISOINMsk)
- * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk)
- * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk)
- * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk)
- * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk)
- * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask
- * (ISOOutDropMsk)
- * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk)
- * @usbrstmsk: USB Reset Mask (USBRstMsk)
- * @usbsuspmsk: USB Suspend Mask (USBSuspMsk)
- * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk)
- * @i2cint: I2C Interrupt Mask (I2CINT)
- * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk)
- * I2C Carkit Interrupt Mask (I2CCKINTMsk)
- * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk)
- * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask
- * (GINNakEffMsk)
- * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk)
- * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk)
- * @sofmsk: Start of (micro)Frame Mask (SofMsk)
- * @otgintmsk: OTG Interrupt Mask (OTGIntMsk)
- * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
- */
- struct cvmx_usbcx_gintmsk_s {
- __BITFIELD_FIELD(uint32_t wkupintmsk : 1,
- __BITFIELD_FIELD(uint32_t sessreqintmsk : 1,
- __BITFIELD_FIELD(uint32_t disconnintmsk : 1,
- __BITFIELD_FIELD(uint32_t conidstschngmsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_27_27 : 1,
- __BITFIELD_FIELD(uint32_t ptxfempmsk : 1,
- __BITFIELD_FIELD(uint32_t hchintmsk : 1,
- __BITFIELD_FIELD(uint32_t prtintmsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_23_23 : 1,
- __BITFIELD_FIELD(uint32_t fetsuspmsk : 1,
- __BITFIELD_FIELD(uint32_t incomplpmsk : 1,
- __BITFIELD_FIELD(uint32_t incompisoinmsk : 1,
- __BITFIELD_FIELD(uint32_t oepintmsk : 1,
- __BITFIELD_FIELD(uint32_t inepintmsk : 1,
- __BITFIELD_FIELD(uint32_t epmismsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_16_16 : 1,
- __BITFIELD_FIELD(uint32_t eopfmsk : 1,
- __BITFIELD_FIELD(uint32_t isooutdropmsk : 1,
- __BITFIELD_FIELD(uint32_t enumdonemsk : 1,
- __BITFIELD_FIELD(uint32_t usbrstmsk : 1,
- __BITFIELD_FIELD(uint32_t usbsuspmsk : 1,
- __BITFIELD_FIELD(uint32_t erlysuspmsk : 1,
- __BITFIELD_FIELD(uint32_t i2cint : 1,
- __BITFIELD_FIELD(uint32_t ulpickintmsk : 1,
- __BITFIELD_FIELD(uint32_t goutnakeffmsk : 1,
- __BITFIELD_FIELD(uint32_t ginnakeffmsk : 1,
- __BITFIELD_FIELD(uint32_t nptxfempmsk : 1,
- __BITFIELD_FIELD(uint32_t rxflvlmsk : 1,
- __BITFIELD_FIELD(uint32_t sofmsk : 1,
- __BITFIELD_FIELD(uint32_t otgintmsk : 1,
- __BITFIELD_FIELD(uint32_t modemismsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_0_0 : 1,
- ;))))))))))))))))))))))))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_gintsts
- *
- * Core Interrupt Register (GINTSTS)
- *
- * This register interrupts the application for system-level events in the
- * current mode of operation (Device mode or Host mode). It is shown in
- * Interrupt. Some of the bits in this register are valid only in Host mode,
- * while others are valid in Device mode only. This register also indicates the
- * current mode of operation. In order to clear the interrupt status bits of
- * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status
- * interrupts are read only; once software reads from or writes to the FIFO
- * while servicing these interrupts, FIFO interrupt conditions are cleared
- * automatically.
- */
-union cvmx_usbcx_gintsts {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_gintsts_s
- * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
- * In Device mode, this interrupt is asserted when a resume is
- * detected on the USB. In Host mode, this interrupt is asserted
- * when a remote wakeup is detected on the USB.
- * For more information on how to use this interrupt, see "Partial
- * Power-Down and Clock Gating Programming Model" on
- * page 353.
- * @sessreqint: Session Request/New Session Detected Interrupt
- * (SessReqInt)
- * In Host mode, this interrupt is asserted when a session request
- * is detected from the device. In Device mode, this interrupt is
- * asserted when the utmiotg_bvalid signal goes high.
- * For more information on how to use this interrupt, see "Partial
- * Power-Down and Clock Gating Programming Model" on
- * page 353.
- * @disconnint: Disconnect Detected Interrupt (DisconnInt)
- * Asserted when a device disconnect is detected.
- * @conidstschng: Connector ID Status Change (ConIDStsChng)
- * The core sets this bit when there is a change in connector ID
- * status.
- * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp)
- * Asserted when the Periodic Transmit FIFO is either half or
- * completely empty and there is space for at least one entry to be
- * written in the Periodic Request Queue. The half or completely
- * empty status is determined by the Periodic TxFIFO Empty Level
- * bit in the Core AHB Configuration register
- * (GAHBCFG.PTxFEmpLvl).
- * @hchint: Host Channels Interrupt (HChInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the channels of the core (in Host mode). The
- * application must read the Host All Channels Interrupt (HAINT)
- * register to determine the exact number of the channel on which
- * the interrupt occurred, and then read the corresponding Host
- * Channel-n Interrupt (HCINTn) register to determine the exact
- * cause of the interrupt. The application must clear the
- * appropriate status bit in the HCINTn register to clear this bit.
- * @prtint: Host Port Interrupt (PrtInt)
- * The core sets this bit to indicate a change in port status of
- * one of the O2P USB core ports in Host mode. The application must
- * read the Host Port Control and Status (HPRT) register to
- * determine the exact event that caused this interrupt. The
- * application must clear the appropriate status bit in the Host
- * Port Control and Status register to clear this bit.
- * @fetsusp: Data Fetch Suspended (FetSusp)
- * This interrupt is valid only in DMA mode. This interrupt
- * indicates that the core has stopped fetching data for IN
- * endpoints due to the unavailability of TxFIFO space or Request
- * Queue space. This interrupt is used by the application for an
- * endpoint mismatch algorithm.
- * @incomplp: Incomplete Periodic Transfer (incomplP)
- * In Host mode, the core sets this interrupt bit when there are
- * incomplete periodic transactions still pending which are
- * scheduled for the current microframe.
- * Incomplete Isochronous OUT Transfer (incompISOOUT)
-	 *	In Device mode, the core sets this interrupt to indicate that
- * there is at least one isochronous OUT endpoint on which the
- * transfer is not completed in the current microframe. This
- * interrupt is asserted along with the End of Periodic Frame
- * Interrupt (EOPF) bit in this register.
- * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
- * The core sets this interrupt to indicate that there is at least
- * one isochronous IN endpoint on which the transfer is not
- * completed in the current microframe. This interrupt is asserted
- * along with the End of Periodic Frame Interrupt (EOPF) bit in
- * this register.
- * @oepint: OUT Endpoints Interrupt (OEPInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the OUT endpoints of the core (in Device mode). The
- * application must read the Device All Endpoints Interrupt
- * (DAINT) register to determine the exact number of the OUT
- * endpoint on which the interrupt occurred, and then read the
- * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
- * register to determine the exact cause of the interrupt. The
- * application must clear the appropriate status bit in the
- * corresponding DOEPINTn register to clear this bit.
- * @iepint: IN Endpoints Interrupt (IEPInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the IN endpoints of the core (in Device mode). The
- * application must read the Device All Endpoints Interrupt
- * (DAINT) register to determine the exact number of the IN
- * endpoint on which the interrupt occurred, and then read the
- * corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
- * register to determine the exact cause of the interrupt. The
- * application must clear the appropriate status bit in the
- * corresponding DIEPINTn register to clear this bit.
- * @epmis: Endpoint Mismatch Interrupt (EPMis)
- * Indicates that an IN token has been received for a non-periodic
- * endpoint, but the data for another endpoint is present in the
- * top of the Non-Periodic Transmit FIFO and the IN endpoint
- * mismatch count programmed by the application has expired.
- * @eopf: End of Periodic Frame Interrupt (EOPF)
- * Indicates that the period specified in the Periodic Frame
- * Interval field of the Device Configuration register
- * (DCFG.PerFrInt) has been reached in the current microframe.
- * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
- * The core sets this bit when it fails to write an isochronous OUT
- * packet into the RxFIFO because the RxFIFO doesn't have
- * enough space to accommodate a maximum packet size packet
- * for the isochronous OUT endpoint.
- * @enumdone: Enumeration Done (EnumDone)
- * The core sets this bit to indicate that speed enumeration is
- * complete. The application must read the Device Status (DSTS)
- * register to obtain the enumerated speed.
- * @usbrst: USB Reset (USBRst)
- * The core sets this bit to indicate that a reset is detected on
- * the USB.
- * @usbsusp: USB Suspend (USBSusp)
- * The core sets this bit to indicate that a suspend was detected
- * on the USB. The core enters the Suspended state when there
- * is no activity on the phy_line_state_i signal for an extended
- * period of time.
- * @erlysusp: Early Suspend (ErlySusp)
- * The core sets this bit to indicate that an Idle state has been
- * detected on the USB for 3 ms.
- * @i2cint: I2C Interrupt (I2CINT)
- * This bit is always 0x0.
- * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
- * This bit is always 0x0.
- * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
- * Indicates that the Set Global OUT NAK bit in the Device Control
- * register (DCTL.SGOUTNak), set by the application, has taken
- * effect in the core. This bit can be cleared by writing the Clear
- * Global OUT NAK bit in the Device Control register
- * (DCTL.CGOUTNak).
- * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
- * Indicates that the Set Global Non-Periodic IN NAK bit in the
- * Device Control register (DCTL.SGNPInNak), set by the
- * application, has taken effect in the core. That is, the core has
- * sampled the Global IN NAK bit set by the application. This bit
- * can be cleared by clearing the Clear Global Non-Periodic IN
- * NAK bit in the Device Control register (DCTL.CGNPInNak).
- * This interrupt does not necessarily mean that a NAK handshake
- * is sent out on the USB. The STALL bit takes precedence over
- * the NAK bit.
- * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
- * This interrupt is asserted when the Non-Periodic TxFIFO is
- * either half or completely empty, and there is space for at least
- * one entry to be written to the Non-Periodic Transmit Request
- * Queue. The half or completely empty status is determined by
- * the Non-Periodic TxFIFO Empty Level bit in the Core AHB
- * Configuration register (GAHBCFG.NPTxFEmpLvl).
- * @rxflvl: RxFIFO Non-Empty (RxFLvl)
- * Indicates that there is at least one packet pending to be read
- * from the RxFIFO.
- * @sof: Start of (micro)Frame (Sof)
- * In Host mode, the core sets this bit to indicate that an SOF
- * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
- * USB. The application must write a 1 to this bit to clear the
- * interrupt.
-	 *	In Device mode, the core sets this bit to indicate that an
- * SOF token has been received on the USB. The application can read
- * the Device Status register to get the current (micro)frame
- * number. This interrupt is seen only when the core is operating
- * at either HS or FS.
- * @otgint: OTG Interrupt (OTGInt)
- * The core sets this bit to indicate an OTG protocol event. The
- * application must read the OTG Interrupt Status (GOTGINT)
- * register to determine the exact event that caused this
- * interrupt. The application must clear the appropriate status bit
- * in the GOTGINT register to clear this bit.
- * @modemis: Mode Mismatch Interrupt (ModeMis)
- * The core sets this bit when the application is trying to access:
- * * A Host mode register, when the core is operating in Device
- * mode
- * * A Device mode register, when the core is operating in Host
- * mode
- * The register access is completed on the AHB with an OKAY
- * response, but is ignored by the core internally and doesn't
- * affect the operation of the core.
- * @curmod: Current Mode of Operation (CurMod)
- * Indicates the current mode of operation.
- * * 1'b0: Device mode
- * * 1'b1: Host mode
- */
- struct cvmx_usbcx_gintsts_s {
- __BITFIELD_FIELD(uint32_t wkupint : 1,
- __BITFIELD_FIELD(uint32_t sessreqint : 1,
- __BITFIELD_FIELD(uint32_t disconnint : 1,
- __BITFIELD_FIELD(uint32_t conidstschng : 1,
- __BITFIELD_FIELD(uint32_t reserved_27_27 : 1,
- __BITFIELD_FIELD(uint32_t ptxfemp : 1,
- __BITFIELD_FIELD(uint32_t hchint : 1,
- __BITFIELD_FIELD(uint32_t prtint : 1,
- __BITFIELD_FIELD(uint32_t reserved_23_23 : 1,
- __BITFIELD_FIELD(uint32_t fetsusp : 1,
- __BITFIELD_FIELD(uint32_t incomplp : 1,
- __BITFIELD_FIELD(uint32_t incompisoin : 1,
- __BITFIELD_FIELD(uint32_t oepint : 1,
- __BITFIELD_FIELD(uint32_t iepint : 1,
- __BITFIELD_FIELD(uint32_t epmis : 1,
- __BITFIELD_FIELD(uint32_t reserved_16_16 : 1,
- __BITFIELD_FIELD(uint32_t eopf : 1,
- __BITFIELD_FIELD(uint32_t isooutdrop : 1,
- __BITFIELD_FIELD(uint32_t enumdone : 1,
- __BITFIELD_FIELD(uint32_t usbrst : 1,
- __BITFIELD_FIELD(uint32_t usbsusp : 1,
- __BITFIELD_FIELD(uint32_t erlysusp : 1,
- __BITFIELD_FIELD(uint32_t i2cint : 1,
- __BITFIELD_FIELD(uint32_t ulpickint : 1,
- __BITFIELD_FIELD(uint32_t goutnakeff : 1,
- __BITFIELD_FIELD(uint32_t ginnakeff : 1,
- __BITFIELD_FIELD(uint32_t nptxfemp : 1,
- __BITFIELD_FIELD(uint32_t rxflvl : 1,
- __BITFIELD_FIELD(uint32_t sof : 1,
- __BITFIELD_FIELD(uint32_t otgint : 1,
- __BITFIELD_FIELD(uint32_t modemis : 1,
- __BITFIELD_FIELD(uint32_t curmod : 1,
- ;))))))))))))))))))))))))))))))))
- } s;
-};
-
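/*
 * Illustrative sketch, not part of the deleted driver: since most GINTSTS
 * bits are R_SS_WC, an event is acknowledged by writing a 1 back to the bit
 * that was handled rather than clearing it to 0.  Accessor names are assumed
 * for illustration only.
 */
static void example_ack_sof(int bid)
{
	union cvmx_usbcx_gintsts gintsts;

	gintsts.u32 = cvmx_read64_uint32(CVMX_USBCX_GINTSTS(bid));
	if (gintsts.s.sof) {
		union cvmx_usbcx_gintsts clear;

		/* Write 1 only to the bit being acknowledged. */
		clear.u32 = 0;
		clear.s.sof = 1;
		cvmx_write64_uint32(CVMX_USBCX_GINTSTS(bid), clear.u32);
	}
}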
-/**
- * cvmx_usbc#_gnptxfsiz
- *
- * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ)
- *
- * The application can program the RAM size and the memory start address for the
- * Non-Periodic TxFIFO.
- */
-union cvmx_usbcx_gnptxfsiz {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_gnptxfsiz_s
- * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
- * This value is in terms of 32-bit words.
- * Minimum value is 16
- * Maximum value is 32768
- * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr)
- * This field contains the memory start address for Non-Periodic
- * Transmit FIFO RAM.
- */
- struct cvmx_usbcx_gnptxfsiz_s {
- __BITFIELD_FIELD(uint32_t nptxfdep : 16,
- __BITFIELD_FIELD(uint32_t nptxfstaddr : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_gnptxsts
- *
- * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS)
- *
- * This read-only register contains the free space information for the
- * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
- */
-union cvmx_usbcx_gnptxsts {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_gnptxsts_s
- * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
- * Entry in the Non-Periodic Tx Request Queue that is currently
- * being processed by the MAC.
- * * Bits [30:27]: Channel/endpoint number
- * * Bits [26:25]:
- * - 2'b00: IN/OUT token
- * - 2'b01: Zero-length transmit packet (device IN/host OUT)
- * - 2'b10: PING/CSPLIT token
- * - 2'b11: Channel halt command
- * * Bit [24]: Terminate (last entry for selected channel/endpoint)
- * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available
- * (NPTxQSpcAvail)
- * Indicates the amount of free space available in the Non-
- * Periodic Transmit Request Queue. This queue holds both IN
- * and OUT requests in Host mode. Device mode has only IN
- * requests.
- * * 8'h0: Non-Periodic Transmit Request Queue is full
- * * 8'h1: 1 location available
- * * 8'h2: 2 locations available
- * * n: n locations available (0..8)
- * * Others: Reserved
- * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail)
- * Indicates the amount of free space available in the Non-
- * Periodic TxFIFO.
- * Values are in terms of 32-bit words.
- * * 16'h0: Non-Periodic TxFIFO is full
- * * 16'h1: 1 word available
- * * 16'h2: 2 words available
- * * 16'hn: n words available (where 0..32768)
- * * 16'h8000: 32768 words available
- * * Others: Reserved
- */
- struct cvmx_usbcx_gnptxsts_s {
- __BITFIELD_FIELD(uint32_t reserved_31_31 : 1,
- __BITFIELD_FIELD(uint32_t nptxqtop : 7,
- __BITFIELD_FIELD(uint32_t nptxqspcavail : 8,
- __BITFIELD_FIELD(uint32_t nptxfspcavail : 16,
- ;))))
- } s;
-};
-
-/**
- * cvmx_usbc#_grstctl
- *
- * Core Reset Register (GRSTCTL)
- *
- * The application uses this register to reset various hardware features inside
- * the core.
- */
-union cvmx_usbcx_grstctl {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_grstctl_s
- * @ahbidle: AHB Master Idle (AHBIdle)
- * Indicates that the AHB Master State Machine is in the IDLE
- * condition.
- * @dmareq: DMA Request Signal (DMAReq)
- * Indicates that the DMA request is in progress. Used for debug.
- * @txfnum: TxFIFO Number (TxFNum)
- * This is the FIFO number that must be flushed using the TxFIFO
- * Flush bit. This field must not be changed until the core clears
- * the TxFIFO Flush bit.
- * * 5'h0: Non-Periodic TxFIFO flush
- * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic
- * TxFIFO flush in Host mode
- * * 5'h2: Periodic TxFIFO 2 flush in Device mode
- * - ...
- * * 5'hF: Periodic TxFIFO 15 flush in Device mode
- * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the
- * core
- * @txfflsh: TxFIFO Flush (TxFFlsh)
- * This bit selectively flushes a single or all transmit FIFOs, but
- * cannot do so if the core is in the midst of a transaction.
- * The application must only write this bit after checking that the
- * core is neither writing to the TxFIFO nor reading from the
- * TxFIFO.
- * The application must wait until the core clears this bit before
- * performing any operations. This bit takes 8 clocks (of phy_clk
- * or hclk, whichever is slower) to clear.
- * @rxfflsh: RxFIFO Flush (RxFFlsh)
- * The application can flush the entire RxFIFO using this bit, but
- * must first ensure that the core is not in the middle of a
- * transaction.
- * The application must only write to this bit after checking that
- * the core is neither reading from the RxFIFO nor writing to the
- * RxFIFO.
- * The application must wait until the bit is cleared before
- * performing any other operations. This bit will take 8 clocks
- * (slowest of PHY or AHB clock) to clear.
- * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh)
- * The application writes this bit to flush the IN Token Sequence
- * Learning Queue.
- * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst)
- * The application writes this bit to reset the (micro)frame number
- * counter inside the core. When the (micro)frame counter is reset,
- * the subsequent SOF sent out by the core will have a
- * (micro)frame number of 0.
- * @hsftrst: HClk Soft Reset (HSftRst)
- * The application uses this bit to flush the control logic in the
- * AHB Clock domain. Only AHB Clock Domain pipelines are reset.
- * * FIFOs are not flushed with this bit.
- * * All state machines in the AHB clock domain are reset to the
- * Idle state after terminating the transactions on the AHB,
- * following the protocol.
- * * CSR control bits used by the AHB clock domain state
- * machines are cleared.
- * * To clear this interrupt, status mask bits that control the
- * interrupt status and are generated by the AHB clock domain
- * state machine are cleared.
- * * Because interrupt status bits are not cleared, the application
- * can get the status of any core events that occurred after it set
- * this bit.
- * This is a self-clearing bit that the core clears after all
- * necessary logic is reset in the core. This may take several
- * clocks, depending on the core's current state.
- * @csftrst: Core Soft Reset (CSftRst)
- * Resets the hclk and phy_clock domains as follows:
- * * Clears the interrupts and all the CSR registers except the
- * following register bits:
- * - PCGCCTL.RstPdwnModule
- * - PCGCCTL.GateHclk
- * - PCGCCTL.PwrClmp
- * - PCGCCTL.StopPPhyLPwrClkSelclk
- * - GUSBCFG.PhyLPwrClkSel
- * - GUSBCFG.DDRSel
- * - GUSBCFG.PHYSel
- * - GUSBCFG.FSIntf
- * - GUSBCFG.ULPI_UTMI_Sel
- * - GUSBCFG.PHYIf
- * - HCFG.FSLSPclkSel
- * - DCFG.DevSpd
- * * All module state machines (except the AHB Slave Unit) are
- * reset to the IDLE state, and all the transmit FIFOs and the
- * receive FIFO are flushed.
- * * Any transactions on the AHB Master are terminated as soon
- * as possible, after gracefully completing the last data phase of
- * an AHB transfer. Any transactions on the USB are terminated
- * immediately.
- * The application can write to this bit any time it wants to reset
- * the core. This is a self-clearing bit and the core clears this
- * bit after all the necessary logic is reset in the core, which
- * may take several clocks, depending on the current state of the
- * core. Once this bit is cleared software should wait at least 3
-	 *	core. Once this bit is cleared, software should wait at least 3
-	 *	PHY clocks before doing any access to the PHY domain
-	 *	(synchronization delay). Software should also check that
- * starting any operation.
- * Typically software reset is used during software development
- * and also when you dynamically change the PHY selection bits
- * in the USB configuration registers listed above. When you
- * change the PHY, the corresponding clock for the PHY is
- * selected and used in the PHY domain. Once a new clock is
- * selected, the PHY domain has to be reset for proper operation.
- */
- struct cvmx_usbcx_grstctl_s {
- __BITFIELD_FIELD(uint32_t ahbidle : 1,
- __BITFIELD_FIELD(uint32_t dmareq : 1,
- __BITFIELD_FIELD(uint32_t reserved_11_29 : 19,
- __BITFIELD_FIELD(uint32_t txfnum : 5,
- __BITFIELD_FIELD(uint32_t txfflsh : 1,
- __BITFIELD_FIELD(uint32_t rxfflsh : 1,
- __BITFIELD_FIELD(uint32_t intknqflsh : 1,
- __BITFIELD_FIELD(uint32_t frmcntrrst : 1,
- __BITFIELD_FIELD(uint32_t hsftrst : 1,
- __BITFIELD_FIELD(uint32_t csftrst : 1,
- ;))))))))))
- } s;
-};
-
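/*
 * Illustrative sketch, not part of the deleted driver: the CSftRst/AHBIdle
 * description above corresponds to the usual three-step core soft-reset
 * sequence.  Timeouts are omitted for brevity and the accessor names are
 * assumed for illustration only.
 */
static void example_core_soft_reset(int bid)
{
	union cvmx_usbcx_grstctl grstctl;

	/* 1. Request the core soft reset. */
	grstctl.u32 = 0;
	grstctl.s.csftrst = 1;
	cvmx_write64_uint32(CVMX_USBCX_GRSTCTL(bid), grstctl.u32);

	/* 2. The bit is self-clearing; wait for the core to finish. */
	do {
		grstctl.u32 = cvmx_read64_uint32(CVMX_USBCX_GRSTCTL(bid));
	} while (grstctl.s.csftrst);

	/* 3. Wait for the AHB master to go idle before touching the PHY domain. */
	do {
		grstctl.u32 = cvmx_read64_uint32(CVMX_USBCX_GRSTCTL(bid));
	} while (!grstctl.s.ahbidle);
}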
-/**
- * cvmx_usbc#_grxfsiz
- *
- * Receive FIFO Size Register (GRXFSIZ)
- *
- * The application can program the RAM size that must be allocated to the
- * RxFIFO.
- */
-union cvmx_usbcx_grxfsiz {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_grxfsiz_s
- * @rxfdep: RxFIFO Depth (RxFDep)
- * This value is in terms of 32-bit words.
- * * Minimum value is 16
- * * Maximum value is 32768
- */
- struct cvmx_usbcx_grxfsiz_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t rxfdep : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_grxstsph
- *
- * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
- *
- * A read to the Receive Status Read and Pop register returns the top data
- * entry and additionally pops it out of the RxFIFO.
- * This Description is only valid when the core is in Host Mode. For Device Mode
- * use USBC_GRXSTSPD instead.
- * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
- * same offset in the O2P USB core. The offset difference shown in this
- * document is for software clarity and is actually ignored by the
- * hardware.
- */
-union cvmx_usbcx_grxstsph {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_grxstsph_s
- * @pktsts: Packet Status (PktSts)
- * Indicates the status of the received packet
- * * 4'b0010: IN data packet received
- * * 4'b0011: IN transfer completed (triggers an interrupt)
- * * 4'b0101: Data toggle error (triggers an interrupt)
- * * 4'b0111: Channel halted (triggers an interrupt)
- * * Others: Reserved
- * @dpid: Data PID (DPID)
- * * 2'b00: DATA0
- * * 2'b10: DATA1
- * * 2'b01: DATA2
- * * 2'b11: MDATA
- * @bcnt: Byte Count (BCnt)
- * Indicates the byte count of the received IN data packet
- * @chnum: Channel Number (ChNum)
- * Indicates the channel number to which the current received
- * packet belongs.
- */
- struct cvmx_usbcx_grxstsph_s {
- __BITFIELD_FIELD(uint32_t reserved_21_31 : 11,
- __BITFIELD_FIELD(uint32_t pktsts : 4,
- __BITFIELD_FIELD(uint32_t dpid : 2,
- __BITFIELD_FIELD(uint32_t bcnt : 11,
- __BITFIELD_FIELD(uint32_t chnum : 4,
- ;)))))
- } s;
-};
-
-/**
- * cvmx_usbc#_gusbcfg
- *
- * Core USB Configuration Register (GUSBCFG)
- *
- * This register can be used to configure the core after power-on or a change
- * to Host mode or Device mode. It contains USB and USB-PHY related
- * configuration parameters. The application must program this register before
- * starting any transactions on either the AHB or the USB. Do not make changes
- * to this register after the initial programming.
- */
-union cvmx_usbcx_gusbcfg {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_gusbcfg_s
- * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
- * This bit is always 0x0.
- * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
- * Software should set this bit to 0x0.
- * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
- * FS and LS modes, the PHY can usually operate on a 48-MHz
- * clock to save power.
- * * 1'b0: 480-MHz Internal PLL clock
- * * 1'b1: 48-MHz External Clock
- * In 480 MHz mode, the UTMI interface operates at either 60 or
- * 30-MHz, depending upon whether 8- or 16-bit data width is
- * selected. In 48-MHz mode, the UTMI interface operates at 48
- * MHz in FS mode and at either 48 or 6 MHz in LS mode
- * (depending on the PHY vendor).
- * This bit drives the utmi_fsls_low_power core output signal, and
- * is valid only for UTMI+ PHYs.
- * @usbtrdtim: USB Turnaround Time (USBTrdTim)
- * Sets the turnaround time in PHY clocks.
- * Specifies the response time for a MAC request to the Packet
- * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
- * This must be programmed to 0x5.
- * @hnpcap: HNP-Capable (HNPCap)
- * This bit is always 0x0.
- * @srpcap: SRP-Capable (SRPCap)
- * This bit is always 0x0.
- * @ddrsel: ULPI DDR Select (DDRSel)
- * Software should set this bit to 0x0.
- * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
- * Software should set this bit to 0x0.
- * @fsintf: Full-Speed Serial Interface Select (FSIntf)
- * Software should set this bit to 0x0.
- * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel)
- * This bit is always 0x0.
- * @phyif: PHY Interface (PHYIf)
- * This bit is always 0x1.
- * @toutcal: HS/FS Timeout Calibration (TOutCal)
- * The number of PHY clocks that the application programs in this
- * field is added to the high-speed/full-speed interpacket timeout
- * duration in the core to account for any additional delays
- * introduced by the PHY. This may be required, since the delay
- * introduced by the PHY in generating the linestate condition may
- * vary from one PHY to another.
- * The USB standard timeout value for high-speed operation is
- * 736 to 816 (inclusive) bit times. The USB standard timeout
- * value for full-speed operation is 16 to 18 (inclusive) bit
- * times. The application must program this field based on the
- * speed of enumeration. The number of bit times added per PHY
- * clock are:
- * High-speed operation:
- * * One 30-MHz PHY clock = 16 bit times
- * * One 60-MHz PHY clock = 8 bit times
- * Full-speed operation:
- * * One 30-MHz PHY clock = 0.4 bit times
- * * One 60-MHz PHY clock = 0.2 bit times
- * * One 48-MHz PHY clock = 0.25 bit times
- */
- struct cvmx_usbcx_gusbcfg_s {
- __BITFIELD_FIELD(uint32_t reserved_17_31 : 15,
- __BITFIELD_FIELD(uint32_t otgi2csel : 1,
- __BITFIELD_FIELD(uint32_t phylpwrclksel : 1,
- __BITFIELD_FIELD(uint32_t reserved_14_14 : 1,
- __BITFIELD_FIELD(uint32_t usbtrdtim : 4,
- __BITFIELD_FIELD(uint32_t hnpcap : 1,
- __BITFIELD_FIELD(uint32_t srpcap : 1,
- __BITFIELD_FIELD(uint32_t ddrsel : 1,
- __BITFIELD_FIELD(uint32_t physel : 1,
- __BITFIELD_FIELD(uint32_t fsintf : 1,
- __BITFIELD_FIELD(uint32_t ulpi_utmi_sel : 1,
- __BITFIELD_FIELD(uint32_t phyif : 1,
- __BITFIELD_FIELD(uint32_t toutcal : 3,
- ;)))))))))))))
- } s;
-};
-
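/*
 * Illustrative sketch, not part of the deleted driver: per the GUSBCFG notes
 * above, the only fields software is expected to program are the turnaround
 * time (0x5) and the clock/PHY selection bits, which the comments pin to
 * fixed values.  Accessor names are assumed for illustration only.
 */
static void example_program_gusbcfg(int bid)
{
	union cvmx_usbcx_gusbcfg gusbcfg;

	gusbcfg.u32 = cvmx_read64_uint32(CVMX_USBCX_GUSBCFG(bid));
	gusbcfg.s.usbtrdtim = 0x5;	/* required turnaround time */
	gusbcfg.s.phylpwrclksel = 0;	/* 480-MHz internal PLL clock */
	cvmx_write64_uint32(CVMX_USBCX_GUSBCFG(bid), gusbcfg.u32);
}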
-/**
- * cvmx_usbc#_haint
- *
- * Host All Channels Interrupt Register (HAINT)
- *
- * When a significant event occurs on a channel, the Host All Channels Interrupt
- * register interrupts the application using the Host Channels Interrupt bit of
- * the Core Interrupt register (GINTSTS.HChInt). This is shown in Interrupt.
- * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in
- * this register are set and cleared when the application sets and clears bits
- * in the corresponding Host Channel-n Interrupt register.
- */
-union cvmx_usbcx_haint {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_haint_s
- * @haint: Channel Interrupts (HAINT)
- * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
- */
- struct cvmx_usbcx_haint_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t haint : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_haintmsk
- *
- * Host All Channels Interrupt Mask Register (HAINTMSK)
- *
- * The Host All Channel Interrupt Mask register works with the Host All Channel
- * Interrupt register to interrupt the application when an event occurs on a
- * channel. There is one interrupt mask bit per channel, up to a maximum of 16
- * bits.
- * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_haintmsk {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_haintmsk_s
- * @haintmsk: Channel Interrupt Mask (HAINTMsk)
- * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
- */
- struct cvmx_usbcx_haintmsk_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t haintmsk : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcchar#
- *
- * Host Channel-n Characteristics Register (HCCHAR)
- *
- */
-union cvmx_usbcx_hccharx {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hccharx_s
- * @chena: Channel Enable (ChEna)
- * This field is set by the application and cleared by the OTG
- * host.
- * * 1'b0: Channel disabled
- * * 1'b1: Channel enabled
- * @chdis: Channel Disable (ChDis)
- * The application sets this bit to stop transmitting/receiving
- * data on a channel, even before the transfer for that channel is
- * complete. The application must wait for the Channel Disabled
- * interrupt before treating the channel as disabled.
- * @oddfrm: Odd Frame (OddFrm)
- * This field is set (reset) by the application to indicate that
- * the OTG host must perform a transfer in an odd (micro)frame.
-	 *	This field is applicable only for periodic (isochronous and
- * interrupt) transactions.
- * * 1'b0: Even (micro)frame
- * * 1'b1: Odd (micro)frame
- * @devaddr: Device Address (DevAddr)
- * This field selects the specific device serving as the data
- * source or sink.
- * @ec: Multi Count (MC) / Error Count (EC)
- * When the Split Enable bit of the Host Channel-n Split Control
- * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates
- * to the host the number of transactions that should be executed
- * per microframe for this endpoint.
- * * 2'b00: Reserved. This field yields undefined results.
- * * 2'b01: 1 transaction
- * * 2'b10: 2 transactions to be issued for this endpoint per
- * microframe
- * * 2'b11: 3 transactions to be issued for this endpoint per
- * microframe
- * When HCSPLTn.SpltEna is set (1'b1), this field indicates the
- * number of immediate retries to be performed for a periodic split
- * transactions on transaction errors. This field must be set to at
- * least 2'b01.
- * @eptype: Endpoint Type (EPType)
- * Indicates the transfer type selected.
- * * 2'b00: Control
- * * 2'b01: Isochronous
- * * 2'b10: Bulk
- * * 2'b11: Interrupt
- * @lspddev: Low-Speed Device (LSpdDev)
- * This field is set by the application to indicate that this
- * channel is communicating to a low-speed device.
- * @epdir: Endpoint Direction (EPDir)
- * Indicates whether the transaction is IN or OUT.
- * * 1'b0: OUT
- * * 1'b1: IN
- * @epnum: Endpoint Number (EPNum)
- * Indicates the endpoint number on the device serving as the
- * data source or sink.
- * @mps: Maximum Packet Size (MPS)
- * Indicates the maximum packet size of the associated endpoint.
- */
- struct cvmx_usbcx_hccharx_s {
- __BITFIELD_FIELD(uint32_t chena : 1,
- __BITFIELD_FIELD(uint32_t chdis : 1,
- __BITFIELD_FIELD(uint32_t oddfrm : 1,
- __BITFIELD_FIELD(uint32_t devaddr : 7,
- __BITFIELD_FIELD(uint32_t ec : 2,
- __BITFIELD_FIELD(uint32_t eptype : 2,
- __BITFIELD_FIELD(uint32_t lspddev : 1,
- __BITFIELD_FIELD(uint32_t reserved_16_16 : 1,
- __BITFIELD_FIELD(uint32_t epdir : 1,
- __BITFIELD_FIELD(uint32_t epnum : 4,
- __BITFIELD_FIELD(uint32_t mps : 11,
- ;)))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcfg
- *
- * Host Configuration Register (HCFG)
- *
- * This register configures the core after power-on. Do not make changes to this
- * register after initializing the host.
- */
-union cvmx_usbcx_hcfg {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hcfg_s
- * @fslssupp: FS- and LS-Only Support (FSLSSupp)
- * The application uses this bit to control the core's enumeration
- * speed. Using this bit, the application can make the core
- * enumerate as a FS host, even if the connected device supports
- * HS traffic. Do not make changes to this field after initial
- * programming.
- * * 1'b0: HS/FS/LS, based on the maximum speed supported by
- * the connected device
- * * 1'b1: FS/LS-only, even if the connected device can support HS
- * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel)
- * When the core is in FS Host mode
- * * 2'b00: PHY clock is running at 30/60 MHz
- * * 2'b01: PHY clock is running at 48 MHz
- * * Others: Reserved
- * When the core is in LS Host mode
- * * 2'b00: PHY clock is running at 30/60 MHz. When the
- * UTMI+/ULPI PHY Low Power mode is not selected, use
- * 30/60 MHz.
- * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+
- * PHY Low Power mode is selected, use 48MHz if the PHY
- * supplies a 48 MHz clock during LS mode.
- * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode,
- * use 6 MHz when the UTMI+ PHY Low Power mode is
- * selected and the PHY supplies a 6 MHz clock during LS
- * mode. If you select a 6 MHz clock during LS mode, you must
- * do a soft reset.
- * * 2'b11: Reserved
- */
- struct cvmx_usbcx_hcfg_s {
- __BITFIELD_FIELD(uint32_t reserved_3_31 : 29,
- __BITFIELD_FIELD(uint32_t fslssupp : 1,
- __BITFIELD_FIELD(uint32_t fslspclksel : 2,
- ;)))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcint#
- *
- * Host Channel-n Interrupt Register (HCINT)
- *
- * This register indicates the status of a channel with respect to USB- and
- * AHB-related events. The application must read this register when the Host
- * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is
- * set. Before the application can read this register, it must first read
- * the Host All Channels Interrupt (HAINT) register to get the exact channel
- * number for the Host Channel-n Interrupt register. The application must clear
- * the appropriate bit in this register to clear the corresponding bits in the
- * HAINT and GINTSTS registers.
- */
-union cvmx_usbcx_hcintx {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hcintx_s
- * @datatglerr: Data Toggle Error (DataTglErr)
- * @frmovrun: Frame Overrun (FrmOvrun)
- * @bblerr: Babble Error (BblErr)
- * @xacterr: Transaction Error (XactErr)
- * @nyet: NYET Response Received Interrupt (NYET)
- * @ack: ACK Response Received Interrupt (ACK)
- * @nak: NAK Response Received Interrupt (NAK)
- * @stall: STALL Response Received Interrupt (STALL)
- * @ahberr: This bit is always 0x0.
- * @chhltd: Channel Halted (ChHltd)
- * Indicates the transfer completed abnormally either because of
- * any USB transaction error or in response to disable request by
- * the application.
- * @xfercompl: Transfer Completed (XferCompl)
- * Transfer completed normally without any errors.
- */
- struct cvmx_usbcx_hcintx_s {
- __BITFIELD_FIELD(uint32_t reserved_11_31 : 21,
- __BITFIELD_FIELD(uint32_t datatglerr : 1,
- __BITFIELD_FIELD(uint32_t frmovrun : 1,
- __BITFIELD_FIELD(uint32_t bblerr : 1,
- __BITFIELD_FIELD(uint32_t xacterr : 1,
- __BITFIELD_FIELD(uint32_t nyet : 1,
- __BITFIELD_FIELD(uint32_t ack : 1,
- __BITFIELD_FIELD(uint32_t nak : 1,
- __BITFIELD_FIELD(uint32_t stall : 1,
- __BITFIELD_FIELD(uint32_t ahberr : 1,
- __BITFIELD_FIELD(uint32_t chhltd : 1,
- __BITFIELD_FIELD(uint32_t xfercompl : 1,
- ;))))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcintmsk#
- *
- * Host Channel-n Interrupt Mask Register (HCINTMSKn)
- *
- * This register reflects the mask for each channel status described in the
- * previous section.
- * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_hcintmskx {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hcintmskx_s
- * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
- * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
- * @bblerrmsk: Babble Error Mask (BblErrMsk)
- * @xacterrmsk: Transaction Error Mask (XactErrMsk)
- * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
- * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
- * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
- * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
- * @ahberrmsk: AHB Error Mask (AHBErrMsk)
- * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
- * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
- */
- struct cvmx_usbcx_hcintmskx_s {
- __BITFIELD_FIELD(uint32_t reserved_11_31 : 21,
- __BITFIELD_FIELD(uint32_t datatglerrmsk : 1,
- __BITFIELD_FIELD(uint32_t frmovrunmsk : 1,
- __BITFIELD_FIELD(uint32_t bblerrmsk : 1,
- __BITFIELD_FIELD(uint32_t xacterrmsk : 1,
- __BITFIELD_FIELD(uint32_t nyetmsk : 1,
- __BITFIELD_FIELD(uint32_t ackmsk : 1,
- __BITFIELD_FIELD(uint32_t nakmsk : 1,
- __BITFIELD_FIELD(uint32_t stallmsk : 1,
- __BITFIELD_FIELD(uint32_t ahberrmsk : 1,
- __BITFIELD_FIELD(uint32_t chhltdmsk : 1,
- __BITFIELD_FIELD(uint32_t xfercomplmsk : 1,
- ;))))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcsplt#
- *
- * Host Channel-n Split Control Register (HCSPLT)
- *
- */
-union cvmx_usbcx_hcspltx {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hcspltx_s
- * @spltena: Split Enable (SpltEna)
- * The application sets this field to indicate that this channel is
- * enabled to perform split transactions.
- * @compsplt: Do Complete Split (CompSplt)
- * The application sets this field to request the OTG host to
- * perform a complete split transaction.
- * @xactpos: Transaction Position (XactPos)
- * This field is used to determine whether to send all, first,
- * middle, or last payloads with each OUT transaction.
-	 *	* 2'b11: All. This is the entire data payload of this
- * transaction (which is less than or equal to 188 bytes).
- * * 2'b10: Begin. This is the first data payload of this
- * transaction (which is larger than 188 bytes).
- * * 2'b00: Mid. This is the middle payload of this transaction
- * (which is larger than 188 bytes).
- * * 2'b01: End. This is the last payload of this transaction
- * (which is larger than 188 bytes).
- * @hubaddr: Hub Address (HubAddr)
- * This field holds the device address of the transaction
- * translator's hub.
- * @prtaddr: Port Address (PrtAddr)
- * This field is the port number of the recipient transaction
- * translator.
- */
- struct cvmx_usbcx_hcspltx_s {
- __BITFIELD_FIELD(uint32_t spltena : 1,
- __BITFIELD_FIELD(uint32_t reserved_17_30 : 14,
- __BITFIELD_FIELD(uint32_t compsplt : 1,
- __BITFIELD_FIELD(uint32_t xactpos : 2,
- __BITFIELD_FIELD(uint32_t hubaddr : 7,
- __BITFIELD_FIELD(uint32_t prtaddr : 7,
- ;))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hctsiz#
- *
- * Host Channel-n Transfer Size Register (HCTSIZ)
- *
- */
-union cvmx_usbcx_hctsizx {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hctsizx_s
- * @dopng: Do Ping (DoPng)
- * Setting this field to 1 directs the host to do PING protocol.
- * @pid: PID (Pid)
- * The application programs this field with the type of PID to use
- * for the initial transaction. The host will maintain this field
- * for the rest of the transfer.
- * * 2'b00: DATA0
- * * 2'b01: DATA2
- * * 2'b10: DATA1
- * * 2'b11: MDATA (non-control)/SETUP (control)
- * @pktcnt: Packet Count (PktCnt)
- * This field is programmed by the application with the expected
- * number of packets to be transmitted (OUT) or received (IN).
- * The host decrements this count on every successful
- * transmission or reception of an OUT/IN packet. Once this count
- * reaches zero, the application is interrupted to indicate normal
- * completion.
- * @xfersize: Transfer Size (XferSize)
- * For an OUT, this field is the number of data bytes the host will
- * send during the transfer.
- * For an IN, this field is the buffer size that the application
- * has reserved for the transfer. The application is expected to
- * program this field as an integer multiple of the maximum packet
- * size for IN transactions (periodic and non-periodic).
- */
- struct cvmx_usbcx_hctsizx_s {
- __BITFIELD_FIELD(uint32_t dopng : 1,
- __BITFIELD_FIELD(uint32_t pid : 2,
- __BITFIELD_FIELD(uint32_t pktcnt : 10,
- __BITFIELD_FIELD(uint32_t xfersize : 19,
- ;))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hfir
- *
- * Host Frame Interval Register (HFIR)
- *
- * This register stores the frame interval information for the current speed to
- * which the O2P USB core has enumerated.
- */
-union cvmx_usbcx_hfir {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hfir_s
- * @frint: Frame Interval (FrInt)
- * The value that the application programs to this field specifies
- * the interval between two consecutive SOFs (FS) or micro-
-	 *	SOFs (HS) or Keep-Alive tokens (LS). This field contains the
- * number of PHY clocks that constitute the required frame
-	 *	interval. The default value set in this field is for FS operation
-	 *	when the PHY clock frequency is 60 MHz. The application can
- * write a value to this register only after the Port Enable bit of
- * the Host Port Control and Status register (HPRT.PrtEnaPort)
- * has been set. If no value is programmed, the core calculates
- * the value based on the PHY clock specified in the FS/LS PHY
- * Clock Select field of the Host Configuration register
- * (HCFG.FSLSPclkSel). Do not change the value of this field
- * after the initial configuration.
- * * 125 us (PHY clock frequency for HS)
- * * 1 ms (PHY clock frequency for FS/LS)
- */
- struct cvmx_usbcx_hfir_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t frint : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hfnum
- *
- * Host Frame Number/Frame Time Remaining Register (HFNUM)
- *
- * This register indicates the current frame number.
- * It also indicates the time remaining (in terms of the number of PHY clocks)
- * in the current (micro)frame.
- */
-union cvmx_usbcx_hfnum {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hfnum_s
- * @frrem: Frame Time Remaining (FrRem)
- * Indicates the amount of time remaining in the current
- * microframe (HS) or frame (FS/LS), in terms of PHY clocks.
- * This field decrements on each PHY clock. When it reaches
- * zero, this field is reloaded with the value in the Frame
- * Interval register and a new SOF is transmitted on the USB.
- * @frnum: Frame Number (FrNum)
- * This field increments when a new SOF is transmitted on the
- * USB, and is reset to 0 when it reaches 16'h3FFF.
- */
- struct cvmx_usbcx_hfnum_s {
- __BITFIELD_FIELD(uint32_t frrem : 16,
- __BITFIELD_FIELD(uint32_t frnum : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hprt
- *
- * Host Port Control and Status Register (HPRT)
- *
- * This register is available in both Host and Device modes.
- * Currently, the OTG Host supports only one port.
- * A single register holds USB port-related information such as USB reset,
- * enable, suspend, resume, connect status, and test mode for each port. The
- * R_SS_WC bits in this register can trigger an interrupt to the application
- * through the Host Port Interrupt bit of the Core Interrupt register
- * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this
- * register and clear the bit that caused the interrupt. For the R_SS_WC bits,
- * the application must write a 1 to the bit to clear the interrupt.
- */
-union cvmx_usbcx_hprt {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hprt_s
- * @prtspd: Port Speed (PrtSpd)
- * Indicates the speed of the device attached to this port.
- * * 2'b00: High speed
- * * 2'b01: Full speed
- * * 2'b10: Low speed
- * * 2'b11: Reserved
- * @prttstctl: Port Test Control (PrtTstCtl)
- * The application writes a nonzero value to this field to put
- * the port into a Test mode, and the corresponding pattern is
- * signaled on the port.
- * * 4'b0000: Test mode disabled
- * * 4'b0001: Test_J mode
- * * 4'b0010: Test_K mode
- * * 4'b0011: Test_SE0_NAK mode
- * * 4'b0100: Test_Packet mode
- * * 4'b0101: Test_Force_Enable
- * * Others: Reserved
- * PrtSpd must be zero (i.e. the interface must be in high-speed
- * mode) to use the PrtTstCtl test modes.
- * @prtpwr: Port Power (PrtPwr)
- * The application uses this field to control power to this port,
- * and the core clears this bit on an overcurrent condition.
- * * 1'b0: Power off
- * * 1'b1: Power on
- * @prtlnsts: Port Line Status (PrtLnSts)
-	 *	Indicates the current logic level of the USB data lines
- * * Bit [10]: Logic level of D-
- * * Bit [11]: Logic level of D+
- * @prtrst: Port Reset (PrtRst)
- * When the application sets this bit, a reset sequence is
- * started on this port. The application must time the reset
- * period and clear this bit after the reset sequence is
- * complete.
- * * 1'b0: Port not in reset
- * * 1'b1: Port in reset
- * The application must leave this bit set for at least a
- * minimum duration mentioned below to start a reset on the
- * port. The application can leave it set for another 10 ms in
- * addition to the required minimum duration, before clearing
- * the bit, even though there is no maximum limit set by the
- * USB standard.
- * * High speed: 50 ms
- * * Full speed/Low speed: 10 ms
- * @prtsusp: Port Suspend (PrtSusp)
- * The application sets this bit to put this port in Suspend
- * mode. The core only stops sending SOFs when this is set.
- * To stop the PHY clock, the application must set the Port
- * Clock Stop bit, which will assert the suspend input pin of
- * the PHY.
- * The read value of this bit reflects the current suspend
- * status of the port. This bit is cleared by the core after a
- * remote wakeup signal is detected or the application sets
- * the Port Reset bit or Port Resume bit in this register or the
- * Resume/Remote Wakeup Detected Interrupt bit or
- * Disconnect Detected Interrupt bit in the Core Interrupt
- * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt,
- * respectively).
- * * 1'b0: Port not in Suspend mode
- * * 1'b1: Port in Suspend mode
- * @prtres: Port Resume (PrtRes)
- * The application sets this bit to drive resume signaling on
- * the port. The core continues to drive the resume signal
- * until the application clears this bit.
- * If the core detects a USB remote wakeup sequence, as
- * indicated by the Port Resume/Remote Wakeup Detected
- * Interrupt bit of the Core Interrupt register
- * (GINTSTS.WkUpInt), the core starts driving resume
- * signaling without application intervention and clears this bit
- * when it detects a disconnect condition. The read value of
- * this bit indicates whether the core is currently driving
- * resume signaling.
- * * 1'b0: No resume driven
- * * 1'b1: Resume driven
- * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng)
- * The core sets this bit when the status of the Port
- * Overcurrent Active bit (bit 4) in this register changes.
- * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct)
- * Indicates the overcurrent condition of the port.
- * * 1'b0: No overcurrent condition
- * * 1'b1: Overcurrent condition
- * @prtenchng: Port Enable/Disable Change (PrtEnChng)
- * The core sets this bit when the status of the Port Enable bit
- * [2] of this register changes.
- * @prtena: Port Enable (PrtEna)
- * A port is enabled only by the core after a reset sequence,
- * and is disabled by an overcurrent condition, a disconnect
- * condition, or by the application clearing this bit. The
- * application cannot set this bit by a register write. It can only
- * clear it to disable the port. This bit does not trigger any
- * interrupt to the application.
- * * 1'b0: Port disabled
- * * 1'b1: Port enabled
- * @prtconndet: Port Connect Detected (PrtConnDet)
- * The core sets this bit when a device connection is detected
- * to trigger an interrupt to the application using the Host Port
- * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt).
- * The application must write a 1 to this bit to clear the
- * interrupt.
- * @prtconnsts: Port Connect Status (PrtConnSts)
- * * 0: No device is attached to the port.
- * * 1: A device is attached to the port.
- */
- struct cvmx_usbcx_hprt_s {
- __BITFIELD_FIELD(uint32_t reserved_19_31 : 13,
- __BITFIELD_FIELD(uint32_t prtspd : 2,
- __BITFIELD_FIELD(uint32_t prttstctl : 4,
- __BITFIELD_FIELD(uint32_t prtpwr : 1,
- __BITFIELD_FIELD(uint32_t prtlnsts : 2,
- __BITFIELD_FIELD(uint32_t reserved_9_9 : 1,
- __BITFIELD_FIELD(uint32_t prtrst : 1,
- __BITFIELD_FIELD(uint32_t prtsusp : 1,
- __BITFIELD_FIELD(uint32_t prtres : 1,
- __BITFIELD_FIELD(uint32_t prtovrcurrchng : 1,
- __BITFIELD_FIELD(uint32_t prtovrcurract : 1,
- __BITFIELD_FIELD(uint32_t prtenchng : 1,
- __BITFIELD_FIELD(uint32_t prtena : 1,
- __BITFIELD_FIELD(uint32_t prtconndet : 1,
- __BITFIELD_FIELD(uint32_t prtconnsts : 1,
- ;)))))))))))))))
- } s;
-};
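Aside, not from the patch: the R_SS_WC note above has a practical wrinkle. PrtEna is also cleared by writing 1, so a read-modify-write that acks PrtConnDet has to zero the other write-1-to-clear bits first. A hedged sketch with an invented helper name; the actual CSR write is left to the driver's accessors, and which bits are W1C is inferred from the field descriptions above.

/* Sketch only: build an HPRT write-back value that acks PrtConnDet. */
static uint32_t example_ack_connect_detect(uint32_t hprt_raw)
{
	union cvmx_usbcx_hprt hprt;

	hprt.u32 = hprt_raw;
	/* Don't write 1 back to the other write-1-to-clear bits. */
	hprt.s.prtena = 0;
	hprt.s.prtenchng = 0;
	hprt.s.prtovrcurrchng = 0;
	/* Ack only the bit that raised the interrupt. */
	hprt.s.prtconndet = 1;
	return hprt.u32;
}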
-
-/**
- * cvmx_usbc#_hptxfsiz
- *
- * Host Periodic Transmit FIFO Size Register (HPTXFSIZ)
- *
- * This register holds the size and the memory start address of the Periodic
- * TxFIFO, as shown in Figures 310 and 311.
- */
-union cvmx_usbcx_hptxfsiz {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hptxfsiz_s
- * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
- * This value is in terms of 32-bit words.
- * * Minimum value is 16
- * * Maximum value is 32768
- * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
- */
- struct cvmx_usbcx_hptxfsiz_s {
- __BITFIELD_FIELD(uint32_t ptxfsize : 16,
- __BITFIELD_FIELD(uint32_t ptxfstaddr : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hptxsts
- *
- * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
- *
- * This read-only register contains the free space information for the Periodic
- * TxFIFO and the Periodic Transmit Request Queue
- */
-union cvmx_usbcx_hptxsts {
- uint32_t u32;
- /**
- * struct cvmx_usbcx_hptxsts_s
- * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
- * This indicates the entry in the Periodic Tx Request Queue that
- * is currently being processed by the MAC.
- * This register is used for debugging.
- * * Bit [31]: Odd/Even (micro)frame
- * - 1'b0: send in even (micro)frame
- * - 1'b1: send in odd (micro)frame
- * * Bits [30:27]: Channel/endpoint number
- * * Bits [26:25]: Type
- * - 2'b00: IN/OUT
- * - 2'b01: Zero-length packet
- * - 2'b10: CSPLIT
- * - 2'b11: Disable channel command
- * * Bit [24]: Terminate (last entry for the selected
- * channel/endpoint)
- * @ptxqspcavail: Periodic Transmit Request Queue Space Available
- * (PTxQSpcAvail)
- * Indicates the number of free locations available to be written
- * in the Periodic Transmit Request Queue. This queue holds both
- * IN and OUT requests.
- * * 8'h0: Periodic Transmit Request Queue is full
- * * 8'h1: 1 location available
- * * 8'h2: 2 locations available
- * * n: n locations available (0..8)
- * * Others: Reserved
- * @ptxfspcavail: Periodic Transmit Data FIFO Space Available
- * (PTxFSpcAvail)
- * Indicates the number of free locations available to be written
- * to in the Periodic TxFIFO.
- * Values are in terms of 32-bit words
- * * 16'h0: Periodic TxFIFO is full
- * * 16'h1: 1 word available
- * * 16'h2: 2 words available
- * * 16'hn: n words available (where 0..32768)
- * * 16'h8000: 32768 words available
- * * Others: Reserved
- */
- struct cvmx_usbcx_hptxsts_s {
- __BITFIELD_FIELD(uint32_t ptxqtop : 8,
- __BITFIELD_FIELD(uint32_t ptxqspcavail : 8,
- __BITFIELD_FIELD(uint32_t ptxfspcavail : 16,
- ;)))
- } s;
-};
-
-/**
- * cvmx_usbn#_clk_ctl
- *
- * USBN_CLK_CTL = USBN's Clock Control
- *
- * This register is used to control the frequency of the hclk and the
- * hreset and phy_rst signals.
- */
-union cvmx_usbnx_clk_ctl {
- uint64_t u64;
- /**
- * struct cvmx_usbnx_clk_ctl_s
- * @divide2: The 'hclk' used by the USB subsystem is derived
- * from the eclk.
- * Also see the field DIVIDE. DIVIDE2<1> must currently
- * be zero because it is not implemented, so the maximum
- * ratio of eclk/hclk is currently 16.
- * The actual divide number for hclk is:
- * (DIVIDE2 + 1) * (DIVIDE + 1)
- * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
- * generate the hclk in the USB Subsystem is held
- * in reset. This bit must be set to '0' before
- * changing the value of DIVIDE in this register.
- * The reset to the HCLK_DIVIDER is also asserted
- * when core reset is asserted.
- * @p_x_on: Force USB-PHY on during suspend.
- * '1' USB-PHY XO block is powered-down during
- * suspend.
- * '0' USB-PHY XO block is powered-up during
- * suspend.
- * The value of this field must be set while POR is
- * active.
- * @p_rtype: PHY reference clock type
- * On CN50XX/CN52XX/CN56XX the values are:
- * '0' The USB-PHY uses a 12MHz crystal as a clock source
- * at the USB_XO and USB_XI pins.
- * '1' Reserved.
- * '2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the
- * USB_XO pin. USB_XI should be tied to ground in this
- * case.
- * '3' Reserved.
- * On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
- * '0' Reserved.
- * '1' Reserved.
- * '2' The PHY PLL uses the XO block output as a reference.
- * The XO block uses an external clock supplied on the
- * XO pin. USB_XI should be tied to ground for this
- * usage.
- * '3' The XO block uses the clock from a crystal.
- * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
- * remain powered in Suspend Mode.
- * '1' The USB-PHY XO Bias, Bandgap and PLL are
- * powered down in suspend mode.
- * The value of this field must be set while POR is
- * active.
- * @p_c_sel: Phy clock speed select.
- * Selects the reference clock / crystal frequency.
- * '11': Reserved
- * '10': 48 MHz (reserved when a crystal is used)
- * '01': 24 MHz (reserved when a crystal is used)
- * '00': 12 MHz
- * The value of this field must be set while POR is
- * active.
- * NOTE: if a crystal is used as a reference clock,
- * this field must be set to '00' (12 MHz).
- * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
- * @sd_mode: Scaledown mode for the USBC. Controls timing events
- * in the USBC; for normal operation this must be '0'.
- * @s_bist: Starts BIST on the hclk memories during the '0'
- * to '1' transition.
- * @por: Power On Reset for the PHY.
- * Resets all the PHY's registers and state machines.
- * @enable: When '1' allows the generation of the hclk. When
- * '0' the hclk will not be generated. SEE DIVIDE
- * field of this register.
- * @prst: When this field is '0' the reset associated with
- * the phy_clk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until the time it takes 6 clocks (hclk or phy_clk,
- * whichever is slower) has passed. Under normal
- * operation once this bit is set to '1' it should not
- * be set to '0'.
- * @hrst: When this field is '0' the reset associated with
- * the hclk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until 12ms after phy_clk is stable. Under normal
- * operation, once this bit is set to '1' it should
- * not be set to '0'.
- * @divide: The frequency of 'hclk' used by the USB subsystem
- * is the eclk frequency divided by the value of
- * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
- * DIVIDE2 of this register.
- * The hclk frequency should be less than 125 MHz.
- * After writing a value to this field the SW should
- * read the field for the value written.
- * The ENABLE field of this register should not be set
- * until AFTER this field is set and then read.
- */
- struct cvmx_usbnx_clk_ctl_s {
- __BITFIELD_FIELD(uint64_t reserved_20_63 : 44,
- __BITFIELD_FIELD(uint64_t divide2 : 2,
- __BITFIELD_FIELD(uint64_t hclk_rst : 1,
- __BITFIELD_FIELD(uint64_t p_x_on : 1,
- __BITFIELD_FIELD(uint64_t p_rtype : 2,
- __BITFIELD_FIELD(uint64_t p_com_on : 1,
- __BITFIELD_FIELD(uint64_t p_c_sel : 2,
- __BITFIELD_FIELD(uint64_t cdiv_byp : 1,
- __BITFIELD_FIELD(uint64_t sd_mode : 2,
- __BITFIELD_FIELD(uint64_t s_bist : 1,
- __BITFIELD_FIELD(uint64_t por : 1,
- __BITFIELD_FIELD(uint64_t enable : 1,
- __BITFIELD_FIELD(uint64_t prst : 1,
- __BITFIELD_FIELD(uint64_t hrst : 1,
- __BITFIELD_FIELD(uint64_t divide : 3,
- ;)))))))))))))))
- } s;
-};
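Aside, not from the patch: the DIVIDE/DIVIDE2 description boils down to one formula. A throwaway sketch with an invented function name, assuming the caller already knows the eclk rate.

/* Sketch only: effective hclk rate per the DIVIDE/DIVIDE2 notes above. */
static uint64_t example_hclk_hz(uint64_t eclk_hz, unsigned int divide,
				unsigned int divide2)
{
	/* hclk = eclk / ((DIVIDE2 + 1) * (DIVIDE + 1)), kept below 125 MHz */
	return eclk_hz / ((uint64_t)(divide2 + 1) * (divide + 1));
}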
-
-/**
- * cvmx_usbn#_usbp_ctl_status
- *
- * USBN_USBP_CTL_STATUS = USBP Control And Status Register
- *
- * Contains general control and status information for the USBN block.
- */
-union cvmx_usbnx_usbp_ctl_status {
- uint64_t u64;
- /**
- * struct cvmx_usbnx_usbp_ctl_status_s
- * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
- * @txvreftune: HS DC Voltage Level Adjustment
- * @txfslstune: FS/LS Source Impedance Adjustment
- * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
- * @sqrxtune: Squelch Threshold Adjustment
- * @compdistune: Disconnect Threshold Adjustment
- * @otgtune: VBUS Valid Threshold Adjustment
- * @otgdisable: OTG Block Disable
- * @portreset: Per_Port Reset
- * @drvvbus: Drive VBUS
- * @lsbist: Low-Speed BIST Enable.
- * @fsbist: Full-Speed BIST Enable.
- * @hsbist: High-Speed BIST Enable.
- * @bist_done: PHY Bist Done.
- * Asserted at the end of the PHY BIST sequence.
- * @bist_err: PHY Bist Error.
- * Indicates an internal error was detected during
- * the BIST sequence.
- * @tdata_out: PHY Test Data Out.
- * Presents either internally generated signals or
- * test register contents, based upon the value of
- * test_data_out_sel.
- * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
- * Normally should be set to zero.
- * When customers have no intent to use USB PHY
- * interface, they should:
- * - still provide 3.3V to USB_VDD33, and
- * - tie USB_REXT to 3.3V supply, and
- * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
- * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
- * @dma_bmode: When set to 1 the L2C DMA address will be updated
- * with byte-counts between packets. When set to 0
- * the L2C DMA address is incremented to the next
- * 4-byte aligned address after adding byte-count.
- * @usbc_end: Bigendian input to the USB Core. This should be
- * set to '0' for operation.
- * @usbp_bist: PHY BIST control. Clear to '0' to run BIST on the USBP.
- * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
- * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D+ line. '1': pull-down resistance is connected
- * to D+. '0': pull-down resistance is not connected
- * to D+. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D- line. '1': pull-down resistance is connected
- * to D-. '0': pull-down resistance is not connected
- * to D-. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @hst_mode: When '0' the USB is acting as HOST, when '1'
- * USB is acting as device. This field needs to be
- * set while the USB is in reset.
- * @tuning: Transmitter Tuning for High-Speed Operation.
- * Tunes the current supply and rise/fall output
- * times for high-speed operation.
- * [20:19] == 11: Current supply increased
- * approximately 9%
- * [20:19] == 10: Current supply increased
- * approximately 4.5%
- * [20:19] == 01: Design default.
- * [20:19] == 00: Current supply decreased
- * approximately 4.5%
- * [22:21] == 11: Rise and fall times are increased.
- * [22:21] == 10: Design default.
- * [22:21] == 01: Rise and fall times are decreased.
- * [22:21] == 00: Rise and fall times are decreased
- * further as compared to the 01 setting.
- * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
- * Enables or disables bit stuffing on data[15:8]
- * when bit-stuffing is enabled.
- * @tx_bs_en: Transmit Bit Stuffing on [7:0].
- * Enables or disables bit stuffing on data[7:0]
- * when bit-stuffing is enabled.
- * @loop_enb: PHY Loopback Test Enable.
- * '1': During data transmission the receive is
- * enabled.
- * '0': During data transmission the receive is
- * disabled.
- * Must be '0' for normal operation.
- * @vtest_enb: Analog Test Pin Enable.
- * '1' The PHY's analog_test pin is enabled for the
- * input and output of applicable analog test signals.
- * '0' The analog_test pin is disabled.
- * @bist_enb: Built-In Self Test Enable.
- * Used to activate BIST in the PHY.
- * @tdata_sel: Test Data Out Select.
- * '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internally generated signals are
- * output.
- * @taddr_in: Mode Address for Test Interface.
- * Specifies the register address for writing to or
- * reading from the PHY test interface register.
- * @tdata_in: Internal Testing Register Input Data and Select
- * This is a test bus. Data is present on [3:0],
- * and its corresponding select (enable) is present
- * on bits [7:4].
- * @ate_reset: Reset input from automatic test equipment.
- * This is a test signal. When the USB Core is
- * powered up (not in Suspend Mode), an automatic
- * tester can use this to disable phy_clock and
- * free_clk, then re-enable them with an aligned
- * phase.
- * '1': The phy_clk and free_clk outputs are
- * disabled. "0": The phy_clock and free_clk outputs
- * are available within a specific period after the
- * de-assertion.
- */
- struct cvmx_usbnx_usbp_ctl_status_s {
- __BITFIELD_FIELD(uint64_t txrisetune : 1,
- __BITFIELD_FIELD(uint64_t txvreftune : 4,
- __BITFIELD_FIELD(uint64_t txfslstune : 4,
- __BITFIELD_FIELD(uint64_t txhsxvtune : 2,
- __BITFIELD_FIELD(uint64_t sqrxtune : 3,
- __BITFIELD_FIELD(uint64_t compdistune : 3,
- __BITFIELD_FIELD(uint64_t otgtune : 3,
- __BITFIELD_FIELD(uint64_t otgdisable : 1,
- __BITFIELD_FIELD(uint64_t portreset : 1,
- __BITFIELD_FIELD(uint64_t drvvbus : 1,
- __BITFIELD_FIELD(uint64_t lsbist : 1,
- __BITFIELD_FIELD(uint64_t fsbist : 1,
- __BITFIELD_FIELD(uint64_t hsbist : 1,
- __BITFIELD_FIELD(uint64_t bist_done : 1,
- __BITFIELD_FIELD(uint64_t bist_err : 1,
- __BITFIELD_FIELD(uint64_t tdata_out : 4,
- __BITFIELD_FIELD(uint64_t siddq : 1,
- __BITFIELD_FIELD(uint64_t txpreemphasistune : 1,
- __BITFIELD_FIELD(uint64_t dma_bmode : 1,
- __BITFIELD_FIELD(uint64_t usbc_end : 1,
- __BITFIELD_FIELD(uint64_t usbp_bist : 1,
- __BITFIELD_FIELD(uint64_t tclk : 1,
- __BITFIELD_FIELD(uint64_t dp_pulld : 1,
- __BITFIELD_FIELD(uint64_t dm_pulld : 1,
- __BITFIELD_FIELD(uint64_t hst_mode : 1,
- __BITFIELD_FIELD(uint64_t tuning : 4,
- __BITFIELD_FIELD(uint64_t tx_bs_enh : 1,
- __BITFIELD_FIELD(uint64_t tx_bs_en : 1,
- __BITFIELD_FIELD(uint64_t loop_enb : 1,
- __BITFIELD_FIELD(uint64_t vtest_enb : 1,
- __BITFIELD_FIELD(uint64_t bist_enb : 1,
- __BITFIELD_FIELD(uint64_t tdata_sel : 1,
- __BITFIELD_FIELD(uint64_t taddr_in : 4,
- __BITFIELD_FIELD(uint64_t tdata_in : 8,
- __BITFIELD_FIELD(uint64_t ate_reset : 1,
- ;)))))))))))))))))))))))))))))))))))
- } s;
-};
-
-#endif /* __OCTEON_HCD_H__ */
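Aside, not from the patch: the SIDDQ note above is the one board-level recommendation buried in this register. The supply and strap steps are wiring, but the register half of it is just setting one bit; the helper name below is invented for illustration.

/* Sketch only: USBN*_USBP_CTL_STATUS write-back value for an unused PHY. */
static uint64_t example_power_down_unused_phy(uint64_t ctl_raw)
{
	union cvmx_usbnx_usbp_ctl_status ctl;

	ctl.u64 = ctl_raw;
	ctl.s.siddq = 1;	/* per the SIDDQ note: set when USB is unused */
	return ctl.u64;
}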
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 6e1d5f8d3ec1..5319909eb2f6 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
- depends on CAVIUM_OCTEON_SOC && NETDEVICES
+ depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
+ depends on NETDEVICES
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index 9012dee0c348..3887cf5f1e84 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -1,6 +1,4 @@
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
+# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2005-2009 Cavium Networks
#
@@ -19,5 +17,3 @@ octeon-ethernet-y += ethernet-rx.o
octeon-ethernet-y += ethernet-sgmii.o
octeon-ethernet-y += ethernet-spi.o
octeon-ethernet-y += ethernet-tx.o
-octeon-ethernet-y += ethernet-xaui.o
-
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
new file mode 100644
index 000000000000..044e48e3d65f
--- /dev/null
+++ b/drivers/staging/octeon/TODO
@@ -0,0 +1,8 @@
+This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3
+chips at least up to CN7030.
+
+TODO:
+ - general code review and clean up
+ - make driver self-contained instead of being split between staging and
+ arch/mips/cavium-octeon.
+
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index f92e0c478e16..c060374a3da2 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
/*
@@ -24,23 +21,20 @@
#ifndef __ETHERNET_DEFINES_H__
#define __ETHERNET_DEFINES_H__
-#include <asm/octeon/cvmx-config.h>
-
#ifdef CONFIG_NETFILTER
-#define REUSE_SKBUFFS_WITHOUT_FREE 0
+#define REUSE_SKBUFFS_WITHOUT_FREE 0
#else
-#define REUSE_SKBUFFS_WITHOUT_FREE 1
+#define REUSE_SKBUFFS_WITHOUT_FREE 1
#endif
-#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
+#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_OUT_QUEUE_DEPTH 1000
-
-#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t))
-#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t))
+#define MAX_OUT_QUEUE_DEPTH 1000
-#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
+#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(u32))
+#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(u32))
+#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS + 1)
#endif /* __ETHERNET_DEFINES_H__ */
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index fd9b3d899c1f..211423059e30 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -13,69 +10,37 @@
#include <linux/phy.h>
#include <linux/ratelimit.h>
#include <linux/of_mdio.h>
-#include <generated/utsrelease.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-gmxx-defs.h>
-#include <asm/octeon/cvmx-smix-defs.h>
-
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, UTS_RELEASE, sizeof(info->version));
- strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
-}
-
-static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, cmd);
-
- return -EINVAL;
-}
-
-static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, cmd);
-
- return -EINVAL;
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
static int cvm_oct_nway_reset(struct net_device *dev)
{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (priv->phydev)
- return phy_start_aneg(priv->phydev);
+ if (dev->phydev)
+ return phy_start_aneg(dev->phydev);
return -EINVAL;
}
const struct ethtool_ops cvm_oct_ethtool_ops = {
.get_drvinfo = cvm_oct_get_drvinfo,
- .get_settings = cvm_oct_get_settings,
- .set_settings = cvm_oct_set_settings,
.nway_reset = cvm_oct_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
@@ -88,19 +53,17 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
*/
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!priv->phydev)
+ if (!dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li)
+ union cvmx_helper_link_info li)
{
if (li.s.link_up) {
pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
@@ -116,15 +79,22 @@ void cvm_oct_note_carrier(struct octeon_ethernet *priv,
void cvm_oct_adjust_link(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
- if (priv->last_link != priv->phydev->link) {
- priv->last_link = priv->phydev->link;
- link_info.u64 = 0;
- link_info.s.link_up = priv->last_link ? 1 : 0;
- link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
- link_info.s.speed = priv->phydev->speed;
+ link_info.u64 = 0;
+ link_info.s.link_up = dev->phydev->link ? 1 : 0;
+ link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0;
+ link_info.s.speed = dev->phydev->speed;
+ priv->link_info = link_info.u64;
+
+ /*
+ * The polling task needs to know about link status changes.
+ */
+ if (priv->poll)
+ priv->poll(dev);
+ if (priv->last_link != dev->phydev->link) {
+ priv->last_link = dev->phydev->link;
cvmx_helper_link_set(priv->port, link_info);
cvm_oct_note_carrier(priv, link_info);
}
@@ -134,7 +104,7 @@ int cvm_oct_common_stop(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
union cvmx_gmxx_prtx_cfg gmx_cfg;
int index = INDEX(priv->port);
@@ -144,9 +114,8 @@ int cvm_oct_common_stop(struct net_device *dev)
priv->poll = NULL;
- if (priv->phydev)
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
if (priv->last_link) {
link_info.u64 = 0;
@@ -169,22 +138,26 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
struct device_node *phy_node;
+ struct phy_device *phydev = NULL;
if (!priv->of_node)
goto no_phy;
phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(priv->of_node))
+ phy_node = of_node_get(priv->of_node);
if (!phy_node)
goto no_phy;
- priv->phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
+ priv->phy_mode);
+ of_node_put(phy_node);
- if (priv->phydev == NULL)
- return -ENODEV;
+ if (!phydev)
+ return -EPROBE_DEFER;
priv->last_link = 0;
- phy_start_aneg(priv->phydev);
+ phy_start(phydev);
return 0;
no_phy:
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index 5ed8483fc24d..7f6716e3fad4 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -25,7 +22,5 @@
extern const struct ethtool_ops cvm_oct_ethtool_ops;
-void octeon_mdiobus_force_mod_depencency(void);
-
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 5a5cdb3cd740..532594957ebc 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -1,24 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
-#include <asm/octeon/octeon.h>
-
+#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-defines.h"
-#include <asm/octeon/cvmx-fpa.h>
-
/**
* cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
* @pool: Pool to allocate an skbuff for
@@ -34,7 +28,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
while (freed) {
struct sk_buff *skb = dev_alloc_skb(size + 256);
- if (unlikely(skb == NULL))
+ if (unlikely(!skb))
break;
skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
@@ -98,7 +92,7 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
* just before the block.
*/
memory = kmalloc(size + 256, GFP_ATOMIC);
- if (unlikely(memory == NULL)) {
+ if (unlikely(!memory)) {
pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
elements * size, pool);
break;
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
index 62d07c426f89..692dcdb7154d 100644
--- a/drivers/staging/octeon/ethernet-mem.h
+++ b/drivers/staging/octeon/ethernet-mem.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 51dcb611702f..0c4fac31540a 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -15,23 +12,13 @@
#include <linux/ratelimit.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-util.h"
#include "ethernet-mdio.h"
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-ipd-defs.h>
-#include <asm/octeon/cvmx-npi-defs.h>
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
static DEFINE_SPINLOCK(global_register_lock);
-static int number_rgmii_ports;
-
static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
{
union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
@@ -63,251 +50,109 @@ static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
gmxx_rxx_int_reg.u64);
}
-static void cvm_oct_rgmii_poll(struct net_device *dev)
+static void cvm_oct_check_preamble_errors(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- unsigned long flags = 0;
- cvmx_helper_link_info_t link_info;
- int use_global_register_lock = (priv->phydev == NULL);
+ union cvmx_helper_link_info link_info;
+ unsigned long flags;
+
+ link_info.u64 = priv->link_info;
+
+ /*
+ * Take the global register lock since we are going to
+ * touch registers that affect more than one port.
+ */
+ spin_lock_irqsave(&global_register_lock, flags);
- BUG_ON(in_interrupt());
- if (use_global_register_lock) {
+ if (link_info.s.speed == 10 && priv->last_speed == 10) {
/*
- * Take the global register lock since we are going to
- * touch registers that affect more than one port.
+ * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
+ * getting preamble errors.
*/
- spin_lock_irqsave(&global_register_lock, flags);
- } else {
- mutex_lock(&priv->phydev->bus->mdio_lock);
- }
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
- link_info = cvmx_helper_link_get(priv->port);
- if (link_info.u64 == priv->link_info) {
- if (link_info.s.speed == 10) {
+ gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface));
+ if (gmxx_rxx_int_reg.s.pcterr) {
/*
- * Read the GMXX_RXX_INT_REG[PCTERR] bit and
- * see if we are getting preamble errors.
+ * We are getting preamble errors at 10Mbps. Most
+ * likely the PHY is giving us packets with misaligned
+ * preambles. In order to get these packets we need to
+ * disable preamble checking and do it in software.
*/
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
-
- gmxx_rxx_int_reg.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
- (index, interface));
- if (gmxx_rxx_int_reg.s.pcterr) {
- /*
- * We are getting preamble errors at
- * 10Mbps. Most likely the PHY is
- * giving us packets with mis aligned
- * preambles. In order to get these
- * packets we need to disable preamble
- * checking and do it in software.
- */
- cvm_oct_set_hw_preamble(priv, false);
- printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
- dev->name);
- }
+ cvm_oct_set_hw_preamble(priv, false);
+ printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
+ dev->name);
}
-
- if (use_global_register_lock)
- spin_unlock_irqrestore(&global_register_lock, flags);
- else
- mutex_unlock(&priv->phydev->bus->mdio_lock);
- return;
- }
-
- /* Since the 10Mbps preamble workaround is allowed we need to enable
- * preamble checking, FCS stripping, and clear error bits on
- * every speed change. If errors occur during 10Mbps operation
- * the above code will change this stuff
- */
- cvm_oct_set_hw_preamble(priv, true);
-
- if (priv->phydev == NULL) {
- link_info = cvmx_helper_link_autoconf(priv->port);
- priv->link_info = link_info.u64;
- }
-
- if (use_global_register_lock)
- spin_unlock_irqrestore(&global_register_lock, flags);
- else
- mutex_unlock(&priv->phydev->bus->mdio_lock);
-
- if (priv->phydev == NULL) {
- /* Tell core. */
- if (link_info.s.link_up) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- }
- cvm_oct_note_carrier(priv, link_info);
+ } else {
+ /*
+ * Since the 10Mbps preamble workaround is allowed we need to
+ * enable preamble checking, FCS stripping, and clear error
+ * bits on every speed change. If errors occur during 10Mbps
+ * operation the above code will change this stuff
+ */
+ if (priv->last_speed != link_info.s.speed)
+ cvm_oct_set_hw_preamble(priv, true);
+ priv->last_speed = link_info.s.speed;
}
+ spin_unlock_irqrestore(&global_register_lock, flags);
}
-static int cmv_oct_rgmii_gmx_interrupt(int interface)
+static void cvm_oct_rgmii_poll(struct net_device *dev)
{
- int index;
- int count = 0;
-
- /* Loop through every port of this interface */
- for (index = 0;
- index < cvmx_helper_ports_on_interface(interface);
- index++) {
- union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_helper_link_info link_info;
+ bool status_change;
- /* Read the GMX interrupt status bits */
- gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
- (index, interface));
- gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
+ link_info = cvmx_helper_link_get(priv->port);
+ if (priv->link_info != link_info.u64 &&
+ cvmx_helper_link_set(priv->port, link_info))
+ link_info.u64 = priv->link_info;
+ status_change = priv->link_info != link_info.u64;
+ priv->link_info = link_info.u64;
- /* Poll the port if inband status changed */
- if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link ||
- gmx_rx_int_reg.s.phy_spd) {
- struct net_device *dev =
- cvm_oct_device[cvmx_helper_get_ipd_port
- (interface, index)];
- struct octeon_ethernet *priv = netdev_priv(dev);
+ cvm_oct_check_preamble_errors(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue,
- &priv->port_work);
+ if (likely(!status_change))
+ return;
- gmx_rx_int_reg.u64 = 0;
- gmx_rx_int_reg.s.phy_dupx = 1;
- gmx_rx_int_reg.s.phy_link = 1;
- gmx_rx_int_reg.s.phy_spd = 1;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
- gmx_rx_int_reg.u64);
- count++;
- }
+ /* Tell core. */
+ if (link_info.s.link_up) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else if (netif_carrier_ok(dev)) {
+ netif_carrier_off(dev);
}
- return count;
-}
-
-static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
-{
- union cvmx_npi_rsl_int_blocks rsl_int_blocks;
- int count = 0;
-
- rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
-
- /* Check and see if this interrupt was caused by the GMX0 block */
- if (rsl_int_blocks.s.gmx0)
- count += cmv_oct_rgmii_gmx_interrupt(0);
-
- /* Check and see if this interrupt was caused by the GMX1 block */
- if (rsl_int_blocks.s.gmx1)
- count += cmv_oct_rgmii_gmx_interrupt(1);
-
- return count ? IRQ_HANDLED : IRQ_NONE;
+ cvm_oct_note_carrier(priv, link_info);
}
int cvm_oct_rgmii_open(struct net_device *dev)
{
- return cvm_oct_common_open(dev, cvm_oct_rgmii_poll, false);
-}
-
-static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
-{
- struct octeon_ethernet *priv =
- container_of(work, struct octeon_ethernet, port_work);
- cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
-}
-
-int cvm_oct_rgmii_init(struct net_device *dev)
-{
struct octeon_ethernet *priv = netdev_priv(dev);
- int r;
+ int ret;
- cvm_oct_common_init(dev);
- INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
- /*
- * Due to GMX errata in CN3XXX series chips, it is necessary
- * to take the link down immediately when the PHY changes
- * state. In order to do this we call the poll function every
- * time the RGMII inband status changes. This may cause
- * problems if the PHY doesn't implement inband status
- * properly.
- */
- if (number_rgmii_ports == 0) {
- r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
- IRQF_SHARED, "RGMII", &number_rgmii_ports);
- if (r != 0)
- return r;
- }
- number_rgmii_ports++;
+ ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
+ if (ret)
+ return ret;
- /*
- * Only true RGMII ports need to be polled. In GMII mode, port
- * 0 is really a RGMII port.
- */
- if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
- && (priv->port == 0))
- || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
-
- if (!octeon_is_simulation()) {
-
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- /*
- * Enable interrupts on inband status changes
- * for this port.
- */
- gmx_rx_int_en.u64 = 0;
- gmx_rx_int_en.s.phy_dupx = 1;
- gmx_rx_int_en.s.phy_link = 1;
- gmx_rx_int_en.s.phy_spd = 1;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
- gmx_rx_int_en.u64);
+ if (dev->phydev) {
+ /*
+ * In phydev mode, we still need periodic polling for the
+ * preamble error checking, and we also need to call this
+ * function on every link state change.
+ *
+ * Only true RGMII ports need to be polled. In GMII mode, port
+ * 0 is really a RGMII port.
+ */
+ if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
+ priv->port == 0) ||
+ (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
+ priv->poll = cvm_oct_check_preamble_errors;
+ cvm_oct_check_preamble_errors(dev);
}
}
return 0;
}
-
-void cvm_oct_rgmii_uninit(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- cvm_oct_common_uninit(dev);
-
- /*
- * Only true RGMII ports need to be polled. In GMII mode, port
- * 0 is really a RGMII port.
- */
- if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
- && (priv->port == 0))
- || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
-
- if (!octeon_is_simulation()) {
-
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- /*
- * Disable interrupts on inband status changes
- * for this port.
- */
- gmx_rx_int_en.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
- gmx_rx_int_en.s.phy_dupx = 0;
- gmx_rx_int_en.s.phy_link = 0;
- gmx_rx_int_en.s.phy_spd = 0;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
- gmx_rx_int_en.u64);
- }
- }
-
- /* Remove the interrupt handler when the last port is removed. */
- number_rgmii_ports--;
- if (number_rgmii_ports == 0)
- free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
- cancel_work_sync(&priv->port_work);
-}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index d1a33a927f6d..965330eec80a 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -26,38 +23,33 @@
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
-#include <linux/atomic.h>
-
-#include <asm/octeon/octeon.h>
-
+#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
-#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-wqe.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-pow.h>
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-scratch.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
+static atomic_t oct_rx_ready = ATOMIC_INIT(0);
-static struct napi_struct cvm_oct_napi;
+static struct oct_rx_group {
+ int irq;
+ int group;
+ struct napi_struct napi;
+} oct_rx_group[16];
/**
* cvm_oct_do_interrupt - interrupt handler.
+ * @irq: Interrupt number.
+ * @napi_id: Cookie to identify the NAPI instance.
*
* The interrupt occurs whenever the POW has packets in our group.
*
*/
-static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
+static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
/* Disable the IRQ and start napi_poll. */
- disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- napi_schedule(&cvm_oct_napi);
+ disable_irq_nosync(irq);
+ napi_schedule(napi_id);
return IRQ_HANDLED;
}
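Aside, not from the patch hunks themselves: the handler above is one half of the usual IRQ/NAPI handshake; its counterpart (napi_complete_done() followed by enable_irq()) shows up much later in this diff inside cvm_oct_napi_poll(). A condensed restatement with shortened, invented names, so the two halves can be read side by side.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Sketch of the per-group IRQ/NAPI handshake used by this patch. */
static irqreturn_t sketch_rx_irq(int irq, void *napi_id)
{
	disable_irq_nosync(irq);	/* stay masked while NAPI polls */
	napi_schedule(napi_id);
	return IRQ_HANDLED;
}

static void sketch_rx_poll_done(struct napi_struct *napi, int irq,
				int rx_count, int budget)
{
	if (rx_count < budget) {	/* ran out of work: re-arm the IRQ */
		napi_complete_done(napi, rx_count);
		enable_irq(irq);
	}
}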
@@ -68,7 +60,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
*
* Returns Non-zero if the packet can be dropped, zero otherwise.
*/
-static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
+static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
{
int port;
@@ -77,15 +69,17 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
else
port = work->word1.cn38xx.ipprt;
- if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
+ if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
/*
* Ignore length errors on min size packets. Some
* equipment incorrectly pads packets to 64+4FCS
* instead of 60+4FCS. Note these packets still get
* counted as frame errors.
*/
- } else if (work->word2.snoip.err_code == 5 ||
- work->word2.snoip.err_code == 7) {
+ return 0;
+
+ if (work->word2.snoip.err_code == 5 ||
+ work->word2.snoip.err_code == 7) {
/*
* We received a packet with either an alignment error
* or a FCS error. This may be signalling that we are
@@ -101,8 +95,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
gmxx_rxx_frm_ctl.u64 =
cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
-
- uint8_t *ptr =
+ u8 *ptr =
cvmx_phys_to_ptr(work->packet_ptr.s.addr);
int i = 0;
@@ -114,17 +107,14 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
}
if (*ptr == 0xd5) {
- /*
- printk_ratelimited("Port %d received 0xd5 preamble\n",
- port);
- */
+ /* Port received 0xd5 preamble */
work->packet_ptr.s.addr += i + 1;
work->word1.len -= i + 5;
- } else if ((*ptr & 0xf) == 0xd) {
- /*
- printk_ratelimited("Port %d received 0x?d preamble\n",
- port);
- */
+ return 0;
+ }
+
+ if ((*ptr & 0xf) == 0xd) {
+ /* Port received 0xd preamble */
work->packet_ptr.s.addr += i;
work->word1.len -= i + 4;
for (i = 0; i < work->word1.len; i++) {
@@ -133,38 +123,67 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
((*(ptr + 1) & 0xf) << 4);
ptr++;
}
- } else {
- printk_ratelimited("Port %d unknown preamble, packet dropped\n",
- port);
- /*
- cvmx_helper_dump_packet(work);
- */
- cvm_oct_free_work(work);
- return 1;
+ return 0;
}
+
+ printk_ratelimited("Port %d unknown preamble, packet dropped\n",
+ port);
+ cvm_oct_free_work(work);
+ return 1;
}
- } else {
- printk_ratelimited("Port %d receive error code %d, packet dropped\n",
- port, work->word2.snoip.err_code);
- cvm_oct_free_work(work);
- return 1;
}
- return 0;
+ printk_ratelimited("Port %d receive error code %d, packet dropped\n",
+ port, work->word2.snoip.err_code);
+ cvm_oct_free_work(work);
+ return 1;
}
-/**
- * cvm_oct_napi_poll - the NAPI poll function.
- * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
- * @budget: Maximum number of packets to receive.
- *
- * Returns the number of packets processed.
- */
-static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
+{
+ int segments = work->word2.s.bufs;
+ union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+ int len = work->word1.len;
+ int segment_size;
+
+ while (segments--) {
+ union cvmx_buf_ptr next_ptr;
+
+ next_ptr = *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+
+ /*
+ * Octeon Errata PKI-100: The segment size is wrong.
+ *
+ * Until it is fixed, calculate the segment size based on
+ * the packet pool buffer size.
+ * When it is fixed, the following line should be replaced
+ * with this one:
+ * int segment_size = segment_ptr.s.size;
+ */
+ segment_size =
+ CVMX_FPA_PACKET_POOL_SIZE -
+ (segment_ptr.s.addr -
+ (((segment_ptr.s.addr >> 7) -
+ segment_ptr.s.back) << 7));
+
+ /* Don't copy more than what is left in the packet */
+ if (segment_size > len)
+ segment_size = len;
+
+ /* Copy the data into the packet */
+ skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
+ segment_size);
+ len -= segment_size;
+ segment_ptr = next_ptr;
+ }
+}
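Aside, not from the patch: the PKI-100 workaround above is easier to sanity-check with concrete numbers. Everything below is invented for illustration; 2048 is an assumed packet-pool buffer size, not a value taken from this driver.

/* Sketch only: the errata segment-size arithmetic with example values. */
static uint64_t sketch_segment_size(void)
{
	uint64_t addr = 0x10080;	/* invented segment address */
	uint64_t back = 1;		/* invented 'back' field */
	uint64_t pool_size = 2048;	/* assumed buffer size */
	uint64_t buf_start = ((addr >> 7) - back) << 7;	/* = 0x10000 */

	return pool_size - (addr - buf_start);	/* 2048 - 0x80 = 1920 bytes */
}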
+
+static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
const int coreid = cvmx_get_core_num();
- uint64_t old_group_mask;
- uint64_t old_scratch;
+ u64 old_group_mask;
+ u64 old_scratch;
int rx_count = 0;
int did_work_request = 0;
int packet_not_copied;
@@ -182,12 +201,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
- 1ull << pow_receive_group);
+ BIT(rx_group->group));
cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
} else {
old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
- (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+ (old_group_mask & ~0xFFFFull) |
+ BIT(rx_group->group));
}
if (USE_ASYNC_IOBDMA) {
@@ -199,7 +219,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
struct sk_buff *skb = NULL;
struct sk_buff **pskb = NULL;
int skb_in_hw;
- cvmx_wqe_t *work;
+ struct cvmx_wqe *work;
int port;
if (USE_ASYNC_IOBDMA && did_work_request)
@@ -209,23 +229,24 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
prefetch(work);
did_work_request = 0;
- if (work == NULL) {
+ if (!work) {
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
- 1ull << pow_receive_group);
+ BIT(rx_group->group));
cvmx_write_csr(CVMX_SSO_WQ_INT,
- 1ull << pow_receive_group);
+ BIT(rx_group->group));
} else {
union cvmx_pow_wq_int wq_int;
wq_int.u64 = 0;
- wq_int.s.iq_dis = 1 << pow_receive_group;
- wq_int.s.wq_int = 1 << pow_receive_group;
+ wq_int.s.iq_dis = BIT(rx_group->group);
+ wq_int.s.wq_int = BIT(rx_group->group);
cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
}
break;
}
- pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
+ pskb = (struct sk_buff **)
+ (cvm_oct_get_buffer_ptr(work->packet_ptr) -
sizeof(void *));
prefetch(pskb);
@@ -284,7 +305,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* entirely stored in the work entry.
*/
if (unlikely(work->word2.s.bufs == 0)) {
- uint8_t *ptr = work->packet_data;
+ u8 *ptr = work->packet_data;
if (likely(!work->word2.s.not_IP)) {
/*
@@ -296,53 +317,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
else
ptr += 6;
}
- memcpy(skb_put(skb, work->word1.len), ptr,
- work->word1.len);
+ skb_put_data(skb, ptr, work->word1.len);
/* No packet buffers to free */
} else {
- int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr =
- work->packet_ptr;
- int len = work->word1.len;
-
- while (segments--) {
- union cvmx_buf_ptr next_ptr =
- *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
-
- /*
- * Octeon Errata PKI-100: The segment size is
- * wrong. Until it is fixed, calculate the
- * segment size based on the packet pool
- * buffer size. When it is fixed, the
- * following line should be replaced with this
- * one: int segment_size =
- * segment_ptr.s.size;
- */
- int segment_size =
- CVMX_FPA_PACKET_POOL_SIZE -
- (segment_ptr.s.addr -
- (((segment_ptr.s.addr >> 7) -
- segment_ptr.s.back) << 7));
- /*
- * Don't copy more than what
- * is left in the packet.
- */
- if (segment_size > len)
- segment_size = len;
- /* Copy the data into the packet */
- memcpy(skb_put(skb, segment_size),
- cvmx_phys_to_ptr(segment_ptr.s.addr),
- segment_size);
- len -= segment_size;
- segment_ptr = next_ptr;
- }
+ copy_segments_to_skb(work, skb);
}
packet_not_copied = 0;
}
if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
cvm_oct_device[port])) {
struct net_device *dev = cvm_oct_device[port];
- struct octeon_ethernet *priv = netdev_priv(dev);
/*
* Only accept packets for devices that are
@@ -362,32 +346,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* Increment RX stats for virtual ports */
if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
-#ifdef CONFIG_64BIT
- atomic64_add(1,
- (atomic64_t *)&priv->stats.rx_packets);
- atomic64_add(skb->len,
- (atomic64_t *)&priv->stats.rx_bytes);
-#else
- atomic_add(1,
- (atomic_t *)&priv->stats.rx_packets);
- atomic_add(skb->len,
- (atomic_t *)&priv->stats.rx_bytes);
-#endif
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
}
netif_receive_skb(skb);
} else {
- /* Drop any packet received for a device that isn't up */
/*
- printk_ratelimited("%s: Device not up, packet dropped\n",
- dev->name);
- */
-#ifdef CONFIG_64BIT
- atomic64_add(1,
- (atomic64_t *)&priv->stats.rx_dropped);
-#else
- atomic_add(1,
- (atomic_t *)&priv->stats.rx_dropped);
-#endif
+ * Drop any packet received for a device that
+ * isn't up.
+ */
+ dev->stats.rx_dropped++;
dev_kfree_skb_irq(skb);
}
} else {
@@ -396,7 +364,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* doesn't exist.
*/
printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
- port);
+ port);
dev_kfree_skb_irq(skb);
}
/*
@@ -431,10 +399,28 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
}
cvm_oct_rx_refill_pool(0);
- if (rx_count < budget && napi != NULL) {
+ return rx_count;
+}
+
+/**
+ * cvm_oct_napi_poll - the NAPI poll function.
+ * @napi: The NAPI instance.
+ * @budget: Maximum number of packets to receive.
+ *
+ * Returns the number of packets processed.
+ */
+static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
+ napi);
+ int rx_count;
+
+ rx_count = cvm_oct_poll(rx_group, budget);
+
+ if (rx_count < budget) {
/* No more work */
- napi_complete(napi);
- enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ napi_complete_done(napi, rx_count);
+ enable_irq(rx_group->irq);
}
return rx_count;
}
@@ -448,7 +434,17 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
void cvm_oct_poll_controller(struct net_device *dev)
{
- cvm_oct_napi_poll(NULL, 16);
+ int i;
+
+ if (!atomic_read(&oct_rx_ready))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
+
+ cvm_oct_poll(&oct_rx_group[i], 16);
+ }
}
#endif
@@ -464,57 +460,83 @@ void cvm_oct_rx_initialize(void)
}
}
- if (NULL == dev_for_napi)
+ if (!dev_for_napi)
panic("No net_devices were allocated.");
- netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
- rx_napi_weight);
- napi_enable(&cvm_oct_napi);
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ int ret;
- /* Register an IRQ handler to receive POW interrupts */
- i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
- cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
- if (i)
- panic("Could not acquire Ethernet IRQ %d\n",
- OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ netif_napi_add_weight(dev_for_napi, &oct_rx_group[i].napi,
+ cvm_oct_napi_poll, rx_napi_weight);
+ napi_enable(&oct_rx_group[i].napi);
- disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
+ oct_rx_group[i].group = i;
- /* Enable POW interrupt when our port has at least one packet */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- union cvmx_sso_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
- int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
- } else {
- union cvmx_pow_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
- int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
- }
+ /* Register an IRQ handler to receive POW interrupts */
+ ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
+ "Ethernet", &oct_rx_group[i].napi);
+ if (ret)
+ panic("Could not acquire Ethernet IRQ %d\n",
+ oct_rx_group[i].irq);
+
+ disable_irq_nosync(oct_rx_group[i].irq);
+
+ /* Enable POW interrupt when our port has at least one packet */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ union cvmx_sso_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
- /* Schedule NAPI now. This will indirectly enable the interrupt. */
- napi_schedule(&cvm_oct_napi);
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+ } else {
+ union cvmx_pow_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+ }
+
+ /* Schedule NAPI now. This will indirectly enable the
+ * interrupt.
+ */
+ napi_schedule(&oct_rx_group[i].napi);
+ }
+ atomic_inc(&oct_rx_ready);
}
void cvm_oct_rx_shutdown(void)
{
- netif_napi_del(&cvm_oct_napi);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
+
+ /* Disable POW interrupt */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
+ else
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
+
+ /* Free the interrupt handler */
+ free_irq(oct_rx_group[i].irq, cvm_oct_device);
+
+ netif_napi_del(&oct_rx_group[i].napi);
+ }
}
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a5973fd015fc..ff6482fa20d6 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
-#include <asm/octeon/cvmx-fau.h>
-
void cvm_oct_poll_controller(struct net_device *dev);
void cvm_oct_rx_initialize(void);
void cvm_oct_rx_shutdown(void);
@@ -30,7 +25,7 @@ static inline void cvm_oct_rx_refill_pool(int fill_threshold)
number_to_free);
if (num_freed != number_to_free) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- number_to_free - num_freed);
+ number_to_free - num_freed);
}
}
}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 8bceb769166c..d7fbd9159302 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/phy.h>
@@ -14,20 +11,14 @@
#include <linux/ratelimit.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-util.h"
#include "ethernet-mdio.h"
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
int cvm_oct_sgmii_open(struct net_device *dev)
{
- return cvm_oct_common_open(dev, cvm_oct_link_poll, true);
+ return cvm_oct_common_open(dev, cvm_oct_link_poll);
}
int cvm_oct_sgmii_init(struct net_device *dev)
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 2ae1944b3a1b..699c98c5ec13 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -13,18 +10,10 @@
#include <linux/interrupt.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-spi.h>
-
-#include <asm/octeon/cvmx-npi-defs.h>
-#include <asm/octeon/cvmx-spxx-defs.h>
-#include <asm/octeon/cvmx-stxx-defs.h>
-
static int number_spi_ports;
static int need_retrain[2] = { 0, 0 };
@@ -167,9 +156,7 @@ static void cvm_oct_spi_poll(struct net_device *dev)
int interface;
for (interface = 0; interface < 2; interface++) {
-
if ((priv->port == interface * 16) && need_retrain[interface]) {
-
if (cvmx_spi_restart_interface
(interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
need_retrain[interface] = 0;
@@ -215,7 +202,7 @@ int cvm_oct_spi_init(struct net_device *dev)
}
number_spi_ports++;
- if ((priv->port == 0) || (priv->port == 16)) {
+ if (priv->port == 0 || priv->port == 16) {
cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
priv->poll = cvm_oct_spi_poll;
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 9e2116f4c915..f5bbedac6a65 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -23,22 +20,13 @@
#endif /* CONFIG_XFRM */
#include <linux/atomic.h>
+#include <net/sch_generic.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-wqe.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-pko.h>
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
/*
@@ -52,15 +40,15 @@
#define GET_SKBUFF_QOS(skb) 0
#endif
-static void cvm_oct_tx_do_cleanup(unsigned long arg);
-static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
+static void cvm_oct_tx_do_cleanup(struct tasklet_struct *clean);
+static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
-static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
+static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
- int32_t undo;
+ int undo;
undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
MAX_SKB_TO_FREE;
@@ -83,9 +71,8 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
- int32_t skb_to_free;
+ int skb_to_free;
int qos, queues_per_port;
- int total_freed = 0;
int total_remaining = 0;
unsigned long flags;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -95,13 +82,10 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
for (qos = 0; qos < queues_per_port; qos++) {
if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
MAX_SKB_TO_FREE);
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau+qos*4);
-
-
- total_freed += skb_to_free;
+ priv->fau + qos * 4);
if (skb_to_free > 0) {
struct sk_buff *to_free_list = NULL;
@@ -126,7 +110,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
}
total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
}
- if (total_freed >= 0 && netif_queue_stopped(dev))
+ if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
netif_wake_queue(dev);
if (total_remaining)
cvm_oct_kick_tx_poll_watchdog();
@@ -139,19 +123,19 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
*
* Returns Always returns NETDEV_TX_OK
*/
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
- cvmx_pko_command_word0_t pko_command;
+ union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr hw_buffer;
- uint64_t old_scratch;
- uint64_t old_scratch2;
+ u64 old_scratch;
+ u64 old_scratch2;
int qos;
int i;
enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
struct octeon_ethernet *priv = netdev_priv(dev);
struct sk_buff *to_free_list;
- int32_t skb_to_free;
- int32_t buffers_to_free;
+ int skb_to_free;
+ int buffers_to_free;
u32 total_to_clean;
unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
@@ -176,8 +160,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
qos = 0;
else if (qos >= cvmx_pko_get_num_queues(priv->port))
qos = 0;
- } else
+ } else {
qos = 0;
+ }
if (USE_ASYNC_IOBDMA) {
/* Save scratch in case userspace is using it */
@@ -217,11 +202,14 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
* Get the number of skbuffs in use
* by the hardware
*/
- skb_to_free = cvmx_fau_fetch_and_add32(
- priv->fau + qos * 4, MAX_SKB_TO_FREE);
+ skb_to_free =
+ cvmx_fau_fetch_and_add32(priv->fau +
+ qos * 4,
+ MAX_SKB_TO_FREE);
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ priv->fau +
+ qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
@@ -250,8 +238,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
if ((skb_tail_pointer(skb) + add_bytes) <=
skb_end_pointer(skb))
- memset(__skb_put(skb, add_bytes), 0,
- add_bytes);
+ __skb_put_zero(skb, add_bytes);
}
}
}
@@ -272,24 +259,24 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
/* Build the PKO buffer pointer */
hw_buffer.u64 = 0;
if (skb_shinfo(skb)->nr_frags == 0) {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb->len;
} else {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb_headlen(skb);
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+ skb_frag_t *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS(
- (u64)(page_address(fs->page.p) +
- fs->page_offset));
- hw_buffer.s.size = fs->size;
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
+ hw_buffer.s.size = skb_frag_size(fs);
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
pko_command.s.gather = 1;
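
The gather-list construction above replaces direct pokes at struct skb_frag_struct fields (fs->page.p, fs->page_offset, fs->size) with the skb_frag_t accessor helpers. The same walk as a small self-contained sketch, with the example_* names made up for illustration:

#include <linux/skbuff.h>

/* Visit each page fragment the way the xmit path above does, using the
 * accessor helpers instead of touching skb_frag_t internals. */
static void example_walk_frags(struct sk_buff *skb,
			       void (*emit)(void *va, unsigned int len))
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* kernel virtual address and length of this fragment */
		emit(skb_frag_address(frag), skb_frag_size(frag));
	}
}
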
@@ -309,55 +296,38 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
#if REUSE_SKBUFFS_WITHOUT_FREE
fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
if (unlikely(skb->data < fpa_head)) {
- /*
- * printk("TX buffer beginning can't meet FPA
- * alignment constraints\n");
- */
+ /* TX buffer beginning can't meet FPA alignment constraints */
goto dont_put_skbuff_in_hw;
}
if (unlikely
((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
- /*
- printk("TX buffer isn't large enough for the FPA\n");
- */
+ /* TX buffer isn't large enough for the FPA */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_shared(skb))) {
- /*
- printk("TX buffer sharing data with someone else\n");
- */
+ /* TX buffer sharing data with someone else */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_cloned(skb))) {
- /*
- printk("TX buffer has been cloned\n");
- */
+ /* TX buffer has been cloned */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_header_cloned(skb))) {
- /*
- printk("TX buffer header has been cloned\n");
- */
+ /* TX buffer header has been cloned */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb->destructor)) {
- /*
- printk("TX buffer has a destructor\n");
- */
+ /* TX buffer has a destructor */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_shinfo(skb)->nr_frags)) {
- /*
- printk("TX buffer has fragments\n");
- */
+ /* TX buffer has fragments */
goto dont_put_skbuff_in_hw;
}
if (unlikely
(skb->truesize !=
sizeof(*skb) + skb_end_offset(skb))) {
- /*
- printk("TX buffer truesize has been changed\n");
- */
+ /* TX buffer truesize has been changed */
goto dont_put_skbuff_in_hw;
}
@@ -376,19 +346,13 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
* The skbuff will be reused without ever being freed. We must
* cleanup a bunch of core things.
*/
- dst_release(skb_dst(skb));
- skb_dst_set(skb, NULL);
-#ifdef CONFIG_XFRM
- secpath_put(skb->sp);
- skb->sp = NULL;
-#endif
- nf_reset(skb);
+ skb_dst_drop(skb);
+ skb_ext_reset(skb);
+ nf_reset_ct(skb);
+ skb_reset_redirect(skb);
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
-#ifdef CONFIG_NET_CLS_ACT
- skb->tc_verd = 0;
-#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
@@ -403,7 +367,7 @@ dont_put_skbuff_in_hw:
((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
(ip_hdr(skb)->protocol == IPPROTO_UDP))) {
/* Use hardware checksum calc */
- pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
+ pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
}
if (USE_ASYNC_IOBDMA) {
@@ -419,7 +383,8 @@ dont_put_skbuff_in_hw:
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau + qos * 4);
/*
* If we're sending faster than the receive can free them then
@@ -430,20 +395,19 @@ dont_put_skbuff_in_hw:
if (pko_command.s.dontfree) {
queue_type = QUEUE_CORE;
- pko_command.s.reg0 = priv->fau+qos*4;
+ pko_command.s.reg0 = priv->fau + qos * 4;
} else {
queue_type = QUEUE_HW;
}
if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(
- CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+ FAU_TOTAL_TX_TO_CLEAN, 1);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
/* Drop this packet if we have too many already queued to the HW */
if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
MAX_OUT_QUEUE_DEPTH)) {
-
if (dev->tx_queue_len != 0) {
/* Drop the lock when notifying the core. */
spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
@@ -477,7 +441,7 @@ skip_xmit:
case QUEUE_DROP:
skb->next = to_free_list;
to_free_list = skb;
- priv->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
break;
case QUEUE_HW:
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
@@ -514,8 +478,8 @@ skip_xmit:
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
} else {
- total_to_clean = cvmx_fau_fetch_and_add32(
- FAU_TOTAL_TX_TO_CLEAN, 1);
+ total_to_clean =
+ cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
}
if (total_to_clean & 0x3ff) {
@@ -537,33 +501,32 @@ skip_xmit:
* cvm_oct_xmit_pow - transmit a packet to the POW
* @skb: Packet to send
* @dev: Device info structure
-
* Returns Always returns zero
*/
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
void *packet_buffer;
void *copy_location;
/* Get a work queue entry */
- cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+ struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
- if (unlikely(work == NULL)) {
+ if (unlikely(!work)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name);
- priv->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return 0;
}
/* Get a packet buffer */
packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
- if (unlikely(packet_buffer == NULL)) {
+ if (unlikely(!packet_buffer)) {
printk_ratelimited("%s: Failed to allocate a packet buffer\n",
dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
- priv->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return 0;
}
@@ -576,7 +539,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
* calculation may add a little extra, but that doesn't
* hurt.
*/
- copy_location = packet_buffer + sizeof(uint64_t);
+ copy_location = packet_buffer + sizeof(u64);
copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
/*
@@ -610,42 +573,14 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
if (skb->protocol == htons(ETH_P_IP)) {
work->word2.s.ip_offset = 14;
-#if 0
- work->word2.s.vlan_valid = 0; /* FIXME */
- work->word2.s.vlan_cfi = 0; /* FIXME */
- work->word2.s.vlan_id = 0; /* FIXME */
- work->word2.s.dec_ipcomp = 0; /* FIXME */
-#endif
work->word2.s.tcp_or_udp =
- (ip_hdr(skb)->protocol == IPPROTO_TCP)
- || (ip_hdr(skb)->protocol == IPPROTO_UDP);
-#if 0
- /* FIXME */
- work->word2.s.dec_ipsec = 0;
- /* We only support IPv4 right now */
- work->word2.s.is_v6 = 0;
- /* Hardware would set to zero */
- work->word2.s.software = 0;
- /* No error, packet is internal */
- work->word2.s.L4_error = 0;
-#endif
- work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
- || (ip_hdr(skb)->frag_off ==
- 1 << 14));
-#if 0
- /* Assume Linux is sending a good packet */
- work->word2.s.IP_exc = 0;
-#endif
+ (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP);
+ work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
+ (ip_hdr(skb)->frag_off ==
+ cpu_to_be16(1 << 14)));
work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
-#if 0
- /* This is an IP packet */
- work->word2.s.not_IP = 0;
- /* No error, packet is internal */
- work->word2.s.rcv_error = 0;
- /* No error, packet is internal */
- work->word2.s.err_code = 0;
-#endif
/*
* When copying the data, include 4 bytes of the
@@ -655,12 +590,6 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
memcpy(work->packet_data, skb->data + 10,
sizeof(work->packet_data));
} else {
-#if 0
- work->word2.snoip.vlan_valid = 0; /* FIXME */
- work->word2.snoip.vlan_cfi = 0; /* FIXME */
- work->word2.snoip.vlan_id = 0; /* FIXME */
- work->word2.snoip.software = 0; /* Hardware would set to zero */
-#endif
work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
work->word2.snoip.is_bcast =
@@ -668,20 +597,14 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
work->word2.snoip.is_mcast =
(skb->pkt_type == PACKET_MULTICAST);
work->word2.snoip.not_IP = 1; /* IP was done up above */
-#if 0
- /* No error, packet is internal */
- work->word2.snoip.rcv_error = 0;
- /* No error, packet is internal */
- work->word2.snoip.err_code = 0;
-#endif
memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
}
/* Submit the packet to the POW */
cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dev_consume_skb_any(skb);
return 0;
}
@@ -706,7 +629,7 @@ void cvm_oct_tx_shutdown_dev(struct net_device *dev)
}
}
-static void cvm_oct_tx_do_cleanup(unsigned long arg)
+static void cvm_oct_tx_do_cleanup(struct tasklet_struct *clean)
{
int port;
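
The ethernet-tx.c hunks above also convert the TX cleanup tasklet to the callback-based API, where DECLARE_TASKLET() takes a void (*)(struct tasklet_struct *) callback instead of an unsigned long cookie. A minimal stand-alone sketch of that form (names are illustrative, not the driver's):

#include <linux/interrupt.h>

static void example_tx_cleanup(struct tasklet_struct *t);
static DECLARE_TASKLET(example_tx_tasklet, example_tx_cleanup);

/* Runs in softirq context after tasklet_schedule(&example_tx_tasklet). */
static void example_tx_cleanup(struct tasklet_struct *t)
{
	/* free completed TX buffers here */
}

If per-device state were needed, the callback could recover it with from_tasklet() on a tasklet embedded in that state; the driver above keeps a single global tasklet, so the argument goes unused.
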
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index 84848e4c1664..6c524668f65a 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -1,15 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
void cvm_oct_tx_initialize(void);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 45f024bc5e33..2af83a12ca78 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -1,17 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-helper-util.h>
-
/**
* cvm_oct_get_buffer_ptr - convert packet data address to pointer
* @packet_ptr: Packet data hardware address
@@ -32,13 +25,14 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
*/
static inline int INTERFACE(int ipd_port)
{
- int interface = cvmx_helper_get_interface_num(ipd_port);
+ int interface;
+ if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
+ return 10;
+ interface = cvmx_helper_get_interface_num(ipd_port);
if (interface >= 0)
return interface;
- else if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
- return 10;
- panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
+ panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__);
}
/**
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
deleted file mode 100644
index 4b47bcfaabb1..000000000000
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-
-#include <linux/phy.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/ratelimit.h>
-#include <net/dst.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
-#include "octeon-ethernet.h"
-#include "ethernet-util.h"
-#include "ethernet-mdio.h"
-
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
-int cvm_oct_xaui_open(struct net_device *dev)
-{
- return cvm_oct_common_open(dev, cvm_oct_link_poll, true);
-}
-
-int cvm_oct_xaui_init(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- cvm_oct_common_init(dev);
- if (!octeon_is_simulation() && priv->phydev == NULL)
- priv->poll = cvm_oct_link_poll;
-
- return 0;
-}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 7274fda0b77b..eadb74fc14c8 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
@@ -16,28 +13,22 @@
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-pko.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-ipd.h>
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#define OCTEON_MAX_MTU 65392
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
@@ -45,7 +36,7 @@ MODULE_PARM_DESC(num_packet_buffers, "\n"
"\tNumber of packet buffers to allocate and store in the\n"
"\tFPA. By default, 1024 packet buffers are used.\n");
-int pow_receive_group = 15;
+static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
"\tPOW group to receive packets from. All ethernet hardware\n"
@@ -53,6 +44,15 @@ MODULE_PARM_DESC(pow_receive_group, "\n"
"\tgroup. Also any other software can submit packets to this\n"
"\tgroup for the kernel to process.");
+static int receive_group_order;
+module_param(receive_group_order, int, 0444);
+MODULE_PARM_DESC(receive_group_order, "\n"
+ "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
+ "\twill be configured to send incoming packets to multiple POW\n"
+ "\tgroups. pow_receive_group parameter is ignored when multiple\n"
+ "\tgroups are taken into use and groups are allocated starting\n"
+ "\tfrom 0. By default, a single group is used.\n");
+
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
@@ -86,19 +86,17 @@ int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
-/**
- * cvm_oct_poll_queue - Workqueue for polling operations.
- */
-struct workqueue_struct *cvm_oct_poll_queue;
+/* Mask indicating which receive groups are in use. */
+int pow_receive_groups;
-/**
+/*
* cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
*
* Set to one right before cvm_oct_poll_queue is destroyed.
*/
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
-/**
+/*
* Array of every ethernet device owned by this driver indexed by
* the ipd input port number.
*/
@@ -121,8 +119,7 @@ static void cvm_oct_rx_refill_worker(struct work_struct *work)
cvm_oct_rx_refill_pool(num_packet_buffers / 2);
if (!atomic_read(&cvm_oct_poll_queue_stopping))
- queue_delayed_work(cvm_oct_poll_queue,
- &cvm_oct_rx_refill_work, HZ);
+ schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}
static void cvm_oct_periodic_worker(struct work_struct *work)
@@ -134,12 +131,11 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
if (priv->poll)
priv->poll(cvm_oct_device[priv->port]);
- cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
- cvm_oct_device[priv->port]);
+ cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
+ (cvm_oct_device[priv->port]);
if (!atomic_read(&cvm_oct_poll_queue_stopping))
- queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
+ schedule_delayed_work(&priv->port_periodic_work, HZ);
}
static void cvm_oct_configure_common_hw(void)
@@ -177,7 +173,7 @@ static void cvm_oct_configure_common_hw(void)
*/
int cvm_oct_free_work(void *work_queue_entry)
{
- cvmx_wqe_t *work = work_queue_entry;
+ struct cvmx_wqe *work = work_queue_entry;
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -219,28 +215,17 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
cvmx_pko_get_port_status(priv->port, 1, &tx_status);
}
- priv->stats.rx_packets += rx_status.inb_packets;
- priv->stats.tx_packets += tx_status.packets;
- priv->stats.rx_bytes += rx_status.inb_octets;
- priv->stats.tx_bytes += tx_status.octets;
- priv->stats.multicast += rx_status.multicast_packets;
- priv->stats.rx_crc_errors += rx_status.inb_errors;
- priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
-
- /*
- * The drop counter must be incremented atomically
- * since the RX tasklet also increments it.
- */
-#ifdef CONFIG_64BIT
- atomic64_add(rx_status.dropped_packets,
- (atomic64_t *)&priv->stats.rx_dropped);
-#else
- atomic_add(rx_status.dropped_packets,
- (atomic_t *)&priv->stats.rx_dropped);
-#endif
+ dev->stats.rx_packets += rx_status.inb_packets;
+ dev->stats.tx_packets += tx_status.packets;
+ dev->stats.rx_bytes += rx_status.inb_octets;
+ dev->stats.tx_bytes += tx_status.octets;
+ dev->stats.multicast += rx_status.multicast_packets;
+ dev->stats.rx_crc_errors += rx_status.inb_errors;
+ dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
+ dev->stats.rx_dropped += rx_status.dropped_packets;
}
- return &priv->stats;
+ return &dev->stats;
}
/**
@@ -254,33 +239,24 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- int vlan_bytes = 4;
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ int vlan_bytes = VLAN_HLEN;
#else
int vlan_bytes = 0;
#endif
+ int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
- /*
- * Limit the MTU to make sure the ethernet packets are between
- * 64 bytes and 65535 bytes.
- */
- if ((new_mtu + 14 + 4 + vlan_bytes < 64)
- || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
- pr_err("MTU must be between %d and %d.\n",
- 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
- return -EINVAL;
- }
dev->mtu = new_mtu;
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int index = INDEX(priv->port);
/* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + 14 + 4 + vlan_bytes;
+ int max_packet = new_mtu + mtu_overhead;
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Signal errors on packets larger than the MTU */
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
max_packet);
@@ -292,7 +268,7 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
union cvmx_pip_frm_len_chkx frm_len_chk;
frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = 64;
+ frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
frm_len_chk.s.maxlen = max_packet;
cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
frm_len_chk.u64);
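
The rewritten cvm_oct_common_change_mtu() drops the hand-rolled range check in favour of the core's dev->min_mtu/dev->max_mtu bounds, which cvm_oct_probe() later derives from the same framing overhead. The arithmetic, spelled out with the standard header constants (a sanity check, not driver code):

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 22 bytes of framing
 * counted against the on-wire length when 802.1Q support is enabled. */
#define EXAMPLE_MTU_OVERHEAD	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

/* min: VLAN_ETH_ZLEN (64) - 22 = 42; max: OCTEON_MAX_MTU (65392) - 22 = 65370,
 * matching the old "between 64 and 65392 bytes on the wire" check. */
#define EXAMPLE_MIN_MTU		(VLAN_ETH_ZLEN - EXAMPLE_MTU_OVERHEAD)
#define EXAMPLE_MAX_MTU		(65392 - EXAMPLE_MTU_OVERHEAD)
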
@@ -317,12 +293,12 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
union cvmx_gmxx_rxx_adr_ctl control;
+ int index = INDEX(priv->port);
control.u64 = 0;
control.s.bcst = 1; /* Allow broadcast MAC addresses */
@@ -364,29 +340,22 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
}
}
-/**
- * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
- * @dev: The device in question.
- * @addr: Address structure to change it too.
-
- * Returns Zero on success
- */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
union cvmx_gmxx_prtx_cfg gmx_cfg;
int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
int i;
- uint8_t *ptr = dev->dev_addr;
- uint64_t mac = 0;
+ const u8 *ptr = dev->dev_addr;
+ u64 mac = 0;
+ int index = INDEX(priv->port);
for (i = 0; i < 6; i++)
- mac = (mac << 8) | (uint64_t)ptr[i];
+ mac = (mac << 8) | (u64)ptr[i];
gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
@@ -413,6 +382,13 @@ static int cvm_oct_set_mac_filter(struct net_device *dev)
return 0;
}
+/**
+ * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
+ * @dev: The device in question.
+ * @addr: Socket address.
+ *
+ * Returns Zero on success
+ */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
int r = eth_mac_addr(dev, addr);
@@ -431,33 +407,29 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
int cvm_oct_common_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- const u8 *mac = NULL;
+ int ret;
- if (priv->of_node)
- mac = of_get_mac_address(priv->of_node);
-
- if (mac)
- ether_addr_copy(dev->dev_addr, mac);
- else
+ ret = of_get_ethdev_address(priv->of_node, dev);
+ if (ret)
eth_hw_addr_random(dev);
/*
* Force the interface to use the POW send if always_use_pow
* was specified or it is in the pow send list.
*/
- if ((pow_send_group != -1)
- && (always_use_pow || strstr(pow_send_list, dev->name)))
+ if ((pow_send_group != -1) &&
+ (always_use_pow || strstr(pow_send_list, dev->name)))
priv->queue = -1;
if (priv->queue != -1)
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
/* We do our own locking, Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
+ dev->lltx = true;
dev->ethtool_ops = &cvm_oct_ethtool_ops;
cvm_oct_set_mac_filter(dev);
- dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
+ dev_set_mtu(dev, dev->mtu);
/*
* Zero out stats for port so we won't mistakenly show
@@ -474,20 +446,18 @@ int cvm_oct_common_init(struct net_device *dev)
void cvm_oct_common_uninit(struct net_device *dev)
{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
}
int cvm_oct_common_open(struct net_device *dev,
- void (*link_poll)(struct net_device *), bool poll_now)
+ void (*link_poll)(struct net_device *))
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
int rv;
rv = cvm_oct_phy_setup_device(dev);
@@ -496,15 +466,17 @@ int cvm_oct_common_open(struct net_device *dev,
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 1;
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ gmx_cfg.s.pknd = priv->port;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
if (octeon_is_simulation())
return 0;
- if (priv->phydev) {
- int r = phy_read_status(priv->phydev);
+ if (dev->phydev) {
+ int r = phy_read_status(dev->phydev);
- if (r == 0 && priv->phydev->link == 0)
+ if (r == 0 && dev->phydev->link == 0)
netif_carrier_off(dev);
cvm_oct_adjust_link(dev);
} else {
@@ -512,8 +484,7 @@ int cvm_oct_common_open(struct net_device *dev,
if (!link_info.s.link_up)
netif_carrier_off(dev);
priv->poll = link_poll;
- if (poll_now)
- link_poll(dev);
+ link_poll(dev);
}
return 0;
@@ -522,14 +493,16 @@ int cvm_oct_common_open(struct net_device *dev,
void cvm_oct_link_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
link_info = cvmx_helper_link_get(priv->port);
if (link_info.u64 == priv->link_info)
return;
- link_info = cvmx_helper_link_autoconf(priv->port);
- priv->link_info = link_info.u64;
+ if (cvmx_helper_link_set(priv->port, link_info))
+ link_info.u64 = priv->link_info;
+ else
+ priv->link_info = link_info.u64;
if (link_info.s.link_up) {
if (!netif_carrier_ok(dev))
@@ -540,34 +513,41 @@ void cvm_oct_link_poll(struct net_device *dev)
cvm_oct_note_carrier(priv, link_info);
}
+static int cvm_oct_xaui_open(struct net_device *dev)
+{
+ return cvm_oct_common_open(dev, cvm_oct_link_poll);
+}
+
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
.ndo_init = cvm_oct_common_init,
.ndo_uninit = cvm_oct_common_uninit,
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
- .ndo_init = cvm_oct_xaui_init,
+ .ndo_init = cvm_oct_common_init,
.ndo_uninit = cvm_oct_common_uninit,
.ndo_open = cvm_oct_xaui_open,
.ndo_stop = cvm_oct_common_stop,
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
.ndo_init = cvm_oct_sgmii_init,
.ndo_uninit = cvm_oct_common_uninit,
@@ -576,47 +556,50 @@ static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
.ndo_init = cvm_oct_spi_init,
.ndo_uninit = cvm_oct_spi_uninit,
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
- .ndo_init = cvm_oct_rgmii_init,
- .ndo_uninit = cvm_oct_rgmii_uninit,
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
.ndo_open = cvm_oct_rgmii_open,
.ndo_stop = cvm_oct_common_stop,
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
.ndo_init = cvm_oct_common_init,
.ndo_start_xmit = cvm_oct_xmit_pow,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -624,17 +607,14 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
#endif
};
-static struct device_node *cvm_oct_of_get_child(
- const struct device_node *parent, int reg_val)
+static struct device_node *cvm_oct_of_get_child
+ (const struct device_node *parent, int reg_val)
{
- struct device_node *node = NULL;
- int size;
+ struct device_node *node;
const __be32 *addr;
+ int size;
- for (;;) {
- node = of_get_next_child(parent, node);
- if (!node)
- break;
+ for_each_child_of_node(parent, node) {
addr = of_get_property(node, "reg", &size);
if (addr && (be32_to_cpu(*addr) == reg_val))
break;
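
The open-coded of_get_next_child() loop above becomes for_each_child_of_node(), which manages the child references itself; breaking out of the loop leaves one reference held on the matched node for the caller to drop. The same lookup as a self-contained sketch (the helper name is made up):

#include <linux/of.h>

/* Return the child of @parent whose first "reg" cell equals @reg_val, with a
 * reference held; the caller must of_node_put() it when done. */
static struct device_node *example_child_by_reg(const struct device_node *parent,
						u32 reg_val)
{
	struct device_node *child;
	const __be32 *addr;
	int size;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &size);
		if (addr && be32_to_cpu(*addr) == reg_val)
			return child;
	}

	return NULL;
}
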
@@ -643,7 +623,7 @@ static struct device_node *cvm_oct_of_get_child(
}
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
+ int interface, int port)
{
struct device_node *ni, *np;
@@ -657,6 +637,39 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
return np;
}
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+ int port)
+{
+ struct device_node *np = priv->of_node;
+ u32 delay_value;
+ bool rx_delay;
+ bool tx_delay;
+
+ /* By default, both RX/TX delay is enabled in
+ * __cvmx_helper_rgmii_enable().
+ */
+ rx_delay = true;
+ tx_delay = true;
+
+ if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
+ rx_delay = delay_value > 0;
+ }
+ if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+ tx_delay = delay_value > 0;
+ }
+
+ if (!rx_delay && !tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+ else if (!rx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+ else if (!tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+ else
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
+}
+
static int cvm_oct_probe(struct platform_device *pdev)
{
int num_interfaces;
@@ -664,8 +677,11 @@ static int cvm_oct_probe(struct platform_device *pdev)
int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
int qos;
struct device_node *pip;
+ int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
- octeon_mdiobus_force_mod_depencency();
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ mtu_overhead += VLAN_HLEN;
+#endif
pip = pdev->dev.of_node;
if (!pip) {
@@ -673,16 +689,18 @@ static int cvm_oct_probe(struct platform_device *pdev)
return -EINVAL;
}
- cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
- if (cvm_oct_poll_queue == NULL) {
- pr_err("octeon-ethernet: Cannot create workqueue");
- return -ENOMEM;
- }
-
cvm_oct_configure_common_hw();
cvmx_helper_initialize_packet_io_global();
+ if (receive_group_order) {
+ if (receive_group_order > 4)
+ receive_group_order = 4;
+ pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
+ } else {
+ pow_receive_groups = BIT(pow_receive_group);
+ }
+
/* Change the input group for all ports before input is enabled */
num_interfaces = cvmx_helper_get_number_of_interfaces();
for (interface = 0; interface < num_interfaces; interface++) {
@@ -696,7 +714,37 @@ static int cvm_oct_probe(struct platform_device *pdev)
pip_prt_tagx.u64 =
cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
- pip_prt_tagx.s.grp = pow_receive_group;
+
+ if (receive_group_order) {
+ int tag_mask;
+
+ /* We support only 16 groups at the moment, so
+ * always disable the two additional "hidden"
+ * tag_mask bits on CN68XX.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ pip_prt_tagx.u64 |= 0x3ull << 44;
+
+ tag_mask = ~((1 << receive_group_order) - 1);
+ pip_prt_tagx.s.grptagbase = 0;
+ pip_prt_tagx.s.grptagmask = tag_mask;
+ pip_prt_tagx.s.grptag = 1;
+ pip_prt_tagx.s.tag_mode = 0;
+ pip_prt_tagx.s.inc_prt_flag = 1;
+ pip_prt_tagx.s.ip6_dprt_flag = 1;
+ pip_prt_tagx.s.ip4_dprt_flag = 1;
+ pip_prt_tagx.s.ip6_sprt_flag = 1;
+ pip_prt_tagx.s.ip4_sprt_flag = 1;
+ pip_prt_tagx.s.ip6_dst_flag = 1;
+ pip_prt_tagx.s.ip4_dst_flag = 1;
+ pip_prt_tagx.s.ip6_src_flag = 1;
+ pip_prt_tagx.s.ip4_src_flag = 1;
+ pip_prt_tagx.s.grp = 0;
+ } else {
+ pip_prt_tagx.s.grptag = 0;
+ pip_prt_tagx.s.grp = pow_receive_group;
+ }
+
cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
pip_prt_tagx.u64);
}
@@ -718,19 +766,21 @@ static int cvm_oct_probe(struct platform_device *pdev)
if ((pow_send_group != -1)) {
struct net_device *dev;
- pr_info("\tConfiguring device for POW only access\n");
dev = alloc_etherdev(sizeof(struct octeon_ethernet));
if (dev) {
/* Initialize the device private structure. */
struct octeon_ethernet *priv = netdev_priv(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &cvm_oct_pow_netdev_ops;
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
priv->queue = -1;
- strcpy(dev->name, "pow%d");
+ strscpy(dev->name, "pow%d", sizeof(dev->name));
for (qos = 0; qos < 16; qos++)
skb_queue_head_init(&priv->tx_free_list[qos]);
+ dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
+ dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device for POW\n");
@@ -768,10 +818,11 @@ static int cvm_oct_probe(struct platform_device *pdev)
}
/* Initialize the device private structure. */
+ SET_NETDEV_DEV(dev, &pdev->dev);
priv = netdev_priv(dev);
priv->netdev = dev;
priv->of_node = cvm_oct_node_for_port(pip, interface,
- port_index);
+ port_index);
INIT_DELAYED_WORK(&priv->port_periodic_work,
cvm_oct_periodic_worker);
@@ -779,14 +830,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
priv->port = port;
priv->queue = cvmx_pko_get_base_queue(priv->port);
priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+ priv->phy_mode = PHY_INTERFACE_MODE_NA;
for (qos = 0; qos < 16; qos++)
skb_queue_head_init(&priv->tx_free_list[qos]);
for (qos = 0; qos < cvmx_pko_get_num_queues(port);
qos++)
cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
+ dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
+ dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
switch (priv->imode) {
-
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
@@ -795,49 +848,65 @@ static int cvm_oct_probe(struct platform_device *pdev)
case CVMX_HELPER_INTERFACE_MODE_NPI:
dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strcpy(dev->name, "npi%d");
+ strscpy(dev->name, "npi%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
- strcpy(dev->name, "xaui%d");
+ strscpy(dev->name, "xaui%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_LOOP:
dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strcpy(dev->name, "loop%d");
+ strscpy(dev->name, "loop%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
- strcpy(dev->name, "eth%d");
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
dev->netdev_ops = &cvm_oct_spi_netdev_ops;
- strcpy(dev->name, "spi%d");
+ strscpy(dev->name, "spi%d", sizeof(dev->name));
break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_GMII;
dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strcpy(dev->name, "eth%d");
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
break;
+
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
+ cvm_set_rgmii_delay(priv, interface,
+ port_index);
+ break;
+ }
+
+ if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
+ if (of_phy_register_fixed_link(priv->of_node)) {
+ netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
+ interface, priv->port);
+ dev->netdev_ops = NULL;
+ }
}
if (!dev->netdev_ops) {
free_netdev(dev);
} else if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device for interface %d, port %d\n",
- interface, priv->port);
+ interface, priv->port);
free_netdev(dev);
} else {
cvm_oct_device[priv->port] = dev;
fau -=
cvmx_pko_get_num_queues(priv->port) *
- sizeof(uint32_t);
- queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
+ sizeof(u32);
+ schedule_delayed_work(&priv->port_periodic_work,
+ HZ);
}
}
}
@@ -850,26 +919,17 @@ static int cvm_oct_probe(struct platform_device *pdev)
*/
cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
- queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
+ schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
return 0;
}
-static int cvm_oct_remove(struct platform_device *pdev)
+static void cvm_oct_remove(struct platform_device *pdev)
{
int port;
- /* Disable POW interrupt */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX))
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
- else
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
-
cvmx_ipd_disable();
- /* Free the interrupt handler */
- free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
-
atomic_inc_return(&cvm_oct_poll_queue_stopping);
cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
@@ -893,8 +953,6 @@ static int cvm_oct_remove(struct platform_device *pdev)
}
}
- destroy_workqueue(cvm_oct_poll_queue);
-
cvmx_pko_shutdown();
cvmx_ipd_free_ptr();
@@ -907,7 +965,6 @@ static int cvm_oct_remove(struct platform_device *pdev)
if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
- return 0;
}
static const struct of_device_id cvm_oct_match[] = {
@@ -929,6 +986,7 @@ static struct platform_driver cvm_oct_driver = {
module_platform_driver(cvm_oct_driver);
+MODULE_SOFTDEP("pre: mdio-cavium");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
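
On the module-parameter side, a non-zero receive_group_order supersedes pow_receive_group: the probe code above turns an order of N into 2^N consecutive groups starting at group 0 and records them in the pow_receive_groups bitmask that the RX path iterates over. The mask arithmetic as a worked example:

/* receive_group_order -> number of groups -> pow_receive_groups mask
 * (mirrors the computation in cvm_oct_probe() above):
 *   order 0 ->  1 group  -> 0x0001
 *   order 2 ->  4 groups -> 0x000f
 *   order 4 -> 16 groups -> 0xffff
 */
static inline int example_pow_groups_mask(int order)
{
	if (order > 4)
		order = 4;		/* the driver caps this at 16 groups */

	return (1 << (1 << order)) - 1;
}
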
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index a242c700bc53..a6140705706f 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
/*
@@ -15,8 +12,35 @@
#define OCTEON_ETHERNET_H
#include <linux/of.h>
+#include <linux/phy.h>
+
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-pow.h>
+#include <asm/octeon/cvmx-scratch.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+#include <asm/octeon/cvmx-wqe.h>
-#include <asm/octeon/cvmx-helper-board.h>
+#else
+
+#include "octeon-stubs.h"
+
+#endif
/**
* This is the definition of the Ethernet driver's private
@@ -36,25 +60,22 @@ struct octeon_ethernet {
* cvmx_helper_interface_mode_t
*/
int imode;
+ /* PHY mode */
+ phy_interface_t phy_mode;
/* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16];
- /* Device statistics */
- struct net_device_stats stats;
- struct phy_device *phydev;
+ unsigned int last_speed;
unsigned int last_link;
/* Last negotiated link state */
- uint64_t link_info;
+ u64 link_info;
/* Called periodically to check link status */
void (*poll)(struct net_device *dev);
struct delayed_work port_periodic_work;
- struct work_struct port_work; /* may be unused. */
struct device_node *of_node;
};
int cvm_oct_free_work(void *work_queue_entry);
-int cvm_oct_rgmii_init(struct net_device *dev);
-void cvm_oct_rgmii_uninit(struct net_device *dev);
int cvm_oct_rgmii_open(struct net_device *dev);
int cvm_oct_sgmii_init(struct net_device *dev);
@@ -62,25 +83,22 @@ int cvm_oct_sgmii_open(struct net_device *dev);
int cvm_oct_spi_init(struct net_device *dev);
void cvm_oct_spi_uninit(struct net_device *dev);
-int cvm_oct_xaui_init(struct net_device *dev);
-int cvm_oct_xaui_open(struct net_device *dev);
int cvm_oct_common_init(struct net_device *dev);
void cvm_oct_common_uninit(struct net_device *dev);
void cvm_oct_adjust_link(struct net_device *dev);
int cvm_oct_common_stop(struct net_device *dev);
int cvm_oct_common_open(struct net_device *dev,
- void (*link_poll)(struct net_device *), bool poll_now);
+ void (*link_poll)(struct net_device *));
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li);
+ union cvmx_helper_link_info li);
void cvm_oct_link_poll(struct net_device *dev);
extern int always_use_pow;
extern int pow_send_group;
-extern int pow_receive_group;
+extern int pow_receive_groups;
extern char pow_send_list[];
extern struct net_device *cvm_oct_device[];
-extern struct workqueue_struct *cvm_oct_poll_queue;
extern atomic_t cvm_oct_poll_queue_stopping;
extern u64 cvm_oct_tx_poll_interval;
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
new file mode 100644
index 000000000000..35b5078ba51e
--- /dev/null
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -0,0 +1,1435 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
+
+#ifndef XKPHYS_TO_PHYS
+# define XKPHYS_TO_PHYS(p) (p)
+#endif
+
+#define OCTEON_IRQ_WORKQ0 0
+#define OCTEON_IRQ_RML 0
+#define OCTEON_IRQ_TIMER1 0
+#define OCTEON_IS_MODEL(x) 0
+#define octeon_has_feature(x) 0
+#define octeon_get_clock_rate() 0
+
+#define CVMX_SYNCIOBDMA do { } while (0)
+
+#define CVMX_HELPER_INPUT_TAG_TYPE 0
+#define CVMX_HELPER_FIRST_MBUFF_SKIP 7
+#define CVMX_FAU_REG_END (2048)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE 16
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE 16
+#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a) + (b))
+#define CVMX_GMXX_PRTX_CFG(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_JABBER(a, b) ((a) + (b))
+#define CVMX_IPD_CTL_STATUS 0
+#define CVMX_PIP_FRM_LEN_CHKX(a) (a)
+#define CVMX_PIP_NUM_INPUT_PORTS 1
+#define CVMX_SCR_SCRATCH 0
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2
+#define CVMX_IPD_SUB_PORT_FCS 0
+#define CVMX_SSO_WQ_IQ_DIS 0
+#define CVMX_SSO_WQ_INT 0
+#define CVMX_POW_WQ_INT 0
+#define CVMX_SSO_WQ_INT_PC 0
+#define CVMX_NPI_RSL_INT_BLOCKS 0
+#define CVMX_POW_WQ_INT_PC 0
+
+union cvmx_pip_wqe_word2 {
+ u64 u64;
+
+ struct {
+ u64 bufs : 8;
+ u64 ip_offset : 8;
+ u64 vlan_valid : 1;
+ u64 vlan_stacked : 1;
+ u64 unassigned : 1;
+ u64 vlan_cfi : 1;
+ u64 vlan_id : 12;
+ u64 pr : 4;
+ u64 unassigned2 : 8;
+ u64 dec_ipcomp : 1;
+ u64 tcp_or_udp : 1;
+ u64 dec_ipsec : 1;
+ u64 is_v6 : 1;
+ u64 software : 1;
+ u64 L4_error : 1;
+ u64 is_frag : 1;
+ u64 IP_exc : 1;
+ u64 is_bcast : 1;
+ u64 is_mcast : 1;
+ u64 not_IP : 1;
+ u64 rcv_error : 1;
+ u64 err_code : 8;
+ } s;
+
+ struct {
+ u64 bufs : 8;
+ u64 ip_offset : 8;
+ u64 vlan_valid : 1;
+ u64 vlan_stacked : 1;
+ u64 unassigned : 1;
+ u64 vlan_cfi : 1;
+ u64 vlan_id : 12;
+ u64 port : 12;
+ u64 dec_ipcomp : 1;
+ u64 tcp_or_udp : 1;
+ u64 dec_ipsec : 1;
+ u64 is_v6 : 1;
+ u64 software : 1;
+ u64 L4_error : 1;
+ u64 is_frag : 1;
+ u64 IP_exc : 1;
+ u64 is_bcast : 1;
+ u64 is_mcast : 1;
+ u64 not_IP : 1;
+ u64 rcv_error : 1;
+ u64 err_code : 8;
+ } s_cn68xx;
+
+ struct {
+ u64 unused1 : 16;
+ u64 vlan : 16;
+ u64 unused2 : 32;
+ } svlan;
+
+ struct {
+ u64 bufs : 8;
+ u64 unused : 8;
+ u64 vlan_valid : 1;
+ u64 vlan_stacked : 1;
+ u64 unassigned : 1;
+ u64 vlan_cfi : 1;
+ u64 vlan_id : 12;
+ u64 pr : 4;
+ u64 unassigned2 : 12;
+ u64 software : 1;
+ u64 unassigned3 : 1;
+ u64 is_rarp : 1;
+ u64 is_arp : 1;
+ u64 is_bcast : 1;
+ u64 is_mcast : 1;
+ u64 not_IP : 1;
+ u64 rcv_error : 1;
+ u64 err_code : 8;
+ } snoip;
+};
+
+union cvmx_pip_wqe_word0 {
+ struct {
+ uint64_t next_ptr:40;
+ uint8_t unused;
+ __wsum hw_chksum;
+ } cn38xx;
+ struct {
+ uint64_t pknd:6; /* 0..5 */
+ uint64_t unused2:2; /* 6..7 */
+ uint64_t bpid:6; /* 8..13 */
+ uint64_t unused1:18; /* 14..31 */
+ uint64_t l2ptr:8; /* 32..39 */
+ uint64_t l3ptr:8; /* 40..47 */
+ uint64_t unused0:8; /* 48..55 */
+ uint64_t l4ptr:8; /* 56..63 */
+ } cn68xx;
+};
+
+union cvmx_wqe_word0 {
+ uint64_t u64;
+ union cvmx_pip_wqe_word0 pip;
+};
+
+union cvmx_wqe_word1 {
+ uint64_t u64;
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t varies:14;
+ uint64_t len:16;
+ };
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:3;
+ uint64_t grp:6;
+ uint64_t zero_1:1;
+ uint64_t qos:3;
+ uint64_t zero_0:1;
+ uint64_t len:16;
+ } cn68xx;
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:1;
+ uint64_t grp:4;
+ uint64_t qos:3;
+ uint64_t ipprt:6;
+ uint64_t len:16;
+ } cn38xx;
+};
+
+union cvmx_buf_ptr {
+ void *ptr;
+ uint64_t u64;
+ struct {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t addr:40;
+ } s;
+};
+
+struct cvmx_wqe {
+ union cvmx_wqe_word0 word0;
+ union cvmx_wqe_word1 word1;
+ union cvmx_pip_wqe_word2 word2;
+ union cvmx_buf_ptr packet_ptr;
+ uint8_t packet_data[96];
+};
+
+union cvmx_helper_link_info {
+ uint64_t u64;
+ struct {
+ uint64_t reserved_20_63:44;
+ uint64_t link_up:1; /**< Is the physical link up? */
+ uint64_t full_duplex:1; /**< 1 if the link is full duplex */
+ uint64_t speed:18; /**< Speed of the link in Mbps */
+ } s;
+};
+
+enum cvmx_fau_reg_32 {
+ CVMX_FAU_REG_32_START = 0,
+};
+
+enum cvmx_fau_op_size {
+ CVMX_FAU_OP_SIZE_8 = 0,
+ CVMX_FAU_OP_SIZE_16 = 1,
+ CVMX_FAU_OP_SIZE_32 = 2,
+ CVMX_FAU_OP_SIZE_64 = 3
+};
+
+typedef enum {
+ CVMX_SPI_MODE_UNKNOWN = 0,
+ CVMX_SPI_MODE_TX_HALFPLEX = 1,
+ CVMX_SPI_MODE_RX_HALFPLEX = 2,
+ CVMX_SPI_MODE_DUPLEX = 3
+} cvmx_spi_mode_t;
+
+typedef enum {
+ CVMX_HELPER_INTERFACE_MODE_DISABLED,
+ CVMX_HELPER_INTERFACE_MODE_RGMII,
+ CVMX_HELPER_INTERFACE_MODE_GMII,
+ CVMX_HELPER_INTERFACE_MODE_SPI,
+ CVMX_HELPER_INTERFACE_MODE_PCIE,
+ CVMX_HELPER_INTERFACE_MODE_XAUI,
+ CVMX_HELPER_INTERFACE_MODE_SGMII,
+ CVMX_HELPER_INTERFACE_MODE_PICMG,
+ CVMX_HELPER_INTERFACE_MODE_NPI,
+ CVMX_HELPER_INTERFACE_MODE_LOOP,
+} cvmx_helper_interface_mode_t;
+
+typedef enum {
+ CVMX_POW_WAIT = 1,
+ CVMX_POW_NO_WAIT = 0,
+} cvmx_pow_wait_t;
+
+typedef enum {
+ CVMX_PKO_LOCK_NONE = 0,
+ CVMX_PKO_LOCK_ATOMIC_TAG = 1,
+ CVMX_PKO_LOCK_CMD_QUEUE = 2,
+} cvmx_pko_lock_t;
+
+typedef enum {
+ CVMX_PKO_SUCCESS,
+ CVMX_PKO_INVALID_PORT,
+ CVMX_PKO_INVALID_QUEUE,
+ CVMX_PKO_INVALID_PRIORITY,
+ CVMX_PKO_NO_MEMORY,
+ CVMX_PKO_PORT_ALREADY_SETUP,
+ CVMX_PKO_CMD_QUEUE_INIT_ERROR
+} cvmx_pko_status_t;
+
+enum cvmx_pow_tag_type {
+ CVMX_POW_TAG_TYPE_ORDERED = 0L,
+ CVMX_POW_TAG_TYPE_ATOMIC = 1L,
+ CVMX_POW_TAG_TYPE_NULL = 2L,
+ CVMX_POW_TAG_TYPE_NULL_NULL = 3L
+};
+
+union cvmx_ipd_ctl_status {
+ uint64_t u64;
+ struct cvmx_ipd_ctl_status_s {
+ uint64_t reserved_18_63:46;
+ uint64_t use_sop:1;
+ uint64_t rst_done:1;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } s;
+ struct cvmx_ipd_ctl_status_cn30xx {
+ uint64_t reserved_10_63:54;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn30xx;
+ struct cvmx_ipd_ctl_status_cn38xxp2 {
+ uint64_t reserved_9_63:55;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn38xxp2;
+ struct cvmx_ipd_ctl_status_cn50xx {
+ uint64_t reserved_15_63:49;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn50xx;
+ struct cvmx_ipd_ctl_status_cn58xx {
+ uint64_t reserved_12_63:52;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn58xx;
+ struct cvmx_ipd_ctl_status_cn63xxp1 {
+ uint64_t reserved_16_63:48;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn63xxp1;
+};
+
+union cvmx_ipd_sub_port_fcs {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_fcs_s {
+ uint64_t port_bit:32;
+ uint64_t reserved_32_35:4;
+ uint64_t port_bit2:4;
+ uint64_t reserved_40_63:24;
+ } s;
+ struct cvmx_ipd_sub_port_fcs_cn30xx {
+ uint64_t port_bit:3;
+ uint64_t reserved_3_63:61;
+ } cn30xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx {
+ uint64_t port_bit:32;
+ uint64_t reserved_32_63:32;
+ } cn38xx;
+};
+
+union cvmx_ipd_sub_port_qos_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_qos_cnt_s {
+ uint64_t cnt:32;
+ uint64_t port_qos:9;
+ uint64_t reserved_41_63:23;
+ } s;
+};
+
+typedef struct {
+ uint32_t dropped_octets;
+ uint32_t dropped_packets;
+ uint32_t pci_raw_packets;
+ uint32_t octets;
+ uint32_t packets;
+ uint32_t multicast_packets;
+ uint32_t broadcast_packets;
+ uint32_t len_64_packets;
+ uint32_t len_65_127_packets;
+ uint32_t len_128_255_packets;
+ uint32_t len_256_511_packets;
+ uint32_t len_512_1023_packets;
+ uint32_t len_1024_1518_packets;
+ uint32_t len_1519_max_packets;
+ uint32_t fcs_align_err_packets;
+ uint32_t runt_packets;
+ uint32_t runt_crc_packets;
+ uint32_t oversize_packets;
+ uint32_t oversize_crc_packets;
+ uint32_t inb_packets;
+ uint64_t inb_octets;
+ uint16_t inb_errors;
+} cvmx_pip_port_status_t;
+
+typedef struct {
+ uint32_t packets;
+ uint64_t octets;
+ uint64_t doorbell;
+} cvmx_pko_port_status_t;
+
+union cvmx_pip_frm_len_chkx {
+ uint64_t u64;
+ struct cvmx_pip_frm_len_chkx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t maxlen:16;
+ uint64_t minlen:16;
+ } s;
+};
+
+union cvmx_gmxx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_ctl_s {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+ } s;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t reserved_9_63:55;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t reserved_8_63:56;
+ } cn31xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t reserved_10_63:54;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+ } cn61xx;
+};
+
+union cvmx_gmxx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_reg_s {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } s;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t reserved_19_63:45;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_reg_cn50xx {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t reserved_27_63:37;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } cn61xx;
+};
+
+union cvmx_gmxx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cfg_s {
+ uint64_t reserved_22_63:42;
+ uint64_t pknd:6;
+ uint64_t reserved_14_15:2;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } s;
+ struct cvmx_gmxx_prtx_cfg_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn30xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx {
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn52xx;
+};
+
+union cvmx_gmxx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_ctl_s {
+ uint64_t reserved_4_63:60;
+ uint64_t cam_mode:1;
+ uint64_t mcst:2;
+ uint64_t bcst:1;
+ } s;
+};
+
+union cvmx_pip_prt_tagx {
+ uint64_t u64;
+ struct cvmx_pip_prt_tagx_s {
+ uint64_t reserved_54_63:10;
+ uint64_t portadd_en:1;
+ uint64_t inc_hwchk:1;
+ uint64_t reserved_50_51:2;
+ uint64_t grptagbase_msb:2;
+ uint64_t reserved_46_47:2;
+ uint64_t grptagmask_msb:2;
+ uint64_t reserved_42_43:2;
+ uint64_t grp_msb:2;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } s;
+ struct cvmx_pip_prt_tagx_cn30xx {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t reserved_30_30:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } cn30xx;
+ struct cvmx_pip_prt_tagx_cn50xx {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } cn50xx;
+};
+
+union cvmx_spxx_int_reg {
+ uint64_t u64;
+ struct cvmx_spxx_int_reg_s {
+ uint64_t reserved_32_63:32;
+ uint64_t spf:1;
+ uint64_t reserved_12_30:19;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+};
+
+union cvmx_spxx_int_msk {
+ uint64_t u64;
+ struct cvmx_spxx_int_msk_s {
+ uint64_t reserved_12_63:52;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+};
+
+union cvmx_pow_wq_int {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_s {
+ uint64_t wq_int:16;
+ uint64_t iq_dis:16;
+ uint64_t reserved_32_63:32;
+ } s;
+};
+
+union cvmx_sso_wq_int_thrx {
+ uint64_t u64;
+ struct {
+ uint64_t iq_thr:12;
+ uint64_t reserved_12_13:2;
+ uint64_t ds_thr:12;
+ uint64_t reserved_26_27:2;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_33_63:31;
+ } s;
+};
+
+union cvmx_stxx_int_reg {
+ uint64_t u64;
+ struct cvmx_stxx_int_reg_s {
+ uint64_t reserved_9_63:55;
+ uint64_t syncerr:1;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+};
+
+union cvmx_stxx_int_msk {
+ uint64_t u64;
+ struct cvmx_stxx_int_msk_s {
+ uint64_t reserved_8_63:56;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+};
+
+union cvmx_pow_wq_int_pc {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_pc_s {
+ uint64_t reserved_0_7:8;
+ uint64_t pc_thr:20;
+ uint64_t reserved_28_31:4;
+ uint64_t pc:28;
+ uint64_t reserved_60_63:4;
+ } s;
+};
+
+union cvmx_pow_wq_int_thrx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_thrx_s {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_thr:11;
+ } s;
+ struct cvmx_pow_wq_int_thrx_cn30xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_18_23:6;
+ uint64_t ds_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t iq_thr:6;
+ } cn30xx;
+ struct cvmx_pow_wq_int_thrx_cn31xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_20_23:4;
+ uint64_t ds_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t iq_thr:8;
+ } cn31xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ds_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t iq_thr:9;
+ } cn52xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_thr:10;
+ } cn63xx;
+};
+
+union cvmx_npi_rsl_int_blocks {
+ uint64_t u64;
+ struct cvmx_npi_rsl_int_blocks_s {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t reserved_28_29:2;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t reserved_13_14:2;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } s;
+ struct cvmx_npi_rsl_int_blocks_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn30xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t rint_13:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn38xx;
+ struct cvmx_npi_rsl_int_blocks_cn50xx {
+ uint64_t reserved_31_63:33;
+ uint64_t iob:1;
+ uint64_t lmc1:1;
+ uint64_t agl:1;
+ uint64_t reserved_24_27:4;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t reserved_21_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t reserved_15_15:1;
+ uint64_t rad:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t reserved_8_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn50xx;
+};
+
+union cvmx_pko_command_word0 {
+ uint64_t u64;
+ struct {
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
+ } s;
+};
+
+union cvmx_ciu_timx {
+ uint64_t u64;
+ struct cvmx_ciu_timx_s {
+ uint64_t reserved_37_63:27;
+ uint64_t one_shot:1;
+ uint64_t len:36;
+ } s;
+};
+
+union cvmx_gmxx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_rx_inbnd_s {
+ uint64_t status:1;
+ uint64_t speed:2;
+ uint64_t duplex:1;
+ uint64_t reserved_4_63:60;
+ } s;
+};
+
+static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{
+ return value;
+}
+
+static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{ }
+
+static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{ }
+
+static inline uint64_t cvmx_scratch_read64(uint64_t address)
+{
+ return 0;
+}
+
+static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
+{ }
+
+static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
+{
+ return 0;
+}
+
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+ return (void *)(uintptr_t)(physical_address);
+}
+
+static inline phys_addr_t cvmx_ptr_to_phys(void *ptr)
+{
+ return (unsigned long)ptr;
+}
+
+static inline int cvmx_helper_get_interface_num(int ipd_port)
+{
+ return ipd_port;
+}
+
+static inline int cvmx_helper_get_interface_index_num(int ipd_port)
+{
+ return ipd_port;
+}
+
+static inline void cvmx_fpa_enable(void)
+{ }
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+ return 0;
+}
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{ }
+
+static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
+{
+ return 0;
+}
+
+static inline void *cvmx_fpa_alloc(uint64_t pool)
+{
+ return NULL;
+}
+
+static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
+ uint64_t num_cache_lines)
+{ }
+
+static inline int octeon_is_simulation(void)
+{
+ return 1;
+}
+
+static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pip_port_status_t *status)
+{ }
+
+static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pko_port_status_t *status)
+{ }
+
+static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
+ interface)
+{
+ return 0;
+}
+
+static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
+{
+ union cvmx_helper_link_info ret = { .u64 = 0 };
+
+ return ret;
+}
+
+static inline int cvmx_helper_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_initialize_packet_io_global(void)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_get_number_of_interfaces(void)
+{
+ return 2;
+}
+
+static inline int cvmx_helper_ports_on_interface(int interface)
+{
+ return 1;
+}
+
+static inline int cvmx_helper_get_ipd_port(int interface, int port)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_ipd_and_packet_input_enable(void)
+{
+ return 0;
+}
+
+static inline void cvmx_ipd_disable(void)
+{ }
+
+static inline void cvmx_ipd_free_ptr(void)
+{ }
+
+static inline void cvmx_pko_disable(void)
+{ }
+
+static inline void cvmx_pko_shutdown(void)
+{ }
+
+static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
+{
+ return port;
+}
+
+static inline int cvmx_pko_get_base_queue(int port)
+{
+ return port;
+}
+
+static inline int cvmx_pko_get_num_queues(int port)
+{
+ return port;
+}
+
+static inline unsigned int cvmx_get_core_num(void)
+{
+ return 0;
+}
+
+static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
+ cvmx_pow_wait_t wait)
+{ }
+
+static inline void cvmx_pow_work_request_async(int scr_addr,
+ cvmx_pow_wait_t wait)
+{ }
+
+static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
+{
+ struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
+
+ return wqe;
+}
+
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+{
+ return (void *)(unsigned long)wait;
+}
+
+static inline int cvmx_spi_restart_interface(int interface,
+ cvmx_spi_mode_t mode, int timeout)
+{
+ return 0;
+}
+
+static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
+ enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{ }
+
+static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(int interface, int port)
+{
+ union cvmx_gmxx_rxx_rx_inbnd r;
+
+ r.u64 = 0;
+ return r;
+}
+
+static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
+ cvmx_pko_lock_t use_locking)
+{ }
+
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
+ uint64_t queue, union cvmx_pko_command_word0 pko_command,
+ union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
+{
+ return 0;
+}
+
+static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
+{ }
+
+static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
+{ }
+
+static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
+{
+ return 0;
+}
+
+static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
+{ }
+
+static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t qos, uint64_t grp)
+{ }
+
+#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a) + (b))
+#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a) + (b))
+#define CVMX_CIU_TIMX(a) (a)
+#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_INT_REG(a, b) ((a) + (b))
+#define CVMX_GMXX_SMACX(a, b) ((a) + (b))
+#define CVMX_PIP_PRT_TAGX(a) (a)
+#define CVMX_POW_PP_GRP_MSKX(a) (a)
+#define CVMX_POW_WQ_INT_THRX(a) (a)
+#define CVMX_SPXX_INT_MSK(a) (a)
+#define CVMX_SPXX_INT_REG(a) (a)
+#define CVMX_SSO_PPX_GRP_MSK(a) (a)
+#define CVMX_SSO_WQ_INT_THRX(a) (a)
+#define CVMX_STXX_INT_MSK(a) (a)
+#define CVMX_STXX_INT_REG(a) (a)
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
deleted file mode 100644
index d277f048789e..000000000000
--- a/drivers/staging/olpc_dcon/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-config FB_OLPC_DCON
- tristate "One Laptop Per Child Display CONtroller support"
- depends on OLPC && FB
- depends on I2C
- depends on (GPIO_CS5535 || GPIO_CS5535=n)
- select BACKLIGHT_CLASS_DEVICE
- ---help---
- In order to support very low power operation, the XO laptop uses a
- secondary Display CONtroller, or DCON. This secondary controller
- is present in the video pipeline between the primary display
-	  controller (integrated into the processor or chipset) and the LCD
- panel. It allows the main processor/display controller to be
- completely powered off while still retaining an image on the display.
- This controller is only available on OLPC platforms. Unless you have
- one of these platforms, you will want to say 'N'.
-
-config FB_OLPC_DCON_1
- bool "OLPC XO-1 DCON support"
- depends on FB_OLPC_DCON && GPIO_CS5535
- default y
- ---help---
- Enable support for the DCON in XO-1 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1 (or if you're unsure what model you have), you should
- say 'Y'.
-
-config FB_OLPC_DCON_1_5
- bool "OLPC XO-1.5 DCON support"
- depends on FB_OLPC_DCON && ACPI
- default y
- ---help---
- Enable support for the DCON in XO-1.5 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1.5 (or if you're unsure what model you have), you
- should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
deleted file mode 100644
index 36c7e67fec20..000000000000
--- a/drivers/staging/olpc_dcon/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-olpc-dcon-objs += olpc_dcon.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
-obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
-
-
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
deleted file mode 100644
index 61c2e65ac354..000000000000
--- a/drivers/staging/olpc_dcon/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-TODO:
- - see if vx855 gpio API can be made similar enough to cs5535 so we can
- share more code
- - allow simultaneous XO-1 and XO-1.5 support
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-copy:
- Daniel Drake <dsd@laptop.org>
- Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
deleted file mode 100644
index d115f5c0e341..000000000000
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/*
- * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
- *
- * Copyright © 2006-2007 Red Hat, Inc.
- * Copyright © 2006-2007 Advanced Micro Devices, Inc.
- * Copyright © 2009 VIA Technology, Inc.
- * Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net>
- *
- * This program is free software. You can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/fb.h>
-#include <linux/console.h>
-#include <linux/i2c.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/backlight.h>
-#include <linux/device.h>
-#include <linux/uaccess.h>
-#include <linux/ctype.h>
-#include <linux/reboot.h>
-#include <linux/olpc-ec.h>
-#include <asm/tsc.h>
-#include <asm/olpc.h>
-
-#include "olpc_dcon.h"
-
-/* Module definitions */
-
-static ushort resumeline = 898;
-module_param(resumeline, ushort, 0444);
-
-static struct dcon_platform_data *pdata;
-
-/* I2C structures */
-
-/* Platform devices */
-static struct platform_device *dcon_device;
-
-static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
-
-static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
-{
- return i2c_smbus_write_word_data(dcon->client, reg, val);
-}
-
-static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
-{
- return i2c_smbus_read_word_data(dcon->client, reg);
-}
-
-/* ===== API functions - these are called by a variety of users ==== */
-
-static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
-{
- uint16_t ver;
- int rc = 0;
-
- ver = dcon_read(dcon, DCON_REG_ID);
- if ((ver >> 8) != 0xDC) {
- pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
- rc = -ENXIO;
- goto err;
- }
-
- if (is_init) {
- pr_info("Discovered DCON version %x\n", ver & 0xFF);
- rc = pdata->init(dcon);
- if (rc != 0) {
- pr_err("Unable to init.\n");
- goto err;
- }
- }
-
- if (ver < 0xdc02) {
- dev_err(&dcon->client->dev,
- "DCON v1 is unsupported, giving up..\n");
- rc = -ENODEV;
- goto err;
- }
-
- /* SDRAM setup/hold time */
- dcon_write(dcon, 0x3a, 0xc040);
- dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */
- dcon_write(dcon, DCON_REG_MEM_OPT_A,
- MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
- dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
-
- /* Colour swizzle, AA, no passthrough, backlight */
- if (is_init) {
- dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
- MODE_CSWIZZLE | MODE_COL_AA;
- }
- dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
-
-
- /* Set the scanline to interrupt on during resume */
- dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
-
-err:
- return rc;
-}
-
-/*
- * The smbus doesn't always come back due to what is believed to be
- * hardware (power rail) bugs. For older models where this is known to
- * occur, our solution is to attempt to wait for the bus to stabilize;
- * if it doesn't happen, cut power to the dcon, repower it, and wait
- * for the bus to stabilize. Rinse, repeat until we have a working
- * smbus. For newer models, we simply BUG(); we want to know if this
- * still happens despite the power fixes that have been made!
- */
-static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
-{
- unsigned long timeout;
- u8 pm;
- int x;
-
-power_up:
- if (is_powered_down) {
- pm = 1;
- x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
- if (x) {
- pr_warn("unable to force dcon to power up: %d!\n", x);
- return x;
- }
- usleep_range(10000, 11000); /* we'll be conservative */
- }
-
- pdata->bus_stabilize_wiggle();
-
- for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
- usleep_range(1000, 1100);
- x = dcon_read(dcon, DCON_REG_ID);
- }
- if (x < 0) {
- pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
- BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
- pm = 0;
- olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
- msleep(100);
- is_powered_down = 1;
- goto power_up; /* argh, stupid hardware.. */
- }
-
- if (is_powered_down)
- return dcon_hw_init(dcon, 0);
- return 0;
-}
-
-static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
-{
- dcon->bl_val = level;
- dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
-
- /* Purposely turn off the backlight when we go to level 0 */
- if (dcon->bl_val == 0) {
- dcon->disp_mode &= ~MODE_BL_ENABLE;
- dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
- } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
- dcon->disp_mode |= MODE_BL_ENABLE;
- dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
- }
-}
-
-/* Set the output type to either color or mono */
-static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
-{
- if (dcon->mono == enable_mono)
- return 0;
-
- dcon->mono = enable_mono;
-
- if (enable_mono) {
- dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
- dcon->disp_mode |= MODE_MONO_LUMA;
- } else {
- dcon->disp_mode &= ~(MODE_MONO_LUMA);
- dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
- }
-
- dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
- return 0;
-}
-
-/* For now, this will be really stupid - we need to address how
- * DCONLOAD works in a sleep and account for it accordingly
- */
-
-static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
-{
- int x;
-
- /* Turn off the backlight and put the DCON to sleep */
-
- if (dcon->asleep == sleep)
- return;
-
- if (!olpc_board_at_least(olpc_board(0xc2)))
- return;
-
- if (sleep) {
- u8 pm = 0;
-
- x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
- if (x)
- pr_warn("unable to force dcon to power down: %d!\n", x);
- else
- dcon->asleep = sleep;
- } else {
- /* Only re-enable the backlight if the backlight value is set */
- if (dcon->bl_val != 0)
- dcon->disp_mode |= MODE_BL_ENABLE;
- x = dcon_bus_stabilize(dcon, 1);
- if (x)
- pr_warn("unable to reinit dcon hardware: %d!\n", x);
- else
- dcon->asleep = sleep;
-
- /* Restore backlight */
- dcon_set_backlight(dcon, dcon->bl_val);
- }
-
- /* We should turn off some stuff in the framebuffer - but what? */
-}
-
-/* the DCON seems to get confused if we change DCONLOAD too
- * frequently -- i.e., approximately faster than frame time.
- * normally we don't change it this fast, so in general we won't
- * delay here.
- */
-static void dcon_load_holdoff(struct dcon_priv *dcon)
-{
- struct timespec delta_t, now;
-
- while (1) {
- getnstimeofday(&now);
- delta_t = timespec_sub(now, dcon->load_time);
- if (delta_t.tv_sec != 0 ||
- delta_t.tv_nsec > NSEC_PER_MSEC * 20) {
- break;
- }
- mdelay(4);
- }
-}
-
-static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
-{
- int err;
-
- console_lock();
- if (!lock_fb_info(dcon->fbinfo)) {
- console_unlock();
- dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
- return false;
- }
-
- dcon->ignore_fb_events = true;
- err = fb_blank(dcon->fbinfo,
- blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
- dcon->ignore_fb_events = false;
- unlock_fb_info(dcon->fbinfo);
- console_unlock();
-
- if (err) {
- dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
- blank ? "" : "un");
- return false;
- }
- return true;
-}
-
-/* Set the source of the display (CPU or DCON) */
-static void dcon_source_switch(struct work_struct *work)
-{
- struct dcon_priv *dcon = container_of(work, struct dcon_priv,
- switch_source);
- int source = dcon->pending_src;
-
- if (dcon->curr_src == source)
- return;
-
- dcon_load_holdoff(dcon);
-
- dcon->switched = false;
-
- switch (source) {
- case DCON_SOURCE_CPU:
- pr_info("dcon_source_switch to CPU\n");
- /* Enable the scanline interrupt bit */
- if (dcon_write(dcon, DCON_REG_MODE,
- dcon->disp_mode | MODE_SCAN_INT))
- pr_err("couldn't enable scanline interrupt!\n");
- else
- /* Wait up to one second for the scanline interrupt */
- wait_event_timeout(dcon->waitq, dcon->switched, HZ);
-
- if (!dcon->switched)
- pr_err("Timeout entering CPU mode; expect a screen glitch.\n");
-
- /* Turn off the scanline interrupt */
- if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
- pr_err("couldn't disable scanline interrupt!\n");
-
- /*
- * Ideally we'd like to disable interrupts here so that the
- * fb unblanking and DCON turn on happen at a known time value;
- * however, we can't do that right now with fb_blank
- * messing with semaphores.
- *
- * For now, we just hope..
- */
- if (!dcon_blank_fb(dcon, false)) {
- pr_err("Failed to enter CPU mode\n");
- dcon->pending_src = DCON_SOURCE_DCON;
- return;
- }
-
- /* And turn off the DCON */
- pdata->set_dconload(1);
- getnstimeofday(&dcon->load_time);
-
- pr_info("The CPU has control\n");
- break;
- case DCON_SOURCE_DCON:
- {
- struct timespec delta_t;
-
- pr_info("dcon_source_switch to DCON\n");
-
- /* Clear DCONLOAD - this implies that the DCON is in control */
- pdata->set_dconload(0);
- getnstimeofday(&dcon->load_time);
-
- wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);
-
- if (!dcon->switched) {
- pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
- } else {
- /* sometimes the DCON doesn't follow its own rules,
- * and doesn't wait for two vsync pulses before
- * ack'ing the frame load with an IRQ. the result
- * is that the display shows the *previously*
- * loaded frame. we can detect this by looking at
- * the time between asserting DCONLOAD and the IRQ --
- * if it's less than 20msec, then the DCON couldn't
- * have seen two VSYNC pulses. in that case we
- * deassert and reassert, and hope for the best.
- * see http://dev.laptop.org/ticket/9664
- */
- delta_t = timespec_sub(dcon->irq_time, dcon->load_time);
- if (dcon->switched && delta_t.tv_sec == 0 &&
- delta_t.tv_nsec < NSEC_PER_MSEC * 20) {
- pr_err("missed loading, retrying\n");
- pdata->set_dconload(1);
- mdelay(41);
- pdata->set_dconload(0);
- getnstimeofday(&dcon->load_time);
- mdelay(41);
- }
- }
-
- dcon_blank_fb(dcon, true);
- pr_info("The DCON has control\n");
- break;
- }
- default:
- BUG();
- }
-
- dcon->curr_src = source;
-}
-
-static void dcon_set_source(struct dcon_priv *dcon, int arg)
-{
- if (dcon->pending_src == arg)
- return;
-
- dcon->pending_src = arg;
-
- if (dcon->curr_src != arg)
- schedule_work(&dcon->switch_source);
-}
-
-static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
-{
- dcon_set_source(dcon, arg);
- flush_scheduled_work();
-}
-
-static ssize_t dcon_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dcon_priv *dcon = dev_get_drvdata(dev);
-
- return sprintf(buf, "%4.4X\n", dcon->disp_mode);
-}
-
-static ssize_t dcon_sleep_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dcon_priv *dcon = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", dcon->asleep);
-}
-
-static ssize_t dcon_freeze_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dcon_priv *dcon = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
-}
-
-static ssize_t dcon_mono_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dcon_priv *dcon = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", dcon->mono);
-}
-
-static ssize_t dcon_resumeline_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", resumeline);
-}
-
-static ssize_t dcon_mono_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned long enable_mono;
- int rc;
-
- rc = kstrtoul(buf, 10, &enable_mono);
- if (rc)
- return rc;
-
- dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
-
- return count;
-}
-
-static ssize_t dcon_freeze_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct dcon_priv *dcon = dev_get_drvdata(dev);
- unsigned long output;
- int ret;
-
- ret = kstrtoul(buf, 10, &output);
- if (ret)
- return ret;
-
- pr_info("dcon_freeze_store: %lu\n", output);
-
- switch (output) {
- case 0:
- dcon_set_source(dcon, DCON_SOURCE_CPU);
- break;
- case 1:
- dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
- break;
- case 2: /* normally unused */
- dcon_set_source(dcon, DCON_SOURCE_DCON);
- break;
- default:
- return -EINVAL;
- }
-
- return count;
-}
-
-static ssize_t dcon_resumeline_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned short rl;
- int rc;
-
- rc = kstrtou16(buf, 10, &rl);
- if (rc)
- return rc;
-
- resumeline = rl;
- dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
-
- return count;
-}
-
-static ssize_t dcon_sleep_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned long output;
- int ret;
-
- ret = kstrtoul(buf, 10, &output);
- if (ret)
- return ret;
-
- dcon_sleep(dev_get_drvdata(dev), output ? true : false);
- return count;
-}
-
-static struct device_attribute dcon_device_files[] = {
- __ATTR(mode, 0444, dcon_mode_show, NULL),
- __ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
- __ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
- __ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
- __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
-};
-
-static int dcon_bl_update(struct backlight_device *dev)
-{
- struct dcon_priv *dcon = bl_get_data(dev);
- u8 level = dev->props.brightness & 0x0F;
-
- if (dev->props.power != FB_BLANK_UNBLANK)
- level = 0;
-
- if (level != dcon->bl_val)
- dcon_set_backlight(dcon, level);
-
- /* power down the DCON when the screen is blanked */
- if (!dcon->ignore_fb_events)
- dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));
-
- return 0;
-}
-
-static int dcon_bl_get(struct backlight_device *dev)
-{
- struct dcon_priv *dcon = bl_get_data(dev);
-
- return dcon->bl_val;
-}
-
-static const struct backlight_ops dcon_bl_ops = {
- .update_status = dcon_bl_update,
- .get_brightness = dcon_bl_get,
-};
-
-static struct backlight_properties dcon_bl_props = {
- .max_brightness = 15,
- .type = BACKLIGHT_RAW,
- .power = FB_BLANK_UNBLANK,
-};
-
-static int dcon_reboot_notify(struct notifier_block *nb,
- unsigned long foo, void *bar)
-{
- struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
-
- if (!dcon || !dcon->client)
- return NOTIFY_DONE;
-
- /* Turn off the DCON. Entirely. */
- dcon_write(dcon, DCON_REG_MODE, 0x39);
- dcon_write(dcon, DCON_REG_MODE, 0x32);
- return NOTIFY_DONE;
-}
-
-static int unfreeze_on_panic(struct notifier_block *nb,
- unsigned long e, void *p)
-{
- pdata->set_dconload(1);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block dcon_panic_nb = {
- .notifier_call = unfreeze_on_panic,
-};
-
-static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
-{
- strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
-{
- struct dcon_priv *dcon;
- int rc, i, j;
-
- if (!pdata)
- return -ENXIO;
-
- dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
- if (!dcon)
- return -ENOMEM;
-
- dcon->client = client;
- init_waitqueue_head(&dcon->waitq);
- INIT_WORK(&dcon->switch_source, dcon_source_switch);
- dcon->reboot_nb.notifier_call = dcon_reboot_notify;
- dcon->reboot_nb.priority = -1;
-
- i2c_set_clientdata(client, dcon);
-
- if (num_registered_fb < 1) {
- dev_err(&client->dev, "DCON driver requires a registered fb\n");
- rc = -EIO;
- goto einit;
- }
- dcon->fbinfo = registered_fb[0];
-
- rc = dcon_hw_init(dcon, 1);
- if (rc)
- goto einit;
-
- /* Add the DCON device */
-
- dcon_device = platform_device_alloc("dcon", -1);
-
- if (dcon_device == NULL) {
- pr_err("Unable to create the DCON device\n");
- rc = -ENOMEM;
- goto eirq;
- }
- rc = platform_device_add(dcon_device);
- platform_set_drvdata(dcon_device, dcon);
-
- if (rc) {
- pr_err("Unable to add the DCON device\n");
- goto edev;
- }
-
- for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
- rc = device_create_file(&dcon_device->dev,
- &dcon_device_files[i]);
- if (rc) {
- dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
- goto ecreate;
- }
- }
-
- dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
-
- /* Add the backlight device for the DCON */
- dcon_bl_props.brightness = dcon->bl_val;
- dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
- dcon, &dcon_bl_ops, &dcon_bl_props);
- if (IS_ERR(dcon->bl_dev)) {
- dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
- PTR_ERR(dcon->bl_dev));
- dcon->bl_dev = NULL;
- }
-
- register_reboot_notifier(&dcon->reboot_nb);
- atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
-
- return 0;
-
- ecreate:
- for (j = 0; j < i; j++)
- device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
- edev:
- platform_device_unregister(dcon_device);
- dcon_device = NULL;
- eirq:
- free_irq(DCON_IRQ, dcon);
- einit:
- kfree(dcon);
- return rc;
-}
-
-static int dcon_remove(struct i2c_client *client)
-{
- struct dcon_priv *dcon = i2c_get_clientdata(client);
-
- unregister_reboot_notifier(&dcon->reboot_nb);
- atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
-
- free_irq(DCON_IRQ, dcon);
-
- backlight_device_unregister(dcon->bl_dev);
-
- if (dcon_device != NULL)
- platform_device_unregister(dcon_device);
- cancel_work_sync(&dcon->switch_source);
-
- kfree(dcon);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int dcon_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct dcon_priv *dcon = i2c_get_clientdata(client);
-
- if (!dcon->asleep) {
- /* Set up the DCON to have the source */
- dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
- }
-
- return 0;
-}
-
-static int dcon_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct dcon_priv *dcon = i2c_get_clientdata(client);
-
- if (!dcon->asleep) {
- dcon_bus_stabilize(dcon, 0);
- dcon_set_source(dcon, DCON_SOURCE_CPU);
- }
-
- return 0;
-}
-
-#else
-
-#define dcon_suspend NULL
-#define dcon_resume NULL
-
-#endif /* CONFIG_PM */
-
-
-irqreturn_t dcon_interrupt(int irq, void *id)
-{
- struct dcon_priv *dcon = id;
- u8 status;
-
- if (pdata->read_status(&status))
- return IRQ_NONE;
-
- switch (status & 3) {
- case 3:
- pr_debug("DCONLOAD_MISSED interrupt\n");
- break;
-
- case 2: /* switch to DCON mode */
- case 1: /* switch to CPU mode */
- dcon->switched = true;
- getnstimeofday(&dcon->irq_time);
- wake_up(&dcon->waitq);
- break;
-
- case 0:
- /* workaround resume case: the DCON (on 1.5) doesn't
- * ever assert status 0x01 when switching to CPU mode
- * during resume. this is because DCONLOAD is de-asserted
- * _immediately_ upon exiting S3, so the actual release
- * of the DCON happened long before this point.
- * see http://dev.laptop.org/ticket/9869
- */
- if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
- dcon->switched = true;
- getnstimeofday(&dcon->irq_time);
- wake_up(&dcon->waitq);
- pr_debug("switching w/ status 0/0\n");
- } else {
- pr_debug("scanline interrupt w/CPU\n");
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static const struct dev_pm_ops dcon_pm_ops = {
- .suspend = dcon_suspend,
- .resume = dcon_resume,
-};
-
-static const struct i2c_device_id dcon_idtable[] = {
- { "olpc_dcon", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, dcon_idtable);
-
-static struct i2c_driver dcon_driver = {
- .driver = {
- .name = "olpc_dcon",
- .pm = &dcon_pm_ops,
- },
- .class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
- .id_table = dcon_idtable,
- .probe = dcon_probe,
- .remove = dcon_remove,
- .detect = dcon_detect,
- .address_list = normal_i2c,
-};
-
-static int __init olpc_dcon_init(void)
-{
-#ifdef CONFIG_FB_OLPC_DCON_1_5
- /* XO-1.5 */
- if (olpc_board_at_least(olpc_board(0xd0)))
- pdata = &dcon_pdata_xo_1_5;
-#endif
-#ifdef CONFIG_FB_OLPC_DCON_1
- if (!pdata)
- pdata = &dcon_pdata_xo_1;
-#endif
-
- return i2c_add_driver(&dcon_driver);
-}
-
-static void __exit olpc_dcon_exit(void)
-{
- i2c_del_driver(&dcon_driver);
-}
-
-module_init(olpc_dcon_init);
-module_exit(olpc_dcon_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
deleted file mode 100644
index d06e19db1b80..000000000000
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef OLPC_DCON_H_
-#define OLPC_DCON_H_
-
-#include <linux/notifier.h>
-#include <linux/workqueue.h>
-
-/* DCON registers */
-
-#define DCON_REG_ID 0
-#define DCON_REG_MODE 1
-
-#define MODE_PASSTHRU (1<<0)
-#define MODE_SLEEP (1<<1)
-#define MODE_SLEEP_AUTO (1<<2)
-#define MODE_BL_ENABLE (1<<3)
-#define MODE_BLANK (1<<4)
-#define MODE_CSWIZZLE (1<<5)
-#define MODE_COL_AA (1<<6)
-#define MODE_MONO_LUMA (1<<7)
-#define MODE_SCAN_INT (1<<8)
-#define MODE_CLOCKDIV (1<<9)
-#define MODE_DEBUG (1<<14)
-#define MODE_SELFTEST (1<<15)
-
-#define DCON_REG_HRES 0x2
-#define DCON_REG_HTOTAL 0x3
-#define DCON_REG_HSYNC_WIDTH 0x4
-#define DCON_REG_VRES 0x5
-#define DCON_REG_VTOTAL 0x6
-#define DCON_REG_VSYNC_WIDTH 0x7
-#define DCON_REG_TIMEOUT 0x8
-#define DCON_REG_SCAN_INT 0x9
-#define DCON_REG_BRIGHT 0xa
-#define DCON_REG_MEM_OPT_A 0x41
-#define DCON_REG_MEM_OPT_B 0x42
-
-/* Load Delay Locked Loop (DLL) settings for clock delay */
-#define MEM_DLL_CLOCK_DELAY (1<<0)
-/* Memory controller power down function */
-#define MEM_POWER_DOWN (1<<8)
-/* Memory controller software reset */
-#define MEM_SOFT_RESET (1<<0)
-
-/* Status values */
-
-#define DCONSTAT_SCANINT 0
-#define DCONSTAT_SCANINT_DCON 1
-#define DCONSTAT_DISPLAYLOAD 2
-#define DCONSTAT_MISSED 3
-
-/* Source values */
-
-#define DCON_SOURCE_DCON 0
-#define DCON_SOURCE_CPU 1
-
-/* Interrupt */
-#define DCON_IRQ 6
-
-struct dcon_priv {
- struct i2c_client *client;
- struct fb_info *fbinfo;
- struct backlight_device *bl_dev;
-
- wait_queue_head_t waitq;
- struct work_struct switch_source;
- struct notifier_block reboot_nb;
-
- /* Shadow register for the DCON_REG_MODE register */
- u8 disp_mode;
-
- /* The current backlight value - this saves us some smbus traffic */
- u8 bl_val;
-
- /* Current source, initialized at probe time */
- int curr_src;
-
- /* Desired source */
- int pending_src;
-
- /* Variables used during switches */
- bool switched;
- struct timespec irq_time;
- struct timespec load_time;
-
- /* Current output type; true == mono, false == color */
- bool mono;
- bool asleep;
- /* This get set while controlling fb blank state from the driver */
- bool ignore_fb_events;
-};
-
-struct dcon_platform_data {
- int (*init)(struct dcon_priv *);
- void (*bus_stabilize_wiggle)(void);
- void (*set_dconload)(int);
- int (*read_status)(u8 *);
-};
-
-#include <linux/interrupt.h>
-
-irqreturn_t dcon_interrupt(int irq, void *id);
-
-#ifdef CONFIG_FB_OLPC_DCON_1
-extern struct dcon_platform_data dcon_pdata_xo_1;
-#endif
-
-#ifdef CONFIG_FB_OLPC_DCON_1_5
-extern struct dcon_platform_data dcon_pdata_xo_1_5;
-#endif
-
-#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
deleted file mode 100644
index 0c5a10c69401..000000000000
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
- *
- * Copyright © 2006-2007 Red Hat, Inc.
- * Copyright © 2006-2007 Advanced Micro Devices, Inc.
- * Copyright © 2009 VIA Technology, Inc.
- * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
- *
- * This program is free software. You can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/cs5535.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <asm/olpc.h>
-
-#include "olpc_dcon.h"
-
-static int dcon_init_xo_1(struct dcon_priv *dcon)
-{
- unsigned char lob;
-
- if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
- pr_err("failed to request STAT0 GPIO\n");
- return -EIO;
- }
- if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
- pr_err("failed to request STAT1 GPIO\n");
- goto err_gp_stat1;
- }
- if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
- pr_err("failed to request IRQ GPIO\n");
- goto err_gp_irq;
- }
- if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
- pr_err("failed to request LOAD GPIO\n");
- goto err_gp_load;
- }
- if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
- pr_err("failed to request BLANK GPIO\n");
- goto err_gp_blank;
- }
-
- /* Turn off the event enable for GPIO7 just to be safe */
- cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
-
- /*
- * Determine the current state by reading the GPIO bit; earlier
- * stages of the boot process have established the state.
- *
- * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
- * this is because OFW will disable input for the pin and set a value..
- * READ_BACK will only contain a valid value if input is enabled and
- * then a value is set. So, future readings of the pin can use
- * READ_BACK, but the first one cannot. Awesome, huh?
- */
- dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
- ? DCON_SOURCE_CPU
- : DCON_SOURCE_DCON;
- dcon->pending_src = dcon->curr_src;
-
- /* Set the directions for the GPIO pins */
- gpio_direction_input(OLPC_GPIO_DCON_STAT0);
- gpio_direction_input(OLPC_GPIO_DCON_STAT1);
- gpio_direction_input(OLPC_GPIO_DCON_IRQ);
- gpio_direction_input(OLPC_GPIO_DCON_BLANK);
- gpio_direction_output(OLPC_GPIO_DCON_LOAD,
- dcon->curr_src == DCON_SOURCE_CPU);
-
- /* Set up the interrupt mappings */
-
- /* Set the IRQ to pair 2 */
- cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
-
- /* Enable group 2 to trigger the DCON interrupt */
- cs5535_gpio_set_irq(2, DCON_IRQ);
-
- /* Select edge level for interrupt (in PIC) */
- lob = inb(0x4d0);
- lob &= ~(1 << DCON_IRQ);
- outb(lob, 0x4d0);
-
- /* Register the interrupt handler */
- if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
- pr_err("failed to request DCON's irq\n");
- goto err_req_irq;
- }
-
- /* Clear INV_EN for GPIO7 (DCONIRQ) */
- cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
-
- /* Enable filter for GPIO12 (DCONBLANK) */
- cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
-
- /* Disable filter for GPIO7 */
- cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
-
- /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
- cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
- cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
-
- /* Add GPIO12 to the Filter Event Pair #7 */
- cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
-
- /* Turn off negative Edge Enable for GPIO12 */
- cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
-
- /* Enable negative Edge Enable for GPIO7 */
- cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
-
- /* Zero the filter amount for Filter Event Pair #7 */
- cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
-
- /* Clear the negative edge status for GPIO7 and GPIO12 */
- cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
- cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
-
- /* FIXME: Clear the positive status as well, just to be sure */
- cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
- cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
-
- /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
- cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
- cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
-
- return 0;
-
-err_req_irq:
- gpio_free(OLPC_GPIO_DCON_BLANK);
-err_gp_blank:
- gpio_free(OLPC_GPIO_DCON_LOAD);
-err_gp_load:
- gpio_free(OLPC_GPIO_DCON_IRQ);
-err_gp_irq:
- gpio_free(OLPC_GPIO_DCON_STAT1);
-err_gp_stat1:
- gpio_free(OLPC_GPIO_DCON_STAT0);
- return -EIO;
-}
-
-static void dcon_wiggle_xo_1(void)
-{
- int x;
-
- /*
- * According to HiMax, when powering the DCON up we should hold
- * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON
- * state machine to reset to a (sane) initial state. Mitch Bradley
- * did some testing and discovered that holding for 16 SMB_CLK cycles
- * worked a lot more reliably, so that's what we do here.
- *
- * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
- * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
- * GPIO15.
- */
- cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
- cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
- cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
- cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
- cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
- cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
- cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
- cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
- cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
- cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
-
- for (x = 0; x < 16; x++) {
- udelay(5);
- cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
- udelay(5);
- cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
- }
- udelay(5);
- cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
- cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
- cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
- cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
-}
-
-static void dcon_set_dconload_1(int val)
-{
- gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
-}
-
-static int dcon_read_status_xo_1(u8 *status)
-{
- *status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
- *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
-
- /* Clear the negative edge status for GPIO7 */
- cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
-
- return 0;
-}
-
-struct dcon_platform_data dcon_pdata_xo_1 = {
- .init = dcon_init_xo_1,
- .bus_stabilize_wiggle = dcon_wiggle_xo_1,
- .set_dconload = dcon_set_dconload_1,
- .read_status = dcon_read_status_xo_1,
-};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
deleted file mode 100644
index 6a4d379c16a3..000000000000
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2009,2010 One Laptop per Child
- *
- * This program is free software. You can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/acpi.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <asm/olpc.h>
-
-/* TODO: this eventually belongs in linux/vx855.h */
-#define NR_VX855_GPI 14
-#define NR_VX855_GPO 13
-#define NR_VX855_GPIO 15
-
-#define VX855_GPI(n) (n)
-#define VX855_GPO(n) (NR_VX855_GPI + (n))
-#define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n))
-
-#include "olpc_dcon.h"
-
-/* Hardware setup on the XO 1.5:
- * DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
- * DCONBLANK connects to VX855_GPIO8 (not SSPICLK) unused in driver
- * DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
- * DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
- * DCONIRQ connects to VX855_GPIO12
- * DCONSMBDATA connects to VX855 graphics CRTSPD
- * DCONSMBCLK connects to VX855 graphics CRTSPCLK
- */
-
-#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
-#define VX855_GPI_STATUS_CHG 0x450 /* PMIO_Rx50 */
-#define VX855_GPI_SCI_SMI 0x452 /* PMIO_Rx52 */
-#define BIT_GPIO12 0x40
-
-#define PREFIX "OLPC DCON:"
-
-static void dcon_clear_irq(void)
-{
- /* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
- outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
-}
-
-static int dcon_was_irq(void)
-{
- u_int8_t tmp;
-
- /* irq status will appear in PMIO_Rx50[6] on gpio12 */
- tmp = inb(VX855_GPI_STATUS_CHG);
- return !!(tmp & BIT_GPIO12);
-}
-
-static int dcon_init_xo_1_5(struct dcon_priv *dcon)
-{
- unsigned int irq;
-
- dcon_clear_irq();
-
- /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
- outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
-
- /* Determine the current state of DCONLOAD, likely set by firmware */
- /* GPIO1 */
- dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
- DCON_SOURCE_CPU : DCON_SOURCE_DCON;
- dcon->pending_src = dcon->curr_src;
-
- /* we're sharing the IRQ with ACPI */
- irq = acpi_gbl_FADT.sci_interrupt;
- if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
- pr_err("DCON (IRQ%d) allocation failed\n", irq);
- return -EIO;
- }
-
- return 0;
-}
-
-static void set_i2c_line(int sda, int scl)
-{
- unsigned char tmp;
- unsigned int port = 0x26;
-
- /* FIXME: This directly accesses the CRT GPIO controller !!! */
- outb(port, 0x3c4);
- tmp = inb(0x3c5);
-
- if (scl)
- tmp |= 0x20;
- else
- tmp &= ~0x20;
-
- if (sda)
- tmp |= 0x10;
- else
- tmp &= ~0x10;
-
- tmp |= 0x01;
-
- outb(port, 0x3c4);
- outb(tmp, 0x3c5);
-}
-
-
-static void dcon_wiggle_xo_1_5(void)
-{
- int x;
-
- /*
- * According to HiMax, when powering the DCON up we should hold
- * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON
- * state machine to reset to a (sane) initial state. Mitch Bradley
- * did some testing and discovered that holding for 16 SMB_CLK cycles
- * worked a lot more reliably, so that's what we do here.
- */
- set_i2c_line(1, 1);
-
- for (x = 0; x < 16; x++) {
- udelay(5);
- set_i2c_line(1, 0);
- udelay(5);
- set_i2c_line(1, 1);
- }
- udelay(5);
-
- /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
- outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
-}
-
-static void dcon_set_dconload_xo_1_5(int val)
-{
- gpio_set_value(VX855_GPIO(1), val);
-}
-
-static int dcon_read_status_xo_1_5(u8 *status)
-{
- if (!dcon_was_irq())
- return -1;
-
- /* I believe this is the same as "inb(0x44b) & 3" */
- *status = gpio_get_value(VX855_GPI(10));
- *status |= gpio_get_value(VX855_GPI(11)) << 1;
-
- dcon_clear_irq();
-
- return 0;
-}
-
-struct dcon_platform_data dcon_pdata_xo_1_5 = {
- .init = dcon_init_xo_1_5,
- .bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
- .set_dconload = dcon_set_dconload_xo_1_5,
- .read_status = dcon_read_status_xo_1_5,
-};
diff --git a/drivers/staging/panel/Kconfig b/drivers/staging/panel/Kconfig
deleted file mode 100644
index 3defa0133f2e..000000000000
--- a/drivers/staging/panel/Kconfig
+++ /dev/null
@@ -1,278 +0,0 @@
-config PANEL
- tristate "Parallel port LCD/Keypad Panel support"
- depends on PARPORT
- ---help---
- Say Y here if you have an HD44780 or KS-0074 LCD connected to your
- parallel port. This driver also features 4 and 6-key keypads. The LCD
- is accessible through the /dev/lcd char device (10, 156), and the
- keypad through /dev/keypad (10, 185). Both require misc device to be
- enabled. This code can either be compiled as a module, or linked into
- the kernel and started at boot. If you don't understand what all this
- is about, say N.
-
-config PANEL_PARPORT
- int "Default parallel port number (0=LPT1)"
- depends on PANEL
- range 0 255
- default "0"
- ---help---
- This is the index of the parallel port the panel is connected to. One
- driver instance only supports one parallel port, so if your keypad
- and LCD are connected to two separate ports, you have to start two
- modules with different arguments. Numbering starts with '0' for LPT1,
- and so on.
-
-config PANEL_PROFILE
- int "Default panel profile (0-5, 0=custom)"
- depends on PANEL
- range 0 5
- default "5"
- ---help---
- To ease configuration, the driver supports different configuration
- profiles for past and recent wirings. These profiles can also be
- used to define an approximate configuration, completed by a few
- other options. Here are the profiles :
-
- 0 = custom (see further)
- 1 = 2x16 parallel LCD, old keypad
- 2 = 2x16 serial LCD (KS-0074), new keypad
- 3 = 2x16 parallel LCD (Hantronix), no keypad
- 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
- 5 = 2x40 parallel LCD (old one), with old keypad
-
- Custom configurations allow you to define how your display is
- wired to the parallel port, and how it works. This is only intended
- for experts.
-
-config PANEL_KEYPAD
- depends on PANEL && PANEL_PROFILE="0"
- int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
- range 0 3
- default 0
- ---help---
- This enables and configures a keypad connected to the parallel port.
- The keys will be read from character device 10,185. Valid values are :
-
- 0 : do not enable this driver
- 1 : old 6 keys keypad
- 2 : new 6 keys keypad, as used on the server at www.ant-computing.com
- 3 : Nexcom NSA1045's 4 keys keypad
-
- New profiles can be described in the driver source. The driver also
- supports simultaneous keys pressed when the keypad supports them.
-
-config PANEL_LCD
- depends on PANEL && PANEL_PROFILE="0"
- int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
- range 0 5
- default 0
- ---help---
- This enables and configures an LCD connected to the parallel port.
- The driver includes an interpreter for escape codes starting with
- '\e[L' which are specific to the LCD, and a few ANSI codes. The
- driver will be registered as character device 10,156, usually
- under the name '/dev/lcd'. There are a total of 6 supported types :
-
- 0 : do not enable the driver
- 1 : custom configuration and wiring (see further)
- 2 : 2x16 & 2x40 parallel LCD (old wiring)
- 3 : 2x16 serial LCD (KS-0074 based)
- 4 : 2x16 parallel LCD (Hantronix wiring)
- 5 : 2x16 parallel LCD (Nexcom wiring)
-
- When type '1' is specified, other options will appear to configure
- more precise aspects (wiring, dimensions, protocol, ...). Please note
- that those values changed from the 2.4 driver for better consistency.
-
-config PANEL_LCD_HEIGHT
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Number of lines on the LCD (1-2)"
- range 1 2
- default 2
- ---help---
- This is the number of visible character lines on the LCD in custom profile.
- It can either be 1 or 2.
-
-config PANEL_LCD_WIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Number of characters per line on the LCD (1-40)"
- range 1 40
- default 40
- ---help---
- This is the number of characters per line on the LCD in custom profile.
- Common values are 16,20,24,40.
-
-config PANEL_LCD_BWIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Internal LCD line width (1-40, 40 by default)"
- range 1 40
- default 40
- ---help---
- Most LCDs use a standard controller which supports hardware lines of 40
- characters, although sometimes only 16, 20 or 24 of them are really wired
- to the terminal. This results in some non-visible but addressable characters,
- and is the case for most parallel LCDs. Other LCDs, and some serial ones,
- however, use the same line width internally as what is visible. The KS0074
- for example, uses 16 characters per line for 16 visible characters per line.
-
- This option lets you configure the value used by your LCD in 'custom' profile.
- If you don't know, put '40' here.
-
-config PANEL_LCD_HWIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Hardware LCD line width (1-64, 64 by default)"
- range 1 64
- default 64
- ---help---
- Most LCDs use a single address bit to differentiate line 0 and line 1. Since
- some of them need to be able to address 40 chars with the lower bits, they
- often use the next power of 2, which is 64, to address the
- next line.
-
- If you don't know what your LCD uses, when in doubt use 16 here for a 2x16, and
- 64 here for a 2x40.
-
-config PANEL_LCD_CHARSET
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "LCD character set (0=normal, 1=KS0074)"
- range 0 1
- default 0
- ---help---
- Some controllers such as the KS0074 use a somewhat strange character set
- where many symbols are at unusual places. The driver knows how to map
- 'standard' ASCII characters to the character sets used by these controllers.
- Valid values are :
-
- 0 : normal (untranslated) character set
- 1 : KS0074 character set
-
- If you don't know, use the normal one (0).
-
-config PANEL_LCD_PROTO
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "LCD communication mode (0=parallel 8 bits, 1=serial)"
- range 0 1
- default 0
- ---help---
- This driver now supports any serial or parallel LCD wired to a parallel
- port. But before assigning signals, the driver needs to know if it will
- be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
- (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
- (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for a 8 bits
- parallel LCD, and 1 for a serial LCD.
-
-config PANEL_LCD_PIN_E
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
- range -17 17
- default 14
- ---help---
- This describes the number of the parallel port pin to which the LCD 'E'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'E' pin in custom profile is '14' (AUTOFEED).
-
-config PANEL_LCD_PIN_RS
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
- range -17 17
- default 17
- ---help---
- This describes the number of the parallel port pin to which the LCD 'RS'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'RS' pin in custom profile is '17' (SELECT IN).
-
-config PANEL_LCD_PIN_RW
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
- range -17 17
- default 16
- ---help---
- This describes the number of the parallel port pin to which the LCD 'RW'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'RW' pin in custom profile is '16' (INIT).
-
-config PANEL_LCD_PIN_SCL
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
- range -17 17
- default 1
- ---help---
- This describes the number of the parallel port pin to which the serial
- LCD 'SCL' signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'SCL' pin in custom profile is '1' (STROBE).
-
-config PANEL_LCD_PIN_SDA
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
- range -17 17
- default 2
- ---help---
- This describes the number of the parallel port pin to which the serial
- LCD 'SDA' signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'SDA' pin in custom profile is '2' (D0).
-
-config PANEL_LCD_PIN_BL
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
- range -17 17
- default 0
- ---help---
- This describes the number of the parallel port pin to which the LCD 'BL' signal
- has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'BL' pin in custom profile is '0' (uncontrolled).
-
-config PANEL_CHANGE_MESSAGE
- depends on PANEL
- bool "Change LCD initialization message ?"
- default "n"
- ---help---
- This allows you to replace the boot message indicating the kernel version
- and the driver version with a custom message. This is useful on appliances
- where a simple 'Starting system' message can be enough to stop a customer
- from worrying.
-
- If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
- say 'N' and keep the default message with the version.
-
-config PANEL_BOOT_MESSAGE
- depends on PANEL && PANEL_CHANGE_MESSAGE="y"
- string "New initialization message"
- default ""
- ---help---
- This allows you to replace the boot message indicating the kernel version
- and the driver version with a custom message. This is useful on appliances
- where a simple 'Starting system' message can be enough to stop a customer
- from worrying.
-
- An empty message will only clear the display at driver init time. Any other
- printf()-formatted message is valid with newline and escape codes.
diff --git a/drivers/staging/panel/Makefile b/drivers/staging/panel/Makefile
deleted file mode 100644
index 747c238b82f9..000000000000
--- a/drivers/staging/panel/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_PANEL) += panel.o
diff --git a/drivers/staging/panel/TODO b/drivers/staging/panel/TODO
deleted file mode 100644
index 2db3f994b632..000000000000
--- a/drivers/staging/panel/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - review major/minor usages
- - review userspace api
- - see if all of this could be easier done in userspace instead.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Willy Tarreau <willy@meta-x.org>
diff --git a/drivers/staging/panel/lcd-panel-cgram.txt b/drivers/staging/panel/lcd-panel-cgram.txt
deleted file mode 100644
index 7f82c905763d..000000000000
--- a/drivers/staging/panel/lcd-panel-cgram.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Some LCDs allow you to define up to 8 characters, mapped to ASCII
-characters 0 to 7. The escape code to define a new character is
-'\e[LG' followed by one digit from 0 to 7, representing the character
-number, and up to 8 couples of hex digits terminated by a semi-colon
-(';'). Each couple of digits represents a line, with 1-bits for each
-illuminated pixel with LSB on the right. Lines are numbered from the
-top of the character to the bottom. On a 5x7 matrix, only the 5 lower
-bits of the 7 first bytes are used for each character. If the string
-is incomplete, only complete lines will be redefined. Here are some
-examples :
-
- printf "\e[LG0010101050D1F0C04;" => 0 = [enter]
- printf "\e[LG1040E1F0000000000;" => 1 = [up]
- printf "\e[LG2000000001F0E0400;" => 2 = [down]
- printf "\e[LG3040E1F001F0E0400;" => 3 = [up-down]
- printf "\e[LG40002060E1E0E0602;" => 4 = [left]
- printf "\e[LG500080C0E0F0E0C08;" => 5 = [right]
- printf "\e[LG60016051516141400;" => 6 = "IP"
-
- printf "\e[LG00103071F1F070301;" => big speaker
- printf "\e[LG00002061E1E060200;" => small speaker
-
-Willy
-
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
deleted file mode 100644
index 498d6233648f..000000000000
--- a/drivers/staging/panel/panel.c
+++ /dev/null
@@ -1,2447 +0,0 @@
-/*
- * Front panel driver for Linux
- * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad)
- * connected to a parallel printer port.
- *
- * The LCD module may either be an HD44780-like 8-bit parallel LCD, or a 1-bit
- * serial module compatible with Samsung's KS0074. The pins may be connected in
- * any combination, everything is programmable.
- *
- * The keypad consists of a matrix of push buttons connecting input pins to
- * data output pins or to the ground. The combinations have to be hard-coded
- * in the driver, though several profiles exist and adding new ones is easy.
- *
- * Several profiles are provided for commonly found LCD+keypad modules on the
- * market, such as those found in Nexcom's appliances.
- *
- * FIXME:
- * - the initialization/deinitialization process is very dirty and should
- * be rewritten. It may even be buggy.
- *
- * TODO:
- * - document 24 keys keyboard (3 rows of 8 cols, 32 diodes + 2 inputs)
- * - make the LCD a part of a virtual screen of Vx*Vy
- * - make the inputs list smp-safe
- * - change the keyboard to a double mapping : signals -> key_id -> values
- * so that applications can change values without knowing signals
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/miscdevice.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/parport.h>
-#include <linux/list.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <generated/utsrelease.h>
-
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#define LCD_MINOR 156
-#define KEYPAD_MINOR 185
-
-#define PANEL_VERSION "0.9.5"
-
-#define LCD_MAXBYTES 256 /* max burst write */
-
-#define KEYPAD_BUFFER 64
-
-/* keyboard poll interval, in jiffies (HZ/50 = every 20 ms) */
-#define INPUT_POLL_TIME (HZ/50)
-/* a key starts to repeat after this many INPUT_POLL_TIME ticks */
-#define KEYPAD_REP_START (10)
-/* a held key then repeats every this many INPUT_POLL_TIME ticks */
-#define KEYPAD_REP_DELAY (2)
-
-/* keep the light on for this many INPUT_POLL_TIME ticks for each flash */
-#define FLASH_LIGHT_TEMPO (200)
-
-/* converts an r_str() input to an active-high bit string : 000BAOSE */
-#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3)
-
-#define PNL_PBUSY 0x80 /* inverted input, active low */
-#define PNL_PACK 0x40 /* direct input, active low */
-#define PNL_POUTPA 0x20 /* direct input, active high */
-#define PNL_PSELECD 0x10 /* direct input, active high */
-#define PNL_PERRORP 0x08 /* direct input, active low */
-
-#define PNL_PBIDIR 0x20 /* bi-directional ports */
-/* high to read data in or-ed with data out */
-#define PNL_PINTEN 0x10
-#define PNL_PSELECP 0x08 /* inverted output, active low */
-#define PNL_PINITP 0x04 /* direct output, active low */
-#define PNL_PAUTOLF 0x02 /* inverted output, active low */
-#define PNL_PSTROBE 0x01 /* inverted output */
-
-#define PNL_PD0 0x01
-#define PNL_PD1 0x02
-#define PNL_PD2 0x04
-#define PNL_PD3 0x08
-#define PNL_PD4 0x10
-#define PNL_PD5 0x20
-#define PNL_PD6 0x40
-#define PNL_PD7 0x80
-
-#define PIN_NONE 0
-#define PIN_STROBE 1
-#define PIN_D0 2
-#define PIN_D1 3
-#define PIN_D2 4
-#define PIN_D3 5
-#define PIN_D4 6
-#define PIN_D5 7
-#define PIN_D6 8
-#define PIN_D7 9
-#define PIN_AUTOLF 14
-#define PIN_INITP 16
-#define PIN_SELECP 17
-#define PIN_NOT_SET 127
-
-#define LCD_FLAG_S 0x0001
-#define LCD_FLAG_ID 0x0002
-#define LCD_FLAG_B 0x0004 /* blink on */
-#define LCD_FLAG_C 0x0008 /* cursor on */
-#define LCD_FLAG_D 0x0010 /* display on */
-#define LCD_FLAG_F 0x0020 /* large font mode */
-#define LCD_FLAG_N 0x0040 /* 2-rows mode */
-#define LCD_FLAG_L 0x0080 /* backlight enabled */
-
-/* LCD commands */
-#define LCD_CMD_DISPLAY_CLEAR 0x01 /* Clear entire display */
-
-#define LCD_CMD_ENTRY_MODE 0x04 /* Set entry mode */
-#define LCD_CMD_CURSOR_INC 0x02 /* Increment cursor */
-
-#define LCD_CMD_DISPLAY_CTRL 0x08 /* Display control */
-#define LCD_CMD_DISPLAY_ON 0x04 /* Set display on */
-#define LCD_CMD_CURSOR_ON 0x02 /* Set cursor on */
-#define LCD_CMD_BLINK_ON 0x01 /* Set blink on */
-
-#define LCD_CMD_SHIFT 0x10 /* Shift cursor/display */
-#define LCD_CMD_DISPLAY_SHIFT 0x08 /* Shift display instead of cursor */
-#define LCD_CMD_SHIFT_RIGHT 0x04 /* Shift display/cursor to the right */
-
-#define LCD_CMD_FUNCTION_SET 0x20 /* Set function */
-#define LCD_CMD_DATA_LEN_8BITS 0x10 /* Set data length to 8 bits */
-#define LCD_CMD_TWO_LINES 0x08 /* Set to two display lines */
-#define LCD_CMD_FONT_5X10_DOTS 0x04 /* Set char font to 5x10 dots */
-
-#define LCD_CMD_SET_CGRAM_ADDR 0x40 /* Set char generator RAM address */
-
-#define LCD_CMD_SET_DDRAM_ADDR 0x80 /* Set display data RAM address */
-
-#define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */
-#define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */
-
-#define NOT_SET -1
-
-/* macros to simplify use of the parallel port */
-#define r_ctr(x) (parport_read_control((x)->port))
-#define r_dtr(x) (parport_read_data((x)->port))
-#define r_str(x) (parport_read_status((x)->port))
-#define w_ctr(x, y) (parport_write_control((x)->port, (y)))
-#define w_dtr(x, y) (parport_write_data((x)->port, (y)))
-
-/* this defines which bits are to be used and which ones to be ignored */
-/* logical or of the output bits involved in the scan matrix */
-static __u8 scan_mask_o;
-/* logical or of the input bits involved in the scan matrix */
-static __u8 scan_mask_i;
-
-typedef __u64 pmask_t;
-
-enum input_type {
- INPUT_TYPE_STD,
- INPUT_TYPE_KBD,
-};
-
-enum input_state {
- INPUT_ST_LOW,
- INPUT_ST_RISING,
- INPUT_ST_HIGH,
- INPUT_ST_FALLING,
-};
-
-struct logical_input {
- struct list_head list;
- pmask_t mask;
- pmask_t value;
- enum input_type type;
- enum input_state state;
- __u8 rise_time, fall_time;
- __u8 rise_timer, fall_timer, high_timer;
-
- union {
- struct { /* valid when type == INPUT_TYPE_STD */
- void (*press_fct)(int);
- void (*release_fct)(int);
- int press_data;
- int release_data;
- } std;
- struct { /* valid when type == INPUT_TYPE_KBD */
- /* strings may not be null-terminated */
- char press_str[sizeof(void *) + sizeof(int)];
- char repeat_str[sizeof(void *) + sizeof(int)];
- char release_str[sizeof(void *) + sizeof(int)];
- } kbd;
- } u;
-};
-
-static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
-
-/* physical contacts history
- * Physical contacts are a 45 bits string of 9 groups of 5 bits each.
- * The 8 lower groups correspond to output bits 0 to 7, and the 9th group
- * corresponds to the ground.
- * Within each group, bits are stored in the same order as read on the port :
- * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0).
- * So, each __u64 (or pmask_t) is represented like this :
- * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE
- * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00>
- */
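-
-/*
- * Illustrative helper, not part of the original driver (the name phys_bit
- * is made up here): following the layout described above, the pmask_t bit
- * for input line 'in' (0=E, 1=S, 2=P, 3=A, 4=B) within output group 'grp'
- * (0..7 for D0..D7, 8 for ground) would be:
- */
-static inline pmask_t phys_bit(unsigned int grp, unsigned int in)
-{
- return (pmask_t)1 << (grp * 5 + in);
-}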
-
-/* what has just been read from the I/O ports */
-static pmask_t phys_read;
-/* previous phys_read */
-static pmask_t phys_read_prev;
-/* stabilized phys_read (phys_read|phys_read_prev) */
-static pmask_t phys_curr;
-/* previous phys_curr */
-static pmask_t phys_prev;
-/* 0 means that at least one logical signal needs be computed */
-static char inputs_stable;
-
-/* these variables are specific to the keypad */
-static struct {
- bool enabled;
-} keypad;
-
-static char keypad_buffer[KEYPAD_BUFFER];
-static int keypad_buflen;
-static int keypad_start;
-static char keypressed;
-static wait_queue_head_t keypad_read_wait;
-
-/* lcd-specific variables */
-static struct {
- bool enabled;
- bool initialized;
- bool must_clear;
-
- int height;
- int width;
- int bwidth;
- int hwidth;
- int charset;
- int proto;
- int light_tempo;
-
- /* TODO: use union here? */
- struct {
- int e;
- int rs;
- int rw;
- int cl;
- int da;
- int bl;
- } pins;
-
- /* contains the LCD config state */
- unsigned long int flags;
-
- /* Contains the LCD X and Y offset */
- struct {
- unsigned long int x;
- unsigned long int y;
- } addr;
-
- /* Current escape sequence and its length, or -1 if not in an escape */
- struct {
- char buf[LCD_ESCAPE_LEN + 1];
- int len;
- } esc_seq;
-} lcd;
-
-/* Needed only for init */
-static int selected_lcd_type = NOT_SET;
-
-/*
- * Bit masks to convert LCD signals to parallel port outputs.
- * _d_ are values for data port, _c_ are for control port.
- * [0] = signal OFF, [1] = signal ON, [2] = mask
- */
-#define BIT_CLR 0
-#define BIT_SET 1
-#define BIT_MSK 2
-#define BIT_STATES 3
-/*
- * one entry for each bit on the LCD
- */
-#define LCD_BIT_E 0
-#define LCD_BIT_RS 1
-#define LCD_BIT_RW 2
-#define LCD_BIT_BL 3
-#define LCD_BIT_CL 4
-#define LCD_BIT_DA 5
-#define LCD_BITS 6
-
-/*
- * each bit can be either connected to a DATA or CTRL port
- */
-#define LCD_PORT_C 0
-#define LCD_PORT_D 1
-#define LCD_PORTS 2
-
-static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
-
-/*
- * LCD protocols
- */
-#define LCD_PROTO_PARALLEL 0
-#define LCD_PROTO_SERIAL 1
-#define LCD_PROTO_TI_DA8XX_LCD 2
-
-/*
- * LCD character sets
- */
-#define LCD_CHARSET_NORMAL 0
-#define LCD_CHARSET_KS0074 1
-
-/*
- * LCD types
- */
-#define LCD_TYPE_NONE 0
-#define LCD_TYPE_CUSTOM 1
-#define LCD_TYPE_OLD 2
-#define LCD_TYPE_KS0074 3
-#define LCD_TYPE_HANTRONIX 4
-#define LCD_TYPE_NEXCOM 5
-
-/*
- * keypad types
- */
-#define KEYPAD_TYPE_NONE 0
-#define KEYPAD_TYPE_OLD 1
-#define KEYPAD_TYPE_NEW 2
-#define KEYPAD_TYPE_NEXCOM 3
-
-/*
- * panel profiles
- */
-#define PANEL_PROFILE_CUSTOM 0
-#define PANEL_PROFILE_OLD 1
-#define PANEL_PROFILE_NEW 2
-#define PANEL_PROFILE_HANTRONIX 3
-#define PANEL_PROFILE_NEXCOM 4
-#define PANEL_PROFILE_LARGE 5
-
-/*
- * Construct custom config from the kernel's configuration
- */
-#define DEFAULT_PARPORT 0
-#define DEFAULT_PROFILE PANEL_PROFILE_LARGE
-#define DEFAULT_KEYPAD_TYPE KEYPAD_TYPE_OLD
-#define DEFAULT_LCD_TYPE LCD_TYPE_OLD
-#define DEFAULT_LCD_HEIGHT 2
-#define DEFAULT_LCD_WIDTH 40
-#define DEFAULT_LCD_BWIDTH 40
-#define DEFAULT_LCD_HWIDTH 64
-#define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL
-#define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL
-
-#define DEFAULT_LCD_PIN_E PIN_AUTOLF
-#define DEFAULT_LCD_PIN_RS PIN_SELECP
-#define DEFAULT_LCD_PIN_RW PIN_INITP
-#define DEFAULT_LCD_PIN_SCL PIN_STROBE
-#define DEFAULT_LCD_PIN_SDA PIN_D0
-#define DEFAULT_LCD_PIN_BL PIN_NOT_SET
-
-#ifdef CONFIG_PANEL_PARPORT
-#undef DEFAULT_PARPORT
-#define DEFAULT_PARPORT CONFIG_PANEL_PARPORT
-#endif
-
-#ifdef CONFIG_PANEL_PROFILE
-#undef DEFAULT_PROFILE
-#define DEFAULT_PROFILE CONFIG_PANEL_PROFILE
-#endif
-
-#if DEFAULT_PROFILE == 0 /* custom */
-#ifdef CONFIG_PANEL_KEYPAD
-#undef DEFAULT_KEYPAD_TYPE
-#define DEFAULT_KEYPAD_TYPE CONFIG_PANEL_KEYPAD
-#endif
-
-#ifdef CONFIG_PANEL_LCD
-#undef DEFAULT_LCD_TYPE
-#define DEFAULT_LCD_TYPE CONFIG_PANEL_LCD
-#endif
-
-#ifdef CONFIG_PANEL_LCD_HEIGHT
-#undef DEFAULT_LCD_HEIGHT
-#define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT
-#endif
-
-#ifdef CONFIG_PANEL_LCD_WIDTH
-#undef DEFAULT_LCD_WIDTH
-#define DEFAULT_LCD_WIDTH CONFIG_PANEL_LCD_WIDTH
-#endif
-
-#ifdef CONFIG_PANEL_LCD_BWIDTH
-#undef DEFAULT_LCD_BWIDTH
-#define DEFAULT_LCD_BWIDTH CONFIG_PANEL_LCD_BWIDTH
-#endif
-
-#ifdef CONFIG_PANEL_LCD_HWIDTH
-#undef DEFAULT_LCD_HWIDTH
-#define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH
-#endif
-
-#ifdef CONFIG_PANEL_LCD_CHARSET
-#undef DEFAULT_LCD_CHARSET
-#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PROTO
-#undef DEFAULT_LCD_PROTO
-#define DEFAULT_LCD_PROTO CONFIG_PANEL_LCD_PROTO
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_E
-#undef DEFAULT_LCD_PIN_E
-#define DEFAULT_LCD_PIN_E CONFIG_PANEL_LCD_PIN_E
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_RS
-#undef DEFAULT_LCD_PIN_RS
-#define DEFAULT_LCD_PIN_RS CONFIG_PANEL_LCD_PIN_RS
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_RW
-#undef DEFAULT_LCD_PIN_RW
-#define DEFAULT_LCD_PIN_RW CONFIG_PANEL_LCD_PIN_RW
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_SCL
-#undef DEFAULT_LCD_PIN_SCL
-#define DEFAULT_LCD_PIN_SCL CONFIG_PANEL_LCD_PIN_SCL
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_SDA
-#undef DEFAULT_LCD_PIN_SDA
-#define DEFAULT_LCD_PIN_SDA CONFIG_PANEL_LCD_PIN_SDA
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_BL
-#undef DEFAULT_LCD_PIN_BL
-#define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL
-#endif
-
-#endif /* DEFAULT_PROFILE == 0 */
-
-/* global variables */
-
-/* Device single-open policy control */
-static atomic_t lcd_available = ATOMIC_INIT(1);
-static atomic_t keypad_available = ATOMIC_INIT(1);
-
-static struct pardevice *pprt;
-
-static int keypad_initialized;
-
-static void (*lcd_write_cmd)(int);
-static void (*lcd_write_data)(int);
-static void (*lcd_clear_fast)(void);
-
-static DEFINE_SPINLOCK(pprt_lock);
-static struct timer_list scan_timer;
-
-MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver");
-
-static int parport = DEFAULT_PARPORT;
-module_param(parport, int, 0000);
-MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)");
-
-static int profile = DEFAULT_PROFILE;
-module_param(profile, int, 0000);
-MODULE_PARM_DESC(profile,
- "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
- "4=16x2 nexcom; default=40x2, old kp");
-
-static int keypad_type = NOT_SET;
-module_param(keypad_type, int, 0000);
-MODULE_PARM_DESC(keypad_type,
- "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
-
-static int lcd_type = NOT_SET;
-module_param(lcd_type, int, 0000);
-MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
-
-static int lcd_height = NOT_SET;
-module_param(lcd_height, int, 0000);
-MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD");
-
-static int lcd_width = NOT_SET;
-module_param(lcd_width, int, 0000);
-MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD");
-
-static int lcd_bwidth = NOT_SET; /* internal buffer width (usually 40) */
-module_param(lcd_bwidth, int, 0000);
-MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)");
-
-static int lcd_hwidth = NOT_SET; /* hardware buffer width (usually 64) */
-module_param(lcd_hwidth, int, 0000);
-MODULE_PARM_DESC(lcd_hwidth, "Hardware LCD line width (64)");
-
-static int lcd_charset = NOT_SET;
-module_param(lcd_charset, int, 0000);
-MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
-
-static int lcd_proto = NOT_SET;
-module_param(lcd_proto, int, 0000);
-MODULE_PARM_DESC(lcd_proto,
- "LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface");
-
-/*
- * These are the parallel port pins the LCD control signals are connected to.
- * Set this to 0 if the signal is not used. Set it to its opposite value
- * (negative) if the signal is negated. PIN_NOT_SET (127) is used to indicate
- * that the pin has not been explicitly specified.
- *
- * WARNING! No check is performed for collisions with the keypad!
- */
-
-static int lcd_e_pin = PIN_NOT_SET;
-module_param(lcd_e_pin, int, 0000);
-MODULE_PARM_DESC(lcd_e_pin,
- "# of the // port pin connected to LCD 'E' signal, with polarity (-17..17)");
-
-static int lcd_rs_pin = PIN_NOT_SET;
-module_param(lcd_rs_pin, int, 0000);
-MODULE_PARM_DESC(lcd_rs_pin,
- "# of the // port pin connected to LCD 'RS' signal, with polarity (-17..17)");
-
-static int lcd_rw_pin = PIN_NOT_SET;
-module_param(lcd_rw_pin, int, 0000);
-MODULE_PARM_DESC(lcd_rw_pin,
- "# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
-
-static int lcd_cl_pin = PIN_NOT_SET;
-module_param(lcd_cl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_cl_pin,
- "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
-
-static int lcd_da_pin = PIN_NOT_SET;
-module_param(lcd_da_pin, int, 0000);
-MODULE_PARM_DESC(lcd_da_pin,
- "# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
-
-static int lcd_bl_pin = PIN_NOT_SET;
-module_param(lcd_bl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_bl_pin,
- "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
-
-/* Deprecated module parameters - consider not using them anymore */
-
-static int lcd_enabled = NOT_SET;
-module_param(lcd_enabled, int, 0000);
-MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead");
-
-static int keypad_enabled = NOT_SET;
-module_param(keypad_enabled, int, 0000);
-MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
-
-
-static const unsigned char *lcd_char_conv;
-
-/* for some LCD drivers (ks0074) we need a charset conversion table. */
-static const unsigned char lcd_char_conv_ks0074[256] = {
- /* 0|8 1|9 2|A 3|B 4|C 5|D 6|E 7|F */
- /* 0x00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- /* 0x08 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- /* 0x10 */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- /* 0x18 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- /* 0x20 */ 0x20, 0x21, 0x22, 0x23, 0xa2, 0x25, 0x26, 0x27,
- /* 0x28 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
- /* 0x30 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
- /* 0x38 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
- /* 0x40 */ 0xa0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
- /* 0x48 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
- /* 0x50 */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
- /* 0x58 */ 0x58, 0x59, 0x5a, 0xfa, 0xfb, 0xfc, 0x1d, 0xc4,
- /* 0x60 */ 0x96, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- /* 0x68 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
- /* 0x70 */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
- /* 0x78 */ 0x78, 0x79, 0x7a, 0xfd, 0xfe, 0xff, 0xce, 0x20,
- /* 0x80 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
- /* 0x88 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
- /* 0x90 */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
- /* 0x98 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
- /* 0xA0 */ 0x20, 0x40, 0xb1, 0xa1, 0x24, 0xa3, 0xfe, 0x5f,
- /* 0xA8 */ 0x22, 0xc8, 0x61, 0x14, 0x97, 0x2d, 0xad, 0x96,
- /* 0xB0 */ 0x80, 0x8c, 0x82, 0x83, 0x27, 0x8f, 0x86, 0xdd,
- /* 0xB8 */ 0x2c, 0x81, 0x6f, 0x15, 0x8b, 0x8a, 0x84, 0x60,
- /* 0xC0 */ 0xe2, 0xe2, 0xe2, 0x5b, 0x5b, 0xae, 0xbc, 0xa9,
- /* 0xC8 */ 0xc5, 0xbf, 0xc6, 0xf1, 0xe3, 0xe3, 0xe3, 0xe3,
- /* 0xD0 */ 0x44, 0x5d, 0xa8, 0xe4, 0xec, 0xec, 0x5c, 0x78,
- /* 0xD8 */ 0xab, 0xa6, 0xe5, 0x5e, 0x5e, 0xe6, 0xaa, 0xbe,
- /* 0xE0 */ 0x7f, 0xe7, 0xaf, 0x7b, 0x7b, 0xaf, 0xbd, 0xc8,
- /* 0xE8 */ 0xa4, 0xa5, 0xc7, 0xf6, 0xa7, 0xe8, 0x69, 0x69,
- /* 0xF0 */ 0xed, 0x7d, 0xa8, 0xe4, 0xec, 0x5c, 0x5c, 0x25,
- /* 0xF8 */ 0xac, 0xa6, 0xea, 0xef, 0x7e, 0xeb, 0xb2, 0x79,
-};
-
-static const char old_keypad_profile[][4][9] = {
- {"S0", "Left\n", "Left\n", ""},
- {"S1", "Down\n", "Down\n", ""},
- {"S2", "Up\n", "Up\n", ""},
- {"S3", "Right\n", "Right\n", ""},
- {"S4", "Esc\n", "Esc\n", ""},
- {"S5", "Ret\n", "Ret\n", ""},
- {"", "", "", ""}
-};
-
-/* signals, press, repeat, release */
-static const char new_keypad_profile[][4][9] = {
- {"S0", "Left\n", "Left\n", ""},
- {"S1", "Down\n", "Down\n", ""},
- {"S2", "Up\n", "Up\n", ""},
- {"S3", "Right\n", "Right\n", ""},
- {"S4s5", "", "Esc\n", "Esc\n"},
- {"s4S5", "", "Ret\n", "Ret\n"},
- {"S4S5", "Help\n", "", ""},
- /* add new signals above this line */
- {"", "", "", ""}
-};
-
-/* signals, press, repeat, release */
-static const char nexcom_keypad_profile[][4][9] = {
- {"a-p-e-", "Down\n", "Down\n", ""},
- {"a-p-E-", "Ret\n", "Ret\n", ""},
- {"a-P-E-", "Esc\n", "Esc\n", ""},
- {"a-P-e-", "Up\n", "Up\n", ""},
- /* add new signals above this line */
- {"", "", "", ""}
-};
-
-static const char (*keypad_profile)[4][9] = old_keypad_profile;
-
-/* FIXME: this should be converted to a bit array containing signals states */
-static struct {
- unsigned char e; /* parallel LCD E (data latch on falling edge) */
- unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */
- unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */
- unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */
- unsigned char cl; /* serial LCD clock (latch on rising edge) */
- unsigned char da; /* serial LCD data */
-} bits;
-
-static void init_scan_timer(void);
-
-/* sets data port bits according to current signals values */
-static int set_data_bits(void)
-{
- int val, bit;
-
- val = r_dtr(pprt);
- for (bit = 0; bit < LCD_BITS; bit++)
- val &= lcd_bits[LCD_PORT_D][bit][BIT_MSK];
-
- val |= lcd_bits[LCD_PORT_D][LCD_BIT_E][bits.e]
- | lcd_bits[LCD_PORT_D][LCD_BIT_RS][bits.rs]
- | lcd_bits[LCD_PORT_D][LCD_BIT_RW][bits.rw]
- | lcd_bits[LCD_PORT_D][LCD_BIT_BL][bits.bl]
- | lcd_bits[LCD_PORT_D][LCD_BIT_CL][bits.cl]
- | lcd_bits[LCD_PORT_D][LCD_BIT_DA][bits.da];
-
- w_dtr(pprt, val);
- return val;
-}
-
-/* sets ctrl port bits according to current signals values */
-static int set_ctrl_bits(void)
-{
- int val, bit;
-
- val = r_ctr(pprt);
- for (bit = 0; bit < LCD_BITS; bit++)
- val &= lcd_bits[LCD_PORT_C][bit][BIT_MSK];
-
- val |= lcd_bits[LCD_PORT_C][LCD_BIT_E][bits.e]
- | lcd_bits[LCD_PORT_C][LCD_BIT_RS][bits.rs]
- | lcd_bits[LCD_PORT_C][LCD_BIT_RW][bits.rw]
- | lcd_bits[LCD_PORT_C][LCD_BIT_BL][bits.bl]
- | lcd_bits[LCD_PORT_C][LCD_BIT_CL][bits.cl]
- | lcd_bits[LCD_PORT_C][LCD_BIT_DA][bits.da];
-
- w_ctr(pprt, val);
- return val;
-}
-
-/* sets ctrl & data port bits according to current signals values */
-static void panel_set_bits(void)
-{
- set_data_bits();
- set_ctrl_bits();
-}
-
-/*
- * Converts a parallel port pin (from -25 to 25) to data and control ports
- * masks, and data and control port bits. The signal will be considered
- * unconnected if it's on pin 0 or an invalid pin (<-25 or >25).
- *
- * Result will be used this way :
- * out(dport, in(dport) & d_val[2] | d_val[signal_state])
- * out(cport, in(cport) & c_val[2] | c_val[signal_state])
- */
-static void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val)
-{
- int d_bit, c_bit, inv;
-
- d_val[0] = 0;
- c_val[0] = 0;
- d_val[1] = 0;
- c_val[1] = 0;
- d_val[2] = 0xFF;
- c_val[2] = 0xFF;
-
- if (pin == 0)
- return;
-
- inv = (pin < 0);
- if (inv)
- pin = -pin;
-
- d_bit = 0;
- c_bit = 0;
-
- switch (pin) {
- case PIN_STROBE: /* strobe, inverted */
- c_bit = PNL_PSTROBE;
- inv = !inv;
- break;
- case PIN_D0...PIN_D7: /* D0 - D7 = 2 - 9 */
- d_bit = 1 << (pin - 2);
- break;
- case PIN_AUTOLF: /* autofeed, inverted */
- c_bit = PNL_PAUTOLF;
- inv = !inv;
- break;
- case PIN_INITP: /* init, direct */
- c_bit = PNL_PINITP;
- break;
- case PIN_SELECP: /* select_in, inverted */
- c_bit = PNL_PSELECP;
- inv = !inv;
- break;
- default: /* unknown pin, ignore */
- break;
- }
-
- if (c_bit) {
- c_val[2] &= ~c_bit;
- c_val[!inv] = c_bit;
- } else if (d_bit) {
- d_val[2] &= ~d_bit;
- d_val[!inv] = d_bit;
- }
-}
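-
-/*
- * Worked example (illustrative, not from the original driver): for a signal
- * wired to pin -3 (D1 through an inverter), pin_to_bits() yields
- * d_val[2] = ~PNL_PD1, d_val[0] = PNL_PD1 and d_val[1] = 0, so driving the
- * signal to its "on" state follows the formula above:
- *
- * w_dtr(pprt, (r_dtr(pprt) & d_val[2]) | d_val[1]);
- *
- * which leaves D1 low, as expected for an inverted wire.
- */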
-
-/* sleeps for the given number of milliseconds, rescheduling when possible */
-static void long_sleep(int ms)
-{
- if (in_interrupt())
- mdelay(ms);
- else
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
-}
-
-/*
- * send a serial byte to the LCD panel. The caller is responsible for locking
- * if needed.
- */
-static void lcd_send_serial(int byte)
-{
- int bit;
-
- /*
- * the data bit is set on D0, and the clock on STROBE.
- * LCD reads D0 on STROBE's rising edge.
- */
- for (bit = 0; bit < 8; bit++) {
- bits.cl = BIT_CLR; /* CLK low */
- panel_set_bits();
- bits.da = byte & 1;
- panel_set_bits();
- udelay(2); /* maintain the data during 2 us before CLK up */
- bits.cl = BIT_SET; /* CLK high */
- panel_set_bits();
- udelay(1); /* maintain the strobe during 1 us */
- byte >>= 1;
- }
-}
-
-/* turn the backlight on or off */
-static void lcd_backlight(int on)
-{
- if (lcd.pins.bl == PIN_NONE)
- return;
-
- /* The backlight is activated by setting the AUTOFEED line to +5V */
- spin_lock_irq(&pprt_lock);
- bits.bl = on;
- panel_set_bits();
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send a command to the LCD panel in serial mode */
-static void lcd_write_cmd_s(int cmd)
-{
- spin_lock_irq(&pprt_lock);
- lcd_send_serial(0x1F); /* R/W=W, RS=0 */
- lcd_send_serial(cmd & 0x0F);
- lcd_send_serial((cmd >> 4) & 0x0F);
- udelay(40); /* the shortest command takes at least 40 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send data to the LCD panel in serial mode */
-static void lcd_write_data_s(int data)
-{
- spin_lock_irq(&pprt_lock);
- lcd_send_serial(0x5F); /* R/W=W, RS=1 */
- lcd_send_serial(data & 0x0F);
- lcd_send_serial((data >> 4) & 0x0F);
- udelay(40); /* the shortest data takes at least 40 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send a command to the LCD panel in 8 bits parallel mode */
-static void lcd_write_cmd_p8(int cmd)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the data port */
- w_dtr(pprt, cmd);
- udelay(20); /* maintain the data during 20 us before the strobe */
-
- bits.e = BIT_SET;
- bits.rs = BIT_CLR;
- bits.rw = BIT_CLR;
- set_ctrl_bits();
-
- udelay(40); /* maintain the strobe during 40 us */
-
- bits.e = BIT_CLR;
- set_ctrl_bits();
-
- udelay(120); /* the shortest command takes at least 120 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send data to the LCD panel in 8 bits parallel mode */
-static void lcd_write_data_p8(int data)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the data port */
- w_dtr(pprt, data);
- udelay(20); /* maintain the data during 20 us before the strobe */
-
- bits.e = BIT_SET;
- bits.rs = BIT_SET;
- bits.rw = BIT_CLR;
- set_ctrl_bits();
-
- udelay(40); /* maintain the strobe during 40 us */
-
- bits.e = BIT_CLR;
- set_ctrl_bits();
-
- udelay(45); /* the shortest data takes at least 45 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send a command to the TI LCD panel */
-static void lcd_write_cmd_tilcd(int cmd)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the control port */
- w_ctr(pprt, cmd);
- udelay(60);
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send data to the TI LCD panel */
-static void lcd_write_data_tilcd(int data)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the data port */
- w_dtr(pprt, data);
- udelay(60);
- spin_unlock_irq(&pprt_lock);
-}
-
-static void lcd_gotoxy(void)
-{
- lcd_write_cmd(LCD_CMD_SET_DDRAM_ADDR
- | (lcd.addr.y ? lcd.hwidth : 0)
- /*
- * we force the cursor to stay at the end of the
- * line if it wants to go farther
- */
- | ((lcd.addr.x < lcd.bwidth) ? lcd.addr.x &
- (lcd.hwidth - 1) : lcd.bwidth - 1));
-}
-
-static void lcd_print(char c)
-{
- if (lcd.addr.x < lcd.bwidth) {
- if (lcd_char_conv != NULL)
- c = lcd_char_conv[(unsigned char)c];
- lcd_write_data(c);
- lcd.addr.x++;
- }
- /* prevents the cursor from wrapping onto the next line */
- if (lcd.addr.x == lcd.bwidth)
- lcd_gotoxy();
-}
-
-/* fills the display with spaces and resets X/Y */
-static void lcd_clear_fast_s(void)
-{
- int pos;
-
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
-
- spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
- lcd_send_serial(0x5F); /* R/W=W, RS=1 */
- lcd_send_serial(' ' & 0x0F);
- lcd_send_serial((' ' >> 4) & 0x0F);
- udelay(40); /* the shortest data takes at least 40 us */
- }
- spin_unlock_irq(&pprt_lock);
-
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
-}
-
-/* fills the display with spaces and resets X/Y */
-static void lcd_clear_fast_p8(void)
-{
- int pos;
-
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
-
- spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
- /* present the data to the data port */
- w_dtr(pprt, ' ');
-
- /* maintain the data during 20 us before the strobe */
- udelay(20);
-
- bits.e = BIT_SET;
- bits.rs = BIT_SET;
- bits.rw = BIT_CLR;
- set_ctrl_bits();
-
- /* maintain the strobe during 40 us */
- udelay(40);
-
- bits.e = BIT_CLR;
- set_ctrl_bits();
-
- /* the shortest data takes at least 45 us */
- udelay(45);
- }
- spin_unlock_irq(&pprt_lock);
-
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
-}
-
-/* fills the display with spaces and resets X/Y */
-static void lcd_clear_fast_tilcd(void)
-{
- int pos;
-
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
-
- spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
- /* present the data to the data port */
- w_dtr(pprt, ' ');
- udelay(60);
- }
-
- spin_unlock_irq(&pprt_lock);
-
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
-}
-
-/* clears the display and resets X/Y */
-static void lcd_clear_display(void)
-{
- lcd_write_cmd(LCD_CMD_DISPLAY_CLEAR);
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- /* we must wait a few milliseconds (15) */
- long_sleep(15);
-}
-
-static void lcd_init_display(void)
-{
- lcd.flags = ((lcd.height > 1) ? LCD_FLAG_N : 0)
- | LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B;
-
- long_sleep(20); /* wait 20 ms after power-up for the paranoid */
-
- /* 8bits, 1 line, small fonts; let's do it 3 times */
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
- long_sleep(10);
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
- long_sleep(10);
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
- long_sleep(10);
-
- /* set font height and lines number */
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS
- | ((lcd.flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0)
- | ((lcd.flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0)
- );
- long_sleep(10);
-
- /* display off, cursor off, blink off */
- lcd_write_cmd(LCD_CMD_DISPLAY_CTRL);
- long_sleep(10);
-
- lcd_write_cmd(LCD_CMD_DISPLAY_CTRL /* set display mode */
- | ((lcd.flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0)
- | ((lcd.flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0)
- | ((lcd.flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0)
- );
-
- lcd_backlight((lcd.flags & LCD_FLAG_L) ? 1 : 0);
-
- long_sleep(10);
-
- /* entry mode set : increment, cursor shifting */
- lcd_write_cmd(LCD_CMD_ENTRY_MODE | LCD_CMD_CURSOR_INC);
-
- lcd_clear_display();
-}
-
-/*
- * These are the file operation functions for user access to /dev/lcd.
- * They can also be called from inside the kernel by setting file and
- * ppos to NULL.
- */
-
-static inline int handle_lcd_special_code(void)
-{
- /* LCD special codes */
-
- int processed = 0;
-
- char *esc = lcd.esc_seq.buf + 2;
- int oldflags = lcd.flags;
-
- /* check for display mode flags */
- switch (*esc) {
- case 'D': /* Display ON */
- lcd.flags |= LCD_FLAG_D;
- processed = 1;
- break;
- case 'd': /* Display OFF */
- lcd.flags &= ~LCD_FLAG_D;
- processed = 1;
- break;
- case 'C': /* Cursor ON */
- lcd.flags |= LCD_FLAG_C;
- processed = 1;
- break;
- case 'c': /* Cursor OFF */
- lcd.flags &= ~LCD_FLAG_C;
- processed = 1;
- break;
- case 'B': /* Blink ON */
- lcd.flags |= LCD_FLAG_B;
- processed = 1;
- break;
- case 'b': /* Blink OFF */
- lcd.flags &= ~LCD_FLAG_B;
- processed = 1;
- break;
- case '+': /* Back light ON */
- lcd.flags |= LCD_FLAG_L;
- processed = 1;
- break;
- case '-': /* Back light OFF */
- lcd.flags &= ~LCD_FLAG_L;
- processed = 1;
- break;
- case '*':
- /* flash back light using the keypad timer */
- if (scan_timer.function != NULL) {
- if (lcd.light_tempo == 0
- && ((lcd.flags & LCD_FLAG_L) == 0))
- lcd_backlight(1);
- lcd.light_tempo = FLASH_LIGHT_TEMPO;
- }
- processed = 1;
- break;
- case 'f': /* Small Font */
- lcd.flags &= ~LCD_FLAG_F;
- processed = 1;
- break;
- case 'F': /* Large Font */
- lcd.flags |= LCD_FLAG_F;
- processed = 1;
- break;
- case 'n': /* One Line */
- lcd.flags &= ~LCD_FLAG_N;
- processed = 1;
- break;
- case 'N': /* Two Lines */
- lcd.flags |= LCD_FLAG_N;
- processed = 1;
- break;
- case 'l': /* Shift Cursor Left */
- if (lcd.addr.x > 0) {
- /* back one char if not at end of line */
- if (lcd.addr.x < lcd.bwidth)
- lcd_write_cmd(LCD_CMD_SHIFT);
- lcd.addr.x--;
- }
- processed = 1;
- break;
- case 'r': /* shift cursor right */
- if (lcd.addr.x < lcd.width) {
- /* allow the cursor to pass the end of the line */
- if (lcd.addr.x < (lcd.bwidth - 1))
- lcd_write_cmd(LCD_CMD_SHIFT |
- LCD_CMD_SHIFT_RIGHT);
- lcd.addr.x++;
- }
- processed = 1;
- break;
- case 'L': /* shift display left */
- lcd_write_cmd(LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT);
- processed = 1;
- break;
- case 'R': /* shift display right */
- lcd_write_cmd(LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT |
- LCD_CMD_SHIFT_RIGHT);
- processed = 1;
- break;
- case 'k': { /* kill end of line */
- int x;
-
- for (x = lcd.addr.x; x < lcd.bwidth; x++)
- lcd_write_data(' ');
-
- /* restore cursor position */
- lcd_gotoxy();
- processed = 1;
- break;
- }
- case 'I': /* reinitialize display */
- lcd_init_display();
- processed = 1;
- break;
- case 'G': {
- /* Generator : LGcxxxxx...xx; must have <c> between '0'
- * and '7', representing the numerical ASCII code of the
- * redefined character, and <xx...xx> a sequence of 16
- * hex digits representing 8 bytes for each character.
- * Most LCDs will only use 5 lower bits of the 7 first
- * bytes.
- */
-
- unsigned char cgbytes[8];
- unsigned char cgaddr;
- int cgoffset;
- int shift;
- char value;
- int addr;
-
- if (strchr(esc, ';') == NULL)
- break;
-
- esc++;
-
- cgaddr = *(esc++) - '0';
- if (cgaddr > 7) {
- processed = 1;
- break;
- }
-
- cgoffset = 0;
- shift = 0;
- value = 0;
- while (*esc && cgoffset < 8) {
- shift ^= 4;
- if (*esc >= '0' && *esc <= '9') {
- value |= (*esc - '0') << shift;
- } else if (*esc >= 'A' && *esc <= 'Z') {
- value |= (*esc - 'A' + 10) << shift;
- } else if (*esc >= 'a' && *esc <= 'z') {
- value |= (*esc - 'a' + 10) << shift;
- } else {
- esc++;
- continue;
- }
-
- if (shift == 0) {
- cgbytes[cgoffset++] = value;
- value = 0;
- }
-
- esc++;
- }
-
- lcd_write_cmd(LCD_CMD_SET_CGRAM_ADDR | (cgaddr * 8));
- for (addr = 0; addr < cgoffset; addr++)
- lcd_write_data(cgbytes[addr]);
-
- /* ensures that we stop writing to CGRAM */
- lcd_gotoxy();
- processed = 1;
- break;
- }
- case 'x': /* gotoxy : LxXXX[yYYY]; */
- case 'y': /* gotoxy : LyYYY[xXXX]; */
- if (strchr(esc, ';') == NULL)
- break;
-
- while (*esc) {
- if (*esc == 'x') {
- esc++;
- if (kstrtoul(esc, 10, &lcd.addr.x) < 0)
- break;
- } else if (*esc == 'y') {
- esc++;
- if (kstrtoul(esc, 10, &lcd.addr.y) < 0)
- break;
- } else {
- break;
- }
- }
-
- lcd_gotoxy();
- processed = 1;
- break;
- }
-
- /* TODO: This indent party here got ugly, clean it! */
- /* Check whether one flag was changed */
- if (oldflags != lcd.flags) {
- /* check whether one of B,C,D flags were changed */
- if ((oldflags ^ lcd.flags) &
- (LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
- /* set display mode */
- lcd_write_cmd(LCD_CMD_DISPLAY_CTRL
- | ((lcd.flags & LCD_FLAG_D)
- ? LCD_CMD_DISPLAY_ON : 0)
- | ((lcd.flags & LCD_FLAG_C)
- ? LCD_CMD_CURSOR_ON : 0)
- | ((lcd.flags & LCD_FLAG_B)
- ? LCD_CMD_BLINK_ON : 0));
- /* check whether one of F,N flags was changed */
- else if ((oldflags ^ lcd.flags) & (LCD_FLAG_F | LCD_FLAG_N))
- lcd_write_cmd(LCD_CMD_FUNCTION_SET
- | LCD_CMD_DATA_LEN_8BITS
- | ((lcd.flags & LCD_FLAG_F)
- ? LCD_CMD_TWO_LINES : 0)
- | ((lcd.flags & LCD_FLAG_N)
- ? LCD_CMD_FONT_5X10_DOTS
- : 0));
- /* check whether L flag was changed */
- else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) {
- if (lcd.flags & (LCD_FLAG_L))
- lcd_backlight(1);
- else if (lcd.light_tempo == 0)
- /*
- * switch off the light only when the tempo
- * lighting is gone
- */
- lcd_backlight(0);
- }
- }
-
- return processed;
-}
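-
-/*
- * Illustrative userspace usage, not part of the driver: the special codes
- * above are sent to /dev/lcd as ESC '[' 'L' <code>, for example:
- *
- * printf '\033[LD' > /dev/lcd # display on
- * printf '\033[Lb' > /dev/lcd # blink off
- * printf '\033[L+' > /dev/lcd # backlight on
- */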
-
-static void lcd_write_char(char c)
-{
- /* first, we'll test if we're in escape mode */
- if ((c != '\n') && lcd.esc_seq.len >= 0) {
- /* yes, let's add this char to the buffer */
- lcd.esc_seq.buf[lcd.esc_seq.len++] = c;
- lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
- } else {
- /* aborts any previous escape sequence */
- lcd.esc_seq.len = -1;
-
- switch (c) {
- case LCD_ESCAPE_CHAR:
- /* start of an escape sequence */
- lcd.esc_seq.len = 0;
- lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
- break;
- case '\b':
- /* go back one char and clear it */
- if (lcd.addr.x > 0) {
- /*
- * check if we're not at the
- * end of the line
- */
- if (lcd.addr.x < lcd.bwidth)
- /* back one char */
- lcd_write_cmd(LCD_CMD_SHIFT);
- lcd.addr.x--;
- }
- /* replace with a space */
- lcd_write_data(' ');
- /* back one char again */
- lcd_write_cmd(LCD_CMD_SHIFT);
- break;
- case '\014':
- /* quickly clear the display */
- lcd_clear_fast();
- break;
- case '\n':
- /*
- * flush the remainder of the current line and
- * go to the beginning of the next line
- */
- for (; lcd.addr.x < lcd.bwidth; lcd.addr.x++)
- lcd_write_data(' ');
- lcd.addr.x = 0;
- lcd.addr.y = (lcd.addr.y + 1) % lcd.height;
- lcd_gotoxy();
- break;
- case '\r':
- /* go to the beginning of the same line */
- lcd.addr.x = 0;
- lcd_gotoxy();
- break;
- case '\t':
- /* print a space instead of the tab */
- lcd_print(' ');
- break;
- default:
- /* simply print this char */
- lcd_print(c);
- break;
- }
- }
-
- /*
- * now we'll see if we're in an escape mode and if the current
- * escape sequence can be understood.
- */
- if (lcd.esc_seq.len >= 2) {
- int processed = 0;
-
- if (!strcmp(lcd.esc_seq.buf, "[2J")) {
- /* clear the display */
- lcd_clear_fast();
- processed = 1;
- } else if (!strcmp(lcd.esc_seq.buf, "[H")) {
- /* cursor to home */
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
- processed = 1;
- }
- /* codes starting with ^[[L */
- else if ((lcd.esc_seq.len >= 3) &&
- (lcd.esc_seq.buf[0] == '[') &&
- (lcd.esc_seq.buf[1] == 'L')) {
- processed = handle_lcd_special_code();
- }
-
- /* LCD special escape codes */
- /*
- * flush the escape sequence if it's been processed
- * or if it is getting too long.
- */
- if (processed || (lcd.esc_seq.len >= LCD_ESCAPE_LEN))
- lcd.esc_seq.len = -1;
- } /* escape codes */
-}
-
-static ssize_t lcd_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- const char __user *tmp = buf;
- char c;
-
- for (; count-- > 0; (*ppos)++, tmp++) {
- if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
- /*
- * let's be a little nice with other processes
- * that need some CPU
- */
- schedule();
-
- if (get_user(c, tmp))
- return -EFAULT;
-
- lcd_write_char(c);
- }
-
- return tmp - buf;
-}
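
For reference, a minimal userspace sketch of this write path (assuming the misc device is exposed as /dev/lcd, matching the miscdevice name below): every byte written is fed through lcd_write_char(), so control characters and the "\x1b[..." escape sequences can be sent straight through write().

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* ESC [2J clears the display, ESC [H homes the cursor */
		const char msg[] = "\x1b[2J\x1b[H" "Hello, panel!\n";
		int fd = open("/dev/lcd", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, msg, strlen(msg)) < 0) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
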
-
-static int lcd_open(struct inode *inode, struct file *file)
-{
- if (!atomic_dec_and_test(&lcd_available))
- return -EBUSY; /* open only once at a time */
-
- if (file->f_mode & FMODE_READ) /* device is write-only */
- return -EPERM;
-
- if (lcd.must_clear) {
- lcd_clear_display();
- lcd.must_clear = false;
- }
- return nonseekable_open(inode, file);
-}
-
-static int lcd_release(struct inode *inode, struct file *file)
-{
- atomic_inc(&lcd_available);
- return 0;
-}
-
-static const struct file_operations lcd_fops = {
- .write = lcd_write,
- .open = lcd_open,
- .release = lcd_release,
- .llseek = no_llseek,
-};
-
-static struct miscdevice lcd_dev = {
- .minor = LCD_MINOR,
- .name = "lcd",
- .fops = &lcd_fops,
-};
-
-/* public function usable from the kernel for any purpose */
-static void panel_lcd_print(const char *s)
-{
- const char *tmp = s;
- int count = strlen(s);
-
- if (lcd.enabled && lcd.initialized) {
- for (; count-- > 0; tmp++) {
- if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
- /*
- * let's be a little nice with other processes
- * that need some CPU
- */
- schedule();
-
- lcd_write_char(*tmp);
- }
- }
-}
-
-/* initialize the LCD driver */
-static void lcd_init(void)
-{
- switch (selected_lcd_type) {
- case LCD_TYPE_OLD:
- /* parallel mode, 8 bits */
- lcd.proto = LCD_PROTO_PARALLEL;
- lcd.charset = LCD_CHARSET_NORMAL;
- lcd.pins.e = PIN_STROBE;
- lcd.pins.rs = PIN_AUTOLF;
-
- lcd.width = 40;
- lcd.bwidth = 40;
- lcd.hwidth = 64;
- lcd.height = 2;
- break;
- case LCD_TYPE_KS0074:
- /* serial mode, ks0074 */
- lcd.proto = LCD_PROTO_SERIAL;
- lcd.charset = LCD_CHARSET_KS0074;
- lcd.pins.bl = PIN_AUTOLF;
- lcd.pins.cl = PIN_STROBE;
- lcd.pins.da = PIN_D0;
-
- lcd.width = 16;
- lcd.bwidth = 40;
- lcd.hwidth = 16;
- lcd.height = 2;
- break;
- case LCD_TYPE_NEXCOM:
- /* parallel mode, 8 bits, generic */
- lcd.proto = LCD_PROTO_PARALLEL;
- lcd.charset = LCD_CHARSET_NORMAL;
- lcd.pins.e = PIN_AUTOLF;
- lcd.pins.rs = PIN_SELECP;
- lcd.pins.rw = PIN_INITP;
-
- lcd.width = 16;
- lcd.bwidth = 40;
- lcd.hwidth = 64;
- lcd.height = 2;
- break;
- case LCD_TYPE_CUSTOM:
- /* customer-defined */
- lcd.proto = DEFAULT_LCD_PROTO;
- lcd.charset = DEFAULT_LCD_CHARSET;
- /* default geometry will be set later */
- break;
- case LCD_TYPE_HANTRONIX:
- /* parallel mode, 8 bits, hantronix-like */
- default:
- lcd.proto = LCD_PROTO_PARALLEL;
- lcd.charset = LCD_CHARSET_NORMAL;
- lcd.pins.e = PIN_STROBE;
- lcd.pins.rs = PIN_SELECP;
-
- lcd.width = 16;
- lcd.bwidth = 40;
- lcd.hwidth = 64;
- lcd.height = 2;
- break;
- }
-
- /* Overwrite with module params set on loading */
- if (lcd_height != NOT_SET)
- lcd.height = lcd_height;
- if (lcd_width != NOT_SET)
- lcd.width = lcd_width;
- if (lcd_bwidth != NOT_SET)
- lcd.bwidth = lcd_bwidth;
- if (lcd_hwidth != NOT_SET)
- lcd.hwidth = lcd_hwidth;
- if (lcd_charset != NOT_SET)
- lcd.charset = lcd_charset;
- if (lcd_proto != NOT_SET)
- lcd.proto = lcd_proto;
- if (lcd_e_pin != PIN_NOT_SET)
- lcd.pins.e = lcd_e_pin;
- if (lcd_rs_pin != PIN_NOT_SET)
- lcd.pins.rs = lcd_rs_pin;
- if (lcd_rw_pin != PIN_NOT_SET)
- lcd.pins.rw = lcd_rw_pin;
- if (lcd_cl_pin != PIN_NOT_SET)
- lcd.pins.cl = lcd_cl_pin;
- if (lcd_da_pin != PIN_NOT_SET)
- lcd.pins.da = lcd_da_pin;
- if (lcd_bl_pin != PIN_NOT_SET)
- lcd.pins.bl = lcd_bl_pin;
-
- /* this is used to catch wrong and default values */
- if (lcd.width <= 0)
- lcd.width = DEFAULT_LCD_WIDTH;
- if (lcd.bwidth <= 0)
- lcd.bwidth = DEFAULT_LCD_BWIDTH;
- if (lcd.hwidth <= 0)
- lcd.hwidth = DEFAULT_LCD_HWIDTH;
- if (lcd.height <= 0)
- lcd.height = DEFAULT_LCD_HEIGHT;
-
- if (lcd.proto == LCD_PROTO_SERIAL) { /* SERIAL */
- lcd_write_cmd = lcd_write_cmd_s;
- lcd_write_data = lcd_write_data_s;
- lcd_clear_fast = lcd_clear_fast_s;
-
- if (lcd.pins.cl == PIN_NOT_SET)
- lcd.pins.cl = DEFAULT_LCD_PIN_SCL;
- if (lcd.pins.da == PIN_NOT_SET)
- lcd.pins.da = DEFAULT_LCD_PIN_SDA;
-
- } else if (lcd.proto == LCD_PROTO_PARALLEL) { /* PARALLEL */
- lcd_write_cmd = lcd_write_cmd_p8;
- lcd_write_data = lcd_write_data_p8;
- lcd_clear_fast = lcd_clear_fast_p8;
-
- if (lcd.pins.e == PIN_NOT_SET)
- lcd.pins.e = DEFAULT_LCD_PIN_E;
- if (lcd.pins.rs == PIN_NOT_SET)
- lcd.pins.rs = DEFAULT_LCD_PIN_RS;
- if (lcd.pins.rw == PIN_NOT_SET)
- lcd.pins.rw = DEFAULT_LCD_PIN_RW;
- } else {
- lcd_write_cmd = lcd_write_cmd_tilcd;
- lcd_write_data = lcd_write_data_tilcd;
- lcd_clear_fast = lcd_clear_fast_tilcd;
- }
-
- if (lcd.pins.bl == PIN_NOT_SET)
- lcd.pins.bl = DEFAULT_LCD_PIN_BL;
-
- if (lcd.pins.e == PIN_NOT_SET)
- lcd.pins.e = PIN_NONE;
- if (lcd.pins.rs == PIN_NOT_SET)
- lcd.pins.rs = PIN_NONE;
- if (lcd.pins.rw == PIN_NOT_SET)
- lcd.pins.rw = PIN_NONE;
- if (lcd.pins.bl == PIN_NOT_SET)
- lcd.pins.bl = PIN_NONE;
- if (lcd.pins.cl == PIN_NOT_SET)
- lcd.pins.cl = PIN_NONE;
- if (lcd.pins.da == PIN_NOT_SET)
- lcd.pins.da = PIN_NONE;
-
- if (lcd.charset == NOT_SET)
- lcd.charset = DEFAULT_LCD_CHARSET;
-
- if (lcd.charset == LCD_CHARSET_KS0074)
- lcd_char_conv = lcd_char_conv_ks0074;
- else
- lcd_char_conv = NULL;
-
- if (lcd.pins.bl != PIN_NONE)
- init_scan_timer();
-
- pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
- lcd_bits[LCD_PORT_C][LCD_BIT_E]);
- pin_to_bits(lcd.pins.rs, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
- lcd_bits[LCD_PORT_C][LCD_BIT_RS]);
- pin_to_bits(lcd.pins.rw, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
- lcd_bits[LCD_PORT_C][LCD_BIT_RW]);
- pin_to_bits(lcd.pins.bl, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
- lcd_bits[LCD_PORT_C][LCD_BIT_BL]);
- pin_to_bits(lcd.pins.cl, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
- lcd_bits[LCD_PORT_C][LCD_BIT_CL]);
- pin_to_bits(lcd.pins.da, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
- lcd_bits[LCD_PORT_C][LCD_BIT_DA]);
-
- /*
-	 * Before this point, we must NOT send anything to the display.
-	 * Since lcd_init_display() needs to write data, we have to
-	 * mark the LCD initialized just before calling it.
- */
- lcd.initialized = true;
- lcd_init_display();
-
- /* display a short message */
-#ifdef CONFIG_PANEL_CHANGE_MESSAGE
-#ifdef CONFIG_PANEL_BOOT_MESSAGE
- panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
-#endif
-#else
- panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-"
- PANEL_VERSION);
-#endif
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- /* clear the display on the next device opening */
- lcd.must_clear = true;
- lcd_gotoxy();
-}
-
-/*
- * These are the file operation function for user access to /dev/keypad
- */
-
-static ssize_t keypad_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- unsigned i = *ppos;
- char __user *tmp = buf;
-
- if (keypad_buflen == 0) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- if (wait_event_interruptible(keypad_read_wait,
- keypad_buflen != 0))
- return -EINTR;
- }
-
- for (; count-- > 0 && (keypad_buflen > 0);
- ++i, ++tmp, --keypad_buflen) {
- put_user(keypad_buffer[keypad_start], tmp);
- keypad_start = (keypad_start + 1) % KEYPAD_BUFFER;
- }
- *ppos = i;
-
- return tmp - buf;
-}
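
Correspondingly, a minimal sketch of a reader for the keypad side (assuming the misc device is exposed as /dev/keypad): by default read() sleeps on keypad_read_wait until keypad_send_key() queues something, while O_NONBLOCK turns an empty buffer into -EAGAIN.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/dev/keypad", O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0) {
			fwrite(buf, 1, n, stdout);
			fflush(stdout);
		}
		close(fd);
		return 0;
	}
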
-
-static int keypad_open(struct inode *inode, struct file *file)
-{
- if (!atomic_dec_and_test(&keypad_available))
- return -EBUSY; /* open only once at a time */
-
- if (file->f_mode & FMODE_WRITE) /* device is read-only */
- return -EPERM;
-
- keypad_buflen = 0; /* flush the buffer on opening */
- return 0;
-}
-
-static int keypad_release(struct inode *inode, struct file *file)
-{
- atomic_inc(&keypad_available);
- return 0;
-}
-
-static const struct file_operations keypad_fops = {
- .read = keypad_read, /* read */
- .open = keypad_open, /* open */
- .release = keypad_release, /* close */
- .llseek = default_llseek,
-};
-
-static struct miscdevice keypad_dev = {
- .minor = KEYPAD_MINOR,
- .name = "keypad",
- .fops = &keypad_fops,
-};
-
-static void keypad_send_key(const char *string, int max_len)
-{
- /* send the key to the device only if a process is attached to it. */
- if (!atomic_read(&keypad_available)) {
- while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) {
- keypad_buffer[(keypad_start + keypad_buflen++) %
- KEYPAD_BUFFER] = *string++;
- }
- wake_up_interruptible(&keypad_read_wait);
- }
-}
-
-/* this function scans all the bits involving at least one logical signal,
- * and puts the results in the bitfield "phys_read" (one bit per established
- * contact), and sets "phys_read_prev" to "phys_read".
- *
- * Note: to debounce input signals, we will only consider as switched a signal
- * which is stable across 2 measures. Signals which are different between two
- * reads will be kept as they previously were in their logical form (phys_prev).
- * A signal which has just switched will have a 1 in
- * (phys_read ^ phys_read_prev).
- */
-static void phys_scan_contacts(void)
-{
- int bit, bitval;
- char oldval;
- char bitmask;
- char gndmask;
-
- phys_prev = phys_curr;
- phys_read_prev = phys_read;
- phys_read = 0; /* flush all signals */
-
- /* keep track of old value, with all outputs disabled */
- oldval = r_dtr(pprt) | scan_mask_o;
- /* activate all keyboard outputs (active low) */
- w_dtr(pprt, oldval & ~scan_mask_o);
-
- /* will have a 1 for each bit set to gnd */
- bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
- /* disable all matrix signals */
- w_dtr(pprt, oldval);
-
- /* now that all outputs are cleared, the only active input bits are
- * directly connected to the ground
- */
-
- /* 1 for each grounded input */
- gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
-
- /* grounded inputs are signals 40-44 */
- phys_read |= (pmask_t) gndmask << 40;
-
- if (bitmask != gndmask) {
- /*
- * since clearing the outputs changed some inputs, we know
- * that some input signals are currently tied to some outputs.
- * So we'll scan them.
- */
- for (bit = 0; bit < 8; bit++) {
- bitval = 1 << bit;
-
- if (!(scan_mask_o & bitval)) /* skip unused bits */
- continue;
-
- w_dtr(pprt, oldval & ~bitval); /* enable this output */
- bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask;
- phys_read |= (pmask_t) bitmask << (5 * bit);
- }
- w_dtr(pprt, oldval); /* disable all outputs */
- }
- /*
- * this is easy: use old bits when they are flapping,
- * use new ones when stable
- */
- phys_curr = (phys_prev & (phys_read ^ phys_read_prev)) |
- (phys_read & ~(phys_read ^ phys_read_prev));
-}
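
The debouncing rule in the last statement reads: for every bit that differs between the two consecutive samples (phys_read ^ phys_read_prev), keep the previous logical value; for every stable bit, take the newly read value. A small self-contained sketch of that expression (illustrative only, not part of the driver):

	#include <assert.h>
	#include <stdint.h>

	/* keep the previous value for bits that changed between the last two
	 * samples; accept the new value for bits that stayed stable
	 */
	static uint64_t debounce(uint64_t prev, uint64_t read, uint64_t read_prev)
	{
		uint64_t flapping = read ^ read_prev;

		return (prev & flapping) | (read & ~flapping);
	}

	int main(void)
	{
		/* bit 0 flaps between the two samples: its previous value (0)
		 * is kept; bit 1 is stable at 1: the new value is accepted
		 */
		assert(debounce(0x0, 0x3, 0x2) == 0x2);
		return 0;
	}
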
-
-static inline int input_state_high(struct logical_input *input)
-{
-#if 0
- /* FIXME:
- * this is an invalid test. It tries to catch
- * transitions from single-key to multiple-key, but
- * doesn't take into account the contacts polarity.
- * The only solution to the problem is to parse keys
- * from the most complex to the simplest combinations,
- * and mark them as 'caught' once a combination
- * matches, then unmatch it for all other ones.
- */
-
-	/* try to catch dangerous transition cases:
- * someone adds a bit, so this signal was a false
- * positive resulting from a transition. We should
- * invalidate the signal immediately and not call the
- * release function.
- * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release.
- */
- if (((phys_prev & input->mask) == input->value) &&
- ((phys_curr & input->mask) > input->value)) {
- input->state = INPUT_ST_LOW; /* invalidate */
- return 1;
- }
-#endif
-
- if ((phys_curr & input->mask) == input->value) {
- if ((input->type == INPUT_TYPE_STD) &&
- (input->high_timer == 0)) {
- input->high_timer++;
- if (input->u.std.press_fct != NULL)
- input->u.std.press_fct(input->u.std.press_data);
- } else if (input->type == INPUT_TYPE_KBD) {
- /* will turn on the light */
- keypressed = 1;
-
- if (input->high_timer == 0) {
- char *press_str = input->u.kbd.press_str;
-
- if (press_str[0]) {
- int s = sizeof(input->u.kbd.press_str);
-
- keypad_send_key(press_str, s);
- }
- }
-
- if (input->u.kbd.repeat_str[0]) {
- char *repeat_str = input->u.kbd.repeat_str;
-
- if (input->high_timer >= KEYPAD_REP_START) {
- int s = sizeof(input->u.kbd.repeat_str);
-
- input->high_timer -= KEYPAD_REP_DELAY;
- keypad_send_key(repeat_str, s);
- }
- /* we will need to come back here soon */
- inputs_stable = 0;
- }
-
- if (input->high_timer < 255)
- input->high_timer++;
- }
- return 1;
- }
-
- /* else signal falling down. Let's fall through. */
- input->state = INPUT_ST_FALLING;
- input->fall_timer = 0;
-
- return 0;
-}
-
-static inline void input_state_falling(struct logical_input *input)
-{
-#if 0
- /* FIXME !!! same comment as in input_state_high */
- if (((phys_prev & input->mask) == input->value) &&
- ((phys_curr & input->mask) > input->value)) {
- input->state = INPUT_ST_LOW; /* invalidate */
- return;
- }
-#endif
-
- if ((phys_curr & input->mask) == input->value) {
- if (input->type == INPUT_TYPE_KBD) {
- /* will turn on the light */
- keypressed = 1;
-
- if (input->u.kbd.repeat_str[0]) {
- char *repeat_str = input->u.kbd.repeat_str;
-
- if (input->high_timer >= KEYPAD_REP_START) {
- int s = sizeof(input->u.kbd.repeat_str);
-
- input->high_timer -= KEYPAD_REP_DELAY;
- keypad_send_key(repeat_str, s);
- }
- /* we will need to come back here soon */
- inputs_stable = 0;
- }
-
- if (input->high_timer < 255)
- input->high_timer++;
- }
- input->state = INPUT_ST_HIGH;
- } else if (input->fall_timer >= input->fall_time) {
- /* call release event */
- if (input->type == INPUT_TYPE_STD) {
- void (*release_fct)(int) = input->u.std.release_fct;
-
- if (release_fct != NULL)
- release_fct(input->u.std.release_data);
- } else if (input->type == INPUT_TYPE_KBD) {
- char *release_str = input->u.kbd.release_str;
-
- if (release_str[0]) {
- int s = sizeof(input->u.kbd.release_str);
-
- keypad_send_key(release_str, s);
- }
- }
-
- input->state = INPUT_ST_LOW;
- } else {
- input->fall_timer++;
- inputs_stable = 0;
- }
-}
-
-static void panel_process_inputs(void)
-{
- struct list_head *item;
- struct logical_input *input;
-
- keypressed = 0;
- inputs_stable = 1;
- list_for_each(item, &logical_inputs) {
- input = list_entry(item, struct logical_input, list);
-
- switch (input->state) {
- case INPUT_ST_LOW:
- if ((phys_curr & input->mask) != input->value)
- break;
- /* if all needed ones were already set previously,
- * this means that this logical signal has been
- * activated by the releasing of another combined
- * signal, so we don't want to match.
- * eg: AB -(release B)-> A -(release A)-> 0 :
- * don't match A.
- */
- if ((phys_prev & input->mask) == input->value)
- break;
- input->rise_timer = 0;
- input->state = INPUT_ST_RISING;
- /* no break here, fall through */
- case INPUT_ST_RISING:
- if ((phys_curr & input->mask) != input->value) {
- input->state = INPUT_ST_LOW;
- break;
- }
- if (input->rise_timer < input->rise_time) {
- inputs_stable = 0;
- input->rise_timer++;
- break;
- }
- input->high_timer = 0;
- input->state = INPUT_ST_HIGH;
- /* no break here, fall through */
- case INPUT_ST_HIGH:
- if (input_state_high(input))
- break;
- /* no break here, fall through */
- case INPUT_ST_FALLING:
- input_state_falling(input);
- }
- }
-}
-
-static void panel_scan_timer(void)
-{
- if (keypad.enabled && keypad_initialized) {
- if (spin_trylock_irq(&pprt_lock)) {
- phys_scan_contacts();
-
- /* no need for the parport anymore */
- spin_unlock_irq(&pprt_lock);
- }
-
- if (!inputs_stable || phys_curr != phys_prev)
- panel_process_inputs();
- }
-
- if (lcd.enabled && lcd.initialized) {
- if (keypressed) {
- if (lcd.light_tempo == 0
- && ((lcd.flags & LCD_FLAG_L) == 0))
- lcd_backlight(1);
- lcd.light_tempo = FLASH_LIGHT_TEMPO;
- } else if (lcd.light_tempo > 0) {
- lcd.light_tempo--;
- if (lcd.light_tempo == 0
- && ((lcd.flags & LCD_FLAG_L) == 0))
- lcd_backlight(0);
- }
- }
-
- mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME);
-}
-
-static void init_scan_timer(void)
-{
- if (scan_timer.function != NULL)
- return; /* already started */
-
- setup_timer(&scan_timer, (void *)&panel_scan_timer, 0);
- scan_timer.expires = jiffies + INPUT_POLL_TIME;
- add_timer(&scan_timer);
-}
-
-/* converts a name of the form "({BbAaPpSsEe}{01234567-})*" to a series of bits.
- * if <omask> or <imask> are non-null, they will be or'ed with the bits
- * corresponding to out and in bits respectively.
- * returns 1 if ok, 0 if error (in which case, nothing is written).
- */
-static int input_name2mask(const char *name, pmask_t *mask, pmask_t *value,
- char *imask, char *omask)
-{
- static char sigtab[10] = "EeSsPpAaBb";
- char im, om;
- pmask_t m, v;
-
- om = 0ULL;
- im = 0ULL;
- m = 0ULL;
- v = 0ULL;
- while (*name) {
- int in, out, bit, neg;
-
- for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name);
- in++)
- ;
-
- if (in >= sizeof(sigtab))
- return 0; /* input name not found */
- neg = (in & 1); /* odd (lower) names are negated */
- in >>= 1;
- im |= (1 << in);
-
- name++;
- if (isdigit(*name)) {
- out = *name - '0';
- om |= (1 << out);
- } else if (*name == '-') {
- out = 8;
- } else {
- return 0; /* unknown bit name */
- }
-
- bit = (out * 5) + in;
-
- m |= 1ULL << bit;
- if (!neg)
- v |= 1ULL << bit;
- name++;
- }
- *mask = m;
- *value = v;
- if (imask)
- *imask |= im;
- if (omask)
- *omask |= om;
- return 1;
-}
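
To make the encoding concrete: each signal lands on bit (out * 5 + in), where in indexes the input letter (E=0, S=1, P=2, A=3, B=4), out is the digit of the output line, and '-' selects the grounded column (out = 8, i.e. bits 40-44, matching phys_scan_contacts() above). Lowercase letters negate the signal, so they set the mask bit but leave the value bit clear. A tiny illustrative sketch of that layout (not part of the driver):

	#include <assert.h>

	/* bit position used for input 'in' (E=0, S=1, P=2, A=3, B=4) on
	 * output line 'out' ('-' maps to out = 8, the grounded column)
	 */
	static int signal_bit(int in, int out)
	{
		return out * 5 + in;
	}

	int main(void)
	{
		assert(signal_bit(1, 8) == 41);	/* "S-": input S tied to ground */
		assert(signal_bit(1, 0) == 1);	/* "S0": input S vs. output line 0 */
		assert(signal_bit(4, 7) == 39);	/* "B7": input B vs. output line 7 */
		return 0;
	}
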
-
-/* tries to bind a key to the signal name <name>. The key will send the
- * strings <press>, <repeat>, <release> for these respective events.
- * Returns the pointer to the new key if ok, NULL if the key could not be bound.
- */
-static struct logical_input *panel_bind_key(const char *name, const char *press,
- const char *repeat,
- const char *release)
-{
- struct logical_input *key;
-
- key = kzalloc(sizeof(*key), GFP_KERNEL);
- if (!key)
- return NULL;
-
- if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i,
- &scan_mask_o)) {
- kfree(key);
- return NULL;
- }
-
- key->type = INPUT_TYPE_KBD;
- key->state = INPUT_ST_LOW;
- key->rise_time = 1;
- key->fall_time = 1;
-
- strncpy(key->u.kbd.press_str, press, sizeof(key->u.kbd.press_str));
- strncpy(key->u.kbd.repeat_str, repeat, sizeof(key->u.kbd.repeat_str));
- strncpy(key->u.kbd.release_str, release,
- sizeof(key->u.kbd.release_str));
- list_add(&key->list, &logical_inputs);
- return key;
-}
-
-#if 0
-/* tries to bind a callback function to the signal name <name>. The function
- * <press_fct> will be called with the <press_data> arg when the signal is
- * activated, and so on for <release_fct>/<release_data>
- * Returns the pointer to the new signal if ok, NULL if the signal could not
- * be bound.
- */
-static struct logical_input *panel_bind_callback(char *name,
- void (*press_fct)(int),
- int press_data,
- void (*release_fct)(int),
- int release_data)
-{
- struct logical_input *callback;
-
- callback = kmalloc(sizeof(*callback), GFP_KERNEL);
- if (!callback)
- return NULL;
-
- memset(callback, 0, sizeof(struct logical_input));
- if (!input_name2mask(name, &callback->mask, &callback->value,
- &scan_mask_i, &scan_mask_o))
- return NULL;
-
- callback->type = INPUT_TYPE_STD;
- callback->state = INPUT_ST_LOW;
- callback->rise_time = 1;
- callback->fall_time = 1;
- callback->u.std.press_fct = press_fct;
- callback->u.std.press_data = press_data;
- callback->u.std.release_fct = release_fct;
- callback->u.std.release_data = release_data;
- list_add(&callback->list, &logical_inputs);
- return callback;
-}
-#endif
-
-static void keypad_init(void)
-{
- int keynum;
-
- init_waitqueue_head(&keypad_read_wait);
-	keypad_buflen = 0;	/* discard any keystrokes that may have accumulated */
-
- /* Let's create all known keys */
-
- for (keynum = 0; keypad_profile[keynum][0][0]; keynum++) {
- panel_bind_key(keypad_profile[keynum][0],
- keypad_profile[keynum][1],
- keypad_profile[keynum][2],
- keypad_profile[keynum][3]);
- }
-
- init_scan_timer();
- keypad_initialized = 1;
-}
-
-/**************************************************/
-/* device initialization */
-/**************************************************/
-
-static int panel_notify_sys(struct notifier_block *this, unsigned long code,
- void *unused)
-{
- if (lcd.enabled && lcd.initialized) {
- switch (code) {
- case SYS_DOWN:
- panel_lcd_print
- ("\x0cReloading\nSystem...\x1b[Lc\x1b[Lb\x1b[L+");
- break;
- case SYS_HALT:
- panel_lcd_print
- ("\x0cSystem Halted.\x1b[Lc\x1b[Lb\x1b[L+");
- break;
- case SYS_POWER_OFF:
- panel_lcd_print("\x0cPower off.\x1b[Lc\x1b[Lb\x1b[L+");
- break;
- default:
- break;
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block panel_notifier = {
- panel_notify_sys,
- NULL,
- 0
-};
-
-static void panel_attach(struct parport *port)
-{
- struct pardev_cb panel_cb;
-
- if (port->number != parport)
- return;
-
- if (pprt) {
- pr_err("%s: port->number=%d parport=%d, already registered!\n",
- __func__, port->number, parport);
- return;
- }
-
- memset(&panel_cb, 0, sizeof(panel_cb));
- panel_cb.private = &pprt;
- /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */
-
- pprt = parport_register_dev_model(port, "panel", &panel_cb, 0);
- if (pprt == NULL) {
- pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n",
- __func__, port->number, parport);
- return;
- }
-
- if (parport_claim(pprt)) {
- pr_err("could not claim access to parport%d. Aborting.\n",
- parport);
- goto err_unreg_device;
- }
-
- /* must init LCD first, just in case an IRQ from the keypad is
- * generated at keypad init
- */
- if (lcd.enabled) {
- lcd_init();
- if (misc_register(&lcd_dev))
- goto err_unreg_device;
- }
-
- if (keypad.enabled) {
- keypad_init();
- if (misc_register(&keypad_dev))
- goto err_lcd_unreg;
- }
- register_reboot_notifier(&panel_notifier);
- return;
-
-err_lcd_unreg:
- if (lcd.enabled)
- misc_deregister(&lcd_dev);
-err_unreg_device:
- parport_unregister_device(pprt);
- pprt = NULL;
-}
-
-static void panel_detach(struct parport *port)
-{
- if (port->number != parport)
- return;
-
- if (!pprt) {
- pr_err("%s: port->number=%d parport=%d, nothing to unregister.\n",
- __func__, port->number, parport);
- return;
- }
- if (scan_timer.function != NULL)
- del_timer_sync(&scan_timer);
-
- if (pprt != NULL) {
- if (keypad.enabled) {
- misc_deregister(&keypad_dev);
- keypad_initialized = 0;
- }
-
- if (lcd.enabled) {
- panel_lcd_print("\x0cLCD driver " PANEL_VERSION
- "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
- misc_deregister(&lcd_dev);
- lcd.initialized = false;
- }
-
- /* TODO: free all input signals */
- parport_release(pprt);
- parport_unregister_device(pprt);
- pprt = NULL;
- unregister_reboot_notifier(&panel_notifier);
- }
-}
-
-static struct parport_driver panel_driver = {
- .name = "panel",
- .match_port = panel_attach,
- .detach = panel_detach,
- .devmodel = true,
-};
-
-/* init function */
-static int __init panel_init_module(void)
-{
- int selected_keypad_type = NOT_SET, err;
-
-	/* apply the selected profile, if any */
- switch (profile) {
- case PANEL_PROFILE_CUSTOM:
- /* custom profile */
- selected_keypad_type = DEFAULT_KEYPAD_TYPE;
- selected_lcd_type = DEFAULT_LCD_TYPE;
- break;
- case PANEL_PROFILE_OLD:
- /* 8 bits, 2*16, old keypad */
- selected_keypad_type = KEYPAD_TYPE_OLD;
- selected_lcd_type = LCD_TYPE_OLD;
-
-		/* TODO: These two are a little hacky, sort it out later */
- if (lcd_width == NOT_SET)
- lcd_width = 16;
- if (lcd_hwidth == NOT_SET)
- lcd_hwidth = 16;
- break;
- case PANEL_PROFILE_NEW:
- /* serial, 2*16, new keypad */
- selected_keypad_type = KEYPAD_TYPE_NEW;
- selected_lcd_type = LCD_TYPE_KS0074;
- break;
- case PANEL_PROFILE_HANTRONIX:
- /* 8 bits, 2*16 hantronix-like, no keypad */
- selected_keypad_type = KEYPAD_TYPE_NONE;
- selected_lcd_type = LCD_TYPE_HANTRONIX;
- break;
- case PANEL_PROFILE_NEXCOM:
-		/* generic 8 bits, 2*16, nexcom keypad, e.g. Nexcom */
- selected_keypad_type = KEYPAD_TYPE_NEXCOM;
- selected_lcd_type = LCD_TYPE_NEXCOM;
- break;
- case PANEL_PROFILE_LARGE:
- /* 8 bits, 2*40, old keypad */
- selected_keypad_type = KEYPAD_TYPE_OLD;
- selected_lcd_type = LCD_TYPE_OLD;
- break;
- }
-
- /*
- * Overwrite selection with module param values (both keypad and lcd),
- * where the deprecated params have lower prio.
- */
- if (keypad_enabled != NOT_SET)
- selected_keypad_type = keypad_enabled;
- if (keypad_type != NOT_SET)
- selected_keypad_type = keypad_type;
-
- keypad.enabled = (selected_keypad_type > 0);
-
- if (lcd_enabled != NOT_SET)
- selected_lcd_type = lcd_enabled;
- if (lcd_type != NOT_SET)
- selected_lcd_type = lcd_type;
-
- lcd.enabled = (selected_lcd_type > 0);
-
- if (lcd.enabled) {
- /*
- * Init lcd struct with load-time values to preserve exact
- * current functionality (at least for now).
- */
- lcd.height = lcd_height;
- lcd.width = lcd_width;
- lcd.bwidth = lcd_bwidth;
- lcd.hwidth = lcd_hwidth;
- lcd.charset = lcd_charset;
- lcd.proto = lcd_proto;
- lcd.pins.e = lcd_e_pin;
- lcd.pins.rs = lcd_rs_pin;
- lcd.pins.rw = lcd_rw_pin;
- lcd.pins.cl = lcd_cl_pin;
- lcd.pins.da = lcd_da_pin;
- lcd.pins.bl = lcd_bl_pin;
-
- /* Leave it for now, just in case */
- lcd.esc_seq.len = -1;
- }
-
- switch (selected_keypad_type) {
- case KEYPAD_TYPE_OLD:
- keypad_profile = old_keypad_profile;
- break;
- case KEYPAD_TYPE_NEW:
- keypad_profile = new_keypad_profile;
- break;
- case KEYPAD_TYPE_NEXCOM:
- keypad_profile = nexcom_keypad_profile;
- break;
- default:
- keypad_profile = NULL;
- break;
- }
-
- if (!lcd.enabled && !keypad.enabled) {
- /* no device enabled, let's exit */
- pr_err("driver version " PANEL_VERSION " disabled.\n");
- return -ENODEV;
- }
-
- err = parport_register_driver(&panel_driver);
- if (err) {
- pr_err("could not register with parport. Aborting.\n");
- return err;
- }
-
- if (pprt)
- pr_info("driver version " PANEL_VERSION
- " registered on parport%d (io=0x%lx).\n", parport,
- pprt->port->base);
- else
- pr_info("driver version " PANEL_VERSION
- " not yet registered\n");
- return 0;
-}
-
-static void __exit panel_cleanup_module(void)
-{
- parport_unregister_driver(&panel_driver);
-}
-
-module_init(panel_init_module);
-module_exit(panel_cleanup_module);
-MODULE_AUTHOR("Willy Tarreau");
-MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-indent-level: 4
- * tab-width: 8
- * End:
- */
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
deleted file mode 100644
index ba8765063174..000000000000
--- a/drivers/staging/rdma/Kconfig
+++ /dev/null
@@ -1,33 +0,0 @@
-menuconfig STAGING_RDMA
- tristate "RDMA staging drivers"
- depends on INFINIBAND
- depends on PCI || BROKEN
- depends on HAS_IOMEM
- depends on NET
- depends on INET
- default n
- ---help---
- This option allows you to select a number of RDMA drivers that
- fall into one of two categories: deprecated drivers being held
- here before finally being removed or new drivers that still need
- some work before being moved to the normal RDMA driver area.
-
- If you wish to work on these drivers, to help improve them, or
- to report problems you have with them, please use the
- linux-rdma@vger.kernel.org mailing list.
-
- If in doubt, say N here.
-
-
-# Please keep entries in alphabetic order
-if STAGING_RDMA
-
-source "drivers/staging/rdma/amso1100/Kconfig"
-
-source "drivers/staging/rdma/ehca/Kconfig"
-
-source "drivers/staging/rdma/hfi1/Kconfig"
-
-source "drivers/staging/rdma/ipath/Kconfig"
-
-endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
deleted file mode 100644
index 139d78ef2c24..000000000000
--- a/drivers/staging/rdma/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
-obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
-obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
-obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/staging/rdma/amso1100/Kbuild b/drivers/staging/rdma/amso1100/Kbuild
deleted file mode 100644
index 950dfabcd89d..000000000000
--- a/drivers/staging/rdma/amso1100/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
-
-obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
-
-iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
- c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/staging/rdma/amso1100/Kconfig b/drivers/staging/rdma/amso1100/Kconfig
deleted file mode 100644
index e6ce5f209e47..000000000000
--- a/drivers/staging/rdma/amso1100/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-config INFINIBAND_AMSO1100
- tristate "Ammasso 1100 HCA support"
- depends on PCI && INET
- ---help---
- This is a low-level driver for the Ammasso 1100 host
- channel adapter (HCA).
-
-config INFINIBAND_AMSO1100_DEBUG
- bool "Verbose debugging output"
- depends on INFINIBAND_AMSO1100
- default n
- ---help---
- This option causes the amso1100 driver to produce a bunch of
- debug messages. Select this if you are developing the driver
- or trying to diagnose a problem.
diff --git a/drivers/staging/rdma/amso1100/TODO b/drivers/staging/rdma/amso1100/TODO
deleted file mode 100644
index 18b00a5cb549..000000000000
--- a/drivers/staging/rdma/amso1100/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-7/2015
-
-The amso1100 driver has been deprecated and moved to drivers/staging.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
deleted file mode 100644
index 766a71ccefed..000000000000
--- a/drivers/staging/rdma/amso1100/c2.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_provider.h"
-
-MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
-MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
- | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-
-static int debug = -1; /* defaults above */
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-static int c2_up(struct net_device *netdev);
-static int c2_down(struct net_device *netdev);
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-static void c2_tx_interrupt(struct net_device *netdev);
-static void c2_rx_interrupt(struct net_device *netdev);
-static irqreturn_t c2_interrupt(int irq, void *dev_id);
-static void c2_tx_timeout(struct net_device *netdev);
-static int c2_change_mtu(struct net_device *netdev, int new_mtu);
-static void c2_reset(struct c2_port *c2_port);
-
-static struct pci_device_id c2_pci_table[] = {
- { PCI_DEVICE(0x18b8, 0xb001) },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, c2_pci_table);
-
-static void c2_print_macaddr(struct net_device *netdev)
-{
- pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
-}
-
-static void c2_set_rxbufsize(struct c2_port *c2_port)
-{
- struct net_device *netdev = c2_port->netdev;
-
- if (netdev->mtu > RX_BUF_SIZE)
- c2_port->rx_buf_size =
- netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
- NET_IP_ALIGN;
- else
- c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
-}
-
-/*
- * Allocate TX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
- dma_addr_t base, void __iomem * mmio_txp_ring)
-{
- struct c2_tx_desc *tx_desc;
- struct c2_txp_desc __iomem *txp_desc;
- struct c2_element *elem;
- int i;
-
- tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
- if (!tx_ring->start)
- return -ENOMEM;
-
- elem = tx_ring->start;
- tx_desc = vaddr;
- txp_desc = mmio_txp_ring;
- for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
- tx_desc->len = 0;
- tx_desc->status = 0;
-
- /* Set TXP_HTXD_UNINIT */
- __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
- (void __iomem *) txp_desc + C2_TXP_ADDR);
- __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
- (void __iomem *) txp_desc + C2_TXP_FLAGS);
-
- elem->skb = NULL;
- elem->ht_desc = tx_desc;
- elem->hw_desc = txp_desc;
-
- if (i == tx_ring->count - 1) {
- elem->next = tx_ring->start;
- tx_desc->next_offset = base;
- } else {
- elem->next = elem + 1;
- tx_desc->next_offset =
- base + (i + 1) * sizeof(*tx_desc);
- }
- }
-
- tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
-
- return 0;
-}
-
-/*
- * Allocate RX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
- dma_addr_t base, void __iomem * mmio_rxp_ring)
-{
- struct c2_rx_desc *rx_desc;
- struct c2_rxp_desc __iomem *rxp_desc;
- struct c2_element *elem;
- int i;
-
- rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
- if (!rx_ring->start)
- return -ENOMEM;
-
- elem = rx_ring->start;
- rx_desc = vaddr;
- rxp_desc = mmio_rxp_ring;
- for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
- rx_desc->len = 0;
- rx_desc->status = 0;
-
- /* Set RXP_HRXD_UNINIT */
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
- (void __iomem *) rxp_desc + C2_RXP_STATUS);
- __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
- __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
- (void __iomem *) rxp_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
- (void __iomem *) rxp_desc + C2_RXP_FLAGS);
-
- elem->skb = NULL;
- elem->ht_desc = rx_desc;
- elem->hw_desc = rxp_desc;
-
- if (i == rx_ring->count - 1) {
- elem->next = rx_ring->start;
- rx_desc->next_offset = base;
- } else {
- elem->next = elem + 1;
- rx_desc->next_offset =
- base + (i + 1) * sizeof(*rx_desc);
- }
- }
-
- rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
-
- return 0;
-}
-
-/* Setup buffer for receiving */
-static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
-{
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_rx_desc *rx_desc = elem->ht_desc;
- struct sk_buff *skb;
- dma_addr_t mapaddr;
- u32 maplen;
- struct c2_rxp_hdr *rxp_hdr;
-
- skb = dev_alloc_skb(c2_port->rx_buf_size);
- if (unlikely(!skb)) {
- pr_debug("%s: out of memory for receive\n",
- c2_port->netdev->name);
- return -ENOMEM;
- }
-
- /* Zero out the rxp hdr in the sk_buff */
- memset(skb->data, 0, sizeof(*rxp_hdr));
-
- skb->dev = c2_port->netdev;
-
- maplen = c2_port->rx_buf_size;
- mapaddr =
- pci_map_single(c2dev->pcidev, skb->data, maplen,
- PCI_DMA_FROMDEVICE);
-
- /* Set the sk_buff RXP_header to RXP_HRXD_READY */
- rxp_hdr = (struct c2_rxp_hdr *) skb->data;
- rxp_hdr->flags = RXP_HRXD_READY;
-
- __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
- __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
- elem->hw_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
- elem->hw_desc + C2_RXP_FLAGS);
-
- elem->skb = skb;
- elem->mapaddr = mapaddr;
- elem->maplen = maplen;
- rx_desc->len = maplen;
-
- return 0;
-}
-
-/*
- * Allocate buffers for the Rx ring
- * For receive: rx_ring.to_clean is next received frame
- */
-static int c2_rx_fill(struct c2_port *c2_port)
-{
- struct c2_ring *rx_ring = &c2_port->rx_ring;
- struct c2_element *elem;
- int ret = 0;
-
- elem = rx_ring->start;
- do {
- if (c2_rx_alloc(c2_port, elem)) {
- ret = 1;
- break;
- }
- } while ((elem = elem->next) != rx_ring->start);
-
- rx_ring->to_clean = rx_ring->start;
- return ret;
-}
-
-/* Free all buffers in RX ring, assumes receiver stopped */
-static void c2_rx_clean(struct c2_port *c2_port)
-{
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *rx_ring = &c2_port->rx_ring;
- struct c2_element *elem;
- struct c2_rx_desc *rx_desc;
-
- elem = rx_ring->start;
- do {
- rx_desc = elem->ht_desc;
- rx_desc->len = 0;
-
- __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
- __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
- __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
- elem->hw_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
- elem->hw_desc + C2_RXP_FLAGS);
-
- if (elem->skb) {
- pci_unmap_single(c2dev->pcidev, elem->mapaddr,
- elem->maplen, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(elem->skb);
- elem->skb = NULL;
- }
- } while ((elem = elem->next) != rx_ring->start);
-}
-
-static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
-{
- struct c2_tx_desc *tx_desc = elem->ht_desc;
-
- tx_desc->len = 0;
-
- pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
- PCI_DMA_TODEVICE);
-
- if (elem->skb) {
- dev_kfree_skb_any(elem->skb);
- elem->skb = NULL;
- }
-
- return 0;
-}
-
-/* Free all buffers in TX ring, assumes transmitter stopped */
-static void c2_tx_clean(struct c2_port *c2_port)
-{
- struct c2_ring *tx_ring = &c2_port->tx_ring;
- struct c2_element *elem;
- struct c2_txp_desc txp_htxd;
- int retry;
- unsigned long flags;
-
- spin_lock_irqsave(&c2_port->tx_lock, flags);
-
- elem = tx_ring->start;
-
- do {
- retry = 0;
- do {
- txp_htxd.flags =
- readw(elem->hw_desc + C2_TXP_FLAGS);
-
- if (txp_htxd.flags == TXP_HTXD_READY) {
- retry = 1;
- __raw_writew(0,
- elem->hw_desc + C2_TXP_LEN);
- __raw_writeq(0,
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
- elem->hw_desc + C2_TXP_FLAGS);
- c2_port->netdev->stats.tx_dropped++;
- break;
- } else {
- __raw_writew(0,
- elem->hw_desc + C2_TXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
- elem->hw_desc + C2_TXP_FLAGS);
- }
-
- c2_tx_free(c2_port->c2dev, elem);
-
- } while ((elem = elem->next) != tx_ring->start);
- } while (retry);
-
- c2_port->tx_avail = c2_port->tx_ring.count - 1;
- c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
-
- if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
- netif_wake_queue(c2_port->netdev);
-
- spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-}
-
-/*
- * Process transmit descriptors marked 'DONE' by the firmware,
- * freeing up their unneeded sk_buffs.
- */
-static void c2_tx_interrupt(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *tx_ring = &c2_port->tx_ring;
- struct c2_element *elem;
- struct c2_txp_desc txp_htxd;
-
- spin_lock(&c2_port->tx_lock);
-
- for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
- elem = elem->next) {
- txp_htxd.flags =
- be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
-
- if (txp_htxd.flags != TXP_HTXD_DONE)
- break;
-
- if (netif_msg_tx_done(c2_port)) {
- /* PCI reads are expensive in fast path */
- txp_htxd.len =
- be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
- pr_debug("%s: tx done slot %3Zu status 0x%x len "
- "%5u bytes\n",
- netdev->name, elem - tx_ring->start,
- txp_htxd.flags, txp_htxd.len);
- }
-
- c2_tx_free(c2dev, elem);
- ++(c2_port->tx_avail);
- }
-
- tx_ring->to_clean = elem;
-
- if (netif_queue_stopped(netdev)
- && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
- netif_wake_queue(netdev);
-
- spin_unlock(&c2_port->tx_lock);
-}
-
-static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
-{
- struct c2_rx_desc *rx_desc = elem->ht_desc;
- struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-
- if (rxp_hdr->status != RXP_HRXD_OK ||
- rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
- pr_debug("BAD RXP_HRXD\n");
- pr_debug(" rx_desc : %p\n", rx_desc);
- pr_debug(" index : %Zu\n",
- elem - c2_port->rx_ring.start);
- pr_debug(" len : %u\n", rx_desc->len);
- pr_debug(" rxp_hdr : %p [PA %p]\n", rxp_hdr,
- (void *) __pa((unsigned long) rxp_hdr));
- pr_debug(" flags : 0x%x\n", rxp_hdr->flags);
- pr_debug(" status: 0x%x\n", rxp_hdr->status);
- pr_debug(" len : %u\n", rxp_hdr->len);
- pr_debug(" rsvd : 0x%x\n", rxp_hdr->rsvd);
- }
-
- /* Setup the skb for reuse since we're dropping this pkt */
- elem->skb->data = elem->skb->head;
- skb_reset_tail_pointer(elem->skb);
-
- /* Zero out the rxp hdr in the sk_buff */
- memset(elem->skb->data, 0, sizeof(*rxp_hdr));
-
- /* Write the descriptor to the adapter's rx ring */
- __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
- __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
- __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
- elem->hw_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
- elem->hw_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
- elem->hw_desc + C2_RXP_FLAGS);
-
- pr_debug("packet dropped\n");
- c2_port->netdev->stats.rx_dropped++;
-}
-
-static void c2_rx_interrupt(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *rx_ring = &c2_port->rx_ring;
- struct c2_element *elem;
- struct c2_rx_desc *rx_desc;
- struct c2_rxp_hdr *rxp_hdr;
- struct sk_buff *skb;
- dma_addr_t mapaddr;
- u32 maplen, buflen;
- unsigned long flags;
-
- spin_lock_irqsave(&c2dev->lock, flags);
-
- /* Begin where we left off */
- rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
-
- for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
- elem = elem->next) {
- rx_desc = elem->ht_desc;
- mapaddr = elem->mapaddr;
- maplen = elem->maplen;
- skb = elem->skb;
- rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-
- if (rxp_hdr->flags != RXP_HRXD_DONE)
- break;
- buflen = rxp_hdr->len;
-
- /* Sanity check the RXP header */
- if (rxp_hdr->status != RXP_HRXD_OK ||
- buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
- c2_rx_error(c2_port, elem);
- continue;
- }
-
- /*
- * Allocate and map a new skb for replenishing the host
- * RX desc
- */
- if (c2_rx_alloc(c2_port, elem)) {
- c2_rx_error(c2_port, elem);
- continue;
- }
-
- /* Unmap the old skb */
- pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
- PCI_DMA_FROMDEVICE);
-
- prefetch(skb->data);
-
- /*
-		 * Skip past the leading 8 bytes comprising the
-		 * "struct c2_rxp_hdr", which the adapter prepends
-		 * to the usual Ethernet header ("struct ethhdr"),
-		 * to reach the start of the raw Ethernet packet.
-		 *
-		 * Fix up the various fields in the sk_buff before
-		 * passing it up to netif_rx(). The transfer size
-		 * (in bytes) specified by the adapter's len field
-		 * in "struct c2_rxp_hdr" does NOT include
-		 * "sizeof(struct c2_rxp_hdr)".
- */
- skb->data += sizeof(*rxp_hdr);
- skb_set_tail_pointer(skb, buflen);
- skb->len = buflen;
- skb->protocol = eth_type_trans(skb, netdev);
-
- netif_rx(skb);
-
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += buflen;
- }
-
- /* Save where we left off */
- rx_ring->to_clean = elem;
- c2dev->cur_rx = elem - rx_ring->start;
- C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
- spin_unlock_irqrestore(&c2dev->lock, flags);
-}
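
For clarity, the buffer handed to netif_rx() above is laid out as: the adapter writes its own 8-byte c2_rxp_hdr first, the Ethernet frame follows it, and rxp_hdr->len covers only the frame, not the prepended header. An illustrative sketch of that offset arithmetic (using a stand-in header layout, not the driver's struct):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	struct fake_rxp_hdr {		/* stand-in for the adapter's 8-byte header */
		uint16_t flags;
		uint16_t status;
		uint16_t len;		/* frame length, header NOT included */
		uint16_t rsvd;
	};

	int main(void)
	{
		unsigned char buf[128];
		struct fake_rxp_hdr hdr = { .len = 60 };	/* example frame length */
		unsigned char *frame = buf + sizeof(hdr);	/* frame starts after header */

		memcpy(buf, &hdr, sizeof(hdr));			/* adapter-written header */
		assert(sizeof(hdr) == 8);
		assert(frame - buf == 8);
		assert(hdr.len + sizeof(hdr) <= sizeof(buf));
		return 0;
	}
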
-
-/*
- * Handle netisr0 TX & RX interrupts.
- */
-static irqreturn_t c2_interrupt(int irq, void *dev_id)
-{
- unsigned int netisr0, dmaisr;
- int handled = 0;
- struct c2_dev *c2dev = (struct c2_dev *) dev_id;
-
- /* Process CCILNET interrupts */
- netisr0 = readl(c2dev->regs + C2_NISR0);
- if (netisr0) {
-
- /*
- * There is an issue with the firmware that always
- * provides the status of RX for both TX & RX
- * interrupts. So process both queues here.
- */
- c2_rx_interrupt(c2dev->netdev);
- c2_tx_interrupt(c2dev->netdev);
-
- /* Clear the interrupt */
- writel(netisr0, c2dev->regs + C2_NISR0);
- handled++;
- }
-
- /* Process RNIC interrupts */
- dmaisr = readl(c2dev->regs + C2_DISR);
- if (dmaisr) {
- writel(dmaisr, c2dev->regs + C2_DISR);
- c2_rnic_interrupt(c2dev);
- handled++;
- }
-
- if (handled) {
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
-}
-
-static int c2_up(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_element *elem;
- struct c2_rxp_hdr *rxp_hdr;
- struct in_device *in_dev;
- size_t rx_size, tx_size;
- int ret, i;
- unsigned int netimr0;
-
- if (netif_msg_ifup(c2_port))
- pr_debug("%s: enabling interface\n", netdev->name);
-
- /* Set the Rx buffer size based on MTU */
- c2_set_rxbufsize(c2_port);
-
- /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
- rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
- tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
-
- c2_port->mem_size = tx_size + rx_size;
- c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
- &c2_port->dma);
- if (c2_port->mem == NULL) {
- pr_debug("Unable to allocate memory for "
- "host descriptor rings\n");
- return -ENOMEM;
- }
-
- /* Create the Rx host descriptor ring */
- if ((ret =
- c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
- c2dev->mmio_rxp_ring))) {
- pr_debug("Unable to create RX ring\n");
- goto bail0;
- }
-
- /* Allocate Rx buffers for the host descriptor ring */
- if (c2_rx_fill(c2_port)) {
- pr_debug("Unable to fill RX ring\n");
- goto bail1;
- }
-
- /* Create the Tx host descriptor ring */
- if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
- c2_port->dma + rx_size,
- c2dev->mmio_txp_ring))) {
- pr_debug("Unable to create TX ring\n");
- goto bail1;
- }
-
- /* Set the TX pointer to where we left off */
- c2_port->tx_avail = c2_port->tx_ring.count - 1;
- c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
- c2_port->tx_ring.start + c2dev->cur_tx;
-
- /* missing: Initialize MAC */
-
- BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
-
- /* Reset the adapter, ensures the driver is in sync with the RXP */
- c2_reset(c2_port);
-
- /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
- for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
- i++, elem++) {
- rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
- rxp_hdr->flags = 0;
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
- elem->hw_desc + C2_RXP_FLAGS);
- }
-
- /* Enable network packets */
- netif_start_queue(netdev);
-
- /* Enable IRQ */
- writel(0, c2dev->regs + C2_IDIS);
- netimr0 = readl(c2dev->regs + C2_NIMR0);
- netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
- writel(netimr0, c2dev->regs + C2_NIMR0);
-
- /* Tell the stack to ignore arp requests for ipaddrs bound to
- * other interfaces. This is needed to prevent the host stack
- * from responding to arp requests to the ipaddr bound on the
- * rdma interface.
- */
- in_dev = in_dev_get(netdev);
- IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
- in_dev_put(in_dev);
-
- return 0;
-
- bail1:
- c2_rx_clean(c2_port);
- kfree(c2_port->rx_ring.start);
-
- bail0:
- pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
- c2_port->dma);
-
- return ret;
-}
-
-static int c2_down(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
-
- if (netif_msg_ifdown(c2_port))
- pr_debug("%s: disabling interface\n",
- netdev->name);
-
- /* Wait for all the queued packets to get sent */
- c2_tx_interrupt(netdev);
-
- /* Disable network packets */
- netif_stop_queue(netdev);
-
- /* Disable IRQs by clearing the interrupt mask */
- writel(1, c2dev->regs + C2_IDIS);
- writel(0, c2dev->regs + C2_NIMR0);
-
- /* missing: Stop transmitter */
-
- /* missing: Stop receiver */
-
- /* Reset the adapter, ensures the driver is in sync with the RXP */
- c2_reset(c2_port);
-
- /* missing: Turn off LEDs here */
-
- /* Free all buffers in the host descriptor rings */
- c2_tx_clean(c2_port);
- c2_rx_clean(c2_port);
-
- /* Free the host descriptor rings */
- kfree(c2_port->rx_ring.start);
- kfree(c2_port->tx_ring.start);
- pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
- c2_port->dma);
-
- return 0;
-}
-
-static void c2_reset(struct c2_port *c2_port)
-{
- struct c2_dev *c2dev = c2_port->c2dev;
- unsigned int cur_rx = c2dev->cur_rx;
-
- /* Tell the hardware to quiesce */
- C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
-
- /*
- * The hardware will reset the C2_PCI_HRX_QUI bit once
- * the RXP is quiesced. Wait 2 seconds for this.
- */
- ssleep(2);
-
- cur_rx = C2_GET_CUR_RX(c2dev);
-
- if (cur_rx & C2_PCI_HRX_QUI)
- pr_debug("c2_reset: failed to quiesce the hardware!\n");
-
- cur_rx &= ~C2_PCI_HRX_QUI;
-
- c2dev->cur_rx = cur_rx;
-
- pr_debug("Current RX: %u\n", c2dev->cur_rx);
-}
-
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *tx_ring = &c2_port->tx_ring;
- struct c2_element *elem;
- dma_addr_t mapaddr;
- u32 maplen;
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&c2_port->tx_lock, flags);
-
- if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
- pr_debug("%s: Tx ring full when queue awake!\n",
- netdev->name);
- return NETDEV_TX_BUSY;
- }
-
- maplen = skb_headlen(skb);
- mapaddr =
- pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
-
- elem = tx_ring->to_use;
- elem->skb = skb;
- elem->mapaddr = mapaddr;
- elem->maplen = maplen;
-
- /* Tell HW to xmit */
- __raw_writeq((__force u64) cpu_to_be64(mapaddr),
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(maplen),
- elem->hw_desc + C2_TXP_LEN);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
- elem->hw_desc + C2_TXP_FLAGS);
-
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += maplen;
-
- /* Loop thru additional data fragments and queue them */
- if (skb_shinfo(skb)->nr_frags) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- maplen = skb_frag_size(frag);
- mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
- 0, maplen, DMA_TO_DEVICE);
- elem = elem->next;
- elem->skb = NULL;
- elem->mapaddr = mapaddr;
- elem->maplen = maplen;
-
- /* Tell HW to xmit */
- __raw_writeq((__force u64) cpu_to_be64(mapaddr),
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(maplen),
- elem->hw_desc + C2_TXP_LEN);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
- elem->hw_desc + C2_TXP_FLAGS);
-
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += maplen;
- }
- }
-
- tx_ring->to_use = elem->next;
- c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
-
- if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
- netif_stop_queue(netdev);
- if (netif_msg_tx_queued(c2_port))
- pr_debug("%s: transmit queue full\n",
- netdev->name);
- }
-
- spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
- netdev->trans_start = jiffies;
-
- return NETDEV_TX_OK;
-}
-
-static void c2_tx_timeout(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
-
- if (netif_msg_timer(c2_port))
- pr_debug("%s: tx timeout\n", netdev->name);
-
- c2_tx_clean(c2_port);
-}
-
-static int c2_change_mtu(struct net_device *netdev, int new_mtu)
-{
- int ret = 0;
-
- if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
- return -EINVAL;
-
- netdev->mtu = new_mtu;
-
- if (netif_running(netdev)) {
- c2_down(netdev);
-
- c2_up(netdev);
- }
-
- return ret;
-}
-
-static const struct net_device_ops c2_netdev = {
- .ndo_open = c2_up,
- .ndo_stop = c2_down,
- .ndo_start_xmit = c2_xmit_frame,
- .ndo_tx_timeout = c2_tx_timeout,
- .ndo_change_mtu = c2_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* Initialize network device */
-static struct net_device *c2_devinit(struct c2_dev *c2dev,
- void __iomem * mmio_addr)
-{
- struct c2_port *c2_port = NULL;
- struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
-
- if (!netdev) {
- pr_debug("c2_port etherdev alloc failed");
- return NULL;
- }
-
- SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
- netdev->netdev_ops = &c2_netdev;
- netdev->watchdog_timeo = C2_TX_TIMEOUT;
- netdev->irq = c2dev->pcidev->irq;
-
- c2_port = netdev_priv(netdev);
- c2_port->netdev = netdev;
- c2_port->c2dev = c2dev;
- c2_port->msg_enable = netif_msg_init(debug, default_msg);
- c2_port->tx_ring.count = C2_NUM_TX_DESC;
- c2_port->rx_ring.count = C2_NUM_RX_DESC;
-
- spin_lock_init(&c2_port->tx_lock);
-
- /* Copy our 48-bit ethernet hardware address */
- memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
-
- /* Validate the MAC address */
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- pr_debug("Invalid MAC Address\n");
- c2_print_macaddr(netdev);
- free_netdev(netdev);
- return NULL;
- }
-
- c2dev->netdev = netdev;
-
- return netdev;
-}
-
-static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
-{
- int ret = 0, i;
- unsigned long reg0_start, reg0_flags, reg0_len;
- unsigned long reg2_start, reg2_flags, reg2_len;
- unsigned long reg4_start, reg4_flags, reg4_len;
- unsigned kva_map_size;
- struct net_device *netdev = NULL;
- struct c2_dev *c2dev = NULL;
- void __iomem *mmio_regs = NULL;
-
- printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
- DRV_VERSION);
-
- /* Enable PCI device */
- ret = pci_enable_device(pcidev);
- if (ret) {
- printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
- pci_name(pcidev));
- goto bail0;
- }
-
- reg0_start = pci_resource_start(pcidev, BAR_0);
- reg0_len = pci_resource_len(pcidev, BAR_0);
- reg0_flags = pci_resource_flags(pcidev, BAR_0);
-
- reg2_start = pci_resource_start(pcidev, BAR_2);
- reg2_len = pci_resource_len(pcidev, BAR_2);
- reg2_flags = pci_resource_flags(pcidev, BAR_2);
-
- reg4_start = pci_resource_start(pcidev, BAR_4);
- reg4_len = pci_resource_len(pcidev, BAR_4);
- reg4_flags = pci_resource_flags(pcidev, BAR_4);
-
- pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
- pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
- pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
-
- /* Make sure PCI base addr are MMIO */
- if (!(reg0_flags & IORESOURCE_MEM) ||
- !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
- ret = -ENODEV;
- goto bail1;
- }
-
- /* Check for weird/broken PCI region reporting */
- if ((reg0_len < C2_REG0_SIZE) ||
- (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
- printk(KERN_ERR PFX "Invalid PCI region sizes\n");
- ret = -ENODEV;
- goto bail1;
- }
-
- /* Reserve PCI I/O and memory resources */
- ret = pci_request_regions(pcidev, DRV_NAME);
- if (ret) {
- printk(KERN_ERR PFX "%s: Unable to request regions\n",
- pci_name(pcidev));
- goto bail1;
- }
-
- if ((sizeof(dma_addr_t) > 4)) {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
- if (ret < 0) {
- printk(KERN_ERR PFX "64b DMA configuration failed\n");
- goto bail2;
- }
- } else {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (ret < 0) {
- printk(KERN_ERR PFX "32b DMA configuration failed\n");
- goto bail2;
- }
- }
-
- /* Enables bus-mastering on the device */
- pci_set_master(pcidev);
-
- /* Remap the adapter PCI registers in BAR4 */
- mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
- sizeof(struct c2_adapter_pci_regs));
- if (!mmio_regs) {
- printk(KERN_ERR PFX
- "Unable to remap adapter PCI registers in BAR4\n");
- ret = -EIO;
- goto bail2;
- }
-
- /* Validate PCI regs magic */
- for (i = 0; i < sizeof(c2_magic); i++) {
- if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
- printk(KERN_ERR PFX "Downlevel Firmware boot loader "
-			       "[%d/%zu: got 0x%x, exp 0x%x]. Use the cc_flash "
- "utility to update your boot loader\n",
- i + 1, sizeof(c2_magic),
- readb(mmio_regs + C2_REGS_MAGIC + i),
- c2_magic[i]);
- printk(KERN_ERR PFX "Adapter not claimed\n");
- iounmap(mmio_regs);
- ret = -EIO;
- goto bail2;
- }
- }
-
- /* Validate the adapter version */
- if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
- printk(KERN_ERR PFX "Version mismatch "
- "[fw=%u, c2=%u], Adapter not claimed\n",
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
- C2_VERSION);
- ret = -EINVAL;
- iounmap(mmio_regs);
- goto bail2;
- }
-
- /* Validate the adapter IVN */
- if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
-		printk(KERN_ERR PFX "Downlevel Firmware level. You should be using "
- "the OpenIB device support kit. "
- "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
- C2_IVN);
- ret = -EINVAL;
- iounmap(mmio_regs);
- goto bail2;
- }
-
- /* Allocate hardware structure */
- c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
- if (!c2dev) {
- printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
- pci_name(pcidev));
- ret = -ENOMEM;
- iounmap(mmio_regs);
- goto bail2;
- }
-
- memset(c2dev, 0, sizeof(*c2dev));
- spin_lock_init(&c2dev->lock);
- c2dev->pcidev = pcidev;
- c2dev->cur_tx = 0;
-
- /* Get the last RX index */
- c2dev->cur_rx =
- (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
- 0xffffc000) / sizeof(struct c2_rxp_desc);
-
- /* Request an interrupt line for the driver */
- ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
- if (ret) {
- printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
- pci_name(pcidev), pcidev->irq);
- iounmap(mmio_regs);
- goto bail3;
- }
-
- /* Set driver specific data */
- pci_set_drvdata(pcidev, c2dev);
-
- /* Initialize network device */
- if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
- ret = -ENOMEM;
- iounmap(mmio_regs);
- goto bail4;
- }
-
- /* Save off the actual size prior to unmapping mmio_regs */
- kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
-
- /* Unmap the adapter PCI registers in BAR4 */
- iounmap(mmio_regs);
-
- /* Register network device */
- ret = register_netdev(netdev);
- if (ret) {
- printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
- ret);
- goto bail5;
- }
-
- /* Disable network packets */
- netif_stop_queue(netdev);
-
- /* Remap the adapter HRXDQ PA space to kernel VA space */
- c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
- C2_RXP_HRXDQ_SIZE);
- if (!c2dev->mmio_rxp_ring) {
- printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
- ret = -EIO;
- goto bail6;
- }
-
- /* Remap the adapter HTXDQ PA space to kernel VA space */
- c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
- C2_TXP_HTXDQ_SIZE);
- if (!c2dev->mmio_txp_ring) {
- printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
- ret = -EIO;
- goto bail7;
- }
-
- /* Save off the current RX index in the last 4 bytes of the TXP Ring */
- C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
- /* Remap the PCI registers in adapter BAR0 to kernel VA space */
- c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
- if (!c2dev->regs) {
- printk(KERN_ERR PFX "Unable to remap BAR0\n");
- ret = -EIO;
- goto bail8;
- }
-
- /* Remap the PCI registers in adapter BAR4 to kernel VA space */
- c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
- c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
- kva_map_size);
- if (!c2dev->kva) {
- printk(KERN_ERR PFX "Unable to remap BAR4\n");
- ret = -EIO;
- goto bail9;
- }
-
- /* Print out the MAC address */
- c2_print_macaddr(netdev);
-
- ret = c2_rnic_init(c2dev);
- if (ret) {
- printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
- goto bail10;
- }
-
- ret = c2_register_device(c2dev);
- if (ret)
- goto bail10;
-
- return 0;
-
- bail10:
- iounmap(c2dev->kva);
-
- bail9:
- iounmap(c2dev->regs);
-
- bail8:
- iounmap(c2dev->mmio_txp_ring);
-
- bail7:
- iounmap(c2dev->mmio_rxp_ring);
-
- bail6:
- unregister_netdev(netdev);
-
- bail5:
- free_netdev(netdev);
-
- bail4:
- free_irq(pcidev->irq, c2dev);
-
- bail3:
- ib_dealloc_device(&c2dev->ibdev);
-
- bail2:
- pci_release_regions(pcidev);
-
- bail1:
- pci_disable_device(pcidev);
-
- bail0:
- return ret;
-}
-
-static void c2_remove(struct pci_dev *pcidev)
-{
- struct c2_dev *c2dev = pci_get_drvdata(pcidev);
- struct net_device *netdev = c2dev->netdev;
-
- /* Unregister with OpenIB */
- c2_unregister_device(c2dev);
-
- /* Clean up the RNIC resources */
- c2_rnic_term(c2dev);
-
- /* Remove network device from the kernel */
- unregister_netdev(netdev);
-
- /* Free network device */
- free_netdev(netdev);
-
- /* Free the interrupt line */
- free_irq(pcidev->irq, c2dev);
-
- /* missing: Turn LEDs off here */
-
- /* Unmap adapter PA space */
- iounmap(c2dev->kva);
- iounmap(c2dev->regs);
- iounmap(c2dev->mmio_txp_ring);
- iounmap(c2dev->mmio_rxp_ring);
-
- /* Free the hardware structure */
- ib_dealloc_device(&c2dev->ibdev);
-
- /* Release reserved PCI I/O and memory resources */
- pci_release_regions(pcidev);
-
- /* Disable PCI device */
- pci_disable_device(pcidev);
-
- /* Clear driver specific data */
- pci_set_drvdata(pcidev, NULL);
-}
-
-static struct pci_driver c2_pci_driver = {
- .name = DRV_NAME,
- .id_table = c2_pci_table,
- .probe = c2_probe,
- .remove = c2_remove,
-};
-
-module_pci_driver(c2_pci_driver);
diff --git a/drivers/staging/rdma/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
deleted file mode 100644
index d619d735838b..000000000000
--- a/drivers/staging/rdma/amso1100/c2.h
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __C2_H
-#define __C2_H
-
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-
-#include "c2_provider.h"
-#include "c2_mq.h"
-#include "c2_status.h"
-
-#define DRV_NAME "c2"
-#define DRV_VERSION "1.1"
-#define PFX DRV_NAME ": "
-
-#define BAR_0 0
-#define BAR_2 2
-#define BAR_4 4
-
-#define RX_BUF_SIZE (1536 + 8)
-#define ETH_JUMBO_MTU 9000
-#define C2_MAGIC "CEPHEUS"
-#define C2_VERSION 4
-#define C2_IVN (18 & 0x7fffffff)
-
-#define C2_REG0_SIZE (16 * 1024)
-#define C2_REG2_SIZE (2 * 1024 * 1024)
-#define C2_REG4_SIZE (256 * 1024 * 1024)
-#define C2_NUM_TX_DESC 341
-#define C2_NUM_RX_DESC 256
-#define C2_PCI_REGS_OFFSET (0x10000)
-#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
-#define C2_RXP_HRXDQ_SIZE (4096)
-#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
-#define C2_TXP_HTXDQ_SIZE (4096)
-#define C2_TX_TIMEOUT (6*HZ)
-
-/* CEPHEUS */
-static const u8 c2_magic[] = {
- 0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
-};
-
-enum adapter_pci_regs {
- C2_REGS_MAGIC = 0x0000,
- C2_REGS_VERS = 0x0008,
- C2_REGS_IVN = 0x000C,
- C2_REGS_PCI_WINSIZE = 0x0010,
- C2_REGS_Q0_QSIZE = 0x0014,
- C2_REGS_Q0_MSGSIZE = 0x0018,
- C2_REGS_Q0_POOLSTART = 0x001C,
- C2_REGS_Q0_SHARED = 0x0020,
- C2_REGS_Q1_QSIZE = 0x0024,
- C2_REGS_Q1_MSGSIZE = 0x0028,
- C2_REGS_Q1_SHARED = 0x0030,
- C2_REGS_Q2_QSIZE = 0x0034,
- C2_REGS_Q2_MSGSIZE = 0x0038,
- C2_REGS_Q2_SHARED = 0x0040,
- C2_REGS_ENADDR = 0x004C,
- C2_REGS_RDMA_ENADDR = 0x0054,
- C2_REGS_HRX_CUR = 0x006C,
-};
-
-struct c2_adapter_pci_regs {
- char reg_magic[8];
- u32 version;
- u32 ivn;
- u32 pci_window_size;
- u32 q0_q_size;
- u32 q0_msg_size;
- u32 q0_pool_start;
- u32 q0_shared;
- u32 q1_q_size;
- u32 q1_msg_size;
- u32 q1_pool_start;
- u32 q1_shared;
- u32 q2_q_size;
- u32 q2_msg_size;
- u32 q2_pool_start;
- u32 q2_shared;
- u32 log_start;
- u32 log_size;
- u8 host_enaddr[8];
- u8 rdma_enaddr[8];
- u32 crash_entry;
- u32 crash_ready[2];
- u32 fw_txd_cur;
- u32 fw_hrxd_cur;
- u32 fw_rxd_cur;
-};
-
-enum pci_regs {
- C2_HISR = 0x0000,
- C2_DISR = 0x0004,
- C2_HIMR = 0x0008,
- C2_DIMR = 0x000C,
- C2_NISR0 = 0x0010,
- C2_NISR1 = 0x0014,
- C2_NIMR0 = 0x0018,
- C2_NIMR1 = 0x001C,
- C2_IDIS = 0x0020,
-};
-
-enum {
- C2_PCI_HRX_INT = 1 << 8,
- C2_PCI_HTX_INT = 1 << 17,
- C2_PCI_HRX_QUI = 1 << 31,
-};
-
-/*
- * Cepheus registers in BAR0.
- */
-struct c2_pci_regs {
- u32 hostisr;
- u32 dmaisr;
- u32 hostimr;
- u32 dmaimr;
- u32 netisr0;
- u32 netisr1;
- u32 netimr0;
- u32 netimr1;
- u32 int_disable;
-};
-
-/* TXP flags */
-enum c2_txp_flags {
- TXP_HTXD_DONE = 0,
- TXP_HTXD_READY = 1 << 0,
- TXP_HTXD_UNINIT = 1 << 1,
-};
-
-/* RXP flags */
-enum c2_rxp_flags {
- RXP_HRXD_UNINIT = 0,
- RXP_HRXD_READY = 1 << 0,
- RXP_HRXD_DONE = 1 << 1,
-};
-
-/* RXP status */
-enum c2_rxp_status {
- RXP_HRXD_ZERO = 0,
- RXP_HRXD_OK = 1 << 0,
- RXP_HRXD_BUF_OV = 1 << 1,
-};
-
-/* TXP descriptor fields */
-enum txp_desc {
- C2_TXP_FLAGS = 0x0000,
- C2_TXP_LEN = 0x0002,
- C2_TXP_ADDR = 0x0004,
-};
-
-/* RXP descriptor fields */
-enum rxp_desc {
- C2_RXP_FLAGS = 0x0000,
- C2_RXP_STATUS = 0x0002,
- C2_RXP_COUNT = 0x0004,
- C2_RXP_LEN = 0x0006,
- C2_RXP_ADDR = 0x0008,
-};
-
-struct c2_txp_desc {
- u16 flags;
- u16 len;
- u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_desc {
- u16 flags;
- u16 status;
- u16 count;
- u16 len;
- u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_hdr {
- u16 flags;
- u16 status;
- u16 len;
- u16 rsvd;
-} __attribute__ ((packed));
-
-struct c2_tx_desc {
- u32 len;
- u32 status;
- dma_addr_t next_offset;
-};
-
-struct c2_rx_desc {
- u32 len;
- u32 status;
- dma_addr_t next_offset;
-};
-
-struct c2_alloc {
- u32 last;
- u32 max;
- spinlock_t lock;
- unsigned long *table;
-};
-
-struct c2_array {
- struct {
- void **page;
- int used;
- } *page_list;
-};
-
-/*
- * The MQ shared pointer pool is organized as a linked list of
- * chunks. Each chunk contains a linked list of free shared pointers
- * that can be allocated to a given user mode client.
- *
- */
-struct sp_chunk {
- struct sp_chunk *next;
- dma_addr_t dma_addr;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- u16 head;
- u16 shared_ptr[0];
-};
-
-struct c2_pd_table {
- u32 last;
- u32 max;
- spinlock_t lock;
- unsigned long *table;
-};
-
-struct c2_qp_table {
- struct idr idr;
- spinlock_t lock;
-};
-
-struct c2_element {
- struct c2_element *next;
- void *ht_desc; /* host descriptor */
- void __iomem *hw_desc; /* hardware descriptor */
- struct sk_buff *skb;
- dma_addr_t mapaddr;
- u32 maplen;
-};
-
-struct c2_ring {
- struct c2_element *to_clean;
- struct c2_element *to_use;
- struct c2_element *start;
- unsigned long count;
-};
-
-struct c2_dev {
- struct ib_device ibdev;
- void __iomem *regs;
- void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
- void __iomem *mmio_rxp_ring;
- spinlock_t lock;
- struct pci_dev *pcidev;
- struct net_device *netdev;
- struct net_device *pseudo_netdev;
- unsigned int cur_tx;
- unsigned int cur_rx;
- u32 adapter_handle;
- int device_cap_flags;
- void __iomem *kva; /* KVA device memory */
- unsigned long pa; /* PA device memory */
- void **qptr_array;
-
- struct kmem_cache *host_msg_cache;
-
- struct list_head cca_link; /* adapter list */
- struct list_head eh_wakeup_list; /* event wakeup list */
- wait_queue_head_t req_vq_wo;
-
- /* Cached RNIC properties */
- struct ib_device_attr props;
-
- struct c2_pd_table pd_table;
- struct c2_qp_table qp_table;
- int ports; /* num of GigE ports */
- int devnum;
- spinlock_t vqlock; /* sync vbs req MQ */
-
- /* Verbs Queues */
- struct c2_mq req_vq; /* Verbs Request MQ */
- struct c2_mq rep_vq; /* Verbs Reply MQ */
- struct c2_mq aeq; /* Async Events MQ */
-
- /* Kernel client MQs */
- struct sp_chunk *kern_mqsp_pool;
-
- /* Device updates these values when posting messages to a host
- * target queue */
- u16 req_vq_shared;
- u16 rep_vq_shared;
- u16 aeq_shared;
- u16 irq_claimed;
-
- /*
- * Shared host target pages for user-accessible MQs.
- */
- int hthead; /* index of first free entry */
- void *htpages; /* kernel vaddr */
- int htlen; /* length of htpages memory */
- void *htuva; /* user mapped vaddr */
- spinlock_t htlock; /* serialize allocation */
-
- u64 adapter_hint_uva; /* access to the activity FIFO */
-
- // spinlock_t aeq_lock;
- // spinlock_t rnic_lock;
-
- __be16 *hint_count;
- dma_addr_t hint_count_dma;
- u16 hints_read;
-
- int init; /* TRUE if it's ready */
- char ae_cache_name[16];
- char vq_cache_name[16];
-};
-
-struct c2_port {
- u32 msg_enable;
- struct c2_dev *c2dev;
- struct net_device *netdev;
-
- spinlock_t tx_lock;
- u32 tx_avail;
- struct c2_ring tx_ring;
- struct c2_ring rx_ring;
-
- void *mem; /* PCI memory for host rings */
- dma_addr_t dma;
- unsigned long mem_size;
-
- u32 rx_buf_size;
-};
-
-/*
- * Activity FIFO registers in BAR0.
- */
-#define PCI_BAR0_HOST_HINT 0x100
-#define PCI_BAR0_ADAPTER_HINT 0x2000
-
-/*
- * Ammasso PCI vendor id and Cepheus PCI device id.
- */
-#define CQ_ARMED 0x01
-#define CQ_WAIT_FOR_DMA 0x80
-
-/*
- * The format of a hint is as follows:
- * Lower 16 bits are the count of hints for the queue.
- * Next 15 bits are the qp_index
- * Upper most bit depends on who reads it:
- * If read by producer, then it means Full (1) or Not-Full (0)
- * If read by consumer, then it means Empty (1) or Not-Empty (0)
- */
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-
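Aside: the hint layout described above can be sanity-checked with a few lines of ordinary C. This is an illustrative sketch only, not part of the removed driver; a queue index of 5 with a count of 3 packs to 0x00050003.

	#include <assert.h>
	#include <stdint.h>

	/* Same bit layout as the C2_HINT_* macros above. */
	int main(void)
	{
		uint32_t hint = (5u << 16) | 3u;            /* C2_HINT_MAKE(5, 3) */

		assert(((hint & 0x7FFF0000u) >> 16) == 5);  /* C2_HINT_GET_INDEX() */
		assert((hint & 0x0000FFFFu) == 3);          /* C2_HINT_GET_COUNT() */
		return 0;
	}
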
-/*
- * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t
- * struct.
- */
-#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
-
-#ifndef readq
-static inline u64 readq(const void __iomem * addr)
-{
- u64 ret = readl(addr + 4);
- ret <<= 32;
- ret |= readl(addr);
-
- return ret;
-}
-#endif
-
-#ifndef writeq
-static inline void __raw_writeq(u64 val, void __iomem * addr)
-{
- __raw_writel((u32) (val), addr);
- __raw_writel((u32) (val >> 32), (addr + 4));
-}
-#endif
-
-#define C2_SET_CUR_RX(c2dev, cur_rx) \
- __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
-
-#define C2_GET_CUR_RX(c2dev) \
- be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
-
-static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct c2_dev, ibdev);
-}
-
-static inline int c2_errno(void *reply)
-{
- switch (c2_wr_get_result(reply)) {
- case C2_OK:
- return 0;
- case CCERR_NO_BUFS:
- case CCERR_INSUFFICIENT_RESOURCES:
- case CCERR_ZERO_RDMA_READ_RESOURCES:
- return -ENOMEM;
- case CCERR_MR_IN_USE:
- case CCERR_QP_IN_USE:
- return -EBUSY;
- case CCERR_ADDR_IN_USE:
- return -EADDRINUSE;
- case CCERR_ADDR_NOT_AVAIL:
- return -EADDRNOTAVAIL;
- case CCERR_CONN_RESET:
- return -ECONNRESET;
- case CCERR_NOT_IMPLEMENTED:
- case CCERR_INVALID_WQE:
- return -ENOSYS;
- case CCERR_QP_NOT_PRIVILEGED:
- return -EPERM;
- case CCERR_STACK_ERROR:
- return -EPROTO;
- case CCERR_ACCESS_VIOLATION:
- case CCERR_BASE_AND_BOUNDS_VIOLATION:
- return -EFAULT;
- case CCERR_STAG_STATE_NOT_INVALID:
- case CCERR_INVALID_ADDRESS:
- case CCERR_INVALID_CQ:
- case CCERR_INVALID_EP:
- case CCERR_INVALID_MODIFIER:
- case CCERR_INVALID_MTU:
- case CCERR_INVALID_PD_ID:
- case CCERR_INVALID_QP:
- case CCERR_INVALID_RNIC:
- case CCERR_INVALID_STAG:
- return -EINVAL;
- default:
- return -EAGAIN;
- }
-}
-
-/* Device */
-extern int c2_register_device(struct c2_dev *c2dev);
-extern void c2_unregister_device(struct c2_dev *c2dev);
-extern int c2_rnic_init(struct c2_dev *c2dev);
-extern void c2_rnic_term(struct c2_dev *c2dev);
-extern void c2_rnic_interrupt(struct c2_dev *c2dev);
-extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-
-/* QPs */
-extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
- struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
-extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
-extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
-extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
- struct ib_qp_attr *attr, int attr_mask);
-extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
- int ord, int ird);
-extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr);
-extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr);
-extern void c2_init_qp_table(struct c2_dev *c2dev);
-extern void c2_cleanup_qp_table(struct c2_dev *c2dev);
-extern void c2_set_qp_state(struct c2_qp *, int);
-extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
-
-/* PDs */
-extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
-extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-extern int c2_init_pd_table(struct c2_dev *c2dev);
-extern void c2_cleanup_pd_table(struct c2_dev *c2dev);
-
-/* CQs */
-extern int c2_init_cq(struct c2_dev *c2dev, int entries,
- struct c2_ucontext *ctx, struct c2_cq *cq);
-extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
-extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
-extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
-extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-
-/* CM */
-extern int c2_llp_connect(struct iw_cm_id *cm_id,
- struct iw_cm_conn_param *iw_param);
-extern int c2_llp_accept(struct iw_cm_id *cm_id,
- struct iw_cm_conn_param *iw_param);
-extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
- u8 pdata_len);
-extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
-extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);
-
-/* MM */
-extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
- int page_size, int pbl_depth, u32 length,
- u32 off, u64 *va, enum c2_acf acf,
- struct c2_mr *mr);
-extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
-
-/* AE */
-extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
-
-/* MQSP Allocator */
-extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
- struct sp_chunk **root);
-extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
- dma_addr_t *dma_addr, gfp_t gfp_mask);
-extern void c2_free_mqsp(__be16* mqsp);
-#endif
diff --git a/drivers/staging/rdma/amso1100/c2_ae.c b/drivers/staging/rdma/amso1100/c2_ae.c
deleted file mode 100644
index cedda25232be..000000000000
--- a/drivers/staging/rdma/amso1100/c2_ae.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_status.h"
-#include "c2_ae.h"
-
-static int c2_convert_cm_status(u32 c2_status)
-{
- switch (c2_status) {
- case C2_CONN_STATUS_SUCCESS:
- return 0;
- case C2_CONN_STATUS_REJECTED:
- return -ENETRESET;
- case C2_CONN_STATUS_REFUSED:
- return -ECONNREFUSED;
- case C2_CONN_STATUS_TIMEDOUT:
- return -ETIMEDOUT;
- case C2_CONN_STATUS_NETUNREACH:
- return -ENETUNREACH;
- case C2_CONN_STATUS_HOSTUNREACH:
- return -EHOSTUNREACH;
- case C2_CONN_STATUS_INVALID_RNIC:
- return -EINVAL;
- case C2_CONN_STATUS_INVALID_QP:
- return -EINVAL;
- case C2_CONN_STATUS_INVALID_QP_STATE:
- return -EINVAL;
- case C2_CONN_STATUS_ADDR_NOT_AVAIL:
- return -EADDRNOTAVAIL;
- default:
- printk(KERN_ERR PFX
- "%s - Unable to convert CM status: %d\n",
- __func__, c2_status);
- return -EIO;
- }
-}
-
-static const char* to_event_str(int event)
-{
- static const char* event_str[] = {
- "CCAE_REMOTE_SHUTDOWN",
- "CCAE_ACTIVE_CONNECT_RESULTS",
- "CCAE_CONNECTION_REQUEST",
- "CCAE_LLP_CLOSE_COMPLETE",
- "CCAE_TERMINATE_MESSAGE_RECEIVED",
- "CCAE_LLP_CONNECTION_RESET",
- "CCAE_LLP_CONNECTION_LOST",
- "CCAE_LLP_SEGMENT_SIZE_INVALID",
- "CCAE_LLP_INVALID_CRC",
- "CCAE_LLP_BAD_FPDU",
- "CCAE_INVALID_DDP_VERSION",
- "CCAE_INVALID_RDMA_VERSION",
- "CCAE_UNEXPECTED_OPCODE",
- "CCAE_INVALID_DDP_QUEUE_NUMBER",
- "CCAE_RDMA_READ_NOT_ENABLED",
- "CCAE_RDMA_WRITE_NOT_ENABLED",
- "CCAE_RDMA_READ_TOO_SMALL",
- "CCAE_NO_L_BIT",
- "CCAE_TAGGED_INVALID_STAG",
- "CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
- "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
- "CCAE_TAGGED_INVALID_PD",
- "CCAE_WRAP_ERROR",
- "CCAE_BAD_CLOSE",
- "CCAE_BAD_LLP_CLOSE",
- "CCAE_INVALID_MSN_RANGE",
- "CCAE_INVALID_MSN_GAP",
- "CCAE_IRRQ_OVERFLOW",
- "CCAE_IRRQ_MSN_GAP",
- "CCAE_IRRQ_MSN_RANGE",
- "CCAE_IRRQ_INVALID_STAG",
- "CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
- "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
- "CCAE_IRRQ_INVALID_PD",
- "CCAE_IRRQ_WRAP_ERROR",
- "CCAE_CQ_SQ_COMPLETION_OVERFLOW",
- "CCAE_CQ_RQ_COMPLETION_ERROR",
- "CCAE_QP_SRQ_WQE_ERROR",
- "CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
- "CCAE_CQ_OVERFLOW",
- "CCAE_CQ_OPERATION_ERROR",
- "CCAE_SRQ_LIMIT_REACHED",
- "CCAE_QP_RQ_LIMIT_REACHED",
- "CCAE_SRQ_CATASTROPHIC_ERROR",
- "CCAE_RNIC_CATASTROPHIC_ERROR"
- };
-
- if (event < CCAE_REMOTE_SHUTDOWN ||
- event > CCAE_RNIC_CATASTROPHIC_ERROR)
- return "<invalid event>";
-
- event -= CCAE_REMOTE_SHUTDOWN;
- return event_str[event];
-}
-
-static const char *to_qp_state_str(int state)
-{
- switch (state) {
- case C2_QP_STATE_IDLE:
- return "C2_QP_STATE_IDLE";
- case C2_QP_STATE_CONNECTING:
- return "C2_QP_STATE_CONNECTING";
- case C2_QP_STATE_RTS:
- return "C2_QP_STATE_RTS";
- case C2_QP_STATE_CLOSING:
- return "C2_QP_STATE_CLOSING";
- case C2_QP_STATE_TERMINATE:
- return "C2_QP_STATE_TERMINATE";
- case C2_QP_STATE_ERROR:
- return "C2_QP_STATE_ERROR";
- default:
- return "<invalid QP state>";
- }
-}
-
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
-{
- struct c2_mq *mq = c2dev->qptr_array[mq_index];
- union c2wr *wr;
- void *resource_user_context;
- struct iw_cm_event cm_event;
- struct ib_event ib_event;
- enum c2_resource_indicator resource_indicator;
- enum c2_event_id event_id;
- unsigned long flags;
- int status;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
-
- /*
- * retrieve the message
- */
- wr = c2_mq_consume(mq);
- if (!wr)
- return;
-
- memset(&ib_event, 0, sizeof(ib_event));
- memset(&cm_event, 0, sizeof(cm_event));
-
- event_id = c2_wr_get_id(wr);
- resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
- resource_user_context =
- (void *) (unsigned long) wr->ae.ae_generic.user_context;
-
- status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
-
- pr_debug("event received c2_dev=%p, event_id=%d, "
- "resource_indicator=%d, user_context=%p, status = %d\n",
- c2dev, event_id, resource_indicator, resource_user_context,
- status);
-
- switch (resource_indicator) {
- case C2_RES_IND_QP:{
-
- struct c2_qp *qp = (struct c2_qp *)resource_user_context;
- struct iw_cm_id *cm_id = qp->cm_id;
- struct c2wr_ae_active_connect_results *res;
-
- if (!cm_id) {
-			pr_debug("event received, but cm_id is <null>, qp=%p!\n",
- qp);
- goto ignore_it;
- }
- pr_debug("%s: event = %s, user_context=%llx, "
- "resource_type=%x, "
- "resource=%x, qp_state=%s\n",
- __func__,
- to_event_str(event_id),
- (unsigned long long) wr->ae.ae_generic.user_context,
- be32_to_cpu(wr->ae.ae_generic.resource_type),
- be32_to_cpu(wr->ae.ae_generic.resource),
- to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
-
- c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
-
- switch (event_id) {
- case CCAE_ACTIVE_CONNECT_RESULTS:
- res = &wr->ae.ae_active_connect_results;
- cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- laddr->sin_addr.s_addr = res->laddr;
- raddr->sin_addr.s_addr = res->raddr;
- laddr->sin_port = res->lport;
- raddr->sin_port = res->rport;
- if (status == 0) {
- cm_event.private_data_len =
- be32_to_cpu(res->private_data_length);
- cm_event.private_data = res->private_data;
- } else {
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
- cm_event.private_data_len = 0;
- cm_event.private_data = NULL;
- }
- if (cm_id->event_handler)
- cm_id->event_handler(cm_id, &cm_event);
- break;
- case CCAE_TERMINATE_MESSAGE_RECEIVED:
- case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
- ib_event.device = &c2dev->ibdev;
- ib_event.element.qp = &qp->ibqp;
- ib_event.event = IB_EVENT_QP_REQ_ERR;
-
- if (qp->ibqp.event_handler)
- qp->ibqp.event_handler(&ib_event,
- qp->ibqp.
- qp_context);
- break;
- case CCAE_BAD_CLOSE:
- case CCAE_LLP_CLOSE_COMPLETE:
- case CCAE_LLP_CONNECTION_RESET:
- case CCAE_LLP_CONNECTION_LOST:
- BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
-
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
- cm_event.event = IW_CM_EVENT_CLOSE;
- cm_event.status = 0;
- if (cm_id->event_handler)
- cm_id->event_handler(cm_id, &cm_event);
- break;
- default:
- BUG_ON(1);
- pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
- "CM_ID=%p\n",
- __func__, __LINE__,
- event_id, qp, cm_id);
- break;
- }
- break;
- }
-
- case C2_RES_IND_EP:{
-
- struct c2wr_ae_connection_request *req =
- &wr->ae.ae_connection_request;
- struct iw_cm_id *cm_id =
- (struct iw_cm_id *)resource_user_context;
-
- pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
- if (event_id != CCAE_CONNECTION_REQUEST) {
- pr_debug("%s: Invalid event_id: %d\n",
- __func__, event_id);
- break;
- }
- cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
- cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
- laddr->sin_addr.s_addr = req->laddr;
- raddr->sin_addr.s_addr = req->raddr;
- laddr->sin_port = req->lport;
- raddr->sin_port = req->rport;
- cm_event.private_data_len =
- be32_to_cpu(req->private_data_length);
- cm_event.private_data = req->private_data;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send
- * max supported values
- */
- cm_event.ird = cm_event.ord = 128;
-
- if (cm_id->event_handler)
- cm_id->event_handler(cm_id, &cm_event);
- break;
- }
-
- case C2_RES_IND_CQ:{
- struct c2_cq *cq =
- (struct c2_cq *) resource_user_context;
-
- pr_debug("IB_EVENT_CQ_ERR\n");
- ib_event.device = &c2dev->ibdev;
- ib_event.element.cq = &cq->ibcq;
- ib_event.event = IB_EVENT_CQ_ERR;
-
- if (cq->ibcq.event_handler)
- cq->ibcq.event_handler(&ib_event,
- cq->ibcq.cq_context);
- break;
- }
-
- default:
-		printk(KERN_ERR PFX "Bad resource indicator = %d\n",
- resource_indicator);
- break;
- }
-
- ignore_it:
- c2_mq_free(mq);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_ae.h b/drivers/staging/rdma/amso1100/c2_ae.h
deleted file mode 100644
index 3a065c33b83b..000000000000
--- a/drivers/staging/rdma/amso1100/c2_ae.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_AE_H_
-#define _C2_AE_H_
-
-/*
- * WARNING: If you change this file, also bump C2_IVN_BASE
- * in common/include/clustercore/c2_ivn.h.
- */
-
-/*
- * Asynchronous Event Identifiers
- *
- * These start at 0x80 only so it's obvious from inspection that
- * they are not work-request statuses. This isn't critical.
- *
- * NOTE: these event id's must fit in eight bits.
- */
-enum c2_event_id {
- CCAE_REMOTE_SHUTDOWN = 0x80,
- CCAE_ACTIVE_CONNECT_RESULTS,
- CCAE_CONNECTION_REQUEST,
- CCAE_LLP_CLOSE_COMPLETE,
- CCAE_TERMINATE_MESSAGE_RECEIVED,
- CCAE_LLP_CONNECTION_RESET,
- CCAE_LLP_CONNECTION_LOST,
- CCAE_LLP_SEGMENT_SIZE_INVALID,
- CCAE_LLP_INVALID_CRC,
- CCAE_LLP_BAD_FPDU,
- CCAE_INVALID_DDP_VERSION,
- CCAE_INVALID_RDMA_VERSION,
- CCAE_UNEXPECTED_OPCODE,
- CCAE_INVALID_DDP_QUEUE_NUMBER,
- CCAE_RDMA_READ_NOT_ENABLED,
- CCAE_RDMA_WRITE_NOT_ENABLED,
- CCAE_RDMA_READ_TOO_SMALL,
- CCAE_NO_L_BIT,
- CCAE_TAGGED_INVALID_STAG,
- CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
- CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
- CCAE_TAGGED_INVALID_PD,
- CCAE_WRAP_ERROR,
- CCAE_BAD_CLOSE,
- CCAE_BAD_LLP_CLOSE,
- CCAE_INVALID_MSN_RANGE,
- CCAE_INVALID_MSN_GAP,
- CCAE_IRRQ_OVERFLOW,
- CCAE_IRRQ_MSN_GAP,
- CCAE_IRRQ_MSN_RANGE,
- CCAE_IRRQ_INVALID_STAG,
- CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
- CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
- CCAE_IRRQ_INVALID_PD,
- CCAE_IRRQ_WRAP_ERROR,
- CCAE_CQ_SQ_COMPLETION_OVERFLOW,
- CCAE_CQ_RQ_COMPLETION_ERROR,
- CCAE_QP_SRQ_WQE_ERROR,
- CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
- CCAE_CQ_OVERFLOW,
- CCAE_CQ_OPERATION_ERROR,
- CCAE_SRQ_LIMIT_REACHED,
- CCAE_QP_RQ_LIMIT_REACHED,
- CCAE_SRQ_CATASTROPHIC_ERROR,
- CCAE_RNIC_CATASTROPHIC_ERROR
-/* WARNING If you add more id's, make sure their values fit in eight bits. */
-};
-
-/*
- * Resource Indicators and Identifiers
- */
-enum c2_resource_indicator {
- C2_RES_IND_QP = 1,
- C2_RES_IND_EP,
- C2_RES_IND_CQ,
- C2_RES_IND_SRQ,
-};
-
-#endif /* _C2_AE_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_alloc.c b/drivers/staging/rdma/amso1100/c2_alloc.c
deleted file mode 100644
index 78d247ec6961..000000000000
--- a/drivers/staging/rdma/amso1100/c2_alloc.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/bitmap.h>
-
-#include "c2.h"
-
-static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
- struct sp_chunk **head)
-{
- int i;
- struct sp_chunk *new_head;
- dma_addr_t dma_addr;
-
- new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
- &dma_addr, gfp_mask);
- if (new_head == NULL)
- return -ENOMEM;
-
- new_head->dma_addr = dma_addr;
- dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
-
- new_head->next = NULL;
- new_head->head = 0;
-
- /* build list where each index is the next free slot */
- for (i = 0;
- i < (PAGE_SIZE - sizeof(struct sp_chunk) -
- sizeof(u16)) / sizeof(u16) - 1;
- i++) {
- new_head->shared_ptr[i] = i + 1;
- }
- /* terminate list */
- new_head->shared_ptr[i] = 0xFFFF;
-
- *head = new_head;
- return 0;
-}
-
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
- struct sp_chunk **root)
-{
- return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
-}
-
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
-{
- struct sp_chunk *next;
-
- while (root) {
- next = root->next;
- dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
- dma_unmap_addr(root, mapping));
- root = next;
- }
-}
-
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
- dma_addr_t *dma_addr, gfp_t gfp_mask)
-{
- u16 mqsp;
-
- while (head) {
- mqsp = head->head;
- if (mqsp != 0xFFFF) {
- head->head = head->shared_ptr[mqsp];
- break;
- } else if (head->next == NULL) {
- if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
- 0) {
- head = head->next;
- mqsp = head->head;
- head->head = head->shared_ptr[mqsp];
- break;
- } else
- return NULL;
- } else
- head = head->next;
- }
- if (head) {
- *dma_addr = head->dma_addr +
- ((unsigned long) &(head->shared_ptr[mqsp]) -
- (unsigned long) head);
- pr_debug("%s addr %p dma_addr %llx\n", __func__,
- &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
- return (__force __be16 *) &(head->shared_ptr[mqsp]);
- }
- return NULL;
-}
-
-void c2_free_mqsp(__be16 *mqsp)
-{
- struct sp_chunk *head;
- u16 idx;
-
- /* The chunk containing this ptr begins at the page boundary */
- head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
-
- /* Link head to new mqsp */
- *mqsp = (__force __be16) head->head;
-
- /* Compute the shared_ptr index */
- idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
- idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
-
- /* Point this index at the head */
- head->shared_ptr[idx] = head->head;
-
- /* Point head at this index */
- head->head = idx;
-}
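Aside: the allocator in c2_alloc.c above keeps its free list inside the slot array itself: each free slot stores the index of the next free slot, 0xFFFF terminates the list, allocation pops the head, and freeing pushes the slot back onto the head. A small self-contained sketch of the same idea in plain C (hypothetical names, illustrative only, not driver code):

	#include <stdint.h>

	#define FL_SLOTS 8
	#define FL_END   0xFFFF

	struct freelist {
		uint16_t head;            /* index of the first free slot, FL_END when empty */
		uint16_t next[FL_SLOTS];  /* next[i] = index of the free slot after slot i */
	};

	static void fl_init(struct freelist *fl)
	{
		int i;

		for (i = 0; i < FL_SLOTS - 1; i++)
			fl->next[i] = i + 1;        /* each index points at the next free slot */
		fl->next[FL_SLOTS - 1] = FL_END;    /* terminate the list */
		fl->head = 0;
	}

	static int fl_alloc(struct freelist *fl)
	{
		uint16_t idx = fl->head;

		if (idx == FL_END)
			return -1;                  /* pool exhausted */
		fl->head = fl->next[idx];           /* pop the head */
		return idx;
	}

	static void fl_free(struct freelist *fl, uint16_t idx)
	{
		fl->next[idx] = fl->head;           /* link the slot in front of the old head */
		fl->head = idx;                     /* the freed slot becomes the new head */
	}
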
diff --git a/drivers/staging/rdma/amso1100/c2_cm.c b/drivers/staging/rdma/amso1100/c2_cm.c
deleted file mode 100644
index 23bfa94fbd4e..000000000000
--- a/drivers/staging/rdma/amso1100/c2_cm.c
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_vq.h"
-#include <rdma/iw_cm.h>
-
-int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- struct c2_dev *c2dev = to_c2dev(cm_id->device);
- struct ib_qp *ibqp;
- struct c2_qp *qp;
- struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
- struct c2_vq_req *vq_req;
- int err;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
- if (cm_id->remote_addr.ss_family != AF_INET)
- return -ENOSYS;
-
- ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
- if (!ibqp)
- return -EINVAL;
- qp = to_c2qp(ibqp);
-
- /* Associate QP <--> CM_ID */
- cm_id->provider_data = qp;
- cm_id->add_ref(cm_id);
- qp->cm_id = cm_id;
-
- /*
- * only support the max private_data length
-	 * Private data must not exceed the maximum supported length.
- if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
- err = -EINVAL;
- goto bail0;
- }
- /*
- * Set the rdma read limits
- */
- err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
- if (err)
- goto bail0;
-
- /*
- * Create and send a WR_QP_CONNECT...
- */
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- err = -ENOMEM;
- goto bail1;
- }
-
- c2_wr_set_id(wr, CCWR_QP_CONNECT);
- wr->hdr.context = 0;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->qp_handle = qp->adapter_handle;
-
- wr->remote_addr = raddr->sin_addr.s_addr;
- wr->remote_port = raddr->sin_port;
-
- /*
-	 * Move any private data from the caller's buf into
- * the WR.
- */
- if (iw_param->private_data) {
- wr->private_data_length =
- cpu_to_be32(iw_param->private_data_len);
- memcpy(&wr->private_data[0], iw_param->private_data,
- iw_param->private_data_len);
- } else
- wr->private_data_length = 0;
-
- /*
- * Send WR to adapter. NOTE: There is no synch reply from
- * the adapter.
- */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- vq_req_free(c2dev, vq_req);
-
- bail1:
- kfree(wr);
- bail0:
- if (err) {
- /*
- * If we fail, release reference on QP and
- * disassociate QP from CM_ID
- */
- cm_id->provider_data = NULL;
- qp->cm_id = NULL;
- cm_id->rem_ref(cm_id);
- }
- return err;
-}
-
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
-{
- struct c2_dev *c2dev;
- struct c2wr_ep_listen_create_req wr;
- struct c2wr_ep_listen_create_rep *reply;
- struct c2_vq_req *vq_req;
- int err;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-
- if (cm_id->local_addr.ss_family != AF_INET)
- return -ENOSYS;
-
- c2dev = to_c2dev(cm_id->device);
- if (c2dev == NULL)
- return -EINVAL;
-
- /*
- * Allocate verbs request.
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
- wr.hdr.context = (u64) (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.local_addr = laddr->sin_addr.s_addr;
- wr.local_port = laddr->sin_port;
- wr.backlog = cpu_to_be32(backlog);
- wr.user_context = (u64) (unsigned long) cm_id;
-
- /*
- * Reference the request struct. Dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- /*
- * Process reply
- */
- reply =
- (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- if ((err = c2_errno(reply)) != 0)
- goto bail1;
-
- /*
- * Keep the adapter handle. Used in subsequent destroy
- */
- cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
-
- /*
- * free vq stuff
- */
- vq_repbuf_free(c2dev, reply);
- vq_req_free(c2dev, vq_req);
-
- return 0;
-
- bail1:
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-
-int c2_llp_service_destroy(struct iw_cm_id *cm_id)
-{
-
- struct c2_dev *c2dev;
- struct c2wr_ep_listen_destroy_req wr;
- struct c2wr_ep_listen_destroy_rep *reply;
- struct c2_vq_req *vq_req;
- int err;
-
- c2dev = to_c2dev(cm_id->device);
- if (c2dev == NULL)
- return -EINVAL;
-
- /*
- * Allocate verbs request.
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- /*
- * Process reply
- */
- reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
- if ((err = c2_errno(reply)) != 0)
- goto bail1;
-
- bail1:
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- struct c2_dev *c2dev = to_c2dev(cm_id->device);
- struct c2_qp *qp;
- struct ib_qp *ibqp;
- struct c2wr_cr_accept_req *wr; /* variable length WR */
- struct c2_vq_req *vq_req;
- struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
- int err;
-
- ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
- if (!ibqp)
- return -EINVAL;
- qp = to_c2qp(ibqp);
-
- /* Set the RDMA read limits */
- err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
- if (err)
- goto bail0;
-
- /* Allocate verbs request. */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- err = -ENOMEM;
- goto bail0;
- }
- vq_req->qp = qp;
- vq_req->cm_id = cm_id;
- vq_req->event = IW_CM_EVENT_ESTABLISHED;
-
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail1;
- }
-
- /* Build the WR */
- c2_wr_set_id(wr, CCWR_CR_ACCEPT);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
- wr->qp_handle = qp->adapter_handle;
-
- /* Replace the cr_handle with the QP after accept */
- cm_id->provider_data = qp;
- cm_id->add_ref(cm_id);
- qp->cm_id = cm_id;
-
- cm_id->provider_data = qp;
-
- /* Validate private_data length */
- if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
- err = -EINVAL;
- goto bail1;
- }
-
- if (iw_param->private_data) {
- wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
- memcpy(&wr->private_data[0],
- iw_param->private_data, iw_param->private_data_len);
- } else
- wr->private_data_length = 0;
-
- /* Reference the request struct. Dereferenced in the int handler. */
- vq_req_get(c2dev, vq_req);
-
- /* Send WR to adapter */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- /* Wait for reply from adapter */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- /* Check that reply is present */
- reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
-
- if (!err)
- c2_set_qp_state(qp, C2_QP_STATE_RTS);
- bail1:
- kfree(wr);
- vq_req_free(c2dev, vq_req);
- bail0:
- if (err) {
- /*
- * If we fail, release reference on QP and
- * disassociate QP from CM_ID
- */
- cm_id->provider_data = NULL;
- qp->cm_id = NULL;
- cm_id->rem_ref(cm_id);
- }
- return err;
-}
-
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct c2_dev *c2dev;
- struct c2wr_cr_reject_req wr;
- struct c2_vq_req *vq_req;
- struct c2wr_cr_reject_rep *reply;
- int err;
-
- c2dev = to_c2dev(cm_id->device);
-
- /*
- * Allocate verbs request.
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_CR_REJECT);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- /*
- * Process reply
- */
- reply = (struct c2wr_cr_reject_rep *) (unsigned long)
- vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
- err = c2_errno(reply);
- /*
- * free vq stuff
- */
- vq_repbuf_free(c2dev, reply);
-
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
deleted file mode 100644
index 1b63185b4ad4..000000000000
--- a/drivers/staging/rdma/amso1100/c2_cq.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
-
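Aside: C2_CQ_MSG_SIZE uses the usual power-of-two round-up idiom, (x + a - 1) & ~(a - 1). Assuming, purely for illustration, a 44-byte struct c2wr_ce, the macro evaluates to (44 + 31) & ~31 == 75 & ~31 == 64, i.e. the message size rounded up to the next 32-byte boundary. The same idiom in a generic form:

	/* Round x up to the next multiple of a; a must be a power of two. */
	#define ROUND_UP_POW2(x, a)  (((x) + (a) - 1) & ~((a) - 1))
	/* ROUND_UP_POW2(44, 32) == 64, ROUND_UP_POW2(64, 32) == 64 */
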
-static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
-{
- struct c2_cq *cq;
- unsigned long flags;
-
- spin_lock_irqsave(&c2dev->lock, flags);
- cq = c2dev->qptr_array[cqn];
- if (!cq) {
- spin_unlock_irqrestore(&c2dev->lock, flags);
- return NULL;
- }
- atomic_inc(&cq->refcount);
- spin_unlock_irqrestore(&c2dev->lock, flags);
- return cq;
-}
-
-static void c2_cq_put(struct c2_cq *cq)
-{
- if (atomic_dec_and_test(&cq->refcount))
- wake_up(&cq->wait);
-}
-
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
-{
- struct c2_cq *cq;
-
- cq = c2_cq_get(c2dev, mq_index);
- if (!cq) {
- printk("discarding events on destroyed CQN=%d\n", mq_index);
- return;
- }
-
- (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- c2_cq_put(cq);
-}
-
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
-{
- struct c2_cq *cq;
- struct c2_mq *q;
-
- cq = c2_cq_get(c2dev, mq_index);
- if (!cq)
- return;
-
- spin_lock_irq(&cq->lock);
- q = &cq->mq;
- if (q && !c2_mq_empty(q)) {
- u16 priv = q->priv;
- struct c2wr_ce *msg;
-
- while (priv != be16_to_cpu(*q->shared)) {
- msg = (struct c2wr_ce *)
- (q->msg_pool.host + priv * q->msg_size);
- if (msg->qp_user_context == (u64) (unsigned long) qp) {
- msg->qp_user_context = (u64) 0;
- }
- priv = (priv + 1) % q->q_size;
- }
- }
- spin_unlock_irq(&cq->lock);
- c2_cq_put(cq);
-}
-
-static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
-{
- switch (status) {
- case C2_OK:
- return IB_WC_SUCCESS;
- case CCERR_FLUSHED:
- return IB_WC_WR_FLUSH_ERR;
- case CCERR_BASE_AND_BOUNDS_VIOLATION:
- return IB_WC_LOC_PROT_ERR;
- case CCERR_ACCESS_VIOLATION:
- return IB_WC_LOC_ACCESS_ERR;
- case CCERR_TOTAL_LENGTH_TOO_BIG:
- return IB_WC_LOC_LEN_ERR;
- case CCERR_INVALID_WINDOW:
- return IB_WC_MW_BIND_ERR;
- default:
- return IB_WC_GENERAL_ERR;
- }
-}
-
-
-static inline int c2_poll_one(struct c2_dev *c2dev,
- struct c2_cq *cq, struct ib_wc *entry)
-{
- struct c2wr_ce *ce;
- struct c2_qp *qp;
- int is_recv = 0;
-
- ce = c2_mq_consume(&cq->mq);
- if (!ce) {
- return -EAGAIN;
- }
-
- /*
-	 * If the qp returned is NULL then this qp has already
-	 * been freed and we are unable to process the completion.
-	 * Try pulling the next message.
- */
- while ((qp =
- (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
- c2_mq_free(&cq->mq);
- ce = c2_mq_consume(&cq->mq);
- if (!ce)
- return -EAGAIN;
- }
-
- entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
- entry->wr_id = ce->hdr.context;
- entry->qp = &qp->ibqp;
- entry->wc_flags = 0;
- entry->slid = 0;
- entry->sl = 0;
- entry->src_qp = 0;
- entry->dlid_path_bits = 0;
- entry->pkey_index = 0;
-
- switch (c2_wr_get_id(ce)) {
- case C2_WR_TYPE_SEND:
- entry->opcode = IB_WC_SEND;
- break;
- case C2_WR_TYPE_RDMA_WRITE:
- entry->opcode = IB_WC_RDMA_WRITE;
- break;
- case C2_WR_TYPE_RDMA_READ:
- entry->opcode = IB_WC_RDMA_READ;
- break;
- case C2_WR_TYPE_BIND_MW:
- entry->opcode = IB_WC_BIND_MW;
- break;
- case C2_WR_TYPE_RECV:
- entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
- entry->opcode = IB_WC_RECV;
- is_recv = 1;
- break;
- default:
- break;
- }
-
- /* consume the WQEs */
- if (is_recv)
- c2_mq_lconsume(&qp->rq_mq, 1);
- else
- c2_mq_lconsume(&qp->sq_mq,
- be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
-
- /* free the message */
- c2_mq_free(&cq->mq);
-
- return 0;
-}
-
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct c2_dev *c2dev = to_c2dev(ibcq->device);
- struct c2_cq *cq = to_c2cq(ibcq);
- unsigned long flags;
- int npolled, err;
-
- spin_lock_irqsave(&cq->lock, flags);
-
- for (npolled = 0; npolled < num_entries; ++npolled) {
-
- err = c2_poll_one(c2dev, cq, entry + npolled);
- if (err)
- break;
- }
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
- return npolled;
-}
-
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
- struct c2_mq_shared __iomem *shared;
- struct c2_cq *cq;
- unsigned long flags;
- int ret = 0;
-
- cq = to_c2cq(ibcq);
- shared = cq->mq.peer;
-
- if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
- writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
- else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
- else
- return -EINVAL;
-
- writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
-
- /*
- * Now read back shared->armed to make the PCI
- * write synchronous. This is necessary for
- * correct cq notification semantics.
- */
- readb(&shared->armed);
-
- if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
- spin_lock_irqsave(&cq->lock, flags);
- ret = !c2_mq_empty(&cq->mq);
- spin_unlock_irqrestore(&cq->lock, flags);
- }
-
- return ret;
-}
-
-static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
-{
- dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
- mq->msg_pool.host, dma_unmap_addr(mq, mapping));
-}
-
-static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
- size_t q_size, size_t msg_size)
-{
- u8 *pool_start;
-
- if (q_size > SIZE_MAX / msg_size)
- return -EINVAL;
-
- pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
- &mq->host_dma, GFP_KERNEL);
- if (!pool_start)
- return -ENOMEM;
-
- c2_mq_rep_init(mq,
- 0, /* index (currently unknown) */
- q_size,
- msg_size,
- pool_start,
- NULL, /* peer (currently unknown) */
- C2_MQ_HOST_TARGET);
-
- dma_unmap_addr_set(mq, mapping, mq->host_dma);
-
- return 0;
-}
-
-int c2_init_cq(struct c2_dev *c2dev, int entries,
- struct c2_ucontext *ctx, struct c2_cq *cq)
-{
- struct c2wr_cq_create_req wr;
- struct c2wr_cq_create_rep *reply;
- unsigned long peer_pa;
- struct c2_vq_req *vq_req;
- int err;
-
- might_sleep();
-
- cq->ibcq.cqe = entries - 1;
- cq->is_kernel = !ctx;
-
- /* Allocate a shared pointer */
- cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &cq->mq.shared_dma, GFP_KERNEL);
- if (!cq->mq.shared)
- return -ENOMEM;
-
- /* Allocate pages for the message pool */
- err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
- if (err)
- goto bail0;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- err = -ENOMEM;
- goto bail1;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_CQ_CREATE);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.msg_size = cpu_to_be32(cq->mq.msg_size);
- wr.depth = cpu_to_be32(cq->mq.q_size);
- wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
- wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
- wr.user_context = (u64) (unsigned long) (cq);
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail2;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail2;
-
- reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail2;
- }
-
- if ((err = c2_errno(reply)) != 0)
- goto bail3;
-
- cq->adapter_handle = reply->cq_handle;
- cq->mq.index = be32_to_cpu(reply->mq_index);
-
- peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
- cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
- if (!cq->mq.peer) {
- err = -ENOMEM;
- goto bail3;
- }
-
- vq_repbuf_free(c2dev, reply);
- vq_req_free(c2dev, vq_req);
-
- spin_lock_init(&cq->lock);
- atomic_set(&cq->refcount, 1);
- init_waitqueue_head(&cq->wait);
-
- /*
- * Use the MQ index allocated by the adapter to
- * store the CQ in the qptr_array
- */
- cq->cqn = cq->mq.index;
- c2dev->qptr_array[cq->cqn] = cq;
-
- return 0;
-
- bail3:
- vq_repbuf_free(c2dev, reply);
- bail2:
- vq_req_free(c2dev, vq_req);
- bail1:
- c2_free_cq_buf(c2dev, &cq->mq);
- bail0:
- c2_free_mqsp(cq->mq.shared);
-
- return err;
-}
-
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
-{
- int err;
- struct c2_vq_req *vq_req;
- struct c2wr_cq_destroy_req wr;
- struct c2wr_cq_destroy_rep *reply;
-
- might_sleep();
-
- /* Clear CQ from the qptr array */
- spin_lock_irq(&c2dev->lock);
- c2dev->qptr_array[cq->mq.index] = NULL;
- atomic_dec(&cq->refcount);
- spin_unlock_irq(&c2dev->lock);
-
- wait_event(cq->wait, !atomic_read(&cq->refcount));
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- goto bail0;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.cq_handle = cq->adapter_handle;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
- if (reply)
- vq_repbuf_free(c2dev, reply);
- bail1:
- vq_req_free(c2dev, vq_req);
- bail0:
- if (cq->is_kernel) {
- c2_free_cq_buf(c2dev, &cq->mq);
- }
-
- return;
-}
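
The read-back of shared->armed in c2_arm_cq above is the usual way to flush a posted PCI write before relying on it. A minimal sketch of that pattern, with a hypothetical register offset standing in for the adapter's shared page layout:

    /* Flush a posted MMIO write by reading from the same device,
     * as c2_arm_cq does with shared->armed. The offset is made up. */
    #include <linux/io.h>

    #define DEMO_DB_ARM 0x10    /* hypothetical doorbell offset */

    static void demo_arm_doorbell(void __iomem *regs)
    {
            /* The write may sit in a posting buffer for a while... */
            writeb(1, regs + DEMO_DB_ARM);

            /* ...so read it back: the read cannot complete until the
             * preceding write has reached the device. */
            readb(regs + DEMO_DB_ARM);
    }
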
diff --git a/drivers/staging/rdma/amso1100/c2_intr.c b/drivers/staging/rdma/amso1100/c2_intr.c
deleted file mode 100644
index 3a17d9b36dba..000000000000
--- a/drivers/staging/rdma/amso1100/c2_intr.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_vq.h"
-
-static void handle_mq(struct c2_dev *c2dev, u32 index);
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
-
-/*
- * Handle RNIC interrupts
- */
-void c2_rnic_interrupt(struct c2_dev *c2dev)
-{
- unsigned int mq_index;
-
- while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
- mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
- if (mq_index & 0x80000000) {
- break;
- }
-
- c2dev->hints_read++;
- handle_mq(c2dev, mq_index);
- }
-
-}
-
-/*
- * Top level MQ handler
- */
-static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
-{
- if (c2dev->qptr_array[mq_index] == NULL) {
- pr_debug("handle_mq: stray activity for mq_index=%d\n",
- mq_index);
- return;
- }
-
- switch (mq_index) {
- case (0):
- /*
- * An index of 0 in the activity queue
- * indicates the req vq now has messages
- * available...
- *
- * Wake up any waiters waiting on req VQ
- * message availability.
- */
- wake_up(&c2dev->req_vq_wo);
- break;
- case (1):
- handle_vq(c2dev, mq_index);
- break;
- case (2):
- /* We have to purge the VQ in case there are pending
- * accept reply requests that would result in the
- * generation of an ESTABLISHED event. If we don't
- * generate these first, a CLOSE event could end up
- * being delivered before the ESTABLISHED event.
- */
- handle_vq(c2dev, 1);
-
- c2_ae_event(c2dev, mq_index);
- break;
- default:
- /* There is no event synchronization between CQ events
- * and AE or CM events. In fact, CQE could be
- * delivered for all of the I/O up to and including the
-		 * FLUSH for a peer disconnect prior to the ESTABLISHED
- * event being delivered to the app. The reason for this
-		 * is that CM events are delivered on a thread, while CQ
-		 * and AE events are delivered in interrupt context.
- */
- c2_cq_event(c2dev, mq_index);
- break;
- }
-
- return;
-}
-
-/*
- * Handles verbs WR replies.
- */
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
-{
- void *adapter_msg, *reply_msg;
- struct c2wr_hdr *host_msg;
- struct c2wr_hdr tmp;
- struct c2_mq *reply_vq;
- struct c2_vq_req *req;
- struct iw_cm_event cm_event;
- int err;
-
- reply_vq = (struct c2_mq *) c2dev->qptr_array[mq_index];
-
- /*
- * get next msg from mq_index into adapter_msg.
- * don't free it yet.
- */
- adapter_msg = c2_mq_consume(reply_vq);
- if (adapter_msg == NULL) {
- return;
- }
-
- host_msg = vq_repbuf_alloc(c2dev);
-
- /*
- * If we can't get a host buffer, then we'll still
-	 * wake up the waiter; we just won't give it the msg.
- * It is assumed the waiter will deal with this...
- */
- if (!host_msg) {
- pr_debug("handle_vq: no repbufs!\n");
-
- /*
- * just copy the WR header into a local variable.
- * this allows us to still demux on the context
- */
- host_msg = &tmp;
- memcpy(host_msg, adapter_msg, sizeof(tmp));
- reply_msg = NULL;
- } else {
- memcpy(host_msg, adapter_msg, reply_vq->msg_size);
- reply_msg = host_msg;
- }
-
- /*
- * consume the msg from the MQ
- */
- c2_mq_free(reply_vq);
-
- /*
- * wakeup the waiter.
- */
- req = (struct c2_vq_req *) (unsigned long) host_msg->context;
- if (req == NULL) {
- /*
- * We should never get here, as the adapter should
- * never send us a reply that we're not expecting.
- */
- if (reply_msg != NULL)
- vq_repbuf_free(c2dev, host_msg);
- pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
- return;
- }
-
- if (reply_msg)
- err = c2_errno(reply_msg);
- else
- err = -ENOMEM;
-
- if (!err) switch (req->event) {
- case IW_CM_EVENT_ESTABLISHED:
- c2_set_qp_state(req->qp,
- C2_QP_STATE_RTS);
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send
- * max supported values
- */
- cm_event.ird = cm_event.ord = 128;
- case IW_CM_EVENT_CLOSE:
-
- /*
-			 * The ESTABLISHED case above falls through to here
-			 * so both events are delivered to the CM the same way.
- */
- cm_event.event = req->event;
- cm_event.status = 0;
- cm_event.local_addr = req->cm_id->local_addr;
- cm_event.remote_addr = req->cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
- req->cm_id->event_handler(req->cm_id, &cm_event);
- break;
- default:
- break;
- }
-
- req->reply_msg = (u64) (unsigned long) (reply_msg);
- atomic_set(&req->reply_ready, 1);
- wake_up(&req->wait_object);
-
- /*
- * If the request was cancelled, then this put will
- * free the vq_req memory...and reply_msg!!!
- */
- vq_req_put(c2dev, req);
-}
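
handle_vq above recovers the original requester from the context value that the request stored in the WR header, then records the reply and wakes the sleeper. A stripped-down sketch of that cookie-and-wakeup pattern (types and names here are illustrative, not the driver's):

    #include <linux/wait.h>
    #include <linux/atomic.h>
    #include <linux/types.h>

    struct demo_req {
            wait_queue_head_t wait;
            atomic_t reply_ready;
            u64 reply_msg;
    };

    /* Request side: stash the request pointer as an opaque cookie. */
    static void demo_post(struct demo_req *req, u64 *hdr_context)
    {
            init_waitqueue_head(&req->wait);
            atomic_set(&req->reply_ready, 0);
            *hdr_context = (unsigned long)req;
    }

    /* Reply side: recover the cookie, record the reply, wake the waiter. */
    static void demo_complete(u64 hdr_context, u64 reply)
    {
            struct demo_req *req = (struct demo_req *)(unsigned long)hdr_context;

            req->reply_msg = reply;
            atomic_set(&req->reply_ready, 1);
            wake_up(&req->wait);
    }
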
diff --git a/drivers/staging/rdma/amso1100/c2_mm.c b/drivers/staging/rdma/amso1100/c2_mm.c
deleted file mode 100644
index 119c4f3d9791..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mm.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-
-#define PBL_VIRT 1
-#define PBL_PHYS 2
-
-/*
- * Send all the PBL messages to convey the remainder of the PBL.
- * Wait for the adapter's reply on the last one.
- * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
- *
- * NOTE: vq_req is _not_ freed by this function. The VQ Host
- * Reply buffer _is_ freed by this function.
- */
-static int
-send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
- unsigned long va, u32 pbl_depth,
- struct c2_vq_req *vq_req, int pbl_type)
-{
- u32 pbe_count; /* amt that fits in a PBL msg */
- u32 count; /* amt in this PBL MSG. */
- struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */
- struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */
- int err, pbl_virt, pbl_index, i;
-
- switch (pbl_type) {
- case PBL_VIRT:
- pbl_virt = 1;
- break;
- case PBL_PHYS:
- pbl_virt = 0;
- break;
- default:
- return -EINVAL;
- break;
- }
-
- pbe_count = (c2dev->req_vq.msg_size -
- sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- return -ENOMEM;
- }
- c2_wr_set_id(wr, CCWR_NSMR_PBL);
-
- /*
- * Only the last PBL message will generate a reply from the verbs,
- * so we set the context to 0 indicating there is no kernel verbs
- * handler blocked awaiting this reply.
- */
- wr->hdr.context = 0;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->stag_index = stag_index; /* already swapped */
- wr->flags = 0;
- pbl_index = 0;
- while (pbl_depth) {
- count = min(pbe_count, pbl_depth);
- wr->addrs_length = cpu_to_be32(count);
-
- /*
- * If this is the last message, then reference the
-		 * vq request struct because we're going to wait for a reply.
-		 * Also mark this PBL msg as the last one.
- */
- if (count == pbl_depth) {
- /*
- * reference the request struct. dereferenced in the
- * int handler.
- */
- vq_req_get(c2dev, vq_req);
- wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
-
- /*
- * This is the last PBL message.
- * Set the context to our VQ Request Object so we can
- * wait for the reply.
- */
- wr->hdr.context = (unsigned long) vq_req;
- }
-
- /*
- * If pbl_virt is set then va is a virtual address
- * that describes a virtually contiguous memory
- * allocation. The wr needs the start of each virtual page
- * to be converted to the corresponding physical address
- * of the page. If pbl_virt is not set then va is an array
- * of physical addresses and there is no conversion to do.
- * Just fill in the wr with what is in the array.
- */
- for (i = 0; i < count; i++) {
- if (pbl_virt) {
- va += PAGE_SIZE;
- } else {
- wr->paddrs[i] =
- cpu_to_be64(((u64 *)va)[pbl_index + i]);
- }
- }
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- if (count <= pbe_count) {
- vq_req_put(c2dev, vq_req);
- }
- goto bail0;
- }
- pbl_depth -= count;
- pbl_index += count;
- }
-
- /*
- * Now wait for the reply...
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- /*
- * Process reply
- */
- reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
-
- vq_repbuf_free(c2dev, reply);
- bail0:
- kfree(wr);
- return err;
-}
-
-#define C2_PBL_MAX_DEPTH 131072
-int
-c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
- int page_size, int pbl_depth, u32 length,
- u32 offset, u64 *va, enum c2_acf acf,
- struct c2_mr *mr)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_nsmr_register_req *wr;
- struct c2wr_nsmr_register_rep *reply;
- u16 flags;
- int i, pbe_count, count;
- int err;
-
- if (!va || !length || !addr_list || !pbl_depth)
- return -EINTR;
-
- /*
- * Verify PBL depth is within rnic max
- */
- if (pbl_depth > C2_PBL_MAX_DEPTH) {
- return -EINTR;
- }
-
- /*
- * allocate verbs request object
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- /*
- * build the WR
- */
- c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
-
- flags = (acf | MEM_VA_BASED | MEM_REMOTE);
-
- /*
- * compute how many pbes can fit in the message
- */
- pbe_count = (c2dev->req_vq.msg_size -
- sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
-
- if (pbl_depth <= pbe_count) {
- flags |= MEM_PBL_COMPLETE;
- }
- wr->flags = cpu_to_be16(flags);
- wr->stag_key = 0; //stag_key;
- wr->va = cpu_to_be64(*va);
- wr->pd_id = mr->pd->pd_id;
- wr->pbe_size = cpu_to_be32(page_size);
- wr->length = cpu_to_be32(length);
- wr->pbl_depth = cpu_to_be32(pbl_depth);
- wr->fbo = cpu_to_be32(offset);
- count = min(pbl_depth, pbe_count);
- wr->addrs_length = cpu_to_be32(count);
-
- /*
- * fill out the PBL for this message
- */
- for (i = 0; i < count; i++) {
- wr->paddrs[i] = cpu_to_be64(addr_list[i]);
- }
-
- /*
-	 * reference the request struct
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * send the WR to the adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- /*
- * wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail1;
- }
-
- /*
- * process reply
- */
- reply =
- (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
- if ((err = c2_errno(reply))) {
- goto bail2;
- }
- //*p_pb_entries = be32_to_cpu(reply->pbl_depth);
- mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
- vq_repbuf_free(c2dev, reply);
-
- /*
- * if there are still more PBEs we need to send them to
- * the adapter and wait for a reply on the final one.
- * reuse vq_req for this purpose.
- */
- pbl_depth -= count;
- if (pbl_depth) {
-
- vq_req->reply_msg = (unsigned long) NULL;
- atomic_set(&vq_req->reply_ready, 0);
- err = send_pbl_messages(c2dev,
- cpu_to_be32(mr->ibmr.lkey),
- (unsigned long) &addr_list[i],
- pbl_depth, vq_req, PBL_PHYS);
- if (err) {
- goto bail1;
- }
- }
-
- vq_req_free(c2dev, vq_req);
- kfree(wr);
-
- return err;
-
- bail2:
- vq_repbuf_free(c2dev, reply);
- bail1:
- kfree(wr);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
-{
- struct c2_vq_req *vq_req; /* verbs request object */
- struct c2wr_stag_dealloc_req wr; /* work request */
- struct c2wr_stag_dealloc_rep *reply; /* WR reply */
- int err;
-
-
- /*
- * allocate verbs request object
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- return -ENOMEM;
- }
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
- wr.hdr.context = (u64) (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.stag_index = cpu_to_be32(stag_index);
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- /*
- * Process reply
- */
- reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
-
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
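
c2_nsmr_register_phys_kern above packs as many page addresses as fit into the register WR and hands the remainder to send_pbl_messages in fixed-size chunks. The arithmetic amounts to the following; the message and header sizes are hypothetical stand-ins, not the adapter's real values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            size_t msg_size = 1024;         /* hypothetical req_vq.msg_size */
            size_t hdr_size = 64;           /* hypothetical request header size */
            unsigned int pbl_depth = 10000; /* pages to register */

            /* u64 page addresses that fit after the header, as in the driver */
            unsigned int pbe_count = (msg_size - hdr_size) / sizeof(uint64_t);
            unsigned int first = pbl_depth < pbe_count ? pbl_depth : pbe_count;
            unsigned int rest = pbl_depth - first;
            unsigned int follow_on = (rest + pbe_count - 1) / pbe_count;

            printf("%u PBEs per message: %u in the register WR, %u follow-on PBL messages\n",
                   pbe_count, first, follow_on);
            return 0;
    }
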
diff --git a/drivers/staging/rdma/amso1100/c2_mq.c b/drivers/staging/rdma/amso1100/c2_mq.c
deleted file mode 100644
index 7827fb8bdb10..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mq.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include "c2_mq.h"
-
-void *c2_mq_alloc(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
- if (c2_mq_full(q)) {
- return NULL;
- } else {
-#ifdef DEBUG
- struct c2wr_hdr *m =
- (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
- BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
- m->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
- return m;
-#else
- return q->msg_pool.host + q->priv * q->msg_size;
-#endif
- }
-}
-
-void c2_mq_produce(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
- if (!c2_mq_full(q)) {
- q->priv = (q->priv + 1) % q->q_size;
- q->hint_count++;
- /* Update peer's offset. */
- __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
- }
-}
-
-void *c2_mq_consume(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
- if (c2_mq_empty(q)) {
- return NULL;
- } else {
-#ifdef DEBUG
- struct c2wr_hdr *m = (struct c2wr_hdr *)
- (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
- BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
-#endif
- return m;
-#else
- return q->msg_pool.host + q->priv * q->msg_size;
-#endif
- }
-}
-
-void c2_mq_free(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
- if (!c2_mq_empty(q)) {
-
-#ifdef CCMSGMAGIC
- {
- struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
- (q->msg_pool.adapter + q->priv * q->msg_size);
- __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
- }
-#endif
- q->priv = (q->priv + 1) % q->q_size;
- /* Update peer's offset. */
- __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
- }
-}
-
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
- while (wqe_count--) {
- BUG_ON(c2_mq_empty(q));
- *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
- }
-}
-
-#if 0
-u32 c2_mq_count(struct c2_mq *q)
-{
- s32 count;
-
- if (q->type == C2_MQ_HOST_TARGET)
- count = be16_to_cpu(*q->shared) - q->priv;
- else
- count = q->priv - be16_to_cpu(*q->shared);
-
- if (count < 0)
- count += q->q_size;
-
- return (u32) count;
-}
-#endif /* 0 */
-
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
-{
- BUG_ON(!q->shared);
-
- /* This code assumes the byte swapping has already been done! */
- q->index = index;
- q->q_size = q_size;
- q->msg_size = msg_size;
- q->msg_pool.adapter = pool_start;
- q->peer = (struct c2_mq_shared __iomem *) peer;
- q->magic = C2_MQ_MAGIC;
- q->type = type;
- q->priv = 0;
- q->hint_count = 0;
- return;
-}
-
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 *pool_start, u16 __iomem *peer, u32 type)
-{
- BUG_ON(!q->shared);
-
- /* This code assumes the byte swapping has already been done! */
- q->index = index;
- q->q_size = q_size;
- q->msg_size = msg_size;
- q->msg_pool.host = pool_start;
- q->peer = (struct c2_mq_shared __iomem *) peer;
- q->magic = C2_MQ_MAGIC;
- q->type = type;
- q->priv = 0;
- q->hint_count = 0;
- return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
deleted file mode 100644
index fc1b9a7cec4b..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mq.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _C2_MQ_H_
-#define _C2_MQ_H_
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include "c2_wr.h"
-
-enum c2_shared_regs {
-
- C2_SHARED_ARMED = 0x10,
- C2_SHARED_NOTIFY = 0x18,
- C2_SHARED_SHARED = 0x40,
-};
-
-struct c2_mq_shared {
- u16 unused1;
- u8 armed;
- u8 notification_type;
- u32 unused2;
- u16 shared;
- /* Pad to 64 bytes. */
- u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
-};
-
-enum c2_mq_type {
- C2_MQ_HOST_TARGET = 1,
- C2_MQ_ADAPTER_TARGET = 2,
-};
-
-/*
- * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
- * c2_user_mq_t (which is the same format) is for user-mode MQs...
- */
-#define C2_MQ_MAGIC 0x4d512020 /* 'MQ ' */
-struct c2_mq {
- u32 magic;
- union {
- u8 *host;
- u8 __iomem *adapter;
- } msg_pool;
- dma_addr_t host_dma;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- u16 hint_count;
- u16 priv;
- struct c2_mq_shared __iomem *peer;
- __be16 *shared;
- dma_addr_t shared_dma;
- u32 q_size;
- u32 msg_size;
- u32 index;
- enum c2_mq_type type;
-};
-
-static __inline__ int c2_mq_empty(struct c2_mq *q)
-{
- return q->priv == be16_to_cpu(*q->shared);
-}
-
-static __inline__ int c2_mq_full(struct c2_mq *q)
-{
- return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
-}
-
-extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
-extern void *c2_mq_alloc(struct c2_mq *q);
-extern void c2_mq_produce(struct c2_mq *q);
-extern void *c2_mq_consume(struct c2_mq *q);
-extern void c2_mq_free(struct c2_mq *q);
-extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
-extern void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 *pool_start, u16 __iomem *peer, u32 type);
-
-#endif /* _C2_MQ_H_ */
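
The inline helpers at the end of c2_mq.h encode the classic one-slot-free ring convention: the queue is empty when the producer index catches the consumer index, and treated as full one slot earlier so the two states stay distinguishable. A host-only model with plain integers (no byte swapping or shared adapter memory):

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_mq {
            unsigned int priv;   /* producer's next slot */
            unsigned int shared; /* consumer's next slot */
            unsigned int q_size;
    };

    static bool demo_mq_empty(const struct demo_mq *q)
    {
            return q->priv == q->shared;
    }

    static bool demo_mq_full(const struct demo_mq *q)
    {
            return q->priv == (q->shared + q->q_size - 1) % q->q_size;
    }

    int main(void)
    {
            struct demo_mq q = { .priv = 0, .shared = 0, .q_size = 4 };

            while (!demo_mq_full(&q))       /* produce until "full": 3 entries */
                    q.priv = (q.priv + 1) % q.q_size;
            printf("produced %u of %u slots\n", q.priv, q.q_size);

            while (!demo_mq_empty(&q))      /* consume everything back */
                    q.shared = (q.shared + 1) % q.q_size;
            printf("empty again: %d\n", demo_mq_empty(&q));
            return 0;
    }
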
diff --git a/drivers/staging/rdma/amso1100/c2_pd.c b/drivers/staging/rdma/amso1100/c2_pd.c
deleted file mode 100644
index f3e81dc357bb..000000000000
--- a/drivers/staging/rdma/amso1100/c2_pd.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-
-#include "c2.h"
-#include "c2_provider.h"
-
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
-{
- u32 obj;
- int ret = 0;
-
- spin_lock(&c2dev->pd_table.lock);
- obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
- c2dev->pd_table.last);
- if (obj >= c2dev->pd_table.max)
- obj = find_first_zero_bit(c2dev->pd_table.table,
- c2dev->pd_table.max);
- if (obj < c2dev->pd_table.max) {
- pd->pd_id = obj;
- __set_bit(obj, c2dev->pd_table.table);
- c2dev->pd_table.last = obj+1;
- if (c2dev->pd_table.last >= c2dev->pd_table.max)
- c2dev->pd_table.last = 0;
- } else
- ret = -ENOMEM;
- spin_unlock(&c2dev->pd_table.lock);
- return ret;
-}
-
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
-{
- spin_lock(&c2dev->pd_table.lock);
- __clear_bit(pd->pd_id, c2dev->pd_table.table);
- spin_unlock(&c2dev->pd_table.lock);
-}
-
-int c2_init_pd_table(struct c2_dev *c2dev)
-{
-
- c2dev->pd_table.last = 0;
- c2dev->pd_table.max = c2dev->props.max_pd;
- spin_lock_init(&c2dev->pd_table.lock);
- c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
- sizeof(long), GFP_KERNEL);
- if (!c2dev->pd_table.table)
- return -ENOMEM;
- bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
- return 0;
-}
-
-void c2_cleanup_pd_table(struct c2_dev *c2dev)
-{
- kfree(c2dev->pd_table.table);
-}
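
c2_pd_alloc above scans the PD bitmap starting at the slot after the last allocation and wraps to the beginning if nothing is free past it, which spreads IDs around instead of always reusing the lowest one. The same policy sketched with a plain array instead of the kernel bitmap helpers:

    #include <stdio.h>

    #define MAX_IDS 8

    static int used[MAX_IDS];
    static int last;

    static int alloc_id(void)
    {
            int i;

            for (i = 0; i < MAX_IDS; i++) {
                    int id = (last + i) % MAX_IDS;  /* wrap past the end */

                    if (!used[id]) {
                            used[id] = 1;
                            last = (id + 1) % MAX_IDS;  /* resume after it */
                            return id;
                    }
            }
            return -1;  /* table exhausted: the driver returns -ENOMEM */
    }

    int main(void)
    {
            printf("%d\n", alloc_id());     /* 0 */
            printf("%d\n", alloc_id());     /* 1 */
            used[0] = 0;                    /* free id 0 */
            printf("%d\n", alloc_id());     /* 2, not 0: scan resumes after 1 */
            return 0;
    }
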
diff --git a/drivers/staging/rdma/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
deleted file mode 100644
index 25c3f0085563..000000000000
--- a/drivers/staging/rdma/amso1100/c2_provider.c
+++ /dev/null
@@ -1,912 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/if_arp.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include "c2.h"
-#include "c2_provider.h"
-#include "c2_user.h"
-
-static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct c2_dev *c2dev = to_c2dev(ibdev);
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- *props = c2dev->props;
- return 0;
-}
-
-static int c2_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- props->max_mtu = IB_MTU_4096;
- props->lid = 0;
- props->lmc = 0;
- props->sm_lid = 0;
- props->sm_sl = 0;
- props->state = IB_PORT_ACTIVE;
- props->phys_state = 0;
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->qkey_viol_cntr = 0;
- props->active_width = 1;
- props->active_speed = IB_SPEED_SDR;
-
- return 0;
-}
-
-static int c2_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 * pkey)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- *pkey = 0;
- return 0;
-}
-
-static int c2_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct c2_dev *c2dev = to_c2dev(ibdev);
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
-
- return 0;
-}
-
-/* Allocate the user context data structure. This keeps track
- * of all objects associated with a particular user-mode client.
- */
-static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct c2_ucontext *context;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
- return &context->ibucontext;
-}
-
-static int c2_dealloc_ucontext(struct ib_ucontext *context)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- kfree(context);
- return 0;
-}
-
-static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct c2_pd *pd;
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
- err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
-
- if (context) {
- if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
- c2_pd_free(to_c2dev(ibdev), pd);
- kfree(pd);
- return ERR_PTR(-EFAULT);
- }
- }
-
- return &pd->ibpd;
-}
-
-static int c2_dealloc_pd(struct ib_pd *pd)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
- kfree(pd);
-
- return 0;
-}
-
-static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return ERR_PTR(-ENOSYS);
-}
-
-static int c2_ah_destroy(struct ib_ah *ah)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static void c2_add_ref(struct ib_qp *ibqp)
-{
- struct c2_qp *qp;
- BUG_ON(!ibqp);
- qp = to_c2qp(ibqp);
- atomic_inc(&qp->refcount);
-}
-
-static void c2_rem_ref(struct ib_qp *ibqp)
-{
- struct c2_qp *qp;
- BUG_ON(!ibqp);
- qp = to_c2qp(ibqp);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
-{
- struct c2_dev* c2dev = to_c2dev(device);
- struct c2_qp *qp;
-
- qp = c2_find_qpn(c2dev, qpn);
- pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
- __func__, qp, qpn, device,
- (qp?atomic_read(&qp->refcount):0));
-
- return (qp?&qp->ibqp:NULL);
-}
-
-static struct ib_qp *c2_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
-{
- struct c2_qp *qp;
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
-
- switch (init_attr->qp_type) {
- case IB_QPT_RC:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- pr_debug("%s: Unable to allocate QP\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
- spin_lock_init(&qp->lock);
- if (pd->uobject) {
- /* userspace specific */
- }
-
- err = c2_alloc_qp(to_c2dev(pd->device),
- to_c2pd(pd), init_attr, qp);
-
- if (err && pd->uobject) {
- /* userspace specific */
- }
-
- break;
- default:
- pr_debug("%s: Invalid QP type: %d\n", __func__,
- init_attr->qp_type);
- return ERR_PTR(-EINVAL);
- }
-
- if (err) {
- kfree(qp);
- return ERR_PTR(err);
- }
-
- return &qp->ibqp;
-}
-
-static int c2_destroy_qp(struct ib_qp *ib_qp)
-{
- struct c2_qp *qp = to_c2qp(ib_qp);
-
- pr_debug("%s:%u qp=%p,qp->state=%d\n",
- __func__, __LINE__, ib_qp, qp->state);
- c2_free_qp(to_c2dev(ib_qp->device), qp);
- kfree(qp);
- return 0;
-}
-
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- struct c2_cq *cq;
- int err;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- pr_debug("%s: Unable to allocate CQ\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
- if (err) {
- pr_debug("%s: error initializing CQ\n", __func__);
- kfree(cq);
- return ERR_PTR(err);
- }
-
- return &cq->ibcq;
-}
-
-static int c2_destroy_cq(struct ib_cq *ib_cq)
-{
- struct c2_cq *cq = to_c2cq(ib_cq);
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- c2_free_cq(to_c2dev(ib_cq->device), cq);
- kfree(cq);
-
- return 0;
-}
-
-static inline u32 c2_convert_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
- C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
-}
-
-static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 * iova_start)
-{
- struct c2_mr *mr;
- u64 *page_list;
- u32 total_len;
- int err, i, j, k, page_shift, pbl_depth;
-
- pbl_depth = 0;
- total_len = 0;
-
- page_shift = PAGE_SHIFT;
- /*
-	 * If there is only 1 buffer we assume this could be a map
-	 * of all physical memory, so use a 32KB page shift.
- */
- if (num_phys_buf == 1)
- page_shift += 3;
-
- for (i = 0; i < num_phys_buf; i++) {
-
- if (buffer_list[i].addr & ~PAGE_MASK) {
- pr_debug("Unaligned Memory Buffer: 0x%x\n",
- (unsigned int) buffer_list[i].addr);
- return ERR_PTR(-EINVAL);
- }
-
- if (!buffer_list[i].size) {
- pr_debug("Invalid Buffer Size\n");
- return ERR_PTR(-EINVAL);
- }
-
- total_len += buffer_list[i].size;
- pbl_depth += ALIGN(buffer_list[i].size,
- (1 << page_shift)) >> page_shift;
- }
-
- page_list = vmalloc(sizeof(u64) * pbl_depth);
- if (!page_list) {
- pr_debug("couldn't vmalloc page_list of size %zd\n",
- (sizeof(u64) * pbl_depth));
- return ERR_PTR(-ENOMEM);
- }
-
- for (i = 0, j = 0; i < num_phys_buf; i++) {
-
- int naddrs;
-
- naddrs = ALIGN(buffer_list[i].size,
- (1 << page_shift)) >> page_shift;
- for (k = 0; k < naddrs; k++)
- page_list[j++] = (buffer_list[i].addr +
- (k << page_shift));
- }
-
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- vfree(page_list);
- return ERR_PTR(-ENOMEM);
- }
-
- mr->pd = to_c2pd(ib_pd);
- mr->umem = NULL;
- pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
- "*iova_start %llx, first pa %llx, last pa %llx\n",
- __func__, page_shift, pbl_depth, total_len,
- (unsigned long long) *iova_start,
- (unsigned long long) page_list[0],
- (unsigned long long) page_list[pbl_depth-1]);
- err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
- (1 << page_shift), pbl_depth,
- total_len, 0, iova_start,
- c2_convert_access(acc), mr);
- vfree(page_list);
- if (err) {
- kfree(mr);
- return ERR_PTR(err);
- }
-
- return &mr->ibmr;
-}
-
-static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct ib_phys_buf bl;
- u64 kva = 0;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- /* AMSO1100 limit */
- bl.size = 0xffffffff;
- bl.addr = 0;
- return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
-}
-
-static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- u64 *pages;
- u64 kva = 0;
- int shift, n, len;
- int i, k, entry;
- int err = 0;
- struct scatterlist *sg;
- struct c2_pd *c2pd = to_c2pd(pd);
- struct c2_mr *c2mr;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
- if (!c2mr)
- return ERR_PTR(-ENOMEM);
- c2mr->pd = c2pd;
-
- c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
- if (IS_ERR(c2mr->umem)) {
- err = PTR_ERR(c2mr->umem);
- kfree(c2mr);
- return ERR_PTR(err);
- }
-
- shift = ffs(c2mr->umem->page_size) - 1;
- n = c2mr->umem->nmap;
-
- pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
- if (!pages) {
- err = -ENOMEM;
- goto err;
- }
-
- i = 0;
- for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] =
- sg_dma_address(sg) +
- (c2mr->umem->page_size * k);
- }
- }
-
- kva = virt;
- err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
- pages,
- c2mr->umem->page_size,
- i,
- length,
- ib_umem_offset(c2mr->umem),
- &kva,
- c2_convert_access(acc),
- c2mr);
- kfree(pages);
- if (err)
- goto err;
- return &c2mr->ibmr;
-
-err:
- ib_umem_release(c2mr->umem);
- kfree(c2mr);
- return ERR_PTR(err);
-}
-
-static int c2_dereg_mr(struct ib_mr *ib_mr)
-{
- struct c2_mr *mr = to_c2mr(ib_mr);
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
- if (err)
- pr_debug("c2_stag_dealloc failed: %d\n", err);
- else {
- if (mr->umem)
- ib_umem_release(mr->umem);
- kfree(mr);
- }
-
- return err;
-}
-
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "%x\n", c2dev->props.hw_ver);
-}
-
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "%x.%x.%x\n",
- (int) (c2dev->props.fw_ver >> 32),
- (int) (c2dev->props.fw_ver >> 16) & 0xffff,
- (int) (c2dev->props.fw_ver & 0xffff));
-}
-
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "AMSO1100\n");
-}
-
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *c2_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id
-};
-
-static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- int err;
-
- err =
- c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
- attr_mask);
-
- return err;
-}
-
-static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static int c2_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- /* Request a connection */
- return c2_llp_connect(cm_id, iw_param);
-}
-
-static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- /* Accept the new connection */
- return c2_llp_accept(cm_id, iw_param);
-}
-
-static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- err = c2_llp_reject(cm_id, pdata, pdata_len);
- return err;
-}
-
-static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
-{
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- err = c2_llp_service_create(cm_id, backlog);
- pr_debug("%s:%u err=%d\n",
- __func__, __LINE__,
- err);
- return err;
-}
-
-static int c2_service_destroy(struct iw_cm_id *cm_id)
-{
- int err;
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- err = c2_llp_service_destroy(cm_id);
-
- return err;
-}
-
-static int c2_pseudo_up(struct net_device *netdev)
-{
- struct in_device *ind;
- struct c2_dev *c2dev = netdev->ml_priv;
-
- ind = in_dev_get(netdev);
- if (!ind)
- return 0;
-
- pr_debug("adding...\n");
- for_ifa(ind) {
-#ifdef DEBUG
- u8 *ip = (u8 *) & ifa->ifa_address;
-
- pr_debug("%s: %d.%d.%d.%d\n",
- ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
- c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
- }
- endfor_ifa(ind);
- in_dev_put(ind);
-
- return 0;
-}
-
-static int c2_pseudo_down(struct net_device *netdev)
-{
- struct in_device *ind;
- struct c2_dev *c2dev = netdev->ml_priv;
-
- ind = in_dev_get(netdev);
- if (!ind)
- return 0;
-
- pr_debug("deleting...\n");
- for_ifa(ind) {
-#ifdef DEBUG
- u8 *ip = (u8 *) & ifa->ifa_address;
-
- pr_debug("%s: %d.%d.%d.%d\n",
- ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
- c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
- }
- endfor_ifa(ind);
- in_dev_put(ind);
-
- return 0;
-}
-
-static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
-{
- if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
- return -EINVAL;
-
- netdev->mtu = new_mtu;
-
-	/* TODO: Tell rnic about new rdma interface mtu */
- return 0;
-}
-
-static const struct net_device_ops c2_pseudo_netdev_ops = {
- .ndo_open = c2_pseudo_up,
- .ndo_stop = c2_pseudo_down,
- .ndo_start_xmit = c2_pseudo_xmit_frame,
- .ndo_change_mtu = c2_pseudo_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void setup(struct net_device *netdev)
-{
- netdev->netdev_ops = &c2_pseudo_netdev_ops;
-
- netdev->watchdog_timeo = 0;
- netdev->type = ARPHRD_ETHER;
- netdev->mtu = 1500;
- netdev->hard_header_len = ETH_HLEN;
- netdev->addr_len = ETH_ALEN;
- netdev->tx_queue_len = 0;
- netdev->flags |= IFF_NOARP;
-}
-
-static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
-{
- char name[IFNAMSIZ];
- struct net_device *netdev;
-
- /* change ethxxx to iwxxx */
- strcpy(name, "iw");
- strcat(name, &c2dev->netdev->name[3]);
- netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
- if (!netdev) {
- printk(KERN_ERR PFX "%s - etherdev alloc failed",
- __func__);
- return NULL;
- }
-
- netdev->ml_priv = c2dev;
-
- SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
- memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
-
- /* Print out the MAC address */
- pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);
-
-#if 0
- /* Disable network packets */
- netif_stop_queue(netdev);
-#endif
- return netdev;
-}
-
-static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = c2_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- return 0;
-}
-
-int c2_register_device(struct c2_dev *dev)
-{
- int ret = -ENOMEM;
- int i;
-
- /* Register pseudo network device */
- dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
- if (!dev->pseudo_netdev)
- goto out;
-
- ret = register_netdev(dev->pseudo_netdev);
- if (ret)
- goto out_free_netdev;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
- dev->ibdev.owner = THIS_MODULE;
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
-
- dev->ibdev.node_type = RDMA_NODE_RNIC;
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
- dev->ibdev.phys_port_cnt = 1;
- dev->ibdev.num_comp_vectors = 1;
- dev->ibdev.dma_device = &dev->pcidev->dev;
- dev->ibdev.query_device = c2_query_device;
- dev->ibdev.query_port = c2_query_port;
- dev->ibdev.query_pkey = c2_query_pkey;
- dev->ibdev.query_gid = c2_query_gid;
- dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
- dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
- dev->ibdev.mmap = c2_mmap_uar;
- dev->ibdev.alloc_pd = c2_alloc_pd;
- dev->ibdev.dealloc_pd = c2_dealloc_pd;
- dev->ibdev.create_ah = c2_ah_create;
- dev->ibdev.destroy_ah = c2_ah_destroy;
- dev->ibdev.create_qp = c2_create_qp;
- dev->ibdev.modify_qp = c2_modify_qp;
- dev->ibdev.destroy_qp = c2_destroy_qp;
- dev->ibdev.create_cq = c2_create_cq;
- dev->ibdev.destroy_cq = c2_destroy_cq;
- dev->ibdev.poll_cq = c2_poll_cq;
- dev->ibdev.get_dma_mr = c2_get_dma_mr;
- dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
- dev->ibdev.reg_user_mr = c2_reg_user_mr;
- dev->ibdev.dereg_mr = c2_dereg_mr;
- dev->ibdev.get_port_immutable = c2_port_immutable;
-
- dev->ibdev.alloc_fmr = NULL;
- dev->ibdev.unmap_fmr = NULL;
- dev->ibdev.dealloc_fmr = NULL;
- dev->ibdev.map_phys_fmr = NULL;
-
- dev->ibdev.attach_mcast = c2_multicast_attach;
- dev->ibdev.detach_mcast = c2_multicast_detach;
- dev->ibdev.process_mad = c2_process_mad;
-
- dev->ibdev.req_notify_cq = c2_arm_cq;
- dev->ibdev.post_send = c2_post_send;
- dev->ibdev.post_recv = c2_post_receive;
-
- dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
- if (dev->ibdev.iwcm == NULL) {
- ret = -ENOMEM;
- goto out_unregister_netdev;
- }
- dev->ibdev.iwcm->add_ref = c2_add_ref;
- dev->ibdev.iwcm->rem_ref = c2_rem_ref;
- dev->ibdev.iwcm->get_qp = c2_get_qp;
- dev->ibdev.iwcm->connect = c2_connect;
- dev->ibdev.iwcm->accept = c2_accept;
- dev->ibdev.iwcm->reject = c2_reject;
- dev->ibdev.iwcm->create_listen = c2_service_create;
- dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
-
- ret = ib_register_device(&dev->ibdev, NULL);
- if (ret)
- goto out_free_iwcm;
-
- for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- c2_dev_attributes[i]);
- if (ret)
- goto out_unregister_ibdev;
- }
- goto out;
-
-out_unregister_ibdev:
- ib_unregister_device(&dev->ibdev);
-out_free_iwcm:
- kfree(dev->ibdev.iwcm);
-out_unregister_netdev:
- unregister_netdev(dev->pseudo_netdev);
-out_free_netdev:
- free_netdev(dev->pseudo_netdev);
-out:
- pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
- return ret;
-}
-
-void c2_unregister_device(struct c2_dev *dev)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- unregister_netdev(dev->pseudo_netdev);
- free_netdev(dev->pseudo_netdev);
- ib_unregister_device(&dev->ibdev);
-}
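
c2_reg_phys_mr above rounds every physical buffer up to whole pages and flattens all of the page addresses into one array for the adapter; c2_reg_user_mr does the same walk over the umem scatterlist. A small sketch of that flattening, with made-up buffer values:

    #include <stdio.h>
    #include <stdint.h>

    struct phys_buf {
            uint64_t addr;
            uint64_t size;
    };

    int main(void)
    {
            struct phys_buf bufs[] = {
                    { 0x100000, 3 * 4096 },   /* hypothetical buffers */
                    { 0x800000, 2 * 4096 },
            };
            int page_shift = 12;              /* 4 KiB pages */
            uint64_t page_size = 1ULL << page_shift;
            uint64_t page_list[16];
            int i, k, j = 0;

            for (i = 0; i < 2; i++) {
                    /* Round each buffer up to whole pages, like ALIGN() >> shift */
                    int naddrs = (bufs[i].size + page_size - 1) >> page_shift;

                    for (k = 0; k < naddrs; k++)
                            page_list[j++] = bufs[i].addr + ((uint64_t)k << page_shift);
            }

            for (i = 0; i < j; i++)
                    printf("pbe[%d] = 0x%llx\n", i, (unsigned long long)page_list[i]);
            return 0;
    }
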
diff --git a/drivers/staging/rdma/amso1100/c2_provider.h b/drivers/staging/rdma/amso1100/c2_provider.h
deleted file mode 100644
index bf189987711f..000000000000
--- a/drivers/staging/rdma/amso1100/c2_provider.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_PROVIDER_H
-#define C2_PROVIDER_H
-#include <linux/inetdevice.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-
-#include "c2_mq.h"
-#include <rdma/iw_cm.h>
-
-#define C2_MPT_FLAG_ATOMIC (1 << 14)
-#define C2_MPT_FLAG_REMOTE_WRITE (1 << 13)
-#define C2_MPT_FLAG_REMOTE_READ (1 << 12)
-#define C2_MPT_FLAG_LOCAL_WRITE (1 << 11)
-#define C2_MPT_FLAG_LOCAL_READ (1 << 10)
-
-struct c2_buf_list {
- void *buf;
- DEFINE_DMA_UNMAP_ADDR(mapping);
-};
-
-
-/* The user context keeps track of objects allocated for a
- * particular user-mode client. */
-struct c2_ucontext {
- struct ib_ucontext ibucontext;
-};
-
-struct c2_mtt;
-
-/* All objects associated with a PD are kept in the
- * associated user context if present.
- */
-struct c2_pd {
- struct ib_pd ibpd;
- u32 pd_id;
-};
-
-struct c2_mr {
- struct ib_mr ibmr;
- struct c2_pd *pd;
- struct ib_umem *umem;
-};
-
-struct c2_av;
-
-enum c2_ah_type {
- C2_AH_ON_HCA,
- C2_AH_PCI_POOL,
- C2_AH_KMALLOC
-};
-
-struct c2_ah {
- struct ib_ah ibah;
-};
-
-struct c2_cq {
- struct ib_cq ibcq;
- spinlock_t lock;
- atomic_t refcount;
- int cqn;
- int is_kernel;
- wait_queue_head_t wait;
-
- u32 adapter_handle;
- struct c2_mq mq;
-};
-
-struct c2_wq {
- spinlock_t lock;
-};
-struct iw_cm_id;
-struct c2_qp {
- struct ib_qp ibqp;
- struct iw_cm_id *cm_id;
- spinlock_t lock;
- atomic_t refcount;
- wait_queue_head_t wait;
- int qpn;
-
- u32 adapter_handle;
- u32 send_sgl_depth;
- u32 recv_sgl_depth;
- u32 rdma_write_sgl_depth;
- u8 state;
-
- struct c2_mq sq_mq;
- struct c2_mq rq_mq;
-};
-
-struct c2_cr_query_attrs {
- u32 local_addr;
- u32 remote_addr;
- u16 local_port;
- u16 remote_port;
-};
-
-static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct c2_pd, ibpd);
-}
-
-static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
-{
- return container_of(ibucontext, struct c2_ucontext, ibucontext);
-}
-
-static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct c2_mr, ibmr);
-}
-
-
-static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct c2_ah, ibah);
-}
-
-static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct c2_cq, ibcq);
-}
-
-static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct c2_qp, ibqp);
-}
-
-static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
-{
- struct in_device *ind;
- int ret = 0;
-
- ind = in_dev_get(netdev);
- if (!ind)
- return 0;
-
- for_ifa(ind) {
- if (ifa->ifa_address == addr) {
- ret = 1;
- break;
- }
- }
- endfor_ifa(ind);
- in_dev_put(ind);
- return ret;
-}
-#endif /* C2_PROVIDER_H */
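
The to_c2pd()/to_c2ucontext()/to_c2cq()/to_c2qp() helpers deleted above are thin container_of() wrappers: given a pointer to the embedded ib_* member, they recover the enclosing c2_* structure from the member's offset. A minimal standalone sketch of the same pattern follows; the demo struct names and the userspace stand-in for the kernel macro are illustrative only.

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_pd_demo { int dummy; };

struct c2_pd_demo {
	struct ib_pd_demo ibpd;	/* embedded base object */
	unsigned int pd_id;
};

static struct c2_pd_demo *to_c2pd_demo(struct ib_pd_demo *ibpd)
{
	return container_of(ibpd, struct c2_pd_demo, ibpd);
}

int main(void)
{
	struct c2_pd_demo pd = { .pd_id = 42 };

	/* recover the enclosing struct from a pointer to its member */
	printf("pd_id=%u\n", to_c2pd_demo(&pd.ibpd)->pd_id);
	return 0;
}
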
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
deleted file mode 100644
index 86708dee58b1..000000000000
--- a/drivers/staging/rdma/amso1100/c2_qp.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_MAX_ORD_PER_QP 128
-#define C2_MAX_IRD_PER_QP 128
-
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-#define NO_SUPPORT -1
-static const u8 c2_opcode[] = {
- [IB_WR_SEND] = C2_WR_TYPE_SEND,
- [IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
- [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
- [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
-};
-
-static int to_c2_state(enum ib_qp_state ib_state)
-{
- switch (ib_state) {
- case IB_QPS_RESET:
- return C2_QP_STATE_IDLE;
- case IB_QPS_RTS:
- return C2_QP_STATE_RTS;
- case IB_QPS_SQD:
- return C2_QP_STATE_CLOSING;
- case IB_QPS_SQE:
- return C2_QP_STATE_CLOSING;
- case IB_QPS_ERR:
- return C2_QP_STATE_ERROR;
- default:
- return -1;
- }
-}
-
-static int to_ib_state(enum c2_qp_state c2_state)
-{
- switch (c2_state) {
- case C2_QP_STATE_IDLE:
- return IB_QPS_RESET;
- case C2_QP_STATE_CONNECTING:
- return IB_QPS_RTR;
- case C2_QP_STATE_RTS:
- return IB_QPS_RTS;
- case C2_QP_STATE_CLOSING:
- return IB_QPS_SQD;
- case C2_QP_STATE_ERROR:
- return IB_QPS_ERR;
- case C2_QP_STATE_TERMINATE:
- return IB_QPS_SQE;
- default:
- return -1;
- }
-}
-
-static const char *to_ib_state_str(int ib_state)
-{
- static const char *state_str[] = {
- "IB_QPS_RESET",
- "IB_QPS_INIT",
- "IB_QPS_RTR",
- "IB_QPS_RTS",
- "IB_QPS_SQD",
- "IB_QPS_SQE",
- "IB_QPS_ERR"
- };
- if (ib_state < IB_QPS_RESET ||
- ib_state > IB_QPS_ERR)
- return "<invalid IB QP state>";
-
- ib_state -= IB_QPS_RESET;
- return state_str[ib_state];
-}
-
-void c2_set_qp_state(struct c2_qp *qp, int c2_state)
-{
- int new_state = to_ib_state(c2_state);
-
- pr_debug("%s: qp[%p] state modify %s --> %s\n",
- __func__,
- qp,
- to_ib_state_str(qp->state),
- to_ib_state_str(new_state));
- qp->state = new_state;
-}
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
- struct ib_qp_attr *attr, int attr_mask)
-{
- struct c2wr_qp_modify_req wr;
- struct c2wr_qp_modify_rep *reply;
- struct c2_vq_req *vq_req;
- unsigned long flags;
- u8 next_state;
- int err;
-
- pr_debug("%s:%d qp=%p, %s --> %s\n",
- __func__, __LINE__,
- qp,
- to_ib_state_str(qp->state),
- to_ib_state_str(attr->qp_state));
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- c2_wr_set_id(&wr, CCWR_QP_MODIFY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.qp_handle = qp->adapter_handle;
- wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
- if (attr_mask & IB_QP_STATE) {
- /* Ensure the state is valid */
- if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
- err = -EINVAL;
- goto bail0;
- }
-
- wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
-
- if (attr->qp_state == IB_QPS_ERR) {
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id && qp->state == IB_QPS_RTS) {
- pr_debug("Generating CLOSE event for QP-->ERR, "
-					"qp=%p, cm_id=%p\n", qp, qp->cm_id);
-				/* Generate a CLOSE event */
- vq_req->cm_id = qp->cm_id;
- vq_req->event = IW_CM_EVENT_CLOSE;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
- }
- next_state = attr->qp_state;
-
- } else if (attr_mask & IB_QP_CUR_STATE) {
-
- if (attr->cur_qp_state != IB_QPS_RTR &&
- attr->cur_qp_state != IB_QPS_RTS &&
- attr->cur_qp_state != IB_QPS_SQD &&
- attr->cur_qp_state != IB_QPS_SQE) {
- err = -EINVAL;
- goto bail0;
- } else
- wr.next_qp_state =
- cpu_to_be32(to_c2_state(attr->cur_qp_state));
-
- next_state = attr->cur_qp_state;
-
- } else {
- err = 0;
- goto bail0;
- }
-
- /* reference the request struct */
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
- if (!err)
- qp->state = next_state;
-#ifdef DEBUG
- else
- pr_debug("%s: c2_errno=%d\n", __func__, err);
-#endif
- /*
-	 * If we are transitioning to error and generating the CLOSE event
-	 * here, then we need to remove the reference because the adapter
-	 * will not generate a close event.
- */
- spin_lock_irqsave(&qp->lock, flags);
- if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
-
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
-
- pr_debug("%s:%d qp=%p, cur_state=%s\n",
- __func__, __LINE__,
- qp,
- to_ib_state_str(qp->state));
- return err;
-}
-
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
- int ord, int ird)
-{
- struct c2wr_qp_modify_req wr;
- struct c2wr_qp_modify_rep *reply;
- struct c2_vq_req *vq_req;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- c2_wr_set_id(&wr, CCWR_QP_MODIFY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.qp_handle = qp->adapter_handle;
- wr.ord = cpu_to_be32(ord);
- wr.ird = cpu_to_be32(ird);
- wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
- /* reference the request struct */
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- reply = (struct c2wr_qp_modify_rep *) (unsigned long)
- vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_qp_destroy_req wr;
- struct c2wr_qp_destroy_rep *reply;
- unsigned long flags;
- int err;
-
- /*
- * Allocate a verb request message
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- return -ENOMEM;
- }
-
- /*
- * Initialize the WR
- */
- c2_wr_set_id(&wr, CCWR_QP_DESTROY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.qp_handle = qp->adapter_handle;
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id && qp->state == IB_QPS_RTS) {
- pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
-			"qp=%p, cm_id=%p\n", qp, qp->cm_id);
-		/* Generate a CLOSE event */
- vq_req->qp = qp;
- vq_req->cm_id = qp->cm_id;
- vq_req->event = IW_CM_EVENT_CLOSE;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- /*
- * Process reply
- */
- reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
-
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
-{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&c2dev->qp_table.lock);
-
- ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
- if (ret >= 0)
- qp->qpn = ret;
-
- spin_unlock_irq(&c2dev->qp_table.lock);
- idr_preload_end();
- return ret < 0 ? ret : 0;
-}
-
-static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
-{
- spin_lock_irq(&c2dev->qp_table.lock);
- idr_remove(&c2dev->qp_table.idr, qpn);
- spin_unlock_irq(&c2dev->qp_table.lock);
-}
-
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
-{
- unsigned long flags;
- struct c2_qp *qp;
-
- spin_lock_irqsave(&c2dev->qp_table.lock, flags);
- qp = idr_find(&c2dev->qp_table.idr, qpn);
- spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
- return qp;
-}
-
-int c2_alloc_qp(struct c2_dev *c2dev,
- struct c2_pd *pd,
- struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
-{
- struct c2wr_qp_create_req wr;
- struct c2wr_qp_create_rep *reply;
- struct c2_vq_req *vq_req;
- struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
- struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
- unsigned long peer_pa;
- u32 q_size, msg_size, mmap_size;
- void __iomem *mmap;
- int err;
-
- err = c2_alloc_qpn(c2dev, qp);
- if (err)
- return err;
- qp->ibqp.qp_num = qp->qpn;
- qp->ibqp.qp_type = IB_QPT_RC;
-
- /* Allocate the SQ and RQ shared pointers */
- qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &qp->sq_mq.shared_dma, GFP_KERNEL);
- if (!qp->sq_mq.shared) {
- err = -ENOMEM;
- goto bail0;
- }
-
- qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &qp->rq_mq.shared_dma, GFP_KERNEL);
- if (!qp->rq_mq.shared) {
- err = -ENOMEM;
- goto bail1;
- }
-
- /* Allocate the verbs request */
- vq_req = vq_req_alloc(c2dev);
- if (vq_req == NULL) {
- err = -ENOMEM;
- goto bail2;
- }
-
- /* Initialize the work request */
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_QP_CREATE);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.sq_cq_handle = send_cq->adapter_handle;
- wr.rq_cq_handle = recv_cq->adapter_handle;
- wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
- wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
- wr.srq_handle = 0;
- wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
- QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
- wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
- wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
- wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
- wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
- wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
- wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
- wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
- wr.pd_id = pd->pd_id;
- wr.user_context = (unsigned long) qp;
-
- vq_req_get(c2dev, vq_req);
-
- /* Send the WR to the adapter */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail3;
- }
-
- /* Wait for the verb reply */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail3;
- }
-
- /* Process the reply */
- reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail3;
- }
-
- if ((err = c2_wr_get_result(reply)) != 0) {
- goto bail4;
- }
-
- /* Fill in the kernel QP struct */
- atomic_set(&qp->refcount, 1);
- qp->adapter_handle = reply->qp_handle;
- qp->state = IB_QPS_RESET;
- qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
- qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
- qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
- init_waitqueue_head(&qp->wait);
-
- /* Initialize the SQ MQ */
- q_size = be32_to_cpu(reply->sq_depth);
- msg_size = be32_to_cpu(reply->sq_msg_size);
- peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
- mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
- mmap = ioremap_nocache(peer_pa, mmap_size);
- if (!mmap) {
- err = -ENOMEM;
- goto bail5;
- }
-
- c2_mq_req_init(&qp->sq_mq,
- be32_to_cpu(reply->sq_mq_index),
- q_size,
- msg_size,
- mmap + sizeof(struct c2_mq_shared), /* pool start */
- mmap, /* peer */
- C2_MQ_ADAPTER_TARGET);
-
- /* Initialize the RQ mq */
- q_size = be32_to_cpu(reply->rq_depth);
- msg_size = be32_to_cpu(reply->rq_msg_size);
- peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
- mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
- mmap = ioremap_nocache(peer_pa, mmap_size);
- if (!mmap) {
- err = -ENOMEM;
- goto bail6;
- }
-
- c2_mq_req_init(&qp->rq_mq,
- be32_to_cpu(reply->rq_mq_index),
- q_size,
- msg_size,
- mmap + sizeof(struct c2_mq_shared), /* pool start */
- mmap, /* peer */
- C2_MQ_ADAPTER_TARGET);
-
- vq_repbuf_free(c2dev, reply);
- vq_req_free(c2dev, vq_req);
-
- return 0;
-
- bail6:
- iounmap(qp->sq_mq.peer);
- bail5:
- destroy_qp(c2dev, qp);
- bail4:
- vq_repbuf_free(c2dev, reply);
- bail3:
- vq_req_free(c2dev, vq_req);
- bail2:
- c2_free_mqsp(qp->rq_mq.shared);
- bail1:
- c2_free_mqsp(qp->sq_mq.shared);
- bail0:
- c2_free_qpn(c2dev, qp->qpn);
- return err;
-}
-
-static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
- if (send_cq == recv_cq)
- spin_lock_irq(&send_cq->lock);
- else if (send_cq > recv_cq) {
- spin_lock_irq(&send_cq->lock);
- spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
- } else {
- spin_lock_irq(&recv_cq->lock);
- spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
- }
-}
-
-static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
- if (send_cq == recv_cq)
- spin_unlock_irq(&send_cq->lock);
- else if (send_cq > recv_cq) {
- spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
- } else {
- spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
- }
-}
-
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
- struct c2_cq *send_cq;
- struct c2_cq *recv_cq;
-
- send_cq = to_c2cq(qp->ibqp.send_cq);
- recv_cq = to_c2cq(qp->ibqp.recv_cq);
-
- /*
- * Lock CQs here, so that CQ polling code can do QP lookup
- * without taking a lock.
- */
- c2_lock_cqs(send_cq, recv_cq);
- c2_free_qpn(c2dev, qp->qpn);
- c2_unlock_cqs(send_cq, recv_cq);
-
- /*
- * Destroy qp in the rnic...
- */
- destroy_qp(c2dev, qp);
-
- /*
- * Mark any unreaped CQEs as null and void.
- */
- c2_cq_clean(c2dev, qp, send_cq->cqn);
- if (send_cq != recv_cq)
- c2_cq_clean(c2dev, qp, recv_cq->cqn);
- /*
- * Unmap the MQs and return the shared pointers
- * to the message pool.
- */
- iounmap(qp->sq_mq.peer);
- iounmap(qp->rq_mq.peer);
- c2_free_mqsp(qp->sq_mq.shared);
- c2_free_mqsp(qp->rq_mq.shared);
-
- atomic_dec(&qp->refcount);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
-}
-
-/*
- * Function: move_sgl
- *
- * Description:
- * Move an SGL from the user's work request struct into a CCIL Work Request
- * message, swapping to WR byte order and ensuring the total length doesn't
- * overflow.
- *
- * IN:
- * dst - ptr to CCIL Work Request message SGL memory.
- * src - ptr to the consumer's SGL memory.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int
-move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
- u8 * actual_count)
-{
- u32 tot = 0; /* running total */
- u8 acount = 0; /* running total non-0 len sge's */
-
- while (count > 0) {
- /*
- * If the addition of this SGE causes the
- * total SGL length to exceed 2^32-1, then
- * fail-n-bail.
- *
- * If the current total plus the next element length
- * wraps, then it will go negative and be less than the
- * current total...
- */
- if ((tot + src->length) < tot) {
- return -EINVAL;
- }
- /*
- * Bug: 1456 (as well as 1498 & 1643)
- * Skip over any sge's supplied with len=0
- */
- if (src->length) {
- tot += src->length;
- dst->stag = cpu_to_be32(src->lkey);
- dst->to = cpu_to_be64(src->addr);
- dst->length = cpu_to_be32(src->length);
- dst++;
- acount++;
- }
- src++;
- count--;
- }
-
- if (acount == 0) {
- /*
- * Bug: 1476 (as well as 1498, 1456 and 1643)
- * Setup the SGL in the WR to make it easier for the RNIC.
- * This way, the FW doesn't have to deal with special cases.
- * Setting length=0 should be sufficient.
- */
- dst->stag = 0;
- dst->to = 0;
- dst->length = 0;
- }
-
- *p_len = tot;
- *actual_count = acount;
- return 0;
-}
-
-/*
- * Function: c2_activity (private function)
- *
- * Description:
- * Post an mq index to the host->adapter activity fifo.
- *
- * IN:
- * c2dev - ptr to c2dev structure
- * mq_index - mq index to post
- * shared - value most recently written to shared
- *
- * OUT:
- *
- * Return:
- * none
- */
-static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
-{
- /*
- * First read the register to see if the FIFO is full, and if so,
- * spin until it's not. This isn't perfect -- there is no
- * synchronization among the clients of the register, but in
-	 * practice it prevents multiple CPUs from hammering the bus
- * with PCI RETRY. Note that when this does happen, the card
- * cannot get on the bus and the card and system hang in a
- * deadlock -- thus the need for this code. [TOT]
- */
- while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
- udelay(10);
-
- __raw_writel(C2_HINT_MAKE(mq_index, shared),
- c2dev->regs + PCI_BAR0_ADAPTER_HINT);
-}
-
-/*
- * Function: qp_wr_post
- *
- * Description:
- * This in-line function allocates a MQ msg, then moves the host-copy of
- * the completed WR into msg. Then it posts the message.
- *
- * IN:
- * q - ptr to user MQ.
- * wr - ptr to host-copy of the WR.
- * qp - ptr to user qp
- * size - Number of bytes to post. Assumed to be divisible by 4.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
-{
- union c2wr *msg;
-
- msg = c2_mq_alloc(q);
- if (msg == NULL) {
- return -EINVAL;
- }
-#ifdef CCMSGMAGIC
- ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-
- /*
- * Since all header fields in the WR are the same as the
- * CQE, set the following so the adapter need not.
- */
- c2_wr_set_result(wr, CCERR_PENDING);
-
- /*
- * Copy the wr down to the adapter
- */
- memcpy((void *) msg, (void *) wr, size);
-
- c2_mq_produce(q);
- return 0;
-}
-
-
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr)
-{
- struct c2_dev *c2dev = to_c2dev(ibqp->device);
- struct c2_qp *qp = to_c2qp(ibqp);
- union c2wr wr;
- unsigned long lock_flags;
- int err = 0;
-
- u32 flags;
- u32 tot_len;
- u8 actual_sge_count;
- u32 msg_size;
-
- if (qp->state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
-
- while (ib_wr) {
-
- flags = 0;
- wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
- if (ib_wr->send_flags & IB_SEND_SIGNALED) {
- flags |= SQ_SIGNALED;
- }
-
- switch (ib_wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (ib_wr->opcode == IB_WR_SEND) {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
- else
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
- wr.sqwr.send.remote_stag = 0;
- } else {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
- else
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
- wr.sqwr.send.remote_stag =
- cpu_to_be32(ib_wr->ex.invalidate_rkey);
- }
-
- msg_size = sizeof(struct c2wr_send_req) +
- sizeof(struct c2_data_addr) * ib_wr->num_sge;
- if (ib_wr->num_sge > qp->send_sgl_depth) {
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- flags |= SQ_READ_FENCE;
- }
- err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
- ib_wr->sg_list,
- ib_wr->num_sge,
- &tot_len, &actual_sge_count);
- wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
- c2_wr_set_sge_count(&wr, actual_sge_count);
- break;
- case IB_WR_RDMA_WRITE:
- c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
- msg_size = sizeof(struct c2wr_rdma_write_req) +
- (sizeof(struct c2_data_addr) * ib_wr->num_sge);
- if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- flags |= SQ_READ_FENCE;
- }
- wr.sqwr.rdma_write.remote_stag =
- cpu_to_be32(ib_wr->wr.rdma.rkey);
- wr.sqwr.rdma_write.remote_to =
- cpu_to_be64(ib_wr->wr.rdma.remote_addr);
- err = move_sgl((struct c2_data_addr *)
- & (wr.sqwr.rdma_write.data),
- ib_wr->sg_list,
- ib_wr->num_sge,
- &tot_len, &actual_sge_count);
- wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
- c2_wr_set_sge_count(&wr, actual_sge_count);
- break;
- case IB_WR_RDMA_READ:
- c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
- msg_size = sizeof(struct c2wr_rdma_read_req);
-
-			/* iWARP only supports 1 SGE for RDMA reads */
- if (ib_wr->num_sge > 1) {
- err = -EINVAL;
- break;
- }
-
- /*
- * Move the local and remote stag/to/len into the WR.
- */
- wr.sqwr.rdma_read.local_stag =
- cpu_to_be32(ib_wr->sg_list->lkey);
- wr.sqwr.rdma_read.local_to =
- cpu_to_be64(ib_wr->sg_list->addr);
- wr.sqwr.rdma_read.remote_stag =
- cpu_to_be32(ib_wr->wr.rdma.rkey);
- wr.sqwr.rdma_read.remote_to =
- cpu_to_be64(ib_wr->wr.rdma.remote_addr);
- wr.sqwr.rdma_read.length =
- cpu_to_be32(ib_wr->sg_list->length);
- break;
- default:
- /* error */
- msg_size = 0;
- err = -EINVAL;
- break;
- }
-
- /*
- * If we had an error on the last wr build, then
- * break out. Possible errors include bogus WR
- * type, and a bogus SGL length...
- */
- if (err) {
- break;
- }
-
- /*
- * Store flags
- */
- c2_wr_set_flags(&wr, flags);
-
- /*
- * Post the puppy!
- */
- spin_lock_irqsave(&qp->lock, lock_flags);
- err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
- if (err) {
- spin_unlock_irqrestore(&qp->lock, lock_flags);
- break;
- }
-
- /*
- * Enqueue mq index to activity FIFO.
- */
- c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
- spin_unlock_irqrestore(&qp->lock, lock_flags);
-
- ib_wr = ib_wr->next;
- }
-
-out:
- if (err)
- *bad_wr = ib_wr;
- return err;
-}
-
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr)
-{
- struct c2_dev *c2dev = to_c2dev(ibqp->device);
- struct c2_qp *qp = to_c2qp(ibqp);
- union c2wr wr;
- unsigned long lock_flags;
- int err = 0;
-
- if (qp->state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Try and post each work request
- */
- while (ib_wr) {
- u32 tot_len;
- u8 actual_sge_count;
-
- if (ib_wr->num_sge > qp->recv_sgl_depth) {
- err = -EINVAL;
- break;
- }
-
- /*
- * Create local host-copy of the WR
- */
- wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
- c2_wr_set_id(&wr, CCWR_RECV);
- c2_wr_set_flags(&wr, 0);
-
- /* sge_count is limited to eight bits. */
- BUG_ON(ib_wr->num_sge >= 256);
- err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
- ib_wr->sg_list,
- ib_wr->num_sge, &tot_len, &actual_sge_count);
- c2_wr_set_sge_count(&wr, actual_sge_count);
-
- /*
- * If we had an error on the last wr build, then
- * break out. Possible errors include bogus WR
- * type, and a bogus SGL length...
- */
- if (err) {
- break;
- }
-
- spin_lock_irqsave(&qp->lock, lock_flags);
- err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
- if (err) {
- spin_unlock_irqrestore(&qp->lock, lock_flags);
- break;
- }
-
- /*
- * Enqueue mq index to activity FIFO
- */
- c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
- spin_unlock_irqrestore(&qp->lock, lock_flags);
-
- ib_wr = ib_wr->next;
- }
-
-out:
- if (err)
- *bad_wr = ib_wr;
- return err;
-}
-
-void c2_init_qp_table(struct c2_dev *c2dev)
-{
- spin_lock_init(&c2dev->qp_table.lock);
- idr_init(&c2dev->qp_table.idr);
-}
-
-void c2_cleanup_qp_table(struct c2_dev *c2dev)
-{
- idr_destroy(&c2dev->qp_table.idr);
-}
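
move_sgl() above rejects any SGL whose cumulative length would exceed 2^32 - 1 by checking whether the 32-bit running total wraps: with unsigned arithmetic, (tot + len) < tot exactly when the addition overflowed. A small standalone sketch of that check, with made-up SGE lengths:

#include <stdint.h>
#include <stdio.h>

/* returns -1 if adding len would wrap the 32-bit running total */
static int add_sge_len(uint32_t *tot, uint32_t len)
{
	if (*tot + len < *tot)	/* unsigned sum wrapped past 2^32 - 1 */
		return -1;	/* move_sgl() fails with -EINVAL here */
	*tot += len;
	return 0;
}

int main(void)
{
	uint32_t tot = 0;

	printf("first  0x80000000: %d\n", add_sge_len(&tot, 0x80000000u));
	printf("second 0x80000000: %d\n", add_sge_len(&tot, 0x80000000u));
	return 0;
}
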
diff --git a/drivers/staging/rdma/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
deleted file mode 100644
index d2a6d961344b..000000000000
--- a/drivers/staging/rdma/amso1100/c2_rnic.c
+++ /dev/null
@@ -1,655 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/inet.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <linux/route.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_vq.h"
-
-/* Device capabilities */
-#define C2_MIN_PAGESIZE 1024
-
-#define C2_MAX_MRS 32768
-#define C2_MAX_QPS 16000
-#define C2_MAX_WQE_SZ 256
-#define C2_MAX_QP_WR ((128*1024)/C2_MAX_WQE_SZ)
-#define C2_MAX_SGES 4
-#define C2_MAX_SGE_RD 1
-#define C2_MAX_CQS 32768
-#define C2_MAX_CQES 4096
-#define C2_MAX_PDS 16384
-
-/*
- * Send the adapter INIT message to the amso1100
- */
-static int c2_adapter_init(struct c2_dev *c2dev)
-{
- struct c2wr_init_req wr;
- int err;
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_INIT);
- wr.hdr.context = 0;
- wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
- wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
- wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
- wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
- wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
- wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
-
- /* Post the init message */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
-
- return err;
-}
-
-/*
- * Send the adapter TERM message to the amso1100
- */
-static void c2_adapter_term(struct c2_dev *c2dev)
-{
- struct c2wr_init_req wr;
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_TERM);
- wr.hdr.context = 0;
-
-	/* Post the term message */
- vq_send_wr(c2dev, (union c2wr *) & wr);
- c2dev->init = 0;
-
- return;
-}
-
-/*
- * Query the adapter
- */
-static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_rnic_query_req wr;
- struct c2wr_rnic_query_rep *reply;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) &wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply =
- (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply)
- err = -ENOMEM;
- else
- err = c2_errno(reply);
- if (err)
- goto bail2;
-
- props->fw_ver =
- ((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
- ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
- (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
- memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
- props->max_mr_size = 0xFFFFFFFF;
- props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
- props->vendor_id = be32_to_cpu(reply->vendor_id);
- props->vendor_part_id = be32_to_cpu(reply->part_number);
- props->hw_ver = be32_to_cpu(reply->hw_version);
- props->max_qp = be32_to_cpu(reply->max_qps);
- props->max_qp_wr = be32_to_cpu(reply->max_qp_depth);
- props->device_cap_flags = c2dev->device_cap_flags;
- props->max_sge = C2_MAX_SGES;
- props->max_sge_rd = C2_MAX_SGE_RD;
- props->max_cq = be32_to_cpu(reply->max_cqs);
- props->max_cqe = be32_to_cpu(reply->max_cq_depth);
- props->max_mr = be32_to_cpu(reply->max_mrs);
- props->max_pd = be32_to_cpu(reply->max_pds);
- props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird);
- props->max_ee_rd_atom = 0;
- props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird);
- props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
- props->max_ee_init_rd_atom = 0;
- props->atomic_cap = IB_ATOMIC_NONE;
- props->max_ee = 0;
- props->max_rdd = 0;
- props->max_mw = be32_to_cpu(reply->max_mws);
- props->max_raw_ipv6_qp = 0;
- props->max_raw_ethy_qp = 0;
- props->max_mcast_grp = 0;
- props->max_mcast_qp_attach = 0;
- props->max_total_mcast_qp_attach = 0;
- props->max_ah = 0;
- props->max_fmr = 0;
- props->max_map_per_fmr = 0;
- props->max_srq = 0;
- props->max_srq_wr = 0;
- props->max_srq_sge = 0;
- props->max_pkeys = 0;
- props->local_ca_ack_delay = 0;
-
- bail2:
- vq_repbuf_free(c2dev, reply);
-
- bail1:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Add an IP address to the RNIC interface
- */
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_rnic_setconfig_req *wr;
- struct c2wr_rnic_setconfig_rep *reply;
- struct c2_netaddr netaddr;
- int err, len;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- len = sizeof(struct c2_netaddr);
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
-
- netaddr.ip_addr = inaddr;
- netaddr.netmask = inmask;
- netaddr.mtu = 0;
-
- memcpy(wr->data, &netaddr, len);
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply =
- (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
-
- bail1:
- kfree(wr);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Delete an IP address from the RNIC interface
- */
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_rnic_setconfig_req *wr;
- struct c2wr_rnic_setconfig_rep *reply;
- struct c2_netaddr netaddr;
- int err, len;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- len = sizeof(struct c2_netaddr);
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
-
- netaddr.ip_addr = inaddr;
- netaddr.netmask = inmask;
- netaddr.mtu = 0;
-
- memcpy(wr->data, &netaddr, len);
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply =
- (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
-
- bail1:
- kfree(wr);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Open a single RNIC instance to use with all
- * low level openib calls
- */
-static int c2_rnic_open(struct c2_dev *c2dev)
-{
- struct c2_vq_req *vq_req;
- union c2wr wr;
- struct c2wr_rnic_open_rep *reply;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (vq_req == NULL) {
- return -ENOMEM;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
- wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
- wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
- wr.rnic_open.req.port_num = cpu_to_be16(0);
- wr.rnic_open.req.user_context = (unsigned long) c2dev;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, &wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- if ((err = c2_errno(reply)) != 0) {
- goto bail1;
- }
-
- c2dev->adapter_handle = reply->rnic_handle;
-
- bail1:
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Close the RNIC instance
- */
-static int c2_rnic_close(struct c2_dev *c2dev)
-{
- struct c2_vq_req *vq_req;
- union c2wr wr;
- struct c2wr_rnic_close_rep *reply;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (vq_req == NULL) {
- return -ENOMEM;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
- wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
- wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, &wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- if ((err = c2_errno(reply)) != 0) {
- goto bail1;
- }
-
- c2dev->adapter_handle = 0;
-
- bail1:
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Called by c2_probe to initialize the RNIC. This principally
- * involves initializing the various limits and resource pools that
- * comprise the RNIC instance.
- */
-int c2_rnic_init(struct c2_dev *c2dev)
-{
- int err;
- u32 qsize, msgsize;
- void *q1_pages;
- void *q2_pages;
- void __iomem *mmio_regs;
-
- /* Device capabilities */
- c2dev->device_cap_flags =
- (IB_DEVICE_RESIZE_MAX_WR |
- IB_DEVICE_CURR_QP_STATE_MOD |
- IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW);
-
- /* Allocate the qptr_array */
- c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
- if (!c2dev->qptr_array) {
- return -ENOMEM;
- }
-
- /* Initialize the qptr_array */
- c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
- c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
- c2dev->qptr_array[2] = (void *) &c2dev->aeq;
-
- /* Initialize data structures */
- init_waitqueue_head(&c2dev->req_vq_wo);
- spin_lock_init(&c2dev->vqlock);
- spin_lock_init(&c2dev->lock);
-
- /* Allocate MQ shared pointer pool for kernel clients. User
-	 * mode client pools are hung off the user context.
- */
- err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
- if (err) {
- goto bail0;
- }
-
- /* Allocate shared pointers for Q0, Q1, and Q2 from
- * the shared pointer pool.
- */
-
- c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->hint_count_dma,
- GFP_KERNEL);
- c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->req_vq.shared_dma,
- GFP_KERNEL);
- c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->rep_vq.shared_dma,
- GFP_KERNEL);
- c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->aeq.shared_dma, GFP_KERNEL);
- if (!c2dev->hint_count || !c2dev->req_vq.shared ||
- !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
- err = -ENOMEM;
- goto bail1;
- }
-
- mmio_regs = c2dev->kva;
- /* Initialize the Verbs Request Queue */
- c2_mq_req_init(&c2dev->req_vq, 0,
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
- C2_MQ_ADAPTER_TARGET);
-
- /* Initialize the Verbs Reply Queue */
- qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
- msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
- q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
- &c2dev->rep_vq.host_dma, GFP_KERNEL);
- if (!q1_pages) {
- err = -ENOMEM;
- goto bail1;
- }
- dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
- pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
- (unsigned long long) c2dev->rep_vq.host_dma);
- c2_mq_rep_init(&c2dev->rep_vq,
- 1,
- qsize,
- msgsize,
- q1_pages,
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
- C2_MQ_HOST_TARGET);
-
-	/* Initialize the Asynchronous Event Queue */
- qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
- msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
- q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
- &c2dev->aeq.host_dma, GFP_KERNEL);
- if (!q2_pages) {
- err = -ENOMEM;
- goto bail2;
- }
- dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
- pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
- (unsigned long long) c2dev->aeq.host_dma);
- c2_mq_rep_init(&c2dev->aeq,
- 2,
- qsize,
- msgsize,
- q2_pages,
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
- C2_MQ_HOST_TARGET);
-
- /* Initialize the verbs request allocator */
- err = vq_init(c2dev);
- if (err)
- goto bail3;
-
- /* Enable interrupts on the adapter */
- writel(0, c2dev->regs + C2_IDIS);
-
- /* create the WR init message */
- err = c2_adapter_init(c2dev);
- if (err)
- goto bail4;
- c2dev->init++;
-
- /* open an adapter instance */
- err = c2_rnic_open(c2dev);
- if (err)
- goto bail4;
-
-	/* Initialize the cached adapter limits */
- err = c2_rnic_query(c2dev, &c2dev->props);
- if (err)
- goto bail5;
-
- /* Initialize the PD pool */
- err = c2_init_pd_table(c2dev);
- if (err)
- goto bail5;
-
- /* Initialize the QP pool */
- c2_init_qp_table(c2dev);
- return 0;
-
- bail5:
- c2_rnic_close(c2dev);
- bail4:
- vq_term(c2dev);
- bail3:
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
- q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
- bail2:
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
- bail1:
- c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
- bail0:
- vfree(c2dev->qptr_array);
-
- return err;
-}
-
-/*
- * Called by c2_remove to cleanup the RNIC resources.
- */
-void c2_rnic_term(struct c2_dev *c2dev)
-{
-
- /* Close the open adapter instance */
- c2_rnic_close(c2dev);
-
- /* Send the TERM message to the adapter */
- c2_adapter_term(c2dev);
-
- /* Disable interrupts on the adapter */
- writel(1, c2dev->regs + C2_IDIS);
-
- /* Free the QP pool */
- c2_cleanup_qp_table(c2dev);
-
- /* Free the PD pool */
- c2_cleanup_pd_table(c2dev);
-
- /* Free the verbs request allocator */
- vq_term(c2dev);
-
-	/* Free the asynchronous event queue */
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
- c2dev->aeq.msg_pool.host,
- dma_unmap_addr(&c2dev->aeq, mapping));
-
- /* Free the verbs reply queue */
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- c2dev->rep_vq.msg_pool.host,
- dma_unmap_addr(&c2dev->rep_vq, mapping));
-
- /* Free the MQ shared pointer pool */
- c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-
- /* Free the qptr_array */
- vfree(c2dev->qptr_array);
-
- return;
-}
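
c2_rnic_query() above folds the three firmware version words into the single 64-bit props->fw_ver: major in the upper 32 bits, then 16 bits of minor, then 16 bits of patch. A standalone sketch of that packing; the sample version numbers are made up.

#include <stdint.h>
#include <stdio.h>

/* same layout c2_rnic_query() builds into props->fw_ver */
static uint64_t pack_fw_ver(uint32_t major, uint32_t minor, uint32_t patch)
{
	return ((uint64_t)major << 32) |
	       ((uint64_t)(minor & 0xFFFF) << 16) |
	       (uint64_t)(patch & 0xFFFF);
}

int main(void)
{
	/* a hypothetical firmware 1.2.3 */
	printf("fw_ver = 0x%016llx\n",
	       (unsigned long long)pack_fw_ver(1, 2, 3));
	return 0;
}
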
diff --git a/drivers/staging/rdma/amso1100/c2_status.h b/drivers/staging/rdma/amso1100/c2_status.h
deleted file mode 100644
index 6ee4aa92d875..000000000000
--- a/drivers/staging/rdma/amso1100/c2_status.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_STATUS_H_
-#define _C2_STATUS_H_
-
-/*
- * Verbs Status Codes
- */
-enum c2_status {
- C2_OK = 0, /* This must be zero */
- CCERR_INSUFFICIENT_RESOURCES = 1,
- CCERR_INVALID_MODIFIER = 2,
- CCERR_INVALID_MODE = 3,
- CCERR_IN_USE = 4,
- CCERR_INVALID_RNIC = 5,
- CCERR_INTERRUPTED_OPERATION = 6,
- CCERR_INVALID_EH = 7,
- CCERR_INVALID_CQ = 8,
- CCERR_CQ_EMPTY = 9,
- CCERR_NOT_IMPLEMENTED = 10,
- CCERR_CQ_DEPTH_TOO_SMALL = 11,
- CCERR_PD_IN_USE = 12,
- CCERR_INVALID_PD = 13,
- CCERR_INVALID_SRQ = 14,
- CCERR_INVALID_ADDRESS = 15,
- CCERR_INVALID_NETMASK = 16,
- CCERR_INVALID_QP = 17,
- CCERR_INVALID_QP_STATE = 18,
- CCERR_TOO_MANY_WRS_POSTED = 19,
- CCERR_INVALID_WR_TYPE = 20,
- CCERR_INVALID_SGL_LENGTH = 21,
- CCERR_INVALID_SQ_DEPTH = 22,
- CCERR_INVALID_RQ_DEPTH = 23,
- CCERR_INVALID_ORD = 24,
- CCERR_INVALID_IRD = 25,
- CCERR_QP_ATTR_CANNOT_CHANGE = 26,
- CCERR_INVALID_STAG = 27,
- CCERR_QP_IN_USE = 28,
- CCERR_OUTSTANDING_WRS = 29,
- CCERR_STAG_IN_USE = 30,
- CCERR_INVALID_STAG_INDEX = 31,
- CCERR_INVALID_SGL_FORMAT = 32,
- CCERR_ADAPTER_TIMEOUT = 33,
- CCERR_INVALID_CQ_DEPTH = 34,
- CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
- CCERR_INVALID_EP = 36,
- CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
- CCERR_FLUSHED = 38,
- CCERR_INVALID_WQE = 39,
- CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
- CCERR_REMOTE_TERMINATION_ERROR = 41,
- CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
- CCERR_ACCESS_VIOLATION = 43,
- CCERR_INVALID_PD_ID = 44,
- CCERR_WRAP_ERROR = 45,
- CCERR_INV_STAG_ACCESS_ERROR = 46,
- CCERR_ZERO_RDMA_READ_RESOURCES = 47,
- CCERR_QP_NOT_PRIVILEGED = 48,
- CCERR_STAG_STATE_NOT_INVALID = 49,
- CCERR_INVALID_PAGE_SIZE = 50,
- CCERR_INVALID_BUFFER_SIZE = 51,
- CCERR_INVALID_PBE = 52,
- CCERR_INVALID_FBO = 53,
- CCERR_INVALID_LENGTH = 54,
- CCERR_INVALID_ACCESS_RIGHTS = 55,
- CCERR_PBL_TOO_BIG = 56,
- CCERR_INVALID_VA = 57,
- CCERR_INVALID_REGION = 58,
- CCERR_INVALID_WINDOW = 59,
- CCERR_TOTAL_LENGTH_TOO_BIG = 60,
- CCERR_INVALID_QP_ID = 61,
- CCERR_ADDR_IN_USE = 62,
- CCERR_ADDR_NOT_AVAIL = 63,
- CCERR_NET_DOWN = 64,
- CCERR_NET_UNREACHABLE = 65,
- CCERR_CONN_ABORTED = 66,
- CCERR_CONN_RESET = 67,
- CCERR_NO_BUFS = 68,
- CCERR_CONN_TIMEDOUT = 69,
- CCERR_CONN_REFUSED = 70,
- CCERR_HOST_UNREACHABLE = 71,
- CCERR_INVALID_SEND_SGL_DEPTH = 72,
- CCERR_INVALID_RECV_SGL_DEPTH = 73,
- CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
- CCERR_INSUFFICIENT_PRIVILEGES = 75,
- CCERR_STACK_ERROR = 76,
- CCERR_INVALID_VERSION = 77,
- CCERR_INVALID_MTU = 78,
- CCERR_INVALID_IMAGE = 79,
-	CCERR_PENDING = 98,	/* not an error; used internally by adapter */
- CCERR_DEFER = 99, /* not an error; used internally by adapter */
- CCERR_FAILED_WRITE = 100,
- CCERR_FAILED_ERASE = 101,
- CCERR_FAILED_VERIFICATION = 102,
- CCERR_NOT_FOUND = 103,
-
-};
-
-/*
- * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
- */
-enum c2_connect_status {
- C2_CONN_STATUS_SUCCESS = C2_OK,
- C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
- C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
- C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
- C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
- C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
- C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
- C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
- C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
- C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
- C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
-};
-
-/*
- * Flash programming status codes.
- */
-enum c2_flash_status {
- C2_FLASH_STATUS_SUCCESS = 0x0000,
- C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
- C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
- C2_FLASH_STATUS_ECLBS = 0x0400,
- C2_FLASH_STATUS_PSLBS = 0x0800,
- C2_FLASH_STATUS_VPENS = 0x1000,
-};
-
-#endif /* _C2_STATUS_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_user.h b/drivers/staging/rdma/amso1100/c2_user.h
deleted file mode 100644
index 7e9e7ad65467..000000000000
--- a/drivers/staging/rdma/amso1100/c2_user.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_USER_H
-#define C2_USER_H
-
-#include <linux/types.h>
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct c2_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 uarc_size;
-};
-
-struct c2_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct c2_create_cq {
- __u32 lkey;
- __u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
- __u32 arm_db_index;
- __u32 set_db_index;
-};
-
-struct c2_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct c2_create_qp {
- __u32 lkey;
- __u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
- __u32 sq_db_index;
- __u32 rq_db_index;
-};
-
-#endif /* C2_USER_H */
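
c2_user.h spells out the usual kernel/user ABI rule: keep these structures laid out identically on 32-bit and 64-bit builds and never embed raw pointers, passing userspace addresses in __u64 fields instead. A minimal userspace sketch of that convention; the demo struct is a hypothetical cut-down mirror of the c2_create_cq idea, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* fixed-layout command: a 64-bit field carries an address, never a pointer */
struct demo_create_cq {
	uint32_t lkey;
	uint32_t pdn;
	uint64_t arm_db_page;	/* userspace address of the doorbell page */
};

int main(void)
{
	static int db_page;	/* stand-in for a mapped doorbell page */
	struct demo_create_cq cmd = {
		.arm_db_page = (uint64_t)(uintptr_t)&db_page,
	};

	/* the receiving side converts back through uintptr_t the same way */
	int *p = (int *)(uintptr_t)cmd.arm_db_page;

	printf("sizeof(cmd) = %zu, round-trip ok: %d\n",
	       sizeof(cmd), p == &db_page);
	return 0;
}
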
diff --git a/drivers/staging/rdma/amso1100/c2_vq.c b/drivers/staging/rdma/amso1100/c2_vq.c
deleted file mode 100644
index 2ec716fb2edb..000000000000
--- a/drivers/staging/rdma/amso1100/c2_vq.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include "c2_vq.h"
-#include "c2_provider.h"
-
-/*
- * Verbs Request Objects:
- *
- * VQ Request Objects are allocated by the kernel verbs handlers.
- * They contain a wait object, a refcnt, an atomic bool indicating that the
- * adapter has replied, and a copy of the verb reply work request.
- * A pointer to the VQ Request Object is passed down in the context
- * field of the work request message, and reflected back by the adapter
- * in the verbs reply message. The function handle_vq() in the interrupt
- * path will use this pointer to:
- * 1) append a copy of the verbs reply message
- * 2) mark that the reply is ready
- * 3) wake up the kernel verbs handler blocked awaiting the reply.
- *
- *
- * The kernel verbs handlers do a "get" to put a 2nd reference on the
- * VQ Request object. If the kernel verbs handler exits before the adapter
- * can respond, this extra reference will keep the VQ Request object around
- * until the adapter's reply can be processed. The reason we need this is
- * because a pointer to this object is stuffed into the context field of
- * the verbs work request message, and reflected back in the reply message.
- * It is used in the interrupt handler (handle_vq()) to wake up the appropriate
- * kernel verb handler that is blocked awaiting the verb reply.
- * So handle_vq() will do a "put" on the object when it's done accessing it.
- * NOTE: If we guarantee that the kernel verb handler will never bail before
- * getting the reply, then we don't need these refcnts.
- *
- *
- * VQ Request objects are freed by the kernel verbs handlers only
- * after the verb has been processed, or when the adapter fails and
- * does not reply.
- *
- *
- * Verbs Reply Buffers:
- *
- * VQ Reply bufs are local host memory copies of an outstanding Verb
- * Request reply message. They are always allocated by the kernel
- * verbs handlers, and _may_ be
- * freed by either the kernel verbs handler -or- the interrupt handler. The
- * kernel verbs handler _must_ free the repbuf, then free the vq request object
- * in that order.
- */
-
-int vq_init(struct c2_dev *c2dev)
-{
- sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
- (char) ('0' + c2dev->devnum));
- c2dev->host_msg_cache =
- kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (c2dev->host_msg_cache == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
-
-void vq_term(struct c2_dev *c2dev)
-{
- kmem_cache_destroy(c2dev->host_msg_cache);
-}
-
-/* vq_req_alloc - allocate a VQ Request Object and initialize it.
- * The refcnt is set to 1.
- */
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
-{
- struct c2_vq_req *r;
-
- r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
- if (r) {
- init_waitqueue_head(&r->wait_object);
- r->reply_msg = 0;
- r->event = 0;
- r->cm_id = NULL;
- r->qp = NULL;
- atomic_set(&r->refcnt, 1);
- atomic_set(&r->reply_ready, 0);
- }
- return r;
-}
-
-
-/* vq_req_free - free the VQ Request Object. It is assumed the verbs handler
- * has already freed the VQ Reply Buffer if it existed.
- */
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
- r->reply_msg = 0;
- if (atomic_dec_and_test(&r->refcnt)) {
- kfree(r);
- }
-}
-
-/* vq_req_get - reference a VQ Request Object. Done
- * only in the kernel verbs handlers.
- */
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
- atomic_inc(&r->refcnt);
-}
-
-
-/* vq_req_put - dereference and potentially free a VQ Request Object.
- *
- * This is only called by handle_vq() on the
- * interrupt when it is done processing
- * a verb reply message. If the associated
- * kernel verbs handler has already bailed,
- * then this put will actually free the VQ
- * Request object _and_ the VQ Reply Buffer
- * if it exists.
- */
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
- if (atomic_dec_and_test(&r->refcnt)) {
- if (r->reply_msg != 0)
- vq_repbuf_free(c2dev,
- (void *) (unsigned long) r->reply_msg);
- kfree(r);
- }
-}
-
-
-/*
- * vq_repbuf_alloc - allocate a VQ Reply Buffer.
- */
-void *vq_repbuf_alloc(struct c2_dev *c2dev)
-{
- return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
-}
-
-/*
- * vq_send_wr - post a verbs request message to the Verbs Request Queue.
- * If a message is not available in the MQ, then block until one is available.
- * NOTE: handle_mq() in interrupt context will wake up threads blocked here.
- * When the adapter drains the Verbs Request Queue,
- * it inserts MQ index 0 into the
- * adapter->host activity fifo and interrupts the host.
- */
-int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
-{
- void *msg;
- wait_queue_t __wait;
-
- /*
- * grab adapter vq lock
- */
- spin_lock(&c2dev->vqlock);
-
- /*
- * allocate msg
- */
- msg = c2_mq_alloc(&c2dev->req_vq);
-
- /*
-	 * If we cannot get a msg, then we'll wait.
-	 * When a message becomes available, the interrupt handler will wake_up()
- * any waiters.
- */
- while (msg == NULL) {
- pr_debug("%s:%d no available msg in VQ, waiting...\n",
- __func__, __LINE__);
- init_waitqueue_entry(&__wait, current);
- add_wait_queue(&c2dev->req_vq_wo, &__wait);
- spin_unlock(&c2dev->vqlock);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!c2_mq_full(&c2dev->req_vq)) {
- break;
- }
- if (!signal_pending(current)) {
- schedule_timeout(1 * HZ); /* 1 second... */
- continue;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&c2dev->req_vq_wo, &__wait);
- return -EINTR;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&c2dev->req_vq_wo, &__wait);
- spin_lock(&c2dev->vqlock);
- msg = c2_mq_alloc(&c2dev->req_vq);
- }
-
- /*
- * copy wr into adapter msg
- */
- memcpy(msg, wr, c2dev->req_vq.msg_size);
-
- /*
- * post msg
- */
- c2_mq_produce(&c2dev->req_vq);
-
- /*
- * release adapter vq lock
- */
- spin_unlock(&c2dev->vqlock);
- return 0;
-}
-
-
-/*
- * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
- */
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
-{
- if (!wait_event_timeout(req->wait_object,
- atomic_read(&req->reply_ready),
- 60*HZ))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-/*
- * vq_repbuf_free - Free a Verbs Reply Buffer.
- */
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
-{
- kmem_cache_free(c2dev->host_msg_cache, reply);
-}
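The request/reply lifecycle spelled out in the comments above corresponds to a fixed call sequence in the kernel verbs handlers (those callers are outside this hunk). A minimal sketch of that sequence follows, assuming a generic verb and a hypothetical wrapper name; it is illustrative only, not a call site copied from the driver:

static int example_verb_handler(struct c2_dev *c2dev, union c2wr *wr)
{
	struct c2_vq_req *vq_req;
	void *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);			/* refcnt == 1 */
	if (!vq_req)
		return -ENOMEM;

	/* reflected back in the reply so handle_vq() can find this request */
	wr->hdr.context = (unsigned long) vq_req;

	vq_req_get(c2dev, vq_req);			/* extra ref for handle_vq() */
	err = vq_send_wr(c2dev, wr);			/* may block for MQ space */
	if (err) {
		vq_req_put(c2dev, vq_req);		/* nothing for the irq side to drop */
		goto out;
	}

	err = vq_wait_for_reply(c2dev, vq_req);		/* woken by handle_vq() */
	if (err)
		goto out;

	reply = (void *) (unsigned long) vq_req->reply_msg;
	/* ... interpret the verb-specific reply here ... */

	vq_repbuf_free(c2dev, reply);			/* reply buffer first ... */
out:
	vq_req_free(c2dev, vq_req);			/* ... then the request object */
	return err;
}

On a timeout the extra reference taken for handle_vq() is deliberately left in place, so a late reply lets handle_vq() perform the final put and free both the request object and any reply buffer, as described in vq_req_put() above.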
diff --git a/drivers/staging/rdma/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
deleted file mode 100644
index 33805627a607..000000000000
--- a/drivers/staging/rdma/amso1100/c2_vq.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_VQ_H_
-#define _C2_VQ_H_
-#include <linux/sched.h>
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_provider.h"
-
-struct c2_vq_req {
- u64 reply_msg; /* ptr to reply msg */
- wait_queue_head_t wait_object; /* wait object for vq reqs */
- atomic_t reply_ready; /* set when reply is ready */
- atomic_t refcnt; /* used to cancel WRs... */
- int event;
- struct iw_cm_id *cm_id;
- struct c2_qp *qp;
-};
-
-extern int vq_init(struct c2_dev *c2dev);
-extern void vq_term(struct c2_dev *c2dev);
-
-extern struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
-extern void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
-extern void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
-extern void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
-extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
-
-extern void *vq_repbuf_alloc(struct c2_dev *c2dev);
-extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
-
-extern int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
-#endif /* _C2_VQ_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_wr.h b/drivers/staging/rdma/amso1100/c2_wr.h
deleted file mode 100644
index 8d4b4ca463ca..000000000000
--- a/drivers/staging/rdma/amso1100/c2_wr.h
+++ /dev/null
@@ -1,1520 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_WR_H_
-#define _C2_WR_H_
-
-#ifdef CCDEBUG
-#define CCWR_MAGIC 0xb07700b0
-#endif
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-/* Maximum allowed size in bytes of private_data exchange
- * on connect.
- */
-#define C2_MAX_PRIVATE_DATA_SIZE 200
-
-/*
- * These types are shared among the adapter, host, and CCIL consumer.
- */
-enum c2_cq_notification_type {
- C2_CQ_NOTIFICATION_TYPE_NONE = 1,
- C2_CQ_NOTIFICATION_TYPE_NEXT,
- C2_CQ_NOTIFICATION_TYPE_NEXT_SE
-};
-
-enum c2_setconfig_cmd {
- C2_CFG_ADD_ADDR = 1,
- C2_CFG_DEL_ADDR = 2,
- C2_CFG_ADD_ROUTE = 3,
- C2_CFG_DEL_ROUTE = 4
-};
-
-enum c2_getconfig_cmd {
- C2_GETCONFIG_ROUTES = 1,
- C2_GETCONFIG_ADDRS
-};
-
-/*
- * CCIL Work Request Identifiers
- */
-enum c2wr_ids {
- CCWR_RNIC_OPEN = 1,
- CCWR_RNIC_QUERY,
- CCWR_RNIC_SETCONFIG,
- CCWR_RNIC_GETCONFIG,
- CCWR_RNIC_CLOSE,
- CCWR_CQ_CREATE,
- CCWR_CQ_QUERY,
- CCWR_CQ_MODIFY,
- CCWR_CQ_DESTROY,
- CCWR_QP_CONNECT,
- CCWR_PD_ALLOC,
- CCWR_PD_DEALLOC,
- CCWR_SRQ_CREATE,
- CCWR_SRQ_QUERY,
- CCWR_SRQ_MODIFY,
- CCWR_SRQ_DESTROY,
- CCWR_QP_CREATE,
- CCWR_QP_QUERY,
- CCWR_QP_MODIFY,
- CCWR_QP_DESTROY,
- CCWR_NSMR_STAG_ALLOC,
- CCWR_NSMR_REGISTER,
- CCWR_NSMR_PBL,
- CCWR_STAG_DEALLOC,
- CCWR_NSMR_REREGISTER,
- CCWR_SMR_REGISTER,
- CCWR_MR_QUERY,
- CCWR_MW_ALLOC,
- CCWR_MW_QUERY,
- CCWR_EP_CREATE,
- CCWR_EP_GETOPT,
- CCWR_EP_SETOPT,
- CCWR_EP_DESTROY,
- CCWR_EP_BIND,
- CCWR_EP_CONNECT,
- CCWR_EP_LISTEN,
- CCWR_EP_SHUTDOWN,
- CCWR_EP_LISTEN_CREATE,
- CCWR_EP_LISTEN_DESTROY,
- CCWR_EP_QUERY,
- CCWR_CR_ACCEPT,
- CCWR_CR_REJECT,
- CCWR_CONSOLE,
- CCWR_TERM,
- CCWR_FLASH_INIT,
- CCWR_FLASH,
- CCWR_BUF_ALLOC,
- CCWR_BUF_FREE,
- CCWR_FLASH_WRITE,
- CCWR_INIT, /* WARNING: Don't move this ever again! */
-
-
-
- /* Add new IDs here */
-
-
-
- /*
- * WARNING: CCWR_LAST must always be the last verbs id defined!
- * All the preceding IDs are fixed, and must not change.
- * You can add new IDs, but must not remove or reorder
- * any IDs. If you do, YOU will ruin any hope of
- * compatibility between versions.
- */
- CCWR_LAST,
-
- /*
- * Start over at 1 so that arrays indexed by user wr id's
- * begin at 1. This is OK since the verbs and user wr id's
- * are always used on disjoint sets of queues.
- */
- /*
- * The order of the CCWR_SEND_XX verbs must
- * match the order of the RDMA_OPs
- */
- CCWR_SEND = 1,
- CCWR_SEND_INV,
- CCWR_SEND_SE,
- CCWR_SEND_SE_INV,
- CCWR_RDMA_WRITE,
- CCWR_RDMA_READ,
- CCWR_RDMA_READ_INV,
- CCWR_MW_BIND,
- CCWR_NSMR_FASTREG,
- CCWR_STAG_INVALIDATE,
- CCWR_RECV,
- CCWR_NOP,
- CCWR_UNIMPL,
-/* WARNING: This must always be the last user wr id defined! */
-};
-#define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2)
-
-/*
- * SQ/RQ Work Request Types
- */
-enum c2_wr_type {
- C2_WR_TYPE_SEND = CCWR_SEND,
- C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
- C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
- C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
- C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
- C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
- C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
- C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
- C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
- C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
- C2_WR_TYPE_RECV = CCWR_RECV,
- C2_WR_TYPE_NOP = CCWR_NOP,
-};
-
-struct c2_netaddr {
- __be32 ip_addr;
- __be32 netmask;
- u32 mtu;
-};
-
-struct c2_route {
- u32 ip_addr; /* 0 indicates the default route */
- u32 netmask; /* netmask associated with dst */
- u32 flags;
- union {
- u32 ipaddr; /* address of the nexthop interface */
- u8 enaddr[6];
- } nexthop;
-};
-
-/*
- * A Scatter Gather Entry.
- */
-struct c2_data_addr {
- __be32 stag;
- __be32 length;
- __be64 to;
-};
-
-/*
- * MR and MW flags used by the consumer, RI, and RNIC.
- */
-enum c2_mm_flags {
- MEM_REMOTE = 0x0001, /* allow mw binds with remote access. */
- MEM_VA_BASED = 0x0002, /* Not Zero-based */
- MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */
- MEM_LOCAL_READ = 0x0008, /* allow local reads */
- MEM_LOCAL_WRITE = 0x0010, /* allow local writes */
- MEM_REMOTE_READ = 0x0020, /* allow remote reads */
- MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */
- MEM_WINDOW_BIND = 0x0080, /* binds allowed */
- MEM_SHARED = 0x0100, /* set if MR is shared */
- MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
-};
-
-/*
- * CCIL API ACF flags defined in terms of the low level mem flags.
- * This minimizes translation needed in the user API
- */
-enum c2_acf {
- C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
- C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
- C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
- C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
- C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
-};
-
-/*
- * Image types of objects written to flash
- */
-#define C2_FLASH_IMG_BITFILE 1
-#define C2_FLASH_IMG_OPTION_ROM 2
-#define C2_FLASH_IMG_VPD 3
-
-/*
- * To fix bug 1815 we define the maximum allowable size of the
- * terminate message (per the IETF spec; refer to the IETF
- * protocol specification, section 12.1.6, page 64).
- * The message is prefixed by 20 bytes of DDP info.
- *
- * Then the message has 6 bytes for the terminate control
- * and DDP segment length info, plus a DDP header (either
- * 14 or 18 bytes), plus 28 bytes for the RDMA header.
- * Thus the max size is:
- * 20 + (6 + 18 + 28) = 72
- */
-#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
-
-/*
- * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h
- */
-#define WR_BUILD_STR_LEN 64
-
-/*
- * WARNING: All of these structs need to align any 64bit types on
- * 64-bit boundaries! 64-bit types here include u64 and __be64.
- */
-
-/*
- * Clustercore Work Request Header. Be sensitive to field layout
- * and alignment.
- */
-struct c2wr_hdr {
- /* wqe_count is part of the cqe. It is put here so the
- * adapter can write to it while the wr is pending without
- * clobbering part of the wr. This word need not be dma'd
-	 * from the host to the adapter by libccil, but we copy it anyway
- * to make the memcpy to the adapter better aligned.
- */
- __be32 wqe_count;
-
- /* Put these fields next so that later 32- and 64-bit
- * quantities are naturally aligned.
- */
- u8 id;
- u8 result; /* adapter -> host */
- u8 sge_count; /* host -> adapter */
- u8 flags; /* host -> adapter */
-
- u64 context;
-#ifdef CCMSGMAGIC
- u32 magic;
- u32 pad;
-#endif
-} __attribute__((packed));
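The alignment warning above is mechanically checkable. A hedged sketch of the kind of compile-time assertion one could add (it is not part of the driver); the only facts it relies on are the field sizes visible in the struct:

/* Illustrative only: verify the 64-bit 'context' field lands on an 8-byte offset. */
#include <linux/bug.h>		/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */

static inline void c2wr_hdr_layout_check(void)
{
	/* 4-byte wqe_count plus four u8 fields place 'context' at offset 8 */
	BUILD_BUG_ON(offsetof(struct c2wr_hdr, context) != 8);
}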
-
-/*
- *------------------------ RNIC ------------------------
- */
-
-/*
- * WR_RNIC_OPEN
- */
-
-/*
- * Flags for the RNIC WRs
- */
-enum c2_rnic_flags {
- RNIC_IRD_STATIC = 0x0001,
- RNIC_ORD_STATIC = 0x0002,
- RNIC_QP_STATIC = 0x0004,
- RNIC_SRQ_SUPPORTED = 0x0008,
- RNIC_PBL_BLOCK_MODE = 0x0010,
- RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
- RNIC_CQ_OVF_DETECTED = 0x0040,
- RNIC_PRIV_MODE = 0x0080
-};
-
-struct c2wr_rnic_open_req {
- struct c2wr_hdr hdr;
- u64 user_context;
- __be16 flags; /* See enum c2_rnic_flags */
- __be16 port_num;
-} __attribute__((packed));
-
-struct c2wr_rnic_open_rep {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed));
-
-union c2wr_rnic_open {
- struct c2wr_rnic_open_req req;
- struct c2wr_rnic_open_rep rep;
-} __attribute__((packed));
-
-struct c2wr_rnic_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_QUERY
- */
-struct c2wr_rnic_query_rep {
- struct c2wr_hdr hdr;
- u64 user_context;
- __be32 vendor_id;
- __be32 part_number;
- __be32 hw_version;
- __be32 fw_ver_major;
- __be32 fw_ver_minor;
- __be32 fw_ver_patch;
- char fw_ver_build_str[WR_BUILD_STR_LEN];
- __be32 max_qps;
- __be32 max_qp_depth;
- u32 max_srq_depth;
- u32 max_send_sgl_depth;
- u32 max_rdma_sgl_depth;
- __be32 max_cqs;
- __be32 max_cq_depth;
- u32 max_cq_event_handlers;
- __be32 max_mrs;
- u32 max_pbl_depth;
- __be32 max_pds;
- __be32 max_global_ird;
- u32 max_global_ord;
- __be32 max_qp_ird;
- __be32 max_qp_ord;
- u32 flags;
- __be32 max_mws;
- u32 pbe_range_low;
- u32 pbe_range_high;
- u32 max_srqs;
- u32 page_size;
-} __attribute__((packed));
-
-union c2wr_rnic_query {
- struct c2wr_rnic_query_req req;
- struct c2wr_rnic_query_rep rep;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_GETCONFIG
- */
-
-struct c2wr_rnic_getconfig_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 option; /* see c2_getconfig_cmd_t */
- u64 reply_buf;
- u32 reply_buf_len;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_getconfig_rep {
- struct c2wr_hdr hdr;
- u32 option; /* see c2_getconfig_cmd_t */
- u32 count_len; /* length of the number of addresses configured */
-} __attribute__((packed)) ;
-
-union c2wr_rnic_getconfig {
- struct c2wr_rnic_getconfig_req req;
- struct c2wr_rnic_getconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_SETCONFIG
- */
-struct c2wr_rnic_setconfig_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- __be32 option; /* See c2_setconfig_cmd_t */
- /* variable data and pad. See c2_netaddr and c2_route */
- u8 data[0];
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_setconfig_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_setconfig {
- struct c2wr_rnic_setconfig_req req;
- struct c2wr_rnic_setconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_CLOSE
- */
-struct c2wr_rnic_close_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_close_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_close {
- struct c2wr_rnic_close_req req;
- struct c2wr_rnic_close_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ CQ ------------------------
- */
-struct c2wr_cq_create_req {
- struct c2wr_hdr hdr;
- __be64 shared_ht;
- u64 user_context;
- __be64 msg_pool;
- u32 rnic_handle;
- __be32 msg_size;
- __be32 depth;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_create_rep {
- struct c2wr_hdr hdr;
- __be32 mq_index;
- __be32 adapter_shared;
- u32 cq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_cq_create {
- struct c2wr_cq_create_req req;
- struct c2wr_cq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 cq_handle;
- u32 new_depth;
- u64 new_msg_pool;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_modify {
- struct c2wr_cq_modify_req req;
- struct c2wr_cq_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 cq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_destroy {
- struct c2wr_cq_destroy_req req;
- struct c2wr_cq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ PD ------------------------
- */
-struct c2wr_pd_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_alloc_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_alloc {
- struct c2wr_pd_alloc_req req;
- struct c2wr_pd_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_dealloc {
- struct c2wr_pd_dealloc_req req;
- struct c2wr_pd_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ SRQ ------------------------
- */
-struct c2wr_srq_create_req {
- struct c2wr_hdr hdr;
- u64 shared_ht;
- u64 user_context;
- u32 rnic_handle;
- u32 srq_depth;
- u32 srq_limit;
- u32 sgl_depth;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_create_rep {
- struct c2wr_hdr hdr;
- u32 srq_depth;
- u32 sgl_depth;
- u32 msg_size;
- u32 mq_index;
- u32 mq_start;
- u32 srq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_srq_create {
- struct c2wr_srq_create_req req;
- struct c2wr_srq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 srq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_srq_destroy {
- struct c2wr_srq_destroy_req req;
- struct c2wr_srq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ QP ------------------------
- */
-enum c2wr_qp_flags {
- QP_RDMA_READ = 0x00000001, /* RDMA read enabled? */
- QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */
- QP_MW_BIND = 0x00000004, /* MWs enabled */
- QP_ZERO_STAG = 0x00000008, /* enabled? */
- QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */
- QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */
- /* enabled? */
-};
-
-struct c2wr_qp_create_req {
- struct c2wr_hdr hdr;
- __be64 shared_sq_ht;
- __be64 shared_rq_ht;
- u64 user_context;
- u32 rnic_handle;
- u32 sq_cq_handle;
- u32 rq_cq_handle;
- __be32 sq_depth;
- __be32 rq_depth;
- u32 srq_handle;
- u32 srq_limit;
- __be32 flags; /* see enum c2wr_qp_flags */
- __be32 send_sgl_depth;
- __be32 recv_sgl_depth;
- __be32 rdma_write_sgl_depth;
- __be32 ord;
- __be32 ird;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_create_rep {
- struct c2wr_hdr hdr;
- __be32 sq_depth;
- __be32 rq_depth;
- u32 send_sgl_depth;
- u32 recv_sgl_depth;
- u32 rdma_write_sgl_depth;
- u32 ord;
- u32 ird;
- __be32 sq_msg_size;
- __be32 sq_mq_index;
- __be32 sq_mq_start;
- __be32 rq_msg_size;
- __be32 rq_mq_index;
- __be32 rq_mq_start;
- u32 qp_handle;
-} __attribute__((packed)) ;
-
-union c2wr_qp_create {
- struct c2wr_qp_create_req req;
- struct c2wr_qp_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_rep {
- struct c2wr_hdr hdr;
- u64 user_context;
- u32 rnic_handle;
- u32 sq_depth;
- u32 rq_depth;
- u32 send_sgl_depth;
- u32 rdma_write_sgl_depth;
- u32 recv_sgl_depth;
- u32 ord;
- u32 ird;
- u16 qp_state;
- u16 flags; /* see c2wr_qp_flags_t */
- u32 qp_id;
- u32 local_addr;
- u32 remote_addr;
- u16 local_port;
- u16 remote_port;
- u32 terminate_msg_length; /* 0 if not present */
- u8 data[0];
- /* Terminate Message in-line here. */
-} __attribute__((packed)) ;
-
-union c2wr_qp_query {
- struct c2wr_qp_query_req req;
- struct c2wr_qp_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_req {
- struct c2wr_hdr hdr;
- u64 stream_msg;
- u32 stream_msg_length;
- u32 rnic_handle;
- u32 qp_handle;
- __be32 next_qp_state;
- __be32 ord;
- __be32 ird;
- __be32 sq_depth;
- __be32 rq_depth;
- u32 llp_ep_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_rep {
- struct c2wr_hdr hdr;
- u32 ord;
- u32 ird;
- u32 sq_depth;
- u32 rq_depth;
- u32 sq_msg_size;
- u32 sq_mq_index;
- u32 sq_mq_start;
- u32 rq_msg_size;
- u32 rq_mq_index;
- u32 rq_mq_start;
-} __attribute__((packed)) ;
-
-union c2wr_qp_modify {
- struct c2wr_qp_modify_req req;
- struct c2wr_qp_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_qp_destroy {
- struct c2wr_qp_destroy_req req;
- struct c2wr_qp_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can
- * only be posted when a QP is in IDLE state. After the connect request is
- * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
- * There is no synchronous reply from the adapter to this WR.  The results
- * of the connection attempt are passed back in the async event
- * CCAE_ACTIVE_CONNECT_RESULTS.  See c2wr_ae_active_connect_results_t.
- */
-struct c2wr_qp_connect_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle;
- __be32 remote_addr;
- __be16 remote_port;
- u16 pad;
- __be32 private_data_length;
- u8 private_data[0]; /* Private data in-line. */
-} __attribute__((packed)) ;
-
-struct c2wr_qp_connect {
- struct c2wr_qp_connect_req req;
- /* no synchronous reply. */
-} __attribute__((packed)) ;
-
-
-/*
- *------------------------ MM ------------------------
- */
-
-struct c2wr_nsmr_stag_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pbl_depth;
- u32 pd_id;
- u32 flags;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_stag_alloc_rep {
- struct c2wr_hdr hdr;
- u32 pbl_depth;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_stag_alloc {
- struct c2wr_nsmr_stag_alloc_req req;
- struct c2wr_nsmr_stag_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_req {
- struct c2wr_hdr hdr;
- __be64 va;
- u32 rnic_handle;
- __be16 flags;
- u8 stag_key;
- u8 pad;
- u32 pd_id;
- __be32 pbl_depth;
- __be32 pbe_size;
- __be32 fbo;
- __be32 length;
- __be32 addrs_length;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_rep {
- struct c2wr_hdr hdr;
- u32 pbl_depth;
- __be32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_register {
- struct c2wr_nsmr_register_req req;
- struct c2wr_nsmr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- __be32 flags;
- __be32 stag_index;
- __be32 addrs_length;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_pbl {
- struct c2wr_nsmr_pbl_req req;
- struct c2wr_nsmr_pbl_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_rep {
- struct c2wr_hdr hdr;
- u8 stag_key;
- u8 pad[3];
- u32 pd_id;
- u32 flags;
- u32 pbl_depth;
-} __attribute__((packed)) ;
-
-union c2wr_mr_query {
- struct c2wr_mr_query_req req;
- struct c2wr_mr_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_rep {
- struct c2wr_hdr hdr;
- u8 stag_key;
- u8 pad[3];
- u32 pd_id;
- u32 flags;
-} __attribute__((packed)) ;
-
-union c2wr_mw_query {
- struct c2wr_mw_query_req req;
- struct c2wr_mw_query_rep rep;
-} __attribute__((packed)) ;
-
-
-struct c2wr_stag_dealloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- __be32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_stag_dealloc_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_stag_dealloc {
- struct c2wr_stag_dealloc_req req;
- struct c2wr_stag_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_req {
- struct c2wr_hdr hdr;
- u64 va;
- u32 rnic_handle;
- u16 flags;
- u8 stag_key;
- u8 pad;
- u32 stag_index;
- u32 pd_id;
- u32 pbl_depth;
- u32 pbe_size;
- u32 fbo;
- u32 length;
- u32 addrs_length;
- u32 pad1;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- u64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_rep {
- struct c2wr_hdr hdr;
- u32 pbl_depth;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_reregister {
- struct c2wr_nsmr_reregister_req req;
- struct c2wr_nsmr_reregister_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_req {
- struct c2wr_hdr hdr;
- u64 va;
- u32 rnic_handle;
- u16 flags;
- u8 stag_key;
- u8 pad;
- u32 stag_index;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_rep {
- struct c2wr_hdr hdr;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_smr_register {
- struct c2wr_smr_register_req req;
- struct c2wr_smr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_rep {
- struct c2wr_hdr hdr;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_mw_alloc {
- struct c2wr_mw_alloc_req req;
- struct c2wr_mw_alloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ WRs -----------------------
- */
-
-struct c2wr_user_hdr {
- struct c2wr_hdr hdr; /* Has status and WR Type */
-} __attribute__((packed)) ;
-
-enum c2_qp_state {
- C2_QP_STATE_IDLE = 0x01,
- C2_QP_STATE_CONNECTING = 0x02,
- C2_QP_STATE_RTS = 0x04,
- C2_QP_STATE_CLOSING = 0x08,
- C2_QP_STATE_TERMINATE = 0x10,
- C2_QP_STATE_ERROR = 0x20,
-};
-
-/* Completion queue entry. */
-struct c2wr_ce {
- struct c2wr_hdr hdr; /* Has status and WR Type */
- u64 qp_user_context; /* c2_user_qp_t * */
- u32 qp_state; /* Current QP State */
- u32 handle; /* QPID or EP Handle */
- __be32 bytes_rcvd; /* valid for RECV WCs */
- u32 stag;
-} __attribute__((packed)) ;
-
-
-/*
- * Flags used for all post-sq WRs. These must fit in the flags
- * field of the struct c2wr_hdr (eight bits).
- */
-enum {
- SQ_SIGNALED = 0x01,
- SQ_READ_FENCE = 0x02,
- SQ_FENCE = 0x04,
-};
-
-/*
- * Common fields for all post-sq WRs. Namely the standard header and a
- * secondary header with fields common to all post-sq WRs.
- */
-struct c2_sq_hdr {
- struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * Same as above but for post-rq WRs.
- */
-struct c2_rq_hdr {
- struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * use the same struct for all sends.
- */
-struct c2wr_send_req {
- struct c2_sq_hdr sq_hdr;
- __be32 sge_len;
- __be32 remote_stag;
- u8 data[0]; /* SGE array */
-} __attribute__((packed));
-
-union c2wr_send {
- struct c2wr_send_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_write_req {
- struct c2_sq_hdr sq_hdr;
- __be64 remote_to;
- __be32 remote_stag;
- __be32 sge_len;
- u8 data[0]; /* SGE array */
-} __attribute__((packed));
-
-union c2wr_rdma_write {
- struct c2wr_rdma_write_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_read_req {
- struct c2_sq_hdr sq_hdr;
- __be64 local_to;
- __be64 remote_to;
- __be32 local_stag;
- __be32 remote_stag;
- __be32 length;
-} __attribute__((packed));
-
-union c2wr_rdma_read {
- struct c2wr_rdma_read_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_mw_bind_req {
- struct c2_sq_hdr sq_hdr;
- u64 va;
- u8 stag_key;
- u8 pad[3];
- u32 mw_stag_index;
- u32 mr_stag_index;
- u32 length;
- u32 flags;
-} __attribute__((packed));
-
-union c2wr_mw_bind {
- struct c2wr_mw_bind_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_nsmr_fastreg_req {
- struct c2_sq_hdr sq_hdr;
- u64 va;
- u8 stag_key;
- u8 pad[3];
- u32 stag_index;
- u32 pbe_size;
- u32 fbo;
- u32 length;
- u32 addrs_length;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- u64 paddrs[0];
-} __attribute__((packed));
-
-union c2wr_nsmr_fastreg {
- struct c2wr_nsmr_fastreg_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_stag_invalidate_req {
- struct c2_sq_hdr sq_hdr;
- u8 stag_key;
- u8 pad[3];
- u32 stag_index;
-} __attribute__((packed));
-
-union c2wr_stag_invalidate {
- struct c2wr_stag_invalidate_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-union c2wr_sqwr {
- struct c2_sq_hdr sq_hdr;
- struct c2wr_send_req send;
- struct c2wr_send_req send_se;
- struct c2wr_send_req send_inv;
- struct c2wr_send_req send_se_inv;
- struct c2wr_rdma_write_req rdma_write;
- struct c2wr_rdma_read_req rdma_read;
- struct c2wr_mw_bind_req mw_bind;
- struct c2wr_nsmr_fastreg_req nsmr_fastreg;
- struct c2wr_stag_invalidate_req stag_inv;
-} __attribute__((packed));
-
-
-/*
- * RQ WRs
- */
-struct c2wr_rqwr {
- struct c2_rq_hdr rq_hdr;
- u8 data[0]; /* array of SGEs */
-} __attribute__((packed));
-
-union c2wr_recv {
- struct c2wr_rqwr req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-/*
- * All AEs start with this header. Most AEs only need to convey the
- * information in the header. Some, like LLP connection events, need
- * more info. The union typedef c2wr_ae_t has all the possible AEs.
- *
- * hdr.context is the user_context from the rnic_open WR. It is NULL if this
- * AE is not affiliated with an rnic.
- *
- * hdr.id is the AE identifier (eg; CCAE_REMOTE_SHUTDOWN,
- * CCAE_LLP_CLOSE_COMPLETE)
- *
- * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
- *
- * user_context is the context passed down when the host created the resource.
- */
-struct c2wr_ae_hdr {
- struct c2wr_hdr hdr;
- u64 user_context; /* user context for this res. */
- __be32 resource_type; /* see enum c2_resource_indicator */
- __be32 resource; /* handle for resource */
- __be32 qp_state; /* current QP State */
-} __attribute__((packed));
-
-/*
- * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
- * the adapter moves the QP into the RTS state.
- */
-struct c2wr_ae_active_connect_results {
- struct c2wr_ae_hdr ae_hdr;
- __be32 laddr;
- __be32 raddr;
- __be16 lport;
- __be16 rport;
- __be32 private_data_length;
- u8 private_data[0]; /* data is in-line in the msg. */
-} __attribute__((packed));
-
-/*
- * When connections are established by the stack (and the private data
- * MPA frame is received), the adapter will generate an event to the host.
- * The details of the connection, any private data, and the new connection
- * request handle are passed up via the CCAE_CONNECTION_REQUEST msg on the
- * AE queue:
- */
-struct c2wr_ae_connection_request {
- struct c2wr_ae_hdr ae_hdr;
- u32 cr_handle; /* connreq handle (sock ptr) */
- __be32 laddr;
- __be32 raddr;
- __be16 lport;
- __be16 rport;
- __be32 private_data_length;
- u8 private_data[0]; /* data is in-line in the msg. */
-} __attribute__((packed));
-
-union c2wr_ae {
- struct c2wr_ae_hdr ae_generic;
- struct c2wr_ae_active_connect_results ae_active_connect_results;
- struct c2wr_ae_connection_request ae_connection_request;
-} __attribute__((packed));
-
-struct c2wr_init_req {
- struct c2wr_hdr hdr;
- __be64 hint_count;
- __be64 q0_host_shared;
- __be64 q1_host_shared;
- __be64 q1_host_msg_pool;
- __be64 q2_host_shared;
- __be64 q2_host_msg_pool;
-} __attribute__((packed));
-
-struct c2wr_init_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_init {
- struct c2wr_init_req req;
- struct c2wr_init_rep rep;
-} __attribute__((packed));
-
-/*
- * For upgrading flash.
- */
-
-struct c2wr_flash_init_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed));
-
-struct c2wr_flash_init_rep {
- struct c2wr_hdr hdr;
- u32 adapter_flash_buf_offset;
- u32 adapter_flash_len;
-} __attribute__((packed));
-
-union c2wr_flash_init {
- struct c2wr_flash_init_req req;
- struct c2wr_flash_init_rep rep;
-} __attribute__((packed));
-
-struct c2wr_flash_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 len;
-} __attribute__((packed));
-
-struct c2wr_flash_rep {
- struct c2wr_hdr hdr;
- u32 status;
-} __attribute__((packed));
-
-union c2wr_flash {
- struct c2wr_flash_req req;
- struct c2wr_flash_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 size;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_rep {
- struct c2wr_hdr hdr;
- u32 offset; /* 0 if mem not available */
- u32 size; /* 0 if mem not available */
-} __attribute__((packed));
-
-union c2wr_buf_alloc {
- struct c2wr_buf_alloc_req req;
- struct c2wr_buf_alloc_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_free_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 offset; /* Must match value from alloc */
- u32 size; /* Must match value from alloc */
-} __attribute__((packed));
-
-struct c2wr_buf_free_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_buf_free {
- struct c2wr_buf_free_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_flash_write_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 offset;
- u32 size;
- u32 type;
- u32 flags;
-} __attribute__((packed));
-
-struct c2wr_flash_write_rep {
- struct c2wr_hdr hdr;
- u32 status;
-} __attribute__((packed));
-
-union c2wr_flash_write {
- struct c2wr_flash_write_req req;
- struct c2wr_flash_write_rep rep;
-} __attribute__((packed));
-
-/*
- * Messages for LLP connection setup.
- */
-
-/*
- * Listen Request. This allocates a listening endpoint to allow passive
- * connection setup. Newly established LLP connections are passed up
- * via an AE. See c2wr_ae_connection_request_t
- */
-struct c2wr_ep_listen_create_req {
- struct c2wr_hdr hdr;
- u64 user_context; /* returned in AEs. */
- u32 rnic_handle;
- __be32 local_addr; /* local addr, or 0 */
- __be16 local_port; /* 0 means "pick one" */
- u16 pad;
-	__be32 backlog;			/* traditional tcp listen backlog */
-} __attribute__((packed));
-
-struct c2wr_ep_listen_create_rep {
- struct c2wr_hdr hdr;
- u32 ep_handle; /* handle to new listening ep */
- u16 local_port; /* resulting port... */
- u16 pad;
-} __attribute__((packed));
-
-union c2wr_ep_listen_create {
- struct c2wr_ep_listen_create_req req;
- struct c2wr_ep_listen_create_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_ep_listen_destroy {
- struct c2wr_ep_listen_destroy_req req;
- struct c2wr_ep_listen_destroy_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_query_rep {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 local_addr;
- u32 remote_addr;
- u16 local_port;
- u16 remote_port;
-} __attribute__((packed));
-
-union c2wr_ep_query {
- struct c2wr_ep_query_req req;
- struct c2wr_ep_query_rep rep;
-} __attribute__((packed));
-
-
-/*
- * The host passes this down to indicate acceptance of a pending iWARP
- * connection. The cr_handle was obtained from the CONNECTION_REQUEST
- * AE passed up by the adapter. See c2wr_ae_connection_request_t.
- */
-struct c2wr_cr_accept_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle; /* QP to bind to this LLP conn */
- u32 ep_handle; /* LLP handle to accept */
- __be32 private_data_length;
- u8 private_data[0]; /* data in-line in msg. */
-} __attribute__((packed));
-
-/*
- * The adapter sends this reply when the private data has been successfully
- * submitted to the LLP.
- */
-struct c2wr_cr_accept_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_accept {
- struct c2wr_cr_accept_req req;
- struct c2wr_cr_accept_rep rep;
-} __attribute__((packed));
-
-/*
- * The host sends this down if a given iWARP connection request was
- * rejected by the consumer. The cr_handle was obtained from a
- * previous c2wr_ae_connection_request_t AE sent by the adapter.
- */
-struct c2wr_cr_reject_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 ep_handle; /* LLP handle to reject */
-} __attribute__((packed));
-
-/*
- * Dunno if this is needed, but we'll add it for now. The adapter will
- * send the reject_reply after the LLP endpoint has been destroyed.
- */
-struct c2wr_cr_reject_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_reject {
- struct c2wr_cr_reject_req req;
- struct c2wr_cr_reject_rep rep;
-} __attribute__((packed));
-
-/*
- * console command. Used to implement a debug console over the verbs
- * request and reply queues.
- */
-
-/*
- * Console request message. It contains:
- * - message hdr with id = CCWR_CONSOLE
- * - the physaddr/len of host memory to be used for the reply.
- * - the command string. eg: "netstat -s" or "zoneinfo"
- */
-struct c2wr_console_req {
- struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
- u64 reply_buf; /* pinned host buf for reply */
- u32 reply_buf_len; /* length of reply buffer */
- u8 command[0]; /* NUL terminated ascii string */
- /* containing the command req */
-} __attribute__((packed));
-
-/*
- * flags used in the console reply.
- */
-enum c2_console_flags {
- CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */
-} __attribute__((packed));
-
-/*
- * Console reply message.
- * hdr.result contains the c2_status_t error if the reply was _not_ generated,
- * or C2_OK if the reply was generated.
- */
-struct c2wr_console_rep {
- struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
- u32 flags;
-} __attribute__((packed));
-
-union c2wr_console {
- struct c2wr_console_req req;
- struct c2wr_console_rep rep;
-} __attribute__((packed));
-
-
-/*
- * Giant union with all WRs. Makes life easier...
- */
-union c2wr {
- struct c2wr_hdr hdr;
- struct c2wr_user_hdr user_hdr;
- union c2wr_rnic_open rnic_open;
- union c2wr_rnic_query rnic_query;
- union c2wr_rnic_getconfig rnic_getconfig;
- union c2wr_rnic_setconfig rnic_setconfig;
- union c2wr_rnic_close rnic_close;
- union c2wr_cq_create cq_create;
- union c2wr_cq_modify cq_modify;
- union c2wr_cq_destroy cq_destroy;
- union c2wr_pd_alloc pd_alloc;
- union c2wr_pd_dealloc pd_dealloc;
- union c2wr_srq_create srq_create;
- union c2wr_srq_destroy srq_destroy;
- union c2wr_qp_create qp_create;
- union c2wr_qp_query qp_query;
- union c2wr_qp_modify qp_modify;
- union c2wr_qp_destroy qp_destroy;
- struct c2wr_qp_connect qp_connect;
- union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
- union c2wr_nsmr_register nsmr_register;
- union c2wr_nsmr_pbl nsmr_pbl;
- union c2wr_mr_query mr_query;
- union c2wr_mw_query mw_query;
- union c2wr_stag_dealloc stag_dealloc;
- union c2wr_sqwr sqwr;
- struct c2wr_rqwr rqwr;
- struct c2wr_ce ce;
- union c2wr_ae ae;
- union c2wr_init init;
- union c2wr_ep_listen_create ep_listen_create;
- union c2wr_ep_listen_destroy ep_listen_destroy;
- union c2wr_cr_accept cr_accept;
- union c2wr_cr_reject cr_reject;
- union c2wr_console console;
- union c2wr_flash_init flash_init;
- union c2wr_flash flash;
- union c2wr_buf_alloc buf_alloc;
- union c2wr_buf_free buf_free;
- union c2wr_flash_write flash_write;
-} __attribute__((packed));
-
-
-/*
- * Accessors for the wr fields that are packed together tightly to
- * reduce the wr message size. The wr arguments are void* so that
- * either a union c2wr *, a struct c2wr_hdr *, or a pointer to any of the
- * types in the union c2wr can be passed in.
- */
-static __inline__ u8 c2_wr_get_id(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->id;
-}
-static __inline__ void c2_wr_set_id(void *wr, u8 id)
-{
- ((struct c2wr_hdr *) wr)->id = id;
-}
-static __inline__ u8 c2_wr_get_result(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->result;
-}
-static __inline__ void c2_wr_set_result(void *wr, u8 result)
-{
- ((struct c2wr_hdr *) wr)->result = result;
-}
-static __inline__ u8 c2_wr_get_flags(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->flags;
-}
-static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
-{
- ((struct c2wr_hdr *) wr)->flags = flags;
-}
-static __inline__ u8 c2_wr_get_sge_count(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->sge_count;
-}
-static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
-{
- ((struct c2wr_hdr *) wr)->sge_count = sge_count;
-}
-static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->wqe_count;
-}
-static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
-{
- ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
-}
-
-#endif /* _C2_WR_H_ */
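A short, hedged example of how the accessors above combine when a host-side caller fills in a request header before posting it with vq_send_wr(); the helper name and the choice of CCWR_RNIC_QUERY are illustrative, not a specific call site from the driver:

#include <linux/string.h>	/* memset() */

/* Illustrative only: prepare a minimal request header through the accessors. */
static void example_fill_query_hdr(union c2wr *wr, u64 context)
{
	memset(wr, 0, sizeof(*wr));
	c2_wr_set_id(wr, CCWR_RNIC_QUERY);	/* which verb this WR carries */
	c2_wr_set_sge_count(wr, 0);		/* no scatter/gather entries */
	c2_wr_set_flags(wr, 0);
	wr->hdr.context = context;		/* echoed back in the reply */
}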
diff --git a/drivers/staging/rdma/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
deleted file mode 100644
index 3fadd2ad6426..000000000000
--- a/drivers/staging/rdma/ehca/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config INFINIBAND_EHCA
- tristate "eHCA support"
- depends on IBMEBUS
- ---help---
- This driver supports the deprecated IBM pSeries eHCA InfiniBand
- adapter.
-
- To compile the driver as a module, choose M here. The module
- will be called ib_ehca.
-
diff --git a/drivers/staging/rdma/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
deleted file mode 100644
index 74d284e46a40..000000000000
--- a/drivers/staging/rdma/ehca/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# Authors: Heiko J Schick <schickhj@de.ibm.com>
-# Christoph Raisch <raisch@de.ibm.com>
-# Joachim Fenkes <fenkes@de.ibm.com>
-#
-# Copyright (c) 2005 IBM Corporation
-#
-# All rights reserved.
-#
-# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
-
-obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
-
-ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
- ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
- ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
-
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
deleted file mode 100644
index 199a4a600142..000000000000
--- a/drivers/staging/rdma/ehca/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-9/2015
-
-The ehca driver has been deprecated and moved to drivers/staging/rdma.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
deleted file mode 100644
index 465926319f3d..000000000000
--- a/drivers/staging/rdma/ehca/ehca_av.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * address vector functions
- *
- * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Khadija Souissi <souissik@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *av_cache;
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
- enum ib_rate path_rate, u32 *ipd)
-{
- int path = ib_rate_to_mult(path_rate);
- int link, ret;
- struct ib_port_attr pa;
-
- if (path_rate == IB_RATE_PORT_CURRENT) {
- *ipd = 0;
- return 0;
- }
-
- if (unlikely(path < 0)) {
- ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
- path_rate);
- return -EINVAL;
- }
-
- ret = ehca_query_port(&shca->ib_device, port, &pa);
- if (unlikely(ret < 0)) {
- ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
- return ret;
- }
-
- link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
-
- if (path >= link)
- /* no need to throttle if path faster than link */
- *ipd = 0;
- else
- /* IPD = round((link / path) - 1) */
- *ipd = ((link + (path >> 1)) / path) - 1;
-
- return 0;
-}
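The integer expression above rounds link/path to the nearest integer without floating point. A tiny hedged illustration with arbitrarily chosen values (not taken from any real port attributes):

/* Illustrative only: ((link + (path >> 1)) / path) - 1 == round(link / path) - 1 */
static inline unsigned int ipd_rounding_example(void)
{
	const int link = 8, path = 3;	/* arbitrary example values */

	/* (8 + 1) / 3 - 1 = 2, matching round(8/3 - 1) = round(1.67) = 2 */
	return ((link + (path >> 1)) / path) - 1;
}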
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
- int ret;
- struct ehca_av *av;
- struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
- ib_device);
-
- av = kmem_cache_alloc(av_cache, GFP_KERNEL);
- if (!av) {
- ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
- pd, ah_attr);
- return ERR_PTR(-ENOMEM);
- }
-
- av->av.sl = ah_attr->sl;
- av->av.dlid = ah_attr->dlid;
- av->av.slid_path_bits = ah_attr->src_path_bits;
-
- if (ehca_static_rate < 0) {
- u32 ipd;
- if (ehca_calc_ipd(shca, ah_attr->port_num,
- ah_attr->static_rate, &ipd)) {
- ret = -EINVAL;
- goto create_ah_exit1;
- }
- av->av.ipd = ipd;
- } else
- av->av.ipd = ehca_static_rate;
-
- av->av.lnh = ah_attr->ah_flags;
- av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
- ah_attr->grh.traffic_class);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
- ah_attr->grh.flow_label);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
- ah_attr->grh.hop_limit);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
- /* set sgid in grh.word_1 */
- if (ah_attr->ah_flags & IB_AH_GRH) {
- int rc;
- struct ib_port_attr port_attr;
- union ib_gid gid;
- memset(&port_attr, 0, sizeof(port_attr));
- rc = ehca_query_port(pd->device, ah_attr->port_num,
- &port_attr);
- if (rc) { /* invalid port number */
- ret = -EINVAL;
- ehca_err(pd->device, "Invalid port number "
- "ehca_query_port() returned %x "
- "pd=%p ah_attr=%p", rc, pd, ah_attr);
- goto create_ah_exit1;
- }
- memset(&gid, 0, sizeof(gid));
- rc = ehca_query_gid(pd->device,
- ah_attr->port_num,
- ah_attr->grh.sgid_index, &gid);
- if (rc) {
- ret = -EINVAL;
- ehca_err(pd->device, "Failed to retrieve sgid "
- "ehca_query_gid() returned %x "
- "pd=%p ah_attr=%p", rc, pd, ah_attr);
- goto create_ah_exit1;
- }
- memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
- }
- av->av.pmtu = shca->max_mtu;
-
- /* dgid comes in grh.word_3 */
- memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
- sizeof(ah_attr->grh.dgid));
-
- return &av->ib_ah;
-
-create_ah_exit1:
- kmem_cache_free(av_cache, av);
-
- return ERR_PTR(ret);
-}
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
- struct ehca_av *av;
- struct ehca_ud_av new_ehca_av;
- struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
- ib_device);
-
- memset(&new_ehca_av, 0, sizeof(new_ehca_av));
- new_ehca_av.sl = ah_attr->sl;
- new_ehca_av.dlid = ah_attr->dlid;
- new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
- new_ehca_av.ipd = ah_attr->static_rate;
- new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
- (ah_attr->ah_flags & IB_AH_GRH) > 0);
- new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
- ah_attr->grh.traffic_class);
- new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
- ah_attr->grh.flow_label);
- new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
- ah_attr->grh.hop_limit);
- new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
-
- /* set sgid in grh.word_1 */
- if (ah_attr->ah_flags & IB_AH_GRH) {
- int rc;
- struct ib_port_attr port_attr;
- union ib_gid gid;
- memset(&port_attr, 0, sizeof(port_attr));
- rc = ehca_query_port(ah->device, ah_attr->port_num,
- &port_attr);
- if (rc) { /* invalid port number */
- ehca_err(ah->device, "Invalid port number "
- "ehca_query_port() returned %x "
- "ah=%p ah_attr=%p port_num=%x",
- rc, ah, ah_attr, ah_attr->port_num);
- return -EINVAL;
- }
- memset(&gid, 0, sizeof(gid));
- rc = ehca_query_gid(ah->device,
- ah_attr->port_num,
- ah_attr->grh.sgid_index, &gid);
- if (rc) {
- ehca_err(ah->device, "Failed to retrieve sgid "
- "ehca_query_gid() returned %x "
- "ah=%p ah_attr=%p port_num=%x "
- "sgid_index=%x",
- rc, ah, ah_attr, ah_attr->port_num,
- ah_attr->grh.sgid_index);
- return -EINVAL;
- }
- memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
- }
-
- new_ehca_av.pmtu = shca->max_mtu;
-
- memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
- sizeof(ah_attr->grh.dgid));
-
- av = container_of(ah, struct ehca_av, ib_ah);
- av->av = new_ehca_av;
-
- return 0;
-}
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
- struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-
- memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
- sizeof(ah_attr->grh.dgid));
- ah_attr->sl = av->av.sl;
-
- ah_attr->dlid = av->av.dlid;
-
- ah_attr->src_path_bits = av->av.slid_path_bits;
- ah_attr->static_rate = av->av.ipd;
- ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
- ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
- av->av.grh.word_0);
- ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
- av->av.grh.word_0);
- ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
- av->av.grh.word_0);
-
- return 0;
-}
-
-int ehca_destroy_ah(struct ib_ah *ah)
-{
- kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
-
- return 0;
-}
-
-int ehca_init_av_cache(void)
-{
- av_cache = kmem_cache_create("ehca_cache_av",
- sizeof(struct ehca_av), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!av_cache)
- return -ENOMEM;
- return 0;
-}
-
-void ehca_cleanup_av_cache(void)
-{
- if (av_cache)
- kmem_cache_destroy(av_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
deleted file mode 100644
index bd45e0f3923f..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes.h
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Struct definition for eHCA internal structures
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_H__
-#define __EHCA_CLASSES_H__
-
-struct ehca_module;
-struct ehca_qp;
-struct ehca_cq;
-struct ehca_eq;
-struct ehca_mr;
-struct ehca_mw;
-struct ehca_pd;
-struct ehca_av;
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-
-#ifdef CONFIG_PPC64
-#include "ehca_classes_pSeries.h"
-#endif
-#include "ipz_pt_fn.h"
-#include "ehca_qes.h"
-#include "ehca_irq.h"
-
-#define EHCA_EQE_CACHE_SIZE 20
-#define EHCA_MAX_NUM_QUEUES 0xffff
-
-struct ehca_eqe_cache_entry {
- struct ehca_eqe *eqe;
- struct ehca_cq *cq;
-};
-
-struct ehca_eq {
- u32 length;
- struct ipz_queue ipz_queue;
- struct ipz_eq_handle ipz_eq_handle;
- struct work_struct work;
- struct h_galpas galpas;
- int is_initialized;
- struct ehca_pfeq pf;
- spinlock_t spinlock;
- struct tasklet_struct interrupt_task;
- u32 ist;
- spinlock_t irq_spinlock;
- struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
-};
-
-struct ehca_sma_attr {
- u16 lid, lmc, sm_sl, sm_lid;
- u16 pkey_tbl_len, pkeys[16];
-};
-
-struct ehca_sport {
- struct ib_cq *ibcq_aqp1;
- struct ib_qp *ibqp_sqp[2];
-	/* lock to serialize modify_qp() calls for sqp in the normal
-	 * and irq paths (when the PORT_ACTIVE event is received the first time)
- */
- spinlock_t mod_sqp_lock;
- enum ib_port_state port_state;
- struct ehca_sma_attr saved_attr;
- u32 pma_qp_nr;
-};
-
-#define HCA_CAP_MR_PGSIZE_4K 0x80000000
-#define HCA_CAP_MR_PGSIZE_64K 0x40000000
-#define HCA_CAP_MR_PGSIZE_1M 0x20000000
-#define HCA_CAP_MR_PGSIZE_16M 0x10000000
-
-struct ehca_shca {
- struct ib_device ib_device;
- struct platform_device *ofdev;
- u8 num_ports;
- int hw_level;
- struct list_head shca_list;
- struct ipz_adapter_handle ipz_hca_handle;
- struct ehca_sport sport[2];
- struct ehca_eq eq;
- struct ehca_eq neq;
- struct ehca_mr *maxmr;
- struct ehca_pd *pd;
- struct h_galpas galpas;
- struct mutex modify_mutex;
- u64 hca_cap;
- /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
- u32 hca_cap_mr_pgsize;
- int max_mtu;
- int max_num_qps;
- int max_num_cqs;
- atomic_t num_cqs;
- atomic_t num_qps;
-};
-
-struct ehca_pd {
- struct ib_pd ib_pd;
- struct ipz_pd fw_pd;
- /* small queue mgmt */
- struct mutex lock;
- struct list_head free[2];
- struct list_head full[2];
-};
-
-enum ehca_ext_qp_type {
- EQPT_NORMAL = 0,
- EQPT_LLQP = 1,
- EQPT_SRQBASE = 2,
- EQPT_SRQ = 3,
-};
-
-/* struct to cache modify_qp()'s parms for GSI/SMI qp */
-struct ehca_mod_qp_parm {
- int mask;
- struct ib_qp_attr attr;
-};
-
-#define EHCA_MOD_QP_PARM_MAX 4
-
-#define QMAP_IDX_MASK 0xFFFFULL
-
-/* struct for tracking if cqes have been reported to the application */
-struct ehca_qmap_entry {
- u16 app_wr_id;
- u8 reported;
- u8 cqe_req;
-};
-
-struct ehca_queue_map {
- struct ehca_qmap_entry *map;
- unsigned int entries;
- unsigned int tail;
- unsigned int left_to_poll;
- unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
-};
-
-/* function to calculate the next index for the qmap */
-static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
-{
- unsigned int temp = cur_index + 1;
- return (temp == limit) ? 0 : temp;
-}
-
-struct ehca_qp {
- union {
- struct ib_qp ib_qp;
- struct ib_srq ib_srq;
- };
- u32 qp_type;
- enum ehca_ext_qp_type ext_type;
- enum ib_qp_state state;
- struct ipz_queue ipz_squeue;
- struct ehca_queue_map sq_map;
- struct ipz_queue ipz_rqueue;
- struct ehca_queue_map rq_map;
- struct h_galpas galpas;
- u32 qkey;
- u32 real_qp_num;
- u32 token;
- spinlock_t spinlock_s;
- spinlock_t spinlock_r;
- u32 sq_max_inline_data_size;
- struct ipz_qp_handle ipz_qp_handle;
- struct ehca_pfqp pf;
- struct ib_qp_init_attr init_attr;
- struct ehca_cq *send_cq;
- struct ehca_cq *recv_cq;
- unsigned int sqerr_purgeflag;
- struct hlist_node list_entries;
- /* array to cache modify_qp()'s parms for GSI/SMI qp */
- struct ehca_mod_qp_parm *mod_qp_parm;
- int mod_qp_parm_idx;
- /* mmap counter for resources mapped into user space */
- u32 mm_count_squeue;
- u32 mm_count_rqueue;
- u32 mm_count_galpa;
- /* unsolicited ack circumvention */
- int unsol_ack_circ;
- int mtu_shift;
- u32 message_count;
- u32 packet_count;
- atomic_t nr_events; /* events seen */
- wait_queue_head_t wait_completion;
- int mig_armed;
- struct list_head sq_err_node;
- struct list_head rq_err_node;
-};
-
-#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
-#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
-#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
-
-/* must be power of 2 */
-#define QP_HASHTAB_LEN 8
-
-struct ehca_cq {
- struct ib_cq ib_cq;
- struct ipz_queue ipz_queue;
- struct h_galpas galpas;
- spinlock_t spinlock;
- u32 cq_number;
- u32 token;
- u32 nr_of_entries;
- struct ipz_cq_handle ipz_cq_handle;
- struct ehca_pfcq pf;
- spinlock_t cb_lock;
- struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
- struct list_head entry;
- u32 nr_callbacks; /* #events assigned to cpu by scaling code */
- atomic_t nr_events; /* #events seen */
- wait_queue_head_t wait_completion;
- spinlock_t task_lock;
- /* mmap counter for resources mapped into user space */
- u32 mm_count_queue;
- u32 mm_count_galpa;
- struct list_head sqp_err_list;
- struct list_head rqp_err_list;
-};
-
-enum ehca_mr_flag {
- EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
- EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
-};
-
-struct ehca_mr {
- union {
- struct ib_mr ib_mr; /* must always be first in ehca_mr */
- struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
- } ib;
- struct ib_umem *umem;
- spinlock_t mrlock;
-
- enum ehca_mr_flag flags;
- u32 num_kpages; /* number of kernel pages */
- u32 num_hwpages; /* number of hw pages to form MR */
- u64 hwpage_size; /* hw page size used for this MR */
- int acl; /* ACL (stored here for usage in reregister) */
- u64 *start; /* virtual start address (stored here for */
- /* usage in reregister) */
- u64 size; /* size (stored here for usage in reregister) */
- u32 fmr_page_size; /* page size for FMR */
- u32 fmr_max_pages; /* max pages for FMR */
- u32 fmr_max_maps; /* max outstanding maps for FMR */
- u32 fmr_map_cnt; /* map counter for FMR */
- /* fw specific data */
- struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
- struct h_galpas galpas;
-};
-
-struct ehca_mw {
- struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
- spinlock_t mwlock;
-
- u8 never_bound; /* indication MW was never bound */
- struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
- struct h_galpas galpas;
-};
-
-enum ehca_mr_pgi_type {
- EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
- * ehca_rereg_phys_mr,
- * ehca_reg_internal_maxmr */
- EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
- EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
-};
-
-struct ehca_mr_pginfo {
- enum ehca_mr_pgi_type type;
- u64 num_kpages;
- u64 kpage_cnt;
- u64 hwpage_size; /* hw page size used for this MR */
- u64 num_hwpages; /* number of hw pages */
- u64 hwpage_cnt; /* counter for hw pages */
- u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
-
- union {
- struct { /* type EHCA_MR_PGI_PHYS section */
- int num_phys_buf;
- struct ib_phys_buf *phys_buf_array;
- u64 next_buf;
- } phy;
- struct { /* type EHCA_MR_PGI_USER section */
- struct ib_umem *region;
- struct scatterlist *next_sg;
- u64 next_nmap;
- } usr;
- struct { /* type EHCA_MR_PGI_FMR section */
- u64 fmr_pgsize;
- u64 *page_list;
- u64 next_listelem;
- } fmr;
- } u;
-};
-
-/* output parameters for MR/FMR hipz calls */
-struct ehca_mr_hipzout_parms {
- struct ipz_mrmw_handle handle;
- u32 lkey;
- u32 rkey;
- u64 len;
- u64 vaddr;
- u32 acl;
-};
-
-/* output parameters for MW hipz calls */
-struct ehca_mw_hipzout_parms {
- struct ipz_mrmw_handle handle;
- u32 rkey;
-};
-
-struct ehca_av {
- struct ib_ah ib_ah;
- struct ehca_ud_av av;
-};
-
-struct ehca_ucontext {
- struct ib_ucontext ib_ucontext;
-};
-
-int ehca_init_pd_cache(void);
-void ehca_cleanup_pd_cache(void);
-int ehca_init_cq_cache(void);
-void ehca_cleanup_cq_cache(void);
-int ehca_init_qp_cache(void);
-void ehca_cleanup_qp_cache(void);
-int ehca_init_av_cache(void);
-void ehca_cleanup_av_cache(void);
-int ehca_init_mrmw_cache(void);
-void ehca_cleanup_mrmw_cache(void);
-int ehca_init_small_qp_cache(void);
-void ehca_cleanup_small_qp_cache(void);
-
-extern rwlock_t ehca_qp_idr_lock;
-extern rwlock_t ehca_cq_idr_lock;
-extern struct idr ehca_qp_idr;
-extern struct idr ehca_cq_idr;
-extern spinlock_t shca_list_lock;
-
-extern int ehca_static_rate;
-extern int ehca_port_act_time;
-extern bool ehca_use_hp_mr;
-extern bool ehca_scaling_code;
-extern int ehca_lock_hcalls;
-extern int ehca_nr_ports;
-extern int ehca_max_cq;
-extern int ehca_max_qp;
-
-struct ipzu_queue_resp {
- u32 qe_size; /* queue entry size */
- u32 act_nr_of_sg;
- u32 queue_length; /* queue length allocated in bytes */
- u32 pagesize;
- u32 toggle_state;
- u32 offset; /* save offset within a page for small_qp */
-};
-
-struct ehca_create_cq_resp {
- u32 cq_number;
- u32 token;
- struct ipzu_queue_resp ipz_queue;
- u32 fw_handle_ofs;
- u32 dummy;
-};
-
-struct ehca_create_qp_resp {
- u32 qp_num;
- u32 token;
- u32 qp_type;
- u32 ext_type;
- u32 qkey;
- /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
- u32 real_qp_num;
- u32 fw_handle_ofs;
- u32 dummy;
- struct ipzu_queue_resp ipz_squeue;
- struct ipzu_queue_resp ipz_rqueue;
-};
-
-struct ehca_alloc_cq_parms {
- u32 nr_cqe;
- u32 act_nr_of_entries;
- u32 act_pages;
- struct ipz_eq_handle eq_handle;
-};
-
-enum ehca_service_type {
- ST_RC = 0,
- ST_UC = 1,
- ST_RD = 2,
- ST_UD = 3,
-};
-
-enum ehca_ll_comp_flags {
- LLQP_SEND_COMP = 0x20,
- LLQP_RECV_COMP = 0x40,
- LLQP_COMP_MASK = 0x60,
-};
-
-struct ehca_alloc_queue_parms {
- /* input parameters */
- int max_wr;
- int max_sge;
- int page_size;
- int is_small;
-
- /* output parameters */
- u16 act_nr_wqes;
- u8 act_nr_sges;
- u32 queue_size; /* bytes for small queues, pages otherwise */
-};
-
-struct ehca_alloc_qp_parms {
- struct ehca_alloc_queue_parms squeue;
- struct ehca_alloc_queue_parms rqueue;
-
- /* input parameters */
- enum ehca_service_type servicetype;
- int qp_storage;
- int sigtype;
- enum ehca_ext_qp_type ext_type;
- enum ehca_ll_comp_flags ll_comp_flags;
- int ud_av_l_key_ctl;
-
- u32 token;
- struct ipz_eq_handle eq_handle;
- struct ipz_pd pd;
- struct ipz_cq_handle send_cq_handle, recv_cq_handle;
-
- u32 srq_qpn, srq_token, srq_limit;
-
- /* output parameters */
- u32 real_qp_num;
- struct ipz_qp_handle qp_handle;
- struct h_galpas galpas;
-};
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
-
-#endif
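
The ehca_cq structure above buckets its queue pairs into qp_hashtab[] using "real_qp_num & (QP_HASHTAB_LEN - 1)", which relies on QP_HASHTAB_LEN being a power of two. The following user-space sketch (not driver code; the toy_* names are invented for illustration) shows the same bucket-selection idea in miniature.

/*
 * Standalone sketch, not part of the driver: a power-of-two bucket count
 * lets "& (len - 1)" stand in for a modulo when picking a hash bucket.
 */
#include <stdio.h>

#define TOY_HASHTAB_LEN 8                 /* must be a power of two */

struct toy_qp {
	unsigned int qp_num;
	struct toy_qp *next;              /* singly linked bucket chain */
};

static struct toy_qp *toy_bucket[TOY_HASHTAB_LEN];

static unsigned int toy_key(unsigned int qp_num)
{
	return qp_num & (TOY_HASHTAB_LEN - 1);
}

static void toy_insert(struct toy_qp *qp)
{
	unsigned int key = toy_key(qp->qp_num);

	qp->next = toy_bucket[key];
	toy_bucket[key] = qp;
}

static struct toy_qp *toy_lookup(unsigned int qp_num)
{
	struct toy_qp *qp;

	for (qp = toy_bucket[toy_key(qp_num)]; qp; qp = qp->next)
		if (qp->qp_num == qp_num)
			return qp;
	return NULL;
}

int main(void)
{
	struct toy_qp a = { .qp_num = 0x17 }, b = { .qp_num = 0x1f };

	toy_insert(&a);
	toy_insert(&b);
	printf("lookup 0x17 -> %p, lookup 0x42 -> %p\n",
	       (void *)toy_lookup(0x17), (void *)toy_lookup(0x42));
	return 0;
}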
diff --git a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
deleted file mode 100644
index 689c35786dd2..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * pSeries interface definitions
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_PSERIES_H__
-#define __EHCA_CLASSES_PSERIES_H__
-
-#include "hcp_phyp.h"
-#include "ipz_pt_fn.h"
-
-
-struct ehca_pfqp {
- struct ipz_qpt sqpt;
- struct ipz_qpt rqpt;
-};
-
-struct ehca_pfcq {
- struct ipz_qpt qpt;
- u32 cqnr;
-};
-
-struct ehca_pfeq {
- struct ipz_qpt qpt;
- struct h_galpa galpa;
- u32 eqnr;
-};
-
-struct ipz_adapter_handle {
- u64 handle;
-};
-
-struct ipz_cq_handle {
- u64 handle;
-};
-
-struct ipz_eq_handle {
- u64 handle;
-};
-
-struct ipz_qp_handle {
- u64 handle;
-};
-struct ipz_mrmw_handle {
- u64 handle;
-};
-
-struct ipz_pd {
- u32 value;
-};
-
-struct hcp_modify_qp_control_block {
- u32 qkey; /* 00 */
- u32 rdd; /* reliable datagram domain */
- u32 send_psn; /* 02 */
- u32 receive_psn; /* 03 */
- u32 prim_phys_port; /* 04 */
- u32 alt_phys_port; /* 05 */
- u32 prim_p_key_idx; /* 06 */
- u32 alt_p_key_idx; /* 07 */
- u32 rdma_atomic_ctrl; /* 08 */
- u32 qp_state; /* 09 */
- u32 reserved_10; /* 10 */
- u32 rdma_nr_atomic_resp_res; /* 11 */
- u32 path_migration_state; /* 12 */
- u32 rdma_atomic_outst_dest_qp; /* 13 */
- u32 dest_qp_nr; /* 14 */
- u32 min_rnr_nak_timer_field; /* 15 */
- u32 service_level; /* 16 */
- u32 send_grh_flag; /* 17 */
- u32 retry_count; /* 18 */
- u32 timeout; /* 19 */
- u32 path_mtu; /* 20 */
- u32 max_static_rate; /* 21 */
- u32 dlid; /* 22 */
- u32 rnr_retry_count; /* 23 */
- u32 source_path_bits; /* 24 */
- u32 traffic_class; /* 25 */
- u32 hop_limit; /* 26 */
- u32 source_gid_idx; /* 27 */
- u32 flow_label; /* 28 */
- u32 reserved_29; /* 29 */
- union { /* 30 */
- u64 dw[2];
- u8 byte[16];
- } dest_gid;
- u32 service_level_al; /* 34 */
- u32 send_grh_flag_al; /* 35 */
- u32 retry_count_al; /* 36 */
- u32 timeout_al; /* 37 */
- u32 max_static_rate_al; /* 38 */
- u32 dlid_al; /* 39 */
- u32 rnr_retry_count_al; /* 40 */
- u32 source_path_bits_al; /* 41 */
- u32 traffic_class_al; /* 42 */
- u32 hop_limit_al; /* 43 */
- u32 source_gid_idx_al; /* 44 */
- u32 flow_label_al; /* 45 */
- u32 reserved_46; /* 46 */
- u32 reserved_47; /* 47 */
- union { /* 48 */
- u64 dw[2];
- u8 byte[16];
- } dest_gid_al;
- u32 max_nr_outst_send_wr; /* 52 */
- u32 max_nr_outst_recv_wr; /* 53 */
- u32 disable_ete_credit_check; /* 54 */
- u32 qp_number; /* 55 */
- u64 send_queue_handle; /* 56 */
- u64 recv_queue_handle; /* 58 */
- u32 actual_nr_sges_in_sq_wqe; /* 60 */
- u32 actual_nr_sges_in_rq_wqe; /* 61 */
- u32 qp_enable; /* 62 */
- u32 curr_srq_limit; /* 63 */
- u64 qp_aff_asyn_ev_log_reg; /* 64 */
- u64 shared_rq_hndl; /* 66 */
- u64 trigg_doorbell_qp_hndl; /* 68 */
- u32 reserved_70_127[58]; /* 70 */
-};
-
-#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
-#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
-#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
-#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
-#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
-#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
-#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
-#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
-#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
-#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
-#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
-#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
-#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
-#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
-#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
-#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
-#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
-#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
-#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
-#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
-#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
-#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
-#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
-#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
-#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
-#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
-#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
-#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
-#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
-
-#endif /* __EHCA_CLASSES_PSERIES_H__ */
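
The MQPCB_MASK_* constants above are built with EHCA_BMASK_IBM(from, to), i.e. with IBM bit numbering where bit 0 is the most significant bit of a 64-bit word. As a rough illustration only — this is not the driver's EHCA_BMASK_GET implementation, and ibm_bits_get is an invented name — extracting such a field could look like this:

/*
 * Illustrative sketch: pull IBM-numbered bits [from..to] (bit 0 = MSB)
 * out of a 64-bit word.  Invented helper, not the ehca macro.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ibm_bits_get(uint64_t word, unsigned int from, unsigned int to)
{
	unsigned int width = to - from + 1;           /* inclusive range */
	unsigned int shift = 63 - to;                 /* distance from LSB */
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (word >> shift) & mask;
}

int main(void)
{
	/* IBM bits 32..63 of a 64-bit event entry are its low 32 bits */
	uint64_t eqe = 0x1234567890abcdefULL;

	printf("token = 0x%llx\n",
	       (unsigned long long)ibm_bits_get(eqe, 32, 63));
	return 0;
}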
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
deleted file mode 100644
index 9b68b175069b..000000000000
--- a/drivers/staging/rdma/ehca/ehca_cq.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Completion queue handling
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *cq_cache;
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
-{
- unsigned int qp_num = qp->real_qp_num;
- unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
- unsigned long flags;
-
- spin_lock_irqsave(&cq->spinlock, flags);
- hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
- spin_unlock_irqrestore(&cq->spinlock, flags);
-
- ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
- cq->cq_number, qp_num);
-
- return 0;
-}
-
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
-{
- int ret = -EINVAL;
- unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
- struct hlist_node *iter;
- struct ehca_qp *qp;
- unsigned long flags;
-
- spin_lock_irqsave(&cq->spinlock, flags);
- hlist_for_each(iter, &cq->qp_hashtab[key]) {
- qp = hlist_entry(iter, struct ehca_qp, list_entries);
- if (qp->real_qp_num == real_qp_num) {
- hlist_del(iter);
- ehca_dbg(cq->ib_cq.device,
- "removed qp from cq .cq_num=%x real_qp_num=%x",
- cq->cq_number, real_qp_num);
- ret = 0;
- break;
- }
- }
- spin_unlock_irqrestore(&cq->spinlock, flags);
- if (ret)
- ehca_err(cq->ib_cq.device,
- "qp not found cq_num=%x real_qp_num=%x",
- cq->cq_number, real_qp_num);
-
- return ret;
-}
-
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
-{
- struct ehca_qp *ret = NULL;
- unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
- struct hlist_node *iter;
- struct ehca_qp *qp;
- hlist_for_each(iter, &cq->qp_hashtab[key]) {
- qp = hlist_entry(iter, struct ehca_qp, list_entries);
- if (qp->real_qp_num == real_qp_num) {
- ret = qp;
- break;
- }
- }
- return ret;
-}
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int cqe = attr->cqe;
- static const u32 additional_cqe = 20;
- struct ib_cq *cq;
- struct ehca_cq *my_cq;
- struct ehca_shca *shca =
- container_of(device, struct ehca_shca, ib_device);
- struct ipz_adapter_handle adapter_handle;
- struct ehca_alloc_cq_parms param; /* h_call's out parameters */
- struct h_galpa gal;
- void *vpage;
- u32 counter;
- u64 rpage, cqx_fec, h_ret;
- int ipz_rc, i;
- unsigned long flags;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
- return ERR_PTR(-EINVAL);
-
- if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
- ehca_err(device, "Unable to create CQ, max number of %i "
- "CQs reached.", shca->max_num_cqs);
- ehca_err(device, "To increase the maximum number of CQs "
- "use the number_of_cqs module parameter.\n");
- return ERR_PTR(-ENOSPC);
- }
-
- my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
- if (!my_cq) {
- ehca_err(device, "Out of memory for ehca_cq struct device=%p",
- device);
- atomic_dec(&shca->num_cqs);
- return ERR_PTR(-ENOMEM);
- }
-
- memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
-
- spin_lock_init(&my_cq->spinlock);
- spin_lock_init(&my_cq->cb_lock);
- spin_lock_init(&my_cq->task_lock);
- atomic_set(&my_cq->nr_events, 0);
- init_waitqueue_head(&my_cq->wait_completion);
-
- cq = &my_cq->ib_cq;
-
- adapter_handle = shca->ipz_hca_handle;
- param.eq_handle = shca->eq.ipz_eq_handle;
-
- idr_preload(GFP_KERNEL);
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- idr_preload_end();
-
- if (my_cq->token < 0) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Can't allocate new idr entry. device=%p",
- device);
- goto create_cq_exit1;
- }
-
- /*
-	 * A CQ's maximum depth is 4GB-64, but we need an additional 20
-	 * entries as a buffer for receiving error CQEs.
- */
- param.nr_cqe = cqe + additional_cqe;
- h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
-
- if (h_ret != H_SUCCESS) {
- ehca_err(device, "hipz_h_alloc_resource_cq() failed "
- "h_ret=%lli device=%p", h_ret, device);
- cq = ERR_PTR(ehca2ib_return_code(h_ret));
- goto create_cq_exit2;
- }
-
- ipz_rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
- EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
- if (!ipz_rc) {
- ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
- ipz_rc, device);
- cq = ERR_PTR(-EINVAL);
- goto create_cq_exit3;
- }
-
- for (counter = 0; counter < param.act_pages; counter++) {
- vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
- if (!vpage) {
- ehca_err(device, "ipz_qpageit_get_inc() "
- "returns NULL device=%p", device);
- cq = ERR_PTR(-EAGAIN);
- goto create_cq_exit4;
- }
- rpage = __pa(vpage);
-
- h_ret = hipz_h_register_rpage_cq(adapter_handle,
- my_cq->ipz_cq_handle,
- &my_cq->pf,
- 0,
- 0,
- rpage,
- 1,
- my_cq->galpas.
- kernel);
-
- if (h_ret < H_SUCCESS) {
- ehca_err(device, "hipz_h_register_rpage_cq() failed "
- "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
- "act_pages=%i", my_cq, my_cq->cq_number,
- h_ret, counter, param.act_pages);
- cq = ERR_PTR(-EINVAL);
- goto create_cq_exit4;
- }
-
- if (counter == (param.act_pages - 1)) {
- vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
- if ((h_ret != H_SUCCESS) || vpage) {
- ehca_err(device, "Registration of pages not "
- "complete ehca_cq=%p cq_num=%x "
- "h_ret=%lli", my_cq, my_cq->cq_number,
- h_ret);
- cq = ERR_PTR(-EAGAIN);
- goto create_cq_exit4;
- }
- } else {
- if (h_ret != H_PAGE_REGISTERED) {
- ehca_err(device, "Registration of page failed "
- "ehca_cq=%p cq_num=%x h_ret=%lli "
- "counter=%i act_pages=%i",
- my_cq, my_cq->cq_number,
- h_ret, counter, param.act_pages);
- cq = ERR_PTR(-ENOMEM);
- goto create_cq_exit4;
- }
- }
- }
-
- ipz_qeit_reset(&my_cq->ipz_queue);
-
- gal = my_cq->galpas.kernel;
- cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
- ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
- my_cq, my_cq->cq_number, cqx_fec);
-
- my_cq->ib_cq.cqe = my_cq->nr_of_entries =
- param.act_nr_of_entries - additional_cqe;
- my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
-
- for (i = 0; i < QP_HASHTAB_LEN; i++)
- INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
-
- INIT_LIST_HEAD(&my_cq->sqp_err_list);
- INIT_LIST_HEAD(&my_cq->rqp_err_list);
-
- if (context) {
- struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
- struct ehca_create_cq_resp resp;
- memset(&resp, 0, sizeof(resp));
- resp.cq_number = my_cq->cq_number;
- resp.token = my_cq->token;
- resp.ipz_queue.qe_size = ipz_queue->qe_size;
- resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
- resp.ipz_queue.queue_length = ipz_queue->queue_length;
- resp.ipz_queue.pagesize = ipz_queue->pagesize;
- resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
- resp.fw_handle_ofs = (u32)
- (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
- if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- ehca_err(device, "Copy to udata failed.");
- cq = ERR_PTR(-EFAULT);
- goto create_cq_exit4;
- }
- }
-
- return cq;
-
-create_cq_exit4:
- ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-
-create_cq_exit3:
- h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
- if (h_ret != H_SUCCESS)
- ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
- "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
-
-create_cq_exit2:
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- idr_remove(&ehca_cq_idr, my_cq->token);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-create_cq_exit1:
- kmem_cache_free(cq_cache, my_cq);
-
- atomic_dec(&shca->num_cqs);
- return cq;
-}
-
-int ehca_destroy_cq(struct ib_cq *cq)
-{
- u64 h_ret;
- struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- int cq_num = my_cq->cq_number;
- struct ib_device *device = cq->device;
- struct ehca_shca *shca = container_of(device, struct ehca_shca,
- ib_device);
- struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
- unsigned long flags;
-
- if (cq->uobject) {
- if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
- ehca_err(device, "Resources still referenced in "
- "user space cq_num=%x", my_cq->cq_number);
- return -EINVAL;
- }
- }
-
- /*
- * remove the CQ from the idr first to make sure
- * no more interrupt tasklets will touch this CQ
- */
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- idr_remove(&ehca_cq_idr, my_cq->token);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
- /* now wait until all pending events have completed */
- wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
-
- /* nobody's using our CQ any longer -- we can destroy it */
- h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
- if (h_ret == H_R_STATE) {
- /* cq in err: read err data and destroy it forcibly */
- ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
- "state. Try to delete it forcibly.",
- my_cq, cq_num, my_cq->ipz_cq_handle.handle);
- ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
- h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
- if (h_ret == H_SUCCESS)
- ehca_dbg(device, "cq_num=%x deleted successfully.",
- cq_num);
- }
- if (h_ret != H_SUCCESS) {
- ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
- "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
- return ehca2ib_return_code(h_ret);
- }
- ipz_queue_dtor(NULL, &my_cq->ipz_queue);
- kmem_cache_free(cq_cache, my_cq);
-
- atomic_dec(&shca->num_cqs);
- return 0;
-}
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
- /* TODO: proper resize needs to be done */
- ehca_err(cq->device, "not implemented yet");
-
- return -EFAULT;
-}
-
-int ehca_init_cq_cache(void)
-{
- cq_cache = kmem_cache_create("ehca_cache_cq",
- sizeof(struct ehca_cq), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!cq_cache)
- return -ENOMEM;
- return 0;
-}
-
-void ehca_cleanup_cq_cache(void)
-{
- if (cq_cache)
- kmem_cache_destroy(cq_cache);
-}
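
ehca_create_cq() above unwinds failures through its create_cq_exit1..4 labels, releasing in reverse order whatever was acquired up to the failing step. A minimal stand-alone sketch of that goto-unwind idiom follows; step_a/step_b/step_c and create_object are placeholder names, not driver functions.

/*
 * Sketch of the goto-based error-unwind pattern: each later failure jumps
 * to a label that frees everything acquired so far, newest first.
 */
#include <stdio.h>
#include <stdlib.h>

static int step_a(void **p) { *p = malloc(16); return *p ? 0 : -1; }
static int step_b(void **p) { *p = malloc(16); return *p ? 0 : -1; }
static int step_c(void) { return -1; /* pretend the last step fails */ }

static int create_object(void)
{
	void *a, *b;
	int ret;

	ret = step_a(&a);
	if (ret)
		goto exit0;                    /* nothing to undo yet */

	ret = step_b(&b);
	if (ret)
		goto exit1;                    /* undo step_a only */

	ret = step_c();
	if (ret)
		goto exit2;                    /* undo step_b, then step_a */

	return 0;

exit2:
	free(b);
exit1:
	free(a);
exit0:
	return ret;
}

int main(void)
{
	printf("create_object() = %d\n", create_object());
	return 0;
}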
diff --git a/drivers/staging/rdma/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
deleted file mode 100644
index 90da6747d395..000000000000
--- a/drivers/staging/rdma/ehca/ehca_eq.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Event queue handling
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_qes.h"
-#include "hcp_if.h"
-#include "ipz_pt_fn.h"
-
-int ehca_create_eq(struct ehca_shca *shca,
- struct ehca_eq *eq,
- const enum ehca_eq_type type, const u32 length)
-{
- int ret;
- u64 h_ret;
- u32 nr_pages;
- u32 i;
- void *vpage;
- struct ib_device *ib_dev = &shca->ib_device;
-
- spin_lock_init(&eq->spinlock);
- spin_lock_init(&eq->irq_spinlock);
- eq->is_initialized = 0;
-
- if (type != EHCA_EQ && type != EHCA_NEQ) {
- ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
- return -EINVAL;
- }
- if (!length) {
- ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
- return -EINVAL;
- }
-
- h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
- &eq->pf,
- type,
- length,
- &eq->ipz_eq_handle,
- &eq->length,
- &nr_pages, &eq->ist);
-
- if (h_ret != H_SUCCESS) {
- ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
- return -EINVAL;
- }
-
- ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
- EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
- if (!ret) {
- ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
- goto create_eq_exit1;
- }
-
- for (i = 0; i < nr_pages; i++) {
- u64 rpage;
-
- vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
- if (!vpage)
- goto create_eq_exit2;
-
- rpage = __pa(vpage);
- h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
- eq->ipz_eq_handle,
- &eq->pf,
- 0, 0, rpage, 1);
-
- if (i == (nr_pages - 1)) {
- /* last page */
- vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
- if (h_ret != H_SUCCESS || vpage)
- goto create_eq_exit2;
- } else {
- if (h_ret != H_PAGE_REGISTERED)
- goto create_eq_exit2;
- }
- }
-
- ipz_qeit_reset(&eq->ipz_queue);
-
- /* register interrupt handlers and initialize work queues */
- if (type == EHCA_EQ) {
- tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
-
- ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
- 0, "ehca_eq",
- (void *)shca);
- if (ret < 0)
- ehca_err(ib_dev, "Can't map interrupt handler.");
- } else if (type == EHCA_NEQ) {
- tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
-
- ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
- 0, "ehca_neq",
- (void *)shca);
- if (ret < 0)
- ehca_err(ib_dev, "Can't map interrupt handler.");
- }
-
- eq->is_initialized = 1;
-
- return 0;
-
-create_eq_exit2:
- ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-create_eq_exit1:
- hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
- return -EINVAL;
-}
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
- unsigned long flags;
- void *eqe;
-
- spin_lock_irqsave(&eq->spinlock, flags);
- eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
- spin_unlock_irqrestore(&eq->spinlock, flags);
-
- return eqe;
-}
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
- unsigned long flags;
- u64 h_ret;
-
- ibmebus_free_irq(eq->ist, (void *)shca);
-
- spin_lock_irqsave(&shca_list_lock, flags);
- eq->is_initialized = 0;
- spin_unlock_irqrestore(&shca_list_lock, flags);
-
- tasklet_kill(&eq->interrupt_task);
-
- h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't free EQ resources.");
- return -EINVAL;
- }
- ipz_queue_dtor(NULL, &eq->ipz_queue);
-
- return 0;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
deleted file mode 100644
index e8b1bb65797a..000000000000
--- a/drivers/staging/rdma/ehca/ehca_hca.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * HCA query functions
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/gfp.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static unsigned int limit_uint(unsigned int value)
-{
- return min_t(unsigned int, value, INT_MAX);
-}
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- int i, ret = 0;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
- ib_device);
- struct hipz_query_hca *rblock;
-
- static const u32 cap_mapping[] = {
- IB_DEVICE_RESIZE_MAX_WR, HCA_CAP_WQE_RESIZE,
- IB_DEVICE_BAD_PKEY_CNTR, HCA_CAP_BAD_P_KEY_CTR,
- IB_DEVICE_BAD_QKEY_CNTR, HCA_CAP_Q_KEY_VIOL_CTR,
- IB_DEVICE_RAW_MULTI, HCA_CAP_RAW_PACKET_MCAST,
- IB_DEVICE_AUTO_PATH_MIG, HCA_CAP_AUTO_PATH_MIG,
- IB_DEVICE_CHANGE_PHY_PORT, HCA_CAP_SQD_RTS_PORT_CHANGE,
- IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
- IB_DEVICE_CURR_QP_STATE_MOD, HCA_CAP_CUR_QP_STATE_MOD,
- IB_DEVICE_SHUTDOWN_PORT, HCA_CAP_SHUTDOWN_PORT,
- IB_DEVICE_INIT_TYPE, HCA_CAP_INIT_TYPE,
- IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT,
- };
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query device properties");
- ret = -EINVAL;
- goto query_device1;
- }
-
- memset(props, 0, sizeof(struct ib_device_attr));
- props->page_size_cap = shca->hca_cap_mr_pgsize;
- props->fw_ver = rblock->hw_ver;
- props->max_mr_size = rblock->max_mr_size;
- props->vendor_id = rblock->vendor_id >> 8;
- props->vendor_part_id = rblock->vendor_part_id >> 16;
- props->hw_ver = rblock->hw_ver;
- props->max_qp = limit_uint(rblock->max_qp);
- props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
- props->max_sge = limit_uint(rblock->max_sge);
- props->max_sge_rd = limit_uint(rblock->max_sge_rd);
- props->max_cq = limit_uint(rblock->max_cq);
- props->max_cqe = limit_uint(rblock->max_cqe);
- props->max_mr = limit_uint(rblock->max_mr);
- props->max_mw = limit_uint(rblock->max_mw);
- props->max_pd = limit_uint(rblock->max_pd);
- props->max_ah = limit_uint(rblock->max_ah);
- props->max_ee = limit_uint(rblock->max_rd_ee_context);
- props->max_rdd = limit_uint(rblock->max_rd_domain);
- props->max_fmr = limit_uint(rblock->max_mr);
- props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
- props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
- props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
- props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
- props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
-
- if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
- props->max_srq = limit_uint(props->max_qp);
- props->max_srq_wr = limit_uint(props->max_qp_wr);
- props->max_srq_sge = 3;
- }
-
- props->max_pkeys = 16;
- /* Some FW versions say 0 here; insert sensible value in that case */
- props->local_ca_ack_delay = rblock->local_ca_ack_delay ?
- min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
- props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
- props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
- props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
- props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
- props->max_total_mcast_qp_attach
- = limit_uint(rblock->max_total_mcast_qp_attach);
-
- /* translate device capabilities */
- props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
- for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
- if (rblock->hca_cap_indicators & cap_mapping[i + 1])
- props->device_cap_flags |= cap_mapping[i];
-
-query_device1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
-{
- switch (fw_mtu) {
- case 0x1:
- return IB_MTU_256;
- case 0x2:
- return IB_MTU_512;
- case 0x3:
- return IB_MTU_1024;
- case 0x4:
- return IB_MTU_2048;
- case 0x5:
- return IB_MTU_4096;
- default:
- ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
- fw_mtu);
- return 0;
- }
-}
-
-static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
-{
- switch (vl_cap) {
- case 0x1:
- return 1;
- case 0x2:
- return 2;
- case 0x3:
- return 4;
- case 0x4:
- return 8;
- case 0x5:
- return 15;
- default:
- ehca_err(&shca->ib_device, "invalid Vl Capability: %x.",
- vl_cap);
- return 0;
- }
-}
-
-int ehca_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
- ib_device);
- struct hipz_query_port *rblock;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_port1;
- }
-
- memset(props, 0, sizeof(struct ib_port_attr));
-
- props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
- props->port_cap_flags = rblock->capability_mask;
- props->gid_tbl_len = rblock->gid_tbl_len;
- if (rblock->max_msg_sz)
- props->max_msg_sz = rblock->max_msg_sz;
- else
- props->max_msg_sz = 0x1 << 31;
- props->bad_pkey_cntr = rblock->bad_pkey_cntr;
- props->qkey_viol_cntr = rblock->qkey_viol_cntr;
- props->pkey_tbl_len = rblock->pkey_tbl_len;
- props->lid = rblock->lid;
- props->sm_lid = rblock->sm_lid;
- props->lmc = rblock->lmc;
- props->sm_sl = rblock->sm_sl;
- props->subnet_timeout = rblock->subnet_timeout;
- props->init_type_reply = rblock->init_type_reply;
- props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
-
- if (rblock->state && rblock->phys_width) {
- props->phys_state = rblock->phys_pstate;
- props->state = rblock->phys_state;
- props->active_width = rblock->phys_width;
- props->active_speed = rblock->phys_speed;
- } else {
- /* old firmware releases don't report physical
- * port info, so use default values
- */
- props->phys_state = 5;
- props->state = rblock->state;
- props->active_width = IB_WIDTH_12X;
- props->active_speed = IB_SPEED_SDR;
- }
-
-query_port1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-int ehca_query_sma_attr(struct ehca_shca *shca,
- u8 port, struct ehca_sma_attr *attr)
-{
- int ret = 0;
- u64 h_ret;
- struct hipz_query_port *rblock;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_sma_attr1;
- }
-
- memset(attr, 0, sizeof(struct ehca_sma_attr));
-
- attr->lid = rblock->lid;
- attr->lmc = rblock->lmc;
- attr->sm_sl = rblock->sm_sl;
- attr->sm_lid = rblock->sm_lid;
-
- attr->pkey_tbl_len = rblock->pkey_tbl_len;
- memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
-
-query_sma_attr1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca;
- struct hipz_query_port *rblock;
-
- shca = container_of(ibdev, struct ehca_shca, ib_device);
- if (index > 16) {
- ehca_err(&shca->ib_device, "Invalid index: %x.", index);
- return -EINVAL;
- }
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_pkey1;
- }
-
- memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
-
-query_pkey1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
- ib_device);
- struct hipz_query_port *rblock;
-
- if (index < 0 || index > 255) {
- ehca_err(&shca->ib_device, "Invalid index: %x.", index);
- return -EINVAL;
- }
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_gid1;
- }
-
- memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
- memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
-
-query_gid1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-static const u32 allowed_port_caps = (
- IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
- IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP);
-
-int ehca_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- int ret = 0;
- struct ehca_shca *shca;
- struct hipz_query_port *rblock;
- u32 cap;
- u64 hret;
-
- shca = container_of(ibdev, struct ehca_shca, ib_device);
- if ((props->set_port_cap_mask | props->clr_port_cap_mask)
- & ~allowed_port_caps) {
- ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
- "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
- props->clr_port_cap_mask, allowed_port_caps);
- return -EINVAL;
- }
-
- if (mutex_lock_interruptible(&shca->modify_mutex))
- return -ERESTARTSYS;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- ret = -ENOMEM;
- goto modify_port1;
- }
-
- hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (hret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto modify_port2;
- }
-
- cap = (rblock->capability_mask | props->set_port_cap_mask)
- & ~props->clr_port_cap_mask;
-
- hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
- cap, props->init_type, port_modify_mask);
- if (hret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
- hret);
- ret = -EINVAL;
- }
-
-modify_port2:
- ehca_free_fw_ctrlblock(rblock);
-
-modify_port1:
- mutex_unlock(&shca->modify_mutex);
-
- return ret;
-}
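
ehca_query_device() above translates hardware capability bits into ib_device_attr flags by walking the flat cap_mapping[] array two entries at a time. A self-contained sketch of that pair-array idiom, with made-up flag values and names:

/*
 * Sketch: a flat array of (api_flag, hw_flag) pairs, walked in steps of
 * two; each hardware bit that is set contributes its API counterpart.
 */
#include <stdint.h>
#include <stdio.h>

#define API_FLAG_X  0x1
#define API_FLAG_Y  0x2
#define HW_FLAG_X   0x100
#define HW_FLAG_Y   0x200

static const uint32_t cap_pairs[] = {
	API_FLAG_X, HW_FLAG_X,
	API_FLAG_Y, HW_FLAG_Y,
};

static uint32_t translate_caps(uint32_t hw_caps)
{
	uint32_t api_caps = 0;
	size_t i;

	for (i = 0; i < sizeof(cap_pairs) / sizeof(cap_pairs[0]); i += 2)
		if (hw_caps & cap_pairs[i + 1])
			api_caps |= cap_pairs[i];

	return api_caps;
}

int main(void)
{
	printf("caps = 0x%x\n", translate_caps(HW_FLAG_Y));  /* -> 0x2 */
	return 0;
}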
diff --git a/drivers/staging/rdma/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
deleted file mode 100644
index 8615d7cf7e01..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Functions for EQs, NEQs and interrupts
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <linux/smpboot.h>
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
-#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
-#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
-#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
-#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
-
-#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
-#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
-#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
-#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
-#define NEQE_SPECIFIC_EVENT EHCA_BMASK_IBM(16, 23)
-
-#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
-#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
-
-static void queue_comp_task(struct ehca_cq *__cq);
-
-static struct ehca_comp_pool *pool;
-
-static inline void comp_event_callback(struct ehca_cq *cq)
-{
- if (!cq->ib_cq.comp_handler)
- return;
-
- spin_lock(&cq->cb_lock);
- cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
- spin_unlock(&cq->cb_lock);
-
- return;
-}
-
-static void print_error_data(struct ehca_shca *shca, void *data,
- u64 *rblock, int length)
-{
- u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
- u64 resource = rblock[1];
-
- switch (type) {
- case 0x1: /* Queue Pair */
- {
- struct ehca_qp *qp = (struct ehca_qp *)data;
-
- /* only print error data if AER is set */
- if (rblock[6] == 0)
- return;
-
- ehca_err(&shca->ib_device,
- "QP 0x%x (resource=%llx) has errors.",
- qp->ib_qp.qp_num, resource);
- break;
- }
- case 0x4: /* Completion Queue */
- {
- struct ehca_cq *cq = (struct ehca_cq *)data;
-
- ehca_err(&shca->ib_device,
- "CQ 0x%x (resource=%llx) has errors.",
- cq->cq_number, resource);
- break;
- }
- default:
- ehca_err(&shca->ib_device,
- "Unknown error type: %llx on %s.",
- type, shca->ib_device.name);
- break;
- }
-
- ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
- ehca_err(&shca->ib_device, "EHCA ----- error data begin "
- "---------------------------------------------------");
- ehca_dmp(rblock, length, "resource=%llx", resource);
- ehca_err(&shca->ib_device, "EHCA ----- error data end "
- "----------------------------------------------------");
-
- return;
-}
-
-int ehca_error_data(struct ehca_shca *shca, void *data,
- u64 resource)
-{
-
- unsigned long ret;
- u64 *rblock;
- unsigned long block_count;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
- ret = -ENOMEM;
- goto error_data1;
- }
-
- /* rblock must be 4K aligned and should be 4K large */
- ret = hipz_h_error_data(shca->ipz_hca_handle,
- resource,
- rblock,
- &block_count);
-
- if (ret == H_R_STATE)
- ehca_err(&shca->ib_device,
- "No error data is available: %llx.", resource);
- else if (ret == H_SUCCESS) {
- int length;
-
- length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
-
- if (length > EHCA_PAGESIZE)
- length = EHCA_PAGESIZE;
-
- print_error_data(shca, data, rblock, length);
- } else
- ehca_err(&shca->ib_device,
- "Error data could not be fetched: %llx", resource);
-
- ehca_free_fw_ctrlblock(rblock);
-
-error_data1:
- return ret;
-
-}
-
-static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
- enum ib_event_type event_type)
-{
- struct ib_event event;
-
- /* PATH_MIG without the QP ever having been armed is false alarm */
- if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
- return;
-
- event.device = &shca->ib_device;
- event.event = event_type;
-
- if (qp->ext_type == EQPT_SRQ) {
- if (!qp->ib_srq.event_handler)
- return;
-
- event.element.srq = &qp->ib_srq;
- qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
- } else {
- if (!qp->ib_qp.event_handler)
- return;
-
- event.element.qp = &qp->ib_qp;
- qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
- }
-}
-
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
- enum ib_event_type event_type, int fatal)
-{
- struct ehca_qp *qp;
- u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
- read_lock(&ehca_qp_idr_lock);
- qp = idr_find(&ehca_qp_idr, token);
- if (qp)
- atomic_inc(&qp->nr_events);
- read_unlock(&ehca_qp_idr_lock);
-
- if (!qp)
- return;
-
- if (fatal)
- ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
-
- dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
- IB_EVENT_SRQ_ERR : event_type);
-
- /*
- * eHCA only processes one WQE at a time for SRQ base QPs,
- * so the last WQE has been processed as soon as the QP enters
- * error state.
- */
- if (fatal && qp->ext_type == EQPT_SRQBASE)
- dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
-
- if (atomic_dec_and_test(&qp->nr_events))
- wake_up(&qp->wait_completion);
- return;
-}
-
-static void cq_event_callback(struct ehca_shca *shca,
- u64 eqe)
-{
- struct ehca_cq *cq;
- u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
-
- read_lock(&ehca_cq_idr_lock);
- cq = idr_find(&ehca_cq_idr, token);
- if (cq)
- atomic_inc(&cq->nr_events);
- read_unlock(&ehca_cq_idr_lock);
-
- if (!cq)
- return;
-
- ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
-
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
-
- return;
-}
-
-static void parse_identifier(struct ehca_shca *shca, u64 eqe)
-{
- u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
-
- switch (identifier) {
- case 0x02: /* path migrated */
- qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
- break;
- case 0x03: /* communication established */
- qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
- break;
- case 0x04: /* send queue drained */
- qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
- break;
- case 0x05: /* QP error */
- case 0x06: /* QP error */
- qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
- break;
- case 0x07: /* CQ error */
- case 0x08: /* CQ error */
- cq_event_callback(shca, eqe);
- break;
- case 0x09: /* MRMWPTE error */
- ehca_err(&shca->ib_device, "MRMWPTE error.");
- break;
- case 0x0A: /* port event */
- ehca_err(&shca->ib_device, "Port event.");
- break;
- case 0x0B: /* MR access error */
- ehca_err(&shca->ib_device, "MR access error.");
- break;
- case 0x0C: /* EQ error */
- ehca_err(&shca->ib_device, "EQ error.");
- break;
- case 0x0D: /* P/Q_Key mismatch */
- ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
- break;
- case 0x10: /* sampling complete */
- ehca_err(&shca->ib_device, "Sampling complete.");
- break;
- case 0x11: /* unaffiliated access error */
- ehca_err(&shca->ib_device, "Unaffiliated access error.");
- break;
- case 0x12: /* path migrating */
- ehca_err(&shca->ib_device, "Path migrating.");
- break;
- case 0x13: /* interface trace stopped */
- ehca_err(&shca->ib_device, "Interface trace stopped.");
- break;
- case 0x14: /* first error capture info available */
- ehca_info(&shca->ib_device, "First error capture available");
- break;
- case 0x15: /* SRQ limit reached */
- qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
- break;
- default:
- ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
- identifier, shca->ib_device.name);
- break;
- }
-
- return;
-}
-
-static void dispatch_port_event(struct ehca_shca *shca, int port_num,
- enum ib_event_type type, const char *msg)
-{
- struct ib_event event;
-
- ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
- event.device = &shca->ib_device;
- event.event = type;
- event.element.port_num = port_num;
- ib_dispatch_event(&event);
-}
-
-static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
-{
- struct ehca_sma_attr new_attr;
- struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
-
- ehca_query_sma_attr(shca, port_num, &new_attr);
-
- if (new_attr.sm_sl != old_attr->sm_sl ||
- new_attr.sm_lid != old_attr->sm_lid)
- dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
- "SM changed");
-
- if (new_attr.lid != old_attr->lid ||
- new_attr.lmc != old_attr->lmc)
- dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
- "LID changed");
-
- if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
- memcmp(new_attr.pkeys, old_attr->pkeys,
- sizeof(u16) * new_attr.pkey_tbl_len))
- dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
- "P_Key changed");
-
- *old_attr = new_attr;
-}
-
-/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
-static int replay_modify_qp(struct ehca_sport *sport)
-{
- int aqp1_destroyed;
- unsigned long flags;
-
- spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-
- aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
-
- if (sport->ibqp_sqp[IB_QPT_SMI])
- ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
- if (!aqp1_destroyed)
- ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-
- spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
- return aqp1_destroyed;
-}
-
-static void parse_ec(struct ehca_shca *shca, u64 eqe)
-{
- u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
- u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
- u8 spec_event;
- struct ehca_sport *sport = &shca->sport[port - 1];
-
- switch (ec) {
- case 0x30: /* port availability change */
- if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
- /* only replay modify_qp calls in autodetect mode;
- * if AQP1 was destroyed, the port is already down
- * again and we can drop the event.
- */
- if (ehca_nr_ports < 0)
- if (replay_modify_qp(sport))
- break;
-
- sport->port_state = IB_PORT_ACTIVE;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
- "is active");
- ehca_query_sma_attr(shca, port, &sport->saved_attr);
- } else {
- sport->port_state = IB_PORT_DOWN;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
- "is inactive");
- }
- break;
- case 0x31:
- /* port configuration change
- * disruptive change is caused by
- * LID, PKEY or SM change
- */
- if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
- ehca_warn(&shca->ib_device, "disruptive port "
- "%d configuration change", port);
-
- sport->port_state = IB_PORT_DOWN;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
- "is inactive");
-
- sport->port_state = IB_PORT_ACTIVE;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
- "is active");
- ehca_query_sma_attr(shca, port,
- &sport->saved_attr);
- } else
- notify_port_conf_change(shca, port);
- break;
- case 0x32: /* adapter malfunction */
- ehca_err(&shca->ib_device, "Adapter malfunction.");
- break;
- case 0x33: /* trace stopped */
- ehca_err(&shca->ib_device, "Traced stopped.");
- break;
- case 0x34: /* util async event */
- spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
- if (spec_event == 0x80) /* client reregister required */
- dispatch_port_event(shca, port,
- IB_EVENT_CLIENT_REREGISTER,
- "client reregister req.");
- else
- ehca_warn(&shca->ib_device, "Unknown util async "
- "event %x on port %x", spec_event, port);
- break;
- default:
- ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
- ec, shca->ib_device.name);
- break;
- }
-
- return;
-}
-
-static inline void reset_eq_pending(struct ehca_cq *cq)
-{
- u64 CQx_EP;
- struct h_galpa gal = cq->galpas.kernel;
-
- hipz_galpa_store_cq(gal, cqx_ep, 0x0);
- CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
-
- return;
-}
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
-{
- struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
- tasklet_hi_schedule(&shca->neq.interrupt_task);
-
- return IRQ_HANDLED;
-}
-
-void ehca_tasklet_neq(unsigned long data)
-{
- struct ehca_shca *shca = (struct ehca_shca*)data;
- struct ehca_eqe *eqe;
- u64 ret;
-
- eqe = ehca_poll_eq(shca, &shca->neq);
-
- while (eqe) {
- if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
- parse_ec(shca, eqe->entry);
-
- eqe = ehca_poll_eq(shca, &shca->neq);
- }
-
- ret = hipz_h_reset_event(shca->ipz_hca_handle,
- shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
-
- if (ret != H_SUCCESS)
- ehca_err(&shca->ib_device, "Can't clear notification events.");
-
- return;
-}
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
-{
- struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
- tasklet_hi_schedule(&shca->eq.interrupt_task);
-
- return IRQ_HANDLED;
-}
-
-
-static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
-{
- u64 eqe_value;
- u32 token;
- struct ehca_cq *cq;
-
- eqe_value = eqe->entry;
- ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
- if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
- ehca_dbg(&shca->ib_device, "Got completion event");
- token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- read_lock(&ehca_cq_idr_lock);
- cq = idr_find(&ehca_cq_idr, token);
- if (cq)
- atomic_inc(&cq->nr_events);
- read_unlock(&ehca_cq_idr_lock);
- if (cq == NULL) {
- ehca_err(&shca->ib_device,
- "Invalid eqe for non-existing cq token=%x",
- token);
- return;
- }
- reset_eq_pending(cq);
- if (ehca_scaling_code)
- queue_comp_task(cq);
- else {
- comp_event_callback(cq);
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
- }
- } else {
-		ehca_dbg(&shca->ib_device, "Got non-completion event");
- parse_identifier(shca, eqe_value);
- }
-}
-
-void ehca_process_eq(struct ehca_shca *shca, int is_irq)
-{
- struct ehca_eq *eq = &shca->eq;
- struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
- u64 eqe_value, ret;
- int eqe_cnt, i;
- int eq_empty = 0;
-
- spin_lock(&eq->irq_spinlock);
- if (is_irq) {
- const int max_query_cnt = 100;
- int query_cnt = 0;
- int int_state = 1;
- do {
- int_state = hipz_h_query_int_state(
- shca->ipz_hca_handle, eq->ist);
- query_cnt++;
- iosync();
- } while (int_state && query_cnt < max_query_cnt);
- if (unlikely((query_cnt == max_query_cnt)))
- ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
- int_state, query_cnt);
- }
-
- /* read out all eqes */
- eqe_cnt = 0;
- do {
- u32 token;
- eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
- if (!eqe_cache[eqe_cnt].eqe)
- break;
- eqe_value = eqe_cache[eqe_cnt].eqe->entry;
- if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
- token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- read_lock(&ehca_cq_idr_lock);
- eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
- if (eqe_cache[eqe_cnt].cq)
- atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
- read_unlock(&ehca_cq_idr_lock);
- if (!eqe_cache[eqe_cnt].cq) {
- ehca_err(&shca->ib_device,
- "Invalid eqe for non-existing cq "
- "token=%x", token);
- continue;
- }
- } else
- eqe_cache[eqe_cnt].cq = NULL;
- eqe_cnt++;
- } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
- if (!eqe_cnt) {
- if (is_irq)
- ehca_dbg(&shca->ib_device,
- "No eqe found for irq event");
- goto unlock_irq_spinlock;
- } else if (!is_irq) {
- ret = hipz_h_eoi(eq->ist);
- if (ret != H_SUCCESS)
- ehca_err(&shca->ib_device,
-				 "bad return code EOI - rc = %lld\n", ret);
- ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
- }
- if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
- ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
- /* enable irq for new packets */
- for (i = 0; i < eqe_cnt; i++) {
- if (eq->eqe_cache[i].cq)
- reset_eq_pending(eq->eqe_cache[i].cq);
- }
- /* check eq */
- spin_lock(&eq->spinlock);
- eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
- spin_unlock(&eq->spinlock);
- /* call completion handler for cached eqes */
- for (i = 0; i < eqe_cnt; i++)
- if (eq->eqe_cache[i].cq) {
- if (ehca_scaling_code)
- queue_comp_task(eq->eqe_cache[i].cq);
- else {
- struct ehca_cq *cq = eq->eqe_cache[i].cq;
- comp_event_callback(cq);
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
- }
- } else {
-			ehca_dbg(&shca->ib_device, "Got non-completion event");
- parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
- }
- /* poll eq if not empty */
- if (eq_empty)
- goto unlock_irq_spinlock;
- do {
- struct ehca_eqe *eqe;
- eqe = ehca_poll_eq(shca, &shca->eq);
- if (!eqe)
- break;
- process_eqe(shca, eqe);
- } while (1);
-
-unlock_irq_spinlock:
- spin_unlock(&eq->irq_spinlock);
-}
-
-void ehca_tasklet_eq(unsigned long data)
-{
- ehca_process_eq((struct ehca_shca*)data, 1);
-}
-
-static int find_next_online_cpu(struct ehca_comp_pool *pool)
-{
- int cpu;
- unsigned long flags;
-
- WARN_ON_ONCE(!in_interrupt());
- if (ehca_debug_level >= 3)
- ehca_dmp(cpu_online_mask, cpumask_size(), "");
-
- spin_lock_irqsave(&pool->last_cpu_lock, flags);
- do {
- cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_online_mask);
- pool->last_cpu = cpu;
- } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
- spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
-
- return cpu;
-}
-
-static void __queue_comp_task(struct ehca_cq *__cq,
- struct ehca_cpu_comp_task *cct,
- struct task_struct *thread)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cct->task_lock, flags);
- spin_lock(&__cq->task_lock);
-
- if (__cq->nr_callbacks == 0) {
- __cq->nr_callbacks++;
- list_add_tail(&__cq->entry, &cct->cq_list);
- cct->cq_jobs++;
- wake_up_process(thread);
- } else
- __cq->nr_callbacks++;
-
- spin_unlock(&__cq->task_lock);
- spin_unlock_irqrestore(&cct->task_lock, flags);
-}
-
-static void queue_comp_task(struct ehca_cq *__cq)
-{
- int cpu_id;
- struct ehca_cpu_comp_task *cct;
- struct task_struct *thread;
- int cq_jobs;
- unsigned long flags;
-
- cpu_id = find_next_online_cpu(pool);
- BUG_ON(!cpu_online(cpu_id));
-
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
- thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
- BUG_ON(!cct || !thread);
-
- spin_lock_irqsave(&cct->task_lock, flags);
- cq_jobs = cct->cq_jobs;
- spin_unlock_irqrestore(&cct->task_lock, flags);
- if (cq_jobs > 0) {
- cpu_id = find_next_online_cpu(pool);
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
- thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
- BUG_ON(!cct || !thread);
- }
- __queue_comp_task(__cq, cct, thread);
-}
-
-static void run_comp_task(struct ehca_cpu_comp_task *cct)
-{
- struct ehca_cq *cq;
-
- while (!list_empty(&cct->cq_list)) {
- cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
- spin_unlock_irq(&cct->task_lock);
-
- comp_event_callback(cq);
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
-
- spin_lock_irq(&cct->task_lock);
- spin_lock(&cq->task_lock);
- cq->nr_callbacks--;
- if (!cq->nr_callbacks) {
- list_del_init(cct->cq_list.next);
- cct->cq_jobs--;
- }
- spin_unlock(&cq->task_lock);
- }
-}
-
-static void comp_task_park(unsigned int cpu)
-{
- struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- struct ehca_cpu_comp_task *target;
- struct task_struct *thread;
- struct ehca_cq *cq, *tmp;
- LIST_HEAD(list);
-
- spin_lock_irq(&cct->task_lock);
- cct->cq_jobs = 0;
- cct->active = 0;
- list_splice_init(&cct->cq_list, &list);
- spin_unlock_irq(&cct->task_lock);
-
- cpu = find_next_online_cpu(pool);
- target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
- spin_lock_irq(&target->task_lock);
- list_for_each_entry_safe(cq, tmp, &list, entry) {
- list_del(&cq->entry);
- __queue_comp_task(cq, target, thread);
- }
- spin_unlock_irq(&target->task_lock);
-}
-
-static void comp_task_stop(unsigned int cpu, bool online)
-{
- struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
- spin_lock_irq(&cct->task_lock);
- cct->cq_jobs = 0;
- cct->active = 0;
- WARN_ON(!list_empty(&cct->cq_list));
- spin_unlock_irq(&cct->task_lock);
-}
-
-static int comp_task_should_run(unsigned int cpu)
-{
- struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
- return cct->cq_jobs;
-}
-
-static void comp_task(unsigned int cpu)
-{
- struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
- int cql_empty;
-
- spin_lock_irq(&cct->task_lock);
- cql_empty = list_empty(&cct->cq_list);
- if (!cql_empty) {
- __set_current_state(TASK_RUNNING);
- run_comp_task(cct);
- }
- spin_unlock_irq(&cct->task_lock);
-}
-
-static struct smp_hotplug_thread comp_pool_threads = {
- .thread_should_run = comp_task_should_run,
- .thread_fn = comp_task,
- .thread_comm = "ehca_comp/%u",
- .cleanup = comp_task_stop,
- .park = comp_task_park,
-};
-
-int ehca_create_comp_pool(void)
-{
- int cpu, ret = -ENOMEM;
-
- if (!ehca_scaling_code)
- return 0;
-
- pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
- if (pool == NULL)
- return -ENOMEM;
-
- spin_lock_init(&pool->last_cpu_lock);
- pool->last_cpu = cpumask_any(cpu_online_mask);
-
- pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
- if (!pool->cpu_comp_tasks)
- goto out_pool;
-
- pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
- if (!pool->cpu_comp_threads)
- goto out_tasks;
-
- for_each_present_cpu(cpu) {
- struct ehca_cpu_comp_task *cct;
-
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- spin_lock_init(&cct->task_lock);
- INIT_LIST_HEAD(&cct->cq_list);
- }
-
- comp_pool_threads.store = pool->cpu_comp_threads;
- ret = smpboot_register_percpu_thread(&comp_pool_threads);
- if (ret)
- goto out_threads;
-
- pr_info("eHCA scaling code enabled\n");
- return ret;
-
-out_threads:
- free_percpu(pool->cpu_comp_threads);
-out_tasks:
- free_percpu(pool->cpu_comp_tasks);
-out_pool:
- kfree(pool);
- return ret;
-}
-
-void ehca_destroy_comp_pool(void)
-{
- if (!ehca_scaling_code)
- return;
-
- smpboot_unregister_percpu_thread(&comp_pool_threads);
-
- free_percpu(pool->cpu_comp_threads);
- free_percpu(pool->cpu_comp_tasks);
- kfree(pool);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
deleted file mode 100644
index 5370199f08c7..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Function definitions and structs for EQs, NEQs and interrupts
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IRQ_H
-#define __EHCA_IRQ_H
-
-
-struct ehca_shca;
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
-void ehca_tasklet_neq(unsigned long data);
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
-void ehca_tasklet_eq(unsigned long data);
-void ehca_process_eq(struct ehca_shca *shca, int is_irq);
-
-struct ehca_cpu_comp_task {
- struct list_head cq_list;
- spinlock_t task_lock;
- int cq_jobs;
- int active;
-};
-
-struct ehca_comp_pool {
- struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
- struct task_struct * __percpu *cpu_comp_threads;
- int last_cpu;
- spinlock_t last_cpu_lock;
-};
-
-int ehca_create_comp_pool(void);
-void ehca_destroy_comp_pool(void);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
deleted file mode 100644
index 80e6a3d5df3e..000000000000
--- a/drivers/staging/rdma/ehca/ehca_iverbs.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Function definitions for internal functions
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Dietmar Decker <ddecker@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IVERBS_H__
-#define __EHCA_IVERBS_H__
-
-#include "ehca_classes.h"
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw);
-
-int ehca_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props);
-
-enum rdma_protocol_type
-ehca_query_protocol(struct ib_device *device, u8 port_num);
-
-int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
- struct ehca_sma_attr *attr);
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
- union ib_gid *gid);
-
-int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
- struct ib_port_modify *props);
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int ehca_dealloc_pd(struct ib_pd *pd);
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_destroy_ah(struct ib_ah *ah);
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags, u64 *iova_start);
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int mr_access_flags,
- struct ib_udata *udata);
-
-int ehca_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf, int mr_access_flags, u64 *iova_start);
-
-int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
-
-int ehca_dereg_mr(struct ib_mr *mr);
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-
-int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
- struct ib_mw_bind *mw_bind);
-
-int ehca_dealloc_mw(struct ib_mw *mw);
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
- u64 *page_list, int list_len, u64 iova);
-
-int ehca_unmap_fmr(struct list_head *fmr_list);
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr);
-
-enum ehca_eq_type {
- EHCA_EQ = 0, /* Event Queue */
- EHCA_NEQ /* Notification Event Queue */
-};
-
-int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
- enum ehca_eq_type type, const u32 length);
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int ehca_destroy_cq(struct ib_cq *cq);
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
-
-int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
-
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-
-int ehca_destroy_qp(struct ib_qp *qp);
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- struct ib_udata *udata);
-
-int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
- struct ib_send_wr **bad_send_wr);
-
-int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
-
-int ehca_post_srq_recv(struct ib_srq *srq,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
-
-int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-
-int ehca_destroy_srq(struct ib_srq *srq);
-
-u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
- struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
- struct ib_udata *udata);
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context);
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
-
-void ehca_poll_eqs(unsigned long data);
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
- enum ib_rate path_rate, u32 *ipd);
-
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
-
-#ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(gfp_t flags);
-void ehca_free_fw_ctrlblock(void *ptr);
-#else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
-#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
-#endif
-
-void ehca_recover_sqp(struct ib_qp *sqp);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
deleted file mode 100644
index 8246418cd4e0..000000000000
--- a/drivers/staging/rdma/ehca/ehca_main.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * module start/stop, HCA detection
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <linux/slab.h>
-#endif
-
-#include <linux/notifier.h>
-#include <linux/memory.h>
-#include <rdma/ib_mad.h>
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-#define HCAD_VERSION "0029"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
-MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION(HCAD_VERSION);
-
-static bool ehca_open_aqp1 = 0;
-static int ehca_hw_level = 0;
-static bool ehca_poll_all_eqs = 1;
-
-int ehca_debug_level = 0;
-int ehca_nr_ports = -1;
-bool ehca_use_hp_mr = 0;
-int ehca_port_act_time = 30;
-int ehca_static_rate = -1;
-bool ehca_scaling_code = 0;
-int ehca_lock_hcalls = -1;
-int ehca_max_cq = -1;
-int ehca_max_qp = -1;
-
-module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
-module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
-module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
-module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
-module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
-module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
-module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
-module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
-module_param_named(lock_hcalls, ehca_lock_hcalls, bint, S_IRUGO);
-module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
-module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);
-
-MODULE_PARM_DESC(open_aqp1,
- "Open AQP1 on startup (default: no)");
-MODULE_PARM_DESC(debug_level,
- "Amount of debug output (0: none (default), 1: traces, "
- "2: some dumps, 3: lots)");
-MODULE_PARM_DESC(hw_level,
- "Hardware level (0: autosensing (default), "
- "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
-MODULE_PARM_DESC(nr_ports,
- "number of connected ports (-1: autodetect (default), "
- "1: port one only, 2: two ports)");
-MODULE_PARM_DESC(use_hp_mr,
- "Use high performance MRs (default: no)");
-MODULE_PARM_DESC(port_act_time,
- "Time to wait for port activation (default: 30 sec)");
-MODULE_PARM_DESC(poll_all_eqs,
- "Poll all event queues periodically (default: yes)");
-MODULE_PARM_DESC(static_rate,
- "Set permanent static rate (default: no static rate)");
-MODULE_PARM_DESC(scaling_code,
- "Enable scaling code (default: no)");
-MODULE_PARM_DESC(lock_hcalls,
- "Serialize all hCalls made by the driver "
- "(default: autodetect)");
-MODULE_PARM_DESC(number_of_cqs,
- "Max number of CQs which can be allocated "
- "(default: autodetect)");
-MODULE_PARM_DESC(number_of_qps,
- "Max number of QPs which can be allocated "
- "(default: autodetect)");
-
-DEFINE_RWLOCK(ehca_qp_idr_lock);
-DEFINE_RWLOCK(ehca_cq_idr_lock);
-DEFINE_IDR(ehca_qp_idr);
-DEFINE_IDR(ehca_cq_idr);
-
-static LIST_HEAD(shca_list); /* list of all registered ehcas */
-DEFINE_SPINLOCK(shca_list_lock);
-
-static struct timer_list poll_eqs_timer;
-
-#ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache;
-
-void *ehca_alloc_fw_ctrlblock(gfp_t flags)
-{
- void *ret = kmem_cache_zalloc(ctblk_cache, flags);
- if (!ret)
- ehca_gen_err("Out of memory for ctblk");
- return ret;
-}
-
-void ehca_free_fw_ctrlblock(void *ptr)
-{
- if (ptr)
- kmem_cache_free(ctblk_cache, ptr);
-
-}
-#endif
-
-int ehca2ib_return_code(u64 ehca_rc)
-{
- switch (ehca_rc) {
- case H_SUCCESS:
- return 0;
- case H_RESOURCE: /* Resource in use */
- case H_BUSY:
- return -EBUSY;
- case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
- case H_CONSTRAINED: /* resource constraint */
- case H_NO_MEM:
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
-static int ehca_create_slab_caches(void)
-{
- int ret;
-
- ret = ehca_init_pd_cache();
- if (ret) {
- ehca_gen_err("Cannot create PD SLAB cache.");
- return ret;
- }
-
- ret = ehca_init_cq_cache();
- if (ret) {
- ehca_gen_err("Cannot create CQ SLAB cache.");
- goto create_slab_caches2;
- }
-
- ret = ehca_init_qp_cache();
- if (ret) {
- ehca_gen_err("Cannot create QP SLAB cache.");
- goto create_slab_caches3;
- }
-
- ret = ehca_init_av_cache();
- if (ret) {
- ehca_gen_err("Cannot create AV SLAB cache.");
- goto create_slab_caches4;
- }
-
- ret = ehca_init_mrmw_cache();
- if (ret) {
- ehca_gen_err("Cannot create MR&MW SLAB cache.");
- goto create_slab_caches5;
- }
-
- ret = ehca_init_small_qp_cache();
- if (ret) {
- ehca_gen_err("Cannot create small queue SLAB cache.");
- goto create_slab_caches6;
- }
-
-#ifdef CONFIG_PPC_64K_PAGES
- ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
- EHCA_PAGESIZE, H_CB_ALIGNMENT,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!ctblk_cache) {
- ehca_gen_err("Cannot create ctblk SLAB cache.");
- ehca_cleanup_small_qp_cache();
- ret = -ENOMEM;
- goto create_slab_caches6;
- }
-#endif
- return 0;
-
-create_slab_caches6:
- ehca_cleanup_mrmw_cache();
-
-create_slab_caches5:
- ehca_cleanup_av_cache();
-
-create_slab_caches4:
- ehca_cleanup_qp_cache();
-
-create_slab_caches3:
- ehca_cleanup_cq_cache();
-
-create_slab_caches2:
- ehca_cleanup_pd_cache();
-
- return ret;
-}
-
-static void ehca_destroy_slab_caches(void)
-{
- ehca_cleanup_small_qp_cache();
- ehca_cleanup_mrmw_cache();
- ehca_cleanup_av_cache();
- ehca_cleanup_qp_cache();
- ehca_cleanup_cq_cache();
- ehca_cleanup_pd_cache();
-#ifdef CONFIG_PPC_64K_PAGES
- if (ctblk_cache)
- kmem_cache_destroy(ctblk_cache);
-#endif
-}
-
-#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
-#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
-
-static struct cap_descr {
- u64 mask;
- char *descr;
-} hca_cap_descr[] = {
- { HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
- { HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
- { HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
- { HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
- { HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
- { HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
- { HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
- { HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
- { HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
- { HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
- { HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
- { HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
- { HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
- { HCA_CAP_SRQ, "HCA_CAP_SRQ" },
- { HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
- { HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
- { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
- { HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
-};
-
-static int ehca_sense_attributes(struct ehca_shca *shca)
-{
- int i, ret = 0;
- u64 h_ret;
- struct hipz_query_hca *rblock;
- struct hipz_query_port *port;
- const char *loc_code;
-
- static const u32 pgsize_map[] = {
- HCA_CAP_MR_PGSIZE_4K, 0x1000,
- HCA_CAP_MR_PGSIZE_64K, 0x10000,
- HCA_CAP_MR_PGSIZE_1M, 0x100000,
- HCA_CAP_MR_PGSIZE_16M, 0x1000000,
- };
-
- ehca_gen_dbg("Probing adapter %s...",
- shca->ofdev->dev.of_node->full_name);
- loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
- NULL);
- if (loc_code)
-		ehca_gen_dbg(" ... location code=%s", loc_code);
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_gen_err("Cannot allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_gen_err("Cannot query device properties. h_ret=%lli",
- h_ret);
- ret = -EPERM;
- goto sense_attributes1;
- }
-
- if (ehca_nr_ports == 1)
- shca->num_ports = 1;
- else
- shca->num_ports = (u8)rblock->num_ports;
-
- ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
-
- if (ehca_hw_level == 0) {
- u32 hcaaver;
- u32 revid;
-
- hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
- revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
-
- ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
-
- if (hcaaver == 1) {
- if (revid <= 3)
- shca->hw_level = 0x10 | (revid + 1);
- else
- shca->hw_level = 0x14;
- } else if (hcaaver == 2) {
- if (revid == 0)
- shca->hw_level = 0x21;
- else if (revid == 0x10)
- shca->hw_level = 0x22;
- else if (revid == 0x20 || revid == 0x21)
- shca->hw_level = 0x23;
- }
-
- if (!shca->hw_level) {
- ehca_gen_warn("unknown hardware version"
- " - assuming default level");
- shca->hw_level = 0x22;
- }
- } else
- shca->hw_level = ehca_hw_level;
- ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
-
- shca->hca_cap = rblock->hca_cap_indicators;
- ehca_gen_dbg(" ... HCA capabilities:");
- for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
- if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
- ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
-
- /* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
- * a firmware property, so it's valid across all adapters
- */
- if (ehca_lock_hcalls == -1)
- ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
- shca->hca_cap);
-
- /* translate supported MR page sizes; always support 4K */
- shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
- for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
- if (rblock->memory_page_size_supported & pgsize_map[i])
- shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
-
- /* Set maximum number of CQs and QPs to calculate EQ size */
- if (shca->max_num_qps == -1)
- shca->max_num_qps = min_t(int, rblock->max_qp,
- EHCA_MAX_NUM_QUEUES);
- else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
- ehca_gen_warn("The requested number of QPs is out of range "
- "(1 - %i) specified by HW. Value is set to %i",
- rblock->max_qp, rblock->max_qp);
- shca->max_num_qps = rblock->max_qp;
- }
-
- if (shca->max_num_cqs == -1)
- shca->max_num_cqs = min_t(int, rblock->max_cq,
- EHCA_MAX_NUM_QUEUES);
- else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
- ehca_gen_warn("The requested number of CQs is out of range "
- "(1 - %i) specified by HW. Value is set to %i",
- rblock->max_cq, rblock->max_cq);
- }
-
- /* query max MTU from first port -- it's the same for all ports */
- port = (struct hipz_query_port *)rblock;
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
- if (h_ret != H_SUCCESS) {
- ehca_gen_err("Cannot query port properties. h_ret=%lli",
- h_ret);
- ret = -EPERM;
- goto sense_attributes1;
- }
-
- shca->max_mtu = port->max_mtu;
-
-sense_attributes1:
- ehca_free_fw_ctrlblock(rblock);
- return ret;
-}
-
-static int init_node_guid(struct ehca_shca *shca)
-{
- int ret = 0;
- struct hipz_query_hca *rblock;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query device properties");
- ret = -EINVAL;
- goto init_node_guid1;
- }
-
- memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
-
-init_node_guid1:
- ehca_free_fw_ctrlblock(rblock);
- return ret;
-}
-
-static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = ehca_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-}
-
-static int ehca_init_device(struct ehca_shca *shca)
-{
- int ret;
-
- ret = init_node_guid(shca);
- if (ret)
- return ret;
-
- strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
- shca->ib_device.owner = THIS_MODULE;
-
- shca->ib_device.uverbs_abi_ver = 8;
- shca->ib_device.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
-
- shca->ib_device.node_type = RDMA_NODE_IB_CA;
- shca->ib_device.phys_port_cnt = shca->num_ports;
- shca->ib_device.num_comp_vectors = 1;
- shca->ib_device.dma_device = &shca->ofdev->dev;
- shca->ib_device.query_device = ehca_query_device;
- shca->ib_device.query_port = ehca_query_port;
- shca->ib_device.query_gid = ehca_query_gid;
- shca->ib_device.query_pkey = ehca_query_pkey;
-	/* shca->ib_device.modify_device = ehca_modify_device */
- shca->ib_device.modify_port = ehca_modify_port;
- shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
- shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
- shca->ib_device.alloc_pd = ehca_alloc_pd;
- shca->ib_device.dealloc_pd = ehca_dealloc_pd;
- shca->ib_device.create_ah = ehca_create_ah;
- /* shca->ib_device.modify_ah = ehca_modify_ah; */
- shca->ib_device.query_ah = ehca_query_ah;
- shca->ib_device.destroy_ah = ehca_destroy_ah;
- shca->ib_device.create_qp = ehca_create_qp;
- shca->ib_device.modify_qp = ehca_modify_qp;
- shca->ib_device.query_qp = ehca_query_qp;
- shca->ib_device.destroy_qp = ehca_destroy_qp;
- shca->ib_device.post_send = ehca_post_send;
- shca->ib_device.post_recv = ehca_post_recv;
- shca->ib_device.create_cq = ehca_create_cq;
- shca->ib_device.destroy_cq = ehca_destroy_cq;
- shca->ib_device.resize_cq = ehca_resize_cq;
- shca->ib_device.poll_cq = ehca_poll_cq;
- /* shca->ib_device.peek_cq = ehca_peek_cq; */
- shca->ib_device.req_notify_cq = ehca_req_notify_cq;
- /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
- shca->ib_device.get_dma_mr = ehca_get_dma_mr;
- shca->ib_device.reg_phys_mr = ehca_reg_phys_mr;
- shca->ib_device.reg_user_mr = ehca_reg_user_mr;
- shca->ib_device.query_mr = ehca_query_mr;
- shca->ib_device.dereg_mr = ehca_dereg_mr;
- shca->ib_device.rereg_phys_mr = ehca_rereg_phys_mr;
- shca->ib_device.alloc_mw = ehca_alloc_mw;
- shca->ib_device.bind_mw = ehca_bind_mw;
- shca->ib_device.dealloc_mw = ehca_dealloc_mw;
- shca->ib_device.alloc_fmr = ehca_alloc_fmr;
- shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
- shca->ib_device.unmap_fmr = ehca_unmap_fmr;
- shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
- shca->ib_device.attach_mcast = ehca_attach_mcast;
- shca->ib_device.detach_mcast = ehca_detach_mcast;
- shca->ib_device.process_mad = ehca_process_mad;
- shca->ib_device.mmap = ehca_mmap;
- shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
- shca->ib_device.get_port_immutable = ehca_port_immutable;
-
- if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
- shca->ib_device.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
-
- shca->ib_device.create_srq = ehca_create_srq;
- shca->ib_device.modify_srq = ehca_modify_srq;
- shca->ib_device.query_srq = ehca_query_srq;
- shca->ib_device.destroy_srq = ehca_destroy_srq;
- shca->ib_device.post_srq_recv = ehca_post_srq_recv;
- }
-
- return ret;
-}
-
-static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
-{
- struct ehca_sport *sport = &shca->sport[port - 1];
- struct ib_cq *ibcq;
- struct ib_qp *ibqp;
- struct ib_qp_init_attr qp_init_attr;
- struct ib_cq_init_attr cq_attr = {};
- int ret;
-
- if (sport->ibcq_aqp1) {
- ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
- return -EPERM;
- }
-
- cq_attr.cqe = 10;
- ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
- &cq_attr);
- if (IS_ERR(ibcq)) {
- ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
- return PTR_ERR(ibcq);
- }
- sport->ibcq_aqp1 = ibcq;
-
- if (sport->ibqp_sqp[IB_QPT_GSI]) {
- ehca_err(&shca->ib_device, "AQP1 QP is already created.");
- ret = -EPERM;
- goto create_aqp1;
- }
-
- memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
- qp_init_attr.send_cq = ibcq;
- qp_init_attr.recv_cq = ibcq;
- qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.cap.max_send_wr = 100;
- qp_init_attr.cap.max_recv_wr = 100;
- qp_init_attr.cap.max_send_sge = 2;
- qp_init_attr.cap.max_recv_sge = 1;
- qp_init_attr.qp_type = IB_QPT_GSI;
- qp_init_attr.port_num = port;
- qp_init_attr.qp_context = NULL;
- qp_init_attr.event_handler = NULL;
- qp_init_attr.srq = NULL;
-
- ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
- if (IS_ERR(ibqp)) {
- ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
- ret = PTR_ERR(ibqp);
- goto create_aqp1;
- }
- sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
-
- return 0;
-
-create_aqp1:
- ib_destroy_cq(sport->ibcq_aqp1);
- return ret;
-}
-
-static int ehca_destroy_aqp1(struct ehca_sport *sport)
-{
- int ret;
-
- ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
- if (ret) {
- ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
- return ret;
- }
-
- ret = ib_destroy_cq(sport->ibcq_aqp1);
- if (ret)
- ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
-
- return ret;
-}
-
-static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
-}
-
-static ssize_t ehca_store_debug_level(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- int value = (*buf) - '0';
- if (value >= 0 && value <= 9)
- ehca_debug_level = value;
- return 1;
-}
-
-static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
- ehca_show_debug_level, ehca_store_debug_level);
-
-static struct attribute *ehca_drv_attrs[] = {
- &driver_attr_debug_level.attr,
- NULL
-};
-
-static struct attribute_group ehca_drv_attr_grp = {
- .attrs = ehca_drv_attrs
-};
-
-static const struct attribute_group *ehca_drv_attr_groups[] = {
- &ehca_drv_attr_grp,
- NULL,
-};
-
-#define EHCA_RESOURCE_ATTR(name) \
-static ssize_t ehca_show_##name(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct ehca_shca *shca; \
- struct hipz_query_hca *rblock; \
- int data; \
- \
- shca = dev_get_drvdata(dev); \
- \
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
- if (!rblock) { \
- dev_err(dev, "Can't allocate rblock memory.\n"); \
- return 0; \
- } \
- \
- if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
- dev_err(dev, "Can't query device properties\n"); \
- ehca_free_fw_ctrlblock(rblock); \
- return 0; \
- } \
- \
- data = rblock->name; \
- ehca_free_fw_ctrlblock(rblock); \
- \
- if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
- return snprintf(buf, 256, "1\n"); \
- else \
- return snprintf(buf, 256, "%d\n", data); \
- \
-} \
-static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
-
-EHCA_RESOURCE_ATTR(num_ports);
-EHCA_RESOURCE_ATTR(hw_ver);
-EHCA_RESOURCE_ATTR(max_eq);
-EHCA_RESOURCE_ATTR(cur_eq);
-EHCA_RESOURCE_ATTR(max_cq);
-EHCA_RESOURCE_ATTR(cur_cq);
-EHCA_RESOURCE_ATTR(max_qp);
-EHCA_RESOURCE_ATTR(cur_qp);
-EHCA_RESOURCE_ATTR(max_mr);
-EHCA_RESOURCE_ATTR(cur_mr);
-EHCA_RESOURCE_ATTR(max_mw);
-EHCA_RESOURCE_ATTR(cur_mw);
-EHCA_RESOURCE_ATTR(max_pd);
-EHCA_RESOURCE_ATTR(max_ah);
-
-static ssize_t ehca_show_adapter_handle(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ehca_shca *shca = dev_get_drvdata(dev);
-
- return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
-
-}
-static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
-
-static struct attribute *ehca_dev_attrs[] = {
- &dev_attr_adapter_handle.attr,
- &dev_attr_num_ports.attr,
- &dev_attr_hw_ver.attr,
- &dev_attr_max_eq.attr,
- &dev_attr_cur_eq.attr,
- &dev_attr_max_cq.attr,
- &dev_attr_cur_cq.attr,
- &dev_attr_max_qp.attr,
- &dev_attr_cur_qp.attr,
- &dev_attr_max_mr.attr,
- &dev_attr_cur_mr.attr,
- &dev_attr_max_mw.attr,
- &dev_attr_cur_mw.attr,
- &dev_attr_max_pd.attr,
- &dev_attr_max_ah.attr,
- NULL
-};
-
-static struct attribute_group ehca_dev_attr_grp = {
- .attrs = ehca_dev_attrs
-};
-
-static int ehca_probe(struct platform_device *dev)
-{
- struct ehca_shca *shca;
- const u64 *handle;
- struct ib_pd *ibpd;
- int ret, i, eq_size;
- unsigned long flags;
-
- handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
- if (!handle) {
- ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
- dev->dev.of_node->full_name);
- return -ENODEV;
- }
-
- if (!(*handle)) {
- ehca_gen_err("Wrong eHCA handle for adapter: %s.",
- dev->dev.of_node->full_name);
- return -ENODEV;
- }
-
- shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
- if (!shca) {
- ehca_gen_err("Cannot allocate shca memory.");
- return -ENOMEM;
- }
-
- mutex_init(&shca->modify_mutex);
- atomic_set(&shca->num_cqs, 0);
- atomic_set(&shca->num_qps, 0);
- shca->max_num_qps = ehca_max_qp;
- shca->max_num_cqs = ehca_max_cq;
-
- for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
- spin_lock_init(&shca->sport[i].mod_sqp_lock);
-
- shca->ofdev = dev;
- shca->ipz_hca_handle.handle = *handle;
- dev_set_drvdata(&dev->dev, shca);
-
- ret = ehca_sense_attributes(shca);
- if (ret < 0) {
- ehca_gen_err("Cannot sense eHCA attributes.");
- goto probe1;
- }
-
- ret = ehca_init_device(shca);
- if (ret) {
- ehca_gen_err("Cannot init ehca device struct");
- goto probe1;
- }
-
- eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
- /* create event queues */
- ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
- if (ret) {
- ehca_err(&shca->ib_device, "Cannot create EQ.");
- goto probe1;
- }
-
- ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
- if (ret) {
- ehca_err(&shca->ib_device, "Cannot create NEQ.");
- goto probe3;
- }
-
- /* create internal protection domain */
- ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
- if (IS_ERR(ibpd)) {
- ehca_err(&shca->ib_device, "Cannot create internal PD.");
- ret = PTR_ERR(ibpd);
- goto probe4;
- }
-
- shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
- shca->pd->ib_pd.device = &shca->ib_device;
-
- /* create internal max MR */
- ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
-
- if (ret) {
- ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
- ret);
- goto probe5;
- }
-
- ret = ib_register_device(&shca->ib_device, NULL);
- if (ret) {
- ehca_err(&shca->ib_device,
- "ib_register_device() failed ret=%i", ret);
- goto probe6;
- }
-
- /* create AQP1 for port 1 */
- if (ehca_open_aqp1 == 1) {
- shca->sport[0].port_state = IB_PORT_DOWN;
- ret = ehca_create_aqp1(shca, 1);
- if (ret) {
- ehca_err(&shca->ib_device,
- "Cannot create AQP1 for port 1.");
- goto probe7;
- }
- }
-
- /* create AQP1 for port 2 */
- if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
- shca->sport[1].port_state = IB_PORT_DOWN;
- ret = ehca_create_aqp1(shca, 2);
- if (ret) {
- ehca_err(&shca->ib_device,
- "Cannot create AQP1 for port 2.");
- goto probe8;
- }
- }
-
- ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
- if (ret) /* only complain; we can live without attributes */
- ehca_err(&shca->ib_device,
- "Cannot create device attributes ret=%d", ret);
-
- spin_lock_irqsave(&shca_list_lock, flags);
- list_add(&shca->shca_list, &shca_list);
- spin_unlock_irqrestore(&shca_list_lock, flags);
-
- return 0;
-
-probe8:
- ret = ehca_destroy_aqp1(&shca->sport[0]);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy AQP1 for port 1. ret=%i", ret);
-
-probe7:
- ib_unregister_device(&shca->ib_device);
-
-probe6:
- ret = ehca_dereg_internal_maxmr(shca);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal MR. ret=%x", ret);
-
-probe5:
- ret = ehca_dealloc_pd(&shca->pd->ib_pd);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal PD. ret=%x", ret);
-
-probe4:
- ret = ehca_destroy_eq(shca, &shca->neq);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy NEQ. ret=%x", ret);
-
-probe3:
- ret = ehca_destroy_eq(shca, &shca->eq);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy EQ. ret=%x", ret);
-
-probe1:
- ib_dealloc_device(&shca->ib_device);
-
- return -EINVAL;
-}
-
-static int ehca_remove(struct platform_device *dev)
-{
- struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
- unsigned long flags;
- int ret;
-
- sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-
- if (ehca_open_aqp1 == 1) {
- int i;
- for (i = 0; i < shca->num_ports; i++) {
- ret = ehca_destroy_aqp1(&shca->sport[i]);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy AQP1 for port %x "
-					 "ret=%i", i, ret);
- }
- }
-
- ib_unregister_device(&shca->ib_device);
-
- ret = ehca_dereg_internal_maxmr(shca);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal MR. ret=%i", ret);
-
- ret = ehca_dealloc_pd(&shca->pd->ib_pd);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal PD. ret=%i", ret);
-
- ret = ehca_destroy_eq(shca, &shca->eq);
- if (ret)
- ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
-
- ret = ehca_destroy_eq(shca, &shca->neq);
- if (ret)
-		ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%i", ret);
-
- ib_dealloc_device(&shca->ib_device);
-
- spin_lock_irqsave(&shca_list_lock, flags);
- list_del(&shca->shca_list);
- spin_unlock_irqrestore(&shca_list_lock, flags);
-
- return ret;
-}
-
-static struct of_device_id ehca_device_table[] =
-{
- {
- .name = "lhca",
- .compatible = "IBM,lhca",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, ehca_device_table);
-
-static struct platform_driver ehca_driver = {
- .probe = ehca_probe,
- .remove = ehca_remove,
- .driver = {
- .name = "ehca",
- .owner = THIS_MODULE,
- .groups = ehca_drv_attr_groups,
- .of_match_table = ehca_device_table,
- },
-};
-
-void ehca_poll_eqs(unsigned long data)
-{
- struct ehca_shca *shca;
-
- spin_lock(&shca_list_lock);
- list_for_each_entry(shca, &shca_list, shca_list) {
- if (shca->eq.is_initialized) {
- /* call deadman proc only if eq ptr does not change */
- struct ehca_eq *eq = &shca->eq;
- int max = 3;
- volatile u64 q_ofs, q_ofs2;
- unsigned long flags;
- spin_lock_irqsave(&eq->spinlock, flags);
- q_ofs = eq->ipz_queue.current_q_offset;
- spin_unlock_irqrestore(&eq->spinlock, flags);
- do {
- spin_lock_irqsave(&eq->spinlock, flags);
- q_ofs2 = eq->ipz_queue.current_q_offset;
- spin_unlock_irqrestore(&eq->spinlock, flags);
- max--;
- } while (q_ofs == q_ofs2 && max > 0);
- if (q_ofs == q_ofs2)
- ehca_process_eq(shca, 0);
- }
- }
- mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
- spin_unlock(&shca_list_lock);
-}
-
-static int ehca_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- static unsigned long ehca_dmem_warn_time;
- unsigned long flags;
-
- switch (action) {
- case MEM_CANCEL_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_ONLINE:
- case MEM_OFFLINE:
- return NOTIFY_OK;
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- /* only ok if no hca is attached to the lpar */
- spin_lock_irqsave(&shca_list_lock, flags);
- if (list_empty(&shca_list)) {
- spin_unlock_irqrestore(&shca_list_lock, flags);
- return NOTIFY_OK;
- } else {
- spin_unlock_irqrestore(&shca_list_lock, flags);
- if (printk_timed_ratelimit(&ehca_dmem_warn_time,
- 30 * 1000))
-				ehca_gen_err("DMEM operations are not allowed "
- "in conjunction with eHCA");
- return NOTIFY_BAD;
- }
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block ehca_mem_nb = {
- .notifier_call = ehca_mem_notifier,
-};
-
-static int __init ehca_module_init(void)
-{
- int ret;
-
- printk(KERN_INFO "eHCA Infiniband Device Driver "
- "(Version " HCAD_VERSION ")\n");
-
- ret = ehca_create_comp_pool();
- if (ret) {
- ehca_gen_err("Cannot create comp pool.");
- return ret;
- }
-
- ret = ehca_create_slab_caches();
- if (ret) {
- ehca_gen_err("Cannot create SLAB caches");
- ret = -ENOMEM;
- goto module_init1;
- }
-
- ret = ehca_create_busmap();
- if (ret) {
- ehca_gen_err("Cannot create busmap.");
- goto module_init2;
- }
-
- ret = ibmebus_register_driver(&ehca_driver);
- if (ret) {
- ehca_gen_err("Cannot register eHCA device driver");
- ret = -EINVAL;
- goto module_init3;
- }
-
- ret = register_memory_notifier(&ehca_mem_nb);
- if (ret) {
- ehca_gen_err("Failed registering memory add/remove notifier");
- goto module_init4;
- }
-
- if (ehca_poll_all_eqs != 1) {
- ehca_gen_err("WARNING!!!");
- ehca_gen_err("It is possible to lose interrupts.");
- } else {
- init_timer(&poll_eqs_timer);
- poll_eqs_timer.function = ehca_poll_eqs;
- poll_eqs_timer.expires = jiffies + HZ;
- add_timer(&poll_eqs_timer);
- }
-
- return 0;
-
-module_init4:
- ibmebus_unregister_driver(&ehca_driver);
-
-module_init3:
- ehca_destroy_busmap();
-
-module_init2:
- ehca_destroy_slab_caches();
-
-module_init1:
- ehca_destroy_comp_pool();
- return ret;
-};
-
-static void __exit ehca_module_exit(void)
-{
- if (ehca_poll_all_eqs == 1)
- del_timer_sync(&poll_eqs_timer);
-
- ibmebus_unregister_driver(&ehca_driver);
-
- unregister_memory_notifier(&ehca_mem_nb);
-
- ehca_destroy_busmap();
-
- ehca_destroy_slab_caches();
-
- ehca_destroy_comp_pool();
-
- idr_destroy(&ehca_cq_idr);
- idr_destroy(&ehca_qp_idr);
-};
-
-module_init(ehca_module_init);
-module_exit(ehca_module_exit);
diff --git a/drivers/staging/rdma/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
deleted file mode 100644
index cec181532924..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mcast.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * mcast functions
- *
- * Authors: Khadija Souissi <souissik@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define MAX_MC_LID 0xFFFE
-#define MIN_MC_LID 0xC000 /* Multicast limits */
-#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
-#define EHCA_VALID_MULTICAST_LID(lid) \
- (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
-
-int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
- ib_device);
- union ib_gid my_gid;
- u64 subnet_prefix, interface_id, h_ret;
-
- if (ibqp->qp_type != IB_QPT_UD) {
- ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
- return -EINVAL;
- }
-
- if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		ehca_err(ibqp->device, "invalid multicast gid");
- return -EINVAL;
- } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
- return -EINVAL;
- }
-
- memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
- subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
- interface_id = be64_to_cpu(my_gid.global.interface_id);
- h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- my_qp->galpas.kernel,
- lid, subnet_prefix, interface_id);
- if (h_ret != H_SUCCESS)
- ehca_err(ibqp->device,
- "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
- "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
- return ehca2ib_return_code(h_ret);
-}
-
-int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- struct ehca_shca *shca = container_of(ibqp->pd->device,
- struct ehca_shca, ib_device);
- union ib_gid my_gid;
- u64 subnet_prefix, interface_id, h_ret;
-
- if (ibqp->qp_type != IB_QPT_UD) {
- ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
- return -EINVAL;
- }
-
- if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		ehca_err(ibqp->device, "invalid multicast gid");
- return -EINVAL;
- } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
- return -EINVAL;
- }
-
- memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
- subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
- interface_id = be64_to_cpu(my_gid.global.interface_id);
- h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- my_qp->galpas.kernel,
- lid, subnet_prefix, interface_id);
- if (h_ret != H_SUCCESS)
- ehca_err(ibqp->device,
- "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
- "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
- return ehca2ib_return_code(h_ret);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
deleted file mode 100644
index f914b30999f8..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ /dev/null
@@ -1,2593 +0,0 @@
-/*
- * IBM eServer eHCA InfiniBand device driver for Linux on POWER
- *
- * MR/MW functions
- *
- * Authors: Dietmar Decker <ddecker@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <rdma/ib_umem.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "hcp_if.h"
-#include "hipz_hw.h"
-
-#define NUM_CHUNKS(length, chunk_size) \
- (((length) + (chunk_size - 1)) / (chunk_size))
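
NUM_CHUNKS() is plain ceiling division: it returns how many chunk_size-sized pieces are needed to cover length bytes, and it is used below for both kernel pages and HCA hardware pages. A minimal stand-alone sketch of that arithmetic (the values are illustrative only):

#include <assert.h>

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))

int main(void)
{
	/* 10000 bytes covered by 4 KiB chunks: (10000 + 4095) / 4096 == 3 */
	assert(NUM_CHUNKS(10000UL, 4096UL) == 3);
	/* an exact multiple needs no extra chunk */
	assert(NUM_CHUNKS(8192UL, 4096UL) == 2);
	return 0;
}
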
-
-/* max number of rpages (per hcall register_rpages) */
-#define MAX_RPAGES 512
-
-/* DMEM toleration management */
-#define EHCA_SECTSHIFT SECTION_SIZE_BITS
-#define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT)
-#define EHCA_HUGEPAGESHIFT 34
-#define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT)
-#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
-#define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
-#define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
-#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
-#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
-#define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */
-#define EHCA_DIR_MAP_SIZE (0x10000)
-#define EHCA_ENT_MAP_SIZE (0x10000)
-#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
-
-static unsigned long ehca_mr_len;
-
-/*
- * Memory map data structures
- */
-struct ehca_dir_bmap {
- u64 ent[EHCA_MAP_ENTRIES];
-};
-struct ehca_top_bmap {
- struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
-};
-struct ehca_bmap {
- struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
-};
-
-static struct ehca_bmap *ehca_bmap;
-
-static struct kmem_cache *mr_cache;
-static struct kmem_cache *mw_cache;
-
-enum ehca_mr_pgsize {
- EHCA_MR_PGSIZE4K = 0x1000L,
- EHCA_MR_PGSIZE64K = 0x10000L,
- EHCA_MR_PGSIZE1M = 0x100000L,
- EHCA_MR_PGSIZE16M = 0x1000000L
-};
-
-#define EHCA_MR_PGSHIFT4K 12
-#define EHCA_MR_PGSHIFT64K 16
-#define EHCA_MR_PGSHIFT1M 20
-#define EHCA_MR_PGSHIFT16M 24
-
-static u64 ehca_map_vaddr(void *caddr);
-
-static u32 ehca_encode_hwpage_size(u32 pgsize)
-{
- int log = ilog2(pgsize);
- WARN_ON(log < 12 || log > 24 || log & 3);
- return (log - 12) / 4;
-}
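
ehca_encode_hwpage_size() packs the four supported hardware page sizes into the two-bit code the hypervisor interface expects: log2 of the size is 12, 16, 20 or 24, so (log - 12) / 4 yields 0..3, matching the EHCA_MR_PGSIZE* order above. A hedged stand-alone restatement of that arithmetic, with a portable loop in place of the kernel's ilog2():

#include <assert.h>

static unsigned int encode_hwpage_size(unsigned long pgsize)
{
	unsigned int log = 0;

	while (pgsize >>= 1)		/* portable ilog2() for powers of two */
		log++;
	return (log - 12) / 4;
}

int main(void)
{
	assert(encode_hwpage_size(0x1000UL) == 0);	/* 4K  */
	assert(encode_hwpage_size(0x10000UL) == 1);	/* 64K */
	assert(encode_hwpage_size(0x100000UL) == 2);	/* 1M  */
	assert(encode_hwpage_size(0x1000000UL) == 3);	/* 16M */
	return 0;
}
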
-
-static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
-{
- return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
-}
-
-static struct ehca_mr *ehca_mr_new(void)
-{
- struct ehca_mr *me;
-
- me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
- if (me)
- spin_lock_init(&me->mrlock);
- else
- ehca_gen_err("alloc failed");
-
- return me;
-}
-
-static void ehca_mr_delete(struct ehca_mr *me)
-{
- kmem_cache_free(mr_cache, me);
-}
-
-static struct ehca_mw *ehca_mw_new(void)
-{
- struct ehca_mw *me;
-
- me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
- if (me)
- spin_lock_init(&me->mwlock);
- else
- ehca_gen_err("alloc failed");
-
- return me;
-}
-
-static void ehca_mw_delete(struct ehca_mw *me)
-{
- kmem_cache_free(mw_cache, me);
-}
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
- struct ib_mr *ib_mr;
- int ret;
- struct ehca_mr *e_maxmr;
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
-
- if (shca->maxmr) {
- e_maxmr = ehca_mr_new();
- if (!e_maxmr) {
- ehca_err(&shca->ib_device, "out of memory");
- ib_mr = ERR_PTR(-ENOMEM);
- goto get_dma_mr_exit0;
- }
-
- ret = ehca_reg_maxmr(shca, e_maxmr,
- (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
- mr_access_flags, e_pd,
- &e_maxmr->ib.ib_mr.lkey,
- &e_maxmr->ib.ib_mr.rkey);
- if (ret) {
- ehca_mr_delete(e_maxmr);
- ib_mr = ERR_PTR(ret);
- goto get_dma_mr_exit0;
- }
- ib_mr = &e_maxmr->ib.ib_mr;
- } else {
-		ehca_err(&shca->ib_device, "no internal max-MR exists!");
- ib_mr = ERR_PTR(-EINVAL);
- goto get_dma_mr_exit0;
- }
-
-get_dma_mr_exit0:
- if (IS_ERR(ib_mr))
- ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
- PTR_ERR(ib_mr), pd, mr_access_flags);
- return ib_mr;
-} /* end ehca_get_dma_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start)
-{
- struct ib_mr *ib_mr;
- int ret;
- struct ehca_mr *e_mr;
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-
- u64 size;
-
- if ((num_phys_buf <= 0) || !phys_buf_array) {
- ehca_err(pd->device, "bad input values: num_phys_buf=%x "
- "phys_buf_array=%p", num_phys_buf, phys_buf_array);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_phys_mr_exit0;
- }
- if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_phys_mr_exit0;
- }
-
- /* check physical buffer list and calculate size */
- ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
- iova_start, &size);
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_phys_mr_exit0;
- }
- if ((size == 0) ||
- (((u64)iova_start + size) < (u64)iova_start)) {
- ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
- size, iova_start);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_phys_mr_exit0;
- }
-
- e_mr = ehca_mr_new();
- if (!e_mr) {
- ehca_err(pd->device, "out of memory");
- ib_mr = ERR_PTR(-ENOMEM);
- goto reg_phys_mr_exit0;
- }
-
- /* register MR on HCA */
- if (ehca_mr_is_maxmr(size, iova_start)) {
- e_mr->flags |= EHCA_MR_FLAG_MAXMR;
- ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
- e_pd, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_phys_mr_exit1;
- }
- } else {
- struct ehca_mr_pginfo pginfo;
- u32 num_kpages;
- u32 num_hwpages;
- u64 hw_pgsize;
-
- num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
- PAGE_SIZE);
-		/* for kernel space we try the largest possible pgsize */
- hw_pgsize = ehca_get_max_hwpage_size(shca);
- num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
- hw_pgsize);
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_kpages = num_kpages;
- pginfo.hwpage_size = hw_pgsize;
- pginfo.num_hwpages = num_hwpages;
- pginfo.u.phy.num_phys_buf = num_phys_buf;
- pginfo.u.phy.phys_buf_array = phys_buf_array;
- pginfo.next_hwpage =
- ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
-
- ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
- e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_phys_mr_exit1;
- }
- }
-
- /* successful registration of all pages */
- return &e_mr->ib.ib_mr;
-
-reg_phys_mr_exit1:
- ehca_mr_delete(e_mr);
-reg_phys_mr_exit0:
- if (IS_ERR(ib_mr))
- ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p "
- "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
- PTR_ERR(ib_mr), pd, phys_buf_array,
- num_phys_buf, mr_access_flags, iova_start);
- return ib_mr;
-} /* end ehca_reg_phys_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int mr_access_flags,
- struct ib_udata *udata)
-{
- struct ib_mr *ib_mr;
- struct ehca_mr *e_mr;
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_mr_pginfo pginfo;
- int ret, page_shift;
- u32 num_kpages;
- u32 num_hwpages;
- u64 hwpage_size;
-
- if (!pd) {
- ehca_gen_err("bad pd=%p", pd);
- return ERR_PTR(-EFAULT);
- }
-
- if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit0;
- }
-
- if (length == 0 || virt + length < virt) {
- ehca_err(pd->device, "bad input values: length=%llx "
- "virt_base=%llx", length, virt);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit0;
- }
-
- e_mr = ehca_mr_new();
- if (!e_mr) {
- ehca_err(pd->device, "out of memory");
- ib_mr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_exit0;
- }
-
- e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
- if (IS_ERR(e_mr->umem)) {
- ib_mr = (void *)e_mr->umem;
- goto reg_user_mr_exit1;
- }
-
- if (e_mr->umem->page_size != PAGE_SIZE) {
- ehca_err(pd->device, "page size not supported, "
- "e_mr->umem->page_size=%x", e_mr->umem->page_size);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit2;
- }
-
- /* determine number of MR pages */
- num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
- /* select proper hw_pgsize */
- page_shift = PAGE_SHIFT;
- if (e_mr->umem->hugetlb) {
- /* determine page_shift, clamp between 4K and 16M */
- page_shift = (fls64(length - 1) + 3) & ~3;
- page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
- EHCA_MR_PGSHIFT16M);
- }
- hwpage_size = 1UL << page_shift;
-
- /* now that we have the desired page size, shift until it's
- * supported, too. 4K is always supported, so this terminates.
- */
- while (!(hwpage_size & shca->hca_cap_mr_pgsize))
- hwpage_size >>= 4;
-
-reg_user_mr_fallback:
- num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
- /* register MR on HCA */
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_USER;
- pginfo.hwpage_size = hwpage_size;
- pginfo.num_kpages = num_kpages;
- pginfo.num_hwpages = num_hwpages;
- pginfo.u.usr.region = e_mr->umem;
- pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
- pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
- ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
- e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
- if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
- ehca_warn(pd->device, "failed to register mr "
- "with hwpage_size=%llx", hwpage_size);
- ehca_info(pd->device, "try to register mr with "
- "kpage_size=%lx", PAGE_SIZE);
- /*
- * this means kpages are not contiguous for a hw page
- * try kernel page size as fallback solution
- */
- hwpage_size = PAGE_SIZE;
- goto reg_user_mr_fallback;
- }
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_user_mr_exit2;
- }
-
- /* successful registration of all pages */
- return &e_mr->ib.ib_mr;
-
-reg_user_mr_exit2:
- ib_umem_release(e_mr->umem);
-reg_user_mr_exit1:
- ehca_mr_delete(e_mr);
-reg_user_mr_exit0:
- if (IS_ERR(ib_mr))
- ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
- PTR_ERR(ib_mr), pd, mr_access_flags, udata);
- return ib_mr;
-} /* end ehca_reg_user_mr() */
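
The hardware page size chosen in ehca_reg_user_mr() above comes from two steps: for hugetlb regions, (fls64(length - 1) + 3) & ~3 rounds the position of the highest set bit up to a multiple of 4 and clamps it to the 4K..16M shifts; the candidate size is then shifted down by factors of 16 until it appears in the HCA capability mask, which always contains 4K. A stand-alone sketch of that selection under an assumed capability mask (the mask value and helper names are illustrative, not part of the driver):

#include <stdio.h>

#define PGSHIFT_4K	12
#define PGSHIFT_16M	24

/* portable stand-in for the kernel's fls64(): 1-based position of the highest set bit */
static int fls64_portable(unsigned long long x)
{
	int bit = 0;

	while (x) {
		x >>= 1;
		bit++;
	}
	return bit;
}

static unsigned long long pick_hwpage_size(unsigned long long length,
					   int hugetlb,
					   unsigned long long cap_mr_pgsize)
{
	int page_shift = PGSHIFT_4K;		/* assume 4 KiB kernel pages */
	unsigned long long hwpage_size;

	if (hugetlb) {
		page_shift = (fls64_portable(length - 1) + 3) & ~3;
		if (page_shift < PGSHIFT_4K)
			page_shift = PGSHIFT_4K;
		if (page_shift > PGSHIFT_16M)
			page_shift = PGSHIFT_16M;
	}
	hwpage_size = 1ULL << page_shift;

	/* 4K is always advertised, so this terminates */
	while (!(hwpage_size & cap_mr_pgsize))
		hwpage_size >>= 4;
	return hwpage_size;
}

int main(void)
{
	/* illustrative mask: the HCA advertises 4K and 64K only */
	unsigned long long cap = 0x1000ULL | 0x10000ULL;

	/* a 20 MiB hugetlb region first tries 16M, then falls back to 64K */
	printf("hwpage_size=0x%llx\n", pick_hwpage_size(20ULL << 20, 1, cap));
	return 0;
}
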
-
-/*----------------------------------------------------------------------*/
-
-int ehca_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start)
-{
- int ret;
-
- struct ehca_shca *shca =
- container_of(mr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
- u64 new_size;
- u64 *new_start;
- u32 new_acl;
- struct ehca_pd *new_pd;
- u32 tmp_lkey, tmp_rkey;
- unsigned long sl_flags;
- u32 num_kpages = 0;
- u32 num_hwpages = 0;
- struct ehca_mr_pginfo pginfo;
-
- if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
- /* TODO not supported, because PHYP rereg hCall needs pages */
- ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
- "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
-
- if (mr_rereg_mask & IB_MR_REREG_PD) {
- if (!pd) {
- ehca_err(mr->device, "rereg with bad pd, pd=%p "
- "mr_rereg_mask=%x", pd, mr_rereg_mask);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- }
-
- if ((mr_rereg_mask &
- ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
- (mr_rereg_mask == 0)) {
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
-
- /* check other parameters */
- if (e_mr == shca->maxmr) {
-		/* should be impossible; however, reject to be sure */
- ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
- "shca->maxmr=%p mr->lkey=%x",
- mr, shca->maxmr, mr->lkey);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
- if (e_mr->flags & EHCA_MR_FLAG_FMR) {
- ehca_err(mr->device, "not supported for FMR, mr=%p "
- "flags=%x", mr, e_mr->flags);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- if (!phys_buf_array || num_phys_buf <= 0) {
- ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
- " phys_buf_array=%p num_phys_buf=%x",
- mr_rereg_mask, phys_buf_array, num_phys_buf);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- }
- if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
- (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
- "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
-
- /* set requested values dependent on rereg request */
- spin_lock_irqsave(&e_mr->mrlock, sl_flags);
- new_start = e_mr->start;
- new_size = e_mr->size;
- new_acl = e_mr->acl;
- new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
-
- if (mr_rereg_mask & IB_MR_REREG_TRANS) {
- u64 hw_pgsize = ehca_get_max_hwpage_size(shca);
-
- new_start = iova_start; /* change address */
- /* check physical buffer list and calculate size */
- ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
- num_phys_buf, iova_start,
- &new_size);
- if (ret)
- goto rereg_phys_mr_exit1;
- if ((new_size == 0) ||
- (((u64)iova_start + new_size) < (u64)iova_start)) {
- ehca_err(mr->device, "bad input values: new_size=%llx "
- "iova_start=%p", new_size, iova_start);
- ret = -EINVAL;
- goto rereg_phys_mr_exit1;
- }
- num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
- new_size, PAGE_SIZE);
- num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
- new_size, hw_pgsize);
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_kpages = num_kpages;
- pginfo.hwpage_size = hw_pgsize;
- pginfo.num_hwpages = num_hwpages;
- pginfo.u.phy.num_phys_buf = num_phys_buf;
- pginfo.u.phy.phys_buf_array = phys_buf_array;
- pginfo.next_hwpage =
- ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
- }
- if (mr_rereg_mask & IB_MR_REREG_ACCESS)
- new_acl = mr_access_flags;
- if (mr_rereg_mask & IB_MR_REREG_PD)
- new_pd = container_of(pd, struct ehca_pd, ib_pd);
-
- ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
- new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
- if (ret)
- goto rereg_phys_mr_exit1;
-
- /* successful reregistration */
- if (mr_rereg_mask & IB_MR_REREG_PD)
- mr->pd = pd;
- mr->lkey = tmp_lkey;
- mr->rkey = tmp_rkey;
-
-rereg_phys_mr_exit1:
- spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
-rereg_phys_mr_exit0:
- if (ret)
- ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "
- "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
- "iova_start=%p",
- ret, mr, mr_rereg_mask, pd, phys_buf_array,
- num_phys_buf, mr_access_flags, iova_start);
- return ret;
-} /* end ehca_rereg_phys_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(mr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
- unsigned long sl_flags;
- struct ehca_mr_hipzout_parms hipzout;
-
- if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
- "e_mr->flags=%x", mr, e_mr, e_mr->flags);
- ret = -EINVAL;
- goto query_mr_exit0;
- }
-
- memset(mr_attr, 0, sizeof(struct ib_mr_attr));
- spin_lock_irqsave(&e_mr->mrlock, sl_flags);
-
- h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
- "hca_hndl=%llx mr_hndl=%llx lkey=%x",
- h_ret, mr, shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle, mr->lkey);
- ret = ehca2ib_return_code(h_ret);
- goto query_mr_exit1;
- }
- mr_attr->pd = mr->pd;
- mr_attr->device_virt_addr = hipzout.vaddr;
- mr_attr->size = hipzout.len;
- mr_attr->lkey = hipzout.lkey;
- mr_attr->rkey = hipzout.rkey;
- ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
-
-query_mr_exit1:
- spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
-query_mr_exit0:
- if (ret)
- ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",
- ret, mr, mr_attr);
- return ret;
-} /* end ehca_query_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_mr(struct ib_mr *mr)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(mr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-
- if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
- "e_mr->flags=%x", mr, e_mr, e_mr->flags);
- ret = -EINVAL;
- goto dereg_mr_exit0;
- } else if (e_mr == shca->maxmr) {
-		/* should be impossible; however, reject to be sure */
- ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
- "shca->maxmr=%p mr->lkey=%x",
- mr, shca->maxmr, mr->lkey);
- ret = -EINVAL;
- goto dereg_mr_exit0;
- }
-
- /* TODO: BUSY: MR still has bound window(s) */
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
- if (h_ret != H_SUCCESS) {
- ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
- "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
- h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle, mr->lkey);
- ret = ehca2ib_return_code(h_ret);
- goto dereg_mr_exit0;
- }
-
- if (e_mr->umem)
- ib_umem_release(e_mr->umem);
-
- /* successful deregistration */
- ehca_mr_delete(e_mr);
-
-dereg_mr_exit0:
- if (ret)
- ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
- return ret;
-} /* end ehca_dereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
-{
- struct ib_mw *ib_mw;
- u64 h_ret;
- struct ehca_mw *e_mw;
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_mw_hipzout_parms hipzout;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- e_mw = ehca_mw_new();
- if (!e_mw) {
- ib_mw = ERR_PTR(-ENOMEM);
- goto alloc_mw_exit0;
- }
-
- h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
- e_pd->fw_pd, &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
- "shca=%p hca_hndl=%llx mw=%p",
- h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
- ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
- goto alloc_mw_exit1;
- }
- /* successful MW allocation */
- e_mw->ipz_mw_handle = hipzout.handle;
- e_mw->ib_mw.rkey = hipzout.rkey;
- return &e_mw->ib_mw;
-
-alloc_mw_exit1:
- ehca_mw_delete(e_mw);
-alloc_mw_exit0:
- if (IS_ERR(ib_mw))
- ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
- return ib_mw;
-} /* end ehca_alloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_bind_mw(struct ib_qp *qp,
- struct ib_mw *mw,
- struct ib_mw_bind *mw_bind)
-{
- /* TODO: not supported up to now */
- ehca_gen_err("bind MW currently not supported by HCAD");
-
- return -EPERM;
-} /* end ehca_bind_mw() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_mw(struct ib_mw *mw)
-{
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(mw->device, struct ehca_shca, ib_device);
- struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
-
- h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
- if (h_ret != H_SUCCESS) {
- ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
- "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
- h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
- e_mw->ipz_mw_handle.handle);
- return ehca2ib_return_code(h_ret);
- }
- /* successful deallocation */
- ehca_mw_delete(e_mw);
- return 0;
-} /* end ehca_dealloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct ib_fmr *ib_fmr;
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_mr *e_fmr;
- int ret;
- u32 tmp_lkey, tmp_rkey;
- struct ehca_mr_pginfo pginfo;
- u64 hw_pgsize;
-
- /* check other parameters */
- if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
- if (mr_access_flags & IB_ACCESS_MW_BIND) {
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
- if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
- ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
- "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
- fmr_attr->max_pages, fmr_attr->max_maps,
- fmr_attr->page_shift);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
-
- hw_pgsize = 1 << fmr_attr->page_shift;
- if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
- ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
- fmr_attr->page_shift);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
-
- e_fmr = ehca_mr_new();
- if (!e_fmr) {
- ib_fmr = ERR_PTR(-ENOMEM);
- goto alloc_fmr_exit0;
- }
- e_fmr->flags |= EHCA_MR_FLAG_FMR;
-
- /* register MR on HCA */
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.hwpage_size = hw_pgsize;
- /*
-	 * pginfo.num_hwpages==0, i.e. register_rpages() will not be called
- * but deferred to map_phys_fmr()
- */
- ret = ehca_reg_mr(shca, e_fmr, NULL,
- fmr_attr->max_pages * (1 << fmr_attr->page_shift),
- mr_access_flags, e_pd, &pginfo,
- &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
- if (ret) {
- ib_fmr = ERR_PTR(ret);
- goto alloc_fmr_exit1;
- }
-
- /* successful */
- e_fmr->hwpage_size = hw_pgsize;
- e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
- e_fmr->fmr_max_pages = fmr_attr->max_pages;
- e_fmr->fmr_max_maps = fmr_attr->max_maps;
- e_fmr->fmr_map_cnt = 0;
- return &e_fmr->ib.ib_fmr;
-
-alloc_fmr_exit1:
- ehca_mr_delete(e_fmr);
-alloc_fmr_exit0:
- return ib_fmr;
-} /* end ehca_alloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
- u64 *page_list,
- int list_len,
- u64 iova)
-{
- int ret;
- struct ehca_shca *shca =
- container_of(fmr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
- struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
- struct ehca_mr_pginfo pginfo;
- u32 tmp_lkey, tmp_rkey;
-
- if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
- e_fmr, e_fmr->flags);
- ret = -EINVAL;
- goto map_phys_fmr_exit0;
- }
- ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
- if (ret)
- goto map_phys_fmr_exit0;
- if (iova % e_fmr->fmr_page_size) {
- /* only whole-numbered pages */
- ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
- iova, e_fmr->fmr_page_size);
- ret = -EINVAL;
- goto map_phys_fmr_exit0;
- }
- if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
-		/* HCAD does not limit the maps; trace this anyway */
- ehca_info(fmr->device, "map limit exceeded, fmr=%p "
- "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
- fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
- }
-
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_FMR;
- pginfo.num_kpages = list_len;
- pginfo.hwpage_size = e_fmr->hwpage_size;
- pginfo.num_hwpages =
- list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
- pginfo.u.fmr.page_list = page_list;
- pginfo.next_hwpage =
- (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
- pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
-
- ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
- list_len * e_fmr->fmr_page_size,
- e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
- if (ret)
- goto map_phys_fmr_exit0;
-
- /* successful reregistration */
- e_fmr->fmr_map_cnt++;
- e_fmr->ib.ib_fmr.lkey = tmp_lkey;
- e_fmr->ib.ib_fmr.rkey = tmp_rkey;
- return 0;
-
-map_phys_fmr_exit0:
- if (ret)
- ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
- "iova=%llx", ret, fmr, page_list, list_len, iova);
- return ret;
-} /* end ehca_map_phys_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_fmr(struct list_head *fmr_list)
-{
- int ret = 0;
- struct ib_fmr *ib_fmr;
- struct ehca_shca *shca = NULL;
- struct ehca_shca *prev_shca;
- struct ehca_mr *e_fmr;
- u32 num_fmr = 0;
- u32 unmap_fmr_cnt = 0;
-
- /* check all FMR belong to same SHCA, and check internal flag */
- list_for_each_entry(ib_fmr, fmr_list, list) {
- prev_shca = shca;
- shca = container_of(ib_fmr->device, struct ehca_shca,
- ib_device);
- e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
- if ((shca != prev_shca) && prev_shca) {
- ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
- "prev_shca=%p e_fmr=%p",
- shca, prev_shca, e_fmr);
- ret = -EINVAL;
- goto unmap_fmr_exit0;
- }
- if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
- "e_fmr->flags=%x", e_fmr, e_fmr->flags);
- ret = -EINVAL;
- goto unmap_fmr_exit0;
- }
- num_fmr++;
- }
-
- /* loop over all FMRs to unmap */
- list_for_each_entry(ib_fmr, fmr_list, list) {
- unmap_fmr_cnt++;
- e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
- shca = container_of(ib_fmr->device, struct ehca_shca,
- ib_device);
- ret = ehca_unmap_one_fmr(shca, e_fmr);
- if (ret) {
- /* unmap failed, stop unmapping of rest of FMRs */
- ehca_err(&shca->ib_device, "unmap of one FMR failed, "
- "stop rest, e_fmr=%p num_fmr=%x "
- "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
- unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
- goto unmap_fmr_exit0;
- }
- }
-
-unmap_fmr_exit0:
- if (ret)
- ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
- ret, fmr_list, num_fmr, unmap_fmr_cnt);
- return ret;
-} /* end ehca_unmap_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr)
-{
- int ret;
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(fmr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-
- if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
- e_fmr, e_fmr->flags);
- ret = -EINVAL;
- goto free_fmr_exit0;
- }
-
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
- if (h_ret != H_SUCCESS) {
- ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
- "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle, fmr->lkey);
- ret = ehca2ib_return_code(h_ret);
- goto free_fmr_exit0;
- }
- /* successful deregistration */
- ehca_mr_delete(e_fmr);
- return 0;
-
-free_fmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
- return ret;
-} /* end ehca_dealloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo);
-
-int ehca_reg_mr(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- int acl,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey, /*OUT*/
- u32 *rkey, /*OUT*/
- enum ehca_reg_type reg_type)
-{
- int ret;
- u64 h_ret;
- u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
- if (ehca_use_hp_mr == 1)
- hipz_acl |= 0x00000001;
-
- h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
- (u64)iova_start, size, hipz_acl,
- e_pd->fw_pd, &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
- "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_reg_mr_exit0;
- }
-
- e_mr->ipz_mr_handle = hipzout.handle;
-
- if (reg_type == EHCA_REG_BUSMAP_MR)
- ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
- else if (reg_type == EHCA_REG_MR)
- ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
- else
- ret = -EINVAL;
-
- if (ret)
- goto ehca_reg_mr_exit1;
-
- /* successful registration */
- e_mr->num_kpages = pginfo->num_kpages;
- e_mr->num_hwpages = pginfo->num_hwpages;
- e_mr->hwpage_size = pginfo->hwpage_size;
- e_mr->start = iova_start;
- e_mr->size = size;
- e_mr->acl = acl;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- return 0;
-
-ehca_reg_mr_exit1:
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
- "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
- "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
- h_ret, shca, e_mr, iova_start, size, acl, e_pd,
- hipzout.lkey, pginfo, pginfo->num_kpages,
- pginfo->num_hwpages, ret);
- ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
- "not recoverable");
- }
-ehca_reg_mr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
- "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
- "num_kpages=%llx num_hwpages=%llx",
- ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
- pginfo->num_kpages, pginfo->num_hwpages);
- return ret;
-} /* end ehca_reg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo)
-{
- int ret = 0;
- u64 h_ret;
- u32 rnum;
- u64 rpage;
- u32 i;
- u64 *kpage;
-
- if (!pginfo->num_hwpages) /* in case of fmr */
- return 0;
-
- kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!kpage) {
- ehca_err(&shca->ib_device, "kpage alloc failed");
- ret = -ENOMEM;
- goto ehca_reg_mr_rpages_exit0;
- }
-
- /* max MAX_RPAGES ehca mr pages per register call */
- for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
-
- if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
- rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
- if (rnum == 0)
- rnum = MAX_RPAGES; /* last shot is full */
- } else
- rnum = MAX_RPAGES;
-
- ret = ehca_set_pagebuf(pginfo, rnum, kpage);
- if (ret) {
- ehca_err(&shca->ib_device, "ehca_set_pagebuf "
- "bad rc, ret=%i rnum=%x kpage=%p",
- ret, rnum, kpage);
- goto ehca_reg_mr_rpages_exit1;
- }
-
- if (rnum > 1) {
- rpage = __pa(kpage);
- if (!rpage) {
- ehca_err(&shca->ib_device, "kpage=%p i=%x",
- kpage, i);
- ret = -EFAULT;
- goto ehca_reg_mr_rpages_exit1;
- }
- } else
- rpage = *kpage;
-
- h_ret = hipz_h_register_rpage_mr(
- shca->ipz_hca_handle, e_mr,
- ehca_encode_hwpage_size(pginfo->hwpage_size),
- 0, rpage, rnum);
-
- if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
- /*
- * check for 'registration complete'==H_SUCCESS
- * and for 'page registered'==H_PAGE_REGISTERED
- */
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "last "
- "hipz_reg_rpage_mr failed, h_ret=%lli "
- "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
- " lkey=%x", h_ret, e_mr, i,
- shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle,
- e_mr->ib.ib_mr.lkey);
- ret = ehca2ib_return_code(h_ret);
- break;
- } else
- ret = 0;
- } else if (h_ret != H_PAGE_REGISTERED) {
- ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
- "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
- "mr_hndl=%llx", h_ret, e_mr, i,
- e_mr->ib.ib_mr.lkey,
- shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle);
- ret = ehca2ib_return_code(h_ret);
- break;
- } else
- ret = 0;
- } /* end for(i) */
-
-
-ehca_reg_mr_rpages_exit1:
- ehca_free_fw_ctrlblock(kpage);
-ehca_reg_mr_rpages_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
- "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
- pginfo, pginfo->num_kpages, pginfo->num_hwpages);
- return ret;
-} /* end ehca_reg_mr_rpages() */
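
ehca_reg_mr_rpages() above submits the hardware-page list in batches of at most MAX_RPAGES entries: every batch but the last is full, and the last one carries the remainder, or a full MAX_RPAGES when the total divides evenly. The batch-size computation in isolation, as a hedged sketch (batch_rnum() is an illustrative name, not a driver function):

#include <assert.h>

#define MAX_RPAGES 512

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))

/* number of rpages submitted in batch i of num_hwpages total */
static unsigned int batch_rnum(unsigned long long num_hwpages, unsigned int i)
{
	if (i == NUM_CHUNKS(num_hwpages, MAX_RPAGES) - 1) {
		unsigned int rnum = num_hwpages % MAX_RPAGES;
		return rnum ? rnum : MAX_RPAGES;	/* last batch */
	}
	return MAX_RPAGES;				/* full batch */
}

int main(void)
{
	assert(batch_rnum(1300, 0) == 512);
	assert(batch_rnum(1300, 1) == 512);
	assert(batch_rnum(1300, 2) == 276);	/* 1300 - 2 * 512 */
	assert(batch_rnum(1024, 1) == 512);	/* evenly divisible: last batch is full */
	return 0;
}

This mirrors the loop above, where the last batch is also the one that must return H_SUCCESS (registration complete) instead of H_PAGE_REGISTERED.
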
-
-/*----------------------------------------------------------------------*/
-
-inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- u32 acl,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey, /*OUT*/
- u32 *rkey) /*OUT*/
-{
- int ret;
- u64 h_ret;
- u32 hipz_acl;
- u64 *kpage;
- u64 rpage;
- struct ehca_mr_pginfo pginfo_save;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-
- kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!kpage) {
- ehca_err(&shca->ib_device, "kpage alloc failed");
- ret = -ENOMEM;
- goto ehca_rereg_mr_rereg1_exit0;
- }
-
- pginfo_save = *pginfo;
- ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
- if (ret) {
- ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
- "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
- "kpage=%p", e_mr, pginfo, pginfo->type,
- pginfo->num_kpages, pginfo->num_hwpages, kpage);
- goto ehca_rereg_mr_rereg1_exit1;
- }
- rpage = __pa(kpage);
- if (!rpage) {
- ehca_err(&shca->ib_device, "kpage=%p", kpage);
- ret = -EFAULT;
- goto ehca_rereg_mr_rereg1_exit1;
- }
- h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
- (u64)iova_start, size, hipz_acl,
- e_pd->fw_pd, rpage, &hipzout);
- if (h_ret != H_SUCCESS) {
- /*
- * reregistration unsuccessful, try it again with the 3 hCalls,
- * e.g. this is required in case H_MR_CONDITION
- * (MW bound or MR is shared)
- */
- ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
- "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
- *pginfo = pginfo_save;
- ret = -EAGAIN;
- } else if ((u64 *)hipzout.vaddr != iova_start) {
- ehca_err(&shca->ib_device, "PHYP changed iova_start in "
- "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
- "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
- hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
- e_mr->ib.ib_mr.lkey, hipzout.lkey);
- ret = -EFAULT;
- } else {
- /*
- * successful reregistration
- * note: start and start_out are identical for eServer HCAs
- */
- e_mr->num_kpages = pginfo->num_kpages;
- e_mr->num_hwpages = pginfo->num_hwpages;
- e_mr->hwpage_size = pginfo->hwpage_size;
- e_mr->start = iova_start;
- e_mr->size = size;
- e_mr->acl = acl;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- }
-
-ehca_rereg_mr_rereg1_exit1:
- ehca_free_fw_ctrlblock(kpage);
-ehca_rereg_mr_rereg1_exit0:
-	if (ret && (ret != -EAGAIN))
- ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
- "pginfo=%p num_kpages=%llx num_hwpages=%llx",
- ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
- pginfo->num_hwpages);
- return ret;
-} /* end ehca_rereg_mr_rereg1() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_rereg_mr(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- int acl,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey,
- u32 *rkey)
-{
- int ret = 0;
- u64 h_ret;
- int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
- int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
-
- /* first determine reregistration hCall(s) */
- if ((pginfo->num_hwpages > MAX_RPAGES) ||
- (e_mr->num_hwpages > MAX_RPAGES) ||
- (pginfo->num_hwpages > e_mr->num_hwpages)) {
- ehca_dbg(&shca->ib_device, "Rereg3 case, "
- "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
- pginfo->num_hwpages, e_mr->num_hwpages);
- rereg_1_hcall = 0;
- rereg_3_hcall = 1;
- }
-
- if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
- rereg_1_hcall = 0;
- rereg_3_hcall = 1;
- e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
- ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
- e_mr);
- }
-
- if (rereg_1_hcall) {
- ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
- acl, e_pd, pginfo, lkey, rkey);
- if (ret) {
- if (ret == -EAGAIN)
- rereg_3_hcall = 1;
- else
- goto ehca_rereg_mr_exit0;
- }
- }
-
- if (rereg_3_hcall) {
- struct ehca_mr save_mr;
-
- /* first deregister old MR */
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_free_mr failed, "
- "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
- "mr->lkey=%x",
- h_ret, e_mr, shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle,
- e_mr->ib.ib_mr.lkey);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_rereg_mr_exit0;
- }
- /* clean ehca_mr_t, without changing struct ib_mr and lock */
- save_mr = *e_mr;
- ehca_mr_deletenew(e_mr);
-
- /* set some MR values */
- e_mr->flags = save_mr.flags;
- e_mr->hwpage_size = save_mr.hwpage_size;
- e_mr->fmr_page_size = save_mr.fmr_page_size;
- e_mr->fmr_max_pages = save_mr.fmr_max_pages;
- e_mr->fmr_max_maps = save_mr.fmr_max_maps;
- e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
-
- ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
- e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
- if (ret) {
- u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
- memcpy(&e_mr->flags, &(save_mr.flags),
- sizeof(struct ehca_mr) - offset);
- goto ehca_rereg_mr_exit0;
- }
- }
-
-ehca_rereg_mr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
- "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
- "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
- "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
- acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
- rereg_1_hcall, rereg_3_hcall);
- return ret;
-} /* end ehca_rereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
- struct ehca_mr *e_fmr)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_pd *e_pd =
- container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
- struct ehca_mr save_fmr;
- u32 tmp_lkey, tmp_rkey;
- struct ehca_mr_pginfo pginfo;
- struct ehca_mr_hipzout_parms hipzout;
-
- if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
- /*
- * note: after using rereg hcall with len=0,
- * rereg hcall must be used again for registering pages
- */
- h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
- 0, 0, e_pd->fw_pd, 0, &hipzout);
- if (h_ret == H_SUCCESS) {
- /* successful reregistration */
- e_fmr->start = NULL;
- e_fmr->size = 0;
- tmp_lkey = hipzout.lkey;
- tmp_rkey = hipzout.rkey;
- return 0;
- }
- /*
- * should not happen, because length checked above,
- * FMRs are not shared and no MW bound to FMRs
- */
- ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
- "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
- "mr_hndl=%llx lkey=%x lkey_out=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle,
- e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
- /* try free and rereg */
- }
-
- /* first free old FMR */
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_free_mr failed, "
- "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
- "lkey=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle,
- e_fmr->ib.ib_fmr.lkey);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_unmap_one_fmr_exit0;
- }
- /* clean ehca_mr_t, without changing lock */
- save_fmr = *e_fmr;
- ehca_mr_deletenew(e_fmr);
-
- /* set some MR values */
- e_fmr->flags = save_fmr.flags;
- e_fmr->hwpage_size = save_fmr.hwpage_size;
- e_fmr->fmr_page_size = save_fmr.fmr_page_size;
- e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
- e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
- e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
- e_fmr->acl = save_fmr.acl;
-
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_FMR;
- ret = ehca_reg_mr(shca, e_fmr, NULL,
- (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
- e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
- &tmp_rkey, EHCA_REG_MR);
- if (ret) {
- u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
-		memcpy(&e_fmr->flags, &(save_fmr.flags),
- sizeof(struct ehca_mr) - offset);
- }
-
-ehca_unmap_one_fmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
- "fmr_max_pages=%x",
- ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
- return ret;
-} /* end ehca_unmap_one_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_smr(struct ehca_shca *shca,
- struct ehca_mr *e_origmr,
- struct ehca_mr *e_newmr,
- u64 *iova_start,
- int acl,
- struct ehca_pd *e_pd,
- u32 *lkey, /*OUT*/
- u32 *rkey) /*OUT*/
-{
- int ret = 0;
- u64 h_ret;
- u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
- h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
- (u64)iova_start, hipz_acl, e_pd->fw_pd,
- &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
- "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
- "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
- h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
- shca->ipz_hca_handle.handle,
- e_origmr->ipz_mr_handle.handle,
- e_origmr->ib.ib_mr.lkey);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_reg_smr_exit0;
- }
- /* successful registration */
- e_newmr->num_kpages = e_origmr->num_kpages;
- e_newmr->num_hwpages = e_origmr->num_hwpages;
- e_newmr->hwpage_size = e_origmr->hwpage_size;
- e_newmr->start = iova_start;
- e_newmr->size = e_origmr->size;
- e_newmr->acl = acl;
- e_newmr->ipz_mr_handle = hipzout.handle;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- return 0;
-
-ehca_reg_smr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
- "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
- ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
- return ret;
-} /* end ehca_reg_smr() */
-
-/*----------------------------------------------------------------------*/
-static inline void *ehca_calc_sectbase(int top, int dir, int idx)
-{
- unsigned long ret = idx;
- ret |= dir << EHCA_DIR_INDEX_SHIFT;
- ret |= top << EHCA_TOP_INDEX_SHIFT;
- return __va(ret << SECTION_SIZE_BITS);
-}
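
ehca_calc_sectbase() rebuilds a section number from its three indices, (top << 26) | (dir << 13) | idx, and shifts it up by SECTION_SIZE_BITS to obtain the section base address; the lookup side of the bus map peels the same indices back off. A stand-alone sketch of that round trip, assuming 24-bit sections (SECTION_SIZE_BITS is platform dependent, and the names here are illustrative):

#include <assert.h>

#define SECTSHIFT		24	/* assumed; the driver uses SECTION_SIZE_BITS */
#define DIR_INDEX_SHIFT		13
#define TOP_INDEX_SHIFT		(DIR_INDEX_SHIFT * 2)
#define INDEX_MASK		((1UL << DIR_INDEX_SHIFT) - 1)

/* compose top/dir/idx into a section base, as ehca_calc_sectbase() does (minus __va()) */
static unsigned long long calc_sectbase(unsigned long top, unsigned long dir,
					unsigned long idx)
{
	unsigned long long sect = idx;

	sect |= dir << DIR_INDEX_SHIFT;
	sect |= top << TOP_INDEX_SHIFT;
	return sect << SECTSHIFT;
}

int main(void)
{
	unsigned long long base = calc_sectbase(2, 5, 7);
	unsigned long long sect = base >> SECTSHIFT;

	/* decomposition is the inverse: peel the indices back off the section number */
	assert((sect & INDEX_MASK) == 7);
	assert(((sect >> DIR_INDEX_SHIFT) & INDEX_MASK) == 5);
	assert(((sect >> TOP_INDEX_SHIFT) & INDEX_MASK) == 2);
	return 0;
}
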
-
-#define ehca_bmap_valid(entry) \
- ((u64)entry != (u64)EHCA_INVAL_ADDR)
-
-static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
- struct ehca_shca *shca, struct ehca_mr *mr,
- struct ehca_mr_pginfo *pginfo)
-{
- u64 h_ret = 0;
- unsigned long page = 0;
- u64 rpage = __pa(kpage);
- int page_count;
-
- void *sectbase = ehca_calc_sectbase(top, dir, idx);
- if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
-		ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
-			 "hwpage_size is not aligned with the "
-			 "section start address");
- }
- page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
-
- while (page < page_count) {
- u64 rnum;
- for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
- rnum++) {
- void *pg = sectbase + ((page++) * pginfo->hwpage_size);
- kpage[rnum] = __pa(pg);
- }
-
- h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
- ehca_encode_hwpage_size(pginfo->hwpage_size),
- 0, rpage, rnum);
-
- if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
- ehca_err(&shca->ib_device, "register_rpage_mr failed");
- return h_ret;
- }
- }
- return h_ret;
-}
-
-static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
- struct ehca_shca *shca, struct ehca_mr *mr,
- struct ehca_mr_pginfo *pginfo)
-{
- u64 hret = H_SUCCESS;
- int idx;
-
- for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
- continue;
-
- hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
- pginfo);
- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
- return hret;
- }
- return hret;
-}
-
-static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
- struct ehca_mr *mr,
- struct ehca_mr_pginfo *pginfo)
-{
- u64 hret = H_SUCCESS;
- int dir;
-
- for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
- continue;
-
- hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
- return hret;
- }
- return hret;
-}
-
-/* register internal max-MR to internal SHCA */
-int ehca_reg_internal_maxmr(
- struct ehca_shca *shca,
- struct ehca_pd *e_pd,
- struct ehca_mr **e_maxmr) /*OUT*/
-{
- int ret;
- struct ehca_mr *e_mr;
- u64 *iova_start;
- u64 size_maxmr;
- struct ehca_mr_pginfo pginfo;
- struct ib_phys_buf ib_pbuf;
- u32 num_kpages;
- u32 num_hwpages;
- u64 hw_pgsize;
-
- if (!ehca_bmap) {
- ret = -EFAULT;
- goto ehca_reg_internal_maxmr_exit0;
- }
-
- e_mr = ehca_mr_new();
- if (!e_mr) {
- ehca_err(&shca->ib_device, "out of memory");
- ret = -ENOMEM;
- goto ehca_reg_internal_maxmr_exit0;
- }
- e_mr->flags |= EHCA_MR_FLAG_MAXMR;
-
- /* register internal max-MR on HCA */
- size_maxmr = ehca_mr_len;
- iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
- ib_pbuf.addr = 0;
- ib_pbuf.size = size_maxmr;
- num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
- PAGE_SIZE);
- hw_pgsize = ehca_get_max_hwpage_size(shca);
- num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
- hw_pgsize);
-
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_kpages = num_kpages;
- pginfo.num_hwpages = num_hwpages;
- pginfo.hwpage_size = hw_pgsize;
- pginfo.u.phy.num_phys_buf = 1;
- pginfo.u.phy.phys_buf_array = &ib_pbuf;
-
- ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
- &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
- if (ret) {
- ehca_err(&shca->ib_device, "reg of internal max MR failed, "
- "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
- "num_hwpages=%x", e_mr, iova_start, size_maxmr,
- num_kpages, num_hwpages);
- goto ehca_reg_internal_maxmr_exit1;
- }
-
- /* successful registration of all pages */
- e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
- e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
- e_mr->ib.ib_mr.uobject = NULL;
- atomic_inc(&(e_pd->ib_pd.usecnt));
- atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
- *e_maxmr = e_mr;
- return 0;
-
-ehca_reg_internal_maxmr_exit1:
- ehca_mr_delete(e_mr);
-ehca_reg_internal_maxmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
- ret, shca, e_pd, e_maxmr);
- return ret;
-} /* end ehca_reg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
- struct ehca_mr *e_newmr,
- u64 *iova_start,
- int acl,
- struct ehca_pd *e_pd,
- u32 *lkey,
- u32 *rkey)
-{
- u64 h_ret;
- struct ehca_mr *e_origmr = shca->maxmr;
- u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
- h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
- (u64)iova_start, hipz_acl, e_pd->fw_pd,
- &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
- "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
- h_ret, e_origmr, shca->ipz_hca_handle.handle,
- e_origmr->ipz_mr_handle.handle,
- e_origmr->ib.ib_mr.lkey);
- return ehca2ib_return_code(h_ret);
- }
- /* successful registration */
- e_newmr->num_kpages = e_origmr->num_kpages;
- e_newmr->num_hwpages = e_origmr->num_hwpages;
- e_newmr->hwpage_size = e_origmr->hwpage_size;
- e_newmr->start = iova_start;
- e_newmr->size = e_origmr->size;
- e_newmr->acl = acl;
- e_newmr->ipz_mr_handle = hipzout.handle;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- return 0;
-} /* end ehca_reg_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
-{
- int ret;
- struct ehca_mr *e_maxmr;
- struct ib_pd *ib_pd;
-
- if (!shca->maxmr) {
- ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
- ret = -EINVAL;
- goto ehca_dereg_internal_maxmr_exit0;
- }
-
- e_maxmr = shca->maxmr;
- ib_pd = e_maxmr->ib.ib_mr.pd;
- shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
-
- ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
- if (ret) {
- ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
- "ret=%i e_maxmr=%p shca=%p lkey=%x",
- ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
- shca->maxmr = e_maxmr;
- goto ehca_dereg_internal_maxmr_exit0;
- }
-
- atomic_dec(&ib_pd->usecnt);
-
-ehca_dereg_internal_maxmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
- ret, shca, shca->maxmr);
- return ret;
-} /* end ehca_dereg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * check the physical buffer array of MR verbs for validity and
- * calculate the MR size
- */
-int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- u64 *iova_start,
- u64 *size)
-{
- struct ib_phys_buf *pbuf = phys_buf_array;
- u64 size_count = 0;
- u32 i;
-
- if (num_phys_buf == 0) {
- ehca_gen_err("bad phys buf array len, num_phys_buf=0");
- return -EINVAL;
- }
- /* check first buffer */
- if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
- ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
- "pbuf->addr=%llx pbuf->size=%llx",
- iova_start, pbuf->addr, pbuf->size);
- return -EINVAL;
- }
- if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
- (num_phys_buf > 1)) {
- ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
- "pbuf->size=%llx", pbuf->addr, pbuf->size);
- return -EINVAL;
- }
-
- for (i = 0; i < num_phys_buf; i++) {
- if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
- ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
- "pbuf->size=%llx",
- i, pbuf->addr, pbuf->size);
- return -EINVAL;
- }
- if (((i > 0) && /* not 1st */
- (i < (num_phys_buf - 1)) && /* not last */
- (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
- ehca_gen_err("bad size, i=%x pbuf->size=%llx",
- i, pbuf->size);
- return -EINVAL;
- }
- size_count += pbuf->size;
- pbuf++;
- }
-
- *size = size_count;
- return 0;
-} /* end ehca_mr_chk_buf_and_calc_size() */
-
-/*----------------------------------------------------------------------*/
-
-/* check the page list of the map FMR verb for validity */
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
- u64 *page_list,
- int list_len)
-{
- u32 i;
- u64 *page;
-
- if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
- ehca_gen_err("bad list_len, list_len=%x "
- "e_fmr->fmr_max_pages=%x fmr=%p",
- list_len, e_fmr->fmr_max_pages, e_fmr);
- return -EINVAL;
- }
-
- /* each page must be aligned */
- page = page_list;
- for (i = 0; i < list_len; i++) {
- if (*page % e_fmr->fmr_page_size) {
- ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
- "fmr_page_size=%x", i, *page, page, e_fmr,
- e_fmr->fmr_page_size);
- return -EINVAL;
- }
- page++;
- }
-
- return 0;
-} /* end ehca_fmr_check_page_list() */
-
-/*----------------------------------------------------------------------*/
-
-/* PAGE_SIZE >= pginfo->hwpage_size */
-static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
- u32 number,
- u64 *kpage)
-{
- int ret = 0;
- u64 pgaddr;
- u32 j = 0;
- int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
- struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
- while (*sg != NULL) {
- pgaddr = page_to_pfn(sg_page(*sg))
- << PAGE_SHIFT;
- *kpage = pgaddr + (pginfo->next_hwpage *
- pginfo->hwpage_size);
- if (!(*kpage)) {
- ehca_gen_err("pgaddr=%llx "
- "sg_dma_address=%llx "
- "entry=%llx next_hwpage=%llx",
- pgaddr, (u64)sg_dma_address(*sg),
- pginfo->u.usr.next_nmap,
- pginfo->next_hwpage);
- return -EFAULT;
- }
- (pginfo->hwpage_cnt)++;
- (pginfo->next_hwpage)++;
- kpage++;
- if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
- (pginfo->kpage_cnt)++;
- (pginfo->u.usr.next_nmap)++;
- pginfo->next_hwpage = 0;
- *sg = sg_next(*sg);
- }
- j++;
- if (j >= number)
- break;
- }
-
- return ret;
-}
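
ehca_set_pagebuf_user1() handles the PAGE_SIZE >= hwpage_size case: each kernel page is emitted as hwpages_per_kpage consecutive hardware-page addresses, and next_hwpage wraps to zero whenever a kernel page has been fully consumed. A small sketch of that counting under assumed sizes (64 KiB kernel pages over 4 KiB hardware pages; the values are illustrative):

#include <assert.h>

int main(void)
{
	unsigned long long page_size = 0x10000;   /* 64 KiB kernel pages, assumed */
	unsigned long long hwpage_size = 0x1000;  /* 4 KiB hardware pages, assumed */
	int hwpages_per_kpage = page_size / hwpage_size;
	int next_hwpage = 0, kpage_cnt = 0, hwpage_cnt = 0;

	/* emit 40 hardware pages; every 16th one advances to the next kernel page */
	while (hwpage_cnt < 40) {
		hwpage_cnt++;
		next_hwpage++;
		if (next_hwpage % hwpages_per_kpage == 0) {
			kpage_cnt++;
			next_hwpage = 0;
		}
	}
	assert(kpage_cnt == 2);		/* 40 hw pages fully cover two 64 KiB kernel pages */
	assert(next_hwpage == 8);	/* and reach 8 hw pages into the third */
	return 0;
}
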
-
-/*
- * check given pages for contiguous layout
- * last page addr is returned in prev_pgaddr for further check
- */
-static int ehca_check_kpages_per_ate(struct scatterlist **sg,
- int num_pages,
- u64 *prev_pgaddr)
-{
- for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
- u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
- if (ehca_debug_level >= 3)
- ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
- *(u64 *)__va(pgaddr));
- if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
-			ehca_gen_err("non-contiguous page found pgaddr=%llx "
- "prev_pgaddr=%llx entries_left_in_hwpage=%x",
- pgaddr, *prev_pgaddr, num_pages);
- return -EINVAL;
- }
- *prev_pgaddr = pgaddr;
- }
- return 0;
-}
-
-/* PAGE_SIZE < pginfo->hwpage_size */
-static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
- u32 number,
- u64 *kpage)
-{
- int ret = 0;
- u64 pgaddr, prev_pgaddr;
- u32 j = 0;
- int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
- int nr_kpages = kpages_per_hwpage;
- struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
- while (*sg != NULL) {
-
- if (nr_kpages == kpages_per_hwpage) {
- pgaddr = (page_to_pfn(sg_page(*sg))
- << PAGE_SHIFT);
- *kpage = pgaddr;
- if (!(*kpage)) {
- ehca_gen_err("pgaddr=%llx entry=%llx",
- pgaddr, pginfo->u.usr.next_nmap);
- ret = -EFAULT;
- return ret;
- }
- /*
- * The first page in a hwpage must be aligned;
- * the first MR page is exempt from this rule.
- */
- if (pgaddr & (pginfo->hwpage_size - 1)) {
- if (pginfo->hwpage_cnt) {
- ehca_gen_err(
- "invalid alignment "
- "pgaddr=%llx entry=%llx "
- "mr_pgsize=%llx",
- pgaddr, pginfo->u.usr.next_nmap,
- pginfo->hwpage_size);
- ret = -EFAULT;
- return ret;
- }
- /* first MR page */
- pginfo->kpage_cnt =
- (pgaddr &
- (pginfo->hwpage_size - 1)) >>
- PAGE_SHIFT;
- nr_kpages -= pginfo->kpage_cnt;
- *kpage = pgaddr &
- ~(pginfo->hwpage_size - 1);
- }
- if (ehca_debug_level >= 3) {
- u64 val = *(u64 *)__va(pgaddr);
- ehca_gen_dbg("kpage=%llx page=%llx "
- "value=%016llx",
- *kpage, pgaddr, val);
- }
- prev_pgaddr = pgaddr;
- *sg = sg_next(*sg);
- pginfo->kpage_cnt++;
- pginfo->u.usr.next_nmap++;
- nr_kpages--;
- if (!nr_kpages)
- goto next_kpage;
- continue;
- }
-
- ret = ehca_check_kpages_per_ate(sg, nr_kpages,
- &prev_pgaddr);
- if (ret)
- return ret;
- pginfo->kpage_cnt += nr_kpages;
- pginfo->u.usr.next_nmap += nr_kpages;
-
-next_kpage:
- nr_kpages = kpages_per_hwpage;
- (pginfo->hwpage_cnt)++;
- kpage++;
- j++;
- if (j >= number)
- break;
- }
-
- return ret;
-}
-
-static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
- u32 number, u64 *kpage)
-{
- int ret = 0;
- struct ib_phys_buf *pbuf;
- u64 num_hw, offs_hw;
- u32 i = 0;
-
- /* loop over desired phys_buf_array entries */
- while (i < number) {
- pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
- num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
- pbuf->size, pginfo->hwpage_size);
- offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
- pginfo->hwpage_size;
- while (pginfo->next_hwpage < offs_hw + num_hw) {
- /* sanity check */
- if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
- (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
- ehca_gen_err("kpage_cnt >= num_kpages, "
- "kpage_cnt=%llx num_kpages=%llx "
- "hwpage_cnt=%llx "
- "num_hwpages=%llx i=%x",
- pginfo->kpage_cnt,
- pginfo->num_kpages,
- pginfo->hwpage_cnt,
- pginfo->num_hwpages, i);
- return -EFAULT;
- }
- *kpage = (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
- (pginfo->next_hwpage * pginfo->hwpage_size);
-			if (!(*kpage) && pbuf->addr) {
- ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
- "next_hwpage=%llx", pbuf->addr,
- pbuf->size, pginfo->next_hwpage);
- return -EFAULT;
- }
- (pginfo->hwpage_cnt)++;
- (pginfo->next_hwpage)++;
- if (PAGE_SIZE >= pginfo->hwpage_size) {
- if (pginfo->next_hwpage %
- (PAGE_SIZE / pginfo->hwpage_size) == 0)
- (pginfo->kpage_cnt)++;
- } else
- pginfo->kpage_cnt += pginfo->hwpage_size /
- PAGE_SIZE;
- kpage++;
- i++;
-			if (i >= number)
-				break;
- }
- if (pginfo->next_hwpage >= offs_hw + num_hw) {
- (pginfo->u.phy.next_buf)++;
- pginfo->next_hwpage = 0;
- }
- }
- return ret;
-}
-
-static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
- u32 number, u64 *kpage)
-{
- int ret = 0;
- u64 *fmrlist;
- u32 i;
-
- /* loop over desired page_list entries */
- fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
- for (i = 0; i < number; i++) {
- *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +